diff --git a/.coveragerc b/.coveragerc deleted file mode 100644 index 9e7882a00..000000000 --- a/.coveragerc +++ /dev/null @@ -1,7 +0,0 @@ -[run] -branch = True -source = cinder -omit = cinder/tests/* - -[report] -ignore_errors = True diff --git a/.gitignore b/.gitignore deleted file mode 100644 index b724864ef..000000000 --- a/.gitignore +++ /dev/null @@ -1,39 +0,0 @@ -*.DS_Store -*.log -*.mo -*.pyc -*.sqlite -/.* -!.coveragerc -!.gitignore -!.mailmap -!.testr.conf -.*.sw? -AUTHORS -Authors -build/* -build-stamp -CA/ -ChangeLog -cinder.egg-info -cover/* -covhtml -dist/* -etc/cinder/cinder.conf.sample -instances -keeper -keys -local_settings.py -tools/lintstack.head.py -tools/pylint_exceptions -tags -# Files created by Sphinx build -doc/build -doc/source/_static/cinder.conf.sample -doc/source/drivers.rst - -#Files created for API reference -api-ref/build - -# Files created by releasenotes build -releasenotes/build diff --git a/.gitreview b/.gitreview deleted file mode 100644 index eecf93944..000000000 --- a/.gitreview +++ /dev/null @@ -1,4 +0,0 @@ -[gerrit] -host=review.openstack.org -port=29418 -project=openstack/cinder.git diff --git a/.testr.conf b/.testr.conf deleted file mode 100644 index f706ba7f8..000000000 --- a/.testr.conf +++ /dev/null @@ -1,8 +0,0 @@ -[DEFAULT] -test_command=OS_STDOUT_CAPTURE=${OS_STDOUT_CAPTURE:-1} \ - OS_STDERR_CAPTURE=${OS_STDERR_CAPTURE:-1} \ - OS_TEST_TIMEOUT=${OS_TEST_TIMEOUT:-60} \ - ${PYTHON:-python} -m subunit.run discover -t ./ ${OS_TEST_PATH:-./cinder/tests/unit} $LISTOPT $IDOPTION - -test_id_option=--load-list $IDFILE -test_list_option=--list diff --git a/CONTRIBUTING.rst b/CONTRIBUTING.rst deleted file mode 100644 index 0ffb31d1a..000000000 --- a/CONTRIBUTING.rst +++ /dev/null @@ -1,16 +0,0 @@ -If you would like to contribute to the development of OpenStack, -you must follow the steps in this page: - - https://docs.openstack.org/infra/manual/developers.html - -Once those steps have been completed, changes to OpenStack 
-should be submitted for review via the Gerrit tool, following -the workflow documented at: - - https://docs.openstack.org/infra/manual/developers.html#development-workflow - -Pull requests submitted through GitHub will be ignored. - -Bugs should be filed on Launchpad, not in GitHub's issue tracker: - - https://bugs.launchpad.net/cinder diff --git a/HACKING.rst b/HACKING.rst deleted file mode 100644 index d33b9d371..000000000 --- a/HACKING.rst +++ /dev/null @@ -1,56 +0,0 @@ -Cinder Style Commandments -========================= - -- Step 1: Read the OpenStack Style Commandments - http://docs.openstack.org/developer/hacking/ -- Step 2: Read on - -Cinder Specific Commandments ----------------------------- -- [N314] Check for vi editor configuration in source files. -- [N322] Ensure default arguments are not mutable. -- [N323] Add check for explicit import of _() to ensure proper translation. -- [N325] str() and unicode() cannot be used on an exception. Remove or use six.text_type(). -- [N336] Must use a dict comprehension instead of a dict constructor with a sequence of key-value pairs. -- [C301] timeutils.utcnow() from oslo_utils should be used instead of datetime.now(). -- [C302] six.text_type should be used instead of unicode. -- [C303] Ensure that there are no 'print()' statements in code that is being committed. -- [C304] Enforce no use of LOG.audit messages. LOG.info should be used instead. -- [C305] Prevent use of deprecated contextlib.nested. -- [C306] timeutils.strtime() must not be used (deprecated). -- [C307] LOG.warn is deprecated. Enforce use of LOG.warning. -- [C308] timeutils.isotime() must not be used (deprecated). -- [C309] Unit tests should not perform logging. -- [C310] Check for improper use of logging format arguments. -- [C311] Check for proper naming and usage in option registration. -- [C312] Validate that logs are not translated. -- [C313] Check that assertTrue(value) is used and not assertEqual(True, value). 
- -General -------- -- Use 'raise' instead of 'raise e' to preserve original traceback or exception being reraised:: - - except Exception as e: - ... - raise e # BAD - - except Exception: - ... - raise # OKAY - - - -Creating Unit Tests -------------------- -For every new feature, unit tests should be created that both test and -(implicitly) document the usage of said feature. If submitting a patch for a -bug that had no unit test, a new passing unit test should be added. If a -submitted bug fix does have a unit test, be sure to add a new one that fails -without the patch and passes with the patch. - -Cinder is transitioning to use mock, rather than mox, and so new tests should -use mock only. - -For more information on creating unit tests and utilizing the testing -infrastructure in OpenStack Cinder, please see -http://docs.openstack.org/developer/cinder/devref/testing.html diff --git a/LICENSE b/LICENSE deleted file mode 100644 index 68c771a09..000000000 --- a/LICENSE +++ /dev/null @@ -1,176 +0,0 @@ - - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. 
- - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. 
For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. 
If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. 
You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. 
Limitation of Liability. In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - diff --git a/README b/README new file mode 100644 index 000000000..8fcd2b2f8 --- /dev/null +++ b/README @@ -0,0 +1,14 @@ +This project is no longer maintained. + +The contents of this repository are still available in the Git +source code management system. To see the contents of this +repository before it reached its end of life, please check out the +previous commit with "git checkout HEAD^1". + +For ongoing work on maintaining OpenStack packages in the Debian +distribution, please see the Debian OpenStack packaging team at +https://wiki.debian.org/OpenStack/. 
+ +For any further questions, please email +openstack-dev@lists.openstack.org or join #openstack-dev on +Freenode. diff --git a/README.rst b/README.rst deleted file mode 100644 index 8e010fad8..000000000 --- a/README.rst +++ /dev/null @@ -1,34 +0,0 @@ -======================== -Team and repository tags -======================== - -.. image:: https://governance.openstack.org/badges/cinder.svg - :target: https://governance.openstack.org/reference/tags/index.html - -.. Change things from this point on - -====== -CINDER -====== - -You have come across a storage service for an open cloud computing service. -It has identified itself as `Cinder`. It was abstracted from the Nova project. - -* Wiki: https://wiki.openstack.org/Cinder -* Developer docs: https://docs.openstack.org/cinder/latest/ - -Getting Started ---------------- - -If you'd like to run from the master branch, you can clone the git repo: - - git clone https://git.openstack.org/openstack/cinder.git - -For developer information please see -`HACKING.rst `_ - -You can raise bugs here https://bugs.launchpad.net/cinder - -Python client -------------- -https://git.openstack.org/cgit/openstack/python-cinderclient diff --git a/api-ref/source/conf.py b/api-ref/source/conf.py deleted file mode 100644 index f5385d9bf..000000000 --- a/api-ref/source/conf.py +++ /dev/null @@ -1,232 +0,0 @@ -# -*- coding: utf-8 -*- -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
-# -# Cinder documentation build configuration file, created by -# sphinx-quickstart on Sat May 1 15:17:47 2010. -# -# This file is execfile()d with the current directory set to -# its containing dir. -# -# Note that not all possible configuration values are present in this -# autogenerated file. -# -# All configuration values have a default; values that are commented out -# serve to show the default. - -import os -import subprocess -import sys - -import openstackdocstheme # noqa - -# If extensions (or modules to document with autodoc) are in another directory, -# add these directories to sys.path here. If the directory is relative to the -# documentation root, use os.path.abspath to make it absolute, like shown here. -sys.path.insert(0, os.path.abspath('../../')) -sys.path.insert(0, os.path.abspath('../')) -sys.path.insert(0, os.path.abspath('./')) - -# -- General configuration ---------------------------------------------------- - -# Add any Sphinx extension module names here, as strings. They can be -# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom ones. - -extensions = [ - 'os_api_ref' -] - -# The suffix of source filenames. -source_suffix = '.rst' - -# The encoding of source files. -# -# source_encoding = 'utf-8' - -# The master toctree document. -master_doc = 'index' - -# General information about the project. -project = u'Block Storage API Reference' -copyright = u'OpenStack Foundation' - -# The version info for the project you're documenting, acts as replacement for -# |version| and |release|, also used in various other places throughout the -# built documents. -# -from cinder.version import version_info -# The full version, including alpha/beta/rc tags. -release = version_info.release_string() -# The short X.Y version. 
-version = version_info.version_string() - -# Config logABug feature -giturl = u'https://git.openstack.org/cgit/openstack/cinder/tree/api-ref/source' -# source tree -# html_context allows us to pass arbitrary values into the html template -html_context = {"bug_tag": "api-ref", - "giturl": giturl, - "bug_project": "cinder"} - -# The language for content autogenerated by Sphinx. Refer to documentation -# for a list of supported languages. -# -# language = None - -# There are two options for replacing |today|: either, you set today to some -# non-false value, then it is used: -# today = '' -# Else, today_fmt is used as the format for a strftime call. -# today_fmt = '%B %d, %Y' - -# The reST default role (used for this markup: `text`) to use -# for all documents. -# default_role = None - -# If true, '()' will be appended to :func: etc. cross-reference text. -# add_function_parentheses = True - -# If true, the current module name will be prepended to all description -# unit titles (such as .. function::). -add_module_names = False - -# If true, sectionauthor and moduleauthor directives will be shown in the -# output. They are ignored by default. -show_authors = False - -# The name of the Pygments (syntax highlighting) style to use. -pygments_style = 'sphinx' - -# -- Options for man page output ---------------------------------------------- - -# Grouping the document tree for man pages. -# List of tuples 'sourcefile', 'target', u'title', u'Authors name', 'manual' - - -# -- Options for HTML output -------------------------------------------------- - -# The theme to use for HTML and HTML Help pages. Major themes that come with -# Sphinx are currently 'default' and 'sphinxdoc'. -# html_theme_path = ["."] -# html_theme = '_theme' - -html_theme = 'openstackdocs' -html_theme_path = [openstackdocstheme.get_html_theme_path()] -html_theme_options = { - "sidebar_mode": "toc", -} - - -# Theme options are theme-specific and customize the look and feel of a theme -# further. 
For a list of options available for each theme, see the -# documentation. -# html_theme_options = {} - -# Add any paths that contain custom themes here, relative to this directory. -# html_theme_path = [] - -# The name for this set of Sphinx documents. If None, it defaults to -# " v documentation". -# html_title = None - -# A shorter title for the navigation bar. Default is the same as html_title. -# html_short_title = None - -# The name of an image file (relative to this directory) to place at the top -# of the sidebar. -# html_logo = None - -# The name of an image file (within the static path) to use as favicon of the -# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32 -# pixels large. -# html_favicon = None - -# Add any paths that contain custom static files (such as style sheets) here, -# relative to this directory. They are copied after the builtin static files, -# so a file named "default.css" will overwrite the builtin "default.css". -# html_static_path = ['_static'] - -# If not '', a 'Last updated on:' timestamp is inserted at every page bottom, -# using the given strftime format. -# html_last_updated_fmt = '%b %d, %Y' -git_cmd = ["git", "log", "--pretty=format:%ad, commit %h", "--date=local", - "-n1"] -html_last_updated_fmt = subprocess.check_output(git_cmd).decode('utf-8') - -# If true, SmartyPants will be used to convert quotes and dashes to -# typographically correct entities. -# html_use_smartypants = True - -# Custom sidebar templates, maps document names to template names. -# html_sidebars = {} - -# Additional templates that should be rendered to pages, maps page names to -# template names. -# html_additional_pages = {} - -# If false, no module index is generated. -# html_use_modindex = True - -# If false, no index is generated. -# html_use_index = True - -# If true, the index is split into individual pages for each letter. -# html_split_index = False - -# If true, links to the reST sources are added to the pages. 
-# html_show_sourcelink = True - -# If true, an OpenSearch description file will be output, and all pages will -# contain a tag referring to it. The value of this option must be the -# base URL from which the finished HTML is served. -# html_use_opensearch = '' - -# If nonempty, this is the file name suffix for HTML files (e.g. ".xhtml"). -# html_file_suffix = '' - -# Output file base name for HTML help builder. -htmlhelp_basename = 'cinderdoc' - - -# -- Options for LaTeX output ------------------------------------------------- - -# The paper size ('letter' or 'a4'). -# latex_paper_size = 'letter' - -# The font size ('10pt', '11pt' or '12pt'). -# latex_font_size = '10pt' - -# Grouping the document tree into LaTeX files. List of tuples -# (source start file, target name, title, author, documentclass -# [howto/manual]). -latex_documents = [ - ('index', 'Cinder.tex', u'OpenStack Block Storage API Documentation', - u'OpenStack Foundation', 'manual'), -] - -# The name of an image file (relative to this directory) to place at the top of -# the title page. -# latex_logo = None - -# For "manual" documents, if this is true, then toplevel headings are parts, -# not chapters. -# latex_use_parts = False - -# Additional stuff for the LaTeX preamble. -# latex_preamble = '' - -# Documents to append as an appendix to all manuals. -# latex_appendices = [] - -# If false, no module index is generated. -# latex_use_modindex = True diff --git a/api-ref/source/index.rst b/api-ref/source/index.rst deleted file mode 100644 index 67c30d934..000000000 --- a/api-ref/source/index.rst +++ /dev/null @@ -1,19 +0,0 @@ -================= -Block Storage API -================= - -Contents: - -.. 
toctree:: - :maxdepth: 2 - - v3/index - v2/index - v1/index - - -Indices and tables -================== - -* :ref:`genindex` -* :ref:`search` diff --git a/api-ref/source/v1/index.rst b/api-ref/source/v1/index.rst deleted file mode 100644 index 7fc691a9b..000000000 --- a/api-ref/source/v1/index.rst +++ /dev/null @@ -1,13 +0,0 @@ -:tocdepth: 2 - -================================= -Block Storage API V1 (DEPRECATED) -================================= - -.. rest_expand_all:: - -.. include:: os-quota-sets-v1.inc -.. include:: volumes-v1-snapshots.inc -.. include:: volumes-v1-types.inc -.. include:: volumes-v1-versions.inc -.. include:: volumes-v1-volumes.inc diff --git a/api-ref/source/v1/os-quota-sets-v1.inc b/api-ref/source/v1/os-quota-sets-v1.inc deleted file mode 100644 index c4a9dc556..000000000 --- a/api-ref/source/v1/os-quota-sets-v1.inc +++ /dev/null @@ -1,408 +0,0 @@ -.. -*- rst -*- - -Quota sets extension (os-quota-sets) -==================================== - -Administrators only, depending on policy settings. - -Shows, updates, and deletes quotas for a tenant. - - -Show quota details for user (v1) -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -.. rest_method:: GET /v1/{admin_tenant_id}/os-quota-sets/{tenant_id}/detail/{user_id} - -Shows details for quotas for a tenant and user. - -Normal response codes: 200 -Error response codes: - -Request -------- - -.. rest_parameters:: parameters.yaml - - - admin_tenant_id: admin_tenant_id - - tenant_id: tenant_id - - user_id: user_id - - -Response Parameters -------------------- - -.. 
rest_parameters:: parameters.yaml - - - injected_file_content_bytes: injected_file_content_bytes - - metadata_items: metadata_items - - reserved: reserved - - in_use: in_use - - ram: ram - - floating_ips: floating_ips - - key_pairs: key_pairs - - injected_file_path_bytes: injected_file_path_bytes - - instances: instances - - limit: limit - - security_group_rules: security_group_rules - - injected_files: injected_files - - quota_set: quota_set - - cores: cores - - fixed_ips: fixed_ips - - id: id - - security_groups: security_groups - -Response Example ----------------- - -.. literalinclude:: ./samples/user-quotas-show-detail-response.json - :language: javascript - -Show default quotas -~~~~~~~~~~~~~~~~~~~ - -.. rest_method:: GET /v1/{tenant_id}/os-quota-sets/defaults - -Shows default quotas for a tenant. - - -Normal response codes: 200 -Error response codes: - - -Request -------- - -.. rest_parameters:: parameters.yaml - - - tenant_id: tenant_id - - -Response Parameters -------------------- - -.. rest_parameters:: parameters.yaml - - - injected_file_content_bytes: injected_file_content_bytes - - metadata_items: metadata_items - - reserved: reserved - - in_use: in_use - - ram: ram - - floating_ips: floating_ips - - key_pairs: key_pairs - - injected_file_path_bytes: injected_file_path_bytes - - instances: instances - - security_group_rules: security_group_rules - - injected_files: injected_files - - quota_set: quota_set - - cores: cores - - fixed_ips: fixed_ips - - id: id - - security_groups: security_groups - -Response Example ----------------- - -.. literalinclude:: ./samples/quotas-defaults-show-response.json - :language: javascript - - -Show quotas (v1) -~~~~~~~~~~~~~~~~ - -.. rest_method:: GET /v1/{admin_tenant_id}/os-quota-sets/{tenant_id} - -Shows quotas for a tenant. - - -Normal response codes: 200 -Error response codes: - - -Request -------- - -.. 
rest_parameters:: parameters.yaml - - - admin_tenant_id: admin_tenant_id - - tenant_id: tenant_id - - usage: usage - - -Response Parameters -------------------- - -.. rest_parameters:: parameters.yaml - - - injected_file_content_bytes: injected_file_content_bytes - - metadata_items: metadata_items - - reserved: reserved - - in_use: in_use - - ram: ram - - floating_ips: floating_ips - - key_pairs: key_pairs - - injected_file_path_bytes: injected_file_path_bytes - - instances: instances - - security_group_rules: security_group_rules - - injected_files: injected_files - - quota_set: quota_set - - cores: cores - - fixed_ips: fixed_ips - - id: id - - security_groups: security_groups - - -Response Example ----------------- - -.. literalinclude:: ./samples/quotas-show-response.json - :language: javascript - - -Update quotas (v1) -~~~~~~~~~~~~~~~~~~ - -.. rest_method:: PUT /v1/{admin_tenant_id}/os-quota-sets/{tenant_id} - -Updates quotas for a tenant. - - -Normal response codes: 200 -Error response codes: - - -Request -------- - -.. rest_parameters:: parameters.yaml - - - injected_file_content_bytes: injected_file_content_bytes - - metadata_items: metadata_items - - ram: ram - - floating_ips: floating_ips - - key_pairs: key_pairs - - id: id - - instances: instances - - security_group_rules: security_group_rules - - injected_files: injected_files - - quota_set: quota_set - - cores: cores - - fixed_ips: fixed_ips - - injected_file_path_bytes: injected_file_path_bytes - - security_groups: security_groups - - admin_tenant_id: admin_tenant_id - - tenant_id: tenant_id - -Request Example ---------------- - -.. literalinclude:: ./samples/quotas-update-request.json - :language: javascript - - - -Response Parameters -------------------- - -.. 
rest_parameters:: parameters.yaml - - - injected_file_content_bytes: injected_file_content_bytes - - metadata_items: metadata_items - - reserved: reserved - - in_use: in_use - - ram: ram - - floating_ips: floating_ips - - key_pairs: key_pairs - - injected_file_path_bytes: injected_file_path_bytes - - instances: instances - - security_group_rules: security_group_rules - - injected_files: injected_files - - quota_set: quota_set - - cores: cores - - fixed_ips: fixed_ips - - id: id - - security_groups: security_groups - -Response Example ----------------- - -.. literalinclude:: ./samples/quotas-update-response.json - :language: javascript - - -Delete quotas (v1) -~~~~~~~~~~~~~~~~~~ - -.. rest_method:: DELETE /v1/{admin_tenant_id}/os-quota-sets/{tenant_id} - -Deletes quotas for a tenant so the quotas revert to default values. - -Normal response codes: 200 -Error response codes: - -Request -------- - -.. rest_parameters:: parameters.yaml - - - admin_tenant_id: admin_tenant_id - - tenant_id: tenant_id - -Response Example ----------------- - -.. literalinclude:: ./samples/user-quotas-delete-response.json - :language: javascript - -Show quotas for user (v1) -~~~~~~~~~~~~~~~~~~~~~~~~~ - -.. rest_method:: GET /v1/{admin_tenant_id}/os-quota-sets/{tenant_id}/{user_id} - -Enables an admin user to show quotas for a tenant and user. - - -Normal response codes: 200 -Error response codes: - - -Request -------- - -.. rest_parameters:: parameters.yaml - - - admin_tenant_id: admin_tenant_id - - tenant_id: tenant_id - - user_id: user_id - - -Response Parameters -------------------- - -.. 
rest_parameters:: parameters.yaml - - - injected_file_content_bytes: injected_file_content_bytes - - metadata_items: metadata_items - - reserved: reserved - - in_use: in_use - - ram: ram - - floating_ips: floating_ips - - key_pairs: key_pairs - - injected_file_path_bytes: injected_file_path_bytes - - instances: instances - - security_group_rules: security_group_rules - - injected_files: injected_files - - quota_set: quota_set - - cores: cores - - fixed_ips: fixed_ips - - id: id - - security_groups: security_groups - - - -Response Example ----------------- - -.. literalinclude:: ./samples/user-quotas-show-response.json - :language: javascript - - - - -Update quotas for user (v1) -~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -.. rest_method:: POST /v1/{admin_tenant_id}/os-quota-sets/{tenant_id}/{user_id} - -Updates quotas for a tenant and user. - - -Normal response codes: 200 -Error response codes: - - -Request -------- - -.. rest_parameters:: parameters.yaml - - - injected_file_content_bytes: injected_file_content_bytes - - metadata_items: metadata_items - - ram: ram - - floating_ips: floating_ips - - key_pairs: key_pairs - - id: id - - instances: instances - - security_group_rules: security_group_rules - - injected_files: injected_files - - quota_set: quota_set - - cores: cores - - fixed_ips: fixed_ips - - injected_file_path_bytes: injected_file_path_bytes - - security_groups: security_groups - - admin_tenant_id: admin_tenant_id - - tenant_id: tenant_id - - user_id: user_id - -Request Example ---------------- - -.. literalinclude:: ./samples/user-quotas-update-request.json - :language: javascript - - - -Response Parameters -------------------- - -.. 
rest_parameters:: parameters.yaml - - - injected_file_content_bytes: injected_file_content_bytes - - metadata_items: metadata_items - - reserved: reserved - - in_use: in_use - - ram: ram - - floating_ips: floating_ips - - key_pairs: key_pairs - - injected_file_path_bytes: injected_file_path_bytes - - instances: instances - - security_group_rules: security_group_rules - - injected_files: injected_files - - quota_set: quota_set - - cores: cores - - fixed_ips: fixed_ips - - id: id - - security_groups: security_groups - -Response Example ----------------- - -.. literalinclude:: ./samples/user-quotas-update-response.json - :language: javascript - - -Delete quotas for user (v1) -~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -.. rest_method:: DELETE /v1/{admin_tenant_id}/os-quota-sets/{tenant_id}/{user_id} - -Deletes quotas for a user so that the quotas revert to default values. - - -Normal response codes: 200 -Error response codes: - -Request -------- - -.. rest_parameters:: parameters.yaml - - - admin_tenant_id: admin_tenant_id - - tenant_id: tenant_id - - user_id: user_id - - -Response Example ----------------- - -.. literalinclude:: ./samples/user-quotas-delete-response.json - :language: javascript diff --git a/api-ref/source/v1/parameters.yaml b/api-ref/source/v1/parameters.yaml deleted file mode 100644 index 3e4604fcb..000000000 --- a/api-ref/source/v1/parameters.yaml +++ /dev/null @@ -1,648 +0,0 @@ -# variables in header -x-openstack-request-id: - description: > - foo - in: header - required: false - type: string - -# variables in path -admin_tenant_id: - description: | - The UUID of the administrative tenant. - in: path - required: false - type: string -snapshot_id_1: - description: | - The UUID of the snapshot. - in: path - required: false - type: string -tenant_id: - description: | - The UUID of the tenant in a multi-tenancy cloud. - in: path - required: false - type: string -user_id: - description: | - The user ID. Specify in the URI as - ``user_id={user_id}``. 
- in: path - required: false - type: string -volume_id: - description: | - The UUID of the volume. - in: path - required: false - type: string -volume_type_id: - description: | - The UUID for an existing volume type. - in: path - required: false - type: string - -# variables in query -usage: - description: | - Set to ``usage=true`` to show quota usage. - Default is ``false``. - in: query - required: false - type: boolean - -# variables in body -attachments: - description: | - Instance attachment information. If this volume - is attached to a server instance, the attachments list includes - the UUID of the attached server, an attachment UUID, the name of - the attached host, if any, the volume UUID, the device, and the - device UUID. Otherwise, this list is empty. - in: body - required: true - type: array -availability_zone: - description: | - The availability zone. - in: body - required: false - type: string -availability_zone_1: - description: | - The availability zone. - in: body - required: true - type: string -bootable: - description: | - Enables or disables the bootable attribute. You - can boot an instance from a bootable volume. - in: body - required: true - type: boolean -consistencygroup_id: - description: | - The UUID of the consistency group. - in: body - required: false - type: string -consistencygroup_id_1: - description: | - The UUID of the consistency group. - in: body - required: true - type: string -cores: - description: | - The number of instance cores that are allowed for - each tenant. - in: body - required: true - type: integer -cores_1: - description: | - A ``cores`` object. - in: body - required: true - type: string -cores_2: - description: | - The number of instance cores that are allowed for - each tenant. - in: body - required: false - type: integer -created_at: - description: | - The date and time when the resource was created. 
- - The date and time stamp format is `ISO 8601 - `_: - - :: - - CCYY-MM-DDThh:mm:ss±hh:mm - - For example, ``2015-08-27T09:49:58-05:00``. - - The ``±hh:mm`` value, if included, is the time zone as an offset - from UTC. - in: body - required: true - type: string -description: - description: | - The volume description. - in: body - required: false - type: string -description_1: - description: | - The volume description. - in: body - required: true - type: string -encrypted: - description: | - If true, this volume is encrypted. - in: body - required: true - type: boolean -extra_specs: - description: | - A set of key and value pairs that contains the - specifications for a volume type. - in: body - required: true - type: object -fixed_ips: - description: | - The number of fixed IP addresses that are allowed - for each tenant. Must be equal to or greater than the number of - allowed instances. - in: body - required: true - type: integer -fixed_ips_1: - description: | - A ``fixed_ips`` object. - in: body - required: true - type: string -fixed_ips_2: - description: | - The number of fixed IP addresses that are allowed - for each tenant. Must be equal to or greater than the number of - allowed instances. - in: body - required: false - type: integer -floating_ips: - description: | - The number of floating IP addresses that are - allowed for each tenant. - in: body - required: true - type: integer -floating_ips_1: - description: | - A ``floating_ips`` object. - in: body - required: true - type: string -floating_ips_2: - description: | - The number of floating IP addresses that are - allowed for each tenant. - in: body - required: false - type: integer -id: - description: | - The UUID of the volume. - in: body - required: true - type: string -id_1: - description: | - The ID for the quota set. - in: body - required: true - type: integer -id_2: - description: | - The ID for the quota set. 
- in: body - required: true - type: string -id_3: - description: | - The ID for the quota set. - in: body - required: false - type: integer -imageRef: - description: | - The UUID of the image from which you want to - create the volume. Required to create a bootable volume. - in: body - required: false - type: string -in_use: - description: | - The in use data size. Visible only if you set the - ``usage=true`` query parameter. - in: body - required: false - type: string -in_use_1: - description: | - The number of items in use. - in: body - required: true - type: integer -injected_file_content_bytes: - description: | - The number of bytes of content that are allowed - for each injected file. - in: body - required: true - type: integer -injected_file_content_bytes_1: - description: | - An ``injected_file_content_bytes`` object. - in: body - required: true - type: string -injected_file_content_bytes_2: - description: | - The number of bytes of content that are allowed - for each injected file. - in: body - required: false - type: integer -injected_file_path_bytes: - description: | - The number of bytes that are allowed for each - injected file path. - in: body - required: true - type: integer -injected_file_path_bytes_1: - description: | - An ``injected_file_path_bytes`` object. - in: body - required: true - type: string -injected_file_path_bytes_2: - description: | - The number of bytes that are allowed for each - injected file path. - in: body - required: false - type: integer -injected_files: - description: | - The number of injected files that are allowed for - each tenant. - in: body - required: true - type: integer -injected_files_1: - description: | - An ``injected_files`` object. - in: body - required: true - type: string -injected_files_2: - description: | - The number of injected files that are allowed for - each tenant. - in: body - required: false - type: integer -instances: - description: | - The number of instances that are allowed for each - tenant. 
- in: body - required: true - type: integer -instances_1: - description: | - An ``instances`` object. - in: body - required: true - type: string -instances_2: - description: | - The number of instances that are allowed for each - tenant. - in: body - required: false - type: integer -key_pairs: - description: | - The number of key pairs that are allowed for each - user. - in: body - required: true - type: integer -key_pairs_1: - description: | - A ``key_pairs`` object. - in: body - required: true - type: string -key_pairs_2: - description: | - The number of key pairs that are allowed for each - user. - in: body - required: false - type: integer -limit: - description: | - The number of items permitted for this tenant. - in: body - required: true - type: integer -links: - description: | - The volume links. - in: body - required: true - type: array -metadata: - description: | - One or more metadata key and value pairs that are - associated with the volume. - in: body - required: false - type: object -metadata_1: - description: | - One or more metadata key and value pairs that are - associated with the volume. - in: body - required: true - type: object -metadata_2: - description: | - One or more metadata key and value pairs for the - snapshot. - in: body - required: false - type: object -metadata_items: - description: | - The number of metadata items that are allowed for - each instance. - in: body - required: true - type: integer -metadata_items_1: - description: | - A ``metadata_items`` object. - in: body - required: true - type: string -metadata_items_2: - description: | - The number of metadata items that are allowed for - each instance. - in: body - required: false - type: integer -migration_status: - description: | - The volume migration status. - in: body - required: true - type: string -multiattach: - description: | - To enable this volume to attach to more than one - server, set this value to ``true``. Default is ``false``. 
- in: body - required: false - type: boolean -multiattach_1: - description: | - If true, this volume can attach to more than one - instance. - in: body - required: true - type: boolean -name: - description: | - The name of the volume type. - in: body - required: true - type: string -name_1: - description: | - The volume name. - in: body - required: false - type: string -name_2: - description: | - The volume name. - in: body - required: true - type: string -OS-SCH-HNT:scheduler_hints: - description: | - The dictionary of data to send to the scheduler. - in: body - required: false - type: object -quota_set: - description: | - A ``quota_set`` object. - in: body - required: true - type: object -quota_set_1: - description: | - A ``quota_set`` object. - in: body - required: true - type: string -ram: - description: | - The amount of instance RAM in megabytes that are - allowed for each tenant. - in: body - required: true - type: integer -ram_1: - description: | - A ``ram`` object. - in: body - required: true - type: string -ram_2: - description: | - The amount of instance RAM in megabytes that are - allowed for each tenant. - in: body - required: false - type: integer -replication_status: - description: | - The volume replication status. - in: body - required: true - type: string -reserved: - description: | - Reserved volume size. Visible only if you set the - ``usage=true`` query parameter. - in: body - required: false - type: integer -reserved_1: - description: | - The number of reserved items. - in: body - required: true - type: integer -security_group_rules: - description: | - The number of rules that are allowed for each - security group. - in: body - required: false - type: integer -security_group_rules_1: - description: | - A ``security_group_rules`` object. - in: body - required: true - type: string -security_groups: - description: | - The number of security groups that are allowed - for each tenant. 
- in: body - required: true - type: integer -security_groups_1: - description: | - A ``security_groups`` object. - in: body - required: true - type: string -security_groups_2: - description: | - The number of security groups that are allowed - for each tenant. - in: body - required: false - type: integer -size: - description: | - The size of the volume, in gibibytes (GiB). - in: body - required: true - type: integer -snapshot: - description: | - A ``snapshot`` object. - in: body - required: true - type: object -snapshot_id: - description: | - To create a volume from an existing snapshot, - specify the UUID of the volume snapshot. The volume is created in - same availability zone and with same size as the snapshot. - in: body - required: false - type: string -snapshot_id_2: - description: | - The UUID of the source volume snapshot. The API - creates a new volume snapshot with the same size as the source - volume snapshot. - in: body - required: true - type: string -source_replica: - description: | - The UUID of the primary volume to clone. - in: body - required: false - type: string -source_volid: - description: | - The UUID of the source volume. The API creates a - new volume with the same size as the source volume. - in: body - required: false - type: string -source_volid_1: - description: | - The UUID of the source volume. - in: body - required: true - type: string -status: - description: | - The volume status. - in: body - required: true - type: string -updated_at: - description: | - The date and time when the resource was updated. - - The date and time stamp format is `ISO 8601 - `_: - - :: - - CCYY-MM-DDThh:mm:ss±hh:mm - - For example, ``2015-08-27T09:49:58-05:00``. - - The ``±hh:mm`` value, if included, is the time zone as an offset - from UTC. In the previous example, the offset value is ``-05:00``. - - If the ``updated_at`` date and time stamp is not set, its value is - ``null``. 
- in: body - required: true - type: string -user_id_1: - description: | - The UUID of the user. - in: body - required: true - type: string -volume: - description: | - A ``volume`` object. - in: body - required: true - type: object -volume_type: - description: | - The volume type. To create an environment with - multiple-storage back ends, you must specify a volume type. Block - Storage volume back ends are spawned as children to ``cinder- - volume``, and they are keyed from a unique queue. They are named - ``cinder- volume.HOST.BACKEND``. For example, ``cinder- - volume.ubuntu.lvmdriver``. When a volume is created, the scheduler - chooses an appropriate back end to handle the request based on the - volume type. Default is ``None``. For information about how to - use volume types to create multiple- storage back ends, see - `Configure multiple-storage back ends - `_. - in: body - required: false - type: string -volume_type_1: - description: | - The volume type. In an environment with multiple- - storage back ends, the scheduler determines where to send the - volume based on the volume type. For information about how to use - volume types to create multiple- storage back ends, see `Configure - multiple-storage back ends `_. - in: body - required: true - type: string -volumes: - description: | - A list of ``volume`` objects. 
- in: body - required: true - type: array diff --git a/api-ref/source/v1/samples/quotas-defaults-show-response.json b/api-ref/source/v1/samples/quotas-defaults-show-response.json deleted file mode 100644 index 239c64d23..000000000 --- a/api-ref/source/v1/samples/quotas-defaults-show-response.json +++ /dev/null @@ -1,17 +0,0 @@ -{ - "quota_set": { - "cores": 20, - "fixed_ips": -1, - "floating_ips": 10, - "id": "fake_tenant", - "injected_file_content_bytes": 10240, - "injected_file_path_bytes": 255, - "injected_files": 5, - "instances": 10, - "key_pairs": 100, - "metadata_items": 128, - "ram": 51200, - "security_group_rules": 20, - "security_groups": 10 - } -} diff --git a/api-ref/source/v1/samples/quotas-show-response.json b/api-ref/source/v1/samples/quotas-show-response.json deleted file mode 100644 index 239c64d23..000000000 --- a/api-ref/source/v1/samples/quotas-show-response.json +++ /dev/null @@ -1,17 +0,0 @@ -{ - "quota_set": { - "cores": 20, - "fixed_ips": -1, - "floating_ips": 10, - "id": "fake_tenant", - "injected_file_content_bytes": 10240, - "injected_file_path_bytes": 255, - "injected_files": 5, - "instances": 10, - "key_pairs": 100, - "metadata_items": 128, - "ram": 51200, - "security_group_rules": 20, - "security_groups": 10 - } -} diff --git a/api-ref/source/v1/samples/quotas-update-request.json b/api-ref/source/v1/samples/quotas-update-request.json deleted file mode 100644 index 1f12caa04..000000000 --- a/api-ref/source/v1/samples/quotas-update-request.json +++ /dev/null @@ -1,5 +0,0 @@ -{ - "quota_set": { - "security_groups": 45 - } -} diff --git a/api-ref/source/v1/samples/quotas-update-response.json b/api-ref/source/v1/samples/quotas-update-response.json deleted file mode 100644 index 2be76d472..000000000 --- a/api-ref/source/v1/samples/quotas-update-response.json +++ /dev/null @@ -1,16 +0,0 @@ -{ - "quota_set": { - "cores": 20, - "fixed_ips": -1, - "floating_ips": 10, - "injected_file_content_bytes": 10240, - "injected_file_path_bytes": 255, - 
"injected_files": 5, - "instances": 10, - "key_pairs": 100, - "metadata_items": 128, - "ram": 51200, - "security_group_rules": 20, - "security_groups": 45 - } -} diff --git a/api-ref/source/v1/samples/snapshot-create-request.json b/api-ref/source/v1/samples/snapshot-create-request.json deleted file mode 100644 index cc8ce2865..000000000 --- a/api-ref/source/v1/samples/snapshot-create-request.json +++ /dev/null @@ -1,8 +0,0 @@ -{ - "snapshot": { - "display_name": "snap-001", - "display_description": "Daily backup", - "volume_id": "521752a6-acf6-4b2d-bc7a-119f9148cd8c", - "force": true - } -} diff --git a/api-ref/source/v1/samples/snapshot-metadata-show-response.json b/api-ref/source/v1/samples/snapshot-metadata-show-response.json deleted file mode 100644 index 68c54641d..000000000 --- a/api-ref/source/v1/samples/snapshot-metadata-show-response.json +++ /dev/null @@ -1,16 +0,0 @@ -{ - "snapshot": { - "status": "available", - "os-extended-snapshot-attributes:progress": "0%", - "description": null, - "created_at": "2014-05-06T17:59:52.000000", - "metadata": { - "key": "v1" - }, - "volume_id": "ebd80b99-bc3d-4154-9d28-5583baa80580", - "os-extended-snapshot-attributes:project_id": "7e0105e19cd2466193729ef78b604f79", - "size": 10, - "id": "dfcd17fe-3b64-44ba-b95f-1c9c7109ef95", - "name": "my-snapshot" - } -} diff --git a/api-ref/source/v1/samples/snapshot-metadata-update-request.json b/api-ref/source/v1/samples/snapshot-metadata-update-request.json deleted file mode 100644 index 75accc1a6..000000000 --- a/api-ref/source/v1/samples/snapshot-metadata-update-request.json +++ /dev/null @@ -1,5 +0,0 @@ -{ - "metadata": { - "key": "v1" - } -} diff --git a/api-ref/source/v1/samples/snapshot-metadata-update-response.json b/api-ref/source/v1/samples/snapshot-metadata-update-response.json deleted file mode 100644 index 75accc1a6..000000000 --- a/api-ref/source/v1/samples/snapshot-metadata-update-response.json +++ /dev/null @@ -1,5 +0,0 @@ -{ - "metadata": { - "key": "v1" - } -} 
diff --git a/api-ref/source/v1/samples/snapshot-show-response.json b/api-ref/source/v1/samples/snapshot-show-response.json deleted file mode 100644 index f1514e860..000000000 --- a/api-ref/source/v1/samples/snapshot-show-response.json +++ /dev/null @@ -1,11 +0,0 @@ -{ - "snapshot": { - "id": "3fbbcccf-d058-4502-8844-6feeffdf4cb5", - "display_name": "snap-001", - "display_description": "Daily backup", - "volume_id": "521752a6-acf6-4b2d-bc7a-119f9148cd8c", - "status": "available", - "size": 30, - "created_at": "2012-02-29T03:50:07Z" - } -} diff --git a/api-ref/source/v1/samples/snapshots-list-response.json b/api-ref/source/v1/samples/snapshots-list-response.json deleted file mode 100644 index d148577e1..000000000 --- a/api-ref/source/v1/samples/snapshots-list-response.json +++ /dev/null @@ -1,26 +0,0 @@ -{ - "snapshots": [ - { - "id": "3fbbcccf-d058-4502-8844-6feeffdf4cb5", - "display_name": "snap-001", - "display_description": "Daily backup", - "volume_id": "521752a6-acf6-4b2d-bc7a-119f9148cd8c", - "status": "available", - "size": 30, - "created_at": "2012-02-29T03:50:07Z", - "metadata": { - "contents": "junk" - } - }, - { - "id": "e479997c-650b-40a4-9dfe-77655818b0d2", - "display_name": "snap-002", - "display_description": "Weekly backup", - "volume_id": "76b8950a-8594-4e5b-8dce-0dfa9c696358", - "status": "available", - "size": 25, - "created_at": "2012-03-19T01:52:47Z", - "metadata": {} - } - ] -} diff --git a/api-ref/source/v1/samples/user-quotas-delete-response.json b/api-ref/source/v1/samples/user-quotas-delete-response.json deleted file mode 100644 index e69de29bb..000000000 diff --git a/api-ref/source/v1/samples/user-quotas-show-detail-response.json b/api-ref/source/v1/samples/user-quotas-show-detail-response.json deleted file mode 100644 index 53ecff0ba..000000000 --- a/api-ref/source/v1/samples/user-quotas-show-detail-response.json +++ /dev/null @@ -1,64 +0,0 @@ -{ - "quota_set": { - "cores": { - "in_use": 0, - "limit": 20, - "reserved": 0 - }, - 
"fixed_ips": { - "in_use": 0, - "limit": -1, - "reserved": 0 - }, - "floating_ips": { - "in_use": 0, - "limit": 10, - "reserved": 0 - }, - "injected_files": { - "in_use": 0, - "limit": 5, - "reserved": 0 - }, - "instances": { - "in_use": 0, - "limit": 10, - "reserved": 0 - }, - "key_pairs": { - "in_use": 0, - "limit": 100, - "reserved": 0 - }, - "metadata_items": { - "in_use": 0, - "limit": 128, - "reserved": 0 - }, - "ram": { - "in_use": 0, - "limit": 51200, - "reserved": 0 - }, - "security_groups": { - "in_use": 0, - "limit": 10, - "reserved": 0 - }, - "injected_file_content_bytes": { - "in_use": 0, - "limit": 10240, - "reserved": 0 - }, - "injected_file_path_bytes": { - "in_use": 0, - "limit": 255, - "reserved": 0 - }, - "security_group_rules": { - "in_use": 0, - "limit": 20, - "reserved": 0 - } - } -} diff --git a/api-ref/source/v1/samples/user-quotas-show-response.json b/api-ref/source/v1/samples/user-quotas-show-response.json deleted file mode 100644 index 239c64d23..000000000 --- a/api-ref/source/v1/samples/user-quotas-show-response.json +++ /dev/null @@ -1,17 +0,0 @@ -{ - "quota_set": { - "cores": 20, - "fixed_ips": -1, - "floating_ips": 10, - "id": "fake_tenant", - "injected_file_content_bytes": 10240, - "injected_file_path_bytes": 255, - "injected_files": 5, - "instances": 10, - "key_pairs": 100, - "metadata_items": 128, - "ram": 51200, - "security_group_rules": 20, - "security_groups": 10 - } -} diff --git a/api-ref/source/v1/samples/user-quotas-update-request.json b/api-ref/source/v1/samples/user-quotas-update-request.json deleted file mode 100644 index 6e5195f9a..000000000 --- a/api-ref/source/v1/samples/user-quotas-update-request.json +++ /dev/null @@ -1,6 +0,0 @@ -{ - "quota_set": { - "force": true, - "instances": 9 - } -} diff --git a/api-ref/source/v1/samples/user-quotas-update-response.json b/api-ref/source/v1/samples/user-quotas-update-response.json deleted file mode 100644 index 553933292..000000000 --- 
a/api-ref/source/v1/samples/user-quotas-update-response.json +++ /dev/null @@ -1,16 +0,0 @@ -{ - "quota_set": { - "cores": 20, - "floating_ips": 10, - "fixed_ips": -1, - "injected_file_content_bytes": 10240, - "injected_file_path_bytes": 255, - "injected_files": 5, - "instances": 9, - "key_pairs": 100, - "metadata_items": 128, - "ram": 51200, - "security_group_rules": 20, - "security_groups": 10 - } -} diff --git a/api-ref/source/v1/samples/version-show-response.json b/api-ref/source/v1/samples/version-show-response.json deleted file mode 100644 index 594c5f18a..000000000 --- a/api-ref/source/v1/samples/version-show-response.json +++ /dev/null @@ -1,24 +0,0 @@ -{ - "version": { - "id": "v1.0", - "links": [ - { - "href": "http://23.253.211.234:8776/v1/", - "rel": "self" - }, - { - "href": "http://docs.openstack.org/", - "rel": "describedby", - "type": "text/html" - } - ], - "media-types": [ - { - "base": "application/json", - "type": "application/vnd.openstack.volume+json;version=1" - } - ], - "status": "DEPRECATED", - "updated": "2014-06-28T12:20:21Z" - } -} diff --git a/api-ref/source/v1/samples/versions-list-response.json b/api-ref/source/v1/samples/versions-list-response.json deleted file mode 100644 index 29b36bd17..000000000 --- a/api-ref/source/v1/samples/versions-list-response.json +++ /dev/null @@ -1,26 +0,0 @@ -{ - "versions": [ - { - "id": "v1.0", - "links": [ - { - "href": "http://23.253.211.234:8776/v1/", - "rel": "self" - } - ], - "status": "DEPRECATED", - "updated": "2014-06-28T12:20:21Z" - }, - { - "id": "v2.0", - "links": [ - { - "href": "http://23.253.211.234:8776/v2/", - "rel": "self" - } - ], - "status": "CURRENT", - "updated": "2012-11-21T11:33:21Z" - } - ] -} diff --git a/api-ref/source/v1/samples/volume-create-request.json b/api-ref/source/v1/samples/volume-create-request.json deleted file mode 100644 index 171a68c39..000000000 --- a/api-ref/source/v1/samples/volume-create-request.json +++ /dev/null @@ -1,18 +0,0 @@ -{ - "volume": { - 
"display_name": "vol-001", - "display_description": "Another volume.", - "size": 30, - "volume_type": "289da7f8-6440-407c-9fb4-7db01ec49164", - "metadata": { - "contents": "junk" - }, - "availability_zone": "us-east1" - }, - "OS-SCH-HNT:scheduler_hints": { - "same_host": [ - "a0cf03a5-d921-4877-bb5c-86d26cf818e1", - "8c19174f-4220-44f0-824a-cd1eeef10287" - ] - } -} diff --git a/api-ref/source/v1/samples/volume-show-response.json b/api-ref/source/v1/samples/volume-show-response.json deleted file mode 100644 index 0118c2fe2..000000000 --- a/api-ref/source/v1/samples/volume-show-response.json +++ /dev/null @@ -1,27 +0,0 @@ -{ - "volume": { - "id": "521752a6-acf6-4b2d-bc7a-119f9148cd8c", - "display_name": "vol-001", - "display_description": "Another volume.", - "status": "active", - "size": 30, - "volume_type": "289da7f8-6440-407c-9fb4-7db01ec49164", - "metadata": { - "contents": "junk" - }, - "availability_zone": "us-east1", - "bootable": "false", - "snapshot_id": null, - "attachments": [ - { - "attachment_id": "03987cd1-0ad5-40d1-9b2a-7cc48295d4fa", - "id": "47e9ecc5-4045-4ee3-9a4b-d859d546a0cf", - "volume_id": "6c80f8ac-e3e2-480c-8e6e-f1db92fe4bfe", - "server_id": "d1c4788b-9435-42e2-9b81-29f3be1cd01f", - "host_name": "mitaka", - "device": "/" - } - ], - "created_at": "2012-02-14T20:53:07Z" - } -} diff --git a/api-ref/source/v1/samples/volume-type-create-request.json b/api-ref/source/v1/samples/volume-type-create-request.json deleted file mode 100644 index af7e47f6b..000000000 --- a/api-ref/source/v1/samples/volume-type-create-request.json +++ /dev/null @@ -1,8 +0,0 @@ -{ - "volume_type": { - "name": "vol-type-001", - "extra_specs": { - "capabilities": "gpu" - } - } -} diff --git a/api-ref/source/v1/samples/volume-type-show-response.json b/api-ref/source/v1/samples/volume-type-show-response.json deleted file mode 100644 index a91f2e94d..000000000 --- a/api-ref/source/v1/samples/volume-type-show-response.json +++ /dev/null @@ -1,9 +0,0 @@ -{ - "volume_type": { - 
"id": "289da7f8-6440-407c-9fb4-7db01ec49164", - "name": "vol-type-001", - "extra_specs": { - "capabilities": "gpu" - } - } -} diff --git a/api-ref/source/v1/samples/volume-types-list-response.json b/api-ref/source/v1/samples/volume-types-list-response.json deleted file mode 100644 index dc4ae5046..000000000 --- a/api-ref/source/v1/samples/volume-types-list-response.json +++ /dev/null @@ -1,16 +0,0 @@ -{ - "volume_types": [ - { - "id": "289da7f8-6440-407c-9fb4-7db01ec49164", - "name": "vol-type-001", - "extra_specs": { - "capabilities": "gpu" - } - }, - { - "id": "96c3bda7-c82a-4f50-be73-ca7621794835", - "name": "vol-type-002", - "extra_specs": {} - } - ] -} diff --git a/api-ref/source/v1/samples/volumes-list-response.json b/api-ref/source/v1/samples/volumes-list-response.json deleted file mode 100644 index 0523be7aa..000000000 --- a/api-ref/source/v1/samples/volumes-list-response.json +++ /dev/null @@ -1,41 +0,0 @@ -{ - "volumes": [ - { - "id": "521752a6-acf6-4b2d-bc7a-119f9148cd8c", - "display_name": "vol-001", - "display_description": "Another volume.", - "status": "active", - "size": 30, - "volume_type": "289da7f8-6440-407c-9fb4-7db01ec49164", - "metadata": { - "contents": "junk" - }, - "availability_zone": "us-east1", - "snapshot_id": null, - "attachments": [ - { - "attachment_id": "03987cd1-0ad5-40d1-9b2a-7cc48295d4fa", - "id": "47e9ecc5-4045-4ee3-9a4b-d859d546a0cf", - "volume_id": "6c80f8ac-e3e2-480c-8e6e-f1db92fe4bfe", - "server_id": "d1c4788b-9435-42e2-9b81-29f3be1cd01f", - "host_name": "mitaka", - "device": "/" - } - ], - "created_at": "2012-02-14T20:53:07Z" - }, - { - "id": "76b8950a-8594-4e5b-8dce-0dfa9c696358", - "display_name": "vol-002", - "display_description": "Yet another volume.", - "status": "active", - "size": 25, - "volume_type": "96c3bda7-c82a-4f50-be73-ca7621794835", - "metadata": {}, - "availability_zone": "us-east2", - "snapshot_id": null, - "attachments": [], - "created_at": "2012-03-15T19:10:03Z" - } - ] -} diff --git 
a/api-ref/source/v1/volumes-v1-snapshots.inc b/api-ref/source/v1/volumes-v1-snapshots.inc deleted file mode 100644 index e246aa98b..000000000 --- a/api-ref/source/v1/volumes-v1-snapshots.inc +++ /dev/null @@ -1,187 +0,0 @@ -.. -*- rst -*- - -Snapshots -========= - -Creates, lists, shows information for, and deletes snapshots. Shows -and updates snapshot metadata. - - -Show snapshot details (v1) -~~~~~~~~~~~~~~~~~~~~~~~~~~ - -.. rest_method:: GET /v1/{tenant_id}/snapshots/{snapshot_id} - -Shows details for a snapshot. - - -Normal response codes: 200 -Error response codes: - -Request -------- - -.. rest_parameters:: parameters.yaml - - - tenant_id: tenant_id - - snapshot_id: snapshot_id - -Response Example ----------------- - -.. literalinclude:: ./samples/snapshot-show-response.json - :language: javascript - - -Delete snapshot (v1) -~~~~~~~~~~~~~~~~~~~~ - -.. rest_method:: DELETE /v1/{tenant_id}/snapshots/{snapshot_id} - -Deletes a snapshot. - -Normal response codes:202, - - -Request -------- - -.. rest_parameters:: parameters.yaml - - - tenant_id: tenant_id - - snapshot_id: snapshot_id - - -List snapshots with details (v1) -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -.. rest_method:: GET /v1/{tenant_id}/snapshots/detail - -Lists all snapshots, with details. - - -Normal response codes: 200 -Error response codes: - -Request -------- - -.. rest_parameters:: parameters.yaml - - - tenant_id: tenant_id - -Response Example ----------------- - -.. literalinclude:: ./samples/snapshots-list-response.json - :language: javascript - - -Create snapshot (v1) -~~~~~~~~~~~~~~~~~~~~ - -.. rest_method:: POST /v1/{tenant_id}/snapshots - -Creates a snapshot. - -Normal response codes:201 - -Request -------- - -.. rest_parameters:: parameters.yaml - - - snapshot: snapshot - - tenant_id: tenant_id - -Request Example ---------------- - -.. literalinclude:: ./samples/snapshot-create-request.json - :language: javascript - -List snapshots (v1) -~~~~~~~~~~~~~~~~~~~ - -.. 
rest_method:: GET /v1/{tenant_id}/snapshots - -Lists all snapshots. - - -Normal response codes: 200 -Error response codes: - - -Request -------- - -.. rest_parameters:: parameters.yaml - - - tenant_id: tenant_id - - -Response Example ----------------- - -.. literalinclude:: ./samples/snapshots-list-response.json - :language: javascript - - -Show snapshot metadata (v1) -~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -.. rest_method:: GET /v1/{tenant_id}/snapshots/{snapshot_id}/metadata - -Shows metadata for a snapshot. - - -Normal response codes: 200 -Error response codes: - - -Request -------- - -.. rest_parameters:: parameters.yaml - - - tenant_id: tenant_id - - snapshot_id: snapshot_id - - -Response Example ----------------- - -.. literalinclude:: ./samples/snapshot-metadata-show-response.json - :language: javascript - - -Update snapshot metadata (v1) -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -.. rest_method:: PUT /v1/{tenant_id}/snapshots/{snapshot_id}/metadata - -Updates metadata for a snapshot. - -Normal response codes: 200 -Error response codes: - -Request -------- - -.. rest_parameters:: parameters.yaml - - - metadata: metadata - - tenant_id: tenant_id - - snapshot_id: snapshot_id - -Request Example ---------------- - -.. literalinclude:: ./samples/snapshot-metadata-update-request.json - :language: javascript - -Response Example ----------------- - -.. literalinclude:: ./samples/snapshot-metadata-update-response.json - :language: javascript diff --git a/api-ref/source/v1/volumes-v1-types.inc b/api-ref/source/v1/volumes-v1-types.inc deleted file mode 100644 index 2f0cb9ffc..000000000 --- a/api-ref/source/v1/volumes-v1-types.inc +++ /dev/null @@ -1,217 +0,0 @@ -.. -*- rst -*- - -Volume types -============ - -Lists, creates, updates, shows information for, and deletes volume -types. - - -List volume types (v1) -~~~~~~~~~~~~~~~~~~~~~~ - -.. rest_method:: GET /v1/{tenant_id}/types - -Lists volume types. - - -Normal response codes: 200 -Error response codes: - - -Request -------- - -.. 
rest_parameters:: parameters.yaml - - - tenant_id: tenant_id - -Response Example ----------------- - -.. literalinclude:: ./samples/volume-types-list-response.json - :language: javascript - - -Create volume type (v1) -~~~~~~~~~~~~~~~~~~~~~~~ - -.. rest_method:: POST /v1/{tenant_id}/types - -Creates a volume type. - - -Normal response codes: 200 -Error response codes: - - -Request -------- - -.. rest_parameters:: parameters.yaml - - - extra_specs: extra_specs - - name: name - - volume_type: volume_type - - tenant_id: tenant_id - -Request Example ---------------- - -.. literalinclude:: ./samples/volume-type-create-request.json - :language: javascript - -Response Parameters -------------------- - -.. rest_parameters:: parameters.yaml - - - extra_specs: extra_specs - - name: name - - volume_type: volume_type - -Response Example ----------------- - -.. literalinclude:: ./samples/volume-type-show-response.json - :language: javascript - - -Update volume type (v1) -~~~~~~~~~~~~~~~~~~~~~~~ - -.. rest_method:: PUT /v1/{tenant_id}/types/{volume_type_id} - -Updates a volume type. - - -Normal response codes: 200 -Error response codes: - - -Request -------- - -.. rest_parameters:: parameters.yaml - - - extra_specs: extra_specs - - name: name - - volume_type: volume_type - - tenant_id: tenant_id - - volume_type_id: volume_type_id - -Request Example ---------------- - -.. literalinclude:: ./samples/volume-type-create-request.json - :language: javascript - - - -Response Parameters -------------------- - -.. rest_parameters:: parameters.yaml - - - extra_specs: extra_specs - - name: name - - volume_type: volume_type - -Response Example ----------------- - -.. literalinclude:: ./samples/volume-type-show-response.json - :language: javascript - - -Update extra specs for a volume type (v1) -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -.. rest_method:: PUT /v1/{tenant_id}/types/{volume_type_id} - -Updates the extra specifications for a volume type. 
- - -Normal response codes: 200 -Error response codes: - - -Request -------- - -.. rest_parameters:: parameters.yaml - - - extra_specs: extra_specs - - name: name - - volume_type: volume_type - - tenant_id: tenant_id - - volume_type_id: volume_type_id - -Request Example ---------------- - -.. literalinclude:: ./samples/volume-type-create-request.json - :language: javascript - - -Response Parameters -------------------- - -.. rest_parameters:: parameters.yaml - - - extra_specs: extra_specs - - name: name - - volume_type: volume_type - -Response Example ----------------- - -.. literalinclude:: ./samples/volume-type-show-response.json - :language: javascript - - -Show volume type details (v1) -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -.. rest_method:: GET /v1/{tenant_id}/types/{volume_type_id} - -Shows details for a volume type. - - -Normal response codes: 200 -Error response codes: - - -Request -------- - -.. rest_parameters:: parameters.yaml - - - tenant_id: tenant_id - - volume_type_id: volume_type_id - - -Response Example ----------------- - -.. literalinclude:: ./samples/volume-type-show-response.json - :language: javascript - - -Delete volume type (v1) -~~~~~~~~~~~~~~~~~~~~~~~ - -.. rest_method:: DELETE /v1/{tenant_id}/types/{volume_type_id} - -Deletes a volume type. - -Normal response codes: 202 - - -Request -------- - -.. rest_parameters:: parameters.yaml - - - tenant_id: tenant_id - - volume_type_id: volume_type_id diff --git a/api-ref/source/v1/volumes-v1-versions.inc b/api-ref/source/v1/volumes-v1-versions.inc deleted file mode 100644 index f260a3621..000000000 --- a/api-ref/source/v1/volumes-v1-versions.inc +++ /dev/null @@ -1,52 +0,0 @@ -.. -*- rst -*- - -API versions -============ - -Lists information about API versions. - - -Show API v1 details -~~~~~~~~~~~~~~~~~~~ - -.. rest_method:: GET /v1 - -Shows Block Storage API v1 details. - - -Normal response codes: 200 -Error response codes:203, - - -Request -------- - - -Response Example ----------------- - -.. 
literalinclude:: ./samples/version-show-response.json - :language: javascript - - -List API versions (v1) -~~~~~~~~~~~~~~~~~~~~~~ - -.. rest_method:: GET / - -Lists information about all Block Storage API versions. - - -Normal response codes: 300 - - -Request -------- - - - -Response Example ----------------- - -.. literalinclude:: ./samples/versions-list-response.json - :language: javascript diff --git a/api-ref/source/v1/volumes-v1-volumes.inc b/api-ref/source/v1/volumes-v1-volumes.inc deleted file mode 100644 index 520d54f28..000000000 --- a/api-ref/source/v1/volumes-v1-volumes.inc +++ /dev/null @@ -1,233 +0,0 @@ -.. -*- rst -*- - -Volumes -======= - -The ``snapshot_id`` and ``source_volid`` parameters specify the ID -of the snapshot or volume from which the volume originates. If the -volume was not created from a snapshot or source volume, these -values are null. - - -List volumes, with details (v1) -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -.. rest_method:: GET /v1/{tenant_id}/volumes/detail - -Lists all volumes, with details. - - -Normal response codes: 200 -Error response codes: - - -Request -------- - -.. rest_parameters:: parameters.yaml - - - tenant_id: tenant_id - - -Response Parameters -------------------- - -.. rest_parameters:: parameters.yaml - - - migration_status: migration_status - - attachments: attachments - - links: links - - availability_zone: availability_zone - - encrypted: encrypted - - updated_at: updated_at - - replication_status: replication_status - - snapshot_id: snapshot_id - - id: id - - size: size - - user_id: user_id - - metadata: metadata - - status: status - - description: description - - multiattach: multiattach - - source_volid: source_volid - - consistencygroup_id: consistencygroup_id - - name: name - - bootable: bootable - - created_at: created_at - - volume_type: volume_type - - volumes: volumes - -Response Example ----------------- - -.. 
literalinclude:: ./samples/volumes-list-response.json - :language: javascript - - -Create volume (v1) -~~~~~~~~~~~~~~~~~~ - -.. rest_method:: POST /v1/{tenant_id}/volumes - -Creates a volume. - -Normal response codes: 201, - - -Request -------- - -.. rest_parameters:: parameters.yaml - - - size: size - - description: description - - imageRef: imageRef - - multiattach: multiattach - - availability_zone: availability_zone - - source_volid: source_volid - - name: name - - volume: volume - - consistencygroup_id: consistencygroup_id - - volume_type: volume_type - - snapshot_id: snapshot_id - - OS-SCH-HNT:scheduler_hints: OS-SCH-HNT:scheduler_hints - - source_replica: source_replica - - metadata: metadata - - tenant_id: tenant_id - -Request Example ---------------- - -.. literalinclude:: ./samples/volume-create-request.json - :language: javascript - - -Response Parameters -------------------- - -.. rest_parameters:: parameters.yaml - - - description: description - - imageRef: imageRef - - multiattach: multiattach - - created_at: created_at - - availability_zone: availability_zone - - source_volid: source_volid - - name: name - - volume: volume - - volume_type: volume_type - - snapshot_id: snapshot_id - - size: size - - metadata: metadata - - -List volumes (v1) -~~~~~~~~~~~~~~~~~ - -.. rest_method:: GET /v1/{tenant_id}/volumes - -Lists all volumes. - - -Normal response codes: 200 -Error response codes: - - -Request -------- - -.. rest_parameters:: parameters.yaml - - - tenant_id: tenant_id - - -Response Parameters -------------------- - -.. rest_parameters:: parameters.yaml - - - volumes: volumes - - id: id - - links: links - - name: name - - -Response Example ----------------- - -.. literalinclude:: ./samples/volumes-list-response.json - :language: javascript - - -Show volume details (v1) -~~~~~~~~~~~~~~~~~~~~~~~~ - -.. rest_method:: GET /v1/{tenant_id}/volumes/{volume_id} - -Shows details for a volume. 
- - -Normal response codes: 200 -Error response codes: - - -Request -------- - -.. rest_parameters:: parameters.yaml - - - tenant_id: tenant_id - - volume_id: volume_id - - -Response Parameters -------------------- - -.. rest_parameters:: parameters.yaml - - - migration_status: migration_status - - attachments: attachments - - links: links - - availability_zone: availability_zone - - encrypted: encrypted - - updated_at: updated_at - - replication_status: replication_status - - snapshot_id: snapshot_id - - id: id - - size: size - - user_id: user_id - - metadata: metadata - - status: status - - description: description - - multiattach: multiattach - - source_volid: source_volid - - volume: volume - - consistencygroup_id: consistencygroup_id - - name: name - - bootable: bootable - - created_at: created_at - - volume_type: volume_type - -Response Example ----------------- - -.. literalinclude:: ./samples/volume-show-response.json - :language: javascript - -Delete volume (v1) -~~~~~~~~~~~~~~~~~~ - -.. rest_method:: DELETE /v1/{tenant_id}/volumes/{volume_id} - -Deletes a volume. - -Normal response codes: 202, - - -Request -------- - -.. rest_parameters:: parameters.yaml - - - tenant_id: tenant_id - - volume_id: volume_id diff --git a/api-ref/source/v2/api-versions.inc b/api-ref/source/v2/api-versions.inc deleted file mode 100644 index 7dbc4761a..000000000 --- a/api-ref/source/v2/api-versions.inc +++ /dev/null @@ -1,26 +0,0 @@ -.. -*- rst -*- - -List Api Versions -================= - -.. rest_method:: GET / - -Lists information for all Block Storage API versions. - - -Normal response codes: 200,300 - -Error response codes: computeFault(400, 500), serviceUnavailable(503), badRequest(400), -unauthorized(401), forbidden(403), badMethod(405), itemNotFound(404) - -Request -~~~~~~~ - -Response -~~~~~~~~ - -**Example List Api Versions: JSON request** - - -.. 
literalinclude:: ./samples/versions-response.json - :language: javascript diff --git a/api-ref/source/v2/capabilities-v2.inc b/api-ref/source/v2/capabilities-v2.inc deleted file mode 100644 index c8f872068..000000000 --- a/api-ref/source/v2/capabilities-v2.inc +++ /dev/null @@ -1,50 +0,0 @@ -.. -*- rst -*- - -Capabilities for storage back ends (capabilities) -================================================= - -Shows capabilities for a storage back end. - - -Show back-end capabilities -~~~~~~~~~~~~~~~~~~~~~~~~~~ - -.. rest_method:: GET /v2/{tenant_id}/capabilities/{hostname} - -Shows capabilities for a storage back end on the host. -The ``hostname`` takes the form of ``hostname@volume_backend_name``. - - -Normal response codes: 200 -Error response codes: - -Request -------- - -.. rest_parameters:: parameters.yaml - - - tenant_id: tenant_id - - hostname: hostname - -Response Parameters -------------------- - -.. rest_parameters:: parameters.yaml - - - pool_name: pool_name - - description: description - - volume_backend_name: volume_backend_name - - namespace: namespace - - visibility: visibility - - driver_version: driver_version - - vendor_name: vendor_name - - properties: properties - - storage_protocol: storage_protocol - - replication_targets: replication_targets - - display_name: display_name - -Response Example ----------------- - -.. literalinclude:: ./samples/backend-capabilities-response.json - :language: javascript diff --git a/api-ref/source/v2/consistencygroups-v2.inc b/api-ref/source/v2/consistencygroups-v2.inc deleted file mode 100644 index da8458fb7..000000000 --- a/api-ref/source/v2/consistencygroups-v2.inc +++ /dev/null @@ -1,267 +0,0 @@ -.. -*- rst -*- - -Consistency groups -================== - -Consistency groups enable you to create snapshots at the exact same -point in time from multiple volumes. For example, a database might -place its tables, logs, and configuration on separate volumes. 
To -restore this database from a previous point in time, it makes sense -to restore the logs, tables, and configuration together from the -exact same point in time. - -Use the ``policy.json`` file to grant permissions for these actions -to limit roles. - - -List consistency groups -~~~~~~~~~~~~~~~~~~~~~~~ - -.. rest_method:: GET /v2/{tenant_id}/consistencygroups - -Lists consistency groups. - - -Normal response codes: 200 -Error response codes: - - -Request -------- - -.. rest_parameters:: parameters.yaml - - - tenant_id: tenant_id - - sort_key: sort_key - - sort_dir: sort_dir - - limit: limit - - marker: marker - - -Response Parameters -------------------- - -.. rest_parameters:: parameters.yaml - - - id: id - - name: name - -Response Example ----------------- - -.. literalinclude:: ./samples/consistency-groups-list-response.json - :language: javascript - - -Create consistency group -~~~~~~~~~~~~~~~~~~~~~~~~ - -.. rest_method:: POST /v2/{tenant_id}/consistencygroups - -Creates a consistency group. - -Normal response codes: 202, - - -Request -------- - -.. rest_parameters:: parameters.yaml - - - tenant_id: tenant_id - - description: description_6 - - availability_zone: availability_zone - - volume_types: volume_types_2 - - name: name_15 - -Response --------- - -.. rest_parameters:: parameters.yaml - - - status: status_1 - - description: description_11 - - availability_zone: availability_zone - - created_at: created_at - - volume_types: volume_types - - name: name_15 - - id: consistencygroup_id_1 - -Request Example ---------------- - -.. literalinclude:: ./samples/consistency-group-create-request.json - :language: javascript - -Response Example ----------------- - -.. literalinclude:: ./samples/consistency-group-create-response.json - :language: javascript - -Show consistency group details -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -.. rest_method:: GET /v2/{tenant_id}/consistencygroups/{consistencygroup_id} - -Shows details for a consistency group. 
- -Normal response codes: 200 -Error response codes: - -Request -------- - -.. rest_parameters:: parameters.yaml - - - tenant_id: tenant_id - - consistencygroup_id: consistencygroup_id - -Response Parameters -------------------- - -.. rest_parameters:: parameters.yaml - - - status: status_1 - - description: description - - availability_zone: availability_zone - - created_at: created_at - - volume_types: volume_types - - id: id - - name: name - -Response Example ----------------- - -.. literalinclude:: ./samples/consistency-group-show-response.json - :language: javascript - - -Create consistency group from source -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -.. rest_method:: POST /v2/{tenant_id}/consistencygroups/create_from_src - -Creates a consistency group from source. - -Normal response codes: 202, - - -Request -------- - -.. rest_parameters:: parameters.yaml - - - status: status_1 - - user_id: user_id - - description: description - - cgsnapshot_id: cgsnapshot_id - - source_cgid: source_cgid - - project_id: project_id - - name: name - - tenant_id: tenant_id - -Request Example ---------------- - -.. literalinclude:: ./samples/consistency-group-create-from-src-request.json - :language: javascript - - -Delete consistency group -~~~~~~~~~~~~~~~~~~~~~~~~ - -.. rest_method:: POST /v2/{tenant_id}/consistencygroups/{consistencygroup_id}/delete - -Deletes a consistency group. - -Normal response codes: 202, - - -Request -------- - -.. rest_parameters:: parameters.yaml - - - force: force - - tenant_id: tenant_id - - consistencygroup_id: consistencygroup_id - -Request Example ---------------- - -.. literalinclude:: ./samples/consistency-group-delete-request.json - :language: javascript - - -List consistency groups with details -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -.. rest_method:: GET /v2/{tenant_id}/consistencygroups/detail - -Lists consistency groups with details. - - -Normal response codes: 200 -Error response codes: - - -Request -------- - -.. 
rest_parameters:: parameters.yaml - - - tenant_id: tenant_id - - sort_key: sort_key - - sort_dir: sort_dir - - limit: limit - - marker: marker - - -Response Parameters -------------------- - -.. rest_parameters:: parameters.yaml - - - status: status_1 - - description: description - - availability_zone: availability_zone - - created_at: created_at - - volume_types: volume_types - - id: id - - name: name - -Response Example ----------------- - -.. literalinclude:: ./samples/consistency-groups-list-detailed-response.json - :language: javascript - - -Update consistency group -~~~~~~~~~~~~~~~~~~~~~~~~ - -.. rest_method:: PUT /v2/{tenant_id}/consistencygroups/{consistencygroup_id}/update - -Updates a consistency group. - -Normal response codes: 202, - - -Request -------- - -.. rest_parameters:: parameters.yaml - - - remove_volumes: remove_volumes - - description: description - - add_volumes: add_volumes - - name: name - - tenant_id: tenant_id - - consistencygroup_id: consistencygroup_id - -Request Example ---------------- - -.. literalinclude:: ./samples/consistency-group-update-request.json - :language: javascript diff --git a/api-ref/source/v2/ext-backups-actions-v2.inc b/api-ref/source/v2/ext-backups-actions-v2.inc deleted file mode 100644 index 85ebf15f3..000000000 --- a/api-ref/source/v2/ext-backups-actions-v2.inc +++ /dev/null @@ -1,68 +0,0 @@ -.. -*- rst -*- - -Backup actions (backups, action) -================================ - -Force-deletes a backup and reset status for a backup. - - -Force-delete backup -~~~~~~~~~~~~~~~~~~~ - -.. rest_method:: POST /v2/{tenant_id}/backups/{backup_id}/action - -Force-deletes a backup. Specify the ``os-force_delete`` action in the request body. - -This operations deletes the backup and any backup data. - -The backup driver returns the ``405`` status code if it does not -support this operation. - -Normal response codes: 202 -Error response codes: itemNotFound(404), badMethod(405) - - -Request -------- - -.. 
rest_parameters:: parameters.yaml - - - os-force_delete: os-force_delete - - tenant_id: tenant_id - - backup_id: backup_id - -Request Example ---------------- - -.. literalinclude:: ./samples/backup-force-delete-request.json - :language: javascript - - - -Reset backup's status -~~~~~~~~~~~~~~~~~~~~~ - -.. rest_method:: POST /v2/{tenant_id}/backups/{backup_id}/action - -Reset a backup's status. Specify the ``os-reset_status`` action in the request body. - -Normal response codes: 202 -Error response codes: badRequest(400), itemNotFound(404) - - -Request -------- - -.. rest_parameters:: parameters.yaml - - - status: status_7 - - os-reset_status: os-reset_status - - tenant_id: tenant_id - - backup_id: backup_id - -Request Example ---------------- - -.. literalinclude:: ./samples/backup-reset-status-request.json - :language: javascript - diff --git a/api-ref/source/v2/ext-backups.inc b/api-ref/source/v2/ext-backups.inc deleted file mode 100644 index 5c14449fc..000000000 --- a/api-ref/source/v2/ext-backups.inc +++ /dev/null @@ -1,286 +0,0 @@ -.. -*- rst -*- - -Backups (backups) -================= - -A backup is a full copy of a volume stored in an external service. -The service can be configured. The only supported service is Object -Storage. A backup can subsequently be restored from the external -service to either the same volume that the backup was originally -taken from or to a new volume. Backup and restore operations can -only be carried out on volumes that are in an unattached and -available state. - -When you create, list, or delete backups, these status values are -possible: - -**Backup statuses** - -+-----------------+---------------------------------------------+ -| Status | Description | -+-----------------+---------------------------------------------+ -| creating | The backup is being created. | -+-----------------+---------------------------------------------+ -| available | The backup is ready to restore to a volume. 
| -+-----------------+---------------------------------------------+ -| deleting | The backup is being deleted. | -+-----------------+---------------------------------------------+ -| error | A backup error occurred. | -+-----------------+---------------------------------------------+ -| restoring | The backup is being restored to a volume. | -+-----------------+---------------------------------------------+ -| error_restoring | A backup restoration error occurred. | -+-----------------+---------------------------------------------+ - - -If an error occurs, you can find more information about the error -in the ``fail_reason`` field for the backup. - - -List backups with details -~~~~~~~~~~~~~~~~~~~~~~~~~ - -.. rest_method:: GET /v2/{tenant_id}/backups/detail - -Lists Block Storage backups, with details, to which the tenant has access. - - -Normal response codes: 200 -Error response codes: - - -Request -------- - -.. rest_parameters:: parameters.yaml - - - tenant_id: tenant_id - - sort_key: sort_key - - sort_dir: sort_dir - - limit: limit - - marker: marker - - -Response Parameters -------------------- - -.. rest_parameters:: parameters.yaml - - - status: status_4 - - object_count: object_count - - fail_reason: fail_reason - - description: description - - links: links - - availability_zone: availability_zone - - created_at: created_at - - updated_at: updated_at - - name: name - - has_dependent_backups: has_dependent_backups - - volume_id: volume_id - - container: container - - backups: backups - - size: size - - id: id - - is_incremental: is_incremental - - data_timestamp: data_timestamp - - snapshot_id: snapshot_id_2 - -Response Example ----------------- - -.. literalinclude:: ./samples/backups-list-detailed-response.json - :language: javascript - - -Show backup details -~~~~~~~~~~~~~~~~~~~ - -.. rest_method:: GET /v2/{tenant_id}/backups/{backup_id} - -Shows details for a backup. - - -Normal response codes: 200 -Error response codes: - - -Request -------- - -.. 
rest_parameters:: parameters.yaml - - - tenant_id: tenant_id - - backup_id: backup_id - - -Response Parameters -------------------- - -.. rest_parameters:: parameters.yaml - - - status: status_4 - - object_count: object_count - - container: container - - description: description - - links: links - - availability_zone: availability_zone - - created_at: created_at - - updated_at: updated_at - - name: name - - has_dependent_backups: has_dependent_backups - - volume_id: volume_id - - fail_reason: fail_reason - - size: size - - backup: backup - - id: id - - is_incremental: is_incremental - - data_timestamp: data_timestamp - - snapshot_id: snapshot_id_2 - -Response Example ----------------- - -.. literalinclude:: ./samples/backup-show-response.json - :language: javascript - - -Delete backup -~~~~~~~~~~~~~ - -.. rest_method:: DELETE /v2/{tenant_id}/backups/{backup_id} - -Deletes a backup. - -Normal response codes: 202, -Error response codes: Bad Request(400) - - - - -Request -------- - -.. rest_parameters:: parameters.yaml - - - tenant_id: tenant_id - - backup_id: backup_id - - -Restore backup -~~~~~~~~~~~~~~ - -.. rest_method:: POST /v2/{tenant_id}/backups/{backup_id}/restore - -Restores a Block Storage backup to an existing or new Block Storage volume. - -You must specify either the UUID or name of the volume. If you -specify both the UUID and name, the UUID takes priority. - -Normal response codes: 202, -Error response codes: Bad Request(400), Request Entity Too Large(413) - - -Request -------- - -.. rest_parameters:: parameters.yaml - - - restore: restore - - name: name - - volume_id: volume_id - - tenant_id: tenant_id - - backup_id: backup_id - -Request Example ---------------- - -.. literalinclude:: ./samples/backup-restore-request.json - :language: javascript - -Response Parameters -------------------- - -.. 
rest_parameters:: parameters.yaml - - - restore: restore - - backup_id: backup_id - - volume_id: volume_id - - volume_name: volume_name - -Create backup -~~~~~~~~~~~~~ - -.. rest_method:: POST /v2/{tenant_id}/backups - -Creates a Block Storage backup from a volume. - -Normal response codes: 202, -Error response codes: Bad Request(400), Internal Server Error(500) - -Request -------- - -.. rest_parameters:: parameters.yaml - - - container: container - - description: description - - incremental: incremental - - volume_id: volume_id - - force: force - - backup: backup - - name: name - - tenant_id: tenant_id - - snapshot_id: snapshot_id_2 - -Request Example ---------------- - -.. literalinclude:: ./samples/backup-create-request.json - :language: javascript - -Response Parameters -------------------- - -.. rest_parameters:: parameters.yaml - - - backup: backup - - id: id - - links: links - - name: name - -List backups -~~~~~~~~~~~~ - -.. rest_method:: GET /v2/{tenant_id}/backups - -Lists Block Storage backups to which the tenant has access. - -Normal response codes: 200 -Error response codes: - -Request -------- - -.. rest_parameters:: parameters.yaml - - - tenant_id: tenant_id - - sort_key: sort_key - - sort_dir: sort_dir - - limit: limit - - marker: marker - -Response Parameters -------------------- - -.. rest_parameters:: parameters.yaml - - - backups: backups - - id: id - - links: links - - name: name - -Response Example ----------------- - -.. literalinclude:: ./samples/backups-list-response.json - :language: javascript diff --git a/api-ref/source/v2/hosts.inc b/api-ref/source/v2/hosts.inc deleted file mode 100644 index 500a6911e..000000000 --- a/api-ref/source/v2/hosts.inc +++ /dev/null @@ -1,81 +0,0 @@ -.. -*- rst -*- - -Hosts extension (os-hosts) -==================================== - -Administrators only, depending on policy settings. - -Lists, shows hosts. - - -List all hosts -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -.. 
rest_method:: GET /v2/{admin_tenant_id}/os-hosts - -Lists all hosts summary info that is not disabled. - -Normal response codes: 200 -Error response codes: - -Request -------- - -.. rest_parameters:: parameters.yaml - - - admin_tenant_id: admin_tenant_id - - -Response Parameters -------------------- - -.. rest_parameters:: parameters.yaml - - - service-status: service_status - - service: host_service - - zone: availability_zone_3 - - service-state: service_state - - host_name: hostname - - last-update: updated_at - -Response Example ----------------- - -.. literalinclude:: ./samples/hosts-list-response.json - :language: javascript - -Show Host Details -~~~~~~~~~~~~~~~~~ - -.. rest_method:: GET /v2/{admin_tenant_id}/os-hosts/{host_name} - -Shows details for a host. - -Normal response codes: 200 - -Error response codes: unauthorized(401), forbidden(403), itemNotFound(404) - -Request -------- - -.. rest_parameters:: parameters.yaml - - - admin_tenant_id: admin_tenant_id - - host_name: hostname - -Response --------- - -.. rest_parameters:: parameters.yaml - - - volume_count: total_count - - total_volume_gb: totalGigabytesUsed - - total_snapshot_gb: totalSnapshotsUsed - - project: admin_tenant_id - - host: host - - snapshot_count: totalSnapshotsUsed - -**Example Show Host Details** - -.. literalinclude:: ./samples/hosts-get-response.json - :language: javascript diff --git a/api-ref/source/v2/index.rst b/api-ref/source/v2/index.rst deleted file mode 100644 index 435f53add..000000000 --- a/api-ref/source/v2/index.rst +++ /dev/null @@ -1,29 +0,0 @@ -:tocdepth: 2 - -================================= -Block Storage API V2 (DEPRECATED) -================================= - -.. rest_expand_all:: - -.. include:: api-versions.inc -.. include:: ext-backups.inc -.. include:: ext-backups-actions-v2.inc -.. include:: capabilities-v2.inc -.. include:: os-cgsnapshots-v2.inc -.. include:: consistencygroups-v2.inc -.. include:: hosts.inc -.. include:: limits.inc -.. 
include:: os-vol-image-meta-v2.inc -.. include:: os-vol-pool-v2.inc -.. include:: os-vol-transfer-v2.inc -.. include:: qos-specs-v2-qos-specs.inc -.. include:: quota-sets.inc -.. include:: volume-manage.inc -.. include:: volume-type-access.inc -.. include:: volumes-v2-extensions.inc -.. include:: volumes-v2-snapshots.inc -.. include:: volumes-v2-types.inc -.. include:: volumes-v2-versions.inc -.. include:: volumes-v2-volumes-actions.inc -.. include:: volumes-v2-volumes.inc diff --git a/api-ref/source/v2/limits.inc b/api-ref/source/v2/limits.inc deleted file mode 100644 index a2ac731e7..000000000 --- a/api-ref/source/v2/limits.inc +++ /dev/null @@ -1,56 +0,0 @@ -.. -*- rst -*- - -Limits (limits) -=============== - -Shows absolute limits for a tenant. - -An absolute limit value of ``-1`` indicates that the absolute limit -for the item is infinite. - - -Show absolute limits -~~~~~~~~~~~~~~~~~~~~ - -.. rest_method:: GET /v2/{tenant_id}/limits - -Shows absolute limits for a tenant. - -An absolute limit value of ``-1`` indicates that the absolute limit -for the item is infinite. - - -Normal response codes: 200 -Error response codes:203, - -Request -------- - -.. rest_parameters:: parameters.yaml - - - tenant_id: tenant_id - -Response Parameters -------------------- - -.. rest_parameters:: parameters.yaml - - - totalSnapshotsUsed: totalSnapshotsUsed - - maxTotalBackups: maxTotalBackups - - maxTotalVolumeGigabytes: maxTotalVolumeGigabytes - - limits: limits - - maxTotalSnapshots: maxTotalSnapshots - - maxTotalBackupGigabytes: maxTotalBackupGigabytes - - totalBackupGigabytesUsed: totalBackupGigabytesUsed - - maxTotalVolumes: maxTotalVolumes - - totalVolumesUsed: totalVolumesUsed - - rate: rate - - totalBackupsUsed: totalBackupsUsed - - totalGigabytesUsed: totalGigabytesUsed - - absolute: absolute - -Response Example ----------------- - -.. 
literalinclude:: ./samples/limits-show-response.json - :language: javascript diff --git a/api-ref/source/v2/os-cgsnapshots-v2.inc b/api-ref/source/v2/os-cgsnapshots-v2.inc deleted file mode 100644 index 08339f678..000000000 --- a/api-ref/source/v2/os-cgsnapshots-v2.inc +++ /dev/null @@ -1,178 +0,0 @@ -.. -*- rst -*- - -Consistency group snapshots -=========================== - -Lists all, lists all with details, shows details for, creates, and -deletes consistency group snapshots. - - -Delete consistency group snapshot -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -.. rest_method:: DELETE /v2/{tenant_id}/cgsnapshots/{cgsnapshot_id} - -Deletes a consistency group snapshot. - -Normal response codes: 202, - - -Request -------- - -.. rest_parameters:: parameters.yaml - - - tenant_id: tenant_id - - cgsnapshot_id: cgsnapshot_id - - -Show consistency group snapshot details -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -.. rest_method:: GET /v2/{tenant_id}/cgsnapshots/{cgsnapshot_id} - -Shows details for a consistency group snapshot. - - -Normal response codes: 200 -Error response codes: - - -Request -------- - -.. rest_parameters:: parameters.yaml - - - tenant_id: tenant_id - - cgsnapshot_id: cgsnapshot_id - - -Response Parameters -------------------- - -.. rest_parameters:: parameters.yaml - - - status: status - - description: description - - created_at: created_at - - consistencygroup_id: consistencygroup_id - - id: id - - name: name - -Response Example ----------------- - -.. literalinclude:: ./samples/cgsnapshots-show-response.json - :language: javascript - - -List consistency group snapshots with details -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -.. rest_method:: GET /v2/{tenant_id}/cgsnapshots/detail - -Lists all consistency group snapshots with details. - - -Normal response codes: 200 -Error response codes: - -Request -------- - -.. rest_parameters:: parameters.yaml - - - tenant_id: tenant_id - - -Response Parameters -------------------- - -.. 
rest_parameters:: parameters.yaml - - - status: status - - description: description - - created_at: created_at - - consistencygroup_id: consistencygroup_id - - id: id - - name: name - -Response Example ----------------- - -.. literalinclude:: ./samples/cgsnapshots-list-detailed-response.json - :language: javascript - -List consistency group snapshots -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -.. rest_method:: GET /v2/{tenant_id}/cgsnapshots - -Lists all consistency group snapshots. - - -Normal response codes: 200 -Error response codes: - - -Request -------- - -.. rest_parameters:: parameters.yaml - - - tenant_id: tenant_id - - -Response Parameters -------------------- - -.. rest_parameters:: parameters.yaml - - - id: id - - name: name - - - -Response Example ----------------- - -.. literalinclude:: ./samples/cgsnapshots-list-response.json - :language: javascript - - - - -Create consistency group snapshot -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -.. rest_method:: POST /v2/{tenant_id}/cgsnapshots - -Creates a consistency group snapshot. - -Normal response codes: 202, - -Request -------- - -.. rest_parameters:: parameters.yaml - - - name: name - - tenant_id: tenant_id - -Request Example ---------------- - -.. literalinclude:: ./samples/cgsnapshots-create-request.json - :language: javascript - -Response Parameters -------------------- - -.. rest_parameters:: parameters.yaml - - - status: status - - description: description - - created_at: created_at - - consistencygroup_id: consistencygroup_id - - id: id - - name: name diff --git a/api-ref/source/v2/os-vol-image-meta-v2.inc b/api-ref/source/v2/os-vol-image-meta-v2.inc deleted file mode 100644 index f6def86b1..000000000 --- a/api-ref/source/v2/os-vol-image-meta-v2.inc +++ /dev/null @@ -1,45 +0,0 @@ -.. -*- rst -*- - -Volume image metadata extension (os-vol-image-meta) -=================================================== - -Shows image metadata that is associated with a volume. 
- - -Show image metadata for volume -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -.. rest_method:: GET /v2/{tenant_id}/os-vol-image-meta - -Shows image metadata for a volume. - -When the request is made, the caller must specify a reference to an -existing storage volume in the ``ref`` element. Each storage driver -may interpret the existing storage volume reference differently but -should accept a reference structure containing either a ``source- -volume-id`` or ``source-volume-name`` element, if possible. - -Normal response codes: 202, - - -Request -------- - -.. rest_parameters:: parameters.yaml - - - description: description - - availability_zone: availability_zone - - bootable: bootable - - volume_type: volume_type - - name: name - - volume: volume - - host: host - - ref: ref - - metadata: metadata - - tenant_id: tenant_id - -Request Example ---------------- - -.. literalinclude:: ./samples/image-metadata-show-request.json - :language: javascript diff --git a/api-ref/source/v2/os-vol-pool-v2.inc b/api-ref/source/v2/os-vol-pool-v2.inc deleted file mode 100644 index 67361ecd1..000000000 --- a/api-ref/source/v2/os-vol-pool-v2.inc +++ /dev/null @@ -1,49 +0,0 @@ -.. -*- rst -*- - -Back-end storage pools -====================== - -Administrator only. Lists all back-end storage pools that are known -to the scheduler service. - - -List back-end storage pools -~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -.. rest_method:: GET /v2/{tenant_id}/scheduler-stats/get_pools - -Lists all back-end storage pools. - - -Normal response codes: 200 -Error response codes: - -Request -------- - -.. rest_parameters:: parameters.yaml - - - tenant_id: tenant_id - - detail: detail - -Response Parameters -------------------- - -.. 
rest_parameters:: parameters.yaml - - - updated: updated - - QoS_support: QoS_support - - name: name - - total_capacity: total_capacity - - volume_backend_name: volume_backend_name - - capabilities: capabilities - - free_capacity: free_capacity - - driver_version: driver_version - - reserved_percentage: reserved_percentage - - storage_protocol: storage_protocol - -Response Example ----------------- - -.. literalinclude:: ./samples/pools-list-detailed-response.json - :language: javascript diff --git a/api-ref/source/v2/os-vol-transfer-v2.inc b/api-ref/source/v2/os-vol-transfer-v2.inc deleted file mode 100644 index b25bb5dab..000000000 --- a/api-ref/source/v2/os-vol-transfer-v2.inc +++ /dev/null @@ -1,216 +0,0 @@ -.. -*- rst -*- - -Volume transfer -=============== - -Transfers a volume from one user to another user. - - -Accept volume transfer -~~~~~~~~~~~~~~~~~~~~~~ - -.. rest_method:: POST /v2/{tenant_id}/os-volume-transfer/{transfer_id}/accept - -Accepts a volume transfer. - -Normal response codes: 202, - - -Request -------- - -.. rest_parameters:: parameters.yaml - - - auth_key: auth_key - - transfer_id: transfer_id - - tenant_id: tenant_id - -Request Example ---------------- - -.. literalinclude:: ./samples/volume-transfer-accept-request.json - :language: javascript - - - -Response Parameters -------------------- - -.. rest_parameters:: parameters.yaml - - - volume_id: volume_id - - id: id - - links: links - - name: name - - -Create volume transfer -~~~~~~~~~~~~~~~~~~~~~~ - -.. rest_method:: POST /v2/{tenant_id}/os-volume-transfer - -Creates a volume transfer. - -Normal response codes: 202, - - -Request -------- - -.. rest_parameters:: parameters.yaml - - - name: name - - volume_id: volume_id - - tenant_id: tenant_id - -Request Example ---------------- - -.. literalinclude:: ./samples/volume-transfer-create-request.json - :language: javascript - - -Response Parameters -------------------- - -.. 
rest_parameters:: parameters.yaml - - - auth_key: auth_key - - links: links - - created_at: created_at - - volume_id: volume_id - - id: id - - name: name - - -List volume transfers -~~~~~~~~~~~~~~~~~~~~~ - -.. rest_method:: GET /v2/{tenant_id}/os-volume-transfer - -Lists volume transfers. - - -Normal response codes: 200 -Error response codes: - - -Request -------- - -.. rest_parameters:: parameters.yaml - - - tenant_id: tenant_id - - -Response Parameters -------------------- - -.. rest_parameters:: parameters.yaml - - - volume_id: volume_id - - id: id - - links: links - - name: name - - -Response Example ----------------- - -.. literalinclude:: ./samples/volume-transfers-list-response.json - :language: javascript - - -Show volume transfer details -~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -.. rest_method:: GET /v2/{tenant_id}/os-volume-transfer/{transfer_id} - -Shows details for a volume transfer. - - -Normal response codes: 200 -Error response codes: - - -Request -------- - -.. rest_parameters:: parameters.yaml - - - transfer_id: transfer_id - - tenant_id: tenant_id - - -Response Parameters -------------------- - -.. rest_parameters:: parameters.yaml - - - created_at: created_at - - volume_id: volume_id - - id: id - - links: links - - name: name - - -Response Example ----------------- - -.. literalinclude:: ./samples/volume-transfer-show-response.json - :language: javascript - - -Delete volume transfer -~~~~~~~~~~~~~~~~~~~~~~ - -.. rest_method:: DELETE /v2/{tenant_id}/os-volume-transfer/{transfer_id} - -Deletes a volume transfer. - -Normal response codes: 202, - - -Request -------- - -.. rest_parameters:: parameters.yaml - - - transfer_id: transfer_id - - tenant_id: tenant_id - - -List volume transfers, with details -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -.. rest_method:: GET /v2/{tenant_id}/os-volume-transfer/detail - -Lists volume transfers, with details. - - -Normal response codes: 200 -Error response codes: - -Request -------- - -.. 
rest_parameters:: parameters.yaml - - - tenant_id: tenant_id - -Response Parameters -------------------- - -.. rest_parameters:: parameters.yaml - - - created_at: created_at - - volume_id: volume_id - - id: id - - links: links - - name: name - -Response Example ----------------- - -.. literalinclude:: ./samples/volume-transfers-list-detailed-response.json - :language: javascript diff --git a/api-ref/source/v2/parameters.yaml b/api-ref/source/v2/parameters.yaml deleted file mode 100644 index cbcfa0554..000000000 --- a/api-ref/source/v2/parameters.yaml +++ /dev/null @@ -1,1849 +0,0 @@ -# variables in header -x-openstack-request-id: - description: > - foo - in: header - required: false - type: string - -# variables in path -admin_tenant_id: - description: | - The UUID of the administrative tenant. - in: path - required: true - type: string -backup_id: - description: | - The UUID for a backup. - in: path - required: true - type: string -cascade: - description: | - Remove any snapshots along with the volume. Default=False. - in: path - required: false - type: boolean -cgsnapshot_id_1: - description: | - The ID of the consistency group snapshot. - in: path - required: false - type: string -consistencygroup_id_2: - description: | - The ID of the consistency group. - in: path - required: false - type: string -encryption_id: - description: | - The ID of the encryption type. - in: path - required: true - type: string -force_3: - description: | - To delete a QoS specification even if it is in- - use, set to ``true``. Default is ``false``. - in: path - required: false - type: boolean -hostname: - description: | - The name of the host that hosts the storage back - end. - in: path - required: false - type: string -key_1: - description: | - The metadata key name for the metadata that you - want to remove. - in: path - required: true - type: string -key_2: - description: | - The metadata key name for the metadata that you - want to see. 
- in: path - required: true - type: string -project_id_path: - description: | - The UUID of the project in a multi-tenancy cloud. - in: path - required: true - type: string -qos_id: - description: | - The ID of the QoS specification. - in: path - required: true - type: string -quotas_tenant_id: - description: | - The UUID of the tenant in a multi-tenancy cloud. - in: path - required: true - type: string -snapshot_id_1: - description: | - The UUID of the snapshot. - in: path - required: false - type: string -tenant_id: - description: | - The UUID of the tenant in a multi-tenancy cloud. - in: path - required: false - type: string -transfer_id: - description: | - The unique identifier for a volume transfer. - in: path - required: false - type: string -user_id_1: - description: | - The user ID. Specify in the URI as - ``user_id={user_id}``. - in: path - required: false - type: string -vol_type_id: - description: | - The UUID for an existing volume type. - in: path - required: true - type: string -volume_id_path: - description: | - The UUID of the volume. - in: path - required: true - type: string -volume_type: - description: | - The ID of Volume Type to be accessed by project. - in: path - required: false - type: string -volume_type_id: - description: | - The UUID for an existing volume type. - in: path - required: false - type: string - -# variables in query -action: - description: | - The action. Valid values are "set" or "unset." - in: query - required: true - type: string -all-tenants: - description: | - Shows details for all tenants. Admin only. - in: query - required: false - type: string -bootable_query: - description: | - Filters results by bootable status. Default=None. - in: query - required: false - type: boolean -detail: - description: | - Indicates whether to show pool details or only - pool names in the response. Set to ``true`` to show pool details. - Set to ``false`` to show only pool names. Default is ``false``. 
- in: query - required: false - type: boolean -image-id: - description: | - Creates volume from image ID. Default=None. - in: query - required: false - type: string -limit: - description: | - Requests a page size of items. Returns a number - of items up to a limit value. Use the ``limit`` parameter to make - an initial limited request and use the ID of the last-seen item - from the response as the ``marker`` parameter value in a - subsequent limited request. - in: query - required: false - type: integer -marker: - description: | - The ID of the last-seen item. Use the ``limit`` - parameter to make an initial limited request and use the ID of the - last-seen item from the response as the ``marker`` parameter value - in a subsequent limited request. - in: query - required: false - type: string -metadata_query: - description: | - Filters results by a metadata key and value pair. - Default=None. - in: query - required: true - type: object -migration_status_query: - description: | - Filters results by a migration status. Default=None. - Admin only. - in: query - required: false - type: string -name_volume: - description: | - Filters results by a name. Default=None. - in: query - required: false - type: string -offset: - description: | - Used in conjunction with ``limit`` to return a slice of items. ``offset`` - is where to start in the list. - in: query - required: false - type: integer -sort: - description: | - Comma-separated list of sort keys and optional - sort directions in the form of < key > [: < direction > ]. A valid - direction is ``asc`` (ascending) or ``desc`` (descending). - in: query - required: false - type: string -sort_dir: - description: | - Sorts by one or more sets of attribute and sort - direction combinations. If you omit the sort direction in a set, - default is ``desc``. - in: query - required: false - type: string -sort_key: - description: | - Sorts by an attribute. 
A valid value is ``name``, - ``status``, ``container_format``, ``disk_format``, ``size``, - ``id``, ``created_at``, or ``updated_at``. Default is - ``created_at``. The API uses the natural sorting direction of the - ``sort_key`` attribute value. - in: query - required: false - type: string -sort_key_1: - description: | - Sorts by an image attribute. A valid value is - ``name``, ``status``, ``container_format``, ``disk_format``, - ``size``, ``id``, ``created_at``, or ``updated_at``. Default is - ``created_at``. The API uses the natural sorting direction of the - ``sort_key`` attribute value. - in: query - required: false - type: string -status_query: - description: | - Filters results by a status. Default=None. - in: query - required: false - type: boolean -usage: - description: | - Set to ``usage=true`` to show quota usage. - Default is ``false``. - in: query - required: false - type: boolean - -# variables in body -absolute: - description: | - An ``absolute`` limits object. - in: body - required: true - type: object -add_volumes: - description: | - One or more volume UUIDs, separated by commas, to - add to the volume consistency group. - in: body - required: false - type: string -alias: - description: | - The alias for the extension. For example, - "FOXNSOX", "os- availability-zone", "os-extended-quotas", "os- - share-unmanage" or "os-used-limits." - in: body - required: true - type: string -attach_status: - description: | - The volume attach status. - in: body - required: false - type: string -attachment_id: - description: | - The interface ID. - in: body - required: false - type: string -attachments: - description: | - Instance attachment information. If this volume - is attached to a server instance, the attachments list includes - the UUID of the attached server, an attachment UUID, the name of - the attached host, if any, the volume UUID, the device, and the - device UUID. Otherwise, this list is empty. 
- in: body - required: true - type: array -auth_key: - description: | - The authentication key for the volume transfer. - in: body - required: true - type: string -availability_zone: - description: | - The name of the availability zone. - in: body - required: false - type: string -availability_zone_1: - description: | - The availability zone. - in: body - required: false - type: string -availability_zone_2: - description: | - The availability zone. - in: body - required: true - type: string -availability_zone_3: - description: | - The availability zone name. - in: body - required: true - type: string -backup: - description: | - A ``backup`` object. - in: body - required: true - type: object -backups: - description: | - A list of ``backup`` objects. - in: body - required: true - type: array -bootable: - description: | - Enables or disables the bootable attribute. You - can boot an instance from a bootable volume. - in: body - required: true - type: boolean -bootable_response: - description: | - Enables or disables the bootable attribute. You - can boot an instance from a bootable volume. - in: body - required: true - type: string -capabilities: - description: | - The capabilities for the back end. The value is - either ``null`` or a string value that indicates the capabilities - for each pool. For example, ``total_capacity`` or ``QoS_support``. - in: body - required: true - type: object -cgsnapshot_id: - description: | - The UUID of the consistency group snapshot. - in: body - required: false - type: string -cipher: - description: | - The encryption algorithm or mode. For example, aes-xts-plain64. The default - value is None. - in: body - required: false - type: string -connector: - description: | - The ``connector`` object. - in: body - required: false - type: object -consistencygroup_id: - description: | - The UUID of the consistency group. - in: body - required: true - type: string -consistencygroup_id_1: - description: | - The UUID of the consistency group. 
- in: body - required: false - type: string -consumer: - description: | - The consumer type. - in: body - required: false - type: string -consumer_1: - description: | - The consumer type. - in: body - required: true - type: string -container: - description: | - The container name or null. - in: body - required: false - type: string -control_location: - description: | - Notional service where encryption is performed. Valid values are - "front-end" or "back-end". The default value is "front-end". - in: body - required: false - type: string -cores: - description: | - The number of instance cores that are allowed for - each tenant. - in: body - required: true - type: integer -created_at: - description: | - The date and time when the resource was created. - - The date and time stamp format is `ISO 8601 - `_: - - :: - - CCYY-MM-DDThh:mm:ss±hh:mm - - For example, ``2015-08-27T09:49:58-05:00``. - - The ``±hh:mm`` value, if included, is the time zone as an offset - from UTC. - in: body - required: true - type: string -created_at_1: - description: | - Date and time when the volume was created. - in: body - required: true - type: string -data_timestamp: - description: | - The time when the data on the volume was first saved. If it is - a backup from volume, it will be the same as ``created_at`` - for a backup. If it is a backup from a snapshot, it will be the - same as ``created_at`` for the snapshot. - in: body - required: true - type: string -deleted: - description: | - The resource is deleted or not. - in: body - required: true - type: boolean -deleted_at: - description: | - The date and time when the resource was deleted. - - The date and time stamp format is `ISO 8601 - `_: - - :: - - CCYY-MM-DDThh:mm:ss±hh:mm - - For example, ``2015-08-27T09:49:58-05:00``. - - The ``±hh:mm`` value, if included, is the time zone as an offset - from UTC. In the previous example, the offset value is ``-05:00``. 
- - If the ``deleted_at`` date and time stamp is not set, its value is - ``null``. - in: body - required: true - type: string -description: - description: | - The backup description or null. - in: body - required: false - type: string -description_1: - description: | - The consistency group snapshot description. - in: body - required: true - type: string -description_10: - description: | - The capabilities description. - in: body - required: true - type: string -description_11: - description: | - The consistency group description. - in: body - required: false - type: string -description_2: - description: | - The description of the consistency group. - in: body - required: false - type: string -description_3: - description: | - The description of the consistency group. - in: body - required: true - type: string -description_4: - description: | - A description for the snapshot. Default is - ``None``. - in: body - required: false - type: string -description_5: - description: | - The volume description. - in: body - required: false - type: string -description_6: - description: | - The consistency group description. - in: body - required: true - type: string -description_7: - description: | - The extension description. - in: body - required: true - type: string -description_8: - description: | - A description for the snapshot. - in: body - required: true - type: string -description_9: - description: | - The volume description. - in: body - required: true - type: string -display_name: - description: | - The name of volume backend capabilities. - in: body - required: true - type: string -driver_version: - description: | - The driver version. - in: body - required: true - type: string -encrypted: - description: | - If true, this volume is encrypted. - in: body - required: true - type: boolean -encryption: - description: | - The encryption information. - in: body - required: true - type: object -encryption_id_body: - description: | - The UUID of the encryption. 
- in: body - required: true - type: string -extra_specs: - description: | - A set of key and value pairs that contains the - specifications for a volume type. - in: body - required: true - type: object -extra_specs_1: - description: | - A key and value pair that contains additional - specifications that are associated with the volume type. Examples - include capabilities, capacity, compression, and so on, depending - on the storage driver in use. - in: body - required: true - type: object -fail_reason: - description: | - If the backup failed, the reason for the failure. - Otherwise, null. - in: body - required: true - type: string -fixed_ips: - description: | - The number of fixed IP addresses that are allowed - for each tenant. Must be equal to or greater than the number of - allowed instances. - in: body - required: true - type: integer -floating_ips: - description: | - The number of floating IP addresses that are - allowed for each tenant. - in: body - required: true - type: integer -force: - description: | - Indicates whether to backup, even if the volume - is attached. Default is ``false``. - in: body - required: false - type: boolean -force_1: - description: | - Indicates whether to snapshot, even if the volume - is attached. Default is ``false``. - in: body - required: false - type: boolean -force_2: - description: | - If set to ``true``, forces deletion of a - consistency group that has a registered volume. - in: body - required: false - type: boolean -free_capacity: - description: | - The amount of free capacity for the back-end - volume, in GBs. A valid value is a string, such as ``unknown``, or - an integer. - in: body - required: true - type: string -has_dependent_backups: - description: | - If this value is ``true``, the backup depends on - other backups. - in: body - required: false - type: boolean -host: - description: | - The OpenStack Block Storage host where the - existing volume resides. 
- in: body - required: true - type: string -host_name: - description: | - The name of the attaching host. - in: body - required: false - type: string -host_service: - description: | - The name of the service which is running on the host. - in: body - required: true - type: string -id: - description: | - The UUID of the volume transfer. - in: body - required: true - type: string -id_1: - description: | - The UUID of the backup. - in: body - required: true - type: string -id_2: - description: | - The UUID of the consistency group snapshot. - in: body - required: true - type: string -id_3: - description: | - The generated ID for the QoS specification. - in: body - required: true - type: string -id_4: - description: | - The snapshot UUID. - in: body - required: true - type: string -id_5: - description: | - The UUID of the volume. - in: body - required: true - type: string -id_6: - description: | - The UUID of the consistency group. - in: body - required: true - type: string -id_7: - description: | - The ID for the quota set. - in: body - required: true - type: integer -imageRef: - description: | - The UUID of the image from which you want to - create the volume. Required to create a bootable volume. - in: body - required: false - type: string -in_use: - description: | - The in use data size. Visible only if you set the - ``usage=true`` query parameter. - in: body - required: false - type: string -incremental: - description: | - The backup mode. A valid value is ``true`` for - incremental backup mode or ``false`` for full backup mode. Default - is ``false``. - in: body - required: false - type: boolean -injected_file_content_bytes: - description: | - The number of bytes of content that are allowed - for each injected file. - in: body - required: true - type: integer -injected_file_path_bytes: - description: | - The number of bytes that are allowed for each - injected file path. 
- in: body - required: true - type: integer -injected_files: - description: | - The number of injected files that are allowed for - each tenant. - in: body - required: true - type: integer -instance_uuid: - description: | - The UUID of the attaching instance. - in: body - required: false - type: string -instances: - description: | - The number of instances that are allowed for each - tenant. - in: body - required: true - type: integer -is_incremental: - description: | - Indicates whether the backup mode is incremental. - If this value is ``true``, the backup mode is incremental. If this - value is ``false``, the backup mode is full. - in: body - required: false - type: boolean -is_public: - description: - Volume type which is accessible to the public. - in: body - required: false - type: boolean -key: - description: | - The metadata key name for the metadata that you - want to remove. - in: body - required: true - type: string -key_pairs: - description: | - The number of key pairs that are allowed for each - user. - in: body - required: true - type: integer -key_size: - description: | - Size of encryption key, in bits. For example, 128 or 256. The default value - is None. - in: body - required: false - type: integer -keys: - description: | - List of Keys. - in: body - required: true - type: array -limits: - description: | - A list of ``limit`` objects. - in: body - required: true - type: object -links: - description: | - Links for the volume transfer. - in: body - required: true - type: array -links_1: - description: | - Links for the backup. - in: body - required: true - type: array -links_2: - description: | - The QoS specification links. - in: body - required: true - type: array -links_3: - description: | - The volume links. - in: body - required: true - type: array -links_4: - description: | - List of links related to the extension. - in: body - required: true - type: array -location: - description: | - Full URL to a service or server. 
- format: uri - in: body - required: true - type: string -maxTotalBackupGigabytes: - description: | - The maximum total amount of backups, in gibibytes - (GiB). - in: body - required: true - type: integer -maxTotalBackups: - description: | - The maximum number of backups. - in: body - required: true - type: integer -maxTotalSnapshots: - description: | - The maximum number of snapshots. - in: body - required: true - type: integer -maxTotalVolumeGigabytes: - description: | - The maximum total amount of volumes, in gibibytes - (GiB). - in: body - required: true - type: integer -maxTotalVolumes: - description: | - The maximum number of volumes. - in: body - required: true - type: integer -metadata: - description: | - One or more metadata key and value pairs for the - snapshot, if any. - in: body - required: true - type: object -metadata_1: - description: | - A ``metadata`` object. Contains one or more - metadata key and value pairs that are associated with the volume. - in: body - required: true - type: object -metadata_2: - description: | - One or more metadata key and value pairs that are - associated with the volume. - in: body - required: false - type: object -metadata_3: - description: | - One or more metadata key and value pairs that are - associated with the volume. - in: body - required: true - type: object -metadata_4: - description: | - One or more metadata key and value pairs to - associate with the volume. - in: body - required: false - type: string -metadata_5: - description: | - The image metadata to add to the volume as a set - of metadata key and value pairs. - in: body - required: true - type: object -metadata_6: - description: | - One or more metadata key and value pairs to - associate with the volume. - in: body - required: false - type: object -metadata_7: - description: | - One or more metadata key and value pairs for the - snapshot. 
- in: body - required: false - type: object -metadata_items: - description: | - The number of metadata items that are allowed for - each instance. - in: body - required: true - type: integer -migration_policy: - description: | - Specify if the volume should be migrated when it is re-typed. - Possible values are ``on-demand`` or ``never``. If not specified, the - default is ``never``. - - .. note:: If the volume is attached to a server instance and will be - migrated, then by default policy only users with the administrative role - should attempt the retype operation. - in: body - required: false - type: string -migration_status: - description: | - The volume migration status. - in: body - required: true - type: string -migration_status_1: - description: | - The volume migration status. - in: body - required: false - type: string -mountpoint: - description: | - The attaching mount point. - in: body - required: true - type: string -multiattach: - description: | - To enable this volume to attach to more than one - server, set this value to ``true``. Default is ``false``. - in: body - required: false - type: boolean -multiattach_1: - description: | - If true, this volume can attach to more than one - instance. - in: body - required: true - type: boolean -name: - description: | - The name of the Volume Transfer. - in: body - required: true - type: string -name_1: - description: | - The backup name. - in: body - required: true - type: string -name_10: - description: | - The name of the extension. For example, "Fox In - Socks." - in: body - required: true - type: string -name_11: - description: | - The name of the back-end volume. - in: body - required: true - type: string -name_12: - description: | - The name of the snapshot. - in: body - required: true - type: string -name_13: - description: | - The volume name. - in: body - required: true - type: string -name_14: - description: | - The name of the volume to which you want to - restore a backup. 
- in: body - required: false - type: string -name_15: - description: | - The consistency group name. - in: body - required: false - type: string -name_2: - description: | - The consistency group snapshot name. - in: body - required: true - type: string -name_3: - description: | - The name of the consistency group. - in: body - required: true - type: string -name_4: - description: | - The name of the QoS specification. - in: body - required: true - type: string -name_5: - description: | - The name of the snapshot. Default is ``None``. - in: body - required: false - type: string -name_6: - description: | - The volume transfer name. - in: body - required: false - type: string -name_7: - description: | - The name of the volume type. - in: body - required: true - type: string -name_8: - description: | - The volume name. - in: body - required: false - type: string -name_9: - description: | - The consistency group name. - in: body - required: true - type: string -namespace: - description: | - Link associated to the extension. - in: body - required: true - type: string -namespace_1: - description: | - The storage namespace, such as - ``OS::Storage::Capabilities::foo``. - in: body - required: true - type: string -new_size: - description: | - The new size of the volume, in gibibytes (GiB). - in: body - required: true - type: integer -new_type: - description: | - The new volume type that volume is changed with. - in: body - required: true - type: string -object_count: - description: | - The number of objects in the backup. - in: body - required: true - type: integer -os-attach: - description: | - The ``os-attach`` action. - in: body - required: true - type: object -os-detach: - description: | - The ``os-detach`` action. - in: body - required: true - type: object -os-extend: - description: | - The ``os-extend`` action. - in: body - required: true - type: object -os-extended-snapshot-attributes:progress: - description: | - A percentage value for the build progress. 
- in: body - required: true - type: integer -os-extended-snapshot-attributes:project_id: - description: | - The UUID of the owning project. - in: body - required: true - type: string -os-force_delete: - description: | - The ``os-force_delete`` action. - in: body - required: true - type: string -os-force_detach: - description: | - The ``os-force_detach`` action. - in: body - required: true - type: object -os-reset_status: - description: | - The ``os-reset_status`` action. - in: body - required: true - type: object -os-retype: - description: | - The ``os-retype`` action. - in: body - required: true - type: object -OS-SCH-HNT:scheduler_hints: - description: | - The dictionary of data to send to the scheduler. - in: body - required: false - type: object -os-set_bootable: - description: | - The ``os-set_bootable`` action. - in: body - required: true - type: object -os-set_image_metadata: - description: | - The ``os-set_image_metadata`` action. - in: body - required: true - type: object -os-unmanage: - description: | - The ``os-unmanage`` action. This action removes - the specified volume from Cinder management. - in: body - required: true - type: object -os-unset_image_metadata: - description: | - The ``os-unset_image_metadata`` action. This - action removes the key-value pairs from the image metadata. - in: body - required: true - type: object -os-vol-host-attr:host: - description: | - Current back-end of the volume. - in: body - required: true - type: string -os-vol-mig-status-attr:migstat: - description: | - The status of this volume migration (None means - that a migration is not currently in progress). - in: body - required: true - type: string -os-vol-mig-status-attr:name_id: - description: | - The volume ID that this volume name on the back- - end is based on. - in: body - required: true - type: string -os-vol-tenant-attr:tenant_id: - description: | - The tenant ID which the volume belongs to. 
- in: body - required: true - type: string -os-volume-replication:driver_data: - description: | - The name of the volume replication driver. - in: body - required: false - type: string -os-volume-replication:extended_status: - description: | - The volume replication status managed by the - driver of backend storage. - in: body - required: false - type: string -os-volume-replication:extended_status_1: - description: | - The status of the volume replication. - in: body - required: false - type: string -pool_name: - description: | - The name of the storage pool. - in: body - required: true - type: string -project: - description: | - The ID of the project. Volume Type access to be - added to this project ID. - in: body - required: true - type: string -project_id: - description: | - The UUID of the project. - in: body - required: true - type: string -project_id_1: - description: | - The Project ID having access to this volume type. - in: body - required: true - type: string -properties: - description: | - The backend volume capabilities list, which is - consisted of cinder standard capabilities and vendor unique - properties. - in: body - required: true - type: object -provider: - description: | - The class that provides encryption support. - in: body - required: true - type: string -provider_optional: - description: | - The class that provides encryption support. - in: body - required: false - type: string -qos_specs: - description: | - A ``qos_specs`` object. - in: body - required: true - type: object -QoS_support: - description: | - The quality of service (QoS) support. - in: body - required: true - type: boolean -quota_set: - description: | - A ``quota_set`` object. - in: body - required: true - type: object -ram: - description: | - The amount of instance RAM in megabytes that are - allowed for each tenant. 
- in: body - required: true - type: integer -rate: - description: | - Rate-limit volume copy bandwidth, used to - mitigate slow down of data access from the instances. - in: body - required: true - type: array -ref: - description: | - A reference to the existing volume. The internal - structure of this reference depends on the volume driver - implementation. For details about the required elements in the - structure, see the documentation for the volume driver. - in: body - required: true - type: string -ref_1: - description: | - A reference to the existing volume. The internal - structure of this reference is dependent on the implementation of - the volume driver, see the specific driver's documentation for - details of the required elements in the structure. - in: body - required: true - type: object -remove_volumes: - description: | - One or more volume UUIDs, separated by commas, to - remove from the volume consistency group. - in: body - required: false - type: string -replication_status: - description: | - The volume replication status. - in: body - required: true - type: string -replication_targets: - description: | - A list of volume backends used to replicate volumes - on this backend. - in: body - required: true - type: list -reserved: - description: | - Reserved volume size. Visible only if you set the - ``usage=true`` query parameter. - in: body - required: false - type: integer -reserved_percentage: - description: | - The percentage of the total capacity that is - reserved for the internal use by the back end. - in: body - required: true - type: integer -restore: - description: | - A ``restore`` object. - in: body - required: true - type: object -security_group_rules: - description: | - The number of rules that are allowed for each - security group. - in: body - required: false - type: integer -security_groups: - description: | - The number of security groups that are allowed - for each tenant. 
- in: body - required: true - type: integer -service_state: - description: | - The state of the service. One of ``available`` or ``unavailable``. - in: body - required: true - type: string -service_status: - description: | - The status of the service. One of ``enabled`` or ``disabled``. - in: body - required: true - type: string -size: - description: | - The size of the volume, in gibibytes (GiB). - in: body - required: true - type: integer -size_1: - description: | - The size of the backup, in GB. - in: body - required: true - type: integer -snapshot: - description: | - A partial representation of a snapshot used in - the creation process. - in: body - required: true - type: string -snapshot_1: - description: | - A ``snapshot`` object. - in: body - required: true - type: object -snapshot_id: - description: | - To create a volume from an existing snapshot, - specify the UUID of the volume snapshot. The volume is created in - same availability zone and with same size as the snapshot. - in: body - required: false - type: string -snapshot_id_2: - description: | - The UUID of the source volume snapshot. - in: body - required: true - type: string -snapshot_id_3: - description: | - The UUID of the source volume snapshot. The API - creates a new volume snapshot with the same size as the source - volume snapshot. - in: body - required: true - type: string -source_cgid: - description: | - The UUID of the source consistency group. - in: body - required: false - type: string -source_replica: - description: | - The UUID of the primary volume to clone. - in: body - required: false - type: string -source_volid: - description: | - The UUID of the source volume. The API creates a - new volume with the same size as the source volume. - in: body - required: false - type: string -source_volid_1: - description: | - The UUID of the source volume. - in: body - required: true - type: string -specs: - description: | - A ``specs`` object. 
- in: body - required: true - type: object -specs_1: - description: | - Specification key and value pairs. - in: body - required: true - type: object -specs_2: - description: | - Specification key and value pairs. - in: body - required: true - type: string -status: - description: | - The ``status`` of the consistency group snapshot. - in: body - required: false - type: string -status_1: - description: | - The status of the consistency group. - in: body - required: true - type: string -status_2: - description: | - The status for the snapshot. - in: body - required: true - type: string -status_3: - description: | - The volume status. - in: body - required: true - type: string -status_4: - description: | - The backup status. Refer to Backup statuses table - for the possible status value. - in: body - required: true - type: string -status_5: - description: | - The consistency group status. A valid value is - ``creating``, ``available``, ``error``, ``deleting``, - ``updating``, or ``invalid``. - in: body - required: true - type: string -status_6: - description: | - The volume status. - in: body - required: false - type: string -status_7: - description: | - The status for the backup. - in: body - required: true - type: string -storage_protocol: - description: | - The storage back end for the back-end volume. For - example, ``iSCSI`` or ``FC``. - in: body - required: true - type: string -storage_protocol_1: - description: | - The storage protocol, such as Fibre Channel, - iSCSI, NFS, and so on. - in: body - required: true - type: string -total_capacity: - description: | - The total capacity for the back-end volume, in - GBs. A valid value is a string, such as ``unknown``, or an - integer. - in: body - required: true - type: string -total_count: - description: | - Total number of volumes. - in: body - required: true - type: integer -totalBackupGigabytesUsed: - description: | - The total number of backups gibibytes (GiB) used. 
- in: body - required: true - type: integer -totalBackupsUsed: - description: | - The total number of backups used. - in: body - required: true - type: integer -totalGigabytesUsed: - description: | - The total number of gibibytes (GiB) used. - in: body - required: true - type: integer -totalSnapshotsUsed: - description: | - The total number of snapshots used. - in: body - required: true - type: integer -totalVolumesUsed: - description: | - The total number of volumes used. - in: body - required: true - type: integer -updated: - description: | - The date and time stamp when the extension was - last updated. - in: body - required: true - type: string -updated_1: - description: | - The date and time stamp when the API request was - issued. - in: body - required: true - type: string -updated_at: - description: | - The date and time when the resource was updated. - - The date and time stamp format is `ISO 8601 - `_: - - :: - - CCYY-MM-DDThh:mm:ss±hh:mm - - For example, ``2015-08-27T09:49:58-05:00``. - - The ``±hh:mm`` value, if included, is the time zone as an offset - from UTC. In the previous example, the offset value is ``-05:00``. - - If the ``updated_at`` date and time stamp is not set, its value is - ``null``. - in: body - required: true - type: string -user_id: - description: | - The UUID of the user. - in: body - required: true - type: string -vendor_name: - description: | - The name of the vendor. - in: body - required: true - type: string -visibility: - description: | - The volume type access. - in: body - required: true - type: string -volume: - description: | - A ``volume`` object. - in: body - required: true - type: object -volume_1: - description: | - A ``volume`` object. - in: body - required: true - type: string -volume_backend_name: - description: | - The name of the back-end volume. - in: body - required: true - type: string -volume_id: - description: | - The UUID of the volume. 
- in: body - required: true - type: string -volume_id_2: - description: | - The UUID of the volume that you want to back up. - in: body - required: true - type: string -volume_id_3: - description: | - To create a snapshot from an existing volume, - specify the UUID of the existing volume. - in: body - required: true - type: string -volume_id_4: - description: | - The UUID of the volume from which the backup was - created. - in: body - required: true - type: string -volume_id_5: - description: | - If the snapshot was created from a volume, the - volume ID. - in: body - required: true - type: string -volume_id_6: - description: | - The UUID of the volume to which you want to - restore a backup. - in: body - required: false - type: string -volume_name: - description: | - The volume name. - in: body - required: true - type: string -volume_type_1: - description: | - A ``volume_type`` object. - in: body - required: true - type: object -volume_type_2: - description: | - The volume type. To create an environment with - multiple-storage back ends, you must specify a volume type. Block - Storage volume back ends are spawned as children to ``cinder- - volume``, and they are keyed from a unique queue. They are named - ``cinder- volume.HOST.BACKEND``. For example, ``cinder- - volume.ubuntu.lvmdriver``. When a volume is created, the scheduler - chooses an appropriate back end to handle the request based on the - volume type. Default is ``None``. For information about how to - use volume types to create multiple- storage back ends, see - `Configure multiple-storage back ends - `_. - in: body - required: false - type: string -volume_type_3: - description: | - The volume type. In an environment with multiple- - storage back ends, the scheduler determines where to send the - volume based on the volume type. For information about how to use - volume types to create multiple- storage back ends, see `Configure - multiple-storage back ends `_. 
- in: body - required: true - type: string -volume_type_4: - description: | - The associated volume type. - in: body - required: false - type: string -volume_type_5: - description: | - A list of ``volume_type`` objects. - in: body - required: true - type: array -volume_type_id_body: - description: | - The UUID of the volume type. - in: body - required: true - type: string -volume_types: - description: | - The list of volume types. In an environment with - multiple-storage back ends, the scheduler determines where to send - the volume based on the volume type. For information about how to - use volume types to create multiple- storage back ends, see - `Configure multiple-storage back ends - `_. - in: body - required: true - type: array -volume_types_2: - description: | - The list of volume types separated by commas. In an environment with - multiple-storage back ends, the scheduler determines where to send - the volume based on the volume type. For information about how to - use volume types to create multiple-storage back ends, see - `Configure multiple-storage back ends - `_. - in: body - required: true - type: string -volumes: - description: | - A list of ``volume`` objects. - in: body - required: true - type: array -volumes: - description: | - A list of ``volume`` objects. - in: body - required: true - type: array diff --git a/api-ref/source/v2/qos-specs-v2-qos-specs.inc b/api-ref/source/v2/qos-specs-v2-qos-specs.inc deleted file mode 100644 index 1e9e01ace..000000000 --- a/api-ref/source/v2/qos-specs-v2-qos-specs.inc +++ /dev/null @@ -1,315 +0,0 @@ -.. -*- rst -*- - -Quality of service (QoS) specifications (qos-specs) -=================================================== - -Administrators only. - -Creates, lists, shows details for, associates, disassociates, sets -keys, unsets keys, and deletes quality of service (QoS) -specifications. - - -Disassociate QoS specification from all associations -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -.. 
rest_method:: GET /v2/{tenant_id}/qos-specs/{qos_id}/disassociate_all - -Disassociates a QoS specification from all associations. - -Normal response codes: 202, - - -Request -------- - -.. rest_parameters:: parameters.yaml - - - tenant_id: tenant_id - - qos_id: qos_id - - -Unset keys in QoS specification -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -.. rest_method:: PUT /v2/{tenant_id}/qos-specs/{qos_id}/delete_keys - -Unsets keys in a QoS specification. - - -Normal response codes: 200 -Error response codes: - - -Request -------- - -.. rest_parameters:: parameters.yaml - - - keys: keys - - tenant_id: tenant_id - - qos_id: qos_id - -Request Example ---------------- - -.. literalinclude:: ./samples/qos-unset-request.json - :language: javascript - - - -Response Example ----------------- - -.. literalinclude:: ./samples/qos-unset-response.json - :language: javascript - - -Get all associations for QoS specification -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -.. rest_method:: GET /v2/{tenant_id}/qos-specs/{qos_id}/associations - -Lists all associations for a QoS specification. - - -Normal response codes: 200 -Error response codes: - - -Request -------- - -.. rest_parameters:: parameters.yaml - - - tenant_id: tenant_id - - qos_id: qos_id - - -Response Example ----------------- - -.. literalinclude:: ./samples/qos-show-response.json - :language: javascript - - -Associate QoS specification with volume type -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -.. rest_method:: GET /v2/{tenant_id}/qos-specs/{qos_id}/associate - -Associates a QoS specification with a volume type. - -Normal response codes: 202, - - -Request -------- - -.. rest_parameters:: parameters.yaml - - - tenant_id: tenant_id - - qos_id: qos_id - - vol_type_id: vol_type_id - - -Disassociate QoS specification from volume type -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -.. rest_method:: GET /v2/{tenant_id}/qos-specs/{qos_id}/disassociate - -Disassociates a QoS specification from a volume type. 
- -Normal response codes: 202, - - -Request -------- - -.. rest_parameters:: parameters.yaml - - - tenant_id: tenant_id - - qos_id: qos_id - - vol_type_id: vol_type_id - - -Show QoS specification details -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -.. rest_method:: GET /v2/{tenant_id}/qos-specs/{qos_id} - -Shows details for a QoS specification. - - -Normal response codes: 200 -Error response codes: 413, 405, 404, 403, 401, 400, 503, - - -Request -------- - -.. rest_parameters:: parameters.yaml - - - tenant_id: tenant_id - - qos_id: qos_id - - -Response Parameters -------------------- - -.. rest_parameters:: parameters.yaml - - - name: name - - links: links - - id: id - - qos_specs: qos_specs - - consumer: consumer - - specs: specs - - -Response Example ----------------- - -.. literalinclude:: ./samples/qos-show-response.json - :language: javascript - - -Set keys in QoS specification -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -.. rest_method:: PUT /v2/{tenant_id}/qos-specs/{qos_id} - -Sets keys in a QoS specification. - - -Normal response codes: 200 -Error response codes: - - -Request -------- - -.. rest_parameters:: parameters.yaml - - - qos_specs: qos_specs - - specs: specs - - tenant_id: tenant_id - - qos_id: qos_id - -Request Example ---------------- - -.. literalinclude:: ./samples/qos-update-request.json - :language: javascript - - -Response Example ----------------- - -.. literalinclude:: ./samples/qos-update-response.json - :language: javascript - - -Delete QoS specification -~~~~~~~~~~~~~~~~~~~~~~~~ - -.. rest_method:: DELETE /v2/{tenant_id}/qos-specs/{qos_id} - -Deletes a QoS specification. - -Normal response codes: 202, - - -Request -------- - -.. rest_parameters:: parameters.yaml - - - tenant_id: tenant_id - - qos_id: qos_id - - force: force - - -Create QoS specification -~~~~~~~~~~~~~~~~~~~~~~~~ - -.. rest_method:: POST /v2/{tenant_id}/qos-specs - -Creates a QoS specification. - -Specify one or more key and value pairs in the request body. 
- -Normal response codes: 202, - - -Request -------- - -.. rest_parameters:: parameters.yaml - - - qos_specs: qos_specs - - consumer: consumer - - name: name - - tenant_id: tenant_id - -Request Example ---------------- - -.. literalinclude:: ./samples/qos-create-request.json - :language: javascript - - - -Response Parameters -------------------- - -.. rest_parameters:: parameters.yaml - - - name: name - - links: links - - id: id - - qos_specs: qos_specs - - consumer: consumer - - specs: specs - - -List QoS specs -~~~~~~~~~~~~~~ - -.. rest_method:: GET /v2/{tenant_id}/qos-specs - -Lists quality of service (QoS) specifications. - - -Normal response codes: 200, 300, - - -Request -------- - -.. rest_parameters:: parameters.yaml - - - tenant_id: tenant_id - - sort_key: sort_key - - sort_dir: sort_dir - - limit: limit - - marker: marker - - -Response Parameters -------------------- - -.. rest_parameters:: parameters.yaml - - - specs: specs - - qos_specs: qos_specs - - consumer: consumer - - id: id - - name: name - - -Response Example ----------------- - -.. literalinclude:: ./samples/qos-list-response.json - :language: javascript diff --git a/api-ref/source/v2/quota-sets.inc b/api-ref/source/v2/quota-sets.inc deleted file mode 100644 index 174cd2cd9..000000000 --- a/api-ref/source/v2/quota-sets.inc +++ /dev/null @@ -1,206 +0,0 @@ -.. -*- rst -*- - -Quota sets extension (os-quota-sets) -==================================== - -Administrators only, depending on policy settings. - -Shows, updates, and deletes quotas for a tenant. - -Show quotas -~~~~~~~~~~~ - -.. rest_method:: GET /v2/{admin_tenant_id}/os-quota-sets/{tenant_id} - -Shows quotas for a tenant. - - -Normal response codes: 200 -Error response codes: - - -Request -------- - -.. rest_parameters:: parameters.yaml - - - tenant_id: quotas_tenant_id - - admin_tenant_id: admin_tenant_id - - usage: usage - - -Response Parameters -------------------- - -.. 
rest_parameters:: parameters.yaml - - - injected_file_content_bytes: injected_file_content_bytes - - metadata_items: metadata_items - - reserved: reserved - - in_use: in_use - - ram: ram - - floating_ips: floating_ips - - key_pairs: key_pairs - - injected_file_path_bytes: injected_file_path_bytes - - instances: instances - - security_group_rules: security_group_rules - - injected_files: injected_files - - quota_set: quota_set - - cores: cores - - fixed_ips: fixed_ips - - id: id - - security_groups: security_groups - - -Response Example ----------------- - -.. literalinclude:: ./samples/quotas-show-response.json - :language: javascript - -Update quotas -~~~~~~~~~~~~~ - -.. rest_method:: PUT /v2/{admin_tenant_id}/os-quota-sets/{tenant_id} - -Updates quotas for a tenant. - - -Normal response codes: 200 -Error response codes: - - -Request -------- - -.. rest_parameters:: parameters.yaml - - - injected_file_content_bytes: injected_file_content_bytes - - metadata_items: metadata_items - - reserved: reserved - - in_use: in_use - - ram: ram - - floating_ips: floating_ips - - key_pairs: key_pairs - - injected_file_path_bytes: injected_file_path_bytes - - instances: instances - - security_group_rules: security_group_rules - - injected_files: injected_files - - quota_set: quota_set - - cores: cores - - fixed_ips: fixed_ips - - id: id - - security_groups: security_groups - - tenant_id: quotas_tenant_id - - admin_tenant_id: admin_tenant_id - -Request Example ---------------- - -.. literalinclude:: ./samples/quotas-update-request.json - :language: javascript - - -Response Parameters -------------------- - -.. 
rest_parameters:: parameters.yaml - - - injected_file_content_bytes: injected_file_content_bytes - - metadata_items: metadata_items - - reserved: reserved - - in_use: in_use - - ram: ram - - floating_ips: floating_ips - - key_pairs: key_pairs - - injected_file_path_bytes: injected_file_path_bytes - - instances: instances - - security_group_rules: security_group_rules - - injected_files: injected_files - - quota_set: quota_set - - cores: cores - - fixed_ips: fixed_ips - - id: id - - security_groups: security_groups - -Response Example ----------------- - -.. literalinclude:: ./samples/quotas-update-response.json - :language: javascript - -Delete quotas -~~~~~~~~~~~~~ - -.. rest_method:: DELETE /v2/{admin_tenant_id}/os-quota-sets/{tenant_id} - -Deletes quotas for a tenant so the quotas revert to default values. - - -Normal response codes: 200 -Error response codes: - - -Request -------- - -.. rest_parameters:: parameters.yaml - - - tenant_id: quotas_tenant_id - - admin_tenant_id: admin_tenant_id - -Response Example ----------------- - -.. literalinclude:: ./samples/quotas-delete-response.json - :language: javascript - -Get default quotas -~~~~~~~~~~~~~~~~~~ - -.. rest_method:: GET /v2/{admin_tenant_id}/os-quota-sets/{tenant_id}/defaults - -Gets default quotas for a tenant. - - -Normal response codes: 200 -Error response codes: - - -Request -------- - -.. rest_parameters:: parameters.yaml - - - tenant_id: tenant_id - - admin_tenant_id: admin_tenant_id - - -Response Parameters -------------------- - -.. 
rest_parameters:: parameters.yaml - - - injected_file_content_bytes: injected_file_content_bytes - - metadata_items: metadata_items - - reserved: reserved - - in_use: in_use - - ram: ram - - floating_ips: floating_ips - - key_pairs: key_pairs - - injected_file_path_bytes: injected_file_path_bytes - - instances: instances - - security_group_rules: security_group_rules - - injected_files: injected_files - - quota_set: quota_set - - cores: cores - - fixed_ips: fixed_ips - - id: id - - security_groups: security_groups - - -Response Example ----------------- - -.. literalinclude:: ./samples/quotas-show-defaults-response.json - :language: javascript diff --git a/api-ref/source/v2/samples/backend-capabilities-response.json b/api-ref/source/v2/samples/backend-capabilities-response.json deleted file mode 100644 index dcf213d24..000000000 --- a/api-ref/source/v2/samples/backend-capabilities-response.json +++ /dev/null @@ -1,34 +0,0 @@ -{ - "namespace": "OS::Storage::Capabilities::fake", - "vendor_name": "OpenStack", - "volume_backend_name": "lvmdriver-1", - "pool_name": "pool", - "driver_version": "2.0.0", - "storage_protocol": "iSCSI", - "display_name": "Capabilities of Cinder LVM driver", - "description": "These are volume type options provided by Cinder LVM driver, blah, blah.", - "visibility": "public", - "replication_targets": [], - "properties": { - "compression": { - "title": "Compression", - "description": "Enables compression.", - "type": "boolean" - }, - "qos": { - "title": "QoS", - "description": "Enables QoS.", - "type": "boolean" - }, - "replication": { - "title": "Replication", - "description": "Enables replication.", - "type": "boolean" - }, - "thin_provisioning": { - "title": "Thin Provisioning", - "description": "Sets thin provisioning.", - "type": "boolean" - } - } -} diff --git a/api-ref/source/v2/samples/backup-create-request.json b/api-ref/source/v2/samples/backup-create-request.json deleted file mode 100644 index c7f8a74d5..000000000 --- 
a/api-ref/source/v2/samples/backup-create-request.json +++ /dev/null @@ -1,9 +0,0 @@ -{ - "backup": { - "container": null, - "description": null, - "name": "backup001", - "volume_id": "64f5d2fb-d836-4063-b7e2-544d5c1ff607", - "incremental": true - } -} diff --git a/api-ref/source/v2/samples/backup-create-response.json b/api-ref/source/v2/samples/backup-create-response.json deleted file mode 100644 index e2aec1c8f..000000000 --- a/api-ref/source/v2/samples/backup-create-response.json +++ /dev/null @@ -1,16 +0,0 @@ -{ - "backup": { - "id": "deac8b8c-35c9-4c71-acaa-889c2d5d5c8e", - "links": [ - { - "href": "http://localhost:8776/v2/c95fc3e4afe248a49a28828f286a7b38/backups/deac8b8c-35c9-4c71-acaa-889c2d5d5c8e", - "rel": "self" - }, - { - "href": "http://localhost:8776/c95fc3e4afe248a49a28828f286a7b38/backups/deac8b8c-35c9-4c71-acaa-889c2d5d5c8e", - "rel": "bookmark" - } - ], - "name": "backup001" - } -} diff --git a/api-ref/source/v2/samples/backup-force-delete-request.json b/api-ref/source/v2/samples/backup-force-delete-request.json deleted file mode 100644 index 5c56464d9..000000000 --- a/api-ref/source/v2/samples/backup-force-delete-request.json +++ /dev/null @@ -1,3 +0,0 @@ -{ - "os-force_delete": {} -} diff --git a/api-ref/source/v2/samples/backup-record-export-response.json b/api-ref/source/v2/samples/backup-record-export-response.json deleted file mode 100644 index 8783eeda0..000000000 --- a/api-ref/source/v2/samples/backup-record-export-response.json +++ /dev/null @@ -1,6 +0,0 @@ -{ - "backup-record": { - "backup_service": "cinder.backup.drivers.swift", - "backup_url": "eyJzdGF0" - } -} diff --git a/api-ref/source/v2/samples/backup-record-import-request.json b/api-ref/source/v2/samples/backup-record-import-request.json deleted file mode 100644 index 8783eeda0..000000000 --- a/api-ref/source/v2/samples/backup-record-import-request.json +++ /dev/null @@ -1,6 +0,0 @@ -{ - "backup-record": { - "backup_service": "cinder.backup.drivers.swift", - "backup_url": 
"eyJzdGF0" - } -} diff --git a/api-ref/source/v2/samples/backup-record-import-response.json b/api-ref/source/v2/samples/backup-record-import-response.json deleted file mode 100644 index 60eeabbc9..000000000 --- a/api-ref/source/v2/samples/backup-record-import-response.json +++ /dev/null @@ -1,16 +0,0 @@ -{ - "backup": { - "id": "deac8b8c-35c9-4c71-acaa-889c2d5d5c8e", - "links": [ - { - "href": "http://localhost:8776/v2/c95fc3e4afe248a49a28828f286a7b38/backups/deac8b8c-35c9-4c71-acaa-889c2d5d5c8e", - "rel": "self" - }, - { - "href": "http://localhost:8776/c95fc3e4afe248a49a28828f286a7b38/backups/deac8b8c-35c9-4c71-acaa-889c2d5d5c8e", - "rel": "bookmark" - } - ], - "name": null - } -} diff --git a/api-ref/source/v2/samples/backup-reset-status-request.json b/api-ref/source/v2/samples/backup-reset-status-request.json deleted file mode 100644 index b18b65a68..000000000 --- a/api-ref/source/v2/samples/backup-reset-status-request.json +++ /dev/null @@ -1,5 +0,0 @@ -{ - "os-reset_status": { - "status": "available" - } -} diff --git a/api-ref/source/v2/samples/backup-restore-request.json b/api-ref/source/v2/samples/backup-restore-request.json deleted file mode 100644 index 2ccb7e516..000000000 --- a/api-ref/source/v2/samples/backup-restore-request.json +++ /dev/null @@ -1,6 +0,0 @@ -{ - "restore": { - "name": "vol-01", - "volume_id": "64f5d2fb-d836-4063-b7e2-544d5c1ff607" - } -} diff --git a/api-ref/source/v2/samples/backup-restore-response.json b/api-ref/source/v2/samples/backup-restore-response.json deleted file mode 100644 index a344ea56c..000000000 --- a/api-ref/source/v2/samples/backup-restore-response.json +++ /dev/null @@ -1,6 +0,0 @@ -{ - "restore": { - "backup_id": "2ef47aee-8844-490c-804d-2a8efe561c65", - "volume_id": "795114e8-7489-40be-a978-83797f2c1dd3" - } -} diff --git a/api-ref/source/v2/samples/backup-show-response.json b/api-ref/source/v2/samples/backup-show-response.json deleted file mode 100644 index c4fe0ffc4..000000000 --- 
a/api-ref/source/v2/samples/backup-show-response.json +++ /dev/null @@ -1,27 +0,0 @@ -{ - "backup": { - "availability_zone": "az1", - "container": "volumebackups", - "created_at": "2013-04-02T10:35:27.000000", - "description": null, - "fail_reason": null, - "id": "2ef47aee-8844-490c-804d-2a8efe561c65", - "links": [ - { - "href": "http://localhost:8776/v1/c95fc3e4afe248a49a28828f286a7b38/backups/2ef47aee-8844-490c-804d-2a8efe561c65", - "rel": "self" - }, - { - "href": "http://localhost:8776/c95fc3e4afe248a49a28828f286a7b38/backups/2ef47aee-8844-490c-804d-2a8efe561c65", - "rel": "bookmark" - } - ], - "name": "backup001", - "object_count": 22, - "size": 1, - "status": "available", - "volume_id": "e5185058-943a-4cb4-96d9-72c184c337d6", - "is_incremental": true, - "has_dependent_backups": false - } -} diff --git a/api-ref/source/v2/samples/backups-list-detailed-response.json b/api-ref/source/v2/samples/backups-list-detailed-response.json deleted file mode 100644 index d729ada68..000000000 --- a/api-ref/source/v2/samples/backups-list-detailed-response.json +++ /dev/null @@ -1,54 +0,0 @@ -{ - "backups": [ - { - "availability_zone": "az1", - "container": "volumebackups", - "created_at": "2013-04-02T10:35:27.000000", - "description": null, - "fail_reason": null, - "id": "2ef47aee-8844-490c-804d-2a8efe561c65", - "links": [ - { - "href": "http://localhost:8776/v1/c95fc3e4afe248a49a28828f286a7b38/backups/2ef47aee-8844-490c-804d-2a8efe561c65", - "rel": "self" - }, - { - "href": "http://localhost:8776/c95fc3e4afe248a49a28828f286a7b38/backups/2ef47aee-8844-490c-804d-2a8efe561c65", - "rel": "bookmark" - } - ], - "name": "backup001", - "object_count": 22, - "size": 1, - "status": "available", - "volume_id": "e5185058-943a-4cb4-96d9-72c184c337d6", - "is_incremental": true, - "has_dependent_backups": false - }, - { - "availability_zone": "az1", - "container": "volumebackups", - "created_at": "2013-04-02T10:21:48.000000", - "description": null, - "fail_reason": null, - "id": 
"4dbf0ec2-0b57-4669-9823-9f7c76f2b4f8", - "links": [ - { - "href": "http://localhost:8776/v1/c95fc3e4afe248a49a28828f286a7b38/backups/4dbf0ec2-0b57-4669-9823-9f7c76f2b4f8", - "rel": "self" - }, - { - "href": "http://localhost:8776/c95fc3e4afe248a49a28828f286a7b38/backups/4dbf0ec2-0b57-4669-9823-9f7c76f2b4f8", - "rel": "bookmark" - } - ], - "name": "backup002", - "object_count": 22, - "size": 1, - "status": "available", - "volume_id": "e5185058-943a-4cb4-96d9-72c184c337d6", - "is_incremental": true, - "has_dependent_backups": false - } - ] -} diff --git a/api-ref/source/v2/samples/backups-list-response.json b/api-ref/source/v2/samples/backups-list-response.json deleted file mode 100644 index 8dd7d785a..000000000 --- a/api-ref/source/v2/samples/backups-list-response.json +++ /dev/null @@ -1,32 +0,0 @@ -{ - "backups": [ - { - "id": "2ef47aee-8844-490c-804d-2a8efe561c65", - "links": [ - { - "href": "http://localhost:8776/v1/c95fc3e4afe248a49a28828f286a7b38/backups/2ef47aee-8844-490c-804d-2a8efe561c65", - "rel": "self" - }, - { - "href": "http://localhost:8776/c95fc3e4afe248a49a28828f286a7b38/backups/2ef47aee-8844-490c-804d-2a8efe561c65", - "rel": "bookmark" - } - ], - "name": "backup001" - }, - { - "id": "4dbf0ec2-0b57-4669-9823-9f7c76f2b4f8", - "links": [ - { - "href": "http://localhost:8776/v1/c95fc3e4afe248a49a28828f286a7b38/backups/4dbf0ec2-0b57-4669-9823-9f7c76f2b4f8", - "rel": "self" - }, - { - "href": "http://localhost:8776/c95fc3e4afe248a49a28828f286a7b38/backups/4dbf0ec2-0b57-4669-9823-9f7c76f2b4f8", - "rel": "bookmark" - } - ], - "name": "backup002" - } - ] -} diff --git a/api-ref/source/v2/samples/cgsnapshots-create-request.json b/api-ref/source/v2/samples/cgsnapshots-create-request.json deleted file mode 100644 index 36d6f4537..000000000 --- a/api-ref/source/v2/samples/cgsnapshots-create-request.json +++ /dev/null @@ -1,10 +0,0 @@ -{ - "cgsnapshot": { - "consistencygroup_id": "6f519a48-3183-46cf-a32f-41815f814546", - "name": "firstcg", - "description": 
"first consistency group", - "user_id": "6f519a48-3183-46cf-a32f-41815f814444", - "project_id": "6f519a48-3183-46cf-a32f-41815f815555", - "status": "creating" - } -} diff --git a/api-ref/source/v2/samples/cgsnapshots-create-response.json b/api-ref/source/v2/samples/cgsnapshots-create-response.json deleted file mode 100644 index 6d24a97f1..000000000 --- a/api-ref/source/v2/samples/cgsnapshots-create-response.json +++ /dev/null @@ -1,6 +0,0 @@ -{ - "cgsnapshot": { - "id": "6f519a48-3183-46cf-a32f-41815f816666", - "name": "firstcg" - } -} diff --git a/api-ref/source/v2/samples/cgsnapshots-list-detailed-response.json b/api-ref/source/v2/samples/cgsnapshots-list-detailed-response.json deleted file mode 100644 index 93ad12870..000000000 --- a/api-ref/source/v2/samples/cgsnapshots-list-detailed-response.json +++ /dev/null @@ -1,20 +0,0 @@ -{ - "cgsnapshots": [ - { - "id": "6f519a48-3183-46cf-a32f-41815f813986", - "consistencygroup_id": "6f519a48-3183-46cf-a32f-41815f814444", - "status": "available", - "created_at": "2015-09-16T09:28:52.000000", - "name": "my-cg1", - "description": "my first consistency group" - }, - { - "id": "aed36625-a6d7-4681-ba59-c7ba3d18c148", - "consistencygroup_id": "aed36625-a6d7-4681-ba59-c7ba3d18dddd", - "status": "error", - "created_at": "2015-09-16T09:31:15.000000", - "name": "my-cg2", - "description": "Edited description" - } - ] -} diff --git a/api-ref/source/v2/samples/cgsnapshots-list-response.json b/api-ref/source/v2/samples/cgsnapshots-list-response.json deleted file mode 100644 index 726aa803a..000000000 --- a/api-ref/source/v2/samples/cgsnapshots-list-response.json +++ /dev/null @@ -1,12 +0,0 @@ -{ - "cgsnapshots": [ - { - "id": "6f519a48-3183-46cf-a32f-41815f813986", - "name": "my-cg1" - }, - { - "id": "aed36625-a6d7-4681-ba59-c7ba3d18c148", - "name": "my-cg2" - } - ] -} diff --git a/api-ref/source/v2/samples/cgsnapshots-show-response.json b/api-ref/source/v2/samples/cgsnapshots-show-response.json deleted file mode 100644 index 
632a5afba..000000000 --- a/api-ref/source/v2/samples/cgsnapshots-show-response.json +++ /dev/null @@ -1,10 +0,0 @@ -{ - "cgsnapshot": { - "id": "6f519a48-3183-46cf-a32f-41815f813986", - "consistencygroup_id": "6f519a48-3183-46cf-a32f-41815f814444", - "status": "available", - "created_at": "2015-09-16T09:28:52.000000", - "name": "my-cg1", - "description": "my first consistency group" - } -} diff --git a/api-ref/source/v2/samples/consistency-group-create-from-src-request.json b/api-ref/source/v2/samples/consistency-group-create-from-src-request.json deleted file mode 100644 index ad25c5d02..000000000 --- a/api-ref/source/v2/samples/consistency-group-create-from-src-request.json +++ /dev/null @@ -1,11 +0,0 @@ -{ - "consistencygroup-from-src": { - "name": "firstcg", - "description": "first consistency group", - "cgsnapshot_id": "6f519a48-3183-46cf-a32f-41815f813986", - "source_cgid": "6f519a48-3183-46cf-a32f-41815f814546", - "user_id": "6f519a48-3183-46cf-a32f-41815f815555", - "project_id": "6f519a48-3183-46cf-a32f-41815f814444", - "status": "creating" - } -} diff --git a/api-ref/source/v2/samples/consistency-group-create-request.json b/api-ref/source/v2/samples/consistency-group-create-request.json deleted file mode 100644 index 8c9fbc2b0..000000000 --- a/api-ref/source/v2/samples/consistency-group-create-request.json +++ /dev/null @@ -1,8 +0,0 @@ -{ - "consistencygroup": { - "name": "firstcg", - "description": "first consistency group", - "volume_types": "type1,type2", - "availability_zone": "az0" - } -} diff --git a/api-ref/source/v2/samples/consistency-group-create-response.json b/api-ref/source/v2/samples/consistency-group-create-response.json deleted file mode 100644 index 15a5ec02a..000000000 --- a/api-ref/source/v2/samples/consistency-group-create-response.json +++ /dev/null @@ -1,11 +0,0 @@ -{ - "consistencygroup": { - "status": "error", - "description": "first consistency group", - "availability_zone": "az0", - "created_at": "2016-08-19T19:32:19.000000", - 
"volume_types": ["type1", "type2"], - "id": "63d1a274-de38-4384-a97e-475306777027", - "name": "firstcg" - } -} diff --git a/api-ref/source/v2/samples/consistency-group-delete-request.json b/api-ref/source/v2/samples/consistency-group-delete-request.json deleted file mode 100644 index 8ad8745e7..000000000 --- a/api-ref/source/v2/samples/consistency-group-delete-request.json +++ /dev/null @@ -1,5 +0,0 @@ -{ - "consistencygroup": { - "force": false - } -} diff --git a/api-ref/source/v2/samples/consistency-group-show-response.json b/api-ref/source/v2/samples/consistency-group-show-response.json deleted file mode 100644 index 3cbb87d74..000000000 --- a/api-ref/source/v2/samples/consistency-group-show-response.json +++ /dev/null @@ -1,13 +0,0 @@ -{ - "consistencygroup": { - "id": "6f519a48-3183-46cf-a32f-41815f813986", - "status": "available", - "availability_zone": "az1", - "created_at": "2015-09-16T09:28:52.000000", - "name": "my-cg1", - "description": "my first consistency group", - "volume_types": [ - "123456" - ] - } -} diff --git a/api-ref/source/v2/samples/consistency-group-update-request.json b/api-ref/source/v2/samples/consistency-group-update-request.json deleted file mode 100644 index 945465516..000000000 --- a/api-ref/source/v2/samples/consistency-group-update-request.json +++ /dev/null @@ -1,8 +0,0 @@ -{ - "consistencygroup": { - "name": "my_cg", - "description": "My consistency group", - "add_volumes": "volume-uuid-1,volume-uuid-2", - "remove_volumes": "volume-uuid-8,volume-uuid-9" - } -} diff --git a/api-ref/source/v2/samples/consistency-groups-list-detailed-response.json b/api-ref/source/v2/samples/consistency-groups-list-detailed-response.json deleted file mode 100644 index 618c65882..000000000 --- a/api-ref/source/v2/samples/consistency-groups-list-detailed-response.json +++ /dev/null @@ -1,26 +0,0 @@ -{ - "consistencygroups": [ - { - "id": "6f519a48-3183-46cf-a32f-41815f813986", - "status": "available", - "availability_zone": "az1", - "created_at": 
"2015-09-16T09:28:52.000000", - "name": "my-cg1", - "description": "my first consistency group", - "volume_types": [ - "123456" - ] - }, - { - "id": "aed36625-a6d7-4681-ba59-c7ba3d18c148", - "status": "error", - "availability_zone": "az2", - "created_at": "2015-09-16T09:31:15.000000", - "name": "my-cg2", - "description": "Edited description", - "volume_types": [ - "234567" - ] - } - ] -} diff --git a/api-ref/source/v2/samples/consistency-groups-list-response.json b/api-ref/source/v2/samples/consistency-groups-list-response.json deleted file mode 100644 index a53863f43..000000000 --- a/api-ref/source/v2/samples/consistency-groups-list-response.json +++ /dev/null @@ -1,12 +0,0 @@ -{ - "consistencygroups": [ - { - "id": "6f519a48-3183-46cf-a32f-41815f813986", - "name": "my-cg1" - }, - { - "id": "aed36625-a6d7-4681-ba59-c7ba3d18c148", - "name": "my-cg2" - } - ] -} diff --git a/api-ref/source/v2/samples/encryption-type-create-request.json b/api-ref/source/v2/samples/encryption-type-create-request.json deleted file mode 100644 index f93c14261..000000000 --- a/api-ref/source/v2/samples/encryption-type-create-request.json +++ /dev/null @@ -1,8 +0,0 @@ -{ - "encryption":{ - "key_size": 128, - "provider": "nova.volume.encryptors.luks.LuksEncryptor", - "control_location":"front-end", - "cipher": "aes-xts-plain64" - } -} diff --git a/api-ref/source/v2/samples/encryption-type-create-response.json b/api-ref/source/v2/samples/encryption-type-create-response.json deleted file mode 100644 index 3e2a2aeec..000000000 --- a/api-ref/source/v2/samples/encryption-type-create-response.json +++ /dev/null @@ -1,10 +0,0 @@ -{ - "encryption": { - "volume_type_id": "2d29462d-76cb-417c-8a9f-fb23140f1577", - "control_location": "front-end", - "encryption_id": "81e069c6-7394-4856-8df7-3b237ca61f74", - "key_size": 128, - "provider": "nova.volume.encryptors.luks.LuksEncryptor", - "cipher": "aes-xts-plain64" - } -} diff --git a/api-ref/source/v2/samples/encryption-type-show-response.json 
b/api-ref/source/v2/samples/encryption-type-show-response.json deleted file mode 100644 index 83ecab88b..000000000 --- a/api-ref/source/v2/samples/encryption-type-show-response.json +++ /dev/null @@ -1,12 +0,0 @@ -{ - "volume_type_id": "2d29462d-76cb-417c-8a9f-fb23140f1577", - "control_location": "front-end", - "deleted": false, - "created_at": "2016-12-28T02:32:25.000000", - "updated_at": null, - "encryption_id": "81e069c6-7394-4856-8df7-3b237ca61f74", - "key_size": 128, - "provider": "nova.volume.encryptors.luks.LuksEncryptor", - "deleted_at": null, - "cipher": "aes-xts-plain64" -} \ No newline at end of file diff --git a/api-ref/source/v2/samples/encryption-type-update-request.json b/api-ref/source/v2/samples/encryption-type-update-request.json deleted file mode 100644 index 7a587b771..000000000 --- a/api-ref/source/v2/samples/encryption-type-update-request.json +++ /dev/null @@ -1,7 +0,0 @@ -{ - "encryption":{ - "key_size": 64, - "provider": "cinder.keymgr.conf_key_mgr.ConfKeyManager", - "control_location":"back-end" - } -} \ No newline at end of file diff --git a/api-ref/source/v2/samples/encryption-type-update-response.json b/api-ref/source/v2/samples/encryption-type-update-response.json deleted file mode 100644 index 7a587b771..000000000 --- a/api-ref/source/v2/samples/encryption-type-update-response.json +++ /dev/null @@ -1,7 +0,0 @@ -{ - "encryption":{ - "key_size": 64, - "provider": "cinder.keymgr.conf_key_mgr.ConfKeyManager", - "control_location":"back-end" - } -} \ No newline at end of file diff --git a/api-ref/source/v2/samples/extensions-list-response.json b/api-ref/source/v2/samples/extensions-list-response.json deleted file mode 100644 index 55003a906..000000000 --- a/api-ref/source/v2/samples/extensions-list-response.json +++ /dev/null @@ -1,212 +0,0 @@ -{ - "extensions": [ - { - "updated": "2013-04-18T00:00:00+00:00", - "name": "SchedulerHints", - "links": [], - "namespace": "http://docs.openstack.org/block-service/ext/scheduler-hints/api/v2", - 
"alias": "OS-SCH-HNT", - "description": "Pass arbitrary key/value pairs to the scheduler." - }, - { - "updated": "2011-06-29T00:00:00+00:00", - "name": "Hosts", - "links": [], - "namespace": "http://docs.openstack.org/volume/ext/hosts/api/v1.1", - "alias": "os-hosts", - "description": "Admin-only host administration." - }, - { - "updated": "2011-11-03T00:00:00+00:00", - "name": "VolumeTenantAttribute", - "links": [], - "namespace": "http://docs.openstack.org/volume/ext/volume_tenant_attribute/api/v1", - "alias": "os-vol-tenant-attr", - "description": "Expose the internal project_id as an attribute of a volume." - }, - { - "updated": "2011-08-08T00:00:00+00:00", - "name": "Quotas", - "links": [], - "namespace": "http://docs.openstack.org/volume/ext/quotas-sets/api/v1.1", - "alias": "os-quota-sets", - "description": "Quota management support." - }, - { - "updated": "2011-08-24T00:00:00+00:00", - "name": "TypesManage", - "links": [], - "namespace": "http://docs.openstack.org/volume/ext/types-manage/api/v1", - "alias": "os-types-manage", - "description": "Types manage support." - }, - { - "updated": "2013-07-10T00:00:00+00:00", - "name": "VolumeEncryptionMetadata", - "links": [], - "namespace": "http://docs.openstack.org/volume/ext/os-volume-encryption-metadata/api/v1", - "alias": "os-volume-encryption-metadata", - "description": "Volume encryption metadata retrieval support." - }, - { - "updated": "2012-12-12T00:00:00+00:00", - "name": "Backups", - "links": [], - "namespace": "http://docs.openstack.org/volume/ext/backups/api/v1", - "alias": "backups", - "description": "Backups support." - }, - { - "updated": "2013-07-16T00:00:00+00:00", - "name": "SnapshotActions", - "links": [], - "namespace": "http://docs.openstack.org/volume/ext/snapshot-actions/api/v1.1", - "alias": "os-snapshot-actions", - "description": "Enable snapshot manager actions." 
- }, - { - "updated": "2012-05-31T00:00:00+00:00", - "name": "VolumeActions", - "links": [], - "namespace": "http://docs.openstack.org/volume/ext/volume-actions/api/v1.1", - "alias": "os-volume-actions", - "description": "Enable volume actions\n " - }, - { - "updated": "2013-10-03T00:00:00+00:00", - "name": "UsedLimits", - "links": [], - "namespace": "http://docs.openstack.org/volume/ext/used-limits/api/v1.1", - "alias": "os-used-limits", - "description": "Provide data on limited resources that are being used." - }, - { - "updated": "2012-05-31T00:00:00+00:00", - "name": "VolumeUnmanage", - "links": [], - "namespace": "http://docs.openstack.org/volume/ext/volume-unmanage/api/v1.1", - "alias": "os-volume-unmanage", - "description": "Enable volume unmanage operation." - }, - { - "updated": "2011-11-03T00:00:00+00:00", - "name": "VolumeHostAttribute", - "links": [], - "namespace": "http://docs.openstack.org/volume/ext/volume_host_attribute/api/v1", - "alias": "os-vol-host-attr", - "description": "Expose host as an attribute of a volume." - }, - { - "updated": "2013-07-01T00:00:00+00:00", - "name": "VolumeTypeEncryption", - "links": [], - "namespace": "http://docs.openstack.org/volume/ext/volume-type-encryption/api/v1", - "alias": "encryption", - "description": "Encryption support for volume types." - }, - { - "updated": "2013-06-27T00:00:00+00:00", - "name": "AvailabilityZones", - "links": [], - "namespace": "http://docs.openstack.org/volume/ext/os-availability-zone/api/v1", - "alias": "os-availability-zone", - "description": "Describe Availability Zones." - }, - { - "updated": "2013-08-02T00:00:00+00:00", - "name": "Qos_specs_manage", - "links": [], - "namespace": "http://docs.openstack.org/volume/ext/qos-specs/api/v1", - "alias": "qos-specs", - "description": "QoS specs support." 
- }, - { - "updated": "2011-08-24T00:00:00+00:00", - "name": "TypesExtraSpecs", - "links": [], - "namespace": "http://docs.openstack.org/volume/ext/types-extra-specs/api/v1", - "alias": "os-types-extra-specs", - "description": "Type extra specs support." - }, - { - "updated": "2013-08-08T00:00:00+00:00", - "name": "VolumeMigStatusAttribute", - "links": [], - "namespace": "http://docs.openstack.org/volume/ext/volume_mig_status_attribute/api/v1", - "alias": "os-vol-mig-status-attr", - "description": "Expose migration_status as an attribute of a volume." - }, - { - "updated": "2012-08-13T00:00:00+00:00", - "name": "CreateVolumeExtension", - "links": [], - "namespace": "http://docs.openstack.org/volume/ext/image-create/api/v1", - "alias": "os-image-create", - "description": "Allow creating a volume from an image in the Create Volume v1 API." - }, - { - "updated": "2014-01-10T00:00:00-00:00", - "name": "ExtendedServices", - "links": [], - "namespace": "http://docs.openstack.org/volume/ext/extended_services/api/v2", - "alias": "os-extended-services", - "description": "Extended services support." - }, - { - "updated": "2012-06-19T00:00:00+00:00", - "name": "ExtendedSnapshotAttributes", - "links": [], - "namespace": "http://docs.openstack.org/volume/ext/extended_snapshot_attributes/api/v1", - "alias": "os-extended-snapshot-attributes", - "description": "Extended SnapshotAttributes support." - }, - { - "updated": "2012-12-07T00:00:00+00:00", - "name": "VolumeImageMetadata", - "links": [], - "namespace": "http://docs.openstack.org/volume/ext/volume_image_metadata/api/v1", - "alias": "os-vol-image-meta", - "description": "Show image metadata associated with the volume." - }, - { - "updated": "2012-03-12T00:00:00+00:00", - "name": "QuotaClasses", - "links": [], - "namespace": "http://docs.openstack.org/volume/ext/quota-classes-sets/api/v1.1", - "alias": "os-quota-class-sets", - "description": "Quota classes management support." 
- }, - { - "updated": "2013-05-29T00:00:00+00:00", - "name": "VolumeTransfer", - "links": [], - "namespace": "http://docs.openstack.org/volume/ext/volume-transfer/api/v1.1", - "alias": "os-volume-transfer", - "description": "Volume transfer management support." - }, - { - "updated": "2014-02-10T00:00:00+00:00", - "name": "VolumeManage", - "links": [], - "namespace": "http://docs.openstack.org/volume/ext/os-volume-manage/api/v1", - "alias": "os-volume-manage", - "description": "Allows existing backend storage to be 'managed' by Cinder." - }, - { - "updated": "2012-08-25T00:00:00+00:00", - "name": "AdminActions", - "links": [], - "namespace": "http://docs.openstack.org/volume/ext/admin-actions/api/v1.1", - "alias": "os-admin-actions", - "description": "Enable admin actions." - }, - { - "updated": "2012-10-28T00:00:00-00:00", - "name": "Services", - "links": [], - "namespace": "http://docs.openstack.org/volume/ext/services/api/v2", - "alias": "os-services", - "description": "Services support." 
- } - ] -} diff --git a/api-ref/source/v2/samples/host-attach-request.json b/api-ref/source/v2/samples/host-attach-request.json deleted file mode 100644 index 01d064451..000000000 --- a/api-ref/source/v2/samples/host-attach-request.json +++ /dev/null @@ -1,5 +0,0 @@ -{ - "os-attach": { - "host_name": "my_host" - } -} diff --git a/api-ref/source/v2/samples/hosts-get-response.json b/api-ref/source/v2/samples/hosts-get-response.json deleted file mode 100644 index b3e237028..000000000 --- a/api-ref/source/v2/samples/hosts-get-response.json +++ /dev/null @@ -1,22 +0,0 @@ -{ - "host": [{ - "resource": { - "volume_count": "8", - "total_volume_gb": "11", - "total_snapshot_gb": "1", - "project": "(total)", - "host": "node1@rbd-sas", - "snapshot_count": "1" - } - }, - { - "resource": { - "volume_count": "8", - "total_volume_gb": "11", - "total_snapshot_gb": "1", - "project": "f21a9c86d7114bf99c711f4874d80474", - "host": "node1@rbd-sas", - "snapshot_count": "1" - } - }] -} diff --git a/api-ref/source/v2/samples/hosts-list-response.json b/api-ref/source/v2/samples/hosts-list-response.json deleted file mode 100644 index 1ae780898..000000000 --- a/api-ref/source/v2/samples/hosts-list-response.json +++ /dev/null @@ -1,26 +0,0 @@ -{ - "hosts": [{ - "service-status": "available", - "service": "cinder-backup", - "zone": "nova", - "service-state": "enabled", - "host_name": "node1", - "last-update": "2017-03-09T21:38:41.000000" - }, - { - "service-status": "available", - "service": "cinder-scheduler", - "zone": "nova", - "service-state": "enabled", - "host_name": "node1", - "last-update": "2017-03-09T21:38:38.000000" - }, - { - "service-status": "available", - "service": "cinder-volume", - "zone": "nova", - "service-state": "enabled", - "host_name": "node1@lvm", - "last-update": "2017-03-09T21:38:35.000000" - }] -} diff --git a/api-ref/source/v2/samples/image-metadata-show-request.json b/api-ref/source/v2/samples/image-metadata-show-request.json deleted file mode 100644 index 
f84e8261d..000000000 --- a/api-ref/source/v2/samples/image-metadata-show-request.json +++ /dev/null @@ -1,18 +0,0 @@ -{ - "volume": { - "host": "geraint-VirtualBox", - "ref": { - "source-volume-name": "existingLV", - "source-volume-id": "1234" - }, - "name": "New Volume", - "availability_zone": "az2", - "description": "Volume imported from existingLV", - "volume_type": null, - "bootable": true, - "metadata": { - "key1": "value1", - "key2": "value2" - } - } -} diff --git a/api-ref/source/v2/samples/image-metadata-show-response.json b/api-ref/source/v2/samples/image-metadata-show-response.json deleted file mode 100644 index 343ca66bc..000000000 --- a/api-ref/source/v2/samples/image-metadata-show-response.json +++ /dev/null @@ -1,33 +0,0 @@ -{ - "volume": { - "status": "creating", - "user_id": "eae1472b5fc5496998a3d06550929e7e", - "attachments": [], - "links": [ - { - "href": "http://10.0.2.15:8776/v2/87c8522052ca4eed98bc672b4c1a3ddb/volumes/23cf872b-c781-4cd4-847d-5f2ec8cbd91c", - "rel": "self" - }, - { - "href": "http://10.0.2.15:8776/87c8522052ca4eed98bc672b4c1a3ddb/volumes/23cf872b-c781-4cd4-847d-5f2ec8cbd91c", - "rel": "bookmark" - } - ], - "availability_zone": "az2", - "bootable": "false", - "encrypted": "false", - "created_at": "2014-07-18T00:12:54.000000", - "description": "Volume imported from existingLV", - "os-vol-tenant-attr:tenant_id": "87c8522052ca4eed98bc672b4c1a3ddb", - "volume_type": null, - "name": "New Volume", - "source_volid": null, - "snapshot_id": null, - "metadata": { - "key2": "value2", - "key1": "value1" - }, - "id": "23cf872b-c781-4cd4-847d-5f2ec8cbd91c", - "size": 0 - } -} diff --git a/api-ref/source/v2/samples/limits-show-response.json b/api-ref/source/v2/samples/limits-show-response.json deleted file mode 100644 index 38d0ccd3c..000000000 --- a/api-ref/source/v2/samples/limits-show-response.json +++ /dev/null @@ -1,17 +0,0 @@ -{ - "limits": { - "rate": [], - "absolute": { - "totalSnapshotsUsed": 0, - "maxTotalBackups": 10, - 
"maxTotalVolumeGigabytes": 1000, - "maxTotalSnapshots": 10, - "maxTotalBackupGigabytes": 1000, - "totalBackupGigabytesUsed": 0, - "maxTotalVolumes": 10, - "totalVolumesUsed": 0, - "totalBackupsUsed": 0, - "totalGigabytesUsed": 0 - } - } -} diff --git a/api-ref/source/v2/samples/pools-list-detailed-response.json b/api-ref/source/v2/samples/pools-list-detailed-response.json deleted file mode 100644 index 3fc28a299..000000000 --- a/api-ref/source/v2/samples/pools-list-detailed-response.json +++ /dev/null @@ -1,30 +0,0 @@ -{ - "pools": [ - { - "name": "pool1", - "capabilities": { - "updated": "2014-10-28T00:00:00-00:00", - "total_capacity": 1024, - "free_capacity": 100, - "volume_backend_name": "pool1", - "reserved_percentage": 0, - "driver_version": "1.0.0", - "storage_protocol": "iSCSI", - "QoS_support": false - } - }, - { - "name": "pool2", - "capabilities": { - "updated": "2014-10-28T00:00:00-00:00", - "total_capacity": 512, - "free_capacity": 200, - "volume_backend_name": "pool2", - "reserved_percentage": 0, - "driver_version": "1.0.1", - "storage_protocol": "iSER", - "QoS_support": true - } - } - ] -} diff --git a/api-ref/source/v2/samples/qos-create-request.json b/api-ref/source/v2/samples/qos-create-request.json deleted file mode 100644 index c0db909bd..000000000 --- a/api-ref/source/v2/samples/qos-create-request.json +++ /dev/null @@ -1,7 +0,0 @@ -{ - "qos_specs": { - "availability": "100", - "name": "reliability-spec", - "numberOfFailures": "0" - } -} diff --git a/api-ref/source/v2/samples/qos-create-response.json b/api-ref/source/v2/samples/qos-create-response.json deleted file mode 100644 index 8fbf23376..000000000 --- a/api-ref/source/v2/samples/qos-create-response.json +++ /dev/null @@ -1,21 +0,0 @@ -{ - "qos_specs": { - "specs": { - "numberOfFailures": "0", - "availability": "100" - }, - "consumer": "back-end", - "name": "reliability-spec", - "id": "599ef437-1c99-42ec-9fc6-239d0519fef1" - }, - "links": [ - { - "href": 
"http://23.253.248.171:8776/v2/bab7d5c60cd041a0a36f7c4b6e1dd978/qos_specs/599ef437-1c99-42ec-9fc6-239d0519fef1", - "rel": "self" - }, - { - "href": "http://23.253.248.171:8776/bab7d5c60cd041a0a36f7c4b6e1dd978/qos_specs/599ef437-1c99-42ec-9fc6-239d0519fef1", - "rel": "bookmark" - } - ] -} diff --git a/api-ref/source/v2/samples/qos-list-response.json b/api-ref/source/v2/samples/qos-list-response.json deleted file mode 100644 index 92f2a6216..000000000 --- a/api-ref/source/v2/samples/qos-list-response.json +++ /dev/null @@ -1,22 +0,0 @@ -{ - "qos_specs": [ - { - "specs": { - "availability": "100", - "numberOfFailures": "0" - }, - "consumer": "back-end", - "name": "reliability-spec", - "id": "0388d6c6-d5d4-42a3-b289-95205c50dd15" - }, - { - "specs": { - "delay": "0", - "throughput": "100" - }, - "consumer": "back-end", - "name": "performance-spec", - "id": "ecfc6e2e-7117-44a4-8eec-f84d04f531a8" - } - ] -} diff --git a/api-ref/source/v2/samples/qos-show-response.json b/api-ref/source/v2/samples/qos-show-response.json deleted file mode 100644 index d9a1dc191..000000000 --- a/api-ref/source/v2/samples/qos-show-response.json +++ /dev/null @@ -1,21 +0,0 @@ -{ - "qos_specs": { - "specs": { - "availability": "100", - "numberOfFailures": "0" - }, - "consumer": "back-end", - "name": "reliability-spec", - "id": "0388d6c6-d5d4-42a3-b289-95205c50dd15" - }, - "links": [ - { - "href": "http://23.253.228.211:8776/v2/e1cf63117ae74309a5bcc2002a23be8b/qos_specs/0388d6c6-d5d4-42a3-b289-95205c50dd15", - "rel": "self" - }, - { - "href": "http://23.253.228.211:8776/e1cf63117ae74309a5bcc2002a23be8b/qos_specs/0388d6c6-d5d4-42a3-b289-95205c50dd15", - "rel": "bookmark" - } - ] -} diff --git a/api-ref/source/v2/samples/qos-unset-request.json b/api-ref/source/v2/samples/qos-unset-request.json deleted file mode 100644 index 4193b7392..000000000 --- a/api-ref/source/v2/samples/qos-unset-request.json +++ /dev/null @@ -1,5 +0,0 @@ -{ - "keys": [ - "key1" - ] -} diff --git 
a/api-ref/source/v2/samples/qos-unset-response.json b/api-ref/source/v2/samples/qos-unset-response.json deleted file mode 100644 index e69de29bb..000000000 diff --git a/api-ref/source/v2/samples/qos-update-request.json b/api-ref/source/v2/samples/qos-update-request.json deleted file mode 100644 index 1d3987705..000000000 --- a/api-ref/source/v2/samples/qos-update-request.json +++ /dev/null @@ -1,5 +0,0 @@ -{ - "qos_specs": { - "delay": "1" - } -} diff --git a/api-ref/source/v2/samples/qos-update-response.json b/api-ref/source/v2/samples/qos-update-response.json deleted file mode 100644 index 1d3987705..000000000 --- a/api-ref/source/v2/samples/qos-update-response.json +++ /dev/null @@ -1,5 +0,0 @@ -{ - "qos_specs": { - "delay": "1" - } -} diff --git a/api-ref/source/v2/samples/qos_show_response.json b/api-ref/source/v2/samples/qos_show_response.json deleted file mode 100644 index 4a5d9db6a..000000000 --- a/api-ref/source/v2/samples/qos_show_response.json +++ /dev/null @@ -1,9 +0,0 @@ -{ - "qos_associations": [ - { - "association_type": "volume_type", - "name": "reliability-type", - "id": "a12983c2-83bd-4afa-be9f-ad796573ead6" - } - ] -} diff --git a/api-ref/source/v2/samples/quotas-delete-response.json b/api-ref/source/v2/samples/quotas-delete-response.json deleted file mode 100644 index e69de29bb..000000000 diff --git a/api-ref/source/v2/samples/quotas-show-defaults-response.json b/api-ref/source/v2/samples/quotas-show-defaults-response.json deleted file mode 100644 index 6c267112c..000000000 --- a/api-ref/source/v2/samples/quotas-show-defaults-response.json +++ /dev/null @@ -1,7 +0,0 @@ -{ - "quota_set": { - "gigabytes": 5, - "snapshots": 10, - "volumes": 20 - } -} diff --git a/api-ref/source/v2/samples/quotas-show-response.json b/api-ref/source/v2/samples/quotas-show-response.json deleted file mode 100644 index 6c267112c..000000000 --- a/api-ref/source/v2/samples/quotas-show-response.json +++ /dev/null @@ -1,7 +0,0 @@ -{ - "quota_set": { - "gigabytes": 5, - 
"snapshots": 10, - "volumes": 20 - } -} diff --git a/api-ref/source/v2/samples/quotas-update-request.json b/api-ref/source/v2/samples/quotas-update-request.json deleted file mode 100644 index 9ab32c11c..000000000 --- a/api-ref/source/v2/samples/quotas-update-request.json +++ /dev/null @@ -1,5 +0,0 @@ -{ - "quota_set": { - "snapshots": 45 - } -} diff --git a/api-ref/source/v2/samples/quotas-update-response.json b/api-ref/source/v2/samples/quotas-update-response.json deleted file mode 100644 index 9ab32c11c..000000000 --- a/api-ref/source/v2/samples/quotas-update-response.json +++ /dev/null @@ -1,5 +0,0 @@ -{ - "quota_set": { - "snapshots": 45 - } -} diff --git a/api-ref/source/v2/samples/quotas-user-show-detailed-response.json b/api-ref/source/v2/samples/quotas-user-show-detailed-response.json deleted file mode 100644 index 79609eb84..000000000 --- a/api-ref/source/v2/samples/quotas-user-show-detailed-response.json +++ /dev/null @@ -1,19 +0,0 @@ -{ - "quota_set": { - "gigabytes": { - "in_use": 100, - "limit": -1, - "reserved": 0 - }, - "snapshots": { - "in_use": 12, - "limit": -1, - "reserved": 0 - }, - "volumes": { - "in_use": 1, - "limit": -1, - "reserved": 0 - } - } -} diff --git a/api-ref/source/v2/samples/quotas-user-show-response.json b/api-ref/source/v2/samples/quotas-user-show-response.json deleted file mode 100644 index 6c267112c..000000000 --- a/api-ref/source/v2/samples/quotas-user-show-response.json +++ /dev/null @@ -1,7 +0,0 @@ -{ - "quota_set": { - "gigabytes": 5, - "snapshots": 10, - "volumes": 20 - } -} diff --git a/api-ref/source/v2/samples/snapshot-create-request.json b/api-ref/source/v2/samples/snapshot-create-request.json deleted file mode 100644 index 3c0fe5d88..000000000 --- a/api-ref/source/v2/samples/snapshot-create-request.json +++ /dev/null @@ -1,8 +0,0 @@ -{ - "snapshot": { - "name": "snap-001", - "description": "Daily backup", - "volume_id": "5aa119a8-d25b-45a7-8d1b-88e127885635", - "force": true - } -} diff --git 
a/api-ref/source/v2/samples/snapshot-create-response.json b/api-ref/source/v2/samples/snapshot-create-response.json deleted file mode 100644 index d8901e88d..000000000 --- a/api-ref/source/v2/samples/snapshot-create-response.json +++ /dev/null @@ -1,12 +0,0 @@ -{ - "snapshot": { - "status": "creating", - "description": "Daily backup", - "created_at": "2013-02-25T03:56:53.081642", - "metadata": {}, - "volume_id": "5aa119a8-d25b-45a7-8d1b-88e127885635", - "size": 1, - "id": "ffa9bc5e-1172-4021-acaf-cdcd78a9584d", - "name": "snap-001" - } -} diff --git a/api-ref/source/v2/samples/snapshot-metadata-create-request.json b/api-ref/source/v2/samples/snapshot-metadata-create-request.json deleted file mode 100644 index 4373b0018..000000000 --- a/api-ref/source/v2/samples/snapshot-metadata-create-request.json +++ /dev/null @@ -1,5 +0,0 @@ -{ - "metadata": { - "key": "v2" - } -} diff --git a/api-ref/source/v2/samples/snapshot-metadata-create-response.json b/api-ref/source/v2/samples/snapshot-metadata-create-response.json deleted file mode 100644 index 4373b0018..000000000 --- a/api-ref/source/v2/samples/snapshot-metadata-create-response.json +++ /dev/null @@ -1,5 +0,0 @@ -{ - "metadata": { - "key": "v2" - } -} diff --git a/api-ref/source/v2/samples/snapshot-metadata-show-response.json b/api-ref/source/v2/samples/snapshot-metadata-show-response.json deleted file mode 100644 index cbfe4ef7a..000000000 --- a/api-ref/source/v2/samples/snapshot-metadata-show-response.json +++ /dev/null @@ -1,5 +0,0 @@ -{ - "metadata": { - "name": "test" - } -} diff --git a/api-ref/source/v2/samples/snapshot-metadata-update-request.json b/api-ref/source/v2/samples/snapshot-metadata-update-request.json deleted file mode 100644 index 4373b0018..000000000 --- a/api-ref/source/v2/samples/snapshot-metadata-update-request.json +++ /dev/null @@ -1,5 +0,0 @@ -{ - "metadata": { - "key": "v2" - } -} diff --git a/api-ref/source/v2/samples/snapshot-metadata-update-response.json 
b/api-ref/source/v2/samples/snapshot-metadata-update-response.json deleted file mode 100644 index 4373b0018..000000000 --- a/api-ref/source/v2/samples/snapshot-metadata-update-response.json +++ /dev/null @@ -1,5 +0,0 @@ -{ - "metadata": { - "key": "v2" - } -} diff --git a/api-ref/source/v2/samples/snapshot-show-response.json b/api-ref/source/v2/samples/snapshot-show-response.json deleted file mode 100644 index 25a8c6c9d..000000000 --- a/api-ref/source/v2/samples/snapshot-show-response.json +++ /dev/null @@ -1,14 +0,0 @@ -{ - "snapshot": { - "status": "available", - "os-extended-snapshot-attributes:progress": "100%", - "description": "Daily backup", - "created_at": "2013-02-25T04:13:17.000000", - "metadata": {}, - "volume_id": "5aa119a8-d25b-45a7-8d1b-88e127885635", - "os-extended-snapshot-attributes:project_id": "0c2eba2c5af04d3f9e9d0d410b371fde", - "size": 1, - "id": "2bb856e1-b3d8-4432-a858-09e4ce939389", - "name": "snap-001" - } -} diff --git a/api-ref/source/v2/samples/snapshot-status-reset-request.json b/api-ref/source/v2/samples/snapshot-status-reset-request.json deleted file mode 100644 index 2bca0c286..000000000 --- a/api-ref/source/v2/samples/snapshot-status-reset-request.json +++ /dev/null @@ -1,5 +0,0 @@ -{ - "os-reset_status": { - "status": "available", - } -} diff --git a/api-ref/source/v2/samples/snapshot-update-request.json b/api-ref/source/v2/samples/snapshot-update-request.json deleted file mode 100644 index 0e0895717..000000000 --- a/api-ref/source/v2/samples/snapshot-update-request.json +++ /dev/null @@ -1,6 +0,0 @@ -{ - "snapshot": { - "name": "snap-002", - "description": "This is yet, another snapshot." 
- } -} diff --git a/api-ref/source/v2/samples/snapshot-update-response.json b/api-ref/source/v2/samples/snapshot-update-response.json deleted file mode 100644 index a2fa27793..000000000 --- a/api-ref/source/v2/samples/snapshot-update-response.json +++ /dev/null @@ -1,11 +0,0 @@ -{ - "snapshot": { - "created_at": "2013-02-20T08:11:34.000000", - "description": "This is yet, another snapshot", - "name": "snap-002", - "id": "4b502fcb-1f26-45f8-9fe5-3b9a0a52eaf2", - "size": 1, - "status": "available", - "volume_id": "2402b902-0b7a-458c-9c07-7435a826f794" - } -} diff --git a/api-ref/source/v2/samples/snapshots-list-detailed-response.json b/api-ref/source/v2/samples/snapshots-list-detailed-response.json deleted file mode 100644 index 463b98ec1..000000000 --- a/api-ref/source/v2/samples/snapshots-list-detailed-response.json +++ /dev/null @@ -1,18 +0,0 @@ -{ - "snapshots": [ - { - "status": "available", - "metadata": { - "name": "test" - }, - "os-extended-snapshot-attributes:progress": "100%", - "name": "test-volume-snapshot", - "volume_id": "173f7b48-c4c1-4e70-9acc-086b39073506", - "os-extended-snapshot-attributes:project_id": "bab7d5c60cd041a0a36f7c4b6e1dd978", - "created_at": "2015-11-29T02:25:51.000000", - "size": 1, - "id": "b1323cda-8e4b-41c1-afc5-2fc791809c8c", - "description": "volume snapshot" - } - ] -} diff --git a/api-ref/source/v2/samples/snapshots-list-response.json b/api-ref/source/v2/samples/snapshots-list-response.json deleted file mode 100644 index 8d7e4973a..000000000 --- a/api-ref/source/v2/samples/snapshots-list-response.json +++ /dev/null @@ -1,16 +0,0 @@ -{ - "snapshots": [ - { - "status": "available", - "metadata": { - "name": "test" - }, - "name": "test-volume-snapshot", - "volume_id": "173f7b48-c4c1-4e70-9acc-086b39073506", - "created_at": "2015-11-29T02:25:51.000000", - "size": 1, - "id": "b1323cda-8e4b-41c1-afc5-2fc791809c8c", - "description": "volume snapshot" - } - ] -} diff --git a/api-ref/source/v2/samples/user-quotas-show-response.json 
b/api-ref/source/v2/samples/user-quotas-show-response.json deleted file mode 100644 index 239c64d23..000000000 --- a/api-ref/source/v2/samples/user-quotas-show-response.json +++ /dev/null @@ -1,17 +0,0 @@ -{ - "quota_set": { - "cores": 20, - "fixed_ips": -1, - "floating_ips": 10, - "id": "fake_tenant", - "injected_file_content_bytes": 10240, - "injected_file_path_bytes": 255, - "injected_files": 5, - "instances": 10, - "key_pairs": 100, - "metadata_items": 128, - "ram": 51200, - "security_group_rules": 20, - "security_groups": 10 - } -} diff --git a/api-ref/source/v2/samples/user-quotas-update-request.json b/api-ref/source/v2/samples/user-quotas-update-request.json deleted file mode 100644 index 6e5195f9a..000000000 --- a/api-ref/source/v2/samples/user-quotas-update-request.json +++ /dev/null @@ -1,6 +0,0 @@ -{ - "quota_set": { - "force": true, - "instances": 9 - } -} diff --git a/api-ref/source/v2/samples/user-quotas-update-response.json b/api-ref/source/v2/samples/user-quotas-update-response.json deleted file mode 100644 index 553933292..000000000 --- a/api-ref/source/v2/samples/user-quotas-update-response.json +++ /dev/null @@ -1,16 +0,0 @@ -{ - "quota_set": { - "cores": 20, - "floating_ips": 10, - "fixed_ips": -1, - "injected_file_content_bytes": 10240, - "injected_file_path_bytes": 255, - "injected_files": 5, - "instances": 9, - "key_pairs": 100, - "metadata_items": 128, - "ram": 51200, - "security_group_rules": 20, - "security_groups": 10 - } -} diff --git a/api-ref/source/v2/samples/version-show-response.json b/api-ref/source/v2/samples/version-show-response.json deleted file mode 100644 index 04458b755..000000000 --- a/api-ref/source/v2/samples/version-show-response.json +++ /dev/null @@ -1,29 +0,0 @@ -{ - "version": { - "status": "CURRENT", - "updated": "2012-01-04T11:33:21Z", - "media-types": [ - { - "base": "application/json", - "type": "application/vnd.openstack.volume+json;version=1" - } - ], - "id": "v1.0", - "links": [ - { - "href": 
"http://23.253.228.211:8776/v1/", - "rel": "self" - }, - { - "href": "http://jorgew.github.com/block-storage-api/content/os-block-storage-1.0.pdf", - "type": "application/pdf", - "rel": "describedby" - }, - { - "href": "http://docs.rackspacecloud.com/servers/api/v1.1/application.wadl", - "type": "application/vnd.sun.wadl+xml", - "rel": "describedby" - } - ] - } -} diff --git a/api-ref/source/v2/samples/version-v2-show-response.json b/api-ref/source/v2/samples/version-v2-show-response.json deleted file mode 100644 index 811a3e80e..000000000 --- a/api-ref/source/v2/samples/version-v2-show-response.json +++ /dev/null @@ -1,36 +0,0 @@ -{ - "choices": [ - { - "status": "SUPPORTED", - "media-types": [ - { - "base": "application/json", - "type": "application/vnd.openstack.volume+json;version=1" - } - ], - "id": "v1.0", - "links": [ - { - "href": "http://23.253.248.171:8776/v1/v2.json", - "rel": "self" - } - ] - }, - { - "status": "CURRENT", - "media-types": [ - { - "base": "application/json", - "type": "application/vnd.openstack.volume+json;version=1" - } - ], - "id": "v2.0", - "links": [ - { - "href": "http://23.253.248.171:8776/v2/v2.json", - "rel": "self" - } - ] - } - ] -} diff --git a/api-ref/source/v2/samples/versions-response.json b/api-ref/source/v2/samples/versions-response.json deleted file mode 100644 index d8a0c6a63..000000000 --- a/api-ref/source/v2/samples/versions-response.json +++ /dev/null @@ -1,76 +0,0 @@ -{ - "versions": [ - { - "status": "DEPRECATED", - "updated": "2014-06-28T12:20:21Z", - "links": [ - { - "href": "http://docs.openstack.org/", - "type": "text/html", - "rel": "describedby" - }, - { - "href": "http://10.0.2.15:8776/v1/", - "rel": "self" - } - ], - "min_version": "", - "version": "", - "media-types": [ - { - "base": "application/json", - "type": "application/vnd.openstack.volume+json;version=1" - } - ], - "id": "v1.0" - }, - { - "status": "SUPPORTED", - "updated": "2014-06-28T12:20:21Z", - "links": [ - { - "href": 
"http://docs.openstack.org/", - "type": "text/html", - "rel": "describedby" - }, - { - "href": "http://10.0.2.15:8776/v2/", - "rel": "self" - } - ], - "min_version": "", - "version": "", - "media-types": [ - { - "base": "application/json", - "type": "application/vnd.openstack.volume+json;version=1" - } - ], - "id": "v2.0" - }, - { - "status": "CURRENT", - "updated": "2016-02-08T12:20:21Z", - "links": [ - { - "href": "http://docs.openstack.org/", - "type": "text/html", - "rel": "describedby" - }, - { - "href": "http://10.0.2.15:8776/v3/", - "rel": "self" - } - ], - "min_version": "3.0", - "version": "{Current_Max_Version}", - "media-types": [ - { - "base": "application/json", - "type": "application/vnd.openstack.volume+json;version=1" - } - ], - "id": "v3.0" - } - ] -} diff --git a/api-ref/source/v2/samples/volume-attach-request.json b/api-ref/source/v2/samples/volume-attach-request.json deleted file mode 100644 index a779f9fbf..000000000 --- a/api-ref/source/v2/samples/volume-attach-request.json +++ /dev/null @@ -1,6 +0,0 @@ -{ - "os-attach": { - "instance_uuid": "95D9EF50-507D-11E5-B970-0800200C9A66", - "mountpoint": "/dev/vdc" - } -} diff --git a/api-ref/source/v2/samples/volume-bootable-status-update-request.json b/api-ref/source/v2/samples/volume-bootable-status-update-request.json deleted file mode 100644 index abcdf3deb..000000000 --- a/api-ref/source/v2/samples/volume-bootable-status-update-request.json +++ /dev/null @@ -1,5 +0,0 @@ -{ - "os-set_bootable": { - "bootable": "True" - } -} diff --git a/api-ref/source/v2/samples/volume-create-request.json b/api-ref/source/v2/samples/volume-create-request.json deleted file mode 100644 index 2aab20252..000000000 --- a/api-ref/source/v2/samples/volume-create-request.json +++ /dev/null @@ -1,22 +0,0 @@ -{ - "volume": { - "size": 10, - "availability_zone": null, - "source_volid": null, - "description": null, - "multiattach ": false, - "snapshot_id": null, - "name": null, - "imageRef": null, - "volume_type": null, - 
"metadata": {}, - "source_replica": null, - "consistencygroup_id": null - }, - "OS-SCH-HNT:scheduler_hints": { - "same_host": [ - "a0cf03a5-d921-4877-bb5c-86d26cf818e1", - "8c19174f-4220-44f0-824a-cd1eeef10287" - ] - } -} diff --git a/api-ref/source/v2/samples/volume-create-response.json b/api-ref/source/v2/samples/volume-create-response.json deleted file mode 100644 index a4f4de88b..000000000 --- a/api-ref/source/v2/samples/volume-create-response.json +++ /dev/null @@ -1,34 +0,0 @@ -{ - "volume": { - "status": "creating", - "migration_status": null, - "user_id": "0eea4eabcf184061a3b6db1e0daaf010", - "attachments": [], - "links": [ - { - "href": "http://23.253.248.171:8776/v2/bab7d5c60cd041a0a36f7c4b6e1dd978/volumes/6edbc2f4-1507-44f8-ac0d-eed1d2608d38", - "rel": "self" - }, - { - "href": "http://23.253.248.171:8776/bab7d5c60cd041a0a36f7c4b6e1dd978/volumes/6edbc2f4-1507-44f8-ac0d-eed1d2608d38", - "rel": "bookmark" - } - ], - "availability_zone": "nova", - "bootable": "false", - "encrypted": false, - "created_at": "2015-11-29T03:01:44.000000", - "description": null, - "updated_at": null, - "volume_type": "lvmdriver-1", - "name": "test-volume-attachments", - "replication_status": "disabled", - "consistencygroup_id": null, - "source_volid": null, - "snapshot_id": null, - "multiattach": false, - "metadata": {}, - "id": "6edbc2f4-1507-44f8-ac0d-eed1d2608d38", - "size": 2 - } -} diff --git a/api-ref/source/v2/samples/volume-detach-request.json b/api-ref/source/v2/samples/volume-detach-request.json deleted file mode 100644 index f2e9937a0..000000000 --- a/api-ref/source/v2/samples/volume-detach-request.json +++ /dev/null @@ -1,6 +0,0 @@ -{ - "os-detach": { - "attachment_id": "d8777f54-84cf-4809-a679-468ffed56cf1" - } -} - diff --git a/api-ref/source/v2/samples/volume-extend-request.json b/api-ref/source/v2/samples/volume-extend-request.json deleted file mode 100644 index a051cb3cb..000000000 --- a/api-ref/source/v2/samples/volume-extend-request.json +++ /dev/null @@ -1,5 
+0,0 @@ -{ - "os-extend": { - "new_size": 3 - } -} diff --git a/api-ref/source/v2/samples/volume-force-delete-request.json b/api-ref/source/v2/samples/volume-force-delete-request.json deleted file mode 100644 index a7fe0fe2a..000000000 --- a/api-ref/source/v2/samples/volume-force-delete-request.json +++ /dev/null @@ -1,3 +0,0 @@ -{ - "os-force_delete": {} -} diff --git a/api-ref/source/v2/samples/volume-force-detach-request.json b/api-ref/source/v2/samples/volume-force-detach-request.json deleted file mode 100644 index 277849d8c..000000000 --- a/api-ref/source/v2/samples/volume-force-detach-request.json +++ /dev/null @@ -1,8 +0,0 @@ -{ - "os-force_detach": { - "attachment_id": "d8777f54-84cf-4809-a679-468ffed56cf1", - "connector": { - "initiator": "iqn.2012-07.org.fake:01" - } - } -} diff --git a/api-ref/source/v2/samples/volume-image-metadata-set-request.json b/api-ref/source/v2/samples/volume-image-metadata-set-request.json deleted file mode 100644 index 1f2be3d6e..000000000 --- a/api-ref/source/v2/samples/volume-image-metadata-set-request.json +++ /dev/null @@ -1,10 +0,0 @@ -{ - "os-set_image_metadata": { - "metadata": { - "image_id": "521752a6-acf6-4b2d-bc7a-119f9148cd8c", - "image_name": "image", - "kernel_id": "155d900f-4e14-4e4c-a73d-069cbf4541e6", - "ramdisk_id": "somedisk" - } - } -} diff --git a/api-ref/source/v2/samples/volume-image-metadata-unset-request.json b/api-ref/source/v2/samples/volume-image-metadata-unset-request.json deleted file mode 100644 index 49d3295c5..000000000 --- a/api-ref/source/v2/samples/volume-image-metadata-unset-request.json +++ /dev/null @@ -1,5 +0,0 @@ -{ - "os-unset_image_metadata": { - "key": "ramdisk_id" - } -} diff --git a/api-ref/source/v2/samples/volume-manage-request.json b/api-ref/source/v2/samples/volume-manage-request.json deleted file mode 100644 index 363214784..000000000 --- a/api-ref/source/v2/samples/volume-manage-request.json +++ /dev/null @@ -1,18 +0,0 @@ -{ - "volume": { - "host": "geraint-VirtualBox", - 
"ref": { - "source-name": "existingLV", - "source-id": "1234" - }, - "name": "New Volume", - "availability_zone": "az2", - "description": "Volume imported from existingLV", - "volume_type": null, - "bootable": true, - "metadata": { - "key1": "value1", - "key2": "value2" - } - } -} diff --git a/api-ref/source/v2/samples/volume-manage-response.json b/api-ref/source/v2/samples/volume-manage-response.json deleted file mode 100644 index 343ca66bc..000000000 --- a/api-ref/source/v2/samples/volume-manage-response.json +++ /dev/null @@ -1,33 +0,0 @@ -{ - "volume": { - "status": "creating", - "user_id": "eae1472b5fc5496998a3d06550929e7e", - "attachments": [], - "links": [ - { - "href": "http://10.0.2.15:8776/v2/87c8522052ca4eed98bc672b4c1a3ddb/volumes/23cf872b-c781-4cd4-847d-5f2ec8cbd91c", - "rel": "self" - }, - { - "href": "http://10.0.2.15:8776/87c8522052ca4eed98bc672b4c1a3ddb/volumes/23cf872b-c781-4cd4-847d-5f2ec8cbd91c", - "rel": "bookmark" - } - ], - "availability_zone": "az2", - "bootable": "false", - "encrypted": "false", - "created_at": "2014-07-18T00:12:54.000000", - "description": "Volume imported from existingLV", - "os-vol-tenant-attr:tenant_id": "87c8522052ca4eed98bc672b4c1a3ddb", - "volume_type": null, - "name": "New Volume", - "source_volid": null, - "snapshot_id": null, - "metadata": { - "key2": "value2", - "key1": "value1" - }, - "id": "23cf872b-c781-4cd4-847d-5f2ec8cbd91c", - "size": 0 - } -} diff --git a/api-ref/source/v2/samples/volume-metadata-create-request.json b/api-ref/source/v2/samples/volume-metadata-create-request.json deleted file mode 100644 index 1ff9aae27..000000000 --- a/api-ref/source/v2/samples/volume-metadata-create-request.json +++ /dev/null @@ -1,5 +0,0 @@ -{ - "metadata": { - "name": "metadata0" - } -} diff --git a/api-ref/source/v2/samples/volume-metadata-create-response.json b/api-ref/source/v2/samples/volume-metadata-create-response.json deleted file mode 100644 index 1ff9aae27..000000000 --- 
a/api-ref/source/v2/samples/volume-metadata-create-response.json +++ /dev/null @@ -1,5 +0,0 @@ -{ - "metadata": { - "name": "metadata0" - } -} diff --git a/api-ref/source/v2/samples/volume-metadata-show-response.json b/api-ref/source/v2/samples/volume-metadata-show-response.json deleted file mode 100644 index 5937a8665..000000000 --- a/api-ref/source/v2/samples/volume-metadata-show-response.json +++ /dev/null @@ -1,3 +0,0 @@ -{ - "metadata": {} -} diff --git a/api-ref/source/v2/samples/volume-metadata-update-request.json b/api-ref/source/v2/samples/volume-metadata-update-request.json deleted file mode 100644 index 4d96ad848..000000000 --- a/api-ref/source/v2/samples/volume-metadata-update-request.json +++ /dev/null @@ -1,5 +0,0 @@ -{ - "metadata": { - "name": "metadata1" - } -} diff --git a/api-ref/source/v2/samples/volume-metadata-update-response.json b/api-ref/source/v2/samples/volume-metadata-update-response.json deleted file mode 100644 index 4d96ad848..000000000 --- a/api-ref/source/v2/samples/volume-metadata-update-response.json +++ /dev/null @@ -1,5 +0,0 @@ -{ - "metadata": { - "name": "metadata1" - } -} diff --git a/api-ref/source/v2/samples/volume-os-retype-request.json b/api-ref/source/v2/samples/volume-os-retype-request.json deleted file mode 100644 index 118b96254..000000000 --- a/api-ref/source/v2/samples/volume-os-retype-request.json +++ /dev/null @@ -1,6 +0,0 @@ -{ - "os-retype": { - "new_type": "dedup-tier-replicaton", - "migration_policy": "never" - } -} \ No newline at end of file diff --git a/api-ref/source/v2/samples/volume-show-response.json b/api-ref/source/v2/samples/volume-show-response.json deleted file mode 100644 index f9d73aae8..000000000 --- a/api-ref/source/v2/samples/volume-show-response.json +++ /dev/null @@ -1,33 +0,0 @@ -{ - "volume": { - "status": "available", - "attachments": [], - "links": [ - { - "href": "http://localhost:8776/v2/0c2eba2c5af04d3f9e9d0d410b371fde/volumes/5aa119a8-d25b-45a7-8d1b-88e127885635", - "rel": "self" - 
}, - { - "href": "http://localhost:8776/0c2eba2c5af04d3f9e9d0d410b371fde/volumes/5aa119a8-d25b-45a7-8d1b-88e127885635", - "rel": "bookmark" - } - ], - "availability_zone": "nova", - "bootable": "false", - "os-vol-host-attr:host": "ip-10-168-107-25", - "source_volid": null, - "snapshot_id": null, - "id": "5aa119a8-d25b-45a7-8d1b-88e127885635", - "description": "Super volume.", - "name": "vol-002", - "created_at": "2013-02-25T02:40:21.000000", - "volume_type": "None", - "os-vol-tenant-attr:tenant_id": "0c2eba2c5af04d3f9e9d0d410b371fde", - "size": 1, - "os-volume-replication:driver_data": null, - "os-volume-replication:extended_status": null, - "metadata": { - "contents": "not junk" - } - } -} diff --git a/api-ref/source/v2/samples/volume-status-reset-request.json b/api-ref/source/v2/samples/volume-status-reset-request.json deleted file mode 100644 index 506b61019..000000000 --- a/api-ref/source/v2/samples/volume-status-reset-request.json +++ /dev/null @@ -1,7 +0,0 @@ -{ - "os-reset_status": { - "status": "available", - "attach_status": "detached", - "migration_status": "migrating" - } -} diff --git a/api-ref/source/v2/samples/volume-transfer-accept-request.json b/api-ref/source/v2/samples/volume-transfer-accept-request.json deleted file mode 100644 index 3399f1e0c..000000000 --- a/api-ref/source/v2/samples/volume-transfer-accept-request.json +++ /dev/null @@ -1,5 +0,0 @@ -{ - "accept": { - "auth_key": "9266c59563c84664" - } -} diff --git a/api-ref/source/v2/samples/volume-transfer-accept-response.json b/api-ref/source/v2/samples/volume-transfer-accept-response.json deleted file mode 100644 index bee4d4ae2..000000000 --- a/api-ref/source/v2/samples/volume-transfer-accept-response.json +++ /dev/null @@ -1,17 +0,0 @@ -{ - "transfer": { - "id": "cac5c677-73a9-4288-bb9c-b2ebfb547377", - "name": "first volume transfer", - "volume_id": "894623a6-e901-4312-aa06-4275e6321cce", - "links": [ - { - "href": "http://localhost/v2/firstproject/volumes/1", - "rel": "self" - }, - { - 
"href": "http://localhost/firstproject/volumes/1", - "rel": "bookmark" - } - ] - } -} diff --git a/api-ref/source/v2/samples/volume-transfer-create-request.json b/api-ref/source/v2/samples/volume-transfer-create-request.json deleted file mode 100644 index f517b7498..000000000 --- a/api-ref/source/v2/samples/volume-transfer-create-request.json +++ /dev/null @@ -1,6 +0,0 @@ -{ - "transfer": { - "volume_id": "c86b9af4-151d-4ead-b62c-5fb967af0e37", - "name": "first volume" - } -} diff --git a/api-ref/source/v2/samples/volume-transfer-create-response.json b/api-ref/source/v2/samples/volume-transfer-create-response.json deleted file mode 100644 index 4a5fb16ca..000000000 --- a/api-ref/source/v2/samples/volume-transfer-create-response.json +++ /dev/null @@ -1,19 +0,0 @@ -{ - "transfer": { - "id": "1a7059f5-8ed7-45b7-8d05-2811e5d09f24", - "created_at": "2015-02-25T03:56:53.081642", - "name": "first volume", - "volume_id": "c86b9af4-151d-4ead-b62c-5fb967af0e37", - "auth_key": "9266c59563c84664", - "links": [ - { - "href": "http://localhost/v2/firstproject/volumes/3", - "rel": "self" - }, - { - "href": "http://localhost/firstproject/volumes/3", - "rel": "bookmark" - } - ] - } -} diff --git a/api-ref/source/v2/samples/volume-transfer-show-response.json b/api-ref/source/v2/samples/volume-transfer-show-response.json deleted file mode 100644 index c73b62cc1..000000000 --- a/api-ref/source/v2/samples/volume-transfer-show-response.json +++ /dev/null @@ -1,18 +0,0 @@ -{ - "transfer": { - "id": "cac5c677-73a9-4288-bb9c-b2ebfb547377", - "created_at": "2015-02-25T03:56:53.081642", - "name": "first volume transfer", - "volume_id": "894623a6-e901-4312-aa06-4275e6321cce", - "links": [ - { - "href": "http://localhost/v2/firstproject/volumes/1", - "rel": "self" - }, - { - "href": "http://localhost/firstproject/volumes/1", - "rel": "bookmark" - } - ] - } -} diff --git a/api-ref/source/v2/samples/volume-transfers-list-detailed-response.json 
b/api-ref/source/v2/samples/volume-transfers-list-detailed-response.json deleted file mode 100644 index 9e5d8c0a1..000000000 --- a/api-ref/source/v2/samples/volume-transfers-list-detailed-response.json +++ /dev/null @@ -1,36 +0,0 @@ -{ - "transfers": [ - { - "id": "cac5c677-73a9-4288-bb9c-b2ebfb547377", - "created_at": "2015-02-25T03:56:53.081642", - "name": "first volume transfer", - "volume_id": "894623a6-e901-4312-aa06-4275e6321cce", - "links": [ - { - "href": "http://localhost/v2/firstproject/volumes/1", - "rel": "self" - }, - { - "href": "http://localhost/firstproject/volumes/1", - "rel": "bookmark" - } - ] - }, - { - "id": "f26c0dee-d20d-4e80-8dee-a8d91b9742a1", - "created_at": "2015-03-25T03:56:53.081642", - "name": "second volume transfer", - "volume_id": "673db275-379f-41af-8371-e1652132b4c1", - "links": [ - { - "href": "http://localhost/v2/firstproject/volumes/2", - "rel": "self" - }, - { - "href": "http://localhost/firstproject/volumes/2", - "rel": "bookmark" - } - ] - } - ] -} diff --git a/api-ref/source/v2/samples/volume-transfers-list-response.json b/api-ref/source/v2/samples/volume-transfers-list-response.json deleted file mode 100644 index 02711d1ed..000000000 --- a/api-ref/source/v2/samples/volume-transfers-list-response.json +++ /dev/null @@ -1,34 +0,0 @@ -{ - "transfers": [ - { - "id": "cac5c677-73a9-4288-bb9c-b2ebfb547377", - "name": "first volume transfer", - "volume_id": "894623a6-e901-4312-aa06-4275e6321cce", - "links": [ - { - "href": "http://localhost/v2/firstproject/volumes/1", - "rel": "self" - }, - { - "href": "http://localhost/firstproject/volumes/1", - "rel": "bookmark" - } - ] - }, - { - "id": "f26c0dee-d20d-4e80-8dee-a8d91b9742a1", - "name": "second volume transfer", - "volume_id": "673db275-379f-41af-8371-e1652132b4c1", - "links": [ - { - "href": "http://localhost/v2/firstproject/volumes/2", - "rel": "self" - }, - { - "href": "http://localhost/firstproject/volumes/2", - "rel": "bookmark" - } - ] - } - ] -} diff --git 
a/api-ref/source/v2/samples/volume-type-access-add-request.json b/api-ref/source/v2/samples/volume-type-access-add-request.json deleted file mode 100644 index b7481edbb..000000000 --- a/api-ref/source/v2/samples/volume-type-access-add-request.json +++ /dev/null @@ -1,5 +0,0 @@ -{ - "addProjectAccess": { - "project": "f270b245cb11498ca4031deb7e141cfa" - } -} diff --git a/api-ref/source/v2/samples/volume-type-access-delete-request.json b/api-ref/source/v2/samples/volume-type-access-delete-request.json deleted file mode 100644 index 144997bfc..000000000 --- a/api-ref/source/v2/samples/volume-type-access-delete-request.json +++ /dev/null @@ -1,5 +0,0 @@ -{ - "removeProjectAccess": { - "project": "f270b245cb11498ca4031deb7e141cfa" - } -} diff --git a/api-ref/source/v2/samples/volume-type-access-list-response.json b/api-ref/source/v2/samples/volume-type-access-list-response.json deleted file mode 100644 index afcffb081..000000000 --- a/api-ref/source/v2/samples/volume-type-access-list-response.json +++ /dev/null @@ -1,6 +0,0 @@ -{ - "volume_type_access": { - "volume_type_id": "3c67e124-39ad-4ace-a507-8bb7bf510c26", - "project_id": "f270b245cb11498ca4031deb7e141cfa" - } -} diff --git a/api-ref/source/v2/samples/volume-type-create-request.json b/api-ref/source/v2/samples/volume-type-create-request.json deleted file mode 100644 index 13d86bfdf..000000000 --- a/api-ref/source/v2/samples/volume-type-create-request.json +++ /dev/null @@ -1,10 +0,0 @@ -{ - "volume_type": { - "name": "vol-type-001", - "description": "volume type 0001", - "os-volume-type-access:is_public": true, - "extra_specs": { - "capabilities": "gpu" - } - } -} diff --git a/api-ref/source/v2/samples/volume-type-show-request.json b/api-ref/source/v2/samples/volume-type-show-request.json deleted file mode 100644 index a91f2e94d..000000000 --- a/api-ref/source/v2/samples/volume-type-show-request.json +++ /dev/null @@ -1,9 +0,0 @@ -{ - "volume_type": { - "id": "289da7f8-6440-407c-9fb4-7db01ec49164", - "name": 
"vol-type-001", - "extra_specs": { - "capabilities": "gpu" - } - } -} diff --git a/api-ref/source/v2/samples/volume-type-show-response.json b/api-ref/source/v2/samples/volume-type-show-response.json deleted file mode 100644 index 7a0420f20..000000000 --- a/api-ref/source/v2/samples/volume-type-show-response.json +++ /dev/null @@ -1,11 +0,0 @@ -{ - "volume_type": { - "id": "6685584b-1eac-4da6-b5c3-555430cf68ff", - "name": "vol-type-001", - "description": "volume type 001", - "is_public": "true", - "extra_specs": { - "capabilities": "gpu" - } - } -} diff --git a/api-ref/source/v2/samples/volume-type-update-request.json b/api-ref/source/v2/samples/volume-type-update-request.json deleted file mode 100644 index 8bdc5befb..000000000 --- a/api-ref/source/v2/samples/volume-type-update-request.json +++ /dev/null @@ -1,10 +0,0 @@ -{ - "volume_type": { - "name": "vol-type-001", - "description": "volume type 0001", - "is_public": true, - "extra_specs": { - "capabilities": "gpu" - } - } -} diff --git a/api-ref/source/v2/samples/volume-types-list-response.json b/api-ref/source/v2/samples/volume-types-list-response.json deleted file mode 100644 index 1d72f923e..000000000 --- a/api-ref/source/v2/samples/volume-types-list-response.json +++ /dev/null @@ -1,16 +0,0 @@ -{ - "volume_types": [ - { - "extra_specs": { - "capabilities": "gpu" - }, - "id": "6685584b-1eac-4da6-b5c3-555430cf68ff", - "name": "SSD" - }, - { - "extra_specs": {}, - "id": "8eb69a46-df97-4e41-9586-9a40a7533803", - "name": "SATA" - } - ] -} diff --git a/api-ref/source/v2/samples/volume-unmanage-request.json b/api-ref/source/v2/samples/volume-unmanage-request.json deleted file mode 100644 index a75950bb9..000000000 --- a/api-ref/source/v2/samples/volume-unmanage-request.json +++ /dev/null @@ -1,3 +0,0 @@ -{ - "os-unmanage": {} -} diff --git a/api-ref/source/v2/samples/volume-update-request.json b/api-ref/source/v2/samples/volume-update-request.json deleted file mode 100644 index 8e52dacb6..000000000 --- 
a/api-ref/source/v2/samples/volume-update-request.json +++ /dev/null @@ -1,6 +0,0 @@ -{ - "volume": { - "name": "vol-003", - "description": "This is yet, another volume." - } -} diff --git a/api-ref/source/v2/samples/volume-update-response.json b/api-ref/source/v2/samples/volume-update-response.json deleted file mode 100644 index f87bcd2ce..000000000 --- a/api-ref/source/v2/samples/volume-update-response.json +++ /dev/null @@ -1,36 +0,0 @@ -{ - "volume": { - "status": "available", - "migration_status": null, - "user_id": "0eea4eabcf184061a3b6db1e0daaf010", - "attachments": [], - "links": [ - { - "href": "http://localhost:8776/v2/0c2eba2c5af04d3f9e9d0d410b371fde/volumes/5aa119a8-d25b-45a7-8d1b-88e127885635", - "rel": "self" - }, - { - "href": "http://localhost:8776/0c2eba2c5af04d3f9e9d0d410b371fde/volumes/5aa119a8-d25b-45a7-8d1b-88e127885635", - "rel": "bookmark" - } - ], - "availability_zone": "nova", - "bootable": "false", - "encrypted": false, - "created_at": "2015-11-29T03:01:44.000000", - "description": "This is yet, another volume.", - "updated_at": null, - "volume_type": "lvmdriver-1", - "name": "vol-003", - "replication_status": "disabled", - "consistencygroup_id": null, - "source_volid": null, - "snapshot_id": null, - "multiattach": false, - "metadata": { - "contents": "not junk" - }, - "id": "5aa119a8-d25b-45a7-8d1b-88e127885635", - "size": 1 - } -} diff --git a/api-ref/source/v2/samples/volumes-list-detailed-response.json b/api-ref/source/v2/samples/volumes-list-detailed-response.json deleted file mode 100644 index e91029c14..000000000 --- a/api-ref/source/v2/samples/volumes-list-detailed-response.json +++ /dev/null @@ -1,102 +0,0 @@ -{ - "volumes": [ - { - "migration_status": null, - "attachments": [ - { - "server_id": "f4fda93b-06e0-4743-8117-bc8bcecd651b", - "attachment_id": "3b4db356-253d-4fab-bfa0-e3626c0b8405", - "host_name": null, - "volume_id": "6edbc2f4-1507-44f8-ac0d-eed1d2608d38", - "device": "/dev/vdb", - "id": 
"6edbc2f4-1507-44f8-ac0d-eed1d2608d38" - } - ], - "links": [ - { - "href": "http://23.253.248.171:8776/v2/bab7d5c60cd041a0a36f7c4b6e1dd978/volumes/6edbc2f4-1507-44f8-ac0d-eed1d2608d38", - "rel": "self" - }, - { - "href": "http://23.253.248.171:8776/bab7d5c60cd041a0a36f7c4b6e1dd978/volumes/6edbc2f4-1507-44f8-ac0d-eed1d2608d38", - "rel": "bookmark" - } - ], - "availability_zone": "nova", - "os-vol-host-attr:host": "difleming@lvmdriver-1#lvmdriver-1", - "encrypted": false, - "os-volume-replication:extended_status": null, - "replication_status": "disabled", - "snapshot_id": null, - "id": "6edbc2f4-1507-44f8-ac0d-eed1d2608d38", - "size": 2, - "user_id": "32779452fcd34ae1a53a797ac8a1e064", - "os-vol-tenant-attr:tenant_id": "bab7d5c60cd041a0a36f7c4b6e1dd978", - "os-vol-mig-status-attr:migstat": null, - "metadata": { - "readonly": false, - "attached_mode": "rw" - }, - "status": "in-use", - "description": null, - "multiattach": true, - "os-volume-replication:driver_data": null, - "source_volid": null, - "consistencygroup_id": null, - "os-vol-mig-status-attr:name_id": null, - "name": "test-volume-attachments", - "bootable": "false", - "created_at": "2015-11-29T03:01:44.000000", - "volume_type": "lvmdriver-1" - }, - { - "migration_status": null, - "attachments": [], - "links": [ - { - "href": "http://23.253.248.171:8776/v2/bab7d5c60cd041a0a36f7c4b6e1dd978/volumes/173f7b48-c4c1-4e70-9acc-086b39073506", - "rel": "self" - }, - { - "href": "http://23.253.248.171:8776/bab7d5c60cd041a0a36f7c4b6e1dd978/volumes/173f7b48-c4c1-4e70-9acc-086b39073506", - "rel": "bookmark" - } - ], - "availability_zone": "nova", - "os-vol-host-attr:host": "difleming@lvmdriver-1#lvmdriver-1", - "encrypted": false, - "os-volume-replication:extended_status": null, - "replication_status": "disabled", - "snapshot_id": null, - "id": "173f7b48-c4c1-4e70-9acc-086b39073506", - "size": 1, - "user_id": "32779452fcd34ae1a53a797ac8a1e064", - "os-vol-tenant-attr:tenant_id": "bab7d5c60cd041a0a36f7c4b6e1dd978", - 
"os-vol-mig-status-attr:migstat": null, - "metadata": {}, - "status": "available", - "volume_image_metadata": { - "kernel_id": "8a55f5f1-78f7-4477-8168-977d8519342c", - "checksum": "eb9139e4942121f22bbc2afc0400b2a4", - "min_ram": "0", - "ramdisk_id": "5f6bdf8a-92db-4988-865b-60bdd808d9ef", - "disk_format": "ami", - "image_name": "cirros-0.3.4-x86_64-uec", - "image_id": "b48c53e1-9a96-4a5a-a630-2e74ec54ddcc", - "container_format": "ami", - "min_disk": "0", - "size": "25165824" - }, - "description": "", - "multiattach": false, - "os-volume-replication:driver_data": null, - "source_volid": null, - "consistencygroup_id": null, - "os-vol-mig-status-attr:name_id": null, - "name": "test-volume", - "bootable": "true", - "created_at": "2015-11-29T02:25:18.000000", - "volume_type": "lvmdriver-1" - } - ] -} diff --git a/api-ref/source/v2/samples/volumes-list-response.json b/api-ref/source/v2/samples/volumes-list-response.json deleted file mode 100644 index b3c7cc051..000000000 --- a/api-ref/source/v2/samples/volumes-list-response.json +++ /dev/null @@ -1,32 +0,0 @@ -{ - "volumes": [ - { - "id": "45baf976-c20a-4894-a7c3-c94b7376bf55", - "links": [ - { - "href": "http://localhost:8776/v2/0c2eba2c5af04d3f9e9d0d410b371fde/volumes/45baf976-c20a-4894-a7c3-c94b7376bf55", - "rel": "self" - }, - { - "href": "http://localhost:8776/0c2eba2c5af04d3f9e9d0d410b371fde/volumes/45baf976-c20a-4894-a7c3-c94b7376bf55", - "rel": "bookmark" - } - ], - "name": "vol-004" - }, - { - "id": "5aa119a8-d25b-45a7-8d1b-88e127885635", - "links": [ - { - "href": "http://localhost:8776/v2/0c2eba2c5af04d3f9e9d0d410b371fde/volumes/5aa119a8-d25b-45a7-8d1b-88e127885635", - "rel": "self" - }, - { - "href": "http://localhost:8776/0c2eba2c5af04d3f9e9d0d410b371fde/volumes/5aa119a8-d25b-45a7-8d1b-88e127885635", - "rel": "bookmark" - } - ], - "name": "vol-003" - } - ] -} diff --git a/api-ref/source/v2/volume-manage.inc b/api-ref/source/v2/volume-manage.inc deleted file mode 100644 index ca25ceac8..000000000 --- 
a/api-ref/source/v2/volume-manage.inc +++ /dev/null @@ -1,49 +0,0 @@ -.. -*- rst -*- - -Volume manage extension (os-volume-manage) -========================================== - -Creates volumes by using existing storage instead of allocating new -storage. - - -Manage existing volume -~~~~~~~~~~~~~~~~~~~~~~ - -.. rest_method:: POST /v2/{tenant_id}/os-volume-manage - -Creates a Block Storage volume by using existing storage rather than allocating new storage. - -The caller must specify a reference to an existing storage volume -in the ref parameter in the request. Although each storage driver -might interpret this reference differently, the driver should -accept a reference structure that contains either a source-id -or source-name element, if possible. - -The API chooses the size of the volume by rounding up the size of -the existing storage volume to the next gibibyte (GiB). - -Normal response codes: 202, - - -Request -------- - -.. rest_parameters:: parameters.yaml - - - description: description - - availability_zone: availability_zone - - bootable: bootable - - volume_type: volume_type - - name: name - - volume: volume - - host: host - - ref: ref - - metadata: metadata - - tenant_id: tenant_id - -Request Example ---------------- - -.. literalinclude:: ./samples/volume-manage-request.json - :language: javascript diff --git a/api-ref/source/v2/volume-type-access.inc b/api-ref/source/v2/volume-type-access.inc deleted file mode 100644 index d69d524d6..000000000 --- a/api-ref/source/v2/volume-type-access.inc +++ /dev/null @@ -1,102 +0,0 @@ -.. -*- rst -*- - -Volume type access (volumes) -============================ - -Private volume type access to project. - -By default, volumes types are public. To create a private volume -type, set the ``is_public`` boolean field to ``false`` at volume -type creation time. To control access to a private volume type, -user needs to add a project to or remove a project from the volume -type. 
Private volume types without projects are only accessible by -users with the administrative role and context. - - -Add private volume type access -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -.. rest_method:: POST /v2/{tenant_id}/types/{volume_type}/action - -Adds private volume type access to a project. - -Normal response codes: 202, - - -Request -------- - -.. rest_parameters:: parameters.yaml - - - project: project - - tenant_id: tenant_id - - volume_type: volume_type - -Request Example ---------------- - -.. literalinclude:: ./samples/volume-type-access-add-request.json - :language: javascript - - -Remove private volume type access -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -.. rest_method:: POST /v2/{tenant_id}/types/{volume_type}/action - -Removes private volume type access from a project. - -Normal response codes: 202, - - -Request -------- - -.. rest_parameters:: parameters.yaml - - - project: project - - tenant_id: tenant_id - - volume_type: volume_type - -Request Example ---------------- - -.. literalinclude:: ./samples/volume-type-access-delete-request.json - :language: javascript - - -List private volume type access details -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -.. rest_method:: GET /v2/{tenant_id}/types/{volume_type}/os-volume-type-access - -Lists project IDs that have access to private volume type. - - -Normal response codes: 200 -Error response codes: - - -Request -------- - -.. rest_parameters:: parameters.yaml - - - tenant_id: tenant_id - - volume_type: volume_type - - -Response Parameters -------------------- - -.. rest_parameters:: parameters.yaml - - - project_id: project_id - - - -Response Example ----------------- - -.. literalinclude:: ./samples/volume-type-access-list-response.json - :language: javascript diff --git a/api-ref/source/v2/volumes-v2-extensions.inc b/api-ref/source/v2/volumes-v2-extensions.inc deleted file mode 100644 index aa236689b..000000000 --- a/api-ref/source/v2/volumes-v2-extensions.inc +++ /dev/null @@ -1,50 +0,0 @@ -.. 
-*- rst -*- - -API extensions (extensions) -=========================== - - - - -List API extensions -~~~~~~~~~~~~~~~~~~~ - -.. rest_method:: GET /v2/{tenant_id}/extensions - -Lists Block Storage API extensions. - - -Normal response codes: 200, 300 - - -Request -------- - -.. rest_parameters:: parameters.yaml - - - tenant_id: tenant_id - - -Response Parameters -------------------- - -.. rest_parameters:: parameters.yaml - - - updated: updated - - description: description - - links: links - - namespace: namespace - - alias: alias - - name: name - - - - -Response Example ----------------- - -.. literalinclude:: ./samples/extensions-list-response.json - :language: javascript - - - diff --git a/api-ref/source/v2/volumes-v2-snapshots-actions.inc b/api-ref/source/v2/volumes-v2-snapshots-actions.inc deleted file mode 100644 index 933c58aeb..000000000 --- a/api-ref/source/v2/volumes-v2-snapshots-actions.inc +++ /dev/null @@ -1,34 +0,0 @@ -.. -*- rst -*- - -==================================== -Snapshot actions (snapshots, action) -==================================== - -Administrator only. Resets status for a snapshot. - - -Reset a snapshot's status -========================= - -.. rest_method:: POST /v2/{tenant_id}/snapshots/{snapshot_id}/action - -Resets the status. Specify the ``os-reset_status`` action in the request body. - -Normal response codes: 202, - - -Request -------- - -.. rest_parameters:: parameters.yaml - - - status: status_2 - - os-reset_status: os-reset_status - - tenant_id: tenant_id - - snapshot_id: snapshot_id - -Request Example ---------------- - -.. literalinclude:: ./samples/snapshot-status-reset-request.json - :language: javascript diff --git a/api-ref/source/v2/volumes-v2-snapshots.inc b/api-ref/source/v2/volumes-v2-snapshots.inc deleted file mode 100644 index 0bcea538b..000000000 --- a/api-ref/source/v2/volumes-v2-snapshots.inc +++ /dev/null @@ -1,394 +0,0 @@ -.. 
-*- rst -*- - -Volume snapshots (snapshots) -============================ - -A snapshot is a point-in-time copy of the data that a volume -contains. - -When you create, list, or delete snapshots, these status values are -possible: - -**Snapshot statuses** - -+----------------+-------------------------------------+ -| Status | Description | -+----------------+-------------------------------------+ -| creating | The snapshot is being created. | -+----------------+-------------------------------------+ -| available | The snapshot is ready to use. | -+----------------+-------------------------------------+ -| deleting | The snapshot is being deleted. | -+----------------+-------------------------------------+ -| error | A snapshot creation error occurred. | -+----------------+-------------------------------------+ -| error_deleting | A snapshot deletion error occurred. | -+----------------+-------------------------------------+ - - -List snapshots with details -~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -.. rest_method:: GET /v2/{tenant_id}/snapshots/detail - -Lists all Block Storage snapshots, with details, that the tenant can access. - - -Normal response codes: 200 -Error response codes: - - -Request -------- - -.. rest_parameters:: parameters.yaml - - - tenant_id: tenant_id - - -Response Parameters -------------------- - -.. rest_parameters:: parameters.yaml - - - status: status_2 - - os-extended-snapshot-attributes:progress: os-extended-snapshot-attributes:progress - - description: description - - created_at: created_at - - name: name - - volume_id: volume_id - - os-extended-snapshot-attributes:project_id: os-extended-snapshot-attributes:project_id - - size: size - - id: id - - metadata: metadata - -Response Example ----------------- - -.. literalinclude:: ./samples/snapshots-list-detailed-response.json - :language: javascript - - -Create snapshot -~~~~~~~~~~~~~~~ - -.. 
rest_method:: POST /v2/{tenant_id}/snapshots - -Creates a volume snapshot, which is a point-in-time, complete copy of a volume. You can create a volume from a snapshot. - -Normal response codes: 202, - - -Request -------- - -.. rest_parameters:: parameters.yaml - - - snapshot: snapshot - - volume_id: volume_id - - force: force - - description: description - - name: name - - tenant_id: tenant_id - -Request Example ---------------- - -.. literalinclude:: ./samples/snapshot-create-request.json - :language: javascript - -Response Parameters -------------------- - -.. rest_parameters:: parameters.yaml - - - status: status_2 - - description: description - - created_at: created_at - - name: name - - snapshot: snapshot - - volume_id: volume_id - - metadata: metadata - - id: id - - size: size - - -List snapshots -~~~~~~~~~~~~~~ - -.. rest_method:: GET /v2/{tenant_id}/snapshots - -Lists all Block Storage snapshots, with summary information, that the tenant can access. - - -Normal response codes: 200 -Error response codes: - - -Request -------- - -.. rest_parameters:: parameters.yaml - - - tenant_id: tenant_id - - sort_key: sort_key - - sort_dir: sort_dir - - limit: limit - - offset: offset - - marker: marker - - -Response Parameters -------------------- - -.. rest_parameters:: parameters.yaml - - - status: status_2 - - description: description - - created_at: created_at - - name: name - - volume_id: volume_id - - metadata: metadata - - id: id - - size: size - -Response Example ----------------- - -.. literalinclude:: ./samples/snapshots-list-response.json - :language: javascript - - -Show snapshot metadata -~~~~~~~~~~~~~~~~~~~~~~ - -.. rest_method:: GET /v2/{tenant_id}/snapshots/{snapshot_id}/metadata - -Shows metadata for a snapshot. - - -Normal response codes: 200 -Error response codes: - - -Request -------- - -.. rest_parameters:: parameters.yaml - - - tenant_id: tenant_id - - snapshot_id: snapshot_id - - -Response Parameters -------------------- - -.. 
rest_parameters:: parameters.yaml - - - status: status_2 - - os-extended-snapshot-attributes:progress: os-extended-snapshot-attributes:progress - - description: description - - created_at: created_at - - name: name - - snapshot: snapshot - - volume_id: volume_id - - os-extended-snapshot-attributes:project_id: os-extended-snapshot-attributes:project_id - - size: size - - id: id - - metadata: metadata - -Response Example ----------------- - -.. literalinclude:: ./samples/snapshot-metadata-show-response.json - :language: javascript - - -Create snapshot metadata -~~~~~~~~~~~~~~~~~~~~~~~~ - -.. rest_method:: POST /v2/{tenant_id}/snapshots/{snapshot_id}/metadata - -Updates metadata for a snapshot. - -Creates or replaces metadata items that match keys. Does not modify items that -are not in the request. - - -Normal response codes: 200 -Error response codes: - - -Request -------- - -.. rest_parameters:: parameters.yaml - - - metadata: metadata - - tenant_id: tenant_id - - snapshot_id: snapshot_id - -Request Example ---------------- - -.. literalinclude:: ./samples/snapshot-metadata-create-request.json - :language: javascript - - -Response Example ----------------- - -.. literalinclude:: ./samples/snapshot-metadata-create-response.json - :language: javascript - - -Update snapshot metadata -~~~~~~~~~~~~~~~~~~~~~~~~ - -.. rest_method:: PUT /v2/{tenant_id}/snapshots/{snapshot_id}/metadata - -Replaces all the snapshot's metadata with the key-value pairs in the request. - - -Normal response codes: 200 -Error response codes: - - -Request -------- - -.. rest_parameters:: parameters.yaml - - - metadata: metadata - - tenant_id: tenant_id - - snapshot_id: snapshot_id - -Request Example ---------------- - -.. literalinclude:: ./samples/snapshot-metadata-update-request.json - :language: javascript - - -Response Example ----------------- - -.. literalinclude:: ./samples/snapshot-metadata-update-response.json - :language: javascript - - -Show snapshot details -~~~~~~~~~~~~~~~~~~~~~ - -.. 
rest_method:: GET /v2/{tenant_id}/snapshots/{snapshot_id} - -Shows details for a snapshot. - - -Normal response codes: 200 -Error response codes: - - -Request -------- - -.. rest_parameters:: parameters.yaml - - - tenant_id: tenant_id - - snapshot_id: snapshot_id - - -Response Parameters -------------------- - -.. rest_parameters:: parameters.yaml - - - status: status_2 - - os-extended-snapshot-attributes:progress: os-extended-snapshot-attributes:progress - - description: description - - created_at: created_at - - name: name - - snapshot: snapshot - - volume_id: volume_id - - os-extended-snapshot-attributes:project_id: os-extended-snapshot-attributes:project_id - - size: size - - id: id - - metadata: metadata - -Response Example ----------------- - -.. literalinclude:: ./samples/snapshot-show-response.json - :language: javascript - - -Update snapshot -~~~~~~~~~~~~~~~ - -.. rest_method:: PUT /v2/{tenant_id}/snapshots/{snapshot_id} - -Updates a snapshot. - - -Normal response codes: 200 -Error response codes: - - -Request -------- - -.. rest_parameters:: parameters.yaml - - - snapshot: snapshot - - description: description - - name: name - - tenant_id: tenant_id - - snapshot_id: snapshot_id - -Request Example ---------------- - -.. literalinclude:: ./samples/snapshot-update-request.json - :language: javascript - - - -Response Parameters -------------------- - -.. rest_parameters:: parameters.yaml - - - status: status_2 - - description: description - - created_at: created_at - - name: name - - snapshot: snapshot - - volume_id: volume_id - - metadata: metadata - - id: id - - size: size - -Response Example ----------------- - -.. literalinclude:: ./samples/snapshot-update-response.json - :language: javascript - - -Delete snapshot -~~~~~~~~~~~~~~~ - -.. rest_method:: DELETE /v2/{tenant_id}/snapshots/{snapshot_id} - -Deletes a snapshot. - -Normal response codes: 202, - - -Request -------- - -.. 
rest_parameters:: parameters.yaml - - - tenant_id: tenant_id - - snapshot_id: snapshot_id diff --git a/api-ref/source/v2/volumes-v2-types.inc b/api-ref/source/v2/volumes-v2-types.inc deleted file mode 100644 index 28246cc4b..000000000 --- a/api-ref/source/v2/volumes-v2-types.inc +++ /dev/null @@ -1,444 +0,0 @@ -.. -*- rst -*- - -Volume types (types) -==================== - - -Update volume type -~~~~~~~~~~~~~~~~~~ - -.. rest_method:: PUT /v2/{tenant_id}/types/{volume_type_id} - -Updates a volume type. - -To create an environment with multiple-storage back ends, you must -specify a volume type. The API spawns Block Storage volume back -ends as children to ``cinder-volume``, and keys them from a unique -queue. The API names the back ends ``cinder-volume.HOST.BACKEND``. -For example, ``cinder-volume.ubuntu.lvmdriver``. When you create a -volume, the scheduler chooses an appropriate back end for the -volume type to handle the request. - -For information about how to use volume types to create multiple- -storage back ends, see `Configure multiple-storage back ends -`_. - - -Normal response codes: 200 -Error response codes: - - -Request -------- - -.. rest_parameters:: parameters.yaml - - - volume_type: volume_type - - volume_type_id: volume_type_id - - tenant_id: tenant_id - -Request Example ---------------- - -.. literalinclude:: ./samples/volume-type-update-request.json - :language: javascript - - - -Response Parameters -------------------- - -.. rest_parameters:: parameters.yaml - - - is_public: is_public - - extra_specs: extra_specs - - description: description - - volume_type: volume_type - - name: name - -Response Example ----------------- - -.. literalinclude:: ./samples/volume-type-show-response.json - :language: javascript - - -Update extra specs for a volume type -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -.. rest_method:: PUT /v2/{tenant_id}/types/{volume_type_id} - -Updates the extra specifications that are assigned to a volume type. 
- - -Normal response codes: 200 -Error response codes: - - -Request -------- - -.. rest_parameters:: parameters.yaml - - - extra_specs: extra_specs - - volume_type: volume_type - - volume_type_id: volume_type_id - - tenant_id: tenant_id - -Request Example ---------------- - -.. literalinclude:: ./samples/volume-type-update-request.json - :language: javascript - - - -Response Parameters -------------------- - -.. rest_parameters:: parameters.yaml - - - is_public: is_public - - extra_specs: extra_specs - - description: description - - volume_type: volume_type - - name: name - - -Response Example ----------------- - -.. literalinclude:: ./samples/volume-type-show-response.json - :language: javascript - - -Show volume type details for v2 -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -.. rest_method:: GET /v2/{tenant_id}/types/{volume_type_id} - -Shows details for a volume type. - - -Normal response codes: 200 -Error response codes: - -Request -------- - -.. rest_parameters:: parameters.yaml - - - volume_type_id: volume_type_id - - tenant_id: tenant_id - - -Response Parameters -------------------- - -.. rest_parameters:: parameters.yaml - - - is_public: is_public - - extra_specs: extra_specs - - description: description - - volume_type: volume_type - - name: name - - -Response Example ----------------- - -.. literalinclude:: ./samples/volume-type-show-response.json - :language: javascript - - -Delete volume type -~~~~~~~~~~~~~~~~~~ - -.. rest_method:: DELETE /v2/{tenant_id}/types/{volume_type_id} - -Deletes a volume type. - -Normal response codes: 202, - - -Request -------- - -.. rest_parameters:: parameters.yaml - - - volume_type_id: volume_type_id - - tenant_id: tenant_id - - -List all volume types for v2 -~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -.. rest_method:: GET /v2/{tenant_id}/types - -Lists volume types. - - -Normal response codes: 200 -Error response codes: - - -Request -------- - -.. 
rest_parameters:: parameters.yaml - - - tenant_id: tenant_id - - sort_key: sort_key - - sort_dir: sort_dir - - limit: limit - - offset: offset - - marker: marker - - -Response Parameters -------------------- - -.. rest_parameters:: parameters.yaml - - - volume_types: volume_types - - extra_specs: extra_specs - - name: name - - volume_type: volume_type - -Response Example ----------------- - -.. literalinclude:: ./samples/volume-types-list-response.json - :language: javascript - - -Create volume type for v2 -~~~~~~~~~~~~~~~~~~~~~~~~~ - -.. rest_method:: POST /v2/{tenant_id}/types - -Creates a volume type. - -To create an environment with multiple-storage back ends, you must -specify a volume type. Block Storage volume back ends are spawned -as children to ``cinder-volume``, and they are keyed from a unique -queue. They are named ``cinder-volume.HOST.BACKEND``. For example, -``cinder-volume.ubuntu.lvmdriver``. When a volume is created, the -scheduler chooses an appropriate back end to handle the request -based on the volume type. - -For information about how to use volume types to create multiple- -storage back ends, see `Configure multiple-storage back ends -`_. - - -Normal response codes: 200 -Error response codes: - - -Request -------- - -.. rest_parameters:: parameters.yaml - - - volume_type: volume_type - - tenant_id: tenant_id - -Request Example ---------------- - -.. literalinclude:: ./samples/volume-type-create-request.json - :language: javascript - - -Response Parameters -------------------- - -.. rest_parameters:: parameters.yaml - - - is_public: is_public - - extra_specs: extra_specs - - description: description - - volume_type: volume_type - - name: name - - -Response Example ----------------- - -.. literalinclude:: ./samples/volume-type-show-response.json - :language: javascript - -Show an encryption type for v2 -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -.. rest_method:: GET /v2/{tenant_id}/types/{volume_type_id}/encryption - -Show an encryption type. 
- -To show an encryption type for an existing volume type. - -Normal response codes: 200 -Error response codes: - - -Request -------- - -.. rest_parameters:: parameters.yaml - - - volume_type_id: volume_type_id - - tenant_id: tenant_id - - -Response Parameters -------------------- - -.. rest_parameters:: parameters.yaml - - - volume_type_id: volume_type_id_body - - encryption_id: encryption_id - - encryption: encryption - - key_size: key_size - - provider: provider - - control_location: control_location - - cipher: cipher - - deleted: deleted - - created_at: created_at - - updated_at: updated_at - - deleted_at: deleted_at - -Response Example ----------------- - -.. literalinclude:: ./samples/encryption-type-show-response.json - :language: javascript - - -Delete an encryption type for v2 -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -.. rest_method:: GET /v2/{tenant_id}/types/{volume_type_id}/encryption/{encryption_id} - -Delete an encryption type. - -To delete an encryption type for an existing volume type. - -Normal response codes: 202 -Error response codes: - - -Request -------- - -.. rest_parameters:: parameters.yaml - - - volume_type_id: volume_type_id - - tenant_id: tenant_id - - encryption_id: encryption_id - -Create an encryption type for v2 -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -.. rest_method:: POST /v2/{tenant_id}/types/{volume_type_id}/encryption - -Creates an encryption type. - -To create an encryption type for an existing volume type. - -Normal response codes: 200 -Error response codes: - - -Request -------- - -.. rest_parameters:: parameters.yaml - - - volume_type_id: volume_type_id - - tenant_id: tenant_id - - encryption: encryption - - key_size: key_size - - provider: provider - - control_location: control_location - - cipher: cipher - -Request Example ---------------- - -.. literalinclude:: ./samples/encryption-type-create-request.json - :language: javascript - - -Response Parameters -------------------- - -.. 
rest_parameters:: parameters.yaml - - - volume_type_id: volume_type_id_body - - encryption_id: encryption_id - - encryption: encryption - - key_size: key_size - - provider: provider - - control_location: control_location - - cipher: cipher - -Response Example ----------------- - -.. literalinclude:: ./samples/encryption-type-create-response.json - :language: javascript - - -Update an encryption type for v2 -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -.. rest_method:: POST /v2/{tenant_id}/types/{volume_type_id}/encryption/{encryption_id} - -Update an encryption type. - -To update an encryption type for an existing volume type. - -Normal response codes: 200 -Error response codes: - - -Request -------- - -.. rest_parameters:: parameters.yaml - - - volume_type_id: volume_type_id - - tenant_id: tenant_id - - encryption_id: encryption_id - - encryption: encryption - - key_size: key_size - - provider: provider_optional - - control_location: control_location - - cipher: cipher - -Request Example ---------------- - -.. literalinclude:: ./samples/encryption-type-update-request.json - :language: javascript - - -Response Parameters -------------------- - -.. rest_parameters:: parameters.yaml - - - encryption: encryption - - key_size: key_size - - provider: provider_optional - - control_location: control_location - - cipher: cipher - -Response Example ----------------- - -.. literalinclude:: ./samples/encryption-type-update-response.json - :language: javascript diff --git a/api-ref/source/v2/volumes-v2-versions.inc b/api-ref/source/v2/volumes-v2-versions.inc deleted file mode 100644 index 27fb0ebb3..000000000 --- a/api-ref/source/v2/volumes-v2-versions.inc +++ /dev/null @@ -1,40 +0,0 @@ -.. -*- rst -*- - -API versions -============ - - - - -Show API v2 details -~~~~~~~~~~~~~~~~~~~ - -.. rest_method:: GET /v2 - -Shows details for Block Storage API v2. - - -Normal response codes: 200 - - -Request -------- - - - -Response Parameters -------------------- - -.. 
rest_parameters:: parameters.yaml - - - location: location - - - -Response Example ----------------- - -.. literalinclude:: ./samples/version-v2-show-response.json - :language: javascript - - diff --git a/api-ref/source/v2/volumes-v2-volumes-actions.inc b/api-ref/source/v2/volumes-v2-volumes-actions.inc deleted file mode 100644 index 000aa6161..000000000 --- a/api-ref/source/v2/volumes-v2-volumes-actions.inc +++ /dev/null @@ -1,406 +0,0 @@ -.. -*- rst -*- - -Volume actions (volumes, action) -================================ - -Extends the size of, resets statuses for, sets image metadata for, -and removes image metadata from a volume. Attaches a volume to a -server, detaches a volume from a server, and removes a volume from -Block Storage management without actually removing the back-end -storage object associated with it. - - -Extend volume size -~~~~~~~~~~~~~~~~~~ - -.. rest_method:: POST /v2/{tenant_id}/volumes/{volume_id}/action - -Extends the size of a volume to a requested size, in gibibytes (GiB). Specify the ``os-extend`` action in the request body. - -Preconditions - -- Volume status must be ``available``. - -- Sufficient amount of storage must exist to extend the volume. - -- The user quota must have sufficient volume storage. - -Troubleshooting - -- An ``error_extending`` volume status indicates that the request - failed. Ensure that you meet the preconditions and retry the - request. If the request fails again, investigate the storage back - end. - -Normal response codes: 202, - - -Request -------- - -.. rest_parameters:: parameters.yaml - - - os-extend: os-extend - - new_size: new_size - - tenant_id: tenant_id - - volume_id: volume_id_path - -Request Example ---------------- - -.. literalinclude:: ./samples/volume-extend-request.json - :language: javascript - - - - - - - -Reset volume statuses -~~~~~~~~~~~~~~~~~~~~~ - -.. rest_method:: POST /v2/{tenant_id}/volumes/{volume_id}/action - -Administrator only. 
Resets the status, attach status, and migration status for a volume. Specify the ``os-reset_status`` action in the request body. - -Normal response codes: 202, - - -Request -------- - -.. rest_parameters:: parameters.yaml - - - status: status_3 - - migration_status: migration_status - - os-reset_status: os-reset_status - - attach_status: attach_status - - tenant_id: tenant_id - - volume_id: volume_id_path - -Request Example ---------------- - -.. literalinclude:: ./samples/volume-status-reset-request.json - :language: javascript - - - - - - - -Set image metadata for volume -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -.. rest_method:: POST /v2/{tenant_id}/volumes/{volume_id}/action - -Sets the image metadata for a volume. Specify the ``os-set_image_metadata`` action in the request body. - -Normal response codes: 202, - - -Request -------- - -.. rest_parameters:: parameters.yaml - - - os-set_image_metadata: os-set_image_metadata - - metadata: metadata - - tenant_id: tenant_id - - volume_id: volume_id_path - -Request Example ---------------- - -.. literalinclude:: ./samples/volume-image-metadata-set-request.json - :language: javascript - - - - - - - -Remove image metadata from volume -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -.. rest_method:: POST /v2/{tenant_id}/volumes/{volume_id}/action - -Removes image metadata, by key, from a volume. Specify the ``os-unset_image_metadata`` action in the request body and the ``key`` for the metadata key and value pair that you want to remove. - -Normal response codes: 202, - - -Request -------- - -.. rest_parameters:: parameters.yaml - - - os-unset_image_metadata: os-unset_image_metadata - - key: key - - tenant_id: tenant_id - - volume_id: volume_id_path - -Request Example ---------------- - -.. literalinclude:: ./samples/volume-image-metadata-unset-request.json - :language: javascript - - - - - - - -Attach volume to server -~~~~~~~~~~~~~~~~~~~~~~~ - -.. 
rest_method:: POST /v2/{tenant_id}/volumes/{volume_id}/action - -Attaches a volume to a server. Specify the ``os-attach`` action in the request body. - -Preconditions - -- Volume status must be ``available``. - -- You should set ``instance_uuid`` or ``host_name``. - -Normal response codes: 202, - - -Request -------- - -.. rest_parameters:: parameters.yaml - - - instance_uuid: instance_uuid - - mountpoint: mountpoint - - host_name: host_name - - os-attach: os-attach - - tenant_id: tenant_id - - volume_id: volume_id_path - -Request Example ---------------- - -.. literalinclude:: ./samples/volume-attach-request.json - :language: javascript - - - - - - - -Detach volume from a server -~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -.. rest_method:: POST /v2/{tenant_id}/volumes/{volume_id}/action - -Detaches a volume from a server. Specify the ``os-detach`` action in the request body. - -Preconditions - -- Volume status must be ``in-use``. - -Normal response codes: 202, - - -Request -------- - -.. rest_parameters:: parameters.yaml - - - attachment_id: attachment_id - - os-detach: os-detach - - tenant_id: tenant_id - - volume_id: volume_id_path - -Request Example ---------------- - -.. literalinclude:: ./samples/volume-detach-request.json - :language: javascript - - - - - - - - - - -Unmanage volume -~~~~~~~~~~~~~~~ - -.. rest_method:: POST /v2/{tenant_id}/volumes/{volume_id}/action - -Removes a volume from Block Storage management without removing the back-end storage object that is associated with it. Specify the ``os-unmanage`` action in the request body. - -Preconditions - -- Volume status must be ``available``. - -Normal response codes: 202, - - -Request -------- - -.. rest_parameters:: parameters.yaml - - - os-unmanage: os-unmanage - - tenant_id: tenant_id - - volume_id: volume_id_path - -Request Example ---------------- - -.. literalinclude:: ./samples/volume-unmanage-request.json - :language: javascript - - - - - - - -Force detach volume -~~~~~~~~~~~~~~~~~~~ - -.. 
rest_method:: POST /v2/{tenant_id}/volumes/{volume_id}/action - -Forces a volume to detach. Specify the ``os-force_detach`` action in the request body. - -Rolls back an unsuccessful detach operation after you disconnect -the volume. - -Normal response codes: 202, - - -Request -------- - -.. rest_parameters:: parameters.yaml - - - connector: connector - - attachment_id: attachment_id - - os-force_detach: os-force_detach - - tenant_id: tenant_id - - volume_id: volume_id_path - -Request Example ---------------- - -.. literalinclude:: ./samples/volume-force-detach-request.json - :language: javascript - - - - - - - -Retype volume -~~~~~~~~~~~~~ - -.. rest_method:: POST /v2/{project_id}/volumes/{volume_id}/action - -Change type of existing volume. Specify the ``os-retype`` action in the request body. - -Change the volume type of existing volume, Cinder may migrate the volume to -proper volume host according to the new volume type. - -Policy defaults enable only users with the administrative role or the owner of -the volume to perform this operation. Cloud providers can change these -permissions through the policy.json file. - -Normal response codes: 202 - - -Request -------- - -.. rest_parameters:: parameters.yaml - - - new_type: new_type - - migration_policy: migration_policy - - os-retype: os-retype - - volume_id: volume_id_path - - project_id: project_id_path - -Request Example ---------------- - -.. literalinclude:: ./samples/volume-os-retype-request.json - :language: javascript - - - - - - - -Force delete volume -~~~~~~~~~~~~~~~~~~~~~ - -.. rest_method:: POST /v2/{tenant_id}/volumes/{volume_id}/action - -Attempts force-delete of volume, regardless of state. Specify the ``os-force_delete`` action -in the request body. - - -Normal response codes: 202 - - -Request -------- - -.. rest_parameters:: parameters.yaml - - - os-force_delete: os-force_delete - - tenant_id: tenant_id - - volume_id: volume_id_path - -Request Example ---------------- - -.. 
literalinclude:: ./samples/volume-force-delete-request.json - :language: javascript - - - - - - - -Update volume bootable status -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -.. rest_method:: POST /v2/{tenant_id}/volumes/{volume_id}/action - -Update the bootable status for a volume, mark it as a bootable volume. Specify the ``os-set_bootable`` action in the request body. - -Normal response codes: 200 - - -Request -------- - -.. rest_parameters:: parameters.yaml - - - tenant_id: tenant_id - - volume_id: volume_id_path - - os-set_bootable: os-set_bootable - - bootable: bootable - -Request Example ---------------- - -.. literalinclude:: ./samples/volume-bootable-status-update-request.json - :language: javascript - diff --git a/api-ref/source/v2/volumes-v2-volumes.inc b/api-ref/source/v2/volumes-v2-volumes.inc deleted file mode 100644 index 2abfba974..000000000 --- a/api-ref/source/v2/volumes-v2-volumes.inc +++ /dev/null @@ -1,650 +0,0 @@ -.. -*- rst -*- - -Volumes (volumes) -================= - -A volume is a detachable block storage device similar to a USB hard -drive. You can attach a volume to one instance at a time. - -The ``snapshot_id`` and ``source_volid`` parameters specify the ID -of the snapshot or volume from which this volume originates. If the -volume was not created from a snapshot or source volume, these -values are null. - -When you create, list, update, or delete volumes, the possible -status values are: - -**Volume statuses** - -+------------------+--------------------------------------------------------+ -| Status | Description | -+------------------+--------------------------------------------------------+ -| creating | The volume is being created. | -+------------------+--------------------------------------------------------+ -| available | The volume is ready to attach to an instance. | -+------------------+--------------------------------------------------------+ -| attaching | The volume is attaching to an instance. 
| -+------------------+--------------------------------------------------------+ -| detaching | The volume is detaching from an instance. | -+------------------+--------------------------------------------------------+ -| in-use | The volume is attached to an instance. | -+------------------+--------------------------------------------------------+ -| maintenance | The volume is locked and being migrated. | -+------------------+--------------------------------------------------------+ -| deleting | The volume is being deleted. | -+------------------+--------------------------------------------------------+ -| awaiting-transfer| The volume is awaiting for transfer. | -+------------------+--------------------------------------------------------+ -| error | A volume creation error occurred. | -+------------------+--------------------------------------------------------+ -| error_deleting | A volume deletion error occurred. | -+------------------+--------------------------------------------------------+ -| backing-up | The volume is being backed up. | -+------------------+--------------------------------------------------------+ -| restoring-backup | A backup is being restored to the volume. | -+------------------+--------------------------------------------------------+ -| error_backing-up | A backup error occurred. | -+------------------+--------------------------------------------------------+ -| error_restoring | A backup restoration error occurred. | -+------------------+--------------------------------------------------------+ -| error_extending | An error occurred while attempting to extend a volume. | -+------------------+--------------------------------------------------------+ -| downloading | The volume is downloading an image. | -+------------------+--------------------------------------------------------+ -| uploading | The volume is being uploaded to an image. 
| -+------------------+--------------------------------------------------------+ -| retyping | The volume is changing type to another volume type. | -+------------------+--------------------------------------------------------+ -| extending | The volume is being extended. | -+------------------+--------------------------------------------------------+ - - -List volumes with details -~~~~~~~~~~~~~~~~~~~~~~~~~ - -.. rest_method:: GET /v2/{tenant_id}/volumes/detail - -Lists all Block Storage volumes, with details, that the tenant can access. - - -Normal response codes: 200 -Error response codes: - - -Request -------- - -.. rest_parameters:: parameters.yaml - - - tenant_id: tenant_id - - sort: sort - - limit: limit - - offset: offset - - marker: marker - - -Response Parameters -------------------- - -.. rest_parameters:: parameters.yaml - - - migration_status: migration_status - - attachments: attachments - - links: links - - availability_zone: availability_zone - - os-vol-host-attr:host: os-vol-host-attr:host - - encrypted: encrypted - - updated_at: updated_at - - os-volume-replication:extended_status: os-volume-replication:extended_status - - replication_status: replication_status - - snapshot_id: snapshot_id - - id: id - - size: size - - user_id: user_id - - os-vol-tenant-attr:tenant_id: os-vol-tenant-attr:tenant_id - - os-vol-mig-status-attr:migstat: os-vol-mig-status-attr:migstat - - metadata: metadata - - status: status_3 - - description: description - - multiattach: multiattach - - source_volid: source_volid - - consistencygroup_id: consistencygroup_id - - os-vol-mig-status-attr:name_id: os-vol-mig-status-attr:name_id - - name: name - - bootable: bootable_response - - created_at: created_at - - os-volume-replication:driver_data: os-volume-replication:driver_data - - volumes: volumes - - volume_type: volume_type - - - -Response Example ----------------- - -.. 
literalinclude:: ./samples/volumes-list-detailed-response.json - :language: javascript - - - - -Create volume -~~~~~~~~~~~~~ - -.. rest_method:: POST /v2/{tenant_id}/volumes - -Creates a volume. - -To create a bootable volume, include the UUID of the image from -which you want to create the volume in the ``imageRef`` attribute -in the request body. - -Preconditions - -- You must have enough volume storage quota remaining to create a - volume of size requested. - -Asynchronous Postconditions - -- With correct permissions, you can see the volume status as - ``available`` through API calls. - -- With correct access, you can see the created volume in the storage - system that OpenStack Block Storage manages. - -Troubleshooting - -- If volume status remains ``creating`` or shows another error - status, the request failed. Ensure you meet the preconditions - then investigate the storage back end. - -- Volume is not created in the storage system that OpenStack Block - Storage manages. - -- The storage node needs enough free storage space to match the size - of the volume creation request. - -Normal response codes: 202, - - -Request -------- - -.. rest_parameters:: parameters.yaml - - - size: size - - description: description_9 - - imageRef: imageRef - - multiattach: multiattach - - availability_zone: availability_zone - - source_volid: source_volid - - name: name_13 - - volume: volume - - consistencygroup_id: consistencygroup_id_1 - - volume_type: volume_type_2 - - snapshot_id: snapshot_id - - OS-SCH-HNT:scheduler_hints: OS-SCH-HNT:scheduler_hints - - source_replica: source_replica - - metadata: metadata_2 - - tenant_id: tenant_id - -Request Example ---------------- - -.. literalinclude:: ./samples/volume-create-request.json - :language: javascript - - - -Response Parameters -------------------- - -.. 
rest_parameters:: parameters.yaml - - - migration_status: migration_status - - attachments: attachments - - links: links - - availability_zone: availability_zone - - encrypted: encrypted - - updated_at: updated_at - - replication_status: replication_status - - snapshot_id: snapshot_id - - id: id - - size: size - - user_id: user_id - - metadata: metadata - - status: status_3 - - description: description - - multiattach: multiattach - - source_volid: source_volid - - volume: volume - - consistencygroup_id: consistencygroup_id - - name: name - - bootable: bootable_response - - created_at: created_at - - volume_type: volume_type - - - - - -List volumes -~~~~~~~~~~~~ - -.. rest_method:: GET /v2/{tenant_id}/volumes - -Lists summary information for all Block Storage volumes that the tenant can access. - - -Normal response codes: 200 -Error response codes: - - -Request -------- - -.. rest_parameters:: parameters.yaml - - - tenant_id: tenant_id - - sort: sort - - limit: limit - - offset: offset - - marker: marker - - -Response Parameters -------------------- - -.. rest_parameters:: parameters.yaml - - - volumes: volumes - - id: id - - links: links - - name: name - - - -Response Example ----------------- - -.. literalinclude:: ./samples/volumes-list-response.json - :language: javascript - - - - -Show volume details -~~~~~~~~~~~~~~~~~~~ - -.. rest_method:: GET /v2/{tenant_id}/volumes/{volume_id} - -Shows details for a volume. - -Preconditions - -- The volume must exist. - - -Normal response codes: 200 -Error response codes: - - -Request -------- - -.. rest_parameters:: parameters.yaml - - - tenant_id: tenant_id - - volume_id: volume_id_path - - -Response Parameters -------------------- - -.. 
rest_parameters:: parameters.yaml - - - migration_status: migration_status - - attachments: attachments - - links: links - - availability_zone: availability_zone - - os-vol-host-attr:host: os-vol-host-attr:host - - encrypted: encrypted - - updated_at: updated_at - - os-volume-replication:extended_status: os-volume-replication:extended_status - - replication_status: replication_status - - snapshot_id: snapshot_id - - id: id - - size: size - - user_id: user_id - - os-vol-tenant-attr:tenant_id: os-vol-tenant-attr:tenant_id - - os-vol-mig-status-attr:migstat: os-vol-mig-status-attr:migstat - - metadata: metadata - - status: status_3 - - description: description - - multiattach: multiattach - - source_volid: source_volid - - volume: volume - - consistencygroup_id: consistencygroup_id - - os-vol-mig-status-attr:name_id: os-vol-mig-status-attr:name_id - - name: name - - bootable: bootable_response - - created_at: created_at - - os-volume-replication:driver_data: os-volume-replication:driver_data - - volume_type: volume_type - - - -Response Example ----------------- - -.. literalinclude:: ./samples/volume-show-response.json - :language: javascript - - - - -Update volume -~~~~~~~~~~~~~ - -.. rest_method:: PUT /v2/{tenant_id}/volumes/{volume_id} - -Updates a volume. - - -Normal response codes: 200 -Error response codes: - - -Request -------- - -.. rest_parameters:: parameters.yaml - - - volume: volume - - description: description - - name: name - - metadata: metadata_2 - - tenant_id: tenant_id - - volume_id: volume_id_path - -Request Example ---------------- - -.. literalinclude:: ./samples/volume-update-request.json - :language: javascript - - - -Response Parameters -------------------- - -.. 
rest_parameters:: parameters.yaml - - - migration_status: migration_status - - attachments: attachments - - links: links - - availability_zone: availability_zone - - encrypted: encrypted - - updated_at: updated_at - - replication_status: replication_status - - snapshot_id: snapshot_id - - id: id - - size: size - - user_id: user_id - - metadata: metadata_3 - - status: status_3 - - description: description - - multiattach: multiattach - - source_volid: source_volid - - volume: volume - - consistencygroup_id: consistencygroup_id - - name: name - - bootable: bootable_response - - created_at: created_at - - volume_type: volume_type - - - -Response Example ----------------- - -.. literalinclude:: ./samples/volume-update-response.json - :language: javascript - - - - -Delete volume -~~~~~~~~~~~~~ - -.. rest_method:: DELETE /v2/{tenant_id}/volumes/{volume_id} - -Deletes a volume. - -Preconditions - -- Volume status must be ``available``, ``in-use``, ``error``, or - ``error_restoring``. - -- You cannot already have a snapshot of the volume. - -- You cannot delete a volume that is in a migration. - -Asynchronous Postconditions - -- The volume is deleted in volume index. - -- The volume managed by OpenStack Block Storage is deleted in - storage node. - -Troubleshooting - -- If volume status remains in ``deleting`` or becomes - ``error_deleting`` the request failed. Ensure you meet the - preconditions then investigate the storage back end. - -- The volume managed by OpenStack Block Storage is not deleted from - the storage system. - -Normal response codes: 202, - - -Request -------- - -.. rest_parameters:: parameters.yaml - - - tenant_id: tenant_id - - volume_id: volume_id_path - - cascade: cascade - - - - - - -Create volume metadata -~~~~~~~~~~~~~~~~~~~~~~ - -.. rest_method:: POST /v2/{tenant_id}/volumes/{volume_id}/metadata - -Creates or replaces metadata for a volume. Does not modify items that are not -in the request. 
- -Normal response codes: 202, - - -Request -------- - -.. rest_parameters:: parameters.yaml - - - metadata: metadata_3 - - tenant_id: tenant_id - - volume_id: volume_id_path - -Request Example ---------------- - -.. literalinclude:: ./samples/volume-metadata-create-request.json - :language: javascript - - - -Response Parameters -------------------- - -.. rest_parameters:: parameters.yaml - - - metadata: metadata_3 - - -Response Example ----------------- - -.. literalinclude:: ./samples/volume-metadata-create-response.json - :language: javascript - - - -Show volume metadata -~~~~~~~~~~~~~~~~~~~~ - -.. rest_method:: GET /v2/{tenant_id}/volumes/{volume_id}/metadata - -Shows metadata for a volume. - - -Normal response codes: 200 -Error response codes: - - -Request -------- - -.. rest_parameters:: parameters.yaml - - - tenant_id: tenant_id - - volume_id: volume_id_path - - -Response Parameters -------------------- - -.. rest_parameters:: parameters.yaml - - - metadata: metadata_3 - - - -Response Example ----------------- - -.. literalinclude:: ./samples/volume-metadata-show-response.json - :language: javascript - - - - -Update volume metadata -~~~~~~~~~~~~~~~~~~~~~~ - -.. rest_method:: PUT /v2/{tenant_id}/volumes/{volume_id}/metadata - -Replaces all the volume's metadata with the key-value pairs in the request. - - -Normal response codes: 200 -Error response codes: - - -Request -------- - -.. rest_parameters:: parameters.yaml - - - metadata: metadata_3 - - tenant_id: tenant_id - - volume_id: volume_id_path - -Request Example ---------------- - -.. literalinclude:: ./samples/volume-metadata-update-request.json - :language: javascript - - - -Response Parameters -------------------- - -.. rest_parameters:: parameters.yaml - - - metadata: metadata_3 - - - -Response Example ----------------- - -.. 
literalinclude:: ./samples/volume-metadata-update-response.json - :language: javascript - - - -Show volume metadata for a specific key -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -.. rest_method:: GET /v2/{tenant_id}/volumes/{volume_id}/metadata/{key} - -Shows metadata for a volume for a specific key. - -Normal response codes: 200 - - -Request -------- - -.. rest_parameters:: parameters.yaml - - - tenant_id: tenant_id - - volume_id: volume_id_path - - key: key_2 - - -Response Parameters -------------------- - -.. rest_parameters:: parameters.yaml - - - metadata: metadata_3 - - - -Response Example ----------------- - -.. literalinclude:: ./samples/volume-metadata-show-response.json - :language: javascript - - - - -Delete volume metadata -~~~~~~~~~~~~~~~~~~~~~~ - -.. rest_method:: DELETE /v2/{tenant_id}/volumes/{volume_id}/metadata/{key} - -Deletes metadata for a volume. - -Normal response codes: 200 - - -Request -------- - -.. rest_parameters:: parameters.yaml - - - tenant_id: tenant_id - - volume_id: volume_id_path - - key: key_1 diff --git a/api-ref/source/v3/api-versions.inc b/api-ref/source/v3/api-versions.inc deleted file mode 100644 index 41ec0e8a0..000000000 --- a/api-ref/source/v3/api-versions.inc +++ /dev/null @@ -1,26 +0,0 @@ -.. -*- rst -*- - -List All Api Versions -===================== - -.. rest_method:: GET / - -Lists information for all Block Storage API versions. - - -Normal response codes: 300 - -Error response codes: computeFault(400, 500), serviceUnavailable(503), badRequest(400), -unauthorized(401), forbidden(403), badMethod(405), itemNotFound(404), conflict(409) - -Request -~~~~~~~ - -Response -~~~~~~~~ - -**Example List Api Versions: JSON request** - - -.. literalinclude:: ./samples/versions-response.json - :language: javascript diff --git a/api-ref/source/v3/attachments.inc b/api-ref/source/v3/attachments.inc deleted file mode 100644 index bcc5dd4c5..000000000 --- a/api-ref/source/v3/attachments.inc +++ /dev/null @@ -1,253 +0,0 @@ -.. 
-*- rst -*- - -Attachments -=========== - -Lists all, lists all with details, shows details for, creates, and -deletes attachment. - - -Delete attachment -~~~~~~~~~~~~~~~~~ - -.. rest_method:: DELETE /v3/{project_id}/attachments/{attachment_id} - -Deletes an attachment. - -Normal response codes: 200 -Error response codes: badRequest(400), itemNotFound(404) - - -Request -------- - -.. rest_parameters:: parameters.yaml - - - project_id: project_id_path - - attachment_id: attachment_id_1 - - -Show attachment details -~~~~~~~~~~~~~~~~~~~~~~~ - -.. rest_method:: GET /v3/{project_id}/attachments/{attachment_id} - -Shows details for an attachment. - - -Normal response codes: 200 -Error response codes: badRequest(400), itemNotFound(404) - - -Request -------- - -.. rest_parameters:: parameters.yaml - - - project_id: project_id_path - - attachment_id: attachment_id_1 - - -Response Parameters -------------------- - -.. rest_parameters:: parameters.yaml - - - status: status_9 - - detached_at: detached_at - - connection_info: connection_info - - attached_at: attached_at - - attach_mode: attach_mode - - instance: instance_uuid - - volume_id: volume_id_7 - - id: attachment_id_2 - -Response Example ----------------- - -.. literalinclude:: ./samples/attachment-show-response.json - :language: javascript - - -List attachments with details -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -.. rest_method:: GET /v3/{project_id}/attachments/detail - -Lists all attachments with details, since v3.31 if non-admin -users specify invalid filters in the url, API will return bad request. - - -Normal response codes: 200 -Error response codes: badRequest(400) - -Request -------- - -.. rest_parameters:: parameters.yaml - - - project_id: project_id_path - - sort_key: sort_key - - sort_dir: sort_dir - - limit: limit - - offset: offset - - marker: marker - - -Response Parameters -------------------- - -.. 
rest_parameters:: parameters.yaml - - - status: status_9 - - detached_at: detached_at - - connection_info: connection_info - - attached_at: attached_at - - attach_mode: attach_mode - - instance: instance_uuid - - volume_id: volume_id_7 - - id: attachment_id_2 - -Response Example ----------------- - -.. literalinclude:: ./samples/attachment-list-detailed-response.json - :language: javascript - -List attachments -~~~~~~~~~~~~~~~~ - -.. rest_method:: GET /v3/{project_id}/attachments - -Lists all attachments, since v3.31 if non-admin users -specify invalid filters in the url, API will return bad request. - - -Normal response codes: 200 -Error response codes: badRequest(400) - - -Request -------- - -.. rest_parameters:: parameters.yaml - - - project_id: project_id_path - - sort_key: sort_key - - sort_dir: sort_dir - - limit: limit - - offset: offset - - marker: marker - - -Response Parameters -------------------- - -.. rest_parameters:: parameters.yaml - - - status: status_9 - - instance: instance_uuid - - volume_id: volume_id_7 - - id: attachment_id_2 - - -Response Example ----------------- - -.. literalinclude:: ./samples/attachment-list-response.json - :language: javascript - - -Create attachment -~~~~~~~~~~~~~~~~~ - -.. rest_method:: POST /v3/{project_id}/attachments - -Creates an attachment. - -Normal response codes: 202 -Error response codes: badRequest(400), itemNotFound(404) - - -Request -------- - -.. rest_parameters:: parameters.yaml - - - project_id: project_id_path - - instance_uuid: instance_uuid_1 - - connector: connector - - volume_uuid: volume_id_7 - -Request Example ---------------- - -.. literalinclude:: ./samples/attachment-create-request.json - :language: javascript - -Response Parameters -------------------- - -.. 
rest_parameters:: parameters.yaml - - - status: status_9 - - detached_at: detached_at - - connection_info: connection_info - - attached_at: attached_at - - attach_mode: attach_mode - - instance: instance_uuid - - volume_id: volume_id_7 - - id: attachment_id_2 - -Response Example ----------------- - -.. literalinclude:: ./samples/attachment-create-response.json - :language: javascript - - -Update an attachment -~~~~~~~~~~~~~~~~~~~~ - -.. rest_method:: PUT /v3/{project_id}/attachments/{attachment_id} - -Update a reserved attachment record with connector information -and set up the appropriate connection_info from the driver. - -Normal response codes: 200 -Error response codes: badRequest(400), itemNotFound(404) - -Request -------- - -.. rest_parameters:: parameters.yaml - - - project_id: project_id_path - - attachment_id: attachment_id_1 - - connector: connector_1 - -Request Example ---------------- - -.. literalinclude:: ./samples/attachment-update-request.json - :language: javascript - -Response Parameters -------------------- - -.. rest_parameters:: parameters.yaml - - - status: status_9 - - detached_at: detached_at - - connection_info: connection_info - - attached_at: attached_at - - attach_mode: attach_mode - - instance: instance_uuid - - volume_id: volume_id_7 - - id: attachment_id_2 - -Response Example ----------------- - -.. literalinclude:: ./samples/attachment-update-response.json - :language: javascript diff --git a/api-ref/source/v3/capabilities-v3.inc b/api-ref/source/v3/capabilities-v3.inc deleted file mode 100644 index 19a87155a..000000000 --- a/api-ref/source/v3/capabilities-v3.inc +++ /dev/null @@ -1,49 +0,0 @@ -.. -*- rst -*- - -Capabilities for storage back ends (capabilities) -================================================= - -Shows capabilities for a storage back end. - - -Show all back-end capabilities -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -.. 
rest_method:: GET /v3/{project_id}/capabilities/{hostname} - -Shows capabilities for a storage back end on the host. -The ``hostname`` takes the form of ``hostname@volume_backend_name``. - -Normal response codes: 200 -Error response codes: - -Request -------- - -.. rest_parameters:: parameters.yaml - - - project_id: project_id_path - - hostname: hostname - -Response Parameters -------------------- - -.. rest_parameters:: parameters.yaml - - - pool_name: pool_name - - description: description - - volume_backend_name: volume_backend_name - - namespace: namespace - - visibility: visibility - - driver_version: driver_version - - vendor_name: vendor_name - - properties: properties - - storage_protocol: storage_protocol - - replication_targets: replication_targets - - display_name: display_name - -Response Example ----------------- - -.. literalinclude:: ./samples/backend-capabilities-response.json - :language: javascript diff --git a/api-ref/source/v3/consistencygroups-v3.inc b/api-ref/source/v3/consistencygroups-v3.inc deleted file mode 100644 index 3d1094ee2..000000000 --- a/api-ref/source/v3/consistencygroups-v3.inc +++ /dev/null @@ -1,267 +0,0 @@ -.. -*- rst -*- - -Consistency groups -================== - -Consistency groups enable you to create snapshots at the exact same -point in time from multiple volumes. For example, a database might -place its tables, logs, and configuration on separate volumes. To -restore this database from a previous point in time, it makes sense -to restore the logs, tables, and configuration together from the -exact same point in time. - -Use the ``policy.json`` file to grant permissions for these actions -to limit roles. - - -List project's consistency groups -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -.. rest_method:: GET /v3/{project_id}/consistencygroups - -Lists consistency groups. - - -Normal response codes: 200 -Error response codes: - - -Request -------- - -.. 
rest_parameters:: parameters.yaml - - - project_id: project_id_path - - sort_key: sort_key - - sort_dir: sort_dir - - limit: limit - - marker: marker - - -Response Parameters -------------------- - -.. rest_parameters:: parameters.yaml - - - id: id - - name: name - -Response Example ----------------- - -.. literalinclude:: ./samples/consistency-groups-list-response.json - :language: javascript - - -Create a consistency group -~~~~~~~~~~~~~~~~~~~~~~~~~~ - -.. rest_method:: POST /v3/{project_id}/consistencygroups - -Creates a consistency group. - -Normal response codes: 202 - - -Request -------- - -.. rest_parameters:: parameters.yaml - - - project_id: project_id_path - - description: description_6 - - availability_zone: availability_zone - - volume_types: volume_types_2 - - name: name_15 - -Response --------- - -.. rest_parameters:: parameters.yaml - - - status: status_1 - - description: description_11 - - availability_zone: availability_zone - - created_at: created_at - - volume_types: volume_types - - name: name_15 - - id: consistencygroup_id_1 - -Request Example ---------------- - -.. literalinclude:: ./samples/consistency-group-create-request.json - :language: javascript - -Response Example ----------------- - -.. literalinclude:: ./samples/consistency-group-create-response.json - :language: javascript - -Show a consistency group's details -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -.. rest_method:: GET /v3/{project_id}/consistencygroups/{consistencygroup_id} - -Shows details for a consistency group. - -Normal response codes: 200 -Error response codes: - -Request -------- - -.. rest_parameters:: parameters.yaml - - - project_id: project_id_path - - consistencygroup_id: consistencygroup_id - -Response Parameters -------------------- - -.. 
rest_parameters:: parameters.yaml - - - status: status_1 - - description: description - - availability_zone: availability_zone - - created_at: created_at - - volume_types: volume_types - - id: id - - name: name - -Response Example ----------------- - -.. literalinclude:: ./samples/consistency-group-show-response.json - :language: javascript - - -Create a consistency group from source -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -.. rest_method:: POST /v3/{project_id}/consistencygroups/create_from_src - -Creates a consistency group from source. - -Normal response codes: 202 - - -Request -------- - -.. rest_parameters:: parameters.yaml - - - status: status_1 - - user_id: user_id - - description: description - - cgsnapshot_id: cgsnapshot_id - - source_cgid: source_cgid - - project_id: project_id - - name: name - - project_id: project_id_path - -Request Example ---------------- - -.. literalinclude:: ./samples/consistency-group-create-from-src-request.json - :language: javascript - - -Delete a consistency group -~~~~~~~~~~~~~~~~~~~~~~~~~~ - -.. rest_method:: POST /v3/{project_id}/consistencygroups/{consistencygroup_id}/delete - -Deletes a consistency group. - -Normal response codes: 202 - - -Request -------- - -.. rest_parameters:: parameters.yaml - - - force: force - - project_id: project_id_path - - consistencygroup_id: consistencygroup_id - -Request Example ---------------- - -.. literalinclude:: ./samples/consistency-group-delete-request.json - :language: javascript - - -List consistency groups and details -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -.. rest_method:: GET /v3/{project_id}/consistencygroups/detail - -Lists consistency groups with details. - - -Normal response codes: 200 -Error response codes: - - -Request -------- - -.. rest_parameters:: parameters.yaml - - - project_id: project_id_path - - sort_key: sort_key - - sort_dir: sort_dir - - limit: limit - - marker: marker - - -Response Parameters -------------------- - -.. 
rest_parameters:: parameters.yaml - - - status: status_1 - - description: description - - availability_zone: availability_zone - - created_at: created_at - - volume_types: volume_types - - id: id - - name: name - -Response Example ----------------- - -.. literalinclude:: ./samples/consistency-groups-list-detailed-response.json - :language: javascript - - -Update a consistency group -~~~~~~~~~~~~~~~~~~~~~~~~~~ - -.. rest_method:: PUT /v3/{project_id}/consistencygroups/{consistencygroup_id}/update - -Updates a consistency group. - -Normal response codes: 202 - - -Request -------- - -.. rest_parameters:: parameters.yaml - - - remove_volumes: remove_volumes - - description: description - - add_volumes: add_volumes - - name: name - - project_id: project_id_path - - consistencygroup_id: consistencygroup_id - -Request Example ---------------- - -.. literalinclude:: ./samples/consistency-group-update-request.json - :language: javascript diff --git a/api-ref/source/v3/ext-backups-actions-v3.inc b/api-ref/source/v3/ext-backups-actions-v3.inc deleted file mode 100644 index 0c3f35d4c..000000000 --- a/api-ref/source/v3/ext-backups-actions-v3.inc +++ /dev/null @@ -1,67 +0,0 @@ -.. -*- rst -*- - -Backup actions (backups, action) -================================ - -Force-deletes a backup and reset status for a backup. - - -Force-delete a backup -~~~~~~~~~~~~~~~~~~~~~ - -.. rest_method:: POST /v3/{project_id}/backups/{backup_id}/action - -Force-deletes a backup. Specify the ``os-force_delete`` action in the request body. - -This operations deletes the backup and any backup data. - -The backup driver returns the ``405`` status code if it does not -support this operation. - -Normal response codes: 202 -Error response codes: itemNotFound(404), badMethod(405) - -Request -------- - -.. rest_parameters:: parameters.yaml - - - os-force_delete: os-force_delete - - project_id: project_id_path - - backup_id: backup_id - -Request Example ---------------- - -.. 
literalinclude:: ./samples/backup-force-delete-request.json - :language: javascript - - - -Reset a backup's status -~~~~~~~~~~~~~~~~~~~~~~~ - -.. rest_method:: POST /v3/{project_id}/backups/{backup_id}/action - -Reset a backup's status. Specify the ``os-reset_status`` action in the request body. - -Normal response codes: 202 -Error response codes: badRequest(400), itemNotFound(404) - - -Request -------- - -.. rest_parameters:: parameters.yaml - - - status: status_10 - - os-reset_status: os-reset_status - - project_id: project_id_path - - backup_id: backup_id - -Request Example ---------------- - -.. literalinclude:: ./samples/backup-reset-status-request.json - :language: javascript - diff --git a/api-ref/source/v3/ext-backups.inc b/api-ref/source/v3/ext-backups.inc deleted file mode 100644 index 1c0ce302f..000000000 --- a/api-ref/source/v3/ext-backups.inc +++ /dev/null @@ -1,336 +0,0 @@ -.. -*- rst -*- - -Backups (backups) -================= - -A backup is a full copy of a volume stored in an external service. -The service can be configured. The only supported service is Object -Storage. A backup can subsequently be restored from the external -service to either the same volume that the backup was originally -taken from or to a new volume. Backup and restore operations can -only be carried out on volumes that are in an unattached and -available state. - -When you create, list, or delete backups, these status values are -possible: - -**Backup statuses** - -+-----------------+---------------------------------------------+ -| Status | Description | -+-----------------+---------------------------------------------+ -| creating | The backup is being created. | -+-----------------+---------------------------------------------+ -| available | The backup is ready to restore to a volume. | -+-----------------+---------------------------------------------+ -| deleting | The backup is being deleted. 
| -+-----------------+---------------------------------------------+ -| error | A backup error occurred. | -+-----------------+---------------------------------------------+ -| restoring | The backup is being restored to a volume. | -+-----------------+---------------------------------------------+ -| error_restoring | A backup restoration error occurred. | -+-----------------+---------------------------------------------+ - - -If an error occurs, you can find more information about the error -in the ``fail_reason`` field for the backup. - - -List backups with detail -~~~~~~~~~~~~~~~~~~~~~~~~ - -.. rest_method:: GET /v3/{project_id}/backups/detail - -Lists Block Storage backups, with details, to which the project has access, -since v3.31 if non-admin users specify invalid filters in the url, -API will return bad request. - - -Normal response codes: 200 -Error response codes: badRequest(400) - - -Request -------- - -.. rest_parameters:: parameters.yaml - - - project_id: project_id_path - - sort_key: sort_key - - sort_dir: sort_dir - - limit: limit - - offset: offset - - marker: marker - - -Response Parameters -------------------- - -.. rest_parameters:: parameters.yaml - - - status: status_4 - - object_count: object_count - - fail_reason: fail_reason - - description: description - - links: links - - availability_zone: availability_zone - - created_at: created_at - - updated_at: updated_at - - name: name - - has_dependent_backups: has_dependent_backups - - volume_id: volume_id - - container: container - - backups: backups - - size: size - - id: id - - is_incremental: is_incremental - - data_timestamp: data_timestamp - - snapshot_id: snapshot_id_2 - - os-backup-project-attr:project_id: os-backup-project-attr:project_id - -Response Example ----------------- - -.. literalinclude:: ./samples/backups-list-detailed-response.json - :language: javascript - - -Show backup detail -~~~~~~~~~~~~~~~~~~ - -.. 
rest_method:: GET /v3/{project_id}/backups/{backup_id} - -Shows details for a backup. - - -Normal response codes: 200 -Error response codes: - - -Request -------- - -.. rest_parameters:: parameters.yaml - - - project_id: project_id_path - - backup_id: backup_id - - -Response Parameters -------------------- - -.. rest_parameters:: parameters.yaml - - - status: status_4 - - object_count: object_count - - container: container - - description: description - - links: links - - availability_zone: availability_zone - - created_at: created_at - - updated_at: updated_at - - name: name - - has_dependent_backups: has_dependent_backups - - volume_id: volume_id - - fail_reason: fail_reason - - size: size - - backup: backup - - id: id - - is_incremental: is_incremental - - data_timestamp: data_timestamp - - snapshot_id: snapshot_id_2 - - os-backup-project-attr:project_id: os-backup-project-attr:project_id - -Response Example ----------------- - -.. literalinclude:: ./samples/backup-show-response.json - :language: javascript - - -Delete a backup -~~~~~~~~~~~~~~~ - -.. rest_method:: DELETE /v3/{project_id}/backups/{backup_id} - -Deletes a backup. - -Normal response codes: 202, -Error response codes: Bad Request(400) - -Request -------- - -.. rest_parameters:: parameters.yaml - - - project_id: project_id_path - - backup_id: backup_id - - -Restore a backup -~~~~~~~~~~~~~~~~ - -.. rest_method:: POST /v3/{project_id}/backups/{backup_id}/restore - -Restores a Block Storage backup to an existing or new Block Storage volume. - -You must specify either the UUID or name of the volume. If you -specify both the UUID and name, the UUID takes priority. - -Normal response codes: 202, -Error response codes: Bad Request(400), Request Entity Too Large(413) - - -Request -------- - -.. rest_parameters:: parameters.yaml - - - restore: restore - - name: name - - volume_id: volume_id - - project_id: project_id_path - - backup_id: backup_id - -Request Example ---------------- - -.. 
literalinclude:: ./samples/backup-restore-request.json - :language: javascript - -Response Parameters -------------------- - -.. rest_parameters:: parameters.yaml - - - restore: restore - - backup_id: backup_id - - volume_id: volume_id - - volume_name: volume_name - -Create a backup -~~~~~~~~~~~~~~~ - -.. rest_method:: POST /v3/{project_id}/backups - -Creates a Block Storage backup from a volume. - -Normal response codes: 202, -Error response codes: Bad Request(400), Internal Server Error(500) - -Request -------- - -.. rest_parameters:: parameters.yaml - - - container: container - - description: description - - incremental: incremental - - volume_id: volume_id - - force: force - - backup: backup - - name: name_optional - - snapshot_id: snapshot_id_2 - - project_id: project_id_path - -Request Example ---------------- - -.. literalinclude:: ./samples/backup-create-request.json - :language: javascript - -Response Parameters -------------------- - -.. rest_parameters:: parameters.yaml - - - backup: backup - - id: id - - links: links - - name: name - - -Update a backup -~~~~~~~~~~~~~~~ - -.. rest_method:: PUT /v3/{project_id}/backups/{backup_id} - -Update a Block Storage backup. This API is available since v3.9. - -Normal response codes: 202, -Error response codes: Bad Request(400) - -Request -------- - -.. rest_parameters:: parameters.yaml - - - project_id: project_id_path - - backup_id: backup_id - - backup: backup - - description: description - - name: name_optional - - -Request Example ---------------- - -.. literalinclude:: ./samples/backup-update-request.json - :language: javascript - -Response Parameters -------------------- - -.. rest_parameters:: parameters.yaml - - - backup: backup - - id: id - - links: links - - name: name - -Response Example ----------------- - -.. literalinclude:: ./samples/backup-update-response.json - :language: javascript - - -List backups for project -~~~~~~~~~~~~~~~~~~~~~~~~ - -.. 
rest_method:: GET /v3/{project_id}/backups - -Lists Block Storage backups to which the project has access, -since v3.31 if non-admin users specify invalid filters in the -url, API will return bad request. - -Normal response codes: 200 -Error response codes: badRequest(400) - -Request -------- - -.. rest_parameters:: parameters.yaml - - - project_id: project_id_path - - sort_key: sort_key - - sort_dir: sort_dir - - limit: limit - - marker: marker - -Response Parameters -------------------- - -.. rest_parameters:: parameters.yaml - - - backups: backups - - id: id - - links: links - - name: name - -Response Example ----------------- - -.. literalinclude:: ./samples/backups-list-response.json - :language: javascript diff --git a/api-ref/source/v3/group-snapshots.inc b/api-ref/source/v3/group-snapshots.inc deleted file mode 100644 index 0b8ad170b..000000000 --- a/api-ref/source/v3/group-snapshots.inc +++ /dev/null @@ -1,220 +0,0 @@ -.. -*- rst -*- - -Group snapshots -=============== - -Lists all, lists all with details, shows details for, creates, and -deletes group snapshots. - - -Delete group snapshot -~~~~~~~~~~~~~~~~~~~~~ - -.. rest_method:: DELETE /v3/{project_id}/group_snapshots/{group_snapshot_id} - -Deletes a group snapshot. - -Normal response codes: 202 -Error response codes: badRequest(400), itemNotFound(404) - - -Request -------- - -.. rest_parameters:: parameters.yaml - - - project_id: project_id_path - - group_snapshot_id: group_snapshot_id - - -Show group snapshot details -~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -.. rest_method:: GET /v3/{project_id}/group_snapshots/{group_snapshot_id} - -Shows details for a group snapshot. - - -Normal response codes: 200 -Error response codes: badRequest(400), itemNotFound(404) - - -Request -------- - -.. rest_parameters:: parameters.yaml - - - project_id: project_id_path - - group_snapshot_id: group_snapshot_id - - -Response Parameters -------------------- - -.. 
rest_parameters:: parameters.yaml - - - status: status_7 - - description: description - - created_at: created_at - - group_id: group_id - - id: id - - name: name - -Response Example ----------------- - -.. literalinclude:: ./samples/group-snapshots-show-response.json - :language: javascript - - -List group snapshots with details -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -.. rest_method:: GET /v3/{project_id}/group_snapshots/detail - -Lists all group snapshots with details, since v3.31 if non-admin -users specify invalid filters in the url, API will return bad request. - - -Normal response codes: 200 -Error response codes: badRequest(400) - -Request -------- - -.. rest_parameters:: parameters.yaml - - - project_id: project_id_path - - sort_key: sort_key_group_snapshot - - sort_dir: sort_dir_group_snapshot - - limit: limit_group_snapshot - - offset: offset_group_snapshot - - marker: marker_group_snapshot - - -Response Parameters -------------------- - -.. rest_parameters:: parameters.yaml - - - status: status_7 - - description: description - - created_at: created_at - - group_id: group_id - - id: id - - name: name - -Response Example ----------------- - -.. literalinclude:: ./samples/group-snapshots-list-detailed-response.json - :language: javascript - -List group snapshots -~~~~~~~~~~~~~~~~~~~~ - -.. rest_method:: GET /v3/{project_id}/group_snapshots - -Lists all group snapshots, since v3.31 if non-admin users -specify invalid filters in the url, API will return bad request. - - -Normal response codes: 200 -Error response codes: badRequest(400) - - -Request -------- - -.. rest_parameters:: parameters.yaml - - - project_id: project_id_path - - sort_key: sort_key_group_snapshot - - sort_dir: sort_dir_group_snapshot - - limit: limit_group_snapshot - - offset: offset_group_snapshot - - marker: marker_group_snapshot - - -Response Parameters -------------------- - -.. rest_parameters:: parameters.yaml - - - id: id - - name: name - - -Response Example ----------------- - -.. 
literalinclude:: ./samples/group-snapshots-list-response.json - :language: javascript - - -Create group snapshot -~~~~~~~~~~~~~~~~~~~~~ - -.. rest_method:: POST /v3/{project_id}/group_snapshots - -Creates a group snapshot. - -Normal response codes: 202 -Error response codes: badRequest(400), itemNotFound(404) - - -Request -------- - -.. rest_parameters:: parameters.yaml - - - name: name - - description: description - - group_id: group_id - - project_id: project_id_path - -Request Example ---------------- - -.. literalinclude:: ./samples/group-snapshots-create-request.json - :language: javascript - -Response Parameters -------------------- - -.. rest_parameters:: parameters.yaml - - - status: status_7 - - description: description - - created_at: created_at - - group_id: group_id - - id: id - - name: name - - -Reset group snapshot status -~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -.. rest_method:: POST /v3/{project_id}/group_snapshots/{group_snapshot_id}/action - -Resets the status for a group snapshot. Specifies the ``reset_status`` action in the request body. - -Normal response codes: 202 -Error response codes: badRequest(400), itemNotFound(404) - - -Request -------- - -.. rest_parameters:: parameters.yaml - - - reset_status: reset_status - - status: status - - project_id: project_id - - group_snapshot_id: group_snapshot_id - -Request Example ---------------- - -.. literalinclude:: ./samples/group-snapshot-reset-status-request.json - :language: javascript diff --git a/api-ref/source/v3/group-type-specs.inc b/api-ref/source/v3/group-type-specs.inc deleted file mode 100644 index b284ffcfb..000000000 --- a/api-ref/source/v3/group-type-specs.inc +++ /dev/null @@ -1,191 +0,0 @@ -.. -*- rst -*- - -Group type specs -================ - -Create group specs for a group type -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -.. 
rest_method:: POST /v3/{project_id}/group_types/{group_type_id}/group_specs - -Create group specs for a group type, if the specification key already exists in group specs, -this API will update the specification as well. - - -Normal response codes: 202 -Error response codes: badRequest(400), forbidden(403), itemNotFound(404) - - -Request -------- - -.. rest_parameters:: parameters.yaml - - - group_specs: group_specs_2 - - group_type_id: group_type_id_1 - - project_id: project_id_path - -Request Example ---------------- - -.. literalinclude:: ./samples/group-type-specs-create-request.json - :language: javascript - - - -Response Parameters -------------------- - -.. rest_parameters:: parameters.yaml - - - group_specs: group_specs_2 - - -Response Example ----------------- - -.. literalinclude:: ./samples/group-type-specs-create-response.json - :language: javascript - - -List type specs -================ - -List group specs for a group type -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -.. rest_method:: GET /v3/{project_id}/group_types/{group_type_id}/group_specs - -List all the group specs for a group type, - - -Normal response codes: 200 -Error response codes: itemNotFound(404), forbidden(403) - - -Request -------- - -.. rest_parameters:: parameters.yaml - - - group_type_id: group_type_id_1 - - project_id: project_id_path - - -Response Parameters -------------------- - -.. rest_parameters:: parameters.yaml - - - group_specs: group_specs_2 - - -Response Example ----------------- - -.. literalinclude:: ./samples/group-type-specs-list-response.json - :language: javascript - - -Show type spec -============== - -Show one specific group spec for a group type -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -.. rest_method:: GET /v3/{project_id}/group_types/{group_type_id}/group_specs/{spec_id} - -Show a group spec for a group type, - - -Normal response codes: 200 -Error response codes: itemNotFound(404), forbidden(403) - - -Request -------- - -.. 
rest_parameters:: parameters.yaml - - - group_type_id: group_type_id_1 - - project_id: project_id_path - - spec_id: spec_id - - -Response Parameters -------------------- - -.. rest_parameters:: parameters.yaml - - - spec: spec_value - -Response Example ----------------- - -.. literalinclude:: ./samples/group-type-specs-show-response.json - :language: javascript - - -Update type spec -================ - -Update one specific group spec for a group type -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -.. rest_method:: PUT /v3/{project_id}/group_types/{group_type_id}/group_specs/{spec_id} - -Update a group spec for a group type, - - -Normal response codes: 202 -Error response codes: itemNotFound(404), forbidden(403) - - -Request -------- - -.. rest_parameters:: parameters.yaml - - - group_type_id: group_type_id_1 - - project_id: project_id_path - - spec_id: spec_id - - spec: spec_value - - -Response Parameters -------------------- - -.. rest_parameters:: parameters.yaml - - - spec: spec_value - -Response Example ----------------- - -.. literalinclude:: ./samples/group-type-specs-update-response.json - :language: javascript - - -Delete type spec -================ - -Delete one specific group spec for a group type -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -.. rest_method:: DELETE /v3/{project_id}/group_types/{group_type_id}/group_specs/{spec_id} - -Delete a group spec for a group type, - - -Normal response codes: 202 -Error response codes: itemNotFound(404), forbidden(403) - - -Request -------- - -.. rest_parameters:: parameters.yaml - - - group_type_id: group_type_id_1 - - project_id: project_id_path - - spec_id: spec_id diff --git a/api-ref/source/v3/group-types.inc b/api-ref/source/v3/group-types.inc deleted file mode 100644 index 6cebbac96..000000000 --- a/api-ref/source/v3/group-types.inc +++ /dev/null @@ -1,202 +0,0 @@ -.. -*- rst -*- - -Group types -=========== - - -Update group type -~~~~~~~~~~~~~~~~~ - -.. 
rest_method:: PUT /v3/{project_id}/group_types/{group_type_id} - -Updates a group type. - -To create a generic volume group, you must specify a group type. - -Normal response codes: 200 -Error response codes: badRequest(400), forbidden(403), itemNotFound(404), -conflict(409), computeFault(500) - - -Request -------- - -.. rest_parameters:: parameters.yaml - - - group_type: group_type - - group_type_id: group_type_id - - project_id: project_id_path - -Request Example ---------------- - -.. literalinclude:: ./samples/group-type-update-request.json - :language: javascript - - -Response Parameters -------------------- - -.. rest_parameters:: parameters.yaml - - - is_public: is_public - - group_specs: group_specs - - description: description - - group_type: group_type - - name: name - -Response Example ----------------- - -.. literalinclude:: ./samples/group-type-show-response.json - :language: javascript - - -Show group type details -~~~~~~~~~~~~~~~~~~~~~~~ - -.. rest_method:: GET /v3/{project_id}/group_types/{group_type_id} - -Shows details for a group type. - - -Normal response codes: 200 -Error response codes: itemNotFound(404) - -Request -------- - -.. rest_parameters:: parameters.yaml - - - group_type_id: group_type_id - - project_id: project_id_path - - -Response Parameters -------------------- - -.. rest_parameters:: parameters.yaml - - - is_public: is_public - - group_specs: group_specs - - description: description - - group_type: group_type - - name: name - - -Response Example ----------------- - -.. literalinclude:: ./samples/group-type-show-response.json - :language: javascript - - -Delete group type -~~~~~~~~~~~~~~~~~ - -.. rest_method:: DELETE /v3/{project_id}/group_types/{group_type_id} - -Deletes a group type. - -Normal response codes: 202 -Error response codes: badRequest(400), forbidden(403), itemNotFound(404) - - -Request -------- - -.. 
rest_parameters:: parameters.yaml - - - group_type_id: group_type_id - - project_id: project_id_path - - -List group types -~~~~~~~~~~~~~~~~ - -.. rest_method:: GET /v3/{project_id}/group_types - -Lists group types. - - -Normal response codes: 200 -Error response codes: - - -Request -------- - -.. rest_parameters:: parameters.yaml - - - project_id: project_id_path - - sort_key: sort_key - - sort_dir: sort_dir - - limit: limit - - offset: offset - - marker: marker - - -Response Parameters -------------------- - -.. rest_parameters:: parameters.yaml - - - group_types: group_types - - group_specs: group_specs - - name: name - - group_type: group_type - -Response Example ----------------- - -.. literalinclude:: ./samples/group-types-list-response.json - :language: javascript - - -Create group type -~~~~~~~~~~~~~~~~~ - -.. rest_method:: POST /v3/{project_id}/group_types - -Creates a group type. - -To create a generic volume group, you must specify a group type. - - -Normal response codes: 202 -Error response codes: badRequest(400), forbidden(403), itemNotFound(404), -conflict(409) - - -Request -------- - -.. rest_parameters:: parameters.yaml - - - group_type: group_type - - project_id: project_id_path - -Request Example ---------------- - -.. literalinclude:: ./samples/group-type-create-request.json - :language: javascript - - -Response Parameters -------------------- - -.. rest_parameters:: parameters.yaml - - - is_public: is_public - - group_specs: group_specs - - description: description - - group_type: group_type - - name: name - - -Response Example ----------------- - -.. literalinclude:: ./samples/group-type-show-response.json - :language: javascript diff --git a/api-ref/source/v3/groups.inc b/api-ref/source/v3/groups.inc deleted file mode 100644 index a83b8ab0b..000000000 --- a/api-ref/source/v3/groups.inc +++ /dev/null @@ -1,288 +0,0 @@ -.. 
-*- rst -*- - -Generic volume groups -===================== - -Generic volume groups enable you to create a group of volumes and -manage them together. - -How is generic volume groups different from consistency groups? -Currently consistency groups in cinder only support consistent group -snapshot. It cannot be extended easily to serve other purposes. A project -may want to put volumes used in the same application together in a group -so that it is easier to manage them together, and this group of volumes -may or may not support consistent group snapshot. Generic volume group -is introduced to solve this problem. By decoupling the tight relationship -between the group construct and the consistency concept, generic volume -groups can be extended to support other features in the future. - - -List groups -~~~~~~~~~~~ - -.. rest_method:: GET /v3/{project_id}/groups - -Lists groups, since v3.31 if non-admin users specify -invalid filters in the url, API will return bad request. - - -Normal response codes: 200 -Error response codes: badRequest(400) - - -Request -------- - -.. rest_parameters:: parameters.yaml - - - project_id: project_id_path - - sort_key: sort_key - - sort_dir: sort_dir - - limit: limit - - offset: offset - - marker: marker - - -Response Parameters -------------------- - -.. rest_parameters:: parameters.yaml - - - id: id - - name: name - -Response Example ----------------- - -.. literalinclude:: ./samples/groups-list-response.json - :language: javascript - - -Create group -~~~~~~~~~~~~ - -.. rest_method:: POST /v3/{project_id}/groups - -Creates a group. - -Normal response codes: 202 -Error response codes: badRequest(400), itemNotFound(404) - - -Request -------- - -.. rest_parameters:: parameters.yaml - - - description: description - - availability_zone: availability_zone - - group_type: group_type - - volume_types: volume_types - - name: name - - project_id: project_id_path - -Request Example ---------------- - -.. 
literalinclude:: ./samples/group-create-request.json - :language: javascript - - -Show group details -~~~~~~~~~~~~~~~~~~ - -.. rest_method:: GET /v3/{project_id}/groups/{group_id} - -Shows details for a group. - -Normal response codes: 200 -Error response codes: itemNotFound(404) - -Request -------- - -.. rest_parameters:: parameters.yaml - - - project_id: project_id_path - - group_id: group_id - -Response Parameters -------------------- - -.. rest_parameters:: parameters.yaml - - - status: status_8 - - description: description - - availability_zone: availability_zone - - created_at: created_at - - group_type: group_type - - volume_types: volume_types - - id: id - - name: name - -Response Example ----------------- - -.. literalinclude:: ./samples/group-show-response.json - :language: javascript - - -Create group from source -~~~~~~~~~~~~~~~~~~~~~~~~ - -.. rest_method:: POST /v3/{project_id}/groups/action - -Creates a group from source. - -Normal response codes: 202 -Error response codes: badRequest(400), itemNotFound(404) - - -Request -------- - -.. rest_parameters:: parameters.yaml - - - description: description - - group_snapshot_id: group_snapshot_id - - source_group_id: source_group_id - - name: name - - project_id: project_id_path - -Request Example ---------------- - -.. literalinclude:: ./samples/group-create-from-src-request.json - :language: javascript - - -Delete group -~~~~~~~~~~~~ - -.. rest_method:: POST /v3/{project_id}/groups/{group_id}/action - -Deletes a group. - -Normal response codes: 202 -Error response codes: badRequest(400), itemNotFound(404) - - -Request -------- - -.. rest_parameters:: parameters.yaml - - - delete-volumes: delete-volumes - - project_id: project_id_path - - group_id: group_id - -Request Example ---------------- - -.. literalinclude:: ./samples/group-delete-request.json - :language: javascript - - -List groups with details -~~~~~~~~~~~~~~~~~~~~~~~~ - -.. 
rest_method:: GET /v3/{project_id}/groups/detail - -Lists groups with details, since v3.31 if non-admin -users specify invalid filters in the url, API will return bad request. - - -Normal response codes: 200 -Error response codes: badRequest(400) - - -Request -------- - -.. rest_parameters:: parameters.yaml - - - project_id: project_id_path - - sort_key: sort_key - - sort_dir: sort_dir - - limit: limit - - offset: offset - - marker: marker - - -Response Parameters -------------------- - -.. rest_parameters:: parameters.yaml - - - status: status_8 - - description: description - - availability_zone: availability_zone - - created_at: created_at - - group_type: group_type - - volume_types: volume_types - - id: id - - name: name - -Response Example ----------------- - -.. literalinclude:: ./samples/groups-list-detailed-response.json - :language: javascript - - -Update group -~~~~~~~~~~~~ - -.. rest_method:: PUT /v3/{project_id}/groups/{group_id} - -Updates a group. - -Normal response codes: 202 -Error response codes: badRequest(400), itemNotFound(404) - - -Request -------- - -.. rest_parameters:: parameters.yaml - - - remove_volumes: remove_volumes - - description: description - - add_volumes: add_volumes - - name: name - - project_id: project_id_path - - group_id: group_id - -Request Example ---------------- - -.. literalinclude:: ./samples/group-update-request.json - :language: javascript - - -Reset group status -~~~~~~~~~~~~~~~~~~ - -.. rest_method:: POST /v3/{project_id}/groups/{group_id}/action - -Resets the status for a group. Specify the ``reset_status`` action in the request body. - -Normal response codes: 202 -Error response codes: badRequest(400), itemNotFound(404) - - -Request -------- - -.. rest_parameters:: parameters.yaml - - - reset_status: reset_status - - status: status - - project_id: project_id - - group_id: group_id - -Request Example ---------------- - -.. 
literalinclude:: ./samples/group-reset-status-request.json - :language: javascript diff --git a/api-ref/source/v3/hosts.inc b/api-ref/source/v3/hosts.inc deleted file mode 100644 index 0193a134e..000000000 --- a/api-ref/source/v3/hosts.inc +++ /dev/null @@ -1,81 +0,0 @@ -.. -*- rst -*- - -Hosts extension (os-hosts) -==================================== - -Administrators only, depending on policy settings. - -Lists, shows hosts. - - -List all hosts for a project -~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -.. rest_method:: GET /v3/{admin_project_id}/os-hosts - -Lists all hosts summary info that is not disabled. - -Normal response codes: 200 -Error response codes: - -Request -------- - -.. rest_parameters:: parameters.yaml - - - admin_project_id: admin_project_id - - -Response Parameters -------------------- - -.. rest_parameters:: parameters.yaml - - - service-status: service_status - - service: host_service - - zone: availability_zone_3 - - service-state: service_state - - host_name: hostname - - last-update: updated_at - -Response Example ----------------- - -.. literalinclude:: ./samples/hosts-list-response.json - :language: javascript - -Show Host Details for a project -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -.. rest_method:: GET /v3/{admin_project_id}/os-hosts/{host_name} - -Shows details for a host. - -Normal response codes: 200 - -Error response codes: unauthorized(401), forbidden(403), itemNotFound(404) - -Request -------- - -.. rest_parameters:: parameters.yaml - - - admin_project_id: admin_project_id - - host_name: hostname - -Response --------- - -.. rest_parameters:: parameters.yaml - - - volume_count: total_count - - total_volume_gb: totalGigabytesUsed - - total_snapshot_gb: totalSnapshotsUsed - - project: admin_project_id - - host: host - - snapshot_count: totalSnapshotsUsed - -**Example Show Host Details** - -.. 
literalinclude:: ./samples/hosts-get-response.json - :language: javascript diff --git a/api-ref/source/v3/index.rst b/api-ref/source/v3/index.rst deleted file mode 100644 index a4b524bb4..000000000 --- a/api-ref/source/v3/index.rst +++ /dev/null @@ -1,50 +0,0 @@ -:tocdepth: 2 - -============================== -Block Storage API V3 (CURRENT) -============================== - -.. rest_expand_all:: - -.. First thing we want to see is the version discovery document. -.. include:: api-versions.inc -.. include:: volumes-v3-versions.inc - -.. Next top-level thing could be listing extensions available on this endpoint. -.. include:: volumes-v3-extensions.inc - -.. To create a volume, I might need a volume type, so list those next. -.. include:: volumes-v3-types.inc -.. include:: volume-type-access.inc - -.. Now my primary focus is on volumes and what I can do with them. -.. include:: volumes-v3-volumes.inc -.. include:: volumes-v3-volumes-actions.inc - -.. List the other random volume APIs in just alphabetical order. -.. include:: os-vol-image-meta-v3.inc -.. include:: volume-manage.inc -.. include:: volumes-v3-snapshots.inc -.. include:: snapshot-manage.inc -.. include:: os-vol-transfer-v3.inc - -.. Now the other random things in alphabetical order. -.. include:: attachments.inc -.. include:: os-vol-pool-v3.inc -.. include:: ext-backups.inc -.. include:: ext-backups-actions-v3.inc -.. include:: capabilities-v3.inc -.. include:: consistencygroups-v3.inc -.. include:: os-cgsnapshots-v3.inc -.. include:: groups.inc -.. include:: group-snapshots.inc -.. include:: group-types.inc -.. include:: group-type-specs.inc -.. include:: hosts.inc -.. include:: limits.inc -.. include:: messages.inc -.. include:: resource-filters.inc -.. include:: qos-specs-v3-qos-specs.inc -.. quota-sets should arguably live closer to limits, but that would mess up - our nice alphabetical ordering -.. 
include:: quota-sets.inc diff --git a/api-ref/source/v3/limits.inc b/api-ref/source/v3/limits.inc deleted file mode 100644 index 05851b0ba..000000000 --- a/api-ref/source/v3/limits.inc +++ /dev/null @@ -1,56 +0,0 @@ -.. -*- rst -*- - -Limits (limits) -=============== - -Shows absolute limits for a project. - -An absolute limit value of ``-1`` indicates that the absolute limit -for the item is infinite. - - -Show absolute limits for project -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -.. rest_method:: GET /v3/{project_id}/limits - -Shows absolute limits for a project. - -An absolute limit value of ``-1`` indicates that the absolute limit -for the item is infinite. - - -Normal response codes: 200 -Error response codes:203, - -Request -------- - -.. rest_parameters:: parameters.yaml - - - project_id: project_id_path - -Response Parameters -------------------- - -.. rest_parameters:: parameters.yaml - - - totalSnapshotsUsed: totalSnapshotsUsed - - maxTotalBackups: maxTotalBackups - - maxTotalVolumeGigabytes: maxTotalVolumeGigabytes - - limits: limits - - maxTotalSnapshots: maxTotalSnapshots - - maxTotalBackupGigabytes: maxTotalBackupGigabytes - - totalBackupGigabytesUsed: totalBackupGigabytesUsed - - maxTotalVolumes: maxTotalVolumes - - totalVolumesUsed: totalVolumesUsed - - rate: rate - - totalBackupsUsed: totalBackupsUsed - - totalGigabytesUsed: totalGigabytesUsed - - absolute: absolute - -Response Example ----------------- - -.. literalinclude:: ./samples/limits-show-response.json - :language: javascript diff --git a/api-ref/source/v3/messages.inc b/api-ref/source/v3/messages.inc deleted file mode 100644 index 74a6ed653..000000000 --- a/api-ref/source/v3/messages.inc +++ /dev/null @@ -1,118 +0,0 @@ -.. -*- rst -*- - -Messages -======== - -Lists all, shows, and deletes messages. - - -Delete message -~~~~~~~~~~~~~~ - -.. rest_method:: DELETE /v3/{project_id}/messages/{message_id} - -Deletes a message. 
- -Normal response codes: 202 -Error response codes: badRequest(400), itemNotFound(404) - - -Request -------- - -.. rest_parameters:: parameters.yaml - - - project_id: project_id_path - - message_id: message_id - - -Show message details -~~~~~~~~~~~~~~~~~~~~ - -.. rest_method:: GET /v3/{project_id}/messages/{message_id} - -Shows details for a message. - - -Normal response codes: 200 -Error response codes: badRequest(400), itemNotFound(404) - - -Request -------- - -.. rest_parameters:: parameters.yaml - - - project_id: project_id_path - - message_id: message_id - - -Response Parameters -------------------- - -.. rest_parameters:: parameters.yaml - - - request_id: request_id - - links: links_5 - - message_level: message_level - - event_id: event_id - - created_at: created_at - - guaranteed_until: guaranteed_until - - resource_uuid: resource_uuid - - id: id_8 - - resource_type: resource_type - - user_message: user_message - -Response Example ----------------- - -.. literalinclude:: ./samples/messages-show-response.json - :language: javascript - - -List messages -~~~~~~~~~~~~~ - -.. rest_method:: GET /v3/{project_id}/messages - -Lists all messages, since v3.31 if non-admin users -specify invalid filters in the url, API will return bad request. - - -Normal response codes: 200 -Error response codes: badRequest(400) - - -Request -------- - -.. rest_parameters:: parameters.yaml - - - project_id: project_id_path - - sort_key: sort_key - - sort_dir: sort_dir - - limit: limit - - offset: offset - - marker: marker - -Response Parameters -------------------- - -.. rest_parameters:: parameters.yaml - - - request_id: request_id - - links: links_5 - - message_level: message_level - - event_id: event_id - - created_at: created_at - - guaranteed_until: guaranteed_until - - resource_uuid: resource_uuid - - id: id_8 - - resource_type: resource_type - - user_message: user_message - -Response Example ----------------- - -.. 
literalinclude:: ./samples/messages-list-response.json - :language: javascript diff --git a/api-ref/source/v3/os-cgsnapshots-v3.inc b/api-ref/source/v3/os-cgsnapshots-v3.inc deleted file mode 100644 index a665aa940..000000000 --- a/api-ref/source/v3/os-cgsnapshots-v3.inc +++ /dev/null @@ -1,178 +0,0 @@ -.. -*- rst -*- - -Consistency group snapshots -=========================== - -Lists all, lists all with details, shows details for, creates, and -deletes consistency group snapshots. - - -Delete a consistency group snapshot -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -.. rest_method:: DELETE /v3/{project_id}/cgsnapshots/{cgsnapshot_id} - -Deletes a consistency group snapshot. - -Normal response codes: 202 - - -Request -------- - -.. rest_parameters:: parameters.yaml - - - project_id: project_id_path - - cgsnapshot_id: cgsnapshot_id - - -Show consistency group snapshot detail -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -.. rest_method:: GET /v3/{project_id}/cgsnapshots/{cgsnapshot_id} - -Shows details for a consistency group snapshot. - - -Normal response codes: 200 -Error response codes: - - -Request -------- - -.. rest_parameters:: parameters.yaml - - - project_id: project_id_path - - cgsnapshot_id: cgsnapshot_id - - -Response Parameters -------------------- - -.. rest_parameters:: parameters.yaml - - - status: status - - description: description - - created_at: created_at - - consistencygroup_id: consistencygroup_id - - id: id - - name: name - -Response Example ----------------- - -.. literalinclude:: ./samples/cgsnapshots-show-response.json - :language: javascript - - -List all consistency group snapshots with details -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -.. rest_method:: GET /v3/{project_id}/cgsnapshots/detail - -Lists all consistency group snapshots with details. - - -Normal response codes: 200 -Error response codes: - -Request -------- - -.. 
rest_parameters:: parameters.yaml - - - project_id: project_id_path - - -Response Parameters -------------------- - -.. rest_parameters:: parameters.yaml - - - status: status - - description: description - - created_at: created_at - - consistencygroup_id: consistencygroup_id - - id: id - - name: name - -Response Example ----------------- - -.. literalinclude:: ./samples/cgsnapshots-list-detailed-response.json - :language: javascript - -List all consistency group snapshots -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -.. rest_method:: GET /v3/{project_id}/cgsnapshots - -Lists all consistency group snapshots. - - -Normal response codes: 200 -Error response codes: - - -Request -------- - -.. rest_parameters:: parameters.yaml - - - project_id: project_id_path - - -Response Parameters -------------------- - -.. rest_parameters:: parameters.yaml - - - id: id - - name: name - - - -Response Example ----------------- - -.. literalinclude:: ./samples/cgsnapshots-list-response.json - :language: javascript - - - - -Create a consistency group snapshot -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -.. rest_method:: POST /v3/{project_id}/cgsnapshots - -Creates a consistency group snapshot. - -Normal response codes: 202 - -Request -------- - -.. rest_parameters:: parameters.yaml - - - name: name - - project_id: project_id_path - -Request Example ---------------- - -.. literalinclude:: ./samples/cgsnapshots-create-request.json - :language: javascript - -Response Parameters -------------------- - -.. rest_parameters:: parameters.yaml - - - status: status - - description: description - - created_at: created_at - - consistencygroup_id: consistencygroup_id - - id: id - - name: name diff --git a/api-ref/source/v3/os-vol-image-meta-v3.inc b/api-ref/source/v3/os-vol-image-meta-v3.inc deleted file mode 100644 index b3d83abdd..000000000 --- a/api-ref/source/v3/os-vol-image-meta-v3.inc +++ /dev/null @@ -1,45 +0,0 @@ -.. 
-*- rst -*- - -Volume image metadata extension (os-vol-image-meta) -=================================================== - -Shows image metadata that is associated with a volume. - - -Show image metadata for a volume -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -.. rest_method:: GET /v3/{project_id}/os-vol-image-meta - -Shows image metadata for a volume. - -When the request is made, the caller must specify a reference to an -existing storage volume in the ``ref`` element. Each storage driver -may interpret the existing storage volume reference differently but -should accept a reference structure containing either a ``source- -volume-id`` or ``source-volume-name`` element, if possible. - -Normal response codes: 202 - - -Request -------- - -.. rest_parameters:: parameters.yaml - - - description: description - - availability_zone: availability_zone - - bootable: bootable - - volume_type: volume_type - - name: name - - volume: volume - - host: host - - ref: ref - - metadata: metadata - - project_id: project_id_path - -Request Example ---------------- - -.. literalinclude:: ./samples/image-metadata-show-request.json - :language: javascript diff --git a/api-ref/source/v3/os-vol-pool-v3.inc b/api-ref/source/v3/os-vol-pool-v3.inc deleted file mode 100644 index 87324f9c9..000000000 --- a/api-ref/source/v3/os-vol-pool-v3.inc +++ /dev/null @@ -1,50 +0,0 @@ -.. -*- rst -*- - -Back-end storage pools -====================== - -Administrator only. Lists all back-end storage pools that are known -to the scheduler service. - - -List all back-end storage pools -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -.. rest_method:: GET /v3/{project_id}/scheduler-stats/get_pools - -Lists all back-end storage pools, since v3.31 if non-admin users -specify invalid filters in the url, API will return bad request. - - -Normal response codes: 200 -Error response codes: badRequest(400) - -Request -------- - -.. 
rest_parameters:: parameters.yaml - - - project_id: project_id_path - - detail: detail - -Response Parameters -------------------- - -.. rest_parameters:: parameters.yaml - - - updated: updated - - QoS_support: QoS_support - - name: name - - total_capacity: total_capacity - - volume_backend_name: volume_backend_name - - capabilities: capabilities - - free_capacity: free_capacity - - driver_version: driver_version - - reserved_percentage: reserved_percentage - - storage_protocol: storage_protocol - -Response Example ----------------- - -.. literalinclude:: ./samples/pools-list-detailed-response.json - :language: javascript diff --git a/api-ref/source/v3/os-vol-transfer-v3.inc b/api-ref/source/v3/os-vol-transfer-v3.inc deleted file mode 100644 index 778bf247d..000000000 --- a/api-ref/source/v3/os-vol-transfer-v3.inc +++ /dev/null @@ -1,216 +0,0 @@ -.. -*- rst -*- - -Volume transfer -=============== - -Transfers a volume from one user to another user. - - -Accept a volume transfer -~~~~~~~~~~~~~~~~~~~~~~~~ - -.. rest_method:: POST /v3/{project_id}/os-volume-transfer/{transfer_id}/accept - -Accepts a volume transfer. - -Normal response codes: 202 - - -Request -------- - -.. rest_parameters:: parameters.yaml - - - auth_key: auth_key - - transfer_id: transfer_id - - project_id: project_id_path - -Request Example ---------------- - -.. literalinclude:: ./samples/volume-transfer-accept-request.json - :language: javascript - - - -Response Parameters -------------------- - -.. rest_parameters:: parameters.yaml - - - volume_id: volume_id - - id: id - - links: links - - name: name - - -Create a volume transfer -~~~~~~~~~~~~~~~~~~~~~~~~ - -.. rest_method:: POST /v3/{project_id}/os-volume-transfer - -Creates a volume transfer. - -Normal response codes: 202 - - -Request -------- - -.. rest_parameters:: parameters.yaml - - - name: name - - volume_id: volume_id - - project_id: project_id_path - -Request Example ---------------- - -.. 
literalinclude:: ./samples/volume-transfer-create-request.json - :language: javascript - - -Response Parameters -------------------- - -.. rest_parameters:: parameters.yaml - - - auth_key: auth_key - - links: links - - created_at: created_at - - volume_id: volume_id - - id: id - - name: name - - -List volume transfers for a project -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -.. rest_method:: GET /v3/{project_id}/os-volume-transfer - -Lists volume transfers. - - -Normal response codes: 200 -Error response codes: - - -Request -------- - -.. rest_parameters:: parameters.yaml - - - project_id: project_id_path - - -Response Parameters -------------------- - -.. rest_parameters:: parameters.yaml - - - volume_id: volume_id - - id: id - - links: links - - name: name - - -Response Example ----------------- - -.. literalinclude:: ./samples/volume-transfers-list-response.json - :language: javascript - - -Show volume transfer detail -~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -.. rest_method:: GET /v3/{project_id}/os-volume-transfer/{transfer_id} - -Shows details for a volume transfer. - - -Normal response codes: 200 -Error response codes: - - -Request -------- - -.. rest_parameters:: parameters.yaml - - - transfer_id: transfer_id - - project_id: project_id_path - - -Response Parameters -------------------- - -.. rest_parameters:: parameters.yaml - - - created_at: created_at - - volume_id: volume_id - - id: id - - links: links - - name: name - - -Response Example ----------------- - -.. literalinclude:: ./samples/volume-transfer-show-response.json - :language: javascript - - -Delete a volume transfer -~~~~~~~~~~~~~~~~~~~~~~~~ - -.. rest_method:: DELETE /v3/{project_id}/os-volume-transfer/{transfer_id} - -Deletes a volume transfer. - -Normal response codes: 202 - - -Request -------- - -.. rest_parameters:: parameters.yaml - - - transfer_id: transfer_id - - project_id: project_id_path - - -List volume transfers and details -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -.. 
rest_method:: GET /v3/{project_id}/os-volume-transfer/detail - -Lists volume transfers, with details. - - -Normal response codes: 200 -Error response codes: - -Request -------- - -.. rest_parameters:: parameters.yaml - - - project_id: project_id_path - -Response Parameters -------------------- - -.. rest_parameters:: parameters.yaml - - - created_at: created_at - - volume_id: volume_id - - id: id - - links: links - - name: name - -Response Example ----------------- - -.. literalinclude:: ./samples/volume-transfers-list-detailed-response.json - :language: javascript diff --git a/api-ref/source/v3/parameters.yaml b/api-ref/source/v3/parameters.yaml deleted file mode 100644 index bdf65ee89..000000000 --- a/api-ref/source/v3/parameters.yaml +++ /dev/null @@ -1,2417 +0,0 @@ -# variables in header -x-openstack-request-id: - description: > - foo - in: header - required: false - type: string - -# variables in path -admin_project_id: - description: | - The UUID of the administrative project. - in: path - required: true - type: string -all_tenants: - description: | - Shows details for all projects. - in: path - required: false - type: string -attachment_id_1: - description: | - The ID of the attachment. - in: path - required: true - type: string -backup_id: - description: | - The UUID for a backup. - in: path - required: true - type: string -cascade: - description: | - Remove any snapshots along with the volume. Default=False. - in: path - required: false - type: boolean -cgsnapshot_id_1: - description: | - The ID of the consistency group snapshot. - in: path - required: false - type: string -consistencygroup_id_2: - description: | - The ID of the consistency group. - in: path - required: false - type: string -encryption_id: - description: | - The ID of the encryption type. - in: path - required: true - type: string -force_3: - description: | - To delete a QoS specification even if it is in- - use, set to ``true``. Default is ``false``. 
- in: path - required: false - type: boolean -group_id: - description: | - The ID of the group. - in: path - required: false - type: string -group_snapshot_id: - description: | - The ID of the group snapshot. - in: path - required: false - type: string -group_type_id: - description: | - The UUID for an existing group type. - in: path - required: false - type: string -group_type_id_1: - description: | - The UUID for an existing group type. - in: path - required: true - type: string -hostname: - description: | - The name of the host that hosts the storage back - end. - in: path - required: true - type: string -key_1: - description: | - The metadata key name for the metadata that you - want to remove. - in: path - required: true - type: string -key_2: - description: | - The metadata key name for the metadata that you - want to see. - in: path - required: true - type: string -key_3: - description: | - The metadata key name for the metadata that you - want to update. - in: path - required: true - type: string -project_id_path: - description: | - The UUID of the project in a multi-tenancy cloud. - in: path - required: true - type: string -qos_id: - description: | - The ID of the QoS specification. - in: path - required: true - type: string -quotas_project_id: - description: | - The UUID of the tenant in a multi-tenancy cloud. - in: path - required: true - type: string -resource: - description: | - Filter filters by resource name. - in: path - required: false - type: string -snapshot_id_path: - description: | - The UUID of the snapshot. - in: path - required: true - type: string -transfer_id: - description: | - The unique identifier for a volume transfer. - in: path - required: false - type: string -user_id_1: - description: | - The user ID. Specify in the URI as - ``user_id={user_id}``. - in: path - required: false - type: string -vol_type_id: - description: | - The UUID for an existing volume type. 
- in: path - required: true - type: string -volume_id_path: - description: | - The UUID of the volume. - in: path - required: true - type: string -volume_type: - description: | - The ID of Volume Type to be accessed by project. - in: path - required: false - type: string -volume_type_id: - description: | - The UUID for an existing volume type. - in: path - required: false - type: string - -# variables in query -action: - description: | - The action. Valid values are "set" or "unset." - in: query - required: true - type: string -all-tenants: - description: | - Shows details for all project. Admin only.. - in: query - required: false - type: string -bootable_query: - description: | - Filters results by bootable status. Default=None. - in: query - required: false - type: boolean -detail: - description: | - Indicates whether to show pool details or only - pool names in the response. Set to ``true`` to show pool details. - Set to ``false`` to show only pool names. Default is ``false``. - in: query - required: false - type: boolean -image-id: - description: | - Creates volume from image ID. Default=None. - in: query - required: false - type: string -limit: - description: | - Requests a page size of items. Returns a number - of items up to a limit value. Use the ``limit`` parameter to make - an initial limited request and use the ID of the last-seen item - from the response as the ``marker`` parameter value in a - subsequent limited request. - in: query - required: false - type: integer -limit_group_snapshot: - description: | - Requests a page size of items. Returns a number - of items up to a limit value. Use the ``limit`` parameter to make - an initial limited request and use the ID of the last-seen item - from the response as the ``marker`` parameter value in a - subsequent limited request. - in: query - required: false - type: integer - min_version: 3.29 -limit_group_snapshot: - description: | - Requests a page size of items. 
Returns a number - of items up to a limit value. Use the ``limit`` parameter to make - an initial limited request and use the ID of the last-seen item - from the response as the ``marker`` parameter value in a - subsequent limited request. - in: query - required: false - type: integer - min_version: 3.29 -marker: - description: | - The ID of the last-seen item. Use the ``limit`` - parameter to make an initial limited request and use the ID of the - last-seen item from the response as the ``marker`` parameter value - in a subsequent limited request. - in: query - required: false - type: string -marker_group_snapshot: - description: | - The ID of the last-seen item. Use the ``limit`` - parameter to make an initial limited request and use the ID of the - last-seen item from the response as the ``marker`` parameter value - in a subsequent limited request. - in: query - required: false - type: string - min_version: 3.29 -message_id: - description: | - The UUID of the message. - in: query - required: true - type: string -metadata_query: - description: | - Filters results by a metadata key and value pair. - Default=None. - in: query - required: true - type: object -migration_status_query: - description: | - Filters results by a migration status. Default=None. - Admin only. - in: query - required: false - type: string -name_volume: - description: | - Filters results by a name. Default=None. - in: query - required: false - type: string -offset: - description: | - Used in conjunction with ``limit`` to return a slice of items. ``offset`` - is where to start in the list. - in: query - required: false - type: integer -offset_group_snapshot: - description: | - Used in conjunction with ``limit`` to return a slice of items. ``offset`` - is where to start in the list. - in: query - required: false - type: integer - min_version: 3.29 -sort: - description: | - Comma-separated list of sort keys and optional - sort directions in the form of < key > [: < direction > ]. 
A valid - direction is ``asc`` (ascending) or ``desc`` (descending). - in: query - required: false - type: string -sort_dir: - description: | - Sorts by one or more sets of attribute and sort - direction combinations. If you omit the sort direction in a set, - default is ``desc``. - in: query - required: false - type: string -sort_dir_group_snapshot: - description: | - Sorts by one or more sets of attribute and sort - direction combinations. If you omit the sort direction in a set, - default is ``desc``. - in: query - required: false - type: string - min_version: 3.29 -sort_key: - description: | - Sorts by an attribute. A valid value is ``name``, - ``status``, ``container_format``, ``disk_format``, ``size``, - ``id``, ``created_at``, or ``updated_at``. Default is - ``created_at``. The API uses the natural sorting direction of the - ``sort_key`` attribute value. - in: query - required: false - type: string -sort_key_1: - description: | - Sorts by an image attribute. A valid value is - ``name``, ``status``, ``container_format``, ``disk_format``, - ``size``, ``id``, ``created_at``, or ``updated_at``. Default is - ``created_at``. The API uses the natural sorting direction of the - ``sort_key`` attribute value. - in: query - required: false - type: string -sort_key_group_snapshot: - description: | - Sorts by an attribute. A valid value is ``name``, - ``status``, ``group_id``, ``group_type_id``, ``size``, - ``id``, ``created_at``, or ``updated_at``. Default is - ``created_at``. The API uses the natural sorting direction of the - ``sort_key`` attribute value. - in: query - required: false - type: string - min_version: 3.29 -spec_id: - description: | - The id (key) of the group specification. - in: query - required: true - type: string -status_query: - description: | - Filters results by a status. Default=None. - in: query - required: false - type: boolean -usage: - description: | - Set to ``usage=true`` to show quota usage. - Default is ``false``. 
- in: query - required: false - type: boolean - -# variables in body -absolute: - description: | - An ``absolute`` limits object. - in: body - required: true - type: object -add_volumes: - description: | - One or more volume UUIDs, separated by commas, to - add to the volume group or consistency group. - in: body - required: false - type: string -alias: - description: | - The alias for the extension. For example, - "FOXNSOX", "os- availability-zone", "os-extended-quotas", "os- - share-unmanage" or "os-used-limits." - in: body - required: true - type: string -allocated: - description: | - Allocated data size. Visible only if you set the - ``usage=true`` query parameter. - in: body - required: false - type: integer -attach_mode: - description: | - The attach mode of attachment, read-only ('ro') or - read-and-write ('rw'), default is 'ro'. - in: body - required: false - type: string -attach_status: - description: | - The volume attach status. - in: body - required: false - type: string -attached_at: - description: | - The time when attachment is attached. - in: body - required: false - type: string -attachment_id: - description: | - The interface ID. - in: body - required: false - type: string -attachment_id_2: - description: | - The ID of attachment. - in: body - required: true - type: string -attachments: - description: | - Instance attachment information. If this volume - is attached to a server instance, the attachments list includes - the UUID of the attached server, an attachment UUID, the name of - the attached host, if any, the volume UUID, the device, and the - device UUID. Otherwise, this list is empty. - in: body - required: true - type: array -auth_key: - description: | - The authentication key for the volume transfer. - in: body - required: true - type: string -availability_zone: - description: | - The name of the availability zone. - in: body - required: false - type: string -availability_zone_1: - description: | - The availability zone. 
- in: body - required: false - type: string -availability_zone_2: - description: | - The availability zone. - in: body - required: true - type: string -availability_zone_3: - description: | - The availability zone name. - in: body - required: true - type: string -backup: - description: | - A ``backup`` object. - in: body - required: true - type: object -backup_gigabytes: - description: | - The size(GB) of backups that are allowed for each project. - in: body - required: true - type: integer -backups: - description: | - A list of ``backup`` objects. - in: body - required: true - type: array -backups_number: - description: | - The number of backups that are allowed for each project. - in: body - required: true - type: integer -bootable: - description: | - Enables or disables the bootable attribute. You - can boot an instance from a bootable volume. - in: body - required: true - type: boolean -bootable_response: - description: | - Enables or disables the bootable attribute. You - can boot an instance from a bootable volume. - in: body - required: true - type: string -capabilities: - description: | - The capabilities for the back end. The value is - either ``null`` or a string value that indicates the capabilities - for each pool. For example, ``total_capacity`` or ``QoS_support``. - in: body - required: true - type: object -cgsnapshot_id: - description: | - The UUID of the consistency group snapshot. - in: body - required: false - type: string -cinder_id: - description: | - The UUID of the snapshot in Cinder. - in: body - required: false - type: string -cipher: - description: | - The encryption algorithm or mode. For example, aes-xts-plain64. The default - value is None. - in: body - required: false - type: string -cluster_mutex: - description: | - The OpenStack Block Storage cluster where the resource resides. Optional - only if host field is provided. 
- in: body - required: false - type: string -connection_info: - description: | - The connection info used for server to connect the volume. - in: body - required: true - type: object -connector: - description: | - The ``connector`` object. - in: body - required: false - type: object -connector_1: - description: | - The ``connector`` object. The internal structure of connector depends on - the volume driver implementation. For details about the required elements - in the structure, see the documentation for the volume driver. - in: body - required: true - type: object -consistencygroup_id: - description: | - The UUID of the consistency group. - in: body - required: true - type: string -consistencygroup_id_1: - description: | - The UUID of the consistency group. - in: body - required: false - type: string -consumer: - description: | - The consumer type. - in: body - required: false - type: string -consumer_1: - description: | - The consumer type. - in: body - required: true - type: string -container: - description: | - The container name or null. - in: body - required: false - type: string -container_format: - description: | - Container format for the new image. Defualt is bare. - in: body - required: false - type: string -control_location: - description: | - Notional service where encryption is performed. Valid values are - "front-end" or "back-end". The default value is "front-end". - in: body - required: false - type: string -created_at: - description: | - The date and time when the resource was created. - - The date and time stamp format is `ISO 8601 - `_: - - :: - - CCYY-MM-DDThh:mm:ss±hh:mm - - For example, ``2015-08-27T09:49:58-05:00``. - - The ``±hh:mm`` value, if included, is the time zone as an offset - from UTC. - in: body - required: true - type: string -created_at_1: - description: | - Date and time when the volume was created. 
- in: body - required: true - type: string -data_timestamp: - description: | - The time when the data on the volume was first saved. If it is - a backup from volume, it will be the same as ``created_at`` - for a backup. If it is a backup from a snapshot, it will be the - same as ``created_at`` for the snapshot. - in: body - required: true - type: string -delete-volumes: - description: | - If set to ``true``, allows deletion of a - group as well as all volumes in the group. - in: body - required: false - type: boolean -deleted: - description: | - The resource is deleted or not. - in: body - required: true - type: boolean -deleted_at: - description: | - The date and time when the resource was deleted. - - The date and time stamp format is `ISO 8601 - `_: - - :: - - CCYY-MM-DDThh:mm:ss±hh:mm - - For example, ``2015-08-27T09:49:58-05:00``. - - The ``±hh:mm`` value, if included, is the time zone as an offset - from UTC. In the previous example, the offset value is ``-05:00``. - - If the ``deleted_at`` date and time stamp is not set, its value is - ``null``. - in: body - required: true - type: string -description: - description: | - The backup description or null. - in: body - required: false - type: string -description_1: - description: | - The consistency group snapshot description. - in: body - required: true - type: string -description_10: - description: | - The capabilities description. - in: body - required: true - type: string -description_11: - description: | - The consistency group description. - in: body - required: false - type: string -description_12: - description: | - The group type description. - in: body - required: false - type: string -description_13: - description: | - The group description. - in: body - required: false - type: string -description_14: - description: | - The group snapshot description. - in: body - required: false - type: string -description_15: - description: | - The volume type description. 
- in: body - required: false - type: string -description_2: - description: | - The description of the consistency group. - in: body - required: false - type: string -description_3: - description: | - The description of the consistency group. - in: body - required: true - type: string -description_4: - description: | - A description for the snapshot. Default is - ``None``. - in: body - required: false - type: string -description_5: - description: | - The volume description. - in: body - required: false - type: string -description_6: - description: | - The consistency group description. - in: body - required: true - type: string -description_7: - description: | - The extension description. - in: body - required: true - type: string -description_8: - description: | - A description for the snapshot. - in: body - required: true - type: string -description_9: - description: | - The volume description. - in: body - required: true - type: string -detached_at: - description: | - The time when attachment is detached. - in: body - required: false - type: string -disk_format: - description: | - Disk format for the new image. Default is raw. - in: body - required: false - type: string -display_name: - description: | - The name of volume backend capabilities. - in: body - required: true - type: string -driver_version: - description: | - The driver version. - in: body - required: true - type: string -encrypted: - description: | - If true, this volume is encrypted. - in: body - required: true - type: boolean -encryption: - description: | - The encryption information. - in: body - required: true - type: object -encryption_id_body: - description: | - The UUID of the encryption. - in: body - required: true - type: string -event_id: - description: | - The id of the event to this message, this id could - eventually be translated into ``user_message``. - in: body - required: true - type: string -extra_info: - description: | - More information about the resource. 
- in: body - required: false - type: string -extra_specs: - description: | - A set of key and value pairs that contains the - specifications for a volume type. - in: body - required: true - type: object -extra_specs_1: - description: | - A key and value pair that contains additional - specifications that are associated with the volume type. Examples - include capabilities, capacity, compression, and so on, depending - on the storage driver in use. - in: body - required: true - type: object -fail_reason: - description: | - If the backup failed, the reason for the failure. - Otherwise, null. - in: body - required: true - type: string -force: - description: | - Indicates whether to backup, even if the volume - is attached. Default is ``false``. - in: body - required: false - type: boolean -force_1: - description: | - Indicates whether to snapshot, even if the volume - is attached. Default is ``false``. - in: body - required: false - type: boolean -force_2: - description: | - If set to ``true``, forces deletion of a - consistency group that has a registered volume. - in: body - required: false - type: boolean -force_4: - description: | - Enables or disables upload of a volume that is - attached to an instance. Default=False. - in: body - required: false - type: boolean -free_capacity: - description: | - The amount of free capacity for the back-end - volume, in GBs. A valid value is a string, such as ``unknown``, or - an integer. - in: body - required: true - type: string -gigabytes: - description: | - The size(GB) of volumes and snapshots that are allowed for each project. - in: body - required: true - type: integer -gigabytes_for_type: - description: | - The size(GB) of volumes and snapshots that are allowed for each project - and the specifed volume type. - in: body - required: true - type: integer -group_id_1: - description: | - The ID of the group. - in: body - required: false - type: string -group_snapshot_id_1: - description: | - The ID of the group snapshot. 
- in: body - required: false - type: string -group_specs: - description: | - A set of key and value pairs that contains the - specifications for a group type. - in: body - required: false - type: object -group_specs_2: - description: | - A set of key and value pairs that contains the - specifications for a group type. - in: body - required: true - type: object -group_type: - description: | - A ``group_type`` object. - in: body - required: true - type: object -group_types: - description: | - The list of group types. - in: body - required: true - type: array -groups_number: - description: | - The number of groups that are allowed for each project. - in: body - required: true - type: integer -guaranteed_until: - description: | - The expire time of the message, this message could be - be deleted after this time. - in: body - required: false - type: string -has_dependent_backups: - description: | - If this value is ``true``, the backup depends on - other backups. - in: body - required: false - type: boolean -host: - description: | - The OpenStack Block Storage host where the - existing volume resides. - in: body - required: true - type: string -host_mutex: - description: | - The OpenStack Block Storage host where the existing resource resides. - Optional only if cluster field is provided. - in: body - required: false - type: string -host_name: - description: | - The name of the attaching host. - in: body - required: false - type: string -host_service: - description: | - The name of the service which is running on the host. - in: body - required: true - type: string -id: - description: | - The UUID of the volume transfer. - in: body - required: true - type: string -id_1: - description: | - The UUID of the backup. - in: body - required: true - type: string -id_2: - description: | - The UUID of the consistency group snapshot. - in: body - required: true - type: string -id_3: - description: | - The generated ID for the QoS specification. 
- in: body - required: true - type: string -id_4: - description: | - The snapshot UUID. - in: body - required: true - type: string -id_5: - description: | - The UUID of the volume. - in: body - required: true - type: string -id_6: - description: | - The UUID of the consistency group. - in: body - required: true - type: string -id_7: - description: | - The ID for the quota set. - in: body - required: true - type: integer -id_8: - description: | - The ID for the message. - in: body - required: true - type: integer -image_id: - description: | - The uuid for the new image. - in: body - required: true - type: string -image_name: - description: | - The name for the new image. - in: body - required: true - type: string -imageRef: - description: | - The UUID of the image from which you want to - create the volume. Required to create a bootable volume. - in: body - required: false - type: string -in_use: - description: | - The in use data size. Visible only if you set the - ``usage=true`` query parameter. - in: body - required: false - type: integer -incremental: - description: | - The backup mode. A valid value is ``true`` for - incremental backup mode or ``false`` for full backup mode. Default - is ``false``. - in: body - required: false - type: boolean -instance_uuid: - description: | - The UUID of the attaching instance. - in: body - required: false - type: string -instance_uuid_1: - description: | - The UUID of the attaching instance. - in: body - required: true - type: string -is_incremental: - description: | - Indicates whether the backup mode is incremental. - If this value is ``true``, the backup mode is incremental. If this - value is ``false``, the backup mode is full. - in: body - required: false - type: boolean -is_public: - description: - Volume type which is accessible to the public. - in: body - required: false - type: boolean -is_public_1: - description: - Group type which is accessible to the public. 
- in: body - required: false - type: boolean -key: - description: | - The metadata key name for the metadata that you - want to remove. - in: body - required: true - type: string -key_size: - description: | - Size of encryption key, in bits. For example, 128 or 256. The default value - is None. - in: body - required: false - type: integer -keys: - description: | - List of Keys. For example, CryptsetupEncryptor, LuksEncryptor or - NoOpEncryptor. - in: body - required: true - type: array -limit_usage: - description: | - The limit data size. Visible only if you set the - ``usage=true`` query parameter. - in: body - required: false - type: integer -limits: - description: | - A list of ``limit`` objects. - in: body - required: true - type: object -links: - description: | - Links for the volume transfer. - in: body - required: true - type: array -links_1: - description: | - Links for the backup. - in: body - required: true - type: array -links_2: - description: | - The QoS specification links. - in: body - required: true - type: array -links_3: - description: | - The volume links. - in: body - required: true - type: array -links_4: - description: | - List of links related to the extension. - in: body - required: true - type: array -links_5: - description: | - Links for the message. - in: body - required: false - type: array -location: - description: | - Full URL to a service or server. - format: uri - in: body - required: true - type: string -manageable-snapshots: - description: | - A list of manageable snapshots. - in: body - required: true - type: list -manageable-volumes: - description: | - A list of manageable volumes. - in: body - required: true - type: list -maxTotalBackupGigabytes: - description: | - The maximum total amount of backups, in gibibytes - (GiB). - in: body - required: true - type: integer -maxTotalBackups: - description: | - The maximum number of backups. 
- in: body - required: true - type: integer -maxTotalSnapshots: - description: | - The maximum number of snapshots. - in: body - required: true - type: integer -maxTotalVolumeGigabytes: - description: | - The maximum total amount of volumes, in gibibytes - (GiB). - in: body - required: true - type: integer -maxTotalVolumes: - description: | - The maximum number of volumes. - in: body - required: true - type: integer -message_level: - description: | - The level of the message, possible value is - only 'ERROR' now. - in: body - required: true - type: string -meta: - description: | - The metadata key and value pair for the volume. - in: body - required: true - type: object -meta_1: - description: | - The metadata key and value pair for the snapshot. - in: body - required: true - type: object -metadata: - description: | - One or more metadata key and value pairs for the - snapshot, if any. - in: body - required: true - type: object -metadata_1: - description: | - A ``metadata`` object. Contains one or more - metadata key and value pairs that are associated with the volume. - in: body - required: true - type: object -metadata_2: - description: | - One or more metadata key and value pairs that are - associated with the volume. - in: body - required: false - type: object -metadata_3: - description: | - One or more metadata key and value pairs that are - associated with the volume. - in: body - required: true - type: object -metadata_4: - description: | - One or more metadata key and value pairs to - associate with the volume. - in: body - required: false - type: string -metadata_5: - description: | - The image metadata to add to the volume as a set - of metadata key and value pairs. - in: body - required: true - type: object -metadata_6: - description: | - One or more metadata key and value pairs to - associate with the volume. - in: body - required: false - type: object -metadata_7: - description: | - One or more metadata key and value pairs for the - snapshot. 
- in: body - required: false - type: object -migration_policy: - description: | - Specify if the volume should be migrated when it is re-typed. - Possible values are ``on-demand`` or ``never``. If not specified, the - default is ``never``. - - .. note:: If the volume is attached to a server instance and will be - migrated, then by default policy only users with the administrative role - should attempt the retype operation. - in: body - required: false - type: string -migration_status: - description: | - The volume migration status. - in: body - required: true - type: string -migration_status_1: - description: | - The volume migration status. - in: body - required: false - type: string -mountpoint: - description: | - The attaching mount point. - in: body - required: true - type: string -multiattach: - description: | - To enable this volume to attach to more than one - server, set this value to ``true``. Default is ``false``. - in: body - required: false - type: boolean -multiattach_1: - description: | - If true, this volume can attach to more than one - instance. - in: body - required: true - type: boolean -name: - description: | - The name of the Volume Transfer. - in: body - required: true - type: string -name_1: - description: | - The backup name. - in: body - required: true - type: string -name_10: - description: | - The name of the extension. For example, "Fox In - Socks." - in: body - required: true - type: string -name_11: - description: | - The name of the back-end volume. - in: body - required: true - type: string -name_12: - description: | - The name of the snapshot. - in: body - required: true - type: string -name_13: - description: | - The volume name. - in: body - required: true - type: string -name_14: - description: | - The name of the volume to which you want to - restore a backup. - in: body - required: false - type: string -name_15: - description: | - The consistency group name. 
- in: body - required: false - type: string -name_16: - description: | - The group type name. - in: body - required: false - type: string -name_17: - description: | - The group name. - in: body - required: false - type: string -name_18: - description: | - The group snapshot name. - in: body - required: false - type: string -name_2: - description: | - The consistency group snapshot name. - in: body - required: true - type: string -name_3: - description: | - The name of the consistency group. - in: body - required: true - type: string -name_4: - description: | - The name of the QoS specification. - in: body - required: true - type: string -name_5: - description: | - The name of the snapshot. Default is ``None``. - in: body - required: false - type: string -name_6: - description: | - The volume transfer name. - in: body - required: false - type: string -name_7: - description: | - The name of the volume type. - in: body - required: true - type: string -name_8: - description: | - The volume name. - in: body - required: false - type: string -name_9: - description: | - The consistency group name. - in: body - required: true - type: string -name_optional: - description: | - The name of the Volume Backup. - in: body - required: false - type: string -namespace: - description: | - Link associated to the extension. - in: body - required: true - type: string -namespace_1: - description: | - The storage namespace, such as - ``OS::Storage::Capabilities::foo``. - in: body - required: true - type: string -new_size: - description: | - The new size of the volume, in gibibytes (GiB). - - .. note:: Some volume backends require the storage to be in some multiple - value rather than incremental. For example, the EMC ScaleIO backend - requires storage in multiples of 8GB. 
There is a known limitation such - that a request to extend the size of a volume for these backends will be - rounded up to the nearest multiple but the actual physical size of the - storage will not be reflected back in the API for the volume size. For - example, a request to extend the size of an 8GB ScaleIO-backed volume - to 9GB will actually result in 16GB of physical storage but only 9GB will - be reflected in the API and counted for quota usage. - in: body - required: true - type: integer -new_type: - description: | - The new volume type that volume is changed with. - in: body - required: true - type: string -object_count: - description: | - The number of objects in the backup. - in: body - required: true - type: integer -os-attach: - description: | - The ``os-attach`` action. - in: body - required: true - type: object -os-backup-project-attr:project_id: - description: | - The UUID of the owning project. - in: body - required: true - type: string. - in: body - required: true - type: string - min_version: 3.18 -os-detach: - description: | - The ``os-detach`` action. - in: body - required: true - type: object -os-extend: - description: | - The ``os-extend`` action. - in: body - required: true - type: object -os-extended-snapshot-attributes:progress: - description: | - A percentage value for the build progress. - in: body - required: true - type: integer -os-extended-snapshot-attributes:project_id: - description: | - The UUID of the owning project. - in: body - required: true - type: string -os-force_delete: - description: | - The ``os-force_delete`` action. - in: body - required: true - type: string -os-force_detach: - description: | - The ``os-force_detach`` action. - in: body - required: true - type: object -os-reset_status: - description: | - The ``os-reset_status`` action. - in: body - required: true - type: object -os-retype: - description: | - The ``os-retype`` action. 
- in: body - required: true - type: object -OS-SCH-HNT:scheduler_hints: - description: | - The dictionary of data to send to the scheduler. - in: body - required: false - type: object -os-set_bootable: - description: | - The ``os-set_bootable`` action. - in: body - required: true - type: object -os-set_image_metadata: - description: | - The ``os-set_image_metadata`` action. - in: body - required: true - type: object -os-unmanage: - description: | - The ``os-unmanage`` action. This action removes - the specified volume from Cinder management. - in: body - required: true - type: object -os-unset_image_metadata: - description: | - The ``os-unset_image_metadata`` action. This - action removes the key-value pairs from the image metadata. - in: body - required: true - type: object -os-vol-host-attr:host: - description: | - Current back-end of the volume. - in: body - required: true - type: string -os-vol-mig-status-attr:migstat: - description: | - The status of this volume migration (None means - that a migration is not currently in progress). - in: body - required: true - type: string -os-vol-mig-status-attr:name_id: - description: | - The volume ID that this volume name on the back- - end is based on. - in: body - required: true - type: string -os-vol-tenant-attr:tenant_id: - description: | - The tenant ID which the volume belongs to. - in: body - required: true - type: string -os-volume-replication:driver_data: - description: | - The name of the volume replication driver. - in: body - required: false - type: string -os-volume-replication:extended_status: - description: | - The volume replication status managed by the - driver of backend storage. - in: body - required: false - type: string -os-volume-replication:extended_status_1: - description: | - The status of the volume replication. - in: body - required: false - type: string -os-volume-type-access:is_public: - description: | - Make type accessible to the public. 
- in: body - required: false - type: boolean -os-volume_upload_image: - description: | - The ``os-volume_upload_image`` action. This - action uploads the specified volume to image service. - in: body - required: true - type: object -per_volume_gigabytes: - description: | - The size(GB) of volumes that are allowed for each volume. - in: body - required: true - type: integer -pool_name: - description: | - The name of the storage pool. - in: body - required: true - type: string -project: - description: | - The ID of the project. Volume Type access to be - added to this project ID. - in: body - required: true - type: string -project_id: - description: | - The UUID of the project. - in: body - required: true - type: string -project_id_1: - description: | - The Project ID having access to this volume type. - in: body - required: true - type: string -properties: - description: | - The backend volume capabilities list, which is - consisted of cinder standard capabilities and vendor unique - properties. - in: body - required: true - type: object -protected: - description: | - Whether the new image is protected. Default=False. - in: body - required: false - type: boolean -provider: - description: | - The class that provides encryption support. - in: body - required: true - type: string -provider_optional: - description: | - The class that provides encryption support. - in: body - required: false - type: string -qos_specs: - description: | - A ``qos_specs`` object. - in: body - required: true - type: object -QoS_support: - description: | - The quality of service (QoS) support. - in: body - required: true - type: boolean -quota_set: - description: | - A ``quota_set`` object. - in: body - required: true - type: object -rate: - description: | - Rate-limit volume copy bandwidth, used to - mitigate slow down of data access from the instances. - in: body - required: true - type: array -reason_not_safe: - description: | - The reason why the resource can't be managed. 
- in: body - required: false - type: string -ref: - description: | - A reference to the existing volume. The internal - structure of this reference depends on the volume driver - implementation. For details about the required elements in the - structure, see the documentation for the volume driver. - in: body - required: true - type: string -ref_1: - description: | - A reference to the existing volume. The internal - structure of this reference is dependent on the implementation of - the volume driver, see the specific driver's documentation for - details of the required elements in the structure. - in: body - required: true - type: object -reference: - description: | - Some information for the resource. - in: body - required: true - type: object -remove_volumes: - description: | - One or more volume UUIDs, separated by commas, to - remove from the volume group or consistency group. - in: body - required: false - type: string -replication_status: - description: | - The volume replication status. - in: body - required: true - type: string -replication_targets: - description: | - A list of volume backends used to replicate volumes - on this backend. - in: body - required: true - type: list -request_id: - description: | - The id of the request during which the message was created. - in: body - required: true - type: string -reserved: - description: | - Reserved data size. Visible only if you set the - ``usage=true`` query parameter. - in: body - required: false - type: integer -reserved_percentage: - description: | - The percentage of the total capacity that is - reserved for the internal use by the back end. - in: body - required: true - type: integer -reset_status: - description: | - The ``reset_status`` action. - in: body - required: true - type: object -resource_1: - description: | - Resource which the filters will be applied to. - in: body - required: true - type: string -resource_filters: - description: | - The resource filter array. 
- in: body - required: true - type: array -resource_type: - description: | - The resource type corresponding to ``resource_uuid``. - in: body - required: false - type: string -resource_uuid: - description: | - The UUID of the resource during whose operation the - message was created. - in: body - required: false - type: string -restore: - description: | - A ``restore`` object. - in: body - required: true - type: object -revert: - description: | - The ``revert`` action. - in: body - required: true - type: object -safe_to_manage: - description: | - If the resource can be managed or not. - in: body - required: true - type: boolean -service_state: - description: | - The state of the service. One of ``available`` or ``unavailable``. - in: body - required: true - type: string -service_status: - description: | - The status of the service. One of ``enabled`` or ``disabled``. - in: body - required: true - type: string -size: - description: | - The size of the volume, in gibibytes (GiB). - in: body - required: true - type: integer -size_1: - description: | - The size of the backup, in GB. - in: body - required: true - type: integer -skip_validation: - description: | - If set to false, the quota value can't be set lower than the in_use quota. - Default is True. - in: body - required: false - type: boolean -snapshot: - description: | - A partial representation of a snapshot used in - the creation process. - in: body - required: true - type: string -snapshot_1: - description: | - A ``snapshot`` object. - in: body - required: true - type: object -snapshot_id: - description: | - To create a volume from an existing snapshot, - specify the UUID of the volume snapshot. The volume is created in - same availability zone and with same size as the snapshot. - in: body - required: false - type: string -snapshot_id_2: - description: | - The UUID of the source volume snapshot. 
- in: body - required: true - type: string -snapshot_id_3: - description: | - The UUID of the source volume snapshot. The API - creates a new volume snapshot with the same size as the source - volume snapshot. - in: body - required: true - type: string -snapshot_id_4: - description: | - The UUID of the snapshot. The API - reverts the volume with this snapshot. - in: body - required: true - type: string -snapshots_number: - description: | - The number of snapshots that are allowed for each project. - in: body - required: true - type: integer -snapshots_number_for_type: - description: | - The number of snapshots that are allowed for each project and - the specified volume type. - in: body - required: true - type: integer -source-name: - description: | - The resource's name. - in: body - required: true - type: string -source_cgid: - description: | - The UUID of the source consistency group. - in: body - required: false - type: string -source_group_id: - description: | - The UUID of the source group. - in: body - required: false - type: string -source_reference: - description: | - The snapshot's origin volume information. - in: body - required: true - type: object -source_replica: - description: | - The UUID of the primary volume to clone. - in: body - required: false - type: string -source_volid: - description: | - The UUID of the source volume. The API creates a - new volume with the same size as the source volume. - in: body - required: false - type: string -source_volid_1: - description: | - The UUID of the source volume. - in: body - required: true - type: string -spec_value: - description: | - The value of the group specification corresponding to the specified key. - in: body - required: true - type: string -specs: - description: | - A ``specs`` object. - in: body - required: true - type: object -specs_1: - description: | - Specification key and value pairs. - in: body - required: true - type: object -specs_2: - description: | - Specification key and value pairs. 
- in: body - required: true - type: string -status: - description: | - The ``status`` of the consistency group snapshot. - in: body - required: false - type: string -status_1: - description: | - The status of the consistency group. - in: body - required: true - type: string -status_10: - description: | - The status for the backup. - in: body - required: true - type: string -status_2: - description: | - The status for the snapshot. - in: body - required: true - type: string -status_3: - description: | - The volume status. - in: body - required: true - type: string -status_4: - description: | - The backup status. Refer to Backup statuses table - for the possible status value. - in: body - required: true - type: string -status_5: - description: | - The consistency group status. A valid value is - ``creating``, ``available``, ``error``, ``deleting``, - ``updating``, or ``invalid``. - in: body - required: true - type: string -status_6: - description: | - The volume status. - in: body - required: false - type: string -status_7: - description: | - The ``status`` of the generic group snapshot. - in: body - required: false - type: string -status_8: - description: | - The status of the generic group. - in: body - required: true - type: string -status_9: - description: | - The status of the attachment. - in: body - required: true - type: string -storage_protocol: - description: | - The storage back end for the back-end volume. For - example, ``iSCSI`` or ``FC``. - in: body - required: true - type: string -storage_protocol_1: - description: | - The storage protocol, such as Fibre Channel, - iSCSI, NFS, and so on. - in: body - required: true - type: string -summary_metadata: - description: | - The dictionary of lists contains all the volumes' metadata, - classified by metadata key. - in: body - required: true - type: object - min_version: 3.36 -total_capacity: - description: | - The total capacity for the back-end volume, in - GBs. 
A valid value is a string, such as ``unknown``, or an - integer. - in: body - required: true - type: string -total_count: - description: | - Total number of volumes. - in: body - required: true - type: integer -total_size: - description: | - Total size of volumes in GB. - in: body - required: true - type: integer -totalBackupGigabytesUsed: - description: | - The total number of backups gibibytes (GiB) used. - in: body - required: true - type: integer -totalBackupsUsed: - description: | - The total number of backups used. - in: body - required: true - type: integer -totalGigabytesUsed: - description: | - The total number of gibibytes (GiB) used. - in: body - required: true - type: integer -totalSnapshotsUsed: - description: | - The total number of snapshots used. - in: body - required: true - type: integer -totalVolumesUsed: - description: | - The total number of volumes used. - in: body - required: true - type: integer -updated: - description: | - The date and time stamp when the extension was - last updated. - in: body - required: true - type: string -updated_1: - description: | - The date and time stamp when the API request was - issued. - in: body - required: true - type: string -updated_at: - description: | - The date and time when the resource was updated. - - The date and time stamp format is `ISO 8601 - `_: - - :: - - CCYY-MM-DDThh:mm:ss±hh:mm - - For example, ``2015-08-27T09:49:58-05:00``. - - The ``±hh:mm`` value, if included, is the time zone as an offset - from UTC. In the previous example, the offset value is ``-05:00``. - - If the ``updated_at`` date and time stamp is not set, its value is - ``null``. - in: body - required: true - type: string -user_id: - description: | - The UUID of the user. - in: body - required: true - type: string -user_id_2: - description: | - The UUID of the user. - in: body - required: true - type: string - min_version: 3.41 -user_message: - description: | - The translated readable message corresponding to ``event_id``. 
- in: body - required: false - type: string -vendor_name: - description: | - The name of the vendor. - in: body - required: true - type: string -visibility: - description: | - The volume type access. - in: body - required: true - type: string -visibility_1: - description: | - The visibility property of the new image. Default is private. - in: body - required: false - type: string -volume: - description: | - A ``volume`` object. - in: body - required: true - type: object -volume-summary: - description: | - Dictionary of ``volume-summary`` objects. - in: body - required: true - type: object -volume_1: - description: | - A ``volume`` object. - in: body - required: true - type: string -volume_backend_name: - description: | - The name of the back-end volume. - in: body - required: true - type: string -volume_id: - description: | - The UUID of the volume. - in: body - required: true - type: string -volume_id_2: - description: | - The UUID of the volume that you want to back up. - in: body - required: true - type: string -volume_id_3: - description: | - To create a snapshot from an existing volume, - specify the UUID of the existing volume. - in: body - required: true - type: string -volume_id_4: - description: | - The UUID of the volume from which the backup was - created. - in: body - required: true - type: string -volume_id_5: - description: | - If the snapshot was created from a volume, the - volume ID. - in: body - required: true - type: string -volume_id_6: - description: | - The UUID of the volume to which you want to - restore a backup. - in: body - required: false - type: string -volume_id_7: - description: | - The UUID of the volume which the attachment belongs - to. - in: body - required: true - type: string -volume_name: - description: | - The volume name. - in: body - required: true - type: string -volume_type_1: - description: | - A ``volume_type`` object. - in: body - required: true - type: object -volume_type_2: - description: | - The volume type. 
To create an environment with - multiple-storage back ends, you must specify a volume type. Block - Storage volume back ends are spawned as children to ``cinder- - volume``, and they are keyed from a unique queue. They are named - ``cinder- volume.HOST.BACKEND``. For example, ``cinder- - volume.ubuntu.lvmdriver``. When a volume is created, the scheduler - chooses an appropriate back end to handle the request based on the - volume type. Default is ``None``. For information about how to - use volume types to create multiple- storage back ends, see - `Configure multiple-storage back ends - `_. - in: body - required: false - type: string -volume_type_3: - description: | - The volume type. In an environment with multiple- - storage back ends, the scheduler determines where to send the - volume based on the volume type. For information about how to use - volume types to create multiple- storage back ends, see `Configure - multiple-storage back ends `_. - in: body - required: true - type: string -volume_type_4: - description: | - The associated volume type. - in: body - required: false - type: string -volume_type_5: - description: | - A list of ``volume_type`` objects. - in: body - required: true - type: array -volume_type_6: - description: | - The associated volume type for the volume. - in: body - required: true - type: string -volume_type_id_body: - description: | - The UUID of the volume type. - in: body - required: true - type: string -volume_types: - description: | - The list of volume types. In an environment with - multiple-storage back ends, the scheduler determines where to send - the volume based on the volume type. For information about how to - use volume types to create multiple- storage back ends, see - `Configure multiple-storage back ends - `_. - in: body - required: true - type: array -volume_types_2: - description: | - The list of volume types separated by commas. 
In an environment with - multiple-storage back ends, the scheduler determines where to send - the volume based on the volume type. For information about how to - use volume types to create multiple-storage back ends, see - `Configure multiple-storage back ends - `_. - in: body - required: true - type: string -volumes: - description: | - A list of ``volume`` objects. - in: body - required: true - type: array -volumes_number: - description: | - The number of volumes that are allowed for each project. - in: body - required: true - type: integer -volumes_number_for_type: - description: | - The number of volumes that are allowed for each project and - the specified volume type. - in: body - required: true - type: integer diff --git a/api-ref/source/v3/qos-specs-v3-qos-specs.inc b/api-ref/source/v3/qos-specs-v3-qos-specs.inc deleted file mode 100644 index 4aa4a1557..000000000 --- a/api-ref/source/v3/qos-specs-v3-qos-specs.inc +++ /dev/null @@ -1,316 +0,0 @@ -.. -*- rst -*- - -Quality of service (QoS) specifications (qos-specs) -=================================================== - -Administrators only, depending on policy settings. - -Creates, lists, shows details for, associates, disassociates, sets -keys, unsets keys, and deletes quality of service (QoS) -specifications. - - -Disassociate a QoS specification from all associations -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -.. rest_method:: GET /v3/{project_id}/qos-specs/{qos_id}/disassociate_all - -Disassociates a QoS specification from all associations. - -Normal response codes: 202 - - -Request -------- - -.. rest_parameters:: parameters.yaml - - - project_id: project_id_path - - qos_id: qos_id - - -Unset keys in a QoS specification -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -.. rest_method:: PUT /v3/{project_id}/qos-specs/{qos_id}/delete_keys - -Unsets keys in a QoS specification. - - -Normal response codes: 200 -Error response codes: - - -Request -------- - -.. 
rest_parameters:: parameters.yaml - - - keys: keys - - project_id: project_id_path - - qos_id: qos_id - -Request Example ---------------- - -.. literalinclude:: ./samples/qos-unset-request.json - :language: javascript - - - -Response Example ----------------- - -.. literalinclude:: ./samples/qos-unset-response.json - :language: javascript - - -Get all associations for a QoS specification -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -.. rest_method:: GET /v3/{project_id}/qos-specs/{qos_id}/associations - -Lists all associations for a QoS specification. - - -Normal response codes: 200 -Error response codes: - - -Request -------- - -.. rest_parameters:: parameters.yaml - - - project_id: project_id_path - - qos_id: qos_id - - -Response Example ----------------- - -.. literalinclude:: ./samples/qos-show-response.json - :language: javascript - - -Associate QoS specification with a volume type -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -.. rest_method:: GET /v3/{project_id}/qos-specs/{qos_id}/associate - -Associates a QoS specification with a volume type. - -Normal response codes: 202 - - -Request -------- - -.. rest_parameters:: parameters.yaml - - - project_id: project_id_path - - qos_id: qos_id - - vol_type_id: vol_type_id - - -Disassociate QoS specification from a volume type -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -.. rest_method:: GET /v3/{project_id}/qos-specs/{qos_id}/disassociate - -Disassociates a QoS specification from a volume type. - -Normal response codes: 202 - - -Request -------- - -.. rest_parameters:: parameters.yaml - - - project_id: project_id_path - - qos_id: qos_id - - vol_type_id: vol_type_id - - -Show a QoS specification details -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -.. rest_method:: GET /v3/{project_id}/qos-specs/{qos_id} - -Shows details for a QoS specification. - - -Normal response codes: 200 -Error response codes:413,405,404,403,401,400,503, - - -Request -------- - -.. 
rest_parameters:: parameters.yaml - - - project_id: project_id_path - - qos_id: qos_id - - -Response Parameters -------------------- - -.. rest_parameters:: parameters.yaml - - - name: name - - links: links - - id: id - - qos_specs: qos_specs - - consumer: consumer - - specs: specs - - -Response Example ----------------- - -.. literalinclude:: ./samples/qos-show-response.json - :language: javascript - - -Set keys in a QoS specification -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -.. rest_method:: PUT /v3/{project_id}/qos-specs/{qos_id} - -Sets keys in a QoS specification. - - -Normal response codes: 200 -Error response codes: - - -Request -------- - -.. rest_parameters:: parameters.yaml - - - qos_specs: qos_specs - - specs: specs - - project_id: project_id_path - - qos_id: qos_id - -Request Example ---------------- - -.. literalinclude:: ./samples/qos-update-request.json - :language: javascript - - -Response Example ----------------- - -.. literalinclude:: ./samples/qos-update-response.json - :language: javascript - - -Delete a QoS specification -~~~~~~~~~~~~~~~~~~~~~~~~~~ - -.. rest_method:: DELETE /v3/{project_id}/qos-specs/{qos_id} - -Deletes a QoS specification. - -Normal response codes: 202 - - -Request -------- - -.. rest_parameters:: parameters.yaml - - - project_id: project_id_path - - qos_id: qos_id - - force: force - - -Create a QoS specification -~~~~~~~~~~~~~~~~~~~~~~~~~~ - -.. rest_method:: POST /v3/{project_id}/qos-specs - -Creates a QoS specification. - -Specify one or more key and value pairs in the request body. - -Normal response codes: 202 - - -Request -------- - -.. rest_parameters:: parameters.yaml - - - qos_specs: qos_specs - - consumer: consumer - - name: name - - project_id: project_id_path - -Request Example ---------------- - -.. literalinclude:: ./samples/qos-create-request.json - :language: javascript - - - -Response Parameters -------------------- - -.. 
rest_parameters:: parameters.yaml - - - name: name - - links: links - - id: id - - qos_specs: qos_specs - - consumer: consumer - - specs: specs - - -List QoS Specifications -~~~~~~~~~~~~~~~~~~~~~~~ - -.. rest_method:: GET /v3/{project_id}/qos-specs - -Lists quality of service (QoS) specifications. - - -Normal response codes: 200 -Error response codes:300, - - -Request -------- - -.. rest_parameters:: parameters.yaml - - - project_id: project_id_path - - sort_key: sort_key - - sort_dir: sort_dir - - limit: limit - - marker: marker - - -Response Parameters -------------------- - -.. rest_parameters:: parameters.yaml - - - specs: specs - - qos_specs: qos_specs - - consumer: consumer - - id: id - - name: name - - -Response Example ----------------- - -.. literalinclude:: ./samples/qos-list-response.json - :language: javascript diff --git a/api-ref/source/v3/quota-sets.inc b/api-ref/source/v3/quota-sets.inc deleted file mode 100644 index 700c75114..000000000 --- a/api-ref/source/v3/quota-sets.inc +++ /dev/null @@ -1,189 +0,0 @@ -.. -*- rst -*- - -Quota sets extension (os-quota-sets) -==================================== - -Administrators only, depending on policy settings. - -Shows, updates, and deletes quotas for a project. - - -Show quotas for a project -~~~~~~~~~~~~~~~~~~~~~~~~~ - -.. rest_method:: GET /v3/{admin_project_id}/os-quota-sets/{project_id} - -Shows quotas for a project. - - -Normal response codes: 200 -Error response codes: - - -Request -------- - -.. rest_parameters:: parameters.yaml - - - project_id: quotas_project_id - - admin_project_id: admin_project_id - - usage: usage - - -Response Parameters -------------------- - -.. 
rest_parameters:: parameters.yaml - - - quota_set: quota_set - - id: project_id - - volumes: volumes_number - - volumes_{volume_type}: volumes_number_for_type - - snapshots: snapshots_number - - snapshots_{volume_type}: snapshots_number_for_type - - backups: backups_number - - groups: groups_number - - per_volume_gigabytes: per_volume_gigabytes - - gigabytes: gigabytes - - gigabytes_{volume_type}: gigabytes_for_type - - backup_gigabytes: backup_gigabytes - - in_use: in_use - - reserved: reserved - - limit: limit_usage - - allocated: allocated - -Response Example ----------------- - -.. literalinclude:: ./samples/quotas-show-response.json - :language: javascript - -Update quotas for a project -~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -.. rest_method:: PUT /v3/{admin_project_id}/os-quota-sets/{project_id} - -Updates quotas for a project. - - -Normal response codes: 200 -Error response codes: - - -Request -------- - -.. rest_parameters:: parameters.yaml - - - admin_project_id: admin_project_id - - project_id: quotas_project_id - - quota_set: quota_set - - volumes: volumes_number - - volumes_{volume_type}: volumes_number_for_type - - snapshots: snapshots_number - - snapshots_{volume_type}: snapshots_number_for_type - - backups: backups_number - - groups: groups_number - - per_volume_gigabytes: per_volume_gigabytes - - gigabytes: gigabytes - - gigabytes_{volume_type}: gigabytes_for_type - - backup_gigabytes: backup_gigabytes - - skip_validation: skip_validation - -Request Example ---------------- - -.. literalinclude:: ./samples/quotas-update-request.json - :language: javascript - - -Response Parameters -------------------- - -.. 
rest_parameters:: parameters.yaml - - - quota_set: quota_set - - volumes: volumes_number - - volumes_{volume_type}: volumes_number_for_type - - snapshots: snapshots_number - - snapshots_{volume_type}: snapshots_number_for_type - - backups: backups_number - - groups: groups_number - - per_volume_gigabytes: per_volume_gigabytes - - gigabytes: gigabytes - - gigabytes_{volume_type}: gigabytes_for_type - - backup_gigabytes: backup_gigabytes - -Response Example ----------------- - -.. literalinclude:: ./samples/quotas-update-response.json - :language: javascript - -Delete quotas for a project -~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -.. rest_method:: DELETE /v3/{admin_project_id}/os-quota-sets/{project_id} - -Deletes quotas for a project so the quotas revert to default values. - - -Normal response codes: 200 -Error response codes: - - -Request -------- - -.. rest_parameters:: parameters.yaml - - - project_id: quotas_project_id - - admin_project_id: admin_project_id - - - - -Get default quotas for a project -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -.. rest_method:: GET /v3/{admin_project_id}/os-quota-sets/{project_id}/defaults - -Gets default quotas for a project. - - -Normal response codes: 200 -Error response codes: - - -Request -------- - -.. rest_parameters:: parameters.yaml - - - project_id: project_id_path - - admin_project_id: admin_project_id - - -Response Parameters -------------------- - -.. rest_parameters:: parameters.yaml - - - quota_set: quota_set - - id: project_id - - volumes: volumes_number - - volumes_{volume_type}: volumes_number_for_type - - snapshots: snapshots_number - - snapshots_{volume_type}: snapshots_number_for_type - - backups: backups_number - - groups: groups_number - - per_volume_gigabytes: per_volume_gigabytes - - gigabytes: gigabytes - - gigabytes_{volume_type}: gigabytes_for_type - - backup_gigabytes: backup_gigabytes - -Response Example ----------------- - -.. 
literalinclude:: ./samples/quotas-show-defaults-response.json - :language: javascript diff --git a/api-ref/source/v3/resource-filters.inc b/api-ref/source/v3/resource-filters.inc deleted file mode 100644 index 0bd8bc1df..000000000 --- a/api-ref/source/v3/resource-filters.inc +++ /dev/null @@ -1,41 +0,0 @@ -.. -*- rst -*- - -Resource Filters -================ - -Lists all resource filters, available since -microversion 3.33. - - -List resource filters -~~~~~~~~~~~~~~~~~~~~~ - -.. rest_method:: GET /v3/{project_id}/resource_filters - -List filters. - -Normal response codes: 200 -Error response codes: - - -Request -------- - -.. rest_parameters:: parameters.yaml - - - project_id: project_id_path - - resource: resource - -Response Parameters -------------------- - -.. rest_parameters:: parameters.yaml - - - filters: resource_filters - - resource: resource_1 - -Response Example ----------------- - -.. literalinclude:: ./samples/resource-filters-list-response.json - :language: javascript diff --git a/api-ref/source/v3/samples/attachment-create-request.json b/api-ref/source/v3/samples/attachment-create-request.json deleted file mode 100644 index 6d8501c50..000000000 --- a/api-ref/source/v3/samples/attachment-create-request.json +++ /dev/null @@ -1,16 +0,0 @@ -{ - "attachment": { - "instance_uuid": "462dcc2d-130d-4654-8db1-da0df2da6a0d", - "connector": { - "initiator": "iqn.1993-08.org.debian: 01: cad181614cec", - "ip": "192.168.1.20", - "platform": "x86_64", - "host": "tempest-1", - "os_type": "linux2", - "multipath": false, - "mountpoint": "/dev/vdb", - "mode": "ro" - }, - "volume_uuid": "462dcc2d-130d-4654-8db1-da0df2da6a0d" - } -} \ No newline at end of file diff --git a/api-ref/source/v3/samples/attachment-create-response.json b/api-ref/source/v3/samples/attachment-create-response.json deleted file mode 100644 index 570801dec..000000000 --- a/api-ref/source/v3/samples/attachment-create-response.json +++ /dev/null @@ -1,12 +0,0 @@ -{ - "attachment": { - "status": 
"attaching", - "detached_at": "2015-09-16T09:28:52.000000", - "connection_info": {}, - "attached_at": "2015-09-16T09:28:52.000000", - "attach_mode": "ro", - "instance": "3b8b6631-1cf7-4fd7-9afb-c01e541as345", - "volume_id": "462dcc2d-130d-4654-8db1-da0df2da6a0d", - "id": "3b8b6631-1cf7-4fd7-9afb-c01e541a073c" - } -} \ No newline at end of file diff --git a/api-ref/source/v3/samples/attachment-list-detailed-response.json b/api-ref/source/v3/samples/attachment-list-detailed-response.json deleted file mode 100644 index 11578487e..000000000 --- a/api-ref/source/v3/samples/attachment-list-detailed-response.json +++ /dev/null @@ -1,14 +0,0 @@ -{ - "attachments": [ - { - "status": "attaching", - "detached_at": "2015-09-16T09:28:52.000000", - "connection_info": {}, - "attached_at": "2015-09-16T09:28:52.000000", - "attach_mode": "ro", - "instance": "31c79baf-b59e-469c-979f-1df4ecb6eea7", - "volume_id": "462dcc2d-130d-4654-8db1-da0df2da6a0d", - "id": "3b8b6631-1cf7-4fd7-9afb-c01e541a073c" - } - ] -} \ No newline at end of file diff --git a/api-ref/source/v3/samples/attachment-list-response.json b/api-ref/source/v3/samples/attachment-list-response.json deleted file mode 100644 index 99b4e5912..000000000 --- a/api-ref/source/v3/samples/attachment-list-response.json +++ /dev/null @@ -1,10 +0,0 @@ -{ - "attachments": [ - { - "status": "attaching", - "instance": "31c79baf-b59e-469c-979f-1df4ecb6eea7", - "id": "3b8b6631-1cf7-4fd7-9afb-c01e541a073c", - "volume_id": "462dcc2d-130d-4654-8db1-da0df2da6a0d" - } - ] -} \ No newline at end of file diff --git a/api-ref/source/v3/samples/attachment-show-response.json b/api-ref/source/v3/samples/attachment-show-response.json deleted file mode 100644 index 570801dec..000000000 --- a/api-ref/source/v3/samples/attachment-show-response.json +++ /dev/null @@ -1,12 +0,0 @@ -{ - "attachment": { - "status": "attaching", - "detached_at": "2015-09-16T09:28:52.000000", - "connection_info": {}, - "attached_at": "2015-09-16T09:28:52.000000", - 
"attach_mode": "ro", - "instance": "3b8b6631-1cf7-4fd7-9afb-c01e541as345", - "volume_id": "462dcc2d-130d-4654-8db1-da0df2da6a0d", - "id": "3b8b6631-1cf7-4fd7-9afb-c01e541a073c" - } -} \ No newline at end of file diff --git a/api-ref/source/v3/samples/attachment-update-request.json b/api-ref/source/v3/samples/attachment-update-request.json deleted file mode 100644 index 45c67c183..000000000 --- a/api-ref/source/v3/samples/attachment-update-request.json +++ /dev/null @@ -1,14 +0,0 @@ -{ - "attachment": { - "connector": { - "initiator": "iqn.1993-08.org.debian: 01: cad181614cec", - "ip": "192.168.1.20", - "platform": "x86_64", - "host": "tempest-1", - "os_type": "linux2", - "multipath": false, - "mountpoint": "/dev/vdb", - "mode": "ro" - } - } -} \ No newline at end of file diff --git a/api-ref/source/v3/samples/attachment-update-response.json b/api-ref/source/v3/samples/attachment-update-response.json deleted file mode 100644 index 570801dec..000000000 --- a/api-ref/source/v3/samples/attachment-update-response.json +++ /dev/null @@ -1,12 +0,0 @@ -{ - "attachment": { - "status": "attaching", - "detached_at": "2015-09-16T09:28:52.000000", - "connection_info": {}, - "attached_at": "2015-09-16T09:28:52.000000", - "attach_mode": "ro", - "instance": "3b8b6631-1cf7-4fd7-9afb-c01e541as345", - "volume_id": "462dcc2d-130d-4654-8db1-da0df2da6a0d", - "id": "3b8b6631-1cf7-4fd7-9afb-c01e541a073c" - } -} \ No newline at end of file diff --git a/api-ref/source/v3/samples/backend-capabilities-response.json b/api-ref/source/v3/samples/backend-capabilities-response.json deleted file mode 100644 index dcf213d24..000000000 --- a/api-ref/source/v3/samples/backend-capabilities-response.json +++ /dev/null @@ -1,34 +0,0 @@ -{ - "namespace": "OS::Storage::Capabilities::fake", - "vendor_name": "OpenStack", - "volume_backend_name": "lvmdriver-1", - "pool_name": "pool", - "driver_version": "2.0.0", - "storage_protocol": "iSCSI", - "display_name": "Capabilities of Cinder LVM driver", - 
"description": "These are volume type options provided by Cinder LVM driver, blah, blah.", - "visibility": "public", - "replication_targets": [], - "properties": { - "compression": { - "title": "Compression", - "description": "Enables compression.", - "type": "boolean" - }, - "qos": { - "title": "QoS", - "description": "Enables QoS.", - "type": "boolean" - }, - "replication": { - "title": "Replication", - "description": "Enables replication.", - "type": "boolean" - }, - "thin_provisioning": { - "title": "Thin Provisioning", - "description": "Sets thin provisioning.", - "type": "boolean" - } - } -} diff --git a/api-ref/source/v3/samples/backup-create-request.json b/api-ref/source/v3/samples/backup-create-request.json deleted file mode 100644 index c7f8a74d5..000000000 --- a/api-ref/source/v3/samples/backup-create-request.json +++ /dev/null @@ -1,9 +0,0 @@ -{ - "backup": { - "container": null, - "description": null, - "name": "backup001", - "volume_id": "64f5d2fb-d836-4063-b7e2-544d5c1ff607", - "incremental": true - } -} diff --git a/api-ref/source/v3/samples/backup-create-response.json b/api-ref/source/v3/samples/backup-create-response.json deleted file mode 100644 index 815327fcd..000000000 --- a/api-ref/source/v3/samples/backup-create-response.json +++ /dev/null @@ -1,16 +0,0 @@ -{ - "backup": { - "id": "deac8b8c-35c9-4c71-acaa-889c2d5d5c8e", - "links": [ - { - "href": "http://localhost:8776/v3/c95fc3e4afe248a49a28828f286a7b38/backups/deac8b8c-35c9-4c71-acaa-889c2d5d5c8e", - "rel": "self" - }, - { - "href": "http://localhost:8776/c95fc3e4afe248a49a28828f286a7b38/backups/deac8b8c-35c9-4c71-acaa-889c2d5d5c8e", - "rel": "bookmark" - } - ], - "name": "backup001" - } -} diff --git a/api-ref/source/v3/samples/backup-force-delete-request.json b/api-ref/source/v3/samples/backup-force-delete-request.json deleted file mode 100644 index 5c56464d9..000000000 --- a/api-ref/source/v3/samples/backup-force-delete-request.json +++ /dev/null @@ -1,3 +0,0 @@ -{ - "os-force_delete": 
{} -} diff --git a/api-ref/source/v3/samples/backup-record-export-response.json b/api-ref/source/v3/samples/backup-record-export-response.json deleted file mode 100644 index 8783eeda0..000000000 --- a/api-ref/source/v3/samples/backup-record-export-response.json +++ /dev/null @@ -1,6 +0,0 @@ -{ - "backup-record": { - "backup_service": "cinder.backup.drivers.swift", - "backup_url": "eyJzdGF0" - } -} diff --git a/api-ref/source/v3/samples/backup-record-import-request.json b/api-ref/source/v3/samples/backup-record-import-request.json deleted file mode 100644 index 8783eeda0..000000000 --- a/api-ref/source/v3/samples/backup-record-import-request.json +++ /dev/null @@ -1,6 +0,0 @@ -{ - "backup-record": { - "backup_service": "cinder.backup.drivers.swift", - "backup_url": "eyJzdGF0" - } -} diff --git a/api-ref/source/v3/samples/backup-record-import-response.json b/api-ref/source/v3/samples/backup-record-import-response.json deleted file mode 100644 index 1d6f1798b..000000000 --- a/api-ref/source/v3/samples/backup-record-import-response.json +++ /dev/null @@ -1,16 +0,0 @@ -{ - "backup": { - "id": "deac8b8c-35c9-4c71-acaa-889c2d5d5c8e", - "links": [ - { - "href": "http://localhost:8776/v3/c95fc3e4afe248a49a28828f286a7b38/backups/deac8b8c-35c9-4c71-acaa-889c2d5d5c8e", - "rel": "self" - }, - { - "href": "http://localhost:8776/c95fc3e4afe248a49a28828f286a7b38/backups/deac8b8c-35c9-4c71-acaa-889c2d5d5c8e", - "rel": "bookmark" - } - ], - "name": null - } -} diff --git a/api-ref/source/v3/samples/backup-reset-status-request.json b/api-ref/source/v3/samples/backup-reset-status-request.json deleted file mode 100644 index b18b65a68..000000000 --- a/api-ref/source/v3/samples/backup-reset-status-request.json +++ /dev/null @@ -1,5 +0,0 @@ -{ - "os-reset_status": { - "status": "available" - } -} diff --git a/api-ref/source/v3/samples/backup-restore-request.json b/api-ref/source/v3/samples/backup-restore-request.json deleted file mode 100644 index 2ccb7e516..000000000 --- 
a/api-ref/source/v3/samples/backup-restore-request.json +++ /dev/null @@ -1,6 +0,0 @@ -{ - "restore": { - "name": "vol-01", - "volume_id": "64f5d2fb-d836-4063-b7e2-544d5c1ff607" - } -} diff --git a/api-ref/source/v3/samples/backup-restore-response.json b/api-ref/source/v3/samples/backup-restore-response.json deleted file mode 100644 index a344ea56c..000000000 --- a/api-ref/source/v3/samples/backup-restore-response.json +++ /dev/null @@ -1,6 +0,0 @@ -{ - "restore": { - "backup_id": "2ef47aee-8844-490c-804d-2a8efe561c65", - "volume_id": "795114e8-7489-40be-a978-83797f2c1dd3" - } -} diff --git a/api-ref/source/v3/samples/backup-show-response.json b/api-ref/source/v3/samples/backup-show-response.json deleted file mode 100644 index 2b7a2dd86..000000000 --- a/api-ref/source/v3/samples/backup-show-response.json +++ /dev/null @@ -1,29 +0,0 @@ -{ - "backup": { - "availability_zone": "az1", - "container": "volumebackups", - "created_at": "2013-04-02T10:35:27.000000", - "description": null, - "fail_reason": null, - "id": "2ef47aee-8844-490c-804d-2a8efe561c65", - "links": [ - { - "href": "http://localhost:8776/v1/c95fc3e4afe248a49a28828f286a7b38/backups/2ef47aee-8844-490c-804d-2a8efe561c65", - "rel": "self" - }, - { - "href": "http://localhost:8776/c95fc3e4afe248a49a28828f286a7b38/backups/2ef47aee-8844-490c-804d-2a8efe561c65", - "rel": "bookmark" - } - ], - "name": "backup001", - "object_count": 22, - "os-backup-project-attr:project_id": "2c67a14be9314c5dae2ee6c4ec90cf0b", - "size": 1, - "status": "available", - "updated_at": "2013-04-02T10:35:27.000000", - "volume_id": "e5185058-943a-4cb4-96d9-72c184c337d6", - "is_incremental": true, - "has_dependent_backups": false - } -} diff --git a/api-ref/source/v3/samples/backup-update-request.json b/api-ref/source/v3/samples/backup-update-request.json deleted file mode 100644 index bcb52e5f8..000000000 --- a/api-ref/source/v3/samples/backup-update-request.json +++ /dev/null @@ -1,6 +0,0 @@ -{ - "backup":{ - "name":"test", - 
"description": "this is a backup" - } -} \ No newline at end of file diff --git a/api-ref/source/v3/samples/backup-update-response.json b/api-ref/source/v3/samples/backup-update-response.json deleted file mode 100644 index 693377b3d..000000000 --- a/api-ref/source/v3/samples/backup-update-response.json +++ /dev/null @@ -1,16 +0,0 @@ -{ - "backup": { - "id": "fad41a83-203d-4998-9d3b-444fd5da5aba", - "links": [ - { - "href": "http://10.3.150.25:8776/v3/a7090a26bc554d93aa845a4d41808251/backups/fad41a83-203d-4998-9d3b-444fd5da5aba", - "rel": "self" - }, - { - "href": "http://10.3.150.25:8776/a7090a26bc554d93aa845a4d41808251/backups/fad41a83-203d-4998-9d3b-444fd5da5aba", - "rel": "bookmark" - } - ], - "name": "test" - } -} \ No newline at end of file diff --git a/api-ref/source/v3/samples/backups-list-detailed-response.json b/api-ref/source/v3/samples/backups-list-detailed-response.json deleted file mode 100644 index d4cb687bf..000000000 --- a/api-ref/source/v3/samples/backups-list-detailed-response.json +++ /dev/null @@ -1,58 +0,0 @@ -{ - "backups": [ - { - "availability_zone": "az1", - "container": "volumebackups", - "created_at": "2013-04-02T10:35:27.000000", - "description": null, - "fail_reason": null, - "id": "2ef47aee-8844-490c-804d-2a8efe561c65", - "links": [ - { - "href": "http://localhost:8776/v1/c95fc3e4afe248a49a28828f286a7b38/backups/2ef47aee-8844-490c-804d-2a8efe561c65", - "rel": "self" - }, - { - "href": "http://localhost:8776/c95fc3e4afe248a49a28828f286a7b38/backups/2ef47aee-8844-490c-804d-2a8efe561c65", - "rel": "bookmark" - } - ], - "name": "backup001", - "object_count": 22, - "os-backup-project-attr:project_id": "2c67a14be9314c5dae2ee6c4ec90cf0b", - "size": 1, - "status": "available", - "updated_at": "2013-04-02T10:35:27.000000", - "volume_id": "e5185058-943a-4cb4-96d9-72c184c337d6", - "is_incremental": true, - "has_dependent_backups": false - }, - { - "availability_zone": "az1", - "container": "volumebackups", - "created_at": 
"2013-04-02T10:21:48.000000", - "description": null, - "fail_reason": null, - "id": "4dbf0ec2-0b57-4669-9823-9f7c76f2b4f8", - "links": [ - { - "href": "http://localhost:8776/v1/c95fc3e4afe248a49a28828f286a7b38/backups/4dbf0ec2-0b57-4669-9823-9f7c76f2b4f8", - "rel": "self" - }, - { - "href": "http://localhost:8776/c95fc3e4afe248a49a28828f286a7b38/backups/4dbf0ec2-0b57-4669-9823-9f7c76f2b4f8", - "rel": "bookmark" - } - ], - "name": "backup002", - "object_count": 22, - "os-backup-project-attr:project_id": "2c67a14be9314c5dae2ee6c4ec90cf0b", - "size": 1, - "status": "available", - "updated_at": "2013-04-02T10:21:48.000000", - "volume_id": "e5185058-943a-4cb4-96d9-72c184c337d6", - "is_incremental": true, - "has_dependent_backups": false - } - ] -} diff --git a/api-ref/source/v3/samples/backups-list-response.json b/api-ref/source/v3/samples/backups-list-response.json deleted file mode 100644 index 8dd7d785a..000000000 --- a/api-ref/source/v3/samples/backups-list-response.json +++ /dev/null @@ -1,32 +0,0 @@ -{ - "backups": [ - { - "id": "2ef47aee-8844-490c-804d-2a8efe561c65", - "links": [ - { - "href": "http://localhost:8776/v1/c95fc3e4afe248a49a28828f286a7b38/backups/2ef47aee-8844-490c-804d-2a8efe561c65", - "rel": "self" - }, - { - "href": "http://localhost:8776/c95fc3e4afe248a49a28828f286a7b38/backups/2ef47aee-8844-490c-804d-2a8efe561c65", - "rel": "bookmark" - } - ], - "name": "backup001" - }, - { - "id": "4dbf0ec2-0b57-4669-9823-9f7c76f2b4f8", - "links": [ - { - "href": "http://localhost:8776/v1/c95fc3e4afe248a49a28828f286a7b38/backups/4dbf0ec2-0b57-4669-9823-9f7c76f2b4f8", - "rel": "self" - }, - { - "href": "http://localhost:8776/c95fc3e4afe248a49a28828f286a7b38/backups/4dbf0ec2-0b57-4669-9823-9f7c76f2b4f8", - "rel": "bookmark" - } - ], - "name": "backup002" - } - ] -} diff --git a/api-ref/source/v3/samples/cgsnapshots-create-request.json b/api-ref/source/v3/samples/cgsnapshots-create-request.json deleted file mode 100644 index 36d6f4537..000000000 --- 
a/api-ref/source/v3/samples/cgsnapshots-create-request.json +++ /dev/null @@ -1,10 +0,0 @@ -{ - "cgsnapshot": { - "consistencygroup_id": "6f519a48-3183-46cf-a32f-41815f814546", - "name": "firstcg", - "description": "first consistency group", - "user_id": "6f519a48-3183-46cf-a32f-41815f814444", - "project_id": "6f519a48-3183-46cf-a32f-41815f815555", - "status": "creating" - } -} diff --git a/api-ref/source/v3/samples/cgsnapshots-create-response.json b/api-ref/source/v3/samples/cgsnapshots-create-response.json deleted file mode 100644 index 6d24a97f1..000000000 --- a/api-ref/source/v3/samples/cgsnapshots-create-response.json +++ /dev/null @@ -1,6 +0,0 @@ -{ - "cgsnapshot": { - "id": "6f519a48-3183-46cf-a32f-41815f816666", - "name": "firstcg" - } -} diff --git a/api-ref/source/v3/samples/cgsnapshots-list-detailed-response.json b/api-ref/source/v3/samples/cgsnapshots-list-detailed-response.json deleted file mode 100644 index 93ad12870..000000000 --- a/api-ref/source/v3/samples/cgsnapshots-list-detailed-response.json +++ /dev/null @@ -1,20 +0,0 @@ -{ - "cgsnapshots": [ - { - "id": "6f519a48-3183-46cf-a32f-41815f813986", - "consistencygroup_id": "6f519a48-3183-46cf-a32f-41815f814444", - "status": "available", - "created_at": "2015-09-16T09:28:52.000000", - "name": "my-cg1", - "description": "my first consistency group" - }, - { - "id": "aed36625-a6d7-4681-ba59-c7ba3d18c148", - "consistencygroup_id": "aed36625-a6d7-4681-ba59-c7ba3d18dddd", - "status": "error", - "created_at": "2015-09-16T09:31:15.000000", - "name": "my-cg2", - "description": "Edited description" - } - ] -} diff --git a/api-ref/source/v3/samples/cgsnapshots-list-response.json b/api-ref/source/v3/samples/cgsnapshots-list-response.json deleted file mode 100644 index 726aa803a..000000000 --- a/api-ref/source/v3/samples/cgsnapshots-list-response.json +++ /dev/null @@ -1,12 +0,0 @@ -{ - "cgsnapshots": [ - { - "id": "6f519a48-3183-46cf-a32f-41815f813986", - "name": "my-cg1" - }, - { - "id": 
"aed36625-a6d7-4681-ba59-c7ba3d18c148", - "name": "my-cg2" - } - ] -} diff --git a/api-ref/source/v3/samples/cgsnapshots-show-response.json b/api-ref/source/v3/samples/cgsnapshots-show-response.json deleted file mode 100644 index 632a5afba..000000000 --- a/api-ref/source/v3/samples/cgsnapshots-show-response.json +++ /dev/null @@ -1,10 +0,0 @@ -{ - "cgsnapshot": { - "id": "6f519a48-3183-46cf-a32f-41815f813986", - "consistencygroup_id": "6f519a48-3183-46cf-a32f-41815f814444", - "status": "available", - "created_at": "2015-09-16T09:28:52.000000", - "name": "my-cg1", - "description": "my first consistency group" - } -} diff --git a/api-ref/source/v3/samples/consistency-group-create-from-src-request.json b/api-ref/source/v3/samples/consistency-group-create-from-src-request.json deleted file mode 100644 index ad25c5d02..000000000 --- a/api-ref/source/v3/samples/consistency-group-create-from-src-request.json +++ /dev/null @@ -1,11 +0,0 @@ -{ - "consistencygroup-from-src": { - "name": "firstcg", - "description": "first consistency group", - "cgsnapshot_id": "6f519a48-3183-46cf-a32f-41815f813986", - "source_cgid": "6f519a48-3183-46cf-a32f-41815f814546", - "user_id": "6f519a48-3183-46cf-a32f-41815f815555", - "project_id": "6f519a48-3183-46cf-a32f-41815f814444", - "status": "creating" - } -} diff --git a/api-ref/source/v3/samples/consistency-group-create-request.json b/api-ref/source/v3/samples/consistency-group-create-request.json deleted file mode 100644 index 8c9fbc2b0..000000000 --- a/api-ref/source/v3/samples/consistency-group-create-request.json +++ /dev/null @@ -1,8 +0,0 @@ -{ - "consistencygroup": { - "name": "firstcg", - "description": "first consistency group", - "volume_types": "type1,type2", - "availability_zone": "az0" - } -} diff --git a/api-ref/source/v3/samples/consistency-group-create-response.json b/api-ref/source/v3/samples/consistency-group-create-response.json deleted file mode 100644 index 15a5ec02a..000000000 --- 
a/api-ref/source/v3/samples/consistency-group-create-response.json +++ /dev/null @@ -1,11 +0,0 @@ -{ - "consistencygroup": { - "status": "error", - "description": "first consistency group", - "availability_zone": "az0", - "created_at": "2016-08-19T19:32:19.000000", - "volume_types": ["type1", "type2"], - "id": "63d1a274-de38-4384-a97e-475306777027", - "name": "firstcg" - } -} diff --git a/api-ref/source/v3/samples/consistency-group-delete-request.json b/api-ref/source/v3/samples/consistency-group-delete-request.json deleted file mode 100644 index 8ad8745e7..000000000 --- a/api-ref/source/v3/samples/consistency-group-delete-request.json +++ /dev/null @@ -1,5 +0,0 @@ -{ - "consistencygroup": { - "force": false - } -} diff --git a/api-ref/source/v3/samples/consistency-group-show-response.json b/api-ref/source/v3/samples/consistency-group-show-response.json deleted file mode 100644 index 3cbb87d74..000000000 --- a/api-ref/source/v3/samples/consistency-group-show-response.json +++ /dev/null @@ -1,13 +0,0 @@ -{ - "consistencygroup": { - "id": "6f519a48-3183-46cf-a32f-41815f813986", - "status": "available", - "availability_zone": "az1", - "created_at": "2015-09-16T09:28:52.000000", - "name": "my-cg1", - "description": "my first consistency group", - "volume_types": [ - "123456" - ] - } -} diff --git a/api-ref/source/v3/samples/consistency-group-update-request.json b/api-ref/source/v3/samples/consistency-group-update-request.json deleted file mode 100644 index 945465516..000000000 --- a/api-ref/source/v3/samples/consistency-group-update-request.json +++ /dev/null @@ -1,8 +0,0 @@ -{ - "consistencygroup": { - "name": "my_cg", - "description": "My consistency group", - "add_volumes": "volume-uuid-1,volume-uuid-2", - "remove_volumes": "volume-uuid-8,volume-uuid-9" - } -} diff --git a/api-ref/source/v3/samples/consistency-groups-list-detailed-response.json b/api-ref/source/v3/samples/consistency-groups-list-detailed-response.json deleted file mode 100644 index 
618c65882..000000000 --- a/api-ref/source/v3/samples/consistency-groups-list-detailed-response.json +++ /dev/null @@ -1,26 +0,0 @@ -{ - "consistencygroups": [ - { - "id": "6f519a48-3183-46cf-a32f-41815f813986", - "status": "available", - "availability_zone": "az1", - "created_at": "2015-09-16T09:28:52.000000", - "name": "my-cg1", - "description": "my first consistency group", - "volume_types": [ - "123456" - ] - }, - { - "id": "aed36625-a6d7-4681-ba59-c7ba3d18c148", - "status": "error", - "availability_zone": "az2", - "created_at": "2015-09-16T09:31:15.000000", - "name": "my-cg2", - "description": "Edited description", - "volume_types": [ - "234567" - ] - } - ] -} diff --git a/api-ref/source/v3/samples/consistency-groups-list-response.json b/api-ref/source/v3/samples/consistency-groups-list-response.json deleted file mode 100644 index a53863f43..000000000 --- a/api-ref/source/v3/samples/consistency-groups-list-response.json +++ /dev/null @@ -1,12 +0,0 @@ -{ - "consistencygroups": [ - { - "id": "6f519a48-3183-46cf-a32f-41815f813986", - "name": "my-cg1" - }, - { - "id": "aed36625-a6d7-4681-ba59-c7ba3d18c148", - "name": "my-cg2" - } - ] -} diff --git a/api-ref/source/v3/samples/encryption-type-create-request.json b/api-ref/source/v3/samples/encryption-type-create-request.json deleted file mode 100644 index f93c14261..000000000 --- a/api-ref/source/v3/samples/encryption-type-create-request.json +++ /dev/null @@ -1,8 +0,0 @@ -{ - "encryption":{ - "key_size": 128, - "provider": "nova.volume.encryptors.luks.LuksEncryptor", - "control_location":"front-end", - "cipher": "aes-xts-plain64" - } -} diff --git a/api-ref/source/v3/samples/encryption-type-create-response.json b/api-ref/source/v3/samples/encryption-type-create-response.json deleted file mode 100644 index 3e2a2aeec..000000000 --- a/api-ref/source/v3/samples/encryption-type-create-response.json +++ /dev/null @@ -1,10 +0,0 @@ -{ - "encryption": { - "volume_type_id": "2d29462d-76cb-417c-8a9f-fb23140f1577", - 
"control_location": "front-end", - "encryption_id": "81e069c6-7394-4856-8df7-3b237ca61f74", - "key_size": 128, - "provider": "nova.volume.encryptors.luks.LuksEncryptor", - "cipher": "aes-xts-plain64" - } -} diff --git a/api-ref/source/v3/samples/encryption-type-show-response.json b/api-ref/source/v3/samples/encryption-type-show-response.json deleted file mode 100644 index 83ecab88b..000000000 --- a/api-ref/source/v3/samples/encryption-type-show-response.json +++ /dev/null @@ -1,12 +0,0 @@ -{ - "volume_type_id": "2d29462d-76cb-417c-8a9f-fb23140f1577", - "control_location": "front-end", - "deleted": false, - "created_at": "2016-12-28T02:32:25.000000", - "updated_at": null, - "encryption_id": "81e069c6-7394-4856-8df7-3b237ca61f74", - "key_size": 128, - "provider": "nova.volume.encryptors.luks.LuksEncryptor", - "deleted_at": null, - "cipher": "aes-xts-plain64" -} \ No newline at end of file diff --git a/api-ref/source/v3/samples/encryption-type-update-request.json b/api-ref/source/v3/samples/encryption-type-update-request.json deleted file mode 100644 index 7a587b771..000000000 --- a/api-ref/source/v3/samples/encryption-type-update-request.json +++ /dev/null @@ -1,7 +0,0 @@ -{ - "encryption":{ - "key_size": 64, - "provider": "cinder.keymgr.conf_key_mgr.ConfKeyManager", - "control_location":"back-end" - } -} \ No newline at end of file diff --git a/api-ref/source/v3/samples/encryption-type-update-response.json b/api-ref/source/v3/samples/encryption-type-update-response.json deleted file mode 100644 index 7a587b771..000000000 --- a/api-ref/source/v3/samples/encryption-type-update-response.json +++ /dev/null @@ -1,7 +0,0 @@ -{ - "encryption":{ - "key_size": 64, - "provider": "cinder.keymgr.conf_key_mgr.ConfKeyManager", - "control_location":"back-end" - } -} \ No newline at end of file diff --git a/api-ref/source/v3/samples/extensions-list-response.json b/api-ref/source/v3/samples/extensions-list-response.json deleted file mode 100644 index 9ae83518c..000000000 --- 
a/api-ref/source/v3/samples/extensions-list-response.json +++ /dev/null @@ -1,212 +0,0 @@ -{ - "extensions": [ - { - "updated": "2013-04-18T00:00:00+00:00", - "name": "SchedulerHints", - "links": [], - "namespace": "http://docs.openstack.org/block-service/ext/scheduler-hints/api/v3", - "alias": "OS-SCH-HNT", - "description": "Pass arbitrary key/value pairs to the scheduler." - }, - { - "updated": "2011-06-29T00:00:00+00:00", - "name": "Hosts", - "links": [], - "namespace": "http://docs.openstack.org/volume/ext/hosts/api/v1.1", - "alias": "os-hosts", - "description": "Admin-only host administration." - }, - { - "updated": "2011-11-03T00:00:00+00:00", - "name": "VolumeTenantAttribute", - "links": [], - "namespace": "http://docs.openstack.org/volume/ext/volume_tenant_attribute/api/v1", - "alias": "os-vol-tenant-attr", - "description": "Expose the internal project_id as an attribute of a volume." - }, - { - "updated": "2011-08-08T00:00:00+00:00", - "name": "Quotas", - "links": [], - "namespace": "http://docs.openstack.org/volume/ext/quotas-sets/api/v1.1", - "alias": "os-quota-sets", - "description": "Quota management support." - }, - { - "updated": "2011-08-24T00:00:00+00:00", - "name": "TypesManage", - "links": [], - "namespace": "http://docs.openstack.org/volume/ext/types-manage/api/v1", - "alias": "os-types-manage", - "description": "Types manage support." - }, - { - "updated": "2013-07-10T00:00:00+00:00", - "name": "VolumeEncryptionMetadata", - "links": [], - "namespace": "http://docs.openstack.org/volume/ext/os-volume-encryption-metadata/api/v1", - "alias": "os-volume-encryption-metadata", - "description": "Volume encryption metadata retrieval support." - }, - { - "updated": "2012-12-12T00:00:00+00:00", - "name": "Backups", - "links": [], - "namespace": "http://docs.openstack.org/volume/ext/backups/api/v1", - "alias": "backups", - "description": "Backups support." 
- }, - { - "updated": "2013-07-16T00:00:00+00:00", - "name": "SnapshotActions", - "links": [], - "namespace": "http://docs.openstack.org/volume/ext/snapshot-actions/api/v1.1", - "alias": "os-snapshot-actions", - "description": "Enable snapshot manager actions." - }, - { - "updated": "2012-05-31T00:00:00+00:00", - "name": "VolumeActions", - "links": [], - "namespace": "http://docs.openstack.org/volume/ext/volume-actions/api/v1.1", - "alias": "os-volume-actions", - "description": "Enable volume actions\n " - }, - { - "updated": "2013-10-03T00:00:00+00:00", - "name": "UsedLimits", - "links": [], - "namespace": "http://docs.openstack.org/volume/ext/used-limits/api/v1.1", - "alias": "os-used-limits", - "description": "Provide data on limited resources that are being used." - }, - { - "updated": "2012-05-31T00:00:00+00:00", - "name": "VolumeUnmanage", - "links": [], - "namespace": "http://docs.openstack.org/volume/ext/volume-unmanage/api/v1.1", - "alias": "os-volume-unmanage", - "description": "Enable volume unmanage operation." - }, - { - "updated": "2011-11-03T00:00:00+00:00", - "name": "VolumeHostAttribute", - "links": [], - "namespace": "http://docs.openstack.org/volume/ext/volume_host_attribute/api/v1", - "alias": "os-vol-host-attr", - "description": "Expose host as an attribute of a volume." - }, - { - "updated": "2013-07-01T00:00:00+00:00", - "name": "VolumeTypeEncryption", - "links": [], - "namespace": "http://docs.openstack.org/volume/ext/volume-type-encryption/api/v1", - "alias": "encryption", - "description": "Encryption support for volume types." - }, - { - "updated": "2013-06-27T00:00:00+00:00", - "name": "AvailabilityZones", - "links": [], - "namespace": "http://docs.openstack.org/volume/ext/os-availability-zone/api/v1", - "alias": "os-availability-zone", - "description": "Describe Availability Zones." 
- }, - { - "updated": "2013-08-02T00:00:00+00:00", - "name": "Qos_specs_manage", - "links": [], - "namespace": "http://docs.openstack.org/volume/ext/qos-specs/api/v1", - "alias": "qos-specs", - "description": "QoS specs support." - }, - { - "updated": "2011-08-24T00:00:00+00:00", - "name": "TypesExtraSpecs", - "links": [], - "namespace": "http://docs.openstack.org/volume/ext/types-extra-specs/api/v1", - "alias": "os-types-extra-specs", - "description": "Type extra specs support." - }, - { - "updated": "2013-08-08T00:00:00+00:00", - "name": "VolumeMigStatusAttribute", - "links": [], - "namespace": "http://docs.openstack.org/volume/ext/volume_mig_status_attribute/api/v1", - "alias": "os-vol-mig-status-attr", - "description": "Expose migration_status as an attribute of a volume." - }, - { - "updated": "2012-08-13T00:00:00+00:00", - "name": "CreateVolumeExtension", - "links": [], - "namespace": "http://docs.openstack.org/volume/ext/image-create/api/v1", - "alias": "os-image-create", - "description": "Allow creating a volume from an image in the Create Volume v1 API." - }, - { - "updated": "2014-01-10T00:00:00-00:00", - "name": "ExtendedServices", - "links": [], - "namespace": "http://docs.openstack.org/volume/ext/extended_services/api/v3", - "alias": "os-extended-services", - "description": "Extended services support." - }, - { - "updated": "2012-06-19T00:00:00+00:00", - "name": "ExtendedSnapshotAttributes", - "links": [], - "namespace": "http://docs.openstack.org/volume/ext/extended_snapshot_attributes/api/v1", - "alias": "os-extended-snapshot-attributes", - "description": "Extended SnapshotAttributes support." - }, - { - "updated": "2012-12-07T00:00:00+00:00", - "name": "VolumeImageMetadata", - "links": [], - "namespace": "http://docs.openstack.org/volume/ext/volume_image_metadata/api/v1", - "alias": "os-vol-image-meta", - "description": "Show image metadata associated with the volume." 
- }, - { - "updated": "2012-03-12T00:00:00+00:00", - "name": "QuotaClasses", - "links": [], - "namespace": "http://docs.openstack.org/volume/ext/quota-classes-sets/api/v1.1", - "alias": "os-quota-class-sets", - "description": "Quota classes management support." - }, - { - "updated": "2013-05-29T00:00:00+00:00", - "name": "VolumeTransfer", - "links": [], - "namespace": "http://docs.openstack.org/volume/ext/volume-transfer/api/v1.1", - "alias": "os-volume-transfer", - "description": "Volume transfer management support." - }, - { - "updated": "2014-02-10T00:00:00+00:00", - "name": "VolumeManage", - "links": [], - "namespace": "http://docs.openstack.org/volume/ext/os-volume-manage/api/v1", - "alias": "os-volume-manage", - "description": "Allows existing backend storage to be 'managed' by Cinder." - }, - { - "updated": "2012-08-25T00:00:00+00:00", - "name": "AdminActions", - "links": [], - "namespace": "http://docs.openstack.org/volume/ext/admin-actions/api/v1.1", - "alias": "os-admin-actions", - "description": "Enable admin actions." - }, - { - "updated": "2012-10-28T00:00:00-00:00", - "name": "Services", - "links": [], - "namespace": "http://docs.openstack.org/volume/ext/services/api/v3", - "alias": "os-services", - "description": "Services support." 
- } - ] -} diff --git a/api-ref/source/v3/samples/group-create-from-src-request.json b/api-ref/source/v3/samples/group-create-from-src-request.json deleted file mode 100644 index 336b648bf..000000000 --- a/api-ref/source/v3/samples/group-create-from-src-request.json +++ /dev/null @@ -1,11 +0,0 @@ -{ - "create-from-src": { - "name": "first_group", - "description": "first group", - "group_snapshot_id": "6f519a48-3183-46cf-a32f-41815f813986", - "source_group_id": None, - "user_id": "6f519a48-3183-46cf-a32f-41815f815555", - "project_id": "6f519a48-3183-46cf-a32f-41815f814444", - "status": "creating" - } -} diff --git a/api-ref/source/v3/samples/group-create-request.json b/api-ref/source/v3/samples/group-create-request.json deleted file mode 100644 index 347611097..000000000 --- a/api-ref/source/v3/samples/group-create-request.json +++ /dev/null @@ -1,12 +0,0 @@ -{ - "group": { - "name": "first_group", - "description": "first group", - "group_type": "29514915-5208-46ab-9ece-1cc4688ad0c1", - "volume_types": [ - "4e9e6d23-eed0-426d-b90a-28f87a94b6fe", - "c4daaf47-c530-4901-b28e-f5f0a359c4e6" - ], - "availability_zone": "az0", - } -} diff --git a/api-ref/source/v3/samples/group-create-response.json b/api-ref/source/v3/samples/group-create-response.json deleted file mode 100644 index b280d32cc..000000000 --- a/api-ref/source/v3/samples/group-create-response.json +++ /dev/null @@ -1,6 +0,0 @@ -{ - "group": { - "id": "6f519a48-3183-46cf-a32f-41815f816666", - "name": "first_group" - } -} diff --git a/api-ref/source/v3/samples/group-delete-request.json b/api-ref/source/v3/samples/group-delete-request.json deleted file mode 100644 index 27775df7a..000000000 --- a/api-ref/source/v3/samples/group-delete-request.json +++ /dev/null @@ -1,5 +0,0 @@ -{ - "delete": { - "delete-volumes": False - } -} diff --git a/api-ref/source/v3/samples/group-reset-status-request.json b/api-ref/source/v3/samples/group-reset-status-request.json deleted file mode 100644 index 413483087..000000000 --- 
a/api-ref/source/v3/samples/group-reset-status-request.json +++ /dev/null @@ -1,5 +0,0 @@ -{ - "reset_status": { - "status": "available" - } -} \ No newline at end of file diff --git a/api-ref/source/v3/samples/group-show-response.json b/api-ref/source/v3/samples/group-show-response.json deleted file mode 100644 index 89ca220b4..000000000 --- a/api-ref/source/v3/samples/group-show-response.json +++ /dev/null @@ -1,16 +0,0 @@ -{ - "group": { - "id": "6f519a48-3183-46cf-a32f-41815f813986", - "status": "available", - "availability_zone": "az1", - "created_at": "2015-09-16T09:28:52.000000", - "name": "first_group", - "description": "my first group", - "group_type": "29514915-5208-46ab-9ece-1cc4688ad0c1", - "volume_types": [ - "c4daaf47-c530-4901-b28e-f5f0a359c4e6" - ], - "group_snapshot_id": None, - "source_group_id": None - } -} diff --git a/api-ref/source/v3/samples/group-snapshot-reset-status-request.json b/api-ref/source/v3/samples/group-snapshot-reset-status-request.json deleted file mode 100644 index 413483087..000000000 --- a/api-ref/source/v3/samples/group-snapshot-reset-status-request.json +++ /dev/null @@ -1,5 +0,0 @@ -{ - "reset_status": { - "status": "available" - } -} \ No newline at end of file diff --git a/api-ref/source/v3/samples/group-snapshots-create-request.json b/api-ref/source/v3/samples/group-snapshots-create-request.json deleted file mode 100644 index 6b4a7e144..000000000 --- a/api-ref/source/v3/samples/group-snapshots-create-request.json +++ /dev/null @@ -1,7 +0,0 @@ -{ - "group_snapshot": { - "group_id": "6f519a48-3183-46cf-a32f-41815f814546", - "name": "first_group_snapshot", - "description": "first group snapshot", - } -} diff --git a/api-ref/source/v3/samples/group-snapshots-create-response.json b/api-ref/source/v3/samples/group-snapshots-create-response.json deleted file mode 100644 index 95e8c7e1f..000000000 --- a/api-ref/source/v3/samples/group-snapshots-create-response.json +++ /dev/null @@ -1,6 +0,0 @@ -{ - "group_snapshot": { - "id": 
"6f519a48-3183-46cf-a32f-41815f816666", - "name": "first_group_snapshot" - } -} diff --git a/api-ref/source/v3/samples/group-snapshots-list-detailed-response.json b/api-ref/source/v3/samples/group-snapshots-list-detailed-response.json deleted file mode 100644 index a5af02899..000000000 --- a/api-ref/source/v3/samples/group-snapshots-list-detailed-response.json +++ /dev/null @@ -1,20 +0,0 @@ -{ - "group_snapshots": [ - { - "id": "6f519a48-3183-46cf-a32f-41815f813986", - "group_id": "6f519a48-3183-46cf-a32f-41815f814444", - "status": "available", - "created_at": "2015-09-16T09:28:52.000000", - "name": "my_group_snapshot1", - "description": "my first group snapshot" - }, - { - "id": "aed36625-a6d7-4681-ba59-c7ba3d18c148", - "group_id": "aed36625-a6d7-4681-ba59-c7ba3d18dddd", - "status": "error", - "created_at": "2015-09-16T09:31:15.000000", - "name": "my_group_snapshot2", - "description": "Edited description" - } - ] -} diff --git a/api-ref/source/v3/samples/group-snapshots-list-response.json b/api-ref/source/v3/samples/group-snapshots-list-response.json deleted file mode 100644 index 40a214c4c..000000000 --- a/api-ref/source/v3/samples/group-snapshots-list-response.json +++ /dev/null @@ -1,12 +0,0 @@ -{ - "group_snapshots": [ - { - "id": "6f519a48-3183-46cf-a32f-41815f813986", - "name": "my_group_snapshot1" - }, - { - "id": "aed36625-a6d7-4681-ba59-c7ba3d18c148", - "name": "my_group_snapshot2" - } - ] -} diff --git a/api-ref/source/v3/samples/group-snapshots-show-response.json b/api-ref/source/v3/samples/group-snapshots-show-response.json deleted file mode 100644 index 256224959..000000000 --- a/api-ref/source/v3/samples/group-snapshots-show-response.json +++ /dev/null @@ -1,10 +0,0 @@ -{ - "group_snapshot": { - "id": "6f519a48-3183-46cf-a32f-41815f813986", - "group_id": "6f519a48-3183-46cf-a32f-41815f814444", - "status": "available", - "created_at": "2015-09-16T09:28:52.000000", - "name": "my_group_snapshot1", - "description": "my first group snapshot" - } -} diff 
--git a/api-ref/source/v3/samples/group-type-create-request.json b/api-ref/source/v3/samples/group-type-create-request.json deleted file mode 100644 index 6997ad147..000000000 --- a/api-ref/source/v3/samples/group-type-create-request.json +++ /dev/null @@ -1,10 +0,0 @@ -{ - "group_type": { - "name": "grp-type-001", - "description": "group type 0001", - "is_public": True, - "group_specs": { - "consistent_group_snapshot_enabled": " False" - } - } -} diff --git a/api-ref/source/v3/samples/group-type-show-request.json b/api-ref/source/v3/samples/group-type-show-request.json deleted file mode 100644 index 77ff6329c..000000000 --- a/api-ref/source/v3/samples/group-type-show-request.json +++ /dev/null @@ -1,9 +0,0 @@ -{ - "group_type": { - "id": "289da7f8-6440-407c-9fb4-7db01ec49164", - "name": "grp-type-001", - "group_specs": { - "consistent_group_snapshot_enabled": " False" - } - } -} diff --git a/api-ref/source/v3/samples/group-type-show-response.json b/api-ref/source/v3/samples/group-type-show-response.json deleted file mode 100644 index 91b62e985..000000000 --- a/api-ref/source/v3/samples/group-type-show-response.json +++ /dev/null @@ -1,11 +0,0 @@ -{ - "group_type": { - "id": "6685584b-1eac-4da6-b5c3-555430cf68ff", - "name": "grp-type-001", - "description": "group type 001", - "is_public": "true", - "group_specs": { - "consistent_group_snapshot_enabled": " False" - } - } -} diff --git a/api-ref/source/v3/samples/group-type-specs-create-request.json b/api-ref/source/v3/samples/group-type-specs-create-request.json deleted file mode 100644 index 03b7282d9..000000000 --- a/api-ref/source/v3/samples/group-type-specs-create-request.json +++ /dev/null @@ -1,6 +0,0 @@ -{ - "group_specs": { - "key1": "value1", - "key2": "value2" - } -} diff --git a/api-ref/source/v3/samples/group-type-specs-create-response.json b/api-ref/source/v3/samples/group-type-specs-create-response.json deleted file mode 100644 index 03b7282d9..000000000 --- 
a/api-ref/source/v3/samples/group-type-specs-create-response.json +++ /dev/null @@ -1,6 +0,0 @@ -{ - "group_specs": { - "key1": "value1", - "key2": "value2" - } -} diff --git a/api-ref/source/v3/samples/group-type-specs-list-response.json b/api-ref/source/v3/samples/group-type-specs-list-response.json deleted file mode 100644 index 03b7282d9..000000000 --- a/api-ref/source/v3/samples/group-type-specs-list-response.json +++ /dev/null @@ -1,6 +0,0 @@ -{ - "group_specs": { - "key1": "value1", - "key2": "value2" - } -} diff --git a/api-ref/source/v3/samples/group-type-specs-show-response.json b/api-ref/source/v3/samples/group-type-specs-show-response.json deleted file mode 100644 index 513df0922..000000000 --- a/api-ref/source/v3/samples/group-type-specs-show-response.json +++ /dev/null @@ -1,3 +0,0 @@ -{ - "key1": "value1" -} diff --git a/api-ref/source/v3/samples/group-type-specs-update-response.json b/api-ref/source/v3/samples/group-type-specs-update-response.json deleted file mode 100644 index 513df0922..000000000 --- a/api-ref/source/v3/samples/group-type-specs-update-response.json +++ /dev/null @@ -1,3 +0,0 @@ -{ - "key1": "value1" -} diff --git a/api-ref/source/v3/samples/group-type-update-request.json b/api-ref/source/v3/samples/group-type-update-request.json deleted file mode 100644 index 5478412ef..000000000 --- a/api-ref/source/v3/samples/group-type-update-request.json +++ /dev/null @@ -1,7 +0,0 @@ -{ - "group_type": { - "name": "grp-type-001", - "description": "group type 0001", - "is_public": true, - } -} diff --git a/api-ref/source/v3/samples/group-types-list-response.json b/api-ref/source/v3/samples/group-types-list-response.json deleted file mode 100644 index d760604f7..000000000 --- a/api-ref/source/v3/samples/group-types-list-response.json +++ /dev/null @@ -1,16 +0,0 @@ -{ - "group_types": [ - { - "group_specs": { - "consistent_group_snapshot_enabled": " False" - }, - "id": "6685584b-1eac-4da6-b5c3-555430cf68ff", - "name": "group_type1" - }, - { - 
"group_specs": {}, - "id": "8eb69a46-df97-4e41-9586-9a40a7533803", - "name": "group_type2" - } - ] -} diff --git a/api-ref/source/v3/samples/group-update-request.json b/api-ref/source/v3/samples/group-update-request.json deleted file mode 100644 index 14004803a..000000000 --- a/api-ref/source/v3/samples/group-update-request.json +++ /dev/null @@ -1,8 +0,0 @@ -{ - "group": { - "name": "my_group", - "description": "My group", - "add_volumes": "volume-uuid-1,volume-uuid-2", - "remove_volumes": "volume-uuid-8,volume-uuid-9" - } -} diff --git a/api-ref/source/v3/samples/groups-list-detailed-response.json b/api-ref/source/v3/samples/groups-list-detailed-response.json deleted file mode 100644 index 467951413..000000000 --- a/api-ref/source/v3/samples/groups-list-detailed-response.json +++ /dev/null @@ -1,29 +0,0 @@ -{ - "groups": [ - { - "id": "6f519a48-3183-46cf-a32f-41815f813986", - "status": "available", - "availability_zone": "az1", - "created_at": "2015-09-16T09:28:52.000000", - "name": "my_group1", - "description": "my first group", - "group_type": "29514915-5208-46ab-9ece-1cc4688ad0c1", - "volume_types": [ - "4e9e6d23-eed0-426d-b90a-28f87a94b6fe", - "a3d55d15-eeb1-4816-ada9-bf82decc09b3" - ] - }, - { - "id": "aed36625-a6d7-4681-ba59-c7ba3d18c148", - "status": "error", - "availability_zone": "az2", - "created_at": "2015-09-16T09:31:15.000000", - "name": "my_group2", - "description": "Edited description", - "group_type": "f8645498-1323-47a2-9442-5c57724d2e3c", - "volume_types": [ - "c4daaf47-c530-4901-b28e-f5f0a359c4e6" - ] - } - ] -} diff --git a/api-ref/source/v3/samples/groups-list-response.json b/api-ref/source/v3/samples/groups-list-response.json deleted file mode 100644 index cfbf24640..000000000 --- a/api-ref/source/v3/samples/groups-list-response.json +++ /dev/null @@ -1,12 +0,0 @@ -{ - "groups": [ - { - "id": "6f519a48-3183-46cf-a32f-41815f813986", - "name": "my_group1" - }, - { - "id": "aed36625-a6d7-4681-ba59-c7ba3d18c148", - "name": "my_group2" - } - ] -} 
diff --git a/api-ref/source/v3/samples/host-attach-request.json b/api-ref/source/v3/samples/host-attach-request.json deleted file mode 100644 index 01d064451..000000000 --- a/api-ref/source/v3/samples/host-attach-request.json +++ /dev/null @@ -1,5 +0,0 @@ -{ - "os-attach": { - "host_name": "my_host" - } -} diff --git a/api-ref/source/v3/samples/hosts-get-response.json b/api-ref/source/v3/samples/hosts-get-response.json deleted file mode 100644 index b3e237028..000000000 --- a/api-ref/source/v3/samples/hosts-get-response.json +++ /dev/null @@ -1,22 +0,0 @@ -{ - "host": [{ - "resource": { - "volume_count": "8", - "total_volume_gb": "11", - "total_snapshot_gb": "1", - "project": "(total)", - "host": "node1@rbd-sas", - "snapshot_count": "1" - } - }, - { - "resource": { - "volume_count": "8", - "total_volume_gb": "11", - "total_snapshot_gb": "1", - "project": "f21a9c86d7114bf99c711f4874d80474", - "host": "node1@rbd-sas", - "snapshot_count": "1" - } - }] -} diff --git a/api-ref/source/v3/samples/hosts-list-response.json b/api-ref/source/v3/samples/hosts-list-response.json deleted file mode 100644 index 1ae780898..000000000 --- a/api-ref/source/v3/samples/hosts-list-response.json +++ /dev/null @@ -1,26 +0,0 @@ -{ - "hosts": [{ - "service-status": "available", - "service": "cinder-backup", - "zone": "nova", - "service-state": "enabled", - "host_name": "node1", - "last-update": "2017-03-09T21:38:41.000000" - }, - { - "service-status": "available", - "service": "cinder-scheduler", - "zone": "nova", - "service-state": "enabled", - "host_name": "node1", - "last-update": "2017-03-09T21:38:38.000000" - }, - { - "service-status": "available", - "service": "cinder-volume", - "zone": "nova", - "service-state": "enabled", - "host_name": "node1@lvm", - "last-update": "2017-03-09T21:38:35.000000" - }] -} diff --git a/api-ref/source/v3/samples/image-metadata-show-request.json b/api-ref/source/v3/samples/image-metadata-show-request.json deleted file mode 100644 index 
f84e8261d..000000000 --- a/api-ref/source/v3/samples/image-metadata-show-request.json +++ /dev/null @@ -1,18 +0,0 @@ -{ - "volume": { - "host": "geraint-VirtualBox", - "ref": { - "source-volume-name": "existingLV", - "source-volume-id": "1234" - }, - "name": "New Volume", - "availability_zone": "az2", - "description": "Volume imported from existingLV", - "volume_type": null, - "bootable": true, - "metadata": { - "key1": "value1", - "key2": "value2" - } - } -} diff --git a/api-ref/source/v3/samples/image-metadata-show-response.json b/api-ref/source/v3/samples/image-metadata-show-response.json deleted file mode 100644 index a53976b9c..000000000 --- a/api-ref/source/v3/samples/image-metadata-show-response.json +++ /dev/null @@ -1,33 +0,0 @@ -{ - "volume": { - "status": "creating", - "user_id": "eae1472b5fc5496998a3d06550929e7e", - "attachments": [], - "links": [ - { - "href": "http://10.0.2.15:8776/v3/87c8522052ca4eed98bc672b4c1a3ddb/volumes/23cf872b-c781-4cd4-847d-5f2ec8cbd91c", - "rel": "self" - }, - { - "href": "http://10.0.2.15:8776/87c8522052ca4eed98bc672b4c1a3ddb/volumes/23cf872b-c781-4cd4-847d-5f2ec8cbd91c", - "rel": "bookmark" - } - ], - "availability_zone": "az2", - "bootable": "false", - "encrypted": "false", - "created_at": "2014-07-18T00:12:54.000000", - "description": "Volume imported from existingLV", - "os-vol-tenant-attr:tenant_id": "87c8522052ca4eed98bc672b4c1a3ddb", - "volume_type": null, - "name": "New Volume", - "source_volid": null, - "snapshot_id": null, - "metadata": { - "key2": "value2", - "key1": "value1" - }, - "id": "23cf872b-c781-4cd4-847d-5f2ec8cbd91c", - "size": 0 - } -} diff --git a/api-ref/source/v3/samples/limits-show-response.json b/api-ref/source/v3/samples/limits-show-response.json deleted file mode 100644 index 38d0ccd3c..000000000 --- a/api-ref/source/v3/samples/limits-show-response.json +++ /dev/null @@ -1,17 +0,0 @@ -{ - "limits": { - "rate": [], - "absolute": { - "totalSnapshotsUsed": 0, - "maxTotalBackups": 10, - 
"maxTotalVolumeGigabytes": 1000, - "maxTotalSnapshots": 10, - "maxTotalBackupGigabytes": 1000, - "totalBackupGigabytesUsed": 0, - "maxTotalVolumes": 10, - "totalVolumesUsed": 0, - "totalBackupsUsed": 0, - "totalGigabytesUsed": 0 - } - } -} diff --git a/api-ref/source/v3/samples/messages-list-response.json b/api-ref/source/v3/samples/messages-list-response.json deleted file mode 100644 index 020582d3f..000000000 --- a/api-ref/source/v3/samples/messages-list-response.json +++ /dev/null @@ -1,43 +0,0 @@ -{ - "messages": [{ - "request_id": "req-c1216709-afba-4703-a1a3-22eda88f2f5a", - "links": [ - { - "href": "http://localhost:8776/v3/cd609134301246f0a3faa9c3da22082e/messages/c506cd4b-9048-43bc-97ef-0d7dec369b42", - "rel": "self" - }, - { - "href": "http://localhost:8776/cd609134301246f0a3faa9c3da22082e/messages/c506cd4b-9048-43bc-97ef-0d7dec369b42", - "rel": "bookmark" - } - ], - "message_level": "ERROR", - "event_id": "VOLUME_000002", - "created_at": "2014-10-28T00:00:00-00:00", - "guaranteed_until": "2014-10-28T00:00:00-00:00", - "resource_uuid": "d5f6c517-c3e8-45fe-b994-b11118e4cacf", - "id": "c506cd4b-9048-43bc-97ef-0d7dec369b42", - "resource_type": "VOLUME", - "user_message": "No storage could be allocated for this volume request." - },{ - "request_id": "req-c1216709-afba-4703-a1a3-22eda88f2f5a", - "links": [ - { - "href": "http://localhost:8776/v3/cd609134301246f0a3faa9c3da22082e/messages/c506cd4b-9048-43bc-97ef-0d7dec369b42", - "rel": "self" - }, - { - "href": "http://localhost:8776/cd609134301246f0a3faa9c3da22082e/messages/c506cd4b-9048-43bc-97ef-0d7dec369b42", - "rel": "bookmark" - } - ], - "message_level": "ERROR", - "event_id": "VOLUME_000002", - "created_at": "2014-10-28T00:00:00-00:00", - "guaranteed_until": "2014-10-28T00:00:00-00:00", - "resource_uuid": "d5f6c517-c3e8-45fe-b994-b11118e4df4e", - "id": "c506cd4b-9048-43bc-97ef-0d7dec36d5gt", - "resource_type": "VOLUME", - "user_message": "No storage could be allocated for this volume request." 
- }] -} \ No newline at end of file diff --git a/api-ref/source/v3/samples/messages-show-response.json b/api-ref/source/v3/samples/messages-show-response.json deleted file mode 100644 index d674f772b..000000000 --- a/api-ref/source/v3/samples/messages-show-response.json +++ /dev/null @@ -1,23 +0,0 @@ -{ - "message": { - "request_id": "req-c1216709-afba-4703-a1a3-22eda88f2f5a", - "links": [ - { - "href": "http://localhost:8776/v3/cd609134301246f0a3faa9c3da22082e/messages/c506cd4b-9048-43bc-97ef-0d7dec369b42", - "rel": "self" - }, - { - "href": "http://localhost:8776/cd609134301246f0a3faa9c3da22082e/messages/c506cd4b-9048-43bc-97ef-0d7dec369b42", - "rel": "bookmark" - } - ], - "message_level": "ERROR", - "event_id": "VOLUME_000002", - "created_at": "2014-10-28T00:00:00-00:00", - "guaranteed_until": "2014-10-28T00:00:00-00:00", - "resource_uuid": "d5f6c517-c3e8-45fe-b994-b11118e4cacf", - "id": "c506cd4b-9048-43bc-97ef-0d7dec369b42", - "resource_type": "VOLUME", - "user_message": "No storage could be allocated for this volume request." 
- } -} \ No newline at end of file diff --git a/api-ref/source/v3/samples/pools-list-detailed-response.json b/api-ref/source/v3/samples/pools-list-detailed-response.json deleted file mode 100644 index 3fc28a299..000000000 --- a/api-ref/source/v3/samples/pools-list-detailed-response.json +++ /dev/null @@ -1,30 +0,0 @@ -{ - "pools": [ - { - "name": "pool1", - "capabilities": { - "updated": "2014-10-28T00:00:00-00:00", - "total_capacity": 1024, - "free_capacity": 100, - "volume_backend_name": "pool1", - "reserved_percentage": 0, - "driver_version": "1.0.0", - "storage_protocol": "iSCSI", - "QoS_support": false - } - }, - { - "name": "pool2", - "capabilities": { - "updated": "2014-10-28T00:00:00-00:00", - "total_capacity": 512, - "free_capacity": 200, - "volume_backend_name": "pool2", - "reserved_percentage": 0, - "driver_version": "1.0.1", - "storage_protocol": "iSER", - "QoS_support": true - } - } - ] -} diff --git a/api-ref/source/v3/samples/qos-create-request.json b/api-ref/source/v3/samples/qos-create-request.json deleted file mode 100644 index c0db909bd..000000000 --- a/api-ref/source/v3/samples/qos-create-request.json +++ /dev/null @@ -1,7 +0,0 @@ -{ - "qos_specs": { - "availability": "100", - "name": "reliability-spec", - "numberOfFailures": "0" - } -} diff --git a/api-ref/source/v3/samples/qos-create-response.json b/api-ref/source/v3/samples/qos-create-response.json deleted file mode 100644 index d743bb545..000000000 --- a/api-ref/source/v3/samples/qos-create-response.json +++ /dev/null @@ -1,21 +0,0 @@ -{ - "qos_specs": { - "specs": { - "numberOfFailures": "0", - "availability": "100" - }, - "consumer": "back-end", - "name": "reliability-spec", - "id": "599ef437-1c99-42ec-9fc6-239d0519fef1" - }, - "links": [ - { - "href": "http://23.253.248.171:8776/v3/bab7d5c60cd041a0a36f7c4b6e1dd978/qos_specs/599ef437-1c99-42ec-9fc6-239d0519fef1", - "rel": "self" - }, - { - "href": 
"http://23.253.248.171:8776/bab7d5c60cd041a0a36f7c4b6e1dd978/qos_specs/599ef437-1c99-42ec-9fc6-239d0519fef1", - "rel": "bookmark" - } - ] -} diff --git a/api-ref/source/v3/samples/qos-list-response.json b/api-ref/source/v3/samples/qos-list-response.json deleted file mode 100644 index 92f2a6216..000000000 --- a/api-ref/source/v3/samples/qos-list-response.json +++ /dev/null @@ -1,22 +0,0 @@ -{ - "qos_specs": [ - { - "specs": { - "availability": "100", - "numberOfFailures": "0" - }, - "consumer": "back-end", - "name": "reliability-spec", - "id": "0388d6c6-d5d4-42a3-b289-95205c50dd15" - }, - { - "specs": { - "delay": "0", - "throughput": "100" - }, - "consumer": "back-end", - "name": "performance-spec", - "id": "ecfc6e2e-7117-44a4-8eec-f84d04f531a8" - } - ] -} diff --git a/api-ref/source/v3/samples/qos-show-response.json b/api-ref/source/v3/samples/qos-show-response.json deleted file mode 100644 index 6cf22c817..000000000 --- a/api-ref/source/v3/samples/qos-show-response.json +++ /dev/null @@ -1,21 +0,0 @@ -{ - "qos_specs": { - "specs": { - "availability": "100", - "numberOfFailures": "0" - }, - "consumer": "back-end", - "name": "reliability-spec", - "id": "0388d6c6-d5d4-42a3-b289-95205c50dd15" - }, - "links": [ - { - "href": "http://23.253.228.211:8776/v3/e1cf63117ae74309a5bcc2002a23be8b/qos_specs/0388d6c6-d5d4-42a3-b289-95205c50dd15", - "rel": "self" - }, - { - "href": "http://23.253.228.211:8776/e1cf63117ae74309a5bcc2002a23be8b/qos_specs/0388d6c6-d5d4-42a3-b289-95205c50dd15", - "rel": "bookmark" - } - ] -} diff --git a/api-ref/source/v3/samples/qos-unset-request.json b/api-ref/source/v3/samples/qos-unset-request.json deleted file mode 100644 index 4193b7392..000000000 --- a/api-ref/source/v3/samples/qos-unset-request.json +++ /dev/null @@ -1,5 +0,0 @@ -{ - "keys": [ - "key1" - ] -} diff --git a/api-ref/source/v3/samples/qos-unset-response.json b/api-ref/source/v3/samples/qos-unset-response.json deleted file mode 100644 index e69de29bb..000000000 diff --git 
a/api-ref/source/v3/samples/qos-update-request.json b/api-ref/source/v3/samples/qos-update-request.json deleted file mode 100644 index 1d3987705..000000000 --- a/api-ref/source/v3/samples/qos-update-request.json +++ /dev/null @@ -1,5 +0,0 @@ -{ - "qos_specs": { - "delay": "1" - } -} diff --git a/api-ref/source/v3/samples/qos-update-response.json b/api-ref/source/v3/samples/qos-update-response.json deleted file mode 100644 index 1d3987705..000000000 --- a/api-ref/source/v3/samples/qos-update-response.json +++ /dev/null @@ -1,5 +0,0 @@ -{ - "qos_specs": { - "delay": "1" - } -} diff --git a/api-ref/source/v3/samples/qos_show_response.json b/api-ref/source/v3/samples/qos_show_response.json deleted file mode 100644 index 4a5d9db6a..000000000 --- a/api-ref/source/v3/samples/qos_show_response.json +++ /dev/null @@ -1,9 +0,0 @@ -{ - "qos_associations": [ - { - "association_type": "volume_type", - "name": "reliability-type", - "id": "a12983c2-83bd-4afa-be9f-ad796573ead6" - } - ] -} diff --git a/api-ref/source/v3/samples/quotas-show-defaults-response.json b/api-ref/source/v3/samples/quotas-show-defaults-response.json deleted file mode 100644 index adc54993e..000000000 --- a/api-ref/source/v3/samples/quotas-show-defaults-response.json +++ /dev/null @@ -1,21 +0,0 @@ -{ - "quota_set": { - "id": "a7090a26bc554d93aa845a4d41808251", - "volumes": 10, - "volumes_ceph": -1, - "volumes_lvm-thin": -1, - "volumes_lvmdriver-1": -1, - "snapshots": 10, - "snapshots_ceph": -1, - "snapshots_lvm-thin": -1, - "snapshots_lvmdriver-1": -1, - "backups": 10, - "groups": 10, - "per_volume_gigabytes": -1, - "gigabytes": 1000, - "gigabytes_ceph": -1, - "gigabytes_lvm-thin": -1, - "gigabytes_lvmdriver-1": -1, - "backup_gigabytes": 1000 - } -} diff --git a/api-ref/source/v3/samples/quotas-show-response.json b/api-ref/source/v3/samples/quotas-show-response.json deleted file mode 100644 index adc54993e..000000000 --- a/api-ref/source/v3/samples/quotas-show-response.json +++ /dev/null @@ -1,21 +0,0 @@ -{ 
- "quota_set": { - "id": "a7090a26bc554d93aa845a4d41808251", - "volumes": 10, - "volumes_ceph": -1, - "volumes_lvm-thin": -1, - "volumes_lvmdriver-1": -1, - "snapshots": 10, - "snapshots_ceph": -1, - "snapshots_lvm-thin": -1, - "snapshots_lvmdriver-1": -1, - "backups": 10, - "groups": 10, - "per_volume_gigabytes": -1, - "gigabytes": 1000, - "gigabytes_ceph": -1, - "gigabytes_lvm-thin": -1, - "gigabytes_lvmdriver-1": -1, - "backup_gigabytes": 1000 - } -} diff --git a/api-ref/source/v3/samples/quotas-update-request.json b/api-ref/source/v3/samples/quotas-update-request.json deleted file mode 100644 index 7f0234ab8..000000000 --- a/api-ref/source/v3/samples/quotas-update-request.json +++ /dev/null @@ -1,9 +0,0 @@ -{ - "quota_set":{ - "groups": 11, - "volumes": 5, - "volumes_ceph": 3, - "backups": 4 - }, - "skip_validation": false -} diff --git a/api-ref/source/v3/samples/quotas-update-response.json b/api-ref/source/v3/samples/quotas-update-response.json deleted file mode 100644 index b239ec5d5..000000000 --- a/api-ref/source/v3/samples/quotas-update-response.json +++ /dev/null @@ -1,20 +0,0 @@ -{ - "quota_set": { - "volumes": 10, - "volumes_ceph": -1, - "volumes_lvm-thin": -1, - "volumes_lvmdriver-1": -1, - "snapshots": 10, - "snapshots_ceph": -1, - "snapshots_lvm-thin": -1, - "snapshots_lvmdriver-1": -1, - "backups": 10, - "groups": 10, - "per_volume_gigabytes": -1, - "gigabytes": 1000, - "gigabytes_ceph": -1, - "gigabytes_lvm-thin": -1, - "gigabytes_lvmdriver-1": -1, - "backup_gigabytes": 1000 - } -} diff --git a/api-ref/source/v3/samples/resource-filters-list-response.json b/api-ref/source/v3/samples/resource-filters-list-response.json deleted file mode 100644 index 6a1da8fbc..000000000 --- a/api-ref/source/v3/samples/resource-filters-list-response.json +++ /dev/null @@ -1,21 +0,0 @@ -{ - "resource_filters": [ - { - "filters": [ - "name", - "status", - "image_metadata", "bootable", - "migration_status" - ], - "resource": "volume" - }, - { - "filters": [ - "name", - 
"status", - "volume_id" - ], - "resource": "snapshot" - } - ] -} \ No newline at end of file diff --git a/api-ref/source/v3/samples/snapshot-create-request.json b/api-ref/source/v3/samples/snapshot-create-request.json deleted file mode 100644 index 5fb46cd0c..000000000 --- a/api-ref/source/v3/samples/snapshot-create-request.json +++ /dev/null @@ -1,9 +0,0 @@ -{ - "snapshot": { - "name": "snap-001", - "description": "Daily backup", - "volume_id": "5aa119a8-d25b-45a7-8d1b-88e127885635", - "force": true, - "metadata": null - } -} diff --git a/api-ref/source/v3/samples/snapshot-create-response.json b/api-ref/source/v3/samples/snapshot-create-response.json deleted file mode 100644 index 15a139531..000000000 --- a/api-ref/source/v3/samples/snapshot-create-response.json +++ /dev/null @@ -1,15 +0,0 @@ -{ - "snapshot": { - "status": "creating", - "description": "Daily backup", - "created_at": "2013-02-25T03:56:53.081642", - "metadata": {}, - "updated_at": "2013-02-25T03:58:53.081642", - "volume_id": "5aa119a8-d25b-45a7-8d1b-88e127885635", - "size": 1, - "user_id": "40c2102f4a554b848d96b14f3eec39ed", - "id": "ffa9bc5e-1172-4021-acaf-cdcd78a9584d", - "name": "snap-001" - } -} - diff --git a/api-ref/source/v3/samples/snapshot-manage-list-detail-response.json b/api-ref/source/v3/samples/snapshot-manage-list-detail-response.json deleted file mode 100644 index 256e840c8..000000000 --- a/api-ref/source/v3/samples/snapshot-manage-list-detail-response.json +++ /dev/null @@ -1,29 +0,0 @@ -{ - "manageable-snapshots": [ - { - "cinder_id": null, - "reason_not_safe": null, - "reference": { - "source-name": "lvol0" - }, - "source_reference": { - "source-name": "volume-7c064b34-1e4b-40bd-93ca-4ac5a973661b"}, - "safe_to_manage": true, - "size": 1, - "extra_info": null - }, - { - "cinder_id": "d0c84570-a01f-4579-9789-5e9f266587cd", - "reason_not_safe": "already managed", - "reference": { - "source-name":"_snapshot-d0c84570-a01f-4579-9789-5e9f266587cd" - }, - "source_reference": { - 
"source-name": "volume-7c064b34-1e4b-40bd-93ca-4ac5a973661b" - }, - "safe_to_manage": false, - "size": 1, - "extra_info": null - } - ] -} \ No newline at end of file diff --git a/api-ref/source/v3/samples/snapshot-manage-list-response.json b/api-ref/source/v3/samples/snapshot-manage-list-response.json deleted file mode 100644 index efc293977..000000000 --- a/api-ref/source/v3/samples/snapshot-manage-list-response.json +++ /dev/null @@ -1,24 +0,0 @@ -{ - "manageable-snapshots": [ - { - "source_reference": { - "source-name": "volume-7c064b34-1e4b-40bd-93ca-4ac5a973661b" - }, - "safe_to_manage": true, - "reference": { - "source-name": "lvol0" - }, - "size": 1 - }, - { - "source_reference": { - "source-name": "volume-7c064b34-1e4b-40bd-93ca-4ac5a973661b" - }, - "safe_to_manage": false, - "reference": { - "source-name": "_snapshot-d0c84570-a01f-4579-9789-5e9f266587cd" - }, - "size": 1 - } - ] -} \ No newline at end of file diff --git a/api-ref/source/v3/samples/snapshot-manage-request.json b/api-ref/source/v3/samples/snapshot-manage-request.json deleted file mode 100644 index d341a1b37..000000000 --- a/api-ref/source/v3/samples/snapshot-manage-request.json +++ /dev/null @@ -1,11 +0,0 @@ -{ - "snapshot": { - "description": null, - "metadata": null, - "ref": { - "source-name": "lvol0" - }, - "name": null, - "volume_id": "7c064b34-1e4b-40bd-93ca-4ac5a973661b" - } -} \ No newline at end of file diff --git a/api-ref/source/v3/samples/snapshot-metadata-create-request.json b/api-ref/source/v3/samples/snapshot-metadata-create-request.json deleted file mode 100644 index 97cd30d5e..000000000 --- a/api-ref/source/v3/samples/snapshot-metadata-create-request.json +++ /dev/null @@ -1,5 +0,0 @@ -{ - "metadata": { - "key": "v3" - } -} diff --git a/api-ref/source/v3/samples/snapshot-metadata-create-response.json b/api-ref/source/v3/samples/snapshot-metadata-create-response.json deleted file mode 100644 index 97cd30d5e..000000000 --- 
a/api-ref/source/v3/samples/snapshot-metadata-create-response.json +++ /dev/null @@ -1,5 +0,0 @@ -{ - "metadata": { - "key": "v3" - } -} diff --git a/api-ref/source/v3/samples/snapshot-metadata-show-key-response.json b/api-ref/source/v3/samples/snapshot-metadata-show-key-response.json deleted file mode 100644 index ebd589c1b..000000000 --- a/api-ref/source/v3/samples/snapshot-metadata-show-key-response.json +++ /dev/null @@ -1,5 +0,0 @@ -{ - "meta": { - "name": "test" - } -} diff --git a/api-ref/source/v3/samples/snapshot-metadata-show-response.json b/api-ref/source/v3/samples/snapshot-metadata-show-response.json deleted file mode 100644 index cbfe4ef7a..000000000 --- a/api-ref/source/v3/samples/snapshot-metadata-show-response.json +++ /dev/null @@ -1,5 +0,0 @@ -{ - "metadata": { - "name": "test" - } -} diff --git a/api-ref/source/v3/samples/snapshot-metadata-update-key-request.json b/api-ref/source/v3/samples/snapshot-metadata-update-key-request.json deleted file mode 100644 index 023712fce..000000000 --- a/api-ref/source/v3/samples/snapshot-metadata-update-key-request.json +++ /dev/null @@ -1,5 +0,0 @@ -{ - "meta": { - "name": "new_name" - } -} diff --git a/api-ref/source/v3/samples/snapshot-metadata-update-key-response.json b/api-ref/source/v3/samples/snapshot-metadata-update-key-response.json deleted file mode 100644 index 023712fce..000000000 --- a/api-ref/source/v3/samples/snapshot-metadata-update-key-response.json +++ /dev/null @@ -1,5 +0,0 @@ -{ - "meta": { - "name": "new_name" - } -} diff --git a/api-ref/source/v3/samples/snapshot-metadata-update-request.json b/api-ref/source/v3/samples/snapshot-metadata-update-request.json deleted file mode 100644 index 4373b0018..000000000 --- a/api-ref/source/v3/samples/snapshot-metadata-update-request.json +++ /dev/null @@ -1,5 +0,0 @@ -{ - "metadata": { - "key": "v2" - } -} diff --git a/api-ref/source/v3/samples/snapshot-metadata-update-response.json b/api-ref/source/v3/samples/snapshot-metadata-update-response.json 
deleted file mode 100644 index 4373b0018..000000000 --- a/api-ref/source/v3/samples/snapshot-metadata-update-response.json +++ /dev/null @@ -1,5 +0,0 @@ -{ - "metadata": { - "key": "v2" - } -} diff --git a/api-ref/source/v3/samples/snapshot-show-response.json b/api-ref/source/v3/samples/snapshot-show-response.json deleted file mode 100644 index f5524180a..000000000 --- a/api-ref/source/v3/samples/snapshot-show-response.json +++ /dev/null @@ -1,15 +0,0 @@ -{ - "snapshot": { - "status": "available", - "os-extended-snapshot-attributes:progress": "100%", - "description": "Daily backup", - "created_at": "2013-02-25T04:13:17.000000", - "metadata": {}, - "user_id": "40c2102f4a554b848d96b14f3eec39ed", - "volume_id": "5aa119a8-d25b-45a7-8d1b-88e127885635", - "os-extended-snapshot-attributes:project_id": "0c2eba2c5af04d3f9e9d0d410b371fde", - "size": 1, - "id": "2bb856e1-b3d8-4432-a858-09e4ce939389", - "name": "snap-001" - } -} diff --git a/api-ref/source/v3/samples/snapshot-status-reset-request.json b/api-ref/source/v3/samples/snapshot-status-reset-request.json deleted file mode 100644 index 2bca0c286..000000000 --- a/api-ref/source/v3/samples/snapshot-status-reset-request.json +++ /dev/null @@ -1,5 +0,0 @@ -{ - "os-reset_status": { - "status": "available", - } -} diff --git a/api-ref/source/v3/samples/snapshot-update-request.json b/api-ref/source/v3/samples/snapshot-update-request.json deleted file mode 100644 index 0e0895717..000000000 --- a/api-ref/source/v3/samples/snapshot-update-request.json +++ /dev/null @@ -1,6 +0,0 @@ -{ - "snapshot": { - "name": "snap-002", - "description": "This is yet, another snapshot." 
- } -} diff --git a/api-ref/source/v3/samples/snapshot-update-response.json b/api-ref/source/v3/samples/snapshot-update-response.json deleted file mode 100644 index 6c4415b04..000000000 --- a/api-ref/source/v3/samples/snapshot-update-response.json +++ /dev/null @@ -1,12 +0,0 @@ -{ - "snapshot": { - "created_at": "2013-02-20T08:11:34.000000", - "description": "This is yet, another snapshot", - "name": "snap-002", - "id": "4b502fcb-1f26-45f8-9fe5-3b9a0a52eaf2", - "size": 1, - "status": "available", - "user_id": "40c2102f4a554b848d96b14f3eec39ed", - "volume_id": "2402b902-0b7a-458c-9c07-7435a826f794" - } -} diff --git a/api-ref/source/v3/samples/snapshots-list-detailed-response.json b/api-ref/source/v3/samples/snapshots-list-detailed-response.json deleted file mode 100644 index f077cf629..000000000 --- a/api-ref/source/v3/samples/snapshots-list-detailed-response.json +++ /dev/null @@ -1,19 +0,0 @@ -{ - "snapshots": [ - { - "status": "available", - "metadata": { - "name": "test" - }, - "os-extended-snapshot-attributes:progress": "100%", - "name": "test-volume-snapshot", - "user_id": "40c2102f4a554b848d96b14f3eec39ed", - "volume_id": "173f7b48-c4c1-4e70-9acc-086b39073506", - "os-extended-snapshot-attributes:project_id": "bab7d5c60cd041a0a36f7c4b6e1dd978", - "created_at": "2015-11-29T02:25:51.000000", - "size": 1, - "id": "b1323cda-8e4b-41c1-afc5-2fc791809c8c", - "description": "volume snapshot" - } - ] -} diff --git a/api-ref/source/v3/samples/snapshots-list-response.json b/api-ref/source/v3/samples/snapshots-list-response.json deleted file mode 100644 index 877722893..000000000 --- a/api-ref/source/v3/samples/snapshots-list-response.json +++ /dev/null @@ -1,17 +0,0 @@ -{ - "snapshots": [ - { - "status": "available", - "metadata": { - "name": "test" - }, - "name": "test-volume-snapshot", - "user_id": "40c2102f4a554b848d96b14f3eec39ed", - "volume_id": "173f7b48-c4c1-4e70-9acc-086b39073506", - "created_at": "2015-11-29T02:25:51.000000", - "size": 1, - "id": 
"b1323cda-8e4b-41c1-afc5-2fc791809c8c", - "description": "volume snapshot" - } - ] -} diff --git a/api-ref/source/v3/samples/user-quotas-show-response.json b/api-ref/source/v3/samples/user-quotas-show-response.json deleted file mode 100644 index 239c64d23..000000000 --- a/api-ref/source/v3/samples/user-quotas-show-response.json +++ /dev/null @@ -1,17 +0,0 @@ -{ - "quota_set": { - "cores": 20, - "fixed_ips": -1, - "floating_ips": 10, - "id": "fake_tenant", - "injected_file_content_bytes": 10240, - "injected_file_path_bytes": 255, - "injected_files": 5, - "instances": 10, - "key_pairs": 100, - "metadata_items": 128, - "ram": 51200, - "security_group_rules": 20, - "security_groups": 10 - } -} diff --git a/api-ref/source/v3/samples/user-quotas-update-request.json b/api-ref/source/v3/samples/user-quotas-update-request.json deleted file mode 100644 index 6e5195f9a..000000000 --- a/api-ref/source/v3/samples/user-quotas-update-request.json +++ /dev/null @@ -1,6 +0,0 @@ -{ - "quota_set": { - "force": true, - "instances": 9 - } -} diff --git a/api-ref/source/v3/samples/user-quotas-update-response.json b/api-ref/source/v3/samples/user-quotas-update-response.json deleted file mode 100644 index 553933292..000000000 --- a/api-ref/source/v3/samples/user-quotas-update-response.json +++ /dev/null @@ -1,16 +0,0 @@ -{ - "quota_set": { - "cores": 20, - "floating_ips": 10, - "fixed_ips": -1, - "injected_file_content_bytes": 10240, - "injected_file_path_bytes": 255, - "injected_files": 5, - "instances": 9, - "key_pairs": 100, - "metadata_items": 128, - "ram": 51200, - "security_group_rules": 20, - "security_groups": 10 - } -} diff --git a/api-ref/source/v3/samples/version-show-response.json b/api-ref/source/v3/samples/version-show-response.json deleted file mode 100644 index 04458b755..000000000 --- a/api-ref/source/v3/samples/version-show-response.json +++ /dev/null @@ -1,29 +0,0 @@ -{ - "version": { - "status": "CURRENT", - "updated": "2012-01-04T11:33:21Z", - "media-types": [ - { - 
"base": "application/json", - "type": "application/vnd.openstack.volume+json;version=1" - } - ], - "id": "v1.0", - "links": [ - { - "href": "http://23.253.228.211:8776/v1/", - "rel": "self" - }, - { - "href": "http://jorgew.github.com/block-storage-api/content/os-block-storage-1.0.pdf", - "type": "application/pdf", - "rel": "describedby" - }, - { - "href": "http://docs.rackspacecloud.com/servers/api/v1.1/application.wadl", - "type": "application/vnd.sun.wadl+xml", - "rel": "describedby" - } - ] - } -} diff --git a/api-ref/source/v3/samples/version-v2-show-response.json b/api-ref/source/v3/samples/version-v2-show-response.json deleted file mode 100644 index 811a3e80e..000000000 --- a/api-ref/source/v3/samples/version-v2-show-response.json +++ /dev/null @@ -1,36 +0,0 @@ -{ - "choices": [ - { - "status": "SUPPORTED", - "media-types": [ - { - "base": "application/json", - "type": "application/vnd.openstack.volume+json;version=1" - } - ], - "id": "v1.0", - "links": [ - { - "href": "http://23.253.248.171:8776/v1/v2.json", - "rel": "self" - } - ] - }, - { - "status": "CURRENT", - "media-types": [ - { - "base": "application/json", - "type": "application/vnd.openstack.volume+json;version=1" - } - ], - "id": "v2.0", - "links": [ - { - "href": "http://23.253.248.171:8776/v2/v2.json", - "rel": "self" - } - ] - } - ] -} diff --git a/api-ref/source/v3/samples/version-v3-show-response.json b/api-ref/source/v3/samples/version-v3-show-response.json deleted file mode 100644 index 1103ad054..000000000 --- a/api-ref/source/v3/samples/version-v3-show-response.json +++ /dev/null @@ -1,76 +0,0 @@ -{ - "versions": [ - { - "id": "v1.0", - "links": [ - { - "href": "http://docs.openstack.org/", - "rel": "describedby", - "type": "text/html" - }, - { - "href": "http://23.253.248.171:8776/v1/", - "rel": "self" - } - ], - "media-types": [ - { - "base": "application/json", - "type": "application/vnd.openstack.volume+json;version=1" - } - ], - "min_version": "", - "status": "DEPRECATED", - 
"updated": "2014-06-28T12:20:21Z", - "version": "" - }, - { - "id": "v2.0", - "links": [ - { - "href": "http://docs.openstack.org/", - "rel": "describedby", - "type": "text/html" - }, - { - "href": "http://23.253.248.171:8776/v2/", - "rel": "self" - } - ], - "media-types": [ - { - "base": "application/json", - "type": "application/vnd.openstack.volume+json;version=1" - } - ], - "min_version": "", - "status": "SUPPORTED", - "updated": "2014-06-28T12:20:21Z", - "version": "" - }, - { - "id": "v3.0", - "links": [ - { - "href": "http://docs.openstack.org/", - "rel": "describedby", - "type": "text/html" - }, - { - "href": "http://23.253.248.171:8776/v3/", - "rel": "self" - } - ], - "media-types": [ - { - "base": "application/json", - "type": "application/vnd.openstack.volume+json;version=1" - } - ], - "min_version": "3.0", - "status": "CURRENT", - "updated": "2016-02-08T12:20:21Z", - "version": "3.0" - } - ] -} diff --git a/api-ref/source/v3/samples/versions-response.json b/api-ref/source/v3/samples/versions-response.json deleted file mode 100644 index 4d97d8421..000000000 --- a/api-ref/source/v3/samples/versions-response.json +++ /dev/null @@ -1,76 +0,0 @@ -{ - "versions": [ - { - "status": "DEPRECATED", - "updated": "2014-06-28T12:20:21Z", - "links": [ - { - "href": "http://docs.openstack.org/", - "type": "text/html", - "rel": "describedby" - }, - { - "href": "http://10.0.2.15:8776/v1/", - "rel": "self" - } - ], - "min_version": "", - "version": "", - "media-types": [ - { - "base": "application/json", - "type": "application/vnd.openstack.volume+json;version=1" - } - ], - "id": "v1.0" - }, - { - "status": "SUPPORTED", - "updated": "2014-06-28T12:20:21Z", - "links": [ - { - "href": "http://docs.openstack.org/", - "type": "text/html", - "rel": "describedby" - }, - { - "href": "http://10.0.2.15:8776/v2/", - "rel": "self" - } - ], - "min_version": "", - "version": "", - "media-types": [ - { - "base": "application/json", - "type": 
"application/vnd.openstack.volume+json;version=2" - } - ], - "id": "v2.0" - }, - { - "status": "CURRENT", - "updated": "2016-02-08T12:20:21Z", - "links": [ - { - "href": "http://docs.openstack.org/", - "type": "text/html", - "rel": "describedby" - }, - { - "href": "http://10.0.2.15:8776/v3/", - "rel": "self" - } - ], - "min_version": "3.0", - "version": "{Current_Max_Version}", - "media-types": [ - { - "base": "application/json", - "type": "application/vnd.openstack.volume+json;version=3" - } - ], - "id": "v3.0" - } - ] -} diff --git a/api-ref/source/v3/samples/volume-attach-request.json b/api-ref/source/v3/samples/volume-attach-request.json deleted file mode 100644 index a779f9fbf..000000000 --- a/api-ref/source/v3/samples/volume-attach-request.json +++ /dev/null @@ -1,6 +0,0 @@ -{ - "os-attach": { - "instance_uuid": "95D9EF50-507D-11E5-B970-0800200C9A66", - "mountpoint": "/dev/vdc" - } -} diff --git a/api-ref/source/v3/samples/volume-bootable-status-update-request.json b/api-ref/source/v3/samples/volume-bootable-status-update-request.json deleted file mode 100644 index abcdf3deb..000000000 --- a/api-ref/source/v3/samples/volume-bootable-status-update-request.json +++ /dev/null @@ -1,5 +0,0 @@ -{ - "os-set_bootable": { - "bootable": "True" - } -} diff --git a/api-ref/source/v3/samples/volume-create-request.json b/api-ref/source/v3/samples/volume-create-request.json deleted file mode 100644 index 2aab20252..000000000 --- a/api-ref/source/v3/samples/volume-create-request.json +++ /dev/null @@ -1,22 +0,0 @@ -{ - "volume": { - "size": 10, - "availability_zone": null, - "source_volid": null, - "description": null, - "multiattach ": false, - "snapshot_id": null, - "name": null, - "imageRef": null, - "volume_type": null, - "metadata": {}, - "source_replica": null, - "consistencygroup_id": null - }, - "OS-SCH-HNT:scheduler_hints": { - "same_host": [ - "a0cf03a5-d921-4877-bb5c-86d26cf818e1", - "8c19174f-4220-44f0-824a-cd1eeef10287" - ] - } -} diff --git 
a/api-ref/source/v3/samples/volume-create-response.json b/api-ref/source/v3/samples/volume-create-response.json deleted file mode 100644 index 57441f030..000000000 --- a/api-ref/source/v3/samples/volume-create-response.json +++ /dev/null @@ -1,34 +0,0 @@ -{ - "volume": { - "status": "creating", - "migration_status": null, - "user_id": "0eea4eabcf184061a3b6db1e0daaf010", - "attachments": [], - "links": [ - { - "href": "http://23.253.248.171:8776/v3/bab7d5c60cd041a0a36f7c4b6e1dd978/volumes/6edbc2f4-1507-44f8-ac0d-eed1d2608d38", - "rel": "self" - }, - { - "href": "http://23.253.248.171:8776/bab7d5c60cd041a0a36f7c4b6e1dd978/volumes/6edbc2f4-1507-44f8-ac0d-eed1d2608d38", - "rel": "bookmark" - } - ], - "availability_zone": "nova", - "bootable": "false", - "encrypted": false, - "created_at": "2015-11-29T03:01:44.000000", - "description": null, - "updated_at": null, - "volume_type": "lvmdriver-1", - "name": "test-volume-attachments", - "replication_status": "disabled", - "consistencygroup_id": null, - "source_volid": null, - "snapshot_id": null, - "multiattach": false, - "metadata": {}, - "id": "6edbc2f4-1507-44f8-ac0d-eed1d2608d38", - "size": 2 - } -} diff --git a/api-ref/source/v3/samples/volume-detach-request.json b/api-ref/source/v3/samples/volume-detach-request.json deleted file mode 100644 index 88f4119a3..000000000 --- a/api-ref/source/v3/samples/volume-detach-request.json +++ /dev/null @@ -1,6 +0,0 @@ -{ - "os-detach": { - "attachment_id": "d8777f54-84cf-4809-a679-468ffed56cf1" - } -} - diff --git a/api-ref/source/v3/samples/volume-extend-request.json b/api-ref/source/v3/samples/volume-extend-request.json deleted file mode 100644 index a051cb3cb..000000000 --- a/api-ref/source/v3/samples/volume-extend-request.json +++ /dev/null @@ -1,5 +0,0 @@ -{ - "os-extend": { - "new_size": 3 - } -} diff --git a/api-ref/source/v3/samples/volume-force-delete-request.json b/api-ref/source/v3/samples/volume-force-delete-request.json deleted file mode 100644 index 
a7fe0fe2a..000000000 --- a/api-ref/source/v3/samples/volume-force-delete-request.json +++ /dev/null @@ -1,3 +0,0 @@ -{ - "os-force_delete": {} -} diff --git a/api-ref/source/v3/samples/volume-force-detach-request.json b/api-ref/source/v3/samples/volume-force-detach-request.json deleted file mode 100644 index 277849d8c..000000000 --- a/api-ref/source/v3/samples/volume-force-detach-request.json +++ /dev/null @@ -1,8 +0,0 @@ -{ - "os-force_detach": { - "attachment_id": "d8777f54-84cf-4809-a679-468ffed56cf1", - "connector": { - "initiator": "iqn.2012-07.org.fake:01" - } - } -} diff --git a/api-ref/source/v3/samples/volume-image-metadata-set-request.json b/api-ref/source/v3/samples/volume-image-metadata-set-request.json deleted file mode 100644 index 1f2be3d6e..000000000 --- a/api-ref/source/v3/samples/volume-image-metadata-set-request.json +++ /dev/null @@ -1,10 +0,0 @@ -{ - "os-set_image_metadata": { - "metadata": { - "image_id": "521752a6-acf6-4b2d-bc7a-119f9148cd8c", - "image_name": "image", - "kernel_id": "155d900f-4e14-4e4c-a73d-069cbf4541e6", - "ramdisk_id": "somedisk" - } - } -} diff --git a/api-ref/source/v3/samples/volume-image-metadata-unset-request.json b/api-ref/source/v3/samples/volume-image-metadata-unset-request.json deleted file mode 100644 index 49d3295c5..000000000 --- a/api-ref/source/v3/samples/volume-image-metadata-unset-request.json +++ /dev/null @@ -1,5 +0,0 @@ -{ - "os-unset_image_metadata": { - "key": "ramdisk_id" - } -} diff --git a/api-ref/source/v3/samples/volume-manage-list-detail-response.json b/api-ref/source/v3/samples/volume-manage-list-detail-response.json deleted file mode 100644 index 21c16bafa..000000000 --- a/api-ref/source/v3/samples/volume-manage-list-detail-response.json +++ /dev/null @@ -1,24 +0,0 @@ -{ - "manageable-volumes": [ - { - "cinder_id": "9ba5bb53-4a18-4b38-be06-992999da338d", - "reason_not_safe": "already managed", - "reference": { - "source-name": "volume-9ba5bb53-4a18-4b38-be06-992999da338d" - }, - 
"safe_to_manage": false, - "size": 1, - "extra_info": null - }, - { - "cinder_id": null, - "reason_not_safe": null, - "reference": { - "source-name": "lvol0" - }, - "safe_to_manage": true, - "size": 1, - "extra_info": null - } - ] -} \ No newline at end of file diff --git a/api-ref/source/v3/samples/volume-manage-list-response.json b/api-ref/source/v3/samples/volume-manage-list-response.json deleted file mode 100644 index 5c2abcc94..000000000 --- a/api-ref/source/v3/samples/volume-manage-list-response.json +++ /dev/null @@ -1,18 +0,0 @@ -{ - "manageable-volumes": [ - { - "safe_to_manage": false, - "reference": { - "source-name": "volume-3a81fdac-e8ae-4e61-b6a2-2e14ff316f19" - }, - "size": 1 - }, - { - "safe_to_manage": true, - "reference": { - "source-name": "lvol0" - }, - "size": 1 - } - ] -} \ No newline at end of file diff --git a/api-ref/source/v3/samples/volume-manage-request-cluster.json b/api-ref/source/v3/samples/volume-manage-request-cluster.json deleted file mode 100644 index 5467c98d7..000000000 --- a/api-ref/source/v3/samples/volume-manage-request-cluster.json +++ /dev/null @@ -1,19 +0,0 @@ -{ - "volume": { - "host": null, - "cluster": "cluster@backend", - "ref": { - "source-name": "existingLV", - "source-id": "1234" - }, - "name": "New Volume", - "availability_zone": "az2", - "description": "Volume imported from existingLV", - "volume_type": null, - "bootable": true, - "metadata": { - "key1": "value1", - "key2": "value2" - } - } -} diff --git a/api-ref/source/v3/samples/volume-manage-request.json b/api-ref/source/v3/samples/volume-manage-request.json deleted file mode 100644 index 363214784..000000000 --- a/api-ref/source/v3/samples/volume-manage-request.json +++ /dev/null @@ -1,18 +0,0 @@ -{ - "volume": { - "host": "geraint-VirtualBox", - "ref": { - "source-name": "existingLV", - "source-id": "1234" - }, - "name": "New Volume", - "availability_zone": "az2", - "description": "Volume imported from existingLV", - "volume_type": null, - "bootable": true, 
- "metadata": { - "key1": "value1", - "key2": "value2" - } - } -} diff --git a/api-ref/source/v3/samples/volume-manage-response.json b/api-ref/source/v3/samples/volume-manage-response.json deleted file mode 100644 index a53976b9c..000000000 --- a/api-ref/source/v3/samples/volume-manage-response.json +++ /dev/null @@ -1,33 +0,0 @@ -{ - "volume": { - "status": "creating", - "user_id": "eae1472b5fc5496998a3d06550929e7e", - "attachments": [], - "links": [ - { - "href": "http://10.0.2.15:8776/v3/87c8522052ca4eed98bc672b4c1a3ddb/volumes/23cf872b-c781-4cd4-847d-5f2ec8cbd91c", - "rel": "self" - }, - { - "href": "http://10.0.2.15:8776/87c8522052ca4eed98bc672b4c1a3ddb/volumes/23cf872b-c781-4cd4-847d-5f2ec8cbd91c", - "rel": "bookmark" - } - ], - "availability_zone": "az2", - "bootable": "false", - "encrypted": "false", - "created_at": "2014-07-18T00:12:54.000000", - "description": "Volume imported from existingLV", - "os-vol-tenant-attr:tenant_id": "87c8522052ca4eed98bc672b4c1a3ddb", - "volume_type": null, - "name": "New Volume", - "source_volid": null, - "snapshot_id": null, - "metadata": { - "key2": "value2", - "key1": "value1" - }, - "id": "23cf872b-c781-4cd4-847d-5f2ec8cbd91c", - "size": 0 - } -} diff --git a/api-ref/source/v3/samples/volume-metadata-create-request.json b/api-ref/source/v3/samples/volume-metadata-create-request.json deleted file mode 100644 index 1ff9aae27..000000000 --- a/api-ref/source/v3/samples/volume-metadata-create-request.json +++ /dev/null @@ -1,5 +0,0 @@ -{ - "metadata": { - "name": "metadata0" - } -} diff --git a/api-ref/source/v3/samples/volume-metadata-create-response.json b/api-ref/source/v3/samples/volume-metadata-create-response.json deleted file mode 100644 index 1ff9aae27..000000000 --- a/api-ref/source/v3/samples/volume-metadata-create-response.json +++ /dev/null @@ -1,5 +0,0 @@ -{ - "metadata": { - "name": "metadata0" - } -} diff --git a/api-ref/source/v3/samples/volume-metadata-show-key-response.json 
b/api-ref/source/v3/samples/volume-metadata-show-key-response.json deleted file mode 100644 index ebd589c1b..000000000 --- a/api-ref/source/v3/samples/volume-metadata-show-key-response.json +++ /dev/null @@ -1,5 +0,0 @@ -{ - "meta": { - "name": "test" - } -} diff --git a/api-ref/source/v3/samples/volume-metadata-show-response.json b/api-ref/source/v3/samples/volume-metadata-show-response.json deleted file mode 100644 index 5937a8665..000000000 --- a/api-ref/source/v3/samples/volume-metadata-show-response.json +++ /dev/null @@ -1,3 +0,0 @@ -{ - "metadata": {} -} diff --git a/api-ref/source/v3/samples/volume-metadata-update-key-request.json b/api-ref/source/v3/samples/volume-metadata-update-key-request.json deleted file mode 100644 index 023712fce..000000000 --- a/api-ref/source/v3/samples/volume-metadata-update-key-request.json +++ /dev/null @@ -1,5 +0,0 @@ -{ - "meta": { - "name": "new_name" - } -} diff --git a/api-ref/source/v3/samples/volume-metadata-update-key-response.json b/api-ref/source/v3/samples/volume-metadata-update-key-response.json deleted file mode 100644 index 023712fce..000000000 --- a/api-ref/source/v3/samples/volume-metadata-update-key-response.json +++ /dev/null @@ -1,5 +0,0 @@ -{ - "meta": { - "name": "new_name" - } -} diff --git a/api-ref/source/v3/samples/volume-metadata-update-request.json b/api-ref/source/v3/samples/volume-metadata-update-request.json deleted file mode 100644 index 4d96ad848..000000000 --- a/api-ref/source/v3/samples/volume-metadata-update-request.json +++ /dev/null @@ -1,5 +0,0 @@ -{ - "metadata": { - "name": "metadata1" - } -} diff --git a/api-ref/source/v3/samples/volume-metadata-update-response.json b/api-ref/source/v3/samples/volume-metadata-update-response.json deleted file mode 100644 index 4d96ad848..000000000 --- a/api-ref/source/v3/samples/volume-metadata-update-response.json +++ /dev/null @@ -1,5 +0,0 @@ -{ - "metadata": { - "name": "metadata1" - } -} diff --git 
a/api-ref/source/v3/samples/volume-os-retype-request.json b/api-ref/source/v3/samples/volume-os-retype-request.json deleted file mode 100644 index b6e6e8c32..000000000 --- a/api-ref/source/v3/samples/volume-os-retype-request.json +++ /dev/null @@ -1,6 +0,0 @@ -{ - "os-retype": { - "new_type": "dedup-tier-replicaton", - "migration_policy": "never" - } -} diff --git a/api-ref/source/v3/samples/volume-revert-to-snapshot-request.json b/api-ref/source/v3/samples/volume-revert-to-snapshot-request.json deleted file mode 100644 index 543194e5a..000000000 --- a/api-ref/source/v3/samples/volume-revert-to-snapshot-request.json +++ /dev/null @@ -1,5 +0,0 @@ -{ - "revert": { - "snapshot_id": "5aa119a8-d25b-45a7-8d1b-88e127885635" - } -} \ No newline at end of file diff --git a/api-ref/source/v3/samples/volume-show-response.json b/api-ref/source/v3/samples/volume-show-response.json deleted file mode 100644 index c17556186..000000000 --- a/api-ref/source/v3/samples/volume-show-response.json +++ /dev/null @@ -1,33 +0,0 @@ -{ - "volume": { - "status": "available", - "attachments": [], - "links": [ - { - "href": "http://localhost:8776/v3/0c2eba2c5af04d3f9e9d0d410b371fde/volumes/5aa119a8-d25b-45a7-8d1b-88e127885635", - "rel": "self" - }, - { - "href": "http://localhost:8776/0c2eba2c5af04d3f9e9d0d410b371fde/volumes/5aa119a8-d25b-45a7-8d1b-88e127885635", - "rel": "bookmark" - } - ], - "availability_zone": "nova", - "bootable": "false", - "os-vol-host-attr:host": "ip-10-168-107-25", - "source_volid": null, - "snapshot_id": null, - "id": "5aa119a8-d25b-45a7-8d1b-88e127885635", - "description": "Super volume.", - "name": "vol-002", - "created_at": "2013-02-25T02:40:21.000000", - "volume_type": "None", - "os-vol-tenant-attr:tenant_id": "0c2eba2c5af04d3f9e9d0d410b371fde", - "size": 1, - "os-volume-replication:driver_data": null, - "os-volume-replication:extended_status": null, - "metadata": { - "contents": "not junk" - } - } -} diff --git 
a/api-ref/source/v3/samples/volume-status-reset-request.json b/api-ref/source/v3/samples/volume-status-reset-request.json deleted file mode 100644 index 506b61019..000000000 --- a/api-ref/source/v3/samples/volume-status-reset-request.json +++ /dev/null @@ -1,7 +0,0 @@ -{ - "os-reset_status": { - "status": "available", - "attach_status": "detached", - "migration_status": "migrating" - } -} diff --git a/api-ref/source/v3/samples/volume-transfer-accept-request.json b/api-ref/source/v3/samples/volume-transfer-accept-request.json deleted file mode 100644 index 3399f1e0c..000000000 --- a/api-ref/source/v3/samples/volume-transfer-accept-request.json +++ /dev/null @@ -1,5 +0,0 @@ -{ - "accept": { - "auth_key": "9266c59563c84664" - } -} diff --git a/api-ref/source/v3/samples/volume-transfer-accept-response.json b/api-ref/source/v3/samples/volume-transfer-accept-response.json deleted file mode 100644 index ab4604dd1..000000000 --- a/api-ref/source/v3/samples/volume-transfer-accept-response.json +++ /dev/null @@ -1,17 +0,0 @@ -{ - "transfer": { - "id": "cac5c677-73a9-4288-bb9c-b2ebfb547377", - "name": "first volume transfer", - "volume_id": "894623a6-e901-4312-aa06-4275e6321cce", - "links": [ - { - "href": "http://localhost/v3/firstproject/volumes/1", - "rel": "self" - }, - { - "href": "http://localhost/firstproject/volumes/1", - "rel": "bookmark" - } - ] - } -} diff --git a/api-ref/source/v3/samples/volume-transfer-create-request.json b/api-ref/source/v3/samples/volume-transfer-create-request.json deleted file mode 100644 index f517b7498..000000000 --- a/api-ref/source/v3/samples/volume-transfer-create-request.json +++ /dev/null @@ -1,6 +0,0 @@ -{ - "transfer": { - "volume_id": "c86b9af4-151d-4ead-b62c-5fb967af0e37", - "name": "first volume" - } -} diff --git a/api-ref/source/v3/samples/volume-transfer-create-response.json b/api-ref/source/v3/samples/volume-transfer-create-response.json deleted file mode 100644 index 7f0e9e2c5..000000000 --- 
a/api-ref/source/v3/samples/volume-transfer-create-response.json +++ /dev/null @@ -1,19 +0,0 @@ -{ - "transfer": { - "id": "1a7059f5-8ed7-45b7-8d05-2811e5d09f24", - "created_at": "2015-02-25T03:56:53.081642", - "name": "first volume", - "volume_id": "c86b9af4-151d-4ead-b62c-5fb967af0e37", - "auth_key": "9266c59563c84664", - "links": [ - { - "href": "http://localhost/v3/firstproject/volumes/3", - "rel": "self" - }, - { - "href": "http://localhost/firstproject/volumes/3", - "rel": "bookmark" - } - ] - } -} diff --git a/api-ref/source/v3/samples/volume-transfer-show-response.json b/api-ref/source/v3/samples/volume-transfer-show-response.json deleted file mode 100644 index 366e54725..000000000 --- a/api-ref/source/v3/samples/volume-transfer-show-response.json +++ /dev/null @@ -1,18 +0,0 @@ -{ - "transfer": { - "id": "cac5c677-73a9-4288-bb9c-b2ebfb547377", - "created_at": "2015-02-25T03:56:53.081642", - "name": "first volume transfer", - "volume_id": "894623a6-e901-4312-aa06-4275e6321cce", - "links": [ - { - "href": "http://localhost/v3/firstproject/volumes/1", - "rel": "self" - }, - { - "href": "http://localhost/firstproject/volumes/1", - "rel": "bookmark" - } - ] - } -} diff --git a/api-ref/source/v3/samples/volume-transfers-list-detailed-response.json b/api-ref/source/v3/samples/volume-transfers-list-detailed-response.json deleted file mode 100644 index 52ba78bc0..000000000 --- a/api-ref/source/v3/samples/volume-transfers-list-detailed-response.json +++ /dev/null @@ -1,36 +0,0 @@ -{ - "transfers": [ - { - "id": "cac5c677-73a9-4288-bb9c-b2ebfb547377", - "created_at": "2015-02-25T03:56:53.081642", - "name": "first volume transfer", - "volume_id": "894623a6-e901-4312-aa06-4275e6321cce", - "links": [ - { - "href": "http://localhost/v3/firstproject/volumes/1", - "rel": "self" - }, - { - "href": "http://localhost/firstproject/volumes/1", - "rel": "bookmark" - } - ] - }, - { - "id": "f26c0dee-d20d-4e80-8dee-a8d91b9742a1", - "created_at": "2015-03-25T03:56:53.081642", - 
"name": "second volume transfer", - "volume_id": "673db275-379f-41af-8371-e1652132b4c1", - "links": [ - { - "href": "http://localhost/v3/firstproject/volumes/2", - "rel": "self" - }, - { - "href": "http://localhost/firstproject/volumes/2", - "rel": "bookmark" - } - ] - } - ] -} diff --git a/api-ref/source/v3/samples/volume-transfers-list-response.json b/api-ref/source/v3/samples/volume-transfers-list-response.json deleted file mode 100644 index 66a2db54e..000000000 --- a/api-ref/source/v3/samples/volume-transfers-list-response.json +++ /dev/null @@ -1,34 +0,0 @@ -{ - "transfers": [ - { - "id": "cac5c677-73a9-4288-bb9c-b2ebfb547377", - "name": "first volume transfer", - "volume_id": "894623a6-e901-4312-aa06-4275e6321cce", - "links": [ - { - "href": "http://localhost/v3/firstproject/volumes/1", - "rel": "self" - }, - { - "href": "http://localhost/firstproject/volumes/1", - "rel": "bookmark" - } - ] - }, - { - "id": "f26c0dee-d20d-4e80-8dee-a8d91b9742a1", - "name": "second volume transfer", - "volume_id": "673db275-379f-41af-8371-e1652132b4c1", - "links": [ - { - "href": "http://localhost/v3/firstproject/volumes/2", - "rel": "self" - }, - { - "href": "http://localhost/firstproject/volumes/2", - "rel": "bookmark" - } - ] - } - ] -} diff --git a/api-ref/source/v3/samples/volume-type-access-add-request.json b/api-ref/source/v3/samples/volume-type-access-add-request.json deleted file mode 100644 index b7481edbb..000000000 --- a/api-ref/source/v3/samples/volume-type-access-add-request.json +++ /dev/null @@ -1,5 +0,0 @@ -{ - "addProjectAccess": { - "project": "f270b245cb11498ca4031deb7e141cfa" - } -} diff --git a/api-ref/source/v3/samples/volume-type-access-delete-request.json b/api-ref/source/v3/samples/volume-type-access-delete-request.json deleted file mode 100644 index 144997bfc..000000000 --- a/api-ref/source/v3/samples/volume-type-access-delete-request.json +++ /dev/null @@ -1,5 +0,0 @@ -{ - "removeProjectAccess": { - "project": "f270b245cb11498ca4031deb7e141cfa" - } 
-} diff --git a/api-ref/source/v3/samples/volume-type-access-list-response.json b/api-ref/source/v3/samples/volume-type-access-list-response.json deleted file mode 100644 index afcffb081..000000000 --- a/api-ref/source/v3/samples/volume-type-access-list-response.json +++ /dev/null @@ -1,6 +0,0 @@ -{ - "volume_type_access": { - "volume_type_id": "3c67e124-39ad-4ace-a507-8bb7bf510c26", - "project_id": "f270b245cb11498ca4031deb7e141cfa" - } -} diff --git a/api-ref/source/v3/samples/volume-type-create-request.json b/api-ref/source/v3/samples/volume-type-create-request.json deleted file mode 100644 index 94ab0449e..000000000 --- a/api-ref/source/v3/samples/volume-type-create-request.json +++ /dev/null @@ -1,7 +0,0 @@ -{ - "volume_type": { - "name": "vol-type-001", - "description": "volume type 0001", - "is_public": true, - } -} diff --git a/api-ref/source/v3/samples/volume-type-create-response.json b/api-ref/source/v3/samples/volume-type-create-response.json deleted file mode 100644 index d2db3b09c..000000000 --- a/api-ref/source/v3/samples/volume-type-create-response.json +++ /dev/null @@ -1,11 +0,0 @@ -{ - "volume_type": { - "name": "test_type", - "extra_specs": {}, - "os-volume-type-access:is_public": true, - "is_public": true, - "id": "6d0ff92a-0007-4780-9ece-acfe5876966a", - "description": "test_type_desc" - } -} - diff --git a/api-ref/source/v3/samples/volume-type-show-request.json b/api-ref/source/v3/samples/volume-type-show-request.json deleted file mode 100644 index a91f2e94d..000000000 --- a/api-ref/source/v3/samples/volume-type-show-request.json +++ /dev/null @@ -1,9 +0,0 @@ -{ - "volume_type": { - "id": "289da7f8-6440-407c-9fb4-7db01ec49164", - "name": "vol-type-001", - "extra_specs": { - "capabilities": "gpu" - } - } -} diff --git a/api-ref/source/v3/samples/volume-type-show-response.json b/api-ref/source/v3/samples/volume-type-show-response.json deleted file mode 100644 index 7a0420f20..000000000 --- 
a/api-ref/source/v3/samples/volume-type-show-response.json +++ /dev/null @@ -1,11 +0,0 @@ -{ - "volume_type": { - "id": "6685584b-1eac-4da6-b5c3-555430cf68ff", - "name": "vol-type-001", - "description": "volume type 001", - "is_public": "true", - "extra_specs": { - "capabilities": "gpu" - } - } -} diff --git a/api-ref/source/v3/samples/volume-type-update-request.json b/api-ref/source/v3/samples/volume-type-update-request.json deleted file mode 100644 index 8bdc5befb..000000000 --- a/api-ref/source/v3/samples/volume-type-update-request.json +++ /dev/null @@ -1,10 +0,0 @@ -{ - "volume_type": { - "name": "vol-type-001", - "description": "volume type 0001", - "is_public": true, - "extra_specs": { - "capabilities": "gpu" - } - } -} diff --git a/api-ref/source/v3/samples/volume-types-list-response.json b/api-ref/source/v3/samples/volume-types-list-response.json deleted file mode 100644 index 1d72f923e..000000000 --- a/api-ref/source/v3/samples/volume-types-list-response.json +++ /dev/null @@ -1,16 +0,0 @@ -{ - "volume_types": [ - { - "extra_specs": { - "capabilities": "gpu" - }, - "id": "6685584b-1eac-4da6-b5c3-555430cf68ff", - "name": "SSD" - }, - { - "extra_specs": {}, - "id": "8eb69a46-df97-4e41-9586-9a40a7533803", - "name": "SATA" - } - ] -} diff --git a/api-ref/source/v3/samples/volume-unmanage-request.json b/api-ref/source/v3/samples/volume-unmanage-request.json deleted file mode 100644 index a75950bb9..000000000 --- a/api-ref/source/v3/samples/volume-unmanage-request.json +++ /dev/null @@ -1,3 +0,0 @@ -{ - "os-unmanage": {} -} diff --git a/api-ref/source/v3/samples/volume-update-request.json b/api-ref/source/v3/samples/volume-update-request.json deleted file mode 100644 index 8e52dacb6..000000000 --- a/api-ref/source/v3/samples/volume-update-request.json +++ /dev/null @@ -1,6 +0,0 @@ -{ - "volume": { - "name": "vol-003", - "description": "This is yet, another volume." 
- } -} diff --git a/api-ref/source/v3/samples/volume-update-response.json b/api-ref/source/v3/samples/volume-update-response.json deleted file mode 100644 index 1fc925e91..000000000 --- a/api-ref/source/v3/samples/volume-update-response.json +++ /dev/null @@ -1,36 +0,0 @@ -{ - "volume": { - "status": "available", - "migration_status": null, - "user_id": "0eea4eabcf184061a3b6db1e0daaf010", - "attachments": [], - "links": [ - { - "href": "http://localhost:8776/v3/0c2eba2c5af04d3f9e9d0d410b371fde/volumes/5aa119a8-d25b-45a7-8d1b-88e127885635", - "rel": "self" - }, - { - "href": "http://localhost:8776/0c2eba2c5af04d3f9e9d0d410b371fde/volumes/5aa119a8-d25b-45a7-8d1b-88e127885635", - "rel": "bookmark" - } - ], - "availability_zone": "nova", - "bootable": "false", - "encrypted": false, - "created_at": "2015-11-29T03:01:44.000000", - "description": "This is yet, another volume.", - "updated_at": null, - "volume_type": "lvmdriver-1", - "name": "vol-003", - "replication_status": "disabled", - "consistencygroup_id": null, - "source_volid": null, - "snapshot_id": null, - "multiattach": false, - "metadata": { - "contents": "not junk" - }, - "id": "5aa119a8-d25b-45a7-8d1b-88e127885635", - "size": 1 - } -} diff --git a/api-ref/source/v3/samples/volume-upload-to-image-request.json b/api-ref/source/v3/samples/volume-upload-to-image-request.json deleted file mode 100644 index 4718d4bce..000000000 --- a/api-ref/source/v3/samples/volume-upload-to-image-request.json +++ /dev/null @@ -1,10 +0,0 @@ -{ - "os-volume_upload_image":{ - "image_name": "test", - "force": false, - "disk_format": "raw", - "container_format": "bare", - "visibility": "private", - "protected": false - } -} \ No newline at end of file diff --git a/api-ref/source/v3/samples/volume-upload-to-image-response.json b/api-ref/source/v3/samples/volume-upload-to-image-response.json deleted file mode 100644 index 58ce05f28..000000000 --- a/api-ref/source/v3/samples/volume-upload-to-image-response.json +++ /dev/null @@ -1,16 
+0,0 @@ -{ - "os-volume_upload_image": { - "status": "uploading", - "container_format": "bare", - "image_name": "test", - "visibility": "private", - "updated_at": "2017-06-05T08:44:28.000000", - "image_id": "de75b74e-7f0d-4b59-a263-bd87bfc313bd", - "display_description": null, - "id": "3a81fdac-e8ae-4e61-b6a2-2e14ff316f19", - "size": 1, - "disk_format": "raw", - "volume_type": null, - "protected": false - } -} \ No newline at end of file diff --git a/api-ref/source/v3/samples/volumes-list-detailed-response.json b/api-ref/source/v3/samples/volumes-list-detailed-response.json deleted file mode 100644 index 28768fd30..000000000 --- a/api-ref/source/v3/samples/volumes-list-detailed-response.json +++ /dev/null @@ -1,102 +0,0 @@ -{ - "volumes": [ - { - "migration_status": null, - "attachments": [ - { - "server_id": "f4fda93b-06e0-4743-8117-bc8bcecd651b", - "attachment_id": "3b4db356-253d-4fab-bfa0-e3626c0b8405", - "host_name": null, - "volume_id": "6edbc2f4-1507-44f8-ac0d-eed1d2608d38", - "device": "/dev/vdb", - "id": "6edbc2f4-1507-44f8-ac0d-eed1d2608d38" - } - ], - "links": [ - { - "href": "http://23.253.248.171:8776/v3/bab7d5c60cd041a0a36f7c4b6e1dd978/volumes/6edbc2f4-1507-44f8-ac0d-eed1d2608d38", - "rel": "self" - }, - { - "href": "http://23.253.248.171:8776/bab7d5c60cd041a0a36f7c4b6e1dd978/volumes/6edbc2f4-1507-44f8-ac0d-eed1d2608d38", - "rel": "bookmark" - } - ], - "availability_zone": "nova", - "os-vol-host-attr:host": "difleming@lvmdriver-1#lvmdriver-1", - "encrypted": false, - "os-volume-replication:extended_status": null, - "replication_status": "disabled", - "snapshot_id": null, - "id": "6edbc2f4-1507-44f8-ac0d-eed1d2608d38", - "size": 2, - "user_id": "32779452fcd34ae1a53a797ac8a1e064", - "os-vol-tenant-attr:tenant_id": "bab7d5c60cd041a0a36f7c4b6e1dd978", - "os-vol-mig-status-attr:migstat": null, - "metadata": { - "readonly": false, - "attached_mode": "rw" - }, - "status": "in-use", - "description": null, - "multiattach": true, - 
"os-volume-replication:driver_data": null, - "source_volid": null, - "consistencygroup_id": null, - "os-vol-mig-status-attr:name_id": null, - "name": "test-volume-attachments", - "bootable": "false", - "created_at": "2015-11-29T03:01:44.000000", - "volume_type": "lvmdriver-1" - }, - { - "migration_status": null, - "attachments": [], - "links": [ - { - "href": "http://23.253.248.171:8776/v3/bab7d5c60cd041a0a36f7c4b6e1dd978/volumes/173f7b48-c4c1-4e70-9acc-086b39073506", - "rel": "self" - }, - { - "href": "http://23.253.248.171:8776/bab7d5c60cd041a0a36f7c4b6e1dd978/volumes/173f7b48-c4c1-4e70-9acc-086b39073506", - "rel": "bookmark" - } - ], - "availability_zone": "nova", - "os-vol-host-attr:host": "difleming@lvmdriver-1#lvmdriver-1", - "encrypted": false, - "os-volume-replication:extended_status": null, - "replication_status": "disabled", - "snapshot_id": null, - "id": "173f7b48-c4c1-4e70-9acc-086b39073506", - "size": 1, - "user_id": "32779452fcd34ae1a53a797ac8a1e064", - "os-vol-tenant-attr:tenant_id": "bab7d5c60cd041a0a36f7c4b6e1dd978", - "os-vol-mig-status-attr:migstat": null, - "metadata": {}, - "status": "available", - "volume_image_metadata": { - "kernel_id": "8a55f5f1-78f7-4477-8168-977d8519342c", - "checksum": "eb9139e4942121f22bbc2afc0400b2a4", - "min_ram": "0", - "ramdisk_id": "5f6bdf8a-92db-4988-865b-60bdd808d9ef", - "disk_format": "ami", - "image_name": "cirros-0.3.4-x86_64-uec", - "image_id": "b48c53e1-9a96-4a5a-a630-2e74ec54ddcc", - "container_format": "ami", - "min_disk": "0", - "size": "25165824" - }, - "description": "", - "multiattach": false, - "os-volume-replication:driver_data": null, - "source_volid": null, - "consistencygroup_id": null, - "os-vol-mig-status-attr:name_id": null, - "name": "test-volume", - "bootable": "true", - "created_at": "2015-11-29T02:25:18.000000", - "volume_type": "lvmdriver-1" - } - ] -} diff --git a/api-ref/source/v3/samples/volumes-list-response.json b/api-ref/source/v3/samples/volumes-list-response.json deleted file mode 
100644 index eb4ebafa1..000000000 --- a/api-ref/source/v3/samples/volumes-list-response.json +++ /dev/null @@ -1,32 +0,0 @@ -{ - "volumes": [ - { - "id": "45baf976-c20a-4894-a7c3-c94b7376bf55", - "links": [ - { - "href": "http://localhost:8776/v3/0c2eba2c5af04d3f9e9d0d410b371fde/volumes/45baf976-c20a-4894-a7c3-c94b7376bf55", - "rel": "self" - }, - { - "href": "http://localhost:8776/0c2eba2c5af04d3f9e9d0d410b371fde/volumes/45baf976-c20a-4894-a7c3-c94b7376bf55", - "rel": "bookmark" - } - ], - "name": "vol-004" - }, - { - "id": "5aa119a8-d25b-45a7-8d1b-88e127885635", - "links": [ - { - "href": "http://localhost:8776/v3/0c2eba2c5af04d3f9e9d0d410b371fde/volumes/5aa119a8-d25b-45a7-8d1b-88e127885635", - "rel": "self" - }, - { - "href": "http://localhost:8776/0c2eba2c5af04d3f9e9d0d410b371fde/volumes/5aa119a8-d25b-45a7-8d1b-88e127885635", - "rel": "bookmark" - } - ], - "name": "vol-003" - } - ] -} diff --git a/api-ref/source/v3/samples/volumes-list-summary-response.json b/api-ref/source/v3/samples/volumes-list-summary-response.json deleted file mode 100644 index 26bf3d939..000000000 --- a/api-ref/source/v3/samples/volumes-list-summary-response.json +++ /dev/null @@ -1,10 +0,0 @@ -{ - "volume-summary": { - "total_size": 4, - "total_count": 4, - "metadata": { - "key1": ["value1", "value2"], - "key2": ["value2"] - } - } -} \ No newline at end of file diff --git a/api-ref/source/v3/snapshot-manage.inc b/api-ref/source/v3/snapshot-manage.inc deleted file mode 100644 index 6da63a979..000000000 --- a/api-ref/source/v3/snapshot-manage.inc +++ /dev/null @@ -1,139 +0,0 @@ -.. -*- rst -*- - -Snapshot manage extension (manageable_snapshots) -================================================ - -Creates or lists snapshots by using existing storage instead of allocating new -storage. - - -Manage an existing snapshot -~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -.. rest_method:: POST /v3/{project_id}/manageable_snapshots - -Creates a snapshot by using existing storage rather than allocating new storage. 
- -The caller must specify a reference to an existing storage volume -in the ref parameter in the request. Although each storage driver -might interpret this reference differently, the driver should -accept a reference structure that contains either a source-id -or source-name element, if possible. - -The API chooses the size of the snapshot by rounding up the size of -the existing snapshot to the next gibibyte (GiB). - - -Normal response codes: 202 - - -Request -------- - -.. rest_parameters:: parameters.yaml - - - description: description - - name: name - - ref: ref - - volume_id: volume_id - - metadata: metadata - - project_id: project_id_path - -Request Example ---------------- - -.. literalinclude:: ./samples/snapshot-manage-request.json - :language: javascript - - - -List summary of snapshots available to manage -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -.. rest_method:: GET /v3/{project_id}/manageable_snapshots - -Search a volume backend and list summary of snapshots which are available to -manage. - -Normal response codes: 200 - - -Request -------- - -.. rest_parameters:: parameters.yaml - - - sort: sort - - offset: offset - - limit: limit - - marker: marker - - project_id: project_id_path - - host: hostname - - -Response --------- - -.. rest_parameters:: parameters.yaml - - - manageable-snapshots: manageable-snapshots - - source_reference: source_reference - - safe_to_manage: safe_to_manage - - reference: reference - - source-name: source-name - - size: size - - -Response Example ----------------- - -.. literalinclude:: ./samples/snapshot-manage-list-response.json - :language: javascript - - - -List detail of snapshots available to manage -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -.. rest_method:: GET /v3/{project_id}/manageable_snapshots/detail - -Search a volume backend and list detail of snapshots which are available to -manage. - -Normal response codes: 200 - - -Request -------- - -.. 
rest_parameters:: parameters.yaml - - - sort: sort - - offset: offset - - limit: limit - - marker: marker - - project_id: project_id_path - - host: hostname - - -Response --------- - -.. rest_parameters:: parameters.yaml - - - manageable-snapshots: manageable-snapshots - - cinder_id: cinder_id - - source_reference: source_reference - - safe_to_manage: safe_to_manage - - reason_not_safe: reason_not_safe - - reference: reference - - source-name: source-name - - size: size - - extra_info: extra_info - - -Response Example ----------------- - -.. literalinclude:: ./samples/snapshot-manage-list-detail-response.json - :language: javascript diff --git a/api-ref/source/v3/volume-manage.inc b/api-ref/source/v3/volume-manage.inc deleted file mode 100644 index 0e2d20cd9..000000000 --- a/api-ref/source/v3/volume-manage.inc +++ /dev/null @@ -1,147 +0,0 @@ -.. -*- rst -*- - -Volume manage extension (manageable_volumes) -============================================ - -Creates or lists volumes by using existing storage instead of allocating new -storage. - - -Manage an existing volume -~~~~~~~~~~~~~~~~~~~~~~~~~ - -.. rest_method:: POST /v3/{project_id}/manageable_volumes - -Creates a Block Storage volume by using existing storage rather than allocating new storage. - -The caller must specify a reference to an existing storage volume -in the ref parameter in the request. Although each storage driver -might interpret this reference differently, the driver should -accept a reference structure that contains either a source-id -or source-name element, if possible. - -The API chooses the size of the volume by rounding up the size of -the existing storage volume to the next gibibyte (GiB). - -Prior to microversion 3.16 host field was required, with the possibility of -defining the cluster it is no longer required, but we must have either a host -or a cluster field but we cannot have them both with values. - -Normal response codes: 202 - - -Request -------- - -.. 
rest_parameters:: parameters.yaml - - - description: description - - availability_zone: availability_zone - - bootable: bootable - - volume_type: volume_type - - name: name - - volume: volume - - host: host_mutex - - cluster: cluster_mutex - - ref: ref - - metadata: metadata - - project_id: project_id_path - -Request Example ---------------- - -.. literalinclude:: ./samples/volume-manage-request.json - :language: javascript - -.. literalinclude:: ./samples/volume-manage-request-cluster.json - :language: javascript - - -List summary of volumes available to manage -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -.. rest_method:: GET /v3/{project_id}/manageable_volumes - -Search a volume backend and list summary of volumes which are available to -manage. - -Normal response codes: 200 - - -Request -------- - -.. rest_parameters:: parameters.yaml - - - sort: sort - - offset: offset - - limit: limit - - marker: marker - - project_id: project_id_path - - host: hostname - - -Response --------- - -.. rest_parameters:: parameters.yaml - - - manageable-volumes: manageable-volumes - - safe_to_manage: safe_to_manage - - reference: reference - - source-name: source-name - - size: size - - -Response Example ----------------- - -.. literalinclude:: ./samples/volume-manage-list-response.json - :language: javascript - - - -List detail of volumes available to manage -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -.. rest_method:: GET /v3/{project_id}/manageable_volumes/detail - -Search a volume backend and list detail of volumes which are available to -manage. - -Normal response codes: 200 - - -Request -------- - -.. rest_parameters:: parameters.yaml - - - sort: sort - - offset: offset - - limit: limit - - marker: marker - - project_id: project_id_path - - host: hostname - - -Response --------- - -.. 
rest_parameters:: parameters.yaml - - - manageable-volumes: manageable-volumes - - cinder_id: cinder_id - - safe_to_manage: safe_to_manage - - reason_not_safe: reason_not_safe - - reference: reference - - source-name: source-name - - size: size - - extra_info: extra_info - - -Response Example ----------------- - -.. literalinclude:: ./samples/volume-manage-list-detail-response.json - :language: javascript diff --git a/api-ref/source/v3/volume-type-access.inc b/api-ref/source/v3/volume-type-access.inc deleted file mode 100644 index d1bfa1121..000000000 --- a/api-ref/source/v3/volume-type-access.inc +++ /dev/null @@ -1,102 +0,0 @@ -.. -*- rst -*- - -Volume type access (volumes) -============================ - -Private volume type access to project. - -By default, volumes types are public. To create a private volume -type, set the ``is_public`` boolean field to ``false`` at volume -type creation time. To control access to a private volume type, -user needs to add a project to or remove a project from the volume -type. Private volume types without projects are only accessible by -users with the administrative role and context. - - -Add private volume type access to project -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -.. rest_method:: POST /v3/{project_id}/types/{volume_type}/action - -Adds private volume type access to a project. - -Normal response codes: 202 - - -Request -------- - -.. rest_parameters:: parameters.yaml - - - project: project - - project_id: project_id_path - - volume_type: volume_type - -Request Example ---------------- - -.. literalinclude:: ./samples/volume-type-access-add-request.json - :language: javascript - - -Remove private volume type access from project -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -.. rest_method:: POST /v3/{project_id}/types/{volume_type}/action - -Removes private volume type access from a project. - -Normal response codes: 202 - - -Request -------- - -.. 
rest_parameters:: parameters.yaml - - - project: project - - project_id: project_id_path - - volume_type: volume_type - -Request Example ---------------- - -.. literalinclude:: ./samples/volume-type-access-delete-request.json - :language: javascript - - -List private volume type access detail -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -.. rest_method:: GET /v3/{project_id}/types/{volume_type}/os-volume-type-access - -Lists project IDs that have access to private volume type. - - -Normal response codes: 200 -Error response codes: - - -Request -------- - -.. rest_parameters:: parameters.yaml - - - project_id: project_id_path - - volume_type: volume_type - - -Response Parameters -------------------- - -.. rest_parameters:: parameters.yaml - - - project_id: project_id - - - -Response Example ----------------- - -.. literalinclude:: ./samples/volume-type-access-list-response.json - :language: javascript diff --git a/api-ref/source/v3/volumes-v3-extensions.inc b/api-ref/source/v3/volumes-v3-extensions.inc deleted file mode 100644 index 77122fee6..000000000 --- a/api-ref/source/v3/volumes-v3-extensions.inc +++ /dev/null @@ -1,51 +0,0 @@ -.. -*- rst -*- - -API extensions (extensions) -=========================== - - - - -List Known API extensions -~~~~~~~~~~~~~~~~~~~~~~~~~ - -.. rest_method:: GET /v3/{project_id}/extensions - -Lists Block Storage API extensions. - - -Normal response codes: 200 -Error response codes:300, - - -Request -------- - -.. rest_parameters:: parameters.yaml - - - project_id: project_id_path - - -Response Parameters -------------------- - -.. rest_parameters:: parameters.yaml - - - updated: updated - - description: description - - links: links - - namespace: namespace - - alias: alias - - name: name - - - - -Response Example ----------------- - -.. 
literalinclude:: ./samples/extensions-list-response.json - :language: javascript - - - diff --git a/api-ref/source/v3/volumes-v3-snapshots-actions.inc b/api-ref/source/v3/volumes-v3-snapshots-actions.inc deleted file mode 100644 index a9c3a8513..000000000 --- a/api-ref/source/v3/volumes-v3-snapshots-actions.inc +++ /dev/null @@ -1,34 +0,0 @@ -.. -*- rst -*- - -==================================== -Snapshot actions (snapshots, action) -==================================== - -Administrator only. Resets status for a snapshot. - - -Reset a snapshot's status -========================= - -.. rest_method:: POST /v3/{tenant_id}/snapshots/{snapshot_id}/action - -Resets the status. Specify the ``os-reset_status`` action in the request body. - -Normal response codes: 202, - - -Request -------- - -.. rest_parameters:: parameters.yaml - - - status: status_2 - - os-reset_status: os-reset_status - - tenant_id: tenant_id - - snapshot_id: snapshot_id - -Request Example ---------------- - -.. literalinclude:: ./samples/snapshot-status-reset-request.json - :language: javascript diff --git a/api-ref/source/v3/volumes-v3-snapshots.inc b/api-ref/source/v3/volumes-v3-snapshots.inc deleted file mode 100644 index 13643c052..000000000 --- a/api-ref/source/v3/volumes-v3-snapshots.inc +++ /dev/null @@ -1,516 +0,0 @@ -.. -*- rst -*- - -Volume snapshots (snapshots) -============================ - -A snapshot is a point-in-time copy of the data that a volume -contains. - -When you create, list, or delete snapshots, these status values are -possible: - -**Snapshot statuses** - -+----------------+-------------------------------------+ -| Status | Description | -+----------------+-------------------------------------+ -| creating | The snapshot is being created. | -+----------------+-------------------------------------+ -| available | The snapshot is ready to use. | -+----------------+-------------------------------------+ -| backing-up | The snapshot is being backed up. 
| -+----------------+-------------------------------------+ -| deleting | The snapshot is being deleted. | -+----------------+-------------------------------------+ -| error | A snapshot creation error occurred. | -+----------------+-------------------------------------+ -| error_deleting | A snapshot deletion error occurred. | -+----------------+-------------------------------------+ - - -List snapshots and details -~~~~~~~~~~~~~~~~~~~~~~~~~~ - -.. rest_method:: GET /v3/{project_id}/snapshots/detail - -Lists all Block Storage snapshots, with details, that the project can access, -since v3.31 if non-admin users specify invalid filters in the url, API will -return bad request. - - -Normal response codes: 200 -Error response codes: badRequest(400) - - -Request -------- - -.. rest_parameters:: parameters.yaml - - - project_id: project_id_path - - sort_key: sort_key - - sort_dir: sort_dir - - limit: limit - - offset: offset - - marker: marker - - -Response Parameters -------------------- - -.. rest_parameters:: parameters.yaml - - - status: status_2 - - os-extended-snapshot-attributes:progress: os-extended-snapshot-attributes:progress - - description: description - - created_at: created_at - - name: name - - user_id: user_id_2 - - volume_id: volume_id - - os-extended-snapshot-attributes:project_id: os-extended-snapshot-attributes:project_id - - size: size - - id: id - - metadata: metadata - -Response Example ----------------- - -.. literalinclude:: ./samples/snapshots-list-detailed-response.json - :language: javascript - - -Create a snapshot -~~~~~~~~~~~~~~~~~ - -.. rest_method:: POST /v3/{project_id}/snapshots - -Creates a volume snapshot, which is a point-in-time, complete copy of a volume. You can create a volume from a snapshot. - -Normal response codes: 202 - - -Request -------- - -.. 
rest_parameters:: parameters.yaml - - - snapshot: snapshot - - volume_id: volume_id - - force: force - - description: description - - name: name - - project_id: project_id_path - - metadata: metadata_7 - -Request Example ---------------- - -.. literalinclude:: ./samples/snapshot-create-request.json - :language: javascript - -Response Parameters -------------------- - -.. rest_parameters:: parameters.yaml - - - status: status_2 - - description: description - - created_at: created_at - - name: name - - snapshot: snapshot - - user_id: user_id_2 - - volume_id: volume_id - - metadata: metadata - - id: id - - size: size - - updated_at: updated_at - -Response Example ----------------- - -.. literalinclude:: ./samples/snapshot-create-response.json - :language: javascript - - -List accessible snapshots -~~~~~~~~~~~~~~~~~~~~~~~~~ - -.. rest_method:: GET /v3/{project_id}/snapshots - -Lists all Block Storage snapshots, with summary information, -that the project can access, since v3.31 if non-admin users -specify invalid filters in the url, API will return bad request. - - -Normal response codes: 200 -Error response codes: badRequest(400) - - -Request -------- - -.. rest_parameters:: parameters.yaml - - - project_id: project_id_path - - sort_key: sort_key - - sort_dir: sort_dir - - limit: limit - - offset: offset - - marker: marker - - -Response Parameters -------------------- - -.. rest_parameters:: parameters.yaml - - - status: status_2 - - description: description - - created_at: created_at - - name: name - - user_id: user_id_2 - - volume_id: volume_id - - metadata: metadata - - id: id - - size: size - -Response Example ----------------- - -.. literalinclude:: ./samples/snapshots-list-response.json - :language: javascript - - -Show a snapshot's metadata -~~~~~~~~~~~~~~~~~~~~~~~~~~ - -.. rest_method:: GET /v3/{project_id}/snapshots/{snapshot_id}/metadata - -Shows metadata for a snapshot. - - -Normal response codes: 200 -Error response codes: - - -Request -------- - -.. 
rest_parameters:: parameters.yaml - - - project_id: project_id_path - - snapshot_id: snapshot_id_path - - -Response Parameters -------------------- - -.. rest_parameters:: parameters.yaml - - - status: status_2 - - os-extended-snapshot-attributes:progress: os-extended-snapshot-attributes:progress - - description: description - - created_at: created_at - - name: name - - snapshot: snapshot - - volume_id: volume_id - - os-extended-snapshot-attributes:project_id: os-extended-snapshot-attributes:project_id - - size: size - - id: id - - metadata: metadata - -Response Example ----------------- - -.. literalinclude:: ./samples/snapshot-metadata-show-response.json - :language: javascript - - -Create a snapshot's metadata -~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -.. rest_method:: POST /v3/{project_id}/snapshots/{snapshot_id}/metadata - -Updates metadata for a snapshot. - -Creates or replaces metadata items that match keys. Does not modify items that -are not in the request. - - -Normal response codes: 200 -Error response codes: - - -Request -------- - -.. rest_parameters:: parameters.yaml - - - metadata: metadata - - project_id: project_id_path - - snapshot_id: snapshot_id_path - -Request Example ---------------- - -.. literalinclude:: ./samples/snapshot-metadata-create-request.json - :language: javascript - - -Response Example ----------------- - -.. literalinclude:: ./samples/snapshot-metadata-create-response.json - :language: javascript - -Update a snapshot's metadata -~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -.. rest_method:: PUT /v3/{project_id}/snapshots/{snapshot_id}/metadata - -Replaces all the snapshot's metadata with the key-value pairs in the request. - - -Normal response codes: 200 -Error response codes: - - -Request -------- - -.. rest_parameters:: parameters.yaml - - - metadata: metadata - - project_id: project_id_path - - snapshot_id: snapshot_id_path - -Request Example ---------------- - -.. 
literalinclude:: ./samples/snapshot-metadata-update-request.json - :language: javascript - - -Response Example ----------------- - -.. literalinclude:: ./samples/snapshot-metadata-update-response.json - :language: javascript - - -Show a snapshot's details -~~~~~~~~~~~~~~~~~~~~~~~~~ - -.. rest_method:: GET /v3/{project_id}/snapshots/{snapshot_id} - -Shows details for a snapshot. - - -Normal response codes: 200 -Error response codes: - - -Request -------- - -.. rest_parameters:: parameters.yaml - - - project_id: project_id_path - - snapshot_id: snapshot_id_path - - -Response Parameters -------------------- - -.. rest_parameters:: parameters.yaml - - - status: status_2 - - os-extended-snapshot-attributes:progress: os-extended-snapshot-attributes:progress - - description: description - - created_at: created_at - - name: name - - snapshot: snapshot - - user_id: user_id_2 - - volume_id: volume_id - - os-extended-snapshot-attributes:project_id: os-extended-snapshot-attributes:project_id - - size: size - - id: id - - metadata: metadata - -Response Example ----------------- - -.. literalinclude:: ./samples/snapshot-show-response.json - :language: javascript - - -Update a snapshot -~~~~~~~~~~~~~~~~~ - -.. rest_method:: PUT /v3/{project_id}/snapshots/{snapshot_id} - -Updates a snapshot. - - -Normal response codes: 200 -Error response codes: - - -Request -------- - -.. rest_parameters:: parameters.yaml - - - snapshot: snapshot - - description: description - - name: name - - project_id: project_id_path - - snapshot_id: snapshot_id_path - -Request Example ---------------- - -.. literalinclude:: ./samples/snapshot-update-request.json - :language: javascript - - - -Response Parameters -------------------- - -.. 
rest_parameters:: parameters.yaml - - - status: status_2 - - description: description - - created_at: created_at - - name: name - - snapshot: snapshot - - user_id: user_id_2 - - volume_id: volume_id - - metadata: metadata - - id: id - - size: size - -Response Example ----------------- - -.. literalinclude:: ./samples/snapshot-update-response.json - :language: javascript - - -Delete a snapshot -~~~~~~~~~~~~~~~~~ - -.. rest_method:: DELETE /v3/{project_id}/snapshots/{snapshot_id} - -Deletes a snapshot. - -Normal response codes: 202 - - -Request -------- - -.. rest_parameters:: parameters.yaml - - - project_id: project_id_path - - snapshot_id: snapshot_id_path - - - - -Show a snapshot's metadata for a specific key -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -.. rest_method:: GET /v3/{project_id}/snapshot/{snapshot_id}/metadata/{key} - -Shows metadata for a snapshot for a specific key. - -Normal response codes: 200 - - -Request -------- - -.. rest_parameters:: parameters.yaml - - - project_id: project_id_path - - snapshot_id: snapshot_id_path - - key: key_2 - - -Response Parameters -------------------- - -.. rest_parameters:: parameters.yaml - - - meta: meta_1 - - - -Response Example ----------------- - -.. literalinclude:: ./samples/snapshot-metadata-show-key-response.json - :language: javascript - - - -Delete a snapshot's metadata -~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -.. rest_method:: DELETE /v3/{project_id}/snapshots/{snapshot_id}/metadata/{key} - -Deletes metadata for a snapshot. - -Normal response codes: 200 - - -Request -------- - -.. rest_parameters:: parameters.yaml - - - project_id: project_id_path - - snapshot_id: snapshot_id_path - - key: key_1 - - -Update a snapshot's metadata for a specific key -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -.. rest_method:: PUT /v3/{project_id}/snapshots/{snapshot_id}/metadata/{key} - -Update metadata for a snapshot for a specific key. - -Normal response codes: 200 - - -Request -------- - -.. 
rest_parameters:: parameters.yaml - - - project_id: project_id_path - - snapshot_id: snapshot_id_path - - key: key_3 - - meta: meta_1 - -Request Example ---------------- - -.. literalinclude:: ./samples/snapshot-metadata-update-key-request.json - :language: javascript - -Response Parameters -------------------- - -.. rest_parameters:: parameters.yaml - - - meta: meta_1 - -Response Example ----------------- - -.. literalinclude:: ./samples/snapshot-metadata-update-key-response.json - :language: javascript diff --git a/api-ref/source/v3/volumes-v3-types.inc b/api-ref/source/v3/volumes-v3-types.inc deleted file mode 100644 index 26864d61f..000000000 --- a/api-ref/source/v3/volumes-v3-types.inc +++ /dev/null @@ -1,449 +0,0 @@ -.. -*- rst -*- - -Volume types (types) -==================== - - -Update a volume type -~~~~~~~~~~~~~~~~~~~~ - -.. rest_method:: PUT /v3/{project_id}/types/{volume_type_id} - -Updates a volume type. - -To create an environment with multiple-storage back ends, you must -specify a volume type. The API spawns Block Storage volume back -ends as children to ``cinder-volume``, and keys them from a unique -queue. The API names the back ends ``cinder-volume.HOST.BACKEND``. -For example, ``cinder-volume.ubuntu.lvmdriver``. When you create a -volume, the scheduler chooses an appropriate back end for the -volume type to handle the request. - -For information about how to use volume types to create multiple- -storage back ends, see `Configure multiple-storage back ends -`_. - - -Normal response codes: 200 -Error response codes: - - -Request -------- - -.. rest_parameters:: parameters.yaml - - - volume_type: volume_type - - volume_type_id: volume_type_id - - project_id: project_id_path - -Request Example ---------------- - -.. literalinclude:: ./samples/volume-type-update-request.json - :language: javascript - - - -Response Parameters -------------------- - -.. 
rest_parameters:: parameters.yaml - - - is_public: is_public - - extra_specs: extra_specs - - description: description - - volume_type: volume_type - - name: name - -Response Example ----------------- - -.. literalinclude:: ./samples/volume-type-show-response.json - :language: javascript - - -Update extra specs for volume type -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -.. rest_method:: PUT /v3/{project_id}/types/{volume_type_id} - -Updates the extra specifications that are assigned to a volume type. - - -Normal response codes: 200 -Error response codes: - - -Request -------- - -.. rest_parameters:: parameters.yaml - - - extra_specs: extra_specs - - volume_type: volume_type - - volume_type_id: volume_type_id - - project_id: project_id_path - -Request Example ---------------- - -.. literalinclude:: ./samples/volume-type-update-request.json - :language: javascript - - - -Response Parameters -------------------- - -.. rest_parameters:: parameters.yaml - - - is_public: is_public - - extra_specs: extra_specs - - description: description - - volume_type: volume_type - - name: name - - -Response Example ----------------- - -.. literalinclude:: ./samples/volume-type-show-response.json - :language: javascript - - -Show volume type detail -~~~~~~~~~~~~~~~~~~~~~~~ - -.. rest_method:: GET /v3/{project_id}/types/{volume_type_id} - -Shows details for a volume type. - - -Normal response codes: 200 -Error response codes: - -Request -------- - -.. rest_parameters:: parameters.yaml - - - volume_type_id: volume_type_id - - project_id: project_id_path - - -Response Parameters -------------------- - -.. rest_parameters:: parameters.yaml - - - is_public: is_public - - extra_specs: extra_specs - - description: description - - volume_type: volume_type - - name: name - - -Response Example ----------------- - -.. literalinclude:: ./samples/volume-type-show-response.json - :language: javascript - - -Delete a volume type -~~~~~~~~~~~~~~~~~~~~ - -.. 
rest_method:: DELETE /v3/{project_id}/types/{volume_type_id} - -Deletes a volume type. - -Normal response codes: 202 - - -Request -------- - -.. rest_parameters:: parameters.yaml - - - volume_type_id: volume_type_id - - project_id: project_id_path - - -List all volume types -~~~~~~~~~~~~~~~~~~~~~ - -.. rest_method:: GET /v3/{project_id}/types - -Lists volume types. - - -Normal response codes: 200 -Error response codes: - - -Request -------- - -.. rest_parameters:: parameters.yaml - - - project_id: project_id_path - - sort_key: sort_key - - sort_dir: sort_dir - - limit: limit - - offset: offset - - marker: marker - - -Response Parameters -------------------- - -.. rest_parameters:: parameters.yaml - - - volume_types: volume_types - - extra_specs: extra_specs - - name: name - - volume_type: volume_type - -Response Example ----------------- - -.. literalinclude:: ./samples/volume-types-list-response.json - :language: javascript - - -Create a volume type -~~~~~~~~~~~~~~~~~~~~ - -.. rest_method:: POST /v3/{project_id}/types - -Creates a volume type. - -To create an environment with multiple-storage back ends, you must -specify a volume type. Block Storage volume back ends are spawned -as children to ``cinder-volume``, and they are keyed from a unique -queue. They are named ``cinder-volume.HOST.BACKEND``. For example, -``cinder-volume.ubuntu.lvmdriver``. When a volume is created, the -scheduler chooses an appropriate back end to handle the request -based on the volume type. - -For information about how to use volume types to create multiple- -storage back ends, see `Configure multiple-storage back ends -`_. - - -Normal response codes: 200 -Error response codes: - - -Request -------- - -.. rest_parameters:: parameters.yaml - - - volume_type: volume_type - - project_id: project_id_path - - name: name_7 - - is_public: is_public - - description: description_15 - -Request Example ---------------- - -.. 
literalinclude:: ./samples/volume-type-create-request.json - :language: javascript - - -Response Parameters -------------------- - -.. rest_parameters:: parameters.yaml - - - is_public: is_public - - extra_specs: extra_specs - - description: description - - volume_type: volume_type - - name: name - - id: id - - os-volume-type-access:is_public: os-volume-type-access:is_public - -Response Example ----------------- - -.. literalinclude:: ./samples/volume-type-create-response.json - :language: javascript - -Show an encryption type -~~~~~~~~~~~~~~~~~~~~~~~~~~ - -.. rest_method:: GET /v3/{project_id}/types/{volume_type_id}/encryption - -Show an encryption type. - -To show an encryption type for an existing volume type. - -Normal response codes: 200 -Error response codes: - - -Request -------- - -.. rest_parameters:: parameters.yaml - - - volume_type_id: volume_type_id - - project_id: project_id_path - - -Response Parameters -------------------- - -.. rest_parameters:: parameters.yaml - - - volume_type_id: volume_type_id_body - - encryption_id: encryption_id - - encryption: encryption - - key_size: key_size - - provider: provider - - control_location: control_location - - cipher: cipher - - deleted: deleted - - created_at: created_at - - updated_at: updated_at - - deleted_at: deleted_at - -Response Example ----------------- - -.. literalinclude:: ./samples/encryption-type-show-response.json - :language: javascript - - -Delete an encryption type -~~~~~~~~~~~~~~~~~~~~~~~~~~ - -.. rest_method:: DELETE /v3/{project_id}/types/{volume_type_id}/encryption/{encryption_id} - -Delete an encryption type. - -To delete an encryption type for an existing volume type. - -Normal response codes: 202 -Error response codes: - - -Request -------- - -.. rest_parameters:: parameters.yaml - - - volume_type_id: volume_type_id - - project_id: project_id_path - - encryption_id: encryption_id - - -Create an encryption type -~~~~~~~~~~~~~~~~~~~~~~~~~~ - -.. 
rest_method:: POST /v3/{project_id}/types/{volume_type_id}/encryption - -Creates an encryption type. - -To create an encryption type for an existing volume type. - -Normal response codes: 200 -Error response codes: - - -Request -------- - -.. rest_parameters:: parameters.yaml - - - volume_type_id: volume_type_id - - project_id: project_id_path - - encryption: encryption - - key_size: key_size - - provider: provider - - control_location: control_location - - cipher: cipher - -Request Example ---------------- - -.. literalinclude:: ./samples/encryption-type-create-request.json - :language: javascript - - -Response Parameters -------------------- - -.. rest_parameters:: parameters.yaml - - - volume_type_id: volume_type_id_body - - encryption_id: encryption_id - - encryption: encryption - - key_size: key_size - - provider: provider - - control_location: control_location - - cipher: cipher - -Response Example ----------------- - -.. literalinclude:: ./samples/encryption-type-create-response.json - :language: javascript - - -Update an encryption type -~~~~~~~~~~~~~~~~~~~~~~~~~~ - -.. rest_method:: POST /v3/{project_id}/types/{volume_type_id}/encryption/{encryption_id} - -Update an encryption type. - -To update an encryption type for an existing volume type. - -Normal response codes: 200 -Error response codes: - - -Request -------- - -.. rest_parameters:: parameters.yaml - - - volume_type_id: volume_type_id - - project_id: project_id_path - - encryption_id: encryption_id - - encryption: encryption - - key_size: key_size - - provider: provider_optional - - control_location: control_location - - cipher: cipher - -Request Example ---------------- - -.. literalinclude:: ./samples/encryption-type-update-request.json - :language: javascript - - -Response Parameters -------------------- - -.. 
rest_parameters:: parameters.yaml - - - encryption: encryption - - key_size: key_size - - provider: provider_optional - - control_location: control_location - - cipher: cipher - -Response Example ----------------- - -.. literalinclude:: ./samples/encryption-type-update-response.json - :language: javascript diff --git a/api-ref/source/v3/volumes-v3-versions.inc b/api-ref/source/v3/volumes-v3-versions.inc deleted file mode 100644 index 38708695b..000000000 --- a/api-ref/source/v3/volumes-v3-versions.inc +++ /dev/null @@ -1,42 +0,0 @@ -.. -*- rst -*- - -API versions -============ - - - - - -Show API v3 details -~~~~~~~~~~~~~~~~~~~ - -.. rest_method:: GET /v3 - -Shows details for Block Storage API v3. - - -Normal response codes: 200 -Error response codes: 203 - - -Request -------- - - - -Response Parameters -------------------- - -.. rest_parameters:: parameters.yaml - - - location: location - - - -Response Example ----------------- - -.. literalinclude:: ./samples/version-v3-show-response.json - :language: javascript - - diff --git a/api-ref/source/v3/volumes-v3-volumes-actions.inc b/api-ref/source/v3/volumes-v3-volumes-actions.inc deleted file mode 100644 index 18e45d48c..000000000 --- a/api-ref/source/v3/volumes-v3-volumes-actions.inc +++ /dev/null @@ -1,507 +0,0 @@ -.. -*- rst -*- - -Volume actions (volumes, action) -================================ - -Extends the size of, resets statuses for, sets image metadata for, -and removes image metadata from a volume. Attaches a volume to a -server, detaches a volume from a server, and removes a volume from -Block Storage management without actually removing the back-end -storage object associated with it. - - -Extend a volume size -~~~~~~~~~~~~~~~~~~~~ - -.. rest_method:: POST /v3/{project_id}/volumes/{volume_id}/action - -Extends the size of a volume to a requested size, in gibibytes (GiB). -Specify the ``os-extend`` action in the request body. 
- -Preconditions - -- Prior to microversion ``3.42`` the volume status must be ``available``. - Starting with microversion ``3.42``, attached volumes with status ``in-use`` - may be able to be extended depending on policy and backend volume and - compute driver constraints in the cloud. Note that ``reserved`` is not a - valid state for extend. - -- Sufficient amount of storage must exist to extend the volume. - -- The user quota must have sufficient volume storage. - -Postconditions - -- If the request is processed successfully, the volume status will change to - ``extending`` while the volume size is being extended. - -- Upon successful completion of the extend operation, the volume status will - go back to its original value. - -- Starting with microversion ``3.42``, when extending the size of an attached - volume, the Block Storage service will notify the Compute service that an - attached volume has been extended. The Compute service will asynchronously - process the volume size change for the related server instance. This can be - monitored using the ``GET /servers/{server_id}/os-instance-actions`` API in - the Compute service. - -Troubleshooting - -- An ``error_extending`` volume status indicates that the request - failed. Ensure that you meet the preconditions and retry the - request. If the request fails again, investigate the storage back - end. - -Normal response codes: 202 - - -Request -------- - -.. rest_parameters:: parameters.yaml - - - os-extend: os-extend - - new_size: new_size - - project_id: project_id_path - - volume_id: volume_id_path - -Request Example ---------------- - -.. literalinclude:: ./samples/volume-extend-request.json - :language: javascript - - - - - - - -Reset a volume's statuses -~~~~~~~~~~~~~~~~~~~~~~~~~ - -.. rest_method:: POST /v3/{project_id}/volumes/{volume_id}/action - -Administrator only. Resets the status, attach status, revert to snapshot, -and migration status for a volume. 
Specify the ``os-reset_status`` action in the request body. - -Normal response codes: 202 - - -Request -------- - -.. rest_parameters:: parameters.yaml - - - status: status_3 - - migration_status: migration_status - - os-reset_status: os-reset_status - - attach_status: attach_status - - project_id: project_id_path - - volume_id: volume_id_path - -Request Example ---------------- - -.. literalinclude:: ./samples/volume-status-reset-request.json - :language: javascript - - -Revert volume to snapshot -~~~~~~~~~~~~~~~~~~~~~~~~~ - -.. rest_method:: POST /v3/{project_id}/volumes/{volume_id}/action - -Revert a volume to its latest snapshot; this API only supports reverting a detached volume. - -Normal response codes: 202 -Error response codes: 400, 404 - -Request -------- - -.. rest_parameters:: parameters.yaml - - - project_id: project_id_path - - volume_id: volume_id - - revert: revert - - snapshot_id: snapshot_id_4 - -Request Example ---------------- - -.. literalinclude:: ./samples/volume-revert-to-snapshot-request.json - :language: javascript - - -Set image metadata for a volume -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -.. rest_method:: POST /v3/{project_id}/volumes/{volume_id}/action - -Sets the image metadata for a volume. Specify the ``os-set_image_metadata`` action in the request body. - -Normal response codes: 202 - - -Request -------- - -.. rest_parameters:: parameters.yaml - - - os-set_image_metadata: os-set_image_metadata - - metadata: metadata - - project_id: project_id_path - - volume_id: volume_id_path - -Request Example ---------------- - -.. literalinclude:: ./samples/volume-image-metadata-set-request.json - :language: javascript - - - - - - - -Remove image metadata from a volume -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -.. rest_method:: POST /v3/{project_id}/volumes/{volume_id}/action - -Removes image metadata, by key, from a volume. 
Specify the ``os-unset_image_metadata`` action in the request body and the ``key`` for the metadata key and value pair that you want to remove. - -Normal response codes: 202 - - -Request -------- - -.. rest_parameters:: parameters.yaml - - - os-unset_image_metadata: os-unset_image_metadata - - key: key - - project_id: project_id_path - - volume_id: volume_id_path - -Request Example ---------------- - -.. literalinclude:: ./samples/volume-image-metadata-unset-request.json - :language: javascript - - - - - - - -Attach volume to a server -~~~~~~~~~~~~~~~~~~~~~~~~~ - -.. rest_method:: POST /v3/{project_id}/volumes/{volume_id}/action - -Attaches a volume to a server. Specify the ``os-attach`` action in the request body. - -Preconditions - -- Volume status must be ``available``. - -- You should set ``instance_uuid`` or ``host_name``. - -Normal response codes: 202 - - -Request -------- - -.. rest_parameters:: parameters.yaml - - - instance_uuid: instance_uuid - - mountpoint: mountpoint - - host_name: host_name - - os-attach: os-attach - - project_id: project_id_path - - volume_id: volume_id_path - -Request Example ---------------- - -.. literalinclude:: ./samples/volume-attach-request.json - :language: javascript - - - - - - - -Detach volume from server -~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -.. rest_method:: POST /v3/{project_id}/volumes/{volume_id}/action - -Detaches a volume from a server. Specify the ``os-detach`` action in the request body. - -Preconditions - -- Volume status must be ``in-use``. - -Normal response codes: 202 - - -Request -------- - -.. rest_parameters:: parameters.yaml - - - attachment_id: attachment_id - - os-detach: os-detach - - project_id: project_id_path - - volume_id: volume_id_path - -Request Example ---------------- - -.. literalinclude:: ./samples/volume-detach-request.json - :language: javascript - - - - - - - -Unmanage a volume -~~~~~~~~~~~~~~~~~ - -.. 
rest_method:: POST /v3/{project_id}/volumes/{volume_id}/action - -Removes a volume from Block Storage management without removing the back-end storage object that is associated with it. Specify the ``os-unmanage`` action in the request body. - -Preconditions - -- Volume status must be ``available``. - -Normal response codes: 202 - - -Request -------- - -.. rest_parameters:: parameters.yaml - - - os-unmanage: os-unmanage - - project_id: project_id_path - - volume_id: volume_id_path - -Request Example ---------------- - -.. literalinclude:: ./samples/volume-unmanage-request.json - :language: javascript - - - - - - - -Force detach a volume -~~~~~~~~~~~~~~~~~~~~~ - -.. rest_method:: POST /v3/{project_id}/volumes/{volume_id}/action - -Forces a volume to detach. Specify the ``os-force_detach`` action in the request body. - -Rolls back an unsuccessful detach operation after you disconnect -the volume. - -Normal response codes: 202 - - -Request -------- - -.. rest_parameters:: parameters.yaml - - - connector: connector - - attachment_id: attachment_id - - os-force_detach: os-force_detach - - project_id: project_id_path - - volume_id: volume_id_path - -Request Example ---------------- - -.. literalinclude:: ./samples/volume-force-detach-request.json - :language: javascript - - - - - - - -Retype a volume -~~~~~~~~~~~~~~~ - -.. rest_method:: POST /v3/{project_id}/volumes/{volume_id}/action - -Change type of existing volume. Specify the ``os-retype`` action in the request body. - -Change the volume type of existing volume, Cinder may migrate the volume to -proper volume host according to the new volume type. - -Policy defaults enable only users with the administrative role or the owner of -the volume to perform this operation. Cloud providers can change these -permissions through the policy.json file. - -Normal response codes: 202 - - -Request -------- - -.. 
rest_parameters:: parameters.yaml - - - new_type: new_type - - migration_policy: migration_policy - - os-retype: os-retype - - volume_id: volume_id_path - - project_id: project_id_path - -Request Example ---------------- - -.. literalinclude:: ./samples/volume-os-retype-request.json - :language: javascript - - - - - - - -Force delete a volume -~~~~~~~~~~~~~~~~~~~~~ - -.. rest_method:: POST /v3/{project_id}/volumes/{volume_id}/action - -Attempts force-delete of volume, regardless of state. Specify the ``os-force_delete`` action -in the request body. - - -Normal response codes: 202 - - -Request -------- - -.. rest_parameters:: parameters.yaml - - - os-force_delete: os-force_delete - - project_id: project_id_path - - volume_id: volume_id_path - -Request Example ---------------- - -.. literalinclude:: ./samples/volume-force-delete-request.json - :language: javascript - - - - - - - -Update a volume's bootable status -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -.. rest_method:: POST /v3/{project_id}/volumes/{volume_id}/action - -Update the bootable status for a volume, mark it as a bootable volume. Specify the ``os-set_bootable`` action in the request body. - -Normal response codes: 200 - - -Request -------- - -.. rest_parameters:: parameters.yaml - - - project_id: project_id_path - - volume_id: volume_id_path - - os-set_bootable: os-set_bootable - - bootable: bootable - -Request Example ---------------- - -.. literalinclude:: ./samples/volume-bootable-status-update-request.json - :language: javascript - - - - - - -Upload volume to image -~~~~~~~~~~~~~~~~~~~~~~~~ - -.. rest_method:: POST /v3/{project_id}/volumes/{volume_id}/action - -Uploads the specified volume to image service. - -Normal response codes: 202 - - -Request -------- - -.. 
rest_parameters:: parameters.yaml - - - project_id: project_id_path - - volume_id: volume_id_path - - os-volume_upload_image: os-volume_upload_image - - image_name: image_name - - force: force_4 - - disk_format: disk_format - - container_format: container_format - - visibility: visibility_1 - - protected: protected - -Request Example ---------------- - -.. literalinclude:: ./samples/volume-upload-to-image-request.json - :language: javascript - -Response Parameters -------------------- - -.. rest_parameters:: parameters.yaml - - - os-volume_upload_image: os-volume_upload_image - - status: status_3 - - image_name: image_name - - disk_format: disk_format - - container_format: container_format - - visibility: visibility_1 - - protected: protected - - updated_at: updated_at - - image_id: image_id - - display_description: description_9 - - id: id_5 - - size: size - - volume_type: volume_type_6 - - -Response Example ----------------- - -.. literalinclude:: ./samples/volume-upload-to-image-response.json - :language: javascript diff --git a/api-ref/source/v3/volumes-v3-volumes.inc b/api-ref/source/v3/volumes-v3-volumes.inc deleted file mode 100644 index f095e4bbe..000000000 --- a/api-ref/source/v3/volumes-v3-volumes.inc +++ /dev/null @@ -1,735 +0,0 @@ -.. -*- rst -*- - -Volumes (volumes) -================= - -A volume is a detachable block storage device similar to a USB hard -drive. You can attach a volume to one instance at a time. - -The ``snapshot_id`` and ``source_volid`` parameters specify the ID -of the snapshot or volume from which this volume originates. If the -volume was not created from a snapshot or source volume, these -values are null. 
- -When you create, list, update, or delete volumes, the possible -status values are: - -**Volume statuses** - -+------------------+--------------------------------------------------------+ -| Status | Description | -+------------------+--------------------------------------------------------+ -| creating | The volume is being created. | -+------------------+--------------------------------------------------------+ -| available | The volume is ready to attach to an instance. | -+------------------+--------------------------------------------------------+ -| reserved | The volume is reserved for attaching or shelved. | -+------------------+--------------------------------------------------------+ -| attaching | The volume is attaching to an instance. | -+------------------+--------------------------------------------------------+ -| detaching | The volume is detaching from an instance. | -+------------------+--------------------------------------------------------+ -| in-use | The volume is attached to an instance. | -+------------------+--------------------------------------------------------+ -| maintenance | The volume is locked and being migrated. | -+------------------+--------------------------------------------------------+ -| deleting | The volume is being deleted. | -+------------------+--------------------------------------------------------+ -| awaiting-transfer| The volume is awaiting for transfer. | -+------------------+--------------------------------------------------------+ -| error | A volume creation error occurred. | -+------------------+--------------------------------------------------------+ -| error_deleting | A volume deletion error occurred. | -+------------------+--------------------------------------------------------+ -| backing-up | The volume is being backed up. | -+------------------+--------------------------------------------------------+ -| restoring-backup | A backup is being restored to the volume. 
| -+------------------+--------------------------------------------------------+ -| error_backing-up | A backup error occurred. | -+------------------+--------------------------------------------------------+ -| error_restoring | A backup restoration error occurred. | -+------------------+--------------------------------------------------------+ -| error_extending | An error occurred while attempting to extend a volume. | -+------------------+--------------------------------------------------------+ -| downloading | The volume is downloading an image. | -+------------------+--------------------------------------------------------+ -| uploading | The volume is being uploaded to an image. | -+------------------+--------------------------------------------------------+ -| retyping | The volume is changing type to another volume type. | -+------------------+--------------------------------------------------------+ -| extending | The volume is being extended. | -+------------------+--------------------------------------------------------+ - - -List accessible volumes with details -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -.. rest_method:: GET /v3/{project_id}/volumes/detail - -Lists all Block Storage volumes, with details, that the project can access, -since v3.31 if non-admin users specify invalid filters in the url, API will -return bad request. - - -Normal response codes: 200 -Error response codes: badRequest(400) - - -Request -------- - -.. rest_parameters:: parameters.yaml - - - project_id: project_id_path - - sort_key: sort_key - - sort_dir: sort_dir - - limit: limit - - offset: offset - - marker: marker - - -Response Parameters -------------------- - -.. 
rest_parameters:: parameters.yaml - - - migration_status: migration_status - - attachments: attachments - - links: links - - availability_zone: availability_zone - - os-vol-host-attr:host: os-vol-host-attr:host - - encrypted: encrypted - - updated_at: updated_at - - os-volume-replication:extended_status: os-volume-replication:extended_status - - replication_status: replication_status - - snapshot_id: snapshot_id - - id: id - - size: size - - user_id: user_id - - os-vol-tenant-attr:tenant_id: os-vol-tenant-attr:tenant_id - - os-vol-mig-status-attr:migstat: os-vol-mig-status-attr:migstat - - metadata: metadata - - status: status_3 - - description: description - - multiattach: multiattach - - source_volid: source_volid - - consistencygroup_id: consistencygroup_id - - os-vol-mig-status-attr:name_id: os-vol-mig-status-attr:name_id - - name: name - - bootable: bootable_response - - created_at: created_at - - os-volume-replication:driver_data: os-volume-replication:driver_data - - volumes: volumes - - volume_type: volume_type - - - -Response Example ----------------- - -.. literalinclude:: ./samples/volumes-list-detailed-response.json - :language: javascript - - - - -Create a volume -~~~~~~~~~~~~~~~ - -.. rest_method:: POST /v3/{project_id}/volumes - -Creates a volume. - -To create a bootable volume, include the UUID of the image from -which you want to create the volume in the ``imageRef`` attribute -in the request body. - -Preconditions - -- You must have enough volume storage quota remaining to create a - volume of size requested. - -Asynchronous Postconditions - -- With correct permissions, you can see the volume status as - ``available`` through API calls. - -- With correct access, you can see the created volume in the storage - system that OpenStack Block Storage manages. - -Troubleshooting - -- If volume status remains ``creating`` or shows another error - status, the request failed. Ensure you meet the preconditions - then investigate the storage back end. 
- -- Volume is not created in the storage system that OpenStack Block - Storage manages. - -- The storage node needs enough free storage space to match the size - of the volume creation request. - -Normal response codes: 202 - - -Request -------- - -.. rest_parameters:: parameters.yaml - - - size: size - - description: description - - imageRef: imageRef - - multiattach: multiattach - - availability_zone: availability_zone - - source_volid: source_volid - - name: name - - volume: volume - - consistencygroup_id: consistencygroup_id - - volume_type: volume_type - - snapshot_id: snapshot_id - - OS-SCH-HNT:scheduler_hints: OS-SCH-HNT:scheduler_hints - - source_replica: source_replica - - metadata: metadata - - project_id: project_id_path - -Request Example ---------------- - -.. literalinclude:: ./samples/volume-create-request.json - :language: javascript - - - -Response Parameters -------------------- - -.. rest_parameters:: parameters.yaml - - - migration_status: migration_status - - attachments: attachments - - links: links - - availability_zone: availability_zone - - encrypted: encrypted - - updated_at: updated_at - - replication_status: replication_status - - snapshot_id: snapshot_id - - id: id - - size: size - - user_id: user_id - - metadata: metadata - - status: status_3 - - description: description - - multiattach: multiattach - - source_volid: source_volid - - volume: volume - - consistencygroup_id: consistencygroup_id - - name: name - - bootable: bootable_response - - created_at: created_at - - volume_type: volume_type - - - - - -List accessible volumes -~~~~~~~~~~~~~~~~~~~~~~~ - -.. rest_method:: GET /v3/{project_id}/volumes - -Lists summary information for all Block Storage volumes that the -project can access, since v3.31 if non-admin users specify invalid -filters in the url, API will return bad request. - - -Normal response codes: 200 -Error response codes: badRequest(400) - - -Request -------- - -.. 
rest_parameters:: parameters.yaml - - - project_id: project_id_path - - sort_key: sort_key - - sort_dir: sort_dir - - limit: limit - - offset: offset - - marker: marker - - -Response Parameters -------------------- - -.. rest_parameters:: parameters.yaml - - - volumes: volumes - - id: id - - links: links - - name: name - - - -Response Example ----------------- - -.. literalinclude:: ./samples/volumes-list-response.json - :language: javascript - - - - -Show a volume's details -~~~~~~~~~~~~~~~~~~~~~~~ - -.. rest_method:: GET /v3/{project_id}/volumes/{volume_id} - -Shows details for a volume. - -Preconditions - -- The volume must exist. - - -Normal response codes: 200 -Error response codes: - - -Request -------- - -.. rest_parameters:: parameters.yaml - - - project_id: project_id_path - - volume_id: volume_id_path - - -Response Parameters -------------------- - -.. rest_parameters:: parameters.yaml - - - migration_status: migration_status - - attachments: attachments - - links: links - - availability_zone: availability_zone - - os-vol-host-attr:host: os-vol-host-attr:host - - encrypted: encrypted - - updated_at: updated_at - - os-volume-replication:extended_status: os-volume-replication:extended_status - - replication_status: replication_status - - snapshot_id: snapshot_id - - id: id - - size: size - - user_id: user_id - - os-vol-tenant-attr:tenant_id: os-vol-tenant-attr:tenant_id - - os-vol-mig-status-attr:migstat: os-vol-mig-status-attr:migstat - - metadata: metadata - - status: status_3 - - description: description - - multiattach: multiattach - - source_volid: source_volid - - volume: volume - - consistencygroup_id: consistencygroup_id - - os-vol-mig-status-attr:name_id: os-vol-mig-status-attr:name_id - - name: name - - bootable: bootable_response - - created_at: created_at - - os-volume-replication:driver_data: os-volume-replication:driver_data - - volume_type: volume_type - - - -Response Example ----------------- - -.. 
literalinclude:: ./samples/volume-show-response.json - :language: javascript - - - - -Update a volume -~~~~~~~~~~~~~~~ - -.. rest_method:: PUT /v3/{project_id}/volumes/{volume_id} - -Updates a volume. - - -Normal response codes: 200 -Error response codes: - - -Request -------- - -.. rest_parameters:: parameters.yaml - - - volume: volume - - description: description - - name: name - - metadata: metadata - - project_id: project_id_path - - volume_id: volume_id_path - -Request Example ---------------- - -.. literalinclude:: ./samples/volume-update-request.json - :language: javascript - - - -Response Parameters -------------------- - -.. rest_parameters:: parameters.yaml - - - migration_status: migration_status - - attachments: attachments - - links: links - - availability_zone: availability_zone - - encrypted: encrypted - - updated_at: updated_at - - replication_status: replication_status - - snapshot_id: snapshot_id - - id: id - - size: size - - user_id: user_id - - metadata: metadata - - status: status_3 - - description: description - - multiattach: multiattach - - source_volid: source_volid - - volume: volume - - consistencygroup_id: consistencygroup_id - - name: name - - bootable: bootable_response - - created_at: created_at - - volume_type: volume_type - - - -Response Example ----------------- - -.. literalinclude:: ./samples/volume-update-response.json - :language: javascript - - - - -Delete a volume -~~~~~~~~~~~~~~~ - -.. rest_method:: DELETE /v3/{project_id}/volumes/{volume_id} - -Deletes a volume. - -Preconditions - -- Volume status must be ``available``, ``in-use``, ``error``, or - ``error_restoring``. - -- You cannot already have a snapshot of the volume. - -- You cannot delete a volume that is in a migration. - -Asynchronous Postconditions - -- The volume is deleted in volume index. - -- The volume managed by OpenStack Block Storage is deleted in - storage node. 
- -Troubleshooting - -- If volume status remains in ``deleting`` or becomes - ``error_deleting`` the request failed. Ensure you meet the - preconditions then investigate the storage back end. - -- The volume managed by OpenStack Block Storage is not deleted from - the storage system. - -Normal response codes: 202 - - -Request -------- - -.. rest_parameters:: parameters.yaml - - - project_id: project_id_path - - volume_id: volume_id_path - - cascade: cascade - - - - - - -Create metadata for volume -~~~~~~~~~~~~~~~~~~~~~~~~~~ - -.. rest_method:: POST /v3/{project_id}/volumes/{volume_id}/metadata - -Creates or replaces metadata for a volume. Does not modify items that are not -in the request. - -Normal response codes: 202 - - -Request -------- - -.. rest_parameters:: parameters.yaml - - - metadata: metadata_3 - - project_id: project_id_path - - volume_id: volume_id_path - -Request Example ---------------- - -.. literalinclude:: ./samples/volume-metadata-create-request.json - :language: javascript - - - -Response Parameters -------------------- - -.. rest_parameters:: parameters.yaml - - - metadata: metadata_3 - - -Response Example ----------------- - -.. literalinclude:: ./samples/volume-metadata-create-response.json - :language: javascript - - - - -Show a volume's metadata -~~~~~~~~~~~~~~~~~~~~~~~~ - -.. rest_method:: GET /v3/{project_id}/volumes/{volume_id}/metadata - -Shows metadata for a volume. - - -Normal response codes: 200 -Error response codes: - - -Request -------- - -.. rest_parameters:: parameters.yaml - - - project_id: project_id_path - - volume_id: volume_id_path - - -Response Parameters -------------------- - -.. rest_parameters:: parameters.yaml - - - metadata: metadata_3 - - - -Response Example ----------------- - -.. literalinclude:: ./samples/volume-metadata-show-response.json - :language: javascript - - - - -Update a volume's metadata -~~~~~~~~~~~~~~~~~~~~~~~~~~ - -.. 
rest_method:: PUT /v3/{project_id}/volumes/{volume_id}/metadata - -Replaces all the volume's metadata with the key-value pairs in the request. - - -Normal response codes: 200 -Error response codes: - - -Request -------- - -.. rest_parameters:: parameters.yaml - - - metadata: metadata_3 - - project_id: project_id_path - - volume_id: volume_id_path - -Request Example ---------------- - -.. literalinclude:: ./samples/volume-metadata-update-request.json - :language: javascript - - - -Response Parameters -------------------- - -.. rest_parameters:: parameters.yaml - - - metadata: metadata_3 - - - -Response Example ----------------- - -.. literalinclude:: ./samples/volume-metadata-update-response.json - :language: javascript - - -Show a volume's metadata for a specific key -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -.. rest_method:: GET /v3/{project_id}/volumes/{volume_id}/metadata/{key} - -Shows metadata for a volume for a specific key. - -Normal response codes: 200 - - -Request -------- - -.. rest_parameters:: parameters.yaml - - - project_id: project_id_path - - volume_id: volume_id_path - - key: key_2 - - -Response Parameters -------------------- - -.. rest_parameters:: parameters.yaml - - - meta: meta - - - -Response Example ----------------- - -.. literalinclude:: ./samples/volume-metadata-show-key-response.json - :language: javascript - - - -Delete a volume's metadata -~~~~~~~~~~~~~~~~~~~~~~~~~~ - -.. rest_method:: DELETE /v3/{project_id}/volumes/{volume_id}/metadata/{key} - -Deletes metadata for a volume. - -Normal response codes: 200 - - -Request -------- - -.. rest_parameters:: parameters.yaml - - - project_id: project_id_path - - volume_id: volume_id_path - - key: key_1 - - -Update a volume's metadata for a specific key -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -.. rest_method:: PUT /v3/{project_id}/volumes/{volume_id}/metadata/{key} - -Update metadata for a volume for a specific key. - -Normal response codes: 200 - - -Request -------- - -.. 
rest_parameters:: parameters.yaml - - - project_id: project_id_path - - volume_id: volume_id_path - - key: key_3 - - meta: meta - -Request Example ---------------- - -.. literalinclude:: ./samples/volume-metadata-update-key-request.json - :language: javascript - -Response Parameters -------------------- - -.. rest_parameters:: parameters.yaml - - - meta: meta - -Response Example ----------------- - -.. literalinclude:: ./samples/volume-metadata-update-key-response.json - :language: javascript - - -Get volumes summary -~~~~~~~~~~~~~~~~~~~ - -.. rest_method:: GET /v3/{project_id}/volumes/summary - -Display volumes summary with total number of volumes and total size in GB - - -Normal response codes: 200 -Error response codes: - - -Request -------- - -.. rest_parameters:: parameters.yaml - - - project_id: project_id_path - - all_tenants: all_tenants - -Response Parameters -------------------- - -.. rest_parameters:: parameters.yaml - - - volume-summary: volume-summary - - total_size: total_size - - total_count: total_count - - metadata: summary_metadata - - -Response Example ----------------- - -.. literalinclude:: ./samples/volumes-list-summary-response.json - :language: javascript diff --git a/babel.cfg b/babel.cfg deleted file mode 100644 index 15cd6cb76..000000000 --- a/babel.cfg +++ /dev/null @@ -1,2 +0,0 @@ -[python: **.py] - diff --git a/bindep.txt b/bindep.txt deleted file mode 100644 index 22a288467..000000000 --- a/bindep.txt +++ /dev/null @@ -1,26 +0,0 @@ -# This is a cross-platform list tracking distribution packages needed for install and tests; # see http://docs.openstack.org/infra/bindep/ for additional information. 
- -build-essential [platform:dpkg test] -gcc [platform:rpm test] -gettext [!platform:suse] -gettext-runtime [platform:suse] -libffi-dev [platform:dpkg] -libffi-devel [platform:redhat] -libffi48-devel [platform:suse] -virtual/libffi [platform:gentoo] -libssl-dev [platform:dpkg] -openssl-devel [platform:rpm !platform:suse] -libopenssl-devel [platform:suse !platform:rpm] -locales [platform:debian] -mariadb [platform:rpm] -mariadb-server [platform:redhat] -mariadb-devel [platform:redhat] -libmysqlclient-dev [platform:dpkg] -libmysqlclient-devel [platform:suse] -mysql-client [platform:dpkg] -mysql-server [platform:dpkg] -postgresql -postgresql-client [platform:dpkg] -postgresql-devel [platform:rpm] -postgresql-server [platform:rpm] -thin-provisioning-tools [platform:debian] diff --git a/cinder/__init__.py b/cinder/__init__.py deleted file mode 100644 index b5f6222d5..000000000 --- a/cinder/__init__.py +++ /dev/null @@ -1,21 +0,0 @@ -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
-"""Root Cinder module.""" - -import os - -# Ensure compatibility issues are covered with pythondsn -os.environ['EVENTLET_NO_GREENDNS'] = 'yes' - -# Make sure eventlet is loaded -import eventlet # noqa diff --git a/cinder/api/__init__.py b/cinder/api/__init__.py deleted file mode 100644 index 333016580..000000000 --- a/cinder/api/__init__.py +++ /dev/null @@ -1,36 +0,0 @@ -# Copyright 2010 United States Government as represented by the -# Administrator of the National Aeronautics and Space Administration. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - - -from oslo_config import cfg -from oslo_log import log as logging -import paste.urlmap - - -CONF = cfg.CONF -LOG = logging.getLogger(__name__) - - -def root_app_factory(loader, global_conf, **local_conf): - if CONF.enable_v1_api: - LOG.warning('The v1 API is deprecated and is not under active ' - 'development. You should set enable_v1_api=false ' - 'and enable_v3_api=true in your cinder.conf file.') - else: - del local_conf['/v1'] - if not CONF.enable_v2_api: - del local_conf['/v2'] - return paste.urlmap.urlmap_factory(loader, global_conf, **local_conf) diff --git a/cinder/api/common.py b/cinder/api/common.py deleted file mode 100644 index 4bf09eb87..000000000 --- a/cinder/api/common.py +++ /dev/null @@ -1,506 +0,0 @@ -# Copyright 2010 OpenStack Foundation -# All Rights Reserved. 
-# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - - -import json -import os -import re - -import enum -from oslo_config import cfg -from oslo_log import log as logging -from six.moves import urllib -import webob - -from cinder.common import constants -from cinder import exception -from cinder.i18n import _ -import cinder.policy -from cinder import utils - - -api_common_opts = [ - cfg.IntOpt('osapi_max_limit', - default=1000, - help='The maximum number of items that a collection ' - 'resource returns in a single response'), - cfg.StrOpt('osapi_volume_base_URL', - help='DEPRECATED: Base URL that will be presented to users in ' - 'links to the OpenStack Volume API', - deprecated_name='osapi_compute_link_prefix', - deprecated_since='Pike', - deprecated_reason='Duplicate config option.'), - cfg.StrOpt('resource_query_filters_file', - default='/etc/cinder/resource_filters.json', - help="Json file indicating user visible filter " - "parameters for list queries.", - deprecated_name='query_volume_filters'), - cfg.ListOpt('query_volume_filters', - default=['name', 'status', 'metadata', - 'availability_zone', - 'bootable', 'group_id'], - deprecated_for_removal=True, - help="Volume filter options which " - "non-admin user could use to " - "query volumes. 
Default values " - "are: ['name', 'status', " - "'metadata', 'availability_zone' ," - "'bootable', 'group_id']") -] - -CONF = cfg.CONF -CONF.import_opt('public_endpoint', 'cinder.api.views.versions') -CONF.register_opts(api_common_opts) - -LOG = logging.getLogger(__name__) -_FILTERS_COLLECTION = None -FILTERING_VERSION = '3.31' -LIKE_FILTER_VERSION = '3.34' - -ATTRIBUTE_CONVERTERS = {'name~': 'display_name~', - 'description~': 'display_description~'} - - -METADATA_TYPES = enum.Enum('METADATA_TYPES', 'user image') - - -# Regex that matches alphanumeric characters, periods, hyphens, -# colons and underscores: -# ^ assert position at start of the string -# [\w\.\-\:\_] match expression -# $ assert position at end of the string -VALID_KEY_NAME_REGEX = re.compile(r"^[\w\.\-\:\_]+$", re.UNICODE) - - -def validate_key_names(key_names_list): - """Validate each item of the list to match key name regex.""" - for key_name in key_names_list: - if not VALID_KEY_NAME_REGEX.match(key_name): - return False - return True - - -def validate_policy(context, action): - try: - cinder.policy.enforce_action(context, action) - return True - except exception.PolicyNotAuthorized: - return False - - -def get_pagination_params(params, max_limit=None): - """Return marker, limit, offset tuple from request. - - :param params: `wsgi.Request`'s GET dictionary, possibly containing - 'marker', 'limit', and 'offset' variables. 'marker' is the - id of the last element the client has seen, 'limit' is the - maximum number of items to return and 'offset' is the number - of items to skip from the marker or from the first element. - If 'limit' is not specified, or > max_limit, we default to - max_limit. Negative values for either offset or limit will - cause exc.HTTPBadRequest() exceptions to be raised. If no - offset is present we'll default to 0 and if no marker is - present we'll default to None. 
- :max_limit: Max value 'limit' return value can take - :returns: Tuple (marker, limit, offset) - """ - max_limit = max_limit or CONF.osapi_max_limit - limit = _get_limit_param(params, max_limit) - marker = _get_marker_param(params) - offset = _get_offset_param(params) - return marker, limit, offset - - -def _get_limit_param(params, max_limit=None): - """Extract integer limit from request's dictionary or fail. - - Defaults to max_limit if not present and returns max_limit if present - 'limit' is greater than max_limit. - """ - max_limit = max_limit or CONF.osapi_max_limit - try: - limit = int(params.pop('limit', max_limit)) - except ValueError: - msg = _('limit param must be an integer') - raise webob.exc.HTTPBadRequest(explanation=msg) - if limit < 0: - msg = _('limit param must be positive') - raise webob.exc.HTTPBadRequest(explanation=msg) - limit = min(limit, max_limit) - return limit - - -def _get_marker_param(params): - """Extract marker id from request's dictionary (defaults to None).""" - return params.pop('marker', None) - - -def _get_offset_param(params): - """Extract offset id from request's dictionary (defaults to 0) or fail.""" - offset = params.pop('offset', 0) - return utils.validate_integer(offset, 'offset', 0, constants.DB_MAX_INT) - - -def limited(items, request, max_limit=None): - """Return a slice of items according to requested offset and limit. - - :param items: A sliceable entity - :param request: ``wsgi.Request`` possibly containing 'offset' and 'limit' - GET variables. 'offset' is where to start in the list, - and 'limit' is the maximum number of items to return. If - 'limit' is not specified, 0, or > max_limit, we default - to max_limit. Negative values for either offset or limit - will cause exc.HTTPBadRequest() exceptions to be raised. 
- :kwarg max_limit: The maximum number of items to return from 'items' - """ - max_limit = max_limit or CONF.osapi_max_limit - marker, limit, offset = get_pagination_params(request.GET.copy(), - max_limit) - range_end = offset + (limit or max_limit) - return items[offset:range_end] - - -def limited_by_marker(items, request, max_limit=None): - """Return a slice of items according to the requested marker and limit.""" - max_limit = max_limit or CONF.osapi_max_limit - marker, limit, __ = get_pagination_params(request.GET.copy(), max_limit) - - start_index = 0 - if marker: - start_index = -1 - for i, item in enumerate(items): - if 'flavorid' in item: - if item['flavorid'] == marker: - start_index = i + 1 - break - elif item['id'] == marker or item.get('uuid') == marker: - start_index = i + 1 - break - if start_index < 0: - msg = _('marker [%s] not found') % marker - raise webob.exc.HTTPBadRequest(explanation=msg) - range_end = start_index + limit - return items[start_index:range_end] - - -def get_sort_params(params, default_key='created_at', default_dir='desc'): - """Retrieves sort keys/directions parameters. - - Processes the parameters to create a list of sort keys and sort directions - that correspond to either the 'sort' parameter or the 'sort_key' and - 'sort_dir' parameter values. The value of the 'sort' parameter is a comma- - separated list of sort keys, each key is optionally appended with - ':'. - - Note that the 'sort_key' and 'sort_dir' parameters are deprecated in kilo - and an exception is raised if they are supplied with the 'sort' parameter. - - The sort parameters are removed from the request parameters by this - function. 
- - :param params: webob.multidict of request parameters (from - cinder.api.openstack.wsgi.Request.params) - :param default_key: default sort key value, added to the list if no - sort keys are supplied - :param default_dir: default sort dir value, added to the list if the - corresponding key does not have a direction - specified - :returns: list of sort keys, list of sort dirs - :raise webob.exc.HTTPBadRequest: If both 'sort' and either 'sort_key' or - 'sort_dir' are supplied parameters - """ - if 'sort' in params and ('sort_key' in params or 'sort_dir' in params): - msg = _("The 'sort_key' and 'sort_dir' parameters are deprecated and " - "cannot be used with the 'sort' parameter.") - raise webob.exc.HTTPBadRequest(explanation=msg) - sort_keys = [] - sort_dirs = [] - if 'sort' in params: - for sort in params.pop('sort').strip().split(','): - sort_key, _sep, sort_dir = sort.partition(':') - if not sort_dir: - sort_dir = default_dir - sort_keys.append(sort_key.strip()) - sort_dirs.append(sort_dir.strip()) - else: - sort_key = params.pop('sort_key', default_key) - sort_dir = params.pop('sort_dir', default_dir) - sort_keys.append(sort_key.strip()) - sort_dirs.append(sort_dir.strip()) - return sort_keys, sort_dirs - - -def get_request_url(request): - url = request.application_url - headers = request.headers - forwarded = headers.get('X-Forwarded-Host') - if forwarded: - url_parts = list(urllib.parse.urlsplit(url)) - url_parts[1] = re.split(r',\s?', forwarded)[-1] - url = urllib.parse.urlunsplit(url_parts).rstrip('/') - return url - - -def remove_version_from_href(href): - """Removes the first API version from the href. 
- - Given: 'http://cinder.example.com/v1.1/123' - Returns: 'http://cinder.example.com/123' - - Given: 'http://cinder.example.com/v1.1' - Returns: 'http://cinder.example.com' - - Given: 'http://cinder.example.com/volume/drivers/v1.1/flashsystem' - Returns: 'http://cinder.example.com/volume/drivers/flashsystem' - - """ - parsed_url = urllib.parse.urlsplit(href) - url_parts = parsed_url.path.split('/', 2) - - # NOTE: this should match vX.X or vX - expression = re.compile(r'^v([0-9]+|[0-9]+\.[0-9]+)(/.*|$)') - for x in range(len(url_parts)): - if expression.match(url_parts[x]): - del url_parts[x] - break - - new_path = '/'.join(url_parts) - - if new_path == parsed_url.path: - msg = 'href %s does not contain version' % href - LOG.debug(msg) - raise ValueError(msg) - - parsed_url = list(parsed_url) - parsed_url[2] = new_path - return urllib.parse.urlunsplit(parsed_url) - - -class ViewBuilder(object): - """Model API responses as dictionaries.""" - - _collection_name = None - - def _get_links(self, request, identifier): - return [{"rel": "self", - "href": self._get_href_link(request, identifier), }, - {"rel": "bookmark", - "href": self._get_bookmark_link(request, identifier), }] - - def _get_next_link(self, request, identifier, collection_name): - """Return href string with proper limit and marker params.""" - params = request.params.copy() - params["marker"] = identifier - prefix = self._update_link_prefix(get_request_url(request), - CONF.public_endpoint) - url = os.path.join(prefix, - request.environ["cinder.context"].project_id, - collection_name) - return "%s?%s" % (url, urllib.parse.urlencode(params)) - - def _get_href_link(self, request, identifier): - """Return an href string pointing to this object.""" - prefix = self._update_link_prefix(get_request_url(request), - CONF.public_endpoint) - return os.path.join(prefix, - request.environ["cinder.context"].project_id, - self._collection_name, - str(identifier)) - - def _get_bookmark_link(self, request, identifier): - 
"""Create a URL that refers to a specific resource.""" - base_url = remove_version_from_href(get_request_url(request)) - base_url = self._update_link_prefix(base_url, - CONF.public_endpoint) - return os.path.join(base_url, - request.environ["cinder.context"].project_id, - self._collection_name, - str(identifier)) - - def _get_collection_links(self, request, items, collection_name, - item_count=None, id_key="uuid"): - """Retrieve 'next' link, if applicable. - - The next link is included if we are returning as many items as we can, - given the restrictions of limit optional request parameter and - osapi_max_limit configuration parameter as long as we are returning - some elements. - - So we return next link if: - - 1) 'limit' param is specified and equal to the number of items. - 2) 'limit' param is NOT specified and the number of items is - equal to CONF.osapi_max_limit. - - :param request: API request - :param items: List of collection items - :param collection_name: Name of collection, used to generate the - next link for a pagination query - :param item_count: Length of the list of the original collection - items - :param id_key: Attribute key used to retrieve the unique ID, used - to generate the next link marker for a pagination query - :returns: links - """ - item_count = item_count or len(items) - limit = _get_limit_param(request.GET.copy()) - if len(items) and limit <= item_count: - return self._generate_next_link(items, id_key, request, - collection_name) - - return [] - - def _generate_next_link(self, items, id_key, request, - collection_name): - links = [] - last_item = items[-1] - if id_key in last_item: - last_item_id = last_item[id_key] - else: - last_item_id = last_item["id"] - links.append({ - "rel": "next", - "href": self._get_next_link(request, last_item_id, - collection_name), - }) - return links - - def _update_link_prefix(self, orig_url, prefix): - if not prefix: - return orig_url - url_parts = list(urllib.parse.urlsplit(orig_url)) - 
prefix_parts = list(urllib.parse.urlsplit(prefix)) - url_parts[0:2] = prefix_parts[0:2] - url_parts[2] = prefix_parts[2] + url_parts[2] - - return urllib.parse.urlunsplit(url_parts).rstrip('/') - - -def get_cluster_host(req, params, cluster_version=None): - """Get cluster and host from the parameters. - - This method checks the presence of cluster and host parameters and returns - them depending on the cluster_version. - - If cluster_version is False we will never return the cluster_name and we - will require the presence of the host parameter. - - If cluster_version is None we will always check for the presence of the - cluster parameter, and if cluster_version is a string with a version we - will only check for the presence of the parameter if the version of the - request is not less than it. In both cases we will require one and only - one parameter, host or cluster. - """ - if (cluster_version is not False and - req.api_version_request.matches(cluster_version)): - cluster_name = params.get('cluster') - msg = _('One and only one of cluster and host must be set.') - else: - cluster_name = None - msg = _('Host field is missing.') - - host = params.get('host') - if bool(cluster_name) == bool(host): - raise exception.InvalidInput(reason=msg) - return cluster_name, host - - -def _initialize_filters(): - global _FILTERS_COLLECTION - if not _FILTERS_COLLECTION: - with open(CONF.resource_query_filters_file, 'r') as filters_file: - _FILTERS_COLLECTION = json.load(filters_file) - - -def get_enabled_resource_filters(resource=None): - """Get list of configured/allowed filters for the specified resource. - - This method checks resource_query_filters_file and returns dictionary - which contains the specified resource and its allowed filters: - - .. code-block:: json - - { - "resource": ["filter1", "filter2", "filter3"] - } - - if resource is not specified, all of the configuration will be returned, - and if the resource is not found, empty dict will be returned. 
- """ - try: - _initialize_filters() - if not resource: - return _FILTERS_COLLECTION - else: - return {resource: _FILTERS_COLLECTION[resource]} - except Exception: - LOG.debug("Failed to collect resource %s's filters.", resource) - return {} - - -def convert_filter_attributes(filters, resource): - for key in filters.copy().keys(): - if resource in ['volume', 'backup', - 'snapshot'] and key in ATTRIBUTE_CONVERTERS.keys(): - filters[ATTRIBUTE_CONVERTERS[key]] = filters[key] - filters.pop(key) - - -def reject_invalid_filters(context, filters, resource, - enable_like_filter=False): - if context.is_admin and resource not in ['pool']: - # Allow all options except resource is pool - # pool API is only available for admin - return - # Check the configured filters against those passed in resource - configured_filters = get_enabled_resource_filters(resource) - if configured_filters: - configured_filters = configured_filters[resource] - else: - configured_filters = [] - invalid_filters = [] - for key in filters.copy().keys(): - if not enable_like_filter: - if key not in configured_filters: - invalid_filters.append(key) - else: - # If 'key~' is configured, both 'key' and 'key~' are valid. 
- if not (key in configured_filters or - "%s~" % key in configured_filters): - invalid_filters.append(key) - if invalid_filters: - raise webob.exc.HTTPBadRequest( - explanation=_('Invalid filters %s are found in query ' - 'options.') % ','.join(invalid_filters)) - - -def process_general_filtering(resource): - def wrapper(process_non_general_filtering): - def _decorator(*args, **kwargs): - req_version = kwargs.get('req_version') - filters = kwargs.get('filters') - context = kwargs.get('context') - if req_version.matches(FILTERING_VERSION): - support_like = False - if req_version.matches(LIKE_FILTER_VERSION): - support_like = True - reject_invalid_filters(context, filters, - resource, support_like) - convert_filter_attributes(filters, resource) - - else: - process_non_general_filtering(*args, **kwargs) - return _decorator - return wrapper diff --git a/cinder/api/contrib/__init__.py b/cinder/api/contrib/__init__.py deleted file mode 100644 index 9c89ad909..000000000 --- a/cinder/api/contrib/__init__.py +++ /dev/null @@ -1,38 +0,0 @@ -# Copyright 2011 Justin Santa Barbara -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -"""Contrib contains extensions that are shipped with cinder. - -It can't be called 'extensions' because that causes namespacing problems. 
- -""" - -from oslo_config import cfg -from oslo_log import log as logging - -from cinder.api import extensions - - -CONF = cfg.CONF -LOG = logging.getLogger(__name__) - - -def standard_extensions(ext_mgr): - extensions.load_standard_extensions(ext_mgr, LOG, __path__, __package__) - - -def select_extensions(ext_mgr): - extensions.load_standard_extensions(ext_mgr, LOG, __path__, __package__, - CONF.osapi_volume_ext_list) diff --git a/cinder/api/contrib/admin_actions.py b/cinder/api/contrib/admin_actions.py deleted file mode 100644 index 4d9dfea0a..000000000 --- a/cinder/api/contrib/admin_actions.py +++ /dev/null @@ -1,348 +0,0 @@ -# Copyright 2012 OpenStack Foundation -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -from oslo_log import log as logging -import oslo_messaging as messaging -from six.moves import http_client -import webob -from webob import exc - -from cinder.api import common -from cinder.api import extensions -from cinder.api.openstack import wsgi -from cinder import backup -from cinder import db -from cinder import exception -from cinder.i18n import _ -from cinder import objects -from cinder.objects import fields -from cinder import rpc -from cinder import utils -from cinder import volume - - -LOG = logging.getLogger(__name__) - - -class AdminController(wsgi.Controller): - """Abstract base class for AdminControllers.""" - - collection = None # api collection to extend - - # FIXME(clayg): this will be hard to keep up-to-date - # Concrete classes can expand or over-ride - valid_status = set(['creating', - 'available', - 'deleting', - 'error', - 'error_deleting', - 'error_managing', - 'managing', ]) - - def __init__(self, *args, **kwargs): - super(AdminController, self).__init__(*args, **kwargs) - # singular name of the resource - self.resource_name = self.collection.rstrip('s') - self.volume_api = volume.API() - self.backup_api = backup.API() - - def _update(self, *args, **kwargs): - raise NotImplementedError() - - def _get(self, *args, **kwargs): - raise NotImplementedError() - - def _delete(self, *args, **kwargs): - raise NotImplementedError() - - def validate_update(self, body): - update = {} - try: - update['status'] = body['status'].lower() - except (TypeError, KeyError): - raise exc.HTTPBadRequest(explanation=_("Must specify 'status'")) - if update['status'] not in self.valid_status: - raise exc.HTTPBadRequest( - explanation=_("Must specify a valid status")) - return update - - def authorize(self, context, action_name): - # e.g. 
"snapshot_admin_actions:reset_status" - action = '%s_admin_actions:%s' % (self.resource_name, action_name) - extensions.extension_authorizer('volume', action)(context) - - def _remove_worker(self, context, id): - # Remove the cleanup worker from the DB when we change a resource - # status since it renders useless the entry. - res = db.worker_destroy(context, resource_type=self.collection.title(), - resource_id=id) - if res: - LOG.debug('Worker entry for %s with id %s has been deleted.', - self.collection, id) - - @wsgi.action('os-reset_status') - def _reset_status(self, req, id, body): - """Reset status on the resource.""" - - def _clean_volume_attachment(context, id): - attachments = ( - db.volume_attachment_get_all_by_volume_id(context, id)) - for attachment in attachments: - db.volume_detached(context, id, attachment.id) - db.volume_admin_metadata_delete(context, id, - 'attached_mode') - - context = req.environ['cinder.context'] - self.authorize(context, 'reset_status') - update = self.validate_update(body['os-reset_status']) - msg = "Updating %(resource)s '%(id)s' with '%(update)r'" - LOG.debug(msg, {'resource': self.resource_name, 'id': id, - 'update': update}) - - notifier_info = dict(id=id, update=update) - notifier = rpc.get_notifier('volumeStatusUpdate') - notifier.info(context, self.collection + '.reset_status.start', - notifier_info) - - # Not found exception will be handled at the wsgi level - self._update(context, id, update) - self._remove_worker(context, id) - if update.get('attach_status') == 'detached': - _clean_volume_attachment(context, id) - - notifier.info(context, self.collection + '.reset_status.end', - notifier_info) - - return webob.Response(status_int=http_client.ACCEPTED) - - @wsgi.action('os-force_delete') - def _force_delete(self, req, id, body): - """Delete a resource, bypassing the check that it must be available.""" - context = req.environ['cinder.context'] - self.authorize(context, 'force_delete') - # Not found exception will be 
handled at the wsgi level - resource = self._get(context, id) - self._delete(context, resource, force=True) - return webob.Response(status_int=http_client.ACCEPTED) - - -class VolumeAdminController(AdminController): - """AdminController for Volumes.""" - - collection = 'volumes' - - # FIXME(jdg): We're appending additional valid status - # entries to the set we declare in the parent class - # this doesn't make a ton of sense, we should probably - # look at the structure of this whole process again - # Perhaps we don't even want any definitions in the abstract - # parent class? - valid_status = AdminController.valid_status.union( - ('attaching', 'in-use', 'detaching', 'maintenance')) - valid_attach_status = (fields.VolumeAttachStatus.ATTACHED, - fields.VolumeAttachStatus.DETACHED,) - valid_migration_status = ('migrating', 'error', - 'success', 'completing', - 'none', 'starting',) - - def _update(self, *args, **kwargs): - db.volume_update(*args, **kwargs) - - def _get(self, *args, **kwargs): - return self.volume_api.get(*args, **kwargs) - - def _delete(self, *args, **kwargs): - return self.volume_api.delete(*args, **kwargs) - - def validate_update(self, body): - update = {} - status = body.get('status', None) - attach_status = body.get('attach_status', None) - migration_status = body.get('migration_status', None) - - valid = False - if status: - valid = True - update = super(VolumeAdminController, self).validate_update(body) - - if attach_status: - valid = True - update['attach_status'] = attach_status.lower() - if update['attach_status'] not in self.valid_attach_status: - raise exc.HTTPBadRequest( - explanation=_("Must specify a valid attach status")) - - if migration_status: - valid = True - update['migration_status'] = migration_status.lower() - if update['migration_status'] not in self.valid_migration_status: - raise exc.HTTPBadRequest( - explanation=_("Must specify a valid migration status")) - if update['migration_status'] == 'none': - 
update['migration_status'] = None - - if not valid: - raise exc.HTTPBadRequest( - explanation=_("Must specify 'status', 'attach_status' " - "or 'migration_status' for update.")) - return update - - @wsgi.action('os-force_detach') - def _force_detach(self, req, id, body): - """Roll back a bad detach after the volume been disconnected.""" - context = req.environ['cinder.context'] - self.authorize(context, 'force_detach') - # Not found exception will be handled at the wsgi level - volume = self._get(context, id) - try: - connector = body['os-force_detach'].get('connector', None) - except AttributeError: - msg = _("Invalid value '%s' for " - "os-force_detach.") % body['os-force_detach'] - raise webob.exc.HTTPBadRequest(explanation=msg) - try: - self.volume_api.terminate_connection(context, volume, connector) - except exception.VolumeBackendAPIException as error: - msg = _("Unable to terminate volume connection from backend.") - raise webob.exc.HTTPInternalServerError(explanation=msg) - - attachment_id = body['os-force_detach'].get('attachment_id', None) - - try: - self.volume_api.detach(context, volume, attachment_id) - except messaging.RemoteError as error: - if error.exc_type in ['VolumeAttachmentNotFound', - 'InvalidVolume']: - msg = "Error force detaching volume - %(err_type)s: " \ - "%(err_msg)s" % {'err_type': error.exc_type, - 'err_msg': error.value} - raise webob.exc.HTTPBadRequest(explanation=msg) - else: - # There are also few cases where force-detach call could fail - # due to db or volume driver errors. These errors shouldn't - # be exposed to the user and in such cases it should raise - # 500 error. 
- raise - return webob.Response(status_int=http_client.ACCEPTED) - - @wsgi.action('os-migrate_volume') - def _migrate_volume(self, req, id, body): - """Migrate a volume to the specified host.""" - context = req.environ['cinder.context'] - self.authorize(context, 'migrate_volume') - # Not found exception will be handled at the wsgi level - volume = self._get(context, id) - params = body['os-migrate_volume'] - - cluster_name, host = common.get_cluster_host(req, params, '3.16') - force_host_copy = utils.get_bool_param('force_host_copy', params) - lock_volume = utils.get_bool_param('lock_volume', params) - self.volume_api.migrate_volume(context, volume, host, cluster_name, - force_host_copy, lock_volume) - return webob.Response(status_int=http_client.ACCEPTED) - - @wsgi.action('os-migrate_volume_completion') - def _migrate_volume_completion(self, req, id, body): - """Complete an in-progress migration.""" - context = req.environ['cinder.context'] - self.authorize(context, 'migrate_volume_completion') - # Not found exception will be handled at the wsgi level - volume = self._get(context, id) - params = body['os-migrate_volume_completion'] - try: - new_volume_id = params['new_volume'] - except KeyError: - raise exc.HTTPBadRequest( - explanation=_("Must specify 'new_volume'")) - # Not found exception will be handled at the wsgi level - new_volume = self._get(context, new_volume_id) - error = params.get('error', False) - ret = self.volume_api.migrate_volume_completion(context, volume, - new_volume, error) - return {'save_volume_id': ret} - - -class SnapshotAdminController(AdminController): - """AdminController for Snapshots.""" - - collection = 'snapshots' - valid_status = fields.SnapshotStatus.ALL - - def _update(self, *args, **kwargs): - context = args[0] - snapshot_id = args[1] - fields = args[2] - snapshot = objects.Snapshot.get_by_id(context, snapshot_id) - snapshot.update(fields) - snapshot.save() - - def _get(self, *args, **kwargs): - return 
self.volume_api.get_snapshot(*args, **kwargs) - - def _delete(self, *args, **kwargs): - return self.volume_api.delete_snapshot(*args, **kwargs) - - -class BackupAdminController(AdminController): - """AdminController for Backups.""" - - collection = 'backups' - - valid_status = set(['available', - 'error' - ]) - - def _get(self, *args, **kwargs): - return self.backup_api.get(*args, **kwargs) - - def _delete(self, *args, **kwargs): - return self.backup_api.delete(*args, **kwargs) - - @wsgi.action('os-reset_status') - def _reset_status(self, req, id, body): - """Reset status on the resource.""" - context = req.environ['cinder.context'] - self.authorize(context, 'reset_status') - update = self.validate_update(body['os-reset_status']) - msg = "Updating %(resource)s '%(id)s' with '%(update)r'" - LOG.debug(msg, {'resource': self.resource_name, 'id': id, - 'update': update}) - - notifier_info = {'id': id, 'update': update} - notifier = rpc.get_notifier('backupStatusUpdate') - notifier.info(context, self.collection + '.reset_status.start', - notifier_info) - - # Not found exception will be handled at the wsgi level - self.backup_api.reset_status(context=context, backup_id=id, - status=update['status']) - return webob.Response(status_int=http_client.ACCEPTED) - - -class Admin_actions(extensions.ExtensionDescriptor): - """Enable admin actions.""" - - name = "AdminActions" - alias = "os-admin-actions" - updated = "2012-08-25T00:00:00+00:00" - - def get_controller_extensions(self): - exts = [] - for class_ in (VolumeAdminController, SnapshotAdminController, - BackupAdminController): - controller = class_() - extension = extensions.ControllerExtension( - self, class_.collection, controller) - exts.append(extension) - return exts diff --git a/cinder/api/contrib/availability_zones.py b/cinder/api/contrib/availability_zones.py deleted file mode 100644 index c3f7b3f8d..000000000 --- a/cinder/api/contrib/availability_zones.py +++ /dev/null @@ -1,48 +0,0 @@ -# Copyright (c) 2013 
OpenStack Foundation -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from cinder.api import extensions -from cinder.api.openstack import wsgi -import cinder.api.views.availability_zones -import cinder.exception -import cinder.volume.api - - -class Controller(wsgi.Controller): - - _view_builder_class = cinder.api.views.availability_zones.ViewBuilder - - def __init__(self, *args, **kwargs): - super(Controller, self).__init__(*args, **kwargs) - self.volume_api = cinder.volume.api.API() - - def index(self, req): - """Describe all known availability zones.""" - azs = self.volume_api.list_availability_zones() - return self._view_builder.list(req, azs) - - -class Availability_zones(extensions.ExtensionDescriptor): - """Describe Availability Zones.""" - - name = 'AvailabilityZones' - alias = 'os-availability-zone' - updated = '2013-06-27T00:00:00+00:00' - - def get_resources(self): - controller = Controller() - res = extensions.ResourceExtension(Availability_zones.alias, - controller) - return [res] diff --git a/cinder/api/contrib/backups.py b/cinder/api/contrib/backups.py deleted file mode 100644 index a23a65b3d..000000000 --- a/cinder/api/contrib/backups.py +++ /dev/null @@ -1,271 +0,0 @@ -# Copyright (C) 2012 Hewlett-Packard Development Company, L.P. -# Copyright (c) 2014 TrilioData, Inc -# Copyright (c) 2015 EMC Corporation -# All Rights Reserved. 
-# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -"""The backups api.""" - -from oslo_log import log as logging -from six.moves import http_client -from webob import exc - -from cinder.api import common -from cinder.api import extensions -from cinder.api.openstack import wsgi -from cinder.api.views import backups as backup_views -from cinder import backup as backupAPI -from cinder import exception -from cinder.i18n import _ -from cinder import utils - -LOG = logging.getLogger(__name__) - - -class BackupsController(wsgi.Controller): - """The Backups API controller for the OpenStack API.""" - - _view_builder_class = backup_views.ViewBuilder - - def __init__(self): - self.backup_api = backupAPI.API() - super(BackupsController, self).__init__() - - def show(self, req, id): - """Return data about the given backup.""" - LOG.debug('Show backup with id: %s.', id) - context = req.environ['cinder.context'] - - # Not found exception will be handled at the wsgi level - backup = self.backup_api.get(context, backup_id=id) - req.cache_db_backup(backup) - - return self._view_builder.detail(req, backup) - - @wsgi.response(http_client.ACCEPTED) - def delete(self, req, id): - """Delete a backup.""" - context = req.environ['cinder.context'] - - LOG.info('Delete backup with id: %s.', id) - - try: - backup = self.backup_api.get(context, id) - self.backup_api.delete(context, backup) - # Not found exception will be handled at the wsgi level - except exception.InvalidBackup as error: - raise 
exc.HTTPBadRequest(explanation=error.msg) - - def index(self, req): - """Returns a summary list of backups.""" - return self._get_backups(req, is_detail=False) - - def detail(self, req): - """Returns a detailed list of backups.""" - return self._get_backups(req, is_detail=True) - - @staticmethod - def _get_backup_filter_options(): - """Return volume search options allowed by non-admin.""" - return ('name', 'status', 'volume_id') - - @common.process_general_filtering('backup') - def _process_backup_filtering(self, context=None, filters=None, - req_version=None): - utils.remove_invalid_filter_options(context, - filters, - self._get_backup_filter_options()) - - def _convert_sort_name(self, req_version, sort_keys): - """Convert sort key "name" to "display_name". """ - pass - - def _get_backups(self, req, is_detail): - """Returns a list of backups, transformed through view builder.""" - context = req.environ['cinder.context'] - filters = req.params.copy() - req_version = req.api_version_request - marker, limit, offset = common.get_pagination_params(filters) - sort_keys, sort_dirs = common.get_sort_params(filters) - - self._convert_sort_name(req_version, sort_keys) - self._process_backup_filtering(context=context, filters=filters, - req_version=req_version) - - if 'name' in filters: - filters['display_name'] = filters.pop('name') - - backups = self.backup_api.get_all(context, search_opts=filters, - marker=marker, - limit=limit, - offset=offset, - sort_keys=sort_keys, - sort_dirs=sort_dirs, - ) - - req.cache_db_backups(backups.objects) - - if is_detail: - backups = self._view_builder.detail_list(req, backups.objects) - else: - backups = self._view_builder.summary_list(req, backups.objects) - return backups - - # TODO(frankm): Add some checks here including - # - whether requested volume_id exists so we can return some errors - # immediately - # - maybe also do validation of swift container name - @wsgi.response(http_client.ACCEPTED) - def create(self, req, body): - 
"""Create a new backup.""" - LOG.debug('Creating new backup %s', body) - self.assert_valid_body(body, 'backup') - - context = req.environ['cinder.context'] - backup = body['backup'] - - try: - volume_id = backup['volume_id'] - except KeyError: - msg = _("Incorrect request body format") - raise exc.HTTPBadRequest(explanation=msg) - container = backup.get('container', None) - if container: - utils.check_string_length(container, 'Backup container', - min_length=0, max_length=255) - self.validate_name_and_description(backup) - name = backup.get('name', None) - description = backup.get('description', None) - incremental = backup.get('incremental', False) - force = backup.get('force', False) - snapshot_id = backup.get('snapshot_id', None) - LOG.info("Creating backup of volume %(volume_id)s in container" - " %(container)s", - {'volume_id': volume_id, 'container': container}, - context=context) - - try: - new_backup = self.backup_api.create(context, name, description, - volume_id, container, - incremental, None, force, - snapshot_id) - except (exception.InvalidVolume, - exception.InvalidSnapshot) as error: - raise exc.HTTPBadRequest(explanation=error.msg) - # Other not found exceptions will be handled at the wsgi level - except exception.ServiceNotFound as error: - raise exc.HTTPInternalServerError(explanation=error.msg) - - retval = self._view_builder.summary(req, dict(new_backup)) - return retval - - @wsgi.response(http_client.ACCEPTED) - def restore(self, req, id, body): - """Restore an existing backup to a volume.""" - LOG.debug('Restoring backup %(backup_id)s (%(body)s)', - {'backup_id': id, 'body': body}) - self.assert_valid_body(body, 'restore') - - context = req.environ['cinder.context'] - restore = body['restore'] - volume_id = restore.get('volume_id', None) - name = restore.get('name', None) - - LOG.info("Restoring backup %(backup_id)s to volume %(volume_id)s.", - {'backup_id': id, 'volume_id': volume_id}, - context=context) - - try: - new_restore = 
self.backup_api.restore(context, - backup_id=id, - volume_id=volume_id, - name=name) - # Not found exception will be handled at the wsgi level - except (exception.InvalidInput, - exception.InvalidVolume, - exception.InvalidBackup) as error: - raise exc.HTTPBadRequest(explanation=error.msg) - except (exception.VolumeSizeExceedsAvailableQuota, - exception.VolumeLimitExceeded) as error: - raise exc.HTTPRequestEntityTooLarge( - explanation=error.msg, headers={'Retry-After': '0'}) - - retval = self._view_builder.restore_summary( - req, dict(new_restore)) - return retval - - def export_record(self, req, id): - """Export a backup.""" - LOG.debug('Export record for backup %s.', id) - context = req.environ['cinder.context'] - - try: - backup_info = self.backup_api.export_record(context, id) - # Not found exception will be handled at the wsgi level - except exception.InvalidBackup as error: - raise exc.HTTPBadRequest(explanation=error.msg) - - retval = self._view_builder.export_summary( - req, dict(backup_info)) - LOG.debug('Exported record output: %s.', retval) - return retval - - @wsgi.response(http_client.CREATED) - def import_record(self, req, body): - """Import a backup.""" - LOG.debug('Importing record from %s.', body) - self.assert_valid_body(body, 'backup-record') - context = req.environ['cinder.context'] - import_data = body['backup-record'] - # Verify that body elements are provided - try: - backup_service = import_data['backup_service'] - backup_url = import_data['backup_url'] - except KeyError: - msg = _("Incorrect request body format.") - raise exc.HTTPBadRequest(explanation=msg) - LOG.debug('Importing backup using %(service)s and url %(url)s.', - {'service': backup_service, 'url': backup_url}) - - try: - new_backup = self.backup_api.import_record(context, - backup_service, - backup_url) - except exception.InvalidBackup as error: - raise exc.HTTPBadRequest(explanation=error.msg) - # Other Not found exceptions will be handled at the wsgi level - except 
exception.ServiceNotFound as error: - raise exc.HTTPInternalServerError(explanation=error.msg) - - retval = self._view_builder.summary(req, dict(new_backup)) - LOG.debug('Import record output: %s.', retval) - return retval - - -class Backups(extensions.ExtensionDescriptor): - """Backups support.""" - - name = 'Backups' - alias = 'backups' - updated = '2012-12-12T00:00:00+00:00' - - def get_resources(self): - resources = [] - res = extensions.ResourceExtension( - Backups.alias, BackupsController(), - collection_actions={'detail': 'GET', 'import_record': 'POST'}, - member_actions={'restore': 'POST', 'export_record': 'GET', - 'action': 'POST'}) - resources.append(res) - return resources diff --git a/cinder/api/contrib/capabilities.py b/cinder/api/contrib/capabilities.py deleted file mode 100644 index edb5edc31..000000000 --- a/cinder/api/contrib/capabilities.py +++ /dev/null @@ -1,75 +0,0 @@ -# Copyright (c) 2015 Hitachi Data Systems, Inc. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -import oslo_messaging - -from cinder.api import extensions -from cinder.api.openstack import wsgi -from cinder.api.views import capabilities as capabilities_view -from cinder import exception -from cinder.i18n import _ -from cinder import objects -from cinder.volume import rpcapi - - -def authorize(context, action_name): - extensions.extension_authorizer('volume', action_name)(context) - - -class CapabilitiesController(wsgi.Controller): - """The Capabilities controller for the OpenStack API.""" - - _view_builder_class = capabilities_view.ViewBuilder - - def __init__(self): - # FIXME(jdg): Is it kosher that this just - # skips the volume.api and goes straight to RPC - # from here? - self.volume_api = rpcapi.VolumeAPI() - super(CapabilitiesController, self).__init__() - - def show(self, req, id): - """Return capabilities list of given backend.""" - context = req.environ['cinder.context'] - authorize(context, 'capabilities') - filters = {'host_or_cluster': id, 'binary': 'cinder-volume'} - services = objects.ServiceList.get_all(context, filters) - if not services: - msg = (_("Can't find service: %s") % id) - raise exception.NotFound(msg) - topic = services[0].service_topic_queue - try: - capabilities = self.volume_api.get_capabilities(context, topic, - False) - except oslo_messaging.MessagingTimeout: - raise exception.RPCTimeout(service=topic) - return self._view_builder.summary(req, capabilities, topic) - - -class Capabilities(extensions.ExtensionDescriptor): - """Capabilities support.""" - - name = "Capabilities" - alias = "capabilities" - updated = "2015-08-31T00:00:00+00:00" - - def get_resources(self): - resources = [] - res = extensions.ResourceExtension( - Capabilities.alias, - CapabilitiesController()) - - resources.append(res) - return resources diff --git a/cinder/api/contrib/cgsnapshots.py b/cinder/api/contrib/cgsnapshots.py deleted file mode 100644 index 16344671b..000000000 --- a/cinder/api/contrib/cgsnapshots.py +++ /dev/null @@ -1,165 +0,0 @@ -# 
Copyright (C) 2012 - 2014 EMC Corporation. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -"""The cgsnapshots api.""" - -from oslo_log import log as logging -import six -from six.moves import http_client -import webob -from webob import exc - -from cinder.api import common -from cinder.api import extensions -from cinder.api.openstack import wsgi -from cinder.api.views import cgsnapshots as cgsnapshot_views -from cinder import exception -from cinder import group as group_api -from cinder.i18n import _ - -LOG = logging.getLogger(__name__) - - -class CgsnapshotsController(wsgi.Controller): - """The cgsnapshots API controller for the OpenStack API.""" - - _view_builder_class = cgsnapshot_views.ViewBuilder - - def __init__(self): - self.group_snapshot_api = group_api.API() - super(CgsnapshotsController, self).__init__() - - def show(self, req, id): - """Return data about the given cgsnapshot.""" - LOG.debug('show called for member %s', id) - context = req.environ['cinder.context'] - - # Not found exception will be handled at the wsgi level - cgsnapshot = self._get_cgsnapshot(context, id) - - return self._view_builder.detail(req, cgsnapshot) - - def delete(self, req, id): - """Delete a cgsnapshot.""" - LOG.debug('delete called for member %s', id) - context = req.environ['cinder.context'] - - LOG.info('Delete cgsnapshot with id: %s', id) - - try: - cgsnapshot = self._get_cgsnapshot(context, id) - 
self.group_snapshot_api.delete_group_snapshot(context, cgsnapshot) - except exception.GroupSnapshotNotFound: - # Not found exception will be handled at the wsgi level - raise - except exception.InvalidGroupSnapshot as e: - raise exc.HTTPBadRequest(explanation=six.text_type(e)) - except Exception: - msg = _("Failed cgsnapshot") - raise exc.HTTPBadRequest(explanation=msg) - - return webob.Response(status_int=http_client.ACCEPTED) - - def index(self, req): - """Returns a summary list of cgsnapshots.""" - return self._get_cgsnapshots(req, is_detail=False) - - def detail(self, req): - """Returns a detailed list of cgsnapshots.""" - return self._get_cgsnapshots(req, is_detail=True) - - def _get_cg(self, context, id): - # Not found exception will be handled at the wsgi level - consistencygroup = self.group_snapshot_api.get(context, group_id=id) - - return consistencygroup - - def _get_cgsnapshot(self, context, id): - # Not found exception will be handled at the wsgi level - cgsnapshot = self.group_snapshot_api.get_group_snapshot( - context, group_snapshot_id=id) - - return cgsnapshot - - def _get_cgsnapshots(self, req, is_detail): - """Returns a list of cgsnapshots, transformed through view builder.""" - context = req.environ['cinder.context'] - grp_snapshots = self.group_snapshot_api.get_all_group_snapshots( - context) - grpsnap_limited_list = common.limited(grp_snapshots, req) - - if is_detail: - grp_snapshots = self._view_builder.detail_list( - req, grpsnap_limited_list) - else: - grp_snapshots = self._view_builder.summary_list( - req, grpsnap_limited_list) - - return grp_snapshots - - @wsgi.response(http_client.ACCEPTED) - def create(self, req, body): - """Create a new cgsnapshot.""" - LOG.debug('Creating new cgsnapshot %s', body) - self.assert_valid_body(body, 'cgsnapshot') - - context = req.environ['cinder.context'] - cgsnapshot = body['cgsnapshot'] - self.validate_name_and_description(cgsnapshot) - - try: - group_id = cgsnapshot['consistencygroup_id'] - except 
KeyError: - msg = _("'consistencygroup_id' must be specified") - raise exc.HTTPBadRequest(explanation=msg) - - # Not found exception will be handled at the wsgi level - group = self._get_cg(context, group_id) - - name = cgsnapshot.get('name', None) - description = cgsnapshot.get('description', None) - - LOG.info("Creating cgsnapshot %(name)s.", - {'name': name}, - context=context) - - try: - new_cgsnapshot = self.group_snapshot_api.create_group_snapshot( - context, group, name, description) - # Not found exception will be handled at the wsgi level - except (exception.InvalidGroup, - exception.InvalidGroupSnapshot, - exception.InvalidVolume) as error: - raise exc.HTTPBadRequest(explanation=error.msg) - - retval = self._view_builder.summary(req, new_cgsnapshot) - - return retval - - -class Cgsnapshots(extensions.ExtensionDescriptor): - """cgsnapshots support.""" - - name = 'Cgsnapshots' - alias = 'cgsnapshots' - updated = '2014-08-18T00:00:00+00:00' - - def get_resources(self): - resources = [] - res = extensions.ResourceExtension( - Cgsnapshots.alias, CgsnapshotsController(), - collection_actions={'detail': 'GET'}) - resources.append(res) - return resources diff --git a/cinder/api/contrib/consistencygroups.py b/cinder/api/contrib/consistencygroups.py deleted file mode 100644 index b9d541a5e..000000000 --- a/cinder/api/contrib/consistencygroups.py +++ /dev/null @@ -1,307 +0,0 @@ -# Copyright (C) 2012 - 2014 EMC Corporation. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the -# License for the specific language governing permissions and limitations -# under the License. - -"""The consistencygroups api.""" - -from oslo_log import log as logging -from oslo_utils import strutils -from six.moves import http_client -import webob -from webob import exc - -from cinder.api import common -from cinder.api import extensions -from cinder.api.openstack import wsgi -from cinder.api.views import consistencygroups as consistencygroup_views -from cinder.consistencygroup import api as consistencygroup_api -from cinder import exception -from cinder import group as group_api -from cinder.i18n import _ -from cinder.volume import group_types - -LOG = logging.getLogger(__name__) - - -class ConsistencyGroupsController(wsgi.Controller): - """The ConsistencyGroups API controller for the OpenStack API.""" - - _view_builder_class = consistencygroup_views.ViewBuilder - - def __init__(self): - self.group_api = group_api.API() - super(ConsistencyGroupsController, self).__init__() - - def show(self, req, id): - """Return data about the given consistency group.""" - LOG.debug('show called for member %s', id) - context = req.environ['cinder.context'] - - # Not found exception will be handled at the wsgi level - consistencygroup = self._get(context, id) - - return self._view_builder.detail(req, consistencygroup) - - def delete(self, req, id, body): - """Delete a consistency group.""" - LOG.debug('delete called for member %s', id) - context = req.environ['cinder.context'] - force = False - if body: - if not self.is_valid_body(body, 'consistencygroup'): - msg = _("Missing required element 'consistencygroup' in " - "request body.") - raise exc.HTTPBadRequest(explanation=msg) - - cg_body = body['consistencygroup'] - try: - force = strutils.bool_from_string(cg_body.get('force', False), - strict=True) - except ValueError: - msg = _("Invalid value '%s' for force.") % force - raise exc.HTTPBadRequest(explanation=msg) - - LOG.info('Delete consistency group with id: %s', 
id) - - try: - group = self._get(context, id) - consistencygroup_api.check_policy(context, 'delete') - self.group_api.delete(context, group, force) - # Not found exception will be handled at the wsgi level - except exception.InvalidConsistencyGroup as error: - raise exc.HTTPBadRequest(explanation=error.msg) - - return webob.Response(status_int=http_client.ACCEPTED) - - def index(self, req): - """Returns a summary list of consistency groups.""" - return self._get_consistencygroups(req, is_detail=False) - - def detail(self, req): - """Returns a detailed list of consistency groups.""" - return self._get_consistencygroups(req, is_detail=True) - - def _get(self, context, id): - # Not found exception will be handled at the wsgi level - consistencygroup = self.group_api.get(context, group_id=id) - - return consistencygroup - - def _get_cgsnapshot(self, context, id): - # Not found exception will be handled at the wsgi level - cgsnapshot = self.group_api.get_group_snapshot( - context, - group_snapshot_id=id) - - return cgsnapshot - - def _get_consistencygroups(self, req, is_detail): - """Returns a list of consistency groups through view builder.""" - context = req.environ['cinder.context'] - filters = req.params.copy() - - # make another copy of filters, since it is being modified in - # consistencygroup_api while getting consistencygroups - marker, limit, offset = common.get_pagination_params(filters) - sort_keys, sort_dirs = common.get_sort_params(filters) - - groups = self.group_api.get_all( - context, filters=filters, marker=marker, limit=limit, - offset=offset, sort_keys=sort_keys, sort_dirs=sort_dirs) - - if is_detail: - groups = self._view_builder.detail_list(req, groups) - else: - groups = self._view_builder.summary_list(req, groups) - - return groups - - @wsgi.response(http_client.ACCEPTED) - def create(self, req, body): - """Create a new consistency group.""" - LOG.debug('Creating new consistency group %s', body) - self.assert_valid_body(body, 'consistencygroup') 
- - context = req.environ['cinder.context'] - consistencygroup = body['consistencygroup'] - self.validate_name_and_description(consistencygroup) - name = consistencygroup.get('name', None) - description = consistencygroup.get('description', None) - volume_types = consistencygroup.get('volume_types', None) - if not volume_types: - msg = _("volume_types must be provided to create " - "consistency group %(name)s.") % {'name': name} - raise exc.HTTPBadRequest(explanation=msg) - volume_types = volume_types.rstrip(',').split(',') - availability_zone = consistencygroup.get('availability_zone', None) - group_type = group_types.get_default_cgsnapshot_type() - if not group_type: - msg = (_('Group type %s not found. Rerun migration script to ' - 'create the default cgsnapshot type.') % - group_types.DEFAULT_CGSNAPSHOT_TYPE) - raise exc.HTTPBadRequest(explanation=msg) - - LOG.info("Creating consistency group %(name)s.", - {'name': name}) - - try: - consistencygroup_api.check_policy(context, 'create') - new_consistencygroup = self.group_api.create( - context, name, description, group_type['id'], volume_types, - availability_zone=availability_zone) - except (exception.InvalidConsistencyGroup, - exception.InvalidGroup, - exception.InvalidVolumeType, - exception.ObjectActionError) as error: - raise exc.HTTPBadRequest(explanation=error.msg) - except exception.NotFound: - # Not found exception will be handled at the wsgi level - raise - - retval = self._view_builder.summary(req, new_consistencygroup) - return retval - - @wsgi.response(http_client.ACCEPTED) - def create_from_src(self, req, body): - """Create a new consistency group from a source. - - The source can be a CG snapshot or a CG. Note that - this does not require volume_types as the "create" - API above. 
- """ - LOG.debug('Creating new consistency group %s.', body) - self.assert_valid_body(body, 'consistencygroup-from-src') - - context = req.environ['cinder.context'] - consistencygroup = body['consistencygroup-from-src'] - self.validate_name_and_description(consistencygroup) - name = consistencygroup.get('name', None) - description = consistencygroup.get('description', None) - cgsnapshot_id = consistencygroup.get('cgsnapshot_id', None) - source_cgid = consistencygroup.get('source_cgid', None) - if not cgsnapshot_id and not source_cgid: - msg = _("Either 'cgsnapshot_id' or 'source_cgid' must be " - "provided to create consistency group %(name)s " - "from source.") % {'name': name} - raise exc.HTTPBadRequest(explanation=msg) - - if cgsnapshot_id and source_cgid: - msg = _("Cannot provide both 'cgsnapshot_id' and 'source_cgid' " - "to create consistency group %(name)s from " - "source.") % {'name': name} - raise exc.HTTPBadRequest(explanation=msg) - - if cgsnapshot_id: - LOG.info("Creating consistency group %(name)s from " - "cgsnapshot %(snap)s.", - {'name': name, 'snap': cgsnapshot_id}) - elif source_cgid: - LOG.info("Creating consistency group %(name)s from " - "source consistency group %(source_cgid)s.", - {'name': name, 'source_cgid': source_cgid}) - - try: - if source_cgid: - self._get(context, source_cgid) - if cgsnapshot_id: - self._get_cgsnapshot(context, cgsnapshot_id) - consistencygroup_api.check_policy(context, 'create') - new_group = self.group_api.create_from_src( - context, name, description, cgsnapshot_id, source_cgid) - except exception.NotFound: - # Not found exception will be handled at the wsgi level - raise - except exception.CinderException as error: - raise exc.HTTPBadRequest(explanation=error.msg) - - retval = self._view_builder.summary(req, new_group) - return retval - - def _check_update_parameters(self, name, description, add_volumes, - remove_volumes): - if not (name or description or add_volumes or remove_volumes): - msg = _("Name, 
description, add_volumes, and remove_volumes " - "can not be all empty in the request body.") - raise exc.HTTPBadRequest(explanation=msg) - - def _update(self, context, id, name, description, add_volumes, - remove_volumes, - allow_empty=False): - LOG.info("Updating consistency group %(id)s with name %(name)s " - "description: %(description)s add_volumes: " - "%(add_volumes)s remove_volumes: %(remove_volumes)s.", - {'id': id, - 'name': name, - 'description': description, - 'add_volumes': add_volumes, - 'remove_volumes': remove_volumes}) - - group = self._get(context, id) - self.group_api.update(context, group, name, description, - add_volumes, remove_volumes) - - def update(self, req, id, body): - """Update the consistency group. - - Expected format of the input parameter 'body': - - .. code-block:: json - - { - "consistencygroup": - { - "name": "my_cg", - "description": "My consistency group", - "add_volumes": "volume-uuid-1,volume-uuid-2,...", - "remove_volumes": "volume-uuid-8,volume-uuid-9,..." 
- } - } - - """ - LOG.debug('Update called for consistency group %s.', id) - if not body: - msg = _("Missing request body.") - raise exc.HTTPBadRequest(explanation=msg) - - self.assert_valid_body(body, 'consistencygroup') - context = req.environ['cinder.context'] - consistencygroup = body.get('consistencygroup', None) - self.validate_name_and_description(consistencygroup) - name = consistencygroup.get('name', None) - description = consistencygroup.get('description', None) - add_volumes = consistencygroup.get('add_volumes', None) - remove_volumes = consistencygroup.get('remove_volumes', None) - - self._check_update_parameters(name, description, add_volumes, - remove_volumes) - self._update(context, id, name, description, add_volumes, - remove_volumes) - return webob.Response(status_int=http_client.ACCEPTED) - - -class Consistencygroups(extensions.ExtensionDescriptor): - """consistency groups support.""" - - name = 'Consistencygroups' - alias = 'consistencygroups' - updated = '2014-08-18T00:00:00+00:00' - - def get_resources(self): - resources = [] - res = extensions.ResourceExtension( - Consistencygroups.alias, ConsistencyGroupsController(), - collection_actions={'detail': 'GET', 'create_from_src': 'POST'}, - member_actions={'delete': 'POST', 'update': 'PUT'}) - resources.append(res) - return resources diff --git a/cinder/api/contrib/extended_services.py b/cinder/api/contrib/extended_services.py deleted file mode 100644 index 92619ea7e..000000000 --- a/cinder/api/contrib/extended_services.py +++ /dev/null @@ -1,23 +0,0 @@ -# Copyright 2014 IBM Corp. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. 
You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from cinder.api import extensions - - -class Extended_services(extensions.ExtensionDescriptor): - """Extended services support.""" - - name = "ExtendedServices" - alias = "os-extended-services" - updated = "2014-01-10T00:00:00-00:00" diff --git a/cinder/api/contrib/extended_snapshot_attributes.py b/cinder/api/contrib/extended_snapshot_attributes.py deleted file mode 100644 index f958510e9..000000000 --- a/cinder/api/contrib/extended_snapshot_attributes.py +++ /dev/null @@ -1,61 +0,0 @@ -# Copyright 2012 OpenStack Foundation -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -"""The Extended Snapshot Attributes API extension.""" - -from cinder.api import extensions -from cinder.api.openstack import wsgi - - -authorize = extensions.soft_extension_authorizer( - 'volume', - 'extended_snapshot_attributes') - - -class ExtendedSnapshotAttributesController(wsgi.Controller): - def _extend_snapshot(self, req, resp_snap): - db_snap = req.get_db_snapshot(resp_snap['id']) - for attr in ['project_id', 'progress']: - key = "%s:%s" % (Extended_snapshot_attributes.alias, attr) - resp_snap[key] = db_snap[attr] - - @wsgi.extends - def show(self, req, resp_obj, id): - context = req.environ['cinder.context'] - if authorize(context): - # Attach our slave template to the response object - snapshot = resp_obj.obj['snapshot'] - self._extend_snapshot(req, snapshot) - - @wsgi.extends - def detail(self, req, resp_obj): - context = req.environ['cinder.context'] - if authorize(context): - # Attach our slave template to the response object - for snapshot in list(resp_obj.obj['snapshots']): - self._extend_snapshot(req, snapshot) - - -class Extended_snapshot_attributes(extensions.ExtensionDescriptor): - """Extended SnapshotAttributes support.""" - - name = "ExtendedSnapshotAttributes" - alias = "os-extended-snapshot-attributes" - updated = "2012-06-19T00:00:00+00:00" - - def get_controller_extensions(self): - controller = ExtendedSnapshotAttributesController() - extension = extensions.ControllerExtension(self, 'snapshots', - controller) - return [extension] diff --git a/cinder/api/contrib/hosts.py b/cinder/api/contrib/hosts.py deleted file mode 100644 index 4fc08b075..000000000 --- a/cinder/api/contrib/hosts.py +++ /dev/null @@ -1,207 +0,0 @@ -# Copyright (c) 2011 OpenStack Foundation -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. 
You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -"""The hosts admin extension.""" - -from oslo_config import cfg -from oslo_log import log as logging -from oslo_utils import timeutils -import webob.exc - -from cinder.api import extensions -from cinder.api.openstack import wsgi -from cinder.common import constants -from cinder import db -from cinder import exception -from cinder.i18n import _ -from cinder import objects -from cinder.volume import api as volume_api - - -CONF = cfg.CONF - -LOG = logging.getLogger(__name__) -authorize = extensions.extension_authorizer('volume', 'hosts') - - -def _list_hosts(req, service=None): - """Returns a summary list of hosts.""" - curr_time = timeutils.utcnow(with_timezone=True) - context = req.environ['cinder.context'] - filters = {'disabled': False} - services = objects.ServiceList.get_all(context, filters) - zone = '' - if 'zone' in req.GET: - zone = req.GET['zone'] - if zone: - services = [s for s in services if s['availability_zone'] == zone] - hosts = [] - for host in services: - delta = curr_time - (host.updated_at or host.created_at) - alive = abs(delta.total_seconds()) <= CONF.service_down_time - status = "available" if alive else "unavailable" - active = 'enabled' - if host.disabled: - active = 'disabled' - LOG.debug('status, active and update: %s, %s, %s', - status, active, host.updated_at) - updated_at = host.updated_at - if updated_at: - updated_at = timeutils.normalize_time(updated_at) - hosts.append({'host_name': host.host, - 'service': host.topic, - 'zone': host.availability_zone, - 'service-status': status, - 'service-state': active, - 'last-update': 
updated_at, - }) - if service: - hosts = [host for host in hosts - if host['service'] == service] - return hosts - - -def check_host(fn): - """Makes sure that the host exists.""" - def wrapped(self, req, id, service=None, *args, **kwargs): - listed_hosts = _list_hosts(req, service) - hosts = [h["host_name"] for h in listed_hosts] - if id in hosts: - return fn(self, req, id, *args, **kwargs) - raise exception.HostNotFound(host=id) - return wrapped - - -class HostController(wsgi.Controller): - """The Hosts API controller for the OpenStack API.""" - def __init__(self): - self.api = volume_api.HostAPI() - super(HostController, self).__init__() - - def index(self, req): - authorize(req.environ['cinder.context']) - return {'hosts': _list_hosts(req)} - - @check_host - def update(self, req, id, body): - authorize(req.environ['cinder.context']) - update_values = {} - for raw_key, raw_val in body.items(): - key = raw_key.lower().strip() - val = raw_val.lower().strip() - if key == "status": - if val in ("enable", "disable"): - update_values['status'] = val.startswith("enable") - else: - explanation = _("Invalid status: '%s'") % raw_val - raise webob.exc.HTTPBadRequest(explanation=explanation) - else: - explanation = _("Invalid update setting: '%s'") % raw_key - raise webob.exc.HTTPBadRequest(explanation=explanation) - update_setters = {'status': self._set_enabled_status} - result = {} - for key, value in update_values.items(): - result.update(update_setters[key](req, id, value)) - return result - - def _set_enabled_status(self, req, host, enabled): - """Sets the specified host's ability to accept new volumes.""" - context = req.environ['cinder.context'] - state = "enabled" if enabled else "disabled" - LOG.info("Setting host %(host)s to %(state)s.", - {'host': host, 'state': state}) - result = self.api.set_host_enabled(context, - host=host, - enabled=enabled) - if result not in ("enabled", "disabled"): - # An error message was returned - raise 
webob.exc.HTTPBadRequest(explanation=result) - return {"host": host, "status": result} - - def show(self, req, id): - """Shows the volume usage info given by hosts. - - :param context: security context - :param host: hostname - :returns: expected to use HostShowTemplate. - ex.:: - - {'host': {'resource':D},..} - D: {'host': 'hostname','project': 'admin', - 'volume_count': 1, 'total_volume_gb': 2048} - """ - host = id - context = req.environ['cinder.context'] - if not context.is_admin: - msg = _("Describe-resource is admin only functionality") - raise webob.exc.HTTPForbidden(explanation=msg) - - # Not found exception will be handled at the wsgi level - host_ref = objects.Service.get_by_host_and_topic( - context, host, constants.VOLUME_TOPIC) - - # Getting total available/used resource - # TODO(jdg): Add summary info for Snapshots - volume_refs = db.volume_get_all_by_host(context, host_ref.host) - (count, sum) = db.volume_data_get_for_host(context, - host_ref.host) - - snap_count_total = 0 - snap_sum_total = 0 - resources = [{'resource': {'host': host, 'project': '(total)', - 'volume_count': str(count), - 'total_volume_gb': str(sum), - 'snapshot_count': str(snap_count_total), - 'total_snapshot_gb': str(snap_sum_total)}}] - - project_ids = [v['project_id'] for v in volume_refs] - project_ids = list(set(project_ids)) - for project_id in project_ids: - (count, sum) = db.volume_data_get_for_project(context, project_id) - (snap_count, snap_sum) = ( - objects.Snapshot.snapshot_data_get_for_project(context, - project_id)) - resources.append( - {'resource': - {'host': host, - 'project': project_id, - 'volume_count': str(count), - 'total_volume_gb': str(sum), - 'snapshot_count': str(snap_count), - 'total_snapshot_gb': str(snap_sum)}}) - snap_count_total += int(snap_count) - snap_sum_total += int(snap_sum) - resources[0]['resource']['snapshot_count'] = str(snap_count_total) - resources[0]['resource']['total_snapshot_gb'] = str(snap_sum_total) - return {"host": resources} - - 
-class Hosts(extensions.ExtensionDescriptor): - """Admin-only host administration.""" - - name = "Hosts" - alias = "os-hosts" - updated = "2011-06-29T00:00:00+00:00" - - def get_resources(self): - resources = [extensions.ResourceExtension('os-hosts', - HostController(), - collection_actions={ - 'update': 'PUT'}, - member_actions={ - 'startup': 'GET', - 'shutdown': 'GET', - 'reboot': 'GET'})] - return resources diff --git a/cinder/api/contrib/image_create.py b/cinder/api/contrib/image_create.py deleted file mode 100644 index eb2358a30..000000000 --- a/cinder/api/contrib/image_create.py +++ /dev/null @@ -1,28 +0,0 @@ -# Copyright (c) 2012 NTT. -# Copyright (c) 2012 OpenStack Foundation -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -"""The Create Volume from Image extension.""" - - -from cinder.api import extensions - - -class Image_create(extensions.ExtensionDescriptor): - """Allow creating a volume from an image in the Create Volume v1 API.""" - - name = "CreateVolumeExtension" - alias = "os-image-create" - updated = "2012-08-13T00:00:00+00:00" diff --git a/cinder/api/contrib/qos_specs_manage.py b/cinder/api/contrib/qos_specs_manage.py deleted file mode 100644 index 8f97ed794..000000000 --- a/cinder/api/contrib/qos_specs_manage.py +++ /dev/null @@ -1,405 +0,0 @@ -# Copyright (c) 2013 eBay Inc. 
-# Copyright (c) 2013 OpenStack Foundation -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -"""The QoS specs extension""" - -from oslo_log import log as logging -import six -from six.moves import http_client -import webob - -from cinder.api import common -from cinder.api import extensions -from cinder.api.openstack import wsgi -from cinder.api.views import qos_specs as view_qos_specs -from cinder import exception -from cinder.i18n import _ -from cinder import rpc -from cinder import utils -from cinder.volume import qos_specs - - -LOG = logging.getLogger(__name__) - -authorize_create = extensions.extension_authorizer('volume', - 'qos_specs_manage:create') -authorize_get = extensions.extension_authorizer('volume', - 'qos_specs_manage:get') -authorize_get_all = extensions.extension_authorizer('volume', - 'qos_specs_manage:get_all') -authorize_update = extensions.extension_authorizer('volume', - 'qos_specs_manage:update') -authorize_delete = extensions.extension_authorizer('volume', - 'qos_specs_manage:delete') - - -def _check_specs(context, specs_id): - # Not found exception will be handled at the wsgi level - qos_specs.get_qos_specs(context, specs_id) - - -class QoSSpecsController(wsgi.Controller): - """The volume type extra specs API controller for the OpenStack API.""" - - _view_builder_class = view_qos_specs.ViewBuilder - - @staticmethod - @utils.if_notifications_enabled - def _notify_qos_specs_error(context, method, payload): - 
rpc.get_notifier('QoSSpecs').error(context, - method, - payload) - - def index(self, req): - """Returns the list of qos_specs.""" - context = req.environ['cinder.context'] - authorize_get_all(context) - - params = req.params.copy() - - marker, limit, offset = common.get_pagination_params(params) - sort_keys, sort_dirs = common.get_sort_params(params) - filters = params - allowed_search_options = ('id', 'name', 'consumer') - utils.remove_invalid_filter_options(context, filters, - allowed_search_options) - - specs = qos_specs.get_all_specs(context, filters=filters, - marker=marker, limit=limit, - offset=offset, sort_keys=sort_keys, - sort_dirs=sort_dirs) - return self._view_builder.summary_list(req, specs) - - def create(self, req, body=None): - context = req.environ['cinder.context'] - authorize_create(context) - - self.assert_valid_body(body, 'qos_specs') - - specs = body['qos_specs'] - name = specs.pop('name', None) - if name is None: - msg = _("Please specify a name for QoS specs.") - raise webob.exc.HTTPBadRequest(explanation=msg) - - self.validate_string_length(name, 'name', min_length=1, - max_length=255, remove_whitespaces=True) - name = name.strip() - - # Validate the key-value pairs in the qos spec. 
- utils.validate_dictionary_string_length(specs) - - try: - spec = qos_specs.create(context, name, specs) - notifier_info = dict(name=name, specs=specs) - rpc.get_notifier('QoSSpecs').info(context, - 'qos_specs.create', - notifier_info) - except exception.InvalidQoSSpecs as err: - notifier_err = dict(name=name, error_message=err) - self._notify_qos_specs_error(context, - 'qos_specs.create', - notifier_err) - raise webob.exc.HTTPBadRequest(explanation=six.text_type(err)) - except exception.QoSSpecsExists as err: - notifier_err = dict(name=name, error_message=err) - self._notify_qos_specs_error(context, - 'qos_specs.create', - notifier_err) - raise webob.exc.HTTPConflict(explanation=six.text_type(err)) - except exception.QoSSpecsCreateFailed as err: - notifier_err = dict(name=name, error_message=err) - self._notify_qos_specs_error(context, - 'qos_specs.create', - notifier_err) - raise webob.exc.HTTPInternalServerError( - explanation=six.text_type(err)) - - return self._view_builder.detail(req, spec) - - def update(self, req, id, body=None): - context = req.environ['cinder.context'] - authorize_update(context) - - self.assert_valid_body(body, 'qos_specs') - specs = body['qos_specs'] - try: - qos_specs.update(context, id, specs) - notifier_info = dict(id=id, specs=specs) - rpc.get_notifier('QoSSpecs').info(context, - 'qos_specs.update', - notifier_info) - except (exception.QoSSpecsNotFound, exception.InvalidQoSSpecs) as err: - notifier_err = dict(id=id, error_message=err) - self._notify_qos_specs_error(context, - 'qos_specs.update', - notifier_err) - # Not found exception will be handled at the wsgi level - raise - except exception.QoSSpecsUpdateFailed as err: - notifier_err = dict(id=id, error_message=err) - self._notify_qos_specs_error(context, - 'qos_specs.update', - notifier_err) - raise webob.exc.HTTPInternalServerError( - explanation=six.text_type(err)) - - return body - - def show(self, req, id): - """Return a single qos spec item.""" - context = 
req.environ['cinder.context'] - authorize_get(context) - - # Not found exception will be handled at the wsgi level - spec = qos_specs.get_qos_specs(context, id) - - return self._view_builder.detail(req, spec) - - def delete(self, req, id): - """Deletes an existing qos specs.""" - context = req.environ['cinder.context'] - authorize_delete(context) - - # Convert string to bool type in strict manner - force = utils.get_bool_param('force', req.params) - LOG.debug("Delete qos_spec: %(id)s, force: %(force)s", - {'id': id, 'force': force}) - - try: - qos_specs.delete(context, id, force) - notifier_info = dict(id=id) - rpc.get_notifier('QoSSpecs').info(context, - 'qos_specs.delete', - notifier_info) - except exception.QoSSpecsNotFound as err: - notifier_err = dict(id=id, error_message=err) - self._notify_qos_specs_error(context, - 'qos_specs.delete', - notifier_err) - # Not found exception will be handled at the wsgi level - raise - except exception.QoSSpecsInUse as err: - notifier_err = dict(id=id, error_message=err) - self._notify_qos_specs_error(context, - 'qos_specs.delete', - notifier_err) - if force: - msg = _('Failed to disassociate qos specs.') - raise webob.exc.HTTPInternalServerError(explanation=msg) - msg = _('Qos specs still in use.') - raise webob.exc.HTTPBadRequest(explanation=msg) - - return webob.Response(status_int=http_client.ACCEPTED) - - def delete_keys(self, req, id, body): - """Deletes specified keys in qos specs.""" - context = req.environ['cinder.context'] - authorize_delete(context) - - if not (body and 'keys' in body - and isinstance(body.get('keys'), list)): - raise webob.exc.HTTPBadRequest() - - keys = body['keys'] - LOG.debug("Delete_key spec: %(id)s, keys: %(keys)s", - {'id': id, 'keys': keys}) - - try: - qos_specs.delete_keys(context, id, keys) - notifier_info = dict(id=id) - rpc.get_notifier('QoSSpecs').info(context, 'qos_specs.delete_keys', - notifier_info) - except exception.NotFound as err: - notifier_err = dict(id=id, error_message=err) 
- self._notify_qos_specs_error(context, - 'qos_specs.delete_keys', - notifier_err) - # Not found exception will be handled at the wsgi level - raise - - return webob.Response(status_int=http_client.ACCEPTED) - - def associations(self, req, id): - """List all associations of given qos specs.""" - context = req.environ['cinder.context'] - authorize_get_all(context) - - LOG.debug("Get associations for qos_spec id: %s", id) - - try: - associates = qos_specs.get_associations(context, id) - notifier_info = dict(id=id) - rpc.get_notifier('QoSSpecs').info(context, - 'qos_specs.associations', - notifier_info) - except exception.QoSSpecsNotFound as err: - notifier_err = dict(id=id, error_message=err) - self._notify_qos_specs_error(context, - 'qos_specs.associations', - notifier_err) - # Not found exception will be handled at the wsgi level - raise - except exception.CinderException as err: - notifier_err = dict(id=id, error_message=err) - self._notify_qos_specs_error(context, - 'qos_specs.associations', - notifier_err) - raise webob.exc.HTTPInternalServerError( - explanation=six.text_type(err)) - - return self._view_builder.associations(req, associates) - - def associate(self, req, id): - """Associate a qos specs with a volume type.""" - context = req.environ['cinder.context'] - authorize_update(context) - - type_id = req.params.get('vol_type_id', None) - - if not type_id: - msg = _('Volume Type id must not be None.') - notifier_err = dict(id=id, error_message=msg) - self._notify_qos_specs_error(context, - 'qos_specs.delete', - notifier_err) - raise webob.exc.HTTPBadRequest(explanation=msg) - LOG.debug("Associate qos_spec: %(id)s with type: %(type_id)s", - {'id': id, 'type_id': type_id}) - - try: - qos_specs.associate_qos_with_type(context, id, type_id) - notifier_info = dict(id=id, type_id=type_id) - rpc.get_notifier('QoSSpecs').info(context, - 'qos_specs.associate', - notifier_info) - except exception.NotFound as err: - notifier_err = dict(id=id, error_message=err) - 
self._notify_qos_specs_error(context, - 'qos_specs.associate', - notifier_err) - # Not found exception will be handled at the wsgi level - raise - except exception.InvalidVolumeType as err: - notifier_err = dict(id=id, error_message=err) - self._notify_qos_specs_error(context, - 'qos_specs.associate', - notifier_err) - self._notify_qos_specs_error(context, - 'qos_specs.associate', - notifier_err) - raise webob.exc.HTTPBadRequest(explanation=six.text_type(err)) - except exception.QoSSpecsAssociateFailed as err: - notifier_err = dict(id=id, error_message=err) - self._notify_qos_specs_error(context, - 'qos_specs.associate', - notifier_err) - raise webob.exc.HTTPInternalServerError( - explanation=six.text_type(err)) - - return webob.Response(status_int=http_client.ACCEPTED) - - def disassociate(self, req, id): - """Disassociate a qos specs from a volume type.""" - context = req.environ['cinder.context'] - authorize_update(context) - - type_id = req.params.get('vol_type_id', None) - - if not type_id: - msg = _('Volume Type id must not be None.') - notifier_err = dict(id=id, error_message=msg) - self._notify_qos_specs_error(context, - 'qos_specs.delete', - notifier_err) - raise webob.exc.HTTPBadRequest(explanation=msg) - LOG.debug("Disassociate qos_spec: %(id)s from type: %(type_id)s", - {'id': id, 'type_id': type_id}) - - try: - qos_specs.disassociate_qos_specs(context, id, type_id) - notifier_info = dict(id=id, type_id=type_id) - rpc.get_notifier('QoSSpecs').info(context, - 'qos_specs.disassociate', - notifier_info) - except exception.NotFound as err: - notifier_err = dict(id=id, error_message=err) - self._notify_qos_specs_error(context, - 'qos_specs.disassociate', - notifier_err) - # Not found exception will be handled at the wsgi level - raise - except exception.QoSSpecsDisassociateFailed as err: - notifier_err = dict(id=id, error_message=err) - self._notify_qos_specs_error(context, - 'qos_specs.disassociate', - notifier_err) - raise 
webob.exc.HTTPInternalServerError( - explanation=six.text_type(err)) - - return webob.Response(status_int=http_client.ACCEPTED) - - def disassociate_all(self, req, id): - """Disassociate a qos specs from all volume types.""" - context = req.environ['cinder.context'] - authorize_update(context) - - LOG.debug("Disassociate qos_spec: %s from all.", id) - - try: - qos_specs.disassociate_all(context, id) - notifier_info = dict(id=id) - rpc.get_notifier('QoSSpecs').info(context, - 'qos_specs.disassociate_all', - notifier_info) - except exception.QoSSpecsNotFound as err: - notifier_err = dict(id=id, error_message=err) - self._notify_qos_specs_error(context, - 'qos_specs.disassociate_all', - notifier_err) - # Not found exception will be handled at the wsgi level - raise - except exception.QoSSpecsDisassociateFailed as err: - notifier_err = dict(id=id, error_message=err) - self._notify_qos_specs_error(context, - 'qos_specs.disassociate_all', - notifier_err) - raise webob.exc.HTTPInternalServerError( - explanation=six.text_type(err)) - - return webob.Response(status_int=http_client.ACCEPTED) - - -class Qos_specs_manage(extensions.ExtensionDescriptor): - """QoS specs support.""" - - name = "Qos_specs_manage" - alias = "qos-specs" - updated = "2013-08-02T00:00:00+00:00" - - def get_resources(self): - resources = [] - res = extensions.ResourceExtension( - Qos_specs_manage.alias, - QoSSpecsController(), - member_actions={"associations": "GET", - "associate": "GET", - "disassociate": "GET", - "disassociate_all": "GET", - "delete_keys": "PUT"}) - - resources.append(res) - - return resources diff --git a/cinder/api/contrib/quota_classes.py b/cinder/api/contrib/quota_classes.py deleted file mode 100644 index bffd1a56f..000000000 --- a/cinder/api/contrib/quota_classes.py +++ /dev/null @@ -1,100 +0,0 @@ -# Copyright 2012 OpenStack Foundation -# All Rights Reserved. 
-# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import webob - -from cinder.api import extensions -from cinder.api.openstack import wsgi -from cinder import db -from cinder import exception -from cinder.i18n import _ -from cinder import quota -from cinder import utils - - -QUOTAS = quota.QUOTAS -GROUP_QUOTAS = quota.GROUP_QUOTAS - - -authorize = extensions.extension_authorizer('volume', 'quota_classes') - - -class QuotaClassSetsController(wsgi.Controller): - - def _format_quota_set(self, quota_class, quota_set): - """Convert the quota object to a result dict.""" - - quota_set['id'] = str(quota_class) - - return dict(quota_class_set=quota_set) - - def show(self, req, id): - context = req.environ['cinder.context'] - authorize(context) - try: - db.sqlalchemy.api.authorize_quota_class_context(context, id) - except exception.NotAuthorized: - raise webob.exc.HTTPForbidden() - quota_set = QUOTAS.get_class_quotas(context, id) - group_quota_set = GROUP_QUOTAS.get_class_quotas(context, id) - quota_set.update(group_quota_set) - - return self._format_quota_set(id, quota_set) - - def update(self, req, id, body): - context = req.environ['cinder.context'] - authorize(context) - self.validate_string_length(id, 'quota_class_name', - min_length=1, max_length=255) - - quota_class = id - if not self.is_valid_body(body, 'quota_class_set'): - msg = (_("Missing required element quota_class_set" - " in request body.")) - raise webob.exc.HTTPBadRequest(explanation=msg) - - for key, value in 
body['quota_class_set'].items(): - if key in QUOTAS or key in GROUP_QUOTAS: - try: - value = utils.validate_integer(value, key, min_value=-1, - max_value=db.MAX_INT) - db.quota_class_update(context, quota_class, key, value) - except exception.QuotaClassNotFound: - db.quota_class_create(context, quota_class, key, value) - except exception.AdminRequired: - raise webob.exc.HTTPForbidden() - - quota_set = QUOTAS.get_class_quotas(context, quota_class) - group_quota_set = GROUP_QUOTAS.get_class_quotas(context, quota_class) - quota_set.update(group_quota_set) - - return {'quota_class_set': quota_set} - - -class Quota_classes(extensions.ExtensionDescriptor): - """Quota classes management support.""" - - name = "QuotaClasses" - alias = "os-quota-class-sets" - updated = "2012-03-12T00:00:00+00:00" - - def get_resources(self): - resources = [] - - res = extensions.ResourceExtension('os-quota-class-sets', - QuotaClassSetsController()) - resources.append(res) - - return resources diff --git a/cinder/api/contrib/quotas.py b/cinder/api/contrib/quotas.py deleted file mode 100644 index 334426eed..000000000 --- a/cinder/api/contrib/quotas.py +++ /dev/null @@ -1,461 +0,0 @@ -# Copyright 2011 OpenStack Foundation -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -import webob - -from oslo_utils import strutils - -from cinder.api import extensions -from cinder.api.openstack import wsgi -from cinder import db -from cinder.db.sqlalchemy import api as sqlalchemy_api -from cinder import exception -from cinder.i18n import _ -from cinder import quota -from cinder import quota_utils -from cinder import utils - -QUOTAS = quota.QUOTAS -GROUP_QUOTAS = quota.GROUP_QUOTAS -NON_QUOTA_KEYS = ['tenant_id', 'id'] - -authorize_update = extensions.extension_authorizer('volume', 'quotas:update') -authorize_show = extensions.extension_authorizer('volume', 'quotas:show') -authorize_delete = extensions.extension_authorizer('volume', 'quotas:delete') - - -class QuotaSetsController(wsgi.Controller): - - def _format_quota_set(self, project_id, quota_set): - """Convert the quota object to a result dict.""" - - quota_set['id'] = str(project_id) - - return dict(quota_set=quota_set) - - def _validate_existing_resource(self, key, value, quota_values): - # -1 limit will always be greater than the existing value - if key == 'per_volume_gigabytes' or value == -1: - return - v = quota_values.get(key, {}) - used = (v.get('in_use', 0) + v.get('reserved', 0)) - if QUOTAS.using_nested_quotas(): - used += v.get('allocated', 0) - if value < used: - msg = (_("Quota %(key)s limit must be equal or greater than " - "existing resources. 
Current usage is %(usage)s " - "and the requested limit is %(limit)s.") - % {'key': key, - 'usage': used, - 'limit': value}) - raise webob.exc.HTTPBadRequest(explanation=msg) - - def _get_quotas(self, context, id, usages=False): - values = QUOTAS.get_project_quotas(context, id, usages=usages) - group_values = GROUP_QUOTAS.get_project_quotas(context, id, - usages=usages) - values.update(group_values) - - if usages: - return values - else: - return {k: v['limit'] for k, v in values.items()} - - def _authorize_update_or_delete(self, context_project, - target_project_id, - parent_id): - """Checks if update or delete are allowed in the current hierarchy. - - With hierarchical projects, only the admin of the parent or the root - project has privilege to perform quota update and delete operations. - - :param context_project: The project in which the user is scoped to. - :param target_project_id: The id of the project in which the - user want to perform an update or - delete operation. - :param parent_id: The parent id of the project in which the user - want to perform an update or delete operation. - """ - if context_project.is_admin_project: - # The calling project has admin privileges and should be able - # to operate on all quotas. 
- return - if context_project.parent_id and parent_id != context_project.id: - msg = _("Update and delete quota operations can only be made " - "by an admin of immediate parent or by the CLOUD admin.") - raise webob.exc.HTTPForbidden(explanation=msg) - - if context_project.id != target_project_id: - if not self._is_descendant(target_project_id, - context_project.subtree): - msg = _("Update and delete quota operations can only be made " - "to projects in the same hierarchy of the project in " - "which users are scoped to.") - raise webob.exc.HTTPForbidden(explanation=msg) - else: - msg = _("Update and delete quota operations can only be made " - "by an admin of immediate parent or by the CLOUD admin.") - raise webob.exc.HTTPForbidden(explanation=msg) - - def _authorize_show(self, context_project, target_project): - """Checks if show is allowed in the current hierarchy. - - With hierarchical projects, users are allowed to perform a quota show - operation if they have the cloud admin role or if they belong to at - least one of the following projects: the target project, its immediate - parent project, or the root project of its hierarchy. - - :param context_project: The project in which the user - is scoped to. - :param target_project: The project in which the user wants - to perform a show operation. - """ - if context_project.is_admin_project: - # The calling project has admin privileges and should be able - # to view all quotas. 
- return - if target_project.parent_id: - if target_project.id != context_project.id: - if not self._is_descendant(target_project.id, - context_project.subtree): - msg = _("Show operations can only be made to projects in " - "the same hierarchy of the project in which users " - "are scoped to.") - raise webob.exc.HTTPForbidden(explanation=msg) - if context_project.id != target_project.parent_id: - if context_project.parent_id: - msg = _("Only users with token scoped to immediate " - "parents or root projects are allowed to see " - "its children quotas.") - raise webob.exc.HTTPForbidden(explanation=msg) - elif context_project.parent_id: - msg = _("An user with a token scoped to a subproject is not " - "allowed to see the quota of its parents.") - raise webob.exc.HTTPForbidden(explanation=msg) - - def _is_descendant(self, target_project_id, subtree): - if subtree is not None: - for key, value in subtree.items(): - if key == target_project_id: - return True - if self._is_descendant(target_project_id, value): - return True - return False - - def show(self, req, id): - """Show quota for a particular tenant - - This works for hierarchical and non-hierarchical projects. For - hierarchical projects admin of current project, immediate - parent of the project or the CLOUD admin are able to perform - a show. - - :param req: request - :param id: target project id that needs to be shown - """ - context = req.environ['cinder.context'] - authorize_show(context) - params = req.params - target_project_id = id - - if not hasattr(params, '__call__') and 'usage' in params: - usage = utils.get_bool_param('usage', params) - else: - usage = False - - if QUOTAS.using_nested_quotas(): - # With hierarchical projects, only the admin of the current project - # or the root project has privilege to perform quota show - # operations. 
- target_project = quota_utils.get_project_hierarchy( - context, target_project_id) - context_project = quota_utils.get_project_hierarchy( - context, context.project_id, subtree_as_ids=True, - is_admin_project=context.is_admin) - - self._authorize_show(context_project, target_project) - - try: - sqlalchemy_api.authorize_project_context(context, - target_project_id) - except exception.NotAuthorized: - raise webob.exc.HTTPForbidden() - - quotas = self._get_quotas(context, target_project_id, usage) - return self._format_quota_set(target_project_id, quotas) - - def update(self, req, id, body): - """Update Quota for a particular tenant - - This works for hierarchical and non-hierarchical projects. For - hierarchical projects only immediate parent admin or the - CLOUD admin are able to perform an update. - - :param req: request - :param id: target project id that needs to be updated - :param body: key, value pair that will be applied to - the resources if the update succeeds - """ - context = req.environ['cinder.context'] - authorize_update(context) - self.validate_string_length(id, 'quota_set_name', - min_length=1, max_length=255) - - self.assert_valid_body(body, 'quota_set') - - # Get the optional argument 'skip_validation' from body, - # if skip_validation is False, then validate existing resource. - skip_flag = body.get('skip_validation', True) - if not strutils.is_valid_boolstr(skip_flag): - msg = _("Invalid value '%s' for skip_validation.") % skip_flag - raise exception.InvalidParameterValue(err=msg) - skip_flag = strutils.bool_from_string(skip_flag) - - target_project_id = id - bad_keys = [] - - # NOTE(ankit): Pass #1 - In this loop for body['quota_set'].items(), - # we figure out if we have any bad keys. 
- for key, value in body['quota_set'].items(): - if (key not in QUOTAS and key not in GROUP_QUOTAS and key not in - NON_QUOTA_KEYS): - bad_keys.append(key) - continue - - if len(bad_keys) > 0: - msg = _("Bad key(s) in quota set: %s") % ",".join(bad_keys) - raise webob.exc.HTTPBadRequest(explanation=msg) - - # Saving off this value since we need to use it multiple times - use_nested_quotas = QUOTAS.using_nested_quotas() - if use_nested_quotas: - # Get the parent_id of the target project to verify whether we are - # dealing with hierarchical namespace or non-hierarchical namespace - target_project = quota_utils.get_project_hierarchy( - context, target_project_id, parents_as_ids=True) - parent_id = target_project.parent_id - - if parent_id: - # Get the children of the project which the token is scoped to - # in order to know if the target_project is in its hierarchy. - context_project = quota_utils.get_project_hierarchy( - context, context.project_id, subtree_as_ids=True, - is_admin_project=context.is_admin) - self._authorize_update_or_delete(context_project, - target_project.id, - parent_id) - - # NOTE(ankit): Pass #2 - In this loop for body['quota_set'].keys(), - # we validate the quota limits to ensure that we can bail out if - # any of the items in the set is bad. Meanwhile we validate value - # to ensure that the value can't be lower than number of existing - # resources. 
- quota_values = QUOTAS.get_project_quotas(context, target_project_id, - defaults=False) - group_quota_values = GROUP_QUOTAS.get_project_quotas(context, - target_project_id, - defaults=False) - quota_values.update(group_quota_values) - valid_quotas = {} - reservations = [] - for key in body['quota_set'].keys(): - if key in NON_QUOTA_KEYS: - continue - - value = utils.validate_integer( - body['quota_set'][key], key, min_value=-1, - max_value=db.MAX_INT) - - # Can't skip the validation of nested quotas since it could mess up - # hierarchy if parent limit is less than childrens' current usage - if not skip_flag or use_nested_quotas: - self._validate_existing_resource(key, value, quota_values) - - if use_nested_quotas: - try: - reservations += self._update_nested_quota_allocated( - context, target_project, quota_values, key, value) - except exception.OverQuota as e: - if reservations: - db.reservation_rollback(context, reservations) - raise webob.exc.HTTPBadRequest(explanation=e.msg) - - valid_quotas[key] = value - - # NOTE(ankit): Pass #3 - At this point we know that all the keys and - # values are valid and we can iterate and update them all in one shot - # without having to worry about rolling back etc as we have done - # the validation up front in the 2 loops above. 
- for key, value in valid_quotas.items(): - try: - db.quota_update(context, target_project_id, key, value) - except exception.ProjectQuotaNotFound: - db.quota_create(context, target_project_id, key, value) - except exception.AdminRequired: - raise webob.exc.HTTPForbidden() - - if reservations: - db.reservation_commit(context, reservations) - return {'quota_set': self._get_quotas(context, target_project_id)} - - def _get_quota_usage(self, quota_obj): - return (quota_obj.get('in_use', 0) + quota_obj.get('allocated', 0) + - quota_obj.get('reserved', 0)) - - def _update_nested_quota_allocated(self, ctxt, target_project, - target_project_quotas, res, new_limit): - reservations = [] - # per_volume_gigabytes doesn't make sense to nest - if res == "per_volume_gigabytes": - return reservations - - quota_for_res = target_project_quotas.get(res, {}) - orig_quota_from_target_proj = quota_for_res.get('limit', 0) - # If limit was -1, we were "taking" current child's usage from parent - if orig_quota_from_target_proj == -1: - orig_quota_from_target_proj = self._get_quota_usage(quota_for_res) - - new_quota_from_target_proj = new_limit - # If we set limit to -1, we will "take" the current usage from parent - if new_limit == -1: - new_quota_from_target_proj = self._get_quota_usage(quota_for_res) - - res_change = new_quota_from_target_proj - orig_quota_from_target_proj - if res_change != 0: - deltas = {res: res_change} - resources = QUOTAS.resources - resources.update(GROUP_QUOTAS.resources) - reservations += quota_utils.update_alloc_to_next_hard_limit( - ctxt, resources, deltas, res, None, target_project.id) - - return reservations - - def defaults(self, req, id): - context = req.environ['cinder.context'] - authorize_show(context) - defaults = QUOTAS.get_defaults(context, project_id=id) - group_defaults = GROUP_QUOTAS.get_defaults(context, project_id=id) - defaults.update(group_defaults) - return self._format_quota_set(id, defaults) - - def delete(self, req, id): - """Delete Quota 
for a particular tenant. - - This works for hierarchical and non-hierarchical projects. For - hierarchical projects only immediate parent admin or the - CLOUD admin are able to perform a delete. - - :param req: request - :param id: target project id that needs to be deleted - """ - context = req.environ['cinder.context'] - authorize_delete(context) - - if QUOTAS.using_nested_quotas(): - self._delete_nested_quota(context, id) - else: - try: - db.quota_destroy_by_project(context, id) - except exception.AdminRequired: - raise webob.exc.HTTPForbidden() - - def _delete_nested_quota(self, ctxt, proj_id): - # Get the parent_id of the target project to verify whether we are - # dealing with hierarchical namespace or non-hierarchical - # namespace. - try: - project_quotas = QUOTAS.get_project_quotas( - ctxt, proj_id, usages=True, defaults=False) - project_group_quotas = GROUP_QUOTAS.get_project_quotas( - ctxt, proj_id, usages=True, defaults=False) - project_quotas.update(project_group_quotas) - except exception.NotAuthorized: - raise webob.exc.HTTPForbidden() - - target_project = quota_utils.get_project_hierarchy( - ctxt, proj_id) - parent_id = target_project.parent_id - if parent_id: - # Get the children of the project which the token is scoped to - # in order to know if the target_project is in its hierarchy. - context_project = quota_utils.get_project_hierarchy( - ctxt, ctxt.project_id, subtree_as_ids=True) - self._authorize_update_or_delete(context_project, - target_project.id, - parent_id) - - defaults = QUOTAS.get_defaults(ctxt, proj_id) - defaults.update(GROUP_QUOTAS.get_defaults(ctxt, proj_id)) - # If the project which is being deleted has allocated part of its - # quota to its subprojects, then subprojects' quotas should be - # deleted first. - for res, value in project_quotas.items(): - if 'allocated' in project_quotas[res].keys(): - if project_quotas[res]['allocated'] > 0: - msg = _("About to delete child projects having " - "non-zero quota. 
This should not be performed") - raise webob.exc.HTTPBadRequest(explanation=msg) - # Ensure quota usage wouldn't exceed limit on a delete - self._validate_existing_resource( - res, defaults[res], project_quotas) - - try: - db.quota_destroy_by_project(ctxt, target_project.id) - except exception.AdminRequired: - raise webob.exc.HTTPForbidden() - - for res, limit in project_quotas.items(): - # Update child limit to 0 so the parent hierarchy gets it's - # allocated values updated properly - self._update_nested_quota_allocated( - ctxt, target_project, project_quotas, res, 0) - - def validate_setup_for_nested_quota_use(self, req): - """Validates that the setup supports using nested quotas. - - Ensures that Keystone v3 or greater is being used, and that the - existing quotas make sense to nest in the current hierarchy (e.g. that - no child quota would be larger than it's parent). - """ - ctxt = req.environ['cinder.context'] - params = req.params - try: - resources = QUOTAS.resources - resources.update(GROUP_QUOTAS.resources) - - quota_utils.validate_setup_for_nested_quota_use( - ctxt, resources, quota.NestedDbQuotaDriver(), - fix_allocated_quotas=params.get('fix_allocated_quotas')) - except exception.InvalidNestedQuotaSetup as e: - raise webob.exc.HTTPBadRequest(explanation=e.msg) - - -class Quotas(extensions.ExtensionDescriptor): - """Quota management support.""" - - name = "Quotas" - alias = "os-quota-sets" - updated = "2011-08-08T00:00:00+00:00" - - def get_resources(self): - resources = [] - - res = extensions.ResourceExtension( - 'os-quota-sets', QuotaSetsController(), - member_actions={'defaults': 'GET'}, - collection_actions={'validate_setup_for_nested_quota_use': 'GET'}) - resources.append(res) - - return resources diff --git a/cinder/api/contrib/resource_common_manage.py b/cinder/api/contrib/resource_common_manage.py deleted file mode 100644 index 013965168..000000000 --- a/cinder/api/contrib/resource_common_manage.py +++ /dev/null @@ -1,60 +0,0 @@ -# Copyright 
(c) 2016 Stratoscale, Ltd. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import oslo_messaging as messaging - -from cinder.api import common -from cinder import exception -from cinder.i18n import _ - - -def get_manageable_resources(req, is_detail, function_get_manageable, - view_builder): - context = req.environ['cinder.context'] - params = req.params.copy() - cluster_name, host = common.get_cluster_host(req, params, '3.17') - marker, limit, offset = common.get_pagination_params(params) - sort_keys, sort_dirs = common.get_sort_params(params, - default_key='reference') - - # These parameters are generally validated at the DB layer, but in this - # case sorting is not done by the DB - valid_sort_keys = ('reference', 'size') - invalid_keys = [key for key in sort_keys if key not in valid_sort_keys] - if invalid_keys: - msg = _("Invalid sort keys passed: %s") % ', '.join(invalid_keys) - raise exception.InvalidParameterValue(err=msg) - valid_sort_dirs = ('asc', 'desc') - invalid_dirs = [d for d in sort_dirs if d not in valid_sort_dirs] - if invalid_dirs: - msg = _("Invalid sort dirs passed: %s") % ', '.join(invalid_dirs) - raise exception.InvalidParameterValue(err=msg) - - try: - resources = function_get_manageable(context, host, cluster_name, - marker=marker, limit=limit, - offset=offset, sort_keys=sort_keys, - sort_dirs=sort_dirs) - except messaging.RemoteError as err: - if err.exc_type == "InvalidInput": - raise exception.InvalidInput(err.value) - raise - - 
resource_count = len(resources) - - if is_detail: - resources = view_builder.detail_list(req, resources, resource_count) - else: - resources = view_builder.summary_list(req, resources, resource_count) - return resources diff --git a/cinder/api/contrib/scheduler_hints.py b/cinder/api/contrib/scheduler_hints.py deleted file mode 100644 index 1bae13862..000000000 --- a/cinder/api/contrib/scheduler_hints.py +++ /dev/null @@ -1,57 +0,0 @@ -# Copyright 2013 OpenStack Foundation -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -import webob.exc - -from cinder.api import extensions -from cinder.api.openstack import wsgi -from cinder.i18n import _ - - -class SchedulerHintsController(wsgi.Controller): - - @staticmethod - def _extract_scheduler_hints(body): - hints = {} - - attr = '%s:scheduler_hints' % Scheduler_hints.alias - try: - if attr in body: - hints.update(body[attr]) - except ValueError: - msg = _("Malformed scheduler_hints attribute") - raise webob.exc.HTTPBadRequest(explanation=msg) - - return hints - - @wsgi.extends - def create(self, req, body): - hints = self._extract_scheduler_hints(body) - - if 'volume' in body: - body['volume']['scheduler_hints'] = hints - yield - - -class Scheduler_hints(extensions.ExtensionDescriptor): - """Pass arbitrary key/value pairs to the scheduler.""" - - name = "SchedulerHints" - alias = "OS-SCH-HNT" - updated = "2013-04-18T00:00:00+00:00" - - def get_controller_extensions(self): - controller = SchedulerHintsController() - ext = extensions.ControllerExtension(self, 'volumes', controller) - return [ext] diff --git a/cinder/api/contrib/scheduler_stats.py b/cinder/api/contrib/scheduler_stats.py deleted file mode 100644 index 875d42ca5..000000000 --- a/cinder/api/contrib/scheduler_stats.py +++ /dev/null @@ -1,87 +0,0 @@ -# Copyright (c) 2014 eBay Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -"""The Scheduler Stats extension""" - -from cinder.api import common -from cinder.api import extensions -from cinder.api.openstack import wsgi -from cinder.api.views import scheduler_stats as scheduler_stats_view -from cinder.scheduler import rpcapi -from cinder import utils - -GET_POOL_NAME_FILTER_MICRO_VERSION = '3.28' -GET_POOL_VOLUME_TYPE_FILTER_MICRO_VERSION = '3.35' - - -def authorize(context, action_name): - action = 'scheduler_stats:%s' % action_name - extensions.extension_authorizer('scheduler', action)(context) - - -class SchedulerStatsController(wsgi.Controller): - """The Scheduler Stats controller for the OpenStack API.""" - - _view_builder_class = scheduler_stats_view.ViewBuilder - - def __init__(self): - self.scheduler_api = rpcapi.SchedulerAPI() - super(SchedulerStatsController, self).__init__() - - @common.process_general_filtering('pool') - def _process_pool_filtering(self, context=None, filters=None, - req_version=None): - if not req_version.matches(GET_POOL_NAME_FILTER_MICRO_VERSION): - filters.clear() - - def get_pools(self, req): - """List all active pools in scheduler.""" - context = req.environ['cinder.context'] - authorize(context, 'get_pools') - - detail = utils.get_bool_param('detail', req.params) - - req_version = req.api_version_request - filters = req.params.copy() - filters.pop('detail', None) - - self._process_pool_filtering(context=context, - filters=filters, - req_version=req_version) - - if not req_version.matches(GET_POOL_VOLUME_TYPE_FILTER_MICRO_VERSION): - filters.pop('volume_type', None) - - pools = self.scheduler_api.get_pools(context, filters=filters) - - return self._view_builder.pools(req, pools, detail) - - -class Scheduler_stats(extensions.ExtensionDescriptor): - """Scheduler stats support.""" - - name = "Scheduler_stats" - alias = "scheduler-stats" - updated = "2014-09-07T00:00:00+00:00" - - def get_resources(self): - resources = [] - res = extensions.ResourceExtension( - Scheduler_stats.alias, - 
SchedulerStatsController(), - collection_actions={"get_pools": "GET"}) - - resources.append(res) - - return resources diff --git a/cinder/api/contrib/services.py b/cinder/api/contrib/services.py deleted file mode 100644 index 4a7866537..000000000 --- a/cinder/api/contrib/services.py +++ /dev/null @@ -1,296 +0,0 @@ -# Copyright 2012 IBM Corp. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - - -from oslo_config import cfg -from oslo_log import log as logging -from oslo_utils import timeutils -from six.moves import http_client -import webob.exc - -from cinder.api import common -from cinder.api import extensions -from cinder.api.openstack import wsgi -from cinder.backup import rpcapi as backup_rpcapi -from cinder.common import constants -from cinder import exception -from cinder.i18n import _ -from cinder import objects -from cinder.scheduler import rpcapi as scheduler_rpcapi -from cinder import utils -from cinder import volume -from cinder.volume import rpcapi as volume_rpcapi - - -CONF = cfg.CONF - -LOG = logging.getLogger(__name__) -authorize = extensions.extension_authorizer('volume', 'services') - - -class ServiceController(wsgi.Controller): - LOG_BINARIES = (constants.SCHEDULER_BINARY, constants.VOLUME_BINARY, - constants.BACKUP_BINARY, constants.API_BINARY) - - def __init__(self, ext_mgr=None): - self.ext_mgr = ext_mgr - super(ServiceController, self).__init__() - self.volume_api = volume.API() - self.rpc_apis = { - 
constants.SCHEDULER_BINARY: scheduler_rpcapi.SchedulerAPI(), - constants.VOLUME_BINARY: volume_rpcapi.VolumeAPI(), - constants.BACKUP_BINARY: backup_rpcapi.BackupAPI(), - } - - def index(self, req): - """Return a list of all running services. - - Filter by host & service name. - """ - context = req.environ['cinder.context'] - authorize(context, action='index') - detailed = self.ext_mgr.is_loaded('os-extended-services') - now = timeutils.utcnow(with_timezone=True) - - filters = {} - - if 'host' in req.GET: - filters['host'] = req.GET['host'] - if 'binary' in req.GET: - filters['binary'] = req.GET['binary'] - - services = objects.ServiceList.get_all(context, filters) - - svcs = [] - for svc in services: - updated_at = svc.updated_at - delta = now - (svc.updated_at or svc.created_at) - delta_sec = delta.total_seconds() - if svc.modified_at: - delta_mod = now - svc.modified_at - if abs(delta_sec) >= abs(delta_mod.total_seconds()): - updated_at = svc.modified_at - alive = abs(delta_sec) <= CONF.service_down_time - art = "up" if alive else "down" - active = 'enabled' - if svc.disabled: - active = 'disabled' - if updated_at: - updated_at = timeutils.normalize_time(updated_at) - ret_fields = {'binary': svc.binary, 'host': svc.host, - 'zone': svc.availability_zone, - 'status': active, 'state': art, - 'updated_at': updated_at} - - # On V3.7 we added cluster support - if req.api_version_request.matches('3.7'): - ret_fields['cluster'] = svc.cluster_name - - if detailed: - ret_fields['disabled_reason'] = svc.disabled_reason - if svc.binary == "cinder-volume": - ret_fields['replication_status'] = svc.replication_status - ret_fields['active_backend_id'] = svc.active_backend_id - ret_fields['frozen'] = svc.frozen - svcs.append(ret_fields) - return {'services': svcs} - - def _is_valid_as_reason(self, reason): - if not reason: - return False - try: - utils.check_string_length(reason, 'Disabled reason', min_length=1, - max_length=255, allow_all_spaces=False) - except 
exception.InvalidInput: - return False - - return True - - def _volume_api_proxy(self, fun, *args): - try: - return fun(*args) - except exception.ServiceNotFound as ex: - raise exception.InvalidInput(ex.msg) - - def _freeze(self, context, req, body): - cluster_name, host = common.get_cluster_host(req, body, '3.26') - return self._volume_api_proxy(self.volume_api.freeze_host, context, - host, cluster_name) - - def _thaw(self, context, req, body): - cluster_name, host = common.get_cluster_host(req, body, '3.26') - return self._volume_api_proxy(self.volume_api.thaw_host, context, - host, cluster_name) - - def _failover(self, context, req, body, clustered): - # We set version to None to always get the cluster name from the body, - # to False when we don't want to get it, and '3.26' when we only want - # it if the requested version is 3.26 or higher. - version = '3.26' if clustered else False - cluster_name, host = common.get_cluster_host(req, body, version) - self._volume_api_proxy(self.volume_api.failover, context, host, - cluster_name, body.get('backend_id')) - return webob.Response(status_int=http_client.ACCEPTED) - - def _log_params_binaries_services(self, context, body): - """Get binaries and services referred by given log set/get request.""" - query_filters = {'is_up': True} - - binary = body.get('binary') - if binary in ('*', None, ''): - binaries = self.LOG_BINARIES - elif binary == constants.API_BINARY: - return [binary], [] - elif binary in self.LOG_BINARIES: - binaries = [binary] - query_filters['binary'] = binary - else: - raise exception.InvalidInput(reason=_('%s is not a valid binary.') - % binary) - - server = body.get('server') - if server: - query_filters['host_or_cluster'] = server - services = objects.ServiceList.get_all(context, filters=query_filters) - - return binaries, services - - def _set_log(self, context, body): - """Set log levels of services dynamically.""" - prefix = body.get('prefix') - level = body.get('level') - # Validate log level - 
utils.get_log_method(level) - - binaries, services = self._log_params_binaries_services(context, body) - - log_req = objects.LogLevel(context, prefix=prefix, level=level) - - if constants.API_BINARY in binaries: - utils.set_log_levels(prefix, level) - for service in services: - self.rpc_apis[service.binary].set_log_levels(context, - service, log_req) - - return webob.Response(status_int=202) - - def _get_log(self, context, body): - """Get current log levels for services.""" - prefix = body.get('prefix') - binaries, services = self._log_params_binaries_services(context, body) - - result = [] - - log_req = objects.LogLevel(context, prefix=prefix) - - if constants.API_BINARY in binaries: - levels = utils.get_log_levels(prefix) - result.append({'host': CONF.host, - 'binary': constants.API_BINARY, - 'levels': levels}) - for service in services: - levels = self.rpc_apis[service.binary].get_log_levels(context, - service, - log_req) - result.append({'host': service.host, - 'binary': service.binary, - 'levels': {l.prefix: l.level for l in levels}}) - - return {'log_levels': result} - - def update(self, req, id, body): - """Enable/Disable scheduling for a service. - - Includes Freeze/Thaw which sends call down to drivers - and allows volume.manager for the specified host to - disable the service rather than accessing the service - directly in this API layer. 
- """ - context = req.environ['cinder.context'] - authorize(context, action='update') - - support_dynamic_log = req.api_version_request.matches('3.32') - - ext_loaded = self.ext_mgr.is_loaded('os-extended-services') - ret_val = {} - if id == "enable": - disabled = False - status = "enabled" - if ext_loaded: - ret_val['disabled_reason'] = None - elif (id == "disable" or - (id == "disable-log-reason" and ext_loaded)): - disabled = True - status = "disabled" - elif id == "freeze": - return self._freeze(context, req, body) - elif id == "thaw": - return self._thaw(context, req, body) - elif id == "failover_host": - return self._failover(context, req, body, False) - elif req.api_version_request.matches('3.26') and id == 'failover': - return self._failover(context, req, body, True) - elif support_dynamic_log and id == 'set-log': - return self._set_log(context, body) - elif support_dynamic_log and id == 'get-log': - return self._get_log(context, body) - else: - raise exception.InvalidInput(reason=_("Unknown action")) - - host = common.get_cluster_host(req, body, False)[1] - - ret_val['disabled'] = disabled - if id == "disable-log-reason" and ext_loaded: - reason = body.get('disabled_reason') - if not self._is_valid_as_reason(reason): - msg = _('Disabled reason contains invalid characters ' - 'or is too long') - raise webob.exc.HTTPBadRequest(explanation=msg) - ret_val['disabled_reason'] = reason - - # NOTE(uni): deprecating service request key, binary takes precedence - # Still keeping service key here for API compatibility sake. 
- service = body.get('service', '') - binary = body.get('binary', '') - binary_key = binary or service - if not binary_key: - raise webob.exc.HTTPBadRequest() - - # Not found exception will be handled at the wsgi level - svc = objects.Service.get_by_args(context, host, binary_key) - - svc.disabled = ret_val['disabled'] - if 'disabled_reason' in ret_val: - svc.disabled_reason = ret_val['disabled_reason'] - svc.save() - - ret_val.update({'host': host, 'service': service, - 'binary': binary, 'status': status}) - return ret_val - - -class Services(extensions.ExtensionDescriptor): - """Services support.""" - - name = "Services" - alias = "os-services" - updated = "2012-10-28T00:00:00-00:00" - - def get_resources(self): - resources = [] - controller = ServiceController(self.ext_mgr) - resource = extensions.ResourceExtension('os-services', controller) - resources.append(resource) - return resources diff --git a/cinder/api/contrib/snapshot_actions.py b/cinder/api/contrib/snapshot_actions.py deleted file mode 100644 index fd8cb7a01..000000000 --- a/cinder/api/contrib/snapshot_actions.py +++ /dev/null @@ -1,117 +0,0 @@ -# Copyright 2013, Red Hat, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -from oslo_log import log as logging -from six.moves import http_client -import webob - -from cinder.api import extensions -from cinder.api.openstack import wsgi -from cinder.i18n import _ -from cinder import objects -from cinder.objects import fields - -LOG = logging.getLogger(__name__) - - -def authorize(context, action_name): - action = 'snapshot_actions:%s' % action_name - extensions.extension_authorizer('snapshot', action)(context) - - -class SnapshotActionsController(wsgi.Controller): - def __init__(self, *args, **kwargs): - super(SnapshotActionsController, self).__init__(*args, **kwargs) - LOG.debug("SnapshotActionsController initialized") - - @wsgi.action('os-update_snapshot_status') - def _update_snapshot_status(self, req, id, body): - """Update database fields related to status of a snapshot. - - Intended for creation of snapshots, so snapshot state - must start as 'creating' and be changed to 'available', - 'creating', or 'error'. - """ - - context = req.environ['cinder.context'] - authorize(context, 'update_snapshot_status') - - LOG.debug("body: %s", body) - try: - status = body['os-update_snapshot_status']['status'] - except KeyError: - msg = _("'status' must be specified.") - raise webob.exc.HTTPBadRequest(explanation=msg) - - # Allowed state transitions - status_map = {fields.SnapshotStatus.CREATING: - [fields.SnapshotStatus.CREATING, - fields.SnapshotStatus.AVAILABLE, - fields.SnapshotStatus.ERROR], - fields.SnapshotStatus.DELETING: - [fields.SnapshotStatus.DELETING, - fields.SnapshotStatus.ERROR_DELETING]} - - current_snapshot = objects.Snapshot.get_by_id(context, id) - - if current_snapshot.status not in status_map: - msg = _("Snapshot status %(cur)s not allowed for " - "update_snapshot_status") % { - 'cur': current_snapshot.status} - raise webob.exc.HTTPBadRequest(explanation=msg) - - if status not in status_map[current_snapshot.status]: - msg = _("Provided snapshot status %(provided)s not allowed for " - "snapshot with status %(current)s.") % 
\ - {'provided': status, - 'current': current_snapshot.status} - raise webob.exc.HTTPBadRequest(explanation=msg) - - update_dict = {'id': id, - 'status': status} - - progress = body['os-update_snapshot_status'].get('progress', None) - if progress: - # This is expected to be a string like '73%' - msg = _('progress must be an integer percentage') - try: - integer = int(progress[:-1]) - except ValueError: - raise webob.exc.HTTPBadRequest(explanation=msg) - if integer < 0 or integer > 100 or progress[-1] != '%': - raise webob.exc.HTTPBadRequest(explanation=msg) - - update_dict.update({'progress': progress}) - - LOG.info("Updating snapshot %(id)s with info %(dict)s", - {'id': id, 'dict': update_dict}) - - current_snapshot.update(update_dict) - current_snapshot.save() - return webob.Response(status_int=http_client.ACCEPTED) - - -class Snapshot_actions(extensions.ExtensionDescriptor): - """Enable snapshot manager actions.""" - - name = "SnapshotActions" - alias = "os-snapshot-actions" - updated = "2013-07-16T00:00:00+00:00" - - def get_controller_extensions(self): - controller = SnapshotActionsController() - extension = extensions.ControllerExtension(self, - 'snapshots', - controller) - return [extension] diff --git a/cinder/api/contrib/snapshot_manage.py b/cinder/api/contrib/snapshot_manage.py deleted file mode 100644 index 842156996..000000000 --- a/cinder/api/contrib/snapshot_manage.py +++ /dev/null @@ -1,160 +0,0 @@ -# Copyright 2015 Huawei Technologies Co., Ltd. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the -# License for the specific language governing permissions and limitations -# under the License. - -from oslo_log import log as logging -from six.moves import http_client -from webob import exc - -from cinder.api.contrib import resource_common_manage -from cinder.api import extensions -from cinder.api.openstack import wsgi -from cinder.api.views import manageable_snapshots as list_manageable_view -from cinder.api.views import snapshots as snapshot_views -from cinder.i18n import _ -from cinder import volume as cinder_volume - -LOG = logging.getLogger(__name__) -authorize_manage = extensions.extension_authorizer('snapshot', - 'snapshot_manage') -authorize_list_manageable = extensions.extension_authorizer('snapshot', - 'list_manageable') - - -class SnapshotManageController(wsgi.Controller): - """The /os-snapshot-manage controller for the OpenStack API.""" - - _view_builder_class = snapshot_views.ViewBuilder - - def __init__(self, *args, **kwargs): - super(SnapshotManageController, self).__init__(*args, **kwargs) - self.volume_api = cinder_volume.API() - self._list_manageable_view = list_manageable_view.ViewBuilder() - - @wsgi.response(http_client.ACCEPTED) - def create(self, req, body): - """Instruct Cinder to manage a storage snapshot object. - - Manages an existing backend storage snapshot object (e.g. a Linux - logical volume or a SAN disk) by creating the Cinder objects required - to manage it, and possibly renaming the backend storage snapshot object - (driver dependent). - - From an API perspective, this operation behaves very much like a - snapshot creation operation. - - Required HTTP Body: - - .. code-block:: json - - { - "snapshot": - { - "volume_id": "", - "ref": - "" - } - } - - See the appropriate Cinder drivers' implementations of the - manage_snapshot method to find out the accepted format of 'ref'. - For example,in LVM driver, it will be the logic volume name of snapshot - which you want to manage. 
- - This API call will return with an error if any of the above elements - are missing from the request, or if the 'volume_id' element refers to - a cinder volume that could not be found. - - The snapshot will later enter the error state if it is discovered that - 'ref' is bad. - - Optional elements to 'snapshot' are:: - - name A name for the new snapshot. - description A description for the new snapshot. - metadata Key/value pairs to be associated with the new snapshot. - - """ - context = req.environ['cinder.context'] - authorize_manage(context) - - if not self.is_valid_body(body, 'snapshot'): - msg = _("Missing required element snapshot in request body.") - raise exc.HTTPBadRequest(explanation=msg) - - snapshot = body['snapshot'] - - # Check that the required keys are present, return an error if they - # are not. - required_keys = ('ref', 'volume_id') - missing_keys = set(required_keys) - set(snapshot.keys()) - - if missing_keys: - msg = _("The following elements are required: " - "%s") % ', '.join(missing_keys) - raise exc.HTTPBadRequest(explanation=msg) - - # Check whether volume exists - volume_id = snapshot['volume_id'] - # Not found exception will be handled at the wsgi level - volume = self.volume_api.get(context, volume_id) - - LOG.debug('Manage snapshot request body: %s', body) - - snapshot_parameters = {} - - snapshot_parameters['metadata'] = snapshot.get('metadata', None) - snapshot_parameters['description'] = snapshot.get('description', None) - snapshot_parameters['name'] = snapshot.get('name') - - # Not found exception will be handled at the wsgi level - new_snapshot = self.volume_api.manage_existing_snapshot( - context, - snapshot['ref'], - volume, - **snapshot_parameters) - - return self._view_builder.detail(req, new_snapshot) - - @wsgi.extends - def index(self, req): - """Returns a summary list of snapshots available to manage.""" - context = req.environ['cinder.context'] - authorize_list_manageable(context) - return 
resource_common_manage.get_manageable_resources( - req, False, self.volume_api.get_manageable_snapshots, - self._list_manageable_view) - - @wsgi.extends - def detail(self, req): - """Returns a detailed list of snapshots available to manage.""" - context = req.environ['cinder.context'] - authorize_list_manageable(context) - return resource_common_manage.get_manageable_resources( - req, True, self.volume_api.get_manageable_snapshots, - self._list_manageable_view) - - -class Snapshot_manage(extensions.ExtensionDescriptor): - """Allows existing backend storage to be 'managed' by Cinder.""" - - name = 'SnapshotManage' - alias = 'os-snapshot-manage' - updated = '2014-12-31T00:00:00+00:00' - - def get_resources(self): - controller = SnapshotManageController() - return [extensions.ResourceExtension(Snapshot_manage.alias, - controller, - collection_actions= - {'detail': 'GET'})] diff --git a/cinder/api/contrib/snapshot_unmanage.py b/cinder/api/contrib/snapshot_unmanage.py deleted file mode 100644 index a594a458c..000000000 --- a/cinder/api/contrib/snapshot_unmanage.py +++ /dev/null @@ -1,74 +0,0 @@ -# Copyright 2015 Huawei Technologies Co., Ltd. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -from oslo_log import log as logging -from six.moves import http_client -import webob -from webob import exc - -from cinder.api import extensions -from cinder.api.openstack import wsgi -from cinder import exception -from cinder import volume - -LOG = logging.getLogger(__name__) -authorize = extensions.extension_authorizer('snapshot', 'snapshot_unmanage') - - -class SnapshotUnmanageController(wsgi.Controller): - def __init__(self, *args, **kwargs): - super(SnapshotUnmanageController, self).__init__(*args, **kwargs) - self.volume_api = volume.API() - - @wsgi.response(http_client.ACCEPTED) - @wsgi.action('os-unmanage') - def unmanage(self, req, id, body): - """Stop managing a snapshot. - - This action is very much like a delete, except that a different - method (unmanage) is called on the Cinder driver. This has the effect - of removing the snapshot from Cinder management without actually - removing the backend storage object associated with it. - - There are no required parameters. - - A Not Found error is returned if the specified snapshot does not exist. 
- """ - context = req.environ['cinder.context'] - authorize(context) - - LOG.info("Unmanage snapshot with id: %s", id) - - try: - snapshot = self.volume_api.get_snapshot(context, id) - self.volume_api.delete_snapshot(context, snapshot, - unmanage_only=True) - # Not found exception will be handled at the wsgi level - except exception.InvalidSnapshot as ex: - raise exc.HTTPBadRequest(explanation=ex.msg) - return webob.Response(status_int=http_client.ACCEPTED) - - -class Snapshot_unmanage(extensions.ExtensionDescriptor): - """Enable volume unmanage operation.""" - - name = "SnapshotUnmanage" - alias = "os-snapshot-unmanage" - updated = "2014-12-31T00:00:00+00:00" - - def get_controller_extensions(self): - controller = SnapshotUnmanageController() - extension = extensions.ControllerExtension(self, 'snapshots', - controller) - return [extension] diff --git a/cinder/api/contrib/types_extra_specs.py b/cinder/api/contrib/types_extra_specs.py deleted file mode 100644 index b09270a14..000000000 --- a/cinder/api/contrib/types_extra_specs.py +++ /dev/null @@ -1,205 +0,0 @@ -# Copyright (c) 2011 Zadara Storage Inc. -# Copyright (c) 2011 OpenStack Foundation -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -"""The volume types extra specs extension""" - -from oslo_config import cfg -from oslo_log import log as logging -from oslo_log import versionutils -from six.moves import http_client -import webob - -from cinder.api import common -from cinder.api import extensions -from cinder.api.openstack import wsgi -from cinder import context as ctxt -from cinder import db -from cinder import exception -from cinder.i18n import _ -from cinder import rpc -from cinder import utils -from cinder.volume import volume_types - -LOG = logging.getLogger(__name__) - -extraspec_opts = [ - cfg.BoolOpt('allow_inuse_volume_type_modification', - default=False, - deprecated_for_removal=True, - help="DEPRECATED: Allow the ability to modify the " - "extra-spec settings of an in-use volume-type."), - -] - -CONF = cfg.CONF -CONF.register_opts(extraspec_opts) - -authorize = extensions.extension_authorizer('volume', 'types_extra_specs') - - -class VolumeTypeExtraSpecsController(wsgi.Controller): - """The volume type extra specs API controller for the OpenStack API.""" - - def _get_extra_specs(self, context, type_id): - extra_specs = db.volume_type_extra_specs_get(context, type_id) - specs_dict = {} - for key, value in extra_specs.items(): - specs_dict[key] = value - return dict(extra_specs=specs_dict) - - def _check_type(self, context, type_id): - # Not found exception will be handled at the wsgi level - volume_types.get_volume_type(context, type_id) - - def index(self, req, type_id): - """Returns the list of extra specs for a given volume type.""" - context = req.environ['cinder.context'] - authorize(context, action="index") - self._check_type(context, type_id) - return self._get_extra_specs(context, type_id) - - def _allow_update(self, context, type_id): - if (not CONF.allow_inuse_volume_type_modification): - vols = db.volume_get_all( - ctxt.get_admin_context(), - limit=1, - filters={'volume_type_id': type_id}) - if len(vols): - expl = _('Volume Type is currently in use.') - raise 
webob.exc.HTTPBadRequest(explanation=expl) - else: - msg = ("The option 'allow_inuse_volume_type_modification' " - "is deprecated and will be removed in a future " - "release. The default behavior going forward will " - "be to disallow modificaton of in-use types.") - versionutils.report_deprecated_feature(LOG, msg) - return - - def create(self, req, type_id, body=None): - context = req.environ['cinder.context'] - authorize(context, action='create') - self._allow_update(context, type_id) - - self.assert_valid_body(body, 'extra_specs') - - self._check_type(context, type_id) - specs = body['extra_specs'] - self._check_key_names(specs.keys()) - utils.validate_dictionary_string_length(specs) - - db.volume_type_extra_specs_update_or_create(context, - type_id, - specs) - # Get created_at and updated_at for notification - volume_type = volume_types.get_volume_type(context, type_id) - notifier_info = dict(type_id=type_id, specs=specs, - created_at=volume_type['created_at'], - updated_at=volume_type['updated_at']) - notifier = rpc.get_notifier('volumeTypeExtraSpecs') - notifier.info(context, 'volume_type_extra_specs.create', - notifier_info) - return body - - def update(self, req, type_id, id, body=None): - context = req.environ['cinder.context'] - authorize(context, action='update') - self._allow_update(context, type_id) - - if not body: - expl = _('Request body empty') - raise webob.exc.HTTPBadRequest(explanation=expl) - self._check_type(context, type_id) - if id not in body: - expl = _('Request body and URI mismatch') - raise webob.exc.HTTPBadRequest(explanation=expl) - if len(body) > 1: - expl = _('Request body contains too many items') - raise webob.exc.HTTPBadRequest(explanation=expl) - self._check_key_names(body.keys()) - utils.validate_dictionary_string_length(body) - - db.volume_type_extra_specs_update_or_create(context, - type_id, - body) - # Get created_at and updated_at for notification - volume_type = volume_types.get_volume_type(context, type_id) - 
notifier_info = dict(type_id=type_id, id=id, - created_at=volume_type['created_at'], - updated_at=volume_type['updated_at']) - notifier = rpc.get_notifier('volumeTypeExtraSpecs') - notifier.info(context, - 'volume_type_extra_specs.update', - notifier_info) - return body - - def show(self, req, type_id, id): - """Return a single extra spec item.""" - context = req.environ['cinder.context'] - authorize(context, action='show') - self._check_type(context, type_id) - specs = self._get_extra_specs(context, type_id) - if id in specs['extra_specs']: - return {id: specs['extra_specs'][id]} - else: - raise exception.VolumeTypeExtraSpecsNotFound( - volume_type_id=type_id, extra_specs_key=id) - - def delete(self, req, type_id, id): - """Deletes an existing extra spec.""" - context = req.environ['cinder.context'] - self._check_type(context, type_id) - authorize(context, action='delete') - self._allow_update(context, type_id) - - # Not found exception will be handled at the wsgi level - db.volume_type_extra_specs_delete(context, type_id, id) - - # Get created_at and updated_at for notification - volume_type = volume_types.get_volume_type(context, type_id) - notifier_info = dict(type_id=type_id, id=id, - created_at=volume_type['created_at'], - updated_at=volume_type['updated_at'], - deleted_at=volume_type['deleted_at']) - notifier = rpc.get_notifier('volumeTypeExtraSpecs') - notifier.info(context, - 'volume_type_extra_specs.delete', - notifier_info) - return webob.Response(status_int=http_client.ACCEPTED) - - def _check_key_names(self, keys): - if not common.validate_key_names(keys): - expl = _('Key names can only contain alphanumeric characters, ' - 'underscores, periods, colons and hyphens.') - - raise webob.exc.HTTPBadRequest(explanation=expl) - - -class Types_extra_specs(extensions.ExtensionDescriptor): - """Type extra specs support.""" - - name = "TypesExtraSpecs" - alias = "os-types-extra-specs" - updated = "2011-08-24T00:00:00+00:00" - - def get_resources(self): - 
resources = [] - res = extensions.ResourceExtension('extra_specs', - VolumeTypeExtraSpecsController(), - parent=dict(member_name='type', - collection_name='types') - ) - resources.append(res) - - return resources diff --git a/cinder/api/contrib/types_manage.py b/cinder/api/contrib/types_manage.py deleted file mode 100644 index 756f9dc27..000000000 --- a/cinder/api/contrib/types_manage.py +++ /dev/null @@ -1,198 +0,0 @@ -# Copyright (c) 2011 Zadara Storage Inc. -# Copyright (c) 2011 OpenStack Foundation -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -"""The volume types manage extension.""" - -import six -from six.moves import http_client -import webob - -from cinder.api import extensions -from cinder.api.openstack import wsgi -from cinder.api.views import types as views_types -from cinder import exception -from cinder.i18n import _ -from cinder import rpc -from cinder import utils -from cinder.volume import volume_types - - -authorize = extensions.extension_authorizer('volume', 'types_manage') - - -class VolumeTypesManageController(wsgi.Controller): - """The volume types API controller for the OpenStack API.""" - - _view_builder_class = views_types.ViewBuilder - - @utils.if_notifications_enabled - def _notify_volume_type_error(self, context, method, err, - volume_type=None, id=None, name=None): - payload = dict( - volume_types=volume_type, name=name, id=id, error_message=err) - rpc.get_notifier('volumeType').error(context, method, payload) - - @utils.if_notifications_enabled - def _notify_volume_type_info(self, context, method, volume_type): - payload = dict(volume_types=volume_type) - rpc.get_notifier('volumeType').info(context, method, payload) - - @wsgi.action("create") - def _create(self, req, body): - """Creates a new volume type.""" - context = req.environ['cinder.context'] - authorize(context) - - self.assert_valid_body(body, 'volume_type') - - vol_type = body['volume_type'] - name = vol_type.get('name', None) - description = vol_type.get('description') - specs = vol_type.get('extra_specs', {}) - utils.validate_dictionary_string_length(specs) - is_public = utils.get_bool_param('os-volume-type-access:is_public', - vol_type, True) - - if name is None or len(name.strip()) == 0: - msg = _("Volume type name can not be empty.") - raise webob.exc.HTTPBadRequest(explanation=msg) - - utils.check_string_length(name, 'Type name', - min_length=1, max_length=255) - - if description is not None: - utils.check_string_length(description, 'Type description', - min_length=0, max_length=255) - - try: - 
volume_types.create(context, - name, - specs, - is_public, - description=description) - vol_type = volume_types.get_volume_type_by_name(context, name) - req.cache_resource(vol_type, name='types') - self._notify_volume_type_info( - context, 'volume_type.create', vol_type) - - except exception.VolumeTypeExists as err: - self._notify_volume_type_error( - context, 'volume_type.create', err, volume_type=vol_type) - raise webob.exc.HTTPConflict(explanation=six.text_type(err)) - except exception.VolumeTypeNotFoundByName as err: - self._notify_volume_type_error( - context, 'volume_type.create', err, name=name) - # Not found exception will be handled at the wsgi level - raise - - return self._view_builder.show(req, vol_type) - - @wsgi.action("update") - def _update(self, req, id, body): - # Update description for a given volume type. - context = req.environ['cinder.context'] - authorize(context) - - self.assert_valid_body(body, 'volume_type') - - vol_type = body['volume_type'] - description = vol_type.get('description') - name = vol_type.get('name') - is_public = vol_type.get('is_public') - - # Name and description can not be both None. - # If name specified, name can not be empty. 
- if name and len(name.strip()) == 0: - msg = _("Volume type name can not be empty.") - raise webob.exc.HTTPBadRequest(explanation=msg) - - if name is None and description is None and is_public is None: - msg = _("Specify volume type name, description, is_public or " - "a combination thereof.") - raise webob.exc.HTTPBadRequest(explanation=msg) - - if is_public is not None: - is_public = utils.get_bool_param('is_public', vol_type) - - if name: - utils.check_string_length(name, 'Type name', - min_length=1, max_length=255) - - if description is not None: - utils.check_string_length(description, 'Type description', - min_length=0, max_length=255) - - try: - volume_types.update(context, id, name, description, - is_public=is_public) - # Get the updated - vol_type = volume_types.get_volume_type(context, id) - req.cache_resource(vol_type, name='types') - self._notify_volume_type_info( - context, 'volume_type.update', vol_type) - - except exception.VolumeTypeNotFound as err: - self._notify_volume_type_error( - context, 'volume_type.update', err, id=id) - # Not found exception will be handled at the wsgi level - raise - except exception.VolumeTypeExists as err: - self._notify_volume_type_error( - context, 'volume_type.update', err, volume_type=vol_type) - raise webob.exc.HTTPConflict(explanation=six.text_type(err)) - except exception.VolumeTypeUpdateFailed as err: - self._notify_volume_type_error( - context, 'volume_type.update', err, volume_type=vol_type) - raise webob.exc.HTTPInternalServerError( - explanation=six.text_type(err)) - - return self._view_builder.show(req, vol_type) - - @wsgi.action("delete") - def _delete(self, req, id): - """Deletes an existing volume type.""" - context = req.environ['cinder.context'] - authorize(context) - - try: - vol_type = volume_types.get_volume_type(context, id) - volume_types.destroy(context, vol_type['id']) - self._notify_volume_type_info( - context, 'volume_type.delete', vol_type) - except exception.VolumeTypeInUse as err: - 
self._notify_volume_type_error( - context, 'volume_type.delete', err, volume_type=vol_type) - msg = _('Target volume type is still in use.') - raise webob.exc.HTTPBadRequest(explanation=msg) - except exception.VolumeTypeNotFound as err: - self._notify_volume_type_error( - context, 'volume_type.delete', err, id=id) - # Not found exception will be handled at the wsgi level - raise - - return webob.Response(status_int=http_client.ACCEPTED) - - -class Types_manage(extensions.ExtensionDescriptor): - """Types manage support.""" - - name = "TypesManage" - alias = "os-types-manage" - updated = "2011-08-24T00:00:00+00:00" - - def get_controller_extensions(self): - controller = VolumeTypesManageController() - extension = extensions.ControllerExtension(self, 'types', controller) - return [extension] diff --git a/cinder/api/contrib/used_limits.py b/cinder/api/contrib/used_limits.py deleted file mode 100644 index 1c638a282..000000000 --- a/cinder/api/contrib/used_limits.py +++ /dev/null @@ -1,69 +0,0 @@ -# Copyright 2012 OpenStack Foundation -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -from cinder.api import extensions -from cinder.api.openstack import wsgi -from cinder import quota - -QUOTAS = quota.QUOTAS - -authorize = extensions.soft_extension_authorizer('limits', 'used_limits') - - -class UsedLimitsController(wsgi.Controller): - - @wsgi.extends - def index(self, req, resp_obj): - context = req.environ['cinder.context'] - if authorize(context): - params = req.params.copy() - req_version = req.api_version_request - - # TODO(wangxiyuan): Support "tenant_id" here to keep the backwards - # compatibility. Remove it once we drop all support for "tenant". - if req_version.matches(None, "3.38") or not context.is_admin: - params.pop('project_id', None) - params.pop('tenant_id', None) - project_id = params.get( - 'project_id', params.get('tenant_id', context.project_id)) - quotas = QUOTAS.get_project_quotas(context, project_id, - usages=True) - - quota_map = { - 'totalVolumesUsed': 'volumes', - 'totalGigabytesUsed': 'gigabytes', - 'totalSnapshotsUsed': 'snapshots', - 'totalBackupsUsed': 'backups', - 'totalBackupGigabytesUsed': 'backup_gigabytes' - } - - used_limits = {} - for display_name, single_quota in quota_map.items(): - if single_quota in quotas: - used_limits[display_name] = quotas[single_quota]['in_use'] - - resp_obj.obj['limits']['absolute'].update(used_limits) - - -class Used_limits(extensions.ExtensionDescriptor): - """Provide data on limited resources that are being used.""" - - name = "UsedLimits" - alias = 'os-used-limits' - updated = "2013-10-03T00:00:00+00:00" - - def get_controller_extensions(self): - controller = UsedLimitsController() - extension = extensions.ControllerExtension(self, 'limits', controller) - return [extension] diff --git a/cinder/api/contrib/volume_actions.py b/cinder/api/contrib/volume_actions.py deleted file mode 100644 index 99b1c540d..000000000 --- a/cinder/api/contrib/volume_actions.py +++ /dev/null @@ -1,405 +0,0 @@ -# Copyright 2012 OpenStack Foundation -# -# Licensed under the Apache License, Version 2.0 
(the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - - -from oslo_config import cfg -import oslo_messaging as messaging -from oslo_utils import encodeutils -from oslo_utils import strutils -import six -from six.moves import http_client -import webob - -from cinder.api import extensions -from cinder.api.openstack import api_version_request -from cinder.api.openstack import wsgi -from cinder import exception -from cinder.i18n import _ -from cinder.image import image_utils -from cinder import keymgr -from cinder import utils -from cinder import volume - - -CONF = cfg.CONF - - -def authorize(context, action_name): - action = 'volume_actions:%s' % action_name - extensions.extension_authorizer('volume', action)(context) - - -class VolumeActionsController(wsgi.Controller): - def __init__(self, *args, **kwargs): - super(VolumeActionsController, self).__init__(*args, **kwargs) - self._key_mgr = None - self.volume_api = volume.API() - - @property - def _key_manager(self): - # Allows for lazy initialization of the key manager - if self._key_mgr is None: - self._key_mgr = keymgr.API(CONF) - - return self._key_mgr - - @wsgi.action('os-attach') - def _attach(self, req, id, body): - """Add attachment metadata.""" - context = req.environ['cinder.context'] - # Not found exception will be handled at the wsgi level - volume = self.volume_api.get(context, id) - - # instance UUID is an option now - instance_uuid = None - if 'instance_uuid' in body['os-attach']: - instance_uuid = body['os-attach']['instance_uuid'] - host_name = None - # Keep API 
backward compatibility - if 'host_name' in body['os-attach']: - host_name = body['os-attach']['host_name'] - if 'mountpoint' not in body['os-attach']: - msg = _("Must specify 'mountpoint'") - raise webob.exc.HTTPBadRequest(explanation=msg) - mountpoint = body['os-attach']['mountpoint'] - if 'mode' in body['os-attach']: - mode = body['os-attach']['mode'] - else: - mode = 'rw' - - if instance_uuid is None and host_name is None: - msg = _("Invalid request to attach volume to an invalid target") - raise webob.exc.HTTPBadRequest(explanation=msg) - - if mode not in ('rw', 'ro'): - msg = _("Invalid request to attach volume with an invalid mode. " - "Attaching mode should be 'rw' or 'ro'") - raise webob.exc.HTTPBadRequest(explanation=msg) - try: - self.volume_api.attach(context, volume, - instance_uuid, host_name, mountpoint, mode) - except messaging.RemoteError as error: - if error.exc_type in ['InvalidVolume', 'InvalidUUID', - 'InvalidVolumeAttachMode']: - msg = _("Error attaching volume - %(err_type)s: " - "%(err_msg)s") % { - 'err_type': error.exc_type, 'err_msg': error.value} - raise webob.exc.HTTPBadRequest(explanation=msg) - else: - # There are also few cases where attach call could fail due to - # db or volume driver errors. These errors shouldn't be exposed - # to the user and in such cases it should raise 500 error. 
- raise - - return webob.Response(status_int=http_client.ACCEPTED) - - @wsgi.action('os-detach') - def _detach(self, req, id, body): - """Clear attachment metadata.""" - context = req.environ['cinder.context'] - # Not found exception will be handled at the wsgi level - volume = self.volume_api.get(context, id) - - attachment_id = None - if body['os-detach']: - attachment_id = body['os-detach'].get('attachment_id', None) - - try: - self.volume_api.detach(context, volume, attachment_id) - except messaging.RemoteError as error: - if error.exc_type in ['VolumeAttachmentNotFound', 'InvalidVolume']: - msg = _("Error detaching volume - %(err_type)s: " - "%(err_msg)s") % { - 'err_type': error.exc_type, 'err_msg': error.value} - raise webob.exc.HTTPBadRequest(explanation=msg) - else: - # There are also few cases where detach call could fail due to - # db or volume driver errors. These errors shouldn't be exposed - # to the user and in such cases it should raise 500 error. - raise - - return webob.Response(status_int=http_client.ACCEPTED) - - @wsgi.action('os-reserve') - def _reserve(self, req, id, body): - """Mark volume as reserved.""" - context = req.environ['cinder.context'] - # Not found exception will be handled at the wsgi level - volume = self.volume_api.get(context, id) - - self.volume_api.reserve_volume(context, volume) - return webob.Response(status_int=http_client.ACCEPTED) - - @wsgi.action('os-unreserve') - def _unreserve(self, req, id, body): - """Unmark volume as reserved.""" - context = req.environ['cinder.context'] - # Not found exception will be handled at the wsgi level - volume = self.volume_api.get(context, id) - - self.volume_api.unreserve_volume(context, volume) - return webob.Response(status_int=http_client.ACCEPTED) - - @wsgi.action('os-begin_detaching') - def _begin_detaching(self, req, id, body): - """Update volume status to 'detaching'.""" - context = req.environ['cinder.context'] - # Not found exception will be handled at the wsgi level - volume 
= self.volume_api.get(context, id) - - self.volume_api.begin_detaching(context, volume) - return webob.Response(status_int=http_client.ACCEPTED) - - @wsgi.action('os-roll_detaching') - def _roll_detaching(self, req, id, body): - """Roll back volume status to 'in-use'.""" - context = req.environ['cinder.context'] - # Not found exception will be handled at the wsgi level - volume = self.volume_api.get(context, id) - - self.volume_api.roll_detaching(context, volume) - return webob.Response(status_int=http_client.ACCEPTED) - - @wsgi.action('os-initialize_connection') - def _initialize_connection(self, req, id, body): - """Initialize volume attachment.""" - context = req.environ['cinder.context'] - # Not found exception will be handled at the wsgi level - volume = self.volume_api.get(context, id) - try: - connector = body['os-initialize_connection']['connector'] - except KeyError: - raise webob.exc.HTTPBadRequest( - explanation=_("Must specify 'connector'")) - try: - info = self.volume_api.initialize_connection(context, - volume, - connector) - except exception.InvalidInput as err: - raise webob.exc.HTTPBadRequest( - explanation=err.msg) - except exception.VolumeBackendAPIException: - msg = _("Unable to fetch connection information from backend.") - raise webob.exc.HTTPInternalServerError(explanation=msg) - except messaging.RemoteError as error: - if error.exc_type == 'InvalidInput': - raise exception.InvalidInput(reason=error.value) - raise - - return {'connection_info': info} - - @wsgi.action('os-terminate_connection') - def _terminate_connection(self, req, id, body): - """Terminate volume attachment.""" - context = req.environ['cinder.context'] - # Not found exception will be handled at the wsgi level - volume = self.volume_api.get(context, id) - try: - connector = body['os-terminate_connection']['connector'] - except KeyError: - raise webob.exc.HTTPBadRequest( - explanation=_("Must specify 'connector'")) - try: - self.volume_api.terminate_connection(context, volume, 
connector) - except exception.VolumeBackendAPIException: - msg = _("Unable to terminate volume connection from backend.") - raise webob.exc.HTTPInternalServerError(explanation=msg) - return webob.Response(status_int=http_client.ACCEPTED) - - @wsgi.response(http_client.ACCEPTED) - @wsgi.action('os-volume_upload_image') - def _volume_upload_image(self, req, id, body): - """Uploads the specified volume to image service.""" - context = req.environ['cinder.context'] - params = body['os-volume_upload_image'] - req_version = req.api_version_request - if not params.get("image_name"): - msg = _("No image_name was specified in request.") - raise webob.exc.HTTPBadRequest(explanation=msg) - - force = params.get('force', 'False') - try: - force = strutils.bool_from_string(force, strict=True) - except ValueError as error: - err_msg = encodeutils.exception_to_unicode(error) - msg = _("Invalid value for 'force': '%s'") % err_msg - raise webob.exc.HTTPBadRequest(explanation=msg) - - # Not found exception will be handled at the wsgi level - volume = self.volume_api.get(context, id) - - authorize(context, "upload_image") - # check for valid disk-format - disk_format = params.get("disk_format", "raw") - if not image_utils.validate_disk_format(disk_format): - msg = _("Invalid disk-format '%(disk_format)s' is specified. " - "Allowed disk-formats are %(allowed_disk_formats)s.") % { - "disk_format": disk_format, - "allowed_disk_formats": ", ".join( - image_utils.VALID_DISK_FORMATS) - } - raise webob.exc.HTTPBadRequest(explanation=msg) - - image_metadata = {"container_format": params.get( - "container_format", "bare"), - "disk_format": disk_format, - "name": params["image_name"]} - - if volume.encryption_key_id: - # Clone volume encryption key: the current key cannot - # be reused because it will be deleted when the volume is - # deleted. 
- # TODO(eharney): Currently, there is no mechanism to remove - # these keys, because Glance will not delete the key from - # Barbican when the image is deleted. - encryption_key_id = self._key_manager.store( - context, - self._key_manager.get(context, volume.encryption_key_id)) - - image_metadata['cinder_encryption_key_id'] = encryption_key_id - - if req_version >= api_version_request.APIVersionRequest('3.1'): - - image_metadata['visibility'] = params.get('visibility', 'private') - image_metadata['protected'] = params.get('protected', 'False') - - if image_metadata['visibility'] == 'public': - authorize(context, 'upload_public') - - if CONF.glance_api_version != 2: - # Replace visibility with is_public for Glance V1 - image_metadata['is_public'] = ( - image_metadata['visibility'] == 'public') - image_metadata.pop('visibility', None) - - image_metadata['protected'] = ( - utils.get_bool_param('protected', image_metadata)) - - try: - response = self.volume_api.copy_volume_to_image(context, - volume, - image_metadata, - force) - except exception.InvalidVolume as error: - raise webob.exc.HTTPBadRequest(explanation=error.msg) - except ValueError as error: - raise webob.exc.HTTPBadRequest(explanation=six.text_type(error)) - except messaging.RemoteError as error: - msg = "%(err_type)s: %(err_msg)s" % {'err_type': error.exc_type, - 'err_msg': error.value} - raise webob.exc.HTTPBadRequest(explanation=msg) - except Exception as error: - raise webob.exc.HTTPBadRequest(explanation=six.text_type(error)) - return {'os-volume_upload_image': response} - - @wsgi.response(http_client.ACCEPTED) - @wsgi.action('os-extend') - def _extend(self, req, id, body): - """Extend size of volume.""" - context = req.environ['cinder.context'] - req_version = req.api_version_request - # Not found exception will be handled at the wsgi level - volume = self.volume_api.get(context, id) - - try: - size = int(body['os-extend']['new_size']) - except (KeyError, ValueError, TypeError): - msg = _("New 
volume size must be specified as an integer.") - raise webob.exc.HTTPBadRequest(explanation=msg) - - try: - if req_version.matches("3.42") and volume.status in ['in-use']: - self.volume_api.extend_attached_volume(context, volume, size) - else: - self.volume_api.extend(context, volume, size) - except exception.InvalidVolume as error: - raise webob.exc.HTTPBadRequest(explanation=error.msg) - - @wsgi.action('os-update_readonly_flag') - def _volume_readonly_update(self, req, id, body): - """Update volume readonly flag.""" - context = req.environ['cinder.context'] - # Not found exception will be handled at the wsgi level - volume = self.volume_api.get(context, id) - - try: - readonly_flag = body['os-update_readonly_flag']['readonly'] - except KeyError: - msg = _("Must specify readonly in request.") - raise webob.exc.HTTPBadRequest(explanation=msg) - - try: - readonly_flag = strutils.bool_from_string(readonly_flag, - strict=True) - except ValueError as error: - err_msg = encodeutils.exception_to_unicode(error) - msg = _("Invalid value for 'readonly': '%s'") % err_msg - raise webob.exc.HTTPBadRequest(explanation=msg) - - self.volume_api.update_readonly_flag(context, volume, readonly_flag) - return webob.Response(status_int=http_client.ACCEPTED) - - @wsgi.action('os-retype') - def _retype(self, req, id, body): - """Change type of existing volume.""" - context = req.environ['cinder.context'] - volume = self.volume_api.get(context, id) - try: - new_type = body['os-retype']['new_type'] - except KeyError: - msg = _("New volume type must be specified.") - raise webob.exc.HTTPBadRequest(explanation=msg) - policy = body['os-retype'].get('migration_policy') - - self.volume_api.retype(context, volume, new_type, policy) - return webob.Response(status_int=http_client.ACCEPTED) - - @wsgi.action('os-set_bootable') - def _set_bootable(self, req, id, body): - """Update bootable status of a volume.""" - context = req.environ['cinder.context'] - # Not found exception will be handled at the 
wsgi level - volume = self.volume_api.get(context, id) - - try: - bootable = body['os-set_bootable']['bootable'] - except KeyError: - msg = _("Must specify bootable in request.") - raise webob.exc.HTTPBadRequest(explanation=msg) - - try: - bootable = strutils.bool_from_string(bootable, - strict=True) - except ValueError as error: - err_msg = encodeutils.exception_to_unicode(error) - msg = _("Invalid value for 'bootable': '%s'") % err_msg - raise webob.exc.HTTPBadRequest(explanation=msg) - - update_dict = {'bootable': bootable} - - self.volume_api.update(context, volume, update_dict) - return webob.Response(status_int=http_client.OK) - - -class Volume_actions(extensions.ExtensionDescriptor): - """Enable volume actions.""" - - name = "VolumeActions" - alias = "os-volume-actions" - updated = "2012-05-31T00:00:00+00:00" - - def get_controller_extensions(self): - controller = VolumeActionsController() - extension = extensions.ControllerExtension(self, 'volumes', controller) - return [extension] diff --git a/cinder/api/contrib/volume_encryption_metadata.py b/cinder/api/contrib/volume_encryption_metadata.py deleted file mode 100644 index 66f18277a..000000000 --- a/cinder/api/contrib/volume_encryption_metadata.py +++ /dev/null @@ -1,57 +0,0 @@ -# Copyright (c) 2013 The Johns Hopkins University/Applied Physics Laboratory -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -"""The volume encryption metadata extension.""" - -from cinder.api import extensions -from cinder.api.openstack import wsgi -from cinder import db - -authorize = extensions.extension_authorizer('volume', - 'volume_encryption_metadata') - - -class VolumeEncryptionMetadataController(wsgi.Controller): - """The volume encryption metadata API extension.""" - - def index(self, req, volume_id): - """Returns the encryption metadata for a given volume.""" - context = req.environ['cinder.context'] - authorize(context) - return db.volume_encryption_metadata_get(context, volume_id) - - def show(self, req, volume_id, id): - """Return a single encryption item.""" - encryption_item = self.index(req, volume_id) - if encryption_item is not None: - return encryption_item[id] - else: - return None - - -class Volume_encryption_metadata(extensions.ExtensionDescriptor): - """Volume encryption metadata retrieval support.""" - - name = "VolumeEncryptionMetadata" - alias = "os-volume-encryption-metadata" - updated = "2013-07-10T00:00:00+00:00" - - def get_resources(self): - resources = [] - res = extensions.ResourceExtension( - 'encryption', VolumeEncryptionMetadataController(), - parent=dict(member_name='volume', collection_name='volumes')) - resources.append(res) - return resources diff --git a/cinder/api/contrib/volume_host_attribute.py b/cinder/api/contrib/volume_host_attribute.py deleted file mode 100644 index efe7cfefb..000000000 --- a/cinder/api/contrib/volume_host_attribute.py +++ /dev/null @@ -1,54 +0,0 @@ -# Copyright 2012 OpenStack Foundation -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. 
You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from cinder.api import extensions -from cinder.api.openstack import wsgi - - -authorize = extensions.soft_extension_authorizer('volume', - 'volume_host_attribute') - - -class VolumeHostAttributeController(wsgi.Controller): - def _add_volume_host_attribute(self, req, resp_volume): - db_volume = req.get_db_volume(resp_volume['id']) - key = "%s:host" % Volume_host_attribute.alias - resp_volume[key] = db_volume['host'] - - @wsgi.extends - def show(self, req, resp_obj, id): - context = req.environ['cinder.context'] - if authorize(context): - volume = resp_obj.obj['volume'] - self._add_volume_host_attribute(req, volume) - - @wsgi.extends - def detail(self, req, resp_obj): - context = req.environ['cinder.context'] - if authorize(context): - for vol in list(resp_obj.obj['volumes']): - self._add_volume_host_attribute(req, vol) - - -class Volume_host_attribute(extensions.ExtensionDescriptor): - """Expose host as an attribute of a volume.""" - - name = "VolumeHostAttribute" - alias = "os-vol-host-attr" - updated = "2011-11-03T00:00:00+00:00" - - def get_controller_extensions(self): - controller = VolumeHostAttributeController() - extension = extensions.ControllerExtension(self, 'volumes', controller) - return [extension] diff --git a/cinder/api/contrib/volume_image_metadata.py b/cinder/api/contrib/volume_image_metadata.py deleted file mode 100644 index db613746f..000000000 --- a/cinder/api/contrib/volume_image_metadata.py +++ /dev/null @@ -1,165 +0,0 @@ -# Copyright 2012 OpenStack Foundation -# -# Licensed under the Apache License, Version 2.0 (the "License"); you 
may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -"""The Volume Image Metadata API extension.""" -from six.moves import http_client -import webob - -from oslo_log import log as logging - -from cinder.api import common -from cinder.api import extensions -from cinder.api.openstack import wsgi -from cinder import exception -from cinder.i18n import _ -from cinder import volume - - -LOG = logging.getLogger(__name__) - -authorize = extensions.soft_extension_authorizer('volume', - 'volume_image_metadata') - - -class VolumeImageMetadataController(wsgi.Controller): - def __init__(self, *args, **kwargs): - super(VolumeImageMetadataController, self).__init__(*args, **kwargs) - self.volume_api = volume.API() - - def _get_image_metadata(self, context, volume_id): - # Not found exception will be handled at the wsgi level - volume = self.volume_api.get(context, volume_id) - meta = self.volume_api.get_volume_image_metadata(context, volume) - return (volume, meta) - - def _add_image_metadata(self, context, resp_volume_list, image_metas=None): - """Appends the image metadata to each of the given volume. - - :param context: the request context - :param resp_volume_list: the response volume list - :param image_metas: The image metadata to append, if None is provided - it will be retrieved from the database. An empty - dict means there is no metadata and it should not - be retrieved from the db. 
- """ - vol_id_list = [] - for vol in resp_volume_list: - vol_id_list.append(vol['id']) - if image_metas is None: - try: - image_metas = self.volume_api.get_list_volumes_image_metadata( - context, vol_id_list) - except Exception as e: - LOG.debug('Get image metadata error: %s', e) - return - if image_metas: - for vol in resp_volume_list: - image_meta = image_metas.get(vol['id']) - if image_meta: - vol['volume_image_metadata'] = dict(image_meta) - - @wsgi.extends - def show(self, req, resp_obj, id): - context = req.environ['cinder.context'] - if authorize(context): - self._add_image_metadata(context, [resp_obj.obj['volume']]) - - @wsgi.extends - def detail(self, req, resp_obj): - context = req.environ['cinder.context'] - if authorize(context): - # Just get the image metadata of those volumes in response. - volumes = list(resp_obj.obj.get('volumes', [])) - if volumes: - self._add_image_metadata(context, volumes) - - @wsgi.action("os-set_image_metadata") - def create(self, req, id, body): - context = req.environ['cinder.context'] - if authorize(context): - try: - metadata = body['os-set_image_metadata']['metadata'] - except (KeyError, TypeError): - msg = _("Malformed request body.") - raise webob.exc.HTTPBadRequest(explanation=msg) - new_metadata = self._update_volume_image_metadata(context, - id, - metadata, - delete=False) - - return {'metadata': new_metadata} - - def _update_volume_image_metadata(self, context, - volume_id, - metadata, - delete=False): - try: - volume = self.volume_api.get(context, volume_id) - return self.volume_api.update_volume_metadata( - context, - volume, - metadata, - delete=False, - meta_type=common.METADATA_TYPES.image) - # Not found exception will be handled at the wsgi level - except (ValueError, AttributeError): - msg = _("Malformed request body.") - raise webob.exc.HTTPBadRequest(explanation=msg) - except exception.InvalidVolumeMetadata as error: - raise webob.exc.HTTPBadRequest(explanation=error.msg) - except 
exception.InvalidVolumeMetadataSize as error: - raise webob.exc.HTTPRequestEntityTooLarge(explanation=error.msg) - - @wsgi.action("os-show_image_metadata") - def index(self, req, id, body): - context = req.environ['cinder.context'] - return {'metadata': self._get_image_metadata(context, id)[1]} - - @wsgi.action("os-unset_image_metadata") - def delete(self, req, id, body): - """Deletes an existing image metadata.""" - context = req.environ['cinder.context'] - if authorize(context): - try: - key = body['os-unset_image_metadata']['key'] - except (KeyError, TypeError): - msg = _("Malformed request body.") - raise webob.exc.HTTPBadRequest(explanation=msg) - - if key: - vol, metadata = self._get_image_metadata(context, id) - if key not in metadata: - raise exception.GlanceMetadataNotFound(id=id) - - self.volume_api.delete_volume_metadata( - context, vol, key, - meta_type=common.METADATA_TYPES.image) - else: - msg = _("The key cannot be None.") - raise webob.exc.HTTPBadRequest(explanation=msg) - - return webob.Response(status_int=http_client.OK) - - -class Volume_image_metadata(extensions.ExtensionDescriptor): - """Show image metadata associated with the volume.""" - - name = "VolumeImageMetadata" - alias = "os-vol-image-meta" - updated = "2012-12-07T00:00:00+00:00" - - def get_controller_extensions(self): - controller = VolumeImageMetadataController() - extension = extensions.ControllerExtension(self, 'volumes', controller) - return [extension] diff --git a/cinder/api/contrib/volume_manage.py b/cinder/api/contrib/volume_manage.py deleted file mode 100644 index 99ff03778..000000000 --- a/cinder/api/contrib/volume_manage.py +++ /dev/null @@ -1,184 +0,0 @@ -# Copyright 2014 IBM Corp. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. 
You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from oslo_log import log as logging -from six.moves import http_client - -from cinder.api import common -from cinder.api.contrib import resource_common_manage -from cinder.api import extensions -from cinder.api.openstack import wsgi -from cinder.api.v2.views import volumes as volume_views -from cinder.api.views import manageable_volumes as list_manageable_view -from cinder import exception -from cinder.i18n import _ -from cinder import utils -from cinder import volume as cinder_volume -from cinder.volume import volume_types - -LOG = logging.getLogger(__name__) -authorize_manage = extensions.extension_authorizer('volume', 'volume_manage') -authorize_list_manageable = extensions.extension_authorizer('volume', - 'list_manageable') - - -class VolumeManageController(wsgi.Controller): - """The /os-volume-manage controller for the OpenStack API.""" - - _view_builder_class = volume_views.ViewBuilder - - def __init__(self, *args, **kwargs): - super(VolumeManageController, self).__init__(*args, **kwargs) - self.volume_api = cinder_volume.API() - self._list_manageable_view = list_manageable_view.ViewBuilder() - - @wsgi.response(http_client.ACCEPTED) - def create(self, req, body): - """Instruct Cinder to manage a storage object. - - Manages an existing backend storage object (e.g. 
a Linux logical - volume or a SAN disk) by creating the Cinder objects required to manage - it, and possibly renaming the backend storage object - (driver dependent) - - From an API perspective, this operation behaves very much like a - volume creation operation, except that properties such as image, - snapshot and volume references don't make sense, because we are taking - an existing storage object into Cinder management. - - Required HTTP Body: - - .. code-block:: json - - { - "volume": { - "host": "", - "cluster": "", - "ref": "" - } - } - - See the appropriate Cinder drivers' implementations of the - manage_volume method to find out the accepted format of 'ref'. - - This API call will return with an error if any of the above elements - are missing from the request, or if the 'host' element refers to a - cinder host that is not registered. - - The volume will later enter the error state if it is discovered that - 'ref' is bad. - - Optional elements to 'volume' are:: - - name A name for the new volume. - description A description for the new volume. - volume_type ID or name of a volume type to associate with - the new Cinder volume. Does not necessarily - guarantee that the managed volume will have the - properties described in the volume_type. The - driver may choose to fail if it identifies that - the specified volume_type is not compatible with - the backend storage object. - metadata Key/value pairs to be associated with the new - volume. - availability_zone The availability zone to associate with the new - volume. - bootable If set to True, marks the volume as bootable. - - """ - context = req.environ['cinder.context'] - authorize_manage(context) - - self.assert_valid_body(body, 'volume') - - volume = body['volume'] - self.validate_name_and_description(volume) - - # Check that the required keys are present, return an error if they - # are not. 
- if 'ref' not in volume: - raise exception.MissingRequired(element='ref') - - cluster_name, host = common.get_cluster_host(req, volume, '3.16') - - LOG.debug('Manage volume request body: %s', body) - - kwargs = {} - req_volume_type = volume.get('volume_type', None) - if req_volume_type: - try: - kwargs['volume_type'] = volume_types.get_by_name_or_id( - context, req_volume_type) - except exception.VolumeTypeNotFound: - msg = _("Cannot find requested '%s' " - "volume type") % req_volume_type - raise exception.InvalidVolumeType(reason=msg) - else: - kwargs['volume_type'] = {} - - kwargs['name'] = volume.get('name', None) - kwargs['description'] = volume.get('description', None) - kwargs['metadata'] = volume.get('metadata', None) - kwargs['availability_zone'] = volume.get('availability_zone', None) - kwargs['bootable'] = utils.get_bool_param('bootable', volume) - - utils.check_metadata_properties(kwargs['metadata']) - - try: - new_volume = self.volume_api.manage_existing(context, - host, - cluster_name, - volume['ref'], - **kwargs) - except exception.ServiceNotFound: - msg = _("Host '%s' not found") % volume['host'] - raise exception.ServiceUnavailable(message=msg) - - utils.add_visible_admin_metadata(new_volume) - - return self._view_builder.detail(req, new_volume) - - @wsgi.extends - def index(self, req): - """Returns a summary list of volumes available to manage.""" - context = req.environ['cinder.context'] - authorize_list_manageable(context) - return resource_common_manage.get_manageable_resources( - req, False, self.volume_api.get_manageable_volumes, - self._list_manageable_view) - - @wsgi.extends - def detail(self, req): - """Returns a detailed list of volumes available to manage.""" - context = req.environ['cinder.context'] - authorize_list_manageable(context) - return resource_common_manage.get_manageable_resources( - req, True, self.volume_api.get_manageable_volumes, - self._list_manageable_view) - - -class Volume_manage(extensions.ExtensionDescriptor): - 
"""Allows existing backend storage to be 'managed' by Cinder.""" - - name = 'VolumeManage' - alias = 'os-volume-manage' - updated = '2014-02-10T00:00:00+00:00' - - def get_resources(self): - controller = VolumeManageController() - res = extensions.ResourceExtension(Volume_manage.alias, - controller, - collection_actions= - {'detail': 'GET'}) - return [res] diff --git a/cinder/api/contrib/volume_mig_status_attribute.py b/cinder/api/contrib/volume_mig_status_attribute.py deleted file mode 100644 index ce2e6d1fa..000000000 --- a/cinder/api/contrib/volume_mig_status_attribute.py +++ /dev/null @@ -1,54 +0,0 @@ -# Copyright 2013 IBM Corp. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -from cinder.api import extensions -from cinder.api.openstack import wsgi - -authorize = extensions.soft_extension_authorizer('volume', - 'volume_mig_status_attribute') - - -class VolumeMigStatusAttributeController(wsgi.Controller): - def _add_volume_mig_status_attribute(self, req, resp_volume): - db_volume = req.get_db_volume(resp_volume['id']) - key = "%s:migstat" % Volume_mig_status_attribute.alias - resp_volume[key] = db_volume['migration_status'] - key = "%s:name_id" % Volume_mig_status_attribute.alias - resp_volume[key] = db_volume['_name_id'] - - @wsgi.extends - def show(self, req, resp_obj, id): - context = req.environ['cinder.context'] - if authorize(context): - self._add_volume_mig_status_attribute(req, resp_obj.obj['volume']) - - @wsgi.extends - def detail(self, req, resp_obj): - context = req.environ['cinder.context'] - if authorize(context): - for vol in list(resp_obj.obj['volumes']): - self._add_volume_mig_status_attribute(req, vol) - - -class Volume_mig_status_attribute(extensions.ExtensionDescriptor): - """Expose migration_status as an attribute of a volume.""" - - name = "VolumeMigStatusAttribute" - alias = "os-vol-mig-status-attr" - updated = "2013-08-08T00:00:00+00:00" - - def get_controller_extensions(self): - controller = VolumeMigStatusAttributeController() - extension = extensions.ControllerExtension(self, 'volumes', controller) - return [extension] diff --git a/cinder/api/contrib/volume_tenant_attribute.py b/cinder/api/contrib/volume_tenant_attribute.py deleted file mode 100644 index 263704abc..000000000 --- a/cinder/api/contrib/volume_tenant_attribute.py +++ /dev/null @@ -1,54 +0,0 @@ -# Copyright 2012 OpenStack Foundation -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. 
You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from cinder.api import extensions -from cinder.api.openstack import wsgi - - -authorize = extensions.soft_extension_authorizer('volume', - 'volume_tenant_attribute') - - -class VolumeTenantAttributeController(wsgi.Controller): - def _add_volume_tenant_attribute(self, req, resp_volume): - db_volume = req.get_db_volume(resp_volume['id']) - key = "%s:tenant_id" % Volume_tenant_attribute.alias - resp_volume[key] = db_volume['project_id'] - - @wsgi.extends - def show(self, req, resp_obj, id): - context = req.environ['cinder.context'] - if authorize(context): - volume = resp_obj.obj['volume'] - self._add_volume_tenant_attribute(req, volume) - - @wsgi.extends - def detail(self, req, resp_obj): - context = req.environ['cinder.context'] - if authorize(context): - for vol in list(resp_obj.obj['volumes']): - self._add_volume_tenant_attribute(req, vol) - - -class Volume_tenant_attribute(extensions.ExtensionDescriptor): - """Expose the internal project_id as an attribute of a volume.""" - - name = "VolumeTenantAttribute" - alias = "os-vol-tenant-attr" - updated = "2011-11-03T00:00:00+00:00" - - def get_controller_extensions(self): - controller = VolumeTenantAttributeController() - extension = extensions.ControllerExtension(self, 'volumes', controller) - return [extension] diff --git a/cinder/api/contrib/volume_transfer.py b/cinder/api/contrib/volume_transfer.py deleted file mode 100644 index e55c42a01..000000000 --- a/cinder/api/contrib/volume_transfer.py +++ /dev/null @@ -1,170 +0,0 @@ -# Copyright 2011 OpenStack Foundation -# All Rights Reserved. 
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

from oslo_log import log as logging
from six.moves import http_client
import webob
from webob import exc

from cinder.api import common
from cinder.api import extensions
from cinder.api.openstack import wsgi
from cinder.api.views import transfers as transfer_view
from cinder import exception
from cinder.i18n import _
from cinder import transfer as transferAPI

LOG = logging.getLogger(__name__)


class VolumeTransferController(wsgi.Controller):
    """The Volume Transfer API controller for the OpenStack API."""

    _view_builder_class = transfer_view.ViewBuilder

    def __init__(self):
        self.transfer_api = transferAPI.API()
        super(VolumeTransferController, self).__init__()

    def show(self, req, id):
        """Return data about active transfers."""
        ctxt = req.environ['cinder.context']

        # A missing transfer raises NotFound, which the wsgi layer maps
        # to a 404 response.
        xfer = self.transfer_api.get(ctxt, transfer_id=id)

        return self._view_builder.detail(req, xfer)

    def index(self, req):
        """Returns a summary list of transfers."""
        return self._get_transfers(req, is_detail=False)

    def detail(self, req):
        """Returns a detailed list of transfers."""
        return self._get_transfers(req, is_detail=True)

    def _get_transfers(self, req, is_detail):
        """Returns a list of transfers, transformed through view builder."""
        ctxt = req.environ['cinder.context']
        search_filters = req.params.copy()
        LOG.debug('Listing volume transfers')

        all_transfers = self.transfer_api.get_all(ctxt,
                                                  filters=search_filters)
        # Total count is taken before pagination so the view can report it.
        total = len(all_transfers)
        page = common.limited(all_transfers, req)

        builder = (self._view_builder.detail_list if is_detail
                   else self._view_builder.summary_list)
        return builder(req, page, total)

    @wsgi.response(http_client.ACCEPTED)
    def create(self, req, body):
        """Create a new volume transfer."""
        LOG.debug('Creating new volume transfer %s', body)
        self.assert_valid_body(body, 'transfer')

        ctxt = req.environ['cinder.context']
        xfer_spec = body['transfer']

        if 'volume_id' not in xfer_spec:
            raise exc.HTTPBadRequest(
                explanation=_("Incorrect request body format"))
        volume_id = xfer_spec['volume_id']

        name = xfer_spec.get('name', None)
        if name is not None:
            self.validate_string_length(name, 'Transfer name',
                                        min_length=1, max_length=255,
                                        remove_whitespaces=True)
            name = name.strip()

        LOG.info("Creating transfer of volume %s",
                 volume_id)

        try:
            new_transfer = self.transfer_api.create(ctxt, volume_id, name)
        # Not found exception will be handled at the wsgi level
        except exception.InvalidVolume as error:
            raise exc.HTTPBadRequest(explanation=error.msg)

        return self._view_builder.create(req, dict(new_transfer))

    @wsgi.response(http_client.ACCEPTED)
    def accept(self, req, id, body):
        """Accept a new volume transfer."""
        LOG.debug('Accepting volume transfer %s', id)
        self.assert_valid_body(body, 'accept')

        ctxt = req.environ['cinder.context']
        accept_spec = body['accept']

        if 'auth_key' not in accept_spec:
            raise exc.HTTPBadRequest(
                explanation=_("Incorrect request body format"))
        auth_key = accept_spec['auth_key']

        LOG.info("Accepting transfer %s", id)

        try:
            accepted_transfer = self.transfer_api.accept(ctxt, id, auth_key)
        except exception.VolumeSizeExceedsAvailableQuota as error:
            raise exc.HTTPRequestEntityTooLarge(
                explanation=error.msg, headers={'Retry-After': '0'})
        except exception.InvalidVolume as error:
            raise exc.HTTPBadRequest(explanation=error.msg)

        return self._view_builder.summary(req, dict(accepted_transfer))

    def delete(self, req, id):
        """Delete a transfer."""
        ctxt = req.environ['cinder.context']

        LOG.info("Delete transfer with id: %s", id)

        # Not found exception will be handled at the wsgi level
        self.transfer_api.delete(ctxt, transfer_id=id)
        return webob.Response(status_int=http_client.ACCEPTED)


class Volume_transfer(extensions.ExtensionDescriptor):
    """Volume transfer management support."""

    name = "VolumeTransfer"
    alias = "os-volume-transfer"
    updated = "2013-05-29T00:00:00+00:00"

    def get_resources(self):
        return [extensions.ResourceExtension(
            Volume_transfer.alias,
            VolumeTransferController(),
            collection_actions={'detail': 'GET'},
            member_actions={'accept': 'POST'})]

"""The volume type access extension."""

from oslo_utils import uuidutils
import six
from six.moves import http_client
import webob

from cinder.api import extensions
from cinder.api.openstack import wsgi
from cinder import exception
from cinder.i18n import _
from cinder.volume import volume_types


soft_authorize = extensions.soft_extension_authorizer('volume',
                                                      'volume_type_access')
authorize = extensions.extension_authorizer('volume', 'volume_type_access')


def _marshall_volume_type_access(vol_type):
    """Serialize the per-project access entries of a volume type.

    Returns a dict of the form
    ``{'volume_type_access': [{'volume_type_id': ..., 'project_id': ...}]}``.
    """
    rval = [{'volume_type_id': vol_type['id'], 'project_id': project_id}
            for project_id in vol_type['projects']]
    return {'volume_type_access': rval}


class VolumeTypeAccessController(object):
    """The volume type access API controller for the OpenStack API."""

    def index(self, req, type_id):
        """List projects with access to a private volume type.

        Raises VolumeTypeAccessNotFound for public types, which have no
        access list by definition.
        """
        context = req.environ['cinder.context']
        authorize(context)

        # Not found exception will be handled at the wsgi level
        vol_type = volume_types.get_volume_type(
            context, type_id, expected_fields=['projects'])

        if vol_type['is_public']:
            expl = _("Access list not available for public volume types.")
            raise exception.VolumeTypeAccessNotFound(message=expl)

        return _marshall_volume_type_access(vol_type)


class VolumeTypeActionController(wsgi.Controller):
    """The volume type access API controller for the OpenStack API."""

    def _check_body(self, body, action_name):
        """Validate the action body and its 'project' UUID field."""
        self.assert_valid_body(body, action_name)
        access = body[action_name]
        project = access.get('project')
        if not uuidutils.is_uuid_like(project):
            msg = _("Bad project format: "
                    "project is not in proper format (%s)") % project
            raise webob.exc.HTTPBadRequest(explanation=msg)

    def _extend_vol_type(self, vol_type_rval, vol_type_ref):
        """Attach the '<alias>:is_public' key to a serialized volume type."""
        if vol_type_ref:
            key = "%s:is_public" % Volume_type_access.alias
            vol_type_rval[key] = vol_type_ref.get('is_public', True)

    def _extend_type_list(self, req, resp_obj):
        """Extend every type in a list response with its is_public flag.

        NOTE: index() and detail() previously duplicated this loop
        verbatim; they now share this helper.
        """
        for vol_type_rval in list(resp_obj.obj['volume_types']):
            type_id = vol_type_rval['id']
            vol_type = req.cached_resource_by_id(type_id, name='types')
            self._extend_vol_type(vol_type_rval, vol_type)

    @wsgi.extends
    def show(self, req, resp_obj, id):
        context = req.environ['cinder.context']
        if soft_authorize(context):
            vol_type = req.cached_resource_by_id(id, name='types')
            self._extend_vol_type(resp_obj.obj['volume_type'], vol_type)

    @wsgi.extends
    def index(self, req, resp_obj):
        context = req.environ['cinder.context']
        if soft_authorize(context):
            self._extend_type_list(req, resp_obj)

    @wsgi.extends
    def detail(self, req, resp_obj):
        context = req.environ['cinder.context']
        if soft_authorize(context):
            self._extend_type_list(req, resp_obj)

    @wsgi.extends(action='create')
    def create(self, req, body, resp_obj):
        context = req.environ['cinder.context']
        if soft_authorize(context):
            type_id = resp_obj.obj['volume_type']['id']
            vol_type = req.cached_resource_by_id(type_id, name='types')
            self._extend_vol_type(resp_obj.obj['volume_type'], vol_type)

    @wsgi.action('addProjectAccess')
    def _addProjectAccess(self, req, id, body):
        """Grant a project access to a volume type (409 on duplicates)."""
        context = req.environ['cinder.context']
        authorize(context, action="addProjectAccess")
        self._check_body(body, 'addProjectAccess')
        project = body['addProjectAccess']['project']

        try:
            volume_types.add_volume_type_access(context, id, project)
        # Not found exception will be handled at the wsgi level
        except exception.VolumeTypeAccessExists as err:
            raise webob.exc.HTTPConflict(explanation=six.text_type(err))
        return webob.Response(status_int=http_client.ACCEPTED)

    @wsgi.action('removeProjectAccess')
    def _removeProjectAccess(self, req, id, body):
        """Revoke a project's access to a volume type."""
        context = req.environ['cinder.context']
        authorize(context, action="removeProjectAccess")
        self._check_body(body, 'removeProjectAccess')
        project = body['removeProjectAccess']['project']

        # Not found exception will be handled at the wsgi level
        volume_types.remove_volume_type_access(context, id, project)
        return webob.Response(status_int=http_client.ACCEPTED)


class Volume_type_access(extensions.ExtensionDescriptor):
    """Volume type access support."""

    name = "VolumeTypeAccess"
    alias = "os-volume-type-access"
    updated = "2014-06-26T00:00:00Z"

    def get_resources(self):
        resources = []
        res = extensions.ResourceExtension(
            Volume_type_access.alias,
            VolumeTypeAccessController(),
            parent=dict(member_name='type', collection_name='types'))
        resources.append(res)
        return resources

    def get_controller_extensions(self):
        controller = VolumeTypeActionController()
        extension = extensions.ControllerExtension(self, 'types', controller)
        return [extension]

"""The volume types encryption extension."""

from six.moves import http_client
import webob

from cinder.api import extensions
from cinder.api.openstack import wsgi
from cinder import db
from cinder import exception
from cinder.i18n import _
from cinder import rpc
from cinder import utils
from cinder.volume import volume_types

authorize = extensions.extension_authorizer('volume',
                                            'volume_type_encryption')

# Accepted values for the 'control_location' encryption spec.
CONTROL_LOCATION = ['front-end', 'back-end']


class VolumeTypeEncryptionController(wsgi.Controller):
    """The volume type encryption API controller for the OpenStack API."""

    def _get_volume_type_encryption(self, context, type_id):
        """Return the type's encryption specs as a plain dict ({} if none)."""
        encryption_ref = db.volume_type_encryption_get(context, type_id)
        if not encryption_ref:
            return {}
        # dict() copy instead of the original key-by-key loop.
        return dict(encryption_ref.items())

    def _check_type(self, context, type_id):
        # Not found exception will be handled at the wsgi level
        volume_types.get_volume_type(context, type_id)

    def _check_encryption_input(self, encryption, create=True):
        """Validate user-supplied encryption specs.

        Normalizes 'key_size' to an int within [0, db.MAX_INT]; on create,
        requires 'provider' and 'control_location'; when present,
        'control_location' must be one of CONTROL_LOCATION.

        Raises InvalidInput on any violation.
        """
        if encryption.get('key_size') is not None:
            encryption['key_size'] = utils.validate_integer(
                encryption['key_size'], 'key_size',
                min_value=0, max_value=db.MAX_INT)

        if create:
            msg = None
            # Membership tested on the dict directly, not on .keys().
            if 'provider' not in encryption:
                msg = _('provider must be defined')
            elif 'control_location' not in encryption:
                msg = _('control_location must be defined')

            if msg is not None:
                raise exception.InvalidInput(reason=msg)

        # Check control location
        if 'control_location' in encryption:
            if encryption['control_location'] not in CONTROL_LOCATION:
                msg = _("Valid control location are: %s") % CONTROL_LOCATION
                raise exception.InvalidInput(reason=msg)

    def _encrypted_type_in_use(self, context, volume_type_id):
        """Return True if at least one volume uses this volume type."""
        volume_list = db.volume_type_encryption_volume_get(context,
                                                           volume_type_id)
        # bool() replaces the verbose `if len(...) > 0: return True` form.
        return bool(volume_list)

    def index(self, req, type_id):
        """Returns the encryption specs for a given volume type."""
        context = req.environ['cinder.context']
        authorize(context)
        self._check_type(context, type_id)
        return self._get_volume_type_encryption(context, type_id)

    def create(self, req, type_id, body=None):
        """Create encryption specs for an existing volume type."""
        context = req.environ['cinder.context']
        authorize(context)

        if self._encrypted_type_in_use(context, type_id):
            expl = _('Cannot create encryption specs. Volume type in use.')
            raise webob.exc.HTTPBadRequest(explanation=expl)

        self.assert_valid_body(body, 'encryption')

        self._check_type(context, type_id)

        encryption_specs = self._get_volume_type_encryption(context, type_id)
        if encryption_specs:
            raise exception.VolumeTypeEncryptionExists(type_id=type_id)

        encryption_specs = body['encryption']

        self._check_encryption_input(encryption_specs)

        db.volume_type_encryption_create(context, type_id, encryption_specs)
        notifier_info = dict(type_id=type_id, specs=encryption_specs)
        notifier = rpc.get_notifier('volumeTypeEncryption')
        notifier.info(context, 'volume_type_encryption.create', notifier_info)
        return body

    def update(self, req, type_id, id, body=None):
        """Update encryption specs for a given volume type."""
        context = req.environ['cinder.context']
        authorize(context)

        self.assert_valid_body(body, 'encryption')

        if len(body) > 1:
            expl = _('Request body contains too many items.')
            raise webob.exc.HTTPBadRequest(explanation=expl)

        self._check_type(context, type_id)

        if self._encrypted_type_in_use(context, type_id):
            expl = _('Cannot update encryption specs. Volume type in use.')
            raise webob.exc.HTTPBadRequest(explanation=expl)

        encryption_specs = body['encryption']
        self._check_encryption_input(encryption_specs, create=False)

        db.volume_type_encryption_update(context, type_id, encryption_specs)
        notifier_info = dict(type_id=type_id, id=id)
        notifier = rpc.get_notifier('volumeTypeEncryption')
        notifier.info(context, 'volume_type_encryption.update', notifier_info)

        return body

    def show(self, req, type_id, id):
        """Return a single encryption item."""
        context = req.environ['cinder.context']
        authorize(context)

        self._check_type(context, type_id)

        encryption_specs = self._get_volume_type_encryption(context, type_id)

        if id not in encryption_specs:
            raise exception.VolumeTypeEncryptionNotFound(type_id=type_id)

        return {id: encryption_specs[id]}

    def delete(self, req, type_id, id):
        """Delete encryption specs for a given volume type."""
        context = req.environ['cinder.context']
        authorize(context)

        if self._encrypted_type_in_use(context, type_id):
            expl = _('Cannot delete encryption specs. Volume type in use.')
            raise webob.exc.HTTPBadRequest(explanation=expl)

        # Not found exception will be handled at the wsgi level
        db.volume_type_encryption_delete(context, type_id)

        return webob.Response(status_int=http_client.ACCEPTED)


class Volume_type_encryption(extensions.ExtensionDescriptor):
    """Encryption support for volume types."""

    name = "VolumeTypeEncryption"
    alias = "encryption"
    updated = "2013-07-01T00:00:00+00:00"

    def get_resources(self):
        resources = []
        res = extensions.ResourceExtension(
            Volume_type_encryption.alias,
            VolumeTypeEncryptionController(),
            parent=dict(member_name='type', collection_name='types'))
        resources.append(res)
        return resources

    def get_controller_extensions(self):
        controller = VolumeTypeEncryptionController()
        extension = extensions.ControllerExtension(self, 'types', controller)
        return [extension]

from oslo_log import log as logging
from six.moves import http_client
import webob

from cinder.api import extensions
from cinder.api.openstack import wsgi
from cinder import volume

LOG = logging.getLogger(__name__)
authorize = extensions.extension_authorizer('volume', 'volume_unmanage')


class VolumeUnmanageController(wsgi.Controller):
    """Controller exposing the ``os-unmanage`` action on volumes."""

    def __init__(self, *args, **kwargs):
        super(VolumeUnmanageController, self).__init__(*args, **kwargs)
        self.volume_api = volume.API()

    @wsgi.response(http_client.ACCEPTED)
    @wsgi.action('os-unmanage')
    def unmanage(self, req, id, body):
        """Stop managing a volume.

        This behaves like a delete, except that the Cinder driver's
        unmanage path is used instead: the volume record is removed from
        Cinder without destroying the backend storage object behind it.

        There are no required parameters. A Not Found error is returned
        if the specified volume does not exist, and a Bad Request error
        is returned if the volume is still attached to an instance.
        """
        ctxt = req.environ['cinder.context']
        authorize(ctxt)

        LOG.info("Unmanage volume with id: %s", id)

        # Not found exception will be handled at the wsgi level
        target = self.volume_api.get(ctxt, id)
        self.volume_api.delete(ctxt, target, unmanage_only=True)
        return webob.Response(status_int=http_client.ACCEPTED)


class Volume_unmanage(extensions.ExtensionDescriptor):
    """Enable volume unmanage operation."""

    name = "VolumeUnmanage"
    alias = "os-volume-unmanage"
    updated = "2012-05-31T00:00:00+00:00"

    def get_controller_extensions(self):
        return [extensions.ControllerExtension(
            self, 'volumes', VolumeUnmanageController())]
- -import os - -from oslo_config import cfg -from oslo_log import log as logging -from oslo_utils import importutils -import webob.dec -import webob.exc - -import cinder.api.openstack -from cinder.api.openstack import wsgi -from cinder import exception -import cinder.policy - - -CONF = cfg.CONF - -LOG = logging.getLogger(__name__) -FILES_TO_SKIP = ['resource_common_manage.py'] - - -class ExtensionDescriptor(object): - """Base class that defines the contract for extensions. - - Note that you don't have to derive from this class to have a valid - extension; it is purely a convenience. - - """ - - # The name of the extension, e.g., 'Fox In Socks' - name = None - - # The alias for the extension, e.g., 'FOXNSOX' - alias = None - - # The timestamp when the extension was last updated, e.g., - # '2011-01-22T13:25:27-06:00' - updated = None - - def __init__(self, ext_mgr): - """Register extension with the extension manager.""" - - ext_mgr.register(self) - self.ext_mgr = ext_mgr - - def get_resources(self): - """List of extensions.ResourceExtension extension objects. - - Resources define new nouns, and are accessible through URLs. - - """ - resources = [] - return resources - - def get_controller_extensions(self): - """List of extensions.ControllerExtension extension objects. - - Controller extensions are used to extend existing controllers. 
- """ - controller_exts = [] - return controller_exts - - -class ExtensionsResource(wsgi.Resource): - - def __init__(self, extension_manager): - self.extension_manager = extension_manager - super(ExtensionsResource, self).__init__(None) - - def _translate(self, ext): - ext_data = {} - ext_data['name'] = ext.name - ext_data['alias'] = ext.alias - ext_data['description'] = ext.__doc__ - ext_data['updated'] = ext.updated - ext_data['links'] = [] # TODO(dprince): implement extension links - return ext_data - - def index(self, req): - extensions = [] - for _alias, ext in self.extension_manager.extensions.items(): - extensions.append(self._translate(ext)) - return dict(extensions=extensions) - - def show(self, req, id): - try: - # NOTE(dprince): the extensions alias is used as the 'id' for show - ext = self.extension_manager.extensions[id] - except KeyError: - raise webob.exc.HTTPNotFound() - - return dict(extension=self._translate(ext)) - - def delete(self, req, id): - raise webob.exc.HTTPNotFound() - - def create(self, req): - raise webob.exc.HTTPNotFound() - - -class ExtensionManager(object): - """Load extensions from the configured extension path. - - See cinder/tests/api/extensions/foxinsocks/extension.py for an - example extension implementation. 
- - """ - - def __init__(self): - LOG.info('Initializing extension manager.') - - self.cls_list = CONF.osapi_volume_extension - self.extensions = {} - self._load_extensions() - - def is_loaded(self, alias): - return alias in self.extensions - - def register(self, ext): - # Do nothing if the extension doesn't check out - if not self._check_extension(ext): - return - - alias = ext.alias - LOG.info('Loaded extension: %s', alias) - - if alias in self.extensions: - raise exception.Error("Found duplicate extension: %s" % alias) - self.extensions[alias] = ext - - def get_resources(self): - """Returns a list of ResourceExtension objects.""" - - resources = [] - resources.append(ResourceExtension('extensions', - ExtensionsResource(self))) - - for ext in self.extensions.values(): - try: - resources.extend(ext.get_resources()) - except AttributeError: - # NOTE(dprince): Extension aren't required to have resource - # extensions - pass - return resources - - def get_controller_extensions(self): - """Returns a list of ControllerExtension objects.""" - controller_exts = [] - for ext in self.extensions.values(): - try: - get_ext_method = ext.get_controller_extensions - except AttributeError: - # NOTE(Vek): Extensions aren't required to have - # controller extensions - continue - controller_exts.extend(get_ext_method()) - return controller_exts - - def _check_extension(self, extension): - """Checks for required methods in extension objects.""" - try: - LOG.debug('Ext name: %s', extension.name) - LOG.debug('Ext alias: %s', extension.alias) - LOG.debug('Ext description: %s', - ' '.join(extension.__doc__.strip().split())) - LOG.debug('Ext updated: %s', extension.updated) - except AttributeError: - LOG.exception("Exception loading extension.") - return False - - return True - - def load_extension(self, ext_factory): - """Execute an extension factory. - - Loads an extension. 
The 'ext_factory' is the name of a - callable that will be imported and called with one - argument--the extension manager. The factory callable is - expected to call the register() method at least once. - """ - - LOG.debug("Loading extension %s", ext_factory) - - # Load the factory - factory = importutils.import_class(ext_factory) - - # Call it - LOG.debug("Calling extension factory %s", ext_factory) - factory(self) - - def _load_extensions(self): - """Load extensions specified on the command line.""" - - extensions = list(self.cls_list) - - for ext_factory in extensions: - try: - self.load_extension(ext_factory) - except Exception as exc: - LOG.warning('Failed to load extension %(ext_factory)s: ' - '%(exc)s', - {'ext_factory': ext_factory, 'exc': exc}) - - -class ControllerExtension(object): - """Extend core controllers of cinder OpenStack API. - - Provide a way to extend existing cinder OpenStack API core - controllers. - """ - - def __init__(self, extension, collection, controller): - self.extension = extension - self.collection = collection - self.controller = controller - - -class ResourceExtension(object): - """Add top level resources to the OpenStack API in cinder.""" - - def __init__(self, collection, controller, parent=None, - collection_actions=None, member_actions=None, - custom_routes_fn=None): - if not collection_actions: - collection_actions = {} - if not member_actions: - member_actions = {} - self.collection = collection - self.controller = controller - self.parent = parent - self.collection_actions = collection_actions - self.member_actions = member_actions - self.custom_routes_fn = custom_routes_fn - - -def load_standard_extensions(ext_mgr, logger, path, package, ext_list=None): - """Registers all standard API extensions.""" - - # Walk through all the modules in our directory... 
- our_dir = path[0] - for dirpath, dirnames, filenames in os.walk(our_dir): - # Compute the relative package name from the dirpath - relpath = os.path.relpath(dirpath, our_dir) - if relpath == '.': - relpkg = '' - else: - relpkg = '.%s' % '.'.join(relpath.split(os.sep)) - - # Now, consider each file in turn, only considering .py and .pyc files - for fname in filenames: - root, ext = os.path.splitext(fname) - - # Skip __init__ and anything that's not .py and .pyc - if ((ext not in ('.py', '.pyc')) or root == '__init__' or - fname in FILES_TO_SKIP): - continue - - # If .pyc and .py both exist, skip .pyc - if ext == '.pyc' and ((root + '.py') in filenames): - continue - - # Try loading it - classname = "%s%s" % (root[0].upper(), root[1:]) - classpath = ("%s%s.%s.%s" % - (package, relpkg, root, classname)) - - if ext_list is not None and classname not in ext_list: - logger.debug("Skipping extension: %s" % classpath) - continue - - try: - ext_mgr.load_extension(classpath) - except Exception as exc: - logger.warning('Failed to load extension %(classpath)s: ' - '%(exc)s', - {'classpath': classpath, 'exc': exc}) - - # Now, let's consider any subdirectories we may have... - subdirs = [] - for dname in dirnames: - # Skip it if it does not have __init__.py - if not os.path.exists(os.path.join(dirpath, dname, - '__init__.py')): - continue - - # If it has extension(), delegate... - ext_name = ("%s%s.%s.extension" % - (package, relpkg, dname)) - try: - ext = importutils.import_class(ext_name) - except ImportError: - # extension() doesn't exist on it, so we'll explore - # the directory for ourselves - subdirs.append(dname) - else: - try: - ext(ext_mgr) - except Exception as exc: - logger.warning('Failed to load extension ' - '%(ext_name)s: %(exc)s', - {'ext_name': ext_name, 'exc': exc}) - - # Update the list of directories we'll explore... 
- dirnames[:] = subdirs - - -def extension_authorizer(api_name, extension_name): - def authorize(context, target=None, action=None): - if target is None: - target = {'project_id': context.project_id, - 'user_id': context.user_id} - if action is None: - act = '%s_extension:%s' % (api_name, extension_name) - else: - act = '%s_extension:%s:%s' % (api_name, extension_name, action) - cinder.policy.enforce(context, act, target) - return authorize - - -def soft_extension_authorizer(api_name, extension_name): - hard_authorize = extension_authorizer(api_name, extension_name) - - def authorize(context): - try: - hard_authorize(context) - return True - except exception.NotAuthorized: - return False - return authorize diff --git a/cinder/api/middleware/__init__.py b/cinder/api/middleware/__init__.py deleted file mode 100644 index e69de29bb..000000000 diff --git a/cinder/api/middleware/auth.py b/cinder/api/middleware/auth.py deleted file mode 100644 index 8d145701d..000000000 --- a/cinder/api/middleware/auth.py +++ /dev/null @@ -1,148 +0,0 @@ -# Copyright 2010 OpenStack Foundation -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -""" -Common Auth Middleware. 
- -""" - - -import os - -from oslo_config import cfg -from oslo_log import log as logging -from oslo_middleware import request_id -from oslo_serialization import jsonutils -from six.moves import http_client -import webob.dec -import webob.exc - -from cinder.api.openstack import wsgi -from cinder import context -from cinder.i18n import _ -from cinder.wsgi import common as base_wsgi - - -use_forwarded_for_opt = cfg.BoolOpt( - 'use_forwarded_for', - default=False, - help='Treat X-Forwarded-For as the canonical remote address. ' - 'Only enable this if you have a sanitizing proxy.') - -CONF = cfg.CONF -CONF.register_opt(use_forwarded_for_opt) - -LOG = logging.getLogger(__name__) - - -def pipeline_factory(loader, global_conf, **local_conf): - """A paste pipeline replica that keys off of auth_strategy.""" - pipeline = local_conf[CONF.auth_strategy] - if not CONF.api_rate_limit: - limit_name = CONF.auth_strategy + '_nolimit' - pipeline = local_conf.get(limit_name, pipeline) - pipeline = pipeline.split() - filters = [loader.get_filter(n) for n in pipeline[:-1]] - app = loader.get_app(pipeline[-1]) - filters.reverse() - for filter in filters: - app = filter(app) - return app - - -class InjectContext(base_wsgi.Middleware): - """Add a 'cinder.context' to WSGI environ.""" - - def __init__(self, context, *args, **kwargs): - self.context = context - super(InjectContext, self).__init__(*args, **kwargs) - - @webob.dec.wsgify(RequestClass=base_wsgi.Request) - def __call__(self, req): - req.environ['cinder.context'] = self.context - return self.application - - -class CinderKeystoneContext(base_wsgi.Middleware): - """Make a request context from keystone headers.""" - - @webob.dec.wsgify(RequestClass=base_wsgi.Request) - def __call__(self, req): - - # NOTE(jamielennox): from_environ handles these in newer versions - project_name = req.headers.get('X_TENANT_NAME') - req_id = req.environ.get(request_id.ENV_REQUEST_ID) - - # Build a context, including the auth_token... 
- remote_address = req.remote_addr - - service_catalog = None - if req.headers.get('X_SERVICE_CATALOG') is not None: - try: - catalog_header = req.headers.get('X_SERVICE_CATALOG') - service_catalog = jsonutils.loads(catalog_header) - except ValueError: - raise webob.exc.HTTPInternalServerError( - explanation=_('Invalid service catalog json.')) - - if CONF.use_forwarded_for: - remote_address = req.headers.get('X-Forwarded-For', remote_address) - - ctx = context.RequestContext.from_environ( - req.environ, - request_id=req_id, - remote_address=remote_address, - project_name=project_name, - service_catalog=service_catalog) - - if ctx.user_id is None: - LOG.debug("Neither X_USER_ID nor X_USER found in request") - return webob.exc.HTTPUnauthorized() - - req.environ['cinder.context'] = ctx - return self.application - - -class NoAuthMiddleware(base_wsgi.Middleware): - """Return a fake token if one isn't specified.""" - - @webob.dec.wsgify(RequestClass=wsgi.Request) - def __call__(self, req): - if 'X-Auth-Token' not in req.headers: - user_id = req.headers.get('X-Auth-User', 'admin') - project_id = req.headers.get('X-Auth-Project-Id', 'admin') - os_url = os.path.join(req.url, project_id) - res = webob.Response() - # NOTE(vish): This is expecting and returning Auth(1.1), whereas - # keystone uses 2.0 auth. We should probably allow - # 2.0 auth here as well. 
- res.headers['X-Auth-Token'] = '%s:%s' % (user_id, project_id) - res.headers['X-Server-Management-Url'] = os_url - res.content_type = 'text/plain' - res.status_int = http_client.NO_CONTENT - return res - - token = req.headers['X-Auth-Token'] - user_id, _sep, project_id = token.partition(':') - project_id = project_id or user_id - remote_address = getattr(req, 'remote_address', '127.0.0.1') - if CONF.use_forwarded_for: - remote_address = req.headers.get('X-Forwarded-For', remote_address) - ctx = context.RequestContext(user_id, - project_id, - is_admin=True, - remote_address=remote_address) - - req.environ['cinder.context'] = ctx - return self.application diff --git a/cinder/api/middleware/fault.py b/cinder/api/middleware/fault.py deleted file mode 100644 index e25de7956..000000000 --- a/cinder/api/middleware/fault.py +++ /dev/null @@ -1,82 +0,0 @@ -# Copyright 2010 United States Government as represented by the -# Administrator of the National Aeronautics and Space Administration. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 

from oslo_log import log as logging
import six
from six.moves import http_client
import webob.dec
import webob.exc

from cinder.api.openstack import wsgi
from cinder import exception
from cinder.i18n import _
from cinder import utils
from cinder.wsgi import common as base_wsgi


LOG = logging.getLogger(__name__)


class FaultWrapper(base_wsgi.Middleware):
    """Calls down the middleware stack, making exceptions into faults."""

    # Lazily populated map of HTTP status code -> webob.exc.HTTPError class.
    _status_to_type = {}

    @staticmethod
    def status_to_type(status):
        """Return an HTTPError instance for *status* (500 if unknown)."""
        cache = FaultWrapper._status_to_type
        if not cache:
            for clazz in utils.walk_class_hierarchy(webob.exc.HTTPError):
                cache[clazz.code] = clazz
        return cache.get(status, webob.exc.HTTPInternalServerError)()

    def _error(self, inner, req):
        """Translate an unhandled exception into a wsgi.Fault response."""
        # Quota errors are routine; everything else deserves a traceback.
        if not isinstance(inner, exception.QuotaError):
            LOG.exception("Caught error: %(type)s %(error)s",
                          {'type': type(inner),
                           'error': inner})

        safe = getattr(inner, 'safe', False)
        headers = getattr(inner, 'headers', None)
        status = getattr(inner, 'code', http_client.INTERNAL_SERVER_ERROR)
        if status is None:
            status = http_client.INTERNAL_SERVER_ERROR

        LOG.info("%(url)s returned with HTTP %(status)d",
                 dict(url=req.url, status=status))

        outer = self.status_to_type(status)
        if headers:
            outer.headers = headers
        # NOTE(johannes): We leave the explanation empty here on
        # purpose. It could possibly have sensitive information
        # that should not be returned back to the user. See
        # bugs 868360 and 874472
        # NOTE(eglynn): However, it would be over-conservative and
        # inconsistent with the EC2 API to hide every exception,
        # including those that are safe to expose, see bug 1021373
        if safe:
            if isinstance(inner, exception.CinderException):
                detail = inner.msg
            else:
                detail = six.text_type(inner)
            outer.explanation = _('%(exception)s: %(explanation)s') % {
                'exception': inner.__class__.__name__,
                'explanation': detail}
        return wsgi.Fault(outer)

    @webob.dec.wsgify(RequestClass=wsgi.Request)
    def __call__(self, req):
        try:
            return req.get_response(self.application)
        except Exception as ex:
            return self._error(ex, req)
-""" - -from oslo_log import log as logging -from oslo_service import wsgi as base_wsgi -import routes - -from cinder.api.openstack import wsgi -from cinder.i18n import _ - - -LOG = logging.getLogger(__name__) - - -class APIMapper(routes.Mapper): - def routematch(self, url=None, environ=None): - if url is "": - result = self._match("", environ) - return result[0], result[1] - return routes.Mapper.routematch(self, url, environ) - - def connect(self, *args, **kwargs): - # NOTE(inhye): Default the format part of a route to only accept json - # so it doesn't eat all characters after a '.' - # in the url. - kwargs.setdefault('requirements', {}) - if not kwargs['requirements'].get('format'): - kwargs['requirements']['format'] = 'json' - return routes.Mapper.connect(self, *args, **kwargs) - - -class ProjectMapper(APIMapper): - def resource(self, member_name, collection_name, **kwargs): - if 'parent_resource' not in kwargs: - kwargs['path_prefix'] = '{project_id}/' - else: - parent_resource = kwargs['parent_resource'] - p_collection = parent_resource['collection_name'] - p_member = parent_resource['member_name'] - kwargs['path_prefix'] = '{project_id}/%s/:%s_id' % (p_collection, - p_member) - routes.Mapper.resource(self, - member_name, - collection_name, - **kwargs) - - -class APIRouter(base_wsgi.Router): - """Routes requests on the API to the appropriate controller and method.""" - ExtensionManager = None # override in subclasses - - @classmethod - def factory(cls, global_config, **local_config): - """Simple paste factory, :class:`cinder.wsgi.Router` doesn't have.""" - return cls() - - def __init__(self, ext_mgr=None): - if ext_mgr is None: - if self.ExtensionManager: - ext_mgr = self.ExtensionManager() - else: - raise Exception(_("Must specify an ExtensionManager class")) - - mapper = ProjectMapper() - self.resources = {} - self._setup_routes(mapper, ext_mgr) - self._setup_ext_routes(mapper, ext_mgr) - self._setup_extensions(ext_mgr) - super(APIRouter, 
self).__init__(mapper) - - def _setup_ext_routes(self, mapper, ext_mgr): - for resource in ext_mgr.get_resources(): - LOG.debug('Extended resource: %s', - resource.collection) - - wsgi_resource = wsgi.Resource(resource.controller) - self.resources[resource.collection] = wsgi_resource - kargs = dict( - controller=wsgi_resource, - collection=resource.collection_actions, - member=resource.member_actions) - - if resource.parent: - kargs['parent_resource'] = resource.parent - - mapper.resource(resource.collection, resource.collection, **kargs) - - if resource.custom_routes_fn: - resource.custom_routes_fn(mapper, wsgi_resource) - - def _setup_extensions(self, ext_mgr): - for extension in ext_mgr.get_controller_extensions(): - collection = extension.collection - controller = extension.controller - - if collection not in self.resources: - LOG.warning('Extension %(ext_name)s: Cannot extend ' - 'resource %(collection)s: No such resource', - {'ext_name': extension.extension.name, - 'collection': collection}) - continue - - LOG.debug('Extension %(ext_name)s extending resource: ' - '%(collection)s', - {'ext_name': extension.extension.name, - 'collection': collection}) - - resource = self.resources[collection] - resource.register_actions(controller) - resource.register_extensions(controller) - - def _setup_routes(self, mapper, ext_mgr): - raise NotImplementedError diff --git a/cinder/api/openstack/api_version_request.py b/cinder/api/openstack/api_version_request.py deleted file mode 100644 index 68636a719..000000000 --- a/cinder/api/openstack/api_version_request.py +++ /dev/null @@ -1,230 +0,0 @@ -# Copyright 2014 IBM Corp. -# Copyright 2015 Clinton Knight -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. 
import re

from cinder.api.openstack import versioned_method
from cinder import exception
from cinder.i18n import _
from cinder import utils

# Define the minimum and maximum version of the API across all of the
# REST API. The format of the version is:
# X.Y where:
#
# - X will only be changed if a significant backwards incompatible API
#   change is made which affects the API as whole. That is, something
#   that is only very very rarely incremented.
#
# - Y when you make any change to the API. Note that this includes
#   semantic changes which may not affect the input or output formats or
#   even originate in the API code layer. We are not distinguishing
#   between backwards compatible and backwards incompatible changes in
#   the versioning system. It must be made clear in the documentation as
#   to what is a backwards compatible change and what is a backwards
#   incompatible one.

#
# You must update the API version history string below with a one or
# two line description as well as update rest_api_version_history.rst
REST_API_VERSION_HISTORY = """

    REST API Version History:

    * 3.0 - Includes all V2 APIs and extensions. V1 API is still supported.
    * 3.0 - Versions API updated to reflect beginning of microversions epoch.
    * 3.1 - Adds visibility and protected to _volume_upload_image parameters.
    * 3.2 - Bootable filters in volume GET call no longer treats all values
            passed to it as true.
    * 3.3 - Add user messages APIs.
    * 3.4 - Adds glance_metadata filter to list/detail volumes in
            _get_volumes.
    * 3.5 - Add pagination support to messages API.
    * 3.6 - Allows to set empty description and empty name for consistency
            group in consisgroup-update operation.
    * 3.7 - Add cluster API and cluster_name field to service list API
    * 3.8 - Adds resources from volume_manage and snapshot_manage extensions.
    * 3.9 - Add backup update interface.
    * 3.10 - Add group_id filter to list/detail volumes in _get_volumes.
    * 3.11 - Add group types and group specs API.
    * 3.12 - Add volumes summary API.
    * 3.13 - Add generic volume groups API.
    * 3.14 - Add group snapshot and create group from src APIs.
    * 3.15 - Inject the response's `Etag` header to avoid the lost update
             problem with volume metadata.
    * 3.16 - Migrate volume now supports cluster
    * 3.17 - Getting manageable volumes and snapshots now accepts cluster.
    * 3.18 - Add backup project attribute.
    * 3.19 - Add API reset status actions 'reset_status' to group snapshot.
    * 3.20 - Add API reset status actions 'reset_status' to generic
             volume group.
    * 3.21 - Show provider_id in detailed view of a volume for admin.
    * 3.22 - Add filtering based on metadata for snapshot listing.
    * 3.23 - Allow passing force parameter to volume delete.
    * 3.24 - Add workers/cleanup endpoint.
    * 3.25 - Add ``volumes`` field to group list/detail and group show.
    * 3.26 - Add failover action and cluster listings accept new filters and
             return new data.
    * 3.27 - Add attachment API
    * 3.28 - Add filters support to get_pools
    * 3.29 - Add filter, sorter and pagination support in group snapshot.
    * 3.30 - Support sort snapshots with "name".
    * 3.31 - Add support for configure resource query filters.
    * 3.32 - Add set-log and get-log service actions.
    * 3.33 - Add ``resource_filters`` API to retrieve configured
             resource filters.
    * 3.34 - Add like filter support in ``volume``, ``backup``, ``snapshot``,
             ``message``, ``attachment``, ``group`` and ``group-snapshot``
             list APIs.
    * 3.35 - Add ``volume-type`` filter to Get-Pools API.
    * 3.36 - Add metadata to volumes/summary response body.
    * 3.37 - Support sort backup by "name".
    * 3.38 - Add replication group API (Tiramisu).
    * 3.39 - Add ``project_id`` admin filters support to limits.
    * 3.40 - Add volume revert to its latest snapshot support.
    * 3.41 - Add ``user_id`` field to snapshot list/detail and snapshot show.
    * 3.42 - Add ability to extend 'in-use' volume. User should be aware of
             the whole environment before using this feature because it's
             dependent on several external factors below:
             1. nova-compute version - needs to be the latest for Pike.
             2. only the libvirt compute driver supports this currently.
             3. only iscsi and fibre channel volume types are supported
                on the nova side currently.
             Administrator can disable this ability by updating the
             'volume:extend_attached_volume' policy rule. Extend in reserved
             state is intentionally NOT allowed.
"""

# The minimum and maximum versions of the API supported
# The default api version request is defined to be the
# minimum version of the API supported.
# Explicitly using /v1 or /v2 endpoints will still work
_MIN_API_VERSION = "3.0"
_MAX_API_VERSION = "3.42"
_LEGACY_API_VERSION1 = "1.0"
_LEGACY_API_VERSION2 = "2.0"


# NOTE(cyeoh): min and max versions declared as functions so we can
# mock them for unittests. Do not use the constants directly anywhere
# else.
def min_api_version():
    """Return the minimum supported microversion as a request object."""
    return APIVersionRequest(_MIN_API_VERSION)


def max_api_version():
    """Return the maximum supported microversion as a request object."""
    return APIVersionRequest(_MAX_API_VERSION)


def legacy_api_version1():
    """Return the pseudo-version representing the legacy /v1 endpoint."""
    return APIVersionRequest(_LEGACY_API_VERSION1)


def legacy_api_version2():
    """Return the pseudo-version representing the legacy /v2 endpoint."""
    return APIVersionRequest(_LEGACY_API_VERSION2)


class APIVersionRequest(utils.ComparableMixin):
    """This class represents an API Version Request.

    This class includes convenience methods for manipulation
    and comparison of version numbers as needed to implement
    API microversions.
    """

    def __init__(self, version_string=None, experimental=False):
        """Create an API version request object.

        :param version_string: "X.Y" version string, or None for a null
            (unset) request.
        :param experimental: kept for interface compatibility; not stored.
        :raises exception.InvalidAPIVersionString: if version_string is not
            of the form "X.Y" with X >= 1 and no leading zeros.
        """
        self._ver_major = None
        self._ver_minor = None

        if version_string is not None:
            # Major must be >= 1; minor may be 0 but neither part may
            # carry leading zeros.
            match = re.match(r"^([1-9]\d*)\.([1-9]\d*|0)$",
                             version_string)
            if match:
                self._ver_major = int(match.group(1))
                self._ver_minor = int(match.group(2))
            else:
                raise exception.InvalidAPIVersionString(
                    version=version_string)

    def __str__(self):
        """Debug/Logging representation of object."""
        return ("API Version Request Major: %(major)s, Minor: %(minor)s"
                % {'major': self._ver_major, 'minor': self._ver_minor})

    def __bool__(self):
        # A "null" request (no version string given) is falsy.
        return (self._ver_major or self._ver_minor) is not None

    __nonzero__ = __bool__  # Python 2 truthiness protocol

    def _cmpkey(self):
        """Return the value used by ComparableMixin for rich comparisons."""
        return self._ver_major, self._ver_minor

    def matches_versioned_method(self, method):
        """Compares this version to that of a versioned method.

        :param method: a :class:`versioned_method.VersionedMethod` instance.
        :raises exception.InvalidParameterValue: if *method* is not a
            VersionedMethod.
        """
        # NOTE: isinstance (rather than an exact type() comparison) also
        # accepts VersionedMethod subclasses, which expose the same
        # start/end/experimental attributes and compare identically.
        if not isinstance(method, versioned_method.VersionedMethod):
            msg = _('An API version request must be compared '
                    'to a VersionedMethod object.')
            raise exception.InvalidParameterValue(err=msg)

        return self.matches(method.start_version,
                            method.end_version,
                            method.experimental)

    def matches(self, min_version, max_version=None, experimental=False):
        """Compares this version to the specified min/max range.

        Returns whether the version object represents a version
        greater than or equal to the minimum version and less than
        or equal to the maximum version.

        If min_version is null then there is no minimum limit.
        If max_version is null then there is no maximum limit.
        If self is null then raise ValueError.

        :param min_version: Minimum acceptable version.
        :param max_version: Maximum acceptable version.
        :param experimental: Whether to match experimental APIs.
        :returns: boolean
        """
        if not self:
            raise ValueError('A null APIVersionRequest cannot be matched.')

        # Allow callers to pass plain "X.Y" strings for convenience.
        if isinstance(min_version, str):
            min_version = APIVersionRequest(version_string=min_version)
        if isinstance(max_version, str):
            max_version = APIVersionRequest(version_string=max_version)

        if not min_version and not max_version:
            return True

        if not max_version:
            return min_version <= self
        if not min_version:
            return self <= max_version
        return min_version <= self <= max_version

    def get_string(self):
        """Returns a string representation of this object.

        If this method is used to create an APIVersionRequest,
        the resulting object will be an equivalent request.

        :raises ValueError: if this request is null (unset).
        """
        if not self:
            raise ValueError(
                'A null APIVersionRequest has no string representation.')
        return ("%(major)s.%(minor)s" %
                {'major': self._ver_major, 'minor': self._ver_minor})
- - All other 3.0 APIs are functionally identical to version 2.0. - -3.1 ---- - Added the parameters ``protected`` and ``visibility`` to - _volume_upload_image requests. - -3.2 ---- - Change in return value of 'GET API request' for fetching cinder volume - list on the basis of 'bootable' status of volume as filter. - - Before V3.2, 'GET API request' to fetch volume list returns non-bootable - volumes if bootable filter value is any of the false or False. - For any other value provided to this filter, it always returns - bootable volume list. - - But in V3.2, this behavior is updated. - In V3.2, bootable volume list will be returned for any of the - 'T/True/1/true' bootable filter values only. - Non-bootable volume list will be returned for any of 'F/False/0/false' - bootable filter values. - But for any other values passed for bootable filter, it will return - "Invalid input received: bootable={filter value}' error. - -3.3 ---- - Added /messages API. - -3.4 ---- - Added the filter parameters ``glance_metadata`` to - list/detail volumes requests. - -3.5 ---- - Added pagination support to /messages API - -3.6 ---- - Allowed to set empty description and empty name for consistency - group in consisgroup-update operation. - -3.7 ---- - Added ``cluster_name`` field to service list/detail. - - Added /clusters endpoint to list/show/update clusters. - - Show endpoint requires the cluster name and optionally the binary as a URL - parameter (default is "cinder-volume"). Returns: - - .. code-block:: json - - { - "cluster": { - "created_at": "", - "disabled_reason": null, - "last_heartbeat": "", - "name": "cluster_name", - "num_down_hosts": 4, - "num_hosts": 2, - "state": "up", - "status": "enabled", - "updated_at": "" - } - } - - Update endpoint allows enabling and disabling a cluster in a similar way to - service's update endpoint, but in the body we must specify the name and - optionally the binary ("cinder-volume" is the default) and the disabled - reason. Returns: - - .. 
code-block:: json - - { - "cluster": { - "name": "cluster_name", - "state": "up", - "status": "enabled", - "disabled_reason": null - } - } - - Index and detail accept filtering by `name`, `binary`, `disabled`, - `num_hosts` , `num_down_hosts`, and up/down status (`is_up`) as URL - parameters. - - Index endpoint returns: - - .. code-block:: json - - { - "clusters": [ - { - "name": "cluster_name", - "state": "up", - "status": "enabled" - } - ] - } - - Detail endpoint returns: - - .. code-block:: json - - { - "clusters": [ - { - "created_at": "", - "disabled_reason": null, - "last_heartbeat": "", - "name": "cluster_name", - "num_down_hosts": 4, - "num_hosts": 2, - "state": "up", - "status": "enabled", - "updated_at": "" - } - ] - } - -3.8 ---- - Adds the following resources that were previously in extensions: - - os-volume-manage => /v3//manageable_volumes - - os-snapshot-manage => /v3//manageable_snapshots - -3.9 ---- - Added backup update interface to change name and description. - Returns: - - .. code-block:: json - - { - "backup": { - "id": "backup_id", - "name": "backup_name", - "links": "backup_link" - } - } - -3.10 ----- - Added the filter parameters ``group_id`` to - list/detail volumes requests. - -3.11 ----- - Added group types and group specs APIs. - -3.12 ----- - Added volumes/summary API. - -3.13 ----- - Added create/delete/update/list/show APIs for generic volume groups. - -3.14 ----- - Added group snapshots and create group from src APIs. - -3.15 (Maximum in Newton) ------------------------- - Added injecting the response's `Etag` header to avoid the lost update - problem with volume metadata. - -3.16 ----- - os-migrate_volume now accepts ``cluster`` parameter when we want to migrate a - volume to a cluster. If we pass the ``host`` parameter for a volume that is - in a cluster, the request will be sent to the cluster as if we had requested - that specific cluster. Only ``host`` or ``cluster`` can be provided. 
- - Creating a managed volume also supports the cluster parameter. - -3.17 ----- - os-snapshot-manage and os-volume-manage now support ``cluster`` parameter on - listings (summary and detailed). Both location parameters, ``cluster`` and - ``host`` are exclusive and only one should be provided. - -3.18 ----- - Added backup project attribute. - -3.19 ----- - Added reset status actions 'reset_status' to group snapshot. - -3.20 ----- - Added reset status actions 'reset_status' to generic volume group. - -3.21 ----- - Show provider_id in detailed view of a volume for admin. - -3.22 ----- - Added support to filter snapshot list based on metadata of snapshot. - -3.23 ----- - Allow passing force parameter to volume delete. - -3.24 ----- - New API endpoint /workers/cleanup allows triggering cleanup for cinder-volume - services. Meant for cleaning ongoing operations from failed nodes. - - The cleanup will be performed by other services belonging to the same - cluster, so at least one of them must be up to be able to do the cleanup. - - Cleanup cannot be triggered during a cloud upgrade. - - If no arguments are provided cleanup will try to issue a clean message for - all nodes that are down, but we can restrict which nodes we want to be - cleaned using parameters ``service_id``, ``cluster_name``, ``host``, - ``binary``, and ``disabled``. - - Cleaning specific resources is also possible using ``resource_type`` and - ``resource_id`` parameters. - - We can even force cleanup on nodes that are up with ``is_up``, but that's - not recommended and should only used if you know what you are doing. For - example if you know a specific cinder-volume is down even though it's still - not being reported as down when listing the services and you know the cluster - has at least another service to do the cleanup. 
- - API will return a dictionary with 2 lists, one with services that have been - issued a cleanup request (``cleaning`` key) and the other with services - that cannot be cleaned right now because there is no alternative service to - do the cleanup in that cluster (``unavailable`` key). - - Data returned for each service element in these two lists consist of the - ``id``, ``host``, ``binary``, and ``cluster_name``. These are not the - services that will be performing the cleanup, but the services that will be - cleaned up or couldn't be cleaned up. - -3.25 ----- - Add ``volumes`` field to group list/detail and group show. - -3.26 ----- - - New ``failover`` action equivalent to ``failover_host``, but accepting - ``cluster`` parameter as well as the ``host`` cluster that - ``failover_host`` accepts. - - - ``freeze`` and ``thaw`` actions accept ``cluster`` parameter. - - - Cluster listing accepts ``replication_status``, ``frozen`` and - ``active_backend_id`` as filters, and returns additional fields for each - cluster: ``replication_status``, ``frozen``, ``active_backend_id``. - -3.27 (Maximum in Ocata) ------------------------ - Added new attachment APIs - -3.28 ----- - Add filters support to get_pools - -3.29 ----- - Add filter, sorter and pagination support in group snapshot. - -3.30 ----- - Support sort snapshots with "name". - -3.31 ----- - Add support for configure resource query filters. - -3.32 ----- - Added ``set-log`` and ``get-log`` service actions. - -3.33 ----- - Add ``resource_filters`` API to retrieve configured resource filters. - -3.34 ----- - Add like filter support in ``volume``, ``backup``, ``snapshot``, ``message``, - ``attachment``, ``group`` and ``group-snapshot`` list APIs. - -3.35 ----- - Add ``volume-type`` filter to Get-Pools API. - -3.36 ----- - Add metadata to volumes/summary response body. - -3.37 ----- - Support sort backup by "name". 
- -3.38 ----- - Added enable_replication/disable_replication/failover_replication/ - list_replication_targets for replication groups (Tiramisu). - -3.39 ----- - Add ``project_id`` admin filters support to limits. - -3.40 ----- - Add volume revert to its latest snapshot support. - -3.41 ----- - Add ``user_id`` field to snapshot list/detail and snapshot show. - -3.42 ----- - Add ability to extend 'in-use' volume. User should be aware of the - whole environment before using this feature because it's dependent - on several external factors below: - - 1. nova-compute version - needs to be the latest for Pike. - 2. only the libvirt compute driver supports this currently. - 3. only iscsi and fibre channel volume types are supported on the - nova side currently. - - Administrator can disable this ability by updating the - ``volume:extend_attached_volume`` policy rule. Extend of a resered - Volume is NOT allowed. diff --git a/cinder/api/openstack/versioned_method.py b/cinder/api/openstack/versioned_method.py deleted file mode 100644 index 077e87149..000000000 --- a/cinder/api/openstack/versioned_method.py +++ /dev/null @@ -1,48 +0,0 @@ -# Copyright 2014 IBM Corp. -# Copyright 2015 Clinton Knight -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from cinder import utils - - -class VersionedMethod(utils.ComparableMixin): - - def __init__(self, name, start_version, end_version, experimental, func): - """Versioning information for a single method. 
- - Minimum and maximums are inclusive. - - :param name: Name of the method - :param start_version: Minimum acceptable version - :param end_version: Maximum acceptable_version - :param func: Method to call - """ - self.name = name - self.start_version = start_version - self.end_version = end_version - self.experimental = experimental - self.func = func - - def __str__(self): - args = { - 'name': self.name, - 'start': self.start_version, - 'end': self.end_version - } - return ("Version Method %(name)s: min: %(start)s, max: %(end)s" % args) - - def _cmpkey(self): - """Return the value used by ComparableMixin for rich comparisons.""" - return self.start_version diff --git a/cinder/api/openstack/wsgi.py b/cinder/api/openstack/wsgi.py deleted file mode 100644 index eeb9df1cc..000000000 --- a/cinder/api/openstack/wsgi.py +++ /dev/null @@ -1,1433 +0,0 @@ -# Copyright 2011 OpenStack Foundation -# Copyright 2013 IBM Corp. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -import functools -import inspect -import math -import time - -from oslo_log import log as logging -from oslo_serialization import jsonutils -from oslo_utils import encodeutils -from oslo_utils import excutils -from oslo_utils import strutils -import six -from six.moves import http_client -import webob -import webob.exc - -from cinder.api.openstack import api_version_request as api_version -from cinder.api.openstack import versioned_method -from cinder import exception - -from cinder import i18n -i18n.enable_lazy() - -from cinder.i18n import _ -from cinder import policy -from cinder import utils -from cinder.wsgi import common as wsgi - - -LOG = logging.getLogger(__name__) - -SUPPORTED_CONTENT_TYPES = ( - 'application/json', - 'application/vnd.openstack.volume+json', -) - -_MEDIA_TYPE_MAP = { - 'application/vnd.openstack.volume+json': 'json', - 'application/json': 'json', -} - - -# name of attribute to keep version method information -VER_METHOD_ATTR = 'versioned_methods' - -# Name of header used by clients to request a specific version -# of the REST API -API_VERSION_REQUEST_HEADER = 'OpenStack-API-Version' - -VOLUME_SERVICE = 'volume' - - -class Request(webob.Request): - """Add some OpenStack API-specific logic to the base webob.Request.""" - - def __init__(self, *args, **kwargs): - super(Request, self).__init__(*args, **kwargs) - self._resource_cache = {} - if not hasattr(self, 'api_version_request'): - self.api_version_request = api_version.APIVersionRequest() - - def cache_resource(self, resource_to_cache, id_attribute='id', name=None): - """Cache the given resource. - - Allow API methods to cache objects, such as results from a DB query, - to be used by API extensions within the same API request. - - The resource_to_cache can be a list or an individual resource, - but ultimately resources are cached individually using the given - id_attribute. 
- - Different resources types might need to be cached during the same - request, they can be cached using the name parameter. For example: - - Controller 1: - request.cache_resource(db_volumes, 'volumes') - request.cache_resource(db_volume_types, 'types') - Controller 2: - db_volumes = request.cached_resource('volumes') - db_type_1 = request.cached_resource_by_id('1', 'types') - - If no name is given, a default name will be used for the resource. - - An instance of this class only lives for the lifetime of a - single API request, so there's no need to implement full - cache management. - """ - if not isinstance(resource_to_cache, list): - resource_to_cache = [resource_to_cache] - if not name: - name = self.path - cached_resources = self._resource_cache.setdefault(name, {}) - for resource in resource_to_cache: - cached_resources[resource[id_attribute]] = resource - - def cached_resource(self, name=None): - """Get the cached resources cached under the given resource name. - - Allow an API extension to get previously stored objects within - the same API request. - - Note that the object data will be slightly stale. - - :returns: a dict of id_attribute to the resource from the cached - resources, an empty map if an empty collection was cached, - or None if nothing has been cached yet under this name - """ - if not name: - name = self.path - if name not in self._resource_cache: - # Nothing has been cached for this key yet - return None - return self._resource_cache[name] - - def cached_resource_by_id(self, resource_id, name=None): - """Get a resource by ID cached under the given resource name. - - Allow an API extension to get a previously stored object - within the same API request. This is basically a convenience method - to lookup by ID on the dictionary of all cached resources. - - Note that the object data will be slightly stale. 
- - :returns: the cached resource or None if the item is not in the cache - """ - resources = self.cached_resource(name) - if not resources: - # Nothing has been cached yet for this key yet - return None - return resources.get(resource_id) - - def cache_db_items(self, key, items, item_key='id'): - """Get cached database items. - - Allow API methods to store objects from a DB query to be - used by API extensions within the same API request. - - An instance of this class only lives for the lifetime of a - single API request, so there's no need to implement full - cache management. - """ - self.cache_resource(items, item_key, key) - - def get_db_items(self, key): - """Get database items. - - Allow an API extension to get previously stored objects within - the same API request. - - Note that the object data will be slightly stale. - """ - return self.cached_resource(key) - - def get_db_item(self, key, item_key): - """Get database item. - - Allow an API extension to get a previously stored object - within the same API request. - - Note that the object data will be slightly stale. 
- """ - return self.get_db_items(key).get(item_key) - - def cache_db_volumes(self, volumes): - # NOTE(mgagne) Cache it twice for backward compatibility reasons - self.cache_db_items('volumes', volumes, 'id') - self.cache_db_items(self.path, volumes, 'id') - - def cache_db_volume(self, volume): - # NOTE(mgagne) Cache it twice for backward compatibility reasons - self.cache_db_items('volumes', [volume], 'id') - self.cache_db_items(self.path, [volume], 'id') - - def get_db_volumes(self): - return (self.get_db_items('volumes') or - self.get_db_items(self.path)) - - def get_db_volume(self, volume_id): - return (self.get_db_item('volumes', volume_id) or - self.get_db_item(self.path, volume_id)) - - def cache_db_volume_types(self, volume_types): - self.cache_db_items('volume_types', volume_types, 'id') - - def cache_db_volume_type(self, volume_type): - self.cache_db_items('volume_types', [volume_type], 'id') - - def get_db_volume_types(self): - return self.get_db_items('volume_types') - - def get_db_volume_type(self, volume_type_id): - return self.get_db_item('volume_types', volume_type_id) - - def cache_db_snapshots(self, snapshots): - self.cache_db_items('snapshots', snapshots, 'id') - - def cache_db_snapshot(self, snapshot): - self.cache_db_items('snapshots', [snapshot], 'id') - - def get_db_snapshots(self): - return self.get_db_items('snapshots') - - def get_db_snapshot(self, snapshot_id): - return self.get_db_item('snapshots', snapshot_id) - - def cache_db_backups(self, backups): - self.cache_db_items('backups', backups, 'id') - - def cache_db_backup(self, backup): - self.cache_db_items('backups', [backup], 'id') - - def get_db_backups(self): - return self.get_db_items('backups') - - def get_db_backup(self, backup_id): - return self.get_db_item('backups', backup_id) - - def best_match_content_type(self): - """Determine the requested response content-type.""" - if 'cinder.best_content_type' not in self.environ: - # Calculate the best MIME type - content_type = None - 
- # Check URL path suffix - parts = self.path.rsplit('.', 1) - if len(parts) > 1: - possible_type = 'application/' + parts[1] - if possible_type in SUPPORTED_CONTENT_TYPES: - content_type = possible_type - - if not content_type: - content_type = self.accept.best_match(SUPPORTED_CONTENT_TYPES) - - self.environ['cinder.best_content_type'] = (content_type or - 'application/json') - - return self.environ['cinder.best_content_type'] - - def get_content_type(self): - """Determine content type of the request body. - - Does not do any body introspection, only checks header - """ - if "Content-Type" not in self.headers: - return None - - allowed_types = SUPPORTED_CONTENT_TYPES - content_type = self.content_type - - if content_type not in allowed_types: - raise exception.InvalidContentType(content_type=content_type) - - return content_type - - def best_match_language(self): - """Determines best available locale from the Accept-Language header. - - :returns: the best language match or None if the 'Accept-Language' - header was not available in the request. - """ - if not self.accept_language: - return None - all_languages = i18n.get_available_languages() - return self.accept_language.best_match(all_languages) - - def set_api_version_request(self, url): - """Set API version request based on the request header information. - - Microversions starts with /v3, so if a client sends a request for - version 1.0 or 2.0 with the /v3 endpoint, throw an exception. - Sending a header with any microversion to a /v1 or /v2 endpoint will - be ignored. - Note that a microversion must be set for the legacy endpoints. This - will appear as 1.0 and 2.0 for /v1 and /v2. 
- """ - if API_VERSION_REQUEST_HEADER in self.headers and 'v3' in url: - hdr_string = self.headers[API_VERSION_REQUEST_HEADER] - # 'latest' is a special keyword which is equivalent to requesting - # the maximum version of the API supported - hdr_string_list = hdr_string.split(",") - volume_version = None - for hdr in hdr_string_list: - if VOLUME_SERVICE in hdr: - service, volume_version = hdr.split() - break - if not volume_version: - raise exception.VersionNotFoundForAPIMethod( - version=volume_version) - if volume_version == 'latest': - self.api_version_request = api_version.max_api_version() - else: - self.api_version_request = api_version.APIVersionRequest( - volume_version) - - # Check that the version requested is within the global - # minimum/maximum of supported API versions - if not self.api_version_request.matches( - api_version.min_api_version(), - api_version.max_api_version()): - raise exception.InvalidGlobalAPIVersion( - req_ver=self.api_version_request.get_string(), - min_ver=api_version.min_api_version().get_string(), - max_ver=api_version.max_api_version().get_string()) - - else: - if 'v1' in url: - self.api_version_request = api_version.legacy_api_version1() - elif 'v2' in url: - self.api_version_request = api_version.legacy_api_version2() - else: - self.api_version_request = api_version.APIVersionRequest( - api_version._MIN_API_VERSION) - - -class ActionDispatcher(object): - """Maps method name to local methods through action name.""" - - def dispatch(self, *args, **kwargs): - """Find and call local method.""" - action = kwargs.pop('action', 'default') - action_method = getattr(self, six.text_type(action), self.default) - return action_method(*args, **kwargs) - - def default(self, data): - raise NotImplementedError() - - -class TextDeserializer(ActionDispatcher): - """Default request body deserialization.""" - - def deserialize(self, datastring, action='default'): - return self.dispatch(datastring, action=action) - - def default(self, 
datastring): - return {} - - -class JSONDeserializer(TextDeserializer): - - def _from_json(self, datastring): - try: - return jsonutils.loads(datastring) - except ValueError: - msg = _("cannot understand JSON") - raise exception.MalformedRequestBody(reason=msg) - - def default(self, datastring): - return {'body': self._from_json(datastring)} - - -class DictSerializer(ActionDispatcher): - """Default request body serialization.""" - - def serialize(self, data, action='default'): - return self.dispatch(data, action=action) - - def default(self, data): - return "" - - -class JSONDictSerializer(DictSerializer): - """Default JSON request body serialization.""" - - def default(self, data): - return jsonutils.dump_as_bytes(data) - - -def serializers(**serializers): - """Attaches serializers to a method. - - This decorator associates a dictionary of serializers with a - method. Note that the function attributes are directly - manipulated; the method is not wrapped. - """ - - def decorator(func): - if not hasattr(func, 'wsgi_serializers'): - func.wsgi_serializers = {} - func.wsgi_serializers.update(serializers) - return func - return decorator - - -def deserializers(**deserializers): - """Attaches deserializers to a method. - - This decorator associates a dictionary of deserializers with a - method. Note that the function attributes are directly - manipulated; the method is not wrapped. - """ - - def decorator(func): - if not hasattr(func, 'wsgi_deserializers'): - func.wsgi_deserializers = {} - func.wsgi_deserializers.update(deserializers) - return func - return decorator - - -def response(code): - """Attaches response code to a method. - - This decorator associates a response code with a method. Note - that the function attributes are directly manipulated; the method - is not wrapped. - """ - - def decorator(func): - func.wsgi_code = code - return func - return decorator - - -class ResponseObject(object): - """Bundles a response object with appropriate serializers. 
- - Object that app methods may return in order to bind alternate - serializers with a response object to be serialized. Its use is - optional. - """ - - def __init__(self, obj, code=None, headers=None, **serializers): - """Binds serializers with an object. - - Takes keyword arguments akin to the @serializer() decorator - for specifying serializers. Serializers specified will be - given preference over default serializers or method-specific - serializers on return. - """ - - self.obj = obj - self.serializers = serializers - self._default_code = http_client.OK - self._code = code - self._headers = headers or {} - self.serializer = None - self.media_type = None - - def __getitem__(self, key): - """Retrieves a header with the given name.""" - - return self._headers[key.lower()] - - def __setitem__(self, key, value): - """Sets a header with the given name to the given value.""" - - self._headers[key.lower()] = value - - def __delitem__(self, key): - """Deletes the header with the given name.""" - - del self._headers[key.lower()] - - def _bind_method_serializers(self, meth_serializers): - """Binds method serializers with the response object. - - Binds the method serializers with the response object. - Serializers specified to the constructor will take precedence - over serializers specified to this method. - - :param meth_serializers: A dictionary with keys mapping to - response types and values containing - serializer objects. - """ - - # We can't use update because that would be the wrong - # precedence - for mtype, serializer in meth_serializers.items(): - self.serializers.setdefault(mtype, serializer) - - def get_serializer(self, content_type, default_serializers=None): - """Returns the serializer for the wrapped object. - - Returns the serializer for the wrapped object subject to the - indicated content type. If no serializer matching the content - type is attached, an appropriate serializer drawn from the - default serializers will be used. 
If no appropriate - serializer is available, raises InvalidContentType. - """ - - default_serializers = default_serializers or {} - - try: - mtype = _MEDIA_TYPE_MAP.get(content_type, content_type) - if mtype in self.serializers: - return mtype, self.serializers[mtype] - else: - return mtype, default_serializers[mtype] - except (KeyError, TypeError): - raise exception.InvalidContentType(content_type=content_type) - - def preserialize(self, content_type, default_serializers=None): - """Prepares the serializer that will be used to serialize. - - Determines the serializer that will be used and prepares an - instance of it for later call. This allows the serializer to - be accessed by extensions for, e.g., template extension. - """ - - mtype, serializer = self.get_serializer(content_type, - default_serializers) - self.media_type = mtype - self.serializer = serializer() - - def attach(self, **kwargs): - """Attach slave templates to serializers.""" - - if self.media_type in kwargs: - self.serializer.attach(kwargs[self.media_type]) - - def serialize(self, request, content_type, default_serializers=None): - """Serializes the wrapped object. - - Utility method for serializing the wrapped object. Returns a - webob.Response object. 
- """ - - if self.serializer: - serializer = self.serializer - else: - _mtype, _serializer = self.get_serializer(content_type, - default_serializers) - serializer = _serializer() - - response = webob.Response() - response.status_int = self.code - for hdr, value in self._headers.items(): - response.headers[hdr] = six.text_type(value) - response.headers['Content-Type'] = six.text_type(content_type) - if self.obj is not None: - body = serializer.serialize(self.obj) - if isinstance(body, six.text_type): - body = body.encode('utf-8') - response.body = body - - return response - - @property - def code(self): - """Retrieve the response status.""" - - return self._code or self._default_code - - @property - def headers(self): - """Retrieve the headers.""" - - return self._headers.copy() - - -def action_peek_json(body): - """Determine action to invoke.""" - - try: - decoded = jsonutils.loads(body) - except ValueError: - msg = _("cannot understand JSON") - raise exception.MalformedRequestBody(reason=msg) - - # Make sure there's exactly one key... - if len(decoded) != 1: - msg = _("too many body keys") - raise exception.MalformedRequestBody(reason=msg) - - # Return the action and the decoded body... - return list(decoded.keys())[0] - - -class ResourceExceptionHandler(object): - """Context manager to handle Resource exceptions. - - Used when processing exceptions generated by API implementation - methods (or their extensions). Converts most exceptions to Fault - exceptions, with the appropriate logging. 
- """ - - def __enter__(self): - return None - - def __exit__(self, ex_type, ex_value, ex_traceback): - if not ex_value: - return True - - if isinstance(ex_value, exception.NotAuthorized): - msg = six.text_type(ex_value) - raise Fault(webob.exc.HTTPForbidden(explanation=msg)) - elif isinstance(ex_value, exception.VersionNotFoundForAPIMethod): - raise - elif isinstance(ex_value, (exception.Invalid, exception.NotFound)): - raise Fault(exception.ConvertedException( - code=ex_value.code, explanation=six.text_type(ex_value))) - elif isinstance(ex_value, TypeError): - exc_info = (ex_type, ex_value, ex_traceback) - LOG.error('Exception handling resource: %s', - ex_value, exc_info=exc_info) - raise Fault(webob.exc.HTTPBadRequest()) - elif isinstance(ex_value, Fault): - LOG.info("Fault thrown: %s", ex_value) - raise ex_value - elif isinstance(ex_value, webob.exc.HTTPException): - LOG.info("HTTP exception thrown: %s", ex_value) - raise Fault(ex_value) - - # We didn't handle the exception - return False - - -class Resource(wsgi.Application): - """WSGI app that handles (de)serialization and controller dispatch. - - WSGI app that reads routing information supplied by RoutesMiddleware - and calls the requested action method upon its controller. All - controller action methods must accept a 'req' argument, which is the - incoming wsgi.Request. If the operation is a PUT or POST, the controller - method must also accept a 'body' argument (the deserialized request body). - They may raise a webob.exc exception or return a dict, which will be - serialized by requested content type. - - Exceptions derived from webob.exc.HTTPException will be automatically - wrapped in Fault() to provide API friendly error responses. - """ - support_api_request_version = True - - def __init__(self, controller, action_peek=None, **deserializers): - """Initialize Resource. 
- - :param controller: object that implement methods created by routes lib - :param action_peek: dictionary of routines for peeking into an action - request body to determine the desired action - """ - - self.controller = controller - - default_deserializers = dict(json=JSONDeserializer) - default_deserializers.update(deserializers) - - self.default_deserializers = default_deserializers - self.default_serializers = dict(json=JSONDictSerializer) - - self.action_peek = dict(json=action_peek_json) - self.action_peek.update(action_peek or {}) - - # Copy over the actions dictionary - self.wsgi_actions = {} - if controller: - self.register_actions(controller) - - # Save a mapping of extensions - self.wsgi_extensions = {} - self.wsgi_action_extensions = {} - - def register_actions(self, controller): - """Registers controller actions with this resource.""" - - actions = getattr(controller, 'wsgi_actions', {}) - for key, method_name in actions.items(): - self.wsgi_actions[key] = getattr(controller, method_name) - - def register_extensions(self, controller): - """Registers controller extensions with this resource.""" - - extensions = getattr(controller, 'wsgi_extensions', []) - for method_name, action_name in extensions: - # Look up the extending method - extension = getattr(controller, method_name) - - if action_name: - # Extending an action... 
- if action_name not in self.wsgi_action_extensions: - self.wsgi_action_extensions[action_name] = [] - self.wsgi_action_extensions[action_name].append(extension) - else: - # Extending a regular method - if method_name not in self.wsgi_extensions: - self.wsgi_extensions[method_name] = [] - self.wsgi_extensions[method_name].append(extension) - - def get_action_args(self, request_environment): - """Parse dictionary created by routes library.""" - - # NOTE(Vek): Check for get_action_args() override in the - # controller - if hasattr(self.controller, 'get_action_args'): - return self.controller.get_action_args(request_environment) - - try: - args = request_environment['wsgiorg.routing_args'][1].copy() - except (KeyError, IndexError, AttributeError): - return {} - - try: - del args['controller'] - except KeyError: - pass - - try: - del args['format'] - except KeyError: - pass - - return args - - def get_body(self, request): - - if len(request.body) == 0: - LOG.debug("Empty body provided in request") - return None, '' - - try: - content_type = request.get_content_type() - except exception.InvalidContentType: - LOG.debug("Unrecognized Content-Type provided in request") - return None, '' - - if not content_type: - LOG.debug("No Content-Type provided in request") - return None, '' - - return content_type, request.body - - def deserialize(self, meth, content_type, body): - meth_deserializers = getattr(meth, 'wsgi_deserializers', {}) - try: - mtype = _MEDIA_TYPE_MAP.get(content_type, content_type) - if mtype in meth_deserializers: - deserializer = meth_deserializers[mtype] - else: - deserializer = self.default_deserializers[mtype] - except (KeyError, TypeError): - raise exception.InvalidContentType(content_type=content_type) - - return deserializer().deserialize(body) - - def pre_process_extensions(self, extensions, request, action_args): - # List of callables for post-processing extensions - post = [] - - for ext in extensions: - if inspect.isgeneratorfunction(ext): - 
response = None - - # If it's a generator function, the part before the - # yield is the preprocessing stage - try: - with ResourceExceptionHandler(): - gen = ext(req=request, **action_args) - response = next(gen) - except Fault as ex: - response = ex - - # We had a response... - if response: - return response, [] - - # No response, queue up generator for post-processing - post.append(gen) - else: - # Regular functions only perform post-processing - post.append(ext) - - # Run post-processing in the reverse order - return None, reversed(post) - - def post_process_extensions(self, extensions, resp_obj, request, - action_args): - for ext in extensions: - response = None - if inspect.isgenerator(ext): - # If it's a generator, run the second half of - # processing - try: - with ResourceExceptionHandler(): - response = ext.send(resp_obj) - except StopIteration: - # Normal exit of generator - continue - except Fault as ex: - response = ex - else: - # Regular functions get post-processing... - try: - with ResourceExceptionHandler(): - response = ext(req=request, resp_obj=resp_obj, - **action_args) - except exception.VersionNotFoundForAPIMethod: - # If an attached extension (@wsgi.extends) for the - # method has no version match its not an error. We - # just don't run the extends code - continue - except Fault as ex: - response = ex - - # We had a response... 
- if response: - return response - - return None - - @webob.dec.wsgify(RequestClass=Request) - def __call__(self, request): - """WSGI method that controls (de)serialization and method dispatch.""" - - LOG.info("%(method)s %(url)s", - {"method": request.method, - "url": request.url}) - - if self.support_api_request_version: - # Set the version of the API requested based on the header - try: - request.set_api_version_request(request.url) - except exception.InvalidAPIVersionString as e: - return Fault(webob.exc.HTTPBadRequest( - explanation=six.text_type(e))) - except exception.InvalidGlobalAPIVersion as e: - return Fault(webob.exc.HTTPNotAcceptable( - explanation=six.text_type(e))) - - # Identify the action, its arguments, and the requested - # content type - action_args = self.get_action_args(request.environ) - action = action_args.pop('action', None) - content_type, body = self.get_body(request) - accept = request.best_match_content_type() - - # NOTE(Vek): Splitting the function up this way allows for - # auditing by external tools that wrap the existing - # function. If we try to audit __call__(), we can - # run into troubles due to the @webob.dec.wsgify() - # decorator. 
- return self._process_stack(request, action, action_args, - content_type, body, accept) - - def _process_stack(self, request, action, action_args, - content_type, body, accept): - """Implement the processing stack.""" - - # Get the implementing method - try: - meth, extensions = self.get_method(request, action, - content_type, body) - except (AttributeError, TypeError): - return Fault(webob.exc.HTTPNotFound()) - except KeyError as ex: - msg = _("There is no such action: %s") % ex.args[0] - return Fault(webob.exc.HTTPBadRequest(explanation=msg)) - except exception.MalformedRequestBody: - msg = _("Malformed request body") - return Fault(webob.exc.HTTPBadRequest(explanation=msg)) - - if body: - decoded_body = encodeutils.safe_decode(body, errors='ignore') - msg = ("Action: '%(action)s', calling method: %(meth)s, body: " - "%(body)s") % {'action': action, - 'body': six.text_type(decoded_body), - 'meth': six.text_type(meth)} - LOG.debug(strutils.mask_password(msg)) - else: - LOG.debug("Calling method '%(meth)s'", - {'meth': six.text_type(meth)}) - - # Now, deserialize the request body... 
- try: - if content_type: - contents = self.deserialize(meth, content_type, body) - else: - contents = {} - except exception.InvalidContentType: - msg = _("Unsupported Content-Type") - return Fault(webob.exc.HTTPBadRequest(explanation=msg)) - except exception.MalformedRequestBody: - msg = _("Malformed request body") - return Fault(webob.exc.HTTPBadRequest(explanation=msg)) - - # Update the action args - action_args.update(contents) - - project_id = action_args.pop("project_id", None) - context = request.environ.get('cinder.context') - if (context and project_id and (project_id != context.project_id)): - msg = _("Malformed request url") - return Fault(webob.exc.HTTPBadRequest(explanation=msg)) - - # Run pre-processing extensions - response, post = self.pre_process_extensions(extensions, - request, action_args) - - if not response: - try: - with ResourceExceptionHandler(): - action_result = self.dispatch(meth, request, action_args) - except Fault as ex: - response = ex - - if not response: - # No exceptions; convert action_result into a - # ResponseObject - resp_obj = None - if isinstance(action_result, dict) or action_result is None: - resp_obj = ResponseObject(action_result) - elif isinstance(action_result, ResponseObject): - resp_obj = action_result - else: - response = action_result - - # Run post-processing extensions - if resp_obj: - _set_request_id_header(request, resp_obj) - # Do a preserialize to set up the response object - serializers = getattr(meth, 'wsgi_serializers', {}) - resp_obj._bind_method_serializers(serializers) - if hasattr(meth, 'wsgi_code'): - resp_obj._default_code = meth.wsgi_code - resp_obj.preserialize(accept, self.default_serializers) - - # Process post-processing extensions - response = self.post_process_extensions(post, resp_obj, - request, action_args) - - if resp_obj and not response: - response = resp_obj.serialize(request, accept, - self.default_serializers) - - try: - msg_dict = dict(url=request.url, status=response.status_int) - 
msg = "%(url)s returned with HTTP %(status)d" - except AttributeError as e: - msg_dict = dict(url=request.url, e=e) - msg = "%(url)s returned a fault: %(e)s" - - LOG.info(msg, msg_dict) - - if hasattr(response, 'headers'): - for hdr, val in response.headers.items(): - # Headers must be utf-8 strings - val = utils.convert_str(val) - - response.headers[hdr] = val - - if (request.api_version_request and - not _is_legacy_endpoint(request)): - response.headers[API_VERSION_REQUEST_HEADER] = ( - VOLUME_SERVICE + ' ' + - request.api_version_request.get_string()) - response.headers['Vary'] = API_VERSION_REQUEST_HEADER - - return response - - def get_method(self, request, action, content_type, body): - """Look up the action-specific method and its extensions.""" - - # Look up the method - try: - if not self.controller: - meth = getattr(self, action) - else: - meth = getattr(self.controller, action) - except AttributeError as e: - with excutils.save_and_reraise_exception(e) as ctxt: - if (not self.wsgi_actions or action not in ['action', - 'create', - 'delete', - 'update']): - LOG.exception('Get method error.') - else: - ctxt.reraise = False - else: - return meth, self.wsgi_extensions.get(action, []) - - if action == 'action': - # OK, it's an action; figure out which action... - mtype = _MEDIA_TYPE_MAP.get(content_type) - action_name = self.action_peek[mtype](body) - LOG.debug("Action body: %s", body) - else: - action_name = action - - # Look up the action method - return (self.wsgi_actions[action_name], - self.wsgi_action_extensions.get(action_name, [])) - - def dispatch(self, method, request, action_args): - """Dispatch a call to the action-specific method.""" - - try: - return method(req=request, **action_args) - except exception.VersionNotFoundForAPIMethod: - # We deliberately don't return any message information - # about the exception to the user so it looks as if - # the method is simply not implemented. 
- return Fault(webob.exc.HTTPNotFound()) - - -def action(name): - """Mark a function as an action. - - The given name will be taken as the action key in the body. - - This is also overloaded to allow extensions to provide - non-extending definitions of create and delete operations. - """ - - def decorator(func): - func.wsgi_action = name - return func - return decorator - - -def extends(*args, **kwargs): - """Indicate a function extends an operation. - - Can be used as either:: - - @extends - def index(...): - pass - - or as:: - - @extends(action='resize') - def _action_resize(...): - pass - """ - - def decorator(func): - # Store enough information to find what we're extending - func.wsgi_extends = (func.__name__, kwargs.get('action')) - return func - - # If we have positional arguments, call the decorator - if args: - return decorator(*args) - - # OK, return the decorator instead - return decorator - - -class ControllerMetaclass(type): - """Controller metaclass. - - This metaclass automates the task of assembling a dictionary - mapping action keys to method names. - """ - - def __new__(mcs, name, bases, cls_dict): - """Adds the wsgi_actions dictionary to the class.""" - - # Find all actions - actions = {} - extensions = [] - # NOTE(geguileo): We'll keep a list of versioned methods that have been - # added by the new metaclass (dictionary in attribute VER_METHOD_ATTR - # on Controller class) and all the versioned methods from the different - # base classes so we can consolidate them. - versioned_methods = [] - - # NOTE(cyeoh): This resets the VER_METHOD_ATTR attribute - # between API controller class creations. This allows us - # to use a class decorator on the API methods that doesn't - # require naming explicitly what method is being versioned as - # it can be implicit based on the method decorated. It is a bit - # ugly. 
- if bases != (object,) and VER_METHOD_ATTR in vars(Controller): - # Get the versioned methods that this metaclass creation has added - # to the Controller class - versioned_methods.append(getattr(Controller, VER_METHOD_ATTR)) - # Remove them so next metaclass has a clean start - delattr(Controller, VER_METHOD_ATTR) - - # start with wsgi actions from base classes - for base in bases: - actions.update(getattr(base, 'wsgi_actions', {})) - - # Get the versioned methods that this base has - if VER_METHOD_ATTR in vars(base): - versioned_methods.append(getattr(base, VER_METHOD_ATTR)) - - for key, value in cls_dict.items(): - if not callable(value): - continue - if getattr(value, 'wsgi_action', None): - actions[value.wsgi_action] = key - elif getattr(value, 'wsgi_extends', None): - extensions.append(value.wsgi_extends) - - # Add the actions and extensions to the class dict - cls_dict['wsgi_actions'] = actions - cls_dict['wsgi_extensions'] = extensions - if versioned_methods: - cls_dict[VER_METHOD_ATTR] = mcs.consolidate_vers(versioned_methods) - - return super(ControllerMetaclass, mcs).__new__(mcs, name, bases, - cls_dict) - - @staticmethod - def consolidate_vers(versioned_methods): - """Consolidates a list of versioned methods dictionaries.""" - if not versioned_methods: - return {} - result = versioned_methods.pop(0) - for base_methods in versioned_methods: - for name, methods in base_methods.items(): - method_list = result.setdefault(name, []) - method_list.extend(methods) - method_list.sort(reverse=True) - return result - - -@six.add_metaclass(ControllerMetaclass) -class Controller(object): - """Default controller.""" - - _view_builder_class = None - - def __init__(self, view_builder=None): - """Initialize controller with a view builder instance.""" - if view_builder: - self._view_builder = view_builder - elif self._view_builder_class: - self._view_builder = self._view_builder_class() - else: - self._view_builder = None - - def __getattribute__(self, key): - - def 
version_select(*args, **kwargs): - """Select and call the matching version of the specified method. - - Look for the method which matches the name supplied and version - constraints and calls it with the supplied arguments. - - :returns: Returns the result of the method called - :raises VersionNotFoundForAPIMethod: if there is no method which - matches the name and version constraints - """ - - # The first arg to all versioned methods is always the request - # object. The version for the request is attached to the - # request object - if len(args) == 0: - version_request = kwargs['req'].api_version_request - else: - version_request = args[0].api_version_request - - func_list = self.versioned_methods[key] - for func in func_list: - if version_request.matches_versioned_method(func): - # Update the version_select wrapper function so - # other decorator attributes like wsgi.response - # are still respected. - functools.update_wrapper(version_select, func.func) - return func.func(self, *args, **kwargs) - - # No version match - raise exception.VersionNotFoundForAPIMethod( - version=version_request) - - try: - version_meth_dict = object.__getattribute__(self, VER_METHOD_ATTR) - except AttributeError: - # No versioning on this class - return object.__getattribute__(self, key) - - if (version_meth_dict and key in - object.__getattribute__(self, VER_METHOD_ATTR)): - - return version_select - - return object.__getattribute__(self, key) - - # NOTE(cyeoh): This decorator MUST appear first (the outermost - # decorator) on an API method for it to work correctly - @classmethod - def api_version(cls, min_ver, max_ver=None, experimental=False): - """Decorator for versioning API methods. - - Add the decorator to any method which takes a request object - as the first parameter and belongs to a class which inherits from - wsgi.Controller. 
- - :param min_ver: string representing minimum version - :param max_ver: optional string representing maximum version - """ - - def decorator(f): - obj_min_ver = api_version.APIVersionRequest(min_ver) - if max_ver: - obj_max_ver = api_version.APIVersionRequest(max_ver) - else: - obj_max_ver = api_version.APIVersionRequest() - - # Add to list of versioned methods registered - func_name = f.__name__ - new_func = versioned_method.VersionedMethod( - func_name, obj_min_ver, obj_max_ver, experimental, f) - - func_dict = getattr(cls, VER_METHOD_ATTR, {}) - if not func_dict: - setattr(cls, VER_METHOD_ATTR, func_dict) - - func_list = func_dict.get(func_name, []) - if not func_list: - func_dict[func_name] = func_list - func_list.append(new_func) - # Ensure the list is sorted by minimum version (reversed) - # so later when we work through the list in order we find - # the method which has the latest version which supports - # the version requested. - # TODO(cyeoh): Add check to ensure that there are no overlapping - # ranges of valid versions as that is ambiguous - func_list.sort(reverse=True) - - # NOTE(geguileo): To avoid PEP8 errors when defining multiple - # microversions of the same method in the same class we add the - # api_version decorator to the function so it can be used instead, - # thus preventing method redefinition errors. - f.api_version = cls.api_version - - return f - - return decorator - - @staticmethod - def is_valid_body(body, entity_name): - if not (body and entity_name in body): - return False - - def is_dict(d): - try: - d.get(None) - return True - except AttributeError: - return False - - if not is_dict(body[entity_name]): - return False - - return True - - @staticmethod - def assert_valid_body(body, entity_name): - # NOTE: After v1 api is deprecated need to merge 'is_valid_body' and - # 'assert_valid_body' in to one method. 
Right now it is not - # possible to modify 'is_valid_body' to raise exception because - # in case of V1 api when 'is_valid_body' return False, - # 'HTTPUnprocessableEntity' exception is getting raised and in - # V2 api 'HTTPBadRequest' exception is getting raised. - if not Controller.is_valid_body(body, entity_name): - raise webob.exc.HTTPBadRequest( - explanation=_("Missing required element '%s' in " - "request body.") % entity_name) - - @staticmethod - def validate_name_and_description(body): - for attribute in ['name', 'description', - 'display_name', 'display_description']: - value = body.get(attribute) - if value is not None: - if isinstance(value, six.string_types): - body[attribute] = value.strip() - try: - utils.check_string_length(body[attribute], attribute, - min_length=0, max_length=255) - except exception.InvalidInput as error: - raise webob.exc.HTTPBadRequest(explanation=error.msg) - - @staticmethod - def validate_string_length(value, entity_name, min_length=0, - max_length=None, remove_whitespaces=False): - """Check the length of specified string. 
- - :param value: the value of the string - :param entity_name: the name of the string - :param min_length: the min_length of the string - :param max_length: the max_length of the string - :param remove_whitespaces: True if trimming whitespaces is needed - else False - """ - if isinstance(value, six.string_types) and remove_whitespaces: - value = value.strip() - try: - utils.check_string_length(value, entity_name, - min_length=min_length, - max_length=max_length) - except exception.InvalidInput as error: - raise webob.exc.HTTPBadRequest(explanation=error.msg) - - @staticmethod - def get_policy_checker(prefix): - @staticmethod - def policy_checker(req, action, resource=None): - ctxt = req.environ['cinder.context'] - target = { - 'project_id': ctxt.project_id, - 'user_id': ctxt.user_id, - } - if resource: - target.update(resource) - - _action = '%s:%s' % (prefix, action) - policy.enforce(ctxt, _action, target) - return ctxt - return policy_checker - - -class Fault(webob.exc.HTTPException): - """Wrap webob.exc.HTTPException to provide API friendly response.""" - - _fault_names = {http_client.BAD_REQUEST: "badRequest", - http_client.UNAUTHORIZED: "unauthorized", - http_client.FORBIDDEN: "forbidden", - http_client.NOT_FOUND: "itemNotFound", - http_client.METHOD_NOT_ALLOWED: "badMethod", - http_client.CONFLICT: "conflictingRequest", - http_client.REQUEST_ENTITY_TOO_LARGE: "overLimit", - http_client.UNSUPPORTED_MEDIA_TYPE: "badMediaType", - http_client.NOT_IMPLEMENTED: "notImplemented", - http_client.SERVICE_UNAVAILABLE: "serviceUnavailable"} - - def __init__(self, exception): - """Create a Fault for the given webob.exc.exception.""" - self.wrapped_exc = exception - self.status_int = exception.status_int - - @webob.dec.wsgify(RequestClass=Request) - def __call__(self, req): - """Generate a WSGI response based on the exception passed to ctor.""" - # Replace the body with fault details. 
- locale = req.best_match_language() - code = self.wrapped_exc.status_int - fault_name = self._fault_names.get(code, "computeFault") - explanation = self.wrapped_exc.explanation - fault_data = { - fault_name: { - 'code': code, - 'message': i18n.translate(explanation, locale)}} - if code == http_client.REQUEST_ENTITY_TOO_LARGE: - retry = self.wrapped_exc.headers.get('Retry-After', None) - if retry: - fault_data[fault_name]['retryAfter'] = retry - - if req.api_version_request and not _is_legacy_endpoint(req): - self.wrapped_exc.headers[API_VERSION_REQUEST_HEADER] = ( - VOLUME_SERVICE + ' ' + req.api_version_request.get_string()) - self.wrapped_exc.headers['Vary'] = API_VERSION_REQUEST_HEADER - - content_type = req.best_match_content_type() - serializer = { - 'application/json': JSONDictSerializer(), - }[content_type] - - body = serializer.serialize(fault_data) - if isinstance(body, six.text_type): - body = body.encode('utf-8') - self.wrapped_exc.body = body - self.wrapped_exc.content_type = content_type - _set_request_id_header(req, self.wrapped_exc.headers) - - return self.wrapped_exc - - def __str__(self): - return self.wrapped_exc.__str__() - - -def _set_request_id_header(req, headers): - context = req.environ.get('cinder.context') - if context: - headers['x-compute-request-id'] = context.request_id - - -def _is_legacy_endpoint(request): - version_str = request.api_version_request.get_string() - return '1.0' in version_str or '2.0' in version_str - - -class OverLimitFault(webob.exc.HTTPException): - """Rate-limited request response.""" - - def __init__(self, message, details, retry_time): - """Initialize new `OverLimitFault` with relevant information.""" - hdrs = OverLimitFault._retry_after(retry_time) - self.wrapped_exc = webob.exc.HTTPRequestEntityTooLarge(headers=hdrs) - self.content = { - "overLimitFault": { - "code": self.wrapped_exc.status_int, - "message": message, - "details": details, - }, - } - - @staticmethod - def _retry_after(retry_time): - delay = 
int(math.ceil(retry_time - time.time())) - retry_after = delay if delay > 0 else 0 - headers = {'Retry-After': '%d' % retry_after} - return headers - - @webob.dec.wsgify(RequestClass=Request) - def __call__(self, request): - """Serializes the wrapped exception conforming to our error format.""" - content_type = request.best_match_content_type() - - def translate(msg): - locale = request.best_match_language() - return i18n.translate(msg, locale) - - self.content['overLimitFault']['message'] = \ - translate(self.content['overLimitFault']['message']) - self.content['overLimitFault']['details'] = \ - translate(self.content['overLimitFault']['details']) - - serializer = { - 'application/json': JSONDictSerializer(), - }[content_type] - - content = serializer.serialize(self.content) - self.wrapped_exc.body = content - - return self.wrapped_exc diff --git a/cinder/api/urlmap.py b/cinder/api/urlmap.py deleted file mode 100644 index ed1afb39c..000000000 --- a/cinder/api/urlmap.py +++ /dev/null @@ -1,294 +0,0 @@ -# Copyright 2011 OpenStack Foundation -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -import re - -import paste.urlmap -try: - from urllib.request import parse_http_list # pylint: disable=E0611 -except ImportError: - from urllib2 import parse_http_list # Python 2 - -from cinder.api.openstack import wsgi - - -_quoted_string_re = r'"[^"\\]*(?:\\.[^"\\]*)*"' -_option_header_piece_re = re.compile( - r';\s*([^\s;=]+|%s)\s*' - r'(?:=\s*([^;]+|%s))?\s*' % - (_quoted_string_re, _quoted_string_re)) - - -def unquote_header_value(value): - """Unquotes a header value. - - This does not use the real unquoting but what browsers are actually - using for quoting. - - :param value: the header value to unquote. - """ - if value and value[0] == value[-1] == '"': - # this is not the real unquoting, but fixing this so that the - # RFC is met will result in bugs with internet explorer and - # probably some other browsers as well. IE for example is - # uploading files with "C:\foo\bar.txt" as filename - value = value[1:-1] - return value - - -def parse_list_header(value): - """Parse lists as described by RFC 2068 Section 2. - - In particular, parse comma-separated lists where the elements of - the list may include quoted-strings. A quoted-string could - contain a comma. A non-quoted string could have quotes in the - middle. Quotes are removed automatically after parsing. - - The return value is a standard :class:`list`: - - >>> parse_list_header('token, "quoted value"') - ['token', 'quoted value'] - - :param value: a string with a list header. - :return: :class:`list` - """ - result = [] - for item in parse_http_list(value): - if item[:1] == item[-1:] == '"': - item = unquote_header_value(item[1:-1]) - result.append(item) - return result - - -def parse_options_header(value): - """Parse 'Content-Type'-like header into a tuple. 
- - Parse a ``Content-Type`` like header into a tuple with the content - type and the options: - - >>> parse_options_header('Content-Type: text/html; mimetype=text/html') - ('Content-Type:', {'mimetype': 'text/html'}) - - :param value: the header to parse. - :return: (str, options) - """ - def _tokenize(string): - for match in _option_header_piece_re.finditer(string): - key, value = match.groups() - key = unquote_header_value(key) - if value is not None: - value = unquote_header_value(value) - yield key, value - - if not value: - return '', {} - - parts = _tokenize(';' + value) - name = next(parts)[0] - extra = dict(parts) - return name, extra - - -class Accept(object): - def __init__(self, value): - self._content_types = [parse_options_header(v) for v in - parse_list_header(value)] - - def best_match(self, supported_content_types): - # FIXME: Should we have a more sophisticated matching algorithm that - # takes into account the version as well? - best_quality = -1 - best_content_type = None - best_params = {} - best_match = '*/*' - - for content_type in supported_content_types: - for content_mask, params in self._content_types: - try: - quality = float(params.get('q', 1)) - except ValueError: - continue - - if quality < best_quality: - continue - elif best_quality == quality: - if best_match.count('*') <= content_mask.count('*'): - continue - - if self._match_mask(content_mask, content_type): - best_quality = quality - best_content_type = content_type - best_params = params - best_match = content_mask - - return best_content_type, best_params - - def content_type_params(self, best_content_type): - """Find parameters in Accept header for given content type.""" - for content_type, params in self._content_types: - if best_content_type == content_type: - return params - - return {} - - def _match_mask(self, mask, content_type): - if '*' not in mask: - return content_type == mask - if mask == '*/*': - return True - mask_major = mask[:-2] - content_type_major = 
content_type.split('/', 1)[0] - return content_type_major == mask_major - - -def urlmap_factory(loader, global_conf, **local_conf): - if 'not_found_app' in local_conf: - not_found_app = local_conf.pop('not_found_app') - else: - not_found_app = global_conf.get('not_found_app') - if not_found_app: - not_found_app = loader.get_app(not_found_app, global_conf=global_conf) - urlmap = URLMap(not_found_app=not_found_app) - for path, app_name in local_conf.items(): - path = paste.urlmap.parse_path_expression(path) - app = loader.get_app(app_name, global_conf=global_conf) - urlmap[path] = app - return urlmap - - -class URLMap(paste.urlmap.URLMap): - def _match(self, host, port, path_info): - """Find longest match for a given URL path.""" - for (domain, app_url), app in self.applications: - if domain and domain != host and domain != host + ':' + port: - continue - if (path_info == app_url or path_info.startswith(app_url + '/')): - return app, app_url - - return None, None - - def _set_script_name(self, app, app_url): - def wrap(environ, start_response): - environ['SCRIPT_NAME'] += app_url - return app(environ, start_response) - - return wrap - - def _munge_path(self, app, path_info, app_url): - def wrap(environ, start_response): - environ['SCRIPT_NAME'] += app_url - environ['PATH_INFO'] = path_info[len(app_url):] - return app(environ, start_response) - - return wrap - - def _path_strategy(self, host, port, path_info): - """Check path suffix for MIME type and path prefix for API version.""" - mime_type = app = app_url = None - - parts = path_info.rsplit('.', 1) - if len(parts) > 1: - possible_type = 'application/' + parts[1] - if possible_type in wsgi.SUPPORTED_CONTENT_TYPES: - mime_type = possible_type - - parts = path_info.split('/') - if len(parts) > 1: - possible_app, possible_app_url = self._match(host, port, path_info) - # Don't use prefix if it ends up matching default - if possible_app and possible_app_url: - app_url = possible_app_url - app = 
self._munge_path(possible_app, path_info, app_url) - - return mime_type, app, app_url - - def _content_type_strategy(self, host, port, environ): - """Check Content-Type header for API version.""" - app = None - params = parse_options_header(environ.get('CONTENT_TYPE', ''))[1] - if 'version' in params: - app, app_url = self._match(host, port, '/v' + params['version']) - if app: - app = self._set_script_name(app, app_url) - - return app - - def _accept_strategy(self, host, port, environ, supported_content_types): - """Check Accept header for best matching MIME type and API version.""" - accept = Accept(environ.get('HTTP_ACCEPT', '')) - - app = None - - # Find the best match in the Accept header - mime_type, params = accept.best_match(supported_content_types) - if 'version' in params: - app, app_url = self._match(host, port, '/v' + params['version']) - if app: - app = self._set_script_name(app, app_url) - - return mime_type, app - - def __call__(self, environ, start_response): - host = environ.get('HTTP_HOST', environ.get('SERVER_NAME')).lower() - if ':' in host: - host, port = host.split(':', 1) - else: - if environ['wsgi.url_scheme'] == 'http': - port = '80' - else: - port = '443' - - path_info = environ['PATH_INFO'] - path_info = self.normalize_url(path_info, False)[1] - - # The MIME type for the response is determined in one of two ways: - # 1) URL path suffix (eg /servers/detail.json) - # 2) Accept header (eg application/json;q=0.8) - - # The API version is determined in one of three ways: - # 1) URL path prefix (eg /v1.1/tenant/servers/detail) - # 2) Content-Type header (eg application/json;version=1.1) - # 3) Accept header (eg application/json;q=0.8;version=1.1) - - supported_content_types = list(wsgi.SUPPORTED_CONTENT_TYPES) - - mime_type, app, app_url = self._path_strategy(host, port, path_info) - - if not app: - app = self._content_type_strategy(host, port, environ) - - if not mime_type or not app: - possible_mime_type, possible_app = self._accept_strategy( 
- host, port, environ, supported_content_types) - if possible_mime_type and not mime_type: - mime_type = possible_mime_type - if possible_app and not app: - app = possible_app - - if not mime_type: - mime_type = 'application/json' - - if not app: - # Didn't match a particular version, probably matches default - app, app_url = self._match(host, port, path_info) - if app: - app = self._munge_path(app, path_info, app_url) - - if app: - environ['cinder.best_content_type'] = mime_type - return app(environ, start_response) - - environ['paste.urlmap_object'] = self - return self.not_found_application(environ, start_response) diff --git a/cinder/api/v1/__init__.py b/cinder/api/v1/__init__.py deleted file mode 100644 index e69de29bb..000000000 diff --git a/cinder/api/v1/router.py b/cinder/api/v1/router.py deleted file mode 100644 index 1c3b89ac2..000000000 --- a/cinder/api/v1/router.py +++ /dev/null @@ -1,92 +0,0 @@ -# Copyright 2011 OpenStack Foundation -# Copyright 2011 United States Government as represented by the -# Administrator of the National Aeronautics and Space Administration. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -""" -WSGI middleware for OpenStack Volume API. 
-""" - -from cinder.api import extensions -import cinder.api.openstack -from cinder.api.v1 import snapshots -from cinder.api.v1 import volumes -from cinder.api.v2 import limits -from cinder.api.v2 import snapshot_metadata -from cinder.api.v2 import types -from cinder.api.v2 import volume_metadata -from cinder.api import versions - - -class APIRouter(cinder.api.openstack.APIRouter): - """Routes requests on the API to the appropriate controller and method.""" - ExtensionManager = extensions.ExtensionManager - - def _setup_routes(self, mapper, ext_mgr): - self.resources['versions'] = versions.create_resource() - mapper.connect("versions", "/", - controller=self.resources['versions'], - action='index') - - mapper.redirect("", "/") - - self.resources['volumes'] = volumes.create_resource(ext_mgr) - mapper.resource("volume", "volumes", - controller=self.resources['volumes'], - collection={'detail': 'GET'}, - member={'action': 'POST'}) - - self.resources['types'] = types.create_resource() - mapper.resource("type", "types", - controller=self.resources['types']) - - self.resources['snapshots'] = snapshots.create_resource(ext_mgr) - mapper.resource("snapshot", "snapshots", - controller=self.resources['snapshots'], - collection={'detail': 'GET'}, - member={'action': 'POST'}) - - self.resources['snapshot_metadata'] = \ - snapshot_metadata.create_resource() - snapshot_metadata_controller = self.resources['snapshot_metadata'] - - mapper.resource("snapshot_metadata", "metadata", - controller=snapshot_metadata_controller, - parent_resource=dict(member_name='snapshot', - collection_name='snapshots')) - - mapper.connect("metadata", - "/{project_id}/snapshots/{snapshot_id}/metadata", - controller=snapshot_metadata_controller, - action='update_all', - conditions={"method": ['PUT']}) - - self.resources['limits'] = limits.create_resource() - mapper.resource("limit", "limits", - controller=self.resources['limits']) - self.resources['volume_metadata'] = \ - 
volume_metadata.create_resource() - volume_metadata_controller = self.resources['volume_metadata'] - - mapper.resource("volume_metadata", "metadata", - controller=volume_metadata_controller, - parent_resource=dict(member_name='volume', - collection_name='volumes')) - - mapper.connect("metadata", - "/{project_id}/volumes/{volume_id}/metadata", - controller=volume_metadata_controller, - action='update_all', - conditions={"method": ['PUT']}) diff --git a/cinder/api/v1/snapshots.py b/cinder/api/v1/snapshots.py deleted file mode 100644 index 76c44165f..000000000 --- a/cinder/api/v1/snapshots.py +++ /dev/null @@ -1,105 +0,0 @@ -# Copyright 2011 Justin Santa Barbara -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -"""The volumes snapshots api.""" - - -from six.moves import http_client -from webob import exc - -from cinder.api.openstack import wsgi -from cinder.api.v2 import snapshots as snapshots_v2 - - -def _snapshot_v2_to_v1(snapv2_result): - """Transform a v2 snapshot dict to v1.""" - snapshots = snapv2_result.get('snapshots') - if snapshots is None: - snapshots = [snapv2_result['snapshot']] - - for snapv1 in snapshots: - # The updated_at property was added in v2 - snapv1.pop('updated_at', None) - - # Name and description were renamed - snapv1['display_name'] = snapv1.pop('name', '') - snapv1['display_description'] = snapv1.pop('description', '') - - return snapv2_result - - -def _update_search_opts(req): - """Update the requested search options. 
- - This is a little silly, as ``display_name`` needs to be switched - to just ``name``, which internally to v2 gets switched to be - ``display_name``. Oh well. - """ - if 'display_name' in req.GET: - req.GET['name'] = req.GET.pop('display_name') - return req - - -class SnapshotsController(snapshots_v2.SnapshotsController): - """The Snapshots API controller for the OpenStack API.""" - - def show(self, req, id): - """Return data about the given snapshot.""" - result = super(SnapshotsController, self).show(req, id) - return _snapshot_v2_to_v1(result) - - def index(self, req): - """Returns a summary list of snapshots.""" - return _snapshot_v2_to_v1( - super(SnapshotsController, self).index( - _update_search_opts(req))) - - def detail(self, req): - """Returns a detailed list of snapshots.""" - return _snapshot_v2_to_v1( - super(SnapshotsController, self).detail( - _update_search_opts(req))) - - @wsgi.response(http_client.OK) - def create(self, req, body): - """Creates a new snapshot.""" - if (body is None or not body.get('snapshot') or - not isinstance(body['snapshot'], dict)): - raise exc.HTTPUnprocessableEntity() - - if 'display_name' in body['snapshot']: - body['snapshot']['name'] = body['snapshot'].pop('display_name') - - if 'display_description' in body['snapshot']: - body['snapshot']['description'] = body['snapshot'].pop( - 'display_description') - - if 'metadata' not in body['snapshot']: - body['snapshot']['metadata'] = {} - - return _snapshot_v2_to_v1( - super(SnapshotsController, self).create(req, body)) - - def update(self, req, id, body): - """Update a snapshot.""" - try: - return _snapshot_v2_to_v1( - super(SnapshotsController, self).update(req, id, body)) - except exc.HTTPBadRequest: - raise exc.HTTPUnprocessableEntity() - - -def create_resource(ext_mgr): - return wsgi.Resource(SnapshotsController(ext_mgr)) diff --git a/cinder/api/v1/volumes.py b/cinder/api/v1/volumes.py deleted file mode 100644 index 61dea6698..000000000 --- a/cinder/api/v1/volumes.py +++ 
/dev/null @@ -1,143 +0,0 @@ -# Copyright 2011 Justin Santa Barbara -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -"""The volumes api.""" - -from oslo_log import log as logging -from six.moves import http_client -from webob import exc - -from cinder.api.openstack import wsgi -from cinder.api.v2 import volumes as volumes_v2 - - -LOG = logging.getLogger(__name__) - - -def _attachment_v2_to_v1(vol): - """Converts v2 attachment details to v1 format.""" - d = [] - attachments = vol.pop('attachments', []) - for attachment in attachments: - a = {'id': attachment.get('id'), - 'attachment_id': attachment.get('attachment_id'), - 'volume_id': attachment.get('volume_id'), - 'server_id': attachment.get('server_id'), - 'host_name': attachment.get('host_name'), - 'device': attachment.get('device'), - } - d.append(a) - - return d - - -def _volume_v2_to_v1(volv2_results, image_id=None): - """Converts v2 volume details to v1 format.""" - volumes = volv2_results.get('volumes') - if volumes is None: - volumes = [volv2_results['volume']] - - for vol in volumes: - # Need to form the string true/false explicitly here to - # maintain our API contract - if vol.get('multiattach'): - vol['multiattach'] = 'true' - else: - vol['multiattach'] = 'false' - - if not vol.get('image_id') and image_id: - vol['image_id'] = image_id - - vol['attachments'] = _attachment_v2_to_v1(vol) - - if not vol.get('metadata'): - vol['metadata'] = {} - - # Convert the name changes - 
vol['display_name'] = vol.pop('name') - vol['display_description'] = vol.pop('description', '') - - # Remove the properties not present for v1 - vol.pop('consistencygroup_id', None) - vol.pop('encryption_key_id', None) - vol.pop('links', None) - vol.pop('migration_status', None) - vol.pop('replication_status', None) - vol.pop('updated_at', None) - vol.pop('user_id', None) - - LOG.debug("vol=%s", vol) - - return volv2_results - - -class VolumeController(volumes_v2.VolumeController): - """The Volumes API controller for the OpenStack API.""" - - def show(self, req, id): - """Return data about the given volume.""" - return _volume_v2_to_v1(super(VolumeController, self).show( - req, id)) - - def index(self, req): - """Returns a summary list of volumes.""" - - # The v1 info was much more detailed than the v2 non-detailed result - return _volume_v2_to_v1( - super(VolumeController, self).detail(req)) - - def detail(self, req): - """Returns a detailed list of volumes.""" - return _volume_v2_to_v1( - super(VolumeController, self).detail(req)) - - @wsgi.response(http_client.OK) - def create(self, req, body): - """Creates a new volume.""" - if (body is None or not body.get('volume') or - not isinstance(body['volume'], dict)): - raise exc.HTTPUnprocessableEntity() - - image_id = None - if body.get('volume'): - image_id = body['volume'].get('imageRef') - - try: - return _volume_v2_to_v1( - super(VolumeController, self).create(req, body), - image_id=image_id) - except exc.HTTPBadRequest as e: - # Image failures are the only ones that actually used - # HTTPBadRequest - error_msg = '%s' % e - if 'Invalid image' in error_msg: - raise - raise exc.HTTPUnprocessableEntity() - - def update(self, req, id, body): - """Update a volume.""" - if (body is None or not body.get('volume') or - not isinstance(body['volume'], dict)): - raise exc.HTTPUnprocessableEntity() - - try: - return _volume_v2_to_v1(super(VolumeController, self).update( - req, id, body)) - except exc.HTTPBadRequest: - raise 
exc.HTTPUnprocessableEntity() - - -def create_resource(ext_mgr): - return wsgi.Resource(VolumeController(ext_mgr)) diff --git a/cinder/api/v2/__init__.py b/cinder/api/v2/__init__.py deleted file mode 100644 index e69de29bb..000000000 diff --git a/cinder/api/v2/limits.py b/cinder/api/v2/limits.py deleted file mode 100644 index e306fd5dd..000000000 --- a/cinder/api/v2/limits.py +++ /dev/null @@ -1,435 +0,0 @@ -# Copyright 2011 OpenStack Foundation -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -""" -Module dedicated functions/classes dealing with rate limiting requests. -""" - -import collections -import copy -import math -import re -import time - -from oslo_serialization import jsonutils -from oslo_utils import importutils -from six.moves import http_client -import webob.dec -import webob.exc - -from cinder.api.openstack import wsgi -from cinder.api.views import limits as limits_views -from cinder.i18n import _ -from cinder import quota -from cinder.wsgi import common as base_wsgi - -QUOTAS = quota.QUOTAS -LIMITS_PREFIX = "limits." - - -# Convenience constants for the limits dictionary passed to Limiter(). 
-PER_SECOND = 1 -PER_MINUTE = 60 -PER_HOUR = 60 * 60 -PER_DAY = 60 * 60 * 24 - - -class LimitsController(wsgi.Controller): - """Controller for accessing limits in the OpenStack API.""" - - def index(self, req): - """Return all global and rate limit information.""" - context = req.environ['cinder.context'] - quotas = QUOTAS.get_project_quotas(context, context.project_id, - usages=False) - abs_limits = {k: v['limit'] for k, v in quotas.items()} - rate_limits = req.environ.get("cinder.limits", []) - - builder = self._get_view_builder(req) - return builder.build(rate_limits, abs_limits) - - def _get_view_builder(self, req): - return limits_views.ViewBuilder() - - -def create_resource(): - return wsgi.Resource(LimitsController()) - - -class Limit(object): - """Stores information about a limit for HTTP requests.""" - - UNITS = { - 1: "SECOND", - 60: "MINUTE", - 60 * 60: "HOUR", - 60 * 60 * 24: "DAY", - } - - UNIT_MAP = {v: k for k, v in UNITS.items()} - - def __init__(self, verb, uri, regex, value, unit): - """Initialize a new `Limit`. - - @param verb: HTTP verb (POST, PUT, etc.) 
- @param uri: Human-readable URI - @param regex: Regular expression format for this limit - @param value: Integer number of requests which can be made - @param unit: Unit of measure for the value parameter - """ - self.verb = verb - self.uri = uri - self.regex = regex - self.value = int(value) - self.unit = unit - self.unit_string = self.display_unit().lower() - self.remaining = int(value) - - if value <= 0: - raise ValueError("Limit value must be > 0") - - self.last_request = None - self.next_request = None - - self.water_level = 0 - self.capacity = self.unit - self.request_value = float(self.capacity) / float(self.value) - msg = (_("Only %(value)s %(verb)s request(s) can be " - "made to %(uri)s every %(unit_string)s.") % - {'value': self.value, 'verb': self.verb, - 'uri': self.uri, 'unit_string': self.unit_string}) - self.error_message = msg - - def __call__(self, verb, url): - """Represent a call to this limit from a relevant request. - - @param verb: string http verb (POST, GET, etc.) - @param url: string URL - """ - if self.verb != verb or not re.match(self.regex, url): - return - - now = self._get_time() - - if self.last_request is None: - self.last_request = now - - leak_value = now - self.last_request - - self.water_level -= leak_value - self.water_level = max(self.water_level, 0) - self.water_level += self.request_value - - difference = self.water_level - self.capacity - - self.last_request = now - - if difference > 0: - self.water_level -= self.request_value - self.next_request = now + difference - return difference - - cap = self.capacity - water = self.water_level - val = self.value - - self.remaining = math.floor(((cap - water) / cap) * val) - self.next_request = now - - def _get_time(self): - """Retrieve the current time. 
Broken out for testability.""" - return time.time() - - def display_unit(self): - """Display the string name of the unit.""" - return self.UNITS.get(self.unit, "UNKNOWN") - - def display(self): - """Return a useful representation of this class.""" - return { - "verb": self.verb, - "URI": self.uri, - "regex": self.regex, - "value": self.value, - "remaining": int(self.remaining), - "unit": self.display_unit(), - "resetTime": int(self.next_request or self._get_time()), - } - -# "Limit" format is a dictionary with the HTTP verb, human-readable URI, -# a regular-expression to match, value and unit of measure (PER_DAY, etc.) - -DEFAULT_LIMITS = [ - Limit("POST", "*", ".*", 10, PER_MINUTE), - Limit("POST", "*/servers", "^/servers", 50, PER_DAY), - Limit("PUT", "*", ".*", 10, PER_MINUTE), - Limit("GET", "*changes-since*", ".*changes-since.*", 3, PER_MINUTE), - Limit("DELETE", "*", ".*", 100, PER_MINUTE), -] - - -class RateLimitingMiddleware(base_wsgi.Middleware): - """Rate-limits requests passing through this middleware. - - All limit information is stored in memory for this implementation. - """ - - def __init__(self, application, limits=None, limiter=None, **kwargs): - """Initialize class, wrap WSGI app, and set up given limits. - - :param application: WSGI application to wrap - :param limits: String describing limits - :param limiter: String identifying class for representing limits - - Other parameters are passed to the constructor for the limiter. - """ - base_wsgi.Middleware.__init__(self, application) - - # Select the limiter class - if limiter is None: - limiter = Limiter - else: - limiter = importutils.import_class(limiter) - - # Parse the limits, if any are provided - if limits is not None: - limits = limiter.parse_limits(limits) - - self._limiter = limiter(limits or DEFAULT_LIMITS, **kwargs) - - @webob.dec.wsgify(RequestClass=wsgi.Request) - def __call__(self, req): - """Represents a single call through this middleware. 
- - We should record the request if we have a limit relevant to it. - If no limit is relevant to the request, ignore it. If the request - should be rate limited, return a fault telling the user they are - over the limit and need to retry later. - """ - verb = req.method - url = req.url - context = req.environ.get("cinder.context") - - if context: - username = context.user_id - else: - username = None - - delay, error = self._limiter.check_for_delay(verb, url, username) - - if delay: - msg = _("This request was rate-limited.") - retry = time.time() + delay - return wsgi.OverLimitFault(msg, error, retry) - - req.environ["cinder.limits"] = self._limiter.get_limits(username) - - return self.application - - -class Limiter(object): - """Rate-limit checking class which handles limits in memory.""" - - def __init__(self, limits, **kwargs): - """Initialize the new `Limiter`. - - @param limits: List of `Limit` objects - """ - self.limits = copy.deepcopy(limits) - self.levels = collections.defaultdict(lambda: copy.deepcopy(limits)) - - # Pick up any per-user limit information - for key, value in kwargs.items(): - if key.startswith(LIMITS_PREFIX): - username = key[len(LIMITS_PREFIX):] - self.levels[username] = self.parse_limits(value) - - def get_limits(self, username=None): - """Return the limits for a given user.""" - return [limit.display() for limit in self.levels[username]] - - def check_for_delay(self, verb, url, username=None): - """Check the given verb/user/user triplet for limit. - - @return: Tuple of delay (in seconds) and error message (or None, None) - """ - delays = [] - - for limit in self.levels[username]: - delay = limit(verb, url) - if delay: - delays.append((delay, limit.error_message)) - - if delays: - delays.sort() - return delays[0] - - return None, None - - # Note: This method gets called before the class is instantiated, - # so this must be either a static method or a class method. It is - # used to develop a list of limits to feed to the constructor. 
We - # put this in the class so that subclasses can override the - # default limit parsing. - @staticmethod - def parse_limits(limits): - """Convert a string into a list of Limit instances. - - This implementation expects a semicolon-separated sequence of - parenthesized groups, where each group contains a - comma-separated sequence consisting of HTTP method, - user-readable URI, a URI reg-exp, an integer number of - requests which can be made, and a unit of measure. Valid - values for the latter are "SECOND", "MINUTE", "HOUR", and - "DAY". - - @return: List of Limit instances. - """ - - # Handle empty limit strings - limits = limits.strip() - if not limits: - return [] - - # Split up the limits by semicolon - result = [] - for group in limits.split(';'): - group = group.strip() - if group[:1] != '(' or group[-1:] != ')': - raise ValueError("Limit rules must be surrounded by " - "parentheses") - group = group[1:-1] - - # Extract the Limit arguments - args = [a.strip() for a in group.split(',')] - if len(args) != 5: - raise ValueError("Limit rules must contain the following " - "arguments: verb, uri, regex, value, unit") - - # Pull out the arguments - verb, uri, regex, value, unit = args - - # Upper-case the verb - verb = verb.upper() - - # Convert value--raises ValueError if it's not integer - value = int(value) - - # Convert unit - unit = unit.upper() - if unit not in Limit.UNIT_MAP: - raise ValueError("Invalid units specified") - unit = Limit.UNIT_MAP[unit] - - # Build a limit - result.append(Limit(verb, uri, regex, value, unit)) - - return result - - -class WsgiLimiter(object): - """Rate-limit checking from a WSGI application. - - Uses an in-memory `Limiter`. - - To use, POST ``/`` with JSON data such as:: - - { - "verb" : GET, - "path" : "/servers" - } - - and receive a 204 No Content, or a 403 Forbidden with an X-Wait-Seconds - header containing the number of seconds to wait before the action would - succeed. 
- """ - - def __init__(self, limits=None): - """Initialize the new `WsgiLimiter`. - - @param limits: List of `Limit` objects - """ - self._limiter = Limiter(limits or DEFAULT_LIMITS) - - @webob.dec.wsgify(RequestClass=wsgi.Request) - def __call__(self, request): - """Handles a call to this application. - - Returns 204 if the request is acceptable to the limiter, else a 403 - is returned with a relevant header indicating when the request - *will* succeed. - """ - if request.method != "POST": - raise webob.exc.HTTPMethodNotAllowed() - - try: - info = dict(jsonutils.loads(request.body)) - except ValueError: - raise webob.exc.HTTPBadRequest() - - username = request.path_info_pop() - verb = info.get("verb") - path = info.get("path") - - delay, error = self._limiter.check_for_delay(verb, path, username) - - if delay: - headers = {"X-Wait-Seconds": "%.2f" % delay} - return webob.exc.HTTPForbidden(headers=headers, explanation=error) - else: - return webob.exc.HTTPNoContent() - - -class WsgiLimiterProxy(object): - """Rate-limit requests based on answers from a remote source.""" - - def __init__(self, limiter_address): - """Initialize the new `WsgiLimiterProxy`. - - @param limiter_address: IP/port combination of where to request limit - """ - self.limiter_address = limiter_address - - def check_for_delay(self, verb, path, username=None): - body = jsonutils.dump_as_bytes({"verb": verb, "path": path}) - headers = {"Content-Type": "application/json"} - - conn = http_client.HTTPConnection(self.limiter_address) - - if username: - conn.request("POST", "/%s" % (username), body, headers) - else: - conn.request("POST", "/", body, headers) - - resp = conn.getresponse() - - if http_client.OK >= resp.status < http_client.MULTIPLE_CHOICES: - return None, None - - return resp.getheader("X-Wait-Seconds"), resp.read() or None - - # Note: This method gets called before the class is instantiated, - # so this must be either a static method or a class method. 
It is - # used to develop a list of limits to feed to the constructor. - # This implementation returns an empty list, since all limit - # decisions are made by a remote server. - @staticmethod - def parse_limits(limits): - """Ignore a limits string--simply doesn't apply for the limit proxy. - - @return: Empty list. - """ - - return [] diff --git a/cinder/api/v2/router.py b/cinder/api/v2/router.py deleted file mode 100644 index df442f558..000000000 --- a/cinder/api/v2/router.py +++ /dev/null @@ -1,94 +0,0 @@ -# Copyright 2011 OpenStack Foundation -# Copyright 2011 United States Government as represented by the -# Administrator of the National Aeronautics and Space Administration. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -""" -WSGI middleware for OpenStack Volume API. 
-""" - -from cinder.api import extensions -import cinder.api.openstack -from cinder.api.v2 import limits -from cinder.api.v2 import snapshot_metadata -from cinder.api.v2 import snapshots -from cinder.api.v2 import types -from cinder.api.v2 import volume_metadata -from cinder.api.v2 import volumes -from cinder.api import versions - - -class APIRouter(cinder.api.openstack.APIRouter): - """Routes requests on the API to the appropriate controller and method.""" - ExtensionManager = extensions.ExtensionManager - - def _setup_routes(self, mapper, ext_mgr): - self.resources['versions'] = versions.create_resource() - mapper.connect("versions", "/", - controller=self.resources['versions'], - action='index') - - mapper.redirect("", "/") - - self.resources['volumes'] = volumes.create_resource(ext_mgr) - mapper.resource("volume", "volumes", - controller=self.resources['volumes'], - collection={'detail': 'GET'}, - member={'action': 'POST'}) - - self.resources['types'] = types.create_resource() - mapper.resource("type", "types", - controller=self.resources['types'], - member={'action': 'POST'}) - - self.resources['snapshots'] = snapshots.create_resource(ext_mgr) - mapper.resource("snapshot", "snapshots", - controller=self.resources['snapshots'], - collection={'detail': 'GET'}, - member={'action': 'POST'}) - - self.resources['limits'] = limits.create_resource() - mapper.resource("limit", "limits", - controller=self.resources['limits']) - - self.resources['snapshot_metadata'] = \ - snapshot_metadata.create_resource() - snapshot_metadata_controller = self.resources['snapshot_metadata'] - - mapper.resource("snapshot_metadata", "metadata", - controller=snapshot_metadata_controller, - parent_resource=dict(member_name='snapshot', - collection_name='snapshots')) - - mapper.connect("metadata", - "/{project_id}/snapshots/{snapshot_id}/metadata", - controller=snapshot_metadata_controller, - action='update_all', - conditions={"method": ['PUT']}) - - self.resources['volume_metadata'] = \ - 
volume_metadata.create_resource() - volume_metadata_controller = self.resources['volume_metadata'] - - mapper.resource("volume_metadata", "metadata", - controller=volume_metadata_controller, - parent_resource=dict(member_name='volume', - collection_name='volumes')) - - mapper.connect("metadata", - "/{project_id}/volumes/{volume_id}/metadata", - controller=volume_metadata_controller, - action='update_all', - conditions={"method": ['PUT']}) diff --git a/cinder/api/v2/snapshot_metadata.py b/cinder/api/v2/snapshot_metadata.py deleted file mode 100644 index 8a4da4830..000000000 --- a/cinder/api/v2/snapshot_metadata.py +++ /dev/null @@ -1,138 +0,0 @@ -# Copyright 2011 OpenStack Foundation -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -from six.moves import http_client -import webob -from webob import exc - -from cinder.api.openstack import wsgi -from cinder import exception -from cinder.i18n import _ -from cinder import volume - - -class Controller(wsgi.Controller): - """The snapshot metadata API controller for the OpenStack API.""" - - def __init__(self): - self.volume_api = volume.API() - super(Controller, self).__init__() - - def _get_metadata(self, context, snapshot_id): - return self._get_snapshot_and_metadata(context, snapshot_id)[1] - - def _get_snapshot_and_metadata(self, context, snapshot_id): - # Not found exception will be handled at the wsgi level - snapshot = self.volume_api.get_snapshot(context, snapshot_id) - meta = self.volume_api.get_snapshot_metadata(context, snapshot) - return snapshot, meta - - def index(self, req, snapshot_id): - """Returns the list of metadata for a given snapshot.""" - context = req.environ['cinder.context'] - return {'metadata': self._get_metadata(context, snapshot_id)} - - def create(self, req, snapshot_id, body): - self.assert_valid_body(body, 'metadata') - context = req.environ['cinder.context'] - metadata = body['metadata'] - - new_metadata = self._update_snapshot_metadata(context, - snapshot_id, - metadata, - delete=False) - - return {'metadata': new_metadata} - - def update(self, req, snapshot_id, id, body): - self.assert_valid_body(body, 'meta') - meta_item = body['meta'] - - if id not in meta_item: - expl = _('Request body and URI mismatch') - raise exc.HTTPBadRequest(explanation=expl) - - if len(meta_item) > 1: - expl = _('Request body contains too many items') - raise exc.HTTPBadRequest(explanation=expl) - - context = req.environ['cinder.context'] - self._update_snapshot_metadata(context, - snapshot_id, - meta_item, - delete=False) - - return {'meta': meta_item} - - def update_all(self, req, snapshot_id, body): - self.assert_valid_body(body, 'metadata') - context = req.environ['cinder.context'] - metadata = body['metadata'] - - new_metadata = 
self._update_snapshot_metadata(context, - snapshot_id, - metadata, - delete=True) - - return {'metadata': new_metadata} - - def _update_snapshot_metadata(self, context, - snapshot_id, metadata, - delete=False): - try: - snapshot = self.volume_api.get_snapshot(context, snapshot_id) - return self.volume_api.update_snapshot_metadata(context, - snapshot, - metadata, - delete) - # Not found exception will be handled at the wsgi level - except (ValueError, AttributeError): - msg = _("Malformed request body") - raise exc.HTTPBadRequest(explanation=msg) - - except exception.InvalidVolumeMetadata as error: - raise exc.HTTPBadRequest(explanation=error.msg) - - except exception.InvalidVolumeMetadataSize as error: - raise exc.HTTPRequestEntityTooLarge(explanation=error.msg) - - def show(self, req, snapshot_id, id): - """Return a single metadata item.""" - context = req.environ['cinder.context'] - data = self._get_metadata(context, snapshot_id) - - try: - return {'meta': {id: data[id]}} - except KeyError: - raise exception.SnapshotMetadataNotFound(snapshot_id=snapshot_id, - metadata_key=id) - - def delete(self, req, snapshot_id, id): - """Deletes an existing metadata.""" - context = req.environ['cinder.context'] - - snapshot, metadata = self._get_snapshot_and_metadata(context, - snapshot_id) - - if id not in metadata: - raise exception.SnapshotMetadataNotFound(snapshot_id=snapshot_id, - metadata_key=id) - - self.volume_api.delete_snapshot_metadata(context, snapshot, id) - return webob.Response(status_int=http_client.OK) - - -def create_resource(): - return wsgi.Resource(Controller()) diff --git a/cinder/api/v2/snapshots.py b/cinder/api/v2/snapshots.py deleted file mode 100644 index 903096a84..000000000 --- a/cinder/api/v2/snapshots.py +++ /dev/null @@ -1,215 +0,0 @@ -# Copyright 2011 Justin Santa Barbara -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. 
You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -"""The volumes snapshots api.""" - -from oslo_log import log as logging -from oslo_utils import encodeutils -from oslo_utils import strutils -from six.moves import http_client -import webob -from webob import exc - -from cinder.api import common -from cinder.api.openstack import wsgi -from cinder.api.views import snapshots as snapshot_views -from cinder import exception -from cinder.i18n import _ -from cinder import utils -from cinder import volume -from cinder.volume import utils as volume_utils - - -LOG = logging.getLogger(__name__) - - -class SnapshotsController(wsgi.Controller): - """The Snapshots API controller for the OpenStack API.""" - - _view_builder_class = snapshot_views.ViewBuilder - - def __init__(self, ext_mgr=None): - self.volume_api = volume.API() - self.ext_mgr = ext_mgr - super(SnapshotsController, self).__init__() - - def show(self, req, id): - """Return data about the given snapshot.""" - context = req.environ['cinder.context'] - - # Not found exception will be handled at the wsgi level - snapshot = self.volume_api.get_snapshot(context, id) - req.cache_db_snapshot(snapshot) - - return self._view_builder.detail(req, snapshot) - - def delete(self, req, id): - """Delete a snapshot.""" - context = req.environ['cinder.context'] - - LOG.info("Delete snapshot with id: %s", id) - - # Not found exception will be handled at the wsgi level - snapshot = self.volume_api.get_snapshot(context, id) - self.volume_api.delete_snapshot(context, snapshot) - - return webob.Response(status_int=http_client.ACCEPTED) - - def index(self, req): - """Returns a 
summary list of snapshots.""" - return self._items(req, is_detail=False) - - def detail(self, req): - """Returns a detailed list of snapshots.""" - return self._items(req, is_detail=True) - - def _items(self, req, is_detail=True): - """Returns a list of snapshots, transformed through view builder.""" - context = req.environ['cinder.context'] - - # Pop out non search_opts and create local variables - search_opts = req.GET.copy() - sort_keys, sort_dirs = common.get_sort_params(search_opts) - marker, limit, offset = common.get_pagination_params(search_opts) - - # Filter out invalid options - allowed_search_options = ('status', 'volume_id', 'name') - utils.remove_invalid_filter_options(context, search_opts, - allowed_search_options) - - # NOTE(thingee): v2 API allows name instead of display_name - if 'name' in search_opts: - search_opts['display_name'] = search_opts.pop('name') - - snapshots = self.volume_api.get_all_snapshots(context, - search_opts=search_opts, - marker=marker, - limit=limit, - sort_keys=sort_keys, - sort_dirs=sort_dirs, - offset=offset) - - req.cache_db_snapshots(snapshots.objects) - - if is_detail: - snapshots = self._view_builder.detail_list(req, snapshots.objects) - else: - snapshots = self._view_builder.summary_list(req, snapshots.objects) - return snapshots - - @wsgi.response(http_client.ACCEPTED) - def create(self, req, body): - """Creates a new snapshot.""" - kwargs = {} - context = req.environ['cinder.context'] - - self.assert_valid_body(body, 'snapshot') - - snapshot = body['snapshot'] - kwargs['metadata'] = snapshot.get('metadata', None) - - try: - volume_id = snapshot['volume_id'] - except KeyError: - msg = _("'volume_id' must be specified") - raise exc.HTTPBadRequest(explanation=msg) - - volume = self.volume_api.get(context, volume_id) - force = snapshot.get('force', False) - LOG.info("Create snapshot from volume %s", volume_id) - self.validate_name_and_description(snapshot) - - # NOTE(thingee): v2 API allows name instead of display_name 
- if 'name' in snapshot: - snapshot['display_name'] = snapshot.pop('name') - - try: - force = strutils.bool_from_string(force, strict=True) - except ValueError as error: - err_msg = encodeutils.exception_to_unicode(error) - msg = _("Invalid value for 'force': '%s'") % err_msg - raise exception.InvalidParameterValue(err=msg) - - if force: - new_snapshot = self.volume_api.create_snapshot_force( - context, - volume, - snapshot.get('display_name'), - snapshot.get('description'), - **kwargs) - else: - new_snapshot = self.volume_api.create_snapshot( - context, - volume, - snapshot.get('display_name'), - snapshot.get('description'), - **kwargs) - req.cache_db_snapshot(new_snapshot) - - return self._view_builder.detail(req, new_snapshot) - - def update(self, req, id, body): - """Update a snapshot.""" - context = req.environ['cinder.context'] - - if not body: - msg = _("Missing request body") - raise exc.HTTPBadRequest(explanation=msg) - - if 'snapshot' not in body: - msg = (_("Missing required element '%s' in request body") % - 'snapshot') - raise exc.HTTPBadRequest(explanation=msg) - - snapshot = body['snapshot'] - update_dict = {} - - valid_update_keys = ( - 'name', - 'description', - 'display_name', - 'display_description', - ) - self.validate_name_and_description(snapshot) - - # NOTE(thingee): v2 API allows name instead of display_name - if 'name' in snapshot: - snapshot['display_name'] = snapshot.pop('name') - - # NOTE(thingee): v2 API allows description instead of - # display_description - if 'description' in snapshot: - snapshot['display_description'] = snapshot.pop('description') - - for key in valid_update_keys: - if key in snapshot: - update_dict[key] = snapshot[key] - - # Not found exception will be handled at the wsgi level - snapshot = self.volume_api.get_snapshot(context, id) - volume_utils.notify_about_snapshot_usage(context, snapshot, - 'update.start') - self.volume_api.update_snapshot(context, snapshot, update_dict) - - snapshot.update(update_dict) - 
req.cache_db_snapshot(snapshot) - volume_utils.notify_about_snapshot_usage(context, snapshot, - 'update.end') - - return self._view_builder.detail(req, snapshot) - - -def create_resource(ext_mgr): - return wsgi.Resource(SnapshotsController(ext_mgr)) diff --git a/cinder/api/v2/types.py b/cinder/api/v2/types.py deleted file mode 100644 index 8f3a29407..000000000 --- a/cinder/api/v2/types.py +++ /dev/null @@ -1,114 +0,0 @@ -# Copyright (c) 2011 Zadara Storage Inc. -# Copyright (c) 2011 OpenStack Foundation -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -"""The volume type & volume types extra specs extension.""" - -from oslo_utils import strutils -from webob import exc - -from cinder.api import common -from cinder.api.openstack import wsgi -from cinder.api.v2.views import types as views_types -from cinder import exception -from cinder.i18n import _ -from cinder import utils -from cinder.volume import volume_types - - -class VolumeTypesController(wsgi.Controller): - """The volume types API controller for the OpenStack API.""" - - _view_builder_class = views_types.ViewBuilder - - def index(self, req): - """Returns the list of volume types.""" - limited_types = self._get_volume_types(req) - req.cache_resource(limited_types, name='types') - return self._view_builder.index(req, limited_types) - - def show(self, req, id): - """Return a single volume type item.""" - context = req.environ['cinder.context'] - - # get default volume type - if id is not None and id == 'default': - vol_type = volume_types.get_default_volume_type() - if not vol_type: - msg = _("Default volume type can not be found.") - raise exception.VolumeTypeNotFound(message=msg) - req.cache_resource(vol_type, name='types') - else: - # Not found exception will be handled at wsgi level - vol_type = volume_types.get_volume_type(context, id) - req.cache_resource(vol_type, name='types') - - return self._view_builder.show(req, vol_type) - - def _parse_is_public(self, is_public): - """Parse is_public into something usable. 
- - * True: List public volume types only - * False: List private volume types only - * None: List both public and private volume types - """ - - if is_public is None: - # preserve default value of showing only public types - return True - elif utils.is_none_string(is_public): - return None - else: - try: - return strutils.bool_from_string(is_public, strict=True) - except ValueError: - msg = _('Invalid is_public filter [%s]') % is_public - raise exc.HTTPBadRequest(explanation=msg) - - def _get_volume_types(self, req): - """Helper function that returns a list of type dicts.""" - params = req.params.copy() - marker, limit, offset = common.get_pagination_params(params) - sort_keys, sort_dirs = common.get_sort_params(params) - # NOTE(wanghao): Currently, we still only support to filter by - # is_public. If we want to filter by more args, we should set params - # to filters. - filters = {} - context = req.environ['cinder.context'] - if context.is_admin: - # Only admin has query access to all volume types - filters['is_public'] = self._parse_is_public( - req.params.get('is_public', None)) - else: - filters['is_public'] = True - utils.remove_invalid_filter_options(context, - filters, - self._get_vol_type_filter_options() - ) - limited_types = volume_types.get_all_types(context, - filters=filters, - marker=marker, limit=limit, - sort_keys=sort_keys, - sort_dirs=sort_dirs, - offset=offset, - list_result=True) - return limited_types - - def _get_vol_type_filter_options(self): - """Return volume type search options allowed by non-admin.""" - return ['is_public'] - - -def create_resource(): - return wsgi.Resource(VolumeTypesController()) diff --git a/cinder/api/v2/views/__init__.py b/cinder/api/v2/views/__init__.py deleted file mode 100644 index e69de29bb..000000000 diff --git a/cinder/api/v2/views/types.py b/cinder/api/v2/views/types.py deleted file mode 100644 index fbf063111..000000000 --- a/cinder/api/v2/views/types.py +++ /dev/null @@ -1,48 +0,0 @@ -# Copyright 2012 Red 
Hat, Inc. -# Copyright 2015 Intel Corporation -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from cinder.api import common - - -class ViewBuilder(common.ViewBuilder): - - def show(self, request, volume_type, brief=False): - """Trim away extraneous volume type attributes.""" - context = request.environ['cinder.context'] - trimmed = dict(id=volume_type.get('id'), - name=volume_type.get('name'), - is_public=volume_type.get('is_public'), - description=volume_type.get('description')) - if common.validate_policy( - context, - 'volume_extension:access_types_extra_specs'): - trimmed['extra_specs'] = volume_type.get('extra_specs') - if common.validate_policy( - context, - 'volume_extension:access_types_qos_specs_id'): - trimmed['qos_specs_id'] = volume_type.get('qos_specs_id') - return trimmed if brief else dict(volume_type=trimmed) - - def index(self, request, volume_types): - """Index over trimmed volume types.""" - volume_types_list = [self.show(request, volume_type, True) - for volume_type in volume_types] - volume_type_links = self._get_collection_links(request, volume_types, - 'types') - volume_types_dict = dict(volume_types=volume_types_list) - if volume_type_links: - volume_types_dict['volume_type_links'] = volume_type_links - return volume_types_dict diff --git a/cinder/api/v2/views/volumes.py b/cinder/api/v2/views/volumes.py deleted file mode 100644 index 8e9139be0..000000000 --- a/cinder/api/v2/views/volumes.py +++ /dev/null @@ -1,169 +0,0 
@@ -# Copyright 2012 OpenStack Foundation -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import six - -from cinder.api import common -from cinder import group as group_api -from cinder.objects import fields -from cinder.volume import group_types - - -class ViewBuilder(common.ViewBuilder): - """Model a server API response as a python dictionary.""" - - _collection_name = "volumes" - - def __init__(self): - """Initialize view builder.""" - super(ViewBuilder, self).__init__() - - def summary_list(self, request, volumes, volume_count=None): - """Show a list of volumes without many details.""" - return self._list_view(self.summary, request, volumes, - volume_count) - - def detail_list(self, request, volumes, volume_count=None): - """Detailed view of a list of volumes.""" - return self._list_view(self.detail, request, volumes, - volume_count, - self._collection_name + '/detail') - - def summary(self, request, volume): - """Generic, non-detailed view of a volume.""" - return { - 'volume': { - 'id': volume['id'], - 'name': volume['display_name'], - 'links': self._get_links(request, - volume['id']), - }, - } - - def _get_volume_status(self, volume): - # NOTE(wanghao): for fixing bug 1504007, we introduce 'managing', - # 'error_managing' and 'error_managing_deleting' status into managing - # process, but still expose 'creating' and 'error' and 'deleting' - # status to user for API compatibility. 
- status_map = { - 'managing': 'creating', - 'error_managing': 'error', - 'error_managing_deleting': 'deleting', - } - vol_status = volume.get('status') - return status_map.get(vol_status, vol_status) - - def detail(self, request, volume): - """Detailed view of a single volume.""" - volume_ref = { - 'volume': { - 'id': volume.get('id'), - 'status': self._get_volume_status(volume), - 'size': volume.get('size'), - 'availability_zone': volume.get('availability_zone'), - 'created_at': volume.get('created_at'), - 'updated_at': volume.get('updated_at'), - 'attachments': self._get_attachments(volume), - 'name': volume.get('display_name'), - 'description': volume.get('display_description'), - 'volume_type': self._get_volume_type(volume), - 'snapshot_id': volume.get('snapshot_id'), - 'source_volid': volume.get('source_volid'), - 'metadata': self._get_volume_metadata(volume), - 'links': self._get_links(request, volume['id']), - 'user_id': volume.get('user_id'), - 'bootable': six.text_type(volume.get('bootable')).lower(), - 'encrypted': self._is_volume_encrypted(volume), - 'replication_status': volume.get('replication_status'), - 'consistencygroup_id': volume.get('consistencygroup_id'), - 'multiattach': volume.get('multiattach'), - } - } - if request.environ['cinder.context'].is_admin: - volume_ref['volume']['migration_status'] = ( - volume.get('migration_status')) - - # NOTE(xyang): Display group_id as consistencygroup_id in detailed - # view of the volume if group is converted from cg. 
- group_id = volume.get('group_id') - if group_id is not None: - # Not found exception will be handled at the wsgi level - ctxt = request.environ['cinder.context'] - grp = group_api.API().get(ctxt, group_id) - cgsnap_type = group_types.get_default_cgsnapshot_type() - if grp.group_type_id == cgsnap_type['id']: - volume_ref['volume']['consistencygroup_id'] = group_id - - return volume_ref - - def _is_volume_encrypted(self, volume): - """Determine if volume is encrypted.""" - return volume.get('encryption_key_id') is not None - - def _get_attachments(self, volume): - """Retrieve the attachments of the volume object.""" - attachments = [] - - if volume['attach_status'] == fields.VolumeAttachStatus.ATTACHED: - attaches = volume.volume_attachment - for attachment in attaches: - if (attachment.get('attach_status') == - fields.VolumeAttachStatus.ATTACHED): - a = {'id': attachment.get('volume_id'), - 'attachment_id': attachment.get('id'), - 'volume_id': attachment.get('volume_id'), - 'server_id': attachment.get('instance_uuid'), - 'host_name': attachment.get('attached_host'), - 'device': attachment.get('mountpoint'), - 'attached_at': attachment.get('attach_time'), - } - attachments.append(a) - - return attachments - - def _get_volume_metadata(self, volume): - """Retrieve the metadata of the volume object.""" - return volume.metadata - - def _get_volume_type(self, volume): - """Retrieve the type the volume object.""" - if volume['volume_type_id'] and volume.get('volume_type'): - return volume['volume_type']['name'] - else: - return volume['volume_type_id'] - - def _list_view(self, func, request, volumes, volume_count, - coll_name=_collection_name): - """Provide a view for a list of volumes. 
- - :param func: Function used to format the volume data - :param request: API request - :param volumes: List of volumes in dictionary format - :param volume_count: Length of the original list of volumes - :param coll_name: Name of collection, used to generate the next link - for a pagination query - :returns: Volume data in dictionary format - """ - volumes_list = [func(request, volume)['volume'] for volume in volumes] - volumes_links = self._get_collection_links(request, - volumes, - coll_name, - volume_count) - volumes_dict = dict(volumes=volumes_list) - - if volumes_links: - volumes_dict['volumes_links'] = volumes_links - - return volumes_dict diff --git a/cinder/api/v2/volume_metadata.py b/cinder/api/v2/volume_metadata.py deleted file mode 100644 index 5799494f6..000000000 --- a/cinder/api/v2/volume_metadata.py +++ /dev/null @@ -1,144 +0,0 @@ -# Copyright 2013 OpenStack Foundation. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -from six.moves import http_client -import webob - -from cinder.api import common -from cinder.api.openstack import wsgi -from cinder import exception -from cinder.i18n import _ -from cinder import volume - - -class Controller(wsgi.Controller): - """The volume metadata API controller for the OpenStack API.""" - - def __init__(self): - self.volume_api = volume.API() - super(Controller, self).__init__() - - def _get_metadata(self, context, volume_id): - # The metadata is at the second position of the tuple returned - # from _get_volume_and_metadata - return self._get_volume_and_metadata(context, volume_id)[1] - - def _get_volume_and_metadata(self, context, volume_id): - # Not found exception will be handled at the wsgi level - volume = self.volume_api.get(context, volume_id) - meta = self.volume_api.get_volume_metadata(context, volume) - return (volume, meta) - - def index(self, req, volume_id): - """Returns the list of metadata for a given volume.""" - context = req.environ['cinder.context'] - return {'metadata': self._get_metadata(context, volume_id)} - - def create(self, req, volume_id, body): - self.assert_valid_body(body, 'metadata') - context = req.environ['cinder.context'] - metadata = body['metadata'] - - new_metadata = self._update_volume_metadata(context, volume_id, - metadata, delete=False, - use_create=True) - return {'metadata': new_metadata} - - def update(self, req, volume_id, id, body): - self.assert_valid_body(body, 'meta') - meta_item = body['meta'] - - if id not in meta_item: - expl = _('Request body and URI mismatch') - raise webob.exc.HTTPBadRequest(explanation=expl) - - if len(meta_item) > 1: - expl = _('Request body contains too many items') - raise webob.exc.HTTPBadRequest(explanation=expl) - - context = req.environ['cinder.context'] - self._update_volume_metadata(context, - volume_id, - meta_item, - delete=False) - - return {'meta': meta_item} - - def update_all(self, req, volume_id, body): - self.assert_valid_body(body, 'metadata') - 
metadata = body['metadata'] - context = req.environ['cinder.context'] - - new_metadata = self._update_volume_metadata(context, - volume_id, - metadata, - delete=True) - - return {'metadata': new_metadata} - - def _update_volume_metadata(self, context, volume_id, metadata, - delete=False, use_create=False): - try: - volume = self.volume_api.get(context, volume_id) - if use_create: - return self.volume_api.create_volume_metadata(context, volume, - metadata) - else: - return self.volume_api.update_volume_metadata( - context, volume, metadata, delete, - meta_type=common.METADATA_TYPES.user) - # Not found exception will be handled at the wsgi level - except (ValueError, AttributeError): - msg = _("Malformed request body") - raise webob.exc.HTTPBadRequest(explanation=msg) - - except exception.InvalidVolumeMetadata as error: - raise webob.exc.HTTPBadRequest(explanation=error.msg) - - except exception.InvalidVolumeMetadataSize as error: - raise webob.exc.HTTPRequestEntityTooLarge(explanation=error.msg) - - def show(self, req, volume_id, id): - """Return a single metadata item.""" - context = req.environ['cinder.context'] - data = self._get_metadata(context, volume_id) - - try: - return {'meta': {id: data[id]}} - except KeyError: - raise exception.VolumeMetadataNotFound(volume_id=volume_id, - metadata_key=id) - - def delete(self, req, volume_id, id): - """Deletes an existing metadata.""" - context = req.environ['cinder.context'] - - volume, metadata = self._get_volume_and_metadata(context, volume_id) - - if id not in metadata: - raise exception.VolumeMetadataNotFound(volume_id=volume_id, - metadata_key=id) - - # Not found exception will be handled at the wsgi level - self.volume_api.delete_volume_metadata( - context, - volume, - id, - meta_type=common.METADATA_TYPES.user) - return webob.Response(status_int=http_client.OK) - - -def create_resource(): - return wsgi.Resource(Controller()) diff --git a/cinder/api/v2/volumes.py b/cinder/api/v2/volumes.py deleted file mode 100644 
index cb69ce394..000000000 --- a/cinder/api/v2/volumes.py +++ /dev/null @@ -1,349 +0,0 @@ -# Copyright 2011 Justin Santa Barbara -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -"""The volumes api.""" - - -from oslo_config import cfg -from oslo_log import log as logging -from oslo_utils import uuidutils -from six.moves import http_client -import webob -from webob import exc - -from cinder.api import common -from cinder.api.openstack import wsgi -from cinder.api.v2.views import volumes as volume_views -from cinder import exception -from cinder import group as group_api -from cinder.i18n import _ -from cinder.image import glance -from cinder import objects -from cinder import utils -from cinder import volume as cinder_volume -from cinder.volume import utils as volume_utils - -CONF = cfg.CONF - -LOG = logging.getLogger(__name__) - - -class VolumeController(wsgi.Controller): - """The Volumes API controller for the OpenStack API.""" - - _view_builder_class = volume_views.ViewBuilder - - def __init__(self, ext_mgr): - self.volume_api = cinder_volume.API() - self.group_api = group_api.API() - self.ext_mgr = ext_mgr - super(VolumeController, self).__init__() - - def show(self, req, id): - """Return data about the given volume.""" - context = req.environ['cinder.context'] - - # Not found exception will be handled at the wsgi level - vol = self.volume_api.get(context, id, viewable_admin_meta=True) - req.cache_db_volume(vol) - - 
utils.add_visible_admin_metadata(vol) - - return self._view_builder.detail(req, vol) - - def delete(self, req, id): - """Delete a volume.""" - context = req.environ['cinder.context'] - - cascade = utils.get_bool_param('cascade', req.params) - - LOG.info("Delete volume with id: %s", id) - - # Not found exception will be handled at the wsgi level - volume = self.volume_api.get(context, id) - self.volume_api.delete(context, volume, cascade=cascade) - return webob.Response(status_int=http_client.ACCEPTED) - - def index(self, req): - """Returns a summary list of volumes.""" - return self._get_volumes(req, is_detail=False) - - def detail(self, req): - """Returns a detailed list of volumes.""" - return self._get_volumes(req, is_detail=True) - - def _get_volumes(self, req, is_detail): - """Returns a list of volumes, transformed through view builder.""" - - context = req.environ['cinder.context'] - - params = req.params.copy() - marker, limit, offset = common.get_pagination_params(params) - sort_keys, sort_dirs = common.get_sort_params(params) - filters = params - - # NOTE(wanghao): Always removing glance_metadata since we support it - # only in API version >= 3.4. 
- filters.pop('glance_metadata', None) - utils.remove_invalid_filter_options(context, - filters, - self._get_volume_filter_options()) - - # NOTE(thingee): v2 API allows name instead of display_name - if 'name' in sort_keys: - sort_keys[sort_keys.index('name')] = 'display_name' - - if 'name' in filters: - filters['display_name'] = filters.pop('name') - - self.volume_api.check_volume_filters(filters) - volumes = self.volume_api.get_all(context, marker, limit, - sort_keys=sort_keys, - sort_dirs=sort_dirs, - filters=filters, - viewable_admin_meta=True, - offset=offset) - - for volume in volumes: - utils.add_visible_admin_metadata(volume) - - req.cache_db_volumes(volumes.objects) - - if is_detail: - volumes = self._view_builder.detail_list(req, volumes) - else: - volumes = self._view_builder.summary_list(req, volumes) - return volumes - - def _image_uuid_from_ref(self, image_ref, context): - # If the image ref was generated by nova api, strip image_ref - # down to an id. - image_uuid = None - try: - image_uuid = image_ref.split('/').pop() - except AttributeError: - msg = _("Invalid imageRef provided.") - raise exc.HTTPBadRequest(explanation=msg) - - image_service = glance.get_default_image_service() - - # First see if this is an actual image ID - if uuidutils.is_uuid_like(image_uuid): - try: - image = image_service.show(context, image_uuid) - if 'id' in image: - return image['id'] - except Exception: - # Pass and see if there is a matching image name - pass - - # Could not find by ID, check if it is an image name - try: - params = {'filters': {'name': image_ref}} - images = list(image_service.detail(context, **params)) - if len(images) > 1: - msg = _("Multiple matches found for '%s', use an ID to be more" - " specific.") % image_ref - raise exc.HTTPConflict(explanation=msg) - for img in images: - return img['id'] - except exc.HTTPConflict: - raise - except Exception: - # Pass the other exception and let default not found error - # handling take care of it - pass - - msg 
= _("Invalid image identifier or unable to " - "access requested image.") - raise exc.HTTPBadRequest(explanation=msg) - - @wsgi.response(http_client.ACCEPTED) - def create(self, req, body): - """Creates a new volume.""" - self.assert_valid_body(body, 'volume') - - LOG.debug('Create volume request body: %s', body) - context = req.environ['cinder.context'] - volume = body['volume'] - - kwargs = {} - self.validate_name_and_description(volume) - - # NOTE(thingee): v2 API allows name instead of display_name - if 'name' in volume: - volume['display_name'] = volume.pop('name') - - # NOTE(thingee): v2 API allows description instead of - # display_description - if 'description' in volume: - volume['display_description'] = volume.pop('description') - - if 'image_id' in volume: - volume['imageRef'] = volume.pop('image_id') - - req_volume_type = volume.get('volume_type', None) - if req_volume_type: - # Not found exception will be handled at the wsgi level - kwargs['volume_type'] = ( - objects.VolumeType.get_by_name_or_id(context, req_volume_type)) - - kwargs['metadata'] = volume.get('metadata', None) - - snapshot_id = volume.get('snapshot_id') - if snapshot_id is not None: - if not uuidutils.is_uuid_like(snapshot_id): - msg = _("Snapshot ID must be in UUID form.") - raise exc.HTTPBadRequest(explanation=msg) - # Not found exception will be handled at the wsgi level - kwargs['snapshot'] = self.volume_api.get_snapshot(context, - snapshot_id) - else: - kwargs['snapshot'] = None - - source_volid = volume.get('source_volid') - if source_volid is not None: - if not uuidutils.is_uuid_like(source_volid): - msg = _("Source volume ID '%s' must be a " - "valid UUID.") % source_volid - raise exc.HTTPBadRequest(explanation=msg) - # Not found exception will be handled at the wsgi level - kwargs['source_volume'] = \ - self.volume_api.get_volume(context, - source_volid) - else: - kwargs['source_volume'] = None - - source_replica = volume.get('source_replica') - if source_replica is not None: - 
if not uuidutils.is_uuid_like(source_replica): - msg = _("Source replica ID '%s' must be a " - "valid UUID") % source_replica - raise exc.HTTPBadRequest(explanation=msg) - # Not found exception will be handled at the wsgi level - src_vol = self.volume_api.get_volume(context, - source_replica) - if src_vol['replication_status'] == 'disabled': - explanation = _('source volume id:%s is not' - ' replicated') % source_replica - raise exc.HTTPBadRequest(explanation=explanation) - kwargs['source_replica'] = src_vol - else: - kwargs['source_replica'] = None - - kwargs['group'] = None - kwargs['consistencygroup'] = None - consistencygroup_id = volume.get('consistencygroup_id') - if consistencygroup_id is not None: - if not uuidutils.is_uuid_like(consistencygroup_id): - msg = _("Consistency group ID '%s' must be a " - "valid UUID.") % consistencygroup_id - raise exc.HTTPBadRequest(explanation=msg) - # Not found exception will be handled at the wsgi level - kwargs['group'] = self.group_api.get(context, consistencygroup_id) - - size = volume.get('size', None) - if size is None and kwargs['snapshot'] is not None: - size = kwargs['snapshot']['volume_size'] - elif size is None and kwargs['source_volume'] is not None: - size = kwargs['source_volume']['size'] - elif size is None and kwargs['source_replica'] is not None: - size = kwargs['source_replica']['size'] - - LOG.info("Create volume of %s GB", size) - - if self.ext_mgr.is_loaded('os-image-create'): - image_ref = volume.get('imageRef') - if image_ref is not None: - image_uuid = self._image_uuid_from_ref(image_ref, context) - kwargs['image_id'] = image_uuid - - kwargs['availability_zone'] = volume.get('availability_zone', None) - kwargs['scheduler_hints'] = volume.get('scheduler_hints', None) - kwargs['multiattach'] = utils.get_bool_param('multiattach', volume) - - new_volume = self.volume_api.create(context, - size, - volume.get('display_name'), - volume.get('display_description'), - **kwargs) - - retval = 
self._view_builder.detail(req, new_volume) - - return retval - - def _get_volume_filter_options(self): - """Return volume search options allowed by non-admin.""" - return CONF.query_volume_filters - - def update(self, req, id, body): - """Update a volume.""" - context = req.environ['cinder.context'] - - if not body: - msg = _("Missing request body") - raise exc.HTTPBadRequest(explanation=msg) - - if 'volume' not in body: - msg = _("Missing required element '%s' in request body") % 'volume' - raise exc.HTTPBadRequest(explanation=msg) - - volume = body['volume'] - update_dict = {} - - valid_update_keys = ( - 'name', - 'description', - 'display_name', - 'display_description', - 'metadata', - ) - - for key in valid_update_keys: - if key in volume: - update_dict[key] = volume[key] - - self.validate_name_and_description(update_dict) - - # NOTE(thingee): v2 API allows name instead of display_name - if 'name' in update_dict: - update_dict['display_name'] = update_dict.pop('name') - - # NOTE(thingee): v2 API allows description instead of - # display_description - if 'description' in update_dict: - update_dict['display_description'] = update_dict.pop('description') - - # Not found and Invalid exceptions will be handled at the wsgi level - try: - volume = self.volume_api.get(context, id, viewable_admin_meta=True) - volume_utils.notify_about_volume_usage(context, volume, - 'update.start') - self.volume_api.update(context, volume, update_dict) - except exception.InvalidVolumeMetadataSize as error: - raise webob.exc.HTTPRequestEntityTooLarge(explanation=error.msg) - - volume.update(update_dict) - - utils.add_visible_admin_metadata(volume) - - volume_utils.notify_about_volume_usage(context, volume, - 'update.end') - - return self._view_builder.detail(req, volume) - - -def create_resource(ext_mgr): - return wsgi.Resource(VolumeController(ext_mgr)) diff --git a/cinder/api/v3/__init__.py b/cinder/api/v3/__init__.py deleted file mode 100644 index e69de29bb..000000000 diff --git 
a/cinder/api/v3/attachments.py b/cinder/api/v3/attachments.py deleted file mode 100644 index 6dde935a1..000000000 --- a/cinder/api/v3/attachments.py +++ /dev/null @@ -1,270 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -"""The volumes attachments API.""" - -from oslo_log import log as logging -import webob - -from cinder.api import common -from cinder.api.openstack import wsgi -from cinder.api.v3.views import attachments as attachment_views -from cinder import exception -from cinder.i18n import _ -from cinder import objects -from cinder import utils -from cinder.volume import api as volume_api - - -LOG = logging.getLogger(__name__) -API_VERSION = '3.27' - - -class AttachmentsController(wsgi.Controller): - """The Attachments API controller for the OpenStack API.""" - - _view_builder_class = attachment_views.ViewBuilder - - allowed_filters = {'volume_id', 'status', 'instance_id', 'attach_status'} - - def __init__(self, ext_mgr=None): - """Initialize controller class.""" - self.volume_api = volume_api.API() - self.ext_mgr = ext_mgr - super(AttachmentsController, self).__init__() - - @wsgi.Controller.api_version(API_VERSION) - def show(self, req, id): - """Return data about the given attachment.""" - context = req.environ['cinder.context'] - attachment = objects.VolumeAttachment.get_by_id(context, id) - return attachment_views.ViewBuilder.detail(attachment) - - @wsgi.Controller.api_version(API_VERSION) - def index(self, req): - """Return a summary list of 
attachments.""" - attachments = self._items(req) - return attachment_views.ViewBuilder.list(attachments) - - @wsgi.Controller.api_version(API_VERSION) - def detail(self, req): - """Return a detailed list of attachments.""" - attachments = self._items(req) - return attachment_views.ViewBuilder.list(attachments, detail=True) - - @common.process_general_filtering('attachment') - def _process_attachment_filtering(self, context=None, filters=None, - req_version=None): - utils.remove_invalid_filter_options(context, filters, - self.allowed_filters) - - def _items(self, req): - """Return a list of attachments, transformed through view builder.""" - context = req.environ['cinder.context'] - req_version = req.api_version_request - - # Pop out non search_opts and create local variables - search_opts = req.GET.copy() - sort_keys, sort_dirs = common.get_sort_params(search_opts) - marker, limit, offset = common.get_pagination_params(search_opts) - - self._process_attachment_filtering(context=context, - filters=search_opts, - req_version=req_version) - if search_opts.get('instance_id', None): - search_opts['instance_uuid'] = search_opts.pop('instance_id', None) - if context.is_admin and 'all_tenants' in search_opts: - del search_opts['all_tenants'] - return objects.VolumeAttachmentList.get_all( - context, search_opts=search_opts, marker=marker, limit=limit, - offset=offset, sort_keys=sort_keys, sort_direction=sort_dirs) - else: - return objects.VolumeAttachmentList.get_all_by_project( - context, context.project_id, search_opts=search_opts, - marker=marker, limit=limit, offset=offset, sort_keys=sort_keys, - sort_direction=sort_dirs) - - @wsgi.Controller.api_version(API_VERSION) - @wsgi.response(202) - def create(self, req, body): - """Create an attachment. - - This method can be used to create an empty attachment (reserve) or to - create and initialize a volume attachment based on the provided input - parameters. 
- - If the caller does not yet have the connector information but needs to - reserve an attachment for the volume (ie Nova BootFromVolume) the - create can be called with just the volume-uuid and the server - identifier. This will reserve an attachment, mark the volume as - reserved and prevent any new attachment_create calls from being made - until the attachment is updated (completed). - - The alternative is that the connection can be reserved and initialized - all at once with a single call if the caller has all of the required - information (connector data) at the time of the call. - - NOTE: In Nova terms server == instance, the server_id parameter - referenced below is the UUID of the Instance, for non-nova consumers - this can be a server UUID or some other arbitrary unique identifier. - - Expected format of the input parameter 'body': - - .. code-block:: json - - { - "attachment": - { - "volume_uuid": "volume-uuid", - "instance_uuid": "nova-server-uuid", - "connector": "null|" - } - } - - Example connector: - - .. code-block:: json - - { - "connector": - { - "initiator": "iqn.1993-08.org.debian:01:cad181614cec", - "ip":"192.168.1.20", - "platform": "x86_64", - "host": "tempest-1", - "os_type": "linux2", - "multipath": false, - "mountpoint": "/dev/vdb", - "mode": "null|rw|ro" - } - } - - NOTE all that's required for a reserve is volume_uuid - and a instance_uuid. 
- - returns: A summary view of the attachment object - """ - context = req.environ['cinder.context'] - instance_uuid = body['attachment'].get('instance_uuid', None) - if not instance_uuid: - raise webob.exc.HTTPBadRequest( - explanation=_("Must specify 'instance_uuid' " - "to create attachment.")) - - volume_uuid = body['attachment'].get('volume_uuid', None) - if not volume_uuid: - raise webob.exc.HTTPBadRequest( - explanation=_("Must specify 'volume_uuid' " - "to create attachment.")) - - volume_ref = objects.Volume.get_by_id( - context, - volume_uuid) - connector = body['attachment'].get('connector', None) - err_msg = None - try: - attachment_ref = ( - self.volume_api.attachment_create(context, - volume_ref, - instance_uuid, - connector=connector)) - except exception.NotAuthorized: - raise - except exception.CinderException as ex: - err_msg = _( - "Unable to create attachment for volume (%s).") % ex.msg - LOG.exception(err_msg) - except Exception as ex: - err_msg = _("Unable to create attachment for volume.") - LOG.exception(err_msg) - finally: - if err_msg: - raise webob.exc.HTTPInternalServerError(explanation=err_msg) - return attachment_views.ViewBuilder.detail(attachment_ref) - - @wsgi.Controller.api_version(API_VERSION) - def update(self, req, id, body): - """Update an attachment record. - - Update a reserved attachment record with connector information and set - up the appropriate connection_info from the driver. - - Expected format of the input parameter 'body': - - .. 
code-block:: json - { - "attachment": - { - "connector": - { - "initiator": "iqn.1993-08.org.debian:01:cad181614cec", - "ip":"192.168.1.20", - "platform": "x86_64", - "host": "tempest-1", - "os_type": "linux2", - "multipath": False, - "mountpoint": "/dev/vdb", - "mode": None|"rw"|"ro", - } - } - } - """ - context = req.environ['cinder.context'] - attachment_ref = ( - objects.VolumeAttachment.get_by_id(context, id)) - connector = body['attachment'].get('connector', None) - if not connector: - raise webob.exc.HTTPBadRequest( - explanation=_("Must specify 'connector' " - "to update attachment.")) - err_msg = None - try: - attachment_ref = ( - self.volume_api.attachment_update(context, - attachment_ref, - connector)) - except exception.NotAuthorized: - raise - except exception.CinderException as ex: - err_msg = ( - _("Unable to update attachment.(%s).") % ex.msg) - LOG.exception(err_msg) - except Exception: - err_msg = _("Unable to update the attachment.") - LOG.exception(err_msg) - finally: - if err_msg: - raise webob.exc.HTTPInternalServerError(explanation=err_msg) - - # TODO(jdg): Test this out some more, do we want to return and object - # or a dict? - return attachment_views.ViewBuilder.detail(attachment_ref) - - @wsgi.Controller.api_version(API_VERSION) - def delete(self, req, id): - """Delete an attachment. - - Disconnects/Deletes the specified attachment, returns a list of any - known shared attachment-id's for the effected backend device. 
- - returns: A summary list of any attachments sharing this connection - - """ - context = req.environ['cinder.context'] - attachment = objects.VolumeAttachment.get_by_id(context, id) - attachments = self.volume_api.attachment_delete(context, attachment) - return attachment_views.ViewBuilder.list(attachments) - - -def create_resource(ext_mgr): - """Create the wsgi resource for this controller.""" - return wsgi.Resource(AttachmentsController(ext_mgr)) diff --git a/cinder/api/v3/backups.py b/cinder/api/v3/backups.py deleted file mode 100644 index c75308f5a..000000000 --- a/cinder/api/v3/backups.py +++ /dev/null @@ -1,105 +0,0 @@ -# Copyright (c) 2016 Intel, Inc. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -"""The backups V3 API.""" - -from oslo_log import log as logging -from webob import exc - -from cinder.api.contrib import backups as backups_v2 -from cinder.api.openstack import wsgi -from cinder.backup import api as backup_api -from cinder import exception -from cinder.i18n import _ - - -BACKUP_UPDATE_MICRO_VERSION = '3.9' -BACKUP_TENANT_MICRO_VERSION = '3.18' - -LOG = logging.getLogger(__name__) - - -class BackupsController(backups_v2.BackupsController): - """The backups API controller for the OpenStack API V3.""" - - @wsgi.Controller.api_version(BACKUP_UPDATE_MICRO_VERSION) - def update(self, req, id, body): - """Update a backup.""" - context = req.environ['cinder.context'] - self.assert_valid_body(body, 'backup') - - backup_update = body['backup'] - - self.validate_name_and_description(backup_update) - update_dict = {} - if 'name' in backup_update: - update_dict['display_name'] = backup_update.pop('name') - if 'description' in backup_update: - update_dict['display_description'] = ( - backup_update.pop('description')) - # Check no unsupported fields. 
- if backup_update: - msg = _("Unsupported fields %s.") % (", ".join(backup_update)) - raise exc.HTTPBadRequest(explanation=msg) - - new_backup = self.backup_api.update(context, id, update_dict) - - return self._view_builder.summary(req, new_backup) - - def _add_backup_project_attribute(self, req, backup): - db_backup = req.get_db_backup(backup['id']) - key = "os-backup-project-attr:project_id" - backup[key] = db_backup['project_id'] - - def show(self, req, id): - """Return data about the given backup.""" - LOG.debug('Show backup with id %s.', id) - context = req.environ['cinder.context'] - req_version = req.api_version_request - - # Not found exception will be handled at the wsgi level - backup = self.backup_api.get(context, backup_id=id) - req.cache_db_backup(backup) - - resp_backup = self._view_builder.detail(req, backup) - if req_version.matches(BACKUP_TENANT_MICRO_VERSION): - try: - backup_api.check_policy(context, 'backup_project_attribute') - self._add_backup_project_attribute(req, resp_backup['backup']) - except exception.PolicyNotAuthorized: - pass - return resp_backup - - def detail(self, req): - resp_backup = super(BackupsController, self).detail(req) - context = req.environ['cinder.context'] - req_version = req.api_version_request - - if req_version.matches(BACKUP_TENANT_MICRO_VERSION): - try: - backup_api.check_policy(context, 'backup_project_attribute') - for bak in resp_backup['backups']: - self._add_backup_project_attribute(req, bak) - except exception.PolicyNotAuthorized: - pass - return resp_backup - - def _convert_sort_name(self, req_version, sort_keys): - if req_version.matches("3.37") and 'name' in sort_keys: - sort_keys[sort_keys.index('name')] = 'display_name' - - -def create_resource(): - return wsgi.Resource(BackupsController()) diff --git a/cinder/api/v3/clusters.py b/cinder/api/v3/clusters.py deleted file mode 100644 index 120f1e6a1..000000000 --- a/cinder/api/v3/clusters.py +++ /dev/null @@ -1,143 +0,0 @@ -# Copyright (c) 2016 Red Hat 
Inc. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from cinder.api.openstack import wsgi -from cinder.api.v3.views import clusters as clusters_view -from cinder import exception -from cinder.i18n import _ -from cinder import objects -from cinder import utils - - -CLUSTER_MICRO_VERSION = '3.7' -REPLICATION_DATA_MICRO_VERSION = '3.26' - - -class ClusterController(wsgi.Controller): - allowed_list_keys = {'name', 'binary', 'is_up', 'disabled', 'num_hosts', - 'num_down_hosts', 'binary', 'replication_status', - 'frozen', 'active_backend_id'} - replication_fields = {'replication_status', 'frozen', 'active_backend_id'} - - policy_checker = wsgi.Controller.get_policy_checker('clusters') - - @wsgi.Controller.api_version(CLUSTER_MICRO_VERSION) - def show(self, req, id, binary='cinder-volume'): - """Return data for a given cluster name with optional binary.""" - # Let the wsgi middleware convert NotAuthorized exceptions - context = self.policy_checker(req, 'get') - # Let the wsgi middleware convert NotFound exceptions - cluster = objects.Cluster.get_by_id(context, None, binary=binary, - name=id, services_summary=True) - replication_data = req.api_version_request.matches( - REPLICATION_DATA_MICRO_VERSION) - return clusters_view.ViewBuilder.detail(cluster, replication_data) - - @wsgi.Controller.api_version(CLUSTER_MICRO_VERSION) - def index(self, req): - """Return a non detailed list of all existing clusters. 
- - Filter by is_up, disabled, num_hosts, and num_down_hosts. - """ - return self._get_clusters(req, detail=False) - - @wsgi.Controller.api_version(CLUSTER_MICRO_VERSION) - def detail(self, req): - """Return a detailed list of all existing clusters. - - Filter by is_up, disabled, num_hosts, and num_down_hosts. - """ - return self._get_clusters(req, detail=True) - - def _get_clusters(self, req, detail): - # Let the wsgi middleware convert NotAuthorized exceptions - context = self.policy_checker(req, 'get_all') - replication_data = req.api_version_request.matches( - REPLICATION_DATA_MICRO_VERSION) - filters = dict(req.GET) - allowed = self.allowed_list_keys - if not replication_data: - allowed = allowed.difference(self.replication_fields) - - # Check filters are valid - if not allowed.issuperset(filters): - invalid_keys = set(filters).difference(allowed) - msg = _('Invalid filter keys: %s') % ', '.join(invalid_keys) - raise exception.InvalidInput(reason=msg) - - # Check boolean values - for bool_key in ('disabled', 'is_up'): - if bool_key in filters: - filters[bool_key] = utils.get_bool_param(bool_key, req.GET) - - # For detailed view we need the services summary information - filters['services_summary'] = detail - - clusters = objects.ClusterList.get_all(context, **filters) - return clusters_view.ViewBuilder.list(clusters, detail, - replication_data) - - @wsgi.Controller.api_version(CLUSTER_MICRO_VERSION) - def update(self, req, id, body): - """Enable/Disable scheduling for a cluster.""" - # NOTE(geguileo): This method tries to be consistent with services - # update endpoint API. 
- - # Let the wsgi middleware convert NotAuthorized exceptions - context = self.policy_checker(req, 'update') - - if id not in ('enable', 'disable'): - raise exception.NotFound(message=_("Unknown action")) - - disabled = id != 'enable' - disabled_reason = self._get_disabled_reason(body) if disabled else None - - if not disabled and disabled_reason: - msg = _("Unexpected 'disabled_reason' found on enable request.") - raise exception.InvalidInput(reason=msg) - - name = body.get('name') - if not name: - raise exception.MissingRequired(element='name') - - binary = body.get('binary', 'cinder-volume') - - # Let wsgi handle NotFound exception - cluster = objects.Cluster.get_by_id(context, None, binary=binary, - name=name) - cluster.disabled = disabled - cluster.disabled_reason = disabled_reason - cluster.save() - - # We return summary data plus the disabled reason - replication_data = req.api_version_request.matches( - REPLICATION_DATA_MICRO_VERSION) - ret_val = clusters_view.ViewBuilder.summary(cluster, replication_data) - ret_val['cluster']['disabled_reason'] = disabled_reason - - return ret_val - - def _get_disabled_reason(self, body): - reason = body.get('disabled_reason') - if reason: - # Let wsgi handle InvalidInput exception - reason = reason.strip() - utils.check_string_length(reason, 'Disabled reason', min_length=1, - max_length=255) - return reason - - -def create_resource(): - return wsgi.Resource(ClusterController()) diff --git a/cinder/api/v3/consistencygroups.py b/cinder/api/v3/consistencygroups.py deleted file mode 100644 index a7e542293..000000000 --- a/cinder/api/v3/consistencygroups.py +++ /dev/null @@ -1,90 +0,0 @@ -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. 
You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -"""The consistencygroups V3 API.""" - -from oslo_log import log as logging -from six.moves import http_client -import webob -from webob import exc - -from cinder.api.contrib import consistencygroups as cg_v2 -from cinder.api.openstack import wsgi -from cinder.i18n import _ - -LOG = logging.getLogger(__name__) - - -class ConsistencyGroupsController(cg_v2.ConsistencyGroupsController): - """The ConsistencyGroups API controller for the OpenStack API V3.""" - - def _check_update_parameters_v3(self, req, name, description, add_volumes, - remove_volumes): - allow_empty = req.api_version_request.matches('3.6', None) - if allow_empty: - if (name is None and description is None - and not add_volumes and not remove_volumes): - msg = _("Must specify one or more of the following keys to " - "update: name, description, " - "add_volumes, remove_volumes.") - raise exc.HTTPBadRequest(explanation=msg) - else: - if not (name or description or add_volumes or remove_volumes): - msg = _("Name, description, add_volumes, and remove_volumes " - "can not be all empty in the request body.") - raise exc.HTTPBadRequest(explanation=msg) - return allow_empty - - def update(self, req, id, body): - """Update the consistency group. - - Expected format of the input parameter 'body': - - .. code-block:: json - - { - "consistencygroup": - { - "name": "my_cg", - "description": "My consistency group", - "add_volumes": "volume-uuid-1,volume-uuid-2,...", - "remove_volumes": "volume-uuid-8,volume-uuid-9,..." 
- } - } - - """ - LOG.debug('Update called for consistency group %s.', id) - if not body: - msg = _("Missing request body.") - raise exc.HTTPBadRequest(explanation=msg) - - self.assert_valid_body(body, 'consistencygroup') - context = req.environ['cinder.context'] - consistencygroup = body.get('consistencygroup', None) - self.validate_name_and_description(consistencygroup) - name = consistencygroup.get('name', None) - description = consistencygroup.get('description', None) - add_volumes = consistencygroup.get('add_volumes', None) - remove_volumes = consistencygroup.get('remove_volumes', None) - - allow_empty = self._check_update_parameters_v3(req, name, - description, - add_volumes, - remove_volumes) - self._update(context, id, name, description, add_volumes, - remove_volumes, allow_empty) - return webob.Response(status_int=http_client.ACCEPTED) - - -def create_resource(): - return wsgi.Resource(ConsistencyGroupsController()) diff --git a/cinder/api/v3/group_snapshots.py b/cinder/api/v3/group_snapshots.py deleted file mode 100644 index f1b18763a..000000000 --- a/cinder/api/v3/group_snapshots.py +++ /dev/null @@ -1,231 +0,0 @@ -# Copyright (C) 2016 EMC Corporation. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -"""The group_snapshots API.""" - -from oslo_log import log as logging -import six -from six.moves import http_client -import webob -from webob import exc - -from cinder.api import common -from cinder.api.openstack import wsgi -from cinder.api.v3.views import group_snapshots as group_snapshot_views -from cinder import exception -from cinder import group as group_api -from cinder.i18n import _ -from cinder import rpc -from cinder.volume import group_types - -LOG = logging.getLogger(__name__) - -GROUP_SNAPSHOT_API_VERSION = '3.14' - - -class GroupSnapshotsController(wsgi.Controller): - """The group_snapshots API controller for the OpenStack API.""" - - _view_builder_class = group_snapshot_views.ViewBuilder - - def __init__(self): - self.group_snapshot_api = group_api.API() - super(GroupSnapshotsController, self).__init__() - - def _check_default_cgsnapshot_type(self, group_type_id): - if group_types.is_default_cgsnapshot_type(group_type_id): - msg = (_("Group_type %(group_type)s is reserved for migrating " - "CGs to groups. 
Migrated group snapshots can only be " - "operated by CG snapshot APIs.") - % {'group_type': group_type_id}) - raise exc.HTTPBadRequest(explanation=msg) - - @wsgi.Controller.api_version(GROUP_SNAPSHOT_API_VERSION) - def show(self, req, id): - """Return data about the given group_snapshot.""" - LOG.debug('show called for member %s', id) - context = req.environ['cinder.context'] - - group_snapshot = self.group_snapshot_api.get_group_snapshot( - context, - group_snapshot_id=id) - - self._check_default_cgsnapshot_type(group_snapshot.group_type_id) - - return self._view_builder.detail(req, group_snapshot) - - @wsgi.Controller.api_version(GROUP_SNAPSHOT_API_VERSION) - def delete(self, req, id): - """Delete a group_snapshot.""" - LOG.debug('delete called for member %s', id) - context = req.environ['cinder.context'] - - LOG.info('Delete group_snapshot with id: %s', id, context=context) - - try: - group_snapshot = self.group_snapshot_api.get_group_snapshot( - context, - group_snapshot_id=id) - self._check_default_cgsnapshot_type(group_snapshot.group_type_id) - self.group_snapshot_api.delete_group_snapshot(context, - group_snapshot) - except exception.InvalidGroupSnapshot as e: - raise exc.HTTPBadRequest(explanation=six.text_type(e)) - except exception.GroupSnapshotNotFound: - # Not found exception will be handled at the wsgi level - raise - except Exception: - msg = _("Error occurred when deleting group snapshot %s.") % id - LOG.exception(msg) - raise exc.HTTPBadRequest(explanation=msg) - - return webob.Response(status_int=http_client.ACCEPTED) - - @wsgi.Controller.api_version(GROUP_SNAPSHOT_API_VERSION) - def index(self, req): - """Returns a summary list of group_snapshots.""" - return self._get_group_snapshots(req, is_detail=False) - - @wsgi.Controller.api_version(GROUP_SNAPSHOT_API_VERSION) - def detail(self, req): - """Returns a detailed list of group_snapshots.""" - return self._get_group_snapshots(req, is_detail=True) - - def _get_group_snapshots(self, req, 
is_detail): - """Returns a list of group_snapshots through view builder.""" - - context = req.environ['cinder.context'] - req_version = req.api_version_request - filters = marker = limit = offset = sort_keys = sort_dirs = None - if req_version.matches("3.29"): - filters = req.params.copy() - marker, limit, offset = common.get_pagination_params(filters) - sort_keys, sort_dirs = common.get_sort_params(filters) - - if req_version.matches(common.FILTERING_VERSION): - support_like = (True if req_version.matches( - common.LIKE_FILTER_VERSION) else False) - common.reject_invalid_filters(context, filters, 'group_snapshot', - support_like) - - group_snapshots = self.group_snapshot_api.get_all_group_snapshots( - context, filters=filters, marker=marker, limit=limit, - offset=offset, sort_keys=sort_keys, sort_dirs=sort_dirs) - if is_detail: - group_snapshots = self._view_builder.detail_list(req, - group_snapshots) - else: - group_snapshots = self._view_builder.summary_list(req, - group_snapshots) - - new_group_snapshots = [] - for grp_snap in group_snapshots['group_snapshots']: - try: - # Only show group snapshots not migrated from CG snapshots - self._check_default_cgsnapshot_type(grp_snap['group_type_id']) - if not is_detail: - grp_snap.pop('group_type_id', None) - new_group_snapshots.append(grp_snap) - except exc.HTTPBadRequest: - # Skip migrated group snapshot - pass - - group_snapshots['group_snapshots'] = new_group_snapshots - return group_snapshots - - @wsgi.Controller.api_version(GROUP_SNAPSHOT_API_VERSION) - @wsgi.response(http_client.ACCEPTED) - def create(self, req, body): - """Create a new group_snapshot.""" - LOG.debug('Creating new group_snapshot %s', body) - self.assert_valid_body(body, 'group_snapshot') - - context = req.environ['cinder.context'] - group_snapshot = body['group_snapshot'] - self.validate_name_and_description(group_snapshot) - - try: - group_id = group_snapshot['group_id'] - except KeyError: - msg = _("'group_id' must be specified") - raise 
exc.HTTPBadRequest(explanation=msg) - - group = self.group_snapshot_api.get(context, group_id) - self._check_default_cgsnapshot_type(group.group_type_id) - name = group_snapshot.get('name', None) - description = group_snapshot.get('description', None) - - LOG.info("Creating group_snapshot %(name)s.", - {'name': name}, - context=context) - - try: - new_group_snapshot = self.group_snapshot_api.create_group_snapshot( - context, group, name, description) - except (exception.InvalidGroup, - exception.InvalidGroupSnapshot, - exception.InvalidVolume) as error: - raise exc.HTTPBadRequest(explanation=error.msg) - - retval = self._view_builder.summary(req, new_group_snapshot) - - return retval - - @wsgi.Controller.api_version('3.19') - @wsgi.action("reset_status") - def reset_status(self, req, id, body): - return self._reset_status(req, id, body) - - def _reset_status(self, req, id, body): - """Reset status on group snapshots""" - - context = req.environ['cinder.context'] - try: - status = body['reset_status']['status'].lower() - except (TypeError, KeyError): - raise exc.HTTPBadRequest(explanation=_("Must specify 'status'")) - - LOG.debug("Updating group '%(id)s' with " - "'%(update)s'", {'id': id, - 'update': status}) - try: - notifier = rpc.get_notifier('groupSnapshotStatusUpdate') - notifier.info(context, 'groupsnapshots.reset_status.start', - {'id': id, - 'update': status}) - gsnapshot = self.group_snapshot_api.get_group_snapshot(context, id) - - self.group_snapshot_api.reset_group_snapshot_status(context, - gsnapshot, - status) - notifier.info(context, 'groupsnapshots.reset_status.end', - {'id': id, - 'update': status}) - except exception.GroupSnapshotNotFound as error: - # Not found exception will be handled at the wsgi level - notifier.error(context, 'groupsnapshots.reset_status', - {'error_message': error.msg, - 'id': id}) - raise - except exception.InvalidGroupSnapshotStatus as error: - notifier.error(context, 'groupsnapshots.reset_status', - {'error_message': 
error.msg, - 'id': id}) - raise exc.HTTPBadRequest(explanation=error.msg) - return webob.Response(status_int=http_client.ACCEPTED) - - -def create_resource(): - return wsgi.Resource(GroupSnapshotsController()) diff --git a/cinder/api/v3/group_specs.py b/cinder/api/v3/group_specs.py deleted file mode 100644 index ee89cfe08..000000000 --- a/cinder/api/v3/group_specs.py +++ /dev/null @@ -1,155 +0,0 @@ -# Copyright (c) 2016 EMC Corporation -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -"""The group types specs controller""" - -from six.moves import http_client -import webob - -from cinder.api import common -from cinder.api.openstack import wsgi -from cinder import db -from cinder import exception -from cinder.i18n import _ -from cinder import policy -from cinder import rpc -from cinder import utils -from cinder.volume import group_types - - -class GroupTypeSpecsController(wsgi.Controller): - """The group type specs API controller for the OpenStack API.""" - - def _check_policy(self, context): - target = { - 'project_id': context.project_id, - 'user_id': context.user_id, - } - policy.enforce(context, 'group:group_types_specs', target) - - def _get_group_specs(self, context, group_type_id): - group_specs = db.group_type_specs_get(context, group_type_id) - specs_dict = {} - for key, value in group_specs.items(): - specs_dict[key] = value - return dict(group_specs=specs_dict) - - def _check_type(self, context, group_type_id): - try: - group_types.get_group_type(context, 
group_type_id) - except exception.GroupTypeNotFound as ex: - raise webob.exc.HTTPNotFound(explanation=ex.msg) - - @wsgi.Controller.api_version('3.11') - def index(self, req, group_type_id): - """Returns the list of group specs for a given group type.""" - context = req.environ['cinder.context'] - self._check_policy(context) - self._check_type(context, group_type_id) - return self._get_group_specs(context, group_type_id) - - @wsgi.Controller.api_version('3.11') - @wsgi.response(http_client.ACCEPTED) - def create(self, req, group_type_id, body=None): - context = req.environ['cinder.context'] - self._check_policy(context) - self.assert_valid_body(body, 'group_specs') - - self._check_type(context, group_type_id) - specs = body['group_specs'] - self._check_key_names(specs.keys()) - utils.validate_dictionary_string_length(specs) - - db.group_type_specs_update_or_create(context, - group_type_id, - specs) - notifier_info = dict(type_id=group_type_id, specs=specs) - notifier = rpc.get_notifier('groupTypeSpecs') - notifier.info(context, 'group_type_specs.create', - notifier_info) - return body - - @wsgi.Controller.api_version('3.11') - def update(self, req, group_type_id, id, body=None): - context = req.environ['cinder.context'] - self._check_policy(context) - - if not body: - expl = _('Request body empty') - raise webob.exc.HTTPBadRequest(explanation=expl) - self._check_type(context, group_type_id) - if id not in body: - expl = _('Request body and URI mismatch') - raise webob.exc.HTTPBadRequest(explanation=expl) - if len(body) > 1: - expl = _('Request body contains too many items') - raise webob.exc.HTTPBadRequest(explanation=expl) - self._check_key_names(body.keys()) - utils.validate_dictionary_string_length(body) - - db.group_type_specs_update_or_create(context, - group_type_id, - body) - notifier_info = dict(type_id=group_type_id, id=id) - notifier = rpc.get_notifier('groupTypeSpecs') - notifier.info(context, - 'group_type_specs.update', - notifier_info) - return body - 
- @wsgi.Controller.api_version('3.11') - def show(self, req, group_type_id, id): - """Return a single extra spec item.""" - context = req.environ['cinder.context'] - self._check_policy(context) - - self._check_type(context, group_type_id) - specs = self._get_group_specs(context, group_type_id) - if id in specs['group_specs']: - return {id: specs['group_specs'][id]} - else: - msg = _("Group Type %(type_id)s has no extra spec with key " - "%(id)s.") % ({'type_id': group_type_id, 'id': id}) - raise webob.exc.HTTPNotFound(explanation=msg) - - @wsgi.Controller.api_version('3.11') - def delete(self, req, group_type_id, id): - """Deletes an existing group spec.""" - context = req.environ['cinder.context'] - self._check_policy(context) - - self._check_type(context, group_type_id) - - try: - db.group_type_specs_delete(context, group_type_id, id) - except exception.GroupTypeSpecsNotFound as error: - raise webob.exc.HTTPNotFound(explanation=error.msg) - - notifier_info = dict(type_id=group_type_id, id=id) - notifier = rpc.get_notifier('groupTypeSpecs') - notifier.info(context, - 'group_type_specs.delete', - notifier_info) - return webob.Response(status_int=http_client.ACCEPTED) - - def _check_key_names(self, keys): - if not common.validate_key_names(keys): - expl = _('Key names can only contain alphanumeric characters, ' - 'underscores, periods, colons and hyphens.') - - raise webob.exc.HTTPBadRequest(explanation=expl) - - -def create_resource(): - return wsgi.Resource(GroupTypeSpecsController()) diff --git a/cinder/api/v3/group_types.py b/cinder/api/v3/group_types.py deleted file mode 100644 index 1163f5197..000000000 --- a/cinder/api/v3/group_types.py +++ /dev/null @@ -1,270 +0,0 @@ -# Copyright (c) 2016 EMC Corporation -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. 
You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -"""The group type & group type specs controller.""" - -from oslo_utils import strutils -import six -from six.moves import http_client -import webob -from webob import exc - -from cinder.api import common -from cinder.api.openstack import wsgi -from cinder.api.v3.views import group_types as views_types -from cinder import exception -from cinder.i18n import _ -from cinder import policy -from cinder import rpc -from cinder import utils -from cinder.volume import group_types - - -class GroupTypesController(wsgi.Controller): - """The group types API controller for the OpenStack API.""" - - _view_builder_class = views_types.ViewBuilder - - def _check_policy(self, context): - target = { - 'project_id': context.project_id, - 'user_id': context.user_id, - } - policy.enforce(context, 'group:group_types_manage', target) - - @utils.if_notifications_enabled - def _notify_group_type_error(self, context, method, err, - group_type=None, id=None, name=None): - payload = dict( - group_types=group_type, name=name, id=id, error_message=err) - rpc.get_notifier('groupType').error(context, method, payload) - - @utils.if_notifications_enabled - def _notify_group_type_info(self, context, method, group_type): - payload = dict(group_types=group_type) - rpc.get_notifier('groupType').info(context, method, payload) - - @wsgi.Controller.api_version('3.11') - @wsgi.response(http_client.ACCEPTED) - def create(self, req, body): - """Creates a new group type.""" - context = req.environ['cinder.context'] - self._check_policy(context) - - self.assert_valid_body(body, 'group_type') - - 
grp_type = body['group_type'] - name = grp_type.get('name', None) - description = grp_type.get('description') - specs = grp_type.get('group_specs', {}) - is_public = utils.get_bool_param('is_public', grp_type, True) - - if name is None or len(name.strip()) == 0: - msg = _("Group type name can not be empty.") - raise webob.exc.HTTPBadRequest(explanation=msg) - - utils.check_string_length(name, 'Type name', - min_length=1, max_length=255) - - if description is not None: - utils.check_string_length(description, 'Type description', - min_length=0, max_length=255) - - try: - group_types.create(context, - name, - specs, - is_public, - description=description) - grp_type = group_types.get_group_type_by_name(context, name) - req.cache_resource(grp_type, name='group_types') - self._notify_group_type_info( - context, 'group_type.create', grp_type) - - except exception.GroupTypeExists as err: - self._notify_group_type_error( - context, 'group_type.create', err, group_type=grp_type) - raise webob.exc.HTTPConflict(explanation=six.text_type(err)) - except exception.GroupTypeNotFoundByName as err: - self._notify_group_type_error( - context, 'group_type.create', err, name=name) - raise webob.exc.HTTPNotFound(explanation=err.msg) - - return self._view_builder.show(req, grp_type) - - @wsgi.Controller.api_version('3.11') - def update(self, req, id, body): - # Update description for a given group type. - context = req.environ['cinder.context'] - self._check_policy(context) - - self.assert_valid_body(body, 'group_type') - - grp_type = body['group_type'] - description = grp_type.get('description') - name = grp_type.get('name') - is_public = grp_type.get('is_public') - - # Name and description can not be both None. - # If name specified, name can not be empty. 
- if name and len(name.strip()) == 0: - msg = _("Group type name can not be empty.") - raise webob.exc.HTTPBadRequest(explanation=msg) - - if name is None and description is None and is_public is None: - msg = _("Specify group type name, description or " - "a combination thereof.") - raise webob.exc.HTTPBadRequest(explanation=msg) - - if is_public is not None: - is_public = utils.get_bool_param('is_public', grp_type) - - if name: - utils.check_string_length(name, 'Type name', - min_length=1, max_length=255) - - if description is not None: - utils.check_string_length(description, 'Type description', - min_length=0, max_length=255) - - try: - group_types.update(context, id, name, description, - is_public=is_public) - # Get the updated - grp_type = group_types.get_group_type(context, id) - req.cache_resource(grp_type, name='group_types') - self._notify_group_type_info( - context, 'group_type.update', grp_type) - - except exception.GroupTypeNotFound as err: - self._notify_group_type_error( - context, 'group_type.update', err, id=id) - raise webob.exc.HTTPNotFound(explanation=six.text_type(err)) - except exception.GroupTypeExists as err: - self._notify_group_type_error( - context, 'group_type.update', err, group_type=grp_type) - raise webob.exc.HTTPConflict(explanation=six.text_type(err)) - except exception.GroupTypeUpdateFailed as err: - self._notify_group_type_error( - context, 'group_type.update', err, group_type=grp_type) - raise webob.exc.HTTPInternalServerError( - explanation=six.text_type(err)) - - return self._view_builder.show(req, grp_type) - - @wsgi.Controller.api_version('3.11') - def delete(self, req, id): - """Deletes an existing group type.""" - context = req.environ['cinder.context'] - self._check_policy(context) - - try: - grp_type = group_types.get_group_type(context, id) - group_types.destroy(context, grp_type['id']) - self._notify_group_type_info( - context, 'group_type.delete', grp_type) - except exception.GroupTypeInUse as err: - 
self._notify_group_type_error( - context, 'group_type.delete', err, group_type=grp_type) - msg = _('Target group type is still in use.') - raise webob.exc.HTTPBadRequest(explanation=msg) - except exception.GroupTypeNotFound as err: - self._notify_group_type_error( - context, 'group_type.delete', err, id=id) - raise webob.exc.HTTPNotFound(explanation=err.msg) - - return webob.Response(status_int=http_client.ACCEPTED) - - @wsgi.Controller.api_version('3.11') - def index(self, req): - """Returns the list of group types.""" - limited_types = self._get_group_types(req) - req.cache_resource(limited_types, name='group_types') - return self._view_builder.index(req, limited_types) - - @wsgi.Controller.api_version('3.11') - def show(self, req, id): - """Return a single group type item.""" - context = req.environ['cinder.context'] - - # get default group type - if id is not None and id == 'default': - grp_type = group_types.get_default_group_type() - if not grp_type: - msg = _("Default group type can not be found.") - raise exc.HTTPNotFound(explanation=msg) - req.cache_resource(grp_type, name='group_types') - else: - try: - grp_type = group_types.get_group_type(context, id) - req.cache_resource(grp_type, name='group_types') - except exception.GroupTypeNotFound as error: - raise exc.HTTPNotFound(explanation=error.msg) - - return self._view_builder.show(req, grp_type) - - def _parse_is_public(self, is_public): - """Parse is_public into something usable. 
- - * True: List public group types only - * False: List private group types only - * None: List both public and private group types - """ - - if is_public is None: - # preserve default value of showing only public types - return True - elif utils.is_none_string(is_public): - return None - else: - try: - return strutils.bool_from_string(is_public, strict=True) - except ValueError: - msg = _('Invalid is_public filter [%s]') % is_public - raise exc.HTTPBadRequest(explanation=msg) - - def _get_group_types(self, req): - """Helper function that returns a list of type dicts.""" - params = req.params.copy() - marker, limit, offset = common.get_pagination_params(params) - sort_keys, sort_dirs = common.get_sort_params(params) - filters = {} - context = req.environ['cinder.context'] - if context.is_admin: - # Only admin has query access to all group types - filters['is_public'] = self._parse_is_public( - req.params.get('is_public', None)) - else: - filters['is_public'] = True - utils.remove_invalid_filter_options(context, - filters, - self._get_grp_type_filter_options() - ) - limited_types = group_types.get_all_group_types(context, - filters=filters, - marker=marker, - limit=limit, - sort_keys=sort_keys, - sort_dirs=sort_dirs, - offset=offset, - list_result=True) - return limited_types - - def _get_grp_type_filter_options(self): - """Return group type search options allowed by non-admin.""" - return ['is_public'] - - -def create_resource(): - return wsgi.Resource(GroupTypesController()) diff --git a/cinder/api/v3/groups.py b/cinder/api/v3/groups.py deleted file mode 100644 index b0e029b0f..000000000 --- a/cinder/api/v3/groups.py +++ /dev/null @@ -1,483 +0,0 @@ -# Copyright (c) 2016 EMC Corporation -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. 
You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -"""The groups controller.""" - -from oslo_log import log as logging -from oslo_utils import strutils -from oslo_utils import uuidutils -from six.moves import http_client -import webob -from webob import exc - -from cinder.api import common -from cinder.api.openstack import wsgi -from cinder.api.v3.views import groups as views_groups -from cinder import exception -from cinder import group as group_api -from cinder.i18n import _ -from cinder import rpc -from cinder.volume import group_types - -LOG = logging.getLogger(__name__) - -GROUP_API_VERSION = '3.13' -GROUP_CREATE_FROM_SRC_API_VERSION = '3.14' -GROUP_REPLICATION_API_VERSION = '3.38' - - -class GroupsController(wsgi.Controller): - """The groups API controller for the OpenStack API.""" - - _view_builder_class = views_groups.ViewBuilder - - def __init__(self): - self.group_api = group_api.API() - super(GroupsController, self).__init__() - - def _check_default_cgsnapshot_type(self, group_type_id): - if group_types.is_default_cgsnapshot_type(group_type_id): - msg = _("Group_type %(group_type)s is reserved for migrating " - "CGs to groups. 
Migrated group can only be operated by " - "CG APIs.") % {'group_type': group_type_id} - raise exc.HTTPBadRequest(explanation=msg) - - @wsgi.Controller.api_version(GROUP_API_VERSION) - def show(self, req, id): - """Return data about the given group.""" - LOG.debug('show called for member %s', id) - context = req.environ['cinder.context'] - - # Not found exception will be handled at the wsgi level - group = self.group_api.get( - context, - group_id=id) - - self._check_default_cgsnapshot_type(group.group_type_id) - - return self._view_builder.detail(req, group) - - @wsgi.Controller.api_version('3.20') - @wsgi.action("reset_status") - def reset_status(self, req, id, body): - return self._reset_status(req, id, body) - - def _reset_status(self, req, id, body): - """Reset status on generic group.""" - - context = req.environ['cinder.context'] - try: - status = body['reset_status']['status'].lower() - except (TypeError, KeyError): - raise exc.HTTPBadRequest(explanation=_("Must specify 'status'")) - - LOG.debug("Updating group '%(id)s' with " - "'%(update)s'", {'id': id, - 'update': status}) - try: - notifier = rpc.get_notifier('groupStatusUpdate') - notifier.info(context, 'groups.reset_status.start', - {'id': id, - 'update': status}) - group = self.group_api.get(context, id) - - self.group_api.reset_status(context, group, status) - notifier.info(context, 'groups.reset_status.end', - {'id': id, - 'update': status}) - except exception.GroupNotFound as error: - # Not found exception will be handled at the wsgi level - notifier.error(context, 'groups.reset_status', - {'error_message': error.msg, - 'id': id}) - raise - except exception.InvalidGroupStatus as error: - notifier.error(context, 'groups.reset_status', - {'error_message': error.msg, - 'id': id}) - raise exc.HTTPBadRequest(explanation=error.msg) - return webob.Response(status_int=http_client.ACCEPTED) - - @wsgi.Controller.api_version(GROUP_API_VERSION) - @wsgi.action("delete") - def delete_group(self, req, id, body): 
- return self._delete(req, id, body) - - def _delete(self, req, id, body): - """Delete a group.""" - LOG.debug('delete called for group %s', id) - context = req.environ['cinder.context'] - del_vol = False - if body: - if not self.is_valid_body(body, 'delete'): - msg = _("Missing required element 'delete' in " - "request body.") - raise exc.HTTPBadRequest(explanation=msg) - - grp_body = body['delete'] - try: - del_vol = strutils.bool_from_string( - grp_body.get('delete-volumes', False), - strict=True) - except ValueError: - msg = (_("Invalid value '%s' for delete-volumes flag.") - % del_vol) - raise exc.HTTPBadRequest(explanation=msg) - - LOG.info('Delete group with id: %s', id, - context=context) - - try: - group = self.group_api.get(context, id) - self._check_default_cgsnapshot_type(group.group_type_id) - self.group_api.delete(context, group, del_vol) - except exception.GroupNotFound: - # Not found exception will be handled at the wsgi level - raise - except exception.InvalidGroup as error: - raise exc.HTTPBadRequest(explanation=error.msg) - - return webob.Response(status_int=http_client.ACCEPTED) - - @wsgi.Controller.api_version(GROUP_API_VERSION) - def index(self, req): - """Returns a summary list of groups.""" - return self._get_groups(req, is_detail=False) - - @wsgi.Controller.api_version(GROUP_API_VERSION) - def detail(self, req): - """Returns a detailed list of groups.""" - return self._get_groups(req, is_detail=True) - - def _get_groups(self, req, is_detail): - """Returns a list of groups through view builder.""" - context = req.environ['cinder.context'] - filters = req.params.copy() - api_version = req.api_version_request - marker, limit, offset = common.get_pagination_params(filters) - sort_keys, sort_dirs = common.get_sort_params(filters) - - filters.pop('list_volume', None) - if api_version.matches(common.FILTERING_VERSION): - support_like = (True if api_version.matches( - common.LIKE_FILTER_VERSION) else False) - common.reject_invalid_filters(context, 
filters, 'group', - support_like) - - groups = self.group_api.get_all( - context, filters=filters, marker=marker, limit=limit, - offset=offset, sort_keys=sort_keys, sort_dirs=sort_dirs) - - new_groups = [] - for grp in groups: - try: - # Only show groups not migrated from CGs - self._check_default_cgsnapshot_type(grp.group_type_id) - new_groups.append(grp) - except exc.HTTPBadRequest: - # Skip migrated group - pass - - if is_detail: - groups = self._view_builder.detail_list( - req, new_groups) - else: - groups = self._view_builder.summary_list( - req, new_groups) - return groups - - @wsgi.Controller.api_version(GROUP_API_VERSION) - @wsgi.response(http_client.ACCEPTED) - def create(self, req, body): - """Create a new group.""" - LOG.debug('Creating new group %s', body) - self.assert_valid_body(body, 'group') - - context = req.environ['cinder.context'] - group = body['group'] - self.validate_name_and_description(group) - name = group.get('name') - description = group.get('description') - group_type = group.get('group_type') - if not group_type: - msg = _("group_type must be provided to create " - "group %(name)s.") % {'name': name} - raise exc.HTTPBadRequest(explanation=msg) - if not uuidutils.is_uuid_like(group_type): - req_group_type = group_types.get_group_type_by_name(context, - group_type) - group_type = req_group_type['id'] - self._check_default_cgsnapshot_type(group_type) - volume_types = group.get('volume_types') - if not volume_types: - msg = _("volume_types must be provided to create " - "group %(name)s.") % {'name': name} - raise exc.HTTPBadRequest(explanation=msg) - availability_zone = group.get('availability_zone') - - LOG.info("Creating group %(name)s.", - {'name': name}, - context=context) - - try: - new_group = self.group_api.create( - context, name, description, group_type, volume_types, - availability_zone=availability_zone) - except (exception.Invalid, exception.ObjectActionError) as error: - raise exc.HTTPBadRequest(explanation=error.msg) - except 
exception.NotFound: - # Not found exception will be handled at the wsgi level - raise - - retval = self._view_builder.summary(req, new_group) - return retval - - @wsgi.Controller.api_version(GROUP_CREATE_FROM_SRC_API_VERSION) - @wsgi.action("create-from-src") - @wsgi.response(http_client.ACCEPTED) - def create_from_src(self, req, body): - """Create a new group from a source. - - The source can be a group snapshot or a group. Note that - this does not require group_type and volume_types as the - "create" API above. - """ - LOG.debug('Creating new group %s.', body) - self.assert_valid_body(body, 'create-from-src') - - context = req.environ['cinder.context'] - group = body['create-from-src'] - self.validate_name_and_description(group) - name = group.get('name', None) - description = group.get('description', None) - group_snapshot_id = group.get('group_snapshot_id', None) - source_group_id = group.get('source_group_id', None) - if not group_snapshot_id and not source_group_id: - msg = (_("Either 'group_snapshot_id' or 'source_group_id' must be " - "provided to create group %(name)s from source.") - % {'name': name}) - raise exc.HTTPBadRequest(explanation=msg) - - if group_snapshot_id and source_group_id: - msg = _("Cannot provide both 'group_snapshot_id' and " - "'source_group_id' to create group %(name)s from " - "source.") % {'name': name} - raise exc.HTTPBadRequest(explanation=msg) - - group_type_id = None - if group_snapshot_id: - LOG.info("Creating group %(name)s from group_snapshot " - "%(snap)s.", - {'name': name, 'snap': group_snapshot_id}, - context=context) - grp_snap = self.group_api.get_group_snapshot(context, - group_snapshot_id) - group_type_id = grp_snap.group_type_id - elif source_group_id: - LOG.info("Creating group %(name)s from " - "source group %(source_group_id)s.", - {'name': name, 'source_group_id': source_group_id}, - context=context) - source_group = self.group_api.get(context, source_group_id) - group_type_id = source_group.group_type_id - - 
self._check_default_cgsnapshot_type(group_type_id) - - try: - new_group = self.group_api.create_from_src( - context, name, description, group_snapshot_id, source_group_id) - except exception.InvalidGroup as error: - raise exc.HTTPBadRequest(explanation=error.msg) - except (exception.GroupNotFound, exception.GroupSnapshotNotFound): - # Not found exception will be handled at the wsgi level - raise - except exception.CinderException as error: - raise exc.HTTPBadRequest(explanation=error.msg) - - retval = self._view_builder.summary(req, new_group) - return retval - - @wsgi.Controller.api_version(GROUP_API_VERSION) - def update(self, req, id, body): - """Update the group. - - Expected format of the input parameter 'body': - - .. code-block:: json - - { - "group": - { - "name": "my_group", - "description": "My group", - "add_volumes": "volume-uuid-1,volume-uuid-2,...", - "remove_volumes": "volume-uuid-8,volume-uuid-9,..." - } - } - - """ - LOG.debug('Update called for group %s.', id) - - if not body: - msg = _("Missing request body.") - raise exc.HTTPBadRequest(explanation=msg) - - self.assert_valid_body(body, 'group') - context = req.environ['cinder.context'] - - group = body.get('group') - self.validate_name_and_description(group) - name = group.get('name') - description = group.get('description') - add_volumes = group.get('add_volumes') - remove_volumes = group.get('remove_volumes') - - # Allow name or description to be changed to an empty string ''. 
- if (name is None and description is None and not add_volumes - and not remove_volumes): - msg = _("Name, description, add_volumes, and remove_volumes " - "can not be all empty in the request body.") - raise exc.HTTPBadRequest(explanation=msg) - - LOG.info("Updating group %(id)s with name %(name)s " - "description: %(description)s add_volumes: " - "%(add_volumes)s remove_volumes: %(remove_volumes)s.", - {'id': id, 'name': name, - 'description': description, - 'add_volumes': add_volumes, - 'remove_volumes': remove_volumes}, - context=context) - - try: - group = self.group_api.get(context, id) - self._check_default_cgsnapshot_type(group.group_type_id) - self.group_api.update( - context, group, name, description, - add_volumes, remove_volumes) - except exception.GroupNotFound: - # Not found exception will be handled at the wsgi level - raise - except exception.InvalidGroup as error: - raise exc.HTTPBadRequest(explanation=error.msg) - - return webob.Response(status_int=http_client.ACCEPTED) - - @wsgi.Controller.api_version(GROUP_REPLICATION_API_VERSION) - @wsgi.action("enable_replication") - def enable_replication(self, req, id, body): - """Enables replications for a group.""" - context = req.environ['cinder.context'] - if body: - if not self.is_valid_body(body, 'enable_replication'): - msg = _("Missing required element 'enable_replication' in " - "request body.") - raise exc.HTTPBadRequest(explanation=msg) - - LOG.info('Enable replication group with id: %s.', id, - context=context) - - try: - group = self.group_api.get(context, id) - self.group_api.enable_replication(context, group) - # Not found exception will be handled at the wsgi level - except (exception.InvalidGroup, exception.InvalidGroupType, - exception.InvalidVolume, exception.InvalidVolumeType) as error: - raise exc.HTTPBadRequest(explanation=error.msg) - - return webob.Response(status_int=202) - - @wsgi.Controller.api_version(GROUP_REPLICATION_API_VERSION) - @wsgi.action("disable_replication") - def 
disable_replication(self, req, id, body): - """Disables replications for a group.""" - context = req.environ['cinder.context'] - if body: - if not self.is_valid_body(body, 'disable_replication'): - msg = _("Missing required element 'disable_replication' in " - "request body.") - raise exc.HTTPBadRequest(explanation=msg) - - LOG.info('Disable replication group with id: %s.', id, - context=context) - - try: - group = self.group_api.get(context, id) - self.group_api.disable_replication(context, group) - # Not found exception will be handled at the wsgi level - except (exception.InvalidGroup, exception.InvalidGroupType, - exception.InvalidVolume, exception.InvalidVolumeType) as error: - raise exc.HTTPBadRequest(explanation=error.msg) - - return webob.Response(status_int=202) - - @wsgi.Controller.api_version(GROUP_REPLICATION_API_VERSION) - @wsgi.action("failover_replication") - def failover_replication(self, req, id, body): - """Fails over replications for a group.""" - context = req.environ['cinder.context'] - if body: - if not self.is_valid_body(body, 'failover_replication'): - msg = _("Missing required element 'failover_replication' in " - "request body.") - raise exc.HTTPBadRequest(explanation=msg) - - grp_body = body['failover_replication'] - try: - allow_attached = strutils.bool_from_string( - grp_body.get('allow_attached_volume', False), - strict=True) - except ValueError: - msg = (_("Invalid value '%s' for allow_attached_volume flag.") - % grp_body) - raise exc.HTTPBadRequest(explanation=msg) - secondary_backend_id = grp_body.get('secondary_backend_id') - - LOG.info('Failover replication group with id: %s.', id, - context=context) - - try: - group = self.group_api.get(context, id) - self.group_api.failover_replication(context, group, allow_attached, - secondary_backend_id) - # Not found exception will be handled at the wsgi level - except (exception.InvalidGroup, exception.InvalidGroupType, - exception.InvalidVolume, exception.InvalidVolumeType) as error: - 
raise exc.HTTPBadRequest(explanation=error.msg) - - return webob.Response(status_int=202) - - @wsgi.Controller.api_version(GROUP_REPLICATION_API_VERSION) - @wsgi.action("list_replication_targets") - def list_replication_targets(self, req, id, body): - """List replication targets for a group.""" - context = req.environ['cinder.context'] - if body: - if not self.is_valid_body(body, 'list_replication_targets'): - msg = _("Missing required element 'list_replication_targets' " - "in request body.") - raise exc.HTTPBadRequest(explanation=msg) - - LOG.info('List replication targets for group with id: %s.', id, - context=context) - - # Not found exception will be handled at the wsgi level - group = self.group_api.get(context, id) - replication_targets = self.group_api.list_replication_targets( - context, group) - - return replication_targets - - -def create_resource(): - return wsgi.Resource(GroupsController()) diff --git a/cinder/api/v3/limits.py b/cinder/api/v3/limits.py deleted file mode 100644 index cc739a3df..000000000 --- a/cinder/api/v3/limits.py +++ /dev/null @@ -1,54 +0,0 @@ -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -"""The limits V3 api.""" - -from cinder.api.openstack import wsgi -from cinder.api.v2 import limits as limits_v2 -from cinder.api.views import limits as limits_views -from cinder import quota - -QUOTAS = quota.QUOTAS - - -class LimitsController(limits_v2.LimitsController): - """Controller for accessing limits in the OpenStack API.""" - - def index(self, req): - """Return all global and rate limit information.""" - context = req.environ['cinder.context'] - params = req.params.copy() - req_version = req.api_version_request - - # TODO(wangxiyuan): Support "tenant_id" here to keep the backwards - # compatibility. Remove it once we drop all support for "tenant". - if req_version.matches(None, "3.38") or not context.is_admin: - params.pop('project_id', None) - params.pop('tenant_id', None) - project_id = params.get( - 'project_id', params.get('tenant_id', context.project_id)) - - quotas = QUOTAS.get_project_quotas(context, project_id, - usages=False) - abs_limits = {k: v['limit'] for k, v in quotas.items()} - rate_limits = req.environ.get("cinder.limits", []) - - builder = self._get_view_builder(req) - return builder.build(rate_limits, abs_limits) - - def _get_view_builder(self, req): - return limits_views.ViewBuilder() - - -def create_resource(): - return wsgi.Resource(LimitsController()) diff --git a/cinder/api/v3/messages.py b/cinder/api/v3/messages.py deleted file mode 100644 index ee1a789bb..000000000 --- a/cinder/api/v3/messages.py +++ /dev/null @@ -1,127 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the -# License for the specific language governing permissions and limitations -# under the License. - -"""The messages API.""" - - -from six.moves import http_client -import webob - -from cinder.api import common -from cinder.api.openstack import wsgi -from cinder.api.v3.views import messages as messages_view -from cinder.message import api as message_api -from cinder.message import defined_messages -from cinder.message import message_field -import cinder.policy - - -MESSAGES_BASE_MICRO_VERSION = '3.3' - - -def check_policy(context, action, target_obj=None): - target = { - 'project_id': context.project_id, - 'user_id': context.user_id, - } - target.update(target_obj or {}) - - _action = 'message:%s' % action - cinder.policy.enforce(context, _action, target) - - -class MessagesController(wsgi.Controller): - """The User Messages API controller for the OpenStack API.""" - - _view_builder_class = messages_view.ViewBuilder - - def __init__(self, ext_mgr): - self.message_api = message_api.API() - self.ext_mgr = ext_mgr - super(MessagesController, self).__init__() - - def _build_user_message(self, message): - # NOTE(tommylikehu): if the `action_id` is empty, we use 'event_id' - # to translate the user message. 
- if message is None: - return - if message['action_id'] is None and message['event_id'] is not None: - message['user_message'] = defined_messages.get_message_text( - message['event_id']) - else: - message['user_message'] = "%s:%s" % ( - message_field.translate_action(message['action_id']), - message_field.translate_detail(message['detail_id'])) - - @wsgi.Controller.api_version(MESSAGES_BASE_MICRO_VERSION) - def show(self, req, id): - """Return the given message.""" - context = req.environ['cinder.context'] - - # Not found exception will be handled at the wsgi level - message = self.message_api.get(context, id) - - check_policy(context, 'get', message) - - self._build_user_message(message) - return self._view_builder.detail(req, message) - - @wsgi.Controller.api_version(MESSAGES_BASE_MICRO_VERSION) - def delete(self, req, id): - """Delete a message.""" - context = req.environ['cinder.context'] - - # Not found exception will be handled at the wsgi level - message = self.message_api.get(context, id) - check_policy(context, 'delete', message) - self.message_api.delete(context, message) - - return webob.Response(status_int=http_client.NO_CONTENT) - - @wsgi.Controller.api_version(MESSAGES_BASE_MICRO_VERSION) - def index(self, req): - """Returns a list of messages, transformed through view builder.""" - context = req.environ['cinder.context'] - api_version = req.api_version_request - check_policy(context, 'get_all') - filters = None - marker = None - limit = None - offset = None - sort_keys = None - sort_dirs = None - - if api_version.matches("3.5"): - filters = req.params.copy() - marker, limit, offset = common.get_pagination_params(filters) - sort_keys, sort_dirs = common.get_sort_params(filters) - - if api_version.matches(common.FILTERING_VERSION): - support_like = (True if api_version.matches( - common.LIKE_FILTER_VERSION) else False) - common.reject_invalid_filters(context, filters, 'message', - support_like) - - messages = self.message_api.get_all(context, 
filters=filters, - marker=marker, limit=limit, - offset=offset, - sort_keys=sort_keys, - sort_dirs=sort_dirs) - - for message in messages: - self._build_user_message(message) - messages = self._view_builder.index(req, messages) - return messages - - -def create_resource(ext_mgr): - return wsgi.Resource(MessagesController(ext_mgr)) diff --git a/cinder/api/v3/resource_common_manage.py b/cinder/api/v3/resource_common_manage.py deleted file mode 100644 index 4265fca3d..000000000 --- a/cinder/api/v3/resource_common_manage.py +++ /dev/null @@ -1,83 +0,0 @@ -# Copyright (c) 2016 Red Hat, Inc. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from cinder.api import common -from cinder.api import extensions -from cinder.api.openstack import wsgi -from cinder import exception -from cinder.i18n import _ - - -class ManageResource(object): - """Mixin class for v3 of ManageVolume and ManageSnapshot. - - It requires that any class inheriting from this one has `volume_api` and - `_list_manageable_view` attributes. 
- """ - VALID_SORT_KEYS = {'reference', 'size'} - VALID_SORT_DIRS = {'asc', 'desc'} - - def _set_resource_type(self, resource): - self._authorizer = extensions.extension_authorizer(resource, - 'list_manageable') - self.get_manageable = getattr(self.volume_api, - 'get_manageable_%ss' % resource) - - def _ensure_min_version(self, req, allowed_version): - version = req.api_version_request - if not version.matches(allowed_version, None): - raise exception.VersionNotFoundForAPIMethod(version=version) - - def _get_resources(self, req, is_detail): - self._ensure_min_version(req, '3.8') - - context = req.environ['cinder.context'] - self._authorizer(context) - - params = req.params.copy() - cluster_name, host = common.get_cluster_host(req, params, '3.17') - marker, limit, offset = common.get_pagination_params(params) - sort_keys, sort_dirs = common.get_sort_params(params, - default_key='reference') - - # These parameters are generally validated at the DB layer, but in this - # case sorting is not done by the DB - invalid_keys = set(sort_keys).difference(self.VALID_SORT_KEYS) - if invalid_keys: - msg = _("Invalid sort keys passed: %s") % ', '.join(invalid_keys) - raise exception.InvalidParameterValue(err=msg) - - invalid_dirs = set(sort_dirs).difference(self.VALID_SORT_DIRS) - if invalid_dirs: - msg = _("Invalid sort dirs passed: %s") % ', '.join(invalid_dirs) - raise exception.InvalidParameterValue(err=msg) - - resources = self.get_manageable(context, host, cluster_name, - marker=marker, limit=limit, - offset=offset, sort_keys=sort_keys, - sort_dirs=sort_dirs) - view_builder = getattr(self._list_manageable_view, - 'detail_list' if is_detail else 'summary_list') - return view_builder(req, resources, len(resources)) - - @wsgi.extends - def index(self, req): - """Returns a summary list of volumes available to manage.""" - return self._get_resources(req, False) - - @wsgi.extends - def detail(self, req): - """Returns a detailed list of volumes available to manage.""" - return 
self._get_resources(req, True) diff --git a/cinder/api/v3/resource_filters.py b/cinder/api/v3/resource_filters.py deleted file mode 100644 index 955ae2089..000000000 --- a/cinder/api/v3/resource_filters.py +++ /dev/null @@ -1,43 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -"""The resource filters api.""" - -from cinder.api import common -from cinder.api.openstack import wsgi -from cinder.api.v3.views import resource_filters as filter_views - - -FILTER_API_VERSION = '3.33' - - -class ResourceFiltersController(wsgi.Controller): - """The resource filter API controller for the OpenStack API.""" - - _view_builder_class = filter_views.ViewBuilder - - def __init__(self, ext_mgr=None): - """Initialize controller class.""" - self.ext_mgr = ext_mgr - super(ResourceFiltersController, self).__init__() - - @wsgi.Controller.api_version(FILTER_API_VERSION) - def index(self, req): - """Return a list of resource filters.""" - resource = req.params.get('resource', None) - filters = common.get_enabled_resource_filters(resource=resource) - return filter_views.ViewBuilder.list(filters) - - -def create_resource(ext_mgr): - """Create the wsgi resource for this controller.""" - return wsgi.Resource(ResourceFiltersController(ext_mgr)) diff --git a/cinder/api/v3/router.py b/cinder/api/v3/router.py deleted file mode 100644 index f45e33768..000000000 --- a/cinder/api/v3/router.py +++ /dev/null @@ -1,194 +0,0 @@ -# Copyright 2011 OpenStack Foundation -# Copyright 2011 United States 
Government as represented by the -# Administrator of the National Aeronautics and Space Administration. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -""" -WSGI middleware for OpenStack Volume API. -""" - -from cinder.api import extensions -import cinder.api.openstack -from cinder.api.v2 import snapshot_metadata -from cinder.api.v2 import types -from cinder.api.v3 import attachments -from cinder.api.v3 import backups -from cinder.api.v3 import clusters -from cinder.api.v3 import consistencygroups -from cinder.api.v3 import group_snapshots -from cinder.api.v3 import group_specs -from cinder.api.v3 import group_types -from cinder.api.v3 import groups -from cinder.api.v3 import limits -from cinder.api.v3 import messages -from cinder.api.v3 import resource_filters -from cinder.api.v3 import snapshot_manage -from cinder.api.v3 import snapshots -from cinder.api.v3 import volume_manage -from cinder.api.v3 import volume_metadata -from cinder.api.v3 import volumes -from cinder.api.v3 import workers -from cinder.api import versions - - -class APIRouter(cinder.api.openstack.APIRouter): - """Routes requests on the API to the appropriate controller and method.""" - ExtensionManager = extensions.ExtensionManager - - def _setup_routes(self, mapper, ext_mgr): - self.resources['versions'] = versions.create_resource() - mapper.connect("versions", "/", - controller=self.resources['versions'], - action='index') - - mapper.redirect("", "/") - - 
self.resources['volumes'] = volumes.create_resource(ext_mgr) - mapper.resource("volume", "volumes", - controller=self.resources['volumes'], - collection={'detail': 'GET', 'summary': 'GET'}, - member={'action': 'POST'}) - - self.resources['messages'] = messages.create_resource(ext_mgr) - mapper.resource("message", "messages", - controller=self.resources['messages'], - collection={'detail': 'GET'}) - - self.resources['clusters'] = clusters.create_resource() - mapper.resource('cluster', 'clusters', - controller=self.resources['clusters'], - collection={'detail': 'GET'}) - - self.resources['types'] = types.create_resource() - mapper.resource("type", "types", - controller=self.resources['types'], - member={'action': 'POST'}) - - self.resources['group_types'] = group_types.create_resource() - mapper.resource("group_type", "group_types", - controller=self.resources['group_types'], - member={'action': 'POST'}) - - self.resources['group_specs'] = group_specs.create_resource() - mapper.resource("group_spec", "group_specs", - controller=self.resources['group_specs'], - parent_resource=dict(member_name='group_type', - collection_name='group_types')) - - self.resources['groups'] = groups.create_resource() - mapper.resource("group", "groups", - controller=self.resources['groups'], - collection={'detail': 'GET'}, - member={'action': 'POST'}) - mapper.connect("groups", - "/{project_id}/groups/{id}/action", - controller=self.resources["groups"], - action="action", - conditions={"method": ["POST"]}) - mapper.connect("groups/action", - "/{project_id}/groups/action", - controller=self.resources["groups"], - action="action", - conditions={"method": ["POST"]}) - - self.resources['group_snapshots'] = group_snapshots.create_resource() - mapper.resource("group_snapshot", "group_snapshots", - controller=self.resources['group_snapshots'], - collection={'detail': 'GET'}, - member={'action': 'POST'}) - mapper.connect("group_snapshots", - "/{project_id}/group_snapshots/{id}/action", - 
controller=self.resources["group_snapshots"], - action="action", - conditions={"method": ["POST"]}) - self.resources['snapshots'] = snapshots.create_resource(ext_mgr) - mapper.resource("snapshot", "snapshots", - controller=self.resources['snapshots'], - collection={'detail': 'GET'}, - member={'action': 'POST'}) - - self.resources['limits'] = limits.create_resource() - mapper.resource("limit", "limits", - controller=self.resources['limits']) - - self.resources['snapshot_metadata'] = \ - snapshot_metadata.create_resource() - snapshot_metadata_controller = self.resources['snapshot_metadata'] - - mapper.resource("snapshot_metadata", "metadata", - controller=snapshot_metadata_controller, - parent_resource=dict(member_name='snapshot', - collection_name='snapshots')) - - mapper.connect("metadata", - "/{project_id}/snapshots/{snapshot_id}/metadata", - controller=snapshot_metadata_controller, - action='update_all', - conditions={"method": ['PUT']}) - - self.resources['volume_metadata'] = \ - volume_metadata.create_resource() - volume_metadata_controller = self.resources['volume_metadata'] - - mapper.resource("volume_metadata", "metadata", - controller=volume_metadata_controller, - parent_resource=dict(member_name='volume', - collection_name='volumes')) - - mapper.connect("metadata", - "/{project_id}/volumes/{volume_id}/metadata", - controller=volume_metadata_controller, - action='update_all', - conditions={"method": ['PUT']}) - - self.resources['consistencygroups'] = ( - consistencygroups.create_resource()) - mapper.resource("consistencygroup", "consistencygroups", - controller=self.resources['consistencygroups'], - collection={'detail': 'GET'}, - member={'action': 'POST'}) - - self.resources['manageable_volumes'] = volume_manage.create_resource() - mapper.resource("manageable_volume", "manageable_volumes", - controller=self.resources['manageable_volumes'], - collection={'detail': 'GET'}) - - self.resources['manageable_snapshots'] = \ - snapshot_manage.create_resource() - 
mapper.resource("manageable_snapshot", "manageable_snapshots", - controller=self.resources['manageable_snapshots'], - collection={'detail': 'GET'}) - - self.resources['backups'] = ( - backups.create_resource()) - mapper.resource("backup", "backups", - controller=self.resources['backups'], - collection={'detail': 'GET'}) - - self.resources['attachments'] = attachments.create_resource(ext_mgr) - mapper.resource("attachment", "attachments", - controller=self.resources['attachments'], - collection={'detail': 'GET', 'summary': 'GET'}, - member={'action': 'POST'}) - - self.resources['workers'] = workers.create_resource() - mapper.resource('worker', 'workers', - controller=self.resources['workers'], - collection={'cleanup': 'POST'}) - - self.resources['resource_filters'] = resource_filters.create_resource( - ext_mgr) - mapper.resource('resource_filter', 'resource_filters', - controller=self.resources['resource_filters']) diff --git a/cinder/api/v3/snapshot_manage.py b/cinder/api/v3/snapshot_manage.py deleted file mode 100644 index ae9a54d60..000000000 --- a/cinder/api/v3/snapshot_manage.py +++ /dev/null @@ -1,35 +0,0 @@ -# Copyright (c) 2016 Stratoscale, Ltd. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -from six.moves import http_client - -from cinder.api.contrib import snapshot_manage as snapshot_manage_v2 -from cinder.api.openstack import wsgi -from cinder.api.v3 import resource_common_manage as common - - -class SnapshotManageController(common.ManageResource, - snapshot_manage_v2.SnapshotManageController): - def __init__(self, *args, **kwargs): - super(SnapshotManageController, self).__init__(*args, **kwargs) - self._set_resource_type('snapshot') - - @wsgi.response(http_client.ACCEPTED) - def create(self, req, body): - self._ensure_min_version(req, "3.8") - return super(SnapshotManageController, self).create(req, body) - - -def create_resource(): - return wsgi.Resource(SnapshotManageController()) diff --git a/cinder/api/v3/snapshots.py b/cinder/api/v3/snapshots.py deleted file mode 100644 index 025e74fe0..000000000 --- a/cinder/api/v3/snapshots.py +++ /dev/null @@ -1,112 +0,0 @@ -# Copyright 2016 EMC Corporation -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -"""The volumes snapshots V3 API.""" - -import ast - -from oslo_log import log as logging - -from cinder.api import common -from cinder.api.openstack import wsgi -from cinder.api.v2 import snapshots as snapshots_v2 -from cinder.api.v3.views import snapshots as snapshot_views -from cinder import utils - -LOG = logging.getLogger(__name__) - - -class SnapshotsController(snapshots_v2.SnapshotsController): - """The Snapshots API controller for the OpenStack API.""" - - _view_builder_class = snapshot_views.ViewBuilder - - def _get_snapshot_filter_options(self): - """returns tuple of valid filter options""" - - return 'status', 'volume_id', 'name', 'metadata' - - def _format_snapshot_filter_options(self, search_opts): - """Convert valid filter options to correct expected format""" - - # Get the dict object out of queried metadata - # convert metadata query value from string to dict - if 'metadata' in search_opts.keys(): - try: - search_opts['metadata'] = ast.literal_eval( - search_opts['metadata']) - except (ValueError, SyntaxError): - LOG.debug('Could not evaluate value %s, assuming string', - search_opts['metadata']) - - @common.process_general_filtering('snapshot') - def _process_snapshot_filtering(self, context=None, filters=None, - req_version=None): - """Formats allowed filters""" - - # if the max version is less than or same as 3.21 - # metadata based filtering is not supported - if req_version.matches(None, "3.21"): - filters.pop('metadata', None) - - # Filter out invalid options - allowed_search_options = self._get_snapshot_filter_options() - - utils.remove_invalid_filter_options(context, filters, - allowed_search_options) - - def _items(self, req, is_detail=True): - """Returns a list of snapshots, transformed through view builder.""" - context = req.environ['cinder.context'] - req_version = req.api_version_request - # Pop out non search_opts and create local variables - search_opts = req.GET.copy() - sort_keys, sort_dirs = common.get_sort_params(search_opts) - 
marker, limit, offset = common.get_pagination_params(search_opts) - - # process filters - self._process_snapshot_filtering(context=context, - filters=search_opts, - req_version=req_version) - # process snapshot filters to appropriate formats if required - self._format_snapshot_filter_options(search_opts) - - req_version = req.api_version_request - if req_version.matches("3.30", None) and 'name' in sort_keys: - sort_keys[sort_keys.index('name')] = 'display_name' - - # NOTE(thingee): v3 API allows name instead of display_name - if 'name' in search_opts: - search_opts['display_name'] = search_opts.pop('name') - - snapshots = self.volume_api.get_all_snapshots(context, - search_opts=search_opts, - marker=marker, - limit=limit, - sort_keys=sort_keys, - sort_dirs=sort_dirs, - offset=offset) - - req.cache_db_snapshots(snapshots.objects) - - if is_detail: - snapshots = self._view_builder.detail_list(req, snapshots.objects) - else: - snapshots = self._view_builder.summary_list(req, snapshots.objects) - return snapshots - - -def create_resource(ext_mgr): - return wsgi.Resource(SnapshotsController(ext_mgr)) diff --git a/cinder/api/v3/views/__init__.py b/cinder/api/v3/views/__init__.py deleted file mode 100644 index e69de29bb..000000000 diff --git a/cinder/api/v3/views/attachments.py b/cinder/api/v3/views/attachments.py deleted file mode 100644 index b24785a88..000000000 --- a/cinder/api/v3/views/attachments.py +++ /dev/null @@ -1,57 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the -# License for the specific language governing permissions and limitations -# under the License. - -from oslo_utils import timeutils - - -class ViewBuilder(object): - """Model an attachment API response as a python dictionary.""" - - _collection_name = "attachments" - - @staticmethod - def _normalize(date): - if date: - return timeutils.normalize_time(date) - return '' - - @classmethod - def detail(cls, attachment, flat=False): - """Detailed view of an attachment.""" - result = cls.summary(attachment, flat=True) - result.update( - attached_at=cls._normalize(attachment.attach_time), - detached_at=cls._normalize(attachment.detach_time), - attach_mode=attachment.attach_mode, - connection_info=attachment.connection_info) - if flat: - return result - return {'attachment': result} - - @staticmethod - def summary(attachment, flat=False): - """Non detailed view of an attachment.""" - result = { - 'id': attachment.id, - 'status': attachment.attach_status, - 'instance': attachment.instance_uuid, - 'volume_id': attachment.volume_id, } - if flat: - return result - return {'attachment': result} - - @classmethod - def list(cls, attachments, detail=False): - """Build a view of a list of attachments.""" - func = cls.detail if detail else cls.summary - return {'attachments': [func(attachment, flat=True) for attachment in - attachments]} diff --git a/cinder/api/v3/views/clusters.py b/cinder/api/v3/views/clusters.py deleted file mode 100644 index 15bf4f32e..000000000 --- a/cinder/api/v3/views/clusters.py +++ /dev/null @@ -1,70 +0,0 @@ -# Copyright (c) 2016 Red Hat Inc. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. 
class ViewBuilder(object):
    """Map Cluster into dicts for API responses."""

    @staticmethod
    def _normalize(date):
        """Return a normalized datetime, or '' when date is falsy."""
        return timeutils.normalize_time(date) if date else ''

    @classmethod
    def detail(cls, cluster, replication_data=False, flat=False):
        """Detailed view of a cluster."""
        view = cls.summary(cluster, flat=True)
        view.update(
            num_hosts=cluster.num_hosts,
            num_down_hosts=cluster.num_down_hosts,
            last_heartbeat=cls._normalize(cluster.last_heartbeat),
            created_at=cls._normalize(cluster.created_at),
            updated_at=cls._normalize(cluster.updated_at),
            disabled_reason=cluster.disabled_reason,
            replication_status=cluster.replication_status,
            frozen=cluster.frozen,
            active_backend_id=cluster.active_backend_id)
        if not replication_data:
            # Replication fields are only exposed when explicitly requested.
            for key in ('replication_status', 'frozen', 'active_backend_id'):
                view.pop(key)
        return view if flat else {'cluster': view}

    @staticmethod
    def summary(cluster, replication_data=False, flat=False):
        """Generic, non-detailed view of a cluster."""
        view = {
            'name': cluster.name,
            'binary': cluster.binary,
            'state': 'up' if cluster.is_up else 'down',
            'status': 'disabled' if cluster.disabled else 'enabled',
            'replication_status': cluster.replication_status,
        }
        if not replication_data:
            view.pop('replication_status')
        return view if flat else {'cluster': view}

    @classmethod
    def list(cls, clusters, detail=False, replication_data=False):
        """Build a view over a list of clusters."""
        render = cls.detail if detail else cls.summary
        return {'clusters': [render(c, replication_data, flat=True)
                             for c in clusters]}
class ViewBuilder(common.ViewBuilder):
    """Model group_snapshot API responses as a python dictionary."""

    _collection_name = "group_snapshots"

    def __init__(self):
        """Initialize view builder."""
        super(ViewBuilder, self).__init__()

    def summary_list(self, request, group_snapshots):
        """Show a list of group_snapshots without many details."""
        return self._list_view(self.summary, request, group_snapshots)

    def detail_list(self, request, group_snapshots):
        """Detailed view of a list of group_snapshots."""
        return self._list_view(self.detail, request, group_snapshots)

    def summary(self, request, group_snapshot):
        """Generic, non-detailed view of a group_snapshot."""
        # NOTE(xyang): group_type_id is added for migrating CGs
        # to generic volume groups
        view = {'id': group_snapshot.id,
                'name': group_snapshot.name,
                'group_type_id': group_snapshot.group_type_id}
        return {'group_snapshot': view}

    def detail(self, request, group_snapshot):
        """Detailed view of a single group_snapshot."""
        view = {'id': group_snapshot.id,
                'group_id': group_snapshot.group_id,
                'group_type_id': group_snapshot.group_type_id,
                'status': group_snapshot.status,
                'created_at': group_snapshot.created_at,
                'name': group_snapshot.name,
                'description': group_snapshot.description}
        return {'group_snapshot': view}

    def _list_view(self, func, request, group_snapshots):
        """Provide a view for a list of group_snapshots."""
        snapshots = [func(request, gs)['group_snapshot']
                     for gs in group_snapshots]
        links = self._get_collection_links(request, snapshots,
                                           self._collection_name)
        view = {'group_snapshots': snapshots}
        if links:
            view['group_snapshot_links'] = links
        return view
class ViewBuilder(common.ViewBuilder):
    """Map group types into dicts for API responses."""

    def show(self, request, group_type, brief=False):
        """Trim away extraneous group type attributes."""
        context = request.environ['cinder.context']
        trimmed = {'id': group_type.get('id'),
                   'name': group_type.get('name'),
                   'description': group_type.get('description'),
                   'is_public': group_type.get('is_public')}
        # group_specs are policy protected; only expose them when allowed.
        if common.validate_policy(context,
                                  'group:access_group_types_specs'):
            trimmed['group_specs'] = group_type.get('group_specs')
        return trimmed if brief else {'group_type': trimmed}

    def index(self, request, group_types):
        """Index over trimmed group types."""
        trimmed_types = [self.show(request, gt, True) for gt in group_types]
        # Pagination links are computed from the raw objects, as elsewhere.
        type_links = self._get_collection_links(request, group_types,
                                                'group_types')
        view = {'group_types': trimmed_types}
        if type_links:
            view['group_type_links'] = type_links
        return view
class ViewBuilder(common.ViewBuilder):
    """Model group API responses as a python dictionary."""

    _collection_name = "groups"

    def __init__(self):
        """Initialize view builder."""
        super(ViewBuilder, self).__init__()

    def summary_list(self, request, groups):
        """Show a list of groups without many details."""
        return self._list_view(self.summary, request, groups)

    def detail_list(self, request, groups):
        """Detailed view of a list of groups."""
        return self._list_view(self.detail, request, groups)

    def summary(self, request, group):
        """Generic, non-detailed view of a group."""
        return {'group': {'id': group.id, 'name': group.name}}

    def detail(self, request, group):
        """Detailed view of a single group."""
        view = {
            'id': group.id,
            'status': group.status,
            'availability_zone': group.availability_zone,
            'created_at': group.created_at,
            'name': group.name,
            'description': group.description,
            'group_type': group.group_type_id,
            'volume_types': [vt.id for vt in group.volume_types],
        }

        req_version = request.api_version_request
        # group_snapshot_id and source_group_id appeared in microversion 3.14.
        if req_version.matches("3.14", None):
            view['group_snapshot_id'] = group.group_snapshot_id
            view['source_group_id'] = group.source_group_id

        # Volume listing (opt-in via the list_volume param) appeared in 3.25.
        if req_version.matches("3.25", None):
            if utils.get_bool_param('list_volume', request.params):
                view['volumes'] = [vol.id for vol in group.volumes]

        # replication_status appeared in microversion 3.38.
        if req_version.matches("3.38", None):
            view['replication_status'] = group.replication_status

        return {'group': view}

    def _list_view(self, func, request, groups):
        """Provide a view for a list of groups."""
        groups_list = [func(request, g)['group'] for g in groups]
        links = self._get_collection_links(request, groups,
                                           self._collection_name)
        view = {'groups': groups_list}
        if links:
            view['group_links'] = links
        return view
class ViewBuilder(common.ViewBuilder):
    """Model a server API response as a python dictionary."""

    _collection_name = "messages"

    def index(self, request, messages, message_count=None):
        """Show a list of messages."""
        return self._list_view(self.detail, request, messages, message_count)

    def detail(self, request, message):
        """Detailed view of a single message."""
        view = {
            'id': message.get('id'),
            'event_id': message.get('event_id'),
            'user_message': message.get('user_message'),
            'message_level': message.get('message_level'),
            'created_at': message.get('created_at'),
            'guaranteed_until': message.get('expires_at'),
            'request_id': message.get('request_id'),
            'links': self._get_links(request, message['id']),
        }

        # Resource details are optional; include them only when present.
        for key in ('resource_type', 'resource_uuid'):
            if message.get(key):
                view[key] = message.get(key)

        return {'message': view}

    # NOTE: the default binds the class attribute at class-definition time,
    # which is intentional here.
    def _list_view(self, func, request, messages, message_count=None,
                   coll_name=_collection_name):
        """Provide a view for a list of messages.

        :param func: Function used to format the message data
        :param request: API request
        :param messages: List of messages in dictionary format
        :param message_count: Length of the original list of messages
        :param coll_name: Name of collection, used to generate the next link
                          for a pagination query
        :returns: message data in dictionary format
        """
        messages_list = [func(request, msg)['message'] for msg in messages]
        links = self._get_collection_links(request, messages, coll_name,
                                           message_count)
        view = {'messages': messages_list}
        if links:
            view['messages_links'] = links
        return view
class ViewBuilder(object):
    """Model an resource filters API response as a python dictionary."""

    _collection_name = "resource_filters"

    @classmethod
    def list(cls, filters):
        """Build a view of a list of resource filters.

        .. code-block:: json

            {
                "resource_filters": [{
                    "resource": "resource_1",
                    "filters": ["filter1", "filter2", "filter3"]
                }]
            }
        """
        views = [{'resource': resource, 'filters': names}
                 for resource, names in filters.items()]
        return {'resource_filters': views}
class ViewBuilder(views_v2.ViewBuilder):
    """Model a volumes API V3 response as a python dictionary."""

    def quick_summary(self, volume_count, volume_size,
                      all_distinct_metadata=None):
        """View of volumes summary.

        It includes number of volumes, size of volumes and all distinct
        metadata of volumes.
        """
        body = {'total_count': volume_count,
                'total_size': volume_size}
        if all_distinct_metadata is not None:
            body['metadata'] = all_distinct_metadata
        return {'volume-summary': body}

    def detail(self, request, volume):
        """Detailed view of a single volume."""
        volume_ref = super(ViewBuilder, self).detail(request, volume)
        req_version = request.api_version_request

        # group_id appeared in microversion 3.13.
        if req_version.matches("3.13", None):
            volume_ref['volume']['group_id'] = volume.get('group_id')

        # provider_id is admin-only and appeared in microversion 3.21.
        if (request.environ['cinder.context'].is_admin and
                req_version.matches("3.21", None)):
            volume_ref['volume']['provider_id'] = volume.get('provider_id')

        return volume_ref
class ViewBuilder(object):
    """Map Cluster into dicts for API responses."""

    _collection_name = 'workers'

    @classmethod
    def service_list(cls, services):
        """Render each service as a plain dictionary of its identity."""
        return [{'id': svc.id,
                 'host': svc.host,
                 'binary': svc.binary,
                 'cluster_name': svc.cluster_name}
                for svc in services]
class VolumeManageController(common.ManageResource,
                             volume_manage_v2.VolumeManageController):
    """v3 manage-volume controller layering microversion checks on v2."""

    def __init__(self, *args, **kwargs):
        super(VolumeManageController, self).__init__(*args, **kwargs)
        self._set_resource_type('volume')

    @wsgi.response(http_client.ACCEPTED)
    def create(self, req, body):
        """Instruct Cinder to manage an existing volume (3.8+)."""
        self._ensure_min_version(req, "3.8")
        return super(VolumeManageController, self).create(req, body)


def create_resource():
    """Build the WSGI resource for this controller."""
    return wsgi.Resource(VolumeManageController())
- -"""The volume metadata V3 api.""" - -import hashlib - -from oslo_serialization import jsonutils -import six -from six.moves import http_client -import webob - -from cinder.api.openstack import wsgi -from cinder.api.v2 import volume_metadata as volume_meta_v2 -from cinder import exception - - -METADATA_MICRO_VERSION = '3.15' - - -class Controller(volume_meta_v2.Controller): - """The volume metadata API controller for the OpenStack API.""" - def _validate_etag(self, req, volume_id): - if not req.if_match: - return True - context = req.environ['cinder.context'] - metadata = self._get_metadata(context, volume_id) - data = jsonutils.dumps({"metadata": metadata}) - if six.PY3: - data = data.encode('utf-8') - checksum = hashlib.md5(data).hexdigest() - return checksum in req.if_match.etags - - def _ensure_min_version(self, req, allowed_version): - version = req.api_version_request - if not version.matches(allowed_version, None): - raise exception.VersionNotFoundForAPIMethod(version=version) - - @wsgi.extends - def index(self, req, volume_id): - self._ensure_min_version(req, METADATA_MICRO_VERSION) - metadata = super(Controller, self).index(req, volume_id) - resp = webob.Response() - data = jsonutils.dumps(metadata) - if six.PY3: - data = data.encode('utf-8') - resp.headers['Etag'] = hashlib.md5(data).hexdigest() - resp.body = data - return resp - - @wsgi.extends - def update(self, req, volume_id, id, body): - self._ensure_min_version(req, METADATA_MICRO_VERSION) - if not self._validate_etag(req, volume_id): - return webob.Response(status_int=http_client.PRECONDITION_FAILED) - return super(Controller, self).update(req, volume_id, - id, body) - - @wsgi.extends - def update_all(self, req, volume_id, body): - self._ensure_min_version(req, METADATA_MICRO_VERSION) - if not self._validate_etag(req, volume_id): - return webob.Response(status_int=http_client.PRECONDITION_FAILED) - return super(Controller, self).update_all(req, volume_id, - body) - - -def create_resource(): - 
def check_policy(context, action, target_obj=None):
    """Enforce the 'volume:<action>' policy against the given target."""
    target = {'project_id': context.project_id,
              'user_id': context.user_id}
    target.update(target_obj or {})
    cinder.policy.enforce(context, 'volume:%s' % action, target)
= req.environ['cinder.context'] - req_version = req.api_version_request - - cascade = utils.get_bool_param('cascade', req.params) - force = False - - params = "" - if req_version.matches('3.23'): - force = utils.get_bool_param('force', req.params) - if cascade or force: - params = "(cascade: %(c)s, force: %(f)s)" % {'c': cascade, - 'f': force} - - LOG.info("Delete volume with id: %(id)s %(params)s", - {'id': id, 'params': params}, context=context) - - if force: - check_policy(context, 'force_delete') - - volume = self.volume_api.get(context, id) - - self.volume_api.delete(context, volume, - cascade=cascade, - force=force) - - return webob.Response(status_int=202) - - @common.process_general_filtering('volume') - def _process_volume_filtering(self, context=None, filters=None, - req_version=None): - if req_version.matches(None, "3.3"): - filters.pop('glance_metadata', None) - - if req_version.matches(None, "3.9"): - filters.pop('group_id', None) - - utils.remove_invalid_filter_options( - context, filters, - self._get_volume_filter_options()) - - def _get_volumes(self, req, is_detail): - """Returns a list of volumes, transformed through view builder.""" - - context = req.environ['cinder.context'] - req_version = req.api_version_request - - params = req.params.copy() - marker, limit, offset = common.get_pagination_params(params) - sort_keys, sort_dirs = common.get_sort_params(params) - filters = params - - self._process_volume_filtering(context=context, filters=filters, - req_version=req_version) - - # NOTE(thingee): v2 API allows name instead of display_name - if 'name' in sort_keys: - sort_keys[sort_keys.index('name')] = 'display_name' - - if 'name' in filters: - filters['display_name'] = filters.pop('name') - - strict = req.api_version_request.matches("3.2", None) - self.volume_api.check_volume_filters(filters, strict) - - volumes = self.volume_api.get_all(context, marker, limit, - sort_keys=sort_keys, - sort_dirs=sort_dirs, - filters=filters, - 
viewable_admin_meta=True, - offset=offset) - - for volume in volumes: - utils.add_visible_admin_metadata(volume) - - req.cache_db_volumes(volumes.objects) - - if is_detail: - volumes = self._view_builder.detail_list(req, volumes) - else: - volumes = self._view_builder.summary_list(req, volumes) - return volumes - - @wsgi.Controller.api_version(SUMMARY_BASE_MICRO_VERSION) - def summary(self, req): - """Return summary of volumes.""" - view_builder_v3 = volume_views_v3.ViewBuilder() - context = req.environ['cinder.context'] - filters = req.params.copy() - - utils.remove_invalid_filter_options(context, filters, - self._get_volume_filter_options()) - - num_vols, sum_size, metadata = self.volume_api.get_volume_summary( - context, filters=filters) - - req_version = req.api_version_request - if req_version.matches("3.36"): - all_distinct_metadata = metadata - else: - all_distinct_metadata = None - - return view_builder_v3.quick_summary(num_vols, int(sum_size), - all_distinct_metadata) - - @wsgi.response(http_client.ACCEPTED) - @wsgi.Controller.api_version('3.40') - @wsgi.action('revert') - def revert(self, req, id, body): - """revert a volume to a snapshot""" - - context = req.environ['cinder.context'] - self.assert_valid_body(body, 'revert') - snapshot_id = body['revert'].get('snapshot_id') - volume = self.volume_api.get_volume(context, id) - try: - l_snap = volume.get_latest_snapshot() - except exception.VolumeSnapshotNotFound: - msg = _("Volume %s doesn't have any snapshots.") - raise exc.HTTPBadRequest(explanation=msg % volume.id) - # Ensure volume and snapshot match. - if snapshot_id is None or snapshot_id != l_snap.id: - msg = _("Specified snapshot %(s_id)s is None or not " - "the latest one of volume %(v_id)s.") - raise exc.HTTPBadRequest(explanation=msg % {'s_id': snapshot_id, - 'v_id': volume.id}) - try: - msg = 'Reverting volume %(v_id)s to snapshot %(s_id)s.' 
- LOG.info(msg, {'v_id': volume.id, - 's_id': l_snap.id}) - self.volume_api.revert_to_snapshot(context, volume, l_snap) - except (exception.InvalidVolume, exception.InvalidSnapshot) as e: - raise exc.HTTPConflict(explanation=six.text_type(e)) - except exception.VolumeSizeExceedsAvailableQuota as e: - raise exc.HTTPForbidden(explanation=six.text_type(e)) - - @wsgi.response(http_client.ACCEPTED) - def create(self, req, body): - """Creates a new volume. - - :param req: the request - :param body: the request body - :returns: dict -- the new volume dictionary - :raises HTTPNotFound, HTTPBadRequest: - """ - self.assert_valid_body(body, 'volume') - - LOG.debug('Create volume request body: %s', body) - context = req.environ['cinder.context'] - - req_version = req.api_version_request - # Remove group_id from body if max version is less than 3.13. - if req_version.matches(None, "3.12"): - # NOTE(xyang): The group_id is from a group created with a - # group_type. So with this group_id, we've got a group_type - # for this volume. Also if group_id is passed in, that means - # we already know which backend is hosting the group and the - # volume will be created on the same backend as well. So it - # won't go through the scheduler again if a group_id is - # passed in. - try: - body.get('volume', {}).pop('group_id', None) - except AttributeError: - msg = (_("Invalid body provided for creating volume. 
" - "Request API version: %s.") % req_version) - raise exc.HTTPBadRequest(explanation=msg) - - volume = body['volume'] - kwargs = {} - self.validate_name_and_description(volume) - - # NOTE(thingee): v2 API allows name instead of display_name - if 'name' in volume: - volume['display_name'] = volume.pop('name') - - # NOTE(thingee): v2 API allows description instead of - # display_description - if 'description' in volume: - volume['display_description'] = volume.pop('description') - - if 'image_id' in volume: - volume['imageRef'] = volume.pop('image_id') - - req_volume_type = volume.get('volume_type', None) - if req_volume_type: - # Not found exception will be handled at the wsgi level - kwargs['volume_type'] = ( - objects.VolumeType.get_by_name_or_id(context, req_volume_type)) - - kwargs['metadata'] = volume.get('metadata', None) - - snapshot_id = volume.get('snapshot_id') - if snapshot_id is not None: - if not uuidutils.is_uuid_like(snapshot_id): - msg = _("Snapshot ID must be in UUID form.") - raise exc.HTTPBadRequest(explanation=msg) - # Not found exception will be handled at the wsgi level - kwargs['snapshot'] = self.volume_api.get_snapshot(context, - snapshot_id) - else: - kwargs['snapshot'] = None - - source_volid = volume.get('source_volid') - if source_volid is not None: - if not uuidutils.is_uuid_like(source_volid): - msg = _("Source volume ID '%s' must be a " - "valid UUID.") % source_volid - raise exc.HTTPBadRequest(explanation=msg) - # Not found exception will be handled at the wsgi level - kwargs['source_volume'] = ( - self.volume_api.get_volume(context, - source_volid)) - else: - kwargs['source_volume'] = None - - source_replica = volume.get('source_replica') - if source_replica is not None: - if not uuidutils.is_uuid_like(source_replica): - msg = _("Source replica ID '%s' must be a " - "valid UUID") % source_replica - raise exc.HTTPBadRequest(explanation=msg) - # Not found exception will be handled at the wsgi level - src_vol = 
self.volume_api.get_volume(context, - source_replica) - if src_vol['replication_status'] == 'disabled': - explanation = _('source volume id:%s is not' - ' replicated') % source_replica - raise exc.HTTPBadRequest(explanation=explanation) - kwargs['source_replica'] = src_vol - else: - kwargs['source_replica'] = None - - kwargs['group'] = None - kwargs['consistencygroup'] = None - consistencygroup_id = volume.get('consistencygroup_id') - if consistencygroup_id is not None: - if not uuidutils.is_uuid_like(consistencygroup_id): - msg = _("Consistency group ID '%s' must be a " - "valid UUID.") % consistencygroup_id - raise exc.HTTPBadRequest(explanation=msg) - # Not found exception will be handled at the wsgi level - kwargs['group'] = self.group_api.get(context, consistencygroup_id) - - # Get group_id if volume is in a group. - group_id = volume.get('group_id') - if group_id is not None: - # Not found exception will be handled at the wsgi level - kwargs['group'] = self.group_api.get(context, group_id) - - size = volume.get('size', None) - if size is None and kwargs['snapshot'] is not None: - size = kwargs['snapshot']['volume_size'] - elif size is None and kwargs['source_volume'] is not None: - size = kwargs['source_volume']['size'] - elif size is None and kwargs['source_replica'] is not None: - size = kwargs['source_replica']['size'] - - LOG.info("Create volume of %s GB", size) - - if self.ext_mgr.is_loaded('os-image-create'): - image_ref = volume.get('imageRef') - if image_ref is not None: - image_uuid = self._image_uuid_from_ref(image_ref, context) - kwargs['image_id'] = image_uuid - - kwargs['availability_zone'] = volume.get('availability_zone', None) - kwargs['scheduler_hints'] = volume.get('scheduler_hints', None) - multiattach = volume.get('multiattach', False) - kwargs['multiattach'] = multiattach - - new_volume = self.volume_api.create(context, - size, - volume.get('display_name'), - volume.get('display_description'), - **kwargs) - - retval = 
class WorkerController(wsgi.Controller):
    """Controller for the workers cleanup endpoint (microversion 3.24+)."""

    allowed_clean_keys = {'service_id', 'cluster_name', 'host', 'binary',
                          'is_up', 'disabled', 'resource_id', 'resource_type',
                          'until'}

    policy_checker = wsgi.Controller.get_policy_checker('workers')

    def __init__(self, *args, **kwargs):
        # BUG FIX: the original dropped *args/**kwargs without calling
        # super().__init__(), leaving the base Controller uninitialized.
        super(WorkerController, self).__init__(*args, **kwargs)
        self.sch_api = sch_rpc.SchedulerAPI()

    def _prepare_params(self, ctxt, params, allowed):
        """Validate and normalize cleanup filter parameters.

        :param ctxt: request context, used for DB lookups
        :param params: dict of user-supplied filters (mutated in place)
        :param allowed: set of permitted filter keys
        :returns: the normalized params dict
        :raises exception.InvalidInput: on unknown keys or invalid values
        """
        if not allowed.issuperset(params):
            invalid_keys = set(params).difference(allowed)
            msg = _('Invalid filter keys: %s') % ', '.join(invalid_keys)
            raise exception.InvalidInput(reason=msg)

        if params.get('binary') not in (None, 'cinder-volume',
                                        'cinder-scheduler'):
            msg = _('binary must be empty or set to cinder-volume or '
                    'cinder-scheduler')
            raise exception.InvalidInput(reason=msg)

        for boolean in ('disabled', 'is_up'):
            if params.get(boolean) is not None:
                params[boolean] = utils.get_bool_param(boolean, params)

        resource_type = params.get('resource_type')
        if resource_type:
            resource_type = resource_type.title()
            types = cleanable.CinderCleanableObject.cleanable_resource_types
            if resource_type not in types:
                msg = (_('Resource type %s not valid, must be ') %
                       resource_type)
                msg = utils.build_or_str(types, msg + '%s.')
                raise exception.InvalidInput(reason=msg)
            params['resource_type'] = resource_type

        resource_id = params.get('resource_id')
        if resource_id:
            if not uuidutils.is_uuid_like(resource_id):
                msg = (_('Resource ID must be a UUID, and %s is not.') %
                       resource_id)
                raise exception.InvalidInput(reason=msg)

            # If we have the resource type but we don't have where it is
            # located, we get it from the DB to limit the distribution of the
            # request by the scheduler, otherwise it will be distributed to
            # all the services.
            location_keys = {'service_id', 'cluster_name', 'host'}
            if not location_keys.intersection(params):
                workers = db.worker_get_all(ctxt, resource_id=resource_id,
                                            binary=params.get('binary'),
                                            resource_type=resource_type)

                # BUG FIX: these two messages were built as (msg, arg) tuples
                # instead of %-interpolated strings, so InvalidInput's reason
                # was a tuple repr rather than the intended message.
                if len(workers) == 0:
                    msg = (_('There is no resource with UUID %s pending '
                             'cleanup.') % resource_id)
                    raise exception.InvalidInput(reason=msg)
                if len(workers) > 1:
                    msg = (_('There are multiple resources with UUID %s '
                             'pending cleanup. Please be more specific.') %
                           resource_id)
                    raise exception.InvalidInput(reason=msg)

                worker = workers[0]
                params.update(service_id=worker.service_id,
                              resource_type=worker.resource_type)

        return params

    @wsgi.Controller.api_version('3.24')
    @wsgi.response(202)
    def cleanup(self, req, body=None):
        """Do the cleanup on resources from a specific service/host/node."""
        # Let the wsgi middleware convert NotAuthorized exceptions
        ctxt = self.policy_checker(req, 'cleanup')
        body = body or {}

        params = self._prepare_params(ctxt, body, self.allowed_clean_keys)
        params['until'] = timeutils.utcnow()

        # NOTE(geguileo): If is_up is not specified in the request
        # CleanupRequest's default will be used (False)
        cleanup_request = objects.CleanupRequest(**params)
        cleaning, unavailable = self.sch_api.work_cleanup(ctxt,
                                                          cleanup_request)
        return {
            'cleaning': workers_view.ViewBuilder.service_list(cleaning),
            'unavailable': workers_view.ViewBuilder.service_list(unavailable),
        }


def create_resource():
    """Build the WSGI resource for the workers controller."""
    return wsgi.Resource(WorkerController())
- - -import copy - -from six.moves import http_client - -from cinder.api import extensions -from cinder.api import openstack -from cinder.api.openstack import api_version_request -from cinder.api.openstack import wsgi -from cinder.api.views import versions as views_versions - - -_LINKS = [{ - "rel": "describedby", - "type": "text/html", - "href": "http://docs.openstack.org/", -}] - - -_KNOWN_VERSIONS = { - "v1.0": { - "id": "v1.0", - "status": "DEPRECATED", - "version": "", - "min_version": "", - "updated": "2016-05-02T20:25:19Z", - "links": _LINKS, - "media-types": [{ - "base": "application/json", - "type": "application/vnd.openstack.volume+json;version=1", - }] - }, - "v2.0": { - "id": "v2.0", - "status": "DEPRECATED", - "version": "", - "min_version": "", - "updated": "2017-02-25T12:00:00Z", - "links": _LINKS, - "media-types": [{ - "base": "application/json", - "type": "application/vnd.openstack.volume+json;version=2", - }] - }, - "v3.0": { - "id": "v3.0", - "status": "CURRENT", - "version": api_version_request._MAX_API_VERSION, - "min_version": api_version_request._MIN_API_VERSION, - "updated": "2016-02-08T12:20:21Z", - "links": _LINKS, - "media-types": [{ - "base": "application/json", - "type": "application/vnd.openstack.volume+json;version=3", - }] - }, -} - - -class Versions(openstack.APIRouter): - """Route versions requests.""" - - ExtensionManager = extensions.ExtensionManager - - def _setup_routes(self, mapper, ext_mgr): - self.resources['versions'] = create_resource() - mapper.connect('versions', '/', - controller=self.resources['versions'], - action='all') - mapper.redirect('', '/') - - -class VersionsController(wsgi.Controller): - - def __init__(self): - super(VersionsController, self).__init__(None) - - @wsgi.Controller.api_version('1.0') - def index(self, req): # pylint: disable=E0102 - """Return versions supported prior to the microversions epoch.""" - builder = views_versions.get_view_builder(req) - known_versions = copy.deepcopy(_KNOWN_VERSIONS) - 
known_versions.pop('v2.0') - known_versions.pop('v3.0') - return builder.build_versions(known_versions) - - @index.api_version('2.0') - def index(self, req): # pylint: disable=E0102 - """Return versions supported prior to the microversions epoch.""" - builder = views_versions.get_view_builder(req) - known_versions = copy.deepcopy(_KNOWN_VERSIONS) - known_versions.pop('v1.0') - known_versions.pop('v3.0') - return builder.build_versions(known_versions) - - @index.api_version('3.0') - def index(self, req): # pylint: disable=E0102 - """Return versions supported after the start of microversions.""" - builder = views_versions.get_view_builder(req) - known_versions = copy.deepcopy(_KNOWN_VERSIONS) - known_versions.pop('v1.0') - known_versions.pop('v2.0') - return builder.build_versions(known_versions) - - # NOTE (cknight): Calling the versions API without - # /v1, /v2, or /v3 in the URL will lead to this unversioned - # method, which should always return info about all - # available versions. - @wsgi.response(http_client.MULTIPLE_CHOICES) - def all(self, req): - """Return all known versions.""" - builder = views_versions.get_view_builder(req) - known_versions = copy.deepcopy(_KNOWN_VERSIONS) - return builder.build_versions(known_versions) - - -def create_resource(): - return wsgi.Resource(VersionsController()) diff --git a/cinder/api/views/__init__.py b/cinder/api/views/__init__.py deleted file mode 100644 index e69de29bb..000000000 diff --git a/cinder/api/views/availability_zones.py b/cinder/api/views/availability_zones.py deleted file mode 100644 index bcf658af6..000000000 --- a/cinder/api/views/availability_zones.py +++ /dev/null @@ -1,29 +0,0 @@ -# Copyright (c) 2013 OpenStack Foundation -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. 
You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import cinder.api.common - - -class ViewBuilder(cinder.api.common.ViewBuilder): - """Map cinder.volumes.api list_availability_zones response into dicts.""" - - def list(self, request, availability_zones): - def fmt(az): - return { - 'zoneName': az['name'], - 'zoneState': {'available': az['available']}, - } - - return {'availabilityZoneInfo': [fmt(az) for az in availability_zones]} diff --git a/cinder/api/views/backups.py b/cinder/api/views/backups.py deleted file mode 100644 index 6778df62d..000000000 --- a/cinder/api/views/backups.py +++ /dev/null @@ -1,102 +0,0 @@ -# Copyright (C) 2012 Hewlett-Packard Development Company, L.P. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -from cinder.api import common - - -class ViewBuilder(common.ViewBuilder): - """Model backup API responses as a python dictionary.""" - - _collection_name = "backups" - - def __init__(self): - """Initialize view builder.""" - super(ViewBuilder, self).__init__() - - def summary_list(self, request, backups, backup_count=None): - """Show a list of backups without many details.""" - return self._list_view(self.summary, request, backups, backup_count) - - def detail_list(self, request, backups, backup_count=None): - """Detailed view of a list of backups .""" - return self._list_view(self.detail, request, backups, backup_count) - - def summary(self, request, backup): - """Generic, non-detailed view of a backup.""" - return { - 'backup': { - 'id': backup['id'], - 'name': backup['display_name'], - 'links': self._get_links(request, - backup['id']), - }, - } - - def restore_summary(self, request, restore): - """Generic, non-detailed view of a restore.""" - return { - 'restore': { - 'backup_id': restore['backup_id'], - 'volume_id': restore['volume_id'], - 'volume_name': restore['volume_name'], - }, - } - - def detail(self, request, backup): - """Detailed view of a single backup.""" - return { - 'backup': { - 'id': backup.get('id'), - 'status': backup.get('status'), - 'size': backup.get('size'), - 'object_count': backup.get('object_count'), - 'availability_zone': backup.get('availability_zone'), - 'container': backup.get('container'), - 'created_at': backup.get('created_at'), - 'updated_at': backup.get('updated_at'), - 'name': backup.get('display_name'), - 'description': backup.get('display_description'), - 'fail_reason': backup.get('fail_reason'), - 'volume_id': backup.get('volume_id'), - 'links': self._get_links(request, backup['id']), - 'is_incremental': backup.is_incremental, - 'has_dependent_backups': backup.has_dependent_backups, - 'snapshot_id': backup.snapshot_id, - 'data_timestamp': backup.data_timestamp, - } - } - - def _list_view(self, func, request, backups, 
backup_count): - """Provide a view for a list of backups.""" - backups_list = [func(request, backup)['backup'] for backup in backups] - backups_links = self._get_collection_links(request, - backups, - self._collection_name, - backup_count) - backups_dict = dict(backups=backups_list) - - if backups_links: - backups_dict['backups_links'] = backups_links - - return backups_dict - - def export_summary(self, request, export): - """Generic view of an export.""" - return { - 'backup-record': { - 'backup_service': export['backup_service'], - 'backup_url': export['backup_url'], - }, - } diff --git a/cinder/api/views/capabilities.py b/cinder/api/views/capabilities.py deleted file mode 100644 index a95ab0e14..000000000 --- a/cinder/api/views/capabilities.py +++ /dev/null @@ -1,42 +0,0 @@ -# Copyright (c) 2015 Hitachi Data Systems, Inc. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -from cinder.api import common - - -class ViewBuilder(common.ViewBuilder): - """Model capabilities API responses as a python dictionary.""" - - _collection_name = "capabilities" - - def __init__(self): - """Initialize view builder.""" - super(ViewBuilder, self).__init__() - - def summary(self, request, capabilities, id): - """Summary view of a backend capabilities.""" - return { - 'namespace': 'OS::Storage::Capabilities::%s' % id, - 'vendor_name': capabilities.get('vendor_name'), - 'volume_backend_name': capabilities.get('volume_backend_name'), - 'pool_name': capabilities.get('pool_name'), - 'driver_version': capabilities.get('driver_version'), - 'storage_protocol': capabilities.get('storage_protocol'), - 'display_name': capabilities.get('display_name'), - 'description': capabilities.get('description'), - 'visibility': capabilities.get('visibility'), - 'replication_targets': capabilities.get('replication_targets', []), - 'properties': capabilities.get('properties'), - } diff --git a/cinder/api/views/cgsnapshots.py b/cinder/api/views/cgsnapshots.py deleted file mode 100644 index 26396e2e8..000000000 --- a/cinder/api/views/cgsnapshots.py +++ /dev/null @@ -1,74 +0,0 @@ -# Copyright (C) 2012 - 2014 EMC Corporation. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -from cinder.api import common - - -class ViewBuilder(common.ViewBuilder): - """Model cgsnapshot API responses as a python dictionary.""" - - _collection_name = "cgsnapshots" - - def __init__(self): - """Initialize view builder.""" - super(ViewBuilder, self).__init__() - - def summary_list(self, request, cgsnapshots): - """Show a list of cgsnapshots without many details.""" - return self._list_view(self.summary, request, cgsnapshots) - - def detail_list(self, request, cgsnapshots): - """Detailed view of a list of cgsnapshots .""" - return self._list_view(self.detail, request, cgsnapshots) - - def summary(self, request, cgsnapshot): - """Generic, non-detailed view of a cgsnapshot.""" - return { - 'cgsnapshot': { - 'id': cgsnapshot.id, - 'name': cgsnapshot.name - } - } - - def detail(self, request, cgsnapshot): - """Detailed view of a single cgsnapshot.""" - try: - group_id = cgsnapshot.consistencygroup_id - except AttributeError: - try: - group_id = cgsnapshot.group_id - except AttributeError: - group_id = None - else: - group_id = None - - return { - 'cgsnapshot': { - 'id': cgsnapshot.id, - 'consistencygroup_id': group_id, - 'status': cgsnapshot.status, - 'created_at': cgsnapshot.created_at, - 'name': cgsnapshot.name, - 'description': cgsnapshot.description - } - } - - def _list_view(self, func, request, cgsnapshots): - """Provide a view for a list of cgsnapshots.""" - cgsnapshots_list = [func(request, cgsnapshot)['cgsnapshot'] - for cgsnapshot in cgsnapshots] - cgsnapshots_dict = dict(cgsnapshots=cgsnapshots_list) - - return cgsnapshots_dict diff --git a/cinder/api/views/consistencygroups.py b/cinder/api/views/consistencygroups.py deleted file mode 100644 index 0644cabb5..000000000 --- a/cinder/api/views/consistencygroups.py +++ /dev/null @@ -1,82 +0,0 @@ -# Copyright (C) 2012 - 2014 EMC Corporation. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. 
You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from cinder.api import common - - -class ViewBuilder(common.ViewBuilder): - """Model consistencygroup API responses as a python dictionary.""" - - _collection_name = "consistencygroups" - - def __init__(self): - """Initialize view builder.""" - super(ViewBuilder, self).__init__() - - def summary_list(self, request, consistencygroups): - """Show a list of consistency groups without many details.""" - return self._list_view(self.summary, request, consistencygroups) - - def detail_list(self, request, consistencygroups): - """Detailed view of a list of consistency groups .""" - return self._list_view(self.detail, request, consistencygroups) - - def summary(self, request, consistencygroup): - """Generic, non-detailed view of a consistency group.""" - return { - 'consistencygroup': { - 'id': consistencygroup.id, - 'name': consistencygroup.name - } - } - - def detail(self, request, consistencygroup): - """Detailed view of a single consistency group.""" - try: - volume_types = (consistencygroup.volume_type_id.split(",") - if consistencygroup.volume_type_id else []) - volume_types = [type_id for type_id in volume_types if type_id] - except AttributeError: - try: - volume_types = [v_type.id for v_type in - consistencygroup.volume_types] - except AttributeError: - volume_types = [] - - return { - 'consistencygroup': { - 'id': consistencygroup.id, - 'status': consistencygroup.status, - 'availability_zone': consistencygroup.availability_zone, - 'created_at': consistencygroup.created_at, - 'name': consistencygroup.name, - 'description': consistencygroup.description, - 
'volume_types': volume_types, - } - } - - def _list_view(self, func, request, consistencygroups): - """Provide a view for a list of consistency groups.""" - consistencygroups_list = [ - func(request, consistencygroup)['consistencygroup'] - for consistencygroup in consistencygroups] - cg_links = self._get_collection_links(request, - consistencygroups, - self._collection_name) - consistencygroups_dict = dict(consistencygroups=consistencygroups_list) - if cg_links: - consistencygroups_dict['consistencygroup_links'] = cg_links - - return consistencygroups_dict diff --git a/cinder/api/views/limits.py b/cinder/api/views/limits.py deleted file mode 100644 index 39d460a7d..000000000 --- a/cinder/api/views/limits.py +++ /dev/null @@ -1,91 +0,0 @@ -# Copyright 2010-2011 OpenStack Foundation -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import datetime - - -class ViewBuilder(object): - """OpenStack API base limits view builder.""" - - def build(self, rate_limits, absolute_limits): - rate_limits = self._build_rate_limits(rate_limits) - absolute_limits = self._build_absolute_limits(absolute_limits) - - output = { - "limits": { - "rate": rate_limits, - "absolute": absolute_limits, - }, - } - - return output - - def _build_absolute_limits(self, absolute_limits): - """Builder for absolute limits - - absolute_limits should be given as a dict of limits. - For example: {"ram": 512, "gigabytes": 1024}. 
- - """ - limit_names = { - "gigabytes": ["maxTotalVolumeGigabytes"], - "backup_gigabytes": ["maxTotalBackupGigabytes"], - "volumes": ["maxTotalVolumes"], - "snapshots": ["maxTotalSnapshots"], - "backups": ["maxTotalBackups"], - } - limits = {} - for name, value in absolute_limits.items(): - if name in limit_names and value is not None: - for name in limit_names[name]: - limits[name] = value - return limits - - def _build_rate_limits(self, rate_limits): - limits = [] - for rate_limit in rate_limits: - _rate_limit_key = None - _rate_limit = self._build_rate_limit(rate_limit) - - # check for existing key - for limit in limits: - if (limit["uri"] == rate_limit["URI"] and - limit["regex"] == rate_limit["regex"]): - _rate_limit_key = limit - break - - # ensure we have a key if we didn't find one - if not _rate_limit_key: - _rate_limit_key = { - "uri": rate_limit["URI"], - "regex": rate_limit["regex"], - "limit": [], - } - limits.append(_rate_limit_key) - - _rate_limit_key["limit"].append(_rate_limit) - - return limits - - def _build_rate_limit(self, rate_limit): - _get_utc = datetime.datetime.utcfromtimestamp - next_avail = _get_utc(rate_limit["resetTime"]) - return { - "verb": rate_limit["verb"], - "value": rate_limit["value"], - "remaining": int(rate_limit["remaining"]), - "unit": rate_limit["unit"], - "next-available": next_avail.isoformat(), - } diff --git a/cinder/api/views/manageable_snapshots.py b/cinder/api/views/manageable_snapshots.py deleted file mode 100644 index 105358129..000000000 --- a/cinder/api/views/manageable_snapshots.py +++ /dev/null @@ -1,55 +0,0 @@ -# Copyright (c) 2016 Stratoscale, Ltd. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. 
You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from cinder.api import common - - -class ViewBuilder(common.ViewBuilder): - """Model manageable snapshot responses as a python dictionary.""" - - _collection_name = "os-snapshot-manage" - - def summary_list(self, request, snapshots, count): - """Show a list of manageable snapshots without many details.""" - return self._list_view(self.summary, request, snapshots, count) - - def detail_list(self, request, snapshots, count): - """Detailed view of a list of manageable snapshots.""" - return self._list_view(self.detail, request, snapshots, count) - - def summary(self, request, snapshot): - """Generic, non-detailed view of a manageable snapshot description.""" - return { - 'reference': snapshot['reference'], - 'size': snapshot['size'], - 'safe_to_manage': snapshot['safe_to_manage'], - 'source_reference': snapshot['source_reference'] - } - - def detail(self, request, snapshot): - """Detailed view of a manageable snapshot description.""" - return { - 'reference': snapshot['reference'], - 'size': snapshot['size'], - 'safe_to_manage': snapshot['safe_to_manage'], - 'reason_not_safe': snapshot['reason_not_safe'], - 'extra_info': snapshot['extra_info'], - 'cinder_id': snapshot['cinder_id'], - 'source_reference': snapshot['source_reference'] - } - - def _list_view(self, func, request, snapshots, count): - """Provide a view for a list of manageable snapshots.""" - snap_list = [func(request, snapshot) for snapshot in snapshots] - return {"manageable-snapshots": snap_list} diff --git a/cinder/api/views/manageable_volumes.py b/cinder/api/views/manageable_volumes.py 
deleted file mode 100644 index ef0bdad5d..000000000 --- a/cinder/api/views/manageable_volumes.py +++ /dev/null @@ -1,53 +0,0 @@ -# Copyright (c) 2016 Stratoscale, Ltd. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from cinder.api import common - - -class ViewBuilder(common.ViewBuilder): - """Model manageable volume responses as a python dictionary.""" - - _collection_name = "os-volume-manage" - - def summary_list(self, request, volumes, count): - """Show a list of manageable volumes without many details.""" - return self._list_view(self.summary, request, volumes, count) - - def detail_list(self, request, volumes, count): - """Detailed view of a list of manageable volumes.""" - return self._list_view(self.detail, request, volumes, count) - - def summary(self, request, volume): - """Generic, non-detailed view of a manageable volume description.""" - return { - 'reference': volume['reference'], - 'size': volume['size'], - 'safe_to_manage': volume['safe_to_manage'] - } - - def detail(self, request, volume): - """Detailed view of a manageable volume description.""" - return { - 'reference': volume['reference'], - 'size': volume['size'], - 'safe_to_manage': volume['safe_to_manage'], - 'reason_not_safe': volume['reason_not_safe'], - 'cinder_id': volume['cinder_id'], - 'extra_info': volume['extra_info'] - } - - def _list_view(self, func, request, volumes, count): - """Provide a view for a list of manageable volumes.""" - vol_list = [func(request, volume) for volume in 
volumes] - return {"manageable-volumes": vol_list} diff --git a/cinder/api/views/qos_specs.py b/cinder/api/views/qos_specs.py deleted file mode 100644 index 2efc786f1..000000000 --- a/cinder/api/views/qos_specs.py +++ /dev/null @@ -1,66 +0,0 @@ -# Copyright (C) 2013 eBay Inc. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from cinder.api import common - - -class ViewBuilder(common.ViewBuilder): - """Model QoS specs API responses as a python dictionary.""" - - _collection_name = "qos-specs" - - def __init__(self): - """Initialize view builder.""" - super(ViewBuilder, self).__init__() - - def summary_list(self, request, qos_specs, qos_count=None): - """Show a list of qos_specs without many details.""" - return self._list_view(self.detail, request, qos_specs, qos_count) - - def summary(self, request, qos_spec): - """Generic, non-detailed view of a qos_specs.""" - return self.detail(request, qos_spec) - - def detail(self, request, qos_spec): - """Detailed view of a single qos_spec.""" - # TODO(zhiteng) Add associations to detailed view - return { - 'qos_specs': { - 'id': qos_spec.id, - 'name': qos_spec.name, - 'consumer': qos_spec.consumer, - 'specs': qos_spec.specs, - }, - 'links': self._get_links(request, - qos_spec.id), - } - - def associations(self, request, associates): - """View of qos specs associations.""" - return { - 'qos_associations': associates - } - - def _list_view(self, func, request, qos_specs, qos_count=None): - """Provide a 
view for a list of qos_specs.""" - specs_list = [func(request, specs)['qos_specs'] for specs in qos_specs] - specs_links = self._get_collection_links(request, qos_specs, - self._collection_name, - qos_count) - specs_dict = dict(qos_specs=specs_list) - if specs_links: - specs_dict['qos_specs_links'] = specs_links - - return specs_dict diff --git a/cinder/api/views/scheduler_stats.py b/cinder/api/views/scheduler_stats.py deleted file mode 100644 index 0a01a59a2..000000000 --- a/cinder/api/views/scheduler_stats.py +++ /dev/null @@ -1,53 +0,0 @@ -# Copyright (C) 2014 eBay Inc. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -from cinder.api import common - - -class ViewBuilder(common.ViewBuilder): - """Model scheduler-stats API responses as a python dictionary.""" - - _collection_name = "scheduler-stats" - - def __init__(self): - """Initialize view builder.""" - super(ViewBuilder, self).__init__() - - def summary(self, request, pool): - """Summary view of a single pool.""" - return { - 'pool': { - 'name': pool.get('name'), - } - } - - def detail(self, request, pool): - """Detailed view of a single pool.""" - return { - 'pool': { - 'name': pool.get('name'), - 'capabilities': pool.get('capabilities'), - } - } - - def pools(self, request, pools, detail): - """Detailed/Summary view of a list of pools seen by scheduler.""" - if detail: - plist = [self.detail(request, pool)['pool'] for pool in pools] - else: - plist = [self.summary(request, pool)['pool'] for pool in pools] - pools_dict = dict(pools=plist) - - return pools_dict diff --git a/cinder/api/views/snapshots.py b/cinder/api/views/snapshots.py deleted file mode 100644 index 56f77337b..000000000 --- a/cinder/api/views/snapshots.py +++ /dev/null @@ -1,78 +0,0 @@ -# Copyright (c) 2015 Red Hat, Inc. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -from cinder.api import common - - -class ViewBuilder(common.ViewBuilder): - """Model snapshot API responses as a python dictionary.""" - - _collection_name = "snapshots" - - def __init__(self): - """Initialize view builder.""" - super(ViewBuilder, self).__init__() - - def summary_list(self, request, snapshots, snapshot_count=None): - """Show a list of snapshots without many details.""" - return self._list_view(self.summary, request, snapshots, - snapshot_count) - - def detail_list(self, request, snapshots, snapshot_count=None): - """Detailed view of a list of snapshots.""" - return self._list_view(self.detail, request, snapshots, snapshot_count, - coll_name=self._collection_name + '/detail') - - def summary(self, request, snapshot): - """Generic, non-detailed view of a snapshot.""" - if isinstance(snapshot.metadata, dict): - metadata = snapshot.metadata - else: - metadata = {} - - return { - 'snapshot': { - 'id': snapshot.id, - 'created_at': snapshot.created_at, - 'updated_at': snapshot.updated_at, - 'name': snapshot.display_name, - 'description': snapshot.display_description, - 'volume_id': snapshot.volume_id, - 'status': snapshot.status, - 'size': snapshot.volume_size, - 'metadata': metadata, - } - } - - def detail(self, request, snapshot): - """Detailed view of a single snapshot.""" - # NOTE(geguileo): No additional data at the moment - return self.summary(request, snapshot) - - def _list_view(self, func, request, snapshots, snapshot_count, - coll_name=_collection_name): - """Provide a view for a list of snapshots.""" - snapshots_list = [func(request, snapshot)['snapshot'] - for snapshot in snapshots] - snapshots_links = self._get_collection_links(request, - snapshots, - coll_name, - snapshot_count) - snapshots_dict = {self._collection_name: snapshots_list} - - if snapshots_links: - snapshots_dict[self._collection_name + '_links'] = snapshots_links - - return snapshots_dict diff --git a/cinder/api/views/transfers.py b/cinder/api/views/transfers.py deleted 
class ViewBuilder(common.ViewBuilder):
    """Model transfer API responses as a python dictionary."""

    _collection_name = "os-volume-transfer"

    def __init__(self):
        """Initialize view builder."""
        super(ViewBuilder, self).__init__()

    def summary_list(self, request, transfers, origin_transfer_count):
        """Show a list of transfers without many details."""
        return self._list_view(self.summary, request, transfers,
                               origin_transfer_count)

    def detail_list(self, request, transfers, origin_transfer_count):
        """Detailed view of a list of transfers."""
        return self._list_view(self.detail, request, transfers,
                               origin_transfer_count)

    def summary(self, request, transfer):
        """Generic, non-detailed view of a transfer."""
        record = {
            'id': transfer['id'],
            'volume_id': transfer.get('volume_id'),
            'name': transfer['display_name'],
            'links': self._get_links(request, transfer['id']),
        }
        return {'transfer': record}

    def detail(self, request, transfer):
        """Detailed view of a single transfer."""
        record = {
            'id': transfer.get('id'),
            'created_at': transfer.get('created_at'),
            'name': transfer.get('display_name'),
            'volume_id': transfer.get('volume_id'),
            'links': self._get_links(request, transfer['id']),
        }
        return {'transfer': record}

    def create(self, request, transfer):
        """Detailed view of a single transfer when created."""
        # Same as detail(), with the one-time auth_key exposed to the caller.
        record = {
            'id': transfer.get('id'),
            'created_at': transfer.get('created_at'),
            'name': transfer.get('display_name'),
            'volume_id': transfer.get('volume_id'),
            'auth_key': transfer.get('auth_key'),
            'links': self._get_links(request, transfer['id']),
        }
        return {'transfer': record}

    def _list_view(self, func, request, transfers, origin_transfer_count):
        """Provide a view for a list of transfers."""
        entries = [func(request, xfer)['transfer'] for xfer in transfers]
        links = self._get_collection_links(request,
                                           transfers,
                                           self._collection_name,
                                           origin_transfer_count)
        view = {'transfers': entries}
        if links:
            view['transfers_links'] = links
        return view
class ViewBuilder(common.ViewBuilder):

    def show(self, request, volume_type, brief=False):
        """Trim away extraneous volume type attributes."""
        trimmed = {
            'id': volume_type.get('id'),
            'name': volume_type.get('name'),
            'is_public': volume_type.get('is_public'),
            'extra_specs': volume_type.get('extra_specs'),
            'description': volume_type.get('description'),
        }
        # brief=True returns the bare record; otherwise wrap it in the
        # conventional 'volume_type' envelope.
        return trimmed if brief else {'volume_type': trimmed}

    def index(self, request, volume_types):
        """Index over trimmed volume types."""
        return {'volume_types': [self.show(request, vol_type, True)
                                 for vol_type in volume_types]}
def get_view_builder(req):
    """Build a ViewBuilder rooted at the configured public endpoint.

    Falls back to the request's application URL when no public_endpoint
    is configured.
    """
    return ViewBuilder(CONF.public_endpoint or req.application_url)


class ViewBuilder(object):
    def __init__(self, base_url):
        """Initialize ViewBuilder.

        :param base_url: url of the root wsgi application
        """
        self.base_url = base_url

    def build_versions(self, versions):
        """Return a view of every known version, ordered by version key."""
        ordered_keys = sorted(list(versions.keys()))
        return dict(versions=[self._build_version(versions[key])
                              for key in ordered_keys])

    def _build_version(self, version):
        """Deep-copy one version description and attach its links."""
        entry = copy.deepcopy(version)
        entry['links'] = self._build_links(version)
        return entry

    def _build_links(self, version_data):
        """Generate a container of links that refer to the provided version."""
        link_list = copy.deepcopy(version_data.get('links', {}))
        # 'id' looks like 'v2.0'; the href only uses the major part ('v2').
        major = version_data["id"].split('.')[0]
        link_list.append({'rel': 'self',
                          'href': self._generate_href(version=major)})
        return link_list

    def _generate_href(self, version='v3', path=None):
        """Create a URL that refers to a specific version_number."""
        root = self._get_base_url_without_version()
        href = urllib.parse.urljoin(root, version).rstrip('/') + '/'
        return href + path.lstrip('/') if path else href

    def _get_base_url_without_version(self):
        """Get the base URL with out the /v1 suffix."""
        return re.sub('v[1-9]+/?$', '', self.base_url)
def API(*args, **kwargs):
    # Factory for the backup API: resolve the implementation class named by
    # the ``backup_api_class`` option at call time so deployers can swap in
    # their own class, and instantiate it with the caller's arguments.
    class_name = CONF.backup_api_class
    return importutils.import_object(class_name, *args, **kwargs)
-""" - -from datetime import datetime -from eventlet import greenthread -from oslo_config import cfg -from oslo_log import log as logging -from oslo_utils import excutils -from oslo_utils import strutils -from pytz import timezone -import random - -from cinder.backup import rpcapi as backup_rpcapi -from cinder.common import constants -from cinder import context -from cinder.db import base -from cinder import exception -from cinder.i18n import _ -from cinder import objects -from cinder.objects import fields -import cinder.policy -from cinder import quota -from cinder import quota_utils -import cinder.volume -from cinder.volume import utils as volume_utils - -backup_api_opts = [ - cfg.BoolOpt('backup_use_same_host', - default=False, - help='Backup services use same backend.') -] - -CONF = cfg.CONF -CONF.register_opts(backup_api_opts) -LOG = logging.getLogger(__name__) -QUOTAS = quota.QUOTAS -IMPORT_VOLUME_ID = '00000000-0000-0000-0000-000000000000' - - -def check_policy(context, action): - target = { - 'project_id': context.project_id, - 'user_id': context.user_id, - } - _action = 'backup:%s' % action - cinder.policy.enforce(context, _action, target) - - -class API(base.Base): - """API for interacting with the volume backup manager.""" - - def __init__(self, db=None): - self.backup_rpcapi = backup_rpcapi.BackupAPI() - self.volume_api = cinder.volume.API() - super(API, self).__init__(db) - - def get(self, context, backup_id): - check_policy(context, 'get') - return objects.Backup.get_by_id(context, backup_id) - - def _check_support_to_force_delete(self, context, backup_host): - result = self.backup_rpcapi.check_support_to_force_delete(context, - backup_host) - return result - - def delete(self, context, backup, force=False): - """Make the RPC call to delete a volume backup. - - Call backup manager to execute backup delete or force delete operation. - :param context: running context - :param backup: the dict of backup that is got from DB. 
- :param force: indicate force delete or not - :raises InvalidBackup: - :raises BackupDriverException: - :raises ServiceNotFound: - """ - check_policy(context, 'delete') - if not force and backup.status not in [fields.BackupStatus.AVAILABLE, - fields.BackupStatus.ERROR]: - msg = _('Backup status must be available or error') - raise exception.InvalidBackup(reason=msg) - if force and not self._check_support_to_force_delete(context, - backup.host): - msg = _('force delete') - raise exception.NotSupportedOperation(operation=msg) - - # Don't allow backup to be deleted if there are incremental - # backups dependent on it. - deltas = self.get_all(context, search_opts={'parent_id': backup.id}) - if deltas and len(deltas): - msg = _('Incremental backups exist for this backup.') - raise exception.InvalidBackup(reason=msg) - - backup.status = fields.BackupStatus.DELETING - backup.host = self._get_available_backup_service_host( - backup.host, backup.availability_zone) - backup.save() - self.backup_rpcapi.delete_backup(context, backup) - - def get_all(self, context, search_opts=None, marker=None, limit=None, - offset=None, sort_keys=None, sort_dirs=None): - check_policy(context, 'get_all') - - search_opts = search_opts or {} - - all_tenants = search_opts.pop('all_tenants', '0') - if not strutils.is_valid_boolstr(all_tenants): - msg = _("all_tenants must be a boolean, got '%s'.") % all_tenants - raise exception.InvalidParameterValue(err=msg) - - if context.is_admin and strutils.bool_from_string(all_tenants): - backups = objects.BackupList.get_all(context, search_opts, - marker, limit, offset, - sort_keys, sort_dirs) - else: - backups = objects.BackupList.get_all_by_project( - context, context.project_id, search_opts, - marker, limit, offset, sort_keys, sort_dirs - ) - - return backups - - def _az_matched(self, service, availability_zone): - return ((not availability_zone) or - service.availability_zone == availability_zone) - - def _is_backup_service_enabled(self, 
availability_zone, host): - """Check if there is a backup service available.""" - topic = constants.BACKUP_TOPIC - ctxt = context.get_admin_context() - services = objects.ServiceList.get_all_by_topic( - ctxt, topic, disabled=False) - for srv in services: - if (self._az_matched(srv, availability_zone) and - srv.host == host and srv.is_up): - return True - return False - - def _get_any_available_backup_service(self, availability_zone): - """Get an available backup service host. - - Get an available backup service host in the specified - availability zone. - """ - services = [srv for srv in self._list_backup_services()] - random.shuffle(services) - # Get the next running service with matching availability zone. - idx = 0 - while idx < len(services): - srv = services[idx] - if(self._az_matched(srv, availability_zone) and - srv.is_up): - return srv.host - idx = idx + 1 - return None - - def _get_available_backup_service_host(self, host, az): - """Return an appropriate backup service host.""" - backup_host = None - if not host or not CONF.backup_use_same_host: - backup_host = self._get_any_available_backup_service(az) - elif self._is_backup_service_enabled(az, host): - backup_host = host - if not backup_host: - raise exception.ServiceNotFound(service_id='cinder-backup') - return backup_host - - def _list_backup_services(self): - """List all enabled backup services. - - :returns: list -- hosts for services that are enabled for backup. 
- """ - topic = constants.BACKUP_TOPIC - ctxt = context.get_admin_context() - services = objects.ServiceList.get_all_by_topic( - ctxt, topic, disabled=False) - return services - - def _list_backup_hosts(self): - services = self._list_backup_services() - return [srv.host for srv in services - if not srv.disabled and srv.is_up] - - def create(self, context, name, description, volume_id, - container, incremental=False, availability_zone=None, - force=False, snapshot_id=None): - """Make the RPC call to create a volume backup.""" - check_policy(context, 'create') - volume = self.volume_api.get(context, volume_id) - snapshot = None - if snapshot_id: - snapshot = self.volume_api.get_snapshot(context, snapshot_id) - - if volume_id != snapshot.volume_id: - msg = (_('Volume %(vol1)s does not match with ' - 'snapshot.volume_id %(vol2)s.') - % {'vol1': volume_id, - 'vol2': snapshot.volume_id}) - raise exception.InvalidVolume(reason=msg) - if snapshot['status'] not in ["available"]: - msg = (_('Snapshot to be backed up must be available, ' - 'but the current status is "%s".') - % snapshot['status']) - raise exception.InvalidSnapshot(reason=msg) - elif volume['status'] not in ["available", "in-use"]: - msg = (_('Volume to be backed up must be available ' - 'or in-use, but the current status is "%s".') - % volume['status']) - raise exception.InvalidVolume(reason=msg) - elif volume['status'] in ["in-use"] and not force: - msg = _('Backing up an in-use volume must use ' - 'the force flag.') - raise exception.InvalidVolume(reason=msg) - - previous_status = volume['status'] - volume_host = volume_utils.extract_host(volume.host, 'host') - host = self._get_available_backup_service_host( - volume_host, volume.availability_zone) - - # Reserve a quota before setting volume status and backup status - try: - reserve_opts = {'backups': 1, - 'backup_gigabytes': volume['size']} - reservations = QUOTAS.reserve(context, **reserve_opts) - except exception.OverQuota as e: - 
quota_utils.process_reserve_over_quota( - context, e, - resource='backups', - size=volume.size) - # Find the latest backup and use it as the parent backup to do an - # incremental backup. - latest_backup = None - if incremental: - backups = objects.BackupList.get_all_by_volume(context.elevated(), - volume_id) - if backups.objects: - # NOTE(xyang): The 'data_timestamp' field records the time - # when the data on the volume was first saved. If it is - # a backup from volume, 'data_timestamp' will be the same - # as 'created_at' for a backup. If it is a backup from a - # snapshot, 'data_timestamp' will be the same as - # 'created_at' for a snapshot. - # If not backing up from snapshot, the backup with the latest - # 'data_timestamp' will be the parent; If backing up from - # snapshot, the backup with the latest 'data_timestamp' will - # be chosen only if 'data_timestamp' is earlier than the - # 'created_at' timestamp of the snapshot; Otherwise, the - # backup will not be chosen as the parent. - # For example, a volume has a backup taken at 8:00, then - # a snapshot taken at 8:10, and then a backup at 8:20. - # When taking an incremental backup of the snapshot, the - # parent should be the backup at 8:00, not 8:20, and the - # 'data_timestamp' of this new backup will be 8:10. 
- latest_backup = max( - backups.objects, - key=lambda x: x['data_timestamp'] - if (not snapshot or (snapshot and x['data_timestamp'] - < snapshot['created_at'])) - else datetime(1, 1, 1, 1, 1, 1, tzinfo=timezone('UTC'))) - else: - msg = _('No backups available to do an incremental backup.') - raise exception.InvalidBackup(reason=msg) - - parent_id = None - if latest_backup: - parent_id = latest_backup.id - if latest_backup['status'] != fields.BackupStatus.AVAILABLE: - msg = _('The parent backup must be available for ' - 'incremental backup.') - raise exception.InvalidBackup(reason=msg) - - data_timestamp = None - if snapshot_id: - snapshot = objects.Snapshot.get_by_id(context, snapshot_id) - data_timestamp = snapshot.created_at - self.db.snapshot_update( - context, snapshot_id, - {'status': fields.SnapshotStatus.BACKING_UP}) - else: - self.db.volume_update(context, volume_id, - {'status': 'backing-up', - 'previous_status': previous_status}) - - backup = None - try: - kwargs = { - 'user_id': context.user_id, - 'project_id': context.project_id, - 'display_name': name, - 'display_description': description, - 'volume_id': volume_id, - 'status': fields.BackupStatus.CREATING, - 'container': container, - 'parent_id': parent_id, - 'size': volume['size'], - 'host': host, - 'snapshot_id': snapshot_id, - 'data_timestamp': data_timestamp, - } - backup = objects.Backup(context=context, **kwargs) - backup.create() - if not snapshot_id: - backup.data_timestamp = backup.created_at - backup.save() - QUOTAS.commit(context, reservations) - except Exception: - with excutils.save_and_reraise_exception(): - try: - if backup and 'id' in backup: - backup.destroy() - finally: - QUOTAS.rollback(context, reservations) - - # TODO(DuncanT): In future, when we have a generic local attach, - # this can go via the scheduler, which enables - # better load balancing and isolation of services - self.backup_rpcapi.create_backup(context, backup) - - return backup - - def restore(self, context, 
backup_id, volume_id=None, name=None): - """Make the RPC call to restore a volume backup.""" - check_policy(context, 'restore') - backup = self.get(context, backup_id) - if backup['status'] != fields.BackupStatus.AVAILABLE: - msg = _('Backup status must be available') - raise exception.InvalidBackup(reason=msg) - - size = backup['size'] - if size is None: - msg = _('Backup to be restored has invalid size') - raise exception.InvalidBackup(reason=msg) - - # Create a volume if none specified. If a volume is specified check - # it is large enough for the backup - if volume_id is None: - if name is None: - name = 'restore_backup_%s' % backup_id - - description = 'auto-created_from_restore_from_backup' - - LOG.info("Creating volume of %(size)s GB for restore of " - "backup %(backup_id)s.", - {'size': size, 'backup_id': backup_id}) - volume = self.volume_api.create(context, size, name, description) - volume_id = volume['id'] - - while True: - volume = self.volume_api.get(context, volume_id) - if volume['status'] != 'creating': - break - greenthread.sleep(1) - - if volume['status'] == "error": - msg = (_('Error while creating volume %(volume_id)s ' - 'for restoring backup %(backup_id)s.') % - {'volume_id': volume_id, 'backup_id': backup_id}) - raise exception.InvalidVolume(reason=msg) - else: - volume = self.volume_api.get(context, volume_id) - - if volume['status'] != "available": - msg = _('Volume to be restored to must be available') - raise exception.InvalidVolume(reason=msg) - - LOG.debug('Checking backup size %(bs)s against volume size %(vs)s', - {'bs': size, 'vs': volume['size']}) - if size > volume['size']: - msg = (_('volume size %(volume_size)d is too small to restore ' - 'backup of size %(size)d.') % - {'volume_size': volume['size'], 'size': size}) - raise exception.InvalidVolume(reason=msg) - - LOG.info("Overwriting volume %(volume_id)s with restore of " - "backup %(backup_id)s", - {'volume_id': volume_id, 'backup_id': backup_id}) - - # Setting the status here 
rather than setting at start and unrolling - # for each error condition, it should be a very small window - backup.host = self._get_available_backup_service_host( - backup.host, backup.availability_zone) - backup.status = fields.BackupStatus.RESTORING - backup.restore_volume_id = volume.id - backup.save() - self.db.volume_update(context, volume_id, {'status': - 'restoring-backup'}) - - self.backup_rpcapi.restore_backup(context, backup.host, backup, - volume_id) - - d = {'backup_id': backup_id, - 'volume_id': volume_id, - 'volume_name': volume['display_name'], } - - return d - - def reset_status(self, context, backup_id, status): - """Make the RPC call to reset a volume backup's status. - - Call backup manager to execute backup status reset operation. - :param context: running context - :param backup_id: which backup's status to be reset - :param status: backup's status to be reset - :raises InvalidBackup: - """ - # get backup info - backup = self.get(context, backup_id) - backup.host = self._get_available_backup_service_host( - backup.host, backup.availability_zone) - backup.save() - # send to manager to do reset operation - self.backup_rpcapi.reset_status(ctxt=context, backup=backup, - status=status) - - def export_record(self, context, backup_id): - """Make the RPC call to export a volume backup. - - Call backup manager to execute backup export. 
- - :param context: running context - :param backup_id: backup id to export - :returns: dictionary -- a description of how to import the backup - :returns: contains 'backup_url' and 'backup_service' - :raises InvalidBackup: - """ - check_policy(context, 'backup-export') - backup = self.get(context, backup_id) - if backup['status'] != fields.BackupStatus.AVAILABLE: - msg = (_('Backup status must be available and not %s.') % - backup['status']) - raise exception.InvalidBackup(reason=msg) - - LOG.debug("Calling RPCAPI with context: " - "%(ctx)s, host: %(host)s, backup: %(id)s.", - {'ctx': context, - 'host': backup['host'], - 'id': backup['id']}) - - backup.host = self._get_available_backup_service_host( - backup.host, backup.availability_zone) - backup.save() - export_data = self.backup_rpcapi.export_record(context, backup) - - return export_data - - def _get_import_backup(self, context, backup_url): - """Prepare database backup record for import. - - This method decodes provided backup_url and expects to find the id of - the backup in there. - - Then checks the DB for the presence of this backup record and if it - finds it and is not deleted it will raise an exception because the - record cannot be created or used. - - If the record is in deleted status then we must be trying to recover - this record, so we'll reuse it. - - If the record doesn't already exist we create it with provided id. 
- - :param context: running context - :param backup_url: backup description to be used by the backup driver - :return: BackupImport object - :raises InvalidBackup: - :raises InvalidInput: - """ - # Deserialize string backup record into a dictionary - backup_record = objects.Backup.decode_record(backup_url) - - # ID is a required field since it's what links incremental backups - if 'id' not in backup_record: - msg = _('Provided backup record is missing an id') - raise exception.InvalidInput(reason=msg) - - kwargs = { - 'user_id': context.user_id, - 'project_id': context.project_id, - 'volume_id': IMPORT_VOLUME_ID, - 'status': fields.BackupStatus.CREATING, - } - - try: - # Try to get the backup with that ID in all projects even among - # deleted entries. - backup = objects.BackupImport.get_by_id(context, - backup_record['id'], - read_deleted='yes', - project_only=False) - - # If record exists and it's not deleted we cannot proceed with the - # import - if backup.status != fields.BackupStatus.DELETED: - msg = _('Backup already exists in database.') - raise exception.InvalidBackup(reason=msg) - - # Otherwise we'll "revive" delete backup record - backup.update(kwargs) - backup.save() - - except exception.BackupNotFound: - # If record doesn't exist create it with the specific ID - backup = objects.BackupImport(context=context, - id=backup_record['id'], **kwargs) - backup.create() - - return backup - - def import_record(self, context, backup_service, backup_url): - """Make the RPC call to import a volume backup. - - :param context: running context - :param backup_service: backup service name - :param backup_url: backup description to be used by the backup driver - :raises InvalidBackup: - :raises ServiceNotFound: - :raises InvalidInput: - """ - check_policy(context, 'backup-import') - - # NOTE(ronenkat): since we don't have a backup-scheduler - # we need to find a host that support the backup service - # that was used to create the backup. 
- # We send it to the first backup service host, and the backup manager - # on that host will forward it to other hosts on the hosts list if it - # cannot support correct service itself. - hosts = self._list_backup_hosts() - if len(hosts) == 0: - raise exception.ServiceNotFound(service_id=backup_service) - - # Get Backup object that will be used to import this backup record - backup = self._get_import_backup(context, backup_url) - - first_host = hosts.pop() - self.backup_rpcapi.import_record(context, - first_host, - backup, - backup_service, - backup_url, - hosts) - - return backup - - def update(self, context, backup_id, fields): - check_policy(context, 'update') - backup = self.get(context, backup_id) - backup.update(fields) - backup.save() - return backup diff --git a/cinder/backup/chunkeddriver.py b/cinder/backup/chunkeddriver.py deleted file mode 100644 index 6bc064870..000000000 --- a/cinder/backup/chunkeddriver.py +++ /dev/null @@ -1,741 +0,0 @@ -# Copyright (C) 2012 Hewlett-Packard Development Company, L.P. -# Copyright (c) 2014 TrilioData, Inc -# Copyright (c) 2015 EMC Corporation -# Copyright (C) 2015 Kevin Fox -# Copyright (C) 2015 Tom Barron -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -"""Generic base class to implement metadata, compression and chunked data - operations -""" - -import abc -import hashlib -import json -import os - -import eventlet -from oslo_config import cfg -from oslo_log import log as logging -from oslo_service import loopingcall -from oslo_utils import excutils -from oslo_utils import units -import six - -from cinder.backup import driver -from cinder import exception -from cinder.i18n import _ -from cinder import objects -from cinder.objects import fields -from cinder.volume import utils as volume_utils - -LOG = logging.getLogger(__name__) - -chunkedbackup_service_opts = [ - cfg.StrOpt('backup_compression_algorithm', - default='zlib', - choices=['none', 'off', 'no', - 'zlib', 'gzip', - 'bz2', 'bzip2'], - help='Compression algorithm (None to disable)'), -] - -CONF = cfg.CONF -CONF.register_opts(chunkedbackup_service_opts) - - -@six.add_metaclass(abc.ABCMeta) -class ChunkedBackupDriver(driver.BackupDriver): - """Abstract chunked backup driver. - - Implements common functionality for backup drivers that store volume - data in multiple "chunks" in a backup repository when the size of - the backed up cinder volume exceeds the size of a backup repository - "chunk." - - Provides abstract methods to be implemented in concrete chunking - drivers. 
- """ - - DRIVER_VERSION = '1.0.0' - DRIVER_VERSION_MAPPING = {'1.0.0': '_restore_v1'} - - def _get_compressor(self, algorithm): - try: - if algorithm.lower() in ('none', 'off', 'no'): - return None - elif algorithm.lower() in ('zlib', 'gzip'): - import zlib as compressor - return compressor - elif algorithm.lower() in ('bz2', 'bzip2'): - import bz2 as compressor - return compressor - except ImportError: - pass - - err = _('unsupported compression algorithm: %s') % algorithm - raise ValueError(err) - - def __init__(self, context, chunk_size_bytes, sha_block_size_bytes, - backup_default_container, enable_progress_timer, - db=None): - super(ChunkedBackupDriver, self).__init__(context, db) - self.chunk_size_bytes = chunk_size_bytes - self.sha_block_size_bytes = sha_block_size_bytes - self.backup_default_container = backup_default_container - self.enable_progress_timer = enable_progress_timer - - self.backup_timer_interval = CONF.backup_timer_interval - self.data_block_num = CONF.backup_object_number_per_notification - self.az = CONF.storage_availability_zone - self.backup_compression_algorithm = CONF.backup_compression_algorithm - self.compressor = \ - self._get_compressor(CONF.backup_compression_algorithm) - self.support_force_delete = True - - # To create your own "chunked" backup driver, implement the following - # abstract methods. - - @abc.abstractmethod - def put_container(self, container): - """Create the container if needed. No failure if it pre-exists.""" - return - - @abc.abstractmethod - def get_container_entries(self, container, prefix): - """Get container entry names.""" - return - - @abc.abstractmethod - def get_object_writer(self, container, object_name, extra_metadata=None): - """Returns a writer object which stores the chunk data in backup repository. - - The object returned should be a context handler that can be used - in a "with" context. 
- """ - return - - @abc.abstractmethod - def get_object_reader(self, container, object_name, extra_metadata=None): - """Returns a reader object for the backed up chunk.""" - return - - @abc.abstractmethod - def delete_object(self, container, object_name): - """Delete object from container.""" - return - - @abc.abstractmethod - def _generate_object_name_prefix(self, backup): - return - - @abc.abstractmethod - def update_container_name(self, backup, container): - """Allow sub-classes to override container name. - - This method exists so that sub-classes can override the container name - as it comes in to the driver in the backup object. Implementations - should return None if no change to the container name is desired. - """ - return - - @abc.abstractmethod - def get_extra_metadata(self, backup, volume): - """Return extra metadata to use in prepare_backup. - - This method allows for collection of extra metadata in prepare_backup() - which will be passed to get_object_reader() and get_object_writer(). - Subclass extensions can use this extra information to optimize - data transfers. Return a json serializable object. - """ - return - - def _create_container(self, backup): - # Container's name will be decided by the driver (returned by method - # update_container_name), if no change is required by the driver then - # we'll use the one the backup object already has, but if it doesn't - # have one backup_default_container will be used. - new_container = self.update_container_name(backup, backup.container) - if new_container: - # If the driver is not really changing the name we don't want to - # dirty the field in the object and save it to the DB with the same - # value. 
- if new_container != backup.container: - backup.container = new_container - elif backup.container is None: - backup.container = self.backup_default_container - - LOG.debug('_create_container started, container: %(container)s,' - 'backup: %(backup_id)s.', - {'container': backup.container, 'backup_id': backup.id}) - - backup.save() - self.put_container(backup.container) - return backup.container - - def _generate_object_names(self, backup): - prefix = backup['service_metadata'] - object_names = self.get_container_entries(backup['container'], prefix) - LOG.debug('generated object list: %s.', object_names) - return object_names - - def _metadata_filename(self, backup): - object_name = backup['service_metadata'] - filename = '%s_metadata' % object_name - return filename - - def _sha256_filename(self, backup): - object_name = backup['service_metadata'] - filename = '%s_sha256file' % object_name - return filename - - def _write_metadata(self, backup, volume_id, container, object_list, - volume_meta, extra_metadata=None): - filename = self._metadata_filename(backup) - LOG.debug('_write_metadata started, container name: %(container)s,' - ' metadata filename: %(filename)s.', - {'container': container, 'filename': filename}) - metadata = {} - metadata['version'] = self.DRIVER_VERSION - metadata['backup_id'] = backup['id'] - metadata['volume_id'] = volume_id - metadata['backup_name'] = backup['display_name'] - metadata['backup_description'] = backup['display_description'] - metadata['created_at'] = str(backup['created_at']) - metadata['objects'] = object_list - metadata['parent_id'] = backup['parent_id'] - metadata['volume_meta'] = volume_meta - if extra_metadata: - metadata['extra_metadata'] = extra_metadata - metadata_json = json.dumps(metadata, sort_keys=True, indent=2) - if six.PY3: - metadata_json = metadata_json.encode('utf-8') - with self.get_object_writer(container, filename) as writer: - writer.write(metadata_json) - LOG.debug('_write_metadata finished. 
Metadata: %s.', metadata_json) - - def _write_sha256file(self, backup, volume_id, container, sha256_list): - filename = self._sha256_filename(backup) - LOG.debug('_write_sha256file started, container name: %(container)s,' - ' sha256file filename: %(filename)s.', - {'container': container, 'filename': filename}) - sha256file = {} - sha256file['version'] = self.DRIVER_VERSION - sha256file['backup_id'] = backup['id'] - sha256file['volume_id'] = volume_id - sha256file['backup_name'] = backup['display_name'] - sha256file['backup_description'] = backup['display_description'] - sha256file['created_at'] = six.text_type(backup['created_at']) - sha256file['chunk_size'] = self.sha_block_size_bytes - sha256file['sha256s'] = sha256_list - sha256file_json = json.dumps(sha256file, sort_keys=True, indent=2) - if six.PY3: - sha256file_json = sha256file_json.encode('utf-8') - with self.get_object_writer(container, filename) as writer: - writer.write(sha256file_json) - LOG.debug('_write_sha256file finished.') - - def _read_metadata(self, backup): - container = backup['container'] - filename = self._metadata_filename(backup) - LOG.debug('_read_metadata started, container name: %(container)s, ' - 'metadata filename: %(filename)s.', - {'container': container, 'filename': filename}) - with self.get_object_reader(container, filename) as reader: - metadata_json = reader.read() - if six.PY3: - metadata_json = metadata_json.decode('utf-8') - metadata = json.loads(metadata_json) - LOG.debug('_read_metadata finished. 
Metadata: %s.', metadata_json) - return metadata - - def _read_sha256file(self, backup): - container = backup['container'] - filename = self._sha256_filename(backup) - LOG.debug('_read_sha256file started, container name: %(container)s, ' - 'sha256 filename: %(filename)s.', - {'container': container, 'filename': filename}) - with self.get_object_reader(container, filename) as reader: - sha256file_json = reader.read() - if six.PY3: - sha256file_json = sha256file_json.decode('utf-8') - sha256file = json.loads(sha256file_json) - LOG.debug('_read_sha256file finished.') - return sha256file - - def _prepare_backup(self, backup): - """Prepare the backup process and return the backup metadata.""" - volume = self.db.volume_get(self.context, backup.volume_id) - - if volume['size'] <= 0: - err = _('volume size %d is invalid.') % volume['size'] - raise exception.InvalidVolume(reason=err) - - container = self._create_container(backup) - - object_prefix = self._generate_object_name_prefix(backup) - backup.service_metadata = object_prefix - backup.save() - - volume_size_bytes = volume['size'] * units.Gi - availability_zone = self.az - LOG.debug('starting backup of volume: %(volume_id)s,' - ' volume size: %(volume_size_bytes)d, object names' - ' prefix %(object_prefix)s, availability zone:' - ' %(availability_zone)s', - { - 'volume_id': backup.volume_id, - 'volume_size_bytes': volume_size_bytes, - 'object_prefix': object_prefix, - 'availability_zone': availability_zone, - }) - object_meta = {'id': 1, 'list': [], 'prefix': object_prefix, - 'volume_meta': None} - object_sha256 = {'id': 1, 'sha256s': [], 'prefix': object_prefix} - extra_metadata = self.get_extra_metadata(backup, volume) - if extra_metadata is not None: - object_meta['extra_metadata'] = extra_metadata - - return (object_meta, object_sha256, extra_metadata, container, - volume_size_bytes) - - def _backup_chunk(self, backup, container, data, data_offset, - object_meta, extra_metadata): - """Backup data chunk based on the 
object metadata and offset.""" - object_prefix = object_meta['prefix'] - object_list = object_meta['list'] - - object_id = object_meta['id'] - object_name = '%s-%05d' % (object_prefix, object_id) - obj = {} - obj[object_name] = {} - obj[object_name]['offset'] = data_offset - obj[object_name]['length'] = len(data) - LOG.debug('Backing up chunk of data from volume.') - algorithm, output_data = self._prepare_output_data(data) - obj[object_name]['compression'] = algorithm - LOG.debug('About to put_object') - with self.get_object_writer( - container, object_name, extra_metadata=extra_metadata - ) as writer: - writer.write(output_data) - md5 = hashlib.md5(data).hexdigest() - obj[object_name]['md5'] = md5 - LOG.debug('backup MD5 for %(object_name)s: %(md5)s', - {'object_name': object_name, 'md5': md5}) - object_list.append(obj) - object_id += 1 - object_meta['list'] = object_list - object_meta['id'] = object_id - - LOG.debug('Calling eventlet.sleep(0)') - eventlet.sleep(0) - - def _prepare_output_data(self, data): - if self.compressor is None: - return 'none', data - data_size_bytes = len(data) - compressed_data = self.compressor.compress(data) - comp_size_bytes = len(compressed_data) - algorithm = CONF.backup_compression_algorithm.lower() - if comp_size_bytes >= data_size_bytes: - LOG.debug('Compression of this chunk was ineffective: ' - 'original length: %(data_size_bytes)d, ' - 'compressed length: %(compressed_size_bytes)d. 
' - 'Using original data for this chunk.', - {'data_size_bytes': data_size_bytes, - 'compressed_size_bytes': comp_size_bytes, - }) - return 'none', data - LOG.debug('Compressed %(data_size_bytes)d bytes of data ' - 'to %(comp_size_bytes)d bytes using %(algorithm)s.', - {'data_size_bytes': data_size_bytes, - 'comp_size_bytes': comp_size_bytes, - 'algorithm': algorithm, - }) - return algorithm, compressed_data - - def _finalize_backup(self, backup, container, object_meta, object_sha256): - """Write the backup's metadata to the backup repository.""" - object_list = object_meta['list'] - object_id = object_meta['id'] - volume_meta = object_meta['volume_meta'] - sha256_list = object_sha256['sha256s'] - extra_metadata = object_meta.get('extra_metadata') - self._write_sha256file(backup, - backup.volume_id, - container, - sha256_list) - self._write_metadata(backup, - backup.volume_id, - container, - object_list, - volume_meta, - extra_metadata) - backup.object_count = object_id - backup.save() - LOG.debug('backup %s finished.', backup['id']) - - def _backup_metadata(self, backup, object_meta): - """Backup volume metadata. - - NOTE(dosaboy): the metadata we are backing up is obtained from a - versioned api so we should not alter it in any way here. - We must also be sure that the service that will perform - the restore is compatible with version used. 
- """ - json_meta = self.get_metadata(backup['volume_id']) - if not json_meta: - LOG.debug("No volume metadata to backup.") - return - - object_meta["volume_meta"] = json_meta - - def _send_progress_end(self, context, backup, object_meta): - object_meta['backup_percent'] = 100 - volume_utils.notify_about_backup_usage(context, - backup, - "createprogress", - extra_usage_info= - object_meta) - - def _send_progress_notification(self, context, backup, object_meta, - total_block_sent_num, total_volume_size): - backup_percent = total_block_sent_num * 100 / total_volume_size - object_meta['backup_percent'] = backup_percent - volume_utils.notify_about_backup_usage(context, - backup, - "createprogress", - extra_usage_info= - object_meta) - - def backup(self, backup, volume_file, backup_metadata=True): - """Backup the given volume. - - If backup['parent_id'] is given, then an incremental backup - is performed. - """ - if self.chunk_size_bytes % self.sha_block_size_bytes: - err = _('Chunk size is not multiple of ' - 'block size for creating hash.') - raise exception.InvalidBackup(reason=err) - - # Read the shafile of the parent backup if backup['parent_id'] - # is given. - parent_backup_shafile = None - parent_backup = None - if backup.parent_id: - parent_backup = objects.Backup.get_by_id(self.context, - backup.parent_id) - parent_backup_shafile = self._read_sha256file(parent_backup) - parent_backup_shalist = parent_backup_shafile['sha256s'] - if (parent_backup_shafile['chunk_size'] != - self.sha_block_size_bytes): - err = (_('Hash block size has changed since the last ' - 'backup. New hash block size: %(new)s. Old hash ' - 'block size: %(old)s. Do a full backup.') - % {'old': parent_backup_shafile['chunk_size'], - 'new': self.sha_block_size_bytes}) - raise exception.InvalidBackup(reason=err) - # If the volume size increased since the last backup, fail - # the incremental backup and ask user to do a full backup. 
- if backup.size > parent_backup.size: - err = _('Volume size increased since the last ' - 'backup. Do a full backup.') - raise exception.InvalidBackup(reason=err) - - (object_meta, object_sha256, extra_metadata, container, - volume_size_bytes) = self._prepare_backup(backup) - - counter = 0 - total_block_sent_num = 0 - - # There are two mechanisms to send the progress notification. - # 1. The notifications are periodically sent in a certain interval. - # 2. The notifications are sent after a certain number of chunks. - # Both of them are working simultaneously during the volume backup, - # when "chunked" backup drivers are deployed. - def _notify_progress(): - self._send_progress_notification(self.context, backup, - object_meta, - total_block_sent_num, - volume_size_bytes) - timer = loopingcall.FixedIntervalLoopingCall( - _notify_progress) - if self.enable_progress_timer: - timer.start(interval=self.backup_timer_interval) - - sha256_list = object_sha256['sha256s'] - shaindex = 0 - is_backup_canceled = False - while True: - # First of all, we check the status of this backup. If it - # has been changed to delete or has been deleted, we cancel the - # backup process to do forcing delete. - backup = objects.Backup.get_by_id(self.context, backup.id) - if backup.status in (fields.BackupStatus.DELETING, - fields.BackupStatus.DELETED): - is_backup_canceled = True - # To avoid the chunk left when deletion complete, need to - # clean up the object of chunk again. - self.delete_backup(backup) - LOG.debug('Cancel the backup process of %s.', backup.id) - break - data_offset = volume_file.tell() - data = volume_file.read(self.chunk_size_bytes) - if data == b'': - break - - # Calculate new shas with the datablock. 
- shalist = [] - off = 0 - datalen = len(data) - while off < datalen: - chunk_start = off - chunk_end = chunk_start + self.sha_block_size_bytes - if chunk_end > datalen: - chunk_end = datalen - chunk = data[chunk_start:chunk_end] - sha = hashlib.sha256(chunk).hexdigest() - shalist.append(sha) - off += self.sha_block_size_bytes - sha256_list.extend(shalist) - - # If parent_backup is not None, that means an incremental - # backup will be performed. - if parent_backup: - # Find the extent that needs to be backed up. - extent_off = -1 - for idx, sha in enumerate(shalist): - if sha != parent_backup_shalist[shaindex]: - if extent_off == -1: - # Start of new extent. - extent_off = idx * self.sha_block_size_bytes - else: - if extent_off != -1: - # We've reached the end of extent. - extent_end = idx * self.sha_block_size_bytes - segment = data[extent_off:extent_end] - self._backup_chunk(backup, container, segment, - data_offset + extent_off, - object_meta, - extra_metadata) - extent_off = -1 - shaindex += 1 - - # The last extent extends to the end of data buffer. - if extent_off != -1: - extent_end = datalen - segment = data[extent_off:extent_end] - self._backup_chunk(backup, container, segment, - data_offset + extent_off, - object_meta, extra_metadata) - extent_off = -1 - else: # Do a full backup. - self._backup_chunk(backup, container, data, data_offset, - object_meta, extra_metadata) - - # Notifications - total_block_sent_num += self.data_block_num - counter += 1 - if counter == self.data_block_num: - # Send the notification to Ceilometer when the chunk - # number reaches the data_block_num. The backup percentage - # is put in the metadata as the extra information. - self._send_progress_notification(self.context, backup, - object_meta, - total_block_sent_num, - volume_size_bytes) - # Reset the counter - counter = 0 - - # Stop the timer. - timer.stop() - # If backup has been cancelled we have nothing more to do - # but timer.stop(). 
- if is_backup_canceled: - return - # All the data have been sent, the backup_percent reaches 100. - self._send_progress_end(self.context, backup, object_meta) - - object_sha256['sha256s'] = sha256_list - if backup_metadata: - try: - self._backup_metadata(backup, object_meta) - # Whatever goes wrong, we want to log, cleanup, and re-raise. - except Exception: - with excutils.save_and_reraise_exception(): - LOG.exception("Backup volume metadata failed.") - self.delete_backup(backup) - - self._finalize_backup(backup, container, object_meta, object_sha256) - - def _restore_v1(self, backup, volume_id, metadata, volume_file): - """Restore a v1 volume backup.""" - backup_id = backup['id'] - LOG.debug('v1 volume backup restore of %s started.', backup_id) - extra_metadata = metadata.get('extra_metadata') - container = backup['container'] - metadata_objects = metadata['objects'] - metadata_object_names = [] - for obj in metadata_objects: - metadata_object_names.extend(obj.keys()) - LOG.debug('metadata_object_names = %s.', metadata_object_names) - prune_list = [self._metadata_filename(backup), - self._sha256_filename(backup)] - object_names = [object_name for object_name in - self._generate_object_names(backup) - if object_name not in prune_list] - if sorted(object_names) != sorted(metadata_object_names): - err = _('restore_backup aborted, actual object list ' - 'does not match object list stored in metadata.') - raise exception.InvalidBackup(reason=err) - - for metadata_object in metadata_objects: - object_name, obj = list(metadata_object.items())[0] - LOG.debug('restoring object. 
backup: %(backup_id)s, ' - 'container: %(container)s, object name: ' - '%(object_name)s, volume: %(volume_id)s.', - { - 'backup_id': backup_id, - 'container': container, - 'object_name': object_name, - 'volume_id': volume_id, - }) - - with self.get_object_reader( - container, object_name, - extra_metadata=extra_metadata) as reader: - body = reader.read() - compression_algorithm = metadata_object[object_name]['compression'] - decompressor = self._get_compressor(compression_algorithm) - volume_file.seek(obj['offset']) - if decompressor is not None: - LOG.debug('decompressing data using %s algorithm', - compression_algorithm) - decompressed = decompressor.decompress(body) - volume_file.write(decompressed) - else: - volume_file.write(body) - - # force flush every write to avoid long blocking write on close - volume_file.flush() - - # Be tolerant to IO implementations that do not support fileno() - try: - fileno = volume_file.fileno() - except IOError: - LOG.info("volume_file does not support fileno() so skipping " - "fsync()") - else: - os.fsync(fileno) - - # Restoring a backup to a volume can take some time. 
Yield so other - # threads can run, allowing for among other things the service - # status to be updated - eventlet.sleep(0) - LOG.debug('v1 volume backup restore of %s finished.', - backup_id) - - def restore(self, backup, volume_id, volume_file): - """Restore the given volume backup from backup repository.""" - backup_id = backup['id'] - container = backup['container'] - object_prefix = backup['service_metadata'] - LOG.debug('starting restore of backup %(object_prefix)s ' - 'container: %(container)s, to volume %(volume_id)s, ' - 'backup: %(backup_id)s.', - { - 'object_prefix': object_prefix, - 'container': container, - 'volume_id': volume_id, - 'backup_id': backup_id, - }) - metadata = self._read_metadata(backup) - metadata_version = metadata['version'] - LOG.debug('Restoring backup version %s', metadata_version) - try: - restore_func = getattr(self, self.DRIVER_VERSION_MAPPING.get( - metadata_version)) - except TypeError: - err = (_('No support to restore backup version %s') - % metadata_version) - raise exception.InvalidBackup(reason=err) - - # Build a list of backups based on parent_id. A full backup - # will be the last one in the list. - backup_list = [] - backup_list.append(backup) - current_backup = backup - while current_backup.parent_id: - prev_backup = objects.Backup.get_by_id(self.context, - current_backup.parent_id) - backup_list.append(prev_backup) - current_backup = prev_backup - - # Do a full restore first, then layer the incremental backups - # on top of it in order. 
- index = len(backup_list) - 1 - while index >= 0: - backup1 = backup_list[index] - index = index - 1 - metadata = self._read_metadata(backup1) - restore_func(backup1, volume_id, metadata, volume_file) - - volume_meta = metadata.get('volume_meta', None) - try: - if volume_meta: - self.put_metadata(volume_id, volume_meta) - else: - LOG.debug("No volume metadata in this backup.") - except exception.BackupMetadataUnsupportedVersion: - msg = _("Metadata restore failed due to incompatible version.") - LOG.error(msg) - raise exception.BackupOperationError(msg) - - LOG.debug('restore %(backup_id)s to %(volume_id)s finished.', - {'backup_id': backup_id, 'volume_id': volume_id}) - - def delete_backup(self, backup): - """Delete the given backup.""" - container = backup['container'] - object_prefix = backup['service_metadata'] - LOG.debug('delete started, backup: %(id)s, container: %(cont)s, ' - 'prefix: %(pre)s.', - {'id': backup['id'], - 'cont': container, - 'pre': object_prefix}) - - if container is not None and object_prefix is not None: - object_names = [] - try: - object_names = self._generate_object_names(backup) - except Exception: - LOG.warning('Error while listing objects, continuing' - ' with delete.') - - for object_name in object_names: - self.delete_object(container, object_name) - LOG.debug('deleted object: %(object_name)s' - ' in container: %(container)s.', - { - 'object_name': object_name, - 'container': container - }) - # Deleting a backup's objects can take some time. - # Yield so other threads can run - eventlet.sleep(0) - - LOG.debug('delete %s finished.', backup['id']) diff --git a/cinder/backup/driver.py b/cinder/backup/driver.py deleted file mode 100644 index 0e65fce64..000000000 --- a/cinder/backup/driver.py +++ /dev/null @@ -1,426 +0,0 @@ -# Copyright (C) 2013 Deutsche Telekom AG -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. 
You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -"""Base class for all backup drivers.""" - -import abc - -from oslo_config import cfg -from oslo_log import log as logging -from oslo_serialization import jsonutils -import six - -from cinder.db import base -from cinder import exception -from cinder.i18n import _ -from cinder import keymgr as key_manager - -service_opts = [ - cfg.IntOpt('backup_metadata_version', default=2, - help='Backup metadata version to be used when backing up ' - 'volume metadata. If this number is bumped, make sure the ' - 'service doing the restore supports the new version.'), - cfg.IntOpt('backup_object_number_per_notification', - default=10, - help='The number of chunks or objects, for which one ' - 'Ceilometer notification will be sent'), - cfg.IntOpt('backup_timer_interval', - default=120, - help='Interval, in seconds, between two progress notifications ' - 'reporting the backup status'), -] - -CONF = cfg.CONF -CONF.register_opts(service_opts) - -LOG = logging.getLogger(__name__) - - -class BackupMetadataAPI(base.Base): - - TYPE_TAG_VOL_BASE_META = 'volume-base-metadata' - TYPE_TAG_VOL_META = 'volume-metadata' - TYPE_TAG_VOL_GLANCE_META = 'volume-glance-metadata' - - def __init__(self, context, db=None): - super(BackupMetadataAPI, self).__init__(db) - self.context = context - - @staticmethod - def _is_serializable(value): - """Returns True if value is serializable.""" - try: - jsonutils.dumps(value) - except TypeError: - LOG.info("Value with type=%s is not serializable", - type(value)) - return False - - return True - - def _save_vol_base_meta(self, container, volume_id): - 
"""Save base volume metadata to container. - - This will fetch all fields from the db Volume object for volume_id and - save them in the provided container dictionary. - """ - type_tag = self.TYPE_TAG_VOL_BASE_META - LOG.debug("Getting metadata type '%s'", type_tag) - meta = self.db.volume_get(self.context, volume_id) - if meta: - container[type_tag] = {} - for key, value in meta: - # Exclude fields that are "not JSON serializable" - if not self._is_serializable(value): - LOG.info("Unable to serialize field '%s' - excluding " - "from backup", key) - continue - # Copy the encryption key UUID for backup - if key is 'encryption_key_id' and value is not None: - km = key_manager.API(CONF) - value = km.store(self.context, km.get(self.context, value)) - LOG.debug("Copying encryption key UUID for backup.") - container[type_tag][key] = value - - LOG.debug("Completed fetching metadata type '%s'", type_tag) - else: - LOG.debug("No metadata type '%s' available", type_tag) - - def _save_vol_meta(self, container, volume_id): - """Save volume metadata to container. - - This will fetch all fields from the db VolumeMetadata object for - volume_id and save them in the provided container dictionary. - """ - type_tag = self.TYPE_TAG_VOL_META - LOG.debug("Getting metadata type '%s'", type_tag) - meta = self.db.volume_metadata_get(self.context, volume_id) - if meta: - container[type_tag] = {} - for entry in meta: - # Exclude fields that are "not JSON serializable" - if not self._is_serializable(meta[entry]): - LOG.info("Unable to serialize field '%s' - excluding " - "from backup", entry) - continue - container[type_tag][entry] = meta[entry] - - LOG.debug("Completed fetching metadata type '%s'", type_tag) - else: - LOG.debug("No metadata type '%s' available", type_tag) - - def _save_vol_glance_meta(self, container, volume_id): - """Save volume Glance metadata to container. 
- - This will fetch all fields from the db VolumeGlanceMetadata object for - volume_id and save them in the provided container dictionary. - """ - type_tag = self.TYPE_TAG_VOL_GLANCE_META - LOG.debug("Getting metadata type '%s'", type_tag) - try: - meta = self.db.volume_glance_metadata_get(self.context, volume_id) - if meta: - container[type_tag] = {} - for entry in meta: - # Exclude fields that are "not JSON serializable" - if not self._is_serializable(entry.value): - LOG.info("Unable to serialize field '%s' - " - "excluding from backup", entry) - continue - container[type_tag][entry.key] = entry.value - - LOG.debug("Completed fetching metadata type '%s'", type_tag) - except exception.GlanceMetadataNotFound: - LOG.debug("No metadata type '%s' available", type_tag) - - @staticmethod - def _filter(metadata, fields, excludes=None): - """Returns set of metadata restricted to required fields. - - If fields is empty list, the full set is returned. - - :param metadata: master set of metadata - :param fields: list of fields we want to extract - :param excludes: fields to be excluded - :returns: filtered metadata - """ - if not fields: - return metadata - - if not excludes: - excludes = [] - - subset = {} - for field in fields: - if field in metadata and field not in excludes: - subset[field] = metadata[field] - else: - LOG.debug("Excluding field '%s'", field) - - return subset - - def _restore_vol_base_meta(self, metadata, volume_id, fields): - """Restore values to Volume object for provided fields.""" - LOG.debug("Restoring volume base metadata") - excludes = [] - - # Ignore unencrypted backups. - key = 'encryption_key_id' - if key in fields and key in metadata and metadata[key] is not None: - self._restore_vol_encryption_meta(volume_id, - metadata['volume_type_id']) - - # NOTE(dosaboy): if the target volume looks like it was auto-created - # as part of this restore operation and we have a name to restore - # then apply the name to the target volume. 
However, if that target - # volume already existed and it has a name or we do not have a name to - # restore, then ignore this key. This is intended to be a less drastic - # solution than commit 7ee80f7. - key = 'display_name' - if key in fields and key in metadata: - target_vol = self.db.volume_get(self.context, volume_id) - name = target_vol.get(key, '') - if (not metadata.get(key) or name and - not name.startswith('restore_backup_')): - excludes.append(key) - excludes.append('display_description') - - metadata = self._filter(metadata, fields, excludes=excludes) - self.db.volume_update(self.context, volume_id, metadata) - - def _restore_vol_encryption_meta(self, volume_id, src_volume_type_id): - """Restores the volume_type_id for encryption if needed. - - Only allow restoration of an encrypted backup if the destination - volume has the same volume type as the source volume. Otherwise - encryption will not work. If volume types are already the same, - no action is needed. - """ - dest_vol = self.db.volume_get(self.context, volume_id) - if dest_vol['volume_type_id'] != src_volume_type_id: - LOG.debug("Volume type id's do not match.") - # If the volume types do not match, and the destination volume - # does not have a volume type, force the destination volume - # to have the encrypted volume type, provided it still exists. - if dest_vol['volume_type_id'] is None: - try: - self.db.volume_type_get( - self.context, src_volume_type_id) - except exception.VolumeTypeNotFound: - LOG.debug("Volume type of source volume has been " - "deleted. Encrypted backup restore has " - "failed.") - msg = _("The source volume type '%s' is not " - "available.") % (src_volume_type_id) - raise exception.EncryptedBackupOperationFailed(msg) - # Update dest volume with src volume's volume_type_id. 
- LOG.debug("The volume type of the destination volume " - "will become the volume type of the source " - "volume.") - self.db.volume_update(self.context, volume_id, - {'volume_type_id': src_volume_type_id}) - else: - # Volume type id's do not match, and destination volume - # has a volume type. Throw exception. - LOG.warning("Destination volume type is different from " - "source volume type for an encrypted volume. " - "Encrypted backup restore has failed.") - msg = (_("The source volume type '%(src)s' is different " - "than the destination volume type '%(dest)s'.") % - {'src': src_volume_type_id, - 'dest': dest_vol['volume_type_id']}) - raise exception.EncryptedBackupOperationFailed(msg) - - def _restore_vol_meta(self, metadata, volume_id, fields): - """Restore values to VolumeMetadata object for provided fields.""" - LOG.debug("Restoring volume metadata") - metadata = self._filter(metadata, fields) - self.db.volume_metadata_update(self.context, volume_id, metadata, True) - - def _restore_vol_glance_meta(self, metadata, volume_id, fields): - """Restore values to VolumeGlanceMetadata object for provided fields. - - First delete any existing metadata then save new values. - """ - LOG.debug("Restoring volume glance metadata") - metadata = self._filter(metadata, fields) - self.db.volume_glance_metadata_delete_by_volume(self.context, - volume_id) - for key, value in metadata.items(): - self.db.volume_glance_metadata_create(self.context, - volume_id, - key, value) - - # Now mark the volume as bootable - self.db.volume_update(self.context, volume_id, - {'bootable': True}) - - def _v1_restore_factory(self): - """All metadata is backed up but we selectively restore. - - Returns a dictionary of the form: - - {: (, )} - - Empty field list indicates that all backed up fields should be - restored. 
- """ - return {self.TYPE_TAG_VOL_BASE_META: - (self._restore_vol_base_meta, - ['display_name', 'display_description']), - self.TYPE_TAG_VOL_META: - (self._restore_vol_meta, []), - self.TYPE_TAG_VOL_GLANCE_META: - (self._restore_vol_glance_meta, [])} - - def _v2_restore_factory(self): - """All metadata is backed up but we selectively restore. - - Returns a dictionary of the form: - - {: (, )} - - Empty field list indicates that all backed up fields should be - restored. - """ - return {self.TYPE_TAG_VOL_BASE_META: - (self._restore_vol_base_meta, - ['display_name', 'display_description', 'encryption_key_id']), - self.TYPE_TAG_VOL_META: - (self._restore_vol_meta, []), - self.TYPE_TAG_VOL_GLANCE_META: - (self._restore_vol_glance_meta, [])} - - def get(self, volume_id): - """Get volume metadata. - - Returns a json-encoded dict containing all metadata and the restore - version i.e. the version used to decide what actually gets restored - from this container when doing a backup restore. - """ - container = {'version': CONF.backup_metadata_version} - self._save_vol_base_meta(container, volume_id) - self._save_vol_meta(container, volume_id) - self._save_vol_glance_meta(container, volume_id) - - if container: - return jsonutils.dumps(container) - else: - return None - - def put(self, volume_id, json_metadata): - """Restore volume metadata to a volume. - - The json container should contain a version that is supported here. 
- """ - meta_container = jsonutils.loads(json_metadata) - version = meta_container['version'] - if version == 1: - factory = self._v1_restore_factory() - elif version == 2: - factory = self._v2_restore_factory() - else: - msg = (_("Unsupported backup metadata version (%s)") % (version)) - raise exception.BackupMetadataUnsupportedVersion(msg) - - for type in factory: - func = factory[type][0] - fields = factory[type][1] - if type in meta_container: - func(meta_container[type], volume_id, fields) - else: - LOG.debug("No metadata of type '%s' to restore", type) - - -@six.add_metaclass(abc.ABCMeta) -class BackupDriver(base.Base): - - def __init__(self, context, db=None): - super(BackupDriver, self).__init__(db) - self.context = context - self.backup_meta_api = BackupMetadataAPI(context, db) - # This flag indicates if backup driver supports force - # deletion. So it should be set to True if the driver that inherits - # from BackupDriver supports the force deletion function. - self.support_force_delete = False - - def get_metadata(self, volume_id): - return self.backup_meta_api.get(volume_id) - - def put_metadata(self, volume_id, json_metadata): - self.backup_meta_api.put(volume_id, json_metadata) - - @abc.abstractmethod - def backup(self, backup, volume_file, backup_metadata=False): - """Start a backup of a specified volume.""" - return - - @abc.abstractmethod - def restore(self, backup, volume_id, volume_file): - """Restore a saved backup.""" - return - - @abc.abstractmethod - def delete_backup(self, backup): - """Delete a saved backup.""" - return - - def export_record(self, backup): - """Export driver specific backup record information. - - If backup backend needs additional driver specific information to - import backup record back into the system it must overwrite this method - and return it here as a dictionary so it can be serialized into a - string. - - Default backup driver implementation has no extra information. 
- - :param backup: backup object to export - :returns: driver_info - dictionary with extra information - """ - return {} - - def import_record(self, backup, driver_info): - """Import driver specific backup record information. - - If backup backend needs additional driver specific information to - import backup record back into the system it must overwrite this method - since it will be called with the extra information that was provided by - export_record when exporting the backup. - - Default backup driver implementation does nothing since it didn't - export any specific data in export_record. - - :param backup: backup object to export - :param driver_info: dictionary with driver specific backup record - information - :returns: nothing - """ - return - - -@six.add_metaclass(abc.ABCMeta) -class BackupDriverWithVerify(BackupDriver): - @abc.abstractmethod - def verify(self, backup): - """Verify that the backup exists on the backend. - - Verify that the backup is OK, possibly following an import record - operation. - - :param backup: backup id of the backup to verify - :raises InvalidBackup, NotImplementedError: - """ - return diff --git a/cinder/backup/drivers/__init__.py b/cinder/backup/drivers/__init__.py deleted file mode 100644 index e69de29bb..000000000 diff --git a/cinder/backup/drivers/ceph.py b/cinder/backup/drivers/ceph.py deleted file mode 100644 index 27d9115b1..000000000 --- a/cinder/backup/drivers/ceph.py +++ /dev/null @@ -1,1235 +0,0 @@ -# Copyright 2013 Canonical Ltd. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the -# License for the specific language governing permissions and limitations -# under the License. - -"""Ceph Backup Service Implementation. - -This driver supports backing up volumes of any type to a Ceph object store. It -is also capable of detecting whether the volume to be backed up is a Ceph RBD -volume and, if so, attempts to perform incremental/differential backups. - -Support is also included for the following in the case of a source volume being -a Ceph RBD volume: - - * backing up within the same Ceph pool (not recommended) - * backing up between different Ceph pools - * backing up between different Ceph clusters - -At the time of writing, differential backup support in Ceph/librbd was quite -new so this driver accounts for this by first attempting differential backup -and falling back to full backup/copy if the former fails. It is recommended -that you upgrade to Ceph Dumpling (>= v0.67) or above to get the best results. - -If incremental backups are used, multiple backups of the same volume are stored -as snapshots so that minimal space is consumed in the object store and -restoring the volume takes a far reduced amount of time compared to a full -copy. - -Note that Cinder supports restoring to a new volume or the original volume the -backup was taken from. For the latter case, a full copy is enforced since this -was deemed the safest action to take. It is therefore recommended to always -restore to a new volume (default). 
-""" - -import fcntl -import os -import re -import subprocess -import time - -import eventlet -from os_brick.initiator import linuxrbd -from oslo_config import cfg -from oslo_log import log as logging -from oslo_utils import excutils -from oslo_utils import units -from six.moves import range - -from cinder.backup import driver -from cinder import exception -from cinder.i18n import _ -from cinder import interface -from cinder import utils -import cinder.volume.drivers.rbd as rbd_driver - -try: - import rados - import rbd -except ImportError: - rados = None - rbd = None - -LOG = logging.getLogger(__name__) - -service_opts = [ - cfg.StrOpt('backup_ceph_conf', default='/etc/ceph/ceph.conf', - help='Ceph configuration file to use.'), - cfg.StrOpt('backup_ceph_user', default='cinder', - help='The Ceph user to connect with. Default here is to use ' - 'the same user as for Cinder volumes. If not using cephx ' - 'this should be set to None.'), - cfg.IntOpt('backup_ceph_chunk_size', default=(units.Mi * 128), - help='The chunk size, in bytes, that a backup is broken into ' - 'before transfer to the Ceph object store.'), - cfg.StrOpt('backup_ceph_pool', default='backups', - help='The Ceph pool where volume backups are stored.'), - cfg.IntOpt('backup_ceph_stripe_unit', default=0, - help='RBD stripe unit to use when creating a backup image.'), - cfg.IntOpt('backup_ceph_stripe_count', default=0, - help='RBD stripe count to use when creating a backup image.'), - cfg.BoolOpt('backup_ceph_image_journals', default=False, - help='If True, apply JOURNALING and EXCLUSIVE_LOCK feature ' - 'bits to the backup RBD objects to allow mirroring'), - cfg.BoolOpt('restore_discard_excess_bytes', default=True, - help='If True, always discard excess bytes when restoring ' - 'volumes i.e. 
pad with zeroes.') -] - -CONF = cfg.CONF -CONF.register_opts(service_opts) - - -class VolumeMetadataBackup(object): - - def __init__(self, client, backup_id): - self._client = client - self._backup_id = backup_id - - @property - def name(self): - return utils.convert_str("backup.%s.meta" % self._backup_id) - - @property - def exists(self): - meta_obj = rados.Object(self._client.ioctx, self.name) - return self._exists(meta_obj) - - def _exists(self, obj): - try: - obj.stat() - except rados.ObjectNotFound: - return False - else: - return True - - def set(self, json_meta): - """Write JSON metadata to a new object. - - This should only be called once per backup. Raises - VolumeMetadataBackupExists if the object already exists. - """ - meta_obj = rados.Object(self._client.ioctx, self.name) - if self._exists(meta_obj): - msg = _("Metadata backup object '%s' already exists") % self.name - raise exception.VolumeMetadataBackupExists(msg) - - meta_obj.write(json_meta) - - def get(self): - """Get metadata backup object. - - Returns None if the object does not exist. - """ - meta_obj = rados.Object(self._client.ioctx, self.name) - if not self._exists(meta_obj): - LOG.debug("Metadata backup object %s does not exist", self.name) - return None - - return meta_obj.read() - - def remove_if_exists(self): - meta_obj = rados.Object(self._client.ioctx, self.name) - try: - meta_obj.remove() - except rados.ObjectNotFound: - LOG.debug("Metadata backup object '%s' not found - ignoring", - self.name) - - -@interface.backupdriver -class CephBackupDriver(driver.BackupDriver): - """Backup Cinder volumes to Ceph Object Store. - - This class enables backing up Cinder volumes to a Ceph object store. - Backups may be stored in their own pool or even cluster. Store location is - defined by the Ceph conf file and service config options supplied. 
- - If the source volume is itself an RBD volume, the backup will be performed - using incremental differential backups which *should* give a performance - gain. - """ - - def __init__(self, context, db=None, execute=None): - super(CephBackupDriver, self).__init__(context, db) - self.rbd = rbd - self.rados = rados - self.chunk_size = CONF.backup_ceph_chunk_size - self._execute = execute or utils.execute - - if self._supports_stripingv2: - self.rbd_stripe_unit = CONF.backup_ceph_stripe_unit - self.rbd_stripe_count = CONF.backup_ceph_stripe_count - else: - LOG.info("RBD striping not supported - ignoring configuration " - "settings for rbd striping.") - self.rbd_stripe_count = 0 - self.rbd_stripe_unit = 0 - - self._ceph_backup_user = utils.convert_str(CONF.backup_ceph_user) - self._ceph_backup_pool = utils.convert_str(CONF.backup_ceph_pool) - self._ceph_backup_conf = utils.convert_str(CONF.backup_ceph_conf) - - def _validate_string_args(self, *args): - """Ensure all args are non-None and non-empty.""" - return all(args) - - def _ceph_args(self, user, conf=None, pool=None): - """Create default ceph args for executing rbd commands. - - If no --conf is provided, rbd will look in the default locations e.g. - /etc/ceph/ceph.conf - """ - - # Make sure user arg is valid since rbd command may not fail if - # invalid/no user provided, resulting in unexpected behaviour. 
- if not self._validate_string_args(user): - raise exception.BackupInvalidCephArgs(_("invalid user '%s'") % - user) - - args = ['--id', user] - if conf: - args.extend(['--conf', conf]) - if pool: - args.extend(['--pool', pool]) - - return args - - @property - def _supports_layering(self): - """Determine if copy-on-write is supported by our version of librbd.""" - return hasattr(self.rbd, 'RBD_FEATURE_LAYERING') - - @property - def _supports_stripingv2(self): - """Determine if striping is supported by our version of librbd.""" - return hasattr(self.rbd, 'RBD_FEATURE_STRIPINGV2') - - @property - def _supports_exclusive_lock(self): - """Determine if exclusive-lock is supported by librbd.""" - return hasattr(self.rbd, 'RBD_FEATURE_EXCLUSIVE_LOCK') - - @property - def _supports_journaling(self): - """Determine if journaling is supported by our version of librbd.""" - return hasattr(self.rbd, 'RBD_FEATURE_JOURNALING') - - def _get_rbd_support(self): - """Determine RBD features supported by our version of librbd.""" - old_format = True - features = 0 - if self._supports_layering: - old_format = False - features |= self.rbd.RBD_FEATURE_LAYERING - if self._supports_stripingv2: - old_format = False - features |= self.rbd.RBD_FEATURE_STRIPINGV2 - - # journaling requires exclusive_lock; check both together - if CONF.backup_ceph_image_journals: - if self._supports_exclusive_lock and self._supports_journaling: - old_format = False - features |= (self.rbd.RBD_FEATURE_EXCLUSIVE_LOCK | - self.rbd.RBD_FEATURE_JOURNALING) - else: - # FIXME (tasker): when the backup manager supports loading the - # driver during its initialization, this exception should be - # moved to the driver's initialization so that it can stop - # the service from starting when the underyling RBD does not - # support the requested features. 
- LOG.error("RBD journaling not supported - unable to " - "support per image mirroring in backup pool") - raise exception.BackupInvalidCephArgs( - _("Image Journaling set but RBD backend does " - "not support journaling") - ) - - return (old_format, features) - - def _connect_to_rados(self, pool=None): - """Establish connection to the backup Ceph cluster.""" - client = self.rados.Rados(rados_id=self._ceph_backup_user, - conffile=self._ceph_backup_conf) - try: - client.connect() - pool_to_open = utils.convert_str(pool or self._ceph_backup_pool) - ioctx = client.open_ioctx(pool_to_open) - return client, ioctx - except self.rados.Error: - # shutdown cannot raise an exception - client.shutdown() - raise - - def _disconnect_from_rados(self, client, ioctx): - """Terminate connection with the backup Ceph cluster.""" - # closing an ioctx cannot raise an exception - ioctx.close() - client.shutdown() - - def _get_backup_base_name(self, volume_id, backup_id=None, - diff_format=False): - """Return name of base image used for backup. - - Incremental backups use a new base name so we support old and new style - format. - """ - # Ensure no unicode - if diff_format: - return utils.convert_str("volume-%s.backup.base" % volume_id) - else: - if backup_id is None: - msg = _("Backup id required") - raise exception.InvalidParameterValue(msg) - return utils.convert_str("volume-%s.backup.%s" - % (volume_id, backup_id)) - - def _discard_bytes(self, volume, offset, length): - """Trim length bytes from offset. - - If the volume is an rbd do a discard() otherwise assume it is a file - and pad with zeroes. 
- """ - if length: - LOG.debug("Discarding %(length)s bytes from offset %(offset)s", - {'length': length, 'offset': offset}) - if self._file_is_rbd(volume): - volume.rbd_image.discard(offset, length) - else: - zeroes = '\0' * self.chunk_size - chunks = int(length / self.chunk_size) - for chunk in range(0, chunks): - LOG.debug("Writing zeroes chunk %d", chunk) - volume.write(zeroes) - volume.flush() - # yield to any other pending backups - eventlet.sleep(0) - - rem = int(length % self.chunk_size) - if rem: - zeroes = '\0' * rem - volume.write(zeroes) - volume.flush() - - def _transfer_data(self, src, src_name, dest, dest_name, length): - """Transfer data between files (Python IO objects).""" - LOG.debug("Transferring data between '%(src)s' and '%(dest)s'", - {'src': src_name, 'dest': dest_name}) - - chunks = int(length / self.chunk_size) - LOG.debug("%(chunks)s chunks of %(bytes)s bytes to be transferred", - {'chunks': chunks, 'bytes': self.chunk_size}) - - for chunk in range(0, chunks): - before = time.time() - data = src.read(self.chunk_size) - # If we have reach end of source, discard any extraneous bytes from - # destination volume if trim is enabled and stop writing. 
- if data == b'': - if CONF.restore_discard_excess_bytes: - self._discard_bytes(dest, dest.tell(), - length - dest.tell()) - - return - - dest.write(data) - dest.flush() - delta = (time.time() - before) - rate = (self.chunk_size / delta) / 1024 - LOG.debug("Transferred chunk %(chunk)s of %(chunks)s " - "(%(rate)dK/s)", - {'chunk': chunk + 1, - 'chunks': chunks, - 'rate': rate}) - - # yield to any other pending backups - eventlet.sleep(0) - - rem = int(length % self.chunk_size) - if rem: - LOG.debug("Transferring remaining %s bytes", rem) - data = src.read(rem) - if data == b'': - if CONF.restore_discard_excess_bytes: - self._discard_bytes(dest, dest.tell(), rem) - else: - dest.write(data) - dest.flush() - # yield to any other pending backups - eventlet.sleep(0) - - def _create_base_image(self, name, size, rados_client): - """Create a base backup image. - - This will be the base image used for storing differential exports. - """ - LOG.debug("Creating base image '%s'", name) - old_format, features = self._get_rbd_support() - self.rbd.RBD().create(ioctx=rados_client.ioctx, - name=name, - size=size, - old_format=old_format, - features=features, - stripe_unit=self.rbd_stripe_unit, - stripe_count=self.rbd_stripe_count) - - def _delete_backup_snapshot(self, rados_client, base_name, backup_id): - """Delete snapshot associated with this backup if one exists. - - A backup should have at most ONE associated snapshot. - - This is required before attempting to delete the base image. The - snapshot on the original volume can be left as it will be purged when - the volume is deleted. - - Returns tuple(deleted_snap_name, num_of_remaining_snaps). 
- """ - remaining_snaps = 0 - base_rbd = self.rbd.Image(rados_client.ioctx, base_name) - try: - snap_name = self._get_backup_snap_name(base_rbd, base_name, - backup_id) - if snap_name: - LOG.debug("Deleting backup snapshot='%s'", snap_name) - base_rbd.remove_snap(snap_name) - else: - LOG.debug("No backup snapshot to delete") - - # Now check whether any snapshots remain on the base image - backup_snaps = self.get_backup_snaps(base_rbd) - if backup_snaps: - remaining_snaps = len(backup_snaps) - finally: - base_rbd.close() - - return snap_name, remaining_snaps - - def _try_delete_base_image(self, backup, base_name=None): - """Try to delete backup RBD image. - - If the rbd image is a base image for incremental backups, it may have - snapshots. Delete the snapshot associated with backup_id and if the - image has no more snapshots, delete it. Otherwise return. - - If no base name is provided try normal (full) format then diff format - image name. - - If a base name is provided but does not exist, ImageNotFound will be - raised. - - If the image is busy, a number of retries will be performed if - ImageBusy is received, after which the exception will be propagated to - the caller. 
- """ - retries = 3 - delay = 5 - try_diff_format = False - volume_id = backup.volume_id - - if base_name is None: - try_diff_format = True - - base_name = self._get_backup_base_name(volume_id, backup.id) - LOG.debug("Trying diff format basename='%(basename)s' for " - "backup base image of volume %(volume)s.", - {'basename': base_name, 'volume': volume_id}) - - with rbd_driver.RADOSClient(self, backup.container) as client: - rbd_exists, base_name = \ - self._rbd_image_exists(base_name, volume_id, client, - try_diff_format=try_diff_format) - if not rbd_exists: - raise self.rbd.ImageNotFound(_("image %s not found") % - base_name) - - while retries >= 0: - # First delete associated snapshot from base image (if exists) - snap, rem = self._delete_backup_snapshot(client, base_name, - backup.id) - if rem: - LOG.info( - "Backup base image of volume %(volume)s still " - "has %(snapshots)s snapshots so skipping base " - "image delete.", - {'snapshots': rem, 'volume': volume_id}) - return - - LOG.info("Deleting backup base image='%(basename)s' of " - "volume %(volume)s.", - {'basename': base_name, 'volume': volume_id}) - # Delete base if no more snapshots - try: - self.rbd.RBD().remove(client.ioctx, base_name) - except self.rbd.ImageBusy: - # Allow a retry if the image is busy - if retries > 0: - LOG.info("Backup image of volume %(volume)s is " - "busy, retrying %(retries)s more time(s) " - "in %(delay)ss.", - {'retries': retries, - 'delay': delay, - 'volume': volume_id}) - eventlet.sleep(delay) - else: - LOG.error("Max retries reached deleting backup " - "%(basename)s image of volume %(volume)s.", - {'volume': volume_id, - 'basename': base_name}) - raise - else: - LOG.debug("Base backup image='%(basename)s' of volume " - "%(volume)s deleted.", - {'basename': base_name, 'volume': volume_id}) - retries = 0 - finally: - retries -= 1 - - # Since we have deleted the base image we can delete the source - # volume backup snapshot. 
- src_name = utils.convert_str(volume_id) - if src_name in self.rbd.RBD().list(client.ioctx): - LOG.debug("Deleting source volume snapshot '%(snapshot)s' " - "for backup %(basename)s.", - {'snapshot': snap, 'basename': base_name}) - src_rbd = self.rbd.Image(client.ioctx, src_name) - try: - src_rbd.remove_snap(snap) - finally: - src_rbd.close() - - def _piped_execute(self, cmd1, cmd2): - """Pipe output of cmd1 into cmd2.""" - LOG.debug("Piping cmd1='%s' into...", ' '.join(cmd1)) - LOG.debug("cmd2='%s'", ' '.join(cmd2)) - - try: - p1 = subprocess.Popen(cmd1, stdout=subprocess.PIPE, - stderr=subprocess.PIPE) - except OSError as e: - LOG.error("Pipe1 failed - %s ", e) - raise - - # NOTE(dosaboy): ensure that the pipe is blocking. This is to work - # around the case where evenlet.green.subprocess is used which seems to - # use a non-blocking pipe. - flags = fcntl.fcntl(p1.stdout, fcntl.F_GETFL) & (~os.O_NONBLOCK) - fcntl.fcntl(p1.stdout, fcntl.F_SETFL, flags) - - try: - p2 = subprocess.Popen(cmd2, stdin=p1.stdout, - stdout=subprocess.PIPE, - stderr=subprocess.PIPE) - except OSError as e: - LOG.error("Pipe2 failed - %s ", e) - raise - - p1.stdout.close() - stdout, stderr = p2.communicate() - return p2.returncode, stderr - - def _rbd_diff_transfer(self, src_name, src_pool, dest_name, dest_pool, - src_user, src_conf, dest_user, dest_conf, - src_snap=None, from_snap=None): - """Copy only extents changed between two points. - - If no snapshot is provided, the diff extents will be all those changed - since the rbd volume/base was created, otherwise it will be those - changed since the snapshot was created. - """ - LOG.debug("Performing differential transfer from '%(src)s' to " - "'%(dest)s'", - {'src': src_name, 'dest': dest_name}) - - # NOTE(dosaboy): Need to be tolerant of clusters/clients that do - # not support these operations since at the time of writing they - # were very new. 
- - src_ceph_args = self._ceph_args(src_user, src_conf, pool=src_pool) - dest_ceph_args = self._ceph_args(dest_user, dest_conf, pool=dest_pool) - - cmd1 = ['rbd', 'export-diff'] + src_ceph_args - if from_snap is not None: - cmd1.extend(['--from-snap', from_snap]) - if src_snap: - path = utils.convert_str("%s/%s@%s" - % (src_pool, src_name, src_snap)) - else: - path = utils.convert_str("%s/%s" % (src_pool, src_name)) - cmd1.extend([path, '-']) - - cmd2 = ['rbd', 'import-diff'] + dest_ceph_args - rbd_path = utils.convert_str("%s/%s" % (dest_pool, dest_name)) - cmd2.extend(['-', rbd_path]) - - ret, stderr = self._piped_execute(cmd1, cmd2) - if ret: - msg = (_("RBD diff op failed - (ret=%(ret)s stderr=%(stderr)s)") % - {'ret': ret, 'stderr': stderr}) - LOG.info(msg) - raise exception.BackupRBDOperationFailed(msg) - - def _rbd_image_exists(self, name, volume_id, client, - try_diff_format=False): - """Return tuple (exists, name).""" - rbds = self.rbd.RBD().list(client.ioctx) - if name not in rbds: - LOG.debug("Image '%s' not found - trying diff format name", name) - if try_diff_format: - name = self._get_backup_base_name(volume_id, diff_format=True) - if name not in rbds: - LOG.debug("Diff format image '%s' not found", name) - return False, name - else: - return False, name - - return True, name - - def _snap_exists(self, base_name, snap_name, client): - """Return True if snapshot exists in base image.""" - base_rbd = self.rbd.Image(client.ioctx, base_name, read_only=True) - try: - snaps = base_rbd.list_snaps() - finally: - base_rbd.close() - - if snaps is None: - return False - - for snap in snaps: - if snap['name'] == snap_name: - return True - - return False - - def _backup_rbd(self, backup, volume_file, volume_name, length): - """Create an incremental backup from an RBD image.""" - rbd_user = volume_file.rbd_user - rbd_pool = volume_file.rbd_pool - rbd_conf = volume_file.rbd_conf - source_rbd_image = volume_file.rbd_image - volume_id = backup.volume_id - # Identify 
our --from-snap point (if one exists) - from_snap = self._get_most_recent_snap(source_rbd_image) - LOG.debug("Using --from-snap '%(snap)s' for incremental backup of " - "volume %(volume)s.", - {'snap': from_snap, 'volume': volume_id}) - - base_name = self._get_backup_base_name(volume_id, diff_format=True) - image_created = False - with rbd_driver.RADOSClient(self, backup.container) as client: - # If from_snap does not exist at the destination (and the - # destination exists), this implies a previous backup has failed. - # In this case we will force a full backup. - # - # TODO(dosaboy): find a way to repair the broken backup - # - if base_name not in self.rbd.RBD().list(ioctx=client.ioctx): - # If a from_snap is defined but the base does not exist, we - # ignore it since it is stale and waiting to be cleaned up. - if from_snap: - LOG.debug("Source snapshot '%(snapshot)s' of volume " - "%(volume)s is stale so deleting.", - {'snapshot': from_snap, 'volume': volume_id}) - source_rbd_image.remove_snap(from_snap) - from_snap = None - - # Create new base image - self._create_base_image(base_name, length, client) - image_created = True - else: - # If a from_snap is defined but does not exist in the back base - # then we cannot proceed (see above) - if not self._snap_exists(base_name, from_snap, client): - errmsg = (_("Snapshot='%(snap)s' does not exist in base " - "image='%(base)s' - aborting incremental " - "backup") % - {'snap': from_snap, 'base': base_name}) - LOG.info(errmsg) - # Raise this exception so that caller can try another - # approach - raise exception.BackupRBDOperationFailed(errmsg) - - # Snapshot source volume so that we have a new point-in-time - new_snap = self._get_new_snap_name(backup.id) - LOG.debug("Creating backup snapshot='%s'", new_snap) - source_rbd_image.create_snap(new_snap) - - # Attempt differential backup. If this fails, perhaps because librbd - # or Ceph cluster version does not support it, do a full backup - # instead. 
- # - # TODO(dosaboy): find a way to determine if the operation is supported - # rather than brute force approach. - try: - before = time.time() - self._rbd_diff_transfer(volume_name, rbd_pool, base_name, - backup.container, - src_user=rbd_user, - src_conf=rbd_conf, - dest_user=self._ceph_backup_user, - dest_conf=self._ceph_backup_conf, - src_snap=new_snap, - from_snap=from_snap) - - LOG.debug("Differential backup transfer completed in %.4fs", - (time.time() - before)) - - # We don't need the previous snapshot (if there was one) anymore so - # delete it. - if from_snap: - source_rbd_image.remove_snap(from_snap) - - except exception.BackupRBDOperationFailed: - with excutils.save_and_reraise_exception(): - LOG.debug("Differential backup transfer failed") - - # Clean up if image was created as part of this operation - if image_created: - self._try_delete_base_image(backup, base_name=base_name) - - # Delete snapshot - LOG.debug("Deleting diff backup snapshot='%(snapshot)s' of " - "source volume='%(volume)s'.", - {'snapshot': new_snap, 'volume': volume_id}) - source_rbd_image.remove_snap(new_snap) - - def _file_is_rbd(self, volume_file): - """Returns True if the volume_file is actually an RBD image.""" - return hasattr(volume_file, 'rbd_image') - - def _full_backup(self, backup, src_volume, src_name, length): - """Perform a full backup of src volume. - - First creates a base backup image in our backup location then performs - an chunked copy of all data from source volume to a new backup rbd - image. 
- """ - volume_id = backup.volume_id - backup_name = self._get_backup_base_name(volume_id, backup.id) - - with rbd_driver.RADOSClient(self, backup.container) as client: - # First create base backup image - old_format, features = self._get_rbd_support() - LOG.debug("Creating backup base image='%(name)s' for volume " - "%(volume)s.", - {'name': backup_name, 'volume': volume_id}) - self.rbd.RBD().create(ioctx=client.ioctx, - name=backup_name, - size=length, - old_format=old_format, - features=features, - stripe_unit=self.rbd_stripe_unit, - stripe_count=self.rbd_stripe_count) - - LOG.debug("Copying data from volume %s.", volume_id) - dest_rbd = self.rbd.Image(client.ioctx, backup_name) - try: - rbd_meta = linuxrbd.RBDImageMetadata(dest_rbd, - backup.container, - self._ceph_backup_user, - self._ceph_backup_conf) - rbd_fd = linuxrbd.RBDVolumeIOWrapper(rbd_meta) - self._transfer_data(src_volume, src_name, rbd_fd, backup_name, - length) - finally: - dest_rbd.close() - - @staticmethod - def backup_snapshot_name_pattern(): - """Returns the pattern used to match backup snapshots. - - It is essential that snapshots created for purposes other than backups - do not have this name format. - """ - return r"^backup\.([a-z0-9\-]+?)\.snap\.(.+)$" - - @classmethod - def get_backup_snaps(cls, rbd_image, sort=False): - """Get all backup snapshots for the given rbd image. - - NOTE: this call is made public since these snapshots must be deleted - before the base volume can be deleted. 
- """ - snaps = rbd_image.list_snaps() - - backup_snaps = [] - for snap in snaps: - search_key = cls.backup_snapshot_name_pattern() - result = re.search(search_key, snap['name']) - if result: - backup_snaps.append({'name': result.group(0), - 'backup_id': result.group(1), - 'timestamp': result.group(2)}) - - if sort: - # Sort into ascending order of timestamp - backup_snaps.sort(key=lambda x: x['timestamp'], reverse=True) - - return backup_snaps - - def _get_new_snap_name(self, backup_id): - return utils.convert_str("backup.%s.snap.%s" - % (backup_id, time.time())) - - def _get_backup_snap_name(self, rbd_image, name, backup_id): - """Return the name of the snapshot associated with backup_id. - - The rbd image provided must be the base image used for an incremental - backup. - - A backup is only allowed ONE associated snapshot. If more are found, - exception.BackupOperationError is raised. - """ - snaps = self.get_backup_snaps(rbd_image) - - LOG.debug("Looking for snapshot of backup base '%s'", name) - - if not snaps: - LOG.debug("Backup base '%s' has no snapshots", name) - return None - - snaps = [snap['name'] for snap in snaps - if snap['backup_id'] == backup_id] - - if not snaps: - LOG.debug("Backup '%s' has no snapshot", backup_id) - return None - - if len(snaps) > 1: - msg = (_("Backup should only have one snapshot but instead has %s") - % len(snaps)) - LOG.error(msg) - raise exception.BackupOperationError(msg) - - LOG.debug("Found snapshot '%s'", snaps[0]) - return snaps[0] - - def _get_most_recent_snap(self, rbd_image): - """Get the most recent backup snapshot of the provided image. - - Returns name of most recent backup snapshot or None if there are no - backup snapshots. - """ - backup_snaps = self.get_backup_snaps(rbd_image, sort=True) - if not backup_snaps: - return None - - return backup_snaps[0]['name'] - - def _get_volume_size_gb(self, volume): - """Return the size in gigabytes of the given volume. 
- - Raises exception.InvalidParameterValue if volume size is 0. - """ - if int(volume['size']) == 0: - errmsg = _("Need non-zero volume size") - raise exception.InvalidParameterValue(errmsg) - - return int(volume['size']) * units.Gi - - def _backup_metadata(self, backup): - """Backup volume metadata. - - NOTE(dosaboy): the metadata we are backing up is obtained from a - versioned api so we should not alter it in any way here. - We must also be sure that the service that will perform - the restore is compatible with version used. - """ - json_meta = self.get_metadata(backup.volume_id) - if not json_meta: - LOG.debug("No metadata to backup for volume %s.", backup.volume_id) - return - - LOG.debug("Backing up metadata for volume %s.", backup.volume_id) - try: - with rbd_driver.RADOSClient(self, backup.container) as client: - vol_meta_backup = VolumeMetadataBackup(client, backup.id) - vol_meta_backup.set(json_meta) - except exception.VolumeMetadataBackupExists as e: - msg = (_("Failed to backup volume metadata - %s") % e) - raise exception.BackupOperationError(msg) - - def backup(self, backup, volume_file, backup_metadata=True): - """Backup volume and metadata (if available) to Ceph object store. - - If the source volume is an RBD we will attempt to do an - incremental/differential backup, otherwise a full copy is performed. - If this fails we will attempt to fall back to full copy. - """ - volume = self.db.volume_get(self.context, backup.volume_id) - if not backup.container: - backup.container = self._ceph_backup_pool - backup.save() - - LOG.debug("Starting backup of volume='%s'.", volume.id) - - # Ensure we are at the beginning of the volume - volume_file.seek(0) - length = self._get_volume_size_gb(volume) - - do_full_backup = False - if self._file_is_rbd(volume_file): - # If volume an RBD, attempt incremental backup. 
- LOG.debug("Volume file is RBD: attempting incremental backup.") - try: - self._backup_rbd(backup, volume_file, volume.name, length) - except exception.BackupRBDOperationFailed: - LOG.debug("Forcing full backup of volume %s.", volume.id) - do_full_backup = True - else: - LOG.debug("Volume file is NOT RBD: will do full backup.") - do_full_backup = True - - if do_full_backup: - try: - self._full_backup(backup, volume_file, volume.name, length) - except exception.BackupOperationError: - with excutils.save_and_reraise_exception(): - self.delete_backup(backup) - - if backup_metadata: - try: - self._backup_metadata(backup) - except exception.BackupOperationError: - with excutils.save_and_reraise_exception(): - # Cleanup. - self.delete_backup(backup) - - LOG.debug("Backup '%(backup_id)s' of volume %(volume_id)s finished.", - {'backup_id': backup.id, 'volume_id': volume.id}) - - def _full_restore(self, backup, dest_file, dest_name, length, - src_snap=None): - """Restore volume using full copy i.e. all extents. - - This will result in all extents being copied from source to - destination. - """ - with rbd_driver.RADOSClient(self, backup.container) as client: - # If a source snapshot is provided we assume the base is diff - # format. - if src_snap: - diff_format = True - else: - diff_format = False - - backup_name = self._get_backup_base_name(backup.volume_id, - backup_id=backup.id, - diff_format=diff_format) - - # Retrieve backup volume - src_rbd = self.rbd.Image(client.ioctx, backup_name, - snapshot=src_snap, read_only=True) - try: - rbd_meta = linuxrbd.RBDImageMetadata(src_rbd, - backup.container, - self._ceph_backup_user, - self._ceph_backup_conf) - rbd_fd = linuxrbd.RBDVolumeIOWrapper(rbd_meta) - self._transfer_data(rbd_fd, backup_name, dest_file, dest_name, - length) - finally: - src_rbd.close() - - def _check_restore_vol_size(self, backup, restore_vol, restore_length, - src_pool): - """Ensure that the restore volume is the correct size. 
- - If the restore volume was bigger than the backup, the diff restore will - shrink it to the size of the original backup so we need to - post-process and resize it back to its expected size. - """ - backup_base = self._get_backup_base_name(backup.volume_id, - diff_format=True) - - with rbd_driver.RADOSClient(self, backup.container) as client: - adjust_size = 0 - base_image = self.rbd.Image(client.ioctx, - utils.convert_str(backup_base), - read_only=True) - try: - if restore_length != base_image.size(): - adjust_size = restore_length - finally: - base_image.close() - - if adjust_size: - with rbd_driver.RADOSClient(self, src_pool) as client: - restore_vol_encode = utils.convert_str(restore_vol) - dest_image = self.rbd.Image(client.ioctx, restore_vol_encode) - try: - LOG.debug("Adjusting restore vol size") - dest_image.resize(adjust_size) - finally: - dest_image.close() - - def _diff_restore_rbd(self, backup, restore_file, restore_name, - restore_point, restore_length): - """Attempt restore rbd volume from backup using diff transfer.""" - rbd_user = restore_file.rbd_user - rbd_pool = restore_file.rbd_pool - rbd_conf = restore_file.rbd_conf - base_name = self._get_backup_base_name(backup.volume_id, - diff_format=True) - - LOG.debug("Attempting incremental restore from base='%(base)s' " - "snap='%(snap)s'", - {'base': base_name, 'snap': restore_point}) - before = time.time() - try: - self._rbd_diff_transfer(base_name, backup.container, - restore_name, rbd_pool, - src_user=self._ceph_backup_user, - src_conf=self._ceph_backup_conf, - dest_user=rbd_user, dest_conf=rbd_conf, - src_snap=restore_point) - except exception.BackupRBDOperationFailed: - LOG.exception("Differential restore failed, trying full restore") - raise - - # If the volume we are restoring to is larger than the backup volume, - # we will need to resize it after the diff import since import-diff - # appears to shrink the target rbd volume to the size of the original - # backup volume. 
- self._check_restore_vol_size(backup, restore_name, restore_length, - rbd_pool) - - LOG.debug("Restore transfer completed in %.4fs", - (time.time() - before)) - - def _get_restore_point(self, base_name, backup_id): - """Get restore point snapshot name for incremental backup. - - If the backup was not incremental (determined by the fact that the - base has no snapshots/restore points), None is returned. Otherwise, the - restore point associated with backup_id is returned. - """ - with rbd_driver.RADOSClient(self, self._ceph_backup_pool) as client: - base_rbd = self.rbd.Image(client.ioctx, base_name, read_only=True) - try: - restore_point = self._get_backup_snap_name(base_rbd, base_name, - backup_id) - finally: - base_rbd.close() - - return restore_point - - def _rbd_has_extents(self, rbd_volume): - """Check whether the given rbd volume has extents. - - Return True if has extents, otherwise False. - """ - extents = [] - - def iter_cb(offset, length, exists): - if exists: - extents.append(length) - - rbd_volume.diff_iterate(0, rbd_volume.size(), None, iter_cb) - - if extents: - LOG.debug("RBD has %s extents", sum(extents)) - return True - - return False - - def _diff_restore_allowed(self, base_name, backup, volume, volume_file, - rados_client): - """Determine if differential restore is possible and restore point. - - Determine whether a differential restore is possible/allowed, - and find out the restore point if backup base is diff-format. - - In order for a differential restore to be performed we need: - * destination volume must be RBD - * destination volume must have zero extents - * backup base image must exist - * backup must have a restore point - * target volume is different from source volume of backup - - Returns True if differential restore is allowed, False otherwise. - Return the restore point if back base is diff-format. - """ - # NOTE(dosaboy): base_name here must be diff format. 
- rbd_exists, base_name = self._rbd_image_exists(base_name, - backup.volume_id, - rados_client) - - if not rbd_exists: - return False, None - - # Get the restore point. If no restore point is found, we assume - # that the backup was not performed using diff/incremental methods - # so we enforce full copy. - restore_point = self._get_restore_point(base_name, backup.id) - - if restore_point: - if self._file_is_rbd(volume_file): - LOG.debug("Volume file is RBD.") - # If the volume we are restoring to is the volume the backup - # was made from, force a full restore since a diff will not - # work in this case. - if volume.id == backup.volume_id: - LOG.debug("Destination volume is same as backup source " - "volume %s - forcing full copy.", volume.id) - return False, restore_point - - # If the destination volume has extents we cannot allow a diff - # restore. - if self._rbd_has_extents(volume_file.rbd_image): - # We return the restore point so that a full copy is done - # from snapshot. - LOG.debug("Destination has extents - forcing full copy") - return False, restore_point - - return True, restore_point - else: - LOG.debug("Volume file is NOT RBD.") - else: - LOG.info("No restore point found for backup='%(backup)s' of " - "volume %(volume)s although base image is found - " - "forcing full copy.", - {'backup': backup.id, - 'volume': backup.volume_id}) - return False, restore_point - - def _restore_volume(self, backup, volume, volume_file): - """Restore volume from backup using diff transfer if possible. - - Attempts a differential restore and reverts to full copy if diff fails. 
- """ - length = int(volume.size) * units.Gi - - base_name = self._get_backup_base_name(backup.volume_id, - diff_format=True) - - with rbd_driver.RADOSClient(self, backup.container) as client: - diff_allowed, restore_point = \ - self._diff_restore_allowed(base_name, backup, volume, - volume_file, client) - - do_full_restore = True - if diff_allowed: - # Attempt diff - try: - LOG.debug("Attempting differential restore.") - self._diff_restore_rbd(backup, volume_file, volume.name, - restore_point, length) - do_full_restore = False - except exception.BackupRBDOperationFailed: - LOG.debug("Forcing full restore to volume %s.", - volume.id) - - if do_full_restore: - # Otherwise full copy - LOG.debug("Running full restore.") - self._full_restore(backup, volume_file, volume.name, - length, src_snap=restore_point) - - def _restore_metadata(self, backup, volume_id): - """Restore volume metadata from backup. - - If this backup has associated metadata, save it to the restore target - otherwise do nothing. - """ - try: - with rbd_driver.RADOSClient(self) as client: - meta_bak = VolumeMetadataBackup(client, backup.id) - meta = meta_bak.get() - if meta is not None: - self.put_metadata(volume_id, meta) - else: - LOG.debug("Volume %s has no backed up metadata.", - backup.volume_id) - except exception.BackupMetadataUnsupportedVersion: - msg = _("Metadata restore failed due to incompatible version") - LOG.error(msg) - raise exception.BackupOperationError(msg) - - def restore(self, backup, volume_id, volume_file): - """Restore volume from backup in Ceph object store. - - If volume metadata is available this will also be restored. 
- """ - target_volume = self.db.volume_get(self.context, volume_id) - LOG.debug('Starting restore from Ceph backup=%(src)s to ' - 'volume=%(dest)s', - {'src': backup.id, 'dest': target_volume.name}) - - try: - self._restore_volume(backup, target_volume, volume_file) - - # Be tolerant of IO implementations that do not support fileno() - try: - fileno = volume_file.fileno() - except IOError: - LOG.debug("Restore target I/O object does not support " - "fileno() - skipping call to fsync().") - else: - os.fsync(fileno) - - self._restore_metadata(backup, volume_id) - - LOG.debug('Restore to volume %s finished successfully.', - volume_id) - except exception.BackupOperationError as e: - LOG.error('Restore to volume %(volume)s finished with error - ' - '%(error)s.', {'error': e, 'volume': volume_id}) - raise - - def delete_backup(self, backup): - """Delete the given backup from Ceph object store.""" - LOG.debug('Delete started for backup=%s', backup.id) - - delete_failed = False - has_pool = True - try: - self._try_delete_base_image(backup) - except self.rbd.ImageNotFound: - LOG.warning( - "RBD image for backup %(backup)s of volume %(volume)s " - "not found. 
Deleting backup metadata.", - {'backup': backup.id, 'volume': backup.volume_id}) - delete_failed = True - except self.rados.ObjectNotFound: - LOG.warning("The pool %(pool)s doesn't exist.", - {'pool': backup.container}) - delete_failed = True - has_pool = False - - if has_pool: - with rbd_driver.RADOSClient(self, backup.container) as client: - VolumeMetadataBackup(client, backup.id).remove_if_exists() - - if delete_failed: - LOG.info("Delete of backup '%(backup)s' for volume '%(volume)s' " - "finished with warning.", - {'backup': backup.id, 'volume': backup.volume_id}) - else: - LOG.debug("Delete of backup '%(backup)s' for volume " - "'%(volume)s' finished.", - {'backup': backup.id, 'volume': backup.volume_id}) - - -def get_backup_driver(context): - return CephBackupDriver(context) diff --git a/cinder/backup/drivers/glusterfs.py b/cinder/backup/drivers/glusterfs.py deleted file mode 100644 index 807c06ba5..000000000 --- a/cinder/backup/drivers/glusterfs.py +++ /dev/null @@ -1,95 +0,0 @@ -# Copyright (c) 2015 Red Hat, Inc. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -"""Implementation of a backup service that uses GlusterFS as the backend.""" - -import os -import stat - -from os_brick.remotefs import remotefs as remotefs_brick -from oslo_concurrency import processutils as putils -from oslo_config import cfg - -from cinder.backup.drivers import posix -from cinder import exception -from cinder import interface -from cinder import utils - - -glusterfsbackup_service_opts = [ - cfg.StrOpt('glusterfs_backup_mount_point', - default='$state_path/backup_mount', - help='Base dir containing mount point for gluster share.'), - cfg.StrOpt('glusterfs_backup_share', - help='GlusterFS share in ' - ': format. ' - 'Eg: 1.2.3.4:backup_vol'), -] - -CONF = cfg.CONF -CONF.register_opts(glusterfsbackup_service_opts) - - -@interface.backupdriver -class GlusterfsBackupDriver(posix.PosixBackupDriver): - """Provides backup, restore and delete using GlusterFS repository.""" - - def __init__(self, context, db=None): - self._check_configuration() - self.backup_mount_point_base = CONF.glusterfs_backup_mount_point - self.backup_share = CONF.glusterfs_backup_share - self._execute = putils.execute - self._root_helper = utils.get_root_helper() - backup_path = self._init_backup_repo_path() - super(GlusterfsBackupDriver, self).__init__(context, - backup_path=backup_path) - - @staticmethod - def _check_configuration(): - """Raises error if any required configuration flag is missing.""" - required_flags = ['glusterfs_backup_share'] - for flag in required_flags: - if not getattr(CONF, flag, None): - raise exception.ConfigNotFound(path=flag) - - def _init_backup_repo_path(self): - remotefsclient = remotefs_brick.RemoteFsClient( - 'glusterfs', - self._root_helper, - glusterfs_mount_point_base=self.backup_mount_point_base) - remotefsclient.mount(self.backup_share) - - # Ensure we can write to this share - mount_path = remotefsclient.get_mount_point(self.backup_share) - - group_id = os.getegid() - current_group_id = utils.get_file_gid(mount_path) - current_mode = 
utils.get_file_mode(mount_path) - - if group_id != current_group_id: - cmd = ['chgrp', group_id, mount_path] - self._execute(*cmd, root_helper=self._root_helper, - run_as_root=True) - - if not (current_mode & stat.S_IWGRP): - cmd = ['chmod', 'g+w', mount_path] - self._execute(*cmd, root_helper=self._root_helper, - run_as_root=True) - - return mount_path - - -def get_backup_driver(context): - return GlusterfsBackupDriver(context) diff --git a/cinder/backup/drivers/google.py b/cinder/backup/drivers/google.py deleted file mode 100644 index 1d61c998e..000000000 --- a/cinder/backup/drivers/google.py +++ /dev/null @@ -1,369 +0,0 @@ -# Copyright (C) 2012 Hewlett-Packard Development Company, L.P. -# Copyright (c) 2014 TrilioData, Inc -# Copyright (c) 2015 EMC Corporation -# Copyright (C) 2015 Kevin Fox -# Copyright (C) 2015 Tom Barron -# Copyright (C) 2016 Vedams Inc. -# Copyright (C) 2016 Google Inc. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -"""Implementation of a backup service using Google Cloud Storage(GCS) - -Google Cloud Storage json apis are used for backup operations. -Authentication and authorization are based on OAuth2.0. -Server-centric flow is used for authentication. 
-""" - -import base64 -import hashlib -import httplib2 - -from googleapiclient import discovery -from googleapiclient import errors -from googleapiclient import http -from oauth2client import client -from oslo_config import cfg -from oslo_log import log as logging -from oslo_utils import timeutils -import six - -from cinder.backup import chunkeddriver -from cinder import exception -from cinder.i18n import _ -from cinder import interface - - -LOG = logging.getLogger(__name__) - -gcsbackup_service_opts = [ - cfg.StrOpt('backup_gcs_bucket', - help='The GCS bucket to use.'), - cfg.IntOpt('backup_gcs_object_size', - default=52428800, - help='The size in bytes of GCS backup objects.'), - cfg.IntOpt('backup_gcs_block_size', - default=32768, - help='The size in bytes that changes are tracked ' - 'for incremental backups. backup_gcs_object_size ' - 'has to be multiple of backup_gcs_block_size.'), - cfg.IntOpt('backup_gcs_reader_chunk_size', - default=2097152, - help='GCS object will be downloaded in chunks of bytes.'), - cfg.IntOpt('backup_gcs_writer_chunk_size', - default=2097152, - help='GCS object will be uploaded in chunks of bytes. 
' - 'Pass in a value of -1 if the file ' - 'is to be uploaded as a single chunk.'), - cfg.IntOpt('backup_gcs_num_retries', - default=3, - help='Number of times to retry.'), - cfg.ListOpt('backup_gcs_retry_error_codes', - default=['429'], - help='List of GCS error codes.'), - cfg.StrOpt('backup_gcs_bucket_location', - default='US', - help='Location of GCS bucket.'), - cfg.StrOpt('backup_gcs_storage_class', - default='NEARLINE', - help='Storage class of GCS bucket.'), - cfg.StrOpt('backup_gcs_credential_file', - help='Absolute path of GCS service account credential file.'), - cfg.StrOpt('backup_gcs_project_id', - help='Owner project id for GCS bucket.'), - cfg.StrOpt('backup_gcs_user_agent', - default='gcscinder', - help='Http user-agent string for gcs api.'), - cfg.BoolOpt('backup_gcs_enable_progress_timer', - default=True, - help='Enable or Disable the timer to send the periodic ' - 'progress notifications to Ceilometer when backing ' - 'up the volume to the GCS backend storage. The ' - 'default value is True to enable the timer.'), - cfg.URIOpt('backup_gcs_proxy_url', - help='URL for http proxy access.', - secret=True), - -] - -CONF = cfg.CONF -CONF.register_opts(gcsbackup_service_opts) - - -def gcs_logger(func): - def func_wrapper(self, *args, **kwargs): - try: - return func(self, *args, **kwargs) - except errors.Error as err: - raise exception.GCSApiFailure(reason=err) - except client.Error as err: - raise exception.GCSOAuth2Failure(reason=err) - except Exception as err: - raise exception.GCSConnectionFailure(reason=err) - - return func_wrapper - - -@interface.backupdriver -class GoogleBackupDriver(chunkeddriver.ChunkedBackupDriver): - """Provides backup, restore and delete of backup objects within GCS.""" - - def __init__(self, context, db=None): - self.check_gcs_options() - backup_bucket = CONF.backup_gcs_bucket - backup_credential = CONF.backup_gcs_credential_file - self.gcs_project_id = CONF.backup_gcs_project_id - chunk_size_bytes = 
CONF.backup_gcs_object_size - sha_block_size_bytes = CONF.backup_gcs_block_size - enable_progress_timer = CONF.backup_gcs_enable_progress_timer - super(GoogleBackupDriver, self).__init__(context, chunk_size_bytes, - sha_block_size_bytes, - backup_bucket, - enable_progress_timer, - db) - credentials = client.GoogleCredentials.from_stream(backup_credential) - self.reader_chunk_size = CONF.backup_gcs_reader_chunk_size - self.writer_chunk_size = CONF.backup_gcs_writer_chunk_size - self.bucket_location = CONF.backup_gcs_bucket_location - self.storage_class = CONF.backup_gcs_storage_class - self.num_retries = CONF.backup_gcs_num_retries - http_user_agent = http.set_user_agent( - httplib2.Http(proxy_info=self.get_gcs_proxy_info()), - CONF.backup_gcs_user_agent) - self.conn = discovery.build('storage', - 'v1', - http=http_user_agent, - credentials=credentials) - self.resumable = self.writer_chunk_size != -1 - - def get_gcs_proxy_info(self): - if CONF.backup_gcs_proxy_url: - return httplib2.proxy_info_from_url(CONF.backup_gcs_proxy_url) - else: - return httplib2.proxy_info_from_environment() - - def check_gcs_options(self): - required_options = ('backup_gcs_bucket', 'backup_gcs_credential_file', - 'backup_gcs_project_id') - unset_options = [opt for opt in required_options - if not getattr(CONF, opt, None)] - if unset_options: - msg = _('Unset gcs options: %s') % unset_options - LOG.error(msg) - raise exception.InvalidInput(reason=msg) - - @gcs_logger - def put_container(self, bucket): - """Create the bucket if not exists.""" - buckets = self.conn.buckets().list( - project=self.gcs_project_id, - prefix=bucket, - fields="items(name)").execute( - num_retries=self.num_retries).get('items', []) - if not any(b.get('name') == bucket for b in buckets): - self.conn.buckets().insert( - project=self.gcs_project_id, - body={'name': bucket, - 'location': self.bucket_location, - 'storageClass': self.storage_class}).execute( - num_retries=self.num_retries) - - @gcs_logger - def 
get_container_entries(self, bucket, prefix): - """Get bucket entry names.""" - obj_list_dict = self.conn.objects().list( - bucket=bucket, - fields="items(name)", - prefix=prefix).execute(num_retries=self.num_retries).get( - 'items', []) - return [obj_dict.get('name') for obj_dict in obj_list_dict] - - def get_object_writer(self, bucket, object_name, extra_metadata=None): - """Return a writer object. - - Returns a writer object that stores a chunk of volume data in a - GCS object store. - """ - return GoogleObjectWriter(bucket, object_name, self.conn, - self.writer_chunk_size, - self.num_retries, - self.resumable) - - def get_object_reader(self, bucket, object_name, extra_metadata=None): - """Return reader object. - - Returns a reader object that retrieves a chunk of backed-up volume data - from a GCS object store. - """ - return GoogleObjectReader(bucket, object_name, self.conn, - self.reader_chunk_size, - self.num_retries) - - @gcs_logger - def delete_object(self, bucket, object_name): - """Deletes a backup object from a GCS object store.""" - self.conn.objects().delete( - bucket=bucket, - object=object_name).execute(num_retries=self.num_retries) - - def _generate_object_name_prefix(self, backup): - """Generates a GCS backup object name prefix. - - prefix = volume_volid/timestamp/az_saz_backup_bakid - - volid is volume id. - timestamp is time in UTC with format of YearMonthDateHourMinuteSecond. - saz is storage_availability_zone. - bakid is backup id for volid. 
- """ - az = 'az_%s' % self.az - backup_name = '%s_backup_%s' % (az, backup.id) - volume = 'volume_%s' % (backup.volume_id) - timestamp = timeutils.utcnow().strftime("%Y%m%d%H%M%S") - prefix = volume + '/' + timestamp + '/' + backup_name - LOG.debug('generate_object_name_prefix: %s', prefix) - return prefix - - def update_container_name(self, backup, bucket): - """Use the bucket name as provided - don't update.""" - return - - def get_extra_metadata(self, backup, volume): - """GCS driver does not use any extra metadata.""" - return - - -class GoogleObjectWriter(object): - def __init__(self, bucket, object_name, conn, writer_chunk_size, - num_retries, resumable): - self.bucket = bucket - self.object_name = object_name - self.conn = conn - self.data = bytearray() - self.chunk_size = writer_chunk_size - self.num_retries = num_retries - self.resumable = resumable - - def __enter__(self): - return self - - def __exit__(self, exc_type, exc_value, traceback): - self.close() - - def write(self, data): - self.data += data - - @gcs_logger - def close(self): - media = http.MediaIoBaseUpload(six.BytesIO(self.data), - 'application/octet-stream', - chunksize=self.chunk_size, - resumable=self.resumable) - resp = self.conn.objects().insert( - bucket=self.bucket, - name=self.object_name, - body={}, - media_body=media).execute(num_retries=self.num_retries) - etag = resp['md5Hash'] - md5 = hashlib.md5(self.data).digest() - if six.PY3: - md5 = md5.encode('utf-8') - etag = etag.encode('utf-8') - md5 = base64.b64encode(md5) - if etag != md5: - err = _('MD5 of object: %(object_name)s before: ' - '%(md5)s and after: %(etag)s is not same.') % { - 'object_name': self.object_name, - 'md5': md5, 'etag': etag, } - raise exception.InvalidBackup(reason=err) - else: - LOG.debug('MD5 before: %(md5)s and after: %(etag)s ' - 'writing object: %(object_name)s in GCS.', - {'etag': etag, 'md5': md5, - 'object_name': self.object_name, }) - return md5 - - -class GoogleObjectReader(object): - def 
__init__(self, bucket, object_name, conn, reader_chunk_size, - num_retries): - self.bucket = bucket - self.object_name = object_name - self.conn = conn - self.chunk_size = reader_chunk_size - self.num_retries = num_retries - - def __enter__(self): - return self - - def __exit__(self, exc_type, exc_value, traceback): - pass - - @gcs_logger - def read(self): - req = self.conn.objects().get_media( - bucket=self.bucket, - object=self.object_name) - fh = six.BytesIO() - downloader = GoogleMediaIoBaseDownload( - fh, req, chunksize=self.chunk_size) - done = False - while not done: - status, done = downloader.next_chunk(num_retries=self.num_retries) - LOG.debug('GCS Object download Complete.') - return fh.getvalue() - - -class GoogleMediaIoBaseDownload(http.MediaIoBaseDownload): - - @http.util.positional(1) - def next_chunk(self, num_retries=None): - error_codes = CONF.backup_gcs_retry_error_codes - headers = {'range': 'bytes=%d-%d' % - (self._progress, self._progress + self._chunksize)} - - gcs_http = self._request.http - for retry_num in range(num_retries + 1): - if retry_num > 0: - self._sleep(self._rand() * 2 ** retry_num) - - resp, content = gcs_http.request(self._uri, headers=headers) - if resp.status < 500 and (six.text_type(resp.status) - not in error_codes): - break - if resp.status in [200, 206]: - if 'content-location' in resp and ( - resp['content-location'] != self._uri): - self._uri = resp['content-location'] - self._progress += len(content) - self._fd.write(content) - - if 'content-range' in resp: - content_range = resp['content-range'] - length = content_range.rsplit('/', 1)[1] - self._total_size = int(length) - elif 'content-length' in resp: - self._total_size = int(resp['content-length']) - - if self._progress == self._total_size: - self._done = True - return (http.MediaDownloadProgress(self._progress, - self._total_size), self._done) - - else: - raise http.HttpError(resp, content, uri=self._uri) - - -def get_backup_driver(context): - return 
GoogleBackupDriver(context) diff --git a/cinder/backup/drivers/nfs.py b/cinder/backup/drivers/nfs.py deleted file mode 100644 index 3e658c678..000000000 --- a/cinder/backup/drivers/nfs.py +++ /dev/null @@ -1,82 +0,0 @@ -# Copyright (C) 2015 Tom Barron -# Copyright (C) 2015 Kevin Fox -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -"""Implementation of a backup service that uses NFS storage as the backend.""" - -from os_brick.remotefs import remotefs as remotefs_brick -from oslo_config import cfg -from oslo_log import log as logging - -from cinder.backup.drivers import posix -from cinder import exception -from cinder.i18n import _ -from cinder import interface -from cinder import utils - -LOG = logging.getLogger(__name__) - - -nfsbackup_service_opts = [ - cfg.StrOpt('backup_mount_point_base', - default='$state_path/backup_mount', - help='Base dir containing mount point for NFS share.'), - cfg.StrOpt('backup_share', - help='NFS share in hostname:path, ipv4addr:path, ' - 'or "[ipv6addr]:path" format.'), - cfg.StrOpt('backup_mount_options', - help=('Mount options passed to the NFS client. 
See NFS ' - 'man page for details.')), -] - -CONF = cfg.CONF -CONF.register_opts(nfsbackup_service_opts) - - -@interface.backupdriver -class NFSBackupDriver(posix.PosixBackupDriver): - """Provides backup, restore and delete using NFS supplied repository.""" - - def __init__(self, context, db=None): - self._check_configuration() - self.backup_mount_point_base = CONF.backup_mount_point_base - self.backup_share = CONF.backup_share - self.mount_options = CONF.backup_mount_options - backup_path = self._init_backup_repo_path() - LOG.debug("Using NFS backup repository: %s", backup_path) - super(NFSBackupDriver, self).__init__(context, - backup_path=backup_path) - - @staticmethod - def _check_configuration(): - """Raises error if any required configuration flag is missing.""" - required_flags = ['backup_share'] - for flag in required_flags: - if not getattr(CONF, flag, None): - raise exception.ConfigNotFound(_( - 'Required flag %s is not set') % flag) - - def _init_backup_repo_path(self): - remotefsclient = remotefs_brick.RemoteFsClient( - 'nfs', - utils.get_root_helper(), - nfs_mount_point_base=self.backup_mount_point_base, - nfs_mount_options=self.mount_options) - remotefsclient.mount(self.backup_share) - return remotefsclient.get_mount_point(self.backup_share) - - -def get_backup_driver(context): - return NFSBackupDriver(context) diff --git a/cinder/backup/drivers/posix.py b/cinder/backup/drivers/posix.py deleted file mode 100644 index 3d84b8a5c..000000000 --- a/cinder/backup/drivers/posix.py +++ /dev/null @@ -1,144 +0,0 @@ -# Copyright (C) 2015 Tom Barron -# Copyright (C) 2015 Kevin Fox -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. 
You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -"""Implementation of a backup service that uses a posix filesystem as the - backend.""" - -import os -import os.path -import stat - -from oslo_config import cfg -from oslo_log import log as logging -from oslo_utils import timeutils - -from cinder.backup import chunkeddriver -from cinder import exception -from cinder import interface - -LOG = logging.getLogger(__name__) - -SHA_SIZE = 32768 -# Multiple of SHA_SIZE, close to a characteristic OS max file system size. -BACKUP_FILE_SIZE = 61035 * 32768 - -posixbackup_service_opts = [ - cfg.IntOpt('backup_file_size', - default=BACKUP_FILE_SIZE, - help='The maximum size in bytes of the files used to hold ' - 'backups. If the volume being backed up exceeds this ' - 'size, then it will be backed up into multiple files.' - 'backup_file_size must be a multiple of ' - 'backup_sha_block_size_bytes.'), - cfg.IntOpt('backup_sha_block_size_bytes', - default=SHA_SIZE, - help='The size in bytes that changes are tracked ' - 'for incremental backups. backup_file_size has ' - 'to be multiple of backup_sha_block_size_bytes.'), - cfg.BoolOpt('backup_enable_progress_timer', - default=True, - help='Enable or Disable the timer to send the periodic ' - 'progress notifications to Ceilometer when backing ' - 'up the volume to the backend storage. 
The ' - 'default value is True to enable the timer.'), - cfg.StrOpt('backup_posix_path', - default='$state_path/backup', - help='Path specifying where to store backups.'), - cfg.StrOpt('backup_container', - help='Custom directory to use for backups.'), -] - -CONF = cfg.CONF -CONF.register_opts(posixbackup_service_opts) - - -@interface.backupdriver -class PosixBackupDriver(chunkeddriver.ChunkedBackupDriver): - """Provides backup, restore and delete using a Posix file system.""" - - def __init__(self, context, db=None, backup_path=None): - chunk_size_bytes = CONF.backup_file_size - sha_block_size_bytes = CONF.backup_sha_block_size_bytes - backup_default_container = CONF.backup_container - enable_progress_timer = CONF.backup_enable_progress_timer - super(PosixBackupDriver, self).__init__(context, chunk_size_bytes, - sha_block_size_bytes, - backup_default_container, - enable_progress_timer, - db) - self.backup_path = backup_path - if not backup_path: - self.backup_path = CONF.backup_posix_path - if not self.backup_path: - raise exception.ConfigNotFound(path='backup_path') - LOG.debug("Using backup repository: %s", self.backup_path) - - def update_container_name(self, backup, container): - if container is not None: - return container - id = backup['id'] - return os.path.join(id[0:2], id[2:4], id) - - def put_container(self, container): - path = os.path.join(self.backup_path, container) - if not os.path.exists(path): - os.makedirs(path) - permissions = ( - stat.S_IRUSR | - stat.S_IWUSR | - stat.S_IXUSR | - stat.S_IRGRP | - stat.S_IWGRP | - stat.S_IXGRP) - os.chmod(path, permissions) - - def get_container_entries(self, container, prefix): - path = os.path.join(self.backup_path, container) - return [i for i in os.listdir(path) if i.startswith(prefix)] - - def get_object_writer(self, container, object_name, extra_metadata=None): - path = os.path.join(self.backup_path, container, object_name) - f = open(path, 'wb') - permissions = ( - stat.S_IRUSR | - stat.S_IWUSR | - 
stat.S_IRGRP | - stat.S_IWGRP) - os.chmod(path, permissions) - return f - - def get_object_reader(self, container, object_name, extra_metadata=None): - path = os.path.join(self.backup_path, container, object_name) - return open(path, 'rb') - - def delete_object(self, container, object_name): - # TODO(tbarron): clean up the container path if it is empty - path = os.path.join(self.backup_path, container, object_name) - os.remove(path) - - def _generate_object_name_prefix(self, backup): - timestamp = timeutils.utcnow().strftime("%Y%m%d%H%M%S") - prefix = 'volume_%s_%s_backup_%s' % (backup.volume_id, timestamp, - backup.id) - LOG.debug('_generate_object_name_prefix: %s', prefix) - return prefix - - def get_extra_metadata(self, backup, volume): - return None - - -def get_backup_driver(context): - return PosixBackupDriver(context) diff --git a/cinder/backup/drivers/swift.py b/cinder/backup/drivers/swift.py deleted file mode 100644 index f67be62ce..000000000 --- a/cinder/backup/drivers/swift.py +++ /dev/null @@ -1,372 +0,0 @@ -# Copyright (C) 2012 Hewlett-Packard Development Company, L.P. -# Copyright (c) 2014 TrilioData, Inc -# Copyright (c) 2015 EMC Corporation -# Copyright (C) 2015 Kevin Fox -# Copyright (C) 2015 Tom Barron -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -"""Implementation of a backup service that uses Swift as the backend - -**Related Flags** - -:backup_swift_url: The URL of the Swift endpoint (default: None, use catalog). 
-:backup_swift_auth_url: The URL of the Keystone endpoint for authentication - (default: None, use catalog). -:swift_catalog_info: Info to match when looking for swift in the service ' - catalog. -:keystone_catalog_info: Info to match when looking for keystone in the service - catalog. -:backup_swift_object_size: The size in bytes of the Swift objects used - for volume backups (default: 52428800). -:backup_swift_retry_attempts: The number of retries to make for Swift - operations (default: 10). -:backup_swift_retry_backoff: The backoff time in seconds between retrying - failed Swift operations (default: 10). -:backup_compression_algorithm: Compression algorithm to use for volume - backups. Supported options are: - None (to disable), zlib and bz2 (default: zlib) -:backup_swift_ca_cert_file: The location of the CA certificate file to use - for swift client requests (default: None) -:backup_swift_auth_insecure: If true, bypass verification of server's - certificate for SSL connections (default: False) -""" - -import hashlib -import socket - -from oslo_config import cfg -from oslo_log import log as logging -from oslo_utils import timeutils -import six -from swiftclient import client as swift - -from cinder.backup import chunkeddriver -from cinder import exception -from cinder.i18n import _ -from cinder import interface - -LOG = logging.getLogger(__name__) - -swiftbackup_service_opts = [ - cfg.URIOpt('backup_swift_url', - help='The URL of the Swift endpoint'), - cfg.URIOpt('backup_swift_auth_url', - help='The URL of the Keystone endpoint'), - cfg.StrOpt('swift_catalog_info', - default='object-store:swift:publicURL', - help='Info to match when looking for swift in the service ' - 'catalog. Format is: separated values of the form: ' - ':: - ' - 'Only used if backup_swift_url is unset'), - cfg.StrOpt('keystone_catalog_info', - default='identity:Identity Service:publicURL', - help='Info to match when looking for keystone in the service ' - 'catalog. 
Format is: separated values of the form: ' - ':: - ' - 'Only used if backup_swift_auth_url is unset'), - cfg.StrOpt('backup_swift_auth', - default='per_user', - help='Swift authentication mechanism'), - cfg.StrOpt('backup_swift_auth_version', - default='1', - help='Swift authentication version. Specify "1" for auth 1.0' - ', or "2" for auth 2.0 or "3" for auth 3.0'), - cfg.StrOpt('backup_swift_tenant', - help='Swift tenant/account name. Required when connecting' - ' to an auth 2.0 system'), - cfg.StrOpt('backup_swift_user_domain', - default=None, - help='Swift user domain name. Required when connecting' - ' to an auth 3.0 system'), - cfg.StrOpt('backup_swift_project_domain', - default=None, - help='Swift project domain name. Required when connecting' - ' to an auth 3.0 system'), - cfg.StrOpt('backup_swift_project', - default=None, - help='Swift project/account name. Required when connecting' - ' to an auth 3.0 system'), - cfg.StrOpt('backup_swift_user', - help='Swift user name'), - cfg.StrOpt('backup_swift_key', - secret=True, - help='Swift key for authentication'), - cfg.StrOpt('backup_swift_container', - default='volumebackups', - help='The default Swift container to use'), - cfg.IntOpt('backup_swift_object_size', - default=52428800, - help='The size in bytes of Swift backup objects'), - cfg.IntOpt('backup_swift_block_size', - default=32768, - help='The size in bytes that changes are tracked ' - 'for incremental backups. backup_swift_object_size ' - 'has to be multiple of backup_swift_block_size.'), - cfg.IntOpt('backup_swift_retry_attempts', - default=3, - help='The number of retries to make for Swift operations'), - cfg.IntOpt('backup_swift_retry_backoff', - default=2, - help='The backoff time in seconds between Swift retries'), - cfg.BoolOpt('backup_swift_enable_progress_timer', - default=True, - help='Enable or Disable the timer to send the periodic ' - 'progress notifications to Ceilometer when backing ' - 'up the volume to the Swift backend storage. 
The ' - 'default value is True to enable the timer.'), - cfg.StrOpt('backup_swift_ca_cert_file', - help='Location of the CA certificate file to use for swift ' - 'client requests.'), - cfg.BoolOpt('backup_swift_auth_insecure', - default=False, - help='Bypass verification of server certificate when ' - 'making SSL connection to Swift.'), -] - -CONF = cfg.CONF -CONF.register_opts(swiftbackup_service_opts) - - -@interface.backupdriver -class SwiftBackupDriver(chunkeddriver.ChunkedBackupDriver): - """Provides backup, restore and delete of backup objects within Swift.""" - - def __init__(self, context, db=None): - chunk_size_bytes = CONF.backup_swift_object_size - sha_block_size_bytes = CONF.backup_swift_block_size - backup_default_container = CONF.backup_swift_container - enable_progress_timer = CONF.backup_swift_enable_progress_timer - super(SwiftBackupDriver, self).__init__(context, chunk_size_bytes, - sha_block_size_bytes, - backup_default_container, - enable_progress_timer, - db) - self.swift_attempts = CONF.backup_swift_retry_attempts - self.swift_backoff = CONF.backup_swift_retry_backoff - self.backup_swift_auth_insecure = CONF.backup_swift_auth_insecure - - if CONF.backup_swift_auth == 'single_user': - if CONF.backup_swift_user is None: - LOG.error("single_user auth mode enabled, " - "but %(param)s not set", - {'param': 'backup_swift_user'}) - raise exception.ParameterNotFound(param='backup_swift_user') - if CONF.backup_swift_auth_url is None: - self.auth_url = None - info = CONF.keystone_catalog_info - try: - service_type, service_name, endpoint_type = info.split(':') - except ValueError: - raise exception.BackupDriverException(_( - "Failed to parse the configuration option " - "'keystone_catalog_info', must be in the form " - "::")) - for entry in context.service_catalog: - if entry.get('type') == service_type: - # It is assumed that service_types are unique within - # the service catalog, so once the correct one is found - # it is safe to break out of the 
loop - self.auth_url = entry.get( - 'endpoints')[0].get(endpoint_type) - break - else: - self.auth_url = CONF.backup_swift_auth_url - if self.auth_url is None: - raise exception.BackupDriverException(_( - "Could not determine which Keystone endpoint to use. This " - "can either be set in the service catalog or with the " - "cinder.conf config option 'backup_swift_auth_url'.")) - LOG.debug("Using auth URL %s", self.auth_url) - LOG.debug('Connect to %s in "%s" mode', CONF.backup_swift_auth_url, - CONF.backup_swift_auth) - - os_options = {} - if CONF.backup_swift_user_domain is not None: - os_options['user_domain_name'] = CONF.backup_swift_user_domain - if CONF.backup_swift_project_domain is not None: - os_options['project_domain_name'] = ( - CONF.backup_swift_project_domain - ) - if CONF.backup_swift_project is not None: - os_options['project_name'] = CONF.backup_swift_project - self.conn = swift.Connection( - authurl=self.auth_url, - auth_version=CONF.backup_swift_auth_version, - tenant_name=CONF.backup_swift_tenant, - user=CONF.backup_swift_user, - key=CONF.backup_swift_key, - os_options=os_options, - retries=self.swift_attempts, - starting_backoff=self.swift_backoff, - insecure=self.backup_swift_auth_insecure, - cacert=CONF.backup_swift_ca_cert_file) - else: - if CONF.backup_swift_url is None: - self.swift_url = None - info = CONF.swift_catalog_info - try: - service_type, service_name, endpoint_type = info.split(':') - except ValueError: - raise exception.BackupDriverException(_( - "Failed to parse the configuration option " - "'swift_catalog_info', must be in the form " - "::")) - for entry in context.service_catalog: - if entry.get('type') == service_type: - # It is assumed that service_types are unique within - # the service catalog, so once the correct one is found - # it is safe to break out of the loop - self.swift_url = entry.get( - 'endpoints')[0].get(endpoint_type) - break - else: - self.swift_url = '%s%s' % (CONF.backup_swift_url, - context.project_id) - 
if self.swift_url is None: - raise exception.BackupDriverException(_( - "Could not determine which Swift endpoint to use. This " - "can either be set in the service catalog or with the " - "cinder.conf config option 'backup_swift_url'.")) - LOG.debug("Using swift URL %s", self.swift_url) - LOG.debug('Connect to %s in "%s" mode', CONF.backup_swift_url, - CONF.backup_swift_auth) - - self.conn = swift.Connection(retries=self.swift_attempts, - preauthurl=self.swift_url, - preauthtoken=self.context.auth_token, - starting_backoff=self.swift_backoff, - insecure=( - self.backup_swift_auth_insecure), - cacert=CONF.backup_swift_ca_cert_file) - - class SwiftObjectWriter(object): - def __init__(self, container, object_name, conn): - self.container = container - self.object_name = object_name - self.conn = conn - self.data = bytearray() - - def __enter__(self): - return self - - def __exit__(self, exc_type, exc_value, traceback): - self.close() - - def write(self, data): - self.data += data - - def close(self): - reader = six.BytesIO(self.data) - try: - etag = self.conn.put_object(self.container, self.object_name, - reader, - content_length=len(self.data)) - except socket.error as err: - raise exception.SwiftConnectionFailed(reason=err) - LOG.debug('swift MD5 for %(object_name)s: %(etag)s', - {'object_name': self.object_name, 'etag': etag, }) - md5 = hashlib.md5(self.data).hexdigest() - LOG.debug('backup MD5 for %(object_name)s: %(md5)s', - {'object_name': self.object_name, 'md5': md5}) - if etag != md5: - err = _('error writing object to swift, MD5 of object in ' - 'swift %(etag)s is not the same as MD5 of object sent ' - 'to swift %(md5)s'), {'etag': etag, 'md5': md5} - raise exception.InvalidBackup(reason=err) - return md5 - - class SwiftObjectReader(object): - def __init__(self, container, object_name, conn): - self.container = container - self.object_name = object_name - self.conn = conn - - def __enter__(self): - return self - - def __exit__(self, exc_type, exc_value, 
traceback): - pass - - def read(self): - try: - (_resp, body) = self.conn.get_object(self.container, - self.object_name) - except socket.error as err: - raise exception.SwiftConnectionFailed(reason=err) - return body - - def put_container(self, container): - """Create the container if needed. No failure if it pre-exists.""" - try: - self.conn.put_container(container) - except socket.error as err: - raise exception.SwiftConnectionFailed(reason=err) - return - - def get_container_entries(self, container, prefix): - """Get container entry names""" - try: - swift_objects = self.conn.get_container(container, - prefix=prefix, - full_listing=True)[1] - except socket.error as err: - raise exception.SwiftConnectionFailed(reason=err) - swift_object_names = [swift_obj['name'] for swift_obj in swift_objects] - return swift_object_names - - def get_object_writer(self, container, object_name, extra_metadata=None): - """Return a writer object. - - Returns a writer object that stores a chunk of volume data in a - Swift object store. - """ - return self.SwiftObjectWriter(container, object_name, self.conn) - - def get_object_reader(self, container, object_name, extra_metadata=None): - """Return reader object. - - Returns a reader object that retrieves a chunk of backed-up volume data - from a Swift object store. 
- """ - return self.SwiftObjectReader(container, object_name, self.conn) - - def delete_object(self, container, object_name): - """Deletes a backup object from a Swift object store.""" - try: - self.conn.delete_object(container, object_name) - except socket.error as err: - raise exception.SwiftConnectionFailed(reason=err) - - def _generate_object_name_prefix(self, backup): - """Generates a Swift backup object name prefix.""" - az = 'az_%s' % self.az - backup_name = '%s_backup_%s' % (az, backup['id']) - volume = 'volume_%s' % (backup['volume_id']) - timestamp = timeutils.utcnow().strftime("%Y%m%d%H%M%S") - prefix = volume + '/' + timestamp + '/' + backup_name - LOG.debug('generate_object_name_prefix: %s', prefix) - return prefix - - def update_container_name(self, backup, container): - """Use the container name as provided - don't update.""" - return container - - def get_extra_metadata(self, backup, volume): - """Swift driver does not use any extra metadata.""" - return None - - -def get_backup_driver(context): - return SwiftBackupDriver(context) diff --git a/cinder/backup/drivers/tsm.py b/cinder/backup/drivers/tsm.py deleted file mode 100644 index 9987d6024..000000000 --- a/cinder/backup/drivers/tsm.py +++ /dev/null @@ -1,536 +0,0 @@ -# Copyright 2013 IBM Corp -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -"""Backup driver for IBM Tivoli Storage Manager (TSM). 
- -Implementation of a backup service that uses IBM Tivoli Storage Manager (TSM) -as the backend. The driver uses TSM command line dsmc utility to -run the backup and restore operations. -This version supports backup of block devices, e.g, FC, iSCSI, local as well as -regular files. - -A prerequisite for using the IBM TSM backup service is configuring the -Cinder host for using TSM. -""" - -import json -import os -import stat - -from oslo_concurrency import processutils -from oslo_config import cfg -from oslo_log import log as logging - -from cinder.backup import driver -from cinder import exception -from cinder.i18n import _ -from cinder import interface -from cinder import utils - -LOG = logging.getLogger(__name__) - -tsm_opts = [ - cfg.StrOpt('backup_tsm_volume_prefix', - default='backup', - help='Volume prefix for the backup id when backing up to TSM'), - cfg.StrOpt('backup_tsm_password', - default='password', - help='TSM password for the running username', - secret=True), - cfg.BoolOpt('backup_tsm_compression', - default=True, - help='Enable or Disable compression for backups'), -] - -CONF = cfg.CONF -CONF.register_opts(tsm_opts) - -VALID_BACKUP_MODES = ['image', 'file'] - - -def _get_backup_metadata(backup, operation): - """Return metadata persisted with backup object.""" - try: - svc_dict = json.loads(backup.service_metadata) - backup_path = svc_dict.get('backup_path') - backup_mode = svc_dict.get('backup_mode') - except TypeError: - # for backwards compatibility - vol_prefix = CONF.backup_tsm_volume_prefix - backup_id = backup['id'] - backup_path = utils.make_dev_path('%s-%s' % - (vol_prefix, backup_id)) - backup_mode = 'image' - - if backup_mode not in VALID_BACKUP_MODES: - volume_id = backup['volume_id'] - backup_id = backup['id'] - err = (_('%(op)s: backup %(bck_id)s, volume %(vol_id)s failed. ' - 'Backup object has unexpected mode. 
Image or file ' - 'backups supported, actual mode is %(vol_mode)s.') - % {'op': operation, - 'bck_id': backup_id, - 'vol_id': volume_id, - 'vol_mode': backup_mode}) - LOG.error(err) - raise exception.InvalidBackup(reason=err) - return backup_path, backup_mode - - -def _image_mode(backup_mode): - """True if backup is image type.""" - return backup_mode == 'image' - - -def _make_link(volume_path, backup_path, vol_id): - """Create a hard link for the volume block device. - - The IBM TSM client performs an image backup on a block device. - The name of the block device is the backup prefix plus the backup id - - :param volume_path: real device path name for volume - :param backup_path: path name TSM will use as volume to backup - :param vol_id: id of volume to backup (for reporting) - - :raises: InvalidBackup - """ - - try: - utils.execute('ln', volume_path, backup_path, - run_as_root=True, - check_exit_code=True) - except processutils.ProcessExecutionError as exc: - err = (_('backup: %(vol_id)s failed to create device hardlink ' - 'from %(vpath)s to %(bpath)s.\n' - 'stdout: %(out)s\n stderr: %(err)s') - % {'vol_id': vol_id, - 'vpath': volume_path, - 'bpath': backup_path, - 'out': exc.stdout, - 'err': exc.stderr}) - LOG.error(err) - raise exception.InvalidBackup(reason=err) - - -def _create_unique_device_link(backup_id, volume_path, volume_id, bckup_mode): - """Create a consistent hardlink for the volume block device. - - Create a consistent hardlink using the backup id so TSM - will be able to backup and restore to the same block device. 
- - :param backup_id: the backup id - :param volume_path: real path of the backup/restore device - :param volume_id: Volume id for backup or as restore target - :param bckup_mode: TSM backup mode, either 'image' or 'file' - :raises: InvalidBackup - :returns: str -- hardlink path of the volume block device - """ - if _image_mode(bckup_mode): - hardlink_path = utils.make_dev_path('%s-%s' % - (CONF.backup_tsm_volume_prefix, - backup_id)) - else: - dir, volname = os.path.split(volume_path) - hardlink_path = ('%s/%s-%s' % - (dir, - CONF.backup_tsm_volume_prefix, - backup_id)) - _make_link(volume_path, hardlink_path, volume_id) - return hardlink_path - - -def _check_dsmc_output(output, check_attrs, exact_match=True): - """Check dsmc command line utility output. - - Parse the output of the dsmc command and make sure that a given - attribute is present, and that it has the proper value. - TSM attribute has the format of "text : value". - - :param output: TSM output to parse - :param check_attrs: text to identify in the output - :param exact_match: if True, the check will pass only if the parsed - value is equal to the value specified in check_attrs. If false, the - check will pass if the parsed value is greater than or equal to the - value specified in check_attrs. This is needed because for file - backups, the parent directories may also be included the first a - volume is backed up. 
- :returns: bool -- indicate if requited output attribute found in output - """ - - parsed_attrs = {} - for line in output.split('\n'): - # parse TSM output: look for "msg : value - key, sep, val = line.partition(':') - if sep is not None and key is not None and len(val.strip()) > 0: - parsed_attrs[key] = val.strip() - - for ckey, cval in check_attrs.items(): - if ckey not in parsed_attrs: - return False - elif exact_match and parsed_attrs[ckey] != cval: - return False - elif not exact_match and int(parsed_attrs[ckey]) < int(cval): - return False - - return True - - -def _get_volume_realpath(volume_file, volume_id): - """Get the real path for the volume block device. - - If the volume is not a block device or a regular file issue an - InvalidBackup exception. - - :param volume_file: file object representing the volume - :param volume_id: Volume id for backup or as restore target - :raises: InvalidBackup - :returns: str -- real path of volume device - :returns: str -- backup mode to be used - """ - - try: - # Get real path - volume_path = os.path.realpath(volume_file.name) - # Verify that path is a block device - volume_mode = os.stat(volume_path).st_mode - if stat.S_ISBLK(volume_mode): - backup_mode = 'image' - elif stat.S_ISREG(volume_mode): - backup_mode = 'file' - else: - err = (_('backup: %(vol_id)s failed. ' - '%(path)s is unexpected file type. Block or regular ' - 'files supported, actual file mode is %(vol_mode)s.') - % {'vol_id': volume_id, - 'path': volume_path, - 'vol_mode': volume_mode}) - LOG.error(err) - raise exception.InvalidBackup(reason=err) - - except AttributeError: - err = (_('backup: %(vol_id)s failed. Cannot obtain real path ' - 'to volume at %(path)s.') - % {'vol_id': volume_id, - 'path': volume_file}) - LOG.error(err) - raise exception.InvalidBackup(reason=err) - except OSError: - err = (_('backup: %(vol_id)s failed. 
' - '%(path)s is not a file.') - % {'vol_id': volume_id, - 'path': volume_path}) - LOG.error(err) - raise exception.InvalidBackup(reason=err) - return volume_path, backup_mode - - -def _cleanup_device_hardlink(hardlink_path, volume_path, volume_id): - """Remove the hardlink for the volume block device. - - :param hardlink_path: hardlink to the volume block device - :param volume_path: real path of the backup/restore device - :param volume_id: Volume id for backup or as restore target - """ - - try: - utils.execute('rm', - '-f', - hardlink_path, - run_as_root=True) - except processutils.ProcessExecutionError as exc: - LOG.error('backup: %(vol_id)s failed to remove backup hardlink ' - 'from %(vpath)s to %(bpath)s.\n' - 'stdout: %(out)s\n stderr: %(err)s.', - {'vol_id': volume_id, - 'vpath': volume_path, - 'bpath': hardlink_path, - 'out': exc.stdout, - 'err': exc.stderr}) - - -@interface.backupdriver -class TSMBackupDriver(driver.BackupDriver): - """Provides backup, restore and delete of volumes backup for TSM.""" - - DRIVER_VERSION = '1.0.0' - - def __init__(self, context, db=None): - super(TSMBackupDriver, self).__init__(context, db) - self.tsm_password = CONF.backup_tsm_password - self.volume_prefix = CONF.backup_tsm_volume_prefix - - def _do_backup(self, backup_path, vol_id, backup_mode): - """Perform the actual backup operation. 
- - :param backup_path: volume path - :param vol_id: volume id - :param backup_mode: file mode of source volume; 'image' or 'file' - :raises: InvalidBackup - """ - - backup_attrs = {'Total number of objects backed up': '1'} - compr_flag = 'yes' if CONF.backup_tsm_compression else 'no' - - backup_cmd = ['dsmc', 'backup'] - if _image_mode(backup_mode): - backup_cmd.append('image') - backup_cmd.extend(['-quiet', - '-compression=%s' % compr_flag, - '-password=%s' % self.tsm_password, - backup_path]) - - out, err = utils.execute(*backup_cmd, - run_as_root=True, - check_exit_code=False) - - success = _check_dsmc_output(out, backup_attrs, exact_match=False) - if not success: - err = (_('backup: %(vol_id)s failed to obtain backup ' - 'success notification from server.\n' - 'stdout: %(out)s\n stderr: %(err)s') - % {'vol_id': vol_id, - 'out': out, - 'err': err}) - LOG.error(err) - raise exception.InvalidBackup(reason=err) - - def _do_restore(self, backup_path, restore_path, vol_id, backup_mode): - """Perform the actual restore operation. 
- - :param backup_path: the path the backup was created from, this - identifies the backup to tsm - :param restore_path: volume path to restore into - :param vol_id: volume id - :param backup_mode: mode used to create the backup ('image' or 'file') - :raises: InvalidBackup - """ - - restore_attrs = {'Total number of objects restored': '1'} - restore_cmd = ['dsmc', 'restore'] - if _image_mode(backup_mode): - restore_cmd.append('image') - restore_cmd.append('-noprompt') # suppress prompt - else: - restore_cmd.append('-replace=yes') # suppress prompt - - restore_cmd.extend(['-quiet', - '-password=%s' % self.tsm_password, - backup_path]) - - if restore_path != backup_path: - restore_cmd.append(restore_path) - - out, err = utils.execute(*restore_cmd, - run_as_root=True, - check_exit_code=False) - - success = _check_dsmc_output(out, restore_attrs) - if not success: - err = (_('restore: %(vol_id)s failed.\n' - 'stdout: %(out)s\n stderr: %(err)s.') - % {'vol_id': vol_id, - 'out': out, - 'err': err}) - LOG.error(err) - raise exception.InvalidBackup(reason=err) - - def backup(self, backup, volume_file, backup_metadata=False): - """Backup the given volume to TSM. - - TSM performs a backup of a volume. The volume_file is used - to determine the path of the block device that TSM will back-up. 
- - :param backup: backup information for volume - :param volume_file: file object representing the volume - :param backup_metadata: whether or not to backup volume metadata - :raises InvalidBackup: - """ - - # TODO(dosaboy): this needs implementing (see backup.drivers.ceph for - # an example) - if backup_metadata: - msg = _("Volume metadata backup requested but this driver does " - "not yet support this feature.") - raise exception.InvalidBackup(reason=msg) - - volume_path, backup_mode = _get_volume_realpath(volume_file, - backup.volume_id) - LOG.debug('Starting backup of volume: %(volume_id)s to TSM,' - ' volume path: %(volume_path)s, mode: %(mode)s.', - {'volume_id': backup.volume_id, - 'volume_path': volume_path, - 'mode': backup_mode}) - - backup_path = _create_unique_device_link(backup.id, - volume_path, - backup.volume_id, - backup_mode) - - service_metadata = {'backup_mode': backup_mode, - 'backup_path': backup_path} - backup.service_metadata = json.dumps(service_metadata) - backup.save() - - try: - self._do_backup(backup_path, backup.volume_id, backup_mode) - except processutils.ProcessExecutionError as exc: - err = (_('backup: %(vol_id)s failed to run dsmc ' - 'on %(bpath)s.\n' - 'stdout: %(out)s\n stderr: %(err)s') - % {'vol_id': backup.volume_id, - 'bpath': backup_path, - 'out': exc.stdout, - 'err': exc.stderr}) - LOG.error(err) - raise exception.InvalidBackup(reason=err) - except exception.Error as exc: - err = (_('backup: %(vol_id)s failed to run dsmc ' - 'due to invalid arguments ' - 'on %(bpath)s.\n' - 'stdout: %(out)s\n stderr: %(err)s') - % {'vol_id': backup.volume_id, - 'bpath': backup_path, - 'out': exc.stdout, - 'err': exc.stderr}) - LOG.error(err) - raise exception.InvalidBackup(reason=err) - - finally: - _cleanup_device_hardlink(backup_path, volume_path, - backup.volume_id) - - LOG.debug('Backup %s finished.', backup.id) - - def restore(self, backup, volume_id, volume_file): - """Restore the given volume backup from TSM server. 
- - :param backup: backup information for volume - :param volume_id: volume id - :param volume_file: file object representing the volume - :raises: InvalidBackup - """ - - # backup_path is the path that was originally backed up. - backup_path, backup_mode = _get_backup_metadata(backup, 'restore') - - LOG.debug('Starting restore of backup from TSM ' - 'to volume %(volume_id)s, ' - 'backup: %(backup_id)s, ' - 'mode: %(mode)s.', - {'volume_id': volume_id, - 'backup_id': backup.id, - 'mode': backup_mode}) - - # volume_path is the path to restore into. This may - # be different than the original volume. - volume_path, unused = _get_volume_realpath(volume_file, - volume_id) - - restore_path = _create_unique_device_link(backup.id, - volume_path, - volume_id, - backup_mode) - - try: - self._do_restore(backup_path, restore_path, volume_id, backup_mode) - except processutils.ProcessExecutionError as exc: - err = (_('restore: %(vol_id)s failed to run dsmc ' - 'on %(bpath)s.\n' - 'stdout: %(out)s\n stderr: %(err)s') - % {'vol_id': volume_id, - 'bpath': restore_path, - 'out': exc.stdout, - 'err': exc.stderr}) - LOG.error(err) - raise exception.InvalidBackup(reason=err) - except exception.Error as exc: - err = (_('restore: %(vol_id)s failed to run dsmc ' - 'due to invalid arguments ' - 'on %(bpath)s.\n' - 'stdout: %(out)s\n stderr: %(err)s') - % {'vol_id': volume_id, - 'bpath': restore_path, - 'out': exc.stdout, - 'err': exc.stderr}) - LOG.error(err) - raise exception.InvalidBackup(reason=err) - - finally: - _cleanup_device_hardlink(restore_path, volume_path, volume_id) - - LOG.debug('Restore %(backup_id)s to %(volume_id)s finished.', - {'backup_id': backup.id, - 'volume_id': volume_id}) - - def delete_backup(self, backup): - """Delete the given backup from TSM server. 
- - :param backup: backup information for volume - :raises: InvalidBackup - """ - - delete_attrs = {'Total number of objects deleted': '1'} - delete_path, backup_mode = _get_backup_metadata(backup, 'restore') - - LOG.debug('Delete started for backup: %(backup)s, mode: %(mode)s.', - {'backup': backup.id, - 'mode': backup_mode}) - - try: - out, err = utils.execute('dsmc', - 'delete', - 'backup', - '-quiet', - '-noprompt', - '-objtype=%s' % backup_mode, - '-password=%s' % self.tsm_password, - delete_path, - run_as_root=True, - check_exit_code=False) - - except processutils.ProcessExecutionError as exc: - err = (_('delete: %(vol_id)s failed to run dsmc with ' - 'stdout: %(out)s\n stderr: %(err)s') - % {'vol_id': backup.volume_id, - 'out': exc.stdout, - 'err': exc.stderr}) - LOG.error(err) - raise exception.InvalidBackup(reason=err) - except exception.Error as exc: - err = (_('delete: %(vol_id)s failed to run dsmc ' - 'due to invalid arguments with ' - 'stdout: %(out)s\n stderr: %(err)s') - % {'vol_id': backup.volume_id, - 'out': exc.stdout, - 'err': exc.stderr}) - LOG.error(err) - raise exception.InvalidBackup(reason=err) - - success = _check_dsmc_output(out, delete_attrs) - if not success: - # log error if tsm cannot delete the backup object - # but do not raise exception so that cinder backup - # object can be removed. - LOG.error('delete: %(vol_id)s failed with ' - 'stdout: %(out)s\n stderr: %(err)s', - {'vol_id': backup.volume_id, - 'out': out, - 'err': err}) - - LOG.debug('Delete %s finished.', backup['id']) - - -def get_backup_driver(context): - return TSMBackupDriver(context) diff --git a/cinder/backup/manager.py b/cinder/backup/manager.py deleted file mode 100644 index 65163a9a6..000000000 --- a/cinder/backup/manager.py +++ /dev/null @@ -1,910 +0,0 @@ -# Copyright (C) 2012 Hewlett-Packard Development Company, L.P. -# All Rights Reserved. 
-# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -""" -Backup manager manages volume backups. - -Volume Backups are full copies of persistent volumes stored in a backup -store e.g. an object store or any other backup store if and when support is -added. They are usable without the original object being available. A -volume backup can be restored to the original volume it was created from or -any other available volume with a minimum size of the original volume. -Volume backups can be created, restored, deleted and listed. - -**Related Flags** - -:backup_manager: The module name of a class derived from - :class:`manager.Manager` (default: - :class:`cinder.backup.manager.Manager`). 
- -""" - -import os -from oslo_config import cfg -from oslo_log import log as logging -import oslo_messaging as messaging -from oslo_utils import excutils -from oslo_utils import importutils -import six - -from cinder.backup import driver -from cinder.backup import rpcapi as backup_rpcapi -from cinder import context -from cinder import exception -from cinder.i18n import _ -from cinder import manager -from cinder import objects -from cinder.objects import fields -from cinder import quota -from cinder import rpc -from cinder import utils -from cinder.volume import rpcapi as volume_rpcapi -from cinder.volume import utils as volume_utils - -LOG = logging.getLogger(__name__) - -backup_manager_opts = [ - cfg.StrOpt('backup_driver', - default='cinder.backup.drivers.swift', - help='Driver to use for backups.',), - cfg.BoolOpt('backup_service_inithost_offload', - default=True, - help='Offload pending backup delete during ' - 'backup service startup. If false, the backup service ' - 'will remain down until all pending backups are ' - 'deleted.',), -] - -# This map doesn't need to be extended in the future since it's only -# for old backup services -mapper = {'cinder.backup.services.swift': 'cinder.backup.drivers.swift', - 'cinder.backup.services.ceph': 'cinder.backup.drivers.ceph'} - -CONF = cfg.CONF -CONF.register_opts(backup_manager_opts) -CONF.import_opt('use_multipath_for_image_xfer', 'cinder.volume.driver') -CONF.import_opt('num_volume_device_scan_tries', 'cinder.volume.driver') -QUOTAS = quota.QUOTAS - - -class BackupManager(manager.ThreadPoolManager): - """Manages backup of block storage devices.""" - - RPC_API_VERSION = backup_rpcapi.BackupAPI.RPC_API_VERSION - - target = messaging.Target(version=RPC_API_VERSION) - - def __init__(self, *args, **kwargs): - self.service = importutils.import_module(self.driver_name) - self.az = CONF.storage_availability_zone - self.volume_managers = {} - self.backup_rpcapi = backup_rpcapi.BackupAPI() - self.volume_rpcapi = 
volume_rpcapi.VolumeAPI() - super(BackupManager, self).__init__(*args, **kwargs) - - @property - def driver_name(self): - """This function maps old backup services to backup drivers.""" - - return self._map_service_to_driver(CONF.backup_driver) - - def _map_service_to_driver(self, service): - """Maps services to drivers.""" - - if service in mapper: - return mapper[service] - return service - - def _update_backup_error(self, backup, err): - backup.status = fields.BackupStatus.ERROR - backup.fail_reason = err - backup.save() - - def init_host(self, **kwargs): - """Run initialization needed for a standalone service.""" - ctxt = context.get_admin_context() - - try: - self._cleanup_incomplete_backup_operations(ctxt) - except Exception: - # Don't block startup of the backup service. - LOG.exception("Problem cleaning incomplete backup operations.") - - def reset(self): - super(BackupManager, self).reset() - self.backup_rpcapi = backup_rpcapi.BackupAPI() - self.volume_rpcapi = volume_rpcapi.VolumeAPI() - - def _cleanup_incomplete_backup_operations(self, ctxt): - LOG.info("Cleaning up incomplete backup operations.") - - # TODO(smulcahy) implement full resume of backup and restore - # operations on restart (rather than simply resetting) - backups = objects.BackupList.get_all_by_host(ctxt, self.host) - for backup in backups: - try: - self._cleanup_one_backup(ctxt, backup) - except Exception: - LOG.exception("Problem cleaning up backup %(bkup)s.", - {'bkup': backup['id']}) - try: - self._cleanup_temp_volumes_snapshots_for_one_backup(ctxt, - backup) - except Exception: - LOG.exception("Problem cleaning temp volumes and " - "snapshots for backup %(bkup)s.", - {'bkup': backup['id']}) - - def _cleanup_one_volume(self, ctxt, volume): - if volume['status'] == 'backing-up': - self._detach_all_attachments(ctxt, volume) - LOG.info('Resetting volume %(vol_id)s to previous ' - 'status %(status)s (was backing-up).', - {'vol_id': volume['id'], - 'status': volume['previous_status']}) - 
self.db.volume_update(ctxt, volume['id'], - {'status': volume['previous_status']}) - elif volume['status'] == 'restoring-backup': - self._detach_all_attachments(ctxt, volume) - LOG.info('Setting volume %s to error_restoring ' - '(was restoring-backup).', volume['id']) - self.db.volume_update(ctxt, volume['id'], - {'status': 'error_restoring'}) - - def _cleanup_one_backup(self, ctxt, backup): - if backup['status'] == fields.BackupStatus.CREATING: - LOG.info('Resetting backup %s to error (was creating).', - backup['id']) - - volume = objects.Volume.get_by_id(ctxt, backup.volume_id) - self._cleanup_one_volume(ctxt, volume) - - err = 'incomplete backup reset on manager restart' - self._update_backup_error(backup, err) - elif backup['status'] == fields.BackupStatus.RESTORING: - LOG.info('Resetting backup %s to ' - 'available (was restoring).', - backup['id']) - volume = objects.Volume.get_by_id(ctxt, backup.restore_volume_id) - self._cleanup_one_volume(ctxt, volume) - - backup.status = fields.BackupStatus.AVAILABLE - backup.save() - elif backup['status'] == fields.BackupStatus.DELETING: - LOG.info('Resuming delete on backup: %s.', backup['id']) - if CONF.backup_service_inithost_offload: - # Offload all the pending backup delete operations to the - # threadpool to prevent the main backup service thread - # from being blocked. 
- self._add_to_threadpool(self.delete_backup, ctxt, backup) - else: - # Delete backups sequentially - self.delete_backup(ctxt, backup) - - def _detach_all_attachments(self, ctxt, volume): - attachments = volume['volume_attachment'] or [] - for attachment in attachments: - if (attachment['attached_host'] == self.host and - attachment['instance_uuid'] is None): - try: - rpcapi = self.volume_rpcapi - rpcapi.detach_volume(ctxt, volume, attachment['id']) - except Exception: - LOG.exception("Detach attachment %(attach_id)s failed.", - {'attach_id': attachment['id']}, - resource=volume) - - def _delete_temp_volume(self, ctxt, backup): - try: - temp_volume = objects.Volume.get_by_id( - ctxt, backup.temp_volume_id) - self.volume_rpcapi.delete_volume(ctxt, temp_volume) - except exception.VolumeNotFound: - LOG.debug("Could not find temp volume %(vol)s to clean up " - "for backup %(backup)s.", - {'vol': backup.temp_volume_id, - 'backup': backup.id}) - backup.temp_volume_id = None - backup.save() - - def _delete_temp_snapshot(self, ctxt, backup): - try: - temp_snapshot = objects.Snapshot.get_by_id( - ctxt, backup.temp_snapshot_id) - self.volume_rpcapi.delete_snapshot(ctxt, temp_snapshot) - except exception.SnapshotNotFound: - LOG.debug("Could not find temp snapshot %(snap)s to clean " - "up for backup %(backup)s.", - {'snap': backup.temp_snapshot_id, - 'backup': backup.id}) - backup.temp_snapshot_id = None - backup.save() - - def _cleanup_temp_volumes_snapshots_for_one_backup(self, ctxt, backup): - # NOTE(xyang): If the service crashes or gets restarted during the - # backup operation, there could be temporary volumes or snapshots - # that are not deleted. Make sure any temporary volumes or snapshots - # create by the backup job are deleted when service is started. 
- if (backup.temp_volume_id - and backup.status == fields.BackupStatus.ERROR): - self._delete_temp_volume(ctxt, backup) - - if (backup.temp_snapshot_id - and backup.status == fields.BackupStatus.ERROR): - self._delete_temp_snapshot(ctxt, backup) - - def _cleanup_temp_volumes_snapshots_when_backup_created( - self, ctxt, backup): - # Delete temp volumes or snapshots when backup creation is completed. - if backup.temp_volume_id: - self._delete_temp_volume(ctxt, backup) - - if backup.temp_snapshot_id: - self._delete_temp_snapshot(ctxt, backup) - - def create_backup(self, context, backup): - """Create volume backups using configured backup service.""" - volume_id = backup.volume_id - snapshot_id = backup.snapshot_id - volume = objects.Volume.get_by_id(context, volume_id) - snapshot = objects.Snapshot.get_by_id( - context, snapshot_id) if snapshot_id else None - previous_status = volume.get('previous_status', None) - if snapshot_id: - log_message = ('Create backup started, backup: %(backup_id)s ' - 'volume: %(volume_id)s snapshot: %(snapshot_id)s.' - % {'backup_id': backup.id, - 'volume_id': volume_id, - 'snapshot_id': snapshot_id}) - else: - log_message = ('Create backup started, backup: %(backup_id)s ' - 'volume: %(volume_id)s.' 
- % {'backup_id': backup.id, - 'volume_id': volume_id}) - LOG.info(log_message) - - self._notify_about_backup_usage(context, backup, "create.start") - - backup.host = self.host - backup.service = self.driver_name - backup.availability_zone = self.az - backup.save() - - expected_status = "backing-up" - if snapshot_id: - actual_status = snapshot['status'] - if actual_status != expected_status: - err = _('Create backup aborted, expected snapshot status ' - '%(expected_status)s but got %(actual_status)s.') % { - 'expected_status': expected_status, - 'actual_status': actual_status, - } - self._update_backup_error(backup, err) - raise exception.InvalidSnapshot(reason=err) - else: - actual_status = volume['status'] - if actual_status != expected_status: - err = _('Create backup aborted, expected volume status ' - '%(expected_status)s but got %(actual_status)s.') % { - 'expected_status': expected_status, - 'actual_status': actual_status, - } - self._update_backup_error(backup, err) - raise exception.InvalidVolume(reason=err) - - expected_status = fields.BackupStatus.CREATING - actual_status = backup.status - if actual_status != expected_status: - err = _('Create backup aborted, expected backup status ' - '%(expected_status)s but got %(actual_status)s.') % { - 'expected_status': expected_status, - 'actual_status': actual_status, - } - self._update_backup_error(backup, err) - backup.save() - raise exception.InvalidBackup(reason=err) - - try: - self._run_backup(context, backup, volume) - except Exception as err: - with excutils.save_and_reraise_exception(): - if snapshot_id: - snapshot.status = fields.SnapshotStatus.AVAILABLE - snapshot.save() - else: - self.db.volume_update( - context, volume_id, - {'status': previous_status, - 'previous_status': 'error_backing-up'}) - self._update_backup_error(backup, six.text_type(err)) - - # Restore the original status. 
- if snapshot_id: - self.db.snapshot_update(context, snapshot_id, - {'status': fields.BackupStatus.AVAILABLE}) - else: - self.db.volume_update(context, volume_id, - {'status': previous_status, - 'previous_status': 'backing-up'}) - backup.status = fields.BackupStatus.AVAILABLE - backup.size = volume['size'] - backup.save() - - # Handle the num_dependent_backups of parent backup when child backup - # has created successfully. - if backup.parent_id: - parent_backup = objects.Backup.get_by_id(context, - backup.parent_id) - parent_backup.num_dependent_backups += 1 - parent_backup.save() - LOG.info('Create backup finished. backup: %s.', backup.id) - self._notify_about_backup_usage(context, backup, "create.end") - - def _run_backup(self, context, backup, volume): - backup_service = self.service.get_backup_driver(context) - - properties = utils.brick_get_connector_properties() - try: - backup_device = self.volume_rpcapi.get_backup_device(context, - backup, - volume) - attach_info = self._attach_device(context, - backup_device.device_obj, - properties, - backup_device.is_snapshot) - try: - device_path = attach_info['device']['path'] - if (isinstance(device_path, six.string_types) and - not os.path.isdir(device_path)): - if backup_device.secure_enabled: - with open(device_path) as device_file: - backup_service.backup(backup, device_file) - else: - with utils.temporary_chown(device_path): - with open(device_path) as device_file: - backup_service.backup(backup, device_file) - # device_path is already file-like so no need to open it - else: - backup_service.backup(backup, device_path) - - finally: - self._detach_device(context, attach_info, - backup_device.device_obj, properties, - backup_device.is_snapshot) - finally: - backup = objects.Backup.get_by_id(context, backup.id) - self._cleanup_temp_volumes_snapshots_when_backup_created( - context, backup) - - def restore_backup(self, context, backup, volume_id): - """Restore volume backups from configured backup service.""" - 
LOG.info('Restore backup started, backup: %(backup_id)s ' - 'volume: %(volume_id)s.', - {'backup_id': backup.id, 'volume_id': volume_id}) - - volume = objects.Volume.get_by_id(context, volume_id) - self._notify_about_backup_usage(context, backup, "restore.start") - - backup.host = self.host - backup.save() - - expected_status = 'restoring-backup' - actual_status = volume['status'] - if actual_status != expected_status: - err = (_('Restore backup aborted, expected volume status ' - '%(expected_status)s but got %(actual_status)s.') % - {'expected_status': expected_status, - 'actual_status': actual_status}) - backup.status = fields.BackupStatus.AVAILABLE - backup.save() - raise exception.InvalidVolume(reason=err) - - expected_status = fields.BackupStatus.RESTORING - actual_status = backup['status'] - if actual_status != expected_status: - err = (_('Restore backup aborted: expected backup status ' - '%(expected_status)s but got %(actual_status)s.') % - {'expected_status': expected_status, - 'actual_status': actual_status}) - self._update_backup_error(backup, err) - self.db.volume_update(context, volume_id, {'status': 'error'}) - raise exception.InvalidBackup(reason=err) - - if volume['size'] > backup['size']: - LOG.info('Volume: %(vol_id)s, size: %(vol_size)d is ' - 'larger than backup: %(backup_id)s, ' - 'size: %(backup_size)d, continuing with restore.', - {'vol_id': volume['id'], - 'vol_size': volume['size'], - 'backup_id': backup['id'], - 'backup_size': backup['size']}) - - backup_service = self._map_service_to_driver(backup['service']) - configured_service = self.driver_name - if backup_service != configured_service: - err = _('Restore backup aborted, the backup service currently' - ' configured [%(configured_service)s] is not the' - ' backup service that was used to create this' - ' backup [%(backup_service)s].') % { - 'configured_service': configured_service, - 'backup_service': backup_service, - } - backup.status = fields.BackupStatus.AVAILABLE - backup.save() - 
self.db.volume_update(context, volume_id, {'status': 'error'}) - raise exception.InvalidBackup(reason=err) - - try: - self._run_restore(context, backup, volume) - except Exception: - with excutils.save_and_reraise_exception(): - self.db.volume_update(context, volume_id, - {'status': 'error_restoring'}) - backup.status = fields.BackupStatus.AVAILABLE - backup.save() - - self.db.volume_update(context, volume_id, {'status': 'available'}) - backup.status = fields.BackupStatus.AVAILABLE - backup.save() - LOG.info('Restore backup finished, backup %(backup_id)s restored' - ' to volume %(volume_id)s.', - {'backup_id': backup.id, 'volume_id': volume_id}) - self._notify_about_backup_usage(context, backup, "restore.end") - - def _run_restore(self, context, backup, volume): - backup_service = self.service.get_backup_driver(context) - - properties = utils.brick_get_connector_properties() - secure_enabled = ( - self.volume_rpcapi.secure_file_operations_enabled(context, - volume)) - attach_info = self._attach_device(context, volume, properties) - try: - device_path = attach_info['device']['path'] - if (isinstance(device_path, six.string_types) and - not os.path.isdir(device_path)): - if secure_enabled: - with open(device_path, 'wb') as device_file: - backup_service.restore(backup, volume.id, device_file) - else: - with utils.temporary_chown(device_path): - with open(device_path, 'wb') as device_file: - backup_service.restore(backup, volume.id, - device_file) - # device_path is already file-like so no need to open it - else: - backup_service.restore(backup, volume.id, device_path) - finally: - self._detach_device(context, attach_info, volume, properties) - - def delete_backup(self, context, backup): - """Delete volume backup from configured backup service.""" - LOG.info('Delete backup started, backup: %s.', backup.id) - - self._notify_about_backup_usage(context, backup, "delete.start") - backup.host = self.host - backup.save() - - expected_status = fields.BackupStatus.DELETING - 
actual_status = backup.status - if actual_status != expected_status: - err = _('Delete_backup aborted, expected backup status ' - '%(expected_status)s but got %(actual_status)s.') \ - % {'expected_status': expected_status, - 'actual_status': actual_status} - self._update_backup_error(backup, err) - raise exception.InvalidBackup(reason=err) - - backup_service = self._map_service_to_driver(backup['service']) - if backup_service is not None: - configured_service = self.driver_name - if backup_service != configured_service: - err = _('Delete backup aborted, the backup service currently' - ' configured [%(configured_service)s] is not the' - ' backup service that was used to create this' - ' backup [%(backup_service)s].')\ - % {'configured_service': configured_service, - 'backup_service': backup_service} - self._update_backup_error(backup, err) - raise exception.InvalidBackup(reason=err) - - try: - backup_service = self.service.get_backup_driver(context) - backup_service.delete_backup(backup) - except Exception as err: - with excutils.save_and_reraise_exception(): - self._update_backup_error(backup, six.text_type(err)) - - # Get reservations - try: - reserve_opts = { - 'backups': -1, - 'backup_gigabytes': -backup.size, - } - reservations = QUOTAS.reserve(context, - project_id=backup.project_id, - **reserve_opts) - except Exception: - reservations = None - LOG.exception("Failed to update usages deleting backup") - - backup.destroy() - # If this backup is incremental backup, handle the - # num_dependent_backups of parent backup - if backup.parent_id: - parent_backup = objects.Backup.get_by_id(context, - backup.parent_id) - if parent_backup.has_dependent_backups: - parent_backup.num_dependent_backups -= 1 - parent_backup.save() - # Commit the reservations - if reservations: - QUOTAS.commit(context, reservations, - project_id=backup.project_id) - - LOG.info('Delete backup finished, backup %s deleted.', backup.id) - self._notify_about_backup_usage(context, backup, 
"delete.end") - - def _notify_about_backup_usage(self, - context, - backup, - event_suffix, - extra_usage_info=None): - volume_utils.notify_about_backup_usage( - context, backup, event_suffix, - extra_usage_info=extra_usage_info, - host=self.host) - - def export_record(self, context, backup): - """Export all volume backup metadata details to allow clean import. - - Export backup metadata so it could be re-imported into the database - without any prerequisite in the backup database. - - :param context: running context - :param backup: backup object to export - :returns: backup_record - a description of how to import the backup - :returns: contains 'backup_url' - how to import the backup, and - :returns: 'backup_service' describing the needed driver. - :raises InvalidBackup: - """ - LOG.info('Export record started, backup: %s.', backup.id) - - expected_status = fields.BackupStatus.AVAILABLE - actual_status = backup.status - if actual_status != expected_status: - err = (_('Export backup aborted, expected backup status ' - '%(expected_status)s but got %(actual_status)s.') % - {'expected_status': expected_status, - 'actual_status': actual_status}) - raise exception.InvalidBackup(reason=err) - - backup_record = {'backup_service': backup.service} - backup_service = self._map_service_to_driver(backup.service) - configured_service = self.driver_name - if backup_service != configured_service: - err = (_('Export record aborted, the backup service currently ' - 'configured [%(configured_service)s] is not the ' - 'backup service that was used to create this ' - 'backup [%(backup_service)s].') % - {'configured_service': configured_service, - 'backup_service': backup_service}) - raise exception.InvalidBackup(reason=err) - - # Call driver to create backup description string - try: - backup_service = self.service.get_backup_driver(context) - driver_info = backup_service.export_record(backup) - backup_url = backup.encode_record(driver_info=driver_info) - backup_record['backup_url'] 
= backup_url - except Exception as err: - msg = six.text_type(err) - raise exception.InvalidBackup(reason=msg) - - LOG.info('Export record finished, backup %s exported.', backup.id) - return backup_record - - def import_record(self, - context, - backup, - backup_service, - backup_url, - backup_hosts): - """Import all volume backup metadata details to the backup db. - - :param context: running context - :param backup: The new backup object for the import - :param backup_service: The needed backup driver for import - :param backup_url: An identifier string to locate the backup - :param backup_hosts: Potential hosts to execute the import - :raises InvalidBackup: - :raises ServiceNotFound: - """ - LOG.info('Import record started, backup_url: %s.', backup_url) - - # Can we import this backup? - if backup_service != self.driver_name: - # No, are there additional potential backup hosts in the list? - if len(backup_hosts) > 0: - # try the next host on the list, maybe he can import - first_host = backup_hosts.pop() - self.backup_rpcapi.import_record(context, - first_host, - backup, - backup_service, - backup_url, - backup_hosts) - else: - # empty list - we are the last host on the list, fail - err = _('Import record failed, cannot find backup ' - 'service to perform the import. Request service ' - '%(service)s.') % {'service': backup_service} - self._update_backup_error(backup, err) - raise exception.ServiceNotFound(service_id=backup_service) - else: - # Yes... 
- try: - # Deserialize backup record information - backup_options = backup.decode_record(backup_url) - - # Extract driver specific info and pass it to the driver - driver_options = backup_options.pop('driver_info', {}) - backup_service = self.service.get_backup_driver(context) - backup_service.import_record(backup, driver_options) - except Exception as err: - msg = six.text_type(err) - self._update_backup_error(backup, msg) - raise exception.InvalidBackup(reason=msg) - - required_import_options = { - 'display_name', - 'display_description', - 'container', - 'size', - 'service_metadata', - 'object_count', - 'id' - } - - # Check for missing fields in imported data - missing_opts = required_import_options - set(backup_options) - if missing_opts: - msg = (_('Driver successfully decoded imported backup data, ' - 'but there are missing fields (%s).') % - ', '.join(missing_opts)) - self._update_backup_error(backup, msg) - raise exception.InvalidBackup(reason=msg) - - # Confirm the ID from the record in the DB is the right one - backup_id = backup_options['id'] - if backup_id != backup.id: - msg = (_('Trying to import backup metadata from id %(meta_id)s' - ' into backup %(id)s.') % - {'meta_id': backup_id, 'id': backup.id}) - self._update_backup_error(backup, msg) - raise exception.InvalidBackup(reason=msg) - - # Overwrite some fields - backup_options['service'] = self.driver_name - backup_options['availability_zone'] = self.az - backup_options['host'] = self.host - - # Remove some values which are not actual fields and some that - # were set by the API node - for key in ('name', 'user_id', 'project_id', 'deleted_at', - 'deleted', 'fail_reason', 'status'): - backup_options.pop(key, None) - - # Update the database - backup.update(backup_options) - backup.save() - - # Verify backup - try: - if isinstance(backup_service, driver.BackupDriverWithVerify): - backup_service.verify(backup.id) - else: - LOG.warning('Backup service %(service)s does not ' - 'support verify. 
Backup id %(id)s is ' - 'not verified. Skipping verify.', - {'service': self.driver_name, - 'id': backup.id}) - except exception.InvalidBackup as err: - with excutils.save_and_reraise_exception(): - self._update_backup_error(backup, six.text_type(err)) - - # Update the backup's status - backup.update({"status": fields.BackupStatus.AVAILABLE}) - backup.save() - - LOG.info('Import record id %s metadata from driver ' - 'finished.', backup.id) - - def reset_status(self, context, backup, status): - """Reset volume backup status. - - :param context: running context - :param backup: The backup object for reset status operation - :param status: The status to be set - :raises InvalidBackup: - :raises BackupVerifyUnsupportedDriver: - :raises AttributeError: - """ - LOG.info('Reset backup status started, backup_id: ' - '%(backup_id)s, status: %(status)s.', - {'backup_id': backup.id, - 'status': status}) - - backup_service_name = self._map_service_to_driver(backup.service) - LOG.info('Backup service: %s.', backup_service_name) - if backup_service_name is not None: - configured_service = self.driver_name - if backup_service_name != configured_service: - err = _('Reset backup status aborted, the backup service' - ' currently configured [%(configured_service)s] ' - 'is not the backup service that was used to create' - ' this backup [%(backup_service)s].') % \ - {'configured_service': configured_service, - 'backup_service': backup_service_name} - raise exception.InvalidBackup(reason=err) - # Verify backup - try: - # check whether the backup is ok or not - if (status == fields.BackupStatus.AVAILABLE - and backup['status'] != fields.BackupStatus.RESTORING): - # check whether we could verify the backup is ok or not - backup_service = self.service.get_backup_driver(context) - if isinstance(backup_service, - driver.BackupDriverWithVerify): - backup_service.verify(backup.id) - backup.status = status - backup.save() - # driver does not support verify function - else: - msg = (_('Backup 
service %(configured_service)s ' - 'does not support verify. Backup id' - ' %(id)s is not verified. ' - 'Skipping verify.') % - {'configured_service': self.driver_name, - 'id': backup.id}) - raise exception.BackupVerifyUnsupportedDriver( - reason=msg) - # reset status to error or from restoring to available - else: - if (status == fields.BackupStatus.ERROR or - (status == fields.BackupStatus.AVAILABLE and - backup.status == fields.BackupStatus.RESTORING)): - backup.status = status - backup.save() - except exception.InvalidBackup: - with excutils.save_and_reraise_exception(): - LOG.error("Backup id %s is not invalid. Skipping reset.", - backup.id) - except exception.BackupVerifyUnsupportedDriver: - with excutils.save_and_reraise_exception(): - LOG.error('Backup service %(configured_service)s ' - 'does not support verify. Backup id ' - '%(id)s is not verified. ' - 'Skipping verify.', - {'configured_service': self.driver_name, - 'id': backup.id}) - except AttributeError: - msg = (_('Backup service %(service)s does not support ' - 'verify. Backup id %(id)s is not verified. ' - 'Skipping reset.') % - {'service': self.driver_name, - 'id': backup.id}) - LOG.error(msg) - raise exception.BackupVerifyUnsupportedDriver( - reason=msg) - - # Needs to clean temporary volumes and snapshots. - try: - self._cleanup_temp_volumes_snapshots_for_one_backup( - context, backup) - except Exception: - LOG.exception("Problem cleaning temp volumes and " - "snapshots for backup %(bkup)s.", - {'bkup': backup.id}) - - # send notification to ceilometer - notifier_info = {'id': backup.id, 'update': {'status': status}} - notifier = rpc.get_notifier('backupStatusUpdate') - notifier.info(context, "backups.reset_status.end", - notifier_info) - - def check_support_to_force_delete(self, context): - """Check if the backup driver supports force delete operation. 
- - :param context: running context - """ - backup_service = self.service.get_backup_driver(context) - return backup_service.support_force_delete - - def _attach_device(self, ctxt, backup_device, - properties, is_snapshot=False): - """Attach backup device.""" - if not is_snapshot: - return self._attach_volume(ctxt, backup_device, properties) - else: - return self._attach_snapshot(ctxt, backup_device, properties) - - def _attach_volume(self, context, volume, properties): - """Attach a volume.""" - - try: - conn = self.volume_rpcapi.initialize_connection(context, - volume, - properties) - return self._connect_device(conn) - except Exception: - with excutils.save_and_reraise_exception(): - try: - self.volume_rpcapi.terminate_connection(context, volume, - properties, - force=True) - except Exception: - LOG.warning("Failed to terminate the connection " - "of volume %(volume_id)s, but it is " - "acceptable.", - {'volume_id', volume.id}) - - def _attach_snapshot(self, ctxt, snapshot, properties): - """Attach a snapshot.""" - - try: - conn = self.volume_rpcapi.initialize_connection_snapshot( - ctxt, snapshot, properties) - return self._connect_device(conn) - except Exception: - with excutils.save_and_reraise_exception(): - try: - self.volume_rpcapi.terminate_connection_snapshot( - ctxt, snapshot, properties, force=True) - except Exception: - LOG.warning("Failed to terminate the connection " - "of snapshot %(snapshot_id)s, but it is " - "acceptable.", - {'snapshot_id', snapshot.id}) - - def _connect_device(self, conn): - """Establish connection to device.""" - use_multipath = CONF.use_multipath_for_image_xfer - device_scan_attempts = CONF.num_volume_device_scan_tries - protocol = conn['driver_volume_type'] - connector = utils.brick_get_connector( - protocol, - use_multipath=use_multipath, - device_scan_attempts=device_scan_attempts, - conn=conn) - vol_handle = connector.connect_volume(conn['data']) - - return {'conn': conn, 'device': vol_handle, 'connector': connector} - - 
def _detach_device(self, ctxt, attach_info, device, - properties, is_snapshot=False, force=False): - """Disconnect the volume or snapshot from the host. """ - connector = attach_info['connector'] - connector.disconnect_volume(attach_info['conn']['data'], - attach_info['device']) - rpcapi = self.volume_rpcapi - if not is_snapshot: - rpcapi.terminate_connection(ctxt, device, properties, - force=force) - rpcapi.remove_export(ctxt, device) - else: - rpcapi.terminate_connection_snapshot(ctxt, device, - properties, force=force) - rpcapi.remove_export_snapshot(ctxt, device) diff --git a/cinder/backup/rpcapi.py b/cinder/backup/rpcapi.py deleted file mode 100644 index 0988fc7cd..000000000 --- a/cinder/backup/rpcapi.py +++ /dev/null @@ -1,113 +0,0 @@ -# Copyright (C) 2012 Hewlett-Packard Development Company, L.P. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -""" -Client side of the volume backup RPC API. -""" - - -from oslo_log import log as logging - -from cinder.common import constants -from cinder import rpc - - -LOG = logging.getLogger(__name__) - - -class BackupAPI(rpc.RPCAPI): - """Client side of the volume rpc API. - - API version history: - - .. code-block:: none - - 1.0 - Initial version. - 1.1 - Changed methods to accept backup objects instead of IDs. - 1.2 - A version that got in by mistake (without breaking anything). - 1.3 - Dummy version bump to mark start of having cinder-backup service - decoupled from cinder-volume. - - ... 
Mitaka supports messaging 1.3. Any changes to existing methods in - 1.x after this point should be done so that they can handle version cap - set to 1.3. - - 2.0 - Remove 1.x compatibility - 2.1 - Adds set_log_levels and get_log_levels - """ - - RPC_API_VERSION = '2.1' - RPC_DEFAULT_VERSION = '2.0' - TOPIC = constants.BACKUP_TOPIC - BINARY = 'cinder-backup' - - def create_backup(self, ctxt, backup): - LOG.debug("create_backup in rpcapi backup_id %s", backup.id) - cctxt = self._get_cctxt(server=backup.host) - cctxt.cast(ctxt, 'create_backup', backup=backup) - - def restore_backup(self, ctxt, volume_host, backup, volume_id): - LOG.debug("restore_backup in rpcapi backup_id %s", backup.id) - cctxt = self._get_cctxt(server=volume_host) - cctxt.cast(ctxt, 'restore_backup', backup=backup, - volume_id=volume_id) - - def delete_backup(self, ctxt, backup): - LOG.debug("delete_backup rpcapi backup_id %s", backup.id) - cctxt = self._get_cctxt(server=backup.host) - cctxt.cast(ctxt, 'delete_backup', backup=backup) - - def export_record(self, ctxt, backup): - LOG.debug("export_record in rpcapi backup_id %(id)s " - "on host %(host)s.", - {'id': backup.id, - 'host': backup.host}) - cctxt = self._get_cctxt(server=backup.host) - return cctxt.call(ctxt, 'export_record', backup=backup) - - def import_record(self, ctxt, host, backup, backup_service, backup_url, - backup_hosts): - LOG.debug("import_record rpcapi backup id %(id)s " - "on host %(host)s for backup_url %(url)s.", - {'id': backup.id, 'host': host, 'url': backup_url}) - cctxt = self._get_cctxt(server=host) - cctxt.cast(ctxt, 'import_record', - backup=backup, - backup_service=backup_service, - backup_url=backup_url, - backup_hosts=backup_hosts) - - def reset_status(self, ctxt, backup, status): - LOG.debug("reset_status in rpcapi backup_id %(id)s " - "on host %(host)s.", - {'id': backup.id, 'host': backup.host}) - cctxt = self._get_cctxt(server=backup.host) - return cctxt.cast(ctxt, 'reset_status', backup=backup, status=status) 
- - def check_support_to_force_delete(self, ctxt, host): - LOG.debug("Check if backup driver supports force delete " - "on host %(host)s.", {'host': host}) - cctxt = self._get_cctxt(server=host) - return cctxt.call(ctxt, 'check_support_to_force_delete') - - @rpc.assert_min_rpc_version('2.1') - def set_log_levels(self, context, service, log_request): - cctxt = self._get_cctxt(server=service.host, version='2.1') - cctxt.cast(context, 'set_log_levels', log_request=log_request) - - @rpc.assert_min_rpc_version('2.1') - def get_log_levels(self, context, service, log_request): - cctxt = self._get_cctxt(server=service.host, version='2.1') - return cctxt.call(context, 'get_log_levels', log_request=log_request) diff --git a/cinder/brick/README.txt b/cinder/brick/README.txt deleted file mode 100644 index 35f90ca36..000000000 --- a/cinder/brick/README.txt +++ /dev/null @@ -1,5 +0,0 @@ -Brick has been migrated to a new standalone -pypi library called os-brick. - -We are leaving the local_dev directory here for the time -being until we can migrate it to a new home. diff --git a/cinder/brick/__init__.py b/cinder/brick/__init__.py deleted file mode 100644 index e69de29bb..000000000 diff --git a/cinder/brick/local_dev/__init__.py b/cinder/brick/local_dev/__init__.py deleted file mode 100644 index e69de29bb..000000000 diff --git a/cinder/brick/local_dev/lvm.py b/cinder/brick/local_dev/lvm.py deleted file mode 100644 index 7dcb20838..000000000 --- a/cinder/brick/local_dev/lvm.py +++ /dev/null @@ -1,872 +0,0 @@ -# Copyright 2013 OpenStack Foundation. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. 
You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -""" -LVM class for performing LVM operations. -""" - -import math -import os -import re - -from os_brick import executor -from oslo_concurrency import processutils as putils -from oslo_log import log as logging -from oslo_utils import excutils -from six import moves - -from cinder import exception -from cinder import utils - - -LOG = logging.getLogger(__name__) - - -class LVM(executor.Executor): - """LVM object to enable various LVM related operations.""" - LVM_CMD_PREFIX = ['env', 'LC_ALL=C'] - _supports_pvs_ignoreskippedcluster = None - - def __init__(self, vg_name, root_helper, create_vg=False, - physical_volumes=None, lvm_type='default', - executor=putils.execute, lvm_conf=None, - suppress_fd_warn=False): - - """Initialize the LVM object. - - The LVM object is based on an LVM VolumeGroup, one instantiation - for each VolumeGroup you have/use. 
- - :param vg_name: Name of existing VG or VG to create - :param root_helper: Execution root_helper method to use - :param create_vg: Indicates the VG doesn't exist - and we want to create it - :param physical_volumes: List of PVs to build VG on - :param lvm_type: VG and Volume type (default, or thin) - :param executor: Execute method to use, None uses common/processutils - :param suppress_fd_warn: Add suppress FD Warn to LVM env - - """ - super(LVM, self).__init__(execute=executor, root_helper=root_helper) - self.vg_name = vg_name - self.pv_list = [] - self.vg_size = 0.0 - self.vg_free_space = 0.0 - self.vg_lv_count = 0 - self.vg_uuid = None - self.vg_thin_pool = None - self.vg_thin_pool_size = 0.0 - self.vg_thin_pool_free_space = 0.0 - self._supports_snapshot_lv_activation = None - self._supports_lvchange_ignoreskipactivation = None - self.vg_provisioned_capacity = 0.0 - - if lvm_type not in ['default', 'thin']: - raise exception.Invalid('lvm_type must be "default" or "thin"') - - # Ensure LVM_SYSTEM_DIR has been added to LVM.LVM_CMD_PREFIX - # before the first LVM command is executed, and use the directory - # where the specified lvm_conf file is located as the value. - - # NOTE(jdg): We use the temp var here because LVM_CMD_PREFIX is a - # class global and if you use append here, you'll literally just keep - # appending values to the global. 
- _lvm_cmd_prefix = ['env', 'LC_ALL=C'] - - if lvm_conf and os.path.isfile(lvm_conf): - lvm_sys_dir = os.path.dirname(lvm_conf) - _lvm_cmd_prefix.append('LVM_SYSTEM_DIR=' + lvm_sys_dir) - - if suppress_fd_warn: - _lvm_cmd_prefix.append('LVM_SUPPRESS_FD_WARNINGS=1') - LVM.LVM_CMD_PREFIX = _lvm_cmd_prefix - - if create_vg and physical_volumes is not None: - self.pv_list = physical_volumes - - try: - self._create_vg(physical_volumes) - except putils.ProcessExecutionError as err: - LOG.exception('Error creating Volume Group') - LOG.error('Cmd :%s', err.cmd) - LOG.error('StdOut :%s', err.stdout) - LOG.error('StdErr :%s', err.stderr) - raise exception.VolumeGroupCreationFailed(vg_name=self.vg_name) - - if self._vg_exists() is False: - LOG.error('Unable to locate Volume Group %s', vg_name) - raise exception.VolumeGroupNotFound(vg_name=vg_name) - - # NOTE: we assume that the VG has been activated outside of Cinder - - if lvm_type == 'thin': - pool_name = "%s-pool" % self.vg_name - if self.get_volume(pool_name) is None: - try: - self.create_thin_pool(pool_name) - except putils.ProcessExecutionError: - # Maybe we just lost the race against another copy of - # this driver being in init in parallel - e.g. - # cinder-volume and cinder-backup starting in parallel - if self.get_volume(pool_name) is None: - raise - - self.vg_thin_pool = pool_name - self.activate_lv(self.vg_thin_pool) - self.pv_list = self.get_all_physical_volumes(root_helper, vg_name) - - def _vg_exists(self): - """Simple check to see if VG exists. 
- - :returns: True if vg specified in object exists, else False - - """ - exists = False - cmd = LVM.LVM_CMD_PREFIX + ['vgs', '--noheadings', - '-o', 'name', self.vg_name] - (out, _err) = self._execute(*cmd, - root_helper=self._root_helper, - run_as_root=True) - - if out is not None: - volume_groups = out.split() - if self.vg_name in volume_groups: - exists = True - - return exists - - def _create_vg(self, pv_list): - cmd = ['vgcreate', self.vg_name, ','.join(pv_list)] - self._execute(*cmd, root_helper=self._root_helper, run_as_root=True) - - def _get_thin_pool_free_space(self, vg_name, thin_pool_name): - """Returns available thin pool free space. - - :param vg_name: the vg where the pool is placed - :param thin_pool_name: the thin pool to gather info for - :returns: Free space in GB (float), calculated using data_percent - - """ - cmd = LVM.LVM_CMD_PREFIX +\ - ['lvs', '--noheadings', '--unit=g', - '-o', 'size,data_percent', '--separator', - ':', '--nosuffix'] - # NOTE(gfidente): data_percent only applies to some types of LV so we - # make sure to append the actual thin pool name - cmd.append("/dev/%s/%s" % (vg_name, thin_pool_name)) - - free_space = 0.0 - - try: - (out, err) = self._execute(*cmd, - root_helper=self._root_helper, - run_as_root=True) - if out is not None: - out = out.strip() - data = out.split(':') - pool_size = float(data[0]) - data_percent = float(data[1]) - consumed_space = pool_size / 100 * data_percent - free_space = pool_size - consumed_space - free_space = round(free_space, 2) - except putils.ProcessExecutionError as err: - LOG.exception('Error querying thin pool about data_percent') - LOG.error('Cmd :%s', err.cmd) - LOG.error('StdOut :%s', err.stdout) - LOG.error('StdErr :%s', err.stderr) - - return free_space - - @staticmethod - def get_lvm_version(root_helper): - """Static method to get LVM version from system. 
- - :param root_helper: root_helper to use for execute - :returns: version 3-tuple - - """ - - cmd = LVM.LVM_CMD_PREFIX + ['vgs', '--version'] - (out, _err) = putils.execute(*cmd, - root_helper=root_helper, - run_as_root=True) - lines = out.split('\n') - - for line in lines: - if 'LVM version' in line: - version_list = line.split() - # NOTE(gfidente): version is formatted as follows: - # major.minor.patchlevel(library API version)[-customisation] - version = version_list[2] - version_filter = r"(\d+)\.(\d+)\.(\d+).*" - r = re.search(version_filter, version) - version_tuple = tuple(map(int, r.group(1, 2, 3))) - return version_tuple - - @staticmethod - def supports_thin_provisioning(root_helper): - """Static method to check for thin LVM support on a system. - - :param root_helper: root_helper to use for execute - :returns: True if supported, False otherwise - - """ - - return LVM.get_lvm_version(root_helper) >= (2, 2, 95) - - @property - def supports_snapshot_lv_activation(self): - """Property indicating whether snap activation changes are supported. - - Check for LVM version >= 2.02.91. - (LVM2 git: e8a40f6 Allow to activate snapshot) - - :returns: True/False indicating support - """ - - if self._supports_snapshot_lv_activation is not None: - return self._supports_snapshot_lv_activation - - self._supports_snapshot_lv_activation = ( - self.get_lvm_version(self._root_helper) >= (2, 2, 91)) - - return self._supports_snapshot_lv_activation - - @property - def supports_lvchange_ignoreskipactivation(self): - """Property indicating whether lvchange can ignore skip activation. - - Check for LVM version >= 2.02.99. 
- (LVM2 git: ab789c1bc add --ignoreactivationskip to lvchange) - """ - - if self._supports_lvchange_ignoreskipactivation is not None: - return self._supports_lvchange_ignoreskipactivation - - self._supports_lvchange_ignoreskipactivation = ( - self.get_lvm_version(self._root_helper) >= (2, 2, 99)) - - return self._supports_lvchange_ignoreskipactivation - - @staticmethod - def supports_pvs_ignoreskippedcluster(root_helper): - """Property indicating whether pvs supports --ignoreskippedcluster - - Check for LVM version >= 2.02.103. - (LVM2 git: baf95bbff cmdline: Add --ignoreskippedcluster. - """ - - if LVM._supports_pvs_ignoreskippedcluster is not None: - return LVM._supports_pvs_ignoreskippedcluster - - LVM._supports_pvs_ignoreskippedcluster = ( - LVM.get_lvm_version(root_helper) >= (2, 2, 103)) - - return LVM._supports_pvs_ignoreskippedcluster - - @staticmethod - def get_lv_info(root_helper, vg_name=None, lv_name=None): - """Retrieve info about LVs (all, in a VG, or a single LV). - - :param root_helper: root_helper to use for execute - :param vg_name: optional, gathers info for only the specified VG - :param lv_name: optional, gathers info for only the specified LV - :returns: List of Dictionaries with LV info - - """ - - cmd = LVM.LVM_CMD_PREFIX + ['lvs', '--noheadings', '--unit=g', - '-o', 'vg_name,name,size', '--nosuffix'] - if lv_name is not None and vg_name is not None: - cmd.append("%s/%s" % (vg_name, lv_name)) - elif vg_name is not None: - cmd.append(vg_name) - - try: - (out, _err) = putils.execute(*cmd, - root_helper=root_helper, - run_as_root=True) - except putils.ProcessExecutionError as err: - with excutils.save_and_reraise_exception(reraise=True) as ctx: - if "not found" in err.stderr or "Failed to find" in err.stderr: - ctx.reraise = False - LOG.info("Logical Volume not found when querying " - "LVM info. 
(vg_name=%(vg)s, lv_name=%(lv)s", - {'vg': vg_name, 'lv': lv_name}) - out = None - - lv_list = [] - if out is not None: - volumes = out.split() - iterator = moves.zip(*[iter(volumes)] * 3) # pylint: disable=E1101 - for vg, name, size in iterator: - lv_list.append({"vg": vg, "name": name, "size": size}) - - return lv_list - - def get_volumes(self, lv_name=None): - """Get all LV's associated with this instantiation (VG). - - :returns: List of Dictionaries with LV info - - """ - return self.get_lv_info(self._root_helper, - self.vg_name, - lv_name) - - def get_volume(self, name): - """Get reference object of volume specified by name. - - :returns: dict representation of Logical Volume if exists - - """ - ref_list = self.get_volumes(name) - for r in ref_list: - if r['name'] == name: - return r - return None - - @staticmethod - def get_all_physical_volumes(root_helper, vg_name=None): - """Static method to get all PVs on a system. - - :param root_helper: root_helper to use for execute - :param vg_name: optional, gathers info for only the specified VG - :returns: List of Dictionaries with PV info - - """ - field_sep = '|' - cmd = LVM.LVM_CMD_PREFIX + ['pvs', '--noheadings', - '--unit=g', - '-o', 'vg_name,name,size,free', - '--separator', field_sep, - '--nosuffix'] - if LVM.supports_pvs_ignoreskippedcluster(root_helper): - cmd.append('--ignoreskippedcluster') - - (out, _err) = putils.execute(*cmd, - root_helper=root_helper, - run_as_root=True) - - pvs = out.split() - if vg_name is not None: - pvs = [pv for pv in pvs if vg_name == pv.split(field_sep)[0]] - - pv_list = [] - for pv in pvs: - fields = pv.split(field_sep) - pv_list.append({'vg': fields[0], - 'name': fields[1], - 'size': float(fields[2]), - 'available': float(fields[3])}) - return pv_list - - @staticmethod - def get_all_volume_groups(root_helper, vg_name=None): - """Static method to get all VGs on a system. 
- - :param root_helper: root_helper to use for execute - :param vg_name: optional, gathers info for only the specified VG - :returns: List of Dictionaries with VG info - - """ - cmd = LVM.LVM_CMD_PREFIX + ['vgs', '--noheadings', - '--unit=g', '-o', - 'name,size,free,lv_count,uuid', - '--separator', ':', - '--nosuffix'] - if vg_name is not None: - cmd.append(vg_name) - - (out, _err) = putils.execute(*cmd, - root_helper=root_helper, - run_as_root=True) - vg_list = [] - if out is not None: - vgs = out.split() - for vg in vgs: - fields = vg.split(':') - vg_list.append({'name': fields[0], - 'size': float(fields[1]), - 'available': float(fields[2]), - 'lv_count': int(fields[3]), - 'uuid': fields[4]}) - - return vg_list - - def update_volume_group_info(self): - """Update VG info for this instantiation. - - Used to update member fields of object and - provide a dict of info for caller. - - :returns: Dictionaries of VG info - - """ - vg_list = self.get_all_volume_groups(self._root_helper, self.vg_name) - - if len(vg_list) != 1: - LOG.error('Unable to find VG: %s', self.vg_name) - raise exception.VolumeGroupNotFound(vg_name=self.vg_name) - - self.vg_size = float(vg_list[0]['size']) - self.vg_free_space = float(vg_list[0]['available']) - self.vg_lv_count = int(vg_list[0]['lv_count']) - self.vg_uuid = vg_list[0]['uuid'] - - total_vols_size = 0.0 - if self.vg_thin_pool is not None: - # NOTE(xyang): If providing only self.vg_name, - # get_lv_info will output info on the thin pool and all - # individual volumes. 
- # get_lv_info(self._root_helper, 'stack-vg') - # sudo lvs --noheadings --unit=g -o vg_name,name,size - # --nosuffix stack-vg - # stack-vg stack-pool 9.51 - # stack-vg volume-13380d16-54c3-4979-9d22-172082dbc1a1 1.00 - # stack-vg volume-629e13ab-7759-46a5-b155-ee1eb20ca892 1.00 - # stack-vg volume-e3e6281c-51ee-464c-b1a7-db6c0854622c 1.00 - # - # If providing both self.vg_name and self.vg_thin_pool, - # get_lv_info will output only info on the thin pool, but not - # individual volumes. - # get_lv_info(self._root_helper, 'stack-vg', 'stack-pool') - # sudo lvs --noheadings --unit=g -o vg_name,name,size - # --nosuffix stack-vg/stack-pool - # stack-vg stack-pool 9.51 - # - # We need info on both the thin pool and the volumes, - # therefore we should provide only self.vg_name, but not - # self.vg_thin_pool here. - for lv in self.get_lv_info(self._root_helper, - self.vg_name): - lvsize = lv['size'] - # get_lv_info runs "lvs" command with "--nosuffix". - # This removes "g" from "1.00g" and only outputs "1.00". - # Running "lvs" command without "--nosuffix" will output - # "1.00g" if "g" is the unit. - # Remove the unit if it is in lv['size']. - if not lv['size'][-1].isdigit(): - lvsize = lvsize[:-1] - if lv['name'] == self.vg_thin_pool: - self.vg_thin_pool_size = lvsize - tpfs = self._get_thin_pool_free_space(self.vg_name, - self.vg_thin_pool) - self.vg_thin_pool_free_space = tpfs - else: - total_vols_size = total_vols_size + float(lvsize) - total_vols_size = round(total_vols_size, 2) - - self.vg_provisioned_capacity = total_vols_size - - def _calculate_thin_pool_size(self): - """Calculates the correct size for a thin pool. - - Ideally we would use 100% of the containing volume group and be done. - But the 100%VG notation to lvcreate is not implemented and thus cannot - be used. See https://bugzilla.redhat.com/show_bug.cgi?id=998347 - - Further, some amount of free space must remain in the volume group for - metadata for the contained logical volumes. 
The exact amount depends - on how much volume sharing you expect. - - :returns: An lvcreate-ready string for the number of calculated bytes. - """ - - # make sure volume group information is current - self.update_volume_group_info() - - # leave 5% free for metadata - return "%sg" % (self.vg_free_space * 0.95) - - def create_thin_pool(self, name=None, size_str=None): - """Creates a thin provisioning pool for this VG. - - The syntax here is slightly different than the default - lvcreate -T, so we'll just write a custom cmd here - and do it. - - :param name: Name to use for pool, default is "-pool" - :param size_str: Size to allocate for pool, default is entire VG - :returns: The size string passed to the lvcreate command - - """ - - if not self.supports_thin_provisioning(self._root_helper): - LOG.error('Requested to setup thin provisioning, ' - 'however current LVM version does not ' - 'support it.') - return None - - if name is None: - name = '%s-pool' % self.vg_name - - vg_pool_name = '%s/%s' % (self.vg_name, name) - - if not size_str: - size_str = self._calculate_thin_pool_size() - - cmd = LVM.LVM_CMD_PREFIX + ['lvcreate', '-T', '-L', size_str, - vg_pool_name] - LOG.debug("Creating thin pool '%(pool)s' with size %(size)s of " - "total %(free)sg", {'pool': vg_pool_name, - 'size': size_str, - 'free': self.vg_free_space}) - - self._execute(*cmd, - root_helper=self._root_helper, - run_as_root=True) - - self.vg_thin_pool = name - return size_str - - def create_volume(self, name, size_str, lv_type='default', mirror_count=0): - """Creates a logical volume on the object's VG. 
- - :param name: Name to use when creating Logical Volume - :param size_str: Size to use when creating Logical Volume - :param lv_type: Type of Volume (default or thin) - :param mirror_count: Use LVM mirroring with specified count - - """ - - if lv_type == 'thin': - pool_path = '%s/%s' % (self.vg_name, self.vg_thin_pool) - cmd = LVM.LVM_CMD_PREFIX + ['lvcreate', '-T', '-V', size_str, '-n', - name, pool_path] - else: - cmd = LVM.LVM_CMD_PREFIX + ['lvcreate', '-n', name, self.vg_name, - '-L', size_str] - - if mirror_count > 0: - cmd.extend(['-m', mirror_count, '--nosync', - '--mirrorlog', 'mirrored']) - terras = int(size_str[:-1]) / 1024.0 - if terras >= 1.5: - rsize = int(2 ** math.ceil(math.log(terras) / math.log(2))) - # NOTE(vish): Next power of two for region size. See: - # http://red.ht/U2BPOD - cmd.extend(['-R', str(rsize)]) - - try: - self._execute(*cmd, - root_helper=self._root_helper, - run_as_root=True) - except putils.ProcessExecutionError as err: - LOG.exception('Error creating Volume') - LOG.error('Cmd :%s', err.cmd) - LOG.error('StdOut :%s', err.stdout) - LOG.error('StdErr :%s', err.stderr) - LOG.error('Current state: %s', - self.get_all_volume_groups(self._root_helper)) - raise - - @utils.retry(putils.ProcessExecutionError) - def create_lv_snapshot(self, name, source_lv_name, lv_type='default'): - """Creates a snapshot of a logical volume. 
- - :param name: Name to assign to new snapshot - :param source_lv_name: Name of Logical Volume to snapshot - :param lv_type: Type of LV (default or thin) - - """ - source_lvref = self.get_volume(source_lv_name) - if source_lvref is None: - LOG.error("Trying to create snapshot by non-existent LV: %s", - source_lv_name) - raise exception.VolumeDeviceNotFound(device=source_lv_name) - cmd = LVM.LVM_CMD_PREFIX + ['lvcreate', '--name', name, '--snapshot', - '%s/%s' % (self.vg_name, source_lv_name)] - if lv_type != 'thin': - size = source_lvref['size'] - cmd.extend(['-L', '%sg' % (size)]) - - try: - self._execute(*cmd, - root_helper=self._root_helper, - run_as_root=True) - except putils.ProcessExecutionError as err: - LOG.exception('Error creating snapshot') - LOG.error('Cmd :%s', err.cmd) - LOG.error('StdOut :%s', err.stdout) - LOG.error('StdErr :%s', err.stderr) - raise - - def _mangle_lv_name(self, name): - # Linux LVM reserves name that starts with snapshot, so that - # such volume name can't be created. Mangle it. - if not name.startswith('snapshot'): - return name - return '_' + name - - def _lv_is_active(self, name): - cmd = LVM.LVM_CMD_PREFIX + ['lvdisplay', '--noheading', '-C', '-o', - 'Attr', '%s/%s' % (self.vg_name, name)] - out, _err = self._execute(*cmd, - root_helper=self._root_helper, - run_as_root=True) - if out: - out = out.strip() - if (out[4] == 'a'): - return True - return False - - def deactivate_lv(self, name): - lv_path = self.vg_name + '/' + self._mangle_lv_name(name) - cmd = ['lvchange', '-a', 'n'] - cmd.append(lv_path) - try: - self._execute(*cmd, - root_helper=self._root_helper, - run_as_root=True) - except putils.ProcessExecutionError as err: - LOG.exception('Error deactivating LV') - LOG.error('Cmd :%s', err.cmd) - LOG.error('StdOut :%s', err.stdout) - LOG.error('StdErr :%s', err.stderr) - raise - - # Wait until lv is deactivated to return in - # order to prevent a race condition. 
- self._wait_for_volume_deactivation(name) - - @utils.retry(exceptions=exception.VolumeNotDeactivated, retries=5, - backoff_rate=2) - def _wait_for_volume_deactivation(self, name): - LOG.debug("Checking to see if volume %s has been deactivated.", - name) - if self._lv_is_active(name): - LOG.debug("Volume %s is still active.", name) - raise exception.VolumeNotDeactivated(name=name) - else: - LOG.debug("Volume %s has been deactivated.", name) - - def activate_lv(self, name, is_snapshot=False, permanent=False): - """Ensure that logical volume/snapshot logical volume is activated. - - :param name: Name of LV to activate - :param is_snapshot: whether LV is a snapshot - :param permanent: whether we should drop skipactivation flag - :raises putils.ProcessExecutionError: - """ - - # This is a no-op if requested for a snapshot on a version - # of LVM that doesn't support snapshot activation. - # (Assume snapshot LV is always active.) - if is_snapshot and not self.supports_snapshot_lv_activation: - return - - lv_path = self.vg_name + '/' + self._mangle_lv_name(name) - - # Must pass --yes to activate both the snap LV and its origin LV. - # Otherwise lvchange asks if you would like to do this interactively, - # and fails. - cmd = ['lvchange', '-a', 'y', '--yes'] - - if self.supports_lvchange_ignoreskipactivation: - # If permanent=True is specified, drop the skipactivation flag in - # order to make this LV automatically activated after next reboot. - if permanent: - cmd += ['-k', 'n'] - else: - cmd.append('-K') - - cmd.append(lv_path) - - try: - self._execute(*cmd, - root_helper=self._root_helper, - run_as_root=True) - except putils.ProcessExecutionError as err: - LOG.exception('Error activating LV') - LOG.error('Cmd :%s', err.cmd) - LOG.error('StdOut :%s', err.stdout) - LOG.error('StdErr :%s', err.stderr) - raise - - @utils.retry(putils.ProcessExecutionError) - def delete(self, name): - """Delete logical volume or snapshot. 
- - :param name: Name of LV to delete - - """ - - def run_udevadm_settle(): - self._execute('udevadm', 'settle', - root_helper=self._root_helper, run_as_root=True, - check_exit_code=False) - - # LV removal seems to be a race with other writers or udev in - # some cases (see LP #1270192), so we enable retry deactivation - LVM_CONFIG = 'activation { retry_deactivation = 1} ' - - try: - self._execute( - 'lvremove', - '--config', LVM_CONFIG, - '-f', - '%s/%s' % (self.vg_name, name), - root_helper=self._root_helper, run_as_root=True) - except putils.ProcessExecutionError as err: - LOG.debug('Error reported running lvremove: CMD: %(command)s, ' - 'RESPONSE: %(response)s', - {'command': err.cmd, 'response': err.stderr}) - - LOG.debug('Attempting udev settle and retry of lvremove...') - run_udevadm_settle() - - # The previous failing lvremove -f might leave behind - # suspended devices; when lvmetad is not available, any - # further lvm command will block forever. - # Therefore we need to skip suspended devices on retry. - LVM_CONFIG += 'devices { ignore_suspended_devices = 1}' - - self._execute( - 'lvremove', - '--config', LVM_CONFIG, - '-f', - '%s/%s' % (self.vg_name, name), - root_helper=self._root_helper, run_as_root=True) - LOG.debug('Successfully deleted volume: %s after ' - 'udev settle.', name) - - def revert(self, snapshot_name): - """Revert an LV to snapshot. 
- - :param snapshot_name: Name of snapshot to revert - """ - - cmd = ['lvconvert', '--merge', '%s/%s' % (self.vg_name, snapshot_name)] - try: - self._execute(*cmd, root_helper=self._root_helper, - run_as_root=True) - except putils.ProcessExecutionError as err: - LOG.exception('Error Revert Volume') - LOG.error('Cmd :%s', err.cmd) - LOG.error('StdOut :%s', err.stdout) - LOG.error('StdErr :%s', err.stderr) - raise - - def lv_has_snapshot(self, name): - cmd = LVM.LVM_CMD_PREFIX + ['lvdisplay', '--noheading', '-C', '-o', - 'Attr', '%s/%s' % (self.vg_name, name)] - out, _err = self._execute(*cmd, - root_helper=self._root_helper, - run_as_root=True) - if out: - out = out.strip() - if (out[0] == 'o') or (out[0] == 'O'): - return True - return False - - def lv_is_snapshot(self, name): - """Return True if LV is a snapshot, False otherwise.""" - cmd = LVM.LVM_CMD_PREFIX + ['lvdisplay', '--noheading', '-C', '-o', - 'Attr', '%s/%s' % (self.vg_name, name)] - out, _err = self._execute(*cmd, - root_helper=self._root_helper, - run_as_root=True) - out = out.strip() - if out: - if (out[0] == 's'): - return True - return False - - def lv_is_open(self, name): - """Return True if LV is currently open, False otherwise.""" - cmd = LVM.LVM_CMD_PREFIX + ['lvdisplay', '--noheading', '-C', '-o', - 'Attr', '%s/%s' % (self.vg_name, name)] - out, _err = self._execute(*cmd, - root_helper=self._root_helper, - run_as_root=True) - out = out.strip() - if out: - if (out[5] == 'o'): - return True - return False - - def lv_get_origin(self, name): - """Return the origin of an LV that is a snapshot, None otherwise.""" - cmd = LVM.LVM_CMD_PREFIX + ['lvdisplay', '--noheading', '-C', '-o', - 'Origin', '%s/%s' % (self.vg_name, name)] - out, _err = self._execute(*cmd, - root_helper=self._root_helper, - run_as_root=True) - out = out.strip() - if out: - return out - return None - - def extend_volume(self, lv_name, new_size): - """Extend the size of an existing volume.""" - # Volumes with snaps have attributes 
'o' or 'O' and will be - # deactivated, but Thin Volumes with snaps have attribute 'V' - # and won't be deactivated because the lv_has_snapshot method looks - # for 'o' or 'O' - has_snapshot = self.lv_has_snapshot(lv_name) - if has_snapshot: - self.deactivate_lv(lv_name) - try: - cmd = LVM.LVM_CMD_PREFIX + ['lvextend', '-L', new_size, - '%s/%s' % (self.vg_name, lv_name)] - self._execute(*cmd, root_helper=self._root_helper, - run_as_root=True) - except putils.ProcessExecutionError as err: - LOG.exception('Error extending Volume') - LOG.error('Cmd :%s', err.cmd) - LOG.error('StdOut :%s', err.stdout) - LOG.error('StdErr :%s', err.stderr) - raise - if has_snapshot: - self.activate_lv(lv_name) - - def vg_mirror_free_space(self, mirror_count): - free_capacity = 0.0 - - disks = [] - for pv in self.pv_list: - disks.append(float(pv['available'])) - - while True: - disks = sorted([a for a in disks if a > 0.0], reverse=True) - if len(disks) <= mirror_count: - break - # consume the smallest disk - disk = disks[-1] - disks = disks[:-1] - # match extents for each mirror on the largest disks - for index in list(range(mirror_count)): - disks[index] -= disk - free_capacity += disk - - return free_capacity - - def vg_mirror_size(self, mirror_count): - return (self.vg_free_space / (mirror_count + 1)) - - def rename_volume(self, lv_name, new_name): - """Change the name of an existing volume.""" - - try: - self._execute('lvrename', self.vg_name, lv_name, new_name, - root_helper=self._root_helper, - run_as_root=True) - except putils.ProcessExecutionError as err: - LOG.exception('Error renaming logical volume') - LOG.error('Cmd :%s', err.cmd) - LOG.error('StdOut :%s', err.stdout) - LOG.error('StdErr :%s', err.stderr) - raise diff --git a/cinder/cmd/__init__.py b/cinder/cmd/__init__.py deleted file mode 100644 index e69de29bb..000000000 diff --git a/cinder/cmd/api.py b/cinder/cmd/api.py deleted file mode 100644 index aef3f58cb..000000000 --- a/cinder/cmd/api.py +++ /dev/null @@ -1,63 +0,0 
@@ -#!/usr/bin/env python -# Copyright 2010 United States Government as represented by the -# Administrator of the National Aeronautics and Space Administration. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -"""Starter script for Cinder OS API.""" - -import eventlet -eventlet.monkey_patch() - -import logging as python_logging -import sys - -from cinder import objects - -from oslo_config import cfg -from oslo_log import log as logging -from oslo_reports import guru_meditation_report as gmr -from oslo_reports import opts as gmr_opts - -from cinder import i18n -i18n.enable_lazy() - -# Need to register global_opts -from cinder.common import config -from cinder import rpc -from cinder import service -from cinder import utils -from cinder import version - - -CONF = cfg.CONF - - -def main(): - objects.register_all() - gmr_opts.set_defaults(CONF) - CONF(sys.argv[1:], project='cinder', - version=version.version_string()) - config.set_middleware_defaults() - logging.setup(CONF, "cinder") - python_logging.captureWarnings(True) - utils.monkey_patch() - - gmr.TextGuruMeditation.setup_autorun(version, conf=CONF) - - rpc.init(CONF) - launcher = service.process_launcher() - server = service.WSGIService('osapi_volume') - launcher.launch_service(server, workers=server.workers) - launcher.wait() diff --git a/cinder/cmd/backup.py b/cinder/cmd/backup.py deleted file mode 100644 index 8c913053b..000000000 --- a/cinder/cmd/backup.py +++ /dev/null @@ -1,60 +0,0 @@ 
-#!/usr/bin/env python - -# Copyright (C) 2012 Hewlett-Packard Development Company, L.P. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -"""Starter script for Cinder Volume Backup.""" - -import logging as python_logging -import shlex -import sys - -import eventlet -from oslo_config import cfg -from oslo_log import log as logging -from oslo_privsep import priv_context -from oslo_reports import guru_meditation_report as gmr -from oslo_reports import opts as gmr_opts - -eventlet.monkey_patch() - -from cinder import i18n -i18n.enable_lazy() - -# Need to register global_opts -from cinder.common import config # noqa -from cinder import objects -from cinder import service -from cinder import utils -from cinder import version - - -CONF = cfg.CONF - - -def main(): - objects.register_all() - gmr_opts.set_defaults(CONF) - CONF(sys.argv[1:], project='cinder', - version=version.version_string()) - logging.setup(CONF, "cinder") - python_logging.captureWarnings(True) - priv_context.init(root_helper=shlex.split(utils.get_root_helper())) - utils.monkey_patch() - gmr.TextGuruMeditation.setup_autorun(version, conf=CONF) - server = service.Service.create(binary='cinder-backup', - coordination=True) - service.serve(server) - service.wait() diff --git a/cinder/cmd/manage.py b/cinder/cmd/manage.py deleted file mode 100644 index f38f7a929..000000000 --- a/cinder/cmd/manage.py +++ /dev/null @@ -1,810 +0,0 @@ -#!/usr/bin/env python -# Copyright (c) 2011 X.commerce, a 
business unit of eBay Inc. -# Copyright 2010 United States Government as represented by the -# Administrator of the National Aeronautics and Space Administration. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -# Interactive shell based on Django: -# -# Copyright (c) 2005, the Lawrence Journal-World -# All rights reserved. -# -# Redistribution and use in source and binary forms, with or without -# modification, are permitted provided that the following conditions are met: -# -# 1. Redistributions of source code must retain the above copyright notice, -# this list of conditions and the following disclaimer. -# -# 2. Redistributions in binary form must reproduce the above copyright -# notice, this list of conditions and the following disclaimer in the -# documentation and/or other materials provided with the distribution. -# -# 3. Neither the name of Django nor the names of its contributors may be -# used to endorse or promote products derived from this software without -# specific prior written permission. -# -# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -# A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT -# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - - -""" - CLI interface for cinder management. -""" - -from __future__ import print_function - - -import logging as python_logging -import os -import prettytable -import sys -import time - -from oslo_config import cfg -from oslo_db import exception as db_exc -from oslo_db.sqlalchemy import migration -from oslo_log import log as logging -import oslo_messaging as messaging -from oslo_utils import timeutils - -# Need to register global_opts -from cinder.common import config # noqa -from cinder.common import constants -from cinder import context -from cinder import db -from cinder.db import migration as db_migration -from cinder.db.sqlalchemy import api as db_api -from cinder import exception -from cinder.i18n import _ -from cinder import objects -from cinder import rpc -from cinder import version -from cinder.volume import utils as vutils - - -CONF = cfg.CONF - - -# Decorators for actions -def args(*args, **kwargs): - def _decorator(func): - func.__dict__.setdefault('args', []).insert(0, (args, kwargs)) - return func - return _decorator - - -class ShellCommands(object): - def bpython(self): - """Runs a bpython shell. - - Falls back to Ipython/python shell if unavailable - """ - self.run('bpython') - - def ipython(self): - """Runs an Ipython shell. - - Falls back to Python shell if unavailable - """ - self.run('ipython') - - def python(self): - """Runs a python shell. 
- - Falls back to Python shell if unavailable - """ - self.run('python') - - @args('--shell', - metavar='', - help='Python shell') - def run(self, shell=None): - """Runs a Python interactive interpreter.""" - if not shell: - shell = 'bpython' - - if shell == 'bpython': - try: - import bpython - bpython.embed() - except ImportError: - shell = 'ipython' - if shell == 'ipython': - try: - from IPython import embed - embed() - except ImportError: - try: - # Ipython < 0.11 - # Explicitly pass an empty list as arguments, because - # otherwise IPython would use sys.argv from this script. - import IPython - - shell = IPython.Shell.IPShell(argv=[]) - shell.mainloop() - except ImportError: - # no IPython module - shell = 'python' - - if shell == 'python': - import code - try: - # Try activating rlcompleter, because it's handy. - import readline - except ImportError: - pass - else: - # We don't have to wrap the following import in a 'try', - # because we already know 'readline' was imported successfully. - import rlcompleter # noqa - readline.parse_and_bind("tab:complete") - code.interact() - - @args('--path', required=True, help='Script path') - def script(self, path): - """Runs the script from the specified path with flags set properly.""" - exec(compile(open(path).read(), path, 'exec'), locals(), globals()) - - -def _db_error(caught_exception): - print('%s' % caught_exception) - print(_("The above error may show that the database has not " - "been created.\nPlease create a database using " - "'cinder-manage db sync' before running this command.")) - sys.exit(1) - - -class HostCommands(object): - """List hosts.""" - - @args('zone', nargs='?', default=None, - help='Availability Zone (default: %(default)s)') - def list(self, zone=None): - """Show a list of all physical hosts. - - Can be filtered by zone. 
- args: [zone] - """ - print(_("%(host)-25s\t%(zone)-15s") % {'host': 'host', 'zone': 'zone'}) - ctxt = context.get_admin_context() - services = objects.ServiceList.get_all(ctxt) - if zone: - services = [s for s in services if s.availability_zone == zone] - hosts = [] - for srv in services: - if not [h for h in hosts if h['host'] == srv['host']]: - hosts.append(srv) - - for h in hosts: - print(_("%(host)-25s\t%(availability_zone)-15s") - % {'host': h['host'], - 'availability_zone': h['availability_zone']}) - - -class DbCommands(object): - """Class for managing the database.""" - - online_migrations = () - - def __init__(self): - pass - - @args('version', nargs='?', default=None, type=int, - help='Database version') - def sync(self, version=None): - """Sync the database up to the most recent version.""" - if version is not None and version > db.MAX_INT: - print(_('Version should be less than or equal to ' - '%(max_version)d.') % {'max_version': db.MAX_INT}) - sys.exit(1) - try: - return db_migration.db_sync(version) - except db_exc.DbMigrationError as ex: - print("Error during database migration: %s" % ex) - sys.exit(1) - - def version(self): - """Print the current database version.""" - print(migration.db_version(db_api.get_engine(), - db_migration.MIGRATE_REPO_PATH, - db_migration.INIT_VERSION)) - - @args('age_in_days', type=int, - help='Purge deleted rows older than age in days') - def purge(self, age_in_days): - """Purge deleted rows older than a given age from cinder tables.""" - age_in_days = int(age_in_days) - if age_in_days <= 0: - print(_("Must supply a positive, non-zero value for age")) - sys.exit(1) - if age_in_days >= (int(time.time()) / 86400): - print(_("Maximum age is count of days since epoch.")) - sys.exit(1) - ctxt = context.get_admin_context() - - try: - db.purge_deleted_rows(ctxt, age_in_days) - except db_exc.DBReferenceError: - print(_("Purge command failed, check cinder-manage " - "logs for more details.")) - sys.exit(1) - - def 
_run_migration(self, ctxt, max_count, ignore_state): - ran = 0 - migrations = {} - for migration_meth in self.online_migrations: - count = max_count - ran - try: - found, done = migration_meth(ctxt, count, ignore_state) - except Exception: - print(_("Error attempting to run %(method)s") % - {'method': migration_meth.__name__}) - found = done = 0 - - name = migration_meth.__name__ - remaining = found - done - if found: - print(_('%(found)i rows matched query %(meth)s, %(done)i ' - 'migrated, %(remaining)i remaining') % {'found': found, - 'meth': name, - 'done': done, - 'remaining': - remaining}) - migrations.setdefault(name, (0, 0, 0)) - migrations[name] = (migrations[name][0] + found, - migrations[name][1] + done, - migrations[name][2] + remaining) - if max_count is not None: - ran += done - if ran >= max_count: - break - return migrations - - @args('--max_count', metavar='', dest='max_count', type=int, - help='Maximum number of objects to consider.') - @args('--ignore_state', action='store_true', dest='ignore_state', - help='Force records to migrate even if another operation is ' - 'performed on them. 
This may be dangerous, please refer to ' - 'release notes for more information.') - def online_data_migrations(self, max_count=None, ignore_state=False): - """Perform online data migrations for the release in batches.""" - ctxt = context.get_admin_context() - if max_count is not None: - unlimited = False - if max_count < 1: - print(_('Must supply a positive value for max_number.')) - sys.exit(127) - else: - unlimited = True - max_count = 50 - print(_('Running batches of %i until complete.') % max_count) - - ran = None - migration_info = {} - while ran is None or ran != 0: - migrations = self._run_migration(ctxt, max_count, ignore_state) - migration_info.update(migrations) - ran = sum([done for found, done, remaining in migrations.values()]) - if not unlimited: - break - - t = prettytable.PrettyTable([_('Migration'), - _('Found'), - _('Done'), - _('Remaining')]) - for name in sorted(migration_info.keys()): - info = migration_info[name] - t.add_row([name, info[0], info[1], info[2]]) - print(t) - - sys.exit(1 if ran else 0) - - -class VersionCommands(object): - """Class for exposing the codebase version.""" - - def __init__(self): - pass - - def list(self): - print(version.version_string()) - - def __call__(self): - self.list() - - -class VolumeCommands(object): - """Methods for dealing with a cloud in an odd state.""" - - def __init__(self): - self._client = None - - def _rpc_client(self): - if self._client is None: - if not rpc.initialized(): - rpc.init(CONF) - target = messaging.Target(topic=constants.VOLUME_TOPIC) - serializer = objects.base.CinderObjectSerializer() - self._client = rpc.get_client(target, serializer=serializer) - - return self._client - - @args('volume_id', - help='Volume ID to be deleted') - def delete(self, volume_id): - """Delete a volume, bypassing the check that it must be available.""" - ctxt = context.get_admin_context() - volume = objects.Volume.get_by_id(ctxt, volume_id) - host = vutils.extract_host(volume.host) if volume.host else None - 
- if not host: - print(_("Volume not yet assigned to host.")) - print(_("Deleting volume from database and skipping rpc.")) - volume.destroy() - return - - if volume.status == 'in-use': - print(_("Volume is in-use.")) - print(_("Detach volume from instance and then try again.")) - return - - cctxt = self._rpc_client().prepare(server=host) - cctxt.cast(ctxt, "delete_volume", volume_id=volume.id, volume=volume) - - @args('--currenthost', required=True, help='Existing volume host name') - @args('--newhost', required=True, help='New volume host name') - def update_host(self, currenthost, newhost): - """Modify the host name associated with a volume. - - Particularly to recover from cases where one has moved - their Cinder Volume node, or modified their backend_name in a - multi-backend config. - """ - ctxt = context.get_admin_context() - volumes = db.volume_get_all_by_host(ctxt, - currenthost) - for v in volumes: - db.volume_update(ctxt, v['id'], - {'host': newhost}) - - -class ConfigCommands(object): - """Class for exposing the flags defined by flag_file(s).""" - - def __init__(self): - pass - - @args('param', nargs='?', default=None, - help='Configuration parameter to display (default: %(default)s)') - def list(self, param=None): - """List parameters configured for cinder. - - Lists all parameters configured for cinder unless an optional argument - is specified. If the parameter is specified we only print the - requested parameter. If the parameter is not found an appropriate - error is produced by .get*(). 
- """ - param = param and param.strip() - if param: - print('%s = %s' % (param, CONF.get(param))) - else: - for key, value in CONF.items(): - print('%s = %s' % (key, value)) - - -class GetLogCommands(object): - """Get logging information.""" - - def errors(self): - """Get all of the errors from the log files.""" - error_found = 0 - if CONF.log_dir: - logs = [x for x in os.listdir(CONF.log_dir) if x.endswith('.log')] - for file in logs: - log_file = os.path.join(CONF.log_dir, file) - lines = [line.strip() for line in open(log_file, "r")] - lines.reverse() - print_name = 0 - for index, line in enumerate(lines): - if line.find(" ERROR ") > 0: - error_found += 1 - if print_name == 0: - print(log_file + ":-") - print_name = 1 - print(_("Line %(dis)d : %(line)s") % - {'dis': len(lines) - index, 'line': line}) - if error_found == 0: - print(_("No errors in logfiles!")) - - @args('num_entries', nargs='?', type=int, default=10, - help='Number of entries to list (default: %(default)d)') - def syslog(self, num_entries=10): - """Get of the cinder syslog events.""" - entries = int(num_entries) - count = 0 - log_file = '' - if os.path.exists('/var/log/syslog'): - log_file = '/var/log/syslog' - elif os.path.exists('/var/log/messages'): - log_file = '/var/log/messages' - else: - print(_("Unable to find system log file!")) - sys.exit(1) - lines = [line.strip() for line in open(log_file, "r")] - lines.reverse() - print(_("Last %s cinder syslog entries:-") % (entries)) - for line in lines: - if line.find("cinder") > 0: - count += 1 - print(_("%s") % (line)) - if count == entries: - break - - if count == 0: - print(_("No cinder entries in syslog!")) - - -class BackupCommands(object): - """Methods for managing backups.""" - - def list(self): - """List all backups. - - List all backups (including ones in progress) and the host - on which the backup operation is running. 
- """ - ctxt = context.get_admin_context() - backups = objects.BackupList.get_all(ctxt) - - hdr = "%-32s\t%-32s\t%-32s\t%-24s\t%-24s\t%-12s\t%-12s\t%-12s\t%-12s" - print(hdr % (_('ID'), - _('User ID'), - _('Project ID'), - _('Host'), - _('Name'), - _('Container'), - _('Status'), - _('Size'), - _('Object Count'))) - - res = "%-32s\t%-32s\t%-32s\t%-24s\t%-24s\t%-12s\t%-12s\t%-12d\t%-12d" - for backup in backups: - object_count = 0 - if backup['object_count'] is not None: - object_count = backup['object_count'] - print(res % (backup['id'], - backup['user_id'], - backup['project_id'], - backup['host'], - backup['display_name'], - backup['container'], - backup['status'], - backup['size'], - object_count)) - - @args('--currenthost', required=True, help='Existing backup host name') - @args('--newhost', required=True, help='New backup host name') - def update_backup_host(self, currenthost, newhost): - """Modify the host name associated with a backup. - - Particularly to recover from cases where one has moved - their Cinder Backup node, and not set backup_use_same_backend. 
- """ - ctxt = context.get_admin_context() - backups = objects.BackupList.get_all_by_host(ctxt, currenthost) - for bk in backups: - bk.host = newhost - bk.save() - - -class BaseCommand(object): - @staticmethod - def _normalize_time(time_field): - return time_field and timeutils.normalize_time(time_field) - - @staticmethod - def _state_repr(is_up): - return ':-)' if is_up else 'XXX' - - -class ServiceCommands(BaseCommand): - """Methods for managing services.""" - def list(self): - """Show a list of all cinder services.""" - ctxt = context.get_admin_context() - services = objects.ServiceList.get_all(ctxt) - print_format = "%-16s %-36s %-16s %-10s %-5s %-20s %-12s %-15s %-36s" - print(print_format % (_('Binary'), - _('Host'), - _('Zone'), - _('Status'), - _('State'), - _('Updated At'), - _('RPC Version'), - _('Object Version'), - _('Cluster'))) - for svc in services: - art = self._state_repr(svc.is_up) - status = 'disabled' if svc.disabled else 'enabled' - updated_at = self._normalize_time(svc.updated_at) - rpc_version = svc.rpc_current_version - object_version = svc.object_current_version - cluster = svc.cluster_name or '' - print(print_format % (svc.binary, svc.host.partition('.')[0], - svc.availability_zone, status, art, - updated_at, rpc_version, object_version, - cluster)) - - @args('binary', type=str, - help='Service to delete from the host.') - @args('host_name', type=str, - help='Host from which to remove the service.') - def remove(self, binary, host_name): - """Completely removes a service.""" - ctxt = context.get_admin_context() - try: - svc = objects.Service.get_by_args(ctxt, host_name, binary) - svc.destroy() - except exception.ServiceNotFound as e: - print(_("Host not found. 
Failed to remove %(service)s" - " on %(host)s.") % - {'service': binary, 'host': host_name}) - print(u"%s" % e.args) - return 2 - print(_("Service %(service)s on host %(host)s removed.") % - {'service': binary, 'host': host_name}) - - -class ClusterCommands(BaseCommand): - """Methods for managing clusters.""" - def list(self): - """Show a list of all cinder services.""" - ctxt = context.get_admin_context() - clusters = objects.ClusterList.get_all(ctxt, services_summary=True) - print_format = "%-36s %-16s %-10s %-5s %-20s %-7s %-12s %-20s" - print(print_format % (_('Name'), - _('Binary'), - _('Status'), - _('State'), - _('Heartbeat'), - _('Hosts'), - _('Down Hosts'), - _('Updated At'))) - for cluster in clusters: - art = self._state_repr(cluster.is_up) - status = 'disabled' if cluster.disabled else 'enabled' - heartbeat = self._normalize_time(cluster.last_heartbeat) - updated_at = self._normalize_time(cluster.updated_at) - print(print_format % (cluster.name, cluster.binary, status, art, - heartbeat, cluster.num_hosts, - cluster.num_down_hosts, updated_at)) - - @args('--recursive', action='store_true', default=False, - help='Delete associated hosts.') - @args('binary', type=str, - help='Service to delete from the cluster.') - @args('cluster-name', type=str, help='Cluster to delete.') - def remove(self, recursive, binary, cluster_name): - """Completely removes a cluster.""" - ctxt = context.get_admin_context() - try: - cluster = objects.Cluster.get_by_id(ctxt, None, name=cluster_name, - binary=binary, - get_services=recursive) - except exception.ClusterNotFound: - print(_("Couldn't remove cluster %s because it doesn't exist.") % - cluster_name) - return 2 - - if recursive: - for service in cluster.services: - service.destroy() - - try: - cluster.destroy() - except exception.ClusterHasHosts: - print(_("Couldn't remove cluster %s because it still has hosts.") % - cluster_name) - return 2 - - msg = _('Cluster %s successfully removed.') % cluster_name - if recursive: - 
msg = (_('%(msg)s And %(num)s services from the cluster were also ' - 'removed.') % {'msg': msg, 'num': len(cluster.services)}) - print(msg) - - @args('--full-rename', dest='partial', - action='store_false', default=True, - help='Do full cluster rename instead of just replacing provided ' - 'current cluster name and preserving backend and/or pool info.') - @args('current', help='Current cluster name.') - @args('new', help='New cluster name.') - def rename(self, partial, current, new): - """Rename cluster name for Volumes and Consistency Groups. - - Useful when you want to rename a cluster, particularly when the - backend_name has been modified in a multi-backend config or we have - moved from a single backend to multi-backend. - """ - ctxt = context.get_admin_context() - - # Convert empty strings to None - current = current or None - new = new or None - - # Update Volumes - num_vols = objects.VolumeList.include_in_cluster( - ctxt, new, partial_rename=partial, cluster_name=current) - - # Update Consistency Groups - num_cgs = objects.ConsistencyGroupList.include_in_cluster( - ctxt, new, partial_rename=partial, cluster_name=current) - - if num_vols or num_cgs: - msg = _('Successfully renamed %(num_vols)s volumes and ' - '%(num_cgs)s consistency groups from cluster %(current)s ' - 'to %(new)s') - print(msg % {'num_vols': num_vols, 'num_cgs': num_cgs, 'new': new, - 'current': current}) - else: - msg = _('No volumes or consistency groups exist in cluster ' - '%(current)s.') - print(msg % {'current': current}) - return 2 - - -class ConsistencyGroupCommands(object): - """Methods for managing consistency groups.""" - - @args('--currenthost', required=True, help='Existing CG host name') - @args('--newhost', required=True, help='New CG host name') - def update_cg_host(self, currenthost, newhost): - """Modify the host name associated with a Consistency Group. 
- - Particularly to recover from cases where one has moved - a host from single backend to multi-backend, or changed the host - configuration option, or modified the backend_name in a multi-backend - config. - """ - - ctxt = context.get_admin_context() - groups = objects.ConsistencyGroupList.get_all( - ctxt, {'host': currenthost}) - for gr in groups: - gr.host = newhost - gr.save() - - -CATEGORIES = { - 'backup': BackupCommands, - 'config': ConfigCommands, - 'cluster': ClusterCommands, - 'cg': ConsistencyGroupCommands, - 'db': DbCommands, - 'host': HostCommands, - 'logs': GetLogCommands, - 'service': ServiceCommands, - 'shell': ShellCommands, - 'version': VersionCommands, - 'volume': VolumeCommands, -} - - -def methods_of(obj): - """Return non-private methods from an object. - - Get all callable methods of an object that don't start with underscore - :return: a list of tuples of the form (method_name, method) - """ - result = [] - for i in dir(obj): - if callable(getattr(obj, i)) and not i.startswith('_'): - result.append((i, getattr(obj, i))) - return result - - -def add_command_parsers(subparsers): - for category in CATEGORIES: - command_object = CATEGORIES[category]() - - parser = subparsers.add_parser(category) - parser.set_defaults(command_object=command_object) - - category_subparsers = parser.add_subparsers(dest='action') - - for (action, action_fn) in methods_of(command_object): - parser = category_subparsers.add_parser(action) - - action_kwargs = [] - for args, kwargs in getattr(action_fn, 'args', []): - parser.add_argument(*args, **kwargs) - - parser.set_defaults(action_fn=action_fn) - parser.set_defaults(action_kwargs=action_kwargs) - - -category_opt = cfg.SubCommandOpt('category', - title='Command categories', - handler=add_command_parsers) - - -def get_arg_string(args): - if args[0] == '-': - # (Note)zhiteng: args starts with FLAGS.oparser.prefix_chars - # is optional args. 
Notice that cfg module takes care of - # actual ArgParser so prefix_chars is always '-'. - if args[1] == '-': - # This is long optional arg - args = args[2:] - else: - args = args[1:] - - # We convert dashes to underscores so we can have cleaner optional arg - # names - if args: - args = args.replace('-', '_') - - return args - - -def fetch_func_args(func): - fn_kwargs = {} - for args, kwargs in getattr(func, 'args', []): - # Argparser `dest` configuration option takes precedence for the name - arg = kwargs.get('dest') or get_arg_string(args[0]) - fn_kwargs[arg] = getattr(CONF.category, arg) - - return fn_kwargs - - -def main(): - objects.register_all() - """Parse options and call the appropriate class/method.""" - CONF.register_cli_opt(category_opt) - script_name = sys.argv[0] - if len(sys.argv) < 2: - print(_("\nOpenStack Cinder version: %(version)s\n") % - {'version': version.version_string()}) - print(script_name + " category action []") - print(_("Available categories:")) - for category in CATEGORIES: - print(_("\t%s") % category) - sys.exit(2) - - try: - CONF(sys.argv[1:], project='cinder', - version=version.version_string()) - logging.setup(CONF, "cinder") - python_logging.captureWarnings(True) - except cfg.ConfigDirNotFoundError as details: - print(_("Invalid directory: %s") % details) - sys.exit(2) - except cfg.ConfigFilesNotFoundError as e: - cfg_files = e.config_files - print(_("Failed to read configuration file(s): %s") % cfg_files) - sys.exit(2) - - fn = CONF.category.action_fn - fn_kwargs = fetch_func_args(fn) - fn(**fn_kwargs) diff --git a/cinder/cmd/rtstool.py b/cinder/cmd/rtstool.py deleted file mode 100644 index 7713c685b..000000000 --- a/cinder/cmd/rtstool.py +++ /dev/null @@ -1,351 +0,0 @@ -#!/usr/bin/env python - -# Copyright 2012 - 2013 Red Hat, Inc. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -import os -import sys - -# We always use rtslib-fb, but until version 2.1.52 it didn't have its own -# namespace, so we must be backwards compatible. -try: - import rtslib_fb -except ImportError: - import rtslib as rtslib_fb - - -from cinder import i18n -from cinder.i18n import _ - -i18n.enable_lazy() - - -class RtstoolError(Exception): - pass - - -class RtstoolImportError(RtstoolError): - pass - - -def create(backing_device, name, userid, password, iser_enabled, - initiator_iqns=None, portals_ips=None, portals_port=3260): - # List of IPS that will not raise an error when they fail binding. - # Originally we will fail on all binding errors. 
- ips_allow_fail = () - - try: - rtsroot = rtslib_fb.root.RTSRoot() - except rtslib_fb.utils.RTSLibError: - print(_('Ensure that configfs is mounted at /sys/kernel/config.')) - raise - - # Look to see if BlockStorageObject already exists - for x in rtsroot.storage_objects: - if x.name == name: - # Already exists, use this one - return - - so_new = rtslib_fb.BlockStorageObject(name=name, - dev=backing_device) - - target_new = rtslib_fb.Target(rtslib_fb.FabricModule('iscsi'), name, - 'create') - - tpg_new = rtslib_fb.TPG(target_new, mode='create') - tpg_new.set_attribute('authentication', '1') - - lun_new = rtslib_fb.LUN(tpg_new, storage_object=so_new) - - if initiator_iqns: - initiator_iqns = initiator_iqns.strip(' ') - for i in initiator_iqns.split(','): - acl_new = rtslib_fb.NodeACL(tpg_new, i, mode='create') - acl_new.chap_userid = userid - acl_new.chap_password = password - - rtslib_fb.MappedLUN(acl_new, lun_new.lun, lun_new.lun) - - tpg_new.enable = 1 - - # If no ips are given we'll bind to all IPv4 and v6 - if not portals_ips: - portals_ips = ('0.0.0.0', '[::0]') - # TODO(emh): Binding to IPv6 fails sometimes -- let pass for now. 
- ips_allow_fail = ('[::0]',) - - for ip in portals_ips: - try: - # rtslib expects IPv6 addresses to be surrounded by brackets - portal = rtslib_fb.NetworkPortal(tpg_new, _canonicalize_ip(ip), - portals_port, mode='any') - except rtslib_fb.utils.RTSLibError: - raise_exc = ip not in ips_allow_fail - msg_type = 'Error' if raise_exc else 'Warning' - print(_('%(msg_type)s: creating NetworkPortal: ensure port ' - '%(port)d on ip %(ip)s is not in use by another service.') - % {'msg_type': msg_type, 'port': portals_port, 'ip': ip}) - if raise_exc: - raise - else: - try: - if iser_enabled == 'True': - portal.iser = True - except rtslib_fb.utils.RTSLibError: - print(_('Error enabling iSER for NetworkPortal: please ensure ' - 'that RDMA is supported on your iSCSI port %(port)d ' - 'on ip %(ip)s.') % {'port': portals_port, 'ip': ip}) - raise - - -def _lookup_target(target_iqn, initiator_iqn): - try: - rtsroot = rtslib_fb.root.RTSRoot() - except rtslib_fb.utils.RTSLibError: - print(_('Ensure that configfs is mounted at /sys/kernel/config.')) - raise - - # Look for the target - for t in rtsroot.targets: - if t.wwn == target_iqn: - return t - raise RtstoolError(_('Could not find target %s') % target_iqn) - - -def add_initiator(target_iqn, initiator_iqn, userid, password): - target = _lookup_target(target_iqn, initiator_iqn) - tpg = next(target.tpgs) # get the first one - for acl in tpg.node_acls: - # See if this ACL configuration already exists - if acl.node_wwn.lower() == initiator_iqn.lower(): - # No further action required - return - - acl_new = rtslib_fb.NodeACL(tpg, initiator_iqn, mode='create') - acl_new.chap_userid = userid - acl_new.chap_password = password - - rtslib_fb.MappedLUN(acl_new, 0, tpg_lun=0) - - -def delete_initiator(target_iqn, initiator_iqn): - target = _lookup_target(target_iqn, initiator_iqn) - tpg = next(target.tpgs) # get the first one - for acl in tpg.node_acls: - if acl.node_wwn.lower() == initiator_iqn.lower(): - acl.delete() - return - - 
print(_('delete_initiator: %s ACL not found. Continuing.') % initiator_iqn) - # Return successfully. - - -def get_targets(): - rtsroot = rtslib_fb.root.RTSRoot() - for x in rtsroot.targets: - print(x.wwn) - - -def delete(iqn): - rtsroot = rtslib_fb.root.RTSRoot() - for x in rtsroot.targets: - if x.wwn == iqn: - x.delete() - break - - for x in rtsroot.storage_objects: - if x.name == iqn: - x.delete() - break - - -def verify_rtslib(): - for member in ['BlockStorageObject', 'FabricModule', 'LUN', - 'MappedLUN', 'NetworkPortal', 'NodeACL', 'root', - 'Target', 'TPG']: - if not hasattr(rtslib_fb, member): - raise RtstoolImportError(_("rtslib_fb is missing member %s: You " - "may need a newer python-rtslib-fb.") % - member) - - -def usage(): - print("Usage:") - print(sys.argv[0] + - " create [device] [name] [userid] [password] [iser_enabled]" + - " [-a] [-pPORT]") - print(sys.argv[0] + - " add-initiator [target_iqn] [userid] [password] [initiator_iqn]") - print(sys.argv[0] + - " delete-initiator [target_iqn] [initiator_iqn]") - print(sys.argv[0] + " get-targets") - print(sys.argv[0] + " delete [iqn]") - print(sys.argv[0] + " verify") - print(sys.argv[0] + " save [path_to_file]") - sys.exit(1) - - -def save_to_file(destination_file): - rtsroot = rtslib_fb.root.RTSRoot() - try: - # If default destination use rtslib default save file - if not destination_file: - destination_file = rtslib_fb.root.default_save_file - path_to_file = os.path.dirname(destination_file) - - # NOTE(geguileo): With default file we ensure path exists and - # create it if doesn't. - # Cinder's LIO target helper runs this as root, so it will have no - # problem creating directory /etc/target. - # If run manually from the command line without being root you will - # get an error, same as when creating and removing targets. 
- if not os.path.exists(path_to_file): - os.makedirs(path_to_file, 0o755) - - except OSError as exc: - raise RtstoolError(_('targetcli not installed and could not create ' - 'default directory (%(default_path)s): %(exc)s') % - {'default_path': path_to_file, 'exc': exc}) - try: - rtsroot.save_to_file(destination_file) - except (OSError, IOError) as exc: - raise RtstoolError(_('Could not save configuration to %(file_path)s: ' - '%(exc)s') % - {'file_path': destination_file, 'exc': exc}) - - -def restore_from_file(configration_file): - rtsroot = rtslib_fb.root.RTSRoot() - # If configuration file is None, use rtslib default save file. - if not configration_file: - configration_file = rtslib_fb.root.default_save_file - - try: - rtsroot.restore_from_file(configration_file) - except (OSError, IOError) as exc: - raise RtstoolError(_('Could not restore configuration file ' - '%(file_path)s: %(exc)s'), - {'file_path': configration_file, 'exc': exc}) - - -def parse_optional_create(argv): - optional_args = {} - - for arg in argv: - if arg.startswith('-a'): - ips = [ip for ip in arg[2:].split(',') if ip] - if not ips: - usage() - optional_args['portals_ips'] = ips - elif arg.startswith('-p'): - try: - optional_args['portals_port'] = int(arg[2:]) - except ValueError: - usage() - else: - optional_args['initiator_iqns'] = arg - return optional_args - - -def _canonicalize_ip(ip): - if ip.startswith('[') or "." 
in ip: - return ip - return "[" + ip + "]" - - -def main(argv=None): - if argv is None: - argv = sys.argv - - if len(argv) < 2: - usage() - - if argv[1] == 'create': - if len(argv) < 7: - usage() - - if len(argv) > 10: - usage() - - backing_device = argv[2] - name = argv[3] - userid = argv[4] - password = argv[5] - iser_enabled = argv[6] - - if len(argv) > 7: - optional_args = parse_optional_create(argv[7:]) - else: - optional_args = {} - - create(backing_device, name, userid, password, iser_enabled, - **optional_args) - - elif argv[1] == 'add-initiator': - if len(argv) < 6: - usage() - - target_iqn = argv[2] - userid = argv[3] - password = argv[4] - initiator_iqn = argv[5] - - add_initiator(target_iqn, initiator_iqn, userid, password) - - elif argv[1] == 'delete-initiator': - if len(argv) < 4: - usage() - - target_iqn = argv[2] - initiator_iqn = argv[3] - - delete_initiator(target_iqn, initiator_iqn) - - elif argv[1] == 'get-targets': - get_targets() - - elif argv[1] == 'delete': - if len(argv) < 3: - usage() - - iqn = argv[2] - delete(iqn) - - elif argv[1] == 'verify': - # This is used to verify that this script can be called by cinder, - # and that rtslib_fb is new enough to work. - verify_rtslib() - return 0 - - elif argv[1] == 'save': - if len(argv) > 3: - usage() - - destination_file = argv[2] if len(argv) > 2 else None - save_to_file(destination_file) - return 0 - - elif argv[1] == 'restore': - if len(argv) > 3: - usage() - - configuration_file = argv[2] if len(argv) > 2 else None - restore_from_file(configuration_file) - return 0 - - else: - usage() - - return 0 diff --git a/cinder/cmd/scheduler.py b/cinder/cmd/scheduler.py deleted file mode 100644 index 3b3c10332..000000000 --- a/cinder/cmd/scheduler.py +++ /dev/null @@ -1,56 +0,0 @@ -#!/usr/bin/env python -# Copyright 2010 United States Government as represented by the -# Administrator of the National Aeronautics and Space Administration. -# All Rights Reserved. 
-# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -"""Starter script for Cinder Scheduler.""" - -import eventlet -eventlet.monkey_patch() - -import logging as python_logging -import sys - -from oslo_config import cfg -from oslo_log import log as logging -from oslo_reports import guru_meditation_report as gmr -from oslo_reports import opts as gmr_opts - -from cinder import i18n -i18n.enable_lazy() - -# Need to register global_opts -from cinder.common import config # noqa -from cinder import objects -from cinder import service -from cinder import utils -from cinder import version - - -CONF = cfg.CONF - - -def main(): - objects.register_all() - gmr_opts.set_defaults(CONF) - CONF(sys.argv[1:], project='cinder', - version=version.version_string()) - logging.setup(CONF, "cinder") - python_logging.captureWarnings(True) - utils.monkey_patch() - gmr.TextGuruMeditation.setup_autorun(version, conf=CONF) - server = service.Service.create(binary='cinder-scheduler') - service.serve(server) - service.wait() diff --git a/cinder/cmd/volume.py b/cinder/cmd/volume.py deleted file mode 100644 index 3807145b1..000000000 --- a/cinder/cmd/volume.py +++ /dev/null @@ -1,119 +0,0 @@ -#!/usr/bin/env python -# Copyright 2010 United States Government as represented by the -# Administrator of the National Aeronautics and Space Administration. -# All Rights Reserved. 
-# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -"""Starter script for Cinder Volume.""" - -import logging as python_logging -import os - -import eventlet - -from cinder import objects - -if os.name == 'nt': - # eventlet monkey patching the os module causes subprocess.Popen to fail - # on Windows when using pipes due to missing non-blocking IO support. - eventlet.monkey_patch(os=False) -else: - eventlet.monkey_patch() - -import shlex -import sys - -from oslo_config import cfg -from oslo_log import log as logging -from oslo_privsep import priv_context -from oslo_reports import guru_meditation_report as gmr -from oslo_reports import opts as gmr_opts - -from cinder import i18n -i18n.enable_lazy() - -# Need to register global_opts -from cinder.common import config # noqa -from cinder.db import api as session -from cinder.i18n import _ -from cinder import service -from cinder import utils -from cinder import version - - -CONF = cfg.CONF - -host_opt = cfg.StrOpt('backend_host', help='Backend override of host value.') -CONF.register_cli_opt(host_opt) - -# TODO(geguileo): Once we complete the work on A-A update the option's help. -cluster_opt = cfg.StrOpt('cluster', - default=None, - help='Name of this cluster. Used to group volume ' - 'hosts that share the same backend ' - 'configurations to work in HA Active-Active ' - 'mode. 
Active-Active is not yet supported.') -CONF.register_opt(cluster_opt) - - -def main(): - objects.register_all() - gmr_opts.set_defaults(CONF) - CONF(sys.argv[1:], project='cinder', - version=version.version_string()) - logging.setup(CONF, "cinder") - python_logging.captureWarnings(True) - priv_context.init(root_helper=shlex.split(utils.get_root_helper())) - utils.monkey_patch() - gmr.TextGuruMeditation.setup_autorun(version, conf=CONF) - launcher = service.get_launcher() - LOG = logging.getLogger(__name__) - service_started = False - - if CONF.enabled_backends: - for backend in filter(None, CONF.enabled_backends): - CONF.register_opt(host_opt, group=backend) - backend_host = getattr(CONF, backend).backend_host - host = "%s@%s" % (backend_host or CONF.host, backend) - # We also want to set cluster to None on empty strings, and we - # ignore leading and trailing spaces. - cluster = CONF.cluster and CONF.cluster.strip() - cluster = (cluster or None) and '%s@%s' % (cluster, backend) - try: - server = service.Service.create(host=host, - service_name=backend, - binary='cinder-volume', - coordination=True, - cluster=cluster) - except Exception: - msg = _('Volume service %s failed to start.') % host - LOG.exception(msg) - else: - # Dispose of the whole DB connection pool here before - # starting another process. Otherwise we run into cases where - # child processes share DB connections which results in errors. - session.dispose_engine() - launcher.launch_service(server) - service_started = True - else: - LOG.error('Configuration for cinder-volume does not specify ' - '"enabled_backends". 
Using DEFAULT section to configure ' - 'drivers is not supported since Ocata.') - - if not service_started: - msg = _('No volume service(s) started successfully, terminating.') - LOG.error(msg) - sys.exit(1) - - launcher.wait() diff --git a/cinder/cmd/volume_usage_audit.py b/cinder/cmd/volume_usage_audit.py deleted file mode 100644 index 144f3b87d..000000000 --- a/cinder/cmd/volume_usage_audit.py +++ /dev/null @@ -1,247 +0,0 @@ -#!/usr/bin/env python -# Copyright (c) 2011 OpenStack Foundation -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -""" - Cron script to generate usage notifications for volumes existing during - the audit period. - - Together with the notifications generated by volumes - create/delete/resize, over that time period, this allows an external - system consuming usage notification feeds to calculate volume usage - for each tenant. - - Time periods are specified as 'hour', 'month', 'day' or 'year' - - - `hour` - previous hour. If run at 9:07am, will generate usage for - 8-9am. - - `month` - previous month. If the script is run April 1, it will - generate usages for March 1 through March 31. - - `day` - previous day. if run on July 4th, it generates usages for - July 3rd. - - `year` - previous year. If run on Jan 1, it generates usages for - Jan 1 through Dec 31 of the previous year. 
- -""" - -from __future__ import print_function - -import datetime -from iso8601 import iso8601 -import sys - -from oslo_config import cfg -from oslo_log import log as logging - -from cinder import i18n -i18n.enable_lazy() -from cinder import context -from cinder.i18n import _ -from cinder import objects -from cinder import rpc -from cinder import utils -from cinder import version -import cinder.volume.utils - - -CONF = cfg.CONF -script_opts = [ - cfg.StrOpt('start_time', - help="If this option is specified then the start time " - "specified is used instead of the start time of the " - "last completed audit period."), - cfg.StrOpt('end_time', - help="If this option is specified then the end time " - "specified is used instead of the end time of the " - "last completed audit period."), - cfg.BoolOpt('send_actions', - default=False, - help="Send the volume and snapshot create and delete " - "notifications generated in the specified period."), -] -CONF.register_cli_opts(script_opts) - - -def _time_error(LOG, begin, end): - if CONF.start_time: - begin = datetime.datetime.strptime(CONF.start_time, - "%Y-%m-%d %H:%M:%S") - if CONF.end_time: - end = datetime.datetime.strptime(CONF.end_time, - "%Y-%m-%d %H:%M:%S") - begin = begin.replace(tzinfo=iso8601.Utc()) - end = end.replace(tzinfo=iso8601.Utc()) - if not end > begin: - msg = _("The end time (%(end)s) must be after the start " - "time (%(start)s).") % {'start': begin, - 'end': end} - LOG.error(msg) - sys.exit(-1) - return begin, end - - -def _vol_notify_usage(LOG, volume_ref, extra_info, admin_context): - """volume_ref notify usage""" - try: - LOG.debug("Send exists notification for " - "<%(extra_info)s>", - {'volume_id': volume_ref.id, - 'project_id': volume_ref.project_id, - 'extra_info': extra_info}) - cinder.volume.utils.notify_about_volume_usage( - admin_context, volume_ref, 'exists', extra_usage_info=extra_info) - except Exception as exc_msg: - LOG.error("Exists volume notification failed: %s", - exc_msg, 
resource=volume_ref) - - -def _snap_notify_usage(LOG, snapshot_ref, extra_info, admin_context): - """snapshot_ref notify usage""" - try: - LOG.debug("Send notification for " - " <%(extra_info)s>", - {'snapshot_id': snapshot_ref.id, - 'project_id': snapshot_ref.project_id, - 'extra_info': extra_info}) - cinder.volume.utils.notify_about_snapshot_usage( - admin_context, snapshot_ref, 'exists', extra_info) - except Exception as exc_msg: - LOG.error("Exists snapshot notification failed: %s", - exc_msg, resource=snapshot_ref) - - -def _backup_notify_usage(LOG, backup_ref, extra_info, admin_context): - """backup_ref notify usage""" - try: - cinder.volume.utils.notify_about_backup_usage( - admin_context, backup_ref, 'exists', extra_info) - LOG.debug("Sent notification for " - " <%(extra_info)s>", - {'backup_id': backup_ref.id, - 'project_id': backup_ref.project_id, - 'extra_info': extra_info}) - except Exception as exc_msg: - LOG.error("Exists backups notification failed: %s", exc_msg) - - -def _create_action(obj_ref, admin_context, LOG, notify_about_usage, - type_id_str, type_name): - try: - local_extra_info = { - 'audit_period_beginning': str(obj_ref.created_at), - 'audit_period_ending': str(obj_ref.created_at), - } - LOG.debug("Send create notification for <%(type_id_str)s: %(_id)s> " - " <%(extra_info)s>", - {'type_id_str': type_id_str, - '_id': obj_ref.id, - 'project_id': obj_ref.project_id, - 'extra_info': local_extra_info}) - notify_about_usage(admin_context, obj_ref, - 'create.start', extra_usage_info=local_extra_info) - notify_about_usage(admin_context, obj_ref, - 'create.end', extra_usage_info=local_extra_info) - except Exception as exc_msg: - LOG.error("Create %(type)s notification failed: %(exc_msg)s", - {'type': type_name, 'exc_msg': exc_msg}, resource=obj_ref) - - -def _delete_action(obj_ref, admin_context, LOG, notify_about_usage, - type_id_str, type_name): - try: - local_extra_info = { - 'audit_period_beginning': str(obj_ref.deleted_at), - 
'audit_period_ending': str(obj_ref.deleted_at), - } - LOG.debug("Send delete notification for <%(type_id_str)s: %(_id)s> " - " <%(extra_info)s>", - {'type_id_str': type_id_str, - '_id': obj_ref.id, - 'project_id': obj_ref.project_id, - 'extra_info': local_extra_info}) - notify_about_usage(admin_context, obj_ref, - 'delete.start', extra_usage_info=local_extra_info) - notify_about_usage(admin_context, obj_ref, - 'delete.end', extra_usage_info=local_extra_info) - except Exception as exc_msg: - LOG.error("Delete %(type)s notification failed: %(exc_msg)s", - {'type': type_name, 'exc_msg': exc_msg}, resource=obj_ref) - - -def _obj_ref_action(_notify_usage, LOG, obj_ref, extra_info, admin_context, - begin, end, notify_about_usage, type_id_str, type_name): - _notify_usage(LOG, obj_ref, extra_info, admin_context) - if CONF.send_actions: - if begin < obj_ref.created_at < end: - _create_action(obj_ref, admin_context, LOG, - notify_about_usage, type_id_str, type_name) - - if obj_ref.deleted_at and begin < obj_ref.deleted_at < end: - _delete_action(obj_ref, admin_context, LOG, - notify_about_usage, type_id_str, type_name) - - -def main(): - objects.register_all() - admin_context = context.get_admin_context() - CONF(sys.argv[1:], project='cinder', - version=version.version_string()) - logging.setup(CONF, "cinder") - LOG = logging.getLogger("cinder") - rpc.init(CONF) - - begin, end = utils.last_completed_audit_period() - begin, end = _time_error(LOG, begin, end) - - LOG.info("Starting volume usage audit") - LOG.info("Creating usages for %(begin_period)s until %(end_period)s", - {"begin_period": begin, "end_period": end}) - - extra_info = { - 'audit_period_beginning': str(begin), - 'audit_period_ending': str(end), - } - - volumes = objects.VolumeList.get_all_active_by_window(admin_context, - begin, - end) - - LOG.info("Found %d volumes", len(volumes)) - for volume_ref in volumes: - _obj_ref_action(_vol_notify_usage, LOG, volume_ref, extra_info, - admin_context, begin, end, - 
cinder.volume.utils.notify_about_volume_usage, - "volume_id", "volume") - - snapshots = objects.SnapshotList.get_all_active_by_window(admin_context, - begin, end) - LOG.info("Found %d snapshots", len(snapshots)) - for snapshot_ref in snapshots: - _obj_ref_action(_snap_notify_usage, LOG, snapshot_ref, extra_info, - admin_context, begin, - end, cinder.volume.utils.notify_about_snapshot_usage, - "snapshot_id", "snapshot") - - backups = objects.BackupList.get_all_active_by_window(admin_context, - begin, end) - - LOG.info("Found %d backups", len(backups)) - for backup_ref in backups: - _obj_ref_action(_backup_notify_usage, LOG, backup_ref, extra_info, - admin_context, begin, - end, cinder.volume.utils.notify_about_backup_usage, - "backup_id", "backup") - LOG.info("Volume usage audit completed") diff --git a/cinder/common/__init__.py b/cinder/common/__init__.py deleted file mode 100644 index e69de29bb..000000000 diff --git a/cinder/common/config.py b/cinder/common/config.py deleted file mode 100644 index 334fd93e0..000000000 --- a/cinder/common/config.py +++ /dev/null @@ -1,247 +0,0 @@ -# Copyright 2010 United States Government as represented by the -# Administrator of the National Aeronautics and Space Administration. -# All Rights Reserved. -# Copyright 2012 Red Hat, Inc. -# Copyright 2013 NTT corp. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -"""Command-line flag library. - -Emulates gflags by wrapping cfg.ConfigOpts. 
- -The idea is to move fully to cfg eventually, and this wrapper is a -stepping stone. - -""" - -import socket - -from oslo_config import cfg -from oslo_log import log as logging -from oslo_middleware import cors -from oslo_utils import netutils - - -CONF = cfg.CONF -logging.register_options(CONF) - -core_opts = [ - cfg.StrOpt('state_path', - default='/var/lib/cinder', - deprecated_name='pybasedir', - help="Top-level directory for maintaining cinder's state"), ] - -CONF.register_cli_opts(core_opts) - -global_opts = [ - cfg.HostAddressOpt('my_ip', - default=netutils.get_my_ipv4(), - help='IP address of this host'), - cfg.ListOpt('glance_api_servers', - default=None, - help='A list of the URLs of glance API servers available to ' - 'cinder ([http[s]://][hostname|ip]:port). If protocol ' - 'is not specified it defaults to http.'), - cfg.IntOpt('glance_api_version', - default=2, - deprecated_for_removal=True, - deprecated_since="11.0.0", - deprecated_reason='Glance v1 support will be removed in Queens', - help='Version of the glance API to use'), - cfg.IntOpt('glance_num_retries', - min=0, - default=0, - help='Number retries when downloading an image from glance'), - cfg.BoolOpt('glance_api_insecure', - default=False, - help='Allow to perform insecure SSL (https) requests to ' - 'glance (https will be used but cert validation will ' - 'not be performed).'), - cfg.BoolOpt('glance_api_ssl_compression', - default=False, - help='Enables or disables negotiation of SSL layer ' - 'compression. In some cases disabling compression ' - 'can improve data throughput, such as when high ' - 'network bandwidth is available and you use ' - 'compressed image formats like qcow2.'), - cfg.StrOpt('glance_ca_certificates_file', - help='Location of ca certificates file to use for glance ' - 'client requests.'), - cfg.IntOpt('glance_request_timeout', - help='http/https timeout value for glance operations. 
If no ' - 'value (None) is supplied here, the glanceclient default ' - 'value is used.'), - cfg.BoolOpt('enable_v1_api', - default=False, - deprecated_for_removal=True, - help="DEPRECATED: Deploy v1 of the Cinder API."), - cfg.BoolOpt('enable_v2_api', - default=True, - deprecated_for_removal=True, - help="DEPRECATED: Deploy v2 of the Cinder API."), - cfg.BoolOpt('enable_v3_api', - default=True, - help="Deploy v3 of the Cinder API."), - cfg.BoolOpt('api_rate_limit', - default=True, - help='Enables or disables rate limit of the API.'), - cfg.ListOpt('osapi_volume_ext_list', - default=[], - help='Specify list of extensions to load when using osapi_' - 'volume_extension option with cinder.api.contrib.' - 'select_extensions'), - cfg.MultiStrOpt('osapi_volume_extension', - default=['cinder.api.contrib.standard_extensions'], - help='osapi volume extension to load'), - cfg.StrOpt('volume_manager', - default='cinder.volume.manager.VolumeManager', - help='Full class name for the Manager for volume'), - cfg.StrOpt('backup_manager', - default='cinder.backup.manager.BackupManager', - help='Full class name for the Manager for volume backup'), - cfg.StrOpt('scheduler_manager', - default='cinder.scheduler.manager.SchedulerManager', - help='Full class name for the Manager for scheduler'), - cfg.HostAddressOpt('host', - default=socket.gethostname(), - help='Name of this node. This can be an opaque ' - 'identifier. It is not necessarily a host name, ' - 'FQDN, or IP address.'), - # NOTE(vish): default to nova for compatibility with nova installs - cfg.StrOpt('storage_availability_zone', - default='nova', - help='Availability zone of this node. Can be overridden per ' - 'volume backend with the option ' - '"backend_availability_zone".'), - cfg.StrOpt('default_availability_zone', - help='Default availability zone for new volumes. 
If not set, ' - 'the storage_availability_zone option value is used as ' - 'the default for new volumes.'), - cfg.BoolOpt('allow_availability_zone_fallback', - default=False, - help='If the requested Cinder availability zone is ' - 'unavailable, fall back to the value of ' - 'default_availability_zone, then ' - 'storage_availability_zone, instead of failing.'), - cfg.StrOpt('default_volume_type', - help='Default volume type to use'), - cfg.StrOpt('default_group_type', - help='Default group type to use'), - cfg.StrOpt('volume_usage_audit_period', - default='month', - help='Time period for which to generate volume usages. ' - 'The options are hour, day, month, or year.'), - cfg.StrOpt('rootwrap_config', - default='/etc/cinder/rootwrap.conf', - help='Path to the rootwrap configuration file to use for ' - 'running commands as root'), - cfg.BoolOpt('monkey_patch', - default=False, - help='Enable monkey patching'), - cfg.ListOpt('monkey_patch_modules', - default=[], - help='List of modules/decorators to monkey patch'), - cfg.IntOpt('service_down_time', - default=60, - help='Maximum time since last check-in for a service to be ' - 'considered up'), - cfg.StrOpt('volume_api_class', - default='cinder.volume.api.API', - help='The full class name of the volume API class to use'), - cfg.StrOpt('backup_api_class', - default='cinder.backup.api.API', - help='The full class name of the volume backup API class'), - cfg.StrOpt('auth_strategy', - default='keystone', - choices=['noauth', 'keystone'], - help='The strategy to use for auth. Supports noauth or ' - 'keystone.'), - cfg.ListOpt('enabled_backends', - help='A list of backend names to use. 
These backend names ' - 'should be backed by a unique [CONFIG] group ' - 'with its options'), - cfg.BoolOpt('no_snapshot_gb_quota', - default=False, - help='Whether snapshots count against gigabyte quota'), - cfg.StrOpt('transfer_api_class', - default='cinder.transfer.api.API', - help='The full class name of the volume transfer API class'), - cfg.StrOpt('consistencygroup_api_class', - default='cinder.consistencygroup.api.API', - help='The full class name of the consistencygroup API class'), - cfg.StrOpt('group_api_class', - default='cinder.group.api.API', - help='The full class name of the group API class'), - cfg.StrOpt('os_privileged_user_name', - help='OpenStack privileged account username. Used for requests ' - 'to other services (such as Nova) that require an account ' - 'with special rights.', - deprecated_for_removal=True, - deprecated_since="11.0.0", - deprecated_reason='Use the [nova] section for configuring ' - 'Keystone authentication for a privileged user.'), - cfg.StrOpt('os_privileged_user_password', - help='Password associated with the OpenStack privileged ' - 'account.', - deprecated_for_removal=True, - deprecated_since="11.0.0", - deprecated_reason='Use the [nova] section to configure ' - 'Keystone authentication for a privileged user.', - secret=True), - cfg.StrOpt('os_privileged_user_tenant', - help='Tenant name associated with the OpenStack privileged ' - 'account.', - deprecated_for_removal=True, - deprecated_since="11.0.0", - deprecated_reason='Use the [nova] section to configure ' - 'Keystone authentication for a privileged user.'), - cfg.URIOpt('os_privileged_user_auth_url', - help='Auth URL associated with the OpenStack privileged ' - 'account.', - deprecated_for_removal=True, - deprecated_since="11.0.0", - deprecated_reason='Use the [nova] section to configure ' - 'Keystone authentication for a privileged user.') -] - -CONF.register_opts(core_opts) -CONF.register_opts(global_opts) - - -def set_middleware_defaults(): - """Update default 
configuration options for oslo.middleware.""" - - cors.set_defaults( - allow_headers=['X-Auth-Token', - 'X-Identity-Status', - 'X-Roles', - 'X-Service-Catalog', - 'X-User-Id', - 'X-Tenant-Id', - 'X-OpenStack-Request-ID', - 'X-Trace-Info', - 'X-Trace-HMAC', - 'OpenStack-API-Version'], - expose_headers=['X-Auth-Token', - 'X-Subject-Token', - 'X-Service-Token', - 'X-OpenStack-Request-ID', - 'OpenStack-API-Version'], - allow_methods=['GET', - 'PUT', - 'POST', - 'DELETE', - 'PATCH', - 'HEAD'] - ) diff --git a/cinder/common/constants.py b/cinder/common/constants.py deleted file mode 100644 index d93960d2d..000000000 --- a/cinder/common/constants.py +++ /dev/null @@ -1,27 +0,0 @@ -# Copyright 2016 Red Hat, Inc. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - - -# The maximum value a signed INT type may have -DB_MAX_INT = 0x7FFFFFFF - -# The cinder services binaries and topics' names -API_BINARY = "cinder-api" -SCHEDULER_BINARY = "cinder-scheduler" -VOLUME_BINARY = "cinder-volume" -BACKUP_BINARY = "cinder-backup" -SCHEDULER_TOPIC = SCHEDULER_BINARY -VOLUME_TOPIC = VOLUME_BINARY -BACKUP_TOPIC = BACKUP_BINARY diff --git a/cinder/common/sqlalchemyutils.py b/cinder/common/sqlalchemyutils.py deleted file mode 100644 index 4b4e211a8..000000000 --- a/cinder/common/sqlalchemyutils.py +++ /dev/null @@ -1,174 +0,0 @@ -# Copyright 2010 United States Government as represented by the -# Administrator of the National Aeronautics and Space Administration. 
-# Copyright 2010-2011 OpenStack Foundation -# Copyright 2012 Justin Santa Barbara -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -"""Implementation of paginate query.""" -import datetime - -from oslo_log import log as logging -from six.moves import range -import sqlalchemy -import sqlalchemy.sql as sa_sql -from sqlalchemy.sql import type_api - -from cinder.db import api -from cinder import exception -from cinder.i18n import _ - - -LOG = logging.getLogger(__name__) - -_TYPE_SCHEMA = { - 'datetime': datetime.datetime(1900, 1, 1), - 'big_integer': 0, - 'integer': 0, - 'string': '' -} - - -def _get_default_column_value(model, column_name): - """Return the default value of the columns from DB table. - - In postgreDB case, if no right default values are being set, an - psycopg2.DataError will be thrown. - """ - attr = getattr(model, column_name) - # Return the default value directly if the model contains. Otherwise return - # a default value which is not None. - if attr.default and isinstance(attr.default, type_api.TypeEngine): - return attr.default.arg - - attr_type = attr.type - return _TYPE_SCHEMA[attr_type.__visit_name__] - - -# TODO(wangxiyuan): Use oslo_db.sqlalchemy.utils.paginate_query once it is -# stable and afforded by the minimum version in requirement.txt. 
-# copied from glance/db/sqlalchemy/api.py -def paginate_query(query, model, limit, sort_keys, marker=None, - sort_dir=None, sort_dirs=None, offset=None): - """Returns a query with sorting / pagination criteria added. - - Pagination works by requiring a unique sort_key, specified by sort_keys. - (If sort_keys is not unique, then we risk looping through values.) - We use the last row in the previous page as the 'marker' for pagination. - So we must return values that follow the passed marker in the order. - With a single-valued sort_key, this would be easy: sort_key > X. - With a compound-values sort_key, (k1, k2, k3) we must do this to repeat - the lexicographical ordering: - (k1 > X1) or (k1 == X1 && k2 > X2) or (k1 == X1 && k2 == X2 && k3 > X3) - - We also have to cope with different sort_directions. - - Typically, the id of the last row is used as the client-facing pagination - marker, then the actual marker object must be fetched from the db and - passed in to us as marker. - - :param query: the query object to which we should add paging/sorting - :param model: the ORM model class - :param limit: maximum number of items to return - :param sort_keys: array of attributes by which results should be sorted - :param marker: the last item of the previous page; we returns the next - results after this value. - :param sort_dir: direction in which results should be sorted (asc, desc) - :param sort_dirs: per-column array of sort_dirs, corresponding to sort_keys - :param offset: the number of items to skip from the marker or from the - first element. - - :rtype: sqlalchemy.orm.query.Query - :return: The query with sorting/pagination added. 
- """ - - if 'id' not in sort_keys: - # TODO(justinsb): If this ever gives a false-positive, check - # the actual primary key, rather than assuming its id - LOG.warning('Id not in sort_keys; is sort_keys unique?') - - assert(not (sort_dir and sort_dirs)) - - # Default the sort direction to ascending - if sort_dirs is None and sort_dir is None: - sort_dir = 'asc' - - # Ensure a per-column sort direction - if sort_dirs is None: - sort_dirs = [sort_dir for _sort_key in sort_keys] - - assert(len(sort_dirs) == len(sort_keys)) - - # Add sorting - for current_sort_key, current_sort_dir in zip(sort_keys, sort_dirs): - sort_dir_func = { - 'asc': sqlalchemy.asc, - 'desc': sqlalchemy.desc, - }[current_sort_dir] - - try: - sort_key_attr = getattr(model, current_sort_key) - except AttributeError: - raise exception.InvalidInput(reason='Invalid sort key') - if not api.is_orm_value(sort_key_attr): - raise exception.InvalidInput(reason='Invalid sort key') - query = query.order_by(sort_dir_func(sort_key_attr)) - - # Add pagination - if marker is not None: - marker_values = [] - for sort_key in sort_keys: - v = getattr(marker, sort_key) - if v is None: - v = _get_default_column_value(model, sort_key) - marker_values.append(v) - - # Build up an array of sort criteria as in the docstring - criteria_list = [] - for i in range(0, len(sort_keys)): - crit_attrs = [] - for j in range(0, i): - model_attr = getattr(model, sort_keys[j]) - default = _get_default_column_value(model, sort_keys[j]) - attr = sa_sql.expression.case([(model_attr.isnot(None), - model_attr), ], - else_=default) - crit_attrs.append((attr == marker_values[j])) - - model_attr = getattr(model, sort_keys[i]) - default = _get_default_column_value(model, sort_keys[i]) - attr = sa_sql.expression.case([(model_attr.isnot(None), - model_attr), ], - else_=default) - if sort_dirs[i] == 'desc': - crit_attrs.append((attr < marker_values[i])) - elif sort_dirs[i] == 'asc': - crit_attrs.append((attr > marker_values[i])) - else: - raise 
ValueError(_("Unknown sort direction, " - "must be 'desc' or 'asc'")) - - criteria = sqlalchemy.sql.and_(*crit_attrs) - criteria_list.append(criteria) - - f = sqlalchemy.sql.or_(*criteria_list) - query = query.filter(f) - - if limit is not None: - query = query.limit(limit) - - if offset: - query = query.offset(offset) - - return query diff --git a/cinder/compute/__init__.py b/cinder/compute/__init__.py deleted file mode 100644 index 24a4d1f57..000000000 --- a/cinder/compute/__init__.py +++ /dev/null @@ -1,34 +0,0 @@ -# Copyright 2013 OpenStack Foundation. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from oslo_config import cfg -from oslo_utils import importutils - - -compute_opts = [ - cfg.StrOpt('compute_api_class', - default='cinder.compute.nova.API', - help='The full class name of the ' - 'compute API class to use'), -] - -CONF = cfg.CONF -CONF.register_opts(compute_opts) - - -def API(): - compute_api_class = CONF.compute_api_class - cls = importutils.import_class(compute_api_class) - return cls() diff --git a/cinder/compute/nova.py b/cinder/compute/nova.py deleted file mode 100644 index 43274df73..000000000 --- a/cinder/compute/nova.py +++ /dev/null @@ -1,249 +0,0 @@ -# Copyright 2013 IBM Corp. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. 
You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -""" -Handles all requests to Nova. -""" - -from keystoneauth1 import identity -from keystoneauth1 import loading as ks_loading -from novaclient import api_versions -from novaclient import client as nova_client -from novaclient import exceptions as nova_exceptions -from oslo_config import cfg -from oslo_log import log as logging -from requests import exceptions as request_exceptions - -from cinder.db import base -from cinder import exception - -old_opts = [ - cfg.StrOpt('nova_catalog_info', - default='compute:Compute Service:publicURL', - help='Match this value when searching for nova in the ' - 'service catalog. Format is: separated values of ' - 'the form: ' - '::', - deprecated_for_removal=True), - cfg.StrOpt('nova_catalog_admin_info', - default='compute:Compute Service:publicURL', - help='Same as nova_catalog_info, but for admin endpoint.', - deprecated_for_removal=True), - cfg.StrOpt('nova_endpoint_template', - help='Override service catalog lookup with template for nova ' - 'endpoint e.g. http://localhost:8774/v2/%(project_id)s', - deprecated_for_removal=True), - cfg.StrOpt('nova_endpoint_admin_template', - help='Same as nova_endpoint_template, but for admin endpoint.', - deprecated_for_removal=True), -] - -nova_opts = [ - cfg.StrOpt('region_name', - help='Name of nova region to use. Useful if keystone manages ' - 'more than one region.', - deprecated_name="os_region_name", - deprecated_group="DEFAULT"), - cfg.StrOpt('interface', - default='public', - choices=['public', 'admin', 'internal'], - help='Type of the nova endpoint to use. 
This endpoint will ' - 'be looked up in the keystone catalog and should be ' - 'one of public, internal or admin.'), - cfg.StrOpt('token_auth_url', - help='The authentication URL for the nova connection when ' - 'using the current user''s token'), -] - - -NOVA_GROUP = 'nova' -CONF = cfg.CONF - -deprecations = {'cafile': [cfg.DeprecatedOpt('nova_ca_certificates_file')], - 'insecure': [cfg.DeprecatedOpt('nova_api_insecure')]} -nova_session_opts = ks_loading.get_session_conf_options( - deprecated_opts=deprecations) -nova_auth_opts = ks_loading.get_auth_common_conf_options() - -CONF.register_opts(old_opts) -CONF.register_opts(nova_opts, group=NOVA_GROUP) -CONF.register_opts(nova_session_opts, group=NOVA_GROUP) -CONF.register_opts(nova_auth_opts, group=NOVA_GROUP) - -LOG = logging.getLogger(__name__) - -NOVA_API_VERSION = "2.1" - -nova_extensions = [ext for ext in - nova_client.discover_extensions(NOVA_API_VERSION) - if ext.name in ("assisted_volume_snapshots", - "list_extensions", - "server_external_events")] - - -def _get_identity_endpoint_from_sc(context): - # Search for the identity endpoint in the service catalog - for service in context.service_catalog: - if service.get('type') != 'identity': - continue - for endpoint in service['endpoints']: - if (not CONF[NOVA_GROUP].region_name or - endpoint.get('region') == CONF[NOVA_GROUP].region_name): - return endpoint.get(CONF[NOVA_GROUP].interface + 'URL') - raise nova_exceptions.EndpointNotFound() - - -def novaclient(context, privileged_user=False, timeout=None, api_version=None): - """Returns a Nova client - - @param privileged_user: - If True, use the account from configuration - (requires 'auth_type' and the other usual Keystone authentication - options to be set in the [nova] section) - @param timeout: - Number of seconds to wait for an answer before raising a - Timeout exception (None to disable) - @param api_version: - api version of nova - """ - - if privileged_user and CONF[NOVA_GROUP].auth_type: - 
LOG.debug('Creating Keystone auth plugin from conf') - n_auth = ks_loading.load_auth_from_conf_options(CONF, NOVA_GROUP) - elif privileged_user and CONF.os_privileged_user_name: - # Fall back to the deprecated os_privileged_xxx settings. - # TODO(gyurco): Remove it after Pike. - if CONF.os_privileged_user_auth_url: - url = CONF.os_privileged_user_auth_url - else: - url = _get_identity_endpoint_from_sc(context) - LOG.debug('Creating Keystone password plugin from legacy settings ' - 'using URL: %s', url) - n_auth = identity.Password( - auth_url=url, - username=CONF.os_privileged_user_name, - password=CONF.os_privileged_user_password, - project_name=CONF.os_privileged_user_tenant, - project_domain_id=context.project_domain, - user_domain_id=context.user_domain) - else: - if CONF[NOVA_GROUP].token_auth_url: - url = CONF[NOVA_GROUP].token_auth_url - else: - url = _get_identity_endpoint_from_sc(context) - LOG.debug('Creating Keystone token plugin using URL: %s', url) - n_auth = identity.Token(auth_url=url, - token=context.auth_token, - project_name=context.project_name, - project_domain_id=context.project_domain) - - keystone_session = ks_loading.load_session_from_conf_options( - CONF, - NOVA_GROUP, - auth=n_auth) - - c = nova_client.Client( - api_versions.APIVersion(api_version or NOVA_API_VERSION), - session=keystone_session, - insecure=CONF[NOVA_GROUP].insecure, - timeout=timeout, - region_name=CONF[NOVA_GROUP].region_name, - endpoint_type=CONF[NOVA_GROUP].interface, - cacert=CONF[NOVA_GROUP].cafile, - global_request_id=context.global_id, - extensions=nova_extensions) - - return c - - -class API(base.Base): - """API for interacting with novaclient.""" - - def _get_volume_extended_event(self, server_id, volume_id): - return {'name': 'volume-extended', - 'server_uuid': server_id, - 'tag': volume_id} - - def _send_events(self, context, events, api_version=None): - nova = novaclient(context, privileged_user=True, - api_version=api_version) - try: - response = 
nova.server_external_events.create(events) - except nova_exceptions.NotFound: - LOG.warning('Nova returned NotFound for events: %s.', events) - except Exception: - LOG.exception('Failed to notify nova on events: %s.', events) - else: - if not isinstance(response, list): - LOG.error('Error response returned from nova: %s.', response) - return - response_error = False - for event in response: - code = event.get('code') - if code is None: - response_error = True - continue - if code != 200: - LOG.warning( - 'Nova event: %s returned with failed status.', event) - else: - LOG.info('Nova event response: %s.', event) - if response_error: - LOG.error('Error response returned from nova: %s.', response) - - def has_extension(self, context, extension, timeout=None): - try: - nova_exts = novaclient(context).list_extensions.show_all() - except request_exceptions.Timeout: - raise exception.APITimeout(service='Nova') - return extension in [e.name for e in nova_exts] - - def update_server_volume(self, context, server_id, src_volid, - new_volume_id): - nova = novaclient(context, privileged_user=True) - nova.volumes.update_server_volume(server_id, - src_volid, - new_volume_id) - - def create_volume_snapshot(self, context, volume_id, create_info): - nova = novaclient(context, privileged_user=True) - - # pylint: disable=E1101 - nova.assisted_volume_snapshots.create( - volume_id, - create_info=create_info) - - def delete_volume_snapshot(self, context, snapshot_id, delete_info): - nova = novaclient(context, privileged_user=True) - - # pylint: disable=E1101 - nova.assisted_volume_snapshots.delete( - snapshot_id, - delete_info=delete_info) - - def get_server(self, context, server_id, privileged_user=False, - timeout=None): - try: - return novaclient(context, privileged_user=privileged_user, - timeout=timeout).servers.get(server_id) - except nova_exceptions.NotFound: - raise exception.ServerNotFound(uuid=server_id) - except request_exceptions.Timeout: - raise 
exception.APITimeout(service='Nova') - - def extend_volume(self, context, server_ids, volume_id): - api_version = '2.51' - events = [self._get_volume_extended_event(server_id, volume_id) - for server_id in server_ids] - self._send_events(context, events, api_version=api_version) diff --git a/cinder/config/__init__.py b/cinder/config/__init__.py deleted file mode 100644 index e69de29bb..000000000 diff --git a/cinder/config/cinder-config-generator.conf b/cinder/config/cinder-config-generator.conf deleted file mode 100644 index 6692e0de8..000000000 --- a/cinder/config/cinder-config-generator.conf +++ /dev/null @@ -1,20 +0,0 @@ -[DEFAULT] -output_file = etc/cinder/cinder.conf.sample -wrap_width = 79 -namespace = castellan.config -namespace = cinder -namespace = keystonemiddleware.auth_token -namespace = osprofiler -namespace = oslo.config -namespace = oslo.concurrency -namespace = oslo.db -namespace = oslo.log -namespace = oslo.messaging -namespace = oslo.middleware -namespace = oslo.policy -namespace = oslo.reports -namespace = oslo.service.periodic_task -namespace = oslo.service.service -namespace = oslo.service.sslutils -namespace = oslo.service.wsgi -namespace = oslo.versionedobjects diff --git a/cinder/config/generate_cinder_opts.py b/cinder/config/generate_cinder_opts.py deleted file mode 100644 index eb2c95e70..000000000 --- a/cinder/config/generate_cinder_opts.py +++ /dev/null @@ -1,248 +0,0 @@ -#!/usr/bin/env python -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -import collections -import os -import subprocess -import textwrap - -from cinder.volume import configuration - -OrderedDict = collections.OrderedDict - -BASEDIR = os.path.split(os.path.realpath(__file__))[0] + "/../../" - - -if __name__ == "__main__": - os.chdir(BASEDIR) - opt_file = open("cinder/opts.py", 'w') - opt_dict = OrderedDict() - dir_trees_list = [] - REGISTER_OPTS_STR = "CONF.register_opts(" - REGISTER_OPT_STR = "CONF.register_opt(" - - license_str = textwrap.dedent( - """ - # Licensed under the Apache License, Version 2.0 (the "License"); - # you may not use this file except in compliance with the License. - # You may obtain a copy of the License at - # - # http://www.apache.org/licenses/LICENSE-2.0 - # - # Unless required by applicable law or agreed to in writing, software - # distributed under the License is distributed on an "AS IS" BASIS, - # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - # implied. - # See the License for the specific language governing permissions and - # limitations under the License.\n - """) - opt_file.write(license_str) - - edit_header = textwrap.dedent( - """ - ################################################################### - # WARNING! - # - # Do not edit this file directly. This file should be generated by - # running the command "tox -e genopts" any time a config option - # has been added, changed, or removed. - ###################################################################\n - """) - opt_file.write(edit_header) - - opt_file.write("import itertools\n\n") - - # NOTE(geguileo): We need to register all OVOs before importing any other - # cinder files, otherwise any decorator that uses cinder.objects.YYY will - # fail with exception AttributeError: 'module' object has no attribute - # 'YYY' when running tox -egenconfig - opt_file.write("from cinder import objects\nobjects.register_all()\n\n") - - targetdir = 'cinder' - - common_string = ('find ' + targetdir + ' -type f -name "*.py" ! 
' - '-path "*/tests/*" -exec grep -l "%s" {} ' - '+ | sed -e "s|^' + BASEDIR + - '|/|g" | sort -u') - - cmd_opts = common_string % REGISTER_OPTS_STR - output_opts = subprocess.check_output( # nosec : command is hardcoded - '{}'.format(cmd_opts), shell=True, - universal_newlines=True) - dir_trees_list = output_opts.split() - - cmd_opt = common_string % REGISTER_OPT_STR - output_opt = subprocess.check_output( # nosec : command is hardcoded - '{}'.format(cmd_opt), shell=True, - universal_newlines=True) - temp_list = output_opt.split() - - for item in temp_list: - dir_trees_list.append(item) - dir_trees_list.sort() - - flag = False - - def _check_import(aline): - if len(aline) > 79: - new_lines = aline.partition(' as ') - return new_lines - else: - return [aline] - - for atree in dir_trees_list: - - if atree in ["cinder/config/generate_cinder_opts.py", - "cinder/hacking/checks.py", - "cinder/volume/configuration.py", - "cinder/test.py"]: - continue - - dirs_list = atree.split('/') - - import_module = "from " - init_import_module = "" - import_name = "" - - for dir in dirs_list: - if dir.find(".py") == -1: - import_module += dir + "." - init_import_module += dir + "." 
- import_name += dir + "_" - else: - if dir[:-3] != "__init__": - import_name += dir[:-3].replace("_", "") - import_module = (import_module[:-1] + " import " + - dir[:-3] + " as " + import_name) - lines = _check_import(import_module) - if len(lines) > 1: - opt_file.write(lines[0] + lines[1] + "\\\n") - opt_file.write(" " + lines[2] + "\n") - else: - opt_file.write(lines[0] + "\n") - - else: - import_name = import_name[:-1].replace('/', '.') - init_import = atree[:-12].replace('/', '.') - opt_file.write("import " + init_import + "\n") - - flag = True - if flag is False: - opt_dict[import_name] = atree - else: - opt_dict[init_import_module.strip(".")] = atree - - flag = False - - registered_opts_dict = OrderedDict([('DEFAULT', [])]) - - def _write_item(opts): - list_name = opts[-3:] - if list_name.lower() == "opt": - line_to_write = " [" + opts.strip("\n") + "],\n" - opt_line = _check_line_length(line_to_write) - if len(opt_line) > 1: - opt_file.write(opt_line[0] + opt_line[1] + "\n") - opt_file.write(" " + opt_line[2]) - else: - opt_file.write(opt_line[0]) - else: - line_to_write = " " + opts.strip("\n") + ",\n" - opt_line = _check_line_length(line_to_write) - if len(opt_line) > 1: - opt_file.write(opt_line[0] + opt_line[1] + "\n") - opt_file.write(" " + opt_line[2]) - else: - opt_file.write(opt_line[0]) - - def _retrieve_name(aline): - if REGISTER_OPT_STR in aline: - str_to_replace = REGISTER_OPT_STR - else: - str_to_replace = REGISTER_OPTS_STR - return aline.replace(str_to_replace, "") - - def _check_line_length(aline): - if len(aline) > 79: - temp = aline.split(".") - lines_to_write = [] - - for section in temp: - lines_to_write.append(section) - lines_to_write.append('.') - - return lines_to_write - - else: - return [aline] - - for key in opt_dict: - fd = os.open(opt_dict[key], os.O_RDONLY) - afile = os.fdopen(fd, "r") - - for aline in afile: - exists = aline.find("CONF.register_opt") - if exists != -1: - # TODO(kjnelson) FIX THIS LATER. 
These are instances where - # CONF.register_opts is happening without actually registering - # real lists of opts - - exists = aline.find('base_san_opts') - if (exists != -1) or (key == 'cinder_volume_configuration'): - continue - - group_exists = aline.find(', group=') - formatted_opt = _retrieve_name(aline[: group_exists]) - formatted_opt = formatted_opt.replace(')', '').strip() - if group_exists != -1: - group_name = aline[group_exists:-1].replace( - ', group=\"\'', '').replace( - ', group=', '').strip( - "\'\")").upper() - - # NOTE(dulek): Hack to resolve constants manually. - if (group_name.endswith('SHARED_CONF_GROUP') - or group_name.lower() == 'backend_defaults'): - group_name = configuration.SHARED_CONF_GROUP - - if group_name in registered_opts_dict: - line = key + "." + formatted_opt - registered_opts_dict[group_name].append(line) - else: - line = key + "." + formatted_opt - registered_opts_dict[group_name] = [line] - else: - line = key + "." + formatted_opt - registered_opts_dict['DEFAULT'].append(line) - - setup_str = ("\n\n" - "def list_opts():\n" - " return [\n") - opt_file.write(setup_str) - - registered_opts_dict = OrderedDict(sorted(registered_opts_dict.items(), - key = lambda x: x[0])) - - for key in registered_opts_dict: - section_start_str = (" ('" + key + "',\n" - " itertools.chain(\n") - opt_file.write(section_start_str) - for item in registered_opts_dict[key]: - _write_item(item) - section_end_str = " )),\n" - opt_file.write(section_end_str) - - closing_str = (" ]\n") - opt_file.write(closing_str) - opt_file.close() diff --git a/cinder/consistencygroup/__init__.py b/cinder/consistencygroup/__init__.py deleted file mode 100644 index e69de29bb..000000000 diff --git a/cinder/consistencygroup/api.py b/cinder/consistencygroup/api.py deleted file mode 100644 index 8f28caa1a..000000000 --- a/cinder/consistencygroup/api.py +++ /dev/null @@ -1,31 +0,0 @@ -# Copyright (C) 2012 - 2014 EMC Corporation. -# All Rights Reserved. 
-# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -""" -Handles all requests relating to consistency groups. -""" - -import cinder.policy - - -def check_policy(context, action, target_obj=None): - target = { - 'project_id': context.project_id, - 'user_id': context.user_id, - } - target_obj = target_obj.fields if target_obj else {} - target.update(target_obj) - _action = 'consistencygroup:%s' % action - cinder.policy.enforce(context, _action, target) diff --git a/cinder/context.py b/cinder/context.py deleted file mode 100644 index babfc43dc..000000000 --- a/cinder/context.py +++ /dev/null @@ -1,219 +0,0 @@ -# Copyright 2011 OpenStack Foundation -# Copyright 2010 United States Government as represented by the -# Administrator of the National Aeronautics and Space Administration. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -"""RequestContext: context for requests that persist through all of cinder.""" - -import copy - -from oslo_config import cfg -from oslo_context import context -from oslo_log import log as logging -from oslo_utils import timeutils -import six - -from cinder.i18n import _ -from cinder import policy - -context_opts = [ - cfg.StrOpt('cinder_internal_tenant_project_id', - help='ID of the project which will be used as the Cinder ' - 'internal tenant.'), - cfg.StrOpt('cinder_internal_tenant_user_id', - help='ID of the user to be used in volume operations as the ' - 'Cinder internal tenant.'), -] - -CONF = cfg.CONF -CONF.register_opts(context_opts) - -LOG = logging.getLogger(__name__) - - -class RequestContext(context.RequestContext): - """Security context and request information. - - Represents the user taking a given action within the system. - - """ - def __init__(self, user_id=None, project_id=None, is_admin=None, - read_deleted="no", project_name=None, remote_address=None, - timestamp=None, quota_class=None, service_catalog=None, - **kwargs): - """Initialize RequestContext. - - :param read_deleted: 'no' indicates deleted records are hidden, 'yes' - indicates deleted records are visible, 'only' indicates that - *only* deleted records are visible. - - :param overwrite: Set to False to ensure that the greenthread local - copy of the index is not overwritten. - """ - # NOTE(jamielennox): oslo.context still uses some old variables names. - # These arguments are maintained instead of passed as kwargs to - # maintain the interface for tests. 
- kwargs.setdefault('user', user_id) - kwargs.setdefault('tenant', project_id) - - super(RequestContext, self).__init__(is_admin=is_admin, **kwargs) - - self.project_name = project_name - self.read_deleted = read_deleted - self.remote_address = remote_address - if not timestamp: - timestamp = timeutils.utcnow() - elif isinstance(timestamp, six.string_types): - timestamp = timeutils.parse_isotime(timestamp) - self.timestamp = timestamp - self.quota_class = quota_class - - if service_catalog: - # Only include required parts of service_catalog - self.service_catalog = [s for s in service_catalog - if s.get('type') in - ('identity', 'compute', 'object-store', - 'image')] - else: - # if list is empty or none - self.service_catalog = [] - - # We need to have RequestContext attributes defined - # when policy.check_is_admin invokes request logging - # to make it loggable. - if self.is_admin is None: - self.is_admin = policy.check_is_admin(self.roles, self) - elif self.is_admin and 'admin' not in self.roles: - self.roles.append('admin') - - def _get_read_deleted(self): - return self._read_deleted - - def _set_read_deleted(self, read_deleted): - if read_deleted not in ('no', 'yes', 'only'): - raise ValueError(_("read_deleted can only be one of 'no', " - "'yes' or 'only', not %r") % read_deleted) - self._read_deleted = read_deleted - - def _del_read_deleted(self): - del self._read_deleted - - read_deleted = property(_get_read_deleted, _set_read_deleted, - _del_read_deleted) - - def to_dict(self): - result = super(RequestContext, self).to_dict() - result['user_id'] = self.user_id - result['project_id'] = self.project_id - result['project_name'] = self.project_name - result['domain'] = self.domain - result['read_deleted'] = self.read_deleted - result['remote_address'] = self.remote_address - result['timestamp'] = self.timestamp.isoformat() - result['quota_class'] = self.quota_class - result['service_catalog'] = self.service_catalog - result['request_id'] = self.request_id - 
return result - - @classmethod - def from_dict(cls, values): - return cls(user_id=values.get('user_id'), - project_id=values.get('project_id'), - project_name=values.get('project_name'), - domain=values.get('domain'), - read_deleted=values.get('read_deleted'), - remote_address=values.get('remote_address'), - timestamp=values.get('timestamp'), - quota_class=values.get('quota_class'), - service_catalog=values.get('service_catalog'), - request_id=values.get('request_id'), - is_admin=values.get('is_admin'), - roles=values.get('roles'), - auth_token=values.get('auth_token'), - user_domain=values.get('user_domain'), - project_domain=values.get('project_domain')) - - def to_policy_values(self): - policy = super(RequestContext, self).to_policy_values() - - policy['is_admin'] = self.is_admin - - return policy - - def elevated(self, read_deleted=None, overwrite=False): - """Return a version of this context with admin flag set.""" - context = self.deepcopy() - context.is_admin = True - - if 'admin' not in context.roles: - context.roles.append('admin') - - if read_deleted is not None: - context.read_deleted = read_deleted - - return context - - def deepcopy(self): - return copy.deepcopy(self) - - # NOTE(sirp): the openstack/common version of RequestContext uses - # tenant/user whereas the Cinder version uses project_id/user_id. - # NOTE(adrienverge): The Cinder version of RequestContext now uses - # tenant/user internally, so it is compatible with context-aware code from - # openstack/common. We still need this shim for the rest of Cinder's - # code. 
- @property - def project_id(self): - return self.tenant - - @project_id.setter - def project_id(self, value): - self.tenant = value - - @property - def user_id(self): - return self.user - - @user_id.setter - def user_id(self, value): - self.user = value - - -def get_admin_context(read_deleted="no"): - return RequestContext(user_id=None, - project_id=None, - is_admin=True, - read_deleted=read_deleted, - overwrite=False) - - -def get_internal_tenant_context(): - """Build and return the Cinder internal tenant context object - - This request context will only work for internal Cinder operations. It will - not be able to make requests to remote services. To do so it will need to - use the keystone client to get an auth_token. - """ - project_id = CONF.cinder_internal_tenant_project_id - user_id = CONF.cinder_internal_tenant_user_id - - if project_id and user_id: - return RequestContext(user_id=user_id, - project_id=project_id, - is_admin=True) - else: - LOG.warning('Unable to get internal tenant context: Missing ' - 'required config parameters.') - return None diff --git a/cinder/coordination.py b/cinder/coordination.py deleted file mode 100644 index f347ba089..000000000 --- a/cinder/coordination.py +++ /dev/null @@ -1,189 +0,0 @@ -# Copyright 2015 Intel -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -"""Coordination and locking utilities.""" - -import inspect -import uuid - -import decorator -from oslo_config import cfg -from oslo_log import log -from oslo_utils import timeutils -from tooz import coordination - -from cinder import exception -from cinder.i18n import _ - -LOG = log.getLogger(__name__) - -coordination_opts = [ - cfg.StrOpt('backend_url', - default='file://$state_path', - help='The backend URL to use for distributed coordination.'), - cfg.FloatOpt('heartbeat', - default=1.0, - help='Number of seconds between heartbeats for distributed ' - 'coordination. No longer used since distributed ' - 'coordination manages its heartbeat internally.', - deprecated_for_removal=True, - deprecated_reason='This option is no longer used.', - deprecated_since='11.0.0'), - cfg.FloatOpt('initial_reconnect_backoff', - default=0.1, - help='Initial number of seconds to wait after failed ' - 'reconnection. No longer used since distributed ' - 'coordination manages its heartbeat internally.', - deprecated_for_removal=True, - deprecated_reason='This option is no longer used.', - deprecated_since='11.0.0'), - cfg.FloatOpt('max_reconnect_backoff', - default=60.0, - help='Maximum number of seconds between sequential ' - 'reconnection retries. No longer used since ' - 'distributed coordination manages its heartbeat ' - 'internally.', - deprecated_for_removal=True, - deprecated_reason='This option is no longer used.', - deprecated_since='11.0.0'), -] - -CONF = cfg.CONF -CONF.register_opts(coordination_opts, group='coordination') - - -class Coordinator(object): - """Tooz coordination wrapper. - - Coordination member id is created from concatenated - `prefix` and `agent_id` parameters. - - :param str agent_id: Agent identifier - :param str prefix: Used to provide member identifier with a - meaningful prefix. 
- """ - - def __init__(self, agent_id=None, prefix=''): - self.coordinator = None - self.agent_id = agent_id or str(uuid.uuid4()) - self.started = False - self.prefix = prefix - - def start(self): - if self.started: - return - - # NOTE(bluex): Tooz expects member_id as a byte string. - member_id = (self.prefix + self.agent_id).encode('ascii') - self.coordinator = coordination.get_coordinator( - cfg.CONF.coordination.backend_url, member_id) - self.coordinator.start(start_heart=True) - self.started = True - - def stop(self): - """Disconnect from coordination backend and stop heartbeat.""" - if self.started: - self.coordinator.stop() - self.coordinator = None - self.started = False - - def get_lock(self, name): - """Return a Tooz backend lock. - - :param str name: The lock name that is used to identify it - across all nodes. - """ - # NOTE(bluex): Tooz expects lock name as a byte string. - lock_name = (self.prefix + name).encode('ascii') - if self.coordinator is not None: - return self.coordinator.get_lock(lock_name) - else: - raise exception.LockCreationFailed(_('Coordinator uninitialized.')) - - -COORDINATOR = Coordinator(prefix='cinder-') - - -def synchronized(lock_name, blocking=True, coordinator=COORDINATOR): - """Synchronization decorator. - - :param str lock_name: Lock name. - :param blocking: If True, blocks until the lock is acquired. - If False, raises exception when not acquired. Otherwise, - the value is used as a timeout value and if lock is not acquired - after this number of seconds exception is raised. - :param coordinator: Coordinator class to use when creating lock. - Defaults to the global coordinator. - :raises tooz.coordination.LockAcquireFailed: if lock is not acquired - - Decorating a method like so:: - - @synchronized('mylock') - def foo(self, *args): - ... - - ensures that only one process will execute the foo method at a time. - - Different methods can share the same lock:: - - @synchronized('mylock') - def foo(self, *args): - ... 
- - @synchronized('mylock') - def bar(self, *args): - ... - - This way only one of either foo or bar can be executing at a time. - - Lock name can be formatted using Python format string syntax:: - - @synchronized('{f_name}-{vol.id}-{snap[name]}') - def foo(self, vol, snap): - ... - - Available field names are: decorated function parameters and - `f_name` as a decorated function name. - """ - - @decorator.decorator - def _synchronized(f, *a, **k): - call_args = inspect.getcallargs(f, *a, **k) - call_args['f_name'] = f.__name__ - lock = coordinator.get_lock(lock_name.format(**call_args)) - t1 = timeutils.now() - t2 = None - try: - with lock(blocking): - t2 = timeutils.now() - LOG.debug('Lock "%(name)s" acquired by "%(function)s" :: ' - 'waited %(wait_secs)0.3fs', - {'name': lock.name, - 'function': f.__name__, - 'wait_secs': (t2 - t1)}) - return f(*a, **k) - finally: - t3 = timeutils.now() - if t2 is None: - held_secs = "N/A" - else: - held_secs = "%0.3fs" % (t3 - t2) - LOG.debug('Lock "%(name)s" released by "%(function)s" :: held ' - '%(held_secs)s', - {'name': lock.name, - 'function': f.__name__, - 'held_secs': held_secs}) - - return _synchronized diff --git a/cinder/db/__init__.py b/cinder/db/__init__.py deleted file mode 100644 index 0fa3ac6bb..000000000 --- a/cinder/db/__init__.py +++ /dev/null @@ -1,20 +0,0 @@ -# Copyright 2010 United States Government as represented by the -# Administrator of the National Aeronautics and Space Administration. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the -# License for the specific language governing permissions and limitations -# under the License. -""" -DB abstraction for Cinder -""" - -from cinder.db.api import * # noqa diff --git a/cinder/db/api.py b/cinder/db/api.py deleted file mode 100644 index a3426380b..000000000 --- a/cinder/db/api.py +++ /dev/null @@ -1,1865 +0,0 @@ -# Copyright (c) 2011 X.commerce, a business unit of eBay Inc. -# Copyright 2010 United States Government as represented by the -# Administrator of the National Aeronautics and Space Administration. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -"""Defines interface for DB access. - -Functions in this module are imported into the cinder.db namespace. Call these -functions from cinder.db namespace, not the cinder.db.api namespace. - -All functions in this module return objects that implement a dictionary-like -interface. Currently, many of these objects are sqlalchemy objects that -implement a dictionary interface. However, a future goal is to have all of -these objects be simple dictionaries. - - -**Related Flags** - -:connection: string specifying the sqlalchemy connection to use, like: - `sqlite:///var/lib/cinder/cinder.sqlite`. 
- -:enable_new_services: when adding a new service to the database, is it in the - pool of available hardware (Default: True) - -""" - -from oslo_config import cfg -from oslo_db import api as oslo_db_api -from oslo_db import options as db_options - -from cinder.api import common -from cinder.common import constants -from cinder.i18n import _ - -db_opts = [ - cfg.BoolOpt('enable_new_services', - default=True, - help='Services to be added to the available pool on create'), - cfg.StrOpt('volume_name_template', - default='volume-%s', - help='Template string to be used to generate volume names'), - cfg.StrOpt('snapshot_name_template', - default='snapshot-%s', - help='Template string to be used to generate snapshot names'), - cfg.StrOpt('backup_name_template', - default='backup-%s', - help='Template string to be used to generate backup names'), ] - - -CONF = cfg.CONF -CONF.register_opts(db_opts) -db_options.set_defaults(CONF) - -_BACKEND_MAPPING = {'sqlalchemy': 'cinder.db.sqlalchemy.api'} - - -IMPL = oslo_db_api.DBAPI.from_config(conf=CONF, - backend_mapping=_BACKEND_MAPPING, - lazy=True) - -# The maximum value a signed INT type may have -MAX_INT = constants.DB_MAX_INT - - -################### - -def dispose_engine(): - """Force the engine to establish new connections.""" - - # FIXME(jdg): When using sqlite if we do the dispose - # we seem to lose our DB here. Adding this check - # means we don't do the dispose, but we keep our sqlite DB - # This likely isn't the best way to handle this - - if 'sqlite' not in IMPL.get_engine().name: - return IMPL.dispose_engine() - else: - return - - -################### - - -def service_destroy(context, service_id): - """Destroy the service or raise if it does not exist.""" - return IMPL.service_destroy(context, service_id) - - -def service_get(context, service_id=None, backend_match_level=None, **filters): - """Get a service that matches the criteria. - - A possible filter is is_up=True and it will filter nodes that are down. 
- - :param service_id: Id of the service. - :param filters: Filters for the query in the form of key/value. - :param backend_match_level: 'pool', 'backend', or 'host' for host and - cluster filters (as defined in _filter_host - method) - :raise ServiceNotFound: If service doesn't exist. - """ - return IMPL.service_get(context, service_id, backend_match_level, - **filters) - - -def service_get_all(context, backend_match_level=None, **filters): - """Get all services that match the criteria. - - A possible filter is is_up=True and it will filter nodes that are down, - as well as host_or_cluster, that lets you look for services using both - of these properties. - - :param filters: Filters for the query in the form of key/value arguments. - :param backend_match_level: 'pool', 'backend', or 'host' for host and - cluster filters (as defined in _filter_host - method) - """ - return IMPL.service_get_all(context, backend_match_level, **filters) - - -def service_create(context, values): - """Create a service from the values dictionary.""" - return IMPL.service_create(context, values) - - -def service_update(context, service_id, values): - """Set the given properties on an service and update it. - - Raises NotFound if service does not exist. - """ - return IMPL.service_update(context, service_id, values) - - -############### - - -def is_backend_frozen(context, host, cluster_name): - """Check if a storage backend is frozen based on host and cluster_name.""" - return IMPL.is_backend_frozen(context, host, cluster_name) - - -############### - - -def cluster_get(context, id=None, is_up=None, get_services=False, - services_summary=False, read_deleted='no', - name_match_level=None, **filters): - """Get a cluster that matches the criteria. - - :param id: Id of the cluster. - :param is_up: Boolean value to filter based on the cluster's up status. - :param get_services: If we want to load all services from this cluster. 
- :param services_summary: If we want to load num_hosts and - num_down_hosts fields. - :param read_deleted: Filtering based on delete status. Default value is - "no". - :param name_match_level: 'pool', 'backend', or 'host' for name filter (as - defined in _filter_host method) - :param filters: Field based filters in the form of key/value. - :raise ClusterNotFound: If cluster doesn't exist. - """ - return IMPL.cluster_get(context, id, is_up, get_services, services_summary, - read_deleted, name_match_level, **filters) - - -def cluster_get_all(context, is_up=None, get_services=False, - services_summary=False, read_deleted='no', - name_match_level=None, **filters): - """Get all clusters that match the criteria. - - :param is_up: Boolean value to filter based on the cluster's up status. - :param get_services: If we want to load all services from this cluster. - :param services_summary: If we want to load num_hosts and - num_down_hosts fields. - :param read_deleted: Filtering based on delete status. Default value is - "no". - :param name_match_level: 'pool', 'backend', or 'host' for name filter (as - defined in _filter_host method) - :param filters: Field based filters in the form of key/value. - """ - return IMPL.cluster_get_all(context, is_up, get_services, services_summary, - read_deleted, name_match_level, **filters) - - -def cluster_create(context, values): - """Create a cluster from the values dictionary.""" - return IMPL.cluster_create(context, values) - - -def cluster_update(context, id, values): - """Set the given properties on an cluster and update it. - - Raises ClusterNotFound if cluster does not exist. - """ - return IMPL.cluster_update(context, id, values) - - -def cluster_destroy(context, id): - """Destroy the cluster or raise if it does not exist or has hosts. - - :raise ClusterNotFound: If cluster doesn't exist. 
- """ - return IMPL.cluster_destroy(context, id) - - -############### - - -def volume_attach(context, values): - """Attach a volume.""" - return IMPL.volume_attach(context, values) - - -def volume_attached(context, volume_id, instance_id, host_name, mountpoint, - attach_mode='rw'): - """Ensure that a volume is set as attached.""" - return IMPL.volume_attached(context, volume_id, instance_id, host_name, - mountpoint, attach_mode) - - -def volume_create(context, values): - """Create a volume from the values dictionary.""" - return IMPL.volume_create(context, values) - - -def volume_data_get_for_host(context, host, count_only=False): - """Get (volume_count, gigabytes) for project.""" - return IMPL.volume_data_get_for_host(context, - host, - count_only) - - -def volume_data_get_for_project(context, project_id): - """Get (volume_count, gigabytes) for project.""" - return IMPL.volume_data_get_for_project(context, project_id) - - -def volume_destroy(context, volume_id): - """Destroy the volume or raise if it does not exist.""" - return IMPL.volume_destroy(context, volume_id) - - -def volume_detached(context, volume_id, attachment_id): - """Ensure that a volume is set as detached.""" - return IMPL.volume_detached(context, volume_id, attachment_id) - - -def volume_get(context, volume_id): - """Get a volume or raise if it does not exist.""" - return IMPL.volume_get(context, volume_id) - - -def volume_get_all(context, marker=None, limit=None, sort_keys=None, - sort_dirs=None, filters=None, offset=None): - """Get all volumes.""" - return IMPL.volume_get_all(context, marker, limit, sort_keys=sort_keys, - sort_dirs=sort_dirs, filters=filters, - offset=offset) - - -def volume_get_all_by_host(context, host, filters=None): - """Get all volumes belonging to a host.""" - return IMPL.volume_get_all_by_host(context, host, filters=filters) - - -def volume_get_all_by_group(context, group_id, filters=None): - """Get all volumes belonging to a consistency group.""" - return 
IMPL.volume_get_all_by_group(context, group_id, filters=filters) - - -def volume_get_all_by_generic_group(context, group_id, filters=None): - """Get all volumes belonging to a generic volume group.""" - return IMPL.volume_get_all_by_generic_group(context, group_id, - filters=filters) - - -def volume_get_all_by_project(context, project_id, marker, limit, - sort_keys=None, sort_dirs=None, filters=None, - offset=None): - """Get all volumes belonging to a project.""" - return IMPL.volume_get_all_by_project(context, project_id, marker, limit, - sort_keys=sort_keys, - sort_dirs=sort_dirs, - filters=filters, - offset=offset) - - -def get_volume_summary(context, project_only): - """Get volume summary.""" - return IMPL.get_volume_summary(context, project_only) - - -def volume_update(context, volume_id, values): - """Set the given properties on a volume and update it. - - Raises NotFound if volume does not exist. - - """ - return IMPL.volume_update(context, volume_id, values) - - -def volumes_update(context, values_list): - """Set the given properties on a list of volumes and update them. - - Raises NotFound if a volume does not exist. - """ - return IMPL.volumes_update(context, values_list) - - -def volume_include_in_cluster(context, cluster, partial_rename=True, - **filters): - """Include all volumes matching the filters into a cluster. - - When partial_rename is set we will not set the cluster_name with cluster - parameter value directly, we'll replace provided cluster_name or host - filter value with cluster instead. - - This is useful when we want to replace just the cluster name but leave - the backend and pool information as it is. If we are using cluster_name - to filter, we'll use that same DB field to replace the cluster value and - leave the rest as it is. Likewise if we use the host to filter. - - Returns the number of volumes that have been changed. 
- """ - return IMPL.volume_include_in_cluster(context, cluster, partial_rename, - **filters) - - -def volume_attachment_update(context, attachment_id, values): - return IMPL.volume_attachment_update(context, attachment_id, values) - - -def volume_attachment_get(context, attachment_id): - return IMPL.volume_attachment_get(context, attachment_id) - - -def volume_attachment_get_all_by_volume_id(context, volume_id, - session=None): - return IMPL.volume_attachment_get_all_by_volume_id(context, - volume_id, - session) - - -def volume_attachment_get_all_by_host(context, host, filters=None): - # FIXME(jdg): Not using filters - return IMPL.volume_attachment_get_all_by_host(context, host) - - -def volume_attachment_get_all_by_instance_uuid(context, - instance_uuid, filters=None): - # FIXME(jdg): Not using filters - return IMPL.volume_attachment_get_all_by_instance_uuid(context, - instance_uuid) - - -def volume_attachment_get_all(context, filters=None, marker=None, limit=None, - offset=None, sort_keys=None, sort_dirs=None): - return IMPL.volume_attachment_get_all(context, filters, marker, limit, - offset, sort_keys, sort_dirs) - - -def volume_attachment_get_all_by_project(context, project_id, filters=None, - marker=None, limit=None, offset=None, - sort_keys=None, sort_dirs=None): - return IMPL.volume_attachment_get_all_by_project(context, project_id, - filters, marker, limit, - offset, sort_keys, - sort_dirs) - - -def attachment_destroy(context, attachment_id): - """Destroy the attachment or raise if it does not exist.""" - return IMPL.attachment_destroy(context, attachment_id) - - -def volume_update_status_based_on_attachment(context, volume_id): - """Update volume status according to attached instance id""" - return IMPL.volume_update_status_based_on_attachment(context, volume_id) - - -def volume_has_snapshots_filter(): - return IMPL.volume_has_snapshots_filter() - - -def volume_has_undeletable_snapshots_filter(): - return IMPL.volume_has_undeletable_snapshots_filter() - - 
-def volume_has_snapshots_in_a_cgsnapshot_filter():
-    return IMPL.volume_has_snapshots_in_a_cgsnapshot_filter()
-
-
-def volume_has_attachments_filter():
-    return IMPL.volume_has_attachments_filter()
-
-
-def volume_qos_allows_retype(new_vol_type):
-    return IMPL.volume_qos_allows_retype(new_vol_type)
-
-
-def volume_has_other_project_snp_filter():
-    return IMPL.volume_has_other_project_snp_filter()
-
-
-####################
-
-
-def snapshot_create(context, values):
-    """Create a snapshot from the values dictionary."""
-    return IMPL.snapshot_create(context, values)
-
-
-def snapshot_destroy(context, snapshot_id):
-    """Destroy the snapshot or raise if it does not exist."""
-    return IMPL.snapshot_destroy(context, snapshot_id)
-
-
-def snapshot_get(context, snapshot_id):
-    """Get a snapshot or raise if it does not exist."""
-    return IMPL.snapshot_get(context, snapshot_id)
-
-
-def snapshot_get_all(context, filters=None, marker=None, limit=None,
-                     sort_keys=None, sort_dirs=None, offset=None):
-    """Get all snapshots."""
-    return IMPL.snapshot_get_all(context, filters, marker, limit, sort_keys,
-                                 sort_dirs, offset)
-
-
-def snapshot_get_all_by_project(context, project_id, filters=None, marker=None,
-                                limit=None, sort_keys=None, sort_dirs=None,
-                                offset=None):
-    """Get all snapshots belonging to a project."""
-    return IMPL.snapshot_get_all_by_project(context, project_id, filters,
-                                            marker, limit, sort_keys,
-                                            sort_dirs, offset)
-
-
-def snapshot_get_all_by_host(context, host, filters=None):
-    """Get all snapshots belonging to a host.
-
-    :param host: Include snapshots only for specified host.
-    :param filters: Filters for the query in the form of key/value.
- """ - return IMPL.snapshot_get_all_by_host(context, host, filters) - - -def snapshot_get_all_for_cgsnapshot(context, project_id): - """Get all snapshots belonging to a cgsnapshot.""" - return IMPL.snapshot_get_all_for_cgsnapshot(context, project_id) - - -def snapshot_get_all_for_group_snapshot(context, group_snapshot_id): - """Get all snapshots belonging to a group snapshot.""" - return IMPL.snapshot_get_all_for_group_snapshot(context, group_snapshot_id) - - -def snapshot_get_all_for_volume(context, volume_id): - """Get all snapshots for a volume.""" - return IMPL.snapshot_get_all_for_volume(context, volume_id) - - -def snapshot_get_latest_for_volume(context, volume_id): - """Get latest snapshot for a volume""" - return IMPL.snapshot_get_latest_for_volume(context, volume_id) - - -def snapshot_update(context, snapshot_id, values): - """Set the given properties on an snapshot and update it. - - Raises NotFound if snapshot does not exist. - - """ - return IMPL.snapshot_update(context, snapshot_id, values) - - -def snapshot_data_get_for_project(context, project_id, volume_type_id=None): - """Get count and gigabytes used for snapshots for specified project.""" - return IMPL.snapshot_data_get_for_project(context, - project_id, - volume_type_id) - - -def snapshot_get_all_active_by_window(context, begin, end=None, - project_id=None): - """Get all the snapshots inside the window. - - Specifying a project_id will filter for a certain project. 
- """ - return IMPL.snapshot_get_all_active_by_window(context, begin, end, - project_id) - - -#################### - - -def snapshot_metadata_get(context, snapshot_id): - """Get all metadata for a snapshot.""" - return IMPL.snapshot_metadata_get(context, snapshot_id) - - -def snapshot_metadata_delete(context, snapshot_id, key): - """Delete the given metadata item.""" - return IMPL.snapshot_metadata_delete(context, snapshot_id, key) - - -def snapshot_metadata_update(context, snapshot_id, metadata, delete): - """Update metadata if it exists, otherwise create it.""" - return IMPL.snapshot_metadata_update(context, snapshot_id, - metadata, delete) - - -#################### - - -def volume_metadata_get(context, volume_id): - """Get all metadata for a volume.""" - return IMPL.volume_metadata_get(context, volume_id) - - -def volume_metadata_delete(context, volume_id, key, - meta_type=common.METADATA_TYPES.user): - """Delete the given metadata item.""" - return IMPL.volume_metadata_delete(context, volume_id, - key, meta_type) - - -def volume_metadata_update(context, volume_id, metadata, - delete, meta_type=common.METADATA_TYPES.user): - """Update metadata if it exists, otherwise create it.""" - return IMPL.volume_metadata_update(context, volume_id, metadata, - delete, meta_type) - - -################## - - -def volume_admin_metadata_get(context, volume_id): - """Get all administration metadata for a volume.""" - return IMPL.volume_admin_metadata_get(context, volume_id) - - -def volume_admin_metadata_delete(context, volume_id, key): - """Delete the given metadata item.""" - return IMPL.volume_admin_metadata_delete(context, volume_id, key) - - -def volume_admin_metadata_update(context, volume_id, metadata, delete, - add=True, update=True): - """Update metadata if it exists, otherwise create it.""" - return IMPL.volume_admin_metadata_update(context, volume_id, metadata, - delete, add, update) - - -################## - - -def volume_type_create(context, values, projects=None): 
- """Create a new volume type.""" - return IMPL.volume_type_create(context, values, projects) - - -def volume_type_update(context, volume_type_id, values): - return IMPL.volume_type_update(context, volume_type_id, values) - - -def volume_type_get_all(context, inactive=False, filters=None, marker=None, - limit=None, sort_keys=None, sort_dirs=None, - offset=None, list_result=False): - """Get all volume types. - - :param context: context to query under - :param inactive: Include inactive volume types to the result set - :param filters: Filters for the query in the form of key/value. - :param marker: the last item of the previous page, used to determine the - next page of results to return - :param limit: maximum number of items to return - :param sort_keys: list of attributes by which results should be sorted, - paired with corresponding item in sort_dirs - :param sort_dirs: list of directions in which results should be sorted, - paired with corresponding item in sort_keys - :param list_result: For compatibility, if list_result = True, return a list - instead of dict. - - :is_public: Filter volume types based on visibility: - - * **True**: List public volume types only - * **False**: List private volume types only - * **None**: List both public and private volume types - - :returns: list/dict of matching volume types - """ - - return IMPL.volume_type_get_all(context, inactive, filters, marker=marker, - limit=limit, sort_keys=sort_keys, - sort_dirs=sort_dirs, offset=offset, - list_result=list_result) - - -def volume_type_get(context, id, inactive=False, expected_fields=None): - """Get volume type by id. - - :param context: context to query under - :param id: Volume type id to get. - :param inactive: Consider inactive volume types when searching - :param expected_fields: Return those additional fields. - Supported fields are: projects. 
- :returns: volume type - """ - return IMPL.volume_type_get(context, id, inactive, expected_fields) - - -def volume_type_get_by_name(context, name): - """Get volume type by name.""" - return IMPL.volume_type_get_by_name(context, name) - - -def volume_types_get_by_name_or_id(context, volume_type_list): - """Get volume types by name or id.""" - return IMPL.volume_types_get_by_name_or_id(context, volume_type_list) - - -def volume_type_qos_associations_get(context, qos_specs_id, inactive=False): - """Get volume types that are associated with specific qos specs.""" - return IMPL.volume_type_qos_associations_get(context, - qos_specs_id, - inactive) - - -def volume_type_qos_associate(context, type_id, qos_specs_id): - """Associate a volume type with specific qos specs.""" - return IMPL.volume_type_qos_associate(context, type_id, qos_specs_id) - - -def volume_type_qos_disassociate(context, qos_specs_id, type_id): - """Disassociate a volume type from specific qos specs.""" - return IMPL.volume_type_qos_disassociate(context, qos_specs_id, type_id) - - -def volume_type_qos_disassociate_all(context, qos_specs_id): - """Disassociate all volume types from specific qos specs.""" - return IMPL.volume_type_qos_disassociate_all(context, - qos_specs_id) - - -def volume_type_qos_specs_get(context, type_id): - """Get all qos specs for given volume type.""" - return IMPL.volume_type_qos_specs_get(context, type_id) - - -def volume_type_destroy(context, id): - """Delete a volume type.""" - return IMPL.volume_type_destroy(context, id) - - -def volume_get_all_active_by_window(context, begin, end=None, project_id=None): - """Get all the volumes inside the window. - - Specifying a project_id will filter for a certain project. 
- """ - return IMPL.volume_get_all_active_by_window(context, begin, end, - project_id) - - -def volume_type_access_get_all(context, type_id): - """Get all volume type access of a volume type.""" - return IMPL.volume_type_access_get_all(context, type_id) - - -def volume_type_access_add(context, type_id, project_id): - """Add volume type access for project.""" - return IMPL.volume_type_access_add(context, type_id, project_id) - - -def volume_type_access_remove(context, type_id, project_id): - """Remove volume type access for project.""" - return IMPL.volume_type_access_remove(context, type_id, project_id) - - -#################### - - -def group_type_create(context, values, projects=None): - """Create a new group type.""" - return IMPL.group_type_create(context, values, projects) - - -def group_type_update(context, group_type_id, values): - return IMPL.group_type_update(context, group_type_id, values) - - -def group_type_get_all(context, inactive=False, filters=None, marker=None, - limit=None, sort_keys=None, sort_dirs=None, - offset=None, list_result=False): - """Get all group types. - - :param context: context to query under - :param inactive: Include inactive group types to the result set - :param filters: Filters for the query in the form of key/value. - :param marker: the last item of the previous page, used to determine the - next page of results to return - :param limit: maximum number of items to return - :param sort_keys: list of attributes by which results should be sorted, - paired with corresponding item in sort_dirs - :param sort_dirs: list of directions in which results should be sorted, - paired with corresponding item in sort_keys - :param list_result: For compatibility, if list_result = True, return a list - instead of dict. 
- - :is_public: Filter group types based on visibility: - - * **True**: List public group types only - * **False**: List private group types only - * **None**: List both public and private group types - - :returns: list/dict of matching group types - """ - - return IMPL.group_type_get_all(context, inactive, filters, marker=marker, - limit=limit, sort_keys=sort_keys, - sort_dirs=sort_dirs, offset=offset, - list_result=list_result) - - -def group_type_get(context, id, inactive=False, expected_fields=None): - """Get group type by id. - - :param context: context to query under - :param id: Group type id to get. - :param inactive: Consider inactive group types when searching - :param expected_fields: Return those additional fields. - Supported fields are: projects. - :returns: group type - """ - return IMPL.group_type_get(context, id, inactive, expected_fields) - - -def group_type_get_by_name(context, name): - """Get group type by name.""" - return IMPL.group_type_get_by_name(context, name) - - -def group_types_get_by_name_or_id(context, group_type_list): - """Get group types by name or id.""" - return IMPL.group_types_get_by_name_or_id(context, group_type_list) - - -def group_type_destroy(context, id): - """Delete a group type.""" - return IMPL.group_type_destroy(context, id) - - -def group_type_access_get_all(context, type_id): - """Get all group type access of a group type.""" - return IMPL.group_type_access_get_all(context, type_id) - - -def group_type_access_add(context, type_id, project_id): - """Add group type access for project.""" - return IMPL.group_type_access_add(context, type_id, project_id) - - -def group_type_access_remove(context, type_id, project_id): - """Remove group type access for project.""" - return IMPL.group_type_access_remove(context, type_id, project_id) - - -def volume_type_get_all_by_group(context, group_id): - """Get all volumes in a group.""" - return IMPL.volume_type_get_all_by_group(context, group_id) - - -#################### - - -def 
volume_type_extra_specs_get(context, volume_type_id): - """Get all extra specs for a volume type.""" - return IMPL.volume_type_extra_specs_get(context, volume_type_id) - - -def volume_type_extra_specs_delete(context, volume_type_id, key): - """Delete the given extra specs item.""" - return IMPL.volume_type_extra_specs_delete(context, volume_type_id, key) - - -def volume_type_extra_specs_update_or_create(context, - volume_type_id, - extra_specs): - """Create or update volume type extra specs. - - This adds or modifies the key/value pairs specified in the extra specs dict - argument. - """ - return IMPL.volume_type_extra_specs_update_or_create(context, - volume_type_id, - extra_specs) - - -################### - - -def group_type_specs_get(context, group_type_id): - """Get all group specs for a group type.""" - return IMPL.group_type_specs_get(context, group_type_id) - - -def group_type_specs_delete(context, group_type_id, key): - """Delete the given group specs item.""" - return IMPL.group_type_specs_delete(context, group_type_id, key) - - -def group_type_specs_update_or_create(context, - group_type_id, - group_specs): - """Create or update group type specs. - - This adds or modifies the key/value pairs specified in the group specs dict - argument. 
- """ - return IMPL.group_type_specs_update_or_create(context, - group_type_id, - group_specs) - - -################### - - -def volume_type_encryption_get(context, volume_type_id, session=None): - return IMPL.volume_type_encryption_get(context, volume_type_id, session) - - -def volume_type_encryption_delete(context, volume_type_id): - return IMPL.volume_type_encryption_delete(context, volume_type_id) - - -def volume_type_encryption_create(context, volume_type_id, encryption_specs): - return IMPL.volume_type_encryption_create(context, volume_type_id, - encryption_specs) - - -def volume_type_encryption_update(context, volume_type_id, encryption_specs): - return IMPL.volume_type_encryption_update(context, volume_type_id, - encryption_specs) - - -def volume_type_encryption_volume_get(context, volume_type_id, session=None): - return IMPL.volume_type_encryption_volume_get(context, volume_type_id, - session) - - -def volume_encryption_metadata_get(context, volume_id, session=None): - return IMPL.volume_encryption_metadata_get(context, volume_id, session) - - -################### - - -def qos_specs_create(context, values): - """Create a qos_specs.""" - return IMPL.qos_specs_create(context, values) - - -def qos_specs_get(context, qos_specs_id): - """Get all specification for a given qos_specs.""" - return IMPL.qos_specs_get(context, qos_specs_id) - - -def qos_specs_get_all(context, filters=None, marker=None, limit=None, - offset=None, sort_keys=None, sort_dirs=None): - """Get all qos_specs.""" - return IMPL.qos_specs_get_all(context, filters=filters, marker=marker, - limit=limit, offset=offset, - sort_keys=sort_keys, sort_dirs=sort_dirs) - - -def qos_specs_get_by_name(context, name): - """Get all specification for a given qos_specs.""" - return IMPL.qos_specs_get_by_name(context, name) - - -def qos_specs_associations_get(context, qos_specs_id): - """Get all associated volume types for a given qos_specs.""" - return IMPL.qos_specs_associations_get(context, qos_specs_id) - - 
-def qos_specs_associate(context, qos_specs_id, type_id): - """Associate qos_specs from volume type.""" - return IMPL.qos_specs_associate(context, qos_specs_id, type_id) - - -def qos_specs_disassociate(context, qos_specs_id, type_id): - """Disassociate qos_specs from volume type.""" - return IMPL.qos_specs_disassociate(context, qos_specs_id, type_id) - - -def qos_specs_disassociate_all(context, qos_specs_id): - """Disassociate qos_specs from all entities.""" - return IMPL.qos_specs_disassociate_all(context, qos_specs_id) - - -def qos_specs_delete(context, qos_specs_id): - """Delete the qos_specs.""" - return IMPL.qos_specs_delete(context, qos_specs_id) - - -def qos_specs_item_delete(context, qos_specs_id, key): - """Delete specified key in the qos_specs.""" - return IMPL.qos_specs_item_delete(context, qos_specs_id, key) - - -def qos_specs_update(context, qos_specs_id, specs): - """Update qos specs. - - This adds or modifies the key/value pairs specified in the - specs dict argument for a given qos_specs. 
- """ - return IMPL.qos_specs_update(context, qos_specs_id, specs) - - -################### - - -def volume_glance_metadata_create(context, volume_id, key, value): - """Update the Glance metadata for the specified volume.""" - return IMPL.volume_glance_metadata_create(context, - volume_id, - key, - value) - - -def volume_glance_metadata_bulk_create(context, volume_id, metadata): - """Add Glance metadata for specified volume (multiple pairs).""" - return IMPL.volume_glance_metadata_bulk_create(context, volume_id, - metadata) - - -def volume_glance_metadata_get_all(context): - """Return the glance metadata for all volumes.""" - return IMPL.volume_glance_metadata_get_all(context) - - -def volume_glance_metadata_get(context, volume_id): - """Return the glance metadata for a volume.""" - return IMPL.volume_glance_metadata_get(context, volume_id) - - -def volume_glance_metadata_list_get(context, volume_id_list): - """Return the glance metadata for a volume list.""" - return IMPL.volume_glance_metadata_list_get(context, volume_id_list) - - -def volume_snapshot_glance_metadata_get(context, snapshot_id): - """Return the Glance metadata for the specified snapshot.""" - return IMPL.volume_snapshot_glance_metadata_get(context, snapshot_id) - - -def volume_glance_metadata_copy_to_snapshot(context, snapshot_id, volume_id): - """Update the Glance metadata for a snapshot. - - This will copy all of the key:value pairs from the originating volume, - to ensure that a volume created from the snapshot will retain the - original metadata. - """ - return IMPL.volume_glance_metadata_copy_to_snapshot(context, snapshot_id, - volume_id) - - -def volume_glance_metadata_copy_to_volume(context, volume_id, snapshot_id): - """Update the Glance metadata from a volume (created from a snapshot). - - This will copy all of the key:value pairs from the originating snapshot, - to ensure that the Glance metadata from the original volume is retained. 
- """ - return IMPL.volume_glance_metadata_copy_to_volume(context, volume_id, - snapshot_id) - - -def volume_glance_metadata_delete_by_volume(context, volume_id): - """Delete the glance metadata for a volume.""" - return IMPL.volume_glance_metadata_delete_by_volume(context, volume_id) - - -def volume_glance_metadata_delete_by_snapshot(context, snapshot_id): - """Delete the glance metadata for a snapshot.""" - return IMPL.volume_glance_metadata_delete_by_snapshot(context, snapshot_id) - - -def volume_glance_metadata_copy_from_volume_to_volume(context, - src_volume_id, - volume_id): - """Update the Glance metadata for a volume. - - Update the Glance metadata for a volume by copying all of the key:value - pairs from the originating volume. - - This is so that a volume created from the volume (clone) will retain the - original metadata. - """ - return IMPL.volume_glance_metadata_copy_from_volume_to_volume( - context, - src_volume_id, - volume_id) - - -################### - - -def quota_create(context, project_id, resource, limit, allocated=0): - """Create a quota for the given project and resource.""" - return IMPL.quota_create(context, project_id, resource, limit, - allocated=allocated) - - -def quota_get(context, project_id, resource): - """Retrieve a quota or raise if it does not exist.""" - return IMPL.quota_get(context, project_id, resource) - - -def quota_get_all_by_project(context, project_id): - """Retrieve all quotas associated with a given project.""" - return IMPL.quota_get_all_by_project(context, project_id) - - -def quota_allocated_get_all_by_project(context, project_id): - """Retrieve all allocated quotas associated with a given project.""" - return IMPL.quota_allocated_get_all_by_project(context, project_id) - - -def quota_allocated_update(context, project_id, - resource, allocated): - """Update allocated quota to subprojects or raise if it does not exist. 
- - :raises cinder.exception.ProjectQuotaNotFound: - """ - return IMPL.quota_allocated_update(context, project_id, - resource, allocated) - - -def quota_update(context, project_id, resource, limit): - """Update a quota or raise if it does not exist.""" - return IMPL.quota_update(context, project_id, resource, limit) - - -def quota_update_resource(context, old_res, new_res): - """Update resource of quotas.""" - return IMPL.quota_update_resource(context, old_res, new_res) - - -def quota_destroy(context, project_id, resource): - """Destroy the quota or raise if it does not exist.""" - return IMPL.quota_destroy(context, project_id, resource) - - -################### - - -def quota_class_create(context, class_name, resource, limit): - """Create a quota class for the given name and resource.""" - return IMPL.quota_class_create(context, class_name, resource, limit) - - -def quota_class_get(context, class_name, resource): - """Retrieve a quota class or raise if it does not exist.""" - return IMPL.quota_class_get(context, class_name, resource) - - -def quota_class_get_defaults(context): - """Retrieve all default quotas.""" - return IMPL.quota_class_get_defaults(context) - - -def quota_class_get_all_by_name(context, class_name): - """Retrieve all quotas associated with a given quota class.""" - return IMPL.quota_class_get_all_by_name(context, class_name) - - -def quota_class_update(context, class_name, resource, limit): - """Update a quota class or raise if it does not exist.""" - return IMPL.quota_class_update(context, class_name, resource, limit) - - -def quota_class_update_resource(context, resource, new_resource): - """Update resource name in quota_class.""" - return IMPL.quota_class_update_resource(context, resource, new_resource) - - -def quota_class_destroy(context, class_name, resource): - """Destroy the quota class or raise if it does not exist.""" - return IMPL.quota_class_destroy(context, class_name, resource) - - -def quota_class_destroy_all_by_name(context, 
class_name): - """Destroy all quotas associated with a given quota class.""" - return IMPL.quota_class_destroy_all_by_name(context, class_name) - - -################### - - -def quota_usage_get(context, project_id, resource): - """Retrieve a quota usage or raise if it does not exist.""" - return IMPL.quota_usage_get(context, project_id, resource) - - -def quota_usage_get_all_by_project(context, project_id): - """Retrieve all usage associated with a given resource.""" - return IMPL.quota_usage_get_all_by_project(context, project_id) - - -################### - - -def quota_reserve(context, resources, quotas, deltas, expire, - until_refresh, max_age, project_id=None, - is_allocated_reserve=False): - """Check quotas and create appropriate reservations.""" - return IMPL.quota_reserve(context, resources, quotas, deltas, expire, - until_refresh, max_age, project_id=project_id, - is_allocated_reserve=is_allocated_reserve) - - -def reservation_commit(context, reservations, project_id=None): - """Commit quota reservations.""" - return IMPL.reservation_commit(context, reservations, - project_id=project_id) - - -def reservation_rollback(context, reservations, project_id=None): - """Roll back quota reservations.""" - return IMPL.reservation_rollback(context, reservations, - project_id=project_id) - - -def quota_destroy_by_project(context, project_id): - """Destroy all quotas associated with a given project.""" - return IMPL.quota_destroy_by_project(context, project_id) - - -def reservation_expire(context): - """Roll back any expired reservations.""" - return IMPL.reservation_expire(context) - - -def quota_usage_update_resource(context, old_res, new_res): - """Update resource field in quota_usages.""" - return IMPL.quota_usage_update_resource(context, old_res, new_res) - - -################### - - -def backup_get(context, backup_id, read_deleted=None, project_only=True): - """Get a backup or raise if it does not exist.""" - return IMPL.backup_get(context, backup_id, 
read_deleted, project_only) - - -def backup_get_all(context, filters=None, marker=None, limit=None, - offset=None, sort_keys=None, sort_dirs=None): - """Get all backups.""" - return IMPL.backup_get_all(context, filters=filters, marker=marker, - limit=limit, offset=offset, sort_keys=sort_keys, - sort_dirs=sort_dirs) - - -def backup_get_all_by_host(context, host): - """Get all backups belonging to a host.""" - return IMPL.backup_get_all_by_host(context, host) - - -def backup_create(context, values): - """Create a backup from the values dictionary.""" - return IMPL.backup_create(context, values) - - -def backup_get_all_by_project(context, project_id, filters=None, marker=None, - limit=None, offset=None, sort_keys=None, - sort_dirs=None): - """Get all backups belonging to a project.""" - return IMPL.backup_get_all_by_project(context, project_id, - filters=filters, marker=marker, - limit=limit, offset=offset, - sort_keys=sort_keys, - sort_dirs=sort_dirs) - - -def backup_get_all_by_volume(context, volume_id, filters=None): - """Get all backups belonging to a volume.""" - return IMPL.backup_get_all_by_volume(context, volume_id, - filters=filters) - - -def backup_get_all_active_by_window(context, begin, end=None, project_id=None): - """Get all the backups inside the window. - - Specifying a project_id will filter for a certain project. - """ - return IMPL.backup_get_all_active_by_window(context, begin, end, - project_id) - - -def backup_update(context, backup_id, values): - """Set the given properties on a backup and update it. - - Raises NotFound if backup does not exist. 
- """ - return IMPL.backup_update(context, backup_id, values) - - -def backup_destroy(context, backup_id): - """Destroy the backup or raise if it does not exist.""" - return IMPL.backup_destroy(context, backup_id) - - -################### - - -def transfer_get(context, transfer_id): - """Get a volume transfer record or raise if it does not exist.""" - return IMPL.transfer_get(context, transfer_id) - - -def transfer_get_all(context): - """Get all volume transfer records.""" - return IMPL.transfer_get_all(context) - - -def transfer_get_all_by_project(context, project_id): - """Get all volume transfer records for specified project.""" - return IMPL.transfer_get_all_by_project(context, project_id) - - -def transfer_create(context, values): - """Create an entry in the transfers table.""" - return IMPL.transfer_create(context, values) - - -def transfer_destroy(context, transfer_id): - """Destroy a record in the volume transfer table.""" - return IMPL.transfer_destroy(context, transfer_id) - - -def transfer_accept(context, transfer_id, user_id, project_id): - """Accept a volume transfer.""" - return IMPL.transfer_accept(context, transfer_id, user_id, project_id) - - -################### - - -def consistencygroup_get(context, consistencygroup_id): - """Get a consistencygroup or raise if it does not exist.""" - return IMPL.consistencygroup_get(context, consistencygroup_id) - - -def consistencygroup_get_all(context, filters=None, marker=None, limit=None, - offset=None, sort_keys=None, sort_dirs=None): - """Get all consistencygroups.""" - return IMPL.consistencygroup_get_all(context, filters=filters, - marker=marker, limit=limit, - offset=offset, sort_keys=sort_keys, - sort_dirs=sort_dirs) - - -def consistencygroup_create(context, values, cg_snap_id=None, cg_id=None): - """Create a consistencygroup from the values dictionary.""" - return IMPL.consistencygroup_create(context, values, cg_snap_id, cg_id) - - -def consistencygroup_get_all_by_project(context, project_id, 
filters=None, - marker=None, limit=None, offset=None, - sort_keys=None, sort_dirs=None): - """Get all consistencygroups belonging to a project.""" - return IMPL.consistencygroup_get_all_by_project(context, project_id, - filters=filters, - marker=marker, limit=limit, - offset=offset, - sort_keys=sort_keys, - sort_dirs=sort_dirs) - - -def consistencygroup_update(context, consistencygroup_id, values): - """Set the given properties on a consistencygroup and update it. - - Raises NotFound if consistencygroup does not exist. - """ - return IMPL.consistencygroup_update(context, consistencygroup_id, values) - - -def consistencygroup_destroy(context, consistencygroup_id): - """Destroy the consistencygroup or raise if it does not exist.""" - return IMPL.consistencygroup_destroy(context, consistencygroup_id) - - -def cg_has_cgsnapshot_filter(): - """Return a filter that checks if a CG has CG Snapshots.""" - return IMPL.cg_has_cgsnapshot_filter() - - -def cg_has_volumes_filter(attached_or_with_snapshots=False): - """Return a filter to check if a CG has volumes. - - When attached_or_with_snapshots parameter is given a True value only - attached volumes or those with snapshots will be considered. - """ - return IMPL.cg_has_volumes_filter(attached_or_with_snapshots) - - -def cg_creating_from_src(cg_id=None, cgsnapshot_id=None): - """Return a filter to check if a CG is being used as creation source. - - Returned filter is meant to be used in the Conditional Update mechanism and - checks if provided CG ID or CG Snapshot ID is currently being used to - create another CG. - - This filter will not include CGs that have used the ID but have already - finished their creation (status is no longer creating). - - Filter uses a subquery that allows it to be used on updates to the - consistencygroups table. 
- """ - return IMPL.cg_creating_from_src(cg_id, cgsnapshot_id) - - -def consistencygroup_include_in_cluster(context, cluster, partial_rename=True, - **filters): - """Include all consistency groups matching the filters into a cluster. - - When partial_rename is set we will not set the cluster_name with cluster - parameter value directly, we'll replace provided cluster_name or host - filter value with cluster instead. - - This is useful when we want to replace just the cluster name but leave - the backend and pool information as it is. If we are using cluster_name - to filter, we'll use that same DB field to replace the cluster value and - leave the rest as it is. Likewise if we use the host to filter. - - Returns the number of consistency groups that have been changed. - """ - return IMPL.consistencygroup_include_in_cluster(context, cluster, - partial_rename, - **filters) - - -################### - - -def group_get(context, group_id): - """Get a group or raise if it does not exist.""" - return IMPL.group_get(context, group_id) - - -def group_get_all(context, filters=None, marker=None, limit=None, - offset=None, sort_keys=None, sort_dirs=None): - """Get all groups.""" - return IMPL.group_get_all(context, filters=filters, - marker=marker, limit=limit, - offset=offset, sort_keys=sort_keys, - sort_dirs=sort_dirs) - - -def group_create(context, values, group_snapshot_id=None, group_id=None): - """Create a group from the values dictionary.""" - return IMPL.group_create(context, values, group_snapshot_id, group_id) - - -def group_get_all_by_project(context, project_id, filters=None, - marker=None, limit=None, offset=None, - sort_keys=None, sort_dirs=None): - """Get all groups belonging to a project.""" - return IMPL.group_get_all_by_project(context, project_id, - filters=filters, - marker=marker, limit=limit, - offset=offset, - sort_keys=sort_keys, - sort_dirs=sort_dirs) - - -def group_update(context, group_id, values): - """Set the given properties on a group and update 
it. - - Raises NotFound if group does not exist. - """ - return IMPL.group_update(context, group_id, values) - - -def group_destroy(context, group_id): - """Destroy the group or raise if it does not exist.""" - return IMPL.group_destroy(context, group_id) - - -def group_has_group_snapshot_filter(): - """Return a filter that checks if a Group has Group Snapshots.""" - return IMPL.group_has_group_snapshot_filter() - - -def group_has_volumes_filter(attached_or_with_snapshots=False): - """Return a filter to check if a Group has volumes. - - When attached_or_with_snapshots parameter is given a True value only - attached volumes or those with snapshots will be considered. - """ - return IMPL.group_has_volumes_filter(attached_or_with_snapshots) - - -def group_creating_from_src(group_id=None, group_snapshot_id=None): - """Return a filter to check if a Group is being used as creation source. - - Returned filter is meant to be used in the Conditional Update mechanism and - checks if provided Group ID or Group Snapshot ID is currently being used to - create another Group. - - This filter will not include Groups that have used the ID but have already - finished their creation (status is no longer creating). - - Filter uses a subquery that allows it to be used on updates to the - groups table. 
- """ - return IMPL.group_creating_from_src(group_id, group_snapshot_id) - - -def group_volume_type_mapping_create(context, group_id, volume_type_id): - """Create a group volume_type mapping entry.""" - return IMPL.group_volume_type_mapping_create(context, group_id, - volume_type_id) - - -################### - - -def cgsnapshot_get(context, cgsnapshot_id): - """Get a cgsnapshot or raise if it does not exist.""" - return IMPL.cgsnapshot_get(context, cgsnapshot_id) - - -def cgsnapshot_get_all(context, filters=None): - """Get all cgsnapshots.""" - return IMPL.cgsnapshot_get_all(context, filters) - - -def cgsnapshot_create(context, values): - """Create a cgsnapshot from the values dictionary.""" - return IMPL.cgsnapshot_create(context, values) - - -def cgsnapshot_get_all_by_group(context, group_id, filters=None): - """Get all cgsnapshots belonging to a consistency group.""" - return IMPL.cgsnapshot_get_all_by_group(context, group_id, filters) - - -def cgsnapshot_get_all_by_project(context, project_id, filters=None): - """Get all cgsnapshots belonging to a project.""" - return IMPL.cgsnapshot_get_all_by_project(context, project_id, filters) - - -def cgsnapshot_update(context, cgsnapshot_id, values): - """Set the given properties on a cgsnapshot and update it. - - Raises NotFound if cgsnapshot does not exist. 
- """ - return IMPL.cgsnapshot_update(context, cgsnapshot_id, values) - - -def cgsnapshot_destroy(context, cgsnapshot_id): - """Destroy the cgsnapshot or raise if it does not exist.""" - return IMPL.cgsnapshot_destroy(context, cgsnapshot_id) - - -def cgsnapshot_creating_from_src(): - """Get a filter that checks if a CGSnapshot is being created from a CG.""" - return IMPL.cgsnapshot_creating_from_src() - - -################### - - -def group_snapshot_get(context, group_snapshot_id): - """Get a group snapshot or raise if it does not exist.""" - return IMPL.group_snapshot_get(context, group_snapshot_id) - - -def group_snapshot_get_all(context, filters=None, marker=None, limit=None, - offset=None, sort_keys=None, sort_dirs=None): - """Get all group snapshots.""" - return IMPL.group_snapshot_get_all(context, filters, marker, limit, - offset, sort_keys, sort_dirs) - - -def group_snapshot_create(context, values): - """Create a group snapshot from the values dictionary.""" - return IMPL.group_snapshot_create(context, values) - - -def group_snapshot_get_all_by_group(context, group_id, filters=None, - marker=None, limit=None, - offset=None, sort_keys=None, - sort_dirs=None): - """Get all group snapshots belonging to a group.""" - return IMPL.group_snapshot_get_all_by_group(context, group_id, - filters, marker, limit, - offset, sort_keys, sort_dirs) - - -def group_snapshot_get_all_by_project(context, project_id, filters=None, - marker=None, limit=None, - offset=None, sort_keys=None, - sort_dirs=None): - """Get all group snapshots belonging to a project.""" - return IMPL.group_snapshot_get_all_by_project(context, project_id, - filters, marker, limit, - offset, sort_keys, sort_dirs) - - -def group_snapshot_update(context, group_snapshot_id, values): - """Set the given properties on a group snapshot and update it. - - Raises NotFound if group snapshot does not exist. 
- """ - return IMPL.group_snapshot_update(context, group_snapshot_id, values) - - -def group_snapshot_destroy(context, group_snapshot_id): - """Destroy the group snapshot or raise if it does not exist.""" - return IMPL.group_snapshot_destroy(context, group_snapshot_id) - - -def group_snapshot_creating_from_src(): - """Get a filter to check if a grp snapshot is being created from a grp.""" - return IMPL.group_snapshot_creating_from_src() - - -################### - - -def purge_deleted_rows(context, age_in_days): - """Purge deleted rows older than given age from cinder tables - - Raises InvalidParameterValue if age_in_days is incorrect. - :returns: number of deleted rows - """ - return IMPL.purge_deleted_rows(context, age_in_days=age_in_days) - - -def get_booleans_for_table(table_name): - return IMPL.get_booleans_for_table(table_name) - - -################### - - -def driver_initiator_data_insert_by_key(context, initiator, - namespace, key, value): - """Updates DriverInitiatorData entry. - - Sets the value for the specified key within the namespace. - - If the entry already exists return False, if it inserted successfully - return True. 
- """ - return IMPL.driver_initiator_data_insert_by_key(context, - initiator, - namespace, - key, - value) - - -def driver_initiator_data_get(context, initiator, namespace): - """Query for an DriverInitiatorData that has the specified key""" - return IMPL.driver_initiator_data_get(context, initiator, namespace) - - -################### - - -def image_volume_cache_create(context, host, cluster_name, image_id, - image_updated_at, volume_id, size): - """Create a new image volume cache entry.""" - return IMPL.image_volume_cache_create(context, - host, - cluster_name, - image_id, - image_updated_at, - volume_id, - size) - - -def image_volume_cache_delete(context, volume_id): - """Delete an image volume cache entry specified by volume id.""" - return IMPL.image_volume_cache_delete(context, volume_id) - - -def image_volume_cache_get_and_update_last_used(context, image_id, **filters): - """Query for an image volume cache entry.""" - return IMPL.image_volume_cache_get_and_update_last_used(context, - image_id, - **filters) - - -def image_volume_cache_get_by_volume_id(context, volume_id): - """Query to see if a volume id is an image-volume contained in the cache""" - return IMPL.image_volume_cache_get_by_volume_id(context, volume_id) - - -def image_volume_cache_get_all(context, **filters): - """Query for all image volume cache entry for a host.""" - return IMPL.image_volume_cache_get_all(context, **filters) - - -def image_volume_cache_include_in_cluster(context, cluster, - partial_rename=True, **filters): - """Include in cluster image volume cache entries matching the filters. - - When partial_rename is set we will not set the cluster_name with cluster - parameter value directly, we'll replace provided cluster_name or host - filter value with cluster instead. - - This is useful when we want to replace just the cluster name but leave - the backend and pool information as it is. 
If we are using cluster_name - to filter, we'll use that same DB field to replace the cluster value and - leave the rest as it is. Likewise if we use the host to filter. - - Returns the number of volumes that have been changed. - """ - return IMPL.image_volume_cache_include_in_cluster( - context, cluster, partial_rename, **filters) - - -################### - - -def message_get(context, message_id): - """Return a message with the specified ID.""" - return IMPL.message_get(context, message_id) - - -def message_get_all(context, filters=None, marker=None, limit=None, - offset=None, sort_keys=None, sort_dirs=None): - return IMPL.message_get_all(context, filters=filters, marker=marker, - limit=limit, offset=offset, - sort_keys=sort_keys, sort_dirs=sort_dirs) - - -def message_create(context, values): - """Creates a new message with the specified values.""" - return IMPL.message_create(context, values) - - -def message_destroy(context, message_id): - """Deletes message with the specified ID.""" - return IMPL.message_destroy(context, message_id) - - -def cleanup_expired_messages(context): - """Soft delete expired messages""" - return IMPL.cleanup_expired_messages(context) - - -################### - - -def workers_init(): - """Check if DB supports subsecond resolution and set global flag. - - MySQL 5.5 doesn't support subsecond resolution in datetime fields, so we - have to take it into account when working with the worker's table. - - Once we drop support for MySQL 5.5 we can remove this method. 
- """ - return IMPL.workers_init() - - -def worker_create(context, **values): - """Create a worker entry from optional arguments.""" - return IMPL.worker_create(context, **values) - - -def worker_get(context, **filters): - """Get a worker or raise exception if it does not exist.""" - return IMPL.worker_get(context, **filters) - - -def worker_get_all(context, until=None, db_filters=None, **filters): - """Get all workers that match given criteria.""" - return IMPL.worker_get_all(context, until=until, db_filters=db_filters, - **filters) - - -def worker_update(context, id, filters=None, orm_worker=None, **values): - """Update a worker with given values.""" - return IMPL.worker_update(context, id, filters=filters, - orm_worker=orm_worker, **values) - - -def worker_claim_for_cleanup(context, claimer_id, orm_worker): - """Soft delete a worker, change the service_id and update the worker.""" - return IMPL.worker_claim_for_cleanup(context, claimer_id, orm_worker) - - -def worker_destroy(context, **filters): - """Delete a worker (no soft delete).""" - return IMPL.worker_destroy(context, **filters) - - -################### - - -def resource_exists(context, model, resource_id): - return IMPL.resource_exists(context, model, resource_id) - - -def get_model_for_versioned_object(versioned_object): - return IMPL.get_model_for_versioned_object(versioned_object) - - -def get_by_id(context, model, id, *args, **kwargs): - return IMPL.get_by_id(context, model, id, *args, **kwargs) - - -class Condition(object): - """Class for normal condition values for conditional_update.""" - def __init__(self, value, field=None): - self.value = value - # Field is optional and can be passed when getting the filter - self.field = field - - def get_filter(self, model, field=None): - return IMPL.condition_db_filter(model, self._get_field(field), - self.value) - - def _get_field(self, field=None): - # We must have a defined field on initialization or when called - field = field or self.field - if not 
field: - raise ValueError(_('Condition has no field.')) - return field - -################### - - -def attachment_specs_get(context, attachment_id): - """Get all specs for an attachment.""" - return IMPL.attachment_specs_get(context, attachment_id) - - -def attachment_specs_delete(context, attachment_id, key): - """Delete the given attachment specs item.""" - return IMPL.attachment_specs_delete(context, attachment_id, key) - - -def attachment_specs_update_or_create(context, - attachment_id, - specs): - """Create or update attachment specs. - - This adds or modifies the key/value pairs specified in the attachment - specs dict argument. - """ - return IMPL.attachment_specs_update_or_create(context, - attachment_id, - specs) - -################### - - -class Not(Condition): - """Class for negated condition values for conditional_update. - - By default NULL values will be treated like Python treats None instead of - how SQL treats it. - - So for example when values are (1, 2) it will evaluate to True when we have - value 3 or NULL, instead of only with 3 like SQL does. 
- """ - def __init__(self, value, field=None, auto_none=True): - super(Not, self).__init__(value, field) - self.auto_none = auto_none - - def get_filter(self, model, field=None): - # If implementation has a specific method use it - if hasattr(IMPL, 'condition_not_db_filter'): - return IMPL.condition_not_db_filter(model, self._get_field(field), - self.value, self.auto_none) - - # Otherwise non negated object must adming ~ operator for not - return ~super(Not, self).get_filter(model, field) - - -class Case(object): - """Class for conditional value selection for conditional_update.""" - def __init__(self, whens, value=None, else_=None): - self.whens = whens - self.value = value - self.else_ = else_ - - -def is_orm_value(obj): - """Check if object is an ORM field.""" - return IMPL.is_orm_value(obj) - - -def conditional_update(context, model, values, expected_values, filters=(), - include_deleted='no', project_only=False, order=None): - """Compare-and-swap conditional update. - - Update will only occur in the DB if conditions are met. - - We have 4 different condition types we can use in expected_values: - - Equality: {'status': 'available'} - - Inequality: {'status': vol_obj.Not('deleting')} - - In range: {'status': ['available', 'error'] - - Not in range: {'status': vol_obj.Not(['in-use', 'attaching']) - - Method accepts additional filters, which are basically anything that can be - passed to a sqlalchemy query's filter method, for example: - - .. code-block:: python - - [~sql.exists().where(models.Volume.id == models.Snapshot.volume_id)] - - We can select values based on conditions using Case objects in the 'values' - argument. For example: - - .. 
code-block:: python - - has_snapshot_filter = sql.exists().where( - models.Snapshot.volume_id == models.Volume.id) - case_values = db.Case([(has_snapshot_filter, 'has-snapshot')], - else_='no-snapshot') - db.conditional_update(context, models.Volume, {'status': case_values}, - {'status': 'available'}) - - And we can use DB fields for example to store previous status in the - corresponding field even though we don't know which value is in the db from - those we allowed: - - .. code-block:: python - - db.conditional_update(context, models.Volume, - {'status': 'deleting', - 'previous_status': models.Volume.status}, - {'status': ('available', 'error')}) - - :param values: Dictionary of key-values to update in the DB. - :param expected_values: Dictionary of conditions that must be met for the - update to be executed. - :param filters: Iterable with additional filters. - :param include_deleted: Should the update include deleted items, this is - equivalent to read_deleted. - :param project_only: Should the query be limited to context's project. - :param order: Specific order of fields in which to update the values - :returns: Number of db rows that were updated. - """ - return IMPL.conditional_update(context, model, values, expected_values, - filters, include_deleted, project_only, - order) diff --git a/cinder/db/base.py b/cinder/db/base.py deleted file mode 100644 index facb71f1c..000000000 --- a/cinder/db/base.py +++ /dev/null @@ -1,42 +0,0 @@ -# Copyright 2010 United States Government as represented by the -# Administrator of the National Aeronautics and Space Administration. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. 
You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -"""Base class for classes that need modular database access.""" - - -from oslo_config import cfg -from oslo_utils import importutils - - -db_driver_opt = cfg.StrOpt('db_driver', - default='cinder.db', - help='Driver to use for database access') - -CONF = cfg.CONF -CONF.register_opt(db_driver_opt) - - -class Base(object): - """DB driver is injected in the init method.""" - - def __init__(self, db_driver=None): - # NOTE(mriedem): Without this call, multiple inheritance involving - # the db Base class does not work correctly. - super(Base, self).__init__() - if not db_driver: - db_driver = CONF.db_driver - self.db = importutils.import_module(db_driver) # pylint: disable=C0103 - self.db.dispose_engine() diff --git a/cinder/db/migration.py b/cinder/db/migration.py deleted file mode 100644 index d16ff9cfc..000000000 --- a/cinder/db/migration.py +++ /dev/null @@ -1,62 +0,0 @@ -# Copyright 2010 United States Government as represented by the -# Administrator of the National Aeronautics and Space Administration. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
# See the
# License for the specific language governing permissions and limitations
# under the License.

"""Database setup and migration commands."""

import os
import threading

from oslo_config import cfg
from oslo_db import options
from stevedore import driver

from cinder.db.sqlalchemy import api as db_api


# First schema version known to this tree; older databases must be brought to
# at least this version out-of-band before db_sync can run.
INIT_VERSION = 72

_IMPL = None
_LOCK = threading.Lock()

options.set_defaults(cfg.CONF)

MIGRATE_REPO_PATH = os.path.join(
    os.path.abspath(os.path.dirname(__file__)),
    'sqlalchemy',
    'migrate_repo',
)


def get_backend():
    """Load and return the migration backend driver (thread-safe, lazy).

    Uses double-checked locking: the unlocked fast path serves every call
    after the first, the lock only serializes initial driver loading.
    """
    global _IMPL
    if _IMPL is None:
        with _LOCK:
            if _IMPL is None:
                _IMPL = driver.DriverManager(
                    "cinder.database.migration_backend",
                    cfg.CONF.database.backend).driver
    return _IMPL


def db_sync(version=None, init_version=INIT_VERSION, engine=None):
    """Migrate the database to `version` or the most recent version."""
    if engine is None:
        engine = db_api.get_engine()
    return get_backend().db_sync(engine=engine,
                                 abs_path=MIGRATE_REPO_PATH,
                                 version=version,
                                 init_version=init_version)
You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -"""Implementation of SQLAlchemy backend.""" - - -import collections -import datetime as dt -import functools -import itertools -import re -import sys -import threading -import time -import uuid - -from oslo_config import cfg -from oslo_db import exception as db_exc -from oslo_db import options -from oslo_db.sqlalchemy import session as db_session -from oslo_log import log as logging -from oslo_utils import importutils -from oslo_utils import timeutils -from oslo_utils import uuidutils -osprofiler_sqlalchemy = importutils.try_import('osprofiler.sqlalchemy') -import six -import sqlalchemy -from sqlalchemy import MetaData -from sqlalchemy import or_, and_, case -from sqlalchemy.orm import joinedload, joinedload_all, undefer_group -from sqlalchemy.orm import RelationshipProperty -from sqlalchemy import sql -from sqlalchemy.sql.expression import bindparam -from sqlalchemy.sql.expression import desc -from sqlalchemy.sql.expression import literal_column -from sqlalchemy.sql.expression import true -from sqlalchemy.sql import func -from sqlalchemy.sql import sqltypes - -from cinder.api import common -from cinder.common import sqlalchemyutils -from cinder import db -from cinder.db.sqlalchemy import models -from cinder import exception -from cinder.i18n import _ -from cinder.objects import fields -from cinder import utils -from cinder.volume import utils as vol_utils - - -CONF = cfg.CONF -LOG = logging.getLogger(__name__) - -options.set_defaults(CONF, connection='sqlite:///$state_path/cinder.sqlite') - -_LOCK = threading.Lock() -_FACADE = None - - -def 
def _create_facade_lazily():
    """Return the global EngineFacade, creating it on first use.

    Creation is serialized with a module lock so concurrent first callers
    cannot build two facades.  Profiler tracing is wired up once, at
    creation time, when enabled in configuration.
    """
    global _LOCK
    with _LOCK:
        global _FACADE
        if _FACADE is None:
            _FACADE = db_session.EngineFacade(
                CONF.database.connection,
                **dict(CONF.database)
            )

            # NOTE(geguileo): To avoid a cyclical dependency we import the
            # group here. Dependency cycle is objects.base requires db.api,
            # which requires db.sqlalchemy.api, which requires service which
            # requires objects.base
            CONF.import_group("profiler", "cinder.service")
            if CONF.profiler.enabled:
                if CONF.profiler.trace_sqlalchemy:
                    osprofiler_sqlalchemy.add_tracing(sqlalchemy,
                                                      _FACADE.get_engine(),
                                                      "db")

        return _FACADE


def get_engine():
    """Return the lazily-created global SQLAlchemy engine."""
    facade = _create_facade_lazily()
    return facade.get_engine()


def get_session(**kwargs):
    """Return a new session bound to the global engine facade."""
    facade = _create_facade_lazily()
    return facade.get_session(**kwargs)


def dispose_engine():
    """Dispose of the engine's connection pool."""
    get_engine().dispose()


_DEFAULT_QUOTA_NAME = 'default'


def get_backend():
    """The backend is this module itself."""
    return sys.modules[__name__]


def is_admin_context(context):
    """Indicates if the request context is an administrator."""
    if not context:
        raise exception.CinderException(
            'Use of empty request context is deprecated')
    return context.is_admin


def is_user_context(context):
    """Indicates if the request context is a normal user."""
    if not context:
        return False
    if context.is_admin:
        return False
    if not context.user_id or not context.project_id:
        return False
    return True


def authorize_project_context(context, project_id):
    """Ensures a request has permission to access the given project."""
    if is_user_context(context):
        if not context.project_id:
            raise exception.NotAuthorized()
        elif context.project_id != project_id:
            raise exception.NotAuthorized()


def authorize_user_context(context, user_id):
    """Ensures a request has permission to access the given user."""
    if is_user_context(context):
        if not context.user_id:
            raise exception.NotAuthorized()
        elif context.user_id != user_id:
            raise exception.NotAuthorized()


def authorize_quota_class_context(context, class_name):
    """Ensures a request has permission to access the given quota class."""
    if is_user_context(context):
        if not context.quota_class:
            raise exception.NotAuthorized()
        elif context.quota_class != class_name:
            raise exception.NotAuthorized()


def require_admin_context(f):
    """Decorator to require admin request context.

    The first argument to the wrapped function must be the context.

    """
    # functools.wraps preserves the wrapped function's name/docstring,
    # consistent with require_volume_exists/require_snapshot_exists below.
    @functools.wraps(f)
    def wrapper(*args, **kwargs):
        if not is_admin_context(args[0]):
            raise exception.AdminRequired()
        return f(*args, **kwargs)
    return wrapper


def require_context(f):
    """Decorator to require *any* user or admin context.

    This does no authorization for user or project access matching, see
    :py:func:`authorize_project_context` and
    :py:func:`authorize_user_context`.

    The first argument to the wrapped function must be the context.

    """
    @functools.wraps(f)
    def wrapper(*args, **kwargs):
        if not is_admin_context(args[0]) and not is_user_context(args[0]):
            raise exception.NotAuthorized()
        return f(*args, **kwargs)
    return wrapper


def require_volume_exists(f):
    """Decorator to require the specified volume to exist.

    Requires the wrapped function to use context and volume_id as
    their first two arguments.
    """

    @functools.wraps(f)
    def wrapper(context, volume_id, *args, **kwargs):
        if not resource_exists(context, models.Volume, volume_id):
            raise exception.VolumeNotFound(volume_id=volume_id)
        return f(context, volume_id, *args, **kwargs)
    return wrapper


def require_snapshot_exists(f):
    """Decorator to require the specified snapshot to exist.

    Requires the wrapped function to use context and snapshot_id as
    their first two arguments.
    """

    @functools.wraps(f)
    def wrapper(context, snapshot_id, *args, **kwargs):
        if not resource_exists(context, models.Snapshot, snapshot_id):
            raise exception.SnapshotNotFound(snapshot_id=snapshot_id)
        return f(context, snapshot_id, *args, **kwargs)
    return wrapper
- """ - - @functools.wraps(f) - def wrapper(context, snapshot_id, *args, **kwargs): - if not resource_exists(context, models.Snapshot, snapshot_id): - raise exception.SnapshotNotFound(snapshot_id=snapshot_id) - return f(context, snapshot_id, *args, **kwargs) - return wrapper - - -def _retry_on_deadlock(f): - """Decorator to retry a DB API call if Deadlock was received.""" - @functools.wraps(f) - def wrapped(*args, **kwargs): - while True: - try: - return f(*args, **kwargs) - except db_exc.DBDeadlock: - LOG.warning("Deadlock detected when running " - "'%(func_name)s': Retrying...", - dict(func_name=f.__name__)) - # Retry! - time.sleep(0.5) - continue - functools.update_wrapper(wrapped, f) - return wrapped - - -def handle_db_data_error(f): - def wrapper(*args, **kwargs): - try: - return f(*args, **kwargs) - except db_exc.DBDataError: - msg = _('Error writing field to database') - LOG.exception(msg) - raise exception.Invalid(msg) - - return wrapper - - -def model_query(context, model, *args, **kwargs): - """Query helper that accounts for context's `read_deleted` field. - - :param context: context to query under - :param session: if present, the session to use - :param read_deleted: if present, overrides context's read_deleted field. - :param project_only: if present and context is user-type, then restrict - query to match the context's project_id. 
- """ - session = kwargs.get('session') or get_session() - read_deleted = kwargs.get('read_deleted') or context.read_deleted - project_only = kwargs.get('project_only') - - query = session.query(model, *args) - - if read_deleted == 'no': - query = query.filter_by(deleted=False) - elif read_deleted == 'yes': - pass # omit the filter to include deleted and active - elif read_deleted == 'only': - query = query.filter_by(deleted=True) - elif read_deleted == 'int_no': - query = query.filter_by(deleted=0) - else: - raise Exception( - _("Unrecognized read_deleted value '%s'") % read_deleted) - - if project_only and is_user_context(context): - if model is models.VolumeAttachment: - # NOTE(dulek): In case of VolumeAttachment, we need to join - # `project_id` through `volume` relationship. - query = query.filter(models.Volume.project_id == - context.project_id) - else: - query = query.filter_by(project_id=context.project_id) - - return query - - -def _sync_volumes(context, project_id, session, volume_type_id=None, - volume_type_name=None): - (volumes, _gigs) = _volume_data_get_for_project( - context, project_id, volume_type_id=volume_type_id, session=session) - key = 'volumes' - if volume_type_name: - key += '_' + volume_type_name - return {key: volumes} - - -def _sync_snapshots(context, project_id, session, volume_type_id=None, - volume_type_name=None): - (snapshots, _gigs) = _snapshot_data_get_for_project( - context, project_id, volume_type_id=volume_type_id, session=session) - key = 'snapshots' - if volume_type_name: - key += '_' + volume_type_name - return {key: snapshots} - - -def _sync_backups(context, project_id, session, volume_type_id=None, - volume_type_name=None): - (backups, _gigs) = _backup_data_get_for_project( - context, project_id, volume_type_id=volume_type_id, session=session) - key = 'backups' - return {key: backups} - - -def _sync_gigabytes(context, project_id, session, volume_type_id=None, - volume_type_name=None): - (_junk, vol_gigs) = 
def _sync_consistencygroups(context, project_id, session,
                            volume_type_id=None,
                            volume_type_name=None):
    """Quota sync: current consistencygroup count for the project."""
    _unused, group_count = _consistencygroup_data_get_for_project(
        context, project_id, session=session)
    return {'consistencygroups': group_count}


def _sync_groups(context, project_id, session,
                 volume_type_id=None,
                 volume_type_name=None):
    """Quota sync: current generic group count for the project."""
    _unused, group_count = _group_data_get_for_project(
        context, project_id, session=session)
    return {'groups': group_count}


def _sync_backup_gigabytes(context, project_id, session, volume_type_id=None,
                           volume_type_name=None):
    """Quota sync: gigabytes consumed by backups for the project."""
    _unused, backup_gigs = _backup_data_get_for_project(
        context, project_id, volume_type_id=volume_type_id, session=session)
    return {'backup_gigabytes': backup_gigs}


QUOTA_SYNC_FUNCTIONS = {
    '_sync_volumes': _sync_volumes,
    '_sync_snapshots': _sync_snapshots,
    '_sync_gigabytes': _sync_gigabytes,
    '_sync_consistencygroups': _sync_consistencygroups,
    '_sync_backups': _sync_backups,
    '_sync_backup_gigabytes': _sync_backup_gigabytes,
    '_sync_groups': _sync_groups,
}


###################


def _clean_filters(filters):
    """Return a copy of *filters* without entries whose value is None."""
    return {key: value for key, value in filters.items() if value is not None}


def _filter_host(field, value, match_level=None):
    """Generate a filter condition for host and cluster fields.

    Levels are:
    - 'pool': Will search for an exact match
    - 'backend': Will search for exact match and value#*
    - 'host'; Will search for exact match, value@* and value#*

    If no level is provided we'll determine it based on the value we want to
    match:
    - 'pool': If '#' is present in value
    - 'backend': If '@' is present in value and '#' is not present
    - 'host': In any other case

    :param field: ORM field.  Ex: objects.Volume.model.host
    :param value: String to compare with
    :param match_level: 'pool', 'backend', or 'host'
    """
    # LIKE operations are expensive, so when no level was given, derive the
    # narrowest one from the value itself.
    if match_level is None:
        if '#' in value:
            match_level = 'pool'
        elif '@' in value:
            match_level = 'backend'
        else:
            match_level = 'host'

    # MySQL compares case-insensitively by default, so force a binary
    # (case sensitive) comparison there.
    conn_str = CONF.database.connection
    if conn_str.startswith('mysql') and conn_str[5] in ('+', ':'):
        cmp_value = func.binary(value)
        like_op = 'LIKE BINARY'
    else:
        cmp_value = value
        like_op = 'LIKE'

    conditions = [field == cmp_value]
    if match_level != 'pool':
        conditions.append(field.op(like_op)(value + '#%'))
    if match_level == 'host':
        conditions.append(field.op(like_op)(value + '@%'))

    return or_(*conditions)


def _clustered_bool_field_filter(query, field_name, filter_value):
    """Filter services on a boolean flag that clusters may override.

    A service counts as disabled/frozen when it has no cluster and carries
    the flag itself, or when its (non-deleted) cluster carries the flag.
    """
    if filter_value is None:
        return query

    query_filter = or_(
        and_(models.Service.cluster_name.is_(None),
             getattr(models.Service, field_name)),
        and_(models.Service.cluster_name.isnot(None),
             sql.exists().where(and_(
                 models.Cluster.name == models.Service.cluster_name,
                 models.Cluster.binary == models.Service.binary,
                 ~models.Cluster.deleted,
                 getattr(models.Cluster, field_name)))))
    if not filter_value:
        query_filter = ~query_filter
    return query.filter(query_filter)
def _service_query(context, session=None, read_deleted='no', host=None,
                   cluster_name=None, is_up=None, host_or_cluster=None,
                   backend_match_level=None, disabled=None, frozen=None,
                   **filters):
    """Build the base services query, or return None on invalid filters."""
    filters = _clean_filters(filters)
    if filters and not is_valid_model_filters(models.Service, filters):
        return None

    query = model_query(context, models.Service, session=session,
                        read_deleted=read_deleted)

    # Host and cluster are special filters: besides exact matches (single
    # backend configuration) we must also match entries that define the
    # backend (multi backend configuration).
    if host:
        query = query.filter(_filter_host(models.Service.host, host,
                                          backend_match_level))
    if cluster_name:
        query = query.filter(_filter_host(models.Service.cluster_name,
                                          cluster_name, backend_match_level))
    if host_or_cluster:
        query = query.filter(or_(
            _filter_host(models.Service.host, host_or_cluster,
                         backend_match_level),
            _filter_host(models.Service.cluster_name, host_or_cluster,
                         backend_match_level),
        ))

    query = _clustered_bool_field_filter(query, 'disabled', disabled)
    query = _clustered_bool_field_filter(query, 'frozen', frozen)

    if filters:
        query = query.filter_by(**filters)

    if is_up is not None:
        date_limit = utils.service_expired_time()
        svc = models.Service
        recently_seen = or_(
            and_(svc.created_at.isnot(None), svc.created_at >= date_limit),
            and_(svc.updated_at.isnot(None), svc.updated_at >= date_limit))
        query = query.filter(recently_seen == is_up)

    return query


@require_admin_context
def service_destroy(context, service_id):
    """Soft-delete a service; raise ServiceNotFound if it does not exist."""
    query = _service_query(context, id=service_id)
    updated_values = models.Service.delete_values()
    if not query.update(updated_values):
        raise exception.ServiceNotFound(service_id=service_id)
    return updated_values


@require_admin_context
def service_get(context, service_id=None, backend_match_level=None, **filters):
    """Get a service that matches the criteria.

    A possible filter is is_up=True and it will filter nodes that are down.

    :param service_id: Id of the service.
    :param filters: Filters for the query in the form of key/value.
    :param backend_match_level: 'pool', 'backend', or 'host' for host and
                                cluster filters (as defined in _filter_host
                                method)
    :raise ServiceNotFound: If service doesn't exist.
    """
    query = _service_query(context, backend_match_level=backend_match_level,
                           id=service_id, **filters)
    service = query.first() if query else None
    if not service:
        serv_id = service_id or filters.get('topic') or filters.get('binary')
        raise exception.ServiceNotFound(service_id=serv_id,
                                        host=filters.get('host'))
    return service


@require_admin_context
def service_get_all(context, backend_match_level=None, **filters):
    """Get all services that match the criteria.

    A possible filter is is_up=True and it will filter nodes that are down.

    :param filters: Filters for the query in the form of key/value.
    :param backend_match_level: 'pool', 'backend', or 'host' for host and
                                cluster filters (as defined in _filter_host
                                method)
    """
    query = _service_query(context, backend_match_level=backend_match_level,
                           **filters)
    return query.all() if query else []


@require_admin_context
def service_create(context, values):
    """Create a service row; new services start disabled unless configured."""
    service_ref = models.Service()
    service_ref.update(values)
    if not CONF.enable_new_services:
        service_ref.disabled = True

    session = get_session()
    with session.begin():
        service_ref.save(session)
        return service_ref


@require_admin_context
@_retry_on_deadlock
def service_update(context, service_id, values):
    """Update a service; raise ServiceNotFound if nothing was updated."""
    if 'disabled' in values:
        # Don't mutate the caller's dict; stamp modification timestamps so
        # a disable/enable always bumps updated_at.
        values = values.copy()
        values['modified_at'] = values.get('modified_at', timeutils.utcnow())
        values['updated_at'] = values.get('updated_at',
                                          literal_column('updated_at'))
    query = _service_query(context, id=service_id)
    if not query.update(values):
        raise exception.ServiceNotFound(service_id=service_id)


###################


@require_admin_context
def is_backend_frozen(context, host, cluster_name):
    """Check if a storage backend is frozen based on host and cluster_name."""
    if cluster_name:
        model = models.Cluster
        conditions = [model.name == vol_utils.extract_host(cluster_name)]
    else:
        model = models.Service
        conditions = [model.host == vol_utils.extract_host(host)]
    conditions.extend((~model.deleted, model.frozen))
    query = get_session().query(sql.exists().where(and_(*conditions)))
    return query.scalar()
def _cluster_query(context, is_up=None, get_services=False,
                   services_summary=False, read_deleted='no',
                   name_match_level=None, name=None, session=None, **filters):
    """Build the base clusters query, or return None on invalid filters."""
    filters = _clean_filters(filters)
    if filters and not is_valid_model_filters(models.Cluster, filters):
        return None

    query = model_query(context, models.Cluster, session=session,
                        read_deleted=read_deleted)

    # Name is a special filter: match exact names as well as hosts that
    # specify the backend.
    if name:
        query = query.filter(_filter_host(models.Cluster.name, name,
                                          name_match_level))

    if filters:
        query = query.filter_by(**filters)

    if services_summary:
        query = query.options(undefer_group('services_summary'))
        # Bind the expiration time to now (it changes with each query); it
        # is required by the num_down_hosts column property.
        query = query.params(expired=utils.service_expired_time())
    elif 'num_down_hosts' in filters:
        query = query.params(expired=utils.service_expired_time())

    if get_services:
        query = query.options(joinedload_all('services'))

    if is_up is not None:
        date_limit = utils.service_expired_time()
        heartbeat_recent = and_(
            models.Cluster.last_heartbeat.isnot(None),
            models.Cluster.last_heartbeat >= date_limit)
        query = query.filter(heartbeat_recent == is_up)

    return query


@require_admin_context
def cluster_get(context, id=None, is_up=None, get_services=False,
                services_summary=False, read_deleted='no',
                name_match_level=None, **filters):
    """Get a cluster that matches the criteria.

    :param id: Id of the cluster.
    :param is_up: Boolean value to filter based on the cluster's up status.
    :param get_services: If we want to load all services from this cluster.
    :param services_summary: If we want to load num_hosts and
                             num_down_hosts fields.
    :param read_deleted: Filtering based on delete status. Default value is
                         "no".
    :param filters: Field based filters in the form of key/value.
    :param name_match_level: 'pool', 'backend', or 'host' for name filter (as
                             defined in _filter_host method)
    :raise ClusterNotFound: If cluster doesn't exist.
    """
    query = _cluster_query(context, is_up, get_services, services_summary,
                           read_deleted, name_match_level, id=id, **filters)
    cluster = query.first() if query else None
    if not cluster:
        cluster_id = id or six.text_type(filters)
        raise exception.ClusterNotFound(id=cluster_id)
    return cluster


@require_admin_context
def cluster_get_all(context, is_up=None, get_services=False,
                    services_summary=False, read_deleted='no',
                    name_match_level=None, **filters):
    """Get all clusters that match the criteria.

    :param is_up: Boolean value to filter based on the cluster's up status.
    :param get_services: If we want to load all services from this cluster.
    :param services_summary: If we want to load num_hosts and
                             num_down_hosts fields.
    :param read_deleted: Filtering based on delete status. Default value is
                         "no".
    :param name_match_level: 'pool', 'backend', or 'host' for name filter (as
                             defined in _filter_host method)
    :param filters: Field based filters in the form of key/value.
    """
    query = _cluster_query(context, is_up, get_services, services_summary,
                           read_deleted, name_match_level, **filters)
    return query.all() if query else []


@require_admin_context
def cluster_create(context, values):
    """Create a cluster from the values dictionary."""
    cluster_ref = models.Cluster()
    cluster_ref.update(values)
    # An explicitly provided disabled value takes precedence over config.
    if values.get('disabled') is None:
        cluster_ref.disabled = not CONF.enable_new_services

    session = get_session()
    try:
        with session.begin():
            cluster_ref.save(session)
            # We mark that newly created cluster has no hosts to prevent
            # problems at the OVO level
            cluster_ref.last_heartbeat = None
            return cluster_ref
    # If we had a race condition (another non deleted cluster exists with
    # the same name) raise Duplicate exception.
    except db_exc.DBDuplicateEntry:
        raise exception.ClusterExists(name=values.get('name'))


@require_admin_context
@_retry_on_deadlock
def cluster_update(context, id, values):
    """Set the given properties on an cluster and update it.

    Raises ClusterNotFound if cluster does not exist.
    """
    query = _cluster_query(context, id=id)
    if not query.update(values):
        raise exception.ClusterNotFound(id=id)


@require_admin_context
def cluster_destroy(context, id):
    """Destroy the cluster or raise if it does not exist or has hosts."""
    query = _cluster_query(context, id=id)
    query = query.filter(models.Cluster.num_hosts == 0)
    # If the update doesn't succeed we don't know if it's because the
    # cluster doesn't exist or because it has hosts.
    result = query.update(models.Cluster.delete_values(),
                          synchronize_session=False)

    if not result:
        # This will fail if the cluster doesn't exist, raising the right
        # exception
        cluster_get(context, id=id)
        # If it doesn't fail, then the problem is that there are hosts
        raise exception.ClusterHasHosts(id=id)
###################


def _metadata_refs(metadata_dict, meta_class):
    """Build a list of *meta_class* rows from a plain key/value dict."""
    refs = []
    for key, value in (metadata_dict or {}).items():
        ref = meta_class()
        ref['key'] = key
        ref['value'] = value
        refs.append(ref)
    return refs


def _dict_with_extra_specs_if_authorized(context, inst_type_query):
    """Convert a volume type query result to a dict, flattening extra_specs.

    The extra_specs entry is converted from a list of dicts:

        'extra_specs' : [{'key': 'k1', 'value': 'v1', ...}, ...]

    to a single dict:

        'extra_specs' : {'k1': 'v1'}

    NOTE(review): despite the name, this function does not currently check
    the context for admin rights (unlike its group-specs counterpart below);
    extra_specs are flattened for every caller — confirm intended.
    """
    inst_type_dict = dict(inst_type_query)

    extra_specs = {x['key']: x['value']
                   for x in inst_type_query['extra_specs']}
    inst_type_dict['extra_specs'] = extra_specs

    return inst_type_dict


###################


def _dict_with_group_specs_if_authorized(context, inst_type_query):
    """Convert a group type query result to a dict, flattening group_specs.

    For admin contexts the group_specs entry is converted from a list of
    dicts to a single {'k1': 'v1'} dict; for non-admin contexts the
    group_specs entry is removed entirely, as its contents are admin-only.
    """
    inst_type_dict = dict(inst_type_query)
    if not is_admin_context(context):
        del inst_type_dict['group_specs']
    else:
        group_specs = {x['key']: x['value']
                       for x in inst_type_query['group_specs']}
        inst_type_dict['group_specs'] = group_specs
    return inst_type_dict


###################


@require_context
def _quota_get(context, project_id, resource, session=None):
    """Fetch one quota row or raise ProjectQuotaNotFound."""
    result = (model_query(context, models.Quota, session=session,
                          read_deleted="no")
              .filter_by(project_id=project_id)
              .filter_by(resource=resource)
              .first())

    if not result:
        raise exception.ProjectQuotaNotFound(project_id=project_id)

    return result


@require_context
def quota_get(context, project_id, resource):
    """Return the quota row for (project, resource)."""
    return _quota_get(context, project_id, resource)


@require_context
def quota_get_all_by_project(context, project_id):
    """Return {'project_id': ..., resource: hard_limit, ...}."""
    quota_rows = (model_query(context, models.Quota, read_deleted="no")
                  .filter_by(project_id=project_id)
                  .all())

    result = {'project_id': project_id}
    for quota_row in quota_rows:
        result[quota_row.resource] = quota_row.hard_limit

    return result


@require_context
def quota_allocated_get_all_by_project(context, project_id, session=None):
    """Return {'project_id': ..., resource: allocated, ...}."""
    quota_rows = model_query(context, models.Quota, read_deleted='no',
                             session=session).filter_by(
        project_id=project_id).all()
    result = {'project_id': project_id}
    for quota_row in quota_rows:
        result[quota_row.resource] = quota_row.allocated
    return result


@require_context
def _quota_get_all_by_resource(context, resource, session=None):
    """Return every non-deleted quota row for the given resource."""
    return (model_query(context, models.Quota, session=session,
                        read_deleted='no')
            .filter_by(resource=resource)
            .all())


@require_context
def quota_create(context, project_id, resource, limit, allocated):
    """Create a quota row for (project, resource) with the given limits."""
    quota_ref = models.Quota()
    quota_ref.project_id = project_id
    quota_ref.resource = resource
    quota_ref.hard_limit = limit
    if allocated:
        quota_ref.allocated = allocated

    session = get_session()
    with session.begin():
        quota_ref.save(session)
        return quota_ref


@require_context
def quota_update(context, project_id, resource, limit):
    """Update the hard limit of an existing quota row."""
    session = get_session()
    with session.begin():
        quota_ref = _quota_get(context, project_id, resource, session=session)
        quota_ref.hard_limit = limit
        return quota_ref


@require_context
def quota_update_resource(context, old_res, new_res):
    """Rename a resource on every quota row that uses it."""
    session = get_session()
    with session.begin():
        for quota in _quota_get_all_by_resource(context, old_res,
                                                session=session):
            quota.resource = new_res


@require_admin_context
def quota_allocated_update(context, project_id, resource, allocated):
    """Update the allocated amount of an existing quota row."""
    session = get_session()
    with session.begin():
        quota_ref = _quota_get(context, project_id, resource, session=session)
        quota_ref.allocated = allocated
        return quota_ref


@require_admin_context
def quota_destroy(context, project_id, resource):
    """Soft-delete the quota row for (project, resource)."""
    session = get_session()
    with session.begin():
        quota_ref = _quota_get(context, project_id, resource, session=session)
        return quota_ref.delete(session=session)


###################


@require_context
def _quota_class_get(context, class_name, resource, session=None):
    """Fetch one quota-class row or raise QuotaClassNotFound."""
    result = (model_query(context, models.QuotaClass, session=session,
                          read_deleted="no")
              .filter_by(class_name=class_name)
              .filter_by(resource=resource)
              .first())

    if not result:
        raise exception.QuotaClassNotFound(class_name=class_name)

    return result


@require_context
def quota_class_get(context, class_name, resource):
    """Return the quota-class row for (class_name, resource)."""
    return _quota_class_get(context, class_name, resource)


def quota_class_get_defaults(context):
    """Return the limits of the special 'default' quota class."""
    rows = (model_query(context, models.QuotaClass, read_deleted="no")
            .filter_by(class_name=_DEFAULT_QUOTA_NAME)
            .all())

    result = {'class_name': _DEFAULT_QUOTA_NAME}
    for row in rows:
        result[row.resource] = row.hard_limit

    return result
@require_context
def quota_class_get_all_by_name(context, class_name):
    """Return {'class_name': ..., resource: hard_limit, ...}."""
    rows = (model_query(context, models.QuotaClass, read_deleted="no")
            .filter_by(class_name=class_name)
            .all())

    result = {'class_name': class_name}
    for row in rows:
        result[row.resource] = row.hard_limit

    return result


@require_context
def _quota_class_get_all_by_resource(context, resource, session):
    """Return every non-deleted quota-class row for the given resource."""
    return (model_query(context, models.QuotaClass, session=session,
                        read_deleted="no")
            .filter_by(resource=resource)
            .all())


@handle_db_data_error
@require_context
def quota_class_create(context, class_name, resource, limit):
    """Create a quota-class row with the given hard limit."""
    quota_class_ref = models.QuotaClass()
    quota_class_ref.class_name = class_name
    quota_class_ref.resource = resource
    quota_class_ref.hard_limit = limit

    session = get_session()
    with session.begin():
        quota_class_ref.save(session)
        return quota_class_ref


@require_context
def quota_class_update(context, class_name, resource, limit):
    """Update the hard limit of an existing quota-class row."""
    session = get_session()
    with session.begin():
        quota_class_ref = _quota_class_get(context, class_name, resource,
                                           session=session)
        quota_class_ref.hard_limit = limit
        return quota_class_ref


@require_context
def quota_class_update_resource(context, old_res, new_res):
    """Rename a resource on every quota-class row that uses it."""
    session = get_session()
    with session.begin():
        for quota_class in _quota_class_get_all_by_resource(
                context, old_res, session):
            quota_class.resource = new_res


@require_context
def quota_class_destroy(context, class_name, resource):
    """Soft-delete the quota-class row for (class_name, resource)."""
    session = get_session()
    with session.begin():
        quota_class_ref = _quota_class_get(context, class_name, resource,
                                           session=session)
        return quota_class_ref.delete(session=session)


@require_context
def quota_class_destroy_all_by_name(context, class_name):
    """Soft-delete every quota-class row belonging to *class_name*."""
    session = get_session()
    with session.begin():
        quota_classes = (model_query(context, models.QuotaClass,
                                     session=session, read_deleted="no")
                         .filter_by(class_name=class_name)
                         .all())

        for quota_class_ref in quota_classes:
            quota_class_ref.delete(session=session)


###################


@require_context
def quota_usage_get(context, project_id, resource):
    """Fetch one quota-usage row or raise QuotaUsageNotFound."""
    result = (model_query(context, models.QuotaUsage, read_deleted="no")
              .filter_by(project_id=project_id)
              .filter_by(resource=resource)
              .first())

    if not result:
        raise exception.QuotaUsageNotFound(project_id=project_id)

    return result


@require_context
def quota_usage_get_all_by_project(context, project_id):
    """Return {'project_id': ..., resource: {in_use, reserved}, ...}."""
    rows = (model_query(context, models.QuotaUsage, read_deleted="no")
            .filter_by(project_id=project_id)
            .all())

    result = {'project_id': project_id}
    for row in rows:
        result[row.resource] = dict(in_use=row.in_use, reserved=row.reserved)

    return result


@require_admin_context
def _quota_usage_create(context, project_id, resource, in_use, reserved,
                        until_refresh, session=None):
    """Insert and return a new quota-usage row."""
    quota_usage_ref = models.QuotaUsage()
    quota_usage_ref.project_id = project_id
    quota_usage_ref.resource = resource
    quota_usage_ref.in_use = in_use
    quota_usage_ref.reserved = reserved
    quota_usage_ref.until_refresh = until_refresh
    quota_usage_ref.save(session=session)

    return quota_usage_ref


###################


def _reservation_create(context, uuid, usage, project_id, resource, delta,
                        expire, session=None, allocated_id=None):
    """Insert and return a new reservation row tied to a usage row (if any)."""
    usage_id = usage['id'] if usage else None
    reservation_ref = models.Reservation()
    reservation_ref.uuid = uuid
    reservation_ref.usage_id = usage_id
    reservation_ref.project_id = project_id
    reservation_ref.resource = resource
    reservation_ref.delta = delta
    reservation_ref.expire = expire
    reservation_ref.allocated_id = allocated_id
    reservation_ref.save(session=session)
    return reservation_ref


###################


# NOTE(johannes): The quota code uses SQL locking to ensure races don't
# cause under or over counting of resources. To avoid deadlocks, this
# code always acquires the lock on quota_usages before acquiring the lock
# on reservations.
To avoid deadlocks, this -# code always acquires the lock on quota_usages before acquiring the lock -# on reservations. - -def _get_quota_usages(context, session, project_id): - # Broken out for testability - rows = model_query(context, models.QuotaUsage, - read_deleted="no", - session=session).\ - filter_by(project_id=project_id).\ - order_by(models.QuotaUsage.id.asc()).\ - with_lockmode('update').\ - all() - return {row.resource: row for row in rows} - - -def _get_quota_usages_by_resource(context, session, resource): - rows = model_query(context, models.QuotaUsage, - deleted="no", - session=session).\ - filter_by(resource=resource).\ - order_by(models.QuotaUsage.id.asc()).\ - with_lockmode('update').\ - all() - return rows - - -@require_context -@_retry_on_deadlock -def quota_usage_update_resource(context, old_res, new_res): - session = get_session() - with session.begin(): - usages = _get_quota_usages_by_resource(context, session, old_res) - for usage in usages: - usage.resource = new_res - usage.until_refresh = 1 - - -@require_context -@_retry_on_deadlock -def quota_reserve(context, resources, quotas, deltas, expire, - until_refresh, max_age, project_id=None, - is_allocated_reserve=False): - elevated = context.elevated() - session = get_session() - with session.begin(): - if project_id is None: - project_id = context.project_id - - # Get the current usages - usages = _get_quota_usages(context, session, project_id) - allocated = quota_allocated_get_all_by_project(context, project_id, - session=session) - allocated.pop('project_id') - - # Handle usage refresh - work = set(deltas.keys()) - while work: - resource = work.pop() - - # Do we need to refresh the usage? - refresh = False - if resource not in usages: - usages[resource] = _quota_usage_create(elevated, - project_id, - resource, - 0, 0, - until_refresh or None, - session=session) - refresh = True - elif usages[resource].in_use < 0: - # Negative in_use count indicates a desync, so try to - # heal from that... 
- refresh = True - elif usages[resource].until_refresh is not None: - usages[resource].until_refresh -= 1 - if usages[resource].until_refresh <= 0: - refresh = True - elif max_age and usages[resource].updated_at is not None and ( - (timeutils.utcnow() - - usages[resource].updated_at).total_seconds() >= max_age): - refresh = True - - # OK, refresh the usage - if refresh: - # Grab the sync routine - sync = QUOTA_SYNC_FUNCTIONS[resources[resource].sync] - volume_type_id = getattr(resources[resource], - 'volume_type_id', None) - volume_type_name = getattr(resources[resource], - 'volume_type_name', None) - updates = sync(elevated, project_id, - volume_type_id=volume_type_id, - volume_type_name=volume_type_name, - session=session) - for res, in_use in updates.items(): - # Make sure we have a destination for the usage! - if res not in usages: - usages[res] = _quota_usage_create( - elevated, - project_id, - res, - 0, 0, - until_refresh or None, - session=session - ) - - # Update the usage - usages[res].in_use = in_use - usages[res].until_refresh = until_refresh or None - - # Because more than one resource may be refreshed - # by the call to the sync routine, and we don't - # want to double-sync, we make sure all refreshed - # resources are dropped from the work set. - work.discard(res) - - # NOTE(Vek): We make the assumption that the sync - # routine actually refreshes the - # resources that it is the sync routine - # for. We don't check, because this is - # a best-effort mechanism. - - # Check for deltas that would go negative - if is_allocated_reserve: - unders = [r for r, delta in deltas.items() - if delta < 0 and delta + allocated.get(r, 0) < 0] - else: - unders = [r for r, delta in deltas.items() - if delta < 0 and delta + usages[r].in_use < 0] - - # TODO(mc_nair): Should ignore/zero alloc if using non-nested driver - - # Now, let's check the quotas - # NOTE(Vek): We're only concerned about positive increments. 
- # If a project has gone over quota, we want them to - # be able to reduce their usage without any - # problems. - overs = [r for r, delta in deltas.items() - if quotas[r] >= 0 and delta >= 0 and - quotas[r] < delta + usages[r].total + allocated.get(r, 0)] - - # NOTE(Vek): The quota check needs to be in the transaction, - # but the transaction doesn't fail just because - # we're over quota, so the OverQuota raise is - # outside the transaction. If we did the raise - # here, our usage updates would be discarded, but - # they're not invalidated by being over-quota. - - # Create the reservations - if not overs: - reservations = [] - for resource, delta in deltas.items(): - usage = usages[resource] - allocated_id = None - if is_allocated_reserve: - try: - quota = _quota_get(context, project_id, resource, - session=session) - except exception.ProjectQuotaNotFound: - # If we were using the default quota, create DB entry - quota = quota_create(context, project_id, resource, - quotas[resource], 0) - # Since there's no reserved/total for allocated, update - # allocated immediately and subtract on rollback if needed - quota_allocated_update(context, project_id, resource, - quota.allocated + delta) - allocated_id = quota.id - usage = None - reservation = _reservation_create( - elevated, str(uuid.uuid4()), usage, project_id, resource, - delta, expire, session=session, allocated_id=allocated_id) - - reservations.append(reservation.uuid) - - # Also update the reserved quantity - # NOTE(Vek): Again, we are only concerned here about - # positive increments. Here, though, we're - # worried about the following scenario: - # - # 1) User initiates resize down. - # 2) User allocates a new instance. - # 3) Resize down fails or is reverted. - # 4) User is now over quota. - # - # To prevent this, we only update the - # reserved value if the delta is positive. 
- if delta > 0 and not is_allocated_reserve: - usages[resource].reserved += delta - - if unders: - LOG.warning("Change will make usage less than 0 for the following " - "resources: %s", unders) - if overs: - usages = {k: dict(in_use=v.in_use, reserved=v.reserved, - allocated=allocated.get(k, 0)) - for k, v in usages.items()} - raise exception.OverQuota(overs=sorted(overs), quotas=quotas, - usages=usages) - - return reservations - - -def _quota_reservations(session, context, reservations): - """Return the relevant reservations.""" - - # Get the listed reservations - return model_query(context, models.Reservation, - read_deleted="no", - session=session).\ - filter(models.Reservation.uuid.in_(reservations)).\ - with_lockmode('update').\ - all() - - -def _dict_with_usage_id(usages): - return {row.id: row for row in usages.values()} - - -@require_context -@_retry_on_deadlock -def reservation_commit(context, reservations, project_id=None): - session = get_session() - with session.begin(): - usages = _get_quota_usages(context, session, project_id) - usages = _dict_with_usage_id(usages) - - for reservation in _quota_reservations(session, context, reservations): - # Allocated reservations will have already been bumped - if not reservation.allocated_id: - usage = usages[reservation.usage_id] - if reservation.delta >= 0: - usage.reserved -= reservation.delta - usage.in_use += reservation.delta - - reservation.delete(session=session) - - -@require_context -@_retry_on_deadlock -def reservation_rollback(context, reservations, project_id=None): - session = get_session() - with session.begin(): - usages = _get_quota_usages(context, session, project_id) - usages = _dict_with_usage_id(usages) - for reservation in _quota_reservations(session, context, reservations): - if reservation.allocated_id: - reservation.quota.allocated -= reservation.delta - else: - usage = usages[reservation.usage_id] - if reservation.delta >= 0: - usage.reserved -= reservation.delta - - 
reservation.delete(session=session) - - -def quota_destroy_by_project(*args, **kwargs): - """Destroy all limit quotas associated with a project. - - Leaves usage and reservation quotas intact. - """ - quota_destroy_all_by_project(only_quotas=True, *args, **kwargs) - - -@require_admin_context -@_retry_on_deadlock -def quota_destroy_all_by_project(context, project_id, only_quotas=False): - """Destroy all quotas associated with a project. - - This includes limit quotas, usage quotas and reservation quotas. - Optionally can only remove limit quotas and leave other types as they are. - - :param context: The request context, for access checks. - :param project_id: The ID of the project being deleted. - :param only_quotas: Only delete limit quotas, leave other types intact. - """ - session = get_session() - with session.begin(): - quotas = model_query(context, models.Quota, session=session, - read_deleted="no").\ - filter_by(project_id=project_id).\ - all() - - for quota_ref in quotas: - quota_ref.delete(session=session) - - if only_quotas: - return - - quota_usages = model_query(context, models.QuotaUsage, - session=session, read_deleted="no").\ - filter_by(project_id=project_id).\ - all() - - for quota_usage_ref in quota_usages: - quota_usage_ref.delete(session=session) - - reservations = model_query(context, models.Reservation, - session=session, read_deleted="no").\ - filter_by(project_id=project_id).\ - all() - - for reservation_ref in reservations: - reservation_ref.delete(session=session) - - -@require_admin_context -@_retry_on_deadlock -def reservation_expire(context): - session = get_session() - with session.begin(): - current_time = timeutils.utcnow() - results = model_query(context, models.Reservation, session=session, - read_deleted="no").\ - filter(models.Reservation.expire < current_time).\ - all() - - if results: - for reservation in results: - if reservation.delta >= 0: - if reservation.allocated_id: - reservation.quota.allocated -= reservation.delta - 
reservation.quota.save(session=session) - else: - reservation.usage.reserved -= reservation.delta - reservation.usage.save(session=session) - - reservation.delete(session=session) - - -################### - - -@require_admin_context -def volume_attach(context, values): - volume_attachment_ref = models.VolumeAttachment() - if not values.get('id'): - values['id'] = str(uuid.uuid4()) - - volume_attachment_ref.update(values) - session = get_session() - with session.begin(): - volume_attachment_ref.save(session=session) - return _attachment_get(context, values['id'], - session=session) - - -@require_admin_context -def volume_attached(context, attachment_id, instance_uuid, host_name, - mountpoint, attach_mode='rw'): - """This method updates a volume attachment entry. - - This function saves the information related to a particular - attachment for a volume. It also updates the volume record - to mark the volume as attached. - - """ - if instance_uuid and not uuidutils.is_uuid_like(instance_uuid): - raise exception.InvalidUUID(uuid=instance_uuid) - - session = get_session() - with session.begin(): - volume_attachment_ref = _attachment_get(context, attachment_id, - session=session) - - updated_values = {'mountpoint': mountpoint, - 'attach_status': fields.VolumeAttachStatus.ATTACHED, - 'instance_uuid': instance_uuid, - 'attached_host': host_name, - 'attach_time': timeutils.utcnow(), - 'attach_mode': attach_mode, - 'updated_at': literal_column('updated_at')} - volume_attachment_ref.update(updated_values) - volume_attachment_ref.save(session=session) - del updated_values['updated_at'] - - volume_ref = _volume_get(context, volume_attachment_ref['volume_id'], - session=session) - volume_ref['status'] = 'in-use' - volume_ref['attach_status'] = fields.VolumeAttachStatus.ATTACHED - volume_ref.save(session=session) - return (volume_ref, updated_values) - - -@handle_db_data_error -@require_context -def volume_create(context, values): - values['volume_metadata'] = 
_metadata_refs(values.get('metadata'), - models.VolumeMetadata) - if is_admin_context(context): - values['volume_admin_metadata'] = \ - _metadata_refs(values.get('admin_metadata'), - models.VolumeAdminMetadata) - elif values.get('volume_admin_metadata'): - del values['volume_admin_metadata'] - - volume_ref = models.Volume() - if not values.get('id'): - values['id'] = str(uuid.uuid4()) - volume_ref.update(values) - - session = get_session() - with session.begin(): - session.add(volume_ref) - - return _volume_get(context, values['id'], session=session) - - -def get_booleans_for_table(table_name): - booleans = set() - table = getattr(models, table_name.capitalize()) - if hasattr(table, '__table__'): - columns = table.__table__.columns - for column in columns: - if isinstance(column.type, sqltypes.Boolean): - booleans.add(column.name) - - return booleans - - -@require_admin_context -def volume_data_get_for_host(context, host, count_only=False): - host_attr = models.Volume.host - conditions = [host_attr == host, host_attr.op('LIKE')(host + '#%')] - if count_only: - result = model_query(context, - func.count(models.Volume.id), - read_deleted="no").filter( - or_(*conditions)).first() - return result[0] or 0 - else: - result = model_query(context, - func.count(models.Volume.id), - func.sum(models.Volume.size), - read_deleted="no").filter( - or_(*conditions)).first() - # NOTE(vish): convert None to 0 - return (result[0] or 0, result[1] or 0) - - -@require_admin_context -def _volume_data_get_for_project(context, project_id, volume_type_id=None, - session=None): - query = model_query(context, - func.count(models.Volume.id), - func.sum(models.Volume.size), - read_deleted="no", - session=session).\ - filter_by(project_id=project_id) - - if volume_type_id: - query = query.filter_by(volume_type_id=volume_type_id) - - result = query.first() - - # NOTE(vish): convert None to 0 - return (result[0] or 0, result[1] or 0) - - -@require_admin_context -def 
_backup_data_get_for_project(context, project_id, volume_type_id=None, - session=None): - query = model_query(context, - func.count(models.Backup.id), - func.sum(models.Backup.size), - read_deleted="no", - session=session).\ - filter_by(project_id=project_id) - - if volume_type_id: - query = query.filter_by(volume_type_id=volume_type_id) - - result = query.first() - - # NOTE(vish): convert None to 0 - return (result[0] or 0, result[1] or 0) - - -@require_admin_context -def volume_data_get_for_project(context, project_id, volume_type_id=None): - return _volume_data_get_for_project(context, project_id, volume_type_id) - - -@require_admin_context -@_retry_on_deadlock -def volume_destroy(context, volume_id): - session = get_session() - now = timeutils.utcnow() - with session.begin(): - updated_values = {'status': 'deleted', - 'deleted': True, - 'deleted_at': now, - 'updated_at': literal_column('updated_at'), - 'migration_status': None} - model_query(context, models.Volume, session=session).\ - filter_by(id=volume_id).\ - update(updated_values) - model_query(context, models.VolumeMetadata, session=session).\ - filter_by(volume_id=volume_id).\ - update({'deleted': True, - 'deleted_at': now, - 'updated_at': literal_column('updated_at')}) - model_query(context, models.VolumeAdminMetadata, session=session).\ - filter_by(volume_id=volume_id).\ - update({'deleted': True, - 'deleted_at': now, - 'updated_at': literal_column('updated_at')}) - model_query(context, models.Transfer, session=session).\ - filter_by(volume_id=volume_id).\ - update({'deleted': True, - 'deleted_at': now, - 'updated_at': literal_column('updated_at')}) - del updated_values['updated_at'] - return updated_values - - -def _include_in_cluster(context, cluster, model, partial_rename, filters): - """Generic include in cluster method. - - When we include resources in a cluster we have to be careful to preserve - the addressing sections that have not been provided. 
That's why we allow - partial_renaming, so we can preserve the backend and pool if we are only - providing host/cluster level information, and preserve pool information if - we only provide backend level information. - - For example when we include a host in a cluster we receive calls with - filters like {'host': 'localhost@lvmdriver-1'} and cluster with something - like 'mycluster@lvmdriver-1'. Since in the DB the resources will have the - host field set to something like 'localhost@lvmdriver-1#lvmdriver-1' we - want to include original pool in the new cluster_name. So we want to store - in cluster_name value 'mycluster@lvmdriver-1#lvmdriver-1'. - """ - filters = _clean_filters(filters) - if filters and not is_valid_model_filters(model, filters): - return None - - query = get_session().query(model) - if hasattr(model, 'deleted'): - query = query.filter_by(deleted=False) - - # cluster_name and host are special filter cases - for field in {'cluster_name', 'host'}.intersection(filters): - value = filters.pop(field) - # We do a special backend filter - query = query.filter(_filter_host(getattr(model, field), value)) - # If we want do do a partial rename and we haven't set the cluster - # already, the value we want to set is a SQL replace of existing field - # value. - if partial_rename and isinstance(cluster, six.string_types): - cluster = func.replace(getattr(model, field), value, cluster) - - query = query.filter_by(**filters) - result = query.update({'cluster_name': cluster}, synchronize_session=False) - return result - - -@require_admin_context -def volume_include_in_cluster(context, cluster, partial_rename=True, - **filters): - """Include all volumes matching the filters into a cluster.""" - return _include_in_cluster(context, cluster, models.Volume, - partial_rename, filters) - - -@require_admin_context -def volume_detached(context, volume_id, attachment_id): - """This updates a volume attachment and marks it as detached. 
- - This method also ensures that the volume entry is correctly - marked as either still attached/in-use or detached/available - if this was the last detachment made. - - """ - - # NOTE(jdg): This is a funky band-aid for the earlier attempts at - # multiattach, it's a bummer because these things aren't really being used - # but at the same time we don't want to break them until we work out the - # new proposal for multi-attach - remain_attachment = True - session = get_session() - with session.begin(): - try: - attachment = _attachment_get(context, attachment_id, - session=session) - except exception.VolumeAttachmentNotFound: - attachment_updates = None - attachment = None - - if attachment: - now = timeutils.utcnow() - attachment_updates = { - 'attach_status': fields.VolumeAttachStatus.DETACHED, - 'detach_time': now, - 'deleted': True, - 'deleted_at': now, - 'updated_at': - literal_column('updated_at'), - } - attachment.update(attachment_updates) - attachment.save(session=session) - del attachment_updates['updated_at'] - - attachment_list = None - volume_ref = _volume_get(context, volume_id, - session=session) - volume_updates = {'updated_at': literal_column('updated_at')} - if not volume_ref.volume_attachment: - # NOTE(jdg): We kept the old arg style allowing session exclusively - # for this one call - attachment_list = volume_attachment_get_all_by_volume_id( - context, volume_id, session=session) - remain_attachment = False - if attachment_list and len(attachment_list) > 0: - remain_attachment = True - - if not remain_attachment: - # Hide status update from user if we're performing volume migration - # or uploading it to image - if ((not volume_ref.migration_status and - not (volume_ref.status == 'uploading')) or - volume_ref.migration_status in ('success', 'error')): - volume_updates['status'] = 'available' - - volume_updates['attach_status'] = ( - fields.VolumeAttachStatus.DETACHED) - else: - # Volume is still attached - volume_updates['status'] = 'in-use' - 
volume_updates['attach_status'] = ( - fields.VolumeAttachStatus.ATTACHED) - - volume_ref.update(volume_updates) - volume_ref.save(session=session) - del volume_updates['updated_at'] - return (volume_updates, attachment_updates) - - -def _process_model_like_filter(model, query, filters): - """Applies regex expression filtering to a query. - - :param model: model to apply filters to - :param query: query to apply filters to - :param filters: dictionary of filters with regex values - :returns: the updated query. - """ - if query is None: - return query - - for key in sorted(filters): - column_attr = getattr(model, key) - if 'property' == type(column_attr).__name__: - continue - value = filters[key] - if not (isinstance(value, six.string_types) or isinstance(value, int)): - continue - query = query.filter( - column_attr.op('LIKE')(u'%%%s%%' % value)) - return query - - -def apply_like_filters(model): - def decorator_filters(process_exact_filters): - def _decorator(query, filters): - exact_filters = filters.copy() - regex_filters = {} - for key, value in filters.items(): - # NOTE(tommylikehu): For inexact match, the filter keys - # are in the format of 'key~=value' - if key.endswith('~'): - exact_filters.pop(key) - regex_filters[key.rstrip('~')] = value - query = process_exact_filters(query, exact_filters) - return _process_model_like_filter(model, query, regex_filters) - return _decorator - return decorator_filters - - -@require_context -def _volume_get_query(context, session=None, project_only=False, - joined_load=True): - """Get the query to retrieve the volume. - - :param context: the context used to run the method _volume_get_query - :param session: the session to use - :param project_only: the boolean used to decide whether to query the - volume in the current project or all projects - :param joined_load: the boolean used to decide whether the query loads - the other models, which join the volume model in - the database. 
Currently, the False value for this - parameter is specially for the case of updating - database during volume migration - :returns: updated query or None - """ - if not joined_load: - return model_query(context, models.Volume, session=session, - project_only=project_only) - if is_admin_context(context): - return model_query(context, models.Volume, session=session, - project_only=project_only).\ - options(joinedload('volume_metadata')).\ - options(joinedload('volume_admin_metadata')).\ - options(joinedload('volume_type')).\ - options(joinedload('volume_attachment')).\ - options(joinedload('consistencygroup')).\ - options(joinedload('group')) - else: - return model_query(context, models.Volume, session=session, - project_only=project_only).\ - options(joinedload('volume_metadata')).\ - options(joinedload('volume_type')).\ - options(joinedload('volume_attachment')).\ - options(joinedload('consistencygroup')).\ - options(joinedload('group')) - - -@require_context -def _volume_get(context, volume_id, session=None, joined_load=True): - result = _volume_get_query(context, session=session, project_only=True, - joined_load=joined_load) - if joined_load: - result = result.options(joinedload('volume_type.extra_specs')) - result = result.filter_by(id=volume_id).first() - - if not result: - raise exception.VolumeNotFound(volume_id=volume_id) - - return result - - -def _attachment_get_all(context, filters=None, marker=None, limit=None, - offset=None, sort_keys=None, sort_dirs=None): - - if filters and not is_valid_model_filters(models.VolumeAttachment, - filters, - exclude_list=['project_id']): - return [] - - session = get_session() - with session.begin(): - # Generate the paginate query - query = _generate_paginate_query(context, session, marker, - limit, sort_keys, sort_dirs, filters, - offset, models.VolumeAttachment) - if query is None: - return [] - return query.all() - - -def _attachment_get(context, attachment_id, session=None, read_deleted=False, - project_only=True): 
- result = (model_query(context, models.VolumeAttachment, session=session, - read_deleted=read_deleted) - .filter_by(id=attachment_id) - .options(joinedload('volume')) - .first()) - - if not result: - msg = _("Unable to find attachment with id: %s"), attachment_id - raise exception.VolumeAttachmentNotFound(msg) - return result - - -def _attachment_get_query(context, session=None, project_only=False): - return model_query(context, models.VolumeAttachment, session=session, - project_only=project_only).options(joinedload('volume')) - - -@apply_like_filters(model=models.VolumeAttachment) -def _process_attachment_filters(query, filters): - if filters: - project_id = filters.pop('project_id', None) - # Ensure that filters' keys exist on the model - if not is_valid_model_filters(models.VolumeAttachment, filters): - return - if project_id: - volume = models.Volume - query = query.filter(volume.id == - models.VolumeAttachment.volume_id, - volume.project_id == project_id) - - query = query.filter_by(**filters) - return query - - -@require_admin_context -def volume_attachment_get_all(context, filters=None, marker=None, limit=None, - offset=None, sort_keys=None, sort_dirs=None): - """Retrieve all Attachment records with filter and pagination options.""" - return _attachment_get_all(context, filters, marker, limit, offset, - sort_keys, sort_dirs) - - -@require_context -def volume_attachment_get_all_by_volume_id(context, volume_id, session=None): - result = model_query(context, models.VolumeAttachment, - session=session).\ - filter_by(volume_id=volume_id).\ - filter(models.VolumeAttachment.attach_status != - fields.VolumeAttachStatus.DETACHED). 
\ - options(joinedload('volume')).\ - all() - return result - - -@require_context -def volume_attachment_get_all_by_host(context, host): - session = get_session() - with session.begin(): - result = model_query(context, models.VolumeAttachment, - session=session).\ - filter_by(attached_host=host).\ - filter(models.VolumeAttachment.attach_status != - fields.VolumeAttachStatus.DETACHED). \ - options(joinedload('volume')).\ - all() - return result - - -@require_context -def volume_attachment_get(context, attachment_id): - """Fetch the specified attachment record.""" - return _attachment_get(context, attachment_id) - - -@require_context -def volume_attachment_get_all_by_instance_uuid(context, - instance_uuid): - """Fetch all attachment records associated with the specified instance.""" - session = get_session() - with session.begin(): - result = model_query(context, models.VolumeAttachment, - session=session).\ - filter_by(instance_uuid=instance_uuid).\ - filter(models.VolumeAttachment.attach_status != - fields.VolumeAttachStatus.DETACHED).\ - options(joinedload('volume')).\ - all() - return result - - -@require_context -def volume_attachment_get_all_by_project(context, project_id, filters=None, - marker=None, limit=None, offset=None, - sort_keys=None, sort_dirs=None): - """Retrieve all Attachment records for specific project.""" - authorize_project_context(context, project_id) - if not filters: - filters = {} - else: - filters = filters.copy() - - filters['project_id'] = project_id - - return _attachment_get_all(context, filters, marker, - limit, offset, sort_keys, - sort_dirs) - - -@require_admin_context -@_retry_on_deadlock -def attachment_destroy(context, attachment_id): - """Destroy the specified attachment record.""" - utcnow = timeutils.utcnow() - session = get_session() - with session.begin(): - updated_values = {'attach_status': 'deleted', - 'deleted': True, - 'deleted_at': utcnow, - 'updated_at': literal_column('updated_at')} - model_query(context, 
models.VolumeAttachment, session=session).\ - filter_by(id=attachment_id).\ - update(updated_values) - model_query(context, models.AttachmentSpecs, session=session).\ - filter_by(attachment_id=attachment_id).\ - update({'deleted': True, - 'deleted_at': utcnow, - 'updated_at': literal_column('updated_at')}) - del updated_values['updated_at'] - return updated_values - - -def _attachment_specs_query(context, attachment_id, session=None): - return model_query(context, models.AttachmentSpecs, session=session, - read_deleted="no").\ - filter_by(attachment_id=attachment_id) - - -@require_context -def attachment_specs_get(context, attachment_id): - """Fetch the attachment_specs for the specified attachment record.""" - rows = _attachment_specs_query(context, attachment_id).\ - all() - - result = {row['key']: row['value'] for row in rows} - return result - - -@require_context -def attachment_specs_delete(context, attachment_id, key): - """Delete attachment_specs for the specified attachment record.""" - session = get_session() - with session.begin(): - _attachment_specs_get_item(context, - attachment_id, - key, - session) - _attachment_specs_query(context, attachment_id, session).\ - filter_by(key=key).\ - update({'deleted': True, - 'deleted_at': timeutils.utcnow(), - 'updated_at': literal_column('updated_at')}) - - -@require_context -def _attachment_specs_get_item(context, - attachment_id, - key, - session=None): - result = _attachment_specs_query( - context, attachment_id, session=session).\ - filter_by(key=key).\ - first() - - if not result: - raise exception.AttachmentSpecsNotFound( - specs_key=key, - attachment_id=attachment_id) - - return result - - -@handle_db_data_error -@require_context -def attachment_specs_update_or_create(context, - attachment_id, - specs): - """Update attachment_specs for the specified attachment record.""" - session = get_session() - with session.begin(): - spec_ref = None - for key, value in specs.items(): - try: - spec_ref = 
_attachment_specs_get_item( - context, attachment_id, key, session) - except exception.AttachmentSpecsNotFound: - spec_ref = models.AttachmentSpecs() - spec_ref.update({"key": key, "value": value, - "attachment_id": attachment_id, - "deleted": False}) - spec_ref.save(session=session) - - return specs - - -@require_context -def volume_get(context, volume_id): - return _volume_get(context, volume_id) - - -@require_admin_context -def volume_get_all(context, marker=None, limit=None, sort_keys=None, - sort_dirs=None, filters=None, offset=None): - """Retrieves all volumes. - - If no sort parameters are specified then the returned volumes are sorted - first by the 'created_at' key and then by the 'id' key in descending - order. - - :param context: context to query under - :param marker: the last item of the previous page, used to determine the - next page of results to return - :param limit: maximum number of items to return - :param sort_keys: list of attributes by which results should be sorted, - paired with corresponding item in sort_dirs - :param sort_dirs: list of directions in which results should be sorted, - paired with corresponding item in sort_keys - :param filters: dictionary of filters; values that are in lists, tuples, - or sets cause an 'IN' operation, while exact matching - is used for other values, see _process_volume_filters - function for more information - :returns: list of matching volumes - """ - session = get_session() - with session.begin(): - # Generate the query - query = _generate_paginate_query(context, session, marker, limit, - sort_keys, sort_dirs, filters, offset) - # No volumes would match, return empty list - if query is None: - return [] - return query.all() - - -@require_context -def get_volume_summary(context, project_only): - """Retrieves all volumes summary. 
- - :param context: context to query under - :param project_only: limit summary to project volumes - :returns: volume summary - """ - if not (project_only or is_admin_context(context)): - raise exception.AdminRequired() - query = model_query(context, func.count(models.Volume.id), - func.sum(models.Volume.size), read_deleted="no") - if project_only: - query = query.filter_by(project_id=context.project_id) - - if query is None: - return [] - - result = query.first() - - query_metadata = model_query( - context, models.VolumeMetadata.key, models.VolumeMetadata.value, - read_deleted="no") - if project_only: - query_metadata = query_metadata.join( - models.Volume, - models.Volume.id == models.VolumeMetadata.volume_id).filter_by( - project_id=context.project_id) - result_metadata = query_metadata.distinct().all() - - result_metadata_list = collections.defaultdict(list) - for key, value in result_metadata: - result_metadata_list[key].append(value) - - return (result[0] or 0, result[1] or 0, result_metadata_list) - - -@require_admin_context -def volume_get_all_by_host(context, host, filters=None): - """Retrieves all volumes hosted on a host. - - :param context: context to query under - :param host: host for all volumes being retrieved - :param filters: dictionary of filters; values that are in lists, tuples, - or sets cause an 'IN' operation, while exact matching - is used for other values, see _process_volume_filters - function for more information - :returns: list of matching volumes - """ - # As a side effect of the introduction of pool-aware scheduler, - # newly created volumes will have pool information appended to - # 'host' field of a volume record. 
So a volume record in DB can - # now be either form below: - # Host - # Host#Pool - if host and isinstance(host, six.string_types): - session = get_session() - with session.begin(): - host_attr = getattr(models.Volume, 'host') - conditions = [host_attr == host, - host_attr.op('LIKE')(host + '#%')] - query = _volume_get_query(context).filter(or_(*conditions)) - if filters: - query = _process_volume_filters(query, filters) - # No volumes would match, return empty list - if query is None: - return [] - return query.all() - elif not host: - return [] - - -@require_context -def volume_get_all_by_group(context, group_id, filters=None): - """Retrieves all volumes associated with the group_id. - - :param context: context to query under - :param group_id: consistency group ID for all volumes being retrieved - :param filters: dictionary of filters; values that are in lists, tuples, - or sets cause an 'IN' operation, while exact matching - is used for other values, see _process_volume_filters - function for more information - :returns: list of matching volumes - """ - query = _volume_get_query(context).filter_by(consistencygroup_id=group_id) - if filters: - query = _process_volume_filters(query, filters) - # No volumes would match, return empty list - if query is None: - return [] - return query.all() - - -@require_context -def volume_get_all_by_generic_group(context, group_id, filters=None): - """Retrieves all volumes associated with the group_id. 
- - :param context: context to query under - :param group_id: group ID for all volumes being retrieved - :param filters: dictionary of filters; values that are in lists, tuples, - or sets cause an 'IN' operation, while exact matching - is used for other values, see _process_volume_filters - function for more information - :returns: list of matching volumes - """ - query = _volume_get_query(context).filter_by(group_id=group_id) - if filters: - query = _process_volume_filters(query, filters) - # No volumes would match, return empty list - if query is None: - return [] - return query.all() - - -@require_context -def volume_get_all_by_project(context, project_id, marker, limit, - sort_keys=None, sort_dirs=None, filters=None, - offset=None): - """Retrieves all volumes in a project. - - If no sort parameters are specified then the returned volumes are sorted - first by the 'created_at' key and then by the 'id' key in descending - order. - - :param context: context to query under - :param project_id: project for all volumes being retrieved - :param marker: the last item of the previous page, used to determine the - next page of results to return - :param limit: maximum number of items to return - :param sort_keys: list of attributes by which results should be sorted, - paired with corresponding item in sort_dirs - :param sort_dirs: list of directions in which results should be sorted, - paired with corresponding item in sort_keys - :param filters: dictionary of filters; values that are in lists, tuples, - or sets cause an 'IN' operation, while exact matching - is used for other values, see _process_volume_filters - function for more information - :returns: list of matching volumes - """ - session = get_session() - with session.begin(): - authorize_project_context(context, project_id) - # Add in the project filter without modifying the given filters - filters = filters.copy() if filters else {} - filters['project_id'] = project_id - # Generate the query - query = 
_generate_paginate_query(context, session, marker, limit, - sort_keys, sort_dirs, filters, offset) - # No volumes would match, return empty list - if query is None: - return [] - return query.all() - - -def _generate_paginate_query(context, session, marker, limit, sort_keys, - sort_dirs, filters, offset=None, - paginate_type=models.Volume): - """Generate the query to include the filters and the paginate options. - - Returns a query with sorting / pagination criteria added or None - if the given filters will not yield any results. - - :param context: context to query under - :param session: the session to use - :param marker: the last item of the previous page; we returns the next - results after this value. - :param limit: maximum number of items to return - :param sort_keys: list of attributes by which results should be sorted, - paired with corresponding item in sort_dirs - :param sort_dirs: list of directions in which results should be sorted, - paired with corresponding item in sort_keys - :param filters: dictionary of filters; values that are in lists, tuples, - or sets cause an 'IN' operation, while exact matching - is used for other values, see _process_volume_filters - function for more information - :param offset: number of items to skip - :param paginate_type: type of pagination to generate - :returns: updated query or None - """ - get_query, process_filters, get = PAGINATION_HELPERS[paginate_type] - - sort_keys, sort_dirs = process_sort_params(sort_keys, - sort_dirs, - default_dir='desc') - query = get_query(context, session=session) - - if filters: - query = process_filters(query, filters) - if query is None: - return None - - marker_object = None - if marker is not None: - marker_object = get(context, marker, session) - - return sqlalchemyutils.paginate_query(query, paginate_type, limit, - sort_keys, - marker=marker_object, - sort_dirs=sort_dirs, - offset=offset) - - -@apply_like_filters(model=models.Volume) -def _process_volume_filters(query, 
filters): - """Common filter processing for Volume queries. - - Filter values that are in lists, tuples, or sets cause an 'IN' operator - to be used, while exact matching ('==' operator) is used for other values. - - A filter key/value of 'no_migration_targets'=True causes volumes with - either a NULL 'migration_status' or a 'migration_status' that does not - start with 'target:' to be retrieved. - - A 'metadata' filter key must correspond to a dictionary value of metadata - key-value pairs. - - :param query: Model query to use - :param filters: dictionary of filters - :returns: updated query or None - """ - filters = filters.copy() - - # 'no_migration_targets' is unique, must be either NULL or - # not start with 'target:' - if filters.get('no_migration_targets', False): - filters.pop('no_migration_targets') - try: - column_attr = getattr(models.Volume, 'migration_status') - conditions = [column_attr == None, # noqa - column_attr.op('NOT LIKE')('target:%')] - query = query.filter(or_(*conditions)) - except AttributeError: - LOG.debug("'migration_status' column could not be found.") - return None - - host = filters.pop('host', None) - if host: - query = query.filter(_filter_host(models.Volume.host, host)) - - cluster_name = filters.pop('cluster_name', None) - if cluster_name: - query = query.filter(_filter_host(models.Volume.cluster_name, - cluster_name)) - - # Apply exact match filters for everything else, ensure that the - # filter value exists on the model - for key in filters.keys(): - # metadata/glance_metadata is unique, must be a dict - if key in ('metadata', 'glance_metadata'): - if not isinstance(filters[key], dict): - LOG.debug("'%s' filter value is not valid.", key) - return None - continue - try: - column_attr = getattr(models.Volume, key) - # Do not allow relationship properties since those require - # schema specific knowledge - prop = getattr(column_attr, 'property') - if isinstance(prop, RelationshipProperty): - LOG.debug(("'%s' filter key is not 
valid, " - "it maps to a relationship."), key) - return None - except AttributeError: - LOG.debug("'%s' filter key is not valid.", key) - return None - - # Holds the simple exact matches - filter_dict = {} - - # Iterate over all filters, special case the filter if necessary - for key, value in filters.items(): - if key == 'metadata': - # model.VolumeMetadata defines the backref to Volumes as - # 'volume_metadata' or 'volume_admin_metadata', use those as - # column attribute keys - col_attr = getattr(models.Volume, 'volume_metadata') - col_ad_attr = getattr(models.Volume, 'volume_admin_metadata') - for k, v in value.items(): - query = query.filter(or_(col_attr.any(key=k, value=v), - col_ad_attr.any(key=k, value=v))) - elif key == 'glance_metadata': - # use models.Volume.volume_glance_metadata as column attribute key. - col_gl_attr = models.Volume.volume_glance_metadata - for k, v in value.items(): - query = query.filter(col_gl_attr.any(key=k, value=v)) - elif isinstance(value, (list, tuple, set, frozenset)): - # Looking for values in a list; apply to query directly - column_attr = getattr(models.Volume, key) - query = query.filter(column_attr.in_(value)) - else: - # OK, simple exact match; save for later - filter_dict[key] = value - - # Apply simple exact matches - if filter_dict: - query = query.filter_by(**filter_dict) - return query - - -def process_sort_params(sort_keys, sort_dirs, default_keys=None, - default_dir='asc'): - """Process the sort parameters to include default keys. - - Creates a list of sort keys and a list of sort directions. Adds the default - keys to the end of the list if they are not already included. 
- - When adding the default keys to the sort keys list, the associated - direction is: - 1) The first element in the 'sort_dirs' list (if specified), else - 2) 'default_dir' value (Note that 'asc' is the default value since this is - the default in sqlalchemy.utils.paginate_query) - - :param sort_keys: List of sort keys to include in the processed list - :param sort_dirs: List of sort directions to include in the processed list - :param default_keys: List of sort keys that need to be included in the - processed list, they are added at the end of the list - if not already specified. - :param default_dir: Sort direction associated with each of the default - keys that are not supplied, used when they are added - to the processed list - :returns: list of sort keys, list of sort directions - :raise exception.InvalidInput: If more sort directions than sort keys - are specified or if an invalid sort - direction is specified - """ - if default_keys is None: - default_keys = ['created_at', 'id'] - - # Determine direction to use for when adding default keys - if sort_dirs and len(sort_dirs): - default_dir_value = sort_dirs[0] - else: - default_dir_value = default_dir - - # Create list of keys (do not modify the input list) - if sort_keys: - result_keys = list(sort_keys) - else: - result_keys = [] - - # If a list of directions is not provided, use the default sort direction - # for all provided keys. 
- if sort_dirs: - result_dirs = [] - # Verify sort direction - for sort_dir in sort_dirs: - if sort_dir not in ('asc', 'desc'): - msg = _("Unknown sort direction, must be 'desc' or 'asc'.") - raise exception.InvalidInput(reason=msg) - result_dirs.append(sort_dir) - else: - result_dirs = [default_dir_value for _sort_key in result_keys] - - # Ensure that the key and direction length match - while len(result_dirs) < len(result_keys): - result_dirs.append(default_dir_value) - # Unless more direction are specified, which is an error - if len(result_dirs) > len(result_keys): - msg = _("Sort direction array size exceeds sort key array size.") - raise exception.InvalidInput(reason=msg) - - # Ensure defaults are included - for key in default_keys: - if key not in result_keys: - result_keys.append(key) - result_dirs.append(default_dir_value) - - return result_keys, result_dirs - - -@handle_db_data_error -@require_context -def volume_update(context, volume_id, values): - session = get_session() - with session.begin(): - metadata = values.get('metadata') - if metadata is not None: - _volume_user_metadata_update(context, - volume_id, - values.pop('metadata'), - delete=True, - session=session) - - admin_metadata = values.get('admin_metadata') - if is_admin_context(context) and admin_metadata is not None: - _volume_admin_metadata_update(context, - volume_id, - values.pop('admin_metadata'), - delete=True, - session=session) - - query = _volume_get_query(context, session, joined_load=False) - result = query.filter_by(id=volume_id).update(values) - if not result: - raise exception.VolumeNotFound(volume_id=volume_id) - - -@handle_db_data_error -@require_context -def volumes_update(context, values_list): - session = get_session() - with session.begin(): - volume_refs = [] - for values in values_list: - volume_id = values['id'] - values.pop('id') - metadata = values.get('metadata') - if metadata is not None: - _volume_user_metadata_update(context, - volume_id, - values.pop('metadata'), 
- delete=True, - session=session) - - admin_metadata = values.get('admin_metadata') - if is_admin_context(context) and admin_metadata is not None: - _volume_admin_metadata_update(context, - volume_id, - values.pop('admin_metadata'), - delete=True, - session=session) - - volume_ref = _volume_get(context, volume_id, session=session) - volume_ref.update(values) - volume_refs.append(volume_ref) - - return volume_refs - - -@require_context -def volume_attachment_update(context, attachment_id, values): - query = model_query(context, models.VolumeAttachment) - result = query.filter_by(id=attachment_id).update(values) - if not result: - raise exception.VolumeAttachmentNotFound( - filter='attachment_id = ' + attachment_id) - - -def volume_update_status_based_on_attachment(context, volume_id): - """Update volume status based on attachment. - - Get volume and check if 'volume_attachment' parameter is present in volume. - If 'volume_attachment' is None then set volume status to 'available' - else set volume status to 'in-use'. - - :param context: context to query under - :param volume_id: id of volume to be updated - :returns: updated volume - """ - session = get_session() - with session.begin(): - volume_ref = _volume_get(context, volume_id, session=session) - # We need to get and update volume using same session because - # there is possibility that instance is deleted between the 'get' - # and 'update' volume call. 
- if not volume_ref['volume_attachment']: - volume_ref.update({'status': 'available'}) - else: - volume_ref.update({'status': 'in-use'}) - - return volume_ref - - -def volume_has_snapshots_filter(): - return sql.exists().where( - and_(models.Volume.id == models.Snapshot.volume_id, - ~models.Snapshot.deleted)) - - -def volume_has_undeletable_snapshots_filter(): - deletable_statuses = ['available', 'error'] - return sql.exists().where( - and_(models.Volume.id == models.Snapshot.volume_id, - ~models.Snapshot.deleted, - or_(models.Snapshot.cgsnapshot_id != None, # noqa: != None - models.Snapshot.status.notin_(deletable_statuses)), - or_(models.Snapshot.group_snapshot_id != None, # noqa: != None - models.Snapshot.status.notin_(deletable_statuses)))) - - -def volume_has_snapshots_in_a_cgsnapshot_filter(): - return sql.exists().where( - and_(models.Volume.id == models.Snapshot.volume_id, - models.Snapshot.cgsnapshot_id.isnot(None))) - - -def volume_has_attachments_filter(): - return sql.exists().where( - and_(models.Volume.id == models.VolumeAttachment.volume_id, - models.VolumeAttachment.attach_status != - fields.VolumeAttachStatus.DETACHED, - ~models.VolumeAttachment.deleted)) - - -def volume_qos_allows_retype(new_vol_type): - """Filter to check that qos allows retyping the volume to new_vol_type. - - Returned sqlalchemy filter will evaluate to True when volume's status is - available or when it's 'in-use' but the qos in new_vol_type is the same as - the qos of the volume or when it doesn't exist a consumer spec key that - specifies anything other than the back-end in any of the 2 volume_types. 
- """ - # Query to get the qos of the volume type new_vol_type - q = sql.select([models.VolumeTypes.qos_specs_id]).where(and_( - ~models.VolumeTypes.deleted, - models.VolumeTypes.id == new_vol_type)) - # Construct the filter to check qos when volume is 'in-use' - return or_( - # If volume is available - models.Volume.status == 'available', - # Or both volume types have the same qos specs - sql.exists().where(and_( - ~models.VolumeTypes.deleted, - models.VolumeTypes.id == models.Volume.volume_type_id, - models.VolumeTypes.qos_specs_id == q.as_scalar())), - # Or they are different specs but they are handled by the backend or - # it is not specified. The way SQL evaluatels value != 'back-end' - # makes it result in False not only for 'back-end' values but for - # NULL as well, and with the double negation we ensure that we only - # allow QoS with 'consumer' values of 'back-end' and NULL. - and_( - ~sql.exists().where(and_( - ~models.VolumeTypes.deleted, - models.VolumeTypes.id == models.Volume.volume_type_id, - (models.VolumeTypes.qos_specs_id == - models.QualityOfServiceSpecs.specs_id), - models.QualityOfServiceSpecs.key == 'consumer', - models.QualityOfServiceSpecs.value != 'back-end')), - ~sql.exists().where(and_( - ~models.VolumeTypes.deleted, - models.VolumeTypes.id == new_vol_type, - (models.VolumeTypes.qos_specs_id == - models.QualityOfServiceSpecs.specs_id), - models.QualityOfServiceSpecs.key == 'consumer', - models.QualityOfServiceSpecs.value != 'back-end')))) - - -def volume_has_other_project_snp_filter(): - return sql.exists().where( - and_(models.Volume.id == models.Snapshot.volume_id, - models.Volume.project_id != models.Snapshot.project_id)) - - -#################### - - -def _volume_x_metadata_get_query(context, volume_id, model, session=None): - return model_query(context, model, session=session, read_deleted="no").\ - filter_by(volume_id=volume_id) - - -def _volume_x_metadata_get(context, volume_id, model, session=None): - rows = 
_volume_x_metadata_get_query(context, volume_id, model, - session=session).all() - result = {} - for row in rows: - result[row['key']] = row['value'] - - return result - - -def _volume_x_metadata_get_item(context, volume_id, key, model, notfound_exec, - session=None): - result = _volume_x_metadata_get_query(context, volume_id, - model, session=session).\ - filter_by(key=key).\ - first() - - if not result: - if model is models.VolumeGlanceMetadata: - raise notfound_exec(id=volume_id) - else: - raise notfound_exec(metadata_key=key, volume_id=volume_id) - return result - - -def _volume_x_metadata_update(context, volume_id, metadata, delete, model, - session=None, add=True, update=True): - session = session or get_session() - metadata = metadata.copy() - - with session.begin(subtransactions=True): - # Set existing metadata to deleted if delete argument is True. This is - # committed immediately to the DB - if delete: - expected_values = {'volume_id': volume_id} - # We don't want to delete keys we are going to update - if metadata: - expected_values['key'] = db.Not(metadata.keys()) - conditional_update(context, model, - {'deleted': True, - 'deleted_at': timeutils.utcnow()}, - expected_values) - - # Get existing metadata - db_meta = _volume_x_metadata_get_query(context, volume_id, model).all() - save = [] - skip = [] - - # We only want to send changed metadata. 
- for row in db_meta: - if row.key in metadata: - value = metadata.pop(row.key) - if row.value != value and update: - # ORM objects will not be saved until we do the bulk save - row.value = value - save.append(row) - continue - skip.append(row) - - # We also want to save non-existent metadata - if add: - save.extend(model(key=key, value=value, volume_id=volume_id) - for key, value in metadata.items()) - # Do a bulk save - if save: - session.bulk_save_objects(save, update_changed_only=True) - - # Construct result dictionary with current metadata - save.extend(skip) - result = {row['key']: row['value'] for row in save} - return result - - -def _volume_user_metadata_get_query(context, volume_id, session=None): - return _volume_x_metadata_get_query(context, volume_id, - models.VolumeMetadata, session=session) - - -def _volume_image_metadata_get_query(context, volume_id, session=None): - return _volume_x_metadata_get_query(context, volume_id, - models.VolumeGlanceMetadata, - session=session) - - -@require_context -def _volume_user_metadata_get(context, volume_id, session=None): - return _volume_x_metadata_get(context, volume_id, - models.VolumeMetadata, session=session) - - -@require_context -def _volume_user_metadata_get_item(context, volume_id, key, session=None): - return _volume_x_metadata_get_item(context, volume_id, key, - models.VolumeMetadata, - exception.VolumeMetadataNotFound, - session=session) - - -@require_context -@require_volume_exists -def _volume_user_metadata_update(context, volume_id, metadata, delete, - session=None): - return _volume_x_metadata_update(context, volume_id, metadata, delete, - models.VolumeMetadata, - session=session) - - -@require_context -@require_volume_exists -def _volume_image_metadata_update(context, volume_id, metadata, delete, - session=None): - return _volume_x_metadata_update(context, volume_id, metadata, delete, - models.VolumeGlanceMetadata, - session=session) - - -@require_context -def 
_volume_glance_metadata_key_to_id(context, volume_id, key): - db_data = volume_glance_metadata_get(context, volume_id) - metadata = {meta_entry.key: meta_entry.id - for meta_entry in db_data - if meta_entry.key == key} - metadata_id = metadata[key] - return metadata_id - - -@require_context -@require_volume_exists -def volume_metadata_get(context, volume_id): - return _volume_user_metadata_get(context, volume_id) - - -@require_context -@require_volume_exists -@_retry_on_deadlock -def volume_metadata_delete(context, volume_id, key, meta_type): - if meta_type == common.METADATA_TYPES.user: - (_volume_user_metadata_get_query(context, volume_id). - filter_by(key=key). - update({'deleted': True, - 'deleted_at': timeutils.utcnow(), - 'updated_at': literal_column('updated_at')})) - elif meta_type == common.METADATA_TYPES.image: - metadata_id = _volume_glance_metadata_key_to_id(context, - volume_id, key) - (_volume_image_metadata_get_query(context, volume_id). - filter_by(id=metadata_id). - update({'deleted': True, - 'deleted_at': timeutils.utcnow(), - 'updated_at': literal_column('updated_at')})) - else: - raise exception.InvalidMetadataType(metadata_type=meta_type, - id=volume_id) - - -@require_context -@handle_db_data_error -@_retry_on_deadlock -def volume_metadata_update(context, volume_id, metadata, delete, meta_type): - if meta_type == common.METADATA_TYPES.user: - return _volume_user_metadata_update(context, - volume_id, - metadata, - delete) - elif meta_type == common.METADATA_TYPES.image: - return _volume_image_metadata_update(context, - volume_id, - metadata, - delete) - else: - raise exception.InvalidMetadataType(metadata_type=meta_type, - id=volume_id) - - -################### - - -def _volume_admin_metadata_get_query(context, volume_id, session=None): - return _volume_x_metadata_get_query(context, volume_id, - models.VolumeAdminMetadata, - session=session) - - -@require_admin_context -@require_volume_exists -def _volume_admin_metadata_get(context, volume_id, 
session=None): - return _volume_x_metadata_get(context, volume_id, - models.VolumeAdminMetadata, session=session) - - -@require_admin_context -@require_volume_exists -def _volume_admin_metadata_update(context, volume_id, metadata, delete, - session=None, add=True, update=True): - return _volume_x_metadata_update(context, volume_id, metadata, delete, - models.VolumeAdminMetadata, - session=session, add=add, update=update) - - -@require_admin_context -def volume_admin_metadata_get(context, volume_id): - return _volume_admin_metadata_get(context, volume_id) - - -@require_admin_context -@require_volume_exists -@_retry_on_deadlock -def volume_admin_metadata_delete(context, volume_id, key): - _volume_admin_metadata_get_query(context, volume_id).\ - filter_by(key=key).\ - update({'deleted': True, - 'deleted_at': timeutils.utcnow(), - 'updated_at': literal_column('updated_at')}) - - -@require_admin_context -@_retry_on_deadlock -def volume_admin_metadata_update(context, volume_id, metadata, delete, - add=True, update=True): - return _volume_admin_metadata_update(context, volume_id, metadata, delete, - add=add, update=update) - - -################### - - -@require_context -@handle_db_data_error -def snapshot_create(context, values): - values['snapshot_metadata'] = _metadata_refs(values.get('metadata'), - models.SnapshotMetadata) - if not values.get('id'): - values['id'] = str(uuid.uuid4()) - - session = get_session() - with session.begin(): - snapshot_ref = models.Snapshot() - snapshot_ref.update(values) - session.add(snapshot_ref) - - return _snapshot_get(context, values['id'], session=session) - - -@require_admin_context -@_retry_on_deadlock -def snapshot_destroy(context, snapshot_id): - utcnow = timeutils.utcnow() - session = get_session() - with session.begin(): - updated_values = {'status': 'deleted', - 'deleted': True, - 'deleted_at': utcnow, - 'updated_at': literal_column('updated_at')} - model_query(context, models.Snapshot, session=session).\ - 
filter_by(id=snapshot_id).\ - update(updated_values) - model_query(context, models.SnapshotMetadata, session=session).\ - filter_by(snapshot_id=snapshot_id).\ - update({'deleted': True, - 'deleted_at': utcnow, - 'updated_at': literal_column('updated_at')}) - del updated_values['updated_at'] - return updated_values - - -@require_context -def _snapshot_get(context, snapshot_id, session=None): - result = model_query(context, models.Snapshot, session=session, - project_only=True).\ - options(joinedload('volume')).\ - options(joinedload('snapshot_metadata')).\ - filter_by(id=snapshot_id).\ - first() - - if not result: - raise exception.SnapshotNotFound(snapshot_id=snapshot_id) - - return result - - -@require_context -def snapshot_get(context, snapshot_id): - return _snapshot_get(context, snapshot_id) - - -@require_admin_context -def snapshot_get_all(context, filters=None, marker=None, limit=None, - sort_keys=None, sort_dirs=None, offset=None): - """Retrieves all snapshots. - - If no sorting parameters are specified then returned snapshots are sorted - first by the 'created_at' key and then by the 'id' key in descending - order. - - :param context: context to query under - :param filters: dictionary of filters; will do exact matching on values. - Special keys host and cluster_name refer to the volume. 
- :param marker: the last item of the previous page, used to determine the - next page of results to return - :param limit: maximum number of items to return - :param sort_keys: list of attributes by which results should be sorted, - paired with corresponding item in sort_dirs - :param sort_dirs: list of directions in which results should be sorted, - paired with corresponding item in sort_keys - :returns: list of matching snapshots - """ - if filters and not is_valid_model_filters(models.Snapshot, filters, - exclude_list=('host', - 'cluster_name')): - return [] - - session = get_session() - with session.begin(): - query = _generate_paginate_query(context, session, marker, limit, - sort_keys, sort_dirs, filters, - offset, models.Snapshot) - - # No snapshots would match, return empty list - if not query: - return [] - return query.all() - - -def _snaps_get_query(context, session=None, project_only=False): - return model_query(context, models.Snapshot, session=session, - project_only=project_only).\ - options(joinedload('snapshot_metadata')) - - -@apply_like_filters(model=models.Snapshot) -def _process_snaps_filters(query, filters): - if filters: - filters = filters.copy() - - exclude_list = ('host', 'cluster_name') - - # Ensure that filters' keys exist on the model or is metadata - for key in filters.keys(): - # Ensure if filtering based on metadata filter is queried - # then the filters value is a dictionary - if key == 'metadata': - if not isinstance(filters[key], dict): - LOG.debug("Metadata filter value is not valid dictionary") - return None - continue - - if key in exclude_list: - continue - - # for keys in filter other than metadata and exclude_list - # ensure that the keys are in Snapshot modelt - try: - column_attr = getattr(models.Snapshot, key) - prop = getattr(column_attr, 'property') - if isinstance(prop, RelationshipProperty): - LOG.debug( - "'%s' key is not valid, it maps to a relationship.", - key) - return None - except AttributeError: - 
LOG.debug("'%s' filter key is not valid.", key) - return None - - # filter handling for host and cluster name - host = filters.pop('host', None) - cluster = filters.pop('cluster_name', None) - if host or cluster: - query = query.join(models.Snapshot.volume) - vol_field = models.Volume - if host: - query = query.filter(_filter_host(vol_field.host, host)) - if cluster: - query = query.filter(_filter_host(vol_field.cluster_name, cluster)) - - filters_dict = {} - LOG.debug("Building query based on filter") - for key, value in filters.items(): - if key == 'metadata': - col_attr = getattr(models.Snapshot, 'snapshot_metadata') - for k, v in value.items(): - query = query.filter(col_attr.any(key=k, value=v)) - else: - filters_dict[key] = value - - # Apply exact matches - if filters_dict: - query = query.filter_by(**filters_dict) - - return query - - -@require_context -def snapshot_get_all_for_volume(context, volume_id): - return model_query(context, models.Snapshot, read_deleted='no', - project_only=True).\ - filter_by(volume_id=volume_id).\ - options(joinedload('snapshot_metadata')).\ - all() - - -@require_context -def snapshot_get_latest_for_volume(context, volume_id): - result = model_query(context, models.Snapshot, read_deleted='no', - project_only=True).\ - filter_by(volume_id=volume_id).\ - options(joinedload('snapshot_metadata')).\ - order_by(desc(models.Snapshot.created_at)).\ - first() - if not result: - raise exception.VolumeSnapshotNotFound(volume_id=volume_id) - return result - - -@require_context -def snapshot_get_all_by_host(context, host, filters=None): - if filters and not is_valid_model_filters(models.Snapshot, filters): - return [] - - query = model_query(context, models.Snapshot, read_deleted='no', - project_only=True) - if filters: - query = query.filter_by(**filters) - - # As a side effect of the introduction of pool-aware scheduler, - # newly created volumes will have pool information appended to - # 'host' field of a volume record. 
So a volume record in DB can - # now be either form below: - # Host - # Host#Pool - if host and isinstance(host, six.string_types): - session = get_session() - with session.begin(): - host_attr = getattr(models.Volume, 'host') - conditions = [host_attr == host, - host_attr.op('LIKE')(host + '#%')] - query = query.join(models.Snapshot.volume).filter( - or_(*conditions)).options(joinedload('snapshot_metadata')) - return query.all() - elif not host: - return [] - - -@require_context -def snapshot_get_all_for_cgsnapshot(context, cgsnapshot_id): - return model_query(context, models.Snapshot, read_deleted='no', - project_only=True).\ - filter_by(cgsnapshot_id=cgsnapshot_id).\ - options(joinedload('volume')).\ - options(joinedload('snapshot_metadata')).\ - all() - - -@require_context -def snapshot_get_all_for_group_snapshot(context, group_snapshot_id): - return model_query(context, models.Snapshot, read_deleted='no', - project_only=True).\ - filter_by(group_snapshot_id=group_snapshot_id).\ - options(joinedload('volume')).\ - options(joinedload('snapshot_metadata')).\ - all() - - -@require_context -def snapshot_get_all_by_project(context, project_id, filters=None, marker=None, - limit=None, sort_keys=None, sort_dirs=None, - offset=None): - """"Retrieves all snapshots in a project. - - If no sorting parameters are specified then returned snapshots are sorted - first by the 'created_at' key and then by the 'id' key in descending - order. 
- - :param context: context to query under - :param project_id: project for all snapshots being retrieved - :param filters: dictionary of filters; will do exact matching on values - :param marker: the last item of the previous page, used to determine the - next page of results to return - :param limit: maximum number of items to return - :param sort_keys: list of attributes by which results should be sorted, - paired with corresponding item in sort_dirs - :param sort_dirs: list of directions in which results should be sorted, - paired with corresponding item in sort_keys - :returns: list of matching snapshots - """ - if filters and not is_valid_model_filters(models.Snapshot, filters): - return [] - - authorize_project_context(context, project_id) - - # Add project_id to filters - filters = filters.copy() if filters else {} - filters['project_id'] = project_id - - session = get_session() - with session.begin(): - query = _generate_paginate_query(context, session, marker, limit, - sort_keys, sort_dirs, filters, - offset, models.Snapshot) - - # No snapshots would match, return empty list - if not query: - return [] - - query = query.options(joinedload('snapshot_metadata')) - return query.all() - - -@require_context -def _snapshot_data_get_for_project(context, project_id, volume_type_id=None, - session=None): - authorize_project_context(context, project_id) - query = model_query(context, - func.count(models.Snapshot.id), - func.sum(models.Snapshot.volume_size), - read_deleted="no", - session=session).\ - filter_by(project_id=project_id) - - if volume_type_id: - query = query.join('volume').filter_by(volume_type_id=volume_type_id) - - result = query.first() - - # NOTE(vish): convert None to 0 - return (result[0] or 0, result[1] or 0) - - -@require_context -def snapshot_data_get_for_project(context, project_id, volume_type_id=None): - return _snapshot_data_get_for_project(context, project_id, volume_type_id) - - -@require_context -def 
snapshot_get_all_active_by_window(context, begin, end=None, - project_id=None): - """Return snapshots that were active during window.""" - - query = model_query(context, models.Snapshot, read_deleted="yes") - query = query.filter(or_(models.Snapshot.deleted_at == None, # noqa - models.Snapshot.deleted_at > begin)) - query = query.options(joinedload(models.Snapshot.volume)) - query = query.options(joinedload('snapshot_metadata')) - if end: - query = query.filter(models.Snapshot.created_at < end) - if project_id: - query = query.filter_by(project_id=project_id) - - return query.all() - - -@handle_db_data_error -@require_context -def snapshot_update(context, snapshot_id, values): - query = model_query(context, models.Snapshot, project_only=True) - result = query.filter_by(id=snapshot_id).update(values) - if not result: - raise exception.SnapshotNotFound(snapshot_id=snapshot_id) - - -#################### - - -def _snapshot_metadata_get_query(context, snapshot_id, session=None): - return model_query(context, models.SnapshotMetadata, - session=session, read_deleted="no").\ - filter_by(snapshot_id=snapshot_id) - - -@require_context -def _snapshot_metadata_get(context, snapshot_id, session=None): - rows = _snapshot_metadata_get_query(context, snapshot_id, session).all() - result = {} - for row in rows: - result[row['key']] = row['value'] - - return result - - -@require_context -@require_snapshot_exists -def snapshot_metadata_get(context, snapshot_id): - return _snapshot_metadata_get(context, snapshot_id) - - -@require_context -@require_snapshot_exists -@_retry_on_deadlock -def snapshot_metadata_delete(context, snapshot_id, key): - _snapshot_metadata_get_query(context, snapshot_id).\ - filter_by(key=key).\ - update({'deleted': True, - 'deleted_at': timeutils.utcnow(), - 'updated_at': literal_column('updated_at')}) - - -@require_context -def _snapshot_metadata_get_item(context, snapshot_id, key, session=None): - result = _snapshot_metadata_get_query(context, - snapshot_id, - 
session=session).\ - filter_by(key=key).\ - first() - - if not result: - raise exception.SnapshotMetadataNotFound(metadata_key=key, - snapshot_id=snapshot_id) - return result - - -@require_context -@require_snapshot_exists -@handle_db_data_error -@_retry_on_deadlock -def snapshot_metadata_update(context, snapshot_id, metadata, delete): - session = get_session() - with session.begin(): - # Set existing metadata to deleted if delete argument is True - if delete: - original_metadata = _snapshot_metadata_get(context, snapshot_id, - session) - for meta_key, meta_value in original_metadata.items(): - if meta_key not in metadata: - meta_ref = _snapshot_metadata_get_item(context, - snapshot_id, - meta_key, session) - meta_ref.update({'deleted': True, - 'deleted_at': timeutils.utcnow()}) - meta_ref.save(session=session) - - meta_ref = None - - # Now update all existing items with new values, or create new meta - # objects - for meta_key, meta_value in metadata.items(): - - # update the value whether it exists or not - item = {"value": meta_value} - - try: - meta_ref = _snapshot_metadata_get_item(context, snapshot_id, - meta_key, session) - except exception.SnapshotMetadataNotFound: - meta_ref = models.SnapshotMetadata() - item.update({"key": meta_key, "snapshot_id": snapshot_id}) - - meta_ref.update(item) - meta_ref.save(session=session) - - return snapshot_metadata_get(context, snapshot_id) - -################### - - -@handle_db_data_error -@require_admin_context -def volume_type_create(context, values, projects=None): - """Create a new volume type. 
- - In order to pass in extra specs, the values dict should contain a - 'extra_specs' key/value pair: - {'extra_specs' : {'k1': 'v1', 'k2': 'v2', ...}} - """ - if not values.get('id'): - values['id'] = str(uuid.uuid4()) - - projects = projects or [] - orm_projects = [] - - session = get_session() - with session.begin(): - try: - _volume_type_get_by_name(context, values['name'], session) - raise exception.VolumeTypeExists(id=values['name']) - except exception.VolumeTypeNotFoundByName: - pass - try: - _volume_type_get(context, values['id'], session) - raise exception.VolumeTypeExists(id=values['id']) - except exception.VolumeTypeNotFound: - pass - try: - values['extra_specs'] = _metadata_refs(values.get('extra_specs'), - models.VolumeTypeExtraSpecs) - volume_type_ref = models.VolumeTypes() - volume_type_ref.update(values) - session.add(volume_type_ref) - except Exception as e: - raise db_exc.DBError(e) - for project in set(projects): - access_ref = models.VolumeTypeProjects() - access_ref.update({"volume_type_id": volume_type_ref.id, - "project_id": project}) - access_ref.save(session=session) - orm_projects.append(access_ref) - volume_type_ref.projects = orm_projects - return volume_type_ref - - -@handle_db_data_error -@require_admin_context -def group_type_create(context, values, projects=None): - """Create a new group type. 
- - In order to pass in group specs, the values dict should contain a - 'group_specs' key/value pair: - {'group_specs' : {'k1': 'v1', 'k2': 'v2', ...}} - """ - if not values.get('id'): - values['id'] = six.text_type(uuid.uuid4()) - - projects = projects or [] - - session = get_session() - with session.begin(): - try: - _group_type_get_by_name(context, values['name'], session) - raise exception.GroupTypeExists(id=values['name']) - except exception.GroupTypeNotFoundByName: - pass - try: - _group_type_get(context, values['id'], session) - raise exception.GroupTypeExists(id=values['id']) - except exception.GroupTypeNotFound: - pass - try: - values['group_specs'] = _metadata_refs(values.get('group_specs'), - models.GroupTypeSpecs) - group_type_ref = models.GroupTypes() - group_type_ref.update(values) - session.add(group_type_ref) - except Exception as e: - raise db_exc.DBError(e) - for project in set(projects): - access_ref = models.GroupTypeProjects() - access_ref.update({"group_type_id": group_type_ref.id, - "project_id": project}) - access_ref.save(session=session) - return group_type_ref - - -def _volume_type_get_query(context, session=None, read_deleted='no', - expected_fields=None): - expected_fields = expected_fields or [] - query = model_query(context, - models.VolumeTypes, - session=session, - read_deleted=read_deleted).\ - options(joinedload('extra_specs')) - - for expected in expected_fields: - query = query.options(joinedload(expected)) - - if not context.is_admin: - the_filter = [models.VolumeTypes.is_public == true()] - projects_attr = getattr(models.VolumeTypes, 'projects') - the_filter.extend([ - projects_attr.any(project_id=context.project_id) - ]) - query = query.filter(or_(*the_filter)) - - return query - - -def _group_type_get_query(context, session=None, read_deleted='no', - expected_fields=None): - expected_fields = expected_fields or [] - query = model_query(context, - models.GroupTypes, - session=session, - read_deleted=read_deleted).\ - 
options(joinedload('group_specs')) - - if 'projects' in expected_fields: - query = query.options(joinedload('projects')) - - if not context.is_admin: - the_filter = [models.GroupTypes.is_public == true()] - projects_attr = models.GroupTypes.projects - the_filter.extend([ - projects_attr.any(project_id=context.project_id) - ]) - query = query.filter(or_(*the_filter)) - - return query - - -def _process_volume_types_filters(query, filters): - context = filters.pop('context', None) - if 'is_public' in filters and filters['is_public'] is not None: - the_filter = [models.VolumeTypes.is_public == filters['is_public']] - if filters['is_public'] and context.project_id is not None: - projects_attr = getattr(models.VolumeTypes, 'projects') - the_filter.extend([ - projects_attr.any(project_id=context.project_id, deleted=0) - ]) - if len(the_filter) > 1: - query = query.filter(or_(*the_filter)) - else: - query = query.filter(the_filter[0]) - if 'is_public' in filters: - del filters['is_public'] - if filters: - # Ensure that filters' keys exist on the model - if not is_valid_model_filters(models.VolumeTypes, filters): - return - if filters.get('extra_specs') is not None: - the_filter = [] - searchdict = filters.pop('extra_specs') - extra_specs = getattr(models.VolumeTypes, 'extra_specs') - for k, v in searchdict.items(): - the_filter.extend([extra_specs.any(key=k, value=v, - deleted=False)]) - if len(the_filter) > 1: - query = query.filter(and_(*the_filter)) - else: - query = query.filter(the_filter[0]) - query = query.filter_by(**filters) - return query - - -def _process_group_types_filters(query, filters): - context = filters.pop('context', None) - if 'is_public' in filters and filters['is_public'] is not None: - the_filter = [models.GroupTypes.is_public == filters['is_public']] - if filters['is_public'] and context.project_id is not None: - projects_attr = getattr(models.GroupTypes, 'projects') - the_filter.extend([ - projects_attr.any(project_id=context.project_id, 
deleted=0) - ]) - if len(the_filter) > 1: - query = query.filter(or_(*the_filter)) - else: - query = query.filter(the_filter[0]) - if 'is_public' in filters: - del filters['is_public'] - if filters: - # Ensure that filters' keys exist on the model - if not is_valid_model_filters(models.GroupTypes, filters): - return - if filters.get('group_specs') is not None: - the_filter = [] - searchdict = filters.pop('group_specs') - group_specs = getattr(models.GroupTypes, 'group_specs') - for k, v in searchdict.items(): - the_filter.extend([group_specs.any(key=k, value=v, - deleted=False)]) - if len(the_filter) > 1: - query = query.filter(and_(*the_filter)) - else: - query = query.filter(the_filter[0]) - query = query.filter_by(**filters) - return query - - -@handle_db_data_error -@require_admin_context -def _type_update(context, type_id, values, is_group): - if is_group: - model = models.GroupTypes - exists_exc = exception.GroupTypeExists - else: - model = models.VolumeTypes - exists_exc = exception.VolumeTypeExists - - session = get_session() - with session.begin(): - # No description change - if values['description'] is None: - del values['description'] - - # No is_public change - if values['is_public'] is None: - del values['is_public'] - - # No name change - if values['name'] is None: - del values['name'] - else: - # Group type name is unique. If change to a name that belongs to - # a different group_type, it should be prevented. 
- conditions = and_(model.name == values['name'], - model.id != type_id, ~model.deleted) - query = session.query(sql.exists().where(conditions)) - if query.scalar(): - raise exists_exc(id=values['name']) - - query = model_query(context, model, project_only=True, session=session) - result = query.filter_by(id=type_id).update(values) - if not result: - if is_group: - raise exception.GroupTypeNotFound(group_type_id=type_id) - else: - raise exception.VolumeTypeNotFound(volume_type_id=type_id) - - -def volume_type_update(context, volume_type_id, values): - _type_update(context, volume_type_id, values, is_group=False) - - -def group_type_update(context, group_type_id, values): - _type_update(context, group_type_id, values, is_group=True) - - -@require_context -def volume_type_get_all(context, inactive=False, filters=None, marker=None, - limit=None, sort_keys=None, sort_dirs=None, - offset=None, list_result=False): - """Returns a dict describing all volume_types with name as key. - - If no sort parameters are specified then the returned volume types are - sorted first by the 'created_at' key and then by the 'id' key in descending - order. - - :param context: context to query under - :param marker: the last item of the previous page, used to determine the - next page of results to return - :param limit: maximum number of items to return - :param sort_keys: list of attributes by which results should be sorted, - paired with corresponding item in sort_dirs - :param sort_dirs: list of directions in which results should be sorted, - paired with corresponding item in sort_keys - :param filters: dictionary of filters; values that are in lists, tuples, - or sets cause an 'IN' operation, while exact matching - is used for other values, see _process_volume_type_filters - function for more information - :param list_result: For compatibility, if list_result = True, return a list - instead of dict. 
- :returns: list/dict of matching volume types - """ - session = get_session() - with session.begin(): - # Add context for _process_volume_types_filters - filters = filters or {} - filters['context'] = context - # Generate the query - query = _generate_paginate_query(context, session, marker, limit, - sort_keys, sort_dirs, filters, offset, - models.VolumeTypes) - # No volume types would match, return empty dict or list - if query is None: - if list_result: - return [] - return {} - - rows = query.all() - if list_result: - result = [_dict_with_extra_specs_if_authorized(context, row) - for row in rows] - return result - result = {row['name']: _dict_with_extra_specs_if_authorized(context, - row) - for row in rows} - return result - - -@require_context -def group_type_get_all(context, inactive=False, filters=None, marker=None, - limit=None, sort_keys=None, sort_dirs=None, - offset=None, list_result=False): - """Returns a dict describing all group_types with name as key. - - If no sort parameters are specified then the returned group types are - sorted first by the 'created_at' key and then by the 'id' key in descending - order. - - :param context: context to query under - :param marker: the last item of the previous page, used to determine the - next page of results to return - :param limit: maximum number of items to return - :param sort_keys: list of attributes by which results should be sorted, - paired with corresponding item in sort_dirs - :param sort_dirs: list of directions in which results should be sorted, - paired with corresponding item in sort_keys - :param filters: dictionary of filters; values that are in lists, tuples, - or sets cause an 'IN' operation, while exact matching - is used for other values, see _process_volume_type_filters - function for more information - :param list_result: For compatibility, if list_result = True, return a list - instead of dict. 
- :returns: list/dict of matching group types - """ - session = get_session() - with session.begin(): - # Add context for _process_group_types_filters - filters = filters or {} - filters['context'] = context - # Generate the query - query = _generate_paginate_query(context, session, marker, limit, - sort_keys, sort_dirs, filters, offset, - models.GroupTypes) - # No group types would match, return empty dict or list - if query is None: - if list_result: - return [] - return {} - - rows = query.all() - if list_result: - result = [_dict_with_group_specs_if_authorized(context, row) - for row in rows] - return result - result = {row['name']: _dict_with_group_specs_if_authorized(context, - row) - for row in rows} - return result - - -def _volume_type_get_id_from_volume_type_query(context, id, session=None): - return model_query( - context, models.VolumeTypes.id, read_deleted="no", - session=session, base_model=models.VolumeTypes).\ - filter_by(id=id) - - -def _group_type_get_id_from_group_type_query(context, id, session=None): - return model_query( - context, models.GroupTypes.id, read_deleted="no", - session=session, base_model=models.GroupTypes).\ - filter_by(id=id) - - -def _volume_type_get_id_from_volume_type(context, id, session=None): - result = _volume_type_get_id_from_volume_type_query( - context, id, session=session).first() - if not result: - raise exception.VolumeTypeNotFound(volume_type_id=id) - return result[0] - - -def _group_type_get_id_from_group_type(context, id, session=None): - result = _group_type_get_id_from_group_type_query( - context, id, session=session).first() - if not result: - raise exception.GroupTypeNotFound(group_type_id=id) - return result[0] - - -def _volume_type_get_db_object(context, id, session=None, inactive=False, - expected_fields=None): - read_deleted = "yes" if inactive else "no" - result = _volume_type_get_query( - context, session, read_deleted, expected_fields).\ - filter_by(id=id).\ - first() - return result - - -def 
_group_type_get_db_object(context, id, session=None, inactive=False, - expected_fields=None): - read_deleted = "yes" if inactive else "no" - result = _group_type_get_query( - context, session, read_deleted, expected_fields).\ - filter_by(id=id).\ - first() - return result - - -@require_context -def _volume_type_get(context, id, session=None, inactive=False, - expected_fields=None): - expected_fields = expected_fields or [] - result = _volume_type_get_db_object(context, id, session, inactive, - expected_fields) - if not result: - raise exception.VolumeTypeNotFound(volume_type_id=id) - - vtype = _dict_with_extra_specs_if_authorized(context, result) - - if 'projects' in expected_fields: - vtype['projects'] = [p['project_id'] for p in result['projects']] - - if 'qos_specs' in expected_fields: - vtype['qos_specs'] = result.qos_specs - - return vtype - - -@require_context -def _group_type_get(context, id, session=None, inactive=False, - expected_fields=None): - expected_fields = expected_fields or [] - result = _group_type_get_db_object(context, id, session, inactive, - expected_fields) - if not result: - raise exception.GroupTypeNotFound(group_type_id=id) - - gtype = _dict_with_group_specs_if_authorized(context, result) - - if 'projects' in expected_fields: - gtype['projects'] = [p['project_id'] for p in result['projects']] - - return gtype - - -@require_context -def volume_type_get(context, id, inactive=False, expected_fields=None): - """Return a dict describing specific volume_type.""" - - return _volume_type_get(context, id, - session=None, - inactive=inactive, - expected_fields=expected_fields) - - -@require_context -def group_type_get(context, id, inactive=False, expected_fields=None): - """Return a dict describing specific group_type.""" - - return _group_type_get(context, id, - session=None, - inactive=inactive, - expected_fields=expected_fields) - - -def _volume_type_get_full(context, id): - """Return dict for a specific volume_type with extra_specs and 
projects.""" - return _volume_type_get(context, id, session=None, inactive=False, - expected_fields=('extra_specs', 'projects')) - - -def _group_type_get_full(context, id): - """Return dict for a specific group_type with group_specs and projects.""" - return _group_type_get(context, id, session=None, inactive=False, - expected_fields=('group_specs', 'projects')) - - -@require_context -def _volume_type_ref_get(context, id, session=None, inactive=False): - read_deleted = "yes" if inactive else "no" - result = model_query(context, - models.VolumeTypes, - session=session, - read_deleted=read_deleted).\ - options(joinedload('extra_specs')).\ - filter_by(id=id).\ - first() - - if not result: - raise exception.VolumeTypeNotFound(volume_type_id=id) - - return result - - -@require_context -def _group_type_ref_get(context, id, session=None, inactive=False): - read_deleted = "yes" if inactive else "no" - result = model_query(context, - models.GroupTypes, - session=session, - read_deleted=read_deleted).\ - options(joinedload('group_specs')).\ - filter_by(id=id).\ - first() - - if not result: - raise exception.GroupTypeNotFound(group_type_id=id) - - return result - - -@require_context -def _volume_type_get_by_name(context, name, session=None): - result = model_query(context, models.VolumeTypes, session=session).\ - options(joinedload('extra_specs')).\ - filter_by(name=name).\ - first() - - if not result: - raise exception.VolumeTypeNotFoundByName(volume_type_name=name) - - return _dict_with_extra_specs_if_authorized(context, result) - - -@require_context -def _group_type_get_by_name(context, name, session=None): - result = model_query(context, models.GroupTypes, session=session).\ - options(joinedload('group_specs')).\ - filter_by(name=name).\ - first() - - if not result: - raise exception.GroupTypeNotFoundByName(group_type_name=name) - - return _dict_with_group_specs_if_authorized(context, result) - - -@require_context -def volume_type_get_by_name(context, name): - """Return a 
dict describing specific volume_type.""" - - return _volume_type_get_by_name(context, name) - - -@require_context -def group_type_get_by_name(context, name): - """Return a dict describing specific group_type.""" - - return _group_type_get_by_name(context, name) - - -@require_context -def volume_types_get_by_name_or_id(context, volume_type_list): - """Return a dict describing specific volume_type.""" - req_volume_types = [] - for vol_t in volume_type_list: - if not uuidutils.is_uuid_like(vol_t): - vol_type = _volume_type_get_by_name(context, vol_t) - else: - vol_type = _volume_type_get(context, vol_t) - req_volume_types.append(vol_type) - return req_volume_types - - -@require_context -def group_types_get_by_name_or_id(context, group_type_list): - """Return a dict describing specific group_type.""" - req_group_types = [] - for grp_t in group_type_list: - if not uuidutils.is_uuid_like(grp_t): - grp_type = _group_type_get_by_name(context, grp_t) - else: - grp_type = _group_type_get(context, grp_t) - req_group_types.append(grp_type) - return req_group_types - - -@require_admin_context -def volume_type_qos_associations_get(context, qos_specs_id, inactive=False): - read_deleted = "yes" if inactive else "no" - # Raise QoSSpecsNotFound if no specs found - if not resource_exists(context, - models.QualityOfServiceSpecs, - qos_specs_id): - raise exception.QoSSpecsNotFound(specs_id=qos_specs_id) - vts = (model_query(context, models.VolumeTypes, read_deleted=read_deleted). - options(joinedload('extra_specs')). - options(joinedload('projects')). - filter_by(qos_specs_id=qos_specs_id).all()) - return vts - - -@require_admin_context -def volume_type_qos_associate(context, type_id, qos_specs_id): - session = get_session() - with session.begin(): - _volume_type_get(context, type_id, session) - - session.query(models.VolumeTypes). \ - filter_by(id=type_id). 
\ - update({'qos_specs_id': qos_specs_id, - 'updated_at': timeutils.utcnow()}) - - -@require_admin_context -def volume_type_qos_disassociate(context, qos_specs_id, type_id): - """Disassociate volume type from qos specs.""" - session = get_session() - with session.begin(): - _volume_type_get(context, type_id, session) - - session.query(models.VolumeTypes). \ - filter_by(id=type_id). \ - filter_by(qos_specs_id=qos_specs_id). \ - update({'qos_specs_id': None, - 'updated_at': timeutils.utcnow()}) - - -@require_admin_context -def volume_type_qos_disassociate_all(context, qos_specs_id): - """Disassociate all volume types associated with specified qos specs.""" - session = get_session() - with session.begin(): - session.query(models.VolumeTypes). \ - filter_by(qos_specs_id=qos_specs_id). \ - update({'qos_specs_id': None, - 'updated_at': timeutils.utcnow()}) - - -@require_admin_context -def volume_type_qos_specs_get(context, type_id): - """Return all qos specs for given volume type. - - result looks like: - { - 'qos_specs': - { - 'id': 'qos-specs-id', - 'name': 'qos_specs_name', - 'consumer': 'Consumer', - 'specs': { - 'key1': 'value1', - 'key2': 'value2', - 'key3': 'value3' - } - } - } - - """ - session = get_session() - with session.begin(): - _volume_type_get(context, type_id, session) - - row = session.query(models.VolumeTypes). \ - options(joinedload('qos_specs')). \ - filter_by(id=type_id). \ - first() - - # row.qos_specs is a list of QualityOfServiceSpecs ref - specs = _dict_with_qos_specs(row.qos_specs) - - if not specs: - # turn empty list to None - specs = None - else: - specs = specs[0] - - return {'qos_specs': specs} - - -@require_admin_context -@_retry_on_deadlock -def volume_type_destroy(context, id): - utcnow = timeutils.utcnow() - session = get_session() - with session.begin(): - _volume_type_get(context, id, session) - results = model_query(context, models.Volume, session=session). 
\ - filter_by(volume_type_id=id).all() - group_count = model_query(context, - models.GroupVolumeTypeMapping, - read_deleted="no", - session=session).\ - filter_by(volume_type_id=id).count() - cg_count = model_query(context, models.ConsistencyGroup, - session=session).filter( - models.ConsistencyGroup.volume_type_id.contains(id)).count() - if results or group_count or cg_count: - LOG.error('VolumeType %s deletion failed, VolumeType in use.', id) - raise exception.VolumeTypeInUse(volume_type_id=id) - updated_values = {'deleted': True, - 'deleted_at': utcnow, - 'updated_at': literal_column('updated_at')} - model_query(context, models.VolumeTypes, session=session).\ - filter_by(id=id).\ - update(updated_values) - model_query(context, models.VolumeTypeExtraSpecs, session=session).\ - filter_by(volume_type_id=id).\ - update({'deleted': True, - 'deleted_at': utcnow, - 'updated_at': literal_column('updated_at')}) - model_query(context, models.VolumeTypeProjects, session=session, - read_deleted="int_no").filter_by( - volume_type_id=id).soft_delete(synchronize_session=False) - del updated_values['updated_at'] - return updated_values - - -@require_admin_context -@_retry_on_deadlock -def group_type_destroy(context, id): - session = get_session() - with session.begin(): - _group_type_get(context, id, session) - results = model_query(context, models.Group, session=session). 
\ - filter_by(group_type_id=id).all() - if results: - LOG.error('GroupType %s deletion failed, ' - 'GroupType in use.', id) - raise exception.GroupTypeInUse(group_type_id=id) - model_query(context, models.GroupTypes, session=session).\ - filter_by(id=id).\ - update({'deleted': True, - 'deleted_at': timeutils.utcnow(), - 'updated_at': literal_column('updated_at')}) - model_query(context, models.GroupTypeSpecs, session=session).\ - filter_by(group_type_id=id).\ - update({'deleted': True, - 'deleted_at': timeutils.utcnow(), - 'updated_at': literal_column('updated_at')}) - - -@require_context -def volume_get_all_active_by_window(context, - begin, - end=None, - project_id=None): - """Return volumes that were active during window.""" - query = model_query(context, models.Volume, read_deleted="yes") - query = query.filter(or_(models.Volume.deleted_at == None, # noqa - models.Volume.deleted_at > begin)) - if end: - query = query.filter(models.Volume.created_at < end) - if project_id: - query = query.filter_by(project_id=project_id) - - query = (query.options(joinedload('volume_metadata')). - options(joinedload('volume_type')). - options(joinedload('volume_attachment')). - options(joinedload('consistencygroup')). 
- options(joinedload('group'))) - - if is_admin_context(context): - query = query.options(joinedload('volume_admin_metadata')) - - return query.all() - - -def _volume_type_access_query(context, session=None): - return model_query(context, models.VolumeTypeProjects, session=session, - read_deleted="int_no") - - -def _group_type_access_query(context, session=None): - return model_query(context, models.GroupTypeProjects, session=session, - read_deleted="int_no") - - -@require_admin_context -def volume_type_access_get_all(context, type_id): - volume_type_id = _volume_type_get_id_from_volume_type(context, type_id) - return _volume_type_access_query(context).\ - filter_by(volume_type_id=volume_type_id).all() - - -@require_admin_context -def group_type_access_get_all(context, type_id): - group_type_id = _group_type_get_id_from_group_type(context, type_id) - return _group_type_access_query(context).\ - filter_by(group_type_id=group_type_id).all() - - -def _group_volume_type_mapping_query(context, session=None): - return model_query(context, models.GroupVolumeTypeMapping, session=session, - read_deleted="no") - - -@require_admin_context -def volume_type_get_all_by_group(context, group_id): - # Generic volume group - mappings = (_group_volume_type_mapping_query(context). - filter_by(group_id=group_id).all()) - session = get_session() - with session.begin(): - volume_type_ids = [mapping.volume_type_id for mapping in mappings] - query = (model_query(context, - models.VolumeTypes, - session=session, - read_deleted='no'). - filter(models.VolumeTypes.id.in_(volume_type_ids)). - options(joinedload('extra_specs')). - options(joinedload('projects')). 
- all()) - return query - - -def _group_volume_type_mapping_get_all_by_group_volume_type(context, group_id, - volume_type_id): - mappings = _group_volume_type_mapping_query(context).\ - filter_by(group_id=group_id).\ - filter_by(volume_type_id=volume_type_id).all() - return mappings - - -@require_admin_context -def volume_type_access_add(context, type_id, project_id): - """Add given tenant to the volume type access list.""" - volume_type_id = _volume_type_get_id_from_volume_type(context, type_id) - - access_ref = models.VolumeTypeProjects() - access_ref.update({"volume_type_id": volume_type_id, - "project_id": project_id}) - - session = get_session() - with session.begin(): - try: - access_ref.save(session=session) - except db_exc.DBDuplicateEntry: - raise exception.VolumeTypeAccessExists(volume_type_id=type_id, - project_id=project_id) - return access_ref - - -@require_admin_context -def group_type_access_add(context, type_id, project_id): - """Add given tenant to the group type access list.""" - group_type_id = _group_type_get_id_from_group_type(context, type_id) - - access_ref = models.GroupTypeProjects() - access_ref.update({"group_type_id": group_type_id, - "project_id": project_id}) - - session = get_session() - with session.begin(): - try: - access_ref.save(session=session) - except db_exc.DBDuplicateEntry: - raise exception.GroupTypeAccessExists(group_type_id=type_id, - project_id=project_id) - return access_ref - - -@require_admin_context -def volume_type_access_remove(context, type_id, project_id): - """Remove given tenant from the volume type access list.""" - volume_type_id = _volume_type_get_id_from_volume_type(context, type_id) - - count = (_volume_type_access_query(context). - filter_by(volume_type_id=volume_type_id). - filter_by(project_id=project_id). 
- soft_delete(synchronize_session=False)) - if count == 0: - raise exception.VolumeTypeAccessNotFound( - volume_type_id=type_id, project_id=project_id) - - -@require_admin_context -def group_type_access_remove(context, type_id, project_id): - """Remove given tenant from the group type access list.""" - group_type_id = _group_type_get_id_from_group_type(context, type_id) - - count = (_group_type_access_query(context). - filter_by(group_type_id=group_type_id). - filter_by(project_id=project_id). - soft_delete(synchronize_session=False)) - if count == 0: - raise exception.GroupTypeAccessNotFound( - group_type_id=type_id, project_id=project_id) - - -#################### - - -def _volume_type_extra_specs_query(context, volume_type_id, session=None): - return model_query(context, models.VolumeTypeExtraSpecs, session=session, - read_deleted="no").\ - filter_by(volume_type_id=volume_type_id) - - -@require_context -def volume_type_extra_specs_get(context, volume_type_id): - rows = _volume_type_extra_specs_query(context, volume_type_id).\ - all() - - result = {} - for row in rows: - result[row['key']] = row['value'] - - return result - - -@require_context -def volume_type_extra_specs_delete(context, volume_type_id, key): - session = get_session() - with session.begin(): - _volume_type_extra_specs_get_item(context, volume_type_id, key, - session) - _volume_type_extra_specs_query(context, volume_type_id, session).\ - filter_by(key=key).\ - update({'deleted': True, - 'deleted_at': timeutils.utcnow(), - 'updated_at': literal_column('updated_at')}) - - -@require_context -def _volume_type_extra_specs_get_item(context, volume_type_id, key, - session=None): - result = _volume_type_extra_specs_query( - context, volume_type_id, session=session).\ - filter_by(key=key).\ - first() - - if not result: - raise exception.VolumeTypeExtraSpecsNotFound( - extra_specs_key=key, - volume_type_id=volume_type_id) - - return result - - -@handle_db_data_error -@require_context -def 
####################


def _group_type_specs_query(context, group_type_id, session=None):
    # Base query for the (non-deleted) group specs of one group type.
    return model_query(context, models.GroupTypeSpecs, session=session,
                       read_deleted="no").\
        filter_by(group_type_id=group_type_id)


@require_context
def group_type_specs_get(context, group_type_id):
    """Return the group specs of a group type as a {key: value} dict."""
    rows = _group_type_specs_query(context, group_type_id).\
        all()

    result = {}
    for row in rows:
        result[row['key']] = row['value']

    return result


@require_context
def group_type_specs_delete(context, group_type_id, key):
    """Soft-delete one group-spec key of a group type.

    :raises GroupTypeSpecsNotFound: if the key does not exist.
    """
    session = get_session()
    with session.begin():
        # Existence check; raises if the key is missing.
        _group_type_specs_get_item(context, group_type_id, key,
                                   session)
        _group_type_specs_query(context, group_type_id, session).\
            filter_by(key=key).\
            update({'deleted': True,
                    'deleted_at': timeutils.utcnow(),
                    # Keep updated_at unchanged on soft delete.
                    'updated_at': literal_column('updated_at')})


@require_context
def _group_type_specs_get_item(context, group_type_id, key,
                               session=None):
    # Fetch a single group-spec row, raising if it does not exist.
    result = _group_type_specs_query(
        context, group_type_id, session=session).\
        filter_by(key=key).\
        first()

    if not result:
        raise exception.GroupTypeSpecsNotFound(
            group_specs_key=key,
            group_type_id=group_type_id)

    return result


@handle_db_data_error
@require_context
def group_type_specs_update_or_create(context, group_type_id,
                                      specs):
    """Create or update the given group specs; return the input dict."""
    session = get_session()
    with session.begin():
        spec_ref = None
        for key, value in specs.items():
            try:
                # Update in place if the key already exists...
                spec_ref = _group_type_specs_get_item(
                    context, group_type_id, key, session)
            except exception.GroupTypeSpecsNotFound:
                # ...otherwise insert a fresh row.
                spec_ref = models.GroupTypeSpecs()
            spec_ref.update({"key": key, "value": value,
                             "group_type_id": group_type_id,
                             "deleted": False})
            spec_ref.save(session=session)

        return specs


####################


@require_admin_context
def qos_specs_create(context, values):
    """Create a new QoS specs.

    :param values dictionary that contains specifications for QoS
          e.g. {'name': 'Name',
                'consumer': 'front-end',
                'specs': {
                    'total_iops_sec': 1000,
                    'total_bytes_sec': 1024000
                    }
                }
    """
    specs_id = str(uuid.uuid4())
    session = get_session()
    with session.begin():
        try:
            # Name must be unique among live QoS specs; the helper raises
            # QoSSpecsNotFound when the name is free.
            _qos_specs_get_all_by_name(context, values['name'], session)
            raise exception.QoSSpecsExists(specs_id=values['name'])
        except exception.QoSSpecsNotFound:
            pass
        try:
            # Insert a root entry for QoS specs
            specs_root = models.QualityOfServiceSpecs()
            root = dict(id=specs_id)
            # 'QoS_Specs_Name' is an internal reserved key to store
            # the name of QoS specs
            root['key'] = 'QoS_Specs_Name'
            root['value'] = values['name']
            LOG.debug("DB qos_specs_create(): root %s", root)
            specs_root.update(root)
            specs_root.save(session=session)

            # Save 'consumer' value directly as it will not be in
            # values['specs'] and so we avoid modifying/copying passed in dict
            consumer = {'key': 'consumer',
                        'value': values['consumer'],
                        'specs_id': specs_id,
                        'id': six.text_type(uuid.uuid4())}
            cons_entry = models.QualityOfServiceSpecs()
            cons_entry.update(consumer)
            cons_entry.save(session=session)

            # Insert all specification entries for QoS specs
            for k, v in values.get('specs', {}).items():
                item = dict(key=k, value=v, specs_id=specs_id)
                item['id'] = str(uuid.uuid4())
                spec_entry = models.QualityOfServiceSpecs()
                spec_entry.update(item)
                spec_entry.save(session=session)
        except db_exc.DBDataError:
            msg = _('Error writing field to database')
            LOG.exception(msg)
            raise exception.Invalid(msg)
        except Exception as e:
            # Wrap anything unexpected as a generic DB error.
            raise db_exc.DBError(e)

        return dict(id=specs_root.id, name=specs_root.value)
@require_admin_context
def _qos_specs_get_all_by_name(context, name, session=None, inactive=False):
    # Look up a QoS specs root row (plus eager-loaded children) by name.
    # inactive=True also returns soft-deleted rows.
    read_deleted = 'yes' if inactive else 'no'
    results = model_query(context, models.QualityOfServiceSpecs,
                          read_deleted=read_deleted, session=session). \
        filter_by(key='QoS_Specs_Name'). \
        filter_by(value=name). \
        options(joinedload('specs')).all()

    if not results:
        raise exception.QoSSpecsNotFound(specs_id=name)

    return results


@require_admin_context
def _qos_specs_get_all_ref(context, qos_specs_id, session=None,
                           inactive=False):
    # Look up a QoS specs root row (plus eager-loaded children) by id.
    read_deleted = 'yes' if inactive else 'no'
    result = model_query(context, models.QualityOfServiceSpecs,
                         read_deleted=read_deleted, session=session). \
        filter_by(id=qos_specs_id). \
        options(joinedload_all('specs')).all()

    if not result:
        raise exception.QoSSpecsNotFound(specs_id=qos_specs_id)

    return result


def _dict_with_children_specs(specs):
    """Convert specs list to a dict."""
    result = {}
    for spec in specs:
        # Skip deleted keys
        if not spec['deleted']:
            result.update({spec['key']: spec['value']})

    return result


def _dict_with_qos_specs(rows):
    """Convert qos specs query results to list.

    Qos specs query results are a list of quality_of_service_specs refs,
    some are root entry of a qos specs (key == 'QoS_Specs_Name') and the
    rest are children entry, a.k.a detailed specs for a qos specs. This
    function converts query results to a dict using spec name as key.
    """
    result = []
    for row in rows:
        # Only root rows become members; child rows are folded into the
        # member's 'specs' dict ('consumer' is lifted to the top level).
        if row['key'] == 'QoS_Specs_Name':
            member = {'name': row['value'], 'id': row['id']}
            if row.specs:
                spec_dict = _dict_with_children_specs(row.specs)
                member['consumer'] = spec_dict.pop('consumer')
                member.update(dict(specs=spec_dict))
            result.append(member)
    return result


@require_admin_context
def qos_specs_get(context, qos_specs_id, inactive=False):
    """Return a single QoS specs, converted to a plain dict."""
    rows = _qos_specs_get_all_ref(context, qos_specs_id, None, inactive)

    return _dict_with_qos_specs(rows)[0]


@require_admin_context
def qos_specs_get_all(context, filters=None, marker=None, limit=None,
                      offset=None, sort_keys=None, sort_dirs=None):
    """Returns a list of all qos_specs.

    Results is like:
        [{
            'id': SPECS-UUID,
            'name': 'qos_spec-1',
            'consumer': 'back-end',
            'specs': {
                'key1': 'value1',
                'key2': 'value2',
                ...
            }
         },
         {
            'id': SPECS-UUID,
            'name': 'qos_spec-2',
            'consumer': 'front-end',
            'specs': {
                'key1': 'value1',
                'key2': 'value2',
                ...
            }
         },
        ]
    """
    session = get_session()
    with session.begin():
        # Generate the query
        query = _generate_paginate_query(context, session, marker, limit,
                                         sort_keys, sort_dirs, filters,
                                         offset, models.QualityOfServiceSpecs)
        # No Qos specs would match, return empty list
        if query is None:
            return []
        rows = query.all()
        return _dict_with_qos_specs(rows)
@require_admin_context
def _qos_specs_get_query(context, session):
    # Root rows only; child spec rows are eager-loaded via 'specs'.
    rows = model_query(context, models.QualityOfServiceSpecs,
                       session=session,
                       read_deleted='no').\
        options(joinedload_all('specs')).filter_by(key='QoS_Specs_Name')
    return rows


def _process_qos_specs_filters(query, filters):
    # Apply equality filters; returns None for unknown filter keys so the
    # paginate machinery can short-circuit to "no results".
    if filters:
        # Ensure that filters' keys exist on the model
        if not is_valid_model_filters(models.QualityOfServiceSpecs, filters):
            return
        query = query.filter_by(**filters)
    return query


@require_admin_context
def _qos_specs_get(context, qos_spec_id, session=None):
    # Fetch the root row of a QoS specs by id, raising when absent.
    result = model_query(context, models.QualityOfServiceSpecs,
                         session=session,
                         read_deleted='no').\
        filter_by(id=qos_spec_id).filter_by(key='QoS_Specs_Name').first()

    if not result:
        raise exception.QoSSpecsNotFound(specs_id=qos_spec_id)

    return result


@require_admin_context
def qos_specs_get_by_name(context, name, inactive=False):
    """Return a single QoS specs looked up by name, as a plain dict."""
    rows = _qos_specs_get_all_by_name(context, name, None, inactive)

    return _dict_with_qos_specs(rows)[0]


@require_admin_context
def qos_specs_associations_get(context, qos_specs_id):
    """Return all entities associated with specified qos specs.

    For now, the only entity that is possible to associate with
    a qos specs is volume type, so this is just a wrapper of
    volume_type_qos_associations_get(). But it's possible to
    extend qos specs association to other entities, such as volumes,
    sometime in future.
    """
    return volume_type_qos_associations_get(context, qos_specs_id)


@require_admin_context
def qos_specs_associate(context, qos_specs_id, type_id):
    """Associate volume type from specified qos specs."""
    return volume_type_qos_associate(context, type_id, qos_specs_id)


@require_admin_context
def qos_specs_disassociate(context, qos_specs_id, type_id):
    """Disassociate volume type from specified qos specs."""
    return volume_type_qos_disassociate(context, qos_specs_id, type_id)


@require_admin_context
def qos_specs_disassociate_all(context, qos_specs_id):
    """Disassociate all entities associated with specified qos specs.

    For now, the only entity that is possible to associate with
    a qos specs is volume type, so this is just a wrapper of
    volume_type_qos_disassociate_all(). But it's possible to
    extend qos specs association to other entities, such as volumes,
    sometime in future.
    """
    return volume_type_qos_disassociate_all(context, qos_specs_id)


@require_admin_context
def qos_specs_item_delete(context, qos_specs_id, key):
    """Soft-delete one key of a QoS specs."""
    session = get_session()
    with session.begin():
        session.query(models.QualityOfServiceSpecs). \
            filter(models.QualityOfServiceSpecs.key == key). \
            filter(models.QualityOfServiceSpecs.specs_id == qos_specs_id). \
            update({'deleted': True,
                    'deleted_at': timeutils.utcnow(),
                    # Keep updated_at unchanged on soft delete.
                    'updated_at': literal_column('updated_at')})


@require_admin_context
def qos_specs_delete(context, qos_specs_id):
    """Soft-delete a QoS specs root entry and all of its children."""
    session = get_session()
    with session.begin():
        # Existence check; raises QoSSpecsNotFound when the id is unknown.
        _qos_specs_get_all_ref(context, qos_specs_id, session)
        updated_values = {'deleted': True,
                          'deleted_at': timeutils.utcnow(),
                          'updated_at': literal_column('updated_at')}
        # Root row matches on id, child rows match on specs_id.
        session.query(models.QualityOfServiceSpecs).\
            filter(or_(models.QualityOfServiceSpecs.id == qos_specs_id,
                       models.QualityOfServiceSpecs.specs_id ==
                       qos_specs_id)).\
            update(updated_values)
        del updated_values['updated_at']
        return updated_values


@require_admin_context
def _qos_specs_get_item(context, qos_specs_id, key, session=None):
    # Fetch one child spec row of a QoS specs, raising when absent.
    result = model_query(context, models.QualityOfServiceSpecs,
                         session=session). \
        filter(models.QualityOfServiceSpecs.key == key). \
        filter(models.QualityOfServiceSpecs.specs_id == qos_specs_id). \
        first()

    if not result:
        raise exception.QoSSpecsKeyNotFound(
            specs_key=key,
            specs_id=qos_specs_id)

    return result


@handle_db_data_error
@require_admin_context
def qos_specs_update(context, qos_specs_id, updates):
    """Make updates to an existing qos specs.

    Perform add, update or delete key/values to a qos specs.
    """

    session = get_session()
    with session.begin():
        # make sure qos specs exists
        exists = resource_exists(context, models.QualityOfServiceSpecs,
                                 qos_specs_id, session)
        if not exists:
            raise exception.QoSSpecsNotFound(specs_id=qos_specs_id)
        specs = updates.get('specs', {})

        if 'consumer' in updates:
            # Massage consumer to the right place for DB and copy specs
            # before updating so we don't modify dict for caller
            specs = specs.copy()
            specs['consumer'] = updates['consumer']
        spec_ref = None
        for key in specs.keys():
            try:
                spec_ref = _qos_specs_get_item(
                    context, qos_specs_id, key, session)
            except exception.QoSSpecsKeyNotFound:
                spec_ref = models.QualityOfServiceSpecs()
            id = None
            # Reuse the row's id when updating; mint a new one when creating.
            if spec_ref.get('id', None):
                id = spec_ref['id']
            else:
                id = str(uuid.uuid4())
            value = dict(id=id, key=key, value=specs[key],
                         specs_id=qos_specs_id,
                         deleted=False)
            LOG.debug('qos_specs_update() value: %s', value)
            spec_ref.update(value)
            spec_ref.save(session=session)

        return specs
- """ - - session = get_session() - with session.begin(): - # make sure qos specs exists - exists = resource_exists(context, models.QualityOfServiceSpecs, - qos_specs_id, session) - if not exists: - raise exception.QoSSpecsNotFound(specs_id=qos_specs_id) - specs = updates.get('specs', {}) - - if 'consumer' in updates: - # Massage consumer to the right place for DB and copy specs - # before updating so we don't modify dict for caller - specs = specs.copy() - specs['consumer'] = updates['consumer'] - spec_ref = None - for key in specs.keys(): - try: - spec_ref = _qos_specs_get_item( - context, qos_specs_id, key, session) - except exception.QoSSpecsKeyNotFound: - spec_ref = models.QualityOfServiceSpecs() - id = None - if spec_ref.get('id', None): - id = spec_ref['id'] - else: - id = str(uuid.uuid4()) - value = dict(id=id, key=key, value=specs[key], - specs_id=qos_specs_id, - deleted=False) - LOG.debug('qos_specs_update() value: %s', value) - spec_ref.update(value) - spec_ref.save(session=session) - - return specs - - -#################### - - -@require_context -def volume_type_encryption_get(context, volume_type_id, session=None): - return model_query(context, models.Encryption, session=session, - read_deleted="no").\ - filter_by(volume_type_id=volume_type_id).first() - - -@require_admin_context -def volume_type_encryption_delete(context, volume_type_id): - session = get_session() - with session.begin(): - encryption = volume_type_encryption_get(context, volume_type_id, - session) - if not encryption: - raise exception.VolumeTypeEncryptionNotFound( - type_id=volume_type_id) - encryption.update({'deleted': True, - 'deleted_at': timeutils.utcnow(), - 'updated_at': literal_column('updated_at')}) - - -@handle_db_data_error -@require_admin_context -def volume_type_encryption_create(context, volume_type_id, values): - session = get_session() - with session.begin(): - encryption = models.Encryption() - - if 'volume_type_id' not in values: - values['volume_type_id'] = 
volume_type_id - - if 'encryption_id' not in values: - values['encryption_id'] = six.text_type(uuid.uuid4()) - - encryption.update(values) - session.add(encryption) - - return encryption - - -@handle_db_data_error -@require_admin_context -def volume_type_encryption_update(context, volume_type_id, values): - query = model_query(context, models.Encryption) - result = query.filter_by(volume_type_id=volume_type_id).update(values) - if not result: - raise exception.VolumeTypeEncryptionNotFound(type_id=volume_type_id) - - -def volume_type_encryption_volume_get(context, volume_type_id, session=None): - volume_list = _volume_get_query(context, session=session, - project_only=False).\ - filter_by(volume_type_id=volume_type_id).\ - all() - return volume_list - -#################### - - -@require_context -def volume_encryption_metadata_get(context, volume_id, session=None): - """Return the encryption metadata for a given volume.""" - - volume_ref = _volume_get(context, volume_id) - encryption_ref = volume_type_encryption_get(context, - volume_ref['volume_type_id']) - - values = { - 'encryption_key_id': volume_ref['encryption_key_id'], - } - - if encryption_ref: - for key in ['control_location', 'cipher', 'key_size', 'provider']: - values[key] = encryption_ref[key] - - return values - - -#################### - - -@require_context -def _volume_glance_metadata_get_all(context, session=None): - query = model_query(context, - models.VolumeGlanceMetadata, - session=session) - if is_user_context(context): - query = query.filter( - models.Volume.id == models.VolumeGlanceMetadata.volume_id, - models.Volume.project_id == context.project_id) - return query.all() - - -@require_context -def volume_glance_metadata_get_all(context): - """Return the Glance metadata for all volumes.""" - - return _volume_glance_metadata_get_all(context) - - -@require_context -def volume_glance_metadata_list_get(context, volume_id_list): - """Return the glance metadata for a volume list.""" - query = 
@require_context
@require_volume_exists
def _volume_glance_metadata_get(context, volume_id, session=None):
    # All live metadata rows for one volume; raises when there are none.
    rows = model_query(context, models.VolumeGlanceMetadata, session=session).\
        filter_by(volume_id=volume_id).\
        filter_by(deleted=False).\
        all()

    if not rows:
        raise exception.GlanceMetadataNotFound(id=volume_id)

    return rows


@require_context
def volume_glance_metadata_get(context, volume_id):
    """Return the Glance metadata for the specified volume."""

    return _volume_glance_metadata_get(context, volume_id)


@require_context
@require_snapshot_exists
def _volume_snapshot_glance_metadata_get(context, snapshot_id, session=None):
    # All live metadata rows for one snapshot; raises when there are none.
    rows = model_query(context, models.VolumeGlanceMetadata, session=session).\
        filter_by(snapshot_id=snapshot_id).\
        filter_by(deleted=False).\
        all()

    if not rows:
        raise exception.GlanceMetadataNotFound(id=snapshot_id)

    return rows


@require_context
def volume_snapshot_glance_metadata_get(context, snapshot_id):
    """Return the Glance metadata for the specified snapshot."""

    return _volume_snapshot_glance_metadata_get(context, snapshot_id)


@require_context
@require_volume_exists
def volume_glance_metadata_create(context, volume_id, key, value):
    """Update the Glance metadata for a volume by adding a new key:value pair.

    This API does not support changing the value of a key once it has been
    created.
    """

    session = get_session()
    with session.begin():
        rows = session.query(models.VolumeGlanceMetadata).\
            filter_by(volume_id=volume_id).\
            filter_by(key=key).\
            filter_by(deleted=False).all()

        if len(rows) > 0:
            raise exception.GlanceMetadataExists(key=key,
                                                 volume_id=volume_id)

        vol_glance_metadata = models.VolumeGlanceMetadata()
        vol_glance_metadata.volume_id = volume_id
        vol_glance_metadata.key = key
        # Values are always stored as text.
        vol_glance_metadata.value = six.text_type(value)
        session.add(vol_glance_metadata)

    return


@require_context
@require_volume_exists
def volume_glance_metadata_bulk_create(context, volume_id, metadata):
    """Update the Glance metadata for a volume by adding new key:value pairs.

    This API does not support changing the value of a key once it has been
    created.
    """

    session = get_session()
    with session.begin():
        for (key, value) in metadata.items():
            # Reject the whole batch as soon as one key already exists.
            rows = session.query(models.VolumeGlanceMetadata).\
                filter_by(volume_id=volume_id).\
                filter_by(key=key).\
                filter_by(deleted=False).all()

            if len(rows) > 0:
                raise exception.GlanceMetadataExists(key=key,
                                                     volume_id=volume_id)

            vol_glance_metadata = models.VolumeGlanceMetadata()
            vol_glance_metadata.volume_id = volume_id
            vol_glance_metadata.key = key
            vol_glance_metadata.value = six.text_type(value)
            session.add(vol_glance_metadata)


@require_context
@require_snapshot_exists
def volume_glance_metadata_copy_to_snapshot(context, snapshot_id, volume_id):
    """Update the Glance metadata for a snapshot.

    This copies all of the key:value pairs from the originating volume, to
    ensure that a volume created from the snapshot will retain the
    original metadata.
    """

    session = get_session()
    with session.begin():
        metadata = _volume_glance_metadata_get(context, volume_id,
                                               session=session)
        for meta in metadata:
            vol_glance_metadata = models.VolumeGlanceMetadata()
            vol_glance_metadata.snapshot_id = snapshot_id
            vol_glance_metadata.key = meta['key']
            vol_glance_metadata.value = meta['value']

            vol_glance_metadata.save(session=session)


@require_context
def volume_glance_metadata_copy_from_volume_to_volume(context,
                                                      src_volume_id,
                                                      volume_id):
    """Update the Glance metadata for a volume.

    This copies all of the key:value pairs from the originating volume,
    to ensure that a volume created from the volume (clone) will
    retain the original metadata.
    """

    session = get_session()
    with session.begin():
        metadata = _volume_glance_metadata_get(context,
                                               src_volume_id,
                                               session=session)
        for meta in metadata:
            vol_glance_metadata = models.VolumeGlanceMetadata()
            vol_glance_metadata.volume_id = volume_id
            vol_glance_metadata.key = meta['key']
            vol_glance_metadata.value = meta['value']

            vol_glance_metadata.save(session=session)


@require_context
@require_volume_exists
def volume_glance_metadata_copy_to_volume(context, volume_id, snapshot_id):
    """Update Glance metadata from a volume.

    Update the Glance metadata from a volume (created from a snapshot) by
    copying all of the key:value pairs from the originating snapshot.

    This is so that the Glance metadata from the original volume is retained.
    """

    session = get_session()
    with session.begin():
        metadata = _volume_snapshot_glance_metadata_get(context, snapshot_id,
                                                        session=session)
        for meta in metadata:
            vol_glance_metadata = models.VolumeGlanceMetadata()
            vol_glance_metadata.volume_id = volume_id
            vol_glance_metadata.key = meta['key']
            vol_glance_metadata.value = meta['value']

            vol_glance_metadata.save(session=session)
- """ - - session = get_session() - with session.begin(): - metadata = _volume_snapshot_glance_metadata_get(context, snapshot_id, - session=session) - for meta in metadata: - vol_glance_metadata = models.VolumeGlanceMetadata() - vol_glance_metadata.volume_id = volume_id - vol_glance_metadata.key = meta['key'] - vol_glance_metadata.value = meta['value'] - - vol_glance_metadata.save(session=session) - - -@require_context -def volume_glance_metadata_delete_by_volume(context, volume_id): - model_query(context, models.VolumeGlanceMetadata, read_deleted='no').\ - filter_by(volume_id=volume_id).\ - update({'deleted': True, - 'deleted_at': timeutils.utcnow(), - 'updated_at': literal_column('updated_at')}) - - -@require_context -def volume_glance_metadata_delete_by_snapshot(context, snapshot_id): - model_query(context, models.VolumeGlanceMetadata, read_deleted='no').\ - filter_by(snapshot_id=snapshot_id).\ - update({'deleted': True, - 'deleted_at': timeutils.utcnow(), - 'updated_at': literal_column('updated_at')}) - - -############################### - - -@require_context -def backup_get(context, backup_id, read_deleted=None, project_only=True): - return _backup_get(context, backup_id, - read_deleted=read_deleted, - project_only=project_only) - - -def _backup_get(context, backup_id, session=None, read_deleted=None, - project_only=True): - result = model_query(context, models.Backup, session=session, - project_only=project_only, - read_deleted=read_deleted).\ - filter_by(id=backup_id).\ - first() - - if not result: - raise exception.BackupNotFound(backup_id=backup_id) - - return result - - -def _backup_get_all(context, filters=None, marker=None, limit=None, - offset=None, sort_keys=None, sort_dirs=None): - if filters and not is_valid_model_filters(models.Backup, filters): - return [] - - session = get_session() - with session.begin(): - # Generate the paginate query - query = _generate_paginate_query(context, session, marker, - limit, sort_keys, sort_dirs, filters, - offset, 
models.Backup) - if query is None: - return [] - return query.all() - - -def _backups_get_query(context, session=None, project_only=False): - return model_query(context, models.Backup, session=session, - project_only=project_only) - - -@apply_like_filters(model=models.Backup) -def _process_backups_filters(query, filters): - if filters: - # Ensure that filters' keys exist on the model - if not is_valid_model_filters(models.Backup, filters): - return - query = query.filter_by(**filters) - return query - - -@require_admin_context -def backup_get_all(context, filters=None, marker=None, limit=None, - offset=None, sort_keys=None, sort_dirs=None): - return _backup_get_all(context, filters, marker, limit, offset, sort_keys, - sort_dirs) - - -@require_admin_context -def backup_get_all_by_host(context, host): - return model_query(context, models.Backup).filter_by(host=host).all() - - -@require_context -def backup_get_all_by_project(context, project_id, filters=None, marker=None, - limit=None, offset=None, sort_keys=None, - sort_dirs=None): - - authorize_project_context(context, project_id) - if not filters: - filters = {} - else: - filters = filters.copy() - - filters['project_id'] = project_id - - return _backup_get_all(context, filters, marker, limit, offset, sort_keys, - sort_dirs) - - -@require_context -def backup_get_all_by_volume(context, volume_id, filters=None): - - authorize_project_context(context, volume_id) - if not filters: - filters = {} - else: - filters = filters.copy() - - filters['volume_id'] = volume_id - - return _backup_get_all(context, filters) - - -@require_context -def backup_get_all_active_by_window(context, begin, end=None, project_id=None): - """Return backups that were active during window.""" - - query = model_query(context, models.Backup, read_deleted="yes") - query = query.filter(or_(models.Backup.deleted_at == None, # noqa - models.Backup.deleted_at > begin)) - if end: - query = query.filter(models.Backup.created_at < end) - if project_id: - 
@handle_db_data_error
@require_context
def backup_create(context, values):
    """Insert a new backup row, minting an id when none is supplied."""
    backup = models.Backup()
    if not values.get('id'):
        values['id'] = str(uuid.uuid4())
    backup.update(values)

    session = get_session()
    with session.begin():
        backup.save(session)
    return backup


@handle_db_data_error
@require_context
def backup_update(context, backup_id, values):
    """Update a backup row; raises BackupNotFound when nothing matched."""
    if 'fail_reason' in values:
        # Truncate to the column width; copy so the caller's dict is
        # untouched.
        values = values.copy()
        values['fail_reason'] = (values['fail_reason'] or '')[:255]
    query = model_query(context, models.Backup, read_deleted="yes")
    result = query.filter_by(id=backup_id).update(values)
    if not result:
        raise exception.BackupNotFound(backup_id=backup_id)


@require_admin_context
def backup_destroy(context, backup_id):
    """Soft-delete a backup and return the values that were applied."""
    updated_values = {'status': fields.BackupStatus.DELETED,
                      'deleted': True,
                      'deleted_at': timeutils.utcnow(),
                      # Keep updated_at unchanged on soft delete.
                      'updated_at': literal_column('updated_at')}
    model_query(context, models.Backup).\
        filter_by(id=backup_id).\
        update(updated_values)
    del updated_values['updated_at']
    return updated_values


###############


@require_context
def _transfer_get(context, transfer_id, session=None):
    query = model_query(context, models.Transfer,
                        session=session).\
        filter_by(id=transfer_id)

    if not is_admin_context(context):
        # Non-admins may only see transfers of volumes in their project.
        volume = models.Volume
        query = query.filter(models.Transfer.volume_id == volume.id,
                             volume.project_id == context.project_id)

    result = query.first()

    if not result:
        raise exception.TransferNotFound(transfer_id=transfer_id)

    return result


@require_context
def transfer_get(context, transfer_id):
    """Return a volume transfer by id; raises TransferNotFound."""
    return _transfer_get(context, transfer_id)


def _translate_transfers(transfers):
    # Project ORM rows down to plain dicts with a fixed field set.
    fields = ('id', 'volume_id', 'display_name', 'created_at', 'deleted')
    return [{k: transfer[k] for k in fields} for transfer in transfers]


@require_admin_context
def transfer_get_all(context):
    """Return all volume transfers as plain dicts."""
    results = model_query(context, models.Transfer).all()
    return _translate_transfers(results)


@require_context
def transfer_get_all_by_project(context, project_id):
    """Return all volume transfers of one project as plain dicts."""
    authorize_project_context(context, project_id)

    query = (model_query(context, models.Transfer)
             .filter(models.Volume.id == models.Transfer.volume_id,
                     models.Volume.project_id == project_id))
    results = query.all()
    return _translate_transfers(results)


@require_context
@handle_db_data_error
def transfer_create(context, values):
    """Create a transfer and move its volume to 'awaiting-transfer'.

    :raises InvalidVolume: if the volume is not in 'available' state.
    """
    if not values.get('id'):
        values['id'] = str(uuid.uuid4())
    transfer_id = values['id']
    volume_id = values['volume_id']
    session = get_session()
    with session.begin():
        # Atomically flip the volume available -> awaiting-transfer.
        expected = {'id': volume_id,
                    'status': 'available'}
        update = {'status': 'awaiting-transfer'}
        if not conditional_update(context, models.Volume, update, expected):
            msg = (_('Transfer %(transfer_id)s: Volume id %(volume_id)s '
                     'expected in available state.')
                   % {'transfer_id': transfer_id, 'volume_id': volume_id})
            LOG.error(msg)
            raise exception.InvalidVolume(reason=msg)

        transfer = models.Transfer()
        transfer.update(values)
        session.add(transfer)
        return transfer


@require_context
@_retry_on_deadlock
def transfer_destroy(context, transfer_id):
    """Soft-delete a transfer, returning its volume to 'available'."""
    utcnow = timeutils.utcnow()
    session = get_session()
    with session.begin():
        volume_id = _transfer_get(context, transfer_id, session)['volume_id']
        expected = {'id': volume_id,
                    'status': 'awaiting-transfer'}
        update = {'status': 'available'}
        if not conditional_update(context, models.Volume, update, expected):
            # If the volume state is not 'awaiting-transfer' don't change it,
            # but we can still mark the transfer record as deleted.
            msg = (_('Transfer %(transfer_id)s: Volume expected in '
                     'awaiting-transfer state.')
                   % {'transfer_id': transfer_id})
            LOG.error(msg)

        updated_values = {'deleted': True,
                          'deleted_at': utcnow,
                          'updated_at': literal_column('updated_at')}
        (model_query(context, models.Transfer, session=session)
         .filter_by(id=transfer_id)
         .update(updated_values))
        del updated_values['updated_at']
        return updated_values


@require_context
def transfer_accept(context, transfer_id, user_id, project_id):
    """Complete a transfer: reassign the volume and delete the transfer.

    :raises InvalidVolume: if the volume left 'awaiting-transfer' state.
    """
    session = get_session()
    with session.begin():
        volume_id = _transfer_get(context, transfer_id, session)['volume_id']
        expected = {'id': volume_id,
                    'status': 'awaiting-transfer'}
        update = {'status': 'available',
                  'user_id': user_id,
                  'project_id': project_id,
                  # NOTE(review): assigning the column to itself appears to
                  # preserve updated_at across the ownership change — confirm.
                  'updated_at': models.Volume.updated_at}
        if not conditional_update(context, models.Volume, update, expected):
            msg = (_('Transfer %(transfer_id)s: Volume id %(volume_id)s '
                     'expected in awaiting-transfer state.')
                   % {'transfer_id': transfer_id, 'volume_id': volume_id})
            LOG.error(msg)
            raise exception.InvalidVolume(reason=msg)

        (session.query(models.Transfer)
         .filter_by(id=transfer_id)
         .update({'deleted': True,
                  'deleted_at': timeutils.utcnow(),
                  'updated_at': literal_column('updated_at')}))


###############


@require_admin_context
def _consistencygroup_data_get_for_project(context, project_id,
                                           session=None):
    # Count consistency groups owned by a project. The leading 0 mirrors
    # the (gigabytes, count)-style tuples returned by sibling quota
    # helpers; CGs have no size dimension.
    query = model_query(context,
                        func.count(models.ConsistencyGroup.id),
                        read_deleted="no",
                        session=session).\
        filter_by(project_id=project_id)

    result = query.first()

    return (0, result[0] or 0)


@require_context
def _consistencygroup_get(context, consistencygroup_id, session=None):
    result = model_query(context, models.ConsistencyGroup, session=session,
                         project_only=True).\
        filter_by(id=consistencygroup_id).\
        first()

    if not result:
        raise exception.ConsistencyGroupNotFound(
            consistencygroup_id=consistencygroup_id)

    return result
@require_context
def consistencygroup_get(context, consistencygroup_id):
    """Return a consistency group by id; raises when absent."""
    return _consistencygroup_get(context, consistencygroup_id)


def _consistencygroups_get_query(context, session=None, project_only=False):
    return model_query(context, models.ConsistencyGroup, session=session,
                       project_only=project_only)


def _process_consistencygroups_filters(query, filters):
    if filters:
        # Ensure that filters' keys exist on the model
        if not is_valid_model_filters(models.ConsistencyGroup, filters):
            return
        query = query.filter_by(**filters)
    return query


def _consistencygroup_get_all(context, filters=None, marker=None, limit=None,
                              offset=None, sort_keys=None, sort_dirs=None):
    # Unknown filter keys can never match anything — short-circuit.
    if filters and not is_valid_model_filters(models.ConsistencyGroup,
                                              filters):
        return []

    session = get_session()
    with session.begin():
        # Generate the paginate query
        query = _generate_paginate_query(context, session, marker,
                                         limit, sort_keys, sort_dirs, filters,
                                         offset, models.ConsistencyGroup)
        if query is None:
            return []
        return query.all()


@require_admin_context
def consistencygroup_get_all(context, filters=None, marker=None, limit=None,
                             offset=None, sort_keys=None, sort_dirs=None):
    """Retrieves all consistency groups.

    If no sort parameters are specified then the returned cgs are sorted
    first by the 'created_at' key and then by the 'id' key in descending
    order.

    :param context: context to query under
    :param marker: the last item of the previous page, used to determine the
                   next page of results to return
    :param limit: maximum number of items to return
    :param sort_keys: list of attributes by which results should be sorted,
                      paired with corresponding item in sort_dirs
    :param sort_dirs: list of directions in which results should be sorted,
                      paired with corresponding item in sort_keys
    :param filters: Filters for the query in the form of key/value.
    :returns: list of matching consistency groups
    """
    return _consistencygroup_get_all(context, filters, marker, limit, offset,
                                     sort_keys, sort_dirs)


@require_context
def consistencygroup_get_all_by_project(context, project_id, filters=None,
                                        marker=None, limit=None, offset=None,
                                        sort_keys=None, sort_dirs=None):
    """Retrieves all consistency groups in a project.

    If no sort parameters are specified then the returned cgs are sorted
    first by the 'created_at' key and then by the 'id' key in descending
    order.

    :param context: context to query under
    :param marker: the last item of the previous page, used to determine the
                   next page of results to return
    :param limit: maximum number of items to return
    :param sort_keys: list of attributes by which results should be sorted,
                      paired with corresponding item in sort_dirs
    :param sort_dirs: list of directions in which results should be sorted,
                      paired with corresponding item in sort_keys
    :param filters: Filters for the query in the form of key/value.
    :returns: list of matching consistency groups
    """
    authorize_project_context(context, project_id)
    # Copy before mutating so the caller's dict is untouched.
    if not filters:
        filters = {}
    else:
        filters = filters.copy()

    filters['project_id'] = project_id
    return _consistencygroup_get_all(context, filters, marker, limit, offset,
                                     sort_keys, sort_dirs)


@handle_db_data_error
@require_context
def consistencygroup_create(context, values, cg_snap_id=None, cg_id=None):
    """Create a consistency group, optionally sourced from a CG or snapshot.

    When cg_snap_id or cg_id is given, selected fields are copied from the
    source row via a single INSERT ... FROM SELECT so the source's existence
    and the copy happen atomically.
    """
    cg_model = models.ConsistencyGroup

    values = values.copy()
    if not values.get('id'):
        values['id'] = str(uuid.uuid4())

    session = get_session()
    with session.begin():
        if cg_snap_id:
            conditions = [cg_model.id == models.Cgsnapshot.consistencygroup_id,
                          models.Cgsnapshot.id == cg_snap_id]
        elif cg_id:
            conditions = [cg_model.id == cg_id]
        else:
            conditions = None

        if conditions:
            # We don't want duplicated field values
            names = ['volume_type_id', 'availability_zone', 'host',
                     'cluster_name']
            for name in names:
                values.pop(name, None)

            # Copied columns come from the source row; the rest are bound
            # parameters from `values`.
            fields = [getattr(cg_model, name) for name in names]
            fields.extend(bindparam(k, v) for k, v in values.items())
            sel = session.query(*fields).filter(*conditions)
            names.extend(values.keys())
            insert_stmt = cg_model.__table__.insert().from_select(names, sel)
            result = session.execute(insert_stmt)
            # If we couldn't insert the row because of the conditions raise
            # the right exception
            if not result.rowcount:
                if cg_id:
                    raise exception.ConsistencyGroupNotFound(
                        consistencygroup_id=cg_id)
                raise exception.CgSnapshotNotFound(cgsnapshot_id=cg_snap_id)
        else:
            consistencygroup = cg_model()
            consistencygroup.update(values)
            session.add(consistencygroup)

        return _consistencygroup_get(context, values['id'], session=session)


# NOTE(review): consistencygroup_update continues beyond this chunk; the
# visible portion is kept as-is.
@handle_db_data_error
@require_context
def consistencygroup_update(context, consistencygroup_id, values):
    query = model_query(context, models.ConsistencyGroup, project_only=True)
    result = query.filter_by(id=consistencygroup_id).update(values)
- if not result: - raise exception.ConsistencyGroupNotFound( - consistencygroup_id=consistencygroup_id) - - -@require_admin_context -def consistencygroup_destroy(context, consistencygroup_id): - utcnow = timeutils.utcnow() - session = get_session() - with session.begin(): - updated_values = {'status': fields.ConsistencyGroupStatus.DELETED, - 'deleted': True, - 'deleted_at': utcnow, - 'updated_at': literal_column('updated_at')} - model_query(context, models.ConsistencyGroup, session=session).\ - filter_by(id=consistencygroup_id).\ - update({'status': fields.ConsistencyGroupStatus.DELETED, - 'deleted': True, - 'deleted_at': utcnow, - 'updated_at': literal_column('updated_at')}) - - del updated_values['updated_at'] - return updated_values - - -@require_admin_context -def cg_cgsnapshot_destroy_all_by_ids(context, cg_ids, cgsnapshot_ids, - volume_ids, snapshot_ids, session): - utcnow = timeutils.utcnow() - if snapshot_ids: - snaps = (model_query(context, models.Snapshot, - session=session, read_deleted="no"). - filter(models.Snapshot.id.in_(snapshot_ids)). - all()) - for snap in snaps: - snap.update({'cgsnapshot_id': None, - 'updated_at': utcnow}) - - if cgsnapshot_ids: - cg_snaps = (model_query(context, models.Cgsnapshot, - session=session, read_deleted="no"). - filter(models.Cgsnapshot.id.in_(cgsnapshot_ids)). - all()) - - for cg_snap in cg_snaps: - cg_snap.delete(session=session) - - if volume_ids: - vols = (model_query(context, models.Volume, - session=session, read_deleted="no"). - filter(models.Volume.id.in_(volume_ids)). - all()) - for vol in vols: - vol.update({'consistencygroup_id': None, - 'updated_at': utcnow}) - - if cg_ids: - cgs = (model_query(context, models.ConsistencyGroup, - session=session, read_deleted="no"). - filter(models.ConsistencyGroup.id.in_(cg_ids)). 
- all()) - - for cg in cgs: - cg.delete(session=session) - - -def cg_has_cgsnapshot_filter(): - """Return a filter that checks if a CG has CG Snapshots.""" - return sql.exists().where(and_( - models.Cgsnapshot.consistencygroup_id == models.ConsistencyGroup.id, - ~models.Cgsnapshot.deleted)) - - -def cg_has_volumes_filter(attached_or_with_snapshots=False): - """Return a filter to check if a CG has volumes. - - When attached_or_with_snapshots parameter is given a True value only - attached volumes or those with snapshots will be considered. - """ - query = sql.exists().where( - and_(models.Volume.consistencygroup_id == models.ConsistencyGroup.id, - ~models.Volume.deleted)) - - if attached_or_with_snapshots: - query = query.where(or_( - models.Volume.attach_status == 'attached', - sql.exists().where( - and_(models.Volume.id == models.Snapshot.volume_id, - ~models.Snapshot.deleted)))) - return query - - -def cg_creating_from_src(cg_id=None, cgsnapshot_id=None): - """Return a filter to check if a CG is being used as creation source. - - Returned filter is meant to be used in the Conditional Update mechanism and - checks if provided CG ID or CG Snapshot ID is currently being used to - create another CG. - - This filter will not include CGs that have used the ID but have already - finished their creation (status is no longer creating). - - Filter uses a subquery that allows it to be used on updates to the - consistencygroups table. - """ - # NOTE(geguileo): As explained in devref api_conditional_updates we use a - # subquery to trick MySQL into using the same table in the update and the - # where clause. 
- subq = sql.select([models.ConsistencyGroup]).where( - and_(~models.ConsistencyGroup.deleted, - models.ConsistencyGroup.status == 'creating')).alias('cg2') - - if cg_id: - match_id = subq.c.source_cgid == cg_id - elif cgsnapshot_id: - match_id = subq.c.cgsnapshot_id == cgsnapshot_id - else: - msg = _('cg_creating_from_src must be called with cg_id or ' - 'cgsnapshot_id parameter.') - raise exception.ProgrammingError(reason=msg) - - return sql.exists([subq]).where(match_id) - - -@require_admin_context -def consistencygroup_include_in_cluster(context, cluster, - partial_rename=True, **filters): - """Include all consistency groups matching the filters into a cluster.""" - return _include_in_cluster(context, cluster, models.ConsistencyGroup, - partial_rename, filters) - - -############################### - - -@require_admin_context -def _group_data_get_for_project(context, project_id, - session=None): - query = model_query(context, - func.count(models.Group.id), - read_deleted="no", - session=session).\ - filter_by(project_id=project_id) - - result = query.first() - - return (0, result[0] or 0) - - -@require_context -def _group_get(context, group_id, session=None): - result = (model_query(context, models.Group, session=session, - project_only=True). - filter_by(id=group_id). 
- first()) - - if not result: - raise exception.GroupNotFound(group_id=group_id) - - return result - - -@require_context -def group_get(context, group_id): - return _group_get(context, group_id) - - -def _groups_get_query(context, session=None, project_only=False): - return model_query(context, models.Group, session=session, - project_only=project_only) - - -def _group_snapshot_get_query(context, session=None, project_only=False): - return model_query(context, models.GroupSnapshot, session=session, - project_only=project_only) - - -@apply_like_filters(model=models.Group) -def _process_groups_filters(query, filters): - if filters: - # NOTE(xyang): backend_match_level needs to be handled before - # is_valid_model_filters is called as it is not a column name - # in the db. - backend_match_level = filters.pop('backend_match_level', 'backend') - # host is a valid filter. Filter the query by host and - # backend_match_level first. - host = filters.pop('host', None) - if host: - query = query.filter(_filter_host(models.Group.host, host, - match_level=backend_match_level)) - # Ensure that filters' keys exist on the model - if not is_valid_model_filters(models.Group, filters): - return - query = query.filter_by(**filters) - return query - - -@apply_like_filters(model=models.GroupSnapshot) -def _process_group_snapshot_filters(query, filters): - if filters: - # Ensure that filters' keys exist on the model - if not is_valid_model_filters(models.GroupSnapshot, filters): - return - query = query.filter_by(**filters) - return query - - -def _group_get_all(context, filters=None, marker=None, limit=None, - offset=None, sort_keys=None, sort_dirs=None): - # No need to call is_valid_model_filters here. It is called - # in _process_group_filters when _generate_paginate_query - # is called below. 
- session = get_session() - with session.begin(): - # Generate the paginate query - query = _generate_paginate_query(context, session, marker, - limit, sort_keys, sort_dirs, filters, - offset, models.Group) - - return query.all() if query else [] - - -@require_admin_context -def group_get_all(context, filters=None, marker=None, limit=None, - offset=None, sort_keys=None, sort_dirs=None): - """Retrieves all groups. - - If no sort parameters are specified then the returned groups are sorted - first by the 'created_at' key and then by the 'id' key in descending - order. - - :param context: context to query under - :param marker: the last item of the previous page, used to determine the - next page of results to return - :param limit: maximum number of items to return - :param sort_keys: list of attributes by which results should be sorted, - paired with corresponding item in sort_dirs - :param sort_dirs: list of directions in which results should be sorted, - paired with corresponding item in sort_keys - :param filters: Filters for the query in the form of key/value. - :returns: list of matching groups - """ - return _group_get_all(context, filters, marker, limit, offset, - sort_keys, sort_dirs) - - -@require_context -def group_get_all_by_project(context, project_id, filters=None, - marker=None, limit=None, offset=None, - sort_keys=None, sort_dirs=None): - """Retrieves all groups in a project. - - If no sort parameters are specified then the returned groups are sorted - first by the 'created_at' key and then by the 'id' key in descending - order. 
- - :param context: context to query under - :param marker: the last item of the previous page, used to determine the - next page of results to return - :param limit: maximum number of items to return - :param sort_keys: list of attributes by which results should be sorted, - paired with corresponding item in sort_dirs - :param sort_dirs: list of directions in which results should be sorted, - paired with corresponding item in sort_keys - :param filters: Filters for the query in the form of key/value. - :returns: list of matching groups - """ - authorize_project_context(context, project_id) - if not filters: - filters = {} - else: - filters = filters.copy() - - filters['project_id'] = project_id - return _group_get_all(context, filters, marker, limit, offset, - sort_keys, sort_dirs) - - -@handle_db_data_error -@require_context -def group_create(context, values, group_snapshot_id=None, - source_group_id=None): - group_model = models.Group - - values = values.copy() - if not values.get('id'): - values['id'] = six.text_type(uuid.uuid4()) - - session = get_session() - with session.begin(): - if group_snapshot_id: - conditions = [group_model.id == models.GroupSnapshot.group_id, - models.GroupSnapshot.id == group_snapshot_id] - elif source_group_id: - conditions = [group_model.id == source_group_id] - else: - conditions = None - - if conditions: - # We don't want duplicated field values - values.pop('group_type_id', None) - values.pop('availability_zone', None) - values.pop('host', None) - # NOTE(xyang): Save volume_type_ids to update later. 
- volume_type_ids = values.pop('volume_type_ids', []) - - sel = session.query(group_model.group_type_id, - group_model.availability_zone, - group_model.host, - *(bindparam(k, v) for k, v in values.items()) - ).filter(*conditions) - names = ['group_type_id', 'availability_zone', 'host'] - names.extend(values.keys()) - insert_stmt = group_model.__table__.insert().from_select( - names, sel) - result = session.execute(insert_stmt) - # If we couldn't insert the row because of the conditions raise - # the right exception - if not result.rowcount: - if source_group_id: - raise exception.GroupNotFound( - group_id=source_group_id) - raise exception.GroupSnapshotNotFound( - group_snapshot_id=group_snapshot_id) - - for item in volume_type_ids: - mapping = models.GroupVolumeTypeMapping() - mapping['volume_type_id'] = item - mapping['group_id'] = values['id'] - session.add(mapping) - else: - for item in values.get('volume_type_ids') or []: - mapping = models.GroupVolumeTypeMapping() - mapping['volume_type_id'] = item - mapping['group_id'] = values['id'] - session.add(mapping) - - group = group_model() - group.update(values) - session.add(group) - - return _group_get(context, values['id'], session=session) - - -@handle_db_data_error -@require_context -def group_volume_type_mapping_create(context, group_id, volume_type_id): - """Add group volume_type mapping entry.""" - # Verify group exists - _group_get(context, group_id) - # Verify volume type exists - _volume_type_get_id_from_volume_type(context, volume_type_id) - - existing = _group_volume_type_mapping_get_all_by_group_volume_type( - context, group_id, volume_type_id) - if existing: - raise exception.GroupVolumeTypeMappingExists( - group_id=group_id, - volume_type_id=volume_type_id) - - mapping = models.GroupVolumeTypeMapping() - mapping.update({"group_id": group_id, - "volume_type_id": volume_type_id}) - - session = get_session() - with session.begin(): - try: - mapping.save(session=session) - except db_exc.DBDuplicateEntry: 
- raise exception.GroupVolumeTypeMappingExists( - group_id=group_id, - volume_type_id=volume_type_id) - return mapping - - -@handle_db_data_error -@require_context -def group_update(context, group_id, values): - query = model_query(context, models.Group, project_only=True) - result = query.filter_by(id=group_id).update(values) - if not result: - raise exception.GroupNotFound(group_id=group_id) - - -@require_admin_context -def group_destroy(context, group_id): - session = get_session() - with session.begin(): - (model_query(context, models.Group, session=session). - filter_by(id=group_id). - update({'status': fields.GroupStatus.DELETED, - 'deleted': True, - 'deleted_at': timeutils.utcnow(), - 'updated_at': literal_column('updated_at')})) - - (session.query(models.GroupVolumeTypeMapping). - filter_by(group_id=group_id). - update({'deleted': True, - 'deleted_at': timeutils.utcnow(), - 'updated_at': literal_column('updated_at')})) - - -def group_has_group_snapshot_filter(): - return sql.exists().where(and_( - models.GroupSnapshot.group_id == models.Group.id, - ~models.GroupSnapshot.deleted)) - - -def group_has_volumes_filter(attached_or_with_snapshots=False): - query = sql.exists().where( - and_(models.Volume.group_id == models.Group.id, - ~models.Volume.deleted)) - - if attached_or_with_snapshots: - query = query.where(or_( - models.Volume.attach_status == 'attached', - sql.exists().where( - and_(models.Volume.id == models.Snapshot.volume_id, - ~models.Snapshot.deleted)))) - return query - - -def group_creating_from_src(group_id=None, group_snapshot_id=None): - # NOTE(geguileo): As explained in devref api_conditional_updates we use a - # subquery to trick MySQL into using the same table in the update and the - # where clause. 
- subq = sql.select([models.Group]).where( - and_(~models.Group.deleted, - models.Group.status == 'creating')).alias('group2') - - if group_id: - match_id = subq.c.source_group_id == group_id - elif group_snapshot_id: - match_id = subq.c.group_snapshot_id == group_snapshot_id - else: - msg = _('group_creating_from_src must be called with group_id or ' - 'group_snapshot_id parameter.') - raise exception.ProgrammingError(reason=msg) - - return sql.exists([subq]).where(match_id) - - -############################### - - -@require_context -def _cgsnapshot_get(context, cgsnapshot_id, session=None): - result = model_query(context, models.Cgsnapshot, session=session, - project_only=True).\ - filter_by(id=cgsnapshot_id).\ - first() - - if not result: - raise exception.CgSnapshotNotFound(cgsnapshot_id=cgsnapshot_id) - - return result - - -@require_context -def cgsnapshot_get(context, cgsnapshot_id): - return _cgsnapshot_get(context, cgsnapshot_id) - - -def is_valid_model_filters(model, filters, exclude_list=None): - """Return True if filter values exist on the model - - :param model: a Cinder model - :param filters: dictionary of filters - """ - for key in filters.keys(): - if exclude_list and key in exclude_list: - continue - try: - key = key.rstrip('~') - getattr(model, key) - except AttributeError: - LOG.debug("'%s' filter key is not valid.", key) - return False - return True - - -def _cgsnapshot_get_all(context, project_id=None, group_id=None, filters=None): - query = model_query(context, models.Cgsnapshot) - - if filters: - if not is_valid_model_filters(models.Cgsnapshot, filters): - return [] - query = query.filter_by(**filters) - - if project_id: - query = query.filter_by(project_id=project_id) - - if group_id: - query = query.filter_by(consistencygroup_id=group_id) - - return query.all() - - -@require_admin_context -def cgsnapshot_get_all(context, filters=None): - return _cgsnapshot_get_all(context, filters=filters) - - -@require_admin_context -def 
cgsnapshot_get_all_by_group(context, group_id, filters=None): - return _cgsnapshot_get_all(context, group_id=group_id, filters=filters) - - -@require_context -def cgsnapshot_get_all_by_project(context, project_id, filters=None): - authorize_project_context(context, project_id) - return _cgsnapshot_get_all(context, project_id=project_id, filters=filters) - - -@handle_db_data_error -@require_context -def cgsnapshot_create(context, values): - if not values.get('id'): - values['id'] = str(uuid.uuid4()) - - cg_id = values.get('consistencygroup_id') - session = get_session() - model = models.Cgsnapshot - with session.begin(): - if cg_id: - # There has to exist at least 1 volume in the CG and the CG cannot - # be updating the composing volumes or being created. - conditions = [ - sql.exists().where(and_( - ~models.Volume.deleted, - models.Volume.consistencygroup_id == cg_id)), - ~models.ConsistencyGroup.deleted, - models.ConsistencyGroup.id == cg_id, - ~models.ConsistencyGroup.status.in_(('creating', 'updating'))] - - # NOTE(geguileo): We build a "fake" from_select clause instead of - # using transaction isolation on the session because we would need - # SERIALIZABLE level and that would have a considerable performance - # penalty. - binds = (bindparam(k, v) for k, v in values.items()) - sel = session.query(*binds).filter(*conditions) - insert_stmt = model.__table__.insert().from_select(values.keys(), - sel) - result = session.execute(insert_stmt) - # If we couldn't insert the row because of the conditions raise - # the right exception - if not result.rowcount: - msg = _("Source CG cannot be empty or in 'creating' or " - "'updating' state. 
No cgsnapshot will be created.") - raise exception.InvalidConsistencyGroup(reason=msg) - else: - cgsnapshot = model() - cgsnapshot.update(values) - session.add(cgsnapshot) - return _cgsnapshot_get(context, values['id'], session=session) - - -@require_context -@handle_db_data_error -def cgsnapshot_update(context, cgsnapshot_id, values): - query = model_query(context, models.Cgsnapshot, project_only=True) - result = query.filter_by(id=cgsnapshot_id).update(values) - if not result: - raise exception.CgSnapshotNotFound(cgsnapshot_id=cgsnapshot_id) - - -@require_admin_context -def cgsnapshot_destroy(context, cgsnapshot_id): - session = get_session() - with session.begin(): - updated_values = {'status': 'deleted', - 'deleted': True, - 'deleted_at': timeutils.utcnow(), - 'updated_at': literal_column('updated_at')} - model_query(context, models.Cgsnapshot, session=session).\ - filter_by(id=cgsnapshot_id).\ - update(updated_values) - del updated_values['updated_at'] - return updated_values - - -def cgsnapshot_creating_from_src(): - """Get a filter that checks if a CGSnapshot is being created from a CG.""" - return sql.exists().where(and_( - models.Cgsnapshot.consistencygroup_id == models.ConsistencyGroup.id, - ~models.Cgsnapshot.deleted, - models.Cgsnapshot.status == 'creating')) - - -############################### - - -@require_context -def _group_snapshot_get(context, group_snapshot_id, session=None): - result = model_query(context, models.GroupSnapshot, session=session, - project_only=True).\ - filter_by(id=group_snapshot_id).\ - first() - - if not result: - raise exception.GroupSnapshotNotFound( - group_snapshot_id=group_snapshot_id) - - return result - - -@require_context -def group_snapshot_get(context, group_snapshot_id): - return _group_snapshot_get(context, group_snapshot_id) - - -def _group_snapshot_get_all(context, filters=None, marker=None, limit=None, - offset=None, sort_keys=None, sort_dirs=None): - if filters and not 
is_valid_model_filters(models.GroupSnapshot, - filters): - return [] - - session = get_session() - with session.begin(): - # Generate the paginate query - query = _generate_paginate_query(context, session, marker, - limit, sort_keys, sort_dirs, filters, - offset, models.GroupSnapshot) - - return query.all() if query else [] - - -@require_admin_context -def group_snapshot_get_all(context, filters=None, marker=None, limit=None, - offset=None, sort_keys=None, sort_dirs=None): - - return _group_snapshot_get_all(context, filters, marker, limit, offset, - sort_keys, sort_dirs) - - -@require_admin_context -def group_snapshot_get_all_by_group(context, group_id, filters=None, - marker=None, limit=None, offset=None, - sort_keys=None, sort_dirs=None): - if filters is None: - filters = {} - if group_id: - filters['group_id'] = group_id - return _group_snapshot_get_all(context, filters, marker, limit, offset, - sort_keys, sort_dirs) - - -@require_context -def group_snapshot_get_all_by_project(context, project_id, filters=None, - marker=None, limit=None, offset=None, - sort_keys=None, sort_dirs=None): - authorize_project_context(context, project_id) - if filters is None: - filters = {} - if project_id: - filters['project_id'] = project_id - return _group_snapshot_get_all(context, filters, marker, limit, offset, - sort_keys, sort_dirs) - - -@handle_db_data_error -@require_context -def group_snapshot_create(context, values): - if not values.get('id'): - values['id'] = six.text_type(uuid.uuid4()) - - group_id = values.get('group_id') - session = get_session() - model = models.GroupSnapshot - with session.begin(): - if group_id: - # There has to exist at least 1 volume in the group and the group - # cannot be updating the composing volumes or being created. 
- conditions = [ - sql.exists().where(and_( - ~models.Volume.deleted, - models.Volume.group_id == group_id)), - ~models.Group.deleted, - models.Group.id == group_id, - ~models.Group.status.in_(('creating', 'updating'))] - - # NOTE(geguileo): We build a "fake" from_select clause instead of - # using transaction isolation on the session because we would need - # SERIALIZABLE level and that would have a considerable performance - # penalty. - binds = (bindparam(k, v) for k, v in values.items()) - sel = session.query(*binds).filter(*conditions) - insert_stmt = model.__table__.insert().from_select(values.keys(), - sel) - result = session.execute(insert_stmt) - # If we couldn't insert the row because of the conditions raise - # the right exception - if not result.rowcount: - msg = _("Source group cannot be empty or in 'creating' or " - "'updating' state. No group snapshot will be created.") - raise exception.InvalidGroup(reason=msg) - else: - group_snapshot = model() - group_snapshot.update(values) - session.add(group_snapshot) - return _group_snapshot_get(context, values['id'], session=session) - - -@require_context -@handle_db_data_error -def group_snapshot_update(context, group_snapshot_id, values): - session = get_session() - with session.begin(): - result = model_query(context, models.GroupSnapshot, - project_only=True).\ - filter_by(id=group_snapshot_id).\ - first() - - if not result: - raise exception.GroupSnapshotNotFound( - _("No group snapshot with id %s") % group_snapshot_id) - - result.update(values) - result.save(session=session) - return result - - -@require_admin_context -def group_snapshot_destroy(context, group_snapshot_id): - session = get_session() - with session.begin(): - updated_values = {'status': 'deleted', - 'deleted': True, - 'deleted_at': timeutils.utcnow(), - 'updated_at': literal_column('updated_at')} - model_query(context, models.GroupSnapshot, session=session).\ - filter_by(id=group_snapshot_id).\ - update(updated_values) - del 
updated_values['updated_at'] - return updated_values - - -def group_snapshot_creating_from_src(): - """Get a filter to check if a grp snapshot is being created from a grp.""" - return sql.exists().where(and_( - models.GroupSnapshot.group_id == models.Group.id, - ~models.GroupSnapshot.deleted, - models.GroupSnapshot.status == 'creating')) - - -############################### - - -@require_admin_context -def purge_deleted_rows(context, age_in_days): - """Purge deleted rows older than age from cinder tables.""" - try: - age_in_days = int(age_in_days) - except ValueError: - msg = _('Invalid value for age, %(age)s') % {'age': age_in_days} - LOG.exception(msg) - raise exception.InvalidParameterValue(msg) - - engine = get_engine() - session = get_session() - metadata = MetaData() - metadata.reflect(engine) - - for table in reversed(metadata.sorted_tables): - if 'deleted' not in table.columns.keys(): - continue - LOG.info('Purging deleted rows older than age=%(age)d days ' - 'from table=%(table)s', {'age': age_in_days, - 'table': table}) - deleted_age = timeutils.utcnow() - dt.timedelta(days=age_in_days) - try: - with session.begin(): - # Delete child records first from quality_of_service_specs - # table to avoid FK constraints - if six.text_type(table) == "quality_of_service_specs": - session.query(models.QualityOfServiceSpecs).filter( - and_(models.QualityOfServiceSpecs.specs_id.isnot( - None), models.QualityOfServiceSpecs.deleted == 1, - models.QualityOfServiceSpecs.deleted_at < - deleted_age)).delete() - result = session.execute( - table.delete() - .where(table.c.deleted_at < deleted_age)) - except db_exc.DBReferenceError as ex: - LOG.error('DBError detected when purging from ' - '%(tablename)s: %(error)s.', - {'tablename': table, 'error': ex}) - raise - - rows_purged = result.rowcount - if rows_purged != 0: - LOG.info("Deleted %(row)d rows from table=%(table)s", - {'row': rows_purged, 'table': table}) - - -############################### - - -def 
_translate_messages(messages): - return [_translate_message(message) for message in messages] - - -def _translate_message(message): - """Translate the Message model to a dict.""" - return { - 'id': message['id'], - 'project_id': message['project_id'], - 'request_id': message['request_id'], - 'resource_type': message['resource_type'], - 'resource_uuid': message.get('resource_uuid'), - 'event_id': message['event_id'], - 'detail_id': message['detail_id'], - 'action_id': message['action_id'], - 'message_level': message['message_level'], - 'created_at': message['created_at'], - 'expires_at': message.get('expires_at'), - } - - -def _message_get(context, message_id, session=None): - query = model_query(context, - models.Message, - read_deleted="no", - project_only="yes", - session=session) - result = query.filter_by(id=message_id).first() - if not result: - raise exception.MessageNotFound(message_id=message_id) - return result - - -@require_context -def message_get(context, message_id, session=None): - result = _message_get(context, message_id, session) - return _translate_message(result) - - -@require_context -def message_get_all(context, filters=None, marker=None, limit=None, - offset=None, sort_keys=None, sort_dirs=None): - """Retrieves all messages. - - If no sort parameters are specified then the returned messages are - sorted first by the 'created_at' key and then by the 'id' key in - descending order. 
- - :param context: context to query under - :param marker: the last item of the previous page, used to determine the - next page of results to return - :param limit: maximum number of items to return - :param sort_keys: list of attributes by which results should be sorted, - paired with corresponding item in sort_dirs - :param sort_dirs: list of directions in which results should be sorted, - paired with corresponding item in sort_keys - :param filters: dictionary of filters; values that are in lists, tuples, - or sets cause an 'IN' operation, while exact matching - is used for other values, see - _process_messages_filters function for more - information - :returns: list of matching messages - """ - messages = models.Message - - session = get_session() - with session.begin(): - # Generate the paginate query - query = _generate_paginate_query(context, session, marker, - limit, sort_keys, sort_dirs, filters, - offset, messages) - if query is None: - return [] - results = query.all() - return _translate_messages(results) - - -@apply_like_filters(model=models.Message) -def _process_messages_filters(query, filters): - if filters: - # Ensure that filters' keys exist on the model - if not is_valid_model_filters(models.Message, filters): - return None - query = query.filter_by(**filters) - return query - - -def _messages_get_query(context, session=None, project_only=False): - return model_query(context, models.Message, session=session, - project_only=project_only) - - -@require_context -def message_create(context, values): - message_ref = models.Message() - if not values.get('id'): - values['id'] = str(uuid.uuid4()) - message_ref.update(values) - - session = get_session() - with session.begin(): - session.add(message_ref) - - -@require_admin_context -def message_destroy(context, message): - session = get_session() - now = timeutils.utcnow() - with session.begin(): - updated_values = {'deleted': True, - 'deleted_at': now, - 'updated_at': literal_column('updated_at')} - 
(model_query(context, models.Message, session=session). - filter_by(id=message.get('id')). - update(updated_values)) - del updated_values['updated_at'] - return updated_values - - -@require_admin_context -def cleanup_expired_messages(context): - session = get_session() - now = timeutils.utcnow() - with session.begin(): - # NOTE(tommylikehu): Directly delete the expired - # messages here. - return session.query(models.Message).filter( - models.Message.expires_at < now).delete() - - -############################### - - -@require_context -def driver_initiator_data_insert_by_key(context, initiator, namespace, - key, value): - data = models.DriverInitiatorData() - data.initiator = initiator - data.namespace = namespace - data.key = key - data.value = value - session = get_session() - try: - with session.begin(): - session.add(data) - return True - except db_exc.DBDuplicateEntry: - return False - - -@require_context -def driver_initiator_data_get(context, initiator, namespace): - session = get_session() - with session.begin(): - return session.query(models.DriverInitiatorData).\ - filter_by(initiator=initiator).\ - filter_by(namespace=namespace).\ - all() - - -############################### - - -PAGINATION_HELPERS = { - models.Volume: (_volume_get_query, _process_volume_filters, _volume_get), - models.Snapshot: (_snaps_get_query, _process_snaps_filters, _snapshot_get), - models.Backup: (_backups_get_query, _process_backups_filters, _backup_get), - models.QualityOfServiceSpecs: (_qos_specs_get_query, - _process_qos_specs_filters, _qos_specs_get), - models.VolumeTypes: (_volume_type_get_query, _process_volume_types_filters, - _volume_type_get_db_object), - models.ConsistencyGroup: (_consistencygroups_get_query, - _process_consistencygroups_filters, - _consistencygroup_get), - models.Message: (_messages_get_query, _process_messages_filters, - _message_get), - models.GroupTypes: (_group_type_get_query, _process_group_types_filters, - _group_type_get_db_object), - 
###############################


@require_context
def image_volume_cache_create(context, host, cluster_name, image_id,
                              image_updated_at, volume_id, size):
    """Add a new image-volume cache entry and return it."""
    session = get_session()
    with session.begin():
        entry = models.ImageVolumeCacheEntry()
        entry.host = host
        entry.cluster_name = cluster_name
        entry.image_id = image_id
        entry.image_updated_at = image_updated_at
        entry.volume_id = volume_id
        entry.size = size
        session.add(entry)
        return entry


@require_context
def image_volume_cache_delete(context, volume_id):
    """Remove the cache entry that points at the given volume."""
    session = get_session()
    with session.begin():
        query = session.query(models.ImageVolumeCacheEntry)
        query.filter_by(volume_id=volume_id).delete()


@require_context
def image_volume_cache_get_and_update_last_used(context, image_id, **filters):
    """Fetch the most recently used entry for an image and touch it."""
    filters = _clean_filters(filters)
    session = get_session()
    with session.begin():
        entry = (session.query(models.ImageVolumeCacheEntry)
                 .filter_by(image_id=image_id)
                 .filter_by(**filters)
                 .order_by(desc(models.ImageVolumeCacheEntry.last_used))
                 .first())
        if entry:
            # Bump last_used so LRU eviction keeps this entry around.
            entry.last_used = timeutils.utcnow()
            entry.save(session=session)
        return entry


@require_context
def image_volume_cache_get_by_volume_id(context, volume_id):
    """Return the cache entry backed by the given volume, or None."""
    session = get_session()
    with session.begin():
        return (session.query(models.ImageVolumeCacheEntry)
                .filter_by(volume_id=volume_id)
                .first())


@require_context
def image_volume_cache_get_all(context, **filters):
    """Return all cache entries matching the filters, most recent first."""
    filters = _clean_filters(filters)
    session = get_session()
    with session.begin():
        return (session.query(models.ImageVolumeCacheEntry)
                .filter_by(**filters)
                .order_by(desc(models.ImageVolumeCacheEntry.last_used))
                .all())


@require_admin_context
def image_volume_cache_include_in_cluster(context, cluster,
                                          partial_rename=True, **filters):
    """Include all volumes matching the filters into a cluster."""
    filters = _clean_filters(filters)
    return _include_in_cluster(context, cluster, models.ImageVolumeCacheEntry,
                               partial_rename, filters)


###################


def _worker_query(context, session=None, until=None, db_filters=None,
                  ignore_sentinel=True, **filters):
    """Build the base query over the workers table, or None on bad filters."""
    # Remove all filters based on the workers table that are set to None
    filters = _clean_filters(filters)

    if filters and not is_valid_model_filters(models.Worker, filters):
        return None

    query = model_query(context, models.Worker, session=session)

    # TODO(geguileo): Once we remove support for MySQL 5.5 we can remove this
    if ignore_sentinel:
        # We don't want to retrieve the workers sentinel
        query = query.filter(models.Worker.resource_type != 'SENTINEL')

    conditions = list(db_filters) if db_filters else []
    if until:
        # Since we set updated_at at creation time we don't need to check
        # created_at field.
        conditions.append(models.Worker.updated_at <= until)
    if conditions:
        query = query.filter(and_(*conditions))

    if filters:
        query = query.filter_by(**filters)

    return query


# Assume modern datetime resolution until workers_init() proves otherwise.
DB_SUPPORTS_SUBSECOND_RESOLUTION = True


def workers_init():
    """Check if DB supports subsecond resolution and set global flag.

    MySQL 5.5 doesn't support subsecond resolution in datetime fields, so we
    have to take it into account when working with the worker's table.

    To do this we'll have 1 row in the DB, created by the migration script,
    where we have tried to set the microseconds and we'll check it.

    Once we drop support for MySQL 5.5 we can remove this method.
    """
    global DB_SUPPORTS_SUBSECOND_RESOLUTION
    session = get_session()
    sentinel = session.query(models.Worker).filter_by(
        resource_type='SENTINEL').first()
    DB_SUPPORTS_SUBSECOND_RESOLUTION = bool(sentinel.updated_at.microsecond)
- """ - global DB_SUPPORTS_SUBSECOND_RESOLUTION - session = get_session() - query = session.query(models.Worker).filter_by(resource_type='SENTINEL') - worker = query.first() - DB_SUPPORTS_SUBSECOND_RESOLUTION = bool(worker.updated_at.microsecond) - - -def _worker_set_updated_at_field(values): - # TODO(geguileo): Once we drop support for MySQL 5.5 we can simplify this - # method. - updated_at = values.get('updated_at', timeutils.utcnow()) - if isinstance(updated_at, six.string_types): - return - if not DB_SUPPORTS_SUBSECOND_RESOLUTION: - updated_at = updated_at.replace(microsecond=0) - values['updated_at'] = updated_at - - -def worker_create(context, **values): - """Create a worker entry from optional arguments.""" - _worker_set_updated_at_field(values) - worker = models.Worker(**values) - session = get_session() - try: - with session.begin(): - worker.save(session) - except db_exc.DBDuplicateEntry: - raise exception.WorkerExists(type=values.get('resource_type'), - id=values.get('resource_id')) - return worker - - -def worker_get(context, **filters): - """Get a worker or raise exception if it does not exist.""" - query = _worker_query(context, **filters) - worker = query.first() if query else None - if not worker: - raise exception.WorkerNotFound(**filters) - return worker - - -def worker_get_all(context, **filters): - """Get all workers that match given criteria.""" - query = _worker_query(context, **filters) - return query.all() if query else [] - - -def _orm_worker_update(worker, values): - if not worker: - return - for key, value in values.items(): - setattr(worker, key, value) - - -def worker_update(context, id, filters=None, orm_worker=None, **values): - """Update a worker with given values.""" - filters = filters or {} - query = _worker_query(context, id=id, **filters) - - # If we want to update the orm_worker and we don't set the update_at field - # we set it here instead of letting SQLAlchemy do it to be able to update - # the orm_worker. 
- _worker_set_updated_at_field(values) - reference = orm_worker or models.Worker - values['race_preventer'] = reference.race_preventer + 1 - result = query.update(values) - if not result: - raise exception.WorkerNotFound(id=id, **filters) - _orm_worker_update(orm_worker, values) - return result - - -def worker_claim_for_cleanup(context, claimer_id, orm_worker): - """Claim a worker entry for cleanup.""" - # We set updated_at value so we are sure we update the DB entry even if the - # service_id is the same in the DB, thus flagging the claim. - values = {'service_id': claimer_id, - 'race_preventer': orm_worker.race_preventer + 1, - 'updated_at': timeutils.utcnow()} - _worker_set_updated_at_field(values) - - # We only update the worker entry if it hasn't been claimed by other host - # or thread - query = _worker_query(context, - status=orm_worker.status, - service_id=orm_worker.service_id, - race_preventer=orm_worker.race_preventer, - until=orm_worker.updated_at, - id=orm_worker.id) - - result = query.update(values, synchronize_session=False) - if result: - _orm_worker_update(orm_worker, values) - return result - - -def worker_destroy(context, **filters): - """Delete a worker (no soft delete).""" - query = _worker_query(context, **filters) - return query.delete() - - -############################### - - -@require_context -def resource_exists(context, model, resource_id, session=None): - # Match non deleted resources by the id - conditions = [model.id == resource_id, ~model.deleted] - # If the context is not admin we limit it to the context's project - if is_user_context(context) and hasattr(model, 'project_id'): - conditions.append(model.project_id == context.project_id) - session = session or get_session() - query = session.query(sql.exists().where(and_(*conditions))) - return query.scalar() - - -def get_model_for_versioned_object(versioned_object): - # Exceptions to model mapping, in general Versioned Objects have the same - # name as their ORM models counterparts, 
# NOTE(review): collections.Iterable was deprecated in Python 3.3 and removed
# in Python 3.10; on Python 3 the ABCs live in collections.abc, while on
# Python 2 they live directly on collections.
try:
    from collections import abc as _collections_abc
except ImportError:  # Python 2
    import collections as _collections_abc


def _get_get_method(model):
    """Return the <model>_get function for an ORM model class.

    In general method names are a simple conversion changing the ORM name
    from camel case to snake format and adding _get to the string.
    """
    # Models whose getter does not follow the naming convention.
    GET_EXCEPTIONS = {
        models.ConsistencyGroup: consistencygroup_get,
        models.VolumeTypes: _volume_type_get_full,
        models.QualityOfServiceSpecs: qos_specs_get,
        models.GroupTypes: _group_type_get_full,
    }

    if model in GET_EXCEPTIONS:
        return GET_EXCEPTIONS[model]

    # General conversion
    # Convert camel cased model name to snake format
    s = re.sub('(.)([A-Z][a-z]+)', r'\1_\2', model.__name__)
    # Get method must be snake formatted model name concatenated with _get
    method_name = re.sub('([a-z0-9])([A-Z])', r'\1_\2', s).lower() + '_get'
    return globals().get(method_name)


# Cache of model class -> getter function resolved by get_by_id.
_GET_METHODS = {}


@require_context
def get_by_id(context, model, id, *args, **kwargs):
    """Fetch a row of `model` by id using its conventional getter."""
    # Add get method to cache dictionary if it's not already there
    if not _GET_METHODS.get(model):
        _GET_METHODS[model] = _get_get_method(model)

    return _GET_METHODS[model](context, id, *args, **kwargs)


def condition_db_filter(model, field, value):
    """Create matching filter.

    If value is an iterable other than a string, any of the values is
    a valid match (OR), so we'll use SQL IN operator.

    If it's not an iterator == operator will be used.
    """
    orm_field = getattr(model, field)
    # For values that must match and are iterables we use IN
    if (isinstance(value, _collections_abc.Iterable) and
            not isinstance(value, six.string_types)):
        # We cannot use in_ when one of the values is None
        if None not in value:
            return orm_field.in_(value)

        return or_(orm_field == v for v in value)

    # For values that must match and are not iterables we use ==
    return orm_field == value


def condition_not_db_filter(model, field, value, auto_none=True):
    """Create non matching filter.

    If value is an iterable other than a string, any of the values is
    a valid match (OR), so we'll use SQL IN operator.

    If it's not an iterator == operator will be used.

    If auto_none is True then we'll consider NULL values as different as well,
    like we do in Python and not like SQL does.
    """
    result = ~condition_db_filter(model, field, value)

    if (auto_none
            and ((isinstance(value, _collections_abc.Iterable) and
                  not isinstance(value, six.string_types)
                  and None not in value)
                 or (value is not None))):
        orm_field = getattr(model, field)
        result = or_(result, orm_field.is_(None))

    return result


def is_orm_value(obj):
    """Check if object is an ORM field or expression."""
    return isinstance(obj, (sqlalchemy.orm.attributes.InstrumentedAttribute,
                            sqlalchemy.sql.expression.ColumnElement))


def _check_is_not_multitable(values, model):
    """Check that we don't try to do multitable updates.

    Since PostgreSQL doesn't support multitable updates we want to always fail
    if we have such a query in our code, even if with MySQL it would work.
    """
    used_models = set()
    for field in values:
        if isinstance(field, sqlalchemy.orm.attributes.InstrumentedAttribute):
            used_models.add(field.class_)
        elif isinstance(field, six.string_types):
            used_models.add(model)
        else:
            raise exception.ProgrammingError(
                reason='DB Conditional update - Unknown field type, must be '
                       'string or ORM field.')
    if len(used_models) > 1:
        raise exception.ProgrammingError(
            reason='DB Conditional update - Error in query, multitable '
                   'updates are not supported.')
- """ - used_models = set() - for field in values: - if isinstance(field, sqlalchemy.orm.attributes.InstrumentedAttribute): - used_models.add(field.class_) - elif isinstance(field, six.string_types): - used_models.add(model) - else: - raise exception.ProgrammingError( - reason='DB Conditional update - Unknown field type, must be ' - 'string or ORM field.') - if len(used_models) > 1: - raise exception.ProgrammingError( - reason='DB Conditional update - Error in query, multitable ' - 'updates are not supported.') - - -@require_context -@_retry_on_deadlock -def conditional_update(context, model, values, expected_values, filters=(), - include_deleted='no', project_only=False, order=None): - """Compare-and-swap conditional update SQLAlchemy implementation.""" - _check_is_not_multitable(values, model) - - # Provided filters will become part of the where clause - where_conds = list(filters) - - # Build where conditions with operators ==, !=, NOT IN and IN - for field, condition in expected_values.items(): - if not isinstance(condition, db.Condition): - condition = db.Condition(condition, field) - where_conds.append(condition.get_filter(model, field)) - - # Create the query with the where clause - query = model_query(context, model, read_deleted=include_deleted, - project_only=project_only).filter(*where_conds) - - # NOTE(geguileo): Some DBs' update method are order dependent, and they - # behave differently depending on the order of the values, example on a - # volume with 'available' status: - # UPDATE volumes SET previous_status=status, status='reyping' - # WHERE id='44f284f9-877d-4fce-9eb4-67a052410054'; - # Will result in a volume with 'retyping' status and 'available' - # previous_status both on SQLite and MariaDB, but - # UPDATE volumes SET status='retyping', previous_status=status - # WHERE id='44f284f9-877d-4fce-9eb4-67a052410054'; - # Will yield the same result in SQLite but will result in a volume with - # status and previous_status set to 'retyping' in MariaDB, 
which is not - # what we want, so order must be taken into consideration. - # Order for the update will be: - # 1- Order specified in argument order - # 2- Values that refer to other ORM field (simple and using operations, - # like size + 10) - # 3- Values that use Case clause (since they may be using fields as well) - # 4- All other values - order = list(order) if order else tuple() - orm_field_list = [] - case_list = [] - unordered_list = [] - for key, value in values.items(): - if isinstance(value, db.Case): - value = case(value.whens, value.value, value.else_) - - if key in order: - order[order.index(key)] = (key, value) - continue - # NOTE(geguileo): Check Case first since it's a type of orm value - if isinstance(value, sql.elements.Case): - value_list = case_list - elif is_orm_value(value): - value_list = orm_field_list - else: - value_list = unordered_list - value_list.append((key, value)) - - update_args = {'synchronize_session': False} - - # If we don't have to enforce any kind of order just pass along the values - # dictionary since it will be a little more efficient. - if order or orm_field_list or case_list: - # If we are doing an update with ordered parameters, we need to add - # remaining values to the list - values = itertools.chain(order, orm_field_list, case_list, - unordered_list) - # And we have to tell SQLAlchemy that we want to preserve the order - update_args['update_args'] = {'preserve_parameter_order': True} - - # Return True if we were able to change any DB entry, False otherwise - result = query.update(values, **update_args) - return 0 != result diff --git a/cinder/db/sqlalchemy/migrate_repo/README b/cinder/db/sqlalchemy/migrate_repo/README deleted file mode 100644 index 2f81df17a..000000000 --- a/cinder/db/sqlalchemy/migrate_repo/README +++ /dev/null @@ -1,7 +0,0 @@ -This is a database migration repository. 
#!/usr/bin/env python
# Copyright 2013 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

import os

from cinder.db.sqlalchemy import migrate_repo

from migrate.versioning.shell import main


if __name__ == '__main__':
    # Point sqlalchemy-migrate's CLI at this repository's own directory.
    repo_path = os.path.abspath(os.path.dirname(migrate_repo.__file__))
    main(debug='False', repository=repo_path)
-# If this is changed once a database is under version control, you'll need to -# change the table name in each database too. -version_table=migrate_version - -# When committing a change script, Migrate will attempt to generate the -# sql for all supported databases; normally, if one of them fails - probably -# because you don't have that database installed - it is ignored and the -# commit continues, perhaps ending successfully. -# Databases in this list MUST compile successfully during a commit, or the -# entire commit will fail. List the databases your application will actually -# be using to ensure your updates to that database work properly. -# This must be a list; example: ['postgres','sqlite'] -required_dbs=[] diff --git a/cinder/db/sqlalchemy/migrate_repo/versions/073_cinder_init.py b/cinder/db/sqlalchemy/migrate_repo/versions/073_cinder_init.py deleted file mode 100644 index f0bcb9b99..000000000 --- a/cinder/db/sqlalchemy/migrate_repo/versions/073_cinder_init.py +++ /dev/null @@ -1,595 +0,0 @@ -# Copyright 2012 OpenStack Foundation -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import datetime - -from oslo_config import cfg -from oslo_db.sqlalchemy import ndb -from sqlalchemy import Boolean, Column, DateTime, ForeignKey, Index -from sqlalchemy import Integer, MetaData, String, Table, Text, UniqueConstraint - -# Get default values via config. 
The defaults will either -# come from the default values set in the quota option -# configuration or via cinder.conf if the user has configured -# default values for quotas there. -CONF = cfg.CONF -CONF.import_opt('quota_volumes', 'cinder.quota') -CONF.import_opt('quota_snapshots', 'cinder.quota') -CONF.import_opt('quota_gigabytes', 'cinder.quota') -CONF.import_opt('quota_consistencygroups', 'cinder.quota') - -CLASS_NAME = 'default' -CREATED_AT = datetime.datetime.now() # noqa - - -def define_tables(meta): - services = Table( - 'services', meta, - Column('created_at', DateTime), - Column('updated_at', DateTime), - Column('deleted_at', DateTime), - Column('deleted', Boolean), - Column('id', Integer, primary_key=True, nullable=False), - Column('host', String(255)), - Column('binary', String(255)), - Column('topic', String(255)), - Column('report_count', Integer, nullable=False), - Column('disabled', Boolean), - Column('availability_zone', String(255)), - Column('disabled_reason', String(255)), - Column('modified_at', DateTime(timezone=False)), - Column('rpc_current_version', String(36)), - Column('object_current_version', String(36)), - Column('replication_status', String(36), default='not-capable'), - Column('frozen', Boolean, default=False), - Column('active_backend_id', String(255)), - mysql_engine='InnoDB', - mysql_charset='utf8' - ) - - consistencygroups = Table( - 'consistencygroups', meta, - Column('created_at', DateTime(timezone=False)), - Column('updated_at', DateTime(timezone=False)), - Column('deleted_at', DateTime(timezone=False)), - Column('deleted', Boolean(create_constraint=True, name=None)), - Column('id', String(36), primary_key=True, nullable=False), - Column('user_id', String(255)), - Column('project_id', String(255)), - Column('host', String(255)), - Column('availability_zone', String(255)), - Column('name', String(255)), - Column('description', String(255)), - Column('volume_type_id', String(255)), - Column('status', String(255)), - 
Column('cgsnapshot_id', String(36)), - Column('source_cgid', String(36)), - mysql_engine='InnoDB', - mysql_charset='utf8' - ) - - cgsnapshots = Table( - 'cgsnapshots', meta, - Column('created_at', DateTime(timezone=False)), - Column('updated_at', DateTime(timezone=False)), - Column('deleted_at', DateTime(timezone=False)), - Column('deleted', Boolean(create_constraint=True, name=None)), - Column('id', String(36), primary_key=True, nullable=False), - Column('consistencygroup_id', String(36), - ForeignKey('consistencygroups.id'), - nullable=False), - Column('user_id', String(255)), - Column('project_id', String(255)), - Column('name', String(255)), - Column('description', String(255)), - Column('status', String(255)), - mysql_engine='InnoDB', - mysql_charset='utf8' - ) - - volumes = Table( - 'volumes', meta, - Column('created_at', DateTime), - Column('updated_at', DateTime), - Column('deleted_at', DateTime), - Column('deleted', Boolean), - Column('id', String(36), primary_key=True, nullable=False), - Column('ec2_id', String(255)), - Column('user_id', String(255)), - Column('project_id', String(255)), - Column('host', String(255)), - Column('size', Integer), - Column('availability_zone', ndb.AutoStringTinyText(255)), - Column('status', ndb.AutoStringSize(255, 64)), - Column('attach_status', ndb.AutoStringSize(255, 64)), - Column('scheduled_at', DateTime), - Column('launched_at', DateTime), - Column('terminated_at', DateTime), - Column('display_name', String(255)), - Column('display_description', ndb.AutoStringTinyText(255)), - Column('provider_location', String(256)), - Column('provider_auth', String(256)), - Column('snapshot_id', String(36)), - Column('volume_type_id', String(36)), - Column('source_volid', String(36)), - Column('bootable', Boolean), - Column('provider_geometry', String(255)), - Column('_name_id', String(36)), - Column('encryption_key_id', String(36)), - Column('migration_status', ndb.AutoStringSize(255, 64)), - Column('replication_status', 
ndb.AutoStringSize(255, 64)), - Column('replication_extended_status', ndb.AutoStringTinyText(255)), - Column('replication_driver_data', ndb.AutoStringTinyText(255)), - Column('consistencygroup_id', String(36), - ForeignKey('consistencygroups.id')), - Column('provider_id', String(255)), - Column('multiattach', Boolean), - Column('previous_status', String(255)), - mysql_engine='InnoDB', - mysql_charset='utf8' - ) - - volume_attachment = Table( - 'volume_attachment', meta, - Column('created_at', DateTime), - Column('updated_at', DateTime), - Column('deleted_at', DateTime), - Column('deleted', Boolean), - Column('id', String(36), primary_key=True, nullable=False), - Column('volume_id', String(36), ForeignKey('volumes.id'), - nullable=False), - Column('attached_host', String(255)), - Column('instance_uuid', String(36)), - Column('mountpoint', String(255)), - Column('attach_time', DateTime), - Column('detach_time', DateTime), - Column('attach_mode', String(36)), - Column('attach_status', String(255)), - mysql_engine='InnoDB', - mysql_charset='utf8' - ) - - snapshots = Table( - 'snapshots', meta, - Column('created_at', DateTime), - Column('updated_at', DateTime), - Column('deleted_at', DateTime), - Column('deleted', Boolean), - Column('id', String(36), primary_key=True, nullable=False), - Column('volume_id', String(36), - ForeignKey('volumes.id', name='snapshots_volume_id_fkey'), - nullable=False), - Column('user_id', String(255)), - Column('project_id', String(255)), - Column('status', String(255)), - Column('progress', String(255)), - Column('volume_size', Integer), - Column('scheduled_at', DateTime), - Column('display_name', String(255)), - Column('display_description', String(255)), - Column('provider_location', String(255)), - Column('encryption_key_id', String(36)), - Column('volume_type_id', String(36)), - Column('cgsnapshot_id', String(36), - ForeignKey('cgsnapshots.id')), - Column('provider_id', String(255)), - Column('provider_auth', String(255)), - 
mysql_engine='InnoDB', - mysql_charset='utf8' - ) - - snapshot_metadata = Table( - 'snapshot_metadata', meta, - Column('created_at', DateTime), - Column('updated_at', DateTime), - Column('deleted_at', DateTime), - Column('deleted', Boolean), - Column('id', Integer, primary_key=True, nullable=False), - Column('snapshot_id', String(36), ForeignKey('snapshots.id'), - nullable=False), - Column('key', String(255)), - Column('value', String(255)), - mysql_engine='InnoDB', - mysql_charset='utf8' - ) - - quality_of_service_specs = Table( - 'quality_of_service_specs', meta, - Column('created_at', DateTime(timezone=False)), - Column('updated_at', DateTime(timezone=False)), - Column('deleted_at', DateTime(timezone=False)), - Column('deleted', Boolean(create_constraint=True, name=None)), - Column('id', String(36), primary_key=True, nullable=False), - Column('specs_id', String(36), - ForeignKey('quality_of_service_specs.id')), - Column('key', String(255)), - Column('value', String(255)), - mysql_engine='InnoDB', - mysql_charset='utf8' - ) - - volume_types = Table( - 'volume_types', meta, - Column('created_at', DateTime), - Column('updated_at', DateTime), - Column('deleted_at', DateTime), - Column('deleted', Boolean), - Column('id', String(36), primary_key=True, nullable=False), - Column('name', String(255)), - Column('qos_specs_id', String(36), - ForeignKey('quality_of_service_specs.id')), - Column('is_public', Boolean), - Column('description', String(255)), - mysql_engine='InnoDB', - mysql_charset='utf8' - ) - - volume_type_projects = Table( - 'volume_type_projects', meta, - Column('id', Integer, primary_key=True, nullable=False), - Column('created_at', DateTime), - Column('updated_at', DateTime), - Column('deleted_at', DateTime), - Column('volume_type_id', String(36), - ForeignKey('volume_types.id')), - Column('project_id', String(255)), - Column('deleted', Integer), - UniqueConstraint('volume_type_id', 'project_id', 'deleted'), - mysql_engine='InnoDB', - mysql_charset='utf8' 
- ) - - volume_metadata = Table( - 'volume_metadata', meta, - Column('created_at', DateTime), - Column('updated_at', DateTime), - Column('deleted_at', DateTime), - Column('deleted', Boolean), - Column('id', Integer, primary_key=True, nullable=False), - Column('volume_id', String(36), ForeignKey('volumes.id'), - nullable=False), - Column('key', String(255)), - Column('value', String(255)), - mysql_engine='InnoDB', - mysql_charset='utf8' - ) - - volume_type_extra_specs = Table( - 'volume_type_extra_specs', meta, - Column('created_at', DateTime), - Column('updated_at', DateTime), - Column('deleted_at', DateTime), - Column('deleted', Boolean), - Column('id', Integer, primary_key=True, nullable=False), - Column('volume_type_id', String(36), - ForeignKey('volume_types.id', - name='volume_type_extra_specs_ibfk_1'), - nullable=False), - Column('key', String(255)), - Column('value', String(255)), - mysql_engine='InnoDB', - mysql_charset='utf8' - ) - - quotas = Table( - 'quotas', meta, - Column('id', Integer, primary_key=True, nullable=False), - Column('created_at', DateTime), - Column('updated_at', DateTime), - Column('deleted_at', DateTime), - Column('deleted', Boolean), - Column('project_id', String(255)), - Column('resource', String(255), nullable=False), - Column('hard_limit', Integer), - Column('allocated', Integer, default=0), - mysql_engine='InnoDB', - mysql_charset='utf8' - ) - - quota_classes = Table( - 'quota_classes', meta, - Column('created_at', DateTime(timezone=False)), - Column('updated_at', DateTime(timezone=False)), - Column('deleted_at', DateTime(timezone=False)), - Column('deleted', Boolean(create_constraint=True, - name=None)), - Column('id', Integer(), primary_key=True), - Column('class_name', String(255), index=True), - Column('resource', String(255)), - Column('hard_limit', Integer(), nullable=True), - mysql_engine='InnoDB', - mysql_charset='utf8', - ) - - quota_usages = Table( - 'quota_usages', meta, - Column('created_at', DateTime(timezone=False)), 
- Column('updated_at', DateTime(timezone=False)), - Column('deleted_at', DateTime(timezone=False)), - Column('deleted', Boolean(create_constraint=True, - name=None)), - Column('id', Integer(), primary_key=True), - Column('project_id', String(255), index=True), - Column('resource', String(255)), - Column('in_use', Integer(), nullable=False), - Column('reserved', Integer(), nullable=False), - Column('until_refresh', Integer(), nullable=True), - mysql_engine='InnoDB', - mysql_charset='utf8', - ) - - reservations = Table( - 'reservations', meta, - Column('created_at', DateTime(timezone=False)), - Column('updated_at', DateTime(timezone=False)), - Column('deleted_at', DateTime(timezone=False)), - Column('deleted', Boolean(create_constraint=True, - name=None)), - Column('id', Integer(), primary_key=True), - Column('uuid', String(36), nullable=False), - Column('usage_id', - Integer(), - ForeignKey('quota_usages.id'), - nullable=True), - Column('project_id', String(255), index=True), - Column('resource', String(255)), - Column('delta', Integer(), nullable=False), - Column('expire', DateTime(timezone=False)), - Column('allocated_id', Integer, ForeignKey('quotas.id'), - nullable=True), - Index('reservations_deleted_expire_idx', - 'deleted', 'expire'), - mysql_engine='InnoDB', - mysql_charset='utf8', - ) - - volume_glance_metadata = Table( - 'volume_glance_metadata', - meta, - Column('created_at', DateTime(timezone=False)), - Column('updated_at', DateTime(timezone=False)), - Column('deleted_at', DateTime(timezone=False)), - Column('deleted', Boolean(create_constraint=True, name=None)), - Column('id', Integer(), primary_key=True, nullable=False), - Column('volume_id', String(36), ForeignKey('volumes.id')), - Column('snapshot_id', String(36), - ForeignKey('snapshots.id')), - Column('key', String(255)), - Column('value', Text), - mysql_engine='InnoDB', - mysql_charset='utf8' - ) - - backups = Table( - 'backups', meta, - Column('created_at', DateTime(timezone=False)), - 
Column('updated_at', DateTime(timezone=False)), - Column('deleted_at', DateTime(timezone=False)), - Column('deleted', Boolean(create_constraint=True, name=None)), - Column('id', String(36), primary_key=True, nullable=False), - Column('volume_id', String(36), nullable=False), - Column('user_id', String(255)), - Column('project_id', String(255)), - Column('host', String(255)), - Column('availability_zone', String(255)), - Column('display_name', String(255)), - Column('display_description', String(255)), - Column('container', String(255)), - Column('status', String(255)), - Column('fail_reason', String(255)), - Column('service_metadata', String(255)), - Column('service', String(255)), - Column('size', Integer()), - Column('object_count', Integer()), - Column('parent_id', String(36)), - Column('temp_volume_id', String(36)), - Column('temp_snapshot_id', String(36)), - Column('num_dependent_backups', Integer, default=0), - Column('snapshot_id', String(36)), - Column('data_timestamp', DateTime), - Column('restore_volume_id', String(36)), - mysql_engine='InnoDB', - mysql_charset='utf8' - ) - - transfers = Table( - 'transfers', meta, - Column('created_at', DateTime(timezone=False)), - Column('updated_at', DateTime(timezone=False)), - Column('deleted_at', DateTime(timezone=False)), - Column('deleted', Boolean), - Column('id', String(36), primary_key=True, nullable=False), - Column('volume_id', String(36), ForeignKey('volumes.id'), - nullable=False), - Column('display_name', String(255)), - Column('salt', String(255)), - Column('crypt_hash', String(255)), - Column('expires_at', DateTime(timezone=False)), - mysql_engine='InnoDB', - mysql_charset='utf8' - ) - - # Sqlite needs to handle nullable differently - is_nullable = (meta.bind.name == 'sqlite') - - encryption = Table( - 'encryption', meta, - Column('created_at', DateTime(timezone=False)), - Column('updated_at', DateTime(timezone=False)), - Column('deleted_at', DateTime(timezone=False)), - Column('deleted', 
Boolean(create_constraint=True, name=None)), - Column('cipher', String(255)), - Column('control_location', String(255), nullable=is_nullable), - Column('key_size', Integer), - Column('provider', String(255), nullable=is_nullable), - # NOTE(joel-coffman): The volume_type_id must be unique or else the - # referenced volume type becomes ambiguous. That is, specifying the - # volume type is not sufficient to identify a particular encryption - # scheme unless each volume type is associated with at most one - # encryption scheme. - Column('volume_type_id', String(36), nullable=is_nullable), - # NOTE (smcginnis): nullable=True triggers this to not set a default - # value, but since it's a primary key the resulting schema will end up - # still being NOT NULL. This is avoiding a case in MySQL where it will - # otherwise set this to NOT NULL DEFAULT ''. May be harmless, but - # inconsistent with previous schema. - Column('encryption_id', String(36), primary_key=True, nullable=True), - mysql_engine='InnoDB', - mysql_charset='utf8' - ) - - volume_admin_metadata = Table( - 'volume_admin_metadata', meta, - Column('created_at', DateTime), - Column('updated_at', DateTime), - Column('deleted_at', DateTime), - Column('deleted', Boolean), - Column('id', Integer, primary_key=True, nullable=False), - Column('volume_id', String(36), ForeignKey('volumes.id'), - nullable=False), - Column('key', String(255)), - Column('value', String(255)), - mysql_engine='InnoDB', - mysql_charset='utf8' - ) - - initiator_data = Table( - 'driver_initiator_data', meta, - Column('created_at', DateTime(timezone=False)), - Column('updated_at', DateTime(timezone=False)), - Column('id', Integer, primary_key=True, nullable=False), - Column('initiator', String(255), index=True, nullable=False), - Column('namespace', String(255), nullable=False), - Column('key', String(255), nullable=False), - Column('value', String(255)), - UniqueConstraint('initiator', 'namespace', 'key'), - mysql_engine='InnoDB', - 
mysql_charset='utf8' - ) - - image_volume_cache = Table( - 'image_volume_cache_entries', meta, - Column('image_updated_at', DateTime(timezone=False)), - Column('id', Integer, primary_key=True, nullable=False), - Column('host', String(255), index=True, nullable=False), - Column('image_id', String(36), index=True, nullable=False), - Column('volume_id', String(36), nullable=False), - Column('size', Integer, nullable=False), - Column('last_used', DateTime, nullable=False), - mysql_engine='InnoDB', - mysql_charset='utf8' - ) - - return [consistencygroups, - cgsnapshots, - volumes, - volume_attachment, - snapshots, - snapshot_metadata, - quality_of_service_specs, - volume_types, - volume_type_projects, - quotas, - services, - volume_metadata, - volume_type_extra_specs, - quota_classes, - quota_usages, - reservations, - volume_glance_metadata, - backups, - transfers, - encryption, - volume_admin_metadata, - initiator_data, - image_volume_cache] - - -def upgrade(migrate_engine): - meta = MetaData() - meta.bind = migrate_engine - - # create all tables - # Take care on create order for those with FK dependencies - tables = define_tables(meta) - - for table in tables: - table.create() - - if migrate_engine.name == "mysql": - tables = ["consistencygroups", - "cgsnapshots", - "snapshots", - "snapshot_metadata", - "quality_of_service_specs", - "volume_types", - "volume_type_projects", - "volumes", - "volume_attachment", - "migrate_version", - "quotas", - "services", - "volume_metadata", - "volume_type_extra_specs", - "quota_classes", - "quota_usages", - "reservations", - "volume_glance_metadata", - "backups", - "transfers", - "encryption", - "volume_admin_metadata", - "driver_initiator_data", - "image_volume_cache_entries"] - - migrate_engine.execute("SET foreign_key_checks = 0") - for table in tables: - migrate_engine.execute( - "ALTER TABLE %s CONVERT TO CHARACTER SET utf8" % table) - migrate_engine.execute("SET foreign_key_checks = 1") - migrate_engine.execute( - "ALTER 
DATABASE %s DEFAULT CHARACTER SET utf8" % - migrate_engine.url.database) - migrate_engine.execute("ALTER TABLE %s Engine=InnoDB" % table) - - # Set default quota class values - quota_classes = Table('quota_classes', meta, autoload=True) - qci = quota_classes.insert() - qci.execute({'created_at': CREATED_AT, - 'class_name': CLASS_NAME, - 'resource': 'volumes', - 'hard_limit': CONF.quota_volumes, - 'deleted': False, }) - # Set default snapshots - qci.execute({'created_at': CREATED_AT, - 'class_name': CLASS_NAME, - 'resource': 'snapshots', - 'hard_limit': CONF.quota_snapshots, - 'deleted': False, }) - # Set default gigabytes - qci.execute({'created_at': CREATED_AT, - 'class_name': CLASS_NAME, - 'resource': 'gigabytes', - 'hard_limit': CONF.quota_gigabytes, - 'deleted': False, }) - qci.execute({'created_at': CREATED_AT, - 'class_name': CLASS_NAME, - 'resource': 'consistencygroups', - 'hard_limit': CONF.quota_consistencygroups, - 'deleted': False, }) - qci.execute({'created_at': CREATED_AT, - 'class_name': CLASS_NAME, - 'resource': 'per_volume_gigabytes', - 'hard_limit': -1, - 'deleted': False, }) diff --git a/cinder/db/sqlalchemy/migrate_repo/versions/074_add_message_table.py b/cinder/db/sqlalchemy/migrate_repo/versions/074_add_message_table.py deleted file mode 100644 index 35bfea218..000000000 --- a/cinder/db/sqlalchemy/migrate_repo/versions/074_add_message_table.py +++ /dev/null @@ -1,41 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -from sqlalchemy import Boolean, Column, DateTime -from sqlalchemy import MetaData, String, Table - - -def upgrade(migrate_engine): - meta = MetaData() - meta.bind = migrate_engine - - # New table - messages = Table( - 'messages', - meta, - Column('id', String(36), primary_key=True, nullable=False), - Column('project_id', String(36), nullable=False), - Column('request_id', String(255), nullable=False), - Column('resource_type', String(36)), - Column('resource_uuid', String(255), nullable=True), - Column('event_id', String(255), nullable=False), - Column('message_level', String(255), nullable=False), - Column('created_at', DateTime(timezone=False)), - Column('updated_at', DateTime(timezone=False)), - Column('deleted_at', DateTime(timezone=False)), - Column('deleted', Boolean), - Column('expires_at', DateTime(timezone=False)), - mysql_engine='InnoDB', - mysql_charset='utf8' - ) - - messages.create() diff --git a/cinder/db/sqlalchemy/migrate_repo/versions/075_add_cluster_and_fields.py b/cinder/db/sqlalchemy/migrate_repo/versions/075_add_cluster_and_fields.py deleted file mode 100644 index ceb0a414b..000000000 --- a/cinder/db/sqlalchemy/migrate_repo/versions/075_add_cluster_and_fields.py +++ /dev/null @@ -1,58 +0,0 @@ -# Copyright (c) 2016 Red Hat, Inc. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -from sqlalchemy import Boolean, Column, DateTime, Integer -from sqlalchemy import MetaData, String, Table, UniqueConstraint - - -def upgrade(migrate_engine): - meta = MetaData() - meta.bind = migrate_engine - - # New cluster table - cluster = Table( - 'clusters', meta, - # Inherited fields from CinderBase - Column('created_at', DateTime(timezone=False)), - Column('updated_at', DateTime(timezone=False)), - Column('deleted_at', DateTime(timezone=False)), - Column('deleted', Boolean(), default=False), - - # Cluster specific fields - Column('id', Integer, primary_key=True, nullable=False), - Column('name', String(255), nullable=False), - Column('binary', String(255), nullable=False), - Column('disabled', Boolean(), default=False), - Column('disabled_reason', String(255)), - Column('race_preventer', Integer, nullable=False, default=0), - - # To remove potential races on creation we have a constraint set on - # name and race_preventer fields, and we set value on creation to 0, so - # 2 clusters with the same name will fail this constraint. On deletion - # we change this field to the same value as the id which will be unique - # and will not conflict with the creation of another cluster with the - # same name. - UniqueConstraint('name', 'binary', 'race_preventer'), - mysql_engine='InnoDB', - mysql_charset='utf8', - ) - - cluster.create() - - # Add the cluster flag to Service, ConsistencyGroup, and Volume tables. - for table_name in ('services', 'consistencygroups', 'volumes'): - table = Table(table_name, meta, autoload=True) - cluster_name = Column('cluster_name', String(255), nullable=True) - table.create_column(cluster_name) diff --git a/cinder/db/sqlalchemy/migrate_repo/versions/076_add_workers_table.py b/cinder/db/sqlalchemy/migrate_repo/versions/076_add_workers_table.py deleted file mode 100644 index 3480346b5..000000000 --- a/cinder/db/sqlalchemy/migrate_repo/versions/076_add_workers_table.py +++ /dev/null @@ -1,52 +0,0 @@ -# Copyright (c) 2016 Red Hat, Inc. 
-# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from sqlalchemy import Boolean, Column, DateTime, Integer -from sqlalchemy import MetaData, String, Table, UniqueConstraint -from migrate.changeset.constraint import ForeignKeyConstraint - - -def upgrade(migrate_engine): - """Add workers table.""" - meta = MetaData() - meta.bind = migrate_engine - - workers = Table( - 'workers', meta, - # Inherited fields from CinderBase - Column('created_at', DateTime(timezone=False)), - Column('updated_at', DateTime(timezone=False)), - Column('deleted_at', DateTime(timezone=False)), - Column('deleted', Boolean(), default=False), - - # Workers table specific fields - Column('id', Integer, primary_key=True), - Column('resource_type', String(40), nullable=False), - Column('resource_id', String(36), nullable=False), - Column('status', String(255), nullable=False), - Column('service_id', Integer, nullable=True), - UniqueConstraint('resource_type', 'resource_id'), - - mysql_engine='InnoDB', - mysql_charset='utf8', - ) - - workers.create() - - services = Table('services', meta, autoload=True) - - ForeignKeyConstraint( - columns=[workers.c.service_id], - refcolumns=[services.c.id]).create() diff --git a/cinder/db/sqlalchemy/migrate_repo/versions/077_add_group_types_and_group_specs_table.py b/cinder/db/sqlalchemy/migrate_repo/versions/077_add_group_types_and_group_specs_table.py deleted file mode 100644 index 2357dbf3a..000000000 --- 
a/cinder/db/sqlalchemy/migrate_repo/versions/077_add_group_types_and_group_specs_table.py +++ /dev/null @@ -1,75 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from sqlalchemy import Boolean, Column, DateTime, Integer -from sqlalchemy import ForeignKey, MetaData, String, Table, UniqueConstraint - - -def upgrade(migrate_engine): - meta = MetaData() - meta.bind = migrate_engine - - # New table - group_types = Table( - 'group_types', - meta, - Column('id', String(36), primary_key=True, nullable=False), - Column('name', String(255), nullable=False), - Column('description', String(255)), - Column('created_at', DateTime(timezone=False)), - Column('updated_at', DateTime(timezone=False)), - Column('deleted_at', DateTime(timezone=False)), - Column('deleted', Boolean), - Column('is_public', Boolean), - mysql_engine='InnoDB', - mysql_charset='utf8', - ) - - group_types.create() - - # New table - group_type_specs = Table( - 'group_type_specs', - meta, - Column('id', Integer, primary_key=True, nullable=False), - Column('key', String(255)), - Column('value', String(255)), - Column('group_type_id', String(36), - ForeignKey('group_types.id'), - nullable=False), - Column('created_at', DateTime(timezone=False)), - Column('updated_at', DateTime(timezone=False)), - Column('deleted_at', DateTime(timezone=False)), - Column('deleted', Boolean), - mysql_engine='InnoDB', - mysql_charset='utf8', - ) - - group_type_specs.create() - - # New table - group_type_projects = Table( - 
'group_type_projects', meta, - Column('id', Integer, primary_key=True, nullable=False), - Column('created_at', DateTime), - Column('updated_at', DateTime), - Column('deleted_at', DateTime), - Column('group_type_id', String(36), - ForeignKey('group_types.id')), - Column('project_id', String(length=255)), - Column('deleted', Boolean(create_constraint=True, name=None)), - UniqueConstraint('group_type_id', 'project_id', 'deleted'), - mysql_engine='InnoDB', - mysql_charset='utf8', - ) - - group_type_projects.create() diff --git a/cinder/db/sqlalchemy/migrate_repo/versions/078_add_groups_and_group_volume_type_mapping_table.py b/cinder/db/sqlalchemy/migrate_repo/versions/078_add_groups_and_group_volume_type_mapping_table.py deleted file mode 100644 index 04f09cb79..000000000 --- a/cinder/db/sqlalchemy/migrate_repo/versions/078_add_groups_and_group_volume_type_mapping_table.py +++ /dev/null @@ -1,96 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import datetime - -from sqlalchemy import Boolean, Column, DateTime, Integer -from sqlalchemy import ForeignKey, MetaData, String, Table, func, select - -# Default number of quota groups. We should not read from config file. 
-DEFAULT_QUOTA_GROUPS = 10 - -CLASS_NAME = 'default' -CREATED_AT = datetime.datetime.now() # noqa - - -def upgrade(migrate_engine): - meta = MetaData() - meta.bind = migrate_engine - - # New table - groups = Table( - 'groups', - meta, - Column('created_at', DateTime(timezone=False)), - Column('updated_at', DateTime(timezone=False)), - Column('deleted_at', DateTime(timezone=False)), - Column('deleted', Boolean), - Column('id', String(36), primary_key=True, nullable=False), - Column('user_id', String(length=255)), - Column('project_id', String(length=255)), - Column('cluster_name', String(255)), - Column('host', String(length=255)), - Column('availability_zone', String(length=255)), - Column('name', String(length=255)), - Column('description', String(length=255)), - Column('group_type_id', String(length=36)), - Column('status', String(length=255)), - mysql_engine='InnoDB', - mysql_charset='utf8', - ) - - groups.create() - - # Add column to volumes table - volumes = Table('volumes', meta, autoload=True) - group_id = Column('group_id', String(36), - ForeignKey('groups.id')) - volumes.create_column(group_id) - volumes.update().values(group_id=None).execute() - - # New group_volume_type_mapping table - Table('volume_types', meta, autoload=True) - - grp_vt_mapping = Table( - 'group_volume_type_mapping', meta, - Column('created_at', DateTime), - Column('updated_at', DateTime), - Column('deleted_at', DateTime), - Column('deleted', Boolean), - Column('id', Integer, primary_key=True, nullable=False), - Column('volume_type_id', String(36), ForeignKey('volume_types.id'), - nullable=False), - Column('group_id', String(36), - ForeignKey('groups.id'), nullable=False), - mysql_engine='InnoDB', - mysql_charset='utf8', - ) - - grp_vt_mapping.create() - - # Add group quota data into DB. 
- quota_classes = Table('quota_classes', meta, autoload=True) - - rows = select([func.count()]).select_from(quota_classes).where( - quota_classes.c.resource == 'groups').execute().scalar() - - # Do not add entries if there are already 'groups' entries. - if rows: - return - - # Set groups - qci = quota_classes.insert() - qci.execute({'created_at': CREATED_AT, - 'class_name': CLASS_NAME, - 'resource': 'groups', - 'hard_limit': DEFAULT_QUOTA_GROUPS, - 'deleted': False, }) diff --git a/cinder/db/sqlalchemy/migrate_repo/versions/079_add_group_snapshots.py b/cinder/db/sqlalchemy/migrate_repo/versions/079_add_group_snapshots.py deleted file mode 100644 index 5c52d425a..000000000 --- a/cinder/db/sqlalchemy/migrate_repo/versions/079_add_group_snapshots.py +++ /dev/null @@ -1,63 +0,0 @@ -# Copyright (C) 2016 EMC Corporation. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -from sqlalchemy import Boolean, Column, DateTime -from sqlalchemy import ForeignKey, MetaData, String, Table - - -def upgrade(migrate_engine): - meta = MetaData() - meta.bind = migrate_engine - - groups = Table('groups', meta, autoload=True) - - # New table - group_snapshots = Table( - 'group_snapshots', meta, - Column('created_at', DateTime(timezone=False)), - Column('updated_at', DateTime(timezone=False)), - Column('deleted_at', DateTime(timezone=False)), - Column('deleted', Boolean(create_constraint=True, name=None)), - Column('id', String(36), primary_key=True), - Column('group_id', String(36), - ForeignKey('groups.id'), - nullable=False), - Column('user_id', String(length=255)), - Column('project_id', String(length=255)), - Column('name', String(length=255)), - Column('description', String(length=255)), - Column('status', String(length=255)), - Column('group_type_id', String(length=36)), - mysql_engine='InnoDB', - mysql_charset='utf8', - ) - - group_snapshots.create() - - # Add group_snapshot_id column to snapshots table - snapshots = Table('snapshots', meta, autoload=True) - group_snapshot_id = Column('group_snapshot_id', String(36), - ForeignKey('group_snapshots.id')) - - snapshots.create_column(group_snapshot_id) - snapshots.update().values(group_snapshot_id=None).execute() - - # Add group_snapshot_id column to groups table - group_snapshot_id = Column('group_snapshot_id', String(36)) - groups.create_column(group_snapshot_id) - - # Add source_group_id column to groups table - source_group_id = Column('source_group_id', String(36)) - groups.create_column(source_group_id) diff --git a/cinder/db/sqlalchemy/migrate_repo/versions/080_placeholder.py b/cinder/db/sqlalchemy/migrate_repo/versions/080_placeholder.py deleted file mode 100644 index 995c6a1cc..000000000 --- a/cinder/db/sqlalchemy/migrate_repo/versions/080_placeholder.py +++ /dev/null @@ -1,22 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file 
except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -# This is a placeholder for Newton backports. -# Do not use this number for new Ocata work. New work starts after -# all the placeholders. -# -# See this for more information: -# http://lists.openstack.org/pipermail/openstack-dev/2013-March/006827.html - - -def upgrade(migrate_engine): - pass diff --git a/cinder/db/sqlalchemy/migrate_repo/versions/081_placeholder.py b/cinder/db/sqlalchemy/migrate_repo/versions/081_placeholder.py deleted file mode 100644 index 995c6a1cc..000000000 --- a/cinder/db/sqlalchemy/migrate_repo/versions/081_placeholder.py +++ /dev/null @@ -1,22 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -# This is a placeholder for Newton backports. -# Do not use this number for new Ocata work. New work starts after -# all the placeholders. 
-# -# See this for more information: -# http://lists.openstack.org/pipermail/openstack-dev/2013-March/006827.html - - -def upgrade(migrate_engine): - pass diff --git a/cinder/db/sqlalchemy/migrate_repo/versions/082_placeholder.py b/cinder/db/sqlalchemy/migrate_repo/versions/082_placeholder.py deleted file mode 100644 index 995c6a1cc..000000000 --- a/cinder/db/sqlalchemy/migrate_repo/versions/082_placeholder.py +++ /dev/null @@ -1,22 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -# This is a placeholder for Newton backports. -# Do not use this number for new Ocata work. New work starts after -# all the placeholders. -# -# See this for more information: -# http://lists.openstack.org/pipermail/openstack-dev/2013-March/006827.html - - -def upgrade(migrate_engine): - pass diff --git a/cinder/db/sqlalchemy/migrate_repo/versions/083_placeholder.py b/cinder/db/sqlalchemy/migrate_repo/versions/083_placeholder.py deleted file mode 100644 index 995c6a1cc..000000000 --- a/cinder/db/sqlalchemy/migrate_repo/versions/083_placeholder.py +++ /dev/null @@ -1,22 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. 
You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -# This is a placeholder for Newton backports. -# Do not use this number for new Ocata work. New work starts after -# all the placeholders. -# -# See this for more information: -# http://lists.openstack.org/pipermail/openstack-dev/2013-March/006827.html - - -def upgrade(migrate_engine): - pass diff --git a/cinder/db/sqlalchemy/migrate_repo/versions/084_placeholder.py b/cinder/db/sqlalchemy/migrate_repo/versions/084_placeholder.py deleted file mode 100644 index 995c6a1cc..000000000 --- a/cinder/db/sqlalchemy/migrate_repo/versions/084_placeholder.py +++ /dev/null @@ -1,22 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -# This is a placeholder for Newton backports. -# Do not use this number for new Ocata work. New work starts after -# all the placeholders. 
-# -# See this for more information: -# http://lists.openstack.org/pipermail/openstack-dev/2013-March/006827.html - - -def upgrade(migrate_engine): - pass diff --git a/cinder/db/sqlalchemy/migrate_repo/versions/085_modify_workers_updated_at.py b/cinder/db/sqlalchemy/migrate_repo/versions/085_modify_workers_updated_at.py deleted file mode 100644 index 841acfcef..000000000 --- a/cinder/db/sqlalchemy/migrate_repo/versions/085_modify_workers_updated_at.py +++ /dev/null @@ -1,55 +0,0 @@ -# Copyright (c) 2016 Red Hat, Inc. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from oslo_utils import timeutils -from sqlalchemy.dialects import mysql -from sqlalchemy import MetaData, Table - - -def upgrade(migrate_engine): - """Add microseconds precision on updated_at field in MySQL databases. - - PostgreSQL, SQLite, and MSSQL have sub-second precision by default, but - MySQL defaults to second precision in DateTime fields, which creates - problems for the resource cleanup mechanism. - """ - meta = MetaData() - meta.bind = migrate_engine - workers = Table('workers', meta, autoload=True) - - # This is only necessary for mysql, and since the table is not in use this - # will only be an schema update. 
- if migrate_engine.name.startswith('mysql'): - try: - workers.c.updated_at.alter(mysql.DATETIME(fsp=6)) - except Exception: - # MySQL v5.5 or earlier don't support sub-second resolution so we - # may have cleanup races in Active-Active configurations, that's - # why upgrading is recommended in that case. - # Code in Cinder is capable of working with 5.5, so for 5.5 there's - # no problem - pass - - # TODO(geguileo): Once we remove support for MySQL 5.5 we have to create - # an upgrade migration to remove this row. - # Set workers table sub-second support sentinel - wi = workers.insert() - now = timeutils.utcnow().replace(microsecond=123) - wi.execute({'created_at': now, - 'updated_at': now, - 'deleted': False, - 'resource_type': 'SENTINEL', - 'resource_id': 'SUB-SECOND', - 'status': 'OK'}) diff --git a/cinder/db/sqlalchemy/migrate_repo/versions/086_create_default_cgsnapshot_type.py b/cinder/db/sqlalchemy/migrate_repo/versions/086_create_default_cgsnapshot_type.py deleted file mode 100644 index a86c29192..000000000 --- a/cinder/db/sqlalchemy/migrate_repo/versions/086_create_default_cgsnapshot_type.py +++ /dev/null @@ -1,68 +0,0 @@ -# Copyright (c) 2016 Dell Inc. or its subsidiaries. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -import uuid - -from oslo_utils import timeutils -import six -from sqlalchemy import MetaData, Table - -from cinder.volume import group_types as volume_group_types - - -def upgrade(migrate_engine): - meta = MetaData() - meta.bind = migrate_engine - now = timeutils.utcnow() - - group_types = Table('group_types', meta, autoload=True) - group_type_specs = Table('group_type_specs', meta, autoload=True) - - # Create a default group_type for migrating cgsnapshots - results = list(group_types.select().where( - group_types.c.name == volume_group_types.DEFAULT_CGSNAPSHOT_TYPE and - group_types.c.deleted is False). - execute()) - if not results: - grp_type_id = six.text_type(uuid.uuid4()) - group_type_dicts = { - 'id': grp_type_id, - 'name': volume_group_types.DEFAULT_CGSNAPSHOT_TYPE, - 'description': 'Default group type for migrating cgsnapshot', - 'created_at': now, - 'updated_at': now, - 'deleted': False, - 'is_public': True, - } - grp_type = group_types.insert() - grp_type.execute(group_type_dicts) - else: - grp_type_id = results[0]['id'] - - results = list(group_type_specs.select().where( - group_type_specs.c.group_type_id == grp_type_id and - group_type_specs.c.deleted is False). - execute()) - if not results: - group_spec_dicts = { - 'key': 'consistent_group_snapshot_enabled', - 'value': ' True', - 'group_type_id': grp_type_id, - 'created_at': now, - 'updated_at': now, - 'deleted': False, - } - grp_spec = group_type_specs.insert() - grp_spec.execute(group_spec_dicts) diff --git a/cinder/db/sqlalchemy/migrate_repo/versions/087_allow_null_request_id_in_message.py b/cinder/db/sqlalchemy/migrate_repo/versions/087_allow_null_request_id_in_message.py deleted file mode 100644 index d855f11f1..000000000 --- a/cinder/db/sqlalchemy/migrate_repo/versions/087_allow_null_request_id_in_message.py +++ /dev/null @@ -1,21 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. 
You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from sqlalchemy import MetaData, Table - - -def upgrade(migrate_engine): - meta = MetaData() - meta.bind = migrate_engine - - messages = Table('messages', meta, autoload=True) - messages.c.request_id.alter(nullable=True) diff --git a/cinder/db/sqlalchemy/migrate_repo/versions/088_add_replication_info_to_cluster.py b/cinder/db/sqlalchemy/migrate_repo/versions/088_add_replication_info_to_cluster.py deleted file mode 100644 index 0e8ea25aa..000000000 --- a/cinder/db/sqlalchemy/migrate_repo/versions/088_add_replication_info_to_cluster.py +++ /dev/null @@ -1,33 +0,0 @@ -# Copyright (c) 2016 Red Hat, Inc. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
from sqlalchemy import Boolean, Column, MetaData, String, Table, text


def upgrade(migrate_engine):
    """Add replication info to clusters table.

    Adds the replication_status, frozen and active_backend_id columns,
    mirroring the equivalent columns on the services table.
    """
    metadata = MetaData(bind=migrate_engine)
    clusters = Table('clusters', metadata, autoload=True)

    # Column creation order is preserved from the original migration so
    # the physical column order in the table stays the same.
    new_columns = (
        Column('replication_status', String(length=36),
               default="not-capable"),
        Column('frozen', Boolean, nullable=False, default=False,
               server_default=text('false')),
        Column('active_backend_id', String(length=255)),
    )
    for column in new_columns:
        clusters.create_column(column)
from sqlalchemy import Column
from sqlalchemy import MetaData, String, Table


def upgrade(migrate_engine):
    """Add cluster name to image cache entries."""
    metadata = MetaData(bind=migrate_engine)
    cache_entries = Table('image_volume_cache_entries', metadata,
                          autoload=True)
    cache_entries.create_column(
        Column('cluster_name', String(255), nullable=True))
from sqlalchemy import Column, Integer, MetaData, Table, text

def upgrade(migrate_engine):
    """Add race preventer field to workers table.

    The column defaults to 0 (both in Python and at the server) and
    existing rows are back-filled via populate_default.
    """
    metadata = MetaData(bind=migrate_engine)
    workers = Table('workers', metadata, autoload=True)
    preventer = Column('race_preventer', Integer, nullable=False,
                       default=0, server_default=text('0'))
    preventer.create(workers, populate_default=True)
from sqlalchemy import Boolean, Column, DateTime, ForeignKey, Integer
from sqlalchemy import MetaData, String, Table


def upgrade(migrate_engine):
    """Add attachment_specs table."""
    metadata = MetaData(bind=migrate_engine)
    # Load the parent table so the foreign key below can resolve.
    Table('volume_attachment', metadata, autoload=True)

    spec_columns = [
        Column('created_at', DateTime(timezone=False)),
        Column('updated_at', DateTime(timezone=False)),
        Column('deleted_at', DateTime(timezone=False)),
        Column('deleted', Boolean(), default=False),
        Column('id', Integer, primary_key=True, nullable=False),
        Column('attachment_id', String(36),
               ForeignKey('volume_attachment.id'),
               nullable=False),
        Column('key', String(255)),
        Column('value', String(255)),
    ]
    attachment_specs = Table('attachment_specs', metadata, *spec_columns,
                             mysql_engine='InnoDB', mysql_charset='utf8')
    attachment_specs.create()
def upgrade(migrate_engine):
    """No-op migration slot reserved for Mitaka backports."""
def upgrade(migrate_engine):
    """No-op migration slot reserved for Mitaka backports."""
def upgrade(migrate_engine):
    """No-op migration slot reserved for Mitaka backports."""
from sqlalchemy import MetaData, Table, func, select

from cinder import exception
from cinder.i18n import _


WARNING_MSG = _('There are still %(count)i unmigrated records in '
                'the %(table)s table. Migration cannot continue '
                'until all records have been migrated.')


def upgrade(migrate_engine):
    """Refuse to upgrade while Ocata online data migrations are pending.

    Raises cinder.exception.ValidationError if any live consistency
    group / cgsnapshot rows remain, or if any live message rows still
    use event ids without the VOLUME_ prefix.
    """
    meta = MetaData(migrate_engine)

    def fail_if_pending(table_name, pending):
        # Shared failure path: format the warning and abort the upgrade.
        if pending > 0:
            detail = WARNING_MSG % {'count': pending, 'table': table_name}
            raise exception.ValidationError(detail=detail)

    # CGs to Generic Volume Groups transition.
    # NOTE: '== False' is a SQL expression here, not a Python comparison.
    for table_name in ('consistencygroups', 'cgsnapshots'):
        table = Table(table_name, meta, autoload=True)
        pending = select([func.count()]).select_from(table).where(
            table.c.deleted == False).execute().scalar()  # NOQA
        fail_if_pending(table.name, pending)

    # VOLUME_ prefix addition in message IDs.
    messages = Table('messages', meta, autoload=True)
    pending = select([func.count()]).select_from(messages).where(
        (messages.c.deleted == False) &
        (~messages.c.event_id.like('VOLUME_%'))).execute().scalar()  # NOQA
    fail_if_pending('messages', pending)
from oslo_db.sqlalchemy import utils


def upgrade(migrate_engine):
    """Add an index on messages.expires_at unless one already exists."""
    if utils.index_exists_on_columns(migrate_engine, 'messages',
                                     ['expires_at']):
        # An equivalent index is already present; nothing to do.
        return
    utils.add_index(migrate_engine, 'messages',
                    'messages_expire_at_idx', ['expires_at'])
from sqlalchemy import MetaData, Table, Column, Text


def upgrade(migrate_engine):
    """Add connection_info column to the volume_attachment table."""
    metadata = MetaData(migrate_engine)
    attachment = Table('volume_attachment', metadata, autoload=True)
    attachment.create_column(Column('connection_info', Text))
from oslo_db.sqlalchemy import utils
from oslo_log import log as logging
from sqlalchemy import MetaData

LOG = logging.getLogger(__name__)


# (table, column) pairs whose foreign-key lookups deserve an index.
INDEXES_TO_CREATE = (
    ('attachment_specs', 'attachment_id'),
    ('cgsnapshots', 'consistencygroup_id'),
    ('group_snapshots', 'group_id'),
    ('group_type_specs', 'group_type_id'),
    ('group_volume_type_mapping', 'group_id'),
    ('group_volume_type_mapping', 'volume_type_id'),
    ('quality_of_service_specs', 'specs_id'),
    ('reservations', 'allocated_id'),
    ('reservations', 'usage_id'),
    ('snapshot_metadata', 'snapshot_id'),
    ('snapshots', 'cgsnapshot_id'),
    ('snapshots', 'group_snapshot_id'),
    ('snapshots', 'volume_id'),
    ('transfers', 'volume_id'),
    ('volume_admin_metadata', 'volume_id'),
    ('volume_attachment', 'volume_id'),
    ('volume_glance_metadata', 'snapshot_id'),
    ('volume_glance_metadata', 'volume_id'),
    ('volume_metadata', 'volume_id'),
    ('volume_type_extra_specs', 'volume_type_id'),
    ('volume_types', 'qos_specs_id'),
    ('volumes', 'consistencygroup_id'),
    ('volumes', 'group_id'),
    ('workers', 'service_id'),
)


def ensure_index_exists(migrate_engine, table_name, column):
    """Create <table>_<column>_idx on table_name if not already present."""
    index_name = table_name + '_' + column + '_idx'
    if utils.index_exists_on_columns(migrate_engine, table_name, [column]):
        LOG.info(
            'Skipped adding %s because an equivalent index already exists.',
            index_name
        )
        return
    utils.add_index(migrate_engine, table_name, index_name, [column])


def upgrade(migrate_engine):
    """Add indexes on commonly-joined foreign key columns."""
    meta = MetaData()
    meta.bind = migrate_engine
    for table_name, column in INDEXES_TO_CREATE:
        ensure_index_exists(migrate_engine, table_name, column)
a/cinder/db/sqlalchemy/migrate_repo/versions/101_fix_replication_status_default_upgrade.sql +++ /dev/null @@ -1,45 +0,0 @@ -/* Copyright (c) 2016 Red Hat, Inc. - All Rights Reserved. - - Licensed under the Apache License, Version 2.0 (the "License"); you may - not use this file except in compliance with the License. You may obtain - a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - License for the specific language governing permissions and limitations - under the License. -*/ - -/* Fix replication_status field in volumes table. - - There are some drivers that did not update the replication_status field on - the volumes on creation and since the scheduler was not updating them on - creation there is an inconsistency between the database and the storage - device backend. - - Some of the drivers that have been detected to be missing this are: - - kaminario - - pure - - solidfire - - This migration will fix this updating the volume_status field based on the - volume type's replication status. 
/* Flip replication_status from 'disabled' to 'enabled' on every live
   volume whose volume type carries the 'replication_enabled' = '<is> True'
   extra spec, compensating for drivers that never set the field at
   volume-creation time. */
UPDATE volumes
SET replication_status='enabled'
WHERE (not volumes.deleted or volumes.deleted IS NULL)
    AND volumes.replication_status='disabled'
    /* Only volumes whose (live) type requests replication. */
    AND EXISTS(
        SELECT *
        FROM volume_type_extra_specs
        WHERE volumes.volume_type_id=volume_type_extra_specs.volume_type_id
            AND (volume_type_extra_specs.deleted IS NULL
                OR not volume_type_extra_specs.deleted)
            AND volume_type_extra_specs.key='replication_enabled'
            /* The leading space in the value is intentional: cinder stores
               boolean extra specs as the literal string '<space>True'. */
            AND volume_type_extra_specs.value=' True'
    );
from sqlalchemy import Column
from sqlalchemy import MetaData, String, Table


def upgrade(migrate_engine):
    """Add replication_status column to the groups table (idempotent)."""
    metadata = MetaData(bind=migrate_engine)
    groups = Table('groups', metadata, autoload=True)

    # Skip the ALTER entirely when the column already exists.
    if hasattr(groups.c, 'replication_status'):
        return
    groups.create_column(
        Column('replication_status', String(255), nullable=True))
from sqlalchemy import Column, String, MetaData, Table


def upgrade(migrate_engine):
    """Add detail_id and action_id columns to the messages table."""
    metadata = MetaData(migrate_engine)
    messages = Table('messages', metadata, autoload=True)
    # Creation order (detail_id first) matches the original migration so
    # the physical column order is unchanged.
    for column_name in ('detail_id', 'action_id'):
        messages.create_column(Column(column_name, String(10),
                                      nullable=True))
class CinderBase(models.TimestampMixin,
                 models.ModelBase):
    """Base class for Cinder Models."""

    __table_args__ = {'mysql_engine': 'InnoDB'}

    # TODO(rpodolyaka): reuse models.SoftDeleteMixin in the next stage
    # of implementing of BP db-cleanup
    deleted_at = Column(DateTime)
    deleted = Column(Boolean, default=False)
    # Disable oslo.db's automatic 'metadata' attribute; models that need
    # metadata define their own relationship.
    metadata = None

    @staticmethod
    def delete_values():
        """Return the column values that mark a row as soft-deleted."""
        return {'deleted': True,
                'deleted_at': timeutils.utcnow()}

    def delete(self, session):
        """Soft-delete this object, persist it, and return the values set."""
        values = self.delete_values()
        self.update(values)
        self.save(session=session)
        return values
class Service(BASE, CinderBase):
    """Represents a running service on a host."""

    __tablename__ = 'services'
    id = Column(Integer, primary_key=True)
    # Name of the cluster this service belongs to, if any.
    cluster_name = Column(String(255), nullable=True)
    host = Column(String(255))  # , ForeignKey('hosts.id'))
    binary = Column(String(255))
    # We want to overwrite default updated_at definition so we timestamp at
    # creation as well, so we only need to check updated_at for the heartbeat
    updated_at = Column(DateTime, default=timeutils.utcnow,
                        onupdate=timeutils.utcnow)
    topic = Column(String(255))
    report_count = Column(Integer, nullable=False, default=0)
    disabled = Column(Boolean, default=False)
    availability_zone = Column(String(255), default='cinder')
    disabled_reason = Column(String(255))
    # adding column modified_at to contain timestamp
    # for manual enable/disable of cinder services
    # updated_at column will now contain timestamps for
    # periodic updates
    modified_at = Column(DateTime)

    # Version columns to support rolling upgrade. These report the max RPC API
    # and objects versions that the manager of the service is able to support.
    rpc_current_version = Column(String(36))
    object_current_version = Column(String(36))

    # replication_status can be: enabled, disabled, not-capable, error,
    # failed-over or not-configured
    replication_status = Column(String(36), default="not-capable")
    active_backend_id = Column(String(255))
    frozen = Column(Boolean, nullable=False, default=False)

    # Join to the owning Cluster row; restricted to live (non-deleted)
    # services so soft-deleted rows never appear in Cluster.services.
    cluster = relationship('Cluster',
                           backref='services',
                           foreign_keys=cluster_name,
                           primaryjoin='and_('
                                       'Service.cluster_name == Cluster.name,'
                                       'Service.deleted == False)')
class Cluster(BASE, CinderBase):
    """Represents a cluster of hosts."""
    __tablename__ = 'clusters'
    # To remove potential races on creation we have a constraint set on name
    # and race_preventer fields, and we set value on creation to 0, so 2
    # clusters with the same name will fail this constraint. On deletion we
    # change this field to the same value as the id which will be unique and
    # will not conflict with the creation of another cluster with the same
    # name.
    __table_args__ = (UniqueConstraint('name', 'binary', 'race_preventer'),)

    id = Column(Integer, primary_key=True)
    # NOTE(geguileo): Name is constructed in the same way that Server.host but
    # using cluster configuration option instead of host.
    name = Column(String(255), nullable=False)
    binary = Column(String(255), nullable=False)
    disabled = Column(Boolean, default=False)
    disabled_reason = Column(String(255))
    race_preventer = Column(Integer, nullable=False, default=0)

    replication_status = Column(String(36), default="not-capable")
    active_backend_id = Column(String(255))
    frozen = Column(Boolean, nullable=False, default=False)

    # Last heartbeat reported by any of the services of this cluster. This is
    # not deferred since we always want to load this field.
    last_heartbeat = column_property(
        select([func.max(Service.updated_at)]).
        where(and_(Service.cluster_name == name, ~Service.deleted)).
        correlate_except(Service), deferred=False)

    # Number of existing services for this cluster
    num_hosts = column_property(
        select([func.count(Service.id)]).
        where(and_(Service.cluster_name == name, ~Service.deleted)).
        correlate_except(Service),
        group='services_summary', deferred=True)

    # Number of services that are down for this cluster.  'expired' is a
    # bindparam supplied by the caller at query time (the heartbeat cutoff).
    num_down_hosts = column_property(
        select([func.count(Service.id)]).
        where(and_(Service.cluster_name == name,
                   ~Service.deleted,
                   Service.updated_at < bindparam('expired'))).
        correlate_except(Service),
        group='services_summary', deferred=True)

    @staticmethod
    def delete_values():
        """Return soft-delete values; race_preventer is set to the row id so
        the (name, binary, race_preventer) constraint frees up the name for
        a future cluster with the same name."""
        return {'race_preventer': Cluster.id,
                'deleted': True,
                'deleted_at': timeutils.utcnow()}
class ConsistencyGroup(BASE, CinderBase):
    """Represents a consistencygroup."""
    __tablename__ = 'consistencygroups'
    id = Column(String(36), primary_key=True)

    user_id = Column(String(255), nullable=False)
    project_id = Column(String(255), nullable=False)

    cluster_name = Column(String(255), nullable=True)
    host = Column(String(255))
    availability_zone = Column(String(255))
    name = Column(String(255))
    description = Column(String(255))
    volume_type_id = Column(String(255))
    status = Column(String(255))
    # Source references when this CG was created from a cgsnapshot or
    # from another consistency group.
    cgsnapshot_id = Column(String(36))
    source_cgid = Column(String(36))
class GroupSnapshot(BASE, CinderBase):
    """Represents a group snapshot."""
    __tablename__ = 'group_snapshots'
    id = Column(String(36), primary_key=True)

    # The generic volume group this snapshot was taken from.
    group_id = Column(String(36), nullable=False)
    user_id = Column(String(255))
    project_id = Column(String(255))

    name = Column(String(255))
    description = Column(String(255))
    status = Column(String(255))
    group_type_id = Column(String(36))

    # Parent group; also exposes Group.group_snapshots via the backref.
    group = relationship(
        Group,
        backref="group_snapshots",
        foreign_keys=group_id,
        primaryjoin='GroupSnapshot.group_id == Group.id')
- attach_status = Column(ndb.AutoStringSize(255, 64)) # TODO(vish): enum - migration_status = Column(ndb.AutoStringSize(255, 64)) - - scheduled_at = Column(DateTime) - launched_at = Column(DateTime) - terminated_at = Column(DateTime) - - display_name = Column(String(255)) - display_description = Column(ndb.AutoStringTinyText(255)) - - provider_location = Column(String(255)) - provider_auth = Column(String(255)) - provider_geometry = Column(String(255)) - provider_id = Column(String(255)) - - volume_type_id = Column(String(36)) - source_volid = Column(String(36)) - encryption_key_id = Column(String(36)) - - consistencygroup_id = Column(String(36)) - group_id = Column(String(36)) - - bootable = Column(Boolean, default=False) - multiattach = Column(Boolean, default=False) - - replication_status = Column(ndb.AutoStringSize(255, 64)) - replication_extended_status = Column(ndb.AutoStringTinyText(255)) - replication_driver_data = Column(ndb.AutoStringTinyText(255)) - - previous_status = Column(String(255)) - - consistencygroup = relationship( - ConsistencyGroup, - backref="volumes", - foreign_keys=consistencygroup_id, - primaryjoin='Volume.consistencygroup_id == ConsistencyGroup.id') - - group = relationship( - Group, - backref="volumes", - foreign_keys=group_id, - primaryjoin='Volume.group_id == Group.id') - - -class VolumeMetadata(BASE, CinderBase): - """Represents a metadata key/value pair for a volume.""" - __tablename__ = 'volume_metadata' - id = Column(Integer, primary_key=True) - key = Column(String(255)) - value = Column(String(255)) - volume_id = Column(String(36), ForeignKey('volumes.id'), nullable=False) - volume = relationship(Volume, backref="volume_metadata", - foreign_keys=volume_id, - primaryjoin='and_(' - 'VolumeMetadata.volume_id == Volume.id,' - 'VolumeMetadata.deleted == False)') - - -class VolumeAdminMetadata(BASE, CinderBase): - """Represents an administrator metadata key/value pair for a volume.""" - __tablename__ = 'volume_admin_metadata' - id = 
Column(Integer, primary_key=True) - key = Column(String(255)) - value = Column(String(255)) - volume_id = Column(String(36), ForeignKey('volumes.id'), nullable=False) - volume = relationship(Volume, backref="volume_admin_metadata", - foreign_keys=volume_id, - primaryjoin='and_(' - 'VolumeAdminMetadata.volume_id == Volume.id,' - 'VolumeAdminMetadata.deleted == False)') - - -class VolumeAttachment(BASE, CinderBase): - """Represents a volume attachment for a vm.""" - __tablename__ = 'volume_attachment' - id = Column(String(36), primary_key=True) - - volume_id = Column(String(36), ForeignKey('volumes.id'), nullable=False) - volume = relationship(Volume, backref="volume_attachment", - foreign_keys=volume_id, - primaryjoin='and_(' - 'VolumeAttachment.volume_id == Volume.id,' - 'VolumeAttachment.deleted == False)') - instance_uuid = Column(String(36)) - attached_host = Column(String(255)) - mountpoint = Column(String(255)) - attach_time = Column(DateTime) - detach_time = Column(DateTime) - attach_status = Column(String(255)) - attach_mode = Column(String(255)) - connection_info = Column(Text) - - -class VolumeTypes(BASE, CinderBase): - """Represent possible volume_types of volumes offered.""" - __tablename__ = "volume_types" - id = Column(String(36), primary_key=True) - name = Column(String(255)) - description = Column(String(255)) - # A reference to qos_specs entity - qos_specs_id = Column(String(36), - ForeignKey('quality_of_service_specs.id')) - is_public = Column(Boolean, default=True) - volumes = relationship(Volume, - backref=backref('volume_type', uselist=False), - foreign_keys=id, - primaryjoin='and_(' - 'Volume.volume_type_id == VolumeTypes.id, ' - 'VolumeTypes.deleted == False)') - - -class GroupTypes(BASE, CinderBase): - """Represent possible group_types of groups offered.""" - __tablename__ = "group_types" - id = Column(String(36), primary_key=True) - name = Column(String(255)) - description = Column(String(255)) - is_public = Column(Boolean, default=True) - 
groups = relationship(Group, - backref=backref('group_type', uselist=False), - foreign_keys=id, - primaryjoin='and_(' - 'Group.group_type_id == GroupTypes.id, ' - 'GroupTypes.deleted == False)') - - -class GroupVolumeTypeMapping(BASE, CinderBase): - """Represent mapping between groups and volume_types.""" - __tablename__ = "group_volume_type_mapping" - id = Column(Integer, primary_key=True, nullable=False) - volume_type_id = Column(String(36), - ForeignKey('volume_types.id'), - nullable=False) - group_id = Column(String(36), - ForeignKey('groups.id'), - nullable=False) - - group = relationship( - Group, - backref="volume_types", - foreign_keys=group_id, - primaryjoin='and_(' - 'GroupVolumeTypeMapping.group_id == Group.id,' - 'GroupVolumeTypeMapping.deleted == False)' - ) - - -class VolumeTypeProjects(BASE, CinderBase): - """Represent projects associated volume_types.""" - __tablename__ = "volume_type_projects" - __table_args__ = (schema.UniqueConstraint( - "volume_type_id", "project_id", "deleted", - name="uniq_volume_type_projects0volume_type_id0project_id0deleted"), - ) - id = Column(Integer, primary_key=True) - volume_type_id = Column(String, ForeignKey('volume_types.id'), - nullable=False) - project_id = Column(String(255)) - deleted = Column(Integer, default=0) - - volume_type = relationship( - VolumeTypes, - backref="projects", - foreign_keys=volume_type_id, - primaryjoin='and_(' - 'VolumeTypeProjects.volume_type_id == VolumeTypes.id,' - 'VolumeTypeProjects.deleted == 0)') - - -class GroupTypeProjects(BASE, CinderBase): - """Represent projects associated group_types.""" - __tablename__ = "group_type_projects" - __table_args__ = (schema.UniqueConstraint( - "group_type_id", "project_id", "deleted", - name="uniq_group_type_projects0group_type_id0project_id0deleted"), - ) - id = Column(Integer, primary_key=True) - group_type_id = Column(String, ForeignKey('group_types.id'), - nullable=False) - project_id = Column(String(255)) - deleted = Column(Integer, 
default=0) - - group_type = relationship( - GroupTypes, - backref="projects", - foreign_keys=group_type_id, - primaryjoin='and_(' - 'GroupTypeProjects.group_type_id == GroupTypes.id,' - 'GroupTypeProjects.deleted == 0)') - - -class VolumeTypeExtraSpecs(BASE, CinderBase): - """Represents additional specs as key/value pairs for a volume_type.""" - __tablename__ = 'volume_type_extra_specs' - id = Column(Integer, primary_key=True) - key = Column(String(255)) - value = Column(String(255)) - volume_type_id = Column(String(36), - ForeignKey('volume_types.id'), - nullable=False) - volume_type = relationship( - VolumeTypes, - backref="extra_specs", - foreign_keys=volume_type_id, - primaryjoin='and_(' - 'VolumeTypeExtraSpecs.volume_type_id == VolumeTypes.id,' - 'VolumeTypeExtraSpecs.deleted == False)' - ) - - -class GroupTypeSpecs(BASE, CinderBase): - """Represents additional specs as key/value pairs for a group_type.""" - __tablename__ = 'group_type_specs' - id = Column(Integer, primary_key=True) - key = Column(String(255)) - value = Column(String(255)) - group_type_id = Column(String(36), - ForeignKey('group_types.id'), - nullable=False) - group_type = relationship( - GroupTypes, - backref="group_specs", - foreign_keys=group_type_id, - primaryjoin='and_(' - 'GroupTypeSpecs.group_type_id == GroupTypes.id,' - 'GroupTypeSpecs.deleted == False)' - ) - - -class QualityOfServiceSpecs(BASE, CinderBase): - """Represents QoS specs as key/value pairs. - - QoS specs is standalone entity that can be associated/disassociated - with volume types (one to many relation). Adjacency list relationship - pattern is used in this model in order to represent following hierarchical - data with in flat table, e.g, following structure: - - .. 
code-block:: none - - qos-specs-1 'Rate-Limit' - | - +------> consumer = 'front-end' - +------> total_bytes_sec = 1048576 - +------> total_iops_sec = 500 - - qos-specs-2 'QoS_Level1' - | - +------> consumer = 'back-end' - +------> max-iops = 1000 - +------> min-iops = 200 - - is represented by: - - id specs_id key value - ------ -------- ------------- ----- - UUID-1 NULL QoSSpec_Name Rate-Limit - UUID-2 UUID-1 consumer front-end - UUID-3 UUID-1 total_bytes_sec 1048576 - UUID-4 UUID-1 total_iops_sec 500 - UUID-5 NULL QoSSpec_Name QoS_Level1 - UUID-6 UUID-5 consumer back-end - UUID-7 UUID-5 max-iops 1000 - UUID-8 UUID-5 min-iops 200 - """ - __tablename__ = 'quality_of_service_specs' - id = Column(String(36), primary_key=True) - specs_id = Column(String(36), ForeignKey(id)) - key = Column(String(255)) - value = Column(String(255)) - - specs = relationship( - "QualityOfServiceSpecs", - cascade="all, delete-orphan", - backref=backref("qos_spec", remote_side=id), - ) - - vol_types = relationship( - VolumeTypes, - backref=backref('qos_specs'), - foreign_keys=id, - primaryjoin='and_(' - 'or_(VolumeTypes.qos_specs_id == ' - 'QualityOfServiceSpecs.id,' - 'VolumeTypes.qos_specs_id == ' - 'QualityOfServiceSpecs.specs_id),' - 'QualityOfServiceSpecs.deleted == False)') - - -class VolumeGlanceMetadata(BASE, CinderBase): - """Glance metadata for a bootable volume.""" - __tablename__ = 'volume_glance_metadata' - id = Column(Integer, primary_key=True, nullable=False) - volume_id = Column(String(36), ForeignKey('volumes.id')) - snapshot_id = Column(String(36), ForeignKey('snapshots.id')) - key = Column(String(255)) - value = Column(Text) - volume = relationship(Volume, backref="volume_glance_metadata", - foreign_keys=volume_id, - primaryjoin='and_(' - 'VolumeGlanceMetadata.volume_id == Volume.id,' - 'VolumeGlanceMetadata.deleted == False)') - - -class Quota(BASE, CinderBase): - """Represents a single quota override for a project. 
- - If there is no row for a given project id and resource, then the - default for the quota class is used. If there is no row for a - given quota class and resource, then the default for the - deployment is used. If the row is present but the hard limit is - Null, then the resource is unlimited. - """ - - __tablename__ = 'quotas' - id = Column(Integer, primary_key=True) - - project_id = Column(String(255), index=True) - - resource = Column(String(255)) - hard_limit = Column(Integer, nullable=True) - allocated = Column(Integer, default=0) - - -class QuotaClass(BASE, CinderBase): - """Represents a single quota override for a quota class. - - If there is no row for a given quota class and resource, then the - default for the deployment is used. If the row is present but the - hard limit is Null, then the resource is unlimited. - """ - - __tablename__ = 'quota_classes' - id = Column(Integer, primary_key=True) - - class_name = Column(String(255), index=True) - - resource = Column(String(255)) - hard_limit = Column(Integer, nullable=True) - - -class QuotaUsage(BASE, CinderBase): - """Represents the current usage for a given resource.""" - - __tablename__ = 'quota_usages' - id = Column(Integer, primary_key=True) - - project_id = Column(String(255), index=True) - resource = Column(String(255)) - - in_use = Column(Integer) - reserved = Column(Integer) - - @property - def total(self): - return self.in_use + self.reserved - - until_refresh = Column(Integer, nullable=True) - - -class Reservation(BASE, CinderBase): - """Represents a resource reservation for quotas.""" - - __tablename__ = 'reservations' - id = Column(Integer, primary_key=True) - uuid = Column(String(36), nullable=False) - - usage_id = Column(Integer, ForeignKey('quota_usages.id'), nullable=True) - allocated_id = Column(Integer, ForeignKey('quotas.id'), nullable=True) - - project_id = Column(String(255), index=True) - resource = Column(String(255)) - - delta = Column(Integer) - expire = Column(DateTime, 
nullable=False) - - usage = relationship( - "QuotaUsage", - foreign_keys=usage_id, - primaryjoin='and_(Reservation.usage_id == QuotaUsage.id,' - 'QuotaUsage.deleted == 0)') - quota = relationship( - "Quota", - foreign_keys=allocated_id, - primaryjoin='and_(Reservation.allocated_id == Quota.id)') - - -class Snapshot(BASE, CinderBase): - """Represents a snapshot of volume.""" - __tablename__ = 'snapshots' - id = Column(String(36), primary_key=True) - - @property - def name(self): - return CONF.snapshot_name_template % self.id - - @property - def volume_name(self): - return self.volume.name # pylint: disable=E1101 - - user_id = Column(String(255)) - project_id = Column(String(255)) - - volume_id = Column(String(36)) - cgsnapshot_id = Column(String(36)) - group_snapshot_id = Column(String(36)) - status = Column(String(255)) - progress = Column(String(255)) - volume_size = Column(Integer) - - display_name = Column(String(255)) - display_description = Column(String(255)) - - encryption_key_id = Column(String(36)) - volume_type_id = Column(String(36)) - - provider_location = Column(String(255)) - provider_id = Column(String(255)) - provider_auth = Column(String(255)) - - volume = relationship(Volume, backref="snapshots", - foreign_keys=volume_id, - primaryjoin='Snapshot.volume_id == Volume.id') - - cgsnapshot = relationship( - Cgsnapshot, - backref="snapshots", - foreign_keys=cgsnapshot_id, - primaryjoin='Snapshot.cgsnapshot_id == Cgsnapshot.id') - - group_snapshot = relationship( - GroupSnapshot, - backref="snapshots", - foreign_keys=group_snapshot_id, - primaryjoin='Snapshot.group_snapshot_id == GroupSnapshot.id') - - -class SnapshotMetadata(BASE, CinderBase): - """Represents a metadata key/value pair for a snapshot.""" - __tablename__ = 'snapshot_metadata' - id = Column(Integer, primary_key=True) - key = Column(String(255)) - value = Column(String(255)) - snapshot_id = Column(String(36), - ForeignKey('snapshots.id'), - nullable=False) - snapshot = 
relationship(Snapshot, backref="snapshot_metadata", - foreign_keys=snapshot_id, - primaryjoin='and_(' - 'SnapshotMetadata.snapshot_id == Snapshot.id,' - 'SnapshotMetadata.deleted == False)') - - -class Backup(BASE, CinderBase): - """Represents a backup of a volume to Swift.""" - __tablename__ = 'backups' - id = Column(String(36), primary_key=True) - - @property - def name(self): - return CONF.backup_name_template % self.id - - user_id = Column(String(255), nullable=False) - project_id = Column(String(255), nullable=False) - - volume_id = Column(String(36), nullable=False) - host = Column(String(255)) - availability_zone = Column(String(255)) - display_name = Column(String(255)) - display_description = Column(String(255)) - container = Column(String(255)) - parent_id = Column(String(36)) - status = Column(String(255)) - fail_reason = Column(String(255)) - service_metadata = Column(String(255)) - service = Column(String(255)) - size = Column(Integer) - object_count = Column(Integer) - temp_volume_id = Column(String(36)) - temp_snapshot_id = Column(String(36)) - num_dependent_backups = Column(Integer) - snapshot_id = Column(String(36)) - data_timestamp = Column(DateTime) - restore_volume_id = Column(String(36)) - - @validates('fail_reason') - def validate_fail_reason(self, key, fail_reason): - return fail_reason and fail_reason[:255] or '' - - -class Encryption(BASE, CinderBase): - """Represents encryption requirement for a volume type. - - Encryption here is a set of performance characteristics describing - cipher, provider, and key_size for a certain volume type. 
- """ - - __tablename__ = 'encryption' - encryption_id = Column(String(36), primary_key=True) - cipher = Column(String(255)) - key_size = Column(Integer) - provider = Column(String(255)) - control_location = Column(String(255)) - volume_type_id = Column(String(36), ForeignKey('volume_types.id')) - volume_type = relationship( - VolumeTypes, - backref="encryption", - foreign_keys=volume_type_id, - primaryjoin='and_(' - 'Encryption.volume_type_id == VolumeTypes.id,' - 'Encryption.deleted == False)' - ) - - -class Transfer(BASE, CinderBase): - """Represents a volume transfer request.""" - __tablename__ = 'transfers' - id = Column(String(36), primary_key=True) - volume_id = Column(String(36), ForeignKey('volumes.id')) - display_name = Column(String(255)) - salt = Column(String(255)) - crypt_hash = Column(String(255)) - expires_at = Column(DateTime) - volume = relationship(Volume, backref="transfer", - foreign_keys=volume_id, - primaryjoin='and_(' - 'Transfer.volume_id == Volume.id,' - 'Transfer.deleted == False)') - - -class DriverInitiatorData(BASE, models.TimestampMixin, models.ModelBase): - """Represents private key-value pair specific an initiator for drivers""" - __tablename__ = 'driver_initiator_data' - __table_args__ = ( - schema.UniqueConstraint("initiator", "namespace", "key"), - {'mysql_engine': 'InnoDB'} - ) - id = Column(Integer, primary_key=True, nullable=False) - initiator = Column(String(255), index=True, nullable=False) - namespace = Column(String(255), nullable=False) - key = Column(String(255), nullable=False) - value = Column(String(255)) - - -class Message(BASE, CinderBase): - """Represents a message""" - __tablename__ = 'messages' - id = Column(String(36), primary_key=True, nullable=False) - project_id = Column(String(255), nullable=False) - # Info/Error/Warning. - message_level = Column(String(255), nullable=False) - request_id = Column(String(255), nullable=True) - resource_type = Column(String(255)) - # The UUID of the related resource. 
- resource_uuid = Column(String(36), nullable=True) - # Operation specific event ID. - event_id = Column(String(255), nullable=False) - # Message detail ID. - detail_id = Column(String(10), nullable=True) - # Operation specific action. - action_id = Column(String(10), nullable=True) - # After this time the message may no longer exist - expires_at = Column(DateTime, nullable=True) - - -class ImageVolumeCacheEntry(BASE, models.ModelBase): - """Represents an image volume cache entry""" - __tablename__ = 'image_volume_cache_entries' - id = Column(Integer, primary_key=True, nullable=False) - host = Column(String(255), index=True, nullable=False) - cluster_name = Column(String(255), nullable=True) - image_id = Column(String(36), index=True, nullable=False) - image_updated_at = Column(DateTime, nullable=False) - volume_id = Column(String(36), nullable=False) - size = Column(Integer, nullable=False) - last_used = Column(DateTime, default=lambda: timeutils.utcnow()) - - -class Worker(BASE, CinderBase): - """Represents all resources that are being worked on by a node.""" - __tablename__ = 'workers' - __table_args__ = (schema.UniqueConstraint('resource_type', 'resource_id'), - {'mysql_engine': 'InnoDB'}) - - # We want to overwrite default updated_at definition so we timestamp at - # creation as well - updated_at = Column(DateTime, default=timeutils.utcnow, - onupdate=timeutils.utcnow) - - # Id added for convenience and speed on some operations - id = Column(Integer, primary_key=True, autoincrement=True) - - # Type of the resource we are working on (Volume, Snapshot, Backup) it must - # match the Versioned Object class name. 
- resource_type = Column(String(40), primary_key=True, nullable=False) - # UUID of the resource we are working on - resource_id = Column(String(36), primary_key=True, nullable=False) - - # Status that should be cleaned on service failure - status = Column(String(255), nullable=False) - - # Service that is currently processing the operation - service_id = Column(Integer, nullable=True) - - # To prevent claiming and updating races - race_preventer = Column(Integer, nullable=False, default=0) - - # This is a flag we don't need to store in the DB as it is only used when - # we are doing the cleanup to let decorators know - cleaning = False - - service = relationship( - 'Service', - backref="workers", - foreign_keys=service_id, - primaryjoin='Worker.service_id == Service.id') - - -class AttachmentSpecs(BASE, CinderBase): - """Represents attachment specs as k/v pairs for a volume_attachment.""" - - __tablename__ = 'attachment_specs' - id = Column(Integer, primary_key=True) - key = Column(String(255)) - value = Column(String(255)) - attachment_id = ( - Column(String(36), - ForeignKey('volume_attachment.id'), - nullable=False)) - volume_attachment = relationship( - VolumeAttachment, - backref="attachment_specs", - foreign_keys=attachment_id, - primaryjoin='and_(' - 'AttachmentSpecs.attachment_id == VolumeAttachment.id,' - 'AttachmentSpecs.deleted == False)' - ) diff --git a/cinder/exception.py b/cinder/exception.py deleted file mode 100644 index 66fb30d49..000000000 --- a/cinder/exception.py +++ /dev/null @@ -1,1428 +0,0 @@ -# Copyright 2010 United States Government as represented by the -# Administrator of the National Aeronautics and Space Administration. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. 
You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -"""Cinder base exception handling. - -Includes decorator for re-raising Cinder-type exceptions. - -SHOULD include dedicated exception logging. - -""" - -import sys - -from oslo_config import cfg -from oslo_log import log as logging -from oslo_versionedobjects import exception as obj_exc -import six -import webob.exc -from webob.util import status_generic_reasons -from webob.util import status_reasons - -from cinder.i18n import _ - - -LOG = logging.getLogger(__name__) - -exc_log_opts = [ - cfg.BoolOpt('fatal_exception_format_errors', - default=False, - help='Make exception message format errors fatal.'), -] - -CONF = cfg.CONF -CONF.register_opts(exc_log_opts) - - -class ConvertedException(webob.exc.WSGIHTTPException): - def __init__(self, code=500, title="", explanation=""): - self.code = code - # There is a strict rule about constructing status line for HTTP: - # '...Status-Line, consisting of the protocol version followed by a - # numeric status code and its associated textual phrase, with each - # element separated by SP characters' - # (http://www.faqs.org/rfcs/rfc2616.html) - # 'code' and 'title' can not be empty because they correspond - # to numeric status code and its associated text - if title: - self.title = title - else: - try: - self.title = status_reasons[self.code] - except KeyError: - generic_code = self.code // 100 - self.title = status_generic_reasons[generic_code] - self.explanation = explanation - super(ConvertedException, self).__init__() - - -class Error(Exception): - pass - - -class CinderException(Exception): - """Base Cinder 
Exception - - To correctly use this class, inherit from it and define - a 'message' property. That message will get printf'd - with the keyword arguments provided to the constructor. - - """ - message = _("An unknown exception occurred.") - code = 500 - headers = {} - safe = False - - def __init__(self, message=None, **kwargs): - self.kwargs = kwargs - self.kwargs['message'] = message - - if 'code' not in self.kwargs: - try: - self.kwargs['code'] = self.code - except AttributeError: - pass - - for k, v in self.kwargs.items(): - if isinstance(v, Exception): - # NOTE(tommylikehu): If this is a cinder exception it will - # return the msg object, so we won't be preventing - # translations. - self.kwargs[k] = six.text_type(v) - - if self._should_format(): - try: - message = self.message % kwargs - - except Exception: - exc_info = sys.exc_info() - # kwargs doesn't match a variable in the message - # log the issue and the kwargs - LOG.exception('Exception in string format operation') - for name, value in kwargs.items(): - LOG.error("%(name)s: %(value)s", - {'name': name, 'value': value}) - if CONF.fatal_exception_format_errors: - six.reraise(*exc_info) - # at least get the core message out if something happened - message = self.message - elif isinstance(message, Exception): - # NOTE(tommylikehu): If this is a cinder exception it will - # return the msg object, so we won't be preventing - # translations. 
- message = six.text_type(message) - - # NOTE(luisg): We put the actual message in 'msg' so that we can access - # it, because if we try to access the message via 'message' it will be - # overshadowed by the class' message attribute - self.msg = message - super(CinderException, self).__init__(message) - - def _should_format(self): - return self.kwargs['message'] is None or '%(message)' in self.message - - # NOTE(tommylikehu): self.msg is already an unicode compatible object - # as the __init__ method ensures of it, and we should not be modifying - # it in any way with str(), unicode(), or six.text_type() as we would - # be preventing translations from happening. - def __unicode__(self): - return self.msg - - -class VolumeBackendAPIException(CinderException): - message = _("Bad or unexpected response from the storage volume " - "backend API: %(data)s") - - -class VolumeDriverException(CinderException): - message = _("Volume driver reported an error: %(message)s") - - -class BackupDriverException(CinderException): - message = _("Backup driver reported an error: %(message)s") - - -class GlanceConnectionFailed(CinderException): - message = _("Connection to glance failed: %(reason)s") - - -class ProgrammingError(CinderException): - message = _('Programming error in Cinder: %(reason)s') - - -class NotAuthorized(CinderException): - message = _("Not authorized.") - code = 403 - - -class AdminRequired(NotAuthorized): - message = _("User does not have admin privileges") - - -class PolicyNotAuthorized(NotAuthorized): - message = _("Policy doesn't allow %(action)s to be performed.") - - -class ImageNotAuthorized(CinderException): - message = _("Not authorized for image %(image_id)s.") - - -class DriverNotInitialized(CinderException): - message = _("Volume driver not ready.") - - -class Invalid(CinderException): - message = _("Unacceptable parameters.") - code = 400 - - -class InvalidSnapshot(Invalid): - message = _("Invalid snapshot: %(reason)s") - - -class 
InvalidVolumeAttachMode(Invalid): - message = _("Invalid attaching mode '%(mode)s' for " - "volume %(volume_id)s.") - - -class VolumeAttached(Invalid): - message = _("Volume %(volume_id)s is still attached, detach volume first.") - - -class InvalidResults(Invalid): - message = _("The results are invalid.") - - -class InvalidInput(Invalid): - message = _("Invalid input received: %(reason)s") - - -class InvalidVolumeType(Invalid): - message = _("Invalid volume type: %(reason)s") - - -class InvalidGroupType(Invalid): - message = _("Invalid group type: %(reason)s") - - -class InvalidVolume(Invalid): - message = _("Invalid volume: %(reason)s") - - -class InvalidContentType(Invalid): - message = _("Invalid content type %(content_type)s.") - - -class InvalidHost(Invalid): - message = _("Invalid host: %(reason)s") - - -# Cannot be templated as the error syntax varies. -# msg needs to be constructed when raised. -class InvalidParameterValue(Invalid): - message = "%(err)s" - - -class InvalidAuthKey(Invalid): - message = _("Invalid auth key: %(reason)s") - - -class InvalidConfigurationValue(Invalid): - message = _('Value "%(value)s" is not valid for ' - 'configuration option "%(option)s"') - - -class ServiceUnavailable(Invalid): - message = _("Service is unavailable at this time.") - - -class UnavailableDuringUpgrade(Invalid): - message = _('Cannot perform %(action)s during system upgrade.') - - -class ImageUnacceptable(Invalid): - message = _("Image %(image_id)s is unacceptable: %(reason)s") - - -class ImageTooBig(Invalid): - message = _("Image %(image_id)s size exceeded available " - "disk space: %(reason)s") - - -class DeviceUnavailable(Invalid): - message = _("The device in the path %(path)s is unavailable: %(reason)s") - - -class SnapshotUnavailable(VolumeBackendAPIException): - message = _("The snapshot is unavailable: %(data)s") - - -class InvalidUUID(Invalid): - message = _("Expected a UUID but received %(uuid)s.") - - -class InvalidAPIVersionString(Invalid): - 
message = _("API Version String %(version)s is of invalid format. Must " - "be of format MajorNum.MinorNum.") - - -class VersionNotFoundForAPIMethod(Invalid): - message = _("API version %(version)s is not supported on this method.") - - -class InvalidGlobalAPIVersion(Invalid): - message = _("Version %(req_ver)s is not supported by the API. Minimum " - "is %(min_ver)s and maximum is %(max_ver)s.") - - -class MissingRequired(Invalid): - message = _("Missing required element '%(element)s' in request body.") - - -class ValidationError(Invalid): - message = "%(detail)s" - - -class APIException(CinderException): - message = _("Error while requesting %(service)s API.") - - def __init__(self, message=None, **kwargs): - if 'service' not in kwargs: - kwargs['service'] = 'unknown' - super(APIException, self).__init__(message, **kwargs) - - -class APITimeout(APIException): - message = _("Timeout while requesting %(service)s API.") - - -class RPCTimeout(CinderException): - message = _("Timeout while requesting capabilities from backend " - "%(service)s.") - code = 502 - - -class Duplicate(CinderException): - pass - - -class NotFound(CinderException): - message = _("Resource could not be found.") - code = 404 - safe = True - - -class VolumeNotFound(NotFound): - message = _("Volume %(volume_id)s could not be found.") - - -class MessageNotFound(NotFound): - message = _("Message %(message_id)s could not be found.") - - -class VolumeAttachmentNotFound(NotFound): - message = _("Volume attachment could not be found with " - "filter: %(filter)s .") - - -class VolumeMetadataNotFound(NotFound): - message = _("Volume %(volume_id)s has no metadata with " - "key %(metadata_key)s.") - - -class VolumeAdminMetadataNotFound(NotFound): - message = _("Volume %(volume_id)s has no administration metadata with " - "key %(metadata_key)s.") - - -class InvalidVolumeMetadata(Invalid): - message = _("Invalid metadata: %(reason)s") - - -class InvalidVolumeMetadataSize(Invalid): - message = _("Invalid 
metadata size: %(reason)s") - - -class SnapshotMetadataNotFound(NotFound): - message = _("Snapshot %(snapshot_id)s has no metadata with " - "key %(metadata_key)s.") - - -class VolumeTypeNotFound(NotFound): - message = _("Volume type %(volume_type_id)s could not be found.") - - -class VolumeTypeNotFoundByName(VolumeTypeNotFound): - message = _("Volume type with name %(volume_type_name)s " - "could not be found.") - - -class VolumeTypeAccessNotFound(NotFound): - message = _("Volume type access not found for %(volume_type_id)s / " - "%(project_id)s combination.") - - -class VolumeTypeExtraSpecsNotFound(NotFound): - message = _("Volume Type %(volume_type_id)s has no extra specs with " - "key %(extra_specs_key)s.") - - -class VolumeTypeInUse(CinderException): - message = _("Volume Type %(volume_type_id)s deletion is not allowed with " - "volumes present with the type.") - - -class GroupTypeNotFound(NotFound): - message = _("Group type %(group_type_id)s could not be found.") - - -class GroupTypeNotFoundByName(GroupTypeNotFound): - message = _("Group type with name %(group_type_name)s " - "could not be found.") - - -class GroupTypeAccessNotFound(NotFound): - message = _("Group type access not found for %(group_type_id)s / " - "%(project_id)s combination.") - - -class GroupTypeSpecsNotFound(NotFound): - message = _("Group Type %(group_type_id)s has no specs with " - "key %(group_specs_key)s.") - - -class GroupTypeInUse(CinderException): - message = _("Group Type %(group_type_id)s deletion is not allowed with " - "groups present with the type.") - - -class SnapshotNotFound(NotFound): - message = _("Snapshot %(snapshot_id)s could not be found.") - - -class ServerNotFound(NotFound): - message = _("Instance %(uuid)s could not be found.") - - -class VolumeSnapshotNotFound(NotFound): - message = _("No snapshots found for volume %(volume_id)s.") - - -class VolumeIsBusy(CinderException): - message = _("deleting volume %(volume_name)s that has snapshot") - - -class 
SnapshotIsBusy(CinderException): - message = _("deleting snapshot %(snapshot_name)s that has " - "dependent volumes") - - -class ISCSITargetNotFoundForVolume(NotFound): - message = _("No target id found for volume %(volume_id)s.") - - -class InvalidImageRef(Invalid): - message = _("Invalid image href %(image_href)s.") - - -class ImageNotFound(NotFound): - message = _("Image %(image_id)s could not be found.") - - -class ServiceNotFound(NotFound): - - def __init__(self, message=None, **kwargs): - if not message: - if kwargs.get('host', None): - self.message = _("Service %(service_id)s could not be " - "found on host %(host)s.") - else: - self.message = _("Service %(service_id)s could not be found.") - super(ServiceNotFound, self).__init__(message, **kwargs) - - -class ServiceTooOld(Invalid): - message = _("Service is too old to fulfil this request.") - - -class WorkerNotFound(NotFound): - message = _("Worker with %s could not be found.") - - def __init__(self, message=None, **kwargs): - keys_list = ('{0}=%({0})s'.format(key) for key in kwargs) - placeholder = ', '.join(keys_list) - self.message = self.message % placeholder - super(WorkerNotFound, self).__init__(message, **kwargs) - - -class WorkerExists(Duplicate): - message = _("Worker for %(type)s %(id)s already exists.") - - -class CleanableInUse(Invalid): - message = _('%(type)s with id %(id)s is already being cleaned up or ' - 'another host has taken over it.') - - -class ClusterNotFound(NotFound): - message = _('Cluster %(id)s could not be found.') - - -class ClusterHasHosts(Invalid): - message = _("Cluster %(id)s still has hosts.") - - -class ClusterExists(Duplicate): - message = _("Cluster %(name)s already exists.") - - -class HostNotFound(NotFound): - message = _("Host %(host)s could not be found.") - - -class SchedulerHostFilterNotFound(NotFound): - message = _("Scheduler Host Filter %(filter_name)s could not be found.") - - -class SchedulerHostWeigherNotFound(NotFound): - message = _("Scheduler Host 
Weigher %(weigher_name)s could not be found.") - - -class InvalidReservationExpiration(Invalid): - message = _("Invalid reservation expiration %(expire)s.") - - -class InvalidQuotaValue(Invalid): - message = _("Change would make usage less than 0 for the following " - "resources: %(unders)s") - - -class InvalidNestedQuotaSetup(CinderException): - message = _("Project quotas are not properly setup for nested quotas: " - "%(reason)s.") - - -class QuotaNotFound(NotFound): - message = _("Quota could not be found") - - -class QuotaResourceUnknown(QuotaNotFound): - message = _("Unknown quota resources %(unknown)s.") - - -class ProjectQuotaNotFound(QuotaNotFound): - message = _("Quota for project %(project_id)s could not be found.") - - -class QuotaClassNotFound(QuotaNotFound): - message = _("Quota class %(class_name)s could not be found.") - - -class QuotaUsageNotFound(QuotaNotFound): - message = _("Quota usage for project %(project_id)s could not be found.") - - -class ReservationNotFound(QuotaNotFound): - message = _("Quota reservation %(uuid)s could not be found.") - - -class OverQuota(CinderException): - message = _("Quota exceeded for resources: %(overs)s") - - -class FileNotFound(NotFound): - message = _("File %(file_path)s could not be found.") - - -class VolumeTypeExists(Duplicate): - message = _("Volume Type %(id)s already exists.") - - -class VolumeTypeAccessExists(Duplicate): - message = _("Volume type access for %(volume_type_id)s / " - "%(project_id)s combination already exists.") - - -class VolumeTypeEncryptionExists(Invalid): - message = _("Volume type encryption for type %(type_id)s already exists.") - - -class VolumeTypeEncryptionNotFound(NotFound): - message = _("Volume type encryption for type %(type_id)s does not exist.") - - -class GroupTypeExists(Duplicate): - message = _("Group Type %(id)s already exists.") - - -class GroupTypeAccessExists(Duplicate): - message = _("Group type access for %(group_type_id)s / " - "%(project_id)s combination already 
exists.") - - -class GroupVolumeTypeMappingExists(Duplicate): - message = _("Group volume type mapping for %(group_id)s / " - "%(volume_type_id)s combination already exists.") - - -class GroupTypeEncryptionExists(Invalid): - message = _("Group type encryption for type %(type_id)s already exists.") - - -class GroupTypeEncryptionNotFound(NotFound): - message = _("Group type encryption for type %(type_id)s does not exist.") - - -class MalformedRequestBody(CinderException): - message = _("Malformed message body: %(reason)s") - - -class ConfigNotFound(NotFound): - message = _("Could not find config at %(path)s") - - -class ParameterNotFound(NotFound): - message = _("Could not find parameter %(param)s") - - -class NoValidBackend(CinderException): - message = _("No valid backend was found. %(reason)s") - - -class NoMoreTargets(CinderException): - """No more available targets.""" - pass - - -class QuotaError(CinderException): - message = _("Quota exceeded: code=%(code)s") - code = 413 - headers = {'Retry-After': '0'} - safe = True - - -class VolumeSizeExceedsAvailableQuota(QuotaError): - message = _("Requested volume or snapshot exceeds allowed %(name)s " - "quota. Requested %(requested)sG, quota is %(quota)sG and " - "%(consumed)sG has been consumed.") - - def __init__(self, message=None, **kwargs): - kwargs.setdefault('name', 'gigabytes') - super(VolumeSizeExceedsAvailableQuota, self).__init__( - message, **kwargs) - - -class VolumeSizeExceedsLimit(QuotaError): - message = _("Requested volume size %(size)dG is larger than " - "maximum allowed limit %(limit)dG.") - - -class VolumeBackupSizeExceedsAvailableQuota(QuotaError): - message = _("Requested backup exceeds allowed Backup gigabytes " - "quota. 
Requested %(requested)sG, quota is %(quota)sG and " - "%(consumed)sG has been consumed.") - - -class VolumeLimitExceeded(QuotaError): - message = _("Maximum number of volumes allowed (%(allowed)d) exceeded for " - "quota '%(name)s'.") - - def __init__(self, message=None, **kwargs): - kwargs.setdefault('name', 'volumes') - super(VolumeLimitExceeded, self).__init__(message, **kwargs) - - -class SnapshotLimitExceeded(QuotaError): - message = _("Maximum number of snapshots allowed (%(allowed)d) exceeded") - - -class UnexpectedOverQuota(QuotaError): - message = _("Unexpected over quota on %(name)s.") - - -class BackupLimitExceeded(QuotaError): - message = _("Maximum number of backups allowed (%(allowed)d) exceeded") - - -class ImageLimitExceeded(QuotaError): - message = _("Image quota exceeded") - - -class DuplicateSfVolumeNames(Duplicate): - message = _("Detected more than one volume with name %(vol_name)s") - - -class VolumeTypeCreateFailed(CinderException): - message = _("Cannot create volume_type with " - "name %(name)s and specs %(extra_specs)s") - - -class VolumeTypeUpdateFailed(CinderException): - message = _("Cannot update volume_type %(id)s") - - -class GroupTypeCreateFailed(CinderException): - message = _("Cannot create group_type with " - "name %(name)s and specs %(group_specs)s") - - -class GroupTypeUpdateFailed(CinderException): - message = _("Cannot update group_type %(id)s") - - -class GroupLimitExceeded(QuotaError): - message = _("Maximum number of groups allowed (%(allowed)d) exceeded") - - -class UnknownCmd(VolumeDriverException): - message = _("Unknown or unsupported command %(cmd)s") - - -class MalformedResponse(VolumeDriverException): - message = _("Malformed response to command %(cmd)s: %(reason)s") - - -class FailedCmdWithDump(VolumeDriverException): - message = _("Operation failed with status=%(status)s. 
Full dump: %(data)s") - - -class InvalidConnectorException(VolumeDriverException): - message = _("Connector doesn't have required information: %(missing)s") - - -class GlanceMetadataExists(Invalid): - message = _("Glance metadata cannot be updated, key %(key)s" - " exists for volume id %(volume_id)s") - - -class GlanceMetadataNotFound(NotFound): - message = _("Glance metadata for volume/snapshot %(id)s cannot be found.") - - -class ExportFailure(Invalid): - message = _("Failed to export for volume: %(reason)s") - - -class RemoveExportException(VolumeDriverException): - message = _("Failed to remove export for volume %(volume)s: %(reason)s") - - -class MetadataCreateFailure(Invalid): - message = _("Failed to create metadata for volume: %(reason)s") - - -class MetadataUpdateFailure(Invalid): - message = _("Failed to update metadata for volume: %(reason)s") - - -class MetadataCopyFailure(Invalid): - message = _("Failed to copy metadata to volume: %(reason)s") - - -class InvalidMetadataType(Invalid): - message = _("The type of metadata: %(metadata_type)s for volume/snapshot " - "%(id)s is invalid.") - - -class ImageCopyFailure(Invalid): - message = _("Failed to copy image to volume: %(reason)s") - - -class BackupInvalidCephArgs(BackupDriverException): - message = _("Invalid Ceph args provided for backup rbd operation") - - -class BackupOperationError(Invalid): - message = _("An error has occurred during backup operation") - - -class BackupMetadataUnsupportedVersion(BackupDriverException): - message = _("Unsupported backup metadata version requested") - - -class BackupVerifyUnsupportedDriver(BackupDriverException): - message = _("Unsupported backup verify driver") - - -class VolumeMetadataBackupExists(BackupDriverException): - message = _("Metadata backup already exists for this volume") - - -class BackupRBDOperationFailed(BackupDriverException): - message = _("Backup RBD operation failed") - - -class EncryptedBackupOperationFailed(BackupDriverException): - message = 
_("Backup operation of an encrypted volume failed.") - - -class BackupNotFound(NotFound): - message = _("Backup %(backup_id)s could not be found.") - - -class BackupFailedToGetVolumeBackend(NotFound): - message = _("Failed to identify volume backend.") - - -class InvalidBackup(Invalid): - message = _("Invalid backup: %(reason)s") - - -class SwiftConnectionFailed(BackupDriverException): - message = _("Connection to swift failed: %(reason)s") - - -class TransferNotFound(NotFound): - message = _("Transfer %(transfer_id)s could not be found.") - - -class VolumeMigrationFailed(CinderException): - message = _("Volume migration failed: %(reason)s") - - -class SSHInjectionThreat(CinderException): - message = _("SSH command injection detected: %(command)s") - - -class QoSSpecsExists(Duplicate): - message = _("QoS Specs %(specs_id)s already exists.") - - -class QoSSpecsCreateFailed(CinderException): - message = _("Failed to create qos_specs: " - "%(name)s with specs %(qos_specs)s.") - - -class QoSSpecsUpdateFailed(CinderException): - message = _("Failed to update qos_specs: " - "%(specs_id)s with specs %(qos_specs)s.") - - -class QoSSpecsNotFound(NotFound): - message = _("No such QoS spec %(specs_id)s.") - - -class QoSSpecsAssociateFailed(CinderException): - message = _("Failed to associate qos_specs: " - "%(specs_id)s with type %(type_id)s.") - - -class QoSSpecsDisassociateFailed(CinderException): - message = _("Failed to disassociate qos_specs: " - "%(specs_id)s with type %(type_id)s.") - - -class QoSSpecsKeyNotFound(NotFound): - message = _("QoS spec %(specs_id)s has no spec with " - "key %(specs_key)s.") - - -class InvalidQoSSpecs(Invalid): - message = _("Invalid qos specs: %(reason)s") - - -class QoSSpecsInUse(CinderException): - message = _("QoS Specs %(specs_id)s is still associated with entities.") - - -class KeyManagerError(CinderException): - message = _("key manager error: %(reason)s") - - -class ManageExistingInvalidReference(CinderException): - message = 
_("Manage existing volume failed due to invalid backend " - "reference %(existing_ref)s: %(reason)s") - - -class ManageExistingAlreadyManaged(CinderException): - message = _("Unable to manage existing volume. " - "Volume %(volume_ref)s already managed.") - - -class InvalidReplicationTarget(Invalid): - message = _("Invalid Replication Target: %(reason)s") - - -class UnableToFailOver(CinderException): - message = _("Unable to failover to replication target: %(reason)s).") - - -class ReplicationError(CinderException): - message = _("Volume %(volume_id)s replication " - "error: %(reason)s") - - -class ReplicationGroupError(CinderException): - message = _("Group %(group_id)s replication " - "error: %(reason)s.") - - -class ReplicationNotFound(NotFound): - message = _("Volume replication for %(volume_id)s " - "could not be found.") - - -class ManageExistingVolumeTypeMismatch(CinderException): - message = _("Manage existing volume failed due to volume type mismatch: " - "%(reason)s") - - -class ExtendVolumeError(CinderException): - message = _("Error extending volume: %(reason)s") - - -class EvaluatorParseException(Exception): - message = _("Error during evaluator parsing: %(reason)s") - - -class LockCreationFailed(CinderException): - message = _('Unable to create lock. Coordination backend not started.') - - -class LockingFailed(CinderException): - message = _('Lock acquisition failed.') - - -UnsupportedObjectError = obj_exc.UnsupportedObjectError -OrphanedObjectError = obj_exc.OrphanedObjectError -IncompatibleObjectVersion = obj_exc.IncompatibleObjectVersion -ReadOnlyFieldError = obj_exc.ReadOnlyFieldError -ObjectActionError = obj_exc.ObjectActionError -ObjectFieldInvalid = obj_exc.ObjectFieldInvalid - - -class CappedVersionUnknown(CinderException): - message = _("Unrecoverable Error: Versioned Objects in DB are capped to " - "unknown version %(version)s. Most likely your environment " - "contains only new services and you're trying to start an " - "older one. 
Use `cinder-manage service list` to check that " - "and upgrade this service.") - - -class VolumeGroupNotFound(CinderException): - message = _('Unable to find Volume Group: %(vg_name)s') - - -class VolumeGroupCreationFailed(CinderException): - message = _('Failed to create Volume Group: %(vg_name)s') - - -class VolumeNotDeactivated(CinderException): - message = _('Volume %(name)s was not deactivated in time.') - - -class VolumeDeviceNotFound(CinderException): - message = _('Volume device not found at %(device)s.') - - -# Driver specific exceptions -# Dell -class DellDriverRetryableException(VolumeBackendAPIException): - message = _("Retryable Dell Exception encountered") - - -class DellDriverUnknownSpec(VolumeDriverException): - message = _("Dell driver failure: %(reason)s") - - -# Pure Storage -class PureDriverException(VolumeDriverException): - message = _("Pure Storage Cinder driver failure: %(reason)s") - - -class PureRetryableException(VolumeBackendAPIException): - message = _("Retryable Pure Storage Exception encountered") - - -# SolidFire -class SolidFireAPIException(VolumeBackendAPIException): - message = _("Bad response from SolidFire API") - - -class SolidFireDriverException(VolumeDriverException): - message = _("SolidFire Cinder Driver exception") - - -class SolidFireAPIDataException(SolidFireAPIException): - message = _("Error in SolidFire API response: data=%(data)s") - - -class SolidFireAccountNotFound(SolidFireDriverException): - message = _("Unable to locate account %(account_name)s on " - "Solidfire device") - - -class SolidFireRetryableException(VolumeBackendAPIException): - message = _("Retryable SolidFire Exception encountered") - - -# HP 3Par -class Invalid3PARDomain(VolumeDriverException): - message = _("Invalid 3PAR Domain: %(err)s") - - -# RemoteFS drivers -class RemoteFSException(VolumeDriverException): - message = _("Unknown RemoteFS exception") - - -class RemoteFSConcurrentRequest(RemoteFSException): - message = _("A concurrent, possibly 
contradictory, request " - "has been made.") - - -class RemoteFSNoSharesMounted(RemoteFSException): - message = _("No mounted shares found") - - -class RemoteFSNoSuitableShareFound(RemoteFSException): - message = _("There is no share which can host %(volume_size)sG") - - -# NFS driver -class NfsException(RemoteFSException): - message = _("Unknown NFS exception") - - -class NfsNoSharesMounted(RemoteFSNoSharesMounted): - message = _("No mounted NFS shares found") - - -class NfsNoSuitableShareFound(RemoteFSNoSuitableShareFound): - message = _("There is no share which can host %(volume_size)sG") - - -# Smbfs driver -class SmbfsException(RemoteFSException): - message = _("Unknown SMBFS exception.") - - -class SmbfsNoSharesMounted(RemoteFSNoSharesMounted): - message = _("No mounted SMBFS shares found.") - - -class SmbfsNoSuitableShareFound(RemoteFSNoSuitableShareFound): - message = _("There is no share which can host %(volume_size)sG.") - - -# Virtuozzo Storage Driver - -class VzStorageException(RemoteFSException): - message = _("Unknown Virtuozzo Storage exception") - - -class VzStorageNoSharesMounted(RemoteFSNoSharesMounted): - message = _("No mounted Virtuozzo Storage shares found") - - -class VzStorageNoSuitableShareFound(RemoteFSNoSuitableShareFound): - message = _("There is no share which can host %(volume_size)sG") - - -# Fibre Channel Zone Manager -class ZoneManagerException(CinderException): - message = _("Fibre Channel connection control failure: %(reason)s") - - -class FCZoneDriverException(CinderException): - message = _("Fibre Channel Zone operation failed: %(reason)s") - - -class FCSanLookupServiceException(CinderException): - message = _("Fibre Channel SAN Lookup failure: %(reason)s") - - -class ZoneManagerNotInitialized(CinderException): - message = _("Fibre Channel Zone Manager not initialized") - - -class BrocadeZoningCliException(CinderException): - message = _("Brocade Fibre Channel Zoning CLI error: %(reason)s") - - -class 
BrocadeZoningHttpException(CinderException): - message = _("Brocade Fibre Channel Zoning HTTP error: %(reason)s") - - -class CiscoZoningCliException(CinderException): - message = _("Cisco Fibre Channel Zoning CLI error: %(reason)s") - - -class NetAppDriverException(VolumeDriverException): - message = _("NetApp Cinder Driver exception.") - - -class EMCVnxCLICmdError(VolumeBackendAPIException): - message = _("EMC VNX Cinder Driver CLI exception: %(cmd)s " - "(Return Code: %(rc)s) (Output: %(out)s).") - - -class EMCSPUnavailableException(EMCVnxCLICmdError): - message = _("EMC VNX Cinder Driver SPUnavailableException: %(cmd)s " - "(Return Code: %(rc)s) (Output: %(out)s).") - - -# ConsistencyGroup -class ConsistencyGroupNotFound(NotFound): - message = _("ConsistencyGroup %(consistencygroup_id)s could not be found.") - - -class InvalidConsistencyGroup(Invalid): - message = _("Invalid ConsistencyGroup: %(reason)s") - - -# Group -class GroupNotFound(NotFound): - message = _("Group %(group_id)s could not be found.") - - -class InvalidGroup(Invalid): - message = _("Invalid Group: %(reason)s") - - -class InvalidGroupStatus(Invalid): - message = _("Invalid Group Status: %(reason)s") - - -# CgSnapshot -class CgSnapshotNotFound(NotFound): - message = _("CgSnapshot %(cgsnapshot_id)s could not be found.") - - -class InvalidCgSnapshot(Invalid): - message = _("Invalid CgSnapshot: %(reason)s") - - -# GroupSnapshot -class GroupSnapshotNotFound(NotFound): - message = _("GroupSnapshot %(group_snapshot_id)s could not be found.") - - -class InvalidGroupSnapshot(Invalid): - message = _("Invalid GroupSnapshot: %(reason)s") - - -class InvalidGroupSnapshotStatus(Invalid): - message = _("Invalid GroupSnapshot Status: %(reason)s") - - -# Hitachi Block Storage Driver -class HBSDError(VolumeDriverException): - message = _("HBSD error occurs.") - - -class HBSDCmdError(HBSDError): - - def __init__(self, message=None, ret=None, err=None): - self.ret = ret - self.stderr = err - - super(HBSDCmdError, 
self).__init__(message=message) - - -class HBSDBusy(HBSDError): - message = "Device or resource is busy." - - -class HBSDNotFound(NotFound): - message = _("Storage resource could not be found.") - - -class HBSDVolumeIsBusy(VolumeIsBusy): - message = _("Volume %(volume_name)s is busy.") - - -# Hitachi VSP Driver -class VSPError(VolumeDriverException): - message = _("VSP error occurred. %(message)s") - - -class VSPBusy(VSPError): - message = _("Device or resource is busy.") - - -class VSPNotSupported(VSPError): - message = _("The function on the storage is not supported.") - - -# Datera driver -class DateraAPIException(VolumeBackendAPIException): - message = _("Bad response from Datera API") - - -# Target drivers -class ISCSITargetCreateFailed(CinderException): - message = _("Failed to create iscsi target for volume %(volume_id)s.") - - -class ISCSITargetRemoveFailed(CinderException): - message = _("Failed to remove iscsi target for volume %(volume_id)s.") - - -class ISCSITargetAttachFailed(CinderException): - message = _("Failed to attach iSCSI target for volume %(volume_id)s.") - - -class ISCSITargetDetachFailed(CinderException): - message = _("Failed to detach iSCSI target for volume %(volume_id)s.") - - -class ISCSITargetHelperCommandFailed(CinderException): - message = "%(error_message)s" - - -# X-IO driver exception. 
-class XIODriverException(VolumeDriverException): - message = _("X-IO Volume Driver exception!") - - -# Violin Memory drivers -class ViolinInvalidBackendConfig(VolumeDriverException): - message = _("Volume backend config is invalid: %(reason)s") - - -class ViolinRequestRetryTimeout(VolumeDriverException): - message = _("Backend service retry timeout hit: %(timeout)s sec") - - -class ViolinBackendErr(VolumeBackendAPIException): - message = _("Backend reports: %(message)s") - - -class ViolinBackendErrExists(VolumeBackendAPIException): - message = _("Backend reports: item already exists") - - -class ViolinBackendErrNotFound(NotFound): - message = _("Backend reports: item not found") - - -class ViolinResourceNotFound(NotFound): - message = _("Backend reports: %(message)s") - - -class BadHTTPResponseStatus(VolumeDriverException): - message = _("Bad HTTP response status %(status)s") - - -class BadResetResourceStatus(CinderException): - message = _("Bad reset resource status : %(message)s") - - -# ZADARA STORAGE VPSA driver exception -class ZadaraServerCreateFailure(VolumeDriverException): - message = _("Unable to create server object for initiator %(name)s") - - -class ZadaraServerNotFound(NotFound): - message = _("Unable to find server object for initiator %(name)s") - - -class ZadaraVPSANoActiveController(VolumeDriverException): - message = _("Unable to find any active VPSA controller") - - -class ZadaraAttachmentsNotFound(NotFound): - message = _("Failed to retrieve attachments for volume %(name)s") - - -class ZadaraInvalidAttachmentInfo(Invalid): - message = _("Invalid attachment info for volume %(name)s: %(reason)s") - - -class ZadaraVolumeNotFound(VolumeDriverException): - message = "%(reason)s" - - -# ZFSSA NFS driver exception. -class WebDAVClientError(VolumeDriverException): - message = _("The WebDAV request failed. 
Reason: %(msg)s, " - "Return code/reason: %(code)s, Source Volume: %(src)s, " - "Destination Volume: %(dst)s, Method: %(method)s.") - - -# XtremIO Drivers -class XtremIOAlreadyMappedError(VolumeDriverException): - message = _("Volume to Initiator Group mapping already exists") - - -class XtremIOArrayBusy(VolumeDriverException): - message = _("System is busy, retry operation.") - - -class XtremIOSnapshotsLimitExceeded(VolumeDriverException): - message = _("Exceeded the limit of snapshots per volume") - - -# Infortrend EonStor DS Driver -class InfortrendCliException(VolumeDriverException): - message = _("Infortrend CLI exception: %(err)s Param: %(param)s " - "(Return Code: %(rc)s) (Output: %(out)s)") - - -# DOTHILL drivers -class DotHillInvalidBackend(VolumeDriverException): - message = _("Backend doesn't exist (%(backend)s)") - - -class DotHillConnectionError(VolumeDriverException): - message = "%(message)s" - - -class DotHillAuthenticationError(VolumeDriverException): - message = "%(message)s" - - -class DotHillNotEnoughSpace(VolumeDriverException): - message = _("Not enough space on backend (%(backend)s)") - - -class DotHillRequestError(VolumeDriverException): - message = "%(message)s" - - -class DotHillNotTargetPortal(VolumeDriverException): - message = _("No active iSCSI portals with supplied iSCSI IPs") - - -class DotHillDriverNotSupported(VolumeDriverException): - message = _("The Dot Hill driver is no longer supported.") - - -# Sheepdog -class SheepdogError(VolumeBackendAPIException): - message = _("An error has occurred in SheepdogDriver. 
" - "(Reason: %(reason)s)") - - -class SheepdogCmdError(SheepdogError): - message = _("(Command: %(cmd)s) " - "(Return Code: %(exit_code)s) " - "(Stdout: %(stdout)s) " - "(Stderr: %(stderr)s)") - - -class MetadataAbsent(CinderException): - message = _("There is no metadata in DB object.") - - -class NotSupportedOperation(Invalid): - message = _("Operation not supported: %(operation)s.") - code = 405 - - -# Hitachi HNAS drivers -class HNASConnError(VolumeDriverException): - message = "%(message)s" - - -# Coho drivers -class CohoException(VolumeDriverException): - message = _("Coho Data Cinder driver failure: %(message)s") - - -# Tegile Storage drivers -class TegileAPIException(VolumeBackendAPIException): - message = _("Unexpected response from Tegile IntelliFlash API") - - -# NexentaStor driver exception -class NexentaException(VolumeDriverException): - message = "%(message)s" - - -# Google Cloud Storage(GCS) backup driver -class GCSConnectionFailure(BackupDriverException): - message = _("Google Cloud Storage connection failure: %(reason)s") - - -class GCSApiFailure(BackupDriverException): - message = _("Google Cloud Storage api failure: %(reason)s") - - -class GCSOAuth2Failure(BackupDriverException): - message = _("Google Cloud Storage oauth2 failure: %(reason)s") - - -# Kaminario K2 -class KaminarioCinderDriverException(VolumeDriverException): - message = _("KaminarioCinderDriver failure: %(reason)s") - - -class KaminarioRetryableException(VolumeDriverException): - message = _("Kaminario retryable exception: %(reason)s") - - -# Synology driver -class SynoAPIHTTPError(VolumeDriverException): - message = _("HTTP exit code: [%(code)s]") - - -class SynoAuthError(VolumeDriverException): - message = _("Synology driver authentication failed: %(reason)s.") - - -class SynoLUNNotExist(VolumeDriverException): - message = _("LUN not found by UUID: %(uuid)s.") - - -# Reduxio driver -class RdxAPICommandException(VolumeDriverException): - message = _("Reduxio API Command 
Exception") - - -class RdxAPIConnectionException(VolumeDriverException): - message = _("Reduxio API Connection Exception") - - -class AttachmentSpecsNotFound(NotFound): - message = _("Attachment %(attachment_id)s has no " - "key %(specs_key)s.") - - -class InvalidAttachment(Invalid): - message = _("Invalid attachment: %(reason)s") - - -# Veritas driver -class UnableToExecuteHyperScaleCmd(VolumeDriverException): - message = _("Failed HyperScale command for '%(message)s'") - - -class UnableToProcessHyperScaleCmdOutput(VolumeDriverException): - message = _("Failed processing command output '%(cmd_out)s'" - " for HyperScale command") - - -class ErrorInFetchingConfiguration(VolumeDriverException): - message = _("Error in fetching configuration for '%(persona)s'") - - -class ErrorInSendingMsg(VolumeDriverException): - message = _("Error in sending message '%(cmd_error)s'") - - -class ErrorInHyperScaleVersion(VolumeDriverException): - message = _("Error in getting HyperScale version '%(cmd_error)s'") - - -class ErrorInParsingArguments(VolumeDriverException): - message = _("Error in parsing message arguments : Invalid Payload") - - -# GPFS driver -class GPFSDriverUnsupportedOperation(VolumeBackendAPIException): - message = _("GPFS driver unsupported operation: %(msg)s") diff --git a/cinder/flow_utils.py b/cinder/flow_utils.py deleted file mode 100644 index afa3beb91..000000000 --- a/cinder/flow_utils.py +++ /dev/null @@ -1,87 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the -# License for the specific language governing permissions and limitations -# under the License. - -import os - -from oslo_log import log as logging -# For more information please visit: https://wiki.openstack.org/wiki/TaskFlow -from taskflow import formatters -from taskflow.listeners import base -from taskflow.listeners import logging as logging_listener -from taskflow import task - -from cinder import exception - -LOG = logging.getLogger(__name__) - - -def _make_task_name(cls, addons=None): - """Makes a pretty name for a task class.""" - base_name = ".".join([cls.__module__, cls.__name__]) - extra = '' - if addons: - extra = ';%s' % (", ".join([str(a) for a in addons])) - return base_name + extra - - -class CinderTask(task.Task): - """The root task class for all cinder tasks. - - It automatically names the given task using the module and class that - implement the given task as the task name. - """ - - def __init__(self, addons=None, **kwargs): - super(CinderTask, self).__init__(self.make_name(addons), **kwargs) - - @classmethod - def make_name(cls, addons=None): - return _make_task_name(cls, addons) - - -class SpecialFormatter(formatters.FailureFormatter): - - #: Exception is an excepted case, don't include traceback in log if fails. - _NO_TRACE_EXCEPTIONS = (exception.InvalidInput, exception.QuotaError) - - def __init__(self, engine): - super(SpecialFormatter, self).__init__(engine) - - def format(self, fail, atom_matcher): - if fail.check(*self._NO_TRACE_EXCEPTIONS) is not None: - exc_info = None - exc_details = '%s%s' % (os.linesep, fail.pformat(traceback=False)) - return (exc_info, exc_details) - else: - return super(SpecialFormatter, self).format(fail, atom_matcher) - - -class DynamicLogListener(logging_listener.DynamicLoggingListener): - """This is used to attach to taskflow engines while they are running. 
- - It provides a bunch of useful features that expose the actions happening - inside a taskflow engine, which can be useful for developers for debugging, - for operations folks for monitoring and tracking of the resource actions - and more... - """ - - def __init__(self, engine, - task_listen_for=base.DEFAULT_LISTEN_FOR, - flow_listen_for=base.DEFAULT_LISTEN_FOR, - retry_listen_for=base.DEFAULT_LISTEN_FOR, - logger=LOG): - super(DynamicLogListener, self).__init__( - engine, - task_listen_for=task_listen_for, - flow_listen_for=flow_listen_for, - retry_listen_for=retry_listen_for, - log=logger, fail_formatter=SpecialFormatter(engine)) diff --git a/cinder/group/__init__.py b/cinder/group/__init__.py deleted file mode 100644 index ff7c58ed3..000000000 --- a/cinder/group/__init__.py +++ /dev/null @@ -1,27 +0,0 @@ -# Copyright (C) 2016 EMC Corporation. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -# Importing full names to not pollute the namespace and cause possible -# collisions with use of 'from cinder.transfer import ' elsewhere. - -from oslo_utils import importutils - -from cinder.common import config - - -CONF = config.CONF - -API = importutils.import_class( - CONF.group_api_class) diff --git a/cinder/group/api.py b/cinder/group/api.py deleted file mode 100644 index 4320698a5..000000000 --- a/cinder/group/api.py +++ /dev/null @@ -1,1107 +0,0 @@ -# Copyright (C) 2016 EMC Corporation. -# All Rights Reserved. 
-# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -""" -Handles all requests relating to groups. -""" - - -import functools - -from oslo_config import cfg -from oslo_log import log as logging -from oslo_utils import excutils -from oslo_utils import timeutils -from oslo_utils import uuidutils - -from cinder import db -from cinder.db import base -from cinder import exception -from cinder.i18n import _ -from cinder import objects -from cinder.objects import base as objects_base -from cinder.objects import fields as c_fields -import cinder.policy -from cinder import quota -from cinder import quota_utils -from cinder.scheduler import rpcapi as scheduler_rpcapi -from cinder.volume import api as volume_api -from cinder.volume import rpcapi as volume_rpcapi -from cinder.volume import utils as vol_utils -from cinder.volume import volume_types - - -CONF = cfg.CONF - -LOG = logging.getLogger(__name__) -GROUP_QUOTAS = quota.GROUP_QUOTAS -VALID_REMOVE_VOL_FROM_GROUP_STATUS = ( - 'available', - 'in-use', - 'error', - 'error_deleting') -VALID_ADD_VOL_TO_GROUP_STATUS = ( - 'available', - 'in-use') - - -def wrap_check_policy(func): - """Check policy corresponding to the wrapped methods prior to execution. 
def check_policy(context, action, target_obj=None):
    """Enforce the ``group:<action>`` policy rule for this request.

    The policy target starts from the request credentials
    (project_id/user_id) and is augmented with the fields of
    *target_obj*, which may be a CinderObject or a plain dict.
    """
    target = dict(project_id=context.project_id,
                  user_id=context.user_id)

    if isinstance(target_obj, objects_base.CinderObject):
        # Versioned objects must be flattened to a plain dict before
        # they can be merged into the policy target.
        primitive = target_obj.obj_to_primitive()['versioned_object.data']
        target.update(primitive or {})
    else:
        target.update(target_obj or {})

    cinder.policy.enforce(context, 'group:%s' % action, target)
availability_zone}) - else: - msg = _("Availability zone '%(s_az)s' is invalid.") - msg = msg % {'s_az': availability_zone} - raise exception.InvalidInput(reason=msg) - - return availability_zone - - def create(self, context, name, description, group_type, - volume_types, availability_zone=None): - check_policy(context, 'create') - - req_volume_types = [] - # NOTE: Admin context is required to get extra_specs of volume_types. - req_volume_types = (self.db.volume_types_get_by_name_or_id( - context.elevated(), volume_types)) - - if not uuidutils.is_uuid_like(group_type): - req_group_type = self.db.group_type_get_by_name(context, - group_type) - else: - req_group_type = self.db.group_type_get(context, group_type) - - availability_zone = self._extract_availability_zone(availability_zone) - kwargs = {'user_id': context.user_id, - 'project_id': context.project_id, - 'availability_zone': availability_zone, - 'status': c_fields.GroupStatus.CREATING, - 'name': name, - 'description': description, - 'volume_type_ids': [t['id'] for t in req_volume_types], - 'group_type_id': req_group_type['id'], - 'replication_status': c_fields.ReplicationStatus.DISABLED} - group = None - try: - group = objects.Group(context=context, **kwargs) - group.create() - except Exception: - with excutils.save_and_reraise_exception(): - LOG.error("Error occurred when creating group" - " %s.", name) - - request_spec_list = [] - filter_properties_list = [] - for req_volume_type in req_volume_types: - request_spec = {'volume_type': req_volume_type.copy(), - 'group_id': group.id} - filter_properties = {} - request_spec_list.append(request_spec) - filter_properties_list.append(filter_properties) - - group_spec = {'group_type': req_group_type.copy(), - 'group_id': group.id} - group_filter_properties = {} - - # Update quota for groups - self.update_quota(context, group, 1) - - self._cast_create_group(context, group, - group_spec, - request_spec_list, - group_filter_properties, - filter_properties_list) - - 
return group - - def create_from_src(self, context, name, description=None, - group_snapshot_id=None, source_group_id=None): - check_policy(context, 'create') - - # Populate group_type_id and volume_type_ids - group_type_id = None - volume_type_ids = [] - if group_snapshot_id: - grp_snap = self.get_group_snapshot(context, group_snapshot_id) - group_type_id = grp_snap.group_type_id - grp_snap_src_grp = self.get(context, grp_snap.group_id) - volume_type_ids = [vt.id for vt in grp_snap_src_grp.volume_types] - elif source_group_id: - source_group = self.get(context, source_group_id) - group_type_id = source_group.group_type_id - volume_type_ids = [vt.id for vt in source_group.volume_types] - - kwargs = { - 'user_id': context.user_id, - 'project_id': context.project_id, - 'status': c_fields.GroupStatus.CREATING, - 'name': name, - 'description': description, - 'group_snapshot_id': group_snapshot_id, - 'source_group_id': source_group_id, - 'group_type_id': group_type_id, - 'volume_type_ids': volume_type_ids, - 'replication_status': c_fields.ReplicationStatus.DISABLED - } - - group = None - try: - group = objects.Group(context=context, **kwargs) - group.create(group_snapshot_id=group_snapshot_id, - source_group_id=source_group_id) - except exception.GroupNotFound: - with excutils.save_and_reraise_exception(): - LOG.error("Source Group %(source_group)s not found when " - "creating group %(group)s from source.", - {'group': name, 'source_group': source_group_id}) - except exception.GroupSnapshotNotFound: - with excutils.save_and_reraise_exception(): - LOG.error("Group snapshot %(group_snap)s not found when " - "creating group %(group)s from source.", - {'group': name, 'group_snap': group_snapshot_id}) - except Exception: - with excutils.save_and_reraise_exception(): - LOG.error("Error occurred when creating group" - " %(group)s from group_snapshot %(grp_snap)s.", - {'group': name, 'grp_snap': group_snapshot_id}) - - # Update quota for groups - self.update_quota(context, 
group, 1) - - if not group.host: - msg = _("No host to create group %s.") % group.id - LOG.error(msg) - raise exception.InvalidGroup(reason=msg) - - group.assert_not_frozen() - - if group_snapshot_id: - self._create_group_from_group_snapshot(context, group, - group_snapshot_id) - elif source_group_id: - self._create_group_from_source_group(context, group, - source_group_id) - - return group - - def _create_group_from_group_snapshot(self, context, group, - group_snapshot_id): - try: - group_snapshot = objects.GroupSnapshot.get_by_id( - context, group_snapshot_id) - snapshots = objects.SnapshotList.get_all_for_group_snapshot( - context, group_snapshot.id) - - if not snapshots: - msg = _("Group snapshot is empty. No group will be created.") - raise exception.InvalidGroup(reason=msg) - - for snapshot in snapshots: - kwargs = {} - kwargs['availability_zone'] = group.availability_zone - kwargs['group_snapshot'] = group_snapshot - kwargs['group'] = group - kwargs['snapshot'] = snapshot - volume_type_id = snapshot.volume_type_id - if volume_type_id: - kwargs['volume_type'] = ( - objects.VolumeType.get_by_name_or_id( - context, volume_type_id)) - # Create group volume_type mapping entries - try: - db.group_volume_type_mapping_create(context, group.id, - volume_type_id) - except exception.GroupVolumeTypeMappingExists: - # Only need to create one group volume_type mapping - # entry for the same combination, skipping. - LOG.info("A mapping entry already exists for group" - " %(grp)s and volume type %(vol_type)s. " - "Do not need to create again.", - {'grp': group.id, - 'vol_type': volume_type_id}) - pass - - # Since group snapshot is passed in, the following call will - # create a db entry for the volume, but will not call the - # volume manager to create a real volume in the backend yet. - # If error happens, taskflow will handle rollback of quota - # and removal of volume entry in the db. 
- try: - self.volume_api.create(context, - snapshot.volume_size, - None, - None, - **kwargs) - except exception.CinderException: - with excutils.save_and_reraise_exception(): - LOG.error("Error occurred when creating volume " - "entry from snapshot in the process of " - "creating group %(group)s " - "from group snapshot %(group_snap)s.", - {'group': group.id, - 'group_snap': group_snapshot.id}) - except Exception: - with excutils.save_and_reraise_exception(): - try: - group.destroy() - finally: - LOG.error("Error occurred when creating group " - "%(group)s from group snapshot %(group_snap)s.", - {'group': group.id, - 'group_snap': group_snapshot.id}) - - volumes = objects.VolumeList.get_all_by_generic_group(context, - group.id) - for vol in volumes: - # Update the host field for the volume. - vol.host = group.host - vol.save() - - self.volume_rpcapi.create_group_from_src( - context, group, group_snapshot) - - def _create_group_from_source_group(self, context, group, - source_group_id): - try: - source_group = objects.Group.get_by_id(context, - source_group_id) - source_vols = objects.VolumeList.get_all_by_generic_group( - context, source_group.id) - - if not source_vols: - msg = _("Source Group is empty. No group " - "will be created.") - raise exception.InvalidGroup(reason=msg) - - for source_vol in source_vols: - kwargs = {} - kwargs['availability_zone'] = group.availability_zone - kwargs['source_group'] = source_group - kwargs['group'] = group - kwargs['source_volume'] = source_vol - volume_type_id = source_vol.volume_type_id - if volume_type_id: - kwargs['volume_type'] = ( - objects.VolumeType.get_by_name_or_id( - context, volume_type_id)) - # Create group volume_type mapping entries - try: - db.group_volume_type_mapping_create(context, group.id, - volume_type_id) - except exception.GroupVolumeTypeMappingExists: - # Only need to create one group volume_type mapping - # entry for the same combination, skipping. 
- LOG.info("A mapping entry already exists for group" - " %(grp)s and volume type %(vol_type)s. " - "Do not need to create again.", - {'grp': group.id, - 'vol_type': volume_type_id}) - pass - - # Since source_group is passed in, the following call will - # create a db entry for the volume, but will not call the - # volume manager to create a real volume in the backend yet. - # If error happens, taskflow will handle rollback of quota - # and removal of volume entry in the db. - try: - self.volume_api.create(context, - source_vol.size, - None, - None, - **kwargs) - except exception.CinderException: - with excutils.save_and_reraise_exception(): - LOG.error("Error occurred when creating cloned " - "volume in the process of creating " - "group %(group)s from " - "source group %(source_group)s.", - {'group': group.id, - 'source_group': source_group.id}) - except Exception: - with excutils.save_and_reraise_exception(): - try: - group.destroy() - finally: - LOG.error("Error occurred when creating " - "group %(group)s from source group " - "%(source_group)s.", - {'group': group.id, - 'source_group': source_group.id}) - - volumes = objects.VolumeList.get_all_by_generic_group(context, - group.id) - for vol in volumes: - # Update the host field for the volume. 
- vol.host = group.host - vol.save() - - self.volume_rpcapi.create_group_from_src(context, group, - None, source_group) - - def _cast_create_group(self, context, group, - group_spec, - request_spec_list, - group_filter_properties, - filter_properties_list): - - try: - for request_spec in request_spec_list: - volume_type = request_spec.get('volume_type') - volume_type_id = None - if volume_type: - volume_type_id = volume_type.get('id') - - specs = {} - if volume_type_id: - qos_specs = volume_types.get_volume_type_qos_specs( - volume_type_id) - specs = qos_specs['qos_specs'] - if not specs: - # to make sure we don't pass empty dict - specs = None - - volume_properties = { - 'size': 0, # Need to populate size for the scheduler - 'user_id': context.user_id, - 'project_id': context.project_id, - 'status': 'creating', - 'attach_status': 'detached', - 'encryption_key_id': request_spec.get('encryption_key_id'), - 'display_description': request_spec.get('description'), - 'display_name': request_spec.get('name'), - 'volume_type_id': volume_type_id, - 'group_type_id': group.group_type_id, - } - - request_spec['volume_properties'] = volume_properties - request_spec['qos_specs'] = specs - - group_properties = { - 'size': 0, # Need to populate size for the scheduler - 'user_id': context.user_id, - 'project_id': context.project_id, - 'status': 'creating', - 'display_description': group_spec.get('description'), - 'display_name': group_spec.get('name'), - 'group_type_id': group.group_type_id, - } - - group_spec['volume_properties'] = group_properties - group_spec['qos_specs'] = None - - except Exception: - with excutils.save_and_reraise_exception(): - try: - group.destroy() - finally: - LOG.error("Error occurred when building request spec " - "list for group %s.", group.id) - - # Cast to the scheduler and let it handle whatever is needed - # to select the target host for this group. 
- self.scheduler_rpcapi.create_group( - context, - group, - group_spec=group_spec, - request_spec_list=request_spec_list, - group_filter_properties=group_filter_properties, - filter_properties_list=filter_properties_list) - - def update_quota(self, context, group, num, project_id=None): - reserve_opts = {'groups': num} - try: - reservations = GROUP_QUOTAS.reserve(context, - project_id=project_id, - **reserve_opts) - if reservations: - GROUP_QUOTAS.commit(context, reservations) - except Exception as e: - with excutils.save_and_reraise_exception(): - try: - group.destroy() - if isinstance(e, exception.OverQuota): - quota_utils.process_reserve_over_quota( - context, e, resource='groups') - finally: - LOG.error("Failed to update quota for group %s.", group.id) - - @wrap_check_policy - def delete(self, context, group, delete_volumes=False): - if not group.host: - self.update_quota(context, group, -1, group.project_id) - - LOG.debug("No host for group %s. Deleting from " - "the database.", group.id) - group.destroy() - - return - - group.assert_not_frozen() - - if not delete_volumes and group.status not in ( - [c_fields.GroupStatus.AVAILABLE, - c_fields.GroupStatus.ERROR]): - msg = _("Group status must be available or error, " - "but current status is: %s") % group.status - raise exception.InvalidGroup(reason=msg) - - # NOTE(tommylikehu): Admin context is required to load group snapshots. - with group.obj_as_admin(): - if group.group_snapshots: - raise exception.InvalidGroup( - reason=_("Group has existing snapshots.")) - - volumes = self.db.volume_get_all_by_generic_group(context.elevated(), - group.id) - if volumes and not delete_volumes: - msg = (_("Group %s still contains volumes. " - "The delete-volumes flag is required to delete it.") - % group.id) - LOG.error(msg) - raise exception.InvalidGroup(reason=msg) - - volumes_model_update = [] - for volume in volumes: - if volume['attach_status'] == "attached": - msg = _("Volume in group %s is attached. 
" - "Need to detach first.") % group.id - LOG.error(msg) - raise exception.InvalidGroup(reason=msg) - - snapshots = objects.SnapshotList.get_all_for_volume(context, - volume['id']) - if snapshots: - msg = _("Volume in group still has " - "dependent snapshots.") - LOG.error(msg) - raise exception.InvalidGroup(reason=msg) - - volumes_model_update.append({'id': volume['id'], - 'status': 'deleting'}) - - self.db.volumes_update(context, volumes_model_update) - - group.status = c_fields.GroupStatus.DELETING - group.terminated_at = timeutils.utcnow() - group.save() - - self.volume_rpcapi.delete_group(context, group) - - @wrap_check_policy - def update(self, context, group, name, description, - add_volumes, remove_volumes): - """Update group.""" - if group.status != c_fields.GroupStatus.AVAILABLE: - msg = _("Group status must be available, " - "but current status is: %s.") % group.status - raise exception.InvalidGroup(reason=msg) - - add_volumes_list = [] - remove_volumes_list = [] - if add_volumes: - add_volumes = add_volumes.strip(',') - add_volumes_list = add_volumes.split(',') - if remove_volumes: - remove_volumes = remove_volumes.strip(',') - remove_volumes_list = remove_volumes.split(',') - - invalid_uuids = [] - for uuid in add_volumes_list: - if uuid in remove_volumes_list: - invalid_uuids.append(uuid) - if invalid_uuids: - msg = _("UUIDs %s are in both add and remove volume " - "list.") % invalid_uuids - raise exception.InvalidVolume(reason=msg) - - volumes = self.db.volume_get_all_by_generic_group(context, group.id) - - # Validate name. - if name == group.name: - name = None - - # Validate description. - if description == group.description: - description = None - - # Validate volumes in add_volumes and remove_volumes. 
- add_volumes_new = "" - remove_volumes_new = "" - if add_volumes_list: - add_volumes_new = self._validate_add_volumes( - context, volumes, add_volumes_list, group) - if remove_volumes_list: - remove_volumes_new = self._validate_remove_volumes( - volumes, remove_volumes_list, group) - - if (name is None and description is None and not add_volumes_new and - not remove_volumes_new): - msg = (_("Cannot update group %(group_id)s " - "because no valid name, description, add_volumes, " - "or remove_volumes were provided.") % - {'group_id': group.id}) - raise exception.InvalidGroup(reason=msg) - - fields = {'updated_at': timeutils.utcnow()} - - # Update name and description in db now. No need to - # to send them over through an RPC call. - if name is not None: - fields['name'] = name - if description is not None: - fields['description'] = description - if not add_volumes_new and not remove_volumes_new: - # Only update name or description. Set status to available. - fields['status'] = c_fields.GroupStatus.AVAILABLE - else: - fields['status'] = c_fields.GroupStatus.UPDATING - - group.update(fields) - group.save() - - # Do an RPC call only if the update request includes - # adding/removing volumes. add_volumes_new and remove_volumes_new - # are strings of volume UUIDs separated by commas with no spaces - # in between. - if add_volumes_new or remove_volumes_new: - self.volume_rpcapi.update_group( - context, group, - add_volumes=add_volumes_new, - remove_volumes=remove_volumes_new) - - def _validate_remove_volumes(self, volumes, remove_volumes_list, group): - # Validate volumes in remove_volumes. - remove_volumes_new = "" - for volume in volumes: - if volume['id'] in remove_volumes_list: - if volume['status'] not in VALID_REMOVE_VOL_FROM_GROUP_STATUS: - msg = (_("Cannot remove volume %(volume_id)s from " - "group %(group_id)s because volume " - "is in an invalid state: %(status)s. 
Valid " - "states are: %(valid)s.") % - {'volume_id': volume['id'], - 'group_id': group.id, - 'status': volume['status'], - 'valid': VALID_REMOVE_VOL_FROM_GROUP_STATUS}) - raise exception.InvalidVolume(reason=msg) - # Volume currently in group. It will be removed from group. - if remove_volumes_new: - remove_volumes_new += "," - remove_volumes_new += volume['id'] - - for rem_vol in remove_volumes_list: - if rem_vol not in remove_volumes_new: - msg = (_("Cannot remove volume %(volume_id)s from " - "group %(group_id)s because it " - "is not in the group.") % - {'volume_id': rem_vol, - 'group_id': group.id}) - raise exception.InvalidVolume(reason=msg) - - return remove_volumes_new - - def _validate_add_volumes(self, context, volumes, add_volumes_list, group): - add_volumes_new = "" - for volume in volumes: - if volume['id'] in add_volumes_list: - # Volume already in group. Remove from add_volumes. - add_volumes_list.remove(volume['id']) - - for add_vol in add_volumes_list: - try: - add_vol_ref = self.db.volume_get(context, add_vol) - except exception.VolumeNotFound: - msg = (_("Cannot add volume %(volume_id)s to " - "group %(group_id)s because volume cannot be " - "found.") % - {'volume_id': add_vol, - 'group_id': group.id}) - raise exception.InvalidVolume(reason=msg) - orig_group = add_vol_ref.get('group_id', None) - if orig_group: - # If volume to be added is already in the group to be updated, - # it should have been removed from the add_volumes_list in the - # beginning of this function. If we are here, it means it is - # in a different group. 
- msg = (_("Cannot add volume %(volume_id)s to group " - "%(group_id)s because it is already in " - "group %(orig_group)s.") % - {'volume_id': add_vol_ref['id'], - 'group_id': group.id, - 'orig_group': orig_group}) - raise exception.InvalidVolume(reason=msg) - if add_vol_ref: - add_vol_type_id = add_vol_ref.get('volume_type_id', None) - if not add_vol_type_id: - msg = (_("Cannot add volume %(volume_id)s to group " - "%(group_id)s because it has no volume " - "type.") % - {'volume_id': add_vol_ref['id'], - 'group_id': group.id}) - raise exception.InvalidVolume(reason=msg) - vol_type_ids = [v_type.id for v_type in group.volume_types] - if add_vol_type_id not in vol_type_ids: - msg = (_("Cannot add volume %(volume_id)s to group " - "%(group_id)s because volume type " - "%(volume_type)s is not supported by the " - "group.") % - {'volume_id': add_vol_ref['id'], - 'group_id': group.id, - 'volume_type': add_vol_type_id}) - raise exception.InvalidVolume(reason=msg) - if (add_vol_ref['status'] not in - VALID_ADD_VOL_TO_GROUP_STATUS): - msg = (_("Cannot add volume %(volume_id)s to group " - "%(group_id)s because volume is in an " - "invalid state: %(status)s. Valid states are: " - "%(valid)s.") % - {'volume_id': add_vol_ref['id'], - 'group_id': group.id, - 'status': add_vol_ref['status'], - 'valid': VALID_ADD_VOL_TO_GROUP_STATUS}) - raise exception.InvalidVolume(reason=msg) - - # group.host and add_vol_ref['host'] are in this format: - # 'host@backend#pool'. Extract host (host@backend) before - # doing comparison. - vol_host = vol_utils.extract_host(add_vol_ref['host']) - group_host = vol_utils.extract_host(group.host) - if group_host != vol_host: - raise exception.InvalidVolume( - reason=_("Volume is not local to this node.")) - - # Volume exists. It will be added to CG. 
- if add_volumes_new: - add_volumes_new += "," - add_volumes_new += add_vol_ref['id'] - - else: - msg = (_("Cannot add volume %(volume_id)s to group " - "%(group_id)s because volume does not exist.") % - {'volume_id': add_vol_ref['id'], - 'group_id': group.id}) - raise exception.InvalidVolume(reason=msg) - - return add_volumes_new - - def get(self, context, group_id): - group = objects.Group.get_by_id(context, group_id) - check_policy(context, 'get', group) - return group - - def get_all(self, context, filters=None, marker=None, limit=None, - offset=None, sort_keys=None, sort_dirs=None): - check_policy(context, 'get_all') - if filters is None: - filters = {} - - if filters: - LOG.debug("Searching by: %s", filters) - - if (context.is_admin and 'all_tenants' in filters): - del filters['all_tenants'] - groups = objects.GroupList.get_all( - context, filters=filters, marker=marker, limit=limit, - offset=offset, sort_keys=sort_keys, sort_dirs=sort_dirs) - else: - groups = objects.GroupList.get_all_by_project( - context, context.project_id, filters=filters, marker=marker, - limit=limit, offset=offset, sort_keys=sort_keys, - sort_dirs=sort_dirs) - return groups - - @wrap_check_policy - def reset_status(self, context, group, status): - """Reset status of generic group""" - - if status not in c_fields.GroupStatus.ALL: - msg = _("Group status: %(status)s is invalid, valid status " - "are: %(valid)s.") % {'status': status, - 'valid': c_fields.GroupStatus.ALL} - raise exception.InvalidGroupStatus(reason=msg) - field = {'updated_at': timeutils.utcnow(), - 'status': status} - group.update(field) - group.save() - - @wrap_check_policy - def create_group_snapshot(self, context, group, name, description): - group.assert_not_frozen() - options = {'group_id': group.id, - 'user_id': context.user_id, - 'project_id': context.project_id, - 'status': "creating", - 'name': name, - 'description': description, - 'group_type_id': group.group_type_id} - - group_snapshot = None - 
group_snapshot_id = None - try: - group_snapshot = objects.GroupSnapshot(context, **options) - group_snapshot.create() - group_snapshot_id = group_snapshot.id - - snap_name = group_snapshot.name - snap_desc = group_snapshot.description - with group.obj_as_admin(): - self.volume_api.create_snapshots_in_db( - context, group.volumes, snap_name, snap_desc, - None, group_snapshot_id) - - except Exception: - with excutils.save_and_reraise_exception(): - try: - # If the group_snapshot has been created - if group_snapshot.obj_attr_is_set('id'): - group_snapshot.destroy() - finally: - LOG.error("Error occurred when creating group_snapshot" - " %s.", group_snapshot_id) - - self.volume_rpcapi.create_group_snapshot(context, group_snapshot) - - return group_snapshot - - def delete_group_snapshot(self, context, group_snapshot, force=False): - check_policy(context, 'delete_group_snapshot') - group_snapshot.assert_not_frozen() - values = {'status': 'deleting'} - expected = {'status': ('available', 'error')} - filters = [~db.group_creating_from_src( - group_snapshot_id=group_snapshot.id)] - res = group_snapshot.conditional_update(values, expected, filters) - - if not res: - msg = _('GroupSnapshot status must be available or error, and no ' - 'Group can be currently using it as source for its ' - 'creation.') - raise exception.InvalidGroupSnapshot(reason=msg) - - snapshots = objects.SnapshotList.get_all_for_group_snapshot( - context, group_snapshot.id) - - # TODO(xyang): Add a new db API to update all snapshots statuses - # in one db API call. 
- for snap in snapshots: - snap.status = c_fields.SnapshotStatus.DELETING - snap.save() - - self.volume_rpcapi.delete_group_snapshot(context.elevated(), - group_snapshot) - - def update_group_snapshot(self, context, group_snapshot, fields): - check_policy(context, 'update_group_snapshot') - group_snapshot.update(fields) - group_snapshot.save() - - def get_group_snapshot(self, context, group_snapshot_id): - check_policy(context, 'get_group_snapshot') - group_snapshots = objects.GroupSnapshot.get_by_id(context, - group_snapshot_id) - return group_snapshots - - def get_all_group_snapshots(self, context, filters=None, marker=None, - limit=None, offset=None, sort_keys=None, - sort_dirs=None): - check_policy(context, 'get_all_group_snapshots') - filters = filters or {} - - if context.is_admin and 'all_tenants' in filters: - # Need to remove all_tenants to pass the filtering below. - del filters['all_tenants'] - group_snapshots = objects.GroupSnapshotList.get_all( - context, filters=filters, marker=marker, limit=limit, - offset=offset, sort_keys=sort_keys, sort_dirs=sort_dirs) - else: - group_snapshots = objects.GroupSnapshotList.get_all_by_project( - context.elevated(), context.project_id, filters=filters, - marker=marker, limit=limit, offset=offset, sort_keys=sort_keys, - sort_dirs=sort_dirs) - return group_snapshots - - def reset_group_snapshot_status(self, context, gsnapshot, status): - """Reset status of group snapshot""" - - check_policy(context, 'reset_group_snapshot_status') - if status not in c_fields.GroupSnapshotStatus.ALL: - msg = _("Group snapshot status: %(status)s is invalid, " - "valid statuses are: " - "%(valid)s.") % {'status': status, - 'valid': c_fields.GroupSnapshotStatus.ALL} - raise exception.InvalidGroupSnapshotStatus(reason=msg) - field = {'updated_at': timeutils.utcnow(), - 'status': status} - gsnapshot.update(field) - gsnapshot.save() - - def _check_type(self, group): - if not group.is_replicated: - msg = _("Group %s is not a replication group 
type.") % group.id - LOG.error(msg) - raise exception.InvalidGroupType(reason=msg) - - for vol_type in group.volume_types: - if not vol_utils.is_replicated_spec(vol_type.extra_specs): - msg = _("Volume type %s does not have 'replication_enabled' " - "spec key set to ' True'.") % vol_type.id - LOG.error(msg) - raise exception.InvalidVolumeType(reason=msg) - - # Replication group API (Tiramisu) - @wrap_check_policy - def enable_replication(self, context, group): - self._check_type(group) - - valid_status = [c_fields.GroupStatus.AVAILABLE] - if group.status not in valid_status: - params = {'valid': valid_status, - 'current': group.status, - 'id': group.id} - msg = _("Group %(id)s status must be %(valid)s, " - "but current status is: %(current)s. " - "Cannot enable replication.") % params - LOG.error(msg) - raise exception.InvalidGroup(reason=msg) - - valid_rep_status = [c_fields.ReplicationStatus.DISABLED, - c_fields.ReplicationStatus.ENABLED] - if group.replication_status not in valid_rep_status: - params = {'valid': valid_rep_status, - 'current': group.replication_status, - 'id': group.id} - msg = _("Group %(id)s replication status must be %(valid)s, " - "but current status is: %(current)s. " - "Cannot enable replication.") % params - LOG.error(msg) - raise exception.InvalidGroup(reason=msg) - - volumes = objects.VolumeList.get_all_by_generic_group( - context.elevated(), group.id) - - valid_status = ['available', 'in-use'] - for vol in volumes: - if vol.status not in valid_status: - params = {'valid': valid_status, - 'current': vol.status, - 'id': vol.id} - msg = _("Volume %(id)s status must be %(valid)s, " - "but current status is: %(current)s. " - "Cannot enable replication.") % params - LOG.error(msg) - raise exception.InvalidVolume(reason=msg) - # replication_status could be set to enabled when volume is - # created and the mirror is built. 
- if vol.replication_status not in valid_rep_status: - params = {'valid': valid_rep_status, - 'current': vol.replication_status, - 'id': vol.id} - msg = _("Volume %(id)s replication status must be %(valid)s, " - "but current status is: %(current)s. " - "Cannot enable replication.") % params - LOG.error(msg) - raise exception.InvalidVolume(reason=msg) - - vol.replication_status = c_fields.ReplicationStatus.ENABLING - vol.save() - - group.replication_status = c_fields.ReplicationStatus.ENABLING - group.save() - - self.volume_rpcapi.enable_replication(context, group) - - @wrap_check_policy - def disable_replication(self, context, group): - self._check_type(group) - - valid_status = [c_fields.GroupStatus.AVAILABLE, - c_fields.GroupStatus.ERROR] - if group.status not in valid_status: - params = {'valid': valid_status, - 'current': group.status, - 'id': group.id} - msg = _("Group %(id)s status must be %(valid)s, " - "but current status is: %(current)s. " - "Cannot disable replication.") % params - LOG.error(msg) - raise exception.InvalidGroup(reason=msg) - - valid_rep_status = [c_fields.ReplicationStatus.ENABLED, - c_fields.ReplicationStatus.ERROR] - if group.replication_status not in valid_rep_status: - params = {'valid': valid_rep_status, - 'current': group.replication_status, - 'id': group.id} - msg = _("Group %(id)s replication status must be %(valid)s, " - "but current status is: %(current)s. " - "Cannot disable replication.") % params - LOG.error(msg) - raise exception.InvalidGroup(reason=msg) - - volumes = objects.VolumeList.get_all_by_generic_group( - context.elevated(), group.id) - - for vol in volumes: - if vol.replication_status not in valid_rep_status: - params = {'valid': valid_rep_status, - 'current': vol.replication_status, - 'id': vol.id} - msg = _("Volume %(id)s replication status must be %(valid)s, " - "but current status is: %(current)s. 
" - "Cannot disable replication.") % params - LOG.error(msg) - raise exception.InvalidVolume(reason=msg) - - vol.replication_status = c_fields.ReplicationStatus.DISABLING - vol.save() - - group.replication_status = c_fields.ReplicationStatus.DISABLING - group.save() - - self.volume_rpcapi.disable_replication(context, group) - - @wrap_check_policy - def failover_replication(self, context, group, - allow_attached_volume=False, - secondary_backend_id=None): - self._check_type(group) - - valid_status = [c_fields.GroupStatus.AVAILABLE] - if group.status not in valid_status: - params = {'valid': valid_status, - 'current': group.status, - 'id': group.id} - msg = _("Group %(id)s status must be %(valid)s, " - "but current status is: %(current)s. " - "Cannot failover replication.") % params - LOG.error(msg) - raise exception.InvalidGroup(reason=msg) - - valid_rep_status = [c_fields.ReplicationStatus.ENABLED, - c_fields.ReplicationStatus.FAILED_OVER] - if group.replication_status not in valid_rep_status: - params = {'valid': valid_rep_status, - 'current': group.replication_status, - 'id': group.id} - msg = _("Group %(id)s replication status must be %(valid)s, " - "but current status is: %(current)s. " - "Cannot failover replication.") % params - LOG.error(msg) - raise exception.InvalidGroup(reason=msg) - - volumes = objects.VolumeList.get_all_by_generic_group( - context.elevated(), group.id) - - valid_status = ['available', 'in-use'] - for vol in volumes: - if vol.status not in valid_status: - params = {'valid': valid_status, - 'current': vol.status, - 'id': vol.id} - msg = _("Volume %(id)s status must be %(valid)s, " - "but current status is: %(current)s. " - "Cannot failover replication.") % params - LOG.error(msg) - raise exception.InvalidVolume(reason=msg) - if vol.status == 'in-use' and not allow_attached_volume: - msg = _("Volume %s is attached but allow_attached_volume flag " - "is False. 
Cannot failover replication.") % vol.id - LOG.error(msg) - raise exception.InvalidVolume(reason=msg) - if vol.replication_status not in valid_rep_status: - params = {'valid': valid_rep_status, - 'current': vol.replication_status, - 'id': vol.id} - msg = _("Volume %(id)s replication status must be %(valid)s, " - "but current status is: %(current)s. " - "Cannot failover replication.") % params - LOG.error(msg) - raise exception.InvalidVolume(reason=msg) - - vol.replication_status = c_fields.ReplicationStatus.FAILING_OVER - vol.save() - - group.replication_status = c_fields.ReplicationStatus.FAILING_OVER - group.save() - - self.volume_rpcapi.failover_replication(context, group, - allow_attached_volume, - secondary_backend_id) - - @wrap_check_policy - def list_replication_targets(self, context, group): - self._check_type(group) - - return self.volume_rpcapi.list_replication_targets(context, group) diff --git a/cinder/hacking/__init__.py b/cinder/hacking/__init__.py deleted file mode 100644 index e69de29bb..000000000 diff --git a/cinder/hacking/checks.py b/cinder/hacking/checks.py deleted file mode 100644 index 48e9ef8ff..000000000 --- a/cinder/hacking/checks.py +++ /dev/null @@ -1,473 +0,0 @@ -# Copyright (c) 2014 OpenStack Foundation. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import ast -import re -import six - -""" -Guidelines for writing new hacking checks - - - Use only for Cinder specific tests. 
OpenStack general tests - should be submitted to the common 'hacking' module. - - Pick numbers in the range N3xx. Find the current test with - the highest allocated number and then pick the next value. - - Keep the test method code in the source file ordered based - on the N3xx value. - - List the new rule in the top level HACKING.rst file - - Add test cases for each new rule to - cinder/tests/test_hacking.py - -""" - -# NOTE(thangp): Ignore N323 pep8 error caused by importing cinder objects -UNDERSCORE_IMPORT_FILES = ['cinder/objects/__init__.py', - 'cinder/objects/manageableresources.py'] - -mutable_default_args = re.compile(r"^\s*def .+\((.+=\{\}|.+=\[\])") -translated_log = re.compile( - r"(.)*LOG\.(audit|debug|error|info|warn|warning|critical|exception)" - "\(\s*_\(\s*('|\")") -string_translation = re.compile(r"(.)*_\(\s*('|\")") -vi_header_re = re.compile(r"^#\s+vim?:.+") -underscore_import_check = re.compile(r"(.)*i18n\s+import(.)* _$") -underscore_import_check_multi = re.compile(r"(.)*i18n\s+import(.)* _, (.)*") -# We need this for cases where they have created their own _ function. -custom_underscore_check = re.compile(r"(.)*_\s*=\s*(.)*") -no_audit_log = re.compile(r"(.)*LOG\.audit(.)*") -no_print_statements = re.compile(r"\s*print\s*\(.+\).*") -dict_constructor_with_list_copy_re = re.compile(r".*\bdict\((\[)?(\(|\[)") - -# NOTE(jsbryant): When other oslo libraries switch over non-namespaced -# imports, we will need to add them to the regex below. -oslo_namespace_imports = re.compile(r"from[\s]*oslo[.](concurrency|db" - "|config|utils|serialization|log)") -no_contextlib_nested = re.compile(r"\s*with (contextlib\.)?nested\(") - -logging_instance = re.compile( - r"(.)*LOG\.(warning|info|debug|error|exception)\(") - -assert_None = re.compile( - r".*assertEqual\(None, .*\)") -assert_True = re.compile( - r".*assertEqual\(True, .*\)") - - -class BaseASTChecker(ast.NodeVisitor): - """Provides a simple framework for writing AST-based checks. 
- - Subclasses should implement visit_* methods like any other AST visitor - implementation. When they detect an error for a particular node the - method should call ``self.add_error(offending_node)``. Details about - where in the code the error occurred will be pulled from the node - object. - - Subclasses should also provide a class variable named CHECK_DESC to - be used for the human readable error message. - - """ - - def __init__(self, tree, filename): - """This object is created automatically by pep8. - - :param tree: an AST tree - :param filename: name of the file being analyzed - (ignored by our checks) - """ - self._tree = tree - self._errors = [] - - def run(self): - """Called automatically by pep8.""" - self.visit(self._tree) - return self._errors - - def add_error(self, node, message=None): - """Add an error caused by a node to the list of errors for pep8.""" - - # Need to disable pylint check here as it doesn't catch CHECK_DESC - # being defined in the subclasses. - message = message or self.CHECK_DESC # pylint: disable=E1101 - error = (node.lineno, node.col_offset, message, self.__class__) - self._errors.append(error) - - def _check_call_names(self, call_node, names): - if isinstance(call_node, ast.Call): - if isinstance(call_node.func, ast.Name): - if call_node.func.id in names: - return True - return False - - -def no_vi_headers(physical_line, line_number, lines): - """Check for vi editor configuration in source files. - - By default vi modelines can only appear in the first or - last 5 lines of a source file. - - N314 - """ - # NOTE(gilliard): line_number is 1-indexed - if line_number <= 5 or line_number > len(lines) - 5: - if vi_header_re.match(physical_line): - return 0, "N314: Don't put vi configuration in source files" - - -def no_translate_logs(logical_line, filename): - """Check for 'LOG.*(_(' - - Starting with the Pike series, OpenStack no longer supports log - translation. We shouldn't translate logs. 
- - - This check assumes that 'LOG' is a logger. - - Use filename so we can start enforcing this in specific folders - instead of needing to do so all at once. - - C312 - """ - if translated_log.match(logical_line): - yield(0, "C312: Log messages should not be translated!") - - -def no_mutable_default_args(logical_line): - msg = "N322: Method's default argument shouldn't be mutable!" - if mutable_default_args.match(logical_line): - yield (0, msg) - - -def check_explicit_underscore_import(logical_line, filename): - """Check for explicit import of the _ function - - We need to ensure that any files that are using the _() function - to translate messages are explicitly importing the _ function. We - can't trust unit test to catch whether the import has been - added so we need to check for it here. - """ - - # Build a list of the files that have _ imported. No further - # checking needed once it is found. - for file in UNDERSCORE_IMPORT_FILES: - if file in filename: - return - if (underscore_import_check.match(logical_line) or - underscore_import_check_multi.match(logical_line) or - custom_underscore_check.match(logical_line)): - UNDERSCORE_IMPORT_FILES.append(filename) - elif string_translation.match(logical_line): - yield(0, "N323: Found use of _() without explicit import of _ !") - - -class CheckForStrUnicodeExc(BaseASTChecker): - """Checks for the use of str() or unicode() on an exception. - - This currently only handles the case where str() or unicode() - is used in the scope of an exception handler. If the exception - is passed into a function, returned from an assertRaises, or - used on an exception created in the same scope, this does not - catch it. - """ - - CHECK_DESC = ('N325 str() and unicode() cannot be used on an ' - 'exception. 
Remove or use six.text_type()') - - def __init__(self, tree, filename): - super(CheckForStrUnicodeExc, self).__init__(tree, filename) - self.name = [] - self.already_checked = [] - - # Python 2 - def visit_TryExcept(self, node): - for handler in node.handlers: - if handler.name: - self.name.append(handler.name.id) - super(CheckForStrUnicodeExc, self).generic_visit(node) - self.name = self.name[:-1] - else: - super(CheckForStrUnicodeExc, self).generic_visit(node) - - # Python 3 - def visit_ExceptHandler(self, node): - if node.name: - self.name.append(node.name) - super(CheckForStrUnicodeExc, self).generic_visit(node) - self.name = self.name[:-1] - else: - super(CheckForStrUnicodeExc, self).generic_visit(node) - - def visit_Call(self, node): - if self._check_call_names(node, ['str', 'unicode']): - if node not in self.already_checked: - self.already_checked.append(node) - if isinstance(node.args[0], ast.Name): - if node.args[0].id in self.name: - self.add_error(node.args[0]) - super(CheckForStrUnicodeExc, self).generic_visit(node) - - -class CheckLoggingFormatArgs(BaseASTChecker): - """Check for improper use of logging format arguments. - - LOG.debug("Volume %s caught fire and is at %d degrees C and climbing.", - ('volume1', 500)) - - The format arguments should not be a tuple as it is easy to miss. - - """ - - CHECK_DESC = 'C310 Log method arguments should not be a tuple.' - LOG_METHODS = [ - 'debug', 'info', - 'warn', 'warning', - 'error', 'exception', - 'critical', 'fatal', - 'trace', 'log' - ] - - def _find_name(self, node): - """Return the fully qualified name or a Name or Attribute.""" - if isinstance(node, ast.Name): - return node.id - elif (isinstance(node, ast.Attribute) - and isinstance(node.value, (ast.Name, ast.Attribute))): - method_name = node.attr - obj_name = self._find_name(node.value) - if obj_name is None: - return None - return obj_name + '.' 
+ method_name - elif isinstance(node, six.string_types): - return node - else: # could be Subscript, Call or many more - return None - - def visit_Call(self, node): - """Look for the 'LOG.*' calls.""" - # extract the obj_name and method_name - if isinstance(node.func, ast.Attribute): - obj_name = self._find_name(node.func.value) - if isinstance(node.func.value, ast.Name): - method_name = node.func.attr - elif isinstance(node.func.value, ast.Attribute): - obj_name = self._find_name(node.func.value) - method_name = node.func.attr - else: # could be Subscript, Call or many more - return super(CheckLoggingFormatArgs, self).generic_visit(node) - - # obj must be a logger instance and method must be a log helper - if (obj_name != 'LOG' - or method_name not in self.LOG_METHODS): - return super(CheckLoggingFormatArgs, self).generic_visit(node) - - # the call must have arguments - if not len(node.args): - return super(CheckLoggingFormatArgs, self).generic_visit(node) - - # any argument should not be a tuple - for arg in node.args: - if isinstance(arg, ast.Tuple): - self.add_error(arg) - - return super(CheckLoggingFormatArgs, self).generic_visit(node) - - -class CheckOptRegistrationArgs(BaseASTChecker): - """Verifying the registration of options are well formed - - This class creates a check for single opt or list/tuple of - opts when register_opt() or register_opts() are being called. - """ - - CHECK_DESC = ('C311: Arguments being passed to register_opt/register_opts ' - 'must be a single option or list/tuple of options ' - 'respectively. 
Options must also end with _opt or _opts ' - 'respectively.') - - singular_method = 'register_opt' - plural_method = 'register_opts' - - register_methods = [ - singular_method, - plural_method, - ] - - def _find_name(self, node): - """Return the fully qualified name or a Name or Attribute.""" - if isinstance(node, ast.Name): - return node.id - elif (isinstance(node, ast.Attribute) - and isinstance(node.value, (ast.Name, ast.Attribute))): - method_name = node.attr - obj_name = self._find_name(node.value) - if obj_name is None: - return None - return obj_name + '.' + method_name - elif isinstance(node, six.string_types): - return node - else: # could be Subscript, Call or many more - return None - - def _is_list_or_tuple(self, obj): - return isinstance(obj, ast.List) or isinstance(obj, ast.Tuple) - - def visit_Call(self, node): - """Look for the register_opt/register_opts calls.""" - # extract the obj_name and method_name - if isinstance(node.func, ast.Attribute): - if not isinstance(node.func.value, ast.Name): - return (super(CheckOptRegistrationArgs, - self).generic_visit(node)) - - method_name = node.func.attr - - # obj must be instance of register_opt() or register_opts() - if method_name not in self.register_methods: - return (super(CheckOptRegistrationArgs, - self).generic_visit(node)) - - if len(node.args) > 0: - argument_name = self._find_name(node.args[0]) - if argument_name: - if (method_name == self.singular_method and - not argument_name.lower().endswith('opt')): - self.add_error(node.args[0]) - elif (method_name == self.plural_method and - not argument_name.lower().endswith('opts')): - self.add_error(node.args[0]) - else: - # This covers instances of register_opt()/register_opts() - # that are registering the objects directly and not - # passing in a variable referencing the options being - # registered. 
- if (method_name == self.singular_method and - self._is_list_or_tuple(node.args[0])): - self.add_error(node.args[0]) - elif (method_name == self.plural_method and not - self._is_list_or_tuple(node.args[0])): - self.add_error(node.args[0]) - - return super(CheckOptRegistrationArgs, self).generic_visit(node) - - -def check_datetime_now(logical_line, noqa): - if noqa: - return - - msg = ("C301: Found datetime.now(). " - "Please use timeutils.utcnow() from oslo_utils.") - if 'datetime.now' in logical_line: - yield(0, msg) - - -_UNICODE_USAGE_REGEX = re.compile(r'\bunicode *\(') - - -def check_unicode_usage(logical_line, noqa): - if noqa: - return - - msg = "C302: Found unicode() call. Please use six.text_type()." - - if _UNICODE_USAGE_REGEX.search(logical_line): - yield(0, msg) - - -def check_no_print_statements(logical_line, filename, noqa): - # CLI and utils programs do need to use 'print()' so - # we shouldn't check those files. - if noqa: - return - - if "cinder/cmd" in filename or "tools/" in filename: - return - - if re.match(no_print_statements, logical_line): - msg = ("C303: print() should not be used. " - "Please use LOG.[info|error|warning|exception|debug]. " - "If print() must be used, use '# noqa' to skip this check.") - yield(0, msg) - - -def check_no_log_audit(logical_line): - """Ensure that we are not using LOG.audit messages - - Plans are in place going forward as discussed in the following - spec (https://review.openstack.org/#/c/91446/) to take out - LOG.audit messages. Given that audit was a concept invented - for OpenStack we can enforce not using it. - """ - - if no_audit_log.match(logical_line): - yield(0, "C304: Found LOG.audit. Use LOG.info instead.") - - -def check_timeutils_strtime(logical_line): - msg = ("C306: Found timeutils.strtime(). 
" - "Please use datetime.datetime.isoformat() or datetime.strftime()") - if 'timeutils.strtime' in logical_line: - yield(0, msg) - - -def no_log_warn(logical_line): - msg = "C307: LOG.warn is deprecated, please use LOG.warning!" - if "LOG.warn(" in logical_line: - yield (0, msg) - - -def dict_constructor_with_list_copy(logical_line): - msg = ("N336: Must use a dict comprehension instead of a dict constructor " - "with a sequence of key-value pairs.") - if dict_constructor_with_list_copy_re.match(logical_line): - yield (0, msg) - - -def check_timeutils_isotime(logical_line): - msg = ("C308: Found timeutils.isotime(). " - "Please use datetime.datetime.isoformat()") - if 'timeutils.isotime' in logical_line: - yield(0, msg) - - -def no_test_log(logical_line, filename, noqa): - if ('cinder/tests/tempest' in filename or - 'cinder/tests' not in filename or noqa): - return - msg = "C309: Unit tests should not perform logging." - if logging_instance.match(logical_line): - yield (0, msg) - - -def validate_assertTrue(logical_line): - if re.match(assert_True, logical_line): - msg = ("C313: Unit tests should use assertTrue(value) instead" - " of using assertEqual(True, value).") - yield(0, msg) - - -def factory(register): - register(no_vi_headers) - register(no_translate_logs) - register(no_mutable_default_args) - register(check_explicit_underscore_import) - register(CheckForStrUnicodeExc) - register(CheckLoggingFormatArgs) - register(CheckOptRegistrationArgs) - register(check_datetime_now) - register(check_timeutils_strtime) - register(check_timeutils_isotime) - register(check_unicode_usage) - register(check_no_print_statements) - register(check_no_log_audit) - register(no_log_warn) - register(dict_constructor_with_list_copy) - register(no_test_log) - register(validate_assertTrue) diff --git a/cinder/i18n.py b/cinder/i18n.py deleted file mode 100644 index ceb6fc25c..000000000 --- a/cinder/i18n.py +++ /dev/null @@ -1,40 +0,0 @@ -# Copyright 2014 IBM Corp. 
-# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -"""oslo.i18n integration module. - -See http://docs.openstack.org/developer/oslo.i18n/usage.html . - -""" - -import oslo_i18n as i18n - -DOMAIN = 'cinder' - -_translators = i18n.TranslatorFactory(domain=DOMAIN) - -# The primary translation function using the well-known name "_" -_ = _translators.primary - - -def enable_lazy(enable=True): - return i18n.enable_lazy(enable) - - -def translate(value, user_locale=None): - return i18n.translate(value, user_locale) - - -def get_available_languages(): - return i18n.get_available_languages(DOMAIN) diff --git a/cinder/image/__init__.py b/cinder/image/__init__.py deleted file mode 100644 index e69de29bb..000000000 diff --git a/cinder/image/cache.py b/cinder/image/cache.py deleted file mode 100644 index ba90c1579..000000000 --- a/cinder/image/cache.py +++ /dev/null @@ -1,238 +0,0 @@ -# Copyright (C) 2015 Pure Storage, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -from pytz import timezone -import six - -from oslo_config import cfg -from oslo_log import log as logging -from oslo_utils import timeutils - -from cinder import objects -from cinder import rpc -from cinder import utils - -CONF = cfg.CONF - -LOG = logging.getLogger(__name__) - - -class ImageVolumeCache(object): - def __init__(self, db, volume_api, max_cache_size_gb=0, - max_cache_size_count=0): - self.db = db - self.volume_api = volume_api - self.max_cache_size_gb = int(max_cache_size_gb) - self.max_cache_size_count = int(max_cache_size_count) - self.notifier = rpc.get_notifier('volume', CONF.host) - - def get_by_image_volume(self, context, volume_id): - return self.db.image_volume_cache_get_by_volume_id(context, volume_id) - - def evict(self, context, cache_entry): - LOG.debug('Evicting image cache entry: %(entry)s.', - {'entry': self._entry_to_str(cache_entry)}) - self.db.image_volume_cache_delete(context, cache_entry['volume_id']) - self._notify_cache_eviction(context, cache_entry['image_id'], - cache_entry['host']) - - @staticmethod - def _get_query_filters(volume_ref): - if volume_ref.is_clustered: - return {'cluster_name': volume_ref.cluster_name} - return {'host': volume_ref.host} - - def get_entry(self, context, volume_ref, image_id, image_meta): - cache_entry = self.db.image_volume_cache_get_and_update_last_used( - context, - image_id, - **self._get_query_filters(volume_ref) - ) - - if cache_entry: - LOG.debug('Found image-volume cache entry: %(entry)s.', - {'entry': self._entry_to_str(cache_entry)}) - - if self._should_update_entry(cache_entry, image_meta): - LOG.debug('Image-volume cache entry is out-dated, evicting: ' - '%(entry)s.', - {'entry': self._entry_to_str(cache_entry)}) - self._delete_image_volume(context, cache_entry) - cache_entry = None - - if cache_entry: - self._notify_cache_hit(context, cache_entry['image_id'], - cache_entry['host']) - else: - self._notify_cache_miss(context, image_id, - volume_ref['host']) - return cache_entry - - def 
create_cache_entry(self, context, volume_ref, image_id, image_meta): - """Create a new cache entry for an image. - - This assumes that the volume described by volume_ref has already been - created and is in an available state. - """ - LOG.debug('Creating new image-volume cache entry for image ' - '%(image_id)s on %(service)s', - {'image_id': image_id, - 'service': volume_ref.service_topic_queue}) - - # When we are creating an image from a volume the updated_at field - # will be a unicode representation of the datetime. In that case - # we just need to parse it into one. If it is an actual datetime - # we want to just grab it as a UTC naive datetime. - image_updated_at = image_meta['updated_at'] - if isinstance(image_updated_at, six.string_types): - image_updated_at = timeutils.parse_strtime(image_updated_at) - else: - image_updated_at = image_updated_at.astimezone(timezone('UTC')) - - cache_entry = self.db.image_volume_cache_create( - context, - volume_ref.host, - volume_ref.cluster_name, - image_id, - image_updated_at.replace(tzinfo=None), - volume_ref.id, - volume_ref.size - ) - - LOG.debug('New image-volume cache entry created: %(entry)s.', - {'entry': self._entry_to_str(cache_entry)}) - return cache_entry - - def ensure_space(self, context, volume): - """Makes room for a volume cache entry. - - Returns True if successful, false otherwise. - """ - - # Check to see if the cache is actually limited. - if self.max_cache_size_gb == 0 and self.max_cache_size_count == 0: - return True - - # Make sure that we can potentially fit the image in the cache - # and bail out before evicting everything else to try and make - # room for it. - if (self.max_cache_size_gb != 0 and - volume.size > self.max_cache_size_gb): - return False - - # Assume the entries are ordered by most recently used to least used. 
- entries = self.db.image_volume_cache_get_all( - context, - **self._get_query_filters(volume)) - - current_count = len(entries) - - current_size = 0 - for entry in entries: - current_size += entry['size'] - - # Add values for the entry we intend to create. - current_size += volume.size - current_count += 1 - - LOG.debug('Image-volume cache for %(service)s current_size (GB) = ' - '%(size_gb)s (max = %(max_gb)s), current count = %(count)s ' - '(max = %(max_count)s).', - {'service': volume.service_topic_queue, - 'size_gb': current_size, - 'max_gb': self.max_cache_size_gb, - 'count': current_count, - 'max_count': self.max_cache_size_count}) - - while ((current_size > self.max_cache_size_gb - or current_count > self.max_cache_size_count) - and len(entries)): - entry = entries.pop() - LOG.debug('Reclaiming image-volume cache space; removing cache ' - 'entry %(entry)s.', {'entry': self._entry_to_str(entry)}) - self._delete_image_volume(context, entry) - current_size -= entry['size'] - current_count -= 1 - LOG.debug('Image-volume cache for %(service)s new size (GB) = ' - '%(size_gb)s, new count = %(count)s.', - {'service': volume.service_topic_queue, - 'size_gb': current_size, - 'count': current_count}) - - # It is only possible to not free up enough gb, we will always be able - # to free enough count. This is because 0 means unlimited which means - # it is guaranteed to be >0 if limited, and we can always delete down - # to 0. 
- if self.max_cache_size_gb > 0: - if current_size > self.max_cache_size_gb > 0: - LOG.warning('Image-volume cache for %(service)s does ' - 'not have enough space (GB).', - {'service': volume.service_topic_queue}) - return False - - return True - - @utils.if_notifications_enabled - def _notify_cache_hit(self, context, image_id, host): - self._notify_cache_action(context, image_id, host, 'hit') - - @utils.if_notifications_enabled - def _notify_cache_miss(self, context, image_id, host): - self._notify_cache_action(context, image_id, host, 'miss') - - @utils.if_notifications_enabled - def _notify_cache_eviction(self, context, image_id, host): - self._notify_cache_action(context, image_id, host, 'evict') - - @utils.if_notifications_enabled - def _notify_cache_action(self, context, image_id, host, action): - data = { - 'image_id': image_id, - 'host': host, - } - LOG.debug('ImageVolumeCache notification: action=%(action)s' - ' data=%(data)s.', {'action': action, 'data': data}) - self.notifier.info(context, 'image_volume_cache.%s' % action, data) - - def _delete_image_volume(self, context, cache_entry): - """Delete a volume and remove cache entry.""" - volume = objects.Volume.get_by_id(context, cache_entry['volume_id']) - - # Delete will evict the cache entry. 
- self.volume_api.delete(context, volume) - - def _should_update_entry(self, cache_entry, image_meta): - """Ensure that the cache entry image data is still valid.""" - image_updated_utc = (image_meta['updated_at'] - .astimezone(timezone('UTC'))) - cache_updated_utc = (cache_entry['image_updated_at'] - .replace(tzinfo=timezone('UTC'))) - - LOG.debug('Image-volume cache entry image_update_at = %(entry_utc)s, ' - 'requested image updated_at = %(image_utc)s.', - {'entry_utc': six.text_type(cache_updated_utc), - 'image_utc': six.text_type(image_updated_utc)}) - - return image_updated_utc != cache_updated_utc - - def _entry_to_str(self, cache_entry): - return six.text_type({ - 'id': cache_entry['id'], - 'image_id': cache_entry['image_id'], - 'volume_id': cache_entry['volume_id'], - 'host': cache_entry['host'], - 'size': cache_entry['size'], - 'image_updated_at': cache_entry['image_updated_at'], - 'last_used': cache_entry['last_used'], - }) diff --git a/cinder/image/glance.py b/cinder/image/glance.py deleted file mode 100644 index ee4b2cc2a..000000000 --- a/cinder/image/glance.py +++ /dev/null @@ -1,645 +0,0 @@ -# Copyright 2010 OpenStack Foundation -# Copyright 2013 NTT corp. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -"""Implementation of an image service that uses Glance as the backend""" - - -from __future__ import absolute_import - -import copy -import itertools -import random -import shutil -import sys -import time - -import glanceclient.exc -from oslo_config import cfg -from oslo_log import log as logging -from oslo_serialization import jsonutils -from oslo_utils import timeutils -import six -from six.moves import range -from six.moves import urllib - -from cinder import exception -from cinder.i18n import _ - - -glance_opts = [ - cfg.ListOpt('allowed_direct_url_schemes', - default=[], - help='A list of url schemes that can be downloaded directly ' - 'via the direct_url. Currently supported schemes: ' - '[file, cinder].'), - cfg.StrOpt('glance_catalog_info', - default='image:glance:publicURL', - help='Info to match when looking for glance in the service ' - 'catalog. Format is: separated values of the form: ' - ':: - ' - 'Only used if glance_api_servers are not provided.'), -] -glance_core_properties_opts = [ - cfg.ListOpt('glance_core_properties', - default=['checksum', 'container_format', - 'disk_format', 'image_name', 'image_id', - 'min_disk', 'min_ram', 'name', 'size'], - help='Default core properties of image') -] -CONF = cfg.CONF -CONF.register_opts(glance_opts) -CONF.register_opts(glance_core_properties_opts) -CONF.import_opt('glance_api_version', 'cinder.common.config') - -LOG = logging.getLogger(__name__) - - -def _parse_image_ref(image_href): - """Parse an image href into composite parts. 
- - :param image_href: href of an image - :returns: a tuple of the form (image_id, netloc, use_ssl) - :raises ValueError: - - """ - url = urllib.parse.urlparse(image_href) - netloc = url.netloc - image_id = url.path.split('/')[-1] - use_ssl = (url.scheme == 'https') - return (image_id, netloc, use_ssl) - - -def _create_glance_client(context, netloc, use_ssl, version=None): - """Instantiate a new glanceclient.Client object.""" - if version is None: - version = CONF.glance_api_version - params = {} - if use_ssl: - scheme = 'https' - # https specific params - params['insecure'] = CONF.glance_api_insecure - params['ssl_compression'] = CONF.glance_api_ssl_compression - params['cacert'] = CONF.glance_ca_certificates_file - else: - scheme = 'http' - if CONF.auth_strategy == 'keystone': - params['token'] = context.auth_token - if CONF.glance_request_timeout is not None: - params['timeout'] = CONF.glance_request_timeout - endpoint = '%s://%s' % (scheme, netloc) - params['global_request_id'] = context.global_id - return glanceclient.Client(str(version), endpoint, **params) - - -def get_api_servers(context): - """Return Iterable over shuffled api servers. - - Shuffle a list of glance_api_servers and return an iterator - that will cycle through the list, looping around to the beginning - if necessary. If CONF.glance_api_servers is None then they will - be retrieved from the catalog. 
- """ - api_servers = [] - api_servers_info = [] - - if CONF.glance_api_servers is None: - info = CONF.glance_catalog_info - try: - service_type, service_name, endpoint_type = info.split(':') - except ValueError: - raise exception.InvalidConfigurationValue(_( - "Failed to parse the configuration option " - "'glance_catalog_info', must be in the form " - "::")) - for entry in context.service_catalog: - if entry.get('type') == service_type: - api_servers.append( - entry.get('endpoints')[0].get(endpoint_type)) - else: - for api_server in CONF.glance_api_servers: - api_servers.append(api_server) - - for api_server in api_servers: - if '//' not in api_server: - api_server = 'http://' + api_server - url = urllib.parse.urlparse(api_server) - netloc = url.netloc + url.path - use_ssl = (url.scheme == 'https') - api_servers_info.append((netloc, use_ssl)) - - random.shuffle(api_servers_info) - return itertools.cycle(api_servers_info) - - -class GlanceClientWrapper(object): - """Glance client wrapper class that implements retries.""" - - def __init__(self, context=None, netloc=None, use_ssl=False, - version=None): - if netloc is not None: - self.client = self._create_static_client(context, - netloc, - use_ssl, version) - else: - self.client = None - self.api_servers = None - self.version = version - - def _create_static_client(self, context, netloc, use_ssl, version): - """Create a client that we'll use for every call.""" - self.netloc = netloc - self.use_ssl = use_ssl - self.version = version - return _create_glance_client(context, - self.netloc, - self.use_ssl, self.version) - - def _create_onetime_client(self, context, version): - """Create a client that will be used for one call.""" - if self.api_servers is None: - self.api_servers = get_api_servers(context) - self.netloc, self.use_ssl = next(self.api_servers) - return _create_glance_client(context, - self.netloc, - self.use_ssl, version) - - def call(self, context, method, *args, **kwargs): - """Call a glance client 
method. - - If we get a connection error, - retry the request according to CONF.glance_num_retries. - """ - version = kwargs.pop('version', self.version) - - retry_excs = (glanceclient.exc.ServiceUnavailable, - glanceclient.exc.InvalidEndpoint, - glanceclient.exc.CommunicationError) - num_attempts = 1 + CONF.glance_num_retries - - for attempt in range(1, num_attempts + 1): - client = self.client or self._create_onetime_client(context, - version) - try: - controller = getattr(client, - kwargs.pop('controller', 'images')) - return getattr(controller, method)(*args, **kwargs) - except retry_excs as e: - netloc = self.netloc - extra = "retrying" - error_msg = _("Error contacting glance server " - "'%(netloc)s' for '%(method)s', " - "%(extra)s.") - if attempt == num_attempts: - extra = 'done trying' - LOG.exception(error_msg, {'netloc': netloc, - 'method': method, - 'extra': extra}) - raise exception.GlanceConnectionFailed(reason=e) - - LOG.exception(error_msg, {'netloc': netloc, - 'method': method, - 'extra': extra}) - time.sleep(1) - except glanceclient.exc.HTTPOverLimit as e: - raise exception.ImageLimitExceeded(e) - - -class GlanceImageService(object): - """Provides storage and retrieval of disk image objects within Glance.""" - - def __init__(self, client=None): - self._client = client or GlanceClientWrapper() - self._image_schema = None - self.temp_images = None - - def detail(self, context, **kwargs): - """Calls out to Glance for a list of detailed image information.""" - params = self._extract_query_params(kwargs) - try: - images = self._client.call(context, 'list', **params) - except Exception: - _reraise_translated_exception() - - _images = [] - for image in images: - if self._is_image_available(context, image): - _images.append(self._translate_from_glance(context, image)) - - return _images - - def _extract_query_params(self, params): - _params = {} - accepted_params = ('filters', 'marker', 'limit', - 'sort_key', 'sort_dir') - for param in accepted_params: - 
if param in params: - _params[param] = params.get(param) - - # NOTE(geguileo): We set is_public default value for v1 because we want - # to retrieve all images by default. We don't need to send v2 - # equivalent - "visible" - because its default value when omitted is - # "public, private, shared", which will return all. - if CONF.glance_api_version <= 1: - # ensure filters is a dict - _params.setdefault('filters', {}) - # NOTE(vish): don't filter out private images - _params['filters'].setdefault('is_public', 'none') - - return _params - - def show(self, context, image_id): - """Returns a dict with image data for the given opaque image id.""" - try: - image = self._client.call(context, 'get', image_id) - except Exception: - _reraise_translated_image_exception(image_id) - - if not self._is_image_available(context, image): - raise exception.ImageNotFound(image_id=image_id) - - base_image_meta = self._translate_from_glance(context, image) - return base_image_meta - - def get_location(self, context, image_id): - """Get backend storage location url. - - Returns a tuple containing the direct url and locations representing - the backend storage location, or (None, None) if these attributes are - not shown by Glance. - """ - if CONF.glance_api_version == 1: - # image location not available in v1 - return (None, None) - try: - # direct_url is returned by v2 api - client = GlanceClientWrapper(version=2) - image_meta = client.call(context, 'get', image_id) - except Exception: - _reraise_translated_image_exception(image_id) - - if not self._is_image_available(context, image_meta): - raise exception.ImageNotFound(image_id=image_id) - - # some glance stores like nfs only meta data - # is stored and returned as locations. - # so composite of two needs to be returned. - return (getattr(image_meta, 'direct_url', None), - getattr(image_meta, 'locations', None)) - - def add_location(self, context, image_id, url, metadata): - """Add a backend location url to an image. 
- - Returns a dict containing image metadata on success. - """ - if CONF.glance_api_version != 2: - raise exception.Invalid("Image API version 2 is disabled.") - client = GlanceClientWrapper(version=2) - try: - return client.call(context, 'add_location', - image_id, url, metadata) - except Exception: - _reraise_translated_image_exception(image_id) - - def download(self, context, image_id, data=None): - """Calls out to Glance for data and writes data.""" - if data and 'file' in CONF.allowed_direct_url_schemes: - direct_url, locations = self.get_location(context, image_id) - urls = [direct_url] + [loc.get('url') for loc in locations or []] - for url in urls: - if url is None: - continue - parsed_url = urllib.parse.urlparse(url) - if parsed_url.scheme == "file": - # a system call to cp could have significant performance - # advantages, however we do not have the path to files at - # this point in the abstraction. - with open(parsed_url.path, "r") as f: - shutil.copyfileobj(f, data) - return - - try: - image_chunks = self._client.call(context, 'data', image_id) - except Exception: - _reraise_translated_image_exception(image_id) - - if not data: - return image_chunks - else: - for chunk in image_chunks: - data.write(chunk) - - def create(self, context, image_meta, data=None): - """Store the image data and return the new image object.""" - sent_service_image_meta = self._translate_to_glance(image_meta) - - if data: - sent_service_image_meta['data'] = data - - recv_service_image_meta = self._client.call(context, 'create', - **sent_service_image_meta) - - return self._translate_from_glance(context, recv_service_image_meta) - - def update(self, context, image_id, - image_meta, data=None, purge_props=True): - """Modify the given image with the new data.""" - # For v2, _translate_to_glance stores custom properties in image meta - # directly. We need the custom properties to identify properties to - # remove if purge_props is True. 
Save the custom properties before - # translate. - if CONF.glance_api_version > 1 and purge_props: - props_to_update = image_meta.get('properties', {}).keys() - - image_meta = self._translate_to_glance(image_meta) - # NOTE(dosaboy): see comment in bug 1210467 - if CONF.glance_api_version == 1: - image_meta['purge_props'] = purge_props - # NOTE(bcwaldon): id is not an editable field, but it is likely to be - # passed in by calling code. Let's be nice and ignore it. - image_meta.pop('id', None) - try: - # NOTE(dosaboy): the v2 api separates update from upload - if CONF.glance_api_version > 1: - if data: - self._client.call(context, 'upload', image_id, data) - if image_meta: - if purge_props: - # Properties to remove are those not specified in - # input properties. - cur_image_meta = self.show(context, image_id) - cur_props = cur_image_meta['properties'].keys() - remove_props = list(set(cur_props) - - set(props_to_update)) - image_meta['remove_props'] = remove_props - image_meta = self._client.call(context, 'update', image_id, - **image_meta) - else: - image_meta = self._client.call(context, 'get', image_id) - else: - if data: - image_meta['data'] = data - image_meta = self._client.call(context, 'update', image_id, - **image_meta) - except Exception: - _reraise_translated_image_exception(image_id) - else: - return self._translate_from_glance(context, image_meta) - - def delete(self, context, image_id): - """Delete the given image. - - :raises ImageNotFound: if the image does not exist. - :raises NotAuthorized: if the user is not an owner. - - """ - try: - self._client.call(context, 'delete', image_id) - except glanceclient.exc.NotFound: - raise exception.ImageNotFound(image_id=image_id) - return True - - def _translate_from_glance(self, context, image): - """Get image metadata from glance image. - - Extract metadata from image and convert it's properties - to type cinder expected. 
- - :param image: glance image object - :return: image metadata dictionary - """ - if CONF.glance_api_version == 2: - if self._image_schema is None: - self._image_schema = self._client.call(context, 'get', - controller='schemas', - schema_name='image', - version=2) - # NOTE(aarefiev): get base image property, store image 'schema' - # is redundant, so ignore it. - image_meta = {key: getattr(image, key) - for key in image.keys() - if self._image_schema.is_base_property(key) is True - and key != 'schema'} - - # NOTE(aarefiev): nova is expected that all image properties - # (custom or defined in schema-image.json) stores in - # 'properties' key. - image_meta['properties'] = { - key: getattr(image, key) for key in image.keys() - if self._image_schema.is_base_property(key) is False} - else: - image_meta = _extract_attributes(image) - - image_meta = _convert_timestamps_to_datetimes(image_meta) - image_meta = _convert_from_string(image_meta) - return image_meta - - @staticmethod - def _translate_to_glance(image_meta): - image_meta = _convert_to_string(image_meta) - image_meta = _remove_read_only(image_meta) - - # NOTE(tsekiyama): From the Image API v2, custom properties must - # be stored in image_meta directly, instead of the 'properties' key. - if CONF.glance_api_version >= 2: - properties = image_meta.get('properties') - if properties: - image_meta.update(properties) - del image_meta['properties'] - - return image_meta - - @staticmethod - def _is_image_available(context, image): - """Check image availability. - - This check is needed in case Nova and Glance are deployed - without authentication turned on. - """ - # The presence of an auth token implies this is an authenticated - # request and we need not handle the noauth use-case. 
- if hasattr(context, 'auth_token') and context.auth_token: - return True - - if image.is_public or context.is_admin: - return True - - properties = image.properties - - if context.project_id and ('owner_id' in properties): - return str(properties['owner_id']) == str(context.project_id) - - if context.project_id and ('project_id' in properties): - return str(properties['project_id']) == str(context.project_id) - - try: - user_id = properties['user_id'] - except KeyError: - return False - - return str(user_id) == str(context.user_id) - - -def _convert_timestamps_to_datetimes(image_meta): - """Returns image with timestamp fields converted to datetime objects.""" - for attr in ['created_at', 'updated_at', 'deleted_at']: - if image_meta.get(attr): - image_meta[attr] = timeutils.parse_isotime(image_meta[attr]) - return image_meta - - -# NOTE(bcwaldon): used to store non-string data in glance metadata -def _json_loads(properties, attr): - prop = properties[attr] - if isinstance(prop, six.string_types): - properties[attr] = jsonutils.loads(prop) - - -def _json_dumps(properties, attr): - prop = properties[attr] - if not isinstance(prop, six.string_types): - properties[attr] = jsonutils.dumps(prop) - - -_CONVERT_PROPS = ('block_device_mapping', 'mappings') - - -def _convert(method, metadata): - metadata = copy.deepcopy(metadata) - properties = metadata.get('properties') - if properties: - for attr in _CONVERT_PROPS: - if attr in properties: - method(properties, attr) - - return metadata - - -def _convert_from_string(metadata): - return _convert(_json_loads, metadata) - - -def _convert_to_string(metadata): - return _convert(_json_dumps, metadata) - - -def _extract_attributes(image): - # NOTE(hdd): If a key is not found, base.Resource.__getattr__() may perform - # a get(), resulting in a useless request back to glance. 
This list is - # therefore sorted, with dependent attributes as the end - # 'deleted_at' depends on 'deleted' - # 'checksum' depends on 'status' == 'active' - IMAGE_ATTRIBUTES = ['size', 'disk_format', 'owner', - 'container_format', 'status', 'id', - 'name', 'created_at', 'updated_at', - 'deleted', 'deleted_at', 'checksum', - 'min_disk', 'min_ram', 'protected'] - if CONF.glance_api_version == 2: - IMAGE_ATTRIBUTES.append('visibility') - else: - IMAGE_ATTRIBUTES.append('is_public') - - output = {} - - for attr in IMAGE_ATTRIBUTES: - if attr == 'deleted_at' and not output['deleted']: - output[attr] = None - elif attr == 'checksum' and output['status'] != 'active': - output[attr] = None - else: - output[attr] = getattr(image, attr, None) - - output['properties'] = getattr(image, 'properties', {}) - - return output - - -def _remove_read_only(image_meta): - IMAGE_ATTRIBUTES = ['status', 'updated_at', 'created_at', 'deleted_at'] - output = copy.deepcopy(image_meta) - for attr in IMAGE_ATTRIBUTES: - if attr in output: - del output[attr] - return output - - -def _reraise_translated_image_exception(image_id): - """Transform the exception for the image but keep its traceback intact.""" - _exc_type, exc_value, exc_trace = sys.exc_info() - new_exc = _translate_image_exception(image_id, exc_value) - six.reraise(type(new_exc), new_exc, exc_trace) - - -def _reraise_translated_exception(): - """Transform the exception but keep its traceback intact.""" - _exc_type, exc_value, exc_trace = sys.exc_info() - new_exc = _translate_plain_exception(exc_value) - six.reraise(type(new_exc), new_exc, exc_trace) - - -def _translate_image_exception(image_id, exc_value): - if isinstance(exc_value, (glanceclient.exc.Forbidden, - glanceclient.exc.Unauthorized)): - return exception.ImageNotAuthorized(image_id=image_id) - if isinstance(exc_value, glanceclient.exc.NotFound): - return exception.ImageNotFound(image_id=image_id) - if isinstance(exc_value, glanceclient.exc.BadRequest): - return 
exception.Invalid(exc_value) - return exc_value - - -def _translate_plain_exception(exc_value): - if isinstance(exc_value, (glanceclient.exc.Forbidden, - glanceclient.exc.Unauthorized)): - return exception.NotAuthorized(exc_value) - if isinstance(exc_value, glanceclient.exc.NotFound): - return exception.NotFound(exc_value) - if isinstance(exc_value, glanceclient.exc.BadRequest): - return exception.Invalid(exc_value) - return exc_value - - -def get_remote_image_service(context, image_href): - """Create an image_service and parse the id from the given image_href. - - The image_href param can be an href of the form - 'http://example.com:9292/v1/images/b8b2c6f7-7345-4e2f-afa2-eedaba9cbbe3', - or just an id such as 'b8b2c6f7-7345-4e2f-afa2-eedaba9cbbe3'. If the - image_href is a standalone id, then the default image service is returned. - - :param image_href: href that describes the location of an image - :returns: a tuple of the form (image_service, image_id) - - """ - # NOTE(bcwaldon): If image_href doesn't look like a URI, assume its a - # standalone image ID - if '/' not in str(image_href): - image_service = get_default_image_service() - return image_service, image_href - - try: - (image_id, glance_netloc, use_ssl) = _parse_image_ref(image_href) - glance_client = GlanceClientWrapper(context=context, - netloc=glance_netloc, - use_ssl=use_ssl) - except ValueError: - raise exception.InvalidImageRef(image_href=image_href) - - image_service = GlanceImageService(client=glance_client) - return image_service, image_id - - -def get_default_image_service(): - return GlanceImageService() diff --git a/cinder/image/image_utils.py b/cinder/image/image_utils.py deleted file mode 100644 index ca393811f..000000000 --- a/cinder/image/image_utils.py +++ /dev/null @@ -1,678 +0,0 @@ -# Copyright 2010 United States Government as represented by the -# Administrator of the National Aeronautics and Space Administration. -# All Rights Reserved. -# Copyright (c) 2010 Citrix Systems, Inc. 
-# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -""" -Helper methods to deal with images. - -This is essentially a copy from nova.virt.images.py -Some slight modifications, but at some point -we should look at maybe pushing this up to Oslo -""" - - -import contextlib -import errno -import math -import os -import re -import tempfile - -from oslo_concurrency import processutils -from oslo_config import cfg -from oslo_log import log as logging -from oslo_utils import fileutils -from oslo_utils import imageutils -from oslo_utils import timeutils -from oslo_utils import units -import psutil - -from cinder import exception -from cinder.i18n import _ -from cinder import utils -from cinder.volume import throttling -from cinder.volume import utils as volume_utils - -LOG = logging.getLogger(__name__) - -image_helper_opts = [cfg.StrOpt('image_conversion_dir', - default='$state_path/conversion', - help='Directory used for temporary storage ' - 'during image conversion'), ] - -CONF = cfg.CONF -CONF.register_opts(image_helper_opts) - -QEMU_IMG_LIMITS = processutils.ProcessLimits( - cpu_time=8, - address_space=1 * units.Gi) - -# NOTE(abhishekk): qemu-img convert command supports raw, qcow2, qed, -# vdi, vmdk, vhd and vhdx disk-formats but glance doesn't support qed -# disk-format. 
-# Ref: http://docs.openstack.org/image-guide/convert-images.html -VALID_DISK_FORMATS = ('raw', 'vmdk', 'vdi', 'qcow2', - 'vhd', 'vhdx', 'parallels') - - -def validate_disk_format(disk_format): - return disk_format in VALID_DISK_FORMATS - - -def qemu_img_info(path, run_as_root=True): - """Return an object containing the parsed output from qemu-img info.""" - cmd = ['env', 'LC_ALL=C', 'qemu-img', 'info', path] - - if os.name == 'nt': - cmd = cmd[2:] - out, _err = utils.execute(*cmd, run_as_root=run_as_root, - prlimit=QEMU_IMG_LIMITS) - info = imageutils.QemuImgInfo(out) - - # From Cinder's point of view, any 'luks' formatted images - # should be treated as 'raw'. - if info.file_format == 'luks': - info.file_format = 'raw' - - return info - - -def get_qemu_img_version(): - info = utils.execute('qemu-img', '--version', check_exit_code=False)[0] - pattern = r"qemu-img version ([0-9\.]*)" - version = re.match(pattern, info) - if not version: - LOG.warning("qemu-img is not installed.") - return None - return _get_version_from_string(version.groups()[0]) - - -def _get_version_from_string(version_string): - return [int(x) for x in version_string.split('.')] - - -def check_qemu_img_version(minimum_version): - qemu_version = get_qemu_img_version() - if (qemu_version is None - or qemu_version < _get_version_from_string(minimum_version)): - if qemu_version: - current_version = '.'.join((str(element) - for element in qemu_version)) - else: - current_version = None - - _msg = _('qemu-img %(minimum_version)s or later is required by ' - 'this volume driver. 
Current qemu-img version: ' - '%(current_version)s') % {'minimum_version': minimum_version, - 'current_version': current_version} - raise exception.VolumeBackendAPIException(data=_msg) - - -def _convert_image(prefix, source, dest, out_format, - src_format=None, run_as_root=True): - """Convert image to other format.""" - - cmd = prefix + ('qemu-img', 'convert', - '-O', out_format, source, dest) - - # Check whether O_DIRECT is supported and set '-t none' if it is - # This is needed to ensure that all data hit the device before - # it gets unmapped remotely from the host for some backends - # Reference Bug: #1363016 - - # NOTE(jdg): In the case of file devices qemu does the - # flush properly and more efficiently than would be done - # setting O_DIRECT, so check for that and skip the - # setting for non BLK devs - if (utils.is_blk_device(dest) and - volume_utils.check_for_odirect_support(source, - dest, - 'oflag=direct')): - cmd = prefix + ('qemu-img', 'convert', - '-t', 'none') - - # AMI images can be raw or qcow2 but qemu-img doesn't accept "ami" as - # an image format, so we use automatic detection. - # TODO(geguileo): This fixes unencrypted AMI image case, but we need to - # fix the encrypted case. - if (src_format or '').lower() not in ('', 'ami'): - cmd += ('-f', src_format) # prevent detection of format - - cmd += ('-O', out_format, source, dest) - - start_time = timeutils.utcnow() - utils.execute(*cmd, run_as_root=run_as_root) - duration = timeutils.delta_seconds(start_time, timeutils.utcnow()) - - # NOTE(jdg): use a default of 1, mostly for unit test, but in - # some incredible event this is 0 (cirros image?) don't barf - if duration < 1: - duration = 1 - try: - image_size = qemu_img_info(source, - run_as_root=run_as_root).virtual_size - except ValueError as e: - msg = ("The image was successfully converted, but image size " - "is unavailable. src %(src)s, dest %(dest)s. 
%(error)s") - LOG.info(msg, {"src": source, - "dest": dest, - "error": e}) - return - - fsz_mb = image_size / units.Mi - mbps = (fsz_mb / duration) - msg = ("Image conversion details: src %(src)s, size %(sz).2f MB, " - "duration %(duration).2f sec, destination %(dest)s") - LOG.debug(msg, {"src": source, - "sz": fsz_mb, - "duration": duration, - "dest": dest}) - - msg = "Converted %(sz).2f MB image at %(mbps).2f MB/s" - LOG.info(msg, {"sz": fsz_mb, "mbps": mbps}) - - -def convert_image(source, dest, out_format, src_format=None, - run_as_root=True, throttle=None): - if not throttle: - throttle = throttling.Throttle.get_default() - with throttle.subcommand(source, dest) as throttle_cmd: - _convert_image(tuple(throttle_cmd['prefix']), - source, dest, - out_format, - src_format=src_format, - run_as_root=run_as_root) - - -def resize_image(source, size, run_as_root=False): - """Changes the virtual size of the image.""" - cmd = ('qemu-img', 'resize', source, '%sG' % size) - utils.execute(*cmd, run_as_root=run_as_root) - - -def fetch(context, image_service, image_id, path, _user_id, _project_id): - # TODO(vish): Improve context handling and add owner and auth data - # when it is added to glance. Right now there is no - # auth checking in glance, so we assume that access was - # checked before we got here. 
- start_time = timeutils.utcnow() - with fileutils.remove_path_on_error(path): - with open(path, "wb") as image_file: - try: - image_service.download(context, image_id, image_file) - except IOError as e: - if e.errno == errno.ENOSPC: - params = {'path': os.path.dirname(path), - 'image': image_id} - reason = _("No space left in image_conversion_dir " - "path (%(path)s) while fetching " - "image %(image)s.") % params - LOG.exception(reason) - raise exception.ImageTooBig(image_id=image_id, - reason=reason) - - duration = timeutils.delta_seconds(start_time, timeutils.utcnow()) - - # NOTE(jdg): use a default of 1, mostly for unit test, but in - # some incredible event this is 0 (cirros image?) don't barf - if duration < 1: - duration = 1 - fsz_mb = os.stat(image_file.name).st_size / units.Mi - mbps = (fsz_mb / duration) - msg = ("Image fetch details: dest %(dest)s, size %(sz).2f MB, " - "duration %(duration).2f sec") - LOG.debug(msg, {"dest": image_file.name, - "sz": fsz_mb, - "duration": duration}) - msg = "Image download %(sz).2f MB at %(mbps).2f MB/s" - LOG.info(msg, {"sz": fsz_mb, "mbps": mbps}) - - -def get_qemu_data(image_id, has_meta, disk_format_raw, dest, run_as_root): - # We may be on a system that doesn't have qemu-img installed. That - # is ok if we are working with a RAW image. This logic checks to see - # if qemu-img is installed. If not we make sure the image is RAW and - # throw an exception if not. Otherwise we stop before needing - # qemu-img. Systems with qemu-img will always progress through the - # whole function. - try: - # Use the empty tmp file to make sure qemu_img_info works. 
- data = qemu_img_info(dest, run_as_root=run_as_root) - # There are a lot of cases that can cause a process execution - # error, but until we do more work to separate out the various - # cases we'll keep the general catch here - except processutils.ProcessExecutionError: - data = None - if has_meta: - if not disk_format_raw: - raise exception.ImageUnacceptable( - reason=_("qemu-img is not installed and image is of " - "type %s. Only RAW images can be used if " - "qemu-img is not installed.") % - disk_format_raw, - image_id=image_id) - else: - raise exception.ImageUnacceptable( - reason=_("qemu-img is not installed and the disk " - "format is not specified. Only RAW images " - "can be used if qemu-img is not installed."), - image_id=image_id) - return data - - -def fetch_verify_image(context, image_service, image_id, dest, - user_id=None, project_id=None, size=None, - run_as_root=True): - fetch(context, image_service, image_id, dest, - None, None) - image_meta = image_service.show(context, image_id) - - with fileutils.remove_path_on_error(dest): - has_meta = False if not image_meta else True - try: - format_raw = True if image_meta['disk_format'] == 'raw' else False - except TypeError: - format_raw = False - data = get_qemu_data(image_id, has_meta, format_raw, - dest, run_as_root) - # We can only really do verification of the image if we have - # qemu data to use - if data is not None: - fmt = data.file_format - if fmt is None: - raise exception.ImageUnacceptable( - reason=_("'qemu-img info' parsing failed."), - image_id=image_id) - - backing_file = data.backing_file - if backing_file is not None: - raise exception.ImageUnacceptable( - image_id=image_id, - reason=(_("fmt=%(fmt)s backed by: %(backing_file)s") % - {'fmt': fmt, 'backing_file': backing_file})) - - # NOTE(xqueralt): If the image virtual size doesn't fit in the - # requested volume there is no point on resizing it because it will - # generate an unusable image. 
- if size is not None and data.virtual_size > size: - params = {'image_size': data.virtual_size, 'volume_size': size} - reason = _("Size is %(image_size)dGB and doesn't fit in a " - "volume of size %(volume_size)dGB.") % params - raise exception.ImageUnacceptable(image_id=image_id, - reason=reason) - - -def fetch_to_vhd(context, image_service, - image_id, dest, blocksize, - user_id=None, project_id=None, run_as_root=True): - fetch_to_volume_format(context, image_service, image_id, dest, 'vpc', - blocksize, user_id, project_id, - run_as_root=run_as_root) - - -def fetch_to_raw(context, image_service, - image_id, dest, blocksize, - user_id=None, project_id=None, size=None, run_as_root=True): - fetch_to_volume_format(context, image_service, image_id, dest, 'raw', - blocksize, user_id, project_id, size, - run_as_root=run_as_root) - - -def fetch_to_volume_format(context, image_service, - image_id, dest, volume_format, blocksize, - user_id=None, project_id=None, size=None, - run_as_root=True): - qemu_img = True - image_meta = image_service.show(context, image_id) - - # NOTE(avishay): I'm not crazy about creating temp files which may be - # large and cause disk full errors which would confuse users. - # Unfortunately it seems that you can't pipe to 'qemu-img convert' because - # it seeks. Maybe we can think of something for a future version. 
- with temporary_file() as tmp: - has_meta = False if not image_meta else True - try: - format_raw = True if image_meta['disk_format'] == 'raw' else False - except TypeError: - format_raw = False - data = get_qemu_data(image_id, has_meta, format_raw, - tmp, run_as_root) - if data is None: - qemu_img = False - - tmp_images = TemporaryImages.for_image_service(image_service) - tmp_image = tmp_images.get(context, image_id) - if tmp_image: - tmp = tmp_image - else: - fetch(context, image_service, image_id, tmp, user_id, project_id) - - if is_xenserver_format(image_meta): - replace_xenserver_image_with_coalesced_vhd(tmp) - - if not qemu_img: - # qemu-img is not installed but we do have a RAW image. As a - # result we only need to copy the image to the destination and then - # return. - LOG.debug('Copying image from %(tmp)s to volume %(dest)s - ' - 'size: %(size)s', {'tmp': tmp, 'dest': dest, - 'size': image_meta['size']}) - image_size_m = math.ceil(float(image_meta['size']) / units.Mi) - volume_utils.copy_volume(tmp, dest, image_size_m, blocksize) - return - - data = qemu_img_info(tmp, run_as_root=run_as_root) - virt_size = int(math.ceil(float(data.virtual_size) / units.Gi)) - - # NOTE(xqueralt): If the image virtual size doesn't fit in the - # requested volume there is no point on resizing it because it will - # generate an unusable image. 
- if size is not None and virt_size > size: - params = {'image_size': virt_size, 'volume_size': size} - reason = _("Size is %(image_size)dGB and doesn't fit in a " - "volume of size %(volume_size)dGB.") % params - raise exception.ImageUnacceptable(image_id=image_id, reason=reason) - - fmt = data.file_format - if fmt is None: - raise exception.ImageUnacceptable( - reason=_("'qemu-img info' parsing failed."), - image_id=image_id) - - backing_file = data.backing_file - if backing_file is not None: - raise exception.ImageUnacceptable( - image_id=image_id, - reason=_("fmt=%(fmt)s backed by:%(backing_file)s") - % {'fmt': fmt, 'backing_file': backing_file, }) - - # NOTE(e0ne): check for free space in destination directory before - # image convertion. - check_available_space(dest, virt_size, image_id) - - # NOTE(jdg): I'm using qemu-img convert to write - # to the volume regardless if it *needs* conversion or not - # TODO(avishay): We can speed this up by checking if the image is raw - # and if so, writing directly to the device. However, we need to keep - # check via 'qemu-img info' that what we copied was in fact a raw - # image and not a different format with a backing file, which may be - # malicious. - LOG.debug("%s was %s, converting to %s ", image_id, fmt, volume_format) - if image_meta['disk_format'] == 'vhd': - # qemu-img still uses the legacy 'vpc' name for vhd format. - disk_format = 'vpc' - else: - disk_format = image_meta['disk_format'] - convert_image(tmp, dest, volume_format, - src_format=disk_format, - run_as_root=run_as_root) - - -def _validate_file_format(image_data, expected_format): - if image_data.file_format == expected_format: - return True - elif image_data.file_format == 'vpc' and expected_format == 'vhd': - # qemu-img still uses the legacy 'vpc' name for the vhd format. 
- return True - return False - - -def upload_volume(context, image_service, image_meta, volume_path, - volume_format='raw', run_as_root=True): - image_id = image_meta['id'] - if (image_meta['disk_format'] == volume_format): - LOG.debug("%s was %s, no need to convert to %s", - image_id, volume_format, image_meta['disk_format']) - if os.name == 'nt' or os.access(volume_path, os.R_OK): - with open(volume_path, 'rb') as image_file: - image_service.update(context, image_id, {}, image_file) - else: - with utils.temporary_chown(volume_path): - with open(volume_path, 'rb') as image_file: - image_service.update(context, image_id, {}, image_file) - return - - with temporary_file() as tmp: - LOG.debug("%s was %s, converting to %s", - image_id, volume_format, image_meta['disk_format']) - - data = qemu_img_info(volume_path, run_as_root=run_as_root) - backing_file = data.backing_file - fmt = data.file_format - if backing_file is not None: - # Disallow backing files as a security measure. - # This prevents a user from writing an image header into a raw - # volume with a backing file pointing to data they wish to - # access. 
- raise exception.ImageUnacceptable( - image_id=image_id, - reason=_("fmt=%(fmt)s backed by:%(backing_file)s") - % {'fmt': fmt, 'backing_file': backing_file}) - - out_format = image_meta['disk_format'] - # qemu-img accepts 'vpc' as argument for vhd format - if out_format == 'vhd': - out_format = 'vpc' - - convert_image(volume_path, tmp, out_format, - run_as_root=run_as_root) - - data = qemu_img_info(tmp, run_as_root=run_as_root) - if data.file_format != out_format: - raise exception.ImageUnacceptable( - image_id=image_id, - reason=_("Converted to %(f1)s, but format is now %(f2)s") % - {'f1': out_format, 'f2': data.file_format}) - - with open(tmp, 'rb') as image_file: - image_service.update(context, image_id, {}, image_file) - - -def check_virtual_size(virtual_size, volume_size, image_id): - virtual_size = int(math.ceil(float(virtual_size) / units.Gi)) - - if virtual_size > volume_size: - params = {'image_size': virtual_size, - 'volume_size': volume_size} - reason = _("Image virtual size is %(image_size)dGB" - " and doesn't fit in a volume of size" - " %(volume_size)dGB.") % params - raise exception.ImageUnacceptable(image_id=image_id, - reason=reason) - return virtual_size - - -def check_available_space(dest, image_size, image_id): - # TODO(e0ne): replace psutil with shutil.disk_usage when we drop - # Python 2.7 support. - if not os.path.isdir(dest): - dest = os.path.dirname(dest) - - free_space = psutil.disk_usage(dest).free - if free_space <= image_size: - msg = ('There is no space to convert image. 
' - 'Requested: %(image_size)s, available: %(free_space)s' - ) % {'image_size': image_size, 'free_space': free_space} - raise exception.ImageTooBig(image_id=image_id, reason=msg) - - -def is_xenserver_format(image_meta): - return ( - image_meta['disk_format'] == 'vhd' - and image_meta['container_format'] == 'ovf' - ) - - -def set_vhd_parent(vhd_path, parentpath): - utils.execute('vhd-util', 'modify', '-n', vhd_path, '-p', parentpath) - - -def extract_targz(archive_name, target): - utils.execute('tar', '-xzf', archive_name, '-C', target) - - -def fix_vhd_chain(vhd_chain): - for child, parent in zip(vhd_chain[:-1], vhd_chain[1:]): - set_vhd_parent(child, parent) - - -def get_vhd_size(vhd_path): - out, _err = utils.execute('vhd-util', 'query', '-n', vhd_path, '-v') - return int(out) - - -def resize_vhd(vhd_path, size, journal): - utils.execute( - 'vhd-util', 'resize', '-n', vhd_path, '-s', '%d' % size, '-j', journal) - - -def coalesce_vhd(vhd_path): - utils.execute( - 'vhd-util', 'coalesce', '-n', vhd_path) - - -def create_temporary_file(*args, **kwargs): - if (CONF.image_conversion_dir and not - os.path.exists(CONF.image_conversion_dir)): - os.makedirs(CONF.image_conversion_dir) - - fd, tmp = tempfile.mkstemp(dir=CONF.image_conversion_dir, *args, **kwargs) - os.close(fd) - return tmp - - -def cleanup_temporary_file(backend_name): - temp_dir = CONF.image_conversion_dir - if (not temp_dir or not os.path.exists(temp_dir)): - LOG.debug("Configuration image_conversion_dir is None or the path " - "doesn't exist.") - return - try: - # TODO(wanghao): Consider using os.scandir for better performance in - # future when cinder only supports Python version 3.5+. - files = os.listdir(CONF.image_conversion_dir) - # NOTE(wanghao): For multi-backend case, if one backend was slow - # starting but another backend is up and doing an image conversion, - # init_host should only clean the tmp files which belongs to its - # backend. 
- for tmp_file in files: - if tmp_file.endswith(backend_name): - path = os.path.join(temp_dir, tmp_file) - os.remove(path) - except OSError as e: - LOG.warning("Exception caught while clearing temporary image " - "files: %s", e) - - -@contextlib.contextmanager -def temporary_file(*args, **kwargs): - tmp = None - try: - tmp = create_temporary_file(*args, **kwargs) - yield tmp - finally: - if tmp: - fileutils.delete_if_exists(tmp) - - -def temporary_dir(): - if (CONF.image_conversion_dir and not - os.path.exists(CONF.image_conversion_dir)): - os.makedirs(CONF.image_conversion_dir) - - return utils.tempdir(dir=CONF.image_conversion_dir) - - -def coalesce_chain(vhd_chain): - for child, parent in zip(vhd_chain[:-1], vhd_chain[1:]): - with temporary_dir() as directory_for_journal: - size = get_vhd_size(child) - journal_file = os.path.join( - directory_for_journal, 'vhd-util-resize-journal') - resize_vhd(parent, size, journal_file) - coalesce_vhd(child) - - return vhd_chain[-1] - - -def discover_vhd_chain(directory): - counter = 0 - chain = [] - - while True: - fpath = os.path.join(directory, '%d.vhd' % counter) - if os.path.exists(fpath): - chain.append(fpath) - else: - break - counter += 1 - - return chain - - -def replace_xenserver_image_with_coalesced_vhd(image_file): - with temporary_dir() as tempdir: - extract_targz(image_file, tempdir) - chain = discover_vhd_chain(tempdir) - fix_vhd_chain(chain) - coalesced = coalesce_chain(chain) - fileutils.delete_if_exists(image_file) - os.rename(coalesced, image_file) - - -class TemporaryImages(object): - """Manage temporarily downloaded images to avoid downloading it twice. - - In the 'with TemporaryImages.fetch(image_service, ctx, image_id) as tmp' - clause, 'tmp' can be used as the downloaded image path. In addition, - image_utils.fetch() will use the pre-fetched image by the TemporaryImages. - This is useful to inspect image contents before conversion. 
- """ - - def __init__(self, image_service): - self.temporary_images = {} - self.image_service = image_service - image_service.temp_images = self - - @staticmethod - def for_image_service(image_service): - instance = image_service.temp_images - if instance: - return instance - return TemporaryImages(image_service) - - @classmethod - @contextlib.contextmanager - def fetch(cls, image_service, context, image_id, suffix=''): - tmp_images = cls.for_image_service(image_service).temporary_images - with temporary_file(suffix=suffix) as tmp: - fetch_verify_image(context, image_service, image_id, tmp) - user = context.user_id - if not tmp_images.get(user): - tmp_images[user] = {} - tmp_images[user][image_id] = tmp - LOG.debug("Temporary image %(id)s is fetched for user %(user)s.", - {'id': image_id, 'user': user}) - yield tmp - del tmp_images[user][image_id] - LOG.debug("Temporary image %(id)s for user %(user)s is deleted.", - {'id': image_id, 'user': user}) - - def get(self, context, image_id): - user = context.user_id - if not self.temporary_images.get(user): - return None - return self.temporary_images[user].get(image_id) diff --git a/cinder/interface/__init__.py b/cinder/interface/__init__.py deleted file mode 100644 index 223cc4dec..000000000 --- a/cinder/interface/__init__.py +++ /dev/null @@ -1,37 +0,0 @@ -# Copyright 2016 Dell Inc. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
-# - -_volume_register = [] -_backup_register = [] -_fczm_register = [] - - -def volumedriver(cls): - """Decorator for concrete volume driver implementations.""" - _volume_register.append(cls) - return cls - - -def backupdriver(cls): - """Decorator for concrete backup driver implementations.""" - _backup_register.append(cls) - return cls - - -def fczmdriver(cls): - """Decorator for concrete fibre channel zone manager drivers.""" - _fczm_register.append(cls) - return cls diff --git a/cinder/interface/backup_chunked_driver.py b/cinder/interface/backup_chunked_driver.py deleted file mode 100644 index f9ff881aa..000000000 --- a/cinder/interface/backup_chunked_driver.py +++ /dev/null @@ -1,81 +0,0 @@ -# Copyright 2016 Dell Inc. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -# - -""" -Backup driver with 'chunked' backup operations. -""" - -from cinder.interface import backup_driver - - -class BackupChunkedDriver(backup_driver.BackupDriver): - """Backup driver that supports 'chunked' backups.""" - - def put_container(self, container): - """Create the container if needed. No failure if it pre-exists. - - :param container: The container to write into. - """ - - def get_container_entries(self, container, prefix): - """Get container entry names. - - :param container: The container from which to get entries. - :param prefix: The prefix used to match entries. 
- """ - - def get_object_writer(self, container, object_name, extra_metadata=None): - """Returns a writer which stores the chunk data in backup repository. - - :param container: The container to write to. - :param object_name: The object name to write. - :param extra_metadata: Extra metadata to be included. - :returns: A context handler that can be used in a "with" context. - """ - - def get_object_reader(self, container, object_name, extra_metadata=None): - """Returns a reader object for the backed up chunk. - - :param container: The container to read from. - :param object_name: The object name to read. - :param extra_metadata: Extra metadata to be included. - """ - - def delete_object(self, container, object_name): - """Delete object from container. - - :param container: The container to modify. - :param object_name: The object name to delete. - """ - - def update_container_name(self, backup, container): - """Allows sub-classes to override container name. - - This method exists so that sub-classes can override the container name - as it comes in to the driver in the backup object. Implementations - should return None if no change to the container name is desired. - """ - - def get_extra_metadata(self, backup, volume): - """Return extra metadata to use in prepare_backup. - - This method allows for collection of extra metadata in prepare_backup() - which will be passed to get_object_reader() and get_object_writer(). - Subclass extensions can use this extra information to optimize - data transfers. - - :returns: json serializable object - """ diff --git a/cinder/interface/backup_driver.py b/cinder/interface/backup_driver.py deleted file mode 100644 index 549ea3ae8..000000000 --- a/cinder/interface/backup_driver.py +++ /dev/null @@ -1,133 +0,0 @@ -# Copyright 2016 Dell Inc. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. 
You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -# - -""" -Core backup driver interface. - -All backup drivers should support this interface as a bare minimum. -""" - -from cinder.interface import base - - -class BackupDriver(base.CinderInterface): - """Backup driver required interface.""" - - def get_metadata(self, volume_id): - """Get volume metadata. - - Returns a json-encoded dict containing all metadata and the restore - version i.e. the version used to decide what actually gets restored - from this container when doing a backup restore. - - Typically best to use py:class:`BackupMetadataAPI` for this. - - :param volume_id: The ID of the volume. - :returns: json-encoded dict of metadata. - """ - - def put_metadata(self, volume_id, json_metadata): - """Set volume metadata. - - Typically best to use py:class:`BackupMetadataAPI` for this. - - :param volume_id: The ID of the volume. - :param json_metadata: The json-encoded dict of metadata. - """ - - def backup(self, backup, volume_file, backup_metadata=False): - """Start a backup of a specified volume. - - If backup['parent_id'] is given, then an incremental backup - should be performed. - - If the parent backup is of different size, a full backup should be - performed to ensure all data is included. - - :param backup: The backup information. - :param volume_file: The volume or file to write the backup to. - :param backup_metadata: Whether to include volume metadata in the - backup. 
- - The variable structure of backup in the following format:: - - { - 'id': id, - 'availability_zone': availability_zone, - 'service': driver_name, - 'user_id': context.user_id, - 'project_id': context.project_id, - 'display_name': name, - 'display_description': description, - 'volume_id': volume_id, - 'status': fields.BackupStatus.CREATING, - 'container': container, - 'parent_id': parent_id, - 'size': size, - 'host': host, - 'snapshot_id': snapshot_id, - 'data_timestamp': data_timestamp, - } - - service: backup driver - parent_id: parent backup id - size: equal to volume size - data_timestamp: backup creation time - """ - - def restore(self, backup, volume_id, volume_file): - """Restore volume from a backup. - - :param backup: The backup information. - :param volume_id: The volume to be restored. - :param volume_file: The volume or file to read the data from. - """ - - def delete_backup(self, backup): - """Delete a backup from the backup store. - - :param backup: The backup to be deleted. - """ - - def export_record(self, backup): - """Export driver specific backup record information. - - If backup backend needs additional driver specific information to - import backup record back into the system it must override this method - and return it as a dictionary so it can be serialized into a string. - - Default backup driver implementation has no extra information. - - :param backup: backup object to export - :returns: driver_info - dictionary with extra information - """ - - def import_record(self, backup, driver_info): - """Import driver specific backup record information. - - If backup backend needs additional driver specific information to - import backup record back into the system it must override this method - since it will be called with the extra information that was provided by - export_record when exporting the backup. - - Default backup driver implementation does nothing since it didn't - export any specific data in export_record. 
- - :param backup: backup object to export - :param driver_info: dictionary with driver specific backup record - information - :returns: None - """ diff --git a/cinder/interface/backup_verify_driver.py b/cinder/interface/backup_verify_driver.py deleted file mode 100644 index 15c8307c8..000000000 --- a/cinder/interface/backup_verify_driver.py +++ /dev/null @@ -1,38 +0,0 @@ -# Copyright 2016 Dell Inc. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -# - -""" -Backup driver with verification interface. - -Used for backup drivers that support the option to verify the backup after -completion. -""" - -from cinder.interface import backup_driver - - -class BackupDriverWithVerify(backup_driver.BackupDriver): - """Backup driver that supports the optional verification.""" - - def verify(self, backup): - """Verify that the backup exists on the backend. - - Verify that the backup is OK, possibly following an import record - operation. - - :param backup: Backup id of the backup to verify. - :raises InvalidBackup, NotImplementedError: - """ diff --git a/cinder/interface/base.py b/cinder/interface/base.py deleted file mode 100644 index 3cea6df1c..000000000 --- a/cinder/interface/base.py +++ /dev/null @@ -1,105 +0,0 @@ -# Copyright 2016 Dell Inc. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. 
You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -# - -import abc -import inspect - -import six - - -def _get_arg_count(method): - """Get the number of args for a method. - - :param method: The method to check. - :returns: The number of args for the method. - """ - if not method: - return 0 - - arg_spec = inspect.getargspec(method) - return len(arg_spec[0]) - - -def _get_method_info(cls): - """Get all methods defined in a class. - - Note: This will only return public methods and their associated arg count. - - :param cls: The class to inspect. - :returns: `Dict` of method names with a tuple of the method and their arg - counts. - """ - result = {} - - methods = inspect.getmembers(cls, inspect.ismethod) - for (name, method) in methods: - if name.startswith('_'): - # Skip non-public methods - continue - result[name] = (method, _get_arg_count(method)) - - return result - - -@six.add_metaclass(abc.ABCMeta) -class CinderInterface(object): - """Interface base class for Cinder. - - Cinder interfaces should inherit from this class to support indirect - inheritance evaluation. - - This can be used to validate compliance to an interface without requiring - that the class actually be inherited from the same base class. - """ - - _method_cache = None - - @classmethod - def _get_methods(cls): - if not cls._method_cache: - cls._method_cache = _get_method_info(cls) - return cls._method_cache - - @classmethod - def __subclasshook__(cls, other_cls): - """Custom class inheritance evaluation. - - :param cls: The CinderInterface to check against. - :param other_cls: The class to be checked if it implements - our interface. 
- """ - interface_methods = cls._get_methods() - driver_methods = _get_method_info(other_cls) - - interface_keys = interface_methods.keys() - driver_keys = driver_methods.keys() - - matching_count = len(set(interface_keys) & set(driver_keys)) - if matching_count != len(interface_keys): - # Missing some methods, does not implement this interface or is - # missing something. - return NotImplemented - - # TODO(smcginnis) Add method signature checking. - # We know all methods are there, now make sure they look right. - # Unfortunately the methods can be obfuscated by certain decorators, - # so we need to find a better way to pull out the real method - # signatures. - # driver_methods[method_name][0].func_closure.cell_contents works - # for most cases but not all. - # AST might work instead of using introspect. - - return True diff --git a/cinder/interface/fczm_driver.py b/cinder/interface/fczm_driver.py deleted file mode 100644 index 208cb5856..000000000 --- a/cinder/interface/fczm_driver.py +++ /dev/null @@ -1,78 +0,0 @@ -# Copyright 2016 Dell Inc. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -# - -""" -Core fibre channel zone manager driver interface. - -All fczm drivers should support this interface as a bare minimum. 
-""" - -from cinder.interface import base - - -class FibreChannelZoneManagerDriver(base.CinderInterface): - """FCZM driver required interface.""" - - def add_connection(self, fabric, initiator_target_map, host_name=None, - storage_system=None): - """Add a new initiator<>target connection. - - All implementing drivers should provide concrete implementation - for this API. - - :param fabric: Fabric name from cinder.conf file - :param initiator_target_map: Mapping of initiator to list of targets - - .. code-block:: python - - Example initiator_target_map: - - { - '10008c7cff523b01': ['20240002ac000a50', '20240002ac000a40'] - } - - Note that WWPN can be in lower or upper case and can be ':' - separated strings. - """ - - def delete_connection(self, fabric, initiator_target_map, host_name=None, - storage_system=None): - """Delete an initiator<>target connection. - - :param fabric: Fabric name from cinder.conf file - :param initiator_target_map: Mapping of initiator to list of targets - - .. code-block:: python - - Example initiator_target_map: - - { - '10008c7cff523b01': ['20240002ac000a50', '20240002ac000a40'] - } - - Note that WWPN can be in lower or upper case and can be ':' - separated strings. - """ - - def get_san_context(self, target_wwn_list): - """Get SAN context for end devices. - - :param target_wwn_list: Mapping of initiator to list of targets - - Example initiator_target_map: ['20240002ac000a50', '20240002ac000a40'] - Note that WWPN can be in lower or upper case and can be - ':' separated strings. - """ diff --git a/cinder/interface/util.py b/cinder/interface/util.py deleted file mode 100644 index 15226b085..000000000 --- a/cinder/interface/util.py +++ /dev/null @@ -1,80 +0,0 @@ -# Copyright 2016 Dell Inc. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. 
You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -# -import fnmatch -import inspect -import os - -from cinder import interface - - -def _ensure_loaded(start_path): - """Loads everything in a given path. - - This will make sure all classes have been loaded and therefore all - decorators have registered class. - - :param start_path: The starting path to load. - """ - for root, folder, files in os.walk(start_path): - for phile in fnmatch.filter(files, '*.py'): - path = os.path.join(root, phile) - try: - __import__( - path.replace('/', '.')[:-3], globals(), locals()) - except Exception: - # Really don't care here - pass - - -def get_volume_drivers(): - """Get a list of all volume drivers.""" - _ensure_loaded('cinder/volume/drivers') - return [DriverInfo(x) for x in interface._volume_register] - - -def get_backup_drivers(): - """Get a list of all backup drivers.""" - _ensure_loaded('cinder/backup/drivers') - return [DriverInfo(x) for x in interface._backup_register] - - -def get_fczm_drivers(): - """Get a list of all fczm drivers.""" - _ensure_loaded('cinder/zonemanager/drivers') - return [DriverInfo(x) for x in interface._fczm_register] - - -class DriverInfo(object): - """Information about driver implementations.""" - - def __init__(self, cls): - self.cls = cls - self.desc = cls.__doc__ - self.class_name = cls.__name__ - self.class_fqn = '{}.{}'.format(inspect.getmodule(cls).__name__, - self.class_name) - self.version = getattr(cls, 'VERSION', None) - self.ci_wiki_name = getattr(cls, 'CI_WIKI_NAME', None) - self.supported = getattr(cls, 'SUPPORTED', True) - - def __str__(self): - return self.class_name - - 
def __repr__(self): - return self.class_fqn - - def __hash__(self): - return hash(self.class_fqn) diff --git a/cinder/interface/volume_consistencygroup_driver.py b/cinder/interface/volume_consistencygroup_driver.py deleted file mode 100644 index ebb5c197d..000000000 --- a/cinder/interface/volume_consistencygroup_driver.py +++ /dev/null @@ -1,231 +0,0 @@ -# Copyright 2016 Dell Inc. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -# - -""" -Consistency group volume driver interface. -""" - -from cinder.interface import base - - -class VolumeConsistencyGroupDriver(base.CinderInterface): - """Interface for drivers that support consistency groups.""" - - def create_consistencygroup(self, context, group): - """Creates a consistencygroup. - - :param context: the context of the caller. - :param group: the dictionary of the consistency group to be created. - :returns: model_update - - model_update will be in this format: {'status': xxx, ......}. - - If the status in model_update is 'error', the manager will throw - an exception and it will be caught in the try-except block in the - manager. If the driver throws an exception, the manager will also - catch it in the try-except block. The group status in the db will - be changed to 'error'. - - For a successful operation, the driver can either build the - model_update and return it or return None. The group status will - be set to 'available'. 
- """ - - def create_consistencygroup_from_src(self, context, group, volumes, - cgsnapshot=None, snapshots=None, - source_cg=None, source_vols=None): - """Creates a consistencygroup from source. - - :param context: the context of the caller. - :param group: the dictionary of the consistency group to be created. - :param volumes: a list of volume dictionaries in the group. - :param cgsnapshot: the dictionary of the cgsnapshot as source. - :param snapshots: a list of snapshot dictionaries in the cgsnapshot. - :param source_cg: the dictionary of a consistency group as source. - :param source_vols: a list of volume dictionaries in the source_cg. - :returns: model_update, volumes_model_update - - The source can be cgsnapshot or a source cg. - - param volumes is retrieved directly from the db. It is a list of - cinder.db.sqlalchemy.models.Volume to be precise. It cannot be - assigned to volumes_model_update. volumes_model_update is a list of - dictionaries. It has to be built by the driver. An entry will be - in this format: {'id': xxx, 'status': xxx, ......}. model_update - will be in this format: {'status': xxx, ......}. - - To be consistent with other volume operations, the manager will - assume the operation is successful if no exception is thrown by - the driver. For a successful operation, the driver can either build - the model_update and volumes_model_update and return them or - return None, None. - """ - - def delete_consistencygroup(self, context, group, volumes): - """Deletes a consistency group. - - :param context: the context of the caller. - :param group: the dictionary of the consistency group to be deleted. - :param volumes: a list of volume dictionaries in the group. - :returns: model_update, volumes_model_update - - param volumes is retrieved directly from the db. It is a list of - cinder.db.sqlalchemy.models.Volume to be precise. It cannot be - assigned to volumes_model_update. volumes_model_update is a list of - dictionaries. 
It has to be built by the driver. An entry will be - in this format: {'id': xxx, 'status': xxx, ......}. model_update - will be in this format: {'status': xxx, ......}. - - The driver should populate volumes_model_update and model_update - and return them. - - The manager will check volumes_model_update and update db accordingly - for each volume. If the driver successfully deleted some volumes - but failed to delete others, it should set statuses of the volumes - accordingly so that the manager can update db correctly. - - If the status in any entry of volumes_model_update is 'error_deleting' - or 'error', the status in model_update will be set to the same if it - is not already 'error_deleting' or 'error'. - - If the status in model_update is 'error_deleting' or 'error', the - manager will raise an exception and the status of the group will be - set to 'error' in the db. If volumes_model_update is not returned by - the driver, the manager will set the status of every volume in the - group to 'error' in the except block. - - If the driver raises an exception during the operation, it will be - caught by the try-except block in the manager. The statuses of the - group and all volumes in it will be set to 'error'. - - For a successful operation, the driver can either build the - model_update and volumes_model_update and return them or - return None, None. The statuses of the group and all volumes - will be set to 'deleted' after the manager deletes them from db. - """ - - def update_consistencygroup(self, context, group, - add_volumes=None, remove_volumes=None): - """Updates a consistency group. - - :param context: the context of the caller. - :param group: the dictionary of the consistency group to be updated. - :param add_volumes: a list of volume dictionaries to be added. - :param remove_volumes: a list of volume dictionaries to be removed. 
- :returns: model_update, add_volumes_update, remove_volumes_update - - model_update is a dictionary that the driver wants the manager - to update upon a successful return. If None is returned, the manager - will set the status to 'available'. - - add_volumes_update and remove_volumes_update are lists of dictionaries - that the driver wants the manager to update upon a successful return. - Note that each entry requires a {'id': xxx} so that the correct - volume entry can be updated. If None is returned, the volume will - remain its original status. Also note that you cannot directly - assign add_volumes to add_volumes_update as add_volumes is a list of - cinder.db.sqlalchemy.models.Volume objects and cannot be used for - db update directly. Same with remove_volumes. - - If the driver throws an exception, the status of the group as well as - those of the volumes to be added/removed will be set to 'error'. - """ - - def create_cgsnapshot(self, context, cgsnapshot, snapshots): - """Creates a cgsnapshot. - - :param context: the context of the caller. - :param cgsnapshot: the dictionary of the cgsnapshot to be created. - :param snapshots: a list of snapshot dictionaries in the cgsnapshot. - :returns: model_update, snapshots_model_update - - param snapshots is retrieved directly from the db. It is a list of - cinder.db.sqlalchemy.models.Snapshot to be precise. It cannot be - assigned to snapshots_model_update. snapshots_model_update is a list - of dictionaries. It has to be built by the driver. An entry will be - in this format: {'id': xxx, 'status': xxx, ......}. model_update - will be in this format: {'status': xxx, ......}. - - The driver should populate snapshots_model_update and model_update - and return them. - - The manager will check snapshots_model_update and update db accordingly - for each snapshot. 
If the driver successfully deleted some snapshots - but failed to delete others, it should set statuses of the snapshots - accordingly so that the manager can update db correctly. - - If the status in any entry of snapshots_model_update is 'error', the - status in model_update will be set to the same if it is not already - 'error'. - - If the status in model_update is 'error', the manager will raise an - exception and the status of cgsnapshot will be set to 'error' in the - db. If snapshots_model_update is not returned by the driver, the - manager will set the status of every snapshot to 'error' in the except - block. - - If the driver raises an exception during the operation, it will be - caught by the try-except block in the manager and the statuses of - cgsnapshot and all snapshots will be set to 'error'. - - For a successful operation, the driver can either build the - model_update and snapshots_model_update and return them or - return None, None. The statuses of cgsnapshot and all snapshots - will be set to 'available' at the end of the manager function. - """ - - def delete_cgsnapshot(self, context, cgsnapshot, snapshots): - """Deletes a cgsnapshot. - - :param context: the context of the caller. - :param cgsnapshot: the dictionary of the cgsnapshot to be deleted. - :param snapshots: a list of snapshot dictionaries in the cgsnapshot. - :returns: model_update, snapshots_model_update - - param snapshots is retrieved directly from the db. It is a list of - cinder.db.sqlalchemy.models.Snapshot to be precise. It cannot be - assigned to snapshots_model_update. snapshots_model_update is a list - of dictionaries. It has to be built by the driver. An entry will be - in this format: {'id': xxx, 'status': xxx, ......}. model_update - will be in this format: {'status': xxx, ......}. - - The driver should populate snapshots_model_update and model_update - and return them. - - The manager will check snapshots_model_update and update db accordingly - for each snapshot. 
If the driver successfully deleted some snapshots - but failed to delete others, it should set statuses of the snapshots - accordingly so that the manager can update db correctly. - - If the status in any entry of snapshots_model_update is - 'error_deleting' or 'error', the status in model_update will be set to - the same if it is not already 'error_deleting' or 'error'. - - If the status in model_update is 'error_deleting' or 'error', the - manager will raise an exception and the status of cgsnapshot will be - set to 'error' in the db. If snapshots_model_update is not returned by - the driver, the manager will set the status of every snapshot to - 'error' in the except block. - - If the driver raises an exception during the operation, it will be - caught by the try-except block in the manager and the statuses of - cgsnapshot and all snapshots will be set to 'error'. - - For a successful operation, the driver can either build the - model_update and snapshots_model_update and return them or - return None, None. The statuses of cgsnapshot and all snapshots - will be set to 'deleted' after the manager deletes them from db. - """ diff --git a/cinder/interface/volume_driver.py b/cinder/interface/volume_driver.py deleted file mode 100644 index 0ce663c36..000000000 --- a/cinder/interface/volume_driver.py +++ /dev/null @@ -1,258 +0,0 @@ -# Copyright 2016 Dell Inc. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
-# - -""" -Core backend volume driver interface. - -All backend drivers should support this interface as a bare minimum. -""" - -from cinder.interface import base - - -class VolumeDriverCore(base.CinderInterface): - """Core backend driver required interface.""" - - def do_setup(self, context): - """Any initialization the volume driver needs to do while starting. - - Called once by the manager after the driver is loaded. - Can be used to set up clients, check licenses, set up protocol - specific helpers, etc. - - :param context: The admin context. - """ - - def check_for_setup_error(self): - """Validate there are no issues with the driver configuration. - - Called after do_setup(). Driver initialization can occur there or in - this call, but must be complete by the time this returns. - - If this method raises an exception, the driver will be left in an - "uninitialized" state by the volume manager, which means that it will - not be sent requests for volume operations. - - This method typically checks things like whether the configured - credentials can be used to log in the storage backend, and whether any - external dependencies are present and working. - - :raises VolumeBackendAPIException: in case of setup error. - """ - - def get_volume_stats(self, refresh=False): - """Collects volume backend stats. - - The get_volume_stats method is used by the volume manager to collect - information from the driver instance related to information about the - driver, available and used space, and driver/backend capabilities. - - It returns a dict with the following required fields: - - * volume_backend_name - This is an identifier for the backend taken from cinder.conf. - Useful when using multi-backend. - * vendor_name - Vendor/author of the driver who serves as the contact for the - driver's development and support. - * driver_version - The driver version is logged at cinder-volume startup and is useful - for tying volume service logs to a specific release of the code. 
- There are currently no rules for how or when this is updated, but - it tends to follow typical major.minor.revision ideas. - * storage_protocol - The protocol used to connect to the storage, this should be a short - string such as: "iSCSI", "FC", "nfs", "ceph", etc. - * total_capacity_gb - The total capacity in gigabytes (GiB) of the storage backend being - used to store Cinder volumes. Use keyword 'unknown' if the backend - cannot report the value or 'infinite' if there is no upper limit. - But, it is recommended to report real values as the Cinder - scheduler assigns lowest weight to any storage backend reporting - 'unknown' or 'infinite'. - - * free_capacity_gb - The free capacity in gigabytes (GiB). Use keyword 'unknown' if the - backend cannot report the value or 'infinite' if there is no upper - limit. But, it is recommended to report real values as the Cinder - scheduler assigns lowest weight to any storage backend reporting - 'unknown' or 'infinite'. - - And the following optional fields: - - * reserved_percentage (integer) - Percentage of backend capacity which is not used by the scheduler. - * location_info (string) - Driver-specific information used by the driver and storage backend - to correlate Cinder volumes and backend LUNs/files. - * QoS_support (Boolean) - Whether the backend supports quality of service. - * provisioned_capacity_gb - The total provisioned capacity on the storage backend, in gigabytes - (GiB), including space consumed by any user other than Cinder - itself. - * max_over_subscription_ratio - The maximum amount a backend can be over subscribed. - * thin_provisioning_support (Boolean) - Whether the backend is capable of allocating thinly provisioned - volumes. - * thick_provisioning_support (Boolean) - Whether the backend is capable of allocating thick provisioned - volumes. (Typically True.) - * total_volumes (integer) - Total number of volumes on the storage backend. This can be used in - custom driver filter functions. 
- * filter_function (string) - A custom function used by the scheduler to determine whether a - volume should be allocated to this backend or not. Example: - - capabilities.total_volumes < 10 - - * goodness_function (string) - Similar to filter_function, but used to weigh multiple volume - backends. Example: - - capabilities.capacity_utilization < 0.6 ? 100 : 25 - - * multiattach (Boolean) - Whether the backend supports multiattach or not. Defaults to False. - * sparse_copy_volume (Boolean) - Whether copies performed by the volume manager for operations such - as migration should attempt to preserve sparseness. - - The returned dict may also contain a list, "pools", which has a similar - dict for each pool being used with the backend. - - :param refresh: Whether to discard any cached values and force a full - refresh of stats. - :returns: dict of appropriate values (see above). - """ - - def create_volume(self, volume): - """Create a new volume on the backend. - - This method is responsible only for storage allocation on the backend. - It should not export a LUN or actually make this storage available for - use, this is done in a later call. - - TODO(smcginnis): Add example data structure of volume object. - - :param volume: Volume object containing specifics to create. - :returns: (Optional) dict of database updates for the new volume. - :raises VolumeBackendAPIException: if creation failed. - """ - - def delete_volume(self, volume): - """Delete a volume from the backend. - - If the driver can talk to the backend and detects that the volume is no - longer present, this call should succeed and allow Cinder to complete - the process of deleting the volume. - - :param volume: The volume to delete. - :raises VolumeIsBusy: if the volume is still attached or has snapshots. - VolumeBackendAPIException on error. - """ - - def initialize_connection(self, volume, connector, initiator_data=None): - """Allow connection to connector and return connection info. 
- - :param volume: The volume to be attached. - :param connector: Dictionary containing information about what is being - connected to. - :param initiator_data: (Optional) A dictionary of driver_initiator_data - objects with key-value pairs that have been - saved for this initiator by a driver in previous - initialize_connection calls. - :returns: A dictionary of connection information. This can optionally - include a "initiator_updates" field. - - The "initiator_updates" field must be a dictionary containing a - "set_values" and/or "remove_values" field. The "set_values" field must - be a dictionary of key-value pairs to be set/updated in the db. The - "remove_values" field must be a list of keys, previously set with - "set_values", that will be deleted from the db. - - May be called multiple times to get connection information after a - volume has already been attached. - """ - - def attach_volume(self, context, volume, instance_uuid, host_name, - mountpoint): - """Lets the driver know Nova has attached the volume to an instance. - - :param context: Security/policy info for the request. - :param volume: Volume being attached. - :param instance_uuid: ID of the instance being attached to. - :param host_name: The host name. - :param mountpoint: Device mount point on the instance. - """ - - def terminate_connection(self, volume, connector): - """Remove access to a volume. - - :param volume: The volume to remove. - :param connector: The Dictionary containing information about the - connection. This is optional when doing a - force-detach and can be None. - """ - - def detach_volume(self, context, volume, attachment=None): - """Detach volume from an instance. - - :param context: Security/policy info for the request. - :param volume: Volume being detached. - :param attachment: (Optional) Attachment information. - """ - - def clone_image(self, volume, image_location, image_id, image_metadata, - image_service): - """Clone an image to a volume. 
- - :param volume: The volume to create. - :param image_location: Where to pull the image from. - :param image_id: The image identifier. - :param image_metadata: Information about the image. - :param image_service: The image service to use. - :returns: Model updates. - """ - - def copy_image_to_volume(self, context, volume, image_service, image_id): - """Fetch the image from image_service and write it to the volume. - - :param context: Security/policy info for the request. - :param volume: The volume to create. - :param image_service: The image service to use. - :param image_id: The image identifier. - :returns: Model updates. - """ - - def copy_volume_to_image(self, context, volume, image_service, image_meta): - """Copy the volume to the specified image. - - :param context: Security/policy info for the request. - :param volume: The volume to copy. - :param image_service: The image service to use. - :param image_meta: Information about the image. - :returns: Model updates. - """ - - def extend_volume(self, volume, new_size): - """Extend the size of a volume. - - :param volume: The volume to extend. - :param new_size: The new desired size of the volume. - """ diff --git a/cinder/interface/volume_group_driver.py b/cinder/interface/volume_group_driver.py deleted file mode 100644 index 9e0a4b309..000000000 --- a/cinder/interface/volume_group_driver.py +++ /dev/null @@ -1,227 +0,0 @@ -# Copyright (c) 2017 Dell Inc. or its subsidiaries. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the -# License for the specific language governing permissions and limitations -# under the License. -# - -""" -Generic volume group volume driver interface. -""" - -from cinder.interface import base - - -class VolumeGroupDriver(base.CinderInterface): - """Interface for drivers that support groups.""" - - def create_group(self, context, group): - """Creates a group. - - :param context: the context of the caller. - :param group: the Group object to be created. - :returns: model_update - - model_update will be in this format: {'status': xxx, ......}. - - If the status in model_update is 'error', the manager will throw - an exception and it will be caught in the try-except block in the - manager. If the driver throws an exception, the manager will also - catch it in the try-except block. The group status in the db will - be changed to 'error'. - - For a successful operation, the driver can either build the - model_update and return it or return None. The group status will - be set to 'available'. - """ - - def create_group_from_src(self, context, group, volumes, - group_snapshot=None, snapshots=None, - source_group=None, source_vols=None): - """Creates a group from source. - - :param context: the context of the caller. - :param group: the Group object to be created. - :param volumes: a list of Volume objects in the group. - :param group_snapshot: the GroupSnapshot object as source. - :param snapshots: a list of Snapshot objects in the group_snapshot. - :param source_group: a Group object as source. - :param source_vols: a list of Volume objects in the source_group. - :returns: model_update, volumes_model_update - - The source can be group_snapshot or a source group. - - param volumes is a list of objects retrieved from the db. It cannot - be assigned to volumes_model_update. volumes_model_update is a list - of dictionaries. It has to be built by the driver. An entry will be - in this format: {'id': xxx, 'status': xxx, ......}. 
model_update - will be in this format: {'status': xxx, ......}. - - To be consistent with other volume operations, the manager will - assume the operation is successful if no exception is thrown by - the driver. For a successful operation, the driver can either build - the model_update and volumes_model_update and return them or - return None, None. - """ - - def delete_group(self, context, group, volumes): - """Deletes a group. - - :param context: the context of the caller. - :param group: the Group object to be deleted. - :param volumes: a list of Volume objects in the group. - :returns: model_update, volumes_model_update - - param volumes is a list of objects retrieved from the db. It cannot - be assigned to volumes_model_update. volumes_model_update is a list - of dictionaries. It has to be built by the driver. An entry will be - in this format: {'id': xxx, 'status': xxx, ......}. model_update - will be in this format: {'status': xxx, ......}. - - The driver should populate volumes_model_update and model_update - and return them. - - The manager will check volumes_model_update and update db accordingly - for each volume. If the driver successfully deleted some volumes - but failed to delete others, it should set statuses of the volumes - accordingly so that the manager can update db correctly. - - If the status in any entry of volumes_model_update is 'error_deleting' - or 'error', the status in model_update will be set to the same if it - is not already 'error_deleting' or 'error'. - - If the status in model_update is 'error_deleting' or 'error', the - manager will raise an exception and the status of the group will be - set to 'error' in the db. If volumes_model_update is not returned by - the driver, the manager will set the status of every volume in the - group to 'error' in the except block. - - If the driver raises an exception during the operation, it will be - caught by the try-except block in the manager. 
The statuses of the - group and all volumes in it will be set to 'error'. - - For a successful operation, the driver can either build the - model_update and volumes_model_update and return them or - return None, None. The statuses of the group and all volumes - will be set to 'deleted' after the manager deletes them from db. - """ - - def update_group(self, context, group, - add_volumes=None, remove_volumes=None): - """Updates a group. - - :param context: the context of the caller. - :param group: the Group object to be updated. - :param add_volumes: a list of Volume objects to be added. - :param remove_volumes: a list of Volume objects to be removed. - :returns: model_update, add_volumes_update, remove_volumes_update - - model_update is a dictionary that the driver wants the manager - to update upon a successful return. If None is returned, the manager - will set the status to 'available'. - - add_volumes_update and remove_volumes_update are lists of dictionaries - that the driver wants the manager to update upon a successful return. - Note that each entry requires a {'id': xxx} so that the correct - volume entry can be updated. If None is returned, the volume will - remain its original status. Also note that you cannot directly - assign add_volumes to add_volumes_update as add_volumes is a list of - volume objects and cannot be used for db update directly. Same with - remove_volumes. - - If the driver throws an exception, the status of the group as well as - those of the volumes to be added/removed will be set to 'error'. - """ - - def create_group_snapshot(self, context, group_snapshot, snapshots): - """Creates a group_snapshot. - - :param context: the context of the caller. - :param group_snapshot: the GroupSnapshot object to be created. - :param snapshots: a list of Snapshot objects in the group_snapshot. - :returns: model_update, snapshots_model_update - - param snapshots is a list of Snapshot objects. It cannot be assigned - to snapshots_model_update. 
snapshots_model_update is a list of - dictionaries. It has to be built by the driver. An entry will be - in this format: {'id': xxx, 'status': xxx, ......}. model_update - will be in this format: {'status': xxx, ......}. - - The driver should populate snapshots_model_update and model_update - and return them. - - The manager will check snapshots_model_update and update db accordingly - for each snapshot. If the driver successfully deleted some snapshots - but failed to delete others, it should set statuses of the snapshots - accordingly so that the manager can update db correctly. - - If the status in any entry of snapshots_model_update is 'error', the - status in model_update will be set to the same if it is not already - 'error'. - - If the status in model_update is 'error', the manager will raise an - exception and the status of group_snapshot will be set to 'error' in - the db. If snapshots_model_update is not returned by the driver, the - manager will set the status of every snapshot to 'error' in the except - block. - - If the driver raises an exception during the operation, it will be - caught by the try-except block in the manager and the statuses of - group_snapshot and all snapshots will be set to 'error'. - - For a successful operation, the driver can either build the - model_update and snapshots_model_update and return them or - return None, None. The statuses of group_snapshot and all snapshots - will be set to 'available' at the end of the manager function. - """ - - def delete_group_snapshot(self, context, group_snapshot, snapshots): - """Deletes a group_snapshot. - - :param context: the context of the caller. - :param group_snapshot: the GroupSnapshot object to be deleted. - :param snapshots: a list of Snapshot objects in the group_snapshot. - :returns: model_update, snapshots_model_update - - param snapshots is a list of objects. It cannot be assigned to - snapshots_model_update. snapshots_model_update is a list of of - dictionaries. 
It has to be built by the driver. An entry will be - in this format: {'id': xxx, 'status': xxx, ......}. model_update - will be in this format: {'status': xxx, ......}. - - The driver should populate snapshots_model_update and model_update - and return them. - - The manager will check snapshots_model_update and update db accordingly - for each snapshot. If the driver successfully deleted some snapshots - but failed to delete others, it should set statuses of the snapshots - accordingly so that the manager can update db correctly. - - If the status in any entry of snapshots_model_update is - 'error_deleting' or 'error', the status in model_update will be set to - the same if it is not already 'error_deleting' or 'error'. - - If the status in model_update is 'error_deleting' or 'error', the - manager will raise an exception and the status of group_snapshot will - be set to 'error' in the db. If snapshots_model_update is not returned - by the driver, the manager will set the status of every snapshot to - 'error' in the except block. - - If the driver raises an exception during the operation, it will be - caught by the try-except block in the manager and the statuses of - group_snapshot and all snapshots will be set to 'error'. - - For a successful operation, the driver can either build the - model_update and snapshots_model_update and return them or - return None, None. The statuses of group_snapshot and all snapshots - will be set to 'deleted' after the manager deletes them from db. - """ diff --git a/cinder/interface/volume_management_driver.py b/cinder/interface/volume_management_driver.py deleted file mode 100644 index ec2abb688..000000000 --- a/cinder/interface/volume_management_driver.py +++ /dev/null @@ -1,92 +0,0 @@ -# Copyright 2016 Dell Inc. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. 
You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -# - -""" -Manage/unmanage existing volume driver interface. -""" - -from cinder.interface import base - - -class VolumeManagementDriver(base.CinderInterface): - """Interface for drivers that support managing existing volumes.""" - - def manage_existing(self, volume, existing_ref): - """Brings an existing backend storage object under Cinder management. - - existing_ref is passed straight through from the API request's - manage_existing_ref value, and it is up to the driver how this should - be interpreted. It should be sufficient to identify a storage object - that the driver should somehow associate with the newly-created cinder - volume structure. - - There are two ways to do this: - - 1. Rename the backend storage object so that it matches the, - volume['name'] which is how drivers traditionally map between a - cinder volume and the associated backend storage object. - - 2. Place some metadata on the volume, or somewhere in the backend, that - allows other driver requests (e.g. delete, clone, attach, detach...) - to locate the backend storage object when required. - - If the existing_ref doesn't make sense, or doesn't refer to an existing - backend storage object, raise a ManageExistingInvalidReference - exception. - - The volume may have a volume_type, and the driver can inspect that and - compare against the properties of the referenced backend storage - object. If they are incompatible, raise a - ManageExistingVolumeTypeMismatch, specifying a reason for the failure. 
- - :param volume: Cinder volume to manage - :param existing_ref: Dictionary with keys 'source-id', 'source-name' - with driver-specific values to identify a backend - storage object. - :raises ManageExistingInvalidReference: If the existing_ref doesn't - make sense, or doesn't refer to an existing backend storage - object. - :raises ManageExistingVolumeTypeMismatch: If there is a mismatch - between the volume type and the properties of the existing - backend storage object. - """ - - def manage_existing_get_size(self, volume, existing_ref): - """Return size of volume to be managed by manage_existing. - - When calculating the size, round up to the next GB. - - :param volume: Cinder volume to manage - :param existing_ref: Dictionary with keys 'source-id', 'source-name' - with driver-specific values to identify a backend - storage object. - :raises ManageExistingInvalidReference: If the existing_ref doesn't - make sense, or doesn't refer to an existing backend storage - object. - """ - - def unmanage(self, volume): - """Removes the specified volume from Cinder management. - - Does not delete the underlying backend storage object. - - For most drivers, this will not need to do anything. However, some - drivers might use this call as an opportunity to clean up any - Cinder-specific configuration that they have associated with the - backend storage object. - - :param volume: Cinder volume to unmanage - """ diff --git a/cinder/interface/volume_snapshot_driver.py b/cinder/interface/volume_snapshot_driver.py deleted file mode 100644 index 9c500cc11..000000000 --- a/cinder/interface/volume_snapshot_driver.py +++ /dev/null @@ -1,70 +0,0 @@ -# Copyright 2016 Dell Inc. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. 
You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -# - -""" -Snapshot capable volume driver interface. -""" - -from cinder.interface import base - - -class VolumeSnapshotDriver(base.CinderInterface): - """Interface for drivers that support snapshots. - - TODO(smcginnis) Merge into VolumeDriverBase once NFS driver supports - snapshots. - """ - - def create_snapshot(self, snapshot): - """Creates a snapshot. - - :param snapshot: Information for the snapshot to be created. - """ - - def delete_snapshot(self, snapshot): - """Deletes a snapshot. - - :param snapshot: The snapshot to delete. - """ - - def create_volume_from_snapshot(self, volume, snapshot): - """Creates a volume from a snapshot. - - If volume_type extra specs includes 'replication: True' - the driver needs to create a volume replica (secondary), - and setup replication between the newly created volume and - the secondary volume. - - An optional larger size for the new snapshot can be specified. Drivers - should check this value and create or expand the new volume to match. - - :param volume: The volume to be created. - :param snapshot: The snapshot from which to create the volume. - :returns: A dict of database updates for the new volume. - """ - - def revert_to_snapshot(self, context, volume, snapshot): - """Revert volume to snapshot. - - Note: the revert process should not change the volume's - current size, that means if the driver shrank - the volume during the process, it should extend the - volume internally. - - :param context: the context of the caller. - :param volume: The volume to be reverted. 
- :param snapshot: The snapshot used for reverting. - """ diff --git a/cinder/interface/volume_snapshotmanagement_driver.py b/cinder/interface/volume_snapshotmanagement_driver.py deleted file mode 100644 index 0b18d601d..000000000 --- a/cinder/interface/volume_snapshotmanagement_driver.py +++ /dev/null @@ -1,80 +0,0 @@ -# Copyright 2016 Dell Inc. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -# - -""" -Manage/unmanage existing volume snapshots driver interface. -""" - -from cinder.interface import base - - -class VolumeSnapshotManagementDriver(base.CinderInterface): - """Interface for drivers that support managing existing snapshots.""" - - def manage_existing_snapshot(self, snapshot, existing_ref): - """Brings an existing backend storage object under Cinder management. - - existing_ref is passed straight through from the API request's - manage_existing_ref value, and it is up to the driver how this should - be interpreted. It should be sufficient to identify a storage object - that the driver should somehow associate with the newly-created cinder - snapshot structure. - - There are two ways to do this: - - 1. Rename the backend storage object so that it matches the - snapshot['name'] which is how drivers traditionally map between a - cinder snapshot and the associated backend storage object. - - 2. Place some metadata on the snapshot, or somewhere in the backend, - that allows other driver requests (e.g. 
delete) to locate the - backend storage object when required. - - :param snapshot: The snapshot to manage. - :param existing_ref: Dictionary with keys 'source-id', 'source-name' - with driver-specific values to identify a backend - storage object. - :raises ManageExistingInvalidReference: If the existing_ref doesn't - make sense, or doesn't refer to an existing backend storage - object. - """ - - def manage_existing_snapshot_get_size(self, snapshot, existing_ref): - """Return size of snapshot to be managed by manage_existing. - - When calculating the size, round up to the next GB. - - :param snapshot: The snapshot to manage. - :param existing_ref: Dictionary with keys 'source-id', 'source-name' - with driver-specific values to identify a backend - storage object. - :raises ManageExistingInvalidReference: If the existing_ref doesn't - make sense, or doesn't refer to an existing backend storage - object. - """ - - def unmanage_snapshot(self, snapshot): - """Removes the specified snapshot from Cinder management. - - Does not delete the underlying backend storage object. - - For most drivers, this will not need to do anything. However, some - drivers might use this call as an opportunity to clean up any - Cinder-specific configuration that they have associated with the - backend storage object. - - :param snapshot: The snapshot to unmanage. - """ diff --git a/cinder/keymgr/__init__.py b/cinder/keymgr/__init__.py deleted file mode 100644 index 640ba0917..000000000 --- a/cinder/keymgr/__init__.py +++ /dev/null @@ -1,80 +0,0 @@ -# Copyright (c) 2013 The Johns Hopkins University/Applied Physics Laboratory -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. 
You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from castellan import options as castellan_opts -from oslo_config import cfg -from oslo_log import log as logging -from oslo_log import versionutils -from oslo_utils import importutils - -LOG = logging.getLogger(__name__) - -CONF = cfg.CONF - -castellan_opts.set_defaults(CONF) - - -def log_deprecated_warning(deprecated_value, castellan): - versionutils.deprecation_warning(deprecated_value, - versionutils.deprecated.NEWTON, - in_favor_of=castellan, logger=LOG) - - -# NOTE(kfarr): this method is for backwards compatibility, it is deprecated -# for removal -def set_overrides(conf): - api_class = None - should_override = False - try: - api_class = conf.key_manager.api_class - except cfg.NoSuchOptError: - LOG.warning("key_manager.api_class is not set, will use deprecated" - " option keymgr.api_class if set") - try: - api_class = CONF.keymgr.api_class - should_override = True - except cfg.NoSuchOptError: - LOG.warning("keymgr.api_class is not set") - - deprecated_barbican = 'cinder.keymgr.barbican.BarbicanKeyManager' - barbican = 'castellan.key_manager.barbican_key_manager.BarbicanKeyManager' - deprecated_mock = 'cinder.tests.unit.keymgr.mock_key_mgr.MockKeyManager' - castellan_mock = ('castellan.tests.unit.key_manager.mock_key_manager.' 
- 'MockKeyManager') - - if api_class == deprecated_barbican: - should_override = True - log_deprecated_warning(deprecated_barbican, barbican) - api_class = barbican - elif api_class == deprecated_mock: - should_override = True - log_deprecated_warning(deprecated_mock, castellan_mock) - api_class = castellan_mock - elif api_class is None: - should_override = True - # TODO(kfarr): key_manager.api_class should be set in DevStack, and - # this block can be removed - LOG.warning("key manager not set, using insecure default %s", - castellan_mock) - api_class = castellan_mock - - if should_override: - conf.set_override('api_class', api_class, 'key_manager') - - -def API(conf=CONF): - set_overrides(conf) - cls = importutils.import_class(conf.key_manager.api_class) - return cls(conf) diff --git a/cinder/keymgr/conf_key_mgr.py b/cinder/keymgr/conf_key_mgr.py deleted file mode 100644 index 83771666c..000000000 --- a/cinder/keymgr/conf_key_mgr.py +++ /dev/null @@ -1,146 +0,0 @@ -# Copyright (c) 2013 The Johns Hopkins University/Applied Physics Laboratory -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -""" -An implementation of a key manager that reads its key from the project's -configuration options. - -This key manager implementation provides limited security, assuming that the -key remains secret. 
Using the volume encryption feature as an example, -encryption provides protection against a lost or stolen disk, assuming that -the configuration file that contains the key is not stored on the disk. -Encryption also protects the confidentiality of data as it is transmitted via -iSCSI from the compute host to the storage host (again assuming that an -attacker who intercepts the data does not know the secret key). - -Because this implementation uses a single, fixed key, it proffers no -protection once that key is compromised. In particular, different volumes -encrypted with a key provided by this key manager actually share the same -encryption key so *any* volume can be decrypted once the fixed key is known. -""" - -import binascii - -from castellan.common.objects import symmetric_key -from castellan.key_manager import key_manager -from oslo_config import cfg -from oslo_log import log as logging - -from cinder import exception -from cinder.i18n import _ - - -key_mgr_opts = [ - cfg.StrOpt('fixed_key', - help='Fixed key returned by key manager, specified in hex', - deprecated_group='keymgr'), -] - -CONF = cfg.CONF -CONF.register_opts(key_mgr_opts, group='key_manager') - -LOG = logging.getLogger(__name__) - - -class ConfKeyManager(key_manager.KeyManager): - """Key Manager that supports one key defined by the fixed_key conf option. - - This key manager implementation supports all the methods specified by the - key manager interface. This implementation creates a single key in response - to all invocations of create_key. Side effects (e.g., raising exceptions) - for each method are handled as specified by the key manager interface. 
- """ - - warning_logged = False - - def __init__(self, configuration): - if not ConfKeyManager.warning_logged: - LOG.warning('This key manager is insecure and is not ' - 'recommended for production deployments') - ConfKeyManager.warning_logged = True - - super(ConfKeyManager, self).__init__(configuration) - - self.conf = configuration - self.conf.register_opts(key_mgr_opts, group='key_manager') - self.key_id = '00000000-0000-0000-0000-000000000000' - - def _get_key(self): - if self.conf.key_manager.fixed_key is None: - raise ValueError(_('config option key_manager.fixed_key is not ' - 'defined')) - hex_key = self.conf.key_manager.fixed_key - key_bytes = bytes(binascii.unhexlify(hex_key)) - return symmetric_key.SymmetricKey('AES', - len(key_bytes) * 8, - key_bytes) - - def create_key(self, context, **kwargs): - """Creates a symmetric key. - - This implementation returns a UUID for the key read from the - configuration file. A NotAuthorized exception is raised if the - specified context is None. - """ - if context is None: - raise exception.NotAuthorized() - - return self.key_id - - def create_key_pair(self, context, **kwargs): - raise NotImplementedError( - "ConfKeyManager does not support asymmetric keys") - - def store(self, context, managed_object, **kwargs): - """Stores (i.e., registers) a key with the key manager.""" - if context is None: - raise exception.NotAuthorized() - - if managed_object != self._get_key(): - raise exception.KeyManagerError( - reason="cannot store arbitrary keys") - - return self.key_id - - def get(self, context, managed_object_id): - """Retrieves the key identified by the specified id. - - This implementation returns the key that is associated with the - specified UUID. A NotAuthorized exception is raised if the specified - context is None; a KeyError is raised if the UUID is invalid. 
- """ - if context is None: - raise exception.NotAuthorized() - - if managed_object_id != self.key_id: - raise KeyError(str(managed_object_id) + " != " + str(self.key_id)) - - return self._get_key() - - def delete(self, context, managed_object_id): - """Represents deleting the key. - - Because the ConfKeyManager has only one key, which is read from the - configuration file, the key is not actually deleted when this is - called. - """ - if context is None: - raise exception.NotAuthorized() - - if managed_object_id != self.key_id: - raise exception.KeyManagerError( - reason="cannot delete non-existent key") - - LOG.warning("Not deleting key %s", managed_object_id) diff --git a/cinder/locale/cs/LC_MESSAGES/cinder-log-error.po b/cinder/locale/cs/LC_MESSAGES/cinder-log-error.po deleted file mode 100644 index 900959ecf..000000000 --- a/cinder/locale/cs/LC_MESSAGES/cinder-log-error.po +++ /dev/null @@ -1,2934 +0,0 @@ -# Translations template for cinder. -# Copyright (C) 2015 ORGANIZATION -# This file is distributed under the same license as the cinder project. -# -# Translators: -# Zbyněk Schwarz , 2014 -# OpenStack Infra , 2015. #zanata -# Zbyněk Schwarz , 2015. #zanata -# Andreas Jaeger , 2016. #zanata -msgid "" -msgstr "" -"Project-Id-Version: cinder 9.0.0.0rc2.dev178\n" -"Report-Msgid-Bugs-To: https://bugs.launchpad.net/openstack-i18n/\n" -"POT-Creation-Date: 2016-10-06 03:19+0000\n" -"MIME-Version: 1.0\n" -"Content-Type: text/plain; charset=UTF-8\n" -"Content-Transfer-Encoding: 8bit\n" -"PO-Revision-Date: 2015-12-21 12:13+0000\n" -"Last-Translator: Zbyněk Schwarz \n" -"Language: cs\n" -"Plural-Forms: nplurals=3; plural=(n==1) ? 0 : (n>=2 && n<=4) ? 1 : 2;\n" -"Generated-By: Babel 2.0\n" -"X-Generator: Zanata 3.7.3\n" -"Language-Team: Czech\n" - -#, python-format -msgid "" -"%(exception)s: Exception during revert of retype for volume %(volume_name)s. " -"Failed to remove from new volume set %(new_vvs)s." 
-msgstr "" -"%(exception)s: Výjimka během vrácení přetypování svazku %(volume_name)s. " -"Nelze odstranit z nové sady svazků %(new_vvs)s." - -#, python-format -msgid "" -"%(exception)s: Exception during revert of retype for volume %(volume_name)s. " -"Original volume set/QOS settings may not have been fully restored." -msgstr "" -"%(exception)s: Výjimka během vrácení přetypování svazku %(volume_name)s. " -"Původní nastavení sady svazků/QoS nemusí být zcela obnovena." - -#, python-format -msgid "" -"%(fun)s: Failed with unexpected CLI output.\n" -" Command: %(cmd)s\n" -"stdout: %(out)s\n" -"stderr: %(err)s\n" -msgstr "" -"%(fun)s: selhalo s nečekaným výstupem příkazového řádku.\n" -"Příkaz: %(cmd)s\n" -"standardní výstup: %(out)s\n" -"chybový výstup: %(err)s\n" - -#, python-format -msgid "" -"%(method)s %(url)s unexpected response status: %(response)s (expects: " -"%(expects)s)." -msgstr "" -"%(method)s %(url)s neočekávaná odpověď stavu: %(response)s (očekáváno: " -"%(expects)s)." - -#, python-format -msgid "%(name)s: %(value)s" -msgstr "%(name)s: %(value)s" - -#, python-format -msgid "%s" -msgstr "%s" - -#, python-format -msgid "'%(value)s' is an invalid value for extra spec '%(key)s'" -msgstr "'%(value)s' je neplatná hodnota pro dodatečnou specifikaci '%(key)s'" - -msgid "A valid secondary target MUST be specified in order to failover." -msgstr "Pro zavedení záložního systému MUSÍ být zadán platný druhotní cíl." - -#, python-format -msgid "" -"Account for Volume ID %s was not found on the SolidFire Cluster while " -"attempting create_snapshot operation!" -msgstr "" -"Účet pro svazek s ID %s nebyl nalezen v clusteru SoldFire, při pokusu o " -"provedení operace vytvoření snímku!" - -#, python-format -msgid "" -"Account for Volume ID %s was not found on the SolidFire Cluster while " -"attempting delete_volume operation!" -msgstr "" -"Účet pro svazek s ID %s nebyl nalezen v clusteru SoldFire, při pokusu o " -"provedení operace smazání svazku!" 
- -#, python-format -msgid "" -"Account for Volume ID %s was not found on the SolidFire Cluster while " -"attempting unmanage operation!" -msgstr "" -"Účet pro svazek s ID %s nebyl nalezen v clusteru SoldFire, při pokusu o " -"provedení operace zrušení správy!" - -#, python-format -msgid "Array Serial Number must be in the file %(fileName)s." -msgstr "Sériové číslo pole musí být v souboru %(fileName)s." - -#, python-format -msgid "Array query failed - No response (%d)!" -msgstr "Dotaz na pole selhal - Žádná odpověď (%d)!" - -msgid "Array query failed. No capabilities in response!" -msgstr "Dotaz na pole selhal. V odpovědi nebyly uvedeny schopnosti!" - -msgid "Array query failed. No controllers in response!" -msgstr "Dotaz na pole selhal. V odpovědi nebyly uvedeny kontroléry!" - -msgid "Array query failed. No global id in XML response!" -msgstr "Dotaz na pole selhal. V odpovědi XML nebylo globální id!" - -msgid "Attaching snapshot from a remote node is not supported." -msgstr "Připojení snímku ke vzdálenému uzlu není podporováno." - -#, python-format -msgid "Authorizing request: %(zfssaurl)s retry: %(retry)d ." -msgstr "Ověřování žádosti: %(zfssaurl)s pokus: %(retry)d ." - -msgid "Backend returned err for lun export." -msgstr "Při export lun vrátila podpůrná vrstva chybu!" - -#, python-format -msgid "Backup id %s is not invalid. Skipping reset." -msgstr "ID zálohy %s není neplatné. Resetování je přeskočeno." - -#, python-format -msgid "" -"Backup service %(configured_service)s does not support verify. Backup id " -"%(id)s is not verified. Skipping verify." -msgstr "" -"Zálohovací služba %(configured_service)s nepodporuje ověřování. Záloha s id " -"%(id)s není ověřena. Ověřování přeskočeno." - -#, python-format -msgid "Backup volume metadata failed: %s." -msgstr "Záložní popisná data svazku selhala: %s" - -#, python-format -msgid "Bad response from server: %(url)s. Error: %(err)s" -msgstr "Špatná odpověď od serveru: %(url)s. 
Chyba: %(err)s" - -#, python-format -msgid "" -"CG snapshot %(cgsnap)s not found when creating consistency group %(cg)s from " -"source." -msgstr "" -"Nebyl nalezen snímek skupiny jednotnosti %(cgsnap)s při vytváření skupiny " -"%(cg)s ze zdroje." - -#, python-format -msgid "" -"CLI fail: '%(cmd)s' = %(code)s\n" -"out: %(stdout)s\n" -"err: %(stderr)s" -msgstr "" -"Selhání rozhraní příkazového řádku: '%(cmd)s' = %(code)s\n" -"výstup: %(stdout)s\n" -"chyba: %(stderr)s" - -msgid "Call to Nova delete snapshot failed" -msgstr "Volání Nova pro smazání snímku selhalo" - -msgid "Call to Nova to create snapshot failed" -msgstr "Volání Nova pro vytvoření snímku selhalo" - -#, python-format -msgid "Call to json.loads() raised an exception: %s." -msgstr "Volání json.loads() vyvolalo výjimku: %s." - -#, python-format -msgid "Can not discovery in %(target_ip)s with %(target_iqn)s." -msgstr "Nelze zjišťovat v %(target_ip)s pomocí %(target_iqn)s." - -msgid "Can not open the recent url, login again." -msgstr "Nelze otevřít nedávnou adresu url, probíhá znovu přihlašování." - -#, python-format -msgid "Can't find volume to map %(key)s, %(msg)s" -msgstr "Nelze najít svazek k mapování %(key)s, %(msg)s" - -msgid "Can't open the recent url, relogin." -msgstr "Nelze otevřít nedávnou adresu url, probíhá znovu přihlašování." - -#, python-format -msgid "" -"Cannot add and verify tier policy association for storage group : " -"%(storageGroupName)s to FAST policy : %(fastPolicyName)s." -msgstr "" -"Nelze přidat a ověřit přidružení zásady vrstvení skupiny úložiště : " -"%(storageGroupName)s k zásadě FAST : %(fastPolicyName)s." - -#, python-format -msgid "Cannot clone image %(image)s to volume %(volume)s. Error: %(error)s." -msgstr "Nelze klonovat obraz %(image)s do svazku %(volume)s. Chyba: %(error)s." - -#, python-format -msgid "Cannot create or find an initiator group with name %(igGroupName)s." -msgstr "Nelze vytvořit nebo najít skupinu zavaděče s názvem %(igGroupName)s." 
- -#, python-format -msgid "Cannot delete file %s." -msgstr "Nelze smazat soubor %s." - -msgid "Cannot detect replica status." -msgstr "Nelze zjistit stav repliky." - -msgid "Cannot determine if Tiering Policies are supported." -msgstr "Nelze zjistit zda zásady vrstvení jsou podporovány." - -msgid "Cannot determine whether Tiering Policy is supported on this array." -msgstr "Nelze zjistit zdali je na tomto poli podporována zásada vrstvení." - -#, python-format -msgid "Cannot find Consistency Group %s" -msgstr "Nelze najít skupinu jednotnosti %s." - -#, python-format -msgid "" -"Cannot find a portGroup with name %(pgGroupName)s. The port group for a " -"masking view must be pre-defined." -msgstr "" -"Nelze najít skupinu portů s názvem %(pgGroupName)s. Skupina pro zamaskování " -"musí být předem určena." - -#, python-format -msgid "Cannot find the fast policy %(fastPolicyName)s." -msgstr "Nelze najít zásadu fast %(fastPolicyName)s." - -#, python-format -msgid "" -"Cannot find the new masking view just created with name %(maskingViewName)s." -msgstr "Nelze najít právě vytvořené zamaskování s názvem %(maskingViewName)s." - -#, python-format -msgid "Cannot get QoS spec for volume %s." -msgstr "Nelze získat specifikaci QoS pro svazek %s." - -#, python-format -msgid "Cannot get storage Group from job : %(storageGroupName)s." -msgstr "Nelze získat skupinu úložiště z úkolu : %(storageGroupName)s." - -msgid "Cannot get storage system." -msgstr "Nelze získat úložný systém." - -#, python-format -msgid "" -"Changing the volume name from %(tmp)s to %(orig)s failed because %(reason)s" -msgstr "Změna názvu svazku z %(tmp)s na %(orig)s selhala protože %(reason)s" - -#, python-format -msgid "" -"Changing the volume name from %(tmp)s to %(orig)s failed because %(reason)s." -msgstr "Změna názvu svazku z %(tmp)s na %(orig)s selhala protože %(reason)s." - -#, python-format -msgid "Clone %s not in prepared state!" -msgstr "Klon %s není v stavu připraveno!" 
- -#, python-format -msgid "" -"Clone volume \"%s\" already exists. Please check the results of \"dog vdi " -"list\"." -msgstr "" -"Klon svazku \"%s\" již existuje. Prosím zkontrolujte výstup příkazu \"dog " -"vdi list\"." - -#, python-format -msgid "Cloning of volume %s failed." -msgstr "Klonování svazku %s selhalo." - -#, python-format -msgid "" -"CloudByte does not have a volume corresponding to OpenStack volume [%s]." -msgstr "CloudByte nemá svazek odpovídající svazku OpenStack [%s]." - -#, python-format -msgid "" -"CloudByte operation [%(operation)s] failed for volume [%(vol)s]. Exhausted " -"all [%(max)s] attempts." -msgstr "" -"Operace CloudByte [%(operation)s] selhala u svazku [%(vol)s]. Využito všech " -"[%(max)s] pokusů." - -#, python-format -msgid "" -"CloudByte snapshot information is not available for OpenStack volume [%s]." -msgstr "" -"Informace o snímku CloudByte nejsou dostupné pro svazek OpenStack [%s]." - -#, python-format -msgid "CloudByte volume information not available for OpenStack volume [%s]." -msgstr "" -"Informace o svazku CloudByte nejsou dostupné pro svazek OpenStack [%s]." - -#, python-format -msgid "Cmd :%s" -msgstr "Příkaz :%s" - -#, python-format -msgid "Commit clone failed: %(name)s (%(status)d)!" -msgstr "Odevzdání klonu selhalo: %(name)s (%(status)d)!" - -#, python-format -msgid "Commit failed for %s!" -msgstr "Odevzdání selhalo pro %s!" - -#, python-format -msgid "Compute cluster: %s not found." -msgstr "Výpočetní cluster: %s nenalezen." - -#, python-format -msgid "Configuration value %s is not set." -msgstr "Konfigurační volba %s není nastavena." - -#, python-format -msgid "Conflict detected in Virtual Volume Set %(volume_set)s: %(error)s" -msgstr "V sadě virtuálního svazku %(volume_set)s zjištěn konflikt: %(error)s" - -#, python-format -msgid "Connect to Flexvisor error: %s." -msgstr "Chyba při připojení k Flexvisor: %s." - -#, python-format -msgid "Connect to Flexvisor failed: %s." 
-msgstr "Připojení k Flexvisor selhalo: %s." - -msgid "Connection error while sending a heartbeat to coordination backend." -msgstr "" -"Chyba připojení při odesílání informací o aktivitě do vrstvy pro koordinaci." - -#, python-format -msgid "Connection to %s failed and no secondary!" -msgstr "Připojení k %s selhalo a žádné druhotné připojení není nastaveno!" - -#, python-format -msgid "Controller GET failed (%d)" -msgstr "Získání kontroléru selhalo (%d)!" - -#, python-format -msgid "Copy offload workflow unsuccessful. %s" -msgstr "Kopírování postupu snížení zátěže bylo neúspěšné. %s" - -#, python-format -msgid "Copy snapshot to volume for snapshot %(snap)s volume %(vol)s failed!" -msgstr "" -"Kopírování snímku do svazku pro snímek %(snap)s a svazek %(vol)s selhalo!" - -#, python-format -msgid "Could not GET allocation information (%d)!" -msgstr "Nelze získat informace o přidělení (%d)!" - -#, python-format -msgid "Could not connect to %(primary)s or %(secondary)s!" -msgstr "Nelze se připojit k %(primary)s nebo %(secondary)s!" - -#, python-format -msgid "Could not create snapshot set. Error: '%s'" -msgstr "Nelze vytvořit sadu snímků. Chyba: '%s'" - -msgid "Could not decode scheduler options." -msgstr "Nelze rozšifrovat volby plánovače." - -#, python-format -msgid "Could not delete failed image volume %(id)s." -msgstr "Nelze smazat nezdařený svazek obrazu %(id)s." - -#, python-format -msgid "Could not delete the image volume %(id)s." -msgstr "Nelze smazat svazek obrazu %(id)s." - -#, python-format -msgid "Could not find a host for consistency group %(group_id)s." -msgstr "Nelze najít hostitele pro skupinu jednotnosti %(group_id)s." - -#, python-format -msgid "Could not find any hosts (%s)" -msgstr "Nelze najít žádné hostitele (%s)" - -#, python-format -msgid "" -"Could not find port group : %(portGroupName)s. Check that the EMC " -"configuration file has the correct port group name." -msgstr "" -"Nelze najít skupinu portů : %(portGroupName)s. 
Zkontrolujte, že soubor s " -"nastavením EMC má správný název skupiny." - -#, python-format -msgid "Could not find volume with name %(name)s. Error: %(error)s" -msgstr "Nelze najít svazek s názvem %(name)s. Chyba: %(error)s" - -#, python-format -msgid "Could not log in to 3PAR array (%s) with the provided credentials." -msgstr "" -"Nelze se přihlásit do pole 3PAR (%s) pomocí zadaných přihlašovacích údajů." - -#, python-format -msgid "Could not stat scheduler options file %(filename)s." -msgstr "Nelze vyhodnotit soubor voleb plánovače %(filename)s." - -#, python-format -msgid "Could not validate device %s" -msgstr "Nelze ověřit zařízení %s" - -#, python-format -msgid "" -"Create clone_image_volume: %(volume_id)sfor image %(image_id)s, failed " -"(Exception: %(except)s)" -msgstr "" -"Vytvoření klonu svazku obrazu: %(volume_id)s pro obraz %(image_id)s selhalo " -"(Výjimka: %(except)s)" - -#, python-format -msgid "" -"Create consistency group from snapshot-%(snap)s failed: SnapshotNotFound." -msgstr "" -"Vytvoření skupiny jednotnosti ze snímku %(snap)s selhalo: Snímek nebyl " -"nalezen." - -#, python-format -msgid "Create consistency group from source %(source)s failed." -msgstr "Vytvoření skupiny jednotnosti ze zdroje %(source)s selhalo." - -#, python-format -msgid "" -"Create consistency group from source cg-%(cg)s failed: " -"ConsistencyGroupNotFound." -msgstr "" -"Vytvoření skupiny jednotnosti ze zdrojové skupiny jednotnosti %(cg)s " -"selhalo: Skupina nebyla nalezena." - -#, python-format -msgid "Create hypermetro error: %s." -msgstr "Chyba při vytváření hypermetra: %s." - -#, python-format -msgid "" -"Create new lun from lun for source %(src)s => destination %(dest)s failed!" -msgstr "Vytvoření nového lun z lun pro zdroj %(src)s => cíl %(dest)s selhalo!" 
- -#, python-format -msgid "Create snapshot notification failed: %s" -msgstr "Oznámení o vytvořeni snímku selhalo: %s" - -#, python-format -msgid "Create volume failed from snapshot: %s" -msgstr "Vytvoření svazku ze snímku selhalo: %s" - -#, python-format -msgid "Create volume notification failed: %s" -msgstr "Oznámení o vytvoření svazku selhalo: %s" - -#, python-format -msgid "Creation of snapshot failed for volume: %s" -msgstr "Vytvoření snímku selhalo u svazku: %s" - -#, python-format -msgid "Creation of volume %s failed." -msgstr "Vytvoření svazku %s selhalo." - -msgid "" -"Creation request failed. Please verify the extra-specs set for your volume " -"types are entered correctly." -msgstr "" -"Žádost o vytvoření selhala. Prosím ověřte správnost dodatečných specifikací " -"nastavených pro vaše typy svazků." - -msgid "DB error:" -msgstr "Chyba databáze:" - -msgid "DBError encountered: " -msgstr "Zjištěna chyba databáze:" - -msgid "Default Storage Profile was not found." -msgstr "Výchozí profil úložiště nebyl nalezen." - -msgid "" -"Default volume type is not found. Please check default_volume_type config:" -msgstr "" -"Výchozí typ svazku nenalezen. Prosím zkontrolujte nastavení " -"default_volume_type:" - -msgid "Delete consistency group failed to update usages." -msgstr "Nelze aktualizovat využití při mazání skupin jednotnosti." - -#, python-format -msgid "Delete hypermetro error: %s." -msgstr "Chyba při mazání hypermetra: %s" - -msgid "Delete snapshot failed, due to snapshot busy." -msgstr "Smazání snímku selhalo, protože snímek je zaneprázdněn." 
- -#, python-format -msgid "Delete snapshot notification failed: %s" -msgstr "Oznámení o smazáni snímku selhalo: %s" - -#, python-format -msgid "Delete volume notification failed: %s" -msgstr "Oznámení o smazáni svazku selhalo: %s" - -#, python-format -msgid "Deleting snapshot %s failed" -msgstr "Mazání snímku %s selhalo" - -#, python-format -msgid "Deleting zone failed %s" -msgstr "Mazání zóny %s selhalo" - -#, python-format -msgid "Deletion of volume %s failed." -msgstr "Smazání svazku %s selhalo." - -#, python-format -msgid "Destination Volume Group %s does not exist" -msgstr "Cílová skupina svazku %s neexistuje" - -#, python-format -msgid "Detach attachment %(attach_id)s failed." -msgstr "Odpojení zařízení %(attach_id)s selhalo." - -#, python-format -msgid "Detach migration source volume failed: %(err)s" -msgstr "Odpojení zdrojového svazku přesunu selhalo: %(err)s" - -msgid "Detach volume failed, due to remove-export failure." -msgstr "Chyba při odpojování svazku, kvůli selhání při odebrání exportu." - -msgid "Detach volume failed, due to uninitialized driver." -msgstr "Odpojení svazku selhalo kvůli nezavedenému ovladači." - -msgid "Detaching snapshot from a remote node is not supported." -msgstr "Odpojení snímku od vzdáleného uzle není podporováno." - -#, python-format -msgid "Did not find expected column name in lsvdisk: %s." -msgstr "Nenalezen očekávaný sloupec v lsvdisku: %s." - -msgid "Differential restore failed, trying full restore" -msgstr "Rozdílová obnova selhala, pokus o celkovou obnovu" - -#, python-format -msgid "Disconnection failed with message: %(msg)s." -msgstr "Odpojení selhalo se zprávou: %(msg)s." - -#, python-format -msgid "" -"Driver-based migration of volume %(vol)s failed. Move from %(src)s to " -"%(dst)s failed with error: %(error)s." -msgstr "" -"Přesun svazku %(vol)s za pomocí ovladače selhal. Přesun z %(src)s do %(dst)s " -"selhal s chybou: %(error)s." - -#, python-format -msgid "Error Attaching volume %(vol)s." 
-msgstr "Chyba při připojování svazku %(vol)s." - -#, python-format -msgid "" -"Error Create Group: %(groupName)s. Return code: %(rc)lu. Error: %(error)s." -msgstr "" -"Chyba při vyváření skupiny: %(groupName)s. Návratový kód: %(rc)lu. Chyba: " -"%(error)s." - -#, python-format -msgid "" -"Error Getting Snapshot: %(snapshot)s of Volume: %(lun)s in Pool: %(pool)s, " -"Project: %(project)s Return code: %(ret.status)d, Message: %(ret.data)s." -msgstr "" -"Chyba při získávání snímku: %(snapshot)s ve svazku %(lun)s v zásobě " -"%(pool)s, projekt %(project)s, návratový kód: %(ret.status)d, zpráva: " -"%(ret.data)s." - -#, python-format -msgid "" -"Error Setting Volume: %(lun)s to InitiatorGroup: %(initiatorgroup)s Pool: " -"%(pool)s Project: %(project)s Return code: %(ret.status)d Message: " -"%(ret.data)s." -msgstr "" -"Chyba při nastavování svazku: %(lun)s do skupiny zavaděče " -"%(initiatorgroup)s, zásoba %(pool)s, projekt %(project)s, návratový kód: " -"%(ret.status)d, zpráva: %(ret.data)s." - -msgid "Error activating LV" -msgstr "Chyba při aktivaci logického svazku" - -#, python-format -msgid "Error cleaning up failed volume creation. Msg - %s." -msgstr "Chyba při čištění selhaného vytváření svazku. Zpráva - %s" - -msgid "Error cloning volume" -msgstr "Chyba při klonování svazku" - -msgid "Error closing channel." -msgstr "Chyba při uzavírání kanálu." - -#, python-format -msgid "" -"Error contacting glance server '%(netloc)s' for '%(method)s', %(extra)s." -msgstr "" -"Chyba při kontaktování serveru glance '%(netloc)s' pro '%(method)s', " -"%(extra)s." - -#, python-format -msgid "Error creating QOS rule %s" -msgstr "Chyba při vytváření pravidla QOS %s" - -msgid "Error creating Volume" -msgstr "Chyba při vytváření svazku" - -msgid "Error creating Volume Group" -msgstr "Chyba při vytváření skupiny svazku" - -msgid "Error creating chap record." -msgstr "Chyba při vytváření záznamu chap." 
- -msgid "Error creating cloned volume" -msgstr "Chyba při vytváření klonovaného svazku" - -msgid "Error creating snapshot" -msgstr "Chyba při vytváření snímku" - -msgid "Error creating volume" -msgstr "Chyba při vytváření svazku" - -#, python-format -msgid "Error creating volume. Msg - %s." -msgstr "Chyba při vytváření svazku. Zpráva - %s." - -msgid "Error deactivating LV" -msgstr "Chyba při deaktivaci logického svazku" - -msgid "Error deleting snapshot" -msgstr "Chyba při mazání snímku" - -#, python-format -msgid "Error detaching snapshot %(snapshot)s, due to remove export failure." -msgstr "" -"Chyba při odpojování snímku %(snapshot)s, kvůli selhání při odebrání exportu." - -#, python-format -msgid "Error detaching volume %(volume)s, due to remove export failure." -msgstr "" -"Chyba při odpojování svazku %(volume)s, kvůli selhání při odebrání exportu." - -#, python-format -msgid "Error detaching volume %s" -msgstr "Chyba při odpojování svazku %s" - -#, python-format -msgid "Error disassociating storage group from policy: %s." -msgstr "Chyba při odloučení skupiny úložiště od zásady: %s" - -msgid "Error during re-export on driver init." -msgstr "Chyba při znovu exportování během zavádění ovladače." - -msgid "Error executing SSH command." -msgstr "Chyba při provádění příkazu SSH." - -msgid "Error executing command via ssh." -msgstr "Chyba při provádění příkazu pomocí ssh." - -#, python-format -msgid "Error executing command via ssh: %s" -msgstr "Chyba při provádění příkazu pomocí ssh: %s" - -msgid "Error extending Volume" -msgstr "Chyba při rozšiřování svazku" - -msgid "Error extending volume" -msgstr "Chyba při rozšiřování svazku" - -#, python-format -msgid "Error extending volume %(id)s. Ex: %(ex)s" -msgstr "Chyba při rozšiřování svazku %(id)s. Výjimka: %(ex)s" - -#, python-format -msgid "Error extending volume: %(vol)s. Exception: %(ex)s" -msgstr "Chyba při rozšiřování svazku: %(vol)s. 
Výjimka: %(ex)s" - -#, python-format -msgid "Error finding target pool instance name for pool: %(targetPoolName)s." -msgstr "Chyba při hledání cílového názvu instance zásoby: %(targetPoolName)s." - -#, python-format -msgid "Error getting LUN attribute. Exception: %s" -msgstr "Chyba při získávání vlastnosti LUN. Výjimka: %s" - -msgid "Error getting active FC target ports." -msgstr "Chyba při získávání aktivních cílových portů FC." - -msgid "Error getting active ISCSI target iqns." -msgstr "Chyba při získávání aktivních cílových iqn iSCSI." - -msgid "Error getting active ISCSI target portals." -msgstr "Chyba při získávání aktivních cílových portálů iSCSI." - -msgid "Error getting array, pool, SLO and workload." -msgstr "Chyba při získávání pole, zásobu, SLO a vytížení." - -msgid "Error getting chap record." -msgstr "Chyba při získávání záznamu chap." - -msgid "Error getting name server info." -msgstr "Při získávání informací o jmenném serveru nastala chyba." - -msgid "Error getting show fcns database info." -msgstr "Při získávání informací o zobrazení databáze fcns nastala chyba." - -msgid "Error getting target pool name and array." -msgstr "Chyba při získávání názvu cílového pole a zásoby." - -#, python-format -msgid "Error in copying volume: %s" -msgstr "Chyba při kopírování svazku %s" - -#, python-format -msgid "" -"Error in extending volume size: Volume: %(volume)s Vol_Size: %(vol_size)d " -"with Snapshot: %(snapshot)s Snap_Size: %(snap_size)d" -msgstr "" -"Chyba při rozšiřování velikost svazku: Svazek: %(volume)s, velikost svazku: " -"%(vol_size)d se snímkem: %(snapshot)s, velikost svazku: %(snap_size)d" - -#, python-format -msgid "Error in workflow copy from cache. %s." -msgstr "Chyba v postupu při kopírování z mezipaměti. %s." - -#, python-format -msgid "Error invalid json: %s" -msgstr "Chyba neplatný json: %s" - -msgid "Error manage existing get volume size." -msgstr "Chyba při získávání velikosti spravovaného svazku." - -msgid "Error manage existing volume." 
-msgstr "Chyba při správě existujícího svazku." - -#, python-format -msgid "Error mapping volume: %s" -msgstr "Chyba při mapování svazku: %s" - -#, python-format -msgid "" -"Error migrating volume: %(volumename)s. to target pool %(targetPoolName)s." -msgstr "" -"Chyba při přesunu svazku: %(volumename)s. do cílové zásoby " -"%(targetPoolName)s." - -#, python-format -msgid "Error migrating volume: %s" -msgstr "Chyba při přesunování svazku: %s" - -#, python-format -msgid "" -"Error occurred in the volume driver when updating consistency group " -"%(group_id)s." -msgstr "" -"Při aktualizaci skupiny jednotnosti %(group_id)s nastala chyba v ovladači " -"svazku." - -msgid "" -"Error occurred when adding hostgroup and lungroup to view. Remove lun from " -"lungroup now." -msgstr "" -"Při přidávání skupiny hostitele a lun do zobrazení nastala chyba. Prosím " -"odstraňte lun ze skupiny." - -#, python-format -msgid "" -"Error occurred when building request spec list for consistency group %s." -msgstr "" -"Při sestavování seznamu žádaných specifikací pro skupinu jednotnosti %s " -"nastala chyba." - -#, python-format -msgid "Error occurred when creating cgsnapshot %s." -msgstr "Při vytváření snímku skupiny jednotnosti %s nastala chyba." - -#, python-format -msgid "" -"Error occurred when creating cloned volume in the process of creating " -"consistency group %(group)s from source CG %(source_cg)s." -msgstr "" -"Při vytváření klonovaného svazku během vytváření skupiny jednotnosti " -"%(group)s ze zdrojové skupiny jednotnosti %(source_cg)s nastala chyba." - -#, python-format -msgid "" -"Error occurred when creating consistency group %(cg)s from cgsnapshot " -"%(cgsnap)s." -msgstr "" -"Při vytváření skupiny jednotnosti %(cg)s ze snímku %(cgsnap)s nastala chyba." - -#, python-format -msgid "" -"Error occurred when creating consistency group %(group)s from cgsnapshot " -"%(cgsnap)s." -msgstr "" -"Při vytváření skupiny jednotnosti %(group)s ze snímku %(cgsnap)s nastala " -"chyba." 
- -#, python-format -msgid "" -"Error occurred when creating consistency group %(group)s from source CG " -"%(source_cg)s." -msgstr "" -"Při vytváření skupiny jednotnosti %(group)s ze zdrojové skupiny jednotnosti " -"%(source_cg)s nastala chyba." - -#, python-format -msgid "Error occurred when creating consistency group %s." -msgstr "Při vytváření skupiny jednotnosti %s nastala chyba." - -#, python-format -msgid "" -"Error occurred when creating volume entry from snapshot in the process of " -"creating consistency group %(group)s from cgsnapshot %(cgsnap)s." -msgstr "" -"Při vytváření záznamu o svazku ze snímku během vytváření skupiny jednotnosti " -"%(group)s ze snímku skupiny jednotnosti %(cgsnap)s nastala chyba." - -#, python-format -msgid "Error occurred when updating consistency group %(group_id)s." -msgstr "Při aktualizaci skupiny jednotnosti %(group_id)s nastala chyba." - -#, python-format -msgid "Error occurred while cloning backing: %s during retype." -msgstr "Při klonování zálohy nastala chyba: %s během přetypování." - -#, python-format -msgid "Error occurred while copying %(src)s to %(dst)s." -msgstr "Při kopírování %(src)s do %(dst)s nastala chyba." - -#, python-format -msgid "Error occurred while copying image: %(id)s to volume: %(vol)s." -msgstr "Při kopírování obrazu nastala chyba: %(id)s do svazku %(vol)s." - -#, python-format -msgid "Error occurred while copying image: %(image_id)s to %(path)s." -msgstr "Při kopírování obrazu nastala chyba: %(image_id)s do %(path)s." - -msgid "Error occurred while creating temporary backing." -msgstr "Při vytváření dočasné zálohy nastala chyba." - -#, python-format -msgid "Error occurred while creating volume: %(id)s from image: %(image_id)s." -msgstr "Při vytváření svazku nastala chyba: %(id)s z obrazu %(image_id)s." - -#, python-format -msgid "" -"Error on execute %(command)s. Error code: %(exit_code)d Error msg: %(result)s" -msgstr "" -"Chyba při provádění %(command)s. 
Kód chyby: %(exit_code)d Chybová zpráva: " -"%(result)s" - -#, python-format -msgid "" -"Error on execute command. Error code: %(exit_code)d Error msg: %(result)s" -msgstr "" -"Chyba při provádění příkazu. Kód chyby: %(exit_code)d Chybová zpráva: " -"%(result)s" - -msgid "Error parsing array from host capabilities." -msgstr "Chyba při zpracování pole ze schopností hostitele." - -msgid "Error parsing array, pool, SLO and workload." -msgstr "Chyba při zpracování pole, zásoby, SLO a vytížení." - -msgid "Error parsing target pool name, array, and fast policy." -msgstr "Chyba při zpracování názvu cílového pole, zásoby a zásady fast." - -#, python-format -msgid "" -"Error provisioning volume %(lun_name)s on %(volume_name)s. Details: %(ex)s" -msgstr "" -"Chyba při poskytování svazku %(lun_name)s v %(volume_name)s. Podrobnosti: " -"%(ex)s" - -msgid "Error querying thin pool about data_percent" -msgstr "Chyba při dotazování mělké zásoby o procentech dat" - -msgid "Error renaming logical volume" -msgstr "Chyba při přejmenování logického svazku" - -#, python-format -msgid "Error resolving host %(host)s. Error - %(e)s." -msgstr "Nelze převést na ip adresu hostitele %(host)s. Chyba - %(e)s." - -#, python-format -msgid "Error retrieving LUN %(vol)s number" -msgstr "Chyba při získávání čísla LUN %(vol)s" - -#, python-format -msgid "Error running SSH command: \"%s\"." -msgstr "Chyba při provádění příkazu SSH: \"%s\"." - -#, python-format -msgid "Error running SSH command: %s" -msgstr "Chyba při provádění příkazu SSH: %s" - -msgid "Error running command." -msgstr "Chyba při provádění příkazu." - -#, python-format -msgid "" -"Error scheduling %(volume_id)s from last vol-service: %(last_host)s : %(exc)s" -msgstr "" -"Chyba při plánování %(volume_id)s z poslední služby svazku: %(last_host)s : " -"%(exc)s" - -msgid "Error sending a heartbeat to coordination backend." -msgstr "Chyba při odesílání informací o aktivitě do vrstvy pro koordinaci." 
- -#, python-format -msgid "Error setting Flash Cache policy to %s - exception" -msgstr "Chyba při nastavování zásady mezipaměti Flash na %s - došlo k výjimce" - -msgid "Error starting coordination backend." -msgstr "Chyba při spouštění podpůrné vrstvy pro koordinaci." - -#, python-format -msgid "Error unmapping volume: %s" -msgstr "Chyba při zrušení mapování svazku: %s" - -#, python-format -msgid "Error verifying LUN container %(bkt)s" -msgstr "Chyba při ověřování kontejneru LUN %(bkt)s" - -#, python-format -msgid "Error verifying iSCSI service %(serv)s on host %(hst)s" -msgstr "Chyba při ověřování služby iSCSI %(serv)s, na hostiteli %(hst)s" - -msgid "Error: unable to snap replay" -msgstr "Chyba: Nelze vytvořit snímek ze zpětného načtení" - -#, python-format -msgid "Exception cloning volume %(name)s from source volume %(source)s." -msgstr "Výjimka při klonování svazku %(name)s ze zdrojového svazku %(source)s." - -#, python-format -msgid "Exception creating LUN %(name)s in pool %(pool)s." -msgstr "Při vytváření LUN %(name)s v zásobě %(pool)s došlo k výjimce." - -#, python-format -msgid "Exception creating vol %(name)s on pool %(pool)s." -msgstr "Výjimka při vytváření svazku %(name)s v zásobě %(pool)s." - -#, python-format -msgid "" -"Exception creating volume %(name)s from source %(source)s on share %(share)s." -msgstr "" -"Výjimka při vytváření svazku %(name)s ze zdroje %(source)s ve sdílení " -"%(share)s." - -#, python-format -msgid "Exception details: %s" -msgstr "Podrobnosti výjimky: %s" - -#, python-format -msgid "Exception during mounting %s" -msgstr "Při připojování %s došlo k výjimce" - -#, python-format -msgid "Exception during mounting %s." -msgstr "Při připojování %s došlo k výjimce." 
- -#, python-format -msgid "Exception during snapCPG revert: %s" -msgstr "Výjimka během vrácení snímku společné skupiny poskytování: %s" - -msgid "Exception encountered: " -msgstr "Zjištěna výjimka:" - -#, python-format -msgid "Exception handling resource: %s" -msgstr "Zachycování výjimky zdroje: %s" - -msgid "Exception in string format operation" -msgstr "Výjimka při operaci s formátem řetězce" - -msgid "Exception loading extension." -msgstr "Výjimka během načítání rozšíření." - -#, python-format -msgid "Exception: %(ex)s" -msgstr "Výjimka: %(ex)s" - -#, python-format -msgid "Exception: %s" -msgstr "Výjimka: %s" - -#, python-format -msgid "Exception: %s." -msgstr "Výjimka: %s." - -#, python-format -msgid "Exec of \"rm\" command on backing file for %s was unsuccessful." -msgstr "Spuštění příkazu \"rm\" na záložní soubor %s bylo neúspěšné." - -#, python-format -msgid "Exists snapshot notification failed: %s" -msgstr "Oznámení o existenci snímku selhalo: %s" - -#, python-format -msgid "Exists volume notification failed: %s" -msgstr "Oznámení o existenci svazku selhalo: %s" - -msgid "Extend volume failed." -msgstr "Rozšíření svazku selhalo." - -#, python-format -msgid "Extension of volume %s failed." -msgstr "Rozšíření snímku %s selhalo." - -msgid "" -"Extra spec replication:mode must be set and must be either 'sync' or " -"'periodic'." -msgstr "" -"Dodatečná specifikace replication:mode musí být zadána a musí mít hodnotu " -"'sync' nebo 'periodic'." - -msgid "" -"Extra spec replication:sync_period must be greater than 299 and less than " -"31622401 seconds." -msgstr "" -"Dodatečná specifikace replication:sync_period musí být větší než 299 a " -"menší než 31622401 vteřin." - -msgid "FAST is not supported on this array." -msgstr "FAST není podporován v tomto poli." 
- -#, python-format -msgid "Failed collecting fcns database info for fabric %s" -msgstr "Shromažďování informací databáze fcns pro fabric %s selhalo" - -#, python-format -msgid "Failed collecting name server info from fabric %s" -msgstr "Shromáždění informací o jmenném serveru z fabric %s selhalo" - -#, python-format -msgid "Failed collecting nsshow info for fabric %s" -msgstr "Shromáždění informací nsshow pro fabric %s selhalo" - -msgid "Failed collecting show fcns database for fabric" -msgstr "Shromažďování zobrazení databáze fcns pro fabric selhalo" - -#, python-format -msgid "Failed destroying volume entry %s" -msgstr "Nelze zničit položku svazku %s" - -#, python-format -msgid "Failed destroying volume entry: %s." -msgstr "Nelze zničit položku svazku: %s." - -#, python-format -msgid "" -"Failed fetching snapshot %(snapshot_id)s bootable flag using the provided " -"glance snapshot %(snapshot_ref_id)s volume reference" -msgstr "" -"Získání příznaku zaveditelnosti snímku %(snapshot_id)s selhalo pomocí " -"zadaného odkazu na svazek snímku glance %(snapshot_ref_id)s" - -#, python-format -msgid "Failed getting active zone set from fabric %s" -msgstr "Získání aktivní zóny nastavené z fabric %s selhalo" - -#, python-format -msgid "Failed getting zone status from fabric %s" -msgstr "Získání stavu zóny z fabric %s selhalo" - -#, python-format -msgid "Failed image conversion during cache creation: %s" -msgstr "Během vytváření mezipaměti došlo k chybě v převodu obrazu: %s" - -#, python-format -msgid "" -"Failed notifying about the snapshot action %(event)s for snapshot %(snp_id)s." -msgstr "Oznámení činnosti snímku %(event)s selhalo u snímku %(snp_id)s." - -#, python-format -msgid "" -"Failed notifying about the volume action %(event)s for volume %(volume_id)s" -msgstr "Oznámení činnosti svazku %(event)s selhalo u svazku %(volume_id)s" - -#, python-format -msgid "Failed notifying on %(topic)s payload %(payload)s" -msgstr "Oznámení %(topic)s s obsahem %(payload)s selhalo." 
- -#, python-format -msgid "" -"Failed recovery attempt to create iscsi backing lun for Volume ID:" -"%(vol_id)s: %(e)s" -msgstr "" -"Selhal pokus o obnovu vytvoření lun zálohující iscsi pro svazek s ID:" -"%(vol_id)s: %(e)s" - -#, python-format -msgid "Failed rolling back quota for %s reservations" -msgstr "Nelze vrátit zpět kvóty pro rezervace %s" - -#, python-format -msgid "Failed rolling back quota for %s reservations." -msgstr "Nelze vrátit zpět kvóty pro rezervace %s." - -#, python-format -msgid "" -"Failed setting source volume %(source_volid)s back to its initial " -"%(source_status)s status" -msgstr "" -"Nelze nastavit zdrojový svazek %(source_volid)s zpět na původní stav " -"%(source_status)s " - -#, python-format -msgid "" -"Failed to Roll back to re-add volume %(volumeName)s to default storage group " -"for fast policy %(fastPolicyName)s. Please contact your sysadmin to get the " -"volume returned to the default storage group." -msgstr "" -"Nelze se vrátit zpět do bodu znovu přidání svazku %(volumeName)s do výchozí " -"skupiny úložiště pro zásadu fast %(fastPolicyName)s. Prosím kontaktujte " -"svého správce systému, aby svazek vrátil zpět do výchozí skupiny úložiště." - -#, python-format -msgid "" -"Failed to Roll back to re-add volume %(volumeName)s to default storage group " -"for fast policy %(fastPolicyName)s: Please contact your sys admin to get the " -"volume re-added manually." -msgstr "" -"Nelze se vrátit zpět do bodu znovu přidání svazku %(volumeName)s do výchozí " -"skupiny úložiště pro zásadu fast %(fastPolicyName)s: Prosím kontaktujte " -"svého správce systému, aby svazek přidal zpět ručně." - -#, python-format -msgid "" -"Failed to add %(volumeName)s to default storage group for fast policy " -"%(fastPolicyName)s." -msgstr "" -"Nelze přidat %(volumeName)s do výchozí skupiny úložiště pro zásadu fast " -"%(fastPolicyName)s." - -#, python-format -msgid "Failed to add %s to cg." -msgstr "Nelze odstranit %s ze skupiny jednotnosti." 
- -#, python-format -msgid "Failed to add device to handler %s" -msgstr "Nelze přidat zařízení do obslužné rutiny %s" - -#, python-format -msgid "Failed to add initiator iqn %s to target" -msgstr "Nelze přidat zavaděč iqn %s do cíle" - -#, python-format -msgid "Failed to add initiator to group for SCST target %s" -msgstr "Nelze přidat zavaděč do skupiny pro cíl SCST %s" - -#, python-format -msgid "Failed to add lun to SCST target id:%(vol_id)s: %(e)s" -msgstr "Nelze přidat lun do cíle SCST s id: %(vol_id)s: %(e)s" - -#, python-format -msgid "Failed to add multihost-access for volume \"%s\"." -msgstr "Nelze přidat přístup více hostitelů ke svazku \"%s\"." - -#, python-format -msgid "" -"Failed to add storage group %(storageGroupInstanceName)s to tier policy rule " -"%(tierPolicyRuleInstanceName)s." -msgstr "" -"Nelze přidat skupinu úložiště %(storageGroupInstanceName)s do pravidla " -"zásady vrstvení %(tierPolicyRuleInstanceName)s." - -#, python-format -msgid "Failed to add target(port: %s)" -msgstr "Nelze přidat cíl (port: %s)" - -msgid "Failed to attach source volume for copy." -msgstr "Nelze připojit zdrojový svazek pro kopírování." - -#, python-format -msgid "Failed to attach volume %(vol)s." -msgstr "Nelze připojit svazek %(vol)s." - -msgid "Failed to authenticate user." -msgstr "Nelze ověřit uživatele." - -#, python-format -msgid "Failed to check cluster status.(command: %s)" -msgstr "Nelze zkontrolovat stav clusteru. (příkaz: %s)" - -#, python-format -msgid "Failed to clone image volume %(id)s." -msgstr "Klonování svazku obrazu %(id)s selhalo." - -#, python-format -msgid "Failed to clone volume %(volume_id)s for image %(image_id)s." -msgstr "Nelze klonovat svazek %(volume_id)s pro obraz %(image_id)s." 
- -#, python-format -msgid "Failed to clone volume.(command: %s)" -msgstr "Nelze klonovat svazek.(příkaz: %s)" - -#, python-format -msgid "Failed to close disk device %s" -msgstr "Nelze uzavřít diskové zařízení %s" - -#, python-format -msgid "" -"Failed to collect return properties for volume %(vol)s and connector " -"%(conn)s." -msgstr "" -"Shromáždění vlastností pro návrat svazku %(vol)s a konektoru %(conn)s " -"selhalo." - -#, python-format -msgid "Failed to commit reservations %s" -msgstr "Nelze odevzdat rezervace %s" - -#, python-format -msgid "Failed to copy %(src)s to %(dest)s." -msgstr "Nelze zkopírovat %(src)s do %(dest)s." - -#, python-format -msgid "Failed to copy image %(image_id)s to volume: %(volume_id)s" -msgstr "Kopírování obrazu %(image_id)s do svazku %(volume_id)s selhalo" - -#, python-format -msgid "Failed to copy image to volume: %(volume_id)s" -msgstr "Kopírování obrazu do svazku %(volume_id)s selhalo" - -#, python-format -msgid "Failed to copy volume %(src)s to %(dest)s." -msgstr "Nelze zkopírovat svazek %(src)s do %(dest)s." - -#, python-format -msgid "Failed to copy volume %(vol1)s to %(vol2)s" -msgstr "Nelze zkopírovat svazek %(vol1)s do %(vol2)s" - -#, python-format -msgid "Failed to create %(conf)s for volume id:%(vol_id)s" -msgstr "Nelze vytvořit %(conf)s pro svazek s id: %(vol_id)s" - -#, python-format -msgid "Failed to create CGSnapshot. Exception: %s." -msgstr "Nelze vytvořit snímek skupiny jednotnosti. Výjimka: %s." - -msgid "" -"Failed to create SOAP client.Check san_ip, username, password and make sure " -"the array version is compatible" -msgstr "" -"Nelze vytvořit klienta SOAP. Zkontrolujte san_ip, uživatelské jméno, heslo a " -"ujistěte se, že verze pole je kompatibilní" - -#, python-format -msgid "" -"Failed to create a first volume for storage group : %(storageGroupName)s." -msgstr "" -"Nelze vytvořit první svazek pro skupinu úložiště : %(storageGroupName)s." 
- -#, python-format -msgid "Failed to create blkio cgroup '%(name)s'." -msgstr "Nelze vytvořit kontrolní skupinu vstupu/výstupu bloku '%(name)s'." - -#, python-format -msgid "Failed to create clone of volume \"%s\"." -msgstr "Nelze vytvořit klon svazku \"%s\"." - -#, python-format -msgid "Failed to create cloned volume %s." -msgstr "Nelze vytvořit klonovaný svazek %s." - -#, python-format -msgid "Failed to create consistency group %(group_id)s." -msgstr "Nelze vytvořit skupinu jednotnosti %(group_id)s." - -#, python-format -msgid "" -"Failed to create default storage group for FAST policy : %(fastPolicyName)s." -msgstr "" -"Nelze vytvořit výchozí skupinu úložiště pro zásadu FAST : %(fastPolicyName)s." - -#, python-format -msgid "Failed to create group to SCST target %s" -msgstr "Nelze vytvořit skupinu pro cíl SCST %s" - -#, python-format -msgid "Failed to create hardware id(s) on %(storageSystemName)s." -msgstr "Nelze vytvořit id hardwaru v %(storageSystemName)s." - -#, python-format -msgid "" -"Failed to create iscsi target for Volume ID: %(vol_id)s. Please ensure your " -"tgtd config file contains 'include %(volumes_dir)s/*'" -msgstr "" -"Nelze vytvořit cílové iscsi pro svazek s ID %(vol_id)s. Prosím ujistěte se, " -"že váš soubor s nastavením tgtd obsahuje 'include %(volumes_dir)s/*'" - -#, python-format -msgid "Failed to create iscsi target for Volume ID: %(vol_id)s: %(e)s" -msgstr "Nelze vytvořit cílové iscsi pro svazek s ID %(vol_id)s: %(e)s" - -#, python-format -msgid "" -"Failed to create iscsi target for volume id:%(vol_id)s. Please verify your " -"configuration in %(volumes_dir)s'" -msgstr "" -"Nelze vytvořit cílové iscsi pro svazek s id %(vol_id)s. 
Prosím ověřte své " -"nastavení v %(volumes_dir)s'" - -#, python-format -msgid "Failed to create iscsi target for volume id:%(vol_id)s: %(e)s" -msgstr "Nelze vytvořit cílové iscsi pro svazek s id %(vol_id)s: %(e)s" - -#, python-format -msgid "Failed to create iscsi target for volume id:%s" -msgstr "Nelze vytvořit cílové iscsi pro svazek s id: %s" - -#, python-format -msgid "Failed to create iscsi target for volume id:%s." -msgstr "Nelze vytvořit cílové iscsi pro svazek s id: %s." - -#, python-format -msgid "Failed to create manage_existing flow: %(object_type)s %(object_id)s." -msgstr "" -"Nelze vytvořit postup pro správu existujících: %(object_type)s %(object_id)s." - -#, python-format -msgid "Failed to create snapshot of volume \"%s\"." -msgstr "Nelze vytvořit snímek svazku \"%s\"." - -#, python-format -msgid "Failed to create snapshot. (command: %s)" -msgstr "Nelze vytvořit snímek. (příkaz: %s)" - -#, python-format -msgid "Failed to create transfer record for %s" -msgstr "Nelze vytvořit záznam o přenosu pro %s" - -#, python-format -msgid "Failed to create volume \"%s\"." -msgstr "Nelze vytvořit svazek \"%s\"." - -#, python-format -msgid "Failed to create volume %s" -msgstr "Nelze vytvořit svazek %s" - -#, python-format -msgid "Failed to create volume %s." -msgstr "Nelze vytvořit svazek %s." - -#, python-format -msgid "Failed to create volume from snapshot \"%s\"." -msgstr "Nelze vytvořit svazek ze snímku \"%s\"." - -#, python-format -msgid "Failed to create volume. %s" -msgstr "Nelze vytvořit svazek. %s" - -#, python-format -msgid "Failed to create volume: %(name)s (%(status)s)" -msgstr "Nelze vytvořit svazek: %(name)s (%(status)s)" - -#, python-format -msgid "Failed to created Cinder secure environment indicator file: %s" -msgstr "Nelze vytvořit soubor indikující bezpečné prostředí Cinder: %s" - -#, python-format -msgid "Failed to delete initiator iqn %s from target." -msgstr "Nelze smazat zavaděč iqn %s z cíle." 
- -#, python-format -msgid "Failed to delete snapshot %(snap)s of volume %(vol)s." -msgstr "Nelze smazat snímek %(snap)s svazku %(vol)s." - -#, python-format -msgid "Failed to delete snapshot. (command: %s)" -msgstr "Nelze smazat snímek. (příkaz: %s)" - -#, python-format -msgid "" -"Failed to delete the snapshot %(snap)s of CGSnapshot. Exception: " -"%(exception)s." -msgstr "" -"Nelze smazat snímek %(snap)s ze snímku skupiny jednotnosti. Výjimka: " -"%(exception)s." - -#, python-format -msgid "Failed to delete the volume %(vol)s of CG. Exception: %(exception)s." -msgstr "" -"Nelze smazat svazek %(vol)s skupiny jednotnosti. Výjimka: %(exception)s." - -#, python-format -msgid "Failed to delete volume \"%s\"." -msgstr "Nelze smazat svazek \"%s\"." - -#, python-format -msgid "Failed to delete volume %s" -msgstr "Nelze smazat svazek %s" - -#, python-format -msgid "Failed to delete volume. %s" -msgstr "Nelze smazat svazek. %s" - -#, python-format -msgid "Failed to ensure export of volume \"%s\"." -msgstr "Nelze zajistit export svazku \"%s\"." - -#, python-format -msgid "Failed to ensure export of volume %s" -msgstr "Nelze zajistit export svazku %s" - -#, python-format -msgid "Failed to export fiber channel target due to %s" -msgstr "Nelze exportovat cíl fiber channel z důvodu %s" - -#, python-format -msgid "Failed to extend volume: %(vol)s to size: %(size)s GB." -msgstr "Nelze rozšířit svazek: %(vol)s na velikost %(size)s GB." - -#, python-format -msgid "" -"Failed to extend_volume %(name)s from %(current_size)sGB to %(new_size)sGB." -msgstr "Nelze rozšířit svazek %(name)s z %(current_size)sGB na %(new_size)sGB." - -#, python-format -msgid "Failed to find %(s)s. Result %(r)s" -msgstr "Nelze najít %(s)s. Výsledek %(r)s" - -msgid "Failed to get IQN!" -msgstr "Nelze získat IQN!" - -msgid "Failed to get LUN information!" -msgstr "Nelze získat informace o LUN!" - -#, python-format -msgid "Failed to get allocation information (%d)!" 
-msgstr "Získání informací o přidělení (%d) selhalo!" - -#, python-format -msgid "Failed to get allocation information: %(host)s (%(status)d)!" -msgstr "Nelze získat informace o přidělení: %(host)s (%(status)d)!" - -#, python-format -msgid "Failed to get device number for throttling: %(error)s" -msgstr "Nelze získat číslo zařízení pro škrcení: %(error)s" - -#, python-format -msgid "" -"Failed to get driver initiator data for initiator %(initiator)s and " -"namespace %(namespace)s" -msgstr "" -"Nelze získat data zavedení ovladače pro zavaděč %(initiator)s a jmenný " -"prostor %(namespace)s" - -#, python-format -msgid "Failed to get fiber channel info from storage due to %(stat)s" -msgstr "Nelze získat informace o fiber channel z úložiště kvůli %(stat)s" - -#, python-format -msgid "Failed to get fiber channel target from storage server due to %(stat)s" -msgstr "Nelze získat cíl fiber channel ze serverového úložiště kvůli %(stat)s" - -#, python-format -msgid "Failed to get or create storage group %(storageGroupName)s." -msgstr "Nelze získat nebo vytvořit skupinu úložiště %(storageGroupName)s." - -#, python-format -msgid "Failed to get response: %s." -msgstr "Nelze získat odpověď: %s." - -#, python-format -msgid "Failed to get server info due to %(state)s." -msgstr "Nelze získat informace o disku z důvodu %(state)s." - -msgid "Failed to get sns table" -msgstr "Nelze získat tabulku sns" - -#, python-format -msgid "Failed to get target wwpns from storage due to %(stat)s" -msgstr "Nelze získat cílov= wwpns z úložiště kvůli %(stat)s" - -msgid "Failed to get updated stats from Datera Cluster." -msgstr "Nelze získat aktualizované statistiky z clusteru Datera." - -msgid "Failed to get updated stats from Datera cluster." -msgstr "Nelze získat aktualizované statistiky z clusteru Datera." - -#, python-format -msgid "Failed to get volume status. %s" -msgstr "Nelze získat stav svazku. 
%s" - -msgid "Failed to initialize connection" -msgstr "Nelze zavést připojení" - -#, python-format -msgid "Failed to initialize connection to volume \"%s\"." -msgstr "Nelze zavést připojení ke svazku \"%s\"." - -msgid "Failed to initialize connection." -msgstr "Nelze zavést připojení." - -msgid "Failed to initialize driver." -msgstr "Nelze zavést ovladač." - -#, python-format -msgid "Failed to issue df command for path %(path)s, error: %(error)s." -msgstr "Nelze zadat příkaz df pro cestu %(path)s, chyba: %(error)s." - -#, python-format -msgid "Failed to issue mmgetstate command, error: %s." -msgstr "Nelze zadat příkaz mmgetstate, chyba: %s." - -#, python-format -msgid "Failed to issue mmlsattr command for path %(path)s, error: %(error)s." -msgstr "Nelze zadat příkaz mmlsattr pro cestu %(path)s, chyba: %(error)s" - -#, python-format -msgid "Failed to issue mmlsattr command on path %(path)s, error: %(error)s" -msgstr "Nelze zadat příkaz mmlsattr v cestě %(path)s, chyba: %(error)s" - -#, python-format -msgid "Failed to issue mmlsconfig command, error: %s." -msgstr "Nelze zadat příkaz mmtsconfig, chyba: %s." - -#, python-format -msgid "Failed to issue mmlsfs command for path %(path)s, error: %(error)s." -msgstr "Nelze zadat příkaz mmlsfs pro cestu %(path)s, chyba: %(error)s." - -#, python-format -msgid "Failed to issue mmlsfs command, error: %s." -msgstr "Nelze zadat příkaz mmlsfs, chyba: %s." - -#, python-format -msgid "Failed to open iet session list for %s" -msgstr "Nelze otevřít seznam sezení iet pro %s" - -#, python-format -msgid "Failed to open volume from %(path)s." -msgstr "Nelze otevřít svazek z %(path)s." - -#, python-format -msgid "Failed to present volume %(name)s (%(status)d)!" -msgstr "Nelze darovat svazek %(name)s (%(status)d)!" - -msgid "Failed to re-export volume, setting to ERROR." -msgstr "Nelze znovu exportovat svazek, je nastavován na ERROR." - -#, python-format -msgid "Failed to register image volume location %(uri)s." 
-msgstr "Nelze registrovat umístění svazku obrazu %(uri)s." - -#, python-format -msgid "" -"Failed to remove %(volumeName)s from the default storage group for the FAST " -"Policy." -msgstr "" -"Nelze odstranit %(volumeName)s z výchozí skupiny úložiště pro zásadu FAST." - -#, python-format -msgid "Failed to remove %s from cg." -msgstr "Nelze odstranit %s ze skupiny jednotnosti." - -#, python-format -msgid "Failed to remove LUN %s" -msgstr "Nelze odstranit LUN %s" - -#, python-format -msgid "Failed to remove iscsi target for Volume ID: %(vol_id)s: %(e)s" -msgstr "Nelze odstranit cílové iscsi pro svazek s ID: %(vol_id)s: %(e)s" - -#, python-format -msgid "Failed to remove iscsi target for volume id:%(vol_id)s: %(e)s" -msgstr "Nelze odstranit cílové iscsi pro svazek s id %(vol_id)s: %(e)s" - -#, python-format -msgid "Failed to remove iscsi target for volume id:%s" -msgstr "Nelze odstranit cílové iscsi pro svazek s id: %s" - -#, python-format -msgid "Failed to remove iscsi target for volume id:%s." -msgstr "Nelze odstranit cílové iscsi pro svazek s id: %s." - -#, python-format -msgid "Failed to rename %(new_volume)s into %(volume)s." -msgstr "Nelze přejmenovat %(new_volume)s na %(volume)s." - -msgid "Failed to rename the created snapshot, reverting." -msgstr "Nelze přejmenovat vytvořený snímek, probíhá vrácení zpět." - -#, python-format -msgid "Failed to request async delete of migration source vol %(vol)s: %(err)s" -msgstr "" -"Nelze zažádat o asynchronní smazání zdrojového svazku přesunu %(vol)s: " -"%(err)s" - -#, python-format -msgid "" -"Failed to resize vdi. Shrinking vdi not supported. vdi: %(vdiname)s new " -"size: %(size)s" -msgstr "" -"Nelze změnit velikost vdi. Zmenšování vdi není podporováno. VDI: " -"%(vdiname)s, nová velikost: %(size)s" - -#, python-format -msgid "" -"Failed to resize vdi. Too large volume size. vdi: %(vdiname)s new size: " -"%(size)s" -msgstr "" -"Nelze změnit velikost vdi. Velikost je příliš velká. 
VDI: %(vdiname)s, nová " -"velikost: %(size)s" - -#, python-format -msgid "Failed to resize vdi. vdi not found. %s" -msgstr "Nelze změnit velikost vdi, vdi nenalezeno. %s" - -#, python-format -msgid "Failed to resize vdi. vdi: %(vdiname)s new size: %(size)s" -msgstr "Nelze změnit velikost vdi. VDI: %(vdiname)s, nová velikost: %(size)s" - -#, python-format -msgid "Failed to resize volume %(volume_id)s, error: %(error)s." -msgstr "Nelze změnit velikost svazku %(volume_id)s, chyba: %(error)s." - -#, python-format -msgid "Failed to retrieve volume SolidFire-ID: %s in get_by_account!" -msgstr "Nelze získat svazek SolidFire s ID: %s v získání podle účtu!" - -#, python-format -msgid "" -"Failed to return volume %(volumeName)s to original storage pool. Please " -"contact your system administrator to return it to the correct location." -msgstr "" -"Nelze vrátit svazek %(volumeName)s do původní zásoby úložiště. Prosím " -"kontaktujte svého správce systému, aby ho vrátil na správné místo." - -#, python-format -msgid "Failed to roll back reservations %s" -msgstr "Nelze vrátit zpět rezervace %s" - -#, python-format -msgid "Failed to run task %(name)s: %(cause)s" -msgstr "Nelze spustit úkol %(name)s: %(cause)s" - -#, python-format -msgid "Failed to schedule_%(method)s: %(ex)s" -msgstr "Nelze naplánovat %(method)s: %(ex)s" - -#, python-format -msgid "Failed to send request: %s." -msgstr "Nelze odeslat požadavek: %s." - -#, python-format -msgid "Failed to set 'enable' attribute for SCST target %s" -msgstr "Nelze nastavit vlastnost 'enable' pro cíl SCST %s" - -#, python-format -msgid "Failed to set attribute for enable target driver %s" -msgstr "Nelze nastavit vlastnost pro povolení cílového ovladače %s" - -msgid "Failed to setup the Dell EqualLogic driver." -msgstr "Nastavení ovladače Dell EqualLogic selhalo." - -msgid "Failed to shutdown horcm." -msgstr "Ukončování horcm selhalo." 
- -#, python-format -msgid "Failed to snap Consistency Group %s" -msgstr "Nelze vytvořit snímek skupiny jednotnosti %s" - -msgid "Failed to start horcm." -msgstr "Spouštění horcm selhalo." - -msgid "Failed to terminate connection" -msgstr "Nelze ukončit připojení" - -#, python-format -msgid "Failed to terminate connection %(initiator)s %(vol)s" -msgstr "Nelze ukončit připojení %(initiator)s %(vol)s" - -#, python-format -msgid "Failed to terminate connection to volume \"%s\"." -msgstr "Nelze ukončit připojení ke svazku \"%s\"." - -#, python-format -msgid "Failed to umount %(share)s, reason=%(stderr)s" -msgstr "Nelze odpojit %(share)s, důvod: %(stderr)s" - -#, python-format -msgid "" -"Failed to update %(conf)s for volume id %(vol_id)s after removing iscsi " -"target" -msgstr "" -"Nelze aktualizovat %(conf)s pro svazek s id %(vol_id)s po odstranění cíle " -"iscsi" - -#, python-format -msgid "Failed to update %(conf)s for volume id:%(vol_id)s" -msgstr "Nelze aktualizovat %(conf)s pro svazek s id: %(vol_id)s" - -#, python-format -msgid "" -"Failed to update %(volume_id)s metadata using the provided snapshot " -"%(snapshot_id)s metadata." -msgstr "" -"Nelze aktualizovat popisná data %(volume_id)s pomocí popisných dat zadaného " -"snímku %(snapshot_id)s." - -#, python-format -msgid "Failed to update quota donating volume transfer id %s" -msgstr "Nelze aktualizovat id přenosu svazku dodávajícího kvótu %s." - -#, python-format -msgid "Failed to update quota for consistency group %s." -msgstr "Nelze aktualizovat kvótu skupiny jednotnosti %s." - -#, python-format -msgid "Failed to update quota for deleting volume: %s" -msgstr "Nelze aktualizovat kvóty kvůli smazání svazku: %s" - -#, python-format -msgid "Failed to update quota while deleting snapshots: %s" -msgstr "Nelze aktualizovat kvóty při mazání snímků: %s" - -msgid "Failed to update quota while deleting volume." -msgstr "Nelze aktualizovat kvóty při mazání svazku." 
- -msgid "Failed to update usages deleting backup" -msgstr "Nelze aktualizovat využití při mazání zálohy" - -msgid "Failed to update usages deleting snapshot" -msgstr "Nelze aktualizovat využití při mazání snímku." - -msgid "Failed to update usages deleting volume." -msgstr "Nelze aktualizovat využití při mazání svazku." - -#, python-format -msgid "Failed to update volume status: %s" -msgstr "Nelze aktualizovat stav svazku: %s" - -#, python-format -msgid "" -"Failed to verify that volume was added to storage group for FAST policy: " -"%(fastPolicyName)s." -msgstr "" -"Nelze ověřit zda svazek byl přidán do skupiny úložiště pro zásadu FAST: " -"%(fastPolicyName)s." - -msgid "Failed to write in /etc/scst.conf." -msgstr "Nelze zapisovat do/etc/scst.conf." - -#, python-format -msgid "Failed to write persistence file: %(path)s." -msgstr "Nelze zapsat soubor přetrvání: %(path)s." - -#, python-format -msgid "" -"Failed updating %(snapshot_id)s metadata using the provided volumes " -"%(volume_id)s metadata" -msgstr "" -"Nelze aktualizovat popisná data %(snapshot_id)s pomocí popisných dat " -"zadaného svazku %(volume_id)s" - -#, python-format -msgid "" -"Failed updating model of snapshot %(snapshot_id)s with creation provided " -"model %(model)s." -msgstr "" -"Nelze aktualizovat model snímku %(snapshot_id)s pomocí modelu %(model)s " -"poskytnutého při vytváření." - -#, python-format -msgid "" -"Failed updating model of snapshot %(snapshot_id)s with driver provided model " -"%(model)s." -msgstr "" -"Nelze aktualizovat model snímku %(snapshot_id)s pomocí ovladače poskytnutého " -"modelem %(model)s." 
- -#, python-format -msgid "" -"Failed updating model of volume %(volume_id)s with creation provided model " -"%(model)s" -msgstr "" -"Nelze aktualizovat model svazku %(volume_id)s pomocí modelu %(model)s " -"poskytnutého při vytváření" - -#, python-format -msgid "" -"Failed updating model of volume %(volume_id)s with driver provided model " -"%(model)s" -msgstr "" -"Nelze aktualizovat model svazku %(volume_id)s pomocí ovladače poskytnutého " -"modelem %(model)s" - -#, python-format -msgid "Failed updating snapshot %(snapshot_id)s with %(update)s." -msgstr "Aktualizace snímku %(snapshot_id)s pomocí %(update)s selhala." - -#, python-format -msgid "" -"Failed updating snapshot metadata using the provided volumes %(volume_id)s " -"metadata" -msgstr "" -"Nelze aktualizovat popisná data snímku pomocí popisných dat zadaného svazku " -"%(volume_id)s" - -#, python-format -msgid "Failed updating volume %(volume_id)s bootable flag to true" -msgstr "" -"Aktualizace příznaku zaveditelnosti svazku %(volume_id)s na true selhala" - -#, python-format -msgid "Failed updating volume %(volume_id)s with %(update)s" -msgstr "Aktualizace svazku %(volume_id)s pomocí %(update)s selhala" - -#, python-format -msgid "Failed updating volume %(volume_id)s with %(updates)s" -msgstr "Aktualizace svazku %(volume_id)s pomocí %(updates)s selhala" - -#, python-format -msgid "Failure deleting staged tmp LUN %s." -msgstr "Smazání zařazeného dočasného LUN %s selhalo." - -msgid "Fetch volume pool name failed." -msgstr "Získání názvu zásoby svazku selhalo." - -#, python-format -msgid "" -"FibreChannelDriver validate_connector failed. No '%(setting)s'. Make sure " -"HBA state is Online." -msgstr "" -"Ověření konektoru ovladačem Fibre Channel selhalo. '%(setting)s' nenalezeno. " -"Ujistěte se, že stav HBA je Online." - -#, python-format -msgid "Flexvisor failed to get event %(volume)s (%(status)s)." -msgstr "Flexvisor nemohl získat událost %(volume)s (%(status)s)." 
- -#, python-format -msgid "Flexvisor failed to get pool %(id)s info." -msgstr "Flexvisor nemohl získat informace o zásobě %(id)s." - -#, python-format -msgid "Flexvisor failed to get pool list due to %s." -msgstr "Flexvisor nemohl získat seznam zásob z důvodu %s." - -#, python-format -msgid "Flexvisor failed to get pool list.(Error: %d)" -msgstr "Flexvisor nemohl získat seznam zásob. (Chyba: %d)" - -#, python-format -msgid "Found %(count)s volumes mapped to id: %(uuid)s." -msgstr "Nalezeno %(count)s svazků mapovaných k id: %(uuid)s." - -msgid "Free capacity not set: volume node info collection broken." -msgstr "" -"Volná kapacita není nastavena: Shromažďování informací o uzlech svazku je " -"nefunkční." - -#, python-format -msgid "GPFS is not active. Detailed output: %s." -msgstr "GPFS není aktivní. Podrobný výstup: %s." - -msgid "Get LUN migration error." -msgstr "Chyba při získávání přesunu LUN." - -msgid "Get method error." -msgstr "Chyba získávání metody." - -#, python-format -msgid "Host PUT failed (%s)." -msgstr "PUT hostitele selhal (%s)." - -msgid "Host could not be found!" -msgstr "Hostitel nemohl být nalezen!" - -#, python-format -msgid "ISCSI discovery attempt failed for:%s" -msgstr "Pokus o zjištění ISCSI selhal pro: %s" - -msgid "ISE FW version is not compatible with OpenStack!" -msgstr "Verze ISE FW není kompatibilní s OpenStack!" - -msgid "ISE globalid not set!" -msgstr "ISE globalid není nastaveno!" - -#, python-format -msgid "Image size %(img_size)dGB is larger than volume size %(vol_size)dGB." -msgstr "" -"Velikost obrazu %(img_size)dGB je větší než velikost svazku %(vol_size)dGB." 
- -#, python-format -msgid "Invalid API object: %s" -msgstr "Neplatný objekt API: %s" - -#, python-format -msgid "Invalid JSON: %s" -msgstr "Neplatný JSON: %s" - -#, python-format -msgid "Invalid ReplayList return: %s" -msgstr "Neplatné předání seznamu zpětného načtení: %s" - -#, python-format -msgid "Invalid hostname %(host)s" -msgstr "Neplatný název hostitele %(host)s" - -msgid "" -"Issuing a fail-over failed because replication is not properly configured." -msgstr "" -"Vyvolání zavedení záložního systému, protože replikace není správně " -"nastavena." - -#, python-format -msgid "JSON encode params %(param)s error: %(status)s." -msgstr "Chyba parametrů kódování JSON %(param)s: %(status)s." - -#, python-format -msgid "JSON transfer data error. %s." -msgstr "Chyba přesunu dat JSON: %s" - -#, python-format -msgid "JSON transfer error: %s." -msgstr "Chyba přesunu JSON: %s." - -#, python-format -msgid "LUN %(path)s geometry failed. Message - %(msg)s" -msgstr "Geometrie LUN %(path)s selhala. Zpráva - %(msg)s" - -msgid "LUN extend failed!" -msgstr "Rozšíření LUN selhalo!" - -msgid "LUN unexport failed!" -msgstr "Zrušení exportu LUN selhalo!" - -#, python-format -msgid "" -"Location info needed for backend enabled volume migration not in correct " -"format: %s. Continuing with generic volume migration." -msgstr "" -"Informace o umístění potřebné pro přesun svazku za pomocí podpůrné vrstvy " -"nejsou ve správném formátu: %s. Bude použita obecná metoda přesunutí svazku." - -#, python-format -msgid "Looks like masking view: %(maskingViewName)s has recently been deleted." -msgstr "Zdá se, že zamaskování: %(maskingViewName)s bylo nedávno smazáno." - -#, python-format -msgid "Lun %s has dependent snapshots, skipping lun deletion." -msgstr "Lun %s má na sobě závislé snímky, smazání lun přeskočeno." - -#, python-format -msgid "Lun create for %s failed!" -msgstr "Vytvoření LUN pro %s selhalo!" 
- -#, python-format -msgid "Lun create snapshot for volume %(vol)s snapshot %(snap)s failed!" -msgstr "Vytvoření snímku Lun pro svazek %(vol)s a snímek %(snap)s selhalo!" - -#, python-format -msgid "Lun delete for %s failed!" -msgstr "Smazání Lun pro %s selhalo!" - -msgid "Lun mapping returned null!" -msgstr "Mapování Lun vrátilo prázdný obsah!" - -#, python-format -msgid "MSGID%(id)04d-E: %(msg)s" -msgstr "MSGID%(id)04d-E: %(msg)s" - -#, python-format -msgid "" -"Manager for service %(binary)s %(host)s is reporting problems, not sending " -"heartbeat. Service will appear \"down\"." -msgstr "" -"Správce služby %(binary)s %(host)s hlásí problémy, neodesílá informace o " -"aktivitě. Služba bude vypadat \"mimo provoz\"." - -#, python-format -msgid "" -"Masking View creation or retrieval was not successful for masking view " -"%(maskingViewName)s. Attempting rollback." -msgstr "" -"Vytvoření nebo získání zamaskování nebylo úspěšné u %(maskingViewName)s. " -"Pokus o zpětné vrácení." - -#, python-format -msgid "" -"Max retries reached deleting backup %(basename)s image of volume %(volume)s." -msgstr "" -"Při mazání zálohy obrazu %(basename)s svazku %(volume)s bylo dosaženo " -"maximálního množství pokusů." - -#, python-format -msgid "Message: %s" -msgstr "Zpráva: %s" - -msgid "Model update failed." -msgstr "Aktualizace modelu selhala." - -#, python-format -msgid "Modify volume PUT failed: %(name)s (%(status)d)." -msgstr "Změna PUT svazku selhala: %(name)s (%(status)d)." - -#, python-format -msgid "Mount failure for %(share)s after %(count)d attempts." -msgstr "Selhání připojení k %(share)s po %(count)d pokusech." - -#, python-format -msgid "Mount failure for %(share)s." -msgstr "Selhání připojení pro %(share)s." - -#, python-format -msgid "Multiple replay profiles under name %s" -msgstr "Nalezeno mnoho profilů rychlého načtení s názvem %s." 
- -msgid "No CLI output for firmware version check" -msgstr "" -"U kontroly verzi firmware nebyl žádný výstup v rozhraní příkazového řádku" - -#, python-format -msgid "No VIP configured for service %s" -msgstr "Nenastaveno žádné VIP pro službu %s" - -#, python-format -msgid "" -"No action required. Volume: %(volumeName)s is already part of pool: %(pool)s." -msgstr "" -"Není vyžadována žádná činnost. Svazek: %(volumeName)s již je součástí " -"zásoby: %(pool)s." - -#, python-format -msgid "" -"No action required. Volume: %(volumeName)s is already part of slo/workload " -"combination: %(targetCombination)s." -msgstr "" -"Není vyžadována žádná činnost. Svazek: %(volumeName)s již je součástí " -"kombinace slo/vytížení: %(targetCombination)s." - -#, python-format -msgid "" -"No snapshots found in database, but %(path)s has backing file " -"%(backing_file)s!" -msgstr "" -"V databázi nebyly nalezeny žádné snímky, ale %(path)s má záložní soubor " -"%(backing_file)s!" - -#, python-format -msgid "Not able to configure PBM for vCenter server: %s" -msgstr "Nelze nastavit PBM pro server vCenter: %s" - -#, python-format -msgid "OSError: command is %(cmd)s." -msgstr "Chyba OS: příkaz je %(cmd)s." - -#, python-format -msgid "OSError: command is %s." -msgstr "Chyba OS: příkaz je %s." - -#, python-format -msgid "" -"One of the components of the original masking view %(maskingViewName)s " -"cannot be retrieved so please contact your system administrator to check " -"that the correct initiator(s) are part of masking." -msgstr "" -"Jedna ze součástí původního zamaskování %(maskingViewName)s nemůže být " -"získána. Prosím kontaktujte svého správce systému, aby zkontroloval, že " -"správné zavaděče jsou součástí zamaskování." - -#, python-format -msgid "" -"Only SLO/workload migration within the same SRP Pool is supported in this " -"version The source pool : %(sourcePoolName)s does not match the target " -"array: %(targetPoolName)s. Skipping storage-assisted migration." 
-msgstr "" -"V této verzi je přesun SLO/vytížení podporován pouze v rámci stejné zásoby " -"SRP. Zdrojová zásoba : %(sourcePoolName)s se neshoduje s cílovým polem: " -"%(targetPoolName)s. Přesun za pomocí úložiště bude přeskočen." - -msgid "Only available volumes can be migrated between different protocols." -msgstr "Mezi různými protokoly lze přesunovat pouze dostupné svazky." - -#, python-format -msgid "POST for host create failed (%s)!" -msgstr "POST pro vytvoření hostitele selhalo (%s)!" - -#, python-format -msgid "Pipe1 failed - %s " -msgstr "Roura1 selhala - %s" - -#, python-format -msgid "Pipe2 failed - %s " -msgstr "Roura2 selhala - %s" - -msgid "" -"Please check your xml for format or syntax errors. Please see documentation " -"for more details." -msgstr "" -"Prosím zkontrolujte váš xml soubor, zda v něm nejsou chyby ve formátu, či " -"syntaxi. Pro další informace si přečtěte dokumentaci." - -#, python-format -msgid "PoolName must be in the file %(fileName)s." -msgstr "Název zásoby musí být v souboru %(fileName)s." - -#, python-format -msgid "Prepare clone failed for %s." -msgstr "Příprava klonu selhala pro %s." - -msgid "Primary IP must be set!" -msgstr "Hlavní IP musí být nastavena!" - -msgid "Problem cleaning incomplete backup operations." -msgstr "Při čištění nedokončených záložních operací se vyskytl problém." - -#, python-format -msgid "Problem cleaning temp volumes and snapshots for backup %(bkup)s." -msgstr "" -"Při čištění dočasných svazků a snímků zálohy %(bkup)s se vyskytl problém." - -#, python-format -msgid "Problem cleaning up backup %(bkup)s." -msgstr "Při čištění zálohy %(bkup)s se vyskytl problém." - -#, python-format -msgid "" -"Purity host %(host_name)s is managed by Cinder but CHAP credentials could " -"not be retrieved from the Cinder database." -msgstr "" -"Cinder spravuje hostitele Purity %(host_name)s, ale přihlašovací údaje CHAP " -"nemohly být získány z databáze Cinder." 
- -#, python-format -msgid "" -"Purity host %(host_name)s is not managed by Cinder and can't have CHAP " -"credentials modified. Remove IQN %(iqn)s from the host to resolve this issue." -msgstr "" -"Cinder nespravuje hostitele Purity %(host_name)s a proto nelze měnit jeho " -"přihlašovací údaje CHAP. Pro vyřešení problému odstraňte IQN %(iqn)s z " -"hostitele." - -#, python-format -msgid "Qemu-img is not installed. OSError: command is %(cmd)s." -msgstr "Qemu-img není nainstalováno. Chyba OS: příkaz je %(cmd)s." - -#, python-format -msgid "" -"Quota exceeded for %(s_pid)s, tried to extend volume by %(s_size)sG, " -"(%(d_consumed)dG of %(d_quota)dG already consumed)." -msgstr "" -"U %(s_pid)s překročena kvóta, pokus o rozšíření svazku o %(s_size)sG, (již " -"využíváno (%(d_consumed)dG z %(d_quota)dG)." - -#, python-format -msgid "REST Not Available: %s" -msgstr "REST není dostupné: %s" - -#, python-format -msgid "Re-throwing Exception %s" -msgstr "Znovu vyvolávání výjimky %s" - -#, python-format -msgid "Read response raised an exception: %s." -msgstr "Čtení odpovědi vyvolalo výjimku: %s." - -msgid "Recovered model server connection!" -msgstr "Obnoveno připojení modelového serveru!" - -#, python-format -msgid "Recovering from a failed execute. Try number %s" -msgstr "Obnovování ze selhaného spuštění. Pokus číslo %s" - -msgid "Replication must be specified as ' True' or ' False'." -msgstr "Replikace musí být zadána jako ' True' nebo ' False'." - -msgid "" -"Requested to setup thin provisioning, however current LVM version does not " -"support it." -msgstr "" -"Žádost o nastavení mělkého poskytování, ale současná verze LVM ho " -"nepodporuje." - -#, python-format -msgid "Resizing %s failed. Cleaning volume." -msgstr "Změna velikosti %s selhala. Probíhá čištění svazku." - -#, python-format -msgid "Restore to volume %(volume)s finished with error - %(error)s." -msgstr "Obnovení svazku %(volume)s bylo dokončeno s chybou - %(error)s." 
- -#, python-format -msgid "Retry %(retry)s times: %(method)s Failed %(rc)s: %(reason)s" -msgstr "Pokus proveden %(retry)skrát: %(method)s selhala %(rc)s: %(reason)s" - -msgid "Retype volume error." -msgstr "Chyba přetypování svazku." - -#, python-format -msgid "" -"SLO: %(slo)s is not valid. Valid values are Bronze, Silver, Gold, Platinum, " -"Diamond, Optimized, NONE." -msgstr "" -"SLO: %(slo)s není platné. Platné hodnoty jsou Bronze, Silver, Gold, " -"Platinum, Diamond, Optimized, NONE." - -msgid "" -"ScVolume returned success with empty payload. Attempting to locate volume" -msgstr "" -"SCVolume předal zprávu o úspěchu s prázdným obsahem. Pokus o nalezení svazku" - -#, python-format -msgid "Server Busy retry request: %s" -msgstr "Server zaneprázdněn, žádost o opakování: %s" - -#, python-format -msgid "Setting QoS for %s failed" -msgstr "Nastavení QoS pro %s selhalo" - -#, python-format -msgid "" -"Share %s ignored due to invalid format. Must be of form address:/export." -msgstr "" -"Sdílení %s ignorováno kvůli neplatnému formátu. Musí být ve formátu adresa:/" -"export." - -#, python-format -msgid "Sheepdog is not installed. OSError: command is %s." -msgstr "Sheepdog není nainstalován. Chyba OS: příkaz je %s." - -#, python-format -msgid "" -"Skipping remove_export. No iscsi_target ispresently exported for volume: %s" -msgstr "" -"Odstranění exportu přeskočeno. Žádný cíl iscsi není v současnosti exportován " -"pro svazek: %s" - -#, python-format -msgid "Snapshot \"%s\" already exists." -msgstr "Snímek \"%s\" již existuje." - -#, python-format -msgid "" -"Snapshot \"%s\" not found. Please check the results of \"dog vdi list\"." -msgstr "" -"Snímek \"%s\" nenalezen. Prosím zkontrolujte výstup příkazu \"dog vdi list\"." - -#, python-format -msgid "Snapshot %s: create failed" -msgstr "Vytvoření snímku %s selhalo" - -#, python-format -msgid "Snapshot %s: has clones" -msgstr "Snímek %s má klony" - -msgid "Snapshot did not exist. 
It will not be deleted" -msgstr "Snímek neexistuje. Nebude smazán" - -#, python-format -msgid "" -"Source CG %(source_cg)s not found when creating consistency group %(cg)s " -"from source." -msgstr "" -"Při vytváření skupiny %(cg)s ze zdroje nebyla nalezena zdrojová skupina " -"jednotnosti %(source_cg)s." - -#, python-format -msgid "Source snapshot %(snapshot_id)s cannot be found." -msgstr "Zdrojový snímek %(snapshot_id)s nelze nalézt." - -#, python-format -msgid "Source snapshot cannot be found for target volume %(volume_id)s." -msgstr "Zdrojový snímek pro cílový svazek %(volume_id)s nenalezen." - -#, python-format -msgid "Source volume %s not ready!" -msgstr "Zdrojový svazek %s není připraven!" - -#, python-format -msgid "Source volumes cannot be found for target volume %(volume_id)s." -msgstr "Zdrojové svazky pro cílový svazek %(volume_id)s nenalezeny. " - -#, python-format -msgid "" -"Src Volume \"%s\" not found. Please check the results of \"dog vdi list\"." -msgstr "" -"Zdrojový svazek \"%s\" nenalezen. Prosím zkontrolujte výstup příkazu \"dog " -"vdi list\"." - -#, python-format -msgid "StdErr :%s" -msgstr "Chybový výstup :%s" - -#, python-format -msgid "StdOut :%s" -msgstr "Standardní výstup :%s" - -#, python-format -msgid "Storage Profile %s was not found." -msgstr "Profil úložiště %s nebyl nalezen." - -#, python-format -msgid "Storage profile: %s cannot be found in vCenter." -msgstr "Profil úložiště: %s nenalezen ve vCenter." - -#, python-format -msgid "TSM [%s] not found in CloudByte storage." -msgstr "TSM [%s] nenalezeno v úložišti CloudByte." - -msgid "The Flexvisor service is unavailable." -msgstr "Služba Flexvisor je nedostupná." - -#, python-format -msgid "The NFS Volume %(cr)s does not exist." -msgstr "Svazek NFS %(cr)s neexistuje." - -msgid "The connector does not contain the required information." -msgstr "Konektor neobsahuje požadované informace." 
- -msgid "" -"The connector does not contain the required information: initiator is missing" -msgstr "Konektor neobsahuje požadované informace: zavaděč chybí" - -msgid "" -"The connector does not contain the required information: wwpns is missing" -msgstr "Konektor neobsahuje požadované informace: wwpns chybí" - -msgid "The snapshot cannot be deleted because it is a clone point." -msgstr "Snímek nemůže být smazán protože je to bod klonování." - -#, python-format -msgid "" -"The source array : %(sourceArraySerialNumber)s does not match the target " -"array: %(targetArraySerialNumber)s skipping storage-assisted migration." -msgstr "" -"Zdrojové pole : %(sourceArraySerialNumber)s se neshoduje cílovému poli: " -"%(targetArraySerialNumber)s, přesun za pomocí úložiště bude přeskočen." - -#, python-format -msgid "" -"The source array : %(sourceArraySerialNumber)s does not match the target " -"array: %(targetArraySerialNumber)s, skipping storage-assisted migration." -msgstr "" -"Zdrojové pole : %(sourceArraySerialNumber)s se neshoduje cílovému poli: " -"%(targetArraySerialNumber)s, přesun za pomocí úložiště bude přeskočen." - -#, python-format -msgid "The source volume %(volume_id)s cannot be found." -msgstr "Zdrojový svazek %(volume_id)s nelze nalézt." - -#, python-format -msgid "The volume driver requires %(data)s in the connector." -msgstr "Ovladač svazku vyžaduje %(data)s v konektoru." - -msgid "The volume driver requires the iSCSI initiator name in the connector." -msgstr "Ovladač svazku vyžaduje název zavaděče iSCSI v konektoru." - -#, python-format -msgid "There are no valid hosts available in configured cluster(s): %s." -msgstr "V nastaveném clusteru nejsou dostupní žádní platní hostitele: %s." - -#, python-format -msgid "There is no valid datastore satisfying requirements: %s." -msgstr "Žádné platné datové úložiště splňující požadavky: %s." - -msgid "There must be at least one valid replication device configured." 
-msgstr "Musí být zadáno alespoň jedno platné replikační zařízení." - -msgid "This usually means the volume was never successfully created." -msgstr "To většinou znamená, že svazek nikdy nebyl úspěšně vytvořen." - -msgid "Tiering Policy is not supported on this array." -msgstr "Zásada vrstvení není podporována na tomto poli." - -#, python-format -msgid "Timed out deleting %s!" -msgstr "Při mazání %s vypršel časový limit!" - -#, python-format -msgid "Trying to create snapshot by non-existent LV: %s" -msgstr "Pokus o vytvoření svazku z neexistujícího logického svazku. %s" - -#, python-format -msgid "URLError: %s" -msgstr "Chyba URL: %s" - -#, python-format -msgid "Unable to create folder path %s" -msgstr "Nelze vytvořit cestu složky %s" - -#, python-format -msgid "" -"Unable to create or get default storage group for FAST policy: " -"%(fastPolicyName)s." -msgstr "" -"Nelze vytvořit nebo získat výchozí skupinu úložiště pro zásadu FAST: " -"%(fastPolicyName)s." - -#, python-format -msgid "Unable to create volume %s from replay" -msgstr "Nelze vytvořit svazek %s ze zpětného načtení" - -#, python-format -msgid "Unable to create volume. Volume driver %s not initialized" -msgstr "Nelze vytvořit svazek. Ovladač svazku %s není zaveden" - -msgid "Unable to delete busy volume." -msgstr "Nelze smazat zaneprázdněný svazek." - -#, python-format -msgid "Unable to delete due to existing snapshot for volume: %s" -msgstr "Nelze smazat kvůli existujícímu snímku svazku. %s" - -msgid "" -"Unable to delete the destination volume during volume migration, (NOTE: " -"database record needs to be deleted)." -msgstr "" -"Nelze smazat cílový svazek během přesunování, (POZNÁMKA: záznam v databázi " -"je třeba smazat)." - -#, python-format -msgid "Unable to determine whether %(volumeName)s is composite or not." -msgstr "Nelze zjistit zda %(volumeName)s je složený nebo ne." 
- -#, python-format -msgid "Unable to find VG: %s" -msgstr "Nelze najít skupinu svazku: %s" - -#, python-format -msgid "" -"Unable to find default storage group for FAST policy : %(fastPolicyName)s." -msgstr "" -"Nelze najít výchozí skupinu úložiště pro zásadu FAST : %(fastPolicyName)s." - -#, python-format -msgid "Unable to find service: %(service)s for given host: %(host)s." -msgstr "Nelze najít službu: %(service)s pro daného hostitele: %(host)s." - -msgid "Unable to get associated pool of volume." -msgstr "Nelze získat přidruženou zásobu svazku." - -#, python-format -msgid "Unable to get default storage group %(defaultSgName)s." -msgstr "Nelze získat výchozí skupinu úložiště %(defaultSgName)s." - -msgid "Unable to get device mapping from network." -msgstr "Nelze získat mapování zařízení ze sítě." - -#, python-format -msgid "Unable to get policy rule for fast policy: %(fastPolicyName)s." -msgstr "Nelze získat pravidlo zásady pro zásadu FAST: %(fastPolicyName)s." - -#, python-format -msgid "Unable to locate Volume Group %s" -msgstr "Nelze najít skupinu svazku %s" - -#, python-format -msgid "Unable to locate snapshot %s" -msgstr "Nelze nalézt snímek %s" - -#, python-format -msgid "Unable to manage existing snapshot. Volume driver %s not initialized." -msgstr "Nelze spravovat existující snímek. Ovladač svazku %s není zaveden." - -#, python-format -msgid "Unable to manage existing volume. Volume driver %s not initialized." -msgstr "Nelze spravovat existující svazek. Ovladač svazku %s není zaveden." - -#, python-format -msgid "Unable to map %(vol)s to %(srv)s" -msgstr "Nelze mapovat %(vol)s do %(srv)s" - -#, python-format -msgid "Unable to rename lun %s on array." -msgstr "Nelze přejmenovat lun %s v poli." - -#, python-format -msgid "Unable to rename the logical volume for volume %s." -msgstr "Nelze přejmenovat logický svazek ve svazku %s." 
- -#, python-format -msgid "Unable to rename the logical volume for volume: %s" -msgstr "Nelze přejmenovat logický svazek ve svazku: %s" - -#, python-format -msgid "Unable to retrieve VolumeConfiguration: %s" -msgstr "Nelze získat nastavení svazku: %s" - -#, python-format -msgid "Unable to retrieve pool instance of %(poolName)s on array %(array)s." -msgstr "Nelze získat instanci zásoby %(poolName)s v poli %(array)s." - -#, python-format -msgid "Unable to terminate volume connection: %(err)s." -msgstr "Nelze ukončit připojení ke svazku: %(err)s." - -msgid "Unexpected build error:" -msgstr "Neočekávaná chyba při sestavování:" - -msgid "Unexpected error occurs in horcm." -msgstr "V horcm se vyskytly neočekávané chyby." - -msgid "Unexpected error occurs in snm2." -msgstr "V snm2 se vyskytly neočekávané chyby." - -#, python-format -msgid "Unexpected error when retype() revert tried to deleteVolumeSet(%s)" -msgstr "" -"Neočekávaná chyba když se vracel pokus přetypování o smazání sady svazků (%s)" - -#, python-format -msgid "Unexpected error when retype() tried to deleteVolumeSet(%s)" -msgstr "" -"Neočekávaná chyba když přetypování se pokusilo o smazání sady svazků (%s)" - -#, python-format -msgid "Unexpected error while invoking web service. Error - %s." -msgstr "" -"Při volání internetové služby se objevila neočekávaná chyba. Chyba - %s." - -#, python-format -msgid "Unknown exception in post clone resize LUN %s." -msgstr "Neznámá výjimka při zvětšení LUN %s po klonování." - -#, python-format -msgid "Unrecognized Login Response: %s" -msgstr "Nerozpoznaná odpověď přihlášení: %s" - -#, python-format -msgid "" -"Update consistency group failed to add volume-%(volume_id)s: VolumeNotFound." -msgstr "" -"Aktualizace skupiny jednotnosti nemohla přidat svazek %(volume_id)s: Svazek " -"nenalezen." - -#, python-format -msgid "" -"Update consistency group failed to remove volume-%(volume_id)s: " -"VolumeNotFound." 
-msgstr "" -"Aktualizace skupiny jednotnosti nemohla odstranit svazek %(volume_id)s: " -"Svazek nenalezen." - -msgid "Update snapshot usages failed." -msgstr "Aktualizace využití snímku selhala." - -msgid "Update volume model for transfer operation failed." -msgstr "Aktualizace modelu svazku pro operaci přenosu selhala." - -#, python-format -msgid "Upload volume to image encountered an error (image-id: %(image_id)s)." -msgstr "" -"Při nahrávání svazku do obrazu nastala chyba (id obrazu: %(image_id)s)." - -msgid "User does not have permission to change Storage Profile selection." -msgstr "Uživatel nemá oprávnění por změnu výběru profilu úložiště." - -msgid "VGC-CLUSTER command blocked and cancelled." -msgstr "Příkaz clusteru VGC zablokován a zrušen." - -#, python-format -msgid "Version string '%s' is not parseable" -msgstr "Řetězec verze '%s' nelze zpracovat" - -#, python-format -msgid "Virtual Volume Set %s does not exist." -msgstr "Sada virtuálního svazku %s neexistuje." - -#, python-format -msgid "Virtual disk device of backing: %s not found." -msgstr "Zařízení zálohy virtuálního disku. %s nenalezeno." - -#, python-format -msgid "Vol copy job status %s." -msgstr "Stav úkolu kopírování svazku: %s." - -#, python-format -msgid "Volume \"%s\" not found. Please check the results of \"dog vdi list\"." -msgstr "" -"Svazek \"%s\" nenalezen. Prosím zkontrolujte výstup příkazu \"dog vdi list\"." - -#, python-format -msgid "" -"Volume %(name)s is not suitable for storage assisted migration using retype." -msgstr "" -"Svazek %(name)s není vhodný pro přesun využitím úložiště pomocí přetypování." - -#, python-format -msgid "Volume %(name)s not found on the array." -msgstr "Svazek %(name)s nenalezen v poli." - -#, python-format -msgid "Volume %(name)s not found on the array. No volume to delete." -msgstr "Svazek %(name)s nenalezen v poli. Žádný svazek ke smazání." - -#, python-format -msgid "" -"Volume %(name)s not found on the array. No volume to migrate using retype." 
-msgstr "" -"Svazek %(name)s nenalezen v poli. Žádný svazek pro přesun pomocí přetypování." - -#, python-format -msgid "" -"Volume %(volumeid)s failed to send assign command, ret: %(status)s output: " -"%(output)s" -msgstr "" -"Svazek %(volumeid)s nemohl odeslat příkaz pro přidělení, vráceno: " -"%(status)s, výstup: %(output)s" - -#, python-format -msgid "Volume %s doesn't exist on array." -msgstr "Svazek %s neexistuje v poli." - -#, python-format -msgid "Volume %s, not found on SF Cluster." -msgstr "Svazek %s nebyl nalezen v clusteru SF." - -#, python-format -msgid "Volume %s: create failed" -msgstr "Vytvoření svazku %s selhalo" - -#, python-format -msgid "" -"Volume %s: driver error when trying to retype, falling back to generic " -"mechanism." -msgstr "" -"Svazek %s: chyba ovladače při pokusu o přetypování, bude použit obecný " -"mechanismus." - -#, python-format -msgid "Volume %s: manage failed." -msgstr "Správa svazku %s selhala." - -#, python-format -msgid "Volume %s: rescheduling failed" -msgstr "Znovu naplánování svazku %s selhalo" - -#, python-format -msgid "Volume %s: update volume state failed." -msgstr "Aktualizace stavu svazku %s selhala." - -#, python-format -msgid "" -"Volume : %(volumeName)s has not been added to target storage group " -"%(storageGroup)s." -msgstr "" -"Svazek : %(volumeName)s nebyl přidán do cílové skupiny úložiště " -"%(storageGroup)s." - -#, python-format -msgid "" -"Volume : %(volumeName)s has not been removed from source storage group " -"%(storageGroup)s." -msgstr "" -"Svazek : %(volumeName)s nebyl odstraněn ze zdrojové skupiny úložiště " -"%(storageGroup)s." - -#, python-format -msgid "" -"Volume : %(volumeName)s. was not successfully migrated to target pool " -"%(targetPoolName)s." -msgstr "" -"Svazek : %(volumeName)s nebyl úspěšně přesunut do cílové zásoby " -"%(targetPoolName)s." - -#, python-format -msgid "" -"Volume ID %s was not found on the SolidFire Cluster while attempting " -"accept_transfer operation!" 
-msgstr "" -"Svazek s ID %s nebyl nalezen v clusteru SoldFire, při pokusu o provedení " -"operace přijetí přenosu!" - -#, python-format -msgid "" -"Volume ID %s was not found on the SolidFire Cluster while attempting " -"attach_volume operation!" -msgstr "" -"Svazek s ID %s nebyl nalezen v clusteru SoldFire, při pokusu o provedení " -"operace připojení svazku!" - -#, python-format -msgid "" -"Volume ID %s was not found on the SolidFire Cluster while attempting " -"delete_volume operation!" -msgstr "" -"Svazek s ID %s nebyl nalezen v clusteru SoldFire, při pokusu o provedení " -"operace smazání svazku!" - -#, python-format -msgid "" -"Volume ID %s was not found on the SolidFire Cluster while attempting " -"detach_volume operation!" -msgstr "" -"Svazek s ID %s nebyl nalezen v clusteru SoldFire, při pokusu o provedení " -"operace odpojení svazku!" - -#, python-format -msgid "" -"Volume ID %s was not found on the SolidFire Cluster while attempting " -"extend_volume operation!" -msgstr "" -"Svazek s ID %s nebyl nalezen v clusteru SoldFire, při pokusu o provedení " -"operace rozšíření svazku!" - -#, python-format -msgid "" -"Volume ID %s was not found on the zfssa device while attempting " -"delete_volume operation." -msgstr "" -"Svazek s ID %s nebyl nalezen na zařízení zfssa při pokusu o operaci smazání " -"svazku." - -#, python-format -msgid "Volume already exists. %s" -msgstr "Svazek již existuje. %s" - -msgid "Volume did not exist. It will not be deleted" -msgstr "Svazek neexistuje. Nebude smazán" - -#, python-format -msgid "Volume driver %s not initialized" -msgstr "Ovladač svazku %s není zaveden" - -msgid "Volume in unexpected state" -msgstr "Svazek je v neočekávaném stavu" - -#, python-format -msgid "Volume in unexpected state %s, expected awaiting-transfer" -msgstr "Svazek v neočekávaném stavu %s, očekáván awaiting-transfer" - -#, python-format -msgid "Volume migration failed due to exception: %(reason)s." -msgstr "Přesun svazku selhal kvůli výjimce: %(reason)s." 
- -msgid "Volume must be detached for clone operation." -msgstr "Svazek musí být pro operaci klonování odpojen." - -#, python-format -msgid "Volume size \"%sG\" is too large." -msgstr "Velikost svazku \"%sG\" je příliš velká." - -#, python-format -msgid "VolumeType %s deletion failed, VolumeType in use." -msgstr "Mazání typu svazku %s selhalo, typ je používán." - -#, python-format -msgid "" -"WebDAV operation failed with error code: %(code)s reason: %(reason)s Retry " -"attempt %(retry)s in progress." -msgstr "" -"Operace WebDAV selhala s chybovým kódem: %(code)s, důvod: %(reason)s. " -"Probíhá pokus %(retry)s." - -#, python-format -msgid "WebDAV returned with %(code)s error during %(method)s call." -msgstr "WebDAV předal chybu %(code)s během volání %(method)s." - -#, python-format -msgid "" -"Workload: %(workload)s is not valid. Valid values are DSS_REP, DSS, OLTP, " -"OLTP_REP, NONE." -msgstr "" -"Vytížení: %(workload)s není platné. Platné hodnoty jsou DSS_REP, DSS, OLTP, " -"OLTP_REP, NONE." - -msgid "_find_mappings: volume is not active" -msgstr "Nalezení mapování: svazek není aktivní" - -#, python-format -msgid "" -"_rm_vdisk_copy_op: Volume %(vol)s does not have the specified vdisk copy " -"operation: orig=%(orig)s new=%(new)s." -msgstr "" -"Odstranění operace kopírování virtuálního disku: Svazek %(vol)s nemá zadanou " -"operaci kopírování: původní=%(orig)s, nové=%(new)s." - -#, python-format -msgid "" -"_rm_vdisk_copy_op: Volume %(vol)s metadata does not have the specified vdisk " -"copy operation: orig=%(orig)s new=%(new)s." -msgstr "" -"Odstranění operace kopírování virtuálního disku: Popisná data svazku %(vol)s " -"nemají zadanou operaci kopírování: původní=%(orig)s, nové=%(new)s." - -#, python-format -msgid "" -"_rm_vdisk_copy_op: Volume %s does not have any registered vdisk copy " -"operations." -msgstr "" -"Odstranění operace kopírování virtuálního disku: Svazek %s nemají žádné " -"registrované operace kopírování virtuálního disku." 
- -#, python-format -msgid "" -"_rm_vdisk_copy_op: Volume metadata %s does not have any registered vdisk " -"copy operations." -msgstr "" -"Odstranění operace kopírování virtuálního disku: Popisná data svazku %s " -"nemají žádné registrované operace kopírování virtuálního disku." - -#, python-format -msgid "" -"_unmap_vdisk_from_host: No mapping of volume %(vol_name)s to host " -"%(host_name)s found." -msgstr "" -"Zrušení mapování virtuálního disku na hostiteli: Nenalezeno žádné mapování " -"svazku %(vol_name)s k hostiteli %(host_name)s." - -#, python-format -msgid "_wait_for_job_complete failed after %(retries)d tries." -msgstr "Čekání na dokončení úkolu selhalo po %(retries)d pokusech." - -#, python-format -msgid "_wait_for_sync failed after %(retries)d tries." -msgstr "Čekání na synchronizaci selhalo po %(retries)d pokusech." - -#, python-format -msgid "" -"backup: %(vol_id)s failed to remove backup hardlink from %(vpath)s to " -"%(bpath)s.\n" -"stdout: %(out)s\n" -" stderr: %(err)s." -msgstr "" -"záloha: %(vol_id)s nemohl odstranit pevný odkaz na zálohu z %(vpath)s do " -"%(bpath)s.\n" -"standardní výstup: %(out)s\n" -"chybový výstup: %(err)s" - -#, python-format -msgid "can't create 2 volumes with the same name, %s" -msgstr "Nelze vytvořit 2 svazky se stejným názvem, %s" - -msgid "cinder-rtstool is not installed correctly" -msgstr "cinder-rtstool není správně nainstalováno" - -#, python-format -msgid "" -"delete: %(vol_id)s failed with stdout: %(out)s\n" -" stderr: %(err)s" -msgstr "" -"smazání: %(vol_id)s selhalo, standardní výstup: %(out)s.\n" -"chybový výstup: %(err)s." - -#, python-format -msgid "ensure_export: Volume %s not found on storage." -msgstr "Zajištění exportu: Svazek %s nebyl nalezen v úložišti." - -#, python-format -msgid "error opening rbd image %s" -msgstr "chyba při otevírání obrazu rbd %s" - -msgid "error refreshing volume stats" -msgstr "Při obnově statistik svazku došlo k chybě" - -msgid "horcm command timeout." 
-msgstr "Příkazu horcm vypršel časový limit." - -#, python-format -msgid "" -"initialize_connection: Failed to collect return properties for volume " -"%(vol)s and connector %(conn)s." -msgstr "" -"Zavedení spojení: Shromáždění vlastností pro návrat svazku %(vol)s a " -"konektoru %(conn)s selhalo." - -#, python-format -msgid "" -"initialize_connection: Failed to collect return properties for volume " -"%(vol)s and connector %(conn)s.\n" -msgstr "" -"Zavedení spojení: Shromáždění vlastností pro návrat svazku %(vol)s a " -"konektoru %(conn)s selhalo.\n" - -msgid "iscsi_ip_address must be set!" -msgstr "iscsi_ip_address musí být nastavena!" - -msgid "manage_existing: No source-name in ref!" -msgstr "Správa existujících: V odkazu není název zdroje!" - -#, python-format -msgid "manage_existing_get_size: %s does not exist!" -msgstr "Správa existujících: Získání velikosti:%s neexistuje!" - -msgid "manage_existing_get_size: No source-name in ref!" -msgstr "Správa existujících: Získání velikosti: V odkazu není název zdroje!" - -msgid "model server went away" -msgstr "modelový server je nedostupný" - -#, python-format -msgid "modify volume: %s does not exist!" -msgstr "změna svazku: %s neexistuje!" - -msgid "san ip must be configured!" -msgstr "san ip musí být nastaveno!" - -msgid "san_login must be configured!" -msgstr "san_login musí být nastaveno!" - -msgid "san_password must be configured!" -msgstr "san_password musí být nastaveno!" - -#, python-format -msgid "single_user auth mode enabled, but %(param)s not set" -msgstr "" -"Povolen režim ověření pomocí jednoho uživatele, ale %(param)s není nastaveno" - -msgid "snm2 command timeout." -msgstr "Příkazu snm2 vypršel časový limit." - -msgid "" -"storwize_svc_multihostmap_enabled is set to False, not allowing multi host " -"mapping." -msgstr "" -"storwize_svc_multihostmap_enabled je nastaven na False, neumožňující " -"mapování více hostitelů." - -#, python-format -msgid "unmanage: Volume %s does not exist!" 
-msgstr "Zrušení správy: Svazek %s neexistuje!" - -msgid "" -"zfssa_replication_ip not set in cinder.conf. zfssa_replication_ip is needed " -"for backend enabled volume migration. Continuing with generic volume " -"migration." -msgstr "" -"zfssa_replication_ip není nastaveno v cinder.conf. zfssa_replication_ip not " -"je potřebné pro přesun svazku za pomoci podpůrné vrstvy. Bude použita obecná " -"metoda přesunutí svazku." diff --git a/cinder/locale/cs/LC_MESSAGES/cinder-log-info.po b/cinder/locale/cs/LC_MESSAGES/cinder-log-info.po deleted file mode 100644 index 85fd2fcb0..000000000 --- a/cinder/locale/cs/LC_MESSAGES/cinder-log-info.po +++ /dev/null @@ -1,2763 +0,0 @@ -# Translations template for cinder. -# Copyright (C) 2015 ORGANIZATION -# This file is distributed under the same license as the cinder project. -# -# Translators: -# OpenStack Infra , 2015. #zanata -# Zbyněk Schwarz , 2015. #zanata -# Andreas Jaeger , 2016. #zanata -msgid "" -msgstr "" -"Project-Id-Version: cinder 9.0.0.0rc2.dev178\n" -"Report-Msgid-Bugs-To: https://bugs.launchpad.net/openstack-i18n/\n" -"POT-Creation-Date: 2016-10-06 03:18+0000\n" -"MIME-Version: 1.0\n" -"Content-Type: text/plain; charset=UTF-8\n" -"Content-Transfer-Encoding: 8bit\n" -"PO-Revision-Date: 2015-12-22 01:35+0000\n" -"Last-Translator: Zbyněk Schwarz \n" -"Language: cs\n" -"Plural-Forms: nplurals=3; plural=(n==1) ? 0 : (n>=2 && n<=4) ? 
1 : 2;\n" -"Generated-By: Babel 2.0\n" -"X-Generator: Zanata 3.7.3\n" -"Language-Team: Czech\n" - -#, python-format -msgid "\t%(name)-35s : %(value)s" -msgstr "\t%(name)-35s : %(value)s" - -#, python-format -msgid "\t%(param)-35s : %(value)s" -msgstr "\t%(param)-35s : %(value)s" - -#, python-format -msgid "\t%(prefix)-35s : %(version)s" -msgstr "\t%(prefix)-35s : %(version)s" - -#, python-format -msgid "\t%(request)-35s : %(value)s" -msgstr "\t%(request)-35s : %(value)s" - -#, python-format -msgid "" -"\n" -"\n" -"\n" -"\n" -"Request URL: %(url)s\n" -"\n" -"Call Method: %(method)s\n" -"\n" -"Request Data: %(data)s\n" -"\n" -"Response Data:%(res)s\n" -"\n" -msgstr "" -"\n" -"\n" -"\n" -"\n" -"URL požadavku: %(url)s\n" -"\n" -"Metoda volání: %(method)s\n" -"\n" -"Požadovaná data: %(data)s\n" -"\n" -"Vrácená data:%(res)s\n" -"\n" - -#, python-format -msgid "%(method)s %(url)s" -msgstr "%(method)s %(url)s" - -#, python-format -msgid "%(url)s returned a fault: %(e)s" -msgstr "%(url)s vrátilo chybu: %(e)s" - -#, python-format -msgid "%(url)s returned with HTTP %(status)d" -msgstr "%(url)s vrácena s HTTP %(status)d" - -#, python-format -msgid "%(volume)s assign type fibre_channel, properties %(properties)s" -msgstr "%(volume)s má typ přidělení kanál fibre, vlastnosti %(properties)s" - -#, python-format -msgid "%s is already umounted" -msgstr "%s již je odpojeno" - -#, python-format -msgid "3PAR driver cannot perform migration. Retype exception: %s" -msgstr "Ovladač 3PAR nemůže provést přesun. Výjimka při přetypování: %s" - -#, python-format -msgid "3PAR vlun %(name)s not found on host %(host)s" -msgstr "3PAR vlun %(name)s nenalezen v hostiteli %(host)s" - -#, python-format -msgid "" -"3PAR vlun for volume '%(name)s' was deleted, but the host '%(host)s' was not " -"deleted because: %(reason)s" -msgstr "" -"3PAR vlun pro svazek '%(name)s' byl smazán, ale hostitel '%(host)s' nebyl, " -"protože: %(reason)s" - -#, python-format -msgid "AUTH properties: %s." 
-msgstr "Vlastnosti ověření: %s." - -#, python-format -msgid "Accepting transfer %s" -msgstr "Přijímání přenosu %s" - -msgid "Activate Flexvisor cinder volume driver." -msgstr "Aktivovat ovladač Flexvisor pro svazek cinder." - -#, python-format -msgid "Add volume response: %s" -msgstr "Odpověď přidání svazku: %s" - -#, python-format -msgid "Added %s to cg." -msgstr "Přidávání %s do skupiny jednotnosti." - -#, python-format -msgid "Added volume: %(volumeName)s to existing storage group %(sgGroupName)s." -msgstr "" -"Přidán svazek: %(volumeName)s do existující skupiny úložiště " -"%(sgGroupName)s. " - -#, python-format -msgid "Adding ACL to volume=%(vol)s with initiator group name %(igrp)s" -msgstr "Přidávání ACL do svazku %(vol)s se skupinou zavaděče s názvem %(igrp)s" - -#, python-format -msgid "" -"Adding volume: %(volumeName)s to default storage group for FAST policy: " -"%(fastPolicyName)s." -msgstr "" -"Přidávání svazku: %(volumeName)s do výchozí skupiny úložiště por zásadu " -"FAST: %(fastPolicyName)s." - -#, python-format -msgid "Adding volumes to cg %s." -msgstr "Přidávání svazků do skupiny jednotnosti %s." - -msgid "Attach volume completed successfully." -msgstr "Připojení svazku úspěšně dokončeno." - -msgid "Availability Zones retrieved successfully." -msgstr "Zóny dostupnosti úspěšně získány." - -#, python-format -msgid "Backend name is %s." -msgstr "Název podpůrné vrstvy je %s." - -#, python-format -msgid "Backing VM: %(backing)s renamed to %(new_name)s." -msgstr "Záloha virtuálního stroje %(backing)s přejmenována na %(new_name)s." - -msgid "Backing not available, no operation to be performed." -msgstr "Zálohování není dostupné, nebude provedena žádná operace." - -#, python-format -msgid "Backing not found, creating for volume: %s" -msgstr "Záloha nenalezena, vytváření zálohy pro svazek %s" - -#, python-format -msgid "" -"Backup base image of volume %(volume)s still has %(snapshots)s snapshots so " -"skipping base image delete." 
-msgstr "" -"Základní obraz zálohy svazku %(volume)s stále má snímky %(snapshots)s. " -"Mazání základního obrazu je přeskočeno." - -#, python-format -msgid "" -"Backup image of volume %(volume)s is busy, retrying %(retries)s more time(s) " -"in %(delay)ss." -msgstr "" -"Záložní obraz svazku %(volume)s je zaneprázdněn. Bude zopakováno ještě " -"%(retries)skrát za %(delay)ss." - -#, python-format -msgid "Backup service: %s." -msgstr "Zálohovací služba: %s." - -#, python-format -msgid "Begin backup of volume %s." -msgstr "Spuštění zálohování svazku %s." - -msgid "Begin detaching volume completed successfully." -msgstr "Zahájení odpojení svazku úspěšně dokončeno." - -msgid "CHAP authentication disabled." -msgstr "Ověření pomocí CHAP zakázáno." - -#, python-format -msgid "CONCERTO version: %s" -msgstr "Verze CONCERTO: %s" - -msgid "Calling os-brick to detach ScaleIO volume." -msgstr "Volání os-brick pro odpojení svazku ScaleIO." - -#, python-format -msgid "" -"Cannot provide backend assisted migration for volume: %s because cluster " -"exists in different management group." -msgstr "" -"Nelze poskytnout přesun za pomocí podpůrné vrstvy pro svazek: %s, protože " -"cluster existuje v jiné správní skupině." - -#, python-format -msgid "" -"Cannot provide backend assisted migration for volume: %s because the volume " -"has been exported." -msgstr "" -"Nelze poskytnout přesun za pomocí podpůrné vrstvy pro svazek: %s, protože " -"svazek byl exportován." - -#, python-format -msgid "" -"Cannot provide backend assisted migration for volume: %s because the volume " -"has snapshots." -msgstr "" -"Nelze poskytnout přesun za pomocí podpůrné vrstvy pro svazek: %s, protože " -"svazek má snímky." - -#, python-format -msgid "" -"Cannot provide backend assisted migration for volume: %s because volume does " -"not exist in this management group." -msgstr "" -"Nelze poskytnout přesun za pomocí podpůrné vrstvy pro svazek: %s, protože " -"svazek není v této správní skupině." 
- -#, python-format -msgid "" -"Cannot provide backend assisted migration for volume: %s because volume is " -"from a different backend." -msgstr "" -"Nelze poskytnout přesun za pomocí podpůrné vrstvy pro svazek: %s, protože je " -"z jiné podpůrné vrstvy." - -#, python-format -msgid "Cgsnapshot %s: creating." -msgstr "Snímek skupiny jednotnosti %s: vytváření" - -#, python-format -msgid "Change volume capacity request: %s." -msgstr "Žádost o změnu kapacity svazku: %s." - -#, python-format -msgid "Checking image clone %s from glance share." -msgstr "Kontrolování klona obrazu %s ze sdílení glance." - -#, python-format -msgid "Checking origin %(origin)s of volume %(volume)s." -msgstr "Kontrola původu %(origin)s svazku %(volume)s." - -#, python-format -msgid "" -"Cinder NFS volume with current path \"%(cr)s\" is no longer being managed." -msgstr "Svazek NFS Cinder se současnou cestou \"%(cr)s\" již není spravován." - -msgid "Cinder secure environment indicator file exists." -msgstr "Soubor indikující bezpečné prostředí Cinder existuje." - -#, python-format -msgid "CiscoFCZoneDriver - Add connection for I-T map: %s" -msgstr "CiscoFCZoneDriver - Přidáno připojení pro mapu I-T: %s" - -#, python-format -msgid "CiscoFCZoneDriver - Delete connection for I-T map: %s" -msgstr "CiscoFCZoneDriver - Smazáno připojení pro mapu I-T: %s" - -#, python-format -msgid "Cleaning cache for share %s." -msgstr "Čištění mezipaměti pro sdílení %s." - -msgid "Cleaning up incomplete backup operations." -msgstr "Čištění nedokončené operace zálohování." - -#, python-format -msgid "Clone %s created." -msgstr "Klon %s vytvořen." - -#, python-format -msgid "Cloning from cache to destination %s" -msgstr "Klonování z mezipaměti do cíle %s" - -#, python-format -msgid "Cloning from snapshot to destination %s" -msgstr "Klonování ze snímku do cíle %s" - -#, python-format -msgid "Cloning image %s from cache" -msgstr "Klonování obrazu %s z mezipaměti." 
- -#, python-format -msgid "Cloning image %s from snapshot." -msgstr "Klonování obrazu %s ze snímku." - -#, python-format -msgid "Cloning volume %(src)s to volume %(dst)s" -msgstr "Klonování svazku %(src)s do svazku %(dst)s" - -#, python-format -msgid "" -"Cloning volume from snapshot volume=%(vol)s snapshot=%(snap)s clone=" -"%(clone)s snap_size=%(size)s reserve=%(reserve)sagent-type=%(agent-type)s " -"perfpol-name=%(perfpol-name)s encryption=%(encryption)s cipher=%(cipher)s " -"multi-initiator=%(multi-initiator)s" -msgstr "" -"Klonování svazku ze svazku snímku %(vol)s, snímek %(snap)s, klon %(clone)s, " -"velikost snímku %(size)s, rezerva%(reserve)s, typ agenta %(agent-type)s, " -"název zásady výkonu %(perfpol-name)s, šifrování %(encryption)s, šifra " -"%(cipher)s, vícenásobný zavaděč %(multi-initiator)s" - -#, python-format -msgid "CloudByte API executed successfully for command [%s]." -msgstr "CloudByte API úspěšně provedeno pro příkaz [%s]." - -#, python-format -msgid "" -"CloudByte operation [%(operation)s] succeeded for volume [%(cb_volume)s]." -msgstr "" -"Operace CloudByte [%(operation)s] úspěšně provedena ve svazku " -"[%(cb_volume)s]." - -msgid "Complete-Migrate volume completed successfully." -msgstr "Dokončení přenosu svazku úspěšně provedeno." - -#, python-format -msgid "Completed: convert_to_base_volume: id=%s." -msgstr "Dokončeno: Převod na základní svazek: id=%s." - -#, python-format -msgid "" -"Connect initialization info: {driver_volume_type: fibre_channel, data: " -"%(properties)s" -msgstr "" -"informace o zavedení připojení: {typ ovladače svazku: kanál fibre, data: " -"%(properties)s" - -#, python-format -msgid "Connecting to host: %s." -msgstr "Připojování k hostiteli %s." - -#, python-format -msgid "Connecting to target host: %s for backend enabled migration." -msgstr "" -"Připojování k cílovému hostiteli %s pro přesun za pomoci podpůrné vrstvy." 
- -#, python-format -msgid "Connector returning fcnsinfo-%s" -msgstr "Konektor vrací fcnsinfo-%s" - -#, python-format -msgid "Converted %(sz).2f MB image at %(mbps).2f MB/s" -msgstr "Převeden obraz o velikosti %(sz).2f MB rychlostí %(mbps).2f MB/s" - -#, python-format -msgid "" -"Converting %(volume_name)s to full provisioning with userCPG=%(new_cpg)s" -msgstr "" -"Převádění %(volume_name)s na úplné poskytování pomocí uživatele společné " -"skupiny poskytování %(new_cpg)s" - -#, python-format -msgid "" -"Converting %(volume_name)s to thin dedup provisioning with userCPG=" -"%(new_cpg)s" -msgstr "" -"Převádění %(volume_name)s na mělké deduplikované poskytování pomocí " -"uživatele společné skupiny poskytování %(new_cpg)s" - -#, python-format -msgid "" -"Converting %(volume_name)s to thin provisioning with userCPG=%(new_cpg)s" -msgstr "" -"Převádění %(volume_name)s na mělké poskytování pomocí uživatele společné " -"skupiny poskytování %(new_cpg)s" - -msgid "Coordination backend started successfully." -msgstr "Podpůrná vrstva pro koordinaci byla úspěšně spuštěna." - -#, python-format -msgid "Copied image %(img)s to volume %(vol)s using copy offload workflow." -msgstr "" -"Obraz %(img)s zkopírován do svazku %(vol)s použitím postupu kopírování " -"zátěže." - -#, python-format -msgid "Copied image %(img)s to volume %(vol)s using local image cache." -msgstr "" -"Obraz %(img)s zkopírován do svazku %(vol)s použitím místní mezipaměti obrazu." - -#, python-format -msgid "Copied image to volume %s using regular download." -msgstr "Obraz zkopírován do svazku %s použitím normálního stažení." - -#, python-format -msgid "Copy job to dest vol %s completed." -msgstr "Úkol kopírování do cílového svazku %s byl dokončen." - -msgid "Copy volume to image completed successfully." -msgstr "Kopírování svazku do obrazu úspěšně dokončeno." - -#, python-format -msgid "Copying src vol %(src)s to dest vol %(dst)s." -msgstr "Kopírování zdrojového svazku %(src)s do cílového svazku %(dst)s." 
- -#, python-format -msgid "Could not find replica to delete of volume %(vol)s." -msgstr "Nelze najít repliku ke smazání svazku %(vol)s." - -#, python-format -msgid "Could not run dpkg-query command: %(msg)s." -msgstr "Nelze spustit příkaz dpkg-query: %(msg)s." - -#, python-format -msgid "Could not run rpm command: %(msg)s." -msgstr "Nelze spustit příkaz rpm: %(msg)s." - -#, python-format -msgid "" -"Could not update storage pool with mmchattr to %(pool)s, error: %(error)s" -msgstr "" -"Nelze aktualizovat zásobu úložiště pomocí mmchattr na %(pool)s, chyba: " -"%(error)s" - -#, python-format -msgid "" -"Couldn't find destination volume %(vol)s in the database. The entry might be " -"successfully deleted during migration completion phase." -msgstr "" -"V databázi nelze najít cílový svazek %(vol)s. Položka mohla být úspěšně " -"smazána během fáze dokončení přenosu." - -#, python-format -msgid "" -"Couldn't find the temporary volume %(vol)s in the database. There is no need " -"to clean up this volume." -msgstr "" -"V databázi nelze najít dočasný svazek %(vol)s. Není třeba tento svazek " -"čistit." - -#, python-format -msgid "Create Cloned Volume %(volume_id)s completed." -msgstr "Vytváření klonovaného svazku %(volume_id)s dokončeno." - -#, python-format -msgid "Create Consistency Group: %(group)s." -msgstr "Vytvořit skupinu jednotnosti: %(group)s." - -#, python-format -msgid "Create Volume %(volume_id)s completed." -msgstr "Vytváření svazku %(volume_id)s dokončeno." - -#, python-format -msgid "Create Volume %(volume_id)s from snapshot %(snapshot_id)s completed." -msgstr "" -"Vytváření svazku %(volume_id)s ze snímku %(snapshot_id)s bylo dokončeno." - -#, python-format -msgid "" -"Create a replica from Volume: Clone Volume: %(cloneName)s Source Volume: " -"%(sourceName)s." -msgstr "" -"Vytvoření repliky ze svazku: Klon svazku: %(cloneName)s, zdrojový svazek: " -"%(sourceName)s." - -#, python-format -msgid "Create backup finished. backup: %s." 
-msgstr "Vytváření zálohy dokončeno. Záloha: %s." - -#, python-format -msgid "Create backup started, backup: %(backup_id)s volume: %(volume_id)s." -msgstr "" -"Vytváření zálohy bylo zahájeno: záloha: %(backup_id)s svazek: %(volume_id)s." - -#, python-format -msgid "Create consistency group from source-%(source)s completed successfully." -msgstr "Vytvoření skupiny jednotnosti ze zdroje %(source)s úspěšně dokončeno." - -#, python-format -msgid "Create export done from Volume %(volume_id)s." -msgstr "Vytvoření exportu pro svazek %(volume_id)s bylo úspěšně dokončeno. " - -msgid "Create snapshot completed successfully" -msgstr "Vytvoření snímku úspěšně dokončeno" - -#, python-format -msgid "" -"Create snapshot for Consistency Group %(cgId)s cgsnapshotID: %(cgsnapshot)s." -msgstr "" -"Vytvořit snímek skupiny jednotnosti %(cgId)s, id snímku: %(cgsnapshot)s." - -#, python-format -msgid "Create snapshot from volume %s" -msgstr "Vytvořit snímek ze svazku %s" - -#, python-format -msgid "" -"Create success. Snapshot: %(snapshot)s, Snapshot ID in raid: " -"%(raid_snapshot_id)s, volume: %(volume)s." -msgstr "" -"Vytvoření bylo úspěšné. Snímek: %(snapshot)s,id snímku v raid: " -"%(raid_snapshot_id)s, svazek: %(volume)s." - -#, python-format -msgid "Create target consistency group %(targetCg)s." -msgstr "Vytvořit cílovou skupinu jednotnosti %(targetCg)s." - -#, python-format -msgid "Create volume of %s GB" -msgstr "Vytvořit svazek o %s GB" - -#, python-format -msgid "" -"Created CloudByte snapshot [%(cb_snap)s] w.r.t CloudByte volume [%(cb_vol)s] " -"and OpenStack volume [%(stack_vol)s]." -msgstr "" -"Snímek CloudByte úspěšně vytvořen [%(cb_snap)s] s ohledem na svazky " -"CloudByte [%(cb_vol)s] a OpenStack [%(stack_vol)s]." - -#, python-format -msgid "Created Consistency Group %s" -msgstr "Vytvořena skupina jednotnosti %s" - -#, python-format -msgid "" -"Created a clone [%(cb_clone)s] at CloudByte snapshot path [%(cb_snap)s] w.r." -"t parent OpenStack volume [%(stack_vol)s]." 
-msgstr "" -"Klon [%(cb_clone)s] vytvořen v cestě snímků CloudByte [%(cb_snap)s] s " -"ohledem na nadřazený svazek OpenStack [%(stack_vol)s]." - -#, python-format -msgid "Created datastore folder: %s." -msgstr "Vytvořena složka datového úložiště %s." - -#, python-format -msgid "" -"Created lun-map:\n" -"%s" -msgstr "" -"Vytvořena mapa lun:\n" -"%s" - -#, python-format -msgid "" -"Created multi-attach E-Series host group %(label)s with clusterRef " -"%(clusterRef)s" -msgstr "" -"Vytvořena skupina vícenásobného připojení hostitele E-Series '%(label)s' s " -"odkazem clusteru %(clusterRef)s" - -#, python-format -msgid "Created new initiator group name: %(igGroupName)s." -msgstr "Vytvořen nový název skupiny zavaděče: %(igGroupName)s." - -#, python-format -msgid "Created new masking view : %(maskingViewName)s." -msgstr "Vytvořeno nové zamaskování : %(maskingViewName)s." - -#, python-format -msgid "Created new storage group: %(storageGroupName)s." -msgstr "Vytvořena nová skupina úložiště: %(storageGroupName)s." - -#, python-format -msgid "Created snap grp with label %s." -msgstr "Vytvořena skupina snímku se jmenovkou %s." - -#, python-format -msgid "Created volume %(instanceId)s: %(name)s" -msgstr "Vytvořen svazek %(instanceId)s: %(name)s" - -#, python-format -msgid "Created volume %(volname)s, volume id %(volid)s." -msgstr "Svazek %(volname)s vytvořen, id svazku %(volid)s." - -msgid "Created volume successfully." -msgstr "Svazek úspěšně vytvořen." - -#, python-format -msgid "Created volume with label %s." -msgstr "Vytvořen svazek se jmenovkou %s." - -#, python-format -msgid "Creating %(volume)s on %(device)s" -msgstr "Vytváření %(volume)s na %(device)s" - -#, python-format -msgid "Creating backup of volume %(volume_id)s in container %(container)s" -msgstr "Vytváření zálohy svazku %(volume_id)s v kontejneru %(container)s" - -#, python-format -msgid "Creating cgsnapshot %(name)s." -msgstr "Vytváření snímku skupiny jednotnosti %(name)s." 
- -#, python-format -msgid "Creating clone of volume: %s" -msgstr "Vytváření klona svazku %s" - -#, python-format -msgid "Creating clone of volume: %s." -msgstr "Vytváření klona svazku %s." - -#, python-format -msgid "Creating consistency group %(name)s from cgsnapshot %(snap)s." -msgstr "Vytváření skupiny jednotnosti %(name)s ze snímku %(snap)s." - -#, python-format -msgid "" -"Creating consistency group %(name)s from source consistency group " -"%(source_cgid)s." -msgstr "" -"Vytváření skupiny jednotnosti %(name)s ze zdrojové skupiny jednotnosti " -"%(source_cgid)s." - -#, python-format -msgid "Creating consistency group %(name)s." -msgstr "Vytváření skupiny jednotnosti %(name)s." - -#, python-format -msgid "Creating host object %(host_name)r with IQN: %(iqn)s." -msgstr "Vytváření objektu hostitele %(host_name)r s IQN: %(iqn)s." - -#, python-format -msgid "Creating host object %(host_name)r with WWN: %(wwn)s." -msgstr "Vytváření objektu hostitele %(host_name)r s WWN: %(wwn)s." - -#, python-format -msgid "Creating host with ports %s." -msgstr "Vytváření hostitele s porty %s." 
- -#, python-format -msgid "Creating image snapshot %s" -msgstr "Vytváření snímku obrazu %s" - -#, python-format -msgid "Creating initiator group %(grp)s with initiator %(iname)s" -msgstr "Vytváření skupiny zavaděče %(grp)s se zavaděčem %(iname)s" - -#, python-format -msgid "Creating initiator group %(igrp)s with one initiator %(iname)s" -msgstr "Vytváření skupiny zavaděče %(igrp)s s jedním zavaděčem %(iname)s" - -#, python-format -msgid "Creating iscsi_target for volume: %s" -msgstr "Vytváření cíle iscsi pro svazek %s" - -#, python-format -msgid "" -"Creating snapshot for volume_name=%(vol)s snap_name=%(name)s " -"snap_description=%(desc)s" -msgstr "" -"Vytváření snímku pro svazek %(vol)s, název snímku: %(name)s, popis snímku " -"%(desc)s" - -#, python-format -msgid "Creating snapshot: %s" -msgstr "Vytváření snímku: %s" - -#, python-format -msgid "Creating transfer of volume %s" -msgstr "Vytváření přenosu pro svazek %s" - -#, python-format -msgid "Creating volume %s from snapshot." -msgstr "Vytváření svazku %s ze snímku." - -#, python-format -msgid "Creating volume of %(size)s GB for restore of backup %(backup_id)s." -msgstr "" -"Vytváření svazku s velikostí %(size)s GB pro obnovu zálohy %(backup_id)s." - -#, python-format -msgid "Creating volume snapshot: %s." -msgstr "Vytváření snímku svazku: %s." - -#, python-format -msgid "Creatng volume from snapshot. volume: %s" -msgstr "Vytváření svazku ze snímku. Svazek: %s" - -#, python-format -msgid "Delete Consistency Group: %(group)s." -msgstr "Smazat skupiny jednotnosti: %(group)s." - -#, python-format -msgid "Delete Snapshot %(snapshot_id)s completed." -msgstr "Mazání snímku %(snapshot_id)s dokončeno." - -#, python-format -msgid "Delete Snapshot: %(snapshot)s." -msgstr "Smazání snímku: %(snapshot)s." - -#, python-format -msgid "Delete Snapshot: %(snapshotName)s." -msgstr "Mazání snímku: %(snapshotName)s." - -#, python-format -msgid "Delete Volume %(volume_id)s completed." 
-msgstr "Mazání svazku %(volume_id)s dokončeno." - -#, python-format -msgid "Delete backup finished, backup %s deleted." -msgstr "Smazání zálohy bylo dokončeno, záloha %s smazána." - -#, python-format -msgid "Delete backup started, backup: %s." -msgstr "Smazání zálohy bylo zahájeno: záloha: %s." - -#, python-format -msgid "Delete backup with id: %s" -msgstr "Smazat zálohu s id: %s" - -#, python-format -msgid "Delete cgsnapshot %(snap_name)s for consistency group: %(group_name)s" -msgstr "Mazání snímku %(snap_name)s skupiny jednotnosti: %(group_name)s" - -#, python-format -msgid "Delete cgsnapshot with id: %s" -msgstr "Smazání snímku skupiny jednotnosti s id: %s" - -msgid "Delete consistency group completed successfully." -msgstr "Smazání skupiny jednotnosti úspěšně dokončeno." - -#, python-format -msgid "Delete consistency group with id: %s" -msgstr "Smazání skupiny jednotnosti s id: %s" - -#, python-format -msgid "" -"Delete of backup '%(backup)s' for volume '%(volume)s' finished with warning." -msgstr "" -"Smazání zálohy '%(backup)s' svazku '%(volume)s' bylo dokončeno s varováním." - -msgid "Delete snapshot completed successfully" -msgstr "Smazání snímku úspěšně dokončeno" - -#, python-format -msgid "Delete snapshot for source CG %(cgId)s cgsnapshotID: %(cgsnapshot)s." -msgstr "" -"Smazat snímek zdrojové skupiny jednotnosti %(cgId)s, id snímku: " -"%(cgsnapshot)s." - -msgid "Delete snapshot metadata completed successfully." -msgstr "Mazání popisných dat snímku úspěšně dokončeno." - -#, python-format -msgid "Delete snapshot with id: %s" -msgstr "Smazat snímek s id: %s" - -#, python-format -msgid "Delete transfer with id: %s" -msgstr "Mazání přenosu s id: %s" - -msgid "Delete volume metadata completed successfully." -msgstr "Mazání popisných dat svazku úspěšně dokončeno." - -msgid "Delete volume request issued successfully." -msgstr "Žádost o smazání svazku úspěšně vytvořena." 
- -#, python-format -msgid "Delete volume with id: %s" -msgstr "Smazat svazek s id: %s" - -#, python-format -msgid "Deleted %(row)d rows from table=%(table)s" -msgstr "Smazáno %(row)d řádků z tabulky %(table)s" - -#, python-format -msgid "" -"Deleted CloudByte snapshot [%(snap)s] w.r.t parent CloudByte volume " -"[%(cb_vol)s] and parent OpenStack volume [%(stack_vol)s]." -msgstr "" -"Snímek CloudByte úspěšně smazán [%(snap)s] s ohledem na nadřazené svazky " -"CloudByte [%(cb_vol)s] a OpenStack [%(stack_vol)s]." - -#, python-format -msgid "Deleted the VM backing: %s." -msgstr "Záloha virtuálního stroje %s smazána." - -#, python-format -msgid "Deleted vmdk file: %s." -msgstr "Smazán soubor vmdk %s." - -msgid "Deleted volume successfully." -msgstr "Svazek úspěšně smazán." - -#, python-format -msgid "Deleting Volume: %(volume)s" -msgstr "Mazání svazku: %(volume)s" - -#, python-format -msgid "Deleting backup base image='%(basename)s' of volume %(volume)s." -msgstr "Mazání základního obrazu zálohy='%(basename)s' svazku %(volume)s." - -#, python-format -msgid "Deleting deleteInitiatorGrp %s " -msgstr "Rušení mazání skupiny zavaděče %s" - -#, python-format -msgid "Deleting snapshot %(ss)s from %(pro)s" -msgstr "Mazání snímku %(ss)s z %(pro)s" - -#, python-format -msgid "Deleting snapshot %s " -msgstr "Mazání snímku %s" - -#, python-format -msgid "Deleting snapshot: %s" -msgstr "Mazání snímku: %s" - -#, python-format -msgid "Deleting stale snapshot: %s" -msgstr "Mazání starého snímku: %s" - -#, python-format -msgid "Deleting volume %s " -msgstr "Mazání svazku %s" - -#, python-format -msgid "Detach Volume, metadata is: %s." -msgstr "Odpojení svazku, popisná data jsou %s." - -msgid "Detach volume completed successfully." -msgstr "Odpojení svazku úspěšně dokončeno." - -msgid "Determined volume DB was empty at startup." -msgstr "Databáze určeného svazku byla při spuštění prázdná." - -msgid "Determined volume DB was not empty at startup." 
-msgstr "Databáze určeného svazku nebyla při spuštění prázdná." - -#, python-format -msgid "" -"Did not find the snapshot: %(name)s for backing: %(backing)s. Need not " -"delete anything." -msgstr "" -"Snímek %(name)s nenalezen pro zálohu %(backing)s. Není třeba nic mazat." - -#, python-format -msgid "Discovery ip %(disc_ip)s is found on mgmt+data subnet %(net_label)s" -msgstr "" -"Zjišťovací ip adresa %(disc_ip)s byla nalezena na datové+správní podsíti " -"%(net_label)s" - -#, python-format -msgid "Discovery ip %(disc_ip)s is used on data subnet %(net_label)s" -msgstr "" -"Zjišťovací ip adresa %(disc_ip)s je použita na datové podsíti %(net_label)s" - -#, python-format -msgid "Discovery ip %(disc_ip)s is used on subnet %(net_label)s" -msgstr "Zjišťovací ip adresa %(disc_ip)s je použita na podsíti %(net_label)s" - -#, python-format -msgid "Discovery ip %s is used on mgmt+data subnet" -msgstr "Zjišťovací ip adresa %s je použita na datové+správní podsíti" - -#, python-format -msgid "Dissociating volume %s " -msgstr "Odlučování svazku %s" - -#, python-format -msgid "Domain id is %s." -msgstr "ID domény je %s." - -#, python-format -msgid "Done copying image: %(id)s to volume: %(vol)s." -msgstr "Kopírování obrazu dokončeno: %(id)s do svazku: %(vol)s." - -#, python-format -msgid "Done copying volume %(vol)s to a new image %(img)s" -msgstr "Svazek %(vol)s zkopírován do nového obrazu %(img)s" - -#, python-format -msgid "" -"Downlevel GPFS Cluster Detected. GPFS encryption-at-rest feature not enabled " -"in cluster daemon level %(cur)s - must be at least at level %(min)s." -msgstr "" -"Zjištěn cluster GPFS nižší úrovně. Funkce šifrování při nečinnosti GPFS není " -"povolena v daemonu clusteru na úrovni %(cur)s - úroveň musí být alespoň " -"%(min)s." - -msgid "Driver initialization completed successfully." -msgstr "Zavedení ovladače úspěšně dokončeno." - -#, python-format -msgid "" -"E-series proxy API version %(version)s does not support full set of SSC " -"extra specs. 
The proxy version must be at at least %(min_version)s." -msgstr "" -"Verze API E-series proxy %(version)s nepodporuje úplnou sadu dodatečných " -"specifikací SSC. Verze proxy musí být alespoň %(min_version)s." - -#, python-format -msgid "E-series proxy API version %s does not support autosupport logging." -msgstr "Verze API E-series proxy %s nepodporuje záznam automatické podpory." - -#, python-format -msgid "EQL-driver: Setup is complete, group IP is \"%s\"." -msgstr "Ovladač EQL: Nastavení dokončeno, IP adresa skupiny je \"%s\"." - -#, python-format -msgid "EQL-driver: executing \"%s\"." -msgstr "Ovladač EQL: spouštění \"%s\"." - -#, python-format -msgid "Editing Volume %(vol)s with mask %(mask)s" -msgstr "Upravování svazku %(vol)s maskou %(mask)s" - -msgid "Embedded mode detected." -msgstr "Zjištěn režim vnoření." - -msgid "Enabling LVM thin provisioning by default because a thin pool exists." -msgstr "" -"Protože existuje mělká zásoba, je standardně povoleno mělké poskytování LVM." - -msgid "Enabling LVM thin provisioning by default because no LVs exist." -msgstr "" -"Protože neexistují žádné LV, je standardně povoleno mělké poskytování LVM." - -#, python-format -msgid "Entering extend_volume volume=%(vol)s new_size=%(size)s" -msgstr "Spouštění rozšíření svazku %(vol)s, nová velikost=%(size)s" - -#, python-format -msgid "" -"Entering initialize_connection volume=%(vol)s connector=%(conn)s location=" -"%(loc)s" -msgstr "" -"Spouštění zavedení spojení pro svazek %(vol)s, konektor=%(conn)s, umístění=" -"%(loc)s" - -#, python-format -msgid "" -"Entering terminate_connection volume=%(vol)s connector=%(conn)s location=" -"%(loc)s." -msgstr "" -"Spouštění ukončení spojení se svazkem %(vol)s, konektor=%(conn)s, umístění=" -"%(loc)s" - -#, python-format -msgid "Entering unmanage_volume volume = %s" -msgstr "Spouštění rušení správy svazku %s." 
- -#, python-format -msgid "Exploring array subnet label %s" -msgstr "Prozkoumávání jmenovky podsítě pole %s" - -#, python-format -msgid "Export record finished, backup %s exported." -msgstr "Exportování záznamu bylo dokončeno: záloha: %s exportována." - -#, python-format -msgid "Export record started, backup: %s." -msgstr "Exportování záznamu bylo zahájeno: záloha: %s." - -#, python-format -msgid "Exported lun %(vol_id)s on lun_id %(lun_id)s." -msgstr "Exportován lun %(vol_id)s s id %(lun_id)s." - -msgid "Extend volume completed successfully." -msgstr "Rozšíření svazku úspěšně dokončeno." - -msgid "Extend volume request issued successfully." -msgstr "Žádost o rozšíření svazku úspěšně vytvořena." - -#, python-format -msgid "Extending volume %s." -msgstr "Rozšiřování svazku %s." - -#, python-format -msgid "" -"FAST: capacity stats for policy %(fastPolicyName)s on array %(arrayName)s. " -"total_capacity_gb=%(total_capacity_gb)lu, free_capacity_gb=" -"%(free_capacity_gb)lu." -msgstr "" -"FAST: statistiky kapacity pro zásadu %(fastPolicyName)s v poli " -"%(arrayName)s. Celková kapacita v gb: %(total_capacity_gb)lu, volná kapacita " -"v gb: %(free_capacity_gb)lu." - -#, python-format -msgid "Failed to create host: %(name)s. Check if it exists on the array." -msgstr "Nelze vytvořit hostitele: %(name)s. Zkontrolujte zda existuje v poli." - -#, python-format -msgid "" -"Failed to create hostgroup: %(name)s. Please check if it exists on the array." -msgstr "" -"Nelze vytvořit skupinu hostitele: %(name)s. Prosím zkontrolujte zda existuje " -"v poli." 
- -#, python-format -msgid "Failed to open iet session list for %(vol_id)s: %(e)s" -msgstr "Nelze otevřít seznam sezení iet pro %(vol_id)s: %(e)s" - -#, python-format -msgid "Fault thrown: %s" -msgstr "Vyvolána chyba: %s" - -#, python-format -msgid "Fetched vCenter server version: %s" -msgstr "Získaná verze serveru vCenter: %s" - -#, python-format -msgid "Filtered targets for SAN is: %s" -msgstr "Filtrované cíle pro SAN jsou: %s" - -#, python-format -msgid "Fixing previous mount %s which was not unmounted correctly." -msgstr "Oprava předchozího připojení %s, které nebylo správně odpojeno." - -#, python-format -msgid "Flash Cache policy set to %s" -msgstr "Zásada mezipaměti Flash nastavena na %s" - -#, python-format -msgid "Flexvisor already unassigned volume %(id)s." -msgstr "Flexvisor již zrušil přidělení svazku %(id)s." - -#, python-format -msgid "Flexvisor snapshot %(id)s not existed." -msgstr "Snímek Flexvisor %(id)s neexistuje." - -#, python-format -msgid "Flexvisor succeeded to add volume %(id)s to group %(cgid)s." -msgstr "Flexvisor úspěšně přidal svazek %(id)s do skupiny %(cgid)s." - -#, python-format -msgid "Flexvisor succeeded to clone volume %(id)s." -msgstr "Flexvisor úspěšně naklonoval svazek %(id)s." - -#, python-format -msgid "Flexvisor succeeded to create volume %(id)s from snapshot." -msgstr "Flexvisor úspěšně vytvořil svazek %(id)s ze snímku." - -#, python-format -msgid "Flexvisor succeeded to create volume %(id)s." -msgstr "Flexvisor úspěšně vytvořil svazek %(id)s." - -#, python-format -msgid "Flexvisor succeeded to delete snapshot %(id)s." -msgstr "Flexvisor úspěšně smazal snímek %(id)s." - -#, python-format -msgid "Flexvisor succeeded to extend volume %(id)s." -msgstr "Flexvisor úspěšně rozšířil svazek %(id)s." - -#, python-format -msgid "Flexvisor succeeded to remove volume %(id)s from group %(cgid)s." -msgstr "Flexvisor úspěšně odstranil svazek %(id)s ze skupiny %(cgid)s." 
- -#, python-format -msgid "Flexvisor succeeded to unassign volume %(id)s." -msgstr "Flexvisor úspěšně zrušil přidělení svazku %(id)s." - -#, python-format -msgid "Flexvisor volume %(id)s does not exist." -msgstr "Svazek Flexvisor %(id)s neexistuje." - -msgid "Force upload to image is disabled, Force option will be ignored." -msgstr "Vynucení nahrání do obrazu je zakázáno, vynucení bude ignorováno." - -#, python-format -msgid "Free capacity for backend is: %(free)s, total capacity: %(total)s." -msgstr "" -"Volná kapacita podpůrné vrstvy je: %(free)s, celková kapacita: %(total)s." - -#, python-format -msgid "Generating transfer record for volume %s" -msgstr "Vytváření záznamu o přenosu pro svazek %s" - -msgid "Get all snapshots completed successfully." -msgstr "Získání všech snímků úspěšně dokončeno." - -msgid "Get all volumes completed successfully." -msgstr "Získání všech svazků úspěšně dokončeno." - -#, python-format -msgid "Get domain by name response: %s" -msgstr "Získání domény pomocí odpovědi názvem: %s" - -msgid "Get snapshot metadata completed successfully." -msgstr "Získání popisných dat snímku úspěšně dokončeno." - -msgid "Get snapshot metadata value not implemented." -msgstr "Získání hodnot popisných dat snímku není zavedeno." - -#, python-format -msgid "Get the default ip: %s." -msgstr "Získání výchozí ip adresy: %s." - -msgid "Get volume admin metadata completed successfully." -msgstr "Získání popisných dat správce svazku úspěšně dokončeno." - -msgid "Get volume image-metadata completed successfully." -msgstr "Získání popisných dat obrazu svazku úspěšně dokončeno." - -msgid "Get volume metadata completed successfully." -msgstr "Získání popisných dat svazku úspěšně dokončeno." 
- -msgid "Getting getInitiatorGrpList" -msgstr "Získávání seznamu skupin zavaděče" - -#, python-format -msgid "Getting volume information for vol_name=%s" -msgstr "Získávání informací o svazku s názvem %s" - -#, python-format -msgid "Going to perform request again %s with valid token." -msgstr "Žádost %s bude znovu vytvořena s novou známkou." - -#, python-format -msgid "HPE3PARCommon %(common_ver)s,hpe3parclient %(rest_ver)s" -msgstr "Společné části HPE3PAR %(common_ver)s, klient hp3par %(rest_ver)s" - -#, python-format -msgid "HPELeftHand API version %s" -msgstr "Verze API HPELeftHand: %s" - -#, python-format -msgid "HTTP exception thrown: %s" -msgstr "Vyvolána výjimka HTTP: %s" - -#, python-format -msgid "Hypermetro id: %(metro_id)s. Remote lun id: %(remote_lun_id)s." -msgstr "ID hypermetra: %(metro_id)s. ID vzdáleného LUN: %(remote_lun_id)s." - -#, python-format -msgid "Image %(pool)s/%(image)s is dependent on the snapshot %(snap)s." -msgstr "Obraz %(pool)s/%(image)s závisí na snímku %(snap)s." - -#, python-format -msgid "Image cloning unsuccessful for image %(image_id)s. Message: %(msg)s" -msgstr "Klonování obrazu %(image_id)s bylo neúspěšné. Zpráva: %(msg)s" - -#, python-format -msgid "Image download %(sz).2f MB at %(mbps).2f MB/s" -msgstr "Stažen obraz o velikosti %(sz).2f MB rychlostí %(mbps).2f MB/s" - -#, python-format -msgid "Image will locally be converted to raw %s" -msgstr "Obraz bude na místní úrovni převeden na prostý %s" - -#, python-format -msgid "Image-volume cache disabled for host %(host)s." -msgstr "Mezipaměť obrazu svazku zakázána pro hostitele %(host)s." - -#, python-format -msgid "Image-volume cache enabled for host %(host)s." -msgstr "Mezipaměť obrazu svazku povolena pro hostitele %(host)s." - -#, python-format -msgid "Import record id %s metadata from driver finished." -msgstr "Importování popisných dat záznamu s id %s z ovladače bylo dokončeno." - -#, python-format -msgid "Import record started, backup_url: %s." 
-msgstr "Importování záznamu bylo zahájeno: záloha: %s." - -#, python-format -msgid "Initialize connection: %(volume)s." -msgstr "Zavedení spojení: %(volume)s." - -msgid "Initialize volume connection completed successfully." -msgstr "Zavedení spojení se svazkem úspěšně dokončeno." - -#, python-format -msgid "Initialized driver %(name)s version: %(vers)s" -msgstr "Zaveden ovladač %(name)s s verzí: %(vers)s" - -msgid "Initializing extension manager." -msgstr "Zavádění správce rozšíření." - -#, python-format -msgid "" -"Initiator Name(s) %(initiatorNames)s are not on array %(storageSystemName)s." -msgstr "Názvy zavaděče %(initiatorNames)s nejsou v poli %(storageSystemName)s." - -#, python-format -msgid "" -"Initiator Name(s) %(initiatorNames)s are not on array %(storageSystemName)s. " -msgstr "" -"Názvy zavaděče %(initiatorNames)s nejsou v poli %(storageSystemName)s. " - -#, python-format -msgid "Initiator group name is %(grp)s for initiator %(iname)s" -msgstr "Název skupiny zavaděče je %(grp)s pro zavaděč %(iname)s" - -#, python-format -msgid "LUN %(id)s extended to %(size)s GB." -msgstr "LUN %(id)s rozšířen na %(size)s GB." - -#, python-format -msgid "LUN with given ref %s need not be renamed during manage operation." -msgstr "" -"LUN se zadaným odkazem %s není třeba během operace správy přejmenovávat." - -#, python-format -msgid "" -"Leaving create_volume: %(volumeName)s Return code: %(rc)lu volume dict: " -"%(name)s." -msgstr "" -"Ukončování vytváření svazku: %(volumeName)s, návratový kód: %(rc)lu, slovník " -"svazku: %(name)s." - -#, python-format -msgid "Leaving delete_volume: %(volumename)s Return code: %(rc)lu." -msgstr "Ukončování mazání svazku: %(volumename)s, návratový kód: %(rc)lu." - -#, python-format -msgid "Leaving initialize_connection: %s" -msgstr "Ukončování zavedení připojení: %s" - -#, python-format -msgid "Loaded extension: %s" -msgstr "Načteno rozšíření: %s" - -#, python-format -msgid "" -"Logical Volume not found when querying LVM info. 
(vg_name=%(vg)s, lv_name=" -"%(lv)s" -msgstr "" -"Logický svazek nenalezen při dotazování na informace LVM. (Název skupiny " -"svazku: %(vg)s, název logického svazku: %(lv)s)" - -msgid "Manage existing volume completed successfully." -msgstr "Správa existujícího svazku úspěšně dokončena." - -#, python-format -msgid "" -"Manage operation completed for LUN with new path %(path)s and uuid %(uuid)s." -msgstr "" -"Operace správy byla dokončena pro LUN s novou cestou %(path)s a uuid " -"%(uuid)s." - -#, python-format -msgid "" -"Manage operation completed for volume with new label %(label)s and wwn " -"%(wwn)s." -msgstr "" -"Operace správy byla dokončena pro svazek s novou jmenovkou %(label)s a wwn " -"%(wwn)s." - -#, python-format -msgid "Manage volume %s" -msgstr "Správa svazku %s" - -msgid "Manage volume request issued successfully." -msgstr "Žádost o správu svazku úspěšně vytvořena." - -#, python-format -msgid "Masking view %(maskingViewName)s successfully deleted." -msgstr "Maskování %(maskingViewName)s úspěšně smazáno." - -#, python-format -msgid "Migrate Volume %(volume_id)s completed." -msgstr "Přesun svazku %(volume_id)s dokončen." - -msgid "Migrate volume completed successfully." -msgstr "Přesun svazku úspěšně dokončen." - -msgid "Migrate volume completion issued successfully." -msgstr "Dokončení přesunutí svazku úspěšně vytvořeno." - -msgid "Migrate volume request issued successfully." -msgstr "Žádost o přesun svazku úspěšně vytvořena." - -#, python-format -msgid "Migrating using retype Volume: %(volume)s." -msgstr "Přesunování pomocí přetypování svazku: %(volume)s." - -#, python-format -msgid "" -"Modifying %(volume_name)s snap_cpg from %(old_snap_cpg)s to %(new_snap_cpg)s." -msgstr "" -"Změna snímku společné skupiny poskytování svazku %(volume_name)s z " -"%(old_snap_cpg)s na %(new_snap_cpg)s." 
- -#, python-format -msgid "Modifying %(volume_name)s userCPG from %(old_cpg)s to %(new_cpg)s" -msgstr "" -"Změna uživatele společné skupiny poskytování svazku %(volume_name)s z " -"%(old_cpg)s na %(new_cpg)s" - -#, python-format -msgid "Modifying %s comments." -msgstr "Měnění komentářů svazku %s" - -msgid "" -"Module PyWBEM not installed. Install PyWBEM using the python-pywbem package." -msgstr "" -"Modul PyWBEM není nainstalován. Nainstalujte ho pomocí balíčku python-" -"pywbem." - -msgid "" -"Module PyWBEM not installed. Install PyWBEM using the python-pywbem package." -msgstr "" -"Modul PyWBEM není nainstalován. Nainstalujte ho pomocí balíčku python-pywbem." - -#, python-format -msgid "Mounting volume: %s ..." -msgstr "Připojování svazku: %s ..." - -#, python-format -msgid "Mounting volume: %s succeeded" -msgstr "Připojování svazku: %s úspěšně provedeno" - -#, python-format -msgid "" -"NON-FAST: capacity stats for pool %(poolName)s on array %(arrayName)s " -"total_capacity_gb=%(total_capacity_gb)lu, free_capacity_gb=" -"%(free_capacity_gb)lu." -msgstr "" -"NON-FAST: statistiky kapacity pro zásobu %(poolName)s v poli %(arrayName)s. " -"Celková kapacita v gb: %(total_capacity_gb)lu, volná kapacita v gb: " -"%(free_capacity_gb)lu." - -msgid "Need to remove FC Zone, building initiator target map" -msgstr "Je třeba odstranit zónu FC, probíhá sestavování mapu cílů zavaděče" - -msgid "Need to remove FC Zone, building initiator target map." -msgstr "Je třeba odstranit zónu FC, probíhá sestavování mapu cílů zavaděče." - -#, python-format -msgid "" -"NetApp driver of family %(storage_family)s and protocol %(storage_protocol)s " -"loaded." -msgstr "" -"Načteny ovladač NetApp druhu %(storage_family)s a protokol " -"%(storage_protocol)s." - -#, python-format -msgid "New Cinder secure environment indicator file created at path %s." -msgstr "Nový soubor indikující bezpečné prostředí Cinder vytvořen v cestě %s." - -#, python-format -msgid "New str info is: %s." 
-msgstr "Nové informace o str jsou: %s" - -#, python-format -msgid "No dpkg-query info found for %(pkg)s package." -msgstr "Nenalezeny žádné informace balíčku %(pkg)s pomocí dpkq-query." - -#, python-format -msgid "No igroup found for initiator %s" -msgstr "Pro zavaděče %s nebyla nalezena žádná zaváděcí skupina" - -#, python-format -msgid "No iscsi target present for volume id:%(vol_id)s: %(e)s" -msgstr "Není přítomen žádný cíl iscsi pro svazek s id %(vol_id)s: %(e)s" - -#, python-format -msgid "No need to extend volume %s as it is already the requested new size." -msgstr "" -"Není třeba rozšiřovat svazek %s protože již má požadovanou novou velikost." - -#, python-format -msgid "" -"No replication synchronization session found associated with source volume " -"%(source)s on %(storageSystem)s." -msgstr "" -"Zdrojový svazek %(source)s na %(storageSystem)s nemá přiděleno žádné sezení " -"synchronizace replikace." - -#, python-format -msgid "" -"No restore point found for backup='%(backup)s' of volume %(volume)s although " -"base image is found - forcing full copy." -msgstr "" -"Pro zálohu='%(backup)s' svazku %(volume)s nebyl nalezen žádný bod obnovy, " -"ale základní obraz byl nalezen - je vynucena úplné kopírování." - -#, python-format -msgid "No rpm info found for %(pkg)s package." -msgstr "Nenalezeny žádné informace balíčku %(pkg)s pomocí rpm." - -#, python-format -msgid "No volume found for CG: %(cg)s." -msgstr "Nenalezen žádný cílové svazek pro skupinu jednotnosti: %(cg)s." - -#, python-format -msgid "OpenStack OS Version Info: %(info)s" -msgstr "Informace o verzi Openstack OS: %(info)s" - -#, python-format -msgid "Overwriting volume %(volume_id)s with restore of backup %(backup_id)s" -msgstr "Přepisování svazku %(volume_id)s obnovou zálohy %(backup_id)s" - -#, python-format -msgid "Params for add volume request: %s." -msgstr "Parametry pro žádost o přidání svazku: %s." 
- -#, python-format -msgid "Performing post clone for %s" -msgstr "Provádění operací po klonování pro %s" - -#, python-format -msgid "Performing secure delete on volume: %s" -msgstr "Provádění bezpečného smazání svazku: %s" - -#, python-format -msgid "Pool id is %s." -msgstr "ID zásoby je %s." - -#, python-format -msgid "Port group instance name is %(foundPortGroupInstanceName)s." -msgstr "Název instance skupiny portu je %(foundPortGroupInstanceName)s." - -#, python-format -msgid "Post clone resize LUN %s" -msgstr "Provádění změny velikosti po klonování pro LUN %s" - -#, python-format -msgid "Prefer use target wwpn %(wwpn)s" -msgstr "Upřednostňováno použití cílového wwpn %(wwpn)s" - -#, python-format -msgid "Profile %s has been deleted." -msgstr "Profil %s byl smazán." - -#, python-format -msgid "Protection domain id: %(domain_id)s." -msgstr "ID ochranné domény: %(domain_id)s." - -#, python-format -msgid "Protection domain name: %(domain_name)s." -msgstr "Název ochranné domény: %(domain_name)s." - -msgid "Proxy mode detected." -msgstr "Zjištěn režim proxy." - -#, python-format -msgid "Purging deleted rows older than age=%(age)d days from table=%(table)s" -msgstr "" -"Odstraňování smazaných řádků starších než %(age)d dní z tabulky %(table)s" - -#, python-format -msgid "QoS: %s." -msgstr "QoS: %s." - -#, python-format -msgid "Query capacity stats response: %s." -msgstr "Odpověď dotazu na statistiky kapacity: %s." - -msgid "" -"RBD striping not supported - ignoring configuration settings for rbd striping" -msgstr "" -"Odejmutí RBD není podporováno - nastavení konfigurace pro odebrání rbd bude " -"ignorováno" - -#, python-format -msgid "RBD volume %s not found, allowing delete operation to proceed." -msgstr "Svazek RBD %s nebyl nalezen umožňující v pokračování operace smazání." - -#, python-format -msgid "" -"REST server IP: %(ip)s, port: %(port)s, username: %(user)s. Verify server's " -"certificate: %(verify_cert)s." 
-msgstr "" -"IP adresa serveru REST: %(ip)s, port: %(port)s, uživatelské jméno: %(user)s. " -"Ověření certifikátu serveru: %(verify_cert)s." - -#, python-format -msgid "Re-using existing purity host %(host_name)r" -msgstr "Znovu používání existujícího hostitele Purity %(host_name)r" - -msgid "Reconnected to coordination backend." -msgstr "Znovu připojeno k podpůrné vrstvě pro koordinaci." - -msgid "Reconnecting to coordination backend." -msgstr "Znovu připojování k podpůrné vrstvě pro koordinaci." - -#, python-format -msgid "Registering image in cache %s" -msgstr "Registrování obrazu v mezipaměti %s" - -#, python-format -msgid "" -"Relocating volume: %s to a different datastore due to insufficient disk " -"space on current datastore." -msgstr "" -"Přemisťování svazku %s do jiného datového úložiště z důvodu nedostatečného " -"místa na disku v současném úložišti." - -#, python-format -msgid "Remote return FC info is: %s." -msgstr "Informace o vzdálené FC vrátily: %s." - -msgid "Remove volume export completed successfully." -msgstr "Odstranění exportu svazku úspěšně dokončeno." - -#, python-format -msgid "Removed %s from cg." -msgstr "Odebírání %s ze skupiny jednotnosti." - -#, python-format -msgid "Removing ACL from volume=%(vol)s for initiator group %(igrp)s" -msgstr "Odstraňování ACL ze svazku %(vol)s pro skupinu zavaděče %(igrp)s" - -#, python-format -msgid "Removing iscsi_target for Volume ID: %s" -msgstr "Odstraňování cíle iscsi pro svazek s ID %s" - -#, python-format -msgid "Removing iscsi_target for volume: %s" -msgstr "Odstraňování cíle iscsi pro svazek %s" - -#, python-format -msgid "Removing iscsi_target for: %s" -msgstr "Odstraňování cíle iscsi pro %s" - -#, python-format -msgid "Removing iscsi_target: %s" -msgstr "Odstraňování cíle iscsi %s" - -#, python-format -msgid "Removing non-active host: %(host)s from scheduler cache." -msgstr "Odstraňování neaktivního hostitele: %(host)s z mezipaměti plánovače." 
- -#, python-format -msgid "Removing volumes from cg %s." -msgstr "Odebírání svazků ze skupiny jednotnosti %s." - -#, python-format -msgid "Rename Volume %(volume_id)s completed." -msgstr "Přejmenování svazku %(volume_id)s dokončeno." - -#, python-format -msgid "Renaming %(id)s from %(current_name)s to %(new_name)s." -msgstr "Přejmenovávání %(id)s z %(current_name)s na %(new_name)s." - -#, python-format -msgid "Renaming backing VM: %(backing)s to %(new_name)s." -msgstr "Přejmenovávání zálohy virtuálního stroje %(backing)s na %(new_name)s." - -#, python-format -msgid "Renaming existing snapshot %(ref_name)s to %(new_name)s" -msgstr "Přejmenovávání existujícího snímku %(ref_name)s na %(new_name)s" - -#, python-format -msgid "Renaming existing volume %(ref_name)s to %(new_name)s" -msgstr "Přejmenovávání existujícího svazku %(ref_name)s na %(new_name)s" - -#, python-format -msgid "Requested image %(id)s is not in raw format." -msgstr "Požadovaný obraz %(id)s není v prostém formátu." - -#, python-format -msgid "Requested unified config: %(storage_family)s and %(storage_protocol)s." -msgstr "" -"Zažádáno o sjednocené nastavení: %(storage_family)s a %(storage_protocol)s." - -msgid "Reserve volume completed successfully." -msgstr "Rezervace všech svazků úspěšně dokončena." - -#, python-format -msgid "" -"Reset backup status started, backup_id: %(backup_id)s, status: %(status)s." -msgstr "" -"Resetování stavu zálohy bylo zahájeno, id zálohy: %(backup_id)s, stav: " -"%(status)s." - -#, python-format -msgid "Resetting backup %s to available (was restoring)." -msgstr "Resetování zálohy %s na available (stav byl restoring)." - -#, python-format -msgid "Resetting backup %s to error (was creating)." -msgstr "Resetování zálohy %s na error (stav byl creating)." - -#, python-format -msgid "" -"Resetting volume %(vol_id)s to previous status %(status)s (was backing-up)." -msgstr "" -"Resetování svazku %(vol_id)s do předchozího stavu %(status)s (stav byl " -"\"backing-up\")." 
- -#, python-format -msgid "Resizing LUN %s directly to new size." -msgstr "Měnění velikosti LUN %s přímo na její velikost." - -#, python-format -msgid "Resizing file to %sG" -msgstr "Měnění velikosti souboru na %sG" - -#, python-format -msgid "Resizing file to %sG..." -msgstr "Měnění velikosti souboru na %sG..." - -#, python-format -msgid "" -"Restore backup finished, backup %(backup_id)s restored to volume " -"%(volume_id)s." -msgstr "" -"Obnovení zálohy bylo dokončeno: záloha: %(backup_id)s obnovena do svazku: " -"%(volume_id)s." - -#, python-format -msgid "Restore backup started, backup: %(backup_id)s volume: %(volume_id)s." -msgstr "" -"Obnovení zálohy bylo zahájeno: záloha: %(backup_id)s svazek: %(volume_id)s." - -#, python-format -msgid "Restoring backup %(backup)s to volume %(volume)s." -msgstr "Obnovování zálohy %(backup)s do svazku %(volume)s" - -#, python-format -msgid "Restoring backup %(backup_id)s to volume %(volume_id)s" -msgstr "Obnovování zálohy %(backup_id)s do svazku %(volume_id)s" - -msgid "Resume volume delete completed successfully." -msgstr "Obnovení smazání svazku úspěšné dokončeno." - -#, python-format -msgid "Resuming delete on backup: %s." -msgstr "Pokračování ve smazání zálohy: %s." - -#, python-format -msgid "Return FC info is: %s." -msgstr "Informace o FC vrátily: %s." - -#, python-format -msgid "Returning random Port Group: %(portGroupName)s." -msgstr "Vrácení náhodné skupiny portu: %(portGroupName)s." - -#, python-format -msgid "" -"Retype LUN(id: %(lun_id)s) smartcache from (name: %(old_name)s, id: " -"%(old_id)s) to (name: %(new_name)s, id: %(new_id)s) successfully." -msgstr "" -"Přetypování chytré mezipaměti LUN(id: %(lun_id)s) z (název: %(old_name)s, " -"id: %(old_id)s) na (název: %(new_name)s, id: %(new_id)s) bylo úspěšně " -"dokončeno." - -#, python-format -msgid "" -"Retype LUN(id: %(lun_id)s) smartpartition from (name: %(old_name)s, id: " -"%(old_id)s) to (name: %(new_name)s, id: %(new_id)s) success." 
-msgstr "" -"Přetypování chytrého oddílu LUN(id: %(lun_id)s) z (název: %(old_name)s, id: " -"%(old_id)s) na (název: %(new_name)s, id: %(new_id)s) bylo úspěšně dokončeno. " - -#, python-format -msgid "" -"Retype LUN(id: %(lun_id)s) smartqos from %(old_qos_value)s to %(new_qos)s " -"success." -msgstr "" -"Přetypování chytré QoS LUN(id: %(lun_id)s) z %(old_qos_value)s na " -"%(new_qos)s bylo úspěšně dokončeno. " - -#, python-format -msgid "" -"Retype LUN(id: %(lun_id)s) smarttier policy from %(old_policy)s to " -"%(new_policy)s success." -msgstr "" -"Přetypování chytré vrstvy LUN(id: %(lun_id)s) z %(old_policy)s na " -"%(new_policy)s bylo úspěšně dokončeno. " - -#, python-format -msgid "Retype Volume %(volume_id)s is completed." -msgstr "Přetypování svazku %(volume_id)s dokončeno." - -#, python-format -msgid "Retype Volume %(volume_id)s is done and migrated to pool %(pool_id)s." -msgstr "" -"Přetypování svazku %(volume_id)s bylo provedeno a svazek byl přesunut do " -"zásoby %(pool_id)s." - -#, python-format -msgid "" -"Retype revert %(volume_name)s snap_cpg from %(new_snap_cpg)s back to " -"%(old_snap_cpg)s." -msgstr "" -"Přetypování společné skupiny poskytování svazku %(volume_name)s vráceno z " -"%(new_snap_cpg)s zpět na %(old_snap_cpg)s." - -msgid "Retype volume completed successfully." -msgstr "Přetypování svazku úspěšně dokončeno." - -msgid "Retype volume request issued successfully." -msgstr "Žádost o přetypování svazku úspěšně vytvořena." - -msgid "Retype was to same Storage Profile." -msgstr "Přetypování bylo na stejný profil úložiště." - -msgid "Roll detaching of volume completed successfully." -msgstr "Provedení odpojení svazku úspěšně dokončeno." - -#, python-format -msgid "Running with vmemclient version: %s" -msgstr "Spuštěno s vmemclient verze %s" - -#, python-format -msgid "" -"ScaleIO copy_image_to_volume volume: %(vol)s image service: %(service)s " -"image id: %(id)s." 
-msgstr "" -"ScaleIO kopírování obrazu do svazku %(vol)s, služba obrazu: %(service)s, id " -"obrazu: %(id)s." - -#, python-format -msgid "" -"ScaleIO copy_volume_to_image volume: %(vol)s image service: %(service)s " -"image meta: %(meta)s." -msgstr "" -"ScaleIO kopírování svazku do obrazu. Svazek: %(vol)s, služba obrazu: " -"%(service)s, popisná data obrazu: %(meta)s." - -#, python-format -msgid "" -"ScaleIO create cloned volume: source volume %(src)s to target volume %(tgt)s." -msgstr "" -"ScaleIO vytváření klonovaného svazku: zdrojový svazek %(src)s do cílového " -"svazku %(tgt)s." - -#, python-format -msgid "" -"ScaleIO create volume from snapshot: snapshot %(snapname)s to volume " -"%(volname)s." -msgstr "" -"ScaleIO vytvoření svazku ze snímku: snímek %(snapname)s do svazku " -"%(volname)s." - -msgid "ScaleIO delete snapshot." -msgstr "ScaleIO mazání snímku." - -#, python-format -msgid "ScaleIO extend volume: volume %(volname)s to size %(new_size)s." -msgstr "ScaleIO rozšíření svazku: svazek %(volname)s na velikost %(new_size)s." - -#, python-format -msgid "ScaleIO get domain id by name request: %s." -msgstr "ScaleIO získání id domény pomocí zažádání o název: %s." - -#, python-format -msgid "ScaleIO get pool id by name request: %s." -msgstr "ScaleIO získání id zásoby pomocí zažádání o název: %s." - -#, python-format -msgid "ScaleIO rename volume request: %s." -msgstr "Žádost o přejmenování svazku ScaleIO: %s." - -#, python-format -msgid "ScaleIO volume %(vol)s was renamed to %(new_name)s." -msgstr "Svazek ScaleIO %(vol)s byl přejmenován na %(new_name)s." - -#, python-format -msgid "" -"Secondary ssh hosts key file %(kwargs)s will be loaded along with %(conf)s " -"from /etc/cinder.conf." -msgstr "" -"Druhotný soubor ssh klíčů hostitele %(kwargs)s bude načten spolu s %(conf)s " -"z /etc/cinder.conf." - -msgid "Session might have expired. Trying to relogin" -msgstr "Sezení mohlo vypršet. 
Bude proveden pokus o další přihlášení" - -#, python-format -msgid "Set newly managed Cinder volume name to %(name)s." -msgstr "Nastavit název nově spravovaného svazku Cinder na %(name)s." - -#, python-format -msgid "Setting host %(host)s to %(state)s." -msgstr "Nastavování hostitele %(host)s na %(state)s." - -#, python-format -msgid "Setting snapshot %(snap)s to online_flag %(flag)s" -msgstr "Nastavování svazku %(snap)s příznakem online %(flag)s" - -#, python-format -msgid "Setting volume %(vol)s to online_flag %(flag)s" -msgstr "Nastavování svazku %(vol)s příznakem online %(flag)s" - -#, python-format -msgid "Skipping deletion of volume %s as it does not exist." -msgstr "Přeskakování mazání svazku %s protože neexistuje." - -#, python-format -msgid "" -"Skipping image volume %(id)s because it is not accessible by current Tenant." -msgstr "" -"Přeskakování svazku obrazu %(id)s protože pro současného nájemníka není " -"dostupný." - -#, python-format -msgid "" -"Skipping remove_export. No iscsi_target is presently exported for volume: %s" -msgstr "" -"Odstranění exportu přeskočeno. Žádný cíl iscsi není v současnosti exportován " -"pro svazek: %s" - -#, python-format -msgid "Skipping remove_export. No iscsi_target provisioned for volume: %s" -msgstr "" -"Odstranění exportu přeskočeno. Svazku %s není poskytnut žádný cíl iscsi." - -#, python-format -msgid "Smb share %(share)s Total size %(size)s Total allocated %(allocated)s" -msgstr "" -"Sdílení SMB %(share)s. Celková velikost %(size)s, celkem přiděleno " -"%(allocated)s" - -#, python-format -msgid "Snapshot %(disp)s '%(new)s' is now being managed." -msgstr "Snímek %(disp)s '%(new)s' je nyní spravován." - -#, python-format -msgid "" -"Snapshot %(disp)s '%(vol)s' is no longer managed. Snapshot renamed to " -"'%(new)s'." -msgstr "" -"Snímek %(disp)s '%(vol)s' již není spravován. Snímek přejmenován na " -"'%(new)s'." - -#, python-format -msgid "Snapshot %s created successfully." -msgstr "Snímek %s úspěšně vytvořen." 
- -#, python-format -msgid "Snapshot %s does not exist in backend." -msgstr "Snímek %s neexistuje v podpůrné vrstvě," - -#, python-format -msgid "Snapshot %s not found" -msgstr "Snímek %s nenalezen" - -#, python-format -msgid "Snapshot '%(ref)s' renamed to '%(new)s'." -msgstr "Snímek '%(ref)s' přejmenován na '%(new)s'. " - -msgid "Snapshot create request issued successfully." -msgstr "Žádost o vytvoření snímku úspěšně zadána." - -#, python-format -msgid "" -"Snapshot creation %(cloneName)s completed. Source Volume: %(sourceName)s." -msgstr "" -"Vytvoření snímku %(cloneName)s dokončeno. Zdrojový svazek: %(sourceName)s." - -msgid "Snapshot delete request issued successfully." -msgstr "Žádost o smazání snímku úspěšně vytvořena." - -msgid "Snapshot force create request issued successfully." -msgstr "Žádost o vynucení vytvoření snímku úspěšně zadána." - -#, python-format -msgid "" -"Snapshot record for %s is not present, allowing snapshot_delete to proceed." -msgstr "Záznam snímku %s není přítomen, operace smazání snímku je povolena." - -msgid "Snapshot retrieved successfully." -msgstr "Snímek úspěšně získán." - -#, python-format -msgid "Snapshot volume %(vol)s into snapshot %(id)s." -msgstr "Svazek snímku %(vol)s do snímku %(id)s." - -#, python-format -msgid "Snapshot volume response: %s." -msgstr "Odpověď svazku snímku: %s." - -#, python-format -msgid "Snapshot: %(snapshot)s: not found on the array." -msgstr "Snímek: %(snapshot)s: nenalezen v poli." - -#, python-format -msgid "Source Snapshot: %s" -msgstr "Zdrojový snímek: %s" - -#, python-format -msgid "" -"Source and destination ZFSSA shares are the same. Do nothing. volume: %s" -msgstr "" -"Zdroj a cíl sdílení ZFSAA jsou stejné. Nebude nic provedeno, svazek: %s" - -#, python-format -msgid "Start to create cgsnapshot for consistency group: %(group_name)s" -msgstr "Vytvoření snímku pro skupinu jednotnosti %(group_name)s zahájeno." 
- -#, python-format -msgid "Start to create consistency group: %(group_name)s id: %(id)s" -msgstr "Vytvoření skupiny jednotnosti zahájeno: %(group_name)s id: %(id)s" - -#, python-format -msgid "Start to delete consistency group: %(cg_name)s" -msgstr "Mazání skupiny jednotnosti zahájeno: %(cg_name)s" - -#, python-format -msgid "Starting %(topic)s node (version %(version_string)s)" -msgstr "Spouštění uzle %(topic)s (verze %(version_string)s)" - -#, python-format -msgid "Starting volume driver %(driver_name)s (%(version)s)" -msgstr "Spouštění ovladače svazku %(driver_name)s (%(version)s)" - -#, python-format -msgid "Storage Group %(storageGroupName)s successfully deleted." -msgstr "Skupina úložiště %(storageGroupName)s úspěšně smazána." - -#, python-format -msgid "Storage group not associated with the policy. Exception is %s." -msgstr "Skupina úložiště není přidružena k zásadě. Výjimka je %s." - -#, python-format -msgid "" -"Storage pools names: %(pools)s, storage pool name: %(pool)s, pool id: " -"%(pool_id)s." -msgstr "" -"Názvy zásob úložiště: %(pools)s, název zásoby úložiště: %(pool)s, id zásoby: " -"%(pool_id)s." - -#, python-format -msgid "Successful login by user %s" -msgstr "Uživatel %s se úspěšně přihlásil" - -#, python-format -msgid "Successfully added %(volumeName)s to %(sgGroupName)s." -msgstr "%(volumeName)s úspěšně přidáno do %(sgGroupName)s." - -#, python-format -msgid "Successfully copied disk at: %(src)s to: %(dest)s." -msgstr "Úspěšně zkopírován disk %(src)s do %(dest)s." - -#, python-format -msgid "Successfully create volume %s" -msgstr "Svazek %s úspěšně vytvořen" - -#, python-format -msgid "" -"Successfully created a CloudByte volume [%(cb_vol)s] w.r.t OpenStack volume " -"[%(stack_vol)s]." -msgstr "" -"Svazek CloudByte úspěšně vytvořen [%(cb_vol)s] s ohledem na svazek OpenStack " -"[%(stack_vol)s]." - -#, python-format -msgid "Successfully created clone: %s." -msgstr "Úspěšně vytvořen klon: %s." 
- -#, python-format -msgid "" -"Successfully created snapshot: %(snap)s for volume backing: %(backing)s." -msgstr "Snímek %(snap)s pro zálohu svazku %(backing)s úspěšně vytvořen." - -#, python-format -msgid "Successfully created snapshot: %s." -msgstr "Snímek %s úspěšně vytvořen." - -#, python-format -msgid "Successfully created volume backing: %s." -msgstr "Záloha svazku %s úspěšně vytvořena." - -#, python-format -msgid "Successfully deleted %s." -msgstr "%s úspěšně smazáno." - -#, python-format -msgid "Successfully deleted file: %s." -msgstr "Soubor %s úspěšně smazán." - -#, python-format -msgid "Successfully deleted snapshot: %(name)s of backing: %(backing)s." -msgstr "Snímek %(name)s pro zálohu %(backing)s byl úspěšně smazán." - -#, python-format -msgid "Successfully deleted snapshot: %s" -msgstr "Snímek %s úspěšně smazán" - -#, python-format -msgid "Successfully deleted snapshot: %s." -msgstr "Snímek %s úspěšně smazán." - -#, python-format -msgid "" -"Successfully deleted volume [%(cb_vol)s] at CloudByte corresponding to " -"OpenStack volume [%(stack_vol)s]." -msgstr "" -"Svazek CloudByte [%(cb_vol)s], odpovídající svazku OpenStack " -"[%(stack_vol)s], byl úspěšně smazán." - -#, python-format -msgid "Successfully deleted volume: %s" -msgstr "Svazek %s úspěšně smazán" - -#, python-format -msgid "Successfully extended virtual disk: %(path)s to %(size)s GB." -msgstr "Virtuální disk pěněně rozšířen: %(path)s na %(size)s GB." - -#, python-format -msgid "Successfully extended volume %(volume_id)s to size %(size)s." -msgstr "Svazek %(volume_id)s úspěšně rozšířen na velikost %(size)s." - -#, python-format -msgid "Successfully extended volume: %(vol)s to size: %(size)s GB." -msgstr "Svazek %(vol)s úspěšně rozšířen na velikost %(size)s GB." - -#, python-format -msgid "Successfully got volume information for volume %s" -msgstr "Informace o svazku %s úspěšně získány" - -#, python-format -msgid "Successfully initialized connection with volume: %(volume_id)s." 
-msgstr "Spojení se svazkem %(volume_id)s úspěšně zavedeno." - -#, python-format -msgid "" -"Successfully initialized connection. target_wwn: %(target_wwn)s, " -"initiator_target_map: %(initiator_target_map)s, lun: %(target_lun)s." -msgstr "" -"Připojení úspěšně zavedeno. Cílové wwn: %(target_wwn)s, mapa cílů zavaděče: " -"%(initiator_target_map)s, lun: %(target_lun)s." - -#, python-format -msgid "" -"Successfully moved volume backing: %(backing)s into the folder: %(fol)s." -msgstr "Záloha svazku %(backing)s úspěšně přesunuta do složky %(fol)s." - -#, python-format -msgid "" -"Successfully relocated volume backing: %(backing)s to datastore: %(ds)s and " -"resource pool: %(rp)s." -msgstr "" -"Záloha svazku %(backing)s úspěšně přemístěna do datového úložiště %(ds)s a " -"zásoby zdrojů %(rp)s." - -msgid "Successfully retrieved InitiatorGrpList" -msgstr "Seznam skupin zavaděče úspěšně získán" - -#, python-format -msgid "Successfully setup driver: %(driver)s for server: %(ip)s." -msgstr "Ovladač %(driver)s úspěšně nastaven pro server %(ip)s." - -#, python-format -msgid "Successfully terminated connection for volume: %(volume_id)s." -msgstr "Spojení se svazkem %(volume_id)s úspěšně ukončeno." - -#, python-format -msgid "" -"Successfully update volume stats. backend: %(volume_backend_name)s, vendor: " -"%(vendor_name)s, driver version: %(driver_version)s, storage protocol: " -"%(storage_protocol)s." -msgstr "" -"Statistiky svazku úspěšně aktualizovány. Podpůrná vrstva: " -"%(volume_backend_name)s, prodejce: %(vendor_name)s, verze ovladače: " -"%(driver_version)s, protokol úložiště: %(storage_protocol)s." - -#, python-format -msgid "" -"Successfully updated CloudByte volume [%(cb_vol)s] corresponding to " -"OpenStack volume [%(ops_vol)s]." -msgstr "" -"Svazek CloudByte úspěšně aktualizován [%(cb_vol)s] s ohledem na svazek " -"OpenStack [%(ops_vol)s]." - -#, python-format -msgid "Switching volume %(vol)s to profile %(prof)s." 
-msgstr "Přepínání svazku %(vol)s do profilu %(prof)s." - -#, python-format -msgid "System %(id)s has %(status)s status." -msgstr "Systém %(id)s má stav %(status)s." - -#, python-format -msgid "" -"System with controller addresses [%s] is not registered with web service." -msgstr "" -"Systém s adresami kontroléru [%s] není zaregistrován u internetové služby." - -#, python-format -msgid "Target wwns in masking view %(maskingView)s: %(targetWwns)s." -msgstr "Cílové wwns v zamaskování %(maskingView)s: %(targetWwns)s." - -#, python-format -msgid "Terminate connection: %(volume)s." -msgstr "Ukončení připojení: %(volume)s." - -msgid "Terminate volume connection completed successfully." -msgstr "Ukončení připojení svazku úspěšně dokončeno." - -msgid "" -"The NAS file operations will be run as non privileged user in secure mode. " -"Please ensure your libvirtd settings have been configured accordingly (see " -"section 'OpenStack' in the Quobyte Manual." -msgstr "" -"Operace se souborem NAS budou spouštěny pod účtem uživatele bez oprávnění " -"správce v zabezpečeném režimu. Ujistěte se prosím, že vaše nastavení " -"libvirtd je správné (přečtěte si část 'OpenStack' v příručce Quobyte)." - -#, python-format -msgid "The QoS sepcs is: %s." -msgstr "Specifikace QoS jsou %s." - -#, python-format -msgid "" -"The image was successfully converted, but image size is unavailable. src " -"%(src)s, dest %(dest)s. %(error)s" -msgstr "" -"Obraz byl úspěšně převeden, ale velikost obrazu není dostupná. Zdroj " -"%(src)s, cíl %(dest)s. %(error)s" - -#, python-format -msgid "" -"The multi-attach E-Series host group '%(label)s' already exists with " -"clusterRef %(clusterRef)s" -msgstr "" -"Již existuje skupina vícenásobného připojení hostitele E-Series '%(label)s' " -"s odkazem clusteru %(clusterRef)s" - -#, python-format -msgid "The pool_name from extraSpecs is %(pool)s." -msgstr "Název zásoby z dodatečných specifikací je %(pool)s." - -#, python-format -msgid "The same hostid is: %s." 
-msgstr "Stejné id hostitele je: %s." - -#, python-format -msgid "The storage group found is %(foundStorageGroupInstanceName)s." -msgstr "Nalezená skupiny úložiště je %(foundStorageGroupInstanceName)s." - -#, python-format -msgid "" -"The volume belongs to more than one storage group. Returning storage group " -"%(sgName)s." -msgstr "" -"Svazek patří do více než jedné skupině úložiště. Předána skupina úložiště " -"%(sgName)s." - -#, python-format -msgid "" -"There is no backing for the snapshotted volume: %(snap)s. Not creating any " -"backing for the volume: %(vol)s." -msgstr "" -"Pro snímek svazku %(snap)s neexistuje záloha. Nebude vytvořena žádná záloha " -"pro svazek: %(vol)s." - -#, python-format -msgid "" -"There is no backing for the source volume: %(src)s. Not creating any backing " -"for volume: %(vol)s." -msgstr "" -"Pro zdrojový svazek %(src)s neexistuje záloha. Nebude vytvořena žádná záloha " -"pro svazek: %(vol)s." - -#, python-format -msgid "There is no backing for the volume: %s. Need to create one." -msgstr "Svazek %s nemá žádnou zálohu. Je třeba ji vytvořit." - -#, python-format -msgid "There is no backing for volume: %s; no need to extend the virtual disk." -msgstr "Svazek %s nemá žádnou zálohu; není třeba rozšiřovat virtuální disk." - -#, python-format -msgid "There is no backing, and so there is no snapshot: %s." -msgstr "Záloha neexistuje, tím pádem ani snímek %s neexistuje." - -#, python-format -msgid "There is no backing, so will not create snapshot: %s." -msgstr "Záloha neexistuje, snímek %s nebude vytvořen." - -#, python-format -msgid "" -"There is no snapshot point for the snapshotted volume: %(snap)s. Not " -"creating any backing for the volume: %(vol)s." -msgstr "" -"Pro snímek svazku %(snap)s neexistuje žádný bod snímku. Záloha pro svazek " -"%(vol)s nebude vytvořena." - -msgid "Token is invalid, going to re-login and get a new one." -msgstr "Známka je neplatná, přihlášení bude opakováno pro získání nového." 
- -msgid "Transfer volume completed successfully." -msgstr "Přenos svazku úspěšně dokončen." - -#, python-format -msgid "Tried to delete non-existent vdisk %s." -msgstr "Pokus o smazání neexistujícího virtuálního disku %s." - -#, python-format -msgid "" -"Tried to delete snapshot %s, but was not found in Datera cluster. Continuing " -"with delete." -msgstr "" -"Pokus o smazání snímku %s, který ale nebyl nalezen v clusteru Datera. " -"Pokračuje se ve smazání." - -#, python-format -msgid "" -"Tried to delete volume %s, but it was not found in the Datera cluster. " -"Continuing with delete." -msgstr "" -"Pokus o smazání svazku %s, který ale nebyl nalezen v clusteru Datera. " -"Pokračuje se ve smazání." - -#, python-format -msgid "Trying to unmap volume from all sdcs before deletion: %s." -msgstr "Pokus o zrušení mapování svazku ze všech sdcs před smazáním: %s" - -msgid "Unable to accept transfer for volume, because it is in maintenance." -msgstr "Nelze přijmout přenos svazku, protože je v údržbě." - -msgid "Unable to attach volume, because it is in maintenance." -msgstr "Nelze připojit svazek, protože je v údržbě." - -msgid "Unable to create the snapshot for volume, because it is in maintenance." -msgstr "Nelze vytvořit snímek svazku, protože je v údržbě." - -msgid "Unable to detach volume, because it is in maintenance." -msgstr "Nelze odpojit svazek, protože je v údržbě." - -msgid "Unable to get Cinder internal context, will not use image-volume cache." -msgstr "" -"Nelze získat vnitřní kontext Cinder, mezipaměť obrazu-svazku nebude použita." - -msgid "" -"Unable to initialize the connection for volume, because it is in maintenance." -msgstr "Nelze zavést připojení ke svazku, protože je v údržbě." - -#, python-format -msgid "Unable to serialize field '%s' - excluding from backup" -msgstr "Nelze serializovat pole '%s' - je vynecháno ze zásoby" - -msgid "Unable to update volume, because it is in maintenance." -msgstr "Nelze aktualizovat svazek, protože je v údržbě." 
- -#, python-format -msgid "Unexporting lun %s." -msgstr "Rušení exportu lun %s." - -#, python-format -msgid "Unmanage snapshot with id: %s" -msgstr "Zrušit správu snímku s id: %s" - -#, python-format -msgid "Unmanage volume %(volume_id)s completed." -msgstr "Rušení správy svazku %(volume_id)s dokončeno." - -#, python-format -msgid "Unmanage volume %s" -msgstr "Zrušit správu svazku %s" - -#, python-format -msgid "Unmanage volume with id: %s" -msgstr "Zrušit správu svazku s id: %s" - -#, python-format -msgid "Unmanaged LUN with current path %(path)s and uuid %(uuid)s." -msgstr "Správa zrušena u LUN se současnou cestou %(path)s a uuid %(uuid)s." - -#, python-format -msgid "Unmanaged volume with current label %(label)s and wwn %(wwn)s." -msgstr "" -"Správa byla zrušena pro svazek se současnou jmenovkou %(label)s a wwn " -"%(wwn)s." - -#, python-format -msgid "Unmap volume: %(volume)s." -msgstr "Zrušení mapování svazku: %(volume)s." - -msgid "Unreserve volume completed successfully." -msgstr "Zrušení rezervace všech svazků úspěšně dokončena." - -#, python-format -msgid "" -"Update Consistency Group: %(group)s. This adds and/or removes volumes from a " -"CG." -msgstr "" -"Aktualizace skupiny jednotnosti: %(group)s. Toto přidá a/nebo odstraní " -"svazky ze skupiny jednotnosti." - -#, python-format -msgid "Update migrated volume %(new_volume)s completed." -msgstr "Aktualizace přesunutého svazku %(new_volume)s dokončena." - -msgid "Update readonly setting on volume completed successfully." -msgstr "Aktualizace nastavení položek pro čtení ve svazku úspěšně dokončena." - -msgid "Update snapshot metadata completed successfully." -msgstr "Aktualizace popisných dat snímku úspěšně dokončena." - -msgid "Update volume admin metadata completed successfully." -msgstr "Aktualizace popisných dat správce svazku úspěšně dokončena." - -msgid "Update volume metadata completed successfully." -msgstr "Aktualizace popisných dat svazku úspěšně dokončena." 
- -#, python-format -msgid "Updated Consistency Group %s" -msgstr "Skupina jednotnosti %s aktualizována" - -#, python-format -msgid "" -"Updating consistency group %(id)s with name %(name)s description: " -"%(description)s add_volumes: %(add_volumes)s remove_volumes: " -"%(remove_volumes)s." -msgstr "" -"Aktualizování skupiny jednotnosti %(id)s s názvem %(name)s, popisem: " -"%(description)s, přidané svazky: %(add_volumes)s odstraněné svazky: " -"%(remove_volumes)s." - -#, python-format -msgid "Updating snapshot %(id)s with info %(dict)s" -msgstr "Aktualizace snímku %(id)s informacemi %(dict)s" - -#, python-format -msgid "Updating status for CG: %(id)s." -msgstr "Aktualizace stavu skupiny jednotnosti: %(id)s." - -#, python-format -msgid "Updating storage service catalog information for backend '%s'" -msgstr "" -"Aktualizování informací o katalogu služby úložiště pro podpůrnou vrstvu '%s'" - -msgid "Use ALUA when adding initiator to host." -msgstr "Použít ALUA při přidávání zavaděče k hostiteli." - -msgid "Use CHAP when adding initiator to host." -msgstr "Použít CHAP při přidávání zavaděče k hostiteli." - -#, python-format -msgid "" -"Using FC Zone Manager %(zm_version)s, Driver %(drv_name)s %(drv_version)s." -msgstr "" -"Používán správce zóny FC %(zm_version)s, ovladač %(drv_name)s " -"%(drv_version)s." - -#, python-format -msgid "Using compute cluster(s): %s." -msgstr "Použit výpočtové clustery: %s." - -#, python-format -msgid "Using existing initiator group name: %(igGroupName)s." -msgstr "Použit existující název skupiny zavaděče: %(igGroupName)s." 
- -#, python-format -msgid "Using overridden vmware_host_version from config: %s" -msgstr "Použita potlačena verze hostitele vmware z nastavení: %s" - -#, python-format -msgid "Using pool %(pool)s instead of %(cpg)s" -msgstr "Používání zásoby %(pool)s místo %(cpg)s" - -#, python-format -msgid "Value with type=%s is not serializable" -msgstr "Hodnota typ=%s není serializovatelná" - -#, python-format -msgid "Virtual volume %(disp)s '%(new)s' is being retyped." -msgstr "Probíhá přetypování virtuálního svazku %(disp)s '%(new)s'" - -#, python-format -msgid "Virtual volume %(disp)s '%(new)s' is now being managed." -msgstr "Virtuální svazek %(disp)s '%(new)s' je nyní spravován." - -#, python-format -msgid "" -"Virtual volume %(disp)s '%(new)s' snapCPG is empty so it will be set to: " -"%(cpg)s" -msgstr "" -"Snímek společné skupiny poskytování virtuálního svazku %(disp)s '%(new)s' je " -"prázdný, proto bude nastavena na: %(cpg)s" - -#, python-format -msgid "" -"Virtual volume %(disp)s '%(vol)s' is no longer managed. Volume renamed to " -"'%(new)s'." -msgstr "" -"Virtuální svazek %(disp)s '%(vol)s' již není spravován. Byl přejmenován na " -"'%(new)s'." - -#, python-format -msgid "Virtual volume %(disp)s successfully retyped to %(new_type)s." -msgstr "Virtuální svazek %(disp)s úspěšně přetypován na %(new_type)s." - -#, python-format -msgid "Virtual volume '%(ref)s' renamed to '%(new)s'." -msgstr "Virtuální svazek '%(ref)s' přejmenován na '%(new)s'." - -#, python-format -msgid "Vol copy job completed for dest %s." -msgstr "Úkol kopírování svazku dokončen u cíle %s." - -#, python-format -msgid "Volume %(volume)s does not have meta device members." -msgstr "Svazek %(volume)s nemá členy meta zařízení." - -#, python-format -msgid "" -"Volume %(volume)s is already mapped. The device number is %(deviceNumber)s." -msgstr "" -"Svazek %(volume)s již je namapován. Číslo zařízení je %(deviceNumber)s." - -#, python-format -msgid "Volume %(volumeName)s not in any storage group." 
-msgstr "Svazek %(volumeName)s není v žádné skupině úložiště." - -#, python-format -msgid "" -"Volume %(volume_id)s: being created as %(create_type)s with specification: " -"%(volume_spec)s" -msgstr "" -"Svazek %(volume_id)s je vytvářen jako %(create_type)s se specifikací: " -"%(volume_spec)s" - -#, python-format -msgid "Volume %(volume_name)s (%(volume_id)s): created successfully" -msgstr "Svazek %(volume_name)s (%(volume_id)s) úspěšně vytvořen" - -#, python-format -msgid "Volume %s converted." -msgstr "Svazek %s převeden." - -#, python-format -msgid "Volume %s created" -msgstr "Svazek %s vytvořen" - -#, python-format -msgid "Volume %s has been transferred." -msgstr "Svazek %s byl přenesen." - -#, python-format -msgid "Volume %s is mapping to multiple hosts." -msgstr "Mapování svazku %s k více hostitelům." - -#, python-format -msgid "Volume %s is not mapped. No volume to unmap." -msgstr "Svazek %s není namapován. Není třeba rušit mapování." - -#, python-format -msgid "Volume %s presented." -msgstr "Svazek %s darován." - -#, python-format -msgid "Volume %s retyped." -msgstr "Svazek %s přetypován." - -#, python-format -msgid "Volume %s unmanaged." -msgstr "Zrušena správa svazku %s." - -#, python-format -msgid "Volume %s: retyped successfully" -msgstr "Svazek %s úspěšně přetypován" - -#, python-format -msgid "Volume already mapped, retrieving %(ig)s, %(vol)s" -msgstr "Svazek již je namapován. Získávání %(ig)s, %(vol)s" - -#, python-format -msgid "Volume copy %(size_in_m).2f MB at %(mbps).2f MB/s" -msgstr "" -"Kopírování svazku o velikosti %(size_in_m).2f MB rychlostí %(mbps).2f MB/s" - -#, python-format -msgid "Volume copy completed (%(size_in_m).2f MB at %(mbps).2f MB/s)." -msgstr "" -"Kopírování svazku dokončeno (%(size_in_m).2f MB rychlostí %(mbps).2f MB/s)." - -msgid "Volume created successfully." -msgstr "Svazek úspěšně vytvořen." - -msgid "Volume detach called, but volume not attached." -msgstr "Zavoláno odpojení svazku, ale svazek není připojen." 
- -msgid "Volume info retrieved successfully." -msgstr "Informace o svazku úspěšně získány." - -#, python-format -msgid "Volume name changed from %(tmp)s to %(orig)s" -msgstr "Název svazku změněn z %(tmp)s na %(orig)s" - -#, python-format -msgid "Volume name changed from %(tmp)s to %(orig)s." -msgstr "Název svazku změněn z %(tmp)s na %(orig)s." - -msgid "Volume retrieved successfully." -msgstr "Svazek úspěšně získán." - -#, python-format -msgid "Volume service: %(label)s. Casted to: %(loc)s" -msgstr "Služba svazku: %(label)s. Obsazena do: %(loc)s" - -#, python-format -msgid "Volume status is: %s." -msgstr "Stav svazku je: %s." - -#, python-format -msgid "Volume type is %s." -msgstr "Typ svazku je %s." - -#, python-format -msgid "" -"Volume type: %(volume_type)s, storage pool name: %(pool_name)s, storage pool " -"id: %(pool_id)s, protection domain id: %(domain_id)s, protection domain " -"name: %(domain_name)s." -msgstr "" -"Typ svazku: %(volume_type)s, název zásoby úložiště: %(pool_name)s, id zásoby " -"úložiště: %(pool_id)s, id ochranné domény: %(domain_id)s, název ochranné " -"domény: %(domain_name)s." - -msgid "Volume updated successfully." -msgstr "Svazek úspěšně aktualizován." - -#, python-format -msgid "Volume with given ref %s need not be renamed during manage operation." -msgstr "" -"Svazek zadaným odkazem %s není třeba během operace správy přejmenovávat." - -#, python-format -msgid "" -"Volume: %(vol_id)s, size: %(vol_size)d is larger than backup: %(backup_id)s, " -"size: %(backup_size)d, continuing with restore." -msgstr "" -"Svazek: %(vol_id)s, velikost: %(vol_size)d je větší než záloha: " -"%(backup_id)s, velikost: %(backup_size)d, pokračuje se v obnově." - -#, python-format -msgid "WWPN on node %(node)s: %(wwpn)s." -msgstr "WWPN na uzlu %(node)s: %(wwpn)s." - -#, python-format -msgid "" -"Waiting for volume expansion of %(vol)s to complete, current remaining " -"actions are %(action)s. ETA: %(eta)s mins." 
-msgstr "" -"Čekání na dokončení rozšíření svazku %(vol)s. Činnosti které je třeba ještě " -"provést: %(action)s. Předpokládaný čas dokončení: %(eta)s min." - -msgid "Waiting for web service array communication." -msgstr "Čeká se na komunikaci s polem internetové služby." - -msgid "Waiting for web service to validate the configured password." -msgstr "Čeká se na ověření nastaveného hesla internetovou službou." - -#, python-format -msgid "Will clone a volume from the image volume %(id)s." -msgstr "Svazek bude klonován ze svazku obrazu %(id)s." - -#, python-format -msgid "XtremIO SW version %s" -msgstr "Verze softwaru XtremIO: %s" - -#, python-format -msgid "ZFSSA version: %s" -msgstr "Verze ZFSSA: %s" - -#, python-format -msgid "Zone exists in I-T mode. Skipping zone creation %s" -msgstr "Zóna existuje v režimu I-T. Vytváření zóny %s přeskočeno" - -#, python-format -msgid "Zone map to add: %s" -msgstr "Mapa zóny pro přidání: %s" - -#, python-format -msgid "Zoning Policy: %s, not recognized" -msgstr "Zásada zónování: %s nebylo rozpoznáno" - -#, python-format -msgid "Zoning policy for Fabric %s" -msgstr "Zásady zónování pro Fabric %s" - -#, python-format -msgid "Zoning policy for fabric %s" -msgstr "Zásady zónování pro Fabric %s" - -#, python-format -msgid "" -"_check_volume_copy_ops: Volume %(vol)s does not have the specified vdisk " -"copy operation: orig=%(orig)s new=%(new)s." -msgstr "" -"Operace kontroly kopie svazku: Svazek %(vol)s nemá zadanou operaci " -"kopírování virtuálního disku: původní=%(orig)s, nové=%(new)s." - -#, python-format -msgid "_get_tgt_ip_from_portgroup: Get ip: %s." -msgstr "Získání cílové iup adrey ze skupiny portů: Získaná IP adresa: %s." - -#, python-format -msgid "_get_tgt_iqn: iSCSI target iqn is: %s." -msgstr "Získání cílového iqn: Cílové iqn pro iSCSI je: %s." - -#, python-format -msgid "" -"add_host_with_check. create host success. host name: %(name)s, host id: " -"%(id)s" -msgstr "" -"Přidání hostitele s kontrolou proběhlo úspěšně. 
Název hostitele: %(name)s, " -"id hostitele: %(id)s" - -#, python-format -msgid "add_host_with_check. host name: %(name)s, host id: %(id)s" -msgstr "" -"Přidat hostitele s kontrolou, název hostitele: %(name)s, id hostitele: %(id)s" - -#, python-format -msgid "casted to %s" -msgstr "obsazeno do %s" - -#, python-format -msgid "cgsnapshot %s: created successfully" -msgstr "Snímek skupiny jednotnosti %s: úspěšně vytvořen" - -#, python-format -msgid "cgsnapshot %s: deleted successfully" -msgstr "Snímek skupiny jednotnosti %s: úspěšně smazán" - -#, python-format -msgid "cgsnapshot %s: deleting" -msgstr "Snímek skupiny jednotnosti %s: mazání" - -#, python-format -msgid "" -"create_hostgroup_with_check. Create hostgroup success. hostgroup name: " -"%(name)s, hostgroup id: %(id)s" -msgstr "" -"Vytvoření skupiny hostitele s kontrolou proběhlo úspěšně. Název skupiny " -"hostitele: %(name)s, id skupiny hostitele: %(id)s" - -#, python-format -msgid "" -"create_hostgroup_with_check. hostgroup name: %(name)s, hostgroup id: %(id)s" -msgstr "" -"Vytvořit skupinu hostitele s kontrolou, název skupiny hostitele: %(name)s, " -"id skupiny hostitele: %(id)s" - -#, python-format -msgid "" -"create_volume_from_snapshot: src_lun_id: %(src_lun_id)s, tgt_lun_id: " -"%(tgt_lun_id)s, copy_name: %(copy_name)s." -msgstr "" -"Vytváření svazku ze snímku: ID zdrojového lun: %(src_lun_id)s,ID cílového " -"lun: %(tgt_lun_id)s, název kopie: %(copy_name)s." - -#, python-format -msgid "" -"do_mapping, lun_group: %(lun_group)s, view_id: %(view_id)s, lun_id: " -"%(lun_id)s." -msgstr "" -"Provést mapování, skupina lun: %(lun_group)s, id zobrazení: %(view_id)s, id " -"lun: %(lun_id)s." - -#, python-format -msgid "igroup %(grp)s found for initiator %(iname)s" -msgstr "Pro zavaděč %(iname)s nalezena skupina zavaděče %(grp)s" - -#, python-format -msgid "initialize_connection success. Return data: %s." -msgstr "Spojení úspěšně zavedeno. 
Vrácená data: %s" - -#, python-format -msgid "initialize_connection volume: %(volume)s, connector: %(connector)s" -msgstr "Zavedení spojení se svazkem %(volume)s, konektor %(connector)s" - -#, python-format -msgid "initialize_connection, host lun id is: %s." -msgstr "Zavedení spojení, id hostitele lun: %s." - -#, python-format -msgid "" -"initialize_connection, iscsi_iqn: %(iscsi_iqn)s, target_ip: %(target_ip)s, " -"portgroup_id: %(portgroup_id)s." -msgstr "" -"Zvadenení spojení: IQN iSCSI: %(iscsi_iqn)s, cílová IP adresa: " -"%(target_ip)s, ID skupiny portů: %(portgroup_id)s." - -#, python-format -msgid "initialize_connection, metadata is: %s." -msgstr "Zavedení spojení, popisná data. %s." - -#, python-format -msgid "" -"initialize_connection_fc, initiator: %(wwpns)s, volume name: %(volume)s." -msgstr "Zavedení spojení s FC: Zavaděč %(wwpns)s, název svazku: %(volume)s." - -msgid "initiator has no password while using chap,adding it" -msgstr "Zavaděč nemá žádné heslo při používání chap, heslo je přidáno" - -#, python-format -msgid "" -"migrate_volume_completion is cleaning up an error for volume %(vol1)s " -"(temporary volume %(vol2)s" -msgstr "" -"Dokončení přenosu svazku čistí chybu ve svazku %(vol1)s (dočasný svazek " -"%(vol2)s" - -#, python-format -msgid "new cloned volume: %s" -msgstr "Nový klonovaný svazek: %s" - -#, python-format -msgid "open_connection to %(ssn)s at %(ip)s" -msgstr "Připojení otevřeno do %(ssn)s na adrese %(ip)s" - -#, python-format -msgid "setting volume %s to error_restoring (was restoring-backup)." -msgstr "Nastavování svazku %s na error_restoring (stav byl restoring-backup)." - -#, python-format -msgid "snapshot %s doesn't exist" -msgstr "snímek %s neexistuje" - -#, python-format -msgid "source volume for cloning: %s" -msgstr "Zdrojový svazek pro klonování: %s" - -#, python-format -msgid "stop_snapshot: snapshot name: %(snapshot)s, volume name: %(volume)s." 
-msgstr "" -"Zastavení snímku: Název snímku: %(snapshot)s, název svazku: %(volume)s." - -#, python-format -msgid "terminate_connection volume: %(volume)s, connector: %(con)s" -msgstr "Ukončení připojení se svazkem: %(volume)s, konektor: %(con)s" - -#, python-format -msgid "terminate_connection, return data is: %s." -msgstr "Ukončení spojení, vrácená data: %s." - -#, python-format -msgid "" -"terminate_connection_fc: volume name: %(volume)s, wwpns: %(wwns)s, lun_id: " -"%(lunid)s." -msgstr "" -"Ukončení spojení s fc: Název svazku %(volume)s, wwpns: %(wwns)s, id lun: " -"%(lunid)s." - -#, python-format -msgid "tunevv failed because the volume '%s' has snapshots." -msgstr "tunevv selhalo protože svazek '%s' má snímky." - -#, python-format -msgid "username: %(username)s, verify_cert: %(verify)s." -msgstr "Uživatelské jméno: %(username)s, ověření certifikátu: %(verify)s." - -#, python-format -msgid "vol=%s" -msgstr "svazek=%s" - -#, python-format -msgid "vol_name=%(name)s provider_location=%(loc)s" -msgstr "název svazku %(name)s, umístění poskytovatele %(loc)s" - -#, python-format -msgid "volume %(name)s extended to %(size)d." -msgstr "Svazek %(name)s rozšířen na %(size)d." - -#, python-format -msgid "volume %s doesn't exist" -msgstr "Svazek %s neexistuje" - -#, python-format -msgid "volume %s no longer exists in backend" -msgstr "Svazek %s již v podpůrné vrstvě neexistuje" - -msgid "volume_file does not support fileno() so skipping fsync()" -msgstr "Soubor svazku nepodporuje fileno(), a proto je fsync() přeskočen" diff --git a/cinder/locale/cs/LC_MESSAGES/cinder-log-warning.po b/cinder/locale/cs/LC_MESSAGES/cinder-log-warning.po deleted file mode 100644 index 019e6c051..000000000 --- a/cinder/locale/cs/LC_MESSAGES/cinder-log-warning.po +++ /dev/null @@ -1,1348 +0,0 @@ -# Translations template for cinder. -# Copyright (C) 2015 ORGANIZATION -# This file is distributed under the same license as the cinder project. -# -# Translators: -# OpenStack Infra , 2015. 
#zanata -# Zbyněk Schwarz , 2015. #zanata -# Andreas Jaeger , 2016. #zanata -msgid "" -msgstr "" -"Project-Id-Version: cinder 9.0.0.0rc2.dev161\n" -"Report-Msgid-Bugs-To: https://bugs.launchpad.net/openstack-i18n/\n" -"POT-Creation-Date: 2016-10-05 02:09+0000\n" -"MIME-Version: 1.0\n" -"Content-Type: text/plain; charset=UTF-8\n" -"Content-Transfer-Encoding: 8bit\n" -"PO-Revision-Date: 2015-12-22 01:37+0000\n" -"Last-Translator: Zbyněk Schwarz \n" -"Language: cs\n" -"Plural-Forms: nplurals=3; plural=(n==1) ? 0 : (n>=2 && n<=4) ? 1 : 2;\n" -"Generated-By: Babel 2.0\n" -"X-Generator: Zanata 3.7.3\n" -"Language-Team: Czech\n" - -#, python-format -msgid "%(path)s is being set with open permissions: %(perm)s" -msgstr "%(path)s je nastavována s volnými oprávněními: %(perm)s" - -#, python-format -msgid "%s" -msgstr "%s" - -#, python-format -msgid "%s is already mounted" -msgstr "%s již je připojeno" - -#, python-format -msgid "%s not found!" -msgstr "%s nenalezeno!" - -msgid "" -"'hpe3par:cpg' is not supported as an extra spec in a volume type. CPG's are " -"chosen by the cinder scheduler, as a pool, from the cinder.conf entry " -"'hpe3par_cpg', which can be a list of CPGs." -msgstr "" -"'hpe3par:cpg' není podporováno jako dodatečná specifikace v typu svazku. " -"Společné skupiny poskytování jsou voleny plánovačem cinder, jako zásoba, z " -"položky 'hpe3par_cpg' v cinder.conf, který může být seznamem těchto skupin." - -#, python-format -msgid "3PAR vlun for volume %(name)s not found on host %(host)s" -msgstr "3PAR vlun pro svazek %(name)s nenalezen v hostiteli %(host)s" - -msgid "Attempted to delete a space that's not there." -msgstr "Pokus smazat prostor který neexistuje." - -#, python-format -msgid "" -"Attempting a rollback of: %(volumeName)s to original pool " -"%(sourcePoolInstanceName)s." -msgstr "" -"Pokus o zpětné vrácení %(volumeName)s do původní zásoby " -"%(sourcePoolInstanceName)s." - -msgid "Attempting recreate of backing lun..." 
-msgstr "Pokus o znovuvytvoření záložního lun..." - -#, python-format -msgid "" -"Availability zone '%(s_az)s' not found, falling back to '%(s_fallback_az)s'." -msgstr "" -"Zóna dostupnosti '%(s_az)s' nebyla nalezena, bude použita záložní " -"'%(s_fallback_az)s'." - -#, python-format -msgid "Availability zone '%s' is invalid" -msgstr "Zóna dostupnosti '%s' je neplatná" - -#, python-format -msgid "" -"Backup service %(service)s does not support verify. Backup id %(id)s is not " -"verified. Skipping verify." -msgstr "" -"Zálohovací služba %(service)s nepodporuje ověřování. Záloha s id %(id)s není " -"ověřena. Ověřování přeskočeno." - -#, python-format -msgid "CHAP is enabled, but server secret not configured on server %s" -msgstr "CHAP je povolen, ale soukromý klíč serveru není nastaven na serveru %s" - -#, python-format -msgid "CHAP secret exists for host %s but CHAP is disabled" -msgstr "Soukromý klíč CHAP pro hostitele %s existuje, ale CHAP je zakázán." - -msgid "CHAP secret exists for host but CHAP is disabled." -msgstr "Soukromý klíč CHAP pro hostitele existuje, ale CHAP je zakázán." - -msgid "Can't find lun on the array." -msgstr "Nelze najít lun v poli." - -msgid "Can't find snapshot on the array." -msgstr "Nelze najít snímek v poli." - -msgid "Can't find target iqn from rest." -msgstr "Nelze najít cílové iqn z REST." - -msgid "Cannot determine the hardware type." -msgstr "Nelze zjistit typ hardwaru." - -#, python-format -msgid "Cannot get volume status %(exc)s." -msgstr "Nelze získat stav svazku %(exc)s." - -#, python-format -msgid "" -"Cannot undo volume rename; old name was %(old_name)s and new name is " -"%(new_name)s." -msgstr "" -"Nelze vrátit přejmenování svazku zpět; starý název byl %(old_name)s a nový " -"název je %(new_name)s." 
- -#, python-format -msgid "Change will make usage less than 0 for the following resources: %s" -msgstr "Změna využití se sníží na méně než 0 pro následující zdroje: %s" - -#, python-format -msgid "" -"Changing backing: %(backing)s name from %(new_name)s to %(old_name)s failed." -msgstr "Změna názvu zálohy %(backing)s z %(new_name)s na %(old_name)s selhala." - -#, python-format -msgid "" -"Clone failed on V3. Cleaning up the target volume. Clone name: %(cloneName)s " -msgstr "" -"Klonování selhalo ve V3. Čištění cílového svazku. Název klona: %(cloneName)s " - -#, python-format -msgid "Could not create target because it already exists for volume: %s" -msgstr "Nelze vytvořit cíl protože již ve svazku existuje: %s" - -#, python-format -msgid "Could not determine root volume name on %s." -msgstr "Nelze zjistit název kořenového svazku v %s." - -#, python-format -msgid "Could not get pool information (%s)!" -msgstr "Nelze získat informace o zásobě (%s)!" - -#, python-format -msgid "Could not get status for %(name)s (%(status)d)." -msgstr "Nelze získat stav %(name)s (%(status)d)." - -#, python-format -msgid "" -"CreateStorageHardwareID failed. initiator: %(initiator)s, rc=%(rc)d, ret=" -"%(ret)s." -msgstr "" -"Vytvoření ID hardwaru úložiště selhalo. Zavaděč %(initiator)s, rc=%(rc)d, " -"ret=%(ret)s." - -#, python-format -msgid "DELETE call failed for %s!" -msgstr "Volání smazání selhalo u %s!" - -#, python-format -msgid "Deadlock detected when running '%(func_name)s': Retrying..." -msgstr "" -"Při provádění '%(func_name)s' zjištěno zablokování. Bude proveden nový " -"pokus..." - -#, python-format -msgid "Delete Snapshot id not found. Removing from cinder: %(id)s Ex: %(msg)s" -msgstr "" -"ID mazaného snímku nenalezeno. Odstraňování z cinder: %(id)s Výjimka: %(msg)s" - -#, python-format -msgid "Delete volume id not found. Removing from cinder: %(id)s Ex: %(msg)s" -msgstr "" -"ID mazanéhosvazku nenalezeno. 
Odstraňování z cinder: %(id)s Výjimka: %(msg)s" - -#, python-format -msgid "Deleting image in unexpected status: %(image_status)s." -msgstr "Mazání obrazu v neočekávaném stavu: %(image_status)s." - -#, python-format -msgid "Destination %s already exists." -msgstr "Cíl %s již existuje." - -msgid "" -"Destination volume type is different from source volume type for an " -"encrypted volume. Encrypted backup restore has failed." -msgstr "" -"Cílový typ svazku se liší od zdrojového typu zašifrovaného svazku. Obnovení " -"zašifrované zálohy selhalo." - -msgid "Detected snapshot stuck in creating status, setting to ERROR." -msgstr "Objeven snímek zaseknutý ve stavu creating, nastavování na ERROR." - -#, python-format -msgid "Detected volume stuck in %(curr_status)s status, setting to ERROR." -msgstr "" -"Objeven svazek zaseknutý ve stavu %(curr_status)s, nastavování na ERROR." - -msgid "Discover file retries exhausted." -msgstr "Vyčerpány pokusy o zjištění souboru." - -msgid "Driver didn't return connection info from terminate_connection call." -msgstr "Ovladač nevrátil informace o připojení z volání o ukončení připojení." - -msgid "Driver didn't return connection info, can't add zone." -msgstr "Ovladač nevrátil informace o připojení, nelze přidat zónu." - -#, python-format -msgid "" -"Driver path %s is deprecated, update your configuration to the new path." -msgstr "" -"Cesta ovladače %s je zastaralá, aktualizujte svá nastavení na novou cestu." - -#, python-format -msgid "Error encountered translating config_string: %(config_string)s to dict" -msgstr "" -"Při překladu řetězce nastavení se objevila chyba: %(config_string)s do dict" - -#, python-format -msgid "Error finding LUNs for volume %s. Verify volume exists." -msgstr "Chyba při hledání LUN pro svazek %s. Ověřování zda svazek existuje." 
- -#, python-format -msgid "" -"Error in filtering function '%(function)s' : '%(error)s' :: failing host" -msgstr "" -"Chyba ve funkce filtrování '%(function)s' : '%(error)s' :: hostitel selhává" - -#, python-format -msgid "" -"Error in goodness_function function '%(function)s' : '%(error)s' :: " -"Defaulting to a goodness of 0" -msgstr "" -"Chyba ve funkcí Goodness '%(function)s' : '%(error)s' :: Je použita její " -"minimální hodnota 0" - -#, python-format -msgid "Error mapping LUN. Code :%(code)s, Message: %(message)s" -msgstr "Chyba při mapování LUN. Kód: %(code)s, zpráva: %(message)s" - -#, python-format -msgid "Error occurred while deleting backing: %s." -msgstr "Při mazání zálohy nastala chyba: %s." - -#, python-format -msgid "Error occurred while deleting descriptor: %s." -msgstr "Při mazání popisovače nastala chyba: %s." - -#, python-format -msgid "Error occurred while deleting temporary disk: %s." -msgstr "Při mazání dočasného disku nastala chyba: %s." - -#, python-format -msgid "Error refreshing volume info. Message: %s" -msgstr "Chyba při obnovování informaci o svazku. Zpráva: %s" - -#, python-format -msgid "Error running SSH command: %s" -msgstr "Chyba při provádění příkazu SSH: %s" - -#, python-format -msgid "Error unmapping LUN. Code :%(code)s, Message: %(message)s" -msgstr "Chyba při rušení mapování LUN. Kód: %(code)s, zpráva: %(message)s" - -#, python-format -msgid "Error updating agent-type for volume %s." -msgstr "Chyba při aktualizaci typu agentu ve svazku %s." - -msgid "Error while listing objects, continuing with delete." -msgstr "Chyba při vypisování objektů, pokračuje se ve smazání." - -#, python-format -msgid "Exception during cache cleaning %(share)s. Message - %(ex)s" -msgstr "Výjimka při čištění mezipaměti %(share)s. 
Zpráva - %(ex)s" - -#, python-format -msgid "Exception during deleting %s" -msgstr "Během mazání %s nastala výjimka" - -#, python-format -msgid "Exception during mounting %s" -msgstr "Při připojování %s došlo k výjimce" - -#, python-format -msgid "Exception during unmounting %s" -msgstr "Při odpojování nastala výjimka %s" - -#, python-format -msgid "Exception moving file %(src)s. Message - %(e)s" -msgstr "Výjimka při přesunování souboru %(src)s. Zpráva - %(e)s" - -#, python-format -msgid "Exception moving file %(src)s. Message: %(e)s" -msgstr "Výjimka při přesunování souboru %(src)s. Zpráva: %(e)s" - -#, python-format -msgid "" -"Exception while creating image %(image_id)s snapshot. Exception: %(exc)s" -msgstr "Při vytváření snímku obrazu %(image_id)s nastala výjimka: %(exc)s" - -#, python-format -msgid "" -"Exception while registering image %(image_id)s in cache. Exception: %(exc)s" -msgstr "Výjimka při registraci obrazu %(image_id)s v mezipaměti: %(exc)s" - -#, python-format -msgid "" -"Extension %(ext_name)s: Cannot extend resource %(collection)s: No such " -"resource" -msgstr "" -"Rozšíření %(ext_name)s: nelze rozšířit zdroj %(collection)s: Žádný takový " -"zdroj" - -#, python-format -msgid "Extra spec %(old)s is deprecated. Use %(new)s instead." -msgstr "" -"Dodatečná specifikace %(old)s je zastaralá. Místo toho použijte %(new)s." - -#, python-format -msgid "Extra spec %(old)s is obsolete. Use %(new)s instead." -msgstr "Dodatečná specifikace %(old)s je zrušena. Místo toho použijte %(new)s." - -msgid "" -"Extra spec key 'storagetype:pool' is obsoleted since driver version 5.1.0. " -"This key will be ignored." -msgstr "" -"Klíč dodatečné specifikace 'storagetype:pool' je zastaralý od verze ovladače " -"5.1.0. Tento klíč bude ignorován." - -#, python-format -msgid "FAST is enabled. Policy: %(fastPolicyName)s." -msgstr "FAST je povoleno. Zásada: %(fastPolicyName)s." 
- -#, python-format -msgid "" -"Failed target removal because target or ACL's couldn't be found for iqn: %s." -msgstr "" -"Odstranění cíle selhalo protože cíl nebo ACL nemohly být v iqn nalezeny: %s." - -#, python-format -msgid "" -"Failed terminating the connection of volume %(volume_id)s, but it is " -"acceptable." -msgstr "" -"Ukončení spojení se svazkem %(volume_id)s selhalo, selhání je přijatelné." - -#, python-format -msgid "Failed to activate volume copy throttling: %(err)s" -msgstr "Nelze aktivovat přiškrcení kopie svazku: %(err)s" - -#, python-format -msgid "Failed to add host group: %s" -msgstr "Nelze přidat skupinu hostitele: %s" - -#, python-format -msgid "Failed to associate qos specs %(id)s with type: %(vol_type_id)s" -msgstr "Přidružení specifikace qos %(id)s k typu: %(vol_type_id)s selhalo" - -#, python-format -msgid "Failed to create new image-volume cache entry. Error: %(exception)s" -msgstr "" -"Nelze vytvořit novou položku obrazu-svazku v mezipaměti. Výjimka: " -"%(exception)s" - -#, python-format -msgid "Failed to create pair: %s" -msgstr "Nelze vytvořit pár: %s" - -#, python-format -msgid "" -"Failed to create volume from image-volume cache, will fall back to default " -"behavior. Error: %(exception)s" -msgstr "" -"Nelze vytvořit svazek z mezipaměti obrazu-svazku, bude použito výchozí " -"chování. Chyba: %(exception)s" - -#, python-format -msgid "Failed to destroy Storage Group %s." -msgstr "Nelze zničit skupinu úložiště %s." - -#, python-format -msgid "Failed to disassociate qos specs %(id)s with type: %(vol_type_id)s" -msgstr "Odloučení specifikace qos %(id)s od typu: %(vol_type_id)s selhalo" - -#, python-format -msgid "Failed to disassociate qos specs %s." -msgstr "Odloučení specifikace qos %s selhalo." - -#, python-format -msgid "Failed to discard zero page: %s" -msgstr "Nelze zahodit nulovou stránku: %s" - -msgid "Failed to get Raid Snapshot ID and did not store in snapshot." 
-msgstr "Nelze získat ID Raid snímku a ve snímku nebylo uloženo." - -msgid "Failed to get target pool id." -msgstr "Nelze získat id cílové zásoby." - -#, python-format -msgid "Failed to invoke ems. Message : %s" -msgstr "Nelze zavolat ems. Zpráva: %s" - -#, python-format -msgid "Failed to load extension %(classpath)s: %(exc)s" -msgstr "Nelze načíst rozšíření %(classpath)s: %(exc)s" - -#, python-format -msgid "Failed to load extension %(ext_factory)s: %(exc)s" -msgstr "Nelze načít rozšíření %(ext_factory)s: %(exc)s" - -#, python-format -msgid "Failed to load extension %(ext_name)s: %(exc)s" -msgstr "Nelze načíst rozšíření %(ext_name)s: %(exc)s" - -#, python-format -msgid "Failed to manage virtual volume %(disp)s due to error during retype." -msgstr "" -"Nelze spravovat virtuální svazek %(disp)s kvůli chybě během přetypování." - -#, python-format -msgid "" -"Failed to migrate volume. The destination volume %(vol)s is not deleted " -"since the source volume may have been deleted." -msgstr "" -"Nelze přesunout svazek. Cílový svazek %(vol)s nebyl smazán protože zdrojový " -"svazek mohl být smazán." - -#, python-format -msgid "" -"Failed to migrate: %(volumeName)s from default source storage group for FAST " -"policy: %(sourceFastPolicyName)s. Attempting cleanup... " -msgstr "" -"Nelze přesunout: %(volumeName)s z výchozí skupiny zdrojového úložiště pro " -"zásadu FAST %(sourceFastPolicyName)s. Pokus o vyčištění..." - -#, python-format -msgid "Failed to query pool %(id)s status %(ret)d." -msgstr "Dotaz na stav %(ret)d zásoby %(id)s selhal." - -#, python-format -msgid "Failed to refresh mounts, reason=%s" -msgstr "Nelze obnovit připojení, důvod=%s" - -#, python-format -msgid "Failed to restart horcm: %s" -msgstr "Nelze restartovat horcm: %s" - -#, python-format -msgid "Failed to run command: %s." -msgstr "Nelze provést příkaz: %s." - -#, python-format -msgid "Failed to run lsguicapability. Exception: %s." -msgstr "Nelze spustit lsguicapability. Výjimka: %s." 
- -#, python-format -msgid "" -"Failed to save iscsi LIO configuration when modifying volume id: %(vol_id)s." -msgstr "Nelze uložit nastavení iscsi LIO při měnění id svazku %(vol_id)s." - -#, python-format -msgid "Failed to setup blkio cgroup to throttle the device '%(device)s'." -msgstr "" -"Nelze nastavit kontrolní skupinu vstupu/výstupu bloku pro škrcení zařízení " -"'%(device)s'." - -#, python-format -msgid "Failed to unmount previous mount: %s" -msgstr "Nelze odpojit předchozí připojení: %s" - -#, python-format -msgid "" -"Failed to update %(conf)s for volume id %(vol_id)s after removing iscsi " -"target. %(conf)s does not exist." -msgstr "" -"Nelze aktualizovat %(conf)s pro svazek s id %(vol_id)s po odstranění cíle " -"iscsi. %(conf)s neexistuje." - -#, python-format -msgid "Failure deleting job %s." -msgstr "Nelze smazat úkol %s." - -#, python-format -msgid "Failure deleting temp snapshot %s." -msgstr "Mazání dočasného snímku %s selhalo." - -#, python-format -msgid "Failure deleting the snapshot %(snapshot_id)s of volume %(volume_id)s." -msgstr "Mazání snímku %(snapshot_id)s svazku %(volume_id)s selhalo." - -msgid "" -"Fallocate not supported by current version of glusterfs. So falling back to " -"dd." -msgstr "" -"Fallocate není podporováno současnou verzí glusterfs. Přechází se zpět na dd." - -#, python-format -msgid "" -"Flexvisor failed to delete volume %(id)s from group %(vgid)s due to " -"%(status)s." -msgstr "" -"Flexvisor nemohl smazat svazek %(id)s ze skupiny %(vgid)s z důvodu%(status)s." - -#, python-format -msgid "Flexvisor failed to delete volume %(id)s from the group %(vgid)s." -msgstr "Flexvisor nemohl smazat svazek %(id)s ze skupiny %(vgid)s." - -#, python-format -msgid "" -"Found invalid iSCSI IP address(s) in configuration option(s) " -"hpe3par_iscsi_ips or iscsi_ip_address '%s.'" -msgstr "" -"Ve volbách nastavení hpe3par_iscsi_ips nebo iscsi_ip_address nalezeny " -"neplatné IP adresy iSCSI: '%s'." 
- -msgid "Goodness function not set :: defaulting to minimal goodness rating of 0" -msgstr "Funkce Goodness není nastavena :: je použita její minimální hodnota 0" - -#, python-format -msgid "Got disconnected; trying to reconnect. (%s)" -msgstr "Došlo k odpojení; pokus o znovu připojení. (%s)" - -#, python-format -msgid "" -"Group sync name not found for target group %(target)s on %(storageSystem)s." -msgstr "" -"Název synchronizace skupiny nenalezen v cílové skupině %(target)s na " -"%(storageSystem)s." - -#, python-format -msgid "" -"HPELeftHand API is version %(current)s. A minimum version of %(min)s is " -"needed for manage/unmanage support." -msgstr "" -"Verze API HPELeftHand: %(current)s. Pro podporu vytváření/rušení spravování " -"je vyžadována verze %(min)s nebo vyšší." - -#, python-format -msgid "" -"Hint \"%s\" dropped because ExtendedServerAttributes not active in Nova." -msgstr "" -"Nápověda \"%s\" zahozena, protože rozšířené vlastnosti serveru nejsou " -"aktivní v Nova." - -#, python-format -msgid "" -"Hint \"%s\" dropped because Nova did not return enough information. Either " -"Nova policy needs to be changed or a privileged account for Nova should be " -"specified in conf." -msgstr "" -"Nápověda \"%s\" zahozena, protože Nova nevrátila dostatek informací. Buď je " -"třeba změnit zásadu Nova, nebo je třeba v nastavení zadat výsadní účet pro " -"Nova se všemi potřebnými oprávněními." - -msgid "" -"Host exists without CHAP credentials set and has iSCSI attachments but CHAP " -"is enabled. Updating host with new CHAP credentials." -msgstr "" -"Hostitel je bez nastavených přihlašovacích údajů CHAP, má připojení iSCSI, " -"ale CHAP je povolen. Aktualizování hostitele pomocí nových přihlašovacích " -"údajů CHAP." - -msgid "Host has no CHAP key, but CHAP is enabled." -msgstr "Hostiteli nemá žádný klíč CHAP, ale CHAP je povolen." - -#, python-format -msgid "IOnetworks GET failed (%d)" -msgstr "Získání IOnetworks selhalo (%d)" - -msgid "IQN already existed." 
-msgstr "IQN již existuje." - -msgid "IQN has been used to create map." -msgstr "IQN bylo použito k vytvoření mapy." - -msgid "ISCSI provider_location not stored, using discovery" -msgstr "Umístění poskytovatele ISCSI neuloženo, bude se zjišťovat" - -msgid "Id not in sort_keys; is sort_keys unique?" -msgstr "id není v seřazení klíčů; je seřazení jedinečné?" - -msgid "Image delete encountered an error." -msgstr "Při mazání obrazu nastala chyba." - -#, python-format -msgid "Image-volume cache for host %(host)s does not have enough space (GB)." -msgstr "" -"Mezipaměť obrazu-svazku pro hostitele %(host)s nemá dostatek místa (GB)." - -msgid "Inconsistent Luns." -msgstr "Nekonzistentní Lun." - -#, python-format -msgid "" -"Incorrect value error: %(blocksize)s, it may indicate that " -"'volume_dd_blocksize' was configured incorrectly. Fall back to default." -msgstr "" -"Chyba kvůli nesprávné hodnotě: %(blocksize)s, to může naznačovat, že " -"'volume_dd_blocksize' nebylo nastaveno správně. Bude použita výchozí hodnota." - -#, python-format -msgid "" -"Insufficient free space for thin provisioning. The ratio of provisioned " -"capacity over total capacity %(provisioned_ratio).2f has exceeded the " -"maximum over subscription ratio %(oversub_ratio).2f on host %(host)s." -msgstr "" -"Pro mělké poskytování není dostatek volného místa. Poměr poskytované " -"kapacity na celkovou %(provisioned_ratio).2f překročil maximální poměr " -"odběru %(oversub_ratio).2f na hostiteli %(host)s." - -#, python-format -msgid "" -"Insufficient free space for volume creation on host %(host)s (requested / " -"avail): %(requested)s/%(available)s" -msgstr "" -"Není dostatek volného místa pro vytvoření svazku na hostiteli %(host)s. " -"(Požadováno: %(requested)s/Dostupné %(available)s)" - -#, python-format -msgid "" -"Insufficient free space for volume creation. Total capacity is %(total).2f " -"on host %(host)s." -msgstr "" -"Není dostatek volného místa pro vytvoření svazku. 
Celková kapacita je " -"%(total).2f na hostiteli %(host)s." - -#, python-format -msgid "Invalid IP address format '%s'" -msgstr "Neplatný formát IP adresy '%s'" - -#, python-format -msgid "" -"Invalid goodness result. Result must be between 0 and 100. Result " -"generated: '%s' :: Defaulting to a goodness of 0" -msgstr "" -"Neplatný výsledek funkce goodness. Výsledek musí být mezi 0 až 100. " -"Vypočtený výsledek '%s' :: Je použita její minimální hodnota 0" - -#, python-format -msgid "Invalid trace flag: %s" -msgstr "Neplatný příznak sledování: %s" - -msgid "" -"It is not the recommended way to use drivers by NetApp. Please use " -"NetAppDriver to achieve the functionality." -msgstr "" -"Toto není doporučený způsob používání ovladačů od NetApp. Pro dosažení této " -"funkce prosím použijte NetAppDriver." - -#, python-format -msgid "" -"LUN misalignment may occur for current initiator group %(ig_nm)s) with host " -"OS type %(ig_os)s. Please configure initiator group manually according to " -"the type of the host OS." -msgstr "" -"U současné skupině zavaděče %(ig_nm)s s hostitelem mající typ OS %(ig_os)s " -"se může objevit nevyrovnanost LUN. Prosím nastavte skupinu zavaděče ručně " -"podle typu OS na hostiteli." - -msgid "Least busy iSCSI port not found, using first iSCSI port in list." -msgstr "" -"Nejméně zaneprázdněný port iSCSI nenalezen, použit první port v seznamu." - -#, python-format -msgid "" -"Lun is not in lungroup. Lun id: %(lun_id)s, lungroup id: %(lungroup_id)s" -msgstr "" -"Lun není ve skupine lun. ID LUN %(lun_id)s, ID skupiny LUN: %(lungroup_id)s." - -#, python-format -msgid "Message - %s." -msgstr "Zpráva - %s." - -#, python-format -msgid "More than one valid preset was detected, using %s" -msgstr "Byly zjištěny více než jedny platné předvolby, jsou použity %s" - -#, python-format -msgid "Mount point %(name)s already exists. Message: %(msg)s" -msgstr "Bod připojení %(name)s již existuje. Zpráva %(msg)s" - -msgid "No VLUN contained CHAP credentials. 
Generating new CHAP key." -msgstr "" -"Žádný VLUN neobsahoval přihlašovací údaje CHAP. Vytváření nového klíče CHAP." - -#, python-format -msgid "No backing file found for %s, allowing snapshot to be deleted." -msgstr "Pro %s nenalezen žádný zálohovací soubor, smazání snímku povoleno." - -#, python-format -msgid "No entry in LUN table for volume/snapshot %(name)s." -msgstr "Žádná položka v tablce LUN pro svazek/snímek %(name)s." - -msgid "No host or VLUNs exist. Generating new CHAP key." -msgstr "Žádný VLUN nebo hostitel neexistuje. Vytváření nového klíče CHAP." - -msgid "No mapping." -msgstr "Žádné mapování." - -#, python-format -msgid "No port group found in masking view %(mv)s." -msgstr "V maskování %(mv)s nenalezena žádná skupiny portů." - -msgid "No protection domain name or id was specified in configuration." -msgstr "V nastavení nebylo zadán žádný název nebo id ochranné domény." - -#, python-format -msgid "No status payload for volume %s." -msgstr "Žádný obsah stavu svazku %s." - -#, python-format -msgid "" -"No storage group found. Performing rollback on Volume: %(volumeName)s To " -"return it to the default storage group for FAST policy %(fastPolicyName)s." -msgstr "" -"Nenalezena žádná skupina úložiště. Provádění zpětného vrácení svazku " -"%(volumeName)s pro návrat do výchozí skupiny úložiště pro zásadu FAST " -"%(fastPolicyName)s." - -#, python-format -msgid "No storage pool found with available capacity %s." -msgstr "Nenalezena žádná zásoba úložiště s dostupnou vlastností %s." - -msgid "No storage pool name or id was found." -msgstr "Nebyla nalezen žádný název zásoby úložiště nebo jeho id." - -msgid "No such host alias name." -msgstr "Žádný takový název zkratky hostitele." - -#, python-format -msgid "No target ports found in masking view %(maskingView)s." -msgstr "V maskování %(maskingView)s nenalezeny žádné cílové porty." - -msgid "No volume node in XML content." -msgstr "V obsahu XML není žádný uzel svazku." 
- -#, python-format -msgid "No weighed hosts found for volume with properties: %s" -msgstr "Nenalezeni žádní vážení hostitelé pro svazek s vlastnostmi %s" - -msgid "Non-iSCSI VLUN detected." -msgstr "Zjištěn VLUN mimo iSCSI." - -#, python-format -msgid "Not deleting key %s" -msgstr "Klíč %s nebude smazán" - -#, python-format -msgid "Persistence file already exists for volume, found file at: %s" -msgstr "Soubor přetrvávání svazku již existuje, nalezen v %s" - -#, python-format -msgid "" -"Pre check for deletion. Volume: %(volumeName)s is part of a storage group. " -"Attempting removal from %(storageGroupInstanceNames)s." -msgstr "" -"Předkontrola mazání. Svazek %(volumeName)s je součástí skupiny úložiště. " -"Pokus o odstranění z %(storageGroupInstanceNames)s." - -#, python-format -msgid "" -"Production use of \"%(backend)s\" backend requires the Cinder controller to " -"have multipathing properly set up and the configuration option \"%(mpflag)s" -"\" to be set to \"True\"." -msgstr "" -"Použití podpůrné vrstvy \"%(backend)s\" v ostrém provozu vyžaduje, aby " -"kontrolér Cinder měl správně nastaveny vícenásobné cesty a volba nastavení " -"\"%(mpflag)s\" nastavenu na \"True\"." - -#, python-format -msgid "Property %s already exists." -msgstr "Vlastnost %s již existuje." - -#, python-format -msgid "" -"RBD image for backup %(backup)s of volume %(volume)s not found. Deleting " -"backup metadata." -msgstr "" -"Obraz RBD zálohy %(backup)s svazku %(volume)s nenalezen. Mazání popisných " -"dat zálohy." - -#, python-format -msgid "Reconnect attempt %(attempt)s failed. Next try in %(backoff).2fs." -msgstr "" -"Pokus o znovu připojení %(attempt)s selhal. Další pokus za %(backoff).2fs." - -#, python-format -msgid "Registration of image volume URI %(uri)s to image %(image_id)s failed." -msgstr "Registrace URI svazku obrazu %(uri)s k obrazu %(image_id)s selhala." 
- -#, python-format -msgid "" -"Remaining capacity %(remainingCapacityGb)s GBs is determined from SRP pool " -"capacity and not the SLO capacity. Performance may not be what you expect." -msgstr "" -"Zbývající místo %(remainingCapacityGb)s GB je odvozeno od schopnosti zásoby " -"SRP a ne SLO. Výkon nemusí být takový, jaký jste očekávali." - -#, python-format -msgid "Rename failure in cleanup of cDOT QOS policy group %(name)s: %(ex)s" -msgstr "" -"Selhání přejmenování při čistění skupiny zásady QOS cDOT %(name)s: %(ex)s" - -#, python-format -msgid "" -"Report interval must be less than service down time. Current config " -"service_down_time: %(service_down_time)s, report_interval for this: service " -"is: %(report_interval)s. Setting global service_down_time to: " -"%(new_down_time)s" -msgstr "" -"Interval hlášení musí být menší než doba nečinnosti. Současné nastavení doby " -"nečinnosti služby: %(service_down_time)s, interval hlášení pro tuto službu " -"je %(report_interval)s. Nastavování doby nečinnosti globální služby na: " -"%(new_down_time)s" - -msgid "Requested image is not accessible by current Tenant." -msgstr "Požadovaný obraz není dostupný současnému nájemníku." - -msgid "Returning as clean tmp vol job already running." -msgstr "Probíhá vrácení protože úkol čištění dočasného svazku již probíhá." - -#, python-format -msgid "" -"ScaleIO only supports volumes with a granularity of 8 GBs. The new volume " -"size is: %d." -msgstr "" -"ScaleIO podporuje pouze svazky s rozdělením po 8GB. Nová velikost svazku je: " -"%d." - -#, python-format -msgid "" -"Share %s ignored due to invalid format. Must be of form address:/export." -msgstr "" -"Sdílení %s ignorováno kvůli neplatnému formátu. Musí být ve formátu adresa:/" -"export." - -msgid "Silent failure of target removal detected, retry...." -msgstr "Zjištěno tiché selhání odstranění cíle, bude provede nový pokus..." - -#, python-format -msgid "Snapshot \"%s\" not found." -msgstr "Snímek \"%s\" nenalezen." 
- -#, python-format -msgid "Snapshot %(name)s already exists. Message: %(msg)s" -msgstr "Snímek %(name)s již existuje. Zpráva: %(msg)s" - -#, python-format -msgid "Snapshot %s already deleted." -msgstr "Snímek %s již je smazán." - -#, python-format -msgid "Snapshot still %(status)s Cannot delete snapshot." -msgstr "Snímek je stále ve stavu %(status)s. Nelze ho smazat." - -#, python-format -msgid "Storage group %(name)s already exists. Message: %(msg)s" -msgstr "Skupina úložiště %(name)s již existuje. Zpráva: %(msg)s" - -#, python-format -msgid "Storage sync name not found for target %(target)s on %(storageSystem)s." -msgstr "" -"Název synchronizace úložiště nenalezen v cíli %(target)s na " -"%(storageSystem)s." - -msgid "Storage-assisted migration failed during retype." -msgstr "Přesun za pomocí úložiště selhal během přetypování." - -#, python-format -msgid "" -"The \"netapp_size_multiplier\" configuration option is deprecated and will " -"be removed in the Mitaka release. Please set \"reserved_percentage = %d\" " -"instead." -msgstr "" -"Volba nastavení \"netapp_size_multiplier\" je zastaralá a bude odstraněna " -"ve verzi Mitaka. Místo toho prosím nastavte \"reserved_percentage = %d\"." - -msgid "The 'hplh' prefix is deprecated. Use 'hpelh' instead." -msgstr "Předpona 'hplh' je zastaralá. Místo toho použijte 'hpelh'." - -msgid "The MCS Channel is grouped." -msgstr "Kanál MCS je seskupen." - -msgid "" -"The NAS file operations will be run as root, allowing root level access at " -"the storage backend." -msgstr "" -"Operace se souborem NAS budou spouštěny pod účtem root, umožňující přístup k " -"podpůrné vrstvě úložiště na úrovni správce." - -#, python-format -msgid "" -"The NAS file operations will be run as root: allowing root level access at " -"the storage backend. This is considered an insecure NAS environment. Please " -"see %s for information on a secure NAS configuration." 
-msgstr "" -"Operace se souborem NAS budou spouštěny pod účtem root: umožňující přístup k " -"podpůrné vrstvě úložiště na úrovni správce. Toto je považováno za nebezpečné " -"prostředí NAS. Pro další informace o bezpečném nastavení NFS si přečtěte %s." - -msgid "" -"The NAS file permissions mode will be 666 (allowing other/world read & write " -"access)." -msgstr "" -"Režim oprávnění souboru NAS bude 666 (umožňující ostatním/světu přístup ke " -"čtení a zápisu)." - -#, python-format -msgid "" -"The NAS file permissions mode will be 666 (allowing other/world read & write " -"access). This is considered an insecure NAS environment. Please see %s for " -"information on a secure NFS configuration." -msgstr "" -"Režim oprávnění souboru NAS bude 666 (umožňující ostatním/světu přístup ke " -"čtení a zápisu). Toto je považováno za nebezpečné prostředí NAS. Pro další " -"informace o bezpečném nastavení NFS si přečtěte %s." - -msgid "" -"The VMAX plugin only supports Retype. If a pool based migration is necessary " -"this will happen on a Retype From the command line: cinder --os-volume-api-" -"version 2 retype --migration-policy on-demand" -msgstr "" -"Zásuvný modul VMAX podporuje pouze přetypování. Pokud je přesun pomocí " -"zásoby nutný, bude proveden při přetypování z příkazové řádky: cinder --os-" -"volume-api-version 2 retype --migration-policy on-" -"demand" - -#, python-format -msgid "" -"The colon in vendor name was replaced by underscore. Updated vendor name is " -"%(name)s\"." -msgstr "" -"Pomlčka v názvu prodejce byla nahrazena podtržítkem. Aktualizovaný název " -"prodejce je %(name)s\"." - -#, python-format -msgid "The device %s won't be cleared." -msgstr "Zařízení %s nebude vyčištěno." - -msgid "" -"The option 'netapp_storage_pools' is deprecated and will be removed in the " -"future releases. Please use the option 'netapp_pool_name_search_pattern' " -"instead." -msgstr "" -"Volba 'netapp_storage_pools' je zastaralá a bude odstraněna v budoucích " -"verzích. 
Místo toho prosím použijte 'netapp_pool_name_search_pattern'." - -msgid "" -"The option 'netapp_volume_list' is deprecated and will be removed in the " -"future releases. Please use the option 'netapp_pool_name_search_pattern' " -"instead." -msgstr "" -"Volba 'netapp_volume_list' je zastaralá a bude odstraněna v budoucích " -"verzích. Místo toho prosím použijte 'netapp_pool_name_search_pattern'." - -msgid "" -"The primary array is not reachable at this time. Since replication is " -"enabled, listing replication targets and failing over a volume can still be " -"performed." -msgstr "" -"Hlavní pole není v současnosti dostupné. Protože je replikace povolená může " -"být stále proveden výpis cílů replikace a zavedení záložního systému." - -#, python-format -msgid "The provisioning: %(provisioning)s is not valid." -msgstr "Poskytování %(provisioning)s není platné." - -#, python-format -msgid "" -"The volume: %(volumename)s was not first part of the default storage group " -"for FAST policy %(fastPolicyName)s." -msgstr "" -"Svazek %(volumename)s nebyl první částí výchozí skupiny úložiště pro zásadu " -"FAST %(fastPolicyName)s." - -#, python-format -msgid "" -"The volume: %(volumename)s. was not first part of the default storage group " -"for FAST policy %(fastPolicyName)s." -msgstr "" -"Svazek %(volumename)s nebyl první částí výchozí skupiny úložiště pro zásadu " -"FAST %(fastPolicyName)s." - -#, python-format -msgid "" -"There are no datastores matching new requirements; can't retype volume: %s." -msgstr "" -"Neexistují žádná datová úložiště odpovídající novým požadavkům; nelze " -"přetypovat svazek %s." - -#, python-format -msgid "Trying to boot from an empty volume: %s." -msgstr "Pokus o zavedení z prázdného svazku: %s." - -#, python-format -msgid "" -"Unable to clone image_volume for image %(image_id)s will not create cache " -"entry." -msgstr "" -"Nelze klonovat svazek obrazu %(image_id)s, položka v mezipaměti nebude " -"vytvořena." 
- -#, python-format -msgid "Unable to create folder %s" -msgstr "Nelze vytvořit složku %s" - -#, python-format -msgid "Unable to create snapshot %s" -msgstr "Nelze vytvořit snímek %s" - -#, python-format -msgid "Unable to delete Protection Group Snapshot: %s" -msgstr "Nelze smazat snímek ochranné skupiny: %s" - -#, python-format -msgid "Unable to delete Protection Group: %s" -msgstr "Nelze smazat ochrannou skupinu: %s" - -#, python-format -msgid "Unable to delete space %(space)s" -msgstr "Nelze smazat prostor %(space)s" - -#, python-format -msgid "" -"Unable to ensure space for image-volume in cache. Will skip creating entry " -"for image %(image)s on host %(host)s." -msgstr "" -"Nelze zajistit prostor v mezipaměti pro obraz-svazek. Bude přeskočeno " -"vytváření záznamu obrazu %(image)s na hostiteli %(host)s." - -#, python-format -msgid "" -"Unable to extend volume: %(vol)s to size: %(size)s on current datastore due " -"to insufficient space." -msgstr "" -"Nelze rozšířit svazek: %(vol)s na velikost %(size)s GB na současném datovém " -"úložišti z důvodu nedostatku místa." - -#, python-format -msgid "Unable to find Masking view: %(view)s." -msgstr "Nelze najít maskování: %(view)s." - -#, python-format -msgid "Unable to find snapshot %s" -msgstr "Nelze najít snímek %s" - -msgid "" -"Unable to get internal tenant context: Missing required config parameters." -msgstr "" -"Nelze získat vnitřní kontext nájemníka: Chybí požadované parametry nastavení." - -msgid "Unable to get rados pool stats." -msgstr "Nelze získat statistiky zásoby rados." - -msgid "Unable to get storage tiers from tier policy rule." -msgstr "Nelze získat vrstvy úložiště z pravidla zásady vrstvy." - -#, python-format -msgid "Unable to locate volume:%s" -msgstr "Nelze nalézt svazek: %s" - -msgid "Unable to poll cluster free space." -msgstr "Nelze se dotázat na prostor bez clusteru." 
- -#, python-format -msgid "Unable to rename %(old_name)s, error message: %(error)s" -msgstr "Nelze přejmenovat %(old_name)s, chybová zpráva: %(error)s" - -#, python-format -msgid "Unable to update host type for host with label %(l)s. %(e)s" -msgstr "Nelze aktualizovat typ hostitele se jmenovkou %(l)s. %(e)s" - -#, python-format -msgid "Unable to update stats on non-initialized Volume Group: %s" -msgstr "Nelze aktualizovat statistiky v nezavedené skupině svazku: %s" - -#, python-format -msgid "Unexpected exception during image cloning in share %s" -msgstr "Neočekávaná výjimka při klonování obrazu ve sdílení %s" - -msgid "Unexpected exception while listing used share." -msgstr "Neočekávaná výjimka při vypisování použitých sdílení." - -msgid "Unexpected exception while short listing used share." -msgstr "" -"Při zkráceném vypisování použitého sdílení došlo k neočekávané výjimce." - -#, python-format -msgid "" -"Unknown error occurred while checking mount point: %s Trying to continue." -msgstr "" -"Při kontrole bodu připojení nastala neznámá chyba: %s. Probíhá pokus o " -"pokračování." - -#, python-format -msgid "Update driver status failed: %(config_group)s is uninitialized." -msgstr "Aktualizace stavu ovladače selhala: %(config_group)s není zavedeno." - -msgid "Use of empty request context is deprecated" -msgstr "Použití kontextu prázdné žádosti je zastaralé" - -#, python-format -msgid "" -"Vendor unique property \"%(property)s\" must start with vendor prefix with " -"colon \"%(prefix)s\". The property was not registered on capabilities list." -msgstr "" -"Vlastnost jedinečná prodejci \"%(property)s\" musí začínat předponou " -"prodejce \"%(prefix)s\" s pomlčkou. Vlastnost nebyla registrována v seznamu " -"schopností." - -msgid "Verify certificate is not set, using default of False." -msgstr "Ověření certifikátu nenastaveno, použito výchozí nastavení: False." - -#, python-format -msgid "Virtual Volume Set '%s' doesn't exist on array." 
-msgstr "Sada virtuálního svazku '%s' neexistuje v poli." - -#, python-format -msgid "Volume \"%s\" not found." -msgstr "Svazek \"%s\" nenalezen." - -#, python-format -msgid "Volume %(name)s already presented (%(status)d)!" -msgstr "Svazek %(name)s již je přítomen na (%(status)d)!" - -#, python-format -msgid "Volume %(volume)s is not in any masking view." -msgstr "Svazek %(volume)s není v žádném maskování." - -#, python-format -msgid "" -"Volume %(volumeName)s was not first part of the default storage group for " -"the FAST Policy." -msgstr "" -"Svazek %(volumeName)s nebyl první částí výchozí skupiny úložiště pro zásadu " -"FAST." - -#, python-format -msgid "Volume %(volume_id)s already deleted." -msgstr "Svazek %(volume_id)s již byl smazán." - -#, python-format -msgid "Volume %(volume_id)s cannot be retyped because it has snapshot." -msgstr "Svazek %(volume_id)s nemůže být přetypován protože má snímek." - -#, python-format -msgid "Volume %(volume_id)s cannot be retyped during attachment." -msgstr "Svazek %(volume_id)s nemůže být během připojování přetypován." - -#, python-format -msgid "Volume %s already deleted." -msgstr "Svazek %s již je smazán." - -#, python-format -msgid "Volume %s does not exist." -msgstr "Svazek %s neexistuje." - -#, python-format -msgid "Volume %s does not have provider_location specified, skipping" -msgstr "Svazek %s nemá zadáno umístění poskytovatele, je přeskočen" - -#, python-format -msgid "Volume %s does not have provider_location specified, skipping." -msgstr "Svazek %s nemá zadáno umístění poskytovatele, je přeskočen." - -#, python-format -msgid "Volume %s is not found!, it may have been deleted." -msgstr "Svazek %s nenalezen! Mohl být smazán." - -#, python-format -msgid "Volume %s was not found while trying to delete it." -msgstr "Svazek %s nebyla nalezen při pokusu o jeho smazání." - -#, python-format -msgid "" -"Volume : %(volumeName)s is not currently belonging to any storage group." 
-msgstr "Svazek: %(volumeName)s v současnosti nepatří k žádné skupině úložiště." - -#, python-format -msgid "Volume copy job for src vol %s not found." -msgstr "Úkol kopírování svazku pro zdrojový svazek %s nenalezen." - -#, python-format -msgid "Volume deletion failed with message: %s" -msgstr "Smazání svazku selhalo se zprávou: %s" - -#, python-format -msgid "Volume is not writable. Please broaden the file permissions. Mount: %s" -msgstr "" -"Nelze zapisovat na svazek. Prosím rozšiřte oprávnění souboru. Připojení: %s" - -#, python-format -msgid "Volume not found. %s" -msgstr "Svazek nenalezen. %s" - -#, python-format -msgid "Volume path %s does not exist, nothing to remove." -msgstr "Cesta svazku %s neexistuje, není nic k odstranění." - -msgid "Volume refresh job already running. Returning..." -msgstr "Úkol obnovy svazku již běží. Vrácení..." - -#, python-format -msgid "Volume still %(status)s Cannot delete volume." -msgstr "Svazek je stále ve stavu %(status)s. Nelze ho smazat." - -msgid "Volume type will be changed to be the same as the source volume." -msgstr "Typ svazku bude změněn aby odpovídal typu zdrojového svazku" - -#, python-format -msgid "" -"Volume: %(volumeName)s Does not belong to storage group %(defaultSgName)s." -msgstr "Svazek: %(volumeName)s nepatří do skupiny úložiště %(defaultSgName)s." - -#, python-format -msgid "Volume: %(volumeName)s is not currently belonging to any storage group." -msgstr "Svazek %(volumeName)s v současnosti nepatří k žádné skupině úložiště." - -#, python-format -msgid "Volume: %s is in use, can't retype." -msgstr "Svazek %s se používá, nelze přetypovat." - -#, python-format -msgid "_get_vdisk_map_properties: Did not find a preferred node for vdisk %s." -msgstr "" -"Získání vlastnosti mapy virtuálního disku: Nenalezen upřednostňovaný uzel " -"pro virtuální disk %s." - -#, python-format -msgid "_migrate_cleanup on : %(volumeName)s." -msgstr "Vyčištění přesunu pro %(volumeName)s." 
- -#, python-format -msgid "_migrate_rollback on : %(volumeName)s." -msgstr "Zpětné vrácení přesunu pro %(volumeName)s." - -msgid "_remove_device: invalid properties or device." -msgstr "Odstranění zařízení: Neplatné vlastnosti nebo zařízení." - -#, python-format -msgid "" -"_unmap_vdisk_from_host: Multiple mappings of volume %(vdisk_name)s found, no " -"host specified." -msgstr "" -"Odmapování virtuálního disku od hostitele: Nalezeno vícero mapování svazku " -"%(vdisk_name)s, nezadán žádný hostitel." - -#, python-format -msgid "" -"_unmap_vdisk_from_host: No mapping of volume %(vol_name)s to any host found." -msgstr "" -"Odmapování virtuálního disku od hostitele: Nenalezeno žádné mapování svazku " -"%(vol_name)s k žádnému hostiteli." - -msgid "flush() not supported in this version of librbd" -msgstr "flush() není podporován touto verzí librbd" - -#, python-format -msgid "" -"iSCSI IP: '%s' was not found in hpe3par_iscsi_ips list defined in cinder." -"conf." -msgstr "" -"iSCSI IP: '%s' nebylo nalezeno v seznamu hpe3par_iscsi_ips zadaném v cinder." -"conf." - -#, python-format -msgid "initialize_connection: Did not find a preferred node for volume %s." -msgstr "Zavedení spojení: Nenalezen upřednostňovaný uzel pro svazek %s." - -#, python-format -msgid "ldev(%(ldev)d) is already mapped (hlun: %(hlu)d)" -msgstr "ldev(%(ldev)d) již je mapováno (hlun: %(hlu)d)" - -#, python-format -msgid "object %(key)s of type %(typ)s not found, %(err_msg)s" -msgstr "Objekt %(key)s typu %(typ)s nenalezen, %(err_msg)s" - -msgid "qemu-img is not installed." -msgstr "qemu-img není nainstalováno." 
- -#, python-format -msgid "snapshot: %s not found, skipping delete operation" -msgstr "snímek: %s nenalezeno, operace mazání je přeskočena" - -#, python-format -msgid "snapshot: %s not found, skipping delete operations" -msgstr "snímek: %s nenalezeno, operace mazání jsou přeskočeny" - -#, python-format -msgid "" -"srstatld requires WSAPI version '%(srstatld_version)s' version '%(version)s' " -"is installed." -msgstr "" -"srstatld vyžaduje verzi WSAPI '%(srstatld_version)s'. Naisntalovaná verze je " -"'%(version)s'." - -msgid "terminate_connection: lun map not found" -msgstr "Ukončení připojení: Mapa lun nenalezena" - -#, python-format -msgid "" -"unmap_vol_from_host: Multiple mappings of volume %(vol_name)s found, no host " -"specified." -msgstr "" -"Odmapování svazku od hostitele: Nalezeno vícero mapování svazku " -"%(vol_name)s, nezadán žádný hostitel." - -#, python-format -msgid "" -"unmap_vol_from_host: No mapping of volume %(vol_name)s to any host found." -msgstr "" -"Odmapování svazku od hostitele: Nenalezeno žádné mapování svazku " -"%(vol_name)s k žádnému hostiteli." - -#, python-format -msgid "" -"unmap_vol_from_host: No mapping of volume %(vol_name)s to host %(host)s " -"found." -msgstr "" -"Odmapování svazku od hostitele: Nenalezeno žádné mapování svazku " -"%(vol_name)s k hostiteli %(host)s." - -#, python-format -msgid "volume service is down. (host: %s)" -msgstr "Služba svazku je mimo provoz. (Hostitel: %s)" - -msgid "volume_tmp_dir is now deprecated, please use image_conversion_dir." -msgstr "" -"volume_tmp_dir je nyní zastaralé, prosím použijte image_conversion_dir." - -#, python-format -msgid "warning: Tried to delete vdisk %s but it does not exist." -msgstr "Varování: Pokus o smazání virtuálního disku %s, který ale neexistuje." - -#, python-format -msgid "" -"zfssa_initiator: %(ini)s wont be used on zfssa_initiator_group= %(inigrp)s." -msgstr "" -"Zavaděč zfssa: %(ini)s nebude použit ve skupině zavaděče zfssa= %(inigrp)s." 
- -msgid "" -"zfssa_initiator_config not found. Using deprecated configuration options." -msgstr "zfssa_initiator_config nenalezeno. Použity zastaralé volby nastavení." diff --git a/cinder/locale/cs/LC_MESSAGES/cinder.po b/cinder/locale/cs/LC_MESSAGES/cinder.po deleted file mode 100644 index a2a87e9a0..000000000 --- a/cinder/locale/cs/LC_MESSAGES/cinder.po +++ /dev/null @@ -1,7820 +0,0 @@ -# Translations template for cinder. -# Copyright (C) 2015 ORGANIZATION -# This file is distributed under the same license as the cinder project. -# -# Translators: -# FIRST AUTHOR , 2011 -# Zbyněk Schwarz , 2015 -# Andreas Jaeger , 2016. #zanata -msgid "" -msgstr "" -"Project-Id-Version: cinder 9.0.0.0rc2.dev202\n" -"Report-Msgid-Bugs-To: https://bugs.launchpad.net/openstack-i18n/\n" -"POT-Creation-Date: 2016-10-07 03:25+0000\n" -"MIME-Version: 1.0\n" -"Content-Type: text/plain; charset=UTF-8\n" -"Content-Transfer-Encoding: 8bit\n" -"PO-Revision-Date: 2016-02-22 01:35+0000\n" -"Last-Translator: Zbyněk Schwarz \n" -"Language: cs\n" -"Plural-Forms: nplurals=3; plural=(n==1) ? 0 : (n>=2 && n<=4) ? 1 : 2;\n" -"Generated-By: Babel 2.0\n" -"X-Generator: Zanata 3.7.3\n" -"Language-Team: Czech\n" - -#, python-format -msgid "\t%s" -msgstr "\t%s" - -#, python-format -msgid "" -"\n" -"OpenStack Cinder version: %(version)s\n" -msgstr "" -"\n" -"OpenStack Cinder verze: %(version)s\n" - -#, python-format -msgid " but size is now %d" -msgstr "ale velikost je nyní %d" - -#, python-format -msgid " but size is now %d." -msgstr "ale velikost je nyní %d." - -msgid " or " -msgstr "nebo" - -#, python-format -msgid "" -"%(driver)s manage_existing cannot manage a volume connected to hosts. Please " -"disconnect this volume from existing hosts before importing" -msgstr "" -"%(driver)s nelze spravovat existující svazek připojený k hostitelům. Před " -"importem ho prosím odpojte od existujících hostitelů." 
- -#, python-format -msgid "%(err)s" -msgstr "%(err)s" - -#, python-format -msgid "" -"%(err)s\n" -"result: %(res)s." -msgstr "" -"%(err)s\n" -"výsledek: %(res)s." - -#, python-format -msgid "%(error_message)s" -msgstr "%(error_message)s" - -#, python-format -msgid "%(exception)s: %(explanation)s" -msgstr "%(exception)s: %(explanation)s" - -#, python-format -msgid "%(file)s: Permission denied." -msgstr "%(file)s: Oprávnění zamítnuto." - -#, python-format -msgid "" -"%(fun)s: Failed with unexpected CLI output.\n" -" Command: %(cmd)s\n" -" stdout: %(out)s\n" -" stderr: %(err)s" -msgstr "" -"%(fun)s: selhalo s nečekaným výstupem příkazového řádku.\n" -"Příkaz: %(cmd)s\n" -"standardní výstup: %(out)s\n" -"chybový výstup: %(err)s" - -#, python-format -msgid "%(host)-25s\t%(availability_zone)-15s" -msgstr "%(host)-25s\t%(availability_zone)-15s" - -#, python-format -msgid "%(host)-25s\t%(zone)-15s" -msgstr "%(host)-25s\t%(zone)-15s" - -#, python-format -msgid "%(message)s" -msgstr "%(message)s" - -#, python-format -msgid "" -"%(message)s\n" -"Status Code: %(_status)s\n" -"Body: %(_body)s" -msgstr "" -"%(message)s\n" -"Kód stavu: %(_status)s\n" -"Tělo: %(_body)s" - -#, python-format -msgid "%(message)s, subjectAltName: %(sanList)s." -msgstr "%(message)s, alternativní název subjektu: %(sanList)s." - -#, python-format -msgid "" -"%(msg_type)s: creating NetworkPortal: ensure port %(port)d on ip %(ip)s is " -"not in use by another service." -msgstr "" -"%(msg_type)s: vytváření síťového portálu: ujistěte se, že port %(port)d na " -"ip adrese %(ip)s nepoužívá jiná služba." - -#, python-format -msgid "" -"%(op)s: backup %(bck_id)s, volume %(vol_id)s failed. Backup object has " -"unexpected mode. Image or file backups supported, actual mode is " -"%(vol_mode)s." -msgstr "" -"%(op)s: záloha %(bck_id)s svazku %(vol_id)s selhala. Objekt zálohy má " -"neočekávaný režim. Podporovány jsou zálohy obrazu nebo souborů, současný " -"režim je %(vol_mode)s." 
- -#, python-format -msgid "%(service)s Service is not %(status)s on storage appliance: %(host)s" -msgstr "" -"Služba %(service)s není ve stavu %(status)s v zařízení úložiště: %(host)s" - -#, python-format -msgid "%(value_name)s must be <= %(max_value)d" -msgstr "%(value_name)s musí být <= %(max_value)d" - -#, python-format -msgid "%(value_name)s must be >= %(min_value)d" -msgstr "%(value_name)s musí být >= %(min_value)d" - -#, python-format -msgid "" -"%(worker_name)s value of %(workers)d is invalid, must be greater than 0." -msgstr "" -"Hodnota %(worker_name)s ve %(workers)d je neplatná, musí být větší než 0." - -#, python-format -msgid "%s" -msgstr "%s" - -#, python-format -msgid "" -"%s cannot be accessed. Verify that GPFS is active and file system is mounted." -msgstr "" -"Nelze získat přístup k %s. Ověřte, zda GPFS je aktivní a že systém souborů " -"je připojen." - -#, python-format -msgid "%s cannot be resized using clone operation as it contains no blocks." -msgstr "" -"nelze změnit velikost %s pomocí operace klonování, protože neobsahuje žádné " -"bloky." - -#, python-format -msgid "" -"%s cannot be resized using clone operation as it is hosted on compressed " -"volume" -msgstr "" -"nelze změnit velikost %s pomocí operace klonování, protože je umístěn na " -"komprimovaném svazku" - -#, python-format -msgid "%s configuration option is not set." -msgstr "Konfigurační volba %s není nastavena." - -#, python-format -msgid "%s is not a directory." -msgstr "%s není adresář." - -#, python-format -msgid "%s is not installed" -msgstr "%s není nainstlaováno" - -#, python-format -msgid "%s is not installed." -msgstr "%s není nainstalováno." - -#, python-format -msgid "%s is not set" -msgstr "%s není nastaveno" - -#, python-format -msgid "%s is not set." -msgstr "%s není nastaveno." - -#, python-format -msgid "%s must be a valid raw or qcow2 image." -msgstr "%s musí být platný obraz raw nebo qcow2." - -#, python-format -msgid "%s must be an absolute path." 
-msgstr "%s musí být absolutní cesta." - -#, python-format -msgid "%s must be an integer." -msgstr "%s musí být celé číslo." - -#, python-format -msgid "%s not set in cinder.conf" -msgstr "%s není nastaveno v cinder.conf" - -#, python-format -msgid "%s not set." -msgstr "%s není nastaveno." - -#, python-format -msgid "" -"'%(prot)s' is invalid for flashsystem_connection_protocol in config file. " -"valid value(s) are %(enabled)s." -msgstr "" -"'%(prot)s' v souboru nastavení je neplatný pro protokol připojení k " -"flashsystem. Platné hodnoty jsou %(enabled)s." - -msgid "'active' must be present when writing snap_info." -msgstr "při zápisu informací o snímku musí být přítomno 'active'." - -msgid "'consistencygroup_id' must be specified" -msgstr "'consistencygroup_id' musí být zadáno" - -msgid "'qemu-img info' parsing failed." -msgstr "zpracování 'qemu-img info' selhalo." - -msgid "'status' must be specified." -msgstr "'status' musí být zadáno." - -msgid "'volume_id' must be specified" -msgstr "'volume_id' svazku musí být zadáno." - -#, python-format -msgid "" -"(Command: %(cmd)s) (Return Code: %(exit_code)s) (Stdout: %(stdout)s) " -"(Stderr: %(stderr)s)" -msgstr "" -"(Příkaz: %(cmd)s) (Návratový kód: %(exit_code)s) (Standardní výstup: " -"%(stdout)s) (Chybový výstup: %(stderr)s)" - -#, python-format -msgid "A LUN (HLUN) was not found. (LDEV: %(ldev)s)" -msgstr " LUN (HLUN) nebyl nalezen. (Logické zařízení: %(ldev)s)" - -#, python-format -msgid "" -"A free LUN (HLUN) was not found. Add a different host group. (LDEV: %(ldev)s)" -msgstr "" -"Volný LUN (HLUN) nebyl nalezen. Přidejte jinou skupinu hostitele. (Logické " -"zařízení: %(ldev)s)" - -#, python-format -msgid "A host group could not be added. (port: %(port)s, name: %(name)s)" -msgstr "" -"Skupina hostitele nemohla být smazána. (Port: %(port)s název: %(name)s)" - -#, python-format -msgid "" -"A host group could not be deleted. 
(port: %(port)s, gid: %(gid)s, name: " -"%(name)s)" -msgstr "" -"Skupina hostitele nemohla být smazána. (Port: %(port)s, gid: %(gid)s, název: " -"%(name)s)" - -#, python-format -msgid "A host group is invalid. (host group: %(gid)s)" -msgstr "Skupina hostitele je neplatná. (Skupina hostitele: %(gid)s)" - -#, python-format -msgid "A pair cannot be deleted. (P-VOL: %(pvol)s, S-VOL: %(svol)s)" -msgstr "" -"Pár nemůže být smazán. (primární svazek: %(pvol)s, sekundární svazek: " -"%(svol)s)" - -#, python-format -msgid "" -"A pair could not be created. The maximum number of pair is exceeded. (copy " -"method: %(copy_method)s, P-VOL: %(pvol)s)" -msgstr "" -"Pár nemohl být vytvořen. Překročen maximální počet párů. (Metoda kopírování: " -"%(copy_method)s, primární svazek: %(pvol)s)" - -#, python-format -msgid "A parameter is invalid. (%(param)s)" -msgstr "Parametr je neplatný. (%(param)s)" - -#, python-format -msgid "A parameter value is invalid. (%(meta)s)" -msgstr "Hodnota parametru je neplatná. (%(meta)s)" - -#, python-format -msgid "A pool could not be found. (pool id: %(pool_id)s)" -msgstr "Zásoba nemohla být nalezena. (Id zásoby: %(pool_id)s)" - -#, python-format -msgid "A snapshot status is invalid. (status: %(status)s)" -msgstr "Stav snímku je neplatný. (Stav: %(status)s)" - -msgid "A valid secondary target MUST be specified in order to failover." -msgstr "Pro zavedení záložního systému MUSÍ být zadán platný druhotní cíl." - -msgid "A volume ID or share was not specified." -msgstr "Nezadáno ID svazku nebo sdílení." - -#, python-format -msgid "A volume status is invalid. (status: %(status)s)" -msgstr "Stav svazku je neplatný. (Stav: %(status)s)" - -#, python-format -msgid "API %(name)s failed with error string %(err)s" -msgstr "API %(name)s selhalo s chybovým řetězcem %(err)s" - -#, python-format -msgid "" -"API Version String %(version)s is of invalid format. Must be of format " -"MajorNum.MinorNum." 
-msgstr "" -"Řetězec verze API %(version)s je v neplatném formátu- Musí být ve formátu " -"Hlavní číslo verze a Vedlejší číslo verze" - -msgid "API key is missing for CloudByte driver." -msgstr "Ovladači CloudByte chybí klíč API." - -#, python-format -msgid "API response: %s" -msgstr "Odpověď API: %s" - -#, python-format -msgid "API version %(version)s is not supported on this method." -msgstr "API s verzí %(version)s není v této metodě podporován." - -msgid "API version could not be determined." -msgstr "Nelze zjistit verzi API." - -msgid "" -"About to delete child projects having non-zero quota. This should not be " -"performed" -msgstr "" -"Chystáte se smazat podřazené projekty mající nenulovou kvótu. Toto by nemělo " -"být prováděno" - -msgid "Access list not available for public volume types." -msgstr "Seznam přístupu není dostupný pro veřejné typy svazku." - -msgid "Activate or deactivate QoS error." -msgstr "Chyba při aktivaci nebo deaktivaci QoS." - -msgid "Activate snapshot error." -msgstr "Chyba při aktivaci snímku." - -msgid "Add FC port to host error." -msgstr "Chyba při přidávání FC portu k hostiteli." - -msgid "Add fc initiator to array error." -msgstr "Chyba při přidávání zavaděče fc do pole." - -msgid "Add initiator to array error." -msgstr "Chyba při přidávání zavaděče do pole." - -msgid "Add lun to cache error." -msgstr "Chyba při přidávání lun do mezipaměti." - -msgid "Add lun to partition error." -msgstr "Chyba při přidávání lun do oddílu." - -msgid "Add mapping view error." -msgstr "Chyba při přidávání zobrazení mapování." - -msgid "Add new host error." -msgstr "Chyba při přidávání nového hostitele." - -#, python-format -msgid "" -"All the specified storage pools to be managed do not exist. Please check " -"your configuration. Non-existent pools: %s" -msgstr "" -"Žádné ze zadaných zásob úložiště, které mají být spravovány, neexistují. " -"Prosím zkontrolujte své nastavení. 
Neexistující zásoby: %s" - -msgid "An error has occurred during backup operation" -msgstr "Během operace zálohování nastala chyba" - -#, python-format -msgid "" -"An error occurred during the LUNcopy operation. LUNcopy name: " -"%(luncopyname)s. LUNcopy status: %(luncopystatus)s. LUNcopy state: " -"%(luncopystate)s." -msgstr "" -"Při operaci kopírování LUN došlo k chybě. Název operace: %(luncopyname)s. " -"Status: %(luncopystatus)s. Stav: %(luncopystate)s." - -#, python-format -msgid "An error occurred while reading volume \"%s\"." -msgstr "Při čtení svazku \"%s\" nastal problém." - -#, python-format -msgid "An error occurred while writing to volume \"%s\"." -msgstr "Při zápisu do svazku \"%s\" nastal problém." - -#, python-format -msgid "An iSCSI CHAP user could not be added. (username: %(user)s)" -msgstr "CHAP uživatel iSCSI nemohl být přidán. (Jméno uživatele: %(user)s)" - -#, python-format -msgid "An iSCSI CHAP user could not be deleted. (username: %(user)s)" -msgstr "CHAP uživatel iSCSI nemohl být smazán. (uživatelské jméno: %(user)s)" - -#, python-format -msgid "" -"An iSCSI target could not be added. (port: %(port)s, alias: %(alias)s, " -"reason: %(reason)s)" -msgstr "" -"Cíl iSCSI nemohl být přidán. (Port: %(port)s, přezdívka: %(alias)s, důvod: " -"%(reason)s)" - -#, python-format -msgid "" -"An iSCSI target could not be deleted. (port: %(port)s, tno: %(tno)s, alias: " -"%(alias)s)" -msgstr "" -"Cíl iSCSI nemohl být smazán. (Port: %(port)s, tno: %(tno)s, přezdívka: " -"%(alias)s)" - -msgid "An unknown exception occurred." -msgstr "Vyskytla se neočekávaná výjimka." - -msgid "" -"An user with a token scoped to a subproject is not allowed to see the quota " -"of its parents." -msgstr "" -"Uživatel s příznakem z podprojektu nemá povoleno zobrazit kvótu nadřazených " -"projektů." - -#, python-format -msgid "Array does not exist or is offline. Current status of array is %s." -msgstr "Pole neexistuje nebo je offline. Současný stav pole je %s." 
- -msgid "Associate host to hostgroup error." -msgstr "Chyba při přidružování hostitele ke skupině hostitele." - -msgid "Associate host to mapping view error." -msgstr "Chyba při přidružování hostitele k zobrazení mapování." - -msgid "Associate initiator to host error." -msgstr "Chyba při přidružování zavaděče k hostiteli." - -msgid "Associate lun to lungroup error." -msgstr "Chyba při přidružování lun ke skupině lun." - -msgid "Associate lungroup to mapping view error." -msgstr "Chyba při přidružování skupiny lun k zobrazení mapování." - -msgid "Associate portgroup to mapping view error." -msgstr "Chyba při přidružování skupiny portu k zobrazení mapování." - -msgid "At least one valid iSCSI IP address must be set." -msgstr "Musí být nastavena alespoň jedna platná IP adresa iSCSI." - -#, python-format -msgid "Attempt to transfer %s with invalid auth key." -msgstr "Pokus o přenos %s s neplatným ověřovacím klíčem." - -#, python-format -msgid "Auth group [%s] details not found in CloudByte storage." -msgstr "Údaje ověřovací skupiny [%s] nebyly nalezeny v úložišti CloudByte." - -msgid "Auth user details not found in CloudByte storage." -msgstr "Přihlašovací údaje uživatele nebyly nalezeny v úložišti CloudByte." - -#, python-format -msgid "Availability zone '%(s_az)s' is invalid." -msgstr "Zóna dostupnosti '%(s_az)s' je neplatná." - -msgid "Available categories:" -msgstr "Dostupné kategorie:" - -msgid "" -"Back-end QoS specs are not supported on this storage family and ONTAP " -"version." -msgstr "" -"Specifikace QoS podpůrné vrstvy nejsou podporovány u tohoto druhu úložišť a " -"verze ONTAP." 
- -#, python-format -msgid "Backend doesn't exist (%(backend)s)" -msgstr "Podpůrná vrstva neexistuje (%(backend)s)" - -#, python-format -msgid "Backend reports: %(message)s" -msgstr "Podpůrná vrstva hlásí: %(message)s" - -msgid "Backend reports: item already exists" -msgstr "Podpůrná vrstva hlásí: Položka již existuje" - -msgid "Backend reports: item not found" -msgstr "Podpůrná vrstva hlásí: Položka nenalezena" - -#, python-format -msgid "Backend service retry timeout hit: %(timeout)s sec" -msgstr "" -"Služba podpůrné vrstvy dosáhla časového limitu na nový pokus: %(timeout)s " -"vteřin" - -msgid "Backend storage did not configure fiber channel target." -msgstr "Úložiště podpůrné vrstvy nenastavilo cíl fiber channel." - -msgid "Backing up an in-use volume must use the force flag." -msgstr "Při zálohování používaného svazku je zapotřebí použít příznak force." - -#, python-format -msgid "Backup %(backup_id)s could not be found." -msgstr "Záloha %(backup_id)s nemohla být nalezena." - -msgid "Backup RBD operation failed" -msgstr "Záložní operace RBD selhala" - -msgid "Backup already exists in database." -msgstr "Záloha již existuje v databázi." - -#, python-format -msgid "Backup driver reported an error: %(message)s" -msgstr "Záložní ovladač nahlásil chybu: %(message)s" - -msgid "Backup id required" -msgstr "ID zálohy je vyžadováno" - -msgid "Backup is not supported for GlusterFS volumes with snapshots." -msgstr "Záloha není podporována svazky GlusterFS se snímky." - -msgid "Backup is only supported for SOFS volumes without backing file." -msgstr "Zálohy jsou podporovány pouze u svazků SOFS bez záložního souboru." - -msgid "Backup is only supported for raw-formatted GlusterFS volumes." -msgstr "Záloha je podporována pouze u prostě-formátovaných svazků GlusterFS." - -msgid "Backup is only supported for raw-formatted SOFS volumes." -msgstr "Zálohy jsou podporovány pouze v prostě-formátovaných svazcích SOFS." - -msgid "Backup operation of an encrypted volume failed." 
-msgstr "Záložní operace zašifrovaného svazku selhala." - -#, python-format -msgid "" -"Backup service %(configured_service)s does not support verify. Backup id " -"%(id)s is not verified. Skipping verify." -msgstr "" -"Zálohovací služba %(configured_service)s nepodporuje ověřování. Záloha s id " -"%(id)s není ověřena. Ověřování přeskočeno." - -#, python-format -msgid "" -"Backup service %(service)s does not support verify. Backup id %(id)s is not " -"verified. Skipping reset." -msgstr "" -"Zálohovací služba %(service)s nepodporuje ověřování. Záloha s id %(id)s není " -"ověřena. Resetování přeskočeno." - -#, python-format -msgid "Backup should only have one snapshot but instead has %s" -msgstr "Záloha by měla mít pouze jeden snímek ale místo toho má %s" - -msgid "Backup status must be available" -msgstr "Stav zálohy musí být dostupný" - -#, python-format -msgid "Backup status must be available and not %s." -msgstr "Stav zálohy musí být dostupný a ne %s." - -msgid "Backup status must be available or error" -msgstr "Stav zálohy musí být dostupný nebo chybný" - -msgid "Backup to be restored has invalid size" -msgstr "Obnovovaná záloha má neplatnou velikost" - -#, python-format -msgid "Bad Status line returned: %(arg)s." -msgstr "Vrácen špatný stav řádku: %(arg)s." 
- -#, python-format -msgid "Bad key(s) in quota set: %s" -msgstr "Špatné klíče v sadě kvót: %s" - -#, python-format -msgid "" -"Bad or unexpected response from the storage volume backend API: %(data)s" -msgstr "" -"Špatná nebo neočekávaná odpověď od API podpůrné vrstvy úložiště svazků: " -"%(data)s" - -#, python-format -msgid "Bad project format: project is not in proper format (%s)" -msgstr "Špatný formát projektu: projekt není ve správném formátu (%s)" - -msgid "Bad response from Datera API" -msgstr "Špatná odpověď od API Datera" - -msgid "Bad response from SolidFire API" -msgstr "Špatná odpověď od SolidFire API" - -#, python-format -msgid "Bad response from XMS, %s" -msgstr "Špatná odpověď od XMS, %s" - -msgid "Binary" -msgstr "Binární soubor" - -msgid "Blank components" -msgstr "Prázdné součásti" - -msgid "Blockbridge api host not configured" -msgstr "Hostitel API Blockbridge není nastaven" - -#, python-format -msgid "Blockbridge configured with invalid auth scheme '%(auth_scheme)s'" -msgstr "Blockbridge je nastaven s neplatným způsobem ověření '%(auth_scheme)s'" - -msgid "Blockbridge default pool does not exist" -msgstr "Výchozí zásoba Blockbridge neexistuje" - -msgid "" -"Blockbridge password not configured (required for auth scheme 'password')" -msgstr "" -"Heslo Blockbridge není nastaven (vyžadováno pro způsob ověření 'password')" - -msgid "Blockbridge pools not configured" -msgstr "Zásoby Blockbridge nejsou nastaveny" - -msgid "Blockbridge token not configured (required for auth scheme 'token')" -msgstr "" -"Příznak Blockbridge není nastaven (vyžadováno pro způsob ověření 'token')" - -msgid "Blockbridge user not configured (required for auth scheme 'password')" -msgstr "" -"Uživatel Blockbridge není nastaven (vyžadováno pro způsob ověření 'password')" - -#, python-format -msgid "Brocade Fibre Channel Zoning CLI error: %(reason)s" -msgstr "Chyba vyhrazování CLI Brocade Fibre Channel: %(reason)s" - -#, python-format -msgid "Brocade Fibre Channel Zoning HTTP 
error: %(reason)s" -msgstr "Chyba vyhrazování HTTP Brocade Fibre Channel: %(reason)s" - -msgid "CHAP secret should be 12-16 bytes." -msgstr "Soukromý klíč CHAP by měl mít alespoň 12-16 bajtů." - -#, python-format -msgid "" -"CLI Exception output:\n" -" command: %(cmd)s\n" -" stdout: %(out)s\n" -" stderr: %(err)s" -msgstr "" -"Výstup výjimky rozhraní příkazového řádku:\n" -"příkaz: %(cmd)s\n" -"standardní výstup: %(out)s\n" -"chybový výstup: %(err)s." - -#, python-format -msgid "" -"CLI Exception output:\n" -" command: %(cmd)s\n" -" stdout: %(out)s\n" -" stderr: %(err)s." -msgstr "" -"Výstup výjimky rozhraní příkazového řádku:\n" -"příkaz: %(cmd)s\n" -"standardní výstup: %(out)s\n" -"chybový výstup: %(err)s." - -msgid "" -"CMMVC6071E The VDisk-to-host mapping was not created because the VDisk is " -"already mapped to a host.\n" -"\"" -msgstr "" -"CMMVC6071E, mapování virtuálního disku na hostitele nebylo vytvořeno protože " -"disk k hostiteli již je namapován.\n" -"\"" - -msgid "CONCERTO version is not supported" -msgstr "Verze CONCERTO není podporována" - -#, python-format -msgid "CPG (%s) doesn't exist on array" -msgstr "Společná skupiny poskytování (%s) neexistuje v poli" - -msgid "Cache name is None, please set smartcache:cachename in key." -msgstr "" -"Název mezipaměti je None, prosím nastavte smartcache:cachename v klíči." - -#, python-format -msgid "Cache volume %s does not have required properties" -msgstr "Svazek mezipaměti %s nemá požadované vlastnosti" - -msgid "Can not add FC port to host." -msgstr "Nelze přidat FC port k hostiteli." - -#, python-format -msgid "Can not find cache id by cache name %(name)s." -msgstr "Nelze najít id mezipaměti podle jejího názvu %(name)s." - -#, python-format -msgid "Can not find partition id by name %(name)s." -msgstr "Nelze najít id oddílu podle názvu %(name)s." - -#, python-format -msgid "Can not translate %s to integer." -msgstr "Nelze převést %s na celé číslo." 
- -#, python-format -msgid "Can't access 'scality_sofs_config': %s" -msgstr "Nelze získat přístup k 'scality_sofs_config': %s" - -msgid "Can't decode backup record." -msgstr "Nelze rozšifrovat záznam zálohy." - -#, python-format -msgid "Can't find cache name on the array, cache name is: %(name)s." -msgstr "Nelze v poli najít název mezipaměti, její název je: %(name)s." - -#, python-format -msgid "Can't find partition name on the array, partition name is: %(name)s." -msgstr "Nelze v poli najít název oddílu, jeho název je: %(name)s." - -msgid "Can't find the same host id from arrays." -msgstr "V polích nelze najít stejné id hostitele ." - -#, python-format -msgid "Can't get volume id. Volume name: %s." -msgstr "Nelze získat id svazku. Název svazku: %s." - -msgid "Can't parse backup record." -msgstr "Nelze zpracovat záznam zálohy." - -#, python-format -msgid "" -"Cannot add volume %(volume_id)s to consistency group %(group_id)s because it " -"has no volume type." -msgstr "" -"Nelze přidat svazek %(volume_id)s do skupiny jednotnosti %(group_id)s " -"protože nemá žádný typ svazku." - -#, python-format -msgid "" -"Cannot add volume %(volume_id)s to consistency group %(group_id)s because it " -"is already in consistency group %(orig_group)s." -msgstr "" -"Nelze přidat svazek %(volume_id)s do skupiny jednotnosti %(group_id)s " -"protože již je ve skupině %(orig_group)s.." - -#, python-format -msgid "" -"Cannot add volume %(volume_id)s to consistency group %(group_id)s because " -"volume cannot be found." -msgstr "" -"Nelze přidat svazek %(volume_id)s do skupiny jednotnosti %(group_id)s " -"protože svazek nelze nalézt." - -#, python-format -msgid "" -"Cannot add volume %(volume_id)s to consistency group %(group_id)s because " -"volume does not exist." -msgstr "" -"Nelze přidat svazek %(volume_id)s do skupiny jednotnosti %(group_id)s " -"protože svazek neexistuje." 
- -#, python-format -msgid "" -"Cannot add volume %(volume_id)s to consistency group %(group_id)s because " -"volume is in an invalid state: %(status)s. Valid states are: %(valid)s." -msgstr "" -"Nelze přidat svazek %(volume_id)s do skupiny jednotnosti %(group_id)s " -"protože svazek je v neplatném stavu: %(status)s. Platné stavy jsou %(valid)s." - -#, python-format -msgid "" -"Cannot add volume %(volume_id)s to consistency group %(group_id)s because " -"volume type %(volume_type)s is not supported by the group." -msgstr "" -"Nelze přidat svazek %(volume_id)s do skupiny jednotnosti %(group_id)s " -"protože typ svazku %(volume_type)s tato skupina nepodporuje." - -#, python-format -msgid "" -"Cannot attach already attached volume %s; multiattach is disabled via the " -"'netapp_enable_multiattach' configuration option." -msgstr "" -"Nelze připojit již připojený svazek %s; vícenásobné připojení je zakázáno " -"přes volbu nastavení 'netapp_enable_multiattach'." - -msgid "Cannot connect to ECOM server." -msgstr "Nelze se připojit k serveru ECOM." - -#, python-format -msgid "" -"Cannot create consistency group %(group)s because snapshot %(snap)s is not " -"in a valid state. Valid states are: %(valid)s." -msgstr "" -"Nelze vytvořit skupinu jednotnosti %(group)s protože snímek %(snap)s není v " -"platném stavu. Platné stavy jsou: %(valid)s." - -#, python-format -msgid "" -"Cannot create consistency group %(group)s because source volume " -"%(source_vol)s is not in a valid state. Valid states are: %(valid)s." -msgstr "" -"Nelze vytvořit skupinu jednotnosti %(group)s protože zdrojový svazek " -"%(source_vol)s není v platném stavu. Platné stavy jsou: %(valid)s." - -#, python-format -msgid "Cannot create directory %s." -msgstr "Nelze vytvořit adresář %s." - -msgid "Cannot create encryption specs. Volume type in use." -msgstr "Nelze vytvořit specifikace šifrování. Typ svazku se používá." - -#, python-format -msgid "" -"Cannot create image of disk format: %s. 
Only vmdk disk format is accepted." -msgstr "" -"Nelze vytvořit obraz formátu disku: %s. Je přijímán pouze formát disku vmdk." - -#, python-format -msgid "Cannot create masking view: %(maskingViewName)s. " -msgstr "Nelze vytvořit zamaskování: %(maskingViewName)s. " - -#, python-format -msgid "" -"Cannot create more than %(req)s volumes on the ESeries array when " -"'netapp_enable_multiattach' is set to true." -msgstr "" -"Při nastavení 'netapp_enable_multiattach' na true nelze v poli ESeries " -"vytvořit více jak %(req)s svazků." - -#, python-format -msgid "Cannot create or find an storage group with name %(sgGroupName)s." -msgstr "Nelze vytvořit nebo najít skupinu úložiště s názvem %(sgGroupName)s." - -#, python-format -msgid "Cannot create volume of size %s: not multiple of 8GB." -msgstr "Nelze vytvořit svazek o velikosti %s: není násobkem 8GB." - -#, python-format -msgid "Cannot create volume_type with name %(name)s and specs %(extra_specs)s" -msgstr "" -"Nelze vytvořit typ svazku s názvem %(name)s a specifikacemi %(extra_specs)s" - -#, python-format -msgid "Cannot delete LUN %s while snapshots exist." -msgstr "LUN %s nelze smazat, pokud existují snímky." - -#, python-format -msgid "" -"Cannot delete cache volume: %(cachevol_name)s. It was updated at " -"%(updated_at)s and currently has %(numclones)d volume instances." -msgstr "" -"Nelze smazat svazek mezipaměti: %(cachevol_name)s. Byl aktualizována " -"%(updated_at)s a v současnosti má %(numclones)d instancí svazku." - -#, python-format -msgid "" -"Cannot delete cache volume: %(cachevol_name)s. It was updated at " -"%(updated_at)s and currently has %(numclones)s volume instances." -msgstr "" -"Nelze smazat svazek mezipaměti: %(cachevol_name)s. Byl aktualizována " -"%(updated_at)s a v současnosti má %(numclones)s instancí svazku." - -msgid "Cannot delete encryption specs. Volume type in use." -msgstr "Nelze smazat specifikace šifrování. Typ svazku se používá." - -msgid "Cannot determine storage pool settings." 
-msgstr "Nelze zjistit typ nastavení zásoby úložiště." - -msgid "Cannot execute /sbin/mount.sofs" -msgstr "Nelze spustit /sbin/mount.sofs" - -#, python-format -msgid "Cannot find CG group %s." -msgstr "Nelze najít skupinu CG %s." - -#, python-format -msgid "" -"Cannot find Controller Configuration Service for storage system " -"%(storage_system)s." -msgstr "" -"Nelze najít službu konfigurace kontroléru pro systém úložiště " -"%(storage_system)s." - -#, python-format -msgid "Cannot find Replication Service to create volume for snapshot %s." -msgstr "Nelze najít službu replikace pro vytvoření svazku pro snímek %s." - -#, python-format -msgid "Cannot find Replication Service to delete snapshot %s." -msgstr "Nelze najít službu repliakce pro smazání svazku %s." - -#, python-format -msgid "Cannot find Replication service on system %s." -msgstr "Nelze najít službu replikace na systému %s." - -#, python-format -msgid "Cannot find Volume: %(id)s. unmanage operation. Exiting..." -msgstr "Nelze najít svazek: %(id)s, operace zrušení správy. Ukončování..." - -#, python-format -msgid "Cannot find Volume: %(volumename)s. Extend operation. Exiting...." -msgstr "Nelze najít svazek: %(volumename)s. Operace rozšíření. Ukončování..." - -#, python-format -msgid "Cannot find device number for volume %(volumeName)s." -msgstr "Nelze najít číslo zařízení pro svazek %(volumeName)s." - -msgid "Cannot find migration task." -msgstr "Nelze najít úkol pro přesun." - -#, python-format -msgid "Cannot find replication service on system %s." -msgstr "Nelze najít službu replikace v systému %s." - -#, python-format -msgid "Cannot find source CG instance. consistencygroup_id: %s." -msgstr "" -"Nelze najít zdrojovou instanci skupiny jednotnosti. ID skupiny jednotnosti: " -"%s." - -#, python-format -msgid "Cannot get mcs_id by channel id: %(channel_id)s." -msgstr "Nelze získat id mcs podle id kanálu: %(channel_id)s." - -msgid "Cannot get necessary pool or storage system information." 
-msgstr "Nelze získat potřebné informace o systému zásoby nebo úložiště." - -#, python-format -msgid "" -"Cannot get or create a storage group: %(sgGroupName)s for volume " -"%(volumeName)s " -msgstr "" -"Nelze získat nebo vytvořit skupinu úložiště: %(sgGroupName)s pro svazek " -"%(volumeName)s" - -#, python-format -msgid "Cannot get or create initiator group: %(igGroupName)s. " -msgstr "Nelze získat nebo vytvořit skupinu zavaděče: %(igGroupName)s. " - -#, python-format -msgid "Cannot get port group: %(pgGroupName)s. " -msgstr "Nelze získat skupinu portu: %(pgGroupName)s. " - -#, python-format -msgid "" -"Cannot get storage group: %(sgGroupName)s from masking view " -"%(maskingViewInstanceName)s. " -msgstr "" -"Nelze získat skupinu úložiště: %(sgGroupName)s pro zamaskování " -"%(maskingViewInstanceName)s. " - -#, python-format -msgid "" -"Cannot get supported size range for %(sps)s Return code: %(rc)lu. Error: " -"%(error)s." -msgstr "" -"Nelze získat podporovaný rozsah velikosti pro %(sps)s. Návratový kód: " -"%(rc)lu. Chyba: %(error)s." - -#, python-format -msgid "" -"Cannot get the default storage group for FAST policy: %(fastPolicyName)s." -msgstr "" -"Nelze získat výchozí skupiny úložiště pro zásadu FAST: %(fastPolicyName)s." - -msgid "Cannot mount Scality SOFS, check syslog for errors" -msgstr "" -"Nelze připojit Scality SOFS, zkontrolujte záznam systému pro možné chyby" - -msgid "Cannot ping DRBDmanage backend" -msgstr "Nelze zjistit spojení s podpůrnou vrstvou DRBDmanage" - -#, python-format -msgid "Cannot place volume %(id)s on %(host)s" -msgstr "Nelze umístit svazek %(id)s na %(host)s" - -#, python-format -msgid "" -"Cannot provide both 'cgsnapshot_id' and 'source_cgid' to create consistency " -"group %(name)s from source." -msgstr "" -"Při vytváření skupiny jednotnosti %(name)s ze zdroje nelze zadávat " -"'cgsnapshot_id' a 'source_cgid' najednou." 
- -msgid "Cannot register resource" -msgstr "Nelze registrovat zdroj" - -msgid "Cannot register resources" -msgstr "Nelze registrovat zdroje" - -#, python-format -msgid "" -"Cannot remove volume %(volume_id)s from consistency group %(group_id)s " -"because it is not in the group." -msgstr "" -"Nelze odstranit svazek %(volume_id)s ze skupiny jednotnosti %(group_id)s " -"protože není ve skupině." - -#, python-format -msgid "" -"Cannot remove volume %(volume_id)s from consistency group %(group_id)s " -"because volume is in an invalid state: %(status)s. Valid states are: " -"%(valid)s." -msgstr "" -"Nelze odstranit svazek %(volume_id)s ze skupiny jednotnosti %(group_id)s " -"protože svazek je v neplatném stavu: %(status)s. Platné stavy jsou: " -"%(valid)s." - -#, python-format -msgid "Cannot retype from HPE3PARDriver to %s." -msgstr "Nelze přetypovat z HPE3PARDriver na %s." - -msgid "Cannot retype from one 3PAR array to another." -msgstr "Nelze přetypovat z jednoho pole 3PAR na jiné." - -msgid "Cannot retype to a CPG in a different domain." -msgstr "Nelze přetypovat do společné skupiny poskytování v jiné doméně." - -msgid "Cannot retype to a snap CPG in a different domain." -msgstr "Nelze přetypovat do snímku společné skupiny poskytování v jiné doméně." - -msgid "" -"Cannot run vgc-cluster command, please ensure software is installed and " -"permissions are set properly." -msgstr "" -"Nelze spustit příkaz vgc-cluster, ujistěte se prosím, že je software " -"nainstalován a máte správně nastavená oprávnění." - -msgid "Cannot set both hitachi_serial_number and hitachi_unit_name." -msgstr "Nelze nastavit sériové číslo a název jednotky hitachi najednou." - -msgid "Cannot specify both protection domain name and protection domain id." -msgstr "Nelze zadat název a id ochranné domény najednou." - -msgid "Cannot specify both storage pool name and storage pool id." -msgstr "Nelze zadat název a id zásoby úložiště najednou." 
-
-#, python-format
-msgid ""
-"Cannot update consistency group %(group_id)s because no valid name, "
-"description, add_volumes, or remove_volumes were provided."
-msgstr ""
-"Nelze aktualizovat skupinu jednotnosti %(group_id)s protože nebyl zadán "
-"platný název, popis, či přidání/odstranění svazku."
-
-msgid "Cannot update encryption specs. Volume type in use."
-msgstr "Nelze aktualizovat specifikace šifrování. Typ svazku se používá."
-
-#, python-format
-msgid "Cannot update volume_type %(id)s"
-msgstr "Nelze aktualizovat typ svazku %(id)s"
-
-#, python-format
-msgid "Cannot verify the existence of object:%(instanceName)s."
-msgstr "Nelze ověřit existenci objektu: %(instanceName)s."
-
-#, python-format
-msgid "CgSnapshot %(cgsnapshot_id)s could not be found."
-msgstr "Snímek skupiny jednotnosti %(cgsnapshot_id)s nemohl být nalezen."
-
-msgid "Cgsnahost is empty. No consistency group will be created."
-msgstr "Snímek skupiny jednotnosti je prázdný. Žádná skupina nebude vytvořena."
-
-msgid "Change hostlun id error."
-msgstr "Chyba při změně id hostitele lun."
-
-msgid "Change lun priority error."
-msgstr "Chyba při změně priority lun."
-
-msgid "Change lun smarttier policy error."
-msgstr "Chyba při změně chytré vrstvy zásady lun."
-
-#, python-format
-msgid ""
-"Change would make usage less than 0 for the following resources: %(unders)s"
-msgstr ""
-"Změna by využití změnila na méně než 0 pro následující zdroje: %(unders)s"
-
-msgid "Check access permissions for the ZFS share assigned to this driver."
-msgstr ""
-"Zkontrolujte oprávnění k přístupu pro sdílení ZFS přidělené k tomuto "
-"ovladači."
-
-msgid "Check hostgroup associate error."
-msgstr "Chyba při kontrole přidružení skupiny hostitele."
-
-msgid "Check initiator added to array error."
-msgstr "Chyba při kontrole přidání zavaděče do pole."
-
-msgid "Check initiator associated to host error."
-msgstr "Chyba při kontrole přidružení zavaděče k hostiteli."
-
-msgid "Check lungroup associate error." 
-msgstr "Chyba při kontrole přidružení skupiny lun." - -msgid "Check portgroup associate error." -msgstr "Chyba při kontrole přidružení skupiny portu." - -msgid "" -"Check the state of the http service. Also ensure that the https port number " -"is the same as the one specified in cinder.conf." -msgstr "" -"Zkontrolujte stav služby http. Také se ujistěte, že číslo portu https je " -"stejné jako port zadaný v cinder.conf." - -msgid "Chunk size is not multiple of block size for creating hash." -msgstr "" -"Velikost kusu není násobkem velikosti bloku z kterého je možné vytvořit " -"kontrolní součet." - -#, python-format -msgid "Cisco Fibre Channel Zoning CLI error: %(reason)s" -msgstr "Chyba vyhrazování CLI Cisco Fibre Channel: %(reason)s" - -#, python-format -msgid "Clone feature is not licensed on %(storageSystem)s." -msgstr "Funkce klonování není licencována na %(storageSystem)s." - -#, python-format -msgid "" -"Clone type '%(clone_type)s' is invalid; valid values are: '%(full_clone)s' " -"and '%(linked_clone)s'." -msgstr "" -"Typ klonu '%(clone_type)s' je neplatný; platné hodnoty jsou: " -"'%(full_clone)s' a '%(linked_clone)s'." - -msgid "" -"Cluster is not formatted. You should probably perform \"dog cluster format\"." -msgstr "" -"Cluster není zformátován. Pravděpodobně byste měli provést příkaz \"dog " -"cluster format\"." - -#, python-format -msgid "Coho Data Cinder driver failure: %(message)s" -msgstr "Selhání ovladače Cinder Coho Data: %(message)s" - -msgid "Coho rpc port is not configured" -msgstr "RPC port Coho není nastaven" - -#, python-format -msgid "Command %(cmd)s blocked in the CLI and was cancelled" -msgstr "Příkaz %(cmd)s byl v klientském řádku zablokován a zrušen" - -#, python-format -msgid "CommandLineHelper._wait_for_condition: %s timeout." -msgstr "" -"Pomocník příkazového řádku: Čekání na podmínku: Vypršel časový limit %s." - -msgid "Compression Enabler is not installed. Can not create compressed volume." 
-msgstr "" -"Povolovač komprimace není nainstalován. Nelze vytvořit komprimovaný svazek." - -#, python-format -msgid "Compute cluster: %(cluster)s not found." -msgstr "Výpočetní cluster: %(cluster)s nenalezen." - -msgid "Condition has no field." -msgstr "Podmínka nemá žádné pole." - -#, python-format -msgid "Configuration file %(configurationFile)s does not exist." -msgstr "Konfigurační soubor %(configurationFile)s neexistuje." - -#, python-format -msgid "Configuration value %s is not set." -msgstr "Konfigurační volba %s není nastavena." - -#, python-format -msgid "" -"Conflicting QoS specifications in volume type %s: when QoS spec is " -"associated to volume type, legacy \"netapp:qos_policy_group\" is not allowed " -"in the volume type extra specs." -msgstr "" -"Konfliktní specifikace QoS pro typ svazku %s: Když je specifiakce přidružena " -"k typu svazku legacy, \"netapp:qos_policy_group\" není povolena v " -"dodatečných specifikací typu svazku." - -#, python-format -msgid "Connection to glance failed: %(reason)s" -msgstr "Připojení ke glance selhalo: %(reason)s" - -#, python-format -msgid "Connection to swift failed: %(reason)s" -msgstr "Připojení k swift selhalo: %(reason)s" - -#, python-format -msgid "Connector does not provide: %s" -msgstr "Konektor nepodporuje: %s" - -#, python-format -msgid "Connector doesn't have required information: %(missing)s" -msgstr "Konektor nemá požadované informace: %(missing)s" - -msgid "Consistency group is empty. No cgsnapshot will be created." -msgstr "" -"Skupina jednotnosti je prázdná. Nebude vytvořen žádný snímek skupiny " -"jednotnosti." - -#, python-format -msgid "ConsistencyGroup %(consistencygroup_id)s could not be found." -msgstr "Skupina jednotnosti %(consistencygroup_id)s nemohla být nalezena." - -msgid "Container" -msgstr "Kontejner" - -msgid "Container size smaller than required file size." -msgstr "Velikost kontejneru je menší než požadovaná velikost souboru." - -msgid "Content type not supported." 
-msgstr "Typ obsahu není podporován." - -#, python-format -msgid "Controller Configuration Service not found on %(storageSystemName)s." -msgstr "Služba nastavení kontroléru nebyla nalezena v %(storageSystemName)s." - -#, python-format -msgid "Controller IP '%(host)s' could not be resolved: %(e)s." -msgstr "IP kontroléru '%(host)s' nešlo zpracovat: %(e)s." - -#, python-format -msgid "Converted to %(f1)s, but format is now %(f2)s" -msgstr "Převedeno na %(f1)s, ale formát je nyní %(f2)s" - -#, python-format -msgid "Converted to %(vol_format)s, but format is now %(file_format)s" -msgstr "Převedeno na %(vol_format)s, ale formát je nyní %(file_format)s" - -#, python-format -msgid "Converted to raw, but format is now %s" -msgstr "Převedeno na prosté, ale formát je nyní %s" - -#, python-format -msgid "Converted to raw, but format is now %s." -msgstr "Převedeno na prostý, ale formát je nyní %s." - -msgid "Coordinator uninitialized." -msgstr "Koordinátor nezaveden." - -#, python-format -msgid "" -"Copy volume task failed: convert_to_base_volume: id=%(id)s, status=" -"%(status)s." -msgstr "" -"Úkol kopírování svazku selhal: převedení na základní svazek: Id=%(id)s, stav=" -"%(status)s." - -#, python-format -msgid "Copying metadata from %(src_type)s %(src_id)s to %(vol_id)s." -msgstr "Kopírování popisných dat z %(src_type)s %(src_id)s do %(vol_id)s." - -msgid "" -"Could not determine which Keystone endpoint to use. This can either be set " -"in the service catalog or with the cinder.conf config option " -"'backup_swift_auth_url'." -msgstr "" -"Nelze zjistit, který koncový bod Keystone použít. Ten může být nastaven buď " -"v katalogu služby nebo pomocí volby 'backup_swift_auth_url' v souboru cinder." -"conf." - -msgid "" -"Could not determine which Swift endpoint to use. This can either be set in " -"the service catalog or with the cinder.conf config option 'backup_swift_url'." -msgstr "" -"Nelze zjistit, který koncový bod Swift použít. 
Ten může být nastaven buď v " -"katalogu služby nebo pomocí volby 'backup_swift_url' v souboru cinder.conf." - -#, python-format -msgid "Could not find GPFS cluster id: %s." -msgstr "Nelze najít id clusteru GPFS: %s." - -#, python-format -msgid "Could not find GPFS file system device: %s." -msgstr "Nelze najít zařízení systému souborů GPFS: %s." - -#, python-format -msgid "Could not find config at %(path)s" -msgstr "Nelze najít nastavení v %(path)s" - -#, python-format -msgid "Could not find iSCSI export for volume %s" -msgstr "Nelze najít export iSCSI pro svazek %s" - -#, python-format -msgid "Could not find iSCSI target for volume: %(volume_id)s." -msgstr "Nelze najít cílové iSCSI pro svazek %(volume_id)s." - -#, python-format -msgid "Could not find key in output of command %(cmd)s: %(out)s." -msgstr "Ve výstupu příkazu %(cmd)s nelze najít klíč: %(out)s." - -#, python-format -msgid "Could not find parameter %(param)s" -msgstr "Nelze najít parametr %(param)s" - -#, python-format -msgid "Could not find target %s" -msgstr "Nelze najít cíl %s" - -msgid "Could not get system name." -msgstr "Nelze získat název systému." - -#, python-format -msgid "Could not load paste app '%(name)s' from %(path)s" -msgstr "Nelze načíst aplikaci vložení '%(name)s' z %(path)s" - -#, python-format -msgid "Could not save configuration to %(file_path)s: %(exc)s" -msgstr "Nelze uložit nastavení do %(file_path)s: %(exc)s" - -msgid "Create QoS policy error." -msgstr "Chyba při vytváření zásady QoS." - -#, python-format -msgid "" -"Create backup aborted, expected backup status %(expected_status)s but got " -"%(actual_status)s." -msgstr "" -"Vytvoření zálohy zrušeno, očekávaný stav zálohy je %(expected_status)s ale " -"zjištěný stav je %(actual_status)s." - -#, python-format -msgid "" -"Create backup aborted, expected volume status %(expected_status)s but got " -"%(actual_status)s." 
-msgstr "" -"Vytvoření zálohy zrušeno, očekávaný stav svazku je %(expected_status)s ale " -"zjištěný stav je %(actual_status)s." - -msgid "Create export for volume failed." -msgstr "Vytvoření exportu pro svazek selhalo." - -msgid "Create hostgroup error." -msgstr "Chyba při vytváření skupiny hostitele." - -#, python-format -msgid "Create hypermetro error. %s." -msgstr "Chyba při vytváření hypermetra. %s" - -msgid "Create lun migration error." -msgstr "Chyba při vytváření přesunu lun." - -msgid "Create luncopy error." -msgstr "Chyba při vytváření kopie lun." - -msgid "Create lungroup error." -msgstr "Chyba při vytváření skupiny lun." - -msgid "Create manager volume flow failed." -msgstr "Vytvoření postupu správce svazku selhalo." - -msgid "Create snapshot error." -msgstr "Chyba při vytváření snímku." - -#, python-format -msgid "Create volume error. Because %s." -msgstr "Chyba při vytváření svazku: Protože %s." - -msgid "Create volume failed." -msgstr "Vytvoření svazku selhalo." - -msgid "Creating a consistency group from a source is not currently supported." -msgstr "" -"Vytváření skupiny jednotnosti ze zdroje není v současnosti podporováno." - -#, python-format -msgid "" -"Creating and activating zone set failed: (Zone set=%(cfg_name)s error=" -"%(err)s)." -msgstr "" -"Vytvoření a aktivování sady zóny selhalo: (Sada zóny=%(cfg_name)s chyba=" -"%(err)s)." - -#, python-format -msgid "" -"Creating and activating zone set failed: (Zone set=%(zoneset)s error=" -"%(err)s)." -msgstr "" -"Vytvoření a aktivování sady zóny selhalo: (Sada zóny=%(zoneset)s chyba=" -"%(err)s)." - -#, python-format -msgid "Creating usages for %(begin_period)s until %(end_period)s" -msgstr "Vytváření využití pro období od %(begin_period)s do %(end_period)s" - -msgid "Current host isn't part of HGST domain." -msgstr "Současný hostitel není součástí domény HGST." 
- -#, python-format -msgid "" -"Current host not valid for volume %(id)s with type %(type)s, migration not " -"allowed" -msgstr "" -"Současný hostitel není platný pro svazek %(id)s s typem %(type)s, přesun " -"není povolen" - -#, python-format -msgid "" -"Currently mapped host for volume %(vol)s is in unsupported host group with " -"%(group)s." -msgstr "" -"Současný mapovaný hostitel svazku %(vol)s je v nepodporované skupině " -"hostitele s %(group)s." - -msgid "" -"DRBDmanage driver setup error: some required libraries (dbus, drbdmanage.*) " -"not found." -msgstr "" -"Chyba nastavení ovladače DRBDmanage: nenalezeny některé požadované knihovny " -"(dbus, drbdmanage.*)." - -#, python-format -msgid "DRBDmanage expected one resource (\"%(res)s\"), got %(n)d" -msgstr "DRBDmanage očekával jeden zdroj (\"%(res)s\"), obdrženo %(n)d" - -msgid "Data ONTAP API version could not be determined." -msgstr "Nelze zjistit verzi API Data ONTAP." - -msgid "Data ONTAP operating in 7-Mode does not support QoS policy groups." -msgstr "Data ONTAP pracuje v režimu 7, který nepodporuje skupiny zásad QoS." - -msgid "Database schema downgrade is not allowed." -msgstr "Snížení verze schématu databáze na nižší verzi není povoleno." - -#, python-format -msgid "" -"Dedup is a valid provisioning type, but requires WSAPI version " -"'%(dedup_version)s' version '%(version)s' is installed." -msgstr "" -"Dedup je platný typ poskytování, ale vyžaduje WSAPI verze " -"'%(dedup_version)s', je nainstalována verze '%(version)s'." - -msgid "Dedup luns cannot be extended" -msgstr "Deduplikované lun nemohou být rozšířeny" - -#, python-format -msgid "" -"Default quota for resource: %(res)s is set by the default quota flag: quota_" -"%(res)s, it is now deprecated. Please use the default quota class for " -"default quota." -msgstr "" -"Výchozí kvóta pro zdroj: %(res)s je nastavena příznakem výchozí kvóty: quota_" -"%(res)s, je nyní zastaralé. Pro výchozí kvótu prosím použijte třídu výchozí " -"kvóty." 
- -msgid "Default volume type can not be found." -msgstr "Výchozí typ svazku nenalezen." - -msgid "Delete LUNcopy error." -msgstr "Chyba při mazání kopírování LUN." - -msgid "Delete QoS policy error." -msgstr "Chyba při mazání zásady QoS." - -msgid "Delete associated lun from lungroup error." -msgstr "Chyba při mazání přidružené lun ze skupiny lun." - -#, python-format -msgid "" -"Delete backup aborted, the backup service currently configured " -"[%(configured_service)s] is not the backup service that was used to create " -"this backup [%(backup_service)s]." -msgstr "" -"Smazání zálohy zrušeno, v současnosti nastavená služba záloh " -"[%(configured_service)s] není stejnou službou použitou k vytvoření této " -"zálohy [%(backup_service)s]." - -msgid "Delete consistency group failed." -msgstr "Smazání skupiny jednotnosti selhalo." - -msgid "Delete hostgroup error." -msgstr "Chyba při mazání skupiny hostitele." - -msgid "Delete hostgroup from mapping view error." -msgstr "Chyba při mazání skupiny hostitele ze zobrazení mapování." - -msgid "Delete lun error." -msgstr "Chyba při mazání lun." - -msgid "Delete lun migration error." -msgstr "Chyba při mazání přesunu lun." - -msgid "Delete lungroup error." -msgstr "Chyba při mazání skupiny lun." - -msgid "Delete lungroup from mapping view error." -msgstr "Chyba při mazání skupiny lun ze zobrazení mapování." - -msgid "Delete mapping view error." -msgstr "Chyba při mazání zobrazení mapování." - -msgid "Delete portgroup from mapping view error." -msgstr "Chyba při mazání skupiny portu ze zobrazení mapování." - -msgid "Delete snapshot error." -msgstr "Chyba při mazání snímku." - -#, python-format -msgid "Delete snapshot of volume not supported in state: %s." -msgstr "Smazání snímku svazku není podporováno ve stavu: %s." - -#, python-format -msgid "" -"Delete_backup aborted, expected backup status %(expected_status)s but got " -"%(actual_status)s." 
-msgstr "" -"Smazání zálohy zrušeno, očekávaný stav zálohy je %(expected_status)s ale " -"zjištěný stav je %(actual_status)s." - -msgid "Deleting volume from database and skipping rpc." -msgstr "Odpojování svazku od databáze a přeskakování rpc." - -#, python-format -msgid "Deleting zones failed: (command=%(cmd)s error=%(err)s)." -msgstr "Mazání zón selhalo: (příkaz=%(cmd)s chyba=%(err)s)." - -msgid "Dell API 2.1 or later required for Consistency Group support" -msgstr "" -"Pro podporu skupin jednotnosti je vyžadována API Dell verze 2.1 a vyšší." - -msgid "Describe-resource is admin only functionality" -msgstr "Describe-resource je funkce pouze pro správce" - -#, python-format -msgid "Destination has migration_status %(stat)s, expected %(exp)s." -msgstr "Cíl má stav přesunu %(stat)s, očekáváno %(exp)s." - -msgid "Destination volume not mid-migration." -msgstr "Cílový svazek není uprostřed přesunu." - -msgid "" -"Detach volume failed: More than one attachment, but no attachment_id " -"provided." -msgstr "" -"Odpojení svazku selhalo. Více než jedno připojení, ale nezadáno žádné " -"attachment_id." - -msgid "Detach volume from instance and then try again." -msgstr "Odpojte svazek od instance a zkuste to znovu." - -#, python-format -msgid "Detected more than one volume with name %(vol_name)s" -msgstr "Zjištěn více než jeden svazek s názvem %(vol_name)s" - -#, python-format -msgid "Did not find expected column in %(fun)s: %(hdr)s." -msgstr "Nenalezen očekávaný sloupec v %(fun)s: %(hdr)s." - -#, python-format -msgid "Did not find the expected key %(key)s in %(fun)s: %(raw)s." -msgstr "Očekávaný klíč %(key)s nenalezen v %(fun)s: %(raw)s." - -msgid "Disabled reason contains invalid characters or is too long" -msgstr "Důvod zakázání obsahuje neplatné znaky nebo je příliš dlouhý." - -#, python-format -msgid "Domain with name %s wasn't found." -msgstr "Doména s názvem %s nebyla nalezena." - -#, python-format -msgid "" -"Downlevel GPFS Cluster Detected. 
GPFS Clone feature not enabled in cluster " -"daemon level %(cur)s - must be at least at level %(min)s." -msgstr "" -"Zjištěn cluster GPFS nižší úrovně. Funkce klonování GPFS není povolena v " -"daemonu clusteru na úrovni %(cur)s - úroveň musí být alespoň %(min)s." - -#, python-format -msgid "Driver initialize connection failed (error: %(err)s)." -msgstr "Zavedení připojení ovladačem selhalo (chyba: %(err)s)." - -msgid "Driver must implement initialize_connection" -msgstr "Ovladač musí zavést initialize_connection" - -#, python-format -msgid "" -"Driver successfully decoded imported backup data, but there are missing " -"fields (%s)." -msgstr "" -"Ovladač úspěšně rozšifroval data importované zálohy, ale některá pole chybí " -"(%s)." - -#, python-format -msgid "" -"E-series proxy API version %(current_version)s does not support full set of " -"SSC extra specs. The proxy version must be at at least %(min_version)s." -msgstr "" -"Verze API E-series proxy %(current_version)s nepodporuje úplnou sadu " -"dodatečných specifikací SSC. Verze proxy musí být alespoň %(min_version)s." - -#, python-format -msgid "" -"EMC VNX Cinder Driver CLI exception: %(cmd)s (Return Code: %(rc)s) (Output: " -"%(out)s)." -msgstr "" -"Výjimka CLI ovladače EMC VNX Cinder: %(cmd)s (Návratový kód: %(rc)s) " -"(Výstup: %(out)s)." - -msgid "" -"EcomServerIp, EcomServerPort, EcomUserName, EcomPassword must have valid " -"values." -msgstr "" -"EcomServerIp, EcomServerPort, EcomUserName, EcomPassword musí mít platné " -"hodnoty." - -#, python-format -msgid "" -"Either 'cgsnapshot_id' or 'source_cgid' must be provided to create " -"consistency group %(name)s from source." -msgstr "" -"Pro vytvoření skupiny jednotnosti %(name)s ze zdroje musí být zadán buď " -"'cgsnapshot_id' nebo 'source_cgid'." - -#, python-format -msgid "" -"Either SLO: %(slo)s or workload %(workload)s is invalid. Examine previous " -"error statement for valid values." 
-msgstr "" -"SLO: %(slo)s nebo vytížení %(workload)s jsou neplatné. Prozkoumejte " -"předchozí chybový výpis pro platné hodnoty." - -msgid "Either hitachi_serial_number or hitachi_unit_name is required." -msgstr "Je vyžadováno buď hitachi_serial_number nebo hitachi_unit_name." - -#, python-format -msgid "Element Composition Service not found on %(storageSystemName)s." -msgstr "Služba složení prvku nebyla nalezena v %(storageSystemName)s." - -msgid "Enables QoS." -msgstr "Povolí QoS." - -msgid "Enables compression." -msgstr "Povolí komprimaci." - -msgid "Enables replication." -msgstr "Povolí replikaci." - -msgid "Ensure that configfs is mounted at /sys/kernel/config." -msgstr "Ujistěte se, že configfs je připojen k /sys/kernel/config." - -#, python-format -msgid "" -"Error Adding Initiator: %(initiator)s on groupInitiatorGroup: " -"%(initiatorgroup)s Return code: %(ret.status)d Message: %(ret.data)s ." -msgstr "" -"Chyba při přidávání zavaděče: %(initiator)s ve skupině zavaděče: " -"%(initiatorgroup)s. Návratový kód: %(ret.status)d, zpráva: %(ret.data)s." - -#, python-format -msgid "" -"Error Adding to TargetGroup: %(targetgroup)s withIQN: %(iqn)sReturn code: " -"%(ret.status)d Message: %(ret.data)s." -msgstr "" -"Chyba při přidávání do cílové skupiny: %(targetgroup)s mající IQN: %(iqn)s. " -"Návratový kód: %(ret.status)d, zpráva: %(ret.data)s." - -#, python-format -msgid "Error Attaching volume %(vol)s." -msgstr "Chyba při připojování svazku %(vol)s." - -#, python-format -msgid "" -"Error Cloning Snapshot: %(snapshot)s on Volume: %(lun)s of Pool: %(pool)s " -"Project: %(project)s Clone project: %(clone_proj)s Return code: " -"%(ret.status)d Message: %(ret.data)s." -msgstr "" -"Chyba při klonování snímku: %(snapshot)s ve svazku %(lun)s ze zásoby " -"%(pool)s, projekt %(project)s, klon projektu: %(clone_proj)s, návratový kód: " -"%(ret.status)d, zpráva: %(ret.data)s." - -#, python-format -msgid "" -"Error Create Cloned Volume: %(cloneName)s Return code: %(rc)lu. 
Error: " -"%(error)s." -msgstr "" -"Chyba při vytváření klonovaného svazku: %(cloneName)s. Návratový kód: " -"%(rc)lu. Chyba: %(error)s." - -#, python-format -msgid "" -"Error Create Cloned Volume: Volume: %(cloneName)s Source Volume:" -"%(sourceName)s. Return code: %(rc)lu. Error: %(error)s." -msgstr "" -"Chyba při vytváření klonovaného svazku: Svazek: %(cloneName)s, zdrojový " -"svazek %(sourceName)s. Návratový kód: %(rc)lu. Chyba: %(error)s." - -#, python-format -msgid "" -"Error Create Group: %(groupName)s. Return code: %(rc)lu. Error: %(error)s." -msgstr "" -"Chyba při vyváření skupiny: %(groupName)s. Návratový kód: %(rc)lu. Chyba: " -"%(error)s." - -#, python-format -msgid "" -"Error Create Masking View: %(groupName)s. Return code: %(rc)lu. Error: " -"%(error)s." -msgstr "" -"Chyba při vytváření maskování: %(groupName)s. Návratový kód: %(rc)lu. Chyba: " -"%(error)s." - -#, python-format -msgid "" -"Error Create Volume: %(volumeName)s. Return code: %(rc)lu. Error: %(error)s." -msgstr "" -"Chyba při vytváření svazku: %(volumeName)s. Návratový kód: %(rc)lu. Chyba: " -"%(error)s." - -#, python-format -msgid "" -"Error Create Volume: %(volumename)s. Return code: %(rc)lu. Error: %(error)s." -msgstr "" -"Chyba při vytváření svazku: %(volumename)s. Návratový kód: %(rc)lu. Chyba: " -"%(error)s." - -#, python-format -msgid "" -"Error CreateGroupReplica: source: %(source)s target: %(target)s. Return " -"code: %(rc)lu. Error: %(error)s." -msgstr "" -"Chyba při vytváření skupiny repliky: zdroj: %(source)s cíl: %(target)s. " -"Návratový kód: %(rc)lu. Chyba: %(error)s." - -#, python-format -msgid "" -"Error Creating Initiator: %(initiator)s on Alias: %(alias)s Return code: " -"%(ret.status)d Message: %(ret.data)s ." -msgstr "" -"Chyba při vytváření zavaděče: %(initiator)s s přezdívkou: %(alias)s. " -"Návratový kód: %(ret.status)d, zpráva: %(ret.data)s." 
- -#, python-format -msgid "" -"Error Creating Project: %(project)s on Pool: %(pool)s Return code: " -"%(ret.status)d Message: %(ret.data)s ." -msgstr "" -"Chyba při vytváření projektu: %(project)s v zásobě %(pool)s. Návratový kód: " -"%(ret.status)d, zpráva: %(ret.data)s." - -#, python-format -msgid "" -"Error Creating Property: %(property)s Type: %(type)s Description: " -"%(description)s Return code: %(ret.status)d Message: %(ret.data)s." -msgstr "" -"Chyba při vytváření vlastnosti: %(property)s, typ: %(type)s, popis: " -"%(description)s, návratový kód: %(ret.status)d, zpráva: %(ret.data)s." - -#, python-format -msgid "" -"Error Creating Share: %(name)s Return code: %(ret.status)d Message: " -"%(ret.data)s." -msgstr "" -"Chyba při vytváření sdílení: %(name)s, návratový kód: %(ret.status)d, " -"zpráva: %(ret.data)s." - -#, python-format -msgid "" -"Error Creating Snapshot: %(snapshot)s onVolume: %(lun)s to Pool: %(pool)s " -"Project: %(project)s Return code: %(ret.status)d Message: %(ret.data)s." -msgstr "" -"Chyba při vytváření snímku: %(snapshot)s ve svazku %(lun)s do zásoby " -"%(pool)s, projekt %(project)s, návratový kód: %(ret.status)d, zpráva: " -"%(ret.data)s." - -#, python-format -msgid "" -"Error Creating Snapshot: %(snapshot)s onshare: %(share)s to Pool: %(pool)s " -"Project: %(project)s Return code: %(ret.status)d Message: %(ret.data)s." -msgstr "" -"Chyba při vytváření snímku: %(snapshot)s ve sdílení %(share)s do zásoby " -"%(pool)s, projekt %(project)s, návratový kód: %(ret.status)d, zpráva: " -"%(ret.data)s." - -#, python-format -msgid "" -"Error Creating Target: %(alias)sReturn code: %(ret.status)d Message: " -"%(ret.data)s ." -msgstr "" -"Chyba při vytváření cíle: %(alias)s. Návratový kód: %(ret.status)d, zpráva: " -"%(ret.data)s." - -#, python-format -msgid "" -"Error Creating TargetGroup: %(targetgroup)s withIQN: %(iqn)sReturn code: " -"%(ret.status)d Message: %(ret.data)s ." 
-msgstr "" -"Chyba při vytváření cílové skupiny: %(targetgroup)s mající IQN: %(iqn)s. " -"Návratový kód: %(ret.status)d, zpráva: %(ret.data)s." - -#, python-format -msgid "" -"Error Creating Volume: %(lun)s Size: %(size)s Return code: %(ret.status)d " -"Message: %(ret.data)s." -msgstr "" -"Chyba při vytváření svazku: %(lun)s, velikost: %(size)s, návratový kód: " -"%(ret.status)d, zpráva: %(ret.data)s." - -#, python-format -msgid "" -"Error Creating new composite Volume Return code: %(rc)lu. Error: %(error)s." -msgstr "" -"Chyba při vytváření nového složeného svazku. Návratový kód: %(rc)lu. Chyba: " -"%(error)s." - -#, python-format -msgid "" -"Error Creating replication action on: pool: %(pool)s Project: %(proj)s " -"volume: %(vol)s for target: %(tgt)s and pool: %(tgt_pool)sReturn code: " -"%(ret.status)d Message: %(ret.data)s ." -msgstr "" -"Chyba při provádění replikace na: zásobu: %(pool)s, projekt %(proj)s, " -"svazek: %(vol)s pro cíl %(tgt)s a zásobu: %(tgt_pool)s. Návratový kód: " -"%(ret.status)d, zpráva: %(ret.data)s." - -msgid "Error Creating unbound volume on an Extend operation." -msgstr "Chyba při vytváření nesvázaného svazku v operaci rozšíření." - -msgid "Error Creating unbound volume." -msgstr "Chyba při vytváření nesvázaného svazku." - -#, python-format -msgid "" -"Error Delete Volume: %(volumeName)s. Return code: %(rc)lu. Error: %(error)s." -msgstr "" -"Chyba při mazání svazku: %(volumeName)s. Návratový kód: %(rc)lu. Chyba: " -"%(error)s." - -#, python-format -msgid "" -"Error Deleting Snapshot: %(snapshot)s on Share: %(share)s to Pool: %(pool)s " -"Project: %(project)s Return code: %(ret.status)d Message: %(ret.data)s." -msgstr "" -"Chyba při mazání snímku: %(snapshot)s ve sdílení %(share)s do zásoby " -"%(pool)s, projekt %(project)s, návratový kód: %(ret.status)d, zpráva: " -"%(ret.data)s." 
- -#, python-format -msgid "" -"Error Deleting Snapshot: %(snapshot)s on Volume: %(lun)s to Pool: %(pool)s " -"Project: %(project)s Return code: %(ret.status)d Message: %(ret.data)s." -msgstr "" -"Chyba při mazání snímku: %(snapshot)s ve svazku %(lun)s do zásoby %(pool)s, " -"projekt %(project)s, návratový kód: %(ret.status)d, zpráva: %(ret.data)s." - -#, python-format -msgid "" -"Error Deleting Volume: %(lun)s from Pool: %(pool)s, Project: %(project)s. " -"Return code: %(ret.status)d, Message: %(ret.data)s." -msgstr "" -"Chyba při mazání svazku: %(lun)s ze zásoby %(pool)s, projekt %(project)s, " -"návratový kód: %(ret.status)d, zpráva: %(ret.data)s." - -#, python-format -msgid "" -"Error Deleting project: %(project)s on pool: %(pool)s Return code: " -"%(ret.status)d Message: %(ret.data)s." -msgstr "" -"Chyba při mazání projektu: %(project)s v zásobě %(pool)s. Návratový kód: " -"%(ret.status)d, zpráva: %(ret.data)s." - -#, python-format -msgid "" -"Error Deleting replication action: %(id)s Return code: %(ret.status)d " -"Message: %(ret.data)s." -msgstr "" -"Chyba při odstraňování provedení replikace: %(id)s. Návratový kód: " -"%(ret.status)d, zpráva: %(ret.data)s." - -#, python-format -msgid "" -"Error Getting Initiators: InitiatorGroup: %(initiatorgroup)s Return code: " -"%(ret.status)d Message: %(ret.data)s ." -msgstr "" -"Chyba při získávání zavaděčů: Skupina zavaděče: %(initiatorgroup)s. " -"Návratový kód: %(ret.status)d, zpráva: %(ret.data)s." - -#, python-format -msgid "" -"Error Getting Project Stats: Pool: %(pool)s Project: %(project)s Return " -"code: %(ret.status)d Message: %(ret.data)s." -msgstr "" -"Chyba při získávání statistik projektu: Zásoba %(pool)s, projekt " -"%(project)s, návratový kód: %(ret.status)d, zpráva: %(ret.data)s." - -#, python-format -msgid "" -"Error Getting Share: %(share)s on Pool: %(pool)s Project: %(project)s Return " -"code: %(ret.status)d Message: %(ret.data)s." 
-msgstr "" -"Chyba při získávání sdílení: %(share)s v zásobě %(pool)s, projekt " -"%(project)s, návratový kód: %(ret.status)d, zpráva: %(ret.data)s." - -#, python-format -msgid "" -"Error Getting Snapshot: %(snapshot)s on Volume: %(lun)s to Pool: %(pool)s " -"Project: %(project)s Return code: %(ret.status)d Message: %(ret.data)s." -msgstr "" -"Chyba při získávání snímku: %(snapshot)s ve svazku %(lun)s do zásoby " -"%(pool)s, projekt %(project)s, návratový kód: %(ret.status)d, zpráva: " -"%(ret.data)s." - -#, python-format -msgid "" -"Error Getting Target: %(alias)sReturn code: %(ret.status)d Message: " -"%(ret.data)s ." -msgstr "" -"Chyba při získávání cíle: %(alias)s. Návratový kód: %(ret.status)d, zpráva: " -"%(ret.data)s." - -#, python-format -msgid "" -"Error Getting Volume: %(lun)s on Pool: %(pool)s Project: %(project)s Return " -"code: %(ret.status)d Message: %(ret.data)s." -msgstr "" -"Chyba při získávání svazku: %(lun)s v zásobě %(pool)s, projekt %(project)s, " -"návratový kód: %(ret.status)d, zpráva: %(ret.data)s." - -#, python-format -msgid "" -"Error Migrating volume from one pool to another. Return code: %(rc)lu. " -"Error: %(error)s." -msgstr "" -"Chyba při přesunu svazku z jedné zásoby do druhé. Návratový kód: %(rc)lu. " -"Chyba: %(error)s." - -#, python-format -msgid "" -"Error Modifying masking view : %(groupName)s. Return code: %(rc)lu. Error: " -"%(error)s." -msgstr "" -"Chyba při úpravě maskování : %(groupName)s. Návratový kód: %(rc)lu. Chyba: " -"%(error)s." - -#, python-format -msgid "" -"Error Setting props Props: %(props)s on Volume: %(lun)s of Pool: %(pool)s " -"Project: %(project)s Return code: %(ret.status)d Message: %(ret.data)s." -msgstr "" -"Chyba při nastavování vlastností: %(props)s ve svazku %(lun)s v zásobě " -"%(pool)s, projekt %(project)s, návratový kód: %(ret.status)d, zpráva: " -"%(ret.data)s." - -#, python-format -msgid "" -"Error Terminating migrate session. Return code: %(rc)lu. Error: %(error)s." 
-msgstr "" -"Chyba při ukončování sezení přesunu. Návratový kód: %(rc)lu. Chyba: " -"%(error)s." - -#, python-format -msgid "" -"Error Verifying Initiator: %(iqn)s Return code: %(ret.status)d Message: " -"%(ret.data)s." -msgstr "" -"Chyba při ověřování zavaděče: %(iqn)s. Návratový kód: %(ret.status)d, " -"zpráva: %(ret.data)s." - -#, python-format -msgid "" -"Error Verifying Pool: %(pool)s Return code: %(ret.status)d Message: " -"%(ret.data)s." -msgstr "" -"Chyba při ověřování zásoby: %(pool)s: Návratový kód: %(ret.status)d, zpráva: " -"%(ret.data)s." - -#, python-format -msgid "" -"Error Verifying Project: %(project)s on Pool: %(pool)s Return code: " -"%(ret.status)d Message: %(ret.data)s." -msgstr "" -"Chyba při ověřování projektu: %(project)s v zásobě %(pool)s. Návratový kód: " -"%(ret.status)d, zpráva: %(ret.data)s." - -#, python-format -msgid "" -"Error Verifying Service: %(service)s Return code: %(ret.status)d Message: " -"%(ret.data)s." -msgstr "" -"Chyba při ověřování služby: Služba: %(service)s, návratový kód: " -"%(ret.status)d, zpráva: %(ret.data)s." - -#, python-format -msgid "" -"Error Verifying Target: %(alias)s Return code: %(ret.status)d Message: " -"%(ret.data)s." -msgstr "" -"Chyba při ověřování cíle: %(alias)s. Návratový kód: %(ret.status)d, zpráva: " -"%(ret.data)s." - -#, python-format -msgid "" -"Error Verifying share: %(share)s on Project: %(project)s and Pool: %(pool)s " -"Return code: %(ret.status)d Message: %(ret.data)s." -msgstr "" -"Chyba při ověřování sdílení: %(share)s v projektu %(project)s a zásobě " -"%(pool)s. Návratový kód: %(ret.status)d, zpráva: %(ret.data)s." - -#, python-format -msgid "" -"Error adding Volume: %(volumeName)s with instance path: " -"%(volumeInstancePath)s." -msgstr "" -"Chyba při přidávání svazku: %(volumeName)s mající cestu instance: " -"%(volumeInstancePath)s." - -#, python-format -msgid "" -"Error adding initiator to group : %(groupName)s. Return code: %(rc)lu. " -"Error: %(error)s." 
-msgstr "" -"Chyba při přidávání zavaděče do skupiny : %(groupName)s. Návratový kód: " -"%(rc)lu. Chyba: %(error)s." - -#, python-format -msgid "Error adding volume to composite volume. Error is: %(error)s." -msgstr "Chyba při přidávání svazku do složeného svazku. Chyba byla: %(error)s." - -#, python-format -msgid "Error appending volume %(volumename)s to target base volume." -msgstr "" -"Chyba při připojování svazku %(volumename)s do cílového základního svazku." - -#, python-format -msgid "" -"Error associating storage group : %(storageGroupName)s. To fast Policy: " -"%(fastPolicyName)s with error description: %(errordesc)s." -msgstr "" -"Chyba při přidružování skupiny úložiště: %(storageGroupName)s k zásadě FAST: " -"%(fastPolicyName)s. Popis chyby: %(errordesc)s." - -#, python-format -msgid "" -"Error break clone relationship: Sync Name: %(syncName)s Return code: " -"%(rc)lu. Error: %(error)s." -msgstr "" -"Chyba při rušení vztahu klona. Synchronizovaný název: %(syncName)s. " -"Návratový kód: %(rc)lu. Chyba: %(error)s." - -msgid "Error connecting to ceph cluster." -msgstr "Chyba při připojování ke clusteru ceph." - -#, python-format -msgid "Error connecting via ssh: %s" -msgstr "Chyba při připojování pomocí ssh: %s" - -#, python-format -msgid "Error creating volume: %s." -msgstr "Chyba při vytváření svazku: %s." - -msgid "Error deleting replay profile." -msgstr "Chyba při mazání profilu rychlého načtení." - -#, python-format -msgid "Error deleting volume %(vol)s: %(err)s." -msgstr "Chyba při mazání svazku %(vol)s: %(err)s." - -#, python-format -msgid "Error during evaluator parsing: %(reason)s" -msgstr "Chyba během zpracování hodnotitele: %(reason)s" - -#, python-format -msgid "" -"Error editing share: %(share)s on Pool: %(pool)s Return code: %(ret.status)d " -"Message: %(ret.data)s ." -msgstr "" -"Chyba při úpravě sdílení: %(share)s v zásobě: %(pool)s. Návratový kód: " -"%(ret.status)d, zpráva: %(ret.data)s." 
- -#, python-format -msgid "" -"Error enabling iSER for NetworkPortal: please ensure that RDMA is supported " -"on your iSCSI port %(port)d on ip %(ip)s." -msgstr "" -"Chyba při povolování iSER pro síťový portál: ujistěte se prosím, že RDMA je " -"podporováno pro port %(port)d vašeho iSCSI na ip adrese %(ip)s." - -#, python-format -msgid "Error encountered during cleanup of a failed attach: %(ex)s" -msgstr "Během čištění selhaného připojení se vyskytla chyba: %(ex)s" - -#, python-format -msgid "Error executing CloudByte API [%(cmd)s], Error: %(err)s." -msgstr "Chyba při provádění [%(cmd)s] CloudByte API, chyba: %(err)s." - -msgid "Error executing EQL command" -msgstr "Chyba při provádění příkazu EQL" - -#, python-format -msgid "Error executing command via ssh: %s" -msgstr "Chyba při provádění příkazu pomocí ssh: %s" - -#, python-format -msgid "Error extending volume %(vol)s: %(err)s." -msgstr "Chyba při rozšiřování svazku %(vol)s: %(err)s." - -#, python-format -msgid "Error extending volume: %(reason)s" -msgstr "Chyba při rozšiřování svazku: %(reason)s" - -#, python-format -msgid "Error finding %(name)s." -msgstr "Chyba při hledání %(name)s." - -#, python-format -msgid "Error finding %s." -msgstr "Chyba při hledání %s." - -msgid "" -"Error getting appliance version details. Return code: %(ret.status)d " -"Message: %(ret.data)s ." -msgstr "" -"Chyba při získávání podrobností o verzi zařízení. Návratový kód: %(ret." -"status)d, zpráva: %(ret.data)s." - -#, python-format -msgid "Error getting domain id from name %(name)s: %(err)s." -msgstr "Chyba při získávání id domény z názvu %(name)s: %(err)s." - -#, python-format -msgid "Error getting domain id from name %(name)s: %(id)s." -msgstr "Chyba při získávání id domény z názvu %(name)s: %(id)s." - -msgid "Error getting initiator groups." -msgstr "Chyba při získávání skupin zavaděče." - -#, python-format -msgid "Error getting pool id from name %(pool)s: %(err)s." 
-msgstr "Chyba při získávání id zásoby s názvem %(pool)s: %(err)s." - -#, python-format -msgid "Error getting pool id from name %(pool_name)s: %(err_msg)s." -msgstr "Chyba při získávání id zásoby z názvu %(pool_name)s: %(err_msg)s." - -#, python-format -msgid "" -"Error getting replication action: %(id)s. Return code: %(ret.status)d " -"Message: %(ret.data)s ." -msgstr "" -"Chyba při získávání provádění replikace: %(id)s. Návratový kód: " -"%(ret.status)d, zpráva: %(ret.data)s." - -msgid "" -"Error getting replication source details. Return code: %(ret.status)d " -"Message: %(ret.data)s ." -msgstr "" -"Chyba při získávání podrobností o zdroji replikace. Návratový kód: %(ret." -"status)d, zpráva: %(ret.data)s." - -msgid "" -"Error getting replication target details. Return code: %(ret.status)d " -"Message: %(ret.data)s ." -msgstr "" -"Chyba při získávání podrobností o cíli replikace. Návratový kód: %(ret." -"status)d, zpráva: %(ret.data)s." - -#, python-format -msgid "" -"Error getting version: svc: %(svc)s.Return code: %(ret.status)d Message: " -"%(ret.data)s." -msgstr "" -"Chyba při získávání verze: svc: %(svc)s. Návratový kód: %(ret.status)d, " -"zpráva: %(ret.data)s." - -#, python-format -msgid "" -"Error in Operation [%(operation)s] for volume [%(cb_volume)s] in CloudByte " -"storage: [%(cb_error)s], error code: [%(error_code)s]." 
-msgstr "" -"Chyba v operaci [%(operation)s] pro svazek [%(cb_volume)s] v úložišti " -"CloudByte: [%(cb_error)s], kód chyby: [%(error_code)s]" - -#, python-format -msgid "Error in SolidFire API response: data=%(data)s" -msgstr "Chyba v odpovědi SolidFire API: data=%(data)s" - -#, python-format -msgid "Error in space-create for %(space)s of size %(size)d GB" -msgstr "Chyba při vytváření prostoru pro %(space)s s velikostí %(size)d GB" - -#, python-format -msgid "Error in space-extend for volume %(space)s with %(size)d additional GB" -msgstr "Chyba při rozšiřování prostoru svazku %(space)s o dalších %(size)d GB" - -#, python-format -msgid "" -"Error modify replica synchronization: %(sv)s operation: %(operation)s. " -"Return code: %(rc)lu. Error: %(error)s." -msgstr "" -"Chyba při změně synchronizace repliky: %(sv)s, operace: %(operation)s. " -"Návratový kód: %(rc)lu. Chyba: %(error)s." - -#, python-format -msgid "" -"Error modifying Service: %(service)s Return code: %(ret.status)d Message: " -"%(ret.data)s." -msgstr "" -"Chyba při změně služby: %(service)s, návratový kód: %(ret.status)d, zpráva: " -"%(ret.data)s." - -#, python-format -msgid "" -"Error moving volume: %(vol)s from source project: %(src)s to target project: " -"%(tgt)s Return code: %(ret.status)d Message: %(ret.data)s ." -msgstr "" -"Chyba při přesunování svazku: %(vol)s ze zdrojového projektu %(src)s do " -"cílového %(tgt)s. Návratový kód: %(ret.status)d, zpráva: %(ret.data)s." - -msgid "Error not a KeyError." -msgstr "Chyba není chyba klíče." - -msgid "Error not a TypeError." -msgstr "Chyba není chyba typu." - -#, python-format -msgid "Error occurred when creating cgsnapshot %s." -msgstr "Při vytváření snímku skupiny jednotnosti %s nastala chyba." - -#, python-format -msgid "Error occurred when deleting cgsnapshot %s." -msgstr "Při mazání snímku skupiny jednotnosti %s nastala chyba." - -#, python-format -msgid "Error occurred when updating consistency group %s." 
-msgstr "Při aktualizaci skupiny jednotnosti %s nastala chyba." - -#, python-format -msgid "Error renaming volume %(vol)s: %(err)s." -msgstr "Chyba při přejmenování svazku %(vol)s: %(err)s." - -#, python-format -msgid "Error response: %s" -msgstr "Chybová odpověď: %s" - -msgid "Error retrieving volume size" -msgstr "Chyba při získávání velikosti svazku" - -#, python-format -msgid "" -"Error sending replication update for action id: %(id)s . Return code: " -"%(ret.status)d Message: %(ret.data)s ." -msgstr "" -"Chyba při odesílání aktualizace replikace pro činnost s id: %(id)s. " -"Návratový kód: %(ret.status)d, zpráva: %(ret.data)s." - -#, python-format -msgid "" -"Error sending replication update. Returned error: %(err)s. Action: %(id)s." -msgstr "" -"Chyba při odesílání aktualizace replikace. Předaná chyba: %(err)s. Činnost: " -"%(id)s." - -#, python-format -msgid "" -"Error setting replication inheritance to %(set)s for volume: %(vol)s project " -"%(project)s Return code: %(ret.status)d Message: %(ret.data)s ." -msgstr "" -"Chyba při nastavování dědění replikace na %(set)s pro svazek: %(vol)s, " -"projekt %(project)s. Návratový kód: %(ret.status)d, zpráva: %(ret.data)s." - -#, python-format -msgid "" -"Error severing the package: %(package)s from source: %(src)s Return code: " -"%(ret.status)d Message: %(ret.data)s ." -msgstr "" -"Chyba při oddělování balíčku: %(package)s od zdroje: %(src)s. Návratový kód: " -"%(ret.status)d, zpráva: %(ret.data)s." - -#, python-format -msgid "Error unbinding volume %(vol)s from pool. %(error)s." -msgstr "Chyba při rozvazování svazku %(vol)s ze zásoby. %(error)s." - -#, python-format -msgid "Error while checking transaction status: %s" -msgstr "Chyba při kontrole stavu přenosu: %s" - -#, python-format -msgid "Error while getting data via ssh: (command=%(cmd)s error=%(err)s)." -msgstr "Chyba při získávání dat pomocí ssh: (příkaz=%(cmd)s chyba=%(err)s)." - -#, python-format -msgid "Error while requesting %(service)s API." 
-msgstr "Chyba při žádání API %(service)s." - -#, python-format -msgid "Error while running zoning CLI: (command=%(cmd)s error=%(err)s)." -msgstr "" -"Chyba při spouštění rozhraní příkazového řádku pro zónu: (příkaz=%(cmd)s " -"chyba=%(err)s)." - -#, python-format -msgid "" -"Exceeded max scheduling attempts %(max_attempts)d for volume %(volume_id)s" -msgstr "" -"Překročen maximální počet pokusů %(max_attempts)d pro svazek %(volume_id)s." - -msgid "Exceeded the limit of snapshots per volume" -msgstr "Překročeno omezení snímků na svazek" - -#, python-format -msgid "Exception appending meta volume to target volume %(volumename)s." -msgstr "" -"Při připojování meta svazku do cílového svazku %(volumename)s se objevila " -"výjimka." - -#, python-format -msgid "" -"Exception during create element replica. Clone name: %(cloneName)s Source " -"name: %(sourceName)s Extra specs: %(extraSpecs)s " -msgstr "" -"Při vytváření repliky prvku nastala výjimka. Název klonu: %(cloneName)s, " -"zdrojový název: %(sourceName)s, dodatečné specifikace: %(extraSpecs)s" - -#, python-format -msgid "Exception in _select_ds_for_volume: %s." -msgstr "Výjimka ve výběru diskového prostoru pro svazek: %s." - -#, python-format -msgid "Exception: %s" -msgstr "Výjimka: %s" - -#, python-format -msgid "Expected a uuid but received %(uuid)s." -msgstr "Očekáváno uuid ale obdrženo %(uuid)s." - -#, python-format -msgid "Expected integer for node_count, svcinfo lsiogrp returned: %(node)s." -msgstr "U počtu uzlů očekáváno číslo, svcinfo lsiogrp předalo: %(node)s." - -#, python-format -msgid "Expected no output from CLI command %(cmd)s, got %(out)s." -msgstr "" -"Od příkazu %(cmd)s v příkazovém řádku neočekáván žádný výstup, předáno " -"%(out)s." - -#, python-format -msgid "" -"Expected single vdisk returned from lsvdisk when filtering on vdisk_UID. " -"%(count)s were returned." -msgstr "" -"Při filtrování pomocí UID virtuálního disku bylo očekáváno vrácení jednoho " -"disku z lsvdisk. Bylo předáno %(count)s." 
- 
-#, python-format
-msgid "Expected volume size was %d"
-msgstr "Očekávaná velikost svazku byla %d"
-
-#, python-format
-msgid ""
-"Export backup aborted, expected backup status %(expected_status)s but got "
-"%(actual_status)s."
-msgstr ""
-"Exportování zálohy zrušeno, očekávaný stav zálohy je %(expected_status)s ale "
-"zjištěný stav je %(actual_status)s."
-
-#, python-format
-msgid ""
-"Export record aborted, the backup service currently configured "
-"[%(configured_service)s] is not the backup service that was used to create "
-"this backup [%(backup_service)s]."
-msgstr ""
-"Exportování záznamu zrušeno, v současnosti nastavená služba záloh "
-"[%(configured_service)s] není stejnou službou použitou k vytvoření této "
-"zálohy [%(backup_service)s]."
-
-msgid "Extend volume error."
-msgstr "Chyba při rozšiřování svazku."
-
-msgid ""
-"Extend volume is only supported for this driver when no snapshots exist."
-msgstr ""
-"Rozšíření svazku je tímto ovladačem podporováno pouze když svazek nemá "
-"snímky."
-
-msgid "Extend volume not implemented"
-msgstr "Rozšíření svazku není zavedeno"
-
-msgid "FAST is not supported on this array."
-msgstr "FAST není podporován v tomto poli."
-
-msgid "FC is the protocol but wwpns are not supplied by OpenStack."
-msgstr "FC je protokol, ale OpenStack nedodává wwpn."
-
-#, python-format
-msgid "Faield to unassign %(volume)s"
-msgstr "Nelze zrušit přidělení %(volume)s"
-
-#, python-format
-msgid "Fail to create cache volume %(volume)s. Error: %(err)s"
-msgstr "Nelze vytvořit svazek mezipaměti %(volume)s. Chyba: %(err)s"
-
-#, python-format
-msgid "Failed adding connection for fabric=%(fabric)s: Error: %(err)s"
-msgstr "Nelze přidat připojení pro fabric=%(fabric)s: Chyba: %(err)s"
-
-msgid "Failed cgsnapshot"
-msgstr "Snímek jednotnosti selhal"
-
-#, python-format
-msgid "Failed creating snapshot for volume %(volname)s: %(response)s."
-msgstr "Nelze vytvořit snímek ze svazku %(volname)s: %(response)s."
- -#, python-format -msgid "Failed getting details for pool %s." -msgstr "Nelze získat podrobnosti zásoby %s." - -#, python-format -msgid "Failed removing connection for fabric=%(fabric)s: Error: %(err)s" -msgstr "Nelze odstranit připojení pro fabric=%(fabric)s: Chyba: %(err)s" - -#, python-format -msgid "Failed to Extend Volume %(volname)s" -msgstr "Nelze rozšířit svazek %(volname)s" - -#, python-format -msgid "Failed to Login to 3PAR (%(url)s) because %(err)s" -msgstr "Nelze se přihlásit do 3PAR (%(url)s) kvůli %(err)s" - -msgid "Failed to access active zoning configuration." -msgstr "Nelze získat přístup k aktivním nastavení zónování." - -#, python-format -msgid "Failed to access zoneset status:%s" -msgstr "Nelze získat přístup ke stavu sady zóny: %s" - -#, python-format -msgid "" -"Failed to acquire a resource lock. (serial: %(serial)s, inst: %(inst)s, ret: " -"%(ret)s, stderr: %(err)s)" -msgstr "" -"Nelze získat zámek zdroje. (Sériová konzole: %(serial)s, inst: %(inst)s, " -"ret: %(ret)s, chybový výstup: %(err)s)" - -msgid "Failed to add the logical device." -msgstr "Nelze přidat logické zařízení." - -#, python-format -msgid "" -"Failed to add volume %(volumeName)s to consistency group %(cgName)s. Return " -"code: %(rc)lu. Error: %(error)s." -msgstr "" -"Nelze přidat svazek %(volumeName)s do skupiny jednotnosti %(cgName)s. " -"Návratový kód: %(rc)lu. Chyba: %(error)s." - -msgid "Failed to add zoning configuration." -msgstr "Nelze přidat nastavení zónování." - -#, python-format -msgid "" -"Failed to assign the iSCSI initiator IQN. (port: %(port)s, reason: " -"%(reason)s)" -msgstr "Nelze přidělit zavaděč iSCSI IQN. (Port: %(port)s, důvod: %(reason)s)" - -#, python-format -msgid "Failed to associate qos_specs: %(specs_id)s with type %(type_id)s." -msgstr "Nelze asociovat specifikace qos: %(specs_id)s s typem %(type_id)s." - -#, python-format -msgid "Failed to attach iSCSI target for volume %(volume_id)s." 
-msgstr "Nelze připojit cílové iSCSI pro svazek %(volume_id)s." - -#, python-format -msgid "Failed to backup volume metadata - %s" -msgstr "Nelze zálohovat popisná data svazku - %s" - -#, python-format -msgid "" -"Failed to backup volume metadata - Metadata backup object 'backup.%s.meta' " -"already exists" -msgstr "" -"Nelze zálohovat popisná data svazku - objekt zálohy popisných dat 'backup.%s." -"meta' již existuje" - -#, python-format -msgid "Failed to clone volume from snapshot %s." -msgstr "Nelze klonovat svazek ze snímku %s." - -#, python-format -msgid "Failed to connect to %(vendor_name)s Array %(host)s: %(err)s" -msgstr "Nelze se připojit k %(vendor_name)s Pole %(host)s: %(err)s" - -msgid "Failed to connect to array" -msgstr "Nelze se připojit k poli" - -#, python-format -msgid "Failed to connect to sheep daemon. addr: %(addr)s, port: %(port)s" -msgstr "Nelze se připojit k daemonu sheep. Adresa: %(addr)s, port: %(port)s" - -#, python-format -msgid "Failed to copy image to volume: %(reason)s" -msgstr "Nelze zkopírovat obraz do svazku: %(reason)s" - -#, python-format -msgid "Failed to copy metadata to volume: %(reason)s" -msgstr "Nelze zkopírovat popisná data do svazku: %(reason)s" - -msgid "Failed to copy volume, destination device unavailable." -msgstr "Nelze zkopírovat svazek, cílové zařízení je nedostupné." - -msgid "Failed to copy volume, source device unavailable." -msgstr "Nelze zkopírovat svazek, zdrojové zařízení je nedostupné." - -#, python-format -msgid "Failed to create CG %(cgName)s from snapshot %(cgSnapshot)s." -msgstr "" -"Nelze vytvořit skupinu jednotnosti %(cgName)s ze snímku %(cgSnapshot)s." - -#, python-format -msgid "Failed to create IG, %s" -msgstr "Nelze vytvořit IG, %s" - -#, python-format -msgid "Failed to create Volume Group: %(vg_name)s" -msgstr "Nelze vytvořit skupinu svazku: %(vg_name)s" - -#, python-format -msgid "" -"Failed to create a file. (file: %(file)s, ret: %(ret)s, stderr: %(err)s)" -msgstr "" -"Nelze vytvořit soubor. 
(Soubor: %(file)s, ret: %(ret)s, chybový výstup: " -"%(err)s)" - -#, python-format -msgid "Failed to create a temporary snapshot for volume %s." -msgstr "Nelze vytvořit dočasný snímek pro svazek %s." - -msgid "Failed to create api volume flow." -msgstr "Nelze vytvořit postup api svazku." - -#, python-format -msgid "Failed to create cg snapshot %(id)s due to %(reason)s." -msgstr "" -"Vytvoření snímku skupiny jednotnosti %(id)s selhalo z důvodu %(reason)s." - -#, python-format -msgid "Failed to create consistency group %(id)s due to %(reason)s." -msgstr "Vytvoření skupiny jednotnosti %(id)s selhalo z důvodu %(reason)s." - -#, python-format -msgid "Failed to create consistency group %(id)s:%(ret)s." -msgstr "Nelze vytvořit skupinu jednotnosti %(id)s:%(ret)s." - -#, python-format -msgid "" -"Failed to create consistency group %s because VNX consistency group cannot " -"accept compressed LUNs as members." -msgstr "" -"Nelze vytvořit skupinu jednotnosti %s protože skupina jednotnosti VNX nemůže " -"přijmout komprimované LUN jako členy." - -#, python-format -msgid "Failed to create consistency group: %(cgName)s." -msgstr "Nelze vytvořit skupinu jednotnosti: %(cgName)s." - -#, python-format -msgid "Failed to create consistency group: %(cgid)s. Error: %(excmsg)s." -msgstr "Nelze vytvořit skupinu jednotnosti: %(cgid)s. Chyba: %(excmsg)s." - -#, python-format -msgid "" -"Failed to create consistency group: %(consistencyGroupName)s Return code: " -"%(rc)lu. Error: %(error)s." -msgstr "" -"Nelze vytvořit skupinu jednotnosti: %(consistencyGroupName)s. Návratový kód: " -"%(rc)lu. Chyba: %(error)s." - -#, python-format -msgid "Failed to create hardware id(s) on %(storageSystemName)s." -msgstr "Nelze vytvořit id hardwaru v %(storageSystemName)s." - -#, python-format -msgid "" -"Failed to create host: %(name)s. Please check if it exists on the array." -msgstr "Nelze vytvořit hostitele: %(name)s. Zkontrolujte zda existuje v poli." 
- -#, python-format -msgid "Failed to create hostgroup: %(name)s. Check if it exists on the array." -msgstr "" -"Nelze vytvořit skupinu hostitele: %(name)s. Zkontrolujte zda existuje v poli." - -msgid "Failed to create iqn." -msgstr "Nelze vytvořit iqn." - -#, python-format -msgid "Failed to create iscsi target for volume %(volume_id)s." -msgstr "Nelze vytvořit cílové iscsi pro svazek %(volume_id)s." - -msgid "Failed to create manage existing flow." -msgstr "Nelze vytvořit správu existujícího postupu." - -msgid "Failed to create manage_existing flow." -msgstr "Nelze vytvořit postup pro správu existujících." - -msgid "Failed to create map on mcs, no channel can map." -msgstr "Nelze vytvořit mapu na mcs, žádný kanál nemůže mapovat." - -msgid "Failed to create map." -msgstr "Nelze vytvořit mapu." - -#, python-format -msgid "Failed to create metadata for volume: %(reason)s" -msgstr "Nelze vytvořit popisná data pro svazek: %(reason)s" - -msgid "Failed to create partition." -msgstr "Nelze vytvořit oddíl." - -#, python-format -msgid "Failed to create qos_specs: %(name)s with specs %(qos_specs)s." -msgstr "" -"Nelze vytvořit specifikace qos: %(name)s se specifikacemi %(qos_specs)s." - -msgid "Failed to create replica." -msgstr "Nelze vytvořit repliku." - -msgid "Failed to create scheduler manager volume flow" -msgstr "Nelze vytvořit postup správce plánovače svazku" - -#, python-format -msgid "Failed to create snapshot %s" -msgstr "Nelze vytvořit snímek %s" - -#, python-format -msgid "Failed to create snapshot for cg: %(cgName)s." -msgstr "Nelze vytvořit snímek pro skupinu jednotnosti: %(cgName)s." - -#, python-format -msgid "Failed to create snapshot for volume %s." -msgstr "Nelze vytvořit snímek pro svazek %s." - -#, python-format -msgid "Failed to create snapshot policy on volume %(vol)s: %(res)s." -msgstr "Nelze vytvořit zásadu snímku ve svazku %(vol)s: %(res)s." - -#, python-format -msgid "Failed to create snapshot resource area on volume %(vol)s: %(res)s." 
-msgstr "Nelze vytvořit oblast zdrojů snímku ve svazku %(vol)s: %(res)s." - -msgid "Failed to create snapshot." -msgstr "Nelze vytvořit snímek." - -#, python-format -msgid "" -"Failed to create snapshot. CloudByte volume information not found for " -"OpenStack volume [%s]." -msgstr "" -"Nelze vytvořit snímek. Informace o svazku CloudByte nenalezeny ve svazku " -"Openstack [%s]." - -#, python-format -msgid "Failed to create thin pool, error message was: %s" -msgstr "Nelze vytvořit mělkou zásobu, chybová zpráva byla: %s" - -#, python-format -msgid "Failed to create volume %s" -msgstr "Nelze vytvořit svazek %s" - -#, python-format -msgid "Failed to delete SI for volume_id: %(volume_id)s because it has pair." -msgstr "" -"Nelze smazat SI svazku s id: %(volume_id)s protože existují dva s tímto id." - -#, python-format -msgid "Failed to delete a logical device. (LDEV: %(ldev)s, reason: %(reason)s)" -msgstr "" -"Nelze smazat logické zařízení. (Logické zařízení: %(ldev)s, důvod: " -"%(reason)s)" - -#, python-format -msgid "Failed to delete cgsnapshot %(id)s due to %(reason)s." -msgstr "Smazání snímku skupiny jednotnosti %(id)s selhalo z důvodu %(reason)s." - -#, python-format -msgid "Failed to delete consistency group %(id)s due to %(reason)s." -msgstr "Smazání skupiny jednotnosti %(id)s selhalo z důvodu %(reason)s." - -#, python-format -msgid "Failed to delete consistency group: %(cgName)s." -msgstr "Nelze smazat skupinu jednotnosti: %(cgName)s." - -#, python-format -msgid "" -"Failed to delete consistency group: %(consistencyGroupName)s Return code: " -"%(rc)lu. Error: %(error)s." -msgstr "" -"Nelze smazat skupinu jednotnosti. %(consistencyGroupName)s. Návratový kód: " -"%(rc)lu. Chyba: %(error)s." - -msgid "Failed to delete device." -msgstr "Smazání zařízení selhalo." - -#, python-format -msgid "" -"Failed to delete fileset for consistency group %(cgname)s. Error: %(excmsg)s." -msgstr "" -"Nelze smazat sadu souborů ze skupinu jednotnosti %(cgname)s. 
Chyba: " -"%(excmsg)s." - -msgid "Failed to delete iqn." -msgstr "Nelze smazat iqn." - -msgid "Failed to delete map." -msgstr "Nelze smazat mapu." - -msgid "Failed to delete partition." -msgstr "Nelze smazat oddíl." - -msgid "Failed to delete replica." -msgstr "Nelze smazat repliku." - -#, python-format -msgid "Failed to delete snapshot %s" -msgstr "Nelze smazat snímek %s" - -#, python-format -msgid "Failed to delete snapshot for cg: %(cgId)s." -msgstr "Nelze smazat snímek pro skupinu jednotnosti: %(cgId)s." - -#, python-format -msgid "Failed to delete snapshot for snapshot_id: %s because it has pair." -msgstr "Nelze smazat snímek s id %s protože existují dva s tímto id." - -msgid "Failed to delete snapshot." -msgstr "Nelze smazat snímek." - -#, python-format -msgid "Failed to delete volume %(volumeName)s." -msgstr "Nelze smazat svazek %(volumeName)s." - -#, python-format -msgid "" -"Failed to delete volume for volume_id: %(volume_id)s because it has pair." -msgstr "" -"Nelze smazat svazek pro svazek s id: %(volume_id)s protože existují dva s " -"tímto id." - -#, python-format -msgid "Failed to detach iSCSI target for volume %(volume_id)s." -msgstr "Nelze odpojit cílové iSCSI pro svazek %(volume_id)s." - -msgid "Failed to determine blockbridge API configuration" -msgstr "Nelze zjistit nastavení blockbridge API" - -msgid "Failed to disassociate qos specs." -msgstr "Nelze odloučit specifikace qos." - -#, python-format -msgid "Failed to disassociate qos_specs: %(specs_id)s with type %(type_id)s." -msgstr "Nelze odloučit specifikace qos: %(specs_id)s s typem %(type_id)s." - -#, python-format -msgid "" -"Failed to ensure snapshot resource area, could not locate volume for id %s" -msgstr "Nelze zajistit oblast zdrojů snímku, nelze nalézt svazek s id %s" - -msgid "Failed to establish connection with Coho cluster" -msgstr "Zavedení připojení s clusterem Coho selhalo" - -#, python-format -msgid "" -"Failed to execute CloudByte API [%(cmd)s]. 
Http status: %(status)s, Error: " -"%(error)s." -msgstr "" -"Nelze provést [%(cmd)s] CloudByte API. Stav http: %(status)s, chyba: " -"%(error)s." - -msgid "Failed to execute common command." -msgstr "Nelze provést běžný příkaz." - -#, python-format -msgid "Failed to export for volume: %(reason)s" -msgstr "Nelze exportovat pro svazek: %(reason)s" - -msgid "Failed to find Storage Center" -msgstr "Nelze najít centrum úložiště" - -msgid "Failed to find a vdisk copy in the expected pool." -msgstr "V zadané zásobě nelze najít kopii virtuálního disku." - -msgid "Failed to find account for volume." -msgstr "Nelze najít účet pro svazek." - -#, python-format -msgid "Failed to find fileset for path %(path)s, command output: %(cmdout)s." -msgstr "" -"Nelze najít sadu souborů pro cestu %(path)s, výstup příkazu: %(cmdout)s." - -#, python-format -msgid "Failed to find host %s." -msgstr "Nelze najít hostitele %s." - -#, python-format -msgid "Failed to get CloudByte account details for account [%s]." -msgstr "Nelze získat údaje účtu CloudByte [%s]." - -#, python-format -msgid "Failed to get LUN target details for the LUN %s" -msgstr "Nelze získat podrobnosti cíle LUN pro LUN %s" - -#, python-format -msgid "Failed to get LUN target details for the LUN %s." -msgstr "Nelze získat podrobnosti cílového LUN %s." - -#, python-format -msgid "Failed to get LUN target list for the LUN %s" -msgstr "Nelze získat seznam cílů LUN pro LUN %s" - -#, python-format -msgid "Failed to get Partition ID for volume %(volume_id)s." -msgstr "Nelze získat ID oddílu pro svazek %(volume_id)s." - -#, python-format -msgid "Failed to get Raid Snapshot ID from Snapshot %(snapshot_id)s." -msgstr "Nelze získat ID Raid snímku %(snapshot_id)s." - -#, python-format -msgid "Failed to get Raid Snapshot ID from snapshot: %(snapshot_id)s." -msgstr "Nelze získat ID Raid snímku %(snapshot_id)s." - -#, python-format -msgid "" -"Failed to get a storage resource. The system will attempt to get the storage " -"resource again. 
(resource: %(resource)s)" -msgstr "" -"Nelze získat zdroj úložiště. Systém se ho pokusí získat znovu. (Zdroj: " -"%(resource)s)" - -#, python-format -msgid "Failed to get all associations of qos specs %s" -msgstr "Nelze získat všechna přidružení specifikací qos %s" - -msgid "Failed to get channel info." -msgstr "Nelze získat informace o kanálu." - -#, python-format -msgid "Failed to get code level (%s)." -msgstr "Nelze získat úroveň kódu (%s)." - -msgid "Failed to get device info." -msgstr "Nelze získat informace o zařízení." - -#, python-format -msgid "Failed to get domain because CPG (%s) doesn't exist on array." -msgstr "" -"Nelze získat doménu protože společná skupiny poskytování (%s) neexistuje v " -"poli." - -#, python-format -msgid "Failed to get ip on Channel %(channel_id)s with volume: %(volume_id)s." -msgstr "Nelze získat ip na kanálu %(channel_id)s se svazkem: %(volume_id)s." - -msgid "Failed to get iqn info." -msgstr "Nelze získat informace o iqn." - -msgid "Failed to get license info." -msgstr "Nelze získat informace o licenci." - -msgid "Failed to get lv info." -msgstr "Nelze získat informace o lv." - -msgid "Failed to get map info." -msgstr "Nelze získat informace o mapě." - -msgid "Failed to get model update from clone" -msgstr "Nelze získat aktualizaci modelu z klona" - -msgid "Failed to get name server info." -msgstr "Nelze získat informace o jmenném serveru." - -msgid "Failed to get network info." -msgstr "Nelze získat informace o síti." - -#, python-format -msgid "Failed to get new part id in new pool: %(pool_id)s." -msgstr "Nelze získat nové id části v nové zásobě: %(pool_id)s." - -msgid "Failed to get partition info." -msgstr "Nelze získat informace o oddílu." - -#, python-format -msgid "Failed to get pool id with volume %(volume_id)s." -msgstr "Nelze získat zásobu svazku s id %(volume_id)s." - -msgid "Failed to get replica info." -msgstr "Nelze získat informace o replice." - -msgid "Failed to get show fcns database info." 
-msgstr "Nelze získat zobrazení informací o databázi fcns." - -#, python-format -msgid "Failed to get size of volume %s" -msgstr "Nelze získat velikost svazku %s" - -#, python-format -msgid "Failed to get snapshot for volume %s." -msgstr "Nelze získat snímek pro svazek %s." - -msgid "Failed to get snapshot info." -msgstr "Nelze získat informace o snímku." - -#, python-format -msgid "Failed to get target IQN for the LUN %s" -msgstr "Nelze získat cílové IQN pro LUN %s" - -#, python-format -msgid "Failed to get target portal for the LUN %s" -msgstr "Nelze získat cílový portál LUN pro LUN %s" - -msgid "Failed to get targets" -msgstr "Nelze získat cíle" - -msgid "Failed to get wwn info." -msgstr "Nelze získat informace o wwn." - -#, python-format -msgid "" -"Failed to get, create or add volume %(volumeName)s to masking view " -"%(maskingViewName)s. The error message received was %(errorMessage)s." -msgstr "" -"Nelze získat, vytvořit, nebo přidat svazek %(volumeName)s pro zamaskování " -"%(maskingViewName)s. Obdržená chybová zpráva byla %(errorMessage)s." - -msgid "Failed to identify volume backend." -msgstr "Nelze zjistit podpůrnou vrstvu svazku." - -#, python-format -msgid "Failed to link fileset for the share %(cgname)s. Error: %(excmsg)s." -msgstr "Nelze propojit sadu souborů se sdílením %(cgname)s. Chyba: %(excmsg)s." - -#, python-format -msgid "Failed to log on %s Array (invalid login?)." -msgstr "Nelze se přihlásit k %s Pole (neplatné přihlášení?)." - -#, python-format -msgid "Failed to login for user %s." -msgstr "Nelze se přihlásit jako uživatel %s." - -msgid "Failed to login with all rest URLs." -msgstr "Nelze se přihlásit pomocí jakékoliv z REST URL." - -#, python-format -msgid "" -"Failed to make a request to Datera cluster endpoint due to the following " -"reason: %s" -msgstr "" -"Nelze zažádat pro koncový bod clusteru Datera z následujícího důvodu: %s" - -msgid "Failed to manage api volume flow." -msgstr "Nelze spravovat postup api svazku." 
- -#, python-format -msgid "" -"Failed to manage existing %(type)s %(name)s, because reported size %(size)s " -"was not a floating-point number." -msgstr "" -"Nelze spravovat existující %(type)s %(name)s, protože nahlášená velikost " -"%(size)s není číslo s plovoucí desetinnou čárkou." - -#, python-format -msgid "" -"Failed to manage existing volume %(name)s, because of error in getting " -"volume size." -msgstr "" -"Nelze spravovat existující svazek %(name)s, protože při získávání velikosti " -"svazku došlo k chybě." - -#, python-format -msgid "" -"Failed to manage existing volume %(name)s, because rename operation failed: " -"Error msg: %(msg)s." -msgstr "" -"Nelze spravovat existující svazek %(name)s, protože operace pro přejmenování " -"selhala: Chybová zpráva: %(msg)s." - -#, python-format -msgid "" -"Failed to manage existing volume %(name)s, because reported size %(size)s " -"was not a floating-point number." -msgstr "" -"Nelze spravovat existující svazek %(name)s, protože nahlášená velikost " -"%(size)s není číslo s plovoucí desetinnou čárkou." - -#, python-format -msgid "Failed to manage volume %s." -msgstr "Nelze spravovat svazek %s." - -#, python-format -msgid "" -"Failed to map a logical device. (LDEV: %(ldev)s, LUN: %(lun)s, port: " -"%(port)s, id: %(id)s)" -msgstr "" -"Nelze mapovat logické zařízení. (Logické zařízení: %(ldev)s, LUN: %(lun)s, " -"port: %(port)s, id: %(id)s)" - -msgid "Failed to migrate volume for the first time." -msgstr "Nelze přesunout svazek poprvé." - -msgid "Failed to migrate volume for the second time." -msgstr "Nelze přesunout svazek podruhé." - -#, python-format -msgid "Failed to move LUN mapping. Return code: %s" -msgstr "Nelze přesunout mapování LUN. Návratový kód: %s" - -#, python-format -msgid "Failed to move volume %s." -msgstr "Nelze přesunout svazek %s." - -#, python-format -msgid "Failed to open a file. (file: %(file)s, ret: %(ret)s, stderr: %(err)s)" -msgstr "" -"Nelze otevřít soubor. 
(Soubor: %(file)s, ret: %(ret)s, chybový výstup: " -"%(err)s)" - -#, python-format -msgid "" -"Failed to parse CLI output:\n" -" command: %(cmd)s\n" -" stdout: %(out)s\n" -" stderr: %(err)s." -msgstr "" -"Nelze zpracovat výstup rozhraní příkazového řádku:\n" -"příkaz: %(cmd)s\n" -"standardní výstup: %(out)s\n" -"chybový výstup: %(err)s." - -msgid "" -"Failed to parse the configuration option 'keystone_catalog_info', must be in " -"the form ::" -msgstr "" -"Nelze zpracovat volbu nastavení 'keystone_catalog_info', musí být ve formátu " -"::" - -msgid "" -"Failed to parse the configuration option 'swift_catalog_info', must be in " -"the form ::" -msgstr "" -"Nelze zpracovat volbu nastavení 'swift_catalog_info', musí být ve formátu " -"::" - -#, python-format -msgid "" -"Failed to perform a zero-page reclamation. (LDEV: %(ldev)s, reason: " -"%(reason)s)" -msgstr "" -"Nelze provést reklamaci nulté stránky. (Logické zařízení: %(ldev)s, důvod: " -"%(reason)s)" - -#, python-format -msgid "Failed to remove export for volume %(volume)s: %(reason)s" -msgstr "Nelze odstranit exportování svazku %(volume)s: %(reason)s" - -#, python-format -msgid "Failed to remove iscsi target for volume %(volume_id)s." -msgstr "Nelze odstranit cílové iscsi pro svazek %(volume_id)s." - -#, python-format -msgid "" -"Failed to remove volume %(volumeName)s from consistency group %(cgName)s. " -"Return code: %(rc)lu. Error: %(error)s." -msgstr "" -"Nelze odstranit svazek %(volumeName)s ze skupiny jednotnosti %(cgName)s. " -"Návratový kód: %(rc)lu. Chyba: %(error)s." - -#, python-format -msgid "Failed to remove volume %(volumeName)s from default SG." -msgstr "Nelze odstranit svazek %(volumeName)s z výchozí skupiny úložiště." - -#, python-format -msgid "Failed to remove volume %(volumeName)s from default SG: %(volumeName)s." -msgstr "" -"Nelze odstranit svazek %(volumeName)s z výchozí skupiny úložiště: " -"%(volumeName)s." - -#, python-format -msgid "" -"Failed to remove: %(volumename)s. 
from the default storage group for FAST " -"policy %(fastPolicyName)s." -msgstr "" -"Nelze odstranit: %(volumename)s. z výchozí skupiny úložiště pro zásadu FAST " -"%(fastPolicyName)s." - -#, python-format -msgid "" -"Failed to rename logical volume %(name)s, error message was: %(err_msg)s" -msgstr "" -"Nelze přejmenovat logický svazek %(name)s, chybová zpráva byla: %(err_msg)s" - -#, python-format -msgid "Failed to retrieve active zoning configuration %s" -msgstr "Nelze získat aktivní nastavení zónování %s" - -#, python-format -msgid "Failed to set QoS for existing volume %(name)s, Error msg: %(msg)s." -msgstr "" -"Nelze nastavit Qos pro existující svazek %(name)s, chybová zpráva: %(msg)s." - -msgid "Failed to set attribute 'Incoming user' for SCST target." -msgstr "Nelze nastavit vlastnost 'Incoming user' pro cíl SCST." - -msgid "Failed to set partition." -msgstr "Nelze nastavit oddíl." - -#, python-format -msgid "" -"Failed to set permissions for the consistency group %(cgname)s. Error: " -"%(excmsg)s." -msgstr "" -"Nelze nastavit oprávnění pro skupinu jednotnosti %(cgname)s. Chyba: " -"%(excmsg)s." - -#, python-format -msgid "" -"Failed to specify a logical device for the volume %(volume_id)s to be " -"unmapped." -msgstr "" -"Nelze zadat logické zařízení pro svazek %(volume_id)s pro zrušení namapování." - -#, python-format -msgid "" -"Failed to specify a logical device to be deleted. (method: %(method)s, id: " -"%(id)s)" -msgstr "" -"Nelze zadat logické zařízení ke smazání. (Metoda: %(method)s, id: %(id)s)" - -msgid "Failed to terminate migrate session." -msgstr "Nelze ukončit sezení přesunu." - -#, python-format -msgid "Failed to unbind volume %(volume)s" -msgstr "Nelze rozvázat svazek %(volume)s" - -#, python-format -msgid "" -"Failed to unlink fileset for consistency group %(cgname)s. Error: %(excmsg)s." -msgstr "" -"Nelze odpojit sadu souborů pro skupinu jednotnosti %(cgname)s. Chyba: " -"%(excmsg)s." 
- -#, python-format -msgid "Failed to unmap a logical device. (LDEV: %(ldev)s, reason: %(reason)s)" -msgstr "" -"Nelze zrušit mapování logického zařízení. (Logické zařízení: %(ldev)s, " -"důvod: %(reason)s)" - -#, python-format -msgid "Failed to update consistency group: %(cgName)s." -msgstr "Nelze aktualizovat skupinu jednotnosti: %(cgName)s." - -#, python-format -msgid "Failed to update metadata for volume: %(reason)s" -msgstr "Nelze aktualizovat popisná data svazku: %(reason)s" - -msgid "Failed to update or delete zoning configuration" -msgstr "Nelze aktualizovat nebo smazat nastavení zónování" - -#, python-format -msgid "Failed to update qos_specs: %(specs_id)s with specs %(qos_specs)s." -msgstr "" -"Nelze aktualizovat specifikace qos: %(specs_id)s se specifikacemi " -"%(qos_specs)s." - -msgid "Failed to update quota usage while retyping volume." -msgstr "Nelze aktualizovat využití kvóty při přetypování svazku." - -#, python-format -msgid "" -"Failed updating volume %(vol_id)s metadata using the provided %(src_type)s " -"%(src_id)s metadata" -msgstr "" -"Nelze aktualizovat popisná data svazku %(vol_id)s pomocí zadaných dat " -"%(src_type)s %(src_id)s" - -#, python-format -msgid "Failure creating volume %s." -msgstr "Nelze vytvořit svazek %s." - -#, python-format -msgid "Failure getting LUN info for %s." -msgstr "Nelze získat informace LUN pro %s." - -#, python-format -msgid "Failure moving new cloned LUN to %s." -msgstr "Nelze přesunout nově klonovaný LUN do %s." - -#, python-format -msgid "Failure staging LUN %s to tmp." -msgstr "Nelze zařadit LUN %s do tmp." - -#, python-format -msgid "Fexvisor failed to add volume %(id)s due to %(reason)s." -msgstr "Flexvisor nemohl přidat svazek %(id)s, z důvodu %(reason)s." - -#, python-format -msgid "" -"Fexvisor failed to join the volume %(vol)s in the group %(group)s due to " -"%(ret)s." -msgstr "" -"Flexvisor nemohl připojit svazek %(vol)s ve skupině %(group)s z důvodu " -"%(ret)s." 
- -#, python-format -msgid "" -"Fexvisor failed to remove the volume %(vol)s in the group %(group)s due to " -"%(ret)s." -msgstr "" -"Flexvisor nemohl odstranit svazek %(vol)s ve skupině %(group)s z důvodu " -"%(ret)s." - -#, python-format -msgid "Fexvisor failed to remove volume %(id)s due to %(reason)s." -msgstr "Flexvisor nemohl odstranit svazek %(id)s, z důvodu %(reason)s." - -#, python-format -msgid "Fibre Channel SAN Lookup failure: %(reason)s" -msgstr "Selhání vyhledávání SAN Fibre Channel: %(reason)s" - -#, python-format -msgid "Fibre Channel Zone operation failed: %(reason)s" -msgstr "Selhání zóny operací Fibre Channel: %(reason)s" - -#, python-format -msgid "Fibre Channel connection control failure: %(reason)s" -msgstr "Selhání kontroly připojení Fibre Channel: %(reason)s" - -#, python-format -msgid "File %(file_path)s could not be found." -msgstr "Soubor %(file_path)s nemohl být nalezen." - -#, python-format -msgid "File %(path)s has invalid backing file %(bfile)s, aborting." -msgstr "" -"Soubor %(path)s má neplatný záložní soubor %(bfile)s, operace přerušena." - -#, python-format -msgid "File already exists at %s." -msgstr "Soubor s nastavením již existuje v %s." - -#, python-format -msgid "File already exists at: %s" -msgstr "Soubor již existuje v: %s" - -msgid "Find host in hostgroup error." -msgstr "Chyba při hledání hostitele ve skupině hostitele." - -msgid "Find host lun id error." -msgstr "Chyba při hledání id hostitele lun." - -msgid "Find lun group from mapping view error." -msgstr "Chyba při hledání skupiny lun v zobrazení mapování." - -msgid "Find mapping view error." -msgstr "Chyba při hledání zobrazení mapování." - -msgid "Find portgroup error." -msgstr "Chyba při hledání skupiny portu." - -msgid "Find portgroup from mapping view error." -msgstr "Chyba při hledání skupiny portu v zobrazení mapování." - -#, python-format -msgid "" -"Flash Cache Policy requires WSAPI version '%(fcache_version)s' version " -"'%(version)s' is installed." 
-msgstr "" -"Zásada mezipaměti Flash vyžaduje WSAPI verze '%(fcache_version)s', je " -"nainstalována verze '%(version)s'." - -#, python-format -msgid "Flexvisor assign volume failed.:%(id)s:%(status)s." -msgstr "Flexvisor nemohl přidělit svazek.:%(id)s:%(status)s." - -#, python-format -msgid "Flexvisor assign volume failed:%(id)s:%(status)s." -msgstr "Flexvisor nemohl přidělit svazek:%(id)s:%(status)s." - -#, python-format -msgid "" -"Flexvisor could not find volume %(id)s snapshot in the group %(vgid)s " -"snapshot %(vgsid)s." -msgstr "" -"Flexvisor nemohl najít snímek svazku %(id)s ve snímku %(vgsid)s skupiny " -"%(vgid)s." - -#, python-format -msgid "Flexvisor create volume failed.:%(volumeid)s:%(status)s." -msgstr "Vytvoření svazku pomocí Flexvisor selhalo:%(volumeid)s:%(status)s." - -#, python-format -msgid "Flexvisor failed deleting volume %(id)s: %(status)s." -msgstr "Flexvisor selhal při mazání svazku %(id)s: %(status)s." - -#, python-format -msgid "Flexvisor failed to add volume %(id)s to group %(cgid)s." -msgstr "Flexvisor nemohl přidat svazek %(id)s do skupiny %(cgid)s." - -#, python-format -msgid "" -"Flexvisor failed to assign volume %(id)s due to unable to query status by " -"event id." -msgstr "" -"Flexvisor nemohl přidělit svazek %(id)s, protože se nešlo dotázat na stav " -"pomocí id události." - -#, python-format -msgid "Flexvisor failed to assign volume %(id)s: %(status)s." -msgstr "Flexvisor nemohl přidělit svazek %(id)s: %(status)s." - -#, python-format -msgid "Flexvisor failed to assign volume %(volume)s iqn %(iqn)s." -msgstr "Flexvisor nemohl přidělit svazek %(volume)s iqn %(iqn)s." - -#, python-format -msgid "Flexvisor failed to clone volume %(id)s: %(status)s." -msgstr "Flexvisor nemohl klonovat svazek %(id)s: %(status)s." - -#, python-format -msgid "Flexvisor failed to clone volume (failed to get event) %(id)s." -msgstr "Flexvisor nemohl klonovat svazek (nelze získat událost) %(id)s." 
- -#, python-format -msgid "Flexvisor failed to create snapshot for volume %(id)s: %(status)s." -msgstr "Flexvisor nemohl vytvořit snímek svazku %(id)s: %(status)s." - -#, python-format -msgid "" -"Flexvisor failed to create snapshot for volume (failed to get event) %(id)s." -msgstr "Flexvisor nemohl vytvořit snímek svazku (nelze získat událost) %(id)s." - -#, python-format -msgid "Flexvisor failed to create volume %(id)s in the group %(vgid)s." -msgstr "Flexvisor nemohl vytvořit svazek %(id)s ve skupině %(vgid)s." - -#, python-format -msgid "Flexvisor failed to create volume %(volume)s: %(status)s." -msgstr "Flexvisor nemohl vytvořit svazek %(volume)s: %(status)s." - -#, python-format -msgid "Flexvisor failed to create volume (get event) %s." -msgstr "Flexvisor nemohl vytvořit svazek (získání události) %s." - -#, python-format -msgid "Flexvisor failed to create volume from snapshot %(id)s: %(status)s." -msgstr "Flexvisor nemohl vytvořit svazek ze snímku %(id)s: %(status)s." - -#, python-format -msgid "Flexvisor failed to create volume from snapshot %(id)s:%(status)s." -msgstr "Flexvisor nemohl vytvořit svazek ze snímku %(id)s: %(status)s." - -#, python-format -msgid "" -"Flexvisor failed to create volume from snapshot (failed to get event) %(id)s." -msgstr "" -"Flexvisor nemohl vytvořit svazek ze snímku (nelze získat událost) %(id)s." - -#, python-format -msgid "Flexvisor failed to delete snapshot %(id)s: %(status)s." -msgstr "Flexvisor nemohl smazat snímek %(id)s: %(status)s." - -#, python-format -msgid "Flexvisor failed to delete snapshot (failed to get event) %(id)s." -msgstr "Flexvisor nemohl smazat svazek (nelze získat událost) %(id)s." - -#, python-format -msgid "Flexvisor failed to delete volume %(id)s: %(status)s." -msgstr "Flexvisor nemohl smazat svazek %(id)s: %(status)s." - -#, python-format -msgid "Flexvisor failed to extend volume %(id)s: %(status)s." -msgstr "Flexvisor nemohl rozšířit svazek %(id)s: %(status)s." 
- -#, python-format -msgid "Flexvisor failed to extend volume %(id)s:%(status)s." -msgstr "Flexvisor nemohl rozšířit svazek %(id)s:%(status)s." - -#, python-format -msgid "Flexvisor failed to extend volume (failed to get event) %(id)s." -msgstr "Flexvisor nemohl rozšířit svazek (nelze získat událost) %(id)s." - -#, python-format -msgid "Flexvisor failed to get pool info %(id)s: %(status)s." -msgstr "Flexvisor nemohl získat informace o zásobě %(id)s: %(status)s." - -#, python-format -msgid "" -"Flexvisor failed to get snapshot id of volume %(id)s from group %(vgid)s." -msgstr "Flexvisor nemohl získat id snímku svazku %(id)s ze skupiny %(vgid)s." - -#, python-format -msgid "Flexvisor failed to remove volume %(id)s from group %(cgid)s." -msgstr "Flexvisor nemohl odstranit svazek %(id)s ze skupiny %(cgid)s." - -#, python-format -msgid "Flexvisor failed to spawn volume from snapshot %(id)s:%(status)s." -msgstr "Flexvisor nemohl spustit svazek ze snímku %(id)s: %(status)s." - -#, python-format -msgid "" -"Flexvisor failed to spawn volume from snapshot (failed to get event) %(id)s." -msgstr "" -"Flexvisor nemohl spustit svazek ze snímku (nelze získat událost) %(id)s." - -#, python-format -msgid "Flexvisor failed to unassign volume %(id)s: %(status)s." -msgstr "Flexvisor nemohl zrušit přidělení svazku %(id)s: %(status)s." - -#, python-format -msgid "Flexvisor failed to unassign volume (get event) %(id)s." -msgstr "Flexvisor nemohl zrušit přidělení svazku (získání události) %(id)s." - -#, python-format -msgid "Flexvisor failed to unassign volume:%(id)s:%(status)s." -msgstr "Flexvisor nemohl zrušit přidělení svazku:%(id)s:%(status)s." - -#, python-format -msgid "Flexvisor unable to find the source volume %(id)s info." -msgstr "Flexvisor nemohl najít informace o zdrojovém svazku %(id)s." - -#, python-format -msgid "Flexvisor unassign volume failed:%(id)s:%(status)s." -msgstr "Zrušení přidělení svazku pomocí Flexvisor selhalo:%(id)s:%(status)s." 
- -#, python-format -msgid "Flexvisor volume %(id)s failed to join group %(vgid)s." -msgstr "Svazek Flexvisor %(id)s nemohl být připojen ke skupině %(vgid)s." - -#, python-format -msgid "GPFS is not running, state: %s." -msgstr "GPFS neběží, stav: %s." - -msgid "Gateway VIP is not set" -msgstr "Brána VIP není nastavena" - -msgid "Get FC ports from array error." -msgstr "Chyba při získávání portů FC z pole." - -msgid "Get FC target wwpn error." -msgstr "Chyba při získávání cílového FC wwpn." - -msgid "Get LUNcopy information error." -msgstr "Chyba při získávání informací o kopírování LUN." - -msgid "Get QoS id by lun id error." -msgstr "Chyba při získávání id QoS pomocí id lun." - -msgid "Get QoS information error." -msgstr "Chyba při získávání informací o QoS." - -msgid "Get QoS policy error." -msgstr "Chyba při získávání zásad QoS." - -msgid "Get cache by name error." -msgstr "Chyba při získávání mezipaměti pomocí názvu." - -msgid "Get connected free FC wwn error." -msgstr "Chyba při připojování volného FC wwn." - -msgid "Get host initiators info failed." -msgstr "Získávání informací o zavaděčích hostitele selhalo." - -msgid "Get hostgroup information error." -msgstr "Chyba při získávání informací o skupině hostitele." - -msgid "" -"Get iSCSI port info error, please check the target IP configured in huawei " -"conf file." -msgstr "" -"Chyba při získávání informací o portu iSCSI, prosím zkontrolujte cílovou IP " -"zadanou v souboru s nastavením huawei." - -msgid "Get iSCSI port information error." -msgstr "Chyba při získávání informací o portu iSCSI." - -msgid "Get iSCSI target port error." -msgstr "Chyba při získávání portu cíle iSCSI." - -msgid "Get lun migration task error." -msgstr "Chyba při získávání úkolu o přesunu lun." - -msgid "Get lungroup id by lun id error." -msgstr "Chyba při získávání id skupiny lun pomocí id lun." - -msgid "Get lungroup information error." -msgstr "Chyba při získávání informací o skupině lun." - -msgid "Get partition by name error." 
-msgstr "Chyba při získávání oddílu podle názvu." - -msgid "Get partition by partition id error." -msgstr "Chyba při získávání oddílu pomocí id oddílu." - -msgid "Get smartcache by cache id error." -msgstr "Chyba při získávání chytré mezipaměti pomocí id mezipaměti." - -msgid "Get snapshot id error." -msgstr "Chyba při získávání id snímku." - -msgid "Get target IP error." -msgstr "Chyba při získávání cílové IP adresy." - -msgid "Get volume by name error." -msgstr "Chyba při získávání svazku podle názvu." - -msgid "Get volume error." -msgstr "Chyba při získávání svazku." - -#, python-format -msgid "" -"Glance metadata cannot be updated, key %(key)s exists for volume id " -"%(volume_id)s" -msgstr "" -"Popisná data glance nemohou být aktualizována, klíč %(key)s existuje pro " -"svazek s id %(volume_id)s" - -#, python-format -msgid "Glance metadata for volume/snapshot %(id)s cannot be found." -msgstr "Popisná data Glance pro svazek/snímek %(id)s nemohla být nalezena." - -#, python-format -msgid "Gluster config file at %(config)s doesn't exist" -msgstr "Konfigurační soubor Gluster v %(config)s neexistuje." - -#, python-format -msgid "Google Cloud Storage api failure: %(reason)s" -msgstr "Selhání api úložiště Google Cloud: %(reason)s" - -#, python-format -msgid "Google Cloud Storage connection failure: %(reason)s" -msgstr "Chyba v připojení k úložišti Google Cloud: %(reason)s" - -#, python-format -msgid "Google Cloud Storage oauth2 failure: %(reason)s" -msgstr "Chyba v ověření oauth2 v úložišti Google Cloud: %(reason)s" - -#, python-format -msgid "Got bad path information from DRBDmanage! (%s)" -msgstr "Získány špatné informace o cestě z DRBDmanage! (%s)" - -msgid "HBSD error occurs." -msgstr "Objevena chyba HBSD." - -msgid "HPELeftHand url not found" -msgstr "HPELeftHand URL nenalezena" - -#, python-format -msgid "" -"Hash block size has changed since the last backup. New hash block size: " -"%(new)s. Old hash block size: %(old)s. Do a full backup." 
-msgstr "" -"Velikost bloku kontrolního součtu byla od poslední zálohy změněna, Nová " -"velikost bloku kontrolního součtu: %(new)s. Stará velikost bloku kontrolního " -"součtu: %(old)s. Proveďte úplnou zálohu." - -#, python-format -msgid "Have not created %(tier_levels)s tier(s)." -msgstr "Vrstvy %(tier_levels)s nebyly vytvořeny." - -#, python-format -msgid "Hint \"%s\" not supported." -msgstr "Nápověda \"%s\" není podporována." - -msgid "Host" -msgstr "Hostitel" - -#, python-format -msgid "Host %(host)s could not be found." -msgstr "Hostitel %(host)s nemohl být nalezen." - -#, python-format -msgid "" -"Host %(host)s does not match x509 certificate contents: CommonName " -"%(commonName)s." -msgstr "" -"Hostitel %(host)s neodpovídá obsahu certifikátu x509: Běžný název: " -"%(commonName)s." - -#, python-format -msgid "Host %s has no FC initiators" -msgstr "Hostitel %s nemá žádné zavaděče FC" - -#, python-format -msgid "Host group with name %s not found" -msgstr "Skupina hostitele s názvem %s nebyla nalezena" - -#, python-format -msgid "Host group with ref %s not found" -msgstr "Skupina hostitele mající odkaz %s nebyla nalezena" - -#, python-format -msgid "Host not found. Failed to remove %(service)s on %(host)s." -msgstr "Hostitel nenalezen. Nelze odstranit %(service)s na %(host)s." - -#, python-format -msgid "Host type %s not supported." -msgstr "Hostitel typu %s není podporován." - -#, python-format -msgid "Host with ports %(ports)s not found." -msgstr "Hostitel s porty %(ports)s nenalezen." - -#, python-format -msgid "I/O group %(iogrp)d is not valid; available I/O groups are %(avail)s." -msgstr "" -"Skupina vstupu/výstupu %(iogrp)d není platná; platné skupiny jsou %(avail)s." - -msgid "ID" -msgstr "ID" - -msgid "" -"If compression is set to True, rsize must also be set (not equal to -1)." -msgstr "" -"Je-li komprese nastavena na True, pak rsize musí být také nastaveno (nesmí " -"se rovnat -1)." 
- -msgid "If nofmtdisk is set to True, rsize must also be set to -1." -msgstr "" -"Je-li notfmtdisk nastaveno na True, pak rsize musí být také nastaveno na " -"hodnotu -1." - -#, python-format -msgid "" -"Illegal value '%(prot)s' specified for flashsystem_connection_protocol: " -"valid value(s) are %(enabled)s." -msgstr "" -"V protokolu připojení k flashsystem byla nalezena neplatná hodnota " -"'%(prot)s': platné hodnoty jsou %(enabled)s." - -msgid "Illegal value specified for smarttier: set to either 0, 1, 2, or 3." -msgstr "" -"Zadána neplatná hodnota pro chytrou vrstvu: nastavte buď na 0, 1, 2, či 3." - -msgid "" -"Illegal value specified for storwize_svc_vol_grainsize: set to either 32, " -"64, 128, or 256." -msgstr "" -"Zadána neplatná hodnota pro storwize_svc_vol_grainsize: nastavte buď na 32, " -"64, 128, nebo 256." - -msgid "" -"Illegal value specified for thin: Can not set thin and thick at the same " -"time." -msgstr "" -"Zadána neplatná hodnota pro hloubku: Nelze zadat najednou thin i thick." - -#, python-format -msgid "Image %(image_id)s could not be found." -msgstr "Obraz %(image_id)s nemohl být nalezen." - -#, python-format -msgid "Image %(image_id)s is not active." -msgstr "Obraz %(image_id)s není aktivní." - -#, python-format -msgid "Image %(image_id)s is unacceptable: %(reason)s" -msgstr "Obraz %(image_id)s je nepřijatelný: %(reason)s" - -msgid "Image location not present." -msgstr "Umístění obrazu není přítomno." - -#, python-format -msgid "" -"Image virtual size is %(image_size)dGB and doesn't fit in a volume of size " -"%(volume_size)dGB." -msgstr "" -"Virtuální velikost obrazu je %(image_size)dGB, a proto se nevejde do svazku " -"s velikostí %(volume_size)dGB." - -msgid "" -"ImageBusy error raised while deleting rbd volume. This may have been caused " -"by a connection from a client that has crashed and, if so, may be resolved " -"by retrying the delete after 30 seconds has elapsed." 
-msgstr "" -"Při mazání svazku rbd nastala chyba Obraz je zaneprázdněn. To může být " -"způsobeno připojením od klienta, které bylo přerušeno a, pokud tomu tak je, " -"může být vyřešeno novým pokus o smazání po vypršení 30 vteřin." - -#, python-format -msgid "" -"Import record failed, cannot find backup service to perform the import. " -"Request service %(service)s" -msgstr "" -"Importování záznamu selhalo, nelze najít zálohovací službu pro provedení " -"importu. Požadovaná služba %(service)s" - -msgid "Incorrect request body format" -msgstr "Nesprávný formát těla požadavku" - -msgid "Incorrect request body format." -msgstr "Nesprávný formát těla požadavku." - -msgid "Incremental backups exist for this backup." -msgstr "Pro tuto zálohu existují přírůstkové zálohy." - -#, python-format -msgid "" -"Infortrend CLI exception: %(err)s Param: %(param)s (Return Code: %(rc)s) " -"(Output: %(out)s)" -msgstr "" -"Výjimka CLI Infortrend: %(err)s, parametr: %(param)s (Návratový kód: %(rc)s) " -"(Výstup: %(out)s)" - -msgid "Input volumes or snapshots are invalid." -msgstr "Vstupní svazky nebo snímky jsou neplatné." - -msgid "Input volumes or source volumes are invalid." -msgstr "Vstupní nebo zdrojové svazky jsou neplatné." - -#, python-format -msgid "Instance %(uuid)s could not be found." -msgstr "Instance %(uuid)s nemohla být nalezena." - -msgid "Insufficient free space available to extend volume." -msgstr "Pro rozšíření svazku není dostatek volného místa." - -msgid "Insufficient privileges" -msgstr "Nedostatečná oprávnění" - -#, python-format -msgid "Invalid 3PAR Domain: %(err)s" -msgstr "Neplatná doména 3PAR: %(err)s" - -msgid "Invalid ALUA value. ALUA value must be 1 or 0." -msgstr "Neplatná hodnota ALUA. Hodnota musí být 1 nebo 0." 
- -msgid "Invalid Ceph args provided for backup rbd operation" -msgstr "Zadány neplatné argumenty Ceph pro záložní operaci rbd" - -#, python-format -msgid "Invalid CgSnapshot: %(reason)s" -msgstr "Neplatný snímek skupiny jednotnosti: %(reason)s" - -#, python-format -msgid "Invalid ConsistencyGroup: %(reason)s" -msgstr "Neplatná skupina jednotnosti: %(reason)s" - -msgid "Invalid ConsistencyGroup: No host to create consistency group" -msgstr "" -"Neplatná skupina jednotnosti: Žádný hostitel pro vytvoření skupiny " -"jednotnosti." - -#, python-format -msgid "" -"Invalid HPELeftHand API version found: %(found)s. Version %(minimum)s or " -"greater required for manage/unmanage support." -msgstr "" -"Nalezena neplatná verze API HPELeftHand: %(found)s. Pro podporu spravování " -"je vyžadována verze %(minimum)s nebo vyšší." - -#, python-format -msgid "Invalid IP address format: '%s'" -msgstr "Neplatný formát IP adresy: '%s'" - -#, python-format -msgid "" -"Invalid QoS specification detected while getting QoS policy for volume %s" -msgstr "" -"Při získávání zásady QoS pro svazek %s byla zjištěna neplatná specifikace QoS" - -#, python-format -msgid "" -"Invalid Virtuozzo Storage share specification: %r. Must be: [MDS1[," -"MDS2],...:/][:PASSWORD]." -msgstr "" -"Neplatná specifikace sdílení úložiště Virtuozzo: %r. Musí být [MDS1[," -"MDS2],...:/][:HESLO]." - -#, python-format -msgid "Invalid XtremIO version %(cur)s, version %(min)s or up is required" -msgstr "Neplatná verze XtremIO %(cur)s, je vyžadována verze %(min)s nebo vyšší" - -msgid "Invalid argument" -msgstr "Neplatný argument" - -msgid "Invalid argument - negative seek offset." -msgstr "Neplatný argument - záporná odchylka hledání." - -#, python-format -msgid "Invalid argument - whence=%s not supported" -msgstr "Neplatný argument - whence=%s není podporováno" - -#, python-format -msgid "Invalid argument - whence=%s not supported." -msgstr "Neplatný argument - whence=%s není podporováno." 
- -#, python-format -msgid "Invalid attaching mode '%(mode)s' for volume %(volume_id)s." -msgstr "Neplatný režim připojení '%(mode)s' pro svazek %(volume_id)s." - -#, python-format -msgid "Invalid auth key: %(reason)s" -msgstr "Neplatný ověřovací klíč: %(reason)s" - -#, python-format -msgid "Invalid backup: %(reason)s" -msgstr "Neplatná záloha: %(reason)s" - -msgid "Invalid chap user details found in CloudByte storage." -msgstr "" -"V úložišti CloudByte nalezeny neplatné přihlašovací údaje CHAP pro uživatele." - -#, python-format -msgid "Invalid connection initialization response of volume %(name)s" -msgstr "Neplatná odpověď zavedení připojení od svazku %(name)s" - -#, python-format -msgid "" -"Invalid connection initialization response of volume %(name)s: %(output)s" -msgstr "Neplatná odpověď zavedení připojení od svazku %(name)s: %(output)s" - -#, python-format -msgid "Invalid content type %(content_type)s." -msgstr "Neplatný typ obsahu %(content_type)s." - -msgid "Invalid credentials" -msgstr "Neplatné přihlašovací údaje" - -#, python-format -msgid "Invalid directory: %s" -msgstr "Neplatný adresář: %s" - -#, python-format -msgid "Invalid disk adapter type: %(invalid_type)s." -msgstr "Neplatný typ adaptéru disku: %(invalid_type)s." - -#, python-format -msgid "Invalid disk backing: %s." -msgstr "Neplatné zálohování disku: %s" - -#, python-format -msgid "Invalid disk type: %(disk_type)s." -msgstr "Neplatný typ disku: %(disk_type)s." - -#, python-format -msgid "Invalid disk type: %s." -msgstr "Neplatný typ disku: %s" - -#, python-format -msgid "Invalid host: %(reason)s" -msgstr "Neplatný hostitel: %(reason)s" - -#, python-format -msgid "" -"Invalid hpe3parclient version found (%(found)s). Version %(minimum)s or " -"greater required. Run \"pip install --upgrade python-3parclient\" to upgrade " -"the hpe3parclient." -msgstr "" -"Nalezena neplatná verze hpe3parclient (%(found)s). Je vyžadována verze " -"%(minimum)s a vyšší. 
Spusťte \"pip install --upgrade python-3parclient\" pro " -"aktualizaci klienta." - -#, python-format -msgid "" -"Invalid hpelefthandclient version found (%(found)s). Version %(minimum)s or " -"greater required. Run 'pip install --upgrade python-lefthandclient' to " -"upgrade the hpelefthandclient." -msgstr "" -"Nalezena neplatná verze hpelefthandclient (%(found)s). Je vyžadována verze " -"%(minimum)s a vyšší. Spusťte \"pip install --upgrade python-lefthandclient\" " -"pro aktualizaci klienta." - -#, python-format -msgid "Invalid image href %(image_href)s." -msgstr "Neplatný href %(image_href)s obrazu." - -msgid "Invalid image identifier or unable to access requested image." -msgstr "" -"Neplatný identifikátor obrazu nrbo nelze získat přístup k požadovanému " -"obrazu." - -msgid "Invalid imageRef provided." -msgstr "Zadáno neplatné imageRef." - -msgid "Invalid input" -msgstr "Neplatný vstup" - -#, python-format -msgid "Invalid input received: %(reason)s" -msgstr "Obdržen neplatný vstup: %(reason)s" - -#, python-format -msgid "Invalid is_public filter [%s]" -msgstr "Neplatný filtr is_public [%s]" - -#, python-format -msgid "Invalid metadata size: %(reason)s" -msgstr "Neplatná velikost popisných dat: %(reason)s" - -#, python-format -msgid "Invalid metadata: %(reason)s" -msgstr "Neplatná popisná data: %(reason)s" - -#, python-format -msgid "Invalid mount point base: %s" -msgstr "Neplatná základna bodu připojení: %s" - -#, python-format -msgid "Invalid mount point base: %s." -msgstr "Neplatná základna bodu připojení: %s." - -#, python-format -msgid "Invalid new snapCPG name for retype. new_snap_cpg='%s'." -msgstr "" -"Neplatný nový název snímku společné skupiny poskytování pro přetypování. " -"Nový název='%s'." 
- -#, python-format -msgid "Invalid port number %(config)s for Coho rpc port" -msgstr "Neplatné číslo portu %(config)s pro port RPC Coho" - -#, python-format -msgid "Invalid qos specs: %(reason)s" -msgstr "Neplatné specifikace qos: %(reason)s" - -msgid "Invalid request to attach volume to an invalid target" -msgstr "Neplatná žádost o připojení svazku k neplatnému hostiteli" - -msgid "" -"Invalid request to attach volume with an invalid mode. Attaching mode should " -"be 'rw' or 'ro'" -msgstr "" -"Neplatná žádost o připojení svazku s neplatným režimem. Režim připojení by " -"měl být 'rw' nebo 'ro'" - -#, python-format -msgid "Invalid reservation expiration %(expire)s." -msgstr "Neplatné vypršení rezervace %(expire)s." - -msgid "Invalid response header from RPC server" -msgstr "Neplatná hlavička odpovědi od serveru RPC" - -msgid "Invalid service catalog json." -msgstr "Neplatný json katalog služeb" - -msgid "Invalid sheepdog cluster status." -msgstr "Neplatný stav clusteru sheepdog." - -#, python-format -msgid "Invalid snapshot: %(reason)s" -msgstr "Neplatný snímek: %(reason)s" - -#, python-format -msgid "Invalid status: '%s'" -msgstr "Neplatný stav: '%s'" - -#, python-format -msgid "Invalid storage pool %s requested. Retype failed." -msgstr "Zažádáno o neplatnou zásobu úložiště %s. Přetypování selhalo." - -#, python-format -msgid "Invalid storage pool %s specificed." -msgstr "Zadána neplatná zásoba úložiště %s." - -msgid "Invalid transport type." -msgstr "Neplatný typ přenosu." - -#, python-format -msgid "Invalid update setting: '%s'" -msgstr "Neplatné nastavení aktualizace: '%s'" - -#, python-format -msgid "Invalid value '%s' for force." -msgstr "Neplatná hodnota '%s' pro vynucení." - -#, python-format -msgid "Invalid value '%s' for force. " -msgstr "Neplatná hodnota '%s' pro force." - -#, python-format -msgid "Invalid value '%s' for is_public. Accepted values: True or False." -msgstr "Neplatná hodnota '%s' pro is_public. Patné hodnoty: True nebo False. 
" - -#, python-format -msgid "Invalid value '%s' for skip_validation." -msgstr "Neplatná hodnota '%s' pro přeskočení ověření." - -#, python-format -msgid "Invalid value for 'bootable': '%s'" -msgstr "Neplatná hodnota pro 'bootable': '%s'" - -#, python-format -msgid "Invalid value for 'force': '%s'" -msgstr "Neplatná hodnota pro 'force': '%s'" - -#, python-format -msgid "Invalid value for 'readonly': '%s'" -msgstr "Neplatná hodnota pro 'readonly': '%s'" - -msgid "Invalid value for 'scheduler_max_attempts', must be >=1" -msgstr "Neplatná hodnota pro 'scheduler_max_attempts', musí být >=1" - -msgid "Invalid value for NetApp configuration option netapp_host_type." -msgstr "Neplatná hodnota pro volbu nastavení NetApp netapp_host_type." - -msgid "Invalid value for NetApp configuration option netapp_lun_ostype." -msgstr "Neplatná hodnota pro volbu nastavení NetApp netapp_lun_ostype." - -#, python-format -msgid "Invalid value for age, %(age)s" -msgstr "Neplatná hodnota pro věk, %(age)s" - -#, python-format -msgid "" -"Invalid volume size provided for create request: %s (size argument must be " -"an integer (or string representation of an integer) and greater than zero)." -msgstr "" -"Zadána neplatná velikost svazku při žádosti o vytvoření: %s (argument " -"velikosti musí být celé číslo (nebo celé číslo zadané pomocí řetězce) a " -"větší než nula)." - -#, python-format -msgid "Invalid volume type: %(reason)s" -msgstr "Neplatný typ svazku: %(reason)s" - -#, python-format -msgid "Invalid volume: %(reason)s" -msgstr "Neplatný svazek: %(reason)s" - -#, python-format -msgid "" -"Invalid volume: Cannot add volume %(volume_id)s to consistency group " -"%(group_id)s because volume is in an invalid state: %(status)s. Valid states " -"are: ('available', 'in-use')." -msgstr "" -"Neplatný svazek: Nelze přidat svazek %(volume_id)s do skupiny jednotnosti " -"%(group_id)s protože svazek je v neplatném stavu: %(status)s. Platné stavy " -"jsou: ('available', 'in-use')." 
- -#, python-format -msgid "" -"Invalid volume: Cannot add volume %(volume_id)s to consistency group " -"%(group_id)s because volume type %(volume_type)s is not supported by the " -"group." -msgstr "" -"Neplatný svazek. Nelze přidat svazek %(volume_id)s do skupiny jednotnosti " -"%(group_id)s protože typ svazku %(volume_type)s tato skupina nepodporuje." - -#, python-format -msgid "" -"Invalid volume: Cannot add volume fake-volume-uuid to consistency group " -"%(group_id)s because volume cannot be found." -msgstr "" -"Neplatný svazek: Nelze přidat svazek valešné-uuid-svazku do skupiny " -"jednotnosti %(group_id)s protože svazek nelze nalézt." - -#, python-format -msgid "" -"Invalid volume: Cannot remove volume fake-volume-uuid from consistency group " -"%(group_id)s because it is not in the group." -msgstr "" -"Neplatný svazek: Nelze odstranit svazek falešné-uuid-svazku ze skupiny " -"jednotnosti %(group_id)s protože není ve skupině." - -#, python-format -msgid "Invalid volume_type passed: %s." -msgstr "Předán neplatný typ svazku: %s." - -#, python-format -msgid "" -"Invalid volume_type provided: %s (requested type is not compatible; either " -"match source volume, or omit type argument)." -msgstr "" -"Zadán neplatný typ svazku: %s (požadovaný typ není kompatibilní; buď se musí " -"shodovat se zdrojovým svazkem, nebo vynechejte argument zadávající typ)." - -#, python-format -msgid "" -"Invalid volume_type provided: %s (requested type is not compatible; " -"recommend omitting the type argument)." -msgstr "" -"Zadán neplatný typ svazku: %s (požadovaný typ není kompatibilní; doporučuje " -"se vynechat argument zadávající typ)" - -#, python-format -msgid "" -"Invalid volume_type provided: %s (requested type must be supported by this " -"consistency group)." -msgstr "" -"Zadán neplatný typ svazku: %s (požadovaný typ musí být podporován touto " -"skupinou jednotnosti)." 
- -#, python-format -msgid "Invalid wwpns format %(wwpns)s" -msgstr "Neplatný formát wwpns %(wwpns)s" - -msgid "Invoking web service failed." -msgstr "Vyvolání internetové služby selhalo." - -msgid "Issue encountered waiting for job." -msgstr "Při čekání na úkol se vyskytl problém." - -msgid "Issue encountered waiting for synchronization." -msgstr "Při čekání na synchronizaci se vyskytl problém." - -msgid "" -"Issuing a fail-over failed because replication is not properly configured." -msgstr "" -"Vyvolání zavedení záložního systému, protože replikace není správně " -"nastavena." - -#, python-format -msgid "Job id not found in CloudByte's create volume [%s] response." -msgstr "Úkol nebyl nalezen v CloudByte odpovědi pro vytvoření svazku [%s]." - -#, python-format -msgid "Job id not found in CloudByte's delete volume [%s] response." -msgstr "Úkol nebyl nalezen v CloudByte odpovědi pro smazání svazku [%s]." - -msgid "" -"Key names can only contain alphanumeric characters, underscores, periods, " -"colons and hyphens." -msgstr "" -"Názvy klíče mohou obsahovat pouze alfanumerické znaky, podtržítka, tečky, " -"dvojtečky a pomlčky." - -#, python-format -msgid "KeyError: %s" -msgstr "Chyba klíče: %s" - -msgid "LUN export failed!" -msgstr "Export LUN selhal!" - -msgid "LUN map overflow on every channel." -msgstr "Přetečení LUN mapy ve všech kanálech." - -#, python-format -msgid "LUN not found with given ref %s." -msgstr "LUN nenalezena pomocí zadaného odkazu %s." - -#, python-format -msgid "LUN number is out of bound on channel id: %(ch_id)s." -msgstr "Číslo LUN je mimo rozsah v kanálu s id: %(ch_id)s." 
- -#, python-format -msgid "Last %s cinder syslog entries:-" -msgstr "Posledních %s položek cinder v záznamu systému:-" - -msgid "LeftHand cluster not found" -msgstr "Cluster LeftHand nebyl nalezen" - -#, python-format -msgid "Line %(dis)d : %(line)s" -msgstr "Řádek %(dis)d : %(line)s" - -msgid "Link path already exists and its not a symlink" -msgstr "Cesta odkazu již existuje a není to symbolický odkaz." - -#, python-format -msgid "Linked clone of source volume not supported in state: %s." -msgstr "Propojený klon zdrojového svazku není podporován ve stavu: %s." - -msgid "Lock acquisition failed." -msgstr "Získání zámku selhalo." - -msgid "Logout session error." -msgstr "Chyba odhlášení sezení." - -msgid "" -"Lookup service not configured. Config option for fc_san_lookup_service needs " -"to specify a concrete implementation of the lookup service." -msgstr "" -"Služba vyhledávání není nastavena. Volba nastavení fc_san_lookup_service " -"musí udávat konkrétní zavedení této služby." - -msgid "Lun migration error." -msgstr "Chyba při přesunu Lun." - -#, python-format -msgid "MSG_DENIED: %r" -msgstr "Zpráva zamítnuta: %r" - -#, python-format -msgid "MSG_DENIED: AUTH_ERROR: %r" -msgstr "Zpráva zamítnuta: Chyba při ověření: %r" - -#, python-format -msgid "MSG_DENIED: RPC_MISMATCH: %r" -msgstr "Zpráva zamítnuta: Neshoda vzdáleného volání procedur: %r" - -#, python-format -msgid "Malformed fcns output string: %s" -msgstr "Poškozený řetězec výstupu fcns: %s" - -#, python-format -msgid "Malformed message body: %(reason)s" -msgstr "Poškozené tělo zprávy: %(reason)s" - -#, python-format -msgid "Malformed nameserver string: %s" -msgstr "Poškozený řetězec jmenného serveru: %s" - -msgid "Malformed request body" -msgstr "Poškozené tělo požadavku" - -msgid "Malformed request body." -msgstr "Poškozené tělo požadavku." 
- -msgid "Malformed request url" -msgstr "Poškozená url požadavku" - -#, python-format -msgid "Malformed response to command %(cmd)s: %(reason)s" -msgstr "Poškozená odpověď na příkaz %(cmd)s: %(reason)s" - -msgid "Malformed scheduler_hints attribute" -msgstr "Poškozená vlastnost scheduler_hints" - -#, python-format -msgid "Malformed show fcns database string: %s" -msgstr "Poškozené zobrazení řetězce databáze fcns: %s" - -#, python-format -msgid "" -"Malformed zone configuration: (switch=%(switch)s zone_config=" -"%(zone_config)s)." -msgstr "" -"Poškozené nastavení zóny: (rozbočovač=%(switch)s nastavení zóny=" -"%(zone_config)s)." - -#, python-format -msgid "Malformed zone status: (switch=%(switch)s zone_config=%(zone_config)s)." -msgstr "" -"Poškozený stav zóny: (rozbočovač=%(switch)s nastavení zóny=%(zone_config)s)." - -msgid "Manage existing get size requires 'id'." -msgstr "Správa existujícího získání velikosti vyžaduje 'id'." - -msgid "Manage existing snapshot not implemented." -msgstr "Správa existujícího snímku není zavedena." - -#, python-format -msgid "" -"Manage existing volume failed due to invalid backend reference " -"%(existing_ref)s: %(reason)s" -msgstr "" -"Správa existujícího svazku selhala kvůli neplatnému odkazu na podpůrnou " -"vrstvu %(existing_ref)s: %(reason)s" - -#, python-format -msgid "Manage existing volume failed due to volume type mismatch: %(reason)s" -msgstr "" -"Správa existujícího svazku selhala kvůli neshodě s typem svazku: %(reason)s" - -msgid "Manage existing volume not implemented." -msgstr "Správa existujícího svazku není zavedena." - -msgid "Manage existing volume requires 'source-id'." -msgstr "Správa existujícího svazku vyžaduje 'source-id'." - -#, python-format -msgid "" -"Manage volume is not supported if FAST is enable. FAST policy: " -"%(fastPolicyName)s." -msgstr "" -"Správa svazku není podporována, pokud je FAST povoleno. Zásada FAST: " -"%(fastPolicyName)s." 
- -#, python-format -msgid "" -"Mapping %(id)s prepare failed to complete within theallotted %(to)d seconds " -"timeout. Terminating." -msgstr "" -"Připravení mapování %(id)s nemohlo být dokončeno v rámci přiděleného " -"časového limitu %(to)d vteřin. Ukončování." - -#, python-format -msgid "Masking view %(maskingViewName)s was not deleted successfully" -msgstr "Maskování %(maskingViewName)s nebylo úspěšně smazáno" - -#, python-format -msgid "Maximum number of backups allowed (%(allowed)d) exceeded" -msgstr "Překročen maximální povolený počet záloh (%(allowed)d)" - -#, python-format -msgid "Maximum number of snapshots allowed (%(allowed)d) exceeded" -msgstr "Překročen maximální povolený počet snímků (%(allowed)d)" - -#, python-format -msgid "" -"Maximum number of volumes allowed (%(allowed)d) exceeded for quota " -"'%(name)s'." -msgstr "" -"Maximální povolený počet svazků (%(allowed)d) překračuje kvótu '%(name)s'." - -#, python-format -msgid "May specify only one of %s" -msgstr "Lze zadat pouze jedno z %s" - -msgid "Metadata backup already exists for this volume" -msgstr "Záloha popisných dat je pro tento svazek již vytvořena" - -#, python-format -msgid "Metadata backup object '%s' already exists" -msgstr "Object zálohy popisných dat '%s' již existuje" - -msgid "Metadata property key blank." -msgstr "Klíč vlastnosti popisných dat je prázdný." - -msgid "Metadata restore failed due to incompatible version" -msgstr "Obnovení popisných dat selhalo kvůli nekompatibilní verzi" - -msgid "Metadata restore failed due to incompatible version." -msgstr "Obnovení popisných dat selhalo kvůli nekompatibilní verzi." - -msgid "" -"Missing 'purestorage' python module, ensure the library is installed and " -"available." -msgstr "" -"Chybí python modul 'purestorage', ujistěte se, že knihovna je nainstalovaná " -"a dostupná." 
- -msgid "Missing Fibre Channel SAN configuration param - fc_fabric_names" -msgstr "Chybí parametr nastavení Fibre Channel SAN - fc_fabric_names" - -msgid "Missing request body" -msgstr "Chybí tělo žádosti" - -msgid "Missing request body." -msgstr "Chybí tělo žádosti." - -#, python-format -msgid "Missing required element '%s' in request body" -msgstr "V těle žádosti chybí povinný prvek '%s'" - -#, python-format -msgid "Missing required element '%s' in request body." -msgstr "V těle žádosti chybí povinný prvek '%s'." - -msgid "Missing required element 'consistencygroup' in request body." -msgstr "V těle žádosti chybí povinný prvek 'consistencygroup'." - -msgid "Missing required element quota_class_set in request body." -msgstr "V těle žádosti chybí povinný prvek quota_class_set." - -msgid "Missing required element snapshot in request body." -msgstr "V těle žádosti chybí požadovaný prvek snapshot." - -msgid "" -"Multiple SerialNumbers found, when only one was expected for this operation. " -"Please change your EMC config file." -msgstr "" -"Nalezeno mnoho sériových čísel, ale pro tuto operaci bylo očekáváno pouze " -"jedno. Prosím upravte váš soubor s nastavením EMC." - -#, python-format -msgid "Multiple copies of volume %s found." -msgstr "Nalezeno mnoho kopií svazku %s." - -#, python-format -msgid "Multiple matches found for '%s', use an ID to be more specific." -msgstr "Nalezeno mnoho shod pro '%s', použijte ID pro zpřesnění hledání." - -msgid "Multiple profiles found." -msgstr "Nalezeno mnoho profilů." 
- -msgid "Must implement a fallback schedule" -msgstr "Je nutné zavést záložní plán" - -msgid "Must implement find_retype_host" -msgstr "Je nutné zavést find_retype_host" - -msgid "Must implement host_passes_filters" -msgstr "Je nutné zavést host_passes_filters" - -msgid "Must implement schedule_create_consistencygroup" -msgstr "Je nutné zavést schedule_create_consistencygroup" - -msgid "Must implement schedule_create_volume" -msgstr "Je nutné zavést schedule_create_volume" - -msgid "Must implement schedule_get_pools" -msgstr "Je nutné zavést schedule_get_pools" - -msgid "Must pass wwpn or host to lsfabric." -msgstr "lsfabric je nutné předat wwpn nebo host." - -msgid "Must specify 'connector'" -msgstr "Musíte zadat 'connector'" - -msgid "Must specify 'connector'." -msgstr "Musíte zadat 'connector'." - -msgid "Must specify 'host'." -msgstr "Musíte zadat 'host'." - -msgid "Must specify 'new_volume'" -msgstr "Musíte zadat 'new_volume'" - -msgid "Must specify 'status'" -msgstr "Musíte zadat 'status'" - -msgid "" -"Must specify 'status', 'attach_status' or 'migration_status' for update." -msgstr "" -"Pro aktualizaci musíte zadat 'status', 'attach_status' nebo " -"'migration_status'." - -msgid "Must specify a valid attach status" -msgstr "Musíte zadat platný stav připojení" - -msgid "Must specify a valid migration status" -msgstr "Musíte zadat platný stav přesunu" - -#, python-format -msgid "Must specify a valid persona %(valid)s,value '%(persona)s' is invalid." -msgstr "" -"Musíte zadat platnou persónu %(valid)s, hodnota '%(persona)s' není platná." - -#, python-format -msgid "" -"Must specify a valid provisioning type %(valid)s, value '%(prov)s' is " -"invalid." -msgstr "" -"Musíte zadat platný typ poskytování %(valid)s, hodnota '%(prov)s' je " -"neplatná." 
- -msgid "Must specify a valid status" -msgstr "Musíte zadat platný stav" - -msgid "Must specify an ExtensionManager class" -msgstr "Musí být určena třída ExtensionManager" - -msgid "Must specify bootable in request." -msgstr "V žádosti musíte zadat bootable." - -msgid "Must specify protection domain name or protection domain id." -msgstr "Musíte zadat název nebo id ochranné domény." - -msgid "Must specify readonly in request." -msgstr "V žádosti musíte zadat readonly." - -msgid "Must specify storage pool name or id." -msgstr "Název nebo id zásoby úložiště musí být zadáno." - -msgid "Must specify storage pools. Option: sio_storage_pools." -msgstr "Musíte zadat zásoby úložiště. Volba: sio_storage_pools." - -msgid "Must supply a positive, non-zero value for age" -msgstr "Musíte zadat kladnou, nenulovou hodnotu pro věk" - -#, python-format -msgid "" -"NAS config '%(name)s=%(value)s' invalid. Must be 'auto', 'true', or 'false'" -msgstr "" -"Nastavení NAS '%(name)s=%(value)s' je neplatné. Musí být buď 'auto', 'true', " -"nebo 'false'" - -#, python-format -msgid "NFS config file at %(config)s doesn't exist" -msgstr "Soubor s nastavením NFS v %(config)s neexistuje" - -#, python-format -msgid "NFS file %s not discovered." -msgstr "Soubor NFS %s nebyl nalezen." - -msgid "NFS file could not be discovered." -msgstr "Soubor NFS nemohl být nalezen." - -msgid "NaElement name cannot be null." -msgstr "Název NaElement nemůže být prázdný." - -msgid "Name" -msgstr "Název" - -msgid "" -"Name, description, add_volumes, and remove_volumes can not be all empty in " -"the request body." -msgstr "" -"V těle žádosti nemohou být název, popis, přidání svazků, či odebrání svazků " -"všechny prázdné." - -msgid "Need non-zero volume size" -msgstr "Je třeba zadat nenulovou velikost svazku" - -#, python-format -msgid "Neither MSG_DENIED nor MSG_ACCEPTED: %r" -msgstr "Zpráva nebyla zamítnuta ani přijata: %r" - -msgid "NetApp Cinder Driver exception." -msgstr "Výjimka ovladače NetApp Cinder." 
- -#, python-format -msgid "" -"New size for extend must be greater than current size. (current: %(size)s, " -"extended: %(new_size)s)." -msgstr "" -"Nová velikost pro rozšíření musí být větší než současná. (současná: " -"%(size)s, rozšířená: %(new_size)s)." - -msgid "New volume size must be specified as an integer." -msgstr "Nová velikost svazku musí být zadána jako celé číslo." - -msgid "New volume type must be specified." -msgstr "Musíte zadat nový typ svazku." - -msgid "New volume type not specified in request_spec." -msgstr "Nový typ svazku nebyl zadán ve specifikaci žádosti." - -msgid "Nimble Cinder Driver exception" -msgstr "Výjimka ovladače Nimble Cinder" - -msgid "No FCP targets found" -msgstr "Žádné cíle FCP nenalezeny" - -msgid "No Port Group elements found in config file." -msgstr "V souboru nastavení nenalezeny žádné prvky skupiny portu." - -msgid "No active iSCSI portals with supplied iSCSI IPs" -msgstr "Žádné aktivní portály iSCSI zadané pomocí IP iSCSI" - -#, python-format -msgid "No available service named %s" -msgstr "Žádná dostupná služba nazvaná %s" - -#, python-format -msgid "No backup with id %s" -msgstr "Žádná záloha s id %s" - -msgid "No backups available to do an incremental backup." -msgstr "Žádná záloha pro provedení přírůstkové zálohy." - -msgid "No big enough free disk" -msgstr "Žádný dostatečně velký volný disk" - -#, python-format -msgid "No cgsnapshot with id %s" -msgstr "Žádný snímek jednotnosti s id %s" - -msgid "No cinder entries in syslog!" -msgstr "Žádné položky cinder v záznamu systému!" - -#, python-format -msgid "No cloned LUN named %s found on the filer" -msgstr "Ve správci souborů nebyl nalezen klonovaný LUN s názvem %s" - -msgid "No config node found." -msgstr "Nenalezen žádný uzel nastavení." - -#, python-format -msgid "No consistency group with id %s" -msgstr "Žádná skupina jednotnosti s id %s" - -#, python-format -msgid "No element by given name %s." -msgstr "Žádný prvek podle zadaného názvu %s." 
- -msgid "No errors in logfiles!" -msgstr "Žádné chyby v souborech záznamu!" - -#, python-format -msgid "No file found with %s as backing file." -msgstr "V %s nenalezen žádný záložní soubor." - -#, python-format -msgid "" -"No free LUN IDs left. Maximum number of volumes that can be attached to host " -"(%s) has been exceeded." -msgstr "" -"Žádné další volné ID LUN. Byl překročen maximální počet svazků, které lze " -"připojit k hostiteli (%s)." - -msgid "No free disk" -msgstr "Žádný volný disk" - -#, python-format -msgid "No good iscsi portal found in supplied list for %s." -msgstr "Pro %s nebyly v zadaném seznamu nalezeny žádné vhodné portály iscsi." - -#, python-format -msgid "No good iscsi portals found for %s." -msgstr "Pro %s nenalezeny žádné vhodné portály iscsi." - -#, python-format -msgid "No host to create consistency group %s." -msgstr "Žádný hostitel pro vytvoření skupiny jednotnosti %s." - -msgid "No iSCSI-enabled ports on target array." -msgstr "V cílovém poli není žádný port umožňující iSCSI." - -msgid "No image_name was specified in request." -msgstr "V žádosti nebyl zadán název obrazu." - -#, python-format -msgid "No initiator group found for initiator %s" -msgstr "Pro zavaděče %s nebyla nalezena žádná skupina zavaděče" - -msgid "No initiators found, cannot proceed" -msgstr "Nebyly nalezeny žádné zavaděče, nelze pokračovat" - -#, python-format -msgid "No interface found on cluster for ip %s" -msgstr "V clusteru s ip adresou %s nenalezeno žádné rozhraní." - -msgid "No ip address found." -msgstr "Žádná ip adresa nenalezena." - -msgid "No iscsi auth groups were found in CloudByte." -msgstr "V CloudByte nebyly nalezeny žádné ověřovací skupiny iscsi." - -msgid "No iscsi initiators were found in CloudByte." -msgstr "V CloudByte nebyly nalezeny žádné zavaděče iscsi." - -#, python-format -msgid "No iscsi service found for CloudByte volume [%s]." -msgstr "Ve svazku Cloudbyte [%s] nebyly nalezeny žádné služby iscsi." 
- -msgid "No iscsi services found in CloudByte storage." -msgstr "V úložišti Cloudbyte nebyly nalezeny žádné služby iscsi." - -#, python-format -msgid "No key file specified and unable to load key from %(cert)s %(e)s." -msgstr "Nezadán žádný klíč a nelze ho načíst z %(cert)s %(e)s." - -msgid "No mounted Gluster shares found" -msgstr "Nenalezena žádné připojená sdílení Gluster" - -msgid "No mounted NFS shares found" -msgstr "Nenalezena žádná připojená sdílení NFS" - -msgid "No mounted SMBFS shares found." -msgstr "Nenalezena žádná připojená sdílení SMBFS" - -msgid "No mounted Virtuozzo Storage shares found" -msgstr "Nenalezena žádné připojená sdílení Virtuozzo" - -msgid "No mounted shares found" -msgstr "Nenalezena žádná připojená sdílení" - -#, python-format -msgid "No node found in I/O group %(gid)s for volume %(vol)s." -msgstr "" -"Ve skupině vstupu/výstupu %(gid)s nenalezen žádný uzel pro svazek %(vol)s." - -msgid "" -"No pools are available for provisioning volumes. Ensure that the " -"configuration option netapp_pool_name_search_pattern is set correctly." -msgstr "" -"Pro poskytující svazky nejsou dostupné žádné zásoby. Ujistěte se, že volba " -"netapp_pool_name_search_pattern je nastavena správně." - -msgid "" -"No response was received from CloudByte storage list iSCSI auth user API " -"call." -msgstr "" -"Žádná odpověď při výpisu volání API přihlašovacích údajů uživatele iSCSI " -"úložiště CloudByte." - -msgid "No response was received from CloudByte storage list tsm API call." -msgstr "Žádná odpověď od výpisu volání api tsm úložiště CloudByte." - -msgid "No response was received from CloudByte's list filesystem api call." -msgstr "Žádná odpověď od výpisu volání api souborového systému CloudByte." - -msgid "No service VIP configured and no nexenta_client_address" -msgstr "Nenastavena žádné VIP pro službu a nejsou žádné adresy klienta nexenta" - -#, python-format -msgid "No snap found with %s as backing file." 
-msgstr "Nenalezen žádný snímek kde %s je záložní soubor." - -#, python-format -msgid "No snapshot image found in snapshot group %s." -msgstr "Obraz snímku nebyl nalezen ve skupině snímku %s." - -#, python-format -msgid "No source snapshots provided to create consistency group %s." -msgstr "" -"Nebyly zadány žádné zdrojové snímky pro vytvoření skupiny jednotnosti %s." - -#, python-format -msgid "No storage path found for export path %s" -msgstr "Nenalezena žádná cesta úložiště pro cestu exportu %s" - -#, python-format -msgid "No such QoS spec %(specs_id)s." -msgstr "Žádné takové specifikace QoS: %(specs_id)s." - -msgid "No suitable discovery ip found" -msgstr "Nenalezena žádná vhodná zjišťovací ip adresa" - -#, python-format -msgid "No support to restore backup version %s" -msgstr "Pro obnovení zálohy verze %s není žádná podpora" - -#, python-format -msgid "No target id found for volume %(volume_id)s." -msgstr "Nenalezeno žádné cílové id ve svazku %(volume_id)s." - -msgid "" -"No unused LUN IDs are available on the host; multiattach is enabled which " -"requires that all LUN IDs to be unique across the entire host group." -msgstr "" -"Hostitel nemá k dispozici nepoužité ID LUN; vícenásobné připojení je " -"povoleno, což vyžaduje jedinečné ID LUN napříč celou skupinou hostitele." - -#, python-format -msgid "No valid host was found. %(reason)s" -msgstr "Nebyl nalezen žádný platný hostitel. %(reason)s" - -#, python-format -msgid "No valid hosts for volume %(id)s with type %(type)s" -msgstr "Žádný platní hostitelé pro svazek %(id)s s typem %(type)s" - -#, python-format -msgid "" -"No volume on cluster with vserver %(vserver)s and junction path %(junction)s " -msgstr "" -"Žádný svazek v clusteru na virtuálním serveru %(vserver)s a cestě spojení " -"%(junction)s" - -msgid "No volume service(s) started successfully, terminating." -msgstr "Žádné služby svazku nebyly úspěšně spuštěny, ukončování." - -msgid "No volume was found at CloudByte storage." 
-msgstr "V úložišti CloudByte nebyl nalezen žádný svazek." - -msgid "No volume_type should be provided when creating test replica." -msgstr "Při vytváření zkušební repliky by neměl být zadán žádný typ svazku." - -msgid "No volumes found in CloudByte storage." -msgstr "V úložišti Cloudbyte nebyly nalezeny žádné svazky." - -msgid "No weighed hosts available" -msgstr "Žádní vážení hostitelé nejsou dostupní" - -#, python-format -msgid "Not a valid string: %s" -msgstr "%s není platný řetězec." - -msgid "Not a valid value for NaElement." -msgstr "Neplatná hodnota pro NaElement." - -#, python-format -msgid "Not able to find a suitable datastore for the volume: %s." -msgstr "Nelze nalézt vhodné datové úložiště pro svazek: %s." - -msgid "Not an rbd snapshot" -msgstr "Není snímkem rbd" - -#, python-format -msgid "Not authorized for image %(image_id)s." -msgstr "Nemáte oprávnění k obrazu %(image_id)s." - -msgid "Not authorized." -msgstr "Není povoleno." - -#, python-format -msgid "Not enough space on backend (%(backend)s)" -msgstr "Na podpůrné vrstvě je nedostatek místa (%(backend)s)" - -msgid "Not enough storage space in the ZFS share to perform this operation." -msgstr "" -"Ve sdílení ZFS není dostatek úložného prostoru k provedení této operace." - -msgid "Not stored in rbd" -msgstr "Neuloženo v rbd" - -msgid "Nova returned \"error\" status while creating snapshot." -msgstr "Při vytváření snímku Nova vrátila stav \"error\"." - -msgid "Null response received from CloudByte's list filesystem." -msgstr "Při výpisu souborového sytému CloudByte byla obdržena nulová odpověď." - -msgid "Null response received from CloudByte's list iscsi auth groups." -msgstr "Obdržena prázdná odpověď od výpisu ověřovacích skupin iscsi CloudByte." - -msgid "Null response received from CloudByte's list iscsi initiators." -msgstr "Od výpisu zavaděčů iscsi CloudByte byla obdržena prázdná odpověď." - -msgid "Null response received from CloudByte's list volume iscsi service." 
-msgstr "Od výpisu služeb iscsi svazku CloudByte byla obdržena prázdná odpověď." - -#, python-format -msgid "Null response received while creating volume [%s] at CloudByte storage." -msgstr "" -"Při vytváření svazku [%s] v úložišti CloudByte byla obdržena prázdná odpověď." - -#, python-format -msgid "Null response received while deleting volume [%s] at CloudByte storage." -msgstr "" -"Při mazání svazku [%s] v úložišti CloudByte byla obdržena prázdná odpověď." - -#, python-format -msgid "" -"Null response received while querying for [%(operation)s] based job " -"[%(job)s] at CloudByte storage." -msgstr "" -"Při dotazování úkolu [%(job)s] využívající [%(operation)s] v úložišti " -"CloudByte byla obdržena prázdná odpověď." - -msgid "Object Count" -msgstr "Počet objektů" - -msgid "Object is not a NetApp LUN." -msgstr "Objekt není NetApp LUN." - -#, python-format -msgid "" -"On an Extend Operation, error adding volume to composite volume: " -"%(volumename)s." -msgstr "" -"Chyba při přidávání svazku do složeného svazku během operace rozšíření: " -"%(volumename)s." - -msgid "One of the required inputs from host, port or scheme was not found." -msgstr "" -"Jeden z požadovaných vstupů od hostitele, portu, nebo schématu nebyl nalezen." - -#, python-format -msgid "" -"Only %(value)s %(verb)s request(s) can be made to %(uri)s every " -"%(unit_string)s." -msgstr "" -"Pouze %(value)s požadavky %(verb)s mohou být provedeny pro %(uri)s každých " -"%(unit_string)s." - -msgid "Only one limit can be set in a QoS spec." -msgstr "Ve specifikaci QoS lze nastavit pouze jednu mez." - -msgid "" -"Only users with token scoped to immediate parents or root projects are " -"allowed to see its children quotas." -msgstr "" -"Pouze uživatelé mající příznak z nadřazených nebo kořenových projektů mají " -"povoleno zobrazit kvóty podřazených projektů" - -msgid "Only volumes managed by OpenStack can be unmanaged." -msgstr "Zrušit správu lze pouze u svazků, jenž spravuje OpenStack." 
- -#, python-format -msgid "Operation failed with status=%(status)s. Full dump: %(data)s" -msgstr "Operace selhala se stavem=%(status)s. Úplný výpis: %(data)s" - -#, python-format -msgid "Operation not supported: %(operation)s." -msgstr "Operace není podporována: %(operation)s." - -msgid "Option gpfs_images_dir is not set correctly." -msgstr "Volba gpfs_images_dir není správně nastavena." - -msgid "Option gpfs_images_share_mode is not set correctly." -msgstr "Volba gpfs_images_share_mode není správně nastavena." - -msgid "Option gpfs_mount_point_base is not set correctly." -msgstr "Volba gpfs_mount_point_base není správně nastavena." - -#, python-format -msgid "Originating %(res)s %(prop)s must be one of '%(vals)s' values" -msgstr "Původní %(res)s %(prop)s musí mít jednu z hodnot '%(vals)s'" - -#, python-format -msgid "ParseException: %s" -msgstr "Chyba zpracování: %s" - -msgid "Partition name is None, please set smartpartition:partitionname in key." -msgstr "" -"Název oddílu je None, prosím nastavte smartpartition:partitionname v klíči." - -msgid "" -"Password or SSH private key is required for authentication: set either " -"san_password or san_private_key option." -msgstr "" -"Heslo nebo soukromý klíč SSH jsou vyžadovány pro ověření: nastavte volbu " -"san_password nebo san_private_key." - -msgid "Path to REST server's certificate must be specified." -msgstr "Cesta k certifikátu serveru REST musí být zadána." - -#, python-format -msgid "Please create %(pool_list)s pool in advance!" -msgstr "Prosím předem vytvořte zásobu %(pool_list)s!" - -#, python-format -msgid "Please create %(tier_levels)s tier in pool %(pool)s in advance!" -msgstr "Prosím předem vytvořte vrstvu %(tier_levels)s v zásobě %(pool)s!" - -msgid "Please specify a name for QoS specs." -msgstr "Prosím zadejte název pro specifikaci QoS." - -#, python-format -msgid "Policy doesn't allow %(action)s to be performed." -msgstr "Zásada nedovoluje, aby bylo %(action)s provedeno." 
-
-#, python-format
-msgid "Pool %(poolNameInStr)s is not found."
-msgstr "Zásoba %(poolNameInStr)s nebyla nalezena."
-
-#, python-format
-msgid "Pool from volume['host'] %(host)s not found."
-msgstr "Zásoba ze svazku['hostitel'] %(host)s nebyla nalezena."
-
-#, python-format
-msgid "Pool from volume['host'] failed with: %(ex)s."
-msgstr "Zásoba ze svazku['hostitel'] selhala: %(ex)s."
-
-msgid "Pool is not available in the volume host field."
-msgstr "Zásoba není dostupná v poli hostitel svazku."
-
-msgid "Pool is not available in the volume host fields."
-msgstr "Zásoba není dostupná v polích hostitele svazku."
-
-#, python-format
-msgid "Pool with name %(pool)s wasn't found in domain %(domain)s."
-msgstr "Zásoba s názvem %(pool)s nebyla nalezena v doméně %(domain)s."
-
-#, python-format
-msgid "Pool with name %(pool_name)s wasn't found in domain %(domain_id)s."
-msgstr "Zásoba s názvem %(pool_name)s nebyla nalezena v doméně %(domain_id)s."
-
-#, python-format
-msgid ""
-"Pool: %(poolName)s. is not associated to storage tier for fast policy "
-"%(fastPolicy)s."
-msgstr ""
-"Zásoba: %(poolName)s. není přidružena k vrstvě úložiště pro zásadu fast "
-"%(fastPolicy)s."
-
-#, python-format
-msgid "PoolName must be in the file %(fileName)s."
-msgstr "Název zásoby musí být v souboru %(fileName)s."
-
-#, python-format
-msgid "Pools %s does not exist"
-msgstr "Zásoby %s neexistují"
-
-msgid "Pools name is not set."
-msgstr "Název zásoby není nastaven."
-
-#, python-format
-msgid "Primary copy status: %(status)s and synchronized: %(sync)s."
-msgstr "Stav hlavní kopie: %(status)s a synchronizovaný: %(sync)s."
-
-msgid "Project ID"
-msgstr "ID projektu"
-
-#, python-format
-msgid "Project quotas are not properly setup for nested quotas: %(reason)s."
-msgstr "Kvóty projektu nejsou pro vnořené kvóty správně nastaveny: %(reason)s."
-
-#, python-format
-msgid ""
-"Protocol %(storage_protocol)s is not supported for storage family "
-"%(storage_family)s."
-msgstr "" -"Protokol %(storage_protocol)s není podporován u úložišť druhu " -"%(storage_family)s." - -msgid "Provided backup record is missing an id" -msgstr "Zadanému záznamu zálohy chybí id" - -#, python-format -msgid "" -"Provided snapshot status %(provided)s not allowed for snapshot with status " -"%(current)s." -msgstr "" -"Zadaný stav snímku %(provided)s není povolen pro snímek se stavem " -"%(current)s." - -#, python-format -msgid "" -"Provider information w.r.t CloudByte storage was not found for OpenStack " -"volume [%s]." -msgstr "" -"Informace poskytovatele s ohledem na úložiště Cloudbyte nebyly nalezeny pro " -"svazek OpenStack [%s]." - -#, python-format -msgid "Pure Storage Cinder driver failure: %(reason)s" -msgstr "Selhání ovladače Pure Storage Cinder: %(reason)s" - -#, python-format -msgid "QoS Specs %(specs_id)s already exists." -msgstr "Specifikace QoS %(specs_id)s již existuje." - -#, python-format -msgid "QoS Specs %(specs_id)s is still associated with entities." -msgstr "Specifikace QoS %(specs_id)s je stále přidružena k entitám." - -#, python-format -msgid "QoS spec %(specs_id)s has no spec with key %(specs_key)s." -msgstr "" -"Specifikace QoS %(specs_id)s nemají specifikace s klíčem %(specs_key)s." - -msgid "QoS specs are not supported on this storage family and ONTAP version." -msgstr "" -"Specifikace QoS nejsou podporovány u tohoto druhu úložišť a verze ONTAP." - -msgid "Qos specs still in use." -msgstr "Specifikace QoS se stále používá." - -msgid "" -"Query by service parameter is deprecated. Please use binary parameter " -"instead." -msgstr "" -"Parametr dotazů podle služby je zastaralý. Místo toho použijte binární " -"parametr." - -msgid "Query resource pool error." -msgstr "Chyba při dotazování zásoby zdroje." - -#, python-format -msgid "Quota %s limit must be equal or greater than existing resources." -msgstr "Limit kvóty %s musí být větší nebo rovno existujícím zdrojům." 
- -#, python-format -msgid "Quota class %(class_name)s could not be found." -msgstr "Třída kvóty %(class_name)s nemohla být nalezena." - -msgid "Quota could not be found" -msgstr "Kvóta nemohla být nalezena." - -#, python-format -msgid "Quota exceeded for resources: %(overs)s" -msgstr "Kvóta překročena pro zdroje: %(overs)s" - -#, python-format -msgid "Quota exceeded: code=%(code)s" -msgstr "Kvóta překročena: kód=%(code)s" - -#, python-format -msgid "Quota for project %(project_id)s could not be found." -msgstr "Kvóta pro projekt %(project_id)s nemohla být nalezena." - -#, python-format -msgid "" -"Quota limit invalid for project '%(proj)s' for resource '%(res)s': limit of " -"%(limit)d is less than in-use value of %(used)d" -msgstr "" -"Limit kvóty je pro zdroj '%(res)s' projektu '%(proj)s neplatný: limit " -"%(limit)d je menší než použitá hodnota %(used)d" - -#, python-format -msgid "Quota reservation %(uuid)s could not be found." -msgstr "Rezervace kvóty %(uuid)s nemohla být nalezena." - -#, python-format -msgid "Quota usage for project %(project_id)s could not be found." -msgstr "Využití kvóty pro projekt %(project_id)s nemohla být nalezena." - -#, python-format -msgid "RBD diff op failed - (ret=%(ret)s stderr=%(stderr)s)" -msgstr "" -"RBD operace porovnání selhala - (ret=%(ret)s chybový výstup=%(stderr)s)" - -#, python-format -msgid "REST %(proxy_ver)s hpelefthandclient %(rest_ver)s" -msgstr "REST %(proxy_ver)s, hpelefthandclient %(rest_ver)s" - -msgid "REST server IP must by specified." -msgstr "IP serveru REST musí být zadáno." - -msgid "REST server password must by specified." -msgstr "Heslo serveru REST musí být zadáno." - -msgid "REST server username must by specified." -msgstr "Uživatelské jméno serveru REST musí být zadáno." - -msgid "RPC server response is incomplete" -msgstr "Odpověď serveru RPC je nedokončená" - -msgid "Raid did not have MCS Channel." -msgstr "RAID neměl kanál MCS." 
-
-#, python-format
-msgid "Received error string: %s"
-msgstr "Obdržen chybový řetězec: %s"
-
-msgid "Reference must be for an unmanaged snapshot."
-msgstr "Odkaz musí být pro nespravovaný snímek."
-
-msgid "Reference must be for an unmanaged virtual volume."
-msgstr "Odkaz musí být pro nespravovaný virtuální svazek."
-
-msgid "Reference must be the volume name of an unmanaged virtual volume."
-msgstr "Odkaz musí být název nespravovaného virtuálního svazku."
-
-msgid "Reference must contain either source-name or source-id element."
-msgstr "Odkaz musí obsahovat buď prvek source-name nebo source-id."
-
-msgid "Reference must contain source-id or source-name key."
-msgstr "Odkaz musí obsahovat klíč source-id nebo source-name."
-
-msgid "Reference must contain source-id or source-name."
-msgstr "Odkaz musí obsahovat id nebo název zdroje."
-
-msgid "Reference must contain source-name element."
-msgstr "Odkaz musí obsahovat prvek source-name."
-
-msgid "Reference must contain source-name or source-id."
-msgstr "Odkaz musí obsahovat source-name nebo source-id."
-
-msgid "Reference must contain source-name."
-msgstr "Odkaz musí obsahovat název zdroje."
-
-#, python-format
-msgid ""
-"Refusing to migrate volume ID: %(id)s. Please check your configuration "
-"because source and destination are the same Volume Group: %(name)s."
-msgstr ""
-"Přesun svazku s ID: %(id)s zamítnut. Prosím zkontrolujte své nastavení, "
-"protože zdroj a cíl jsou ve stejné skupině svazku: %(name)s."
-
-msgid "Remove CHAP error."
-msgstr "Chyba při odstraňování CHAP."
-
-msgid "Remove fc from host error."
-msgstr "Chyba při odstraňování fc z hostitele."
-
-msgid "Remove host from array error."
-msgstr "Chyba při odstraňování hostitele z pole."
-
-msgid "Remove host from hostgroup error."
-msgstr "Chyba při odstraňování hostitele ze skupiny hostitele."
-
-msgid "Remove iscsi from host error."
-msgstr "Chyba při odstraňování iscsi z hostitele."
-
-msgid "Remove lun from cache error."
-msgstr "Chyba při odstraňování lun z mezipaměti." - -msgid "Remove lun from partition error." -msgstr "Chyba při odstraňování lun z oddílu." - -msgid "Remove volume export failed." -msgstr "Odstranění exportu svazku selhalo." - -msgid "Rename lun on array error." -msgstr "Chyba při přejmenování lun v poli." - -#, python-format -msgid "Replication Service Capability not found on %(storageSystemName)s." -msgstr "Schopnost služby replikace nebyla nalezena v %(storageSystemName)s." - -#, python-format -msgid "Replication Service not found on %(storageSystemName)s." -msgstr "Služba replikace nebyla nalezena v %(storageSystemName)s." - -msgid "Request body and URI mismatch" -msgstr "Neshoda s tělem požadavku a URI" - -msgid "Request body contains too many items" -msgstr "Tělo požadavku obsahuje příliš mnoho položek" - -msgid "Request body contains too many items." -msgstr "Tělo požadavku obsahuje příliš mnoho položek." - -msgid "Request body empty" -msgstr "Tělo žádosti je prázdné" - -#, python-format -msgid "Request to Datera cluster returned bad status: %(status)s | %(reason)s" -msgstr "" -"Požadavek pro cluster Datera vrátil špatný stav: %(status)s | %(reason)s" - -#, python-format -msgid "" -"Requested backup exceeds allowed Backup gigabytes quota. Requested " -"%(requested)sG, quota is %(quota)sG and %(consumed)sG has been consumed." -msgstr "" -"Požadovaná záloha překračuje povolenou kvótu gigabajtů zálohy. Požadováno " -"%(requested)sG, kvóta je %(quota)sG a bylo spotřebováno %(consumed)sG." - -#, python-format -msgid "" -"Requested volume or snapshot exceeds allowed %(name)s quota. Requested " -"%(requested)sG, quota is %(quota)sG and %(consumed)sG has been consumed." -msgstr "" -"Požadovaný svazek nebo snímek překračuje povolenou kvótu %(name)s. " -"Požadováno %(requested)sG, kvóta je %(quota)sG a bylo spotřebováno " -"%(consumed)sG." - -#, python-format -msgid "" -"Requested volume size %(size)d is larger than maximum allowed limit " -"%(limit)d." 
-msgstr ""
-"Požadovaná velikost svazku %(size)d je větší než maximální povolený limit "
-"%(limit)d."
-
-msgid "Required configuration not found"
-msgstr "Požadované nastavení nenalezeno."
-
-#, python-format
-msgid "Required flag %s is not set"
-msgstr "Požadovaný příznak %s není nastaven"
-
-#, python-format
-msgid ""
-"Reset backup status aborted, the backup service currently configured "
-"[%(configured_service)s] is not the backup service that was used to create "
-"this backup [%(backup_service)s]."
-msgstr ""
-"Resetování stavu zálohy zrušeno, v současnosti nastavená služba záloh "
-"[%(configured_service)s] není stejnou službou použitou k vytvoření této "
-"zálohy [%(backup_service)s]."
-
-#, python-format
-msgid "Resizing clone %s failed."
-msgstr "Změna velikosti klonu %s selhala."
-
-msgid "Resizing image file failed."
-msgstr "Změna velikosti obrazu selhala."
-
-msgid "Resource could not be found."
-msgstr "Zdroj nemohl být nalezen."
-
-msgid "Resource not ready."
-msgstr "Zdroj není připraven."
-
-#, python-format
-msgid "Response error - %s."
-msgstr "Chyba odpovědi - %s."
-
-msgid "Response error - The storage-system is offline."
-msgstr "Chyba v odpovědi - Systém úložiště je mimo provoz."
-
-#, python-format
-msgid "Response error code - %s."
-msgstr "Chybový kód odpovědi - %s."
-
-#, python-format
-msgid ""
-"Restore backup aborted, expected volume status %(expected_status)s but got "
-"%(actual_status)s."
-msgstr ""
-"Obnovení zálohy zrušeno, očekávaný stav svazku je %(expected_status)s ale "
-"zjištěný stav je %(actual_status)s."
-
-#, python-format
-msgid ""
-"Restore backup aborted, the backup service currently configured "
-"[%(configured_service)s] is not the backup service that was used to create "
-"this backup [%(backup_service)s]."
-msgstr ""
-"Obnovení zálohy zrušeno, v současnosti nastavená služba záloh "
-"[%(configured_service)s] není stejnou službou použitou k vytvoření této "
-"zálohy [%(backup_service)s]."
- -#, python-format -msgid "" -"Restore backup aborted: expected backup status %(expected_status)s but got " -"%(actual_status)s." -msgstr "" -"Obnovení zálohy zrušeno, očekávaný stav zálohy je %(expected_status)s ale " -"zjištěný stav je %(actual_status)s." - -#, python-format -msgid "Retry count exceeded for command: %s" -msgstr "Počet pokusů překročen pro příkaz: %s" - -msgid "Retryable SolidFire Exception encountered" -msgstr "Zjištěna opakovatelná výjimka SolidFire" - -msgid "Retype requires migration but is not allowed." -msgstr "Přetypování vyžaduje přesun ale ten není povolen." - -#, python-format -msgid "Rolling back %(volumeName)s by deleting it." -msgstr "Vrácení %(volumeName)s zpět jeho vymazáním." - -#, python-format -msgid "" -"Running Cinder with a VMware vCenter version less than %s is not allowed." -msgstr "" -"Spouštět Cinder pomocí VMware vCenter s verzí starší než %s není povoleno." - -#, python-format -msgid "SMBFS config 'smbfs_oversub_ratio' invalid. Must be > 0: %s" -msgstr "Nastavení SMBFS 'smbfs_oversub_ratio' je neplatné. Musí být > 0: %s" - -#, python-format -msgid "SMBFS config 'smbfs_used_ratio' invalid. Must be > 0 and <= 1.0: %s" -msgstr "" -"Nastavení SMBFS 'smbfs_used_ratio' je neplatné. Musí být > 0 a <= 1.0: %s" - -#, python-format -msgid "SMBFS config file at %(config)s doesn't exist." -msgstr "Soubor s nastavením SMBFS %(config)s neexistuje." - -msgid "SMBFS config file not set (smbfs_shares_config)." 
-msgstr "Soubor s nastavením SMBFS není zadán " - -#, python-format -msgid "SSH Command failed after '%(total_attempts)r' attempts : '%(command)s'" -msgstr "Příkaz SSH selhal po '%(total_attempts)r' pokusech : '%(command)s'" - -#, python-format -msgid "SSH command injection detected: %(command)s" -msgstr "Zjištěno vložení příkazů SSH: %(command)s" - -#, python-format -msgid "SSH connection failed for %(fabric)s with error: %(err)s" -msgstr "SSH připojení selhalo v %(fabric)s s chybou: %(err)s" - -#, python-format -msgid "SSL Certificate expired on %s." -msgstr "Certifikát SSL vypršel %s." - -#, python-format -msgid "SSL error: %(arg)s." -msgstr "Chyba SSL: %(arg)s." - -#, python-format -msgid "Scheduler Host Filter %(filter_name)s could not be found." -msgstr "Filtr hostitelů plánovače %(filter_name)s nemohl být nalezen." - -#, python-format -msgid "Scheduler Host Weigher %(weigher_name)s could not be found." -msgstr "Váha plánovače hostitele %(weigher_name)s nemohla být nalezena." - -#, python-format -msgid "" -"Secondary copy status: %(status)s and synchronized: %(sync)s, sync progress " -"is: %(progress)s%%." -msgstr "" -"Stav druhé kopie: %(status)s a synchronizováný: %(sync)s, postup " -"synchronizace je: %(progress)s%%." - -#, python-format -msgid "SerialNumber must be in the file %(fileName)s." -msgstr "Sériové číslo musí být v souboru %(fileName)s." - -#, python-format -msgid "Service %(service)s on host %(host)s removed." -msgstr "Služba %(service)s odstraněna na hostiteli %(host)s." - -#, python-format -msgid "Service %(service_id)s could not be found." -msgstr "Služba %(service_id)s nemohla být nalezena." - -msgid "Service is unavailable at this time." -msgstr "Služba je v tuto chvíli nedostupná." - -msgid "Sets thin provisioning." -msgstr "Nastaví mělké poskytování." - -msgid "" -"Setting LUN QoS policy group is not supported on this storage family and " -"ONTAP version." 
-msgstr "" -"Nastavení skupin zásad QoS LUN není podporováno u tohoto druhu úložišť a " -"verze ONTAP." - -msgid "" -"Setting file qos policy group is not supported on this storage family and " -"ontap version." -msgstr "" -"Nastavení souboru skupiny zásad QoS není podporováno u tohoto druhu úložišť " -"a verze ONTAP." - -#, python-format -msgid "" -"Share at %(dir)s is not writable by the Cinder volume service. Snapshot " -"operations will not be supported." -msgstr "" -"Do sdílení v %(dir)s nemůže služba svazků Cinder zapisovat. Operace se " -"snímky nebudou podporovány." - -#, python-format -msgid "Sheepdog I/O Error, command was: \"%s\"." -msgstr "Chyba vstupu/výstupu Sheepdog, příkaz byl: \"%s\"." - -msgid "" -"Show operations can only be made to projects in the same hierarchy of the " -"project in which users are scoped to." -msgstr "" -"Zobrazení operací lze pouze u projektů ve stejné hierarchii jako projekt do " -"kterého jsou uživatelé zařazeni." - -msgid "Size" -msgstr "Velikost" - -#, python-format -msgid "Size for volume: %s not found, cannot secure delete." -msgstr "Velikost pro svazek: %s nenalezeno, nelze bezpečně smazat." - -#, python-format -msgid "" -"Size is %(image_size)dGB and doesn't fit in a volume of size " -"%(volume_size)dGB." -msgstr "" -"Velikost je %(image_size)dGB a nevejde se do svazku o velikosti " -"%(volume_size)dGB." - -#, python-format -msgid "" -"Size of specified image %(image_size)sGB is larger than volume size " -"%(volume_size)sGB." -msgstr "" -"Velikost zadaného obrazu %(image_size)sGB je větší než velikost svazku " -"%(volume_size)sGB." - -#, python-format -msgid "Snapshot %(snapshot_id)s could not be found." -msgstr "Snímek %(snapshot_id)s nemohl být nalezen." - -#, python-format -msgid "Snapshot %(snapshot_id)s has no metadata with key %(metadata_key)s." -msgstr "" -"Snímek %(snapshot_id)s nemá žádná popisná data mající klíč %(metadata_key)s." - -#, python-format -msgid "Snapshot '%s' doesn't exist on array." 
-msgstr "Snímek '%s' neexistuje v poli."
-
-#, python-format
-msgid ""
-"Snapshot cannot be created because volume %(vol_id)s is not available, "
-"current volume status: %(vol_status)s."
-msgstr ""
-"Snímek nelze vytvořit protože svazek %(vol_id)s není dostupný, současný stav "
-"svazku: %(vol_status)s."
-
-msgid "Snapshot cannot be created while volume is migrating."
-msgstr "Snímek nemůže být vytvořen při přesunu svazku."
-
-msgid "Snapshot of secondary replica is not allowed."
-msgstr "Snímek druhotné repliky není povolen."
-
-#, python-format
-msgid "Snapshot of volume not supported in state: %s."
-msgstr "Snímek svazku není podporován ve stavu: %s."
-
-#, python-format
-msgid "Snapshot res \"%s\" that is not deployed anywhere?"
-msgstr "Zdroj snímku \"%s\" který není nikde zaveden?"
-
-#, python-format
-msgid "Snapshot status %(cur)s not allowed for update_snapshot_status"
-msgstr "Stav snímku %(cur)s není povolen při aktualizaci stavu snímku"
-
-msgid "Snapshot status must be \"available\" to clone."
-msgstr "Stav snímku musí být pro klonování \"available\"."
-
-#, python-format
-msgid ""
-"Snapshot to be backed up must be available, but the current status is \"%s\"."
-msgstr "Zálohovaný snímek musí být dostupný, ale jeho stav je nyní \"%s\"."
-
-#, python-format
-msgid ""
-"Snapshot='%(snap)s' does not exist in base image='%(base)s' - aborting "
-"incremental backup"
-msgstr ""
-"Snímek='%(snap)s' neexistuje v základním obrazu='%(base)s' - rušení "
-"přírůstkové zálohy"
-
-#, python-format
-msgid "Snapshots are not supported for this volume format: %s"
-msgstr "Snímky nejsou podporovány pro tento formát svazku: %s"
-
-#, python-format
-msgid "Socket error: %(arg)s."
-msgstr "Chyba soketu: %(arg)s."
-
-msgid "SolidFire Cinder Driver exception"
-msgstr "Výjimka ovladače SolidFire Cinder"
-
-msgid "Sort direction array size exceeds sort key array size."
-msgstr "Pole směru řazení překračuje velikost pole klíče řazení."
-
-msgid "Source CG is empty. 
No consistency group will be created." -msgstr "" -"Zdrojová skupina jednotnosti je prázdná. Žádná skupina nebude vytvořena." - -msgid "Source host details not found." -msgstr "Podrobnosti zdrojového hostitele nenalezeny." - -msgid "Source volume device ID is required." -msgstr "ID zařízení zdrojového svazku je vyžadováno." - -msgid "Source volume not mid-migration." -msgstr "Zdrojový svazek není uprostřed přesunu." - -msgid "SpaceInfo returned byarray is invalid" -msgstr "Byarray vrácený od Spaceinfo je neplatný" - -#, python-format -msgid "" -"Specified host to map to volume %(vol)s is in unsupported host group with " -"%(group)s." -msgstr "" -"Zadaný hostitel pro namapování ke svazku %(vol)s je v nepodporované skupině " -"hostitele s %(group)s." - -msgid "Specified logical volume does not exist." -msgstr "Zadaný logický svazek neexistuje." - -msgid "Specify a password or private_key" -msgstr "Zadejte heslo nebo soukromý klíč" - -msgid "Specify san_password or san_private_key" -msgstr "Zadejte san_password nebo san_private_key" - -msgid "Start LUNcopy error." -msgstr "Chyba při spuštění kopírování LUN." - -msgid "State" -msgstr "Stav" - -#, python-format -msgid "State of node is wrong. Current state is %s." -msgstr "Špatný stav uzlu. Současný stav je %s." - -msgid "Status" -msgstr "Stav" - -msgid "Stop snapshot error." -msgstr "Chyba při zastavování snímku." - -#, python-format -msgid "Storage Configuration Service not found on %(storageSystemName)s." -msgstr "Služba nastavení úložiště nebyla nalezena v %(storageSystemName)s." - -#, python-format -msgid "Storage HardwareId mgmt Service not found on %(storageSystemName)s." -msgstr "" -"Služba správy id hardwaru úložiště nebyla nalezena v %(storageSystemName)s." - -#, python-format -msgid "Storage Profile %s not found." -msgstr "Profil úložiště %s nenalezen." - -#, python-format -msgid "Storage Relocation Service not found on %(storageSystemName)s." 
-msgstr "" -"Služba změny umístění úložiště nebyla nalezena v %(storageSystemName)s." - -#, python-format -msgid "Storage family %s is not supported." -msgstr "Druhy úložiště %s nejsou podporovány." - -#, python-format -msgid "Storage group %(storageGroupName)s was not deleted successfully" -msgstr "Skupina úložiště %(storageGroupName)s nebyla úspěšně smazána" - -#, python-format -msgid "Storage host %(svr)s not detected, verify name" -msgstr "Hostitel úložiště %(svr)s nebyl zjištěn, ověřte název" - -#, python-format -msgid "Storage profile: %(storage_profile)s not found." -msgstr "Profil úložiště: %(storage_profile)s nenalezen." - -msgid "Storage resource could not be found." -msgstr "Zdroj úložiště nemohl být nalezen." - -msgid "Storage system id not set." -msgstr "ID úložného systému nebylo nastaveno." - -#, python-format -msgid "Storage system not found for pool %(poolNameInStr)s." -msgstr "Systém úložiště nebyl nalezen pro zásobu %(poolNameInStr)s." - -#, python-format -msgid "StorageSystem %(array)s is not found." -msgstr "Systém úložiště %(array)s nebyl nalezen." - -#, python-format -msgid "System %(id)s found with bad password status - %(pass_status)s." -msgstr "Systém %(id)s nalezen se stavem špatného hesla - %(pass_status)s." - -#, python-format -msgid "System %(id)s found with bad status - %(status)s." -msgstr "Byl nalezen systém %(id)s se špatným stavem - %(status)s." - -msgid "System does not support compression." -msgstr "Systém nepodporuje kompresi." - -msgid "System is busy, retry operation." -msgstr "Systém je zaneprázdněn, zopakujte operaci." - -#, python-format -msgid "" -"TSM [%(tsm)s] was not found in CloudByte storage for account [%(account)s]." -msgstr "" -"TSM [%(tsm)s] nebylo v úložišti CloudByte nalezeno pro účet [%(account)s]." - -msgid "Target volume type is still in use." -msgstr "Cílový typ svazku se stále používá." 
- -msgid "Terminate connection failed" -msgstr "Ukončení připojení selhalo" - -msgid "Terminate connection unable to connect to backend." -msgstr "Nelze se připojit k podpůrné vrstvě pro ukončení připojení." - -#, python-format -msgid "Terminate volume connection failed: %(err)s" -msgstr "Ukončení připojení svazku selhalo: %(err)s" - -#, python-format -msgid "The %(type)s %(id)s source to be replicated was not found." -msgstr "Zdroj %(type)s %(id)s pro replikování nebyl nalezen." - -msgid "" -"The 'sort_key' and 'sort_dir' parameters are deprecated and cannot be used " -"with the 'sort' parameter." -msgstr "" -"Parametry 'sort_key' a 'sort_dir' jsou zastaralé a nemohou být použity s " -"parametrem 'sort'." - -msgid "The EQL array has closed the connection." -msgstr "Pole EQL uzavřelo připojení." - -#, python-format -msgid "" -"The GPFS filesystem %(fs)s is not at the required release level. Current " -"level is %(cur)s, must be at least %(min)s." -msgstr "" -"Systém souborů GPFS %(fs)s není na požadované úrovni vydání. Současná úroveň " -"je %(cur)s, musí být alespoň %(min)s." - -msgid "The IP Address was not found." -msgstr "IP adresa nebyla nalezena." - -#, python-format -msgid "" -"The WebDAV request failed. Reason: %(msg)s, Return code/reason: %(code)s, " -"Source Volume: %(src)s, Destination Volume: %(dst)s, Method: %(method)s." -msgstr "" -"Požadavek WebDAV selhal. Důvod: %(msg)s, návratový kód/důvod: %(code)s, " -"zdrojový svazek: %(src)s, cílový svazek: %(dst)s. metoda %(method)s." - -msgid "" -"The above error may show that the database has not been created.\n" -"Please create a database using 'cinder-manage db sync' before running this " -"command." -msgstr "" -"Chyba výše naznačuje, že databáze nebyla vytvořena.\n" -"Prosím vytvořte ji pomocí 'cinder-manage db sync' před zadáním tohoto " -"příkazu." - -#, python-format -msgid "" -"The array does not support the storage pool setting for SLO %(slo)s and " -"workload %(workload)s. 
Please check the array for valid SLOs and workloads." -msgstr "" -"Pole nepodporuje nastavení zásoby úložiště pro SLO %(slo)s a zatížení " -"%(workload)s. Prosím zkontrolujte pole pro platné SLO a zatížení." - -#, python-format -msgid "" -"The command %(cmd)s failed. (ret: %(ret)s, stdout: %(out)s, stderr: %(err)s)" -msgstr "" -"Příkaz %(cmd)s selhal. (ret: %(ret)s, standardní výstup: %(out)s, chybový " -"výstup: %(err)s." - -msgid "The copy should be primary or secondary" -msgstr "Kopie by měla být primární nebo sekundární" - -#, python-format -msgid "" -"The creation of a logical device could not be completed. (LDEV: %(ldev)s)" -msgstr "" -"Vytvoření logického zařízení nemohlo být dokončeno. (Logické zařízení: " -"%(ldev)s)" - -msgid "The decorated method must accept either a volume or a snapshot object" -msgstr "Dekorovaná metoda musí přijímat buď objekt svazku nebo objekt snímku " - -#, python-format -msgid "The device in the path %(path)s is unavailable: %(reason)s" -msgstr "Zařízení na cestě %(path)s není dostupné: %(reason)s" - -#, python-format -msgid "The end time (%(end)s) must be after the start time (%(start)s)." -msgstr "Doba konce (%(end)s) musí být po době začátku (%(start)s)." - -#, python-format -msgid "The extraspec: %(extraspec)s is not valid." -msgstr "Dodatečná specifikace: %(extraspec)s není platná." - -#, python-format -msgid "The failed-over volume could not be deleted: %s" -msgstr "Záložní svazek nelze smazat: %s." - -#, python-format -msgid "The following elements are required: %s" -msgstr "Jsou vyžadovány následující prvky: %s" - -msgid "The host group or iSCSI target could not be added." -msgstr "Skupina hostitele nebo cíl iSCSI nemohly být přidány." - -msgid "The host group or iSCSI target was not found." -msgstr "Skupina hostitele nebo cíl iSCSI nebyly nalezeny." - -#, python-format -msgid "The iSCSI CHAP user %(user)s does not exist." -msgstr "CHAP uživatel iSCSI %(user)s neexistuje." - -msgid "The key cannot be None." 
-msgstr "Klíč nemůže být None." - -#, python-format -msgid "The logical device for specified %(type)s %(id)s was already deleted." -msgstr "Logické zařízení pro zadané %(type)s %(id)s již je smazáno." - -#, python-format -msgid "The method %(method)s is timed out. (timeout value: %(timeout)s)" -msgstr "Metodě %(method)s vypršel časový limit. (Hodnota limitu: %(timeout)s)" - -msgid "The method update_migrated_volume is not implemented." -msgstr "Metoda update_migrated_volume není zavedena." - -#, python-format -msgid "" -"The mount %(mount_path)s is not a valid Quobyte USP volume. Error: %(exc)s" -msgstr "Připojení %(mount_path)s není platný svazek Quobyte USP. Chyba %(exc)s" - -#, python-format -msgid "The parameter of the storage backend. (config_group: %(config_group)s)" -msgstr "" -"Parametr podpůrné vrstvy úložiště. (Skupina nastavení: %(config_group)s)" - -msgid "The parent backup must be available for incremental backup." -msgstr "Pro provedení přírůstkové zálohy musí být dostupná nadřazená záloha." - -#, python-format -msgid "The provided snapshot '%s' is not a snapshot of the provided volume." -msgstr "Zadaný snímek '%s' není snímek zadaného svazku." - -msgid "" -"The reference to the volume in the backend should have the format " -"file_system/volume_name (volume_name cannot contain '/')" -msgstr "" -"Odkaz na svazek v podpůrné vrstvě by měl být ve formátu souborový systém/" -"název svazku (název svazku nemůže obsahovat '/')" - -msgid "" -"The replication mode was not configured correctly in the volume type " -"extra_specs. If replication:mode is periodic, replication:sync_period must " -"also be specified and be between 300 and 31622400 seconds." -msgstr "" -"Režim replikace nebyl v dodatečných specifikacích svazku správně nastaven. " -"Pokud je režim periodic, musí být také zadáno replication:sync_period a mít " -"zvolen interval mající hodnotu od 300 až 31622400 vteřin." 
- -#, python-format -msgid "" -"The requested size : %(requestedSize)s is not the same as resulting size: " -"%(resultSize)s." -msgstr "" -"Požadovaná velikost : %(requestedSize)s není stejná jako výsledná velikost: " -"%(resultSize)s." - -#, python-format -msgid "The resource %(resource)s was not found." -msgstr "Zdroj %(resource)s nebyl nalezen." - -msgid "The results are invalid." -msgstr "Výsledky jsou neplatné." - -msgid "The snapshot cannot be created when the volume is in maintenance mode." -msgstr "Snímek nelze vytvořit, zatímco svazek je v režimu údržby." - -msgid "The source volume for this WebDAV operation not found." -msgstr "Zdrojový svazek nebyl nalezen pro tuto operaci WebDAV." - -#, python-format -msgid "" -"The source volume type '%(src)s' is different than the destination volume " -"type '%(dest)s'." -msgstr "Typ zdrojového svazku '%(src)s' je jiný než cílový typ '%(dest)s'." - -#, python-format -msgid "The source volume type '%s' is not available." -msgstr "Typ zdrojového svazku '%s' není dostupný." - -#, python-format -msgid "The specified %(desc)s is busy." -msgstr "Zadané %(desc)s je zaneprázdněno." - -#, python-format -msgid "" -"The specified ldev %(ldev)s could not be managed. The ldev must not be " -"mapping." -msgstr "" -"Zadané logické zařízení %(ldev)s nemůže být spravováno. Zařízení nesmí být " -"mapováno." - -#, python-format -msgid "" -"The specified ldev %(ldev)s could not be managed. The ldev must not be " -"paired." -msgstr "" -"Zadané logické zařízení %(ldev)s nemůže být spravováno. Zařízení nesmí být " -"spárováno." - -#, python-format -msgid "" -"The specified ldev %(ldev)s could not be managed. The ldev size must be in " -"multiples of gigabyte." -msgstr "" -"Zadané logické zařízení %(ldev)s nemůže být spravováno. Velikost zařízení " -"musí být v násobcích gigabajtu." - -#, python-format -msgid "" -"The specified ldev %(ldev)s could not be managed. The volume type must be DP-" -"VOL." 
-msgstr "" -"Zadané logické zařízení %(ldev)s nemůže být spravováno. Typ svazku musí být " -"DP-VOL." - -#, python-format -msgid "" -"The specified operation is not supported. The volume size must be the same " -"as the source %(type)s. (volume: %(volume_id)s)" -msgstr "" -"Zadaná operace není podporována. Velikost svazku musí být stejná jako zdroj " -"%(type)s. (svazek: %(volume_id)s)" - -msgid "The specified vdisk is mapped to a host." -msgstr "Zadaný virtuální disk je mapován k hostiteli." - -msgid "The specified volume is mapped to a host." -msgstr "Zadaný svazek je namapován k hostiteli." - -#, python-format -msgid "" -"The storage array password for %s is incorrect, please update the configured " -"password." -msgstr "" -"Heslo pole úložiště pro %s není správné, prosím aktualizujte nastavené heslo." - -#, python-format -msgid "The storage backend can be used. (config_group: %(config_group)s)" -msgstr "" -"Podpůrná vrstva úložiště může být použita. (Skupina nastavení: " -"%(config_group)s)" - -#, python-format -msgid "" -"The striped meta count of %(memberCount)s is too small for volume: " -"%(volumeName)s, with size %(volumeSize)s." -msgstr "" -"Odebraný počet popisných dat z %(memberCount)s je příliš malý pro svazek: " -"%(volumeName)s, s velikostí %(volumeSize)s." - -#, python-format -msgid "" -"The type of metadata: %(metadata_type)s for volume/snapshot %(id)s is " -"invalid." -msgstr "" -"Typ popisných dat: %(metadata_type)s pro svazek/snímek %(id)s je neplatný." - -#, python-format -msgid "" -"The volume %(volume_id)s could not be extended. The volume type must be " -"Normal." -msgstr "" -"Svazek %(volume_id)s nemohl být rozšířen. Typ svazku musí být Normální." - -#, python-format -msgid "" -"The volume %(volume_id)s could not be unmanaged. The volume type must be " -"%(volume_type)s." -msgstr "" -"Nelze zrušit správu svazku %(volume_id)s. Typ svazku musí být " -"%(volume_type)s." 
- -#, python-format -msgid "The volume %(volume_id)s is managed successfully. (LDEV: %(ldev)s)" -msgstr "" -"Svazek %(volume_id)s byl úspěšně zařazen pod správu. (Logické zařízení: " -"%(ldev)s)" - -#, python-format -msgid "The volume %(volume_id)s is unmanaged successfully. (LDEV: %(ldev)s)" -msgstr "" -"Svazek %(volume_id)s byl úspěšně odstraněn ze správy. (Logické zařízení: " -"%(ldev)s)" - -#, python-format -msgid "The volume %(volume_id)s to be mapped was not found." -msgstr "Svazek %(volume_id)s pro mapování nemohl být nalezen." - -msgid "The volume cannot accept transfer in maintenance mode." -msgstr "Svazek nemůže v režimu údržby přijímat přenosy v režimu údržby." - -msgid "The volume cannot be attached in maintenance mode." -msgstr "Svazek nemůže být během údržby připojen." - -msgid "The volume cannot be detached in maintenance mode." -msgstr "Svazek nemůže být během údržby odpojen." - -msgid "The volume cannot be updated during maintenance." -msgstr "Svazek nemůže být během údržby spravován." - -msgid "The volume connection cannot be initialized in maintenance mode." -msgstr "Připojení ke svazku nemůže být zavedeno v režimu údržby." - -msgid "The volume driver requires the iSCSI initiator name in the connector." -msgstr "Ovladač svazku vyžaduje název zavaděče iSCSI v konektoru." - -msgid "" -"The volume is currently busy on the 3PAR and cannot be deleted at this time. " -"You can try again later." -msgstr "" -"Svazek je v současnosti zaneprázdněn v 3PAR a nemůže být nyní smazán. Prosím " -"zkuste to znovu později." - -msgid "The volume label is required as input." -msgstr "Jmenovka svazku je vyžadována jako vstup." - -#, python-format -msgid "There are no resources available for use. (resource: %(resource)s)" -msgstr "Žádné dostupné zdroje k použití. (Zdroj: %(resource)s)" - -msgid "There are no valid ESX hosts." -msgstr "Žádní platní hostitelé ESX." - -msgid "There are no valid datastores." -msgstr "Žádná platná datová úložiště." 
- -#, python-format -msgid "" -"There is no designation of the %(param)s. The specified storage is essential " -"to manage the volume." -msgstr "" -"Neexistuje označení pro %(param)s. Zadané úložiště je pro správu svazku " -"nezbytné." - -msgid "" -"There is no designation of the ldev. The specified ldev is essential to " -"manage the volume." -msgstr "" -"Neexistuje označení pro logické zařízení. Zadané logické zařízení je " -"nezbytné pro správu svazku." - -msgid "There is no metadata in DB object." -msgstr "V objektu DB nejsou žádná popisná data." - -#, python-format -msgid "There is no share which can host %(volume_size)sG" -msgstr "Neexistuje sdílení schopné hostit %(volume_size)sG" - -#, python-format -msgid "There is no share which can host %(volume_size)sG." -msgstr "Neexistuje sdílení schopné hostit %(volume_size)sG." - -#, python-format -msgid "There is no such action: %s" -msgstr "Žádná taková činnost: %s" - -msgid "There is no virtual disk device." -msgstr "Neexistuje žádné zařízení virtuálního disku." - -#, python-format -msgid "There was an error adding the volume to the remote copy group: %s." -msgstr "Při přidávání svazku do skupiny vzdálené kopie se vyskytla chyba: %s." - -#, python-format -msgid "There was an error creating the cgsnapshot: %s" -msgstr "Při vytváření snímku skupiny jednotnosti nastala chyba: %s" - -#, python-format -msgid "There was an error creating the remote copy group: %s." -msgstr "Při vytváření skupiny vzdálené kopie se vyskytla chyba: %s" - -#, python-format -msgid "" -"There was an error setting the sync period for the remote copy group: %s." -msgstr "" -"Při nastavování doby synchronizace skupiny vzdálené kopie se vyskytla chyba: " -"%s" - -#, python-format -msgid "" -"There was an error setting up a remote copy group on the 3PAR arrays: " -"('%s'). The volume will not be recognized as replication type." -msgstr "" -"Při nastavování skupiny vzdálené kopie na polích 3PAR se vyskytla chyba: " -"('%s'). 
Svazek nebude rozpoznán jako mající typ pocházející od replikace." - -#, python-format -msgid "There was an error starting remote copy: %s." -msgstr "Při spouštění vzdálené kopie se vyskytla chyba: %s" - -#, python-format -msgid "There's no Gluster config file configured (%s)" -msgstr "Není nastaven žádný konfigurační soubor Gluster (%s)" - -#, python-format -msgid "There's no NFS config file configured (%s)" -msgstr "Není zadán žádný soubor s nastavením NFS (%s)" - -#, python-format -msgid "" -"There's no Quobyte volume configured (%s). Example: quobyte:///" -"" -msgstr "" -"Není nastaven žádný svazek Quobyte (%s). Příklad: quobyte:///" - -msgid "Thin provisioning not supported on this version of LVM." -msgstr "Mělké poskytování není podporováno v této verzi LVM." - -msgid "This driver does not support deleting in-use snapshots." -msgstr "Tento ovladač nepodporuje mazání právě používaných snímků." - -msgid "This driver does not support snapshotting in-use volumes." -msgstr "Tento ovladač nepodporuje vytváření snímků u právě používaných svazků." - -msgid "This request was rate-limited." -msgstr "Tento požadavek má omezené množství." - -#, python-format -msgid "" -"This system platform (%s) is not supported. This driver supports only Win32 " -"platforms." -msgstr "" -"Tato systémová platforma (%s) není podporována. Tento ovladač podporuje " -"pouze platformy Win32." - -#, python-format -msgid "Tier Policy Service not found for %(storageSystemName)s." -msgstr "Služba zásad nebyla nalezena v %(storageSystemName)s." - -#, python-format -msgid "Timed out while waiting for Nova update for creation of snapshot %s." -msgstr "Při čekání na aktualizaci od Nova při vytváření snímku %s vypršel čas." - -#, python-format -msgid "" -"Timed out while waiting for Nova update for deletion of snapshot %(id)s." -msgstr "" -"Při čekání na aktualizaci od Nova při mazání snímku %(id)s vypršel čas." - -#, python-format -msgid "Timeout while requesting %(service)s API." 
-msgstr "Při žádání o API %(service)s vypršel časový limit." - -#, python-format -msgid "Transfer %(transfer_id)s could not be found." -msgstr "Přenos %(transfer_id)s nemohl být nalezen." - -#, python-format -msgid "" -"Transfer %(transfer_id)s: Volume id %(volume_id)s in unexpected state " -"%(status)s, expected awaiting-transfer" -msgstr "" -"Přenos %(transfer_id)s: Svazek s ID %(volume_id)s je v neočekávaném stavu " -"%(status)s, předpokládáno čekání na přenos" - -#, python-format -msgid "" -"Trying to import backup metadata from id %(meta_id)s into backup %(id)s." -msgstr "" -"Pokus o importování popisných dta zálohy z id %(meta_id)s do zálohy %(id)s." - -#, python-format -msgid "" -"Tune volume task stopped before it was done: volume_name=%(volume_name)s, " -"task-status=%(status)s." -msgstr "" -"Úkol ladění svazku zastaven pře jeho dokončením: název svazku=" -"%(volume_name)s, stav úkoolu=%(status)s." - -#, python-format -msgid "" -"Type %(type_id)s is already associated with another qos specs: " -"%(qos_specs_id)s" -msgstr "" -"Typ %(type_id)s již je přidružen k jiné specifikaci qos: %(qos_specs_id)s" - -msgid "Type access modification is not applicable to public volume type." -msgstr "Změna typu přístupu se nevztahuje k veřejnému typu svazku." - -msgid "Type cannot be converted into NaElement." -msgstr "Typ nemůže být převeden na NaElement." - -#, python-format -msgid "TypeError: %s" -msgstr "Chyba typu: %s" - -#, python-format -msgid "UUIDs %s are in both add and remove volume list." -msgstr "UUID %s jsou zároveň v seznamu přidání i odstranění svazků." - -msgid "Unable to access the backend storage via file handle." -msgstr "" -"Nelze získat přístup k podpůrné vrstvě úložiště pomocí obslužné rutiny " -"souboru." - -#, python-format -msgid "Unable to access the backend storage via the path %(path)s." -msgstr "Nelze získat přístup k podpůrné vrstvě úložiště pomocí cesty %(path)s." 
- -#, python-format -msgid "Unable to add Cinder host to apphosts for space %(space)s" -msgstr "" -"Nelze přidat hostitele Cinder do hostitelů aplikací v prostoru %(space)s" - -msgid "Unable to connect or find connection to host" -msgstr "Nelze se připojit nebo nalézt připojení k hostiteli" - -#, python-format -msgid "Unable to create consistency group %s" -msgstr "Nelze vytvořit skupinu jednotnosti %s" - -msgid "Unable to create lock. Coordination backend not started." -msgstr "Nelze vytvořit zámek. Podpůrná vrstva koordinátora nebyla spuštěna." - -#, python-format -msgid "" -"Unable to create or get default storage group for FAST policy: " -"%(fastPolicyName)s." -msgstr "" -"Nelze vytvořit nebo získat výchozí skupinu úložiště pro zásadu FAST: " -"%(fastPolicyName)s." - -#, python-format -msgid "Unable to create replica clone for volume %s." -msgstr "Nelze vytvořit klon repliky svazku %s." - -#, python-format -msgid "Unable to delete Consistency Group snapshot %s" -msgstr "Nelze smazat snímek skupiny jednotnosti %s" - -#, python-format -msgid "Unable to delete snapshot %(id)s, status: %(status)s." -msgstr "Nelze smazat snímek %(id)s, stav: %(status)s." - -#, python-format -msgid "Unable to delete snapshot policy on volume %s." -msgstr "Nelze smazat zásadu snímku ve svazku %s." - -msgid "" -"Unable to detach volume. Volume status must be 'in-use' and attach_status " -"must be 'attached' to detach." -msgstr "" -"Nelze odpojit svazek. Pro odpojení musí být stav svazku 'in-use' a stav " -"připojení musí být 'attached'." - -#, python-format -msgid "Unable to determine snapshot name in Purity for snapshot %(id)s." -msgstr "Nelze zjistit název snímku v Purity pro snímek %(id)s." - -msgid "Unable to determine system id." -msgstr "Nelze zjistit id systému." - -msgid "Unable to determine system name." -msgstr "Nelze zjistit název systému." 
- -#, python-format -msgid "" -"Unable to do manage snapshot operations with Purity REST API version " -"%(api_version)s, requires %(required_versions)s." -msgstr "" -"Nelze provést operace správy snímku pomocí Purity REST API verze " -"%(api_version)s, vyžaduje %(required_versions)s." - -#, python-format -msgid "Unable to extend volume %s" -msgstr "Nelze rozšířit svazek %s" - -msgid "Unable to fetch connection information from backend." -msgstr "Nelze získat informace o připojení z podpůrné vrstvy." - -#, python-format -msgid "Unable to fetch connection information from backend: %(err)s" -msgstr "Nelze získat informace o připojení z podpůrné vrstvy: %(err)s" - -#, python-format -msgid "Unable to find Purity ref with name=%s" -msgstr "Nelze najít odkaz na Purity s názvem=%s" - -#, python-format -msgid "Unable to find Volume Group: %(vg_name)s" -msgstr "Nelze najít skupinu svazku: %(vg_name)s" - -msgid "Unable to find iSCSI mappings." -msgstr "Nelze najít mapování iSCSI." - -#, python-format -msgid "Unable to find ssh_hosts_key_file: %s" -msgstr "Nelze najít soubor klíčů hostitelů ssh: %s" - -msgid "Unable to find system log file!" -msgstr "Nelze najít soubor záznamu systému!" - -#, python-format -msgid "Unable to find volume %s" -msgstr "Nelze najít svazek %s" - -#, python-format -msgid "Unable to get a block device for file '%s'" -msgstr "Nelze získat blokové zařízení pro soubor '%s'" - -#, python-format -msgid "" -"Unable to get configuration information necessary to create a volume: " -"%(errorMessage)s." -msgstr "" -"Nelze získat informace o nastavení potřebné k vytvoření svazku: " -"%(errorMessage)s." - -msgid "Unable to get corresponding record for pool." -msgstr "Nelze získat odpovídající záznam pro zásobu" - -#, python-format -msgid "" -"Unable to get information on space %(space)s, please verify that the cluster " -"is running and connected." -msgstr "" -"Nelze získat informace o prostoru %(space)s, prosím ověřte, že cluster běží " -"a je připojen." 
- -msgid "" -"Unable to get list of IP addresses on this host, check permissions and " -"networking." -msgstr "" -"Nelze získat seznam IP adres na tomto hostiteli. Zkontrolujte oprávnění a " -"sítě." - -msgid "" -"Unable to get list of domain members, check that the cluster is running." -msgstr "Nelze získat seznam členů domény. Zkontrolujte, že cluster je spuštěn." - -msgid "" -"Unable to get list of spaces to make new name. Please verify the cluster is " -"running." -msgstr "" -"Nelze získat seznam prostorů pro vytvoření nového názvu. Ověřte prosím, zda " -"cluster běží." - -#, python-format -msgid "Unable to get stats for backend_name: %s" -msgstr "Nelze získat statistiky s podpůrné vrstvy s názvem: %s" - -#, python-format -msgid "Unable to get target endpoints for hardwareId %(hardwareIdInstance)s." -msgstr "" -"Nelze získat cílové koncové body pro hardware s ID %(hardwareIdInstance)s." - -#, python-format -msgid "" -"Unable to import volume %(deviceId)s to cinder. It is the source volume of " -"replication session %(sync)s." -msgstr "" -"Nelze importovat svazek %(deviceId)s do cinder. Je to zdrojový svazek sezení " -"replikace %(sync)s." - -#, python-format -msgid "" -"Unable to import volume %(deviceId)s to cinder. The external volume is not " -"in the pool managed by current cinder host." -msgstr "" -"Nelze importovat svazek %(deviceId)s do cinder. Vnější svazek není v zásobě " -"spravované současným hostitelem cinder." - -#, python-format -msgid "" -"Unable to import volume %(deviceId)s to cinder. Volume is in masking view " -"%(mv)s." -msgstr "" -"Nelze importovat svazek %(deviceId)s do cinder. Zobrazení svazku je " -"maskováno %(mv)s." - -#, python-format -msgid "Unable to load CA from %(cert)s %(e)s." -msgstr "Nelze načíst certifikační autoritu z %(cert)s %(e)s." - -#, python-format -msgid "Unable to load cert from %(cert)s %(e)s." -msgstr "Nelze načíst certifikát z %(cert)s %(e)s." - -#, python-format -msgid "Unable to load key from %(cert)s %(e)s." 
-msgstr "Nelze načíst klíč z %(cert)s %(e)s." - -#, python-format -msgid "Unable to locate account %(account_name)s on Solidfire device" -msgstr "Nelze nalézt účet %(account_name)s na zařízení Solidfire" - -#, python-format -msgid "Unable to locate an SVM that is managing the IP address '%s'" -msgstr "Nelze nalézt SVM které spravuje IP adresu '%s'" - -#, python-format -msgid "" -"Unable to manage existing volume. Volume %(volume_ref)s already managed." -msgstr "" -"Nelze spravovat existující svazek. Svazek %(volume_ref)s již je spravován." - -#, python-format -msgid "Unable to manage volume %s" -msgstr "Nelze spravovat svazek %s" - -msgid "Unable to map volume" -msgstr "Nelze mapovat svazek" - -msgid "Unable to map volume." -msgstr "Nelze mapovat svazek." - -msgid "Unable to parse attributes." -msgstr "Nelze zpracovat vlastnosti." - -#, python-format -msgid "" -"Unable to promote replica to primary for volume %s. No secondary copy " -"available." -msgstr "Nelze povýšit repliku na hlavní pro svazek %s. Druhá kopie neexistuje." - -msgid "" -"Unable to re-use a host that is not managed by Cinder with " -"use_chap_auth=True," -msgstr "" -"Nelze znovu použít hostitele, kterého nespravuje Cinder, pomocí " -"use_chap_auth=True." - -msgid "Unable to re-use host with unknown CHAP credentials configured." -msgstr "" -"Nelze znovu použít hostitele, pokud má nastaveno neznámé ověřovací údaje " -"CHAP." - -#, python-format -msgid "Unable to rename volume %(existing)s to %(newname)s" -msgstr "Nelze přejmenovat svazek %(existing)s na %(newname)s" - -#, python-format -msgid "" -"Unable to retype: A copy of volume %s exists. Retyping would exceed the " -"limit of 2 copies." -msgstr "" -"Nezle přetypovat: Kopie svazku %s již existuje, Přetypování by překročilo " -"omezení 2 kopií." - -#, python-format -msgid "" -"Unable to retype: Current action needs volume-copy, it is not allowed when " -"new type is replication. 
Volume = %s" -msgstr "" -"Nezle přetypovat: Současná činnost vyžaduje kopii svazku, není povoleno, " -"pokud je nový typ replikace. Svazek = %s" - -#, python-format -msgid "Unable to snap Consistency Group %s" -msgstr "Nelze vytvořit snímek skupiny jednotnosti %s" - -msgid "Unable to terminate volume connection from backend." -msgstr "Nelze ukončit připojení svazku v podpůrné vrstvě." - -#, python-format -msgid "Unable to terminate volume connection: %(err)s" -msgstr "Nelze ukončit připojení k ovladači: %(err)s" - -#, python-format -msgid "Unable to update consistency group %s" -msgstr "Nelze aktualizovat skupinu jednotnosti %s" - -#, python-format -msgid "" -"Unable to verify initiator group: %(igGroupName)s in masking view " -"%(maskingViewName)s. " -msgstr "" -"Nelze ověřit skupinu zavaděče: %(igGroupName)s por zamaskování " -"%(maskingViewName)s. " - -msgid "Unacceptable parameters." -msgstr "Nepřijatelné parametry." - -#, python-format -msgid "" -"Unexecpted mapping status %(status)s for mapping %(id)s. Attributes: " -"%(attr)s." -msgstr "Neočekávaný stav mapování %(status)s pro %(id)s. Vlastnosti: %(attr)s." - -#, python-format -msgid "" -"Unexpected CLI response: header/row mismatch. header: %(header)s, row: " -"%(row)s." -msgstr "" -"Neočekávaná odpověď rozhraní příkazového řádku: neshoda hlavičky/řádku. " -"Hlavička: %(header)s, řádek: %(row)s." - -#, python-format -msgid "" -"Unexpected mapping status %(status)s for mapping%(id)s. Attributes: %(attr)s." -msgstr "Neočekávaný stav mapování %(status)s pro %(id)s. Vlastnosti: %(attr)s." 
- -msgid "Unexpected response from Nimble API" -msgstr "Neočekávaná odpověď od Nimble API" - -msgid "Unexpected response from Tegile IntelliFlash API" -msgstr "Neočekávaná odpověď od Tegile IntelliFlash API" - -msgid "Unexpected status code" -msgstr "Neočekávaný kód stavu" - -msgid "Unknown Gluster exception" -msgstr "Neznámá výjimka Gluster" - -msgid "Unknown NFS exception" -msgstr "Neznámá výjimka NFS" - -msgid "Unknown RemoteFS exception" -msgstr "Neznámá výjimka RemoteFS" - -msgid "Unknown SMBFS exception." -msgstr "Neznámá výjimka SMBFS." - -msgid "Unknown Virtuozzo Storage exception" -msgstr "Neznámá výjimka úložiště Virtuozzo" - -msgid "Unknown action" -msgstr "Neznámá činnost" - -#, python-format -msgid "Unknown or unsupported command %(cmd)s" -msgstr "Neznámý nebo nepodporovaný příkaz %(cmd)s" - -#, python-format -msgid "Unknown protocol: %(protocol)s." -msgstr "Neznámý protokol: %(protocol)s." - -#, python-format -msgid "Unknown quota resources %(unknown)s." -msgstr "Neznámý zdroj kvóty %(unknown)s." - -msgid "Unknown sort direction, must be 'desc' or 'asc'" -msgstr "Neznámý směr řazení, musí být 'desc' nebo 'asc'" - -msgid "Unknown sort direction, must be 'desc' or 'asc'." -msgstr "Neznámý směr řazení, musí být buď 'desc' nebo 'asc'." - -msgid "Unmanage volume not implemented." -msgstr "Zrušení správy svazku není zavedeno." - -#, python-format -msgid "Unrecognized QOS keyword: \"%s\"" -msgstr "Nerozpoznané klíčové slovo QoS: \"%s\"" - -#, python-format -msgid "Unrecognized backing format: %s" -msgstr "Nerozpoznaný formát zálohy: %s" - -#, python-format -msgid "Unrecognized read_deleted value '%s'" -msgstr "Nerozpoznaná hodnota read_deleted '%s'" - -msgid "Unsupported Content-Type" -msgstr "Nepodporovaný Content-Type" - -msgid "" -"Unsupported Data ONTAP version. Data ONTAP version 7.3.1 and above is " -"supported." -msgstr "Nepodporovaná verze Data ONTAP. Podporované jsou verze 7.3.1 a vyšší." 
-
-#, python-format
-msgid "Unsupported backup metadata version (%s)"
-msgstr "Nepodporovaná verze zálohy popisných dat (%s)"
-
-msgid "Unsupported backup metadata version requested"
-msgstr "Zažádána nepodporovaná verze popisných dat zálohy"
-
-msgid "Unsupported backup verify driver"
-msgstr "Nepodporovaný ovladač ověření zálohy"
-
-#, python-format
-msgid ""
-"Unsupported firmware on switch %s. Make sure switch is running firmware v6.4 "
-"or higher"
-msgstr ""
-"Nepodporovaný firmware na rozbočovači %s. Ujistěte se, že na rozbočovači je "
-"nainstalována verze 6.4 nebo vyšší."
-
-#, python-format
-msgid "Unsupported volume format: %s "
-msgstr "Nepodporovaný formát svazku: %s"
-
-msgid "Update QoS policy error."
-msgstr "Chyba při aktualizování zásady QoS."
-
-msgid ""
-"Update and delete quota operations can only be made by an admin of immediate "
-"parent or by the CLOUD admin."
-msgstr ""
-"Operace aktualizace a smazání kvóty může provést pouze správce přímého "
-"nadřazeného nebo správce Cloudu."
-
-msgid ""
-"Update and delete quota operations can only be made to projects in the same "
-"hierarchy of the project in which users are scoped to."
-msgstr ""
-"Operace aktualizace a smazání kvóty mohou být provedeny pouze v projektech "
-"ve stejné hierarchii jako projekt do kterého jsou uživatelé zařazeni."
-
-msgid "Updated At"
-msgstr "Aktualizováno"
-
-msgid "Upload to glance of attached volume is not supported."
-msgstr "Nahrávání na glance připojeného svazku není podporováno."
-
-msgid "Use ALUA to associate initiator to host error."
-msgstr "Chyba při použití ALUA k přidružení zavaděče k hostiteli."
-
-msgid ""
-"Use CHAP to associate initiator to host error. Please check the CHAP "
-"username and password."
-msgstr ""
-"Chyba při použití CHAP k přidružení zavaděče k hostiteli. Zkontrolujte "
-"prosím uživatelské jméno a heslo CHAP."
- -msgid "User ID" -msgstr "ID uživatele" - -msgid "User does not have admin privileges" -msgstr "Uživatel nemá správcovská oprávnění" - -msgid "User not authorized to perform WebDAV operations." -msgstr "Uživatel nemá oprávnění provádět operace WebDAV." - -msgid "V2 rollback, volume is not in any storage group." -msgstr "Zpětné vrácení V2, svazek není v žádné skupině úložiště." - -msgid "V3 rollback" -msgstr "Zpětné vrácení V3" - -#, python-format -msgid "VV Set %s does not exist." -msgstr "Sada virtuálních svazků %s neexistuje." - -#, python-format -msgid "Valid consumer of QoS specs are: %s" -msgstr "Platní spotřebitelé specifikace QoS jsou: %s" - -#, python-format -msgid "Valid control location are: %s" -msgstr "Platná ovládací umístění jsou: %s" - -#, python-format -msgid "Validate volume connection failed (error: %(err)s)." -msgstr "Ověření připojení svazku selhalo (chyba: %(err)s)." - -#, python-format -msgid "" -"Value \"%(value)s\" is not valid for configuration option \"%(option)s\"" -msgstr "Hodnota \"%(value)s\" není platná pro volbu nastavení \"%(option)s\"" - -#, python-format -msgid "Value %(param)s for %(param_string)s is not a boolean." -msgstr "Hodnota %(param)s pro %(param_string)s není boolean." - -msgid "Value required for 'scality_sofs_config'" -msgstr "Je třeba zadat hodnotu pro 'scality_sofs_config'" - -#, python-format -msgid "ValueError: %s" -msgstr "Chyba hodnoty: %s" - -#, python-format -msgid "Vdisk %(name)s not involved in mapping %(src)s -> %(tgt)s." -msgstr "Virtuální disk %(name)s není součástí mapování %(src)s -> %(tgt)s." - -#, python-format -msgid "" -"Version %(req_ver)s is not supported by the API. Minimum is %(min_ver)s and " -"maximum is %(max_ver)s." -msgstr "" -"Verze %(req_ver)s není API podporována. Minimální verze je %(min_ver)s a " -"maximální %(max_ver)s." - -#, python-format -msgid "VersionedObject %s cannot retrieve object by id." -msgstr "Objekt s verzí %s nemůže získat objekt podle id." 
- -#, python-format -msgid "VersionedObject %s does not support conditional update." -msgstr "Objekt s verzí %s nepodporuje podmíněné aktualizace." - -#, python-format -msgid "Virtual volume '%s' doesn't exist on array." -msgstr "Virtuální svazek '%s' neexistuje v poli." - -#, python-format -msgid "Vol copy job for dest %s failed." -msgstr "Úkol kopírování svazku do cíle %s selhal." - -#, python-format -msgid "Volume %(deviceID)s not found." -msgstr "Svazek %(deviceID)s nenalezen." - -#, python-format -msgid "" -"Volume %(name)s not found on the array. Cannot determine if there are " -"volumes mapped." -msgstr "" -"Svazek %(name)s nebyl nalezen v poli. Nelze zjistit zda jsou svazky " -"namapovány." - -#, python-format -msgid "Volume %(name)s was created in VNX, but in %(state)s state." -msgstr "Svazek %(name)s byl vytvořen ve VNX, ale je ve stavu %(state)s." - -#, python-format -msgid "Volume %(vol)s could not be created in pool %(pool)s." -msgstr "Svazek %(vol)s nemohl být vytvořen v zásobě %(pool)s." - -#, python-format -msgid "" -"Volume %(vol_id)s status must be available to update readonly flag, but " -"current status is: %(vol_status)s." -msgstr "" -"Stav svazku %(vol_id)s musí být dostupný pro aktualizaci příznaku pouze pro " -"čtení, ale současný stav: %(vol_status)s." - -#, python-format -msgid "" -"Volume %(vol_id)s status must be available, but current status is: " -"%(vol_status)s." -msgstr "" -"Stav svazku %(vol_id)s musí být dostupný, nebo chybný, ale současný stav je: " -"%(vol_status)s." - -#, python-format -msgid "Volume %(volume_id)s could not be found." -msgstr "Svazek %(volume_id)s nemohl být nalezen." - -#, python-format -msgid "" -"Volume %(volume_id)s has no administration metadata with key " -"%(metadata_key)s." -msgstr "" -"Svazek %(volume_id)s nemá žádná popisná data správy mající klíč " -"%(metadata_key)s." - -#, python-format -msgid "Volume %(volume_id)s has no metadata with key %(metadata_key)s." 
-msgstr "Svazek %(volume_id)s nemá žádná metadata s klíčem %(metadata_key)s." - -#, python-format -msgid "" -"Volume %(volume_id)s is currently mapped to unsupported host group %(group)s" -msgstr "" -"Svazek %(volume_id)s je v současnosti mapován k nepodporované skupině " -"hostitele %(group)s" - -#, python-format -msgid "Volume %(volume_id)s is not currently mapped to host %(host)s" -msgstr "Svazek %(volume_id)s není v současnosti mapován k hostiteli %(host)s" - -#, python-format -msgid "Volume %(volume_id)s is still attached, detach volume first." -msgstr "Svazek %(volume_id)s je stále připojen, nejdříve odpojte svazek." - -#, python-format -msgid "Volume %(volume_id)s replication error: %(reason)s" -msgstr "Chyba replikace svazku %(volume_id)s: %(reason)s" - -#, python-format -msgid "Volume %(volume_name)s is busy." -msgstr "Svazek %(volume_name)s je zaneprázdněn." - -#, python-format -msgid "Volume %s could not be created from source volume." -msgstr "Svazek %s nemohl být vytvořen ze zdrojového svazku." - -#, python-format -msgid "Volume %s could not be created on shares." -msgstr "Svazek %s nemohl být vytvořen ve sdílení." - -#, python-format -msgid "Volume %s could not be created." -msgstr "Svazek %s nemohl být vytvořen." - -#, python-format -msgid "Volume %s does not have provider_location specified, skipping." -msgstr "Svazek %s nemá zadáno umístění poskytovatele, je přeskočen." - -#, python-format -msgid "Volume %s doesn't exist on array." -msgstr "Svazek %s neexistuje v poli." - -#, python-format -msgid "Volume %s is already managed by OpenStack." -msgstr "Svazek %s již je spravován OpenStack." - -#, python-format -msgid "" -"Volume %s is online. Set volume to offline for managing using OpenStack." -msgstr "" -"Svazek %s je online. Pro správu pomocí OpenStack ho nastavte na offline." - -#, python-format -msgid "Volume %s must not be part of a consistency group." -msgstr "Svazek %s nesmí být součástí skupiny jednotnosti." 
- -#, python-format -msgid "Volume %s not found." -msgstr "Svazek %s nenalezen." - -#, python-format -msgid "Volume %s: Error trying to extend volume" -msgstr "Svazek %s: Chyba při pokusu o rozšíření svazku" - -#, python-format -msgid "Volume (%s) already exists on array" -msgstr "Svazek (%s) již existuje v poli" - -#, python-format -msgid "Volume (%s) already exists on array." -msgstr "Svazek (%s) již existuje v poli." - -#, python-format -msgid "Volume Group %s does not exist" -msgstr "Skupina svazku %s neexistuje" - -#, python-format -msgid "Volume Type %(id)s already exists." -msgstr "Typ svazku %(id)s již existuje." - -#, python-format -msgid "" -"Volume Type %(volume_type_id)s deletion is not allowed with volumes present " -"with the type." -msgstr "" -"Smazání typu svazku %(volume_type_id)s není povoleno, když existují svazky s " -"tímto typem." - -#, python-format -msgid "" -"Volume Type %(volume_type_id)s has no extra specs with key " -"%(extra_specs_key)s." -msgstr "" -"Typ svazku %(volume_type_id)s nemá žádné dodatečné parametry s klíčem " -"%(extra_specs_key)s." - -msgid "Volume Type id must not be None." -msgstr "ID typu svazku nesmí být None." - -#, python-format -msgid "" -"Volume [%(cb_vol)s] was not found at CloudByte storage corresponding to " -"OpenStack volume [%(ops_vol)s]." -msgstr "" -"Svazek [%(cb_vol)s] nebyl nalezen v úložišti CloudByte odpovídající svazku " -"Openstack [%(ops_vol)s]." - -#, python-format -msgid "Volume [%s] not found in CloudByte storage." -msgstr "Svazek [%s] nebyl nalezen v úložišti CloudByte." - -#, python-format -msgid "Volume attachment could not be found with filter: %(filter)s ." -msgstr "Připojení svazku nebylo nalezen ve filtru: %(filter)s ." 
- -#, python-format -msgid "Volume backend config is invalid: %(reason)s" -msgstr "Nastavení podpůrné vrstvy svazku je neplatné: %(reason)s" - -msgid "Volume by this name already exists" -msgstr "Svazek s tímto názvem již existuje" - -msgid "Volume cannot be restored since it contains snapshots." -msgstr "Svazek nemůže být obnoven protože obsahuje snímky." - -msgid "Volume create failed while extracting volume ref." -msgstr "Vytvoření svazku selhalo při extrahování odkazu svazku." - -#, python-format -msgid "Volume device file path %s does not exist." -msgstr "Cesta k souboru zařízení svazku %s neexistuje." - -#, python-format -msgid "Volume device not found at %(device)s." -msgstr "Zařízení svazku nenalezeno na %(device)s." - -#, python-format -msgid "Volume driver %s not initialized." -msgstr "Ovladač svazku %s není zaveden." - -msgid "Volume driver not ready." -msgstr "Ovladač svazku není připraven." - -#, python-format -msgid "Volume driver reported an error: %(message)s" -msgstr "Ovladač svazku nahlásil chybu: %(message)s" - -#, python-format -msgid "Volume is attached to a server. (%s)" -msgstr "Svazek je připojen k serveru. (%s)" - -msgid "Volume is in-use." -msgstr "Svazek se používá." - -msgid "Volume is not available." -msgstr "Svazek není dostupný." - -msgid "Volume is not local to this node" -msgstr "Svazek není pro tento uzel místním" - -msgid "Volume is not local to this node." -msgstr "Svazek není pro tento uzel místním." - -msgid "" -"Volume metadata backup requested but this driver does not yet support this " -"feature." -msgstr "" -"Zažádáno o zálohu popisných dat svazku, ale tento ovladač tuto funkci zatím " -"nepodporuje." 
- -#, python-format -msgid "Volume migration failed: %(reason)s" -msgstr "Přesunutí svazku selhalo: %(reason)s" - -msgid "Volume must be available" -msgstr "Svazek musí být dostupný" - -msgid "Volume must be in the same availability zone as the snapshot" -msgstr "Svazek musí být ve stejné zóně dostupnosti jako snímek" - -msgid "Volume must be in the same availability zone as the source volume" -msgstr "Svazek musí být ve stejné zóně dostupnosti jako zdrojový svazek" - -msgid "Volume must not be replicated." -msgstr "Svazek nesmí být replikován." - -msgid "Volume must not have snapshots." -msgstr "Svazek nesmí mít snímky." - -#, python-format -msgid "Volume not found for instance %(instance_id)s." -msgstr "Svazek není nalezen v instanci %(instance_id)s." - -msgid "Volume not found on configured storage backend." -msgstr "Svazek nebyl nalezen na nastavené podpůrné vrstvě úložiště." - -msgid "" -"Volume not found on configured storage backend. If your volume name contains " -"\"/\", please rename it and try to manage again." -msgstr "" -"Svazek nebyl nalezen na nastavené podpůrné vrstvě úložiště. Pokud obsah " -"vašeho svazku obsahuje \"/\", je třeba ho přejmenovat a pak se pokus znovu o " -"správu." - -msgid "Volume not found on configured storage pools." -msgstr "Svazek nebyl nalezen v nastavených zásobách úložiště." - -msgid "Volume not found." -msgstr "Svazek nenalezen." - -msgid "Volume not yet assigned to host." -msgstr "Svazek ještě není přidělen k hostiteli." - -msgid "Volume reference must contain source-name element." -msgstr "Odkaz na svazek musí obsahovat prvek source-name." - -#, python-format -msgid "Volume replication for %(volume_id)s could not be found." -msgstr "Replikace svazku %(volume_id)s nemohla být nalezena." - -#, python-format -msgid "Volume service %s failed to start." -msgstr "Služba svazku %s nemohla být spuštěna." - -msgid "Volume should have agent-type set as None." -msgstr "Svazek by měl mít typ agenta nastaven na None." 
- -#, python-format -msgid "" -"Volume size %(volume_size)sGB cannot be smaller than the image minDisk size " -"%(min_disk)sGB." -msgstr "" -"Velikost svazku %(volume_size)sGB nemůže být menší než minimální velikost " -"disku v obrazu %(min_disk)sGB." - -#, python-format -msgid "Volume size '%(size)s' must be an integer and greater than 0" -msgstr "Velikost svazku '%(size)s' musí být celé číslo a větší než 0" - -#, python-format -msgid "" -"Volume size '%(size)s'GB cannot be smaller than original volume size " -"%(source_size)sGB. They must be >= original volume size." -msgstr "" -"Velikost svazku'%(size)s'GB nemůže být menší než původní velikost svazku " -"%(source_size)sGB. Musí být >= původní velikosti svazku." - -#, python-format -msgid "" -"Volume size '%(size)s'GB cannot be smaller than the snapshot size " -"%(snap_size)sGB. They must be >= original snapshot size." -msgstr "" -"Velikost svazku'%(size)s'GB nemůže být menší než velikost snímku " -"%(snap_size)sGB. Musí být <= původní velikosti snímku." - -msgid "Volume size increased since the last backup. Do a full backup." -msgstr "Velikost svazku se od poslední zálohy zvýšila. Proveďte úplnou zálohu." - -msgid "Volume size must multiple of 1 GB." -msgstr "Velikost svazku musí být násobkem 1GB." - -#, python-format -msgid "Volume status must be \"available\" or \"in-use\" for snapshot. (is %s)" -msgstr "" -"Stav svazku musí být pro vytvoření snímku \"available\" nebo \"in-use\". " -"(nyní je %s)" - -msgid "Volume status must be \"available\" or \"in-use\"." -msgstr "Stav svazku musí být \"available\" nebo \"in-use\"." - -#, python-format -msgid "Volume status must be %s to reserve." -msgstr "Stav svazku musí být pro rezervaci %s." - -msgid "Volume status must be 'available'." -msgstr "Stav svazku musí být 'available'." 
- -msgid "Volume to Initiator Group mapping already exists" -msgstr "Mapování svazku ke skupině zavaděče již existuje" - -#, python-format -msgid "" -"Volume to be backed up must be available or in-use, but the current status " -"is \"%s\"." -msgstr "" -"Zálohovaný svazek musí být dostupný nebo používaný, ale jeho stav je nyní " -"\"%s\"." - -msgid "Volume to be restored to must be available" -msgstr "Obnovovaná záloha musí být dostupná" - -#, python-format -msgid "Volume type %(volume_type_id)s could not be found." -msgstr "Typ svazku %(volume_type_id)s nemohl být nalezen." - -#, python-format -msgid "Volume type ID '%s' is invalid." -msgstr "Typ svazku ID '%s' je neplatné." - -#, python-format -msgid "" -"Volume type access for %(volume_type_id)s / %(project_id)s combination " -"already exists." -msgstr "" -"Přístup k typu svazku pro kombinaci %(volume_type_id)s / %(project_id)s již " -"existuje." - -#, python-format -msgid "" -"Volume type access not found for %(volume_type_id)s / %(project_id)s " -"combination." -msgstr "" -"Přístup k typu svazku nenalezen pro kombinaci %(volume_type_id)s / " -"%(project_id)s." - -#, python-format -msgid "Volume type encryption for type %(type_id)s already exists." -msgstr "Šifrování typu svazku pro typ %(type_id)s již existuje." - -#, python-format -msgid "Volume type encryption for type %(type_id)s does not exist." -msgstr "Šifrování typu svazku pro typ %(type_id)s neexistuje." - -msgid "Volume type name can not be empty." -msgstr "Název typu svazku nemůže být prázdný." - -#, python-format -msgid "Volume type with name %(volume_type_name)s could not be found." -msgstr "Typ svazku s názvem %(volume_type_name)s nemohl být nalezen." - -#, python-format -msgid "" -"Volume: %(volumeName)s is not a concatenated volume. You can only perform " -"extend on concatenated volume. Exiting..." -msgstr "" -"Svazek: %(volumeName)s není zřetězený svazek. Rozšíření lze provádět pouze " -"na zřetězeném svazku. Ukončování..." 
- -#, python-format -msgid "" -"VzStorage config 'vzstorage_used_ratio' invalid. Must be > 0 and <= 1.0: %s." -msgstr "" -"Nastavení VzStorage 'vzstorage_used_ratio' je neplatné. Musí být > 0 a <= " -"1.0: %s." - -#, python-format -msgid "VzStorage config file at %(config)s doesn't exist." -msgstr "Soubor s nastavením VzStorage %(config)s nebyl nalezen." - -msgid "Wait replica complete timeout." -msgstr "Vypršel časový limit čekání na dokončení replikace." - -msgid "" -"Waiting for all nodes to join cluster. Ensure all sheep daemons are running." -msgstr "" -"Čekání na připojení uzlů ke clusteru. Ujistěte se, že všichni daemoni sheep " -"jsou spuštěni." - -msgid "X-IO Volume Driver exception!" -msgstr "Výjimka ovladače svazku X-IO!" - -msgid "XtremIO not configured correctly, no iscsi portals found" -msgstr "XtremIO není správně nastaveno, nenalezeny žádné portály iscsi" - -msgid "XtremIO not initialized correctly, no clusters found" -msgstr "XtremIO nebyl zaveden správně, žádné clustery nenalezeny" - -msgid "You must implement __call__" -msgstr "Musíte zavést __call__" - -msgid "" -"You must install hpe3parclient before using 3PAR drivers. Run \"pip install " -"python-3parclient\" to install the hpe3parclient." -msgstr "" -"Před použitím ovladačů 3PAR musíte nainstalovat hpe3parclient. Spusťte \"pip " -"install python-3parclient\" pro instalaci klienta." - -msgid "You must supply an array in your EMC configuration file." -msgstr "Ve vašem souboru s nastavením musíte zadat pole EMC." - -#, python-format -msgid "" -"Your original size: %(originalVolumeSize)s GB is greater than: %(newSize)s " -"GB. Only Extend is supported. Exiting..." -msgstr "" -"Vaše původní velikost: %(originalVolumeSize)s GB je větší než: %(newSize)s " -"GB. Podporováno je pouze rozšíření. Uknčování..." 
- -#, python-format -msgid "ZeroDivisionError: %s" -msgstr "Chyba dělení nulou: %s" - -msgid "Zone" -msgstr "Zóna" - -#, python-format -msgid "Zoning Policy: %s, not recognized" -msgstr "Zásada zónování: %s nebylo rozpoznáno" - -#, python-format -msgid "_create_and_copy_vdisk_data: Failed to get attributes for vdisk %s." -msgstr "" -"Vytvoření a kopírování dat virtuálního disku: Nelze získat vlastnosti " -"virtuálního disku %s." - -msgid "_create_host failed to return the host name." -msgstr "Vytvoření hostitele nevrátilo název hostitele." - -msgid "" -"_create_host: Can not translate host name. Host name is not unicode or " -"string." -msgstr "" -"Vytvoření hostitele: Nelze převést název hostitele. Název není unicode nebo " -"řetězec." - -msgid "_create_host: No connector ports." -msgstr "Vytvoření hostitele: Žádné porty pro připojení." - -#, python-format -msgid "" -"_create_vdisk %(name)s - did not find success message in CLI output.\n" -" stdout: %(out)s\n" -" stderr: %(err)s" -msgstr "" -"Vytvoření virtuálního disku %(name)s - ve výstupu příkazového řádku nebyla " -"nalezena zpráva o úspěchu.\n" -"standardní výstup: %(out)s\n" -"chybový výstup: %(err)s." - -#, python-format -msgid "" -"_get_hdr_dic: attribute headers and values do not match.\n" -" Headers: %(header)s\n" -" Values: %(row)s." -msgstr "" -"Získání slovníku hlavičky: hlavičky vlastnosti a jejich hodnoty se " -"neshodují.\n" -"Hlavičky: %(header)s\n" -"Hodnoty: %(row)s." - -msgid "_get_host_from_connector failed to return the host name for connector." -msgstr "Získání hostitele od konektoru nevrátilo název jeho hostitele." - -msgid "" -"_get_vdisk_map_properties: Could not get FC connection information for the " -"host-volume connection. Is the host configured properly for FC connections?" -msgstr "" -"Získání vlastností mapy virtuálního disku: Nelze získat informace o " -"připojení FC pro připojení svazek-hostitel. Je hostitel správně nastaven pro " -"připojení FC?" 
- -#, python-format -msgid "" -"_get_vdisk_map_properties: No node found in I/O group %(gid)s for volume " -"%(vol)s." -msgstr "" -"Získání vlastností mapy virtuálního disku: Ve skupině vstupu/výstupu %(gid)s " -"nebyl nalezen žádný uzel pro svazek %(vol)s." - -msgid "_update_volume_stats: Could not get storage pool data." -msgstr "Aktualizace statistik svazku: Nelze získat data zásoby úložiště." - -#, python-format -msgid "" -"add_vdisk_copy failed: A copy of volume %s exists. Adding another copy would " -"exceed the limit of 2 copies." -msgstr "" -"Přidání kopie virtuálního disku selhalo: Kopie svazku %s již existuje. " -"Přidání další kopie by překročilo omezení 2 kopií." - -msgid "add_vdisk_copy started without a vdisk copy in the expected pool." -msgstr "" -"Přidání kopie virtuálního disku bylo spuštěno bez kopie v očekávané zásobě." - -#, python-format -msgid "all_tenants must be a boolean, got '%s'." -msgstr "all_tenants musí být boolean, obdrženo %s" - -msgid "already created" -msgstr "již vytvořeno" - -msgid "already_created" -msgstr "již vytvořeno" - -msgid "attach snapshot from remote node" -msgstr "připojit snímek ke vzdálenému uzlu" - -#, python-format -msgid "attribute %s not lazy-loadable" -msgstr "vlastnost %s nelze líně načíst" - -#, python-format -msgid "" -"backup: %(vol_id)s failed to create device hardlink from %(vpath)s to " -"%(bpath)s.\n" -"stdout: %(out)s\n" -" stderr: %(err)s" -msgstr "" -"záloha: %(vol_id)s nemohl vytvořit pevný odkaz na zařízení v %(vpath)s do " -"%(bpath)s.\n" -"standardní výstup: %(out)s\n" -"chybový výstup: %(err)s" - -#, python-format -msgid "" -"backup: %(vol_id)s failed to obtain backup success notification from " -"server.\n" -"stdout: %(out)s\n" -" stderr: %(err)s" -msgstr "" -"záloha: %(vol_id)s nemohla získat oznámení o úspěšném dokončení zálohy od " -"serveru.\n" -"standardní výstup: %(out)s\n" -"chybový výstup: %(err)s" - -#, python-format -msgid "" -"backup: %(vol_id)s failed to run dsmc due to invalid 
arguments on " -"%(bpath)s.\n" -"stdout: %(out)s\n" -" stderr: %(err)s" -msgstr "" -"záloha: %(vol_id)s nemohla spustit dsmc kvůli neplatným argumentům na " -"%(bpath)s.\n" -"standardní výstup: %(out)s\n" -"chybový výstup: %(err)s." - -#, python-format -msgid "" -"backup: %(vol_id)s failed to run dsmc on %(bpath)s.\n" -"stdout: %(out)s\n" -" stderr: %(err)s" -msgstr "" -"záloha: %(vol_id)s nemohla spustit dsmc na %(bpath)s.\n" -"standardní výstup: %(out)s\n" -"chybový výstup: %(err)s." - -#, python-format -msgid "backup: %(vol_id)s failed. %(path)s is not a file." -msgstr "záloha: %(vol_id)s selhala. %(path)s není soubor." - -#, python-format -msgid "" -"backup: %(vol_id)s failed. %(path)s is unexpected file type. Block or " -"regular files supported, actual file mode is %(vol_mode)s." -msgstr "" -"záloha: %(vol_id)s selhala. %(path)s odkazuej na nečekaný typ souboru. " -"Podporován blok, nebo normální soubory, současný režim souboru je " -"%(vol_mode)s." - -#, python-format -msgid "" -"backup: %(vol_id)s failed. Cannot obtain real path to volume at %(path)s." -msgstr "" -"záloha: %(vol_id)s selhala. Nelze získat skutečnou cestu ke svazku na " -"%(path)s." 
- -msgid "being attached by different mode" -msgstr "je připojen v jiném režimu" - -#, python-format -msgid "call failed: %r" -msgstr "Volání selhalo: %r" - -msgid "call failed: GARBAGE_ARGS" -msgstr "Volání selhalo: Zbytečné argumenty" - -msgid "call failed: PROC_UNAVAIL" -msgstr "Volání selhalo: Proces je nedostupný" - -#, python-format -msgid "call failed: PROG_MISMATCH: %r" -msgstr "Volání selhalo: Neshoda programu: %r" - -msgid "call failed: PROG_UNAVAIL" -msgstr "Volání selhalo: Program je nedostupný" - -#, python-format -msgid "can't find lun-map, ig:%(ig)s vol:%(vol)s" -msgstr "nelze najít lun-map, ig:%(ig)s svazek:%(vol)s" - -msgid "can't find the volume to extend" -msgstr "nelze najít svazek pro rozšíření" - -msgid "can't handle both name and index in req" -msgstr "v žádosti nelze zpracovat jak název tak index najednou" - -msgid "cannot understand JSON" -msgstr "JSON nelze porozumět" - -#, python-format -msgid "cg-%s" -msgstr "skupinajednotnosti-%s" - -msgid "cgsnapshot assigned" -msgstr "snímek skupiny jednotnosti přidělen" - -msgid "cgsnapshot changed" -msgstr "snímek skupiny jednotnosti změněn" - -msgid "cgsnapshots assigned" -msgstr "snímky skupiny jednotnosti přiděleny" - -msgid "cgsnapshots changed" -msgstr "snímky skupiny jednotnosti změněny" - -msgid "" -"check_for_setup_error: Password or SSH private key is required for " -"authentication: set either san_password or san_private_key option." -msgstr "" -"Kontrola chyby v nastavení: Heslo nebo soukromý klíč SSH jsou vyžadovány pro " -"ověření: nastavte volbu san_password nebo san_private_key." - -msgid "check_for_setup_error: Unable to determine system id." -msgstr "Kontrola chyby v nastavení: Nelze zjistit id systému." - -msgid "check_for_setup_error: Unable to determine system name." -msgstr "Kontrola chyby v nastavení: Nelze zjistit název systému." - -msgid "check_hypermetro_exist error." -msgstr "Chyba při kontrole existence hypermetra." 
- -#, python-format -msgid "clone depth exceeds limit of %s" -msgstr "hloubka klonování překračuje omezení %s" - -msgid "consistencygroup assigned" -msgstr "skupina jednotnosti přidělena" - -msgid "consistencygroup changed" -msgstr "skupina jednotnosti změněna" - -msgid "control_location must be defined" -msgstr "ovládací umístění musí být zadáno" - -msgid "create_cloned_volume: Source and destination size differ." -msgstr "Vytvoření klonovaného svazku: Velikost zdroje a cíle se liší." - -#, python-format -msgid "create_copy: Source vdisk %(src)s (%(src_id)s) does not exist." -msgstr "" -"Vytvoření kopie: Zdrojový virtuální disk %(src)s (%(src_id)s) neexistuje." - -#, python-format -msgid "create_copy: Source vdisk %(src)s does not exist." -msgstr "Vytvoření kopie: Zdrojový virtuální disk %(src)s neexistuje." - -msgid "create_host: Host name is not unicode or string." -msgstr "Vytvoření hostitele: Název není unicode nebo řetězec." - -msgid "create_host: No initiators or wwpns supplied." -msgstr "Vytvoření hostitele: Nepředány žádné zavaděče nebo wwpns." - -msgid "create_hypermetro_pair error." -msgstr "Chyba při vytváření páru hypermetro." - -#, python-format -msgid "" -"create_snapshot: Volume status must be \"available\" or \"in-use\" for " -"snapshot. The invalid status is %s." -msgstr "" -"Vytvoření snímku: Stav svazku musí být pro vytvoření snímku \"available\" " -"nebo \"in-use\". Neplatný stav je %s." - -msgid "create_snapshot: get source volume failed." -msgstr "Vytvoření snímku: Získání zdrojového svazku selhalo." - -#, python-format -msgid "create_volume_from_snapshot: Snapshot %(name)s does not exist." -msgstr "Vytváření svazku ze snímku: Svazek %(name)s neexistuje." - -#, python-format -msgid "" -"create_volume_from_snapshot: Snapshot status must be \"available\" for " -"creating volume. The invalid status is: %s." -msgstr "" -"Vytvoření svazku ze snímku: Pro vytvoření svazku musí být stav snímku " -"\"dostupný\". Neplatný stav je: %s." 
- -msgid "" -"create_volume_from_snapshot: Volume size is different from snapshot based " -"volume." -msgstr "" -"Vytvoření svazku ze snímku: Velikost svazku se liší od svazku ze snímku." - -#, python-format -msgid "" -"delete: %(vol_id)s failed to run dsmc due to invalid arguments with stdout: " -"%(out)s\n" -" stderr: %(err)s" -msgstr "" -"smazání: %(vol_id)s nemohla spustit dsmc kvůli neplatným argumentům se " -"standardním výstupem: %(out)s.\n" -"chybový výstup: %(err)s." - -#, python-format -msgid "" -"delete: %(vol_id)s failed to run dsmc with stdout: %(out)s\n" -" stderr: %(err)s" -msgstr "" -"smazání: %(vol_id)s nemohla spustit dsmc pomocí standardního výstupu: " -"%(out)s.\n" -"chybový výstup: %(err)s." - -msgid "delete_hypermetro error." -msgstr "Chyba při mazání hypermetra." - -#, python-format -msgid "delete_initiator: %s ACL not found. Continuing." -msgstr "Mazání iniciátora: %s ACL nenalezeno. Pokračování." - -#, python-format -msgid "deleting snapshot %(snapshot_name)s that has dependent volumes" -msgstr "mazání snímku %(snapshot_name)s který má nezávislé svazky" - -#, python-format -msgid "deleting volume %(volume_name)s that has snapshot" -msgstr "mazání svazku %(volume_name)s který má snímek" - -msgid "detach snapshot from remote node" -msgstr "odpojit snímek ze vzdáleného uzle" - -msgid "do_setup: No configured nodes." -msgstr "Zavedení: Nenastaveny žádné uzly." 
- -#, python-format -msgid "" -"error writing object to swift, MD5 of object in swift %(etag)s is not the " -"same as MD5 of object sent to swift %(md5)s" -msgstr "" -"chyba při zápisu objektu do swift, MD5 objektu ve swift %(etag)s se liší od " -"MD5 objektu odeslaného do swift %(md5)s" - -msgid "failed to create new_volume on destination host" -msgstr "nelze vytvořit nový svazek na cílovém hostiteli" - -msgid "fake" -msgstr "falešný" - -#, python-format -msgid "file already exists at %s" -msgstr "soubor již existuje v %s" - -msgid "fileno is not supported by SheepdogIOWrapper" -msgstr "fileno nepodporuje SheepdogIOWrapper" - -msgid "fileno() not supported by RBD()" -msgstr "fileno() není podporován RBD()" - -msgid "" -"flashsystem_multihostmap_enabled is set to False, not allow multi host " -"mapping. CMMVC6071E The VDisk-to-host mapping was not created because the " -"VDisk is already mapped to a host." -msgstr "" -"flashsystem_multihostmap_enabled je nastaven na False, neumožňující mapování " -"více hostitelů. CMMVC6071E, mapování virtuálního disku na hostitele nebylo " -"vytvořeno protože disk k hostiteli již je namapován." - -msgid "flush() not supported in this version of librbd" -msgstr "flush() není podporován touto verzí librbd" - -#, python-format -msgid "fmt=%(fmt)s backed by: %(backing_file)s" -msgstr "fmt=%(fmt)s zálohováno: %(backing_file)s" - -#, python-format -msgid "fmt=%(fmt)s backed by:%(backing_file)s" -msgstr "fmt=%(fmt)s zálohováno:%(backing_file)s" - -msgid "force delete" -msgstr "vynutit smazání" - -msgid "get_hyper_domain_id error." -msgstr "chyb při získávání id domény hypermetra." - -msgid "get_hypermetro_by_id error." -msgstr "Chyba při získávání hypermetra podle id." - -#, python-format -msgid "" -"get_iscsi_params: Failed to get target IP for initiator %(ini)s, please " -"check config file." -msgstr "" -"Získání parametrů iSCSI: Nelze získat cílovou IP adresu pro zavaděč %(ini)s, " -"prosím zkontrolujte soubor s nastavením." 
- -msgid "glance_metadata changed" -msgstr "popisná data glance změněna" - -#, python-format -msgid "" -"gpfs_images_share_mode is set to copy_on_write, but %(vol)s and %(img)s " -"belong to different file systems." -msgstr "" -"gpfs_images_share_mode je nastaven na kopírování při zápisu, ale %(vol)s a " -"%(img)s mají odlišné systémy souborů." - -#, python-format -msgid "" -"gpfs_images_share_mode is set to copy_on_write, but %(vol)s and %(img)s " -"belong to different filesets." -msgstr "" -"gpfs_images_share_mode je nastaven na kopírování při zápisu, ale %(vol)s a " -"%(img)s mají odlišné sady souborů." - -#, python-format -msgid "" -"hgst_group %(grp)s and hgst_user %(usr)s must map to valid users/groups in " -"cinder.conf" -msgstr "" -"skupina hgst %(grp)s a uživatel hgst %(usr)s musí být namapovány k platným " -"živatelům/skupinám v cinder.conf" - -#, python-format -msgid "hgst_net %(net)s specified in cinder.conf not found in cluster" -msgstr "síť hgst %(net)s zadaná v cinder.conf nebyla nalezena v clusteru" - -msgid "hgst_redundancy must be set to 0 (non-HA) or 1 (HA) in cinder.conf." -msgstr "" -"redundance hgst musí být v cinder.conf nastavena na 0 (pokud nechcete HA) " -"nebo 1 (pro HA)." - -msgid "hgst_space_mode must be an octal/int in cinder.conf" -msgstr "" -"režim prostoru hgst musí být v cinder.conf zadán osmičkové soustavě/" -"celočíselně" - -#, python-format -msgid "hgst_storage server %(svr)s not of format :" -msgstr "server úložiště hgst %(svr)s nemá formát :" - -msgid "hgst_storage_servers must be defined in cinder.conf" -msgstr "V cinder.conf musí být určeny servery úložiště hgst" - -msgid "" -"http service may have been abruptly disabled or put to maintenance state in " -"the middle of this operation." -msgstr "" -"Služba http mohla být uprostřed operace náhle ukončena, nebo převedena do " -"stavu údržby." 
- -msgid "id cannot be None" -msgstr "id nemůže být None" - -#, python-format -msgid "image %s not found" -msgstr "Obraz %s nebyl nalezen" - -#, python-format -msgid "initialize_connection: Failed to get attributes for volume %s." -msgstr "Zavedení spojení: Nelze získat vlastnosti svazku %s." - -#, python-format -msgid "initialize_connection: Missing volume attribute for volume %s." -msgstr "Zavedení spojení: Svazku %s chybí vlastnost." - -#, python-format -msgid "" -"initialize_connection: No node found in I/O group %(gid)s for volume %(vol)s." -msgstr "" -"Zavedení spojení: Ve skupině vstupu/výstupu %(gid)s nebyl nalezen žádný uzel " -"pro svazek %(vol)s." - -#, python-format -msgid "initialize_connection: vdisk %s is not defined." -msgstr "Zavedení připojení: Virtuální disk %s není určen." - -#, python-format -msgid "invalid user '%s'" -msgstr "Neplatný uživatel '%s'" - -#, python-format -msgid "iscsi portal, %s, not found" -msgstr "portál iscsi %s nebyl nalezen" - -msgid "" -"iscsi_ip_address must be set in config file when using protocol 'iSCSI'." -msgstr "" -"IP adresa iscsi musí být při použití protokolu 'iSCSI' uvedena v souboru s " -"nastavením." - -#, python-format -msgid "key manager error: %(reason)s" -msgstr "chyba správce klíčů: %(reason)s" - -msgid "limit param must be an integer" -msgstr "parametr limit musí být celé číslo" - -msgid "limit param must be positive" -msgstr "parametr limit musí být kladný" - -msgid "manage_existing requires a 'name' key to identify an existing volume." -msgstr "" -"manage_existing vyžaduje klíč 'name' pro identifikaci existujícího svazku." 
- -#, python-format -msgid "marker [%s] not found" -msgstr "značka [%s] nenalezena" - -#, python-format -msgid "mdiskgrp missing quotes %s" -msgstr "mdiskgrp chybí uvozovky %s" - -#, python-format -msgid "migration_policy must be 'on-demand' or 'never', passed: %s" -msgstr "zásady přesunu musí být 'on-demand' nebo 'never', předáno: %s" - -#, python-format -msgid "mkfs failed on volume %(vol)s, error message was: %(err)s." -msgstr "mkfs selhalo u svazku %(vol)s, chybová zpráva byla: %(err)s." - -msgid "mock" -msgstr "nepravý" - -msgid "mount.glusterfs is not installed" -msgstr "mount.glusterfs není nainstalováno" - -#, python-format -msgid "multiple resources with name %s found by drbdmanage" -msgstr "drbdmanage nalezl mnoho zdrojů s názvem %s" - -#, python-format -msgid "multiple resources with snapshot ID %s found" -msgstr "nalezeno mnoho zdrojů mající ID snímku %s" - -msgid "name cannot be None" -msgstr "název nemůže být Žádný" - -#, python-format -msgid "no REPLY but %r" -msgstr "Žádná odpověď ale %r" - -#, python-format -msgid "no snapshot with id %s found in drbdmanage" -msgstr "v drbdmanage nenalezen žádný snímek s id %s" - -#, python-format -msgid "not exactly one snapshot with id %s" -msgstr "více než jeden snímek s id %s" - -#, python-format -msgid "not exactly one volume with id %s" -msgstr "id %s nemá právě jeden svazek" - -#, python-format -msgid "obj missing quotes %s" -msgstr "objektu chybí uvozovky %s" - -msgid "open_access_enabled is not off." -msgstr "open_access_enabled není vypnuto." - -msgid "progress must be an integer percentage" -msgstr "postup musí být procento vyjádřené celým číslem" - -msgid "provider must be defined" -msgstr "poskytovatel musí být zadán" - -#, python-format -msgid "" -"qemu-img %(minimum_version)s or later is required by this volume driver. " -"Current qemu-img version: %(current_version)s" -msgstr "" -"Ovladač tohoto svazku vyžaduje qemu-img %(minimum_version)s nebo novější. 
" -"Současná verze qemu-img: %(current_version)s" - -#, python-format -msgid "" -"qemu-img is not installed and image is of type %s. Only RAW images can be " -"used if qemu-img is not installed." -msgstr "" -"qemu-img není nainstalováno a obraz je typu %s. Pokud qemu-img není " -"nainstalován, lze použít pouze obrazy s typem RAW." - -msgid "" -"qemu-img is not installed and the disk format is not specified. Only RAW " -"images can be used if qemu-img is not installed." -msgstr "" -"qemu-img není nainstalováno a formát disku není zadán. Pokud qemu-img není " -"nainstalován, lze použít pouze obrazy s typem RAW." - -msgid "rados and rbd python libraries not found" -msgstr "Python knihovny rados a rbd nebyly nalezeny" - -#, python-format -msgid "read_deleted can only be one of 'no', 'yes' or 'only', not %r" -msgstr "read_deleted může být buď 'no', 'yes' nebo 'only', ne %r" - -#, python-format -msgid "" -"restore: %(vol_id)s failed to run dsmc due to invalid arguments on " -"%(bpath)s.\n" -"stdout: %(out)s\n" -" stderr: %(err)s" -msgstr "" -"obnova: %(vol_id)s nemohla spustit dsmc kvůli neplatným argumentům na " -"%(bpath)s.\n" -"standardní výstup: %(out)s\n" -"chybový výstup: %(err)s." - -#, python-format -msgid "" -"restore: %(vol_id)s failed to run dsmc on %(bpath)s.\n" -"stdout: %(out)s\n" -" stderr: %(err)s" -msgstr "" -"obnova: %(vol_id)s nemohla spustit dsmc na %(bpath)s.\n" -"standardní výstup: %(out)s\n" -"chybový výstup: %(err)s." - -#, python-format -msgid "" -"restore: %(vol_id)s failed.\n" -"stdout: %(out)s\n" -" stderr: %(err)s." -msgstr "" -"obnova: %(vol_id)s selhala.\n" -"standardní výstup: %(out)s\n" -"chybový výstup: %(err)s." - -msgid "" -"restore_backup aborted, actual object list does not match object list stored " -"in metadata." -msgstr "" -"obnovení zálohy zrušeno, současný seznam objektů neodpovídá seznamu " -"uloženého v popisných datech." - -#, python-format -msgid "rtslib_fb is missing member %s: You may need a newer python-rtslib-fb." 
-msgstr "" -"rtslib_fb chybí člen %s: Možná budete potřebovat novější python-rtslib-fb." - -msgid "san_ip is not set." -msgstr "san_ip není nastaveno." - -msgid "san_ip must be set" -msgstr "san_ip musí být nastaveno" - -msgid "" -"san_login and/or san_password is not set for Datera driver in the cinder." -"conf. Set this information and start the cinder-volume service again." -msgstr "" -"san_login a /nebo san_password není v cinder.conf nastaveno pro ovladač " -"Datera. Nastavte tyto údaje a spusťte znovu službu cinder-volume." - -msgid "serve() can only be called once" -msgstr "serve() může být voláno pouze jednou" - -#, python-format -msgid "snapshot-%s" -msgstr "snímek-%s" - -msgid "snapshots assigned" -msgstr "snímky přiděleny" - -msgid "snapshots changed" -msgstr "snímek změněn" - -#, python-format -msgid "source volume id:%s is not replicated" -msgstr "id zdrojového svazku:%s není replikováno" - -msgid "status must be available" -msgstr "stav musí být dostupný" - -msgid "stop_hypermetro error." -msgstr "Chyba při zastavování hypermetra." - -msgid "sync_hypermetro error." -msgstr "Chyba při synchronizaci hypermetra." - -#, python-format -msgid "" -"targetcli not installed and could not create default directory " -"(%(default_path)s): %(exc)s" -msgstr "" -"targetcli není nainstalováno a nelze vytvořit výchozí adresář " -"(%(default_path)s): %(exc)s" - -msgid "terminate_connection: Failed to get host name from connector." -msgstr "Ukončení spojení: Nelze získat název hostitele z konektoru." 
- -msgid "timeout creating new_volume on destination host" -msgstr "při vytváření nového svazku na cílovém hostiteli vypršel časový limit" - -msgid "too many body keys" -msgstr "příliš mnoho klíčů těla" - -#, python-format -msgid "umount: %s: not mounted" -msgstr "odpojení: %s: není připojeno" - -#, python-format -msgid "umount: %s: target is busy" -msgstr "odpojení: %s: cíl je zaneprázdněn" - -msgid "umount: : some other error" -msgstr "odpojení: : nějaká jiná chyba" - -msgid "umount: : target is busy" -msgstr "odpojení: : zařízení je zaneprázdněno" - -#, python-format -msgid "unrecognized argument %s" -msgstr "nerozpoznaný argument %s" - -#, python-format -msgid "unsupported compression algorithm: %s" -msgstr "nepodporovaný kompresní algoritmus: %s" - -msgid "valid iqn needed for show_target" -msgstr "Pro zobrazení cíle je třeba platné iqn" - -#, python-format -msgid "vdisk %s is not defined." -msgstr "virtuální disk %s není určen." - -msgid "vmemclient python library not found" -msgstr "Python knihovna vmemclient nebyla nalezena" - -#, python-format -msgid "volume %s not found in drbdmanage" -msgstr "svazek %s není nalezen v drbdmanage" - -msgid "volume assigned" -msgstr "svazek přidělen" - -msgid "volume changed" -msgstr "svazek změněn" - -msgid "volume is already attached" -msgstr "svazek již je připojen" - -msgid "volume is not local to this node" -msgstr "svazek není pro tento uzel místním" - -#, python-format -msgid "" -"volume size %(volume_size)d is too small to restore backup of size %(size)d." -msgstr "" -"velikost svazku %(volume_size)d je příliš malá pro obnovení zálohy o " -"velikosti %(size)d." - -#, python-format -msgid "volume size %d is invalid." -msgstr "velikost svazku %d je neplatná." - -msgid "" -"volume_type must be provided when creating a volume in a consistency group." -msgstr "při vytváření svazku ve skupině jednotnosti musí být zadán jeho typ." 
- -msgid "volume_type_id cannot be None" -msgstr "volume_type_id nemůže být None" - -#, python-format -msgid "volume_types must be provided to create consistency group %(name)s." -msgstr "" -"pro vytvoření skupiny jednotnosti %(name)s musí být zadány typy svazků." - -#, python-format -msgid "volume_types must be provided to create consistency group %s." -msgstr "pro vytvoření skupiny jednotnosti %s musí být zadány typy svazků." - -msgid "volumes assigned" -msgstr "svazky přiděleny" - -msgid "volumes changed" -msgstr "svazky změněny" - -#, python-format -msgid "wait_for_condition: %s timed out." -msgstr "Čekání na podmínku: %s vypršel časový limit." diff --git a/cinder/locale/de/LC_MESSAGES/cinder.po b/cinder/locale/de/LC_MESSAGES/cinder.po deleted file mode 100644 index d18e19b17..000000000 --- a/cinder/locale/de/LC_MESSAGES/cinder.po +++ /dev/null @@ -1,10172 +0,0 @@ -# Translations template for cinder. -# Copyright (C) 2015 ORGANIZATION -# This file is distributed under the same license as the cinder project. -# -# Translators: -# Alec Hans , 2013 -# Ettore Atalan , 2014 -# FIRST AUTHOR , 2011 -# Andreas Jaeger , 2016. #zanata -# Monika Wolf , 2016. #zanata -# Robert Simai , 2016. 
#zanata -msgid "" -msgstr "" -"Project-Id-Version: cinder 9.0.0.0rc2.dev243\n" -"Report-Msgid-Bugs-To: https://bugs.launchpad.net/openstack-i18n/\n" -"POT-Creation-Date: 2016-10-14 23:32+0000\n" -"MIME-Version: 1.0\n" -"Content-Type: text/plain; charset=UTF-8\n" -"Content-Transfer-Encoding: 8bit\n" -"PO-Revision-Date: 2016-10-13 10:55+0000\n" -"Last-Translator: Robert Simai \n" -"Language: de\n" -"Plural-Forms: nplurals=2; plural=(n != 1);\n" -"Generated-By: Babel 2.0\n" -"X-Generator: Zanata 3.7.3\n" -"Language-Team: German\n" - -#, python-format -msgid "\t%s" -msgstr "\t%s" - -#, python-format -msgid "" -"\n" -"OpenStack Cinder version: %(version)s\n" -msgstr "" -"\n" -"OpenStack-Cinder-Version: %(version)s\n" - -#, python-format -msgid " but size is now %d" -msgstr "aber die Größe ist jetzt %d" - -#, python-format -msgid " but size is now %d." -msgstr "aber die Größe ist jetzt %d." - -msgid " or " -msgstr "oder" - -#, python-format -msgid "%(attr)s is not set." -msgstr "%(attr)s ist nicht festgelegt." - -#, python-format -msgid "" -"%(driver)s manage_existing cannot manage a volume connected to hosts. Please " -"disconnect this volume from existing hosts before importing" -msgstr "" -"%(driver)s manage_existing kann einen Datenträger, der mit Hosts verbunden " -"ist, nicht verwalten. Trennen Sie vor dem Importieren die Verbindung dieses " -"Datenträgers zu vorhandenen Hosts." - -#, python-format -msgid "%(err)s" -msgstr "%(err)s" - -#, python-format -msgid "" -"%(err)s\n" -"result: %(res)s." -msgstr "" -"%(err)s\n" -"Ergebnis: %(res)s." - -#, python-format -msgid "%(error_message)s" -msgstr "%(error_message)s" - -#, python-format -msgid "%(exception)s: %(explanation)s" -msgstr "%(exception)s: %(explanation)s" - -#, python-format -msgid "%(file)s: Permission denied." -msgstr "%(file)s: Berechtigung verweigert." 
- -#, python-format -msgid "" -"%(fun)s: Failed with unexpected CLI output.\n" -" Command: %(cmd)s\n" -" stdout: %(out)s\n" -" stderr: %(err)s" -msgstr "" -"%(fun)s: Fehler mit nicht erwarteter CLI-Ausgabe.\n" -" Befehl: %(cmd)s\n" -" Standardausgabe: %(out)s\n" -" Standardfehler: %(err)s" - -#, python-format -msgid "%(host)-25s\t%(availability_zone)-15s" -msgstr "%(host)-25s\t%(availability_zone)-15s" - -#, python-format -msgid "%(host)-25s\t%(zone)-15s" -msgstr "%(host)-25s\t%(zone)-15s" - -#, python-format -msgid "%(message)s" -msgstr "%(message)s" - -#, python-format -msgid "" -"%(message)s\n" -"Status Code: %(_status)s\n" -"Body: %(_body)s" -msgstr "" -"%(message)s\n" -"Statuscode: %(_status)s\n" -"Nachrichtentext: %(_body)s" - -#, python-format -msgid "%(message)s, subjectAltName: %(sanList)s." -msgstr "%(message)s, subjectAltName: %(sanList)s." - -#, python-format -msgid "" -"%(msg_type)s: creating NetworkPortal: ensure port %(port)d on ip %(ip)s is " -"not in use by another service." -msgstr "" -"%(msg_type)s: Erstellen von NetworkPortal: Stellen Sie sicher, dass Port " -"%(port)d unter IP %(ip)s nicht durch einen anderen Dienst verwendet wird." - -#, python-format -msgid "" -"%(op)s: backup %(bck_id)s, volume %(vol_id)s failed. Backup object has " -"unexpected mode. Image or file backups supported, actual mode is " -"%(vol_mode)s." -msgstr "" -"%(op)s: Sicherung %(bck_id)s, Datenträger %(vol_id)s fehlgeschlagen. " -"Sicherungsobjekt hat unerwarteten Modus. Abbild- oder Dateisicherungen " -"werden unterstützt, tatsächlicher Modus ist %(vol_mode)s." - -#, python-format -msgid "%(service)s Service is not %(status)s on storage appliance: %(host)s" -msgstr "" -"%(service)s-Dienst befindet sich nicht %(status)s auf der Speicher-" -"Appliance: %(host)s" - -#, python-format -msgid "%(value_name)s must be <= %(max_value)d" -msgstr "%(value_name)s muss <= %(max_value)d sein." 
- -#, python-format -msgid "%(value_name)s must be >= %(min_value)d" -msgstr "%(value_name)s muss >= %(min_value)d sein." - -#, python-format -msgid "" -"%(worker_name)s value of %(workers)d is invalid, must be greater than 0." -msgstr "" -"Wert %(worker_name)s von %(workers)d ist ungültig. Der Wert muss größer als " -"0 sein." - -#, python-format -msgid "%s" -msgstr "%s" - -#, python-format -msgid "%s \"data\" is not in result." -msgstr "%s \"data\" ist nicht im Ergebnis enthalten." - -#, python-format -msgid "" -"%s cannot be accessed. Verify that GPFS is active and file system is mounted." -msgstr "" -"Auf %s kann nicht zugegriffen werden. Überprüfen Sie, ob GPFS aktiv ist und " -"das Dateisystem eingehängt ist." - -#, python-format -msgid "%s cannot be resized using clone operation as it contains no blocks." -msgstr "" -"Größe von %s kann nicht mithilfe der Klonoperation geändert werden, da keine " -"Blocks enthalten sind." - -#, python-format -msgid "" -"%s cannot be resized using clone operation as it is hosted on compressed " -"volume" -msgstr "" -"Größe von %s kann nicht mithilfe der Klonoperation geändert werden, da auf " -"einem komprimierten Datenträger gehostet." - -#, python-format -msgid "%s configuration option is not set." -msgstr "Konfigurationsoption %s ist nicht festgelegt." - -#, python-format -msgid "%s does not exist." -msgstr "%s ist nicht vorhanden." - -#, python-format -msgid "%s is not a directory." -msgstr "%s ist kein Verzeichnis." - -#, python-format -msgid "%s is not installed" -msgstr "%s ist nicht installiert." - -#, python-format -msgid "%s is not installed." -msgstr "%s ist nicht installiert." - -#, python-format -msgid "%s is not set" -msgstr "%s ist nicht festgelegt" - -#, python-format -msgid "%s is not set and is required for the replication device to be valid." -msgstr "" -"%s ist nicht festgelegt, ist jedoch für ein gültiges Replikationsgerät " -"erforderlich." - -#, python-format -msgid "%s is not set." 
-msgstr "%s ist nicht festgelegt." - -#, python-format -msgid "%s must be a valid raw or qcow2 image." -msgstr "%s muss ein gültiges raw- oder qcow2-Abbild sein." - -#, python-format -msgid "%s must be an absolute path." -msgstr "%s muss ein absoluter Pfad sein." - -#, python-format -msgid "%s must be an integer." -msgstr "%s muss eine Ganzzahl sein." - -#, python-format -msgid "%s not set in cinder.conf" -msgstr "%s in cinder.conf nicht festgelegt." - -#, python-format -msgid "%s not set." -msgstr "%s nicht festgelegt." - -#, python-format -msgid "'%(key)s = %(value)s'" -msgstr "'%(key)s = %(value)s'" - -#, python-format -msgid "" -"'%(prot)s' is invalid for flashsystem_connection_protocol in config file. " -"valid value(s) are %(enabled)s." -msgstr "" -"'%(prot)s' ist für flashsystem_connection_protocol in Konfigurationsdatei " -"ungültig. Gültige Werte sind %(enabled)s." - -msgid "'active' must be present when writing snap_info." -msgstr "'active' muss vorhanden sein, wenn snap_info geschrieben wird." - -msgid "'consistencygroup_id' must be specified" -msgstr "'consistencygroup_id' muss angegeben werden." - -msgid "'qemu-img info' parsing failed." -msgstr "Auswertung von 'qemu-img info' fehlgeschlagen." - -msgid "'status' must be specified." -msgstr "'status' muss angegeben werden." - -msgid "'volume_id' must be specified" -msgstr "'volume_id' muss angegeben werden." - -#, python-format -msgid "" -"(Command: %(cmd)s) (Return Code: %(exit_code)s) (Stdout: %(stdout)s) " -"(Stderr: %(stderr)s)" -msgstr "" -"(Befehl: %(cmd)s) (Rückgabecode: %(exit_code)s) (Standardausgabe: " -"%(stdout)s) (Standard-Fehlerausgabe: %(stderr)s)" - -#, python-format -msgid "A LUN (HLUN) was not found. (LDEV: %(ldev)s)" -msgstr "Eine LUN (HLUN) wurde nicht gefunden. (LDEV: %(ldev)s)" - -msgid "A concurrent, possibly contradictory, request has been made." -msgstr "" -"Es wurde eine gleichzeitige, möglicherweise widersprüchliche, Anforderung " -"gestellt." 
- -#, python-format -msgid "" -"A free LUN (HLUN) was not found. Add a different host group. (LDEV: %(ldev)s)" -msgstr "" -"Es wurde keine freie LUN (HLUN) gefunden. Fügen Sie eine andere Hostgruppe " -"hinzu. (Logische Einheit: '%(ldev)s')" - -#, python-format -msgid "A host group could not be added. (port: %(port)s, name: %(name)s)" -msgstr "" -"Eine Hostgruppe konnte nicht hinzugefügt werden. (Port: %(port)s, Name: " -"%(name)s)" - -#, python-format -msgid "" -"A host group could not be deleted. (port: %(port)s, gid: %(gid)s, name: " -"%(name)s)" -msgstr "" -"Eine Hostgruppe konnte nicht gelöscht werden. (Port: %(port)s, GID: %(gid)s, " -"Name: '%(name)s')" - -#, python-format -msgid "A host group is invalid. (host group: %(gid)s)" -msgstr "Eine Hostgruppe ist ungültig. (Hostgruppe: %(gid)s)" - -#, python-format -msgid "A pair cannot be deleted. (P-VOL: %(pvol)s, S-VOL: %(svol)s)" -msgstr "" -"Ein Paar kann nicht gelöscht werden. (P-VOL: %(pvol)s, S-VOL: %(svol)s)" - -#, python-format -msgid "" -"A pair could not be created. The maximum number of pair is exceeded. (copy " -"method: %(copy_method)s, P-VOL: %(pvol)s)" -msgstr "" -"Es konnte kein Paar erstellt werden. Die maximale Paaranzahl wurde " -"überschritten. (Kopiermethode: %(copy_method)s, P-VOL: %(pvol)s)" - -#, python-format -msgid "A parameter is invalid. (%(param)s)" -msgstr "Ein Parameter ist ungültig. (%(param)s)" - -#, python-format -msgid "A parameter value is invalid. (%(meta)s)" -msgstr "Ein Parameterwert ist ungültig. (%(meta)s)" - -#, python-format -msgid "A pool could not be found. (pool id: %(pool_id)s)" -msgstr "Ein Pool konnte nicht gefunden werden. (Pool-ID: %(pool_id)s)" - -#, python-format -msgid "A snapshot status is invalid. (status: %(status)s)" -msgstr "Ein Schattenkopiestatus ist ungültig. (Status: %(status)s)" - -msgid "A valid secondary target MUST be specified in order to failover." -msgstr "Für ein Failover muss ein gültiges sekundäres Ziel angegeben werden." 
- -msgid "A volume ID or share was not specified." -msgstr "Es wurde keine Datenträger-ID oder Freigabe angegeben." - -#, python-format -msgid "A volume status is invalid. (status: %(status)s)" -msgstr "Ein Datenträgerstatus ist ungültig. (Status: %(status)s)" - -#, python-format -msgid "API %(name)s failed with error string %(err)s" -msgstr "API %(name)s fehlgeschlagen mit Fehlerzeichenkette %(err)s" - -#, python-format -msgid "" -"API Version String %(version)s is of invalid format. Must be of format " -"MajorNum.MinorNum." -msgstr "" -"API-Versionszeichenkette %(version)s ist im falschen Format. Erforderliches " -"Format: MajorNum.MinorNum." - -msgid "API key is missing for CloudByte driver." -msgstr "API-Schlüssel für CloudByte-Treiber fehlt." - -#, python-format -msgid "API response: %(response)s" -msgstr "API-Antwort: %(response)s" - -#, python-format -msgid "API response: %s" -msgstr "API-Antwort: %s" - -#, python-format -msgid "API version %(version)s is not supported on this method." -msgstr "Die API-Version %(version)s wird für diese Methode nicht unterstützt." - -msgid "API version could not be determined." -msgstr "API-Version konnte nicht bestimmt werden." - -msgid "" -"About to delete child projects having non-zero quota. This should not be " -"performed" -msgstr "" -"Es wird versucht, untergeordnete Projekte mit einem Kontingent ungleich null " -"zu löschen. Dies sollte nicht ausgeführt werden." - -msgid "Access list not available for public volume types." -msgstr "Zugriffsliste ist für öffentliche Datenträgertypen nicht verfügbar." - -msgid "Activate or deactivate QoS error." -msgstr "Fehler beim Aktivieren oder Inaktivieren von QoS." - -msgid "Activate snapshot error." -msgstr "Fehler beim Aktivieren der Schattenkopie." - -msgid "Add FC port to host error." -msgstr "Fehler beim Hinzufügen des FC-Ports zum Host." - -msgid "Add fc initiator to array error." -msgstr "Fehler beim Hinzufügen des FC-Initiators zum Array." 
- -msgid "Add initiator to array error." -msgstr "Fehler beim Hinzufügen des Initiators zum Array." - -msgid "Add lun to cache error." -msgstr "Fehler beim Hinzufügen der LUN zum Zwischenspeicher." - -msgid "Add lun to partition error." -msgstr "Fehler beim Hinzufügen der LUN zur Partition." - -msgid "Add mapping view error." -msgstr "Fehler beim Hinzufügen der Zuordnungsansicht." - -msgid "Add new host error." -msgstr "Fehler beim Hinzufügen des neuen Hosts." - -msgid "Add port to port group error." -msgstr "Fehler beim Hinzufügen von Port zur Portgruppe." - -#, python-format -msgid "" -"All the specified storage pools to be managed do not exist. Please check " -"your configuration. Non-existent pools: %s" -msgstr "" -"Keine der angegebenen Speicherpools, die verwaltet werden sollen, sind " -"vorhanden. Überprüfen Sie Ihre Konfiguration. Nicht vorhandene Pools: %s" - -msgid "An API version request must be compared to a VersionedMethod object." -msgstr "" -"Eine API-Versionsanforderung muss mit einem VersionedMethod-Objekt " -"verglichen werden." - -msgid "An error has occurred during backup operation" -msgstr "Während des Datensicherungsvorgangs ist ein Fehler aufgetreten." - -#, python-format -msgid "" -"An error occurred during the LUNcopy operation. LUNcopy name: " -"%(luncopyname)s. LUNcopy status: %(luncopystatus)s. LUNcopy state: " -"%(luncopystate)s." -msgstr "" -"Fehler während der LUNcopy-Operation. LUNcopy-Name: %(luncopyname)s. LUNcopy-" -"Status: %(luncopystatus)s. LUNcopy-Zustand: %(luncopystate)s." - -#, python-format -msgid "An error occurred while reading volume \"%s\"." -msgstr "Beim Lesen von Datenträger \"%s\" ist ein Fehler aufgetreten." - -#, python-format -msgid "An error occurred while writing to volume \"%s\"." -msgstr "Beim Schreiben auf Datenträger \"%s\" ist ein Fehler aufgetreten." - -#, python-format -msgid "An iSCSI CHAP user could not be added. 
(username: %(user)s)" -msgstr "" -"Ein iSCSI-CHAP-Benutzer konnte nicht hinzugefügt werden. (Benutzername: " -"%(user)s)" - -#, python-format -msgid "An iSCSI CHAP user could not be deleted. (username: %(user)s)" -msgstr "" -"Ein iSCSI-CHAP-Benutzer konnte nicht gelöscht werden. (Benutzername: " -"%(user)s)" - -#, python-format -msgid "" -"An iSCSI target could not be added. (port: %(port)s, alias: %(alias)s, " -"reason: %(reason)s)" -msgstr "" -"Ein iSCSI-Ziel konnte nicht hinzugefügt werden. (Port: %(port)s, Alias: " -"%(alias)s, Ursache: %(reason)s)" - -#, python-format -msgid "" -"An iSCSI target could not be deleted. (port: %(port)s, tno: %(tno)s, alias: " -"%(alias)s)" -msgstr "" -"Ein iSCSI-Ziel konnte nicht gelöscht werden. (Port: %(port)s, Zielnummer: " -"%(tno)s, Alias: %(alias)s)" - -msgid "An unknown error occurred." -msgstr "Ein unbekannter Fehler ist aufgetreten." - -msgid "An unknown exception occurred." -msgstr "Eine unbekannte Ausnahme ist aufgetreten." - -msgid "" -"An user with a token scoped to a subproject is not allowed to see the quota " -"of its parents." -msgstr "" -"Ein Benutzer mit einem Token, als dessen Bereich ein Unterprojekt festgelegt " -"wurde, darf das Kontingent der zugehörigen übergeordneten Elemente nicht " -"anzeigen." - -msgid "Append port group description error." -msgstr "Fehler beim Anhängen der Portgruppenbeschreibung." - -#, python-format -msgid "" -"Applying the zones and cfgs to the switch failed (error code=%(err_code)s " -"error msg=%(err_msg)s." -msgstr "" -"Anwenden der Zonen und Konfigurationen auf den Switch fehlgeschlagen " -"(Fehlercode=%(err_code)s Fehlernachricht=%(err_msg)s)." - -#, python-format -msgid "Array does not exist or is offline. Current status of array is %s." -msgstr "" -"Array ist nicht vorhanden oder ist offline. Aktueller Status des Arrays: %s." - -msgid "Associate host to hostgroup error." -msgstr "Fehler beim Zuordnen des Hosts zur Hostgruppe." 
- -msgid "Associate host to mapping view error." -msgstr "Fehler beim Zuordnen des Hosts zur Zuordnungsansicht." - -msgid "Associate initiator to host error." -msgstr "Fehler beim Zuordnen des Initiators zum Host." - -msgid "Associate lun to QoS error." -msgstr "Fehler beim Zuordnen der LUN zu QoS." - -msgid "Associate lun to lungroup error." -msgstr "Fehler beim Zuordnen der LUN zur LUN-Gruppe." - -msgid "Associate lungroup to mapping view error." -msgstr "Fehler beim Zuordnen der LUN-Gruppe zur Zuordnungsansicht." - -msgid "Associate portgroup to mapping view error." -msgstr "Fehler beim Zuordnen der Portgruppe zur Zuordnungsansicht." - -msgid "At least one valid iSCSI IP address must be set." -msgstr "Es muss mindestens eine gültige iSCSI-IP-Adresse festgelegt werden." - -#, python-format -msgid "Attempt to transfer %s with invalid auth key." -msgstr "Versuch, %s mit ungültigem Autorisierungsschlüssel zu übertragen." - -#, python-format -msgid "Attribute: %s not found." -msgstr "Attribut %s nicht gefunden." - -#, python-format -msgid "Auth group [%s] details not found in CloudByte storage." -msgstr "" -"Details der Authentifizierungsgruppe [%s] nicht im CloudByte-Speicher " -"gefunden." - -msgid "Auth user details not found in CloudByte storage." -msgstr "" -"Details des Authentifizierungsbenutzers nicht im CloudByte-Speicher gefunden." - -#, python-format -msgid "Authentication failed, verify the switch credentials, error code %s." -msgstr "" -"Authentifizierung fehlgeschlagen. Überprüfen Sie die Berechtigungsnachweise " -"für den Switch. Fehlercode %s." - -#, python-format -msgid "Availability zone '%(s_az)s' is invalid." -msgstr "Verfügbarkeitszone '%(s_az)s' ist ungültig." - -msgid "Available categories:" -msgstr "Verfügbare Kategorien:" - -msgid "" -"Back-end QoS specs are not supported on this storage family and ONTAP " -"version." -msgstr "" -"Backend-QoS-Spezifikationen werden für diese Speicherfamilie und ONTAP-" -"Version nicht unterstützt." 
- -#, python-format -msgid "Backend doesn't exist (%(backend)s)" -msgstr "Backend ist nicht vorhanden (%(backend)s)." - -#, python-format -msgid "Backend reports: %(message)s" -msgstr "Backendberichte: %(message)s" - -msgid "Backend reports: item already exists" -msgstr "Backendberichte: Element ist bereits vorhanden." - -msgid "Backend reports: item not found" -msgstr "Backendberichte: Element nicht gefunden." - -#, python-format -msgid "Backend service retry timeout hit: %(timeout)s sec" -msgstr "Zeitlimittreffer für Wiederholungen bei Backenddienst: %(timeout)s s" - -msgid "Backend storage did not configure fiber channel target." -msgstr "Backend-Speicher hat Fibre Channel-Ziel nicht konfiguriert. " - -msgid "Backing up an in-use volume must use the force flag." -msgstr "" -"Für die Sicherung eines Datenträgers mit dem Status 'in-use' muss das force-" -"Flag verwendet werden." - -#, python-format -msgid "Backup %(backup_id)s could not be found." -msgstr "Die Sicherung %(backup_id)s wurde nicht gefunden." - -msgid "Backup RBD operation failed" -msgstr "RBD-Sicherungsoperation ist fehlgeschlagen." - -msgid "Backup already exists in database." -msgstr "Die Sicherungskopie ist bereits in Datenbank vorhanden." - -#, python-format -msgid "Backup driver reported an error: %(message)s" -msgstr "Sicherungstreiber meldete einen Fehler: %(message)s" - -msgid "Backup id required" -msgstr "Datensicherungs-ID erforderlich" - -msgid "Backup is not supported for GlusterFS volumes with snapshots." -msgstr "" -"Die Sicherung von GlusterFS-Datenträgern mit Schattenkopien wird nicht " -"unterstützt." - -msgid "Backup is only supported for SOFS volumes without backing file." -msgstr "" -"Sicherung wird nur für SOFS-Datenträger ohne Sicherungsdatei unterstützt." - -msgid "Backup is only supported for raw-formatted GlusterFS volumes." -msgstr "" -"Die Sicherung wird nur für unformatierte GlusterFS-Datenträger unterstützt." 
- -msgid "Backup is only supported for raw-formatted SOFS volumes." -msgstr "Sicherung wird nur für unformatierte SOFS-Datenträger unterstützt." - -msgid "Backup operation of an encrypted volume failed." -msgstr "" -"Die Sicherungsoperation eines verschlüsselten Datenträgers ist " -"fehlgeschlagen." - -#, python-format -msgid "" -"Backup service %(configured_service)s does not support verify. Backup id " -"%(id)s is not verified. Skipping verify." -msgstr "" -"Der Sicherungsdienst %(configured_service)s unterstützt keine Überprüfung. " -"Die Sicherungs-ID %(id)s wird nicht überprüft. Die Überprüfung wird " -"übersprungen." - -#, python-format -msgid "" -"Backup service %(service)s does not support verify. Backup id %(id)s is not " -"verified. Skipping reset." -msgstr "" -"Der Sicherungsdienst %(service)s unterstützt keine Überprüfung. Die " -"Sicherungs-ID %(id)s wird nicht überprüft. Zurücksetzung wird übersprungen." - -#, python-format -msgid "Backup should only have one snapshot but instead has %s" -msgstr "" -"Die Sicherung sollte nur eine Schattenkopie enthalten, stattdessen enthält " -"sie %s." - -msgid "Backup status must be available" -msgstr "Sicherungsstatus muss 'available' sein." - -#, python-format -msgid "Backup status must be available and not %s." -msgstr "Sicherungsstatus muss 'verfügbar' sein und nicht '%s'." - -msgid "Backup status must be available or error" -msgstr "Sicherungsstatus muss 'available' oder 'error' sein." - -msgid "Backup to be restored has invalid size" -msgstr "Wiederherzustellende Sicherung hat ungültige Größe." - -#, python-format -msgid "Bad Status line returned: %(arg)s." -msgstr "Falsche Statuszeile zurückgegeben: %(arg)s." 
- -#, python-format -msgid "Bad key(s) in quota set: %s" -msgstr "Falsche(r) Schlüssel in Kontingentsatz: %s" - -#, python-format -msgid "" -"Bad or unexpected response from the storage volume backend API: %(data)s" -msgstr "" -"Fehlerhafte oder nicht erwartete Antwort von Backend-API des Datenträgers: " -"%(data)s" - -#, python-format -msgid "Bad project format: project is not in proper format (%s)" -msgstr "" -"Fehlerhaftes Projektformat: Projekt weist nicht das richtige Format (%s) auf." - -msgid "Bad response from Datera API" -msgstr "Ungültige Antwort von Datera-API" - -msgid "Bad response from SolidFire API" -msgstr "Ungültige Antwort von SolidFire-API" - -#, python-format -msgid "Bad response from XMS, %s" -msgstr "Fehlerhafte Antwort vom XMS, %s" - -msgid "Binary" -msgstr "Binärdatei" - -msgid "Blank components" -msgstr "Leere Komponenten" - -msgid "Blockbridge api host not configured" -msgstr "Blockbridge-API-Host nicht konfiguriert" - -#, python-format -msgid "Blockbridge configured with invalid auth scheme '%(auth_scheme)s'" -msgstr "" -"Blockbridge wurde mit dem ungültigen Authentifizierungsschema " -"'%(auth_scheme)s' konfiguriert." - -msgid "Blockbridge default pool does not exist" -msgstr "Blockbridge-Standardpool ist nicht vorhanden." - -msgid "" -"Blockbridge password not configured (required for auth scheme 'password')" -msgstr "" -"Blockbridge-Kennwort nicht konfiguriert (erforderlich für " -"Authentifizierungsschema 'password')" - -msgid "Blockbridge pools not configured" -msgstr "Blockbridge-Pools wurden nicht konfiguriert." 
- -msgid "Blockbridge token not configured (required for auth scheme 'token')" -msgstr "" -"Blockbridge-Token nicht konfiguriert (erforderlich für " -"Authentifizierungsschema 'token')" - -msgid "Blockbridge user not configured (required for auth scheme 'password')" -msgstr "" -"Blockbridge-Benutzer nicht konfiguriert (erforderlich für " -"Authentifizierungsschema 'password')" - -#, python-format -msgid "Brocade Fibre Channel Zoning CLI error: %(reason)s" -msgstr "CLI-Fehler beim Brocade Fibre Channel-Zoning: %(reason)s" - -#, python-format -msgid "Brocade Fibre Channel Zoning HTTP error: %(reason)s" -msgstr "HTTP-Fehler beim Brocade Fibre Channel-Zoning: %(reason)s" - -msgid "CHAP secret should be 12-16 bytes." -msgstr "Geheimer CHAP-Schlüssel sollte aus 12 bis 16 Bytes bestehen." - -#, python-format -msgid "" -"CLI Exception output:\n" -" command: %(cmd)s\n" -" stdout: %(out)s\n" -" stderr: %(err)s" -msgstr "" -"Ausgabe bei CLI-Ausnahme:\n" -" Befehl: %(cmd)s\n" -" Standardausgabe: %(out)s\n" -" Standardfehler: %(err)s" - -#, python-format -msgid "" -"CLI Exception output:\n" -" command: %(cmd)s\n" -" stdout: %(out)s\n" -" stderr: %(err)s." -msgstr "" -"Ausgabe bei CLI-Ausnahme:\n" -" Befehl: %(cmd)s\n" -" Standardausgabe: %(out)s\n" -" Standardfehler: %(err)s." - -msgid "" -"CMMVC6071E The VDisk-to-host mapping was not created because the VDisk is " -"already mapped to a host.\n" -"\"" -msgstr "" -"CMMVC6071E Die Zuordnung von virtueller Platte zu Host wurde nicht erstellt, " -"da die virtuelle Platte bereits einem Host zugeordnet ist.\n" -"\"" - -msgid "CONCERTO version is not supported" -msgstr "CONCERTO-Version wird nicht unterstützt." - -#, python-format -msgid "CPG (%s) doesn't exist on array" -msgstr "CPG (%s) ist im Array nicht vorhanden." - -msgid "Cache name is None, please set smartcache:cachename in key." -msgstr "" -"Zwischenspeichername ist None. Legen Sie smartcache:cachename im Schlüssel " -"fest." 
- -#, python-format -msgid "Cache volume %s does not have required properties" -msgstr "" -"Der Zwischenspeicherdatenträger %s hat nicht die erforderlichen " -"Eigenschaften." - -msgid "Call returned a None object" -msgstr "Der Aufruf hat ein 'None'-Objekt zurückgegeben." - -msgid "Can not add FC port to host." -msgstr "FC-Port kann nicht zu Host hinzugefügt werden." - -#, python-format -msgid "Can not find cache id by cache name %(name)s." -msgstr "" -"Zwischenspeicher-ID nach Zwischenspeichername %(name)s wurde nicht gefunden." - -#, python-format -msgid "Can not find partition id by name %(name)s." -msgstr "Partitions-ID nach Name %(name)s wurde nicht gefunden." - -#, python-format -msgid "Can not get pool info. pool: %s" -msgstr "Poolinfo konnte nicht abgerufen werden. Pool: %s" - -#, python-format -msgid "Can not translate %s to integer." -msgstr "%s kann nicht in eine Ganzzahl umgesetzt werden." - -#, python-format -msgid "Can't access 'scality_sofs_config': %s" -msgstr "Zugriff auf 'scality_sofs_config' nicht möglich: %s" - -msgid "Can't decode backup record." -msgstr "Sicherungsdatensatz kann nicht decodiert werden." - -#, python-format -msgid "Can't extend replication volume, volume: %(id)s" -msgstr "" -"Replikationsdatenträger kann nicht erweitert werden, Datenträger: %(id)s" - -msgid "Can't find LUN on the array, please check the source-name or source-id." -msgstr "" -"LUN kann nicht im Array gefunden werden. Prüfen Sie 'source-name' und " -"'source-id'." - -#, python-format -msgid "Can't find cache name on the array, cache name is: %(name)s." -msgstr "Cachename wurde im Array nicht gefunden. Cachename: %(name)s." - -#, python-format -msgid "Can't find lun info on the array. volume: %(id)s, lun name: %(name)s." -msgstr "" -"LUN-Info wurde nicht im Array gefunden. Datenträger: %(id)s, LUN-Name: " -"%(name)s." - -#, python-format -msgid "Can't find partition name on the array, partition name is: %(name)s." 
-msgstr "" -"Partitionsname wurde im Array nicht gefunden. Partitionsname: %(name)s." - -#, python-format -msgid "Can't find service: %s" -msgstr "Der Dienst wurde nicht gefunden: %s" - -msgid "" -"Can't find snapshot on array, please check the source-name or source-id." -msgstr "" -"Schattenkopie kann nicht im Array gefunden werden. Prüfen Sie 'source-name' " -"und 'source-id'." - -msgid "Can't find the same host id from arrays." -msgstr "Dieselbe Host-ID wurde nicht in den Arrays gefunden." - -#, python-format -msgid "Can't get volume id from snapshot, snapshot: %(id)s" -msgstr "" -"Datenträger-ID kann nicht aus der Schattenkopie abgerufen werden. " -"Schattenkopie: %(id)s" - -#, python-format -msgid "Can't get volume id. Volume name: %s." -msgstr "Die Datenträger-ID kann nicht abgerufen werden. Datenträgername: %s." - -#, python-format -msgid "Can't import LUN %(lun_id)s to Cinder. LUN type mismatched." -msgstr "" -"LUN %(lun_id)s kann nicht in Cinder importiert werden. Der LUN-Typ stimmt " -"nicht überein." - -#, python-format -msgid "Can't import LUN %s to Cinder. Already exists in a HyperMetroPair." -msgstr "" -"LUN %s kann nicht in Cinder importiert werden. Sie ist bereits in einem " -"HyperMetroPair vorhanden." - -#, python-format -msgid "Can't import LUN %s to Cinder. Already exists in a LUN copy task." -msgstr "" -"LUN %s kann nicht in Cinder importiert werden. Sie ist bereits in einer LUN-" -"Kopieraufgabe vorhanden." - -#, python-format -msgid "Can't import LUN %s to Cinder. Already exists in a LUN group." -msgstr "" -"LUN %s kann nicht in Cinder importiert werden. Sie ist bereits in einer LUN-" -"Gruppe vorhanden." - -#, python-format -msgid "Can't import LUN %s to Cinder. Already exists in a LUN mirror." -msgstr "" -"LUN %s kann nicht in Cinder importiert werden. Sie ist bereits in einem LUN-" -"Spiegel vorhanden." - -#, python-format -msgid "Can't import LUN %s to Cinder. Already exists in a SplitMirror." 
-msgstr "" -"LUN %s kann nicht in Cinder importiert werden. Sie ist bereits in einem " -"SplitMirror vorhanden." - -#, python-format -msgid "Can't import LUN %s to Cinder. Already exists in a migration task." -msgstr "" -"LUN %s kann nicht in Cinder importiert werden. Sie ist bereits in einer " -"Migrationsaufgabe vorhanden." - -#, python-format -msgid "" -"Can't import LUN %s to Cinder. Already exists in a remote replication task." -msgstr "" -"LUN %s kann nicht in Cinder importiert werden. Sie ist bereits in einer " -"fernen Replikationsaufgabe vorhanden." - -#, python-format -msgid "Can't import LUN %s to Cinder. LUN status is not normal." -msgstr "" -"LUN %s kan nicht in Cinder importiert werden. Der LUN-Status ist nicht " -"normal." - -#, python-format -msgid "Can't import snapshot %s to Cinder. Snapshot doesn't belong to volume." -msgstr "" -"Schattenkopie %s kann nicht in Cinder importiert werden. Die Schattenkopie " -"gehört nicht zum Datenträger." - -#, python-format -msgid "Can't import snapshot %s to Cinder. Snapshot is exposed to initiator." -msgstr "" -"Schattenkopie %s kann nicht in Cinder importiert werden. Die Schattenkopie " -"wurde dem Initiator verfügbar gemacht." - -#, python-format -msgid "" -"Can't import snapshot %s to Cinder. Snapshot status is not normal or running " -"status is not online." -msgstr "" -"Schattenkopie %s kann nicht in Cinder importiert werden. Status der " -"Schattenkopie ist nicht normal oder der Aktivitätsstatus ist nicht 'online'. " - -msgid "Can't parse backup record." -msgstr "Sicherungsdatensatz kann nicht analysiert werden." - -#, python-format -msgid "" -"Cannot add volume %(volume_id)s to consistency group %(group_id)s because it " -"has no volume type." -msgstr "" -"Der Datenträger %(volume_id)s kann nicht zur Konsistenzgruppe %(group_id)s " -"hinzugefügt werden, da er keinen Datenträgertyp hat." 
- -#, python-format -msgid "" -"Cannot add volume %(volume_id)s to consistency group %(group_id)s because it " -"is already in consistency group %(orig_group)s." -msgstr "" -"Der Datenträger %(volume_id)s kann nicht zur Konsistenzgruppe %(group_id)s " -"hinzugefügt werden, da er bereits in der Konsistenzgruppe %(orig_group)s " -"enthalten ist." - -#, python-format -msgid "" -"Cannot add volume %(volume_id)s to consistency group %(group_id)s because " -"volume cannot be found." -msgstr "" -"Der Datenträger %(volume_id)s kann nicht zur Konsistenzgruppe %(group_id)s " -"hinzugefügt werden, da der Datenträger nicht gefunden wurde." - -#, python-format -msgid "" -"Cannot add volume %(volume_id)s to consistency group %(group_id)s because " -"volume does not exist." -msgstr "" -"Der Datenträger %(volume_id)s kann nicht zur Konsistenzgruppe %(group_id)s " -"hinzugefügt werden, da der Datenträger nicht vorhanden ist." - -#, python-format -msgid "" -"Cannot add volume %(volume_id)s to consistency group %(group_id)s because " -"volume is in an invalid state: %(status)s. Valid states are: %(valid)s." -msgstr "" -"Der Datenträger %(volume_id)s kann nicht zur Konsistenzgruppe %(group_id)s " -"hinzugefügt werden, da der Datenträger einen ungültigen Status hat: " -"%(status)s. Gültige Status sind: %(valid)s." - -#, python-format -msgid "" -"Cannot add volume %(volume_id)s to consistency group %(group_id)s because " -"volume type %(volume_type)s is not supported by the group." -msgstr "" -"Der Datenträger %(volume_id)s kann nicht zur Konsistenzgruppe %(group_id)s " -"hinzugefügt werden, da der Datenträgertyp %(volume_type)s von der Gruppe " -"nicht unterstützt wird." - -#, python-format -msgid "" -"Cannot attach already attached volume %s; multiattach is disabled via the " -"'netapp_enable_multiattach' configuration option." -msgstr "" -"Der bereits angehängte Datenträger %s kann nicht angehängt werden. 
Das " -"mehrfache Anhängen ist über die Konfigurationsoption " -"'netapp_enable_multiattach' inaktiviert." - -msgid "Cannot change VF context in the session." -msgstr "VF-Kontext kann nicht in der Sitzung geändert werden." - -#, python-format -msgid "" -"Cannot change VF context, specified VF is not available in the manageable VF " -"list %(vf_list)s." -msgstr "" -"VF-Kontext kann nicht geändert werden. Die angegebene VF ist nicht in der " -"Liste der verwaltbaren VFs %(vf_list)s enthalten." - -msgid "Cannot connect to ECOM server." -msgstr "Verbindung zu ECOM-Server kann nicht hergestellt werden." - -#, python-format -msgid "" -"Cannot create consistency group %(group)s because snapshot %(snap)s is not " -"in a valid state. Valid states are: %(valid)s." -msgstr "" -"Konsistenzgruppe %(group)s kann nicht erstellt werden, da Schattenkopie " -"%(snap)s keinen gültigen Status hat. Gültige Status sind: %(valid)s." - -#, python-format -msgid "" -"Cannot create consistency group %(group)s because source volume " -"%(source_vol)s is not in a valid state. Valid states are: %(valid)s." -msgstr "" -"Konsistenzgruppe %(group)s kann nicht erstellt werden, da Quellendatenträger " -"%(source_vol)s keinen gültigen Status hat. Gültige Status sind: %(valid)s." - -#, python-format -msgid "Cannot create directory %s." -msgstr "Verzeichnis %s kann nicht erstellt werden." - -msgid "Cannot create encryption specs. Volume type in use." -msgstr "" -"Verschlüsselungsspezifikationen können nicht erstellt werden. Datenträgertyp " -"ist im Gebrauch." - -#, python-format -msgid "" -"Cannot create image of disk format: %s. Only vmdk disk format is accepted." -msgstr "" -"Festplattenformat des Abbilds kann nicht erstellt werden: %s. Nur das vmdk-" -"Plattenformat wird akzeptiert." - -#, python-format -msgid "Cannot create masking view: %(maskingViewName)s. " -msgstr "Maskenansicht %(maskingViewName)s kann nicht erstellt werden. 
" - -#, python-format -msgid "" -"Cannot create more than %(req)s volumes on the ESeries array when " -"'netapp_enable_multiattach' is set to true." -msgstr "" -"Im ESeries-Array können nicht mehr als %(req)s Datenträger erstellt werden, " -"wenn 'netapp_enable_multiattach' auf true gesetzt wurde." - -#, python-format -msgid "Cannot create or find an storage group with name %(sgGroupName)s." -msgstr "" -"Eine Speichergruppe mit dem Namen %(sgGroupName)s kann nicht erstellt oder " -"gefunden werden." - -#, python-format -msgid "Cannot create volume of size %s: not multiple of 8GB." -msgstr "" -"Datenträger der Größe %s kann nicht erstellt werden: Kein Vielfaches von 8 " -"GB." - -#, python-format -msgid "Cannot create volume_type with name %(name)s and specs %(extra_specs)s" -msgstr "" -"volume_type mit dem Namen %(name)s und der Spezifikation %(extra_specs)s " -"kann nicht erstellt werden." - -#, python-format -msgid "Cannot delete LUN %s while snapshots exist." -msgstr "LUN %s kann nicht gelöscht werden, wenn Schattenkopien vorhanden sind." - -#, python-format -msgid "" -"Cannot delete cache volume: %(cachevol_name)s. It was updated at " -"%(updated_at)s and currently has %(numclones)d volume instances." -msgstr "" -"Zwischenspeicherdatenträger %(cachevol_name)s kann nicht gelöscht werden. Er " -"wurde am %(updated_at)s aktualisiert und enthält zurzeit %(numclones)d " -"Datenträgerinstanzen." - -#, python-format -msgid "" -"Cannot delete cache volume: %(cachevol_name)s. It was updated at " -"%(updated_at)s and currently has %(numclones)s volume instances." -msgstr "" -"Der Zwischenspeicherdatenträger %(cachevol_name)s kann nicht gelöscht " -"werden. Er wurde am %(updated_at)s aktualisiert und enthält zurzeit " -"%(numclones)s Datenträgerinstanzen." - -msgid "Cannot delete encryption specs. Volume type in use." -msgstr "" -"Verschlüsselungsspezifikationen können nicht gelöscht werden. Datenträgertyp " -"ist im Gebrauch." 
- -msgid "Cannot determine storage pool settings." -msgstr "Es konnten keine Speicherpooleinstellungen ermittelt werden." - -msgid "Cannot execute /sbin/mount.sofs" -msgstr "Ausführen von /sbin/mount.sofs nicht möglich" - -#, python-format -msgid "Cannot find CG group %s." -msgstr "CG-Gruppe %s kann nicht gefunden werden." - -#, python-format -msgid "" -"Cannot find Controller Configuration Service for storage system " -"%(storage_system)s." -msgstr "" -"Controllerkonfigurationsdienst für Speichersystem %(storage_system)s wurde " -"nicht gefunden." - -#, python-format -msgid "Cannot find Replication Service to create volume for snapshot %s." -msgstr "" -"Replikationsdienst zum Erstellen von Datenträger für Schattenkopie %s wurde " -"nicht gefunden." - -#, python-format -msgid "Cannot find Replication Service to delete snapshot %s." -msgstr "" -"Der Replikationsdienst zum Löschen der Schattenkopie %s wurde nicht gefunden." - -#, python-format -msgid "Cannot find Replication service on system %s." -msgstr "Replikationsdienst kann im System %s nicht gefunden werden." - -#, python-format -msgid "Cannot find Volume: %(id)s. unmanage operation. Exiting..." -msgstr "" -"Der Datenträger %(id)s wurde nicht gefunden. Operation zum Aufheben der " -"Verwaltung. Wird beendet..." - -#, python-format -msgid "Cannot find Volume: %(volumename)s. Extend operation. Exiting...." -msgstr "" -"Datenträger %(volumename)s kann nicht gefunden werden. " -"Erweiterungsoperation. Vorgang wird beendet..." - -#, python-format -msgid "Cannot find device number for volume %(volumeName)s." -msgstr "Gerätenummer für Datenträger %(volumeName)s wurde nicht gefunden." - -msgid "Cannot find migration task." -msgstr "Migrationsaufgabe wurde nicht gefunden." - -#, python-format -msgid "Cannot find replication service on system %s." -msgstr "Replikationsdienst kann im System %s nicht gefunden werden." - -#, python-format -msgid "Cannot find source CG instance. consistencygroup_id: %s." 
-msgstr "" -"Instanz der Quellenkonsistenzgruppe wurde nicht gefunden. " -"consistencygroup_id: %s." - -#, python-format -msgid "Cannot get mcs_id by channel id: %(channel_id)s." -msgstr "mcs_id kann nicht nach Kanal-ID abgerufen werden: %(channel_id)s." - -msgid "Cannot get necessary pool or storage system information." -msgstr "" -"Die erforderlichen Informationen zum Pool oder zur Speichereinheit können " -"nicht abgerufen werden." - -#, python-format -msgid "" -"Cannot get or create a storage group: %(sgGroupName)s for volume " -"%(volumeName)s " -msgstr "" -"Speichergruppe %(sgGroupName)s kann für Datenträger %(volumeName)s nicht " -"abgerufen oder erstellt werden." - -#, python-format -msgid "Cannot get or create initiator group: %(igGroupName)s. " -msgstr "" -"Initiatorgruppe %(igGroupName)s kann nicht abgerufen oder erstellt werden. " - -#, python-format -msgid "Cannot get port group: %(pgGroupName)s. " -msgstr "Portgruppe %(pgGroupName)s kann nicht abgerufen werden. " - -#, python-format -msgid "" -"Cannot get storage group: %(sgGroupName)s from masking view " -"%(maskingViewInstanceName)s. " -msgstr "" -"Speichergruppe %(sgGroupName)s kann nicht aus Maskenansicht " -"%(maskingViewInstanceName)s abgerufen werden. " - -#, python-format -msgid "" -"Cannot get supported size range for %(sps)s Return code: %(rc)lu. Error: " -"%(error)s." -msgstr "" -"Unterstützter Größenbereich kann für %(sps)s nicht abgerufen werden. " -"Rückgabecode: %(rc)lu. Fehler: %(error)s." - -#, python-format -msgid "" -"Cannot get the default storage group for FAST policy: %(fastPolicyName)s." -msgstr "" -"Die Standardspeichergruppe für FAST-Richtlinie %(fastPolicyName)s kann nicht " -"abgerufen werden." - -msgid "Cannot get the portgroup from the masking view." -msgstr "Die Portgruppe konnte nicht aus der Maskenansicht abgerufen werden." - -msgid "Cannot mount Scality SOFS, check syslog for errors" -msgstr "" -"Scality-SoFS kann nicht eingehängt werden. 
Überprüfen Sie das " -"Systemprotokoll auf Fehler." - -msgid "Cannot ping DRBDmanage backend" -msgstr "DRBDmanage-Backend kann nicht mit Ping überprüft werden." - -#, python-format -msgid "Cannot place volume %(id)s on %(host)s" -msgstr "Der Datenträger %(id)s kann nicht auf %(host)s angeordnet werden." - -#, python-format -msgid "" -"Cannot provide both 'cgsnapshot_id' and 'source_cgid' to create consistency " -"group %(name)s from source." -msgstr "" -"Es können nicht sowohl 'cgsnapshot_id' als auch 'source_cgid' angegeben " -"werden, um die Konsistenzgruppe %(name)s aus der Quelle zu erstellen." - -msgid "Cannot register resource" -msgstr "Registrieren der Ressource nicht möglich." - -msgid "Cannot register resources" -msgstr "Registrieren der Ressourcen nicht möglich." - -#, python-format -msgid "" -"Cannot remove volume %(volume_id)s from consistency group %(group_id)s " -"because it is not in the group." -msgstr "" -"Der Datenträger %(volume_id)s kann nicht aus der Konsistenzgruppe " -"%(group_id)s entfernt werden, da er sich nicht in der Gruppe befindet." - -#, python-format -msgid "" -"Cannot remove volume %(volume_id)s from consistency group %(group_id)s " -"because volume is in an invalid state: %(status)s. Valid states are: " -"%(valid)s." -msgstr "" -"Datenträger %(volume_id)s kann nicht aus der Konsistenzgruppe %(group_id)s " -"entfernt werden, da der Datenträger einen ungültigen Status hat: %(status)s. " -"Gültige Status sind: %(valid)s." - -#, python-format -msgid "Cannot retype from HPE3PARDriver to %s." -msgstr "Typänderung von HPE3PARDriver in %s nicht möglich." - -msgid "Cannot retype from one 3PAR array to another." -msgstr "Typänderung von einem 3PAR-Array in einen anderen nicht möglich. " - -msgid "Cannot retype to a CPG in a different domain." -msgstr "Typänderung in CPG in einer anderen Domäne nicht möglich. " - -msgid "Cannot retype to a snap CPG in a different domain." -msgstr "Typänderung in snapCPG in einer anderen Domäne nicht möglich. 
" - -msgid "" -"Cannot run vgc-cluster command, please ensure software is installed and " -"permissions are set properly." -msgstr "" -"vgc-cluster-Befehl kann nicht ausgeführt werden. Stellen Sie sicher, dass " -"die Software installiert ist und die Berechtigungen ordnungsgemäß festgelegt " -"sind." - -msgid "Cannot set both hitachi_serial_number and hitachi_unit_name." -msgstr "" -"hitachi_serial_number und hitachi_unit_name können nicht gemeinsam " -"festgelegt werden." - -msgid "Cannot specify both protection domain name and protection domain id." -msgstr "" -"Schutzdomänenname und Schutzdomänen-ID können nicht gemeinsam angegeben " -"werden." - -msgid "Cannot specify both storage pool name and storage pool id." -msgstr "" -"Speicherpoolname und Speicherpool-ID können nicht gemeinsam angegeben werden." - -#, python-format -msgid "" -"Cannot update consistency group %(group_id)s because no valid name, " -"description, add_volumes, or remove_volumes were provided." -msgstr "" -"Konsistenzgruppe %(group_id)s kann nicht aktualisiert werden, da keine " -"gültigen Werte für name, description, add_volumes oder remove_volumes " -"angegeben wurden." - -msgid "Cannot update encryption specs. Volume type in use." -msgstr "" -"Verschlüsselungsspezifikationen können nicht aktualisiert werden. " -"Datenträgertyp ist im Gebrauch." - -#, python-format -msgid "Cannot update volume_type %(id)s" -msgstr "volume_type %(id)s kann nicht aktualisiert werden." - -#, python-format -msgid "Cannot verify the existence of object:%(instanceName)s." -msgstr "" -"Das Vorhandensein von object:%(instanceName)s kann nicht geprüft werden." - -#, python-format -msgid "CgSnapshot %(cgsnapshot_id)s could not be found." -msgstr "CgSnapshot %(cgsnapshot_id)s wurde nicht gefunden." - -msgid "Cgsnahost is empty. No consistency group will be created." -msgstr "Cgsnahost ist leer. Es wird keine Konsistenzgruppe erstellt." - -msgid "Change hostlun id error." -msgstr "Fehler beim Ändern der Host-LUN-ID." 
- -msgid "Change lun priority error." -msgstr "Fehler beim Ändern der LUN-Priorität." - -msgid "Change lun smarttier policy error." -msgstr "Fehler beim Ändern der LUN-smarttier-Richtlinie." - -#, python-format -msgid "" -"Change would make usage less than 0 for the following resources: %(unders)s" -msgstr "" -"Durch die Änderung wäre die Nutzung kleiner als 0 für die folgenden " -"Ressourcen: %(unders)s" - -msgid "Check access permissions for the ZFS share assigned to this driver." -msgstr "" -"Prüfen Sie die Zugriffsberechtigungen für die diesem Treiber zugeordneten " -"freigegebenen ZFS-Verzeichnisse." - -msgid "Check hostgroup associate error." -msgstr "Fehler beim Prüfen der Hostgruppenzuordnung." - -msgid "Check initiator added to array error." -msgstr "Fehler beim Prüfen des Initiators, der dem Array hinzugefügt wurde." - -msgid "Check initiator associated to host error." -msgstr "Fehler beim Prüfen des Initiators, der dem Host zugeordnet wurde." - -msgid "Check lungroup associate error." -msgstr "Fehler beim Prüfen der LUN-Gruppenzuordnung." - -msgid "Check portgroup associate error." -msgstr "Fehler beim Prüfen der Portgruppenzuordnung." - -msgid "" -"Check the state of the http service. Also ensure that the https port number " -"is the same as the one specified in cinder.conf." -msgstr "" -"Prüfen Sie den Zustand des http-Diensts. Stellen Sie auch sicher, dass die " -"https-Portnummer der in cinder.conf angegebenen Portnummer entspricht." - -msgid "Chunk size is not multiple of block size for creating hash." -msgstr "" -"Chunkgröße ist kein Vielfaches der Blockgröße zur Bildung des Hashwerts." - -#, python-format -msgid "Cisco Fibre Channel Zoning CLI error: %(reason)s" -msgstr "CLI-Fehler beim Cisco Fibre Channel-Zoning: %(reason)s" - -#, python-format -msgid "Clone feature is not licensed on %(storageSystem)s." -msgstr "Klonfunktion ist für %(storageSystem)s nicht lizenziert." 
- -#, python-format -msgid "" -"Clone type '%(clone_type)s' is invalid; valid values are: '%(full_clone)s' " -"and '%(linked_clone)s'." -msgstr "" -"Der Klontyp '%(clone_type)s' ist ungültig. Gültige Werte: '%(full_clone)s' " -"und '%(linked_clone)s'." - -msgid "" -"Cluster is not formatted. You should probably perform \"dog cluster format\"." -msgstr "" -"Cluster ist nicht formatiert. Sie müssen möglicherweise \"dog cluster format" -"\" ausführen." - -#, python-format -msgid "Coho Data Cinder driver failure: %(message)s" -msgstr "Fehler bei Coho Data-Cinder-Treiber: %(message)s" - -msgid "Coho rpc port is not configured" -msgstr "Der Coho-RPC-Port ist nicht konfiguriert." - -#, python-format -msgid "Command %(cmd)s blocked in the CLI and was cancelled" -msgstr "Befehl %(cmd)s blockierte in der CLI und wurde abgebrochen." - -#, python-format -msgid "CommandLineHelper._wait_for_condition: %s timeout." -msgstr "CommandLineHelper._wait_for_condition: %s Zeitlimit." - -msgid "Compression Enabler is not installed. Can not create compressed volume." -msgstr "" -"Compression Enabler ist nicht installiert. Es kann kein komprimierter " -"Datenträger erstellt werden. " - -#, python-format -msgid "Compute cluster: %(cluster)s not found." -msgstr "Compute-Cluster: %(cluster)s nicht gefunden." - -msgid "Condition has no field." -msgstr "Die Bedingung hat kein Feld." - -#, python-format -msgid "Config 'max_over_subscription_ratio' invalid. Must be > 0: %s" -msgstr "" -"Die Konfiguration von 'max_over_subscription_ratio' ist ungültig. Muss > 0 " -"sein: %s" - -msgid "Configuration error: dell_sc_ssn not set." -msgstr "Konfigurationsfehler: dell_sc_ssn ist nicht festgelegt." - -#, python-format -msgid "Configuration file %(configurationFile)s does not exist." -msgstr "Konfigurationsdatei %(configurationFile)s ist nicht vorhanden." - -msgid "Configuration is not found." -msgstr "Die Konfiguration wurde nicht gefunden. 
" - -#, python-format -msgid "Configuration value %s is not set." -msgstr "Konfigurationswert %s ist nicht festgelegt." - -#, python-format -msgid "" -"Conflicting QoS specifications in volume type %s: when QoS spec is " -"associated to volume type, legacy \"netapp:qos_policy_group\" is not allowed " -"in the volume type extra specs." -msgstr "" -"Widersprüchliche QoS-Spezifikationen in Datenträgertyp %s: Wenn QoS-" -"Spezifikation zu Datenträgertyp zugeordnet wird, ist die traditionelle " -"Einstellung \"netapp:qos_policy_group\" in den zusätzlichen Spezifikationen " -"des Datenträgertyps nicht zulässig." - -#, python-format -msgid "Connection to glance failed: %(reason)s" -msgstr "Verbindung mit Glance fehlgeschlagen: %(reason)s" - -#, python-format -msgid "Connection to swift failed: %(reason)s" -msgstr "Verbindung mit Swift fehlgeschlagen: %(reason)s" - -#, python-format -msgid "Connector does not provide: %s" -msgstr "Connector stellt Folgendes nicht bereit: %s" - -#, python-format -msgid "Connector doesn't have required information: %(missing)s" -msgstr "Connector hat nicht die erforderlichen Informationen: %(missing)s" - -msgid "Consistency group is empty. No cgsnapshot will be created." -msgstr "Die Konsistenzgruppe ist leer. Es wird kein Cgsnapshot erstellt." - -#, python-format -msgid "ConsistencyGroup %(consistencygroup_id)s could not be found." -msgstr "ConsistencyGroup %(consistencygroup_id)s wurde nicht gefunden." - -msgid "Container" -msgstr "Container" - -msgid "Container size smaller than required file size." -msgstr "Container ist kleiner als die erforderliche Dateigröße." - -msgid "Content type not supported." -msgstr "Inhaltstyp wird nicht unterstützt." - -#, python-format -msgid "Controller Configuration Service not found on %(storageSystemName)s." -msgstr "" -"Controllerkonfigurationsdienst wurde in %(storageSystemName)s nicht gefunden." - -#, python-format -msgid "Controller IP '%(host)s' could not be resolved: %(e)s." 
-msgstr "Controller-IP '%(host)s' konnte nicht aufgelöst werden: %(e)s." - -#, python-format -msgid "Converted to %(f1)s, but format is now %(f2)s" -msgstr "In %(f1)s konvertiert, Format ist nun jedoch %(f2)s." - -#, python-format -msgid "Converted to %(vol_format)s, but format is now %(file_format)s" -msgstr "In %(vol_format)s konvertiert, Format ist nun jedoch %(file_format)s." - -#, python-format -msgid "Converted to raw, but format is now %s" -msgstr "In unformatierten Zustand konvertiert, Format ist nun jedoch %s" - -#, python-format -msgid "Converted to raw, but format is now %s." -msgstr "In unformatierten Zustand konvertiert, Format ist nun jedoch %s." - -msgid "Coordinator uninitialized." -msgstr "Koordinator wurde nicht initialisiert." - -#, python-format -msgid "" -"Copy volume task failed: convert_to_base_volume: id=%(id)s, status=" -"%(status)s." -msgstr "" -"Datenträgerkopieraufgabe fehlgeschlagen: convert_to_base_volume: ID=%(id)s, " -"'Status=%(status)s' wird ignoriert." - -#, python-format -msgid "" -"Copy volume task failed: create_cloned_volume id=%(id)s, status=%(status)s." -msgstr "" -"Datenträgerkopieraufgabe fehlgeschlagen: create_cloned_volume ID=%(id)s, " -"Status=%(status)s." - -#, python-format -msgid "Copying metadata from %(src_type)s %(src_id)s to %(vol_id)s." -msgstr "Metadaten werden von %(src_type)s %(src_id)s nach %(vol_id)s kopiert." - -msgid "" -"Could not determine which Keystone endpoint to use. This can either be set " -"in the service catalog or with the cinder.conf config option " -"'backup_swift_auth_url'." -msgstr "" -"Es konnte nicht ermittelt werden, welcher Keystone-Endpunkt zu verwenden " -"ist. Dies kann entweder im Dienstkatalog oder mit der cinder.conf-" -"Konfigurationsoption 'backup_swift_auth_url' festgelegt werden." - -msgid "" -"Could not determine which Swift endpoint to use. This can either be set in " -"the service catalog or with the cinder.conf config option 'backup_swift_url'." 
-msgstr "" -"Es konnte nicht ermittelt werden, welcher Swift-Endpunkt zu verwenden ist. " -"Dies kann entweder im Dienstkatalog oder mit der cinder.conf-" -"Konfigurationsoption 'backup_swift_url' festgelegt werden." - -msgid "Could not find DISCO wsdl file." -msgstr "DISCO-WSDL-Datei wurde nicht gefunden." - -#, python-format -msgid "Could not find GPFS cluster id: %s." -msgstr "GPFS-Cluster-ID wurde nicht gefunden: %s." - -#, python-format -msgid "Could not find GPFS file system device: %s." -msgstr "GPFS-Dateisystemgerät wurde nicht gefunden: %s." - -#, python-format -msgid "Could not find config at %(path)s" -msgstr "Konfiguration wurde nicht im Pfad %(path)s gefunden." - -#, python-format -msgid "Could not find iSCSI export for volume %s" -msgstr "iSCSI-Export nicht gefunden für Datenträger %s" - -#, python-format -msgid "Could not find iSCSI target for volume: %(volume_id)s." -msgstr "iSCSI-Ziel für Datenträger %(volume_id)s wurde nicht gefunden." - -#, python-format -msgid "Could not find key in output of command %(cmd)s: %(out)s." -msgstr "" -"Schlüssel wurde nicht in der Ausgabe des Befehls %(cmd)s gefunden: %(out)s." - -#, python-format -msgid "Could not find parameter %(param)s" -msgstr "Der Parameter %(param)s wurde nicht gefunden." - -#, python-format -msgid "Could not find target %s" -msgstr "Das Ziel %s wurde nicht gefunden." - -#, python-format -msgid "Could not find the parent volume for Snapshot '%s' on array." -msgstr "" -"Der übergeordnete Datenträger für die Schattenkopie '%s' im Arrary wurde " -"nicht gefunden." - -#, python-format -msgid "Could not find unique snapshot %(snap)s on volume %(vol)s." -msgstr "" -"Es wurde keine eindeutige Schattenkopie %(snap)s auf dem Datenträger %(vol)s " -"gefunden." - -msgid "Could not get system name." -msgstr "Systemname konnte nicht abgerufen werden." - -#, python-format -msgid "Could not load paste app '%(name)s' from %(path)s" -msgstr "paste-App '%(name)s' konnte nicht aus %(path)s geladen werden." 
- -#, python-format -msgid "" -"Could not read information for snapshot %(name)s. Code: %(code)s. Reason: " -"%(reason)s" -msgstr "" -"Informationen für Schattenkopie %(name)s konnten nicht gelesen werden. Code: " -"%(code)s. Ursache: %(reason)s" - -#, python-format -msgid "Could not restore configuration file %(file_path)s: %(exc)s" -msgstr "" -"Konfigurationsdatei %(file_path)s konnte nicht wiederhergestellt werden: " -"%(exc)s" - -#, python-format -msgid "Could not save configuration to %(file_path)s: %(exc)s" -msgstr "" -"Konfiguration konnte nicht in %(file_path)s gespeichert werden: %(exc)s" - -#, python-format -msgid "Could not start consistency group snapshot %s." -msgstr "Konsistenzgruppenschattenkopie %s konnte nicht gestartet werden." - -#, python-format -msgid "Counter %s not found" -msgstr "Zähler %s nicht gefunden." - -msgid "Create QoS policy error." -msgstr "Fehler beim Erstellen der QoS-Richtlinie." - -#, python-format -msgid "" -"Create backup aborted, expected backup status %(expected_status)s but got " -"%(actual_status)s." -msgstr "" -"Erstellung von Sicherung abgebrochen. Sicherungsstatus %(expected_status)s " -"erwartet, tatsächlicher Status ist %(actual_status)s." - -#, python-format -msgid "" -"Create backup aborted, expected volume status %(expected_status)s but got " -"%(actual_status)s." -msgstr "" -"Erstellung von Sicherung abgebrochen. Datenträgerstatus %(expected_status)s " -"erwartet, tatsächlicher Status ist %(actual_status)s." - -msgid "Create export for volume failed." -msgstr "Erstellen des Exports für Datenträger fehlgeschlagen." - -msgid "Create hostgroup error." -msgstr "Fehler beim Erstellen der Hostgruppe." - -#, python-format -msgid "Create hypermetro error. %s." -msgstr "Fehler beim Erstellen von hypermetro. %s." - -msgid "Create lun error." -msgstr "Fehler beim Erstellen der LUN." - -msgid "Create lun migration error." -msgstr "Fehler beim Erstellen der LUN-Migration." - -msgid "Create luncopy error." 
-msgstr "Fehler beim Erstellen der LUN-Kopie." - -msgid "Create lungroup error." -msgstr "Fehler beim Erstellen der LUN-Gruppe." - -msgid "Create manager volume flow failed." -msgstr "" -"Der Ablauf für die Erstellung eines Verwaltungsdatenträgers ist " -"fehlgeschlagen." - -msgid "Create port group error." -msgstr "Fehler beim Erstellen der Portgruppe." - -msgid "Create replication error." -msgstr "Fehler beim Erstellen der Replikation." - -#, python-format -msgid "Create replication pair failed. Error: %s." -msgstr "Fehler beim Erstellen des Replikationspaars. Fehler: %s." - -msgid "Create snapshot error." -msgstr "Fehler beim Erstellen der Schattenkopie." - -#, python-format -msgid "Create volume error. Because %s." -msgstr "Fehler beim Erstellen des Datenträgers. Ursache: %s." - -msgid "Create volume failed." -msgstr "Erstellen von Datenträger fehlgeschlagen." - -msgid "Creating a consistency group from a source is not currently supported." -msgstr "" -"Das Erstellen einer Konsistenzgruppe aus einer Quelle wird zurzeit nicht " -"unterstützt. " - -#, python-format -msgid "" -"Creating and activating zone set failed: (Zone set=%(cfg_name)s error=" -"%(err)s)." -msgstr "" -"Fehler beim Erstellen und Aktivieren der Zonengruppe: (Zonengruppe=" -"%(cfg_name)s Fehler=%(err)s)." - -#, python-format -msgid "" -"Creating and activating zone set failed: (Zone set=%(zoneset)s error=" -"%(err)s)." -msgstr "" -"Fehler beim Erstellen und Aktivieren der Zonengruppe: (Zonengruppe=" -"%(zoneset)s Fehler=%(err)s)." - -#, python-format -msgid "Creating usages for %(begin_period)s until %(end_period)s" -msgstr "" -"Es werden Nutzungen für die Zeit von %(begin_period)s bis %(end_period)s " -"erstellt." - -msgid "Current host isn't part of HGST domain." -msgstr "Der aktuelle Host ist nicht Teil der HGST-Domäne." 
- -#, python-format -msgid "" -"Current host not valid for volume %(id)s with type %(type)s, migration not " -"allowed" -msgstr "" -"Aktueller Host nicht gültig für Datenträger %(id)s mit Typ %(type)s, " -"Migration nicht zulässig." - -#, python-format -msgid "" -"Currently mapped host for volume %(vol)s is in unsupported host group with " -"%(group)s." -msgstr "" -"Der zurzeit zugeordnete Host für den Datenträger %(vol)s befindet sich in " -"einer nicht unterstützten Hostgruppe mit %(group)s." - -#, python-format -msgid "" -"DRBDmanage driver error: expected key \"%s\" not in answer, wrong DRBDmanage " -"version?" -msgstr "" -"DRBDmanage-Treiberfehler: Erwarteter Schlüssel \"%s\" nicht in der Antwort. " -"Falsche DRBDmanage-Version?" - -msgid "" -"DRBDmanage driver setup error: some required libraries (dbus, drbdmanage.*) " -"not found." -msgstr "" -"DRBDmanage-Treiberinstallationsfehler: Einige erforderliche Bibliotheken " -"(dbus, drbdmanage.*) wurden nicht gefunden." - -#, python-format -msgid "DRBDmanage expected one resource (\"%(res)s\"), got %(n)d" -msgstr "" -"DRBDmanage hat eine Ressource erwartet (\"%(res)s\"), aber %(n)d erhalten." 
- -#, python-format -msgid "" -"DRBDmanage timeout waiting for new volume after snapshot restore; resource " -"\"%(res)s\", volume \"%(vol)s\"" -msgstr "" -"DRBDmanage-Zeitlimitüberschreitung beim Warten auf neuen Datenträger nach " -"Wiederherstellung der Schattenkopie: Ressource \"%(res)s\", Datenträger " -"\"%(vol)s\"" - -#, python-format -msgid "" -"DRBDmanage timeout waiting for snapshot creation; resource \"%(res)s\", " -"snapshot \"%(sn)s\"" -msgstr "" -"DRBDmanage-Zeitlimitüberschreitung beim Warten auf Schattenkopieerstellung: " -"Ressource \"%(res)s\", Schattenkopie \"%(sn)s\"" - -#, python-format -msgid "" -"DRBDmanage timeout waiting for volume creation; resource \"%(res)s\", volume " -"\"%(vol)s\"" -msgstr "" -"DRBDmanage-Zeitlimitüberschreitung beim Warten auf Datenträgererstellung: " -"Ressource \"%(res)s\", Datenträger \"%(vol)s\"" - -#, python-format -msgid "" -"DRBDmanage timeout waiting for volume size; volume ID \"%(id)s\" (res " -"\"%(res)s\", vnr %(vnr)d)" -msgstr "" -"DRBDmanage-Zeitlimitüberschreitung beim Warten auf Datenträgergröße: " -"Datenträger-ID \"%(id)s\" (res \"%(res)s\", vnr %(vnr)d)" - -msgid "Data ONTAP API version could not be determined." -msgstr "Data ONTAP-API-Version konnte nicht bestimmt werden." - -msgid "Data ONTAP operating in 7-Mode does not support QoS policy groups." -msgstr "" -"Data ONTAP arbeitet im 7-Modus, der QoS-Richtliniengruppen nicht unterstützt." - -msgid "Database schema downgrade is not allowed." -msgstr "Das Herabstufen des Datenbankschemas ist nicht zulässig." - -#, python-format -msgid "Dataset %s is not shared in Nexenta Store appliance" -msgstr "" -"Die Datengruppe %s wird nicht in der Nexenta Store-Appliance gemeinsam " -"genutzt." - -#, python-format -msgid "Dataset group %s not found at Nexenta SA" -msgstr "Die Datengruppe %s wurde nicht in Nexenta SA gefunden." 
- -#, python-format -msgid "" -"Dedup is a valid provisioning type, but requires WSAPI version " -"'%(dedup_version)s' version '%(version)s' is installed." -msgstr "" -"Dedup ist ein gültiger Bereitstellungstyp, erfordert jedoch WSAPI-Version " -"'%(dedup_version)s'. Version '%(version)s' ist installiert." - -msgid "Dedup luns cannot be extended" -msgstr "Dedup-LUNs können nicht erweitert werden." - -#, python-format -msgid "" -"Default quota for resource: %(res)s is set by the default quota flag: quota_" -"%(res)s, it is now deprecated. Please use the default quota class for " -"default quota." -msgstr "" -"Das Standardkontingent für die Ressource %(res)s wird über die " -"Standardkontingentmarkierung quota_%(res)s festgelegt, die jetzt veraltet " -"ist. Verwenden Sie für das Standardkontingent die Standardkontingentklasse. " - -msgid "Default volume type can not be found." -msgstr "Standarddatenträgertyp wurde nicht gefunden." - -msgid "Delete LUNcopy error." -msgstr "Fehler beim Löschen der LUN-Kopie." - -msgid "Delete QoS policy error." -msgstr "Fehler beim Löschen der QoS-Richtlinie." - -msgid "Delete associated lun from lungroup error." -msgstr "Fehler beim Löschen der zugeordneten LUN aus LUN-Gruppe." - -#, python-format -msgid "" -"Delete backup aborted, the backup service currently configured " -"[%(configured_service)s] is not the backup service that was used to create " -"this backup [%(backup_service)s]." -msgstr "" -"Löschen von Sicherung abgebrochen. Der derzeit konfigurierte " -"Sicherungsdienst [%(configured_service)s] ist nicht der Sicherungsdienst, " -"der zum Erstellen dieser Sicherung [%(backup_service)s] verwendet wurde." - -msgid "Delete consistency group failed." -msgstr "Löschen der Konsistenzgruppe fehlgeschlagen." - -msgid "Delete hostgroup error." -msgstr "Fehler beim Löschen der Hostgruppe." - -msgid "Delete hostgroup from mapping view error." -msgstr "Fehler beim Löschen der Hostgruppe aus der Zuordnungsansicht." 
- -msgid "Delete lun error." -msgstr "Fehler beim Löschen der LUN." - -msgid "Delete lun migration error." -msgstr "Fehler beim Löschen der LUN-Migration." - -msgid "Delete lungroup error." -msgstr "Fehler beim Löschen der LUN-Gruppe." - -msgid "Delete lungroup from mapping view error." -msgstr "Fehler beim Löschen der LUN-Gruppe aus der Zuordnungsansicht." - -msgid "Delete mapping view error." -msgstr "Fehler beim Löschen der Zuordnungsansicht." - -msgid "Delete port group error." -msgstr "Fehler beim Löschen der Portgruppe." - -msgid "Delete portgroup from mapping view error." -msgstr "Fehler beim Löschen der Portgruppe aus der Zuordnungsansicht." - -msgid "Delete snapshot error." -msgstr "Fehler beim Löschen der Schattenkopie." - -#, python-format -msgid "Delete snapshot of volume not supported in state: %s." -msgstr "" -"Löschen der Schattenkopie des Datenträgers im Status %s nicht unterstützt." - -#, python-format -msgid "" -"Delete_backup aborted, expected backup status %(expected_status)s but got " -"%(actual_status)s." -msgstr "" -"Delete_backup abgebrochen. Sicherungsstatus %(expected_status)s erwartet, " -"tatsächlicher Status ist %(actual_status)s." - -msgid "Deleting volume from database and skipping rpc." -msgstr "Datenträger wird aus Datenbank gelöscht und rpc wird übersprungen." - -#, python-format -msgid "Deleting zones failed: (command=%(cmd)s error=%(err)s)." -msgstr "Löschen von Zonen fehlgeschlagen: (Befehl=%(cmd)s Fehler=%(err)s)." - -msgid "Dell API 2.1 or later required for Consistency Group support" -msgstr "" -"Dell API 2.1 oder höher für Konsistenzgruppenunterstützung erforderlich." - -msgid "" -"Dell Cinder driver configuration error replication not supported with direct " -"connect." -msgstr "" -"Dell Cinder-Treiberkonfigurationsfehler. Keine Unterstützung für Replikation " -"mit direkter Verbindung." 
- -#, python-format -msgid "Dell Cinder driver configuration error replication_device %s not found" -msgstr "" -"Dell Cinder-Treiberkonfigurationsfehler: 'replication_device' %s wurde nicht " -"gefunden." - -msgid "Describe-resource is admin only functionality" -msgstr "'Describe-resource' ist eine reine Administratorfunktion." - -#, python-format -msgid "Destination has migration_status %(stat)s, expected %(exp)s." -msgstr "Ziel hat migration_status %(stat)s, erwartet %(exp)s." - -msgid "Destination volume not mid-migration." -msgstr "Zieldatenträger befindet sich nicht in einer Migration." - -msgid "" -"Detach volume failed: More than one attachment, but no attachment_id " -"provided." -msgstr "" -"Aufheben der Zuordnung des Datenträgers fehlgeschlagen: Mehr als ein Anhang, " -"aber keine attachment_id angegeben." - -msgid "Detach volume from instance and then try again." -msgstr "" -"Hängen Sie den Datenträger von der Instanz ab und versuchen Sie es dann " -"erneut." - -#, python-format -msgid "Detected more than one volume with name %(vol_name)s" -msgstr "Mehrere Datenträger mit dem Namen %(vol_name)s gefunden" - -#, python-format -msgid "Did not find expected column in %(fun)s: %(hdr)s." -msgstr "Erwarteter Spaltenname nicht in %(fun)s gefunden: %(hdr)s." - -#, python-format -msgid "Did not find the expected key %(key)s in %(fun)s: %(raw)s." -msgstr "" -"Der erwartete Schlüssel %(key)s wurde in %(fun)s nicht gefunden: %(raw)s." - -msgid "Disabled reason contains invalid characters or is too long" -msgstr "Ursache für Inaktivierung: Enthält ungültige Zeichen oder ist zu lang." - -#, python-format -msgid "Domain with name %s wasn't found." -msgstr "Domäne mit dem Namen %s wurde nicht gefunden." - -#, python-format -msgid "" -"Downlevel GPFS Cluster Detected. GPFS Clone feature not enabled in cluster " -"daemon level %(cur)s - must be at least at level %(min)s." -msgstr "" -"Ältere Version von GPFS-Cluster erkannt. 
GPFS-Klonfunktion ist nicht in der " -"Cluster-Dämonstufe %(cur)s aktiviert: Es muss mindestens Stufe %(min)s " -"vorhanden sein." - -#, python-format -msgid "Driver initialize connection failed (error: %(err)s)." -msgstr "Treiberinitialisierungsverbindung fehlgeschlagen (Fehler: %(err)s)." - -msgid "Driver must implement initialize_connection" -msgstr "Treiber muss initialize_connection implementieren." - -#, python-format -msgid "" -"Driver successfully decoded imported backup data, but there are missing " -"fields (%s)." -msgstr "" -"Der Treiber hat die importierten Sicherungsdaten erfolgreich entschlüsselt, " -"aber es fehlen Felder (%s)." - -#, python-format -msgid "" -"E-series proxy API version %(current_version)s does not support full set of " -"SSC extra specs. The proxy version must be at at least %(min_version)s." -msgstr "" -"E-series-Proxy-API-Version %(current_version)s unterstützt nicht den " -"vollständigen Satz an zusätzlichen SSC-Spezifikationen. Mindestens " -"erforderliche Proxy-Version: %(min_version)s." - -#, python-format -msgid "" -"EMC VNX Cinder Driver CLI exception: %(cmd)s (Return Code: %(rc)s) (Output: " -"%(out)s)." -msgstr "" -"EMC-VNX-Cinder-Treiber-CLI-Ausnahme: %(cmd)s (Rückgabecode: %(rc)s) " -"(Ausgabe: %(out)s)." - -#, python-format -msgid "" -"EMC VNX Cinder Driver SPUnavailableException: %(cmd)s (Return Code: %(rc)s) " -"(Output: %(out)s)." -msgstr "" -"EMC-VNX-Cinder-Treiber-Ausnahme vom Typ 'SPUnavailableException': %(cmd)s " -"(Rückgabecode: %(rc)s) (Ausgabe: %(out)s)." - -msgid "" -"EcomServerIp, EcomServerPort, EcomUserName, EcomPassword must have valid " -"values." -msgstr "" -"EcomServerIp, EcomServerPort, EcomUserName, EcomPassword müssen gültige " -"Werte haben." - -#, python-format -msgid "" -"Either 'cgsnapshot_id' or 'source_cgid' must be provided to create " -"consistency group %(name)s from source." 
-msgstr "" -"Entweder 'cgsnapshot_id' oder 'source_cgid' muss angegeben werden, um " -"Konsistenzgruppe %(name)s aus Quelle zu erstellen." - -#, python-format -msgid "" -"Either SLO: %(slo)s or workload %(workload)s is invalid. Examine previous " -"error statement for valid values." -msgstr "" -"Entweder SLO %(slo)s oder Workload %(workload)s ist ungültig. Untersuchen " -"Sie die vorherige Fehlermeldung auf gültige Werte." - -msgid "Either hitachi_serial_number or hitachi_unit_name is required." -msgstr "" -"Entweder hitachi_serial_number oder hitachi_unit_name ist erforderlich." - -#, python-format -msgid "Element Composition Service not found on %(storageSystemName)s." -msgstr "" -"Elementkompositionsdienst wurde in %(storageSystemName)s nicht gefunden. " - -msgid "Enables QoS." -msgstr "Aktiviert QoS." - -msgid "Enables compression." -msgstr "Aktiviert Komprimierung." - -msgid "Enables replication." -msgstr "Aktiviert Replikation." - -msgid "Ensure that configfs is mounted at /sys/kernel/config." -msgstr "" -"Stellen Sie sicher, dass configfs an /sys/kernel/config eingehängt ist." - -#, python-format -msgid "" -"Error Adding Initiator: %(initiator)s on groupInitiatorGroup: " -"%(initiatorgroup)s Return code: %(ret.status)d Message: %(ret.data)s ." -msgstr "" -"Fehler beim Hinzufügen von Initiator %(initiator)s für groupInitiatorGroup: " -"%(initiatorgroup)s Rückgabecode: %(ret.status)d Nachricht: %(ret.data)s ." - -#, python-format -msgid "" -"Error Adding to TargetGroup: %(targetgroup)s withIQN: %(iqn)sReturn code: " -"%(ret.status)d Message: %(ret.data)s." -msgstr "" -"Fehler beim Hinzufügen zu Zielgruppe %(targetgroup)s mit IQN %(iqn)s " -"Rückgabecode:%(ret.status)d Nachricht: %(ret.data)s." - -#, python-format -msgid "Error Attaching volume %(vol)s." -msgstr "Fehler beim Anhängen des Datenträgers %(vol)s." 
- -#, python-format -msgid "" -"Error Cloning Snapshot: %(snapshot)s on Volume: %(lun)s of Pool: %(pool)s " -"Project: %(project)s Clone project: %(clone_proj)s Return code: " -"%(ret.status)d Message: %(ret.data)s." -msgstr "" -"Fehler beim Klonen von Schattenkopie %(snapshot)s für Datenträger %(lun)s " -"von Pool %(pool)s Projekt: %(project)s Klonprojekt: %(clone_proj)s. " -"Rückgabecode: %(ret.status)d Nachricht: %(ret.data)s." - -#, python-format -msgid "" -"Error Create Cloned Volume: %(cloneName)s Return code: %(rc)lu. Error: " -"%(error)s." -msgstr "" -"Fehler beim Erstellen eines geklonten Datenträgers: %(cloneName)s. " -"Rückgabecode: %(rc)lu. Fehler: %(error)s." - -#, python-format -msgid "" -"Error Create Cloned Volume: Volume: %(cloneName)s Source Volume:" -"%(sourceName)s. Return code: %(rc)lu. Error: %(error)s." -msgstr "" -"Fehler bei Erstellen von geklontem Datenträger: Datenträger: %(cloneName)s " -"Quellendatenträger: %(sourceName)s. Rückgabecode: %(rc)lu. Fehler: %(error)s." - -#, python-format -msgid "" -"Error Create Group: %(groupName)s. Return code: %(rc)lu. Error: %(error)s." -msgstr "" -"Fehler beim Erstellen der Gruppe %(groupName)s. Rückgabecode: %(rc)lu. " -"Fehler: %(error)s." - -#, python-format -msgid "" -"Error Create Masking View: %(groupName)s. Return code: %(rc)lu. Error: " -"%(error)s." -msgstr "" -"Fehler beim Erstellen der Maskenansicht %(groupName)s. Rückgabecode: " -"%(rc)lu. Fehler: %(error)s." - -#, python-format -msgid "" -"Error Create Volume: %(volumeName)s. Return code: %(rc)lu. Error: %(error)s." -msgstr "" -"Fehler bei Erstellen von Datenträger: %(volumeName)s. Rückgabecode: %(rc)lu. " -"Fehler: %(error)s." - -#, python-format -msgid "" -"Error Create Volume: %(volumename)s. Return code: %(rc)lu. Error: %(error)s." -msgstr "" -"Fehler bei Erstellen von Datenträger: %(volumename)s. Rückgabecode: %(rc)lu. " -"Fehler: %(error)s." 
- -#, python-format -msgid "" -"Error CreateGroupReplica: source: %(source)s target: %(target)s. Return " -"code: %(rc)lu. Error: %(error)s." -msgstr "" -"Fehler bei CreateGroupReplica: Quelle: %(source)s Ziel: %(target)s. " -"Rückgabecode: %(rc)lu. Fehler: %(error)s." - -#, python-format -msgid "" -"Error Creating Initiator: %(initiator)s on Alias: %(alias)s Return code: " -"%(ret.status)d Message: %(ret.data)s ." -msgstr "" -"Fehler beim Erstellen von Initiator %(initiator)s für Alias: %(alias)s " -"Rückgabecode: %(ret.status)d Nachricht: %(ret.data)s." - -#, python-format -msgid "" -"Error Creating Project: %(project)s on Pool: %(pool)s Return code: " -"%(ret.status)d Message: %(ret.data)s ." -msgstr "" -"Fehler beim Erstellen von Projekt %(project)s für Pool %(pool)s " -"Rückgabecode: %(ret.status)d Nachricht: %(ret.data)s." - -#, python-format -msgid "" -"Error Creating Property: %(property)s Type: %(type)s Description: " -"%(description)s Return code: %(ret.status)d Message: %(ret.data)s." -msgstr "" -"Fehler beim Erstellen der Eigenschaft: %(property)s Typ: %(type)s " -"Beschreibung: %(description)s Rückgabecode: %(ret.status)d Nachricht: " -"%(ret.data)s." - -#, python-format -msgid "" -"Error Creating Share: %(name)s Return code: %(ret.status)d Message: " -"%(ret.data)s." -msgstr "" -"Fehler beim Erstellen von freigegebenem Verzeichnis %(name)s. Rückgabecode: " -"%(ret.status)d Nachricht: %(ret.data)s." - -#, python-format -msgid "" -"Error Creating Snapshot: %(snapshot)s onVolume: %(lun)s to Pool: %(pool)s " -"Project: %(project)s Return code: %(ret.status)d Message: %(ret.data)s." -msgstr "" -"Fehler beim Erstellen von Schattenkopie %(snapshot)s für Datenträger %(lun)s " -"in Pool: %(pool)s Projekt: %(project)s. Rückgabecode: %(ret.status)d " -"Nachricht: %(ret.data)s." - -#, python-format -msgid "" -"Error Creating Snapshot: %(snapshot)s onshare: %(share)s to Pool: %(pool)s " -"Project: %(project)s Return code: %(ret.status)d Message: %(ret.data)s." 
-msgstr "" -"Fehler beim Erstellen von Schattenkopie %(snapshot)s für freigegebenes " -"Verzeichnis %(share)s in Pool %(pool)s Projekt: %(project)s Rückgabecode: " -"%(ret.status)d Nachricht: %(ret.data)s." - -#, python-format -msgid "" -"Error Creating Target: %(alias)sReturn code: %(ret.status)d Message: " -"%(ret.data)s ." -msgstr "" -"Fehler beim Erstellen von Ziel %(alias)s Rückgabecode: %(ret.status)d " -"Nachricht: %(ret.data)s ." - -#, python-format -msgid "" -"Error Creating TargetGroup: %(targetgroup)s withIQN: %(iqn)sReturn code: " -"%(ret.status)d Message: %(ret.data)s ." -msgstr "" -"Fehler beim Erstellen von Zielgruppe %(targetgroup)s mit IQN %(iqn)s " -"Rückgabecode: %(ret.status)d Nachricht: %(ret.data)s." - -#, python-format -msgid "" -"Error Creating Volume: %(lun)s Size: %(size)s Return code: %(ret.status)d " -"Message: %(ret.data)s." -msgstr "" -"Fehler beim Erstellen von Datenträger %(lun)s Größe: %(size)s Rückgabecode: " -"%(ret.status)d Nachricht: %(ret.data)s. " - -#, python-format -msgid "" -"Error Creating new composite Volume Return code: %(rc)lu. Error: %(error)s." -msgstr "" -"Fehler beim Erstellen des neuen Verbunddatenträgers. Rückgabecode: %(rc)lu. " -"Fehler: %(error)s." - -#, python-format -msgid "" -"Error Creating replication action on: pool: %(pool)s Project: %(proj)s " -"volume: %(vol)s for target: %(tgt)s and pool: %(tgt_pool)sReturn code: " -"%(ret.status)d Message: %(ret.data)s ." -msgstr "" -"Fehler beim Erstellen der Replikationsaktion für: Pool: %(pool)s Projekt: " -"%(proj)s Datenträger: %(vol)s für Ziel: %(tgt)s und Pool: %(tgt_pool)s. " -"Rückgabecode: %(ret.status)d Nachricht: %(ret.data)s ." - -msgid "Error Creating unbound volume on an Extend operation." -msgstr "" -"Fehler beim Erstellen eines nicht gebundenen Datenträgers bei einer " -"Erweiterungsoperation." - -msgid "Error Creating unbound volume." -msgstr "Fehler beim Erstellen eines nicht gebundenen Datenträgers." 
- -#, python-format -msgid "" -"Error Delete Volume: %(volumeName)s. Return code: %(rc)lu. Error: %(error)s." -msgstr "" -"Fehler bei Löschen von Datenträger: %(volumeName)s. Rückgabecode: %(rc)lu. " -"Fehler: %(error)s." - -#, python-format -msgid "" -"Error Deleting Group: %(storageGroupName)s. Return code: %(rc)lu. Error: " -"%(error)s" -msgstr "" -"Fehler beim Löschen der Gruppe: %(storageGroupName)s. Rückgabecode: %(rc)lu. " -"Fehler: %(error)s" - -#, python-format -msgid "" -"Error Deleting Initiator Group: %(initiatorGroupName)s. Return code: " -"%(rc)lu. Error: %(error)s" -msgstr "" -"Fehler beim Löschen der Initiatorgruppe: %(initiatorGroupName)s. " -"Rückgabecode: %(rc)lu. Fehler: %(error)s" - -#, python-format -msgid "" -"Error Deleting Snapshot: %(snapshot)s on Share: %(share)s to Pool: %(pool)s " -"Project: %(project)s Return code: %(ret.status)d Message: %(ret.data)s." -msgstr "" -"Fehler beim Löschen von Schattenkopie %(snapshot)s für freigegebenes " -"Verzeichnis %(share)s aus Pool %(pool)s Projekt: %(project)s Rückgabecode: " -"%(ret.status)d Nachricht: %(ret.data)s." - -#, python-format -msgid "" -"Error Deleting Snapshot: %(snapshot)s on Volume: %(lun)s to Pool: %(pool)s " -"Project: %(project)s Return code: %(ret.status)d Message: %(ret.data)s." -msgstr "" -"Fehler beim Löschen von Schattenkopie %(snapshot)s für Datenträger %(lun)s " -"aus Pool: %(pool)s Projekt: %(project)s. Rückgabecode: %(ret.status)d " -"Nachricht: %(ret.data)s." - -#, python-format -msgid "" -"Error Deleting Volume: %(lun)s from Pool: %(pool)s, Project: %(project)s. " -"Return code: %(ret.status)d, Message: %(ret.data)s." -msgstr "" -"Fehler beim Löschen des Datenträgers %(lun)s aus Pool %(pool)s, Projekt: " -"%(project)s. Rückgabecode: %(ret.status)d, Nachricht: %(ret.data)s." - -#, python-format -msgid "" -"Error Deleting project: %(project)s on pool: %(pool)s Return code: " -"%(ret.status)d Message: %(ret.data)s." 
-msgstr "" -"Fehler beim Löschen des Projekts: %(project)s auf Pool: %(pool)s. " -"Rückgabecode: %(ret.status)d Nachricht: %(ret.data)s." - -#, python-format -msgid "" -"Error Deleting replication action: %(id)s Return code: %(ret.status)d " -"Message: %(ret.data)s." -msgstr "" -"Fehler beim Löschen der Replikationsaktion: %(id)s. Rückgabecode: " -"%(ret.status)d Nachricht: %(ret.data)s." - -#, python-format -msgid "" -"Error Extend Volume: %(volumeName)s. Return code: %(rc)lu. Error: %(error)s." -msgstr "" -"Fehler beim Erweitern von Datenträger: %(volumeName)s. Rückgabecode: " -"%(rc)lu. Fehler: %(error)s." - -#, python-format -msgid "" -"Error Getting Initiators: InitiatorGroup: %(initiatorgroup)s Return code: " -"%(ret.status)d Message: %(ret.data)s ." -msgstr "" -"Fehler beim Abrufen von Initiatoren: InitiatorGroup: %(initiatorgroup)s " -"Rückgabecode: %(ret.status)d Nachricht: %(ret.data)s ." - -#, python-format -msgid "" -"Error Getting Pool Stats: Pool: %(pool)s Return code: %(status)d Message: " -"%(data)s." -msgstr "" -"Fehler beim Abrufen der Poolstatistikdaten: Pool: %(pool)s Rückgabecode: " -"%(status)d Nachricht: %(data)s." - -#, python-format -msgid "" -"Error Getting Project Stats: Pool: %(pool)s Project: %(project)s Return " -"code: %(ret.status)d Message: %(ret.data)s." -msgstr "" -"Fehler beim Abrufen der Projektstatistiken: Pool: %(pool)s Projekt: " -"%(project)s Rückgabecode: %(ret.status)d Nachricht: %(ret.data)s." - -#, python-format -msgid "" -"Error Getting Share: %(share)s on Pool: %(pool)s Project: %(project)s Return " -"code: %(ret.status)d Message: %(ret.data)s." -msgstr "" -"Fehler beim Abrufen von freigegebenem Verzeichnis %(share)s für Pool " -"%(pool)s Projekt: %(project)s. Rückgabecode: %(ret.status)d Nachricht: " -"%(ret.data)s." - -#, python-format -msgid "" -"Error Getting Snapshot: %(snapshot)s on Volume: %(lun)s to Pool: %(pool)s " -"Project: %(project)s Return code: %(ret.status)d Message: %(ret.data)s." 
-msgstr "" -"Fehler beim Abrufen von Schattenkopie %(snapshot)s für Datenträger %(lun)s " -"in Pool %(pool)s Projekt: %(project)s. Rückgabecode: %(ret.status)d " -"Nachricht: %(ret.data)s." - -#, python-format -msgid "" -"Error Getting Target: %(alias)sReturn code: %(ret.status)d Message: " -"%(ret.data)s ." -msgstr "" -"Fehler beim Abrufen von Ziel %(alias)s Rückgabecode: %(ret.status)d " -"Nachricht: %(ret.data)s ." - -#, python-format -msgid "" -"Error Getting Volume: %(lun)s on Pool: %(pool)s Project: %(project)s Return " -"code: %(ret.status)d Message: %(ret.data)s." -msgstr "" -"Fehler beim Abrufen von Datenträger %(lun)s für Pool: %(pool)s Projekt: " -"%(project)s. Rückgabecode: %(ret.status)d Nachricht: %(ret.data)s." - -#, python-format -msgid "" -"Error Migrating volume from one pool to another. Return code: %(rc)lu. " -"Error: %(error)s." -msgstr "" -"Fehler beim Migrieren des Datenträgers von einem Pool in einen anderen. " -"Rückgabecode: %(rc)lu. Fehler: %(error)s." - -#, python-format -msgid "" -"Error Modifying masking view : %(groupName)s. Return code: %(rc)lu. Error: " -"%(error)s." -msgstr "" -"Fehler beim Ändern der Maskenansicht %(groupName)s. Rückgabecode: %(rc)lu. " -"Fehler: %(error)s." - -#, python-format -msgid "Error Pool ownership: Pool %(pool)s is not owned by %(host)s." -msgstr "" -"Fehler bei Pooleigentümer: Eigner des Pools %(pool)s ist nicht %(host)s." - -#, python-format -msgid "" -"Error Setting props Props: %(props)s on Volume: %(lun)s of Pool: %(pool)s " -"Project: %(project)s Return code: %(ret.status)d Message: %(ret.data)s." -msgstr "" -"Fehler beim Festlegen der Eigenschaften %(props)s für Datenträger %(lun)s " -"von Pool %(pool)s Projekt: %(project)s. Rückgabecode: %(ret.status)d " -"Nachricht: %(ret.data)s." - -#, python-format -msgid "" -"Error Terminating migrate session. Return code: %(rc)lu. Error: %(error)s." -msgstr "" -"Fehler beim Beenden der Migrationssitzung. Rückgabecode: %(rc)lu. Fehler: " -"%(error)s." 
- -#, python-format -msgid "" -"Error Verifying Initiator: %(iqn)s Return code: %(ret.status)d Message: " -"%(ret.data)s." -msgstr "" -"Fehler beim Überprüfen von Initiator %(iqn)s Rückgabecode: %(ret.status)d " -"Nachricht: %(ret.data)s." - -#, python-format -msgid "" -"Error Verifying Pool: %(pool)s Return code: %(ret.status)d Message: " -"%(ret.data)s." -msgstr "" -"Fehler beim Überprüfen von Pool %(pool)s Rückgabecode: %(ret.status)d " -"Nachricht: %(ret.data)s." - -#, python-format -msgid "" -"Error Verifying Project: %(project)s on Pool: %(pool)s Return code: " -"%(ret.status)d Message: %(ret.data)s." -msgstr "" -"Fehler beim Überprüfen von Projekt %(project)s für Pool %(pool)s " -"Rückgabecode: %(ret.status)d Nachricht: %(ret.data)s." - -#, python-format -msgid "" -"Error Verifying Service: %(service)s Return code: %(ret.status)d Message: " -"%(ret.data)s." -msgstr "" -"Fehler beim Überprüfen des %(service)s-Diensts. Rückgabecode: %(ret.status)d " -"Nachricht: %(ret.data)s." - -#, python-format -msgid "" -"Error Verifying Target: %(alias)s Return code: %(ret.status)d Message: " -"%(ret.data)s." -msgstr "" -"Fehler beim Überprüfen von Ziel %(alias)s Rückgabecode: %(ret.status)d " -"Nachricht: %(ret.data)s." - -#, python-format -msgid "" -"Error Verifying share: %(share)s on Project: %(project)s and Pool: %(pool)s " -"Return code: %(ret.status)d Message: %(ret.data)s." -msgstr "" -"Fehler beim Überprüfen von freigegebenem Verzeichnis %(share)s für Projekt " -"%(project)s und Pool %(pool)s Rückgabecode: %(ret.status)d Nachricht: " -"%(ret.data)s." - -#, python-format -msgid "" -"Error adding Volume: %(volumeName)s with instance path: " -"%(volumeInstancePath)s." -msgstr "" -"Fehler beim Hinzufügen des Datenträgers %(volumeName)s mit dem Instanzpfad: " -"%(volumeInstancePath)s." - -#, python-format -msgid "" -"Error adding initiator to group : %(groupName)s. Return code: %(rc)lu. " -"Error: %(error)s." 
-msgstr "" -"Fehler beim Hinzufügen des Initiators zur Gruppe %(groupName)s. " -"Rückgabecode: %(rc)lu. Fehler: %(error)s." - -#, python-format -msgid "Error adding volume to composite volume. Error is: %(error)s." -msgstr "" -"Fehler beim Hinzufügen von Datenträger zu Verbunddatenträger. Fehler ist: " -"%(error)s." - -#, python-format -msgid "Error appending volume %(volumename)s to target base volume." -msgstr "" -"Fehler beim Anhängen von Datenträger %(volumename)s an Zielbasisdatenträger." - -#, python-format -msgid "" -"Error associating storage group : %(storageGroupName)s. To fast Policy: " -"%(fastPolicyName)s with error description: %(errordesc)s." -msgstr "" -"Fehler beim Zuordnen der Speichergruppe %(storageGroupName)s zur FAST-" -"Richtlinie %(fastPolicyName)s. Fehlerbeschreibung: %(errordesc)s." - -#, python-format -msgid "" -"Error break clone relationship: Sync Name: %(syncName)s Return code: " -"%(rc)lu. Error: %(error)s." -msgstr "" -"Fehler beim Unterbrechen der Klonbeziehung: Synchronisationsname: " -"%(syncName)s Rückgabecode: %(rc)lu. Fehler: %(error)s." - -msgid "Error connecting to ceph cluster." -msgstr "Fehler beim Herstellen der Verbindung zum ceph-Cluster." - -#, python-format -msgid "Error connecting via ssh: %s" -msgstr "Fehler beim Herstellen einer Verbindung über SSH: %s" - -#, python-format -msgid "Error creating volume: %s." -msgstr "Fehler beim Erstellen des Datenträgers: %s" - -msgid "Error deleting replay profile." -msgstr "Fehler beim Löschen des Wiedergabeprofils." - -#, python-format -msgid "Error deleting volume %(ssn)s: %(volume)s" -msgstr "Fehler beim Löschen des Datenträgers %(ssn)s: %(volume)s" - -#, python-format -msgid "Error deleting volume %(vol)s: %(err)s." -msgstr "Fehler beim Löschen des Datenträgers %(vol)s: %(err)s." 
- -#, python-format -msgid "Error during evaluator parsing: %(reason)s" -msgstr "Fehler während Bewerteranalyse: %(reason)s" - -#, python-format -msgid "" -"Error editing share: %(share)s on Pool: %(pool)s Return code: %(ret.status)d " -"Message: %(ret.data)s ." -msgstr "" -"Fehler beim Bearbeiten von freigegebenem Verzeichnis %(share)s für Pool " -"%(pool)s. Rückgabecode: %(ret.status)d Nachricht: %(ret.data)s." - -#, python-format -msgid "" -"Error enabling iSER for NetworkPortal: please ensure that RDMA is supported " -"on your iSCSI port %(port)d on ip %(ip)s." -msgstr "" -"Fehler beim Aktivieren von iSER für NetworkPortal: Stellen Sie sicher, dass " -"RDMA an Ihrem iSCSI-Port %(port)d unter IP %(ip)s unterstützt wird." - -#, python-format -msgid "Error encountered during cleanup of a failed attach: %(ex)s" -msgstr "" -"Bei der Bereinigung eines fehlgeschlagenen Zuordnungsversuchs ist ein Fehler " -"aufgetreten: %(ex)s" - -#, python-format -msgid "Error executing CloudByte API [%(cmd)s], Error: %(err)s." -msgstr "Fehler beim Ausführen von CloudByte-API [%(cmd)s], Fehler: %(err)s." - -msgid "Error executing EQL command" -msgstr "Fehler beim Ausführen des EQL-Befehls" - -#, python-format -msgid "Error executing command via ssh: %s" -msgstr "Fehler beim Ausführen eines Befehls über SSH: %s" - -#, python-format -msgid "Error extending volume %(vol)s: %(err)s." -msgstr "Fehler beim Erweitern des Datenträgers %(vol)s: %(err)s." - -#, python-format -msgid "Error extending volume: %(reason)s" -msgstr "Fehler beim Erweitern des Datenträgers: %(reason)s" - -#, python-format -msgid "Error finding %(name)s." -msgstr "Fehler bei der Suche nach %(name)s." - -#, python-format -msgid "Error finding %s." -msgstr "Fehler bei der Suche nach %s." - -#, python-format -msgid "" -"Error getting ReplicationSettingData. Return code: %(rc)lu. Error: %(error)s." -msgstr "" -"Fehler beim Abrufen von Replikationseinstellungsdaten. Rückgabecode: " -"%(rc)lu. Fehler: %(error)s." 
- -msgid "" -"Error getting appliance version details. Return code: %(ret.status)d " -"Message: %(ret.data)s ." -msgstr "" -"Fehler beim Abrufen der Details der Appliance-Version. Rückgabecode: %(ret." -"status)d Nachricht: %(ret.data)s." - -#, python-format -msgid "Error getting domain id from name %(name)s: %(err)s." -msgstr "Fehler beim Abrufen der Domänen-ID von Name %(name)s: %(err)s." - -#, python-format -msgid "Error getting domain id from name %(name)s: %(id)s." -msgstr "Fehler beim Abrufen der Domänen-ID von Name %(name)s: %(id)s." - -msgid "Error getting initiator groups." -msgstr "Fehler beim Abrufen von Initiatorgruppen." - -#, python-format -msgid "Error getting pool id from name %(pool)s: %(err)s." -msgstr "Fehler beim Abrufen der Pool-ID von Name %(pool)s: %(err)s." - -#, python-format -msgid "Error getting pool id from name %(pool_name)s: %(err_msg)s." -msgstr "Fehler beim Abrufen der Pool-ID von Name %(pool_name)s: %(err_msg)s." - -#, python-format -msgid "" -"Error getting replication action: %(id)s. Return code: %(ret.status)d " -"Message: %(ret.data)s ." -msgstr "" -"Fehler beim Abrufen der Replikationsaktion: %(id)s. Rückgabecode: " -"%(ret.status)d Nachricht: %(ret.data)s." - -msgid "" -"Error getting replication source details. Return code: %(ret.status)d " -"Message: %(ret.data)s ." -msgstr "" -"Fehler beim Abrufen der Details der Replikationsquelle. Rückgabecode: %(ret." -"status)d Nachricht: %(ret.data)s." - -msgid "" -"Error getting replication target details. Return code: %(ret.status)d " -"Message: %(ret.data)s ." -msgstr "" -"Fehler beim Abrufen der Details des Replikationsziels. Rückgabecode: %(ret." -"status)d Nachricht: %(ret.data)s." - -#, python-format -msgid "" -"Error getting version: svc: %(svc)s.Return code: %(ret.status)d Message: " -"%(ret.data)s." -msgstr "" -"Fehler beim Abrufen der Version: SVC: %(svc)s. Rückgabecode: %(ret.status)d " -"Nachricht: %(ret.data)s." 
- -#, python-format -msgid "" -"Error in Operation [%(operation)s] for volume [%(cb_volume)s] in CloudByte " -"storage: [%(cb_error)s], error code: [%(error_code)s]." -msgstr "" -"Fehler in Operation [%(operation)s] für Datenträger [%(cb_volume)s] im " -"CloudByte-Speicher: [%(cb_error)s], Fehlercode: [%(error_code)s]." - -#, python-format -msgid "Error in SolidFire API response: data=%(data)s" -msgstr "Fehler in Antwort von SolidFire-API: Daten=%(data)s" - -#, python-format -msgid "Error in space-create for %(space)s of size %(size)d GB" -msgstr "" -"Fehler in Bereichserstellung für %(space)s mit einer Größe von %(size)d GB." - -#, python-format -msgid "Error in space-extend for volume %(space)s with %(size)d additional GB" -msgstr "" -"Fehler in Bereichserweiterung für Datenträger %(space)s mit zusätzlich " -"%(size)d GB" - -#, python-format -msgid "Error managing volume: %s." -msgstr "Fehler beim Verwalten des Datenträgers: %s" - -#, python-format -msgid "" -"Error modify replica synchronization: %(sv)s operation: %(operation)s. " -"Return code: %(rc)lu. Error: %(error)s." -msgstr "" -"Fehler beim Ändern der Replikatsynchronisation: %(sv)s-Operation: " -"%(operation)s. Rückgabecode: %(rc)lu. Fehler: %(error)s." - -#, python-format -msgid "" -"Error modifying Service: %(service)s Return code: %(ret.status)d Message: " -"%(ret.data)s." -msgstr "" -"Fehler beim Ändern von %(service)s-Dienst. Rückgabecode: %(ret.status)d " -"Nachricht: %(ret.data)s." - -#, python-format -msgid "" -"Error moving volume: %(vol)s from source project: %(src)s to target project: " -"%(tgt)s Return code: %(ret.status)d Message: %(ret.data)s ." -msgstr "" -"Fehler beim Verschieben des Datenträgers: %(vol)s von Quellenprojekt: " -"%(src)s zu Zielprojekt: %(tgt)s Rückgabecode: %(ret.status)d Nachricht: " -"%(ret.data)s." - -msgid "Error not a KeyError." -msgstr "Fehler ist nicht vom Typ 'KeyError'." - -msgid "Error not a TypeError." -msgstr "Fehler ist nicht vom Typ 'TypeError'." 
- -#, python-format -msgid "Error occurred when creating cgsnapshot %s." -msgstr "Beim Erstellen von Cgsnapshot %s ist ein Fehler aufgetreten." - -#, python-format -msgid "Error occurred when deleting cgsnapshot %s." -msgstr "Beim Löschen von Cgsnapshot %s ist ein Fehler aufgetreten." - -#, python-format -msgid "Error occurred when updating consistency group %s." -msgstr "Beim Aktualisieren von Konsistenzgruppe %s ist ein Fehler aufgetreten." - -#, python-format -msgid "Error renaming volume %(vol)s: %(err)s." -msgstr "Fehler beim Umbenennen von Datenträger %(vol)s: %(err)s." - -#, python-format -msgid "Error response: %s" -msgstr "Fehlerantwort: %s" - -msgid "Error retrieving volume size" -msgstr "Fehler beim Abrufen der Datenträgergröße" - -#, python-format -msgid "" -"Error sending replication update for action id: %(id)s . Return code: " -"%(ret.status)d Message: %(ret.data)s ." -msgstr "" -"Fehler beim Senden der Replikationsaktualisierung für Aktions-ID: %(id)s. " -"Rückgabecode: %(ret.status)d Nachricht: %(ret.data)s ." - -#, python-format -msgid "" -"Error sending replication update. Returned error: %(err)s. Action: %(id)s." -msgstr "" -"Fehler beim Senden der Replikationsaktualisierung. Zurückgegebener Fehler: " -"%(err)s. Aktion: %(id)s." - -#, python-format -msgid "" -"Error setting replication inheritance to %(set)s for volume: %(vol)s project " -"%(project)s Return code: %(ret.status)d Message: %(ret.data)s ." -msgstr "" -"Fehler beim Festlegen der Replikationsvererbung auf %(set)s für Datenträger: " -"%(vol)s Projekt %(project)s Rückgabecode: %(ret.status)d Nachricht: " -"%(ret.data)s." - -#, python-format -msgid "" -"Error severing the package: %(package)s from source: %(src)s Return code: " -"%(ret.status)d Message: %(ret.data)s ." -msgstr "" -"Fehler beim Abtrennen des Pakets: %(package)s von Quelle: %(src)s " -"Rückgabecode: %(ret.status)d Nachricht: %(ret.data)s ." - -#, python-format -msgid "Error unbinding volume %(vol)s from pool. %(error)s." 
-msgstr "" -"Fehler beim Aufheben der Bindung für Datenträger %(vol)s aus dem Pool. " -"%(error)s." - -#, python-format -msgid "Error while authenticating with switch: %s." -msgstr "Fehler beim Authentifizieren am Switch: %s." - -#, python-format -msgid "Error while changing VF context %s." -msgstr "Fehler beim Ändern des VF-Kontexts %s." - -#, python-format -msgid "Error while checking the firmware version %s." -msgstr "Fehler beim Überprüfen der Firmwareversion %s." - -#, python-format -msgid "Error while checking transaction status: %s" -msgstr "Fehler beim Überprüfen des Transaktionsstatus: %s" - -#, python-format -msgid "Error while checking whether VF is available for management %s." -msgstr "Fehler beim Überprüfen, ob VF für die Verwaltung von %s verfügbar ist." - -#, python-format -msgid "" -"Error while connecting the switch %(switch_id)s with protocol %(protocol)s. " -"Error: %(error)s." -msgstr "" -"Fehler beim Herstellen der Verbindung zum Switch %(switch_id)s mit dem " -"Protokoll %(protocol)s. Fehler: %(error)s." - -#, python-format -msgid "Error while creating authentication token: %s" -msgstr "Fehler beim Erstellen des Authentifizierungstokens: %s" - -#, python-format -msgid "Error while creating snapshot [status] %(stat)s - [result] %(res)s." -msgstr "" -"Fehler beim Erstellen der Schattenkopie [Status] %(stat)s - [Ergebnis] " -"%(res)s." - -#, python-format -msgid "Error while creating volume [status] %(stat)s - [result] %(res)s." -msgstr "" -"Fehler beim Erstellen des Datenträgers [Status] %(stat)s - [Ergebnis] " -"%(res)s." - -#, python-format -msgid "Error while deleting snapshot [status] %(stat)s - [result] %(res)s" -msgstr "" -"Fehler beim Löschen der Schattenkopie [Status] %(stat)s - [Ergebnis] %(res)s." - -#, python-format -msgid "Error while deleting volume [status] %(stat)s - [result] %(res)s." -msgstr "" -"Fehler beim Löschen des Datenträgers [Status] %(stat)s - [Ergebnis] %(res)s." 
- -#, python-format -msgid "Error while extending volume [status] %(stat)s - [result] %(res)s." -msgstr "" -"Fehler beim Erweitern des Datenträgers [Status] %(stat)s - [Ergebnis] " -"%(res)s." - -#, python-format -msgid "Error while getting %(op)s details, returned code: %(status)s." -msgstr "Fehler beim Abrufen der Details von %(op)s. Rückgabecode: %(status)s." - -#, python-format -msgid "Error while getting data via ssh: (command=%(cmd)s error=%(err)s)." -msgstr "" -"Fehler beim Abrufen von Daten über SSH: (Befehl=%(cmd)s Fehler=%(err)s)." - -#, python-format -msgid "Error while getting disco information [%s]." -msgstr "Fehler beim Abrufen von Disco-Informationen [%s]." - -#, python-format -msgid "Error while getting nvp value: %s." -msgstr "Fehler beim Abrufen des nvp-Werts: %s." - -#, python-format -msgid "Error while getting session information %s." -msgstr "Fehler beim Abrufen der Sitzungsinformationen %s." - -#, python-format -msgid "Error while parsing the data: %s." -msgstr "Fehler bei der syntaktischen Analyse der Daten: %s." - -#, python-format -msgid "Error while querying page %(url)s on the switch, reason %(error)s." -msgstr "" -"Fehler beim Abfragen der Seite %(url)s auf dem Switch. Ursache: %(error)s." - -#, python-format -msgid "" -"Error while removing the zones and cfgs in the zone string: %(description)s." -msgstr "" -"Fehler beim Entfernen der Zonen und Konfigurationen in der " -"Zonenzeichenkette: %(description)s." - -#, python-format -msgid "Error while requesting %(service)s API." -msgstr "Fehler beim Anfordern der %(service)s-API." - -#, python-format -msgid "Error while running zoning CLI: (command=%(cmd)s error=%(err)s)." -msgstr "Fehler beim Ausführen von Zoning-CLI: (Befehl=%(cmd)s Fehler=%(err)s)." - -#, python-format -msgid "" -"Error while updating the new zones and cfgs in the zone string. Error " -"%(description)s." -msgstr "" -"Fehler beim Aktualisieren der neuen Zonen und Konfigurationen in der " -"Zonenzeichenkette. 
Fehler %(description)s." - -msgid "Error writing field to database" -msgstr "Fehler beim Schreiben eines Felds in Datenbank." - -#, python-format -msgid "Error[%(stat)s - %(res)s] while getting volume id." -msgstr "Fehler [%(stat)s - %(res)s] beim Abrufen der Datenträger-ID." - -#, python-format -msgid "" -"Error[%(stat)s - %(res)s] while restoring snapshot [%(snap_id)s] into volume " -"[%(vol)s]." -msgstr "" -"Fehler [%(stat)s - %(res)s] beim Wiederherstellen der Schattenkopie " -"[%(snap_id)s] im Datenträger [%(vol)s]." - -#, python-format -msgid "Error[status] %(stat)s - [result] %(res)s] while getting volume id." -msgstr "" -"Fehler [Status] %(stat)s - [Ergebnis] %(res)s] beim Abrufen der Datenträger-" -"ID." - -#, python-format -msgid "" -"Exceeded max scheduling attempts %(max_attempts)d for volume %(volume_id)s" -msgstr "" -"Die maximal zulässige Anzahl von %(max_attempts)d Planungsversuchen wurde " -"für den Datenträger %(volume_id)s überschritten." - -msgid "Exceeded the limit of snapshots per volume" -msgstr "Maximale Anzahl an Schattenkopien pro Datenträger überschritten." - -#, python-format -msgid "Exception appending meta volume to target volume %(volumename)s." -msgstr "" -"Ausnahmebedingung beim Anhängen des Metadatenträgers an Zieldatenträger " -"%(volumename)s." - -#, python-format -msgid "" -"Exception during create element replica. Clone name: %(cloneName)s Source " -"name: %(sourceName)s Extra specs: %(extraSpecs)s " -msgstr "" -"Ausnahmebedingung beim Erstellen des Elementreplikats. Klonname: " -"%(cloneName)s Quellenname: %(sourceName)s Zusätzliche Spezifikationen: " -"%(extraSpecs)s " - -#, python-format -msgid "Exception in _select_ds_for_volume: %s." -msgstr "Ausnahme in _select_ds_for_volume: %s." - -#, python-format -msgid "Exception while forming the zone string: %s." -msgstr "" -"Beim Zusammensetzen der Zonenzeichenkette ist eine Ausnahme eingetreten: %s." 
- -#, python-format -msgid "Exception: %s" -msgstr "Ausnahme: %s" - -#, python-format -msgid "Expected a uuid but received %(uuid)s." -msgstr "UUID erwartet, aber %(uuid)s empfangen." - -#, python-format -msgid "Expected exactly one node called \"%s\"" -msgstr "Es wurde genau ein Knoten mit dem Namen \"%s\" erwartet." - -#, python-format -msgid "Expected integer for node_count, svcinfo lsiogrp returned: %(node)s." -msgstr "" -"Erwartet wurde ganze Zahl für node_count. Rückgabe von svcinfo lsiogrp: " -"%(node)s" - -#, python-format -msgid "Expected no output from CLI command %(cmd)s, got %(out)s." -msgstr "Keine Ausgabe von CLI-Befehl %(cmd)s erwartet. Erhalten: %(out)s." - -#, python-format -msgid "" -"Expected single vdisk returned from lsvdisk when filtering on vdisk_UID. " -"%(count)s were returned." -msgstr "" -"Beim Filtern nach vdisk_UID wurde die Rückgabe einer einzigen virtuellen " -"Platte von lsvdisk erwartet. %(count)s wurden zurückgegeben." - -#, python-format -msgid "Expected volume size was %d" -msgstr "Erwartete Datenträgergröße war %d" - -#, python-format -msgid "" -"Export backup aborted, expected backup status %(expected_status)s but got " -"%(actual_status)s." -msgstr "" -"Exportieren der Sicherung abgebrochen. Sicherungsstatus %(expected_status)s " -"erwartet, tatsächlicher Status ist %(actual_status)s." - -#, python-format -msgid "" -"Export record aborted, the backup service currently configured " -"[%(configured_service)s] is not the backup service that was used to create " -"this backup [%(backup_service)s]." -msgstr "" -"Exportieren des Datensatzes abgebrochen. Der derzeit konfigurierte " -"Sicherungsdienst [%(configured_service)s] ist nicht der Sicherungsdienst, " -"der zum Erstellen dieser Sicherung [%(backup_service)s] verwendet wurde." - -msgid "Extend volume error." -msgstr "Fehler beim Erweitern des Datenträgers." - -msgid "" -"Extend volume is only supported for this driver when no snapshots exist." 
-msgstr "" -"Dieser Treiber unterstützt Datenträgererweiterungen nur, wenn keine " -"Schattenkopien vorhanden sind." - -msgid "Extend volume not implemented" -msgstr "Erweitern von Datenträgern nicht implementiert." - -msgid "FAST is not supported on this array." -msgstr "FAST wird in diesem Array nicht unterstützt." - -msgid "FC is the protocol but wwpns are not supplied by OpenStack." -msgstr "" -"FC ist das Protokoll, aber WWPNs werden von OpenStack nicht bereitgestellt." - -#, python-format -msgid "Faield to unassign %(volume)s" -msgstr "Zuordnung für %(volume)s konnte nicht aufgehoben werden." - -#, python-format -msgid "Fail to create cache volume %(volume)s. Error: %(err)s" -msgstr "" -"Fehler beim Erstellen des Zwischenspeicherdatenträgers %(volume)s. Fehler: " -"%(err)s" - -#, python-format -msgid "Failed adding connection for fabric=%(fabric)s: Error: %(err)s" -msgstr "" -"Fehler beim Hinzufügen einer Verbindung für Fabric=%(fabric)s: Fehler:%(err)s" - -msgid "Failed cgsnapshot" -msgstr "Cgsnapshot fehlgeschlagen" - -#, python-format -msgid "Failed creating snapshot for group: %(response)s." -msgstr "Fehler beim Erstellen einer Schattenkopie für Gruppe: %(response)s." - -#, python-format -msgid "Failed creating snapshot for volume %(volname)s: %(response)s." -msgstr "" -"Fehler beim Erstellen einer Schattenkopie für Datenträger %(volname)s: " -"%(response)s." - -#, python-format -msgid "Failed getting active zone set from fabric %s." -msgstr "Fehler beim Abrufen der aktiven über Fabric %s gesetzten Zone." - -#, python-format -msgid "Failed getting details for pool %s." -msgstr "Fehler beim Abrufen der Details für Pool %s." - -#, python-format -msgid "Failed removing connection for fabric=%(fabric)s: Error: %(err)s" -msgstr "" -"Fehler beim Entfernen einer Verbindung für Fabric=%(fabric)s: Fehler:%(err)s" - -#, python-format -msgid "Failed to Extend Volume %(volname)s" -msgstr "Fehler beim Erweitern von Datenträger %(volname)s." 
- -#, python-format -msgid "Failed to Login to 3PAR (%(url)s) because %(err)s" -msgstr "Anmeldung an 3PAR (%(url)s) nicht möglich. Ursache: %(err)s" - -msgid "Failed to access active zoning configuration." -msgstr "Auf die Zoning-Konfiguration konnte nicht zugegriffen werden." - -#, python-format -msgid "Failed to access zoneset status:%s" -msgstr "Auf den Zonengruppenstatus konnte nicht zugegriffen werden: %s" - -#, python-format -msgid "" -"Failed to acquire a resource lock. (serial: %(serial)s, inst: %(inst)s, ret: " -"%(ret)s, stderr: %(err)s)" -msgstr "" -"Es konnte keine Ressourcensperre bezogen werden. (Seriennummer: %(serial)s, " -"Instanz: %(inst)s, Rückgabe: %(ret)s, Standardfehler: %(err)s)" - -msgid "Failed to add the logical device." -msgstr "Die logische Einheit wurde nicht hinzugefügt." - -#, python-format -msgid "" -"Failed to add volume %(volumeName)s to consistency group %(cgName)s. Return " -"code: %(rc)lu. Error: %(error)s." -msgstr "" -"Fehler beim Hinzufügen des Datenträgers %(volumeName)s zur Konsistenzgruppe " -"%(cgName)s. Rückgabecode: %(rc)lu. Fehler: %(error)s." - -msgid "Failed to add zoning configuration." -msgstr "Fehler beim Hinzufügen der Zoning-Konfiguration." - -#, python-format -msgid "" -"Failed to assign the iSCSI initiator IQN. (port: %(port)s, reason: " -"%(reason)s)" -msgstr "" -"Der qualifizierte iSCSI-Initiatorname konnte nicht zugewiesen werden. (Port: " -"%(port)s, Ursache: '%(reason)s')" - -#, python-format -msgid "Failed to associate qos_specs: %(specs_id)s with type %(type_id)s." -msgstr "" -"Zuordnung von qos_specs fehlgeschlagen: %(specs_id)s mit Typ %(type_id)s." - -#, python-format -msgid "Failed to attach iSCSI target for volume %(volume_id)s." -msgstr "Fehler beim Anhängen von iSCSI-Ziel für Datenträger %(volume_id)s." 
- -#, python-format -msgid "Failed to backup volume metadata - %s" -msgstr "Fehler bei der Sicherung von Datenträgermetadaten - %s" - -#, python-format -msgid "" -"Failed to backup volume metadata - Metadata backup object 'backup.%s.meta' " -"already exists" -msgstr "" -"Fehler bei der Sicherung von Datenträgermetadaten. Metadatensicherungsobjekt " -"'backup.%s.meta' ist bereits vorhanden." - -#, python-format -msgid "Failed to clone volume from snapshot %s." -msgstr "Fehler beim Klonen des Datenträgers aus Schattenkopie %s." - -#, python-format -msgid "Failed to connect to %(vendor_name)s Array %(host)s: %(err)s" -msgstr "Fehler beim Verbinden mit %(vendor_name)s-Array %(host)s: %(err)s" - -msgid "Failed to connect to Dell REST API" -msgstr "Fehler beim Herstellen einer Verbindung zur Dell-REST-API. " - -msgid "Failed to connect to array" -msgstr "Fehler beim Verbinden mit Array." - -#, python-format -msgid "Failed to connect to sheep daemon. addr: %(addr)s, port: %(port)s" -msgstr "" -"Fehler beim Herstellen einer Verbindung zu sheep-Dämon. Adresse: %(addr)s, " -"Port: %(port)s" - -#, python-format -msgid "Failed to copy image to volume: %(reason)s" -msgstr "Kopieren des Abbilds auf Datenträger fehlgeschlagen: %(reason)s" - -#, python-format -msgid "Failed to copy metadata to volume: %(reason)s" -msgstr "Kopieren von Metadaten auf Datenträger fehlgeschlagen: %(reason)s" - -msgid "Failed to copy volume, destination device unavailable." -msgstr "Fehler beim Kopieren des Datenträgers, Zielgerät nicht verfügbar." - -msgid "Failed to copy volume, source device unavailable." -msgstr "Fehler beim Kopieren des Datenträgers, Quellengerät nicht verfügbar." - -#, python-format -msgid "Failed to create CG %(cgName)s from snapshot %(cgSnapshot)s." -msgstr "" -"Fehler beim Erstellen der Konsistenzgruppe %(cgName)s aus Schattenkopie " -"%(cgSnapshot)s." - -#, python-format -msgid "Failed to create IG, %s" -msgstr "IG konnte nicht erstellt werden. 
%s" - -#, python-format -msgid "Failed to create Volume Group: %(vg_name)s" -msgstr "Erstellen der Datenträgergruppe fehlgeschlagen: %(vg_name)s" - -#, python-format -msgid "" -"Failed to create a file. (file: %(file)s, ret: %(ret)s, stderr: %(err)s)" -msgstr "" -"Fehler beim Erstellen einer Datei. (Datei: %(file)s, Rückgabe: %(ret)s, " -"Standardfehler: %(err)s)" - -#, python-format -msgid "Failed to create a temporary snapshot for volume %s." -msgstr "" -"Fehler beim Erstellen einer temporären Schattenkopie für Datenträger %s." - -msgid "Failed to create api volume flow." -msgstr "Der API-Ablauf für die Erstellung des Datenträgers ist fehlgeschlagen." - -#, python-format -msgid "Failed to create cg snapshot %(id)s due to %(reason)s." -msgstr "" -"Fehler beim Erstellen der Schattenkopie der Konsistenzgruppe (Cgsnapshot) " -"%(id)s. Ursache: %(reason)s." - -#, python-format -msgid "Failed to create consistency group %(id)s due to %(reason)s." -msgstr "" -"Fehler beim Erstellen der Konsistenzgruppe %(id)s. Ursache: %(reason)s." - -#, python-format -msgid "Failed to create consistency group %(id)s:%(ret)s." -msgstr "Fehler beim Erstellen der Konsistenzgruppe %(id)s:%(ret)s." - -#, python-format -msgid "" -"Failed to create consistency group %s because VNX consistency group cannot " -"accept compressed LUNs as members." -msgstr "" -"Fehler beim Erstellen von Konsistenzgruppe %s, da VNX-Konsistenzgruppe keine " -"komprimierten LUNs als Mitglieder akzeptieren kann." - -#, python-format -msgid "Failed to create consistency group: %(cgName)s." -msgstr "Fehler beim Erstellen der Konsistenzgruppe: %(cgName)s." - -#, python-format -msgid "Failed to create consistency group: %(cgid)s. Error: %(excmsg)s." -msgstr "" -"Fehler beim Erstellen der Konsistenzgruppe: %(cgid)s. Fehler: %(excmsg)s." - -#, python-format -msgid "" -"Failed to create consistency group: %(consistencyGroupName)s Return code: " -"%(rc)lu. Error: %(error)s." 
-msgstr "" -"Fehler beim Erstellen der Konsistenzgruppe: %(consistencyGroupName)s. " -"Rückgabecode: %(rc)lu. Fehler: %(error)s." - -#, python-format -msgid "Failed to create hardware id(s) on %(storageSystemName)s." -msgstr "Fehler beim Erstellen von Hardware-ID(s) auf %(storageSystemName)s." - -#, python-format -msgid "" -"Failed to create host: %(name)s. Please check if it exists on the array." -msgstr "" -"Fehler beim Erstellen des Hosts: %(name)s. Überprüfen Sie, ob er im Array " -"vorhanden ist." - -#, python-format -msgid "Failed to create hostgroup: %(name)s. Check if it exists on the array." -msgstr "" -"Fehler beim Erstellen der Hostgruppe: %(name)s. Überprüfen Sie, ob sie im " -"Array vorhanden ist." - -msgid "Failed to create iqn." -msgstr "Fehler beim Erstellen des IQN." - -#, python-format -msgid "Failed to create iscsi target for volume %(volume_id)s." -msgstr "Fehler beim Erstellen von iSCSI-Ziel für Datenträger %(volume_id)s." - -msgid "Failed to create manage existing flow." -msgstr "Fehler beim Erstellen von 'manage existing'-Ablauf." - -msgid "Failed to create manage_existing flow." -msgstr "Fehler beim Erstellen von manage_existing-Ablauf." - -msgid "Failed to create map on mcs, no channel can map." -msgstr "" -"Fehler beim Erstellen der Zuordnung für MCS. Es kann kein Kanal zugeordnet " -"werden." - -msgid "Failed to create map." -msgstr "Fehler beim Erstellen der Zuordnung." - -#, python-format -msgid "Failed to create metadata for volume: %(reason)s" -msgstr "Erstellen von Metadaten für Datenträger fehlgeschlagen: %(reason)s" - -msgid "Failed to create partition." -msgstr "Fehler beim Erstellen der Partition." - -#, python-format -msgid "Failed to create qos_specs: %(name)s with specs %(qos_specs)s." -msgstr "" -"Erstellen von qos_specs fehlgeschlagen: %(name)s mit Spezifikationen " -"%(qos_specs)s." - -msgid "Failed to create replica." -msgstr "Fehler beim Erstellen des Replikats." 
- -msgid "Failed to create scheduler manager volume flow" -msgstr "" -"Der Ablauf für die Erstellung des Scheduler-Verwaltungsdatenträgers ist " -"fehlgeschlagen." - -#, python-format -msgid "Failed to create snapshot %s" -msgstr "Erstellen von Schattenkopie %s fehlgeschlagen" - -#, python-format -msgid "Failed to create snapshot for cg: %(cgName)s." -msgstr "Fehler beim Erstellen von Schattenkopie für cg: %(cgName)s." - -#, python-format -msgid "Failed to create snapshot for volume %s." -msgstr "Fehler beim Erstellen einer Schattenkopie für Datenträger %s." - -#, python-format -msgid "Failed to create snapshot policy on volume %(vol)s: %(res)s." -msgstr "" -"Fehler beim Erstellen der Schattenkopierichtlinie auf Datenträger %(vol)s: " -"%(res)s." - -#, python-format -msgid "Failed to create snapshot resource area on volume %(vol)s: %(res)s." -msgstr "" -"Fehler beim Erstellen des Schattenkopieressourcenbereichs auf Datenträger " -"%(vol)s: %(res)s." - -msgid "Failed to create snapshot." -msgstr "Fehler beim Erstellen der Schattenkopie." - -#, python-format -msgid "" -"Failed to create snapshot. CloudByte volume information not found for " -"OpenStack volume [%s]." -msgstr "" -"Erstellen der Schattenkopie fehlgeschlagen. CloudByte-" -"Datenträgerinformationen für OpenStack-Datenträger [%s] nicht gefunden." - -#, python-format -msgid "Failed to create south bound connector for %s." -msgstr "Fehler beim Erstellen eines untergeordneten Connectors für %s." - -#, python-format -msgid "Failed to create storage group %(storageGroupName)s." -msgstr "Die Speichergruppe %(storageGroupName)s konnte nicht erstellt werden." - -#, python-format -msgid "Failed to create thin pool, error message was: %s" -msgstr "Fehler beim Erstellen von Thin-Pool. Fehlernachricht: %s" - -#, python-format -msgid "Failed to create volume %s" -msgstr "Erstellen von Datenträger %s fehlgeschlagen" - -#, python-format -msgid "Failed to delete SI for volume_id: %(volume_id)s because it has pair." 
-msgstr "" -"SI für volume_id %(volume_id)s konnte nicht gelöscht werden, da ein Paar " -"vorhanden ist." - -#, python-format -msgid "Failed to delete a logical device. (LDEV: %(ldev)s, reason: %(reason)s)" -msgstr "" -"Eine logische Einheit konnte nicht gelöscht werden. (Logische Einheit: " -"%(ldev)s, Ursache: %(reason)s)" - -#, python-format -msgid "Failed to delete cgsnapshot %(id)s due to %(reason)s." -msgstr "" -"Fehler beim Löschen der Schattenkopie der Konsistenzgruppe (Cgsnapshot) " -"%(id)s. Ursache: %(reason)s." - -#, python-format -msgid "Failed to delete consistency group %(id)s due to %(reason)s." -msgstr "Fehler beim Löschen der Konsistenzgruppe %(id)s. Ursache: %(reason)s." - -#, python-format -msgid "Failed to delete consistency group: %(cgName)s." -msgstr "Fehler beim Löschen der Konsistenzgruppe: %(cgName)s." - -#, python-format -msgid "" -"Failed to delete consistency group: %(consistencyGroupName)s Return code: " -"%(rc)lu. Error: %(error)s." -msgstr "" -"Fehler beim Löschen der Konsistenzgruppe: %(consistencyGroupName)s. " -"Rückgabecode: %(rc)lu. Fehler: %(error)s." - -msgid "Failed to delete device." -msgstr "Fehler beim Löschen des Geräts." - -#, python-format -msgid "" -"Failed to delete fileset for consistency group %(cgname)s. Error: %(excmsg)s." -msgstr "" -"Fehler beim Löschen der Dateigruppe für Konsistenzgruppe %(cgname)s. Fehler: " -"%(excmsg)s." - -msgid "Failed to delete iqn." -msgstr "Fehler beim Löschen des IQN." - -msgid "Failed to delete map." -msgstr "Fehler beim Löschen der Zuordnung." - -msgid "Failed to delete partition." -msgstr "Fehler beim Löschen der Partition." - -msgid "Failed to delete replica." -msgstr "Fehler beim Löschen des Replikats." - -#, python-format -msgid "Failed to delete snapshot %s" -msgstr "Löschen von Schattenkopie %s fehlgeschlagen" - -#, python-format -msgid "Failed to delete snapshot for cg: %(cgId)s." -msgstr "Fehler beim Löschen von Schattenkopie für cg: %(cgId)s." 
- -#, python-format -msgid "Failed to delete snapshot for snapshot_id: %s because it has pair." -msgstr "" -"Schattenkopie für snapshot_id %s konnte nicht gelöscht werden, da ein Paar " -"vorhanden ist." - -msgid "Failed to delete snapshot." -msgstr "Fehler beim Löschen der Schattenkopie." - -#, python-format -msgid "Failed to delete volume %(volumeName)s." -msgstr "Fehler beim Löschen von Datenträger %(volumeName)s." - -#, python-format -msgid "" -"Failed to delete volume for volume_id: %(volume_id)s because it has pair." -msgstr "" -"Datenträger für volume_id %(volume_id)s konnte nicht gelöscht werden, da ein " -"Paar vorhanden ist." - -#, python-format -msgid "Failed to detach iSCSI target for volume %(volume_id)s." -msgstr "Fehler beim Abhängen von iSCSI-Ziel für Datenträger %(volume_id)s." - -msgid "Failed to determine blockbridge API configuration" -msgstr "Konfiguration der Blockbridge-API konnte nicht bestimmt werden." - -msgid "Failed to disassociate qos specs." -msgstr "Zuordnung der QoS-Spezifikationen konnte nicht aufgehoben werden." - -#, python-format -msgid "Failed to disassociate qos_specs: %(specs_id)s with type %(type_id)s." -msgstr "" -"Aufheben der Zuordnung von qos_specs fehlgeschlagen: %(specs_id)s mit Typ " -"%(type_id)s." - -#, python-format -msgid "" -"Failed to ensure snapshot resource area, could not locate volume for id %s" -msgstr "" -"Fehler beim Sicherstellen des Schattenkopieressourcenbereichs. Datenträger " -"für ID %s wurde nicht gefunden." - -msgid "Failed to establish connection with Coho cluster" -msgstr "Fehler beim Herstellen einer Verbindung mit Coho-Cluster." - -#, python-format -msgid "" -"Failed to execute CloudByte API [%(cmd)s]. Http status: %(status)s, Error: " -"%(error)s." -msgstr "" -"Ausführung von CloudByte-API [%(cmd)s] fehlgeschlagen. Http-Status: " -"%(status)s, Fehler: %(error)s." - -msgid "Failed to execute common command." -msgstr "Ausführen des allgemeinen Befehls fehlgeschlagen." 
- -#, python-format -msgid "Failed to export for volume: %(reason)s" -msgstr "Export für Datenträger fehlgeschlagen: %(reason)s" - -#, python-format -msgid "Failed to extend volume %(name)s, Error msg: %(msg)s." -msgstr "" -"Fehler beim Erweitern des Datenträgers %(name)s, Fehlernachricht: %(msg)s." - -msgid "Failed to find QoSnode" -msgstr "QoSnode wurde nicht gefunden." - -msgid "Failed to find Storage Center" -msgstr "Storage Center wurde nicht gefunden." - -msgid "Failed to find a vdisk copy in the expected pool." -msgstr "" -"Es wurde keine Kopie der virtuellen Platte im erwarteten Pool gefunden." - -msgid "Failed to find account for volume." -msgstr "Konto für Datenträger wurde nicht gefunden." - -#, python-format -msgid "Failed to find fileset for path %(path)s, command output: %(cmdout)s." -msgstr "" -"Fehler bei der Suche nach einer Dateigruppe für Pfad %(path)s, " -"Befehlsausgabe: %(cmdout)s." - -#, python-format -msgid "Failed to find group snapshot named: %s" -msgstr "Es konnte keine Gruppenschattenkopie mit dem Namen %s gefunden werden." - -#, python-format -msgid "Failed to find host %s." -msgstr "Host %s nicht gefunden." - -#, python-format -msgid "Failed to find iSCSI initiator group containing %(initiator)s." -msgstr "Die iSCSI-Initiatorgruppe mit %(initiator)s wurde nicht gefunden." - -#, python-format -msgid "Failed to get CloudByte account details for account [%s]." -msgstr "Abrufen der CloudByte-Kontodetails zu Konto [%s] fehlgeschlagen." - -#, python-format -msgid "Failed to get LUN target details for the LUN %s" -msgstr "Fehler beim Abrufen von LUN-Zieldetails für LUN %s" - -#, python-format -msgid "Failed to get LUN target details for the LUN %s." -msgstr "Fehler beim Abrufen von LUN-Zieldetails für LUN %s." - -#, python-format -msgid "Failed to get LUN target list for the LUN %s" -msgstr "Fehler beim Abrufen von LUN-Zielliste für die LUN %s" - -#, python-format -msgid "Failed to get Partition ID for volume %(volume_id)s." 
-msgstr "Fehler beim Abrufen der Partitions-ID für Datenträger %(volume_id)s." - -#, python-format -msgid "Failed to get Raid Snapshot ID from Snapshot %(snapshot_id)s." -msgstr "" -"Fehler beim Abrufen der RAID-Schattenkopie-ID aus Schattenkopie " -"%(snapshot_id)s." - -#, python-format -msgid "Failed to get Raid Snapshot ID from snapshot: %(snapshot_id)s." -msgstr "" -"Fehler beim Abrufen der RAID-Schattenkopie-ID aus Schattenkopie: " -"%(snapshot_id)s." - -msgid "Failed to get SplitMirror." -msgstr "SplitMirror konnte nicht abgerufen werden." - -#, python-format -msgid "" -"Failed to get a storage resource. The system will attempt to get the storage " -"resource again. (resource: %(resource)s)" -msgstr "" -"Es konnte keine Speicherressource abgerufen werden. Das System wird " -"versuchen, die Speicherressource erneut abzurufen. (Ressource: %(resource)s)" - -#, python-format -msgid "Failed to get all associations of qos specs %s" -msgstr "Fehler beim Abrufen aller Zuordnungen von QoS-Spezifikationen %s" - -msgid "Failed to get channel info." -msgstr "Fehler beim Abrufen der Informationen zum Kanal." - -#, python-format -msgid "Failed to get code level (%s)." -msgstr "Codeebene (%s) konnte nicht abgerufen werden." - -msgid "Failed to get device info." -msgstr "Fehler beim Abrufen der Informationen zum Gerät." - -#, python-format -msgid "Failed to get domain because CPG (%s) doesn't exist on array." -msgstr "" -"Fehler beim Abrufen der Domäne, weil CPG (%s) im Array nicht vorhanden ist." - -msgid "Failed to get image snapshots." -msgstr "Schattenkopie des Abbilds konnte nicht abgerufen werden." - -#, python-format -msgid "Failed to get ip on Channel %(channel_id)s with volume: %(volume_id)s." -msgstr "" -"Fehler beim Abrufen der IP für Kanal %(channel_id)s mit Datenträger: " -"%(volume_id)s." - -msgid "Failed to get iqn info." -msgstr "Fehler beim Abrufen der Informationen zum IQN." - -msgid "Failed to get license info." 
-msgstr "Fehler beim Abrufen der Informationen zur Lizenz." - -msgid "Failed to get lv info." -msgstr "Fehler beim Abrufen der LV-Informationen." - -msgid "Failed to get map info." -msgstr "Fehler beim Abrufen der Informationen zur Zuordnung." - -msgid "Failed to get migration task." -msgstr "Die Migrationsaufgabe konnte nicht abgerufen werden." - -msgid "Failed to get model update from clone" -msgstr "Fehler beim Abrufen von Modellaktualisierung aus Klon" - -msgid "Failed to get name server info." -msgstr "Fehler beim Abrufen der Namensserverinformationen." - -msgid "Failed to get network info." -msgstr "Fehler beim Abrufen der Informationen zum Netz." - -#, python-format -msgid "Failed to get new part id in new pool: %(pool_id)s." -msgstr "Fehler beim Abrufen der neuen Teilekennung im neuen Pool: %(pool_id)s." - -msgid "Failed to get partition info." -msgstr "Fehler beim Abrufen der Informationen zur Partition." - -#, python-format -msgid "Failed to get pool id with volume %(volume_id)s." -msgstr "Fehler beim Abrufen der Pool-ID mit Datenträger %(volume_id)s." - -#, python-format -msgid "Failed to get remote copy information for %(volume)s due to %(err)s." -msgstr "" -"Fehler beim Abrufen der Informationen der fernen Kopie für %(volume)s. " -"Ursache: %(err)s." - -#, python-format -msgid "" -"Failed to get remote copy information for %(volume)s. Exception: %(err)s." -msgstr "" -"Fehler beim Abrufen der Informationen der fernen Kopie für %(volume)s. " -"Ausnahme: %(err)s." - -msgid "Failed to get replica info." -msgstr "Fehler beim Abrufen der Informationen zum Replikat." - -msgid "Failed to get show fcns database info." -msgstr "Fehler beim Abrufen der Anzeige von FCNS-Datenbankinformationen." - -#, python-format -msgid "Failed to get size of volume %s" -msgstr "Fehler beim Abrufen der Größe von Datenträger %s" - -#, python-format -msgid "Failed to get snapshot for volume %s." -msgstr "Fehler beim Abrufen einer Schattenkopie für Datenträger %s." 
- -msgid "Failed to get snapshot info." -msgstr "Fehler beim Abrufen der Informationen zur Schattenkopie." - -#, python-format -msgid "Failed to get target IQN for the LUN %s" -msgstr "Fehler beim Abrufen von qualifiziertem iSCSI-Zielnamen für LUN %s" - -msgid "Failed to get target LUN of SplitMirror." -msgstr "Ziel-LUN von SplitMirror konnte nicht abgerufen werden." - -#, python-format -msgid "Failed to get target portal for the LUN %s" -msgstr "Fehler beim Abrufen von Zielportal für LUN %s" - -msgid "Failed to get targets" -msgstr "Ziele konnten nicht abgerufen werden." - -msgid "Failed to get wwn info." -msgstr "Fehler beim Abrufen der Informationen zum WWN." - -#, python-format -msgid "" -"Failed to get, create or add volume %(volumeName)s to masking view " -"%(maskingViewName)s. The error message received was %(errorMessage)s." -msgstr "" -"Abrufen, Erstellen oder Hinzufügen von Datenträger %(volumeName)s in " -"Maskenansicht %(maskingViewName)s fehlgeschlagen. Die empfangene " -"Fehlernachricht war %(errorMessage)s." - -msgid "Failed to identify volume backend." -msgstr "Datenträger-Backend konnte nicht identifiziert werden." - -#, python-format -msgid "Failed to link fileset for the share %(cgname)s. Error: %(excmsg)s." -msgstr "" -"Fehler beim Verlinken der Dateigruppe für das freigegebene Verzeichnis " -"%(cgname)s. Fehler: %(excmsg)s." - -#, python-format -msgid "Failed to log on %s Array (invalid login?)." -msgstr "Anmeldung bei %s-Array fehlgeschlagen (ungültige Anmeldung?)." - -#, python-format -msgid "Failed to login for user %s." -msgstr "Anmeldung für Benutzer %s fehlgeschlagen." - -msgid "Failed to login with all rest URLs." -msgstr "Anmeldung mit allen REST-URLs fehlgeschlagen." - -#, python-format -msgid "" -"Failed to make a request to Datera cluster endpoint due to the following " -"reason: %s" -msgstr "" -"Fehler beim Stellen einer Anforderung an den Datera-Cluster-Endpunkt. " -"Ursache: %s" - -msgid "Failed to manage api volume flow." 
-msgstr "Der API-Ablauf für die Verwaltung des Datenträgers ist fehlgeschlagen." - -#, python-format -msgid "" -"Failed to manage existing %(type)s %(name)s, because reported size %(size)s " -"was not a floating-point number." -msgstr "" -"Fehler beim Verwalten des bereits vorhandenen %(type)s %(name)s, da die " -"gemeldete Größe %(size)s keine Gleitkommazahl war." - -#, python-format -msgid "" -"Failed to manage existing volume %(name)s, because of error in getting " -"volume size." -msgstr "" -"Fehler beim Verwalten des vorhandenen Datenträgers %(name)s, da beim Abrufen " -"der Datenträgergröße ein Fehler aufgetreten ist." - -#, python-format -msgid "" -"Failed to manage existing volume %(name)s, because rename operation failed: " -"Error msg: %(msg)s." -msgstr "" -"Fehler beim Verwalten des vorhandenen Datenträgers %(name)s, da die " -"Umbenennungsoperation fehlgeschlagen ist: Fehlernachricht: %(msg)s." - -#, python-format -msgid "" -"Failed to manage existing volume %(name)s, because reported size %(size)s " -"was not a floating-point number." -msgstr "" -"Fehler beim Verwalten des bereits vorhandenen Datenträgers %(name)s, da die " -"gemeldete Größe %(size)s keine Gleitkommazahl war." - -#, python-format -msgid "" -"Failed to manage existing volume due to I/O group mismatch. The I/O group of " -"the volume to be managed is %(vdisk_iogrp)s. I/O groupof the chosen type is " -"%(opt_iogrp)s." -msgstr "" -"Verwalten eines vorhandenen Datenträgers aufgrund nicht übereinstimmender E/" -"A-Gruppen fehlgeschlagen. Die E/A-Gruppe des zu verwaltenden Datenträgers " -"ist %(vdisk_iogrp)s. Die E/A-Gruppe des ausgewählten Typs ist %(opt_iogrp)s." - -#, python-format -msgid "" -"Failed to manage existing volume due to the pool of the volume to be managed " -"does not match the backend pool. Pool of the volume to be managed is " -"%(vdisk_pool)s. Pool of the backend is %(backend_pool)s." 
-msgstr "" -"Verwalten eines vorhandenen Datenträgers fehlgeschlagen, da der Pool des zu " -"verwaltenden Datenträgers nicht mit dem Backend-Pool übereinstimmt. Der Pool " -"des zu verwaltenden Datenträgers ist %(vdisk_pool)s. Der Backend-Pool ist " -"%(backend_pool)s." - -msgid "" -"Failed to manage existing volume due to the volume to be managed is " -"compress, but the volume type chosen is not compress." -msgstr "" -"Verwalten eines vorhandenen Datenträgers fehlgeschlagen, da der zu " -"verwaltende Datenträger vom Typ 'compress', der ausgewählte Datenträger " -"jedoch vom Typ 'not compress' ist." - -msgid "" -"Failed to manage existing volume due to the volume to be managed is not " -"compress, but the volume type chosen is compress." -msgstr "" -"Verwalten eines vorhandenen Datenträgers fehlgeschlagen, da der zu " -"verwaltende Datenträger vom Typ 'not compress', der ausgewählte Datenträger " -"jedoch vom Typ 'compress' ist." - -msgid "" -"Failed to manage existing volume due to the volume to be managed is not in a " -"valid I/O group." -msgstr "" -"Verwalten eines vorhandenen Datenträgers fehlgeschlagen, da der zu " -"verwaltende Datenträger nicht in einer gültigen E/A-Gruppe ist." - -msgid "" -"Failed to manage existing volume due to the volume to be managed is thick, " -"but the volume type chosen is thin." -msgstr "" -"Verwalten eines vorhandenen Datenträgers fehlgeschlagen, da der zu " -"verwaltende Datenträger vom Typ 'thick', der ausgewählte Datenträger jedoch " -"vom Typ 'thin' ist." - -msgid "" -"Failed to manage existing volume due to the volume to be managed is thin, " -"but the volume type chosen is thick." -msgstr "" -"Verwalten eines vorhandenen Datenträgers fehlgeschlagen, da der zu " -"verwaltende Datenträger vom Typ 'thin', der ausgewählte Datenträger jedoch " -"vom Typ 'thick' ist." - -#, python-format -msgid "Failed to manage volume %s." -msgstr "Fehler beim Verwalten des Datenträgers %s." 
- -#, python-format -msgid "" -"Failed to map a logical device. (LDEV: %(ldev)s, LUN: %(lun)s, port: " -"%(port)s, id: %(id)s)" -msgstr "" -"Eine logische Einheit konnte nicht zugeordnet werden. (Logische Einheit: " -"%(ldev)s, LUN: %(lun)s, Port: %(port)s, ID: %(id)s)" - -msgid "Failed to migrate volume for the first time." -msgstr "Migration des Datenträgers zum ersten Mal fehlgeschlagen." - -msgid "Failed to migrate volume for the second time." -msgstr "Migration des Datenträgers zum zweiten Mal fehlgeschlagen." - -#, python-format -msgid "Failed to move LUN mapping. Return code: %s" -msgstr "Fehler beim Verschieben der LUN-Zuordnung. Rückgabecode: %s" - -#, python-format -msgid "Failed to move volume %s." -msgstr "Fehler beim Verschieben des Datenträgers %s." - -#, python-format -msgid "Failed to open a file. (file: %(file)s, ret: %(ret)s, stderr: %(err)s)" -msgstr "" -"Fehler beim Öffnen einer Datei. (Datei: %(file)s, Rückgabe: %(ret)s, " -"Standardfehler: %(err)s)" - -#, python-format -msgid "" -"Failed to parse CLI output:\n" -" command: %(cmd)s\n" -" stdout: %(out)s\n" -" stderr: %(err)s." -msgstr "" -"CLI-Ausgabe konnte nicht analysiert werden:\n" -" Befehl: %(cmd)s\n" -" Standardausgabe: %(out)s\n" -" Standardfehler: %(err)s." - -msgid "" -"Failed to parse the configuration option 'glance_catalog_info', must be in " -"the form ::" -msgstr "" -"Fehler bei der Analyse der Konfigurationsoption 'glance_catalog_info'. " -"Erforderliches Format: ::" - -msgid "" -"Failed to parse the configuration option 'keystone_catalog_info', must be in " -"the form ::" -msgstr "" -"Die Konfigurationsoption 'keystone_catalog_info' muss das Format :" -": haben." - -msgid "" -"Failed to parse the configuration option 'swift_catalog_info', must be in " -"the form ::" -msgstr "" -"Die Konfigurationsoption 'swift_catalog_info' konnte nicht analysiert " -"werden. Muss das Format :: haben." - -#, python-format -msgid "" -"Failed to perform a zero-page reclamation. 
(LDEV: %(ldev)s, reason: " -"%(reason)s)" -msgstr "" -"Es konnte keine Null-Seiten-Erschließung durchgeführt werden. (Logische " -"Einheit: %(ldev)s, Ursache: '%(reason)s')" - -#, python-format -msgid "Failed to remove export for volume %(volume)s: %(reason)s" -msgstr "" -"Entfernen von Export für Datenträger %(volume)s fehlgeschlagen: %(reason)s" - -#, python-format -msgid "Failed to remove iscsi target for volume %(volume_id)s." -msgstr "Fehler beim Entfernen von iSCSI-Ziel für Datenträger %(volume_id)s." - -#, python-format -msgid "" -"Failed to remove volume %(volumeName)s from consistency group %(cgName)s. " -"Return code: %(rc)lu. Error: %(error)s." -msgstr "" -"Fehler beim Entfernen des Datenträgers %(volumeName)s aus der " -"Konsistenzgruppe %(cgName)s. Rückgabecode: %(rc)lu. Fehler: %(error)s." - -#, python-format -msgid "Failed to remove volume %(volumeName)s from default SG." -msgstr "" -"Fehler beim Entfernen des Datenträgers %(volumeName)s aus " -"Standarddienstgruppe." - -#, python-format -msgid "Failed to remove volume %(volumeName)s from default SG: %(volumeName)s." -msgstr "" -"Datenträger %(volumeName)s konnte nicht aus der Standarddienstgruppe " -"%(volumeName)s entfernt werden." - -#, python-format -msgid "" -"Failed to remove: %(volumename)s. from the default storage group for FAST " -"policy %(fastPolicyName)s." -msgstr "" -"%(volumename)s konnte nicht aus der Standardspeichergruppe für FAST-" -"Richtlinie %(fastPolicyName)s entfernt werden." - -#, python-format -msgid "" -"Failed to rename logical volume %(name)s, error message was: %(err_msg)s" -msgstr "" -"Fehler beim Umbenennen des logischen Datenträgers %(name)s. Fehlernachricht: " -"%(err_msg)s" - -#, python-format -msgid "Failed to retrieve active zoning configuration %s" -msgstr "Fehler beim Abrufen von aktiver Zoning-Konfiguration %s" - -#, python-format -msgid "" -"Failed to set CHAP authentication for target IQN %(iqn)s. 
Details: %(ex)s" -msgstr "" -"Fehler beim Definieren der CHAP-Authentifizierung für den Ziel-IQN %(iqn)s. " -"Details: %(ex)s" - -#, python-format -msgid "Failed to set QoS for existing volume %(name)s, Error msg: %(msg)s." -msgstr "" -"Fehler beim Festlegen von QoS für vorhandenen Datenträger %(name)s. " -"Fehlernachricht: %(msg)s." - -msgid "Failed to set attribute 'Incoming user' for SCST target." -msgstr "Fehler beim Festlegen des Attributs 'Incoming user' für SCST-Ziel." - -msgid "Failed to set partition." -msgstr "Fehler beim Festlegen der Partition." - -#, python-format -msgid "" -"Failed to set permissions for the consistency group %(cgname)s. Error: " -"%(excmsg)s." -msgstr "" -"Fehler beim Festlegen von Berechtigungen für die Konsistenzgruppe " -"%(cgname)s. Fehler: %(excmsg)s." - -#, python-format -msgid "" -"Failed to specify a logical device for the volume %(volume_id)s to be " -"unmapped." -msgstr "" -"Es wurde keine logische Einheit für den Datenträger %(volume_id)s angegeben, " -"für die die Zuordnung aufgehoben werden soll." - -#, python-format -msgid "" -"Failed to specify a logical device to be deleted. (method: %(method)s, id: " -"%(id)s)" -msgstr "" -"Es wurde keine zu löschende logische Einheit angegeben. (Methode: " -"%(method)s, ID: %(id)s)" - -msgid "Failed to terminate migrate session." -msgstr "Die Migrationssitzung konnte nicht beendet werden." - -#, python-format -msgid "Failed to unbind volume %(volume)s" -msgstr "Fehler beim Aufheben der Bindung von Datenträger %(volume)s" - -#, python-format -msgid "" -"Failed to unlink fileset for consistency group %(cgname)s. Error: %(excmsg)s." -msgstr "" -"Fehler beim Aufheben der Verlinkung der Dateigruppe für Konsistenzgruppe " -"%(cgname)s. Fehler: %(excmsg)s." - -#, python-format -msgid "Failed to unmap a logical device. (LDEV: %(ldev)s, reason: %(reason)s)" -msgstr "" -"Die Zuordnung einer logischen Einheit konnte nicht aufgehoben werden. 
" -"(Logische Einheit: %(ldev)s, Ursache: %(reason)s)" - -#, python-format -msgid "Failed to update consistency group: %(cgName)s." -msgstr "Fehler beim Aktualisieren der Konsistenzgruppe: %(cgName)s." - -#, python-format -msgid "Failed to update metadata for volume: %(reason)s" -msgstr "Aktualisieren von Metadaten für Datenträger fehlgeschlagen: %(reason)s" - -msgid "Failed to update or delete zoning configuration" -msgstr "Fehler beim Aktualisieren oder Löschen der Zoning-Konfiguration" - -msgid "Failed to update or delete zoning configuration." -msgstr "Fehler beim Aktualisieren oder Löschen der Zoning-Konfiguration." - -#, python-format -msgid "Failed to update qos_specs: %(specs_id)s with specs %(qos_specs)s." -msgstr "" -"Aktualisieren von qos_specs fehlgeschlagen: %(specs_id)s mit Spezifikationen " -"%(qos_specs)s." - -msgid "Failed to update quota usage while retyping volume." -msgstr "" -"Aktualisieren der Kontingentnutzung bei Typänderung des Datenträgers " -"fehlgeschlagen." - -msgid "Failed to update snapshot." -msgstr "Aktualisierung der Schattenkopie fehlgeschlagen." - -#, python-format -msgid "" -"Failed updating volume %(vol_id)s metadata using the provided %(src_type)s " -"%(src_id)s metadata" -msgstr "" -"Fehler beim Aktualisieren der Metadaten des Datenträgers %(vol_id)s mithilfe " -"der bereitgestellten %(src_type)s %(src_id)s-Metadaten" - -#, python-format -msgid "Failure creating volume %s." -msgstr "Fehler beim Erstellen des Datenträgers %s." - -#, python-format -msgid "Failure getting LUN info for %s." -msgstr "Fehler beim Abrufen von LUN-Informationen für %s." - -#, python-format -msgid "Failure moving new cloned LUN to %s." -msgstr "Fehler beim Verschieben von neuer geklonter LUN nach %s." - -#, python-format -msgid "Failure staging LUN %s to tmp." -msgstr "Fehler beim Zwischenspeichern von LUN %s in temporärem Bereich." - -#, python-format -msgid "Fexvisor failed to add volume %(id)s due to %(reason)s." 
-msgstr "" -"Flexvisor konnte Datenträger %(id)s nicht hinzufügen. Ursache: %(reason)s." - -#, python-format -msgid "" -"Fexvisor failed to join the volume %(vol)s in the group %(group)s due to " -"%(ret)s." -msgstr "" -"Fexvisor konnte den Datenträger %(vol)s in der Gruppe %(group)s nicht " -"verknüpfen. Ursache: %(ret)s." - -#, python-format -msgid "" -"Fexvisor failed to remove the volume %(vol)s in the group %(group)s due to " -"%(ret)s." -msgstr "" -"Fexvisor konnte den Datenträger %(vol)s in der Gruppe %(group)s nicht " -"entfernen. Ursache: %(ret)s." - -#, python-format -msgid "Fexvisor failed to remove volume %(id)s due to %(reason)s." -msgstr "" -"Flexvisor konnte den Datenträger %(id)s nicht entfernen. Ursache: %(reason)s." - -#, python-format -msgid "Fibre Channel SAN Lookup failure: %(reason)s" -msgstr "Fehler bei der Fibre Channel-SAN-Suche: %(reason)s" - -#, python-format -msgid "Fibre Channel Zone operation failed: %(reason)s" -msgstr "Fibre Channel-Zoning-Operation fehlgeschlagen: %(reason)s" - -#, python-format -msgid "Fibre Channel connection control failure: %(reason)s" -msgstr "Fehler bei der Fibre Channel-Verbindungssteuerung: %(reason)s" - -#, python-format -msgid "File %(file_path)s could not be found." -msgstr "Datei %(file_path)s wurde nicht gefunden." - -#, python-format -msgid "File %(path)s has invalid backing file %(bfile)s, aborting." -msgstr "Datei %(path)s hat ungültige Sicherungsdatei %(bfile)s. Abbruch." - -#, python-format -msgid "File already exists at %s." -msgstr "Datei bereits vorhanden bei %s." - -#, python-format -msgid "File already exists at: %s" -msgstr "Datei bereits vorhanden in: %s" - -msgid "Find host in hostgroup error." -msgstr "Fehler beim Suchen des Hosts in Hostgruppe." - -msgid "Find host lun id error." -msgstr "Fehler beim Suchen der Host-LUN-ID." - -msgid "Find lun group from mapping view error." -msgstr "Fehler beim Suchen der LUN-Gruppe in der Zuordnungsansicht." - -msgid "Find mapping view error." 
-msgstr "Fehler beim Suchen der Zuordnungsansicht." - -msgid "Find portgroup error." -msgstr "Fehler beim Suchen der Portgruppe." - -msgid "Find portgroup from mapping view error." -msgstr "Fehler beim Suchen der Portgruppe in der Zuordnungsansicht." - -#, python-format -msgid "" -"Flash Cache Policy requires WSAPI version '%(fcache_version)s' version " -"'%(version)s' is installed." -msgstr "" -"Flash-Cache-Richtlinie erfordert WSAPI-Version '%(fcache_version)s'. Version " -"'%(version)s' ist installiert." - -#, python-format -msgid "Flexvisor assign volume failed.:%(id)s:%(status)s." -msgstr "Flexvisor konnte Datenträger %(id)s nicht zuordnen: %(status)s." - -#, python-format -msgid "Flexvisor assign volume failed:%(id)s:%(status)s." -msgstr "Flexvisor konnte Datenträger %(id)s nicht zuordnen: %(status)s." - -#, python-format -msgid "" -"Flexvisor could not find volume %(id)s snapshot in the group %(vgid)s " -"snapshot %(vgsid)s." -msgstr "" -"Flexvisor konnte die Schattenkopie des Datenträgers %(id)s in der Gruppe " -"%(vgid)s Schattenkopie %(vgsid)s nicht finden." - -#, python-format -msgid "Flexvisor create volume failed.:%(volumeid)s:%(status)s." -msgstr "Flexvisor konnte Datenträger %(volumeid)s nicht erstellen: %(status)s." - -#, python-format -msgid "Flexvisor failed deleting volume %(id)s: %(status)s." -msgstr "Flexvisor konnte Datenträger %(id)s nicht löschen: %(status)s." - -#, python-format -msgid "Flexvisor failed to add volume %(id)s to group %(cgid)s." -msgstr "" -"Flexvisor konnte den Datenträger %(id)s nicht der Gruppe %(cgid)s hinzufügen." - -#, python-format -msgid "" -"Flexvisor failed to assign volume %(id)s due to unable to query status by " -"event id." -msgstr "" -"Flexvisor konnte Datenträger %(id)s nicht zuordnen, da der Status nicht " -"anhand der Ereignis-ID abgerufen werden konnte. " - -#, python-format -msgid "Flexvisor failed to assign volume %(id)s: %(status)s." 
-msgstr "Flexvisor konnte Datenträger %(id)s nicht zuordnen: %(status)s." - -#, python-format -msgid "Flexvisor failed to assign volume %(volume)s iqn %(iqn)s." -msgstr "Flexvisor konnte Datenträger %(volume)s IQN %(iqn)s nicht zuordnen." - -#, python-format -msgid "Flexvisor failed to clone volume %(id)s: %(status)s." -msgstr "Flexvisor konnte Datenträger %(id)s nicht klonen: %(status)s." - -#, python-format -msgid "Flexvisor failed to clone volume (failed to get event) %(id)s." -msgstr "" -"Flexvisor konnte Datenträger %(id)s nicht klonen (Ereignis konnte nicht " -"abgerufen werden)." - -#, python-format -msgid "Flexvisor failed to create snapshot for volume %(id)s: %(status)s." -msgstr "" -"Flexvisor konnte Schattenkopie für Datenträger %(id)s nicht erstellen: " -"%(status)s." - -#, python-format -msgid "" -"Flexvisor failed to create snapshot for volume (failed to get event) %(id)s." -msgstr "" -"Flexvisor konnte Schattenkopie für Datenträger %(id)s nicht erstellen " -"(Ereignis konnte nicht abgerufen werden)." - -#, python-format -msgid "Flexvisor failed to create volume %(id)s in the group %(vgid)s." -msgstr "" -"Flexvisor konnte Datenträger %(id)s nicht in der Gruppe %(vgid)s erstellen." - -#, python-format -msgid "Flexvisor failed to create volume %(volume)s: %(status)s." -msgstr "Flexvisor konnte Datenträger %(volume)s nicht erstellen: %(status)s." - -#, python-format -msgid "Flexvisor failed to create volume (get event) %s." -msgstr "Flexvisor konnte Datenträger %s nicht erstellen (Ereignis abrufen)." - -#, python-format -msgid "Flexvisor failed to create volume from snapshot %(id)s: %(status)s." -msgstr "" -"Flexvisor konnte Datenträger nicht aus der Schattenkopie %(id)s erstellen: " -"%(status)s." - -#, python-format -msgid "Flexvisor failed to create volume from snapshot %(id)s:%(status)s." -msgstr "" -"Flexvisor konnte Datenträger nicht aus Schattenkopie %(id)s erstellen: " -"%(status)s." 
- -#, python-format -msgid "" -"Flexvisor failed to create volume from snapshot (failed to get event) %(id)s." -msgstr "" -"Flexvisor konnte Datenträger nicht aus der Schattenkopie %(id)s erstellen " -"(Ereignis konnte nicht abgerufen werden)." - -#, python-format -msgid "Flexvisor failed to delete snapshot %(id)s: %(status)s." -msgstr "Flexvisor konnte Schattenkopie %(id)s nicht löschen: %(status)s." - -#, python-format -msgid "Flexvisor failed to delete snapshot (failed to get event) %(id)s." -msgstr "" -"Flexvisor konnte Schattenkopie %(id)s nicht löschen (Ereignis konnte nicht " -"abgerufen werden)." - -#, python-format -msgid "Flexvisor failed to delete volume %(id)s: %(status)s." -msgstr "Flexvisor konnte Datenträger %(id)s nicht löschen: %(status)s." - -#, python-format -msgid "Flexvisor failed to extend volume %(id)s: %(status)s." -msgstr "Flexvisor konnte Datenträger %(id)s nicht erweitern: %(status)s." - -#, python-format -msgid "Flexvisor failed to extend volume %(id)s:%(status)s." -msgstr "Flexvisor konnte Datenträger %(id)s nicht erweitern: %(status)s." - -#, python-format -msgid "Flexvisor failed to extend volume (failed to get event) %(id)s." -msgstr "" -"Flexvisor konnte Datenträger %(id)s nicht erweitern (Ereignis konnte nicht " -"abgerufen werden)." - -#, python-format -msgid "Flexvisor failed to get pool info %(id)s: %(status)s." -msgstr "" -"Flexvisor konnte Poolinformationen zu %(id)s nicht abrufen: %(status)s." - -#, python-format -msgid "" -"Flexvisor failed to get snapshot id of volume %(id)s from group %(vgid)s." -msgstr "" -"Flexvisor konnnte die Schattenkopie-ID des Datenträgers %(id)s nicht aus der " -"Gruppe %(vgid)s abrufen." - -#, python-format -msgid "Flexvisor failed to remove volume %(id)s from group %(cgid)s." -msgstr "" -"Flexvisor konnte den Datenträger %(id)s nicht aus der Gruppe %(cgid)s " -"entfernen." - -#, python-format -msgid "Flexvisor failed to spawn volume from snapshot %(id)s:%(status)s." 
-msgstr "" -"Flexvisor konnte Datenträger nicht aus der Schattenkopie %(id)s generieren: " -"%(status)s." - -#, python-format -msgid "" -"Flexvisor failed to spawn volume from snapshot (failed to get event) %(id)s." -msgstr "" -"Flexvisor konnte Datenträger nicht aus der Schattenkopie %(id)s erstellen " -"(Ereignis konnte nicht abgerufen werden)." - -#, python-format -msgid "Flexvisor failed to unassign volume %(id)s: %(status)s." -msgstr "" -"Flexvisor konnte Zuordnung für Datenträger %(id)s nicht aufheben: %(status)s." - -#, python-format -msgid "Flexvisor failed to unassign volume (get event) %(id)s." -msgstr "" -"Flexvisor konnte Zuordnung für Datenträger %(id)s nicht aufheben (Ereignis " -"abrufen)." - -#, python-format -msgid "Flexvisor failed to unassign volume:%(id)s:%(status)s." -msgstr "" -"Flexvisor konnte Zuordnung für Datenträger %(id)s nicht aufheben: %(status)s." - -#, python-format -msgid "Flexvisor unable to find the source volume %(id)s info." -msgstr "" -"Flexvisor konnte die Informationen zum Quellendatenträger %(id)s nicht " -"finden." - -#, python-format -msgid "Flexvisor unassign volume failed:%(id)s:%(status)s." -msgstr "" -"Flexvisor konnte Zuordnung für Datenträger %(id)s nicht aufheben: %(status)s." - -#, python-format -msgid "Flexvisor volume %(id)s failed to join group %(vgid)s." -msgstr "" -"Flexvisor-Datenträger %(id)s konnte nicht mit der Gruppe %(vgid)s verknüpft " -"werden." - -#, python-format -msgid "Folder %s does not exist in Nexenta Store appliance" -msgstr "Der Ordner %s ist nicht in der Nexenta Store-Appliance vorhanden." - -#, python-format -msgid "GPFS is not running, state: %s." -msgstr "GPFS ist nicht aktiv. Status: %s." - -msgid "Gateway VIP is not set" -msgstr "Gateway-VIP wurde nicht festgelegt." - -msgid "Get FC ports by port group error." -msgstr "Fehler beim Abrufen der FC-Ports nach Portgruppe." - -msgid "Get FC ports from array error." -msgstr "Fehler beim Abrufen der FC-Ports aus dem Array." 
- -msgid "Get FC target wwpn error." -msgstr "Fehler beim Abrufen des WWPN des FC-Ziels." - -msgid "Get HyperMetroPair error." -msgstr "Fehler beim Abrufen von HyperMetroPair." - -msgid "Get LUN group by view error." -msgstr "Fehler beim Abrufen der LUN-Gruppe nach Ansicht." - -msgid "Get LUNcopy information error." -msgstr "Fehler beim Abrufen der Informationen zur LUN-Kopie." - -msgid "Get QoS id by lun id error." -msgstr "Fehler beim Abrufen der QoS-ID nach LUN-ID." - -msgid "Get QoS information error." -msgstr "Fehler beim Abrufen der Informationen zu QoS." - -msgid "Get QoS policy error." -msgstr "Fehler beim Abrufen der QoS-Richtlinie." - -msgid "Get SplitMirror error." -msgstr "Fehler beim Abrufen von SplitMirror." - -msgid "Get active client failed." -msgstr "Abrufen des aktiven Clients ist fehlgeschlagen." - -msgid "Get array info error." -msgstr "Fehler beim Abrufen der Array-Info." - -msgid "Get cache by name error." -msgstr "Fehler beim Abrufen des Zwischenspeichers nach Name." - -msgid "Get connected free FC wwn error." -msgstr "Fehler beim Abrufen der verbundenen freien FC-WWNs." - -msgid "Get engines error." -msgstr "Fehler beim Abrufen von Engines." - -msgid "Get host initiators info failed." -msgstr "Abrufen der Informationen zu Hostinitiatoren fehlgeschlagen." - -msgid "Get hostgroup information error." -msgstr "Fehler beim Abrufen der Informationen zur Hostgruppe." - -msgid "" -"Get iSCSI port info error, please check the target IP configured in huawei " -"conf file." -msgstr "" -"Fehler beim Abrufen der Informationen zum iSCSI-Port. Überprüfen Sie die " -"konfigurierte Ziel-IP in der huawei-Konfigurationsdatei." - -msgid "Get iSCSI port information error." -msgstr "Fehler beim Abrufen der Informationen zum iSCSI-Port." - -msgid "Get iSCSI target port error." -msgstr "Fehler beim Abrufen des iSCSI-Zielports." - -msgid "Get lun id by name error." -msgstr "Fehler beim Abrufen der LUN-ID nach Namen." - -msgid "Get lun migration task error." 
-msgstr "Fehler beim Abrufen der LUN-Migrationsaufgabe." - -msgid "Get lungroup id by lun id error." -msgstr "Fehler beim Abrufen der LUN-Gruppen-ID nach LUN-ID." - -msgid "Get lungroup information error." -msgstr "Fehler beim Abrufen der Informationen zur LUN-Gruppe." - -msgid "Get migration task error." -msgstr "Fehler beim Abrufen der Migrationsaufgabe." - -msgid "Get pair failed." -msgstr "Fehler beim Abrufen von Paar." - -msgid "Get partition by name error." -msgstr "Fehler beim Abrufen der Partition nach Name." - -msgid "Get partition by partition id error." -msgstr "Fehler beim Abrufen der Partition nach Partitions-ID." - -msgid "Get port group by view error." -msgstr "Fehler beim Abrufen der Portgruppe nach Ansicht." - -msgid "Get port group error." -msgstr "Fehler beim Abrufen von Portgruppen." - -msgid "Get port groups by port error." -msgstr "Fehler beim Abrufen der Portgruppen nach Port." - -msgid "Get ports by port group error." -msgstr "Fehler beim Abrufen von Ports nach Portgruppe." - -msgid "Get remote device info failed." -msgstr "Abrufen der fernen Geräteinfo fehlgeschlagen." - -msgid "Get remote devices error." -msgstr "Fehler beim Abrufen ferner Geräte." - -msgid "Get smartcache by cache id error." -msgstr "Fehler beim Abrufen des Smart Cache nach Cache-ID." - -msgid "Get snapshot error." -msgstr "Fehler beim Abrufen der Schattenkopie." - -msgid "Get snapshot id error." -msgstr "Fehler beim Abrufen der Schattenkopie-ID." - -msgid "Get target IP error." -msgstr "Fehler beim Abrufen der Ziel-IP." - -msgid "Get target LUN of SplitMirror error." -msgstr "Fehler beim Abrufen der Ziel-LUN von SplitMirror." - -msgid "Get views by port group error." -msgstr "Fehler beim Abrufen der Ansichten nach Portgruppe." - -msgid "Get volume by name error." -msgstr "Fehler beim Abrufen des Datenträgers nach Name." - -msgid "Get volume error." -msgstr "Fehler beim Abrufen des Datenträgers." 
- -#, python-format -msgid "" -"Glance metadata cannot be updated, key %(key)s exists for volume id " -"%(volume_id)s" -msgstr "" -"Glance-Metadaten können nicht aktualisiert werden. Schlüssel %(key)s für " -"Datenträger-ID %(volume_id)s vorhanden." - -#, python-format -msgid "Glance metadata for volume/snapshot %(id)s cannot be found." -msgstr "" -"Glance-Metadaten für Datenträger/Schattenkopie %(id)s können nicht gefunden " -"werden." - -#, python-format -msgid "Gluster config file at %(config)s doesn't exist" -msgstr "Gluster-Konfigurationsdatei ist in %(config)s nicht vorhanden." - -#, python-format -msgid "Google Cloud Storage api failure: %(reason)s" -msgstr "Google-Cloudspeicher-API-Fehler: %(reason)s" - -#, python-format -msgid "Google Cloud Storage connection failure: %(reason)s" -msgstr "Google-Cloudspeicherverbindungsfehler: %(reason)s" - -#, python-format -msgid "Google Cloud Storage oauth2 failure: %(reason)s" -msgstr "Oauth2-Fehler in Google-Cloudspeicher: %(reason)s" - -#, python-format -msgid "Got bad path information from DRBDmanage! (%s)" -msgstr "Fehlerhafte Pfadangaben von DRBDmanage erhalten! (%s)" - -#, python-format -msgid "Group type %(group_type_id)s could not be found." -msgstr "Gruppentyp %(group_type_id)s konnte nicht gefunden werden." - -#, python-format -msgid "Group type with name %(group_type_name)s could not be found." -msgstr "" -"Gruppentyp mit dem Namen %(group_type_name)s konnte nicht gefunden werden." - -msgid "HBSD error occurs." -msgstr "HBSD-Fehler tritt auf." - -msgid "HPELeftHand url not found" -msgstr "HPELeftHand-URL nicht gefunden" - -#, python-format -msgid "" -"Hash block size has changed since the last backup. New hash block size: " -"%(new)s. Old hash block size: %(old)s. Do a full backup." -msgstr "" -"Hash-Blockgröße wurde seit der letzten Sicherung geändert. Neue Hash-" -"Blockgröße: %(new)s. Alte Hash-Blockgröße: %(old)s. Führen Sie eine " -"vollständige Sicherung durch." 
- -#, python-format -msgid "Have not created %(tier_levels)s tier(s)." -msgstr "Es wurden keine Schichten %(tier_levels)s erstellt." - -#, python-format -msgid "Hint \"%s\" not supported." -msgstr "Hinweis \"%s\" nicht unterstützt." - -msgid "Host" -msgstr "Host" - -#, python-format -msgid "Host %(host)s could not be found." -msgstr "Der Host %(host)s wurde nicht gefunden." - -#, python-format -msgid "" -"Host %(host)s does not match x509 certificate contents: CommonName " -"%(commonName)s." -msgstr "" -"Host %(host)s entspricht nicht dem Inhalt des x509-Zertifikats: CommonName " -"%(commonName)s." - -#, python-format -msgid "Host %s has no FC initiators" -msgstr "Host %s hat keine iSCSI-Initiatoren." - -#, python-format -msgid "Host group with name %s not found" -msgstr "Hostgruppe mit dem Namen %s nicht gefunden." - -#, python-format -msgid "Host group with ref %s not found" -msgstr "Hostgruppe mit ref %s nicht gefunden." - -msgid "Host is NOT Frozen." -msgstr "Der Host ist nicht gesperrt." - -msgid "Host is already Frozen." -msgstr "Der Host ist bereits gesperrt." - -#, python-format -msgid "Host not found. Failed to remove %(service)s on %(host)s." -msgstr "" -"Host nicht gefunden. Fehler beim Entfernen von %(service)s auf %(host)s." - -#, python-format -msgid "Host replication_status must be %s to failover." -msgstr "'replication_status' für Host muss für Failover %s sein." - -#, python-format -msgid "Host type %s not supported." -msgstr "Hosttyp %s nicht unterstützt." - -#, python-format -msgid "Host with ports %(ports)s not found." -msgstr "Host mit Ports %(ports)s nicht gefunden." - -msgid "Hypermetro and Replication can not be used in the same volume_type." -msgstr "" -"Hypermetro und Replikation können nicht mit demselben Datenträgertyp " -"(volume_type) verwendet werden." - -#, python-format -msgid "I/O group %(iogrp)d is not valid; available I/O groups are %(avail)s." -msgstr "" -"E/A-Gruppe %(iogrp)d ist ungültig. 
Verfügbare E/A-Gruppen sind %(avail)s." - -msgid "ID" -msgstr "ID" - -msgid "" -"If compression is set to True, rsize must also be set (not equal to -1)." -msgstr "" -"Wenn die Komprimierung auf True festgelegt wurde, muss auch rsize festgelegt " -"werden (auf einen Wert ungleich -1)." - -msgid "If nofmtdisk is set to True, rsize must also be set to -1." -msgstr "" -"Wenn 'nofmtdisk' auf 'True' gesetzt ist, muss 'rsize' auf '-1' gesetzt " -"werden." - -#, python-format -msgid "" -"Illegal value '%(prot)s' specified for flashsystem_connection_protocol: " -"valid value(s) are %(enabled)s." -msgstr "" -"Unzulässiger Wert '%(prot)s' für flashsystem_connection_protocol angegeben: " -"Gültige Werte sind %(enabled)s." - -msgid "Illegal value specified for IOTYPE: 0, 1, or 2." -msgstr "Unzulässiger Wert für IOTYPE angegeben: 0, 1 oder 2." - -msgid "Illegal value specified for smarttier: set to either 0, 1, 2, or 3." -msgstr "" -"Unzulässiger Wert für smarttier angegeben. Gültige Werte sind 0, 1, 2 und 3." - -msgid "" -"Illegal value specified for storwize_svc_vol_grainsize: set to either 32, " -"64, 128, or 256." -msgstr "" -"Unzulässiger Wert für storwize_svc_vol_grainsize angegeben: Gültige Werte " -"sind 32, 64, 128 und 256." - -msgid "" -"Illegal value specified for thin: Can not set thin and thick at the same " -"time." -msgstr "" -"Unzulässiger Wert für thin angegeben: thin und thick können nicht " -"gleichzeitig festgelegt werden." - -#, python-format -msgid "Image %(image_id)s could not be found." -msgstr "Abbild %(image_id)s wurde nicht gefunden." - -#, python-format -msgid "Image %(image_id)s is not active." -msgstr "Abbild %(image_id)s ist nicht aktiv." - -#, python-format -msgid "Image %(image_id)s is unacceptable: %(reason)s" -msgstr "Das Abbild %(image_id)s ist nicht zulässig: %(reason)s" - -msgid "Image location not present." -msgstr "Abbildposition nicht vorhanden." 
- -#, python-format -msgid "" -"Image virtual size is %(image_size)dGB and doesn't fit in a volume of size " -"%(volume_size)dGB." -msgstr "" -"Die virtuelle Größe des Abbilds mit %(image_size)d GB passt nicht auf einen " -"Datenträger mit der Größe %(volume_size)d GB." - -msgid "" -"ImageBusy error raised while deleting rbd volume. This may have been caused " -"by a connection from a client that has crashed and, if so, may be resolved " -"by retrying the delete after 30 seconds has elapsed." -msgstr "" -"Beim Löschen von RBD-Datenträger ist ein ImageBusy-Fehler aufgetreten. Dies " -"wurde möglicherweise von einer Verbindung mit einem Client verursacht, der " -"abgestürzt ist. Wenn dies der Fall ist, kann das Problem möglicherweise " -"behoben werden, wenn Sie den Löschversuch nach Ablauf von 30 Sekunden " -"wiederholen." - -#, python-format -msgid "" -"Import record failed, cannot find backup service to perform the import. " -"Request service %(service)s" -msgstr "" -"Importieren des Datensatzes fehlgeschlagen. Der Sicherungsdienst zum " -"Durchführen des Imports kann nicht gefunden werden. Dienst %(service)s " -"anfordern" - -msgid "Incorrect request body format" -msgstr "Falsches Format für Anforderungshauptteil" - -msgid "Incorrect request body format." -msgstr "Falsches Format für Anforderungshauptteil." - -msgid "Incremental backups exist for this backup." -msgstr "Inkrementelle Sicherungen sind für diese Sicherung vorhanden." - -#, python-format -msgid "" -"Infortrend CLI exception: %(err)s Param: %(param)s (Return Code: %(rc)s) " -"(Output: %(out)s)" -msgstr "" -"Infortrend-CLI-Ausnahmebedingung: %(err)s Parameter: %(param)s " -"(Rückgabecode: %(rc)s) (Ausgabe: %(out)s)" - -msgid "Input volumes or snapshots are invalid." -msgstr "Eingabedatenträger oder Schattenkopien sind ungültig." - -msgid "Input volumes or source volumes are invalid." -msgstr "Eingabedatenträger oder Quellendatenträger sind ungültig." 
- -#, python-format -msgid "Instance %(uuid)s could not be found." -msgstr "Instanz %(uuid)s wurde nicht gefunden." - -msgid "Insufficient free space available to extend volume." -msgstr "" -"Es ist nicht genügend freier Speicherplatz vorhanden, um den Datenträger zu " -"erweitern." - -msgid "Insufficient privileges" -msgstr "Unzureichende Berechtigungen" - -#, python-format -msgid "Invalid 3PAR Domain: %(err)s" -msgstr "Ungültige 3PAR-Domäne: %(err)s" - -msgid "Invalid ALUA value. ALUA value must be 1 or 0." -msgstr "Ungültiger ALUA-Wert. ALUA-Wert muss 1 oder 0 sein." - -msgid "Invalid Ceph args provided for backup rbd operation" -msgstr "Ungültige Ceph-Argumente für RBD-Sicherungsoperation angegeben." - -#, python-format -msgid "Invalid CgSnapshot: %(reason)s" -msgstr "Ungültiger CgSnapshot: %(reason)s" - -#, python-format -msgid "Invalid ConsistencyGroup: %(reason)s" -msgstr "Ungültige ConsistencyGroup: %(reason)s" - -msgid "Invalid ConsistencyGroup: No host to create consistency group" -msgstr "" -"Ungültige Konsistenzgruppe: Kein Host für die Erstellung der Konsistenzgruppe" - -#, python-format -msgid "" -"Invalid HPELeftHand API version found: %(found)s. Version %(minimum)s or " -"greater required for manage/unmanage support." -msgstr "" -"Ungültige HPELeftHand-API-Version gefunden: %(found)s. Version %(minimum)s " -"oder höher erforderlich für die Unterstützung der Funktionen Verwaltung/" -"Aufheben von Verwaltung." - -#, python-format -msgid "Invalid IP address format: '%s'" -msgstr "Ungültiges IP-Adressformat: '%s'" - -#, python-format -msgid "" -"Invalid QoS specification detected while getting QoS policy for volume %s" -msgstr "" -"Ungültige QoS-Spezifikation beim Abrufen der QoS-Richtlinie für Datenträger " -"%s gefunden." - -#, python-format -msgid "Invalid Replication Target: %(reason)s" -msgstr "Ungültiges Replikationsziel: %(reason)s" - -#, python-format -msgid "" -"Invalid Virtuozzo Storage share specification: %r. 
Must be: [MDS1[," -"MDS2],...:/][:PASSWORD]." -msgstr "" -"Ungültige Spezifikation einer Virtuozzo-Speicherfreigabe: %r. Erforderlich: " -"[MDS1[,MDS2],...:/][:KENNWORT]." - -#, python-format -msgid "Invalid XtremIO version %(cur)s, version %(min)s or up is required" -msgstr "" -"Ungültige XtremIO-Version %(cur)s, Version ab %(min)s ist erforderlich." - -#, python-format -msgid "Invalid allocated quotas defined for the following project quotas: %s" -msgstr "" -"Es wurden ungültige Kontingentzuordnungen für die folgenden " -"Projektkontingente definiert: %s" - -msgid "Invalid argument" -msgstr "Ungültiges Argument" - -msgid "Invalid argument - negative seek offset." -msgstr "Ungültiges Argument - negativer Suchoffset." - -#, python-format -msgid "Invalid argument - whence=%s not supported" -msgstr "Ungültiges Argument - whence=%s wird nicht unterstützt." - -#, python-format -msgid "Invalid argument - whence=%s not supported." -msgstr "Ungültiges Argument: whence=%s wird nicht unterstützt." - -#, python-format -msgid "Invalid attaching mode '%(mode)s' for volume %(volume_id)s." -msgstr "Ungültiger Anhangmodus '%(mode)s' für Datenträger %(volume_id)s." - -#, python-format -msgid "Invalid auth key: %(reason)s" -msgstr "Ungültiger Autorisierungsschlüssel: %(reason)s" - -#, python-format -msgid "Invalid backup: %(reason)s" -msgstr "Ungültige Sicherung: %(reason)s" - -msgid "Invalid chap user details found in CloudByte storage." -msgstr "Ungültige Details des CHAP-Benutzers im CloudByte-Speicher gefunden." - -#, python-format -msgid "Invalid connection initialization response of volume %(name)s" -msgstr "" -"Ungültige Antwort für Initialisierung der Verbindung von Datenträger %(name)s" - -#, python-format -msgid "" -"Invalid connection initialization response of volume %(name)s: %(output)s" -msgstr "" -"Ungültige Antwort für Initialisierung der Verbindung von Datenträger " -"%(name)s: %(output)s" - -#, python-format -msgid "Invalid content type %(content_type)s." 
-msgstr "Ungültiger Inhaltstyp %(content_type)s." - -msgid "Invalid credentials" -msgstr "Ungültige Berechtigungsnachweise" - -#, python-format -msgid "Invalid directory: %s" -msgstr "Ungültiges Verzeichnis: %s" - -#, python-format -msgid "Invalid disk adapter type: %(invalid_type)s." -msgstr "Ungültiger Plattenadaptertyp: %(invalid_type)s." - -#, python-format -msgid "Invalid disk backing: %s." -msgstr "Ungültige Plattensicherung: %s." - -#, python-format -msgid "Invalid disk type: %(disk_type)s." -msgstr "Ungültiger Plattentyp: %(disk_type)s." - -#, python-format -msgid "Invalid disk type: %s." -msgstr "Ungültiger Plattentyp: %s." - -#, python-format -msgid "Invalid group type: %(reason)s" -msgstr "Ungültiger Gruppentyp: %(reason)s" - -#, python-format -msgid "Invalid host: %(reason)s" -msgstr "Ungültiger Host: %(reason)s" - -#, python-format -msgid "" -"Invalid hpe3parclient version found (%(found)s). Version %(minimum)s or " -"greater required. Run \"pip install --upgrade python-3parclient\" to upgrade " -"the hpe3parclient." -msgstr "" -"Ungültige hpe3parclient-Version gefunden (%(found)s). Eine Version ab " -"%(minimum)s ist erforderlich. Führen Sie \"pip install --upgrade " -"python-3parclient\" aus, um ein Upgrade für hpe3parclient durchzuführen." - -#, python-format -msgid "" -"Invalid hpelefthandclient version found (%(found)s). Version %(minimum)s or " -"greater required. Run 'pip install --upgrade python-lefthandclient' to " -"upgrade the hpelefthandclient." -msgstr "" -"Ungültige hpelefthandclient-Version gefunden (%(found)s). Version " -"%(minimum)s oder höher erforderlich. Führen Sie 'pip install --upgrade " -"python-lefthandclient' aus, um ein Upgrade für hpelefthandclient " -"durchzuführen." - -#, python-format -msgid "Invalid image href %(image_href)s." -msgstr "Ungültiger Abbildhyperlink %(image_href)s." - -msgid "Invalid image identifier or unable to access requested image." 
-msgstr "" -"Ungültige Abbild-ID oder auf das angeforderte Abbild kann nicht zugegriffen " -"werden." - -msgid "Invalid imageRef provided." -msgstr "Angabe für imageRef ungültig." - -msgid "Invalid input" -msgstr "Ungültige Eingabe" - -#, python-format -msgid "Invalid input received: %(reason)s" -msgstr "Ungültige Eingabe erhalten: %(reason)s" - -#, python-format -msgid "Invalid is_public filter [%s]" -msgstr "'is_public-Filter' [%s] ungültig" - -#, python-format -msgid "Invalid lun type %s is configured." -msgstr "Es wurde ein ungültiger LUN-Typ %s konfiguriert." - -#, python-format -msgid "Invalid metadata size: %(reason)s" -msgstr "Ungültige Metadatengröße: %(reason)s" - -#, python-format -msgid "Invalid metadata: %(reason)s" -msgstr "Ungültige Metadaten: %(reason)s" - -#, python-format -msgid "Invalid mount point base: %s" -msgstr "Ungültige Mountpunktbasis: %s" - -#, python-format -msgid "Invalid mount point base: %s." -msgstr "Ungültige Mountpunktbasis: %s." - -#, python-format -msgid "Invalid new snapCPG name for retype. new_snap_cpg='%s'." -msgstr "Ungültiger neuer snapCPG-Name für Typänderung. new_snap_cpg='%s'." - -#, python-format -msgid "Invalid port number %(config)s for Coho rpc port" -msgstr "Ungültige Portnummer %(config)s für Coho-RPC-Port." - -#, python-format -msgid "" -"Invalid prefetch type '%s' is configured. PrefetchType must be in 0,1,2,3." -msgstr "" -"Es wurde ein ungültiger PrefetchType '%s' konfiguriert. PrefetchType muss in " -"0,1,2,3 sein." - -#, python-format -msgid "Invalid qos specs: %(reason)s" -msgstr "Ungültige QoS-Spezifikationen: %(reason)s" - -msgid "Invalid request to attach volume to an invalid target" -msgstr "" -"Die Anforderung, einen Datenträger an ein ungültiges Ziel anzuhängen, ist " -"nicht zulässig." - -msgid "" -"Invalid request to attach volume with an invalid mode. 
Attaching mode should " -"be 'rw' or 'ro'" -msgstr "" -"Die Anforderung, einen Datenträger mit einem ungültigen Modus anzuhängen, " -"ist nicht zulässig. Der Anhangmodus muss 'rw' oder 'ro' sein." - -#, python-format -msgid "Invalid reservation expiration %(expire)s." -msgstr "Ungültiges Ende für Reservierung: %(expire)s." - -msgid "Invalid response header from RPC server" -msgstr "Ungültiger Antwortheader vom RPC-Server" - -#, python-format -msgid "Invalid secondary id %s." -msgstr "Ungültige sekundäre ID %s." - -msgid "Invalid service catalog json." -msgstr "Ungültige Servicekatalog-JSON." - -msgid "Invalid sheepdog cluster status." -msgstr "Ungültiger Status des sheepdog-Clusters." - -#, python-format -msgid "Invalid snapshot: %(reason)s" -msgstr "Ungültige Schattenkopie: %(reason)s" - -#, python-format -msgid "Invalid status: '%s'" -msgstr "Ungültiger Status: '%s'" - -#, python-format -msgid "Invalid storage pool %s requested. Retype failed." -msgstr "Ungültiger Speicherpool %s angefordert. Typänderung fehlgeschlagen." - -#, python-format -msgid "Invalid storage pool %s specificed." -msgstr "Ungültiger Speicherpool %s angegeben." - -msgid "Invalid storage pool is configured." -msgstr "Es wurde ein ungültiger Speicherpool konfiguriert." - -msgid "Invalid transport type." -msgstr "Ungültiger Transporttyp." - -#, python-format -msgid "Invalid update setting: '%s'" -msgstr "Ungültige Aktualisierungseinstellung: '%s'" - -#, python-format -msgid "Invalid value '%s' for force." -msgstr "Wert '%s' für Zwangsausführung ungültig." - -#, python-format -msgid "Invalid value '%s' for force. " -msgstr "Wert '%s' für Zwangsausführung ungültig. " - -#, python-format -msgid "Invalid value '%s' for is_public. Accepted values: True or False." -msgstr "Ungültiger Wert '%s' für is_public. Gültige Werte: True oder False." - -#, python-format -msgid "Invalid value '%s' for skip_validation." -msgstr "Ungültiger Wert '%s' für skip_validation." 
- -#, python-format -msgid "Invalid value for 'bootable': '%s'" -msgstr "Ungültiger Wert für 'bootable': '%s'" - -#, python-format -msgid "Invalid value for 'force': '%s'" -msgstr "Ungültiger Wert für 'force': '%s'" - -#, python-format -msgid "Invalid value for 'readonly': '%s'" -msgstr "Ungültiger Wert für 'readonly': '%s'" - -msgid "Invalid value for 'scheduler_max_attempts', must be >=1" -msgstr "Ungültiger Wert für scheduler_max_attempts. Der Wert muss >= 1 sein." - -msgid "Invalid value for NetApp configuration option netapp_host_type." -msgstr "Ungültiger Wert für NetApp-Konfigurationsoption netapp_host_type." - -msgid "Invalid value for NetApp configuration option netapp_lun_ostype." -msgstr "Ungültiger Wert für NetApp-Konfigurationsoption netapp_lun_ostype." - -#, python-format -msgid "Invalid value for age, %(age)s" -msgstr "Ungültiger Wert für age: %(age)s" - -#, python-format -msgid "Invalid value: \"%s\"" -msgstr "Ungültiger Wert: \"%s\"" - -#, python-format -msgid "" -"Invalid volume size provided for create request: %s (size argument must be " -"an integer (or string representation of an integer) and greater than zero)." -msgstr "" -"Ungültige Datenträgergröße für Erstellungsanforderung angegeben: %s " -"(Größenargument muss eine Ganzzahl (oder eine Zeichenkettendarstellung einer " -"Ganzzahl) und größer als null sein)." - -#, python-format -msgid "Invalid volume type: %(reason)s" -msgstr "Ungültiger Datenträgertyp: %(reason)s" - -#, python-format -msgid "Invalid volume: %(reason)s" -msgstr "Ungültiger Datenträger: %(reason)s" - -#, python-format -msgid "" -"Invalid volume: Cannot add volume %(volume_id)s to consistency group " -"%(group_id)s because volume is in an invalid state: %(status)s. Valid states " -"are: ('available', 'in-use')." -msgstr "" -"Ungültiger Datenträger: Der Datenträger %(volume_id)s kann nicht zur " -"Konsistenzgruppe %(group_id)s hinzugefügt werden, da er einen ungültigen " -"Status hat: %(status)s. 
Gültige Status sind: ('available', 'in-use')." - -#, python-format -msgid "" -"Invalid volume: Cannot add volume %(volume_id)s to consistency group " -"%(group_id)s because volume type %(volume_type)s is not supported by the " -"group." -msgstr "" -"Ungültiger Datenträger: Der Datenträger %(volume_id)s kann nicht zur " -"Konsistenzgruppe %(group_id)s hinzugefügt werden, da der Datenträgertyp " -"%(volume_type)s nicht von der Gruppe unterstützt wird." - -#, python-format -msgid "" -"Invalid volume: Cannot add volume fake-volume-uuid to consistency group " -"%(group_id)s because volume cannot be found." -msgstr "" -"Ungültiger Datenträger: fake-volume-uuid des Datenträgers kann nicht zur " -"Konsistenzgruppe %(group_id)s hinzugefügt werden, da der Datenträger nicht " -"gefunden wurde." - -#, python-format -msgid "" -"Invalid volume: Cannot remove volume fake-volume-uuid from consistency group " -"%(group_id)s because it is not in the group." -msgstr "" -"Ungültiger Datenträger: fake-volume-uuid des Datenträgers kann nicht aus der " -"Konsistenzgruppe %(group_id)s entfernt werden, da er sich nicht in der " -"Gruppe befindet." - -#, python-format -msgid "Invalid volume_type passed: %s." -msgstr "Ungültiger volume_type übergeben: %s." - -#, python-format -msgid "" -"Invalid volume_type provided: %s (requested type is not compatible; either " -"match source volume, or omit type argument)." -msgstr "" -"Ungültiger volume_type angegeben: %s (der angeforderte Typ ist nicht " -"kompatibel; entweder übereinstimmenden Quellendatenträger verwenden oder " -"Typargument weglassen)." - -#, python-format -msgid "" -"Invalid volume_type provided: %s (requested type is not compatible; " -"recommend omitting the type argument)." -msgstr "" -"Ungültiger volume_type angegeben: %s (der angeforderte Typ ist nicht " -"kompatibel; Weglassen des Typarguments empfohlen)." 
- -#, python-format -msgid "" -"Invalid volume_type provided: %s (requested type must be supported by this " -"consistency group)." -msgstr "" -"Ungültiger volume_type angegeben: %s (der angeforderte Typ muss von dieser " -"Konsistenzgruppe unterstützt werden)." - -#, python-format -msgid "Invalid wwpns format %(wwpns)s" -msgstr "Ungültiges wwpns-Format %(wwpns)s" - -msgid "Invoking web service failed." -msgstr "Aufrufen des Web-Service fehlgeschlagen." - -msgid "Issue encountered waiting for job." -msgstr "Beim Warten auf die Aufgabe ist ein Problem aufgetreten. " - -msgid "Issue encountered waiting for synchronization." -msgstr "Beim Warten auf die Synchronisation ist ein Problem aufgetreten. " - -msgid "" -"Issuing a fail-over failed because replication is not properly configured." -msgstr "" -"Ausgabe eines Failovers fehlgeschlagen, weil die Replikation nicht " -"ordnungsgemäß konfiguriert wurde. " - -#, python-format -msgid "Job id not found in CloudByte's create volume [%s] response." -msgstr "" -"Job-ID in Antwort zu Datenträger erstellen [%s] von CloudByte nicht gefunden." - -#, python-format -msgid "Job id not found in CloudByte's delete volume [%s] response." -msgstr "" -"Job-ID in Antwort zu Datenträger löschen [%s] von CloudByte nicht gefunden." - -msgid "" -"Key names can only contain alphanumeric characters, underscores, periods, " -"colons and hyphens." -msgstr "" -"Schlüsselnamen dürfen nur alphanumerische Zeichen, Unterstriche, Punkte, " -"Doppelpunkte und Bindestriche enthalten." - -#, python-format -msgid "KeyError: %s" -msgstr "KeyError: %s" - -msgid "Keystone version 3 or greater must be used to get nested quota support." -msgstr "" -"Für die Unterstützung von verschachtelten Kontingenten muss Keystone ab " -"Version 3 verwendet werden." - -#, python-format -msgid "LU does not exist for volume: %s" -msgstr "LU für den Datenträger nicht vorhanden: %s" - -msgid "LUN export failed!" -msgstr "Exportieren der LUN fehlgeschlagen!" 
- -msgid "LUN map overflow on every channel." -msgstr "Überlauf der LUN-Zuordnung an jedem Kanal." - -#, python-format -msgid "LUN not found with given ref %s." -msgstr "LUN mit angegebener Referenz %s nicht gefunden." - -#, python-format -msgid "LUN number is out of bound on channel id: %(ch_id)s." -msgstr "" -"LUN-Nummer liegt außerhalb des gültigen Bereichs für Kanal-ID: %(ch_id)s." - -#, python-format -msgid "Last %s cinder syslog entries:-" -msgstr "Letzte %s Cinder-Syslog-Einträge:-" - -msgid "LeftHand cluster not found" -msgstr "LeftHand-Cluster nicht gefunden" - -msgid "License is unavailable." -msgstr "Die Lizenz ist nicht verfügbar." - -#, python-format -msgid "Line %(dis)d : %(line)s" -msgstr "Zeile %(dis)d : %(line)s" - -msgid "Link path already exists and its not a symlink" -msgstr "Linkpfad ist bereits vorhanden und ist kein symbolischer Link." - -#, python-format -msgid "Linked clone of source volume not supported in state: %s." -msgstr "Verlinkter Klon von Quellendatenträger im Status %s nicht unterstützt." - -msgid "Lock acquisition failed." -msgstr "Erstellen der Sperre fehlgeschlagen." - -msgid "Logout session error." -msgstr "Fehler beim Abmelden der Sitzung." - -msgid "" -"Lookup service not configured. Config option for fc_san_lookup_service needs " -"to specify a concrete implementation of the lookup service." -msgstr "" -"Suchdienst nicht konfiguriert. Konfigurationsoption für " -"fc_san_lookup_service muss eine konkrete Implementierung des Suchdiensts " -"angeben." - -msgid "Lun migration error." -msgstr "Fehler bei LUN-Migration." - -#, python-format -msgid "" -"MD5 of object: %(object_name)s before: %(md5)s and after: %(etag)s is not " -"same." -msgstr "" -"MD5 von Objekt: %(object_name)s Vorher: %(md5)s und nachher: %(etag)s nicht " -"identisch." 
- -#, python-format -msgid "MSG_DENIED: %r" -msgstr "MSG_DENIED: %r" - -#, python-format -msgid "MSG_DENIED: AUTH_ERROR: %r" -msgstr "MSG_DENIED: AUTH_ERROR: %r" - -#, python-format -msgid "MSG_DENIED: RPC_MISMATCH: %r" -msgstr "MSG_DENIED: RPC_MISMATCH: %r" - -#, python-format -msgid "Malformed fcns output string: %s" -msgstr "Fehlerhafte FCNS-Ausgabezeichenkette: %s" - -#, python-format -msgid "Malformed message body: %(reason)s" -msgstr "Fehlerhafter Nachrichtentext: %(reason)s" - -#, python-format -msgid "Malformed nameserver string: %s" -msgstr "Fehlerhafte Zeichenkette für Namensserver: %s" - -msgid "Malformed request body" -msgstr "Fehlerhafter Anforderungshauptteil" - -msgid "Malformed request body." -msgstr "Fehlerhafter Anforderungshauptteil." - -msgid "Malformed request url" -msgstr "Fehlerhafte Anforderungs-URL" - -#, python-format -msgid "Malformed response to command %(cmd)s: %(reason)s" -msgstr "Fehlerhafte Antwort auf Befehl %(cmd)s: %(reason)s" - -msgid "Malformed scheduler_hints attribute" -msgstr "Attribut 'scheduler_hints' fehlerhaft" - -#, python-format -msgid "Malformed show fcns database string: %s" -msgstr "Fehlerhafte Zeichenkette für Anzeige von FCNS-Datenbank: %s" - -#, python-format -msgid "" -"Malformed zone configuration: (switch=%(switch)s zone_config=" -"%(zone_config)s)." -msgstr "" -"Fehlerhafte Zonenkonfiguration: (Switch=%(switch)s Zonenkonfiguration=" -"%(zone_config)s)." - -#, python-format -msgid "Malformed zone status: (switch=%(switch)s zone_config=%(zone_config)s)." -msgstr "" -"Fehlerhafter Zonenstatus: (switch=%(switch)s zone_config=%(zone_config)s)." - -msgid "Manage existing get size requires 'id'." -msgstr "Verwaltung eines vorhandenen Abrufs der Größe erfordert 'id'." - -msgid "Manage existing snapshot not implemented." -msgstr "Verwaltung einer vorhandenen Schattenkopie nicht implementiert." 
- -#, python-format -msgid "" -"Manage existing volume failed due to invalid backend reference " -"%(existing_ref)s: %(reason)s" -msgstr "" -"Verwaltung des vorhandenen Datenträgers aufgrund von ungültiger Backend-" -"Referenz %(existing_ref)s fehlgeschlagen: %(reason)s" - -#, python-format -msgid "Manage existing volume failed due to volume type mismatch: %(reason)s" -msgstr "" -"Verwaltung des vorhandenen Datenträgers aufgrund eines abweichenden " -"Datenträgertyps fehlgeschlagen: %(reason)s" - -msgid "Manage existing volume not implemented." -msgstr "Verwaltung eines vorhandenen Datenträgers nicht implementiert." - -msgid "Manage existing volume requires 'source-id'." -msgstr "Verwaltung eines vorhandenen Datenträgers erfordert 'source-id'." - -#, python-format -msgid "" -"Manage volume is not supported if FAST is enable. FAST policy: " -"%(fastPolicyName)s." -msgstr "" -"Verwalten von Datenträger wird nicht unterstützt, wenn FAST aktiviert wurde. " -"FAST-Richtlinie: %(fastPolicyName)s." - -msgid "Managing of snapshots to failed-over volumes is not allowed." -msgstr "" -"Verwalten von Schattenkopien für Failover-Datenträger ist nicht zulässig." - -msgid "Map info is None due to array version not supporting hypermetro." -msgstr "" -"Zuordnungsinfo ist 'None', da die Array-Version hypermetro nicht unterstützt." - -#, python-format -msgid "" -"Mapping %(id)s prepare failed to complete within theallotted %(to)d seconds " -"timeout. Terminating." -msgstr "" -"Vorbereitung von %(id)s-Zuordnung wurde nicht innerhalb des zugewiesenen " -"Zeitlimits von %(to)d Sekunden abgeschlossen. Wird beendet." - -#, python-format -msgid "Masking view %(maskingViewName)s was not deleted successfully" -msgstr "Maskenansicht %(maskingViewName)s wurde nicht erfolgreich gelöscht." 
- -#, python-format -msgid "Maximum number of backups allowed (%(allowed)d) exceeded" -msgstr "Maximale Anzahl an zulässigen Sicherungen (%(allowed)d) überschritten" - -#, python-format -msgid "Maximum number of snapshots allowed (%(allowed)d) exceeded" -msgstr "" -"Maximale Anzahl an zulässigen Schatenkopien (%(allowed)d) überschritten" - -#, python-format -msgid "" -"Maximum number of volumes allowed (%(allowed)d) exceeded for quota " -"'%(name)s'." -msgstr "" -"Maximale Anzahl an zulässigen Datenträgern (%(allowed)d) für Kontingent " -"'%(name)s' überschritten." - -#, python-format -msgid "May specify only one of %s" -msgstr "Nur eine Angabe von %s ist zulässig." - -#, python-format -msgid "Message %(message_id)s could not be found." -msgstr "Nachricht %(message_id)s konnte nicht gefunden werden." - -msgid "Metadata backup already exists for this volume" -msgstr "Für diesen Datenträger ist bereits eine Metadatensicherung vorhanden." - -#, python-format -msgid "Metadata backup object '%s' already exists" -msgstr "Metadatensicherungsobjekt '%s' ist bereits vorhanden." - -msgid "Metadata property key blank." -msgstr "Metadateneigenschaftenschlüssel leer." - -msgid "Metadata restore failed due to incompatible version" -msgstr "" -"Fehler bei der Wiederherstellung der Metadaten aufgrund einer inkompatiblen " -"Version" - -msgid "Metadata restore failed due to incompatible version." -msgstr "" -"Metadatenwiederherstellung aufgrund einer inkompatiblen Version " -"fehlgeschlagen." - -msgid "" -"Missing 'purestorage' python module, ensure the library is installed and " -"available." -msgstr "" -"Python-Modul 'purestorage' fehlt. Stellen Sie sicher, dass die Bibliothek " -"installiert und verfügbar ist." - -msgid "Missing Fibre Channel SAN configuration param - fc_fabric_names" -msgstr "Fehlende Fibre Channel-SAN-Konfigurationsparameter - fc_fabric_names" - -msgid "Missing request body" -msgstr "Fehlender Anforderungshauptteil" - -msgid "Missing request body." 
-msgstr "Anforderungshauptteil fehlt." - -#, python-format -msgid "Missing required element '%s' in request body" -msgstr "Fehlendes erforderliches Element '%s' im Anforderungshauptteil" - -#, python-format -msgid "Missing required element '%s' in request body." -msgstr "Fehlendes erforderliches Element '%s' im Anforderungshauptteil." - -msgid "Missing required element 'consistencygroup' in request body." -msgstr "" -"Fehlendes erforderliches Element 'consistencygroup' im Anforderungshauptteil." - -msgid "Missing required element quota_class_set in request body." -msgstr "" -"Fehlendes erforderliches Element 'quota_class_set' im Anforderungshauptteil." - -msgid "Missing required element snapshot in request body." -msgstr "Fehlendes erforderliches Element 'snapshot' im Anforderungshauptteil." - -msgid "" -"Multiple SerialNumbers found, when only one was expected for this operation. " -"Please change your EMC config file." -msgstr "" -"Mehrere SerialNumbers gefunden, wenn nur eine Angabe für diese Operation " -"erwartet wurde. Ändern Sie Ihre EMC-Konfigurationsdatei." - -#, python-format -msgid "Multiple copies of volume %s found." -msgstr "Mehrere Kopien von Datenträger %s gefunden." - -#, python-format -msgid "Multiple matches found for '%s', use an ID to be more specific." -msgstr "" -"Mehrere Übereinstimmungen für '%s' gefunden. Verwenden Sie eine ID zur " -"genaueren Bestimmung." - -msgid "Multiple profiles found." -msgstr "Mehrere Profile gefunden." - -msgid "Must implement a fallback schedule" -msgstr "Implementierung eines Ersatzzeitplans erforderlich" - -msgid "Must implement find_retype_host" -msgstr "find_retype_host muss implementiert werden." - -msgid "Must implement host_passes_filters" -msgstr "host_passes_filters muss implementiert werden." - -msgid "Must implement schedule_create_consistencygroup" -msgstr "schedule_create_consistencygroup muss implementiert werden." 
- -msgid "Must implement schedule_create_volume" -msgstr "schedule_create_volume muss implementiert werden." - -msgid "Must implement schedule_get_pools" -msgstr "schedule_get_pools muss implementiert werden." - -msgid "Must pass wwpn or host to lsfabric." -msgstr "WWPN oder Host muss an lsfabric übergeben werden." - -msgid "" -"Must run this command as cloud admin using a Keystone policy.json which " -"allows cloud admin to list and get any project." -msgstr "" -"Dieser Befehl muss als Cloudadministrator zusammen mit einer Keystone-" -"Richtliniendatei 'policy.json' ausgeführt werden, damit der " -"Cloudadministrator beliebige Projekte auflisten und abrufen kann. " - -msgid "Must specify 'connector'" -msgstr "'connector' muss angegeben werden." - -msgid "Must specify 'connector'." -msgstr "'connector' muss angegeben werden." - -msgid "Must specify 'host'." -msgstr "'host' muss angegeben werden." - -msgid "Must specify 'new_volume'" -msgstr "'new_volume' muss angegeben werden." - -msgid "Must specify 'status'" -msgstr "'status' muss angegeben werden." - -msgid "" -"Must specify 'status', 'attach_status' or 'migration_status' for update." -msgstr "" -"'status', 'attach_status' oder 'migration_status' muss für die " -"Aktualisierung angegeben werden. " - -msgid "Must specify a valid attach status" -msgstr "Ein gültiger Anhangstatus muss angegeben werden." - -msgid "Must specify a valid migration status" -msgstr "Ein gültiger Migrationsstatus muss angegeben werden." - -#, python-format -msgid "Must specify a valid persona %(valid)s,value '%(persona)s' is invalid." -msgstr "" -"Eine gültige Persona %(valid)s muss angegeben werden. Wert '%(persona)s' ist " -"ungültig." - -#, python-format -msgid "" -"Must specify a valid provisioning type %(valid)s, value '%(prov)s' is " -"invalid." -msgstr "" -"Ein gültiger Bereitstellungstyp %(valid)s muss angegeben werden. Wert " -"'%(prov)s' ist ungültig." 
- -msgid "Must specify a valid status" -msgstr "Ein gültiger Status muss angegeben werden." - -msgid "Must specify an ExtensionManager class" -msgstr "Eine ExtensionManager-Klasse muss angegeben werden." - -msgid "Must specify bootable in request." -msgstr "'bootable' muss in der Anforderung angegeben werden." - -msgid "Must specify protection domain name or protection domain id." -msgstr "Schutzdomänenname oder Schutzdomänen-ID muss angegeben werden." - -msgid "Must specify readonly in request." -msgstr "'readonly' muss in der Anforderung angegeben werden." - -msgid "Must specify snapshot source-name or source-id." -msgstr "" -"'source-name' oder 'source-id' der Schattenkopie müssen angegeben werden." - -msgid "Must specify source-name or source-id." -msgstr "'source-name' oder 'source-id' muss angegeben werden." - -msgid "Must specify storage pool name or id." -msgstr "Speicherpoolname oder Speicherpool-ID muss angegeben werden." - -msgid "Must specify storage pools. Option: sio_storage_pools." -msgstr "Speicherpools müssen angegeben werden. Option: sio_storage_pools." - -msgid "Must supply a positive, non-zero value for age" -msgstr "Für age muss ein positiver Wert ungleich null angegeben werden." - -#, python-format -msgid "" -"NAS config '%(name)s=%(value)s' invalid. Must be 'auto', 'true', or 'false'" -msgstr "" -"NAS-Konfiguration '%(name)s=%(value)s' ungültig. Muss 'auto', 'true' oder " -"'false' sein." - -#, python-format -msgid "NFS config file at %(config)s doesn't exist" -msgstr "NFS-Konfigurationsdatei ist in %(config)s nicht vorhanden." - -#, python-format -msgid "NFS file %s not discovered." -msgstr "NFS-Datei %s nicht erkannt." - -msgid "NFS file could not be discovered." -msgstr "NFS-Datei wurde nicht erkannt." - -msgid "NaElement name cannot be null." -msgstr "NaElement-Name darf nicht null sein." - -msgid "Name" -msgstr "Name" - -msgid "" -"Name, description, add_volumes, and remove_volumes can not be all empty in " -"the request body." 
-msgstr "" -"Name, description, add_volumes und remove_volumes dürfen im " -"Anforderungshauptteil nicht alle leer bleiben." - -msgid "Need non-zero volume size" -msgstr "Datenträgergröße ungleich null erforderlich" - -#, python-format -msgid "Neither MSG_DENIED nor MSG_ACCEPTED: %r" -msgstr "Weder MSG_DENIED noch MSG_ACCEPTED: %r" - -msgid "NetApp Cinder Driver exception." -msgstr "NetApp Cinder-Treiberausnahme" - -#, python-format -msgid "" -"New size for extend must be greater than current size. (current: %(size)s, " -"extended: %(new_size)s)." -msgstr "" -"Neue Größe für Erweiterung muss größer als aktuelle Größe sein (aktuell: " -"%(size)s, erweitert: %(new_size)s)." - -#, python-format -msgid "" -"New size should be bigger than the real size from backend storage. realsize: " -"%(oldsize)s, newsize: %(newsize)s." -msgstr "" -"Die neue Größe muss größer sein als die tatsächliche Größe aus dem " -"Backendspeicher. Tatsächliche Größe: %(oldsize)s, neue Größe: %(newsize)s." - -msgid "New volume size must be specified as an integer." -msgstr "Neue Datenträgergröße muss als Ganzzahl angegeben werden." - -msgid "New volume type must be specified." -msgstr "Neuer Datenträgertyp muss angegeben werden." - -msgid "New volume type not specified in request_spec." -msgstr "Der neue Datenträgertyp wurde nicht in request_spec angegeben." - -msgid "Nimble Cinder Driver exception" -msgstr "Nimble-Cinder-Treiberausnahme" - -msgid "No FC initiator can be added to host." -msgstr "Es kann kein FC-Initiator zum Host hinzugefügt werden. " - -msgid "No FC port connected to fabric." -msgstr "Kein FC-Port mit Fabric verbunden." - -msgid "No FCP targets found" -msgstr "Keine FCP-Ziele gefunden" - -msgid "No Port Group elements found in config file." -msgstr "" -"Es wurden keine Portgruppenelemente in der Konfigurationsdatei gefunden." - -msgid "No VF ID is defined in the configuration file." -msgstr "In der Konfigurationsdatei ist keine VF-ID definiert." 
- -msgid "No active iSCSI portals with supplied iSCSI IPs" -msgstr "Keine aktiven iSCSI-Portale mit angegebenen iSCSI-IPs" - -#, python-format -msgid "No available service named %s" -msgstr "Kein Dienst mit dem Namen %s verfügbar." - -#, python-format -msgid "No backup with id %s" -msgstr "Keine Datensicherung mit ID %s" - -msgid "No backups available to do an incremental backup." -msgstr "" -"Keine Sicherungen für die Erstellung einer inkrementellen Sicherung " -"verfügbar." - -msgid "No big enough free disk" -msgstr "Nicht genügend freier Plattenspeicherplatz" - -#, python-format -msgid "No cgsnapshot with id %s" -msgstr "Kein Cgsnapshot mit ID %s" - -msgid "No cinder entries in syslog!" -msgstr "Keine Cinder-Einträge im Syslog!" - -#, python-format -msgid "No cloned LUN named %s found on the filer" -msgstr "Keine geklonte LUN mit dem Namen %s auf dem Dateiserver gefunden." - -msgid "No config node found." -msgstr "Kein Konfigurationsknoten gefunden." - -#, python-format -msgid "No consistency group with id %s" -msgstr "Keine Konsistenzgruppe mit ID %s vorhanden." - -#, python-format -msgid "No element by given name %s." -msgstr "Kein Element mit dem angegebenen Namen %s vorhanden." - -msgid "No errors in logfiles!" -msgstr "Keine Fehler in den Protokolldateien!" - -#, python-format -msgid "No file found with %s as backing file." -msgstr "Keine Datei mit %s als Sicherungsdatei gefunden." - -#, python-format -msgid "" -"No free LUN IDs left. Maximum number of volumes that can be attached to host " -"(%s) has been exceeded." -msgstr "" -"Keine freien LUN-IDs übrig. Die maximale Anzahl der Datenträger, die an den " -"Host (%s) angehängt werden können, wurde überschritten." - -msgid "No free disk" -msgstr "Kein freier Plattenspeicherplatz" - -#, python-format -msgid "No good iscsi portal found in supplied list for %s." -msgstr "Kein gültiges iSCSI-Portal in bereitgestellter Liste für %s gefunden." - -#, python-format -msgid "No good iscsi portals found for %s." 
-msgstr "Keine gültigen iSCSI-Portals für %s gefunden." - -#, python-format -msgid "No host to create consistency group %s." -msgstr "Kein Host zum Erstellen der Konsistenzgruppe %s." - -msgid "No iSCSI-enabled ports on target array." -msgstr "Keine iSCSI-fähigen Ports auf Zielarray." - -msgid "No image_name was specified in request." -msgstr "Kein image_name in Anforderung angegeben." - -msgid "No initiator connected to fabric." -msgstr "Kein Initiator mit Fabric verbunden." - -#, python-format -msgid "No initiator group found for initiator %s" -msgstr "Keine Initiatorgruppe gefunden für Initiator %s" - -msgid "No initiators found, cannot proceed" -msgstr "Keine Initiatoren gefunden. Fortfahren nicht möglich." - -#, python-format -msgid "No interface found on cluster for ip %s" -msgstr "Keine Schnittstelle im Cluster für IP %s gefunden." - -msgid "No ip address found." -msgstr "Keine IP-Adresse gefunden." - -msgid "No iscsi auth groups were found in CloudByte." -msgstr "In CloudByte wurden keine iSCSI-Authentifizierungsgruppen gefunden." - -msgid "No iscsi initiators were found in CloudByte." -msgstr "Keine iSCSI-Initiatoren in CloudByte gefunden." - -#, python-format -msgid "No iscsi service found for CloudByte volume [%s]." -msgstr "Kein iSCSI-Dienst für CloudByte-Datenträger [%s] gefunden." - -msgid "No iscsi services found in CloudByte storage." -msgstr "Keine iSCSI-Dienste in CloudByte-Speicher gefunden." - -#, python-format -msgid "No key file specified and unable to load key from %(cert)s %(e)s." -msgstr "" -"Keine Schlüsseldatei angegeben und Laden von Schlüssel aus %(cert)s %(e)s " -"nicht möglich." - -msgid "No mounted Gluster shares found" -msgstr "Keine eingehängten gemeinsam genutzten Gluster-Laufwerke gefunden." - -msgid "No mounted NFS shares found" -msgstr "Keine eingehängten gemeinsam genutzten NFS-Laufwerke gefunden." - -msgid "No mounted SMBFS shares found." -msgstr "Keine eingehängten gemeinsam genutzten SMBFS-Laufwerke gefunden." 
- -msgid "No mounted Virtuozzo Storage shares found" -msgstr "Keine eingehängten gemeinsam genutzten Virtuozzo-Laufwerke gefunden." - -msgid "No mounted shares found" -msgstr "Keine eingehängten gemeinsam genutzten Laufwerke gefunden." - -#, python-format -msgid "No node found in I/O group %(gid)s for volume %(vol)s." -msgstr "Kein Knoten in E/A-Gruppe %(gid)s für Datenträger %(vol)s gefunden." - -msgid "" -"No pools are available for provisioning volumes. Ensure that the " -"configuration option netapp_pool_name_search_pattern is set correctly." -msgstr "" -"Für das Bereitstellen von Datenträgern sind keine Pools verfügbar. Stellen " -"Sie sicher, dass die Konfigurationsoption netapp_pool_name_search_pattern " -"ordnungsgemäß festgelegt ist." - -msgid "" -"No response was received from CloudByte storage list iSCSI auth user API " -"call." -msgstr "" -"Es wurde keine Antwort von CloudBytes API-Aufruf zum Auflisten der iSCSI-" -"Authentifizierungsbenutzer aus dem CloudByte-Speicher empfangen." - -msgid "No response was received from CloudByte storage list tsm API call." -msgstr "" -"Es wurde keine Antwort von CloudBytes API-Aufruf zum Auflisten von TSM " -"(listTsm) empfangen." - -msgid "No response was received from CloudByte's list filesystem api call." -msgstr "" -"Es wurde keine Antwort von CloudBytes API-Aufruf zum Auflisten vom " -"Dateisystem (listFileSystem) empfangen." - -msgid "No service VIP configured and no nexenta_client_address" -msgstr "" -"Kein Dienst-VIP konfiguriert und 'nexenta_client_address' nicht vorhanden." - -#, python-format -msgid "No snap found with %s as backing file." -msgstr "Es wurde keine Schattenkopie mit %s als Sicherungsdatei gefunden." - -#, python-format -msgid "No snapshot image found in snapshot group %s." -msgstr "Kein Schattenkopieabbild in Schattenkopiegruppe %s gefunden." - -#, python-format -msgid "No snapshots could be found on volume %s." -msgstr "Keine Schattenkopien auf dem Datenträger %s gefunden." 
- -#, python-format -msgid "No source snapshots provided to create consistency group %s." -msgstr "" -"Keine Quellenschattenkopien zum Erstellen der Konsistenzgruppe %s angegeben." - -#, python-format -msgid "No storage path found for export path %s" -msgstr "Keinen Speicherpfad für Exportpfad %s gefunden." - -#, python-format -msgid "No such QoS spec %(specs_id)s." -msgstr "Keine solche QoS-Spezifikation %(specs_id)s vorhanden." - -msgid "No suitable discovery ip found" -msgstr "Keine geeignete Erkennungs-IP-Adresse gefunden." - -#, python-format -msgid "No support to restore backup version %s" -msgstr "Keine Unterstützung für die Wiederherstellung der Sicherungsversion %s" - -#, python-format -msgid "No target id found for volume %(volume_id)s." -msgstr "Keine Ziel-ID für Datenträger %(volume_id)s gefunden." - -msgid "" -"No unused LUN IDs are available on the host; multiattach is enabled which " -"requires that all LUN IDs to be unique across the entire host group." -msgstr "" -"Auf dem Host sind keine nicht verwendeten LUN-IDs verfügbar. Da das " -"mehrfache Anhängen aktiviert ist, müssen alle LUN-IDs in der gesamten " -"Hostgruppe eindeutig sein." - -#, python-format -msgid "No valid host was found. %(reason)s" -msgstr "Es wurde kein gültiger Host gefunden. %(reason)s" - -#, python-format -msgid "No valid hosts for volume %(id)s with type %(type)s" -msgstr "Keine gültigen Hosts für Datenträger %(id)s mit Typ %(type)s" - -#, python-format -msgid "No vdisk with the UID specified by ref %s." -msgstr "" -"Keine virtuelle Platte mit der durch die Referenz %s angegebenen UID " -"vorhanden." - -#, python-format -msgid "No views found for LUN: %s" -msgstr "Keine Ansichten für LUN gefunden: %s" - -#, python-format -msgid "" -"No volume on cluster with vserver %(vserver)s and junction path %(junction)s " -msgstr "" -"Kein Datenträger im Cluster mit virtuellem Server %(vserver)s und Junction-" -"Pfad %(junction)s." 
- -msgid "No volume service(s) started successfully, terminating." -msgstr "" -"Keiner der Datenträgerdienste wurde erfolgreich gestartet. Wird beendet." - -msgid "No volume was found at CloudByte storage." -msgstr "Kein Datenträger im CloudByte-Speicher gefunden." - -msgid "No volume_type should be provided when creating test replica." -msgstr "" -"Beim Erstellen des Testreplikats darf keine Angabe für volume_type " -"vorgenommen werden." - -msgid "No volumes found in CloudByte storage." -msgstr "Keine Datenträger in CloudByte-Speicher gefunden." - -msgid "No weighed hosts available" -msgstr "Keine gewichteten Hosts verfügbar." - -#, python-format -msgid "Not a valid string: %s" -msgstr "Keine gültige Zeichenfolge: %s" - -msgid "Not a valid value for NaElement." -msgstr "Kein gültiger Wert für NaElement." - -#, python-format -msgid "Not able to find a suitable datastore for the volume: %s." -msgstr "" -"Es kann kein geeigneter Datenspeicher für den Datenträger %s gefunden werden." - -msgid "Not an rbd snapshot" -msgstr "Keine RBD-Schattenkopie" - -#, python-format -msgid "Not authorized for image %(image_id)s." -msgstr "Keine Autorisierung für das Abbild %(image_id)s." - -msgid "Not authorized." -msgstr "Nicht berechtigt." - -#, python-format -msgid "Not enough space on backend (%(backend)s)" -msgstr "Nicht genug Speicherplatz im Backend (%(backend)s) vorhanden." - -msgid "Not enough storage space in the ZFS share to perform this operation." -msgstr "" -"Es ist nicht genügend Speicherplatz im freigegebenen ZFS-Verzeichnis zum " -"Ausführen dieser Operation vorhanden." - -msgid "Not stored in rbd" -msgstr "Nicht in RBD gespeichert." - -msgid "Nova returned \"error\" status while creating snapshot." -msgstr "Nova gab beim Erstellen der Schattenkopie den Status \"error\" zurück." - -msgid "Null response received from CloudByte's list filesystem." -msgstr "" -"Zum Auflisten vom Dateisystem (listFileSystem) Antwort 'Null' von CloudByte " -"erhalten." 
- -msgid "Null response received from CloudByte's list iscsi auth groups." -msgstr "" -"CloudBytes Antwort zum Auflisten der iSCSI-Authentifizierungsgruppen ist " -"'Null'." - -msgid "Null response received from CloudByte's list iscsi initiators." -msgstr "" -"Zum Auflisten von iSCSI-Initiatoren (listiSCSIInitiator) Antwort 'Null' von " -"CloudByte erhalten." - -msgid "Null response received from CloudByte's list volume iscsi service." -msgstr "" -"Zum Auflisten von Datenträger-iSCSI-Services (listVolumeiSCSIService) " -"Antwort 'Null' von CloudByte erhalten." - -#, python-format -msgid "Null response received while creating volume [%s] at CloudByte storage." -msgstr "" -"Antwort 'Null' beim Erstellen von Datenträger [%s] bei CloudByte-Speicher " -"erhalten." - -#, python-format -msgid "Null response received while deleting volume [%s] at CloudByte storage." -msgstr "" -"Antwort 'Null' beim Löschen von Datenträger [%s] bei CloudByte-Speicher " -"erhalten." - -#, python-format -msgid "" -"Null response received while querying for [%(operation)s] based job " -"[%(job)s] at CloudByte storage." -msgstr "" -"Antwort 'Null' beim Abfragen nach auf [%(operation)s] basierendem Job " -"[%(job)s] im CloudByte-Speicher erhalten." - -msgid "Object Count" -msgstr "Objektanzahl" - -msgid "Object Version" -msgstr "Objektversion" - -msgid "Object is not a NetApp LUN." -msgstr "Objekt ist keine NetApp-LUN." - -#, python-format -msgid "" -"On an Extend Operation, error adding volume to composite volume: " -"%(volumename)s." -msgstr "" -"Bei einer Erweiterungsoperation ist ein Fehler beim Hinzufügen des " -"Datenträgers zu einem Verbunddatenträger aufgetreten: %(volumename)s." - -msgid "One of the required inputs from host, port or scheme was not found." -msgstr "" -"Eine der erforderlichen Eingaben aus Host, Port oder Schema wurde nicht " -"gefunden." - -#, python-format -msgid "" -"Only %(value)s %(verb)s request(s) can be made to %(uri)s every " -"%(unit_string)s." 
-msgstr "" -"Nur %(value)s %(verb)s-Anforderung(en) an %(uri)s alle %(unit_string)s " -"möglich" - -msgid "Only one limit can be set in a QoS spec." -msgstr "In einer QoS-Spezifikation kann nur ein Grenzwert festgelegt werden." - -msgid "" -"Only users with token scoped to immediate parents or root projects are " -"allowed to see its children quotas." -msgstr "" -"Nur Benutzer mit einem Token, als dessen Bereich direkt übergeordnete oder " -"Stammprojekte festgelegt wurden, dürfen die Kontingente der zugehörigen " -"untergeordneten Elemente anzeigen." - -msgid "Only volumes managed by OpenStack can be unmanaged." -msgstr "" -"Aufheben der Verwaltung von Datenträgern ist nur für Datenträger möglich, " -"die von OpenStack verwaltet werden." - -#, python-format -msgid "Operation failed with status=%(status)s. Full dump: %(data)s" -msgstr "" -"Operation fehlgeschlagen mit status=%(status)s. Vollständiger " -"Speicherauszug: %(data)s" - -#, python-format -msgid "Operation not supported: %(operation)s." -msgstr "Nicht unterstützte Operation: %(operation)s." - -msgid "Option gpfs_images_dir is not set correctly." -msgstr "Option gpfs_images_dir wurde nicht richtig festgelegt." - -msgid "Option gpfs_images_share_mode is not set correctly." -msgstr "Option gpfs_images_share_mode wurde nicht richtig festgelegt." - -msgid "Option gpfs_mount_point_base is not set correctly." -msgstr "Option gpfs_mount_point_base wurde nicht richtig festgelegt." - -#, python-format -msgid "Originating %(res)s %(prop)s must be one of '%(vals)s' values" -msgstr "" -"Die Ursprungs-%(res)s %(prop)s muss einen der folgenden Werte haben: " -"'%(vals)s'." - -#, python-format -msgid "ParseException: %s" -msgstr "ParseException: %s" - -msgid "Partition name is None, please set smartpartition:partitionname in key." -msgstr "" -"Partitionsname ist None. Legen Sie smartpartition:partitionname im Schlüssel " -"fest." 
-
-msgid ""
-"Password or SSH private key is required for authentication: set either "
-"san_password or san_private_key option."
-msgstr ""
-"Kennwort oder privater SSH-Schlüssel ist für Authentifizierung erforderlich. "
-"Legen Sie entweder die Option 'san_password' oder die Option "
-"'san_private_key' fest."
-
-msgid "Path to REST server's certificate must be specified."
-msgstr "Pfad zum Zertifikat des REST-Servers muss angegeben werden."
-
-#, python-format
-msgid "Please create %(pool_list)s pool in advance!"
-msgstr "Erstellen Sie Pool %(pool_list)s bereits im Voraus!"
-
-#, python-format
-msgid "Please create %(tier_levels)s tier in pool %(pool)s in advance!"
-msgstr ""
-"Erstellen Sie die Speicherschicht %(tier_levels)s im Pool %(pool)s bereits "
-"im Voraus!"
-
-msgid "Please specify a name for QoS specs."
-msgstr "Geben Sie einen Namen für die QoS-Spezifikationen an."
-
-#, python-format
-msgid "Policy doesn't allow %(action)s to be performed."
-msgstr "Richtlinie lässt Ausführung von %(action)s nicht zu."
-
-#, python-format
-msgid "Pool %(poolNameInStr)s is not found."
-msgstr "Pool %(poolNameInStr)s wurde nicht gefunden."
-
-#, python-format
-msgid "Pool %s does not exist in Nexenta Store appliance"
-msgstr "Der Pool %s ist nicht in der Nexenta Store-Appliance vorhanden."
-
-#, python-format
-msgid "Pool from volume['host'] %(host)s not found."
-msgstr "Pool von Datenträger ['host'] %(host)s nicht gefunden."
-
-#, python-format
-msgid "Pool from volume['host'] failed with: %(ex)s."
-msgstr "Pool von Datenträger ['host'] fehlgeschlagen mit: %(ex)s."
-
-msgid "Pool is not available in the volume host field."
-msgstr "Pool ist im Hostfeld für Datenträger nicht verfügbar."
-
-msgid "Pool is not available in the volume host fields."
-msgstr "Pool ist in den Hostfeldern für Datenträger nicht verfügbar."
-
-#, python-format
-msgid "Pool with name %(pool)s wasn't found in domain %(domain)s."
-msgstr ""
-"Pool mit dem Namen %(pool)s wurde in der Domäne %(domain)s nicht gefunden."
- -#, python-format -msgid "Pool with name %(pool_name)s wasn't found in domain %(domain_id)s." -msgstr "" -"Pool mit dem Namen %(pool_name)s wurde in der Domäne %(domain_id)s nicht " -"gefunden." - -#, python-format -msgid "" -"Pool: %(poolName)s. is not associated to storage tier for fast policy " -"%(fastPolicy)s." -msgstr "" -"Der Pool %(poolName)s ist keiner Speicherschicht für FAST-Richtlinie " -"%(fastPolicy)s zugeordnet." - -#, python-format -msgid "PoolName must be in the file %(fileName)s." -msgstr "PoolName muss in der Datei %(fileName)s vorhanden sein." - -#, python-format -msgid "Pools %s does not exist" -msgstr "Pool %s ist nicht vorhanden." - -msgid "Pools name is not set." -msgstr "Name des Pools wurde nicht festgelegt." - -#, python-format -msgid "Primary copy status: %(status)s and synchronized: %(sync)s." -msgstr "Status der primären Kopie: %(status)s und synchronisiert: %(sync)s." - -#, python-format -msgid "Programming error in Cinder: %(reason)s" -msgstr "Programmierfehler in Cinder: %(reason)s" - -msgid "Project ID" -msgstr "Projekt-ID" - -#, python-format -msgid "Project quotas are not properly setup for nested quotas: %(reason)s." -msgstr "" -"Projektkontingente sind nicht ordnungsgemäß für verschachtelte Kontingente " -"konfiguriert: %(reason)s." - -msgid "Protection Group not ready." -msgstr "Schutzgruppe ist nicht bereit." - -#, python-format -msgid "" -"Protocol %(storage_protocol)s is not supported for storage family " -"%(storage_family)s." -msgstr "" -"Protokoll %(storage_protocol)s wird nicht unterstützt für Speicherfamilie " -"%(storage_family)s." - -msgid "Provided backup record is missing an id" -msgstr "Im angegebenen Sicherungsdatensatz fehlt eine ID." - -#, python-format -msgid "" -"Provided snapshot status %(provided)s not allowed for snapshot with status " -"%(current)s." -msgstr "" -"Angegebener Schattenkopiestatus %(provided)s ist nicht zulässig für " -"Schattenkopie mit Status %(current)s." 
-
-#, python-format
-msgid ""
-"Provider information w.r.t CloudByte storage was not found for OpenStack "
-"volume [%s]."
-msgstr ""
-"Providerinformation für CloudByte-Speicher wurde für den OpenStack-"
-"Datenträger [%s] nicht gefunden."
-
-#, python-format
-msgid "Pure Storage Cinder driver failure: %(reason)s"
-msgstr "Fehler bei Pure Storage-Cinder-Treiber: %(reason)s"
-
-msgid "Purge command failed, check cinder-manage logs for more details."
-msgstr ""
-"Der Löschbefehl ist fehlgeschlagen. Suchen Sie in den cinder-manage-"
-"Protokolldateien nach weiteren Details."
-
-#, python-format
-msgid "QoS Specs %(specs_id)s already exists."
-msgstr "Die QoS-Spezifikation %(specs_id)s ist bereits vorhanden."
-
-#, python-format
-msgid "QoS Specs %(specs_id)s is still associated with entities."
-msgstr ""
-"Die QoS-Spezifikation %(specs_id)s ist immer noch Entitäten zugeordnet."
-
-#, python-format
-msgid "QoS config is wrong. %s must > 0."
-msgstr "QoS-Konfiguration ist falsch. %s muss > 0 sein."
-
-#, python-format
-msgid ""
-"QoS policy must specify for IOTYPE and another qos_specs, QoS policy: "
-"%(qos_policy)s."
-msgstr ""
-"QoS-Richtlinie muss für IOTYPE und eine weitere qos_specs angegeben werden. "
-"QoS-Richtlinie: %(qos_policy)s."
-
-#, python-format
-msgid ""
-"QoS policy must specify for IOTYPE: 0, 1, or 2, QoS policy: %(qos_policy)s "
-msgstr ""
-"QoS-Richtlinie muss für IOTYPE angegeben werden: 0, 1 oder 2. QoS-"
-"Richtlinie: %(qos_policy)s "
-
-#, python-format
-msgid ""
-"QoS policy upper_limit and lower_limit conflict, QoS policy: %(qos_policy)s."
-msgstr ""
-"QoS-Richtlinienkonflikt bei 'upper_limit' und 'lower_limit'. QoS-Richtlinie: "
-"%(qos_policy)s."
-
-#, python-format
-msgid "QoS spec %(specs_id)s has no spec with key %(specs_key)s."
-msgstr ""
-"QoS-Spezifikation %(specs_id)s enthält keine Spezifikation mit dem Schlüssel "
-"%(specs_key)s."
-
-msgid "QoS specs are not supported on this storage family and ONTAP version."
-msgstr "" -"QoS-Spezifikationen werden für diese Speicherfamilie und ONTAP-Version nicht " -"unterstützt." - -msgid "Qos specs still in use." -msgstr "Qos-Spezifikationen sind noch im Gebrauch." - -msgid "" -"Query by service parameter is deprecated. Please use binary parameter " -"instead." -msgstr "" -"Die Abfrage nach Serviceparameter ist veraltet. Verwenden Sie stattdessen " -"den binären Parameter." - -msgid "Query resource pool error." -msgstr "Fehler beim Abfragen des Ressourcenpools." - -#, python-format -msgid "Quota %s limit must be equal or greater than existing resources." -msgstr "" -"Der Grenzwert für das Kontingent %s muss mindestens den vorhandenen " -"Ressourcen entsprechen." - -#, python-format -msgid "Quota class %(class_name)s could not be found." -msgstr "Die Kontingentklasse %(class_name)s wurde nicht gefunden." - -msgid "Quota could not be found" -msgstr "Kontingent wurde nicht gefunden." - -#, python-format -msgid "Quota exceeded for resources: %(overs)s" -msgstr "Kontingent für Ressourcen überschritten: %(overs)s" - -#, python-format -msgid "Quota exceeded: code=%(code)s" -msgstr "Kontingent überschritten: code=%(code)s" - -#, python-format -msgid "Quota for project %(project_id)s could not be found." -msgstr "Das Kontingent für Projekt %(project_id)s wurde nicht gefunden." - -#, python-format -msgid "" -"Quota limit invalid for project '%(proj)s' for resource '%(res)s': limit of " -"%(limit)d is less than in-use value of %(used)d" -msgstr "" -"Kontingentgrenzwert ungültig für Projekt '%(proj)s' für die Ressource " -"'%(res)s': Grenzwert von %(limit)d ist kleiner als der Wert von %(used)d, " -"der im Gebrauch ist." - -#, python-format -msgid "Quota reservation %(uuid)s could not be found." -msgstr "Kontingentreservierung %(uuid)s wurde nicht gefunden." - -#, python-format -msgid "Quota usage for project %(project_id)s could not be found." -msgstr "Die Kontingentnutzung für Projekt %(project_id)s wurde nicht gefunden." 
- -#, python-format -msgid "RBD diff op failed - (ret=%(ret)s stderr=%(stderr)s)" -msgstr "" -"RBD-Differenzierungsoperation fehlgeschlagen - (ret=%(ret)s stderr=" -"%(stderr)s)" - -#, python-format -msgid "REST %(proxy_ver)s hpelefthandclient %(rest_ver)s" -msgstr "REST %(proxy_ver)s hpelefthandclient %(rest_ver)s" - -msgid "REST server IP must by specified." -msgstr "IP des REST-Servers muss angegeben werden." - -msgid "REST server password must by specified." -msgstr "Kennwort des REST-Servers muss angegeben werden." - -msgid "REST server username must by specified." -msgstr "Benutzername des REST-Servers muss angegeben werden." - -msgid "RPC Version" -msgstr "RPC-Version" - -msgid "RPC server response is incomplete" -msgstr "RPC-Serverantwort ist unvollständig." - -msgid "Raid did not have MCS Channel." -msgstr "RAID hatte keinen MCS-Channel." - -#, python-format -msgid "Received error string: %s" -msgstr "Empfangene Fehlerzeichenkette: %s" - -msgid "Reference must be for an unmanaged snapshot." -msgstr "Referenz muss für eine nicht verwaltete Schattenkopie sein." - -msgid "Reference must be for an unmanaged virtual volume." -msgstr "" -"Referenz muss für einen nicht verwalteten virtuellen Datenträger sein. " - -msgid "Reference must be the name of an unmanaged snapshot." -msgstr "Referenz muss der Name einer nicht verwalteten Schattenkopie sein." - -msgid "Reference must be the volume name of an unmanaged virtual volume." -msgstr "" -"Referenz muss der Datenträgername eines nicht verwalteten virtuellen " -"Datenträgers sein." - -msgid "Reference must contain either source-name or source-id element." -msgstr "" -"Referenz muss entweder das Element 'source-name' oder das Element 'source-" -"id' enthalten." - -msgid "Reference must contain source-id or source-name element." -msgstr "" -"Die Referenz muss das Element 'source-id' oder 'source-name' enthalten." - -msgid "Reference must contain source-id or source-name key." 
-msgstr "Referenz muss Schlüssel 'source-id' oder 'source-name' enthalten." - -msgid "Reference must contain source-id or source-name." -msgstr "Referenz muss 'source-id' oder 'source-name' enthalten." - -msgid "Reference must contain source-id." -msgstr "Referenz muss 'source-id' enthalten." - -msgid "Reference must contain source-name element." -msgstr "Referenz muss Element 'source-name' enthalten. " - -msgid "Reference must contain source-name or source-id." -msgstr "Referenz muss 'source-name' oder 'source-id' enthalten." - -msgid "Reference must contain source-name." -msgstr "Referenz muss Element 'source-name' enthalten." - -msgid "Reference to volume to be managed must contain source-name." -msgstr "" -"Die Referenz auf den zu verwaltenden Datenträger muss 'source-name' " -"enthalten." - -#, python-format -msgid "Reference to volume: %s to be managed must contain source-name." -msgstr "" -"Die Referenz auf den zu verwaltenden Datenträger %s muss 'source-name' " -"enthalten." - -#, python-format -msgid "" -"Refusing to migrate volume ID: %(id)s. Please check your configuration " -"because source and destination are the same Volume Group: %(name)s." -msgstr "" -"Die Migration von Datenträger-ID %(id)s wird verweigert. Prüfen Sie Ihre " -"Konfiguration, da Quelle und Ziel dieselbe Datenträgergruppe haben: %(name)s." - -msgid "Remote pool cannot be found." -msgstr "Der ferne Pool wurde nicht gefunden." - -msgid "Remove CHAP error." -msgstr "Fehler beim Entfernen von CHAP." - -msgid "Remove fc from host error." -msgstr "Fehler beim Entfernen von FC vom Host." - -msgid "Remove host from array error." -msgstr "Fehler beim Entfernen des Hosts aus dem Array." - -msgid "Remove host from hostgroup error." -msgstr "Fehler beim Entfernen des Hosts aus der Hostgruppe." - -msgid "Remove iscsi from host error." -msgstr "Fehler beim Entfernen von iSCSI vom Host." - -msgid "Remove lun from QoS error." -msgstr "Fehler beim Entfernen der LUN aus QoS." 
- -msgid "Remove lun from cache error." -msgstr "Fehler beim Entfernen der LUN aus dem Zwischenspeicher." - -msgid "Remove lun from partition error." -msgstr "Fehler beim Entfernen der LUN aus der Partition." - -msgid "Remove port from port group error." -msgstr "Fehler beim Entfernen des Ports aus der Portgruppe." - -msgid "Remove volume export failed." -msgstr "Entfernen des Exports für Datenträger fehlgeschlagen." - -msgid "Rename lun on array error." -msgstr "Fehler beim Umbenennen der LUN im Array." - -msgid "Rename snapshot on array error." -msgstr "Fehler beim Umbenennen der Schattenkopie im Array." - -#, python-format -msgid "Replication %(name)s to %(ssn)s failed." -msgstr "Die Replikation %(name)s an %(ssn)s ist fehlgeschlagen." - -#, python-format -msgid "Replication Service Capability not found on %(storageSystemName)s." -msgstr "" -"Die Replikationsdienstfunktion wurde in %(storageSystemName)s nicht gefunden." - -#, python-format -msgid "Replication Service not found on %(storageSystemName)s." -msgstr "Replikationsdienst wurde in %(storageSystemName)s nicht gefunden." - -msgid "Replication not allowed yet." -msgstr "Replikation ist noch nicht zulässig." - -msgid "Request body and URI mismatch" -msgstr "Abweichung zwischen Anforderungshauptteil und URI" - -msgid "Request body contains too many items" -msgstr "Anforderungshauptteil enthält zu viele Elemente" - -msgid "Request body contains too many items." -msgstr "Anforderungshauptteil enthält zu viele Elemente." - -msgid "Request body empty" -msgstr "Anforderungshauptteil leer" - -#, python-format -msgid "Request to Datera cluster returned bad status: %(status)s | %(reason)s" -msgstr "" -"Anforderung an Datera-Cluster gibt unzulässigen Status zurück: %(status)s | " -"%(reason)s" - -#, python-format -msgid "" -"Requested backup exceeds allowed Backup gigabytes quota. Requested " -"%(requested)sG, quota is %(quota)sG and %(consumed)sG has been consumed." 
-msgstr "" -"Die angeforderte Sicherung überschreitet das zulässige Kontingent (in " -"Gigabytes) für Sicherungen. Es wurden %(requested)s G bei einem Kontingent " -"von %(quota)s G angefordert und %(consumed)s G verbraucht." - -#, python-format -msgid "" -"Requested volume or snapshot exceeds allowed %(name)s quota. Requested " -"%(requested)sG, quota is %(quota)sG and %(consumed)sG has been consumed." -msgstr "" -"Angeforderter Datenträger oder angeforderte Schattenkopie überschreitet das " -"zulässige %(name)s-Kontingent. %(requested)s G angefordert bei einem " -"Kontingent von %(quota)s G. %(consumed)s G wurden verbraucht." - -#, python-format -msgid "" -"Requested volume size %(size)d is larger than maximum allowed limit " -"%(limit)d." -msgstr "" -"Die Größe des angeforderten Datenträgers %(size)d liegt über dem maximal " -"zulässigen Wert von %(limit)d." - -msgid "Required configuration not found" -msgstr "Erforderliche Konfiguration nicht gefunden." - -#, python-format -msgid "Required flag %s is not set" -msgstr "Erforderliche Markierung %s ist nicht gesetzt." - -#, python-format -msgid "" -"Reset backup status aborted, the backup service currently configured " -"[%(configured_service)s] is not the backup service that was used to create " -"this backup [%(backup_service)s]." -msgstr "" -"Zurücksetzen von Sicherungsstatus abgebrochen. Der derzeit konfigurierte " -"Sicherungsdienst [%(configured_service)s] ist nicht der Sicherungsdienst, " -"der zum Erstellen dieser Sicherung [%(backup_service)s] verwendet wurde." - -#, python-format -msgid "Resizing clone %s failed." -msgstr "Ändern der Größe des Klons %s fehlgeschlagen." - -msgid "Resizing image file failed." -msgstr "Fehler bei der Größenänderung der Abbilddatei." - -msgid "Resource could not be found." -msgstr "Ressource wurde nicht gefunden." - -msgid "Resource not ready." -msgstr "Ressource nicht bereit." - -#, python-format -msgid "Response error - %s." -msgstr "Antwortfehler - %s." 
- -msgid "Response error - The storage-system is offline." -msgstr "Antwortfehler - Das Speichersystem ist offline." - -#, python-format -msgid "Response error code - %s." -msgstr "Antwortfehlercode - %s." - -msgid "RestURL is not configured." -msgstr "RestURL ist nicht konfiguriert." - -#, python-format -msgid "" -"Restore backup aborted, expected volume status %(expected_status)s but got " -"%(actual_status)s." -msgstr "" -"Wiederherstellen von Sicherung abgebrochen. Datenträgerstatus " -"%(expected_status)s erwartet, tatsächlicher Status ist %(actual_status)s." - -#, python-format -msgid "" -"Restore backup aborted, the backup service currently configured " -"[%(configured_service)s] is not the backup service that was used to create " -"this backup [%(backup_service)s]." -msgstr "" -"Wiederherstellen von Sicherung abgebrochen. Der derzeit konfigurierte " -"Sicherungsdienst [%(configured_service)s] ist nicht der Sicherungsdienst, " -"der zum Erstellen dieser Sicherung [%(backup_service)s] verwendet wurde." - -#, python-format -msgid "" -"Restore backup aborted: expected backup status %(expected_status)s but got " -"%(actual_status)s." -msgstr "" -"Wiederherstellen von Sicherung abgebrochen. Sicherungsstatus " -"%(expected_status)s erwartet, tatsächlicher Status ist %(actual_status)s." - -#, python-format -msgid "" -"Retrieved a different amount of SolidFire volumes for the provided Cinder " -"snapshots. Retrieved: %(ret)s Desired: %(des)s" -msgstr "" -"Die Anzahl der abgerufenen SolidFire-Datenträger unterscheidet sich von der " -"für die bereitgestellten Cinder-Schattenkopien. Abgerufen: %(ret)s Sollwert: " -"%(des)s" - -#, python-format -msgid "" -"Retrieved a different amount of SolidFire volumes for the provided Cinder " -"volumes. Retrieved: %(ret)s Desired: %(des)s" -msgstr "" -"Die Anzahl der abgerufenen SolidFire-Datenträger unterscheidet sich von der " -"für die bereitgestellten Cinder-Datenträger. 
Abgerufen: %(ret)s Sollwert: " -"%(des)s" - -#, python-format -msgid "Retry count exceeded for command: %s" -msgstr "Wiederholungsanzahl für Befehl überschritten: %s" - -msgid "Retryable SolidFire Exception encountered" -msgstr "Es ist eine Retryable SolidFire-Ausnahme eingetreten." - -msgid "Retype requires migration but is not allowed." -msgstr "'Retype' erfordert eine Migration; diese ist jedoch nicht zulässig." - -#, python-format -msgid "Rolling back %(volumeName)s by deleting it." -msgstr "Für %(volumeName)s wird ein Rollback durch Löschen durchgeführt." - -#, python-format -msgid "" -"Running Cinder with a VMware vCenter version less than %s is not allowed." -msgstr "" -"Es ist nicht zulässig, Cinder mit einer VMware vCenter-Version auszuführen, " -"die niedriger ist als %s." - -msgid "SAN product is not configured." -msgstr "SAN-Produkt ist nicht konfiguriert." - -msgid "SAN protocol is not configured." -msgstr "SAN-Protokoll ist nicht konfiguriert." - -#, python-format -msgid "SMBFS config 'smbfs_oversub_ratio' invalid. Must be > 0: %s" -msgstr "" -"SMBFS-Konfiguration 'smbfs_oversub_ratio' ist ungültig. Muss > 0 sein: %s" - -#, python-format -msgid "SMBFS config 'smbfs_used_ratio' invalid. Must be > 0 and <= 1.0: %s" -msgstr "" -"SMBFS-Konfiguration 'smbfs_used_ratio' ist ungültig. Muss > 0 und <= 1.0 " -"sein: %s" - -#, python-format -msgid "SMBFS config file at %(config)s doesn't exist." -msgstr "SMBFS-Konfigurationsdatei ist nicht in %(config)s vorhanden." - -msgid "SMBFS config file not set (smbfs_shares_config)." -msgstr "SMBFS-Konfigurationsdatei nicht definiert (smbfs_shares_config)." 
- -#, python-format -msgid "SSH Command failed after '%(total_attempts)r' attempts : '%(command)s'" -msgstr "" -"SSH-Befehl fehlgeschlagen nach '%(total_attempts)r' Versuchen : '%(command)s'" - -#, python-format -msgid "SSH command injection detected: %(command)s" -msgstr "SSH-Befehlsinjektion erkannt: %(command)s" - -#, python-format -msgid "SSH connection failed for %(fabric)s with error: %(err)s" -msgstr "SSH-Verbindung für %(fabric)s fehlgeschlagen mit Fehler: %(err)s" - -#, python-format -msgid "SSL Certificate expired on %s." -msgstr "SSL-Zertifikat abgelaufen am %s." - -#, python-format -msgid "SSL error: %(arg)s." -msgstr "SSL-Fehler: %(arg)s." - -#, python-format -msgid "Scheduler Host Filter %(filter_name)s could not be found." -msgstr "Scheduler-Hostfilter %(filter_name)s wurde nicht gefunden." - -#, python-format -msgid "Scheduler Host Weigher %(weigher_name)s could not be found." -msgstr "Scheduler-Host-Weigher %(weigher_name)s wurde nicht gefunden." - -#, python-format -msgid "" -"Secondary copy status: %(status)s and synchronized: %(sync)s, sync progress " -"is: %(progress)s%%." -msgstr "" -"Status der sekundären Kopie: %(status)s und synchronisiert: %(sync)s, " -"Fortschritt der Synchronisierung ist: %(progress)s%%." - -#, python-format -msgid "" -"Secondary id can not be the same as primary array, backend_id = " -"%(secondary)s." -msgstr "" -"Sekundäre ID darf nicht mit primärem Array übereinstimmen: 'backend_id' = " -"%(secondary)s." - -#, python-format -msgid "SerialNumber must be in the file %(fileName)s." -msgstr "SerialNumber muss in der Datei %(fileName)s vorhanden sein." - -#, python-format -msgid "Service %(service)s on host %(host)s removed." -msgstr "Dienst %(service)s auf Host %(host)s entfernt." - -#, python-format -msgid "Service %(service_id)s could not be found on host %(host)s." -msgstr "Der Dienst %(service_id)s wurde auf dem Host %(host)s nicht gefunden." - -#, python-format -msgid "Service %(service_id)s could not be found." 
-msgstr "Der Dienst %(service_id)s wurde nicht gefunden." - -msgid "Service is too old to fulfil this request." -msgstr "Der Dienst ist zu alt, um diese Anforderung zu erfüllen." - -msgid "Service is unavailable at this time." -msgstr "Der Dienst ist derzeit nicht verfügbar." - -msgid "Set pair secondary access error." -msgstr "Fehler beim Festlegen des sekundären Paarzugriffs." - -msgid "Sets thin provisioning." -msgstr "Legt Thin Provisioning fest." - -msgid "" -"Setting LUN QoS policy group is not supported on this storage family and " -"ONTAP version." -msgstr "" -"Die Festlegung der LUN-QoS-Richtliniengruppe wird für diese Speicherfamilie " -"und diese ONTAP-Version nicht unterstützt." - -msgid "" -"Setting file qos policy group is not supported on this storage family and " -"ontap version." -msgstr "" -"Die Festlegung der Richtliniengruppe 'file qos' wird für diese " -"Speicherfamilie und diese ONTAP-Version nicht unterstützt." - -#, python-format -msgid "" -"Share %s ignored due to invalid format. Must be of form address:/export. " -"Please check the nas_host and nas_share_path settings." -msgstr "" -"Freigegebenes Verzeichnis %s wegen ungültigem Format ignoriert. Muss das " -"Format Adresse:/Export haben. Prüfen Sie die Einstellungen 'nas_host' und " -"'nas_share_path'." - -#, python-format -msgid "" -"Share at %(dir)s is not writable by the Cinder volume service. Snapshot " -"operations will not be supported." -msgstr "" -"Freigegebenes Verzeichnis unter %(dir)s kann vom Cinder-Datenträgerdienst " -"nicht beschrieben werden. Schattenkopieoperationen werden nicht unterstützt." - -#, python-format -msgid "Sheepdog I/O Error, command was: \"%s\"." -msgstr "Sheepdog-E/A-Fehler. Befehl war: \"%s\"." - -msgid "" -"Show operations can only be made to projects in the same hierarchy of the " -"project in which users are scoped to." 
-msgstr "" -"Operationen zum Anzeigen können nur an Projekten in derselben Hierarchie des " -"Projekts, das als Bereich für Benutzer festgelegt wurde, ausgeführt werden." - -msgid "Size" -msgstr "Größe" - -#, python-format -msgid "Size for volume: %s not found, cannot secure delete." -msgstr "" -"Größe für Datenträger: %s nicht gefunden, sicherer Löschvorgang nicht " -"möglich." - -#, python-format -msgid "" -"Size is %(image_size)dGB and doesn't fit in a volume of size " -"%(volume_size)dGB." -msgstr "" -"Größe beträgt %(image_size)d GB und passt nicht in einen Datenträger mit der " -"Größe %(volume_size)d GB." - -#, python-format -msgid "" -"Size of specified image %(image_size)sGB is larger than volume size " -"%(volume_size)sGB." -msgstr "" -"Größe des angegebenen Abbilds mit %(image_size)s GB übersteigt die Größe des " -"Datenträgers mit %(volume_size)s GB." - -#, python-format -msgid "" -"Snapshot %(id)s has been asked to be deleted while waiting for it to become " -"available. Perhaps a concurrent request was made." -msgstr "" -"Es wurde angefordert, die Schattenkopie %(id)s zu löschen, während darauf " -"gewartet wurde, dass sie verfügbar wird. Möglicherweise wurde eine " -"gleichzeitige Anforderung gestellt." - -#, python-format -msgid "" -"Snapshot %(id)s was found in state %(state)s rather than 'deleting' during " -"cascade delete." -msgstr "" -"Die Schattenkopie %(id)s wurde während des kaskadierenden Löschvorgangs im " -"Status %(state)s und nicht im Status 'Wird gelöscht' gefunden." - -#, python-format -msgid "Snapshot %(snapshot_id)s could not be found." -msgstr "Schattenkopie %(snapshot_id)s wurde nicht gefunden." - -#, python-format -msgid "Snapshot %(snapshot_id)s has no metadata with key %(metadata_key)s." -msgstr "" -"Schattenkopie %(snapshot_id)s enthält keine Metadaten mit dem Schlüssel " -"%(metadata_key)s." - -#, python-format -msgid "Snapshot '%s' doesn't exist on array." -msgstr "Schattenkopie '%s' ist im Array nicht vorhanden." 
-
-#, python-format
-msgid ""
-"Snapshot cannot be created because volume %(vol_id)s is not available, "
-"current volume status: %(vol_status)s."
-msgstr ""
-"Schattenkopie kann nicht erstellt werden, da Datenträger %(vol_id)s nicht "
-"verfügbar ist. Aktueller Datenträgerstatus: %(vol_status)s."
-
-msgid "Snapshot cannot be created while volume is migrating."
-msgstr ""
-"Die Schattenkopie kann nicht erstellt werden, während der Datenträger "
-"migriert wird."
-
-msgid "Snapshot of secondary replica is not allowed."
-msgstr "Die Schattenkopie der sekundären Replik ist nicht zulässig. "
-
-#, python-format
-msgid "Snapshot of volume not supported in state: %s."
-msgstr "Schattenkopie von Datenträger im Status %s nicht unterstützt."
-
-#, python-format
-msgid "Snapshot res \"%s\" that is not deployed anywhere?"
-msgstr "Schattenkopieressource \"%s\", die an keiner Stelle implementiert ist?"
-
-#, python-format
-msgid "Snapshot status %(cur)s not allowed for update_snapshot_status"
-msgstr ""
-"Schattenkopiestatus %(cur)s ist nicht zulässig für update_snapshot_status."
-
-msgid "Snapshot status must be \"available\" to clone."
-msgstr "Die Schattenkopie muss zum Klonen den Status \"available\" haben."
-
-#, python-format
-msgid ""
-"Snapshot to be backed up must be available, but the current status is \"%s\"."
-msgstr ""
-"Die Schattenkopie, die gesichert werden soll, muss verfügbar sein, aber der "
-"aktuelle Status ist \"%s\"."
-
-#, python-format
-msgid "Snapshot with id of %s could not be found."
-msgstr "Schattenkopie mit der ID %s wurde nicht gefunden."
-
-#, python-format
-msgid ""
-"Snapshot='%(snap)s' does not exist in base image='%(base)s' - aborting "
-"incremental backup"
-msgstr ""
-"Die Schattenkopie '%(snap)s' ist nicht im Basisabbild '%(base)s' vorhanden. "
-"Die inkrementelle Sicherung wird abgebrochen."
- -#, python-format -msgid "Snapshots are not supported for this volume format: %s" -msgstr "" -"Schattenkopien werden für dieses Datenträgerformat nicht unterstützt: %s" - -#, python-format -msgid "Socket error: %(arg)s." -msgstr "Socketfehler: %(arg)s." - -msgid "SolidFire Cinder Driver exception" -msgstr "SolidFire Cinder-Treiberausnahme" - -msgid "Sort direction array size exceeds sort key array size." -msgstr "" -"Array-Größe der Sortierrichtung überschreitet Array-Größe des " -"Sortierschlüssels." - -msgid "Source CG is empty. No consistency group will be created." -msgstr "" -"Quellenkonsistenzgruppe ist leer. Es wird keine Konsistenzgruppe erstellt." - -msgid "Source host details not found." -msgstr "Quellenhostdetails nicht gefunden." - -msgid "Source volume device ID is required." -msgstr "Geräte-ID für Quellendatenträger ist erforderlich." - -msgid "Source volume not mid-migration." -msgstr "Quellendatenträger befindet sich nicht in einer Migration." - -msgid "SpaceInfo returned byarray is invalid" -msgstr "SpaceInfo hat zurückgegeben, dass byarray ungültig ist." - -#, python-format -msgid "" -"Specified host to map to volume %(vol)s is in unsupported host group with " -"%(group)s." -msgstr "" -"Der angegebene Host, der dem Datenträger %(vol)s zugeordnet werden soll, " -"befindet sich in einer nicht unterstützten Hostgruppe mit %(group)s." - -msgid "Specified logical volume does not exist." -msgstr "Der angegebene logische Datenträger ist nicht vorhanden." - -#, python-format -msgid "Specified snapshot group with id %s could not be found." -msgstr "Angegebene Schattenkopiegruppe mit der ID %s wurde nicht gefunden. " - -msgid "Specify a password or private_key" -msgstr "Geben Sie einen Wert für 'password' oder 'private_key' an." - -msgid "Specify san_password or san_private_key" -msgstr "Geben Sie 'san_password' oder 'san_private_key' an." - -msgid "" -"Specify volume type name, description, is_public or a combination thereof." 
-msgstr "" -"Geben Sie den Namen des Datenträgertyps, eine Beschreibung, 'is_public' oder " -"eine Kombination aus allem an." - -msgid "Split pair error." -msgstr "Fehler bei Paaraufteilung." - -msgid "Split replication failed." -msgstr "Aufteilen der Replikation fehlgeschlagen." - -msgid "Start LUNcopy error." -msgstr "Fehler beim Starten der LUN-Kopie." - -msgid "State" -msgstr "Zustand" - -#, python-format -msgid "State of node is wrong. Current state is %s." -msgstr "Status des Knotens ist falsch. Aktueller Status: %s." - -msgid "Status" -msgstr "Status" - -msgid "Stop snapshot error." -msgstr "Fehler beim Stoppen der Schattenkopie." - -#, python-format -msgid "Storage Configuration Service not found on %(storageSystemName)s." -msgstr "" -"Speicherkonfigurationsdienst wurde in %(storageSystemName)s nicht gefunden." - -#, python-format -msgid "Storage HardwareId mgmt Service not found on %(storageSystemName)s." -msgstr "" -"Der Dienst für die Verwaltung von Speicherhardware-IDs wurde in " -"%(storageSystemName)s nicht gefunden. " - -#, python-format -msgid "Storage Profile %s not found." -msgstr "Speicherprofil %s nicht gefunden." - -#, python-format -msgid "Storage Relocation Service not found on %(storageSystemName)s." -msgstr "" -"Speicherversetzungsdienst wurde in %(storageSystemName)s nicht gefunden." - -#, python-format -msgid "Storage family %s is not supported." -msgstr "Speicherfamilie %s wird nicht unterstützt." - -#, python-format -msgid "Storage group %(storageGroupName)s was not deleted successfully" -msgstr "" -"Die Speichergruppe %(storageGroupName)s wurde nicht erfolgreich gelöscht." - -#, python-format -msgid "Storage host %(svr)s not detected, verify name" -msgstr "Speicherhost %(svr)s nicht gefunden. Überprüfen Sie den Namen." - -msgid "Storage pool is not configured." -msgstr "Es ist kein Speicherpool konfiguriert." - -#, python-format -msgid "Storage profile: %(storage_profile)s not found." 
-msgstr "Speicherprofil %(storage_profile)s nicht gefunden." - -msgid "Storage resource could not be found." -msgstr "Speicherressource wurde nicht gefunden." - -msgid "Storage system id not set." -msgstr "Speichersystem-ID nicht festgelegt." - -#, python-format -msgid "Storage system not found for pool %(poolNameInStr)s." -msgstr "Speichersystem für Pool %(poolNameInStr)s nicht gefunden." - -#, python-format -msgid "StorageSystem %(array)s is not found." -msgstr "StorageSystem %(array)s wurde nicht gefunden. " - -#, python-format -msgid "" -"Sum of child usage '%(sum)s' is greater than free quota of '%(free)s' for " -"project '%(proj)s' for resource '%(res)s'. Please lower the limit or usage " -"for one or more of the following projects: '%(child_ids)s'" -msgstr "" -"Die Summe der Nutzung untergeordneter Projekte '%(sum)s' ist größer als das " -"freie Kontingent von '%(free)s' für das Projekt '%(proj)s' für die Ressource " -"'%(res)s'. Reduzieren Sie den Grenzwert oder die Nutzung für mindestens " -"eines der folgenden Projekte: '%(child_ids)s'" - -msgid "Switch over pair error." -msgstr "Fehler bei Paarwechsel." - -msgid "Sync pair error." -msgstr "Fehler bei Paarsynchronisierung." - -#, python-format -msgid "System %(id)s found with bad password status - %(pass_status)s." -msgstr "" -"System %(id)s mit unzulässigem Kennwortstatus gefunden: %(pass_status)s." - -#, python-format -msgid "System %(id)s found with bad status - %(status)s." -msgstr "System %(id)s gefunden mit unzulässigem Status: %(status)s." - -msgid "System does not support compression." -msgstr "System unterstützt Komprimierung nicht." - -msgid "System is busy, retry operation." -msgstr "Das System ist ausgelastet. Wiederholen Sie die Operation." - -#, python-format -msgid "" -"TSM [%(tsm)s] was not found in CloudByte storage for account [%(account)s]." -msgstr "" -"TSM [%(tsm)s] wurde in CloudByte-Speicher für Konto [%(account)s] nicht " -"gefunden." - -msgid "Target volume type is still in use." 
-msgstr "Zieldatenträgertyp ist noch im Gebrauch." - -msgid "Terminate connection failed" -msgstr "Beenden der Verbindung fehlgeschlagen" - -msgid "Terminate connection unable to connect to backend." -msgstr "" -"Beim Beenden der Verbindung konnte keine Verbindung zum Backend hergestellt " -"werden." - -#, python-format -msgid "Terminate volume connection failed: %(err)s" -msgstr "Beenden der Datenträgerverbindung fehlgeschlagen: %(err)s" - -#, python-format -msgid "The %(type)s %(id)s source to be replicated was not found." -msgstr "Die zu replizierende Quelle %(type)s %(id)s wurde nicht gefunden." - -msgid "" -"The 'sort_key' and 'sort_dir' parameters are deprecated and cannot be used " -"with the 'sort' parameter." -msgstr "" -"Die Parameter 'sort_key' und 'sort_dir' sind veraltet und können nicht dem " -"Parameter 'sort' verwendet werden." - -msgid "The EQL array has closed the connection." -msgstr "Der EQL-Array hat die Verbindung geschlossen." - -#, python-format -msgid "" -"The GPFS filesystem %(fs)s is not at the required release level. Current " -"level is %(cur)s, must be at least %(min)s." -msgstr "" -"Das GPFS-Dateisystem %(fs)s ist nicht auf dem erforderlichen Releasestand. " -"Aktueller Stand ist %(cur)s, er muss aber mindestens %(min)s sein." - -msgid "The IP Address was not found." -msgstr "Die IP-Adresse wurde nicht gefunden." - -#, python-format -msgid "" -"The WebDAV request failed. Reason: %(msg)s, Return code/reason: %(code)s, " -"Source Volume: %(src)s, Destination Volume: %(dst)s, Method: %(method)s." -msgstr "" -"Die WebDAV-Anforderung ist fehlgeschlagen. Ursache: %(msg)s, Rückgabecode/" -"Ursache: %(code)s, Quellendatenträger: %(src)s, Zieldatenträger: %(dst)s, " -"Methode: %(method)s." - -msgid "" -"The above error may show that the database has not been created.\n" -"Please create a database using 'cinder-manage db sync' before running this " -"command." 
-msgstr ""
-"Der oben genannte Fehler gibt möglicherweise an, dass die Datenbank nicht "
-"erstellt wurde.\n"
-"Erstellen Sie eine Datenbank mithilfe von 'cinder-manage db sync', bevor Sie "
-"diesen Befehl ausführen."
-
-#, python-format
-msgid ""
-"The array does not support the storage pool setting for SLO %(slo)s and "
-"workload %(workload)s. Please check the array for valid SLOs and workloads."
-msgstr ""
-"Das Array unterstützt nicht die Speicherpooleinstellung für SLO %(slo)s und "
-"Workload %(workload)s. Überprüfen Sie, ob das Array gültige SLOs und "
-"Workloads enthält."
-
-msgid ""
-"The back-end where the volume is created does not have replication enabled."
-msgstr ""
-"Die Replikation ist in dem Backend, in dem der Datenträger erstellt wurde, "
-"nicht aktiviert."
-
-#, python-format
-msgid ""
-"The command %(cmd)s failed. (ret: %(ret)s, stdout: %(out)s, stderr: %(err)s)"
-msgstr ""
-"Der Befehl %(cmd)s ist fehlgeschlagen. (Rückgabe: %(ret)s, Standardausgabe: "
-"%(out)s, Standardfehler: %(err)s)"
-
-msgid "The copy should be primary or secondary"
-msgstr "Die Kopie muss primär oder sekundär sein."
-
-#, python-format
-msgid ""
-"The creation of a logical device could not be completed. (LDEV: %(ldev)s)"
-msgstr ""
-"Die Erstellung einer logischen Einheit konnte nicht abgeschlossen werden. "
-"(Logische Einheit: %(ldev)s)"
-
-msgid "The decorated method must accept either a volume or a snapshot object"
-msgstr ""
-"Die dekorierte Methode muss entweder einen Datenträger oder ein "
-"Schattenkopieobjekt akzeptieren."
-
-#, python-format
-msgid "The device in the path %(path)s is unavailable: %(reason)s"
-msgstr "Das Gerät im Pfad %(path)s ist nicht erreichbar: %(reason)s"
-
-#, python-format
-msgid "The end time (%(end)s) must be after the start time (%(start)s)."
-msgstr "Die Endzeit (%(end)s) muss nach der Startzeit (%(start)s) liegen."
-
-#, python-format
-msgid "The extraspec: %(extraspec)s is not valid."
-msgstr "Die zusätzliche Spezifikation %(extraspec)s ist nicht gültig." - -#, python-format -msgid "The failed-over volume could not be deleted: %s" -msgstr "Der Failover-Datenträger konnte nicht gelöscht werden: %s" - -#, python-format -msgid "The following elements are required: %s" -msgstr "Die folgenden Elemente sind erforderlich: %s" - -msgid "The host group or iSCSI target could not be added." -msgstr "Die Hostgruppe oder das iSCSI-Ziel konnte nicht hinzugefügt werden." - -msgid "The host group or iSCSI target was not found." -msgstr "Die Hostgruppe oder das iSCSI-Ziel wurde nicht gefunden." - -msgid "" -"The host is not ready to be failed back. Please resynchronize the volumes " -"and resume replication on the 3PAR backends." -msgstr "" -"Der Host ist nicht für die Zurücksetzung bereit. Synchronisieren Sie erneut " -"die Datenträger und nehmen Sie die Replikation an den 3PAR-Backends wieder " -"auf. " - -msgid "" -"The host is not ready to be failed back. Please resynchronize the volumes " -"and resume replication on the LeftHand backends." -msgstr "" -"Der Host ist nicht für die Zurücksetzung bereit. Synchronisieren Sie erneut " -"die Datenträger und nehmen Sie die Replikation an den LeftHand-Backends " -"wieder auf. " - -msgid "" -"The host is not ready to be failed back. Please resynchronize the volumes " -"and resume replication on the Storwize backends." -msgstr "" -"Der Host ist nicht für die Zurücksetzung bereit. Synchronisieren Sie erneut " -"die Datenträger und nehmen Sie die Replikation an den Storwize-Backends " -"wieder auf. " - -#, python-format -msgid "The iSCSI CHAP user %(user)s does not exist." -msgstr "Der iSCSI-CHAP-Benutzer %(user)s ist nicht vorhanden." - -#, python-format -msgid "The job has not completed and is in a %(state)s state." -msgstr "Die Aufgabe wurde nicht ausgeführt und hat den Status %(state)s." - -msgid "The key cannot be None." -msgstr "Der Schlüssel kann nicht None sein." 
- -#, python-format -msgid "The logical device for specified %(type)s %(id)s was already deleted." -msgstr "" -"Die logische Einheit für angegebenen %(type)s %(id)s wurde bereits gelöscht." - -#, python-format -msgid "The method %(method)s is timed out. (timeout value: %(timeout)s)" -msgstr "" -"Das zulässige Zeitlimit für Methode %(method)s wurde überschritten. " -"(Zeitlimitwert: %(timeout)s)" - -msgid "The method update_migrated_volume is not implemented." -msgstr "Die Methode update_migrated_volume ist nicht implementiert." - -#, python-format -msgid "" -"The mount %(mount_path)s is not a valid Quobyte USP volume. Error: %(exc)s" -msgstr "" -"Der im Pfad %(mount_path)s eingehängte Datenträger ist kein gültiger Quobyte-" -"USP-Datenträger. Fehler: %(exc)s" - -#, python-format -msgid "The parameter of the storage backend. (config_group: %(config_group)s)" -msgstr "Der Parameter des Speicherbackends. (config_group: %(config_group)s)" - -msgid "The parent backup must be available for incremental backup." -msgstr "" -"Die übergeordnete Sicherung muss für eine inkrementelle Sicherung verfügbar " -"sein." - -#, python-format -msgid "The provided snapshot '%s' is not a snapshot of the provided volume." -msgstr "" -"Die angegebene Schattenkopie '%s' ist keine Schattenkopie des angegebenen " -"Datenträgers." - -msgid "" -"The reference to the volume in the backend should have the format " -"file_system/volume_name (volume_name cannot contain '/')" -msgstr "" -"Der Verweis auf den Datenträger im Backend muss das Format file_system/" -"volume_name haben (volume_name darf keinen Schrägstrich '/' enthalten)." - -#, python-format -msgid "The remote retention count must be %s or less." -msgstr "Der Zähler für ferne Aufbewahrung muss kleiner-gleich %s sein. " - -msgid "" -"The replication mode was not configured correctly in the volume type " -"extra_specs. 
If replication:mode is periodic, replication:sync_period must " -"also be specified and be between 300 and 31622400 seconds." -msgstr "" -"Der Replikationsmodus wurde in den extra_specs für den Datenträgertyp nicht " -"ordnungsgemäß konfiguriert. Wenn replication:mode periodisch ist, muss " -"replication:sync_period ebenfalls angegeben werden, und zwar mit einem Wert " -"zwischen 300 und 31622400 Sekunden." - -#, python-format -msgid "The replication sync period must be at least %s seconds." -msgstr "" -"Der Synchronisierungszeitraum für die Replikation muss mindestens %s " -"Sekunden sein." - -#, python-format -msgid "" -"The requested size : %(requestedSize)s is not the same as resulting size: " -"%(resultSize)s." -msgstr "" -"Die angeforderte Größe %(requestedSize)s entspricht nicht der sich " -"ergebenden Größe %(resultSize)s." - -#, python-format -msgid "The resource %(resource)s was not found." -msgstr "Die Ressource %(resource)s wurde nicht gefunden." - -msgid "The results are invalid." -msgstr "Die Ergebnisse sind ungültig." - -#, python-format -msgid "The retention count must be %s or less." -msgstr "Der Zähler für Aufbewahrung muss kleiner-gleich %s sein." - -msgid "The snapshot cannot be created when the volume is in maintenance mode." -msgstr "" -"Die Schattenkopie kann nicht erstellt werden, wenn der Datenträger im " -"Wartungsmodus ist." - -#, python-format -msgid "The snapshot is unavailable: %(data)s" -msgstr "Die Schattenkopie ist nicht verfügbar: %(data)s" - -msgid "The source volume for this WebDAV operation not found." -msgstr "" -"Der Quellendatenträger für diese WebDAV-Operation wurde nicht gefunden." - -#, python-format -msgid "" -"The source volume type '%(src)s' is different than the destination volume " -"type '%(dest)s'." -msgstr "" -"Der Quellendatenträgertyp '%(src)s' unterscheidet sich vom " -"Zieldatenträgertyp '%(dest)s'." - -#, python-format -msgid "The source volume type '%s' is not available." 
-msgstr "Der Quellendatenträgertyp '%s' ist nicht verfügbar." - -#, python-format -msgid "The specified %(desc)s is busy." -msgstr "Die angegebene %(desc)s ist ausgelastet." - -#, python-format -msgid "The specified LUN does not belong to the given pool: %s." -msgstr "Die angegebene LUN gehört nicht zum genannten Pool: %s." - -#, python-format -msgid "" -"The specified ldev %(ldev)s could not be managed. The ldev must not be " -"mapping." -msgstr "" -"Die angegebene logische Einheit %(ldev)s konnte nicht verwaltet werden. Die " -"logische Einheit darf nicht zuordnend sein." - -#, python-format -msgid "" -"The specified ldev %(ldev)s could not be managed. The ldev must not be " -"paired." -msgstr "" -"Die angegebene logische Einheit %(ldev)s konnte nicht verwaltet werden. Die " -"logische Einheit darf nicht paarweise verbunden sein." - -#, python-format -msgid "" -"The specified ldev %(ldev)s could not be managed. The ldev size must be in " -"multiples of gigabyte." -msgstr "" -"Die angegebene logische Einheit %(ldev)s konnte nicht verwaltet werden. Die " -"Größe der logischen Einheit muss ein Vielfaches von Gigabyte sein." - -#, python-format -msgid "" -"The specified ldev %(ldev)s could not be managed. The volume type must be DP-" -"VOL." -msgstr "" -"Die angegebene logische Einheit %(ldev)s konnte nicht verwaltet werden. Der " -"Datenträgertyp muss 'DP-VOL' sein." - -#, python-format -msgid "" -"The specified operation is not supported. The volume size must be the same " -"as the source %(type)s. (volume: %(volume_id)s)" -msgstr "" -"Die angegebene Operation wird nicht unterstützt. Die Datenträgergröße muss " -"der Quelle %(type)s entsprechen. (Datenträger: %(volume_id)s)" - -msgid "The specified vdisk is mapped to a host." -msgstr "Die angegebene virtuelle Platte ist einem Host zugeordnet." - -msgid "The specified volume is mapped to a host." -msgstr "Der angegebene Datenträger ist einem Host zugeordnet." 
- -#, python-format -msgid "" -"The storage array password for %s is incorrect, please update the configured " -"password." -msgstr "" -"Das Speicherarray-Kennwort für %s ist falsch. Aktualisieren Sie das " -"konfigurierte Kennwort. " - -#, python-format -msgid "The storage backend can be used. (config_group: %(config_group)s)" -msgstr "" -"Das Speicherbackend kann verwendet werden. (config_group: %(config_group)s)" - -#, python-format -msgid "" -"The storage device does not support %(prot)s. Please configure the device to " -"support %(prot)s or switch to a driver using a different protocol." -msgstr "" -"Die Speichereinheit unterstützt %(prot)s nicht. Konfigurieren Sie das Gerät " -"für die Unterstützung von %(prot)s oder wechseln Sie zu einem Treiber, der " -"ein anderes Protokoll verwendet. " - -#, python-format -msgid "" -"The striped meta count of %(memberCount)s is too small for volume: " -"%(volumeName)s, with size %(volumeSize)s." -msgstr "" -"Die Striping-Metazahl %(memberCount)s ist zu klein für Datenträger " -"%(volumeName)s mit der Größe %(volumeSize)s." - -#, python-format -msgid "" -"The type of metadata: %(metadata_type)s for volume/snapshot %(id)s is " -"invalid." -msgstr "" -"Der Metadatentyp %(metadata_type)s für Datenträger/Schattenkopie %(id)s ist " -"ungültig." - -#, python-format -msgid "" -"The volume %(volume_id)s could not be extended. The volume type must be " -"Normal." -msgstr "" -"Der Datenträger %(volume_id)s konnte nicht erweitert werden. Der " -"Datenträgertyp muss 'Normal' lauten." - -#, python-format -msgid "" -"The volume %(volume_id)s could not be unmanaged. The volume type must be " -"%(volume_type)s." -msgstr "" -"Die Verwaltung für Datenträger %(volume_id)s konnte nicht aufgehoben werden. " -"Der Datenträgertyp muss %(volume_type)s sein." - -#, python-format -msgid "The volume %(volume_id)s is managed successfully. (LDEV: %(ldev)s)" -msgstr "" -"Der Datenträger %(volume_id)s wird erfolgreich verwaltet. 
(Logische Einheit: " -"%(ldev)s)" - -#, python-format -msgid "The volume %(volume_id)s is unmanaged successfully. (LDEV: %(ldev)s)" -msgstr "" -"Die Verwaltung von Datenträger %(volume_id)s wurde erfolgreich aufgehoben. " -"(Logische Einheit: %(ldev)s)" - -#, python-format -msgid "The volume %(volume_id)s to be mapped was not found." -msgstr "Der zuzuordnende Datenträger %(volume_id)s wurde nicht gefunden." - -msgid "The volume cannot accept transfer in maintenance mode." -msgstr "Der Datenträger kann im Wartungsmodus keine Übertragung akzeptieren." - -msgid "The volume cannot be attached in maintenance mode." -msgstr "Der Datenträger kann im Wartungsmodus nicht angehängt werden." - -msgid "The volume cannot be detached in maintenance mode." -msgstr "Der Datenträger kann im Wartungsmodus nicht abgehängt werden." - -msgid "The volume cannot be updated during maintenance." -msgstr "Der Datenträger kann während der Wartung nicht aktualisiert werden." - -msgid "The volume connection cannot be initialized in maintenance mode." -msgstr "" -"Die Datenträgerverbindung kann im Wartungsmodus nicht initialisiert werden." - -msgid "The volume driver requires the iSCSI initiator name in the connector." -msgstr "" -"Der Datenträgertreiber erfordert den Namen des iSCSI-Initiators im Connector." - -msgid "" -"The volume is currently busy on the 3PAR and cannot be deleted at this time. " -"You can try again later." -msgstr "" -"Der Datenträger ist in 3PAR aktiv und kann derzeit nicht gelöscht werden. " -"Sie können es später erneut versuchen. " - -msgid "The volume label is required as input." -msgstr "Die Datenträgerbezeichnung ist als Eingabe erforderlich." - -#, python-format -msgid "There are no resources available for use. (resource: %(resource)s)" -msgstr "" -"Es sind keine Ressourcen zur Verwendung verfügbar. (Ressource: %(resource)s)" - -msgid "There are no valid ESX hosts." -msgstr "Es sind keine gültigen ESX-Hosts vorhanden." - -msgid "There are no valid datastores." 
-msgstr "Es gibt keine gültigen Datenspeicher." - -#, python-format -msgid "" -"There is no designation of the %(param)s. The specified storage is essential " -"to manage the volume." -msgstr "" -"Es gibt keine Bezeichnung der %(param)s. Der angegebene Speicher ist zum " -"Verwalten des Datenträgers erforderlich." - -msgid "" -"There is no designation of the ldev. The specified ldev is essential to " -"manage the volume." -msgstr "" -"Es gibt keine Bezeichnung der logischen Einheit. Die angegebene logische " -"Einheit ist zum Verwalten des Datenträgers erforderlich." - -msgid "There is no metadata in DB object." -msgstr "Im DB-Objekt sind keine Metadaten vorhanden." - -#, python-format -msgid "There is no share which can host %(volume_size)sG" -msgstr "" -"Es ist kein gemeinsam genutztes Laufwerk vorhanden, das %(volume_size)s G " -"hosten kann." - -#, python-format -msgid "There is no share which can host %(volume_size)sG." -msgstr "" -"Es ist kein gemeinsam genutztes Laufwerk vorhanden, das %(volume_size)s G " -"hosten kann." - -#, python-format -msgid "There is no such action: %s" -msgstr "Keine solche Aktion vorhanden: %s" - -msgid "There is no virtual disk device." -msgstr "Es ist keine virtuelle Platteneinheit vorhanden." - -#, python-format -msgid "There was an error adding the volume to the remote copy group: %s." -msgstr "" -"Fehler beim Hinzufügen des Datenträgers zur Gruppe der fernen Kopie: %s." - -#, python-format -msgid "There was an error creating the cgsnapshot: %s" -msgstr "Beim Erstellen von cgsnapshot ist ein Fehler aufgetreten: %s" - -#, python-format -msgid "There was an error creating the remote copy group: %s." -msgstr "Fehler beim Erstellen der Gruppe der fernen Kopie: %s." - -#, python-format -msgid "" -"There was an error setting the sync period for the remote copy group: %s." -msgstr "" -"Fehler beim Konfigurieren des Synchronisierungsintervalls für die Gruppe der " -"fernen Kopie: %s." 
- -#, python-format -msgid "" -"There was an error setting up a remote copy group on the 3PAR arrays: " -"('%s'). The volume will not be recognized as replication type." -msgstr "" -"Beim Konfigurieren einer Gruppe der fernen Kopie in den 3PAR-Arrays ist ein " -"Fehler aufgetreten: ('%s'). Der Datenträger wird nicht als Replikationstyp " -"erkannt." - -#, python-format -msgid "" -"There was an error setting up a remote schedule on the LeftHand arrays: " -"('%s'). The volume will not be recognized as replication type." -msgstr "" -"Beim Konfigurieren eines fernen Zeitplans für die LefHand-Arrays ist ein " -"Fehler aufgetreten: ('%s'). Der Datenträger wird nicht als Replikationstyp " -"erkannt." - -#, python-format -msgid "There was an error starting remote copy: %s." -msgstr "Fehler beim Starten der fernen Kopie: %s." - -#, python-format -msgid "There's no Gluster config file configured (%s)" -msgstr "Keine Gluster-Konfigurationsdatei konfiguriert (%s)." - -#, python-format -msgid "There's no NFS config file configured (%s)" -msgstr "Keine NFS-Konfigurationsdatei konfiguriert (%s)." - -#, python-format -msgid "" -"There's no Quobyte volume configured (%s). Example: quobyte:///" -"" -msgstr "" -"Es ist kein konfigurierter Quobyte-Datenträger vorhanden (%s). Beispiel: " -"quobyte:///" - -msgid "Thin provisioning not supported on this version of LVM." -msgstr "Thin Provisioning wird mit dieser Version von LVM nicht unterstützt." - -msgid "This driver does not support deleting in-use snapshots." -msgstr "" -"Dieser Treiber unterstützt nicht das Löschen von Schattenkopien mit dem " -"Status 'in-use'." - -msgid "This driver does not support snapshotting in-use volumes." -msgstr "" -"Dieser Treiber unterstützt nicht das Erstellen von Schattenkopien für " -"Datenträger mit dem Status 'in-use'." - -msgid "This request was rate-limited." -msgstr "Übertragungsratenbegrenzung für diese Anforderung." - -#, python-format -msgid "" -"This system platform (%s) is not supported. 
This driver supports only Win32 " -"platforms." -msgstr "" -"Diese Systemplattform (%s) wird nicht unterstützt. Dieser Treiber " -"unterstützt nur Win32-Plattformen." - -#, python-format -msgid "Tier Policy Service not found for %(storageSystemName)s." -msgstr "Tier-Richtliniendienst wurde in %(storageSystemName)s nicht gefunden. " - -#, python-format -msgid "Timed out while waiting for Nova update for creation of snapshot %s." -msgstr "" -"Zeitlimit beim Warten auf Nova-Aktualisierung zum Erstellen von " -"Schattenkopie %s überschritten." - -#, python-format -msgid "" -"Timed out while waiting for Nova update for deletion of snapshot %(id)s." -msgstr "" -"Zeitlimit beim Warten auf Nova-Aktualisierung zum Löschen von Schattenkopie " -"%(id)s überschritten." - -#, python-format -msgid "Timeout while calling %s " -msgstr "Zeitlimitüberschreitung beim Aufruf von %s " - -#, python-format -msgid "Timeout while requesting %(service)s API." -msgstr "Zeitlimitüberschreitung beim Anfordern der %(service)s-API." - -#, python-format -msgid "Timeout while requesting capabilities from backend %(service)s." -msgstr "" -"Zeitlimitüberschreitung beim Anfordern von Funktionen aus dem Backend " -"%(service)s." - -#, python-format -msgid "Transfer %(transfer_id)s could not be found." -msgstr "Die Übertragung %(transfer_id)s wurde nicht gefunden." - -#, python-format -msgid "" -"Transfer %(transfer_id)s: Volume id %(volume_id)s in unexpected state " -"%(status)s, expected awaiting-transfer" -msgstr "" -"Übertragung %(transfer_id)s: Die Datenträger-ID %(volume_id)s hat nicht den " -"erwartetem Status %(status)s. Erwartet wurde 'awaiting-transfer'." - -#, python-format -msgid "" -"Trying to import backup metadata from id %(meta_id)s into backup %(id)s." -msgstr "" -"Es wird versucht, Sicherungsmetadaten aus ID %(meta_id)s in Sicherung %(id)s " -"zu importieren." 
- -#, python-format -msgid "" -"Tune volume task stopped before it was done: volume_name=%(volume_name)s, " -"task-status=%(status)s." -msgstr "" -"Die Aufgabe zum Optimieren des Datenträgers wurde gestoppt, bevor sie fertig " -"war: volume_name=%(volume_name)s, task-status=%(status)s." - -#, python-format -msgid "" -"Type %(type_id)s is already associated with another qos specs: " -"%(qos_specs_id)s" -msgstr "" -"Der Typ %(type_id)s ist bereits anderen QoS-Spezifikationen zugeordnet: " -"%(qos_specs_id)s." - -msgid "Type access modification is not applicable to public volume type." -msgstr "" -"Änderung des Typzugriffs ist auf öffentliche Datenträger nicht anwendbar." - -msgid "Type cannot be converted into NaElement." -msgstr "Typ kann nicht in NaElement konvertiert werden." - -#, python-format -msgid "TypeError: %s" -msgstr "TypeError: %s" - -#, python-format -msgid "UUIDs %s are in both add and remove volume list." -msgstr "" -"UUIDs %s befinden sich sowohl in der Liste zum Hinzufügen als auch in der " -"zum Entfernen von Datenträgern." - -#, python-format -msgid "Unable to access the Storwize back-end for volume %s." -msgstr "Kein Zugriff auf das Storwize-Backend für den Datenträger %s möglich." - -msgid "Unable to access the backend storage via file handle." -msgstr "Zugriff auf Backendspeicher über Dateikennung nicht möglich." - -#, python-format -msgid "Unable to access the backend storage via the path %(path)s." -msgstr "Zugriff auf Backendspeicher über den Pfad %(path)s nicht möglich." - -#, python-format -msgid "Unable to add Cinder host to apphosts for space %(space)s" -msgstr "" -"Cinder-Host konnte apphosts für Bereich %(space)s nicht hinzugefügt werden." - -#, python-format -msgid "Unable to complete failover of %s." -msgstr "Das Failover von %s konnte nicht abgeschlossen werden." 
- -msgid "Unable to connect or find connection to host" -msgstr "" -"Verbindung zum Host kann nicht hergestellt werden oder es wurde keine " -"Verbindung zum Host gefunden." - -#, python-format -msgid "Unable to create consistency group %s" -msgstr "Konsistenzgruppe %s konnte nicht erstellt werden." - -msgid "Unable to create lock. Coordination backend not started." -msgstr "" -"Es konnte keine Sperre erstellt werden. Das Koordinierungsbackend wurde " -"nicht gestartet." - -#, python-format -msgid "" -"Unable to create or get default storage group for FAST policy: " -"%(fastPolicyName)s." -msgstr "" -"Standardspeichergruppe für FAST-Richtlinie kann nicht erstellt oder " -"abgerufen werden: %(fastPolicyName)s." - -#, python-format -msgid "Unable to create replica clone for volume %s." -msgstr "Replikatklon für Datenträger %s konnte nicht erstellt werden." - -#, python-format -msgid "Unable to create the relationship for %s." -msgstr "Beziehung für %s kann nicht erstellt werden." - -#, python-format -msgid "Unable to create volume %(name)s from %(snap)s." -msgstr "Der Datenträger %(name)s konnte nicht aus %(snap)s erstellt werden." - -#, python-format -msgid "Unable to create volume %(name)s from %(vol)s." -msgstr "Der Datenträger %(name)s konnte nicht aus %(vol)s erstellt werden." - -#, python-format -msgid "Unable to create volume %s" -msgstr "Der Datenträger %s kann nicht erstellt werden." - -msgid "Unable to create volume. Backend down." -msgstr "Datenträger konnte nicht erstellt werden. Backend inaktiv." - -#, python-format -msgid "Unable to delete Consistency Group snapshot %s" -msgstr "Konsistenzgruppenschattenkopie %s konnte nicht gelöscht werden." - -#, python-format -msgid "Unable to delete snapshot %(id)s, status: %(status)s." -msgstr "" -"Die Schattenkopie %(id)s konnte nicht gelöscht werden. Status: %(status)s." - -#, python-format -msgid "Unable to delete snapshot policy on volume %s." 
-msgstr "" -"Schattenkopierichtlinie auf Datenträger %s konnte nicht gelöscht werden." - -#, python-format -msgid "" -"Unable to delete the target volume for volume %(vol)s. Exception: %(err)s." -msgstr "" -"Der Zieldatenträger für den Datenträger %(vol)s kann nicht gelöscht werden. " -"Ausnahme: %(err)s." - -msgid "" -"Unable to detach volume. Volume status must be 'in-use' and attach_status " -"must be 'attached' to detach." -msgstr "" -"Der Datenträger kann nicht abgehängt werden. Der Status des Datenträgers " -"muss 'in-use' und der Anhangstatus 'attach_status' muss 'attached' sein, " -"damit das Abhängen erfolgen kann." - -#, python-format -msgid "" -"Unable to determine secondary_array from supplied secondary: %(secondary)s." -msgstr "" -"Anhand des angegebenen Sekundärziels kann 'secondary_array' nicht ermittelt " -"werden: %(secondary)s." - -#, python-format -msgid "Unable to determine snapshot name in Purity for snapshot %(id)s." -msgstr "" -"Der Schattenkopiename in Purity für die Schattenkopie %(id)s kann nicht " -"bestimmt werden." - -msgid "Unable to determine system id." -msgstr "System-ID kann nicht bestimmt werden." - -msgid "Unable to determine system name." -msgstr "Systemname kann nicht bestimmt werden." - -#, python-format -msgid "" -"Unable to do manage snapshot operations with Purity REST API version " -"%(api_version)s, requires %(required_versions)s." -msgstr "" -"Es können keine Operationen zum Verwalten von Schattenkopien mit Purity-REST-" -"API-Version %(api_version)s ausgeführt werden, erfordert " -"%(required_versions)s." - -#, python-format -msgid "" -"Unable to do replication with Purity REST API version %(api_version)s, " -"requires one of %(required_versions)s." -msgstr "" -"Replikation mit Purity-REST-API-Version %(api_version)s nicht möglich. " -"Erfordert eine der folgenden Versionen: %(required_versions)s." - -#, python-format -msgid "Unable to establish the partnership with the Storwize cluster %s." 
-msgstr "Es kann keine Beziehung zum Storwizse-Cluster %s hergestellt werden." - -#, python-format -msgid "Unable to extend volume %s" -msgstr "Datenträger %s kann nicht erweitert werden." - -#, python-format -msgid "" -"Unable to fail-over the volume %(id)s to the secondary back-end, because the " -"replication relationship is unable to switch: %(error)s" -msgstr "" -"Failover für den Datenträger %(id)s auf das sekundäre Backend nicht möglich, " -"da die Replikationsbeziehung nicht gewechselt werden kann: %(error)s" - -msgid "" -"Unable to failback to \"default\", this can only be done after a failover " -"has completed." -msgstr "" -"Zurücksetzen auf Standardeinstellung nicht möglich. Dies kann erst nach " -"Abschluss eines Failovers erfolgen." - -#, python-format -msgid "Unable to failover to replication target:%(reason)s)." -msgstr "Failover für Replikationsziel fehlgeschlagen: %(reason)s)." - -msgid "Unable to fetch connection information from backend." -msgstr "Abrufen von Verbindungsinformationen aus dem Backend nicht möglich." - -#, python-format -msgid "Unable to fetch connection information from backend: %(err)s" -msgstr "" -"Abrufen von Verbindungsinformationen aus dem Backend nicht möglich: %(err)s" - -#, python-format -msgid "Unable to find Purity ref with name=%s" -msgstr "Purity-ref mit Name=%s wurde nicht gefunden." - -#, python-format -msgid "Unable to find Volume Group: %(vg_name)s" -msgstr "Datenträgergruppe wurde nicht gefunden: %(vg_name)s" - -msgid "Unable to find failover target, no secondary targets configured." -msgstr "" -"Es wurde kein Failoverziel gefunden. Keine sekundären Ziele konfiguriert." - -msgid "Unable to find iSCSI mappings." -msgstr "iSCSI-Zuordnungen wurden nicht gefunden." - -#, python-format -msgid "Unable to find ssh_hosts_key_file: %s" -msgstr "ssh_hosts_key_file wurde nicht gefunden: %s" - -msgid "Unable to find system log file!" -msgstr "Systemprotokolldatei kann nicht gefunden werden!" 
- -#, python-format -msgid "" -"Unable to find viable pg snapshot to use forfailover on selected secondary " -"array: %(id)s." -msgstr "" -"Es wurde keine funktionsfähige pg-Schattenkopie für ein Failover des " -"ausgewählten sekundären Arrays gefunden: %(id)s." - -#, python-format -msgid "" -"Unable to find viable secondary array fromconfigured targets: %(targets)s." -msgstr "" -"Es wurden keine funktionsfähigen, sekundären Arrays in den konfigurierten " -"Zielen gefunden: %(targets)s." - -#, python-format -msgid "Unable to find volume %s" -msgstr "Datenträger %s wurde nicht gefunden." - -#, python-format -msgid "Unable to get a block device for file '%s'" -msgstr "Abrufen eines Blockgeräts für Datei '%s' nicht möglich." - -#, python-format -msgid "" -"Unable to get configuration information necessary to create a volume: " -"%(errorMessage)s." -msgstr "" -"Fehler beim Abrufen der erforderlichen Konfigurationsinformationen zum " -"Erstellen eines Datenträgers: %(errorMessage)s." - -msgid "Unable to get corresponding record for pool." -msgstr "Fehler beim Abrufen des entsprechenden Datensatzes für Pool" - -#, python-format -msgid "" -"Unable to get information on space %(space)s, please verify that the cluster " -"is running and connected." -msgstr "" -"Informationen zum Bereich %(space)s konnten nicht abgerufen werden. " -"Überprüfen Sie, ob der Cluster ausgeführt wird und verbunden ist." - -msgid "" -"Unable to get list of IP addresses on this host, check permissions and " -"networking." -msgstr "" -"Die Liste der IP-Adressen auf diesem Host konnte nicht abgerufen werden. " -"Überprüfen Sie Berechtigungen und Netzbetrieb." - -msgid "" -"Unable to get list of domain members, check that the cluster is running." -msgstr "" -"Die Liste der Domänenmitglieder konnte nicht abgerufen werden. Überprüfen " -"Sie, ob der Cluster ausgeführt wird." - -msgid "" -"Unable to get list of spaces to make new name. Please verify the cluster is " -"running." 
-msgstr "" -"Die Liste der Bereiche konnte für die Neubenennung nicht abgerufen werden. " -"Überprüfen Sie, ob der Cluster ausgeführt wird." - -#, python-format -msgid "Unable to get stats for backend_name: %s" -msgstr "Fehler beim Abrufen der Statistiken für backend_name: %s" - -msgid "Unable to get storage volume from job." -msgstr "Der Speicherdatenträger konnte aus der Aufgabe nicht abgerufen werden." - -#, python-format -msgid "Unable to get target endpoints for hardwareId %(hardwareIdInstance)s." -msgstr "" -"Zielendpunkte können nicht abgerufen werden für hardwareId " -"%(hardwareIdInstance)s." - -msgid "Unable to get the name of the masking view." -msgstr "Der Name der Maskenansicht konnte nicht abgerufen werden." - -msgid "Unable to get the name of the portgroup." -msgstr "Der Name der Portgruppe konnte nicht abgerufen werden." - -#, python-format -msgid "Unable to get the replication relationship for volume %s." -msgstr "" -"Die Replikationsbeziehung für den Datenträger %s kann nicht abgerufen werden." - -#, python-format -msgid "" -"Unable to import volume %(deviceId)s to cinder. It is the source volume of " -"replication session %(sync)s." -msgstr "" -"Der Datenträger %(deviceId)s konnte nicht in Cinder importiert werden. Er " -"ist der Quellendatenträger der Replikationssitzung %(sync)s." - -#, python-format -msgid "" -"Unable to import volume %(deviceId)s to cinder. The external volume is not " -"in the pool managed by current cinder host." -msgstr "" -"Der Datenträger %(deviceId)s konnte nicht in Cinder importiert werden. Der " -"externe Datenträger befindet sich nicht in dem Pool, der vom aktuellen " -"Cinder-Host verwaltet wird." - -#, python-format -msgid "" -"Unable to import volume %(deviceId)s to cinder. Volume is in masking view " -"%(mv)s." -msgstr "" -"Der Datenträger %(deviceId)s konnte nicht in Cinder importiert werden. " -"Datenträger ist in Maskenansicht %(mv)s." - -#, python-format -msgid "Unable to load CA from %(cert)s %(e)s." 
-msgstr "Laden von Zertifizierungsstelle aus %(cert)s %(e)s nicht möglich." - -#, python-format -msgid "Unable to load cert from %(cert)s %(e)s." -msgstr "Zertifikat kann nicht aus %(cert)s %(e)s geladen werden." - -#, python-format -msgid "Unable to load key from %(cert)s %(e)s." -msgstr "Laden von Schlüssel aus %(cert)s %(e)s nicht möglich." - -#, python-format -msgid "Unable to locate account %(account_name)s on Solidfire device" -msgstr "" -"Das Konto %(account_name)s wurde auf dem SolidFire-Gerät nicht gefunden." - -#, python-format -msgid "Unable to locate an SVM that is managing the IP address '%s'" -msgstr "Es kann keine SVM bestimmt werden, die die IP-Adresse '%s' verwaltet." - -#, python-format -msgid "Unable to locate specified replay profiles %s " -msgstr "Die angegebenen Wiedergabeprofile %s wurden nicht gefunden." - -#, python-format -msgid "" -"Unable to manage existing volume. Volume %(volume_ref)s already managed." -msgstr "" -"Vorhandener Datenträger kann nicht verwaltet werden. Der Datenträger " -"%(volume_ref)s wird bereits verwaltet." - -#, python-format -msgid "Unable to manage volume %s" -msgstr "Datenträger %s kann nicht verwaltet werden." - -msgid "Unable to map volume" -msgstr "Datenträger kann nicht zugeordnet werden." - -msgid "Unable to map volume." -msgstr "Datenträger kann nicht zugeordnet werden." - -msgid "Unable to parse attributes." -msgstr "Attribute können nicht analysiert werden." - -#, python-format -msgid "" -"Unable to promote replica to primary for volume %s. No secondary copy " -"available." -msgstr "" -"Replikat kann nicht zum Primärreplikat für Datenträger %s hochgestuft " -"werden. Keine sekundäre Kopie verfügbar." - -msgid "" -"Unable to re-use a host that is not managed by Cinder with " -"use_chap_auth=True," -msgstr "" -"Ein Host, der nicht von Cinder verwaltet wird, kann mit use_chap_auth=True " -"nicht erneut verwendet werden." - -msgid "Unable to re-use host with unknown CHAP credentials configured." 
-msgstr "" -"Host, der mit unbekannten CHAP-Berechtigungsnachweisen konfiguriert ist, " -"kann nicht erneut verwendet werden." - -#, python-format -msgid "Unable to rename volume %(existing)s to %(newname)s" -msgstr "Datenträger %(existing)s konnte nicht in %(newname)s umbenannt werden." - -#, python-format -msgid "Unable to retrieve snapshot group with id of %s." -msgstr "Schattenkopiegruppe mit der ID %s kann nicht abgerufen werden. " - -#, python-format -msgid "" -"Unable to retype %(specname)s, expected to receive current and requested " -"%(spectype)s values. Value received: %(spec)s" -msgstr "" -"Typänderung von %(specname)s nicht möglich. Empfang aktueller und " -"angeforderter %(spectype)s-Werte wurde erwartet. Empfangener Wert: %(spec)s" - -#, python-format -msgid "" -"Unable to retype: A copy of volume %s exists. Retyping would exceed the " -"limit of 2 copies." -msgstr "" -"Typänderung nicht möglich: Eine Kopie von Datenträger %s ist vorhanden. Mit " -"einer Typänderung wird der Grenzwert von 2 Kopien überschritten. " - -#, python-format -msgid "" -"Unable to retype: Current action needs volume-copy, it is not allowed when " -"new type is replication. Volume = %s" -msgstr "" -"Typänderung nicht möglich: Aktuelle Aktion erfordert eine Datenträgerkopie. " -"Dies ist nicht zulässig, wenn der neue Typ 'replication' ist. Datenträger = " -"%s" - -#, python-format -msgid "" -"Unable to set up mirror mode replication for %(vol)s. Exception: %(err)s." -msgstr "" -"Spiegelmodusreplikation für %(vol)s kann nicht konfiguriert werden. " -"Ausnahme: %(err)s." - -#, python-format -msgid "Unable to snap Consistency Group %s" -msgstr "Schattenkopie der Konsistenzgruppe %s konnte nicht erstellt werden." - -msgid "Unable to terminate volume connection from backend." -msgstr "Datenträgerverbindung kann nicht vom Backend beendet werden." 
- -#, python-format -msgid "Unable to terminate volume connection: %(err)s" -msgstr "Datenträgerverbindung kann nicht beendet werden: %(err)s" - -#, python-format -msgid "Unable to update consistency group %s" -msgstr "Konsistenzgruppe %s konnte nicht aktualisiert werden." - -#, python-format -msgid "" -"Unable to verify initiator group: %(igGroupName)s in masking view " -"%(maskingViewName)s. " -msgstr "Initiatorgruppe %(igGroupName)s in Maskenansicht %(maskingViewName)s. " - -msgid "Unacceptable parameters." -msgstr "Unzulässige Parameter." - -#, python-format -msgid "" -"Unexecpted mapping status %(status)s for mapping %(id)s. Attributes: " -"%(attr)s." -msgstr "" -"Unerwarteter Zuordnungsstatus %(status)s für %(id)s-Zuordnung. Attribute: " -"%(attr)s." - -#, python-format -msgid "" -"Unexpected CLI response: header/row mismatch. header: %(header)s, row: " -"%(row)s." -msgstr "" -"Unerwartete CLI-Antwort: Abweichung zwischen Header/Zeile. Header: " -"%(header)s, Zeile: %(row)s." - -#, python-format -msgid "" -"Unexpected mapping status %(status)s for mapping%(id)s. Attributes: %(attr)s." -msgstr "" -"Nicht erwarteter Zuordnungsstatus %(status)s für Zuordnung %(id)s. " -"Attribute: %(attr)s." - -#, python-format -msgid "Unexpected output. Expected [%(expected)s] but received [%(output)s]" -msgstr "" -"Nicht erwartete Ausgabe. Erwartet wurde [%(expected)s], empfangen wurde " -"[%(output)s]" - -msgid "Unexpected response from Nimble API" -msgstr "Unerwartete Antwort von Nimble-API" - -msgid "Unexpected response from Tegile IntelliFlash API" -msgstr "Unerwartete Antwort von Tegile IntelliFlash-API" - -msgid "Unexpected status code" -msgstr "Unerwarteter Statuscode" - -#, python-format -msgid "" -"Unexpected status code from the switch %(switch_id)s with protocol " -"%(protocol)s for url %(page)s. Error: %(error)s" -msgstr "" -"Unerwarteter Statuscode des Switch %(switch_id)s mit dem Protokoll " -"%(protocol)s für URL %(page)s. 
Fehler: %(error)s" - -msgid "Unknown Gluster exception" -msgstr "Unbekannte Gluster-Ausnahme" - -msgid "Unknown NFS exception" -msgstr "Unbekannte NFS-Ausnahme" - -msgid "Unknown RemoteFS exception" -msgstr "Unbekannte RemoteFS-Ausnahme" - -msgid "Unknown SMBFS exception." -msgstr "Unbekannte SMBFS-Ausnahme" - -msgid "Unknown Virtuozzo Storage exception" -msgstr "Unbekannte Virtuozzo-Speicherausnahmebedingung" - -msgid "Unknown action" -msgstr "Unbekannte Aktion" - -#, python-format -msgid "" -"Unknown if the volume: %s to be managed is already being managed by Cinder. " -"Aborting manage volume. Please add 'cinder_managed' custom schema property " -"to the volume and set its value to False. Alternatively, Set the value of " -"cinder config policy 'zfssa_manage_policy' to 'loose' to remove this " -"restriction." -msgstr "" -"Es ist nicht bekannt, ob der zu verwaltende Datenträger %s bereits von " -"Cinder verwaltet wird. Verwalten des Datenträgers wird abgebrochen. Fügen " -"Sie dem Datenträger die angepasste Schemaeigenschaft 'cinder_managed' hinzu " -"und setzen Sie den zugehörigen Wert auf 'False'. Alternativ können Sie den " -"Wert der Cinder-Konfigurationsrichtlinie 'zfssa_manage_policy' auf 'loose' " -"setzen, um diese Einschränkung zu entfernen." - -#, python-format -msgid "" -"Unknown if the volume: %s to be managed is already being managed by Cinder. " -"Aborting manage volume. Please add 'cinder_managed' custom schema property " -"to the volume and set its value to False. Alternatively, set the value of " -"cinder config policy 'zfssa_manage_policy' to 'loose' to remove this " -"restriction." -msgstr "" -"Es ist nicht bekannt, ob der zu verwaltende Datenträger %s bereits von " -"Cinder verwaltet wird. Verwalten des Datenträgers wird abgebrochen. Fügen " -"Sie dem Datenträger die angepasste Schemaeigenschaft 'cinder_managed' hinzu " -"und setzen Sie den zugehörigen Wert auf 'False'. 
Alternativ können Sie den " -"Wert der Cinder-Konfigurationsrichtlinie 'zfssa_manage_policy' auf 'loose' " -"setzen, um diese Einschränkung zu entfernen." - -#, python-format -msgid "Unknown operation %s." -msgstr "Unbekannte Operation %s." - -#, python-format -msgid "Unknown or unsupported command %(cmd)s" -msgstr "Unbekannter oder nicht unterstützter Befehl %(cmd)s" - -#, python-format -msgid "Unknown protocol: %(protocol)s." -msgstr "Unbekanntes Protokoll: %(protocol)s." - -#, python-format -msgid "Unknown quota resources %(unknown)s." -msgstr "Unbekannte Kontingentressourcen %(unknown)s." - -msgid "Unknown sort direction, must be 'desc' or 'asc'" -msgstr "" -"Unbekannte Sortierrichtung: Muss 'desc' (absteigend) oder " -"'asc' (aufsteigend) sein." - -msgid "Unknown sort direction, must be 'desc' or 'asc'." -msgstr "Unbekannte Sortierrichtung: muss 'desc' oder 'asc' sein." - -msgid "Unmanage and cascade delete options are mutually exclusive." -msgstr "" -"Die Optionen 'Aufheben der Verwaltung' und 'Kaskadiertes Löschen' heben sich " -"gegenseitig auf." - -msgid "Unmanage volume not implemented." -msgstr "Aufheben der Verwaltung für einen Datenträger nicht implementiert." - -msgid "Unmanaging of snapshots from 'failed-over' volumes is not allowed." -msgstr "" -"Aufheben der Verwaltung von Schattenkopien aus Failover-Datenträgern ist " -"nicht zulässig." - -msgid "Unmanaging of snapshots from failed-over volumes is not allowed." -msgstr "" -"Aufheben der Verwaltung von Schattenkopien aus Failover-Datenträgern ist " -"nicht zulässig." 
- -#, python-format -msgid "Unrecognized QOS keyword: \"%s\"" -msgstr "Nicht erkanntes QOS-Schlüsselwort: \"%s\"" - -#, python-format -msgid "Unrecognized backing format: %s" -msgstr "Nicht erkanntes Sicherungsformat: %s" - -#, python-format -msgid "Unrecognized read_deleted value '%s'" -msgstr "Nicht erkannter read_deleted-Wert '%s'" - -#, python-format -msgid "Unset gcs options: %s" -msgstr "gcs-Optionen löschen: %s" - -msgid "Unsupported Content-Type" -msgstr "Nicht unterstützter Inhaltstyp" - -msgid "" -"Unsupported Data ONTAP version. Data ONTAP version 7.3.1 and above is " -"supported." -msgstr "" -"Nicht unterstützte Data ONTAP-Version. Data ONTAP-Versionen ab 7.3.1 werden " -"unterstützt." - -#, python-format -msgid "Unsupported backup metadata version (%s)" -msgstr "Nicht unterstützte Metadatenversion für die Sicherung (%s)" - -msgid "Unsupported backup metadata version requested" -msgstr "Nicht unterstützte Metadatenversion für die Sicherung angefordert." - -msgid "Unsupported backup verify driver" -msgstr "Nicht unterstützter Treiber zum Überprüfen der Sicherung" - -#, python-format -msgid "" -"Unsupported firmware on switch %s. Make sure switch is running firmware v6.4 " -"or higher" -msgstr "" -"Nicht unterstützte Firmware auf Switch %s. Stellen Sie sicher, dass auf dem " -"Switch Firmware ab Version 6.4 ausgeführt wird." - -#, python-format -msgid "Unsupported volume format: %s " -msgstr "Nicht unterstütztes Datenträgerformat: %s" - -msgid "Update QoS policy error." -msgstr "Fehler beim Aktualisieren der QoS-Richtlinie." - -msgid "" -"Update and delete quota operations can only be made by an admin of immediate " -"parent or by the CLOUD admin." -msgstr "" -"Operationen zum Aktualisieren und zum Löschen einer Quote können nur von " -"einen Administrator der direkt übergeordneten Ebene oder vom CLOUD-" -"Administrator ausgeführt werden." 
- -msgid "" -"Update and delete quota operations can only be made to projects in the same " -"hierarchy of the project in which users are scoped to." -msgstr "" -"Operationen zum Aktualisieren und zum Löschen eines Kontingents können nur " -"an Projekten in derselben Hierarchie des Projekts, das als Bereich für " -"Benutzer festgelegt wurde, ausgeführt werden." - -msgid "Update list, doesn't include volume_id" -msgstr "Listenaktualisierung enthält nicht 'volume_id'" - -msgid "Updated At" -msgstr "Aktualisiert am" - -msgid "Upload to glance of attached volume is not supported." -msgstr "" -"Das Hochladen des angehängten Datenträgers auf Glance wird nicht unterstützt." - -msgid "Use ALUA to associate initiator to host error." -msgstr "Fehler beim Verwenden von ALUA zum Zuordnen des Initiators zum Host." - -msgid "" -"Use CHAP to associate initiator to host error. Please check the CHAP " -"username and password." -msgstr "" -"Fehler beim Verwenden von CHAP zum Zuordnen des Initiators zum Host. " -"Überprüfen Sie den CHAP-Benutzernamen und das Kennwort." - -msgid "User ID" -msgstr "Benutzer-ID" - -msgid "User does not have admin privileges" -msgstr "Benutzer hat keine Admin-Berechtigungen." - -msgid "User not authorized to perform WebDAV operations." -msgstr "" -"Der Benutzer ist zum Ausführen von WebDAV-Operationen nicht berechtigt." - -msgid "UserName is not configured." -msgstr "UserName ist nicht konfiguriert." - -msgid "UserPassword is not configured." -msgstr "UserPassword ist nicht konfiguriert." - -msgid "V2 rollback, volume is not in any storage group." -msgstr "V2-Rollback. Datenträger befindet sich in keiner Speichergruppe." - -msgid "V3 rollback" -msgstr "V3-Rollback" - -msgid "VF is not enabled." -msgstr "VF ist nicht aktiviert." - -#, python-format -msgid "VV Set %s does not exist." -msgstr "VV-Gruppe %s ist nicht vorhanden." 
- -#, python-format -msgid "Valid consumer of QoS specs are: %s" -msgstr "Gültige Konsumenten von QoS-Spezifikationen sind: %s" - -#, python-format -msgid "Valid control location are: %s" -msgstr "Gültige Steuerposition ist: %s" - -#, python-format -msgid "Validate volume connection failed (error: %(err)s)." -msgstr "Validieren der Datenträgerverbindung fehlgeschlagen (Fehler: %(err)s)." - -#, python-format -msgid "" -"Value \"%(value)s\" is not valid for configuration option \"%(option)s\"" -msgstr "" -"Wert \"%(value)s\" ist nicht gültig für die Konfigurationsoption \"%(option)s" -"\"" - -#, python-format -msgid "Value %(param)s for %(param_string)s is not a boolean." -msgstr "Wert %(param)s für %(param_string)s ist kein boolescher Wert." - -msgid "Value required for 'scality_sofs_config'" -msgstr "Wert für 'scality_sofs_config' erforderlich" - -#, python-format -msgid "ValueError: %s" -msgstr "ValueError: %s" - -#, python-format -msgid "Vdisk %(name)s not involved in mapping %(src)s -> %(tgt)s." -msgstr "" -"Virtuelle Platte %(name)s nicht in Zuordnung %(src)s -> %(tgt)s enthalten." - -#, python-format -msgid "" -"Version %(req_ver)s is not supported by the API. Minimum is %(min_ver)s and " -"maximum is %(max_ver)s." -msgstr "" -"Die Version %(req_ver)s wird von der API nicht unterstützt. Minimum ist " -"%(min_ver)s und Maximum ist %(max_ver)s." - -#, python-format -msgid "VersionedObject %s cannot retrieve object by id." -msgstr "VersionedObject %s kann ein Objekt nicht nach ID abrufen." - -#, python-format -msgid "VersionedObject %s does not support conditional update." -msgstr "VersionedObject %s unterstützt keine Bedingungsaktualisierung." - -#, python-format -msgid "Virtual volume '%s' doesn't exist on array." -msgstr "Virtueller Datenträger '%s' ist im Array nicht vorhanden. " - -#, python-format -msgid "Vol copy job for dest %s failed." -msgstr "Die Datenträgerkopieraufgabe für das Ziel %s ist fehlgeschlagen." 
- -#, python-format -msgid "Volume %(deviceID)s not found." -msgstr "Datenträger %(deviceID)s nicht gefunden." - -#, python-format -msgid "" -"Volume %(name)s not found on the array. Cannot determine if there are " -"volumes mapped." -msgstr "" -"Der Datenträger %(name)s wurde nicht im Array gefunden. Es kann nicht " -"festgestellt werden, ob zugeordnete Datenträger vorhanden sind." - -#, python-format -msgid "Volume %(name)s was created in VNX, but in %(state)s state." -msgstr "" -"Datenträger %(name)s wurde in VNX erstellt, hat aber den Status %(state)s." - -#, python-format -msgid "Volume %(vol)s could not be created in pool %(pool)s." -msgstr "Datenträger %(vol)s konnte nicht in Pool %(pool)s erstellt werden." - -#, python-format -msgid "Volume %(vol1)s does not match with snapshot.volume_id %(vol2)s." -msgstr "" -"Datenträger %(vol1)s stimmt nicht mit snapshot.volume_id %(vol2)s überein." - -#, python-format -msgid "Volume %(vol_id)s status must be %(statuses)s" -msgstr "Der Status des Datenträgers %(vol_id)s muss %(statuses)s sein" - -#, python-format -msgid "" -"Volume %(vol_id)s status must be available to update readonly flag, but " -"current status is: %(vol_status)s." -msgstr "" -"Der Status des Datenträgers %(vol_id)s muss für die Aktualisierung der " -"readonly-Markierung 'available' sein, aber der aktuelle Status ist: " -"%(vol_status)s." - -#, python-format -msgid "" -"Volume %(vol_id)s status must be available, but current status is: " -"%(vol_status)s." -msgstr "" -"Der Status des Datenträgers %(vol_id)s muss 'available' sein, aber der " -"aktuelle Status ist: %(vol_status)s." - -#, python-format -msgid "Volume %(volume_id)s could not be found." -msgstr "Datenträger %(volume_id)s wurde nicht gefunden." - -#, python-format -msgid "" -"Volume %(volume_id)s has no administration metadata with key " -"%(metadata_key)s." -msgstr "" -"Datenträger %(volume_id)s enthält keine Verwaltungsmetadaten mit dem " -"Schlüssel %(metadata_key)s." 
- -#, python-format -msgid "Volume %(volume_id)s has no metadata with key %(metadata_key)s." -msgstr "" -"Datenträger %(volume_id)s enthält keine Metadaten mit dem Schlüssel " -"%(metadata_key)s." - -#, python-format -msgid "" -"Volume %(volume_id)s is currently mapped to unsupported host group %(group)s" -msgstr "" -"Der Datenträger %(volume_id)s ist momentan der nicht unterstützten " -"Hostgruppe %(group)s zugeordnet." - -#, python-format -msgid "Volume %(volume_id)s is not currently mapped to host %(host)s" -msgstr "" -"Der Datenträger %(volume_id)s ist zurzeit nicht dem Host %(host)s zugeordnet." - -#, python-format -msgid "Volume %(volume_id)s is still attached, detach volume first." -msgstr "" -"Datenträger %(volume_id)s ist noch angehängt und muss zuerst abgehängt " -"werden." - -#, python-format -msgid "Volume %(volume_id)s replication error: %(reason)s" -msgstr "Replizierungsfehler für Datenträger %(volume_id)s: %(reason)s" - -#, python-format -msgid "Volume %(volume_name)s is busy." -msgstr "Der Datenträger %(volume_name)s ist ausgelastet." - -#, python-format -msgid "Volume %s could not be created from source volume." -msgstr "Datenträger %s konnte nicht aus Quellendatenträger erstellt werden." - -#, python-format -msgid "Volume %s could not be created on shares." -msgstr "" -"Datenträger '%s' konnte nicht in den freigegebenen Verzeichnissen erstellt " -"werden." - -#, python-format -msgid "Volume %s could not be created." -msgstr "Datenträger '%s' konnte nicht erstellt werden." - -#, python-format -msgid "Volume %s does not exist in Nexenta SA" -msgstr "Der Datenträger %s ist nicht in Nexenta SA vorhanden." - -#, python-format -msgid "Volume %s does not exist in Nexenta Store appliance" -msgstr "Der Datenträger %s ist nicht in der Nexenta Store-Appliance vorhanden." - -#, python-format -msgid "Volume %s does not exist on the array." -msgstr "Der Datenträger %s ist im Array nicht vorhanden. 
" - -#, python-format -msgid "Volume %s does not have provider_location specified, skipping." -msgstr "" -"Für Datenträger %s ist provider_location nicht angegeben, wird übersprungen." - -#, python-format -msgid "Volume %s doesn't exist on array." -msgstr "Der Datenträger %s ist im Array nicht vorhanden. " - -#, python-format -msgid "Volume %s doesn't exist on the ZFSSA backend." -msgstr "Der Datenträger %s ist nicht im ZFSSA-Backend vorhanden." - -#, python-format -msgid "Volume %s is already managed by OpenStack." -msgstr "Datenträger %s wird bereits von OpenStack verwaltet." - -#, python-format -msgid "" -"Volume %s is not of replicated type. This volume needs to be of a volume " -"type with the extra spec replication_enabled set to ' True' to support " -"replication actions." -msgstr "" -"Der Datenträger %s hat nicht den Typ 'replication'. Dieser Datenträger muss " -"ein Datenträgertyp sein, bei dem die extra Spezifikation " -"'replication_enabled' auf ' True' gesetzt ist, damit " -"Replikationsaktionen unterstützt werden." - -#, python-format -msgid "" -"Volume %s is online. Set volume to offline for managing using OpenStack." -msgstr "" -"Der Datenträger %s ist 'online'. Setzen Sie den Datenträger für die " -"Verwaltung mit OpenStack auf 'offline'." - -#, python-format -msgid "Volume %s must not be part of a consistency group." -msgstr "Datenträger %s darf nicht Teil einer Konsistenzgruppe sein." - -#, python-format -msgid "Volume %s not found." -msgstr "Datenträger %s nicht gefunden." - -#, python-format -msgid "Volume %s: Error trying to extend volume" -msgstr "Datenträger %s: Fehler beim Versuch, den Datenträger zu erweitern." - -#, python-format -msgid "Volume (%s) already exists on array" -msgstr "Datenträger (%s) ist im Array bereits vorhanden." - -#, python-format -msgid "Volume (%s) already exists on array." -msgstr "Datenträger (%s) ist im Array bereits vorhanden." 
- -#, python-format -msgid "Volume Group %s does not exist" -msgstr "Datenträgergruppe %s ist nicht vorhanden." - -#, python-format -msgid "Volume Type %(id)s already exists." -msgstr "Datenträgertyp %(id)s ist bereits vorhanden." - -#, python-format -msgid "" -"Volume Type %(volume_type_id)s deletion is not allowed with volumes present " -"with the type." -msgstr "" -"Löschen von Datenträgertyp %(volume_type_id)s ist für vorhandene Datenträger " -"mit dem Typ nicht zulässig." - -#, python-format -msgid "" -"Volume Type %(volume_type_id)s has no extra specs with key " -"%(extra_specs_key)s." -msgstr "" -"Datenträgertyp %(volume_type_id)s hat keine Sonderspezifikationen mit dem " -"Schlüssel %(extra_specs_key)s" - -msgid "Volume Type id must not be None." -msgstr "Datenträgertyp-ID darf nicht 'None' sein." - -#, python-format -msgid "" -"Volume [%(cb_vol)s] was not found at CloudByte storage corresponding to " -"OpenStack volume [%(ops_vol)s]." -msgstr "" -"Der Datenträger [%(cb_vol)s] wurde nicht in dem CloudByte-Speicher gefunden, " -"der zum OpenStack-Datenträger [%(ops_vol)s] gehört." - -#, python-format -msgid "Volume [%s] not found in CloudByte storage." -msgstr "Datenträger [%s] nicht in CloudByte-Speicher gefunden." - -#, python-format -msgid "Volume attachment could not be found with filter: %(filter)s ." -msgstr "" -"Datenträgeranhängung wurde mit dem folgenden Filter nicht gefunden: " -"%(filter)s ." - -#, python-format -msgid "Volume backend config is invalid: %(reason)s" -msgstr "Konfiguration des Datenträgerbackends ist ungültig: %(reason)s" - -msgid "Volume by this name already exists" -msgstr "Ein Datenträger mit diesem Namen ist bereits vorhanden." - -msgid "Volume cannot be restored since it contains snapshots." -msgstr "" -"Datenträger kann nicht wiederhergestellt werden, da er Schattenkopien " -"enthält." - -msgid "Volume create failed while extracting volume ref." 
-msgstr "" -"Datenträgererstellung beim Extrahieren der Datenträgerreferenz " -"fehlgeschlagen." - -#, python-format -msgid "Volume device file path %s does not exist." -msgstr "Dateipfad für Datenträgergerät %s ist nicht vorhanden." - -#, python-format -msgid "Volume device not found at %(device)s." -msgstr "Datenträgergerät unter %(device)s nicht gefunden." - -#, python-format -msgid "Volume driver %s not initialized." -msgstr "Datenträgertreiber %s nicht initialisiert." - -msgid "Volume driver not ready." -msgstr "Datenträgertreiber ist nicht bereit." - -#, python-format -msgid "Volume driver reported an error: %(message)s" -msgstr "Datenträgertreiber meldete einen Fehler: %(message)s" - -msgid "Volume has a temporary snapshot that can't be deleted at this time." -msgstr "" -"Der Datenträger hat eine temporäre Schattenkopie, die zurzeit nicht gelöscht " -"werden kann. " - -msgid "Volume has children and cannot be deleted!" -msgstr "" -"Der Datenträger hat untergeordnete Elemente und kann nicht gelöscht werden!" - -#, python-format -msgid "Volume is attached to a server. (%s)" -msgstr "Datenträger ist an einem Server angehängt. (%s)" - -msgid "Volume is in-use." -msgstr "Datenträgerstatus ist 'in-use'." - -msgid "Volume is not available." -msgstr "Datenträger ist nicht verfügbar." - -msgid "Volume is not local to this node" -msgstr "Der Datenträger ist für diesen Knoten nicht lokal." - -msgid "Volume is not local to this node." -msgstr "Der Datenträger ist für diesen Knoten nicht lokal." - -msgid "" -"Volume metadata backup requested but this driver does not yet support this " -"feature." -msgstr "" -"Sicherung der Datenträgermetadaten angefordert, dieser Treiber unterstützt " -"diese Funktion jedoch noch nicht." - -#, python-format -msgid "Volume migration failed: %(reason)s" -msgstr "Datenträgermigration fehlgeschlagen: %(reason)s" - -msgid "Volume must be available" -msgstr "Datenträger muss verfügbar sein." 
- -msgid "Volume must be in the same availability zone as the snapshot" -msgstr "" -"Der Datenträger muss sich in derselben Verfügbarkeitszone befinden wie die " -"Schattenkopie." - -msgid "Volume must be in the same availability zone as the source volume" -msgstr "" -"Der Datenträger muss sich in derselben Verfügbarkeitszone befinden wie der " -"Quellendatenträger." - -msgid "Volume must have a volume type" -msgstr "Datenträger muss einen Datenträgertyp haben." - -msgid "Volume must not be replicated." -msgstr "Datenträger darf nicht repliziert sein. " - -msgid "Volume must not have snapshots." -msgstr "Datenträger darf keine Schattenkopien enthalten." - -#, python-format -msgid "Volume not found for instance %(instance_id)s." -msgstr "Datenträger für Instanz %(instance_id)s nicht gefunden." - -msgid "Volume not found on configured storage backend." -msgstr "Datenträger nicht im konfiguriertem Speicherbackend gefunden." - -msgid "" -"Volume not found on configured storage backend. If your volume name contains " -"\"/\", please rename it and try to manage again." -msgstr "" -"Datenträger nicht im konfigurierten Speicherbackend gefunden. Wenn der Name " -"Ihres Datenträgers das Zeichen \"/\" enthält, benennen Sie ihn bitte um und " -"wiederholen Sie die Verwaltungsoperation." - -msgid "Volume not found on configured storage pools." -msgstr "Datenträger nicht in konfigurierten Speicherpools gefunden." - -msgid "Volume not found." -msgstr "Datenträger nicht gefunden." - -msgid "Volume not unique." -msgstr "Datenträger ist nicht eindeutig." - -msgid "Volume not yet assigned to host." -msgstr "Datenträger noch nicht dem Host zugeordnet." - -msgid "Volume reference must contain source-name element." -msgstr "Datenträgerreferenz muss Element 'source-name' enthalten." - -#, python-format -msgid "Volume replication for %(volume_id)s could not be found." -msgstr "Die Datenträgerreplizierung für %(volume_id)s wurde nicht gefunden." 
- -#, python-format -msgid "Volume service %s failed to start." -msgstr "Starten des Datenträgerdiensts %s ist fehlgeschlagen." - -msgid "Volume should have agent-type set as None." -msgstr "Für Datenträger sollte Agententyp auf None festgelegt werden." - -#, python-format -msgid "" -"Volume size %(volume_size)sGB cannot be smaller than the image minDisk size " -"%(min_disk)sGB." -msgstr "" -"Datenträgergröße %(volume_size)s GB darf nicht kleiner als die " -"Mindestplattengröße des Abbilds von %(min_disk)s GB sein." - -#, python-format -msgid "Volume size '%(size)s' must be an integer and greater than 0" -msgstr "Datenträgergröße '%(size)s' muss eine Ganzzahl und größer als 0 sein." - -#, python-format -msgid "" -"Volume size '%(size)s'GB cannot be smaller than original volume size " -"%(source_size)sGB. They must be >= original volume size." -msgstr "" -"Datenträgergröße '%(size)s' GB darf nicht kleiner als die ursprüngliche " -"Datenträgergröße von %(source_size)s GB sein. Sie muss >= der ursprünglichen " -"Datenträgergröße sein." - -#, python-format -msgid "" -"Volume size '%(size)s'GB cannot be smaller than the snapshot size " -"%(snap_size)sGB. They must be >= original snapshot size." -msgstr "" -"Datenträgergröße '%(size)s' GB darf nicht kleiner als die Schattenkopiegröße " -"von %(snap_size)s GB sein. Sie muss >= der ursprünglichen Schattenkopiegröße " -"sein." - -msgid "Volume size increased since the last backup. Do a full backup." -msgstr "" -"Der Datenträger hat sich seit der letzten Sicherung vergrößert. Führen Sie " -"eine vollständige Sicherung durch." - -msgid "Volume size must be a multiple of 1 GB." -msgstr "Datenträgergröße muss Vielfaches von 1 GB sein." - -msgid "Volume size must multiple of 1 GB." -msgstr "Datenträgergröße muss Vielfaches von 1 GB sein." - -#, python-format -msgid "Volume status must be \"available\" or \"in-use\" for snapshot. 
(is %s)" -msgstr "" -"Der Datenträger muss für eine Schattenkopie den Status \"available\" oder " -"\"in-use\" haben, der Status ist aber %s." - -msgid "Volume status must be \"available\" or \"in-use\"." -msgstr "Datenträgerstatus muss \"available\" oder \"in-use\" sein." - -#, python-format -msgid "Volume status must be %s to reserve." -msgstr "Der Datenträgerstatus muss für eine Reservierung %s sein." - -msgid "Volume status must be 'available'." -msgstr "Datenträgerstatus muss 'available' sein." - -msgid "Volume to Initiator Group mapping already exists" -msgstr "" -"Die Zuordnung von Datenträger zu Initiatorgruppe ist bereits vorhanden." - -#, python-format -msgid "" -"Volume to be backed up must be available or in-use, but the current status " -"is \"%s\"." -msgstr "" -"Der Datenträger, der gesichert werden soll, muss den Status 'available'oder " -"'in-use' haben, aber der aktuelle Status ist \"%s\"." - -msgid "Volume to be restored to must be available" -msgstr "Wiederherzustellender Datenträger muss verfügbar sein." - -#, python-format -msgid "Volume type %(volume_type_id)s could not be found." -msgstr "Datenträgertyp %(volume_type_id)s wurde nicht gefunden." - -#, python-format -msgid "Volume type ID '%s' is invalid." -msgstr "Volumentypkennung '%s' ist ungültig." - -#, python-format -msgid "" -"Volume type access for %(volume_type_id)s / %(project_id)s combination " -"already exists." -msgstr "" -"Datenträgertypzugriff für die Kombination %(volume_type_id)s / " -"%(project_id)s ist bereits vorhanden. " - -#, python-format -msgid "" -"Volume type access not found for %(volume_type_id)s / %(project_id)s " -"combination." -msgstr "" -"Datenträgertypzugriff für die Kombination %(volume_type_id)s / " -"%(project_id)s nicht gefunden." - -#, python-format -msgid "Volume type encryption for type %(type_id)s already exists." -msgstr "" -"Datenträgertypverschlüsselung für Typ %(type_id)s ist bereits vorhanden." 
- -#, python-format -msgid "Volume type encryption for type %(type_id)s does not exist." -msgstr "Datenträgertypverschlüsselung für Typ %(type_id)s ist nicht vorhanden." - -msgid "Volume type name can not be empty." -msgstr "Name des Datenträgertyps darf nicht leer sein." - -#, python-format -msgid "Volume type with name %(volume_type_name)s could not be found." -msgstr "" -"Datenträgertyp mit dem Namen %(volume_type_name)s wurde nicht gefunden." - -#, python-format -msgid "" -"Volume: %(volumeName)s is not a concatenated volume. You can only perform " -"extend on concatenated volume. Exiting..." -msgstr "" -"Der Datenträger %(volumeName)s ist kein verketteter Datenträger. Sie können " -"die Erweiterung nur für einen verketteten Datenträger ausführen. Vorgang " -"wird beendet..." - -#, python-format -msgid "Volume: %(volumeName)s was not added to storage group %(sgGroupName)s." -msgstr "" -"Der Datenträger %(volumeName)s wurde der Speichergruppe %(sgGroupName)s " -"nicht hinzugefügt." - -#, python-format -msgid "Volume: %s is already being managed by Cinder." -msgstr "Der Datenträger %s wird bereits von Cinder verwaltet." - -msgid "" -"Volumes/account exceeded on both primary and secondary SolidFire accounts." -msgstr "" -"Anzahl Datenträger/Konto sowohl auf primären als auch auf sekundären " -"SolidFire-Konten überschritten." - -#, python-format -msgid "" -"VzStorage config 'vzstorage_used_ratio' invalid. Must be > 0 and <= 1.0: %s." -msgstr "" -"VzStorage-Konfiguration 'vzstorage_used_ratio' ist ungültig. Muss > 0 und <= " -"1,0 sein: %s." - -#, python-format -msgid "VzStorage config file at %(config)s doesn't exist." -msgstr "VzStorage-Konfigurationsdatei ist nicht in %(config)s vorhanden." - -msgid "Wait replica complete timeout." -msgstr "Zeitlimitüberschreitung beim Warten auf Fertigstellung des Replikats." - -#, python-format -msgid "Wait synchronize failed. Running status: %s." -msgstr "Warten auf Synchronisierung fehlgeschlagen. Ausführungsstatus: %s." 
- -msgid "" -"Waiting for all nodes to join cluster. Ensure all sheep daemons are running." -msgstr "" -"Es wird gewartet, bis alle Knoten mit dem Cluster verknüpft sind. Stellen " -"Sie sicher, dass alle sheep-Dämonprozesse aktiv sind." - -msgid "We should not do switch over on primary array." -msgstr "Wechsel in primären Array nicht empfohlen." - -msgid "X-IO Volume Driver exception!" -msgstr "Ausnahme bei X-IO-Datenträgertreiber!" - -msgid "XtremIO not configured correctly, no iscsi portals found" -msgstr "" -"XtremIO ist nicht ordnungsgemäß konfiguriert, keine iSCSI-Portale gefunden." - -msgid "XtremIO not initialized correctly, no clusters found" -msgstr "XtremIO nicht richtig initialisiert, keine Cluster gefunden." - -msgid "You must implement __call__" -msgstr "Sie müssen '__call__' implementieren." - -msgid "" -"You must install hpe3parclient before using 3PAR drivers. Run \"pip install " -"python-3parclient\" to install the hpe3parclient." -msgstr "" -"Sie müssen hpe3parclient installieren, bevor Sie 3PAR-Treiber verwenden. " -"Führen Sie \"pip install python-3parclient\" aus, um hpe3parclient zu " -"installieren." - -msgid "You must supply an array in your EMC configuration file." -msgstr "Sie müssen in Ihrer EMC-Konfigurationsdatei ein Array angeben." - -#, python-format -msgid "" -"Your original size: %(originalVolumeSize)s GB is greater than: %(newSize)s " -"GB. Only Extend is supported. Exiting..." -msgstr "" -"Die ursprüngliche Größe von %(originalVolumeSize)s GB ist größer als " -"%(newSize)s GB. Nur Erweiterung wird unterstützt. Vorgang wird beendet..." - -#, python-format -msgid "ZeroDivisionError: %s" -msgstr "ZeroDivisionError: %s" - -msgid "Zone" -msgstr "Zone" - -#, python-format -msgid "Zoning Policy: %s, not recognized" -msgstr "Zoning-Richtlinie: %s, nicht erkannt" - -#, python-format -msgid "_create_and_copy_vdisk_data: Failed to get attributes for vdisk %s." 
-msgstr "" -"_create_and_copy_vdisk_data: Abrufen von Attributen für virtuelle Platte %s " -"fehlgeschlagen." - -msgid "_create_host failed to return the host name." -msgstr "_create_host hat Hostnamen nicht zurückgegeben." - -msgid "" -"_create_host: Can not translate host name. Host name is not unicode or " -"string." -msgstr "" -"_create_host: Hostname kann nicht übersetzt werden. Hostname ist weder " -"Unicode noch Zeichenkette." - -msgid "_create_host: No connector ports." -msgstr "_create_host: keine Connector-Ports." - -msgid "_create_local_cloned_volume, Replication Service not found." -msgstr "'_create_local_cloned_volume', Replikationsdienst nicht gefunden." - -#, python-format -msgid "" -"_create_local_cloned_volume, volumename: %(volumename)s, sourcevolumename: " -"%(sourcevolumename)s, source volume instance: %(source_volume)s, target " -"volume instance: %(target_volume)s, Return code: %(rc)lu, Error: " -"%(errordesc)s." -msgstr "" -"'_create_local_cloned_volume', Datenträgername: %(volumename)s, " -"Quellendatenträgername: %(sourcevolumename)s, Quellendatenträgerinstanz: " -"%(source_volume)s, Zieldatenträgerinstanz: %(target_volume)s, Rückgabecode: " -"%(rc)lu, Fehler: %(errordesc)s." - -#, python-format -msgid "" -"_create_vdisk %(name)s - did not find success message in CLI output.\n" -" stdout: %(out)s\n" -" stderr: %(err)s" -msgstr "" -"_create_vdisk %(name)s - Keine Erfolgsnachricht in CLI-Ausgabe gefunden.\n" -" Standardausgabe: %(out)s\n" -" Standardfehler: %(err)s" - -msgid "_create_volume_name, id_code is None." -msgstr "'_create_volume_name', 'id_code' ist 'None'." - -msgid "_delete_copysession, Cannot find Replication Service" -msgstr "_delete_copysession, Replikationsdienst wurde nicht gefunden." - -#, python-format -msgid "" -"_delete_copysession, copy session type is undefined! copy session: " -"%(cpsession)s, copy type: %(copytype)s." -msgstr "" -"_delete_copysession, Typ der Kopiersitzung ist nicht definiert! 
" -"Kopiersitzung: %(cpsession)s, Kopiertyp: %(copytype)s." - -#, python-format -msgid "" -"_delete_copysession, copysession: %(cpsession)s, operation: %(operation)s, " -"Return code: %(rc)lu, Error: %(errordesc)s." -msgstr "" -"_delete_copysession, copysession: %(cpsession)s, operation: %(operation)s, " -"Rückgabecode: %(rc)lu, Fehler: %(errordesc)s." - -#, python-format -msgid "" -"_delete_volume, volumename: %(volumename)s, Return code: %(rc)lu, Error: " -"%(errordesc)s." -msgstr "" -"'_delete_volume', Datenträgername: %(volumename)s, Rückgabecode: %(rc)lu, " -"Fehler: %(errordesc)s." - -#, python-format -msgid "" -"_delete_volume, volumename: %(volumename)s, Storage Configuration Service " -"not found." -msgstr "" -"'_delete_volume', Datenträgername: %(volumename)s, " -"Speicherkonfigurationsdienst nicht gefunden." - -#, python-format -msgid "" -"_exec_eternus_service, classname: %(classname)s, InvokeMethod, cannot " -"connect to ETERNUS." -msgstr "" -"_exec_eternus_service, classname: %(classname)s, InvokeMethod. Es kann keine " -"Verbindung zu ETERNUS hergestellt werden." - -msgid "_extend_volume_op: Extending a volume with snapshots is not supported." -msgstr "" -"_extend_volume_op: Erweitern eines Datenträgers mit Schattenkopien wird " -"nicht unterstützt." - -#, python-format -msgid "" -"_find_affinity_group, connector: %(connector)s, Associators: " -"FUJITSU_AuthorizedTarget, cannot connect to ETERNUS." -msgstr "" -"_find_affinity_group, connector: %(connector)s, Associators: " -"FUJITSU_AuthorizedTarget. Es kann keine Verbindung zu ETERNUS hergestellt " -"werden." - -#, python-format -msgid "" -"_find_affinity_group, connector: %(connector)s, EnumerateInstanceNames, " -"cannot connect to ETERNUS." -msgstr "" -"_find_affinity_group, connector: %(connector)s, EnumerateInstanceNames. Es " -"kann keine Verbindung zu ETERNUS hergestellt werden." 
- -#, python-format -msgid "" -"_find_affinity_group,connector: %(connector)s,AssocNames: " -"FUJITSU_ProtocolControllerForUnit, cannot connect to ETERNUS." -msgstr "" -"_find_affinity_group,connector: %(connector)s,AssocNames: " -"FUJITSU_ProtocolControllerForUnit. Es kann keine Verbindung zu ETERNUS " -"hergestellt werden." - -#, python-format -msgid "" -"_find_copysession, ReferenceNames, vol_instance: %(vol_instance_path)s, " -"Cannot connect to ETERNUS." -msgstr "" -"_find_copysession, ReferenceNames, vol_instance: %(vol_instance_path)s. Es " -"kann keine Verbindung zu ETERNUS hergestellt werden." - -#, python-format -msgid "" -"_find_eternus_service, classname: %(classname)s, EnumerateInstanceNames, " -"cannot connect to ETERNUS." -msgstr "" -"_find_eternus_service, classname: %(classname)s, EnumerateInstanceNames. Es " -"kann keine Verbindung zu ETERNUS hergestellt werden." - -#, python-format -msgid "_find_initiator_names, connector: %(connector)s, initiator not found." -msgstr "" -"'_find_initiator_names', Connector: %(connector)s, Initiator nicht gefunden." - -#, python-format -msgid "" -"_find_lun, volumename: %(volumename)s, EnumerateInstanceNames, cannot " -"connect to ETERNUS." -msgstr "" -"_find_lun, volumename: %(volumename)s, EnumerateInstanceNames. Es kann keine " -"Verbindung zu ETERNUS hergestellt werden." - -#, python-format -msgid "" -"_find_pool, eternus_pool:%(eternus_pool)s, EnumerateInstances, cannot " -"connect to ETERNUS." -msgstr "" -"_find_pool, eternus_pool:%(eternus_pool)s, EnumerateInstances. Es kann keine " -"Verbindung zu ETERNUS hergestellt werden." - -#, python-format -msgid "" -"_get_drvcfg, filename: %(filename)s, tagname: %(tagname)s, data is None!! " -"Please edit driver configuration file and correct." -msgstr "" -"_get_drvcfg, filename: %(filename)s, tagname: %(tagname)s, Daten sind " -"'None'! Bearbeiten Sie die Treiberkonfigurationsdatei und korrigieren Sie " -"die Angaben." 
- -#, python-format -msgid "" -"_get_eternus_connection, filename: %(filename)s, ip: %(ip)s, port: %(port)s, " -"user: %(user)s, passwd: ****, url: %(url)s, FAILED!!." -msgstr "" -"'_get_eternus_connection', Dateiname: %(filename)s, IP: %(ip)s, Port: " -"%(port)s, Benutzer: %(user)s, Kennwort: ****, URL: %(url)s, FEHLGESCHLAGEN!" - -#, python-format -msgid "" -"_get_eternus_iscsi_properties, iscsiip list: %(iscsiip_list)s, iqn not found." -msgstr "" -"_get_eternus_iscsi_properties, iscsiip list: %(iscsiip_list)s. IQN wurde " -"nicht gefunden." - -#, python-format -msgid "" -"_get_eternus_iscsi_properties, iscsiip: %(iscsiip)s, AssociatorNames: " -"CIM_BindsTo, cannot connect to ETERNUS." -msgstr "" -"_get_eternus_iscsi_properties, iscsiip: %(iscsiip)s, AssociatorNames: " -"CIM_BindsTo. Es kann keine Verbindung zu ETERNUS hergestellt werden." - -#, python-format -msgid "" -"_get_eternus_iscsi_properties, iscsiip: %(iscsiip)s, EnumerateInstanceNames, " -"cannot connect to ETERNUS." -msgstr "" -"_get_eternus_iscsi_properties, iscsiip: %(iscsiip)s, EnumerateInstanceNames. " -"Es kann keine Verbindung zu ETERNUS hergestellt werden." - -#, python-format -msgid "" -"_get_eternus_iscsi_properties, iscsiip: %(iscsiip)s, GetInstance, cannot " -"connect to ETERNUS." -msgstr "" -"_get_eternus_iscsi_properties, iscsiip: %(iscsiip)s, GetInstance. Es kann " -"keine Verbindung zu ETERNUS hergestellt werden." - -#, python-format -msgid "" -"_get_hdr_dic: attribute headers and values do not match.\n" -" Headers: %(header)s\n" -" Values: %(row)s." -msgstr "" -"_get_hdr_dic: Attributheader und Attributwerte stimmen nicht überein.\n" -" Header: %(header)s\n" -" Werte: %(row)s." - -msgid "_get_host_from_connector failed to return the host name for connector." -msgstr "" -"_get_host_from_connector hat den Hostnamen für den Connector nicht " -"zurückgegeben." 
- -#, python-format -msgid "" -"_get_mapdata_fc, getting host-affinity from aglist/vol_instance failed, " -"affinitygroup: %(ag)s, ReferenceNames, cannot connect to ETERNUS." -msgstr "" -"_get_mapdata_fc, Abrufen von Hostaffinität aus 'aglist/vol_instance' " -"fehlgeschlagen, affinitygroup: %(ag)s, ReferenceNames. Es kann keine " -"Verbindung zu ETERNUS hergestellt werden." - -#, python-format -msgid "" -"_get_mapdata_fc, getting host-affinity instance failed, volmap: %(volmap)s, " -"GetInstance, cannot connect to ETERNUS." -msgstr "" -"_get_mapdata_fc, Abrufen der Hostaffinitätsinstanz fehlgeschlagen, volmap: " -"%(volmap)s, GetInstance. Es kann keine Verbindung zu ETERNUS hergestellt " -"werden." - -msgid "" -"_get_mapdata_iscsi, Associators: FUJITSU_SAPAvailableForElement, cannot " -"connect to ETERNUS." -msgstr "" -"_get_mapdata_iscsi, Associators: FUJITSU_SAPAvailableForElement. Es kann " -"keine Verbindung zu ETERNUS hergestellt werden." - -#, python-format -msgid "" -"_get_mapdata_iscsi, affinitygroup: %(ag)s, ReferenceNames, cannot connect to " -"ETERNUS." -msgstr "" -"_get_mapdata_iscsi, affinitygroup: %(ag)s, ReferenceNames. Es kann keine " -"Verbindung zu ETERNUS hergestellt werden." - -#, python-format -msgid "" -"_get_mapdata_iscsi, vol_instance: %(vol_instance)s, ReferenceNames: " -"CIM_ProtocolControllerForUnit, cannot connect to ETERNUS." -msgstr "" -"_get_mapdata_iscsi, vol_instance: %(vol_instance)s, ReferenceNames: " -"CIM_ProtocolControllerForUnit. Es kann keine Verbindung zu ETERNUS " -"hergestellt werden." - -#, python-format -msgid "" -"_get_mapdata_iscsi, volmap: %(volmap)s, GetInstance, cannot connect to " -"ETERNUS." -msgstr "" -"_get_mapdata_iscsi, volmap: %(volmap)s, GetInstance. Es kann keine " -"Verbindung zu ETERNUS hergestellt werden." - -msgid "_get_target_port, EnumerateInstances, cannot connect to ETERNUS." -msgstr "" -"_get_target_port, EnumerateInstances. Es kann keine Verbindung zu ETERNUS " -"hergestellt werden." 
- -#, python-format -msgid "_get_target_port, protcol: %(protocol)s, target_port not found." -msgstr "" -"_get_target_port, protcol: %(protocol)s, target_port wurde nicht gefunden." - -#, python-format -msgid "_get_unmanaged_replay: Cannot find snapshot named %s" -msgstr "" -"_get_unmanaged_replay: Schattenkopie mit dem Namen %s wurde nicht gefunden." - -#, python-format -msgid "_get_unmanaged_replay: Cannot find volume id %s" -msgstr "_get_unmanaged_replay: Datenträger-ID %s wurde nicht gefunden." - -msgid "_get_unmanaged_replay: Must specify source-name." -msgstr "_get_unmanaged_replay: 'source-name' muss angegeben werden." - -msgid "" -"_get_vdisk_map_properties: Could not get FC connection information for the " -"host-volume connection. Is the host configured properly for FC connections?" -msgstr "" -"_get_vdisk_map_properties: Es konnten keine Informationen zur FC-Verbindung " -"für die Host-Datenträger-Verbindung abgerufen werden. Ist der Host " -"ordnungsgemäß für FC-Verbindungen konfiguriert?" - -#, python-format -msgid "" -"_get_vdisk_map_properties: No node found in I/O group %(gid)s for volume " -"%(vol)s." -msgstr "" -"_get_vdisk_map_properties: Kein Knoten in E/A-Gruppe %(gid)s für Datenträger " -"%(vol)s gefunden." - -#, python-format -msgid "" -"_map_lun, vol_instance.path:%(vol)s, volumename: %(volumename)s, volume_uid: " -"%(uid)s, initiator: %(initiator)s, target: %(tgt)s, aglist: %(aglist)s, " -"Storage Configuration Service not found." -msgstr "" -"_map_lun, vol_instance.path:%(vol)s, volumename: %(volumename)s, volume_uid: " -"%(uid)s, initiator: %(initiator)s, target: %(tgt)s, aglist: %(aglist)s, " -"Speicherkonfigurationsdienst nicht gefunden." - -#, python-format -msgid "" -"_unmap_lun, vol_instance.path: %(volume)s, volumename: %(volumename)s, " -"volume_uid: %(uid)s, aglist: %(aglist)s, Controller Configuration Service " -"not found." 
-msgstr "" -"_unmap_lun, vol_instance.path: %(volume)s, volumename: %(volumename)s, " -"volume_uid: %(uid)s, aglist: %(aglist)s, Controllerkonfigurationsdienst " -"wurde nicht gefunden." - -#, python-format -msgid "" -"_unmap_lun, volumename: %(volumename)s, volume_uid: %(volume_uid)s, " -"AffinityGroup: %(ag)s, Return code: %(rc)lu, Error: %(errordesc)s." -msgstr "" -"_unmap_lun, volumename: %(volumename)s, volume_uid: %(volume_uid)s, " -"AffinityGroup: %(ag)s, Rückgabecode: %(rc)lu, Fehler: %(errordesc)s." - -#, python-format -msgid "" -"_unmap_lun,vol_instance.path: %(volume)s, AssociatorNames: " -"CIM_ProtocolControllerForUnit, cannot connect to ETERNUS." -msgstr "" -"_unmap_lun,vol_instance.path: %(volume)s, AssociatorNames: " -"CIM_ProtocolControllerForUnit. Es kann keine Verbindung zu ETERNUS " -"hergestellt werden." - -msgid "_update_volume_stats: Could not get storage pool data." -msgstr "" -"_update_volume_stats: Pooldaten aus Speicher konnten nicht abgerufen werden." - -#, python-format -msgid "" -"_wait_for_copy_complete, cpsession: %(cpsession)s, copysession state is " -"BROKEN." -msgstr "" -"_wait_for_copy_complete, cpsession: %(cpsession)s. Status der Kopiersitzung " -"ist BROKEN." - -#, python-format -msgid "" -"add_vdisk_copy failed: A copy of volume %s exists. Adding another copy would " -"exceed the limit of 2 copies." -msgstr "" -"add_vdisk_copy fehlgeschlagen: Eine Kopie von Datenträger %s ist bereits " -"vorhanden. Hinzufügen einer weiteren Kopie führt zu Überschreiten des " -"Grenzwerts von 2 Kopien. " - -msgid "add_vdisk_copy started without a vdisk copy in the expected pool." -msgstr "" -"add_vdisk_copy ohne eine Kopie der virtuellen Platte im erwarteten Pool " -"gestartet." - -#, python-format -msgid "all_tenants must be a boolean, got '%s'." -msgstr "all_tenants muss ein boolescher Wert sein, erhalten '%s'." 
- -msgid "already created" -msgstr "bereits erstellt" - -msgid "already_created" -msgstr "already_created" - -msgid "attach snapshot from remote node" -msgstr "Schattenkopie aus fernem Knoten zuordnen" - -#, python-format -msgid "attribute %s not lazy-loadable" -msgstr "Attribut %s kann nicht über Lazy-Loading geladen werden." - -#, python-format -msgid "" -"backup: %(vol_id)s failed to create device hardlink from %(vpath)s to " -"%(bpath)s.\n" -"stdout: %(out)s\n" -" stderr: %(err)s" -msgstr "" -"Sicherung: %(vol_id)s konnte keine feste Geräteverknüpfung von %(vpath)s zu " -"%(bpath)s herstellen.\n" -"Standardausgabe: %(out)s\n" -" Standardfehler: %(err)s" - -#, python-format -msgid "" -"backup: %(vol_id)s failed to obtain backup success notification from " -"server.\n" -"stdout: %(out)s\n" -" stderr: %(err)s" -msgstr "" -"Sicherung: %(vol_id)s hat keine Benachrichtigung zur erfolgreichen Sicherung " -"vom Server erhalten.\n" -"Standardausgabe: %(out)s\n" -" Standardfehler: %(err)s" - -#, python-format -msgid "" -"backup: %(vol_id)s failed to run dsmc due to invalid arguments on " -"%(bpath)s.\n" -"stdout: %(out)s\n" -" stderr: %(err)s" -msgstr "" -"Sicherung: %(vol_id)s konnte DSMC aufgrund von ungültigen Argumenten nicht " -"unter %(bpath)s ausführen.\n" -"Standardausgabe: %(out)s\n" -" Standardfehler: %(err)s" - -#, python-format -msgid "" -"backup: %(vol_id)s failed to run dsmc on %(bpath)s.\n" -"stdout: %(out)s\n" -" stderr: %(err)s" -msgstr "" -"Sicherung: %(vol_id)s konnte DSMC nicht unter %(bpath)s ausführen.\n" -"Standardausgabe: %(out)s\n" -" Standardfehler: %(err)s" - -#, python-format -msgid "backup: %(vol_id)s failed. %(path)s is not a file." -msgstr "Datensicherung: %(vol_id)s fehlgeschlagen. %(path)s ist keine Datei." - -#, python-format -msgid "" -"backup: %(vol_id)s failed. %(path)s is unexpected file type. Block or " -"regular files supported, actual file mode is %(vol_mode)s." -msgstr "" -"Sicherung: %(vol_id)s fehlgeschlagen. 
%(path)s ist ein unerwarteter " -"Dateityp. Blockdateien oder reguläre Dateien werden unterstützt, der " -"tatsächliche Dateimodus ist %(vol_mode)s." - -#, python-format -msgid "" -"backup: %(vol_id)s failed. Cannot obtain real path to volume at %(path)s." -msgstr "" -"Sicherung: %(vol_id)s fehlgeschlagen. Tatsächlicher Pfad zum Datenträger in " -"%(path)s kann nicht abgerufen werden." - -msgid "being attached by different mode" -msgstr "ist über anderen Modus angehängt" - -#, python-format -msgid "call failed: %r" -msgstr "Aufruf fehlgeschlagen: %r" - -msgid "call failed: GARBAGE_ARGS" -msgstr "Aufruf fehlgeschlagen: GARBAGE_ARGS" - -msgid "call failed: PROC_UNAVAIL" -msgstr "Aufruf fehlgeschlagen: PROC_UNAVAIL" - -#, python-format -msgid "call failed: PROG_MISMATCH: %r" -msgstr "Aufruf fehlgeschlagen: PROG_MISMATCH: %r" - -msgid "call failed: PROG_UNAVAIL" -msgstr "Aufruf fehlgeschlagen: PROG_UNAVAIL" - -#, python-format -msgid "can't find lun-map, ig:%(ig)s vol:%(vol)s" -msgstr "LUN-Zuordnung nicht gefunden, ig:%(ig)s vol:%(vol)s" - -msgid "can't find the volume to extend" -msgstr "Zu erweiternder Datenträger wurde nicht gefunden." - -msgid "can't handle both name and index in req" -msgstr "" -"Es können nicht sowohl der Name als auch der Index in der Anforderung " -"verarbeitet werden." - -msgid "cannot understand JSON" -msgstr "JSON kann nicht interpretiert werden." - -#, python-format -msgid "cg-%s" -msgstr "cg-%s" - -msgid "cgsnapshot assigned" -msgstr "cgsnapshot zugeordnet" - -msgid "cgsnapshot changed" -msgstr "cgsnapshot geändert" - -msgid "cgsnapshots assigned" -msgstr "cgsnapshots zugeordnet" - -msgid "cgsnapshots changed" -msgstr "cgsnapshots geändert" - -msgid "" -"check_for_setup_error: Password or SSH private key is required for " -"authentication: set either san_password or san_private_key option." 
-msgstr "" -"check_for_setup_error: Kennwort oder privater SSH-Schlüssel ist für die " -"Authentifizierung erforderlich: Legen Sie entweder die Option 'san_password' " -"oder die Option 'san_private_key' fest." - -msgid "check_for_setup_error: Unable to determine system id." -msgstr "check_for_setup_error: System-ID kann nicht bestimmt werden." - -msgid "check_for_setup_error: Unable to determine system name." -msgstr "check_for_setup_error: Systemname kann nicht bestimmt werden." - -msgid "check_hypermetro_exist error." -msgstr "check_hypermetro_exist-Fehler." - -#, python-format -msgid "clone depth exceeds limit of %s" -msgstr "Klontiefe überschreitet den Grenzwert von %s" - -msgid "consistencygroup assigned" -msgstr "consistencygroup zugeordnet" - -msgid "consistencygroup changed" -msgstr "consistencygroup geändert" - -msgid "control_location must be defined" -msgstr "control_location muss definiert sein." - -msgid "create_cloned_volume, Source Volume does not exist in ETERNUS." -msgstr "" -"'create_cloned_volume', Quellendatenträger ist in ETERNUS nicht vorhanden." - -#, python-format -msgid "" -"create_cloned_volume, target volume instancename: %(volume_instancename)s, " -"Get Instance Failed." -msgstr "" -"'create_cloned_volume', Name der Zieldatenträgerinstanz: " -"%(volume_instancename)s, Abrufen der Instanz fehlgeschlagen." - -msgid "create_cloned_volume: Source and destination size differ." -msgstr "create_cloned_volume: Größe von Quelle und Ziel sind unterschiedlich." - -#, python-format -msgid "" -"create_cloned_volume: source volume %(src_vol)s size is %(src_size)dGB and " -"doesn't fit in target volume %(tgt_vol)s of size %(tgt_size)dGB." -msgstr "" -"create_cloned_volume: Der Quellendatenträger %(src_vol)s mit %(src_size)d GB " -"passt nicht in einen Zieldatenträger %(tgt_vol)s mit %(tgt_size)d GB." - -msgid "" -"create_consistencygroup_from_src must be creating from a CG snapshot, or a " -"source CG." 
-msgstr "" -"create_consistencygroup_from_src muss aus der Schattenkopie einer " -"Konsistenzgruppe oder aus einer Quellenkonsistenzgruppe erstellt werden. " - -msgid "" -"create_consistencygroup_from_src only supports a cgsnapshot source or a " -"consistency group source. Multiple sources cannot be used." -msgstr "" -"'create_consistencygroup_from_src' unterstützt nur eine cgsnapshot-Quelle " -"oder eine Konsistenzgruppenquelle. Die Verwendung mehrerer Quellen ist nicht " -"zulässig." - -#, python-format -msgid "create_copy: Source vdisk %(src)s (%(src_id)s) does not exist." -msgstr "" -"create_copy: Als Quelle angegebene virtuelle Platte %(src)s (%(src_id)s) ist " -"nicht vorhanden." - -#, python-format -msgid "create_copy: Source vdisk %(src)s does not exist." -msgstr "" -"create_copy: Als Quelle angegebene virtuelle Platte %(src)s ist nicht " -"vorhanden." - -msgid "create_host: Host name is not unicode or string." -msgstr "create_host: Hostname ist weder Unicode noch Zeichenkette." - -msgid "create_host: No initiators or wwpns supplied." -msgstr "create_host: Keine Initiatoren oder WWPNs angegeben." - -msgid "create_hypermetro_pair error." -msgstr "create_hypermetro_pair-Fehler." - -#, python-format -msgid "create_snapshot, eternus_pool: %(eternus_pool)s, pool not found." -msgstr "'create_snapshot', Eternuspool: %(eternus_pool)s, Pool nicht gefunden." - -#, python-format -msgid "" -"create_snapshot, snapshotname: %(snapshotname)s, source volume name: " -"%(volumename)s, vol_instance.path: %(vol_instance)s, dest volume name: " -"%(d_volumename)s, pool: %(pool)s, Return code: %(rc)lu, Error: %(errordesc)s." -msgstr "" -"'create_snapshot', Schattenkopiename: %(snapshotname)s, " -"Quellendatenträgername: %(volumename)s, vol_instance.path: %(vol_instance)s, " -"Zieldatenträgername: %(d_volumename)s, Pool: %(pool)s, Rückgabecode: " -"%(rc)lu, Fehler: %(errordesc)s." 
- -#, python-format -msgid "" -"create_snapshot, volumename: %(s_volumename)s, source volume not found on " -"ETERNUS." -msgstr "" -"'create_snapshot', Datenträgername: %(s_volumename)s, Quellendatenträger " -"nicht in ETERNUS gefunden." - -#, python-format -msgid "" -"create_snapshot, volumename: %(volumename)s, Replication Service not found." -msgstr "" -"'create_snapshot', Datenträgername: %(volumename)s, Replikationsdienst nicht " -"gefunden." - -#, python-format -msgid "" -"create_snapshot: Volume status must be \"available\" or \"in-use\" for " -"snapshot. The invalid status is %s." -msgstr "" -"create_snapshot: Datenträgerstatus muss \"available\" oder \"in-use\" für " -"eine Schattenkopie sein. Der ungültige Status ist %s." - -msgid "create_snapshot: get source volume failed." -msgstr "create_snapshot: Abrufen des Quellendatenträgers fehlgeschlagen." - -#, python-format -msgid "" -"create_volume, volume: %(volume)s, EnumerateInstances, cannot connect to " -"ETERNUS." -msgstr "" -"create_volume, Datenträger: %(volume)s, EnumerateInstances. Es kann keine " -"Verbindung zu ETERNUS hergestellt werden." - -#, python-format -msgid "" -"create_volume, volume: %(volume)s, volumename: %(volumename)s, eternus_pool: " -"%(eternus_pool)s, Storage Configuration Service not found." -msgstr "" -"'create_volume', Datenträger: %(volume)s, Datenträgername: %(volumename)s, " -"Eternuspool: %(eternus_pool)s, Speicherkonfigurationsdienst nicht gefunden." - -#, python-format -msgid "" -"create_volume, volumename: %(volumename)s, poolname: %(eternus_pool)s, " -"Return code: %(rc)lu, Error: %(errordesc)s." -msgstr "" -"'create_volume', Datenträgername: %(volumename)s, Poolname: " -"%(eternus_pool)s, Rückgabecode: %(rc)lu, Fehler: %(errordesc)s." - -msgid "create_volume_from_snapshot, Source Volume does not exist in ETERNUS." -msgstr "" -"'create_volume_from_snapshot', Quellendatenträger ist in ETERNUS nicht " -"vorhanden." 
- -#, python-format -msgid "" -"create_volume_from_snapshot, target volume instancename: " -"%(volume_instancename)s, Get Instance Failed." -msgstr "" -"'create_volume_from_snapshot', Name der Zieldatenträgerinstanz: " -"%(volume_instancename)s, Abrufen der Instanz fehlgeschlagen." - -#, python-format -msgid "create_volume_from_snapshot: Snapshot %(name)s does not exist." -msgstr "" -"create_volume_from_snapshot: Schattenkopie %(name)s ist nicht vorhanden." - -#, python-format -msgid "" -"create_volume_from_snapshot: Snapshot status must be \"available\" for " -"creating volume. The invalid status is: %s." -msgstr "" -"create_volume_from_snapshot: Der Status der Schattenkopie muss \"available\" " -"zum Erstellen eines Datenträgers sein. Der ungültige Status ist %s." - -msgid "" -"create_volume_from_snapshot: Volume size is different from snapshot based " -"volume." -msgstr "" -"create_volume_from_snapshot: Datenträgergröße unterscheidet sich von der " -"Größe des auf der Schattenkopie basierenden Datenträgers." - -#, python-format -msgid "" -"delete: %(vol_id)s failed to run dsmc due to invalid arguments with stdout: " -"%(out)s\n" -" stderr: %(err)s" -msgstr "" -"Löschen: %(vol_id)s konnte DSMC aufgrund von ungültigen Argumenten nicht " -"ausführen. Standardausgabe: %(out)s\n" -" Standardfehler: %(err)s" - -#, python-format -msgid "" -"delete: %(vol_id)s failed to run dsmc with stdout: %(out)s\n" -" stderr: %(err)s" -msgstr "" -"Löschen: %(vol_id)s konnte DSMC nicht ausführen. Standardausgabe: %(out)s\n" -" Standardfehler: %(err)s" - -msgid "delete_hypermetro error." -msgstr "delete_hypermetro-Fehler." - -#, python-format -msgid "delete_initiator: %s ACL not found. Continuing." -msgstr "delete_initiator: %s-ACL nicht gefunden. Wird fortgesetzt." - -msgid "delete_replication error." -msgstr "delete_replication-Fehler." 
- -#, python-format -msgid "deleting snapshot %(snapshot_name)s that has dependent volumes" -msgstr "" -"Löschen von Schattenkopie %(snapshot_name)s mit abhängigen Datenträgern" - -#, python-format -msgid "deleting volume %(volume_name)s that has snapshot" -msgstr "Löschen von Datenträger %(volume_name)s mit Schattenkopie" - -msgid "detach snapshot from remote node" -msgstr "Zuordnung der Schattenkopie zu fernem Knoten aufheben" - -msgid "do_setup: No configured nodes." -msgstr "do_setup: keine konfigurierten Knoten." - -#, python-format -msgid "" -"error writing object to swift, MD5 of object in swift %(etag)s is not the " -"same as MD5 of object sent to swift %(md5)s" -msgstr "" -"Fehler beim Schreiben von Objekt an Swift. MD5 von Objekt in Swift " -"[%(etag)s] entspricht nicht MD5 von an Swift gesendetem Objekt [%(md5)s]" - -#, python-format -msgid "extend_volume, eternus_pool: %(eternus_pool)s, pool not found." -msgstr "'extend_volume', Eternuspool: %(eternus_pool)s, Pool nicht gefunden." - -#, python-format -msgid "" -"extend_volume, volume: %(volume)s, volumename: %(volumename)s, eternus_pool: " -"%(eternus_pool)s, Storage Configuration Service not found." -msgstr "" -"'extend_volume', Datenträger: %(volume)s, Datenträgername: %(volumename)s, " -"Eternuspool: %(eternus_pool)s, Speicherkonfigurationsdienst nicht gefunden." - -#, python-format -msgid "" -"extend_volume, volumename: %(volumename)s, Return code: %(rc)lu, Error: " -"%(errordesc)s, PoolType: %(pooltype)s." -msgstr "" -"'extend_volume', Datenträgername: %(volumename)s, Rückgabecode: %(rc)lu, " -"Fehler: %(errordesc)s, Pooltyp: %(pooltype)s." - -#, python-format -msgid "extend_volume, volumename: %(volumename)s, volume not found." -msgstr "" -"'extend_volume', Datenträgername: %(volumename)s, Datenträger nicht gefunden." - -msgid "failed to create new_volume on destination host" -msgstr "Erstellen von new_volume auf Zielhost fehlgeschlagen." 
- -msgid "fake" -msgstr "fake" - -#, python-format -msgid "file already exists at %s" -msgstr "Datei ist bereits in %s vorhanden." - -msgid "fileno is not supported by SheepdogIOWrapper" -msgstr "Fileno wird von SheepdogIOWrapper nicht unterstützt." - -msgid "fileno() not supported by RBD()" -msgstr "fileno() wird von RBD() nicht unterstützt." - -#, python-format -msgid "filesystem %s does not exist in Nexenta Store appliance" -msgstr "Das Dateisystem %s ist nicht in der Nexenta Store-Appliance vorhanden." - -msgid "" -"flashsystem_multihostmap_enabled is set to False, not allow multi host " -"mapping. CMMVC6071E The VDisk-to-host mapping was not created because the " -"VDisk is already mapped to a host." -msgstr "" -"flashsystem_multihostmap_enabled ist auf 'False' gesetzt, Zuordnung von " -"mehreren Hosts wird nicht zugelassen. CMMVC6071E Die Zuordnung der " -"virtuellen Platte zum Host wurde nicht erstellt, da die virtuelle Platte " -"bereits einem Host zugeordnet ist." - -msgid "flush() not supported in this version of librbd" -msgstr "flush() wird in dieser Version von librbd nicht unterstützt." - -#, python-format -msgid "fmt=%(fmt)s backed by: %(backing_file)s" -msgstr "fmt=%(fmt)s gesichert durch: %(backing_file)s" - -#, python-format -msgid "fmt=%(fmt)s backed by:%(backing_file)s" -msgstr "fmt=%(fmt)s gesichert durch:%(backing_file)s" - -msgid "force delete" -msgstr "Löschen erzwingen" - -msgid "get_hyper_domain_id error." -msgstr "get_hyper_domain_id-Fehler." - -msgid "get_hypermetro_by_id error." -msgstr "get_hypermetro_by_id-Fehler." - -#, python-format -msgid "" -"get_iscsi_params: Failed to get target IP for initiator %(ini)s, please " -"check config file." -msgstr "" -"get_iscsi_params: Fehler beim Abrufen der Ziel-IP für Initiator %(ini)s. " -"Überprüfen Sie die Konfigurationsdatei." 
- -#, python-format -msgid "get_pool: Failed to get attributes for volume %s" -msgstr "get_pool: Fehler beim Abrufen der Attribute für den Datenträger %s" - -msgid "glance_metadata changed" -msgstr "'glance_metadata' geändert" - -#, python-format -msgid "" -"gpfs_images_share_mode is set to copy_on_write, but %(vol)s and %(img)s " -"belong to different file systems." -msgstr "" -"gpfs_images_share_mode wurde als copy_on_write festgelegt, aber %(vol)s und " -"%(img)s gehören zu anderen Dateisystemen." - -#, python-format -msgid "" -"gpfs_images_share_mode is set to copy_on_write, but %(vol)s and %(img)s " -"belong to different filesets." -msgstr "" -"gpfs_images_share_mode wurde als copy_on_write festgelegt, aber %(vol)s und " -"%(img)s gehören zu anderen Dateigruppen." - -#, python-format -msgid "" -"hgst_group %(grp)s and hgst_user %(usr)s must map to valid users/groups in " -"cinder.conf" -msgstr "" -"hgst_group %(grp)s und hgst_user %(usr)s müssen in cinder.conf gültigen " -"Benutzern/Gruppen zugeordnet werden." - -#, python-format -msgid "hgst_net %(net)s specified in cinder.conf not found in cluster" -msgstr "" -"hgst_net %(net)s wurde in cinder.conf angegeben, aber im Cluster nicht " -"gefunden." - -msgid "hgst_redundancy must be set to 0 (non-HA) or 1 (HA) in cinder.conf." -msgstr "" -"hgst_redundancy muss in cinder.conf auf 0 (keine HA) oder 1 (HA) gesetzt " -"werden." - -msgid "hgst_space_mode must be an octal/int in cinder.conf" -msgstr "" -"Der Wert für hgst_space_mode muss in cinder.conf eine Oktalzahl/Ganzzahl " -"sein." - -#, python-format -msgid "hgst_storage server %(svr)s not of format :" -msgstr "hgst_storage-Server %(svr)s nicht im Format :" - -msgid "hgst_storage_servers must be defined in cinder.conf" -msgstr "hgst_storage_servers müssen in cinder.conf definiert werden." - -msgid "" -"http service may have been abruptly disabled or put to maintenance state in " -"the middle of this operation." 
-msgstr "" -"Der http-Dienst wurde möglicherweise mitten in dieser Operation unerwartet " -"inaktiviert oder in den Wartungsmodus versetzt." - -msgid "id cannot be None" -msgstr "ID darf nicht 'None' sein." - -#, python-format -msgid "image %s not found" -msgstr "Abbild %s nicht gefunden" - -#, python-format -msgid "initialize_connection, volume: %(volume)s, Volume not found." -msgstr "" -"'initialize_c'onnection', Datenträger: %(volume)s, Datenträger nicht " -"gefunden." - -#, python-format -msgid "initialize_connection: Failed to get attributes for volume %s." -msgstr "" -"initialize_connection: Fehler beim Abrufen der Attribute für Datenträger %s." - -#, python-format -msgid "initialize_connection: Missing volume attribute for volume %s." -msgstr "" -"initialize_connection: Fehlendes Datenträgerattribut für Datenträger %s." - -#, python-format -msgid "" -"initialize_connection: No node found in I/O group %(gid)s for volume %(vol)s." -msgstr "" -"initialize_connection: Kein Knoten in E/A-Gruppe %(gid)s für Datenträger " -"%(vol)s gefunden." - -#, python-format -msgid "initialize_connection: vdisk %s is not defined." -msgstr "initialize_connection: Virtuelle Platte %s ist nicht definiert." - -#, python-format -msgid "invalid user '%s'" -msgstr "Ungültiger Benutzer '%s'" - -#, python-format -msgid "iscsi portal, %s, not found" -msgstr "iSCSI-Portal %s nicht gefunden." - -msgid "" -"iscsi_ip_address must be set in config file when using protocol 'iSCSI'." -msgstr "" -"iscsi_ip_address muss in Konfigurationsdatei festgelegt werden, wenn " -"Protokoll 'iSCSI' verwendet wird." - -#, python-format -msgid "key manager error: %(reason)s" -msgstr "Schlüsselmanagerfehler: %(reason)s" - -msgid "limit param must be an integer" -msgstr "'limit'-Parameter muss eine Ganzzahl sein." - -msgid "limit param must be positive" -msgstr "'limit'-Parameter muss positiv sein." - -msgid "manage_existing requires a 'name' key to identify an existing volume." 
-msgstr "" -"manage_existing erfordert den Schlüssel 'name' zum Identifizieren eines " -"vorhandenen Datenträgers." - -#, python-format -msgid "" -"manage_existing_snapshot: Error managing existing replay %(ss)s on volume " -"%(vol)s" -msgstr "" -"manage_existing_snapshot: Fehler beim Verwalten der vorhandenen Wiedergabe " -"%(ss)s auf dem Datenträger %(vol)s" - -#, python-format -msgid "marker [%s] not found" -msgstr "Marker [%s] nicht gefunden" - -#, python-format -msgid "mdiskgrp missing quotes %s" -msgstr "Fehlende Anführungszeichen für mdiskgrp %s" - -#, python-format -msgid "migration_policy must be 'on-demand' or 'never', passed: %s" -msgstr "migration_policy muss 'on-demand' oder 'never' sein, übergeben: %s" - -#, python-format -msgid "mkfs failed on volume %(vol)s, error message was: %(err)s." -msgstr "mkfs fehlgeschlagen auf Datenträger %(vol)s, Fehlernachricht: %(err)s." - -msgid "mock" -msgstr "mock" - -msgid "mount.glusterfs is not installed" -msgstr "mount.glusterfs ist nicht installiert." - -#, python-format -msgid "multiple resources with name %s found by drbdmanage" -msgstr "Es wurden mehrere Ressourcen mit dem Namen %s von drbdmanage gefunden." - -#, python-format -msgid "multiple resources with snapshot ID %s found" -msgstr "mehrere Ressourcen mit Schattenkopie-ID %s gefunden" - -msgid "name cannot be None" -msgstr "Name darf nicht 'None' sein." - -#, python-format -msgid "no REPLY but %r" -msgstr "Keine Antwort, aber %r" - -#, python-format -msgid "no snapshot with id %s found in drbdmanage" -msgstr "keine Schattenkopie mit ID %s in drbdmanage gefunden" - -#, python-format -msgid "not exactly one snapshot with id %s" -msgstr "nicht genau eine Schattenkopie mit ID %s" - -#, python-format -msgid "not exactly one volume with id %s" -msgstr "nicht genau ein Datenträger mit ID %s" - -#, python-format -msgid "obj missing quotes %s" -msgstr "Fehlende Anführungszeichen für obj %s" - -msgid "open_access_enabled is not off." 
-msgstr "open_access_enabled ist nicht ausgeschaltet." - -msgid "progress must be an integer percentage" -msgstr "Fortschritt muss ein Ganzzahlprozentsatz sein." - -msgid "provider must be defined" -msgstr "Provider muss definiert sein." - -#, python-format -msgid "" -"qemu-img %(minimum_version)s or later is required by this volume driver. " -"Current qemu-img version: %(current_version)s" -msgstr "" -"qemu-img ab Version %(minimum_version)s ist für diesen Datenträgertreiber " -"erforderlich. Aktuelle qemu-img-Version: %(current_version)s" - -#, python-format -msgid "" -"qemu-img is not installed and image is of type %s. Only RAW images can be " -"used if qemu-img is not installed." -msgstr "" -"qemu-img ist nicht installiert und das Abbild ist vom Typ %s. Es können nur " -"RAW-Abbilder verwendet werden, wenn qemu-img nicht installiert ist." - -msgid "" -"qemu-img is not installed and the disk format is not specified. Only RAW " -"images can be used if qemu-img is not installed." -msgstr "" -"qemu-img ist nicht installiert und das Plattenformat wurde nicht angegeben. " -"Es können nur RAW-Abbilder verwendet werden, wenn qemu-img nicht installiert " -"ist." - -msgid "rados and rbd python libraries not found" -msgstr "rados- und rbd-python-Bibliotheken nicht gefunden." - -#, python-format -msgid "read_deleted can only be one of 'no', 'yes' or 'only', not %r" -msgstr "'read_deleted' kann nur 'no', 'yes' oder 'only' sein, nicht '%r'" - -#, python-format -msgid "replication_failover failed. %s not found." -msgstr "'replication_failover' fehlgeschlagen. %s wurde nicht gefunden." - -msgid "replication_failover failed. Backend not configured for failover" -msgstr "" -"'replication_failover' fehlgeschlagen. Das Backend ist nicht für ein " -"Failover konfiguriert." 
- -#, python-format -msgid "" -"restore: %(vol_id)s failed to run dsmc due to invalid arguments on " -"%(bpath)s.\n" -"stdout: %(out)s\n" -" stderr: %(err)s" -msgstr "" -"Wiederherstellung: %(vol_id)s konnte DSMC wegen ungültiger Argumente nicht " -"unter %(bpath)s ausführen.\n" -"Standardausgabe: %(out)s\n" -" Standardfehler: %(err)s" - -#, python-format -msgid "" -"restore: %(vol_id)s failed to run dsmc on %(bpath)s.\n" -"stdout: %(out)s\n" -" stderr: %(err)s" -msgstr "" -"Wiederherstellung: %(vol_id)s konnte DSMC nicht unter %(bpath)s ausführen.\n" -"Standardausgabe: %(out)s\n" -" Standardfehler: %(err)s" - -#, python-format -msgid "" -"restore: %(vol_id)s failed.\n" -"stdout: %(out)s\n" -" stderr: %(err)s." -msgstr "" -"Wiederherstellung: %(vol_id)s fehlgeschlagen.\n" -"Standardausgabe: %(out)s\n" -"Standardfehler: %(err)s." - -msgid "" -"restore_backup aborted, actual object list does not match object list stored " -"in metadata." -msgstr "" -"restore_backup abgebrochen, tatsächliche Objektliste entspricht nicht der in " -"den Metadaten gespeicherten Liste." - -#, python-format -msgid "rtslib_fb is missing member %s: You may need a newer python-rtslib-fb." -msgstr "" -"rtslib_fb enthält Mitglied %s nicht: Sie benötigen möglicherweise eine " -"neuere python-rtslib-fb." - -msgid "san_ip is not set." -msgstr "san_ip wurde nicht festgelegt." - -msgid "san_ip must be set" -msgstr "'san_ip' muss festgelegt sein." - -msgid "" -"san_login and/or san_password is not set for Datera driver in the cinder." -"conf. Set this information and start the cinder-volume service again." -msgstr "" -"san_login und/oder san_password sind für den Datera-Treiber in der Datei " -"cinder.conf nicht festgelegt. Legen Sie diese Informationen fest und starten " -"Sie den Cinder-Datenträgerdienst erneut." - -msgid "serve() can only be called once" -msgstr "serve() kann nur einmal aufgerufen werden." 
- -#, python-format -msgid "snapshot-%s" -msgstr "snapshot-%s" - -msgid "snapshots assigned" -msgstr "Schattenkopien zugeordnet" - -msgid "snapshots changed" -msgstr "Schattenkopien geändert" - -#, python-format -msgid "source volume id:%s is not replicated" -msgstr "Quellendatenträger-ID: %s wird nicht repliziert." - -msgid "source-name cannot be empty." -msgstr "'source-name' darf nicht leer sein." - -msgid "source-name format should be: 'vmdk_path@vm_inventory_path'." -msgstr "" -"Das Format für 'source-name' muss wie folgt lauten: " -"'vmdk_path@vm_inventory_path'." - -#, python-format -msgid "status must be %s and" -msgstr " Status muss %s sein und " - -msgid "status must be available" -msgstr "Status muss 'available' sein." - -msgid "stop_hypermetro error." -msgstr "stop_hypermetro-Fehler." - -msgid "sync_hypermetro error." -msgstr "sync_hypermetro-Fehler." - -#, python-format -msgid "" -"targetcli not installed and could not create default directory " -"(%(default_path)s): %(exc)s" -msgstr "" -"targetcli nicht installiert. Standardverzeichnis (%(default_path)s) konnte " -"nicht erstellt werden: %(exc)s" - -msgid "terminate_connection: Failed to get host name from connector." -msgstr "terminate_connection: Fehler beim Abrufen von Hostname von Connector." - -msgid "timeout creating new_volume on destination host" -msgstr "Zeitlimitüberschreitung beim Erstellen von new_volume auf Zielhost." - -msgid "too many body keys" -msgstr "zu viele Textschlüssel" - -#, python-format -msgid "umount: %s: not mounted" -msgstr "Abhängen: %s: nicht eingehängt" - -#, python-format -msgid "umount: %s: target is busy" -msgstr "Abhängen: %s: Ziel ist ausgelastet" - -msgid "umount: : some other error" -msgstr "Abhängen: : ein anderer Fehler" - -msgid "umount: : target is busy" -msgstr "Abhängen: : Ziel ist ausgelastet" - -#, python-format -msgid "unmanage_snapshot: Cannot find snapshot named %s" -msgstr "" -"unmanage_snapshot: Schattenkopie mit dem Namen %s wurde nicht gefunden." 
- -#, python-format -msgid "unmanage_snapshot: Cannot find volume id %s" -msgstr "unmanage_snapshot: Datenträger-ID %s wurde nicht gefunden." - -#, python-format -msgid "unrecognized argument %s" -msgstr "Nicht erkanntes Argument %s" - -#, python-format -msgid "unsupported compression algorithm: %s" -msgstr "Nicht unterstützter Komprimierungsalgorithmus: %s" - -msgid "valid iqn needed for show_target" -msgstr "Gültiger qualifizierter iSCSI-Name für show_target erforderlich" - -#, python-format -msgid "vdisk %s is not defined." -msgstr "Virtuelle Platte %s ist nicht definiert." - -msgid "vmemclient python library not found" -msgstr "vmemclient-python-Bibliothek nicht gefunden." - -#, python-format -msgid "volume %s not found in drbdmanage" -msgstr "Datenträger %s in drbdmanage nicht gefunden." - -msgid "volume assigned" -msgstr "Datenträger zugeordnet" - -msgid "volume changed" -msgstr "Datenträger geändert" - -msgid "volume is already attached" -msgstr "Datenträger ist bereits angehängt." - -msgid "volume is not local to this node" -msgstr "Der Datenträger ist für diesen Knoten nicht lokal." - -#, python-format -msgid "" -"volume size %(volume_size)d is too small to restore backup of size %(size)d." -msgstr "" -"Die Größe des Datenträgers %(volume_size)d ist zu klein zum Wiederherstellen " -"einer Sicherung der Größe %(size)d." - -#, python-format -msgid "volume size %d is invalid." -msgstr "Die Datenträgergröße %d ist ungültig." - -msgid "" -"volume_type must be provided when creating a volume in a consistency group." -msgstr "" -"volume_type muss beim Erstellen eines Datenträgers in einer Konsistenzgruppe " -"angegeben werden." - -msgid "volume_type_id cannot be None" -msgstr "volume_type_id darf nicht 'None' sein." - -#, python-format -msgid "volume_types must be provided to create consistency group %(name)s." -msgstr "" -"volume_types muss zum Erstellen der Konsistenzgruppe %(name)s angegeben " -"werden." 
- -#, python-format -msgid "volume_types must be provided to create consistency group %s." -msgstr "" -"volume_types muss zum Erstellen der Konsistenzgruppe %s angegeben werden." - -msgid "volumes assigned" -msgstr "Datenträger zugeordnet" - -msgid "volumes changed" -msgstr "Datenträger geändert" - -#, python-format -msgid "wait_for_condition: %s timed out." -msgstr "wait_for_condition: %s hat zulässiges Zeitlimit überschritten." - -#, python-format -msgid "" -"zfssa_manage_policy property needs to be set to 'strict' or 'loose'. Current " -"value is: %s." -msgstr "" -"Die Eigenschaft 'zfssa_manage_policy' muss auf 'strict' oder 'loose' gesetzt " -"sein. Aktueller Wert: %s." diff --git a/cinder/locale/es/LC_MESSAGES/cinder.po b/cinder/locale/es/LC_MESSAGES/cinder.po deleted file mode 100644 index f5ef72b6c..000000000 --- a/cinder/locale/es/LC_MESSAGES/cinder.po +++ /dev/null @@ -1,10187 +0,0 @@ -# Translations template for cinder. -# Copyright (C) 2015 ORGANIZATION -# This file is distributed under the same license as the cinder project. -# -# Translators: -# Eduardo Gonzalez Gutierrez , 2015 -# FIRST AUTHOR , 2011 -# Jose Enrique Ruiz Navarro , 2014 -# Andreas Jaeger , 2016. #zanata -# Jose Porrua , 2016. 
#zanata -msgid "" -msgstr "" -"Project-Id-Version: cinder 9.0.0.0rc2.dev202\n" -"Report-Msgid-Bugs-To: https://bugs.launchpad.net/openstack-i18n/\n" -"POT-Creation-Date: 2016-10-07 03:25+0000\n" -"MIME-Version: 1.0\n" -"Content-Type: text/plain; charset=UTF-8\n" -"Content-Transfer-Encoding: 8bit\n" -"PO-Revision-Date: 2016-07-28 05:37+0000\n" -"Last-Translator: Jose Porrua \n" -"Language: es\n" -"Plural-Forms: nplurals=2; plural=(n != 1);\n" -"Generated-By: Babel 2.0\n" -"X-Generator: Zanata 3.7.3\n" -"Language-Team: Spanish\n" - -#, python-format -msgid "\t%s" -msgstr "\t%s" - -#, python-format -msgid "" -"\n" -"OpenStack Cinder version: %(version)s\n" -msgstr "" -"\n" -"Versión de OpenStack Cinder: %(version)s\n" - -#, python-format -msgid " but size is now %d" -msgstr " pero el tamaño ahora es %d" - -#, python-format -msgid " but size is now %d." -msgstr " pero el tamaño es ahora %d." - -msgid " or " -msgstr " o " - -#, python-format -msgid "%(attr)s is not set." -msgstr "%(attr)s no está establecido." - -#, python-format -msgid "" -"%(driver)s manage_existing cannot manage a volume connected to hosts. Please " -"disconnect this volume from existing hosts before importing" -msgstr "" -"%(driver)s manage_existing no puede gestionar un volumen conectado con " -"hosts. Desconecte este volumen de los hosts existentes antes de realizar la " -"importación." - -#, python-format -msgid "%(err)s" -msgstr "%(err)s" - -#, python-format -msgid "" -"%(err)s\n" -"result: %(res)s." -msgstr "" -"%(err)s\n" -"Resultado: %(res)s." - -#, python-format -msgid "%(error_message)s" -msgstr "%(error_message)s" - -#, python-format -msgid "%(exception)s: %(explanation)s" -msgstr "%(exception)s: %(explanation)s" - -#, python-format -msgid "%(file)s: Permission denied." -msgstr "%(file)s: Permiso denegado." 
- -#, python-format -msgid "" -"%(fun)s: Failed with unexpected CLI output.\n" -" Command: %(cmd)s\n" -" stdout: %(out)s\n" -" stderr: %(err)s" -msgstr "" -"%(fun)s: se ha encontrado un error con una salida de CLI inesperada.\n" -" Mandato: %(cmd)s\n" -" stdout: %(out)s\n" -" stderr: %(err)s" - -#, python-format -msgid "%(host)-25s\t%(availability_zone)-15s" -msgstr "%(host)-25s\t%(availability_zone)-15s" - -#, python-format -msgid "%(host)-25s\t%(zone)-15s" -msgstr "%(host)-25s\t%(zone)-15s" - -#, python-format -msgid "%(message)s" -msgstr "%(message)s" - -#, python-format -msgid "" -"%(message)s\n" -"Status Code: %(_status)s\n" -"Body: %(_body)s" -msgstr "" -"%(message)s\n" -"Código de estado: %(_status)s\n" -"Cuerpo: %(_body)s" - -#, python-format -msgid "%(message)s, subjectAltName: %(sanList)s." -msgstr "%(message)s, subjectAltName: %(sanList)s." - -#, python-format -msgid "" -"%(msg_type)s: creating NetworkPortal: ensure port %(port)d on ip %(ip)s is " -"not in use by another service." -msgstr "" -"%(msg_type)s: se está creando NetworkPortal: asegúrese de que el puerto " -"%(port)d en la IP %(ip)s no lo esté utilizando otro servicio." - -#, python-format -msgid "%(name)s cannot be all spaces." -msgstr "%(name)s no puede estar vacío." - -#, python-format -msgid "" -"%(op)s: backup %(bck_id)s, volume %(vol_id)s failed. Backup object has " -"unexpected mode. Image or file backups supported, actual mode is " -"%(vol_mode)s." -msgstr "" -"%(op)s: copia de seguridad %(bck_id)s, volumen %(vol_id)s ha fallado. El " -"objeto de copia de seguridad tiene una modalidad inesperada. Se soportan las " -"copias de seguridad de imagen o archivo, la modalidad real es %(vol_mode)s." 
- -#, python-format -msgid "%(service)s Service is not %(status)s on storage appliance: %(host)s" -msgstr "" -"El servicio %(service)s no está %(status)s en el dispositivo de " -"almacenamiento: %(host)s" - -#, python-format -msgid "%(value_name)s must be <= %(max_value)d" -msgstr "%(value_name)s debe ser <= %(max_value)d" - -#, python-format -msgid "%(value_name)s must be >= %(min_value)d" -msgstr "%(value_name)s debe ser >= %(min_value)d" - -#, python-format -msgid "" -"%(worker_name)s value of %(workers)d is invalid, must be greater than 0." -msgstr "" -"El valor %(worker_name)s de %(workers)d no es válido, debe ser mayor que 0." - -#, python-format -msgid "%s" -msgstr "%s" - -#, python-format -msgid "%s \"data\" is not in result." -msgstr "%s \"data\" no está en el resultado." - -#, python-format -msgid "" -"%s cannot be accessed. Verify that GPFS is active and file system is mounted." -msgstr "" -"No se puede acceder a %s. Verifique que GPFS está activo y que el sistema de " -"archivos está montado." - -#, python-format -msgid "%s cannot be resized using clone operation as it contains no blocks." -msgstr "" -"%s no se puede redimensionar utilizando la operación de clonación ya que no " -"contiene bloques." - -#, python-format -msgid "" -"%s cannot be resized using clone operation as it is hosted on compressed " -"volume" -msgstr "" -"%s no se puede redimensionar utilizando la operación de clonación ya que se " -"encuentra en un volumen comprimido" - -#, python-format -msgid "%s configuration option is not set." -msgstr "La opción de configuración %s no está establecida." - -#, python-format -msgid "%s does not exist." -msgstr "%s no existe." - -#, python-format -msgid "%s is not a directory." -msgstr "%s no es un directorio." - -#, python-format -msgid "%s is not installed" -msgstr "%s no está instalado" - -#, python-format -msgid "%s is not installed." -msgstr "%s no está instalado." 
- -#, python-format -msgid "%s is not set" -msgstr "%s no está establecido" - -#, python-format -msgid "%s is not set and is required for the replication device to be valid." -msgstr "" -"%s no está definido y es necesario para que el dispositivo de replicación " -"sea válido." - -#, python-format -msgid "%s is not set." -msgstr "%s no está establecido." - -#, python-format -msgid "%s must be a valid raw or qcow2 image." -msgstr "%s debe ser una imagen raw o qcow2 válida." - -#, python-format -msgid "%s must be an absolute path." -msgstr "%s debe ser una ruta absoluta." - -#, python-format -msgid "%s must be an integer." -msgstr "%s debe ser un entero." - -#, python-format -msgid "%s not set in cinder.conf" -msgstr "%s no está definido en cinder.conf" - -#, python-format -msgid "%s not set." -msgstr "%s no establecido." - -#, python-format -msgid "'%(key)s = %(value)s'" -msgstr "'%(key)s = %(value)s'" - -#, python-format -msgid "" -"'%(prot)s' is invalid for flashsystem_connection_protocol in config file. " -"valid value(s) are %(enabled)s." -msgstr "" -"'%(prot)s' no es válido para flashsystem_connection_protocol en el archivo " -"de configuración. Los valores válidos son %(enabled)s." - -msgid "'active' must be present when writing snap_info." -msgstr "'activo' debe estar presente al escribir snap_info." - -msgid "'consistencygroup_id' must be specified" -msgstr "Es necesario especificar el 'consistencygroup_id'" - -msgid "'qemu-img info' parsing failed." -msgstr "Se ha encontrado un error en el análisis de 'qemu-img info'." - -msgid "'status' must be specified." -msgstr "se debe especificar 'status'." 
- -msgid "'volume_id' must be specified" -msgstr "Se debe especificar 'volume_id'" - -#, python-format -msgid "" -"(Command: %(cmd)s) (Return Code: %(exit_code)s) (Stdout: %(stdout)s) " -"(Stderr: %(stderr)s)" -msgstr "" -"(Mandato: %(cmd)s) (Código de retorno: %(exit_code)s) (Salida estándar: " -"%(stdout)s) (Error estándar: %(stderr)s)" - -#, python-format -msgid "A LUN (HLUN) was not found. (LDEV: %(ldev)s)" -msgstr "No se ha encontrado un LUN (HLUN). (LDEV: %(ldev)s)" - -msgid "A concurrent, possibly contradictory, request has been made." -msgstr "Se ha realizado una solicitud simultánea, posiblemente contradictoria." - -#, python-format -msgid "" -"A free LUN (HLUN) was not found. Add a different host group. (LDEV: %(ldev)s)" -msgstr "" -"No se ha encontrado un LUN (HLUN) libre. Añada un grupo de host diferente. " -"(LDEV: %(ldev)s)" - -#, python-format -msgid "A host group could not be added. (port: %(port)s, name: %(name)s)" -msgstr "" -"No se ha podido añadir un grupo de host. (puerto: %(port)s, nombre: %(name)s)" - -#, python-format -msgid "" -"A host group could not be deleted. (port: %(port)s, gid: %(gid)s, name: " -"%(name)s)" -msgstr "" -"No se ha podido eliminar un grupo de host. (puerto: %(port)s, ID de grupo: " -"%(gid)s, nombre: %(name)s)" - -#, python-format -msgid "A host group is invalid. (host group: %(gid)s)" -msgstr "Un grupo de host no es válido. (grupo de host: %(gid)s)" - -#, python-format -msgid "A pair cannot be deleted. (P-VOL: %(pvol)s, S-VOL: %(svol)s)" -msgstr "No se puede suprimir un par. (P-VOL: %(pvol)s, S-VOL: %(svol)s)" - -#, python-format -msgid "" -"A pair could not be created. The maximum number of pair is exceeded. (copy " -"method: %(copy_method)s, P-VOL: %(pvol)s)" -msgstr "" -"No se ha podido crear un par. Se ha excedido el número de par máximo. " -"(método de copia: %(copy_method)s, P-VOL: %(pvol)s)" - -#, python-format -msgid "A parameter is invalid. (%(param)s)" -msgstr "Un parámetro no es válido. 
(%(param)s)" - -#, python-format -msgid "A parameter value is invalid. (%(meta)s)" -msgstr "Un valor de parámetro no es válido. (%(meta)s)" - -#, python-format -msgid "A pool could not be found. (pool id: %(pool_id)s)" -msgstr "No se ha encontrado una agrupación. (ID de agrupación: %(pool_id)s)" - -#, python-format -msgid "A snapshot status is invalid. (status: %(status)s)" -msgstr "Un estado de instantánea no es válido. (estado: %(status)s)" - -msgid "A valid secondary target MUST be specified in order to failover." -msgstr "" -"Se DEBE especificar un destino secundario válido para poder realizar la " -"migración tras error." - -msgid "A volume ID or share was not specified." -msgstr "No se ha especificado un ID de volumen o compartición." - -#, python-format -msgid "A volume status is invalid. (status: %(status)s)" -msgstr "Un estado de volumen no es válido. (estado: %(status)s)" - -#, python-format -msgid "API %(name)s failed with error string %(err)s" -msgstr "La API %(name)s ha fallado con serie de error %(err)s" - -#, python-format -msgid "" -"API Version String %(version)s is of invalid format. Must be of format " -"MajorNum.MinorNum." -msgstr "" -"La cadena de la versión de la API %(version)s tiene un formato no válido. " -"Debe ser un formato MajorNum.MinorNum." - -msgid "API key is missing for CloudByte driver." -msgstr "Falta la clave de API para el controlador CloudByte." - -#, python-format -msgid "API response: %(response)s" -msgstr "Respuesta de la API: %(response)s" - -#, python-format -msgid "API response: %s" -msgstr "Respuesta de la API: %s" - -#, python-format -msgid "API version %(version)s is not supported on this method." -msgstr "La versión de la API %(version)s, no está soportada en este método." - -msgid "API version could not be determined." -msgstr "No se ha podido determinar la versión de API." - -msgid "" -"About to delete child projects having non-zero quota. 
This should not be "
-"performed"
-msgstr ""
-"A punto de suprimir proyectos hijo que tienen cuota distinta de cero. Esto "
-"no debe realizarse."
-
-msgid "Access list not available for public volume types."
-msgstr "La lista de acceso no está disponible para tipos de volumen públicos."
-
-msgid "Activate or deactivate QoS error."
-msgstr "Error al activar o desactivar QoS."
-
-msgid "Activate snapshot error."
-msgstr "Error al activar una instantánea."
-
-msgid "Add FC port to host error."
-msgstr "Error al añadir el puerto FC al host."
-
-msgid "Add fc initiator to array error."
-msgstr "Error al añadir el iniciador fc a la matriz."
-
-msgid "Add initiator to array error."
-msgstr "Error al añadir el iniciador a la matriz."
-
-msgid "Add lun to cache error."
-msgstr "Error al añadir lun a la caché."
-
-msgid "Add lun to partition error."
-msgstr "Error al añadir lun a la partición."
-
-msgid "Add mapping view error."
-msgstr "Error al añadir la vista de correlaciones."
-
-msgid "Add new host error."
-msgstr "Error al añadir host nuevo."
-
-msgid "Add port to port group error."
-msgstr "Error al añadir el puerto al grupo de puertos."
-
-#, python-format
-msgid ""
-"All the specified storage pools to be managed do not exist. Please check "
-"your configuration. Non-existent pools: %s"
-msgstr ""
-"No existen las agrupaciones de almacenamiento especificadas que se van a "
-"gestionar. Compruebe su configuración. Agrupaciones no existentes: %s"
-
-msgid "An API version request must be compared to a VersionedMethod object."
-msgstr ""
-"Una solicitud de versión de la API se debe comparar con un objeto "
-"VersionedMethod."
-
-msgid "An error has occurred during backup operation"
-msgstr "Un error ha ocurrido durante la operación de copia de seguridad"
-
-#, python-format
-msgid ""
-"An error occurred during the LUNcopy operation. LUNcopy name: "
-"%(luncopyname)s. LUNcopy status: %(luncopystatus)s. LUNcopy state: "
-"%(luncopystate)s."
-msgstr "" -"Se ha producido un error durante la operación LUNcopy. Nombre de LUNcopy: " -"%(luncopyname)s. Situación de LUNcopy: %(luncopystatus)s. Estado de LUNcopy: " -"%(luncopystate)s." - -#, python-format -msgid "An error occurred while reading volume \"%s\"." -msgstr "Se ha producido un error al leer el volumen \"%s\"." - -#, python-format -msgid "An error occurred while writing to volume \"%s\"." -msgstr "Se ha producido un error al escribir en el volumen \"%s\"." - -#, python-format -msgid "An iSCSI CHAP user could not be added. (username: %(user)s)" -msgstr "" -"No se ha podido añadir un usuario CHAP de iSCSI. (nombre de usuario: " -"%(user)s)" - -#, python-format -msgid "An iSCSI CHAP user could not be deleted. (username: %(user)s)" -msgstr "" -"No se ha podido eliminar un usuario CHAP de iSCSI. (username: %(user)s)" - -#, python-format -msgid "" -"An iSCSI target could not be added. (port: %(port)s, alias: %(alias)s, " -"reason: %(reason)s)" -msgstr "" -"No se ha podido añadir un destino de iSCSI. (puerto: %(port)s, alias: " -"%(alias)s, razón: %(reason)s)" - -#, python-format -msgid "" -"An iSCSI target could not be deleted. (port: %(port)s, tno: %(tno)s, alias: " -"%(alias)s)" -msgstr "" -"No se ha podido suprimir un destino de iSCSI. (puerto: %(port)s, tno: " -"%(tno)s, alias: %(alias)s)" - -msgid "An unknown error occurred." -msgstr "Se ha producido un error desconocido." - -msgid "An unknown exception occurred." -msgstr "Una excepción desconocida ha ocurrido" - -msgid "" -"An user with a token scoped to a subproject is not allowed to see the quota " -"of its parents." -msgstr "" -"Un usuario con ámbito de señal para un subproyecto no puede ver la cuota de " -"sus padres." - -msgid "Append port group description error." -msgstr "Error al anexar la descripción del grupo de puertos." - -#, python-format -msgid "" -"Applying the zones and cfgs to the switch failed (error code=%(err_code)s " -"error msg=%(err_msg)s." 
-msgstr "" -"Ha fallado la aplicación de zonas y cfg al conmutador (código de error=" -"%(err_code)s mensaje de error=%(err_msg)s." - -#, python-format -msgid "Array does not exist or is offline. Current status of array is %s." -msgstr "" -"La matriz no existe o está fuera de línea. El estado actual de la matriz es " -"%s." - -msgid "Associate host to hostgroup error." -msgstr "Error al asociar host con el grupo de host." - -msgid "Associate host to mapping view error." -msgstr "Error al asociar el host con la vista de correlaciones." - -msgid "Associate initiator to host error." -msgstr "Error al asociar el iniciador con el host." - -msgid "Associate lun to QoS error." -msgstr "Error al asociar el LUN a QoS." - -msgid "Associate lun to lungroup error." -msgstr "Error al asociar LUN con el grupo de LUN." - -msgid "Associate lungroup to mapping view error." -msgstr "Error al asociar el grupo de LUN con la vista de correlaciones." - -msgid "Associate portgroup to mapping view error." -msgstr "Error al asociar el grupo de puertos a la vista de correlaciones." - -msgid "At least one valid iSCSI IP address must be set." -msgstr "Por lo menos se debe establecer una dirección IP de iSCSI válida." - -#, python-format -msgid "Attempt to transfer %s with invalid auth key." -msgstr "Intento de transferir %s con clave de aut no válida." - -#, python-format -msgid "Auth group [%s] details not found in CloudByte storage." -msgstr "" -"No se han encontrado detalles del grupo de autenticación [%s] en el " -"almacenamiento CloudByte." - -msgid "Auth user details not found in CloudByte storage." -msgstr "" -"No se han encontrado detalles del usuario de autenticación en el " -"almacenamiento CloudByte." - -#, python-format -msgid "Authentication failed, verify the switch credentials, error code %s." -msgstr "" -"Ha fallado la autenticación, compruebe las credenciales del conmutador, " -"código de error %s." - -#, python-format -msgid "Availability zone '%(s_az)s' is invalid." 
-msgstr "La zona de disponibilidad '%(s_az)s' no es válida." - -msgid "Available categories:" -msgstr "Categorías disponibles:" - -msgid "" -"Back-end QoS specs are not supported on this storage family and ONTAP " -"version." -msgstr "" -"Las especificaciones QoS de programa de fondo no se admiten en esta familia " -"de almacenamiento y versión ONTAP." - -#, python-format -msgid "Backend doesn't exist (%(backend)s)" -msgstr "El programa de fondo no existe (%(backend)s)" - -#, python-format -msgid "Backend reports: %(message)s" -msgstr "Informes de fondo: %(message)s" - -msgid "Backend reports: item already exists" -msgstr "Informes de fondo: el elemento ya existe" - -msgid "Backend reports: item not found" -msgstr "Informes de fondo: elemento no encontrado" - -#, python-format -msgid "Backend service retry timeout hit: %(timeout)s sec" -msgstr "" -"Tiempo de espera de reintento de servicio de fondo: %(timeout)s segundos" - -msgid "Backend storage did not configure fiber channel target." -msgstr "" -"El almacenamiento del programa de fondo no ha configurado el destino de " -"canal de fibra." - -msgid "Backing up an in-use volume must use the force flag." -msgstr "" -"La copia de seguridad de un volumen en uso debe utilizar el distintivo force." - -#, python-format -msgid "Backup %(backup_id)s could not be found." -msgstr "No se ha podido encontrar la copia de seguridad %(backup_id)s." - -msgid "Backup RBD operation failed" -msgstr "La operación de RBD de copia de seguridad ha fallado" - -msgid "Backup already exists in database." -msgstr "La copia de seguridad ya existe en la base de datos." - -#, python-format -msgid "Backup driver reported an error: %(message)s" -msgstr "Driver de copia de seguridad ha reportado un error: %(message)s" - -msgid "Backup id required" -msgstr "Se necesita el ID de copia de seguridad" - -msgid "Backup is not supported for GlusterFS volumes with snapshots." 
-msgstr "" -"No se soporta la copia de seguridad para volúmenes GlusterFS con " -"instantáneas." - -msgid "Backup is only supported for SOFS volumes without backing file." -msgstr "" -"La copia de seguridad sólo se admite en volúmenes SOFS sin archivo de " -"respaldo." - -msgid "Backup is only supported for raw-formatted GlusterFS volumes." -msgstr "" -"La copia de seguridad solo se soporta para volúmenes GlusterFS con formato " -"raw." - -msgid "Backup is only supported for raw-formatted SOFS volumes." -msgstr "La copia de seguridad sólo se admite en volúmenes SOFS sin formato." - -msgid "Backup operation of an encrypted volume failed." -msgstr "" -"No se ha podido realizar la operación de copia de seguridad de un volumen " -"cifrado." - -#, python-format -msgid "" -"Backup service %(configured_service)s does not support verify. Backup id " -"%(id)s is not verified. Skipping verify." -msgstr "" -"El servicio de copia de seguridad %(configured_service)s no admite la " -"verificación. El id de copia de seguridad %(id)s no se ha verificado. Se " -"omite la verificación." - -#, python-format -msgid "" -"Backup service %(service)s does not support verify. Backup id %(id)s is not " -"verified. Skipping reset." -msgstr "" -"El servicio de copia de seguridad %(service)s no soporta la verificación. El " -"ID de copia de seguridad %(id)s no se ha verificado. Se omite el " -"restablecimiento." - -#, python-format -msgid "Backup should only have one snapshot but instead has %s" -msgstr "" -"La copia de seguridad solo debe tener una instantánea pero en lugar de ello " -"tiene %s" - -msgid "Backup status must be available" -msgstr "El estado de la copia de seguridad debe ser available" - -#, python-format -msgid "Backup status must be available and not %s." -msgstr "El estado de copia de seguridad debe ser disponible y no %s." 
- -msgid "Backup status must be available or error" -msgstr "El estado de la copia de seguridad debe ser available o error" - -msgid "Backup to be restored has invalid size" -msgstr "La copia de seguridad que restaurar tiene un tamaño no válido" - -#, python-format -msgid "Bad Status line returned: %(arg)s." -msgstr "Se ha devuelto una línea de estado errónea: %(arg)s." - -#, python-format -msgid "Bad key(s) in quota set: %s" -msgstr "Clave(s) incorrecta(s) en conjunto de cuotas: %s" - -#, python-format -msgid "" -"Bad or unexpected response from the storage volume backend API: %(data)s" -msgstr "" -"Respuesta errónea o inesperada de la API de programa de fondo del volumen de " -"almacenamiento: %(data)s" - -#, python-format -msgid "Bad project format: project is not in proper format (%s)" -msgstr "" -"Formato de proyecto erróneo: el proyecto no tiene un formato correcto (%s)" - -msgid "Bad response from Datera API" -msgstr "Respuesta errónea de la API Datera" - -msgid "Bad response from SolidFire API" -msgstr "Respuesta errónea de la API SolidFire" - -#, python-format -msgid "Bad response from XMS, %s" -msgstr "Respuesta errónea de XMS, %s" - -msgid "Binary" -msgstr "Binario" - -msgid "Blank components" -msgstr "Componentes en blanco" - -msgid "Blockbridge api host not configured" -msgstr "No se ha configurado el host de API Blockbridge" - -#, python-format -msgid "Blockbridge configured with invalid auth scheme '%(auth_scheme)s'" -msgstr "" -"Blockbridge se ha configurado con un esquema de autenticación no válido " -"'%(auth_scheme)s'" - -msgid "Blockbridge default pool does not exist" -msgstr "La agrupación predeterminada de Blockbridge no existe" - -msgid "" -"Blockbridge password not configured (required for auth scheme 'password')" -msgstr "" -"Contraseña de Blockbridge no configurada (obligatoria para el esquema de " -"autenticación 'password')" - -msgid "Blockbridge pools not configured" -msgstr "Agrupaciones de Blockbridge no configuradas" - -msgid 
"Blockbridge token not configured (required for auth scheme 'token')" -msgstr "" -"Señal Blockbridge no configurada (obligatoria para el esquema de " -"autenticación 'token')" - -msgid "Blockbridge user not configured (required for auth scheme 'password')" -msgstr "" -"Usuario de API Blockbridge no configurado (obligatorio para el esquema de " -"autenticación 'password')" - -#, python-format -msgid "Brocade Fibre Channel Zoning CLI error: %(reason)s" -msgstr "" -"Error de CLI de distribución en zonas de canal de fibra de Brocade: " -"%(reason)s" - -#, python-format -msgid "Brocade Fibre Channel Zoning HTTP error: %(reason)s" -msgstr "" -"Error de HTTP de distribución en zonas de canal de fibra de Brocade: " -"%(reason)s" - -msgid "CHAP secret should be 12-16 bytes." -msgstr "El secreto de CHAP debe tener entre 12 y 16 bytes." - -#, python-format -msgid "" -"CLI Exception output:\n" -" command: %(cmd)s\n" -" stdout: %(out)s\n" -" stderr: %(err)s" -msgstr "" -"Salida de excepción de CLI:\n" -" mandato: %(cmd)s\n" -" stdout: %(out)s\n" -" stderr: %(err)s" - -#, python-format -msgid "" -"CLI Exception output:\n" -" command: %(cmd)s\n" -" stdout: %(out)s\n" -" stderr: %(err)s." -msgstr "" -"Salida de excepción de CLI:\n" -" mandato: %(cmd)s\n" -" stdout: %(out)s\n" -" stderr: %(err)s." - -msgid "" -"CMMVC6071E The VDisk-to-host mapping was not created because the VDisk is " -"already mapped to a host.\n" -"\"" -msgstr "" -"CMMVC6071E La correlación de disco virtual a host no se ha creado, ya que el " -"disco virtual ya se ha correlacionado con un host.\n" -"\"" - -msgid "CONCERTO version is not supported" -msgstr "No se admite la versión CONCERTO." - -#, python-format -msgid "CPG (%s) doesn't exist on array" -msgstr "CPG (%s) no existe en la matriz" - -msgid "Cache name is None, please set smartcache:cachename in key." -msgstr "" -"El nombre de caché es Ninguno, establezca smartcache:cachename en clave." 
- -#, python-format -msgid "Cache volume %s does not have required properties" -msgstr "El volumen de la memoria caché %s no tiene las propiedades necesarias" - -msgid "Call returned a None object" -msgstr "La llamada ha devuelto un objeto None" - -msgid "Can not add FC port to host." -msgstr "No se puede añadir el puerto FC al host." - -#, python-format -msgid "Can not find cache id by cache name %(name)s." -msgstr "No se encuentra el ID de caché por nombre de caché %(name)s." - -#, python-format -msgid "Can not find partition id by name %(name)s." -msgstr "No se encuentra el ID de partición por nombre %(name)s." - -#, python-format -msgid "Can not get pool info. pool: %s" -msgstr "" -"No se ha podido obtener la información de la agrupación. Agrupación: %s" - -#, python-format -msgid "Can not translate %s to integer." -msgstr "No se puede traducir %s a un entero." - -#, python-format -msgid "Can't access 'scality_sofs_config': %s" -msgstr "No se puede acceder a 'scality_sofs_config': %s" - -msgid "Can't decode backup record." -msgstr "No se puede decodificar el registro de copia de seguridad." - -#, python-format -msgid "Can't extend replication volume, volume: %(id)s" -msgstr "No se puede extender el volumen de replicación, volumen: %(id)s" - -msgid "Can't find LUN on the array, please check the source-name or source-id." -msgstr "" -"No se puede encontrar el LUN en la matriz, compruebe el nombre de origen o " -"el ID de origen." - -#, python-format -msgid "Can't find cache name on the array, cache name is: %(name)s." -msgstr "" -"No se encuentra el nombre de la memoria caché en la matriz, el nombre de la " -"memoria caché es: %(name)s." - -#, python-format -msgid "Can't find lun info on the array. volume: %(id)s, lun name: %(name)s." -msgstr "" -"No se puede encontrar la información del LUN en la matriz. Volumen: %(id)s, " -"nombre de lun: %(name)s." - -#, python-format -msgid "Can't find partition name on the array, partition name is: %(name)s." 
-msgstr "" -"No se puede encontrar el nombre de la partición en la matriz, el nombre de " -"la partición es: %(name)s." - -#, python-format -msgid "Can't find service: %s" -msgstr "No se ha podido encontrar el servicio: %s" - -msgid "" -"Can't find snapshot on array, please check the source-name or source-id." -msgstr "" -"No se puede encontrar la instantánea en la matriz, compruebe el nombre de " -"origen o el ID de origen." - -msgid "Can't find the same host id from arrays." -msgstr "No se puede encontrar mismo ID de host en las matrices." - -#, python-format -msgid "Can't get volume id from snapshot, snapshot: %(id)s" -msgstr "" -"No se puede obtener el ID de volumen de la instantánea, instantánea: %(id)s" - -#, python-format -msgid "Can't get volume id. Volume name: %s." -msgstr "No se puede obtener el ID de volumen. Nombre de volumen %s." - -#, python-format -msgid "Can't import LUN %(lun_id)s to Cinder. LUN type mismatched." -msgstr "" -"No se puede importar el LUN %(lun_id)s en Cinder. El tipo de LUN no es " -"coincidente." - -#, python-format -msgid "Can't import LUN %s to Cinder. Already exists in a HyperMetroPair." -msgstr "" -"No se puede importar el LUN %s en Cinder. Ya existe en un HyperMetroPair." - -#, python-format -msgid "Can't import LUN %s to Cinder. Already exists in a LUN copy task." -msgstr "" -"No se puede importar el LUN %s en Cinder. Ya existe en una tarea de copia de " -"LUN." - -#, python-format -msgid "Can't import LUN %s to Cinder. Already exists in a LUN group." -msgstr "" -"No se puede importar el LUN %s en Cinder. Ya existe en un grupo de LUN." - -#, python-format -msgid "Can't import LUN %s to Cinder. Already exists in a LUN mirror." -msgstr "" -"No se puede importar el LUN %s en Cinder. Ya existe en un reflejo de LUN." - -#, python-format -msgid "Can't import LUN %s to Cinder. Already exists in a SplitMirror." -msgstr "No se puede importar el LUN %s en Cinder. Ya existe en un SplitMirror." 
- -#, python-format -msgid "Can't import LUN %s to Cinder. Already exists in a migration task." -msgstr "" -"No se puede importar el LUN %s en Cinder. Ya existe en una tarea de " -"migración." - -#, python-format -msgid "" -"Can't import LUN %s to Cinder. Already exists in a remote replication task." -msgstr "" -"No se puede importar el LUN %s en Cinder. Ya existe en una tarea de " -"replicación remota." - -#, python-format -msgid "Can't import LUN %s to Cinder. LUN status is not normal." -msgstr "" -"No se puede importar el LUN %s en Cinder. El estado del LUN no es normal." - -#, python-format -msgid "Can't import snapshot %s to Cinder. Snapshot doesn't belong to volume." -msgstr "" -"No se puede importar la instantánea %s en Cinder. La instantánea no " -"pertenece al volumen." - -#, python-format -msgid "Can't import snapshot %s to Cinder. Snapshot is exposed to initiator." -msgstr "" -"No se puede importar la instantánea %s en Cinder. Se ha expuesto la " -"instantánea al iniciador." - -#, python-format -msgid "" -"Can't import snapshot %s to Cinder. Snapshot status is not normal or running " -"status is not online." -msgstr "" -"No se puede importar la instantánea %s en Cinder. El estado de la " -"instantánea no es normal o el estado de ejecución no es en línea." - -msgid "Can't parse backup record." -msgstr "No se puede analizar el registro de copia de seguridad." - -#, python-format -msgid "" -"Cannot add volume %(volume_id)s to consistency group %(group_id)s because it " -"has no volume type." -msgstr "" -"No se puede añadir el volumen %(volume_id)s al grupo %(group_id)s debido a " -"que no tiene tipo de volumen." - -#, python-format -msgid "" -"Cannot add volume %(volume_id)s to consistency group %(group_id)s because it " -"is already in consistency group %(orig_group)s." -msgstr "" -"No se puede añadir el volumen %(volume_id)s al grupo de consistencia " -"%(group_id)s porque ya está en el grupo de consistencia %(orig_group)s." 
- -#, python-format -msgid "" -"Cannot add volume %(volume_id)s to consistency group %(group_id)s because " -"volume cannot be found." -msgstr "" -"No se puede añadir el volumen %(volume_id)s al grupo %(group_id)s debido a " -"que no se puede encontrar el volumen." - -#, python-format -msgid "" -"Cannot add volume %(volume_id)s to consistency group %(group_id)s because " -"volume does not exist." -msgstr "" -"No se puede añadir el volumen %(volume_id)s al grupo %(group_id)s debido a " -"que el volumen no existe." - -#, python-format -msgid "" -"Cannot add volume %(volume_id)s to consistency group %(group_id)s because " -"volume is in an invalid state: %(status)s. Valid states are: %(valid)s." -msgstr "" -"No se puede añadir el volumen %(volume_id)s al grupo %(group_id)s debido a " -"que el volumen está en un estado no válido: %(status)s. Los estados válidos " -"son: %(valid)s." - -#, python-format -msgid "" -"Cannot add volume %(volume_id)s to consistency group %(group_id)s because " -"volume type %(volume_type)s is not supported by the group." -msgstr "" -"No se puede añadir el volumen %(volume_id)s al grupo %(group_id)s debido a " -"que el grupo no soporta el tipo de volumen %(volume_type)s." - -#, python-format -msgid "" -"Cannot attach already attached volume %s; multiattach is disabled via the " -"'netapp_enable_multiattach' configuration option." -msgstr "" -"No se puede conectar un volumen ya conectado %s; la conexión múltiple está " -"inhabilitada mediante la opción de configuración 'netapp_enable_multiattach'." - -msgid "Cannot change VF context in the session." -msgstr "No se puede cambiar el contexto de la VF en la sesión." - -#, python-format -msgid "" -"Cannot change VF context, specified VF is not available in the manageable VF " -"list %(vf_list)s." -msgstr "" -"No se puede cambiar el contexto de la VF, la VF especificada no está " -"disponible en la lista de VF gestionables %(vf_list)s." - -msgid "Cannot connect to ECOM server." 
-msgstr "No se puede conectar al servidor de ECOM." - -#, python-format -msgid "" -"Cannot create consistency group %(group)s because snapshot %(snap)s is not " -"in a valid state. Valid states are: %(valid)s." -msgstr "" -"No se puede crear el grupo de consistencia %(group)s porque la instantánea " -"%(snap)s no está en un estado válido. Los estados válidos son: %(valid)s." - -#, python-format -msgid "" -"Cannot create consistency group %(group)s because source volume " -"%(source_vol)s is not in a valid state. Valid states are: %(valid)s." -msgstr "" -"No se puede crear el grupo de consistencia %(group)s porque el volumen de " -"origen %(source_vol)s no está en un estado válido. Los estados válidos son: " -"%(valid)s." - -#, python-format -msgid "Cannot create directory %s." -msgstr "No se puede crear el directorio %s." - -msgid "Cannot create encryption specs. Volume type in use." -msgstr "" -"No se pueden crear especificaciones de cifrado. El tipo de volumen se está " -"utilizando." - -#, python-format -msgid "" -"Cannot create image of disk format: %s. Only vmdk disk format is accepted." -msgstr "" -"No se puede crear la imagen del formato de disco: %s. Solo se acepta el " -"formato de disco vmdk." - -#, python-format -msgid "Cannot create masking view: %(maskingViewName)s. " -msgstr "No se puede crear la vista de máscara: %(maskingViewName)s. " - -#, python-format -msgid "" -"Cannot create more than %(req)s volumes on the ESeries array when " -"'netapp_enable_multiattach' is set to true." -msgstr "" -"No se pueden crear más de %(req)s volúmenes en la matriz ESeries si " -"'netapp_enable_multiattach' se ha establecido en true." - -#, python-format -msgid "Cannot create or find an storage group with name %(sgGroupName)s." -msgstr "" -"No se puede crear o encontrar un grupo de almacenamiento con el nombre " -"%(sgGroupName)s." - -#, python-format -msgid "Cannot create volume of size %s: not multiple of 8GB." 
-msgstr "No se puede crear el volumen de tamaño %s: no es múltiplo de 8GB." - -#, python-format -msgid "Cannot create volume_type with name %(name)s and specs %(extra_specs)s" -msgstr "" -"No se puede crear el tipo de volumen con el nombre %(name)s y las " -"especificaciones %(extra_specs)s" - -#, python-format -msgid "Cannot delete LUN %s while snapshots exist." -msgstr "No se puede suprimir LUN %s mientras haya instantáneas." - -#, python-format -msgid "" -"Cannot delete cache volume: %(cachevol_name)s. It was updated at " -"%(updated_at)s and currently has %(numclones)d volume instances." -msgstr "" -"No se puede suprimir el volumen de la memoria caché: %(cachevol_name)s. Se " -"ha actualizado a %(updated_at)s y actualmente tiene %(numclones)d instancias " -"de volumen." - -#, python-format -msgid "" -"Cannot delete cache volume: %(cachevol_name)s. It was updated at " -"%(updated_at)s and currently has %(numclones)s volume instances." -msgstr "" -"No se puede suprimir el volumen de la memoria caché: %(cachevol_name)s. Se " -"ha actualizado a %(updated_at)s y actualmente tiene %(numclones)s instancias " -"de volumen." - -msgid "Cannot delete encryption specs. Volume type in use." -msgstr "" -"No se pueden suprimir especificaciones de cifrado. El tipo de volumen se " -"está utilizando." - -msgid "Cannot determine storage pool settings." -msgstr "" -"No se puede determinar la configuración de la agrupación de almacenamiento." - -msgid "Cannot execute /sbin/mount.sofs" -msgstr "No se puede ejecutar /sbin/mount.sofs" - -#, python-format -msgid "Cannot find CG group %s." -msgstr "No se puede encontrar el grupo CG %s." - -#, python-format -msgid "" -"Cannot find Controller Configuration Service for storage system " -"%(storage_system)s." -msgstr "" -"No se puede encontrar el servicio de configuración de controlador para el " -"sistema de almacenamiento %(storage_system)s." - -#, python-format -msgid "Cannot find Replication Service to create volume for snapshot %s." 
-msgstr "" -"No se puede encontrar el servicio de réplica para crear el volumen para la " -"instantánea %s." - -#, python-format -msgid "Cannot find Replication Service to delete snapshot %s." -msgstr "" -"No se encuentra el servicio de réplica para suprimir la instantánea %s." - -#, python-format -msgid "Cannot find Replication service on system %s." -msgstr "No se ha podido encontrar el servicio de réplica en el sistema %s." - -#, python-format -msgid "Cannot find Volume: %(id)s. unmanage operation. Exiting..." -msgstr "" -"No se encuentra el volumen: %(id)s. No se puede gestionar la operación. " -"Saliendo..." - -#, python-format -msgid "Cannot find Volume: %(volumename)s. Extend operation. Exiting...." -msgstr "" -"No se puede encontrar el volumen: %(volumename)s. Ampliar operación. " -"Saliendo...." - -#, python-format -msgid "Cannot find device number for volume %(volumeName)s." -msgstr "" -"No se puede encontrar el número de dispositivo para el volumen " -"%(volumeName)s." - -msgid "Cannot find migration task." -msgstr "No se puede encontrar la tarea de migración." - -#, python-format -msgid "Cannot find replication service on system %s." -msgstr "No se ha podido encontrar el servicio de réplica en el sistema %s." - -#, python-format -msgid "Cannot find source CG instance. consistencygroup_id: %s." -msgstr "" -"No se encuentra la instancia de grupo de consistencia de origen. " -"consistencygroup_id: %s." - -#, python-format -msgid "Cannot get mcs_id by channel id: %(channel_id)s." -msgstr "No se puede obtener mcs_id por el ID de canal: %(channel_id)s." - -msgid "Cannot get necessary pool or storage system information." 
-msgstr "" -"No se puede obtener la información del sistema de almacenamiento o " -"agrupación necesaria" - -#, python-format -msgid "" -"Cannot get or create a storage group: %(sgGroupName)s for volume " -"%(volumeName)s " -msgstr "" -"No se puede obtener o crear un grupo de almacenamiento: %(sgGroupName)s para " -"el volumen %(volumeName)s " - -#, python-format -msgid "Cannot get or create initiator group: %(igGroupName)s. " -msgstr "No se puede obtener o crear el grupo de iniciadores: %(igGroupName)s. " - -#, python-format -msgid "Cannot get port group: %(pgGroupName)s. " -msgstr "No se puede obtener el grupo de puertos: %(pgGroupName)s. " - -#, python-format -msgid "" -"Cannot get storage group: %(sgGroupName)s from masking view " -"%(maskingViewInstanceName)s. " -msgstr "" -"No se puede obtener el grupo de almacenamiento: %(sgGroupName)s de la vista " -"de máscara %(maskingViewInstanceName)s. " - -#, python-format -msgid "" -"Cannot get supported size range for %(sps)s Return code: %(rc)lu. Error: " -"%(error)s." -msgstr "" -"No se puede obtener el rango de tamaño soportado para %(sps)s Código de " -"retorno: %(rc)lu. Error: %(error)s." - -#, python-format -msgid "" -"Cannot get the default storage group for FAST policy: %(fastPolicyName)s." -msgstr "" -"No se puede obtener el grupo de almacenamiento predeterminado para la " -"política FAST: %(fastPolicyName)s." - -msgid "Cannot get the portgroup from the masking view." -msgstr "No se puede obtener el grupo de puertos de la vista de máscara." 
- -msgid "Cannot mount Scality SOFS, check syslog for errors" -msgstr "No se puede montar Scality SOFS, compruebe syslog por si hay errores" - -msgid "Cannot ping DRBDmanage backend" -msgstr "No se puede realizar ping en el programa de fondo DRBDmanage" - -#, python-format -msgid "Cannot place volume %(id)s on %(host)s" -msgstr "No se puede colocar el volumen %(id)s en %(host)s" - -#, python-format -msgid "" -"Cannot provide both 'cgsnapshot_id' and 'source_cgid' to create consistency " -"group %(name)s from source." -msgstr "" -"No se puede proporcionar 'cgsnapshot_id' y 'source_cgid' para crear el grupo " -"de consistencia %(name)s desde el origen." - -msgid "Cannot register resource" -msgstr "No se puede registrar el recurso" - -msgid "Cannot register resources" -msgstr "No se puede registrar los recursos" - -#, python-format -msgid "" -"Cannot remove volume %(volume_id)s from consistency group %(group_id)s " -"because it is not in the group." -msgstr "" -"No se puede eliminar el volumen %(volume_id)s del grupo de consistencia " -"%(group_id)s porque no está en el grupo." - -#, python-format -msgid "" -"Cannot remove volume %(volume_id)s from consistency group %(group_id)s " -"because volume is in an invalid state: %(status)s. Valid states are: " -"%(valid)s." -msgstr "" -"No se puede eliminar el volumen %(volume_id)s del grupo de consistencia " -"%(group_id)s porque el volumen está en un estado no válido: %(status)s. Los " -"estados válidos son: %(valid)s." - -#, python-format -msgid "Cannot retype from HPE3PARDriver to %s." -msgstr "No se puede reescribir de HPE3PARDriver a %s." - -msgid "Cannot retype from one 3PAR array to another." -msgstr "No se puede volver a escribir de una matriz de 3PAR a otra." - -msgid "Cannot retype to a CPG in a different domain." -msgstr "No se puede volver a escribir en un CPG en un dominio diferente." - -msgid "Cannot retype to a snap CPG in a different domain." 
-msgstr "" -"No se puede volver a escribir en un CPG de instantánea en un dominio " -"diferente." - -msgid "" -"Cannot run vgc-cluster command, please ensure software is installed and " -"permissions are set properly." -msgstr "" -"No se puede ejecutar el mandato vgc-cluster, asegúrese de que el software se " -"haya instalado y que los permisos se hayan configurado correctamente." - -msgid "Cannot set both hitachi_serial_number and hitachi_unit_name." -msgstr "" -"No se pueden especificar ambos, hitachi_serial_number e hitachi_unit_name." - -msgid "Cannot specify both protection domain name and protection domain id." -msgstr "" -"No se puede especificar el nombre de dominio de protección y el ID de " -"dominio de protección." - -msgid "Cannot specify both storage pool name and storage pool id." -msgstr "" -"No se puede especificar el nombre de agrupación de almacenamiento y el ID de " -"agrupación de almacenamiento." - -#, python-format -msgid "" -"Cannot update consistency group %(group_id)s because no valid name, " -"description, add_volumes, or remove_volumes were provided." -msgstr "" -"No se puede actualizar el grupo de consistencia %(group_id)s porque no se " -"han proporcionado nombre, descripción, add_volumes o remove_volumes válidos." - -msgid "Cannot update encryption specs. Volume type in use." -msgstr "" -"No se pueden actualizar especificaciones de cifrado. El tipo de volumen se " -"está utilizando." - -#, python-format -msgid "Cannot update volume_type %(id)s" -msgstr "No se puede actualizar volume_type (tipo de volumen): %(id)s" - -#, python-format -msgid "Cannot verify the existence of object:%(instanceName)s." -msgstr "No se puede verificar la existencia del objeto: %(instanceName)s." - -#, python-format -msgid "CgSnapshot %(cgsnapshot_id)s could not be found." -msgstr "No se ha podido encontrar el CgSnapshot %(cgsnapshot_id)s." - -msgid "Cgsnahost is empty. No consistency group will be created." -msgstr "Cgsnahost está vacío. 
No se creará ningún grupo de consistencia." - -msgid "Change hostlun id error." -msgstr "Error al cambiar el ID de hostlun." - -msgid "Change lun priority error." -msgstr "Error al cambiar la prioridad de lun." - -msgid "Change lun smarttier policy error." -msgstr "Error al cambiar la política smarttier de lun." - -#, python-format -msgid "" -"Change would make usage less than 0 for the following resources: %(unders)s" -msgstr "" -"El cambio produciría un uso inferior a 0 para los recursos siguientes: " -"%(unders)s." - -msgid "Check access permissions for the ZFS share assigned to this driver." -msgstr "" -"Compruebe los permisos de acceso para la unidad compartida ZFS asignada a " -"este controlador." - -msgid "Check hostgroup associate error." -msgstr "Error al comprobar la asociación del grupo de host." - -msgid "Check initiator added to array error." -msgstr "Error al comprobar el iniciador añadido a la matriz." - -msgid "Check initiator associated to host error." -msgstr "Error al comprobar el iniciador asociado con el host." - -msgid "Check lungroup associate error." -msgstr "Error al comprobar la asociación del grupo de LUN." - -msgid "Check portgroup associate error." -msgstr "Error al comprobar la asociación del grupo de puertos." - -msgid "" -"Check the state of the http service. Also ensure that the https port number " -"is the same as the one specified in cinder.conf." -msgstr "" -"Compruebe el estado del servicio HTTP. Asegúrese también de que el número de " -"puerto HTTPS es el mismo que el que se ha especificado en cinder.conf." - -msgid "Chunk size is not multiple of block size for creating hash." -msgstr "" -"El tamaño de trozo no es múltiplo del tamaño de bloque para la creación de " -"hash." - -#, python-format -msgid "Cisco Fibre Channel Zoning CLI error: %(reason)s" -msgstr "" -"Error de CLI de distribución en zonas de canal de fibra de Cisco: %(reason)s" - -#, python-format -msgid "Clone feature is not licensed on %(storageSystem)s." 
-msgstr "La función Clonar no tiene licencia en %(storageSystem)s." - -#, python-format -msgid "" -"Clone type '%(clone_type)s' is invalid; valid values are: '%(full_clone)s' " -"and '%(linked_clone)s'." -msgstr "" -"El tipo de clon '%(clone_type)s' no es válido; los valores válidos son: " -"'%(full_clone)s' y '%(linked_clone)s'." - -msgid "" -"Cluster is not formatted. You should probably perform \"dog cluster format\"." -msgstr "" -"El clúster no se ha formateado. Debe realizar probablemente \"dog cluster " -"format\"." - -#, python-format -msgid "Coho Data Cinder driver failure: %(message)s" -msgstr "Error de controlador Coho Data Cinder: %(message)s" - -msgid "Coho rpc port is not configured" -msgstr "No se ha configurado el puerto RPC de Coho" - -#, python-format -msgid "Command %(cmd)s blocked in the CLI and was cancelled" -msgstr "Mandato %(cmd)s bloqueado en la CLI que se ha cancelado" - -#, python-format -msgid "CommandLineHelper._wait_for_condition: %s timeout." -msgstr "" -"CommandLineHelper._wait_for_condition: %s ha agotado el tiempo de espera." - -msgid "Compression Enabler is not installed. Can not create compressed volume." -msgstr "" -"El habilitador de compresión no está instalado. No se puede crear un volumen " -"comprimido." - -#, python-format -msgid "Compute cluster: %(cluster)s not found." -msgstr "Clúster de cálculo: no se ha encontrado %(cluster)s." - -msgid "Condition has no field." -msgstr "La condición no tiene ningún campo." - -#, python-format -msgid "Config 'max_over_subscription_ratio' invalid. Must be > 0: %s" -msgstr "" -"La configuración 'max_over_subscription_ratio' no es válida. Debe ser > 0: %s" - -msgid "Configuration error: dell_sc_ssn not set." -msgstr "Error de configuración: dell_sc_ssn no está establecido." - -#, python-format -msgid "Configuration file %(configurationFile)s does not exist." -msgstr "El archivo de configuración %(configurationFile)s no existe." - -msgid "Configuration is not found." 
-msgstr "No se ha encontrado la configuración." - -#, python-format -msgid "Configuration value %s is not set." -msgstr "No se ha establecido el valor de configuración %s." - -#, python-format -msgid "" -"Conflicting QoS specifications in volume type %s: when QoS spec is " -"associated to volume type, legacy \"netapp:qos_policy_group\" is not allowed " -"in the volume type extra specs." -msgstr "" -"Especificaciones QoS en conflicto en el tipo de volumen %s: cuando la " -"especificación QoS se asocia al tipo de volumen, no se permite el valor " -"heredado \"netapp:qos_policy_group\" en las especificaciones adicionales del " -"tipo de volumen." - -#, python-format -msgid "Connection to glance failed: %(reason)s" -msgstr "Conexión a glance falló: %(reason)s" - -#, python-format -msgid "Connection to swift failed: %(reason)s" -msgstr "La conexión a swift ha fallado: %(reason)s" - -#, python-format -msgid "Connector does not provide: %s" -msgstr "El conector no proporciona: %s" - -#, python-format -msgid "Connector doesn't have required information: %(missing)s" -msgstr "El conector no dispone de la información necesaria: %(missing)s" - -msgid "Consistency group is empty. No cgsnapshot will be created." -msgstr "El grupo de consistencia está vacío. No se creará ningún cgsnapshot." - -#, python-format -msgid "ConsistencyGroup %(consistencygroup_id)s could not be found." -msgstr "No se ha encontrado el ConsistencyGroup %(consistencygroup_id)s." - -msgid "Container" -msgstr "Contenedor" - -msgid "Container size smaller than required file size." -msgstr "Tamaño de contenedor menor que tamaño de archivo necesario." - -msgid "Content type not supported." -msgstr "Tipo de contenido no soportado." - -#, python-format -msgid "Controller Configuration Service not found on %(storageSystemName)s." -msgstr "" -"El servicio de configuración de controlador no se ha encontrado en " -"%(storageSystemName)s." 
-
-#, python-format
-msgid "Controller IP '%(host)s' could not be resolved: %(e)s."
-msgstr "No se ha podido resolver el IP de controlador '%(host)s': %(e)s."
-
-#, python-format
-msgid "Converted to %(f1)s, but format is now %(f2)s"
-msgstr "Se ha convertido a %(f1)s, pero ahora el formato es %(f2)s"
-
-#, python-format
-msgid "Converted to %(vol_format)s, but format is now %(file_format)s"
-msgstr ""
-"Se ha convertido a %(vol_format)s, pero ahora el formato es %(file_format)s"
-
-#, python-format
-msgid "Converted to raw, but format is now %s"
-msgstr "Convertido a sin formato, pero el formato es ahora %s"
-
-#, python-format
-msgid "Converted to raw, but format is now %s."
-msgstr "Se ha convertido a sin formato, pero el formato es ahora %s."
-
-msgid "Coordinator uninitialized."
-msgstr "Coordinador desinicializado."
-
-#, python-format
-msgid ""
-"Copy volume task failed: convert_to_base_volume: id=%(id)s, status="
-"%(status)s."
-msgstr ""
-"La tarea de copia de volumen ha fallado: convert_to_base_volume: id=%(id)s, "
-"estado=%(status)s."
-
-#, python-format
-msgid ""
-"Copy volume task failed: create_cloned_volume id=%(id)s, status=%(status)s."
-msgstr ""
-"La tarea Copiar volumen ha fallado: create_cloned_volume id=%(id)s, status="
-"%(status)s."
-
-#, python-format
-msgid "Copying metadata from %(src_type)s %(src_id)s to %(vol_id)s."
-msgstr "Copiando metadatos de %(src_type)s %(src_id)s a %(vol_id)s."
-
-msgid ""
-"Could not determine which Keystone endpoint to use. This can either be set "
-"in the service catalog or with the cinder.conf config option "
-"'backup_swift_auth_url'."
-msgstr ""
-"No se ha podido determinar qué punto final Keystone debe utilizarse. Puede "
-"establecerse en el catálogo de servicio o con la opción de configuración "
-"cinder.conf 'backup_swift_auth_url'."
-
-msgid ""
-"Could not determine which Swift endpoint to use. This can either be set in "
-"the service catalog or with the cinder.conf config option 'backup_swift_url'."
-msgstr "" -"No se ha podido determinar qué punto final Swift debe utilizarse. Puede " -"establecerse en el catálogo de servicio o con la opción de configuración " -"cinder.conf 'backup_swift_url'." - -msgid "Could not find DISCO wsdl file." -msgstr "No se ha podido encontrar el archivo wsdl de DISCO." - -#, python-format -msgid "Could not find GPFS cluster id: %s." -msgstr "No se ha podido encontrar el id de clúster GPFS: %s." - -#, python-format -msgid "Could not find GPFS file system device: %s." -msgstr "" -"No se ha podido encontrar el dispositivo de sistema de archivos GPFS: %s." - -#, python-format -msgid "Could not find config at %(path)s" -msgstr "No se ha podido encontrar configuración en %(path)s" - -#, python-format -msgid "Could not find iSCSI export for volume %s" -msgstr "No se ha podido encontrar la exportación iSCSI para el volumen %s" - -#, python-format -msgid "Could not find iSCSI target for volume: %(volume_id)s." -msgstr "No se ha encontrado el destino iSCSI del volumen: %(volume_id)s." - -#, python-format -msgid "Could not find key in output of command %(cmd)s: %(out)s." -msgstr "" -"No se ha podido encontrar la clave en la salida del mandato %(cmd)s: %(out)s." - -#, python-format -msgid "Could not find parameter %(param)s" -msgstr "No se ha podido encontrar el parámetro %(param)s" - -#, python-format -msgid "Could not find target %s" -msgstr "No se ha podido encontrar el destino %s" - -#, python-format -msgid "Could not find the parent volume for Snapshot '%s' on array." -msgstr "" -"No se ha podido encontrar el volumen padre de la instantánea '%s' en la " -"matriz." - -#, python-format -msgid "Could not find unique snapshot %(snap)s on volume %(vol)s." -msgstr "" -"No se ha podido encontrar la instantánea exclusiva %(snap)s en el volumen " -"%(vol)s." - -msgid "Could not get system name." -msgstr "No se ha podido obtener el nombre del sistema." 
- -#, python-format -msgid "Could not load paste app '%(name)s' from %(path)s" -msgstr "No se ha podido cargar aplicación de pegar '%(name)s' desde %(path)s " - -#, python-format -msgid "" -"Could not read information for snapshot %(name)s. Code: %(code)s. Reason: " -"%(reason)s" -msgstr "" -"No se ha podido leer la información de la instantánea %(name)s. Código: " -"%(code)s. Motivo: %(reason)s" - -#, python-format -msgid "Could not restore configuration file %(file_path)s: %(exc)s" -msgstr "" -"No se ha podido restaurar el archivo de configuración %(file_path)s: %(exc)s" - -#, python-format -msgid "Could not save configuration to %(file_path)s: %(exc)s" -msgstr "No se ha podido guardar la configuración en %(file_path)s: %(exc)s" - -#, python-format -msgid "Could not start consistency group snapshot %s." -msgstr "No se ha podido iniciar la instantánea del grupo de consistencia %s." - -#, python-format -msgid "Counter %s not found" -msgstr "No se ha encontrado el contador %s " - -msgid "Create QoS policy error." -msgstr "Error al crear la política QoS." - -#, python-format -msgid "" -"Create backup aborted, expected backup status %(expected_status)s but got " -"%(actual_status)s." -msgstr "" -"La creación de copia de seguridad ha terminado anormalmente, se esperaba el " -"estado de copia de seguridad %(expected_status)s pero se ha obtenido " -"%(actual_status)s." - -#, python-format -msgid "" -"Create backup aborted, expected volume status %(expected_status)s but got " -"%(actual_status)s." -msgstr "" -"La creación de copia de seguridad ha terminado anormalmente, se esperaba el " -"estado de volumen %(expected_status)s pero se ha obtenido %(actual_status)s." - -msgid "Create export for volume failed." -msgstr "Error al crear la exportación de volumen." - -msgid "Create hostgroup error." -msgstr "Error al crear el grupo de host." - -#, python-format -msgid "Create hypermetro error. %s." -msgstr "Error al crear el hypermetro. %s." - -msgid "Create lun error." 
-msgstr "Error al crear el LUN." - -msgid "Create lun migration error." -msgstr "Error al crear la migración de lun." - -msgid "Create luncopy error." -msgstr "Error de crear luncopy." - -msgid "Create lungroup error." -msgstr "Error al crear el grupo de LUN." - -msgid "Create manager volume flow failed." -msgstr "Error al crear un flujo de volumen de gestor." - -msgid "Create port group error." -msgstr "Error al crear el grupo de puertos." - -msgid "Create replication error." -msgstr "Error al crear la replicación." - -#, python-format -msgid "Create replication pair failed. Error: %s." -msgstr "Crear par de replicación ha fallado. Error: %s." - -msgid "Create snapshot error." -msgstr "Error crear instantánea." - -#, python-format -msgid "Create volume error. Because %s." -msgstr "Error al crear volumen. Razón: %s." - -msgid "Create volume failed." -msgstr "La creación de volumen ha fallado." - -msgid "Creating a consistency group from a source is not currently supported." -msgstr "" -"Actualmente no se da soporte a crear un grupo de consistencia desde un " -"origen." - -#, python-format -msgid "" -"Creating and activating zone set failed: (Zone set=%(cfg_name)s error=" -"%(err)s)." -msgstr "" -"Han fallado la creación y activación del conjunto de zonas: (Zone set=" -"%(cfg_name)s error=%(err)s)." - -#, python-format -msgid "" -"Creating and activating zone set failed: (Zone set=%(zoneset)s error=" -"%(err)s)." -msgstr "" -"Han fallado la creación y activación del conjunto de zonas: (Zone set=" -"%(zoneset)s error=%(err)s)." - -#, python-format -msgid "Creating usages for %(begin_period)s until %(end_period)s" -msgstr "Creando usos desde %(begin_period)s hasta %(end_period)s" - -msgid "Current host isn't part of HGST domain." -msgstr "El host actual no forma parte del dominio HGST." 
-
-#, python-format
-msgid ""
-"Current host not valid for volume %(id)s with type %(type)s, migration not "
-"allowed"
-msgstr ""
-"Host actual no válido para el volumen %(id)s con el tipo %(type)s, migración "
-"no permitida"
-
-#, python-format
-msgid ""
-"Currently mapped host for volume %(vol)s is in unsupported host group with "
-"%(group)s."
-msgstr ""
-"El host correlacionado actualmente para el volumen %(vol)s está en un grupo "
-"de hosts no admitido con %(group)s."
-
-#, python-format
-msgid ""
-"DRBDmanage driver error: expected key \"%s\" not in answer, wrong DRBDmanage "
-"version?"
-msgstr ""
-"Error de controlador DRBDmanage: la clave esperada \"%s\" no está en la "
-"respuesta, ¿se trata de una versión de DRBDmanage incorrecta?"
-
-msgid ""
-"DRBDmanage driver setup error: some required libraries (dbus, drbdmanage.*) "
-"not found."
-msgstr ""
-"Error de configuración de controlador DRBDmanage: algunas bibliotecas "
-"obligatorias (dbus, drbdmanage.*) no encontradas."
-
-#, python-format
-msgid "DRBDmanage expected one resource (\"%(res)s\"), got %(n)d"
-msgstr "DRBDmanage esperaba un recurso (\"%(res)s\"), ha obtenido %(n)d"
-
-#, python-format
-msgid ""
-"DRBDmanage timeout waiting for new volume after snapshot restore; resource "
-"\"%(res)s\", volume \"%(vol)s\""
-msgstr ""
-"Tiempo de espera excedido para DRBDmanage esperando el nuevo volumen después "
-"de restaurar la instantánea; recurso \"%(res)s\", volumen \"%(vol)s\""
-
-#, python-format
-msgid ""
-"DRBDmanage timeout waiting for snapshot creation; resource \"%(res)s\", "
-"snapshot \"%(sn)s\""
-msgstr ""
-"Tiempo de espera excedido para DRBDmanage esperando la creación de la "
-"instantánea; recurso \"%(res)s\", instantánea \"%(sn)s\""
-
-#, python-format
-msgid ""
-"DRBDmanage timeout waiting for volume creation; resource \"%(res)s\", volume "
-"\"%(vol)s\""
-msgstr ""
-"Tiempo de espera excedido para DRBDmanage esperando la creación del volumen; "
-"recurso \"%(res)s\", volumen 
\"%(vol)s\"" - -#, python-format -msgid "" -"DRBDmanage timeout waiting for volume size; volume ID \"%(id)s\" (res " -"\"%(res)s\", vnr %(vnr)d)" -msgstr "" -"Tiempo de espera excedido para DRBDmanage esperando el tamaño del volumen; " -"ID de volumen \"%(id)s\" (res \"%(res)s\", vnr %(vnr)d)" - -msgid "Data ONTAP API version could not be determined." -msgstr "No se ha podido determinar la versión de API de ONTAP de datos." - -msgid "Data ONTAP operating in 7-Mode does not support QoS policy groups." -msgstr "" -"Los datos ONTAP que funcionan en 7-Mode no dan soporte a los grupos de " -"política QoS." - -msgid "Database schema downgrade is not allowed." -msgstr "No se permite degradar el esquema de base de datos." - -#, python-format -msgid "Dataset %s is not shared in Nexenta Store appliance" -msgstr "" -"El conjunto de datos %s no está compartido en la aplicación Nexenta Store" - -#, python-format -msgid "Dataset group %s not found at Nexenta SA" -msgstr "No se ha encontrado el grupo de conjuntos de datos %s en Nexenta SA" - -#, python-format -msgid "" -"Dedup is a valid provisioning type, but requires WSAPI version " -"'%(dedup_version)s' version '%(version)s' is installed." -msgstr "" -"Dedup es un tipo de aprovisionamiento válido pero requiere que la versión de " -"WSAPI '%(dedup_version)s' versión '%(version)s' esté instalada." - -msgid "Dedup luns cannot be extended" -msgstr "No se pueden ampliar los LUN dedup" - -#, python-format -msgid "" -"Default quota for resource: %(res)s is set by the default quota flag: quota_" -"%(res)s, it is now deprecated. Please use the default quota class for " -"default quota." -msgstr "" -"La cuota predeterminada para el recurso: %(res)s se establece por medio del " -"indicador de cuota predeterminada: quota_%(res)s, ahora se ha desaprobado. " -"Use la clase de cuota predeterminada para la cuota predeterminada." - -msgid "Default volume type can not be found." 
-msgstr "No se ha podido encontrar el tipo de volumen predeterminado." - -msgid "Delete LUNcopy error." -msgstr "Error al suprimir LUNcopy." - -msgid "Delete QoS policy error." -msgstr "Error al suprimir la política QoS." - -msgid "Delete associated lun from lungroup error." -msgstr "Error al suprimir el LUN asociado del grupo de LUN." - -#, python-format -msgid "" -"Delete backup aborted, the backup service currently configured " -"[%(configured_service)s] is not the backup service that was used to create " -"this backup [%(backup_service)s]." -msgstr "" -"La supresión de la copia de seguridad ha terminado anormalmente, el servicio " -"de copia de seguridad configurado actualmente [%(configured_service)s] no es " -"el servicio de copia de seguridad que se usó para crear esta copia de " -"seguridad [%(backup_service)s]." - -msgid "Delete consistency group failed." -msgstr "Ha fallado la supresión del grupo de consistencia." - -msgid "Delete hostgroup error." -msgstr "Error al suprimir el grupo de host." - -msgid "Delete hostgroup from mapping view error." -msgstr "Error al suprimir el grupo de host de la vista de correlaciones." - -msgid "Delete lun error." -msgstr "Error al suprimir lun." - -msgid "Delete lun migration error." -msgstr "Error al suprimir la migración de lun." - -msgid "Delete lungroup error." -msgstr "Error al suprimir el grupo de LUN." - -msgid "Delete lungroup from mapping view error." -msgstr "Error al suprimir el grupo de LUN de la vista de correlaciones." - -msgid "Delete mapping view error." -msgstr "Error al suprimir la vista de correlaciones." - -msgid "Delete port group error." -msgstr "Error al suprimir el grupo de puertos." - -msgid "Delete portgroup from mapping view error." -msgstr "Error al suprimir el grupo de puertos de la vista de correlaciones." - -msgid "Delete snapshot error." -msgstr "Error al suprimir una instantánea." - -#, python-format -msgid "Delete snapshot of volume not supported in state: %s." 
-msgstr "La supresión de instantánea del volumen no se soporta en estado: %s." - -#, python-format -msgid "" -"Delete_backup aborted, expected backup status %(expected_status)s but got " -"%(actual_status)s." -msgstr "" -"Delete_backup ha terminado anormalmente, se esperaba el estado de copia de " -"seguridad %(expected_status)s pero se ha obtenido %(actual_status)s." - -msgid "Deleting volume from database and skipping rpc." -msgstr "Suprimiendo el volumen de la base de datos y omitiendo rpc." - -#, python-format -msgid "Deleting zones failed: (command=%(cmd)s error=%(err)s)." -msgstr "Ha fallado la supresión de zonas: (command=%(cmd)s error=%(err)s)." - -msgid "Dell API 2.1 or later required for Consistency Group support" -msgstr "" -"Dell API 2.1 o superior necesario para soporte del grupo de consistencia." - -msgid "" -"Dell Cinder driver configuration error replication not supported with direct " -"connect." -msgstr "" -"No se da soporte a la replicación de errores de configuración del " -"controlador Cinder de Dell con conexión directa." - -#, python-format -msgid "Dell Cinder driver configuration error replication_device %s not found" -msgstr "" -"Error de configuración del controlador Cinder de Dell, no se ha encontrado " -"el dispositivo de replicación (replication_device) %s" - -msgid "Describe-resource is admin only functionality" -msgstr "El recurso de descripción es funcionalidad sólo de administrador" - -#, python-format -msgid "Destination has migration_status %(stat)s, expected %(exp)s." -msgstr "El destino tiene migration_status %(stat)s, esperado %(exp)s." - -msgid "Destination volume not mid-migration." -msgstr "El volumen de destino no mid-migration." - -msgid "" -"Detach volume failed: More than one attachment, but no attachment_id " -"provided." -msgstr "" -"Error al desconectar el volumen: más de una conexión, pero ningún " -"attachment_id proporcionado." - -msgid "Detach volume from instance and then try again." 
-msgstr "Desconecte el volumen de la instancia y vuelva a intentarlo." - -#, python-format -msgid "Detected more than one volume with name %(vol_name)s" -msgstr "Se ha detectado más de un volumen con el nombre %(vol_name)s" - -#, python-format -msgid "Did not find expected column in %(fun)s: %(hdr)s." -msgstr "No se ha encontrado la columna esperada en %(fun)s: %(hdr)s." - -#, python-format -msgid "Did not find the expected key %(key)s in %(fun)s: %(raw)s." -msgstr "No se ha encontrado la clave esperada %(key)s en %(fun)s: %(raw)s." - -msgid "Disabled reason contains invalid characters or is too long" -msgstr "Disabled reason contiene caracteres inválidos o es demasiado larga." - -#, python-format -msgid "Domain with name %s wasn't found." -msgstr "No se ha encontrado el dominio con el nombre %s." - -#, python-format -msgid "" -"Downlevel GPFS Cluster Detected. GPFS Clone feature not enabled in cluster " -"daemon level %(cur)s - must be at least at level %(min)s." -msgstr "" -"Downlevel GPFS Cluster detectado. La característica GPFS Clone no está " -"habilitada en nivel cluster daemon %(cur)s - debe estar al menos en nivel " -"%(min)s." - -#, python-format -msgid "Driver initialize connection failed (error: %(err)s)." -msgstr "El controlador no ha podido inicializar la conexión (error: %(err)s)." - -msgid "Driver must implement initialize_connection" -msgstr "El controlador debe implementar initialize_connection" - -#, python-format -msgid "" -"Driver successfully decoded imported backup data, but there are missing " -"fields (%s)." -msgstr "" -"El controlador ha decodificado correctamente los datos de la copia de " -"seguridad importados, pero faltan campos (%s)." - -#, python-format -msgid "" -"E-series proxy API version %(current_version)s does not support full set of " -"SSC extra specs. The proxy version must be at at least %(min_version)s." 
-msgstr "" -"La API de proxy E-series versión %(current_version)s no da soporte a todo el " -"conjunto de especificaciones adicionales SSC. La versión de proxy debe ser " -"como mínimo %(min_version)s." - -#, python-format -msgid "" -"EMC VNX Cinder Driver CLI exception: %(cmd)s (Return Code: %(rc)s) (Output: " -"%(out)s)." -msgstr "" -"Excepción de CLI del controlador de EMC VNX Cinder: %(cmd)s (Código de " -"retorno: %(rc)s) (Salida: %(out)s)." - -#, python-format -msgid "" -"EMC VNX Cinder Driver SPUnavailableException: %(cmd)s (Return Code: %(rc)s) " -"(Output: %(out)s)." -msgstr "" -"Excepción SPUnavailableException del controlador de EMC VNX Cinder: %(cmd)s " -"(Código de retorno: %(rc)s) (Salida: %(out)s)." - -msgid "" -"EcomServerIp, EcomServerPort, EcomUserName, EcomPassword must have valid " -"values." -msgstr "" -"EcomServerIp, EcomServerPort, EcomUserName, EcomPassword deben tener valores " -"válidos." - -#, python-format -msgid "" -"Either 'cgsnapshot_id' or 'source_cgid' must be provided to create " -"consistency group %(name)s from source." -msgstr "" -"Debe proporcionar 'cgsnapshot_id' o 'source_cgid' para crear el grupo de " -"consistencia %(name)s del origen." - -#, python-format -msgid "" -"Either SLO: %(slo)s or workload %(workload)s is invalid. Examine previous " -"error statement for valid values." -msgstr "" -"El SLO: %(slo)s o la carga de trabajo %(workload)s no son válidos. Examine " -"sentencias de error anteriores para valores válidos." - -msgid "Either hitachi_serial_number or hitachi_unit_name is required." -msgstr "Se necesita hitachi_serial_number o hitachi_unit_name." - -#, python-format -msgid "Element Composition Service not found on %(storageSystemName)s." -msgstr "" -"El servicio de composición de elementos no se ha encontrado en " -"%(storageSystemName)s" - -msgid "Enables QoS." -msgstr "Habilita la calidad de servicio." - -msgid "Enables compression." -msgstr "Habilita la compresión." - -msgid "Enables replication." 
-msgstr "Habilita la réplica." - -msgid "Ensure that configfs is mounted at /sys/kernel/config." -msgstr "Asegúrese de que configfs está montado en /sys/kernel/config." - -#, python-format -msgid "" -"Error Adding Initiator: %(initiator)s on groupInitiatorGroup: " -"%(initiatorgroup)s Return code: %(ret.status)d Message: %(ret.data)s ." -msgstr "" -"Error al añadir el iniciador: %(initiator)s en groupInitiatorGroup: " -"%(initiatorgroup)s Código de retorno: %(ret.status)d Mensaje: %(ret.data)s ." - -#, python-format -msgid "" -"Error Adding to TargetGroup: %(targetgroup)s withIQN: %(iqn)sReturn code: " -"%(ret.status)d Message: %(ret.data)s." -msgstr "" -"Error al añadir TargetGroup: %(targetgroup)s con IQN: %(iqn)s Código de " -"retorno: %(ret.status)d Mensaje: %(ret.data)s." - -#, python-format -msgid "Error Attaching volume %(vol)s." -msgstr "Error al conectar el volumen %(vol)s." - -#, python-format -msgid "" -"Error Cloning Snapshot: %(snapshot)s on Volume: %(lun)s of Pool: %(pool)s " -"Project: %(project)s Clone project: %(clone_proj)s Return code: " -"%(ret.status)d Message: %(ret.data)s." -msgstr "" -"Error al clonar la instantánea: %(snapshot)s en el volumen: %(lun)s de la " -"agrupación: %(pool)s Proyecto: %(project)s Clonar proyecto: %(clone_proj)s " -"Código de retorno: %(ret.status)d Mensaje: %(ret.data)s." - -#, python-format -msgid "" -"Error Create Cloned Volume: %(cloneName)s Return code: %(rc)lu. Error: " -"%(error)s." -msgstr "" -"Error al crear el volumen clonado: %(cloneName)s. Código de retorno: " -"%(rc)lu. Error: %(error)s." - -#, python-format -msgid "" -"Error Create Cloned Volume: Volume: %(cloneName)s Source Volume:" -"%(sourceName)s. Return code: %(rc)lu. Error: %(error)s." -msgstr "" -"Error al crear el volumen clonado: Volumen: %(cloneName)s Origen Volumen: " -"%(sourceName)s. Código de retorno: %(rc)lu. Error: %(error)s." - -#, python-format -msgid "" -"Error Create Group: %(groupName)s. Return code: %(rc)lu. Error: %(error)s." 
-msgstr "" -"Error al crear el grupo: %(groupName)s. Código de retorno: %(rc)lu. Error: " -"%(error)s." - -#, python-format -msgid "" -"Error Create Masking View: %(groupName)s. Return code: %(rc)lu. Error: " -"%(error)s." -msgstr "" -"Error al crear la vista de máscara: %(groupName)s. Código de retorno: " -"%(rc)lu. Error: %(error)s." - -#, python-format -msgid "" -"Error Create Volume: %(volumeName)s. Return code: %(rc)lu. Error: %(error)s." -msgstr "" -"Error al crear el volumen: %(volumeName)s. Código de retorno: %(rc)lu. " -"Error: %(error)s." - -#, python-format -msgid "" -"Error Create Volume: %(volumename)s. Return code: %(rc)lu. Error: %(error)s." -msgstr "" -"Error al crear el volumen: %(volumename)s. Código de retorno: %(rc)lu. " -"Error: %(error)s." - -#, python-format -msgid "" -"Error CreateGroupReplica: source: %(source)s target: %(target)s. Return " -"code: %(rc)lu. Error: %(error)s." -msgstr "" -"Error CreateGroupReplica: origen: %(source)s destino: %(target)s. Código de " -"retorno: %(rc)lu. Error: %(error)s." - -#, python-format -msgid "" -"Error Creating Initiator: %(initiator)s on Alias: %(alias)s Return code: " -"%(ret.status)d Message: %(ret.data)s ." -msgstr "" -"Error al crear el iniciador: %(initiator)s en el Alias: %(alias)s Código de " -"retorno: %(ret.status)d Mensaje: %(ret.data)s." - -#, python-format -msgid "" -"Error Creating Project: %(project)s on Pool: %(pool)s Return code: " -"%(ret.status)d Message: %(ret.data)s ." -msgstr "" -"Error al crear el proyecto: %(project)s en la agrupación: %(pool)s Código de " -"retorno: %(ret.status)d Mensaje: %(ret.data)s." - -#, python-format -msgid "" -"Error Creating Property: %(property)s Type: %(type)s Description: " -"%(description)s Return code: %(ret.status)d Message: %(ret.data)s." -msgstr "" -"Error al crear la propiedad: %(property)s Tipo: %(type)s Descripción: " -"%(description)s Código de retorno: %(ret.status)d Mensaje: %(ret.data)s ." 
-
-#, python-format
-msgid ""
-"Error Creating Share: %(name)s Return code: %(ret.status)d Message: "
-"%(ret.data)s."
-msgstr ""
-"Error al crear la unidad compartida: %(name)s Código de retorno: "
-"%(ret.status)d Mensaje: %(ret.data)s."
-
-#, python-format
-msgid ""
-"Error Creating Snapshot: %(snapshot)s onVolume: %(lun)s to Pool: %(pool)s "
-"Project: %(project)s Return code: %(ret.status)d Message: %(ret.data)s."
-msgstr ""
-"Error al crear la instantánea: %(snapshot)s en el volumen: %(lun)s en la "
-"agrupación: %(pool)s Proyecto: %(project)s Código de retorno: %(ret.status)d "
-"Mensaje: %(ret.data)s."
-
-#, python-format
-msgid ""
-"Error Creating Snapshot: %(snapshot)s onshare: %(share)s to Pool: %(pool)s "
-"Project: %(project)s Return code: %(ret.status)d Message: %(ret.data)s."
-msgstr ""
-"Error al crear la instantánea: %(snapshot)s de la unidad compartida: "
-"%(share)s en la agrupación: %(pool)s Proyecto: %(project)s Código de "
-"retorno: %(ret.status)d Mensaje: %(ret.data)s."
-
-#, python-format
-msgid ""
-"Error Creating Target: %(alias)sReturn code: %(ret.status)d Message: "
-"%(ret.data)s ."
-msgstr ""
-"Error al crear el destino: %(alias)s Código de retorno: %(ret.status)d "
-"Mensaje: %(ret.data)s."
-
-#, python-format
-msgid ""
-"Error Creating TargetGroup: %(targetgroup)s withIQN: %(iqn)sReturn code: "
-"%(ret.status)d Message: %(ret.data)s ."
-msgstr ""
-"Error al crear TargetGroup: %(targetgroup)s con IQN: %(iqn)s Código de "
-"retorno: %(ret.status)d Mensaje: %(ret.data)s."
-
-#, python-format
-msgid ""
-"Error Creating Volume: %(lun)s Size: %(size)s Return code: %(ret.status)d "
-"Message: %(ret.data)s."
-msgstr ""
-"Error al crear el volumen: %(lun)s Tamaño: %(size)s Código de retorno: "
-"%(ret.status)d Mensaje: %(ret.data)s."
-
-#, python-format
-msgid ""
-"Error Creating new composite Volume Return code: %(rc)lu. Error: %(error)s."
-msgstr ""
-"Error al crear el nuevo volumen compuesto. Código de retorno: %(rc)lu. 
" -"Error: %(error)s." - -#, python-format -msgid "" -"Error Creating replication action on: pool: %(pool)s Project: %(proj)s " -"volume: %(vol)s for target: %(tgt)s and pool: %(tgt_pool)sReturn code: " -"%(ret.status)d Message: %(ret.data)s ." -msgstr "" -"Error al crear la acción de réplica en: agrupación: %(pool)s Proyecto: " -"%(proj)s volumen: %(vol)s para destino: %(tgt)s y agrupación: %(tgt_pool)s " -"código de retorno: %(ret.status)d Mensaje: %(ret.data)s." - -msgid "Error Creating unbound volume on an Extend operation." -msgstr "Error al crear un volumen desenlazado en una operación de ampliación" - -msgid "Error Creating unbound volume." -msgstr "Error al crear el volumen desenlazado." - -#, python-format -msgid "" -"Error Delete Volume: %(volumeName)s. Return code: %(rc)lu. Error: %(error)s." -msgstr "" -"Error al suprimir el volumen: %(volumeName)s. Código de retorno: %(rc)lu. " -"Error: %(error)s." - -#, python-format -msgid "" -"Error Deleting Group: %(storageGroupName)s. Return code: %(rc)lu. Error: " -"%(error)s" -msgstr "" -"Error al suprimir el grupo: %(storageGroupName)s. Código de retorno: " -"%(rc)lu. Error: %(error)s" - -#, python-format -msgid "" -"Error Deleting Initiator Group: %(initiatorGroupName)s. Return code: " -"%(rc)lu. Error: %(error)s" -msgstr "" -"Error al suprimir el grupo de iniciadores: %(initiatorGroupName)s. Código de " -"retorno: %(rc)lu. Error: %(error)s" - -#, python-format -msgid "" -"Error Deleting Snapshot: %(snapshot)s on Share: %(share)s to Pool: %(pool)s " -"Project: %(project)s Return code: %(ret.status)d Message: %(ret.data)s." -msgstr "" -"Error al suprimir la instantánea: %(snapshot)s en la unidad compartida: " -"%(share)s en la agrupación: %(pool)s Proyecto: %(project)s Código de " -"retorno: %(ret.status)d Mensaje: %(ret.data)s." - -#, python-format -msgid "" -"Error Deleting Snapshot: %(snapshot)s on Volume: %(lun)s to Pool: %(pool)s " -"Project: %(project)s Return code: %(ret.status)d Message: %(ret.data)s." 
-msgstr ""
-"Error al suprimir la instantánea: %(snapshot)s en el volumen: %(lun)s en la "
-"agrupación: %(pool)s Proyecto: %(project)s Código de retorno: "
-"%(ret.status)d Mensaje: %(ret.data)s."
-
-#, python-format
-msgid ""
-"Error Deleting Volume: %(lun)s from Pool: %(pool)s, Project: %(project)s. "
-"Return code: %(ret.status)d, Message: %(ret.data)s."
-msgstr ""
-"Error al suprimir el volumen: %(lun)s de la agrupación: %(pool)s, Proyecto: "
-"%(project)s. Código de retorno: %(ret.status)d, Mensaje: %(ret.data)s."
-
-#, python-format
-msgid ""
-"Error Deleting project: %(project)s on pool: %(pool)s Return code: "
-"%(ret.status)d Message: %(ret.data)s."
-msgstr ""
-"Error al suprimir el proyecto: %(project)s en la agrupación: %(pool)s Código "
-"de retorno: %(ret.status)d Mensaje: %(ret.data)s."
-
-#, python-format
-msgid ""
-"Error Deleting replication action: %(id)s Return code: %(ret.status)d "
-"Message: %(ret.data)s."
-msgstr ""
-"Error al suprimir la acción de réplica: %(id)s Código de retorno: "
-"%(ret.status)d Mensaje: %(ret.data)s."
-
-#, python-format
-msgid ""
-"Error Extend Volume: %(volumeName)s. Return code: %(rc)lu. Error: %(error)s."
-msgstr ""
-"Error al extender el volumen: %(volumeName)s. Código de retorno: %(rc)lu. "
-"Error: %(error)s."
-
-#, python-format
-msgid ""
-"Error Getting Initiators: InitiatorGroup: %(initiatorgroup)s Return code: "
-"%(ret.status)d Message: %(ret.data)s ."
-msgstr ""
-"Error al obtener los iniciadores: InitiatorGroup: %(initiatorgroup)s Código "
-"de retorno: %(ret.status)d Mensaje: %(ret.data)s."
-
-#, python-format
-msgid ""
-"Error Getting Pool Stats: Pool: %(pool)s Return code: %(status)d Message: "
-"%(data)s."
-msgstr ""
-"Error al obtener las estadísticas de la agrupación: Agrupación: %(pool)s "
-"Código de retorno: %(status)d Mensaje: %(data)s."
-
-#, python-format
-msgid ""
-"Error Getting Project Stats: Pool: %(pool)s Project: %(project)s Return "
-"code: %(ret.status)d Message: %(ret.data)s."
-msgstr "" -"Error al obtener el estado del proyecto: agrupación: %(pool)s Proyecto: " -"%(project)s Código de retorno: %(ret.status)d Mensaje: %(ret.data)s." - -#, python-format -msgid "" -"Error Getting Share: %(share)s on Pool: %(pool)s Project: %(project)s Return " -"code: %(ret.status)d Message: %(ret.data)s." -msgstr "" -"Error al obtener la unidad compartida: %(share)s en la agrupación: %(pool)s " -"Proyecto: %(project)s Código de retorno: %(ret.status)d Mensaje: " -"%(ret.data)s." - -#, python-format -msgid "" -"Error Getting Snapshot: %(snapshot)s on Volume: %(lun)s to Pool: %(pool)s " -"Project: %(project)s Return code: %(ret.status)d Message: %(ret.data)s." -msgstr "" -"Error al obtener la instantánea: %(snapshot)s en el volumen: %(lun)s en la " -"agrupación: %(pool)s Proyecto: %(project)s Código de retorno: %(ret.status)d " -"Mensaje: %(ret.data)s." - -#, python-format -msgid "" -"Error Getting Target: %(alias)sReturn code: %(ret.status)d Message: " -"%(ret.data)s ." -msgstr "" -"Error al obtener el destino: %(alias)s Código de retorno: %(ret.status)d " -"Mensaje: %(ret.data)s." - -#, python-format -msgid "" -"Error Getting Volume: %(lun)s on Pool: %(pool)s Project: %(project)s Return " -"code: %(ret.status)d Message: %(ret.data)s." -msgstr "" -"Error al obtener el volumen: %(lun)s en la agrupación: %(pool)s Proyecto: " -"%(project)s Código de retorno: %(ret.status)d Mensaje: %(ret.data)s." - -#, python-format -msgid "" -"Error Migrating volume from one pool to another. Return code: %(rc)lu. " -"Error: %(error)s." -msgstr "" -"Error al migrar el volumen de una agrupación a otra. Código de retorno: " -"%(rc)lu. Error: %(error)s." - -#, python-format -msgid "" -"Error Modifying masking view : %(groupName)s. Return code: %(rc)lu. Error: " -"%(error)s." -msgstr "" -"Error al modificar la vista de máscara: %(groupName)s. Código de retorno: " -"%(rc)lu. Error: %(error)s." 
- -#, python-format -msgid "Error Pool ownership: Pool %(pool)s is not owned by %(host)s." -msgstr "" -"Error en la propiedad de la agrupación: La agrupación %(pool)s no es " -"propiedad de %(host)s." - -#, python-format -msgid "" -"Error Setting props Props: %(props)s on Volume: %(lun)s of Pool: %(pool)s " -"Project: %(project)s Return code: %(ret.status)d Message: %(ret.data)s." -msgstr "" -"Error al establecer las propiedades. Propiedades: %(props)s en el volumen: " -"%(lun)s de la agrupación: %(pool)s Proyecto: %(project)s Código de retorno: " -"%(ret.status)d Mensaje: %(ret.data)s." - -#, python-format -msgid "" -"Error Terminating migrate session. Return code: %(rc)lu. Error: %(error)s." -msgstr "" -"Error al terminar la sesión de migración. Código de retorno: %(rc)lu. " -"Error: %(error)s." - -#, python-format -msgid "" -"Error Verifying Initiator: %(iqn)s Return code: %(ret.status)d Message: " -"%(ret.data)s." -msgstr "" -"Error al verificar el iniciador: %(iqn)s Código de retorno: %(ret.status)d " -"Mensaje: %(ret.data)s." - -#, python-format -msgid "" -"Error Verifying Pool: %(pool)s Return code: %(ret.status)d Message: " -"%(ret.data)s." -msgstr "" -"Error al verificar la agrupación: %(pool)s Código de retorno: %(ret.status)d " -"Mensaje: %(ret.data)s." - -#, python-format -msgid "" -"Error Verifying Project: %(project)s on Pool: %(pool)s Return code: " -"%(ret.status)d Message: %(ret.data)s." -msgstr "" -"Error al verificar el proyecto: %(project)s en la agrupación: %(pool)s " -"Código de retorno: %(ret.status)d Mensaje: %(ret.data)s." - -#, python-format -msgid "" -"Error Verifying Service: %(service)s Return code: %(ret.status)d Message: " -"%(ret.data)s." -msgstr "" -"Error al verificar el servicio: %(service)s Código de retorno: " -"%(ret.status)d Mensaje: %(ret.data)s." - -#, python-format -msgid "" -"Error Verifying Target: %(alias)s Return code: %(ret.status)d Message: " -"%(ret.data)s." 
-msgstr "" -"Error al verificar el destino: %(alias)s Código de retorno: %(ret.status)d " -"Mensaje: %(ret.data)s." - -#, python-format -msgid "" -"Error Verifying share: %(share)s on Project: %(project)s and Pool: %(pool)s " -"Return code: %(ret.status)d Message: %(ret.data)s." -msgstr "" -"Error al verificar la unidad compartida: %(share)s en el Proyecto: " -"%(project)s y la Agrupación: %(pool)s Código de retorno: %(ret.status)d " -"Mensaje: %(ret.data)s ." - -#, python-format -msgid "" -"Error adding Volume: %(volumeName)s with instance path: " -"%(volumeInstancePath)s." -msgstr "" -"Error al añadir el volumen: %(volumeName)s con vía de acceso de instancia: " -"%(volumeInstancePath)s." - -#, python-format -msgid "" -"Error adding initiator to group : %(groupName)s. Return code: %(rc)lu. " -"Error: %(error)s." -msgstr "" -"Error al añadir el iniciador al grupo: %(groupName)s. Código de retorno: " -"%(rc)lu. Error: %(error)s." - -#, python-format -msgid "Error adding volume to composite volume. Error is: %(error)s." -msgstr "" -"Error al añadir el volumen al volumen compuesto. El error es: %(error)s." - -#, python-format -msgid "Error appending volume %(volumename)s to target base volume." -msgstr "Error al añadir el volumen %(volumename)s al volumen base de destino" - -#, python-format -msgid "" -"Error associating storage group : %(storageGroupName)s. To fast Policy: " -"%(fastPolicyName)s with error description: %(errordesc)s." -msgstr "" -"Error al asociar el grupo de almacenamiento: %(storageGroupName)s con la " -"política fast: %(fastPolicyName)s con la descripción de error: %(errordesc)s." - -#, python-format -msgid "" -"Error break clone relationship: Sync Name: %(syncName)s Return code: " -"%(rc)lu. Error: %(error)s." -msgstr "" -"Error al interrumpir la relación de clonación: Nombre de sincronización: " -"%(syncName)s Código de retorno: %(rc)lu. Error: %(error)s." - -msgid "Error connecting to ceph cluster." 
-msgstr "Error al conectarse con un clúster ceph." - -#, python-format -msgid "Error connecting via ssh: %s" -msgstr "Error al conectar mediante ssh: %s" - -#, python-format -msgid "Error creating volume: %s." -msgstr "Error al crear el volumen: %s." - -msgid "Error deleting replay profile." -msgstr "Error al suprimir el perfil de reproducción." - -#, python-format -msgid "Error deleting volume %(ssn)s: %(volume)s" -msgstr "Error al suprimir el volumen %(ssn)s: %(volume)s" - -#, python-format -msgid "Error deleting volume %(vol)s: %(err)s." -msgstr "Error al suprimir el volumen %(vol)s: %(err)s." - -#, python-format -msgid "Error during evaluator parsing: %(reason)s" -msgstr "Error durante el análisis de evaluador: %(reason)s" - -#, python-format -msgid "" -"Error editing share: %(share)s on Pool: %(pool)s Return code: %(ret.status)d " -"Message: %(ret.data)s ." -msgstr "" -"Error al editar la unidad compartida: %(share)s en la agrupación: %(pool)s " -"Código de retorno: %(ret.status)d Mensaje: %(ret.data)s." - -#, python-format -msgid "" -"Error enabling iSER for NetworkPortal: please ensure that RDMA is supported " -"on your iSCSI port %(port)d on ip %(ip)s." -msgstr "" -"Error al habilitar iSER para NetworkPortal: asegúrese de que hay soporte " -"para RDMA en el puerto iSCSI %(port)d en la IP %(ip)s." - -#, python-format -msgid "Error encountered during cleanup of a failed attach: %(ex)s" -msgstr "" -"Se ha encontrado un error durante la limpieza de una conexión anómala: %(ex)s" - -#, python-format -msgid "Error executing CloudByte API [%(cmd)s], Error: %(err)s." -msgstr "Error al ejecutar la API de CloudByte [%(cmd)s], Error: %(err)s." - -msgid "Error executing EQL command" -msgstr "Error al ejecutar el mandato EQL" - -#, python-format -msgid "Error executing command via ssh: %s" -msgstr "Error al ejecutar mandato mediante ssh: %s" - -#, python-format -msgid "Error extending volume %(vol)s: %(err)s." -msgstr "Error al ampliar el volumen %(vol)s: %(err)s." 
- -#, python-format -msgid "Error extending volume: %(reason)s" -msgstr "Error al extender volumen: %(reason)s" - -#, python-format -msgid "Error finding %(name)s." -msgstr "Error al buscar %(name)s" - -#, python-format -msgid "Error finding %s." -msgstr "Error al buscar %s" - -#, python-format -msgid "" -"Error getting ReplicationSettingData. Return code: %(rc)lu. Error: %(error)s." -msgstr "" -"Error al configurar ReplicationSettingData. Código de retorno: %(rc)lu. " -"Error: %(error)s." - -msgid "" -"Error getting appliance version details. Return code: %(ret.status)d " -"Message: %(ret.data)s ." -msgstr "" -"Error al obtener los detalles de la versión de dispositivo. Código de " -"retorno: %(ret.status)d Mensaje: %(ret.data)s." - -#, python-format -msgid "Error getting domain id from name %(name)s: %(err)s." -msgstr "" -"Error al obtener el ID de dominio a partir del nombre %(name)s: %(err)s." - -#, python-format -msgid "Error getting domain id from name %(name)s: %(id)s." -msgstr "" -"Error al obtener el ID de dominio a partir del nombre %(name)s: %(id)s." - -msgid "Error getting initiator groups." -msgstr "Error al obtener los grupos de iniciador." - -#, python-format -msgid "Error getting pool id from name %(pool)s: %(err)s." -msgstr "" -"Error al obtener el ID de agrupación a partir del nombre %(pool)s: %(err)s." - -#, python-format -msgid "Error getting pool id from name %(pool_name)s: %(err_msg)s." -msgstr "" -"Error al obtener el ID de agrupación a partir del nombre %(pool_name)s: " -"%(err_msg)s." - -#, python-format -msgid "" -"Error getting replication action: %(id)s. Return code: %(ret.status)d " -"Message: %(ret.data)s ." -msgstr "" -"Error al obtener la acción de réplica: %(id)s. Código de retorno: " -"%(ret.status)d Mensaje: %(ret.data)s." - -msgid "" -"Error getting replication source details. Return code: %(ret.status)d " -"Message: %(ret.data)s ." -msgstr "" -"Error al obtener los detalles de origen de réplica. Código de retorno: %(ret." 
-"status)d Mensaje: %(ret.data)s." - -msgid "" -"Error getting replication target details. Return code: %(ret.status)d " -"Message: %(ret.data)s ." -msgstr "" -"Error al obtener los detalles de destino de réplica. Código de retorno: " -"%(ret.status)d Mensaje: %(ret.data)s." - -#, python-format -msgid "" -"Error getting version: svc: %(svc)s.Return code: %(ret.status)d Message: " -"%(ret.data)s." -msgstr "" -"Error al obtener la versión: svc: %(svc)s. Código de retorno: %(ret.status)d " -"Mensaje: %(ret.data)s." - -#, python-format -msgid "" -"Error in Operation [%(operation)s] for volume [%(cb_volume)s] in CloudByte " -"storage: [%(cb_error)s], error code: [%(error_code)s]." -msgstr "" -"Error en la operación [%(operation)s] para el volumen [%(cb_volume)s] en el " -"almacenamiento de CloudByte: [%(cb_error)s], código de error: " -"[%(error_code)s]." - -#, python-format -msgid "Error in SolidFire API response: data=%(data)s" -msgstr "Error en respuesta de API SolidFire: data=%(data)s" - -#, python-format -msgid "Error in space-create for %(space)s of size %(size)d GB" -msgstr "Error en la creación de espacio para %(space)s de tamaño %(size)d GB" - -#, python-format -msgid "Error in space-extend for volume %(space)s with %(size)d additional GB" -msgstr "" -"Error en space-extend para el volumen %(space)s con %(size)d GB adicionales" - -#, python-format -msgid "Error managing volume: %s." -msgstr "Error al gestionar el volumen: %s." - -#, python-format -msgid "" -"Error modify replica synchronization: %(sv)s operation: %(operation)s. " -"Return code: %(rc)lu. Error: %(error)s." -msgstr "" -"Error al modificar la sincronización de réplica: %(sv)s operación: " -"%(operation)s. Código de retorno: %(rc)lu. Error: %(error)s." - -#, python-format -msgid "" -"Error modifying Service: %(service)s Return code: %(ret.status)d Message: " -"%(ret.data)s." -msgstr "" -"Error al modificar el servicio: %(service)s Código de retorno: " -"%(ret.status)d Mensaje: %(ret.data)s." 
- -#, python-format -msgid "" -"Error moving volume: %(vol)s from source project: %(src)s to target project: " -"%(tgt)s Return code: %(ret.status)d Message: %(ret.data)s ." -msgstr "" -"Error al mover el volumen: %(vol)s del proyecto de origen: %(src)s al " -"proyecto de destino: %(tgt)s Código de retorno: %(ret.status)d Mensaje: " -"%(ret.data)s ." - -msgid "Error not a KeyError." -msgstr "El error no es un KeyError." - -msgid "Error not a TypeError." -msgstr "El error no es un TypeError." - -#, python-format -msgid "Error occurred when creating cgsnapshot %s." -msgstr "Se producido un error al crear el cgsnapshot %s." - -#, python-format -msgid "Error occurred when deleting cgsnapshot %s." -msgstr "Se producido un error al suprimir el cgsnapshot %s." - -#, python-format -msgid "Error occurred when updating consistency group %s." -msgstr "Se ha producido un error al actualizar el grupo de consistencia %s." - -#, python-format -msgid "Error renaming volume %(vol)s: %(err)s." -msgstr "Error al cambiar el nombre del volumen %(vol)s: %(err)s." - -#, python-format -msgid "Error response: %s" -msgstr "Respuesta de tipo error: %s" - -msgid "Error retrieving volume size" -msgstr "Error al recuperar el tamaño de volumen" - -#, python-format -msgid "" -"Error sending replication update for action id: %(id)s . Return code: " -"%(ret.status)d Message: %(ret.data)s ." -msgstr "" -"Error al enviar una actualización de réplica para el ID de acción: %(id)s. " -"Código de retorno: %(ret.status)d Mensaje: %(ret.data)s." - -#, python-format -msgid "" -"Error sending replication update. Returned error: %(err)s. Action: %(id)s." -msgstr "" -"Error al enviar la actualización de réplica. Error devuelto: %(err)s. " -"Acción: %(id)s." - -#, python-format -msgid "" -"Error setting replication inheritance to %(set)s for volume: %(vol)s project " -"%(project)s Return code: %(ret.status)d Message: %(ret.data)s ." 
-msgstr "" -"Error al establecer la herencia de réplica en %(set)s para el volumen: " -"%(vol)s del proyecto %(project)s Código de retorno: %(ret.status)d Mensaje: " -"%(ret.data)s ." - -#, python-format -msgid "" -"Error severing the package: %(package)s from source: %(src)s Return code: " -"%(ret.status)d Message: %(ret.data)s ." -msgstr "" -"Error al cortar el paquete: %(package)s del origen: %(src)s Código de " -"retorno: %(ret.status)d Mensaje: %(ret.data)s." - -#, python-format -msgid "Error unbinding volume %(vol)s from pool. %(error)s." -msgstr "Error al desenlazar el volumen %(vol)s de la agrupación. %(error)s." - -#, python-format -msgid "Error while authenticating with switch: %s." -msgstr "Error durante la autenticación con el conmutador: %s." - -#, python-format -msgid "Error while changing VF context %s." -msgstr "Error al cambiar el contexto de la VF %s." - -#, python-format -msgid "Error while checking the firmware version %s." -msgstr "Error al comprobar la versión de firmware %s." - -#, python-format -msgid "Error while checking transaction status: %s" -msgstr "Error al comprobar estado de transacción: %s" - -#, python-format -msgid "Error while checking whether VF is available for management %s." -msgstr "Error al comprobar si la VF está disponible para gestión %s." - -#, python-format -msgid "" -"Error while connecting the switch %(switch_id)s with protocol %(protocol)s. " -"Error: %(error)s." -msgstr "" -"Error al conectar el conmutador %(switch_id)s con el protocolo %(protocol)s. " -"Error: %(error)s." - -#, python-format -msgid "Error while creating authentication token: %s" -msgstr "Error al crear el token de autenticación: %s" - -#, python-format -msgid "Error while creating snapshot [status] %(stat)s - [result] %(res)s." -msgstr "Error al crear la instantánea [estado] %(stat)s - [resultado] %(res)s." - -#, python-format -msgid "Error while creating volume [status] %(stat)s - [result] %(res)s." 
-msgstr "Error al crear el volumen [estado] %(stat)s - [resultado] %(res)s." - -#, python-format -msgid "Error while deleting snapshot [status] %(stat)s - [result] %(res)s" -msgstr "" -"Error al suprimir la instantánea [estado] %(stat)s - [resultado] %(res)s." - -#, python-format -msgid "Error while deleting volume [status] %(stat)s - [result] %(res)s." -msgstr "Error al suprimir el volumen [estado] %(stat)s - [resultado] %(res)s." - -#, python-format -msgid "Error while extending volume [status] %(stat)s - [result] %(res)s." -msgstr "Error al extender el volumen [estado] %(stat)s - [resultado] %(res)s." - -#, python-format -msgid "Error while getting %(op)s details, returned code: %(status)s." -msgstr "" -"Error al obtener detalles de %(op)s, se ha devuelto el código: %(status)s." - -#, python-format -msgid "Error while getting data via ssh: (command=%(cmd)s error=%(err)s)." -msgstr "Error al obtener datos mediante ssh: (command=%(cmd)s error=%(err)s)." - -#, python-format -msgid "Error while getting disco information [%s]." -msgstr "Error al obtener la información de Disco [%s]." - -#, python-format -msgid "Error while getting nvp value: %s." -msgstr "Error al obtener el valor nvp: %s." - -#, python-format -msgid "Error while getting session information %s." -msgstr "Error al obtener la información de sesión %s." - -#, python-format -msgid "Error while parsing the data: %s." -msgstr "Error al analizar los datos: %s" - -#, python-format -msgid "Error while querying page %(url)s on the switch, reason %(error)s." -msgstr "" -"Error al consultar a la página %(url)s sobre el conmutador, motivo%(error)s." - -#, python-format -msgid "" -"Error while removing the zones and cfgs in the zone string: %(description)s." -msgstr "" -"Error al eliminar las zonas y cfg de la cadena de zonas: %(description)s." - -#, python-format -msgid "Error while requesting %(service)s API." -msgstr "Error al solicitar la API de %(service)s." 
- -#, python-format -msgid "Error while running zoning CLI: (command=%(cmd)s error=%(err)s)." -msgstr "" -"Error al ejecutar CLI de distribución en zonas: (command=%(cmd)s error=" -"%(err)s)." - -#, python-format -msgid "" -"Error while updating the new zones and cfgs in the zone string. Error " -"%(description)s." -msgstr "" -"Error al actualizar las nuevas zonas y cfg en la cadena de zonas. Error " -"%(description)s." - -msgid "Error writing field to database" -msgstr "Error al escribir el campo en la base de datos" - -#, python-format -msgid "Error[%(stat)s - %(res)s] while getting volume id." -msgstr "Error[%(stat)s - %(res)s] al obtener el ID del volumen." - -#, python-format -msgid "" -"Error[%(stat)s - %(res)s] while restoring snapshot [%(snap_id)s] into volume " -"[%(vol)s]." -msgstr "" -"Error[%(stat)s - %(res)s] al restaurar la instantánea [%(snap_id)s] en el " -"volumen [%(vol)s]." - -#, python-format -msgid "Error[status] %(stat)s - [result] %(res)s] while getting volume id." -msgstr "" -"Error[estado] %(stat)s - [resultado] %(res)s] al obtener el ID del volumen." - -#, python-format -msgid "" -"Exceeded max scheduling attempts %(max_attempts)d for volume %(volume_id)s" -msgstr "" -"Se ha superado el máximo de intentos de planificación %(max_attempts)d para " -"el volumen %(volume_id)s" - -msgid "Exceeded the limit of snapshots per volume" -msgstr "Se ha superado el límite de instantáneas por volumen" - -#, python-format -msgid "Exception appending meta volume to target volume %(volumename)s." -msgstr "" -"Excepción al añadir el volumen meta al volumen de destino %(volumename)s." - -#, python-format -msgid "" -"Exception during create element replica. Clone name: %(cloneName)s Source " -"name: %(sourceName)s Extra specs: %(extraSpecs)s " -msgstr "" -"Exception durante la creación de réplica de elemento. 
Nombre de clonación: " -"%(cloneName)s Nombre de origen : %(sourceName)s Especificaciones " -"adicionales: %(extraSpecs)s " - -#, python-format -msgid "Exception in _select_ds_for_volume: %s." -msgstr "Excepción en _select_ds_for_volume: %s." - -#, python-format -msgid "Exception while forming the zone string: %s." -msgstr "Exception al formar la cadena de zonas: %s." - -#, python-format -msgid "Exception: %s" -msgstr "Excepción: %s" - -#, python-format -msgid "Expected a uuid but received %(uuid)s." -msgstr "Se esperaba un uuid pero se ha recibido %(uuid)s." - -#, python-format -msgid "Expected exactly one node called \"%s\"" -msgstr "Se esperaba exactamente un volumen denominado \"%s\"" - -#, python-format -msgid "Expected integer for node_count, svcinfo lsiogrp returned: %(node)s." -msgstr "" -"Se esperaba un entero para node_count, svcinfo lsiogrp ha devuelto: %(node)s." - -#, python-format -msgid "Expected no output from CLI command %(cmd)s, got %(out)s." -msgstr "" -"No se esperaba ninguna salida del mandato CLI %(cmd)s, se ha obtenido " -"%(out)s." - -#, python-format -msgid "" -"Expected single vdisk returned from lsvdisk when filtering on vdisk_UID. " -"%(count)s were returned." -msgstr "" -"Se esperaba que se devolviera un disco virtual único de lsvdisk al filtrar " -"en vdisk_UID. Se han devuelto %(count)s." - -#, python-format -msgid "Expected volume size was %d" -msgstr "El tamaño de volumen esperado era %d" - -#, python-format -msgid "" -"Export backup aborted, expected backup status %(expected_status)s but got " -"%(actual_status)s." -msgstr "" -"La exportación de copia de seguridad ha terminado anormalmente, se esperaba " -"el estado de copia de seguridad %(expected_status)s pero se ha obtenido " -"%(actual_status)s." - -#, python-format -msgid "" -"Export record aborted, the backup service currently configured " -"[%(configured_service)s] is not the backup service that was used to create " -"this backup [%(backup_service)s]." 
-msgstr "" -"La exportación de registro ha terminado anormalmente, el servicio de copia " -"de seguridad configurado actualmente [%(configured_service)s] no es el " -"servicio de copia de seguridad que se usó para crear esta copia de seguridad " -"[%(backup_service)s]." - -msgid "Extend volume error." -msgstr "Error al ampliar el volumen." - -msgid "" -"Extend volume is only supported for this driver when no snapshots exist." -msgstr "" -"Ampliar el volumen solo es compatible para este controlador cuando no " -"existen instantáneas." - -msgid "Extend volume not implemented" -msgstr "Ampliar el volumen no se ha implementado" - -msgid "FAST is not supported on this array." -msgstr "FAST no se admite en esta matriz." - -msgid "FC is the protocol but wwpns are not supplied by OpenStack." -msgstr "FC es el protocolo pero OpenStack no proporciona wwpns." - -#, python-format -msgid "Faield to unassign %(volume)s" -msgstr "No se ha podido desasignar %(volume)s" - -#, python-format -msgid "Fail to create cache volume %(volume)s. Error: %(err)s" -msgstr "" -"No se ha podido crear el volumen de la memoria caché %(volume)s. Error: " -"%(err)s" - -#, python-format -msgid "Failed adding connection for fabric=%(fabric)s: Error: %(err)s" -msgstr "No se ha podido añadir conexión para tejido=%(fabric)s: Error:%(err)s" - -msgid "Failed cgsnapshot" -msgstr "Ha fallado cgsnapshot" - -#, python-format -msgid "Failed creating snapshot for group: %(response)s." -msgstr "No se ha podido crear la instantánea del grupo: %(response)s." - -#, python-format -msgid "Failed creating snapshot for volume %(volname)s: %(response)s." -msgstr "" -"No se ha podido crear la instantánea del volumen %(volname)s: %(response)s." - -#, python-format -msgid "Failed getting active zone set from fabric %s." -msgstr "No se ha podido obtener el conjunto de zonas activas del tejido %s." - -#, python-format -msgid "Failed getting details for pool %s." 
-msgstr "No se han podido obtener detalles para la agrupación %s." - -#, python-format -msgid "Failed removing connection for fabric=%(fabric)s: Error: %(err)s" -msgstr "" -"No se ha podido eliminar conexión para tejido=%(fabric)s: Error:%(err)s" - -#, python-format -msgid "Failed to Extend Volume %(volname)s" -msgstr "No se ha podido ampliar el volumen %(volname)s" - -#, python-format -msgid "Failed to Login to 3PAR (%(url)s) because %(err)s" -msgstr "No se ha podido iniciar la sesión en 3PAR (%(url)s) debido a %(err)s" - -msgid "Failed to access active zoning configuration." -msgstr "" -"No se ha podido acceder a configuración de distribución en zonas activa." - -#, python-format -msgid "Failed to access zoneset status:%s" -msgstr "No se ha podido acceder al estado de zoneset:%s" - -#, python-format -msgid "" -"Failed to acquire a resource lock. (serial: %(serial)s, inst: %(inst)s, ret: " -"%(ret)s, stderr: %(err)s)" -msgstr "" -"No se ha podido obtener un bloqueo de recurso. (serie: %(serial)s, inst: " -"%(inst)s, ret: %(ret)s, stderr: %(err)s)" - -msgid "Failed to add the logical device." -msgstr "No se ha podido añadir el dispositivo lógico." - -#, python-format -msgid "" -"Failed to add volume %(volumeName)s to consistency group %(cgName)s. Return " -"code: %(rc)lu. Error: %(error)s." -msgstr "" -"No se ha podido añadir el volumen %(volumeName)s al grupo de consistencia " -"%(cgName)s. Código de retorno: %(rc)lu. Error: %(error)s." - -msgid "Failed to add zoning configuration." -msgstr "No se ha podido añadir configuración de distribución en zonas." - -#, python-format -msgid "" -"Failed to assign the iSCSI initiator IQN. (port: %(port)s, reason: " -"%(reason)s)" -msgstr "" -"Error al asignar el IQN del iniciador iSCSI. (puerto: %(port)s, razón: " -"%(reason)s)" - -#, python-format -msgid "Failed to associate qos_specs: %(specs_id)s with type %(type_id)s." -msgstr "Error al asociar qos_specs: %(specs_id)s con el tipo %(type_id)s." 
- -#, python-format -msgid "Failed to attach iSCSI target for volume %(volume_id)s." -msgstr "" -"Se ha encontrado un error en al conectar el destino iSCSI para el volumen " -"%(volume_id)s." - -#, python-format -msgid "Failed to backup volume metadata - %s" -msgstr "" -"No se ha podido realizar la copia de seguridad de los metadatos de volumen - " -"%s" - -#, python-format -msgid "" -"Failed to backup volume metadata - Metadata backup object 'backup.%s.meta' " -"already exists" -msgstr "" -"No se ha podido hacer copia de seguridad de metadatos de volumen - Objeto de " -"copia de seguridad de metadatos 'backup.%s.meta' ya existe" - -#, python-format -msgid "Failed to clone volume from snapshot %s." -msgstr "No se ha podido clonar el volumen de la instantánea %s." - -#, python-format -msgid "Failed to connect to %(vendor_name)s Array %(host)s: %(err)s" -msgstr "No se ha podido conectar a %(vendor_name)s Matriz %(host)s: %(err)s" - -msgid "Failed to connect to Dell REST API" -msgstr "No se ha podido conectar con la API REST de Dell" - -msgid "Failed to connect to array" -msgstr "No se ha podido conectar a la matriz" - -#, python-format -msgid "Failed to connect to sheep daemon. addr: %(addr)s, port: %(port)s" -msgstr "" -"No se ha podido conectar al daemon sheep. Dirección: %(addr)s, puerto: " -"%(port)s" - -#, python-format -msgid "Failed to copy image to volume: %(reason)s" -msgstr "Copiar la imagen al volumen ha fallado: %(reason)s" - -#, python-format -msgid "Failed to copy metadata to volume: %(reason)s" -msgstr "No se ha podido copiar los metadatos a volumen: %(reason)s" - -msgid "Failed to copy volume, destination device unavailable." -msgstr "Error al copiar el volumen, dispositivo de destino no disponible." - -msgid "Failed to copy volume, source device unavailable." -msgstr "Error al copiar el volumen, dispositivo de origen no disponible." - -#, python-format -msgid "Failed to create CG %(cgName)s from snapshot %(cgSnapshot)s." 
-msgstr "" -"No se ha podido crear el grupo de consistencia %(cgName)s desde la " -"instantánea %(cgSnapshot)s." - -#, python-format -msgid "Failed to create IG, %s" -msgstr "No se ha podido crear IG, %s" - -#, python-format -msgid "Failed to create Volume Group: %(vg_name)s" -msgstr "No se ha podido crear el grupo de volumen: %(vg_name)s" - -#, python-format -msgid "" -"Failed to create a file. (file: %(file)s, ret: %(ret)s, stderr: %(err)s)" -msgstr "" -"Error al crear un archivo. (archivo: %(file)s, ret: %(ret)s, stderr: %(err)s)" - -#, python-format -msgid "Failed to create a temporary snapshot for volume %s." -msgstr "No se ha podido crear una instantánea temporal para el volumen %s." - -msgid "Failed to create api volume flow." -msgstr "No se ha podido crear flujo de volumen de la API." - -#, python-format -msgid "Failed to create cg snapshot %(id)s due to %(reason)s." -msgstr "No se ha podido crear el cgsnapshot %(id)s debido a %(reason)s." - -#, python-format -msgid "Failed to create consistency group %(id)s due to %(reason)s." -msgstr "" -"No se ha podido crear el grupo de consistencia %(id)s debido a %(reason)s." - -#, python-format -msgid "Failed to create consistency group %(id)s:%(ret)s." -msgstr "No se ha podido crear el grupo de consistencia %(id)s:%(ret)s." - -#, python-format -msgid "" -"Failed to create consistency group %s because VNX consistency group cannot " -"accept compressed LUNs as members." -msgstr "" -"No se ha podido crear el grupo de consistencia %s porque el grupo de " -"consistencia VNX no puede aceptar LUN comprimidos como miembros." - -#, python-format -msgid "Failed to create consistency group: %(cgName)s." -msgstr "No se ha podido crear el grupo de consistencia: %(cgName)s." - -#, python-format -msgid "Failed to create consistency group: %(cgid)s. Error: %(excmsg)s." -msgstr "" -"No se ha podido crear el grupo de consistencia: %(cgid)s. Error: %(excmsg)s." 
- -#, python-format -msgid "" -"Failed to create consistency group: %(consistencyGroupName)s Return code: " -"%(rc)lu. Error: %(error)s." -msgstr "" -"No se ha podido crear el grupo de consistencia: %(consistencyGroupName)s " -"Código de retorno: %(rc)lu. Error: %(error)s." - -#, python-format -msgid "Failed to create hardware id(s) on %(storageSystemName)s." -msgstr "No se han podido crear los ID de hardware en %(storageSystemName)s." - -#, python-format -msgid "" -"Failed to create host: %(name)s. Please check if it exists on the array." -msgstr "" -"No se ha podido crear el host: %(name)s. Compruebe si existe en la matriz." - -#, python-format -msgid "Failed to create hostgroup: %(name)s. Check if it exists on the array." -msgstr "" -"No se ha podido crear el grupo de host: %(name)s. Compruebe si existe en la " -"matriz." - -msgid "Failed to create iqn." -msgstr "No se ha podido crear el iqn." - -msgid "Failed to create iscsi client" -msgstr "No se ha podido crear el cliente iSCSI" - -#, python-format -msgid "Failed to create iscsi target for volume %(volume_id)s." -msgstr "" -"Se ha encontrado un error al crear el destino iscsi para el volumen " -"%(volume_id)s." - -msgid "Failed to create manage existing flow." -msgstr "No se ha podido crear la gestión del flujo existente." - -msgid "Failed to create manage_existing flow." -msgstr "No se ha podido crear el flujo manage_existing." - -msgid "Failed to create map on mcs, no channel can map." -msgstr "" -"No se ha podido crear la correlación en mcs, ningún canal se puede " -"correlacionar." - -msgid "Failed to create map." -msgstr "No se ha podido crear la correlación." - -#, python-format -msgid "Failed to create metadata for volume: %(reason)s" -msgstr "No se ha podido crear los metadatos para volumen: %(reason)s" - -msgid "Failed to create partition." -msgstr "No se ha podido crear la partición." - -#, python-format -msgid "Failed to create qos_specs: %(name)s with specs %(qos_specs)s." 
-msgstr "Error al crear qos_specs: %(name)s con especificaciones %(qos_specs)s." - -msgid "Failed to create replica." -msgstr "No se ha podido crear la réplica." - -msgid "Failed to create scheduler manager volume flow" -msgstr "No se ha podido crear flujo de volumen de gestor de planificador" - -#, python-format -msgid "Failed to create snapshot %s" -msgstr "No se ha podido crear la instantánea %s" - -#, python-format -msgid "Failed to create snapshot for cg: %(cgName)s." -msgstr "No se ha podido crear una instantánea para cg: %(cgName)s." - -#, python-format -msgid "Failed to create snapshot for volume %s." -msgstr "No se ha podido crear una instantánea para el volumen %s." - -#, python-format -msgid "Failed to create snapshot policy on volume %(vol)s: %(res)s." -msgstr "" -"No se ha podido crear la política de instantáneas en el volumen %(vol)s: " -"%(res)s." - -#, python-format -msgid "Failed to create snapshot resource area on volume %(vol)s: %(res)s." -msgstr "" -"No se ha podido crear el área de recursos de la instantánea en el volumen " -"%(vol)s: %(res)s." - -msgid "Failed to create snapshot." -msgstr "No se ha podido crear la instantánea." - -#, python-format -msgid "" -"Failed to create snapshot. CloudByte volume information not found for " -"OpenStack volume [%s]." -msgstr "" -"No se ha podido crear la instantánea. No se ha encontrado la información del " -"volumen de CloudByte para el volumen de OpenStack [%s]." - -#, python-format -msgid "Failed to create south bound connector for %s." -msgstr "No se ha podido crear la conexión en sentido sur para %s." - -#, python-format -msgid "Failed to create storage group %(storageGroupName)s." -msgstr "No se ha podido crear el grupo de almacenamiento %(storageGroupName)s." 
- -#, python-format -msgid "Failed to create thin pool, error message was: %s" -msgstr "" -"No se ha podido crear la agrupación ligera, el mensaje de error ha sido: %s" - -#, python-format -msgid "Failed to create volume %s" -msgstr "Error al crear volumen %s" - -#, python-format -msgid "Failed to delete SI for volume_id: %(volume_id)s because it has pair." -msgstr "" -"No se puede suprimir SI de volume_id: %(volume_id)s porque tiene un par." - -#, python-format -msgid "Failed to delete a logical device. (LDEV: %(ldev)s, reason: %(reason)s)" -msgstr "" -"No se ha podido suprimir un dispositivo lógico. (LDEV: %(ldev)s, razón: " -"%(reason)s)" - -#, python-format -msgid "Failed to delete cgsnapshot %(id)s due to %(reason)s." -msgstr "No se ha podido suprimir el cgsnapshot %(id)s debido a %(reason)s." - -#, python-format -msgid "Failed to delete consistency group %(id)s due to %(reason)s." -msgstr "" -"No se ha podido suprimir el grupo de consistencia %(id)s debido a %(reason)s." - -#, python-format -msgid "Failed to delete consistency group: %(cgName)s." -msgstr "No se ha podido suprimir el grupo de consistencia: %(cgName)s." - -#, python-format -msgid "" -"Failed to delete consistency group: %(consistencyGroupName)s Return code: " -"%(rc)lu. Error: %(error)s." -msgstr "" -"No se ha podido suprimir el grupo de consistencia: %(consistencyGroupName)s " -"Código de retorno: %(rc)lu. Error: %(error)s." - -msgid "Failed to delete device." -msgstr "No se ha podido suprimir el dispositivo." - -#, python-format -msgid "" -"Failed to delete fileset for consistency group %(cgname)s. Error: %(excmsg)s." -msgstr "" -"No se ha podido suprimir el conjunto de archivos para el grupo de " -"consistencia %(cgname)s. Error: %(excmsg)s." - -msgid "Failed to delete iqn." -msgstr "No se ha podido suprimir el iqn." - -msgid "Failed to delete map." -msgstr "No se ha podido suprimir la correlación." - -msgid "Failed to delete partition." -msgstr "No se ha podido suprimir la partición." 
- -msgid "Failed to delete replica." -msgstr "No se ha podido suprimir la réplica." - -#, python-format -msgid "Failed to delete snapshot %s" -msgstr "No se ha podido suprimir la instantánea %s" - -#, python-format -msgid "Failed to delete snapshot for cg: %(cgId)s." -msgstr "No se ha podido suprimir una instantánea para cg: %(cgId)s." - -#, python-format -msgid "Failed to delete snapshot for snapshot_id: %s because it has pair." -msgstr "" -"No se puede suprimir la instantánea de snapshot_id: %s porque tiene un par." - -msgid "Failed to delete snapshot." -msgstr "No se ha podido suprimir la instantánea." - -#, python-format -msgid "Failed to delete volume %(volumeName)s." -msgstr "No se ha podido suprimir el volumen %(volumeName)s." - -#, python-format -msgid "" -"Failed to delete volume for volume_id: %(volume_id)s because it has pair." -msgstr "" -"No se ha podido suprimir el volumen de volume_id: %(volume_id)s porque tiene " -"un par." - -#, python-format -msgid "Failed to detach iSCSI target for volume %(volume_id)s." -msgstr "" -"Se ha encontrado un error al desconectar el destino iSCSI para el volumen " -"%(volume_id)s." - -msgid "Failed to determine blockbridge API configuration" -msgstr "No se ha podido determinar la configuración de API Blockbridge" - -msgid "Failed to disassociate qos specs." -msgstr "Error al desasociar especificaciones de qos." - -#, python-format -msgid "Failed to disassociate qos_specs: %(specs_id)s with type %(type_id)s." -msgstr "Error al desasociar qos_specs: %(specs_id)s con el tipo %(type_id)s." - -#, python-format -msgid "" -"Failed to ensure snapshot resource area, could not locate volume for id %s" -msgstr "" -"No se ha podido garantizar el área de recursos de la instantánea, no se ha " -"encontrado el volumen para el ID %s" - -msgid "Failed to establish a stable connection" -msgstr "No se ha podido establecer una conexión estable." 
- -msgid "Failed to establish connection with Coho cluster" -msgstr "No se ha podido establecer conexión con el clúster Coho." - -#, python-format -msgid "" -"Failed to execute CloudByte API [%(cmd)s]. Http status: %(status)s, Error: " -"%(error)s." -msgstr "" -"No se ha podido ejecutar la API de CloudByte [%(cmd)s]. Estado HTTP: " -"%(status)s, Error: %(error)s." - -msgid "Failed to execute common command." -msgstr "No se ha podido ejecutar el mandato común." - -#, python-format -msgid "Failed to export for volume: %(reason)s" -msgstr "No se ha podido exportar para volumen: %(reason)s" - -#, python-format -msgid "Failed to extend volume %(name)s, Error msg: %(msg)s." -msgstr "" -"No se ha podido ampliar el volumen existente %(name)s, Mensaje de error: " -"%(msg)s." - -msgid "Failed to find QoSnode" -msgstr "No se ha podido encontrar QoSnode" - -msgid "Failed to find Storage Center" -msgstr "No se ha podido encontrar Storage Center" - -msgid "Failed to find a vdisk copy in the expected pool." -msgstr "" -"No se ha podido encontrar una copia de vdisk en la agrupación esperada." - -msgid "Failed to find account for volume." -msgstr "No se ha podido encontrar la cuenta para el volumen." - -#, python-format -msgid "Failed to find fileset for path %(path)s, command output: %(cmdout)s." -msgstr "" -"No se ha podido encontrar el conjunto de archivos para la vía de acceso " -"%(path)s, salida de mandato: %(cmdout)s." - -#, python-format -msgid "Failed to find group snapshot named: %s" -msgstr "No se ha podido encontrar la instantánea de grupo denominada: %s" - -#, python-format -msgid "Failed to find host %s." -msgstr "No se ha podido encontrar el host %s." - -#, python-format -msgid "Failed to find iSCSI initiator group containing %(initiator)s." -msgstr "" -"No se ha podido encontrar el grupo de iniciadores iSCSI que contiene " -"%(initiator)s." - -#, python-format -msgid "Failed to get CloudByte account details for account [%s]." 
-msgstr "" -"No se han podido obtener los detalles de cuenta de CloudByte para la cuenta " -"[%s]." - -#, python-format -msgid "Failed to get LUN target details for the LUN %s" -msgstr "" -"Se ha encontrado un error en la obtención de los detalles de destino de LUN " -"para el LUN %s" - -#, python-format -msgid "Failed to get LUN target details for the LUN %s." -msgstr "No se han podido obtener detalles de destino de LUN para el LUN %s." - -#, python-format -msgid "Failed to get LUN target list for the LUN %s" -msgstr "No se ha podido obtener la lista de destino de LUN para el LUN %s" - -#, python-format -msgid "Failed to get Partition ID for volume %(volume_id)s." -msgstr "No se ha podido obtener el ID de partición del volumen %(volume_id)s." - -#, python-format -msgid "Failed to get Raid Snapshot ID from Snapshot %(snapshot_id)s." -msgstr "" -"No se ha podido obtener el ID de instantánea Raid de la instantánea " -"%(snapshot_id)s." - -#, python-format -msgid "Failed to get Raid Snapshot ID from snapshot: %(snapshot_id)s." -msgstr "" -"No se ha podido obtener el ID de instantánea Raid de la instantánea " -"%(snapshot_id)s." - -msgid "Failed to get SplitMirror." -msgstr "No se ha podido obtener SplitMirror." - -#, python-format -msgid "" -"Failed to get a storage resource. The system will attempt to get the storage " -"resource again. (resource: %(resource)s)" -msgstr "" -"Error al obtener un recurso de almacenamiento. El sistema intentará obtener " -"el recurso de almacenamiento otra vez. (recurso: %(resource)s)" - -#, python-format -msgid "Failed to get all associations of qos specs %s" -msgstr "Error al obtener todas las asociaciones de qos specs %s" - -msgid "Failed to get channel info." -msgstr "No se ha podido obtener información de canal." - -#, python-format -msgid "Failed to get code level (%s)." -msgstr "No se ha podido obtener el nivel de código (%s)." - -msgid "Failed to get device info." -msgstr "No se ha podido obtener información de dispositivo." 
- -#, python-format -msgid "Failed to get domain because CPG (%s) doesn't exist on array." -msgstr "No ha obtenido dominio debido a que CPG (%s) no existe en la matriz." - -msgid "Failed to get image snapshots." -msgstr "No se han podido obtener las instantáneas de imagen." - -#, python-format -msgid "Failed to get ip on Channel %(channel_id)s with volume: %(volume_id)s." -msgstr "" -"No se ha podido obtener la IP en el canal %(channel_id)s con el volumen: " -"%(volume_id)s." - -msgid "Failed to get iqn info." -msgstr "No se ha podido obtener información de iqn." - -msgid "Failed to get license info." -msgstr "No se ha podido obtener información de licencia." - -msgid "Failed to get lv info." -msgstr "No se ha podido obtener información de volumen lógico." - -msgid "Failed to get map info." -msgstr "No se ha podido obtener información de correlación." - -msgid "Failed to get migration task." -msgstr "No se ha podido obtener la tarea de migración." - -msgid "Failed to get model update from clone" -msgstr "" -"Se ha encontrado un error en la obtención de la actualización del modelo " -"desde el clon" - -msgid "Failed to get name server info." -msgstr "No se ha podido obtener información de servidor de nombres." - -msgid "Failed to get network info." -msgstr "No se ha podido obtener información de red." - -#, python-format -msgid "Failed to get new part id in new pool: %(pool_id)s." -msgstr "" -"No se ha podido obtener el nuevo ID de part en la nueva agrupación: " -"%(pool_id)s." - -msgid "Failed to get partition info." -msgstr "No se ha podido obtener información de partición." - -#, python-format -msgid "Failed to get pool id with volume %(volume_id)s." -msgstr "" -"No se ha podido obtenr el ID de agrupación con el volumen %(volume_id)s." - -#, python-format -msgid "Failed to get remote copy information for %(volume)s due to %(err)s." -msgstr "" -"No se ha podido obtener la información de la copia remota para %(volume)s " -"debido a %(err)s." 
- -#, python-format -msgid "" -"Failed to get remote copy information for %(volume)s. Exception: %(err)s." -msgstr "" -"No se ha podido obtener la información de la copia remota para %(volume)s. " -"Excepción: %(err)s." - -msgid "Failed to get replica info." -msgstr "No se ha podido obtener información de réplica." - -msgid "Failed to get show fcns database info." -msgstr "No se ha podido obtener información de base de datos fcns." - -#, python-format -msgid "Failed to get size of volume %s" -msgstr "No se ha podido obtener el tamaño del volumen %s" - -#, python-format -msgid "Failed to get snapshot for volume %s." -msgstr "No se ha podido obtener una instantánea para el volumen %s." - -msgid "Failed to get snapshot info." -msgstr "No se ha podido obtener información de instantánea." - -#, python-format -msgid "Failed to get target IQN for the LUN %s" -msgstr "" -"Se ha encontrado un error en la obtención del IQN de destino para el LUN %s" - -msgid "Failed to get target LUN of SplitMirror." -msgstr "No se ha podido obtener el LUN de destino de SplitMirror." - -#, python-format -msgid "Failed to get target portal for the LUN %s" -msgstr "" -"Se ha encontrado un error en la obtención del portal de destino para el LUN " -"%s" - -msgid "Failed to get targets" -msgstr "No se han podido obtener los destinos" - -msgid "Failed to get wwn info." -msgstr "No se ha podido obtener información de wwn." - -#, python-format -msgid "" -"Failed to get, create or add volume %(volumeName)s to masking view " -"%(maskingViewName)s. The error message received was %(errorMessage)s." -msgstr "" -"No se ha podido obtener, crear o añadir el volumen %(volumeName)s a la vista " -"de máscara %(maskingViewName)s. El mensaje de error recibido ha sido " -"%(errorMessage)s." - -msgid "Failed to identify volume backend." -msgstr "No se ha podido identificar el programa de fondo de volumen." - -#, python-format -msgid "Failed to link fileset for the share %(cgname)s. Error: %(excmsg)s." 
-msgstr "" -"No se ha podido enlazar el conjunto de archivos para el %(cgname)s " -"compartido. Error: %(excmsg)s." - -#, python-format -msgid "Failed to log on %s Array (invalid login?)." -msgstr "" -"No se ha podido iniciar sesión en %s Matriz (¿inicio de sesión no válido?)." - -#, python-format -msgid "Failed to login for user %s." -msgstr "Error al iniciar sesión para el usuario %s." - -msgid "Failed to login with all rest URLs." -msgstr "No se ha podido iniciar sesión con todos los URL rest." - -#, python-format -msgid "" -"Failed to make a request to Datera cluster endpoint due to the following " -"reason: %s" -msgstr "" -"No se ha podido hacer una solicitud al punto final del clúster de Datera " -"debido al siguiente motivo: %s" - -msgid "Failed to manage api volume flow." -msgstr "No se ha podido gestionar el flujo de volumen de la API." - -#, python-format -msgid "" -"Failed to manage existing %(type)s %(name)s, because reported size %(size)s " -"was not a floating-point number." -msgstr "" -"No se ha podido gestionar %(type)s %(name)s existentes, porque el tamaño " -"reportado %(size)s no era un número de coma flotante." - -#, python-format -msgid "" -"Failed to manage existing volume %(name)s, because of error in getting " -"volume size." -msgstr "" -"No se ha podido gestionar el volumen existente %(name)s, debido a un error " -"en la obtención del tamaño del volumen." - -#, python-format -msgid "" -"Failed to manage existing volume %(name)s, because rename operation failed: " -"Error msg: %(msg)s." -msgstr "" -"No se ha podido gestionar el volumen existente %(name)s, porque la operación " -"de cambio de nombre ha fallado: Mensaje de error: %(msg)s." - -#, python-format -msgid "" -"Failed to manage existing volume %(name)s, because reported size %(size)s " -"was not a floating-point number." -msgstr "" -"No se ha podido gestionar el volumen existente %(name)s, porque el archivo " -"indicado %(size)s no era un número de coma flotante." 
- -#, python-format -msgid "" -"Failed to manage existing volume due to I/O group mismatch. The I/O group of " -"the volume to be managed is %(vdisk_iogrp)s. I/O groupof the chosen type is " -"%(opt_iogrp)s." -msgstr "" -"No se ha podido gestionar el volumen existente debido a que el grupo de E/S " -"no coincide. El grupo de E/S del volumen a gestionar es %(vdisk_iogrp)s. El " -"grupo de E/S del tipo elegido es %(opt_iogrp)s." - -#, python-format -msgid "" -"Failed to manage existing volume due to the pool of the volume to be managed " -"does not match the backend pool. Pool of the volume to be managed is " -"%(vdisk_pool)s. Pool of the backend is %(backend_pool)s." -msgstr "" -"No se ha podido gestionar el volumen existente debido a que la agrupación " -"del volumen a gestionar no coincide con la agrupación del programa fondo. " -"La agrupación del volumen a gestionar es %(vdisk_pool)s. La agrupación del " -"programa fondo es %(backend_pool)s." - -msgid "" -"Failed to manage existing volume due to the volume to be managed is " -"compress, but the volume type chosen is not compress." -msgstr "" -"No se ha podido gestionar el volumen existente debido a que el volumen a " -"gestionar es comprimido, pero el tipo de volumen elegido es no comprimido." - -msgid "" -"Failed to manage existing volume due to the volume to be managed is not " -"compress, but the volume type chosen is compress." -msgstr "" -"No se ha podido gestionar el volumen existente debido a que el volumen a " -"gestionar es no comprimido, pero el tipo de volumen elegido es comprimido." - -msgid "" -"Failed to manage existing volume due to the volume to be managed is not in a " -"valid I/O group." -msgstr "" -"No se ha podido gestionar el volumen existente debido a que el volumen a " -"gestionar no es un grupo de E/S válido." - -msgid "" -"Failed to manage existing volume due to the volume to be managed is thick, " -"but the volume type chosen is thin." 
-msgstr "" -"No se ha podido gestionar el volumen existente debido a que el volumen a " -"gestionar es pesado, pero el tipo de volumen elegido es ligero." - -msgid "" -"Failed to manage existing volume due to the volume to be managed is thin, " -"but the volume type chosen is thick." -msgstr "" -"No se ha podido gestionar el volumen existente debido a que el volumen a " -"gestionar es ligero, pero el tipo de volumen elegido es pesado." - -#, python-format -msgid "Failed to manage volume %s." -msgstr "No se ha podido gestionar el volumen %s." - -#, python-format -msgid "" -"Failed to map a logical device. (LDEV: %(ldev)s, LUN: %(lun)s, port: " -"%(port)s, id: %(id)s)" -msgstr "" -"Error al correlacionar un dispositivo lógico. (LDEV: %(ldev)s, LUN: %(lun)s, " -"puerto: %(port)s, id: %(id)s)" - -msgid "Failed to migrate volume for the first time." -msgstr "No se ha podido migrar el volumen por primera vez" - -msgid "Failed to migrate volume for the second time." -msgstr "No se ha podido migrar el volumen por segunda vez" - -#, python-format -msgid "Failed to move LUN mapping. Return code: %s" -msgstr "No se ha podido mover la correlación de LUN. Código de retorno: %s" - -#, python-format -msgid "Failed to move volume %s." -msgstr "No se ha podido mover el volumen %s." - -#, python-format -msgid "Failed to open a file. (file: %(file)s, ret: %(ret)s, stderr: %(err)s)" -msgstr "" -"Error al abrir un archivo. (archivo: %(file)s, ret: %(ret)s, stderr: %(err)s)" - -#, python-format -msgid "" -"Failed to parse CLI output:\n" -" command: %(cmd)s\n" -" stdout: %(out)s\n" -" stderr: %(err)s." -msgstr "" -"No se ha podido analizar la salida de CLI:\n" -" mandato: %(cmd)s\n" -" stdout: %(out)s\n" -" stderr: %(err)s." 
- -msgid "" -"Failed to parse the configuration option 'keystone_catalog_info', must be in " -"the form ::" -msgstr "" -"Error al analizar la opción de configuración 'keystone_catalog_info'; debe " -"tener el formato ::" - -msgid "" -"Failed to parse the configuration option 'swift_catalog_info', must be in " -"the form ::" -msgstr "" -"Error al analizar la opción de configuración 'swift_catalog_info'; debe " -"tener el formato ::" - -#, python-format -msgid "" -"Failed to perform a zero-page reclamation. (LDEV: %(ldev)s, reason: " -"%(reason)s)" -msgstr "" -"Error al realizar una reclamación de página cero. (LDEV: %(ldev)s, razón: " -"%(reason)s)" - -#, python-format -msgid "Failed to remove export for volume %(volume)s: %(reason)s" -msgstr "" -"No se ha podido eliminar la exportación para el volumen %(volume)s: " -"%(reason)s" - -#, python-format -msgid "Failed to remove iscsi target for volume %(volume_id)s." -msgstr "" -"Se ha encontrado un error al eliminar el destino iscsi para el volumen " -"%(volume_id)s." - -#, python-format -msgid "" -"Failed to remove volume %(volumeName)s from consistency group %(cgName)s. " -"Return code: %(rc)lu. Error: %(error)s." -msgstr "" -"No se ha podido eliminar el volumen %(volumeName)s del grupo de consistencia " -"%(cgName)s. Código de retorno: %(rc)lu. Error: %(error)s." - -#, python-format -msgid "Failed to remove volume %(volumeName)s from default SG." -msgstr "" -"No se ha podido eliminar el volumen %(volumeName)s del SG predeterminado." - -#, python-format -msgid "Failed to remove volume %(volumeName)s from default SG: %(volumeName)s." -msgstr "" -"No se ha podido eliminar el volumen %(volumeName)s del SG predeterminado: " -"%(volumeName)s." - -#, python-format -msgid "" -"Failed to remove: %(volumename)s. from the default storage group for FAST " -"policy %(fastPolicyName)s." -msgstr "" -"No se ha podido eliminar %(volumename)s del grupo de almacenamiento " -"predeterminado para la política FAST %(fastPolicyName)s." 
- -#, python-format -msgid "" -"Failed to rename logical volume %(name)s, error message was: %(err_msg)s" -msgstr "" -"No se ha podido renombrar el volumen lógico %(name)s, el mensaje de error " -"era: %(err_msg)s" - -#, python-format -msgid "Failed to retrieve active zoning configuration %s" -msgstr "" -"No se ha podido recuperar la configuración de distribución en zonas activas " -"%s" - -#, python-format -msgid "" -"Failed to set CHAP authentication for target IQN %(iqn)s. Details: %(ex)s" -msgstr "" -"No se ha podido establecer la autenticación de CHAP para el IQN de destino " -"%(iqn)s. Detalles: %(ex)s" - -#, python-format -msgid "Failed to set QoS for existing volume %(name)s, Error msg: %(msg)s." -msgstr "" -"No se ha podido establecer QoS para el volumen existente %(name)s, Mensaje " -"de error: %(msg)s." - -msgid "Failed to set attribute 'Incoming user' for SCST target." -msgstr "" -"No se ha podido establecer el atributo 'Incoming user' para el destino SCST." - -msgid "Failed to set partition." -msgstr "No se ha podido establecer la partición." - -#, python-format -msgid "" -"Failed to set permissions for the consistency group %(cgname)s. Error: " -"%(excmsg)s." -msgstr "" -"No se han podido establecer permisos para el grupo de consistencia " -"%(cgname)s. Error: %(excmsg)s." - -#, python-format -msgid "" -"Failed to specify a logical device for the volume %(volume_id)s to be " -"unmapped." -msgstr "" -"No se ha podido especificar un dispositivo lógico para el volumen " -"%(volume_id)s cuya correlación se va a anular." - -#, python-format -msgid "" -"Failed to specify a logical device to be deleted. (method: %(method)s, id: " -"%(id)s)" -msgstr "" -"No se ha podido especificar un dispositivo lógico para suprimir. (method: " -"%(method)s, id: %(id)s)" - -msgid "Failed to terminate migrate session." 
-msgstr "No se ha podido terminar la sesión de migración" - -#, python-format -msgid "Failed to unbind volume %(volume)s" -msgstr "No se ha podido desenlazar el volumen %(volume)s" - -#, python-format -msgid "" -"Failed to unlink fileset for consistency group %(cgname)s. Error: %(excmsg)s." -msgstr "" -"No se ha podido desenlazar el conjunto de archivos para el grupo de " -"consistencia %(cgname)s. Error: %(excmsg)s." - -#, python-format -msgid "Failed to unmap a logical device. (LDEV: %(ldev)s, reason: %(reason)s)" -msgstr "" -"No se ha podido anular correlación de un dispositivo lógico. (LDEV: " -"%(ldev)s, razón: %(reason)s)" - -#, python-format -msgid "Failed to update consistency group: %(cgName)s." -msgstr "No se ha podido actualizar el grupo de consistencia :%(cgName)s." - -#, python-format -msgid "Failed to update metadata for volume: %(reason)s" -msgstr "No se ha podido actualizar los metadatos para volumen: %(reason)s" - -msgid "Failed to update or delete zoning configuration" -msgstr "" -"No se ha podido actualizar o suprimir la configuración de distribución en " -"zonas" - -msgid "Failed to update or delete zoning configuration." -msgstr "" -"No se ha podido actualizar o suprimir la configuración de distribución en " -"zonas." - -#, python-format -msgid "Failed to update qos_specs: %(specs_id)s with specs %(qos_specs)s." -msgstr "" -"Error al actualizar qos_specs: %(specs_id)s con especificaciones " -"%(qos_specs)s." - -msgid "Failed to update quota usage while retyping volume." -msgstr "No se ha podido actualizar el uso de cuota al rescribir el volumen." - -msgid "Failed to update snapshot." -msgstr "Fallo al actualizar la instantánea." - -#, python-format -msgid "" -"Failed updating volume %(vol_id)s metadata using the provided %(src_type)s " -"%(src_id)s metadata" -msgstr "" -"Error al actualizar metadatos de volumen %(vol_id)s con los %(src_type)s " -"%(src_id)s metadatos proporcionados" - -#, python-format -msgid "Failure creating volume %s." 
-msgstr "Anomalía al crear el volumen %s." - -#, python-format -msgid "Failure getting LUN info for %s." -msgstr "Error al obtener la información de LUN para %s." - -#, python-format -msgid "Failure moving new cloned LUN to %s." -msgstr "Error al mover el nuevo LUN clonado a %s." - -#, python-format -msgid "Failure staging LUN %s to tmp." -msgstr "Error de transferencia de LUN %s a tmp." - -#, python-format -msgid "Fexvisor failed to add volume %(id)s due to %(reason)s." -msgstr "Fexvisor no ha podido añadir el volumen %(id)s debido a %(reason)s." - -#, python-format -msgid "" -"Fexvisor failed to join the volume %(vol)s in the group %(group)s due to " -"%(ret)s." -msgstr "" -"Fexvisor no ha podido unir el volumen %(vol)s en el grupo %(group)s debido a " -"%(ret)s." - -#, python-format -msgid "" -"Fexvisor failed to remove the volume %(vol)s in the group %(group)s due to " -"%(ret)s." -msgstr "" -"Fexvisor no ha podido eliminar el volumen %(vol)s en el grupo %(group)s " -"debido a %(ret)s." - -#, python-format -msgid "Fexvisor failed to remove volume %(id)s due to %(reason)s." -msgstr "Fexvisor no ha podido eliminar el volumen %(id)s debido a %(reason)s." - -#, python-format -msgid "Fibre Channel SAN Lookup failure: %(reason)s" -msgstr "Anomalía de búsqueda de SAN de canal de fibra: %(reason)s" - -#, python-format -msgid "Fibre Channel Zone operation failed: %(reason)s" -msgstr "Ha fallado la operación de zona de canal de fibra: %(reason)s" - -#, python-format -msgid "Fibre Channel connection control failure: %(reason)s" -msgstr "Anomalía de control de conexión de canal de fibra: %(reason)s" - -#, python-format -msgid "File %(file_path)s could not be found." -msgstr "No se ha podido encontrar el archivo %(file_path)s." - -#, python-format -msgid "File %(path)s has invalid backing file %(bfile)s, aborting." -msgstr "" -"El archivo %(path)s tiene un archivo de respaldo %(bfile)s no válido, " -"terminando de forma anormal." 
- -#, python-format -msgid "File already exists at %s." -msgstr "Ya existe el archivo en %s." - -#, python-format -msgid "File already exists at: %s" -msgstr "El archivo ya existe en: %s" - -msgid "Find host in hostgroup error." -msgstr "Error al buscar el host en el grupo de host." - -msgid "Find host lun id error." -msgstr "Error al buscar el ID de LUN de host." - -msgid "Find lun group from mapping view error." -msgstr "Error al buscar el grupo de LUN en la vista de correlaciones." - -msgid "Find mapping view error." -msgstr "Error al buscar la vista de correlaciones." - -msgid "Find portgroup error." -msgstr "Error al buscar el grupo de puertos." - -msgid "Find portgroup from mapping view error." -msgstr "Error al buscar el grupo de puertos en la vista de correlaciones." - -#, python-format -msgid "" -"Flash Cache Policy requires WSAPI version '%(fcache_version)s' version " -"'%(version)s' is installed." -msgstr "" -"La política de memoria caché de flash requiere que la versión de WSAPI " -"'%(fcache_version)s' versión '%(version)s' esté instalada." - -#, python-format -msgid "Flexvisor assign volume failed.:%(id)s:%(status)s." -msgstr "" -"La desasignación de volumen de Flexvisor ha fallado: %(id)s:%(status)s." - -#, python-format -msgid "Flexvisor assign volume failed:%(id)s:%(status)s." -msgstr "La asignación de volumen de Flexvisor ha fallado: %(id)s:%(status)s." - -#, python-format -msgid "" -"Flexvisor could not find volume %(id)s snapshot in the group %(vgid)s " -"snapshot %(vgsid)s." -msgstr "" -"Flexvisor no ha podido encontrar la instantánea del volumen %(id)s en la " -"instantánea %(vgid)s del grupo %(vgsid)s." - -#, python-format -msgid "Flexvisor create volume failed.:%(volumeid)s:%(status)s." -msgstr "" -"La creación de volumen de Flexvisor ha fallado: %(volumeid)s:%(status)s." - -#, python-format -msgid "Flexvisor failed deleting volume %(id)s: %(status)s." -msgstr "Flexvisor no ha podido suprimir el volumen %(id)s: %(status)s." 
- -#, python-format -msgid "Flexvisor failed to add volume %(id)s to group %(cgid)s." -msgstr "Flexvisor no ha podido añadir el volumen %(id)s al grupo %(cgid)s." - -#, python-format -msgid "" -"Flexvisor failed to assign volume %(id)s due to unable to query status by " -"event id." -msgstr "" -"Flexvisor no ha podido asignar el volumen %(id)s porque no ha podido " -"consultar el estado por id de suceso." - -#, python-format -msgid "Flexvisor failed to assign volume %(id)s: %(status)s." -msgstr "Flexvisor no ha podido asignar el volumen %(id)s: %(status)s." - -#, python-format -msgid "Flexvisor failed to assign volume %(volume)s iqn %(iqn)s." -msgstr "Flexvisor no ha podido asignar al volumen %(volume)s el iqn %(iqn)s." - -#, python-format -msgid "Flexvisor failed to clone volume %(id)s: %(status)s." -msgstr "Flexvisor no ha podido clonar el volumen %(id)s: %(status)s." - -#, python-format -msgid "Flexvisor failed to clone volume (failed to get event) %(id)s." -msgstr "" -"Flexvisor no ha podido clonar el volumen (no ha podido obtener el suceso) " -"%(id)s." - -#, python-format -msgid "Flexvisor failed to create snapshot for volume %(id)s: %(status)s." -msgstr "" -"Flexvisor no ha podido crear la instantánea para el volumen %(id)s: " -"%(status)s." - -#, python-format -msgid "" -"Flexvisor failed to create snapshot for volume (failed to get event) %(id)s." -msgstr "" -"Flexvisor no ha podido crear la instantánea para el volumen (no ha podido " -"obtener el suceso) %(id)s." - -#, python-format -msgid "Flexvisor failed to create volume %(id)s in the group %(vgid)s." -msgstr "Flexvisor no ha podido crear el volumen %(id)s en el grupo %(vgid)s." - -#, python-format -msgid "Flexvisor failed to create volume %(volume)s: %(status)s." -msgstr "Flexvisor no ha podido crear el volumen %(volume)s: %(status)s." - -#, python-format -msgid "Flexvisor failed to create volume (get event) %s." -msgstr "Flexvisor no ha podido crear el volumen (obtener el suceso) %s." 
- -#, python-format -msgid "Flexvisor failed to create volume from snapshot %(id)s: %(status)s." -msgstr "" -"Flexvisor no ha podido crear el volumen a partir de la instantánea %(id)s: " -"%(status)s." - -#, python-format -msgid "Flexvisor failed to create volume from snapshot %(id)s:%(status)s." -msgstr "" -"Flexvisor no ha podido crear el volumen a partir de la instantánea %(id)s: " -"%(status)s." - -#, python-format -msgid "" -"Flexvisor failed to create volume from snapshot (failed to get event) %(id)s." -msgstr "" -"Flexvisor no ha podido crear el volumen a partir de la instantánea (no ha " -"podido obtener el suceso) %(id)s." - -#, python-format -msgid "Flexvisor failed to delete snapshot %(id)s: %(status)s." -msgstr "Flexvisor no ha podido suprimir la instantánea %(id)s: %(status)s." - -#, python-format -msgid "Flexvisor failed to delete snapshot (failed to get event) %(id)s." -msgstr "" -"Flexvisor no ha podido suprimir la instantánea (no ha podido obtener el " -"suceso) %(id)s." - -#, python-format -msgid "Flexvisor failed to delete volume %(id)s: %(status)s." -msgstr "Flexvisor no ha podido suprimir el volumen %(id)s: %(status)s." - -#, python-format -msgid "Flexvisor failed to extend volume %(id)s: %(status)s." -msgstr "Flexvisor no ha podido ampliar el volumen %(id)s: %(status)s." - -#, python-format -msgid "Flexvisor failed to extend volume %(id)s:%(status)s." -msgstr "Flexvisor no ha podido ampliar el volumen %(id)s: %(status)s." - -#, python-format -msgid "Flexvisor failed to extend volume (failed to get event) %(id)s." -msgstr "" -"Flexvisor no ha podido ampliar el volumen (no ha podido obtener el suceso) " -"%(id)s." - -#, python-format -msgid "Flexvisor failed to get pool info %(id)s: %(status)s." -msgstr "" -"Flexvisor no ha podido obtener la información de agrupación %(id)s: " -"%(status)s." - -#, python-format -msgid "" -"Flexvisor failed to get snapshot id of volume %(id)s from group %(vgid)s." 
-msgstr "" -"Flexvisor no ha podido obtener el ID de instantánea del volumen %(id)s del " -"grupo %(vgid)s." - -#, python-format -msgid "Flexvisor failed to remove volume %(id)s from group %(cgid)s." -msgstr "Flexvisor no ha podido eliminar el volumen %(id)s del grupo %(cgid)s." - -#, python-format -msgid "Flexvisor failed to spawn volume from snapshot %(id)s:%(status)s." -msgstr "" -"Flexvisor no ha podido generar el volumen a partir de la instantánea %(id)s:" -"%(status)s." - -#, python-format -msgid "" -"Flexvisor failed to spawn volume from snapshot (failed to get event) %(id)s." -msgstr "" -"Flexvisor no ha podido generar el volumen a partir de la instantánea (no ha " -"podido obtener el suceso) %(id)s." - -#, python-format -msgid "Flexvisor failed to unassign volume %(id)s: %(status)s." -msgstr "Flexvisor no ha podido desasignar el volumen %(id)s: %(status)s." - -#, python-format -msgid "Flexvisor failed to unassign volume (get event) %(id)s." -msgstr "" -"Flexvisor no ha podido desasignar el volumen (obtener el suceso) %(id)s." - -#, python-format -msgid "Flexvisor failed to unassign volume:%(id)s:%(status)s." -msgstr "Flexvisor no ha podido desasignar el volumen: %(id)s:%(status)s." - -#, python-format -msgid "Flexvisor unable to find the source volume %(id)s info." -msgstr "" -"Flexvisor no ha podido encontrar la información del volumen de origen %(id)s." - -#, python-format -msgid "Flexvisor unassign volume failed:%(id)s:%(status)s." -msgstr "" -"La desasignación de volumen de Flexvisor ha fallado: %(id)s:%(status)s." - -#, python-format -msgid "Flexvisor volume %(id)s failed to join group %(vgid)s." -msgstr "El volumen de Flexvisor %(id)s no se ha podido unir al grupo %(vgid)s." - -#, python-format -msgid "Folder %s does not exist in Nexenta Store appliance" -msgstr "La carpeta %s no existe en la aplicación Nexenta Store" - -#, python-format -msgid "GPFS is not running, state: %s." -msgstr "GPFS no se está ejecutando, estado: %s." 
- -msgid "Gateway VIP is not set" -msgstr "No se ha establecido el VIP de pasarela." - -msgid "Get FC ports by port group error." -msgstr "Error al obtener los puertos FC por grupo de puertos." - -msgid "Get FC ports from array error." -msgstr "Error al obtener los puertos FC de la matriz." - -msgid "Get FC target wwpn error." -msgstr "Error al obtener wwpn de destino FC." - -msgid "Get HyperMetroPair error." -msgstr "Error al obtener HyperMetroPair." - -msgid "Get LUN group by view error." -msgstr "Error al obtener el grupo de LUN por vista." - -msgid "Get LUNcopy information error." -msgstr "Error al obtener información de LUNcopy." - -msgid "Get QoS id by lun id error." -msgstr "Error al obtener el ID QoS por ID de lun." - -msgid "Get QoS information error." -msgstr "Error al obtener información QoS." - -msgid "Get QoS policy error." -msgstr "Error al obtener política QoS." - -msgid "Get SplitMirror error." -msgstr "Error al obtener SplitMirror." - -msgid "Get active client failed." -msgstr "La operación Obtener cliente activo ha fallado." - -msgid "Get array info error." -msgstr "Error al obtener información de la matriz." - -msgid "Get cache by name error." -msgstr "Error al obtener la caché por nombre." - -msgid "Get connected free FC wwn error." -msgstr "Error al obtener wwn FC libre conectado." - -msgid "Get engines error." -msgstr "Error al obtener los motores." - -msgid "Get host initiators info failed." -msgstr "Error al obtener la información de los iniciadores del host." - -msgid "Get hostgroup information error." -msgstr "Error al obtener la información de grupo de host." - -msgid "" -"Get iSCSI port info error, please check the target IP configured in huawei " -"conf file." -msgstr "" -"Error al obtener información de puerto iSCSI, compruebe la IP de destino " -"configurada en el archivo config de huawei ." - -msgid "Get iSCSI port information error." -msgstr "Error al obtener información de puerto iSCSI." - -msgid "Get iSCSI target port error." 
-msgstr "Error al obtener el puerto de destino iSCSI." - -msgid "Get lun id by name error." -msgstr "Error al obtener el LUN por nombre." - -msgid "Get lun migration task error." -msgstr "Error al obtener la tarea de migración de lun." - -msgid "Get lungroup id by lun id error." -msgstr "Error al obtener el ID de grupo de LUN por ID de lun." - -msgid "Get lungroup information error." -msgstr "Error al obtener la información de grupo de LUN." - -msgid "Get migration task error." -msgstr "Error al obtener la tarea de migración." - -msgid "Get pair failed." -msgstr "Obtener par ha fallado." - -msgid "Get partition by name error." -msgstr "Error al obtener partición por nombre." - -msgid "Get partition by partition id error." -msgstr "Error al obtener partición por ID de partición." - -msgid "Get port group by view error." -msgstr "Error al obtener el grupo de puertos por vista." - -msgid "Get port group error." -msgstr "Error al obtener el grupo de puertos." - -msgid "Get port groups by port error." -msgstr "Error al obtener los grupos de puertos por puerto." - -msgid "Get ports by port group error." -msgstr "Error al obtener los puertos por grupo de puertos." - -msgid "Get remote device info failed." -msgstr "Obtener dispositivo remoto ha fallado." - -msgid "Get remote devices error." -msgstr "Error al obtener los dispositivos remotos." - -msgid "Get smartcache by cache id error." -msgstr "Error al obtener smartcache por ID de caché." - -msgid "Get snapshot error." -msgstr "Error al obtener la instantánea." - -msgid "Get snapshot id error." -msgstr "Error al obtener el ID de instantánea." - -msgid "Get target IP error." -msgstr "Error al obtener la IP de destino." - -msgid "Get target LUN of SplitMirror error." -msgstr "Error al obtener el LUN de destino de SplitMirror." - -msgid "Get views by port group error." -msgstr "Error al obtener vistas por grupo de puertos." - -msgid "Get volume by name error." -msgstr "Error al obtener el volumen por nombre." 
- -msgid "Get volume error." -msgstr "Error al obtener el volumen." - -#, python-format -msgid "" -"Glance metadata cannot be updated, key %(key)s exists for volume id " -"%(volume_id)s" -msgstr "" -"Los metadatos de Glance no se pueden actualizar, la clave %(key)s existe " -"para el ID de volumen %(volume_id)s" - -#, python-format -msgid "Glance metadata for volume/snapshot %(id)s cannot be found." -msgstr "" -"No se han podido encontrar los metadatos de Glance para el volumen/" -"instantánea %(id)s." - -#, python-format -msgid "Gluster config file at %(config)s doesn't exist" -msgstr "El archivo de configuración de Gluster en %(config)s no existe" - -#, python-format -msgid "Google Cloud Storage api failure: %(reason)s" -msgstr "Error de la api de Google Cloud Storage: %(reason)s" - -#, python-format -msgid "Google Cloud Storage connection failure: %(reason)s" -msgstr "Error de conexión de Google Cloud Storage: %(reason)s" - -#, python-format -msgid "Google Cloud Storage oauth2 failure: %(reason)s" -msgstr "Error de oauth2 de Google Cloud Storage: %(reason)s" - -#, python-format -msgid "Got bad path information from DRBDmanage! (%s)" -msgstr "" -"Se ha obtenido información incorrecta sobre vía de acceso de DRBDmanage (%s)" - -msgid "HBSD error occurs." -msgstr "Se ha producido un error HBSD." - -msgid "HPELeftHand url not found" -msgstr "URL de HPELeftHand no encontrado" - -#, python-format -msgid "" -"Hash block size has changed since the last backup. New hash block size: " -"%(new)s. Old hash block size: %(old)s. Do a full backup." -msgstr "" -"El tamaño de bloque de hash ha cambiado desde la última copia de seguridad. " -"Nuevo tamaño de bloque hash: %(new)s. Tamaño de bloque hash antiguo: " -"%(old)s. Haga una copia de seguridad completa." - -#, python-format -msgid "Have not created %(tier_levels)s tier(s)." -msgstr "No se han creado los niveles %(tier_levels)s." - -#, python-format -msgid "Hint \"%s\" not supported." 
-msgstr "Sugerencia \"%s\" no soportada." - -msgid "Host" -msgstr "Host" - -#, python-format -msgid "Host %(host)s could not be found." -msgstr "No se ha podido encontrar el host %(host)s." - -#, python-format -msgid "" -"Host %(host)s does not match x509 certificate contents: CommonName " -"%(commonName)s." -msgstr "" -"El host %(host)s no coincide con el contenido del certificado x509: " -"CommonName %(commonName)s." - -#, python-format -msgid "Host %s has no FC initiators" -msgstr "El host %s no tiene ningún iniciador FC" - -#, python-format -msgid "Host group with name %s not found" -msgstr "No se ha encontrado el grupo de host con el nombre %s." - -#, python-format -msgid "Host group with ref %s not found" -msgstr "No se ha encontrado el grupo de host con ref %s" - -msgid "Host is NOT Frozen." -msgstr "El host NO está inmovilizado." - -msgid "Host is already Frozen." -msgstr "El host ya está inmovilizado." - -#, python-format -msgid "Host not found. Failed to remove %(service)s on %(host)s." -msgstr "" -"No se ha encontrado el host. No se ha podido eliminar %(service)s en " -"%(host)s." - -#, python-format -msgid "Host replication_status must be %s to failover." -msgstr "" -"El estado de replicación (replication_status) del host debe ser %s para " -"poder realizar la migración tras error." - -#, python-format -msgid "Host type %s not supported." -msgstr "El tipo de host %s no se soporta." - -#, python-format -msgid "Host with ports %(ports)s not found." -msgstr "No se ha encontrado el host con los puertos %(ports)s." - -msgid "Hosts" -msgstr "Hosts" - -msgid "Hypermetro and Replication can not be used in the same volume_type." -msgstr "No se puede utilizar Hipermetro y Replicación en el mismo volume_type." - -#, python-format -msgid "I/O group %(iogrp)d is not valid; available I/O groups are %(avail)s." -msgstr "" -"El grupo de E/S %(iogrp)d no es válido; los grupos de E/S disponibles son " -"%(avail)s." 
- -msgid "ID" -msgstr "ID" - -msgid "" -"If compression is set to True, rsize must also be set (not equal to -1)." -msgstr "" -"Si compression se define como True, rsize también debe definirse (distinto a " -"-1)." - -msgid "If nofmtdisk is set to True, rsize must also be set to -1." -msgstr "Si nofmtdisk está definido a True, rsize también se debe definir a -1." - -#, python-format -msgid "" -"Illegal value '%(prot)s' specified for flashsystem_connection_protocol: " -"valid value(s) are %(enabled)s." -msgstr "" -"Valor no permitido '%(prot)s' especificado para " -"flashsystem_connection_protocol: Los valores válidos son %(enabled)s." - -msgid "Illegal value specified for IOTYPE: 0, 1, or 2." -msgstr "Se ha especificado un valor no permitido en IOTYPE: 0, 1 o 2." - -msgid "Illegal value specified for smarttier: set to either 0, 1, 2, or 3." -msgstr "Valor no válido especificado para smarttier: establezca 0, 1, 2 o 3." - -msgid "" -"Illegal value specified for storwize_svc_vol_grainsize: set to either 32, " -"64, 128, or 256." -msgstr "" -"Valor ilegal especificado para storwize_svc_vol_grainsize: establecido en " -"32, 64, 128 o 256." - -msgid "" -"Illegal value specified for thin: Can not set thin and thick at the same " -"time." -msgstr "" -"Valor no válido especificado en thin: No se puede establecer thin y thick al " -"mismo tiempo." - -#, python-format -msgid "Image %(image_id)s could not be found." -msgstr "No se ha podido encontrar la imagen %(image_id)s. " - -#, python-format -msgid "Image %(image_id)s is not active." -msgstr "La imagen %(image_id)s no está activa." - -#, python-format -msgid "Image %(image_id)s is unacceptable: %(reason)s" -msgstr "La imagen %(image_id)s es inaceptable: %(reason)s" - -msgid "Image location not present." -msgstr "Ubicación de imagen no presente." 
- -msgid "Image quota exceeded" -msgstr "Se ha excedido la cuota de la imagen" - -#, python-format -msgid "" -"Image virtual size is %(image_size)dGB and doesn't fit in a volume of size " -"%(volume_size)dGB." -msgstr "" -"El tamaño virtual de la imagen es %(image_size)d GB y no cabe en un volumen " -"de tamaño %(volume_size)dGB." - -msgid "" -"ImageBusy error raised while deleting rbd volume. This may have been caused " -"by a connection from a client that has crashed and, if so, may be resolved " -"by retrying the delete after 30 seconds has elapsed." -msgstr "" -"Se ha producido un error de ImageBusy al suprimir el volumen rbd. Puede " -"haberse producido debido a una conexión de un cliente que ha colgado y, si " -"es así, se puede resolver volviendo a intentar la supresión después de que " -"hayan transcurrido 30 segundos." - -#, python-format -msgid "" -"Import record failed, cannot find backup service to perform the import. " -"Request service %(service)s" -msgstr "" -"Ha fallado la importación de registro, no se puede encontrar el servicio de " -"copia de seguridad para realizar la importación. Solicite el servicio " -"%(service)s" - -msgid "Incorrect request body format" -msgstr "Formato de cuerpo de solicitud incorrecto" - -msgid "Incorrect request body format." -msgstr "Formato de cuerpo de solicitud incorrecto." - -msgid "Incremental backups exist for this backup." -msgstr "" -"Existen copias de seguridad incrementales para esta copia de seguridad." - -#, python-format -msgid "" -"Infortrend CLI exception: %(err)s Param: %(param)s (Return Code: %(rc)s) " -"(Output: %(out)s)" -msgstr "" -"Excepción de CLI Infortrend: %(err)s Parám: %(param)s (Código de retorno: " -"%(rc)s) (Salida: %(out)s)" - -msgid "Input volumes or snapshots are invalid." -msgstr "Los volúmenes de entrada o instantáneas no son válidos." - -msgid "Input volumes or source volumes are invalid." -msgstr "Los volúmenes de entrada o los volúmenes de origen no son válidos." 
- -#, python-format -msgid "Instance %(uuid)s could not be found." -msgstr "No se ha podido encontrar la instancia %(uuid)s." - -msgid "Insufficient free space available to extend volume." -msgstr "No hay suficiente espacio libre disponible para extender el volumen." - -msgid "Insufficient privileges" -msgstr "Privilegios insuficientes" - -#, python-format -msgid "Invalid 3PAR Domain: %(err)s" -msgstr "Dominio 3PAR no válido: %(err)s" - -msgid "Invalid ALUA value. ALUA value must be 1 or 0." -msgstr "Valor ALUA no válido. El valor ALUA debe ser 1 o 0." - -msgid "Invalid Ceph args provided for backup rbd operation" -msgstr "" -"Arg de ceph no válidos proporcionados para operación rbd de copia de " -"seguridad" - -#, python-format -msgid "Invalid CgSnapshot: %(reason)s" -msgstr "CgSnapshot no válido: %(reason)s" - -#, python-format -msgid "Invalid ConsistencyGroup: %(reason)s" -msgstr "ConsistencyGroup no válido: %(reason)s" - -msgid "Invalid ConsistencyGroup: No host to create consistency group" -msgstr "" -"ConsistencyGroup no válido: No hay host para crear grupo de consistencia" - -#, python-format -msgid "" -"Invalid HPELeftHand API version found: %(found)s. Version %(minimum)s or " -"greater required for manage/unmanage support." -msgstr "" -"Se ha encontrado una versión de API de HPELeftHand no válida:%(found)s). Se " -"necesita la versión %(minimum)s o superior para tener soporte para gestionar/" -"dejar de gestionar." - -#, python-format -msgid "Invalid IP address format: '%s'" -msgstr "Formato de dirección IP no válido: '%s'" - -#, python-format -msgid "" -"Invalid QoS specification detected while getting QoS policy for volume %s" -msgstr "" -"Se ha detectado una especificación QoS no válida al obtener la política QoS " -"del volumen %s" - -#, python-format -msgid "Invalid Replication Target: %(reason)s" -msgstr "Destino de replicación no válido: %(reason)s" - -#, python-format -msgid "" -"Invalid Virtuozzo Storage share specification: %r. 
Must be: [MDS1[," -"MDS2],...:/][:PASSWORD]." -msgstr "" -"Especificación de compartición de Virtuozzo Storage no válida: %r. Debe ser: " -"[MDS1[,MDS2],...:/][:PASSWORD]." - -#, python-format -msgid "Invalid XtremIO version %(cur)s, version %(min)s or up is required" -msgstr "" -"Versión de XtremIO no válida %(cur)s, se requiere la versión %(min)s o " -"superior" - -#, python-format -msgid "Invalid allocated quotas defined for the following project quotas: %s" -msgstr "" -"Se han definido unas cuotas asignadas no válidas en las siguientes cuotas de " -"proyecto: %s" - -msgid "Invalid argument" -msgstr "Argumento no válido" - -msgid "Invalid argument - negative seek offset." -msgstr "Argumento no válido - desplazamiento de búsqueda negativo." - -#, python-format -msgid "Invalid argument - whence=%s not supported" -msgstr "Argumento no válido - whence=%s no admitido" - -#, python-format -msgid "Invalid argument - whence=%s not supported." -msgstr "Argumento no válido - whence=%s no admitido." - -#, python-format -msgid "Invalid attaching mode '%(mode)s' for volume %(volume_id)s." -msgstr "Método de conexión '%(mode)s' inválido para el volumen %(volume_id)s." - -#, python-format -msgid "Invalid auth key: %(reason)s" -msgstr "Claves de autenticación inválidas: %(reason)s" - -#, python-format -msgid "Invalid backup: %(reason)s" -msgstr "Copia de seguridad no válida: %(reason)s" - -msgid "Invalid chap user details found in CloudByte storage." -msgstr "" -"Se han encontrado detalles de usuario chap no válidos en el almacenamiento " -"CloudByte." 
- -#, python-format -msgid "Invalid connection initialization response of volume %(name)s" -msgstr "Respuesta de inicialización de conexión no válida del volumen %(name)s" - -#, python-format -msgid "" -"Invalid connection initialization response of volume %(name)s: %(output)s" -msgstr "" -"Respuesta de inicialización de conexión no válida del volumen %(name)s: " -"%(output)s" - -#, python-format -msgid "Invalid content type %(content_type)s." -msgstr "Tipo de contenido invalido %(content_type)s." - -msgid "Invalid credentials" -msgstr "Credenciales no válidas" - -#, python-format -msgid "Invalid directory: %s" -msgstr "Directorio no válido: %s" - -#, python-format -msgid "Invalid disk adapter type: %(invalid_type)s." -msgstr "Tipo de adaptador de disco no válido: %(invalid_type)s." - -#, python-format -msgid "Invalid disk backing: %s." -msgstr "Respaldo de disco no válido: %s." - -#, python-format -msgid "Invalid disk type: %(disk_type)s." -msgstr "Tipo de disco no válido: %(disk_type)s." - -#, python-format -msgid "Invalid disk type: %s." -msgstr "Tipo de disco no válido: %s." - -#, python-format -msgid "Invalid host: %(reason)s" -msgstr "Host inválido: %(reason)s" - -#, python-format -msgid "" -"Invalid hpe3parclient version found (%(found)s). Version %(minimum)s or " -"greater required. Run \"pip install --upgrade python-3parclient\" to upgrade " -"the hpe3parclient." -msgstr "" -"Se ha encontrado una versión de hpe3parclient no válida (%(found)s). Se " -"requiere la versión %(minimum)s o superior. Ejecute \"pip install --upgrade " -"python-3parclient\" para actualizar hpe3parclient." - -#, python-format -msgid "" -"Invalid hpelefthandclient version found (%(found)s). Version %(minimum)s or " -"greater required. Run 'pip install --upgrade python-lefthandclient' to " -"upgrade the hpelefthandclient." -msgstr "" -"Se ha encontrado una versión no válida de hpelefthandclient (%(found)s). Se " -"requiere la versión %(minimum)s o superior. 
Ejecute 'pip install --upgrade " -"python-lefthandclient' para actualizar hpelefthandclient." - -#, python-format -msgid "Invalid image href %(image_href)s." -msgstr "Href de imagen %(image_href)s no válida." - -msgid "Invalid image identifier or unable to access requested image." -msgstr "" -"El identificador de imagen no es válido o no se ha podido acceder a la " -"imagen solicitada." - -msgid "Invalid imageRef provided." -msgstr "Se ha proporcionado una referencia de imagen no válida." - -msgid "Invalid input" -msgstr "Entrada no válida" - -#, python-format -msgid "Invalid input received: %(reason)s" -msgstr "Entrada inválida recibida: %(reason)s" - -#, python-format -msgid "Invalid is_public filter [%s]" -msgstr "Filtro is_public no válido [%s]" - -#, python-format -msgid "Invalid lun type %s is configured." -msgstr "Se ha configurado un tipo de lun %s no válido." - -#, python-format -msgid "Invalid metadata size: %(reason)s" -msgstr "Tamaño de metadatos inválido: %(reason)s" - -#, python-format -msgid "Invalid metadata: %(reason)s" -msgstr "Metadatos inválidos: %(reason)s" - -#, python-format -msgid "Invalid mount point base: %s" -msgstr "Base de punto de montaje no válida: %s" - -#, python-format -msgid "Invalid mount point base: %s." -msgstr "Base de punto de montaje no válida: %s." - -#, python-format -msgid "Invalid new snapCPG name for retype. new_snap_cpg='%s'." -msgstr "" -"Nuevo nombre de snapCPG no válido para la reescritura. new_snap_cpg='%s'." - -#, python-format -msgid "Invalid port number %(config)s for Coho rpc port" -msgstr "Número de puerto no válido %(config)s para el puerto RPC de Coho" - -#, python-format -msgid "" -"Invalid prefetch type '%s' is configured. PrefetchType must be in 0,1,2,3." -msgstr "" -"Se ha configurado un tipo de captación previa %s no válido. El tipo de " -"captación previa debe estar dentro de 0,1,2,3." 
- -#, python-format -msgid "Invalid qos specs: %(reason)s" -msgstr "Especificaciones de qos no válidas: %(reason)s" - -msgid "Invalid request to attach volume to an invalid target" -msgstr "Solicitud no válida para adjuntar un volumen a un destino no válido" - -msgid "" -"Invalid request to attach volume with an invalid mode. Attaching mode should " -"be 'rw' or 'ro'" -msgstr "" -"Solicitud no válida para adjuntar un volumen con el modo no válido. " -"Adjuntando modo debe ser 'rw' o 'ro'" - -#, python-format -msgid "Invalid reservation expiration %(expire)s." -msgstr "Caducidad de reserva no válida %(expire)s." - -msgid "Invalid response header from RPC server" -msgstr "Respuesta no válida procedente del servidor RPC" - -#, python-format -msgid "Invalid secondary id %s." -msgstr "ID secundarios no válidos %s." - -msgid "Invalid service catalog json." -msgstr "JSON de catálogo de servicios no válido." - -msgid "Invalid sheepdog cluster status." -msgstr "Estado de clúster sheepdog no válido." - -#, python-format -msgid "Invalid snapshot: %(reason)s" -msgstr "Instantánea no válida: %(reason)s" - -#, python-format -msgid "Invalid status: '%s'" -msgstr "Estado no válido: '%s' " - -#, python-format -msgid "Invalid storage pool %s requested. Retype failed." -msgstr "" -"Se ha solicitado una agrupación de almacenamiento no válida %s. Ha fallado " -"la reescritura." - -#, python-format -msgid "Invalid storage pool %s specificed." -msgstr "Se ha especificado una agrupación de almacenamiento no válida %s." - -msgid "Invalid storage pool is configured." -msgstr "Se ha configurado una agrupación de almacenamiento no válida." - -msgid "Invalid transport type." -msgstr "Tipo de transporte no válido." - -#, python-format -msgid "Invalid update setting: '%s'" -msgstr "Valor de actualización no válido: '%s' " - -#, python-format -msgid "Invalid value '%s' for force." -msgstr "Valor no válido %s' para forzar." - -#, python-format -msgid "Invalid value '%s' for force. 
" -msgstr "Valor no válido %s' para forzar." - -#, python-format -msgid "Invalid value '%s' for is_public. Accepted values: True or False." -msgstr "Valor no válido '%s' para is_public. Valores aceptados: True o False." - -#, python-format -msgid "Invalid value '%s' for skip_validation." -msgstr "Valor no válido '%s' para skip_validation." - -#, python-format -msgid "Invalid value for 'bootable': '%s'" -msgstr "Valor no válido para 'bootable': '%s'" - -#, python-format -msgid "Invalid value for 'force': '%s'" -msgstr "Valor no válido para 'force': '%s'" - -#, python-format -msgid "Invalid value for 'readonly': '%s'" -msgstr "Valor no válido para 'readonly': '%s'" - -msgid "Invalid value for 'scheduler_max_attempts', must be >=1" -msgstr "Valor no válido para 'scheduler_max_attempts', debe ser >= 1 " - -msgid "Invalid value for NetApp configuration option netapp_host_type." -msgstr "" -"Valor no válido para la opción de configuración netapp_host_type de NetApp." - -msgid "Invalid value for NetApp configuration option netapp_lun_ostype." -msgstr "" -"Valor no válido para la opción de configuración netapp_lun_ostype de NetApp." - -#, python-format -msgid "Invalid value for age, %(age)s" -msgstr "Valor no válido para la edad, %(age)s" - -#, python-format -msgid "Invalid value: \"%s\"" -msgstr "Valor no válido: \"%s\"" - -#, python-format -msgid "" -"Invalid volume size provided for create request: %s (size argument must be " -"an integer (or string representation of an integer) and greater than zero)." -msgstr "" -"Se ha proporcionado un tamaño de volumen no válido para crear la solicitud: " -"%s (el argumento de tamaño debe ser un entero (o una representación de " -"cadena de un entero) y mayor que cero)." 
- -#, python-format -msgid "Invalid volume type: %(reason)s" -msgstr "Tipo de volumen inválido: %(reason)s" - -#, python-format -msgid "Invalid volume: %(reason)s" -msgstr "Volumen inválido: %(reason)s" - -#, python-format -msgid "" -"Invalid volume: Cannot add volume %(volume_id)s to consistency group " -"%(group_id)s because volume is in an invalid state: %(status)s. Valid states " -"are: ('available', 'in-use')." -msgstr "" -"Volumen no válido: No se puede añadir el volumen %(volume_id)s al grupo de " -"consistencia %(group_id)s porque el volumen está en un estado no válido: " -"%(status)s. Los estados válidos son: ('available', 'in-use')." - -#, python-format -msgid "" -"Invalid volume: Cannot add volume %(volume_id)s to consistency group " -"%(group_id)s because volume type %(volume_type)s is not supported by the " -"group." -msgstr "" -"Volumen no válido: No se puede añadir el volumen %(volume_id)s al grupo de " -"consistencia %(group_id)s porque el grupos de consistencia no soporta el " -"tipo de volumen %(volume_type)s ." - -#, python-format -msgid "" -"Invalid volume: Cannot add volume fake-volume-uuid to consistency group " -"%(group_id)s because volume cannot be found." -msgstr "" -"Volumen no válido: No se puede añadir el volumen fake-volume-uuid al grupo " -"de consistencia %(group_id)s porque no se ha encontrado el volumen." - -#, python-format -msgid "" -"Invalid volume: Cannot remove volume fake-volume-uuid from consistency group " -"%(group_id)s because it is not in the group." -msgstr "" -"Volumen no válido: No se puede eliminar el volumen fake-volume-uuid del " -"grupo de consistencia %(group_id)s porque no está en el grupo." - -#, python-format -msgid "Invalid volume_type passed: %s." -msgstr "El volume_type no válido ha pasado: %s." - -#, python-format -msgid "" -"Invalid volume_type provided: %s (requested type is not compatible; either " -"match source volume, or omit type argument)." 
-msgstr "" -"Se ha proporcionado un tipo volume_type no válido: %s (el tipo solicitado no " -"es compatible; debe hacer coincidir el volumen de origen o debe omitir el " -"argumento de tipo)." - -#, python-format -msgid "" -"Invalid volume_type provided: %s (requested type is not compatible; " -"recommend omitting the type argument)." -msgstr "" -"Se ha proporcionado un tipo volume_type no válido: %s (el tipo solicitado no " -"es compatible; se recomienda omitir el argumento de tipo)." - -#, python-format -msgid "" -"Invalid volume_type provided: %s (requested type must be supported by this " -"consistency group)." -msgstr "" -"volume_type proporcionado no válido: %s (este grupo de consistencia debe " -"soportar el tipo solicitado)." - -#, python-format -msgid "Invalid wwpns format %(wwpns)s" -msgstr "Formato de wwpns no válido %(wwpns)s" - -msgid "Invoking web service failed." -msgstr "Ha fallado la invocación al servicio web." - -msgid "Issue encountered waiting for job." -msgstr "Se ha detectado un problema al esperar el trabajo." - -msgid "Issue encountered waiting for synchronization." -msgstr "Se ha detectado un problema al esperar la sincronización." - -msgid "" -"Issuing a fail-over failed because replication is not properly configured." -msgstr "" -"Se está emitiendo un mensaje de migración tras error fallida porque la " -"replicación no está configurada correctamente." - -#, python-format -msgid "Job id not found in CloudByte's create volume [%s] response." -msgstr "" -"No se ha encontrado el ID de trabajo en la respuesta de creación de volumen " -"[%s] de CloudByte." - -#, python-format -msgid "Job id not found in CloudByte's delete volume [%s] response." -msgstr "" -"No se ha encontrado el ID de trabajo en la respuesta de supresión de volumen " -"[%s] de CloudByte." - -msgid "" -"Key names can only contain alphanumeric characters, underscores, periods, " -"colons and hyphens." 
-msgstr "" -"Los nombres de clave sólo pueden contener caracteres alfanuméricos, " -"subrayados, puntos, dos puntos y guiones." - -#, python-format -msgid "KeyError: %s" -msgstr "KeyError: %s" - -msgid "Keystone version 3 or greater must be used to get nested quota support." -msgstr "" -"Se debe utilizar la versión 3 o superior de Keystone para tener soporte para " -"cuotas anidadas." - -#, python-format -msgid "LU does not exist for volume: %s" -msgstr "No existe ningún LU para el volumen: %s" - -msgid "LUN doesn't exist." -msgstr "El LUN no existe." - -msgid "LUN export failed!" -msgstr "Error al exportar LUN." - -msgid "LUN map overflow on every channel." -msgstr "Desbordamiento de correlación de LUN en todos los canales." - -#, python-format -msgid "LUN not found with given ref %s." -msgstr "No se ha encontrado un LUN con la referencia dada %s." - -#, python-format -msgid "LUN number is out of bound on channel id: %(ch_id)s." -msgstr "El número de LUN está fuera de limites en el ID de canal: %(ch_id)s." - -#, python-format -msgid "Last %s cinder syslog entries:-" -msgstr "Últimas %s entradas de syslog de Cinder:-" - -msgid "LeftHand cluster not found" -msgstr "Clúster LeftHand no encontrado" - -msgid "License is unavailable." -msgstr "La licencia no está disponible." - -#, python-format -msgid "Line %(dis)d : %(line)s" -msgstr "Línea %(dis)d : %(line)s" - -msgid "Link path already exists and its not a symlink" -msgstr "La vía de acceso al enlace existe y no es un symlink" - -#, python-format -msgid "Linked clone of source volume not supported in state: %s." -msgstr "No se soporta el clon enlazado del volumen de origen en estado: %s." - -msgid "Lock acquisition failed." -msgstr "Ha fallado la adquisición del bloqueo." - -msgid "Logout session error." -msgstr "Error al cerrar la sesión." - -msgid "" -"Lookup service not configured. Config option for fc_san_lookup_service needs " -"to specify a concrete implementation of the lookup service." 
-msgstr "" -"Servicio de búsqueda no configurado. La opción de configuración para " -"fc_san_lookup_service necesita especificar una implementación concreta del " -"servicio de búsqueda." - -msgid "Lun migration error." -msgstr "Error de migración de LUN." - -#, python-format -msgid "" -"MD5 of object: %(object_name)s before: %(md5)s and after: %(etag)s is not " -"same." -msgstr "" -"El MD5 del objeto: %(object_name)s antes de: %(md5)s y después de: %(etag)s " -"no es el mismo." - -#, python-format -msgid "MSG_DENIED: %r" -msgstr "MSG_DENIED: %r" - -#, python-format -msgid "MSG_DENIED: AUTH_ERROR: %r" -msgstr "MSG_DENIED: AUTH_ERROR: %r" - -#, python-format -msgid "MSG_DENIED: RPC_MISMATCH: %r" -msgstr "MSG_DENIED: RPC_MISMATCH: %r" - -#, python-format -msgid "Malformed fcns output string: %s" -msgstr "Cadena de salida de fcns con formato incorrecto: %s" - -#, python-format -msgid "Malformed message body: %(reason)s" -msgstr "Cuerpo de mensaje con formato incorrecto: %(reason)s" - -#, python-format -msgid "Malformed nameserver string: %s" -msgstr "Serie de servidor de nombres mal formada: %s" - -msgid "Malformed request body" -msgstr "Cuerpo de solicitud formado incorrectamente" - -msgid "Malformed request body." -msgstr "Cuerpo de solicitud mal formado." - -msgid "Malformed request url" -msgstr "URL de solicitud formado incorrectamente" - -#, python-format -msgid "Malformed response to command %(cmd)s: %(reason)s" -msgstr "Respuesta con formato incorrecto para el mandato %(cmd)s: %(reason)s" - -msgid "Malformed scheduler_hints attribute" -msgstr "Atributo scheduler_hints formado incorrectamente" - -#, python-format -msgid "Malformed show fcns database string: %s" -msgstr "Cadena de base de datos show fcns con formato incorrecto: %s" - -#, python-format -msgid "" -"Malformed zone configuration: (switch=%(switch)s zone_config=" -"%(zone_config)s)." -msgstr "" -"Configuración de zona mal formada: (switch=%(switch)s zone_config=" -"%(zone_config)s)." 
- -#, python-format -msgid "Malformed zone status: (switch=%(switch)s zone_config=%(zone_config)s)." -msgstr "" -"Estado de zona mal formado: (switch=%(switch)s zone_config=%(zone_config)s)." - -msgid "Manage existing get size requires 'id'." -msgstr "La gestión para obtener tamaño necesita 'id'." - -msgid "Manage existing snapshot not implemented." -msgstr "No se ha implementado la gestión de la instantánea existente." - -#, python-format -msgid "" -"Manage existing volume failed due to invalid backend reference " -"%(existing_ref)s: %(reason)s" -msgstr "" -"La gestión del volumen existente ha fallado porque la referencia de programa " -"de fondo no es válida %(existing_ref)s: %(reason)s" - -#, python-format -msgid "Manage existing volume failed due to volume type mismatch: %(reason)s" -msgstr "" -"La gestión del volumen existente ha fallado debido a una discrepancia de " -"tipo de volumen: %(reason)s" - -msgid "Manage existing volume not implemented." -msgstr "La gestión de volumen existente no se ha implementado." - -msgid "Manage existing volume requires 'source-id'." -msgstr "La gestión del volumen existente necesita 'source-id'." - -#, python-format -msgid "" -"Manage volume is not supported if FAST is enable. FAST policy: " -"%(fastPolicyName)s." -msgstr "" -"Gestionar volumen no se admite si FAST está habilitado. Política de FAST: " -"%(fastPolicyName)s." - -msgid "Managing of snapshots to failed-over volumes is not allowed." -msgstr "No se permite gestionar instantáneas a volúmenes que han dado error." - -msgid "Map info is None due to array version not supporting hypermetro." -msgstr "" -"La información de correlación es None debido a que la versión de la matriz " -"no admite hypermetro." - -#, python-format -msgid "" -"Mapping %(id)s prepare failed to complete within theallotted %(to)d seconds " -"timeout. Terminating." -msgstr "" -"La preparación de la correlación %(id)s no se ha podido completar en el " -"tiempo de espera asignado de %(to)d segundos. 
- Terminando." - -#, python-format -msgid "Masking view %(maskingViewName)s was not deleted successfully" -msgstr "" -"La vista de máscara %(maskingViewName)s no se ha suprimido correctamente" - -msgid "Maximum age is count of days since epoch." -msgstr "La edad máxima es el recuento de días desde epoch." - -#, python-format -msgid "Maximum number of backups allowed (%(allowed)d) exceeded" -msgstr "" -"Se ha superado el número máximo de copias de seguridad permitidas " -"(%(allowed)d)" - -#, python-format -msgid "Maximum number of snapshots allowed (%(allowed)d) exceeded" -msgstr "Se ha superado el número máximo de instantáneas permitidas (%(allowed)d)" - -#, python-format -msgid "" -"Maximum number of volumes allowed (%(allowed)d) exceeded for quota " -"'%(name)s'." -msgstr "" -"Se ha superado el número máximo de volúmenes permitidos (%(allowed)d) para " -"la cuota '%(name)s'." - -#, python-format -msgid "May specify only one of %s" -msgstr "Puede especificar solo uno de %s" - -#, python-format -msgid "Message %(message_id)s could not be found." -msgstr "No se ha podido encontrar el mensaje %(message_id)s." - -msgid "Metadata backup already exists for this volume" -msgstr "La copia de seguridad de metadatos ya existe para este volumen" - -#, python-format -msgid "Metadata backup object '%s' already exists" -msgstr "El objeto de copia de seguridad de metadatos '%s' ya existe" - -#, python-format -msgid "Metadata property key %s greater than 255 characters." -msgstr "Clave de propiedad de metadatos %s mayor que 255 caracteres." - -#, python-format -msgid "Metadata property key %s value greater than 255 characters." -msgstr "Valor de clave de propiedad de metadatos %s mayor que 255 caracteres." - -msgid "Metadata property key blank." 
-msgstr "Clave de propiedad de metadatos en blanco."
-
-msgid "Metadata restore failed due to incompatible version"
-msgstr ""
-"La restauración de metadatos ha fallado debido a la versión incompatible"
-
-msgid "Metadata restore failed due to incompatible version."
-msgstr ""
-"La restauración de metadatos ha fallado debido a una versión incompatible."
-
-msgid ""
-"Missing 'purestorage' python module, ensure the library is installed and "
-"available."
-msgstr ""
-"Falta el módulo de Python 'purestorage', asegúrese de que la biblioteca está "
-"instalada y disponible."
-
-msgid "Missing Fibre Channel SAN configuration param - fc_fabric_names"
-msgstr ""
-"Falta parámetro de configuración de SAN de canal de fibra - fc_fabric_names"
-
-msgid "Missing request body"
-msgstr "Falta el cuerpo de la solicitud"
-
-msgid "Missing request body."
-msgstr "No se ha hallado el cuerpo de la solicitud."
-
-#, python-format
-msgid "Missing required element '%(element)s' in request body."
-msgstr ""
-"Falta el elemento obligatorio '%(element)s' en el cuerpo de la solicitud."
-
-#, python-format
-msgid "Missing required element '%s' in request body"
-msgstr "Falta el elemento requerido '%s' en el cuerpo de la solicitud"
-
-#, python-format
-msgid "Missing required element '%s' in request body."
-msgstr "Falta el elemento obligatorio '%s' en el cuerpo de la solicitud."
-
-msgid "Missing required element 'consistencygroup' in request body."
-msgstr ""
-"Falta el elemento obligatorio 'consistencygroup' en el cuerpo de la "
-"solicitud."
-
-msgid "Missing required element quota_class_set in request body."
-msgstr "Falta el elemento necesario quota_class_set en cuerpo de solicitud."
-
-msgid "Missing required element snapshot in request body."
-msgstr ""
-"Falta el elemento obligatorio snapshot en el cuerpo de la solicitud."
-
-msgid ""
-"Multiple SerialNumbers found, when only one was expected for this operation. "
-"Please change your EMC config file."
-msgstr ""
-"Se han encontrado varios SerialNumbers, cuando sólo se esperaba uno para "
-"esta operación. Cambie el archivo de configuración EMC."
-
-#, python-format
-msgid "Multiple copies of volume %s found."
-msgstr "Se han encontrado varias copias del volumen %s."
-
-#, python-format
-msgid "Multiple matches found for '%s', use an ID to be more specific."
-msgstr ""
-"Se han encontrado varias coincidencias para '%s', utilice un ID para ser más "
-"específico."
-
-msgid "Multiple profiles found."
-msgstr "Se han encontrado varios perfiles."
-
-msgid "Must implement a fallback schedule"
-msgstr "Debe implementar una planificación alternativa"
-
-msgid "Must implement find_retype_host"
-msgstr "Debe implementar find_retype_host"
-
-msgid "Must implement host_passes_filters"
-msgstr "Debe implementar host_passes_filters"
-
-msgid "Must implement schedule_create_consistencygroup"
-msgstr "Es necesario implementar schedule_create_consistencygroup"
-
-msgid "Must implement schedule_create_volume"
-msgstr "Debe implementar schedule_create_volume"
-
-msgid "Must implement schedule_get_pools"
-msgstr "Es necesario implementar schedule_get_pools"
-
-msgid "Must pass wwpn or host to lsfabric."
-msgstr "Se debe pasar wwpn o host a lsfabric."
-
-msgid ""
-"Must run this command as cloud admin using a Keystone policy.json which "
-"allows cloud admin to list and get any project."
-msgstr ""
-"Debe ejecutar este comando como administrador de nube utilizando un archivo "
-"policy.json de Keystone, que permite al administrador de nube listar y "
-"obtener cualquier proyecto."
-
-msgid "Must specify 'connector'"
-msgstr "Debe especificar 'connector'"
-
-msgid "Must specify 'connector'."
-msgstr "Debe especificar 'connector'."
-
-msgid "Must specify 'host'."
-msgstr "Debe especificar 'host'."
- -msgid "Must specify 'new_volume'" -msgstr "Debe especificar 'new_volume'" - -msgid "Must specify 'status'" -msgstr "Debe especificar 'status'" - -msgid "" -"Must specify 'status', 'attach_status' or 'migration_status' for update." -msgstr "" -"Debe especificar 'status', 'attach_status' o 'migration_status' para la " -"actualización." - -msgid "Must specify a valid attach status" -msgstr "Debe especificar un estado de conexión válido" - -msgid "Must specify a valid migration status" -msgstr "Debe especificar un estado de migración válido" - -#, python-format -msgid "Must specify a valid persona %(valid)s,value '%(persona)s' is invalid." -msgstr "" -"Debe especificar un valor válido de persona %(valid)s, el valor " -"'%(persona)s' no es válido." - -#, python-format -msgid "" -"Must specify a valid provisioning type %(valid)s, value '%(prov)s' is " -"invalid." -msgstr "" -"Debe especificar un tipo de suministro válido %(valid)s, el valor '%(prov)s' " -"no es válido." - -msgid "Must specify a valid status" -msgstr "Debe especificar un estado válido" - -msgid "Must specify an ExtensionManager class" -msgstr "Debe especificar una clase ExtensionManager" - -msgid "Must specify bootable in request." -msgstr "Debe especificar bootable en la solicitud." - -msgid "Must specify protection domain name or protection domain id." -msgstr "" -"Se debe especificar el nombre del dominio de protección o el ID del dominio " -"de protección." - -msgid "Must specify readonly in request." -msgstr "Debe especificar sólo lectura en solicitud." - -msgid "Must specify snapshot source-name or source-id." -msgstr "" -"Se debe especificar el nombre de origen o el ID de origen de la instantánea." - -msgid "Must specify source-name or source-id." -msgstr "Debe especificar source-name o source-id." - -msgid "Must specify storage pool name or id." -msgstr "" -"Se debe especificar el nombre o el ID de la agrupación de almacenamiento." - -msgid "Must specify storage pools. 
Option: sio_storage_pools." -msgstr "" -"Se deben especificar las agrupaciones de almacenamiento. Opción: " -"sio_storage_pools." - -msgid "Must supply a positive, non-zero value for age" -msgstr "Debe proporcionar un valor positivo distinto de cero para la edad" - -#, python-format -msgid "" -"NAS config '%(name)s=%(value)s' invalid. Must be 'auto', 'true', or 'false'" -msgstr "" -"La configuración de NAS '%(name)s=%(value)s' no es válida. Debe ser 'auto', " -"'true' o 'false'" - -#, python-format -msgid "NFS config file at %(config)s doesn't exist" -msgstr "El archivo de configuración de NFS en %(config)s no existe" - -#, python-format -msgid "NFS file %s not discovered." -msgstr "Archivo NFS %s no descubierto." - -msgid "NFS file could not be discovered." -msgstr "El archivo NFS no se ha podido descubrir." - -msgid "NaElement name cannot be null." -msgstr "El nombre de NaElement no puede ser nulo." - -msgid "Name" -msgstr "Nombre" - -msgid "" -"Name, description, add_volumes, and remove_volumes can not be all empty in " -"the request body." -msgstr "" -"Nombre, descripción, add_volumes y remove_volumes no pueden estar vacíos en " -"el cuerpo de la solicitud." - -msgid "Need non-zero volume size" -msgstr "Se necesita un tamaño de volumen distinto de cero" - -#, python-format -msgid "Neither MSG_DENIED nor MSG_ACCEPTED: %r" -msgstr "Ni MSG_DENIED ni MSG_ACCEPTED: %r" - -msgid "NetApp Cinder Driver exception." -msgstr "Excepción de controlador NetApp Cinder." - -#, python-format -msgid "" -"New size for extend must be greater than current size. (current: %(size)s, " -"extended: %(new_size)s)." -msgstr "" -"El nuevo tamaño para ampliar debe ser mayor que el tamaño actual. (actual: " -"%(size)s, ampliado: %(new_size)s)." - -#, python-format -msgid "" -"New size should be bigger than the real size from backend storage. realsize: " -"%(oldsize)s, newsize: %(newsize)s." -msgstr "" -"El nuevo tamaño debe ser mayor que el tamaño real del almacenamiento de " -"fondo. 
tamaño real: %(oldsize)s, tamaño nuevo: %(newsize)s."
-
-msgid "New volume size must be specified as an integer."
-msgstr "Se debe especificar el nuevo tamaño de volumen como un entero."
-
-msgid "New volume type must be specified."
-msgstr "Se debe especificar tipo de volumen nuevo."
-
-msgid "New volume type not specified in request_spec."
-msgstr "No se ha especificado el tipo de volumen nuevo en request_spec."
-
-msgid "Nimble Cinder Driver exception"
-msgstr "Excepción de controlador Nimble Cinder"
-
-msgid "No FC initiator can be added to host."
-msgstr "No se puede añadir ningún iniciador FC al host."
-
-msgid "No FC port connected to fabric."
-msgstr "No hay ningún puerto FC conectado al tejido."
-
-msgid "No FCP targets found"
-msgstr "No se han encontrado destinos FCP"
-
-msgid "No Port Group elements found in config file."
-msgstr ""
-"No se han encontrado elementos de grupo de puertos en el archivo de "
-"configuración."
-
-msgid "No VF ID is defined in the configuration file."
-msgstr "No se ha definido ningún ID de VF en el archivo de configuración."
-
-msgid "No active iSCSI portals with supplied iSCSI IPs"
-msgstr "No hay portales iSCSI activos con las IP iSCSI proporcionadas"
-
-#, python-format
-msgid "No available service named %s"
-msgstr "No hay ningún servicio disponible denominado %s"
-
-#, python-format
-msgid "No backup with id %s"
-msgstr "No hay ninguna copia de seguridad con el ID %s"
-
-msgid "No backups available to do an incremental backup."
-msgstr ""
-"No hay copias de seguridad disponibles para hacer una copia de seguridad "
-"incremental."
-
-msgid "No big enough free disk"
-msgstr "No hay suficiente espacio libre en el disco"
-
-#, python-format
-msgid "No cgsnapshot with id %s"
-msgstr "No hay ningún cgsnapshot con el ID %s"
-
-msgid "No cinder entries in syslog!"
-msgstr "No hay entradas de Cinder en syslog" - -#, python-format -msgid "No cloned LUN named %s found on the filer" -msgstr "" -"No se ha encontrado ningún LUN clonado denominado %s en el gestor de archivos" - -msgid "No config node found." -msgstr "No se ha encontrado ningún nodo de configuración." - -#, python-format -msgid "No consistency group with id %s" -msgstr "No existe ningún grupo de consistencia con el id %s" - -#, python-format -msgid "No element by given name %s." -msgstr "No hay ningún elemento con el nombre indicado %s." - -msgid "No errors in logfiles!" -msgstr "¡No hay errores en los ficheros de log!" - -#, python-format -msgid "No file found with %s as backing file." -msgstr "No se ha encontrado el archivo con %s como archivo de respaldo." - -#, python-format -msgid "" -"No free LUN IDs left. Maximum number of volumes that can be attached to host " -"(%s) has been exceeded." -msgstr "" -"No queda libre ningún ID de LUN. El número máximo de volúmenes que se puede " -"conectar al host (%s) se ha superado." - -msgid "No free disk" -msgstr "No hay disco libre" - -#, python-format -msgid "No good iscsi portal found in supplied list for %s." -msgstr "" -"No se ha encontrado ningún portal iscsi bueno en la lista proporcionada para " -"%s." - -#, python-format -msgid "No good iscsi portals found for %s." -msgstr "No se ha encontrado ningún portal iscsi bueno para %s." - -#, python-format -msgid "No host to create consistency group %s." -msgstr "No hay host para crear grupo de consistencia%s." - -msgid "No iSCSI-enabled ports on target array." -msgstr "No hay puertos habilitados para iSCSI en la matriz de destino." - -msgid "No image_name was specified in request." -msgstr "" -"No se ha especificado ningún nombre de imagen (image_name) en la solicitud." - -msgid "No initiator connected to fabric." -msgstr "No hay ningún iniciador conectado al tejido." 
- -#, python-format -msgid "No initiator group found for initiator %s" -msgstr "No se ha encontrado ningún grupo de iniciadores para el iniciador %s" - -msgid "No initiators found, cannot proceed" -msgstr "No se han encontrado iniciadores, no se puede continuar" - -#, python-format -msgid "No interface found on cluster for ip %s" -msgstr "No se ha encontrado ninguna interfaz en el clúster para la IP %s" - -msgid "No ip address found." -msgstr "No se ha encontrado la dirección IP." - -msgid "No iscsi auth groups were found in CloudByte." -msgstr "No se ha encontrado ningún grupo de autenticación iscsi en CloudByte." - -msgid "No iscsi initiators were found in CloudByte." -msgstr "No se ha encontrado ningún iniciador iscsi en CloudByte." - -#, python-format -msgid "No iscsi service found for CloudByte volume [%s]." -msgstr "" -"No se ha encontrado ningún servicio iscsi para el volumen de CloudByte [%s]." - -msgid "No iscsi services found in CloudByte storage." -msgstr "" -"No se ha encontrado ningún servicio iscsi en el almacenamiento de CloudByte." - -#, python-format -msgid "No key file specified and unable to load key from %(cert)s %(e)s." -msgstr "" -"No se ha especificado archivo de claves y no se puede cargar la clave desde " -"%(cert)s %(e)s." - -msgid "No mounted Gluster shares found" -msgstr "No se han encontrado unidades compartidas Gluster montadas" - -msgid "No mounted NFS shares found" -msgstr "No se han encontrado unidades compartidas NFS montadas" - -msgid "No mounted SMBFS shares found." -msgstr "No se han encontrado unidades compartidas SMBFS montadas" - -msgid "No mounted Virtuozzo Storage shares found" -msgstr "No se han encontrado unidades compartidas de Virtuozzo Storage" - -msgid "No mounted shares found" -msgstr "No se han encontrado unidades compartidas montadas" - -#, python-format -msgid "No node found in I/O group %(gid)s for volume %(vol)s." -msgstr "" -"No se ha encontrado ningún nodo en el grupo de E/S %(gid)s del volumen " -"%(vol)s." 
- -msgid "" -"No pools are available for provisioning volumes. Ensure that the " -"configuration option netapp_pool_name_search_pattern is set correctly." -msgstr "" -"No hay agrupaciones disponibles para el suministro de volúmenes. Asegúrese " -"de que la opción de configuración netapp_pool_name_search_pattern se haya " -"establecido correctamente." - -msgid "" -"No response was received from CloudByte storage list iSCSI auth user API " -"call." -msgstr "" -"No se ha recibido ninguna respuesta desde la llamada de API de usuario de " -"autenticación iSCSI de la lista de almacenamiento CloudByte ." - -msgid "No response was received from CloudByte storage list tsm API call." -msgstr "" -"No se ha recibido ninguna respuesta de la llamada a la API de tsm de la " -"lista de almacenamiento de CloudByte." - -msgid "No response was received from CloudByte's list filesystem api call." -msgstr "" -"No se ha recibido ninguna respuesta de la llamada a la API del sistema de " -"archivos de la lista de CloudByte." - -msgid "No service VIP configured and no nexenta_client_address" -msgstr "" -"No se ha configurado ningún servicio VIP y no hay ninguna " -"nexenta_client_address" - -#, python-format -msgid "No snap found with %s as backing file." -msgstr "No se ha encontrado archivo con %s como archivo de respaldo." - -#, python-format -msgid "No snapshot image found in snapshot group %s." -msgstr "" -"No se ha encontrado ninguna imagen de instantánea en el grupo de " -"instantáneas %s." - -#, python-format -msgid "No snapshots could be found on volume %s." -msgstr "No se han podido encontrar instantáneas en el volumen %s." - -#, python-format -msgid "No source snapshots provided to create consistency group %s." -msgstr "" -"No se han proporcionado instantáneas de origen para crear el grupo de " -"consistencia %s." 
- -#, python-format -msgid "No storage path found for export path %s" -msgstr "" -"No se ha encontrado ninguna vía de acceso de almacenamiento para la vía de " -"acceso de exportación %s" - -#, python-format -msgid "No such QoS spec %(specs_id)s." -msgstr "No hay especificaciones de QoS %(specs_id)s." - -msgid "No suitable discovery ip found" -msgstr "No se ha encontrado ningún IP de descubrimiento adecuado" - -#, python-format -msgid "No support to restore backup version %s" -msgstr "No hay soporte para restaurar la versión de copia de seguridad %s" - -#, python-format -msgid "No target id found for volume %(volume_id)s." -msgstr "" -"No se ha encontrado ningún ID de destino para el volumen %(volume_id)s." - -msgid "" -"No unused LUN IDs are available on the host; multiattach is enabled which " -"requires that all LUN IDs to be unique across the entire host group." -msgstr "" -"No hay disponibles ID de LUN sin utilizar en el host; la conexión múltiple " -"está habilitada, lo cual requiere que todos los ID de LUN sean exclusivos en " -"todo el grupo de hosts." - -#, python-format -msgid "No valid host was found. %(reason)s" -msgstr "No se ha encontrado ningún host válido. %(reason)s" - -#, python-format -msgid "No valid hosts for volume %(id)s with type %(type)s" -msgstr "No hay hosts válidos para el volumen %(id)s con el tipo %(type)s" - -msgid "No valid ports." -msgstr "No hay puertos válidos." - -#, python-format -msgid "No vdisk with the UID specified by ref %s." -msgstr "No hay ningún vdisk con el UID especificado en ref %s." - -#, python-format -msgid "No views found for LUN: %s" -msgstr "No se ha encontrado ninguna vista para el LUN: %s" - -#, python-format -msgid "" -"No volume on cluster with vserver %(vserver)s and junction path %(junction)s " -msgstr "" -"No hay ningún volumen en el clúster con vserver %(vserver)s y vía de acceso " -"de cruce %(junction)s " - -msgid "No volume service(s) started successfully, terminating." 
-msgstr ""
-"No se ha iniciado correctamente ningún servicio de volumen, terminando."
-
-msgid "No volume was found at CloudByte storage."
-msgstr "No se ha encontrado ningún volumen en el almacenamiento de CloudByte."
-
-msgid "No volume_type should be provided when creating test replica."
-msgstr ""
-"No debe proporcionarse ningún volume_type cuando se crea la réplica de "
-"prueba."
-
-msgid "No volumes found in CloudByte storage."
-msgstr "No se ha encontrado ningún volumen en el almacenamiento de CloudByte."
-
-msgid "No weighed hosts available"
-msgstr "No hay hosts ponderados disponibles"
-
-#, python-format
-msgid "Not a valid string: %s"
-msgstr "Cadena no válida: %s"
-
-msgid "Not a valid value for NaElement."
-msgstr "Valor no válido para NaElement."
-
-#, python-format
-msgid "Not able to find a suitable datastore for the volume: %s."
-msgstr ""
-"No se puede encontrar un almacén de datos adecuado para el volumen: %s."
-
-msgid "Not an rbd snapshot"
-msgstr "No es una instantánea rbd"
-
-#, python-format
-msgid "Not authorized for image %(image_id)s."
-msgstr "No está autorizado para la imagen %(image_id)s."
-
-msgid "Not authorized."
-msgstr "No autorizado."
-
-#, python-format
-msgid "Not enough space on backend (%(backend)s)"
-msgstr "No hay espacio suficiente en el programa de fondo (%(backend)s)"
-
-msgid "Not enough storage space in the ZFS share to perform this operation."
-msgstr ""
-"No hay espacio de almacenamiento suficiente en la unidad compartida ZFS para "
-"realizar esta operación."
-
-msgid "Not stored in rbd"
-msgstr "No está almacenado en rbd"
-
-msgid "Nova returned \"error\" status while creating snapshot."
-msgstr "Nova devolvió el estado \"error\" mientras creaba la instantánea."
-
-msgid "Null response received from CloudByte's list filesystem."
-msgstr ""
-"Se ha recibido una respuesta nula desde el sistema de archivos de la lista "
-"de CloudByte."
-
-msgid "Null response received from CloudByte's list iscsi auth groups."
-msgstr "" -"Se ha recibido una respuesta nula desde los grupos de autenticación iscsi de " -"la lista de CloudByte." - -msgid "Null response received from CloudByte's list iscsi initiators." -msgstr "" -"Se ha recibido una respuesta nula desde los iniciadores iscsi de la lista de " -"CloudByte." - -msgid "Null response received from CloudByte's list volume iscsi service." -msgstr "" -"Se ha recibido una respuesta nula desde el servicio iscsi del volumen de la " -"lista de CloudByte." - -#, python-format -msgid "Null response received while creating volume [%s] at CloudByte storage." -msgstr "" -"Respuesta nula recibida al crear el volumen [%s] en el almacenamiento de " -"CloudByte." - -#, python-format -msgid "Null response received while deleting volume [%s] at CloudByte storage." -msgstr "" -"Respuesta nula recibida al suprimir el volumen [%s] en el almacenamiento de " -"CloudByte." - -#, python-format -msgid "" -"Null response received while querying for [%(operation)s] based job " -"[%(job)s] at CloudByte storage." -msgstr "" -"Se ha recibido una respuesta nula al realizar la consulta para el trabajo " -"basado en [%(operation)s] [%(job)s] en el almacenamiento CloudByte." - -msgid "Object Count" -msgstr "Recuento de objetos" - -msgid "Object Version" -msgstr "Versión del objeto" - -msgid "Object is not a NetApp LUN." -msgstr "El objeto no es un LUN de NetApp." - -#, python-format -msgid "" -"On an Extend Operation, error adding volume to composite volume: " -"%(volumename)s." -msgstr "" -"En una operación de ampliación, error al añadir volumen al volumen " -"compuesto: %(volumename)s." - -msgid "One of the required inputs from host, port or scheme was not found." -msgstr "" -"No se ha encontrado una de las entradas necesarias procedentes del host, " -"puerto o esquema." - -#, python-format -msgid "" -"Only %(value)s %(verb)s request(s) can be made to %(uri)s every " -"%(unit_string)s." 
-msgstr "" -"Solo se pueden realizar %(value)s solicitud(es) de %(verb)s para %(uri)s " -"cada %(unit_string)s." - -msgid "Only one limit can be set in a QoS spec." -msgstr "Sólo se puede establecer un límite en una especificación QoS." - -msgid "" -"Only users with token scoped to immediate parents or root projects are " -"allowed to see its children quotas." -msgstr "" -"Sólo los usuarios con ámbito de señal para padres inmediatos o proyectos " -"root pueden ver las cuotas hijo." - -msgid "Only volumes managed by OpenStack can be unmanaged." -msgstr "" -"Solo los volúmenes gestionados por OpenStack pueden dejarse de gestionar." - -#, python-format -msgid "Operation failed with status=%(status)s. Full dump: %(data)s" -msgstr "" -"Se ha encontrado un error en la operación con el estado=%(status)s. Volcado " -"completo: %(data)s" - -#, python-format -msgid "Operation not supported: %(operation)s." -msgstr "Operación no admitida: %(operation)s." - -msgid "Option gpfs_images_dir is not set correctly." -msgstr "La opción gpfs_images_dir no se ha establecido correctamente." - -msgid "Option gpfs_images_share_mode is not set correctly." -msgstr "La opción gpfs_images_share_mode no se ha establecido correctamente." - -msgid "Option gpfs_mount_point_base is not set correctly." -msgstr "La opción gpfs_mount_point_base no se ha establecido correctamente." - -#, python-format -msgid "Originating %(res)s %(prop)s must be one of '%(vals)s' values" -msgstr "%(res)s %(prop)s de origen debe ser uno de los valores '%(vals)s'" - -msgid "Param [identifier] is invalid." -msgstr "El parámetro [identifier] no es válido." - -msgid "Param [lun_name] is invalid." -msgstr "El parámetro [lun_name] no es válido." - -#, python-format -msgid "ParseException: %s" -msgstr "ParseException: %s" - -msgid "Partition name is None, please set smartpartition:partitionname in key." -msgstr "" -"Nombre de la partición es Ninguno, establezca smartpartition:partitionname " -"en clave." 
- -msgid "" -"Password or SSH private key is required for authentication: set either " -"san_password or san_private_key option." -msgstr "" -"La contraseña o clave privada SSH es necesaria para la autenticación: " -"establezca la opción san_password o san_private_key." - -msgid "Path to REST server's certificate must be specified." -msgstr "Se debe especificar la vía de acceso al certificado del servidor REST." - -#, python-format -msgid "Please create %(pool_list)s pool in advance!" -msgstr "Cree la agrupación %(pool_list)s con antelación." - -#, python-format -msgid "Please create %(tier_levels)s tier in pool %(pool)s in advance!" -msgstr "" -"Cree el nivel %(tier_levels)s en la agrupación %(pool)s con antelación." - -msgid "Please specify a name for QoS specs." -msgstr "Especifique un nombre para especificaciones de QoS." - -#, python-format -msgid "Policy doesn't allow %(action)s to be performed." -msgstr "La política no permite realizar %(action)s. " - -#, python-format -msgid "Pool %(poolNameInStr)s is not found." -msgstr "La agrupación %(poolNameInStr)s no se ha encontrado." - -#, python-format -msgid "Pool %s does not exist in Nexenta Store appliance" -msgstr "La agrupación %s no existe en la aplicación Nexenta Store" - -#, python-format -msgid "Pool from volume['host'] %(host)s not found." -msgstr "No se ha encontrado la agrupación del volumen['host'] %(host)s." - -#, python-format -msgid "Pool from volume['host'] failed with: %(ex)s." -msgstr "La agrupación del volumen ['host'] ha fallado con: %(ex)s." - -msgid "Pool is not available in the volume host field." -msgstr "La agrupación no está disponible en el campo del host del volumen." - -msgid "Pool is not available in the volume host fields." -msgstr "La agrupación no está disponibles en los campos de host del volumen." - -#, python-format -msgid "Pool with name %(pool)s wasn't found in domain %(domain)s." 
-msgstr "" -"No se ha encontrado la agrupación con el nombre %(pool)s en el dominio " -"%(domain)s." - -#, python-format -msgid "Pool with name %(pool_name)s wasn't found in domain %(domain_id)s." -msgstr "" -"No se ha encontrado la agrupación con el nombre %(pool_name)s en el dominio " -"%(domain_id)s." - -#, python-format -msgid "" -"Pool: %(poolName)s. is not associated to storage tier for fast policy " -"%(fastPolicy)s." -msgstr "" -"La agrupación %(poolName)s no está asociada con el nivel de almacenamiento " -"de la política fast %(fastPolicy)s." - -#, python-format -msgid "PoolName must be in the file %(fileName)s." -msgstr "PoolName debe estar en el archivo %(fileName)s." - -#, python-format -msgid "Pools %s does not exist" -msgstr "La agrupaciones %s no existen" - -msgid "Pools name is not set." -msgstr "No se ha establecido el nombre de agrupaciones." - -#, python-format -msgid "Primary copy status: %(status)s and synchronized: %(sync)s." -msgstr "Estado de copia primaria: %(status)s y sincronizado: %(sync)s." - -#, python-format -msgid "Programming error in Cinder: %(reason)s" -msgstr "Error de programación en Cinder: %(reason)s" - -msgid "Project ID" -msgstr "ID del proyecto" - -#, python-format -msgid "Project quotas are not properly setup for nested quotas: %(reason)s." -msgstr "" -"No se han configurado correctamente las cuotas del proyecto para las cuotas " -"anidadas: %(reason)s." - -msgid "Protection Group not ready." -msgstr "El grupo de protección no está preparado." - -#, python-format -msgid "" -"Protocol %(storage_protocol)s is not supported for storage family " -"%(storage_family)s." -msgstr "" -"El protocolo %(storage_protocol)s no es admitido para la familia de " -"almacenamiento %(storage_family)s." - -msgid "Provided backup record is missing an id" -msgstr "Al registro de copia de seguridad proporcionado le falta un ID." 
- -#, python-format -msgid "" -"Provided snapshot status %(provided)s not allowed for snapshot with status " -"%(current)s." -msgstr "" -"El estado de instantánea proporcionado %(provided)s no está permitido para " -"instantánea con estado %(current)s." - -#, python-format -msgid "" -"Provider information w.r.t CloudByte storage was not found for OpenStack " -"volume [%s]." -msgstr "" -"No se ha encontrado la información del proveedor sobre el almacenamiento de " -"CloudByte para el volumen de OpenStack [%s]." - -#, python-format -msgid "Pure Storage Cinder driver failure: %(reason)s" -msgstr "Error de controlador Pure Storage Cinder: %(reason)s" - -#, python-format -msgid "QoS Specs %(specs_id)s already exists." -msgstr "Las especificaciones de QoS %(specs_id)s ya existen." - -#, python-format -msgid "QoS Specs %(specs_id)s is still associated with entities." -msgstr "Especificaciones de QoS %(specs_id)s está asociado con las entidades." - -#, python-format -msgid "QoS config is wrong. %s must > 0." -msgstr "La configuración de QoS es incorrecta. %s debe ser > 0." - -#, python-format -msgid "" -"QoS policy must specify for IOTYPE and another qos_specs, QoS policy: " -"%(qos_policy)s." -msgstr "" -"La política QoS debe especificar para IOTYPE y otras qos_specs. Política " -"QoS: %(qos_policy)s " - -#, python-format -msgid "" -"QoS policy must specify for IOTYPE: 0, 1, or 2, QoS policy: %(qos_policy)s " -msgstr "" -"La política QoS debe especificar para IOTYPE el valor: 0, 1, o 2. Política " -"QoS: %(qos_policy)s " - -#, python-format -msgid "" -"QoS policy upper_limit and lower_limit conflict, QoS policy: %(qos_policy)s." -msgstr "" -"Conflicto entre upper_limit y lower_limit en la política QoS. Política QoS: " -"%(qos_policy)s." - -#, python-format -msgid "QoS spec %(specs_id)s has no spec with key %(specs_key)s." -msgstr "" -"La especificación QoS %(specs_id)s no tiene especificación con clave " -"%(specs_key)s." 
- -msgid "QoS specs are not supported on this storage family and ONTAP version." -msgstr "" -"Las especificaciones QoS no se admiten en esta familia de almacenamiento y " -"versión ONTAP." - -msgid "Qos specs still in use." -msgstr "Especificaciones de QoS aún en uso." - -msgid "" -"Query by service parameter is deprecated. Please use binary parameter " -"instead." -msgstr "" -"La consulta por parámetro de servicio está en desuso. Use el parámetro " -"binario en su lugar." - -msgid "Query resource pool error." -msgstr "Error al consultar la agrupación de recursos." - -#, python-format -msgid "Quota %s limit must be equal or greater than existing resources." -msgstr "" -"El límite de cuota %s debe ser igual o mayor que los recursos existentes." - -#, python-format -msgid "Quota class %(class_name)s could not be found." -msgstr "No se ha podido encontrar la clase de cuota %(class_name)s." - -msgid "Quota could not be found" -msgstr "No se ha podido encontrar la cuota" - -#, python-format -msgid "Quota exceeded for resources: %(overs)s" -msgstr "Cuota superada para recursos: %(overs)s" - -#, python-format -msgid "Quota exceeded: code=%(code)s" -msgstr "Cuota excedida: código=%(code)s" - -#, python-format -msgid "Quota for project %(project_id)s could not be found." -msgstr "No se ha podido encontrar la cuota para el proyecto %(project_id)s." - -#, python-format -msgid "" -"Quota limit invalid for project '%(proj)s' for resource '%(res)s': limit of " -"%(limit)d is less than in-use value of %(used)d" -msgstr "" -"Límite de cuota no válido para el proyecto '%(proj)s' para el recurso " -"'%(res)s': el límite de %(limit)d es menor que el valor en uso, de %(used)d" - -#, python-format -msgid "Quota reservation %(uuid)s could not be found." -msgstr "No se ha podido encontrar la reserva de cuota %(uuid)s." - -#, python-format -msgid "Quota usage for project %(project_id)s could not be found." 
-msgstr "" -"No se ha podido encontrar el uso de cuota para el proyecto %(project_id)s." - -#, python-format -msgid "RBD diff op failed - (ret=%(ret)s stderr=%(stderr)s)" -msgstr "Op. dif. RBD ha fallado - (ret=%(ret)s stderr=%(stderr)s)" - -#, python-format -msgid "REST %(proxy_ver)s hpelefthandclient %(rest_ver)s" -msgstr "REST %(proxy_ver)s hpelefthandclient %(rest_ver)s" - -msgid "REST server IP must by specified." -msgstr "Se debe especificar la IP de servidor REST." - -msgid "REST server password must by specified." -msgstr "Se debe especificar la contraseña del servidor REST." - -msgid "REST server username must by specified." -msgstr "Se debe especificar el nombre de usuario del servidor REST." - -msgid "RPC Version" -msgstr "Versión de RPC" - -msgid "RPC server response is incomplete" -msgstr "La respuesta del servidor RPC es incompleta" - -msgid "Raid did not have MCS Channel." -msgstr "Raid no tiene el canal MCS." - -#, python-format -msgid "Received error string: %s" -msgstr "Serie de error recibida: %s" - -msgid "Reference must be for an unmanaged snapshot." -msgstr "La referencia debe ser para una instantánea no gestionada." - -msgid "Reference must be for an unmanaged virtual volume." -msgstr "La referencia debe ser para un volumen virtual no gestionado." - -msgid "Reference must be the name of an unmanaged snapshot." -msgstr "La referencia debe ser el nombre de una instantánea no gestionada." - -msgid "Reference must be the volume name of an unmanaged virtual volume." -msgstr "" -"La referencia debe ser el nombre de volumen de un volumen virtual no " -"gestionado." - -msgid "Reference must contain either source-name or source-id element." -msgstr "La referencia debe contener el elemento source-name o source-id." - -msgid "Reference must contain source-id or source-name element." -msgstr "La referencia debe contener el elemento id-source o source-name." - -msgid "Reference must contain source-id or source-name key." 
-msgstr "La referencia debe contener la clave source-id o source-name." - -msgid "Reference must contain source-id or source-name." -msgstr "La referencia debe contener source-id o source-name." - -msgid "Reference must contain source-id." -msgstr "La referencia debe contener el source-id." - -msgid "Reference must contain source-name element." -msgstr "La referencia debe contener el elemento source-name." - -msgid "Reference must contain source-name or source-id." -msgstr "La referencia debe contener source-name o source-id." - -msgid "Reference must contain source-name." -msgstr "La referencia debe contener el elemento source-name." - -msgid "Reference to volume to be managed must contain source-name." -msgstr "" -"La referencia al volumen a gestionar debe contener el elemento source-name." - -#, python-format -msgid "Reference to volume: %s to be managed must contain source-name." -msgstr "" -"La referencia al volumen: %s a gestionar debe contener el elemento source-" -"name." - -#, python-format -msgid "" -"Refusing to migrate volume ID: %(id)s. Please check your configuration " -"because source and destination are the same Volume Group: %(name)s." -msgstr "" -"Rechazando la migración del ID de volumen: %(id)s. Compruebe la " -"configuración, ya que el origen y el destino son el mismo grupo de " -"volúmenes: %(name)s." - -msgid "Remote pool cannot be found." -msgstr "No se puede encontrar la agrupación remota." - -msgid "Remove CHAP error." -msgstr "Error al eliminar CHAP." - -msgid "Remove fc from host error." -msgstr "Error al eliminar fc del host." - -msgid "Remove host from array error." -msgstr "Error al eliminar el host de la matriz." - -msgid "Remove host from hostgroup error." -msgstr "Error al eliminar el host del grupo de host." - -msgid "Remove iscsi from host error." -msgstr "Error al eliminar iscsi del host." - -msgid "Remove lun from QoS error." -msgstr "Error al eliminar LUN de QoS ." - -msgid "Remove lun from cache error." 
-msgstr "Error al eliminar lun de la caché." - -msgid "Remove lun from partition error." -msgstr "Error al eliminar lun de la partición." - -msgid "Remove port from port group error." -msgstr "Error al eliminar el puerto del grupo de puertos." - -msgid "Remove volume export failed." -msgstr "Error al eliminar la exportación del volumen." - -msgid "Rename lun on array error." -msgstr "Error al renombrar lun en la matriz." - -msgid "Rename snapshot on array error." -msgstr "Error al renombrar la instantánea en la matriz." - -#, python-format -msgid "Replication %(name)s to %(ssn)s failed." -msgstr "Ha fallado la replicación %(name)s a %(ssn)s." - -#, python-format -msgid "Replication Service Capability not found on %(storageSystemName)s." -msgstr "" -"La capacidad del servicio de réplica no se ha encontrado en " -"%(storageSystemName)s." - -#, python-format -msgid "Replication Service not found on %(storageSystemName)s." -msgstr "El servicio de réplica no se ha encontrado en %(storageSystemName)s." - -msgid "Replication not allowed yet." -msgstr "Aún no se permite la replicación." - -msgid "Request body and URI mismatch" -msgstr "Discrepancia de URI y cuerpo de solicitud" - -msgid "Request body contains too many items" -msgstr "El cuerpo de solicitud contiene demasiados elementos" - -msgid "Request body contains too many items." -msgstr "El cuerpo de solicitud contiene demasiados elementos." - -msgid "Request body empty" -msgstr "Cuerpo de la solicitud vacío" - -#, python-format -msgid "Request to Datera cluster returned bad status: %(status)s | %(reason)s" -msgstr "" -"La solicitud al clúster de Datera ha devuelto un estado incorrecto: " -"%(status)s | %(reason)s" - -#, python-format -msgid "" -"Requested backup exceeds allowed Backup gigabytes quota. Requested " -"%(requested)sG, quota is %(quota)sG and %(consumed)sG has been consumed." -msgstr "" -"La copia de seguridad que se ha solicitado supera la cuota de gigabytes " -"permitida para copias de seguridad. 
Se ha solicitado %(requested)sG, la " -"cuota es %(quota)sG y se ha consumido %(consumed)sG." - -#, python-format -msgid "" -"Requested volume or snapshot exceeds allowed %(name)s quota. Requested " -"%(requested)sG, quota is %(quota)sG and %(consumed)sG has been consumed." -msgstr "" -"El volumen o la instantánea que se ha solicitado supera la cuota %(name)s " -"permitida. Se ha solicitado %(requested)sG, la cuota es %(quota)sG y se ha " -"consumido %(consumed)sG." - -#, python-format -msgid "" -"Requested volume size %(size)d is larger than maximum allowed limit " -"%(limit)d." -msgstr "" -"El tamaño del volumen solicitado %(size)d es mayor que el límite máximo " -"permitido %(limit)d." - -msgid "Required configuration not found" -msgstr "Configuración necesaria no encontrada" - -#, python-format -msgid "Required flag %s is not set" -msgstr "El distintivo necesario %s no se ha establecido" - -#, python-format -msgid "" -"Reset backup status aborted, the backup service currently configured " -"[%(configured_service)s] is not the backup service that was used to create " -"this backup [%(backup_service)s]." -msgstr "" -"El restablecimiento del estado de la copia de seguridad ha terminado " -"anormalmente, el servicio de copia de seguridad configurado actualmente " -"[%(configured_service)s] no es el servicio de copia de seguridad que se usó " -"para crear esta copia de seguridad [%(backup_service)s]." - -#, python-format -msgid "Resizing clone %s failed." -msgstr "Error al cambiar el tamaño de clononación %s." - -msgid "Resizing image file failed." -msgstr "Cambiar tamaño de archivo de imagen ha fallado." - -msgid "Resource could not be found." -msgstr "No se ha podido encontrar el recurso." - -msgid "Resource not ready." -msgstr "Recurso no preparado." - -#, python-format -msgid "Response error - %s." -msgstr "Error de respuesta - %s." - -msgid "Response error - The storage-system is offline." 
-msgstr "Error de respuesta - El sistema de almacenamiento está fuera de línea." - -#, python-format -msgid "Response error code - %s." -msgstr "Código de error de respuesta - %s." - -msgid "RestURL is not configured." -msgstr "RestURL no está configurado." - -#, python-format -msgid "" -"Restore backup aborted, expected volume status %(expected_status)s but got " -"%(actual_status)s." -msgstr "" -"La restauración de la copia de seguridad ha terminado anormalmente, se " -"esperaba el estado de volumen %(expected_status)s pero se ha obtenido " -"%(actual_status)s." - -#, python-format -msgid "" -"Restore backup aborted, the backup service currently configured " -"[%(configured_service)s] is not the backup service that was used to create " -"this backup [%(backup_service)s]." -msgstr "" -"La restauración de la copia de seguridad ha terminado anormalmente, el " -"servicio de copia de seguridad configurado actualmente " -"[%(configured_service)s] no es el servicio de copia de seguridad que se usó " -"para crear esta copia de seguridad [%(backup_service)s]." - -#, python-format -msgid "" -"Restore backup aborted: expected backup status %(expected_status)s but got " -"%(actual_status)s." -msgstr "" -"La restauración de la copia de seguridad ha terminado anormalmente: se " -"esperaba el estado de copia de seguridad %(expected_status)s pero se ha " -"obtenido %(actual_status)s." - -#, python-format -msgid "" -"Retrieved a different amount of SolidFire volumes for the provided Cinder " -"snapshots. Retrieved: %(ret)s Desired: %(des)s" -msgstr "" -"Se ha recuperado una cantidad distinta de volúmenes de SolidFire para las " -"instancias Cinder proporcionadas. Recuperados: %(ret)s Deseados: %(des)s" - -#, python-format -msgid "" -"Retrieved a different amount of SolidFire volumes for the provided Cinder " -"volumes. Retrieved: %(ret)s Desired: %(des)s" -msgstr "" -"Se ha recuperado una cantidad distinta de volúmenes de SolidFire para los " -"volúmenes Cinder proporcionados. 
Recuperados: %(ret)s Deseados: %(des)s" - -#, python-format -msgid "Retry count exceeded for command: %s" -msgstr "Se ha superado el recuento de reintentos para el mandato: %s" - -msgid "Retryable Dell Exception encountered" -msgstr "Se ha detectado una excepción reintentable de Dell" - -msgid "Retryable Pure Storage Exception encountered" -msgstr "Se ha detectado una excepción reintentable de Pure Storage" - -msgid "Retryable SolidFire Exception encountered" -msgstr "Se ha detectado una excepción reintentable de SolidFire" - -msgid "Retype requires migration but is not allowed." -msgstr "La reescritura requiere migración, pero no está permitido." - -#, python-format -msgid "Rolling back %(volumeName)s by deleting it." -msgstr "Retrotrayendo %(volumeName)s mediante su supresión." - -#, python-format -msgid "" -"Running Cinder with a VMware vCenter version less than %s is not allowed." -msgstr "" -"No se permite ejecutar Cinder con una versión de VMware vCenter inferior a " -"la versión %s." - -msgid "SAN product is not configured." -msgstr "Producto SAN no está configurado." - -msgid "SAN protocol is not configured." -msgstr "Protocolo SAN no está configurado." - -#, python-format -msgid "SMBFS config 'smbfs_oversub_ratio' invalid. Must be > 0: %s" -msgstr "" -"El archivo de configuración de SMBFS 'smbfs_oversub_ratio' no es válido. " -"Debe ser > 0: %s" - -#, python-format -msgid "SMBFS config 'smbfs_used_ratio' invalid. Must be > 0 and <= 1.0: %s" -msgstr "" -"El archivo de configuración de SMBFS 'smbfs_used_ratio' no es válido. Debe " -"ser > 0 y <= 1.0: %s" - -#, python-format -msgid "SMBFS config file at %(config)s doesn't exist." -msgstr "El archivo de configuración SMBFS en %(config)s no existe." - -msgid "SMBFS config file not set (smbfs_shares_config)." -msgstr "" -"El archivo de configuración SMBFS no se ha configurado (smbfs_shares_config)." 
- -#, python-format -msgid "SSH Command failed after '%(total_attempts)r' attempts : '%(command)s'" -msgstr "" -"Se ha encontrado un error en el mandato SSH tras '%(total_attempts)r' " -"intentos: '%(command)s'" - -#, python-format -msgid "SSH command injection detected: %(command)s" -msgstr "Se ha detectado inyección de mandato SSH: %(command)s" - -#, python-format -msgid "SSH connection failed for %(fabric)s with error: %(err)s" -msgstr "La conexión SSH ha fallado para %(fabric)s con el error: %(err)s" - -#, python-format -msgid "SSL Certificate expired on %s." -msgstr "El certificado SSL ha caducado el %s." - -#, python-format -msgid "SSL error: %(arg)s." -msgstr "Error de SSL: %(arg)s." - -#, python-format -msgid "Scheduler Host Filter %(filter_name)s could not be found." -msgstr "" -"No se ha podido encontrar el filtro de host de planificador %(filter_name)s." - -#, python-format -msgid "Scheduler Host Weigher %(weigher_name)s could not be found." -msgstr "El Scheduler Host Weigher %(weigher_name)s no se ha podido encontrar." - -#, python-format -msgid "" -"Secondary copy status: %(status)s and synchronized: %(sync)s, sync progress " -"is: %(progress)s%%." -msgstr "" -"Estado de copia secundaria: %(status)s y sincronizado: %(sync)s, el progreso " -"de la sincronización es: %(progress)s%%." - -#, python-format -msgid "" -"Secondary id can not be the same as primary array, backend_id = " -"%(secondary)s." -msgstr "" -"El ID secundario no puede ser el mismo que la matriz primaria, backend_id = " -"%(secondary)s." - -#, python-format -msgid "SerialNumber must be in the file %(fileName)s." -msgstr "SerialNumber debe estar en el archivo %(fileName)s." - -#, python-format -msgid "Service %(service)s on host %(host)s removed." -msgstr "Se ha eliminado el servicio %(service)s en el host %(host)s." - -#, python-format -msgid "Service %(service_id)s could not be found on host %(host)s." 
-msgstr "" -"No se ha podido encontrar el servicio %(service_id)s en el host %(host)s." - -#, python-format -msgid "Service %(service_id)s could not be found." -msgstr "No se ha podido encontrar el servicio %(service_id)s." - -msgid "Service is too old to fulfil this request." -msgstr "El servicio es demasiado antiguo para cumplir esta solicitud." - -msgid "Service is unavailable at this time." -msgstr "El servicio no esta disponible en este momento" - -msgid "Session might have expired." -msgstr "La sesión pudo haber expirado." - -msgid "Set pair secondary access error." -msgstr "Error al definir el acceso secundario del par." - -msgid "Sets thin provisioning." -msgstr "Establece suministro ligero." - -msgid "" -"Setting LUN QoS policy group is not supported on this storage family and " -"ONTAP version." -msgstr "" -"No se admite el establecimiento del grupo de política de calidad de servicio " -"de LUN en esta familia de almacenamiento y versión de ONTAP." - -msgid "" -"Setting file qos policy group is not supported on this storage family and " -"ontap version." -msgstr "" -"No se admite el establecimiento del grupo de política de calidad de servicio " -"del archivo en esta familia de almacenamiento y versión de ontap." - -#, python-format -msgid "" -"Share at %(dir)s is not writable by the Cinder volume service. Snapshot " -"operations will not be supported." -msgstr "" -"El servicio de volumen Cinder no puede grabar en la unidad compartida en " -"%(dir)s. Las operaciones de instantánea no se admitirán." - -#, python-format -msgid "Sheepdog I/O Error, command was: \"%s\"." -msgstr "Error de E/S Sheepdog, el mandato era: \"%s\"." - -msgid "" -"Show operations can only be made to projects in the same hierarchy of the " -"project in which users are scoped to." -msgstr "" -"Las operaciones de mostrar sólo se pueden realizar en proyectos de la misma " -"jerarquía del proyecto en el que los usuarios tienen alcance." 
- -msgid "Size" -msgstr "Tamaño" - -#, python-format -msgid "Size for volume: %s not found, cannot secure delete." -msgstr "" -"Tamaño para volumen: %s no se ha encontrado, no puede asegurar la supresión." - -#, python-format -msgid "" -"Size is %(image_size)dGB and doesn't fit in a volume of size " -"%(volume_size)dGB." -msgstr "" -"El tamaño es %(image_size)dGB y no se ajusta en un volumen de tamaño " -"%(volume_size)dGB." - -#, python-format -msgid "" -"Size of specified image %(image_size)sGB is larger than volume size " -"%(volume_size)sGB." -msgstr "" -"El tamaño de la imagen especificada %(image_size)sGB es mayor que el tamaño " -"de volumen %(volume_size)sGB." - -#, python-format -msgid "" -"Snapshot %(id)s has been asked to be deleted while waiting for it to become " -"available. Perhaps a concurrent request was made." -msgstr "" -"Se ha solicitado suprimir la instantánea %(id)s mientras se esperaba a que " -"estuviera disponible. Quizás se ha realizado una solicitud simultánea." - -#, python-format -msgid "" -"Snapshot %(id)s was found in state %(state)s rather than 'deleting' during " -"cascade delete." -msgstr "" -"La instantánea %(id)s se ha encontrado con el estado%(state)s en lugar de " -"'borrando' durante la supresión en cascada." - -#, python-format -msgid "Snapshot %(snapshot_id)s could not be found." -msgstr "No se ha podido encontrar la instantánea %(snapshot_id)s." - -#, python-format -msgid "Snapshot %(snapshot_id)s has no metadata with key %(metadata_key)s." -msgstr "" -"La instantánea %(snapshot_id)s no tiene metadatos con la clave " -"%(metadata_key)s." - -#, python-format -msgid "Snapshot '%s' doesn't exist on array." -msgstr "La instantánea '%s' no existe en la matriz." - -#, python-format -msgid "" -"Snapshot cannot be created because volume %(vol_id)s is not available, " -"current volume status: %(vol_status)s." 
-msgstr "" -"No se puede crear la instantánea porque el volumen %(vol_id)s no está " -"disponible, el estado actual del volumen es: %(vol_status)s." - -msgid "Snapshot cannot be created while volume is migrating." -msgstr "No se puede crear una instantánea mientras se está migrando el volumen" - -msgid "Snapshot of secondary replica is not allowed." -msgstr "La instantánea de la réplica secundaria no está permitida." - -#, python-format -msgid "Snapshot of volume not supported in state: %s." -msgstr "La instantánea del volumen no se soporta en estado: %s." - -#, python-format -msgid "Snapshot res \"%s\" that is not deployed anywhere?" -msgstr "¿Recurso de instantánea \"%s\" no desplegado en ningún sitio?" - -#, python-format -msgid "Snapshot status %(cur)s not allowed for update_snapshot_status" -msgstr "Estado de instantánea %(cur)s no permitido para update_snapshot_status" - -msgid "Snapshot status must be \"available\" to clone." -msgstr "El estado de la instantánea debe ser \"disponible\" para clonar." - -#, python-format -msgid "" -"Snapshot to be backed up must be available, but the current status is \"%s\"." -msgstr "" -"La instantánea de la que se va a hacer una copia de seguridad debe estar " -"disponible, pero el estado actual es \"%s\"." - -#, python-format -msgid "Snapshot with id of %s could not be found." -msgstr "No se ha encontrado la instantánea con el ID %s." - -#, python-format -msgid "" -"Snapshot='%(snap)s' does not exist in base image='%(base)s' - aborting " -"incremental backup" -msgstr "" -"La instantánea='%(snap)s' no existe en la imagen de base='%(base)s' - " -"terminando anormalmente copia de seguridad incremental" - -#, python-format -msgid "Snapshots are not supported for this volume format: %s" -msgstr "Este formato de volumen no admite instantáneas: %s" - -#, python-format -msgid "Socket error: %(arg)s." -msgstr "Error de socket: %(arg)s." 
- -msgid "SolidFire Cinder Driver exception" -msgstr "Excepción de controlador SolidFire Cinder" - -msgid "Sort direction array size exceeds sort key array size." -msgstr "" -"El tamaño de la matriz de dirección de ordenación excede el tamaño de matriz " -"de la clave de ordenación." - -msgid "Source CG is empty. No consistency group will be created." -msgstr "" -"El grupo de consistencia está vacío. No se creará ningún grupo de " -"consistencia." - -msgid "Source host details not found." -msgstr "Detalles de host de origen no encontrados." - -msgid "Source volume device ID is required." -msgstr "El ID de dispositivo de volumen de origen es obligatorio." - -msgid "Source volume not mid-migration." -msgstr "El volumen de origen no mid-migration." - -msgid "SpaceInfo returned byarray is invalid" -msgstr "SpaceInfo ha devuelto una byarray que no es válida" - -#, python-format -msgid "" -"Specified host to map to volume %(vol)s is in unsupported host group with " -"%(group)s." -msgstr "" -"El host especificado para correlacionarse con el volumen %(vol)s está en un " -"grupo de hosts no admitido con %(group)s." - -msgid "Specified logical volume does not exist." -msgstr "El volumen lógico especificado no existe." - -#, python-format -msgid "Specified snapshot group with id %s could not be found." -msgstr "" -"No se ha encontrado el grupo de instantáneas especificado con el ID %s." - -msgid "Specify a password or private_key" -msgstr "Especifique una contraseña o private_key" - -msgid "Specify san_password or san_private_key" -msgstr "Especifique san_password o san_private_key" - -msgid "" -"Specify volume type name, description, is_public or a combination thereof." -msgstr "" -"Especifique el nombre del tipo de volumen, la descripción, is_public o una " -"combinación de los mismos." - -msgid "Split pair error." -msgstr "Error al dividir el par." - -msgid "Split replication failed." -msgstr "Dividir replicación ha fallado." - -msgid "Start LUNcopy error." 
-msgstr "Error al iniciar LUNcopy." - -msgid "State" -msgstr "Estado" - -#, python-format -msgid "State of node is wrong. Current state is %s." -msgstr "El estado del nodo es incorrecto. El estado actual es %s." - -msgid "Status" -msgstr "Estado" - -msgid "Stop snapshot error." -msgstr "Error al detener una instantánea." - -#, python-format -msgid "Storage Configuration Service not found on %(storageSystemName)s." -msgstr "" -"El servicio de configuración de almacenamiento no se ha encontrado en " -"%(storageSystemName)s." - -#, python-format -msgid "Storage HardwareId mgmt Service not found on %(storageSystemName)s." -msgstr "" -"El servicio de gestión de ID de hardware de almacenamiento no se ha " -"encontrado en %(storageSystemName)s." - -#, python-format -msgid "Storage Profile %s not found." -msgstr "No se ha encontrado el perfil de almacenamiento %s." - -#, python-format -msgid "Storage Relocation Service not found on %(storageSystemName)s." -msgstr "" -"El servicio de reubicación de almacenamiento no se ha encontrado en " -"%(storageSystemName)s." - -#, python-format -msgid "Storage family %s is not supported." -msgstr "No hay soporte para la familia de almacenamiento %s." - -#, python-format -msgid "Storage group %(storageGroupName)s was not deleted successfully" -msgstr "" -"El grupo de almacenamiento %(storageGroupName)s no se ha suprimido " -"correctamente" - -#, python-format -msgid "Storage host %(svr)s not detected, verify name" -msgstr "Host de almacenamiento %(svr)s no detectado, compruebe el nombre." - -msgid "Storage pool is not configured." -msgstr "No se ha configurado la agrupación de almacenamiento." - -#, python-format -msgid "Storage profile: %(storage_profile)s not found." -msgstr "No se ha encontrado el perfil de almacenamiento %(storage_profile)s." - -msgid "Storage resource could not be found." -msgstr "No se he encontrado el recurso de almacenamiento." - -msgid "Storage system id not set." 
-msgstr "ID de sistema de almacenamiento no establecido." - -#, python-format -msgid "Storage system not found for pool %(poolNameInStr)s." -msgstr "" -"No se ha encontrado el sistema de almacenamiento para la agrupación " -"%(poolNameInStr)s." - -#, python-format -msgid "StorageSystem %(array)s is not found." -msgstr "El sistema de almacenamiento %(array)s no se ha encontrado." - -#, python-format -msgid "" -"Sum of child usage '%(sum)s' is greater than free quota of '%(free)s' for " -"project '%(proj)s' for resource '%(res)s'. Please lower the limit or usage " -"for one or more of the following projects: '%(child_ids)s'" -msgstr "" -"La suma de uso de hijos '%(sum)s' es superior a la cuota libre de '%(free)s' " -"para el proyecto '%(proj)s' para el recurso '%(res)s'. Rebaje el límite de " -"uso de uno o más de los siguientes proyectos: '%(child_ids)s'" - -msgid "Switch over pair error." -msgstr "Error al cambiar al par." - -msgid "Sync pair error." -msgstr "Error al sincronizar el par." - -#, python-format -msgid "System %(id)s found with bad password status - %(pass_status)s." -msgstr "" -"Se ha encontrado el sistema %(id)s con un estado de contraseña incorrecto - " -"%(pass_status)s." - -#, python-format -msgid "System %(id)s found with bad status - %(status)s." -msgstr "El sistema%(id)s se ha encontrado con estado incorrecto - %(status)s." - -msgid "System does not support compression." -msgstr "El sistema no soporta la compresión." - -msgid "System is busy, retry operation." -msgstr "El sistema está ocupado, vuelva a intentar la operación." - -#, python-format -msgid "" -"TSM [%(tsm)s] was not found in CloudByte storage for account [%(account)s]." -msgstr "" -"No se ha encontrado TSM [%(tsm)s] en el almacenamiento CloudByte para la " -"cuenta [%(account)s]." - -msgid "Target volume type is still in use." -msgstr "El tipo de volumen de destino aún se está utilizando." 
- -msgid "Terminate connection failed" -msgstr "No se ha podido terminar la conexión" - -msgid "Terminate connection unable to connect to backend." -msgstr "" -"La terminación de la conexión no se ha podido conectar con el programa de " -"fondo." - -#, python-format -msgid "Terminate volume connection failed: %(err)s" -msgstr "Error al terminar la conexión del volumen: %(err)s" - -#, python-format -msgid "The %(type)s %(id)s source to be replicated was not found." -msgstr "El origen %(type)s %(id)s para replicar no se ha encontrado." - -msgid "" -"The 'sort_key' and 'sort_dir' parameters are deprecated and cannot be used " -"with the 'sort' parameter." -msgstr "" -"Los parámetros 'sort_key' y 'sort_dir' están en desuso y no se pueden " -"utilizar con el parámetro 'sort'." - -msgid "The EQL array has closed the connection." -msgstr "La matriz EQL ha cerrado la conexión." - -#, python-format -msgid "" -"The GPFS filesystem %(fs)s is not at the required release level. Current " -"level is %(cur)s, must be at least %(min)s." -msgstr "" -"El GPFS filesystem %(fs)s no está en el nivel de release requerido. El " -"nivel actual es %(cur)s, debe ser al menos %(min)s." - -msgid "The IP Address was not found." -msgstr "No se ha encontrado la dirección IP." - -#, python-format -msgid "" -"The WebDAV request failed. Reason: %(msg)s, Return code/reason: %(code)s, " -"Source Volume: %(src)s, Destination Volume: %(dst)s, Method: %(method)s." -msgstr "" -"La solicitud WebDAV ha fallado. Motivo: %(msg)s, Código de retorno/motivo: " -"%(code)s, Volumen de origen: %(src)s, Volumen de destino: %(dst)s, Método: " -"%(method)s." - -msgid "" -"The above error may show that the database has not been created.\n" -"Please create a database using 'cinder-manage db sync' before running this " -"command." -msgstr "" -"El error anterior puede mostrar que la base de datos no se ha creado.\n" -"Cree una base de datos utilizando 'cinder-manage db sync' antes de ejecutar " -"este mandato." 
- -#, python-format -msgid "" -"The array does not support the storage pool setting for SLO %(slo)s and " -"workload %(workload)s. Please check the array for valid SLOs and workloads." -msgstr "" -"La matriz no da soporte al valor de la agrupación de almacenamiento para SLO " -"%(slo)s y la carga de trabajo %(workload)s. Busque SLO y cargas de trabajo " -"válidos en la matriz." - -msgid "" -"The back-end where the volume is created does not have replication enabled." -msgstr "" -"El programa de fondo donde se ha creado el volumen no tiene la replicación " -"habilitada." - -#, python-format -msgid "" -"The command %(cmd)s failed. (ret: %(ret)s, stdout: %(out)s, stderr: %(err)s)" -msgstr "" -"El mandato %(cmd)s ha fallado. (ret: %(ret)s, stdout: %(out)s, stderr: " -"%(err)s)" - -msgid "The copy should be primary or secondary" -msgstr "La copia debe ser primaria o secundaria" - -#, python-format -msgid "" -"The creation of a logical device could not be completed. (LDEV: %(ldev)s)" -msgstr "" -"No se ha podido completar la creación de un dispositivo lógico. (LDEV: " -"%(ldev)s)" - -msgid "The decorated method must accept either a volume or a snapshot object" -msgstr "" -"El método decorated debe aceptar un volumen o un objeto de instantánea." - -#, python-format -msgid "The device in the path %(path)s is unavailable: %(reason)s" -msgstr "El dispositivo en la ruta %(path)s no está disponible: %(reason)s" - -#, python-format -msgid "The end time (%(end)s) must be after the start time (%(start)s)." -msgstr "" -"La hora de finalización (%(end)s) debe ser posterior a la hora de inicio " -"(%(start)s)." - -#, python-format -msgid "The extraspec: %(extraspec)s is not valid." -msgstr "La especificación extraspec: %(extraspec)s no es válida." - -#, python-format -msgid "The failed-over volume could not be deleted: %s" -msgstr "No se ha podido suprimir el volumen que ha dado error: %s." 
- -#, python-format -msgid "The following elements are required: %s" -msgstr "Se necesitan los elementos siguientes: %s" - -msgid "The host group or iSCSI target could not be added." -msgstr "No se ha podido añadir el grupo de host o el destino iSCSI." - -msgid "The host group or iSCSI target was not found." -msgstr "No se ha encontrado el grupo de host o el destino iSCSI." - -msgid "" -"The host is not ready to be failed back. Please resynchronize the volumes " -"and resume replication on the 3PAR backends." -msgstr "" -"El host no está a punto para restablecerlo. Vuelva a sincronizar los " -"volúmenes y reanude la replicación en los programas de fondo 3PAR." - -msgid "" -"The host is not ready to be failed back. Please resynchronize the volumes " -"and resume replication on the LeftHand backends." -msgstr "" -"El host no está a punto para restablecerlo. Vuelva a sincronizar los " -"volúmenes y reanude la replicación en los programas de fondo LeftHand." - -msgid "" -"The host is not ready to be failed back. Please resynchronize the volumes " -"and resume replication on the Storwize backends." -msgstr "" -"El host no está a punto para restablecerlo. Vuelva a sincronizar los " -"volúmenes y reanude la replicación en los programas de fondo de Storwize." - -#, python-format -msgid "The iSCSI CHAP user %(user)s does not exist." -msgstr "El usuario CHAP de iSCSI %(user)s no existe." - -msgid "The key cannot be None." -msgstr "La clave no puede ser Ninguno." - -#, python-format -msgid "The logical device for specified %(type)s %(id)s was already deleted." -msgstr "" -"El dispositivo lógico del %(type)s %(id)s especificado ya se había eliminado." - -#, python-format -msgid "The method %(method)s is timed out. (timeout value: %(timeout)s)" -msgstr "" -"El método %(method)s ha excedido el tiempo de espera. (valor de tiempo de " -"espera: %(timeout)s)" - -msgid "The method update_migrated_volume is not implemented." 
-msgstr "No se ha implementado el método update_migrated_volume." - -#, python-format -msgid "" -"The mount %(mount_path)s is not a valid Quobyte USP volume. Error: %(exc)s" -msgstr "" -"El montaje %(mount_path)s no es un volumen Quobyte USP válido. Error: %(exc)s" - -#, python-format -msgid "The parameter of the storage backend. (config_group: %(config_group)s)" -msgstr "" -"El parámetro del back-end de almacenamiento. (config_group: %(config_group)s)" - -msgid "The parent backup must be available for incremental backup." -msgstr "" -"La copia de seguridad padre debe estar disponible para una copia de " -"seguridad incremental." - -#, python-format -msgid "The provided snapshot '%s' is not a snapshot of the provided volume." -msgstr "" -"La instantánea proporcionada '%s' no es una instantánea del volumen " -"proporcionado." - -msgid "" -"The reference to the volume in the backend should have the format " -"file_system/volume_name (volume_name cannot contain '/')" -msgstr "" -"La referencia al volumen del programa de fondo debería tener el formato " -"file_system/volume_name (volume_name no puede contener '/')" - -#, python-format -msgid "The remote retention count must be %s or less." -msgstr "El recuento de retención remota debe ser de %s o inferior." - -msgid "" -"The replication mode was not configured correctly in the volume type " -"extra_specs. If replication:mode is periodic, replication:sync_period must " -"also be specified and be between 300 and 31622400 seconds." -msgstr "" -"El modo de replicación no se ha configurado correctamente en las " -"especificaciones adicionales (extra_specs) del tipo de volumen. Si " -"replication:mode es periodic, se debe especificar también replication:" -"sync_period y debe ser entre 300 y 31622400 segundos." - -#, python-format -msgid "The replication sync period must be at least %s seconds." -msgstr "" -"El periodo de sincronización de replicación debe ser al menos de %s segundos." 
- -#, python-format -msgid "" -"The requested size : %(requestedSize)s is not the same as resulting size: " -"%(resultSize)s." -msgstr "" -"El tamaño solicitado: %(requestedSize)s no es el mismo que el tamaño " -"resultante: %(resultSize)s." - -#, python-format -msgid "The resource %(resource)s was not found." -msgstr "No se ha encontrado el recurso %(resource)s." - -msgid "The results are invalid." -msgstr "Los resultados no son válidos." - -#, python-format -msgid "The retention count must be %s or less." -msgstr "El recuento de retención debe ser de %s o inferior." - -msgid "The snapshot cannot be created when the volume is in maintenance mode." -msgstr "" -"La instantánea no se puede crear cuando el volumen está en modalidad de " -"mantenimiento." - -#, python-format -msgid "The snapshot is unavailable: %(data)s" -msgstr "La instantánea no está disponible: %(data)s" - -msgid "The source volume for this WebDAV operation not found." -msgstr "No se ha encontrado el volumen de origen para esta operación WebDAV." - -#, python-format -msgid "" -"The source volume type '%(src)s' is different than the destination volume " -"type '%(dest)s'." -msgstr "" -"El tipo de volumen de origen '%(src)s' es distinto del tipo de volumen de " -"destino '%(dest)s'." - -#, python-format -msgid "The source volume type '%s' is not available." -msgstr "El tipo de volumen de origen '%s' no está disponible." - -#, python-format -msgid "The specified %(desc)s is busy." -msgstr "El %(desc)s especificado está ocupado." - -#, python-format -msgid "The specified LUN does not belong to the given pool: %s." -msgstr "El LUN especificado no pertenece a la agrupación indicada: %s." - -#, python-format -msgid "" -"The specified ldev %(ldev)s could not be managed. The ldev must not be " -"mapping." -msgstr "" -"No se ha podido gestionar el ldev %(ldev)s especificado. No debe " -"correlacionarse el ldev." - -#, python-format -msgid "" -"The specified ldev %(ldev)s could not be managed. 
The ldev must not be " -"paired." -msgstr "" -"No se ha podido gestionar el ldev %(ldev)s especificado. No debe emparejarse " -"el ldev." - -#, python-format -msgid "" -"The specified ldev %(ldev)s could not be managed. The ldev size must be in " -"multiples of gigabyte." -msgstr "" -"No se ha podido gestionar el ldev %(ldev)s especificado. El tamaño de ldev " -"debe ser un múltiplo de gigabyte." - -#, python-format -msgid "" -"The specified ldev %(ldev)s could not be managed. The volume type must be DP-" -"VOL." -msgstr "" -"No se ha podido gestionar el ldev %(ldev)s especificado. El tipo de volumen " -"debe ser DP-VOL." - -#, python-format -msgid "" -"The specified operation is not supported. The volume size must be the same " -"as the source %(type)s. (volume: %(volume_id)s)" -msgstr "" -"La operación especificada no se admite. El tamaño del volumen debe ser el " -"mismo que el origen %(type)s. (volumen: %(volume_id)s)" - -msgid "The specified vdisk is mapped to a host." -msgstr "El disco virtual especificado se correlaciona con un host." - -msgid "The specified volume is mapped to a host." -msgstr "El volumen especificado se ha correlacionado con un host." - -#, python-format -msgid "" -"The storage array password for %s is incorrect, please update the configured " -"password." -msgstr "" -"La contraseña de la matriz de almacenamiento para %s es incorrecta, " -"actualice la contraseña configurada." - -#, python-format -msgid "The storage backend can be used. (config_group: %(config_group)s)" -msgstr "" -"Se puede utilizar el back-end de almacenamiento. (config_group: " -"%(config_group)s)" - -#, python-format -msgid "" -"The storage device does not support %(prot)s. Please configure the device to " -"support %(prot)s or switch to a driver using a different protocol." -msgstr "" -"El dispositivo de almacenamiento no admite %(prot)s. Configure el " -"dispositivo para que admita %(prot)s o cambie a un controlador que utilice " -"otro protocolo." 
- -#, python-format -msgid "" -"The striped meta count of %(memberCount)s is too small for volume: " -"%(volumeName)s, with size %(volumeSize)s." -msgstr "" -"El metarrecuento en bandas de %(memberCount)s es demasiado pequeño para el " -"volumen: %(volumeName)s, con tamaño %(volumeSize)s." - -#, python-format -msgid "" -"The type of metadata: %(metadata_type)s for volume/snapshot %(id)s is " -"invalid." -msgstr "" -"El tipo de metadatos %(metadata_type)s del volumen/instantánea %(id)s no es " -"válido." - -#, python-format -msgid "" -"The volume %(volume_id)s could not be extended. The volume type must be " -"Normal." -msgstr "" -"El volumen %(volume_id)s no se ha podido ampliar. El tipo de volumen debe " -"ser Normal." - -#, python-format -msgid "" -"The volume %(volume_id)s could not be unmanaged. The volume type must be " -"%(volume_type)s." -msgstr "" -"El volumen %(volume_id)s no ha podido quedar como no gestionado. El tipo de " -"volumen debe ser %(volume_type)s." - -#, python-format -msgid "The volume %(volume_id)s is managed successfully. (LDEV: %(ldev)s)" -msgstr "" -"Se ha gestionado correctamente el volumen %(volume_id)s. (LDEV: %(ldev)s)" - -#, python-format -msgid "The volume %(volume_id)s is unmanaged successfully. (LDEV: %(ldev)s)" -msgstr "" -"El volumen %(volume_id)s no se ha gestionado correctamente. (LDEV: %(ldev)s)" - -#, python-format -msgid "The volume %(volume_id)s to be mapped was not found." -msgstr "El volumen %(volume_id)s para correlacionar no se ha encontrado." - -msgid "The volume cannot accept transfer in maintenance mode." -msgstr "" -"El volumen no puede aceptar transferencias en la modalidad de mantenimiento." - -msgid "The volume cannot be attached in maintenance mode." -msgstr "El volumen no se puede conectar en la modalidad de mantenimiento." - -msgid "The volume cannot be detached in maintenance mode." -msgstr "El volumen no se puede desconectar en la modalidad de mantenimiento." 
- -msgid "The volume cannot be updated during maintenance." -msgstr "El volumen no se puede actualizar durante el mantenimiento." - -msgid "The volume connection cannot be initialized in maintenance mode." -msgstr "" -"La conexión de volumen no se puede inicializar en modalidad de mantenimiento." - -msgid "The volume driver requires the iSCSI initiator name in the connector." -msgstr "" -"La unidad de volumen requiere el nombre del iniciador iSCSI en el conector." - -msgid "" -"The volume is currently busy on the 3PAR and cannot be deleted at this time. " -"You can try again later." -msgstr "" -"El volumen está ocupado actualmente en el 3PAR y no puede suprimirse en este " -"momento. Inténtelo de nuevo más tarde." - -msgid "The volume label is required as input." -msgstr "Se necesita la etiqueta de volumen como entrada." - -#, python-format -msgid "There are no resources available for use. (resource: %(resource)s)" -msgstr "No hay recursos disponibles para utilizar. (recurso: %(resource)s)" - -msgid "There are no valid ESX hosts." -msgstr "No hay hosts ESX válidos." - -msgid "There are no valid datastores." -msgstr "No hay almacenes de datos válidos." - -#, python-format -msgid "" -"There is no designation of the %(param)s. The specified storage is essential " -"to manage the volume." -msgstr "" -"No hay designación de %(param)s. El almacenamiento especificado es esencial " -"para gestionar el volumen." - -msgid "" -"There is no designation of the ldev. The specified ldev is essential to " -"manage the volume." -msgstr "" -"No hay designación del ldev. El ldev especificado es esencial para gestionar " -"el volumen." - -msgid "There is no metadata in DB object." -msgstr "No hay metadatos en el objeto de base de datos." - -#, python-format -msgid "There is no share which can host %(volume_size)sG" -msgstr "No hay ninguna unidad compartida con este host %(volume_size)sG" - -#, python-format -msgid "There is no share which can host %(volume_size)sG." 
-msgstr "No hay ninguna unidad compartida que pueda alojar %(volume_size)sG" - -#, python-format -msgid "There is no such action: %s" -msgstr "No existe esta acción: %s" - -msgid "There is no virtual disk device." -msgstr "No hay ningún dispositivo de disco virtual." - -#, python-format -msgid "There was an error adding the volume to the remote copy group: %s." -msgstr "" -"Se producido un error al añadir el volumen al grupo de copias remotas: %s." - -#, python-format -msgid "There was an error creating the cgsnapshot: %s" -msgstr "Se producido un error al crear el cgsnapshot: %s." - -#, python-format -msgid "There was an error creating the remote copy group: %s." -msgstr "Se producido un error al crear el grupo de copias remotas: %s." - -#, python-format -msgid "" -"There was an error setting the sync period for the remote copy group: %s." -msgstr "" -"Se producido un error al definir el periodo de sincronización del grupo de " -"copias remotas: %s." - -#, python-format -msgid "" -"There was an error setting up a remote copy group on the 3PAR arrays: " -"('%s'). The volume will not be recognized as replication type." -msgstr "" -"Se producido un error al definir el grupo de copias remotas en las matrices " -"de 3PAR: ('%s'). El volumen no se reconocerá como un tipo de replicación." - -#, python-format -msgid "" -"There was an error setting up a remote schedule on the LeftHand arrays: " -"('%s'). The volume will not be recognized as replication type." -msgstr "" -"Se producido un error al definir una planificación remota en las matrices de " -"LeftHand : ('%s'). El volumen no se reconocerá como un tipo de replicación." - -#, python-format -msgid "There was an error starting remote copy: %s." -msgstr "Se producido un error al iniciar la copia remota: %s." 
- -#, python-format -msgid "There's no Gluster config file configured (%s)" -msgstr "No hay ningún archivo de configuración de Gluster configurado (%s)" - -#, python-format -msgid "There's no NFS config file configured (%s)" -msgstr "No hay ningún archivo de configuración de NFS configurado (%s)" - -#, python-format -msgid "" -"There's no Quobyte volume configured (%s). Example: quobyte:///" -"" -msgstr "" -"No se ha configurado un volumen Quobyte (%s). Ejemplo: quobyte:///" -"" - -msgid "Thin provisioning not supported on this version of LVM." -msgstr "No se admite el aprovisionamiento ligero en esta versión de LVM." - -msgid "This driver does not support deleting in-use snapshots." -msgstr "Este controlador no admite suprimir instantáneas en uso." - -msgid "This driver does not support snapshotting in-use volumes." -msgstr "Este controlador no admite instantáneas de volúmenes en uso." - -msgid "This request was rate-limited." -msgstr "Esta solicitud estaba limitada por tipo." - -#, python-format -msgid "" -"This system platform (%s) is not supported. This driver supports only Win32 " -"platforms." -msgstr "" -"No se admite esta plataforma de sistema (%s). Este controlador solo admite " -"plataformas Win32." - -#, python-format -msgid "Tier Policy Service not found for %(storageSystemName)s." -msgstr "" -"El servicio de política de niveles no se ha encontrado para " -"%(storageSystemName)s." - -#, python-format -msgid "Timed out while waiting for Nova update for creation of snapshot %s." -msgstr "" -"Se desactivó mientras esperaba la actualización de Nova para la creación de " -"la instantánea %s." - -#, python-format -msgid "" -"Timed out while waiting for Nova update for deletion of snapshot %(id)s." -msgstr "" -"Se ha desactivado mientras esperaba la actualización de Nova para suprimir " -"la instantánea %(id)s." 
- -#, python-format -msgid "Timeout while calling %s " -msgstr "Tiempo de espera excedido al llamar a %s " - -#, python-format -msgid "Timeout while requesting %(service)s API." -msgstr "Se ha agotado el tiempo de espera al solicitar la API de %(service)s." - -#, python-format -msgid "Timeout while requesting capabilities from backend %(service)s." -msgstr "" -"Se ha agotado el tiempo de espera al solicitar capacidades al programa de " -"fondo %(service)s." - -#, python-format -msgid "Transfer %(transfer_id)s could not be found." -msgstr "No se ha podido encontrar la transferencia %(transfer_id)s)." - -#, python-format -msgid "" -"Transfer %(transfer_id)s: Volume id %(volume_id)s in unexpected state " -"%(status)s, expected awaiting-transfer" -msgstr "" -"Transferencia %(transfer_id)s: id de volumen %(volume_id)s en estado " -"inesperado %(status)s, awaiting-transfer esperado" - -#, python-format -msgid "" -"Trying to import backup metadata from id %(meta_id)s into backup %(id)s." -msgstr "" -"Intentando importar metadatos de copia de seguridad de ID %(meta_id)s a la " -"copia de seguridad %(id)s." - -#, python-format -msgid "" -"Tune volume task stopped before it was done: volume_name=%(volume_name)s, " -"task-status=%(status)s." -msgstr "" -"La tarea de ajustar el volumen se ha detenido antes de finalizar: " -"volume_name=%(volume_name)s, task-status=%(status)s." - -#, python-format -msgid "" -"Type %(type_id)s is already associated with another qos specs: " -"%(qos_specs_id)s" -msgstr "" -"El tipo %(type_id)s ya está asociado con otro qos specs: %(qos_specs_id)s" - -msgid "Type access modification is not applicable to public volume type." -msgstr "" -"La modificación del acceso de tipo no es aplicable al tipo de volumen " -"público." - -msgid "Type cannot be converted into NaElement." -msgstr "El tipo no se puede convertir a NaElement." 
- -#, python-format -msgid "TypeError: %s" -msgstr "TypeError: %s" - -#, python-format -msgid "UUIDs %s are in both add and remove volume list." -msgstr "" -"Los UUID %s están tanto en la lista de volumen de añadir como de eliminar." - -#, python-format -msgid "Unable to access the Storwize back-end for volume %s." -msgstr "" -"No se ha podido acceder al programa de fondo de Storwize para el volumen %s." - -msgid "Unable to access the backend storage via file handle." -msgstr "" -"No se ha podido acceder al almacenamiento de programa de fondo a través del " -"descriptor de archivo." - -#, python-format -msgid "Unable to access the backend storage via the path %(path)s." -msgstr "" -"No se ha podido obtener acceso al almacenamiento de extremo trasero por " -"medio de la ruta %(path)s." - -#, python-format -msgid "Unable to add Cinder host to apphosts for space %(space)s" -msgstr "No se puede añadir el host Cinder a apphosts para el espacio %(space)s" - -#, python-format -msgid "Unable to complete failover of %s." -msgstr "No se ha podido completar la migración tras error de %s." - -msgid "Unable to connect or find connection to host" -msgstr "No se ha podido conectar o encontrar una conexión con el host" - -#, python-format -msgid "Unable to create consistency group %s" -msgstr "No se ha podido crear el grupo de consistencia %s" - -msgid "Unable to create lock. Coordination backend not started." -msgstr "" -"No se puede crear el bloqueo. El programa de fondo de coordinación no se ha " -"iniciado" - -#, python-format -msgid "" -"Unable to create or get default storage group for FAST policy: " -"%(fastPolicyName)s." -msgstr "" -"No se puede crear u obtener el grupo de almacenamiento predeterminado para " -"la política FAST: %(fastPolicyName)s." - -#, python-format -msgid "Unable to create replica clone for volume %s." -msgstr "No se ha podido crear un clon de réplica para el volumen %s." - -#, python-format -msgid "Unable to create the relationship for %s." 
-msgstr "No ha sido posible crear la relación para %s.." - -#, python-format -msgid "Unable to create volume %(name)s from %(snap)s." -msgstr "No se ha podido crear el volumen %(name)s a partir de %(snap)s." - -#, python-format -msgid "Unable to create volume %(name)s from %(vol)s." -msgstr "No se ha podido crear el volumen %(name)s a partir de %(vol)s." - -#, python-format -msgid "Unable to create volume %s" -msgstr "No se ha podido crear el volumen %s" - -msgid "Unable to create volume. Backend down." -msgstr "No se ha podido crear el volumen. El programa de fondo está inactivo." - -#, python-format -msgid "Unable to delete Consistency Group snapshot %s" -msgstr "No se ha podido suprimir la instantánea del grupo de consistencia %s" - -#, python-format -msgid "Unable to delete snapshot %(id)s, status: %(status)s." -msgstr "No se ha podido suprimir la instantánea %(id)s, estado: %(status)s." - -#, python-format -msgid "Unable to delete snapshot policy on volume %s." -msgstr "No se puede suprimir la política de instantáneas en el volumen %s." - -#, python-format -msgid "" -"Unable to delete the target volume for volume %(vol)s. Exception: %(err)s." -msgstr "" -"No se ha podido suprimir el volumen de destino para el volumen %(vol)s. " -"Excepción: %(err)s." - -msgid "" -"Unable to detach volume. Volume status must be 'in-use' and attach_status " -"must be 'attached' to detach." -msgstr "" -"No se puede desasociar el volumen. El estado del volumen debe ser 'in-use' y " -"attach_status debe ser 'attached' para poder desasociarlo." - -#, python-format -msgid "" -"Unable to determine secondary_array from supplied secondary: %(secondary)s." -msgstr "" -"No se ha podido determinar la matriz secundaria (secondary_array) a partir " -"del secundario indicado: %(secondary)s." - -#, python-format -msgid "Unable to determine snapshot name in Purity for snapshot %(id)s." -msgstr "" -"No se ha podido determinar el nombre de instantánea en Purity de la " -"instantánea %(id)s." 
- -msgid "Unable to determine system id." -msgstr "No se ha podido determinar ID del sistema." - -msgid "Unable to determine system name." -msgstr "No se ha podido determinar el nombre del sistema." - -#, python-format -msgid "" -"Unable to do manage snapshot operations with Purity REST API version " -"%(api_version)s, requires %(required_versions)s." -msgstr "" -"No se pueden realizar operaciones de gestión de instantáneas con Purity REST " -"API versión %(api_version)s, se necesita %(required_versions)s." - -#, python-format -msgid "" -"Unable to do replication with Purity REST API version %(api_version)s, " -"requires one of %(required_versions)s." -msgstr "" -"No se ha podido realizar la replicación con la API REST de Purity versión " -"%(api_version)s, se necesita una de las versiones siguientes: " -"%(required_versions)s." - -#, python-format -msgid "Unable to establish the partnership with the Storwize cluster %s." -msgstr "" -"No se ha podido establecer la asociación con el clúster de Storwize %s." - -#, python-format -msgid "Unable to extend volume %s" -msgstr "No se ha podido ampliar el volumen %s" - -#, python-format -msgid "" -"Unable to fail-over the volume %(id)s to the secondary back-end, because the " -"replication relationship is unable to switch: %(error)s" -msgstr "" -"No se ha podido migrar tras error el volumen %(id)s al programa de fondo " -"secundario porque la relación de replicación no puede conmutar: %(error)s" - -msgid "" -"Unable to failback to \"default\", this can only be done after a failover " -"has completed." -msgstr "" -"No se ha podido restablecer al \"valor predeterminado\", esto sólo se puede " -"hacer una vez se ha completado una migración tras error." - -#, python-format -msgid "Unable to failover to replication target:%(reason)s)." -msgstr "" -"No se ha podido realizar la migración tras error al destino de replicación:" -"%(reason)s)." - -msgid "Unable to fetch connection information from backend." 
-msgstr "No se puede captar información de conexión de programa de fondo." - -#, python-format -msgid "Unable to fetch connection information from backend: %(err)s" -msgstr "" -"No se puede captar información de conexión desde el programa de fondo: " -"%(err)s" - -#, python-format -msgid "Unable to find Purity ref with name=%s" -msgstr "No se ha encontrado la ref Purity con name=%s" - -#, python-format -msgid "Unable to find Volume Group: %(vg_name)s" -msgstr "No se puede encontrar el grupo de volumen: %(vg_name)s" - -msgid "Unable to find failover target, no secondary targets configured." -msgstr "" -"No se ha podido encontrar el destino de migración tras error, no se han " -"configurado destinos secundarios." - -msgid "Unable to find iSCSI mappings." -msgstr "No se pueden encontrar correlaciones iSCSI." - -#, python-format -msgid "Unable to find ssh_hosts_key_file: %s" -msgstr "No se puede encontrar ssh_hosts_key_file: %s" - -msgid "Unable to find system log file!" -msgstr "¡No ha sido posible encontrar el fichero de log del sistema!" - -#, python-format -msgid "" -"Unable to find viable pg snapshot to use forfailover on selected secondary " -"array: %(id)s." -msgstr "" -"No se ha podido encontrar una instantánea pg viable para utilizar para la " -"migración tras error en la matriz secundaria seleccionada: %(id)s." - -#, python-format -msgid "" -"Unable to find viable secondary array fromconfigured targets: %(targets)s." -msgstr "" -"No se ha podido encontrar una matriz secundaria viable a partir de los " -"destinos configurados: %(targets)s." - -#, python-format -msgid "Unable to find volume %s" -msgstr "No se puede encontrar el volumen %s" - -#, python-format -msgid "Unable to get a block device for file '%s'" -msgstr "No se puede obtener un dispositivo de bloque para el archivo '%s'" - -#, python-format -msgid "" -"Unable to get configuration information necessary to create a volume: " -"%(errorMessage)s." 
-msgstr "" -"No se puede obtener la información de configuración necesaria para crear un " -"volumen: %(errorMessage)s." - -msgid "Unable to get corresponding record for pool." -msgstr "No se puede obtener el registro correspondiente a la agrupación." - -#, python-format -msgid "" -"Unable to get information on space %(space)s, please verify that the cluster " -"is running and connected." -msgstr "" -"No se puede obtener información acerca del espacio %(space)s, verifique que " -"el clúster se esté ejecutando y esté conectado." - -msgid "" -"Unable to get list of IP addresses on this host, check permissions and " -"networking." -msgstr "" -"No se puede obtener la lista de direcciones IP en este host, compruebe los " -"permisos y las redes." - -msgid "" -"Unable to get list of domain members, check that the cluster is running." -msgstr "" -"No se puede obtener la lista de miembros de dominio, compruebe que el " -"clúster se está ejecutando." - -msgid "" -"Unable to get list of spaces to make new name. Please verify the cluster is " -"running." -msgstr "" -"No se puede obtener la lista de espacios para hacer un nuevo nombre. " -"Verifique que el clúster se esté ejecutando." - -#, python-format -msgid "Unable to get stats for backend_name: %s" -msgstr "No se puede obtener estadísticas para backend_name: %s" - -msgid "Unable to get storage volume from job." -msgstr "No ha sido posible obtener el volumen de almacenamiento del trabajo." - -#, python-format -msgid "Unable to get target endpoints for hardwareId %(hardwareIdInstance)s." -msgstr "" -"No se pueden obtener los puntos finales de destino para hardwareId " -"%(hardwareIdInstance)s." - -msgid "Unable to get the name of the masking view." -msgstr "No ha sido posible obtener el nombre de la vista de máscara." - -msgid "Unable to get the name of the portgroup." -msgstr "No ha sido posible obtener el nombre del grupo de puertos." - -#, python-format -msgid "Unable to get the replication relationship for volume %s." 
-msgstr "No se ha podido obtener la relación de replicación para el volumen %s." - -#, python-format -msgid "" -"Unable to import volume %(deviceId)s to cinder. It is the source volume of " -"replication session %(sync)s." -msgstr "" -"No se puede importar el volumen %(deviceId)s en cinder. Es el volumen de " -"origen de sesión de réplica %(sync)s." - -#, python-format -msgid "" -"Unable to import volume %(deviceId)s to cinder. The external volume is not " -"in the pool managed by current cinder host." -msgstr "" -"No se puede importar el volumen %(deviceId)s a cinder. El volumen externo no " -"está en la agrupación gestionada por el host cinder actual." - -#, python-format -msgid "" -"Unable to import volume %(deviceId)s to cinder. Volume is in masking view " -"%(mv)s." -msgstr "" -"No se puede importar el volumen %(deviceId)s a cinder. El volumen está en " -"vista de máscara %(mv)s." - -#, python-format -msgid "Unable to load CA from %(cert)s %(e)s." -msgstr "No se puede cargar CA desde %(cert)s %(e)s." - -#, python-format -msgid "Unable to load cert from %(cert)s %(e)s." -msgstr "No se puede cargar el certificado desde %(cert)s %(e)s." - -#, python-format -msgid "Unable to load key from %(cert)s %(e)s." -msgstr "No se puede cargar la clave desde %(cert)s %(e)s." - -#, python-format -msgid "Unable to locate account %(account_name)s on Solidfire device" -msgstr "" -"No se ha podido localizar la cuenta %(account_name)s en el dispositivo " -"Solidfire" - -#, python-format -msgid "Unable to locate an SVM that is managing the IP address '%s'" -msgstr "No se ha podido localizar un SVM que gestione la dirección IP '%s'" - -#, python-format -msgid "Unable to locate specified replay profiles %s " -msgstr "" -"No se han podido encontrar los perfiles de reproducción specificados %s " - -#, python-format -msgid "" -"Unable to manage existing volume. Volume %(volume_ref)s already managed." -msgstr "" -"No se puede gestionar el volumen existente. 
El volumen %(volume_ref)s ya se " -"ha gestionado." - -#, python-format -msgid "Unable to manage volume %s" -msgstr "No se puede gestionar el volumen %s" - -msgid "Unable to map volume" -msgstr "No se ha podido correlacionar el volumen" - -msgid "Unable to map volume." -msgstr "No se ha podido correlacionar el volumen." - -msgid "Unable to parse attributes." -msgstr "No se pueden analizar los atributos." - -#, python-format -msgid "" -"Unable to promote replica to primary for volume %s. No secondary copy " -"available." -msgstr "" -"No se puede ascender la réplica a primaria para el volumen %s. Ni hay " -"ninguna copia secundaria disponible." - -msgid "" -"Unable to re-use a host that is not managed by Cinder with " -"use_chap_auth=True," -msgstr "" -"No se puede volver a utilizar un host que no está gestionado por Cinder con " -"use_chap_auth=True," - -msgid "Unable to re-use host with unknown CHAP credentials configured." -msgstr "" -"No se puede volver a utilizar un host con credenciales CHAP desconocidas " -"configuradas." - -#, python-format -msgid "Unable to rename volume %(existing)s to %(newname)s" -msgstr "No se puede renombrar el volumen %(existing)s a %(newname)s" - -#, python-format -msgid "Unable to retrieve snapshot group with id of %s." -msgstr "No se ha podido recuperar el grupo de instantáneas con el id %s." - -#, python-format -msgid "" -"Unable to retype %(specname)s, expected to receive current and requested " -"%(spectype)s values. Value received: %(spec)s" -msgstr "" -"No se ha podido volver a escribir %(specname)s, se esperaba recibir los " -"valores actuales y solicitados de %(spectype)s. Valor recibido: %(spec)s" - -#, python-format -msgid "" -"Unable to retype: A copy of volume %s exists. Retyping would exceed the " -"limit of 2 copies." -msgstr "" -"No se puede volver a escribir: ya existe una copia de volumen %s. La " -"reescritura superaría el límite de 2 copias." 
- -#, python-format -msgid "" -"Unable to retype: Current action needs volume-copy, it is not allowed when " -"new type is replication. Volume = %s" -msgstr "" -"No se ha podido volver a escribir: la acción requiere una copia de volumen " -"(volume-copy), que no se permite cuando el nuevo tipo es replicación. " -"Volumen = %s" - -#, python-format -msgid "Unable to send requests: %s" -msgstr "No se han podido enviar peticiones: %s" - -#, python-format -msgid "" -"Unable to set up mirror mode replication for %(vol)s. Exception: %(err)s." -msgstr "" -"No se ha podido configurar la replicación en modo reflejo para %(vol)s. " -"Excepción: %(err)s." - -#, python-format -msgid "Unable to snap Consistency Group %s" -msgstr "No se ha podido crear una instantánea del grupo de consistencia %s" - -msgid "Unable to terminate volume connection from backend." -msgstr "No se puede terminar conexión de volumen desde programa de fondo." - -#, python-format -msgid "Unable to terminate volume connection: %(err)s" -msgstr "No se puede terminar la conexión de volumen: %(err)s" - -#, python-format -msgid "Unable to update consistency group %s" -msgstr "No se ha actualizar el grupo de consistencia %s" - -#, python-format -msgid "" -"Unable to verify initiator group: %(igGroupName)s in masking view " -"%(maskingViewName)s. " -msgstr "" -"No se puede verificar el grupo de iniciadores: %(igGroupName)s en la vista " -"de máscara %(maskingViewName)s. " - -msgid "Unacceptable parameters." -msgstr "Parametros inaceptables" - -#, python-format -msgid "" -"Unexecpted mapping status %(status)s for mapping %(id)s. Attributes: " -"%(attr)s." -msgstr "" -"Estado de correlación inesperado %(status)s para correlación %(id)s. " -"Atributos: %(attr)s." - -#, python-format -msgid "" -"Unexpected CLI response: header/row mismatch. header: %(header)s, row: " -"%(row)s." -msgstr "" -"Respuesta de CLI inesperada: discrepancia de cabecera/fila. cabecera: " -"%(header)s, fila: %(row)s." 
- -#, python-format -msgid "" -"Unexpected mapping status %(status)s for mapping%(id)s. Attributes: %(attr)s." -msgstr "" -"Estado de correlación no esperado %(status)s para la correlación %(id)s. " -"Atributos: %(attr)s." - -#, python-format -msgid "Unexpected output. Expected [%(expected)s] but received [%(output)s]" -msgstr "" -"Salida inesperada. Se esperaba[%(expected)s] pero se ha recibido [%(output)s]" - -msgid "Unexpected response from Nimble API" -msgstr "Respuesta inesperada de la API Nimble" - -msgid "Unexpected response from Tegile IntelliFlash API" -msgstr "Respuesta inesperada de la API de Tegile IntelliFlash" - -msgid "Unexpected status code" -msgstr "Código de estado inesperado" - -#, python-format -msgid "" -"Unexpected status code from the switch %(switch_id)s with protocol " -"%(protocol)s for url %(page)s. Error: %(error)s" -msgstr "" -"Código de estado inesperado del conmutador %(switch_id)s con el protocolo " -"%(protocol)s para el URL %(page)s. Error: %(error)s" - -msgid "Unknown Gluster exception" -msgstr "Excepción de Gluster desconocida" - -msgid "Unknown NFS exception" -msgstr "Excepción de NFS desconocida" - -msgid "Unknown RemoteFS exception" -msgstr "Excepción de RemoteFS desconocida" - -msgid "Unknown SMBFS exception." -msgstr "Excepción de SMBFS desconocida" - -msgid "Unknown Virtuozzo Storage exception" -msgstr "Excepción desconocida de Virtuozzo Storage" - -msgid "Unknown action" -msgstr "Acción desconocida" - -#, python-format -msgid "" -"Unknown if the volume: %s to be managed is already being managed by Cinder. " -"Aborting manage volume. Please add 'cinder_managed' custom schema property " -"to the volume and set its value to False. Alternatively, Set the value of " -"cinder config policy 'zfssa_manage_policy' to 'loose' to remove this " -"restriction." -msgstr "" -"No se sabe si el volumen: %s que se tiene que gestionar ya está siendo " -"gestionado por Cinder. Abortando Gestionar volumen. 
Añada la propiedad de " -"esquema personalizada 'cinder_managed' al volumen y establezca su valor en " -"False. Como alternativa, establezca el valor de la política de configuración " -"de Cinder 'zfssa_manage_policy' en 'loose' para eliminar esta restricción." - -#, python-format -msgid "" -"Unknown if the volume: %s to be managed is already being managed by Cinder. " -"Aborting manage volume. Please add 'cinder_managed' custom schema property " -"to the volume and set its value to False. Alternatively, set the value of " -"cinder config policy 'zfssa_manage_policy' to 'loose' to remove this " -"restriction." -msgstr "" -"No se sabe si el volumen: %s que se tiene que gestionar ya está siendo " -"gestionado por Cinder. Abortando Gestionar volumen. Añada la propiedad de " -"esquema personalizada 'cinder_managed' al volumen y establezca su valor en " -"False. Como alternativa, establezca el valor de la política de configuración " -"de Cinder 'zfssa_manage_policy' en 'loose' para eliminar esta restricción." - -#, python-format -msgid "Unknown operation %s." -msgstr "Operación desconocida %s." - -#, python-format -msgid "Unknown or unsupported command %(cmd)s" -msgstr "Mandato desconocido o no soportado %(cmd)s" - -#, python-format -msgid "Unknown protocol: %(protocol)s." -msgstr "Protocolo desconocido: %(protocol)s." - -#, python-format -msgid "Unknown quota resources %(unknown)s." -msgstr "Recursos de cuota desconocidos %(unknown)s." - -msgid "Unknown sort direction, must be 'desc' or 'asc'" -msgstr "Dirección de clasificación desconocida, debe ser 'desc' o ' asc'" - -msgid "Unknown sort direction, must be 'desc' or 'asc'." -msgstr "Dirección de ordenación desconocida, debe ser 'desc' o 'asc'." - -msgid "Unmanage and cascade delete options are mutually exclusive." -msgstr "" -"Las opciones de dejar de administrar y suprimir en cascada son mútuamente " -"excluyentes." - -msgid "Unmanage volume not implemented." 
-msgstr "No se ha implementdo la opción de dejar de administrar un volumen." - -msgid "Unmanaging of snapshots from 'failed-over' volumes is not allowed." -msgstr "" -"No se permite dejar de gestionar instantáneas desde volúmenes que han dado " -"error ('failed-over')." - -msgid "Unmanaging of snapshots from failed-over volumes is not allowed." -msgstr "" -"No se permite dejar de gestionar instantáneas desde volúmenes que han dado " -"error." - -#, python-format -msgid "Unrecognized QOS keyword: \"%s\"" -msgstr "Palabra clave de QOS no reconocida: \"%s\"" - -#, python-format -msgid "Unrecognized backing format: %s" -msgstr "Formato de respaldo no reconocido: %s" - -#, python-format -msgid "Unrecognized read_deleted value '%s'" -msgstr "Valor de read_deleted no reconocido '%s'" - -#, python-format -msgid "Unset gcs options: %s" -msgstr "Inhabilite las opciones de gcs: %s" - -msgid "Unsupported Content-Type" -msgstr "Tipo de contenido no soportado" - -msgid "" -"Unsupported Data ONTAP version. Data ONTAP version 7.3.1 and above is " -"supported." -msgstr "" -"Versión de ONTAP de datos no soportada. Hay soporte para la versión de ONTAP " -"de datos 7.3.1 y posterior." - -#, python-format -msgid "Unsupported backup metadata version (%s)" -msgstr "Versión de metadatos de copia de seguridad no soportada (%s)" - -msgid "Unsupported backup metadata version requested" -msgstr "" -"Se ha solicitado una versión de metadatos de copia de seguridad no soportada" - -msgid "Unsupported backup verify driver" -msgstr "Controlador de verificación de copia de seguridad no admitido" - -#, python-format -msgid "Unsupported fields %s." -msgstr "Campos no soportados: %s." - -#, python-format -msgid "" -"Unsupported firmware on switch %s. Make sure switch is running firmware v6.4 " -"or higher" -msgstr "" -"Firmware no soportado en el conmutador %s. 
Asegúrese de que el conmutador "
-"ejecuta firmware v6.4 o superior"
-
-#, python-format
-msgid "Unsupported volume format: %s "
-msgstr "Formato de volumen no admitido: %s "
-
-msgid "Update QoS policy error."
-msgstr "Error al actualizar la política QoS."
-
-msgid ""
-"Update and delete quota operations can only be made by an admin of immediate "
-"parent or by the CLOUD admin."
-msgstr ""
-"Las operaciones de actualizar y suprimir cuota sólo las puede realizar un "
-"administrador de padre inmediato o un admin de CLOUD."
-
-msgid ""
-"Update and delete quota operations can only be made to projects in the same "
-"hierarchy of the project in which users are scoped to."
-msgstr ""
-"Las operaciones de actualizar y suprimir cuota sólo se pueden realizar en "
-"proyectos de la misma jerarquía del proyecto en el que los usuarios tienen "
-"alcance."
-
-msgid "Update list, doesn't include volume_id"
-msgstr "La lista de actualización no incluye el ID de volumen (volume_id)"
-
-msgid "Updated At"
-msgstr "Actualizado el"
-
-msgid "Upload to glance of attached volume is not supported."
-msgstr "No se soporta la carga en Glance del volumen conectado."
-
-msgid "Use ALUA to associate initiator to host error."
-msgstr "Error al utilizar ALUA para asociar el iniciador con el host."
-
-msgid ""
-"Use CHAP to associate initiator to host error. Please check the CHAP "
-"username and password."
-msgstr ""
-"Error al utilizar CHAP para asociar el iniciador con el host. Verifique el "
-"nombre de usuario y contraseña CHAP."
-
-msgid "User ID"
-msgstr "ID de usuario"
-
-msgid "User does not have admin privileges"
-msgstr "El usuario no tiene privilegios de administrador"
-
-msgid "User not authorized to perform WebDAV operations."
-msgstr "El usuario no tiene autorización para realizar operaciones WebDAV."
-
-msgid "UserName is not configured."
-msgstr "Nombre de usuario no está configurado."
-
-msgid "UserPassword is not configured."
-msgstr "Contraseña de usuario no está configurada." - -msgid "V2 rollback, volume is not in any storage group." -msgstr "" -"Retrotracción V2, el volumen no está en ningún grupo de almacenamiento." - -msgid "V3 rollback" -msgstr "Retrotracción V3" - -msgid "VF is not enabled." -msgstr "VF no está habilitado." - -#, python-format -msgid "VV Set %s does not exist." -msgstr "Conjunto VV %s no existe." - -#, python-format -msgid "Valid consumer of QoS specs are: %s" -msgstr "Consumidor válido de QoS specs son: %s" - -#, python-format -msgid "Valid control location are: %s" -msgstr "La ubicación de control válido es: %s" - -#, python-format -msgid "Validate volume connection failed (error: %(err)s)." -msgstr "Error al validar la conexión del volumen (error: %(err)s)." - -#, python-format -msgid "" -"Value \"%(value)s\" is not valid for configuration option \"%(option)s\"" -msgstr "" -"El valor \"%(value)s\" no es valido para la opción de configuración " -"\"%(option)s\"" - -#, python-format -msgid "Value %(param)s for %(param_string)s is not a boolean." -msgstr "El valor %(param)s de %(param_string)s no es un booleano." - -msgid "Value required for 'scality_sofs_config'" -msgstr "Valor necesario para 'scality_sofs_config'" - -#, python-format -msgid "ValueError: %s" -msgstr "ValueError: %s" - -#, python-format -msgid "Vdisk %(name)s not involved in mapping %(src)s -> %(tgt)s." -msgstr "" -"El disco virtual %(name)s no está implicado en la correlación %(src)s -> " -"%(tgt)s." - -#, python-format -msgid "" -"Version %(req_ver)s is not supported by the API. Minimum is %(min_ver)s and " -"maximum is %(max_ver)s." -msgstr "" -"La versión %(req_ver)s no está soportada por la API. La versión mínima es " -"la %(min_ver)s y la máxima es la %(max_ver)s." - -#, python-format -msgid "VersionedObject %s cannot retrieve object by id." -msgstr "VersionedObject %s no puede recuperar el objeto por su ID." 
- -#, python-format -msgid "VersionedObject %s does not support conditional update." -msgstr "VersionedObject %s no da soporte a la actualización condicional." - -#, python-format -msgid "Virtual volume '%s' doesn't exist on array." -msgstr "El volumen virtual '%s' no existe en la matriz." - -#, python-format -msgid "Vol copy job for dest %s failed." -msgstr "El trabajo de copia de volumen para destino %s ha fallado." - -#, python-format -msgid "Volume %(deviceID)s not found." -msgstr "No se ha encontrado el volumen %(deviceID)s." - -#, python-format -msgid "" -"Volume %(name)s not found on the array. Cannot determine if there are " -"volumes mapped." -msgstr "" -"El volumen %(name)s no se ha encontrado en la matriz. No se puede determinar " -"si hay volúmenes correlacionados." - -#, python-format -msgid "Volume %(name)s was created in VNX, but in %(state)s state." -msgstr "El volumen %(name)s se ha creado en VNX, pero con el estado %(state)s." - -#, python-format -msgid "Volume %(vol)s could not be created in pool %(pool)s." -msgstr "No se ha podido crear el volumen %(vol)s en la agrupación %(pool)s." - -#, python-format -msgid "Volume %(vol1)s does not match with snapshot.volume_id %(vol2)s." -msgstr "" -"El volumen %(vol1)s no coincide con el valor de snapshot.volume_id %(vol2)s." - -#, python-format -msgid "" -"Volume %(vol_id)s status must be available to update readonly flag, but " -"current status is: %(vol_status)s." -msgstr "" -"El estado de volumen %(vol_id)s debe ser disponible para actualizar " -"distintivo de sólo lectura, pero el estado actual es: %(vol_status)s." - -#, python-format -msgid "" -"Volume %(vol_id)s status must be available, but current status is: " -"%(vol_status)s." -msgstr "" -"El estado de volumen %(vol_id)s debe ser disponible, pero el estado actual " -"es: %(vol_status)s." - -#, python-format -msgid "Volume %(volume_id)s could not be found." -msgstr "El volumen %(volume_id)s no se ha podido encontrar." 
-
-#, python-format
-msgid ""
-"Volume %(volume_id)s has no administration metadata with key "
-"%(metadata_key)s."
-msgstr ""
-"Volumen %(volume_id)s no tiene metadatos de administración con la clave "
-"%(metadata_key)s."
-
-#, python-format
-msgid "Volume %(volume_id)s has no metadata with key %(metadata_key)s."
-msgstr ""
-"El volumen %(volume_id)s no tiene metadatos con la clave %(metadata_key)s."
-
-#, python-format
-msgid ""
-"Volume %(volume_id)s is currently mapped to unsupported host group %(group)s"
-msgstr ""
-"El volumen %(volume_id)s está correlacionado actualmente con un grupo de "
-"host no admitido %(group)s"
-
-#, python-format
-msgid "Volume %(volume_id)s is not currently mapped to host %(host)s"
-msgstr ""
-"El volumen %(volume_id)s no está correlacionado actualmente con el host "
-"%(host)s"
-
-#, python-format
-msgid "Volume %(volume_id)s is still attached, detach volume first."
-msgstr ""
-"El volumen %(volume_id)s todavía está conectado, en primer lugar "
-"desconecte el volumen."
-
-#, python-format
-msgid "Volume %(volume_id)s replication error: %(reason)s"
-msgstr "Error de réplica de volumen %(volume_id)s: %(reason)s"
-
-#, python-format
-msgid "Volume %(volume_name)s is busy."
-msgstr "El volumen %(volume_name)s está ocupado."
-
-#, python-format
-msgid "Volume %s could not be created from source volume."
-msgstr "No se ha podido crear el volumen %s desde el volumen de origen."
-
-#, python-format
-msgid "Volume %s could not be created on shares."
-msgstr "El volumen %s no se puede crear en las unidades compartidas."
-
-#, python-format
-msgid "Volume %s could not be created."
-msgstr "No se ha podido crear el volumen %s."
- -#, python-format -msgid "Volume %s does not exist in Nexenta SA" -msgstr "El volumen %s no existe en Nexenta SA" - -#, python-format -msgid "Volume %s does not exist in Nexenta Store appliance" -msgstr "El volumen %s no existe en la aplicación Nexenta Store" - -#, python-format -msgid "Volume %s does not exist on the array." -msgstr "El volumen %s no existe en la matriz." - -#, python-format -msgid "Volume %s does not have provider_location specified, skipping." -msgstr "El volumen %s no tiene especificado provider_location, se salta." - -#, python-format -msgid "Volume %s doesn't exist on array." -msgstr "El volumen %s no existe en la matriz." - -#, python-format -msgid "Volume %s doesn't exist on the ZFSSA backend." -msgstr "El volumen %s no existe en el programa de fondo ZFSSA." - -#, python-format -msgid "Volume %s is already managed by OpenStack." -msgstr "El volumen %s ya se gestiona en OpenStack." - -#, python-format -msgid "" -"Volume %s is not of replicated type. This volume needs to be of a volume " -"type with the extra spec replication_enabled set to ' True' to support " -"replication actions." -msgstr "" -"El volumen %s no es de tipo replicado. Este volumen tiene que tener un tipo " -"de volumen con la especificación adicional replication_enabled establecida " -"en ' True' para admitir acciones de replicación." - -#, python-format -msgid "" -"Volume %s is online. Set volume to offline for managing using OpenStack." -msgstr "" -"El volumen %s está en línea. Defina el volumen como fuera de línea para " -"gestionarlo con OpenStack." - -#, python-format -msgid "Volume %s must not be part of a consistency group." -msgstr "El volumen %s no debe formar parte de un grupo de consistencia." - -#, python-format -msgid "Volume %s not found" -msgstr "No se ha encontrado el volumen %s" - -#, python-format -msgid "Volume %s not found." -msgstr "No se ha encontrado el volumen %s." 
- -#, python-format -msgid "Volume %s: Error trying to extend volume" -msgstr "Volumen %s: Error al intentar ampliar el volumen" - -#, python-format -msgid "Volume (%s) already exists on array" -msgstr "El volumen (%s) ya existe en la matriz" - -#, python-format -msgid "Volume (%s) already exists on array." -msgstr "El volumen (%s) ya existe en la matriz." - -#, python-format -msgid "Volume Group %s does not exist" -msgstr "El grupo de volúmenes %s no existe" - -#, python-format -msgid "Volume Type %(id)s already exists." -msgstr "El tipo de volumen %(id)s ya existe. " - -#, python-format -msgid "" -"Volume Type %(volume_type_id)s deletion is not allowed with volumes present " -"with the type." -msgstr "" -"La eliminación del tipo de volumen %(volume_type_id)s no está permitida con " -"los volúmenes presente con el tipo." - -#, python-format -msgid "" -"Volume Type %(volume_type_id)s has no extra specs with key " -"%(extra_specs_key)s." -msgstr "" -"El tipo de volumen %(volume_type_id)s no tiene especificaciones adicionales " -"con la clave %(extra_specs_key)s." - -msgid "Volume Type id must not be None." -msgstr "La id del tipo de volumen no debe ser Ninguno." - -#, python-format -msgid "" -"Volume [%(cb_vol)s] was not found at CloudByte storage corresponding to " -"OpenStack volume [%(ops_vol)s]." -msgstr "" -"No se ha encontrado el volumen [%(cb_vol)s] en el almacenamiento de " -"CloudByte correspondiente al volumen de OpenStack [%(ops_vol)s]." - -#, python-format -msgid "Volume [%s] not found in CloudByte storage." -msgstr "No se ha encontrado el volumen [%s] en el almacenamiento de CloudByte." - -#, python-format -msgid "Volume attachment could not be found with filter: %(filter)s ." -msgstr "" -"El archivo adjunto de volumen no se ha podido encontrar con el filtro: " -"%(filter)s." 
- -#, python-format -msgid "Volume backend config is invalid: %(reason)s" -msgstr "La configuración de fondo del volumen no es válida: %(reason)s" - -msgid "Volume by this name already exists" -msgstr "Ya existe un volumen con este nombre" - -msgid "Volume cannot be restored since it contains snapshots." -msgstr "El volumen no se puede restaurar porque contiene instantáneas." - -msgid "Volume create failed while extracting volume ref." -msgstr "" -"Ha fallado la creación del volumen al extraer la referencia del volumen." - -#, python-format -msgid "Volume device file path %s does not exist." -msgstr "La vía de acceso de archivo de dispositivo de volumen %s no existe." - -#, python-format -msgid "Volume device not found at %(device)s." -msgstr "Dispositivo de volumen no encontrado en: %(device)s" - -#, python-format -msgid "Volume does not exists %s." -msgstr "El volumen %s no existe." - -#, python-format -msgid "Volume driver %s not initialized." -msgstr "Controlador de volumen %s no inicializado." - -msgid "Volume driver not ready." -msgstr "Driver de volumen no está preparado." - -#, python-format -msgid "Volume driver reported an error: %(message)s" -msgstr "Driver de volumen ha reportado un error: %(message)s" - -msgid "Volume has a temporary snapshot that can't be deleted at this time." -msgstr "" -"El volumen tiene una instantánea temporal que no se puede suprimir en este " -"momento." - -msgid "Volume has children and cannot be deleted!" -msgstr "El volumen tiene hijos y no se puede suprimir." - -#, python-format -msgid "Volume is attached to a server. (%s)" -msgstr "El volumen está conectado a un servidor. (%s)" - -msgid "Volume is in-use." -msgstr "El volumen está en uso." - -msgid "Volume is not available." -msgstr "El volumen no está disponible." - -msgid "Volume is not local to this node" -msgstr "El volumen no es local para este nodo" - -msgid "Volume is not local to this node." -msgstr "El volumen no es local para este nodo." 
-
-msgid ""
-"Volume metadata backup requested but this driver does not yet support this "
-"feature."
-msgstr ""
-"Se ha solicitado copia de seguridad de metadatos de volumen pero este "
-"controlador no soporta aún esta característica."
-
-#, python-format
-msgid "Volume migration failed: %(reason)s"
-msgstr "Ha fallado la migración en volumen: %(reason)s"
-
-msgid "Volume must be available"
-msgstr "El volumen debe estar disponible"
-
-msgid "Volume must be in the same availability zone as the snapshot"
-msgstr ""
-"El volumen debe estar en la misma zona de disponibilidad que la instantánea"
-
-msgid "Volume must be in the same availability zone as the source volume"
-msgstr ""
-"El volumen debe estar en la misma zona de disponibilidad que el volumen de "
-"origen"
-
-msgid "Volume must have a volume type"
-msgstr "El volumen debe tener un tipo de volumen"
-
-msgid "Volume must not be replicated."
-msgstr "El volumen no debe replicarse."
-
-msgid "Volume must not have snapshots."
-msgstr "El volumen no debe tener instantáneas."
-
-#, python-format
-msgid "Volume not found for instance %(instance_id)s."
-msgstr "No se ha encontrado el volumen para la instancia %(instance_id)s."
-
-msgid "Volume not found on configured storage backend."
-msgstr ""
-"No se ha encontrado el volumen en el programa de fondo de almacenamiento "
-"configurado."
-
-msgid ""
-"Volume not found on configured storage backend. If your volume name contains "
-"\"/\", please rename it and try to manage again."
-msgstr ""
-"No se ha encontrado el volumen en el programa de fondo de almacenamiento "
-"configurado. Si el nombre del volumen contiene \"/\", renómbrelo y vuelva a "
-"intentarlo."
-
-msgid "Volume not found on configured storage pools."
-msgstr ""
-"No se ha encontrado el volumen en las agrupaciones de almacenamiento "
-"configuradas."
-
-msgid "Volume not found."
-msgstr "No se ha encontrado el volumen."
-
-msgid "Volume not unique."
-msgstr "El volumen no es exclusivo."
- -msgid "Volume not yet assigned to host." -msgstr "Aún no se ha asignado el volumen al host." - -msgid "Volume reference must contain source-name element." -msgstr "La referencia de volumen debe contener el elemento source-name." - -#, python-format -msgid "Volume replication for %(volume_id)s could not be found." -msgstr "La réplica de volumen de %(volume_id)s no se ha encontrado." - -#, python-format -msgid "Volume service %s failed to start." -msgstr "No se ha podido iniciar el servicio de volumen %s." - -msgid "Volume should have agent-type set as None." -msgstr "El volumen debe tener agent-type establecido como None." - -#, python-format -msgid "" -"Volume size %(volume_size)sGB cannot be smaller than the image minDisk size " -"%(min_disk)sGB." -msgstr "" -"El tamaño de volumen %(volume_size)sGB no puede ser menor que el tamaño de " -"minDisk de imagen %(min_disk)sGB." - -#, python-format -msgid "Volume size '%(size)s' must be an integer and greater than 0" -msgstr "El tamaño de volumen '%(size)s' debe ser un entero y mayor que 0" - -#, python-format -msgid "" -"Volume size '%(size)s'GB cannot be smaller than original volume size " -"%(source_size)sGB. They must be >= original volume size." -msgstr "" -"El tamaño del volumen '%(size)s'GB no puede ser menor que el tamaño original " -"del volumen %(source_size)sGB. Deben ser >= tamaño de volumen original." - -#, python-format -msgid "" -"Volume size '%(size)s'GB cannot be smaller than the snapshot size " -"%(snap_size)sGB. They must be >= original snapshot size." -msgstr "" -"El tamaño de volumen '%(size)s'GB no puede ser menor que el tamaño de la " -"instantánea %(snap_size)sGB. Deben ser >= el tamaño de instantánea original." - -msgid "Volume size increased since the last backup. Do a full backup." -msgstr "" -"El tamaño del volumen ha aumentado desde la última copia de seguridad. Haga " -"una copia de seguridad completa." - -msgid "Volume size must be a multiple of 1 GB." 
-msgstr "El tamaño del volumen debe ser un múltiplo de 1 GB." - -msgid "Volume size must multiple of 1 GB." -msgstr "El tamaño de volumen debe ser múltiplo de 1 GB." - -#, python-format -msgid "Volume status must be \"available\" or \"in-use\" for snapshot. (is %s)" -msgstr "" -"El estado de volumen debe ser \"disponible\" o \"en-uso\" para la " -"instantánea. (es %s)" - -msgid "Volume status must be \"available\" or \"in-use\"." -msgstr "El estado de volumen debe ser \"disponible\" o \"en-uso\"." - -#, python-format -msgid "Volume status must be %s to reserve." -msgstr "El estado de volumen debe ser %s para poder reservarlo." - -msgid "Volume status must be 'available'." -msgstr "El estado de volumen debe ser 'disponible'." - -msgid "Volume to Initiator Group mapping already exists" -msgstr "El volumen para la correlación del grupo de iniciadores ya existe" - -#, python-format -msgid "" -"Volume to be backed up must be available or in-use, but the current status " -"is \"%s\"." -msgstr "" -"El volumen del que se va a hacer una copia de seguridad debe estar " -"disponible o en uso, pero el estado actual es \"%s\"." - -msgid "Volume to be restored to must be available" -msgstr "El volumen que restaurar debe estar disponible" - -#, python-format -msgid "Volume type %(volume_type_id)s could not be found." -msgstr "No se ha podido encontrar el tipo de volumen %(volume_type_id)s." - -#, python-format -msgid "Volume type ID '%s' is invalid." -msgstr "El ID de tipo de volumen '%s' no es válido." - -#, python-format -msgid "" -"Volume type access for %(volume_type_id)s / %(project_id)s combination " -"already exists." -msgstr "" -"El acceso de tipo de volumen para la combinación %(volume_type_id)s / " -"%(project_id)s ya existe." - -#, python-format -msgid "" -"Volume type access not found for %(volume_type_id)s / %(project_id)s " -"combination." -msgstr "" -"No se ha encontrado el acceso de tipo de volumen para la combinación " -"%(volume_type_id)s / %(project_id)s." 
-
-#, python-format
-msgid "Volume type encryption for type %(type_id)s already exists."
-msgstr "El cifrado del tipo de volumen para el tipo %(type_id)s ya existe."
-
-#, python-format
-msgid "Volume type encryption for type %(type_id)s does not exist."
-msgstr "El cifrado de tipo de volumen para el tipo %(type_id)s no existe."
-
-msgid "Volume type name can not be empty."
-msgstr "El nombre de tipo de volumen no puede estar vacío."
-
-#, python-format
-msgid "Volume type with name %(volume_type_name)s could not be found."
-msgstr ""
-"No se ha podido encontrar el tipo de volumen con el nombre "
-"%(volume_type_name)s."
-
-#, python-format
-msgid "Volume%s: not found"
-msgstr "Volumen %s: no encontrado"
-
-#, python-format
-msgid ""
-"Volume: %(volumeName)s is not a concatenated volume. You can only perform "
-"extend on concatenated volume. Exiting..."
-msgstr ""
-"El volumen %(volumeName)s no es un volumen concatenado. Sólo puede realizar "
-"una ampliación del volumen concatenado. Saliendo..."
-
-#, python-format
-msgid "Volume: %(volumeName)s was not added to storage group %(sgGroupName)s."
-msgstr ""
-"El volumen %(volumeName)s no se ha añadido al grupo de almacenamiento "
-"%(sgGroupName)s."
-
-#, python-format
-msgid "Volume: %s is already being managed by Cinder."
-msgstr "El volumen %s ya se gestiona en Cinder."
-
-msgid ""
-"Volumes/account exceeded on both primary and secondary SolidFire accounts."
-msgstr ""
-"Se ha superado el número de volúmenes por cuenta en las cuentas de "
-"SolidFire, tanto primarias como secundarias."
-
-#, python-format
-msgid ""
-"VzStorage config 'vzstorage_used_ratio' invalid. Must be > 0 and <= 1.0: %s."
-msgstr ""
-"VzStorage config 'vzstorage_used_ratio' no válido. Debe ser > 0 y <= 1.0: %s."
-
-#, python-format
-msgid "VzStorage config file at %(config)s doesn't exist."
-msgstr "El archivo de config VzStorage en %(config)s no existe."
-
-msgid "Wait replica complete timeout."
-msgstr "Al esperar la réplica se ha agotado el tiempo de espera."
-
-#, python-format
-msgid "Wait synchronize failed. Running status: %s."
-msgstr "Esperar sincronización ha fallado. Estado de ejecución: %s."
-
-msgid ""
-"Waiting for all nodes to join cluster. Ensure all sheep daemons are running."
-msgstr ""
-"Esperando a que todos los nodos se unan al clúster. Asegúrese de que los "
-"daemons sheep se estén ejecutando."
-
-msgid "We should not do switch over on primary array."
-msgstr "No deberíamos hacer conmutación en la matriz principal."
-
-msgid "X-IO Volume Driver exception!"
-msgstr "Excepción del controlador de volumen X-IO"
-
-msgid "XtremIO not configured correctly, no iscsi portals found"
-msgstr ""
-"XtremIO no configurado correctamente, no se ha encontrado ningún portal iscsi"
-
-msgid "XtremIO not initialized correctly, no clusters found"
-msgstr ""
-"XtremIO no se ha inicializado correctamente, no se han encontrado clústeres"
-
-msgid "You must implement __call__"
-msgstr "Debe implementar __call__"
-
-msgid ""
-"You must install hpe3parclient before using 3PAR drivers. Run \"pip install "
-"python-3parclient\" to install the hpe3parclient."
-msgstr ""
-"Debe instalar hpe3parclient para poder utilizar controladores 3PAR. Ejecute "
-"\"pip install python-3parclient\" para instalar hpe3parclient."
-
-msgid "You must supply an array in your EMC configuration file."
-msgstr "Debe proporcionar una matriz en el archivo de configuración EMC."
-
-#, python-format
-msgid ""
-"Your original size: %(originalVolumeSize)s GB is greater than: %(newSize)s "
-"GB. Only Extend is supported. Exiting..."
-msgstr ""
-"Su tamaño original de %(originalVolumeSize)s GB es mayor que: %(newSize)s "
-"GB. Sólo se admite la ampliación. Saliendo..."
- -#, python-format -msgid "ZeroDivisionError: %s" -msgstr "ZeroDivisionError: %s" - -msgid "Zone" -msgstr "Zona" - -#, python-format -msgid "Zoning Policy: %s, not recognized" -msgstr "Política de distribución en zonas: %s, no reconocida" - -#, python-format -msgid "_create_and_copy_vdisk_data: Failed to get attributes for vdisk %s." -msgstr "" -"_create_and_copy_vdisk_data: no se han podido obtener los atributos para " -"vdisk %s." - -msgid "_create_host failed to return the host name." -msgstr "_create_host no ha devuelto el nombre de host." - -msgid "" -"_create_host: Can not translate host name. Host name is not unicode or " -"string." -msgstr "" -"_create_host: no puede convertir el nombre de host. El nombre de host no " -"está en unicode o serie." - -msgid "_create_host: No connector ports." -msgstr "_create_host: no hay puertos de conector." - -msgid "_create_local_cloned_volume, Replication Service not found." -msgstr "" -"_create_local_cloned_volume, no se ha encontrado el servicio de replicación." - -#, python-format -msgid "" -"_create_local_cloned_volume, volumename: %(volumename)s, sourcevolumename: " -"%(sourcevolumename)s, source volume instance: %(source_volume)s, target " -"volume instance: %(target_volume)s, Return code: %(rc)lu, Error: " -"%(errordesc)s." -msgstr "" -"_create_local_cloned_volume, nombre de volumen: %(volumename)s, nombre de " -"volumen de origen: %(sourcevolumename)s, instancia de volumen de origen: " -"%(source_volume)s, instancia de volumen de destino: %(target_volume)s, " -"código de retorno: %(rc)lu, error: %(errordesc)s." - -#, python-format -msgid "" -"_create_vdisk %(name)s - did not find success message in CLI output.\n" -" stdout: %(out)s\n" -" stderr: %(err)s" -msgstr "" -"_create_vdisk %(name)s - no se han encontrado mensajes de realizado " -"satisfactoriamente en la salida de la CLI.\n" -" stdout: %(out)s\n" -" stderr: %(err)s" - -msgid "_create_volume_name, id_code is None." 
-msgstr "_create_volume_name, id_code es None."
-
-msgid "_delete_copysession, Cannot find Replication Service"
-msgstr "_delete_copysession, no se puede encontrar el servicio de replicación"
-
-#, python-format
-msgid ""
-"_delete_copysession, copy session type is undefined! copy session: "
-"%(cpsession)s, copy type: %(copytype)s."
-msgstr ""
-"_delete_copysession, no se ha definido el tipo de sesión de copia. Sesión de "
-"copia: %(cpsession)s, tipo de copia: %(copytype)s."
-
-#, python-format
-msgid ""
-"_delete_copysession, copysession: %(cpsession)s, operation: %(operation)s, "
-"Return code: %(rc)lu, Error: %(errordesc)s."
-msgstr ""
-"_delete_copysession, sesión de copia: %(cpsession)s, operación: "
-"%(operation)s, Código de retorno: %(rc)lu, Error: %(errordesc)s."
-
-#, python-format
-msgid ""
-"_delete_volume, volumename: %(volumename)s, Return code: %(rc)lu, Error: "
-"%(errordesc)s."
-msgstr ""
-"_delete_volume, nombre de volumen: %(volumename)s, código de retorno: "
-"%(rc)lu, error: %(errordesc)s."
-
-#, python-format
-msgid ""
-"_delete_volume, volumename: %(volumename)s, Storage Configuration Service "
-"not found."
-msgstr ""
-"_delete_volume, nombre de volumen: %(volumename)s, no se ha encontrado el "
-"servicio de configuración de almacenamiento."
-
-#, python-format
-msgid ""
-"_exec_eternus_service, classname: %(classname)s, InvokeMethod, cannot "
-"connect to ETERNUS."
-msgstr ""
-"_exec_eternus_service, nombre de clase: %(classname)s, InvokeMethod, no se "
-"puede establecer conexión con ETERNUS."
-
-msgid "_extend_volume_op: Extending a volume with snapshots is not supported."
-msgstr "_extend_volume_op: no se permite ampliar un volumen con instantáneas."
-
-#, python-format
-msgid ""
-"_find_affinity_group, connector: %(connector)s, Associators: "
-"FUJITSU_AuthorizedTarget, cannot connect to ETERNUS."
-msgstr "" -"_find_affinity_group, conector: %(connector)s, Asociadores: " -"FUJITSU_AuthorizedTarget, no se puede establecer conexión con ETERNUS." - -#, python-format -msgid "" -"_find_affinity_group, connector: %(connector)s, EnumerateInstanceNames, " -"cannot connect to ETERNUS." -msgstr "" -"_find_affinity_group, conector: %(connector)s, EnumerateInstanceNames, no se " -"puede establecer conexión con ETERNUS." - -#, python-format -msgid "" -"_find_affinity_group,connector: %(connector)s,AssocNames: " -"FUJITSU_ProtocolControllerForUnit, cannot connect to ETERNUS." -msgstr "" -"_find_affinity_group,conector: %(connector)s,AssocNames: " -"FUJITSU_ProtocolControllerForUnit, no se puede establecer conexión con " -"ETERNUS." - -#, python-format -msgid "" -"_find_copysession, ReferenceNames, vol_instance: %(vol_instance_path)s, " -"Cannot connect to ETERNUS." -msgstr "" -"_find_copysession, ReferenceNames, vol_instance: %(vol_instance_path)s, no " -"se puede establecer conexión con ETERNUS." - -#, python-format -msgid "" -"_find_eternus_service, classname: %(classname)s, EnumerateInstanceNames, " -"cannot connect to ETERNUS." -msgstr "" -"_find_eternus_service, nombre de clase: %(classname)s, " -"EnumerateInstanceNames, no se puede establecer conexión con ETERNUS." - -#, python-format -msgid "_find_initiator_names, connector: %(connector)s, initiator not found." -msgstr "" -"_find_initiator_names, conector: %(connector)s, no se ha encontrado el " -"iniciador." - -#, python-format -msgid "" -"_find_lun, volumename: %(volumename)s, EnumerateInstanceNames, cannot " -"connect to ETERNUS." -msgstr "" -"_find_lun, nombre de volumen: %(volumename)s, EnumerateInstanceNames, no se " -"puede establecer conexión con ETERNUS." - -#, python-format -msgid "" -"_find_pool, eternus_pool:%(eternus_pool)s, EnumerateInstances, cannot " -"connect to ETERNUS." -msgstr "" -"_find_pool, eternus_pool:%(eternus_pool)s, EnumerateInstances, no se puede " -"establecer conexión con ETERNUS." 
- -#, python-format -msgid "" -"_get_drvcfg, filename: %(filename)s, tagname: %(tagname)s, data is None!! " -"Please edit driver configuration file and correct." -msgstr "" -"_get_drvcfg, nombre de archivo: %(filename)s, nombre de etiqueta: " -"%(tagname)s,los datos son None. Edite la configuración del controlador y " -"corríjalo." - -#, python-format -msgid "" -"_get_eternus_connection, filename: %(filename)s, ip: %(ip)s, port: %(port)s, " -"user: %(user)s, passwd: ****, url: %(url)s, FAILED!!." -msgstr "" -"_get_eternus_connection, nombre de archivo: %(filename)s, ip: %(ip)s, " -"puerto: %(port)s, usuario: %(user)s, contraseña: ****, URL: %(url)s, ERROR." - -#, python-format -msgid "" -"_get_eternus_iscsi_properties, iscsiip list: %(iscsiip_list)s, iqn not found." -msgstr "" -"_get_eternus_iscsi_properties, iscsiip list: %(iscsiip_list)s, iqn no " -"encontrado." - -#, python-format -msgid "" -"_get_eternus_iscsi_properties, iscsiip: %(iscsiip)s, AssociatorNames: " -"CIM_BindsTo, cannot connect to ETERNUS." -msgstr "" -"_get_eternus_iscsi_properties, iscsiip: %(iscsiip)s, Nombres asociador: " -"CIM_BindsTo, no se puede establecer conexión con ETERNUS." - -#, python-format -msgid "" -"_get_eternus_iscsi_properties, iscsiip: %(iscsiip)s, EnumerateInstanceNames, " -"cannot connect to ETERNUS." -msgstr "" -"_get_eternus_iscsi_properties, iscsiip: %(iscsiip)s, EnumerateInstanceNames, " -"no se puede establecer conexión con ETERNUS." - -#, python-format -msgid "" -"_get_eternus_iscsi_properties, iscsiip: %(iscsiip)s, GetInstance, cannot " -"connect to ETERNUS." -msgstr "" -"_get_eternus_iscsi_properties, iscsiip: %(iscsiip)s, GetInstance, no se " -"puede establecer conexión con ETERNUS." - -#, python-format -msgid "" -"_get_hdr_dic: attribute headers and values do not match.\n" -" Headers: %(header)s\n" -" Values: %(row)s." -msgstr "" -"_get_hdr_dic: las cabeceras y los valores del atributo no coinciden.\n" -" Cabeceras: %(header)s\n" -" Valores: %(row)s." 
- -msgid "_get_host_from_connector failed to return the host name for connector." -msgstr "" -"_get_host_from_connector no ha podido devolver el nombre de host para el " -"conector." - -#, python-format -msgid "" -"_get_mapdata_fc, getting host-affinity from aglist/vol_instance failed, " -"affinitygroup: %(ag)s, ReferenceNames, cannot connect to ETERNUS." -msgstr "" -"_get_mapdata_fc, no se ha podido obtener host-affinity de aglist/" -"vol_instance, affinitygroup: %(ag)s, ReferenceNames, no se puede establecer " -"conexión con ETERNUS." - -#, python-format -msgid "" -"_get_mapdata_fc, getting host-affinity instance failed, volmap: %(volmap)s, " -"GetInstance, cannot connect to ETERNUS." -msgstr "" -"_get_mapdata_fc, no se ha podido obtener la instancia de host-affinity, " -"volmap: %(volmap)s, GetInstance, no se puede establecer conexión con ETERNUS." - -msgid "" -"_get_mapdata_iscsi, Associators: FUJITSU_SAPAvailableForElement, cannot " -"connect to ETERNUS." -msgstr "" -"_get_mapdata_iscsi, Asociadores: FUJITSU_SAPAvailableForElement, no se puede " -"establecer conexión con ETERNUS." - -#, python-format -msgid "" -"_get_mapdata_iscsi, affinitygroup: %(ag)s, ReferenceNames, cannot connect to " -"ETERNUS." -msgstr "" -"_get_mapdata_iscsi, affinitygroup: %(ag)s, ReferenceNames, no se puede " -"establecer conexión con ETERNUS." - -#, python-format -msgid "" -"_get_mapdata_iscsi, vol_instance: %(vol_instance)s, ReferenceNames: " -"CIM_ProtocolControllerForUnit, cannot connect to ETERNUS." -msgstr "" -"_get_mapdata_iscsi, vol_instance: %(vol_instance)s, ReferenceNames: " -"CIM_ProtocolControllerForUnit, no se puede establecer conexión con ETERNUS." - -#, python-format -msgid "" -"_get_mapdata_iscsi, volmap: %(volmap)s, GetInstance, cannot connect to " -"ETERNUS." -msgstr "" -"_get_mapdata_iscsi, volmap: %(volmap)s, GetInstance, no se puede establecer " -"conexión con ETERNUS." - -msgid "_get_target_port, EnumerateInstances, cannot connect to ETERNUS." 
-msgstr "" -"_get_target_port, EnumerateInstances, no se puede establecer conexión con " -"ETERNUS." - -#, python-format -msgid "_get_target_port, protcol: %(protocol)s, target_port not found." -msgstr "" -"_get_target_port, protocolo: %(protocol)s, no se ha encontrado target_port." - -#, python-format -msgid "_get_unmanaged_replay: Cannot find snapshot named %s" -msgstr "" -"_get_unmanaged_replay: No se puede encontrar la instantánea denominada %s" - -#, python-format -msgid "_get_unmanaged_replay: Cannot find volume id %s" -msgstr "_get_unmanaged_replay: No se puede encontrar el ID de volumen %s" - -msgid "_get_unmanaged_replay: Must specify source-name." -msgstr "_get_unmanaged_replay: Debe especificar source-name." - -msgid "" -"_get_vdisk_map_properties: Could not get FC connection information for the " -"host-volume connection. Is the host configured properly for FC connections?" -msgstr "" -"_get_vdisk_map_properties: no se ha podido obtener la información de " -"conexión de FC para la conexión host-volumen. ¿Está el host configurado " -"correctamente para conexiones de FC?" - -#, python-format -msgid "" -"_get_vdisk_map_properties: No node found in I/O group %(gid)s for volume " -"%(vol)s." -msgstr "" -"_get_vdisk_map_properties: no se ha encontrado ningún nodo en el grupo de E/" -"S %(gid)s para el volumen %(vol)s." - -#, python-format -msgid "" -"_map_lun, vol_instance.path:%(vol)s, volumename: %(volumename)s, volume_uid: " -"%(uid)s, initiator: %(initiator)s, target: %(tgt)s, aglist: %(aglist)s, " -"Storage Configuration Service not found." -msgstr "" -"_map_lun, vol_instance.path:%(vol)s, nombre de volumen: %(volumename)s, " -"volume_uid: %(uid)s, iniciador: %(initiator)s, destino: %(tgt)s, aglist: " -"%(aglist)s, no se ha encontrado el servicio de configuración de " -"almacenamiento." 
- -#, python-format -msgid "" -"_unmap_lun, vol_instance.path: %(volume)s, volumename: %(volumename)s, " -"volume_uid: %(uid)s, aglist: %(aglist)s, Controller Configuration Service " -"not found." -msgstr "" -"_unmap_lun, vol_instance.path: %(volume)s, nombre de volumen: " -"%(volumename)s, volume_uid: %(uid)s, aglist: %(aglist)s, no se ha encontrado " -"el servicio de configuración de controlador." - -#, python-format -msgid "" -"_unmap_lun, volumename: %(volumename)s, volume_uid: %(volume_uid)s, " -"AffinityGroup: %(ag)s, Return code: %(rc)lu, Error: %(errordesc)s." -msgstr "" -"_unmap_lun, nombre de volumen: %(volumename)s, volume_uid: %(volume_uid)s, " -"Grupo de afinidad: %(ag)s, Código de retorno: %(rc)lu, Error: %(errordesc)s." - -#, python-format -msgid "" -"_unmap_lun,vol_instance.path: %(volume)s, AssociatorNames: " -"CIM_ProtocolControllerForUnit, cannot connect to ETERNUS." -msgstr "" -"_unmap_lun,vol_instance.path: %(volume)s, nombres de asociadores: " -"CIM_ProtocolControllerForUnit, no se puede establecer conexión con ETERNUS." - -msgid "_update_volume_stats: Could not get storage pool data." -msgstr "" -"_update_volume_stats: no se han podido obtener los datos de la agrupación de " -"almacenamiento." - -#, python-format -msgid "" -"_wait_for_copy_complete, cpsession: %(cpsession)s, copysession state is " -"BROKEN." -msgstr "" -"_wait_for_copy_complete, cpsession: %(cpsession)s, el estado de la sesión de " -"copia es INTERRUMPIDO." - -#, python-format -msgid "" -"add_vdisk_copy failed: A copy of volume %s exists. Adding another copy would " -"exceed the limit of 2 copies." -msgstr "" -"Error de add_vdisk_copy: ya existe una copia de volumen %s. La adición de " -"otra copia superaría el límite de 2 copias." - -msgid "add_vdisk_copy started without a vdisk copy in the expected pool." -msgstr "" -"add_vdisk_copy se ha iniciado sin una copia de disco virtual en la " -"agrupación esperada." 
- -#, python-format -msgid "all_tenants must be a boolean, got '%s'." -msgstr "all_tenants debe ser un valor booleano, se ha obtenido '%s'." - -msgid "already created" -msgstr "ya creado" - -msgid "already_created" -msgstr "already_created" - -msgid "attach snapshot from remote node" -msgstr "conecte instantánea del nodo remoto" - -#, python-format -msgid "attribute %s not lazy-loadable" -msgstr "el atributo %s no es de carga diferida" - -#, python-format -msgid "" -"backup: %(vol_id)s failed to create device hardlink from %(vpath)s to " -"%(bpath)s.\n" -"stdout: %(out)s\n" -" stderr: %(err)s" -msgstr "" -"copia de seguridad: %(vol_id)s no ha podido crear enlace fijo de dispositivo " -"de %(vpath)s a %(bpath)s.\n" -"stdout: %(out)s\n" -" stderr: %(err)s" - -#, python-format -msgid "" -"backup: %(vol_id)s failed to obtain backup success notification from " -"server.\n" -"stdout: %(out)s\n" -" stderr: %(err)s" -msgstr "" -"copia de seguridad: %(vol_id)s no ha podido obtener notificación de éxito de " -"copia de seguridad de servidor.\n" -"stdout: %(out)s\n" -" stderr: %(err)s" - -#, python-format -msgid "" -"backup: %(vol_id)s failed to run dsmc due to invalid arguments on " -"%(bpath)s.\n" -"stdout: %(out)s\n" -" stderr: %(err)s" -msgstr "" -"copia de seguridad: %(vol_id)s no ha podido ejecutar dsmc debido a " -"argumentos no válidos en %(bpath)s.\n" -"stdout: %(out)s\n" -" stderr: %(err)s" - -#, python-format -msgid "" -"backup: %(vol_id)s failed to run dsmc on %(bpath)s.\n" -"stdout: %(out)s\n" -" stderr: %(err)s" -msgstr "" -"copia de seguridad: %(vol_id)s no ha podido ejecutar dsmc en %(bpath)s.\n" -"stdout: %(out)s\n" -" stderr: %(err)s" - -#, python-format -msgid "backup: %(vol_id)s failed. %(path)s is not a file." -msgstr "copia de seguridad: %(vol_id)s ha fallado. %(path)s no es un archivo." - -#, python-format -msgid "" -"backup: %(vol_id)s failed. %(path)s is unexpected file type. Block or " -"regular files supported, actual file mode is %(vol_mode)s." 
-msgstr "" -"copia de seguridad: %(vol_id)s ha fallado. %(path)s es de un tipo de archivo " -"inesperado. Se soportan archivos de bloque o normales, la modalidad de " -"archivo real es %(vol_mode)s." - -#, python-format -msgid "" -"backup: %(vol_id)s failed. Cannot obtain real path to volume at %(path)s." -msgstr "" -"copia de seguridad: %(vol_id)s ha fallado. No se puede obtener vía de acceso " -"real al volumen en %(path)s." - -msgid "being attached by different mode" -msgstr "conectado por medio de un modo diferente" - -#, python-format -msgid "call failed: %r" -msgstr "Ha fallado la llamada: %r" - -msgid "call failed: GARBAGE_ARGS" -msgstr "Ha fallado la llamada: GARBAGE_ARGS" - -msgid "call failed: PROC_UNAVAIL" -msgstr "Ha fallado la llamada: PROC_UNAVAIL" - -#, python-format -msgid "call failed: PROG_MISMATCH: %r" -msgstr "Ha fallado la llamada: PROG_MISMATCH: %r" - -msgid "call failed: PROG_UNAVAIL" -msgstr "Ha fallado la llamada: PROG_UNAVAIL" - -#, python-format -msgid "can't find lun-map, ig:%(ig)s vol:%(vol)s" -msgstr "No se encuentra lun-map, ig:%(ig)s vol:%(vol)s" - -msgid "can't find the volume to extend" -msgstr "no se puede encontrar el volumen para ampliar" - -msgid "can't handle both name and index in req" -msgstr "no se pueden gestionar el nombre y el índice en la solicitud" - -msgid "cannot understand JSON" -msgstr "no se puede entender JSON" - -#, python-format -msgid "cg-%s" -msgstr "cg-%s" - -msgid "cgsnapshot assigned" -msgstr "cgsnapshot asignada" - -msgid "cgsnapshot changed" -msgstr "cgsnapshot modificada" - -msgid "cgsnapshots assigned" -msgstr "cgsnapshots asignado" - -msgid "cgsnapshots changed" -msgstr "cgsnapshots modificadas" - -msgid "" -"check_for_setup_error: Password or SSH private key is required for " -"authentication: set either san_password or san_private_key option." 
-msgstr "" -"check_for_setup_error: se necesita contraseña o clave privada SSH para la " -"autenticación: establezca la opción san_password o san_private_key." - -msgid "check_for_setup_error: Unable to determine system id." -msgstr "check_for_setup_error: no se ha podido determinar el ID del sistema." - -msgid "check_for_setup_error: Unable to determine system name." -msgstr "" -"check_for_setup_error: no se ha podido determinar el nombre del sistema." - -msgid "check_hypermetro_exist error." -msgstr "Error de check_hypermetro_exist." - -#, python-format -msgid "clone depth exceeds limit of %s" -msgstr "la profundidad de clon excede el límite de %s" - -msgid "consistencygroup assigned" -msgstr "consistencygroup asignado" - -msgid "consistencygroup changed" -msgstr "consistencygroup modificado" - -msgid "control_location must be defined" -msgstr "control_location se debe definir" - -msgid "create_cloned_volume, Source Volume does not exist in ETERNUS." -msgstr "create_cloned_volume, el volumen de origen no existe en ETERNUS." - -#, python-format -msgid "" -"create_cloned_volume, target volume instancename: %(volume_instancename)s, " -"Get Instance Failed." -msgstr "" -"create_cloned_volume, nombre de instancia del volumen de destino: " -"%(volume_instancename)s, error al obtener la instancia." - -msgid "create_cloned_volume: Source and destination size differ." -msgstr "create_cloned_volume: el tamaño de origen y de destino son distintos." - -#, python-format -msgid "" -"create_cloned_volume: source volume %(src_vol)s size is %(src_size)dGB and " -"doesn't fit in target volume %(tgt_vol)s of size %(tgt_size)dGB." -msgstr "" -"create_cloned_volume: el tamaño del volumen de origen %(src_vol)s es " -"%(src_size)dGB y no cabe en el volumen de destino %(tgt_vol)s of size " -"%(tgt_size)dGB." - -msgid "" -"create_consistencygroup_from_src must be creating from a CG snapshot, or a " -"source CG." 
-msgstr "" -"create_consistencygroup_from_src se debe crear desde una instantánea de CG " -"o desde un origen de CG." - -msgid "" -"create_consistencygroup_from_src only supports a cgsnapshot source or a " -"consistency group source. Multiple sources cannot be used." -msgstr "" -"create_consistencygroup_from_src solo admite un origen de cgsnapshot o bien " -"un origen de grupo de consistencia. No se pueden utilizar diversos orígenes." - -#, python-format -msgid "create_copy: Source vdisk %(src)s (%(src_id)s) does not exist." -msgstr "" -"create_copy: El disco virtual de origen %(src)s (%(src_id)s) no existe." - -#, python-format -msgid "create_copy: Source vdisk %(src)s does not exist." -msgstr "create_copy: el disco virtual de origen %(src)s no existe." - -msgid "create_host: Host name is not unicode or string." -msgstr "create_host: El nombre de host no está en Unicode ni es una serie." - -msgid "create_host: No initiators or wwpns supplied." -msgstr "create_host: No se han proporcionado iniciadores o wwpns." - -msgid "create_hypermetro_pair error." -msgstr "Error de create_hypermetro_pair." - -#, python-format -msgid "create_snapshot, eternus_pool: %(eternus_pool)s, pool not found." -msgstr "" -"create_snapshot, eternus_pool: %(eternus_pool)s, no se ha encontrado la " -"aprupación." - -#, python-format -msgid "" -"create_snapshot, snapshotname: %(snapshotname)s, source volume name: " -"%(volumename)s, vol_instance.path: %(vol_instance)s, dest volume name: " -"%(d_volumename)s, pool: %(pool)s, Return code: %(rc)lu, Error: %(errordesc)s." -msgstr "" -"create_snapshot, nombre de instantánea: %(snapshotname)s, nombre de volumen " -"de origen: %(volumename)s, vol_instance.path: %(vol_instance)s, nombre de " -"volumen de destino: %(d_volumename)s, agrupación: %(pool)s, código de " -"retorno: %(rc)lu, error: %(errordesc)s." - -#, python-format -msgid "" -"create_snapshot, volumename: %(s_volumename)s, source volume not found on " -"ETERNUS." 
-msgstr "" -"create_snapshot, nombre de volumen: %(s_volumename)s, no se ha encontrado el " -"volumen de origen en ETERNUS." - -#, python-format -msgid "" -"create_snapshot, volumename: %(volumename)s, Replication Service not found." -msgstr "" -"create_snapshot, nombre de volumen: %(volumename)s, no se ha encontrado el " -"servicio de replicación." - -#, python-format -msgid "" -"create_snapshot: Volume status must be \"available\" or \"in-use\" for " -"snapshot. The invalid status is %s." -msgstr "" -"create_snapshot: el estado de volumen debe ser \"available\" (disponible) o " -"\"in-use\" (en uso) para la instantánea. El estado no válido es %s." - -msgid "create_snapshot: get source volume failed." -msgstr "create_snapshot: la obtención de volumen de origen ha fallado." - -#, python-format -msgid "" -"create_volume, volume: %(volume)s, EnumerateInstances, cannot connect to " -"ETERNUS." -msgstr "" -"create_volume, volumen: %(volume)s, EnumerateInstances, no puede conectar " -"con ETERNUS." - -#, python-format -msgid "" -"create_volume, volume: %(volume)s, volumename: %(volumename)s, eternus_pool: " -"%(eternus_pool)s, Storage Configuration Service not found." -msgstr "" -"create_volume, volumen: %(volume)s, nombre de volumen: %(volumename)s, " -"eternus_pool: %(eternus_pool)s, no se ha encontrado el servicio de " -"configuración de almacenamiento." - -#, python-format -msgid "" -"create_volume, volumename: %(volumename)s, poolname: %(eternus_pool)s, " -"Return code: %(rc)lu, Error: %(errordesc)s." -msgstr "" -"create_volume, nombre de volumen: %(volumename)s, nombre de agrupación: " -"%(eternus_pool)s, código de retorno: %(rc)lu, error: %(errordesc)s." - -msgid "create_volume_from_snapshot, Source Volume does not exist in ETERNUS." -msgstr "" -"create_volume_from_snapshot, el volumen de origen no existe en ETERNUS." - -#, python-format -msgid "" -"create_volume_from_snapshot, target volume instancename: " -"%(volume_instancename)s, Get Instance Failed." 
-msgstr "" -"create_volume_from_snapshot, nombre de instancia del volumen de destino: " -"%(volume_instancename)s, error al obtener la instancia." - -#, python-format -msgid "create_volume_from_snapshot: Snapshot %(name)s does not exist." -msgstr "create_volume_from_snapshot: la instantánea %(name)s no existe." - -#, python-format -msgid "" -"create_volume_from_snapshot: Snapshot status must be \"available\" for " -"creating volume. The invalid status is: %s." -msgstr "" -"create_volume_from_snapshot: el estado de la instantánea debe ser \"available" -"\" (disponible) para crear el volumen. El estado no válido es: %s." - -msgid "" -"create_volume_from_snapshot: Volume size is different from snapshot based " -"volume." -msgstr "" -"create_volume_from_snapshot: el tamaño del volumen es distinto al volumen " -"basado en la instantánea." - -msgid "data not found" -msgstr "No se han encontrado datos" - -#, python-format -msgid "" -"delete: %(vol_id)s failed to run dsmc due to invalid arguments with stdout: " -"%(out)s\n" -" stderr: %(err)s" -msgstr "" -"suprimir: %(vol_id)s no ha podido ejecutar dsmc debido a argumentos no " -"válidos con stdout: %(out)s\n" -" stderr: %(err)s" - -#, python-format -msgid "" -"delete: %(vol_id)s failed to run dsmc with stdout: %(out)s\n" -" stderr: %(err)s" -msgstr "" -"suprimir: %(vol_id)s no ha podido ejecutar dsmc sin salida estándar: " -"%(out)s\n" -" stderr: %(err)s" - -msgid "delete_hypermetro error." -msgstr "Error de delete_hypermetro." - -#, python-format -msgid "delete_initiator: %s ACL not found. Continuing." -msgstr "delete_initiator: %s ACL no encontrado. Continuando." - -msgid "delete_replication error." -msgstr "Error de delete_replication." 
- -#, python-format -msgid "deleting snapshot %(snapshot_name)s that has dependent volumes" -msgstr "" -"suprimiendo la instantánea %(snapshot_name)s que tiene volúmenes dependientes" - -#, python-format -msgid "deleting volume %(volume_name)s that has snapshot" -msgstr "suprimiendo el volumen %(volume_name)s que tiene instantánea" - -msgid "detach snapshot from remote node" -msgstr "desconecte instantánea del nodo remoto" - -msgid "do_setup: No configured nodes." -msgstr "do_setup: No hay nodos configurado." - -#, python-format -msgid "" -"error writing object to swift, MD5 of object in swift %(etag)s is not the " -"same as MD5 of object sent to swift %(md5)s" -msgstr "" -"error al grabar archivo en Swift, el MD5 del archivo en Swift %(etag)s no es " -"el mismo que el MD5 del archivo enviado a Swift %(md5)s" - -#, python-format -msgid "extend_volume, eternus_pool: %(eternus_pool)s, pool not found." -msgstr "" -"extend_volume, eternus_pool: %(eternus_pool)s, no se ha encontrado la " -"aprupación." - -#, python-format -msgid "" -"extend_volume, volume: %(volume)s, volumename: %(volumename)s, eternus_pool: " -"%(eternus_pool)s, Storage Configuration Service not found." -msgstr "" -"extend_volume, volumen: %(volume)s, nombre de volumen: %(volumename)s, " -"eternus_pool: %(eternus_pool)s, no se ha encontrado el servicio de " -"configuración de almacenamiento." - -#, python-format -msgid "" -"extend_volume, volumename: %(volumename)s, Return code: %(rc)lu, Error: " -"%(errordesc)s, PoolType: %(pooltype)s." -msgstr "" -"extend_volume, nombre de volumen: %(volumename)s, código de retorno: " -"%(rc)lu, error: %(errordesc)s, tipo de agrupación: %(pooltype)s." - -#, python-format -msgid "extend_volume, volumename: %(volumename)s, volume not found." -msgstr "" -"extend_volume, nombre del volumen: %(volumename)s, no se ha encontrado el " -"volumen." 
- -msgid "failed to create new_volume on destination host" -msgstr "error al crear new_volume en el host de destino" - -msgid "fake" -msgstr "ficticio" - -#, python-format -msgid "file already exists at %s" -msgstr "el archivo ya existe en %s" - -msgid "fileno is not supported by SheepdogIOWrapper" -msgstr "fileno no admitido por SheepdogIOWrapper" - -msgid "fileno() not supported by RBD()" -msgstr "fileno() no admitido por RBD()" - -#, python-format -msgid "filesystem %s does not exist in Nexenta Store appliance" -msgstr "El sistema de archivos %s no existe en la aplicación Nexenta Store" - -msgid "" -"flashsystem_multihostmap_enabled is set to False, not allow multi host " -"mapping. CMMVC6071E The VDisk-to-host mapping was not created because the " -"VDisk is already mapped to a host." -msgstr "" -"flashsystem_multihostmap_enabled está establecido en False y no permite la " -"correlación de varios hosts. CMMVC6071E La correlación de disco virtual a " -"host no se ha creado, ya que el disco virtual ya está correlacionado con un " -"host." - -msgid "flush() not supported in this version of librbd" -msgstr "nivel() no admitido en esta versión de librbd" - -#, python-format -msgid "fmt=%(fmt)s backed by: %(backing_file)s" -msgstr "fmt=%(fmt)s respaldado por: %(backing_file)s" - -#, python-format -msgid "fmt=%(fmt)s backed by:%(backing_file)s" -msgstr "fmt=%(fmt)s respaldado por: %(backing_file)s" - -msgid "force delete" -msgstr "forzar supresión" - -msgid "get_hyper_domain_id error." -msgstr "Error de get_hyper_domain_id." - -msgid "get_hypermetro_by_id error." -msgstr "Error de get_hypermetro_by_id." - -#, python-format -msgid "" -"get_iscsi_params: Failed to get target IP for initiator %(ini)s, please " -"check config file." -msgstr "" -"get_iscsi_params: No se ha podido obtener la IP de destino para el iniciador " -"%(ini)s, compruebe el archivo config." 
- -#, python-format -msgid "get_pool: Failed to get attributes for volume %s" -msgstr "get_pool: No se han podido obtener los atributos para el volumen %s" - -msgid "glance_metadata changed" -msgstr "glance_metadata modificado" - -#, python-format -msgid "" -"gpfs_images_share_mode is set to copy_on_write, but %(vol)s and %(img)s " -"belong to different file systems." -msgstr "" -"gpfs_images_share_mode está establecido en copy_on_write, pero %(vol)s y " -"%(img)s pertenece a sistemas de archivos diferentes." - -#, python-format -msgid "" -"gpfs_images_share_mode is set to copy_on_write, but %(vol)s and %(img)s " -"belong to different filesets." -msgstr "" -"gpfs_images_share_mode está establecido en copy_on_write, pero %(vol)s y " -"%(img)s pertenecen a conjuntos de archivos diferentes." - -#, python-format -msgid "" -"hgst_group %(grp)s and hgst_user %(usr)s must map to valid users/groups in " -"cinder.conf" -msgstr "" -"hgst_group %(grp)s y hgst_user %(usr)s deben correlacionarse con usuarios/" -"grupos válidos en cinder.conf" - -#, python-format -msgid "hgst_net %(net)s specified in cinder.conf not found in cluster" -msgstr "" -"No se ha encontrado hgst_net %(net)s especificado en cinder.conf en el " -"clúster" - -msgid "hgst_redundancy must be set to 0 (non-HA) or 1 (HA) in cinder.conf." -msgstr "" -"hgst_redundancy debe establecerse en 0 (no HA) o 1 (HA) en cinder.conf." - -msgid "hgst_space_mode must be an octal/int in cinder.conf" -msgstr "hgst_space_mode debe ser un octal/ent en cinder.conf" - -#, python-format -msgid "hgst_storage server %(svr)s not of format :" -msgstr "El servidor hgst_storage %(svr)s no tiene el formato :" - -msgid "hgst_storage_servers must be defined in cinder.conf" -msgstr "hgst_storage_servers deben definirse en cinder.conf" - -msgid "" -"http service may have been abruptly disabled or put to maintenance state in " -"the middle of this operation." 
-msgstr "" -"Es posible que el servicio HTTP se haya inhabilitado de forma abrupta o que " -"se haya puesto en estado de mantenimiento en el transcurso de esta operación." - -msgid "id cannot be None" -msgstr "el ID no puede ser None" - -#, python-format -msgid "image %s not found" -msgstr "no se ha encontrado la imagen %s" - -#, python-format -msgid "initialize_connection, volume: %(volume)s, Volume not found." -msgstr "" -"initialize_connection, volumen: %(volume)s, no se ha encontrado el volumen." - -#, python-format -msgid "initialize_connection: Failed to get attributes for volume %s." -msgstr "" -"initialize_connection: No se han podido obtener los atributos para el " -"volumen %s." - -#, python-format -msgid "initialize_connection: Missing volume attribute for volume %s." -msgstr "initialize_connection: Falta un atributo para el volumen %s." - -#, python-format -msgid "" -"initialize_connection: No node found in I/O group %(gid)s for volume %(vol)s." -msgstr "" -"initialize_connection: No se ha encontrado ningún nodo en el grupo de E/S " -"%(gid)s para el volumen %(vol)s." - -#, python-format -msgid "initialize_connection: vdisk %s is not defined." -msgstr "initialize_connection: el disco virtual %s no está definido." - -#, python-format -msgid "invalid user '%s'" -msgstr "usuario no válido '%s'" - -#, python-format -msgid "iscsi portal, %s, not found" -msgstr "portal iscsi portal, %s, no encontrado" - -msgid "" -"iscsi_ip_address must be set in config file when using protocol 'iSCSI'." -msgstr "" -"iscsi_ip_address debe establecerse en el archivo de configuración al " -"utilizar el protocolo 'iSCSI'." 
- -#, python-format -msgid "key manager error: %(reason)s" -msgstr "error de gestor clave: %(reason)s" - -msgid "limit param must be an integer" -msgstr "el parámetro de límite debe ser un entero" - -msgid "limit param must be positive" -msgstr "el parámetro de límite debe ser positivo" - -msgid "manage_existing requires a 'name' key to identify an existing volume." -msgstr "" -"manage_existing necesita una clave 'name' para identificar un volumen " -"existente." - -#, python-format -msgid "" -"manage_existing_snapshot: Error managing existing replay %(ss)s on volume " -"%(vol)s" -msgstr "" -"manage_existing_snapshot: Error al gestionar la reproducción existente " -"%(ss)s en el volumen %(vol)s" - -#, python-format -msgid "marker [%s] not found" -msgstr "no se ha encontrado el marcador [%s]" - -#, python-format -msgid "mdiskgrp missing quotes %s" -msgstr "mdiskgrp no tiene comillas %s" - -#, python-format -msgid "migration_policy must be 'on-demand' or 'never', passed: %s" -msgstr "migration_policy debe ser 'on-demand' o 'never', se ha pasado: %s" - -#, python-format -msgid "mkfs failed on volume %(vol)s, error message was: %(err)s." -msgstr "" -"mkfs ha fallado en el volumen %(vol)s, el mensaje de error era: %(err)s." - -msgid "mock" -msgstr "simulación" - -msgid "mount.glusterfs is not installed" -msgstr "mount.glusterfs no está instalado" - -#, python-format -msgid "multiple resources with name %s found by drbdmanage" -msgstr "drbdmanage ha encontrado varios recursos con el nombre %s" - -#, python-format -msgid "multiple resources with snapshot ID %s found" -msgstr "se han encontrado varios recursos con el ID de instantánea %s" - -msgid "name cannot be None" -msgstr "el nombre no puede ser None" - -#, python-format -msgid "no REPLY but %r" -msgstr "ninguna RESPUESTA, sino %r" - -msgid "no data found" -msgstr "No se han encontrado datos" - -msgid "no error code found" -msgstr "No se ha encontrado ningún código de error." 
- -#, python-format -msgid "no snapshot with id %s found in drbdmanage" -msgstr "no se ha encontrado ninguna instantánea con el id %s en drbdmanage" - -#, python-format -msgid "not exactly one snapshot with id %s" -msgstr "no exactamente una instantánea con el id %s" - -#, python-format -msgid "not exactly one volume with id %s" -msgstr "no exactamente un volumen con el id %s" - -#, python-format -msgid "obj missing quotes %s" -msgstr "obj no tiene comillas %s" - -msgid "open_access_enabled is not off." -msgstr "open_access_enabled no está inactivo." - -msgid "progress must be an integer percentage" -msgstr "el progreso debe ser un porcentaje de entero" - -msgid "provider must be defined" -msgstr "Se debe definir el proveedor" - -#, python-format -msgid "" -"qemu-img %(minimum_version)s or later is required by this volume driver. " -"Current qemu-img version: %(current_version)s" -msgstr "" -"El controlador de volumen necesita qemu-img %(minimum_version)s o posterior. " -"Versión qemu-img actual: %(current_version)s" - -#, python-format -msgid "" -"qemu-img is not installed and image is of type %s. Only RAW images can be " -"used if qemu-img is not installed." -msgstr "" -"qemu-img no está instalado y la imagen es de tipo %s. Solo se puede usar " -"imágenes RAW si qemu-img no está instalado." - -msgid "" -"qemu-img is not installed and the disk format is not specified. Only RAW " -"images can be used if qemu-img is not installed." -msgstr "" -"qemu-img no está instalado y el formato del disco no está especificado. " -"Solo se pueden usar las imágenes RAW si qemu-img no está instalado." - -msgid "rados and rbd python libraries not found" -msgstr "no se han encontrado las bibliotecas rados y rbd python" - -#, python-format -msgid "read_deleted can only be one of 'no', 'yes' or 'only', not %r" -msgstr "read_deleted solo puede ser 'no', 'yes' o 'only', no %r" - -#, python-format -msgid "replication_failover failed. %s not found." 
-msgstr "replication_failover ha fallado. No se ha encontrado %s." - -msgid "replication_failover failed. Backend not configured for failover" -msgstr "" -"replication_failover ha fallado. No se ha configurado un programa de fondo " -"para la migración tras error" - -#, python-format -msgid "" -"restore: %(vol_id)s failed to run dsmc due to invalid arguments on " -"%(bpath)s.\n" -"stdout: %(out)s\n" -" stderr: %(err)s" -msgstr "" -"restaurar: %(vol_id)s no ha podido ejecutar dsmc debido a argumentos no " -"válidos en %(bpath)s.\n" -"stdout: %(out)s\n" -" stderr: %(err)s" - -#, python-format -msgid "" -"restore: %(vol_id)s failed to run dsmc on %(bpath)s.\n" -"stdout: %(out)s\n" -" stderr: %(err)s" -msgstr "" -"restaurar: %(vol_id)s no ha podido ejecutar dsmc en %(bpath)s.\n" -"stdout: %(out)s\n" -" stderr: %(err)s" - -#, python-format -msgid "" -"restore: %(vol_id)s failed.\n" -"stdout: %(out)s\n" -" stderr: %(err)s." -msgstr "" -"restaurar: %(vol_id)s ha fallado.\n" -"stdout: %(out)s\n" -" stderr: %(err)s." - -msgid "" -"restore_backup aborted, actual object list does not match object list stored " -"in metadata." -msgstr "" -"restore_backup terminada anormalmente, la lista de objetos real no coincide " -"con la lista de objetos almacenada en metadatos." - -#, python-format -msgid "rtslib_fb is missing member %s: You may need a newer python-rtslib-fb." -msgstr "" -"Falta el miembro %s de rtslib_fb: es posible que necesite un python-rtslib-" -"fb más reciente." - -msgid "san_ip is not set." -msgstr "san_ip no está establecido." - -msgid "san_ip must be set" -msgstr "se debe establecer san_ip" - -msgid "" -"san_login and/or san_password is not set for Datera driver in the cinder." -"conf. Set this information and start the cinder-volume service again." -msgstr "" -"san_login y/o san_password no se han establecido para el controlador Datera " -"en cinder.conf. Establezca esta información e inicie el servicio cinder-" -"volume de nuevo." 
- -msgid "serve() can only be called once" -msgstr "serve() sólo se puede llamar una vez " - -msgid "size not found" -msgstr "No se ha encontrado el tamaño" - -msgid "snapshot info not found" -msgstr "No se ha encontrado información de la instantánea" - -#, python-format -msgid "snapshot-%s" -msgstr "instantánea-%s" - -msgid "snapshots assigned" -msgstr "instantáneas asignadas" - -msgid "snapshots changed" -msgstr "instantáneas modificadas" - -#, python-format -msgid "source volume id:%s is not replicated" -msgstr "El ID de volumen de origen: %s no se replica" - -msgid "source-name cannot be empty." -msgstr "source-name no puede estar vacío." - -msgid "source-name format should be: 'vmdk_path@vm_inventory_path'." -msgstr "El formato de source-name debería ser: 'vmdk_path@vm_inventory_path'." - -#, python-format -msgid "status must be %s and" -msgstr "el estado debe ser %s y" - -msgid "status must be available" -msgstr "el estado debe ser available" - -msgid "status not found" -msgstr "No se ha encontrado el estado" - -msgid "stop_hypermetro error." -msgstr "Error de stop_hypermetro." - -msgid "sync_hypermetro error." -msgstr "Error de sync_hypermetro." - -#, python-format -msgid "" -"targetcli not installed and could not create default directory " -"(%(default_path)s): %(exc)s" -msgstr "" -"targetcli no instalado. No se ha podido crear un directorio predeterminado " -"(%(default_path)s): %(exc)s" - -msgid "terminate_connection: Failed to get host name from connector." -msgstr "" -"terminate_connection: No se ha podido obtener el nombre de host del conector." 
- -msgid "timeout creating new_volume on destination host" -msgstr "tiempo de desactivación al crear new_volume en el host de destino" - -msgid "too many body keys" -msgstr "demasiadas claves de cuerpo" - -#, python-format -msgid "umount: %s: not mounted" -msgstr "umount: %s: no montado" - -#, python-format -msgid "umount: %s: target is busy" -msgstr "umount: %s: el destino está ocupado" - -msgid "umount: : some other error" -msgstr "umount: : algún otro error" - -msgid "umount: : target is busy" -msgstr "umount: : el destino está ocupado" - -#, python-format -msgid "unmanage_snapshot: Cannot find snapshot named %s" -msgstr "unmanage_snapshot: No se puede encontrar la instantánea denominada %s" - -#, python-format -msgid "unmanage_snapshot: Cannot find volume id %s" -msgstr "unmanage_snapshot: No se puede encontrar el ID de volumen %s" - -#, python-format -msgid "unrecognized argument %s" -msgstr "argumento no reconocido %s" - -#, python-format -msgid "unsupported compression algorithm: %s" -msgstr "algoritmo de compresión no soportado: %s" - -msgid "valid iqn needed for show_target" -msgstr "es necesario un iqn válido para show_target" - -#, python-format -msgid "vdisk %s is not defined." -msgstr "El disco virtual %s no está definido." - -msgid "vmemclient python library not found" -msgstr "No se ha encontrado la biblioteca python vmemclient." - -#, python-format -msgid "volume %s not found in drbdmanage" -msgstr "No se ha encontrado el volumen %s en drbdmanage" - -msgid "volume assigned" -msgstr "volumen asignado" - -msgid "volume changed" -msgstr "volumen modificado" - -msgid "volume is already attached" -msgstr "El volumen ya está conectado" - -msgid "volume is not local to this node" -msgstr "el volumen no es local para este nodo" - -#, python-format -msgid "" -"volume size %(volume_size)d is too small to restore backup of size %(size)d." 
-msgstr "" -"el tamaño de volumen %(volume_size)d es demasiado pequeño para restaurar una " -"copia de seguridad con un tamaño de %(size)d." - -#, python-format -msgid "volume size %d is invalid." -msgstr "el tamaño de volumen %d no es válido." - -msgid "" -"volume_type must be provided when creating a volume in a consistency group." -msgstr "" -"Es necesario proporcionar el tipo_volumen al crear un volumen en un grupo " -"de ." - -msgid "volume_type_id cannot be None" -msgstr "volume_type_id no puede ser None (Ninguno)" - -#, python-format -msgid "volume_types must be provided to create consistency group %(name)s." -msgstr "" -"Es necesario proporcionar tipos_volumen para crear el grupo de consistencia " -"%(name)s." - -#, python-format -msgid "volume_types must be provided to create consistency group %s." -msgstr "" -"Es necesario proporcionar tipos_volumen para crear el grupo de consistencia " -"%s." - -msgid "volumes assigned" -msgstr "volúmenes asignados" - -msgid "volumes changed" -msgstr "volúmenes modificados" - -#, python-format -msgid "wait_for_condition: %s timed out." -msgstr "wait_for_condition: %s ha agotado el tiempo de espera." - -#, python-format -msgid "" -"zfssa_manage_policy property needs to be set to 'strict' or 'loose'. Current " -"value is: %s." -msgstr "" -"La propiedad zfssa_manage_policy se debe establecer a 'strict' o 'loose'. El " -"valor actual es: %s." diff --git a/cinder/locale/fr/LC_MESSAGES/cinder.po b/cinder/locale/fr/LC_MESSAGES/cinder.po deleted file mode 100644 index 518e9c8e0..000000000 --- a/cinder/locale/fr/LC_MESSAGES/cinder.po +++ /dev/null @@ -1,10081 +0,0 @@ -# Translations template for cinder. -# Copyright (C) 2015 ORGANIZATION -# This file is distributed under the same license as the cinder project. -# -# Translators: -# EVEILLARD , 2013 -# François Bureau, 2013 -# FIRST AUTHOR , 2011 -# Jonathan Dupart , 2014 -# Andreas Jaeger , 2016. 
#zanata -msgid "" -msgstr "" -"Project-Id-Version: cinder 9.0.0.0rc2.dev202\n" -"Report-Msgid-Bugs-To: https://bugs.launchpad.net/openstack-i18n/\n" -"POT-Creation-Date: 2016-10-07 03:25+0000\n" -"MIME-Version: 1.0\n" -"Content-Type: text/plain; charset=UTF-8\n" -"Content-Transfer-Encoding: 8bit\n" -"PO-Revision-Date: 2016-04-04 06:28+0000\n" -"Last-Translator: Martine Marin \n" -"Language: fr\n" -"Plural-Forms: nplurals=2; plural=(n > 1);\n" -"Generated-By: Babel 2.0\n" -"X-Generator: Zanata 3.7.3\n" -"Language-Team: French\n" - -#, python-format -msgid "\t%s" -msgstr "\t%s" - -#, python-format -msgid "" -"\n" -"OpenStack Cinder version: %(version)s\n" -msgstr "" -"\n" -"Version d'OpenStack Cinder : %(version)s\n" - -#, python-format -msgid " but size is now %d" -msgstr " mais la taille est maintenant %d" - -#, python-format -msgid " but size is now %d." -msgstr " mais la taille est maintenant %d." - -msgid " or " -msgstr " ou " - -#, python-format -msgid "%(attr)s is not set." -msgstr "%(attr)s n'est pas défini." - -#, python-format -msgid "" -"%(driver)s manage_existing cannot manage a volume connected to hosts. Please " -"disconnect this volume from existing hosts before importing" -msgstr "" -"%(driver)s manage_existing ne peut pas gérer un volume connecté à des hôtes. " -"Déconnectez ce volume des hôtes existants avant de procéder à l'importation" - -#, python-format -msgid "%(err)s" -msgstr "%(err)s" - -#, python-format -msgid "" -"%(err)s\n" -"result: %(res)s." -msgstr "" -"%(err)s\n" -"Résultat : %(res)s." - -#, python-format -msgid "%(error_message)s" -msgstr "%(error_message)s" - -#, python-format -msgid "%(exception)s: %(explanation)s" -msgstr "%(exception)s : %(explanation)s" - -#, python-format -msgid "%(file)s: Permission denied." -msgstr "%(file)s : Droit refusé." 
- -#, python-format -msgid "" -"%(fun)s: Failed with unexpected CLI output.\n" -" Command: %(cmd)s\n" -" stdout: %(out)s\n" -" stderr: %(err)s" -msgstr "" -"%(fun)s : Echec avec une sortie CLI inattendue.\n" -" Commande : %(cmd)s\n" -" stdout : %(out)s\n" -" stderr : %(err)s" - -#, python-format -msgid "%(host)-25s\t%(availability_zone)-15s" -msgstr "%(host)-25s\t%(availability_zone)-15s" - -#, python-format -msgid "%(host)-25s\t%(zone)-15s" -msgstr "%(host)-25s\t%(zone)-15s" - -#, python-format -msgid "%(message)s" -msgstr "%(message)s" - -#, python-format -msgid "" -"%(message)s\n" -"Status Code: %(_status)s\n" -"Body: %(_body)s" -msgstr "" -"%(message)s\n" -"Code de statut : %(_status)s\n" -"Corps : %(_body)s" - -#, python-format -msgid "%(message)s, subjectAltName: %(sanList)s." -msgstr "%(message)s, subjectAltName : %(sanList)s." - -#, python-format -msgid "" -"%(msg_type)s: creating NetworkPortal: ensure port %(port)d on ip %(ip)s is " -"not in use by another service." -msgstr "" -"%(msg_type)s : création de NetworkPortal : vérifiez que le port %(port)d à " -"l'adresse IP %(ip)s n'est pas utilisé par un autre service." - -#, python-format -msgid "" -"%(op)s: backup %(bck_id)s, volume %(vol_id)s failed. Backup object has " -"unexpected mode. Image or file backups supported, actual mode is " -"%(vol_mode)s." -msgstr "" -"%(op)s : échec de la sauvegarde %(bck_id)s, volume %(vol_id)s. L'objet de " -"sauvegarde possède un mode inattendu. Sauvegardes d'image ou de fichier " -"prises en charge, mode réel : %(vol_mode)s." 
- -#, python-format -msgid "%(service)s Service is not %(status)s on storage appliance: %(host)s" -msgstr "" -"Le service %(service)s n'a pas le statut %(status)s sur le dispositif de " -"stockage %(host)s" - -#, python-format -msgid "%(value_name)s must be <= %(max_value)d" -msgstr "%(value_name)s doit être <= %(max_value)d" - -#, python-format -msgid "%(value_name)s must be >= %(min_value)d" -msgstr "%(value_name)s doit être >= %(min_value)d" - -#, python-format -msgid "" -"%(worker_name)s value of %(workers)d is invalid, must be greater than 0." -msgstr "" -"La valeur %(worker_name)s de %(workers)d n'est pas valide. Elle doit être " -"supérieure à 0." - -#, python-format -msgid "%s" -msgstr "%s" - -#, python-format -msgid "%s \"data\" is not in result." -msgstr "%s \"data\" ne figure pas dans le résultat." - -#, python-format -msgid "" -"%s cannot be accessed. Verify that GPFS is active and file system is mounted." -msgstr "" -"%s n'est pas accessible. Vérifiez que GPFS est actif et que le système de " -"fichiers est monté." - -#, python-format -msgid "%s cannot be resized using clone operation as it contains no blocks." -msgstr "" -"%s ne peut pas être redimensionné par une opération de clonage car il ne " -"contient aucun bloc." - -#, python-format -msgid "" -"%s cannot be resized using clone operation as it is hosted on compressed " -"volume" -msgstr "" -"%s ne peut pas être redimensionné par une opération de clonage car il est " -"hébergé sur un volume compressé" - -#, python-format -msgid "%s configuration option is not set." -msgstr "L'option de configuration %s n'a pas été définie." - -#, python-format -msgid "%s does not exist." -msgstr "%s n'existe pas." - -#, python-format -msgid "%s is not a directory." -msgstr "%s n'est pas un répertoire." - -#, python-format -msgid "%s is not installed" -msgstr "%s n'est pas installé" - -#, python-format -msgid "%s is not installed." -msgstr "%s n'est pas installé." 
- -#, python-format -msgid "%s is not set" -msgstr "%s n'est pas défini" - -#, python-format -msgid "%s is not set and is required for the replication device to be valid." -msgstr "" -"%s n'est pas défini et est obligatoire pour que l'unité de réplication soit " -"valide." - -#, python-format -msgid "%s is not set." -msgstr "%s n'est pas défini." - -#, python-format -msgid "%s must be a valid raw or qcow2 image." -msgstr "%s doit être une image raw ou qcow2 valide." - -#, python-format -msgid "%s must be an absolute path." -msgstr "%s doit être un chemin d'accès absolu." - -#, python-format -msgid "%s must be an integer." -msgstr "%s doit être un entier." - -#, python-format -msgid "%s not set in cinder.conf" -msgstr "%s non défini dans cinder.conf" - -#, python-format -msgid "%s not set." -msgstr "%s non défini." - -#, python-format -msgid "" -"'%(prot)s' is invalid for flashsystem_connection_protocol in config file. " -"valid value(s) are %(enabled)s." -msgstr "" -"'%(prot)s' n'est pas valide pour flashsystem_connection_protocol dans le " -"fichier de configuration. La ou les valeurs valides sont %(enabled)s." - -msgid "'active' must be present when writing snap_info." -msgstr "'active' doit présent lors de l'écriture de snap_info." - -msgid "'consistencygroup_id' must be specified" -msgstr "'consistencygroup_id' doit être spécifié" - -msgid "'qemu-img info' parsing failed." -msgstr "Echec de l'analyse syntaxique de 'qemu-img info'." - -msgid "'status' must be specified." -msgstr "'status' doit être spécifié." - -msgid "'volume_id' must be specified" -msgstr "'volume_id' doit être spécifié." - -#, python-format -msgid "" -"(Command: %(cmd)s) (Return Code: %(exit_code)s) (Stdout: %(stdout)s) " -"(Stderr: %(stderr)s)" -msgstr "" -"(Commande : %(cmd)s) (Code retour : %(exit_code)s) (Sortie standard : " -"%(stdout)s) (Erreur standard : %(stderr)s)" - -#, python-format -msgid "A LUN (HLUN) was not found. 
(LDEV: %(ldev)s)" -msgstr "" -"Un numéro d'unité logique hôte (HLUN) est introuvable. (unité logique : " -"%(ldev)s)" - -msgid "A concurrent, possibly contradictory, request has been made." -msgstr "Une demande simultanée, et peut-être contradictoire, a été effectuée." - -#, python-format -msgid "" -"A free LUN (HLUN) was not found. Add a different host group. (LDEV: %(ldev)s)" -msgstr "" -"Aucun numéro d'unité logique libre (HLUN) n'a été trouvé. Ajoutez un autre " -"groupe d'hôtes. (unité logique : %(ldev)s)" - -#, python-format -msgid "A host group could not be added. (port: %(port)s, name: %(name)s)" -msgstr "" -"Un groupe d'hôtes n'a pas pu être ajouté (port : %(port)s, nom : %(name)s)" - -#, python-format -msgid "" -"A host group could not be deleted. (port: %(port)s, gid: %(gid)s, name: " -"%(name)s)" -msgstr "" -"Un groupe d'hôtes n'a pas pu être supprimé. (port : %(port)s, gid : %(gid)s, " -"nom : %(name)s)" - -#, python-format -msgid "A host group is invalid. (host group: %(gid)s)" -msgstr "Un groupe d'hôtes n'est pas valide. (groupe d'hôtes : %(gid)s)" - -#, python-format -msgid "A pair cannot be deleted. (P-VOL: %(pvol)s, S-VOL: %(svol)s)" -msgstr "" -"Une paire n'a pas pu être supprimée. (P-VOL : %(pvol)s, S-VOL : %(svol)s)" - -#, python-format -msgid "" -"A pair could not be created. The maximum number of pair is exceeded. (copy " -"method: %(copy_method)s, P-VOL: %(pvol)s)" -msgstr "" -"Une paire n'a pas pu être créée. Le nombre maximum de paires est dépassé. " -"(méthode de copie : %(copy_method)s, P-VOL : %(pvol)s)" - -#, python-format -msgid "A parameter is invalid. (%(param)s)" -msgstr "Un paramètre n'est pas valide. (%(param)s)" - -#, python-format -msgid "A parameter value is invalid. (%(meta)s)" -msgstr "Une valeur de paramètre n'est pas valide. (%(meta)s)" - -#, python-format -msgid "A pool could not be found. (pool id: %(pool_id)s)" -msgstr "Un pool est introuvable. 
(ID pool : %(pool_id)s)" - -#, python-format -msgid "A snapshot status is invalid. (status: %(status)s)" -msgstr "Un statut d'instantané n'est pas valide. (statut : %(status)s)" - -msgid "A valid secondary target MUST be specified in order to failover." -msgstr "" -"Une cible secondaire valide DOIT être spécifiée pour effectuer le " -"basculement." - -msgid "A volume ID or share was not specified." -msgstr "Un ID de volume ou un partage n'a pas été spécifié." - -#, python-format -msgid "A volume status is invalid. (status: %(status)s)" -msgstr "Un statut de volume n'est pas valide. (statut : %(status)s)" - -#, python-format -msgid "API %(name)s failed with error string %(err)s" -msgstr "Echec de l'API %(name)s avec la chaîne d'erreur %(err)s" - -#, python-format -msgid "" -"API Version String %(version)s is of invalid format. Must be of format " -"MajorNum.MinorNum." -msgstr "" -"La chaîne de version d'API %(version)s a un format non valide. Elle doit " -"être au format NumMajeur.NumMineur." - -msgid "API key is missing for CloudByte driver." -msgstr "Clé d'API manquante pour le pilote CloudByte." - -#, python-format -msgid "API response: %(response)s" -msgstr "Réponse de l'API : %(response)s" - -#, python-format -msgid "API response: %s" -msgstr "Réponse de l'API : %s" - -#, python-format -msgid "API version %(version)s is not supported on this method." -msgstr "" -"La version %(version)s de l'API n'est pas prise en charge avec cette méthode." - -msgid "API version could not be determined." -msgstr "La version d'API n'a pas pu être déterminée." - -msgid "" -"About to delete child projects having non-zero quota. This should not be " -"performed" -msgstr "" -"Vous êtes sur le point de supprimer des projets enfants avec quota non zéro. " -"Ceci ne devrait pas être effectué. " - -msgid "Access list not available for public volume types." -msgstr "Liste d'accès indisponible pour les types de volume publics." - -msgid "Activate or deactivate QoS error." 
-msgstr "Erreur lors de l'activation ou de la désactivation QoS." - -msgid "Activate snapshot error." -msgstr "Erreur lors de l'activation de l'instantané." - -msgid "Add FC port to host error." -msgstr "Erreur lors de l'ajout du port FC à l'hôte." - -msgid "Add fc initiator to array error." -msgstr "Erreur lors de l'ajout d'initiateur fc à la matrice." - -msgid "Add initiator to array error." -msgstr "Erreur lors de l'ajout de l'initiateur à la matrice." - -msgid "Add lun to cache error." -msgstr "Erreur lors de l'ajout de numéro d'unité logique au cache." - -msgid "Add lun to partition error." -msgstr "Erreur lors de l'ajout de numéro d'unité logique à la partition." - -msgid "Add mapping view error." -msgstr "Erreur lors de l'ajout de la vue de mappage." - -msgid "Add new host error." -msgstr "Erreur lors de l'ajout d'un nouvel hôte." - -msgid "Add port to port group error." -msgstr "Erreur lors de l'ajout de port à un groupe de ports." - -#, python-format -msgid "" -"All the specified storage pools to be managed do not exist. Please check " -"your configuration. Non-existent pools: %s" -msgstr "" -"Les pools de stockage spécifiés pour être gérés n'existent pas tous. " -"Vérifiez votre configuration. Pools non existants : %s" - -msgid "An API version request must be compared to a VersionedMethod object." -msgstr "" -"Une demande de version d'API doit être comparée à un objet VersionedMethod." - -msgid "An error has occurred during backup operation" -msgstr "Une erreur est survenue lors de la sauvegarde" - -#, python-format -msgid "" -"An error occurred during the LUNcopy operation. LUNcopy name: " -"%(luncopyname)s. LUNcopy status: %(luncopystatus)s. LUNcopy state: " -"%(luncopystate)s." -msgstr "" -"Une erreur s'est produite pendant l'opération LUNcopy. Nom LUNcopy : " -"%(luncopyname)s. Statut LUNcopy : %(luncopystatus)s. Etat LUNcopy : " -"%(luncopystate)s." - -#, python-format -msgid "An error occurred while reading volume \"%s\"." 
-msgstr "Une erreur s'est produite lors de la lecture du volume \"%s\"." - -#, python-format -msgid "An error occurred while writing to volume \"%s\"." -msgstr "Une erreur s'est produite lors de l'écriture dans le volume \"%s\"." - -#, python-format -msgid "An iSCSI CHAP user could not be added. (username: %(user)s)" -msgstr "" -"Un utilisateur CHAP iSCSI n'a pas pu être ajouté. (nom d'utilisateur : " -"%(user)s)" - -#, python-format -msgid "An iSCSI CHAP user could not be deleted. (username: %(user)s)" -msgstr "" -"Un utilisateur CHAP iSCSI n'a pas pu être supprimé. (nom d'utilisateur : " -"%(user)s)" - -#, python-format -msgid "" -"An iSCSI target could not be added. (port: %(port)s, alias: %(alias)s, " -"reason: %(reason)s)" -msgstr "" -"Une cible iSCSI n'a pas pu être ajoutée. (port : %(port)s, alias : " -"%(alias)s, raison : %(reason)s)" - -#, python-format -msgid "" -"An iSCSI target could not be deleted. (port: %(port)s, tno: %(tno)s, alias: " -"%(alias)s)" -msgstr "" -"Une cible iSCSI n'a pas pu être supprimée. (port : %(port)s, tno : %(tno)s, " -"alias : %(alias)s)" - -msgid "An unknown exception occurred." -msgstr "Une exception inconnue s'est produite." - -msgid "" -"An user with a token scoped to a subproject is not allowed to see the quota " -"of its parents." -msgstr "" -"Un utilisateur avec portée de jeton limitée à un sous-projet n'est pas " -"autorisé à visualiser le quota de ses parents." - -msgid "Append port group description error." -msgstr "Erreur lors de l'ajout de la description du groupe de ports." - -#, python-format -msgid "" -"Applying the zones and cfgs to the switch failed (error code=%(err_code)s " -"error msg=%(err_msg)s." -msgstr "" -"Echec de l'application de zones et de cfgs au commutateur (code d'erreur=" -"%(err_code)s, message d'erreur=%(err_msg)s.)" - -#, python-format -msgid "Array does not exist or is offline. Current status of array is %s." -msgstr "" -"La matrice n'existe pas ou est hors ligne. 
Statut actuel de la matrice : %s." - -msgid "Associate host to hostgroup error." -msgstr "Erreur lors de l'association de l'hôte à hostgroup." - -msgid "Associate host to mapping view error." -msgstr "Erreur lors de l'association de l'hôte à la vue de mappage." - -msgid "Associate initiator to host error." -msgstr "Erreur lors de l'association de l'initiateur à l'hôte." - -msgid "Associate lun to QoS error." -msgstr "Erreur lors de l'association de numéro d'unité logique (lun) à QoS." - -msgid "Associate lun to lungroup error." -msgstr "Erreur lors de l'association du numéro d'unité logique à lungroup." - -msgid "Associate lungroup to mapping view error." -msgstr "Erreur lors de l'association de lungroup à la vue de mappage." - -msgid "Associate portgroup to mapping view error." -msgstr "Erreur lors de l'association de portgroup à la vue de mappage." - -msgid "At least one valid iSCSI IP address must be set." -msgstr "Au moins une adresse IP iSCSI valide doit être définie." - -#, python-format -msgid "Attempt to transfer %s with invalid auth key." -msgstr "Tentative de transfert de %s avec une clé d'auth non valide." - -#, python-format -msgid "Auth group [%s] details not found in CloudByte storage." -msgstr "" -"Les informations du groupe d'authentification [%s] sont introuvables dans le " -"stockage CloudByte." - -msgid "Auth user details not found in CloudByte storage." -msgstr "" -"Informations d'utilisateur de l'authentification introuvables dans le " -"stockage CloudByte." - -#, python-format -msgid "Authentication failed, verify the switch credentials, error code %s." -msgstr "" -"L'authentification a échoué, vérifiez les données d'identification du " -"commutateur, code d'erreur : %s." - -#, python-format -msgid "Availability zone '%(s_az)s' is invalid." -msgstr "La zone de disponibilité '%(s_az)s' n'est pas valide." 
- -msgid "Available categories:" -msgstr "Catégories disponibles :" - -msgid "" -"Back-end QoS specs are not supported on this storage family and ONTAP " -"version." -msgstr "" -"Les spécifications QoS de back-end ne sont pas prises en charge sur cette " -"famille de stockage et version ONTAP." - -#, python-format -msgid "Backend doesn't exist (%(backend)s)" -msgstr "Le système dorsal n'existe pas (%(backend)s)" - -#, python-format -msgid "Backend reports: %(message)s" -msgstr "Rapports de back-end : %(message)s" - -msgid "Backend reports: item already exists" -msgstr "Rapports de back-end : l'élément existe déjà" - -msgid "Backend reports: item not found" -msgstr "Rapports de back-end : élément introuvable" - -#, python-format -msgid "Backend service retry timeout hit: %(timeout)s sec" -msgstr "" -"Le délai de nouvelle tentative du service de back-end est atteint : " -"%(timeout)s s" - -msgid "Backend storage did not configure fiber channel target." -msgstr "Le stockage de back-end n'a pas configuré la cible de canal optique." - -msgid "Backing up an in-use volume must use the force flag." -msgstr "" -"La sauvegarde d'un volume en cours d'utilisation doit utiliser l'indicateur " -"force." - -#, python-format -msgid "Backup %(backup_id)s could not be found." -msgstr "Sauvegarde %(backup_id)s introuvable." - -msgid "Backup RBD operation failed" -msgstr "Echec de l'opération RBD de sauvegarde" - -msgid "Backup already exists in database." -msgstr "La sauvegarde existe déjà dans la base de données." - -#, python-format -msgid "Backup driver reported an error: %(message)s" -msgstr "Le pilote de sauvegarde a signalé une erreur : %(message)s" - -msgid "Backup id required" -msgstr "ID de sauvegarde requis" - -msgid "Backup is not supported for GlusterFS volumes with snapshots." -msgstr "" -"La sauvegarde n'est pas prise en charge pour les volumes GlusterFS avec des " -"instantanés." - -msgid "Backup is only supported for SOFS volumes without backing file." 
-msgstr "" -"La sauvegarde est seulement prise en charge pour les volumes SOFS sans " -"fichier de sauvegarde." - -msgid "Backup is only supported for raw-formatted GlusterFS volumes." -msgstr "" -"La sauvegarde est seulement prise en charge pour les volumes GlusterFS au " -"format raw." - -msgid "Backup is only supported for raw-formatted SOFS volumes." -msgstr "" -"La sauvegarde est seulement prise en charge pour les volumes SOFS au format " -"raw." - -msgid "Backup operation of an encrypted volume failed." -msgstr "Echec de l'opération de sauvegarde d'un volume chiffré." - -#, python-format -msgid "" -"Backup service %(configured_service)s does not support verify. Backup id " -"%(id)s is not verified. Skipping verify." -msgstr "" -"Le service de sauvegarde %(configured_service)s ne prend pas en charge la " -"vérification. L'ID de sauvegarde %(id)s n'est pas vérifié. Vérification " -"ignorée." - -#, python-format -msgid "" -"Backup service %(service)s does not support verify. Backup id %(id)s is not " -"verified. Skipping reset." -msgstr "" -"Le service de sauvegarde %(service)s ne prend pas en charge la vérification. " -"L'ID de sauvegarde %(id)s n'est pas vérifié. Réinitialisation ignorée." - -#, python-format -msgid "Backup should only have one snapshot but instead has %s" -msgstr "" -"La sauvegarde comporte %s instantanés alors qu'elle ne doit en contenir qu'un" - -msgid "Backup status must be available" -msgstr "L'état de sauvegarde doit être disponible" - -#, python-format -msgid "Backup status must be available and not %s." -msgstr "L'état de sauvegarde doit être disponible mais pas %s." - -msgid "Backup status must be available or error" -msgstr "L'état de sauvegarde doit être Disponible ou Erreur" - -msgid "Backup to be restored has invalid size" -msgstr "La sauvegarde à restaurer a une taille non valide" - -#, python-format -msgid "Bad Status line returned: %(arg)s." -msgstr "Mauvaise ligne d'état renvoyée : %(arg)s." 
- -#, python-format -msgid "Bad key(s) in quota set: %s" -msgstr "Clé(s) incorrecte(s) dans le quota défini : %s" - -#, python-format -msgid "" -"Bad or unexpected response from the storage volume backend API: %(data)s" -msgstr "" -"Réponse erronée ou inattendue de l'API back-end du volume de stockage : " -"%(data)s" - -#, python-format -msgid "Bad project format: project is not in proper format (%s)" -msgstr "" -"Format de projet incorrect : le projet n'est pas au format approprié (%s)" - -msgid "Bad response from Datera API" -msgstr "Réponse erronée de l'API Datera" - -msgid "Bad response from SolidFire API" -msgstr "Réponse erronée de l'API SolidFire" - -#, python-format -msgid "Bad response from XMS, %s" -msgstr "Réponse incorrecte de XMS, %s" - -msgid "Binary" -msgstr "binaire" - -msgid "Blank components" -msgstr "Composants vides" - -msgid "Blockbridge api host not configured" -msgstr "L'hôte de l'API Blockbridge n'a pas été configuré" - -#, python-format -msgid "Blockbridge configured with invalid auth scheme '%(auth_scheme)s'" -msgstr "" -"Blockbridge a été configuré avec une méthode d'authentification non valide " -"'%(auth_scheme)s'" - -msgid "Blockbridge default pool does not exist" -msgstr "Le pool Blockbridge par défaut n'existe pas" - -msgid "" -"Blockbridge password not configured (required for auth scheme 'password')" -msgstr "" -"Le mot de passe Blockbridge n'a pas été configuré (requis pour la méthode " -"d'authentification 'mot de passe')" - -msgid "Blockbridge pools not configured" -msgstr "Les pools Blockbridge n'ont pas été configurés" - -msgid "Blockbridge token not configured (required for auth scheme 'token')" -msgstr "" -"Le jeton Blockbridge n'a pas été configuré (requis pour la méthode " -"d'authentification 'jeton')" - -msgid "Blockbridge user not configured (required for auth scheme 'password')" -msgstr "" -"L'utilisateur Blockbridge n'a pas été configuré (requis pour la méthode " -"d'authentification 'mot de passe')" - -#, 
python-format -msgid "Brocade Fibre Channel Zoning CLI error: %(reason)s" -msgstr "" -"Erreur d'interface CLI de segmentation Brocade Fibre Channel : %(reason)s" - -#, python-format -msgid "Brocade Fibre Channel Zoning HTTP error: %(reason)s" -msgstr "Erreur HTTP de segmentation Brocade Fibre Channel : %(reason)s" - -msgid "CHAP secret should be 12-16 bytes." -msgstr "Le secret CHAP doit contenir 12 à 16 octets." - -#, python-format -msgid "" -"CLI Exception output:\n" -" command: %(cmd)s\n" -" stdout: %(out)s\n" -" stderr: %(err)s" -msgstr "" -"Sortie exception CLI :\n" -" commande : %(cmd)s\n" -" stdout : %(out)s\n" -" stderr : %(err)s" - -#, python-format -msgid "" -"CLI Exception output:\n" -" command: %(cmd)s\n" -" stdout: %(out)s\n" -" stderr: %(err)s." -msgstr "" -"Sortie exception CLI :\n" -" commande : %(cmd)s\n" -" stdout : %(out)s\n" -" stderr : %(err)s." - -msgid "" -"CMMVC6071E The VDisk-to-host mapping was not created because the VDisk is " -"already mapped to a host.\n" -"\"" -msgstr "" -"CMMVC6071E Le mappage de disque virtuel à hôte n'a pas été créé car le " -"disque virtuel est déjà mappé à un hôte.\n" -"\"" - -msgid "CONCERTO version is not supported" -msgstr "La version CONCERTO n'est pas prise en charge" - -#, python-format -msgid "CPG (%s) doesn't exist on array" -msgstr "CPG (%s) n'existe pas dans la matrice" - -msgid "Cache name is None, please set smartcache:cachename in key." -msgstr "Le nom du cache est None. Définissez smartcache:cachename dans la clé." - -#, python-format -msgid "Cache volume %s does not have required properties" -msgstr "Le volume de cache %s n'a pas les propriétés requises" - -msgid "Call returned a None object" -msgstr "L'appel a renvoyé un objet de type None " - -msgid "Can not add FC port to host." -msgstr "Impossible d'ajouter le port FC à l'hôte." - -#, python-format -msgid "Can not find cache id by cache name %(name)s." -msgstr "" -"Impossible de déterminer l'ID de cache à partir du nom de cache %(name)s." 
- -#, python-format -msgid "Can not find partition id by name %(name)s." -msgstr "Impossible de déterminer l'ID de partition à partir du nom %(name)s." - -#, python-format -msgid "Can not get pool info. pool: %s" -msgstr "Impossible d'obtenir les informations de pool. Pool : %s" - -#, python-format -msgid "Can not translate %s to integer." -msgstr "Impossible de transformer %s en entier." - -#, python-format -msgid "Can't access 'scality_sofs_config': %s" -msgstr "Impossible d'accéder à 'scality_sofs_config' : %s" - -msgid "Can't decode backup record." -msgstr "Impossible de décoder l'enregistrement de sauvegarde." - -#, python-format -msgid "Can't extend replication volume, volume: %(id)s" -msgstr "Extension impossible du volume de réplication, volume : %(id)s" - -msgid "Can't find LUN on the array, please check the source-name or source-id." -msgstr "" -"Numéro d'unité logique (LUN) introuvable sur la matrice, vérifiez la valeur " -"de source-name ou source-id." - -#, python-format -msgid "Can't find cache name on the array, cache name is: %(name)s." -msgstr "Nom de cache %(name)s introuvable sur la matrice." - -#, python-format -msgid "Can't find lun info on the array. volume: %(id)s, lun name: %(name)s." -msgstr "" -"Informations de numéro d'unité logique (lun) introuvables sur la matrice. " -"Volume : %(id)s, nom lun : %(name)s." - -#, python-format -msgid "Can't find partition name on the array, partition name is: %(name)s." -msgstr "" -"Nom de partition introuvable sur la matrice. Nom de la partition : %(name)s." - -#, python-format -msgid "Can't find service: %s" -msgstr "Service introuvable : %s" - -msgid "" -"Can't find snapshot on array, please check the source-name or source-id." -msgstr "" -"Instantané introuvable sur la matrice, vérifiez la valeur de source-name ou " -"source-id." - -msgid "Can't find the same host id from arrays." -msgstr "Impossible de trouver le même ID d'hôte (host id) dans les matrices." 
- -#, python-format -msgid "Can't get volume id from snapshot, snapshot: %(id)s" -msgstr "" -"Impossible d'obtenir l'ID de volume à partir de l'instantané, instantané : " -"%(id)s" - -#, python-format -msgid "Can't get volume id. Volume name: %s." -msgstr "Impossible d'obtenir l'ID du volume. Nom du volume : %s." - -#, python-format -msgid "Can't import LUN %(lun_id)s to Cinder. LUN type mismatched." -msgstr "" -"Impossible d'importer le numéro d'unité logique (LUN) %(lun_id)s dans " -"Cinder. Type LUN non concordant." - -#, python-format -msgid "Can't import LUN %s to Cinder. Already exists in a HyperMetroPair." -msgstr "" -"Impossible d'importer le numéro d'unité logique (LUN) %s dans Cinder. Ce " -"numéro existe déjà dans HyperMetroPair." - -#, python-format -msgid "Can't import LUN %s to Cinder. Already exists in a LUN copy task." -msgstr "" -"Impossible d'importer le numéro d'unité logique (LUN) %s dans Cinder. Ce " -"numéro existe déjà dans une tâche de copie LUN." - -#, python-format -msgid "Can't import LUN %s to Cinder. Already exists in a LUN group." -msgstr "" -"Impossible d'importer le numéro d'unité logique (LUN) %s dans Cinder. Ce " -"numéro existe déjà dans un groupe LUN." - -#, python-format -msgid "Can't import LUN %s to Cinder. Already exists in a LUN mirror." -msgstr "" -"Impossible d'importer le numéro d'unité logique (LUN) %s dans Cinder. Ce " -"numéro existe déjà dans un miroir LUN." - -#, python-format -msgid "Can't import LUN %s to Cinder. Already exists in a SplitMirror." -msgstr "" -"Impossible d'importer le numéro d'unité logique (LUN) %s dans Cinder. Ce " -"numéro existe déjà dans SplitMirror." - -#, python-format -msgid "Can't import LUN %s to Cinder. Already exists in a migration task." -msgstr "" -"Impossible d'importer le numéro d'unité logique (LUN) %s dans Cinder. Ce " -"numéro existe déjà dans une tâche de migration." - -#, python-format -msgid "" -"Can't import LUN %s to Cinder. Already exists in a remote replication task." 
-msgstr "" -"Impossible d'importer le numéro d'unité logique (LUN) %s dans Cinder . Ce " -"numéro existe déjà dans une tâche de réplication à distance." - -#, python-format -msgid "Can't import LUN %s to Cinder. LUN status is not normal." -msgstr "" -"Impossible d'importer le numéro d'unité logique (LUN) %s dans Cinder. Le " -"statut de ce numéro n'est pas normal." - -#, python-format -msgid "Can't import snapshot %s to Cinder. Snapshot doesn't belong to volume." -msgstr "" -"Impossible d'importer l'instantané %s dans Cinder. L'instantané n'appartient " -"pas au volume." - -#, python-format -msgid "Can't import snapshot %s to Cinder. Snapshot is exposed to initiator." -msgstr "" -"Impossible d'importer l'instantané %s dans Cinder. L'instantané est exposé " -"dans l'initiateur." - -#, python-format -msgid "" -"Can't import snapshot %s to Cinder. Snapshot status is not normal or running " -"status is not online." -msgstr "" -"Impossible d'importer l'instantané %s dans Cinder. Le statut de l'instantané " -"n'est pas normal ou le statut d'exécution n'est pas connecté (online)." - -msgid "Can't parse backup record." -msgstr "Impossible d'analyser l'enregistrement de sauvegarde." - -#, python-format -msgid "" -"Cannot add volume %(volume_id)s to consistency group %(group_id)s because it " -"has no volume type." -msgstr "" -"Impossible d'ajouter le volume %(volume_id)s au groupe de cohérence " -"%(group_id)s car il ne possède aucun type de volume." - -#, python-format -msgid "" -"Cannot add volume %(volume_id)s to consistency group %(group_id)s because it " -"is already in consistency group %(orig_group)s." -msgstr "" -"Impossible d'ajouter le volume %(volume_id)s au groupe de cohérence " -"%(group_id)s car il fait déjà partie du groupe de cohérence %(orig_group)s." - -#, python-format -msgid "" -"Cannot add volume %(volume_id)s to consistency group %(group_id)s because " -"volume cannot be found." 
-msgstr "" -"Impossible d'ajouter le volume %(volume_id)s au groupe de cohérence " -"%(group_id)s car le volume est introuvable." - -#, python-format -msgid "" -"Cannot add volume %(volume_id)s to consistency group %(group_id)s because " -"volume does not exist." -msgstr "" -"Impossible d'ajouter le volume %(volume_id)s au groupe de cohérence " -"%(group_id)s car le volume n'existe pas." - -#, python-format -msgid "" -"Cannot add volume %(volume_id)s to consistency group %(group_id)s because " -"volume is in an invalid state: %(status)s. Valid states are: %(valid)s." -msgstr "" -"Impossible d'ajouter le volume %(volume_id)s au groupe de cohérence " -"%(group_id)s car le volume est dans un état non valide : %(status)s. Les " -"états valides sont : %(valid)s." - -#, python-format -msgid "" -"Cannot add volume %(volume_id)s to consistency group %(group_id)s because " -"volume type %(volume_type)s is not supported by the group." -msgstr "" -"Impossible d'ajouter le volume %(volume_id)s au groupe de cohérence " -"%(group_id)s car le type de volume %(volume_type)s n'est pas pris en charge " -"par le groupe." - -#, python-format -msgid "" -"Cannot attach already attached volume %s; multiattach is disabled via the " -"'netapp_enable_multiattach' configuration option." -msgstr "" -"Impossible de rattacher le volume %s qui est déjà rattaché ; multiattach est " -"désactivé via l'option de configuration 'netapp_enable_multiattach'." - -msgid "Cannot change VF context in the session." -msgstr "Impossible de modifier le contexte VF dans la session." - -#, python-format -msgid "" -"Cannot change VF context, specified VF is not available in the manageable VF " -"list %(vf_list)s." -msgstr "" -"Impossible de modifier le contexte VF, VF indiqué non disponible dans la " -"liste VF gérable %(vf_list)s." - -msgid "Cannot connect to ECOM server." -msgstr "Connexion au serveur ECOM impossible." 
- -#, python-format -msgid "" -"Cannot create consistency group %(group)s because snapshot %(snap)s is not " -"in a valid state. Valid states are: %(valid)s." -msgstr "" -"Impossible de créer le groupe de cohérence %(group)s car l'instantané " -"%(snap)s n'est pas dans un état valide. Les états valides sont : %(valid)s." - -#, python-format -msgid "" -"Cannot create consistency group %(group)s because source volume " -"%(source_vol)s is not in a valid state. Valid states are: %(valid)s." -msgstr "" -"Impossible de créer le groupe de cohérence %(group)s car le volume source " -"%(source_vol)s n'est pas à un état valide. Etats valides : %(valid)s." - -#, python-format -msgid "Cannot create directory %s." -msgstr "Impossible de créer le répertoire %s." - -msgid "Cannot create encryption specs. Volume type in use." -msgstr "" -"Impossible de créer des spécifications de chiffrement. Type de volume en " -"service." - -#, python-format -msgid "" -"Cannot create image of disk format: %s. Only vmdk disk format is accepted." -msgstr "" -"Impossible de créer une image du format de disque : %s. Seul le format de " -"disque vmdk est accepté." - -#, python-format -msgid "Cannot create masking view: %(maskingViewName)s. " -msgstr "Impossible de créer une vue de masquage : %(maskingViewName)s. " - -#, python-format -msgid "" -"Cannot create more than %(req)s volumes on the ESeries array when " -"'netapp_enable_multiattach' is set to true." -msgstr "" -"Impossible de créer plus de %(req)s volumes sur la matrice ESeries lorsque " -"'netapp_enable_multiattach' est défini à true." - -#, python-format -msgid "Cannot create or find an storage group with name %(sgGroupName)s." -msgstr "" -"Impossible de créer ou de trouver un groupe de stockage dénommé " -"%(sgGroupName)s." - -#, python-format -msgid "Cannot create volume of size %s: not multiple of 8GB." -msgstr "" -"Vous ne pouvez pas créer un volume avec la taille %s : cette taille n'est " -"pas un multiple de 8 Go." 
- -#, python-format -msgid "Cannot create volume_type with name %(name)s and specs %(extra_specs)s" -msgstr "" -"Impossible de créer volume_type avec le nom %(name)s et les spécifications " -"%(extra_specs)s" - -#, python-format -msgid "Cannot delete LUN %s while snapshots exist." -msgstr "" -"Impossible de supprimer le numéro d'unité logique %s alors que des " -"instantanés existent." - -#, python-format -msgid "" -"Cannot delete cache volume: %(cachevol_name)s. It was updated at " -"%(updated_at)s and currently has %(numclones)d volume instances." -msgstr "" -"Impossible de supprimer le volume cache %(cachevol_name)s. Il a été mis à " -"jour le %(updated_at)s et contient actuellement %(numclones)d instances de " -"volume." - -#, python-format -msgid "" -"Cannot delete cache volume: %(cachevol_name)s. It was updated at " -"%(updated_at)s and currently has %(numclones)s volume instances." -msgstr "" -"Impossible de supprimer le volume cache : %(cachevol_name)s. Ila été mis à " -"jour le %(updated_at)s et contient actuellement %(numclones)s instances de " -"volume." - -msgid "Cannot delete encryption specs. Volume type in use." -msgstr "" -"Impossible de supprimer des spécifications de chiffrement. Type de volume en " -"service." - -msgid "Cannot determine storage pool settings." -msgstr "Impossible de déterminer les paramètres du pool de stockage." - -msgid "Cannot execute /sbin/mount.sofs" -msgstr "Impossible d'exécuter /sbin/mount.sofs" - -#, python-format -msgid "Cannot find CG group %s." -msgstr "Groupe CG %s introuvable." - -#, python-format -msgid "" -"Cannot find Controller Configuration Service for storage system " -"%(storage_system)s." -msgstr "" -"Service de configuration du contrôleur introuvable pour le système de " -"stockage %(storage_system)s." - -#, python-format -msgid "Cannot find Replication Service to create volume for snapshot %s." -msgstr "" -"Service de réplication introuvable pour créer le volume pour l'instantané %s." 
- -#, python-format -msgid "Cannot find Replication Service to delete snapshot %s." -msgstr "" -"Impossible de trouver le service de réplication pour supprimer l'instantané " -"%s." - -#, python-format -msgid "Cannot find Replication service on system %s." -msgstr "Service Replication introuvable sur le système %s." - -#, python-format -msgid "Cannot find Volume: %(id)s. unmanage operation. Exiting..." -msgstr "" -"Impossible de trouver le volume : %(id)s. Opération d'arrêt de la gestion " -"(unmanage). Sortie..." - -#, python-format -msgid "Cannot find Volume: %(volumename)s. Extend operation. Exiting...." -msgstr "Volume %(volumename)s introuvable. Etendez l'opération. Sortie..." - -#, python-format -msgid "Cannot find device number for volume %(volumeName)s." -msgstr "Nombre d'unités introuvable pour le volume %(volumeName)s." - -msgid "Cannot find migration task." -msgstr "Tâche de migration introuvable." - -#, python-format -msgid "Cannot find replication service on system %s." -msgstr "Impossible de trouver le service de réplication sur le système %s." - -#, python-format -msgid "Cannot find source CG instance. consistencygroup_id: %s." -msgstr "" -"Impossible de trouver l'instance de groupe de cohérence source. " -"consistencygroup_id: %s." - -#, python-format -msgid "Cannot get mcs_id by channel id: %(channel_id)s." -msgstr "" -"Impossible d'obtenir mcs_id à partir de l'ID de canal : %(channel_id)s." - -msgid "Cannot get necessary pool or storage system information." -msgstr "" -"Impossible d'obtenir les informations de pool ou de système de stockage " -"nécessaires." - -#, python-format -msgid "" -"Cannot get or create a storage group: %(sgGroupName)s for volume " -"%(volumeName)s " -msgstr "" -"Impossible d'obtenir ou de créer un groupe de stockage %(sgGroupName)s pour " -"le volume %(volumeName)s " - -#, python-format -msgid "Cannot get or create initiator group: %(igGroupName)s. 
" -msgstr "" -"Impossible d'obtenir ou de créer le groupe de demandeurs : %(igGroupName)s. " - -#, python-format -msgid "Cannot get port group: %(pgGroupName)s. " -msgstr "Impossible d'obtenir le groupe de ports : %(pgGroupName)s. " - -#, python-format -msgid "" -"Cannot get storage group: %(sgGroupName)s from masking view " -"%(maskingViewInstanceName)s. " -msgstr "" -"Impossible d'obtenir le groupe de stockage %(sgGroupName)s de la vue de " -"masquage %(maskingViewInstanceName)s. " - -#, python-format -msgid "" -"Cannot get supported size range for %(sps)s Return code: %(rc)lu. Error: " -"%(error)s." -msgstr "" -"Impossible d'obtenir la plage de tailles prises en charge pour %(sps)s Code " -"retour : %(rc)lu. Erreur : %(error)s." - -#, python-format -msgid "" -"Cannot get the default storage group for FAST policy: %(fastPolicyName)s." -msgstr "" -"Impossible d'obtenir le groupe de stockage par défaut pour la règle FAST : " -"%(fastPolicyName)s" - -msgid "Cannot get the portgroup from the masking view." -msgstr "Impossible d'obtenir portgroup à partir de la vue de masquage." - -msgid "Cannot mount Scality SOFS, check syslog for errors" -msgstr "" -"Impossible de monter Scality SOFS, consultez le fichier syslog pour voir les " -"erreurs" - -msgid "Cannot ping DRBDmanage backend" -msgstr "" -"Impossible d'exécuter une commande ping sur le système dorsal DRBDmanage" - -#, python-format -msgid "Cannot place volume %(id)s on %(host)s" -msgstr "Impossible de placer le volume %(id)s sur %(host)s" - -#, python-format -msgid "" -"Cannot provide both 'cgsnapshot_id' and 'source_cgid' to create consistency " -"group %(name)s from source." -msgstr "" -"Vous ne pouvez pas spécifier à la fois 'cgsnapshot_id' et 'source_cgid' pour " -"créer un groupe de cohérence %(name)s depuis la source." 
- -msgid "Cannot register resource" -msgstr "Impossible d'enregistrer la ressource" - -msgid "Cannot register resources" -msgstr "Impossible d'enregistrer les ressources" - -#, python-format -msgid "" -"Cannot remove volume %(volume_id)s from consistency group %(group_id)s " -"because it is not in the group." -msgstr "" -"Impossible de supprimer le volume %(volume_id)s du groupe de cohérence " -"%(group_id)s car il ne se trouve pas dans le groupe." - -#, python-format -msgid "" -"Cannot remove volume %(volume_id)s from consistency group %(group_id)s " -"because volume is in an invalid state: %(status)s. Valid states are: " -"%(valid)s." -msgstr "" -"Impossible de supprimer le volume %(volume_id)s du groupe de cohérence " -"%(group_id)s car le volume est dans un état non valide : %(status)s. Les " -"états valides sont : %(valid)s." - -#, python-format -msgid "Cannot retype from HPE3PARDriver to %s." -msgstr "Confirmation impossible de HPE3PARDriver vers %s." - -msgid "Cannot retype from one 3PAR array to another." -msgstr "Confirmation impossible de matrice 3PAR en une autre." - -msgid "Cannot retype to a CPG in a different domain." -msgstr "Confirmation impossible vers un CPG dans un autre domaine." - -msgid "Cannot retype to a snap CPG in a different domain." -msgstr "" -"Confirmation impossible vers un CPG d'instantané dans un autre domaine." - -msgid "" -"Cannot run vgc-cluster command, please ensure software is installed and " -"permissions are set properly." -msgstr "" -"Impossible d'exécuter la commande vgc-cluster. Vérifiez que le logiciel est " -"installé et que les autorisations sont définies correctement." - -msgid "Cannot set both hitachi_serial_number and hitachi_unit_name." -msgstr "" -"Impossible de définir à la fois hitachi_serial_number et hitachi_unit_name." - -msgid "Cannot specify both protection domain name and protection domain id." -msgstr "" -"Vous ne pouvez pas spécifier à la fois un nom et un ID de domaine de " -"protection." 
- -msgid "Cannot specify both storage pool name and storage pool id." -msgstr "" -"Vous ne pouvez pas spécifier à la fois le nom et l'ID du pool de stockage." - -#, python-format -msgid "" -"Cannot update consistency group %(group_id)s because no valid name, " -"description, add_volumes, or remove_volumes were provided." -msgstr "" -"Impossible de mettre à jour le groupe de cohérence %(group_id)s car aucun " -"nom, description, add_volumes ou remove_volumes valide n'ont été fournis." - -msgid "Cannot update encryption specs. Volume type in use." -msgstr "" -"Impossible de mettre à jour des spécifications de chiffrement. Type de " -"volume en service." - -#, python-format -msgid "Cannot update volume_type %(id)s" -msgstr "Impossible de mettre à jour le type de volume %(id)s" - -#, python-format -msgid "Cannot verify the existence of object:%(instanceName)s." -msgstr "Impossible de vérifier l'existence de l'objet %(instanceName)s." - -#, python-format -msgid "CgSnapshot %(cgsnapshot_id)s could not be found." -msgstr "L'instantané de groupe de cohérence %(cgsnapshot_id)s est introuvable." - -msgid "Cgsnahost is empty. No consistency group will be created." -msgstr "Cgsnahost est vide. Aucun groupe de cohérence ne sera créé." - -msgid "Change hostlun id error." -msgstr "Erreur lors du changement d'ID hostlun." - -msgid "Change lun priority error." -msgstr "Erreur lors de la modification de priorité de numéro d'unité logique." - -msgid "Change lun smarttier policy error." -msgstr "" -"Erreur lors de la modification de stratégie smarttier de numéro d'unité " -"logique." - -#, python-format -msgid "" -"Change would make usage less than 0 for the following resources: %(unders)s" -msgstr "" -"La modification définira une utilisation inférieure à 0 pour les ressources " -"suivantes : %(unders)s" - -msgid "Check access permissions for the ZFS share assigned to this driver." -msgstr "" -"Vérifiez les autorisations d'accès accordées pour le partage ZFS affecté à " -"ce pilote." 
- -msgid "Check hostgroup associate error." -msgstr "Erreur lors de la vérification de l'associé hostgroup." - -msgid "Check initiator added to array error." -msgstr "" -"Erreur lors de la vérification de l'ajout de l'initiateur à la matrice." - -msgid "Check initiator associated to host error." -msgstr "" -"Erreur lors de la vérification de l'association de l'initiateur à l'hôte." - -msgid "Check lungroup associate error." -msgstr "Erreur lors de la vérification de l'associé lungroup." - -msgid "Check portgroup associate error." -msgstr "Erreur lors de la vérification de l'associé portgroup." - -msgid "" -"Check the state of the http service. Also ensure that the https port number " -"is the same as the one specified in cinder.conf." -msgstr "" -"Vérifiez l'état du service HTTP. Assurez-vous également que le numéro de " -"port HTTPS est identique à celui indiqué dans cinder.conf." - -msgid "Chunk size is not multiple of block size for creating hash." -msgstr "" -"La taille de bloc n'est pas un multiple de la taille de bloc pour la " -"création du hachage." - -#, python-format -msgid "Cisco Fibre Channel Zoning CLI error: %(reason)s" -msgstr "" -"Erreur d'interface CLI de segmentation Cisco Fibre Channel : %(reason)s" - -#, python-format -msgid "Clone feature is not licensed on %(storageSystem)s." -msgstr "La fonction Clone n'est pas autorisée sur %(storageSystem)s." - -#, python-format -msgid "" -"Clone type '%(clone_type)s' is invalid; valid values are: '%(full_clone)s' " -"and '%(linked_clone)s'." -msgstr "" -"Type de clone '%(clone_type)s' non valide, les valeurs admises sont : " -"'%(full_clone)s' et '%(linked_clone)s'." - -msgid "" -"Cluster is not formatted. You should probably perform \"dog cluster format\"." -msgstr "" -"Le cluster n'est pas formaté. Vous devriez probablement effectuer \"dog " -"cluster format\"." 
- -#, python-format -msgid "Coho Data Cinder driver failure: %(message)s" -msgstr "Echec du pilote Coho Data Cinder : %(message)s" - -msgid "Coho rpc port is not configured" -msgstr "Le port Coho rpc n'est pas configuré" - -#, python-format -msgid "Command %(cmd)s blocked in the CLI and was cancelled" -msgstr "" -"La commande %(cmd)s a été bloquée dans l'interface CLI et a été annulée" - -#, python-format -msgid "CommandLineHelper._wait_for_condition: %s timeout." -msgstr "" -"CommandLineHelper._wait_for_condition : dépassement du délai d'attente %s." - -msgid "Compression Enabler is not installed. Can not create compressed volume." -msgstr "" -"L'optimiseur de compression n'est pas installé. Impossible de créer le " -"volume compressé." - -#, python-format -msgid "Compute cluster: %(cluster)s not found." -msgstr "Cluster de calcul %(cluster)s introuvable." - -msgid "Condition has no field." -msgstr "La condition n'a aucun champ correspondant." - -#, python-format -msgid "Config 'max_over_subscription_ratio' invalid. Must be > 0: %s" -msgstr "" -"Configuration 'max_over_subscription_ratio' non valide. Doit être > 0 : %s" - -msgid "Configuration error: dell_sc_ssn not set." -msgstr "Erreur de configuration : dell_sc_ssn non défini." - -#, python-format -msgid "Configuration file %(configurationFile)s does not exist." -msgstr "Le fichier de configuration %(configurationFile)s n'existe pas." - -msgid "Configuration is not found." -msgstr "Configuration introuvable." - -#, python-format -msgid "Configuration value %s is not set." -msgstr "La valeur de configuration %s n'est pas définie." - -#, python-format -msgid "" -"Conflicting QoS specifications in volume type %s: when QoS spec is " -"associated to volume type, legacy \"netapp:qos_policy_group\" is not allowed " -"in the volume type extra specs." 
-msgstr ""
-"Conflit entre les spécifications QoS dans le type de volume %s : lorsque la "
-"spécification QoS est associée au type de volume, un \"netapp:"
-"qos_policy_group\" suranné n'est pas autorisé dans les spécifications "
-"supplémentaires du type de volume."
- -#, python-format -msgid "Converted to %(f1)s, but format is now %(f2)s" -msgstr "Converti au format %(f1)s, mais le format est maintenant %(f2)s" - -#, python-format -msgid "Converted to %(vol_format)s, but format is now %(file_format)s" -msgstr "" -"Converti au format %(vol_format)s, mais le format est maintenant " -"%(file_format)s" - -#, python-format -msgid "Converted to raw, but format is now %s" -msgstr "Converti au format brut, mais le format est maintenant %s" - -#, python-format -msgid "Converted to raw, but format is now %s." -msgstr "Converti au format brut, mais le format est maintenant %s." - -msgid "Coordinator uninitialized." -msgstr "Coordinateur non initialisé." - -#, python-format -msgid "" -"Copy volume task failed: convert_to_base_volume: id=%(id)s, status=" -"%(status)s." -msgstr "" -"Echec de la tâche de copie de volume : convert_to_base_volume: id=%(id)s, " -"status=%(status)s." - -#, python-format -msgid "" -"Copy volume task failed: create_cloned_volume id=%(id)s, status=%(status)s." -msgstr "" -"La tâche de copie du volume a échoué : create_cloned_volume id=%(id)s, " -"status=%(status)s." - -#, python-format -msgid "Copying metadata from %(src_type)s %(src_id)s to %(vol_id)s." -msgstr "Copie de métadonnées depuis %(src_type)s %(src_id)s vers %(vol_id)s" - -msgid "" -"Could not determine which Keystone endpoint to use. This can either be set " -"in the service catalog or with the cinder.conf config option " -"'backup_swift_auth_url'." -msgstr "" -"Impossible de déterminer le noeud final Keystone à utiliser. Cela peut être " -"défini dans le catalogue de service ou à l'aide de l'option de configuration " -"cinder.conf 'backup_swift_auth_url'." - -msgid "" -"Could not determine which Swift endpoint to use. This can either be set in " -"the service catalog or with the cinder.conf config option 'backup_swift_url'." -msgstr "" -"Impossible de déterminer le noeud final Swift à utiliser. 
Cela peut être " -"défini dans le catalogue de service ou à l'aide de l'option de configuration " -"cinder.conf 'backup_swift_url'." - -msgid "Could not find DISCO wsdl file." -msgstr "Fichier wsdl DISCO introuvable." - -#, python-format -msgid "Could not find GPFS cluster id: %s." -msgstr "ID de cluster GPFS introuvable : %s." - -#, python-format -msgid "Could not find GPFS file system device: %s." -msgstr "Périphérique du système de fichiers GPFS introuvable : %s." - -#, python-format -msgid "Could not find config at %(path)s" -msgstr "Configuration introuvable dans %(path)s" - -#, python-format -msgid "Could not find iSCSI export for volume %s" -msgstr "Exportation iSCSI trouvée pour le volume %s" - -#, python-format -msgid "Could not find iSCSI target for volume: %(volume_id)s." -msgstr "Cible iSCSI introuvable pour le volume : %(volume_id)s." - -#, python-format -msgid "Could not find key in output of command %(cmd)s: %(out)s." -msgstr "" -"Impossible de trouver une clé dans la sortie de la commande %(cmd)s: %(out)s." - -#, python-format -msgid "Could not find parameter %(param)s" -msgstr "Impossible de trouver les paramètres %(param)s" - -#, python-format -msgid "Could not find target %s" -msgstr "Cible %s introuvable" - -#, python-format -msgid "Could not find the parent volume for Snapshot '%s' on array." -msgstr "" -"Impossible de trouver le volume parent de l'instantané '%s' sur la matrice." - -#, python-format -msgid "Could not find unique snapshot %(snap)s on volume %(vol)s." -msgstr "" -"Impossible de trouver l'instantané unique %(snap)s sur le volume %(vol)s." - -msgid "Could not get system name." -msgstr "Impossible d'obtenir le nom du système." - -#, python-format -msgid "Could not load paste app '%(name)s' from %(path)s" -msgstr "Echec du chargement de l'app de collage '%(name)s' depuis %(path)s" - -#, python-format -msgid "" -"Could not read information for snapshot %(name)s. Code: %(code)s. 
Reason: " -"%(reason)s" -msgstr "" -"Impossible de lire les informations pour l'instantané %(name)s. Code : " -"%(code)s. Raison : %(reason)s" - -#, python-format -msgid "Could not restore configuration file %(file_path)s: %(exc)s" -msgstr "" -"Impossible de restaurer le fichier de configuration %(file_path)s : %(exc)s" - -#, python-format -msgid "Could not save configuration to %(file_path)s: %(exc)s" -msgstr "Impossible d'enregistrer la configuration dans %(file_path)s: %(exc)s" - -#, python-format -msgid "Could not start consistency group snapshot %s." -msgstr "Impossible de démarrer l'instantané du groupe de cohérence %s." - -#, python-format -msgid "Counter %s not found" -msgstr "Compteur %s non trouvé" - -msgid "Create QoS policy error." -msgstr "Erreur lors de la création de la stratégie QoS." - -#, python-format -msgid "" -"Create backup aborted, expected backup status %(expected_status)s but got " -"%(actual_status)s." -msgstr "" -"Création de la sauvegarde interrompue, état de la sauvegarde attendu " -"%(expected_status)s, mais état %(actual_status)s obtenu." - -#, python-format -msgid "" -"Create backup aborted, expected volume status %(expected_status)s but got " -"%(actual_status)s." -msgstr "" -"Création de la sauvegarde interrompue, état du volume attendu " -"%(expected_status)s, mais état %(actual_status)s obtenu." - -msgid "Create export for volume failed." -msgstr "La création d'une exportation pour le volume a échoué." - -msgid "Create hostgroup error." -msgstr "Erreur lors de la création de hostgroup." - -#, python-format -msgid "Create hypermetro error. %s." -msgstr "Erreur lors de la création d'hypermetro. %s." - -msgid "Create lun error." -msgstr "Erreur de création du numéro d'unité logique (lun)." - -msgid "Create lun migration error." -msgstr "Erreur lors de la création de migration de numéro d'unité logique." - -msgid "Create luncopy error." -msgstr "Erreur lors de la création luncopy." - -msgid "Create lungroup error." 
-msgstr "Erreur lors de la création de lungroup." - -msgid "Create manager volume flow failed." -msgstr "Echec de la création du flux de volume du gestionnaire. " - -msgid "Create port group error." -msgstr "Erreur lors de la création du groupe de ports." - -msgid "Create replication error." -msgstr "Erreur lors de la création de la réplication." - -#, python-format -msgid "Create replication pair failed. Error: %s." -msgstr "" -"La création d'une paire de systèmes pour la réplication a échoué. Erreur : " -"%s." - -msgid "Create snapshot error." -msgstr "Erreur lors de la création de l'instantané." - -#, python-format -msgid "Create volume error. Because %s." -msgstr "Erreur lors de la création du volume. Motif : %s." - -msgid "Create volume failed." -msgstr "Echec de la création du volume." - -msgid "Creating a consistency group from a source is not currently supported." -msgstr "" -"La création d'un groupe de cohérence à partir d'une source n'est pas prise " -"en charge actuellement." - -#, python-format -msgid "" -"Creating and activating zone set failed: (Zone set=%(cfg_name)s error=" -"%(err)s)." -msgstr "" -"Echec de création et d'activation de l'ensemble de zones : (Ensemble de " -"zones=%(cfg_name)s erreur=%(err)s)." - -#, python-format -msgid "" -"Creating and activating zone set failed: (Zone set=%(zoneset)s error=" -"%(err)s)." -msgstr "" -"Echec de création et d'activation de l'ensemble de zones : (Ensemble de " -"zones=%(zoneset)s erreur=%(err)s)." - -#, python-format -msgid "Creating usages for %(begin_period)s until %(end_period)s" -msgstr "" -"Création d'utilisations pour la période comprise entre %(begin_period)s et " -"%(end_period)s" - -msgid "Current host isn't part of HGST domain." -msgstr "L'hôte actuel ne fait pas partie du domaine HGST." 
- -#, python-format -msgid "" -"Current host not valid for volume %(id)s with type %(type)s, migration not " -"allowed" -msgstr "" -"Hôte actuel non valide pour le volume %(id)s de type %(type)s, migration non " -"autorisée" - -#, python-format -msgid "" -"Currently mapped host for volume %(vol)s is in unsupported host group with " -"%(group)s." -msgstr "" -"L'hôte actuellement mappé pour le volume %(vol)s est dans un groupe d'hôtes " -"non pris en charge avec %(group)s." - -#, python-format -msgid "" -"DRBDmanage driver error: expected key \"%s\" not in answer, wrong DRBDmanage " -"version?" -msgstr "" -"Erreur du pilote DRBDmanage : la clé attendue \"%s\" ne figure pas dans la " -"réponse, version DRBDmanage incorrecte ?" - -msgid "" -"DRBDmanage driver setup error: some required libraries (dbus, drbdmanage.*) " -"not found." -msgstr "" -"Erreur de configuration du pilote DRBDmanage : certaines bibliothèques " -"requises (dbus, drbdmanage.*) sont introuvables." - -#, python-format -msgid "DRBDmanage expected one resource (\"%(res)s\"), got %(n)d" -msgstr "DRBDmanage attendait une ressource (\"%(res)s\"), mais a reçu %(n)d" - -#, python-format -msgid "" -"DRBDmanage timeout waiting for new volume after snapshot restore; resource " -"\"%(res)s\", volume \"%(vol)s\"" -msgstr "" -"Dépassement du délai d'attente de DRBDmanage lors de la création du volume " -"après la restauration d'instantané ; ressource \"%(res)s\", volume \"%(vol)s" -"\"" - -#, python-format -msgid "" -"DRBDmanage timeout waiting for snapshot creation; resource \"%(res)s\", " -"snapshot \"%(sn)s\"" -msgstr "" -"Dépassement du délai d'attente de DRBDmanage lors de la création de " -"l'instantané ; ressource \"%(res)s\", instantané \"%(sn)s\"" - -#, python-format -msgid "" -"DRBDmanage timeout waiting for volume creation; resource \"%(res)s\", volume " -"\"%(vol)s\"" -msgstr "" -"Dépassement du délai d'attente de DRBDmanage lors de la création du volume ; " -"ressource \"%(res)s\", volume 
\"%(vol)s\"" - -#, python-format -msgid "" -"DRBDmanage timeout waiting for volume size; volume ID \"%(id)s\" (res " -"\"%(res)s\", vnr %(vnr)d)" -msgstr "" -"Dépassement du délai d'attente de DRBDmanage pour obtenir la taille du " -"volume ; ID volume \"%(id)s\" (res \"%(res)s\", vnr %(vnr)d)" - -msgid "Data ONTAP API version could not be determined." -msgstr "La version d'API Data ONTAP n'a pas pu être déterminée." - -msgid "Data ONTAP operating in 7-Mode does not support QoS policy groups." -msgstr "" -"Data ONTAP opérant sous 7-Mode ne prend pas en charge les groupes de " -"stratégies QoS." - -msgid "Database schema downgrade is not allowed." -msgstr "Rétrograder le schéma de base de données n'est pas autorisé." - -#, python-format -msgid "Dataset %s is not shared in Nexenta Store appliance" -msgstr "Dataset %s n'est pas partagé dans Nexenta Store Appliance" - -#, python-format -msgid "Dataset group %s not found at Nexenta SA" -msgstr "Groupe Dataset %s introuvable sur Nexenta SA" - -#, python-format -msgid "" -"Dedup is a valid provisioning type, but requires WSAPI version " -"'%(dedup_version)s' version '%(version)s' is installed." -msgstr "" -"Dedup est un type de mise à disposition valide, mais nécessite que WSAPI " -"version '%(dedup_version)s', version '%(version)s' soit installé." - -msgid "Dedup luns cannot be extended" -msgstr "Les numéros d'unité logique dédoublonnés ne peuvent pas être étendus" - -#, python-format -msgid "" -"Default quota for resource: %(res)s is set by the default quota flag: quota_" -"%(res)s, it is now deprecated. Please use the default quota class for " -"default quota." -msgstr "" -"Le quota par défaut de la ressource %(res)s est défini par l'indicateur de " -"quota par défaut : quota_%(res)s. Il est désormais obsolète. Utilisez la " -"classe de quota par défaut pour le quota par défaut." - -msgid "Default volume type can not be found." -msgstr "Le type de volume par défaut est introuvable." - -msgid "Delete LUNcopy error." 
-msgstr "Erreur lors de la suppression de LUNcopy." - -msgid "Delete QoS policy error." -msgstr "Erreur lors de la suppression de la stratégie QoS." - -msgid "Delete associated lun from lungroup error." -msgstr "" -"Erreur lors de la suppression du numéro d'unité logique associé de lungroup." - -#, python-format -msgid "" -"Delete backup aborted, the backup service currently configured " -"[%(configured_service)s] is not the backup service that was used to create " -"this backup [%(backup_service)s]." -msgstr "" -"Suppression de la sauvegarde interrompue, le service de sauvegarde " -"actuellement configuré [%(configured_service)s] ne correspond pas au service " -"de sauvegarde utilisé pour créer cette sauvegarde [%(backup_service)s]." - -msgid "Delete consistency group failed." -msgstr "La suppression du groupe de cohérence a échoué." - -msgid "Delete hostgroup error." -msgstr "Erreur lors de la suppression de hostgroup." - -msgid "Delete hostgroup from mapping view error." -msgstr "Erreur lors de la suppression de hostgroup de la vue de mappage." - -msgid "Delete lun error." -msgstr "Erreur lors de la suppression du numéro d'unité logique." - -msgid "Delete lun migration error." -msgstr "Erreur lors de la suppression de migration de numéro d'unité logique." - -msgid "Delete lungroup error." -msgstr "Erreur lors de la suppression de lungroup." - -msgid "Delete lungroup from mapping view error." -msgstr "Erreur lors de la suppression de lungroup de la vue de mappage." - -msgid "Delete mapping view error." -msgstr "Erreur lors de la suppression de la vue de mappage." - -msgid "Delete port group error." -msgstr "Erreur lors de la suppression du groupe de ports." - -msgid "Delete portgroup from mapping view error." -msgstr "Erreur lors de la suppression de portgroup de la vue de mappage." - -msgid "Delete snapshot error." -msgstr "Erreur lors de la suppression de l'instantané." - -#, python-format -msgid "Delete snapshot of volume not supported in state: %s." 
-msgstr "" -"Suppression de l'instantané du volume non pris en charge à l'état : %s." - -#, python-format -msgid "" -"Delete_backup aborted, expected backup status %(expected_status)s but got " -"%(actual_status)s." -msgstr "" -"Delete_backup interrompu, état de la sauvegarde attendu %(expected_status)s, " -"mais état %(actual_status)s obtenu." - -msgid "Deleting volume from database and skipping rpc." -msgstr "Suppression du volume de la base de données et saut de RPC." - -#, python-format -msgid "Deleting zones failed: (command=%(cmd)s error=%(err)s)." -msgstr "Echec de la suppression des zones : (commande=%(cmd)s erreur=%(err)s)." - -msgid "Dell API 2.1 or later required for Consistency Group support" -msgstr "" -"L'API Dell 2.1 ou ultérieure est requise pour prise en charge de groupe de " -"cohérence" - -msgid "" -"Dell Cinder driver configuration error replication not supported with direct " -"connect." -msgstr "" -"Erreur de configuration du pilote Cinder de Dell, la réplication n'est pas " -"prise en charge avec la connexion directe." - -#, python-format -msgid "Dell Cinder driver configuration error replication_device %s not found" -msgstr "" -"Erreur de configuration du pilote Cinder de Dell, replication_device %s " -"introuvable" - -msgid "Describe-resource is admin only functionality" -msgstr "Describe-resource est une fonctionnalité admin uniquement" - -#, python-format -msgid "Destination has migration_status %(stat)s, expected %(exp)s." -msgstr "La destination a pour migration_status %(stat)s, %(exp)s est attendu." - -msgid "Destination volume not mid-migration." -msgstr "Le volume de destination n'est pas en cours de migration." - -msgid "" -"Detach volume failed: More than one attachment, but no attachment_id " -"provided." -msgstr "" -"La déconnexion du volume a échoué : plusieurs volumes connectés ont été " -"indiqués, mais sans attachment_id attachment_id." - -msgid "Detach volume from instance and then try again." 
-msgstr "Déconnectez le volume de l'instance puis réessayez." - -#, python-format -msgid "Detected more than one volume with name %(vol_name)s" -msgstr "Plusieurs volumes portant le nom %(vol_name)s détectés" - -#, python-format -msgid "Did not find expected column in %(fun)s: %(hdr)s." -msgstr "Colonne attendue introuvable dans %(fun)s : %(hdr)s." - -#, python-format -msgid "Did not find the expected key %(key)s in %(fun)s: %(raw)s." -msgstr "La clé attendue, %(key)s, est introuvable dans %(fun)s : %(raw)s." - -msgid "Disabled reason contains invalid characters or is too long" -msgstr "" -"La raison de désactivation contient des caractères invalides ou est trop " -"longue" - -#, python-format -msgid "Domain with name %s wasn't found." -msgstr "Le domaine nommé %s est introuvable." - -#, python-format -msgid "" -"Downlevel GPFS Cluster Detected. GPFS Clone feature not enabled in cluster " -"daemon level %(cur)s - must be at least at level %(min)s." -msgstr "" -"Rétrogradation de cluster GPFS détectée. Fonction de clonage GPFS non " -"activée au niveau du démon de cluster %(cur)s - doit être au moins au niveau " -"%(min)s." - -#, python-format -msgid "Driver initialize connection failed (error: %(err)s)." -msgstr "" -"L'initialisation de la connexion par le pilote a échoué (erreur : %(err)s)." - -msgid "Driver must implement initialize_connection" -msgstr "Le pilote doit implémenter initialize_connection" - -#, python-format -msgid "" -"Driver successfully decoded imported backup data, but there are missing " -"fields (%s)." -msgstr "" -"Le pilote a correctement décodé les données de la sauvegarde importée, mais " -"des zones sont manquantes (%s)." - -#, python-format -msgid "" -"E-series proxy API version %(current_version)s does not support full set of " -"SSC extra specs. The proxy version must be at at least %(min_version)s." 
-msgstr "" -"La version %(current_version)s de l'API de proxy lE-series ne prend pas en " -"charge la panoplie complète de spécifications SSC supplémentaires. La " -"version du proxy doit être au moins %(min_version)s." - -#, python-format -msgid "" -"EMC VNX Cinder Driver CLI exception: %(cmd)s (Return Code: %(rc)s) (Output: " -"%(out)s)." -msgstr "" -"Exception CLI du pilote EMC VNX Cinder : %(cmd)s (code retour : %(rc)s) " -"(sortie : %(out)s)." - -#, python-format -msgid "" -"EMC VNX Cinder Driver SPUnavailableException: %(cmd)s (Return Code: %(rc)s) " -"(Output: %(out)s)." -msgstr "" -"Exception SPUnavailableException du pilote EMC VNX Cinder : %(cmd)s (code " -"retour : %(rc)s) (sortie : %(out)s)." - -msgid "" -"EcomServerIp, EcomServerPort, EcomUserName, EcomPassword must have valid " -"values." -msgstr "" -"EcomServerIp, EcomServerPort, EcomUserName, EcomPassword doivent avoir des " -"valeurs valides." - -#, python-format -msgid "" -"Either 'cgsnapshot_id' or 'source_cgid' must be provided to create " -"consistency group %(name)s from source." -msgstr "" -"Soit 'cgsnapshot_id', soit 'source_cgid' doit être soumis pour créer le " -"groupe de cohérence %(name)s depuis la source." - -#, python-format -msgid "" -"Either SLO: %(slo)s or workload %(workload)s is invalid. Examine previous " -"error statement for valid values." -msgstr "" -"SLO : %(slo)s ou la charge de travail %(workload)s n'est pas valide. " -"Examinez l'énoncé de l'erreur pour voir si les valeurs sont valides." - -msgid "Either hitachi_serial_number or hitachi_unit_name is required." -msgstr "Soit hitachi_serial_number, soit hitachi_unit_name est requis." - -#, python-format -msgid "Element Composition Service not found on %(storageSystemName)s." -msgstr "" -"Service de composition d'élément introuvable sur %(storageSystemName)s." - -msgid "Enables QoS." -msgstr "Active QoS." - -msgid "Enables compression." -msgstr "Active la compression. " - -msgid "Enables replication." 
-msgstr "Active la réplication. " - -msgid "Ensure that configfs is mounted at /sys/kernel/config." -msgstr "Assurez-vous que configfs est monté sur /sys/kernel/config." - -#, python-format -msgid "" -"Error Adding Initiator: %(initiator)s on groupInitiatorGroup: " -"%(initiatorgroup)s Return code: %(ret.status)d Message: %(ret.data)s ." -msgstr "" -"Erreur d'ajout de l'initiateur %(initiator)s à l'élément groupInitiatorGroup " -"%(initiatorgroup)s. Code retour : %(ret.status)d Message : %(ret.data)s." - -#, python-format -msgid "" -"Error Adding to TargetGroup: %(targetgroup)s withIQN: %(iqn)sReturn code: " -"%(ret.status)d Message: %(ret.data)s." -msgstr "" -"Erreur d'ajout du groupe cible : %(targetgroup)s avec IQN : %(iqn)s Code " -"retour : %(ret.status)d Message : %(ret.data)s ." - -#, python-format -msgid "Error Attaching volume %(vol)s." -msgstr "Erreur lors du rattachement du volume %(vol)s." - -#, python-format -msgid "" -"Error Cloning Snapshot: %(snapshot)s on Volume: %(lun)s of Pool: %(pool)s " -"Project: %(project)s Clone project: %(clone_proj)s Return code: " -"%(ret.status)d Message: %(ret.data)s." -msgstr "" -"Erreur de clonage de l'instantané : %(snapshot)s sur le volume : %(lun)s du " -"pool : %(pool)s Projet : %(project)s Projet clone : %(clone_proj)s Code " -"retour : %(ret.status)d Message : %(ret.data)s ." - -#, python-format -msgid "" -"Error Create Cloned Volume: %(cloneName)s Return code: %(rc)lu. Error: " -"%(error)s." -msgstr "" -"Erreur de création du volume cloné : %(cloneName)s Code retour : %(rc)lu. " -"Erreur : %(error)s." - -#, python-format -msgid "" -"Error Create Cloned Volume: Volume: %(cloneName)s Source Volume:" -"%(sourceName)s. Return code: %(rc)lu. Error: %(error)s." -msgstr "" -"Erreur de création d'un volume cloné : Volume : %(cloneName)s Volume :" -"%(sourceName)s. Code retour : %(rc)lu. Erreur : %(error)s." - -#, python-format -msgid "" -"Error Create Group: %(groupName)s. Return code: %(rc)lu. Error: %(error)s." 
-msgstr "" -"Erreur de création du groupe : %(groupName)s. Code retour : %(rc)lu. " -"Erreur : %(error)s." - -#, python-format -msgid "" -"Error Create Masking View: %(groupName)s. Return code: %(rc)lu. Error: " -"%(error)s." -msgstr "" -"Erreur de création de la vue de masquage : %(groupName)s. Code retour : " -"%(rc)lu. Erreur : %(error)s." - -#, python-format -msgid "" -"Error Create Volume: %(volumeName)s. Return code: %(rc)lu. Error: %(error)s." -msgstr "" -"Erreur de création du volume : %(volumeName)s. Code retour : %(rc)lu. " -"Erreur : %(error)s." - -#, python-format -msgid "" -"Error Create Volume: %(volumename)s. Return code: %(rc)lu. Error: %(error)s." -msgstr "" -"Erreur de création du volume : %(volumename)s. Code retour : %(rc)lu. " -"Erreur : %(error)s." - -#, python-format -msgid "" -"Error CreateGroupReplica: source: %(source)s target: %(target)s. Return " -"code: %(rc)lu. Error: %(error)s." -msgstr "" -"Erreur CreateGroupReplica : source : %(source)s cible : %(target)s. Code " -"retour : %(rc)lu. Erreur : %(error)s." - -#, python-format -msgid "" -"Error Creating Initiator: %(initiator)s on Alias: %(alias)s Return code: " -"%(ret.status)d Message: %(ret.data)s ." -msgstr "" -"Erreur de création de l'initiateur %(initiator)s sur l'alias %(alias)s. Code " -"retour : %(ret.status)d Message : %(ret.data)s ." - -#, python-format -msgid "" -"Error Creating Project: %(project)s on Pool: %(pool)s Return code: " -"%(ret.status)d Message: %(ret.data)s ." -msgstr "" -"Erreur de création du projet : %(project)s sur le pool : %(pool)s Code " -"retour : %(ret.status)d Message : %(ret.data)s ." - -#, python-format -msgid "" -"Error Creating Property: %(property)s Type: %(type)s Description: " -"%(description)s Return code: %(ret.status)d Message: %(ret.data)s." -msgstr "" -"Erreur lors de la création de la propriété : %(property)s. Type : %(type)s. " -"Description : %(description)s. Code retour : %(ret.status)d Message : " -"%(ret.data)s." 
- -#, python-format -msgid "" -"Error Creating Share: %(name)s Return code: %(ret.status)d Message: " -"%(ret.data)s." -msgstr "" -"Erreur lors de la création du partage %(name)s. Code retour : %(ret.status)d " -"Message : %(ret.data)s." - -#, python-format -msgid "" -"Error Creating Snapshot: %(snapshot)s onVolume: %(lun)s to Pool: %(pool)s " -"Project: %(project)s Return code: %(ret.status)d Message: %(ret.data)s." -msgstr "" -"Erreur de création de l'instantané : %(snapshot)s sur le volume %(lun)s vers " -"le pool %(pool)s Projet : %(project)s Code retour : %(ret.status)d " -"Message : %(ret.data)s." - -#, python-format -msgid "" -"Error Creating Snapshot: %(snapshot)s onshare: %(share)s to Pool: %(pool)s " -"Project: %(project)s Return code: %(ret.status)d Message: %(ret.data)s." -msgstr "" -"Erreur lors de la création de l'instantané %(snapshot)s sur le partage " -"%(share)s vers le pool %(pool)s Projet : %(project)s Code retour : " -"%(ret.status)d Message : %(ret.data)s." - -#, python-format -msgid "" -"Error Creating Target: %(alias)sReturn code: %(ret.status)d Message: " -"%(ret.data)s ." -msgstr "" -"Erreur de création de la cible : %(alias)s Code retour : %(ret.status)d " -"Message : %(ret.data)s." - -#, python-format -msgid "" -"Error Creating TargetGroup: %(targetgroup)s withIQN: %(iqn)sReturn code: " -"%(ret.status)d Message: %(ret.data)s ." -msgstr "" -"Erreur de création du groupe cible : %(targetgroup)s avec IQN : %(iqn)s Code " -"retour : %(ret.status)d Message : %(ret.data)s ." - -#, python-format -msgid "" -"Error Creating Volume: %(lun)s Size: %(size)s Return code: %(ret.status)d " -"Message: %(ret.data)s." -msgstr "" -"Erreur de création du volume : %(lun)s Taille : %(size)s Code retour : " -"%(ret.status)d Message : %(ret.data)s." - -#, python-format -msgid "" -"Error Creating new composite Volume Return code: %(rc)lu. Error: %(error)s." -msgstr "" -"Erreur de création d'un volume composite. Code retour : %(rc)lu. Erreur : " -"%(error)s." 
- -#, python-format -msgid "" -"Error Creating replication action on: pool: %(pool)s Project: %(proj)s " -"volume: %(vol)s for target: %(tgt)s and pool: %(tgt_pool)sReturn code: " -"%(ret.status)d Message: %(ret.data)s ." -msgstr "" -"Erreur lors de la création de l'action de réplication sur : pool : %(pool)s " -"Projet : %(proj)s volume : %(vol)s pour la cible : %(tgt)s et le pool : " -"%(tgt_pool)s Code retour : %(ret.status)d Message : %(ret.data)s ." - -msgid "Error Creating unbound volume on an Extend operation." -msgstr "Erreur de création de volume non lié sur une opération Extend." - -msgid "Error Creating unbound volume." -msgstr "Erreur lors de la création d'un volume non lié." - -#, python-format -msgid "" -"Error Delete Volume: %(volumeName)s. Return code: %(rc)lu. Error: %(error)s." -msgstr "" -"Erreur de suppression du volume : %(volumeName)s. Code retour : %(rc)lu. " -"Erreur : %(error)s." - -#, python-format -msgid "" -"Error Deleting Group: %(storageGroupName)s. Return code: %(rc)lu. Error: " -"%(error)s" -msgstr "" -"Erreur lors de la suppression du groupe : %(storageGroupName)s. Code " -"retour : %(rc)lu. Erreur : %(error)s" - -#, python-format -msgid "" -"Error Deleting Initiator Group: %(initiatorGroupName)s. Return code: " -"%(rc)lu. Error: %(error)s" -msgstr "" -"Erreur lors de la suppression du groupe d'initiateurs : " -"%(initiatorGroupName)s. Code retour : %(rc)lu. Erreur : %(error)s" - -#, python-format -msgid "" -"Error Deleting Snapshot: %(snapshot)s on Share: %(share)s to Pool: %(pool)s " -"Project: %(project)s Return code: %(ret.status)d Message: %(ret.data)s." -msgstr "" -"Erreur lors de la suppression de l'instantané %(snapshot)s sur le partage " -"%(share)s vers le pool %(pool)s Projet : %(project)s Code retour : " -"%(ret.status)d Message : %(ret.data)s." 
- -#, python-format -msgid "" -"Error Deleting Snapshot: %(snapshot)s on Volume: %(lun)s to Pool: %(pool)s " -"Project: %(project)s Return code: %(ret.status)d Message: %(ret.data)s." -msgstr "" -"Erreur de suppression de l'instantané : %(snapshot)s sur le volume %(lun)s " -"vers le pool %(pool)s Projet : %(project)s Code retour : %(ret.status)d " -"Message : %(ret.data)s." - -#, python-format -msgid "" -"Error Deleting Volume: %(lun)s from Pool: %(pool)s, Project: %(project)s. " -"Return code: %(ret.status)d, Message: %(ret.data)s." -msgstr "" -"Erreur lors de la suppression du volume %(lun)s du pool : %(pool)s, projet : " -"%(project)s. Code retour : %(ret.status)d, Message : %(ret.data)s." - -#, python-format -msgid "" -"Error Deleting project: %(project)s on pool: %(pool)s Return code: " -"%(ret.status)d Message: %(ret.data)s." -msgstr "" -"Erreur lors de la suppression du projet %(project)s sur le pool : %(pool)s. " -"Code retour : %(ret.status)d Message : %(ret.data)s ." - -#, python-format -msgid "" -"Error Deleting replication action: %(id)s Return code: %(ret.status)d " -"Message: %(ret.data)s." -msgstr "" -"Erreur lors de la suppression de l'action de réplication : %(id)s. Code " -"retour : %(ret.status)d Message : %(ret.data)s." - -#, python-format -msgid "" -"Error Extend Volume: %(volumeName)s. Return code: %(rc)lu. Error: %(error)s." -msgstr "" -"Erreur d'extension de volume : %(volumeName)s. Code retour : %(rc)lu. " -"Erreur : %(error)s." - -#, python-format -msgid "" -"Error Getting Initiators: InitiatorGroup: %(initiatorgroup)s Return code: " -"%(ret.status)d Message: %(ret.data)s ." -msgstr "" -"Erreur d'obtention des initiateurs. InitiatorGroup : %(initiatorgroup)s. " -"Code retour : %(ret.status)d Message : %(ret.data)s ." - -#, python-format -msgid "" -"Error Getting Pool Stats: Pool: %(pool)s Return code: %(status)d Message: " -"%(data)s." 
-msgstr "" -"Erreur lors de l'obtention des statistiques du pool : pool : %(pool)s, code " -"retour : %(status)d, message : %(data)s." - -#, python-format -msgid "" -"Error Getting Project Stats: Pool: %(pool)s Project: %(project)s Return " -"code: %(ret.status)d Message: %(ret.data)s." -msgstr "" -"Erreur lors de l'obtention des statistiques du projet : Pool : %(pool)s " -"Projet: %(project)s Code retour : %(ret.status)d Message : %(ret.data)s." - -#, python-format -msgid "" -"Error Getting Share: %(share)s on Pool: %(pool)s Project: %(project)s Return " -"code: %(ret.status)d Message: %(ret.data)s." -msgstr "" -"Erreur d'obtention du partage %(share)s sur le pool %(pool)s. Projet: " -"%(project)s Code retour : %(ret.status)d Message : %(ret.data)s." - -#, python-format -msgid "" -"Error Getting Snapshot: %(snapshot)s on Volume: %(lun)s to Pool: %(pool)s " -"Project: %(project)s Return code: %(ret.status)d Message: %(ret.data)s." -msgstr "" -"Erreur d'obtention de l'instantané %(snapshot)s sur le volume %(lun)s vers " -"le pool %(pool)s Projet : %(project)s Code retour : %(ret.status)d " -"Message : %(ret.data)s." - -#, python-format -msgid "" -"Error Getting Target: %(alias)sReturn code: %(ret.status)d Message: " -"%(ret.data)s ." -msgstr "" -"Erreur d'obtention de la cible : %(alias)s Code retour : %(ret.status)d " -"Message : %(ret.data)s." - -#, python-format -msgid "" -"Error Getting Volume: %(lun)s on Pool: %(pool)s Project: %(project)s Return " -"code: %(ret.status)d Message: %(ret.data)s." -msgstr "" -"Erreur d'obtention du volume : %(lun)s sur le pool %(pool)s Projet : " -"%(project)s Code retour : %(ret.status)d Message : %(ret.data)s." - -#, python-format -msgid "" -"Error Migrating volume from one pool to another. Return code: %(rc)lu. " -"Error: %(error)s." -msgstr "" -"Erreur de migration du volume d'un pool vers un autre. Code retour : " -"%(rc)lu. Erreur : %(error)s." - -#, python-format -msgid "" -"Error Modifying masking view : %(groupName)s. 
Return code: %(rc)lu. Error: " -"%(error)s." -msgstr "" -"Erreur de modification de la vue de masquage : %(groupName)s. Code retour : " -"%(rc)lu. Erreur : %(error)s." - -#, python-format -msgid "Error Pool ownership: Pool %(pool)s is not owned by %(host)s." -msgstr "" -"Erreur de propriété du pool : le pool %(pool)s n'est pas détenu par %(host)s." - -#, python-format -msgid "" -"Error Setting props Props: %(props)s on Volume: %(lun)s of Pool: %(pool)s " -"Project: %(project)s Return code: %(ret.status)d Message: %(ret.data)s." -msgstr "" -"Erreur de définition des propriétés : %(props)s sur le volume : %(lun)s du " -"pool : %(pool)s Projet : %(project)s Code retour : %(ret.status)d Message : " -"%(ret.data)s." - -#, python-format -msgid "" -"Error Terminating migrate session. Return code: %(rc)lu. Error: %(error)s." -msgstr "" -"Erreur de fin de la session de migration. Code retour : %(rc)lu. Erreur : " -"%(error)s." - -#, python-format -msgid "" -"Error Verifying Initiator: %(iqn)s Return code: %(ret.status)d Message: " -"%(ret.data)s." -msgstr "" -"Erreur de vérification du demandeur : %(iqn)s Code retour : %(ret.status)d " -"Message : %(ret.data)s." - -#, python-format -msgid "" -"Error Verifying Pool: %(pool)s Return code: %(ret.status)d Message: " -"%(ret.data)s." -msgstr "" -"Erreur de vérification du pool : %(pool)s Code retour : %(ret.status)d " -"Message : %(ret.data)s." - -#, python-format -msgid "" -"Error Verifying Project: %(project)s on Pool: %(pool)s Return code: " -"%(ret.status)d Message: %(ret.data)s." -msgstr "" -"Erreur de vérification du projet : %(project)s sur le pool : %(pool)s Code " -"retour : %(ret.status)d Message : %(ret.data)s ." - -#, python-format -msgid "" -"Error Verifying Service: %(service)s Return code: %(ret.status)d Message: " -"%(ret.data)s." -msgstr "" -"Erreur lors de la vérification du service %(service)s. Code retour : " -"%(ret.status)d Message : %(ret.data)s." 
- -#, python-format -msgid "" -"Error Verifying Target: %(alias)s Return code: %(ret.status)d Message: " -"%(ret.data)s." -msgstr "" -"Erreur de vérification de la cible : %(alias)s Code retour : %(ret.status)d " -"Message : %(ret.data)s." - -#, python-format -msgid "" -"Error Verifying share: %(share)s on Project: %(project)s and Pool: %(pool)s " -"Return code: %(ret.status)d Message: %(ret.data)s." -msgstr "" -"Erreur lors de la vérification du partage %(share)s sur le projet " -"%(project)s et le pool %(pool)s. Code retour : %(ret.status)d Message : " -"%(ret.data)s." - -#, python-format -msgid "" -"Error adding Volume: %(volumeName)s with instance path: " -"%(volumeInstancePath)s." -msgstr "" -"Erreur d'ajout du volume : %(volumeName)s avec le chemin d'instance : " -"%(volumeInstancePath)s." - -#, python-format -msgid "" -"Error adding initiator to group : %(groupName)s. Return code: %(rc)lu. " -"Error: %(error)s." -msgstr "" -"Erreur d'ajout du demandeur au groupe : %(groupName)s. Code retour : " -"%(rc)lu. Erreur : %(error)s." - -#, python-format -msgid "Error adding volume to composite volume. Error is: %(error)s." -msgstr "Erreur d'ajout du volume vers le volume composite. Erreur : %(error)s." - -#, python-format -msgid "Error appending volume %(volumename)s to target base volume." -msgstr "Erreur d'ajout du volume %(volumename)s au volume de base cible." - -#, python-format -msgid "" -"Error associating storage group : %(storageGroupName)s. To fast Policy: " -"%(fastPolicyName)s with error description: %(errordesc)s." -msgstr "" -"Erreur d'association du groupe de stockage : %(storageGroupName)s. A la " -"règle FAST : %(fastPolicyName)s avec la description d'erreur : %(errordesc)s." - -#, python-format -msgid "" -"Error break clone relationship: Sync Name: %(syncName)s Return code: " -"%(rc)lu. Error: %(error)s." -msgstr "" -"Erreur de rupture de la relation clonée : Nom de synchronisation : " -"%(syncName)s Code retour : %(rc)lu. Erreur : %(error)s." 
- -msgid "Error connecting to ceph cluster." -msgstr "Erreur lors de la connexion au cluster ceph." - -#, python-format -msgid "Error connecting via ssh: %s" -msgstr "Erreur lors de la connexion via SSH : %s" - -#, python-format -msgid "Error creating volume: %s." -msgstr "Erreur lors de la création du volume : %s." - -msgid "Error deleting replay profile." -msgstr "Erreur lors de la suppression du profil de relecture." - -#, python-format -msgid "Error deleting volume %(ssn)s: %(volume)s" -msgstr "Erreur lors de la suppression du volume %(ssn)s : %(volume)s" - -#, python-format -msgid "Error deleting volume %(vol)s: %(err)s." -msgstr "Erreur lors de la suppression du volume %(vol)s : %(err)s." - -#, python-format -msgid "Error during evaluator parsing: %(reason)s" -msgstr "Erreur lors de l'analyse syntaxique de l'évaluateur : %(reason)s" - -#, python-format -msgid "" -"Error editing share: %(share)s on Pool: %(pool)s Return code: %(ret.status)d " -"Message: %(ret.data)s ." -msgstr "" -"Erreur lors de l'édition du partage %(share)s sur le pool %(pool)s. Code " -"retour : %(ret.status)d Message : %(ret.data)s ." - -#, python-format -msgid "" -"Error enabling iSER for NetworkPortal: please ensure that RDMA is supported " -"on your iSCSI port %(port)d on ip %(ip)s." -msgstr "" -"Erreur lors de l'activation d'iSER pour NetworkPortal : assurez-vous que " -"RDMA est pris en charge sur votre port iSCSI %(port)d à l'adresse IP %(ip)s." - -#, python-format -msgid "Error encountered during cleanup of a failed attach: %(ex)s" -msgstr "" -"Erreur survenue durant le nettoyage d'un rattachement ayant échoué : %(ex)s" - -#, python-format -msgid "Error executing CloudByte API [%(cmd)s], Error: %(err)s." -msgstr "" -"Erreur lors de l'exécution de l'API CloudByte [%(cmd)s]. Erreur : %(err)s." 
- -msgid "Error executing EQL command" -msgstr "Erreur d'exécution de la commande EQL" - -#, python-format -msgid "Error executing command via ssh: %s" -msgstr "Erreur d'exécution de la commande via SSH : %s" - -#, python-format -msgid "Error extending volume %(vol)s: %(err)s." -msgstr "Erreur lors de l'extension du volume %(vol)s : %(err)s." - -#, python-format -msgid "Error extending volume: %(reason)s" -msgstr "Erreur lors de l'extension du volume : %(reason)s" - -#, python-format -msgid "Error finding %(name)s." -msgstr "Erreur lors de la recherche de %(name)s." - -#, python-format -msgid "Error finding %s." -msgstr "Erreur lors de la recherche de %s." - -#, python-format -msgid "" -"Error getting ReplicationSettingData. Return code: %(rc)lu. Error: %(error)s." -msgstr "" -"Erreur lors de l'obtention de ReplicationSettingData. Code retour : %(rc)lu. " -"Erreur : %(error)s." - -msgid "" -"Error getting appliance version details. Return code: %(ret.status)d " -"Message: %(ret.data)s ." -msgstr "" -"Erreur lors de l'obtention des détails de version du dispositif. Code " -"retour : %(ret.status)d Message : %(ret.data)s." - -#, python-format -msgid "Error getting domain id from name %(name)s: %(err)s." -msgstr "" -"Erreur d'obtention de l'ID de domaine à partir du nom %(name)s : %(err)s." - -#, python-format -msgid "Error getting domain id from name %(name)s: %(id)s." -msgstr "" -"Erreur d'obtention de l'ID de domaine à partir du nom %(name)s : %(id)s." - -msgid "Error getting initiator groups." -msgstr "Erreur d'obtention des groupes d'initiateurs." - -#, python-format -msgid "Error getting pool id from name %(pool)s: %(err)s." -msgstr "Erreur d'obtention de l'ID de pool à partir du nom %(pool)s : %(err)s." - -#, python-format -msgid "Error getting pool id from name %(pool_name)s: %(err_msg)s." -msgstr "" -"Erreur d'obtention de l'ID de pool à partir du nom %(pool_name)s : " -"%(err_msg)s." - -#, python-format -msgid "" -"Error getting replication action: %(id)s. 
Return code: %(ret.status)d " -"Message: %(ret.data)s ." -msgstr "" -"Erreur lors de l'obtention de l'action de réplication : %(id)s. Code " -"retour : %(ret.status)d Message : %(ret.data)s." - -msgid "" -"Error getting replication source details. Return code: %(ret.status)d " -"Message: %(ret.data)s ." -msgstr "" -"Erreur lors de l'obtention des détails de la source de réplication. Code " -"retour : %(ret.status)d Message : %(ret.data)s." - -msgid "" -"Error getting replication target details. Return code: %(ret.status)d " -"Message: %(ret.data)s ." -msgstr "" -"Erreur lors de l'obtention des détails de la cible de réplication. Code " -"retour : %(ret.status)d Message : %(ret.data)s." - -#, python-format -msgid "" -"Error getting version: svc: %(svc)s.Return code: %(ret.status)d Message: " -"%(ret.data)s." -msgstr "" -"Erreur lors de l'obtention de la version : service : %(svc)s. Code retour : " -"%(ret.status)d Message : %(ret.data)s." - -#, python-format -msgid "" -"Error in Operation [%(operation)s] for volume [%(cb_volume)s] in CloudByte " -"storage: [%(cb_error)s], error code: [%(error_code)s]." -msgstr "" -"Erreur dans l'opération [%(operation)s] pour le volume [%(cb_volume)s] dans " -"le stockage CloudByte : [%(cb_error)s], code d'erreur : [%(error_code)s]." - -#, python-format -msgid "Error in SolidFire API response: data=%(data)s" -msgstr "Erreur dans la réponse de l'API SolidFire : data = %(data)s" - -#, python-format -msgid "Error in space-create for %(space)s of size %(size)d GB" -msgstr "" -"Erreur lors de l'opération space-create pour %(space)s avec une taille de " -"%(size)d Go" - -#, python-format -msgid "Error in space-extend for volume %(space)s with %(size)d additional GB" -msgstr "" -"Erreur dans space-extend pour le volume %(space)s avec %(size)d Go " -"additionnels" - -#, python-format -msgid "Error managing volume: %s." -msgstr "Erreur lors de la gestion du volume : %s." 
- -#, python-format -msgid "" -"Error modify replica synchronization: %(sv)s operation: %(operation)s. " -"Return code: %(rc)lu. Error: %(error)s." -msgstr "" -"Erreur lors de la modification de synchronisation de la réplique : %(sv)s " -"opération : %(operation)s. Code retour : %(rc)lu. Erreur : %(error)s." - -#, python-format -msgid "" -"Error modifying Service: %(service)s Return code: %(ret.status)d Message: " -"%(ret.data)s." -msgstr "" -"Erreur lors de la modification du service %(service)s. Code retour : " -"%(ret.status)d Message : %(ret.data)s." - -#, python-format -msgid "" -"Error moving volume: %(vol)s from source project: %(src)s to target project: " -"%(tgt)s Return code: %(ret.status)d Message: %(ret.data)s ." -msgstr "" -"Erreur lors du déplacement du volume %(vol)s du projet source %(src)s vers " -"le projet cible %(tgt)s. Code retour : %(ret.status)d Message : " -"%(ret.data)s." - -msgid "Error not a KeyError." -msgstr "Erreur autre qu'une erreur KeyError." - -msgid "Error not a TypeError." -msgstr "Erreur autre qu'une erreur TypeError." - -#, python-format -msgid "Error occurred when creating cgsnapshot %s." -msgstr "" -"Une erreur est survenue lors de la création de l'instantané de groupe de " -"cohérence %s." - -#, python-format -msgid "Error occurred when deleting cgsnapshot %s." -msgstr "" -"Une erreur est survenue lors de la suppression de l'instantané de groupe de " -"cohérence %s." - -#, python-format -msgid "Error occurred when updating consistency group %s." -msgstr "" -"Une erreur est survenue lors de la mise à jour du groupe de cohérence %s." - -#, python-format -msgid "Error renaming volume %(vol)s: %(err)s." -msgstr "" -"Erreur lors de l'attribution d'un nouveau nom au volume %(vol)s : %(err)s." 
- -#, python-format -msgid "Error response: %s" -msgstr "Réponse d'erreur : %s" - -msgid "Error retrieving volume size" -msgstr "Erreur lors de l'obtention de la taille du volume" - -#, python-format -msgid "" -"Error sending replication update for action id: %(id)s . Return code: " -"%(ret.status)d Message: %(ret.data)s ." -msgstr "" -"Erreur lors de l'envoi de la mise à jour de la réplication pour l'ID " -"d'action : %(id)s. Code retour : %(ret.status)d Message : %(ret.data)s ." - -#, python-format -msgid "" -"Error sending replication update. Returned error: %(err)s. Action: %(id)s." -msgstr "" -"Erreur lors de l'envoi de la mise à jour de la réplication. Erreur " -"renvoyée : %(err)s. Action : %(id)s." - -#, python-format -msgid "" -"Error setting replication inheritance to %(set)s for volume: %(vol)s project " -"%(project)s Return code: %(ret.status)d Message: %(ret.data)s ." -msgstr "" -"Erreur lors de la définition de l'héritage de réplication à %(set)s pour le " -"volume : %(vol)s Projet %(project)s. Code retour : %(ret.status)d Message : " -"%(ret.data)s." - -#, python-format -msgid "" -"Error severing the package: %(package)s from source: %(src)s Return code: " -"%(ret.status)d Message: %(ret.data)s ." -msgstr "" -"Erreur lors de la scission du package %(package)s de la source : %(src)s. " -"Code retour : %(ret.status)d Message : %(ret.data)s ." - -#, python-format -msgid "Error unbinding volume %(vol)s from pool. %(error)s." -msgstr "" -"Erreur de suppression de la liaison du volume %(vol)s du pool. %(error)s." - -#, python-format -msgid "Error while authenticating with switch: %s." -msgstr "Erreur lors de l'authentification avec le commutateur : %s." - -#, python-format -msgid "Error while changing VF context %s." -msgstr "Erreur lors de la modification du contexte VF %s." - -#, python-format -msgid "Error while checking the firmware version %s." -msgstr "Erreur lors de la vérification de la version de microprogramme %s." 
- -#, python-format -msgid "Error while checking transaction status: %s" -msgstr "Erreur de contrôle de l'état de transaction : %s" - -#, python-format -msgid "Error while checking whether VF is available for management %s." -msgstr "" -"Erreur lors de la détermination de la disponibilité de VF pour la gestion : " -"%s." - -#, python-format -msgid "" -"Error while connecting the switch %(switch_id)s with protocol %(protocol)s. " -"Error: %(error)s." -msgstr "" -"Erreur lors de la connexion du commutateur %(switch_id)s avec le protocole " -"%(protocol)s. Erreur : %(error)s." - -#, python-format -msgid "Error while creating authentication token: %s" -msgstr "Erreur lors de la création du jeton d'authentification : %s" - -#, python-format -msgid "Error while creating snapshot [status] %(stat)s - [result] %(res)s." -msgstr "" -"Erreur lors de la création de l'instantané : [status] %(stat)s - [result] " -"%(res)s." - -#, python-format -msgid "Error while creating volume [status] %(stat)s - [result] %(res)s." -msgstr "" -"Erreur lors de la création du volume : [status] %(stat)s - [result] %(res)s." - -#, python-format -msgid "Error while deleting snapshot [status] %(stat)s - [result] %(res)s" -msgstr "" -"Erreur lors de la suppression de l'instantané : [status] %(stat)s - [result] " -"%(res)s" - -#, python-format -msgid "Error while deleting volume [status] %(stat)s - [result] %(res)s." -msgstr "" -"Erreur lors de la suppression du volume : [status] %(stat)s - [result] " -"%(res)s." - -#, python-format -msgid "Error while extending volume [status] %(stat)s - [result] %(res)s." -msgstr "" -"Erreur lors de l'extension du volume [status] %(stat)s - [result] %(res)s." - -#, python-format -msgid "Error while getting %(op)s details, returned code: %(status)s." -msgstr "" -"Erreur lors de l'obtention des détails de %(op)s, code renvoyé : %(status)s." - -#, python-format -msgid "Error while getting data via ssh: (command=%(cmd)s error=%(err)s)." 
-msgstr "" -"Erreur d'obtention de données via SSH : (commande=%(cmd)s erreur=%(err)s)." - -#, python-format -msgid "Error while getting disco information [%s]." -msgstr "Erreur lors de l'obtention des informations disco [%s]." - -#, python-format -msgid "Error while getting nvp value: %s." -msgstr "Erreur lors de l'obtention de la valeur nvp : %s." - -#, python-format -msgid "Error while getting session information %s." -msgstr "Erreur lors de l'obtention des informations de session %s." - -#, python-format -msgid "Error while parsing the data: %s." -msgstr "Erreur lors de l'analyse syntaxique des données : %s." - -#, python-format -msgid "Error while querying page %(url)s on the switch, reason %(error)s." -msgstr "" -"Erreur lors de l'interrogation de la page %(url)s sur le commutateur, " -"raison : %(error)s." - -#, python-format -msgid "" -"Error while removing the zones and cfgs in the zone string: %(description)s." -msgstr "" -"Erreur lors de la suppression de zones et cfgs dans la chaîne de zone : " -"%(description)s." - -#, python-format -msgid "Error while requesting %(service)s API." -msgstr "Erreur lors de la demande de l'API %(service)s." - -#, python-format -msgid "Error while running zoning CLI: (command=%(cmd)s error=%(err)s)." -msgstr "" -"Erreur d'exécution de l'interface CLI de segmentation : (commande=%(cmd)s " -"erreur=%(err)s)." - -#, python-format -msgid "" -"Error while updating the new zones and cfgs in the zone string. Error " -"%(description)s." -msgstr "" -"Erreur lors de la mise à jour des nouvelles zones et cfgs dans la chaîne de " -"zone. Erreur : %(description)s." - -msgid "Error writing field to database" -msgstr "Erreur lors de l'écriture du champ dans la base de données" - -#, python-format -msgid "Error[%(stat)s - %(res)s] while getting volume id." -msgstr "Erreur [%(stat)s - %(res)s] lors de l'obtention de l'ID du volume." 
- -#, python-format -msgid "" -"Error[%(stat)s - %(res)s] while restoring snapshot [%(snap_id)s] into volume " -"[%(vol)s]." -msgstr "" -"Erreur [%(stat)s - %(res)s] lors de la restauration de l'instantané " -"[%(snap_id)s] dans le volume [%(vol)s]." - -#, python-format -msgid "Error[status] %(stat)s - [result] %(res)s] while getting volume id." -msgstr "" -"Erreur [status] %(stat)s - [result] %(res)s] lors de l'obtention de l'ID du " -"volume." - -#, python-format -msgid "" -"Exceeded max scheduling attempts %(max_attempts)d for volume %(volume_id)s" -msgstr "" -"Tentatives de planification max %(max_attempts)d pour le volume %(volume_id)s" - -msgid "Exceeded the limit of snapshots per volume" -msgstr "Nombre limite d'instantanés par volume dépassé" - -#, python-format -msgid "Exception appending meta volume to target volume %(volumename)s." -msgstr "" -"Exception lors de l'ajout des métadonnées de volume au volume cible " -"%(volumename)s." - -#, python-format -msgid "" -"Exception during create element replica. Clone name: %(cloneName)s Source " -"name: %(sourceName)s Extra specs: %(extraSpecs)s " -msgstr "" -"Exception lors de la création d'une réplique de l'élément. Nom de clone : " -"%(cloneName)s Nom source : %(sourceName)s Spécifications supplémentaires : " -"%(extraSpecs)s " - -#, python-format -msgid "Exception in _select_ds_for_volume: %s." -msgstr "Exception dans _select_ds_for_volume : %s." - -#, python-format -msgid "Exception while forming the zone string: %s." -msgstr "Exception lors de la formation de la chaîne de zone : %s." - -#, python-format -msgid "Exception: %s" -msgstr "Exception : %s" - -#, python-format -msgid "Expected a uuid but received %(uuid)s." -msgstr "UUID attendu mais %(uuid)s reçu." - -#, python-format -msgid "Expected exactly one node called \"%s\"" -msgstr "Un seul noeud précis nommé \"%s\" est attendu" - -#, python-format -msgid "Expected integer for node_count, svcinfo lsiogrp returned: %(node)s." 
-msgstr "" -"Un entier est attendu pour node_count, svcinfo lsiogrp a renvoyé : %(node)s." - -#, python-format -msgid "Expected no output from CLI command %(cmd)s, got %(out)s." -msgstr "" -"Aucune sortie n'était attendue de la commande CLI %(cmd)s, %(out)s a été " -"renvoyé." - -#, python-format -msgid "" -"Expected single vdisk returned from lsvdisk when filtering on vdisk_UID. " -"%(count)s were returned." -msgstr "" -"Un disque virtuel unique était attendu de lsvdisk lors du filtrage par " -"vdisk_UID. %(count)s ont été renvoyés." - -#, python-format -msgid "Expected volume size was %d" -msgstr "La taille du volume attendue était %d" - -#, python-format -msgid "" -"Export backup aborted, expected backup status %(expected_status)s but got " -"%(actual_status)s." -msgstr "" -"Exportation de la sauvegarde interrompue, état de la sauvegarde attendu " -"%(expected_status)s, mais état %(actual_status)s obtenu." - -#, python-format -msgid "" -"Export record aborted, the backup service currently configured " -"[%(configured_service)s] is not the backup service that was used to create " -"this backup [%(backup_service)s]." -msgstr "" -"Exportation de l'enregistrement interrompue, le service de sauvegarde " -"actuellement configuré [%(configured_service)s] ne correspond pas au service " -"de sauvegarde utilisé pour créer cette sauvegarde [%(backup_service)s]." - -msgid "Extend volume error." -msgstr "Erreur lors de l'extension du volume." - -msgid "" -"Extend volume is only supported for this driver when no snapshots exist." -msgstr "" -"L'extension de volume n'est prise en charge pour ce pilote que si aucun " -"instantané n'est défini." - -msgid "Extend volume not implemented" -msgstr "Extension du volume non implémentée" - -msgid "FAST is not supported on this array." -msgstr "FAST n'est pas pris en charge sur cette matrice." - -msgid "FC is the protocol but wwpns are not supplied by OpenStack." 
-msgstr "" -"FC est le protocole mais les noms WWPN ne sont pas fournis par OpenStack." - -#, python-format -msgid "Faield to unassign %(volume)s" -msgstr "Echec de la libération de %(volume)s" - -#, python-format -msgid "Fail to create cache volume %(volume)s. Error: %(err)s" -msgstr "Echec de la création du volume cache %(volume)s. Erreur : %(err)s" - -#, python-format -msgid "Failed adding connection for fabric=%(fabric)s: Error: %(err)s" -msgstr "" -"Echec de l'ajout de la connexion pour fabric=%(fabric)s : Erreur : %(err)s" - -msgid "Failed cgsnapshot" -msgstr "Echec de l'instantané de groupe de cohérence" - -#, python-format -msgid "Failed creating snapshot for group: %(response)s." -msgstr "Echec de la création de l'instantané pour le groupe : %(response)s." - -#, python-format -msgid "Failed creating snapshot for volume %(volname)s: %(response)s." -msgstr "" -"Echec de la création d'un instantané pour le volume %(volname)s : " -"%(response)s." - -#, python-format -msgid "Failed getting active zone set from fabric %s." -msgstr "" -"Echec de l'obtention de la zone active définie à partir de l'ensemble de " -"noeuds (fabric) %s." - -#, python-format -msgid "Failed getting details for pool %s." -msgstr "Echec d'obtention des informations sur le pool %s." - -#, python-format -msgid "Failed removing connection for fabric=%(fabric)s: Error: %(err)s" -msgstr "" -"Echec du retrait de la connexion pour fabric=%(fabric)s : Erreur : %(err)s" - -#, python-format -msgid "Failed to Extend Volume %(volname)s" -msgstr "Echec de l'extension du volume %(volname)s" - -#, python-format -msgid "Failed to Login to 3PAR (%(url)s) because %(err)s" -msgstr "Echec de connexion à 3PAR (%(url)s) en raison de %(err)s" - -msgid "Failed to access active zoning configuration." -msgstr "Impossible d'accéder à la configuration de segmentation active." 
- -#, python-format -msgid "Failed to access zoneset status:%s" -msgstr "Impossible d'accéder au statut de l'ensemble de zones : %s" - -#, python-format -msgid "" -"Failed to acquire a resource lock. (serial: %(serial)s, inst: %(inst)s, ret: " -"%(ret)s, stderr: %(err)s)" -msgstr "" -"Echec de l'acquisition d'un verrouillage des ressources. (série : " -"%(serial)s, inst : %(inst)s, ret : %(ret)s, stderr : %(err)s)" - -msgid "Failed to add the logical device." -msgstr "Echec de l'ajout de l'unité logique." - -#, python-format -msgid "" -"Failed to add volume %(volumeName)s to consistency group %(cgName)s. Return " -"code: %(rc)lu. Error: %(error)s." -msgstr "" -"L'ajout du volume %(volumeName)s au groupe de cohérence %(cgName)s n'a pas " -"abouti. Retour : %(rc)lu. Erreur : %(error)s." - -msgid "Failed to add zoning configuration." -msgstr "L'ajout de la configuration de segmentation a échoué." - -#, python-format -msgid "" -"Failed to assign the iSCSI initiator IQN. (port: %(port)s, reason: " -"%(reason)s)" -msgstr "" -"Echec de l'affectation du nom qualifié iSCSI de l'initiateur iSCSI. (port : " -"%(port)s, raison : %(reason)s)" - -#, python-format -msgid "Failed to associate qos_specs: %(specs_id)s with type %(type_id)s." -msgstr "" -"Echec d'association de qos_specs : %(specs_id)s avec spécif. %(type_id)s." - -#, python-format -msgid "Failed to attach iSCSI target for volume %(volume_id)s." -msgstr "Echec de connexion de la cible iSCSI pour le volume %(volume_id)s." - -#, python-format -msgid "Failed to backup volume metadata - %s" -msgstr "Echec de sauvegarde des métadonnées de volume de sauvegarde - %s" - -#, python-format -msgid "" -"Failed to backup volume metadata - Metadata backup object 'backup.%s.meta' " -"already exists" -msgstr "" -"Echec de sauvegarde des métadonnées de volume de sauvegarde - objet de " -"sauvegarde de métadonnées 'backup.%s.meta' existe déjà" - -#, python-format -msgid "Failed to clone volume from snapshot %s." 
-msgstr "Impossible de cloner un volume depuis l'instantané %s." - -#, python-format -msgid "Failed to connect to %(vendor_name)s Array %(host)s: %(err)s" -msgstr "Echec de la connexion à la matrice %(vendor_name)s %(host)s : %(err)s" - -msgid "Failed to connect to Dell REST API" -msgstr "Echec de connexion à l'API REST Dell " - -msgid "Failed to connect to array" -msgstr "Impossible de se connecter à la matrice" - -#, python-format -msgid "Failed to connect to sheep daemon. addr: %(addr)s, port: %(port)s" -msgstr "" -"Echec de la connexion au démon sheep. Adresse : %(addr)s, port : %(port)s" - -#, python-format -msgid "Failed to copy image to volume: %(reason)s" -msgstr "Échec de la copie de l'image vers le volume : %(reason)s" - -#, python-format -msgid "Failed to copy metadata to volume: %(reason)s" -msgstr "Échec de la copie des métadonnées pour le volume : %(reason)s" - -msgid "Failed to copy volume, destination device unavailable." -msgstr "Echec de la copie du volume, l'unité de destination est indisponible." - -msgid "Failed to copy volume, source device unavailable." -msgstr "Echec de la copie du volume, l'unité source est indisponible." - -#, python-format -msgid "Failed to create CG %(cgName)s from snapshot %(cgSnapshot)s." -msgstr "" -"Echec de la création du groupe de cohérence %(cgName)s depuis l'instantané " -"%(cgSnapshot)s." - -#, python-format -msgid "Failed to create IG, %s" -msgstr "Echec de la création du groupe demandeur, %s" - -#, python-format -msgid "Failed to create Volume Group: %(vg_name)s" -msgstr "Impossible de créer le groupe de volumes : %(vg_name)s" - -#, python-format -msgid "" -"Failed to create a file. (file: %(file)s, ret: %(ret)s, stderr: %(err)s)" -msgstr "" -"Echec de la création d'un fichier. (fichier : %(file)s, ret : %(ret)s, " -"stderr : %(err)s)" - -#, python-format -msgid "Failed to create a temporary snapshot for volume %s." -msgstr "Impossible de créer un instantané temporaire pour le volume %s." 
- -msgid "Failed to create api volume flow." -msgstr "Echec de la création du flux de volume d'API." - -#, python-format -msgid "Failed to create cg snapshot %(id)s due to %(reason)s." -msgstr "" -"Echec de la création de l'instantané du groupe de cohérence %(id)s. Cause : " -"%(reason)s." - -#, python-format -msgid "Failed to create consistency group %(id)s due to %(reason)s." -msgstr "" -"Echec de la création du groupe de cohérence %(id)s. Cause : %(reason)s." - -#, python-format -msgid "Failed to create consistency group %(id)s:%(ret)s." -msgstr "Echec de la création du groupe de cohérence %(id)s : %(ret)s." - -#, python-format -msgid "" -"Failed to create consistency group %s because VNX consistency group cannot " -"accept compressed LUNs as members." -msgstr "" -"Echec de la création du groupe de cohérence %s car le groupe de cohérence " -"VNX ne peut pas accepter de numéros d'unités logiques compressés en tant que " -"membres." - -#, python-format -msgid "Failed to create consistency group: %(cgName)s." -msgstr "Echec de la création du groupe de cohérence : %(cgName)s." - -#, python-format -msgid "Failed to create consistency group: %(cgid)s. Error: %(excmsg)s." -msgstr "" -"Echec de la création du groupe de cohérence : %(cgid)s. Erreur : %(excmsg)s." - -#, python-format -msgid "" -"Failed to create consistency group: %(consistencyGroupName)s Return code: " -"%(rc)lu. Error: %(error)s." -msgstr "" -"Echec de la création du groupe de cohérence : %(consistencyGroupName)s Code " -"retour : %(rc)lu. Erreur : %(error)s." - -#, python-format -msgid "Failed to create hardware id(s) on %(storageSystemName)s." -msgstr "Echec de la création des ID du matériel sur %(storageSystemName)s." - -#, python-format -msgid "" -"Failed to create host: %(name)s. Please check if it exists on the array." -msgstr "" -"Echec de la création de l'hôte : %(name)s. Vérifiez s'il existe sur la " -"matrice." - -#, python-format -msgid "Failed to create hostgroup: %(name)s. 
Check if it exists on the array." -msgstr "" -"Echec de la création de hostgroup : %(name)s. Vérifiez s'il existe sur la " -"matrice." - -msgid "Failed to create iqn." -msgstr "Echec de la création de l'iqn." - -#, python-format -msgid "Failed to create iscsi target for volume %(volume_id)s." -msgstr "Echec de la création de la cible iSCSI pour le volume %(volume_id)s." - -msgid "Failed to create manage existing flow." -msgstr "Echec de la création du flux de gestion existant." - -msgid "Failed to create manage_existing flow." -msgstr "Echec de la création du flux manage_existing." - -msgid "Failed to create map on mcs, no channel can map." -msgstr "" -"Echec de la création de la mappe sur mcs, aucun canal ne peut être mappé." - -msgid "Failed to create map." -msgstr "Echec de création de la mappe." - -#, python-format -msgid "Failed to create metadata for volume: %(reason)s" -msgstr "Échec de la création de métadonnées pour le volume : %(reason)s" - -msgid "Failed to create partition." -msgstr "Echec de la création de la partition." - -#, python-format -msgid "Failed to create qos_specs: %(name)s with specs %(qos_specs)s." -msgstr "Echec de création de qos_specs : %(name)s avec spécif. %(qos_specs)s." - -msgid "Failed to create replica." -msgstr "Echec de la création de la réplique." - -msgid "Failed to create scheduler manager volume flow" -msgstr "" -"Echec de la création du flux de volume du gestionnaire de planificateur" - -#, python-format -msgid "Failed to create snapshot %s" -msgstr "Echec de la création de l'instantané %s" - -#, python-format -msgid "Failed to create snapshot for cg: %(cgName)s." -msgstr "" -"Echec de la création de l'instantané pour le groupe de cohérence : " -"%(cgName)s." - -#, python-format -msgid "Failed to create snapshot for volume %s." -msgstr "Echec de création d'un instantané pour le volume %s." - -#, python-format -msgid "Failed to create snapshot policy on volume %(vol)s: %(res)s." 
-msgstr "" -"Echec de la création de stratégie d'instantané sur le volume %(vol)s : " -"%(res)s." - -#, python-format -msgid "Failed to create snapshot resource area on volume %(vol)s: %(res)s." -msgstr "" -"Echec de la création de la zone de ressource d'instantané sur le volume " -"%(vol)s: %(res)s." - -msgid "Failed to create snapshot." -msgstr "Echec de la création de l'instantané." - -#, python-format -msgid "" -"Failed to create snapshot. CloudByte volume information not found for " -"OpenStack volume [%s]." -msgstr "" -"Echec de la création d'instantané. Les informations du volume CloudByte sont " -"introuvables pour le volume OpenStack [%s]." - -#, python-format -msgid "Failed to create south bound connector for %s." -msgstr "Impossible de créer le connecteur Southbound pour %s." - -#, python-format -msgid "Failed to create storage group %(storageGroupName)s." -msgstr "La création du groupe de stockage %(storageGroupName)s a échoué." - -#, python-format -msgid "Failed to create thin pool, error message was: %s" -msgstr "Echec de la création du pool à la demande. Message d'erreur : %s" - -#, python-format -msgid "Failed to create volume %s" -msgstr "Echec de création du volume %s" - -#, python-format -msgid "Failed to delete SI for volume_id: %(volume_id)s because it has pair." -msgstr "" -"Echec de la suppression de SI pour volume_id : %(volume_id)s car il a une " -"paire." - -#, python-format -msgid "Failed to delete a logical device. (LDEV: %(ldev)s, reason: %(reason)s)" -msgstr "" -"Echec de la suppression d'une unité logique. (unité logique : %(ldev)s, " -"raison : %(reason)s)" - -#, python-format -msgid "Failed to delete cgsnapshot %(id)s due to %(reason)s." -msgstr "" -"Echec de la suppression de l'instantané du groupe de cohérence %(id)s. " -"Cause : %(reason)s." - -#, python-format -msgid "Failed to delete consistency group %(id)s due to %(reason)s." -msgstr "" -"Echec de la suppression du groupe de cohérence %(id)s. Cause : %(reason)s." 
- -#, python-format -msgid "Failed to delete consistency group: %(cgName)s." -msgstr "Echec de la suppression du groupe de cohérence : %(cgName)s." - -#, python-format -msgid "" -"Failed to delete consistency group: %(consistencyGroupName)s Return code: " -"%(rc)lu. Error: %(error)s." -msgstr "" -"Echec de la suppression du groupe de cohérence : %(consistencyGroupName)s " -"Code retour : %(rc)lu. Erreur : %(error)s." - -msgid "Failed to delete device." -msgstr "Echec de suppression de l'unité." - -#, python-format -msgid "" -"Failed to delete fileset for consistency group %(cgname)s. Error: %(excmsg)s." -msgstr "" -"Echec de suppression de l'ensemble de fichiers pour le groupe de cohérence " -"%(cgname)s. Erreur : %(excmsg)s." - -msgid "Failed to delete iqn." -msgstr "Echec de la suppression de l'iqn." - -msgid "Failed to delete map." -msgstr "Echec de la suppression de la mappe." - -msgid "Failed to delete partition." -msgstr "Echec de la suppression de la partition." - -msgid "Failed to delete replica." -msgstr "Echec de la suppression de la réplique." - -#, python-format -msgid "Failed to delete snapshot %s" -msgstr "Echec de la suppression de l'instantané %s" - -#, python-format -msgid "Failed to delete snapshot for cg: %(cgId)s." -msgstr "" -"Echec de la suppression de l'instantané pour le groupe de cohérence : " -"%(cgId)s." - -#, python-format -msgid "Failed to delete snapshot for snapshot_id: %s because it has pair." -msgstr "" -"Echec de la suppression de l'instantané pour snapshot_id %s car il a une " -"paire." - -msgid "Failed to delete snapshot." -msgstr "Echec de la suppression de l'instantané." - -#, python-format -msgid "Failed to delete volume %(volumeName)s." -msgstr "Echec de suppression du volume %(volumeName)s." - -#, python-format -msgid "" -"Failed to delete volume for volume_id: %(volume_id)s because it has pair." -msgstr "" -"Echec de la suppression du volume pour volume_id : %(volume_id)s car il a " -"une paire." 
- -#, python-format -msgid "Failed to detach iSCSI target for volume %(volume_id)s." -msgstr "Echec de déconnexion de la cible iSCSI pour le volume %(volume_id)s." - -msgid "Failed to determine blockbridge API configuration" -msgstr "Impossible de déterminer la configuration de l'API Blockbridge" - -msgid "Failed to disassociate qos specs." -msgstr "Echec de la dissociation des spécifications QoS." - -#, python-format -msgid "Failed to disassociate qos_specs: %(specs_id)s with type %(type_id)s." -msgstr "" -"Echec de dissociation de qos_specs : %(specs_id)s avec spécif. %(type_id)s." - -#, python-format -msgid "" -"Failed to ensure snapshot resource area, could not locate volume for id %s" -msgstr "" -"Impossible de réserver la zone de ressource d'instantané, impossible de " -"localiser le volume pour l'ID %s" - -msgid "Failed to establish connection with Coho cluster" -msgstr "Connexion impossible à établir avec le cluster Coho " - -#, python-format -msgid "" -"Failed to execute CloudByte API [%(cmd)s]. Http status: %(status)s, Error: " -"%(error)s." -msgstr "" -"Echec de l'exécution de l'API CloudByte [%(cmd)s]. Statut HTTP : %(status)s. " -"Erreur : %(error)s." - -msgid "Failed to execute common command." -msgstr "Echec de l'exécution de la commande common." - -#, python-format -msgid "Failed to export for volume: %(reason)s" -msgstr "Echec de l'exportation pour le volume : %(reason)s" - -#, python-format -msgid "Failed to extend volume %(name)s, Error msg: %(msg)s." -msgstr "Echec de l'extension du volume %(name)s, message d'erreur : %(msg)s." - -msgid "Failed to find QoSnode" -msgstr "QoSnode introuvable" - -msgid "Failed to find Storage Center" -msgstr "Echec de la recherche de Storage Center" - -msgid "Failed to find a vdisk copy in the expected pool." -msgstr "Copie de disque virtuel introuvable dans le pool attendu." - -msgid "Failed to find account for volume." -msgstr "Impossible de trouver un compte pour le volume." 
- -#, python-format -msgid "Failed to find fileset for path %(path)s, command output: %(cmdout)s." -msgstr "" -"Ensemble de fichiers introuvable pour le chemin %(path)s, sortie de la " -"commande : %(cmdout)s." - -#, python-format -msgid "Failed to find group snapshot named: %s" -msgstr "Impossible de trouver l'instantané de groupe nommé : %s" - -#, python-format -msgid "Failed to find host %s." -msgstr "L'hôte %s est introuvable." - -#, python-format -msgid "Failed to find iSCSI initiator group containing %(initiator)s." -msgstr "" -"Impossible de trouver le groupe d'initiateurs iSCSI contenant %(initiator)s." - -#, python-format -msgid "Failed to get CloudByte account details for account [%s]." -msgstr "Echec de l'obtention des détails CloudByte du compte [%s]." - -#, python-format -msgid "Failed to get LUN target details for the LUN %s" -msgstr "Echec d'obtention des détails cible du numéro d'unité logique %s" - -#, python-format -msgid "Failed to get LUN target details for the LUN %s." -msgstr "" -"Echec de l'obtention des détails de la cible du numéro d'unité logique pour " -"le numéro d'unité logique %s." - -#, python-format -msgid "Failed to get LUN target list for the LUN %s" -msgstr "Echec d'obtention de la liste cible du numéro d'unité logique %s" - -#, python-format -msgid "Failed to get Partition ID for volume %(volume_id)s." -msgstr "Echec d'obtention de l'ID de partition pour le volume %(volume_id)s." - -#, python-format -msgid "Failed to get Raid Snapshot ID from Snapshot %(snapshot_id)s." -msgstr "" -"Echec de l'obtention de l'ID d'instantané Raid à partir de l'instantané " -"%(snapshot_id)s." - -#, python-format -msgid "Failed to get Raid Snapshot ID from snapshot: %(snapshot_id)s." -msgstr "" -"Echec de l'obtention de l'ID d'instantané Raid à partir de l'instantané " -"%(snapshot_id)s." - -msgid "Failed to get SplitMirror." -msgstr "Impossible d'obtenir SplitMirror." - -#, python-format -msgid "" -"Failed to get a storage resource. 
The system will attempt to get the storage " -"resource again. (resource: %(resource)s)" -msgstr "" -"Echec de l'obtention d'une ressource de stockage. Une nouvelle tentative " -"d'obtention de la ressource de stockage sera effectuée. (ressource : " -"%(resource)s)" - -#, python-format -msgid "Failed to get all associations of qos specs %s" -msgstr "" -"Echec de récupération de toutes les associations de spécifications QoS %s" - -msgid "Failed to get channel info." -msgstr "Echec d'obtention des informations du canal." - -#, python-format -msgid "Failed to get code level (%s)." -msgstr "Echec d'obtention du niveau de code (%s)." - -msgid "Failed to get device info." -msgstr "Echec d'obtention des informations de l'unité." - -#, python-format -msgid "Failed to get domain because CPG (%s) doesn't exist on array." -msgstr "" -"Echec de l'obtention du nom de domaine car CPG (%s) n'existe pas dans la " -"matrice." - -msgid "Failed to get image snapshots." -msgstr "Echec d'obtention des instantanés de l'image." - -#, python-format -msgid "Failed to get ip on Channel %(channel_id)s with volume: %(volume_id)s." -msgstr "" -"Echec de l'obtention de l'adresse IP sur le canal %(channel_id)s avec le " -"volume : %(volume_id)s." - -msgid "Failed to get iqn info." -msgstr "Echec d'obtention des informations iqn." - -msgid "Failed to get license info." -msgstr "Echec d'obtention des informations de licence." - -msgid "Failed to get lv info." -msgstr "Echec d'obtention des informations lv." - -msgid "Failed to get map info." -msgstr "Echec d'obtention des informations de la mappe." - -msgid "Failed to get migration task." -msgstr "Echec d'obtention de la tâche de migration." - -msgid "Failed to get model update from clone" -msgstr "Echec d'obtention de la mise à jour du modèle depuis le clone" - -msgid "Failed to get name server info." -msgstr "Echec d'obtention des infos du serveur de noms." - -msgid "Failed to get network info." 
-msgstr "Echec d'obtention des informations réseau." - -#, python-format -msgid "Failed to get new part id in new pool: %(pool_id)s." -msgstr "" -"Impossible d'obtenir l'ID du nouvel élément dans le nouveau pool : " -"%(pool_id)s." - -msgid "Failed to get partition info." -msgstr "Echec d'obtention des informations de la partition." - -#, python-format -msgid "Failed to get pool id with volume %(volume_id)s." -msgstr "Echec de l'obtention de l'ID de pool à partir du volume %(volume_id)s." - -#, python-format -msgid "Failed to get remote copy information for %(volume)s due to %(err)s." -msgstr "" -"Impossible d'obtenir les informations de copie à distance pour %(volume)s en " -"raison de l'erreur : %(err)s." - -#, python-format -msgid "" -"Failed to get remote copy information for %(volume)s. Exception: %(err)s." -msgstr "" -"Impossible d'obtenir des informations de copie à distance pour %(volume)s. " -"Exception : %(err)s." - -msgid "Failed to get replica info." -msgstr "Echec d'obtention des informations de la réplique." - -msgid "Failed to get show fcns database info." -msgstr "Echec de l'obtention des infos de la commande show fcns database." - -#, python-format -msgid "Failed to get size of volume %s" -msgstr "Echec de l'obtention de la taille du volume %s" - -#, python-format -msgid "Failed to get snapshot for volume %s." -msgstr "Impossible d'obtenir un instantané pour le volume %s." - -msgid "Failed to get snapshot info." -msgstr "Echec d'obtention des informations de l'instantané." - -#, python-format -msgid "Failed to get target IQN for the LUN %s" -msgstr "Echec d'obtention de l'IQN cible pour le numéro d'unité logique %s" - -msgid "Failed to get target LUN of SplitMirror." -msgstr "Echec d'obtention du numéro d'unité logique (LUN) de SplitMirror." 
- -#, python-format -msgid "Failed to get target portal for the LUN %s" -msgstr "Echec d'obtention du portail cible pour le numéro d'unité logique %s" - -msgid "Failed to get targets" -msgstr "Echec de l'obtention des cibles" - -msgid "Failed to get wwn info." -msgstr "Echec d'obtention des informations wwn." - -#, python-format -msgid "" -"Failed to get, create or add volume %(volumeName)s to masking view " -"%(maskingViewName)s. The error message received was %(errorMessage)s." -msgstr "" -"Echec de l'obtention, de la création ou de l'ajout du volume %(volumeName)s " -"à la vue de masquage %(maskingViewName)s. Message d'erreur reçu : " -"%(errorMessage)s." - -msgid "Failed to identify volume backend." -msgstr "Echec d'identification du back-end du volume." - -#, python-format -msgid "Failed to link fileset for the share %(cgname)s. Error: %(excmsg)s." -msgstr "" -"Echec de liaison de l'ensemble de fichiers pour le partage %(cgname)s. " -"Erreur : %(excmsg)s." - -#, python-format -msgid "Failed to log on %s Array (invalid login?)." -msgstr "Echec de connexion à la matrice %s (connexion non valide ?)." - -#, python-format -msgid "Failed to login for user %s." -msgstr "Echec de connexion de l'utilisateur %s." - -msgid "Failed to login with all rest URLs." -msgstr "Echec de connexion à toutes les URL Rest." - -#, python-format -msgid "" -"Failed to make a request to Datera cluster endpoint due to the following " -"reason: %s" -msgstr "" -"Echec de la demande au noeud final du cluster Datera pour la raison " -"suivante : %s" - -msgid "Failed to manage api volume flow." -msgstr "Echec de la gestion du flux de volume d'API." - -#, python-format -msgid "" -"Failed to manage existing %(type)s %(name)s, because reported size %(size)s " -"was not a floating-point number." -msgstr "" -"Echec de gestion du volume %(type)s %(name)s existant vu que la taille " -"signalée, %(size)s, n'était pas un nombre en virgule flottante." 
- -#, python-format -msgid "" -"Failed to manage existing volume %(name)s, because of error in getting " -"volume size." -msgstr "" -"Echec de gestion du volume %(name)s existant, en raison de l'erreur lors de " -"obtention de la taille de volume." - -#, python-format -msgid "" -"Failed to manage existing volume %(name)s, because rename operation failed: " -"Error msg: %(msg)s." -msgstr "" -"Echec de gestion du volume %(name)s existant, en raison de l'échec de " -"l'opération Renommer : Message d'erreur : %(msg)s." - -#, python-format -msgid "" -"Failed to manage existing volume %(name)s, because reported size %(size)s " -"was not a floating-point number." -msgstr "" -"Echec de gestion du volume %(name)s existant, en raison de la taille " -"rapportée %(size)s qui n'était pas un nombre à virgule flottante." - -#, python-format -msgid "" -"Failed to manage existing volume due to I/O group mismatch. The I/O group of " -"the volume to be managed is %(vdisk_iogrp)s. I/O groupof the chosen type is " -"%(opt_iogrp)s." -msgstr "" -"Impossible de gérer le volume existant en raison d'un groupe d'E-S non " -"concordant. Le groupe d'E-S du volume à gérer est %(vdisk_iogrp)s. Le groupe " -"d'E-S du type sélectionné est %(opt_iogrp)s." - -#, python-format -msgid "" -"Failed to manage existing volume due to the pool of the volume to be managed " -"does not match the backend pool. Pool of the volume to be managed is " -"%(vdisk_pool)s. Pool of the backend is %(backend_pool)s." -msgstr "" -"Impossible de gérer le volume existant car le pool du volume à gérer ne " -"correspond pas au pool du back-end. Pool du volume à gérer : %(vdisk_pool)s. " -"Pool du back-end : %(backend_pool)s." - -msgid "" -"Failed to manage existing volume due to the volume to be managed is " -"compress, but the volume type chosen is not compress." -msgstr "" -"Impossible de gérer le volume existant car le volume à gérer est \"compress" -"\", alors que le type de volume sélectionné est \"not compress\"." 
- -msgid "" -"Failed to manage existing volume due to the volume to be managed is not " -"compress, but the volume type chosen is compress." -msgstr "" -"Impossible de gérer le volume existant car le volume à gérer est \"not " -"compress\", alors que le type de volume sélectionné est \"compress\"." - -msgid "" -"Failed to manage existing volume due to the volume to be managed is not in a " -"valid I/O group." -msgstr "" -"Impossible de gérer le volume existant car le volume à gérer ne se trouve " -"pas dans un groupe d'E-S valide." - -msgid "" -"Failed to manage existing volume due to the volume to be managed is thick, " -"but the volume type chosen is thin." -msgstr "" -"Impossible de gérer le volume existant car le volume à gérer est de type " -"\"thick\", alors que le type de volume sélectionné est \"thin\"." - -msgid "" -"Failed to manage existing volume due to the volume to be managed is thin, " -"but the volume type chosen is thick." -msgstr "" -"Impossible de gérer le volume existant car le volume à gérer est de type " -"\"thin\", alors que le type de volume sélectionné est \"thick\"." - -#, python-format -msgid "Failed to manage volume %s." -msgstr "Impossible de gérer le volume %s." - -#, python-format -msgid "" -"Failed to map a logical device. (LDEV: %(ldev)s, LUN: %(lun)s, port: " -"%(port)s, id: %(id)s)" -msgstr "" -"Echec du mappage d'une unité logique. (unité logique : %(ldev)s, numéro " -"d'unité logique : %(lun)s, port : %(port)s, ID : %(id)s)" - -msgid "Failed to migrate volume for the first time." -msgstr "Echec de la migration du volume pour la première fois." - -msgid "Failed to migrate volume for the second time." -msgstr "Echec de la migration du volume pour la seconde fois." - -#, python-format -msgid "Failed to move LUN mapping. Return code: %s" -msgstr "" -"Echec du déplacement du mappage de numéro d'unité logique. Code retour : %s" - -#, python-format -msgid "Failed to move volume %s." -msgstr "Impossible de déplacer le volume %s." 
- -#, python-format -msgid "Failed to open a file. (file: %(file)s, ret: %(ret)s, stderr: %(err)s)" -msgstr "" -"Echec de l'ouverture d'un fichier. (fichier : %(file)s, ret : %(ret)s, " -"stderr : %(err)s)" - -#, python-format -msgid "" -"Failed to parse CLI output:\n" -" command: %(cmd)s\n" -" stdout: %(out)s\n" -" stderr: %(err)s." -msgstr "" -"Echec d'analyse de la sortie CLI :\n" -" commande : %(cmd)s\n" -" stdout : %(out)s\n" -" stderr : %(err)s." - -msgid "" -"Failed to parse the configuration option 'keystone_catalog_info', must be in " -"the form ::" -msgstr "" -"Echec de l'analyse syntaxique de l'option de configuration " -"'keystone_catalog_info', doit figurer sous la forme :" -":" - -msgid "" -"Failed to parse the configuration option 'swift_catalog_info', must be in " -"the form ::" -msgstr "" -"Echec de l'analyse syntaxique de l'option de configuration " -"'swift_catalog_info', doit figurer sous la forme :" -":" - -#, python-format -msgid "" -"Failed to perform a zero-page reclamation. (LDEV: %(ldev)s, reason: " -"%(reason)s)" -msgstr "" -"Echec d'une opération zero-page reclamation. (unité logique : %(ldev)s, " -"raison : %(reason)s)" - -#, python-format -msgid "Failed to remove export for volume %(volume)s: %(reason)s" -msgstr "" -"Échec de la suppression de l'export pour le volume %(volume)s : %(reason)s" - -#, python-format -msgid "Failed to remove iscsi target for volume %(volume_id)s." -msgstr "" -"Echec de la suppression de la cible iSCSI pour le volume %(volume_id)s." - -#, python-format -msgid "" -"Failed to remove volume %(volumeName)s from consistency group %(cgName)s. " -"Return code: %(rc)lu. Error: %(error)s." -msgstr "" -"La suppression du volume %(volumeName)s du groupe de cohérence %(cgName)s " -"n'a pas abouti. Code retour : %(rc)lu. Erreur : %(error)s." - -#, python-format -msgid "Failed to remove volume %(volumeName)s from default SG." 
-msgstr "" -"Echec de la suppression du volume %(volumeName)s du groupe de stockage par " -"défaut." - -#, python-format -msgid "Failed to remove volume %(volumeName)s from default SG: %(volumeName)s." -msgstr "" -"Echec de la suppression du volume %(volumeName)s dans le groupe de stockage " -"par défaut : %(volumeName)s." - -#, python-format -msgid "" -"Failed to remove: %(volumename)s. from the default storage group for FAST " -"policy %(fastPolicyName)s." -msgstr "" -"Echec de la suppression de %(volumename)s du groupe de stockage par défaut " -"pour la règle FAST %(fastPolicyName)s." - -#, python-format -msgid "" -"Failed to rename logical volume %(name)s, error message was: %(err_msg)s" -msgstr "" -"Impossible de renommer le volume logique %(name)s. Message d'erreur : " -"%(err_msg)s" - -#, python-format -msgid "Failed to retrieve active zoning configuration %s" -msgstr "Impossible de récupérer la configuration de segmentation active %s" - -#, python-format -msgid "" -"Failed to set CHAP authentication for target IQN %(iqn)s. Details: %(ex)s" -msgstr "" -"Echec de la définition de l'authentification CHAP pour l'IQN cible %(iqn)s. " -"Détails : %(ex)s" - -#, python-format -msgid "Failed to set QoS for existing volume %(name)s, Error msg: %(msg)s." -msgstr "" -"Echec de définition QoS pour le volume existant %(name)s. Message d'erreur : " -"%(msg)s." - -msgid "Failed to set attribute 'Incoming user' for SCST target." -msgstr "" -"Echec de la définition de l'attribut 'Utilisateur entrant' pour la cible " -"SCST." - -msgid "Failed to set partition." -msgstr "Echec de la définition de la partition." - -#, python-format -msgid "" -"Failed to set permissions for the consistency group %(cgname)s. Error: " -"%(excmsg)s." -msgstr "" -"Echec de définition des droits pour le groupe de cohérence %(cgname)s. " -"Erreur : %(excmsg)s." - -#, python-format -msgid "" -"Failed to specify a logical device for the volume %(volume_id)s to be " -"unmapped." 
-msgstr "" -"Impossible de spécifier une unité logique pour le volume %(volume_id)s dont " -"le mappage doit être annulé." - -#, python-format -msgid "" -"Failed to specify a logical device to be deleted. (method: %(method)s, id: " -"%(id)s)" -msgstr "" -"Impossible de spécifier une unité logique à supprimer. (méthode : " -"%(method)s, ID : %(id)s)" - -msgid "Failed to terminate migrate session." -msgstr "Echec de l'arrêt de la session de migration." - -#, python-format -msgid "Failed to unbind volume %(volume)s" -msgstr "La suppression de la liaison du volume %(volume)s a échoué" - -#, python-format -msgid "" -"Failed to unlink fileset for consistency group %(cgname)s. Error: %(excmsg)s." -msgstr "" -"Echec de suppression de liaison de l'ensemble de fichiers pour le groupe de " -"cohérence %(cgname)s. Erreur : %(excmsg)s." - -#, python-format -msgid "Failed to unmap a logical device. (LDEV: %(ldev)s, reason: %(reason)s)" -msgstr "" -"Echec de l'annulation du mappage d'une unité logique. (unité logique : " -"%(ldev)s, raison : %(reason)s)" - -#, python-format -msgid "Failed to update consistency group: %(cgName)s." -msgstr "Echec de la mise à jour du groupe de cohérence : %(cgName)s." - -#, python-format -msgid "Failed to update metadata for volume: %(reason)s" -msgstr "Échec de la mise à jour des métadonnées pour le volume : %(reason)s" - -msgid "Failed to update or delete zoning configuration" -msgstr "" -"Impossible de mettre à jour ou supprimer la configuration de segmentation" - -msgid "Failed to update or delete zoning configuration." -msgstr "" -"Impossible de mettre à jour ou supprimer la configuration de segmentation." - -#, python-format -msgid "Failed to update qos_specs: %(specs_id)s with specs %(qos_specs)s." -msgstr "" -"Echec de mise à jour de qos_specs : %(specs_id)s avec spécif. %(qos_specs)s." - -msgid "Failed to update quota usage while retyping volume." 
-msgstr "" -"Echec de la mise à jour de l'utilisation de quota lors de la confirmation du " -"volume." - -msgid "Failed to update snapshot." -msgstr "Impossible de mettre à jour l'instantané." - -#, python-format -msgid "" -"Failed updating volume %(vol_id)s metadata using the provided %(src_type)s " -"%(src_id)s metadata" -msgstr "" -"Echec de la mise à jour des métadonnées de volume %(vol_id)s avec les " -"métadonnées fournies (%(src_type)s %(src_id)s)" - -#, python-format -msgid "Failure creating volume %s." -msgstr "Echec de création du volume %s." - -#, python-format -msgid "Failure getting LUN info for %s." -msgstr "Echec d'obtention des infos LUN pour %s." - -#, python-format -msgid "Failure moving new cloned LUN to %s." -msgstr "Echec du déplacement du nouveau numéro d'unité logique cloné vers %s." - -#, python-format -msgid "Failure staging LUN %s to tmp." -msgstr "Echec du transfert du numéro d'unité logique %s vers tmp." - -#, python-format -msgid "Fexvisor failed to add volume %(id)s due to %(reason)s." -msgstr "" -"Echec de Fexvisor lors de l'ajout du volume %(id)s. Cause : %(reason)s." - -#, python-format -msgid "" -"Fexvisor failed to join the volume %(vol)s in the group %(group)s due to " -"%(ret)s." -msgstr "" -"Fexvisor n'est pas parvenu à joindre le volume %(vol)s au groupe %(group)s. " -"Motif : %(ret)s." - -#, python-format -msgid "" -"Fexvisor failed to remove the volume %(vol)s in the group %(group)s due to " -"%(ret)s." -msgstr "" -"Fexvisor n'est pas parvenu à retirer le volume %(vol)s du groupe %(group)s. " -"Motif : %(ret)s." - -#, python-format -msgid "Fexvisor failed to remove volume %(id)s due to %(reason)s." -msgstr "" -"Fexvisor n'est pas parvenu à supprimer le volume %(id)s. Motif : %(reason)s." 
- -#, python-format -msgid "Fibre Channel SAN Lookup failure: %(reason)s" -msgstr "Echec de recherche de réseau SAN Fibre Channel : %(reason)s" - -#, python-format -msgid "Fibre Channel Zone operation failed: %(reason)s" -msgstr "Echec d'opération de zone Fibre Channel : %(reason)s" - -#, python-format -msgid "Fibre Channel connection control failure: %(reason)s" -msgstr "Echec de contrôle de la connexion Fibre Channel : %(reason)s" - -#, python-format -msgid "File %(file_path)s could not be found." -msgstr "Fichier %(file_path)s introuvable." - -#, python-format -msgid "File %(path)s has invalid backing file %(bfile)s, aborting." -msgstr "" -"Le fichier %(path)s dispose d'un fichier de sauvegarde non valide : " -"%(bfile)s. L'opération est abandonnée." - -#, python-format -msgid "File already exists at %s." -msgstr "Le fichier existe déjà dans %s." - -#, python-format -msgid "File already exists at: %s" -msgstr "Le fichier existe déjà dans : %s" - -msgid "Find host in hostgroup error." -msgstr "Erreur lors de la recherche de l'hôte dans hostgroup." - -msgid "Find host lun id error." -msgstr "Erreur lors de la recherche de l'ID de numéro d'unité logique hôte." - -msgid "Find lun group from mapping view error." -msgstr "" -"Erreur lors de la recherche de groupe de numéros d'unité logique dans la vue " -"de mappage." - -msgid "Find mapping view error." -msgstr "Erreur lors de la recherche de la vue de mappage." - -msgid "Find portgroup error." -msgstr "Erreur lors de la recherche de portgroup." - -msgid "Find portgroup from mapping view error." -msgstr "Erreur lors de la recherche de portgroup dans la vue de mappage." - -#, python-format -msgid "" -"Flash Cache Policy requires WSAPI version '%(fcache_version)s' version " -"'%(version)s' is installed." -msgstr "" -"Flash Cache Policy nécessite que WSAPI version '%(fcache_version)s', version " -"'%(version)s' soit installé." - -#, python-format -msgid "Flexvisor assign volume failed.:%(id)s:%(status)s." 
-msgstr "" -"Echec de Flexvisor lors de l'affectation de volume : %(id)s : %(status)s." - -#, python-format -msgid "Flexvisor assign volume failed:%(id)s:%(status)s." -msgstr "" -"Echec de Flexvisor lors de l'affectation de volume : %(id)s : %(status)s." - -#, python-format -msgid "" -"Flexvisor could not find volume %(id)s snapshot in the group %(vgid)s " -"snapshot %(vgsid)s." -msgstr "" -"Flexvisor n'a pas trouvé l'instantané %(id)s du volume dans l'instantané " -"%(vgsid)s du groupe %(vgid)s." - -#, python-format -msgid "Flexvisor create volume failed.:%(volumeid)s:%(status)s." -msgstr "" -"Echec de Flexvisor lors de la création de volume : %(volumeid)s : %(status)s." - -#, python-format -msgid "Flexvisor failed deleting volume %(id)s: %(status)s." -msgstr "" -"Echec de Flexvisor lors de la suppression du volume %(id)s : %(status)s." - -#, python-format -msgid "Flexvisor failed to add volume %(id)s to group %(cgid)s." -msgstr "" -"Echec de Flexvisor lors de l'ajout du volume %(id)s au groupe %(cgid)s." - -#, python-format -msgid "" -"Flexvisor failed to assign volume %(id)s due to unable to query status by " -"event id." -msgstr "" -"Flexvisor n'a pas affecté le volume %(id)s car il ne peut demander l'état de " -"la requête par ID d'événement." - -#, python-format -msgid "Flexvisor failed to assign volume %(id)s: %(status)s." -msgstr "" -"Echec de Flexvisor lors de l'affectation du volume %(id)s : %(status)s." - -#, python-format -msgid "Flexvisor failed to assign volume %(volume)s iqn %(iqn)s." -msgstr "" -"Echec de Flexvisor lors de l'affectation du volume %(volume)s IQN %(iqn)s." - -#, python-format -msgid "Flexvisor failed to clone volume %(id)s: %(status)s." -msgstr "Echec de Flexvisor lors du clonage du volume %(id)s : %(status)s." - -#, python-format -msgid "Flexvisor failed to clone volume (failed to get event) %(id)s." -msgstr "" -"Echec de Flexvisor lors du clonage du volume (échec de l'obtention de " -"l'événement) %(id)s." 
- -#, python-format -msgid "Flexvisor failed to create snapshot for volume %(id)s: %(status)s." -msgstr "" -"Echec de Flexvisor lors de la création de l'instantané pour le volume " -"%(id)s : %(status)s." - -#, python-format -msgid "" -"Flexvisor failed to create snapshot for volume (failed to get event) %(id)s." -msgstr "" -"Echec de Flexvisor lors de la création de l'instantané pour le volume (échec " -"de l'obtention de l'événement) %(id)s." - -#, python-format -msgid "Flexvisor failed to create volume %(id)s in the group %(vgid)s." -msgstr "" -"Echec de Flexvisor lors de la création du volume %(id)s dans le groupe " -"%(vgid)s." - -#, python-format -msgid "Flexvisor failed to create volume %(volume)s: %(status)s." -msgstr "" -"Echec de Flexvisor lors de la création du volume %(volume)s : %(status)s." - -#, python-format -msgid "Flexvisor failed to create volume (get event) %s." -msgstr "" -"Echec de Flexvisor lors de la création du volume (obtention de l'événement) " -"%s." - -#, python-format -msgid "Flexvisor failed to create volume from snapshot %(id)s: %(status)s." -msgstr "" -"Echec de Flexvisor lors de la création du volume à partir de l'instantané " -"%(id)s:%(status)s." - -#, python-format -msgid "Flexvisor failed to create volume from snapshot %(id)s:%(status)s." -msgstr "" -"Echec de Flexvisor lors de la création du volume à partir de l'instantané " -"%(id)s:%(status)s." - -#, python-format -msgid "" -"Flexvisor failed to create volume from snapshot (failed to get event) %(id)s." -msgstr "" -"Echec de Flexvisor lors de la création du volume à partir de l'instantané " -"(échec de l'obtention de l'événement) %(id)s." - -#, python-format -msgid "Flexvisor failed to delete snapshot %(id)s: %(status)s." -msgstr "" -"Echec de Flexvisor lors de la suppression de l'instantané %(id)s : " -"%(status)s." - -#, python-format -msgid "Flexvisor failed to delete snapshot (failed to get event) %(id)s." 
-msgstr ""
-"Echec de Flexvisor lors de la suppression de l'instantané (échec de "
-"l'obtention de l'événement) %(id)s."
-
-#, python-format
-msgid "Flexvisor failed to delete volume %(id)s: %(status)s."
-msgstr ""
-"Echec de Flexvisor lors de la suppression du volume %(id)s : %(status)s."
-
-#, python-format
-msgid "Flexvisor failed to extend volume %(id)s: %(status)s."
-msgstr ""
-"Echec de Flexvisor lors de l'extension du volume %(id)s : %(status)s."
-
-#, python-format
-msgid "Flexvisor failed to extend volume %(id)s:%(status)s."
-msgstr ""
-"Echec de Flexvisor lors de l'extension du volume %(id)s : %(status)s."
-
-#, python-format
-msgid "Flexvisor failed to extend volume (failed to get event) %(id)s."
-msgstr ""
-"Echec de Flexvisor lors de l'extension du volume (échec de l'obtention de "
-"l'événement) %(id)s."
-
-#, python-format
-msgid "Flexvisor failed to get pool info %(id)s: %(status)s."
-msgstr ""
-"Echec de Flexvisor lors de l'obtention des informations de pool %(id)s : "
-"%(status)s."
-
-#, python-format
-msgid ""
-"Flexvisor failed to get snapshot id of volume %(id)s from group %(vgid)s."
-msgstr ""
-"Echec de Flexvisor lors de l'obtention de l'ID instantané du volume %(id)s à "
-"partir du groupe %(vgid)s."
-
-#, python-format
-msgid "Flexvisor failed to remove volume %(id)s from group %(cgid)s."
-msgstr ""
-"Flexvisor n'est pas parvenu à supprimer le volume %(id)s du groupe %(cgid)s."
-
-#, python-format
-msgid "Flexvisor failed to spawn volume from snapshot %(id)s:%(status)s."
-msgstr ""
-"Echec de Flexvisor lors de la génération du volume à partir de l'instantané "
-"%(id)s:%(status)s."
-
-#, python-format
-msgid ""
-"Flexvisor failed to spawn volume from snapshot (failed to get event) %(id)s."
-msgstr ""
-"Echec de Flexvisor lors de la génération du volume à partir de l'instantané "
-"(échec de l'obtention de l'événement) %(id)s."
-
-#, python-format
-msgid "Flexvisor failed to unassign volume %(id)s: %(status)s."
-msgstr "" -"Echec de Flexvisor lors de la libération du volume %(id)s : %(status)s." - -#, python-format -msgid "Flexvisor failed to unassign volume (get event) %(id)s." -msgstr "" -"Echec de Flexvisor lors de la libération du volume (obtention de " -"l'événement) %(id)s." - -#, python-format -msgid "Flexvisor failed to unassign volume:%(id)s:%(status)s." -msgstr "" -"Echec de Flexvisor lors de la libération du volume %(id)s : %(status)s." - -#, python-format -msgid "Flexvisor unable to find the source volume %(id)s info." -msgstr "Flexvisor n'a pas trouvé les infos du volume source %(id)s." - -#, python-format -msgid "Flexvisor unassign volume failed:%(id)s:%(status)s." -msgstr "" -"Echec de Flexvisor lors de la libération de volume : %(id)s : %(status)s." - -#, python-format -msgid "Flexvisor volume %(id)s failed to join group %(vgid)s." -msgstr "" -"Le volume Flexvisor %(id)s n'a pas réussi à joindre le groupe %(vgid)s." - -#, python-format -msgid "Folder %s does not exist in Nexenta Store appliance" -msgstr "Le dossier %s n'existe pas dans Nexenta Store Appliance" - -#, python-format -msgid "GPFS is not running, state: %s." -msgstr "GPFS n'est pas en cours d'exécution - état : %s." - -msgid "Gateway VIP is not set" -msgstr "Gateway VIP n'a pas été défini" - -msgid "Get FC ports by port group error." -msgstr "Erreur lors de l'obtention de ports FC par groupe de ports." - -msgid "Get FC ports from array error." -msgstr "Erreur lors de l'obtention des ports FC à partir de la matrice." - -msgid "Get FC target wwpn error." -msgstr "Erreur lors de l'obtention du wwpn FC cible." - -msgid "Get HyperMetroPair error." -msgstr "Erreur lors de l'obtention d'HyperMetroPair." - -msgid "Get LUN group by view error." -msgstr "Erreur lors de l'obtention de groupe LUN par vue." - -msgid "Get LUNcopy information error." -msgstr "Erreur lors de l'obtention des informations LUNcopy." - -msgid "Get QoS id by lun id error." 
-msgstr ""
-"Erreur lors de l'obtention d'ID QoS à partir de l'ID de numéro d'unité "
-"logique."
-
-msgid "Get QoS information error."
-msgstr "Erreur lors de l'obtention des informations QoS."
-
-msgid "Get QoS policy error."
-msgstr "Erreur lors de l'obtention de la stratégie QoS."
-
-msgid "Get SplitMirror error."
-msgstr "Erreur lors de l'obtention de SplitMirror."
-
-msgid "Get active client failed."
-msgstr "L'obtention du client actif a échoué."
-
-msgid "Get array info error."
-msgstr "Erreur lors de l'obtention d'informations sur la matrice."
-
-msgid "Get cache by name error."
-msgstr "Erreur lors de l'obtention du cache à partir du nom."
-
-msgid "Get connected free FC wwn error."
-msgstr "Erreur lors de l'obtention du wwn FC libre connecté."
-
-msgid "Get engines error."
-msgstr "Erreur lors de l'obtention de moteurs."
-
-msgid "Get host initiators info failed."
-msgstr "Erreur lors de l'obtention des informations d'initiateurs hôte."
-
-msgid "Get hostgroup information error."
-msgstr "Erreur lors de l'obtention des informations hostgroup."
-
-msgid ""
-"Get iSCSI port info error, please check the target IP configured in huawei "
-"conf file."
-msgstr ""
-"Erreur lors de l'obtention des informations de port iSCSI. Vérifiez que "
-"l'adresse IP cible a été configurée dans le fichier de configuration huawei."
-
-msgid "Get iSCSI port information error."
-msgstr "Erreur lors de l'obtention des informations de port iSCSI."
-
-msgid "Get iSCSI target port error."
-msgstr "Erreur lors de l'obtention du port iSCSI cible."
-
-msgid "Get lun id by name error."
-msgstr "Erreur d'obtention de l'ID de numéro d'unité logique (lun id) par nom."
-
-msgid "Get lun migration task error."
-msgstr ""
-"Erreur lors de l'obtention de la tâche de migration de numéro d'unité "
-"logique."
-
-msgid "Get lungroup id by lun id error."
-msgstr ""
-"Erreur lors de l'obtention d'ID lungroup à partir de l'ID de numéro d'unité "
-"logique."
-
-msgid "Get lungroup information error."
-msgstr "Erreur lors de l'obtention des informations lungroup."
-
-msgid "Get migration task error."
-msgstr "Erreur lors de l'obtention de la tâche de migration."
-
-msgid "Get pair failed."
-msgstr "L'obtention d'une paire a échoué."
-
-msgid "Get partition by name error."
-msgstr "Erreur lors de l'obtention de la partition à partir du nom."
-
-msgid "Get partition by partition id error."
-msgstr ""
-"Erreur lors de l'obtention de la partition à partir de l'ID de partition."
-
-msgid "Get port group by view error."
-msgstr "Erreur lors de l'obtention de groupe de ports par vue."
-
-msgid "Get port group error."
-msgstr "Erreur lors de l'obtention du groupe de ports."
-
-msgid "Get port groups by port error."
-msgstr "Erreur lors de l'obtention de groupes de ports par port."
-
-msgid "Get ports by port group error."
-msgstr "Erreur lors de l'obtention de ports par groupe de ports."
-
-msgid "Get remote device info failed."
-msgstr "L'obtention d'informations sur l'unité distante a échoué."
-
-msgid "Get remote devices error."
-msgstr "Erreur lors de l'obtention d'unités distantes."
-
-msgid "Get smartcache by cache id error."
-msgstr "Erreur lors de l'obtention de smartcache à partir de l'ID du cache."
-
-msgid "Get snapshot error."
-msgstr "Erreur lors de l'obtention de l'instantané."
-
-msgid "Get snapshot id error."
-msgstr "Erreur lors de l'obtention de l'ID de l'instantané."
-
-msgid "Get target IP error."
-msgstr "Erreur lors de l'obtention de l'adresse IP cible."
-
-msgid "Get target LUN of SplitMirror error."
-msgstr ""
-"Erreur lors de l'obtention du numéro d'unité logique (LUN) cible de "
-"SplitMirror."
-
-msgid "Get views by port group error."
-msgstr "Erreur lors de l'obtention de vues par groupe de ports."
-
-msgid "Get volume by name error."
-msgstr "Erreur lors de l'obtention du volume à partir du nom."
-
-msgid "Get volume error."
-msgstr "Erreur lors de l'obtention du volume."
- -#, python-format -msgid "" -"Glance metadata cannot be updated, key %(key)s exists for volume id " -"%(volume_id)s" -msgstr "" -"Impossible de mettre à jour les métadonnées Glance, la clé %(key)s existe " -"pour l'ID volume %(volume_id)s" - -#, python-format -msgid "Glance metadata for volume/snapshot %(id)s cannot be found." -msgstr "Métadonnées Glance introuvables pour le volume/instantané %(id)s." - -#, python-format -msgid "Gluster config file at %(config)s doesn't exist" -msgstr "Le fichier de configuration Gluster dans %(config)s n'existe pas" - -#, python-format -msgid "Google Cloud Storage api failure: %(reason)s" -msgstr "Echec de l'API Google Cloud Storage : %(reason)s" - -#, python-format -msgid "Google Cloud Storage connection failure: %(reason)s" -msgstr "Echec de connexion de Google Cloud Storage : %(reason)s" - -#, python-format -msgid "Google Cloud Storage oauth2 failure: %(reason)s" -msgstr "Echec d'authentification oauth2 de Google Cloud Storage : %(reason)s" - -#, python-format -msgid "Got bad path information from DRBDmanage! (%s)" -msgstr "Informations de chemin incorrectes reçues de DRBDmanage : (%s)" - -msgid "HBSD error occurs." -msgstr "Une erreur HBSD s'est produite." - -msgid "HPELeftHand url not found" -msgstr "URL HPELeftHand introuvable" - -#, python-format -msgid "" -"Hash block size has changed since the last backup. New hash block size: " -"%(new)s. Old hash block size: %(old)s. Do a full backup." -msgstr "" -"La taille de bloc de hachage a changé depuis la dernière sauvegarde. " -"Nouvelle taille de bloc de hachage : %(new)s. Ancienne taille de bloc de " -"hachage : %(old)s. Effectuez une sauvegarde intégrale." - -#, python-format -msgid "Have not created %(tier_levels)s tier(s)." -msgstr "Vous n'avez pas créé de niveau(x) %(tier_levels)s." - -#, python-format -msgid "Hint \"%s\" not supported." -msgstr "Suggestion \"%s\" non prise en charge." 
- -msgid "Host" -msgstr "Hôte" - -#, python-format -msgid "Host %(host)s could not be found." -msgstr "L'hôte %(host)s est introuvable." - -#, python-format -msgid "" -"Host %(host)s does not match x509 certificate contents: CommonName " -"%(commonName)s." -msgstr "" -"L'hôte %(host)s ne correspond pas au contenu du certificat x509 : CommonName " -"%(commonName)s." - -#, python-format -msgid "Host %s has no FC initiators" -msgstr "L'hôte %s n'a aucun demandeur FC" - -#, python-format -msgid "Host group with name %s not found" -msgstr "Le groupe d'hôtes nommé %s est introuvable" - -#, python-format -msgid "Host group with ref %s not found" -msgstr "Le groupe d'hôtes avec la référence %s est introuvable" - -msgid "Host is NOT Frozen." -msgstr "L'hôte N'EST PAS figé." - -msgid "Host is already Frozen." -msgstr "L'hôte est déjà figé." - -#, python-format -msgid "Host not found. Failed to remove %(service)s on %(host)s." -msgstr "Hôte introuvable. La suppression de %(service)s sur %(host)s a échoué." - -#, python-format -msgid "Host replication_status must be %s to failover." -msgstr "" -"La valeur de replication_status de l'hôte doit être %s pour l'opération de " -"basculement." - -#, python-format -msgid "Host type %s not supported." -msgstr "Type d'hôte %s non pris en charge." - -#, python-format -msgid "Host with ports %(ports)s not found." -msgstr "Aucun hôte avec ports %(ports)s n'a été trouvé." - -msgid "Hypermetro and Replication can not be used in the same volume_type." -msgstr "" -"Hypermetro et Replication ne peuvent pas être utilisés dans le même " -"paramètre volume_type." - -#, python-format -msgid "I/O group %(iogrp)d is not valid; available I/O groups are %(avail)s." -msgstr "" -"Le groupe d'E-S %(iogrp)d n'est pas valide ; les groupes d'E-S disponibles " -"sont %(avail)s." - -msgid "ID" -msgstr "ID" - -msgid "" -"If compression is set to True, rsize must also be set (not equal to -1)." 
-msgstr "" -"Si la compression est définie sur True, rsize doit également être définie " -"(autre que -1). " - -msgid "If nofmtdisk is set to True, rsize must also be set to -1." -msgstr "" -"Si le paramètre nofmtdisk est défini avec la valeur True, rsize doit " -"également être défini avec la valeur -1." - -#, python-format -msgid "" -"Illegal value '%(prot)s' specified for flashsystem_connection_protocol: " -"valid value(s) are %(enabled)s." -msgstr "" -"Valeur non admise '%(prot)s' spécifiée pour " -"flashsystem_connection_protocol : la ou les valeurs valides sont %(enabled)s." - -msgid "Illegal value specified for IOTYPE: 0, 1, or 2." -msgstr "Valeur non conforme indiquée pour IOTYPE : 0, 1 ou 2." - -msgid "Illegal value specified for smarttier: set to either 0, 1, 2, or 3." -msgstr "" -"Valeur non valide spécifiée pour smarttier : définissez-la sur 0, 1, 2 ou 3." - -msgid "" -"Illegal value specified for storwize_svc_vol_grainsize: set to either 32, " -"64, 128, or 256." -msgstr "" -"Valeur non admise spécifiée pour storwize_svc_vol_grainsize : définissez-la " -"sur 32, 64, 128 ou 256." - -msgid "" -"Illegal value specified for thin: Can not set thin and thick at the same " -"time." -msgstr "" -"Valeur incorrecte spécifiée pour thin : vous ne pouvez pas spécifier thin et " -"thick en même temps." - -#, python-format -msgid "Image %(image_id)s could not be found." -msgstr "L'image %(image_id)s est introuvable." - -#, python-format -msgid "Image %(image_id)s is not active." -msgstr "L'image %(image_id)s n'est pas active." - -#, python-format -msgid "Image %(image_id)s is unacceptable: %(reason)s" -msgstr "L'image %(image_id)s est inacceptable : %(reason)s" - -msgid "Image location not present." -msgstr "Emplacement de l'image introuvable." - -#, python-format -msgid "" -"Image virtual size is %(image_size)dGB and doesn't fit in a volume of size " -"%(volume_size)dGB." 
-msgstr "" -"La taille virtuelle de l'image, %(image_size)d Go, ne peut pas être hébergée " -"dans un volume avec une taille de %(volume_size)d Go." - -msgid "" -"ImageBusy error raised while deleting rbd volume. This may have been caused " -"by a connection from a client that has crashed and, if so, may be resolved " -"by retrying the delete after 30 seconds has elapsed." -msgstr "" -"Erreur ImageBusy générée lors de la suppression du volume rbd. Cela peut " -"être dû à une interruption de connexion d'un client et, dans ce cas, il " -"suffit parfois d'effectuer une nouvelle tentative de suppression après un " -"délai de 30 secondes." - -#, python-format -msgid "" -"Import record failed, cannot find backup service to perform the import. " -"Request service %(service)s" -msgstr "" -"Echec d'importation d'enregistrement, service de sauvegarde introuvable pour " -"réaliser l'importation. Service de requête %(service)s" - -msgid "Incorrect request body format" -msgstr "Format de corps de demande incorrect" - -msgid "Incorrect request body format." -msgstr "Format de corps de demande incorrect." - -msgid "Incremental backups exist for this backup." -msgstr "Les sauvegardes incrémentielles existent pour cette sauvegarde." - -#, python-format -msgid "" -"Infortrend CLI exception: %(err)s Param: %(param)s (Return Code: %(rc)s) " -"(Output: %(out)s)" -msgstr "" -"Exception CLI Infortrend : %(err)s Paramètre : %(param)s (Code retour : " -"%(rc)s) (Sortie : %(out)s)" - -msgid "Input volumes or snapshots are invalid." -msgstr "Les volumes ou les instantanés d'entrée ne sont pas valides." - -msgid "Input volumes or source volumes are invalid." -msgstr "Les volumes d'entrée ou les volumes source ne sont pas valides." - -#, python-format -msgid "Instance %(uuid)s could not be found." -msgstr "Instance %(uuid)s introuvable." - -msgid "Insufficient free space available to extend volume." -msgstr "L'espace libre disponible est insuffisant pour l'extension du volume." 
- -msgid "Insufficient privileges" -msgstr "Privilèges insuffisants" - -#, python-format -msgid "Invalid 3PAR Domain: %(err)s" -msgstr "Domaine 3PAR non valide : %(err)s" - -msgid "Invalid ALUA value. ALUA value must be 1 or 0." -msgstr "Valeur ALUA non valide. La valeur de ALUA doit être 1 ou 0." - -msgid "Invalid Ceph args provided for backup rbd operation" -msgstr "Args Ceph non valide pour l'opération rbd de sauvegarde" - -#, python-format -msgid "Invalid CgSnapshot: %(reason)s" -msgstr "Instantané de groupe de cohérence non valide : %(reason)s" - -#, python-format -msgid "Invalid ConsistencyGroup: %(reason)s" -msgstr "Groupe de cohérence non valide : %(reason)s" - -msgid "Invalid ConsistencyGroup: No host to create consistency group" -msgstr "" -"ConsistencyGroup non valide : Aucun hôte pour créer le groupe de cohérence" - -#, python-format -msgid "" -"Invalid HPELeftHand API version found: %(found)s. Version %(minimum)s or " -"greater required for manage/unmanage support." -msgstr "" -"Version de l'API HPELeftHand API non valide détectée : %(found)s. Version " -"%(minimum)s ou ultérieure requise pour la prise en charge des opérations de " -"gestion et d'arrêt de gestion (manage/unmanage)." - -#, python-format -msgid "Invalid IP address format: '%s'" -msgstr "Format d'adresse IP non valide : '%s'" - -#, python-format -msgid "" -"Invalid QoS specification detected while getting QoS policy for volume %s" -msgstr "" -"Spécification QoS non valide détectée lors de l'obtention de la stratégie " -"QoS pour le volume %s" - -#, python-format -msgid "Invalid Replication Target: %(reason)s" -msgstr "Cible de réplication non valide : %(reason)s" - -#, python-format -msgid "" -"Invalid Virtuozzo Storage share specification: %r. Must be: [MDS1[," -"MDS2],...:/][:PASSWORD]." -msgstr "" -"Spécification de partage Virtuozzo Storage non valide : %r. Doit être : " -"[MDS1[,MDS2],...:/][:MOT DE PASSE]." 
- -#, python-format -msgid "Invalid XtremIO version %(cur)s, version %(min)s or up is required" -msgstr "" -"Version XtremIO non valide %(cur)s, version %(min)s ou ultérieure est requise" - -#, python-format -msgid "Invalid allocated quotas defined for the following project quotas: %s" -msgstr "" -"Quotas alloués non valides définis pour les quotas de projet suivants : %s" - -msgid "Invalid argument" -msgstr "Argument non valide" - -msgid "Invalid argument - negative seek offset." -msgstr "Argument non valide - décalage seek négatif." - -#, python-format -msgid "Invalid argument - whence=%s not supported" -msgstr "Argument non valide - whence=%s non pris en charge" - -#, python-format -msgid "Invalid argument - whence=%s not supported." -msgstr "Argument non valide - whence=%s n'est pas pris en charge." - -#, python-format -msgid "Invalid attaching mode '%(mode)s' for volume %(volume_id)s." -msgstr "Mode de connexion '%(mode)s' non valide pour le volume %(volume_id)s." - -#, python-format -msgid "Invalid auth key: %(reason)s" -msgstr "Clé d'auth non valide : %(reason)s" - -#, python-format -msgid "Invalid backup: %(reason)s" -msgstr "Sauvegarde non valide : %(reason)s" - -msgid "Invalid chap user details found in CloudByte storage." -msgstr "" -"Informations d'utilisateur chap non valides détectées dans le stockage " -"CloudByte." - -#, python-format -msgid "Invalid connection initialization response of volume %(name)s" -msgstr "" -"Réponse d'initialisation de connexion non valide pour le volume %(name)s" - -#, python-format -msgid "" -"Invalid connection initialization response of volume %(name)s: %(output)s" -msgstr "" -"Réponse d'initialisation de connexion non valide pour le volume %(name)s : " -"%(output)s" - -#, python-format -msgid "Invalid content type %(content_type)s." 
-msgstr "Le type de contenu %(content_type)s est invalide" - -msgid "Invalid credentials" -msgstr "Données d'identification non valides" - -#, python-format -msgid "Invalid directory: %s" -msgstr "Répertoire non valide : %s" - -#, python-format -msgid "Invalid disk adapter type: %(invalid_type)s." -msgstr "Type de carte d'unité de disque non valide : %(invalid_type)s." - -#, python-format -msgid "Invalid disk backing: %s." -msgstr "Sauvegarde de disque non valide : %s." - -#, python-format -msgid "Invalid disk type: %(disk_type)s." -msgstr "Type de disque non valide : %(disk_type)s." - -#, python-format -msgid "Invalid disk type: %s." -msgstr "Type de disque non valide : %s." - -#, python-format -msgid "Invalid host: %(reason)s" -msgstr "Hôte non valide : %(reason)s" - -#, python-format -msgid "" -"Invalid hpe3parclient version found (%(found)s). Version %(minimum)s or " -"greater required. Run \"pip install --upgrade python-3parclient\" to upgrade " -"the hpe3parclient." -msgstr "" -"Version de hpe3parclient non valide détectée (%(found)s). Version " -"%(minimum)s ou ultérieure requise. Exécutez \"pip install --upgrade " -"python-3parclient\" pour mettre à niveau le client hpe3parclient." - -#, python-format -msgid "" -"Invalid hpelefthandclient version found (%(found)s). Version %(minimum)s or " -"greater required. Run 'pip install --upgrade python-lefthandclient' to " -"upgrade the hpelefthandclient." -msgstr "" -"Version de hpelefthandclient non valide détectée (%(found)s). Version " -"%(minimum)s ou ultérieure requise. Exécutez \"pip install --upgrade python-" -"lefthandclient\" pour mettre à niveau le client hpe3lefthandclient." - -#, python-format -msgid "Invalid image href %(image_href)s." -msgstr "href %(image_href)s d'image non valide." - -msgid "Invalid image identifier or unable to access requested image." -msgstr "" -"Identificateur d'image non valide ou accès impossible à l'image demandée." - -msgid "Invalid imageRef provided." 
-msgstr "imageRef fournie non valide." - -msgid "Invalid input" -msgstr "Entrée incorrecte" - -#, python-format -msgid "Invalid input received: %(reason)s" -msgstr "Entrée invalide reçue : %(reason)s" - -#, python-format -msgid "Invalid is_public filter [%s]" -msgstr "Filtre is_public non valide [%s]" - -#, python-format -msgid "Invalid lun type %s is configured." -msgstr "Le type lun non valide %s est configuré." - -#, python-format -msgid "Invalid metadata size: %(reason)s" -msgstr "Taille de métadonnée invalide : %(reason)s" - -#, python-format -msgid "Invalid metadata: %(reason)s" -msgstr "Métadonnée invalide : %(reason)s" - -#, python-format -msgid "Invalid mount point base: %s" -msgstr "Base du point de montage non valide : %s" - -#, python-format -msgid "Invalid mount point base: %s." -msgstr "Base de point de montage non valide : %s." - -#, python-format -msgid "Invalid new snapCPG name for retype. new_snap_cpg='%s'." -msgstr "Nouveau nom snapCPG non valide pour confirmation. new_snap_cpg='%s'." - -#, python-format -msgid "Invalid port number %(config)s for Coho rpc port" -msgstr "Numéro de port %(config)s non valide pour le port Coho rpc " - -#, python-format -msgid "" -"Invalid prefetch type '%s' is configured. PrefetchType must be in 0,1,2,3." -msgstr "" -"Le paramètre PrefetchType non valide '%s' est configuré. PrefetchType doit " -"avoir la valeur 0, 1, 2 ou 3." - -#, python-format -msgid "Invalid qos specs: %(reason)s" -msgstr "Spécifications QoS non valides : %(reason)s" - -msgid "Invalid request to attach volume to an invalid target" -msgstr "Demande non valide de connexion du volume à une cible non valide" - -msgid "" -"Invalid request to attach volume with an invalid mode. Attaching mode should " -"be 'rw' or 'ro'" -msgstr "" -"Demande non valide de rattachement du volume dans un mode non valide. Le " -"mode d'attachement doit être 'rw' ou 'ro'" - -#, python-format -msgid "Invalid reservation expiration %(expire)s." 
-msgstr "Expiration de réservation non valide %(expire)s." - -msgid "Invalid response header from RPC server" -msgstr "En-tête de réponse non valide du serveur RPC " - -#, python-format -msgid "Invalid secondary id %s." -msgstr "ID secondaire non valide %s." - -msgid "Invalid service catalog json." -msgstr "json de catalogue de service non valide." - -msgid "Invalid sheepdog cluster status." -msgstr "Statut de cluster Sheepdog non valide." - -#, python-format -msgid "Invalid snapshot: %(reason)s" -msgstr "snapshot invalide : %(reason)s" - -#, python-format -msgid "Invalid status: '%s'" -msgstr "Statut non valide : '%s'" - -#, python-format -msgid "Invalid storage pool %s requested. Retype failed." -msgstr "Pool de stockage non valide %s demandé. Echec de la nouvelle saisie." - -#, python-format -msgid "Invalid storage pool %s specificed." -msgstr "Pool de stockage %s non valide spécifié." - -msgid "Invalid storage pool is configured." -msgstr "Un pool de stockage non valide est configuré." - -msgid "Invalid transport type." -msgstr "Type de transport non valide." - -#, python-format -msgid "Invalid update setting: '%s'" -msgstr "Paramètre de mise à jour non valide : '%s'" - -#, python-format -msgid "Invalid value '%s' for force." -msgstr "Valeur invalide '%s' pour le 'forçage'." - -#, python-format -msgid "Invalid value '%s' for force. " -msgstr "Valeur non valide '%s' pour force. " - -#, python-format -msgid "Invalid value '%s' for is_public. Accepted values: True or False." -msgstr "" -"Valeur '%s' non valide pour is_public. Valeurs admises : True ou False." - -#, python-format -msgid "Invalid value '%s' for skip_validation." -msgstr "La valeur '%s' n'est pas valide pour skip_validation." 
- -#, python-format -msgid "Invalid value for 'bootable': '%s'" -msgstr "Valeur non valide pour 'bootable': '%s'" - -#, python-format -msgid "Invalid value for 'force': '%s'" -msgstr "Valeur non valide pour 'force': '%s'" - -#, python-format -msgid "Invalid value for 'readonly': '%s'" -msgstr "Valeur non valide pour 'readonly': '%s'" - -msgid "Invalid value for 'scheduler_max_attempts', must be >=1" -msgstr "Valeur non valide pour 'scheduler_max_attempts', doit être >= 1" - -msgid "Invalid value for NetApp configuration option netapp_host_type." -msgstr "" -"Valeur non valide pour l'option de configuration NetApp netapp_host_type." - -msgid "Invalid value for NetApp configuration option netapp_lun_ostype." -msgstr "" -"Valeur non valide pour l'option de configuration NetApp netapp_lun_ostype." - -#, python-format -msgid "Invalid value for age, %(age)s" -msgstr "Valeur non valide pour 'age', %(age)s" - -#, python-format -msgid "Invalid value: \"%s\"" -msgstr "Valeur non valide : \"%s\"" - -#, python-format -msgid "" -"Invalid volume size provided for create request: %s (size argument must be " -"an integer (or string representation of an integer) and greater than zero)." -msgstr "" -"La taille de volume fournie pour la demande de création n'est pas valide : " -"%s (l'argument de taille doit être un entier (ou représentation de chaîne " -"d'un entier) et supérieur à zéro)." - -#, python-format -msgid "Invalid volume type: %(reason)s" -msgstr "Type de volume non valide : %(reason)s" - -#, python-format -msgid "Invalid volume: %(reason)s" -msgstr "Volume invalide : %(reason)s" - -#, python-format -msgid "" -"Invalid volume: Cannot add volume %(volume_id)s to consistency group " -"%(group_id)s because volume is in an invalid state: %(status)s. Valid states " -"are: ('available', 'in-use')." -msgstr "" -"Volume non valide : Impossible d'ajouter le volume %(volume_id)s au groupe " -"de cohérence %(group_id)s car le volume est dans un état non valide : " -"%(status)s. 
Les états valides sont : ('disponible', 'en cours " -"d'utilisation')." - -#, python-format -msgid "" -"Invalid volume: Cannot add volume %(volume_id)s to consistency group " -"%(group_id)s because volume type %(volume_type)s is not supported by the " -"group." -msgstr "" -"Volume non valide : Impossible d'ajouter le volume %(volume_id)s au groupe " -"de cohérence %(group_id)s car le type de volume %(volume_type)s n'est pas " -"pris en charge par le groupe." - -#, python-format -msgid "" -"Invalid volume: Cannot add volume fake-volume-uuid to consistency group " -"%(group_id)s because volume cannot be found." -msgstr "" -"Volume non valide : Impossible d'ajouter l'uuid de volume fictif au groupe " -"de cohérence %(group_id)s car le volume est introuvable." - -#, python-format -msgid "" -"Invalid volume: Cannot remove volume fake-volume-uuid from consistency group " -"%(group_id)s because it is not in the group." -msgstr "" -"Volume non valide : Impossible de supprimer l'uuid de volume fictif du " -"groupe de cohérence %(group_id)s car il ne se trouve pas dans le groupe." - -#, python-format -msgid "Invalid volume_type passed: %s." -msgstr "volume_type non valide transmis : %s." - -#, python-format -msgid "" -"Invalid volume_type provided: %s (requested type is not compatible; either " -"match source volume, or omit type argument)." -msgstr "" -"Un type de volume non valide a été soumis : %s (le type demandé n'est pas " -"compatible ; il doit correspondre au volume source ou vous devez omettre " -"l'argument type)." - -#, python-format -msgid "" -"Invalid volume_type provided: %s (requested type is not compatible; " -"recommend omitting the type argument)." -msgstr "" -"volume_type non valide indiqué : %s (le type demandé n'est pas compatible ; " -"il est recommandé d'omettre l'argument type)." - -#, python-format -msgid "" -"Invalid volume_type provided: %s (requested type must be supported by this " -"consistency group)." 
-msgstr "" -"volume_type non valide fourni : %s (le type demandé doit être pris en charge " -"par ce groupe de cohérence)." - -#, python-format -msgid "Invalid wwpns format %(wwpns)s" -msgstr "Format de wwpn %(wwpns)s non valide" - -msgid "Invoking web service failed." -msgstr "L'appel du service Web a échoué." - -msgid "Issue encountered waiting for job." -msgstr "Erreur rencontrée durant l'attente du travail." - -msgid "Issue encountered waiting for synchronization." -msgstr "Erreur rencontrée durant l'attente de la synchronisation." - -msgid "" -"Issuing a fail-over failed because replication is not properly configured." -msgstr "" -"Le lancement d'un basculement a échoué car la réplication n'a pas été " -"configurée correctement." - -#, python-format -msgid "Job id not found in CloudByte's create volume [%s] response." -msgstr "" -"ID de travail introuvable dans la réponse CloudByte à Créer un volume [%s]." - -#, python-format -msgid "Job id not found in CloudByte's delete volume [%s] response." -msgstr "" -"ID de travail non trouvé dans la réponse CloudByte à la suppression du " -"volume [%s]." - -msgid "" -"Key names can only contain alphanumeric characters, underscores, periods, " -"colons and hyphens." -msgstr "" -"Les noms de clé peuvent seulement contenir des caractères alphanumériques, " -"des traits de soulignement, des points, des signes deux-points et des traits " -"d'union." - -#, python-format -msgid "KeyError: %s" -msgstr "KeyError : %s" - -msgid "Keystone version 3 or greater must be used to get nested quota support." -msgstr "" -"Keystone version 3 ou ultérieure doit être utilisé pour la prise en charge " -"de quotas imbriqués." - -#, python-format -msgid "LU does not exist for volume: %s" -msgstr "L'unité logique n'existe pas pour le volume : %s" - -msgid "LUN export failed!" -msgstr "L'exportation de numéro d'unité logique a échoué. " - -msgid "LUN map overflow on every channel." 
-msgstr "Dépassement de mappe de numéro d'unité logique sur chaque canal." - -#, python-format -msgid "LUN not found with given ref %s." -msgstr "LUN introuvable avec la réf donnée %s." - -#, python-format -msgid "LUN number is out of bound on channel id: %(ch_id)s." -msgstr "" -"Le nombre de numéros d'unité logique est hors bornes sur l'ID de canal : " -"%(ch_id)s." - -#, python-format -msgid "Last %s cinder syslog entries:-" -msgstr "Dernières entrées Cinder syslog %s :-" - -msgid "LeftHand cluster not found" -msgstr "Cluster LeftHand introuvable" - -msgid "License is unavailable." -msgstr "La licence n'est pas disponible." - -#, python-format -msgid "Line %(dis)d : %(line)s" -msgstr "Ligne %(dis)d : %(line)s" - -msgid "Link path already exists and its not a symlink" -msgstr "Le chemin de lien existe déjà et n'est pas un lien symbolique" - -#, python-format -msgid "Linked clone of source volume not supported in state: %s." -msgstr "Le clone lié du volume source n'est pas pris en charge à l'état : %s." - -msgid "Lock acquisition failed." -msgstr "L'acquisition du verrou a échoué." - -msgid "Logout session error." -msgstr "Erreur de déconnexion de la session." - -msgid "" -"Lookup service not configured. Config option for fc_san_lookup_service needs " -"to specify a concrete implementation of the lookup service." -msgstr "" -"Service de recherche non configuré. L'option de configuration pour " -"fc_san_lookup_service doit indiquer une mise en oeuvre concrète du service " -"de recherche." - -msgid "Lun migration error." -msgstr "Erreur de migration du numéro d'unité logique." - -#, python-format -msgid "" -"MD5 of object: %(object_name)s before: %(md5)s and after: %(etag)s is not " -"same." -msgstr "" -"La valeur MD5 de l'objet : %(object_name)s avant : %(md5)s et après : " -"%(etag)s n'est pas la même." 
- -#, python-format -msgid "MSG_DENIED: %r" -msgstr "MSG_DENIED : %r" - -#, python-format -msgid "MSG_DENIED: AUTH_ERROR: %r" -msgstr "MSG_DENIED : AUTH_ERROR : %r" - -#, python-format -msgid "MSG_DENIED: RPC_MISMATCH: %r" -msgstr "MSG_DENIED : RPC_MISMATCH : %r" - -#, python-format -msgid "Malformed fcns output string: %s" -msgstr "Format de chaîne de sortie fcns incorrect : %s" - -#, python-format -msgid "Malformed message body: %(reason)s" -msgstr "Format de corps de message non valide : %(reason)s" - -#, python-format -msgid "Malformed nameserver string: %s" -msgstr "Chaîne de serveur de noms mal formée : %s" - -msgid "Malformed request body" -msgstr "Format de corps de demande incorrect" - -msgid "Malformed request body." -msgstr "Le corps de la requête est mal-formé." - -msgid "Malformed request url" -msgstr "Format d'URL de demande incorrect" - -#, python-format -msgid "Malformed response to command %(cmd)s: %(reason)s" -msgstr "Format incorrect de la réponse à la commande %(cmd)s : %(reason)s" - -msgid "Malformed scheduler_hints attribute" -msgstr "Format incorrect de l'attribut scheduler_hints" - -#, python-format -msgid "Malformed show fcns database string: %s" -msgstr "Format de chaîne show fcns database incorrect : %s" - -#, python-format -msgid "" -"Malformed zone configuration: (switch=%(switch)s zone_config=" -"%(zone_config)s)." -msgstr "" -"Configuration de zone mal formée : (switch=%(switch)s zone_config=" -"%(zone_config)s)." - -#, python-format -msgid "Malformed zone status: (switch=%(switch)s zone_config=%(zone_config)s)." -msgstr "" -"Format de statut de zone incorrect : (commutateur = %(switch)s, config de " -"zone = %(zone_config)s)." - -msgid "Manage existing get size requires 'id'." -msgstr "" -"L'obtention de la taille dans l'opération de gestion de l'existant requiert " -"l'élément 'id'." - -msgid "Manage existing snapshot not implemented." -msgstr "La gestion d'instantané existant n'est pas implémentée." 
- -#, python-format -msgid "" -"Manage existing volume failed due to invalid backend reference " -"%(existing_ref)s: %(reason)s" -msgstr "" -"Echec de la gestion du volume existant en raison d'une référence de back-end " -"non valide %(existing_ref)s : %(reason)s" - -#, python-format -msgid "Manage existing volume failed due to volume type mismatch: %(reason)s" -msgstr "" -"Echec de la gestion du volume existant en raison de types de volume " -"différents : %(reason)s" - -msgid "Manage existing volume not implemented." -msgstr "Gestion de volume existant non implémentée." - -msgid "Manage existing volume requires 'source-id'." -msgstr "La gestion de volume existant requiert 'source-id'." - -#, python-format -msgid "" -"Manage volume is not supported if FAST is enable. FAST policy: " -"%(fastPolicyName)s." -msgstr "" -"La gestion de volume n'est pas prise en charge si FAST est activé. Stratégie " -"FAST : %(fastPolicyName)s." - -msgid "Managing of snapshots to failed-over volumes is not allowed." -msgstr "" -"La gestion des instantanés sur des volumes basculés n'est pas autorisée." - -msgid "Map info is None due to array version not supporting hypermetro." -msgstr "" -"La valeur de Map info est None car la version de la matrice ne prend pas en " -"charge hypermetro." - -#, python-format -msgid "" -"Mapping %(id)s prepare failed to complete within theallotted %(to)d seconds " -"timeout. Terminating." -msgstr "" -"Echec d'achèvement de la préparation du mappage %(id)s dans le délai alloué " -"de %(to)d secondes. En cours de fermeture." 
-
-#, python-format
-msgid "Masking view %(maskingViewName)s was not deleted successfully"
-msgstr ""
-"Le masquage de la vue %(maskingViewName)s n'a pas été correctement supprimé"
-
-#, python-format
-msgid "Maximum number of backups allowed (%(allowed)d) exceeded"
-msgstr "Nombre maximum de sauvegardes autorisées (%(allowed)d) dépassé"
-
-#, python-format
-msgid "Maximum number of snapshots allowed (%(allowed)d) exceeded"
-msgstr "Nombre maximal d'instantanés autorisés (%(allowed)d) dépassé"
-
-#, python-format
-msgid ""
-"Maximum number of volumes allowed (%(allowed)d) exceeded for quota "
-"'%(name)s'."
-msgstr ""
-"Le nombre maximal de volumes autorisé, (%(allowed)d), a été dépassé pour le "
-"quota '%(name)s'."
-
-#, python-format
-msgid "May specify only one of %s"
-msgstr "Un seul %s doit être spécifié"
-
-msgid "Metadata backup already exists for this volume"
-msgstr "Une sauvegarde de métadonnées existe déjà pour ce volume"
-
-#, python-format
-msgid "Metadata backup object '%s' already exists"
-msgstr "L'objet de sauvegarde des métadonnées '%s' existe déjà"
-
-msgid "Metadata property key blank."
-msgstr "Clé de propriété de métadonnées à blanc."
-
-msgid "Metadata restore failed due to incompatible version"
-msgstr ""
-"Echec de restauration des métadonnées en raison d'une version incompatible"
-
-msgid "Metadata restore failed due to incompatible version."
-msgstr ""
-"Echec de restauration des métadonnées en raison d'une version incompatible."
-
-msgid ""
-"Missing 'purestorage' python module, ensure the library is installed and "
-"available."
-msgstr ""
-"Module Python 'purestorage' manquant. Assurez-vous que la bibliothèque est "
-"installée et disponible."
-
-msgid "Missing Fibre Channel SAN configuration param - fc_fabric_names"
-msgstr "Paramètre de configuration SAN Fibre Channel manquant - fc_fabric_names"
-
-msgid "Missing request body"
-msgstr "Corps de la demande manquant"
-
-msgid "Missing request body."
-msgstr "Corps de la demande manquant."
- -#, python-format -msgid "Missing required element '%s' in request body" -msgstr "Elément requis manquant '%s' dans le corps de la demande" - -#, python-format -msgid "Missing required element '%s' in request body." -msgstr "L'élément requis '%s' est manquant dans le corps de demande." - -msgid "Missing required element 'consistencygroup' in request body." -msgstr "" -"L'élément requis 'consistencygroup' est manquant dans le corps de demande." - -msgid "Missing required element quota_class_set in request body." -msgstr "Elément quota_class_set requis manquant dans le corps de demande." - -msgid "Missing required element snapshot in request body." -msgstr "Elément snapshot requis manquant dans le corps de demande." - -msgid "" -"Multiple SerialNumbers found, when only one was expected for this operation. " -"Please change your EMC config file." -msgstr "" -"Plusieurs SerialNumbers ont été détectés alors qu'un seul était attendu pour " -"cette opération. Modifiez votre fichier de configuration EMC." - -#, python-format -msgid "Multiple copies of volume %s found." -msgstr "Plusieurs copies du volume %s ont été trouvées." - -#, python-format -msgid "Multiple matches found for '%s', use an ID to be more specific." -msgstr "" -"Plusieurs correspondances ont été trouvées pour '%s'. Utilisez un ID pour " -"être plus précis." - -msgid "Multiple profiles found." -msgstr "Plusieurs profils ont été trouvés." 
- -msgid "Must implement a fallback schedule" -msgstr "Doit mettre en oeuvre un calendrier de retrait" - -msgid "Must implement find_retype_host" -msgstr "Doit implémenter find_retype_host" - -msgid "Must implement host_passes_filters" -msgstr "Doit implémenter host_passes_filters" - -msgid "Must implement schedule_create_consistencygroup" -msgstr "Doit implémenter schedule_create_consistencygroup" - -msgid "Must implement schedule_create_volume" -msgstr "Doit implémenter schedule_create_volume" - -msgid "Must implement schedule_get_pools" -msgstr "Doit implémenter schedule_get_pools" - -msgid "Must pass wwpn or host to lsfabric." -msgstr "Doit transmettre le wwpn ou l'hôte à lsfabric." - -msgid "" -"Must run this command as cloud admin using a Keystone policy.json which " -"allows cloud admin to list and get any project." -msgstr "" -"Cette commande doit être utilisée en tant qu'administrateur de cloud (cloud " -"admin) à l'aide d'un fichier Keystone policy.json qui permet à " -"l'administrateur de cloud de répertorier et d'obtenir n'importe quel projet." - -msgid "Must specify 'connector'" -msgstr "Vous devez spécifier 'connector'" - -msgid "Must specify 'connector'." -msgstr "Vous devez spécifier le 'connector'" - -msgid "Must specify 'host'." -msgstr "Vous devez spécifier l' 'host'" - -msgid "Must specify 'new_volume'" -msgstr "Vous devez spécifier 'new_volume'" - -msgid "Must specify 'status'" -msgstr "Vous devez spécifier 'status'" - -msgid "" -"Must specify 'status', 'attach_status' or 'migration_status' for update." -msgstr "" -"Indiquer 'status', 'attach_status' ou 'migration_status' pour la mise à jour." - -msgid "Must specify a valid attach status" -msgstr "Indiquer un état de connexion valide" - -msgid "Must specify a valid migration status" -msgstr "Indiquer un état de migration valide" - -#, python-format -msgid "Must specify a valid persona %(valid)s,value '%(persona)s' is invalid." 
-msgstr "" -"Spécifiez une personnalité valide %(valid)s, la valeur '%(persona)s' n'est " -"pas valide." - -#, python-format -msgid "" -"Must specify a valid provisioning type %(valid)s, value '%(prov)s' is " -"invalid." -msgstr "" -"Spécifiez un type de mise à disposition valide %(valid)s, la valeur " -"'%(prov)s' n'est pas valide." - -msgid "Must specify a valid status" -msgstr "Indiquer un état valide" - -msgid "Must specify an ExtensionManager class" -msgstr "Vous devez définir une classe ExtensionManager" - -msgid "Must specify bootable in request." -msgstr "Doit indiquer bootable dans la demande." - -msgid "Must specify protection domain name or protection domain id." -msgstr "Vous devez spécifier un nom ou un ID de domaine de protection." - -msgid "Must specify readonly in request." -msgstr "Doit indiquer readonly dans la demande." - -msgid "Must specify snapshot source-name or source-id." -msgstr "" -"Vous devez spécifier la valeur source-name ou source-id de l'instantané." - -msgid "Must specify source-name or source-id." -msgstr "Vous devez spécifier source-name ou source-id." - -msgid "Must specify storage pool name or id." -msgstr "Vous devez spécifier le nom ou l'ID du pool de stockage." - -msgid "Must specify storage pools. Option: sio_storage_pools." -msgstr "" -"Vous devez spécifier des pools de stockage. Option : sio_storage_pools." - -msgid "Must supply a positive, non-zero value for age" -msgstr "Une valeur positive différente de zéro doit être indiquée pour age" - -#, python-format -msgid "" -"NAS config '%(name)s=%(value)s' invalid. Must be 'auto', 'true', or 'false'" -msgstr "" -"La configuration NAS '%(name)s=%(value)s' n'est pas valide. Doit être " -"'auto', 'true' ou 'false'" - -#, python-format -msgid "NFS config file at %(config)s doesn't exist" -msgstr "Fichier de configuration NFS dans %(config)s n'existe pas" - -#, python-format -msgid "NFS file %s not discovered." -msgstr "Fichier NFS %s non reconnu." 
- -msgid "NFS file could not be discovered." -msgstr "Découverte impossible du fichier NFS." - -msgid "NaElement name cannot be null." -msgstr "Le nom NaElement ne peut pas avoir la valeur Null." - -msgid "Name" -msgstr "Nom" - -msgid "" -"Name, description, add_volumes, and remove_volumes can not be all empty in " -"the request body." -msgstr "" -"Nom, description, add_volumes et remove_volumes ne peuvent pas être tous " -"vides dans le corps de la demande." - -msgid "Need non-zero volume size" -msgstr "Taille de volume non nulle nécessaire" - -#, python-format -msgid "Neither MSG_DENIED nor MSG_ACCEPTED: %r" -msgstr "Ni MSG_DENIED ni MSG_ACCEPTED : %r" - -msgid "NetApp Cinder Driver exception." -msgstr "Exception de pilote NetApp Cinder." - -#, python-format -msgid "" -"New size for extend must be greater than current size. (current: %(size)s, " -"extended: %(new_size)s)." -msgstr "" -"La nouvelle taille pour l'extension doit être supérieure à la taille " -"actuelle. (Taille actuelle : %(size)s, taille après l'extension : " -"%(new_size)s)." - -#, python-format -msgid "" -"New size should be bigger than the real size from backend storage. realsize: " -"%(oldsize)s, newsize: %(newsize)s." -msgstr "" -"La nouvelle taille doit être supérieure à la taille réelle du stockage du " -"back-end. Taille réelle : %(oldsize)s, nouvelle taille : %(newsize)s." - -msgid "New volume size must be specified as an integer." -msgstr "La nouvelle taille du volume doit être indiquée comme entier." - -msgid "New volume type must be specified." -msgstr "Le nouveau type de volume doit être indiqué." - -msgid "New volume type not specified in request_spec." -msgstr "Nouveau type de volume non indiqué dans request_spec." - -msgid "Nimble Cinder Driver exception" -msgstr "Exception du pilote Nimble Cinder" - -msgid "No FC initiator can be added to host." -msgstr "Aucun initiateur FC ne peut être ajouté à l'hôte." - -msgid "No FC port connected to fabric." 
-msgstr "Aucun port FC n'est connecté à l'ensemble de noeuds (fabric)." - -msgid "No FCP targets found" -msgstr "Aucune cible FCP détectée" - -msgid "No Port Group elements found in config file." -msgstr "" -"Aucun élément de groupe de ports n'a été trouvé dans le fichier de " -"configuration." - -msgid "No VF ID is defined in the configuration file." -msgstr "Aucun ID VF n'est défini dans le fichier de configuration." - -msgid "No active iSCSI portals with supplied iSCSI IPs" -msgstr "Il n'existe aucun portail iSCSI actif dans les IP iSCSI soumis" - -#, python-format -msgid "No available service named %s" -msgstr "Aucun service disponible dénommé %s" - -#, python-format -msgid "No backup with id %s" -msgstr "Aucune sauvegarde avec l'ID %s" - -msgid "No backups available to do an incremental backup." -msgstr "Aucune sauvegarde disponible pour faire une sauvegarde incrémentielle." - -msgid "No big enough free disk" -msgstr "Aucun disque libre de taille suffisante" - -#, python-format -msgid "No cgsnapshot with id %s" -msgstr "Aucun instantané de groupe de cohérence avec l'ID %s" - -msgid "No cinder entries in syslog!" -msgstr "Aucune entrée Cinder dans syslog" - -#, python-format -msgid "No cloned LUN named %s found on the filer" -msgstr "Aucun LUN cloné nommé %s détecté dans le gestionnaire de fichiers" - -msgid "No config node found." -msgstr "Aucun noeud de configuration n'a été trouvé." - -#, python-format -msgid "No consistency group with id %s" -msgstr "Aucun groupe de cohérence avec l'ID %s" - -#, python-format -msgid "No element by given name %s." -msgstr "Aucun élément du nom indiqué %s." - -msgid "No errors in logfiles!" -msgstr "Aucune erreur dans le fichier de log !" - -#, python-format -msgid "No file found with %s as backing file." -msgstr "Aucun fichier trouvé avec %s comme fichier de sauvegarde." - -#, python-format -msgid "" -"No free LUN IDs left. Maximum number of volumes that can be attached to host " -"(%s) has been exceeded." 
-msgstr "" -"Il ne reste aucun ID de numéro d'unité logique libre. Le nombre maximal de " -"volumes pouvant être rattachés à l'hôte (%s) a été dépassé." - -msgid "No free disk" -msgstr "Aucun disque libre" - -#, python-format -msgid "No good iscsi portal found in supplied list for %s." -msgstr "Aucun portail iSCSI valide détecté pour %s dans la liste fournie." - -#, python-format -msgid "No good iscsi portals found for %s." -msgstr "Aucun portail iSCSI valide détecté pour %s." - -#, python-format -msgid "No host to create consistency group %s." -msgstr "Aucun hôte pour créer le groupe de cohérence %s." - -msgid "No iSCSI-enabled ports on target array." -msgstr "Aucun port activé pour iSCSI n'existe sur la matrice cible." - -msgid "No image_name was specified in request." -msgstr "image_name non défini dans la demande." - -msgid "No initiator connected to fabric." -msgstr "Aucun initiateur n'est connecté à l'ensemble de noeuds (fabric)." - -#, python-format -msgid "No initiator group found for initiator %s" -msgstr "Aucun groupe demandeur trouvé pour le demandeur %s" - -msgid "No initiators found, cannot proceed" -msgstr "Aucun initiateur détecté : poursuite impossible" - -#, python-format -msgid "No interface found on cluster for ip %s" -msgstr "Aucune interface trouvée dans le cluster pour l'IP %s" - -msgid "No ip address found." -msgstr "Aucune adresse IP n'a été trouvée." - -msgid "No iscsi auth groups were found in CloudByte." -msgstr "Aucun groupe d'authentification iscsi n'a été trouvé dans CloudByte." - -msgid "No iscsi initiators were found in CloudByte." -msgstr "Aucun initiateur iSCSI détecté dans CloudByte." - -#, python-format -msgid "No iscsi service found for CloudByte volume [%s]." -msgstr "Aucun service iSCSI détecté pour le volume CloudByte [%s]." - -msgid "No iscsi services found in CloudByte storage." -msgstr "Aucun service iSCSI détecté dans le système de stockage CloudByte." 
- -#, python-format -msgid "No key file specified and unable to load key from %(cert)s %(e)s." -msgstr "" -"Aucun fichier de clés indiqué et impossible de charger la clé depuis " -"%(cert)s %(e)s." - -msgid "No mounted Gluster shares found" -msgstr "Aucun partage Gluster monté trouvé" - -msgid "No mounted NFS shares found" -msgstr "Aucun partage NFS monté trouvé" - -msgid "No mounted SMBFS shares found." -msgstr "Aucun partage SMBFS monté trouvé." - -msgid "No mounted Virtuozzo Storage shares found" -msgstr "Aucun partage de stockage Virtuozzo monté n'a été trouvé" - -msgid "No mounted shares found" -msgstr "Aucun partage monté trouvé" - -#, python-format -msgid "No node found in I/O group %(gid)s for volume %(vol)s." -msgstr "" -"Aucun noeud n'a été détecté dans le groupe d'E-S %(gid)s pour le volume " -"%(vol)s." - -msgid "" -"No pools are available for provisioning volumes. Ensure that the " -"configuration option netapp_pool_name_search_pattern is set correctly." -msgstr "" -"Aucun pool n'est disponible pour l'allocation de volumes. Vérifiez que " -"l'option de configuration netapp_pool_name_search_pattern a été définie " -"correctement." - -msgid "" -"No response was received from CloudByte storage list iSCSI auth user API " -"call." -msgstr "" -"Aucune réponse n'a été reçue de l'appel d'API CloudByte de liste " -"d'utilisateur d'authentification iSCSI." - -msgid "No response was received from CloudByte storage list tsm API call." -msgstr "" -"Aucune réponse n'a été reçue de CloudByte pour l'affichage de l'appel API " -"TSM du système de stockage." - -msgid "No response was received from CloudByte's list filesystem api call." -msgstr "" -"Aucune réponse n'a été reçue de CloudByte pour l'affichage de l'appel API du " -"système de fichiers." 
-
-msgid "No service VIP configured and no nexenta_client_address"
-msgstr ""
-"Aucune adresse IP virtuelle (VIP) de service configurée et aucun élément "
-"nexenta_client_address"
-
-#, python-format
-msgid "No snap found with %s as backing file."
-msgstr "Aucun instantané trouvé avec %s comme fichier de sauvegarde."
-
-#, python-format
-msgid "No snapshot image found in snapshot group %s."
-msgstr ""
-"Aucune image instantanée n'a été trouvée dans le groupe d'instantanés %s."
-
-#, python-format
-msgid "No snapshots could be found on volume %s."
-msgstr "Aucun instantané n'a été trouvé sur le volume %s."
-
-#, python-format
-msgid "No source snapshots provided to create consistency group %s."
-msgstr ""
-"Aucun instantané source n'a été fourni pour créer le groupe de cohérence %s."
-
-#, python-format
-msgid "No storage path found for export path %s"
-msgstr "Aucun chemin de stockage trouvé pour le chemin d'exportation %s"
-
-#, python-format
-msgid "No such QoS spec %(specs_id)s."
-msgstr "Aucune spécif. QoS du type %(specs_id)s."
-
-msgid "No suitable discovery ip found"
-msgstr "Aucune adresse IP correcte trouvée"
-
-#, python-format
-msgid "No support to restore backup version %s"
-msgstr ""
-"Aucune prise en charge pour la restauration de la version de sauvegarde %s"
-
-#, python-format
-msgid "No target id found for volume %(volume_id)s."
-msgstr "Aucun ID cible trouvé pour le volume %(volume_id)s."
-
-msgid ""
-"No unused LUN IDs are available on the host; multiattach is enabled which "
-"requires that all LUN IDs to be unique across the entire host group."
-msgstr ""
-"Aucun ID de numéro d'unité logique inutilisé n'est disponible sur l'hôte ; "
-"multiattach est activé, ce qui requiert que tous les ID de numéro d'unité "
-"logique soient uniques à travers le groupe d'hôtes complet."
-
-#, python-format
-msgid "No valid host was found. %(reason)s"
-msgstr "Aucun hôte valide n'a été trouvé. 
%(reason)s" - -#, python-format -msgid "No valid hosts for volume %(id)s with type %(type)s" -msgstr "Aucun hôte valide pour le volume %(id)s de type %(type)s" - -#, python-format -msgid "No vdisk with the UID specified by ref %s." -msgstr "Aucun disque virtuel (vdisk) avec l'UID indiqué par ref %s." - -#, python-format -msgid "No views found for LUN: %s" -msgstr "Aucune vue n'a été trouvée pour le numéro d'unité logique (LUN) : %s" - -#, python-format -msgid "" -"No volume on cluster with vserver %(vserver)s and junction path %(junction)s " -msgstr "" -"Aucun volume dans le cluster contenant le serveur virtuel %(vserver)s et le " -"chemin de jonction %(junction)s " - -msgid "No volume service(s) started successfully, terminating." -msgstr "Aucun service de volume n'a pu démarrer, abandon." - -msgid "No volume was found at CloudByte storage." -msgstr "Aucun volume n'a été détecté dans le système de stockage CloudByte." - -msgid "No volume_type should be provided when creating test replica." -msgstr "" -"Aucun volume_type ne doit être fourni lors de la création de la réplique de " -"test." - -msgid "No volumes found in CloudByte storage." -msgstr "Aucun volume détecté dans le système de stockage CloudByte." - -msgid "No weighed hosts available" -msgstr "Aucun hôte pondéré n'est disponible" - -#, python-format -msgid "Not a valid string: %s" -msgstr "Chaîne non valide : %s" - -msgid "Not a valid value for NaElement." -msgstr "Valeur non valide pour NaElement." - -#, python-format -msgid "Not able to find a suitable datastore for the volume: %s." -msgstr "" -"Impossible de trouver un magasin de données approprié pour le volume : %s." - -msgid "Not an rbd snapshot" -msgstr "N'est pas un instantané rbd" - -#, python-format -msgid "Not authorized for image %(image_id)s." -msgstr "Non autorisé pour l'image %(image_id)s." - -msgid "Not authorized." -msgstr "Non autorisé." 
- -#, python-format -msgid "Not enough space on backend (%(backend)s)" -msgstr "Espace insuffisant sur le système dorsal (%(backend)s)" - -msgid "Not enough storage space in the ZFS share to perform this operation." -msgstr "" -"Espace de stockage insuffisant dans le partage ZFS pour exécuter cette " -"opération." - -msgid "Not stored in rbd" -msgstr "Non stocké dans rbd" - -msgid "Nova returned \"error\" status while creating snapshot." -msgstr "Nova a renvoyé l'état \"erreur\" durant la création de l'instantané." - -msgid "Null response received from CloudByte's list filesystem." -msgstr "" -"Réponse nulle reçue de CloudByte pour l'affichage du système de fichiers." - -msgid "Null response received from CloudByte's list iscsi auth groups." -msgstr "" -"Réponse Null reçue de la liste des groupes d'authentification iscsi de " -"CloudByte." - -msgid "Null response received from CloudByte's list iscsi initiators." -msgstr "" -"Réponse nulle reçue de CloudByte pour l'affichage des initiateurs iSCSI." - -msgid "Null response received from CloudByte's list volume iscsi service." -msgstr "" -"Réponse nulle reçue de CloudByte pour l'affichage du service iSCSI du volume." - -#, python-format -msgid "Null response received while creating volume [%s] at CloudByte storage." -msgstr "" -"Réponse Null reçue lors de la création du volume [%s] sur le système de " -"stockage CloudByte." - -#, python-format -msgid "Null response received while deleting volume [%s] at CloudByte storage." -msgstr "" -"Une réponse Null a été reçue lors de la suppression du volume [%s] du " -"stockage CloudByte." - -#, python-format -msgid "" -"Null response received while querying for [%(operation)s] based job " -"[%(job)s] at CloudByte storage." -msgstr "" -"Réponse Null reçue lors de l'interrogation du travail [%(operation)s] basé " -"[%(job)s] dans le stockage CloudByte." 
-
-msgid "Object Count"
-msgstr "Nombre d'objets"
-
-msgid "Object Version"
-msgstr "Version de l'objet"
-
-msgid "Object is not a NetApp LUN."
-msgstr "L'objet n'est pas un numéro d'unité logique NetApp."
-
-#, python-format
-msgid ""
-"On an Extend Operation, error adding volume to composite volume: "
-"%(volumename)s."
-msgstr ""
-"Sur une opération Extend, erreur d'ajout au volume composite : "
-"%(volumename)s."
-
-msgid "One of the required inputs from host, port or scheme was not found."
-msgstr ""
-"L'une des entrées requises de l'hôte, du port ou du schéma n'a pas été "
-"trouvée."
-
-#, python-format
-msgid ""
-"Only %(value)s %(verb)s request(s) can be made to %(uri)s every "
-"%(unit_string)s."
-msgstr ""
-"Seules les demandes %(value)s %(verb)s peuvent être envoyées à %(uri)s "
-"toutes les %(unit_string)s."
-
-msgid "Only one limit can be set in a QoS spec."
-msgstr "Une seule limite peut être définie dans une spécification QoS."
-
-msgid ""
-"Only users with token scoped to immediate parents or root projects are "
-"allowed to see its children quotas."
-msgstr ""
-"Seuls les utilisateurs avec portée de jeton sur les parents immédiats ou les "
-"projets racine sont autorisés à visualiser ses quotas enfants."
-
-msgid "Only volumes managed by OpenStack can be unmanaged."
-msgstr "Seuls les volumes gérés par OpenStack peuvent être non gérés."
-
-#, python-format
-msgid "Operation failed with status=%(status)s. Full dump: %(data)s"
-msgstr ""
-"Échec de l'opération avec statut = %(status)s. Vidage complet : %(data)s"
-
-#, python-format
-msgid "Operation not supported: %(operation)s."
-msgstr "Opération non prise en charge : %(operation)s."
-
-msgid "Option gpfs_images_dir is not set correctly."
-msgstr "L'option gpfs_images_dir n'est pas correctement définie."
-
-msgid "Option gpfs_images_share_mode is not set correctly."
-msgstr "L'option gpfs_images_share_mode n'est pas correctement définie."
- -msgid "Option gpfs_mount_point_base is not set correctly." -msgstr "L'option gpfs_mount_point_base n'est pas correctement définie." - -#, python-format -msgid "Originating %(res)s %(prop)s must be one of '%(vals)s' values" -msgstr "" -"L'état d'origine de %(res)s %(prop)s doit être l'une des valeurs '%(vals)s'" - -#, python-format -msgid "ParseException: %s" -msgstr "ParseException : %s" - -msgid "Partition name is None, please set smartpartition:partitionname in key." -msgstr "" -"Le nom de partition est None. Définissez smartpartition:partitionname dans " -"la clé." - -msgid "" -"Password or SSH private key is required for authentication: set either " -"san_password or san_private_key option." -msgstr "" -"Mot de passe ou clé privée SSH obligatoire pour l'authentification : " -"définissez l'option san_password ou san_private_key ." - -msgid "Path to REST server's certificate must be specified." -msgstr "Le chemin d'accès au certificat du serveur REST doit être spécifié." - -#, python-format -msgid "Please create %(pool_list)s pool in advance!" -msgstr "Créez d'avance le pool %(pool_list)s. " - -#, python-format -msgid "Please create %(tier_levels)s tier in pool %(pool)s in advance!" -msgstr "Créez d'avance le niveau %(tier_levels)s dans le pool %(pool)s. " - -msgid "Please specify a name for QoS specs." -msgstr "Veuillez indiquer un nom pour les spécifications QoS." - -#, python-format -msgid "Policy doesn't allow %(action)s to be performed." -msgstr "Le réglage des droits n'autorise pas %(action)s à être effectué(e)(s)" - -#, python-format -msgid "Pool %(poolNameInStr)s is not found." -msgstr "Pool %(poolNameInStr)s introuvable." - -#, python-format -msgid "Pool %s does not exist in Nexenta Store appliance" -msgstr "Le pool %s n'existe pas dans Nexenta Store Appliance" - -#, python-format -msgid "Pool from volume['host'] %(host)s not found." -msgstr "Le pool du volume ['host'] %(host)s est introuvable." 
- -#, python-format -msgid "Pool from volume['host'] failed with: %(ex)s." -msgstr "Le pool du volume ['host'] a échoué en renvoyant : %(ex)s." - -msgid "Pool is not available in the volume host field." -msgstr "Le pool n'est pas disponible dans la zone d'hôte du volume." - -msgid "Pool is not available in the volume host fields." -msgstr "Le pool n'est pas disponible dans les zones d'hôte du volume." - -#, python-format -msgid "Pool with name %(pool)s wasn't found in domain %(domain)s." -msgstr "Le pool nommé %(pool)s est introuvable dans le domaine %(domain)s." - -#, python-format -msgid "Pool with name %(pool_name)s wasn't found in domain %(domain_id)s." -msgstr "" -"Le pool nommé %(pool_name)s est introuvable dans le domaine %(domain_id)s." - -#, python-format -msgid "" -"Pool: %(poolName)s. is not associated to storage tier for fast policy " -"%(fastPolicy)s." -msgstr "" -"Pool : %(poolName)s. n'est pas associé au groupe de serveurs d'application " -"d'archivage pour la règle FAST %(fastPolicy)s." - -#, python-format -msgid "PoolName must be in the file %(fileName)s." -msgstr "PoolName doit être dans le fichier %(fileName)s." - -#, python-format -msgid "Pools %s does not exist" -msgstr "Le pool %s n'existe pas" - -msgid "Pools name is not set." -msgstr "Le nom de pools n'a pas été défini." - -#, python-format -msgid "Primary copy status: %(status)s and synchronized: %(sync)s." -msgstr "" -"Statut de la copie primaire : %(status)s et synchronisation : %(sync)s." - -msgid "Project ID" -msgstr "ID Projet" - -#, python-format -msgid "Project quotas are not properly setup for nested quotas: %(reason)s." -msgstr "" -"Les quotas de projet ne sont pas correctement configurés pour les quotas " -"imbriqués : %(reason)s." - -msgid "Protection Group not ready." -msgstr "Le groupe de protection n'est pas prêt." - -#, python-format -msgid "" -"Protocol %(storage_protocol)s is not supported for storage family " -"%(storage_family)s." 
-msgstr "" -"Le protocole %(storage_protocol)s n'est pas pris en charge pour la famille " -"de stockage %(storage_family)s." - -msgid "Provided backup record is missing an id" -msgstr "L'ID est manquant dans l'enregistrement de sauvegarde fourni" - -#, python-format -msgid "" -"Provided snapshot status %(provided)s not allowed for snapshot with status " -"%(current)s." -msgstr "" -"Etat d'instantané %(provided)s fourni interdit pour l'instantané ayant pour " -"état %(current)s." - -#, python-format -msgid "" -"Provider information w.r.t CloudByte storage was not found for OpenStack " -"volume [%s]." -msgstr "" -"Les informations du fournisseur sur le stockage CloudByte w.r.t n'ont pas " -"été trouvées pour le volume OpenStack [%s]." - -#, python-format -msgid "Pure Storage Cinder driver failure: %(reason)s" -msgstr "Echec du pilote Pure Storage Cinder : %(reason)s" - -#, python-format -msgid "QoS Specs %(specs_id)s already exists." -msgstr "Spécifications QoS %(specs_id)s déjà existantes." - -#, python-format -msgid "QoS Specs %(specs_id)s is still associated with entities." -msgstr "Les spécifications QoS %(specs_id)s sont encore associées aux entités." - -#, python-format -msgid "QoS config is wrong. %s must > 0." -msgstr "Configuration incorrecte de QoS. %s doit être > 0." - -#, python-format -msgid "" -"QoS policy must specify for IOTYPE and another qos_specs, QoS policy: " -"%(qos_policy)s." -msgstr "" -"La stratégie QoS doit indiquer une valeur pour IOTYPE et une autre " -"spécification qos_specs, stratégie QoS : %(qos_policy)s." - -#, python-format -msgid "" -"QoS policy must specify for IOTYPE: 0, 1, or 2, QoS policy: %(qos_policy)s " -msgstr "" -"La stratégie QoS doit indiquer pour IOTYPE : 0, 1 ou 2, stratégie QoS : " -"%(qos_policy)s " - -#, python-format -msgid "" -"QoS policy upper_limit and lower_limit conflict, QoS policy: %(qos_policy)s." 
-msgstr "" -"Conflit entre les valeurs upper_limit et lower_limit dans la stratégie QoS, " -"stratégie QoS : %(qos_policy)s." - -#, python-format -msgid "QoS spec %(specs_id)s has no spec with key %(specs_key)s." -msgstr "Spécif. QoS %(specs_id)s sans spécif. avec la clé %(specs_key)s." - -msgid "QoS specs are not supported on this storage family and ONTAP version." -msgstr "" -"Les spécifications QoS ne sont pas prises en charge sur cette famille de " -"stockage et version ONTAP." - -msgid "Qos specs still in use." -msgstr "Spécifications QoS encore en service." - -msgid "" -"Query by service parameter is deprecated. Please use binary parameter " -"instead." -msgstr "" -"La requête par paramètre de service est obsolète. Utilisez le paramètre " -"binaire à la place." - -msgid "Query resource pool error." -msgstr "Erreur lors de l'interrogation du pool de ressources." - -#, python-format -msgid "Quota %s limit must be equal or greater than existing resources." -msgstr "" -"La limite de quota %s doit être égale ou supérieure aux ressources " -"existantes." - -#, python-format -msgid "Quota class %(class_name)s could not be found." -msgstr "Classe de quota %(class_name)s introuvable." - -msgid "Quota could not be found" -msgstr "Quota introuvable" - -#, python-format -msgid "Quota exceeded for resources: %(overs)s" -msgstr "Quota dépassé pour les ressources : %(overs)s" - -#, python-format -msgid "Quota exceeded: code=%(code)s" -msgstr "Quota dépassé: code=%(code)s" - -#, python-format -msgid "Quota for project %(project_id)s could not be found." -msgstr "Quota du projet %(project_id)s introuvable." 
- -#, python-format -msgid "" -"Quota limit invalid for project '%(proj)s' for resource '%(res)s': limit of " -"%(limit)d is less than in-use value of %(used)d" -msgstr "" -"La limite de quota n'est pas valide pour le projet '%(proj)s' de la " -"ressource '%(res)s' : la limite %(limit)d est inférieure à la valeur " -"utilisée %(used)d" - -#, python-format -msgid "Quota reservation %(uuid)s could not be found." -msgstr "Réservation de quota %(uuid)s introuvable." - -#, python-format -msgid "Quota usage for project %(project_id)s could not be found." -msgstr "Utilisation de quota pour le projet %(project_id)s introuvable." - -#, python-format -msgid "RBD diff op failed - (ret=%(ret)s stderr=%(stderr)s)" -msgstr "Echec de l'op. diff RBD - (ret=%(ret)s stderr=%(stderr)s)" - -#, python-format -msgid "REST %(proxy_ver)s hpelefthandclient %(rest_ver)s" -msgstr "REST %(proxy_ver)s hpelefthandclient %(rest_ver)s" - -msgid "REST server IP must by specified." -msgstr "L'IP du serveur REST doit être spécifiée." - -msgid "REST server password must by specified." -msgstr "Le mot de passe de l'utilisateur du serveur REST doit être spécifié." - -msgid "REST server username must by specified." -msgstr "Le nom d'utilisateur du serveur REST doit être spécifié." - -msgid "RPC Version" -msgstr "Version RPC" - -msgid "RPC server response is incomplete" -msgstr "Réponse incomplète du serveur RPC" - -msgid "Raid did not have MCS Channel." -msgstr "Raid n'avait pas de canal MCS." - -#, python-format -msgid "Received error string: %s" -msgstr "Chaîne d'erreur reçue : %s" - -msgid "Reference must be for an unmanaged snapshot." -msgstr "La référence doit s'appliquer à un instantané non géré." - -msgid "Reference must be for an unmanaged virtual volume." -msgstr "La référence doit s'appliquer à un volume virtuel non géré." - -msgid "Reference must be the name of an unmanaged snapshot." -msgstr "La référence doit correspondre au nom d'un instantané non géré." 
- -msgid "Reference must be the volume name of an unmanaged virtual volume." -msgstr "La référence doit s'appliquer à un nom de volume virtuel non géré." - -msgid "Reference must contain either source-name or source-id element." -msgstr "La référence doit contenir l'élément source-name ou source-id." - -msgid "Reference must contain source-id or source-name element." -msgstr "La référence doit contenir l'élément source-id ou source-name." - -msgid "Reference must contain source-id or source-name key." -msgstr "La référence doit contenir une clé source-id ou source-name." - -msgid "Reference must contain source-id or source-name." -msgstr "La référence doit contenir source-id ou source-name." - -msgid "Reference must contain source-id." -msgstr "La référence doit contenir source-id." - -msgid "Reference must contain source-name element." -msgstr "La référence doit contenir l'élément source-name." - -msgid "Reference must contain source-name or source-id." -msgstr "La référence doit contenir l'élément source-name ou source-id." - -msgid "Reference must contain source-name." -msgstr "La référence doit contenir source-name." - -msgid "Reference to volume to be managed must contain source-name." -msgstr "La référence au volume à gérer doit contenir l'élément source-name." - -#, python-format -msgid "Reference to volume: %s to be managed must contain source-name." -msgstr "La référence au volume %s à gérer doit contenir l'élément source-name." - -#, python-format -msgid "" -"Refusing to migrate volume ID: %(id)s. Please check your configuration " -"because source and destination are the same Volume Group: %(name)s." -msgstr "" -"Refus de migrer l'ID volume %(id)s. Vérifiez votre configuration car la " -"source et la destination indiquent le même groupe de volumes : %(name)s." - -msgid "Remote pool cannot be found." -msgstr "Pool distant introuvable." - -msgid "Remove CHAP error." -msgstr "Erreur lors de la suppression CHAP." - -msgid "Remove fc from host error." 
-msgstr "Erreur lors de la suppression fc de l'hôte." - -msgid "Remove host from array error." -msgstr "Erreur lors de la suppression de l'hôte de la matrice." - -msgid "Remove host from hostgroup error." -msgstr "Erreur lors du retrait de l'hôte de hostgroup." - -msgid "Remove iscsi from host error." -msgstr "Erreur lors de la suppression iscsi de l'hôte." - -msgid "Remove lun from QoS error." -msgstr "Erreur lors de la suppression de numéro d'unité logique (lun) de QoS." - -msgid "Remove lun from cache error." -msgstr "Erreur lors de la suppression du numéro d'unité logique du cache." - -msgid "Remove lun from partition error." -msgstr "" -"Erreur lors de la suppression de numéro d'unité logique de la partition." - -msgid "Remove port from port group error." -msgstr "Erreur lors de la suppression d'un port du groupe de ports." - -msgid "Remove volume export failed." -msgstr "La suppression de l'exportation du volume a échoué." - -msgid "Rename lun on array error." -msgstr "" -"Erreur lors de la modification du nom du numéro d'unité logique sur la " -"matrice." - -msgid "Rename snapshot on array error." -msgstr "Erreur lors de la modification du nom de l'instantané sur la matrice." - -#, python-format -msgid "Replication %(name)s to %(ssn)s failed." -msgstr "La réplication %(name)s vers %(ssn)s a échoué." - -#, python-format -msgid "Replication Service Capability not found on %(storageSystemName)s." -msgstr "" -"Fonction du service de réplication introuvable sur %(storageSystemName)s." - -#, python-format -msgid "Replication Service not found on %(storageSystemName)s." -msgstr "Service de réplication introuvable sur %(storageSystemName)s." - -msgid "Replication not allowed yet." -msgstr "La réplication n'est pas autorisée pour le moment." 
- -msgid "Request body and URI mismatch" -msgstr "Corps et URI de demande discordants" - -msgid "Request body contains too many items" -msgstr "Le corps de la demande contient un trop grand nombre d'éléments" - -msgid "Request body contains too many items." -msgstr "Le corps de demande contient trop d'éléments." - -msgid "Request body empty" -msgstr "Le corps de la demande est vide" - -#, python-format -msgid "Request to Datera cluster returned bad status: %(status)s | %(reason)s" -msgstr "" -"La demande au cluster Datera a renvoyé un statut incorrect : %(status)s | " -"%(reason)s" - -#, python-format -msgid "" -"Requested backup exceeds allowed Backup gigabytes quota. Requested " -"%(requested)sG, quota is %(quota)sG and %(consumed)sG has been consumed." -msgstr "" -"La sauvegarde demandée dépasse le quota de sauvegarde autorisé en Go. " -"%(requested)s Go demandés. Le quota est de %(quota)s Go et %(consumed)s Go " -"ont été consommés." - -#, python-format -msgid "" -"Requested volume or snapshot exceeds allowed %(name)s quota. Requested " -"%(requested)sG, quota is %(quota)sG and %(consumed)sG has been consumed." -msgstr "" -"Le volume ou l'instantané demandé dépasse le quota %(name)s autorisé. " -"%(requested)s Go demandés. Le quota est de %(quota)s Go et %(consumed)s Go " -"ont été consommés." - -#, python-format -msgid "" -"Requested volume size %(size)d is larger than maximum allowed limit " -"%(limit)d." -msgstr "" -"La taille de volume demandée, %(size)d, dépasse la limite maximale autorisée " -"%(limit)d." - -msgid "Required configuration not found" -msgstr "Configuration obligatoire non trouvée" - -#, python-format -msgid "Required flag %s is not set" -msgstr "L'indicateur obligatoire %s n'est pas défini" - -#, python-format -msgid "" -"Reset backup status aborted, the backup service currently configured " -"[%(configured_service)s] is not the backup service that was used to create " -"this backup [%(backup_service)s]." 
-msgstr "" -"Réinitialisation du statut de la sauvegarde interrompue, le service de " -"sauvegarde actuellement configuré [%(configured_service)s] ne correspond pas " -"au service de sauvegarde utilisé pour créer cette sauvegarde " -"[%(backup_service)s]." - -#, python-format -msgid "Resizing clone %s failed." -msgstr "Le redimensionnement du clone %s a échoué." - -msgid "Resizing image file failed." -msgstr "Echec de redimensionnement du fichier image." - -msgid "Resource could not be found." -msgstr "Ressource introuvable." - -msgid "Resource not ready." -msgstr "Ressource non prête." - -#, python-format -msgid "Response error - %s." -msgstr "Erreur de réponse - %s." - -msgid "Response error - The storage-system is offline." -msgstr "Erreur de réponse - Le système de stockage est hors ligne." - -#, python-format -msgid "Response error code - %s." -msgstr "Code d'erreur de réponse - %s." - -msgid "RestURL is not configured." -msgstr "RestURL n'est pas configuré." - -#, python-format -msgid "" -"Restore backup aborted, expected volume status %(expected_status)s but got " -"%(actual_status)s." -msgstr "" -"Restauration de la sauvegarde interrompue : état du volume attendu " -"%(expected_status)s, mais état %(actual_status)s obtenu." - -#, python-format -msgid "" -"Restore backup aborted, the backup service currently configured " -"[%(configured_service)s] is not the backup service that was used to create " -"this backup [%(backup_service)s]." -msgstr "" -"Restauration de la sauvegarde interrompue, le service de sauvegarde " -"actuellement configuré [%(configured_service)s] ne correspond pas au service " -"de sauvegarde utilisé pour créer cette sauvegarde [%(backup_service)s]." - -#, python-format -msgid "" -"Restore backup aborted: expected backup status %(expected_status)s but got " -"%(actual_status)s." -msgstr "" -"Restauration de la sauvegarde interrompue : état de la sauvegarde attendu " -"%(expected_status)s, mais état %(actual_status)s obtenu." 
- -#, python-format -msgid "" -"Retrieved a different amount of SolidFire volumes for the provided Cinder " -"snapshots. Retrieved: %(ret)s Desired: %(des)s" -msgstr "" -"Nombre différent de volumes Solidfire récupérés pour les instantanés Cinder " -"fournis. Récupérés : %(ret)s Souhaités : %(des)s" - -#, python-format -msgid "" -"Retrieved a different amount of SolidFire volumes for the provided Cinder " -"volumes. Retrieved: %(ret)s Desired: %(des)s" -msgstr "" -"Nombre différent de volumes Solidfire récupérés pour les volumes Cinder " -"fournis. Récupérés : %(ret)s Souhaités : %(des)s" - -#, python-format -msgid "Retry count exceeded for command: %s" -msgstr "Nombre de nouvelles tentatives dépassé pour la commande : %s" - -msgid "Retryable SolidFire Exception encountered" -msgstr "Exception SolidFire réessayable rencontrée" - -msgid "Retype requires migration but is not allowed." -msgstr "La nouvelle saisie nécessite la migration, mais n'est pas autorisée." - -#, python-format -msgid "Rolling back %(volumeName)s by deleting it." -msgstr "Annulation de %(volumeName)s par sa suppression." - -#, python-format -msgid "" -"Running Cinder with a VMware vCenter version less than %s is not allowed." -msgstr "" -"L'exécution de Cinder avec VMware vCenter d'une version inférieure à %s " -"n'est pas autorisée." - -msgid "SAN product is not configured." -msgstr "Le produit SAN n'est pas configuré." - -msgid "SAN protocol is not configured." -msgstr "Le protocole SAN n'est pas configuré." - -#, python-format -msgid "SMBFS config 'smbfs_oversub_ratio' invalid. Must be > 0: %s" -msgstr "" -"Configuration SMBFS 'smbfs_oversub_ratio' non valide. Doit être > 0 : %s" - -#, python-format -msgid "SMBFS config 'smbfs_used_ratio' invalid. Must be > 0 and <= 1.0: %s" -msgstr "" -"Configuration SMBFS 'smbfs_used_ratio' non valide. Doit être > 0 et <= 1,0 : " -"%s" - -#, python-format -msgid "SMBFS config file at %(config)s doesn't exist." 
-msgstr "Le fichier de configuration SMBFS dans %(config)s n'existe pas." - -msgid "SMBFS config file not set (smbfs_shares_config)." -msgstr "Fichier de configuration SMBFS non défini (smbfs_shares_config)." - -#, python-format -msgid "SSH Command failed after '%(total_attempts)r' attempts : '%(command)s'" -msgstr "" -"Echec de la commande SSH après '%(total_attempts)r' tentatives : " -"'%(command)s'" - -#, python-format -msgid "SSH command injection detected: %(command)s" -msgstr "Injection de commande SSH détectée : %(command)s" - -#, python-format -msgid "SSH connection failed for %(fabric)s with error: %(err)s" -msgstr "Echec de la connexion SSH pour %(fabric)s avec l'erreur : %(err)s" - -#, python-format -msgid "SSL Certificate expired on %s." -msgstr "Le certificat SSL a expiré le %s." - -#, python-format -msgid "SSL error: %(arg)s." -msgstr "Erreur SSL : %(arg)s." - -#, python-format -msgid "Scheduler Host Filter %(filter_name)s could not be found." -msgstr "Filtre hôte du planificateur %(filter_name)s introuvable." - -#, python-format -msgid "Scheduler Host Weigher %(weigher_name)s could not be found." -msgstr "Peseur de l'hôte du planificateur %(weigher_name)s introuvable." - -#, python-format -msgid "" -"Secondary copy status: %(status)s and synchronized: %(sync)s, sync progress " -"is: %(progress)s%%." -msgstr "" -"Statut de la copie secondaire : %(status)s et synchronisation : %(sync)s, " -"progression de la synchronisation : %(progress)s%%." - -#, python-format -msgid "" -"Secondary id can not be the same as primary array, backend_id = " -"%(secondary)s." -msgstr "" -"L'ID secondaire ne peut pas être identique à la matrice principale, " -"backend_id = %(secondary)s." - -#, python-format -msgid "SerialNumber must be in the file %(fileName)s." -msgstr "SerialNumber doit être dans le fichier %(fileName)s." - -#, python-format -msgid "Service %(service)s on host %(host)s removed." -msgstr "Le service %(service)s sur l'hôte %(host)s a été retiré." 
- -#, python-format -msgid "Service %(service_id)s could not be found on host %(host)s." -msgstr "Service %(service_id)s introuvable sur l'hôte %(host)s." - -#, python-format -msgid "Service %(service_id)s could not be found." -msgstr "Le service %(service_id)s est introuvable." - -msgid "Service is too old to fulfil this request." -msgstr "Service trop ancien pour satisfaire cette demande." - -msgid "Service is unavailable at this time." -msgstr "Le service est indisponible actuellement." - -msgid "Set pair secondary access error." -msgstr "Erreur lors de la définition d'accès secondaire à une paire." - -msgid "Sets thin provisioning." -msgstr "Active l'allocation de ressources à la demande." - -msgid "" -"Setting LUN QoS policy group is not supported on this storage family and " -"ONTAP version." -msgstr "" -"La définition du groupe de règles LUN QoS n'est pas prise en charge sur " -"cette famille de stockage et version ONTAP." - -msgid "" -"Setting file qos policy group is not supported on this storage family and " -"ontap version." -msgstr "" -"La définition du groupe de règles qos du fichier n'est pas prise en charge " -"sur cette famille de stockage et version ontap." - -#, python-format -msgid "" -"Share at %(dir)s is not writable by the Cinder volume service. Snapshot " -"operations will not be supported." -msgstr "" -"Le partage dans %(dir)s n'est pas accessible en écriture par le service de " -"volumes Cinder. Les opérations d'instantané ne seront pas prises en charge." - -#, python-format -msgid "Sheepdog I/O Error, command was: \"%s\"." -msgstr "Erreur d'E-S Sheepdog I/O, commande concernée : \"%s\"." - -msgid "" -"Show operations can only be made to projects in the same hierarchy of the " -"project in which users are scoped to." -msgstr "" -"Les opérations Show ne peuvent porter que sur des projets dans la même " -"hiérarchie de projet que celle définie pour la portée utilisateurs." 
- -msgid "Size" -msgstr "Taille" - -#, python-format -msgid "Size for volume: %s not found, cannot secure delete." -msgstr "" -"La taille du volume %s est introuvable. Impossible d'effectuer une " -"suppression sécurisée." - -#, python-format -msgid "" -"Size is %(image_size)dGB and doesn't fit in a volume of size " -"%(volume_size)dGB." -msgstr "" -"La taille est %(image_size)d Go et ne s'adapte pas dans un volume d'une " -"taille de %(volume_size)d Go." - -#, python-format -msgid "" -"Size of specified image %(image_size)sGB is larger than volume size " -"%(volume_size)sGB." -msgstr "" -"La taille de l'image définie %(image_size)s Go est supérieure à la taille du " -"volume %(volume_size)s Go." - -#, python-format -msgid "" -"Snapshot %(id)s has been asked to be deleted while waiting for it to become " -"available. Perhaps a concurrent request was made." -msgstr "" -"Il a été demandé de supprimer l'instantané %(id)s alors qu'on attendait " -"qu'il soit disponible. Une demande simultanée a peut-être été effectuée." - -#, python-format -msgid "" -"Snapshot %(id)s was found in state %(state)s rather than 'deleting' during " -"cascade delete." -msgstr "" -"L'instantané %(id)s a été trouvé à l'état %(state)s et non pas à l'état " -"'deleting' (suppression en cours) lors de la suppression en cascade." - -#, python-format -msgid "Snapshot %(snapshot_id)s could not be found." -msgstr "L'instantané %(snapshot_id)s est introuvable." - -#, python-format -msgid "Snapshot %(snapshot_id)s has no metadata with key %(metadata_key)s." -msgstr "" -"L'instantané %(snapshot_id)s n'a pas de métadonnées avec la clé " -"%(metadata_key)s." - -#, python-format -msgid "Snapshot '%s' doesn't exist on array." -msgstr "L'instantané '%s' n'existe pas sur la matrice." - -#, python-format -msgid "" -"Snapshot cannot be created because volume %(vol_id)s is not available, " -"current volume status: %(vol_status)s." 
-msgstr "" -"L'instantané ne peut pas être créé car le volume %(vol_id)s n'est pas " -"disponible, état actuel du volume : %(vol_status)s." - -msgid "Snapshot cannot be created while volume is migrating." -msgstr "L'instantané ne peut pas être créé pendant la migration du volume." - -msgid "Snapshot of secondary replica is not allowed." -msgstr "Instantané de réplique secondaire non autorisé." - -#, python-format -msgid "Snapshot of volume not supported in state: %s." -msgstr "Instantané du volume non pris en charge à l'état : %s." - -#, python-format -msgid "Snapshot res \"%s\" that is not deployed anywhere?" -msgstr "Ressource d'instantané \"%s\" non déployée ailleurs ?" - -#, python-format -msgid "Snapshot status %(cur)s not allowed for update_snapshot_status" -msgstr "Statut d'instantané %(cur)s interdit pour update_snapshot_status" - -msgid "Snapshot status must be \"available\" to clone." -msgstr "" -"L'état de l'instantané doit être \"disponible\" pour que le clonage soit " -"possible." - -#, python-format -msgid "" -"Snapshot to be backed up must be available, but the current status is \"%s\"." -msgstr "" -"L'instantané à sauvegarder doit être disponible, mais le statut actuel est " -"\"%s\"." - -#, python-format -msgid "Snapshot with id of %s could not be found." -msgstr "L'instantané avec l'ID %s est introuvable." - -#, python-format -msgid "" -"Snapshot='%(snap)s' does not exist in base image='%(base)s' - aborting " -"incremental backup" -msgstr "" -"L'instantané='%(snap)s' n'existe pas dans l'image de base='%(base)s' - " -"abandon sauvegarde incrémentielle" - -#, python-format -msgid "Snapshots are not supported for this volume format: %s" -msgstr "" -"Les instantanés ne sont pas pris en charge pour ce format de volume : %s" - -#, python-format -msgid "Socket error: %(arg)s." -msgstr "Erreur de socket : %(arg)s." 
- -msgid "SolidFire Cinder Driver exception" -msgstr "Exception du pilote SolidFire Cinder" - -msgid "Sort direction array size exceeds sort key array size." -msgstr "La taille de tableau du sens de tri dépasse celle de la clé de tri." - -msgid "Source CG is empty. No consistency group will be created." -msgstr "" -"Le groupe de cohérence source est vide. Aucun groupe de cohérence ne sera " -"créé." - -msgid "Source host details not found." -msgstr "Détails de l'hôte source introuvables." - -msgid "Source volume device ID is required." -msgstr "L'ID d'unité du volume source est requis." - -msgid "Source volume not mid-migration." -msgstr "Le volume source n'est pas en cours de migration." - -msgid "SpaceInfo returned byarray is invalid" -msgstr "SpaceInfo renvoyé par la matrice est incorrect" - -#, python-format -msgid "" -"Specified host to map to volume %(vol)s is in unsupported host group with " -"%(group)s." -msgstr "" -"L'hôte spécifié à mapper au volume %(vol)s est dans un groupe d'hôtes non " -"pris en charge avec %(group)s." - -msgid "Specified logical volume does not exist." -msgstr "Le volume logique spécifié n'existe pas." - -#, python-format -msgid "Specified snapshot group with id %s could not be found." -msgstr "Le groupe d'instantanés spécifié avec l'ID %s est introuvable." - -msgid "Specify a password or private_key" -msgstr "Spécifiez un mot de passe ou private_key" - -msgid "Specify san_password or san_private_key" -msgstr "Indiquez san_password ou san_private_key" - -msgid "" -"Specify volume type name, description, is_public or a combination thereof." -msgstr "" -"Indiquez le nom de type de volume, une description, is_public ou une " -"combinaison de ces éléments." - -msgid "Split pair error." -msgstr "Erreur lors du fractionnement d'une paire." - -msgid "Split replication failed." -msgstr "L'opération de fractionnement de la réplication a échoué." - -msgid "Start LUNcopy error." -msgstr "Erreur lors du lancement de LUNcopy." 
- -msgid "State" -msgstr "Etat" - -#, python-format -msgid "State of node is wrong. Current state is %s." -msgstr "L'état du noeud est incorrect. Etat actuel : %s." - -msgid "Status" -msgstr "Statut" - -msgid "Stop snapshot error." -msgstr "Erreur lors de l'arrêt de l'instantané." - -#, python-format -msgid "Storage Configuration Service not found on %(storageSystemName)s." -msgstr "" -"Service de configuration de stockage introuvable sur %(storageSystemName)s." - -#, python-format -msgid "Storage HardwareId mgmt Service not found on %(storageSystemName)s." -msgstr "" -"Service de gestion d'ID matériel de stockage introuvable sur " -"%(storageSystemName)s." - -#, python-format -msgid "Storage Profile %s not found." -msgstr "Le profil de stockage %s est introuvable." - -#, python-format -msgid "Storage Relocation Service not found on %(storageSystemName)s." -msgstr "" -"Service de réadressage de stockage introuvable sur %(storageSystemName)s." - -#, python-format -msgid "Storage family %s is not supported." -msgstr "La famille de produits de stockage %s n'est pas prise en charge." - -#, python-format -msgid "Storage group %(storageGroupName)s was not deleted successfully" -msgstr "" -"Le groupe de stockage %(storageGroupName)s n'a pas été correctement supprimé" - -#, python-format -msgid "Storage host %(svr)s not detected, verify name" -msgstr "L'hôte de stockage %(svr)s n'a pas été détecté, vérifiez son nom" - -msgid "Storage pool is not configured." -msgstr "Le pool de stockage n'est pas configuré." - -#, python-format -msgid "Storage profile: %(storage_profile)s not found." -msgstr "Profil de stockage %(storage_profile)s introuvable." - -msgid "Storage resource could not be found." -msgstr "Ressource de stockage introuvable." - -msgid "Storage system id not set." -msgstr "ID du système de stockage non défini." - -#, python-format -msgid "Storage system not found for pool %(poolNameInStr)s." -msgstr "Système de stockage introuvable pour le pool %(poolNameInStr)s." 
- -#, python-format -msgid "StorageSystem %(array)s is not found." -msgstr "StorageSystem %(array)s introuvable." - -#, python-format -msgid "" -"Sum of child usage '%(sum)s' is greater than free quota of '%(free)s' for " -"project '%(proj)s' for resource '%(res)s'. Please lower the limit or usage " -"for one or more of the following projects: '%(child_ids)s'" -msgstr "" -"La somme d'utilisation du quota enfant '%(sum)s' est supérieure au quota " -"disponible '%(free)s' pour le projet '%(proj)s' de la ressource '%(res)s'. " -"Réduisez la limite ou l'utilisation pour un ou plusieurs projets parmi les " -"suivants : '%(child_ids)s'" - -msgid "Switch over pair error." -msgstr "Erreur lors du basculement d'une paire." - -msgid "Sync pair error." -msgstr "Erreur lors de la synchronisation de paire." - -#, python-format -msgid "System %(id)s found with bad password status - %(pass_status)s." -msgstr "" -"Système %(id)s détecté avec un statut de mot de passe incorrect - " -"%(pass_status)s." - -#, python-format -msgid "System %(id)s found with bad status - %(status)s." -msgstr "Système %(id)s trouvé avec état erroné - %(status)s." - -msgid "System does not support compression." -msgstr "Le système ne gère pas la compression." - -msgid "System is busy, retry operation." -msgstr "Le système est occupé, recommencer l'opération." - -#, python-format -msgid "" -"TSM [%(tsm)s] was not found in CloudByte storage for account [%(account)s]." -msgstr "" -"TSM [%(tsm)s] n'a pas été trouvé dans le stockage CloudByte pour le compte " -"[%(account)s]." - -msgid "Target volume type is still in use." -msgstr "Le type de volume cible est toujours utilisé." - -msgid "Terminate connection failed" -msgstr "Echec de fin de la connexion" - -msgid "Terminate connection unable to connect to backend." -msgstr "" -"La fonction mettant fin à la connexion ne peut pas se connecter au back-end." 
- -#, python-format -msgid "Terminate volume connection failed: %(err)s" -msgstr "La clôture de la connexion au volume a échoué : %(err)s" - -#, python-format -msgid "The %(type)s %(id)s source to be replicated was not found." -msgstr "La source %(type)s %(id)s à répliquer est introuvable." - -msgid "" -"The 'sort_key' and 'sort_dir' parameters are deprecated and cannot be used " -"with the 'sort' parameter." -msgstr "" -"Les paramètres 'sort_key' et 'sort_dir' sont obsolètes et ne peuvent pas " -"être utilisés avec le paramètre 'sort'." - -msgid "The EQL array has closed the connection." -msgstr "La matrice EQL a fermé la connexion." - -#, python-format -msgid "" -"The GPFS filesystem %(fs)s is not at the required release level. Current " -"level is %(cur)s, must be at least %(min)s." -msgstr "" -"Le système de fichiers GPFS %(fs)s ne se trouve pas au niveau d'édition " -"requis. Niveau actuel : %(cur)s, niveau minimal requis : %(min)s." - -msgid "The IP Address was not found." -msgstr "Adresse IP introuvable." - -#, python-format -msgid "" -"The WebDAV request failed. Reason: %(msg)s, Return code/reason: %(code)s, " -"Source Volume: %(src)s, Destination Volume: %(dst)s, Method: %(method)s." -msgstr "" -"Echec de la demande WebDAV. Cause : %(msg)s. Code retour/cause : %(code)s, " -"volume source : %(src)s, volume de destination : %(dst)s, méthode : " -"%(method)s." - -msgid "" -"The above error may show that the database has not been created.\n" -"Please create a database using 'cinder-manage db sync' before running this " -"command." -msgstr "" -"L'erreur ci-dessous peut indiquer que la base de données n'a pas été créée.\n" -"Créez une base de données avec 'cinder-manage db sync' avant d'exécuter " -"cette commande." - -#, python-format -msgid "" -"The array does not support the storage pool setting for SLO %(slo)s and " -"workload %(workload)s. Please check the array for valid SLOs and workloads." 
-msgstr "" -"La matrice ne prend pas en charge la valeur de pool de stockage pour SLO " -"%(slo)s et la charge de travail %(workload)s. Vérifiez la matrice pour " -"connaître les valeurs SLO et les valeurs de charge de travail valides." - -msgid "" -"The back-end where the volume is created does not have replication enabled." -msgstr "" -"La réplication n'est pas activée sur le back-end sur lequel le volume est " -"créé." - -#, python-format -msgid "" -"The command %(cmd)s failed. (ret: %(ret)s, stdout: %(out)s, stderr: %(err)s)" -msgstr "" -"Echec de la commande %(cmd)s. (ret : %(ret)s, stdout : %(out)s, stderr : " -"%(err)s)" - -msgid "The copy should be primary or secondary" -msgstr "La copie doit être primaire ou secondaire" - -#, python-format -msgid "" -"The creation of a logical device could not be completed. (LDEV: %(ldev)s)" -msgstr "" -"La création d'une unité logique n'a pas abouti. (unité logique : %(ldev)s)" - -msgid "The decorated method must accept either a volume or a snapshot object" -msgstr "" -"La méthode décorée doit accepter un instantané ou un objet d'instantané" - -#, python-format -msgid "The device in the path %(path)s is unavailable: %(reason)s" -msgstr "L'unité du chemin %(path)s est indisponible : %(reason)s" - -#, python-format -msgid "The end time (%(end)s) must be after the start time (%(start)s)." -msgstr "" -"L'heure de fin (%(end)s) doit être postérieure à l'heure de début " -"(%(start)s)." - -#, python-format -msgid "The extraspec: %(extraspec)s is not valid." -msgstr "La spécification supplémentaire %(extraspec)s n'est pas valide." - -#, python-format -msgid "The failed-over volume could not be deleted: %s" -msgstr "Suppression impossible du volume basculé : %s" - -#, python-format -msgid "The following elements are required: %s" -msgstr "Les éléments suivants sont requis : %s" - -msgid "The host group or iSCSI target could not be added." -msgstr "Impossible d'ajouter le groupe d'hôtes ou la cible iSCSI." 
- -msgid "The host group or iSCSI target was not found." -msgstr "Le groupe d'hôtes ou la cible iSCSI est introuvable." - -msgid "" -"The host is not ready to be failed back. Please resynchronize the volumes " -"and resume replication on the 3PAR backends." -msgstr "" -"L'hôte n'est pas prêt à être rebasculé. Resynchronisez les volumes et " -"reprenez la réplication sur les back-ends 3PAR." - -msgid "" -"The host is not ready to be failed back. Please resynchronize the volumes " -"and resume replication on the LeftHand backends." -msgstr "" -"L'hôte n'est pas prêt à être rebasculé. Resynchronisez les volumes et " -"reprenez la réplication sur les back-ends LeftHand." - -msgid "" -"The host is not ready to be failed back. Please resynchronize the volumes " -"and resume replication on the Storwize backends." -msgstr "" -"L'hôte n'est pas prêt à être rebasculé. Resynchronisez les volumes et " -"reprenez la réplication sur les back-ends Storwize." - -#, python-format -msgid "The iSCSI CHAP user %(user)s does not exist." -msgstr "L'utilisateur CHAP iSCSI %(user)s n'existe pas." - -msgid "The key cannot be None." -msgstr "La clé ne peut pas être nulle." - -#, python-format -msgid "The logical device for specified %(type)s %(id)s was already deleted." -msgstr "L'unité logique pour le %(type)s %(id)s spécifié a déjà été supprimée." - -#, python-format -msgid "The method %(method)s is timed out. (timeout value: %(timeout)s)" -msgstr "" -"La méthode %(method)s a expiré. (valeur du délai d'expiration : %(timeout)s)" - -msgid "The method update_migrated_volume is not implemented." -msgstr "La méthode update_migrated_volume n'est pas implémentée." - -#, python-format -msgid "" -"The mount %(mount_path)s is not a valid Quobyte USP volume. Error: %(exc)s" -msgstr "" -"Le montage %(mount_path)s n'est pas un volume Quobyte USP valide. Erreur : " -"%(exc)s" - -#, python-format -msgid "The parameter of the storage backend. 
(config_group: %(config_group)s)" -msgstr "" -"Paramètre du back-end de stockage. (groupe de configuration : " -"%(config_group)s)" - -msgid "The parent backup must be available for incremental backup." -msgstr "" -"La sauvegarde parent doit être disponible pour la sauvegarde incrémentielle." - -#, python-format -msgid "The provided snapshot '%s' is not a snapshot of the provided volume." -msgstr "L'instantané fourni '%s' n'est pas un instantané du volume fourni." - -msgid "" -"The reference to the volume in the backend should have the format " -"file_system/volume_name (volume_name cannot contain '/')" -msgstr "" -"La référence au volume sur le back-end doit être au format " -"système_de_fichiers/nom_volume (nom_volume ne peut pas contenir de barre " -"oblique '/')" - -#, python-format -msgid "The remote retention count must be %s or less." -msgstr "" -"La valeur de la rétention à distance doit être inférieure ou égale à %s." - -msgid "" -"The replication mode was not configured correctly in the volume type " -"extra_specs. If replication:mode is periodic, replication:sync_period must " -"also be specified and be between 300 and 31622400 seconds." -msgstr "" -"Le mode de réplication n'a pas été configuré correctement dans les " -"spécifications extra_specs du type de volume. Si la valeur de replication:" -"mode est periodic, replication:sync_period doit également être spécifié avec " -"une valeur comprise entre 300 et 31622400 secondes." - -#, python-format -msgid "The replication sync period must be at least %s seconds." -msgstr "" -"La période de synchronisation de la réplication doit être d'au moins %s " -"secondes." - -#, python-format -msgid "" -"The requested size : %(requestedSize)s is not the same as resulting size: " -"%(resultSize)s." -msgstr "" -"La taille demandée : %(requestedSize)s n'est pas identique à la taille " -"résultante : %(resultSize)s." - -#, python-format -msgid "The resource %(resource)s was not found." 
-msgstr "Ressource %(resource)s introuvable." - -msgid "The results are invalid." -msgstr "Le résultat n'est pas valide." - -#, python-format -msgid "The retention count must be %s or less." -msgstr "La valeur de rétention doit être inférieure ou égale à %s." - -msgid "The snapshot cannot be created when the volume is in maintenance mode." -msgstr "" -"L'instantané ne peut pas être créé alors que le volume est en mode " -"maintenance." - -msgid "The source volume for this WebDAV operation not found." -msgstr "Volume source introuvable pour cette opération WebDAV." - -#, python-format -msgid "" -"The source volume type '%(src)s' is different than the destination volume " -"type '%(dest)s'." -msgstr "" -"Le type de volume source %(src)s est différent du type de volume de " -"destination %(dest)s." - -#, python-format -msgid "The source volume type '%s' is not available." -msgstr "Le type de volume source %s n'est pas disponible." - -#, python-format -msgid "The specified %(desc)s is busy." -msgstr "La %(desc)s spécifiée est occupée." - -#, python-format -msgid "The specified LUN does not belong to the given pool: %s." -msgstr "" -"Le numéro d'unité logique (LUN) indiqué n'appartient pas au pool indiqué : " -"%s." - -#, python-format -msgid "" -"The specified ldev %(ldev)s could not be managed. The ldev must not be " -"mapping." -msgstr "" -"L'unité logique spécifiée %(ldev)s ne peut pas être gérée. L'unité logique " -"ne doit pas être en cours de mappage." - -#, python-format -msgid "" -"The specified ldev %(ldev)s could not be managed. The ldev must not be " -"paired." -msgstr "" -"L'unité logique spécifiée %(ldev)s ne peut pas être gérée. L'unité logique " -"ne doit pas être appariée." - -#, python-format -msgid "" -"The specified ldev %(ldev)s could not be managed. The ldev size must be in " -"multiples of gigabyte." -msgstr "" -"L'unité logique spécifiée %(ldev)s ne peut pas être gérée. 
La taille de " -"l'unité logique doit être exprimée en multiples du gigaoctet." - -#, python-format -msgid "" -"The specified ldev %(ldev)s could not be managed. The volume type must be DP-" -"VOL." -msgstr "" -"L'unité logique spécifiée %(ldev)s ne peut pas être gérée. Le type de volume " -"doit être DP-VOL." - -#, python-format -msgid "" -"The specified operation is not supported. The volume size must be the same " -"as the source %(type)s. (volume: %(volume_id)s)" -msgstr "" -"L'opération spécifiée n'est pas prise en charge. La taille du volume doit " -"être identique à la source %(type)s. (volume : %(volume_id)s)" - -msgid "The specified vdisk is mapped to a host." -msgstr "Le disque virtuel spécifié est mappé avec un hôte." - -msgid "The specified volume is mapped to a host." -msgstr "Le volume spécifié est mappé à un hôte." - -#, python-format -msgid "" -"The storage array password for %s is incorrect, please update the configured " -"password." -msgstr "" -"Le mot de passe de la matrice de stockage pour %s est incorrect, mettez à " -"jour le mot de passe configuré." - -#, python-format -msgid "The storage backend can be used. (config_group: %(config_group)s)" -msgstr "" -"Le back-end de stockage peut être utilisé. (groupe de configuration : " -"%(config_group)s)" - -#, python-format -msgid "" -"The storage device does not support %(prot)s. Please configure the device to " -"support %(prot)s or switch to a driver using a different protocol." -msgstr "" -"L'unité de stockage ne prend pas en charge %(prot)s. Configurez l'unité pour " -"la prise en charge de %(prot)s ou basculez sur un pilote utilisant un autre " -"protocole." - -#, python-format -msgid "" -"The striped meta count of %(memberCount)s is too small for volume: " -"%(volumeName)s, with size %(volumeSize)s." -msgstr "" -"Le compte segmenté de %(memberCount)s est trop faible pour le volume : " -"%(volumeName)s, avec la taille %(volumeSize)s." 
- -#, python-format -msgid "" -"The type of metadata: %(metadata_type)s for volume/snapshot %(id)s is " -"invalid." -msgstr "" -"Le type de métadonnées %(metadata_type)s pour le volume/l'instantané %(id)s " -"n'est pas valide." - -#, python-format -msgid "" -"The volume %(volume_id)s could not be extended. The volume type must be " -"Normal." -msgstr "" -"Le volume %(volume_id)s n'a pas pu être étendu. Le type de volume doit être " -"Normal." - -#, python-format -msgid "" -"The volume %(volume_id)s could not be unmanaged. The volume type must be " -"%(volume_type)s." -msgstr "" -"Le volume %(volume_id)s ne peut pas être non géré. Son type doit être " -"%(volume_type)s." - -#, python-format -msgid "The volume %(volume_id)s is managed successfully. (LDEV: %(ldev)s)" -msgstr "Le volume %(volume_id)s est géré. (unité logique : %(ldev)s)" - -#, python-format -msgid "The volume %(volume_id)s is unmanaged successfully. (LDEV: %(ldev)s)" -msgstr "Le volume %(volume_id)s n'est plus géré. (unité logique : %(ldev)s)" - -#, python-format -msgid "The volume %(volume_id)s to be mapped was not found." -msgstr "Le volume %(volume_id)s à mapper est introuvable." - -msgid "The volume cannot accept transfer in maintenance mode." -msgstr "Le volume ne peut pas accepter de transfert en mode maintenance." - -msgid "The volume cannot be attached in maintenance mode." -msgstr "Le volume ne peut pas être rattaché en mode maintenance." - -msgid "The volume cannot be detached in maintenance mode." -msgstr "Le volume ne peut pas être détaché en mode maintenance." - -msgid "The volume cannot be updated during maintenance." -msgstr "Le volume ne peut pas être mis à jour en phase de maintenance." - -msgid "The volume connection cannot be initialized in maintenance mode." -msgstr "" -"La connexion de volume ne peut pas être initialisée en mode maintenance." - -msgid "The volume driver requires the iSCSI initiator name in the connector." 
-msgstr "" -"Le pilote de volume a besoin du nom de demandeur iSCSI dans le connecteur." - -msgid "" -"The volume is currently busy on the 3PAR and cannot be deleted at this time. " -"You can try again later." -msgstr "" -"Le volume est actuellement occupé sur le 3PAR et ne peut pas être supprimé " -"pour le moment. Réessayez plus tard." - -msgid "The volume label is required as input." -msgstr "Le label du volume est requis comme entrée." - -#, python-format -msgid "There are no resources available for use. (resource: %(resource)s)" -msgstr "" -"Il n'y a aucune ressource disponible utilisable. (ressource : %(resource)s)" - -msgid "There are no valid ESX hosts." -msgstr "Il n'existe aucun hôte ESX valide." - -msgid "There are no valid datastores." -msgstr "Il n'y a aucun magasin de données valide." - -#, python-format -msgid "" -"There is no designation of the %(param)s. The specified storage is essential " -"to manage the volume." -msgstr "" -"Il n'existe aucune désignation du paramètre %(param)s. Le système de " -"stockage spécifié est essentiel pour gérer le volume." - -msgid "" -"There is no designation of the ldev. The specified ldev is essential to " -"manage the volume." -msgstr "" -"Il n'existe aucune désignation de l'unité logique. L'unité logique spécifiée " -"est essentielle pour gérer le volume." - -msgid "There is no metadata in DB object." -msgstr "Aucune métadonnée n'est présente dans l'objet BD." - -#, python-format -msgid "There is no share which can host %(volume_size)sG" -msgstr "Aucun partage ne pouvant héberger l'hôte %(volume_size)sG" - -#, python-format -msgid "There is no share which can host %(volume_size)sG." -msgstr "Il n'y a aucun partage pouvant héberger %(volume_size)sG." - -#, python-format -msgid "There is no such action: %s" -msgstr "Aucune action de ce type : %s" - -msgid "There is no virtual disk device." -msgstr "Il n'y a pas d'unité de disque virtuel." 
- -#, python-format -msgid "There was an error adding the volume to the remote copy group: %s." -msgstr "" -"Une erreur s'est produite lors de l'ajout du volume au groupe de copie à " -"distance : %s." - -#, python-format -msgid "There was an error creating the cgsnapshot: %s" -msgstr "Une erreur s'est produite lors de la création de cgsnapshot : %s" - -#, python-format -msgid "There was an error creating the remote copy group: %s." -msgstr "" -"Une erreur s'est produite lors de la création du groupe de copie à " -"distance : %s." - -#, python-format -msgid "" -"There was an error setting the sync period for the remote copy group: %s." -msgstr "" -"Une erreur s'est produite lors de la définition de la période de " -"synchronisation du groupe de copie à distance : %s." - -#, python-format -msgid "" -"There was an error setting up a remote copy group on the 3PAR arrays: " -"('%s'). The volume will not be recognized as replication type." -msgstr "" -"Une erreur s'est produite lors de la configuration d'un groupe de copie à " -"distance sur les matrices 3PAR : ('%s'). Le volume ne sera pas reconnu comme " -"type de réplication." - -#, python-format -msgid "" -"There was an error setting up a remote schedule on the LeftHand arrays: " -"('%s'). The volume will not be recognized as replication type." -msgstr "" -"Une erreur s'est produite lors de la configuration d'une planification à " -"distance sur les matrices LeftHand : ('%s'). Le volume ne sera pas reconnu " -"en tant que type de réplication." - -#, python-format -msgid "There was an error starting remote copy: %s." -msgstr "" -"Une erreur s'est produite lors du démarrage de la copie à distance : %s." 
- -#, python-format -msgid "There's no Gluster config file configured (%s)" -msgstr "Aucun fichier de configuration Gluster n'est configuré (%s)" - -#, python-format -msgid "There's no NFS config file configured (%s)" -msgstr "Aucun fichier de configuration NFS n'est configuré (%s)" - -#, python-format -msgid "" -"There's no Quobyte volume configured (%s). Example: quobyte:///" -"" -msgstr "" -"Aucun volume Quobyte n'est configuré (%s). Exemple : quobyte:///" -"" - -msgid "Thin provisioning not supported on this version of LVM." -msgstr "" -"L'allocation de ressources à la demande n'est pas prise en charge sur cette " -"version du gestionnaire de volume logique (LVM)." - -msgid "This driver does not support deleting in-use snapshots." -msgstr "" -"Ce pilote ne prend pas en charge la suppression d'instantanés en cours " -"d'utilisation." - -msgid "This driver does not support snapshotting in-use volumes." -msgstr "" -"Ce pilote ne prend pas en charge la capture d'instantanés de volumes en " -"cours d'utilisation." - -msgid "This request was rate-limited." -msgstr "Cette demande était limitée par la fréquence." - -#, python-format -msgid "" -"This system platform (%s) is not supported. This driver supports only Win32 " -"platforms." -msgstr "" -"Cette plateforme de système (%s) n'est pas prise en charge. Le pilote prend " -"en charge les plateformes Win32." - -#, python-format -msgid "Tier Policy Service not found for %(storageSystemName)s." -msgstr "" -"Service de règles de hiérarchisation introuvable pour %(storageSystemName)s." - -#, python-format -msgid "Timed out while waiting for Nova update for creation of snapshot %s." -msgstr "" -"Dépassement du délai d'attente de mise à jour Nova pour la création de " -"l'instantané %s." - -#, python-format -msgid "" -"Timed out while waiting for Nova update for deletion of snapshot %(id)s." -msgstr "" -"Dépassement du délai d'attente de mise à jour Nova pour la suppression de " -"l'instantané %(id)s." 
- -#, python-format -msgid "Timeout while calling %s " -msgstr "Délai d'attente dépassé lors de l'appel de %s " - -#, python-format -msgid "Timeout while requesting %(service)s API." -msgstr "Dépassement du délai lors de la demande de l'API %(service)s." - -#, python-format -msgid "Timeout while requesting capabilities from backend %(service)s." -msgstr "" -"Délai d'attente dépassé lors de la demande de fonctionnalités du back-end " -"%(service)s." - -#, python-format -msgid "Transfer %(transfer_id)s could not be found." -msgstr "Transfert %(transfer_id)s introuvable." - -#, python-format -msgid "" -"Transfer %(transfer_id)s: Volume id %(volume_id)s in unexpected state " -"%(status)s, expected awaiting-transfer" -msgstr "" -"Transfert %(transfer_id)s : ID volume %(volume_id)s dans un état inattendu " -"%(status)s, awaiting-transfer attendu" - -#, python-format -msgid "" -"Trying to import backup metadata from id %(meta_id)s into backup %(id)s." -msgstr "" -"Tentative d'importation des métadonnées de sauvegarde depuis l'ID " -"%(meta_id)s vers la sauvegarde %(id)s." - -#, python-format -msgid "" -"Tune volume task stopped before it was done: volume_name=%(volume_name)s, " -"task-status=%(status)s." -msgstr "" -"Tâche de réglage du volume arrêtée avant la fin : volume_name=" -"%(volume_name)s, task-status=%(status)s." - -#, python-format -msgid "" -"Type %(type_id)s is already associated with another qos specs: " -"%(qos_specs_id)s" -msgstr "" -"Type %(type_id)s déjà associé à d'autres spécifications QoS : " -"%(qos_specs_id)s" - -msgid "Type access modification is not applicable to public volume type." -msgstr "" -"La modification d'accès de type n'est pas applicable aux types de volume " -"publics." - -msgid "Type cannot be converted into NaElement." -msgstr "Impossible de convertir le type en NaElement." - -#, python-format -msgid "TypeError: %s" -msgstr "TypeError : %s" - -#, python-format -msgid "UUIDs %s are in both add and remove volume list." 
-msgstr "" -"Les UUID %s sont tous les deux dans la liste de volumes à ajouter et à " -"supprimer." - -#, python-format -msgid "Unable to access the Storwize back-end for volume %s." -msgstr "Impossible d'accéder au back-end Storwize pour le volume %s." - -msgid "Unable to access the backend storage via file handle." -msgstr "Impossible d'accéder au stockage dorsal via le descripteur de fichier." - -#, python-format -msgid "Unable to access the backend storage via the path %(path)s." -msgstr "" -"Impossible d'accéder au stockage d'arrière plan par le chemin %(path)s." - -#, python-format -msgid "Unable to add Cinder host to apphosts for space %(space)s" -msgstr "" -"Impossible d'ajouter l'hôte Cinder aux apphosts pour l'espace %(space)s" - -#, python-format -msgid "Unable to complete failover of %s." -msgstr "Impossible d'effectuer le basculement de %s." - -msgid "Unable to connect or find connection to host" -msgstr "Impossible d'établir ou de trouver une connexion à l'hôte" - -#, python-format -msgid "Unable to create consistency group %s" -msgstr "Impossible de créer le groupe de cohérence %s" - -msgid "Unable to create lock. Coordination backend not started." -msgstr "" -"Impossible de créer un verrou. Le back-end de coordination n'est pas démarré." - -#, python-format -msgid "" -"Unable to create or get default storage group for FAST policy: " -"%(fastPolicyName)s." -msgstr "" -"Impossible de créer ou d'obtenir le groupe de stockage par défaut pour la " -"règle FAST : %(fastPolicyName)s." - -#, python-format -msgid "Unable to create replica clone for volume %s." -msgstr "Impossible de créer un clone de réplication pour le volume %s." - -#, python-format -msgid "Unable to create the relationship for %s." -msgstr "Impossible de créer la relation pour %s." - -#, python-format -msgid "Unable to create volume %(name)s from %(snap)s." -msgstr "Impossible de créer le volume %(name)s depuis %(snap)s." 
- -#, python-format -msgid "Unable to create volume %(name)s from %(vol)s." -msgstr "Impossible de créer le volume %(name)s depuis %(vol)s." - -#, python-format -msgid "Unable to create volume %s" -msgstr "Impossible de créer le volume %s" - -msgid "Unable to create volume. Backend down." -msgstr "Impossible de créer le volume. Le back-end est arrêté." - -#, python-format -msgid "Unable to delete Consistency Group snapshot %s" -msgstr "Impossible de supprimer l'instantané de groupe de cohérence %s" - -#, python-format -msgid "Unable to delete snapshot %(id)s, status: %(status)s." -msgstr "Impossible de supprimer l'instantané %(id)s, état : %(status)s." - -#, python-format -msgid "Unable to delete snapshot policy on volume %s." -msgstr "Impossible de supprimer la stratégie d'instantané sur le volume %s." - -#, python-format -msgid "" -"Unable to delete the target volume for volume %(vol)s. Exception: %(err)s." -msgstr "" -"Impossible de supprimer le volume cible pour le volume %(vol)s. Exception : " -"%(err)s." - -msgid "" -"Unable to detach volume. Volume status must be 'in-use' and attach_status " -"must be 'attached' to detach." -msgstr "" -"Impossible de déconnecter le volume. Le statut du volume doit être 'in-use' " -"et le statut attach_status doit être 'attached' pour le déconnecter." - -#, python-format -msgid "" -"Unable to determine secondary_array from supplied secondary: %(secondary)s." -msgstr "" -"Impossible de déterminer secondary_array à partir de la matrice secondaire " -"fournie : %(secondary)s." - -#, python-format -msgid "Unable to determine snapshot name in Purity for snapshot %(id)s." -msgstr "" -"Impossible de déterminer le nom de l'instantané dans Purity pour " -"l'instantané %(id)s." - -msgid "Unable to determine system id." -msgstr "Impossible de déterminer l'ID système." - -msgid "Unable to determine system name." -msgstr "Impossible de déterminer le nom du système." 
- -#, python-format -msgid "" -"Unable to do manage snapshot operations with Purity REST API version " -"%(api_version)s, requires %(required_versions)s." -msgstr "" -"Impossible d'effectuer des opérations d'instantané avec la version d'API " -"REST de Purity %(api_version)s, ceci nécessite %(required_versions)s." - -#, python-format -msgid "" -"Unable to do replication with Purity REST API version %(api_version)s, " -"requires one of %(required_versions)s." -msgstr "" -"Impossible d'effectuer de réplication avec l'API REST Purity version " -"%(api_version)s, l'une des %(required_versions)s est nécessaire." - -#, python-format -msgid "Unable to establish the partnership with the Storwize cluster %s." -msgstr "Impossible d'établir un partenariat avec le cluster Storwize %s." - -#, python-format -msgid "Unable to extend volume %s" -msgstr "Impossible d'étendre le volume %s" - -#, python-format -msgid "" -"Unable to fail-over the volume %(id)s to the secondary back-end, because the " -"replication relationship is unable to switch: %(error)s" -msgstr "" -"Impossible de faire basculer le volume %(id)s sur le back-end secondaire, " -"car la relation de réplication ne peut pas être basculée : %(error)s" - -msgid "" -"Unable to failback to \"default\", this can only be done after a failover " -"has completed." -msgstr "" -"Impossible de rebasculer sur \"default\", cette opération n'est possible " -"qu'après la fin d'un basculement." - -#, python-format -msgid "Unable to failover to replication target:%(reason)s)." -msgstr "Basculement impossible vers la cible de réplication : %(reason)s)." - -msgid "Unable to fetch connection information from backend." -msgstr "" -"Impossible d'extraire des informations de connexion depuis le back-end." 
- -#, python-format -msgid "Unable to fetch connection information from backend: %(err)s" -msgstr "" -"Impossible d'extraire des informations de connexion depuis le back-end : " -"%(err)s" - -#, python-format -msgid "Unable to find Purity ref with name=%s" -msgstr "La référence Purity avec name=%s est introuvable " - -#, python-format -msgid "Unable to find Volume Group: %(vg_name)s" -msgstr "Impossible de trouver le groupe de volumes : %(vg_name)s" - -msgid "Unable to find failover target, no secondary targets configured." -msgstr "" -"Cible de basculement introuvable, aucune cible secondaire n'a été configurée." - -msgid "Unable to find iSCSI mappings." -msgstr "Impossible de trouver des mappages iSCSI." - -#, python-format -msgid "Unable to find ssh_hosts_key_file: %s" -msgstr "ssh_hosts_key_file introuvable: %s" - -msgid "Unable to find system log file!" -msgstr "Fichier de trace système non trouvé!" - -#, python-format -msgid "" -"Unable to find viable pg snapshot to use forfailover on selected secondary " -"array: %(id)s." -msgstr "" -"Impossible de trouver un instantané pg viable à utiliser pour le basculement " -"sur la matrice secondaire sélectionnée : %(id)s." - -#, python-format -msgid "" -"Unable to find viable secondary array fromconfigured targets: %(targets)s." -msgstr "" -"Impossible de trouver une matrice secondaire viable à partir des cibles " -"configurées : %(targets)s." - -#, python-format -msgid "Unable to find volume %s" -msgstr "Impossible de trouver le volume %s" - -#, python-format -msgid "Unable to get a block device for file '%s'" -msgstr "Impossible d'obtenir l'unité par bloc pour le fichier '%s'" - -#, python-format -msgid "" -"Unable to get configuration information necessary to create a volume: " -"%(errorMessage)s." -msgstr "" -"Impossible d'obtenir les informations de configuration nécessaires pour " -"créer un volume : %(errorMessage)s." - -msgid "Unable to get corresponding record for pool." 
-msgstr "Impossible d'obtenir l'enregistrement correspondant pour le pool." - -#, python-format -msgid "" -"Unable to get information on space %(space)s, please verify that the cluster " -"is running and connected." -msgstr "" -"Impossible d'obtenir des informations sur l'espace %(space)s, vérifiez que " -"le cluster est en cours d'exécution et connecté." - -msgid "" -"Unable to get list of IP addresses on this host, check permissions and " -"networking." -msgstr "" -"Impossible d'obtenir la liste des adresses IP sur cet hôte, vérifiez les " -"autorisations et le réseau." - -msgid "" -"Unable to get list of domain members, check that the cluster is running." -msgstr "" -"Impossible d'obtenir la liste des membres du domaine, vérifiez que le " -"cluster est en cours d'exécution." - -msgid "" -"Unable to get list of spaces to make new name. Please verify the cluster is " -"running." -msgstr "" -"Impossible d'obtenir la liste des espaces pour en créer un nouveau. Vérifiez " -"que le cluster est en cours d'exécution. " - -#, python-format -msgid "Unable to get stats for backend_name: %s" -msgstr "Impossible d'obtenir des statistiques pour backend_name : %s" - -msgid "Unable to get storage volume from job." -msgstr "Impossible d'extraire le volume de stockage à partir de la tâche." - -#, python-format -msgid "Unable to get target endpoints for hardwareId %(hardwareIdInstance)s." -msgstr "" -"Impossible d'obtenir les noeuds finals pour hardwareId " -"%(hardwareIdInstance)s." - -msgid "Unable to get the name of the masking view." -msgstr "Impossible d'obtenir le nom de la vue de masquage." - -msgid "Unable to get the name of the portgroup." -msgstr "Impossible d'obtenir le nom de portgroup." - -#, python-format -msgid "Unable to get the replication relationship for volume %s." -msgstr "Impossible d'obtenir la relation de réplication pour le volume %s." - -#, python-format -msgid "" -"Unable to import volume %(deviceId)s to cinder. 
It is the source volume of " -"replication session %(sync)s." -msgstr "" -"Impossible d'importer le volume %(deviceId)s dans Cinder. Il s'agit du " -"volume source de la session de réplication %(sync)s." - -#, python-format -msgid "" -"Unable to import volume %(deviceId)s to cinder. The external volume is not " -"in the pool managed by current cinder host." -msgstr "" -"Impossible d'importer le volume %(deviceId)s dans Cinder. Le volume externe " -"n'est pas dans le pool géré par l'hôte Cinder actuel." - -#, python-format -msgid "" -"Unable to import volume %(deviceId)s to cinder. Volume is in masking view " -"%(mv)s." -msgstr "" -"Impossible d'importer le volume %(deviceId)s dans Cinder. Le volume est en " -"vue de masquage %(mv)s." - -#, python-format -msgid "Unable to load CA from %(cert)s %(e)s." -msgstr "" -"Impossible de charger l'autorité de certification depuis %(cert)s %(e)s." - -#, python-format -msgid "Unable to load cert from %(cert)s %(e)s." -msgstr "Impossible de charger le certificat depuis %(cert)s %(e)s." - -#, python-format -msgid "Unable to load key from %(cert)s %(e)s." -msgstr "Impossible de charger la clé depuis %(cert)s %(e)s." - -#, python-format -msgid "Unable to locate account %(account_name)s on Solidfire device" -msgstr "Compte %(account_name)s introuvable sur l'unité Solidfire" - -#, python-format -msgid "Unable to locate an SVM that is managing the IP address '%s'" -msgstr "" -"Impossible de localiser une machine virtuelle de stockage (SVM) qui gère " -"l'adresse IP '%s'" - -#, python-format -msgid "Unable to locate specified replay profiles %s " -msgstr "Impossible de localiser les profils de relecture indiqués %s " - -#, python-format -msgid "" -"Unable to manage existing volume. Volume %(volume_ref)s already managed." -msgstr "" -"Impossible de gérer le volume existant. Le volume %(volume_ref)s est déjà " -"géré." 
- -#, python-format -msgid "Unable to manage volume %s" -msgstr "Impossible de gérer le volume %s" - -msgid "Unable to map volume" -msgstr "Impossible de mapper le volume" - -msgid "Unable to map volume." -msgstr "Impossible de mapper le volume." - -msgid "Unable to parse attributes." -msgstr "Impossible d'analyser les attributs." - -#, python-format -msgid "" -"Unable to promote replica to primary for volume %s. No secondary copy " -"available." -msgstr "" -"Impossible de promouvoir la réplique en réplique primaire pour le volume %s. " -"Aucune copie secondaire disponible." - -msgid "" -"Unable to re-use a host that is not managed by Cinder with " -"use_chap_auth=True," -msgstr "" -"Impossible de réutiliser un hôte qui n'est pas géré par Cinder avec " -"use_chap_auth=True," - -msgid "Unable to re-use host with unknown CHAP credentials configured." -msgstr "" -"Impossible de réutiliser l'hôte avec des données d'identification CHAP " -"inconnues configurées." - -#, python-format -msgid "Unable to rename volume %(existing)s to %(newname)s" -msgstr "Impossible de renommer le volume %(existing)s en %(newname)s" - -#, python-format -msgid "Unable to retrieve snapshot group with id of %s." -msgstr "Impossible de récupérer le groupe d'instantanés avec l'ID %s." - -#, python-format -msgid "" -"Unable to retype %(specname)s, expected to receive current and requested " -"%(spectype)s values. Value received: %(spec)s" -msgstr "" -"Impossible de modifier le type de %(specname)s, réception attendue des " -"valeurs en cours et demandées %(spectype)s. Valeur reçue : %(spec)s" - -#, python-format -msgid "" -"Unable to retype: A copy of volume %s exists. Retyping would exceed the " -"limit of 2 copies." -msgstr "" -"Impossible de modifier le type : une copie du volume %s existe. La " -"modification de type dépasserait la limite de 2 copies." - -#, python-format -msgid "" -"Unable to retype: Current action needs volume-copy, it is not allowed when " -"new type is replication. 
Volume = %s" -msgstr "" -"Impossible de modifier le type : l'action en cours a besoin du paramètre " -"volume_copy. Elle est interdite lorsque le nouveau type est une réplication. " -"Volume = %s" - -#, python-format -msgid "" -"Unable to set up mirror mode replication for %(vol)s. Exception: %(err)s." -msgstr "" -"Impossible de configurer la réplication en mode miroir pour %(vol)s. " -"Exception : %(err)s." - -#, python-format -msgid "Unable to snap Consistency Group %s" -msgstr "Impossible de créer un instantané du groupe de cohérence %s" - -msgid "Unable to terminate volume connection from backend." -msgstr "Impossible de terminer la connexion au volume depuis le back-end." - -#, python-format -msgid "Unable to terminate volume connection: %(err)s" -msgstr "Impossible de terminer la connexion au volume : %(err)s" - -#, python-format -msgid "Unable to update consistency group %s" -msgstr "Impossible de mettre à jour le groupe de cohérence %s" - -#, python-format -msgid "" -"Unable to verify initiator group: %(igGroupName)s in masking view " -"%(maskingViewName)s. " -msgstr "" -"Impossible de vérifier le groupe de demandeurs : %(igGroupName)s dans la vue " -"de masquage %(maskingViewName)s. " - -msgid "Unacceptable parameters." -msgstr "Paramètres inacceptables." - -#, python-format -msgid "" -"Unexecpted mapping status %(status)s for mapping %(id)s. Attributes: " -"%(attr)s." -msgstr "" -"Statut de mappage %(status)s inattendu pour le mappage %(id)s. Attributs : " -"%(attr)s." - -#, python-format -msgid "" -"Unexpected CLI response: header/row mismatch. header: %(header)s, row: " -"%(row)s." -msgstr "" -"Réponse CLI imprévue : non concordance d'en-tête/ligne. en-tête : " -"%(header)s, ligne : %(row)s." - -#, python-format -msgid "" -"Unexpected mapping status %(status)s for mapping%(id)s. Attributes: %(attr)s." -msgstr "" -"Statut de mappage inattendu %(status)s pour le mappage %(id)s. Attributs : " -"%(attr)s." - -#, python-format -msgid "Unexpected output. 
Expected [%(expected)s] but received [%(output)s]" -msgstr "Sortie inattendue. [%(expected)s] attendu mais [%(output)s] reçu" - -msgid "Unexpected response from Nimble API" -msgstr "Code de réponse inattendu de l'API Nimble" - -msgid "Unexpected response from Tegile IntelliFlash API" -msgstr "Réponse inattendue de l'API Tegile IntelliFlash" - -msgid "Unexpected status code" -msgstr "Code de statut inattendu" - -#, python-format -msgid "" -"Unexpected status code from the switch %(switch_id)s with protocol " -"%(protocol)s for url %(page)s. Error: %(error)s" -msgstr "" -"Code de statut inattendu provenant du commutateur %(switch_id)s avec le " -"protocole %(protocol)s pour l'URL %(page)s. Erreur : %(error)s" - -msgid "Unknown Gluster exception" -msgstr "Exception Gluster inconnue" - -msgid "Unknown NFS exception" -msgstr "Exception NFS inconnue" - -msgid "Unknown RemoteFS exception" -msgstr "Exception RemoteFS inconnue" - -msgid "Unknown SMBFS exception." -msgstr "Exception SMBFS inconnue." - -msgid "Unknown Virtuozzo Storage exception" -msgstr "Exception Virtuozzo Storage inconnue" - -msgid "Unknown action" -msgstr "Action inconnue" - -#, python-format -msgid "" -"Unknown if the volume: %s to be managed is already being managed by Cinder. " -"Aborting manage volume. Please add 'cinder_managed' custom schema property " -"to the volume and set its value to False. Alternatively, Set the value of " -"cinder config policy 'zfssa_manage_policy' to 'loose' to remove this " -"restriction." -msgstr "" -"Inconnu si le volume : %s à gérer est déjà géré par Cinder. Abandon de " -"l'opération gérer le volume. Ajoutez la propriété de schéma personnalisé " -"'cinder_managed' au volume et définissez sa valeur sur False. Autrement, " -"définissez la valeur de la stratégie de configuration cinder " -"'zfssa_manage_policy' sur 'loose' pour supprimer cette restriction." - -#, python-format -msgid "" -"Unknown if the volume: %s to be managed is already being managed by Cinder. 
" -"Aborting manage volume. Please add 'cinder_managed' custom schema property " -"to the volume and set its value to False. Alternatively, set the value of " -"cinder config policy 'zfssa_manage_policy' to 'loose' to remove this " -"restriction." -msgstr "" -"Inconnu si le volume : %s à gérer est déjà géré par Cinder. Abandon de " -"l'opération gérer le volume. Ajoutez la propriété de schéma personnalisé " -"'cinder_managed' au volume et définissez sa valeur sur False. Autrement, " -"définissez la valeur de la stratégie de configuration cinder " -"'zfssa_manage_policy' sur 'loose' pour supprimer cette restriction." - -#, python-format -msgid "Unknown operation %s." -msgstr "Opération inconnue %s." - -#, python-format -msgid "Unknown or unsupported command %(cmd)s" -msgstr "Commande inconnue ou non prise en charge %(cmd)s" - -#, python-format -msgid "Unknown protocol: %(protocol)s." -msgstr "Protocole inconnu : %(protocol)s." - -#, python-format -msgid "Unknown quota resources %(unknown)s." -msgstr "Ressources de quota inconnues %(unknown)s." - -msgid "Unknown sort direction, must be 'desc' or 'asc'" -msgstr "Sens de tri inconnu, doit être 'desc' ou 'asc'" - -msgid "Unknown sort direction, must be 'desc' or 'asc'." -msgstr "Sens de tri inconnu, doit être 'desc' ou 'asc'." - -msgid "Unmanage and cascade delete options are mutually exclusive." -msgstr "Les options unmanage et cascade delete s'excluent mutuellement." - -msgid "Unmanage volume not implemented." -msgstr "" -"La fonction consistant à ne plus gérer un volume (unmanage volume) n'est pas " -"implémentée." - -msgid "Unmanaging of snapshots from 'failed-over' volumes is not allowed." -msgstr "" -"Arrêter de gérer des instantanés dans des volumes basculés ('failed-over') " -"n'est pas autorisé." - -msgid "Unmanaging of snapshots from failed-over volumes is not allowed." -msgstr "" -"Arrêter de gérer des instantanés dans des volumes basculés n'est pas " -"autorisé." 
- -#, python-format -msgid "Unrecognized QOS keyword: \"%s\"" -msgstr "Mot clé QOS non reconnu : \"%s\"" - -#, python-format -msgid "Unrecognized backing format: %s" -msgstr "Format de sauvegarde non identifié : %s" - -#, python-format -msgid "Unrecognized read_deleted value '%s'" -msgstr "Valeur read_deleted non reconnue '%s'" - -#, python-format -msgid "Unset gcs options: %s" -msgstr "Options gcs non définies : %s" - -msgid "Unsupported Content-Type" -msgstr "Type de contenu non pris en charge" - -msgid "" -"Unsupported Data ONTAP version. Data ONTAP version 7.3.1 and above is " -"supported." -msgstr "" -"Version de Data ONTAP non prise en charge. Data ONTAP versions 7.3.1 et " -"supérieures sont prises en charge." - -#, python-format -msgid "Unsupported backup metadata version (%s)" -msgstr "Version des métadonnées de sauvegarde non prise en charge (%s)" - -msgid "Unsupported backup metadata version requested" -msgstr "Version des métadonnées de sauvegarde non prise en charge demandée" - -msgid "Unsupported backup verify driver" -msgstr "Sauvegarde non prise en charge ; vérifiez le pilote" - -#, python-format -msgid "" -"Unsupported firmware on switch %s. Make sure switch is running firmware v6.4 " -"or higher" -msgstr "" -"Microprogramme non pris en charge sur le commutateur %s. Assurez-vous que le " -"commutateur exécute le microprogramme 6.4 ou version supérieure" - -#, python-format -msgid "Unsupported volume format: %s " -msgstr "Format de volume non pris en charge : %s " - -msgid "Update QoS policy error." -msgstr "Erreur lors de la mise à jour de la stratégie QoS." - -msgid "" -"Update and delete quota operations can only be made by an admin of immediate " -"parent or by the CLOUD admin." -msgstr "" -"Les opérations de mise à jour ou de suppression de quota ne peuvent être " -"effectuées que par un administrateur de parent immédiat ou par " -"l'administrateur CLOUD." 
- -msgid "" -"Update and delete quota operations can only be made to projects in the same " -"hierarchy of the project in which users are scoped to." -msgstr "" -"Les opérations de mise à jour ou de suppression de quota ne peuvent porter " -"que sur des projets dans la même hiérarchie de projet que celle définie pour " -"la portée utilisateurs." - -msgid "Update list, doesn't include volume_id" -msgstr "La liste de mise à jour ne comprend pas volume_id" - -msgid "Updated At" -msgstr "Mis à jour à" - -msgid "Upload to glance of attached volume is not supported." -msgstr "" -"Le téléchargement vers Glance du volume connecté n'est pas pris en charge." - -msgid "Use ALUA to associate initiator to host error." -msgstr "" -"Erreur lors de l'utilisation de ALUA pour associer l'initiateur à l'hôte. " - -msgid "" -"Use CHAP to associate initiator to host error. Please check the CHAP " -"username and password." -msgstr "" -"Erreur lors de l'utilisation de CHAP pour associer l'initiateur à l'hôte. " -"Vérifiez le nom d'utilisateur et le mot de passe CHAP." - -msgid "User ID" -msgstr "ID Utilisateur" - -msgid "User does not have admin privileges" -msgstr "L’utilisateur n'a pas les privilèges administrateur" - -msgid "User not authorized to perform WebDAV operations." -msgstr "Utilisateur non autorisé à exécuter des opérations WebDAV." - -msgid "UserName is not configured." -msgstr "UserName n'est pas configuré." - -msgid "UserPassword is not configured." -msgstr "UserPassword n'est pas configuré." - -msgid "V2 rollback, volume is not in any storage group." -msgstr "Rétromigration vers V2, le volume n'est dans aucun groupe de stockage." - -msgid "V3 rollback" -msgstr "Rétromigration V3" - -msgid "VF is not enabled." -msgstr "VF n'est pas activé." - -#, python-format -msgid "VV Set %s does not exist." -msgstr "VV Set %s inexistant." 
- -#, python-format -msgid "Valid consumer of QoS specs are: %s" -msgstr "Client valide de spécifications QoS : %s" - -#, python-format -msgid "Valid control location are: %s" -msgstr "Emplacement de contrôle valide : %s" - -#, python-format -msgid "Validate volume connection failed (error: %(err)s)." -msgstr "La validation de la connexion de volume a échoué (erreur : %(err)s)." - -#, python-format -msgid "" -"Value \"%(value)s\" is not valid for configuration option \"%(option)s\"" -msgstr "" -"La valeur \"%(value)s\" n'est pas valide pour l'option de configuration " -"\"%(option)s\"" - -#, python-format -msgid "Value %(param)s for %(param_string)s is not a boolean." -msgstr "La valeur %(param)s pour %(param_string)s n'est pas de type booléen." - -msgid "Value required for 'scality_sofs_config'" -msgstr "Valeur requise pour 'scality_sofs_config'" - -#, python-format -msgid "ValueError: %s" -msgstr "ValueError : %s" - -#, python-format -msgid "Vdisk %(name)s not involved in mapping %(src)s -> %(tgt)s." -msgstr "" -"Le disque virtuel %(name)s n'est pas impliqué dans le mappage %(src)s -> " -"%(tgt)s." - -#, python-format -msgid "" -"Version %(req_ver)s is not supported by the API. Minimum is %(min_ver)s and " -"maximum is %(max_ver)s." -msgstr "" -"La version %(req_ver)s n'est pas prise en charge par l'API. Minimum : " -"%(min_ver)s et maximum : %(max_ver)s." - -#, python-format -msgid "VersionedObject %s cannot retrieve object by id." -msgstr "VersionedObject %s ne parvient pas à récupérer l'objet par ID." - -#, python-format -msgid "VersionedObject %s does not support conditional update." -msgstr "" -"VersionedObject %s ne prend pas en charge la mise à jour conditionnelle." - -#, python-format -msgid "Virtual volume '%s' doesn't exist on array." -msgstr "Le volume virtuel '%s' n'existe pas dans la matrice." - -#, python-format -msgid "Vol copy job for dest %s failed." -msgstr "Echec du travail de copie du vol. pour la dest %s." 
- -#, python-format -msgid "Volume %(deviceID)s not found." -msgstr "Volume %(deviceID)s introuvable." - -#, python-format -msgid "" -"Volume %(name)s not found on the array. Cannot determine if there are " -"volumes mapped." -msgstr "" -"Volume %(name)s introuvable dans la matrice. Impossible de déterminer si des " -"volumes sont mappés." - -#, python-format -msgid "Volume %(name)s was created in VNX, but in %(state)s state." -msgstr "Le volume %(name)s a été créé dans VNX, mais à l'état %(state)s." - -#, python-format -msgid "Volume %(vol)s could not be created in pool %(pool)s." -msgstr "Le volume %(vol)s n'a pas pu être créé dans le pool %(pool)s." - -#, python-format -msgid "Volume %(vol1)s does not match with snapshot.volume_id %(vol2)s." -msgstr "Le volume %(vol1)s ne correspond pas à snapshot.volume_id %(vol2)s." - -#, python-format -msgid "" -"Volume %(vol_id)s status must be available to update readonly flag, but " -"current status is: %(vol_status)s." -msgstr "" -"L'état du volume %(vol_id)s doit être Disponible pour la mise à jour de " -"l'indicateur readonly, mais l'état actuel est : %(vol_status)s." - -#, python-format -msgid "" -"Volume %(vol_id)s status must be available, but current status is: " -"%(vol_status)s." -msgstr "" -"Le volume %(vol_id)s doit être dans un état de disponibilité, mais l'état en " -"cours est : %(vol_status)s." - -#, python-format -msgid "Volume %(volume_id)s could not be found." -msgstr "Le volume %(volume_id)s est introuvable." - -#, python-format -msgid "" -"Volume %(volume_id)s has no administration metadata with key " -"%(metadata_key)s." -msgstr "" -"Le volume %(volume_id)s n'a aucune métadonnée d'administration avec la clé " -"%(metadata_key)s." - -#, python-format -msgid "Volume %(volume_id)s has no metadata with key %(metadata_key)s." -msgstr "" -"Le volume %(volume_id)s ne comporte pas de métadonnées avec la clé " -"%(metadata_key)s." 
- -#, python-format -msgid "" -"Volume %(volume_id)s is currently mapped to unsupported host group %(group)s" -msgstr "" -"Le volume %(volume_id)s est actuellement mappé à un groupe d'hôtes non pris " -"en charge : %(group)s" - -#, python-format -msgid "Volume %(volume_id)s is not currently mapped to host %(host)s" -msgstr "Le volume %(volume_id)s n'est pas actuellement mappé à l'hôte %(host)s" - -#, python-format -msgid "Volume %(volume_id)s is still attached, detach volume first." -msgstr "" -"Le volume %(volume_id)s est toujours attaché. Détachez-le préalablement." - -#, python-format -msgid "Volume %(volume_id)s replication error: %(reason)s" -msgstr "Erreur de réplication du volume %(volume_id)s : %(reason)s" - -#, python-format -msgid "Volume %(volume_name)s is busy." -msgstr "Le volume %(volume_name)s est occupé." - -#, python-format -msgid "Volume %s could not be created from source volume." -msgstr "Le volume %s n'a pas pu être créé à partir du volume source." - -#, python-format -msgid "Volume %s could not be created on shares." -msgstr "Le volume %s n'a pas pu être créé sur les partages." - -#, python-format -msgid "Volume %s could not be created." -msgstr "Le volume %s n'a pas pu être créé." - -#, python-format -msgid "Volume %s does not exist in Nexenta SA" -msgstr "Le volume %s n'existe pas dans Nexenta SA" - -#, python-format -msgid "Volume %s does not exist in Nexenta Store appliance" -msgstr "Le volume %s n'existe pas dans Nexenta Store Appliance" - -#, python-format -msgid "Volume %s does not exist on the array." -msgstr "Le volume %s n'existe pas sur la matrice." - -#, python-format -msgid "Volume %s does not have provider_location specified, skipping." -msgstr "" -"provider_location n'a pas été spécifié pour le volume %s. Il sera ignoré." - -#, python-format -msgid "Volume %s doesn't exist on array." -msgstr "Le volume %s n'existe pas sur la matrice." - -#, python-format -msgid "Volume %s doesn't exist on the ZFSSA backend." 
-msgstr "Le volume %s n'existe pas sur le back-end ZFSSA." - -#, python-format -msgid "Volume %s is already managed by OpenStack." -msgstr "Le volume %s est déjà géré par OpenStack." - -#, python-format -msgid "" -"Volume %s is not of replicated type. This volume needs to be of a volume " -"type with the extra spec replication_enabled set to ' True' to support " -"replication actions." -msgstr "" -"Le volume %s n'est pas du type répliqué. Ce volume doit être d'un type de " -"volume avec la spécification supplémentaire (extra spec) définie avec ' " -"True' pour prendre en charge les actions de réplication." - -#, python-format -msgid "" -"Volume %s is online. Set volume to offline for managing using OpenStack." -msgstr "" -"Le volume %s est en ligne (online). Définissez le volume comme étant hors " -"ligne (offline) pour le gérer avec OpenStack." - -#, python-format -msgid "Volume %s must not be part of a consistency group." -msgstr "Le volume %s ne doit pas faire partie d'un groupe de cohérence." - -#, python-format -msgid "Volume %s not found." -msgstr "Le volume %s est introuvable." - -#, python-format -msgid "Volume %s: Error trying to extend volume" -msgstr "Volume %s : Erreur lors de la tentative d'extension du volume" - -#, python-format -msgid "Volume (%s) already exists on array" -msgstr "Le volume (%s) existe déjà dans la matrice" - -#, python-format -msgid "Volume (%s) already exists on array." -msgstr "Le volume (%s) existe déjà dans la matrice." - -#, python-format -msgid "Volume Group %s does not exist" -msgstr "Le groupe de volumes %s n'existe pas" - -#, python-format -msgid "Volume Type %(id)s already exists." -msgstr "Le type de volume %(id)s existe déjà." - -#, python-format -msgid "" -"Volume Type %(volume_type_id)s deletion is not allowed with volumes present " -"with the type." -msgstr "" -"Suppression du type de volume %(volume_type_id)s interdite avec les volumes " -"de ce type." 
- -#, python-format -msgid "" -"Volume Type %(volume_type_id)s has no extra specs with key " -"%(extra_specs_key)s." -msgstr "" -"Le type de volume %(volume_type_id)s ne comporte pas de spécs supp avec la " -"clé %(extra_specs_key)s." - -msgid "Volume Type id must not be None." -msgstr "L'ID de type de volume ne peut pas être None." - -#, python-format -msgid "" -"Volume [%(cb_vol)s] was not found at CloudByte storage corresponding to " -"OpenStack volume [%(ops_vol)s]." -msgstr "" -"Volume [%(cb_vol)s] introuvable sur le stockage CloudByte correspondant au " -"volume OpenStack [%(ops_vol)s]." - -#, python-format -msgid "Volume [%s] not found in CloudByte storage." -msgstr "Volume [%s] introuvable dans le système de stockage CloudByte." - -#, python-format -msgid "Volume attachment could not be found with filter: %(filter)s ." -msgstr "" -"La pièce jointe du volume est introuvable avec le filtre : %(filter)s ." - -#, python-format -msgid "Volume backend config is invalid: %(reason)s" -msgstr "La configuration de back-end du volume n'est pas valide : %(reason)s" - -msgid "Volume by this name already exists" -msgstr "Un volume ayant ce nom existe déjà" - -msgid "Volume cannot be restored since it contains snapshots." -msgstr "Impossible de restaurer le volume car il contient des instantanés." - -msgid "Volume create failed while extracting volume ref." -msgstr "" -"La création du volume a échoué lors de l'extraction de la référence du " -"volume (volume ref)." - -#, python-format -msgid "Volume device file path %s does not exist." -msgstr "" -"Le chemin d'accès du fichier du périphérique de volume %s n'existe pas." - -#, python-format -msgid "Volume device not found at %(device)s." -msgstr "Périphérique de volume introuvable à %(device)s." - -#, python-format -msgid "Volume driver %s not initialized." -msgstr "Pilote de volume %s non initialisé." - -msgid "Volume driver not ready." -msgstr "Pilote de volume non prêt." 
- -#, python-format -msgid "Volume driver reported an error: %(message)s" -msgstr "Le pilote de volume a signalé une erreur : %(message)s" - -msgid "Volume has a temporary snapshot that can't be deleted at this time." -msgstr "" -"Le volume contient un instantané temporaire qui ne peut pas être supprimé " -"pour l'instant." - -msgid "Volume has children and cannot be deleted!" -msgstr "Ce volume a des enfants et ne peut pas être supprimé !" - -#, python-format -msgid "Volume is attached to a server. (%s)" -msgstr "Le volume est attaché à un serveur. (%s)" - -msgid "Volume is in-use." -msgstr "Volume en cours d'utilisation." - -msgid "Volume is not available." -msgstr "Le volume n'est pas disponible." - -msgid "Volume is not local to this node" -msgstr "Le volume n'est pas local sur ce noeud" - -msgid "Volume is not local to this node." -msgstr "Le volume n'est pas local sur ce noeud." - -msgid "" -"Volume metadata backup requested but this driver does not yet support this " -"feature." -msgstr "" -"La sauvegarde des métadonnées de volume est demandée mais ce pilote ne prend " -"pas en charge cette fonction pour le moment." - -#, python-format -msgid "Volume migration failed: %(reason)s" -msgstr "Échec de la migration du volume : %(reason)s" - -msgid "Volume must be available" -msgstr "Le volume doit être disponible." - -msgid "Volume must be in the same availability zone as the snapshot" -msgstr "" -"Le volume doit être dans la même zone de disponibilité que l'instantané" - -msgid "Volume must be in the same availability zone as the source volume" -msgstr "" -"Le volume doit être dans la même zone de disponibilité que le volume source" - -msgid "Volume must have a volume type" -msgstr "Le volume doit comporter un type de volume" - -msgid "Volume must not be replicated." -msgstr "Le volume ne doit pas être répliqué." - -msgid "Volume must not have snapshots." -msgstr "Le volume ne doit pas avoir d'instantanés." 
- -#, python-format -msgid "Volume not found for instance %(instance_id)s." -msgstr "Volume introuvable pour l'instance %(instance_id)s." - -msgid "Volume not found on configured storage backend." -msgstr "Volume introuvable sur le back-end de stockage configuré." - -msgid "" -"Volume not found on configured storage backend. If your volume name contains " -"\"/\", please rename it and try to manage again." -msgstr "" -"Volume introuvable sur le back-end de stockage configuré. Si le nom de votre " -"volume contient une barre oblique \"/\", renommez-le puis renouvelez " -"l'opération." - -msgid "Volume not found on configured storage pools." -msgstr "Volume introuvable sur les pools de stockage configurés." - -msgid "Volume not found." -msgstr "Volume introuvable." - -msgid "Volume not unique." -msgstr "Le volume n'est pas unique." - -msgid "Volume not yet assigned to host." -msgstr "Volume non encore affecté à l'hôte." - -msgid "Volume reference must contain source-name element." -msgstr "La référence du volume doit contenir l'élément source-name." - -#, python-format -msgid "Volume replication for %(volume_id)s could not be found." -msgstr "Réplication du volume pour %(volume_id)s introuvable." - -#, python-format -msgid "Volume service %s failed to start." -msgstr "Le démarrage du service de volume %s n'a pas abouti." - -msgid "Volume should have agent-type set as None." -msgstr "Le volume doit avoir la valeur agent-type définie à None." - -#, python-format -msgid "" -"Volume size %(volume_size)sGB cannot be smaller than the image minDisk size " -"%(min_disk)sGB." -msgstr "" -"La taille %(volume_size)s Go du volume ne peut pas être inférieure à la " -"taille de l'image minDisk %(min_disk)s Go." - -#, python-format -msgid "Volume size '%(size)s' must be an integer and greater than 0" -msgstr "La taille de volume '%(size)s' doit être un entier et supérieure à 0." 
- -#, python-format -msgid "" -"Volume size '%(size)s'GB cannot be smaller than original volume size " -"%(source_size)sGB. They must be >= original volume size." -msgstr "" -"La taille de volume '%(size)s' Go ne peut pas être inférieure à la taille de " -"volume d'origine %(source_size)s Go. Elle doit être supérieure ou égale à " -"la taille du volume d'origine." - -#, python-format -msgid "" -"Volume size '%(size)s'GB cannot be smaller than the snapshot size " -"%(snap_size)sGB. They must be >= original snapshot size." -msgstr "" -"La taille de volume '%(size)s' Go ne peut pas être inférieure à la taille " -"d'instantané %(snap_size)s Go. Elle doit être supérieure ou égale à la " -"taille de l'instantané d'origine." - -msgid "Volume size increased since the last backup. Do a full backup." -msgstr "" -"Taille de volume accrue depuis la dernière sauvegarde. Effectuez une " -"sauvegarde intégrale." - -msgid "Volume size must be a multiple of 1 GB." -msgstr "La taille du volume doit être un multiple de 1 Go." - -msgid "Volume size must multiple of 1 GB." -msgstr "La taille du volume doit être un multiple de 1 Go." - -#, python-format -msgid "Volume status must be \"available\" or \"in-use\" for snapshot. (is %s)" -msgstr "" -"L'état du volume doit être \"disponible\" ou \"en usage\" pour l'instantané. " -"(état en cours : %s)" - -msgid "Volume status must be \"available\" or \"in-use\"." -msgstr "L'état du volume doit être \"disponible\" ou \"en usage\"." - -#, python-format -msgid "Volume status must be %s to reserve." -msgstr "Le statut du volume doit être %s pour l'opération de réservation." - -msgid "Volume status must be 'available'." -msgstr "L'état du volume doit être 'disponible'." - -msgid "Volume to Initiator Group mapping already exists" -msgstr "Le volume pour le mappage du groupe initiateur existe déjà" - -#, python-format -msgid "" -"Volume to be backed up must be available or in-use, but the current status " -"is \"%s\"." 
-msgstr "" -"Le volume à sauvegarder doit être disponible ou en cours d'utilisation, mais " -"son statut actuel indique \"%s\"." - -msgid "Volume to be restored to must be available" -msgstr "Le volume à restaurer doit être disponible" - -#, python-format -msgid "Volume type %(volume_type_id)s could not be found." -msgstr "Le type de volume %(volume_type_id)s est introuvable." - -#, python-format -msgid "Volume type ID '%s' is invalid." -msgstr "L'ID de volume '%s' n'est pas valide." - -#, python-format -msgid "" -"Volume type access for %(volume_type_id)s / %(project_id)s combination " -"already exists." -msgstr "" -"L'accès de type volume pour la combinaison %(volume_type_id)s / " -"%(project_id)s existe déjà." - -#, python-format -msgid "" -"Volume type access not found for %(volume_type_id)s / %(project_id)s " -"combination." -msgstr "" -"Accès de type volume introuvable pour la combinaison %(volume_type_id)s / " -"%(project_id)s." - -#, python-format -msgid "Volume type encryption for type %(type_id)s already exists." -msgstr "Le chiffrement du type de volume existe déjà pour le type %(type_id)s." - -#, python-format -msgid "Volume type encryption for type %(type_id)s does not exist." -msgstr "" -"Le chiffrement du type de volume pour le type %(type_id)s n'existe pas." - -msgid "Volume type name can not be empty." -msgstr "Le nom de type de volume ne peut pas être vide." - -#, python-format -msgid "Volume type with name %(volume_type_name)s could not be found." -msgstr "Le type de volume portant le nom %(volume_type_name)s est introuvable." - -#, python-format -msgid "" -"Volume: %(volumeName)s is not a concatenated volume. You can only perform " -"extend on concatenated volume. Exiting..." -msgstr "" -"Le volume %(volumeName)s n'est pas un volume concaténé. Vous pouvez " -"seulement effectuer une extension sur le volume concaténé. Sortie..." - -#, python-format -msgid "Volume: %(volumeName)s was not added to storage group %(sgGroupName)s." 
-msgstr "" -"Le volume : %(volumeName)s n'a pas été ajouté au groupe de stockage " -"%(sgGroupName)s." - -#, python-format -msgid "Volume: %s is already being managed by Cinder." -msgstr "Volume %s déjà géré par Cinder." - -msgid "" -"Volumes/account exceeded on both primary and secondary SolidFire accounts." -msgstr "" -"Nombre de volumes/compte dépassé sur les comptes SolidFire principaux et " -"secondaires." - -#, python-format -msgid "" -"VzStorage config 'vzstorage_used_ratio' invalid. Must be > 0 and <= 1.0: %s." -msgstr "" -"La configuration VzStorage 'vzstorage_used_ratio' n'est pas valide. Doit " -"être > 0 et <= 1.0: %s." - -#, python-format -msgid "VzStorage config file at %(config)s doesn't exist." -msgstr "Le fichier de configuration VzStorage %(config)s n'existe pas." - -msgid "Wait replica complete timeout." -msgstr "Dépassement du délai d'attente d'achèvement de la réplique." - -#, python-format -msgid "Wait synchronize failed. Running status: %s." -msgstr "Echec de l'attente de synchronisation. Statut d'exécution : %s." - -msgid "" -"Waiting for all nodes to join cluster. Ensure all sheep daemons are running." -msgstr "" -"En attente que tous les noeuds aient rejoint le cluster. Vérifiez que tous " -"les démons sheep sont en exécution." - -msgid "We should not do switch over on primary array." -msgstr "" -"Il est déconseillé d'effectuer un basculement sur la matrice principale." - -msgid "X-IO Volume Driver exception!" -msgstr "Exception de pilote du volume X-IO" - -msgid "XtremIO not configured correctly, no iscsi portals found" -msgstr "" -"XtremIO n'a pas été configuré correctement, aucun portail iscsi n'a été " -"détecté" - -msgid "XtremIO not initialized correctly, no clusters found" -msgstr "" -"XtremIO ne s'est pas initialisé correctement, aucun cluster n'a été trouvé" - -msgid "You must implement __call__" -msgstr "Vous devez implémenter __call__" - -msgid "" -"You must install hpe3parclient before using 3PAR drivers. 
Run \"pip install " -"python-3parclient\" to install the hpe3parclient." -msgstr "" -"Vous devez installer hpe3parclient avant d'utiliser des pilotes 3PAR. " -"Exécutez \"pip install python-3parclient\" pour installer hpe3parclient." - -msgid "You must supply an array in your EMC configuration file." -msgstr "" -"Vous devez fournir une matrice dans votre fichier de configuration EMC." - -#, python-format -msgid "" -"Your original size: %(originalVolumeSize)s GB is greater than: %(newSize)s " -"GB. Only Extend is supported. Exiting..." -msgstr "" -"Votre taille initiale : %(originalVolumeSize)s Go est supérieure à : " -"%(newSize)s Go. Seule l'extension est prise en charge. Sortie..." - -#, python-format -msgid "ZeroDivisionError: %s" -msgstr "ZeroDivisionError : %s" - -msgid "Zone" -msgstr "Zone" - -#, python-format -msgid "Zoning Policy: %s, not recognized" -msgstr "Stratégie de segmentation : %s, non reconnue" - -#, python-format -msgid "_create_and_copy_vdisk_data: Failed to get attributes for vdisk %s." -msgstr "" -"_create_and_copy_vdisk_data : échec de l'obtention des attributs du disque " -"virtuel %s." - -msgid "_create_host failed to return the host name." -msgstr "Echec de _create_host pour renvoyer le nom d'hôte." - -msgid "" -"_create_host: Can not translate host name. Host name is not unicode or " -"string." -msgstr "" -"_create_host : impossible de convertir le nom d'hôte. Le nom d'hôte n'est " -"pas de type Unicode ou chaîne." - -msgid "_create_host: No connector ports." -msgstr "_create_host : aucun port de connecteur." - -msgid "_create_local_cloned_volume, Replication Service not found." -msgstr "_create_local_cloned_volume, Service de réplication introuvable." - -#, python-format -msgid "" -"_create_local_cloned_volume, volumename: %(volumename)s, sourcevolumename: " -"%(sourcevolumename)s, source volume instance: %(source_volume)s, target " -"volume instance: %(target_volume)s, Return code: %(rc)lu, Error: " -"%(errordesc)s." 
-msgstr "" -"_create_local_cloned_volume, nom du volume : %(volumename)s, nom du volume " -"source : %(sourcevolumename)s, instance de volume source : " -"%(source_volume)s, instance de volume cible : %(target_volume)s, Code " -"retour : %(rc)lu, Erreur : %(errordesc)s." - -#, python-format -msgid "" -"_create_vdisk %(name)s - did not find success message in CLI output.\n" -" stdout: %(out)s\n" -" stderr: %(err)s" -msgstr "" -"_create_vdisk %(name)s - aucun message de réussite trouvé dans la sortie " -"CLI.\n" -" stdout : %(out)s\n" -" stderr : %(err)s" - -msgid "_create_volume_name, id_code is None." -msgstr "_create_volume_name, la valeur de id_code est None." - -msgid "_delete_copysession, Cannot find Replication Service" -msgstr "_delete_copysession, Service de réplication introuvable" - -#, python-format -msgid "" -"_delete_copysession, copy session type is undefined! copy session: " -"%(cpsession)s, copy type: %(copytype)s." -msgstr "" -"_delete_copysession, le type de session de copie (copy session) n'est pas " -"défini ! Session de copie : %(cpsession)s, type de copie : %(copytype)s." - -#, python-format -msgid "" -"_delete_copysession, copysession: %(cpsession)s, operation: %(operation)s, " -"Return code: %(rc)lu, Error: %(errordesc)s." -msgstr "" -"_delete_copysession, session de copie : %(cpsession)s, opération : " -"%(operation)s, Code retour : %(rc)lu, Erreur : %(errordesc)s." - -#, python-format -msgid "" -"_delete_volume, volumename: %(volumename)s, Return code: %(rc)lu, Error: " -"%(errordesc)s." -msgstr "" -"_delete_volume, nom du volume : %(volumename)s, Code retour : %(rc)lu, " -"Erreur : %(errordesc)s." - -#, python-format -msgid "" -"_delete_volume, volumename: %(volumename)s, Storage Configuration Service " -"not found." -msgstr "" -"_delete_volume, nom du volume : %(volumename)s, Service de configuration de " -"stockage introuvable." 
- -#, python-format -msgid "" -"_exec_eternus_service, classname: %(classname)s, InvokeMethod, cannot " -"connect to ETERNUS." -msgstr "" -"_exec_eternus_service, nom de classe : %(classname)s, InvokeMethod, " -"impossible de se connecter à ETERNUS." - -msgid "_extend_volume_op: Extending a volume with snapshots is not supported." -msgstr "" -"_extend_volume_op : l'extension d'un volume avec des instantanés n'est pas " -"prise en charge." - -#, python-format -msgid "" -"_find_affinity_group, connector: %(connector)s, Associators: " -"FUJITSU_AuthorizedTarget, cannot connect to ETERNUS." -msgstr "" -"_find_affinity_group, connecteur : %(connector)s, Associateurs : " -"FUJITSU_AuthorizedTarget, impossible de se connecter à ETERNUS." - -#, python-format -msgid "" -"_find_affinity_group, connector: %(connector)s, EnumerateInstanceNames, " -"cannot connect to ETERNUS." -msgstr "" -"_find_affinity_group, connecteur : %(connector)s, EnumerateInstanceNames, " -"impossible de se connecter à ETERNUS." - -#, python-format -msgid "" -"_find_affinity_group,connector: %(connector)s,AssocNames: " -"FUJITSU_ProtocolControllerForUnit, cannot connect to ETERNUS." -msgstr "" -"_find_affinity_group, connecteur : %(connector)s, AssocNames : " -"FUJITSU_ProtocolControllerForUnit, impossible de se connecter à ETERNUS." - -#, python-format -msgid "" -"_find_copysession, ReferenceNames, vol_instance: %(vol_instance_path)s, " -"Cannot connect to ETERNUS." -msgstr "" -"_find_copysession, ReferenceNames, vol_instance : %(vol_instance_path)s, " -"Impossible de se connecter à ETERNUS." - -#, python-format -msgid "" -"_find_eternus_service, classname: %(classname)s, EnumerateInstanceNames, " -"cannot connect to ETERNUS." -msgstr "" -"_find_eternus_service, nom de classe : %(classname)s, " -"EnumerateInstanceNames, impossible de se connecter à ETERNUS." - -#, python-format -msgid "_find_initiator_names, connector: %(connector)s, initiator not found." 
-msgstr "" -"_find_initiator_names, connecteur : %(connector)s, initiateur introuvable." - -#, python-format -msgid "" -"_find_lun, volumename: %(volumename)s, EnumerateInstanceNames, cannot " -"connect to ETERNUS." -msgstr "" -"_find_lun, nom du volume : %(volumename)s, EnumerateInstanceNames, " -"impossible de se connecter à ETERNUS." - -#, python-format -msgid "" -"_find_pool, eternus_pool:%(eternus_pool)s, EnumerateInstances, cannot " -"connect to ETERNUS." -msgstr "" -"_find_pool, eternus_pool :%(eternus_pool)s, EnumerateInstances, impossible " -"de se connecter à ETERNUS." - -#, python-format -msgid "" -"_get_drvcfg, filename: %(filename)s, tagname: %(tagname)s, data is None!! " -"Please edit driver configuration file and correct." -msgstr "" -"_get_drvcfg, nom du fichier : %(filename)s, tagname : %(tagname)s, la valeur " -"de data est None ! Editez le fichier de configuration du pilote et corrigez." - -#, python-format -msgid "" -"_get_eternus_connection, filename: %(filename)s, ip: %(ip)s, port: %(port)s, " -"user: %(user)s, passwd: ****, url: %(url)s, FAILED!!." -msgstr "" -"_get_eternus_connection, nom du fichier : %(filename)s, adresse IP : %(ip)s, " -"port : %(port)s, utilisateur : %(user)s, mot de passe : ****, URL : %(url)s, " -"ECHEC !!." - -#, python-format -msgid "" -"_get_eternus_iscsi_properties, iscsiip list: %(iscsiip_list)s, iqn not found." -msgstr "" -"_get_eternus_iscsi_properties, liste iscsiip : %(iscsiip_list)s, iqn " -"introuvable." - -#, python-format -msgid "" -"_get_eternus_iscsi_properties, iscsiip: %(iscsiip)s, AssociatorNames: " -"CIM_BindsTo, cannot connect to ETERNUS." -msgstr "" -"_get_eternus_iscsi_properties, iscsiip : %(iscsiip)s, AssociatorNames : " -"CIM_BindsTo, impossible de se connecter à ETERNUS." - -#, python-format -msgid "" -"_get_eternus_iscsi_properties, iscsiip: %(iscsiip)s, EnumerateInstanceNames, " -"cannot connect to ETERNUS." 
-msgstr "" -"_get_eternus_iscsi_properties, iscsiip : %(iscsiip)s, " -"EnumerateInstanceNames, impossible de se connecter à ETERNUS." - -#, python-format -msgid "" -"_get_eternus_iscsi_properties, iscsiip: %(iscsiip)s, GetInstance, cannot " -"connect to ETERNUS." -msgstr "" -"_get_eternus_iscsi_properties, iscsiip : %(iscsiip)s, GetInstance, " -"impossible de se connecter à ETERNUS." - -#, python-format -msgid "" -"_get_hdr_dic: attribute headers and values do not match.\n" -" Headers: %(header)s\n" -" Values: %(row)s." -msgstr "" -"_get_hdr_dic : les en-têtes et valeurs des attributs ne correspondent pas.\n" -" En-têtes : %(header)s\n" -" Valeurs : %(row)s." - -msgid "_get_host_from_connector failed to return the host name for connector." -msgstr "" -"Echec de _get_host_from_connector lors du renvoi du nom d'hôte pour le " -"connecteur." - -#, python-format -msgid "" -"_get_mapdata_fc, getting host-affinity from aglist/vol_instance failed, " -"affinitygroup: %(ag)s, ReferenceNames, cannot connect to ETERNUS." -msgstr "" -"_get_mapdata_fc, échec d'obtention de host-affinity depuis aglist/" -"vol_instance, groupe d'affinité : %(ag)s, ReferenceNames, impossible de se " -"connecter à ETERNUS." - -#, python-format -msgid "" -"_get_mapdata_fc, getting host-affinity instance failed, volmap: %(volmap)s, " -"GetInstance, cannot connect to ETERNUS." -msgstr "" -"_get_mapdata_fc, échec d'obtention de l'instance host-affinity, volmap : " -"%(volmap)s, GetInstance, impossible de se connecter à ETERNUS." - -msgid "" -"_get_mapdata_iscsi, Associators: FUJITSU_SAPAvailableForElement, cannot " -"connect to ETERNUS." -msgstr "" -"_get_mapdata_iscsi, Associateurs : FUJITSU_SAPAvailableForElement, " -"impossible de se connecter à ETERNUS." - -#, python-format -msgid "" -"_get_mapdata_iscsi, affinitygroup: %(ag)s, ReferenceNames, cannot connect to " -"ETERNUS." -msgstr "" -"_get_mapdata_iscsi, groupe d'affinité : %(ag)s, ReferenceNames, impossible " -"de se connecter à ETERNUS." 
-
-#, python-format
-msgid ""
-"_get_mapdata_iscsi, vol_instance: %(vol_instance)s, ReferenceNames: "
-"CIM_ProtocolControllerForUnit, cannot connect to ETERNUS."
-msgstr ""
-"_get_mapdata_iscsi, vol_instance : %(vol_instance)s, ReferenceNames : "
-"CIM_ProtocolControllerForUnit, impossible de se connecter à ETERNUS."
-
-#, python-format
-msgid ""
-"_get_mapdata_iscsi, volmap: %(volmap)s, GetInstance, cannot connect to "
-"ETERNUS."
-msgstr ""
-"_get_mapdata_iscsi, volmap : %(volmap)s, GetInstance, impossible de se "
-"connecter à ETERNUS."
-
-msgid "_get_target_port, EnumerateInstances, cannot connect to ETERNUS."
-msgstr ""
-"_get_target_port, EnumerateInstances, impossible de se connecter à ETERNUS."
-
-#, python-format
-msgid "_get_target_port, protcol: %(protocol)s, target_port not found."
-msgstr "_get_target_port, protocole : %(protocol)s, target_port introuvable."
-
-#, python-format
-msgid "_get_unmanaged_replay: Cannot find snapshot named %s"
-msgstr "_get_unmanaged_replay : instantané nommé %s introuvable"
-
-#, python-format
-msgid "_get_unmanaged_replay: Cannot find volume id %s"
-msgstr "_get_unmanaged_replay : ID volume %s introuvable"
-
-msgid "_get_unmanaged_replay: Must specify source-name."
-msgstr "_get_unmanaged_replay : vous devez spécifier source-name."
-
-msgid ""
-"_get_vdisk_map_properties: Could not get FC connection information for the "
-"host-volume connection. Is the host configured properly for FC connections?"
-msgstr ""
-"_get_vdisk_map_properties : impossible d'obtenir les informations de "
-"connexion FC pour la connexion hôte-volume. L'hôte est-il configuré "
-"correctement pour les connexions FC ?"
-
-#, python-format
-msgid ""
-"_get_vdisk_map_properties: No node found in I/O group %(gid)s for volume "
-"%(vol)s."
-msgstr ""
-"_get_vdisk_map_properties : noeud introuvable dans le groupe d'E-S %(gid)s "
-"pour le volume %(vol)s."
- -#, python-format -msgid "" -"_map_lun, vol_instance.path:%(vol)s, volumename: %(volumename)s, volume_uid: " -"%(uid)s, initiator: %(initiator)s, target: %(tgt)s, aglist: %(aglist)s, " -"Storage Configuration Service not found." -msgstr "" -"_map_lun, chemin vol_instance.path :%(vol)s, nom du volume : %(volumename)s, " -"volume_uid: %(uid)s, initiateur : %(initiator)s, cible : %(tgt)s, aglist : " -"%(aglist)s, Service de configuration de stockage introuvable." - -#, python-format -msgid "" -"_unmap_lun, vol_instance.path: %(volume)s, volumename: %(volumename)s, " -"volume_uid: %(uid)s, aglist: %(aglist)s, Controller Configuration Service " -"not found." -msgstr "" -"_unmap_lun, chemin vol_instance.path : %(volume)s, nom du volume : " -"%(volumename)s, volume_uid : %(uid)s, aglist : %(aglist)s, Service de " -"configuration de contrôleur introuvable." - -#, python-format -msgid "" -"_unmap_lun, volumename: %(volumename)s, volume_uid: %(volume_uid)s, " -"AffinityGroup: %(ag)s, Return code: %(rc)lu, Error: %(errordesc)s." -msgstr "" -"_unmap_lun, nom du volume : %(volumename)s, volume_uid : %(volume_uid)s, " -"groupe d'affinité : %(ag)s, Code retour : %(rc)lu, Erreur : %(errordesc)s." - -#, python-format -msgid "" -"_unmap_lun,vol_instance.path: %(volume)s, AssociatorNames: " -"CIM_ProtocolControllerForUnit, cannot connect to ETERNUS." -msgstr "" -"_unmap_lun, chemin vol_instance.path : %(volume)s, AssociatorNames : " -"CIM_ProtocolControllerForUnit, impossible de se connecter à ETERNUS." - -msgid "_update_volume_stats: Could not get storage pool data." -msgstr "" -"_update_volume_stats : impossible d'obtenir les données du pool de stockage." - -#, python-format -msgid "" -"_wait_for_copy_complete, cpsession: %(cpsession)s, copysession state is " -"BROKEN." -msgstr "" -"_wait_for_copy_complete, cpsession : %(cpsession)s, l'état de copysession " -"est BROKEN." - -#, python-format -msgid "" -"add_vdisk_copy failed: A copy of volume %s exists. 
Adding another copy would " -"exceed the limit of 2 copies." -msgstr "" -"Echec de add_vdisk_copy : une copie du volume %s existe. L'ajout d'une autre " -"copie dépasserait la limite de 2 copies." - -msgid "add_vdisk_copy started without a vdisk copy in the expected pool." -msgstr "" -"add_vdisk_copy a démarré sans copie de disque virtuel dans le pool attendu." - -#, python-format -msgid "all_tenants must be a boolean, got '%s'." -msgstr "all_tenants doit être une valeur booléenne, '%s' a été renvoyé." - -msgid "already created" -msgstr "déjà créé" - -msgid "already_created" -msgstr "already_created" - -msgid "attach snapshot from remote node" -msgstr "attacher l'instantané du noeud distant" - -#, python-format -msgid "attribute %s not lazy-loadable" -msgstr "l'attribut %s n'est pas de type lazy-loadable" - -#, python-format -msgid "" -"backup: %(vol_id)s failed to create device hardlink from %(vpath)s to " -"%(bpath)s.\n" -"stdout: %(out)s\n" -" stderr: %(err)s" -msgstr "" -"sauvegarde : %(vol_id)s - Echec de création du lien physique de périphérique " -"entre %(vpath)s et %(bpath)s.\n" -"stdout : %(out)s\n" -" stderr : %(err)s" - -#, python-format -msgid "" -"backup: %(vol_id)s failed to obtain backup success notification from " -"server.\n" -"stdout: %(out)s\n" -" stderr: %(err)s" -msgstr "" -"sauvegarde : %(vol_id)s n'est pas parvenu à extraire la notification de " -"réussite de sauvegarde auprès du serveur.\n" -"stdout : %(out)s\n" -" stderr : %(err)s" - -#, python-format -msgid "" -"backup: %(vol_id)s failed to run dsmc due to invalid arguments on " -"%(bpath)s.\n" -"stdout: %(out)s\n" -" stderr: %(err)s" -msgstr "" -"sauvegarde : échec de %(vol_id)s pour l'exécution de dsmc en raison " -"d'arguments non valides sur %(bpath)s.\n" -"stdout : %(out)s\n" -" stderr : %(err)s" - -#, python-format -msgid "" -"backup: %(vol_id)s failed to run dsmc on %(bpath)s.\n" -"stdout: %(out)s\n" -" stderr: %(err)s" -msgstr "" -"sauvegarde : échec de %(vol_id)s pour l'exécution 
de dsmc sur %(bpath)s.\n" -"stdout : %(out)s\n" -" stderr : %(err)s" - -#, python-format -msgid "backup: %(vol_id)s failed. %(path)s is not a file." -msgstr "sauvegarde : échec de %(vol_id)s. %(path)s n'est pas un fichier." - -#, python-format -msgid "" -"backup: %(vol_id)s failed. %(path)s is unexpected file type. Block or " -"regular files supported, actual file mode is %(vol_mode)s." -msgstr "" -"sauvegarde : échec de %(vol_id)s. %(path)s est un type de fichier inattendu. " -"Fichiers par blocs ou standard pris en charge, mode de fichier réel : " -"%(vol_mode)s." - -#, python-format -msgid "" -"backup: %(vol_id)s failed. Cannot obtain real path to volume at %(path)s." -msgstr "" -"sauvegarde : échec de %(vol_id)s. Impossible de récupérer le chemin réel au " -"volume %(path)s." - -msgid "being attached by different mode" -msgstr "connecté par un mode différent" - -#, python-format -msgid "call failed: %r" -msgstr "échec de l'appel : %r" - -msgid "call failed: GARBAGE_ARGS" -msgstr "échec de l'appel : GARBAGE_ARGS" - -msgid "call failed: PROC_UNAVAIL" -msgstr "échec de l'appel : PROC_UNAVAIL" - -#, python-format -msgid "call failed: PROG_MISMATCH: %r" -msgstr "échec de l'appel : PROG_MISMATCH : %r" - -msgid "call failed: PROG_UNAVAIL" -msgstr "échec de l'appel : PROG_UNAVAIL" - -#, python-format -msgid "can't find lun-map, ig:%(ig)s vol:%(vol)s" -msgstr "lun-map, ig:%(ig)s vol introuvable :%(vol)s" - -msgid "can't find the volume to extend" -msgstr "volume à étendre introuvable" - -msgid "can't handle both name and index in req" -msgstr "impossible de gérer à la fois le nom et l'index dans la demande" - -msgid "cannot understand JSON" -msgstr "impossible de comprendre JSON" - -#, python-format -msgid "cg-%s" -msgstr "groupe de cohérence %s" - -msgid "cgsnapshot assigned" -msgstr "instantané cgsnapshot affecté" - -msgid "cgsnapshot changed" -msgstr "instantané cgsnapshot modifié" - -msgid "cgsnapshots assigned" -msgstr "instantanés cgsnapshots affectés" - -msgid 
"cgsnapshots changed" -msgstr "instantanés cgsnapshots modifiés" - -msgid "" -"check_for_setup_error: Password or SSH private key is required for " -"authentication: set either san_password or san_private_key option." -msgstr "" -"check_for_setup_error : mot de passe ou clé privée SSH obligatoire pour " -"l'authentification : définissez l'option san_password ou san_private_key." - -msgid "check_for_setup_error: Unable to determine system id." -msgstr "" -"check_for_setup_error : impossible de déterminer l'identificateur du système." - -msgid "check_for_setup_error: Unable to determine system name." -msgstr "check_for_setup_error : impossible de déterminer le nom du système." - -msgid "check_hypermetro_exist error." -msgstr "Erreur check_hypermetro_exist." - -#, python-format -msgid "clone depth exceeds limit of %s" -msgstr "profondeur de clone dépassant la limite de %s" - -msgid "consistencygroup assigned" -msgstr "groupe de cohérence (consistencygroup) affecté" - -msgid "consistencygroup changed" -msgstr "groupe de cohérence (consistencygroup) modifié" - -msgid "control_location must be defined" -msgstr "control_location doit être défini" - -msgid "create_cloned_volume, Source Volume does not exist in ETERNUS." -msgstr "create_cloned_volume, le volume source n'existe pas dans ETERNUS." - -#, python-format -msgid "" -"create_cloned_volume, target volume instancename: %(volume_instancename)s, " -"Get Instance Failed." -msgstr "" -"create_cloned_volume, nom d'instance du volume cible : " -"%(volume_instancename)s, La récupération de l'instance a échoué." - -msgid "create_cloned_volume: Source and destination size differ." -msgstr "" -"create_cloned_volume : La taille de la source et de la destination diffère." - -#, python-format -msgid "" -"create_cloned_volume: source volume %(src_vol)s size is %(src_size)dGB and " -"doesn't fit in target volume %(tgt_vol)s of size %(tgt_size)dGB." 
-msgstr ""
-"create_cloned_volume : la taille du volume source %(src_vol)s est "
-"%(src_size)d Go et ne peut pas tenir dans le volume cible %(tgt_vol)s d'une "
-"taille de %(tgt_size)d Go."
-
-msgid ""
-"create_consistencygroup_from_src must be creating from a CG snapshot, or a "
-"source CG."
-msgstr ""
-"create_consistencygroup_from_src doit correspondre à une création à partir "
-"d'un instantané CG ou d'une source CG."
-
-msgid ""
-"create_consistencygroup_from_src only supports a cgsnapshot source or a "
-"consistency group source. Multiple sources cannot be used."
-msgstr ""
-"create_consistencygroup_from_src prend en charge une source cgsnapshot ou "
-"une source de groupe de cohérence uniquement. Vous ne pouvez pas utiliser "
-"plusieurs sources."
-
-#, python-format
-msgid "create_copy: Source vdisk %(src)s (%(src_id)s) does not exist."
-msgstr ""
-"create_copy : le disque virtuel source %(src)s (%(src_id)s) n'existe pas."
-
-#, python-format
-msgid "create_copy: Source vdisk %(src)s does not exist."
-msgstr ""
-"create_copy : le disque virtuel source %(src)s n'existe pas."
-
-msgid "create_host: Host name is not unicode or string."
-msgstr "create_host : le nom d'hôte n'est pas du type Unicode ou chaîne."
-
-msgid "create_host: No initiators or wwpns supplied."
-msgstr "create_host : Aucun initiateur ni wwpns fourni."
-
-msgid "create_hypermetro_pair error."
-msgstr "Erreur create_hypermetro_pair."
-
-#, python-format
-msgid "create_snapshot, eternus_pool: %(eternus_pool)s, pool not found."
-msgstr "create_snapshot, eternus_pool : %(eternus_pool)s, pool introuvable."
-
-#, python-format
-msgid ""
-"create_snapshot, snapshotname: %(snapshotname)s, source volume name: "
-"%(volumename)s, vol_instance.path: %(vol_instance)s, dest volume name: "
-"%(d_volumename)s, pool: %(pool)s, Return code: %(rc)lu, Error: %(errordesc)s."
-msgstr "" -"create_snapshot, nom de l'instantané : %(snapshotname)s, nom du volume " -"source : %(volumename)s, chemin vol_instance.path : %(vol_instance)s, nom du " -"volume de destination : %(d_volumename)s, pool : %(pool)s, Code retour : " -"%(rc)lu, Erreur : %(errordesc)s." - -#, python-format -msgid "" -"create_snapshot, volumename: %(s_volumename)s, source volume not found on " -"ETERNUS." -msgstr "" -"create_snapshot, nom du volume : %(s_volumename)s, volume source introuvable " -"sur ETERNUS." - -#, python-format -msgid "" -"create_snapshot, volumename: %(volumename)s, Replication Service not found." -msgstr "" -"create_snapshot, nom du volume : %(volumename)s, Service de réplication " -"introuvable." - -#, python-format -msgid "" -"create_snapshot: Volume status must be \"available\" or \"in-use\" for " -"snapshot. The invalid status is %s." -msgstr "" -"create_snapshot : le statut du volume doit être \"available\" ou \"in-use\" " -"pour l'instantané. Le statut non valide est %s." - -msgid "create_snapshot: get source volume failed." -msgstr "create_snapshot : échec de récupération du volume source." - -#, python-format -msgid "" -"create_volume, volume: %(volume)s, EnumerateInstances, cannot connect to " -"ETERNUS." -msgstr "" -"create_volume, volume : %(volume)s, EnumerateInstances, impossible de se " -"connecter à ETERNUS." - -#, python-format -msgid "" -"create_volume, volume: %(volume)s, volumename: %(volumename)s, eternus_pool: " -"%(eternus_pool)s, Storage Configuration Service not found." -msgstr "" -"create_volume, volume : %(volume)s, nom du volume : %(volumename)s, " -"eternus_pool : %(eternus_pool)s, Service de configuration de stockage " -"introuvable." - -#, python-format -msgid "" -"create_volume, volumename: %(volumename)s, poolname: %(eternus_pool)s, " -"Return code: %(rc)lu, Error: %(errordesc)s." 
-msgstr "" -"create_volume, nom du volume : %(volumename)s, nom du pool : " -"%(eternus_pool)s, Code retour : %(rc)lu, Erreur : %(errordesc)s." - -msgid "create_volume_from_snapshot, Source Volume does not exist in ETERNUS." -msgstr "" -"create_volume_from_snapshot, le volume source n'existe pas dans ETERNUS." - -#, python-format -msgid "" -"create_volume_from_snapshot, target volume instancename: " -"%(volume_instancename)s, Get Instance Failed." -msgstr "" -"create_volume_from_snapshot, nom d'instance du volume cible : " -"%(volume_instancename)s, La récupération de l'instance a échoué." - -#, python-format -msgid "create_volume_from_snapshot: Snapshot %(name)s does not exist." -msgstr "create_volume_from_snapshot : l'instantané %(name)s n'existe pas." - -#, python-format -msgid "" -"create_volume_from_snapshot: Snapshot status must be \"available\" for " -"creating volume. The invalid status is: %s." -msgstr "" -"create_volume_from_snapshot : le statut de l'instantané doit être \"available" -"\" pour créer le volume. Le statut non valide est : %s." - -msgid "" -"create_volume_from_snapshot: Volume size is different from snapshot based " -"volume." -msgstr "" -"create_volume_from_snapshot : la taille du volume est différente de celle du " -"volume basé sur l'instantané." - -#, python-format -msgid "" -"delete: %(vol_id)s failed to run dsmc due to invalid arguments with stdout: " -"%(out)s\n" -" stderr: %(err)s" -msgstr "" -"suppression : échec de %(vol_id)s pour l'exécution de dsmc en raison " -"d'arguments non valides avec stdout : %(out)s\n" -" stderr : %(err)s" - -#, python-format -msgid "" -"delete: %(vol_id)s failed to run dsmc with stdout: %(out)s\n" -" stderr: %(err)s" -msgstr "" -"suppression : échec de %(vol_id)s pour l'exécution de dsmc avec stdout : " -"%(out)s\n" -" stderr : %(err)s" - -msgid "delete_hypermetro error." -msgstr "Erreur delete_hypermetro." - -#, python-format -msgid "delete_initiator: %s ACL not found. Continuing." 
-msgstr "delete_initiator : ACL %s introuvable. Poursuite de l'opération." - -msgid "delete_replication error." -msgstr "Erreur lors de la suppression de réplication (delete_replication)." - -#, python-format -msgid "deleting snapshot %(snapshot_name)s that has dependent volumes" -msgstr "" -"Suppression de l'instantané %(snapshot_name)s ayant des volumes dépendants" - -#, python-format -msgid "deleting volume %(volume_name)s that has snapshot" -msgstr "Suppression du volume %(volume_name)s ayant un instantané" - -msgid "detach snapshot from remote node" -msgstr "détachez l'instantané du noeud distant" - -msgid "do_setup: No configured nodes." -msgstr "do_setup : Aucun noeud configuré." - -#, python-format -msgid "" -"error writing object to swift, MD5 of object in swift %(etag)s is not the " -"same as MD5 of object sent to swift %(md5)s" -msgstr "" -"erreur lors de l'écriture de l'objet à Swift, MD5 de l'objet dans swift " -"%(etag)s est différent de MD5 de l'objet envoyé à swift %(md5)s" - -#, python-format -msgid "extend_volume, eternus_pool: %(eternus_pool)s, pool not found." -msgstr "extend_volume, eternus_pool : %(eternus_pool)s, pool introuvable." - -#, python-format -msgid "" -"extend_volume, volume: %(volume)s, volumename: %(volumename)s, eternus_pool: " -"%(eternus_pool)s, Storage Configuration Service not found." -msgstr "" -"extend_volume, volume : %(volume)s, nom du volume : %(volumename)s, " -"eternus_pool : %(eternus_pool)s, service de configuration de stockage " -"introuvable." - -#, python-format -msgid "" -"extend_volume, volumename: %(volumename)s, Return code: %(rc)lu, Error: " -"%(errordesc)s, PoolType: %(pooltype)s." -msgstr "" -"extend_volume, nom du volume : %(volumename)s, Code retour : %(rc)lu, " -"Erreur : %(errordesc)s, Type de pool : %(pooltype)s." - -#, python-format -msgid "extend_volume, volumename: %(volumename)s, volume not found." -msgstr "extend_volume, nom du volume : %(volumename)s, volume introuvable." 
-
-msgid "failed to create new_volume on destination host"
-msgstr "échec de la création de new_volume sur l'hôte de destination"
-
-msgid "fake"
-msgstr "factice"
-
-#, python-format
-msgid "file already exists at %s"
-msgstr "Le fichier existe déjà à %s"
-
-msgid "fileno is not supported by SheepdogIOWrapper"
-msgstr "fileno n'est pas pris en charge par SheepdogIOWrapper"
-
-msgid "fileno() not supported by RBD()"
-msgstr "fileno() non pris en charge par RBD()"
-
-#, python-format
-msgid "filesystem %s does not exist in Nexenta Store appliance"
-msgstr "Le système de fichiers %s n'existe pas dans Nexenta Store Appliance"
-
-msgid ""
-"flashsystem_multihostmap_enabled is set to False, not allow multi host "
-"mapping. CMMVC6071E The VDisk-to-host mapping was not created because the "
-"VDisk is already mapped to a host."
-msgstr ""
-"flashsystem_multihostmap_enabled est défini sur False. Le mappage multihôte "
-"est interdit. CMMVC6071E Le mappage entre un disque virtuel et un hôte n'a "
-"pas été créé car le disque virtuel est déjà mappé avec un hôte."
-
-msgid "flush() not supported in this version of librbd"
-msgstr "flush() non pris en charge dans cette version de librbd"
-
-#, python-format
-msgid "fmt=%(fmt)s backed by: %(backing_file)s"
-msgstr "fmt=%(fmt)s sauvegardé par : %(backing_file)s"
-
-#, python-format
-msgid "fmt=%(fmt)s backed by:%(backing_file)s"
-msgstr "fmt = %(fmt)s sauvegardé par : %(backing_file)s"
-
-msgid "force delete"
-msgstr "Forcer la suppression"
-
-msgid "get_hyper_domain_id error."
-msgstr "Erreur get_hyper_domain_id."
-
-msgid "get_hypermetro_by_id error."
-msgstr "Erreur get_hypermetro_by_id."
-
-#, python-format
-msgid ""
-"get_iscsi_params: Failed to get target IP for initiator %(ini)s, please "
-"check config file."
-msgstr ""
-"get_iscsi_params : échec d'obtention de l'IP cible pour l'initiateur "
-"%(ini)s. Vérifiez le fichier de configuration."
- -#, python-format -msgid "get_pool: Failed to get attributes for volume %s" -msgstr "get_pool : échec d'obtention des attributs pour le volume %s" - -msgid "glance_metadata changed" -msgstr "métadonnées glance_metadata modifiées" - -#, python-format -msgid "" -"gpfs_images_share_mode is set to copy_on_write, but %(vol)s and %(img)s " -"belong to different file systems." -msgstr "" -"gpfs_images_share_mode est défini sur copy_on_write mais %(vol)s et %(img)s " -"appartiennent à des systèmes de fichiers différents." - -#, python-format -msgid "" -"gpfs_images_share_mode is set to copy_on_write, but %(vol)s and %(img)s " -"belong to different filesets." -msgstr "" -"gpfs_images_share_mode est défini sur copy_on_write mais %(vol)s et %(img)s " -"appartiennent à des ensembles de fichiers différents." - -#, python-format -msgid "" -"hgst_group %(grp)s and hgst_user %(usr)s must map to valid users/groups in " -"cinder.conf" -msgstr "" -"hgst_group %(grp)s et hgst_user %(usr)s doivent être mappés à des " -"utilisateurs/groupes valides dans cinder.conf" - -#, python-format -msgid "hgst_net %(net)s specified in cinder.conf not found in cluster" -msgstr "" -"L'élément hgst_net %(net)s spécifié dans cinder.conf est introuvable dans le " -"cluster" - -msgid "hgst_redundancy must be set to 0 (non-HA) or 1 (HA) in cinder.conf." -msgstr "" -"hgst_redundancy doit être défini à 0 (non HA) ou 1 (HA) dans cinder.conf." - -msgid "hgst_space_mode must be an octal/int in cinder.conf" -msgstr "hgst_space_mode doit être de type octal/int dans cinder.conf" - -#, python-format -msgid "hgst_storage server %(svr)s not of format :" -msgstr "Le serveur hgst_storage %(svr)s ne respecte pas le format :" - -msgid "hgst_storage_servers must be defined in cinder.conf" -msgstr "hgst_storage_servers doit être défini dans cinder.conf" - -msgid "" -"http service may have been abruptly disabled or put to maintenance state in " -"the middle of this operation." 
-msgstr "" -"Le service HTTP a pu être subitement désactivé ou mis à l'état de " -"maintenance au milieu de cette opération." - -msgid "id cannot be None" -msgstr "l'ID ne peut pas être None" - -#, python-format -msgid "image %s not found" -msgstr "image %s non trouvée" - -#, python-format -msgid "initialize_connection, volume: %(volume)s, Volume not found." -msgstr "initialize_connection, volume : %(volume)s, Volume introuvable." - -#, python-format -msgid "initialize_connection: Failed to get attributes for volume %s." -msgstr "" -"initialize_connection : échec d'obtention des attributs pour le volume %s." - -#, python-format -msgid "initialize_connection: Missing volume attribute for volume %s." -msgstr "initialize_connection : attribut de volume manquant pour le volume %s." - -#, python-format -msgid "" -"initialize_connection: No node found in I/O group %(gid)s for volume %(vol)s." -msgstr "" -"initialize_connection : aucun noeud n'a été détecté dans le groupe d'E-S " -"%(gid)s pour le volume %(vol)s." - -#, python-format -msgid "initialize_connection: vdisk %s is not defined." -msgstr "initialize_connection : le disque virtuel %s n'est pas défini." - -#, python-format -msgid "invalid user '%s'" -msgstr "utilisateur non valide '%s'" - -#, python-format -msgid "iscsi portal, %s, not found" -msgstr "portal iSCSI %s introuvable" - -msgid "" -"iscsi_ip_address must be set in config file when using protocol 'iSCSI'." -msgstr "" -"iscsi_ip_address doit être définie dans le fichier de configuration si vous " -"utilisez le protocole 'iSCSI'." - -#, python-format -msgid "key manager error: %(reason)s" -msgstr "Erreur du gestionnaire de clés : %(reason)s" - -msgid "limit param must be an integer" -msgstr "le paramètre limit doit être un entier" - -msgid "limit param must be positive" -msgstr "le paramètre limit doit être positif" - -msgid "manage_existing requires a 'name' key to identify an existing volume." 
-msgstr "" -"manage_existing requiert une clé 'name' pour identifier un volume existant." - -#, python-format -msgid "" -"manage_existing_snapshot: Error managing existing replay %(ss)s on volume " -"%(vol)s" -msgstr "" -"manage_existing_snapshot : Erreur lors de la gestion de la relecture " -"existante %(ss)s sur le volume %(vol)s" - -#, python-format -msgid "marker [%s] not found" -msgstr "le marqueur [%s] est introuvable" - -#, python-format -msgid "mdiskgrp missing quotes %s" -msgstr "guillemets manquants pour mdiskgrp %s" - -#, python-format -msgid "migration_policy must be 'on-demand' or 'never', passed: %s" -msgstr "migration_policy doit être 'on-demand' ou 'never', transmis : %s" - -#, python-format -msgid "mkfs failed on volume %(vol)s, error message was: %(err)s." -msgstr "mkfs a échoué sur le volume %(vol)s, message d'erreur : %(err)s." - -msgid "mock" -msgstr "mock" - -msgid "mount.glusterfs is not installed" -msgstr "mount.glusterfs n'est pas installé" - -#, python-format -msgid "multiple resources with name %s found by drbdmanage" -msgstr "plusieurs ressources avec le nom %s ont été détectées par drbdmanage" - -#, python-format -msgid "multiple resources with snapshot ID %s found" -msgstr "plusieurs ressources avec l'ID d'instantané %s ont été détectées" - -msgid "name cannot be None" -msgstr "le nom ne peut pas être None" - -#, python-format -msgid "no REPLY but %r" -msgstr "aucune réponse (REPLY) mais %r" - -#, python-format -msgid "no snapshot with id %s found in drbdmanage" -msgstr "aucun instantané avec l'ID %s n'a été détecté dans drbdmanage" - -#, python-format -msgid "not exactly one snapshot with id %s" -msgstr "n'est pas exactement un snapshot avec l'ID %s" - -#, python-format -msgid "not exactly one volume with id %s" -msgstr "n'est pas exactement un volume avec l'ID %s" - -#, python-format -msgid "obj missing quotes %s" -msgstr "guillemets manquants pour obj %s" - -msgid "open_access_enabled is not off." 
-msgstr "open_access_enabled n'est pas désactivé." - -msgid "progress must be an integer percentage" -msgstr "la progression doit être un pourcentage d'entier" - -msgid "provider must be defined" -msgstr "fournisseur doit être défini" - -#, python-format -msgid "" -"qemu-img %(minimum_version)s or later is required by this volume driver. " -"Current qemu-img version: %(current_version)s" -msgstr "" -"qemu-img %(minimum_version)s ou ultérieur est requis par ce pilote de " -"périphérique. Version qemu-img actuelle : %(current_version)s" - -#, python-format -msgid "" -"qemu-img is not installed and image is of type %s. Only RAW images can be " -"used if qemu-img is not installed." -msgstr "" -"qemu-img non installé et image de type %s. Seules les images RAW peuvent " -"être utilisées si qemu-img n'est pas installé." - -msgid "" -"qemu-img is not installed and the disk format is not specified. Only RAW " -"images can be used if qemu-img is not installed." -msgstr "" -"qemu-img n'est pas installé et le format de disque n'est pas spécifié. " -"Seules les images RAW peuvent être utilisées si qemu-img n'est pas installé." - -msgid "rados and rbd python libraries not found" -msgstr "bibliothèques rados et rbd python introuvables" - -#, python-format -msgid "read_deleted can only be one of 'no', 'yes' or 'only', not %r" -msgstr "" -"read_deleted peut uniquement correspondre à 'no', 'yes' ou 'only', et non %r" - -#, python-format -msgid "replication_failover failed. %s not found." -msgstr "Echec de replication_failover. %s introuvable." - -msgid "replication_failover failed. Backend not configured for failover" -msgstr "" -"Echec de replication_failover. 
Le back-end n'est pas configuré pour le " -"basculement" - -#, python-format -msgid "" -"restore: %(vol_id)s failed to run dsmc due to invalid arguments on " -"%(bpath)s.\n" -"stdout: %(out)s\n" -" stderr: %(err)s" -msgstr "" -"restauration : échec de %(vol_id)s pour l'exécution de dsmc en raison " -"d'arguments non valides sur %(bpath)s.\n" -"stdout : %(out)s\n" -" stderr : %(err)s" - -#, python-format -msgid "" -"restore: %(vol_id)s failed to run dsmc on %(bpath)s.\n" -"stdout: %(out)s\n" -" stderr: %(err)s" -msgstr "" -"restauration : échec de %(vol_id)s pour l'exécution de dsmc sur %(bpath)s.\n" -"stdout : %(out)s\n" -" stderr : %(err)s" - -#, python-format -msgid "" -"restore: %(vol_id)s failed.\n" -"stdout: %(out)s\n" -" stderr: %(err)s." -msgstr "" -"restauration : échec de %(vol_id)s.\n" -"stdout : %(out)s\n" -" stderr : %(err)s." - -msgid "" -"restore_backup aborted, actual object list does not match object list stored " -"in metadata." -msgstr "" -"restore_backup annulé, la liste d'objets actuelle ne correspond pas à la " -"liste d'objets stockée dans les métadonnées." - -#, python-format -msgid "rtslib_fb is missing member %s: You may need a newer python-rtslib-fb." -msgstr "" -"Le membre %s est manquant dans rtslib_fb : vous avez peut-être besoin d'un " -"python-rtslib-fb plus récent." - -msgid "san_ip is not set." -msgstr "san_ip n'a pas été défini." - -msgid "san_ip must be set" -msgstr "san_ip doit être défini" - -msgid "" -"san_login and/or san_password is not set for Datera driver in the cinder." -"conf. Set this information and start the cinder-volume service again." -msgstr "" -"san_login et/ou san_password n'est pas défini pour le pilote Datera dans " -"cinder.conf. Définissez cette information et relancez le service cinder-" -"volume." 
- -msgid "serve() can only be called once" -msgstr "serve() ne peut être appelé qu'une seule fois" - -#, python-format -msgid "snapshot-%s" -msgstr "instantané %s" - -msgid "snapshots assigned" -msgstr "instantanés affectés" - -msgid "snapshots changed" -msgstr "instantanés modifiés" - -#, python-format -msgid "source volume id:%s is not replicated" -msgstr "identificateur de volume source : %s non répliqué" - -msgid "source-name cannot be empty." -msgstr "source-name ne peut pas être vide." - -msgid "source-name format should be: 'vmdk_path@vm_inventory_path'." -msgstr "" -"Le format de source-name doit être : 'chemin_vmdk@chemin_inventaire_vm'." - -#, python-format -msgid "status must be %s and" -msgstr "Le statut doit être %s et" - -msgid "status must be available" -msgstr "l'état doit être disponible" - -msgid "stop_hypermetro error." -msgstr "Erreur stop_hypermetro." - -msgid "sync_hypermetro error." -msgstr "Erreur sync_hypermetro." - -#, python-format -msgid "" -"targetcli not installed and could not create default directory " -"(%(default_path)s): %(exc)s" -msgstr "" -"targetcli n'est pas installé et n'a pas pu créer de répertoire par défaut " -"(%(default_path)s) : %(exc)s" - -msgid "terminate_connection: Failed to get host name from connector." -msgstr "" -"terminate_connection : échec d'obtention du nom d'hôte à partir du " -"connecteur." 
- -msgid "timeout creating new_volume on destination host" -msgstr "délai d'attente de création de new_volume sur l'hôte de destination" - -msgid "too many body keys" -msgstr "trop de clés de corps" - -#, python-format -msgid "umount: %s: not mounted" -msgstr "umount: %s: non monté" - -#, python-format -msgid "umount: %s: target is busy" -msgstr "umount: %s : cible occupée" - -msgid "umount: : some other error" -msgstr "umount: : autre erreur" - -msgid "umount: : target is busy" -msgstr "umount: : cible occupée" - -#, python-format -msgid "unmanage_snapshot: Cannot find snapshot named %s" -msgstr "unmanage_snapshot : instantané nommé %s introuvable" - -#, python-format -msgid "unmanage_snapshot: Cannot find volume id %s" -msgstr "unmanage_snapshot : ID volume %s introuvable" - -#, python-format -msgid "unrecognized argument %s" -msgstr "argument non reconnu %s" - -#, python-format -msgid "unsupported compression algorithm: %s" -msgstr "algorithme de compression non pris en charge : %s" - -msgid "valid iqn needed for show_target" -msgstr "iqn valide requis pour show_target" - -#, python-format -msgid "vdisk %s is not defined." -msgstr "Le disque virtuel %s n'a pas été défini." - -msgid "vmemclient python library not found" -msgstr "La bibliothèque Python vmemclient est introuvable" - -#, python-format -msgid "volume %s not found in drbdmanage" -msgstr "Le volume %s est introuvable dans drbdmanage" - -msgid "volume assigned" -msgstr "volume affecté" - -msgid "volume changed" -msgstr "volume modifié" - -msgid "volume is already attached" -msgstr "le volume est déjà connecté" - -msgid "volume is not local to this node" -msgstr "le volume n'est pas local sur ce noeud" - -#, python-format -msgid "" -"volume size %(volume_size)d is too small to restore backup of size %(size)d." -msgstr "" -"La taille du volume %(volume_size)d est insuffisante pour restaurer la " -"sauvegarde d'une taille de %(size)d." - -#, python-format -msgid "volume size %d is invalid." 
-msgstr "taille du volume %d non valide." - -msgid "" -"volume_type must be provided when creating a volume in a consistency group." -msgstr "" -"volume_type doit être fourni lors de la création d'un volume dans un groupe " -"de cohérence." - -msgid "volume_type_id cannot be None" -msgstr "volume_type_id ne peut pas avoir pour valeur None" - -#, python-format -msgid "volume_types must be provided to create consistency group %(name)s." -msgstr "" -"volume_types doit être indiqué pour créer le groupe de cohérence %(name)s." - -#, python-format -msgid "volume_types must be provided to create consistency group %s." -msgstr "volume_types doit être indiqué pour créer le groupe de cohérence %s." - -msgid "volumes assigned" -msgstr "volumes affectés" - -msgid "volumes changed" -msgstr "volumes modifiés" - -#, python-format -msgid "wait_for_condition: %s timed out." -msgstr "wait_for_condition : %s a dépassé le délai d'attente." - -#, python-format -msgid "" -"zfssa_manage_policy property needs to be set to 'strict' or 'loose'. Current " -"value is: %s." -msgstr "" -"La propriété zfssa_manage_policy doit être définie avec la valeur 'strict' " -"ou 'loose'. Valeur actuelle : %s." diff --git a/cinder/locale/it/LC_MESSAGES/cinder-log-error.po b/cinder/locale/it/LC_MESSAGES/cinder-log-error.po deleted file mode 100644 index d6caa309e..000000000 --- a/cinder/locale/it/LC_MESSAGES/cinder-log-error.po +++ /dev/null @@ -1,3445 +0,0 @@ -# Translations template for cinder. -# Copyright (C) 2015 ORGANIZATION -# This file is distributed under the same license as the cinder project. -# -# Translators: -# OpenStack Infra , 2015. #zanata -# Alessandra , 2016. #zanata -# Andreas Jaeger , 2016. #zanata -# Remo Mattei , 2016. 
#zanata -msgid "" -msgstr "" -"Project-Id-Version: cinder 9.0.0.0rc2.dev178\n" -"Report-Msgid-Bugs-To: https://bugs.launchpad.net/openstack-i18n/\n" -"POT-Creation-Date: 2016-10-06 03:19+0000\n" -"MIME-Version: 1.0\n" -"Content-Type: text/plain; charset=UTF-8\n" -"Content-Transfer-Encoding: 8bit\n" -"PO-Revision-Date: 2016-04-07 03:39+0000\n" -"Last-Translator: Remo Mattei \n" -"Language: it\n" -"Plural-Forms: nplurals=2; plural=(n != 1);\n" -"Generated-By: Babel 2.0\n" -"X-Generator: Zanata 3.7.3\n" -"Language-Team: Italian\n" - -#, python-format -msgid "" -"%(exception)s: Exception during revert of retype for volume %(volume_name)s. " -"Failed to remove from new volume set %(new_vvs)s." -msgstr "" -"%(exception)s: Eccezione durante il ripristino di retype per il volume " -"%(volume_name)s. Impossibile rimuovere dal nuovo insieme di volumi " -"%(new_vvs)s." - -#, python-format -msgid "" -"%(exception)s: Exception during revert of retype for volume %(volume_name)s. " -"Original volume set/QOS settings may not have been fully restored." -msgstr "" -"%(exception)s: Eccezione durante il ripristino di retype per il volume " -"%(volume_name)s. Le impostazioni configurate/QOS del volume originale " -"potrebbero non essere state ripristinate correttamente." - -#, python-format -msgid "" -"%(fun)s: Failed with unexpected CLI output.\n" -" Command: %(cmd)s\n" -"stdout: %(out)s\n" -"stderr: %(err)s\n" -msgstr "" -"%(fun)s: non riuscito con un output CLI imprevisto.\n" -" Comando: %(cmd)s\n" -"stdout: %(out)s\n" -"stderr: %(err)s\n" - -#, python-format -msgid "" -"%(method)s %(url)s unexpected response status: %(response)s (expects: " -"%(expects)s)." -msgstr "" -"%(method)s %(url)s, stato risposta imprevisto: %(response)s (previsto: " -"%(expects)s)." 
- -#, python-format -msgid "%(name)s: %(value)s" -msgstr "%(name)s: %(value)s" - -#, python-format -msgid "%s" -msgstr "%s" - -#, python-format -msgid "'%(value)s' is an invalid value for extra spec '%(key)s'" -msgstr "" -"'%(value)s' non è un valore valido per la specifica supplementare '%(key)s'" - -msgid "A valid secondary target MUST be specified in order to failover." -msgstr "" -"Una destinazione secondaria valida DEVE essere specificata per poter " -"eseguire il failover." - -#, python-format -msgid "" -"Account for Volume ID %s was not found on the SolidFire Cluster while " -"attempting create_snapshot operation!" -msgstr "" -"L'account per l'ID volume %s non è stato trovato nel cluster SolidFire nel " -"tentativo di eseguire l'operazione create_snapshot." - -#, python-format -msgid "" -"Account for Volume ID %s was not found on the SolidFire Cluster while " -"attempting delete_volume operation!" -msgstr "" -"L'account per l'ID volume %s non è stato trovato nel cluster SolidFire nel " -"tentativo di eseguire l'operazione delete_volume." - -#, python-format -msgid "" -"Account for Volume ID %s was not found on the SolidFire Cluster while " -"attempting unmanage operation!" -msgstr "" -"L'account per l'ID volume %s non è stato trovato nel cluster SolidFire nel " -"tentativo di eseguire l'operazione di annullamento gestione." - -#, python-format -msgid "Array Serial Number must be in the file %(fileName)s." -msgstr "Il numero di serie dell'array deve essere nel file %(fileName)s." - -#, python-format -msgid "Array query failed - No response (%d)!" -msgstr "Query dell'array non riuscita - Nessuna risposta (%d)." - -msgid "Array query failed. No capabilities in response!" -msgstr "Query dell'array non riuscita - Nessuna funzionalità nella risposta." - -msgid "Array query failed. No controllers in response!" -msgstr "Query dell'array non riuscita. Nessun controller nella risposta." - -msgid "Array query failed. No global id in XML response!" 
-msgstr "Query dell'array non riuscita. Nessun ID globale nella risposta XML." - -msgid "Attaching snapshot from a remote node is not supported." -msgstr "Collegamento istantanea da un nodo remoto non supportato." - -#, python-format -msgid "Authorizing request: %(zfssaurl)s retry: %(retry)d ." -msgstr "" -"Autorizzazione della richiesta: %(zfssaurl)s nuovo tentativo: %(retry)d ." - -msgid "Backend returned err for lun export." -msgstr "Il backend ha restituito un errore per l'esportazione della lun." - -#, python-format -msgid "Backup id %s is not invalid. Skipping reset." -msgstr "ID backup %s non valido. Reimpostazione ignorata." - -#, python-format -msgid "" -"Backup service %(configured_service)s does not support verify. Backup id " -"%(id)s is not verified. Skipping verify." -msgstr "" -"Il servizio di backup %(configured_service)s non supporta la verifica. L'ID " -"backup %(id)s non è verificato. Ignorare la verifica." - -#, python-format -msgid "Backup volume metadata failed: %s." -msgstr "Impossibile eseguire il backup dei metadati del volume : %s." - -#, python-format -msgid "Bad response from server: %(url)s. Error: %(err)s" -msgstr "Risposta errata dal server: %(url)s. Errore: %(err)s" - -#, python-format -msgid "" -"CG snapshot %(cgsnap)s not found when creating consistency group %(cg)s from " -"source." -msgstr "" -"Istantanea CG %(cgsnap)s non trovata durante la creazione del gruppo di " -"coerenza %(cg)s dall'origine." - -#, python-format -msgid "" -"CLI fail: '%(cmd)s' = %(code)s\n" -"out: %(stdout)s\n" -"err: %(stderr)s" -msgstr "" -"Errore CLI: '%(cmd)s' = %(code)s\n" -"out: %(stdout)s\n" -"err: %(stderr)s" - -msgid "Call to Nova delete snapshot failed" -msgstr "Chiamata a Nova per eliminare l'istantanea non riuscita" - -msgid "Call to Nova to create snapshot failed" -msgstr "Chiamata a Nova per creare l'istantanea non riuscita" - -#, python-format -msgid "Call to json.loads() raised an exception: %s." 
-msgstr "La chiamata a json.loads() ha generato un'eccezione: %s." - -#, python-format -msgid "Can not discovery in %(target_ip)s with %(target_iqn)s." -msgstr "" -"Impossibile eseguire il rilevamento in %(target_ip)s con %(target_iqn)s." - -msgid "Can not open the recent url, login again." -msgstr "Impossibile aprire l'url recente, rieseguire il login." - -#, python-format -msgid "Can't find volume to map %(key)s, %(msg)s" -msgstr "Impossibile trovare il volume da associare %(key)s, %(msg)s" - -msgid "Can't open the recent url, relogin." -msgstr "Impossibile aprire l'url recente, rieseguire il login." - -#, python-format -msgid "" -"Cannot add and verify tier policy association for storage group : " -"%(storageGroupName)s to FAST policy : %(fastPolicyName)s." -msgstr "" -"Impossibile aggiungere e verificare l'associazione della politica di " -"livellamento per il gruppo di archiviazione: %(storageGroupName)s alla " -"politica FAST: %(fastPolicyName)s." - -#, python-format -msgid "Cannot clone image %(image)s to volume %(volume)s. Error: %(error)s." -msgstr "" -"Impossibile clonare l'immagine %(image)s sul volume %(volume)s. Errore: " -"%(error)s." - -#, python-format -msgid "Cannot create or find an initiator group with name %(igGroupName)s." -msgstr "" -"Impossibile creare o trovare un gruppo di iniziatori di nome %(igGroupName)s." - -#, python-format -msgid "Cannot delete file %s." -msgstr "Impossibile eliminare il file %s." - -msgid "Cannot detect replica status." -msgstr "Impossibile rilevare lo stato della replica." - -msgid "Cannot determine if Tiering Policies are supported." -msgstr "Impossibile stabilire se le politiche di livellamento sono supportate." - -msgid "Cannot determine whether Tiering Policy is supported on this array." -msgstr "" -"Impossibile stabilire se la politica di livellamento è supportata su questo " -"array." 
- -#, python-format -msgid "Cannot find Consistency Group %s" -msgstr "Impossibile trovare il gruppo di coerenza %s" - -#, python-format -msgid "" -"Cannot find a portGroup with name %(pgGroupName)s. The port group for a " -"masking view must be pre-defined." -msgstr "" -"Impossibile trovare un gruppo di porte di nome %(pgGroupName)s. Il gruppo di " -"porte per una vista di mascheramento deve essere predefinito." - -#, python-format -msgid "Cannot find the fast policy %(fastPolicyName)s." -msgstr "Impossibile trovare la politica FAST %(fastPolicyName)s." - -#, python-format -msgid "" -"Cannot find the new masking view just created with name %(maskingViewName)s." -msgstr "" -"Impossibile trovare la nuova vista di mascheramento appena creata di nome " -"%(maskingViewName)s." - -#, python-format -msgid "Cannot get QoS spec for volume %s." -msgstr "Impossibile ottenere la specifica QoS spec per il volume %s." - -#, python-format -msgid "Cannot get port group from masking view: %(maskingViewName)s. " -msgstr "" -"Impossibile ottenere il gruppo di porte dalla vista di mascheramento: " -"%(maskingViewName)s. " - -msgid "Cannot get port group name." -msgstr "Impossibile ottenere il nome del gruppo di porte." - -#, python-format -msgid "Cannot get storage Group from job : %(storageGroupName)s." -msgstr "" -"Impossibile ottenere il gruppo di archiviazione dal lavoro: " -"%(storageGroupName)s." - -msgid "Cannot get storage system." -msgstr "Impossibile ottenere il sistema di archivio." - -#, python-format -msgid "Caught error: %(type)s %(error)s" -msgstr "Intercettato errore: %(type)s %(error)s" - -#, python-format -msgid "" -"Changing the volume name from %(tmp)s to %(orig)s failed because %(reason)s" -msgstr "" -"Modifica del nome volume da %(tmp)s a %(orig)s non riuscita a causa di " -"%(reason)s" - -#, python-format -msgid "" -"Changing the volume name from %(tmp)s to %(orig)s failed because %(reason)s." 
-msgstr "" -"Modifica del nome volume da %(tmp)s a %(orig)s non riuscita a causa di " -"%(reason)s" - -#, python-format -msgid "Clone %s not in prepared state!" -msgstr "Clone %s non in stato preparato." - -#, python-format -msgid "" -"Clone volume \"%s\" already exists. Please check the results of \"dog vdi " -"list\"." -msgstr "" -"Il volume clone \"%s\" esiste già. Controllare i risultati di \"dog vdi list" -"\"." - -#, python-format -msgid "Cloning of volume %s failed." -msgstr "Clonazione del volume %s non riuscita." - -#, python-format -msgid "" -"CloudByte does not have a volume corresponding to OpenStack volume [%s]." -msgstr "CloudByte non ha un volume corrispondente al volume OpenStack [%s]." - -#, python-format -msgid "" -"CloudByte operation [%(operation)s] failed for volume [%(vol)s]. Exhausted " -"all [%(max)s] attempts." -msgstr "" -"Operazione CloudByte [%(operation)s] non riuscita per il volume [%(vol)s]. " -"Esauriti tutti i [%(max)s] tentativi." - -#, python-format -msgid "" -"CloudByte snapshot information is not available for OpenStack volume [%s]." -msgstr "" -"Informazioni sull'istantanea CloudByte non disponibili per il volume " -"OpenStack [%s]." - -#, python-format -msgid "CloudByte volume information not available for OpenStack volume [%s]." -msgstr "" -"Informazioni sul volume CloudByte non disponibili per il volume OpenStack " -"[%s]." - -#, python-format -msgid "Cmd :%s" -msgstr "Cmd :%s" - -#, python-format -msgid "Commit clone failed: %(name)s (%(status)d)!" -msgstr "Commit del clone non riuscito: %(name)s (%(status)d)." - -#, python-format -msgid "Commit failed for %s!" -msgstr "Commit non riuscito per %s." - -#, python-format -msgid "Compute cluster: %s not found." -msgstr "Cluster di calcolo: %s non trovato." - -#, python-format -msgid "Configuration value %s is not set." -msgstr "Valore di configurazione %s non impostato." 
- -#, python-format -msgid "Conflict detected in Virtual Volume Set %(volume_set)s: %(error)s" -msgstr "" -"Conflitto rilevato nell'insieme di volumi virtuali %(volume_set)s: %(error)s" - -#, python-format -msgid "Connect to Flexvisor error: %s." -msgstr "Errore di connessione a Flexvisor: %s." - -#, python-format -msgid "Connect to Flexvisor failed: %s." -msgstr "Connessione a Flexvisor non riuscita: %s." - -msgid "Connection error while sending a heartbeat to coordination backend." -msgstr "" -"Errore di connessione durante l'invio di un heartbeat al backend di " -"coordinazione." - -#, python-format -msgid "Connection to %s failed and no secondary!" -msgstr "Connessione a %s non riuscita e nessun elemento secondario." - -#, python-format -msgid "Controller GET failed (%d)" -msgstr "Comando GET del controller non riuscito (%d)" - -#, python-format -msgid "Copy offload workflow unsuccessful. %s" -msgstr "Copia del carico di lavoro offload non eseguita correttamente. %s" - -#, python-format -msgid "Copy snapshot to volume for snapshot %(snap)s volume %(vol)s failed!" -msgstr "" -"La copia dell'istantanea sul volume per l'istantanea %(snap)s volume %(vol)s " -"non è riuscita." - -#, python-format -msgid "Could not GET allocation information (%d)!" -msgstr "" -"Impossibile eseguire il comando GET per le informazioni sull'allocazione " -"(%d)." - -#, python-format -msgid "Could not calculate node utilization for node %s." -msgstr "Non è stato possibile calcolare l'utilizzo del nodo per il nodo %s" - -#, python-format -msgid "Could not connect to %(primary)s or %(secondary)s!" -msgstr "Impossibile connettersi a %(primary)s o %(secondary)s." - -#, python-format -msgid "Could not create snapshot set. Error: '%s'" -msgstr "Impossibile creare la serie di istantanee. Errore: '%s'" - -msgid "Could not decode scheduler options." -msgstr "Impossibile decodificare le opzioni dello scheduler." - -#, python-format -msgid "Could not delete failed image volume %(id)s." 
-msgstr "Impossibile eliminare il volume dell'immagine non riuscita %(id)s." - -#, python-format -msgid "Could not delete the image volume %(id)s." -msgstr "Impossibile eliminare il volume dell'immagine %(id)s." - -#, python-format -msgid "Could not find a host for consistency group %(group_id)s." -msgstr "Impossibile trovare un host per il gruppo di coerenza %(group_id)s." - -#, python-format -msgid "Could not find any hosts (%s)" -msgstr "Impossibile trovare gli host (%s)" - -#, python-format -msgid "" -"Could not find port group : %(portGroupName)s. Check that the EMC " -"configuration file has the correct port group name." -msgstr "" -"Impossibile trovare il gruppo di porte: %(portGroupName)s. Verificare che il " -"file di configurazione EMC presenti il nome del gruppo di porte corretto." - -#, python-format -msgid "Could not find volume with name %(name)s. Error: %(error)s" -msgstr "Impossibile trovare il volume di nome %(name)s. Errore: %(error)s" - -msgid "" -"Could not get performance base counter name. Performance-based scheduler " -"functions may not be available." -msgstr "" -"Non è stato possibile ottenere il nome del contatore di base delle " -"prestazioni. Le funzioni dello scheduler basato sulle prestazioni potrebbero " -"non essere disponibili." - -#, python-format -msgid "Could not get utilization counters from node %s" -msgstr "Non è stato possibile ottenere i contatori di utilizzo dal nodo %s" - -#, python-format -msgid "Could not log in to 3PAR array (%s) with the provided credentials." -msgstr "" -"Non è stato possibile accedere all'array 3PAR (%s) con le credenziali " -"fornite." - -#, python-format -msgid "Could not log in to LeftHand array (%s) with the provided credentials." -msgstr "" -"Non è stato possibile accedere all'array LeftHand (%s) con le credenziali " -"fornite." - -#, python-format -msgid "Could not stat scheduler options file %(filename)s." -msgstr "" -"Impossibile avviare il file delle opzioni dello scheduler %(filename)s." 
- -#, python-format -msgid "Could not validate device %s" -msgstr "Impossibile convalidare il dispositivo %s" - -#, python-format -msgid "" -"Create clone_image_volume: %(volume_id)sfor image %(image_id)s, failed " -"(Exception: %(except)s)" -msgstr "" -"Creazione di clone_image_volume: %(volume_id)s per l'immagine %(image_id)s, " -"non riuscita (Eccezione: %(except)s)" - -#, python-format -msgid "" -"Create consistency group from snapshot-%(snap)s failed: SnapshotNotFound." -msgstr "" -"Creazione del gruppo di coerenza dall'istantanea -%(snap)s non riuscita: " -"SnapshotNotFound." - -#, python-format -msgid "Create consistency group from source %(source)s failed." -msgstr "Creazione del gruppo di coerenza dall'origine %(source)s non riuscita." - -#, python-format -msgid "" -"Create consistency group from source cg-%(cg)s failed: " -"ConsistencyGroupNotFound." -msgstr "" -"Creazione del gruppo di coerenza dall'origine cg-%(cg)s non riuscita: " -"ConsistencyGroupNotFound." - -#, python-format -msgid "Create hypermetro error: %s." -msgstr "Errore di creazione hypermetro: %s." - -#, python-format -msgid "" -"Create new lun from lun for source %(src)s => destination %(dest)s failed!" -msgstr "" -"La creazione della nuova lun dalla lun per l'origine %(src)s => destinazione " -"%(dest)s non è riuscita." - -#, python-format -msgid "Create pair failed. Error: %s." -msgstr "Creazione coppia non riuscita. Errore: %s." - -msgid "Create replication volume error." -msgstr "Errore di creazione del volume di replica." 
- -#, python-format -msgid "Create snapshot notification failed: %s" -msgstr "Notifica di creazione istantanea non riuscita: %s" - -#, python-format -msgid "Create volume failed from snapshot: %s" -msgstr "Creazione del volume non riuscita dall'istantanea: %s" - -#, python-format -msgid "Create volume notification failed: %s" -msgstr "Notifica di creazione volume non riuscita: %s" - -#, python-format -msgid "Creation of snapshot failed for volume: %s" -msgstr "Creazione dell'istantanea non riuscita per il volume: %s" - -#, python-format -msgid "Creation of volume %s failed." -msgstr "Creazione del volume %s non riuscita." - -msgid "" -"Creation request failed. Please verify the extra-specs set for your volume " -"types are entered correctly." -msgstr "" -"Richiesta di creazione non riuscita. Verificare che le specifiche " -"supplementari impostate per i tipi di volume siano immesse correttamente." - -msgid "DB error:" -msgstr "Errore DB:" - -msgid "DBError encountered: " -msgstr "Rilevato DBError: " - -msgid "DRBDmanage: too many assignments returned." -msgstr "DRBDmanage: restituite troppe assegnazioni." - -msgid "Default Storage Profile was not found." -msgstr "Profilo di memoria predefinito non trovato." - -msgid "" -"Default volume type is not found. Please check default_volume_type config:" -msgstr "" -"Impossibile trovare il tipo di volume predefinito. Controllare la " -"configurazione default_volume_type:" - -msgid "Delete consistency group failed to update usages." -msgstr "" -"L'eliminazione del gruppo di coerenza non è riuscita ad aggiornare gli " -"utilizzi." - -#, python-format -msgid "Delete hypermetro error: %s." -msgstr "Errore di eliminazione hypermetro: %s." - -msgid "Delete replication error." -msgstr "Errore di eliminazione replica." - -msgid "Delete snapshot failed, due to snapshot busy." -msgstr "Eliminazione istantanea non riuscita, per istantanea occupata." 
- -#, python-format -msgid "Delete snapshot notification failed: %s" -msgstr "Notifica di eliminazione istantanea non riuscita: %s" - -#, python-format -msgid "Delete volume notification failed: %s" -msgstr "Notifica di eliminazione volume non riuscita: %s" - -#, python-format -msgid "Deleting snapshot %s failed" -msgstr "Eliminazione dell'istantanea %s non riuscita" - -#, python-format -msgid "Deleting zone failed %s" -msgstr "Eliminazione della zona non riuscita %s" - -#, python-format -msgid "Deletion of volume %s failed." -msgstr "Eliminazione del volume %s non riuscita." - -#, python-format -msgid "Destination Volume Group %s does not exist" -msgstr "Il gruppo volumi di destinazione %s non esiste" - -#, python-format -msgid "Detach attachment %(attach_id)s failed." -msgstr "Scollegamento collegamento %(attach_id)s non riuscito." - -#, python-format -msgid "Detach migration source volume failed: %(err)s" -msgstr "" -"Scollegamento del volume di origine della migrazione non riuscito: %(err)s" - -msgid "Detach volume failed, due to remove-export failure." -msgstr "" -"Scollegamento volume non riuscito, a causa di un errore di rimozione " -"esportazione." - -msgid "Detach volume failed, due to uninitialized driver." -msgstr "" -"Scollegamento volume non riuscito, a causa del driver non inizializzato." - -msgid "Detaching snapshot from a remote node is not supported." -msgstr "Scollegamento istantanea da un nodo remoto non supportato." - -#, python-format -msgid "Did not find expected column name in lsvdisk: %s." -msgstr "Impossibile trovare il nome colonna previsto in lsvdisk: %s." - -msgid "Differential restore failed, trying full restore" -msgstr "" -"Ripristino differenziale non riuscito, viene tentato il ripristino completo" - -#, python-format -msgid "Disable replication on volume failed with message: %s" -msgstr "Disabilitazione replica sul volume non riuscita con il messaggio: %s " - -#, python-format -msgid "Disconnection failed with message: %(msg)s." 
-msgstr "Scollegamento non riuscito con il messaggio: %(msg)s." - -msgid "Driver reported error during replication failover." -msgstr "Il driver ha riportato un errore durante il failover della replica." - -#, python-format -msgid "" -"Driver-based migration of volume %(vol)s failed. Move from %(src)s to " -"%(dst)s failed with error: %(error)s." -msgstr "" -"Migrazione basata sul driver del volume %(vol)s non riuscita. Spostamento da " -"%(src)s a %(dst)s non riuscito con errore: %(error)s." - -#, python-format -msgid "Error Attaching volume %(vol)s." -msgstr "Errore di collegamento del volume %(vol)s." - -#, python-format -msgid "" -"Error Create Group: %(groupName)s. Return code: %(rc)lu. Error: %(error)s." -msgstr "" -"Errore nella creazione del gruppo: %(groupName)s. Codice di ritorno: " -"%(rc)lu. Errore: %(error)s." - -#, python-format -msgid "" -"Error Getting Snapshot: %(snapshot)s of Volume: %(lun)s in Pool: %(pool)s, " -"Project: %(project)s Return code: %(ret.status)d, Message: %(ret.data)s." -msgstr "" -"Errore durante l'acquisizione dell'istantanea: %(snapshot)s del volume: " -"%(lun)s nel pool: %(pool)s, Progetto: %(project)s Codice di ritorno: " -"%(ret.status)d, Messaggio: %(ret.data)s." - -#, python-format -msgid "Error JSONDecodeError. %s" -msgstr "Errore JSONDecodeError. %s" - -#, python-format -msgid "" -"Error Setting Volume: %(lun)s to InitiatorGroup: %(initiatorgroup)s Pool: " -"%(pool)s Project: %(project)s Return code: %(ret.status)d Message: " -"%(ret.data)s." -msgstr "" -"Errore durante l'impostazione del volume: %(lun)s nel gruppo iniziatori: " -"%(initiatorgroup)s Pool: %(pool)s Progetto: %(project)s Codice di ritorno: " -"%(ret.status)d Messaggio: %(ret.data)s." - -#, python-format -msgid "Error TypeError. %s" -msgstr "Errore TypeError. 
%s" - -msgid "Error activating LV" -msgstr "Errore durante l'attivazione di LV" - -#, python-format -msgid "Error changing Storage Profile for volume %(original)s to %(name)s" -msgstr "" -"Errore durante la modifica del profilo di memoria per il volume %(original)s " -"in %(name)s." - -#, python-format -msgid "Error cleaning up failed volume creation. Msg - %s." -msgstr "" -"Errore durante la ripulitura della creazione del volume non riuscita. " -"Messaggio - %s." - -msgid "Error cloning volume" -msgstr "Errore durante la clonazione del volume" - -msgid "Error closing channel." -msgstr "Errore di chiusura del canale." - -#, python-format -msgid "" -"Error contacting glance server '%(netloc)s' for '%(method)s', %(extra)s." -msgstr "" -"Errore durante il tentativo di contattare il server glance '%(netloc)s' per " -"'%(method)s', %(extra)s." - -#, python-format -msgid "Error creating QOS rule %s" -msgstr "Errore durante la creazione della regola QOS %s" - -msgid "Error creating Volume" -msgstr "Errore durante la creazione del volume" - -msgid "Error creating Volume Group" -msgstr "Errore durante la creazione del gruppo volumi" - -msgid "Error creating chap record." -msgstr "Errore durante la creazione del record chap." - -msgid "Error creating cloned volume" -msgstr "Errore durante la creazione del volume clonato" - -msgid "Error creating snapshot" -msgstr "Errore durante la creazione dell'istantanea" - -msgid "Error creating volume" -msgstr "Errore durante la creazione del volume" - -#, python-format -msgid "Error creating volume. Msg - %s." -msgstr "Errore durante la creazione del volume. Messaggio - %s." - -msgid "Error deactivating LV" -msgstr "Errore durante la disattivazione di LV" - -msgid "Error deleting snapshot" -msgstr "Errore durante l'eliminazione dell'istantanea" - -#, python-format -msgid "Error detaching snapshot %(snapshot)s, due to remove export failure." 
-msgstr "" -"Errore durante lo scollegamento dell'istantanea %(snapshot)s, causato da un " -"errore di rimozione dell'esportazione." - -#, python-format -msgid "Error detaching volume %(volume)s, due to remove export failure." -msgstr "" -"Errore durante lo scollegamento del volume %(volume)s, causato da un errore " -"di rimozione dell'esportazione." - -#, python-format -msgid "Error detaching volume %s" -msgstr "Errore durante lo scollegamento del volume %s" - -#, python-format -msgid "Error disassociating storage group from policy: %s." -msgstr "" -"Errore durante la disassociazione del gruppo di archiviazione dalla " -"politica: %s" - -msgid "Error during re-export on driver init." -msgstr "" -"Errore durante la riesportazione durante l'inizializzazione del driver." - -msgid "" -"Error encountered on Cinder backend during thaw operation, service will " -"remain frozen." -msgstr "" -"Si è verificato un errore sul backend Cinder durante l'operazione di " -"sblocco, il servizio resterà bloccato." - -msgid "Error executing SSH command." -msgstr "Errore durante l'esecuzione del comando SSH." - -msgid "Error executing command via ssh." -msgstr "Errore di esecuzione del comando tramite ssh." - -#, python-format -msgid "Error executing command via ssh: %s" -msgstr "Errore di esecuzione comando tramite ssh: %s" - -#, python-format -msgid "Error expanding volume %s." -msgstr "Errore durante l'espansione del volume %s." - -msgid "Error extending Volume" -msgstr "Errore durante l'estensione del volume" - -msgid "Error extending volume" -msgstr "Errore durante l'estensione del volume" - -#, python-format -msgid "Error extending volume %(id)s. Ex: %(ex)s" -msgstr "Errore durante l'estensione del volume: %(id)s. Eccezione: %(ex)s" - -#, python-format -msgid "Error extending volume: %(vol)s. Exception: %(ex)s" -msgstr "Errore durante l'estensione del volume: %(vol)s. Eccezione: %(ex)s" - -#, python-format -msgid "Error finding replicated pg snapshot on %(secondary)s." 
-msgstr "" -"Errore durante la ricerca dell'istantanea pg replicata su %(secondary)s." - -#, python-format -msgid "Error finding target pool instance name for pool: %(targetPoolName)s." -msgstr "" -"Errore durante la ricerca del nome dell'istanza del pool di destinazione per " -"il pool: %(targetPoolName)s." - -#, python-format -msgid "Error getting FaultDomainList for %s" -msgstr "Errore durante il richiamo di FaultDomainList per %s" - -#, python-format -msgid "Error getting LUN attribute. Exception: %s" -msgstr "Errore durante il richiamo dell'attributo LUN. Eccezione: %s" - -msgid "Error getting active FC target ports." -msgstr "Errore durante il richiamo di porte di destinazione FC attive." - -msgid "Error getting active ISCSI target iqns." -msgstr "Errore durante il richiamo di iqns di destinazione ISCSI attivi." - -msgid "Error getting active ISCSI target portals." -msgstr "Errore durante il richiamo di portali destinazione ISCSI attivi." - -msgid "Error getting array, pool, SLO and workload." -msgstr "Errore durante il richiamo di array, pool, SLO e carico di lavoro." - -msgid "Error getting chap record." -msgstr "Errore durante il richiamo del record chap." - -msgid "Error getting name server info." -msgstr "Errore durante il richiamo delle informazioni sul server dei nomi. " - -msgid "Error getting show fcns database info." -msgstr "Impossibile ottenere le informazioni sul database show fcns. " - -msgid "Error getting target pool name and array." -msgstr "" -"Errore durante il richiamo del nome del pool di destinazione e dell'array." 
- -#, python-format -msgid "Error has occurred: %s" -msgstr "Si è verificato un errore: %s" - -#, python-format -msgid "Error in copying volume: %s" -msgstr "Errore durante la copia del volume: %s" - -#, python-format -msgid "" -"Error in extending volume size: Volume: %(volume)s Vol_Size: %(vol_size)d " -"with Snapshot: %(snapshot)s Snap_Size: %(snap_size)d" -msgstr "" -"Errore durante l'estensione della dimensione del volume: Volume: %(volume)s " -"Dim_Vol: %(vol_size)d con Istantanea: %(snapshot)s Dim_Istant: %(snap_size)d" - -#, python-format -msgid "Error in workflow copy from cache. %s." -msgstr "Errore nella copia del carico di lavoro dalla cache. %s." - -#, python-format -msgid "Error invalid json: %s" -msgstr "Errore di json non valido: %s" - -msgid "Error manage existing get volume size." -msgstr "Errore di gestione della dimensione del volume get esistente." - -msgid "Error manage existing volume." -msgstr "Errore di gestione del volume esistente." - -#, python-format -msgid "Error managing replay %s" -msgstr "Errore durante la gestione della risposta %s. " - -msgid "Error mapping VDisk-to-host" -msgstr "Errore durante l'associazione di VDisk a host" - -#, python-format -msgid "Error mapping volume: %s" -msgstr "Errore durante l'associazione del volume: %s. " - -#, python-format -msgid "" -"Error migrating volume: %(volumename)s. to target pool %(targetPoolName)s." -msgstr "" -"Errore durante la migrazione del volume: %(volumename)s.nel pool di " -"destinazione %(targetPoolName)s." - -#, python-format -msgid "Error migrating volume: %s" -msgstr "Errore durante la migrazione del volume: %s" - -#, python-format -msgid "" -"Error occurred in the volume driver when updating consistency group " -"%(group_id)s." -msgstr "" -"Si è verificato un errore nel driver del volume durante l'aggiornamento del " -"gruppo di coerenza %(group_id)s." - -msgid "" -"Error occurred when adding hostgroup and lungroup to view. Remove lun from " -"lungroup now." 
-msgstr "" -"Si è verificato un errore durante l'aggiunta del gruppo di host e del gruppo " -"di lun alla vista. Rimuovere la lun dal gruppo di lun ora." - -#, python-format -msgid "" -"Error occurred when building request spec list for consistency group %s." -msgstr "" -"Si è verificato un errore durante la creazione dell'elenco di specifiche " -"delle richieste per il gruppo di coerenza %s." - -#, python-format -msgid "Error occurred when creating cgsnapshot %s." -msgstr "Si è verificato un errore durante la creazione di cgsnapshot %s." - -#, python-format -msgid "" -"Error occurred when creating cloned volume in the process of creating " -"consistency group %(group)s from source CG %(source_cg)s." -msgstr "" -"Si è verificato un errore durante la creazione del volume clonato nel " -"processo di creazione del gruppo di coerenza %(group)s dalla CG di origine " -"%(source_cg)s." - -#, python-format -msgid "" -"Error occurred when creating consistency group %(cg)s from cgsnapshot " -"%(cgsnap)s." -msgstr "" -"Si è verificato un errore durante la creazione del gruppo di coerenza %(cg)s " -"dall'istantanea %(cgsnap)s." - -#, python-format -msgid "" -"Error occurred when creating consistency group %(group)s from cgsnapshot " -"%(cgsnap)s." -msgstr "" -"Si è verificato un errore durante la creazione del gruppo di coerenza " -"%(group)s dall'istantanea %(cgsnap)s." - -#, python-format -msgid "" -"Error occurred when creating consistency group %(group)s from source CG " -"%(source_cg)s." -msgstr "" -"Si è verificato un errore durante la creazione del gruppo di coerenza " -"%(group)s dalla CG di origine %(source_cg)s." - -#, python-format -msgid "Error occurred when creating consistency group %s." -msgstr "" -"Si è verificato un errore durante la creazione del gruppo di coerenza %s." - -#, python-format -msgid "" -"Error occurred when creating volume entry from snapshot in the process of " -"creating consistency group %(group)s from cgsnapshot %(cgsnap)s." 
-msgstr "" -"Si è verificato un errore durante la creazione della voce volume " -"dall'istantanea nel processo di creazione del gruppo di coerenza %(group)s " -"dall'istantanea %(cgsnap)s." - -#, python-format -msgid "Error occurred when updating consistency group %(group_id)s." -msgstr "" -"Si è verificato un errore durante l'aggiornamento del gruppo di coerenza " -"%(group_id)s." - -#, python-format -msgid "Error occurred while cloning backing: %s during retype." -msgstr "" -"Si è verificato un errore durante la clonazione del backup : %s durante la " -"riscrittura." - -#, python-format -msgid "Error occurred while copying %(src)s to %(dst)s." -msgstr "Si è verificato un errore durante la copia di %(src)s in %(dst)s." - -#, python-format -msgid "Error occurred while copying image: %(id)s to volume: %(vol)s." -msgstr "" -"Si è verificato un errore durante la copia dell'immagine: %(id)s nel volume: " -"%(vol)s." - -#, python-format -msgid "Error occurred while copying image: %(image_id)s to %(path)s." -msgstr "" -"Si è verificato un errore durante la copia dell'immagine: %(image_id)s in " -"%(path)s." - -msgid "Error occurred while creating temporary backing." -msgstr "Si è verificato un errore durante la creazione del backup temporaneo." - -#, python-format -msgid "Error occurred while creating volume: %(id)s from image: %(image_id)s." -msgstr "" -"Si è verificato un errore durante la creazione del volume: %(id)s " -"dall'immagine: %(image_id)s." - -#, python-format -msgid "" -"Error on execute %(command)s. Error code: %(exit_code)d Error msg: %(result)s" -msgstr "" -"Errore nell'esecuzione di %(command)s. Codice di errore: %(exit_code)d " -"Messaggio di errore: %(result)s" - -#, python-format -msgid "" -"Error on execute command. Error code: %(exit_code)d Error msg: %(result)s" -msgstr "" -"Errore nell'esecuzione del comando. Codice di errore: %(exit_code)d " -"Messaggio di errore: %(result)s" - -msgid "Error parsing array from host capabilities." 
-msgstr "Errore durante l'analisi dell'array dalle funzionalità host." - -msgid "Error parsing array, pool, SLO and workload." -msgstr "Errore durante l'analisi di array, pool, SLO e carico di lavoro." - -msgid "Error parsing target pool name, array, and fast policy." -msgstr "" -"Errore durante l'analisi del nome del pool di destinazione, dell'array e " -"della politica fast." - -#, python-format -msgid "" -"Error provisioning volume %(lun_name)s on %(volume_name)s. Details: %(ex)s" -msgstr "" -"Errore durante il provisioning del volume %(lun_name)s su %(volume_name)s. " -"Dettagli: %(ex)s" - -msgid "Error querying thin pool about data_percent" -msgstr "Errore durante la query del pool thin su data_percent" - -msgid "Error renaming logical volume" -msgstr "Errore durante la ridenominazione del volume logico" - -#, python-format -msgid "Error renaming volume %(original)s to %(name)s" -msgstr "Errore durante la ridenominazione del volume %(original)s in %(name)s." - -#, python-format -msgid "Error resolving host %(host)s. Error - %(e)s." -msgstr "Errore durante la risoluzione dell'host %(host)s. Errore - %(e)s." - -#, python-format -msgid "Error retrieving LUN %(vol)s number" -msgstr "Errore durante il richiamo del numero di LUN %(vol)s" - -#, python-format -msgid "Error running SSH command: \"%s\"." -msgstr "Errore durante l'esecuzione del comando SSH: \"%s\"." - -#, python-format -msgid "Error running SSH command: %s" -msgstr "Errore durante l'esecuzione del comando SSH: %s" - -msgid "Error running command." -msgstr "Errore durante l'esecuzione del comando." - -#, python-format -msgid "" -"Error scheduling %(volume_id)s from last vol-service: %(last_host)s : %(exc)s" -msgstr "" -"Errore durante la pianificazione di %(volume_id)s dall'ultimo vol-service: " -"%(last_host)s : %(exc)s" - -msgid "Error sending a heartbeat to coordination backend." -msgstr "Errore durante l'invio di un heartbeat al backend di coordinazione." 
- -#, python-format -msgid "Error setting Flash Cache policy to %s - exception" -msgstr "" -"Errore durante l'impostazione della politica Flash Cache su %s - eccezione" - -msgid "Error starting coordination backend." -msgstr "Errore durante l'avvio del backend di coordinazione." - -#, python-format -msgid "Error trying to change %(opt)s from %(old)s to %(new)s" -msgstr "Errore durante il tentativo di modificare %(opt)s da %(old)s a %(new)s" - -#, python-format -msgid "Error unmanaging replay %s" -msgstr "Errore durante l'annullamento della gestione della risposta %s. " - -#, python-format -msgid "Error unmapping volume: %s" -msgstr "Errore durante l'annullamento dell'associazione del volume: %s. " - -#, python-format -msgid "Error verifying LUN container %(bkt)s" -msgstr "Errore durante la verifica del contatore LUN %(bkt)s" - -#, python-format -msgid "Error verifying iSCSI service %(serv)s on host %(hst)s" -msgstr "" -"Errore durante la verifica del servizio iSCSI %(serv)s sull'host %(hst)s" - -msgid "Error: unable to snap replay" -msgstr "Errore: impossibile eseguire l'istantanea della risposta" - -#, python-format -msgid "Exception cloning volume %(name)s from source volume %(source)s." -msgstr "" -"Eccezione durante la clonazione del volume %(name)s dal volume di origine " -"%(source)s." - -#, python-format -msgid "Exception creating LUN %(name)s in pool %(pool)s." -msgstr "Eccezione durante la creazione della LUN %(name)s nel pool %(pool)s." - -#, python-format -msgid "Exception creating vol %(name)s on pool %(pool)s." -msgstr "Eccezione durante la creazione del volume %(name)s nel pool %(pool)s." - -#, python-format -msgid "" -"Exception creating volume %(name)s from source %(source)s on share %(share)s." -msgstr "" -"Eccezione durante la creazione del volume %(name)s dall'origine %(source)s " -"sulla condivisione %(share)s.." 
- -#, python-format -msgid "Exception details: %s" -msgstr "Dettagli eccezione: %s" - -#, python-format -msgid "Exception during mounting %s" -msgstr "Eccezione durante il montaggio di %s" - -#, python-format -msgid "Exception during mounting %s." -msgstr "Eccezione durante il montaggio di %s." - -msgid "Exception during mounting." -msgstr "Eccezione durante il montaggio." - -#, python-format -msgid "Exception during snapCPG revert: %s" -msgstr "Eccezione durante il ripristino snapCPG: %s" - -msgid "Exception encountered: " -msgstr "Rilevata eccezione:" - -#, python-format -msgid "Exception handling resource: %s" -msgstr "Eccezione durante la gestione della risorsa: %s" - -msgid "Exception in string format operation" -msgstr "Eccezione nell'operazione di formattazione della stringa" - -msgid "Exception loading extension." -msgstr "Eccezione durante il caricamento dell'estensione." - -#, python-format -msgid "Exception: %(ex)s" -msgstr "Eccezione: %(ex)s" - -#, python-format -msgid "Exception: %s" -msgstr "Eccezione: %s" - -#, python-format -msgid "Exception: %s." -msgstr "Eccezione: %s." - -#, python-format -msgid "Exec of \"rm\" command on backing file for %s was unsuccessful." -msgstr "" -"L'esecuzione del comando \"rm\" nel file di backup per %s non è stata " -"eseguita correttamente." - -#, python-format -msgid "Exists snapshot notification failed: %s" -msgstr "Notifica di esistenza istantanea non riuscita: %s" - -#, python-format -msgid "Exists volume notification failed: %s" -msgstr "Notifica di esistenza volume non riuscita: %s" - -msgid "Extend volume failed." -msgstr "Estensione del volume non riuscita." - -#, python-format -msgid "Extension of volume %s failed." -msgstr "Estensione del volume %s non riuscita" - -msgid "" -"Extra spec replication:mode must be set and must be either 'sync' or " -"'periodic'." -msgstr "" -"La specifica supplementare replication:mode deve essere impostata e deve " -"essere 'sync' o 'periodic'." 
- -msgid "" -"Extra spec replication:sync_period must be greater than 299 and less than " -"31622401 seconds." -msgstr "" -"La specifica supplementare replication:sync_period deve essere maggiore di " -"299 e minore di 31622401 secondi." - -#, python-format -msgid "Extra specs must be specified as capabilities:%s=' True'." -msgstr "" -"Le specifiche supplementari devono essere specificate come funzionalità:" -"%s=' True'." - -msgid "" -"Extra specs must be specified as replication_type=' sync' or ' " -"async'." -msgstr "" -"Le specifiche supplementari devono essere specificate come " -"replication_type=' sync' o ' async'." - -msgid "FAST is not supported on this array." -msgstr "FAST non è supportato su questo array." - -#, python-format -msgid "Failed collecting fcns database info for fabric %s" -msgstr "" -"Impossibile raccogliere le informazioni sul database fcns per fabric %s" - -#, python-format -msgid "Failed collecting name server info from fabric %s" -msgstr "" -"Impossibile raccogliere le informazioni sul server dei nomi da fabric %s" - -#, python-format -msgid "Failed collecting nsshow info for fabric %s" -msgstr "Impossibile raccogliere le informazioni nsshow per fabric %s" - -msgid "Failed collecting show fcns database for fabric" -msgstr "Errore durante la raccolta del database show fcns per fabric" - -#, python-format -msgid "Failed destroying volume entry %s" -msgstr "Impossibile distruggere la voce del volume %s" - -#, python-format -msgid "Failed destroying volume entry: %s." 
-msgstr "Impossibile distruggere la voce del volume: %s" - -#, python-format -msgid "" -"Failed fetching snapshot %(snapshot_id)s bootable flag using the provided " -"glance snapshot %(snapshot_ref_id)s volume reference" -msgstr "" -"Impossibile aggiornare il flag avviabile dell'istantanea %(snapshot_id)s " -"utilizzando il riferimento del volume dell'istantanea glance fornita " -"%(snapshot_ref_id)s" - -#, python-format -msgid "Failed getting active zone set from fabric %s" -msgstr "Impossibile ottenere l'insieme di zone attive da fabric %s" - -#, python-format -msgid "Failed getting zone status from fabric %s" -msgstr "Impossibile ottenere lo stato della zona da fabric %s" - -#, python-format -msgid "Failed image conversion during cache creation: %s" -msgstr "Conversione immagine non riuscita durante la creazione della cache: %s" - -#, python-format -msgid "" -"Failed notifying about the snapshot action %(event)s for snapshot %(snp_id)s." -msgstr "" -"Impossibile inviare una notifica sull'azione %(event)s dell'istantanea per " -"l'istantanea %(snp_id)s." - -#, python-format -msgid "" -"Failed notifying about the volume action %(event)s for volume %(volume_id)s" -msgstr "" -"Impossibile inviare una notifica sull'azione %(event)s del volume per il " -"volume %(volume_id)s" - -#, python-format -msgid "Failed notifying on %(topic)s payload %(payload)s" -msgstr "Impossibile eseguire la notifica su %(topic)s payload %(payload)s" - -#, python-format -msgid "" -"Failed recovery attempt to create iscsi backing lun for Volume ID:" -"%(vol_id)s: %(e)s" -msgstr "" -"Tentativo di recupero per creare la lun di backup iscsi per l'ID volume:" -"%(vol_id)s: %(e)s non riuscito" - -#, python-format -msgid "Failed rolling back quota for %s reservations" -msgstr "Impossibile eseguire il rollback della quota per %s prenotazioni" - -#, python-format -msgid "Failed rolling back quota for %s reservations." -msgstr "Impossibile eseguire il rollback della quota per %s prenotazioni." 
- -#, python-format -msgid "" -"Failed setting source volume %(source_volid)s back to its initial " -"%(source_status)s status" -msgstr "" -"Impossibile impostare il volume di origine %(source_volid)s sullo stato " -"iniziale %(source_status)s" - -#, python-format -msgid "" -"Failed to Roll back to re-add volume %(volumeName)s to default storage group " -"for fast policy %(fastPolicyName)s. Please contact your sysadmin to get the " -"volume returned to the default storage group." -msgstr "" -"Impossibile eseguire il rollback per aggiungere nuovamente il volume " -"%(volumeName)s al gruppo di archiviazione predefinito per la politica FAST " -"%(fastPolicyName)s. Contattare l'amministratore di sistema per fare in modo " -"che il volume venga restituito al gruppo di archiviazione predefinito." - -#, python-format -msgid "" -"Failed to Roll back to re-add volume %(volumeName)s to default storage group " -"for fast policy %(fastPolicyName)s: Please contact your sys admin to get the " -"volume re-added manually." -msgstr "" -"Impossibile eseguire il rollback per aggiungere nuovamente il volume " -"%(volumeName)s al gruppo di archiviazione predefinito per la politica FAST " -"%(fastPolicyName)s. Contattare l'amministratore di sistema per fare in modo " -"che il volume venga aggiunto di nuovo manualmente." - -#, python-format -msgid "" -"Failed to add %(volumeName)s to default storage group for fast policy " -"%(fastPolicyName)s." -msgstr "" -"Impossibile aggiungere %(volumeName)s al gruppo di archiviazione predefinito " -"per la politica fast %(fastPolicyName)s." - -#, python-format -msgid "Failed to add %s to cg." -msgstr "Impossibile aggiungere %s a cg." 
- -#, python-format -msgid "Failed to add device to handler %s" -msgstr "Impossibile aggiungere il dispositivo all'handler %s" - -#, python-format -msgid "Failed to add initiator iqn %s to target" -msgstr "Impossibile aggiungere l'iniziatore iqn %s alla destinazione" - -#, python-format -msgid "Failed to add initiator to group for SCST target %s" -msgstr "" -"Impossibile aggiungere l'iniziatore al gruppo per la destinazione SCST %s" - -#, python-format -msgid "Failed to add lun to SCST target id:%(vol_id)s: %(e)s" -msgstr "" -"Impossibile aggiungere la lun all'id di destinazione SCST:%(vol_id)s: %(e)s" - -#, python-format -msgid "Failed to add multihost-access for volume \"%s\"." -msgstr "Impossibile aggiungere l'accesso multihost per il volume \"%s\"." - -#, python-format -msgid "" -"Failed to add storage group %(storageGroupInstanceName)s to tier policy rule " -"%(tierPolicyRuleInstanceName)s." -msgstr "" -"Impossibile aggiungere il gruppo di archiviazione " -"%(storageGroupInstanceName)s alla regola delle politiche di livellamento " -"%(tierPolicyRuleInstanceName)s." - -#, python-format -msgid "Failed to add target(port: %s)" -msgstr "Impossibile aggiungere la destinazione (port: %s)" - -msgid "Failed to apply replication:activereplay setting" -msgstr "Impossibile applicare l'impostazione replication:activereplay" - -msgid "Failed to attach source volume for copy." -msgstr "Impossibile collegare il volume di origine per la copia." - -#, python-format -msgid "Failed to attach volume %(vol)s." -msgstr "Impossibile collegare il volume %(vol)s." - -msgid "Failed to authenticate user." -msgstr "Impossibile autenticare l'utente." - -#, python-format -msgid "Failed to check cluster status.(command: %s)" -msgstr "Impossibile verificare lo stato del cluster.(command: %s)" - -#, python-format -msgid "Failed to clone image volume %(id)s." -msgstr "Impossibile clonare il volume dell'immagine %(id)s." 
- -#, python-format -msgid "Failed to clone volume %(volume_id)s for image %(image_id)s." -msgstr "" -"Impossibile clonare il volume %(volume_id)s per l'immagine %(image_id)s." - -#, python-format -msgid "Failed to clone volume.(command: %s)" -msgstr "Impossibile clonare il volume.(command: %s)" - -#, python-format -msgid "Failed to close disk device %s" -msgstr "Impossibile chiudere il dispositivo disco %s" - -#, python-format -msgid "" -"Failed to collect return properties for volume %(vol)s and connector " -"%(conn)s." -msgstr "" -"Impossibile raccogliere le proprietà di ritorno per il volume %(vol)s e il " -"connettore %(conn)s." - -#, python-format -msgid "Failed to commit reservations %s" -msgstr "Impossibile eseguire il commit delle prenotazioni %s" - -#, python-format -msgid "Failed to copy %(src)s to %(dest)s." -msgstr "Impossibile copiare %(src)s su %(dest)s. " - -#, python-format -msgid "Failed to copy image %(image_id)s to volume: %(volume_id)s" -msgstr "Impossibile copiare l'immagine %(image_id)s sul volume: %(volume_id)s" - -#, python-format -msgid "Failed to copy image to volume: %(volume_id)s" -msgstr "Impossibile copiare l'immagine sul volume: %(volume_id)s" - -#, python-format -msgid "Failed to copy volume %(src)s to %(dest)s." -msgstr "Impossibile copiare il volume %(src)s su %(dest)s. " - -#, python-format -msgid "Failed to copy volume %(vol1)s to %(vol2)s" -msgstr "Impossibile copiare il volume %(vol1)s su %(vol2)s" - -#, python-format -msgid "Failed to create %(conf)s for volume id:%(vol_id)s" -msgstr "Impossibile creare %(conf)s per l'id volume:%(vol_id)s" - -#, python-format -msgid "Failed to create CG from CGsnapshot. Exception: %s" -msgstr "Impossibile creare CG da CGsnapshot. Eccezione: %s." - -#, python-format -msgid "Failed to create CGSnapshot. Exception: %s." -msgstr "Impossibile creare CGSnapshot. Eccezione: %s." 
- -msgid "" -"Failed to create SOAP client.Check san_ip, username, password and make sure " -"the array version is compatible" -msgstr "" -"Impossibile creare il cliente SOAP. Controllare san_ip, nome utente, " -"password e verificare che la versione array sia compatibile" - -#, python-format -msgid "" -"Failed to create a first volume for storage group : %(storageGroupName)s." -msgstr "" -"Impossibile creare un primo volume per il gruppo di archiviazione: " -"%(storageGroupName)s." - -#, python-format -msgid "Failed to create blkio cgroup '%(name)s'." -msgstr "Impossibile creare blkio cgroup '%(name)s'." - -#, python-format -msgid "Failed to create clone of volume \"%s\"." -msgstr "Impossibile creare il clone del volume \"%s\"." - -#, python-format -msgid "Failed to create cloned volume %s." -msgstr "Impossibile creare il volume clonato %s." - -#, python-format -msgid "Failed to create consistency group %(group_id)s." -msgstr "Impossibile creare il gruppo di coerenza %(group_id)s." - -#, python-format -msgid "" -"Failed to create default storage group for FAST policy : %(fastPolicyName)s." -msgstr "" -"Impossibile creare il gruppo di archiviazione predefinito per la politica " -"FAST: %(fastPolicyName)s." - -#, python-format -msgid "Failed to create group to SCST target %s" -msgstr "Impossibile creare la destinazione SCST del gruppo %s" - -#, python-format -msgid "Failed to create hardware id(s) on %(storageSystemName)s." -msgstr "Impossibile creare l'ID hardware su %(storageSystemName)s." - -#, python-format -msgid "" -"Failed to create iscsi target for Volume ID: %(vol_id)s. Please ensure your " -"tgtd config file contains 'include %(volumes_dir)s/*'" -msgstr "" -"Impossibile creare la destinazione iscsi per l'ID volume: %(vol_id)s. 
" -"Verificare che il file di configurazione tgtd contenga 'include " -"%(volumes_dir)s/*'" - -#, python-format -msgid "Failed to create iscsi target for Volume ID: %(vol_id)s: %(e)s" -msgstr "" -"Impossibile creare la destinazione iscsi per l'ID volume: %(vol_id)s: %(e)s" - -#, python-format -msgid "" -"Failed to create iscsi target for volume id:%(vol_id)s. Please verify your " -"configuration in %(volumes_dir)s'" -msgstr "" -"Impossibile creare la destinazione iscsi per l'id volume:%(vol_id)s. " -"Verificare la configurazione in %(volumes_dir)s'" - -#, python-format -msgid "Failed to create iscsi target for volume id:%(vol_id)s: %(e)s" -msgstr "" -"Impossibile creare la destinazione iscsi per l'id volume:%(vol_id)s: %(e)s" - -#, python-format -msgid "Failed to create iscsi target for volume id:%s" -msgstr "Impossibile creare la destinazione iscsi per l'id volume:%s" - -#, python-format -msgid "Failed to create iscsi target for volume id:%s." -msgstr "Impossibile creare la destinazione iscsi per l'id volume:%s." - -#, python-format -msgid "Failed to create manage_existing flow: %(object_type)s %(object_id)s." -msgstr "" -"Impossibile creare il flusso manage_existing: %(object_type)s %(object_id)s." - -#, python-format -msgid "Failed to create snapshot of volume \"%s\"." -msgstr "Impossibile creare l'istantanea del volume \"%s\"." - -#, python-format -msgid "Failed to create snapshot. (command: %s)" -msgstr "Impossibile creare l'istantanea. (comando: %s)" - -#, python-format -msgid "Failed to create transfer record for %s" -msgstr "Impossibile creare il record di trasferimento per %s" - -#, python-format -msgid "Failed to create volume \"%s\"." -msgstr "Impossibile creare il volume \"%s\"." - -#, python-format -msgid "Failed to create volume %s" -msgstr "Impossibile creare il volume %s" - -#, python-format -msgid "Failed to create volume %s." -msgstr "Impossibile creare il volume %s." - -#, python-format -msgid "Failed to create volume from snapshot \"%s\"." 
-msgstr "Impossibile creare il volume dall'istantanea \"%s\"." - -#, python-format -msgid "Failed to create volume. %s" -msgstr "Impossibile creare il volume. %s" - -#, python-format -msgid "Failed to create volume: %(name)s (%(status)s)" -msgstr "Impossibile creare il volume: %(name)s (%(status)s)" - -#, python-format -msgid "Failed to created Cinder secure environment indicator file: %s" -msgstr "Impossibile creare il file indicatore dell'ambiente sicuro Cinder; %s" - -#, python-format -msgid "Failed to delete initiator iqn %s from target." -msgstr "Impossibile eliminare l'iniziatore iqn %s dalla destinazione." - -#, python-format -msgid "Failed to delete snapshot %(snap)s of volume %(vol)s." -msgstr "Impossibile eliminare l'istantanea %(snap)s del volume %(vol)s." - -#, python-format -msgid "Failed to delete snapshot. (command: %s)" -msgstr "Impossibile eliminare l'istantanea. (comando: %s)" - -#, python-format -msgid "" -"Failed to delete the snapshot %(snap)s of CGSnapshot. Exception: " -"%(exception)s." -msgstr "" -"Impossibile eliminare l'istantanea %(snap)s di CGSnapshot. Eccezione: " -"%(exception)s." - -#, python-format -msgid "" -"Failed to delete the snapshot %(snap)s of cgsnapshot: %(cgsnapshot_id)s. " -"Exception: %(exception)s." -msgstr "" -"Impossibile eliminare l'istantanea %(snap)s di cgsnapshot: " -"%(cgsnapshot_id)s. Eccezione: %(exception)s." - -#, python-format -msgid "Failed to delete the volume %(vol)s of CG. Exception: %(exception)s." -msgstr "" -"Impossibile eliminare il volume %(vol)s di CG. Eccezione: %(exception)s." - -#, python-format -msgid "Failed to delete volume \"%s\"." -msgstr "Impossibile eliminare il volume \"%s\"." - -#, python-format -msgid "Failed to delete volume %s" -msgstr "Impossibile eliminare il volume %s" - -#, python-format -msgid "Failed to delete volume. %s" -msgstr "Impossibile eliminare il volume. %s" - -#, python-format -msgid "Failed to ensure export of volume \"%s\"." 
-msgstr "Impossibile garantire l'esportazione del volume \"%s\"." - -#, python-format -msgid "Failed to ensure export of volume %s" -msgstr "Impossibile garantire l'esportazione del volume %s" - -#, python-format -msgid "Failed to export fiber channel target due to %s" -msgstr "Impossibile esportare la destinazione fiber channel a causa di %s" - -#, python-format -msgid "Failed to extend volume: %(vol)s to size: %(size)s GB." -msgstr "" -"Impossibile estendere il volume: %(vol)s alla dimensione di: %(size)s GB." - -#, python-format -msgid "" -"Failed to extend_volume %(name)s from %(current_size)sGB to %(new_size)sGB." -msgstr "" -"Impossibile estendere il volume %(name)s da %(current_size)sGB a " -"%(new_size)sGB." - -#, python-format -msgid "Failed to failover volume %(volume_id)s to %(target)s: %(error)s." -msgstr "" -"Impossibile eseguire il failover del volume %(volume_id)s su %(target)s: " -"%(error)s." - -#, python-format -msgid "Failed to find %(s)s. Result %(r)s" -msgstr "impossibile trovare %(s)s. Risultato %(r)s" - -msgid "Failed to get IQN!" -msgstr "Impossibile ottenere IQN." - -msgid "Failed to get LUN information!" -msgstr "Impossibile ottenere le informazioni sulla LUN." - -#, python-format -msgid "Failed to get allocation information (%d)!" -msgstr "Impossibile ottenere le informazioni sull'allocazione (%d)." - -#, python-format -msgid "Failed to get allocation information: %(host)s (%(status)d)!" -msgstr "" -"Impossibile ottenere le informazioni sull'allocazione: %(host)s (%(status)d)!" 
- -#, python-format -msgid "Failed to get device number for throttling: %(error)s" -msgstr "" -"Impossibile ottenere il numero di dispositivo per la regolazione: %(error)s" - -#, python-format -msgid "" -"Failed to get driver initiator data for initiator %(initiator)s and " -"namespace %(namespace)s" -msgstr "" -"Impossibile ottenere i dati dell'iniziatore del driver per l'iniziatore " -"%(initiator)s e lo spazio dei nomi %(namespace)s" - -#, python-format -msgid "Failed to get fiber channel info from storage due to %(stat)s" -msgstr "" -"Impossibile ottenere le informazioni fiber channel dalla memoria a causa di " -"%(stat)s" - -#, python-format -msgid "Failed to get fiber channel target from storage server due to %(stat)s" -msgstr "" -"Impossibile ottenere la destinazione fiber channel dal server di memoria a " -"causa di %(stat)s" - -#, python-format -msgid "Failed to get or create storage group %(storageGroupName)s." -msgstr "" -"Impossibile ottenere o creare il gruppo di archiviazione " -"%(storageGroupName)s." - -#, python-format -msgid "Failed to get response: %s." -msgstr "Impossibile ottenere la risposta: %s." - -#, python-format -msgid "Failed to get server info due to %(state)s." -msgstr "Impossibile ottenere le informazioni sul server a causa di %(state)s." - -msgid "Failed to get sns table" -msgstr "Impossibile ottenere la tabella sns" - -#, python-format -msgid "Failed to get target wwpns from storage due to %(stat)s" -msgstr "" -"Impossibile ottenere i wwpn di destinazione dalla memoria a causa di %(stat)s" - -msgid "Failed to get updated stats from Datera Cluster." -msgstr "Impossibile ottenere le statistiche aggiornate dal cluster Datera." - -msgid "Failed to get updated stats from Datera cluster." -msgstr "Impossibile ottenere le statistiche aggiornate dal cluster Datera." - -#, python-format -msgid "Failed to get volume status. %s" -msgstr "Impossibile ottenere lo stato del volume. 
%s" - -msgid "Failed to initialize connection" -msgstr "Impossibile inizializzare la connessione" - -#, python-format -msgid "Failed to initialize connection to volume \"%s\"." -msgstr "Impossibile inizializzare la connessione al volume \"%s\"." - -msgid "Failed to initialize connection." -msgstr "Impossibile inizializzare la connessione." - -msgid "Failed to initialize driver." -msgstr "Impossibile inizializzare il driver." - -#, python-format -msgid "Failed to issue df command for path %(path)s, error: %(error)s." -msgstr "" -"Impossibile eseguire il comando df per il percorso %(path)s, errore: " -"%(error)s." - -#, python-format -msgid "Failed to issue mmgetstate command, error: %s." -msgstr "Impossibile eseguire il comando mmgetstate, errore: %s." - -#, python-format -msgid "Failed to issue mmlsattr command for path %(path)s, error: %(error)s." -msgstr "" -"Impossibile eseguire il comando mmlsattr per il percorso %(path)s, errore: " -"%(error)s" - -#, python-format -msgid "Failed to issue mmlsattr command on path %(path)s, error: %(error)s" -msgstr "" -"Impossibile eseguire il comando mmlsattr sul percorso %(path)s, errore: " -"%(error)s" - -#, python-format -msgid "Failed to issue mmlsconfig command, error: %s." -msgstr "Impossibile eseguire il comando mmlsconfig, errore: %s." - -#, python-format -msgid "Failed to issue mmlsfs command for path %(path)s, error: %(error)s." -msgstr "" -"Impossibile eseguire il comando mmlsfs per il percorso %(path)s, errore: " -"%(error)s." - -#, python-format -msgid "Failed to issue mmlsfs command, error: %s." -msgstr "Impossibile eseguire il comando mmlsfs, errore: %s." - -#, python-format -msgid "Failed to open iet session list for %s" -msgstr "Impossibile aprire l'elenco di sessioni iet per %s" - -#, python-format -msgid "Failed to open volume from %(path)s." -msgstr "Impossibile aprire il volume da %(path)s." 
- -msgid "Failed to perform replication failover" -msgstr "Impossibile eseguire il failover della replica" - -#, python-format -msgid "Failed to present volume %(name)s (%(status)d)!" -msgstr "Impossibile presentare il volume %(name)s (%(status)d)!" - -msgid "Failed to re-export volume, setting to ERROR." -msgstr "Impossibile riesportare il volume, impostazione in ERRORE." - -#, python-format -msgid "Failed to register image volume location %(uri)s." -msgstr "Impossibile registrare l'ubicazione del volume dell'immagine %(uri)s." - -#, python-format -msgid "" -"Failed to remove %(volumeName)s from the default storage group for the FAST " -"Policy." -msgstr "" -"Impossibile rimuovere: %(volumeName)s dal gruppo di archiviazione " -"predefinito per la politica FAST." - -#, python-format -msgid "Failed to remove %s from cg." -msgstr "Impossibile rimuovere %s da cg." - -#, python-format -msgid "Failed to remove LUN %s" -msgstr "Impossibile rimuovere la LUN %s" - -#, python-format -msgid "Failed to remove iscsi target for Volume ID: %(vol_id)s: %(e)s" -msgstr "" -"Impossibile rimuovere la destinazione iscsi per l'ID volume: %(vol_id)s: " -"%(e)s" - -#, python-format -msgid "Failed to remove iscsi target for volume id:%(vol_id)s: %(e)s" -msgstr "" -"Impossibile rimuovere la destinazione iscsi per l'id volume:%(vol_id)s: %(e)s" - -#, python-format -msgid "Failed to remove iscsi target for volume id:%s" -msgstr "Impossibile rimuovere la destinazione iscsi per l'id volume:%s" - -#, python-format -msgid "Failed to remove iscsi target for volume id:%s." -msgstr "Impossibile rimuovere la destinazione iscsi per l'id volume:%s." - -#, python-format -msgid "Failed to rename %(new_volume)s into %(volume)s." -msgstr "Impossibile ridenominare %(new_volume)s in %(volume)s." - -msgid "Failed to rename the created snapshot, reverting." -msgstr "Impossibile ridenominare l'istantanea creata, ripristino." - -#, python-format -msgid "Failed to rename volume %(existing)s to %(new)s. 
Volume manage failed." -msgstr "" -"Impossibile ridenominare il volume %(existing)s in %(new)s. Gestione del " -"volume non riuscita." - -#, python-format -msgid "" -"Failed to rename volume %(existing)s to %(new)s. Volume unmanage failed." -msgstr "" -"Impossibile ridenominare il volume %(existing)s in %(new)s. Annullamento " -"della gestione del volume non riuscito." - -#, python-format -msgid "Failed to request async delete of migration source vol %(vol)s: %(err)s" -msgstr "" -"Impossibile richiedere l'eliminazione asincrona del volume di origine della " -"migrazione %(vol)s: %(err)s" - -#, python-format -msgid "" -"Failed to resize vdi. Shrinking vdi not supported. vdi: %(vdiname)s new " -"size: %(size)s" -msgstr "" -"Impossibile ridimensionare VDI. Riduzione di VDI non supportata. VDI: " -"%(vdiname)s nuova dimensione: %(size)s" - -#, python-format -msgid "" -"Failed to resize vdi. Too large volume size. vdi: %(vdiname)s new size: " -"%(size)s" -msgstr "" -"Impossibile ridimensionare VDI. Dimensione volume troppo grande. VDI: " -"%(vdiname)s nuova dimensione: %(size)s" - -#, python-format -msgid "Failed to resize vdi. vdi not found. %s" -msgstr "Impossibile ridimensionare VDI. VDI non trovata. %s" - -#, python-format -msgid "Failed to resize vdi. vdi: %(vdiname)s new size: %(size)s" -msgstr "Impossibile ridimensionare VDI. %(vdiname)s nuova dimensione: %(size)s" - -#, python-format -msgid "Failed to resize volume %(volume_id)s, error: %(error)s." -msgstr "Impossibile ridimensionare il volume %(volume_id)s, errore: %(error)s" - -#, python-format -msgid "Failed to retrieve volume SolidFire-ID: %s in get_by_account!" -msgstr "Impossibile recuperare il volume SolidFire-ID: %s in get_by_account!" - -#, python-format -msgid "" -"Failed to return volume %(volumeName)s to original storage pool. Please " -"contact your system administrator to return it to the correct location." 
-msgstr "" -"Impossibile restituire il volume %(volumeName)s al pool di archiviazione " -"originale. Contattare l'amministratore di sistema per restituirlo " -"all'ubicazione corretta." - -#, python-format -msgid "Failed to roll back reservations %s" -msgstr "Impossibile eseguire il rollback delle prenotazioni %s" - -#, python-format -msgid "Failed to run task %(name)s: %(cause)s" -msgstr "Impossibile eseguire l'attività %(name)s: %(cause)s" - -#, python-format -msgid "Failed to schedule_%(method)s: %(ex)s" -msgstr "Impossibile eseguire schedule_%(method)s: %(ex)s" - -#, python-format -msgid "Failed to send request: %s." -msgstr "Impossibile inviare la richiesta: %s." - -#, python-format -msgid "Failed to set 'enable' attribute for SCST target %s" -msgstr "Impossibile impostare l'attributo 'enable' per la destinazione SCST %s" - -#, python-format -msgid "Failed to set attribute for enable target driver %s" -msgstr "" -"Impossibile impostare l'attributo per abilitare il driver di destinazione %s" - -#, python-format -msgid "Failed to set properties for volume %(existing)s. Volume manage failed." -msgstr "" -"Impossibile impostare le proprietà per il volume %(existing)s. Gestione del " -"volume non riuscita." - -#, python-format -msgid "" -"Failed to set properties for volume %(existing)s. Volume unmanage failed." -msgstr "" -"Impossibile impostare le proprietà per il volume %(existing)s. Annullamento " -"della gestione del volume non riuscita." - -msgid "Failed to setup the Dell EqualLogic driver." -msgstr "Impossibile configurare il driver Dell EqualLogic." - -msgid "Failed to shutdown horcm." -msgstr "Impossibile chiudere horcm." - -#, python-format -msgid "Failed to snap Consistency Group %s" -msgstr "Impossibile eseguire istantanea del gruppo di coerenza %s" - -msgid "Failed to start horcm." -msgstr "Impossibile avviare horcm." 
- -msgid "Failed to terminate connection" -msgstr "Impossibile terminare la connessione" - -#, python-format -msgid "Failed to terminate connection %(initiator)s %(vol)s" -msgstr "Impossibile terminare la connessione %(initiator)s %(vol)s" - -#, python-format -msgid "Failed to terminate connection to volume \"%s\"." -msgstr "Impossibile terminare la connessione al volume \"%s\"." - -#, python-format -msgid "Failed to umount %(share)s, reason=%(stderr)s" -msgstr "Impossibile smontare %(share)s, motivo=%(stderr)s" - -#, python-format -msgid "" -"Failed to update %(conf)s for volume id %(vol_id)s after removing iscsi " -"target" -msgstr "" -"Impossibile aggiornare %(conf)s per l'id volume %(vol_id)s dopo la rimozione " -"della destinazione iscsi" - -#, python-format -msgid "Failed to update %(conf)s for volume id:%(vol_id)s" -msgstr "Impossibile aggiornare %(conf)s per l'id volume:%(vol_id)s" - -#, python-format -msgid "" -"Failed to update %(volume_id)s metadata using the provided snapshot " -"%(snapshot_id)s metadata." -msgstr "" -"Impossibile aggiornare i metadati %(volume_id)s utilizzando i metadati " -"dell'istantanea fornita %(snapshot_id)s." - -#, python-format -msgid "Failed to update quota donating volume transfer id %s" -msgstr "" -"Impossibile aggiornare la quota che denota l'id di trasferimento del volume " -"%s" - -#, python-format -msgid "Failed to update quota for consistency group %s." -msgstr "Impossibile aggiornare la quota per il gruppo di coerenza %s." - -#, python-format -msgid "Failed to update quota for deleting volume: %s" -msgstr "Impossibile aggiornare la quota per l'eliminazione del volume: %s" - -#, python-format -msgid "Failed to update quota while deleting snapshots: %s" -msgstr "" -"Impossibile aggiornare la quota durante l'eliminazione delle istantanee: %s" - -msgid "Failed to update quota while deleting volume." -msgstr "Impossibile aggiornare la quota durante l'eliminazione del volume." 
- -msgid "Failed to update replay profiles" -msgstr "Impossibile aggiornare i profili di risposta" - -msgid "Failed to update storage profile" -msgstr "Impossibile aggiornare il profilo di memoria" - -msgid "Failed to update usages deleting backup" -msgstr "Impossibile aggiornare gli utilizzi eliminando il backup" - -msgid "Failed to update usages deleting snapshot" -msgstr "Impossibile aggiornare gli utilizzi eliminando l'istantanea" - -msgid "Failed to update usages deleting volume." -msgstr "Impossibile aggiornare gli utilizzi eliminando il volume." - -#, python-format -msgid "Failed to update volume status: %s" -msgstr "Impossibile aggiornare lo stato del volume: %s" - -#, python-format -msgid "" -"Failed to verify that volume was added to storage group for FAST policy: " -"%(fastPolicyName)s." -msgstr "" -"Impossibile verificare quale volume è stato aggiunto al gruppo di " -"archiviazione per la politica FAST: %(fastPolicyName)s." - -msgid "Failed to write in /etc/scst.conf." -msgstr "Impossibile scrivere in /etc/scst.conf." - -#, python-format -msgid "Failed to write persistence file: %(path)s." -msgstr "Impossibile scrivere il file di persistenza: %(path)s." - -#, python-format -msgid "" -"Failed updating %(snapshot_id)s metadata using the provided volumes " -"%(volume_id)s metadata" -msgstr "" -"Impossibile aggiornare i metadati dell'istantanea %(snapshot_id)s " -"utilizzando i metadati dei volumi forniti %(volume_id)s" - -#, python-format -msgid "" -"Failed updating model of snapshot %(snapshot_id)s with creation provided " -"model %(model)s." -msgstr "" -"Impossibile aggiornare il modello di istantanea %(snapshot_id)s con il " -"modello fornito dalla creazione %(model)s." - -#, python-format -msgid "" -"Failed updating model of snapshot %(snapshot_id)s with driver provided model " -"%(model)s." -msgstr "" -"Impossibile aggiornare il modello di istantanea %(snapshot_id)s con il " -"modello driver fornito %(model)s." 
- -#, python-format -msgid "" -"Failed updating model of volume %(volume_id)s with creation provided model " -"%(model)s" -msgstr "" -"Impossibile aggiornare il modello di volume %(volume_id)s con il modello " -"fornito dalla creazione %(model)s" - -#, python-format -msgid "" -"Failed updating model of volume %(volume_id)s with driver provided model " -"%(model)s" -msgstr "" -"Impossibile aggiornare il modello di volume %(volume_id)s con il modello " -"driver fornito %(model)s" - -#, python-format -msgid "Failed updating snapshot %(snapshot_id)s with %(update)s." -msgstr "Impossibile aggiornare l'istantanea %(snapshot_id)s con %(update)s." - -#, python-format -msgid "" -"Failed updating snapshot metadata using the provided volumes %(volume_id)s " -"metadata" -msgstr "" -"Impossibile aggiornare i metadati dell'istantanea utilizzando i metadati dei " -"volumi forniti %(volume_id)s" - -#, python-format -msgid "Failed updating volume %(volume_id)s bootable flag to true" -msgstr "" -"Impossibile aggiornare il flag avviabile del volume %(volume_id)s su true" - -#, python-format -msgid "Failed updating volume %(volume_id)s with %(update)s" -msgstr "Impossibile aggiornare il volume %(volume_id)s con %(update)s" - -#, python-format -msgid "Failed updating volume %(volume_id)s with %(updates)s" -msgstr "Impossibile aggiornare il volume %(volume_id)s con %(updates)s" - -#, python-format -msgid "Failure deleting staged tmp LUN %s." -msgstr "Errore durante l'eliminazione della LUN temporanea gestita %s." - -#, python-format -msgid "Failure restarting snap vol. Error: %s." -msgstr "Errore durante il riavvio del volume snap. Errore: %s." - -msgid "Fetch volume pool name failed." -msgstr "Recupero del nome del pool di volumi non riuscito." - -#, python-format -msgid "" -"FibreChannelDriver validate_connector failed. No '%(setting)s'. Make sure " -"HBA state is Online." -msgstr "" -"FibreChannelDriver validate_connector non riuscito. Nessun '%(setting)s'. 
" -"Accertarsi che lo stato HBA sia Online." - -#, python-format -msgid "Flexvisor failed to get event %(volume)s (%(status)s)." -msgstr "Flexvisor non è riuscito a ottenere l'evento %(volume)s: (%(status)s)." - -#, python-format -msgid "Flexvisor failed to get pool %(id)s info." -msgstr "Flexvisor non è riuscito a ottenere le informazioni sul pool %(id)s." - -#, python-format -msgid "Flexvisor failed to get pool list due to %s." -msgstr "Flexvisor non è riuscito a ottenere l'elenco di pool a causa di %s." - -#, python-format -msgid "Flexvisor failed to get pool list.(Error: %d)" -msgstr "Flexvisor non è riuscito a ottenere l'elenco di pool. (Errore: %d)" - -#, python-format -msgid "Found %(count)s volumes mapped to id: %(uuid)s." -msgstr "Trovati %(count)s volumi associati all'id: %(uuid)s." - -msgid "Free capacity not set: volume node info collection broken." -msgstr "" -"Capacità libera non impostata: raccolta informazioni sul nodo volumi " -"interrotta." - -#, python-format -msgid "GPFS is not active. Detailed output: %s." -msgstr "GPFS non è attivo. Output dettagliato: %s." - -msgid "Get LUN migration error." -msgstr "Errore di richiamo migrazione LUN." - -msgid "Get method error." -msgstr "Errore di richiamo metodo. " - -#, python-format -msgid "Host PUT failed (%s)." -msgstr "Comando PUT dell'host non riuscito (%s)." - -msgid "Host could not be found!" -msgstr "Impossibile trovare l'host." - -#, python-format -msgid "ISCSI discovery attempt failed for:%s" -msgstr "Tentativo di rilevamento ISCSI non riuscito per: %s" - -msgid "ISE FW version is not compatible with OpenStack!" -msgstr "Versione ISE FW non compatibile con OpenStack!" - -msgid "ISE globalid not set!" -msgstr "ID globale ISE non impostato." - -#, python-format -msgid "Image size %(img_size)dGB is larger than volume size %(vol_size)dGB." -msgstr "" -"La dimensione dell'immagine di %(img_size)dGB è maggiore della dimensione " -"del volume di %(vol_size)dGB." 
- -#, python-format -msgid "Invalid API object: %s" -msgstr "Oggetto API non valido: %s" - -#, python-format -msgid "Invalid JSON: %s" -msgstr "JSON non valido: %s" - -#, python-format -msgid "Invalid ReplayList return: %s" -msgstr "Restituzione ReplayList non valida: %s" - -#, python-format -msgid "Invalid hostname %(host)s" -msgstr "Nome host non valido %(host)s" - -msgid "Invalid replication target specified for failover" -msgstr "Destinazione di replica non valida specificata per il failover" - -msgid "" -"Issuing a fail-over failed because replication is not properly configured." -msgstr "" -"Esecuzione del failover non riuscita perché la replica non è configurata " -"correttamente." - -#, python-format -msgid "JSON encode params %(param)s error: %(status)s." -msgstr "Errore dei parametri %(param)s di codifica JSON: %(status)s." - -#, python-format -msgid "JSON transfer data error. %s." -msgstr "Errore dei dati di trasferimento JSON: %s." - -#, python-format -msgid "JSON transfer error: %s." -msgstr "Errore di trasferimento JSON: %s." - -#, python-format -msgid "LUN %(path)s geometry failed. Message - %(msg)s" -msgstr "Geometria della LUN %(path)s non riuscita. Messaggio - %(msg)s" - -msgid "LUN extend failed!" -msgstr "Estensione LUN non riuscita. " - -msgid "LUN unexport failed!" -msgstr "Annullamento dell'esportazione della LUN non riuscito." - -#, python-format -msgid "" -"Location info needed for backend enabled volume migration not in correct " -"format: %s. Continuing with generic volume migration." -msgstr "" -"Informazioni sull'ubicazione necessarie per la migrazione volumi abilitata " -"al backend non in formato corretto: %s. Continuare con la migrazione volumi " -"generica." - -msgid "" -"Logging into the Datera cluster failed. Please check your username and " -"password set in the cinder.conf and start the cinder-volume service again." -msgstr "" -"Accesso al cluster Datera non riuscito. 
Controllare nome utente e password " -"impostati in cinder.conf e avviare di nuovo il servizio cinder-volume." - -#, python-format -msgid "" -"Login error. URL: %(url)s\n" -"Reason: %(reason)s." -msgstr "" -"Errore di login. URL: %(url)s\n" -"Motivo: %(reason)s." - -#, python-format -msgid "Looks like masking view: %(maskingViewName)s has recently been deleted." -msgstr "" -"La vista di mascheramento: %(maskingViewName)s è stata eliminata di recente." - -#, python-format -msgid "Lun %s has dependent snapshots, skipping lun deletion." -msgstr "" -"La lun %s presenta istantanee dipendenti, l'eliminazione della lun viene " -"ignorata." - -#, python-format -msgid "Lun create for %s failed!" -msgstr "Creazione della lun per %s non riuscita." - -#, python-format -msgid "Lun create snapshot for volume %(vol)s snapshot %(snap)s failed!" -msgstr "" -"L'istantanea di creazione lun per il volume %(vol)s istantanea %(snap)s non " -"è riuscita." - -#, python-format -msgid "Lun delete for %s failed!" -msgstr "Eliminazione della lun per %s non riuscita." - -msgid "Lun mapping returned null!" -msgstr "L'associazione LUN ha restituito null." - -#, python-format -msgid "MSGID%(id)04d-E: %(msg)s" -msgstr "MSGID%(id)04d-E: %(msg)s" - -msgid "Manage exist volume failed." -msgstr "Gestione del volume esistente non riuscita." - -#, python-format -msgid "" -"Manager for service %(binary)s %(host)s is reporting problems, not sending " -"heartbeat. Service will appear \"down\"." -msgstr "" -"Il gestore del servizio %(binary)s %(host)s riporta dei problemi, " -"l'heartbeat non viene inviato. Il servizio risulterà \"disattivo\"." - -#, python-format -msgid "" -"Masking View creation or retrieval was not successful for masking view " -"%(maskingViewName)s. Attempting rollback." -msgstr "" -"La creazione o il richiamo della vista di mascheramento non sono stati " -"eseguiti correttamente per la vista di mascheramento %(maskingViewName)s. " -"Tentativo di rollback." 
- -#, python-format -msgid "" -"Max retries reached deleting backup %(basename)s image of volume %(volume)s." -msgstr "" -"Raggiunto numero massimo di tentativi di eliminazione backup %(basename)s " -"immagine di volume %(volume)s." - -#, python-format -msgid "Message: %s" -msgstr "Messaggio: %s" - -msgid "Model update failed." -msgstr "Aggiornamento del modello non riuscito." - -#, python-format -msgid "Modify volume PUT failed: %(name)s (%(status)d)." -msgstr "Modifica del volume PUT non riuscita: %(name)s (%(status)d)." - -#, python-format -msgid "Mount failure for %(share)s after %(count)d attempts." -msgstr "Errore di montaggio per %(share)s dopo %(count)d tentativi." - -#, python-format -msgid "Mount failure for %(share)s." -msgstr "Errore di montaggio per %(share)s." - -#, python-format -msgid "Multiple replay profiles under name %s" -msgstr "Più profili di risposta con nome %s" - -msgid "No CLI output for firmware version check" -msgstr "Nessun output CLI per il controllo della versione firmware" - -#, python-format -msgid "No VIP configured for service %s" -msgstr "Nessun VIP configurato per il servizio %s" - -#, python-format -msgid "" -"No action required. Volume: %(volumeName)s is already part of pool: %(pool)s." -msgstr "" -"Nessuna azione richiesta. Il volume: %(volumeName)s è già parte del pool: " -"%(pool)s." - -#, python-format -msgid "" -"No action required. Volume: %(volumeName)s is already part of slo/workload " -"combination: %(targetCombination)s." -msgstr "" -"Nessuna azione richiesta. Il volume: %(volumeName)s è già parte della " -"combinazione slo/carico di lavoro: %(targetCombination)s." - -#, python-format -msgid "" -"No snapshots found in database, but %(path)s has backing file " -"%(backing_file)s!" -msgstr "" -"Nessuna istantanea trovata nel database, ma %(path)s contiene un file di " -"backup %(backing_file)s." 
- -#, python-format -msgid "Not able to configure PBM for vCenter server: %s" -msgstr "Impossibile configurare PBM per il server vCenter: %s" - -#, python-format -msgid "OSError: command is %(cmd)s." -msgstr "OSError: il comando è %(cmd)s." - -#, python-format -msgid "OSError: command is %s." -msgstr "OSError: il comando è %s." - -#, python-format -msgid "" -"One of the components of the original masking view %(maskingViewName)s " -"cannot be retrieved so please contact your system administrator to check " -"that the correct initiator(s) are part of masking." -msgstr "" -"Uno dei componenti della vista di mascheramento originale " -"%(maskingViewName)s non può essere richiamato, quindi contattare " -"l'amministratore di sistema per verificare che l'iniziatore corretto sia " -"parte del mascheramento." - -#, python-format -msgid "" -"Only SLO/workload migration within the same SRP Pool is supported in this " -"version The source pool : %(sourcePoolName)s does not match the target " -"array: %(targetPoolName)s. Skipping storage-assisted migration." -msgstr "" -"Solo la migrazione di SLO/carico di lavoro all'interno dello stesso pool SRP " -"è supportata in questa versione. Il pool di origine: %(sourcePoolName)s non " -"corrisponde all'array di destinazione: %(targetPoolName)s. La migrazione " -"assistita dalla memoria viene ignorata." - -msgid "Only available volumes can be migrated between different protocols." -msgstr "" -"Solo i volumi disponibili possono essere migrati tra protocolli diversi." - -#, python-format -msgid "POST for host create failed (%s)!" -msgstr "POST per la creazione di host non riuscito (%s)!" - -#, python-format -msgid "Pipe1 failed - %s " -msgstr "Pipe1 non riuscito - %s " - -#, python-format -msgid "Pipe2 failed - %s " -msgstr "Pipe2 non riuscito - %s " - -msgid "" -"Please check your xml for format or syntax errors. Please see documentation " -"for more details." -msgstr "" -"Verificare il formato e gli errori di sintassi del file xml. 
Per ulteriori " -"dettagli, vedere la documentazione." - -#, python-format -msgid "PoolName must be in the file %(fileName)s." -msgstr "Il nome pool deve essere nel file %(fileName)s." - -#, python-format -msgid "Prepare clone failed for %s." -msgstr "Preparazione del clone non riuscita per %s." - -msgid "Primary IP must be set!" -msgstr "L'IP primario deve essere impostato." - -msgid "Problem cleaning incomplete backup operations." -msgstr "Problema durante la ripulitura delle operazioni di backup incomplete." - -#, python-format -msgid "Problem cleaning temp volumes and snapshots for backup %(bkup)s." -msgstr "" -"Problema durante la ripulitura di volumi temporanei e istantanee per il " -"backup %(bkup)s." - -#, python-format -msgid "Problem cleaning up backup %(bkup)s." -msgstr "Problema durante la ripulitura del backup %(bkup)s." - -#, python-format -msgid "" -"Purity host %(host_name)s is managed by Cinder but CHAP credentials could " -"not be retrieved from the Cinder database." -msgstr "" -"L'host Purity %(host_name)s è gestito da Cinder ma le credenziali CHAP non " -"possono essere richiamate dal database Cinder." - -#, python-format -msgid "" -"Purity host %(host_name)s is not managed by Cinder and can't have CHAP " -"credentials modified. Remove IQN %(iqn)s from the host to resolve this issue." -msgstr "" -"L'host Purity %(host_name)s non è gestito da Cinder e non può avere " -"credenziali CHAP modificate. Rimuovere IQN %(iqn)s dall'host per risolvere " -"il problema." - -#, python-format -msgid "Qemu-img is not installed. OSError: command is %(cmd)s." -msgstr "Qemu-img non è installato. OSError: il comando è %(cmd)s." - -#, python-format -msgid "" -"Quota exceeded for %(s_pid)s, tried to extend volume by %(s_size)sG, " -"(%(d_consumed)dG of %(d_quota)dG already consumed)." -msgstr "" -"Quota superata per %(s_pid)s, si è tentato di estendere il volume per " -"%(s_size)sG, (%(d_consumed)dG di %(d_quota)dG già utilizzato)." 
- -#, python-format -msgid "REST Not Available: %s" -msgstr "REST non disponibile: %s" - -#, python-format -msgid "Re-throwing Exception %s" -msgstr "Rigenerazione dell'eccezione %s" - -#, python-format -msgid "Read response raised an exception: %s." -msgstr "La lettura della risposta ha generato un'eccezione: %s." - -msgid "Recovered model server connection!" -msgstr "Connessione al model server ripristinata." - -#, python-format -msgid "Recovering from a failed execute. Try number %s" -msgstr "" -"Viene eseguito un recupero da un'esecuzione non riuscita. Provare il numero " -"%s" - -msgid "Replication must be specified as ' True' or ' False'." -msgstr "La replica deve essere specificata come ' True' o ' False'." - -msgid "" -"Requested to setup thin provisioning, however current LVM version does not " -"support it." -msgstr "" -"È stata richiesta la configurazione di thin provisioning, tuttavia la " -"versione LVM corrente non lo supporta." - -#, python-format -msgid "Resizing %s failed. Cleaning volume." -msgstr "Nuovo dimensionamento di %s non riuscito. Ripulitura del volume." - -#, python-format -msgid "Restore to volume %(volume)s finished with error - %(error)s." -msgstr "Ripristino su volume %(volume)s completato con errore - %(error)s." - -#, python-format -msgid "Retry %(retry)s times: %(method)s Failed %(rc)s: %(reason)s" -msgstr "" -"Tentativo eseguito %(retry)s volte: %(method)s Non riuscito %(rc)s: " -"%(reason)s" - -#, python-format -msgid "Retype unable to find volume %s." -msgstr "" -"Il comando di riscrittura non è stato in grado di trovare il volume %s." - -msgid "Retype volume error." -msgstr "Errore di riscrittura del volume." - -msgid "Retype volume error. Create replication failed." -msgstr "Errore di riscrittura del volume. Creazione replica non riuscita." - -msgid "Retype volume error. Delete replication failed." -msgstr "Errore di riscrittura del volume. Eliminazione replica non riuscita." 
- -#, python-format -msgid "" -"SLO: %(slo)s is not valid. Valid values are Bronze, Silver, Gold, Platinum, " -"Diamond, Optimized, NONE." -msgstr "" -"SLO: %(slo)s non valido. I valori validi sono Bronze, Silver, Gold, " -"Platinum, Diamond, Optimized, NONE." - -msgid "" -"ScVolume returned success with empty payload. Attempting to locate volume" -msgstr "" -"ScVolume restituito correttamente con payload vuoto. Tentativo di " -"individuare il volume" - -#, python-format -msgid "Server Busy retry request: %s" -msgstr "Il sistema è occupato, ritentare la richiesta: %s" - -msgid "Service not found for updating replication_status." -msgstr "Servizio non trovato per l'aggiornamento di replication_status." - -#, python-format -msgid "Setting QoS for %s failed" -msgstr "Impostazione QoS per %s non riuscita" - -#, python-format -msgid "" -"Share %s ignored due to invalid format. Must be of form address:/export." -msgstr "" -"Condivisione %s ignorata a causa di un formato non valido. Deve essere del " -"formato address:/export. " - -#, python-format -msgid "Sheepdog is not installed. OSError: command is %s." -msgstr "Sheepdog non è installato. OSError: il comando è %s." - -#, python-format -msgid "" -"Skipping remove_export. No iscsi_target ispresently exported for volume: %s" -msgstr "" -"remove_export viene ignorato. Nessun iscsi_target viene al momento esportato " -"per il volume: %s" - -#, python-format -msgid "Snapshot \"%s\" already exists." -msgstr "L'istantanea \"%s\" esiste già." - -#, python-format -msgid "" -"Snapshot \"%s\" not found. Please check the results of \"dog vdi list\"." -msgstr "" -"Istantanea \"%s\" non trovata. Controllare i risultati di \"dog vdi list\"." - -#, python-format -msgid "Snapshot %(snapshotname)s not found on the array. No volume to delete." -msgstr "" -"Istantanea %(snapshotname)s non trovata nell'array. Nessun volume da " -"eliminare." 
- -#, python-format -msgid "Snapshot %s: create failed" -msgstr "Istantanea %s: creazione non riuscita" - -#, python-format -msgid "Snapshot %s: has clones" -msgstr "L'istantanea %s: presenta cloni" - -msgid "Snapshot did not exist. It will not be deleted" -msgstr "L'istantanea non esiste. Non verrà eliminata" - -#, python-format -msgid "" -"Source CG %(source_cg)s not found when creating consistency group %(cg)s " -"from source." -msgstr "" -"CG di origine %(source_cg)s non trovata durante la creazione del gruppo di " -"coerenza %(cg)s dall'origine." - -#, python-format -msgid "Source snapshot %(snapshot_id)s cannot be found." -msgstr "Impossibile trovare l'istantanea di origine %(snapshot_id)s." - -#, python-format -msgid "Source snapshot cannot be found for target volume %(volume_id)s." -msgstr "" -"Impossibile trovare l'istantanea di origine per il volume di destinazione " -"%(volume_id)s." - -#, python-format -msgid "Source volume %s not ready!" -msgstr "Volume di origine %s non pronto." - -#, python-format -msgid "Source volumes cannot be found for target volume %(volume_id)s." -msgstr "" -"Impossibile trovare i volumi di origine per il volume di destinazione " -"%(volume_id)s." - -#, python-format -msgid "" -"Src Volume \"%s\" not found. Please check the results of \"dog vdi list\"." -msgstr "" -"Volume src \"%s\" non trovato. Controllare i risultati di \"dog vdi list\"." - -#, python-format -msgid "Start synchronization failed. Error: %s." -msgstr "Avvio della sincronizzazione non riuscito. Errore: %s." - -#, python-format -msgid "StdErr :%s" -msgstr "StdErr :%s" - -#, python-format -msgid "StdOut :%s" -msgstr "StdOut :%s" - -#, python-format -msgid "Storage Profile %s was not found." -msgstr "Profilo di memoria %s non trovato." - -#, python-format -msgid "Storage profile: %s cannot be found in vCenter." -msgstr "Profilo di archiviazione: %s non trovato in vCenter." - -#, python-format -msgid "TSM [%s] not found in CloudByte storage." 
-msgstr "TSM [%s] non trovato nella memoria CloudByte." - -msgid "The Flexvisor service is unavailable." -msgstr "Il servizio Flexvisor non è disponibile." - -#, python-format -msgid "The NFS Volume %(cr)s does not exist." -msgstr "Il volume NFS %(cr)s non esiste." - -msgid "The connector does not contain the required information." -msgstr "Il connettore non contiene le informazioni necessarie." - -msgid "" -"The connector does not contain the required information: initiator is missing" -msgstr "" -"Il connettore non contiene le informazioni necessarie: iniziatore mancante" - -msgid "" -"The connector does not contain the required information: wwpns is missing" -msgstr "Il connettore non contiene le informazioni necessarie: wwpns mancante" - -#, python-format -msgid "" -"The primary array must have an API version of %(min_ver)s or higher, but is " -"only on %(current_ver)s, therefore replication is not supported." -msgstr "" -"L'array primario deve avere una versione API di %(min_ver)s o superiore, ma " -"è solo nella %(current_ver)s, quindi la replica non è supportata." - -#, python-format -msgid "" -"The replication mode of %(type)s has not successfully established " -"partnership with the replica Storwize target %(stor)s." -msgstr "" -"La modalità di replica di %(type)s non ha una relazione stabilita " -"correttamente con la destinazione Storwize %(stor)s della replica." - -msgid "The snapshot cannot be deleted because it is a clone point." -msgstr "L'istantanea non può essere eliminata perché è un punto di clonazione." - -#, python-format -msgid "" -"The source array : %(sourceArraySerialNumber)s does not match the target " -"array: %(targetArraySerialNumber)s skipping storage-assisted migration." -msgstr "" -"L'array di origine: %(sourceArraySerialNumber)s non corrisponde all'array di " -"destinazione: %(targetArraySerialNumber)s , la migrazione assistita dalla " -"memoria viene ignorata." 
- -#, python-format -msgid "" -"The source array : %(sourceArraySerialNumber)s does not match the target " -"array: %(targetArraySerialNumber)s, skipping storage-assisted migration." -msgstr "" -"L'array di origine: %(sourceArraySerialNumber)s non corrisponde all'array di " -"destinazione: %(targetArraySerialNumber)s, la migrazione assistita dalla " -"memoria viene ignorata." - -#, python-format -msgid "The source volume %(volume_id)s cannot be found." -msgstr "Impossibile trovare il volume di origine %(volume_id)s." - -#, python-format -msgid "The volume driver requires %(data)s in the connector." -msgstr "Il driver del volume richiede %(data)s nel connettore." - -msgid "The volume driver requires the iSCSI initiator name in the connector." -msgstr "Il driver del volume richiede il nome iniziatore iSCSI nel connettore." - -#, python-format -msgid "There are no valid hosts available in configured cluster(s): %s." -msgstr "Non sono presenti host validi disponibili nel cluster configurato: %s." - -#, python-format -msgid "There is no valid datastore satisfying requirements: %s." -msgstr "Non è presente alcun datastore valido che soddisfa i requisiti: %s." - -msgid "There must be at least one valid replication device configured." -msgstr "È necessario configurare almeno un dispositivo di replica valido." - -#, python-format -msgid "" -"There was a problem with the failover (%(error)s) and it was unsuccessful. " -"Volume '%(volume)s will not be available on the failed over target." -msgstr "" -"Si è verificato un problema con il failover (%(error)s) e non è stato " -"eseguito correttamente. Il volume '%(volume)s non sarà disponibile sulla " -"destinazione sottoposta a failover." - -#, python-format -msgid "There was an error deleting volume %(id)s: %(error)s." -msgstr "" -"Si è verificato un errore durante l'eliminazione del volume %(id)s: " -"%(error)s." - -msgid "This usually means the volume was never successfully created." 
-msgstr "" -"Ciò in genere significa che il volume non è stato mai creato correttamente." - -msgid "Tiering Policy is not supported on this array." -msgstr "La politica di livellamento non è supportata su questo array." - -#, python-format -msgid "Timed out deleting %s!" -msgstr "Timeout durante l'eliminazione di %s." - -#, python-format -msgid "Trying to create snapshot by non-existent LV: %s" -msgstr "Tentativo di creare un'istantanea da LV non esistente: %s" - -#, python-format -msgid "URLError: %s" -msgstr "Errore URL: %s" - -#, python-format -msgid "Unable to access the Storwize back-end for volume %s." -msgstr "Impossibile accedere al back-end Storwize per il volume %s." - -#, python-format -msgid "Unable to create folder path %s" -msgstr "Impossibile trovare il percorso della cartella %s" - -#, python-format -msgid "" -"Unable to create or get default storage group for FAST policy: " -"%(fastPolicyName)s." -msgstr "" -"Impossibile creare o ottenere il gruppo di archiviazione predefinito per la " -"politica FAST: %(fastPolicyName)s." - -#, python-format -msgid "Unable to create volume %s from replay" -msgstr "Impossibile creare il volume %s dalla risposta" - -#, python-format -msgid "Unable to create volume on SC: %s" -msgstr "Impossibile creare il volume su SC: %s" - -#, python-format -msgid "Unable to create volume. Volume driver %s not initialized" -msgstr "Impossibile creare il volume. Driver del volume %s non inizializzato" - -msgid "Unable to delete busy volume." -msgstr "Impossibile eliminare il volume occupato." - -#, python-format -msgid "Unable to delete due to existing snapshot for volume: %s" -msgstr "" -"Impossibile eseguire l'eliminazione a causa di un'istantanea esistente per " -"il volume: %s" - -#, python-format -msgid "Unable to delete profile %s." -msgstr "Impossibile eliminare il profilo %s." - -#, python-format -msgid "Unable to delete replication for %(vol)s to %(dest)s." 
-msgstr "Impossibile eliminare la replica per %(vol)s in %(dest)s." - -msgid "" -"Unable to delete the destination volume during volume migration, (NOTE: " -"database record needs to be deleted)." -msgstr "" -"Impossibile eliminare il volume di destinazione durante la migrazione del " -"volume, (NOTA: il record del database deve essere eliminato)." - -#, python-format -msgid "Unable to determine whether %(volumeName)s is composite or not." -msgstr "Impossibile stabilire se %(volumeName)s è composito o meno." - -msgid "Unable to disconnect host from volume, could not determine Purity host" -msgstr "" -"Impossibile scollegare l'host dal volume, non è stato possibile determinare " -"l'host Purity" - -msgid "" -"Unable to failover to the secondary. Please make sure that the secondary " -"back-end is ready." -msgstr "" -"Impossibile eseguire il failover sul backend secondario, Accertarsi che il " -"backend secondario sia pronto." - -#, python-format -msgid "Unable to find VG: %s" -msgstr "Impossibile trovare VG: %s" - -#, python-format -msgid "" -"Unable to find default storage group for FAST policy : %(fastPolicyName)s." -msgstr "" -"Impossibile trovare il gruppo di archiviazione predefinito per la politica " -"FAST: %(fastPolicyName)s." - -#, python-format -msgid "Unable to find disk folder %(name)s on %(ssn)s" -msgstr "Impossibile trovare la cartella del disco %(name)s su %(ssn)s" - -#, python-format -msgid "Unable to find mapping profiles: %s" -msgstr "Impossibile trovare i profili di associazione: %s" - -#, python-format -msgid "Unable to find or create QoS Node named %s" -msgstr "Impossibile trovare o creare il nodo QoS denominato %s" - -#, python-format -msgid "Unable to find service: %(service)s for given host: %(host)s." -msgstr "" -"Impossibile trovare il servizio: %(service)s per l'host specificato: " -"%(host)s." - -msgid "Unable to get associated pool of volume." -msgstr "Impossibile ottenere il pool di volume associato." 
- -#, python-format -msgid "Unable to get default storage group %(defaultSgName)s." -msgstr "" -"Impossibile ottenere il gruppo di archiviazione predefinito " -"%(defaultSgName)s." - -msgid "Unable to get device mapping from network." -msgstr "Impossibile ottenere l'associazione del dispositivo dalla rete." - -#, python-format -msgid "Unable to get policy rule for fast policy: %(fastPolicyName)s." -msgstr "" -"Impossibile ottenere la regola della politica per la politica FAST: " -"%(fastPolicyName)s." - -#, python-format -msgid "Unable to locate Volume Group %s" -msgstr "Impossibile individuare il gruppo volumi %s" - -#, python-format -msgid "Unable to locate snapshot %s" -msgstr "Impossibile individuare l'istantanea %s" - -#, python-format -msgid "Unable to manage existing snapshot. Volume driver %s not initialized." -msgstr "" -"Impossibile gestire l'istantanea esistente. Driver del volume %s non " -"inizializzato. " - -#, python-format -msgid "Unable to manage existing volume. Volume driver %s not initialized." -msgstr "" -"Impossibile gestire il volume esistente. Driver del volume %s non " -"inizializzato. " - -#, python-format -msgid "Unable to map %(vol)s to %(srv)s" -msgstr "Impossibile associare %(vol)s a %(srv)s" - -#, python-format -msgid "Unable to rename lun %s on array." -msgstr "Impossibile ridenominare la lun %s sull'array." - -#, python-format -msgid "Unable to rename the logical volume for volume %s." -msgstr "Impossibile ridenominare il volume logico per il volume %s." 
- -#, python-format -msgid "Unable to rename the logical volume for volume: %s" -msgstr "Impossibile ridenominare il volume logico per il volume: %s" - -#, python-format -msgid "Unable to replicate %(volname)s to %(destsc)s" -msgstr "Impossibile replicare %(volname)s su %(destsc)s" - -#, python-format -msgid "Unable to retrieve VolumeConfiguration: %s" -msgstr "Impossibile richiamare la configurazione del volume: %s" - -#, python-format -msgid "Unable to retrieve pool instance of %(poolName)s on array %(array)s." -msgstr "" -"Impossibile richiamare l'istanza pool di %(poolName)s sull'array %(array)s." - -#, python-format -msgid "Unable to terminate volume connection: %(err)s." -msgstr "Impossibile terminare la connessione del volume: %(err)s" - -msgid "Unexpected build error:" -msgstr "Errore di generazione non previsto:" - -msgid "Unexpected error occurs in horcm." -msgstr "Si è verificato un errore non previsto in horcm." - -msgid "Unexpected error occurs in snm2." -msgstr "Si è verificato un errore non previsto in snm2." - -#, python-format -msgid "Unexpected error when retype() revert tried to deleteVolumeSet(%s)" -msgstr "" -"Si è verificato un errore non previsto quando il ripristino retype() ha " -"tentato di deleteVolumeSet(%s)" - -#, python-format -msgid "Unexpected error when retype() tried to deleteVolumeSet(%s)" -msgstr "" -"Si è verificato un errore non previsto quando retype() ha tentato di " -"deleteVolumeSet(%s)" - -#, python-format -msgid "Unexpected error while invoking web service. Error - %s." -msgstr "Errore imprevisto durante il richiamo del servizio Web. Errore - %s." - -#, python-format -msgid "Unexpected exception during cache cleanup of snapshot %s" -msgstr "Eccezione imprevista durante la ripulitura cache dell'istantanea %s" - -#, python-format -msgid "Unknown exception in post clone resize LUN %s." -msgstr "Eccezione sconosciuta nella LUN di post ridimensionamento clone %s." 
- -#, python-format -msgid "Unrecognized Login Response: %s" -msgstr "Risposta di login non riconosciuta: %s" - -#, python-format -msgid "" -"Update consistency group failed to add volume-%(volume_id)s: VolumeNotFound." -msgstr "" -"L'aggiornamento del gruppo di coerenza non è riuscito ad aggiungere il " -"volume-%(volume_id)s: VolumeNotFound." - -#, python-format -msgid "" -"Update consistency group failed to remove volume-%(volume_id)s: " -"VolumeNotFound." -msgstr "" -"L'aggiornamento del gruppo di coerenza non è riuscito a rimuovere il volume-" -"%(volume_id)s: VolumeNotFound." - -msgid "Update snapshot usages failed." -msgstr "Aggiornamento utilizzi istantanea non riuscito." - -msgid "Update volume model for transfer operation failed." -msgstr "" -"Aggiornamento del modello di volume per l'operazione di trasferimento non " -"riuscito." - -#, python-format -msgid "Upload volume to image encountered an error (image-id: %(image_id)s)." -msgstr "" -"L'aggiornamento del volume nell'immagine ha riportato un errore (image-id: " -"%(image_id)s)." - -msgid "User does not have permission to change Storage Profile selection." -msgstr "" -"L'utente non dispone dell'autorizzazione per modificare la selezione del " -"profilo di memoria." - -msgid "VGC-CLUSTER command blocked and cancelled." -msgstr "Comando VGC-CLUSTER bloccato e annullato." - -#, python-format -msgid "Version string '%s' is not parseable" -msgstr "La stringa di versione '%s'non è analizzabile" - -#, python-format -msgid "Virtual Volume Set %s does not exist." -msgstr "L'insieme di volumi virtuali %s non esiste." - -#, python-format -msgid "Virtual disk device of backing: %s not found." -msgstr "Dispositivo disco virtuale di backup: %s non trovato." - -#, python-format -msgid "Vol copy job status %s." -msgstr "Stato del lavoro di copia del volume %s." - -#, python-format -msgid "Volume \"%s\" not found. Please check the results of \"dog vdi list\"." -msgstr "" -"Volume \"%s\" non trovato. 
Controllare i risultati di \"dog vdi list\"." - -#, python-format -msgid "" -"Volume %(name)s is not suitable for storage assisted migration using retype." -msgstr "" -"Il volume %(name)s non è adatto per la migrazione assistita dalla memoria " -"utilizzando la riscrittura." - -#, python-format -msgid "Volume %(name)s not found on the array." -msgstr "Volume %(name)s non trovato nell'array. " - -#, python-format -msgid "Volume %(name)s not found on the array. No volume to delete." -msgstr "Volume %(name)s non trovato nell'array. Nessun volume da eliminare." - -#, python-format -msgid "" -"Volume %(name)s not found on the array. No volume to migrate using retype." -msgstr "" -"Volume %(name)s non trovato sull'array. Nessun volume da migrare utilizzando " -"la riscrittura." - -#, python-format -msgid "Volume %(vol)s in the consistency group could not be deleted." -msgstr "" -"Il volume %(vol)s ha un problema di consistenza del gruppo e non è possibile " -"cancellarlo. " - -#, python-format -msgid "" -"Volume %(volumeid)s failed to send assign command, ret: %(status)s output: " -"%(output)s" -msgstr "" -"Il volume %(volumeid)s non è riuscito a inviare il comando di assegnazione, " -"ret: %(status)s output: %(output)s" - -#, python-format -msgid "Volume %s doesn't exist on array." -msgstr "Il volume %s non esiste nell'array." - -#, python-format -msgid "Volume %s, not found on SF Cluster." -msgstr "Volume %s, non trovato nel cluster SF." - -#, python-format -msgid "Volume %s: create failed" -msgstr "Volume %s: creazione non riuscita" - -#, python-format -msgid "" -"Volume %s: driver error when trying to retype, falling back to generic " -"mechanism." -msgstr "" -"Volume %s: errore del driver nel tentativo di eseguire la riscrittura, " -"fallback su meccanismo generico." - -#, python-format -msgid "Volume %s: manage failed." -msgstr "Volume %s: gestione non riuscita." 
- -#, python-format -msgid "Volume %s: rescheduling failed" -msgstr "Volume %s: ripianificazione non riuscita" - -#, python-format -msgid "Volume %s: update volume state failed." -msgstr "Volume %s: aggiornamento dello stato del volume non riuscito." - -#, python-format -msgid "" -"Volume : %(volumeName)s has not been added to target storage group " -"%(storageGroup)s." -msgstr "" -"Il volume: %(volumeName)s non è stato aggiunto al gruppo di archiviazione di " -"origine %(storageGroup)s." - -#, python-format -msgid "" -"Volume : %(volumeName)s has not been removed from source storage group " -"%(storageGroup)s." -msgstr "" -"Il volume: %(volumeName)s non è stato rimosso dal gruppo di archiviazione di " -"origine %(storageGroup)s." - -#, python-format -msgid "" -"Volume : %(volumeName)s. was not successfully migrated to target pool " -"%(targetPoolName)s." -msgstr "" -"Il volume : %(volumeName)s. non è stato migrato correttamente nel pool di " -"destinazione %(targetPoolName)s." - -#, python-format -msgid "" -"Volume ID %s was not found on the SolidFire Cluster while attempting " -"accept_transfer operation!" -msgstr "" -"L'ID volume %s non è stato trovato nel cluster SolidFire nel tentativo di " -"eseguire l'operazione accept_transfer." - -#, python-format -msgid "" -"Volume ID %s was not found on the SolidFire Cluster while attempting " -"attach_volume operation!" -msgstr "" -"L'ID volume %s non è stato trovato nel cluster SolidFire nel tentativo di " -"eseguire l'operazione attach_volume." - -#, python-format -msgid "" -"Volume ID %s was not found on the SolidFire Cluster while attempting " -"delete_volume operation!" -msgstr "" -"L'ID volume %s non è stato trovato nel cluster SolidFire nel tentativo di " -"eseguire l'operazione delete_volume." - -#, python-format -msgid "" -"Volume ID %s was not found on the SolidFire Cluster while attempting " -"detach_volume operation!" 
-msgstr "" -"L'ID volume %s non è stato trovato nel cluster SolidFire nel tentativo di " -"eseguire l'operazione detach_volume." - -#, python-format -msgid "" -"Volume ID %s was not found on the SolidFire Cluster while attempting " -"extend_volume operation!" -msgstr "" -"L'ID volume %s non è stato trovato nel cluster SolidFire nel tentativo di " -"eseguire l'operazione estend_volume!" - -#, python-format -msgid "" -"Volume ID %s was not found on the zfssa device while attempting " -"delete_volume operation." -msgstr "" -"L'ID volume %s non è stato trovato nel dispositivo zfssa nel tentativo di " -"eseguire l'operazione delete_volume." - -#, python-format -msgid "Volume already exists. %s" -msgstr "Il volume esiste già. %s" - -#, python-format -msgid "" -"Volume creation failed, deleting created snapshot %(volume_name)s@%(name)s" -msgstr "" -"Creazione del volume non riuscita, eliminazione dell'istantanea creata " -"%(volume_name)s@%(name)s" - -#, python-format -msgid "Volume creation failed, deleting created snapshot %s" -msgstr "" -"Creazione del volume non riuscita, eliminazione dell'istantanea creata %s" - -msgid "Volume did not exist. It will not be deleted" -msgstr "Il volume non esiste. Non verrà eliminato" - -#, python-format -msgid "Volume driver %s not initialized" -msgstr "Il driver di volume %s non è inizializzato" - -msgid "Volume in unexpected state" -msgstr "Volume in uno stato imprevisto" - -#, python-format -msgid "Volume in unexpected state %s, expected awaiting-transfer" -msgstr "Volume in uno stato imprevisto %s, previsto awaiting-transfer" - -#, python-format -msgid "Volume migration failed due to exception: %(reason)s." -msgstr "Migrazione volume non riuscita a causa di un'eccezione: %(reason)s" - -msgid "Volume must be detached for clone operation." -msgstr "Il volume deve essere scollegato per l'operazione di clonazione." - -#, python-format -msgid "Volume size \"%sG\" is too large." 
-msgstr "La dimensione del volume \"%sG\" è troppo grande." - -#, python-format -msgid "VolumeType %s deletion failed, VolumeType in use." -msgstr "Eliminazione di VolumeType %s non riuscita, VolumeType in uso." - -#, python-format -msgid "" -"WebDAV operation failed with error code: %(code)s reason: %(reason)s Retry " -"attempt %(retry)s in progress." -msgstr "" -"Operazione WebDAV non riuscita con codice di errore: %(code)s motivo: " -"%(reason)s Nuovo tentativo %(retry)s in corso." - -#, python-format -msgid "WebDAV returned with %(code)s error during %(method)s call." -msgstr "" -"WebDAV restituito con errore %(code)s durante la chiamata di %(method)s." - -#, python-format -msgid "" -"Workload: %(workload)s is not valid. Valid values are DSS_REP, DSS, OLTP, " -"OLTP_REP, NONE." -msgstr "" -"Carico di lavoro: %(workload)s non valido. I valori validi sono DSS_REP, " -"DSS, OLTP, OLTP_REP, NONE." - -msgid "_check_version_fail: Parsing error." -msgstr "_check_version_fail: Errore di analisi." - -msgid "_find_mappings: volume is not active" -msgstr "_find_mappings: volume non attivo" - -#, python-format -msgid "" -"_rm_vdisk_copy_op: Volume %(vol)s does not have the specified vdisk copy " -"operation: orig=%(orig)s new=%(new)s." -msgstr "" -"_rm_vdisk_copy_op: Il volume %(vol)s non presenta l'operazione di copia " -"vdisk specificata: orig=%(orig)s nuova=%(new)s." - -#, python-format -msgid "" -"_rm_vdisk_copy_op: Volume %(vol)s metadata does not have the specified vdisk " -"copy operation: orig=%(orig)s new=%(new)s." -msgstr "" -"_rm_vdisk_copy_op: I metadati del volume %(vol)s non presentano l'operazione " -"di copia vdisk specificata: orig=%(orig)s nuova=%(new)s." - -#, python-format -msgid "" -"_rm_vdisk_copy_op: Volume %s does not have any registered vdisk copy " -"operations." -msgstr "" -"_rm_vdisk_copy_op: Il volume %s non presenta operazioni di copia vdisk " -"registrate." 
- -#, python-format -msgid "" -"_rm_vdisk_copy_op: Volume metadata %s does not have any registered vdisk " -"copy operations." -msgstr "" -"_rm_vdisk_copy_op: I metadati del volume %s non presentano operazioni di " -"copia vdisk registrate." - -#, python-format -msgid "" -"_unmap_vdisk_from_host: No mapping of volume %(vol_name)s to host " -"%(host_name)s found." -msgstr "" -"_unmap_vdisk_from_host: Non è stata trovata nessuna associazione del volume " -"%(vol_name)s all'host %(host_name)s." - -#, python-format -msgid "_wait_for_job_complete failed after %(retries)d tries." -msgstr "_wait_for_job_complete non riuscito dopo %(retries)d tentativi." - -#, python-format -msgid "_wait_for_job_complete, failed after %(retries)d tries." -msgstr "_wait_for_job_complete, non riuscito dopo %(retries)d tentativi." - -#, python-format -msgid "_wait_for_sync failed after %(retries)d tries." -msgstr "_wait_for_sync non riuscito dopo %(retries)d tentativi." - -#, python-format -msgid "" -"backup: %(vol_id)s failed to remove backup hardlink from %(vpath)s to " -"%(bpath)s.\n" -"stdout: %(out)s\n" -" stderr: %(err)s." -msgstr "" -"backup: %(vol_id)s non è stato in grado di rimovere l'hardlinl di backup da " -"%(vpath)s a %(bpath)s.\n" -"stdout: %(out)s\n" -" stderr: %(err)s." - -#, python-format -msgid "can't create 2 volumes with the same name, %s" -msgstr "impossibile creare 2 volumi con lo stesso nome, %s" - -msgid "cinder-rtstool is not installed correctly" -msgstr "cinder-rtstool non è installato correttamente" - -#, python-format -msgid "" -"delete: %(vol_id)s failed with stdout: %(out)s\n" -" stderr: %(err)s" -msgstr "" -"eliminazione: %(vol_id)s non riuscito con stdout: %(out)s\n" -" stderr: %(err)s" - -#, python-format -msgid "ensure_export: Volume %s not found on storage." -msgstr "ensure_export: Volume %s non trovato nella memoria." 
- -#, python-format -msgid "error opening rbd image %s" -msgstr "errore nell'apertura dell'immagine rbd %s" - -msgid "error refreshing volume stats" -msgstr "errore durante l'aggiornamento delle statistiche del volume" - -msgid "horcm command timeout." -msgstr "Timeout del comando horcm." - -msgid "import pywbem failed!! pywbem is necessary for this volume driver." -msgstr "" -"importazione di pywbem non riuscita. pywbem è necessario per questo driver " -"di volume." - -#, python-format -msgid "" -"initialize_connection: Failed to collect return properties for volume " -"%(vol)s and connector %(conn)s." -msgstr "" -"initialize_connection: impossibile raccogliere le proprietà di ritorno per " -"il volume %(vol)s e il connettore %(conn)s." - -#, python-format -msgid "" -"initialize_connection: Failed to collect return properties for volume " -"%(vol)s and connector %(conn)s.\n" -msgstr "" -"initialize_connection: impossibile raccogliere le proprietà di ritorno per " -"il volume %(vol)s e il connettore %(conn)s.\n" - -msgid "iscsi_ip_address must be set!" -msgstr "iscsi_ip_address deve essere impostato." - -msgid "manage_existing: No source-name in ref!" -msgstr "manage_existing: Nessun source-name nel riferimento." - -#, python-format -msgid "manage_existing_get_size: %s does not exist!" -msgstr "manage_existing_get_size: %s non esiste." - -msgid "manage_existing_get_size: No source-name in ref!" -msgstr "manage_existing_get_size: Nessun source-name nel riferimento." - -msgid "model server went away" -msgstr "model server é scomparso" - -#, python-format -msgid "modify volume: %s does not exist!" -msgstr "modify volume: %s does not exist." - -msgid "san ip must be configured!" -msgstr "L'IP SAN deve essere configurato." - -msgid "san_login must be configured!" -msgstr "san_login deve essere configurato." - -msgid "san_password must be configured!" -msgstr "san_password deve essere configurato." 
- -#, python-format -msgid "single_user auth mode enabled, but %(param)s not set" -msgstr "" -"Modalità di autorizzazione single_user abilitata, ma %(param)s non impostato" - -msgid "snm2 command timeout." -msgstr "Timeout del comando snm2." - -msgid "" -"storwize_svc_multihostmap_enabled is set to False, not allowing multi host " -"mapping." -msgstr "" -"storwize_svc_multihostmap_enabled è impostato su False, associazione di più " -"host non consentita. " - -#, python-format -msgid "unmanage: Volume %s does not exist!" -msgstr "unmanage: Il volume %s non esiste." - -msgid "zfssa_initiator cannot be empty when creating a zfssa_initiator_group." -msgstr "" -"zfssa_initiator non può essere vuoto durante la creazione di un " -"zfssa_initiator_group." - -msgid "" -"zfssa_replication_ip not set in cinder.conf. zfssa_replication_ip is needed " -"for backend enabled volume migration. Continuing with generic volume " -"migration." -msgstr "" -"zfssa_replication_ip non impostato in cinder.conf. zfssa_replication_ip è " -"necessario per la migrazione volumi abilitata al backend. Continuare con la " -"migrazione volumi generica." diff --git a/cinder/locale/it/LC_MESSAGES/cinder-log-info.po b/cinder/locale/it/LC_MESSAGES/cinder-log-info.po deleted file mode 100644 index cc84705be..000000000 --- a/cinder/locale/it/LC_MESSAGES/cinder-log-info.po +++ /dev/null @@ -1,3380 +0,0 @@ -# Translations template for cinder. -# Copyright (C) 2015 ORGANIZATION -# This file is distributed under the same license as the cinder project. -# -# Translators: -# OpenStack Infra , 2015. #zanata -# Alessandra , 2016. #zanata -# Andreas Jaeger , 2016. 
#zanata -msgid "" -msgstr "" -"Project-Id-Version: cinder 9.0.0.0rc2.dev178\n" -"Report-Msgid-Bugs-To: https://bugs.launchpad.net/openstack-i18n/\n" -"POT-Creation-Date: 2016-10-06 03:18+0000\n" -"MIME-Version: 1.0\n" -"Content-Type: text/plain; charset=UTF-8\n" -"Content-Transfer-Encoding: 8bit\n" -"PO-Revision-Date: 2016-03-17 10:49+0000\n" -"Last-Translator: Alessandra \n" -"Language: it\n" -"Plural-Forms: nplurals=2; plural=(n != 1);\n" -"Generated-By: Babel 2.0\n" -"X-Generator: Zanata 3.7.3\n" -"Language-Team: Italian\n" - -#, python-format -msgid "\t%(name)-35s : %(value)s" -msgstr "\t%(name)-35s : %(value)s" - -#, python-format -msgid "\t%(param)-35s : %(value)s" -msgstr "\t%(param)-35s : %(value)s" - -#, python-format -msgid "\t%(prefix)-35s : %(version)s" -msgstr "\t%(prefix)-35s : %(version)s" - -#, python-format -msgid "\t%(request)-35s : %(value)s" -msgstr "\t%(request)-35s : %(value)s" - -#, python-format -msgid "" -"\n" -"\n" -"\n" -"\n" -"Request URL: %(url)s\n" -"\n" -"Call Method: %(method)s\n" -"\n" -"Request Data: %(data)s\n" -"\n" -"Response Data:%(res)s\n" -"\n" -msgstr "" -"\n" -"\n" -"\n" -"\n" -"URL richiesta: %(url)s\n" -"\n" -"Metodo di chiamata: %(method)s\n" -"\n" -"Dati richiesta: %(data)s\n" -"\n" -"Dati risposta: %(res)s\n" -"\n" - -#, python-format -msgid "%(method)s %(url)s" -msgstr "%(method)s %(url)s" - -#, python-format -msgid "%(url)s returned a fault: %(e)s" -msgstr "%(url)s ha restituito un errore: %(e)s" - -#, python-format -msgid "%(url)s returned with HTTP %(status)d" -msgstr "%(url)s restituito con HTTP %(status)d" - -#, python-format -msgid "%(volume)s assign type fibre_channel, properties %(properties)s" -msgstr "%(volume)s assegna tipo fibre_channel, proprietà %(properties)s" - -#, python-format -msgid "%s is already umounted" -msgstr "%s è giù smontato" - -#, python-format -msgid "3PAR driver cannot perform migration. Retype exception: %s" -msgstr "" -"Il driver 3PAR non può eseguire la migrazione. 
Riscrivere l'eccezione: %s" - -#, python-format -msgid "3PAR vlun %(name)s not found on host %(host)s" -msgstr "3PAR vlun %(name)s non trovato sull'host %(host)s" - -#, python-format -msgid "" -"3PAR vlun for volume '%(name)s' was deleted, but the host '%(host)s' was not " -"deleted because: %(reason)s" -msgstr "" -"3PAR vlun per il volume '%(name)s' è stato eliminato ma l'host '%(host)s' " -"non è stato eliminato perché: %(reason)s" - -#, python-format -msgid "AUTH properties: %s." -msgstr "Proprietà AUTH: %s." - -#, python-format -msgid "Accepting transfer %s" -msgstr "Accettazione trasferimento %s" - -msgid "Activate Flexvisor cinder volume driver." -msgstr "Attivare il driver del volume cinder Flexvisor." - -msgid "Add connection: finished iterating over all target list" -msgstr "" -"Aggiungi connessione: completata iterazione sull'elenco di tutte le " -"destinazioni" - -#, python-format -msgid "Add volume response: %s" -msgstr "Aggiungi risposta del volume: %s" - -#, python-format -msgid "Added %s to cg." -msgstr "Aggiunto %s a cg." - -#, python-format -msgid "Added volume: %(volumeName)s to existing storage group %(sgGroupName)s." -msgstr "" -"Aggiunto volume: %(volumeName)s al gruppo di archiviazione esistente " -"%(sgGroupName)s." - -#, python-format -msgid "Adding ACL to volume=%(vol)s with initiator group name %(igrp)s" -msgstr "Aggiunta di ACL al volume=%(vol)s con nome gruppo iniziatore %(igrp)s" - -#, python-format -msgid "Adding volume %(v)s to consistency group %(cg)s." -msgstr "Aggiunta del volume %(v)s al gruppo di coerenza %(cg)s." - -#, python-format -msgid "" -"Adding volume: %(volumeName)s to default storage group for FAST policy: " -"%(fastPolicyName)s." -msgstr "" -"Aggiunta del volume: %(volumeName)s al gruppo di archiviazione predefinito " -"per la politica FAST: %(fastPolicyName)s." - -#, python-format -msgid "Adding volumes to cg %s." -msgstr "Aggiunta volumi a cg %s." 
- -#, python-format -msgid "Already mounted: %s" -msgstr "Già montato: %s" - -msgid "Attach volume completed successfully." -msgstr "Collegamento del volume completato correttamente." - -#, python-format -msgid "" -"Automatically selected %(binary)s RPC version %(version)s as minimum service " -"version." -msgstr "" -"Selezionato automaticamente RPC %(binary)s versione %(version)s come " -"versione di servizio minima." - -#, python-format -msgid "" -"Automatically selected %(binary)s objects version %(version)s as minimum " -"service version." -msgstr "" -"Selezionati automaticamente oggetti %(binary)s versione %(version)s come " -"versione di servizio minima." - -msgid "Availability Zones retrieved successfully." -msgstr "Zone di disponibilità richiamate correttamente." - -#, python-format -msgid "Backend name is %s." -msgstr "Il nome backend è %s." - -#, python-format -msgid "Backing VM: %(backing)s renamed to %(new_name)s." -msgstr "VM di backup: %(backing)s ridenominata %(new_name)s." - -msgid "Backing not available, no operation to be performed." -msgstr "Backup non disponibile, nessuna operazione da eseguire." - -#, python-format -msgid "Backing not found, creating for volume: %s" -msgstr "Backup non trovato, creazione per volume: %s" - -#, python-format -msgid "" -"Backup base image of volume %(volume)s still has %(snapshots)s snapshots so " -"skipping base image delete." -msgstr "" -"L'immagine di base di backup del volume %(volume)s presenta ancora " -"%(snapshots)s istantanee, per cui l'eliminazione dell'immagine di base viene " -"ignorata." - -#, python-format -msgid "" -"Backup image of volume %(volume)s is busy, retrying %(retries)s more time(s) " -"in %(delay)ss." -msgstr "" -"L'immagine di backup del volume %(volume)s è occupata, il tentativo viene " -"eseguito altre %(retries)s volte in %(delay)ss." - -#, python-format -msgid "Backup service: %s." -msgstr "Servizio di backup: %s." - -#, python-format -msgid "Begin backup of volume %s." 
-msgstr "Inizio backup del volume %s." - -msgid "Begin detaching volume completed successfully." -msgstr "Inizio dello scollegamento del volume completato correttamente." - -#, python-format -msgid "" -"BrcdFCZoneDriver - Add connection for fabric %(fabric)s for I-T map: " -"%(i_t_map)s" -msgstr "" -"BrcdFCZoneDriver - Aggiungi connessione per %(fabric)s for I-T map: " -"%(i_t_map)s" - -#, python-format -msgid "" -"BrcdFCZoneDriver - Delete connection for fabric %(fabric)s for I-T map: " -"%(i_t_map)s" -msgstr "" -"BrcdFCZoneDriver - Eliminare la connessione per fabric %(fabric)s per " -"l'associazione I-T: %(i_t_map)s" - -msgid "CHAP authentication disabled." -msgstr "Autenticazione CHAP disabilitata." - -#, python-format -msgid "CONCERTO version: %s" -msgstr "Versione CONCERTO: %s" - -msgid "Calling os-brick to detach ScaleIO volume." -msgstr "Chiamata os-brick per scollegare il volume ScaleIO." - -#, python-format -msgid "" -"Cannot provide backend assisted migration for volume: %s because cluster " -"exists in different management group." -msgstr "" -"Impossibile fornire la migrazione assistita del backend per il volume: %s in " -"quanto il cluster esiste in un gruppo di gestione diverso." - -#, python-format -msgid "" -"Cannot provide backend assisted migration for volume: %s because the volume " -"has been exported." -msgstr "" -"Impossibile fornire la migrazione assistita del backend per il volume: %s in " -"quanto il volume è stato esportato." - -#, python-format -msgid "" -"Cannot provide backend assisted migration for volume: %s because the volume " -"has snapshots." -msgstr "" -"Impossibile fornire la migrazione assistita del backend per il volume: %s in " -"quanto il volume presenta istantanee." - -#, python-format -msgid "" -"Cannot provide backend assisted migration for volume: %s because volume does " -"not exist in this management group." 
-msgstr "" -"Impossibile fornire la migrazione assistita del backend per il volume: %s in " -"quanto il volume non esiste in questo gruppo di gestione." - -#, python-format -msgid "" -"Cannot provide backend assisted migration for volume: %s because volume is " -"from a different backend." -msgstr "" -"Impossibile fornire la migrazione assistita del backend per il volume: %s in " -"quanto il volume deriva da un backend diverso." - -#, python-format -msgid "Cgsnapshot %s: creating." -msgstr "Crazione di cgsnapshot %s:." - -#, python-format -msgid "Change volume capacity request: %s." -msgstr "Modificare la richiesta di capacità del volume: %s." - -#, python-format -msgid "Checking image clone %s from glance share." -msgstr "Verifica del clone dell'immagine %s dalla condivisione glance." - -#, python-format -msgid "Checking origin %(origin)s of volume %(volume)s." -msgstr "Verifica dell'origine %(origin)s del volume %(volume)s." - -#, python-format -msgid "" -"Cinder NFS volume with current path \"%(cr)s\" is no longer being managed." -msgstr "" -"Il volume NFS Cinder con percorso corrente \"%(cr)s\" non è più gestito." - -msgid "Cinder secure environment indicator file exists." -msgstr "Il file indicatore dell'ambiente sicuro Cinder esiste." - -#, python-format -msgid "CiscoFCZoneDriver - Add connection for I-T map: %s" -msgstr "CiscoFCZoneDriver - Aggiungi connessione per associazione I-T: %s" - -#, python-format -msgid "CiscoFCZoneDriver - Delete connection for I-T map: %s" -msgstr "CiscoFCZoneDriver - Elimina connessione per associazione I-T: %s" - -#, python-format -msgid "Cleaning cache for share %s." -msgstr "Cancellazione della cache per la condivisione %s." - -msgid "Cleaning up incomplete backup operations." -msgstr "Ripulitura delle operazioni di backup incomplete." - -#, python-format -msgid "Clone %s created." -msgstr "Clone %s creato." 
- -#, python-format -msgid "Cloning from cache to destination %s" -msgstr "Clonazione dalla cache alla destinazione %s" - -#, python-format -msgid "Cloning from snapshot to destination %s" -msgstr "Clonazione dall'istantanea alla destinazione %s" - -#, python-format -msgid "Cloning image %s from cache" -msgstr "Clonazione dell'immagine %s dalla cache" - -#, python-format -msgid "Cloning image %s from snapshot." -msgstr "Clonazione dell'immagine %s dall'istantanea." - -#, python-format -msgid "Cloning volume %(src)s to volume %(dst)s" -msgstr "Clonazione del volume %(src)s nel volume %(dst)s" - -#, python-format -msgid "" -"Cloning volume from snapshot volume=%(vol)s snapshot=%(snap)s clone=" -"%(clone)s snap_size=%(size)s reserve=%(reserve)sagent-type=%(agent-type)s " -"perfpol-name=%(perfpol-name)s encryption=%(encryption)s cipher=%(cipher)s " -"multi-initiator=%(multi-initiator)s" -msgstr "" -"Clonazione del volume dal volume dell'istantanea=%(vol)s snapshot=%(snap)s " -"clone=%(clone)s snap_size=%(size)s reserve=%(reserve)sagent-type=" -"%(agent-type)s perfpol-name=%(perfpol-name)s encryption=%(encryption)s " -"cipher=%(cipher)s multi-initiator=%(multi-initiator)s" - -#, python-format -msgid "CloudByte API executed successfully for command [%s]." -msgstr "API CloudByte eseguito correttamente per il comando [%s]." - -#, python-format -msgid "" -"CloudByte operation [%(operation)s] succeeded for volume [%(cb_volume)s]." -msgstr "" -"Operazione CloudByte [%(operation)s] eseguita correttamente per il volume " -"[%(cb_volume)s]." - -msgid "Complete-Migrate volume completed successfully." -msgstr "Completamento-migrazione del volume completati correttamente." - -#, python-format -msgid "Completed: convert_to_base_volume: id=%s." -msgstr "Completato: convert_to_base_volume: id=%s." 
- -#, python-format -msgid "" -"Connect initialization info: {driver_volume_type: fibre_channel, data: " -"%(properties)s" -msgstr "" -"Informazioni sull'inizializzazione della connessione: {driver_volume_type: " -"fibre_channel, dati: %(properties)s" - -#, python-format -msgid "Connecting to host: %s." -msgstr "Connessione a host: %s." - -#, python-format -msgid "Connecting to target host: %s for backend enabled migration." -msgstr "" -"Connessione a host di destinazione: %s per la migrazione abiliata al backend." - -#, python-format -msgid "Connector returning fcnsinfo-%s" -msgstr "Il connettore restituisce fcnsinfo-%s" - -#, python-format -msgid "Converted %(sz).2f MB image at %(mbps).2f MB/s" -msgstr "Convertita immagine di %(sz).2f MB su %(mbps).2f MB/s" - -#, python-format -msgid "" -"Converting %(volume_name)s to full provisioning with userCPG=%(new_cpg)s" -msgstr "" -"Conversione di %(volume_name)s a full provisioning con userCPG=%(new_cpg)s" - -#, python-format -msgid "" -"Converting %(volume_name)s to thin dedup provisioning with userCPG=" -"%(new_cpg)s" -msgstr "" -"Conversione di %(volume_name)s a thin dedup provisioning con userCPG=" -"%(new_cpg)s" - -#, python-format -msgid "" -"Converting %(volume_name)s to thin provisioning with userCPG=%(new_cpg)s" -msgstr "" -"Conversione di %(volume_name)s a thin provisioning con userCPG=%(new_cpg)s" - -msgid "Coordination backend started successfully." -msgstr "Backend di coordinazione avviato correttamente. " - -#, python-format -msgid "Copied image %(img)s to volume %(vol)s using copy offload workflow." -msgstr "" -"Copiata immagine %(img)s nel volume %(vol)s utilizzando il carico di lavoro " -"offload di copia." - -#, python-format -msgid "Copied image %(img)s to volume %(vol)s using local image cache." -msgstr "" -"Copiata immagine %(img)s in volume %(vol)s utilizzando la cache immagini " -"locale." - -#, python-format -msgid "Copied image to volume %s using regular download." 
-msgstr "Copiata immagine nel volume %s utilizzando il download normale." - -#, python-format -msgid "Copy job to dest vol %s completed." -msgstr "Lavoro di copia nel volume di destinazione %s completato." - -msgid "Copy volume to image completed successfully." -msgstr "Copia del volume nell'immagine completata correttamente." - -#, python-format -msgid "Copying src vol %(src)s to dest vol %(dst)s." -msgstr "" -"Copia del volume di origine %(src)s nel volume di destinazione %(dst)s." - -#, python-format -msgid "Could not find replica to delete of volume %(vol)s." -msgstr "Impossibile trovare la replica per l'eliminazione del volume %(vol)s." - -#, python-format -msgid "Could not run dpkg-query command: %(msg)s." -msgstr "Impossibile eseguire il comando dpkg-query: %(msg)s." - -#, python-format -msgid "Could not run rpm command: %(msg)s." -msgstr "Impossibile eseguire il comando rpm: %(msg)s." - -#, python-format -msgid "" -"Could not update storage pool with mmchattr to %(pool)s, error: %(error)s" -msgstr "" -"Impossibile aggiornare il pool di archiviazione con mmchattr a %(pool)s, " -"errore: %(error)s" - -#, python-format -msgid "" -"Couldn't find destination volume %(vol)s in the database. The entry might be " -"successfully deleted during migration completion phase." -msgstr "" -"Impossibile trovare il volume di destinazione %(vol)s nel database. La voce " -"potrebbe essere stata eliminata correttamente durante la fase di " -"completamento della migrazione." - -#, python-format -msgid "" -"Couldn't find the temporary volume %(vol)s in the database. There is no need " -"to clean up this volume." -msgstr "" -"Impossibile trovare il volume temporaneo %(vol)s nel database. Non c'è " -"alcuna necessità di ripulire questo volume." - -#, python-format -msgid "Create Cloned Volume %(volume_id)s completed." -msgstr "Creazione del volume clonato %(volume_id)s completata." - -#, python-format -msgid "Create Consistency Group: %(group)s." 
-msgstr "Crea gruppo di coerenza: %(group)s." - -#, python-format -msgid "Create Volume %(volume_id)s completed." -msgstr "Creazione del volume %(volume_id)s completata." - -#, python-format -msgid "Create Volume %(volume_id)s from snapshot %(snapshot_id)s completed." -msgstr "" -"Creazione volume %(volume_id)s da istantanea %(snapshot_id)s completata." - -#, python-format -msgid "" -"Create a replica from Volume: Clone Volume: %(cloneName)s Source Volume: " -"%(sourceName)s." -msgstr "" -"Creare una replica dal volume: Volume clone: %(cloneName)s Volume di " -"origine: %(sourceName)s." - -#, python-format -msgid "Create backup finished. backup: %s." -msgstr "Creazione backup completata, backup: %s." - -#, python-format -msgid "Create backup started, backup: %(backup_id)s volume: %(volume_id)s." -msgstr "Creazione backup avviata, backup: %(backup_id)s volume: %(volume_id)s." - -#, python-format -msgid "Create consistency group from source-%(source)s completed successfully." -msgstr "" -"Creazione del gruppo di coerenza da origine-%(source)s completata " -"correttamente." - -#, python-format -msgid "Create export done from Volume %(volume_id)s." -msgstr "Creazione esportazione eseguita dal volume %(volume_id)s." - -msgid "Create snapshot completed successfully" -msgstr "Creazione istantanea completata correttamente" - -#, python-format -msgid "" -"Create snapshot for Consistency Group %(cgId)s cgsnapshotID: %(cgsnapshot)s." -msgstr "" -"Crea istantanea per gruppo di coerenza %(cgId)s cgsnapshotID: %(cgsnapshot)s." - -#, python-format -msgid "Create snapshot from volume %s" -msgstr "Crea istantanea dal volume %s" - -#, python-format -msgid "" -"Create success. Snapshot: %(snapshot)s, Snapshot ID in raid: " -"%(raid_snapshot_id)s, volume: %(volume)s." -msgstr "" -"Creazione eseguita correttamente. Istantanea: %(snapshot)s, ID istantanea in " -"raid: %(raid_snapshot_id)s, volume: %(volume)s." - -#, python-format -msgid "Create target consistency group %(targetCg)s." 
-msgstr "Crea gruppo di coerenza di destinazione %(targetCg)s." - -#, python-format -msgid "Create volume of %s GB" -msgstr "Crea volume di %s GB" - -#, python-format -msgid "CreateReplay success %s" -msgstr "CreateReplay riuscito %s" - -#, python-format -msgid "" -"Created CloudByte snapshot [%(cb_snap)s] w.r.t CloudByte volume [%(cb_vol)s] " -"and OpenStack volume [%(stack_vol)s]." -msgstr "" -"Creata correttamente un'istantanea CloudByte [%(cb_snap)s] in relazione al " -"volume CloudByte [%(cb_vol)s] e al volume OpenStack [%(stack_vol)s]." - -#, python-format -msgid "Created Consistency Group %s" -msgstr "Creato gruppo di coerenza %s" - -#, python-format -msgid "" -"Created a clone [%(cb_clone)s] at CloudByte snapshot path [%(cb_snap)s] w.r." -"t parent OpenStack volume [%(stack_vol)s]." -msgstr "" -"Creato un clone [%(cb_clone)s] nel percorso dell'istantanea CloudByte " -"[%(cb_snap)s] in relazione al volume OpenStack principale [%(stack_vol)s]." - -#, python-format -msgid "Created datastore folder: %s." -msgstr "Creata cartella di datastore: %s." - -#, python-format -msgid "" -"Created lun-map:\n" -"%s" -msgstr "" -"Creata associazione lun:\n" -"%s" - -#, python-format -msgid "" -"Created multi-attach E-Series host group %(label)s with clusterRef " -"%(clusterRef)s" -msgstr "" -"Creato gruppo di host multi-attach E-Series '%(label)s' con clusterRef " -"%(clusterRef)s" - -#, python-format -msgid "Created new initiator group name: %(igGroupName)s." -msgstr "Creato nome nuovo del gruppo di iniziatori: %(igGroupName)s." - -#, python-format -msgid "Created new masking view : %(maskingViewName)s." -msgstr "Creata nuova vista di mascheramento: %(maskingViewName)s." - -#, python-format -msgid "Created new storage group: %(storageGroupName)s." -msgstr "Creato nuovo gruppo di archiviazione: %(storageGroupName)s." - -#, python-format -msgid "Created snap grp with label %s." -msgstr "Creato grp snap con etichetta %s." 
- -#, python-format -msgid "Created volume %(instanceId)s: %(name)s" -msgstr "Creato volume %(instanceId)s: %(name)s" - -#, python-format -msgid "Created volume %(volname)s, volume id %(volid)s." -msgstr "Creato volume %(volname)s, id volume %(volid)s." - -msgid "Created volume successfully." -msgstr "Volume creato correttamente." - -#, python-format -msgid "Created volume with label %s." -msgstr "Creato volume con etichetta %s." - -#, python-format -msgid "Creating %(volume)s on %(device)s" -msgstr "Creazione di %(volume)s su %(device)s" - -msgid "Creating Consistency Group" -msgstr "Creazione gruppo di coerenza" - -#, python-format -msgid "Creating backup of volume %(volume_id)s in container %(container)s" -msgstr "" -"Creazione backup del volume %(volume_id)s nel contenitore %(container)s" - -#, python-format -msgid "Creating cgsnapshot %(name)s." -msgstr "Creazione cgsnapshot %(name)s." - -#, python-format -msgid "Creating clone of volume: %s" -msgstr "Creazione clone del volume: %s" - -#, python-format -msgid "Creating clone of volume: %s." -msgstr "Creazione clone del volume: %s." - -#, python-format -msgid "Creating consistency group %(name)s from cgsnapshot %(snap)s." -msgstr "Creazione gruppo di coerenza %(name)s da cgsnapshot %(snap)s." - -#, python-format -msgid "" -"Creating consistency group %(name)s from source consistency group " -"%(source_cgid)s." -msgstr "" -"Creazione gruppo di coerenza %(name)s da gruppo di coerenza di origine " -"%(source_cgid)s." - -#, python-format -msgid "Creating consistency group %(name)s." -msgstr "Creazione gruppo di coerenza %(name)s." - -#, python-format -msgid "Creating host object %(host_name)r with IQN: %(iqn)s." -msgstr "Creazione dell'oggetto host %(host_name)r con IQN: %(iqn)s." - -#, python-format -msgid "Creating host object %(host_name)r with WWN: %(wwn)s." -msgstr "Creazione dell'oggetto host %(host_name)r con WWN: %(wwn)s." - -#, python-format -msgid "Creating host with ports %s." 
-msgstr "Creazione host con porte %s." - -#, python-format -msgid "Creating image snapshot %s" -msgstr "Creazione dell'istantanea dell'immagine %s" - -#, python-format -msgid "Creating initiator group %(grp)s with initiator %(iname)s" -msgstr "Creazione del gruppo iniziatore %(grp)s con iniziatore %(iname)s" - -#, python-format -msgid "Creating initiator group %(igrp)s with one initiator %(iname)s" -msgstr "Creazione del gruppo iniziatore %(igrp)s con un iniziatore %(iname)s" - -#, python-format -msgid "Creating iscsi_target for volume: %s" -msgstr "Creazione di iscsi_target per il volume: %s" - -#, python-format -msgid "Creating regular file: %s.This may take some time." -msgstr "" -"Creazione del file regolare: %s. Questa operazione potrebbe richiedere del " -"tempo." - -#, python-format -msgid "Creating server %s" -msgstr "Creazione server %s" - -#, python-format -msgid "Creating snapshot %(snap)s of volume %(vol)s" -msgstr "Creazione dell'istantanea %(snap)s del volume %(vol)s." - -#, python-format -msgid "" -"Creating snapshot for volume_name=%(vol)s snap_name=%(name)s " -"snap_description=%(desc)s" -msgstr "" -"Creazione dell'istantanea per il nome volume=%(vol)s nome_istantanea=" -"%(name)s descrizione_istantanea=%(desc)s" - -#, python-format -msgid "Creating snapshot: %s" -msgstr "Creazione istantanea: %s." - -#, python-format -msgid "Creating temp snapshot %(snap)s from volume %(vol)s" -msgstr "Creazione dell'istantanea temporanea %(snap)s dal volume %(vol)s." - -#, python-format -msgid "Creating transfer of volume %s" -msgstr "Creazione trasferimento di volume %s" - -#, python-format -msgid "Creating volume %s from snapshot." -msgstr "Creazione del volume %s dall'istantanea." - -#, python-format -msgid "Creating volume from snapshot: %s" -msgstr "Creazione volume dall'istantanea: %s." - -#, python-format -msgid "Creating volume of %(size)s GB for restore of backup %(backup_id)s." 
-msgstr "" -"Creazione del volume di %(size)s GB per il ripristino del backup " -"%(backup_id)s." - -#, python-format -msgid "Creating volume snapshot: %s." -msgstr "Creazione istantanea del volume: %s." - -#, python-format -msgid "Creatng volume from snapshot. volume: %s" -msgstr "Creazione volume dall'istantanea. volume: %s" - -#, python-format -msgid "DRBD connection for %s already removed" -msgstr "Connessione DRBD per %s già rimossa" - -#, python-format -msgid "Delete Consistency Group: %(group)s." -msgstr "Elimina gruppo di coerenza: %(group)s." - -#, python-format -msgid "Delete Snapshot %(snapshot_id)s completed." -msgstr "Eliminazione istantanea %(snapshot_id)s completata." - -#, python-format -msgid "Delete Snapshot: %(snapshot)s." -msgstr "Elimina istantanea: %(snapshot)s." - -#, python-format -msgid "Delete Snapshot: %(snapshotName)s." -msgstr "Elimina istantanea: %(snapshotName)s." - -#, python-format -msgid "Delete Volume %(volume_id)s completed." -msgstr "Eliminazione del volume %(volume_id)s completata." - -#, python-format -msgid "Delete backup finished, backup %s deleted." -msgstr "Eliminazione backup completata, backup %s eliminato." - -#, python-format -msgid "Delete backup started, backup: %s." -msgstr "Eliminazione backup avviata, backup: %s." - -#, python-format -msgid "Delete backup with id: %s" -msgstr "Elimina (delete) backup con l'id: %s" - -#, python-format -msgid "Delete cgsnapshot %(snap_name)s for consistency group: %(group_name)s" -msgstr "" -"Elimina cgsnapshot %(snap_name)s per il gruppo di coerenza : %(group_name)s" - -#, python-format -msgid "Delete cgsnapshot with id: %s" -msgstr "Elimina cgsnapshot con id:%s" - -#, python-format -msgid "Delete connection target list: %(targets)s" -msgstr "Elimina elenco di destinazioni di connessione: %(targets)s" - -msgid "Delete consistency group completed successfully." -msgstr "Eliminazione del gruppo di coerenza completata correttamente." 
- -#, python-format -msgid "Delete consistency group with id: %s" -msgstr "Elimina gruppo di coerenza con id: %s" - -#, python-format -msgid "" -"Delete of backup '%(backup)s' for volume '%(volume)s' finished with warning." -msgstr "" -"Eliminazione del backup '%(backup)s' per il volume '%(volume)s' completata " -"con avvertenza." - -msgid "Delete snapshot completed successfully" -msgstr "Eliminazione istantanea completata correttamente" - -#, python-format -msgid "Delete snapshot for source CG %(cgId)s cgsnapshotID: %(cgsnapshot)s." -msgstr "" -"Elimina istantanea per CG di origine %(cgId)s cgsnapshotID: %(cgsnapshot)s." - -msgid "Delete snapshot metadata completed successfully." -msgstr "Eliminazione dei metadati dell'istantanea completata correttamente." - -#, python-format -msgid "Delete snapshot with id: %s" -msgstr "Elimina istantanea con id: %s" - -#, python-format -msgid "Delete transfer with id: %s" -msgstr "Elimina trasferimento con id: %s" - -msgid "Delete volume metadata completed successfully." -msgstr "Eliminazione dei metadati del volume completata correttamente." - -msgid "Delete volume request issued successfully." -msgstr "Eliminazione della richiesta del volume eseguita correttamente." - -#, python-format -msgid "Delete volume with id: %s" -msgstr "Elimina volume con id: %s" - -#, python-format -msgid "Deleted %(row)d rows from table=%(table)s" -msgstr "Eliminate %(row)d righe dalla tabella=%(table)s" - -#, python-format -msgid "" -"Deleted CloudByte snapshot [%(snap)s] w.r.t parent CloudByte volume " -"[%(cb_vol)s] and parent OpenStack volume [%(stack_vol)s]." -msgstr "" -"Eliminata un'istantanea CloudByte [%(snap)s] in relazione al volume " -"CloudByte principale [%(cb_vol)s] e al volume OpenStack principale " -"[%(stack_vol)s]." - -#, python-format -msgid "Deleted the VM backing: %s." -msgstr "Eliminato backup VM: %s." - -#, python-format -msgid "Deleted vmdk file: %s." -msgstr "Eliminato file vmdk: %s." - -msgid "Deleted volume successfully." 
-msgstr "Volume eliminato correttamente." - -msgid "Deleting Consistency Group" -msgstr "Eliminazione gruppo di coerenza" - -#, python-format -msgid "Deleting Volume: %(volume)s" -msgstr "Eliminazione del volume: %(volume)s" - -#, python-format -msgid "Deleting backup base image='%(basename)s' of volume %(volume)s." -msgstr "" -"Eliminazione dell'immagine di base di backup='%(basename)s' di volume " -"%(volume)s." - -#, python-format -msgid "Deleting deleteInitiatorGrp %s " -msgstr "Eliminazione di deleteInitiatorGrp %s " - -#, python-format -msgid "Deleting snapshot %(ss)s from %(pro)s" -msgstr "Eliminazione istantanea %(ss)s da %(pro)s" - -#, python-format -msgid "Deleting snapshot %s " -msgstr "Eliminazione istantanea %s" - -#, python-format -msgid "Deleting snapshot: %s" -msgstr "Eliminazione dell'istantanea: %s" - -#, python-format -msgid "Deleting stale snapshot: %s" -msgstr "Eliminazione dell'istantanea obsoleta: %s" - -#, python-format -msgid "Deleting volume %s " -msgstr "Eliminazione del volume %s" - -#, python-format -msgid "Deleting volume %s." -msgstr "Eliminazione del volume %s." - -#, python-format -msgid "Detach Volume, metadata is: %s." -msgstr "Scollegare il volume, i metadati sono: %s." - -msgid "Detach volume completed successfully." -msgstr "Scollegamento del volume completato correttamente." - -msgid "Determined volume DB was empty at startup." -msgstr "È stato stabilito che il DB dei volumi era vuoto all'avvio." - -msgid "Determined volume DB was not empty at startup." -msgstr "È stato stabilito che il DB dei volumi non era vuoto all'avvio." - -#, python-format -msgid "" -"Did not find the snapshot: %(name)s for backing: %(backing)s. Need not " -"delete anything." -msgstr "" -"Non è stata trovata l'istantanea: %(name)s per il backup: %(backing)s. Non è " -"necessario eliminare nulla." 
- -#, python-format -msgid "Discovery ip %(disc_ip)s is found on mgmt+data subnet %(net_label)s" -msgstr "" -"L'IP di rilevamento %(disc_ip)s viene trovato nella sottorete gestione+dati " -"%(net_label)s" - -#, python-format -msgid "Discovery ip %(disc_ip)s is used on data subnet %(net_label)s" -msgstr "" -"L'IP di rilevamento %(disc_ip)s viene utilizzato nella sottorete di dati " -"%(net_label)s" - -#, python-format -msgid "Discovery ip %(disc_ip)s is used on subnet %(net_label)s" -msgstr "" -"L'IP di rilevamento %(disc_ip)s viene utilizzato nella sottorete " -"%(net_label)s" - -#, python-format -msgid "Discovery ip %s is used on mgmt+data subnet" -msgstr "L'IP di rilevamento %s viene utilizzato nella sottorete gestione+dati" - -#, python-format -msgid "Dissociating volume %s " -msgstr "Dissociazione del volume %s" - -#, python-format -msgid "Domain id is %s." -msgstr "L'ID dominio è %s." - -#, python-format -msgid "Done copying image: %(id)s to volume: %(vol)s." -msgstr "Eseguita copia dell'immagine: %(id)s su volume: %(vol)s." - -#, python-format -msgid "Done copying volume %(vol)s to a new image %(img)s" -msgstr "Eseguita copia del volume %(vol)s in una nuova immagine %(img)s" - -#, python-format -msgid "" -"Downlevel GPFS Cluster Detected. GPFS encryption-at-rest feature not enabled " -"in cluster daemon level %(cur)s - must be at least at level %(min)s." -msgstr "" -"Rilevato un cluster GPFS di livello inferiore. La funzione encryption-at-" -"rest GPFS non è abilitata nel livello daemon del cluster %(cur)s - deve " -"essere almeno di livello %(min)s." - -msgid "Driver initialization completed successfully." -msgstr "Inizializzazione del driver completata correttamente." - -msgid "Driver post RPC initialization completed successfully." -msgstr "Post-inizializzazione RPC del driver completata correttamente." - -#, python-format -msgid "" -"E-series proxy API version %(version)s does not support full set of SSC " -"extra specs. 
The proxy version must be at at least %(min_version)s." -msgstr "" -"API proxy E-series versione %(version)s non supporta la serie completa di " -"specifiche supplementari SSC. La versione proxy deve essere almeno " -"%(min_version)s. " - -#, python-format -msgid "E-series proxy API version %s does not support autosupport logging." -msgstr "" -"API proxy E-series versione %s non supporta l'accesso con supporto " -"automatico. " - -#, python-format -msgid "EQL-driver: Setup is complete, group IP is \"%s\"." -msgstr "Driver EQL: La configurazione è completa, l'IP gruppo è \"%s\"." - -#, python-format -msgid "EQL-driver: executing \"%s\"." -msgstr "Driver EQL: esecuzione di \"%s\"." - -#, python-format -msgid "Editing Volume %(vol)s with mask %(mask)s" -msgstr "Modifica del volume %(vol)s con maschera %(mask)s" - -msgid "Embedded mode detected." -msgstr "Rilevata modalità integrata." - -msgid "Enabling LVM thin provisioning by default because a thin pool exists." -msgstr "" -"Abilitazione del thin provisioning LVM per impostazione predefinita perché " -"esiste un pool thin." - -msgid "Enabling LVM thin provisioning by default because no LVs exist." -msgstr "" -"Abilitazione del thin provisioning LVM per impostazione predefinita perché " -"non esiste alcun LV." - -#, python-format -msgid "Entering extend_volume volume=%(vol)s new_size=%(size)s" -msgstr "Inserimento del volume extend_volume=%(vol)s new_size=%(size)s" - -#, python-format -msgid "" -"Entering initialize_connection volume=%(vol)s connector=%(conn)s location=" -"%(loc)s" -msgstr "" -"Inserimento del volume initialize_connection=%(vol)s connettore=%(conn)s " -"ubicazione=%(loc)s" - -#, python-format -msgid "" -"Entering terminate_connection volume=%(vol)s connector=%(conn)s location=" -"%(loc)s." 
-msgstr "" -"Inserimento del volume terminate_connection=%(vol)s connettore=%(conn)s " -"ubicazione=%(loc)s" - -#, python-format -msgid "Entering unmanage_volume volume = %s" -msgstr "Inserimento del volume unmanage_volume = %s" - -#, python-format -msgid "Exploring array subnet label %s" -msgstr "Esplorazione dell'etichetta della sottorete dell'array %s" - -#, python-format -msgid "Export record finished, backup %s exported." -msgstr "Esportazione record completata, backup %s esportato." - -#, python-format -msgid "Export record started, backup: %s." -msgstr "Esportazione record avviata, backup: %s." - -#, python-format -msgid "Exported lun %(vol_id)s on lun_id %(lun_id)s." -msgstr "Esportata lun %(vol_id)s su lun_id %(lun_id)s." - -msgid "Extend volume completed successfully." -msgstr "Estensione del volume completata correttamente." - -msgid "Extend volume request issued successfully." -msgstr "Richiesta di estensione del volume eseguita correttamente." - -#, python-format -msgid "" -"Extend volume: %(volumename)s, oldsize: %(oldsize)s, newsize: %(newsize)s." -msgstr "" -"Estendere il volume: %(volumename)s, oldsize: %(oldsize)s, newsize: " -"%(newsize)s." - -#, python-format -msgid "Extending volume %s." -msgstr "Estensione del volume %s." - -#, python-format -msgid "Extending volume: %(id)s New size: %(size)s GB" -msgstr "Estensione volume: %(id)s Nuova dimensione: %(size)s GB" - -#, python-format -msgid "" -"FAST: capacity stats for policy %(fastPolicyName)s on array %(arrayName)s. " -"total_capacity_gb=%(total_capacity_gb)lu, free_capacity_gb=" -"%(free_capacity_gb)lu." -msgstr "" -"FAST: statistiche di capacità per la politica %(fastPolicyName)s sull'array " -"%(arrayName)s. total_capacity_gb=%(total_capacity_gb)lu, free_capacity_gb=" -"%(free_capacity_gb)lu." - -msgid "Failed over to replication target successfully." -msgstr "Failover della destinazione di replica eseguito correttamente." - -#, python-format -msgid "Failed to create host: %(name)s. 
Check if it exists on the array." -msgstr "" -"Impossibile creare l'host: %(name)s. Controllare se esiste nell'array. " - -#, python-format -msgid "" -"Failed to create hostgroup: %(name)s. Please check if it exists on the array." -msgstr "" -"Impossibile creare il gruppo di host: %(name)s. Controllare se esiste " -"sull'array." - -#, python-format -msgid "Failed to open iet session list for %(vol_id)s: %(e)s" -msgstr "Impossibile aprire l'elenco di sessioni iet per %(vol_id)s: %(e)s" - -#, python-format -msgid "Failing backend to %s" -msgstr "Esito negativo del backend su %s" - -#, python-format -msgid "Fault thrown: %s" -msgstr "Errore generato: %s" - -#, python-format -msgid "Fetched vCenter server version: %s" -msgstr "Recuperata versione server vCenter: %s" - -#, python-format -msgid "Filtered targets for SAN is: %(targets)s" -msgstr "Le destinazioni filtrate per SAN sono: %(targets)s" - -#, python-format -msgid "Filtered targets for SAN is: %s" -msgstr "Le destinazioni filtrate per SAN sono: %s" - -#, python-format -msgid "Final filtered map for delete connection: %(i_t_map)s" -msgstr "Associazione filtrata finale per eliminazione connessione: %(i_t_map)s" - -#, python-format -msgid "Final filtered map for fabric: %(i_t_map)s" -msgstr "Associazione filtrata finale per fabric: %(i_t_map)s" - -#, python-format -msgid "Fixing previous mount %s which was not unmounted correctly." -msgstr "" -"Correzione del montaggio precedente %s che non è stato smontato " -"correttamente." - -#, python-format -msgid "Flash Cache policy set to %s" -msgstr "Politica Flash Cache impostata su %s" - -#, python-format -msgid "Flexvisor already unassigned volume %(id)s." -msgstr "Flexvisor ha già annullato l'assegnazione del volume %(id)s." - -#, python-format -msgid "Flexvisor snapshot %(id)s not existed." -msgstr "L'istantanea Flexvisor %(id)s non esiste." - -#, python-format -msgid "Flexvisor succeeded to add volume %(id)s to group %(cgid)s." 
-msgstr "" -"Flexvisor è riuscito ad aggiungere il volume %(id)s al gruppo %(cgid)s." - -#, python-format -msgid "Flexvisor succeeded to clone volume %(id)s." -msgstr "Flexvisor è riuscito a clonare il volume %(id)s." - -#, python-format -msgid "Flexvisor succeeded to create volume %(id)s from snapshot." -msgstr "Flexvisor è riuscito a creare il volume %(id)s dall'istantanea. " - -#, python-format -msgid "Flexvisor succeeded to create volume %(id)s." -msgstr "Flexvisor è riuscito a creare il volume %(id)s." - -#, python-format -msgid "Flexvisor succeeded to delete snapshot %(id)s." -msgstr "Flexvisor è riuscito ad eliminare l'istantanea %(id)s." - -#, python-format -msgid "Flexvisor succeeded to extend volume %(id)s." -msgstr "Flexvisor è riuscito ad estendere il volume %(id)s." - -#, python-format -msgid "Flexvisor succeeded to remove volume %(id)s from group %(cgid)s." -msgstr "Flexvisor è riuscito a rimuovere il volume %(id)s dal gruppo %(cgid)s." - -#, python-format -msgid "Flexvisor succeeded to unassign volume %(id)s." -msgstr "Flexvisor è riuscito ad annullare l'assegnazione del volume %(id)s." - -#, python-format -msgid "Flexvisor volume %(id)s does not exist." -msgstr "Il volume Flexvisor %(id)s non esiste." - -#, python-format -msgid "Folder %s does not exist, it was already deleted." -msgstr "La cartella %s non esiste, è già stata eliminata." - -msgid "Force upload to image is disabled, Force option will be ignored." -msgstr "" -"Il caricamento forzato nell'immagine è disabilitato, l'opzione Force verrà " -"ignorata." - -#, python-format -msgid "Found a temporary snapshot %(name)s" -msgstr "Trovata un'istantanea temporanea %(name)s" - -#, python-format -msgid "Free capacity for backend is: %(free)s, total capacity: %(total)s." -msgstr "" -"La capacità disponibile per il backend è: %(free)s, capacità totale: " -"%(total)s." 
- -#, python-format -msgid "Friendly zone name after forming: %(zonename)s" -msgstr "Nome zona breve dopo la formazione: %(zonename)s" - -#, python-format -msgid "Generating transfer record for volume %s" -msgstr "Generazione del record di trasferimento per il volume %s" - -msgid "Get all snapshots completed successfully." -msgstr "Richiamo di tutte le istantanee completato correttamente." - -msgid "Get all volumes completed successfully." -msgstr "Richiamo di tutti i volumi completato correttamente." - -#, python-format -msgid "Get domain by name response: %s" -msgstr "Ottieni dominio dalla risposta del nome: %s" - -msgid "Get snapshot metadata completed successfully." -msgstr "Richiamo dei metadati dell'istantanea completato correttamente." - -msgid "Get snapshot metadata value not implemented." -msgstr "Richiamo del valore dei metadati dell'istantanea non implementato." - -#, python-format -msgid "Get the default ip: %s." -msgstr "Richiama l'ip predefinito: %s." - -msgid "Get volume admin metadata completed successfully." -msgstr "Richiamo dei metadati di gestione del volume completato correttamente." - -msgid "Get volume image-metadata completed successfully." -msgstr "" -"Richiamo dei metadati dell'immagine del volume completato correttamente." - -msgid "Get volume metadata completed successfully." -msgstr "Richiamo dei metadati del volume completato correttamente." - -msgid "Getting getInitiatorGrpList" -msgstr "Richiamo di getInitiatorGrpList" - -#, python-format -msgid "Getting volume information for vol_name=%s" -msgstr "Richiamo delle informazioni sul volume per vol_name=%s" - -#, python-format -msgid "Going to perform request again %s with valid token." -msgstr "Eseguire di nuovo la richiesta %s con un token valido." 
- -#, python-format -msgid "HPE3PARCommon %(common_ver)s,hpe3parclient %(rest_ver)s" -msgstr "HPE3PARCommon %(common_ver)s,hpe3parclient %(rest_ver)s" - -#, python-format -msgid "HPELeftHand API version %s" -msgstr "Versione API HPELeftHand API %s" - -#, python-format -msgid "HTTP exception thrown: %s" -msgstr "Generata eccezione HTTP: %s" - -#, python-format -msgid "Hypermetro id: %(metro_id)s. Remote lun id: %(remote_lun_id)s." -msgstr "ID hypermetro: %(metro_id)s. ID lun remota: %(remote_lun_id)s." - -#, python-format -msgid "Ignored LU creation error \"%s\" while ensuring export." -msgstr "" -"Ignorato errore di creazione LU \"%s\" durante la verifica dell'esportazione." - -#, python-format -msgid "Ignored LUN mapping entry addition error \"%s\" while ensuring export." -msgstr "" -"Ignorato errore di aggiunta voce di associazione LUN \"%s\" durante la " -"verifica dell'esportazione." - -#, python-format -msgid "Ignored target creation error \"%s\" while ensuring export." -msgstr "" -"Ignorato errore di creazione destinazione \"%s\" durante la verifica " -"dell'esportazione." - -#, python-format -msgid "Ignored target group creation error \"%s\" while ensuring export." -msgstr "" -"Ignorato errore di creazione gruppo di destinazione \"%s\" durante la " -"verifica dell'esportazione." - -#, python-format -msgid "" -"Ignored target group member addition error \"%s\" while ensuring export." -msgstr "" -"Ignorato errore di aggiunta membro del gruppo di destinazione \"%s\" durante " -"la verifica dell'esportazione." - -#, python-format -msgid "Image %(pool)s/%(image)s is dependent on the snapshot %(snap)s." -msgstr "L'immagine %(pool)s/%(image)s è dipendente nell'istantanea %(snap)s." - -#, python-format -msgid "Image cloning unsuccessful for image %(image_id)s. Message: %(msg)s" -msgstr "" -"Clonazione dell'immagine non corretta per l'immagine %(image_id)s. 
"
-"Messaggio: %(msg)s"
-
-#, python-format
-msgid "Image download %(sz).2f MB at %(mbps).2f MB/s"
-msgstr "Download immagine di %(sz).2f MB su %(mbps).2f MB/s"
-
-#, python-format
-msgid "Image will locally be converted to raw %s"
-msgstr "L'immagine verrà convertita localmente nel formato raw %s"
-
-#, python-format
-msgid "Image-volume cache disabled for host %(host)s."
-msgstr "Cache image-volume disabilitata per l'host %(host)s."
-
-#, python-format
-msgid "Image-volume cache enabled for host %(host)s."
-msgstr "Cache image-volume abilitata per l'host %(host)s."
-
-#, python-format
-msgid "Import record id %s metadata from driver finished."
-msgstr "Importazione metadati id record %s da driver completata."
-
-#, python-format
-msgid "Import record started, backup_url: %s."
-msgstr "Importazione record avviata, backup_url: %s."
-
-#, python-format
-msgid "Imported %(fail)s to %(guid)s."
-msgstr "Importato %(fail)s in %(guid)s."
-
-#, python-format
-msgid "Initialize connection: %(volume)s."
-msgstr "Inizializza connessione: %(volume)s."
-
-msgid "Initialize volume connection completed successfully."
-msgstr ""
-"Inizializzazione della connessione del volume completata correttamente."
-
-#, python-format
-msgid "Initialized driver %(name)s version: %(vers)s"
-msgstr "Inizializzato driver %(name)s versione: %(vers)s"
-
-#, python-format
-msgid ""
-"Initializing RPC dependent components of volume driver %(driver_name)s "
-"(%(version)s)"
-msgstr ""
-"Inizializzazione dei componenti dipendenti RPC del driver del volume "
-"%(driver_name)s (%(version)s)"
-
-msgid "Initializing extension manager."
-msgstr "Inizializzazione gestore estensioni."
-
-#, python-format
-msgid ""
-"Initiator Name(s) %(initiatorNames)s are not on array %(storageSystemName)s."
-msgstr ""
-"Il nome iniziatore %(initiatorNames)s non si trova nell'array "
-"%(storageSystemName)s."
-
-#, python-format
-msgid ""
-"Initiator Name(s) %(initiatorNames)s are not on array %(storageSystemName)s. 
" -msgstr "" -"Il nome iniziatore %(initiatorNames)s non si trova nell'array " -"%(storageSystemName)s. " - -#, python-format -msgid "Initiator group name is %(grp)s for initiator %(iname)s" -msgstr "Il nome del gruppo iniziatore è %(grp)s per l'iniziatore %(iname)s" - -#, python-format -msgid "LUN %(id)s extended to %(size)s GB." -msgstr "LUN %(id)s estesa a %(size)s GB." - -#, python-format -msgid "LUN with given ref %s need not be renamed during manage operation." -msgstr "" -"La LUN con un determinato riferimento %s non deve essere ridenominata " -"durante l'operazione di gestione." - -#, python-format -msgid "" -"Leaving create_volume: %(volumeName)s Return code: %(rc)lu volume dict: " -"%(name)s." -msgstr "" -"Lasciare create_volume: %(volumeName)s Codice di ritorno: %(rc)lu volume " -"dict: %(name)s." - -#, python-format -msgid "Leaving delete_volume: %(volumename)s Return code: %(rc)lu." -msgstr "Lasciare delete_volume: %(volumename)s Codice di ritorno: %(rc)lu." - -#, python-format -msgid "Leaving initialize_connection: %s" -msgstr "Lasciare initialize_connection: %s" - -#, python-format -msgid "Loaded extension: %s" -msgstr "Estensione caricata: %s" - -#, python-format -msgid "" -"Logical Volume not found when querying LVM info. (vg_name=%(vg)s, lv_name=" -"%(lv)s" -msgstr "" -"Volume logico non trovato durante la query per le informazioni LVM. (vg_name=" -"%(vg)s, lv_name=%(lv)s" - -msgid "Manage existing volume completed successfully." -msgstr "Gestione del volume esistente completata correttamente." - -#, python-format -msgid "" -"Manage operation completed for LUN with new path %(path)s and uuid %(uuid)s." -msgstr "" -"Operazione di gestione completata per LUN con nuovo percorso %(path)s e uuid " -"%(uuid)s." - -#, python-format -msgid "" -"Manage operation completed for volume with new label %(label)s and wwn " -"%(wwn)s." -msgstr "" -"Operazione di gestione completata per volume con nuova etichetta %(label)s e " -"wwn %(wwn)s." 
- -#, python-format -msgid "Manage volume %s" -msgstr "Gestisci volume %s" - -msgid "Manage volume request issued successfully." -msgstr "Richiesta di gestione del volume eseguita correttamente." - -#, python-format -msgid "Masking view %(maskingViewName)s successfully deleted." -msgstr "Vista di mascheramento %(maskingViewName)s eliminata correttamente." - -#, python-format -msgid "Migrate Volume %(volume_id)s completed." -msgstr "Migrazione del volume %(volume_id)s completata." - -msgid "Migrate volume completed successfully." -msgstr "Migrazione del volume completata correttamente." - -msgid "Migrate volume completion issued successfully." -msgstr "Completamento della migrazione del volume eseguito correttamente." - -msgid "Migrate volume request issued successfully." -msgstr "Richiesta di migrazione del volume eseguita correttamente." - -#, python-format -msgid "Migrating using retype Volume: %(volume)s." -msgstr "Migrazione tramite la riscrittura del volume: %(volume)s." - -#, python-format -msgid "" -"Modifying %(volume_name)s snap_cpg from %(old_snap_cpg)s to %(new_snap_cpg)s." -msgstr "" -"Modifica di %(volume_name)s snap_cpg da %(old_snap_cpg)s a %(new_snap_cpg)s." - -#, python-format -msgid "Modifying %(volume_name)s userCPG from %(old_cpg)s to %(new_cpg)s" -msgstr "Modifica di %(volume_name)s userCPG da %(old_cpg)s a %(new_cpg)s" - -#, python-format -msgid "Modifying %s comments." -msgstr "Modifica di %s commenti." - -msgid "" -"Module PyWBEM not installed. Install PyWBEM using the python-pywbem package." -msgstr "" -"Modulo PyWBEM non installato. Installare PyWBEM utilizzando il package " -"python-pywbem." - -msgid "" -"Module PyWBEM not installed. Install PyWBEM using the python-pywbem package." -msgstr "" -"Modulo PyWBEM non installato. Installare PyWBEM utilizzando il package " -"python-pywbem." - -#, python-format -msgid "Mounting volume: %s ..." -msgstr "Montaggio del volume: %s ..." 
- -#, python-format -msgid "Mounting volume: %s succeeded" -msgstr "Montaggio del volume: %s eseguito correttamente" - -#, python-format -msgid "" -"NON-FAST: capacity stats for pool %(poolName)s on array %(arrayName)s " -"total_capacity_gb=%(total_capacity_gb)lu, free_capacity_gb=" -"%(free_capacity_gb)lu." -msgstr "" -"NON-FAST: statistiche di capacità per il pool %(poolName)s sull'array " -"%(arrayName)s total_capacity_gb=%(total_capacity_gb)lu, free_capacity_gb=" -"%(free_capacity_gb)lu." - -msgid "Need to remove FC Zone, building initiator target map" -msgstr "" -"È necessario rimuovere la zona FC, creazione dell'associazione di " -"destinazione dell'iniziatore" - -msgid "Need to remove FC Zone, building initiator target map." -msgstr "" -"È necessario rimuovere la zona FC, creazione dell'associazione di " -"destinazione dell'iniziatore." - -#, python-format -msgid "" -"NetApp driver of family %(storage_family)s and protocol %(storage_protocol)s " -"loaded." -msgstr "" -"Driver NetApp di famiglia %(storage_family)s e protocollo " -"%(storage_protocol)s caricato." - -#, python-format -msgid "New Cinder secure environment indicator file created at path %s." -msgstr "" -"Nuovo file indicatore dell'ambiente sicuro Cinder creato al percorso %s." - -#, python-format -msgid "" -"New size is equal to the real size from backend storage, no need to extend. " -"realsize: %(oldsize)s, newsize: %(newsize)s." -msgstr "" -"La nuova dimensione è uguale alla dimensione reale dell'archiviazione di " -"backend, non c'è necessità di estenderla. dimensione reale: %(oldsize)s, " -"nuova dimensione: %(newsize)s." - -#, python-format -msgid "New str info is: %s." -msgstr "La nuova informazione di stringa è: %s." - -#, python-format -msgid "No dpkg-query info found for %(pkg)s package." -msgstr "Nessuna informazione dpkg-query trovata per il pacchetto %(pkg)s." 
- -#, python-format -msgid "No igroup found for initiator %s" -msgstr "Nessun igroup trovato per l'iniziatore %s" - -#, python-format -msgid "No iscsi target present for volume id:%(vol_id)s: %(e)s" -msgstr "Nessuna destinazione iscsi presente per l'id volume:%(vol_id)s: %(e)s" - -#, python-format -msgid "No need to extend volume %s as it is already the requested new size." -msgstr "" -"Nessuna necessità di estendere il volume %s in quanto è già nella nuova " -"dimensione richiesta." - -#, python-format -msgid "" -"No replication synchronization session found associated with source volume " -"%(source)s on %(storageSystem)s." -msgstr "" -"Nessuna sessione di sincronizzazione replica trovata associata al volume di " -"origine %(source)s su %(storageSystem)s." - -#, python-format -msgid "" -"No restore point found for backup='%(backup)s' of volume %(volume)s although " -"base image is found - forcing full copy." -msgstr "" -"Nessun punto di ripristino trovato per il backup='%(backup)s' del volume " -"%(volume)s nonostante sia stata trovata l'immagine di base - forzatura della " -"copia completa." - -#, python-format -msgid "No rpm info found for %(pkg)s package." -msgstr "Nessuna informazione rpm trovata per il pacchetto %(pkg)s." - -#, python-format -msgid "No targets to add or remove connection for initiator: %(init_wwn)s" -msgstr "" -"Nessuna destinazione per aggiungere o rimuovere la connessione per " -"l'iniziatore: %(init_wwn)s" - -#, python-format -msgid "No volume found for CG: %(cg)s." -msgstr "Nessun volume trovato per CG: %(cg)s." - -#, python-format -msgid "Non fatal cleanup error: %s." -msgstr "Errore di ripulitura non grave: %s." - -#, python-format -msgid "OpenStack OS Version Info: %(info)s" -msgstr "Informazioni sulla versione OS OpenStack: %(info)s" - -#, python-format -msgid "" -"Origin volume %s appears to be removed, try to remove it from backend if it " -"is there." 
-msgstr "" -"Il volume di origine %s risulta rimosso, provare a rimuoverlo dal backend se " -"presente." - -#, python-format -msgid "Overwriting volume %(volume_id)s with restore of backup %(backup_id)s" -msgstr "" -"Sovrascrittura del volume %(volume_id)s con ripristino del backup " -"%(backup_id)s" - -#, python-format -msgid "Params for add volume request: %s." -msgstr "Parametri per la richiesta di aggiunta del volume: %s." - -#, python-format -msgid "Performing post clone for %s" -msgstr "Esecuzione del post clone per %s" - -#, python-format -msgid "Performing secure delete on volume: %s" -msgstr "Esecuzione di secure delete nel volume: %s" - -#, python-format -msgid "Pool id is %s." -msgstr "L'ID pool è %s." - -#, python-format -msgid "Port group instance name is %(foundPortGroupInstanceName)s." -msgstr "Il nome istanza del gruppo di porte è %(foundPortGroupInstanceName)s." - -#, python-format -msgid "Post clone resize LUN %s" -msgstr "LUN di ridimensionamento post clone %s" - -#, python-format -msgid "Prefer use target wwpn %(wwpn)s" -msgstr "Preferire l'utilizzo di wwpn di destinazione %(wwpn)s" - -#, python-format -msgid "Profile %s has been deleted." -msgstr "Il profilo %s è stato eliminato." - -#, python-format -msgid "Protection domain id: %(domain_id)s." -msgstr "ID dominio di protezione: %(domain_id)s." - -#, python-format -msgid "Protection domain name: %(domain_name)s." -msgstr "Nome dominio di protezione: %(domain_name)s." - -msgid "Proxy mode detected." -msgstr "Rilevata modalità proxy." - -#, python-format -msgid "Purging deleted rows older than age=%(age)d days from table=%(table)s" -msgstr "" -"Analisi delle righe eliminate più vecchie di giorni=%(age)d dalla tabella=" -"%(table)s" - -#, python-format -msgid "QoS: %s." -msgstr "QoS: %s." - -#, python-format -msgid "Query capacity stats response: %s." -msgstr "Risposta statistiche di capacità query: %s." 
- -msgid "" -"RBD striping not supported - ignoring configuration settings for rbd striping" -msgstr "" -"Striping RBD non supportato - le impostazioni di configurazione per lo " -"striping rbd vengono ignorate" - -#, python-format -msgid "RBD volume %s not found, allowing delete operation to proceed." -msgstr "" -"Volume RBD %s non trovato, consentita la continuazione dell'operazione di " -"eliminazione." - -#, python-format -msgid "" -"REST server IP: %(ip)s, port: %(port)s, username: %(user)s. Verify server's " -"certificate: %(verify_cert)s." -msgstr "" -"IP server REST: %(ip)s, port: %(port)s, nome utente: %(user)s. Verificare il " -"certificato del server: %(verify_cert)s." - -#, python-format -msgid "Re-using existing purity host %(host_name)r" -msgstr "Riutilizzo dell'host purity %(host_name)r esistente" - -msgid "Reconnected to coordination backend." -msgstr "Riconnesso al backend di coordinazione." - -msgid "Reconnecting to coordination backend." -msgstr "Riconnessione al backend di coordinazione." - -#, python-format -msgid "Registering image in cache %s" -msgstr "Registrazione immagine nella cache %s" - -#, python-format -msgid "Regular file: %s created." -msgstr "File regolare: %s creato." - -#, python-format -msgid "" -"Relocating volume: %s to a different datastore due to insufficient disk " -"space on current datastore." -msgstr "" -"Riallocazione del volume: %s in un datastore diverso per spazio su disco " -"insufficiente nel datastore corrente." - -#, python-format -msgid "Remote return FC info is: %s." -msgstr "Le informazioni FC restituite remote sono: %s." - -msgid "Remove volume export completed successfully." -msgstr "Rimozione esportazione volume completata correttamente." - -#, python-format -msgid "Removed %s from cg." -msgstr "Rimosso %s da cg." 
-
-#, python-format
-msgid "Removing ACL from volume=%(vol)s for initiator group %(igrp)s"
-msgstr "Rimozione di ACL dal volume=%(vol)s per gruppo iniziatore %(igrp)s"
-
-#, python-format
-msgid "Removing iscsi_target for Volume ID: %s"
-msgstr "Rimozione di iscsi_target per l'ID volume: %s"
-
-#, python-format
-msgid "Removing iscsi_target for volume: %s"
-msgstr "Rimozione di iscsi_target per il volume: %s"
-
-#, python-format
-msgid "Removing iscsi_target for: %s"
-msgstr "Rimozione di iscsi_target per: %s"
-
-#, python-format
-msgid "Removing iscsi_target: %s"
-msgstr "Rimozione di iscsi_target: %s"
-
-#, python-format
-msgid "Removing non-active host: %(host)s from scheduler cache."
-msgstr "Rimozione dell'host non attivo: %(host)s dalla cache dello scheduler."
-
-#, python-format
-msgid "Removing volume %(v)s from consistency group %(cg)s."
-msgstr "Rimozione del volume %(v)s dal gruppo di coerenza %(cg)s."
-
-#, python-format
-msgid "Removing volumes from cg %s."
-msgstr "Rimozione volumi da cg %s."
-
-#, python-format
-msgid "Rename Volume %(volume_id)s completed."
-msgstr "Ridenominazione del volume %(volume_id)s completata."
-
-#, python-format
-msgid "Renaming %(id)s from %(current_name)s to %(new_name)s."
-msgstr "Ridenominazione di %(id)s da %(current_name)s a %(new_name)s."
-
-#, python-format
-msgid "Renaming backing VM: %(backing)s to %(new_name)s."
-msgstr "Ridenominazione VM di backup: %(backing)s in %(new_name)s."
-
-#, python-format
-msgid "Renaming existing snapshot %(ref_name)s to %(new_name)s"
-msgstr "Ridenominazione dell'istantanea esistente %(ref_name)s su %(new_name)s"
-
-#, python-format
-msgid "Renaming existing volume %(ref_name)s to %(new_name)s"
-msgstr "Ridenominazione del volume esistente %(ref_name)s su %(new_name)s"
-
-#, python-format
-msgid "Replication %(vol)s to %(dest)s."
-msgstr "Replica di %(vol)s su %(dest)s." 
- -#, python-format -msgid "Replication created for %(volname)s to %(destsc)s" -msgstr "Replica creata per %(volname)s su %(destsc)s" - -#, python-format -msgid "Replication is not configured on backend: %s." -msgstr "Replica non configurata sul backend: %s." - -#, python-format -msgid "Requested image %(id)s is not in raw format." -msgstr "L'immagine richiesta %(id)s non è nel formato non elaborato." - -#, python-format -msgid "Requested unified config: %(storage_family)s and %(storage_protocol)s." -msgstr "" -"Richiesta configurazione unificata: %(storage_family)s e " -"%(storage_protocol)s." - -msgid "Reserve volume completed successfully." -msgstr "Riserva del volume completata correttamente." - -#, python-format -msgid "" -"Reset backup status started, backup_id: %(backup_id)s, status: %(status)s." -msgstr "" -"Reimpostazione stato backup avviata, backup_id: %(backup_id)s, stato: " -"%(status)s." - -#, python-format -msgid "Resetting backup %s to available (was restoring)." -msgstr "" -"Reimpostazione del backup %s su disponibile (era in fase di ripristino)." - -#, python-format -msgid "Resetting backup %s to error (was creating)." -msgstr "Reimpostazione del backup %s su errore (era in fase di creazione)." - -msgid "Resetting cached RPC version pins." -msgstr "Reimpostazione dei pin della versione RPC memorizzata nella cache." - -#, python-format -msgid "" -"Resetting volume %(vol_id)s to previous status %(status)s (was backing-up)." -msgstr "" -"Reimpostazione del volume %(vol_id)s sullo stato precedente %(status)s (in " -"fase di backup)." - -#, python-format -msgid "Resizing LUN %s directly to new size." -msgstr "Ridimensionamento della LUN %s direttamente sulla nuova dimensione." - -#, python-format -msgid "Resizing LUN %s using clone operation." -msgstr "Ridimensionamento della LUN %s tramite l'operazione di clonazione." 
- -#, python-format -msgid "Resizing file to %sG" -msgstr "Ridimensionamento del file su %sG" - -#, python-format -msgid "Resizing file to %sG..." -msgstr "Ridimensionamento del file su %sG..." - -#, python-format -msgid "" -"Restore backup finished, backup %(backup_id)s restored to volume " -"%(volume_id)s." -msgstr "" -"Ripristino backup completato, backup: %(backup_id)s ripristinato su volume: " -"%(volume_id)s." - -#, python-format -msgid "Restore backup started, backup: %(backup_id)s volume: %(volume_id)s." -msgstr "" -"Ripristino backup avviato, backup: %(backup_id)s volume: %(volume_id)s." - -#, python-format -msgid "Restoring backup %(backup)s to volume %(volume)s." -msgstr "Ripristino del backup %(backup)s nel volume %(volume)s" - -#, python-format -msgid "Restoring backup %(backup_id)s to volume %(volume_id)s" -msgstr "Ripristino del backup %(backup_id)s nel volume %(volume_id)s" - -msgid "Restoring iSCSI target from configuration file" -msgstr "Ripristino della destinazione iSCSI dal file di configurazione" - -msgid "Resume volume delete completed successfully." -msgstr "Ripresa eliminazione del volume completata correttamente." - -#, python-format -msgid "Resuming delete on backup: %s." -msgstr "Ripresa dell'eliminazione al backup: %s." - -#, python-format -msgid "Return FC info is: %s." -msgstr "Le informazioni FC restituite sono: %s." - -#, python-format -msgid "Returning random Port Group: %(portGroupName)s." -msgstr "Viene restituito il gruppo di porte casuale: %(portGroupName)s." - -#, python-format -msgid "" -"Retype LUN(id: %(lun_id)s) smartcache from (name: %(old_name)s, id: " -"%(old_id)s) to (name: %(new_name)s, id: %(new_id)s) successfully." -msgstr "" -"Riscrittura di LUN(id: %(lun_id)s) smartcache da (name: %(old_name)s, id: " -"%(old_id)s) a (name: %(new_name)s, id: %(new_id)s) riuscita." 
-
-#, python-format
-msgid ""
-"Retype LUN(id: %(lun_id)s) smartpartition from (name: %(old_name)s, id: "
-"%(old_id)s) to (name: %(new_name)s, id: %(new_id)s) success."
-msgstr ""
-"Riscrittura di LUN(id: %(lun_id)s) smartpartition da (name: %(old_name)s, "
-"id: %(old_id)s) a (name: %(new_name)s, id: %(new_id)s) riuscita."
-
-#, python-format
-msgid ""
-"Retype LUN(id: %(lun_id)s) smartqos from %(old_qos_value)s to %(new_qos)s "
-"success."
-msgstr ""
-"Riscrittura di LUN(id: %(lun_id)s) smartqos da %(old_qos_value)s a "
-"%(new_qos)s riuscita."
-
-#, python-format
-msgid ""
-"Retype LUN(id: %(lun_id)s) smarttier policy from %(old_policy)s to "
-"%(new_policy)s success."
-msgstr ""
-"Riscrittura di LUN(id: %(lun_id)s) politica smarttier da %(old_policy)s a "
-"%(new_policy)s riuscita."
-
-#, python-format
-msgid "Retype Volume %(volume_id)s is completed."
-msgstr "La riscrittura del volume %(volume_id)s viene completata."
-
-#, python-format
-msgid "Retype Volume %(volume_id)s is done and migrated to pool %(pool_id)s."
-msgstr ""
-"La riscrittura del volume %(volume_id)s viene eseguita e migrata nel pool "
-"%(pool_id)s."
-
-#, python-format
-msgid ""
-"Retype revert %(volume_name)s snap_cpg from %(new_snap_cpg)s back to "
-"%(old_snap_cpg)s."
-msgstr ""
-"Ripristino riscrittura %(volume_name)s snap_cpg da %(new_snap_cpg)s a "
-"%(old_snap_cpg)s."
-
-msgid "Retype volume completed successfully."
-msgstr "Riscrittura del volume completata correttamente. "
-
-msgid "Retype volume request issued successfully."
-msgstr "Richiesta di riscrittura del volume eseguita correttamente. "
-
-msgid "Retype was to same Storage Profile."
-msgstr "La riscrittura è stata eseguita nello stesso profilo di archiviazione."
-
-msgid "Roll detaching of volume completed successfully."
-msgstr "Esecuzione dello scollegamento del volume completata correttamente." 
- -#, python-format -msgid "Running with vmemclient version: %s" -msgstr "Esecuzione con versione vmemclient: %s" - -#, python-format -msgid "SC server created %s" -msgstr "Server SC creato %s" - -#, python-format -msgid "" -"ScaleIO copy_image_to_volume volume: %(vol)s image service: %(service)s " -"image id: %(id)s." -msgstr "" -"ScaleIO copy_image_to_volume volume: %(vol)s servizio immagine: %(service)s " -"id immagine: %(id)s." - -#, python-format -msgid "" -"ScaleIO copy_volume_to_image volume: %(vol)s image service: %(service)s " -"image meta: %(meta)s." -msgstr "" -"ScaleIO copy_volume_to_image volume: %(vol)s servizio immagine: %(service)s " -"meta immagine: %(meta)s." - -#, python-format -msgid "" -"ScaleIO create cloned volume: source volume %(src)s to target volume %(tgt)s." -msgstr "" -"ScaleIO ha creato il volume clonato: volume di origine %(src)s nel volume di " -"destinazione %(tgt)s." - -#, python-format -msgid "" -"ScaleIO create volume from snapshot: snapshot %(snapname)s to volume " -"%(volname)s." -msgstr "" -"ScaleIO crea il volume dall'istantanea: istantanea %(snapname)s in volume " -"%(volname)s." - -msgid "ScaleIO delete snapshot." -msgstr "ScaleIO elimina l'istantanea." - -#, python-format -msgid "ScaleIO extend volume: volume %(volname)s to size %(new_size)s." -msgstr "" -"ScaleIO estende il volume: volume %(volname)s alla dimensione di " -"%(new_size)s." - -#, python-format -msgid "ScaleIO get domain id by name request: %s." -msgstr "ScaleIO recupera l'ID dominio dalla richiesta del nome: %s." - -#, python-format -msgid "ScaleIO get pool id by name request: %s." -msgstr "ScaleIO recupera l'ID pool dalla richiesta del nome: %s." - -#, python-format -msgid "ScaleIO get volume by id request: %s." -msgstr "ScaleIO recupera il volume dalla richiesta dell'id: %s." - -#, python-format -msgid "ScaleIO rename volume request: %s." -msgstr "ScaleIO rinomina la richiesta del volume: %s." 
- -msgid "ScaleIO snapshot group of volumes" -msgstr "Gruppo di volumi istantanea ScaleIO" - -#, python-format -msgid "ScaleIO volume %(vol)s was renamed to %(new_name)s." -msgstr "Il volume ScaleIO %(vol)s è stato ridenominato %(new_name)s." - -#, python-format -msgid "" -"Secondary ssh hosts key file %(kwargs)s will be loaded along with %(conf)s " -"from /etc/cinder.conf." -msgstr "" -"I file di chiavi host ssh secondari %(kwargs)s saranno caricati insieme a " -"%(conf)s da /etc/cinder.conf." - -msgid "" -"Service not found for updating active_backend_id, assuming default for " -"driver init." -msgstr "" -"Servizio non trovato per l'aggiornamento di active_backend_id, viene " -"utilizzato il valore predefinito per l'inizializzazione del driver." - -msgid "Session might have expired. Trying to relogin" -msgstr "La sessione potrebbe essere scaduta. Provare a rieseguire il login" - -msgid "Set backend status to frozen successfully." -msgstr "" -"Impostazione dello stato di backend su bloccato eseguita correttamente." - -#, python-format -msgid "Set newly managed Cinder volume name to %(name)s." -msgstr "Impostare il nome volume Cinder appena gestito su %(name)s." - -#, python-format -msgid "Setting host %(host)s to %(state)s." -msgstr "Impostazione dell'host %(host)s su %(state)s." - -#, python-format -msgid "Setting snapshot %(snap)s to online_flag %(flag)s" -msgstr "Impostazione dell'istantanea %(snap)s su online_flag %(flag)s" - -#, python-format -msgid "Setting volume %(vol)s to online_flag %(flag)s" -msgstr "Impostazione del volume %(vol)s su online_flag %(flag)s" - -#, python-format -msgid "" -"Skipping add target %(target_array)s to protection group %(pgname)s since " -"it's already added." -msgstr "" -"Ignorare l'aggiunta della destinazione %(target_array)s al gruppo di " -"protezione %(pgname)s perché è già aggiunta." - -#, python-format -msgid "" -"Skipping allow pgroup %(pgname)s on target array %(target_array)s since it " -"is already allowed." 
-msgstr "" -"Ignorare l'autorizzazione di pgroup %(pgname)s sull'array di destinazione " -"%(target_array)s perché è già autorizzato." - -#, python-format -msgid "Skipping deletion of volume %s as it does not exist." -msgstr "Ignorare l'eliminazione del volume %s perché non esiste." - -msgid "Skipping ensure_export. Found existing iSCSI target." -msgstr "Ignorare ensure_export. Trovata destinazione iSCSI esistente." - -#, python-format -msgid "" -"Skipping image volume %(id)s because it is not accessible by current Tenant." -msgstr "" -"Ignorare il volume dell'immagine %(id)s perché non è accessibile dal Tenant " -"corrente." - -#, python-format -msgid "" -"Skipping remove_export. No iscsi_target is presently exported for volume: %s" -msgstr "" -"remove_export viene ignorato. Nessun iscsi_target viene al momento esportato " -"per il volume: %s" - -#, python-format -msgid "Skipping remove_export. No iscsi_target provisioned for volume: %s" -msgstr "" -"remove_export viene ignorato. Nessun provisioning di iscsi_target per il " -"volume: %s" - -#, python-format -msgid "Smb share %(share)s Total size %(size)s Total allocated %(allocated)s" -msgstr "" -"Condivisione smb %(share)s Dimensione totale %(size)s Totale allocato " -"%(allocated)s" - -#, python-format -msgid "Snapshot %(disp)s '%(new)s' is now being managed." -msgstr "L'istantanea %(disp)s '%(new)s' è ora in fase di gestione." - -#, python-format -msgid "" -"Snapshot %(disp)s '%(vol)s' is no longer managed. Snapshot renamed to " -"'%(new)s'." -msgstr "" -"L'istantanea %(disp)s '%(vol)s' non viene più gestita. Istantanea " -"ridenominata in '%(new)s'." - -#, python-format -msgid "" -"Snapshot %(folder)s@%(snapshot)s does not exist, it was already deleted." -msgstr "" -"L'istantanea %(folder)s@%(snapshot)s non esiste, è già stata eliminata." - -#, python-format -msgid "" -"Snapshot %(folder)s@%(snapshot)s has dependent clones, it will be deleted " -"later." 
-msgstr "" -"L'istantanea %(folder)s@%(snapshot)s ha cloni dipendenti, verrà eliminata " -"successivamente." - -#, python-format -msgid "Snapshot %s created successfully." -msgstr "Istantanea %s: creata correttamente." - -#, python-format -msgid "Snapshot %s does not exist in backend." -msgstr "L'istantanea %s non esiste nel backend." - -#, python-format -msgid "Snapshot %s does not exist, it seems it was already deleted." -msgstr "L'istantanea %s non esiste, sembra che sia stata già eliminata." - -#, python-format -msgid "Snapshot %s does not exist, it was already deleted." -msgstr "L'istantanea %s non esiste, è già stata eliminata." - -#, python-format -msgid "Snapshot %s has dependent clones, will be deleted later." -msgstr "L'istantanea %s ha cloni dipendenti, verrà eliminata successivamente." - -#, python-format -msgid "Snapshot %s not found" -msgstr "Istantanea %s non trovata" - -#, python-format -msgid "Snapshot '%(ref)s' renamed to '%(new)s'." -msgstr "Istantanea '%(ref)s' ridenominata in '%(new)s'." - -msgid "Snapshot create request issued successfully." -msgstr "Richiesta di creazione dell'istantanea eseguita correttamente." - -#, python-format -msgid "" -"Snapshot creation %(cloneName)s completed. Source Volume: %(sourceName)s." -msgstr "" -"Creazione dell'istantanea %(cloneName)s completata. Volume di origine: " -"%(sourceName)s." - -msgid "Snapshot delete request issued successfully." -msgstr "Richiesta di eliminazione dell'istantanea eseguita correttamente." - -msgid "Snapshot force create request issued successfully." -msgstr "Richiesta di creazione forzata dell'istantanea eseguita correttamente." - -#, python-format -msgid "" -"Snapshot record for %s is not present, allowing snapshot_delete to proceed." -msgstr "" -"Il record dell'istantanea per %s non è presente, consentita la continuazione " -"dell'operazione di eliminazione dell'istantanea." - -msgid "Snapshot retrieved successfully." -msgstr "Istantanea richiamata correttamente." 
- -#, python-format -msgid "Snapshot volume %(vol)s into snapshot %(id)s." -msgstr "Volume dell'istantanea %(vol)s nell'istantanea %(id)s." - -#, python-format -msgid "Snapshot volume response: %s." -msgstr "Risposta del volume dell'istantanea: %s." - -#, python-format -msgid "Snapshot: %(snapshot)s: not found on the array." -msgstr "Istantanea : %(snapshot)s: non trovata nell'array." - -#, python-format -msgid "Source Snapshot: %s" -msgstr "Istantanea di origine: %s" - -#, python-format -msgid "" -"Source and destination ZFSSA shares are the same. Do nothing. volume: %s" -msgstr "" -"Le condivisioni di origine e destinazione ZFSSA sono le stesse. Non eseguire " -"alcuna operazione. volume: %s" - -#, python-format -msgid "Start to create cgsnapshot for consistency group: %(group_name)s" -msgstr "Inizia a creare l'istantanea per il gruppo di coerenza: %(group_name)s" - -#, python-format -msgid "Start to create consistency group: %(group_name)s id: %(id)s" -msgstr "Inizia a creare il gruppo di coerenza: id %(group_name)s: %(id)s" - -#, python-format -msgid "Start to delete consistency group: %(cg_name)s" -msgstr "Inizia a eliminare il gruppo di coerenza: %(cg_name)s" - -#, python-format -msgid "Starting %(topic)s node (version %(version_string)s)" -msgstr "Avvio del nodo %(topic)s (versione %(version_string)s)" - -#, python-format -msgid "Starting volume driver %(driver_name)s (%(version)s)" -msgstr "Avvio del driver del volume %(driver_name)s (%(version)s)" - -#, python-format -msgid "Storage Group %(storageGroupName)s successfully deleted." -msgstr "Gruppo di archiviazione %(storageGroupName)s eliminato correttamente." - -#, python-format -msgid "Storage group not associated with the policy. Exception is %s." -msgstr "Gruppo di archiviazione non associato alla politica. L'eccezione è %s." - -#, python-format -msgid "" -"Storage pools names: %(pools)s, storage pool name: %(pool)s, pool id: " -"%(pool_id)s." 
-msgstr "" -"Nomi pool di archiviazione: %(pools)s, nome pool di archiviazione: %(pool)s, " -"id pool: %(pool_id)s." - -#, python-format -msgid "Successful login by user %s" -msgstr "Login corretto da parte dell'utente %s" - -#, python-format -msgid "Successfully added %(volumeName)s to %(sgGroupName)s." -msgstr "Aggiunto correttamente %(volumeName)s a %(sgGroupName)s." - -#, python-format -msgid "Successfully copied disk at: %(src)s to: %(dest)s." -msgstr "Copiato correttamente disco su: %(src)s in: %(dest)s." - -#, python-format -msgid "Successfully create volume %s" -msgstr "Creazione del volume %s eseguita correttamente " - -#, python-format -msgid "" -"Successfully created a CloudByte volume [%(cb_vol)s] w.r.t OpenStack volume " -"[%(stack_vol)s]." -msgstr "" -"Creato correttamente un volume CloudByte [%(cb_vol)s] in relazione al volume " -"OpenStack [%(stack_vol)s]." - -#, python-format -msgid "Successfully created clone: %s." -msgstr "Creato correttamente clone: %s." - -#, python-format -msgid "" -"Successfully created snapshot: %(snap)s for volume backing: %(backing)s." -msgstr "" -"Creata correttamente istantanea: %(snap)s per backup del volume: %(backing)s." - -#, python-format -msgid "Successfully created snapshot: %s." -msgstr "Creata correttamente istantanea: %s." - -#, python-format -msgid "Successfully created volume backing: %s." -msgstr "Backup del volume creato correttamente: %s." - -#, python-format -msgid "Successfully deleted %s." -msgstr "Eliminato correttamente %s" - -#, python-format -msgid "Successfully deleted file: %s." -msgstr "Eliminato correttamente file: %s." - -#, python-format -msgid "Successfully deleted snapshot: %(name)s of backing: %(backing)s." -msgstr "Eliminata correttamente istantanea: %(name)s di backup: %(backing)s." - -#, python-format -msgid "Successfully deleted snapshot: %s" -msgstr "Eliminata correttamente istantanea: %s" - -#, python-format -msgid "Successfully deleted snapshot: %s." 
-msgstr "Eliminata correttamente istantanea: %s." - -#, python-format -msgid "" -"Successfully deleted volume [%(cb_vol)s] at CloudByte corresponding to " -"OpenStack volume [%(stack_vol)s]." -msgstr "" -"Eliminato correttamente il volume [%(cb_vol)s] su CloudByte corrispondente " -"al volume OpenStack [%(stack_vol)s]." - -#, python-format -msgid "Successfully deleted volume: %s" -msgstr "Eliminato correttamente volume: %s" - -#, python-format -msgid "Successfully extended virtual disk: %(path)s to %(size)s GB." -msgstr "" -"Disco virtuale esteso correttamente: %(path)s alla dimensione di: %(size)s " -"GB." - -#, python-format -msgid "Successfully extended volume %(volume_id)s to size %(size)s." -msgstr "Volume esteso correttamente %(volume_id)s alla dimensione di %(size)s." - -#, python-format -msgid "Successfully extended volume: %(vol)s to size: %(size)s GB." -msgstr "Esteso correttamente volume: %(vol)s alla dimensione di: %(size)s GB." - -#, python-format -msgid "Successfully got volume information for volume %s" -msgstr "Informazioni sul volume richiamate correttamente per il volume %s" - -#, python-format -msgid "Successfully initialized connection with volume: %(volume_id)s." -msgstr "Connessione inizializzata correttamente con volume: %(volume_id)s." - -#, python-format -msgid "" -"Successfully initialized connection. target_wwn: %(target_wwn)s, " -"initiator_target_map: %(initiator_target_map)s, lun: %(target_lun)s." -msgstr "" -"Connessione inizializzata correttamente. target_wwn: %(target_wwn)s, " -"initiator_target_map: %(initiator_target_map)s, lun: %(target_lun)s." - -#, python-format -msgid "" -"Successfully moved volume backing: %(backing)s into the folder: %(fol)s." -msgstr "" -"Spostato correttamente backup del volume: %(backing)s nella cartella: " -"%(fol)s." - -#, python-format -msgid "" -"Successfully relocated volume backing: %(backing)s to datastore: %(ds)s and " -"resource pool: %(rp)s." 
-msgstr "" -"Rilocato correttamente backup del volume: %(backing)s in datastore: %(ds)s e " -"pool di risorse: %(rp)s." - -msgid "Successfully retrieved InitiatorGrpList" -msgstr "InitiatorGrpList richiamato correttamente" - -#, python-format -msgid "Successfully setup driver: %(driver)s for server: %(ip)s." -msgstr "Configurato correttamente driver: %(driver)s per server: %(ip)s." - -#, python-format -msgid "Successfully setup replication for %s." -msgstr "Configurata correttamente replica per %s." - -#, python-format -msgid "Successfully terminated connection for volume: %(volume_id)s." -msgstr "Connessione terminata correttamente per volume: %(volume_id)s." - -#, python-format -msgid "" -"Successfully update volume stats. backend: %(volume_backend_name)s, vendor: " -"%(vendor_name)s, driver version: %(driver_version)s, storage protocol: " -"%(storage_protocol)s." -msgstr "" -"Statistiche del volume aggiornate correttamente. backend: " -"%(volume_backend_name)s, fornitore: %(vendor_name)s, versione driver: " -"%(driver_version)s, protocollo di archiviazione: %(storage_protocol)s." - -#, python-format -msgid "" -"Successfully updated CloudByte volume [%(cb_vol)s] corresponding to " -"OpenStack volume [%(ops_vol)s]." -msgstr "" -"Aggiornato correttamente un volume CloudByte [%(cb_vol)s] corrispondente al " -"volume OpenStack [%(ops_vol)s]." - -#, python-format -msgid "Switching volume %(vol)s to profile %(prof)s." -msgstr "Passaggio del volume %(vol)s a profilo %(prof)s." - -#, python-format -msgid "System %(id)s has %(status)s status." -msgstr "Il sistema %(id)s ha lo stato %(status)s." - -#, python-format -msgid "" -"System with controller addresses [%s] is not registered with web service." -msgstr "" -"Il sistema con indirizzi controller [%s] non è registrato con il servizio " -"web." - -#, python-format -msgid "Target wwns in masking view %(maskingView)s: %(targetWwns)s." 
-msgstr "" -"wwns di destinazione nella vista di mascheramento %(maskingView)s: " -"%(targetWwns)s." - -#, python-format -msgid "Terminate connection: %(volume)s." -msgstr "Termina connessione: %(volume)s." - -msgid "Terminate volume connection completed successfully." -msgstr "Terminazione della connessione del volume completata correttamente." - -msgid "Thawed backend successfully." -msgstr "Backend sbloccato correttamente." - -msgid "" -"The NAS file operations will be run as non privileged user in secure mode. " -"Please ensure your libvirtd settings have been configured accordingly (see " -"section 'OpenStack' in the Quobyte Manual." -msgstr "" -"Le operazioni del file NAS verranno eseguite come utente non privilegiato in " -"modalità sicura. Verificare che le impostazioni libvirtd siano state " -"configurate di conseguenza (vedere la sezione 'OpenStack' nel manuale " -"Quobyte)." - -#, python-format -msgid "The QoS sepcs is: %s." -msgstr "Le specifiche QoS sono: %s." - -#, python-format -msgid "" -"The image was successfully converted, but image size is unavailable. src " -"%(src)s, dest %(dest)s. %(error)s" -msgstr "" -"L'immagine è stata convertita correttamente, ma la dimensione dell'immagine " -"non è disponibile. origine %(src)s, dest %(dest)s. %(error)s" - -#, python-format -msgid "" -"The multi-attach E-Series host group '%(label)s' already exists with " -"clusterRef %(clusterRef)s" -msgstr "" -"Il gruppo di host multi-attach E-Series '%(label)s' esiste già con " -"clusterRef %(clusterRef)s" - -#, python-format -msgid "The pool_name from extraSpecs is %(pool)s." -msgstr "Il nome_pool da extraSpecs è %(pool)s." - -#, python-format -msgid "The same hostid is: %s." -msgstr "Lo stesso id host è: %s." - -#, python-format -msgid "The storage group found is %(foundStorageGroupInstanceName)s." -msgstr "" -"Il gruppo di archiviazione trovato è %(foundStorageGroupInstanceName)s." - -#, python-format -msgid "The target instance device id is: %(deviceid)s." 
-msgstr "L'ID del dispositivo per l'istanza di destinazione è: %(deviceid)s." - -#, python-format -msgid "" -"The volume belongs to more than one storage group. Returning storage group " -"%(sgName)s." -msgstr "" -"Il volume appartiene a più di un gruppo di archiviazione. Viene restituito " -"il gruppo di archiviazione %(sgName)s." - -#, python-format -msgid "" -"There is no backing for the snapshotted volume: %(snap)s. Not creating any " -"backing for the volume: %(vol)s." -msgstr "" -"Non esiste alcun backup per il volume con istantanea: %(snap)s. Non viene " -"creato alcun backup per il volume: %(vol)s." - -#, python-format -msgid "" -"There is no backing for the source volume: %(src)s. Not creating any backing " -"for volume: %(vol)s." -msgstr "" -"Non esiste alcun backup per il volume di origine: %(src)s. Non viene creato " -"alcun backup per il volume: %(vol)s." - -#, python-format -msgid "There is no backing for the volume: %s. Need to create one." -msgstr "" -"Non è presente alcun backup per il volume: %s. È necessario crearne uno." - -#, python-format -msgid "There is no backing for volume: %s; no need to extend the virtual disk." -msgstr "" -"Non è presente alcun backup per il volume: %s; non è necessario estendere il " -"disco virtuale." - -#, python-format -msgid "There is no backing, and so there is no snapshot: %s." -msgstr "Non esiste un backup per cui non esiste alcuna istantanea: %s." - -#, python-format -msgid "There is no backing, so will not create snapshot: %s." -msgstr "Non esiste un backup per cui non verrà creata un'istantanea: %s." - -#, python-format -msgid "" -"There is no snapshot point for the snapshotted volume: %(snap)s. Not " -"creating any backing for the volume: %(vol)s." -msgstr "" -"Non esiste alcun punto di istantanea per il volume con istantanea: %(snap)s. " -"Non viene creato alcun backup per il volume: %(vol)s." - -msgid "Token is invalid, going to re-login and get a new one." 
-msgstr "Il token non è valido, rieseguire il login e ottenere un nuovo token." - -msgid "Transfer volume completed successfully." -msgstr "Trasferimento del volume completato correttamente." - -#, python-format -msgid "Tried to delete non-existent vdisk %s." -msgstr "Si è tentato di eliminare un vdisk non esistente %s." - -#, python-format -msgid "" -"Tried to delete snapshot %s, but was not found in Datera cluster. Continuing " -"with delete." -msgstr "" -"Si è tentato di eliminare l'istantanea %s, ma non è stata trovata nel " -"cluster Datera. Continuare con l'eliminazione. " - -#, python-format -msgid "" -"Tried to delete volume %s, but it was not found in the Datera cluster. " -"Continuing with delete." -msgstr "" -"Si è tentato di eliminare il volume %s, ma non è stato trovato nel cluster " -"Datera. Continuare con l'eliminazione. " - -#, python-format -msgid "" -"Tried to detach volume %s, but it was not found in the Datera cluster. " -"Continuing with detach." -msgstr "" -"Si è tentato di scollegare il volume %s, ma non è stato trovato nel cluster " -"Datera. Continuare con lo scollegamento. " - -#, python-format -msgid "Trying to unmap volume from all sdcs before deletion: %s." -msgstr "" -"Tentativo di annullare l'associazione del volume da tutti gli sdcs prima " -"dell'eliminazione: %s." - -msgid "Unable to accept transfer for volume, because it is in maintenance." -msgstr "" -"Impossibile accettare il trasferimento per il volume perché è in " -"manutenzione." - -msgid "Unable to attach volume, because it is in maintenance." -msgstr "Impossibile collegare il volume perché è in manutenzione." - -msgid "Unable to create the snapshot for volume, because it is in maintenance." -msgstr "" -"Impossibile creare l'istantanea per il volume perché è in manutenzione." - -msgid "Unable to detach volume, because it is in maintenance." -msgstr "Impossibile scollegare il volume perché è in manutenzione." 
- -msgid "Unable to get Cinder internal context, will not use image-volume cache." -msgstr "" -"Impossibile ottenere il contesto interno Cinder, la cache del volume " -"immagine non verrà utilizzata." - -#, python-format -msgid "Unable to get remote copy information for volume %s" -msgstr "Impossibile ottenere le informazioni di copia remota per il volume %s" - -msgid "" -"Unable to initialize the connection for volume, because it is in maintenance." -msgstr "" -"Impossibile inizializzare la connessione per il volume perché è in " -"manutenzione." - -#, python-format -msgid "Unable to serialize field '%s' - excluding from backup" -msgstr "Impossibile serializzare il campo '%s' - esclusione dal backup" - -msgid "Unable to update volume, because it is in maintenance." -msgstr "Impossibile aggiornare il volume perché è in manutenzione." - -#, python-format -msgid "Unexporting lun %s." -msgstr "Annullata esportazione lun %s." - -#, python-format -msgid "Unmanage snapshot with id: %s" -msgstr "Annulla gestione dell'istantanea con id: %s" - -#, python-format -msgid "Unmanage volume %(volume_id)s completed." -msgstr "Annullamento della gestione del volume %(volume_id)s completata." - -#, python-format -msgid "Unmanage volume %s" -msgstr "Annulla gestione del volume %s" - -#, python-format -msgid "Unmanage volume with id: %s" -msgstr "Annulla gestione volume con id: %s" - -#, python-format -msgid "Unmanaged LUN with current path %(path)s and uuid %(uuid)s." -msgstr "" -"Annullata operazione di gestione LUN con percorso corrente %(path)s e uuid " -"%(uuid)s." - -#, python-format -msgid "Unmanaged volume with current label %(label)s and wwn %(wwn)s." -msgstr "Volume non gestito con etichetta corrente %(label)s e wwn %(wwn)s." - -#, python-format -msgid "Unmap volume: %(volume)s." -msgstr "Annulla associazione volume: %(volume)s." - -msgid "Unreserve volume completed successfully." -msgstr "Annullamento della riserva del volume completata correttamente." 
- -#, python-format -msgid "" -"Update Consistency Group: %(group)s. This adds and/or removes volumes from a " -"CG." -msgstr "" -"Aggiorna gruppo di coerenza: %(group)s. I volumi vengono aggiunti e/o " -"rimossi da un CG." - -msgid "Update consistency group completed successfully." -msgstr "Aggiornamento del gruppo di coerenza completato correttamente." - -#, python-format -msgid "Update migrated volume %(new_volume)s completed." -msgstr "Aggiornamento del volume migrato %(new_volume)s completato." - -msgid "Update readonly setting on volume completed successfully." -msgstr "" -"Aggiornamento dell'impostazione di sola lettura sul volume completato " -"correttamente." - -msgid "Update snapshot metadata completed successfully." -msgstr "Aggiornamento dei metadati dell'istantanea completato correttamente." - -msgid "Update volume admin metadata completed successfully." -msgstr "" -"Aggiornamento dei metadati di gestione del volume completato correttamente." - -msgid "Update volume metadata completed successfully." -msgstr "Aggiornamento dei metadati del volume completato correttamente." - -#, python-format -msgid "Updated Consistency Group %s" -msgstr "Aggiornato gruppo di coerenza %s" - -#, python-format -msgid "" -"Updating consistency group %(id)s with name %(name)s description: " -"%(description)s add_volumes: %(add_volumes)s remove_volumes: " -"%(remove_volumes)s." -msgstr "" -"Aggiornamento gruppo di coerenza %(id)s di nome %(name)s descrizione: " -"%(description)s add_volumes: %(add_volumes)s remove_volumes: " -"%(remove_volumes)s." - -#, python-format -msgid "Updating snapshot %(id)s with info %(dict)s" -msgstr "Aggiornamento dell'istantanea %(id)s con informazioni %(dict)s" - -#, python-format -msgid "Updating status for CG: %(id)s." -msgstr "Aggiornamento dello stato per CG: %(id)s." 
- -#, python-format -msgid "Updating storage service catalog information for backend '%s'" -msgstr "" -"Aggiornamento delle informazioni sul catalogo del servizio di archiviazione " -"per il backend '%s'" - -msgid "Use ALUA when adding initiator to host." -msgstr "Utilizzare ALUA quando si aggiunge l'iniziatore all'host." - -msgid "Use CHAP when adding initiator to host." -msgstr "Utilizzare CHAP quando si aggiunge l'iniziatore all'host." - -#, python-format -msgid "" -"Using FC Zone Manager %(zm_version)s, Driver %(drv_name)s %(drv_version)s." -msgstr "" -"Utilizzo di FC Zone Manager %(zm_version)s, Driver %(drv_name)s " -"%(drv_version)s." - -#, python-format -msgid "Using FC lookup service %s." -msgstr "Utilizzo del servizio di ricerca FC %s." - -#, python-format -msgid "Using compute cluster(s): %s." -msgstr "Utilizzo di cluster di calcolo: %s." - -#, python-format -msgid "Using existing initiator group name: %(igGroupName)s." -msgstr "" -"Si sta utilizzando il nome del gruppo di iniziatori esistente: " -"%(igGroupName)s." - -msgid "" -"Using extra_specs for defining QoS specs will be deprecated in the N release " -"of OpenStack. Please use QoS specs." -msgstr "" -"L'utilizzo di extra_specs per definire le specifiche QoS risulterà obsoleto " -"nella release N di OpenStack. Utilizzare le specifiche QoS." - -#, python-format -msgid "Using overridden vmware_host_version from config: %s" -msgstr "Utilizzo della versione vmware_host_version sovrascritta da config: %s" - -#, python-format -msgid "Using pool %(pool)s instead of %(cpg)s" -msgstr "Utilizzo del pool %(pool)s anziché %(cpg)s" - -msgid "VF context is changed in the session." -msgstr "Il contesto VF viene modificato nella sessione." - -#, python-format -msgid "Value with type=%s is not serializable" -msgstr "Il valore con tipo=%s non è serializzabile" - -#, python-format -msgid "Virtual volume %(disp)s '%(new)s' is being retyped." -msgstr "Il volume virtuale %(disp)s '%(new)s' viene riscritto." 
- -#, python-format -msgid "Virtual volume %(disp)s '%(new)s' is now being managed." -msgstr "Il volume virtuale %(disp)s '%(new)s' è ora in fase di gestione." - -#, python-format -msgid "" -"Virtual volume %(disp)s '%(new)s' snapCPG is empty so it will be set to: " -"%(cpg)s" -msgstr "" -"Il volume virtuale %(disp)s '%(new)s' snapCPG è vuoto per cui verrà " -"impostato su: %(cpg)s" - -#, python-format -msgid "" -"Virtual volume %(disp)s '%(vol)s' is no longer managed. Volume renamed to " -"'%(new)s'." -msgstr "" -"Il volume virtuale %(disp)s '%(vol)s' non viene più gestito. Volume " -"ridenominato in '%(new)s'." - -#, python-format -msgid "Virtual volume %(disp)s successfully retyped to %(new_type)s." -msgstr "Volume virtuale %(disp)s riscritto correttamente in %(new_type)s." - -#, python-format -msgid "Virtual volume '%(ref)s' renamed to '%(new)s'." -msgstr "Volume virtuale '%(ref)s' ridenominato in '%(new)s'." - -#, python-format -msgid "Vol copy job completed for dest %s." -msgstr "Lavoro di copia del volume completato per destinazione %s." - -#, python-format -msgid "Volume %(volume)s does not have meta device members." -msgstr "Il volume %(volume)s non presenta membri di metadispositivi." - -#, python-format -msgid "" -"Volume %(volume)s is already mapped. The device number is %(deviceNumber)s." -msgstr "" -"Il volume %(volume)s è già associato. Il numero di dispositivo è " -"%(deviceNumber)s." - -#, python-format -msgid "Volume %(volumeName)s not in any storage group." -msgstr "Volume %(volumeName)s non presente in alcun gruppo di archiviazione." 
- -#, python-format -msgid "" -"Volume %(volume_id)s: being created as %(create_type)s with specification: " -"%(volume_spec)s" -msgstr "" -"Volume %(volume_id)s: creato come %(create_type)s con specifica: " -"%(volume_spec)s" - -#, python-format -msgid "Volume %(volume_name)s (%(volume_id)s): created successfully" -msgstr "Volume %(volume_name)s (%(volume_id)s): creato correttamente" - -#, python-format -msgid "Volume %s converted." -msgstr "Volume %s convertito." - -#, python-format -msgid "Volume %s created" -msgstr "Volume %s creato" - -#, python-format -msgid "Volume %s does not exist, it seems it was already deleted." -msgstr "Il volume %s non esiste, sembra che sia stato già eliminato." - -#, python-format -msgid "Volume %s has been transferred." -msgstr "Il volume %s è stato trasferito." - -#, python-format -msgid "Volume %s is mapping to multiple hosts." -msgstr "Il volume %s viene associato a più host." - -#, python-format -msgid "Volume %s is not mapped. No volume to unmap." -msgstr "" -"Il volume %s non è associato. Nessun volume di cui annullare l'associazione." - -#, python-format -msgid "Volume %s presented." -msgstr "Volume %s presentato." - -#, python-format -msgid "Volume %s retyped." -msgstr "Volume %s riscritto." - -#, python-format -msgid "Volume %s unmanaged." -msgstr "Volume %s non gestito." - -#, python-format -msgid "Volume %s will be deleted later." -msgstr "Il volume %s verrà eliminato successivamente." - -#, python-format -msgid "Volume %s: retyped successfully" -msgstr "Volume %s: riscritto correttamente" - -#, python-format -msgid "Volume already mapped, retrieving %(ig)s, %(vol)s" -msgstr "Volume già associato, richiamo di %(ig)s, %(vol)s" - -#, python-format -msgid "Volume copy %(size_in_m).2f MB at %(mbps).2f MB/s" -msgstr "Copia del volume %(size_in_m).2f MB su %(mbps).2f MB/s" - -#, python-format -msgid "Volume copy completed (%(size_in_m).2f MB at %(mbps).2f MB/s)." 
-msgstr "Copia del volume completata %(size_in_m).2f MB su %(mbps).2f MB/s" - -msgid "Volume created successfully." -msgstr "Volume creato correttamente." - -msgid "Volume detach called, but volume not attached." -msgstr "Scollegamento volume chiamato, ma volume non collegato." - -msgid "Volume info retrieved successfully." -msgstr "Informazioni sul volume richiamate correttamente." - -#, python-format -msgid "Volume mappings for %(name)s: %(mappings)s" -msgstr "Associazioni volume per %(name)s: %(mappings)s" - -#, python-format -msgid "Volume name changed from %(tmp)s to %(orig)s" -msgstr "Nome volume modificato da %(tmp)s a %(orig)s" - -#, python-format -msgid "Volume name changed from %(tmp)s to %(orig)s." -msgstr "Nome volume modificato da %(tmp)s a %(orig)s" - -msgid "Volume retrieved successfully." -msgstr "Volume richiamato correttamente." - -#, python-format -msgid "Volume service: %(label)s. Casted to: %(loc)s" -msgstr "Servizio volume: %(label)s. Eseguito cast per: %(loc)s" - -#, python-format -msgid "Volume status is: %s." -msgstr "Lo stato del volume è: %s." - -#, python-format -msgid "Volume type is %s." -msgstr "Il tipo di volume è %s." - -#, python-format -msgid "" -"Volume type: %(volume_type)s, storage pool name: %(pool_name)s, storage pool " -"id: %(pool_id)s, protection domain id: %(domain_id)s, protection domain " -"name: %(domain_name)s." -msgstr "" -"Tipo di volume: %(volume_type)s, nome pool di archiviazione: %(pool_name)s, " -"id pool di archiviazione: %(pool_id)s, id dominio di protezione: " -"%(domain_id)s, nome dominio di protezione: %(domain_name)s." - -msgid "Volume updated successfully." -msgstr "Volume aggiornato correttamente." - -#, python-format -msgid "Volume with given ref %s need not be renamed during manage operation." -msgstr "" -"Il volume con un determinato riferimento %s non deve essere ridenominato " -"durante l'operazione di gestione." 
- -#, python-format -msgid "" -"Volume: %(vol_id)s, size: %(vol_size)d is larger than backup: %(backup_id)s, " -"size: %(backup_size)d, continuing with restore." -msgstr "" -"Il volume: %(vol_id)s, dimensione: %(vol_size)d è maggiore del backup: " -"%(backup_id)s, dimensione: %(backup_size)d, continuare con il ripristino." - -#, python-format -msgid "WWPN on node %(node)s: %(wwpn)s." -msgstr "WWPN sul nodo %(node)s: %(wwpn)s." - -#, python-format -msgid "" -"Waiting for volume expansion of %(vol)s to complete, current remaining " -"actions are %(action)s. ETA: %(eta)s mins." -msgstr "" -"In attesa del completamento dell'espansione del volume di %(vol)s, le azioni " -"restanti correnti sono %(action)s. ETA: %(eta)s min." - -msgid "Waiting for web service array communication." -msgstr "In attesa della comunicazione dell'array del servizio web." - -msgid "Waiting for web service to validate the configured password." -msgstr "In attesa del servizio web per convalidare la password configurata." - -#, python-format -msgid "Will clone a volume from the image volume %(id)s." -msgstr "Un volume verrà clonato dal volume dell'immagine %(id)s." - -#, python-format -msgid "XtremIO SW version %s" -msgstr "XtremIO versione SW %s" - -#, python-format -msgid "ZFSSA version: %s" -msgstr "Versione ZFSSA: %s" - -#, python-format -msgid "Zone exists in I-T mode. Skipping zone creation %s" -msgstr "La zona esiste in modalità I-T. Ignorare la creazione zone per %s" - -#, python-format -msgid "Zone exists in I-T mode. Skipping zone creation for %(zonename)s" -msgstr "" -"La zona esiste in modalità I-T. Ignorare la creazione zone per %(zonename)s" - -#, python-format -msgid "Zone map to add: %s" -msgstr "Associazione zone da aggiungere: %s" - -msgid "" -"Zone name created using prefix because either host name or storage system is " -"none." -msgstr "" -"Nome zona creato utilizzando il prefisso perché il nome host o il sistema di " -"archiviazione sono none." 
- -msgid "Zone name created using prefix because host name is none." -msgstr "Nome zona creato utilizzando il prefisso perché il nome host è none." - -#, python-format -msgid "Zoning Policy: %s, not recognized" -msgstr "Politica di configurazione zone %s non riconosciuta" - -#, python-format -msgid "Zoning policy for Fabric %(policy)s" -msgstr "Politica di configurazione zone per Fabric %(policy)s" - -#, python-format -msgid "Zoning policy for Fabric %s" -msgstr "Politica di configurazione zone per Fabric %s" - -#, python-format -msgid "Zoning policy for fabric %(policy)s" -msgstr "Politica di configurazione zone per fabric %(policy)s" - -#, python-format -msgid "Zoning policy for fabric %s" -msgstr "Politica di configurazione zone per fabric %s" - -msgid "Zoning policy is not valid, no zoning will be performed." -msgstr "" -"La politica di configurazione zone non è valida, non verrà eseguita nessuna " -"configurazione zone." - -#, python-format -msgid "" -"_check_volume_copy_ops: Volume %(vol)s does not have the specified vdisk " -"copy operation: orig=%(orig)s new=%(new)s." -msgstr "" -"_check_volume_copy_ops: Il volume %(vol)s non presenta l'operazione di copia " -"vdisk specificata: orig=%(orig)s nuova=%(new)s." - -msgid "_delete_copysession, The copysession was already completed." -msgstr "_delete_copysession, La sessione di copia è già stata completata." - -#, python-format -msgid "" -"_delete_volume_setting, volumename:%(volumename)s, volume not found on " -"ETERNUS. " -msgstr "" -"_delete_volume_setting, nome volume:%(volumename)s, volume non trovato su " -"ETERNUS. " - -#, python-format -msgid "_get_tgt_ip_from_portgroup: Get ip: %s." -msgstr "_get_tgt_ip_from_portgroup: Richiamare ip: %s." - -#, python-format -msgid "_get_tgt_iqn: iSCSI target iqn is: %s." -msgstr "_get_tgt_iqn: iqn di destinazione iSCSI è: %s." - -#, python-format -msgid "_unmap_lun, volumename: %(volumename)s, volume is not mapped." 
-msgstr "_unmap_lun, volumename: %(volumename)s, volume non associato." - -#, python-format -msgid "_unmap_lun, volumename:%(volumename)s, volume not found." -msgstr "_unmap_lun, volumename:%(volumename)s, volume non trovato." - -#, python-format -msgid "" -"add_host_with_check. create host success. host name: %(name)s, host id: " -"%(id)s" -msgstr "" -"add_host_with_check. creazione host riuscita. nome host: %(name)s, id host: " -"%(id)s" - -#, python-format -msgid "add_host_with_check. host name: %(name)s, host id: %(id)s" -msgstr "add_host_with_check. nome host: %(name)s, id host: %(id)s" - -#, python-format -msgid "casted to %s" -msgstr "eseguito cast per %s" - -#, python-format -msgid "cgsnapshot %s: created successfully" -msgstr "cgsnapshot %s: creata correttamente" - -#, python-format -msgid "cgsnapshot %s: deleted successfully" -msgstr "cgsnapshot %s: eliminata correttamente" - -#, python-format -msgid "cgsnapshot %s: deleting" -msgstr "Eliminazione di cgsnapshot %s:." - -#, python-format -msgid "create_cloned_volume, info: %s, Exit method." -msgstr "create_cloned_volume, info: %s, Metodo di uscita." - -#, python-format -msgid "" -"create_cloned_volume, target volume id: %(tid)s, source volume id: %(sid)s, " -"Enter method." -msgstr "" -"create_cloned_volume, id volume di destinazione: %(tid)s, id volume di " -"origine: %(sid)s, Immettere il metodo." - -#, python-format -msgid "" -"create_hostgroup_with_check. Create hostgroup success. hostgroup name: " -"%(name)s, hostgroup id: %(id)s" -msgstr "" -"create_hostgroup_with_check. creazione hostgroup riuscita. Nome hostgroup: " -"%(name)s, id hostgroup: %(id)s" - -#, python-format -msgid "" -"create_hostgroup_with_check. hostgroup name: %(name)s, hostgroup id: %(id)s" -msgstr "" -"create_hostgroup_with_check. nome gruppo host: %(name)s, id gruppo host: " -"%(id)s" - -#, python-format -msgid "create_snapshot, info: %s, Exit method." -msgstr "create_snapshot, info: %s, Metodo di uscita." 
- -#, python-format -msgid "create_snapshot, snap id: %(sid)s, volume id: %(vid)s, Enter method." -msgstr "" -"create_snapshot, snap id: %(sid)s, id volume: %(vid)s, Immettere il metodo." - -#, python-format -msgid "create_volume, info: %s, Exit method." -msgstr "create_volume, info: %s, Metodo di uscita." - -#, python-format -msgid "create_volume, volume id: %s, Enter method." -msgstr "create_volume, volume id: %s, Metodo di errore." - -#, python-format -msgid "create_volume_from_snapshot, info: %s, Exit method." -msgstr "create_volume_from_snapshot, info: %s, Metodo di uscita." - -#, python-format -msgid "" -"create_volume_from_snapshot, volume id: %(vid)s, snap id: %(sid)s, Enter " -"method." -msgstr "" -"create_volume_from_snapshot, id volume: %(vid)s, snap id: %(sid)s, Immettere " -"il metodo." - -#, python-format -msgid "" -"create_volume_from_snapshot: src_lun_id: %(src_lun_id)s, tgt_lun_id: " -"%(tgt_lun_id)s, copy_name: %(copy_name)s." -msgstr "" -"create_volume_from_snapshot: src_lun_id: %(src_lun_id)s, tgt_lun_id: " -"%(tgt_lun_id)s, copy_name: %(copy_name)s." - -#, python-format -msgid "delete_snapshot, delete: %s, Exit method." -msgstr "delete_snapshot, delete: %s, Metodo di uscita." - -#, python-format -msgid "delete_snapshot, snap id: %(sid)s, volume id: %(vid)s, Enter method." -msgstr "" -"delete_snapshot, snap id: %(sid)s, id volume: %(vid)s, Immettere il metodo." - -#, python-format -msgid "delete_volume, delete: %s, Exit method." -msgstr "delete_volume, delete: %s, Metodo di uscita." - -#, python-format -msgid "delete_volume, volume id: %s, Enter method." -msgstr "delete_volume, volume id: %s, Immettere il metodo." - -#, python-format -msgid "" -"do_mapping, lun_group: %(lun_group)s, view_id: %(view_id)s, lun_id: " -"%(lun_id)s." -msgstr "" -"do_mapping, lun_group: %(lun_group)s, view_id: %(view_id)s, lun_id: " -"%(lun_id)s." - -#, python-format -msgid "extend_volume, used pool name: %s, Exit method." 
-msgstr "extend_volume, nome pool utilizzato: %s, Metodo di uscita." - -#, python-format -msgid "extend_volume, volume id: %s, Enter method." -msgstr "extend_volume, id volume: %s, Immettere il metodo." - -#, python-format -msgid "igroup %(grp)s found for initiator %(iname)s" -msgstr "igroup %(grp)s trovato per l'iniziatore %(iname)s" - -#, python-format -msgid "initialize_connection success. Return data: %s." -msgstr "initialize_connection riuscita. Dati restituiti: %s." - -#, python-format -msgid "initialize_connection volume: %(volume)s, connector: %(connector)s" -msgstr "initialize_connection volume: %(volume)s, connettore: %(connector)s" - -#, python-format -msgid "initialize_connection, host lun id is: %s." -msgstr "initialize_connection, l'id lun host è: %s." - -#, python-format -msgid "initialize_connection, info: %s, Exit method." -msgstr "initialize_connection, info: %s, Metodo di uscita." - -#, python-format -msgid "initialize_connection, initiator: %(wwpns)s, LUN ID: %(lun_id)s." -msgstr "initialize_connection, iniziatore: %(wwpns)s, ID LUN: %(lun_id)s." - -#, python-format -msgid "" -"initialize_connection, iscsi_iqn: %(iscsi_iqn)s, target_ip: %(target_ip)s, " -"portgroup_id: %(portgroup_id)s." -msgstr "" -"initialize_connection, iscsi_iqn: %(iscsi_iqn)s, target_ip: %(target_ip)s, " -"portgroup_id: %(portgroup_id)s." - -#, python-format -msgid "initialize_connection, metadata is: %s." -msgstr "initialize_connection, i metadati sono: %s." - -#, python-format -msgid "" -"initialize_connection, volume id: %(vid)s, initiator: %(initiator)s, Enter " -"method." -msgstr "" -"initialize_connection, id volume: %(vid)s, initiator: %(initiator)s, " -"Immettere il metodo." - -#, python-format -msgid "" -"initialize_connection, volume: %(volume)s, target_lun: %(target_lun)s, " -"target_luns: %(target_luns)s, Volume is already mapped." 
-msgstr "" -"initialize_connection, volume: %(volume)s, target_lun: %(target_lun)s, " -"target_luns: %(target_luns)s, Il volume è già associato." - -#, python-format -msgid "" -"initialize_connection_fc, initiator: %(wwpns)s, volume name: %(volume)s." -msgstr "" -"initialize_connection_fc, iniziatore: %(wwpns)s, nome volume: %(volume)s." - -msgid "initiator has no password while using chap,adding it" -msgstr "" -"l'iniziatore non presenta alcuna password durante l'utilizzo di chap, la " -"password viene aggiunta" - -#, python-format -msgid "initiator name: %(initiator_name)s, LUN ID: %(lun_id)s." -msgstr "nome iniziatore: %(initiator_name)s, ID LUN: %(lun_id)s." - -#, python-format -msgid "" -"manage_existing_snapshot: snapshot %(exist)s on volume %(volume)s has been " -"renamed to %(id)s and is now managed by Cinder." -msgstr "" -"manage_existing_snapshot: l'istantanea %(exist)s sul volume %(volume)s è " -"stata ridenominata come %(id)s ed è ora gestita da Cinder." - -#, python-format -msgid "" -"migrate_volume_completion is cleaning up an error for volume %(vol1)s " -"(temporary volume %(vol2)s" -msgstr "" -"migrate_volume_completion sta ripulendo un errore per il volume %(vol1)s " -"(volume temporaneo %(vol2)s" - -#, python-format -msgid "new cloned volume: %s" -msgstr "nuovo volume clonato: %s" - -#, python-format -msgid "open_connection to %(ssn)s at %(ip)s" -msgstr "open_connection a %(ssn)s su %(ip)s" - -#, python-format -msgid "open_connection: Updating API version to %s" -msgstr "open_connection: Aggiornamento della versione API a %s" - -#, python-format -msgid "replication failover secondary is %(ssn)s" -msgstr "Il failover di replica secondario è %(ssn)s" - -#, python-format -msgid "setting volume %s to error_restoring (was restoring-backup)." -msgstr "" -"impostazione del volume %s su error_restoring (era in fase di ripristino del " -"backup)." 
- -#, python-format -msgid "snapshot %s doesn't exist" -msgstr "l'istantanea %s non esiste" - -#, python-format -msgid "source volume for cloning: %s" -msgstr "volume di origine per la clonazione: %s" - -#, python-format -msgid "stop_snapshot: snapshot name: %(snapshot)s, volume name: %(volume)s." -msgstr "stop_snapshot: nome istantanea: %(snapshot)s, nome volume: %(volume)s." - -#, python-format -msgid "terminate_connection volume: %(volume)s, connector: %(con)s" -msgstr "terminate_connection volume: %(volume)s, connettore: %(con)s" - -#, python-format -msgid "terminate_connection, return data is: %s." -msgstr "terminate_connection, i dati restituiti sono: %s." - -#, python-format -msgid "terminate_connection, unmap: %s, Exit method." -msgstr "terminate_connection, annullare l'associazione: %s, Metodo di uscita." - -#, python-format -msgid "" -"terminate_connection, volume id: %(vid)s, initiator: %(initiator)s, Enter " -"method." -msgstr "" -"terminate_connection, id volume: %(vid)s, iniziatore: %(initiator)s, " -"Immettere il metodo." - -#, python-format -msgid "terminate_connection: initiator name: %(ini)s, LUN ID: %(lunid)s." -msgstr "terminate_connection: nome iniziatore: %(ini)s, ID LUN: %(lunid)s." - -#, python-format -msgid "terminate_connection: wwpns: %(wwns)s, LUN ID: %(lun_id)s." -msgstr "terminate_connection: wwpns: %(wwns)s, ID LUN: %(lun_id)s." - -#, python-format -msgid "" -"terminate_connection_fc: volume name: %(volume)s, wwpns: %(wwns)s, lun_id: " -"%(lunid)s." -msgstr "" -"terminate_connection_fc: nome volume: %(volume)s, wwpns: %(wwns)s, lun_id: " -"%(lunid)s." - -#, python-format -msgid "tunevv failed because the volume '%s' has snapshots." -msgstr "tunevv non riuscito perché il volume '%s' contiene istantanee." - -#, python-format -msgid "username: %(username)s, verify_cert: %(verify)s." -msgstr "nome utente: %(username)s, verify_cert: %(verify)s." 
- -#, python-format -msgid "vol=%s" -msgstr "vol=%s" - -#, python-format -msgid "vol_name=%(name)s provider_location=%(loc)s" -msgstr "vol_name=%(name)s provider_location=%(loc)s" - -#, python-format -msgid "volume %(name)s extended to %(size)d." -msgstr "volume %(name)s esteso a %(size)d." - -#, python-format -msgid "volume %s doesn't exist" -msgstr "il volume %s non esiste" - -#, python-format -msgid "volume %s no longer exists in backend" -msgstr "Il volume %s non esiste più nel backend" - -#, python-format -msgid "volume: %(volume)s, lun params: %(params)s." -msgstr "volume: %(volume)s, parametri lun: %(params)s." - -msgid "volume_file does not support fileno() so skipping fsync()" -msgstr "volume_file does non supporta fileno(), ignorare fsync()" diff --git a/cinder/locale/it/LC_MESSAGES/cinder.po b/cinder/locale/it/LC_MESSAGES/cinder.po deleted file mode 100644 index d374962c0..000000000 --- a/cinder/locale/it/LC_MESSAGES/cinder.po +++ /dev/null @@ -1,9893 +0,0 @@ -# Translations template for cinder. -# Copyright (C) 2015 ORGANIZATION -# This file is distributed under the same license as the cinder project. -# -# Translators: -# FIRST AUTHOR , 2011 -# Andreas Jaeger , 2016. 
#zanata -msgid "" -msgstr "" -"Project-Id-Version: cinder 9.0.0.0rc2.dev202\n" -"Report-Msgid-Bugs-To: https://bugs.launchpad.net/openstack-i18n/\n" -"POT-Creation-Date: 2016-10-07 03:25+0000\n" -"MIME-Version: 1.0\n" -"Content-Type: text/plain; charset=UTF-8\n" -"Content-Transfer-Encoding: 8bit\n" -"PO-Revision-Date: 2016-04-01 12:31+0000\n" -"Last-Translator: Alessandra \n" -"Language: it\n" -"Plural-Forms: nplurals=2; plural=(n != 1);\n" -"Generated-By: Babel 2.0\n" -"X-Generator: Zanata 3.7.3\n" -"Language-Team: Italian\n" - -#, python-format -msgid "\t%s" -msgstr "\t%s" - -#, python-format -msgid "" -"\n" -"OpenStack Cinder version: %(version)s\n" -msgstr "" -"\n" -"OpenStack Cinder versione: %(version)s\n" - -#, python-format -msgid " but size is now %d" -msgstr " ma la dimensione ora è %d" - -#, python-format -msgid " but size is now %d." -msgstr " ma la dimensione ora è %d." - -msgid " or " -msgstr "oppure" - -#, python-format -msgid "%(attr)s is not set." -msgstr "%(attr)s non è impostato." - -#, python-format -msgid "" -"%(driver)s manage_existing cannot manage a volume connected to hosts. Please " -"disconnect this volume from existing hosts before importing" -msgstr "" -"%(driver)s manage_existing non può gestire un volume connesso agli host. " -"Disconnettere questo volume dagli host esistenti prima dell'importazione" - -#, python-format -msgid "%(err)s" -msgstr "%(err)s" - -#, python-format -msgid "" -"%(err)s\n" -"result: %(res)s." -msgstr "" -"%(err)s\n" -"risultato: %(res)s." - -#, python-format -msgid "%(error_message)s" -msgstr "%(error_message)s" - -#, python-format -msgid "%(exception)s: %(explanation)s" -msgstr "%(exception)s: %(explanation)s" - -#, python-format -msgid "%(file)s: Permission denied." -msgstr "%(file)s: Autorizzazione negata." 
- -#, python-format -msgid "" -"%(fun)s: Failed with unexpected CLI output.\n" -" Command: %(cmd)s\n" -" stdout: %(out)s\n" -" stderr: %(err)s" -msgstr "" -"%(fun)s: non riuscito con un output CLI imprevisto.\n" -" Comando: %(cmd)s\n" -" stdout: %(out)s\n" -" stderr: %(err)s" - -#, python-format -msgid "%(host)-25s\t%(availability_zone)-15s" -msgstr "%(host)-25s\t%(availability_zone)-15s" - -#, python-format -msgid "%(host)-25s\t%(zone)-15s" -msgstr "%(host)-25s\t%(zone)-15s" - -#, python-format -msgid "%(message)s" -msgstr "%(message)s" - -#, python-format -msgid "" -"%(message)s\n" -"Status Code: %(_status)s\n" -"Body: %(_body)s" -msgstr "" -"%(message)s\n" -"Codice dello stato: %(_status)s\n" -"Corpo: %(_body)s" - -#, python-format -msgid "%(message)s, subjectAltName: %(sanList)s." -msgstr "%(message)s, subjectAltName: %(sanList)s." - -#, python-format -msgid "" -"%(msg_type)s: creating NetworkPortal: ensure port %(port)d on ip %(ip)s is " -"not in use by another service." -msgstr "" -"%(msg_type)s: creazione portale di rete: accertarsi che la porta %(port)d " -"sull'ip %(ip)s non sia utilizzata da un altro servizio." - -#, python-format -msgid "" -"%(op)s: backup %(bck_id)s, volume %(vol_id)s failed. Backup object has " -"unexpected mode. Image or file backups supported, actual mode is " -"%(vol_mode)s." -msgstr "" -"%(op)s: backup %(bck_id)s, volume %(vol_id)s non riuscito. L'oggetto di " -"backup riporta una modalità non prevista. I backup di immagine o di file " -"sono supportati; la modalità corrente è %(vol_mode)s." 
- -#, python-format -msgid "%(service)s Service is not %(status)s on storage appliance: %(host)s" -msgstr "" -"Il servizio %(service)s non è %(status)s sull'applicazione di archiviazione: " -"%(host)s" - -#, python-format -msgid "%(value_name)s must be <= %(max_value)d" -msgstr "%(value_name)s deve essere <= %(max_value)d" - -#, python-format -msgid "%(value_name)s must be >= %(min_value)d" -msgstr "%(value_name)s deve essere >= %(min_value)d" - -#, python-format -msgid "" -"%(worker_name)s value of %(workers)d is invalid, must be greater than 0." -msgstr "" -"Il valore %(worker_name)s di %(workers)d non è valido, deve essere superiore " -"a 0." - -#, python-format -msgid "%s" -msgstr "%s" - -#, python-format -msgid "%s \"data\" is not in result." -msgstr "\"data\" %s non presente nel risultato." - -#, python-format -msgid "" -"%s cannot be accessed. Verify that GPFS is active and file system is mounted." -msgstr "" -"Impossibile accedere a %s. Verificare che GPFS sia attiva e che il file " -"system sia montato." - -#, python-format -msgid "%s cannot be resized using clone operation as it contains no blocks." -msgstr "" -"Impossibile ridimensionare %s mediante l'operazione clona, perché non " -"contiene blocchi" - -#, python-format -msgid "" -"%s cannot be resized using clone operation as it is hosted on compressed " -"volume" -msgstr "" -"Impossibile ridimensionare %s mediante l'operazione clona, perché ospitato " -"su un volume compresso" - -#, python-format -msgid "%s configuration option is not set." -msgstr "L'opzione di configurazione %s non è impostata. " - -#, python-format -msgid "%s does not exist." -msgstr "%s non esiste." - -#, python-format -msgid "%s is not a directory." -msgstr "%s non è una directory." - -#, python-format -msgid "%s is not installed" -msgstr "%s non installato" - -#, python-format -msgid "%s is not installed." -msgstr "%s non installato." 
- -#, python-format -msgid "%s is not set" -msgstr "%s non è impostato" - -#, python-format -msgid "%s is not set and is required for the replication device to be valid." -msgstr "" -"%s non è impostato ed è richiesto affinché il dispositivo di replica sia " -"valido." - -#, python-format -msgid "%s is not set." -msgstr "%s non impostato." - -#, python-format -msgid "%s must be a valid raw or qcow2 image." -msgstr "%s deve essere un'immagine qcow2 o raw validi." - -#, python-format -msgid "%s must be an absolute path." -msgstr "%s deve essere un percorso assoluto." - -#, python-format -msgid "%s must be an integer." -msgstr "%s deve essere un numero intero." - -#, python-format -msgid "%s not set in cinder.conf" -msgstr "%s non impostato in cinder.conf" - -#, python-format -msgid "%s not set." -msgstr "%s non impostato." - -#, python-format -msgid "" -"'%(prot)s' is invalid for flashsystem_connection_protocol in config file. " -"valid value(s) are %(enabled)s." -msgstr "" -"'%(prot)s' non è valido per flashsystem_connection_protocol nel file di " -"configurazione. i valori validi sono %(enabled)s." - -msgid "'active' must be present when writing snap_info." -msgstr "'active' deve essere presente durante la scrittura di snap_info." - -msgid "'consistencygroup_id' must be specified" -msgstr "è necessario specificare 'consistencygroup_id'" - -msgid "'qemu-img info' parsing failed." -msgstr "analisi di 'qemu-img info' non riuscita." - -msgid "'status' must be specified." -msgstr "È necessario specificare 'status'." - -msgid "'volume_id' must be specified" -msgstr "È necessario specificare 'volume_id'" - -#, python-format -msgid "" -"(Command: %(cmd)s) (Return Code: %(exit_code)s) (Stdout: %(stdout)s) " -"(Stderr: %(stderr)s)" -msgstr "" -"(Comando: %(cmd)s) (Codice di ritorno: %(exit_code)s) (Stdout: %(stdout)s) " -"(Stderr: %(stderr)s)" - -#, python-format -msgid "A LUN (HLUN) was not found. (LDEV: %(ldev)s)" -msgstr "Un LUN (HLUN) non è stato trovato. 
(LDEV: %(ldev)s)" - -msgid "A concurrent, possibly contradictory, request has been made." -msgstr "" -"È stata effettuata una richiesta contemporanea, probabilmente " -"contraddittoria." - -#, python-format -msgid "" -"A free LUN (HLUN) was not found. Add a different host group. (LDEV: %(ldev)s)" -msgstr "" -"Non è stato trovato un LUN (HLUN) disponibile. Aggiungere un diverso gruppo " -"di host. (LDEV: %(ldev)s)" - -#, python-format -msgid "A host group could not be added. (port: %(port)s, name: %(name)s)" -msgstr "" -"Impossibile aggiungere un gruppo di host. (porta: %(port)s, nome: %(name)s)" - -#, python-format -msgid "" -"A host group could not be deleted. (port: %(port)s, gid: %(gid)s, name: " -"%(name)s)" -msgstr "" -"Impossibile eliminare un gruppo di host. (porta: %(port)s, gid: %(gid)s, " -"nome: %(name)s)" - -#, python-format -msgid "A host group is invalid. (host group: %(gid)s)" -msgstr "Un gruppo di host non è valido. (gruppo host: %(gid)s)" - -#, python-format -msgid "A pair cannot be deleted. (P-VOL: %(pvol)s, S-VOL: %(svol)s)" -msgstr "Impossibile eliminare una coppia. (P-VOL: %(pvol)s, S-VOL: %(svol)s)" - -#, python-format -msgid "" -"A pair could not be created. The maximum number of pair is exceeded. (copy " -"method: %(copy_method)s, P-VOL: %(pvol)s)" -msgstr "" -"Impossibile creare una coppia. Il numero massimo di coppie è stato superato. " -"(metodo di copia: %(copy_method)s, P-VOL: %(pvol)s)" - -#, python-format -msgid "A parameter is invalid. (%(param)s)" -msgstr "Un parametro non è valido. (%(param)s)" - -#, python-format -msgid "A parameter value is invalid. (%(meta)s)" -msgstr "Il valore del parametro non è valido. (%(meta)s)" - -#, python-format -msgid "A pool could not be found. (pool id: %(pool_id)s)" -msgstr "Impossibile trovare un pool. (id pool: %(pool_id)s)" - -#, python-format -msgid "A snapshot status is invalid. (status: %(status)s)" -msgstr "Lo stato dell'istantanea non è valido. 
(stato: %(status)s)" - -msgid "A valid secondary target MUST be specified in order to failover." -msgstr "" -"Una destinazione secondaria valida DEVE essere specificata per poter " -"eseguire il failover." - -msgid "A volume ID or share was not specified." -msgstr "ID volume o condivisione non specificati. " - -#, python-format -msgid "A volume status is invalid. (status: %(status)s)" -msgstr "Lo stato del volume non è valido. (stato: %(status)s)" - -#, python-format -msgid "API %(name)s failed with error string %(err)s" -msgstr "API %(name)s non riuscita con stringa di errore %(err)s" - -#, python-format -msgid "" -"API Version String %(version)s is of invalid format. Must be of format " -"MajorNum.MinorNum." -msgstr "" -"Stringa della versione API %(version)s in formato non valido. Deve essere in " -"formato MajorNum.MinorNum." - -msgid "API key is missing for CloudByte driver." -msgstr "Chiave API mancante per il driver CloudByte." - -#, python-format -msgid "API response: %(response)s" -msgstr "Risposta dell'API: %(response)s" - -#, python-format -msgid "API response: %s" -msgstr "Risposta dell'API: %s" - -#, python-format -msgid "API version %(version)s is not supported on this method." -msgstr "Versione API %(version)s non supportata in questo metodo." - -msgid "API version could not be determined." -msgstr "Impossibile determinare la versione API." - -msgid "" -"About to delete child projects having non-zero quota. This should not be " -"performed" -msgstr "" -"Si sta per eliminare i progetti child con quota non zero. Ciò non dovrebbe " -"essere eseguito" - -msgid "Access list not available for public volume types." -msgstr "Elenco accessi non disponibile per i tipi di volume pubblici." - -msgid "Activate or deactivate QoS error." -msgstr "Errore di attivazione o disattivazione QoS. " - -msgid "Activate snapshot error." -msgstr "Errore di attivazione istantanea." - -msgid "Add FC port to host error." -msgstr "Errore di aggiunta porta FC all'host. 
" - -msgid "Add fc initiator to array error." -msgstr "Errore di aggiunta iniziatore fc all'array. " - -msgid "Add initiator to array error." -msgstr "Errore di aggiunta iniziatore all'array. " - -msgid "Add lun to cache error." -msgstr "Errore di aggiunta lun alla cache. " - -msgid "Add lun to partition error." -msgstr "Errore di aggiunta lun alla partizione. " - -msgid "Add mapping view error." -msgstr "Errore di aggiunta vista associazione. " - -msgid "Add new host error." -msgstr "Errore di aggiunta nuovo host. " - -msgid "Add port to port group error." -msgstr "Errore di aggiunta porta a gruppo di porte." - -#, python-format -msgid "" -"All the specified storage pools to be managed do not exist. Please check " -"your configuration. Non-existent pools: %s" -msgstr "" -"Tutti i pool di memoria specificati per essere gestiti non esistono. " -"Controllare la configurazione. Pool non esistenti: %s " - -msgid "An API version request must be compared to a VersionedMethod object." -msgstr "" -"La richiesta di una versione API deve essere confrontata a un oggetto " -"VersionedMethod." - -msgid "An error has occurred during backup operation" -msgstr "Si è verificato un errore durante l'operazione di backup" - -#, python-format -msgid "" -"An error occurred during the LUNcopy operation. LUNcopy name: " -"%(luncopyname)s. LUNcopy status: %(luncopystatus)s. LUNcopy state: " -"%(luncopystate)s." -msgstr "" -"Si è verificato un errore durante l'operazione di LUNcopy. Nome LUNcopy: " -"%(luncopyname)s. Stato LUNcopy: %(luncopystatus)s. Stato LUNcopy: " -"%(luncopystate)s." - -#, python-format -msgid "An error occurred while reading volume \"%s\"." -msgstr "Si è verificato un errore durante la lettura del volume \"%s\". " - -#, python-format -msgid "An error occurred while writing to volume \"%s\"." -msgstr "Si è verificato un errore durante la scrittura del volume \"%s\". " - -#, python-format -msgid "An iSCSI CHAP user could not be added. 
(username: %(user)s)" -msgstr "Impossibile aggiungere un utente iSCSI CHAP. (nome utente: %(user)s)" - -#, python-format -msgid "An iSCSI CHAP user could not be deleted. (username: %(user)s)" -msgstr "Un utente iSCSI CHAP non può essere eliminato. (nome utente: %(user)s)" - -#, python-format -msgid "" -"An iSCSI target could not be added. (port: %(port)s, alias: %(alias)s, " -"reason: %(reason)s)" -msgstr "" -"Impossibile aggiungere una destinazione iSCSI. (porta: %(port)s, alias: " -"%(alias)s, motivo: %(reason)s)" - -#, python-format -msgid "" -"An iSCSI target could not be deleted. (port: %(port)s, tno: %(tno)s, alias: " -"%(alias)s)" -msgstr "" -"Impossibile eliminare una destinazione iSCSI. (porta: %(port)s, tno: " -"%(tno)s, alias: %(alias)s)" - -msgid "An unknown exception occurred." -msgstr "E' stato riscontrato un errore sconosciuto" - -msgid "" -"An user with a token scoped to a subproject is not allowed to see the quota " -"of its parents." -msgstr "" -"A un utente con un token nell'ambito di un sottoprogetto non è consentito " -"visualizzare la quota dei relativi parent." - -msgid "Append port group description error." -msgstr "Errore di aggiunta descrizione gruppo di porte." - -#, python-format -msgid "" -"Applying the zones and cfgs to the switch failed (error code=%(err_code)s " -"error msg=%(err_msg)s." -msgstr "" -"Applicazione di zone e cfgs allo switch non riuscita (codice di errore=" -"%(err_code)s messaggio di errore=%(err_msg)s." - -#, python-format -msgid "Array does not exist or is offline. Current status of array is %s." -msgstr "L'array non esiste o è offline. Lo stato corrente dell'array è %s. " - -msgid "Associate host to hostgroup error." -msgstr "Errore di associazione host al gruppo host." - -msgid "Associate host to mapping view error." -msgstr "Errore di associazione host alla vista associazione." - -msgid "Associate initiator to host error." -msgstr "Errore di associazione iniziatore all'host. 
" - -msgid "Associate lun to QoS error." -msgstr "Errore di associazione lun a QoS. " - -msgid "Associate lun to lungroup error." -msgstr "Errore di associazione lun al gruppo lun." - -msgid "Associate lungroup to mapping view error." -msgstr "Errore di associazione gruppo lun alla vista associazione." - -msgid "Associate portgroup to mapping view error." -msgstr "Errore di associazione gruppo porte a vista associazione. " - -msgid "At least one valid iSCSI IP address must be set." -msgstr "È necessario impostare almeno un indirizzo IP iSCSI valido." - -#, python-format -msgid "Attempt to transfer %s with invalid auth key." -msgstr "" -"Tentativo di trasferimento %s con chiave di autenticazione (auth) non valida." - -#, python-format -msgid "Auth group [%s] details not found in CloudByte storage." -msgstr "Dettagli gruppo aut [%s] non trovati nella memoria CloudByte." - -msgid "Auth user details not found in CloudByte storage." -msgstr "Dettagli utente aut non trovati nella memoria CloudByte." - -#, python-format -msgid "Authentication failed, verify the switch credentials, error code %s." -msgstr "" -"Autenticazione non riuscita, verificare le credenziali dello switch, codice " -"di errore %s." - -#, python-format -msgid "Availability zone '%(s_az)s' is invalid." -msgstr "L'area di disponibilità '%(s_az)s' non è valida." - -msgid "Available categories:" -msgstr "Categorie disponibili:" - -msgid "" -"Back-end QoS specs are not supported on this storage family and ONTAP " -"version." -msgstr "" -"Spec QoS di backend non supportate in questa famiglia di memoria e versione " -"ONTAP." 
- -#, python-format -msgid "Backend doesn't exist (%(backend)s)" -msgstr "Il backend non esiste (%(backend)s)" - -#, python-format -msgid "Backend reports: %(message)s" -msgstr "Report Backend: %(message)s" - -msgid "Backend reports: item already exists" -msgstr "Report Backend: l'elemento esiste già" - -msgid "Backend reports: item not found" -msgstr "Report Backend: elemento non trovato" - -#, python-format -msgid "Backend service retry timeout hit: %(timeout)s sec" -msgstr "Raggiunto timeout di tentativi del servizio backend: %(timeout)s sec" - -msgid "Backend storage did not configure fiber channel target." -msgstr "" -"L'archivio di backend non ha configurato la destinazione del canale a fibre " -"ottiche" - -msgid "Backing up an in-use volume must use the force flag." -msgstr "" -"Il backup di un volume in uso deve utilizzare l'indicatore di forzatura. " - -#, python-format -msgid "Backup %(backup_id)s could not be found." -msgstr "Impossibile trovare il backup %(backup_id)s." - -msgid "Backup RBD operation failed" -msgstr "Operazione di backup RBD non riuscita" - -msgid "Backup already exists in database." -msgstr "Il backup esiste già nel database. " - -#, python-format -msgid "Backup driver reported an error: %(message)s" -msgstr "Il driver di backup ha riportato un errore: %(message)s" - -msgid "Backup id required" -msgstr "ID backup richiesto" - -msgid "Backup is not supported for GlusterFS volumes with snapshots." -msgstr "Backup non supportato per i volumi GlusterFS con istantanee." - -msgid "Backup is only supported for SOFS volumes without backing file." -msgstr "Il backup è supportato solo per i volumi SOFS senza file di backup." - -msgid "Backup is only supported for raw-formatted GlusterFS volumes." -msgstr "" -"Il backup è supportato solo per i volumi GlusterFS formattati in modo non " -"elaborato." - -msgid "Backup is only supported for raw-formatted SOFS volumes." 
-msgstr "" -"Il backup è supportato solo per i volumi SOFS formattati in modo non " -"elaborato." - -msgid "Backup operation of an encrypted volume failed." -msgstr "" -"L'operazione di backup di un volume codificato non ha avuto esito positivo." - -#, python-format -msgid "" -"Backup service %(configured_service)s does not support verify. Backup id " -"%(id)s is not verified. Skipping verify." -msgstr "" -"Il servizio di backup %(configured_service)s non supporta la verifica. L'ID " -"backup %(id)s non è verificato. Ignorare la verifica." - -#, python-format -msgid "" -"Backup service %(service)s does not support verify. Backup id %(id)s is not " -"verified. Skipping reset." -msgstr "" -"Il servizio di backup %(service)s non supporta la verifica. L'id backup " -"%(id)s non è verificato. Ignorare la reimpostazione." - -#, python-format -msgid "Backup should only have one snapshot but instead has %s" -msgstr "Il backup dovrebbe avere una sola istantanea ma invece ne ha %s" - -msgid "Backup status must be available" -msgstr "Lo stato del backup deve essere available" - -#, python-format -msgid "Backup status must be available and not %s." -msgstr "Lo stato del backup deve essere disponibile e non %s." - -msgid "Backup status must be available or error" -msgstr "Lo stato del backup deve essere available o error" - -msgid "Backup to be restored has invalid size" -msgstr "Il backup da ripristinare ha una dimensione non valida" - -#, python-format -msgid "Bad Status line returned: %(arg)s." -msgstr "Restituita riga di stato non corretto: %(arg)s." 
- -#, python-format -msgid "Bad key(s) in quota set: %s" -msgstr "Chiavi errate nell'insieme di quota: %s" - -#, python-format -msgid "" -"Bad or unexpected response from the storage volume backend API: %(data)s" -msgstr "" -"Risposta non corretta o imprevista dall'API di backend del volume di " -"archiviazione: %(data)s" - -#, python-format -msgid "Bad project format: project is not in proper format (%s)" -msgstr "" -"Formato progetto non corretto: il formato del progetto non è corretto (%s)" - -msgid "Bad response from Datera API" -msgstr "Risposta errata dall'API Datera" - -msgid "Bad response from SolidFire API" -msgstr "Risposta dell'API SolidFire non valida" - -#, python-format -msgid "Bad response from XMS, %s" -msgstr "Risposta non valida da XMS, %s" - -msgid "Binary" -msgstr "Valore binario" - -msgid "Blank components" -msgstr "Componenti vuoti" - -msgid "Blockbridge api host not configured" -msgstr "Host api Blockbridge non configurato " - -#, python-format -msgid "Blockbridge configured with invalid auth scheme '%(auth_scheme)s'" -msgstr "Blockbridge configurato con schema aut non valido '%(auth_scheme)s'" - -msgid "Blockbridge default pool does not exist" -msgstr "Il pool predefinito Blockbridge non esiste " - -msgid "" -"Blockbridge password not configured (required for auth scheme 'password')" -msgstr "" -"Password Blockbridge non configurata (richiesta per lo schema aut 'password')" - -msgid "Blockbridge pools not configured" -msgstr "Pool Blockbridge non configurati " - -msgid "Blockbridge token not configured (required for auth scheme 'token')" -msgstr "" -"Token Blockbridge non configurato (richiesto per lo schema aut 'token')" - -msgid "Blockbridge user not configured (required for auth scheme 'password')" -msgstr "" -"Utente Blockbridge non configurato (richiesto per lo schema aut 'password')" - -#, python-format -msgid "Brocade Fibre Channel Zoning CLI error: %(reason)s" -msgstr "Errore CLI di divisione in zone di Brocade Fibre Channel: 
%(reason)s"
-
-#, python-format
-msgid "Brocade Fibre Channel Zoning HTTP error: %(reason)s"
-msgstr "Errore HTTP di divisione in zone di Brocade Fibre Channel: %(reason)s"
-
-msgid "CHAP secret should be 12-16 bytes."
-msgstr "Il segreto CHAP deve essere 12-16 byte. "
-
-#, python-format
-msgid ""
-"CLI Exception output:\n"
-" command: %(cmd)s\n"
-" stdout: %(out)s\n"
-" stderr: %(err)s"
-msgstr ""
-"Output eccezione CLI:\n"
-" comando: %(cmd)s\n"
-" stdout: %(out)s\n"
-" stderr: %(err)s"
-
-#, python-format
-msgid ""
-"CLI Exception output:\n"
-" command: %(cmd)s\n"
-" stdout: %(out)s\n"
-" stderr: %(err)s."
-msgstr ""
-"Output eccezione CLI:\n"
-" comando: %(cmd)s\n"
-" stdout: %(out)s\n"
-" stderr: %(err)s."
-
-msgid ""
-"CMMVC6071E The VDisk-to-host mapping was not created because the VDisk is "
-"already mapped to a host.\n"
-"\""
-msgstr ""
-"CMMVC6071E L'associazione VDisk a host non è stata creata perché il VDisk "
-"è già associato ad un host.\n"
-" \""
-
-msgid "CONCERTO version is not supported"
-msgstr "Versione CONCERTO non supportata"
-
-#, python-format
-msgid "CPG (%s) doesn't exist on array"
-msgstr "CPG (%s) non esiste nell'array"
-
-msgid "Cache name is None, please set smartcache:cachename in key."
-msgstr ""
-"Il nome della cache è Nessuno, impostare smartcache:cachename nella chiave. "
-
-#, python-format
-msgid "Cache volume %s does not have required properties"
-msgstr "Il volume della cache %s non presenta le proprietà richieste"
-
-msgid "Call returned a None object"
-msgstr "La chiamata ha restituito un oggetto None"
-
-msgid "Can not add FC port to host."
-msgstr "Impossibile aggiungere la porta FC all'host."
-
-#, python-format
-msgid "Can not find cache id by cache name %(name)s."
-msgstr "Impossibile trovare l'ID cache per nome cache %(name)s."
-
-#, python-format
-msgid "Can not find partition id by name %(name)s."
-msgstr "Impossibile trovare l'ID partizione per nome %(name)s."
-
-#, python-format
-msgid "Can not get pool info. 
pool: %s" -msgstr "Impossibile ottenere le informazioni sul pool. pool: %s" - -#, python-format -msgid "Can not translate %s to integer." -msgstr "Impossibile convertire %s in un numero intero." - -#, python-format -msgid "Can't access 'scality_sofs_config': %s" -msgstr "Impossibile accedere a 'scality_sofs_config': %s" - -msgid "Can't decode backup record." -msgstr "Impossibile decodificare il record di backup. " - -#, python-format -msgid "Can't extend replication volume, volume: %(id)s" -msgstr "Impossibile estendere il volume di replica, volume: %(id)s" - -msgid "Can't find LUN on the array, please check the source-name or source-id." -msgstr "" -"Impossibile trovare la LUN sull'array, controllare source-name o source-id." - -#, python-format -msgid "Can't find cache name on the array, cache name is: %(name)s." -msgstr "Impossibile trovare il nome cache nell'array, nome cache: %(name)s." - -#, python-format -msgid "Can't find lun info on the array. volume: %(id)s, lun name: %(name)s." -msgstr "" -"Impossibile trovare informazioni sulla lun nell'array. volume: %(id)s, nome " -"lun: %(name)s." - -#, python-format -msgid "Can't find partition name on the array, partition name is: %(name)s." -msgstr "" -"Impossibile trovare il nome partizione nell'array, il nome partizione è: " -"%(name)s." - -#, python-format -msgid "Can't find service: %s" -msgstr "Impossibile trovare il servizio: %s" - -msgid "" -"Can't find snapshot on array, please check the source-name or source-id." -msgstr "" -"Impossibile trovare l'istantanea sull'array, controllare source-name o " -"source-id." - -msgid "Can't find the same host id from arrays." -msgstr "Impossibile trovare lo stesso id host dagli array." - -#, python-format -msgid "Can't get volume id from snapshot, snapshot: %(id)s" -msgstr "Impossibile ottenere l'id volume dall'istantanea, istantanea: %(id)s" - -#, python-format -msgid "Can't get volume id. Volume name: %s." -msgstr "Impossibile ottenere l'id volume. 
Nome volume: %s" - -#, python-format -msgid "Can't import LUN %(lun_id)s to Cinder. LUN type mismatched." -msgstr "" -"Impossibile importare la LUN %(lun_id)s in Cinder. Tipo di LUN non " -"corrispondente." - -#, python-format -msgid "Can't import LUN %s to Cinder. Already exists in a HyperMetroPair." -msgstr "" -"Impossibile importare LUN %s in Cinder. Esiste già in un HyperMetroPair." - -#, python-format -msgid "Can't import LUN %s to Cinder. Already exists in a LUN copy task." -msgstr "" -"Impossibile importare la LUN %s in Cinder. Esiste già in un'attività di " -"copia LUN." - -#, python-format -msgid "Can't import LUN %s to Cinder. Already exists in a LUN group." -msgstr "Impossibile importare LUN %s in Cinder. Esiste già in un gruppo LUN." - -#, python-format -msgid "Can't import LUN %s to Cinder. Already exists in a LUN mirror." -msgstr "" -"Impossibile importare la LUN %s in Cinder. Esiste già in un mirror LUN." - -#, python-format -msgid "Can't import LUN %s to Cinder. Already exists in a SplitMirror." -msgstr "" -"Impossibile importare la LUN %s in Cinder. Esiste già in uno SplitMirror." - -#, python-format -msgid "Can't import LUN %s to Cinder. Already exists in a migration task." -msgstr "" -"Impossibile importare la LUN %s in Cinder. Esiste già in un'attività di " -"migrazione." - -#, python-format -msgid "" -"Can't import LUN %s to Cinder. Already exists in a remote replication task." -msgstr "" -"Impossibile importare la LUN %s in Cinder. Esiste già in un'attività di " -"replica remota." - -#, python-format -msgid "Can't import LUN %s to Cinder. LUN status is not normal." -msgstr "Impossibile importare LUN %s in Cinder. Lo stato LUN non è normal." - -#, python-format -msgid "Can't import snapshot %s to Cinder. Snapshot doesn't belong to volume." -msgstr "" -"Impossibile importare l'istantanea %s in Cinder. L'istantanea non appartiene " -"al volume." - -#, python-format -msgid "Can't import snapshot %s to Cinder. Snapshot is exposed to initiator." 
-msgstr ""
-"Impossibile importare l'istantanea %s in Cinder. L'istantanea è esposta "
-"all'iniziatore."
-
-#, python-format
-msgid ""
-"Can't import snapshot %s to Cinder. Snapshot status is not normal or running "
-"status is not online."
-msgstr ""
-"Impossibile importare l'istantanea %s in Cinder. Lo stato dell'istantanea "
-"non è normal o lo stato di esecuzione non è online."
-
-msgid "Can't parse backup record."
-msgstr "Impossibile analizzare il record di backup. "
-
-#, python-format
-msgid ""
-"Cannot add volume %(volume_id)s to consistency group %(group_id)s because it "
-"has no volume type."
-msgstr ""
-"Impossibile aggiungere il volume %(volume_id)s al gruppo di coerenza "
-"%(group_id)s perché non dispone di un tipo di volume."
-
-#, python-format
-msgid ""
-"Cannot add volume %(volume_id)s to consistency group %(group_id)s because it "
-"is already in consistency group %(orig_group)s."
-msgstr ""
-"Impossibile aggiungere il volume %(volume_id)s al gruppo di coerenza "
-"%(group_id)s perché è già nel gruppo di coerenza %(orig_group)s."
-
-#, python-format
-msgid ""
-"Cannot add volume %(volume_id)s to consistency group %(group_id)s because "
-"volume cannot be found."
-msgstr ""
-"Impossibile aggiungere il volume %(volume_id)s al gruppo di coerenza "
-"%(group_id)s perché non è possibile trovare il volume."
-
-#, python-format
-msgid ""
-"Cannot add volume %(volume_id)s to consistency group %(group_id)s because "
-"volume does not exist."
-msgstr ""
-"Impossibile aggiungere il volume %(volume_id)s al gruppo di coerenza "
-"%(group_id)s perché il volume non esiste."
-
-#, python-format
-msgid ""
-"Cannot add volume %(volume_id)s to consistency group %(group_id)s because "
-"volume is in an invalid state: %(status)s. Valid states are: %(valid)s."
-msgstr ""
-"Impossibile aggiungere il volume %(volume_id)s al gruppo di coerenza "
-"%(group_id)s perché il volume si trova in uno stato non valido: %(status)s. "
-"Gli stati validi sono: %(valid)s." 
-
-#, python-format
-msgid ""
-"Cannot add volume %(volume_id)s to consistency group %(group_id)s because "
-"volume type %(volume_type)s is not supported by the group."
-msgstr ""
-"Impossibile aggiungere il volume %(volume_id)s al gruppo di coerenza "
-"%(group_id)s perché il tipo di volume %(volume_type)s non è supportato dal "
-"gruppo."
-
-#, python-format
-msgid ""
-"Cannot attach already attached volume %s; multiattach is disabled via the "
-"'netapp_enable_multiattach' configuration option."
-msgstr ""
-"Impossibile collegare il volume già collegato %s; multicollegamento "
-"disabilitato tramite l'opzione di configurazione 'netapp_enable_multiattach'. "
-
-msgid "Cannot change VF context in the session."
-msgstr "Impossibile modificare il contesto VF nella sessione."
-
-#, python-format
-msgid ""
-"Cannot change VF context, specified VF is not available in the manageable VF "
-"list %(vf_list)s."
-msgstr ""
-"Impossibile modificare il contesto VF, il contesto VF specificato non è "
-"disponibile nell'elenco di VF gestibili %(vf_list)s."
-
-msgid "Cannot connect to ECOM server."
-msgstr "Impossibile effettuare la connessione al server ECOM."
-
-#, python-format
-msgid ""
-"Cannot create consistency group %(group)s because snapshot %(snap)s is not "
-"in a valid state. Valid states are: %(valid)s."
-msgstr ""
-"Impossibile creare il gruppo di coerenza %(group)s perché l'istantanea "
-"%(snap)s non è in uno stato valido. Gli stati validi sono: %(valid)s."
-
-#, python-format
-msgid ""
-"Cannot create consistency group %(group)s because source volume "
-"%(source_vol)s is not in a valid state. Valid states are: %(valid)s."
-msgstr ""
-"Impossibile creare il gruppo di coerenza %(group)s perché il volume di "
-"origine %(source_vol)s non è in uno stato valido. Gli stati validi sono: "
-"%(valid)s."
-
-#, python-format
-msgid "Cannot create directory %s."
-msgstr "Impossibile creare la directory %s. "
-
-msgid "Cannot create encryption specs. Volume type in use." 
-msgstr "Impossibile creare le specifiche di codifica. Tipo di volume in uso."
-
-#, python-format
-msgid ""
-"Cannot create image of disk format: %s. Only vmdk disk format is accepted."
-msgstr ""
-"Impossibile creare l'immagine del formato disco: %s. Viene accettato solo il "
-"formato disco vmdk."
-
-#, python-format
-msgid "Cannot create masking view: %(maskingViewName)s. "
-msgstr "Impossibile creare la vista di mascheramento: %(maskingViewName)s. "
-
-#, python-format
-msgid ""
-"Cannot create more than %(req)s volumes on the ESeries array when "
-"'netapp_enable_multiattach' is set to true."
-msgstr ""
-"Impossibile creare più di %(req)s volumi nell'array ESeries quando "
-"'netapp_enable_multiattach' è impostato su true."
-
-#, python-format
-msgid "Cannot create or find an storage group with name %(sgGroupName)s."
-msgstr ""
-"Impossibile creare o trovare un gruppo di archiviazione con nome "
-"%(sgGroupName)s."
-
-#, python-format
-msgid "Cannot create volume of size %s: not multiple of 8GB."
-msgstr "Impossibile creare il volume di dimensione %s: non multiplo di 8GB. "
-
-#, python-format
-msgid "Cannot create volume_type with name %(name)s and specs %(extra_specs)s"
-msgstr ""
-"Impossibile creare volume_type con nome %(name)s e specifiche %(extra_specs)s"
-
-#, python-format
-msgid "Cannot delete LUN %s while snapshots exist."
-msgstr "Impossibile eliminare la LUN %s mentre esistono istantanee. "
-
-#, python-format
-msgid ""
-"Cannot delete cache volume: %(cachevol_name)s. It was updated at "
-"%(updated_at)s and currently has %(numclones)d volume instances."
-msgstr ""
-"Impossibile eliminare il volume cache: %(cachevol_name)s. È stato aggiornato "
-"alle %(updated_at)s e attualmente ha %(numclones)d istanze volume."
-
-#, python-format
-msgid ""
-"Cannot delete cache volume: %(cachevol_name)s. It was updated at "
-"%(updated_at)s and currently has %(numclones)s volume instances."
-msgstr ""
-"Impossibile eliminare il volume cache: %(cachevol_name)s. 
È stato aggiornato "
-"alle %(updated_at)s e attualmente ha %(numclones)s istanze volume."
-
-msgid "Cannot delete encryption specs. Volume type in use."
-msgstr ""
-"Impossibile eliminare le specifiche di codifica. Tipo di volume in uso."
-
-msgid "Cannot determine storage pool settings."
-msgstr "Impossibile determinare le impostazioni del pool di archiviazione."
-
-msgid "Cannot execute /sbin/mount.sofs"
-msgstr "Impossibile eseguire /sbin/mount.sofs"
-
-#, python-format
-msgid "Cannot find CG group %s."
-msgstr "Impossibile trovare il gruppo CG %s."
-
-#, python-format
-msgid ""
-"Cannot find Controller Configuration Service for storage system "
-"%(storage_system)s."
-msgstr ""
-"Impossibile trovare il servizio di configurazione controller per il sistema "
-"di memorizzazione %(storage_system)s."
-
-#, python-format
-msgid "Cannot find Replication Service to create volume for snapshot %s."
-msgstr ""
-"Impossibile trovare il servizio di replica per creare il volume per "
-"l'istantanea %s."
-
-#, python-format
-msgid "Cannot find Replication Service to delete snapshot %s."
-msgstr ""
-"Impossibile trovare il servizio di replica per eliminare l'istantanea %s."
-
-#, python-format
-msgid "Cannot find Replication service on system %s."
-msgstr "Impossibile trovare il servizio di replica sul sistema %s."
-
-#, python-format
-msgid "Cannot find Volume: %(id)s. unmanage operation. Exiting..."
-msgstr ""
-"Impossibile trovare il volume: %(id)s. Annullamento gestione. Uscita..."
-
-#, python-format
-msgid "Cannot find Volume: %(volumename)s. Extend operation. Exiting...."
-msgstr ""
-"Impossibile trovare il volume: %(volumename)s. Operazione di estensione. "
-"Uscire...."
-
-#, python-format
-msgid "Cannot find device number for volume %(volumeName)s."
-msgstr "Impossibile trovare il numero unità per il volume %(volumeName)s."
-
-msgid "Cannot find migration task."
-msgstr "Impossibile trovare l'attività di migrazione. 
" - -#, python-format -msgid "Cannot find replication service on system %s." -msgstr "Impossibile trovare il servizio di replica sul sistema %s." - -#, python-format -msgid "Cannot find source CG instance. consistencygroup_id: %s." -msgstr "Impossibile trovare l'istanza GC di origine. consistencygroup_id: %s." - -#, python-format -msgid "Cannot get mcs_id by channel id: %(channel_id)s." -msgstr "Impossibile ottenere mcs_id per id canale: %(channel_id)s. " - -msgid "Cannot get necessary pool or storage system information." -msgstr "" -"Impossibile ottenere le informazioni necessarie sul sistema di archiviazione " -"o sul pool." - -#, python-format -msgid "" -"Cannot get or create a storage group: %(sgGroupName)s for volume " -"%(volumeName)s " -msgstr "" -"Impossibile ottenere o creare un gruppo di archiviazione: %(sgGroupName)s " -"per il volume %(volumeName)s " - -#, python-format -msgid "Cannot get or create initiator group: %(igGroupName)s. " -msgstr "Impossibile ottenere o creare il gruppo iniziatore: %(igGroupName)s. " - -#, python-format -msgid "Cannot get port group: %(pgGroupName)s. " -msgstr "Impossibile ottenere il gruppo di porte: %(pgGroupName)s. " - -#, python-format -msgid "" -"Cannot get storage group: %(sgGroupName)s from masking view " -"%(maskingViewInstanceName)s. " -msgstr "" -"Impossibile ottenere il gruppo di archiviazione: %(sgGroupName)s dalla vista " -"di mascheramento %(maskingViewInstanceName)s. " - -#, python-format -msgid "" -"Cannot get supported size range for %(sps)s Return code: %(rc)lu. Error: " -"%(error)s." -msgstr "" -"Impossibile ottenere l'intervallo della dimensione supportato per %(sps)s " -"Codice di ritorno: %(rc)lu. Errore: %(error)s." - -#, python-format -msgid "" -"Cannot get the default storage group for FAST policy: %(fastPolicyName)s." -msgstr "" -"Impossibile ottenere il gruppo di archiviazione predefinito per la politica " -"FAST: %(fastPolicyName)s." - -msgid "Cannot get the portgroup from the masking view." 
-msgstr "Impossibile ottenere il gruppo di porte dalla vista di mascheramento."
-
-msgid "Cannot mount Scality SOFS, check syslog for errors"
-msgstr "Impossibile montare Scality SOFS, controllare il syslog per gli errori"
-
-msgid "Cannot ping DRBDmanage backend"
-msgstr "Impossibile effettuare il ping al backend DRBDmanage"
-
-#, python-format
-msgid "Cannot place volume %(id)s on %(host)s"
-msgstr "Impossibile collocare il volume %(id)s su %(host)s"
-
-#, python-format
-msgid ""
-"Cannot provide both 'cgsnapshot_id' and 'source_cgid' to create consistency "
-"group %(name)s from source."
-msgstr ""
-"Impossibile fornire sia 'cgsnapshot_id' e 'source_cgid' per creare il gruppo "
-"di coerenza %(name)s dall'origine. "
-
-msgid "Cannot register resource"
-msgstr "Impossibile registrare la risorsa"
-
-msgid "Cannot register resources"
-msgstr "Impossibile registrare le risorse"
-
-#, python-format
-msgid ""
-"Cannot remove volume %(volume_id)s from consistency group %(group_id)s "
-"because it is not in the group."
-msgstr ""
-"Impossibile rimuovere il volume %(volume_id)s dal gruppo di coerenza "
-"%(group_id)s perché non è presente nel gruppo."
-
-#, python-format
-msgid ""
-"Cannot remove volume %(volume_id)s from consistency group %(group_id)s "
-"because volume is in an invalid state: %(status)s. Valid states are: "
-"%(valid)s."
-msgstr ""
-"Impossibile rimuovere il volume %(volume_id)s dal gruppo di coerenza "
-"%(group_id)s perché il volume si trova in uno stato non valido: %(status)s. "
-"Gli stati validi sono: %(valid)s."
-
-#, python-format
-msgid "Cannot retype from HPE3PARDriver to %s."
-msgstr "Impossibile riscrivere da HPE3PARDriver a %s."
-
-msgid "Cannot retype from one 3PAR array to another."
-msgstr "Impossibile riscrivere da un array 3PAR ad un altro."
-
-msgid "Cannot retype to a CPG in a different domain."
-msgstr "Impossibile riscrivere in un CPG in un dominio diverso."
-
-msgid "Cannot retype to a snap CPG in a different domain." 
-msgstr "Impossibile riscrivere in uno snap CPG in un diverso dominio." - -msgid "" -"Cannot run vgc-cluster command, please ensure software is installed and " -"permissions are set properly." -msgstr "" -"Impossibile eseguire il comando vgc-cluster, assicurarsi che sia installato " -"il software e che le autorizzazioni siano impostate correttamente. " - -msgid "Cannot set both hitachi_serial_number and hitachi_unit_name." -msgstr "" -"Impossibile impostare entrambi hitachi_serial_number e hitachi_unit_name." - -msgid "Cannot specify both protection domain name and protection domain id." -msgstr "" -"Impossibile specificare nome dominio di protezione e id dominio di " -"protezione." - -msgid "Cannot specify both storage pool name and storage pool id." -msgstr "" -"Impossibile specificare il nome del pool di memoria e l'ID del pool di " -"memoria. " - -#, python-format -msgid "" -"Cannot update consistency group %(group_id)s because no valid name, " -"description, add_volumes, or remove_volumes were provided." -msgstr "" -"Impossibile aggiornare il gruppo di coerenza %(group_id)s perché non sono " -"stati forniti nome, descrizione, add_volumes o remove_volumes validi." - -msgid "Cannot update encryption specs. Volume type in use." -msgstr "" -"Impossibile aggiornare le specifiche di codifica. Tipo di volume in uso." - -#, python-format -msgid "Cannot update volume_type %(id)s" -msgstr "Impossibile aggiornare volume_type %(id)s" - -#, python-format -msgid "Cannot verify the existence of object:%(instanceName)s." -msgstr "Impossibile verificare l'esistenza dell'oggetto:%(instanceName)s." - -#, python-format -msgid "CgSnapshot %(cgsnapshot_id)s could not be found." -msgstr "Impossibile trovare CgSnapshot %(cgsnapshot_id)s." - -msgid "Cgsnahost is empty. No consistency group will be created." -msgstr "Cgsnahost vuoto. Non verrà creato alcun gruppo di coerenza." - -msgid "Change hostlun id error." -msgstr "Errore di modifica ID hostlun." 
- -msgid "Change lun priority error." -msgstr "Errore di modifica della priorità lun. " - -msgid "Change lun smarttier policy error." -msgstr "Errore di modifica della politica smarttier lun. " - -#, python-format -msgid "" -"Change would make usage less than 0 for the following resources: %(unders)s" -msgstr "" -"La modifica renderebbe l'utilizzo inferiore a 0 per le seguenti risorse: " -"%(unders)s" - -msgid "Check access permissions for the ZFS share assigned to this driver." -msgstr "" -"Controllare le autorizzazioni di accesso per la condivisione ZFS assegnata a " -"questo driver." - -msgid "Check hostgroup associate error." -msgstr "Errore di controllo associazione gruppo host. " - -msgid "Check initiator added to array error." -msgstr "Errore di controllo aggiunta iniziatore all'array. " - -msgid "Check initiator associated to host error." -msgstr "Errore di controllo iniziatore associato all'host. " - -msgid "Check lungroup associate error." -msgstr "Errore di controllo associazione gruppo lun. " - -msgid "Check portgroup associate error." -msgstr "Errore di controllo associazione gruppo porte. " - -msgid "" -"Check the state of the http service. Also ensure that the https port number " -"is the same as the one specified in cinder.conf." -msgstr "" -"Controllare lo stato del servizio http. Assicurarsi che il numero della " -"porta https sia uguale a quello specificato in cinder.conf." - -msgid "Chunk size is not multiple of block size for creating hash." -msgstr "" -"La dimensione della porzione non è un multiplo della dimensione del blocco " -"per la creazione dell'hash." - -#, python-format -msgid "Cisco Fibre Channel Zoning CLI error: %(reason)s" -msgstr "Errore CLI di divisione in zone di Cisco Fibre Channel: %(reason)s" - -#, python-format -msgid "Clone feature is not licensed on %(storageSystem)s." -msgstr "" -"La funzione di creazione del clone non dispone di licenza in " -"%(storageSystem)s." 
- -#, python-format -msgid "" -"Clone type '%(clone_type)s' is invalid; valid values are: '%(full_clone)s' " -"and '%(linked_clone)s'." -msgstr "" -"Il tipo di clone '%(clone_type)s' non è valido; i valori validi sono: " -"'%(full_clone)s' e '%(linked_clone)s'." - -msgid "" -"Cluster is not formatted. You should probably perform \"dog cluster format\"." -msgstr "" -"Il cluster non è formattato. È probabilmente necessario eseguire \"dog " -"cluster format\"." - -#, python-format -msgid "Coho Data Cinder driver failure: %(message)s" -msgstr "Errore driver Coho Data Cinder: %(message)s" - -msgid "Coho rpc port is not configured" -msgstr "La porta RPC Coho non è configurata" - -#, python-format -msgid "Command %(cmd)s blocked in the CLI and was cancelled" -msgstr "Il comando %(cmd)s è bloccato nella CLI ed è stato annullato " - -#, python-format -msgid "CommandLineHelper._wait_for_condition: %s timeout." -msgstr "CommandLineHelper._wait_for_condition: %s timeout." - -msgid "Compression Enabler is not installed. Can not create compressed volume." -msgstr "" -"Compression Enabler non è installato. Impossibile creare il volume compresso." - -#, python-format -msgid "Compute cluster: %(cluster)s not found." -msgstr "Cluster di calcolo: %(cluster)s non trovato. " - -msgid "Condition has no field." -msgstr "La condizione non ha campi." - -#, python-format -msgid "Config 'max_over_subscription_ratio' invalid. Must be > 0: %s" -msgstr "" -"Configurazione 'max_over_subscription_ratio' non valida. Deve essere > 0: %s" - -msgid "Configuration error: dell_sc_ssn not set." -msgstr "Errore di configurazione: dell_sc_ssn non impostato." - -#, python-format -msgid "Configuration file %(configurationFile)s does not exist." -msgstr "Il file di configurazione %(configurationFile)s non esiste." - -msgid "Configuration is not found." -msgstr "Configurazione non trovata." - -#, python-format -msgid "Configuration value %s is not set." -msgstr "Valore di configurazione %s non impostato." 
- -#, python-format -msgid "" -"Conflicting QoS specifications in volume type %s: when QoS spec is " -"associated to volume type, legacy \"netapp:qos_policy_group\" is not allowed " -"in the volume type extra specs." -msgstr "" -"Specifiche QoS in conflitto nel tipo volume %s: quando spec QoS è associata " -"al tipo di volume, legacy \"netapp:qos_policy_group\" non consentito nelle " -"specifiche supplementari del tipo di volume." - -#, python-format -msgid "Connection to glance failed: %(reason)s" -msgstr "Connessione a glance non riuscita: %(reason)s" - -#, python-format -msgid "Connection to swift failed: %(reason)s" -msgstr "Connessione a swift non riuscita: %(reason)s" - -#, python-format -msgid "Connector does not provide: %s" -msgstr "Il connettore non fornisce: %s " - -#, python-format -msgid "Connector doesn't have required information: %(missing)s" -msgstr "Il connettore non ha le informazioni necessarie: %(missing)s" - -msgid "Consistency group is empty. No cgsnapshot will be created." -msgstr "Il gruppo di coerenza è vuoto. Non verrà creato nessun cgsnapshot." - -#, python-format -msgid "ConsistencyGroup %(consistencygroup_id)s could not be found." -msgstr "Impossibile trovare ConsistencyGroup %(consistencygroup_id)s." - -msgid "Container" -msgstr "Contenitore" - -msgid "Container size smaller than required file size." -msgstr "" -"Dimensione del contenitore più piccola della dimensione di file richiesta." - -msgid "Content type not supported." -msgstr "Tipo di contenuto non supportato." - -#, python-format -msgid "Controller Configuration Service not found on %(storageSystemName)s." -msgstr "Controller Configuration Service non trovato in %(storageSystemName)s." - -#, python-format -msgid "Controller IP '%(host)s' could not be resolved: %(e)s." -msgstr "Impossibile risolvere l'IP del controller '%(host)s': %(e)s." 
- -#, python-format -msgid "Converted to %(f1)s, but format is now %(f2)s" -msgstr "Convertito in %(f1)s, ma il formato ora è %(f2)s" - -#, python-format -msgid "Converted to %(vol_format)s, but format is now %(file_format)s" -msgstr "Convertito in %(vol_format)s, ma il formato ora è %(file_format)s" - -#, python-format -msgid "Converted to raw, but format is now %s" -msgstr "Convertito in non elaborato, ma il formato ora è %s" - -#, python-format -msgid "Converted to raw, but format is now %s." -msgstr "Convertito su grezzo, ma il formato è ora %s." - -msgid "Coordinator uninitialized." -msgstr "Coordinatore non inizializzato." - -#, python-format -msgid "" -"Copy volume task failed: convert_to_base_volume: id=%(id)s, status=" -"%(status)s." -msgstr "" -"Copia attività di volume non riuscita: convert_to_base_volume: id=%(id)s, " -"status=%(status)s." - -#, python-format -msgid "" -"Copy volume task failed: create_cloned_volume id=%(id)s, status=%(status)s." -msgstr "" -"Attività di copia del volume non riuscita: create_cloned_volume id=%(id)s, " -"stato=%(status)s." - -#, python-format -msgid "Copying metadata from %(src_type)s %(src_id)s to %(vol_id)s." -msgstr "Copia dei metadati da %(src_type)s %(src_id)s a %(vol_id)s." - -msgid "" -"Could not determine which Keystone endpoint to use. This can either be set " -"in the service catalog or with the cinder.conf config option " -"'backup_swift_auth_url'." -msgstr "" -"Impossibile determinare quale endpoint Keystone utilizzare. Può essere " -"impostato sia nel catalogo del servizio che utilizzando l'opzione di " -"configurazione cinder.conf 'backup_swift_auth_url'." - -msgid "" -"Could not determine which Swift endpoint to use. This can either be set in " -"the service catalog or with the cinder.conf config option 'backup_swift_url'." -msgstr "" -"Impossibile determinare quale endpoint Swift utilizzare. 
Può essere " -"impostato sia nel catalogo del servizio che utilizzando l'opzione di " -"configurazione cinder.conf 'backup_swift_url'." - -msgid "Could not find DISCO wsdl file." -msgstr "Impossibile trovare il file wsdl DISCO." - -#, python-format -msgid "Could not find GPFS cluster id: %s." -msgstr "Impossibile trovare l'id cluster GPFS: %s." - -#, python-format -msgid "Could not find GPFS file system device: %s." -msgstr "Impossibile trovare il dispositivo di file system GPFS: %s." - -#, python-format -msgid "Could not find config at %(path)s" -msgstr "Impossibile trovare la configurazione in %(path)s" - -#, python-format -msgid "Could not find iSCSI export for volume %s" -msgstr "Impossibile trovare l'esportazione iSCSI per il volume %s" - -#, python-format -msgid "Could not find iSCSI target for volume: %(volume_id)s." -msgstr "" -"Impossibile trovare la destinazione iscsi per il volume: %(volume_id)s." - -#, python-format -msgid "Could not find key in output of command %(cmd)s: %(out)s." -msgstr "" -"Impossibile trovare la chiave nell'output del comando %(cmd)s: %(out)s." - -#, python-format -msgid "Could not find parameter %(param)s" -msgstr "Impossibile trovare il parametro %(param)s" - -#, python-format -msgid "Could not find target %s" -msgstr "Impossibile trovare la destinazione %s" - -#, python-format -msgid "Could not find the parent volume for Snapshot '%s' on array." -msgstr "Impossibile trovare il volume parent per l'istantanea '%s' sull'array." - -#, python-format -msgid "Could not find unique snapshot %(snap)s on volume %(vol)s." -msgstr "Impossibile trovare l'istantanea univoca %(snap)s sul volume %(vol)s." - -msgid "Could not get system name." -msgstr "Impossibile ottenere il nome sistema." - -#, python-format -msgid "Could not load paste app '%(name)s' from %(path)s" -msgstr "Impossibile caricare l'app paste '%(name)s' in %(path)s" - -#, python-format -msgid "" -"Could not read information for snapshot %(name)s. Code: %(code)s. 
Reason: " -"%(reason)s" -msgstr "" -"Impossibile leggere le informazioni per l'istantanea %(name)s. Codice: " -"%(code)s. Motivo: %(reason)s" - -#, python-format -msgid "Could not restore configuration file %(file_path)s: %(exc)s" -msgstr "" -"Non è stato possibile ripristinare il file di configurazione %(file_path)s:" -"%(exc)s" - -#, python-format -msgid "Could not save configuration to %(file_path)s: %(exc)s" -msgstr "" -"Non è stato possibile salvare la configurazione in %(file_path)s:%(exc)s" - -#, python-format -msgid "Could not start consistency group snapshot %s." -msgstr "Impossibile avviare l'istantanea del gruppo di coerenza %s." - -#, python-format -msgid "Counter %s not found" -msgstr "Contatore %s non trovato" - -msgid "Create QoS policy error." -msgstr "Errore di creazione della politica QoS." - -#, python-format -msgid "" -"Create backup aborted, expected backup status %(expected_status)s but got " -"%(actual_status)s." -msgstr "" -"Creazione del backup interrotta, lo stato del backup previsto è " -"%(expected_status)s ma è stato ricevuto %(actual_status)s." - -#, python-format -msgid "" -"Create backup aborted, expected volume status %(expected_status)s but got " -"%(actual_status)s." -msgstr "" -"Creazione del backup interrotta, lo stato del volume previsto è " -"%(expected_status)s ma è stato ricevuto %(actual_status)s." - -msgid "Create export for volume failed." -msgstr "Creazione esportazione per il volume non riuscita." - -msgid "Create hostgroup error." -msgstr "Errore durante la creazione del gruppo host." - -#, python-format -msgid "Create hypermetro error. %s." -msgstr "Errore di creazione hypermetro. %s." - -msgid "Create lun error." -msgstr "Errore di creazione lun. " - -msgid "Create lun migration error." -msgstr "Errore di creazione migrazione lun. " - -msgid "Create luncopy error." -msgstr "Errore durante la creazione di luncopy." - -msgid "Create lungroup error." -msgstr "Errore durante la creazione del gruppo lun." 
- -msgid "Create manager volume flow failed." -msgstr "Creazione flusso del volume del gestore non riuscita. " - -msgid "Create port group error." -msgstr "Errore di creazione gruppo di porte." - -msgid "Create replication error." -msgstr "Errore di creazione replica." - -#, python-format -msgid "Create replication pair failed. Error: %s." -msgstr "Creazione coppia di replica non riuscita. Errore: %s." - -msgid "Create snapshot error." -msgstr "Errore durante la creazione dell'istantanea." - -#, python-format -msgid "Create volume error. Because %s." -msgstr "Errore di creazione del volume. Perché %s. " - -msgid "Create volume failed." -msgstr "Creazione del volume non riuscita." - -msgid "Creating a consistency group from a source is not currently supported." -msgstr "" -"La creazione di un gruppo di coerenza da un'origine non è attualmente " -"supportata." - -#, python-format -msgid "" -"Creating and activating zone set failed: (Zone set=%(cfg_name)s error=" -"%(err)s)." -msgstr "" -"Creazione e attivazione delle serie di zone non riuscita: (Zone set=" -"%(cfg_name)s errore=%(err)s)." - -#, python-format -msgid "" -"Creating and activating zone set failed: (Zone set=%(zoneset)s error=" -"%(err)s)." -msgstr "" -"Creazione e attivazione delle serie di zone non riuscita: (Zone set=" -"%(zoneset)s errore=%(err)s)." - -#, python-format -msgid "Creating usages for %(begin_period)s until %(end_period)s" -msgstr "Creazione degli utilizzi da %(begin_period)s fino a %(end_period)s" - -msgid "Current host isn't part of HGST domain." -msgstr "L'host corrente non fa parte del dominio HGST. " - -#, python-format -msgid "" -"Current host not valid for volume %(id)s with type %(type)s, migration not " -"allowed" -msgstr "" -"Host corrente non valido per il volume %(id)s con tipo %(type)s, migrazione " -"non consentita" - -#, python-format -msgid "" -"Currently mapped host for volume %(vol)s is in unsupported host group with " -"%(group)s." 
-msgstr "" -"L'host attualmente associato per il volume %(vol)s è in un gruppo host non " -"supportato con%(group)s." - -#, python-format -msgid "" -"DRBDmanage driver error: expected key \"%s\" not in answer, wrong DRBDmanage " -"version?" -msgstr "" -"Errore driver DRBDmanage: prevista chiave \"%s\" non presente nella " -"risposta, versione DRBDmanage errata?" - -msgid "" -"DRBDmanage driver setup error: some required libraries (dbus, drbdmanage.*) " -"not found." -msgstr "" -"Errore di impostazione del driver DRBDmanage: alcune librerie richieste " -"(dbus, drbdmanage.*) non trovate." - -#, python-format -msgid "DRBDmanage expected one resource (\"%(res)s\"), got %(n)d" -msgstr "DRBDmanage prevedeva una risorsa (\"%(res)s\"), ottenute %(n)d" - -#, python-format -msgid "" -"DRBDmanage timeout waiting for new volume after snapshot restore; resource " -"\"%(res)s\", volume \"%(vol)s\"" -msgstr "" -"Timeout DRBDmanage in attesa di un nuovo volume dopo il ripristino " -"dell'istantanea; risorsa \"%(res)s\", volume \"%(vol)s\"" - -#, python-format -msgid "" -"DRBDmanage timeout waiting for snapshot creation; resource \"%(res)s\", " -"snapshot \"%(sn)s\"" -msgstr "" -"Timeout DRBDmanage in attesa della creazione dell'istantanea; risorsa " -"\"%(res)s\", istantanea \"%(sn)s\"" - -#, python-format -msgid "" -"DRBDmanage timeout waiting for volume creation; resource \"%(res)s\", volume " -"\"%(vol)s\"" -msgstr "" -"Timeout DRBDmanage in attesa della creazione del volume; risorsa \"%(res)s" -"\", volume \"%(vol)s\"" - -#, python-format -msgid "" -"DRBDmanage timeout waiting for volume size; volume ID \"%(id)s\" (res " -"\"%(res)s\", vnr %(vnr)d)" -msgstr "" -"Timeout DRBDmanage in attesa della dimensione del volume; ID volume \"%(id)s" -"\" (res \"%(res)s\", vnr %(vnr)d)" - -msgid "Data ONTAP API version could not be determined." -msgstr "Impossibile determinare la versione API di Data ONTAP." - -msgid "Data ONTAP operating in 7-Mode does not support QoS policy groups." 
-msgstr "" -"I dati ONTAP che operano in modalità 7 non supportano i gruppi di politiche " -"QoS. " - -msgid "Database schema downgrade is not allowed." -msgstr "Il downgrade dello schema di database non è consentito." - -#, python-format -msgid "Dataset %s is not shared in Nexenta Store appliance" -msgstr "Il dataset %s non è condiviso nell'applicazione Nexenta Store" - -#, python-format -msgid "Dataset group %s not found at Nexenta SA" -msgstr "Gruppo di dataset %s non trovato su Nexenta SA" - -#, python-format -msgid "" -"Dedup is a valid provisioning type, but requires WSAPI version " -"'%(dedup_version)s' version '%(version)s' is installed." -msgstr "" -"Dedup è un tipo di acquisizione valido, ma richiede WSAPI versione " -"'%(dedup_version)s'; è installata la versione '%(version)s'." - -msgid "Dedup luns cannot be extended" -msgstr "Impossibile estendere le lun dedup" - -#, python-format -msgid "" -"Default quota for resource: %(res)s is set by the default quota flag: quota_" -"%(res)s, it is now deprecated. Please use the default quota class for " -"default quota." -msgstr "" -"Quota predefinita per la risorsa: %(res)s è impostato dall'indicatore quota " -"predefinita: quota_%(res)s, è ora obsoleta. Utilizzare la classe di quota " -"predefinita per la quota predefinita." - -msgid "Default volume type can not be found." -msgstr "Impossibile trovare il tipo di volume predefinito." - -msgid "Delete LUNcopy error." -msgstr "Errore di eliminazione LUNcopy. " - -msgid "Delete QoS policy error." -msgstr "Errore di eliminazione della politica QoS." - -msgid "Delete associated lun from lungroup error." -msgstr "Errore di eliminazione lun associata dal gruppo lun. " - -#, python-format -msgid "" -"Delete backup aborted, the backup service currently configured " -"[%(configured_service)s] is not the backup service that was used to create " -"this backup [%(backup_service)s]." 
-msgstr "" -"Eliminazione del backup interrotta, il servizio di backup attualmente " -"configurato [%(configured_service)s] non è il servizio di backup utilizzato " -"per creare questo backup [%(backup_service)s]." - -msgid "Delete consistency group failed." -msgstr "Eliminazione del gruppo di coerenza non riuscita." - -msgid "Delete hostgroup error." -msgstr "Errore di eliminazione gruppo host. " - -msgid "Delete hostgroup from mapping view error." -msgstr "Errore di eliminazione gruppo host dalla vista associazione." - -msgid "Delete lun error." -msgstr "Errore di eliminazione lun. " - -msgid "Delete lun migration error." -msgstr "Errore di eliminazione migrazione lun. " - -msgid "Delete lungroup error." -msgstr "Errore di eliminazione gruppo lun. " - -msgid "Delete lungroup from mapping view error." -msgstr "Errore di eliminazione gruppo lun dalla vista associazione." - -msgid "Delete mapping view error." -msgstr "Errore di eliminazione vista associazione." - -msgid "Delete port group error." -msgstr "Errore di eliminazione gruppo di porte." - -msgid "Delete portgroup from mapping view error." -msgstr "Errore di eliminazione gruppo porte dalla vista associazione." - -msgid "Delete snapshot error." -msgstr "Errore di eliminazione istantanea." - -#, python-format -msgid "Delete snapshot of volume not supported in state: %s." -msgstr "" -"Eliminazione dell'istantanea del volume non supportata nello stato: %s." - -#, python-format -msgid "" -"Delete_backup aborted, expected backup status %(expected_status)s but got " -"%(actual_status)s." -msgstr "" -"Delete_backup interrotta, lo stato del backup previsto è %(expected_status)s " -"ma è stato ricevuto %(actual_status)s." - -msgid "Deleting volume from database and skipping rpc." -msgstr "Eliminazione del volume dal database, rpc ignorato." - -#, python-format -msgid "Deleting zones failed: (command=%(cmd)s error=%(err)s)." -msgstr "Eliminazione zone non riuscita: (comando=%(cmd)s errore=%(err)s)." 
- -msgid "Dell API 2.1 or later required for Consistency Group support" -msgstr "" -"Dell API 2.1 o successiva richiesta per il supporto del gruppo di coerenza" - -msgid "" -"Dell Cinder driver configuration error replication not supported with direct " -"connect." -msgstr "" -"Replica dell'errore di configurazione del driver Dell Cinder non supportata " -"con la connessione diretta." - -#, python-format -msgid "Dell Cinder driver configuration error replication_device %s not found" -msgstr "" -"Dispositivo di replica dell'errore di configurazione del driver Dell Cinder " -"%s non trovato" - -msgid "Describe-resource is admin only functionality" -msgstr "Describe-resource è una funzionalità solo di admin" - -#, python-format -msgid "Destination has migration_status %(stat)s, expected %(exp)s." -msgstr "La destinazione ha migration_status %(stat)s, è previsto %(exp)s." - -msgid "Destination volume not mid-migration." -msgstr "Volume di destinazione non migrazione intermedia." - -msgid "" -"Detach volume failed: More than one attachment, but no attachment_id " -"provided." -msgstr "" -"Scollegamento volume non riuscito: più di un collegamento, ma nessun " -"attachment_id fornito. " - -msgid "Detach volume from instance and then try again." -msgstr "Scollegare il volume dall'istanza e riprovare." - -#, python-format -msgid "Detected more than one volume with name %(vol_name)s" -msgstr "È stato rilevato più di un volume con il nome %(vol_name)s" - -#, python-format -msgid "Did not find expected column in %(fun)s: %(hdr)s." -msgstr "La colonna prevista non è stata trovata in %(fun)s: %(hdr)s." - -#, python-format -msgid "Did not find the expected key %(key)s in %(fun)s: %(raw)s." -msgstr "Impossibile trovare la chiave prevista %(key)s in %(fun)s: %(raw)s." - -msgid "Disabled reason contains invalid characters or is too long" -msgstr "La causa disabilitata contiene caratteri non validi o è troppo lunga" - -#, python-format -msgid "Domain with name %s wasn't found." 
-msgstr "Impossibile trovare il dominio con nome %s. " - -#, python-format -msgid "" -"Downlevel GPFS Cluster Detected. GPFS Clone feature not enabled in cluster " -"daemon level %(cur)s - must be at least at level %(min)s." -msgstr "" -"Rilevato un cluster GPFS di livello inferiore. La funzione di clonazione " -"GPFS non è abilitata nel livello daemon del cluster %(cur)s - deve essere " -"almeno di livello %(min)s." - -#, python-format -msgid "Driver initialize connection failed (error: %(err)s)." -msgstr "" -"Inizializzazione connessione del driver non riuscita (errore: %(err)s)." - -msgid "Driver must implement initialize_connection" -msgstr "Il driver deve implementare initialize_connection" - -#, python-format -msgid "" -"Driver successfully decoded imported backup data, but there are missing " -"fields (%s)." -msgstr "" -"Il driver ha decodificato correttamente i dati di backup importati, ma ci " -"sonocampi mancanti (%s)." - -#, python-format -msgid "" -"E-series proxy API version %(current_version)s does not support full set of " -"SSC extra specs. The proxy version must be at at least %(min_version)s." -msgstr "" -"API proxy E-series versione %(current_version)s non supporta la serie " -"completa dispecifiche supplementari SSC. La versione proxy deve essere " -"almeno %(min_version)s. " - -#, python-format -msgid "" -"EMC VNX Cinder Driver CLI exception: %(cmd)s (Return Code: %(rc)s) (Output: " -"%(out)s)." -msgstr "" -"Eccezione EMC VNX Cinder Driver CLI: %(cmd)s (Codice di ritorno: %(rc)s) " -"(Output: %(out)s)." - -#, python-format -msgid "" -"EMC VNX Cinder Driver SPUnavailableException: %(cmd)s (Return Code: %(rc)s) " -"(Output: %(out)s)." -msgstr "" -"EMC VNX Cinder Driver SPUnavailableException: %(cmd)s (Codice di ritorno: " -"%(rc)s) (Output: %(out)s)." - -msgid "" -"EcomServerIp, EcomServerPort, EcomUserName, EcomPassword must have valid " -"values." 
-msgstr "" -"EcomServerIp, EcomServerPort, EcomUserName, EcomPassword devono esserevalori " -"validi. " - -#, python-format -msgid "" -"Either 'cgsnapshot_id' or 'source_cgid' must be provided to create " -"consistency group %(name)s from source." -msgstr "" -"È necessario fornire 'cgsnapshot_id' o 'source_cgid' per creare il gruppo di " -"congruenza %(name)s dall'origine." - -#, python-format -msgid "" -"Either SLO: %(slo)s or workload %(workload)s is invalid. Examine previous " -"error statement for valid values." -msgstr "" -"SLO: %(slo)s o carico di lavoro %(workload)s non validi. Esaminare la " -"precedente istruzione di errore per i valori validi." - -msgid "Either hitachi_serial_number or hitachi_unit_name is required." -msgstr "Sono richiesti sia hitachi_serial_number che hitachi_unit_name." - -#, python-format -msgid "Element Composition Service not found on %(storageSystemName)s." -msgstr "Element Composition Service non trovato in %(storageSystemName)s." - -msgid "Enables QoS." -msgstr "Abilita il QoS. " - -msgid "Enables compression." -msgstr "Abilita la compressione. " - -msgid "Enables replication." -msgstr "Abilita la replica. " - -msgid "Ensure that configfs is mounted at /sys/kernel/config." -msgstr "Verificare che configfs sia montato in /sys/kernel/config." - -#, python-format -msgid "" -"Error Adding Initiator: %(initiator)s on groupInitiatorGroup: " -"%(initiatorgroup)s Return code: %(ret.status)d Message: %(ret.data)s ." -msgstr "" -"Errore durante l'aggiunta dell'iniziatore: %(initiator)s su " -"groupInitiatorGroup: %(initiatorgroup)s Codice di ritorno: %(ret.status)d " -"Messaggio: %(ret.data)s ." - -#, python-format -msgid "" -"Error Adding to TargetGroup: %(targetgroup)s withIQN: %(iqn)sReturn code: " -"%(ret.status)d Message: %(ret.data)s." -msgstr "" -"errore durante l'aggiunta a TargetGroup: %(targetgroup)s withIQN: " -"%(iqn)sCodice di ritorno: %(ret.status)d Messaggio: %(ret.data)s." 
- -#, python-format -msgid "Error Attaching volume %(vol)s." -msgstr "Errore di collegamento del volume %(vol)s. " - -#, python-format -msgid "" -"Error Cloning Snapshot: %(snapshot)s on Volume: %(lun)s of Pool: %(pool)s " -"Project: %(project)s Clone project: %(clone_proj)s Return code: " -"%(ret.status)d Message: %(ret.data)s." -msgstr "" -"Errore durante la clonazione dell'istantanea: %(snapshot)s sul volume: " -"%(lun)s del pool: %(pool)s Progetto: %(project)s Progetto clone: " -"%(clone_proj)s Codice di ritorno: %(ret.status)d Messaggio: %(ret.data)s." - -#, python-format -msgid "" -"Error Create Cloned Volume: %(cloneName)s Return code: %(rc)lu. Error: " -"%(error)s." -msgstr "" -"Errore durante la creazione del volume clonato: %(cloneName)s Codice di " -"ritorno: %(rc)lu. Errore: %(error)s." - -#, python-format -msgid "" -"Error Create Cloned Volume: Volume: %(cloneName)s Source Volume:" -"%(sourceName)s. Return code: %(rc)lu. Error: %(error)s." -msgstr "" -"Errore creazione volume clonato: Volume: %(cloneName)s Volume di origine: " -"%(sourceName)s. Codice di ritorno: %(rc)lu. Errore: %(error)s." - -#, python-format -msgid "" -"Error Create Group: %(groupName)s. Return code: %(rc)lu. Error: %(error)s." -msgstr "" -"Errore nella creazione del gruppo: %(groupName)s. Codice di ritorno: " -"%(rc)lu. Errore: %(error)s." - -#, python-format -msgid "" -"Error Create Masking View: %(groupName)s. Return code: %(rc)lu. Error: " -"%(error)s." -msgstr "" -"Errore nella creazione della vista di mascheramento: %(groupName)s. Codice " -"di ritorno: %(rc)lu. Errore: %(error)s." - -#, python-format -msgid "" -"Error Create Volume: %(volumeName)s. Return code: %(rc)lu. Error: %(error)s." -msgstr "" -"Errore creazione volume: %(volumeName)s. Codice di ritorno: %(rc)lu. " -"Errore: %(error)s." - -#, python-format -msgid "" -"Error Create Volume: %(volumename)s. Return code: %(rc)lu. Error: %(error)s." -msgstr "" -"Errore creazione volume: %(volumename)s. 
Codice di ritorno: %(rc)lu. " -"Errore: %(error)s." - -#, python-format -msgid "" -"Error CreateGroupReplica: source: %(source)s target: %(target)s. Return " -"code: %(rc)lu. Error: %(error)s." -msgstr "" -"Errore CreateGroupReplica: origine: %(source)s destinazione: %(target)s. " -"Codice di ritorno: %(rc)lu. Errore: %(error)s." - -#, python-format -msgid "" -"Error Creating Initiator: %(initiator)s on Alias: %(alias)s Return code: " -"%(ret.status)d Message: %(ret.data)s ." -msgstr "" -"Errore durante la creazione dell'iniziatore: %(initiator)s sull'alias: " -"%(alias)s Codice di ritorno: %(ret.status)d Messaggio: %(ret.data)s." - -#, python-format -msgid "" -"Error Creating Project: %(project)s on Pool: %(pool)s Return code: " -"%(ret.status)d Message: %(ret.data)s ." -msgstr "" -"Errore durante la creazione del progetto: %(project)s sul pool: %(pool)s " -"Codice di ritorno: %(ret.status)d Messaggio: %(ret.data)s." - -#, python-format -msgid "" -"Error Creating Property: %(property)s Type: %(type)s Description: " -"%(description)s Return code: %(ret.status)d Message: %(ret.data)s." -msgstr "" -"Errore durante la creazione delle proprietà: %(property)s Tipo: %(type)s " -"Descrizione: %(description)s Codice di ritorno: %(ret.status)d Messaggio: " -"%(ret.data)s ." - -#, python-format -msgid "" -"Error Creating Share: %(name)s Return code: %(ret.status)d Message: " -"%(ret.data)s." -msgstr "" -"Errore durante la creazione della condivisione: %(name)s Codice di ritorno: " -"%(ret.status)d Messaggio: %(ret.data)s." - -#, python-format -msgid "" -"Error Creating Snapshot: %(snapshot)s onVolume: %(lun)s to Pool: %(pool)s " -"Project: %(project)s Return code: %(ret.status)d Message: %(ret.data)s." -msgstr "" -"Errore durante la creazione dell'istantanea: %(snapshot)s sul volume: " -"%(lun)s nel pool: %(pool)s Progetto: %(project)s Codice di ritorno: " -"%(ret.status)d Messaggio: %(ret.data)s." 
- -#, python-format -msgid "" -"Error Creating Snapshot: %(snapshot)s onshare: %(share)s to Pool: %(pool)s " -"Project: %(project)s Return code: %(ret.status)d Message: %(ret.data)s." -msgstr "" -"Errore durante la creazione dell'istantanea: %(snapshot)s sulla " -"condivisione: %(share)s nel pool: %(pool)s Progetto: %(project)s Codice di " -"ritorno: %(ret.status)d Messaggio: %(ret.data)s." - -#, python-format -msgid "" -"Error Creating Target: %(alias)sReturn code: %(ret.status)d Message: " -"%(ret.data)s ." -msgstr "" -"Errore durante la creazione della destinazione: %(alias)s Codice di ritorno: " -"%(ret.status)d Messaggio: %(ret.data)s." - -#, python-format -msgid "" -"Error Creating TargetGroup: %(targetgroup)s withIQN: %(iqn)sReturn code: " -"%(ret.status)d Message: %(ret.data)s ." -msgstr "" -"Errore durante la creazione di TargetGroup: %(targetgroup)s con IQN: " -"%(iqn)sCodice di ritorno: %(ret.status)d Messaggio: %(ret.data)s." - -#, python-format -msgid "" -"Error Creating Volume: %(lun)s Size: %(size)s Return code: %(ret.status)d " -"Message: %(ret.data)s." -msgstr "" -"Errore durante la creazione del volume: %(lun)s Dimensione: %(size)s Codice " -"di ritorno: %(ret.status)d Messaggio: %(ret.data)s." - -#, python-format -msgid "" -"Error Creating new composite Volume Return code: %(rc)lu. Error: %(error)s." -msgstr "" -"Errore durante la creazione del nuovo volume composito; codice di ritorno: " -"%(rc)lu. Errore: %(error)s." - -#, python-format -msgid "" -"Error Creating replication action on: pool: %(pool)s Project: %(proj)s " -"volume: %(vol)s for target: %(tgt)s and pool: %(tgt_pool)sReturn code: " -"%(ret.status)d Message: %(ret.data)s ." -msgstr "" -"Errore durante la creazione dell'azione di replica su: pool: %(pool)s " -"Progetto: %(proj)s volume: %(vol)s per destinazione: %(tgt)s e pool: " -"%(tgt_pool)s Codice di ritorno: %(ret.status)d Messaggio: %(ret.data)s." - -msgid "Error Creating unbound volume on an Extend operation." 
-msgstr "" -"Errore durante la creazione di un volume non collegato in un'operazione di " -"estensione." - -msgid "Error Creating unbound volume." -msgstr "Errore durante la creazione del volume non collegato." - -#, python-format -msgid "" -"Error Delete Volume: %(volumeName)s. Return code: %(rc)lu. Error: %(error)s." -msgstr "" -"Errore eliminazione volume: %(volumeName)s. Codice di ritorno: %(rc)lu. " -"Errore: %(error)s." - -#, python-format -msgid "" -"Error Deleting Group: %(storageGroupName)s. Return code: %(rc)lu. Error: " -"%(error)s" -msgstr "" -"Errore durante l'eliminazione del gruppo: %(storageGroupName)s. Codice di " -"ritorno: %(rc)lu. Errore: %(error)s" - -#, python-format -msgid "" -"Error Deleting Initiator Group: %(initiatorGroupName)s. Return code: " -"%(rc)lu. Error: %(error)s" -msgstr "" -"Errore durante l'eliminazione del gruppo di iniziatori: " -"%(initiatorGroupName)s. Codice di ritorno: %(rc)lu. Errore: %(error)s" - -#, python-format -msgid "" -"Error Deleting Snapshot: %(snapshot)s on Share: %(share)s to Pool: %(pool)s " -"Project: %(project)s Return code: %(ret.status)d Message: %(ret.data)s." -msgstr "" -"Errore durante l'eliminazione dell'istantanea: %(snapshot)s sulla " -"condivisione: %(share)s nel pool: %(pool)s Progetto: %(project)s Codice di " -"ritorno: %(ret.status)d Messaggio: %(ret.data)s." - -#, python-format -msgid "" -"Error Deleting Snapshot: %(snapshot)s on Volume: %(lun)s to Pool: %(pool)s " -"Project: %(project)s Return code: %(ret.status)d Message: %(ret.data)s." -msgstr "" -"Errore durante l'eliminazione dell'istantanea: %(snapshot)s sul volume: " -"%(lun)s nel pool: %(pool)s Progetto: %(project)s Codice di ritorno: " -"%(ret.status)d Messaggio: %(ret.data)s." - -#, python-format -msgid "" -"Error Deleting Volume: %(lun)s from Pool: %(pool)s, Project: %(project)s. " -"Return code: %(ret.status)d, Message: %(ret.data)s." 
-msgstr "" -"Errore durante l'eliminazione del volume: %(lun)s dal pool: %(pool)s, " -"Progetto: %(project)s. Codice di ritorno: %(ret.status)d, Messaggio: " -"%(ret.data)s." - -#, python-format -msgid "" -"Error Deleting project: %(project)s on pool: %(pool)s Return code: " -"%(ret.status)d Message: %(ret.data)s." -msgstr "" -"Errore durante l'eliminazione del progetto: %(project)s sul pool: %(pool)s " -"Codice di ritorno: %(ret.status)d Messaggio: %(ret.data)s." - -#, python-format -msgid "" -"Error Deleting replication action: %(id)s Return code: %(ret.status)d " -"Message: %(ret.data)s." -msgstr "" -"Errore durante l'eliminazione dell'azione di replica: %(id)s Codice di " -"ritorno: %(ret.status)d Messaggio: %(ret.data)s." - -#, python-format -msgid "" -"Error Extend Volume: %(volumeName)s. Return code: %(rc)lu. Error: %(error)s." -msgstr "" -"Errore estensione volume: %(volumeName)s. Codice di ritorno: %(rc)lu. " -"Errore: %(error)s." - -#, python-format -msgid "" -"Error Getting Initiators: InitiatorGroup: %(initiatorgroup)s Return code: " -"%(ret.status)d Message: %(ret.data)s ." -msgstr "" -"Errore durante l'acquisizione degli iniziatori: InitiatorGroup:" -"%(initiatorgroup)s Codice di ritorno: %(ret.status)d Messaggio: %(ret.data)s." - -#, python-format -msgid "" -"Error Getting Pool Stats: Pool: %(pool)s Return code: %(status)d Message: " -"%(data)s." -msgstr "" -"Errore durante il richiamo delle statistiche del pool: Pool: %(pool)s Codice " -"di ritorno: %(status)d Messaggio: %(data)s." - -#, python-format -msgid "" -"Error Getting Project Stats: Pool: %(pool)s Project: %(project)s Return " -"code: %(ret.status)d Message: %(ret.data)s." -msgstr "" -"Errore durante l'acquisizione delle statistiche del progetto: Pool: %(pool)s " -"Progetto: %(project)s Codice di ritorno: %(ret.status)d Messaggio: " -"%(ret.data)s." 
- -#, python-format -msgid "" -"Error Getting Share: %(share)s on Pool: %(pool)s Project: %(project)s Return " -"code: %(ret.status)d Message: %(ret.data)s." -msgstr "" -"Errore durante l'acquisizione della condivisione: %(share)s nel pool: " -"%(pool)s Progetto: %(project)s Codice di ritorno: %(ret.status)d Messaggio: " -"%(ret.data)s." - -#, python-format -msgid "" -"Error Getting Snapshot: %(snapshot)s on Volume: %(lun)s to Pool: %(pool)s " -"Project: %(project)s Return code: %(ret.status)d Message: %(ret.data)s." -msgstr "" -"Errore durante l'acquisizione dell'istantanea: %(snapshot)s sul volume: " -"%(lun)s nel pool: %(pool)s Progetto: %(project)s Codice di ritorno: " -"%(ret.status)d Messaggio: %(ret.data)s." - -#, python-format -msgid "" -"Error Getting Target: %(alias)sReturn code: %(ret.status)d Message: " -"%(ret.data)s ." -msgstr "" -"Errore durante l'acquisizione della destinazione: %(alias)s Codice di " -"ritorno: %(ret.status)d Messaggio: %(ret.data)s." - -#, python-format -msgid "" -"Error Getting Volume: %(lun)s on Pool: %(pool)s Project: %(project)s Return " -"code: %(ret.status)d Message: %(ret.data)s." -msgstr "" -"Errore durante l'acquisizione del volume: %(lun)s nel pool: %(pool)s " -"Progetto: %(project)s Codice di ritorno: %(ret.status)d Messaggio: " -"%(ret.data)s." - -#, python-format -msgid "" -"Error Migrating volume from one pool to another. Return code: %(rc)lu. " -"Error: %(error)s." -msgstr "" -"Errore durante la migrazione del volume da un pool ad un altro. Codice di " -"ritorno: %(rc)lu. Errore: %(error)s." - -#, python-format -msgid "" -"Error Modifying masking view : %(groupName)s. Return code: %(rc)lu. Error: " -"%(error)s." -msgstr "" -"Errore durante la modifica della vista di mascheramento: %(groupName)s. " -"Codice di ritorno: %(rc)lu. Errore: %(error)s." - -#, python-format -msgid "Error Pool ownership: Pool %(pool)s is not owned by %(host)s." 
-msgstr "" -"Proprietà del pool degli errori: Il pool %(pool)s non è di proprietà di " -"%(host)s." - -#, python-format -msgid "" -"Error Setting props Props: %(props)s on Volume: %(lun)s of Pool: %(pool)s " -"Project: %(project)s Return code: %(ret.status)d Message: %(ret.data)s." -msgstr "" -"Errore durante l'impostazione props Props: %(props)s sul volume: %(lun)s del " -"pool: %(pool)s Progetto: %(project)s Codice di ritorno: %(ret.status)d " -"Messaggio: %(ret.data)s." - -#, python-format -msgid "" -"Error Terminating migrate session. Return code: %(rc)lu. Error: %(error)s." -msgstr "" -"Errore durante la chiusura della sessione di migrazione. Codice di ritorno: " -"%(rc)lu. Errore: %(error)s." - -#, python-format -msgid "" -"Error Verifying Initiator: %(iqn)s Return code: %(ret.status)d Message: " -"%(ret.data)s." -msgstr "" -"Errore durante la verifica dell'iniziatore: %(iqn)s Codice di ritorno: " -"%(ret.status)d Messaggio: %(ret.data)s." - -#, python-format -msgid "" -"Error Verifying Pool: %(pool)s Return code: %(ret.status)d Message: " -"%(ret.data)s." -msgstr "" -"Errore durante la verifica del pool: %(pool)s Codice di ritorno: " -"%(ret.status)d Messaggio: %(ret.data)s." - -#, python-format -msgid "" -"Error Verifying Project: %(project)s on Pool: %(pool)s Return code: " -"%(ret.status)d Message: %(ret.data)s." -msgstr "" -"errore durante la verifica del progetto: %(project)s nel pool: %(pool)s " -"Codice di ritorno: %(ret.status)d Messaggio: %(ret.data)s." - -#, python-format -msgid "" -"Error Verifying Service: %(service)s Return code: %(ret.status)d Message: " -"%(ret.data)s." -msgstr "" -"Errore durante la verifica del servizio: %(service)s Codice di ritorno: " -"%(ret.status)d Messaggio: %(ret.data)s." - -#, python-format -msgid "" -"Error Verifying Target: %(alias)s Return code: %(ret.status)d Message: " -"%(ret.data)s." 
-msgstr "" -"Errore durante la verifica della destinazione: %(alias)s Codice di ritorno: " -"%(ret.status)d Messaggio: %(ret.data)s." - -#, python-format -msgid "" -"Error Verifying share: %(share)s on Project: %(project)s and Pool: %(pool)s " -"Return code: %(ret.status)d Message: %(ret.data)s." -msgstr "" -"Errore durante la verifica della condivisione: %(share)s sul Progetto:" -"%(project)s e Pool: %(pool)s Codice di ritorno: %(ret.status)d Messaggio: " -"%(ret.data)s ." - -#, python-format -msgid "" -"Error adding Volume: %(volumeName)s with instance path: " -"%(volumeInstancePath)s." -msgstr "" -"Errore durante l'aggiunta del volume: %(volumeName)s con percorso istanza: " -"%(volumeInstancePath)s." - -#, python-format -msgid "" -"Error adding initiator to group : %(groupName)s. Return code: %(rc)lu. " -"Error: %(error)s." -msgstr "" -"Errore durante l'aggiunta dell'iniziatore al gruppo: %(groupName)s. Codice " -"di ritorno: %(rc)lu. Errore: %(error)s." - -#, python-format -msgid "Error adding volume to composite volume. Error is: %(error)s." -msgstr "" -"Errore durante l'aggiunta del volume al volume composito. L'errore è: " -"%(error)s." - -#, python-format -msgid "Error appending volume %(volumename)s to target base volume." -msgstr "" -"Errore durante l'aggiunta del volume %(volumename)s al volume di base di " -"destinazione." - -#, python-format -msgid "" -"Error associating storage group : %(storageGroupName)s. To fast Policy: " -"%(fastPolicyName)s with error description: %(errordesc)s." -msgstr "" -"Errore durante l'associazione del gruppo storage: %(storageGroupName)s. alla " -"politica FAST: %(fastPolicyName)s con descrizione dell'errore: %(errordesc)s." - -#, python-format -msgid "" -"Error break clone relationship: Sync Name: %(syncName)s Return code: " -"%(rc)lu. Error: %(error)s." -msgstr "" -"Errore durante l'interruzione della relazione del clone: Nome " -"sincronizzazione: %(syncName)s Codice di ritorno: %(rc)lu. Errore: %(error)s." 
- -msgid "Error connecting to ceph cluster." -msgstr "Errore durante la connessione al cluster ceph." - -#, python-format -msgid "Error connecting via ssh: %s" -msgstr "Errore durante la connessione mediante ssh: %s" - -#, python-format -msgid "Error creating volume: %s." -msgstr "Errore durante la creazione del volume: %s. " - -msgid "Error deleting replay profile." -msgstr "Errore durante l'eliminazione del profilo di risposta. " - -#, python-format -msgid "Error deleting volume %(ssn)s: %(volume)s" -msgstr "Errore durante l'eliminazione del volume %(ssn)s: %(volume)s" - -#, python-format -msgid "Error deleting volume %(vol)s: %(err)s." -msgstr "Errore durante l'eliminazione del volume %(vol)s: %(err)s." - -#, python-format -msgid "Error during evaluator parsing: %(reason)s" -msgstr "Errore durante l'analisi del programma di valutazione: %(reason)s" - -#, python-format -msgid "" -"Error editing share: %(share)s on Pool: %(pool)s Return code: %(ret.status)d " -"Message: %(ret.data)s ." -msgstr "" -"Errore durante la modifica della condivisione: %(share)s nel pool: %(pool)s " -"Codice di ritorno: %(ret.status)d Messaggio: %(ret.data)s." - -#, python-format -msgid "" -"Error enabling iSER for NetworkPortal: please ensure that RDMA is supported " -"on your iSCSI port %(port)d on ip %(ip)s." -msgstr "" -"Errore durante l'abilitazione di iSER per il portale di rete: assicurarsi " -"che RDMA sia supportato sulla porta iSCSI %(port)d sull'ip %(ip)s." - -#, python-format -msgid "Error encountered during cleanup of a failed attach: %(ex)s" -msgstr "" -"Si è verificato un errore durante la ripulitura di un collegamento non " -"riuscito: %(ex)s" - -#, python-format -msgid "Error executing CloudByte API [%(cmd)s], Error: %(err)s." -msgstr "Errore nell'esecuzione dell'API CloudByte [%(cmd)s], Errore:%(err)s." 
- -msgid "Error executing EQL command" -msgstr "Errore durante l'esecuzione del comando EQL" - -#, python-format -msgid "Error executing command via ssh: %s" -msgstr "Errore di esecuzione comando tramite ssh: %s" - -#, python-format -msgid "Error extending volume %(vol)s: %(err)s." -msgstr "Errore durante l'estensione del volume %(vol)s: %(err)s." - -#, python-format -msgid "Error extending volume: %(reason)s" -msgstr "Errore durante l'estensione del volume: %(reason)s" - -#, python-format -msgid "Error finding %(name)s." -msgstr "Errore nella ricerca di %(name)s." - -#, python-format -msgid "Error finding %s." -msgstr "Errore nella ricerca di %s." - -#, python-format -msgid "" -"Error getting ReplicationSettingData. Return code: %(rc)lu. Error: %(error)s." -msgstr "" -"Errore durante il richiamo di ReplicationSettingData. Codice di ritorno: " -"%(rc)lu. Errore: %(error)s." - -msgid "" -"Error getting appliance version details. Return code: %(ret.status)d " -"Message: %(ret.data)s ." -msgstr "" -"Errore nel richiamo dei dettagli della versione del dispositivo. Codice di " -"ritorno: %(ret.status)d Messaggio: %(ret.data)s." - -#, python-format -msgid "Error getting domain id from name %(name)s: %(err)s." -msgstr "Errore nel recupero dell'id dominio dal nome %(name)s: %(err)s." - -#, python-format -msgid "Error getting domain id from name %(name)s: %(id)s." -msgstr "Errore nel recupero dell'id dominio dal nome %(name)s: %(id)s." - -msgid "Error getting initiator groups." -msgstr "Errore durante l'acquisizione dei gruppi iniziatori." - -#, python-format -msgid "Error getting pool id from name %(pool)s: %(err)s." -msgstr "Errore nel recupero dell'id pool dal nome %(pool)s: %(err)s." - -#, python-format -msgid "Error getting pool id from name %(pool_name)s: %(err_msg)s." -msgstr "Errore nel recupero dell'id pool dal nome %(pool_name)s: %(err_msg)s." - -#, python-format -msgid "" -"Error getting replication action: %(id)s. 
Return code: %(ret.status)d " -"Message: %(ret.data)s ." -msgstr "" -"Errore nel richiamo dell'azione di replica: %(id)s. Codice di ritorno: " -"%(ret.status)d Messaggio: %(ret.data)s." - -msgid "" -"Error getting replication source details. Return code: %(ret.status)d " -"Message: %(ret.data)s ." -msgstr "" -"Errore nel richiamo dei dettagli dell'origine di replica. Codice di ritorno: " -"%(ret.status)d Messaggio: %(ret.data)s." - -msgid "" -"Error getting replication target details. Return code: %(ret.status)d " -"Message: %(ret.data)s ." -msgstr "" -"Errore nel richiamo dei dettagli della destinazione di replica. Codice di " -"ritorno: %(ret.status)d Messaggio: %(ret.data)s." - -#, python-format -msgid "" -"Error getting version: svc: %(svc)s.Return code: %(ret.status)d Message: " -"%(ret.data)s." -msgstr "" -"Errore durante l'acquisizione della versione: svc: %(svc)s.Codice di " -"ritorno: %(ret.status)d Messaggio: %(ret.data)s." - -#, python-format -msgid "" -"Error in Operation [%(operation)s] for volume [%(cb_volume)s] in CloudByte " -"storage: [%(cb_error)s], error code: [%(error_code)s]." -msgstr "" -"Errore nell'operazione [%(operation)s] per il volume [%(cb_volume)s] nella " -"memoria CloudByte: [%(cb_error)s], codice di errore: [%(error_code)s]." - -#, python-format -msgid "Error in SolidFire API response: data=%(data)s" -msgstr "Errore nella risposta dell'API SolidFire: data=%(data)s" - -#, python-format -msgid "Error in space-create for %(space)s of size %(size)d GB" -msgstr "Errore in space-create per %(space)s di dimensione %(size)d GB" - -#, python-format -msgid "Error in space-extend for volume %(space)s with %(size)d additional GB" -msgstr "" -"Errore in space-extend per il volume %(space)s con %(size)d GB aggiuntivi" - -#, python-format -msgid "Error managing volume: %s." -msgstr "Errore durante la gestione del volume: %s. " - -#, python-format -msgid "" -"Error modify replica synchronization: %(sv)s operation: %(operation)s. 
" -"Return code: %(rc)lu. Error: %(error)s." -msgstr "" -"Errore durante la sincronizzazione della replica di modifica: %(sv)s " -"operazione: %(operation)s. Codice di ritorno: %(rc)lu. Errore: %(error)s." - -#, python-format -msgid "" -"Error modifying Service: %(service)s Return code: %(ret.status)d Message: " -"%(ret.data)s." -msgstr "" -"Errore durante la modifica del servizio: %(service)s Codice di ritorno: " -"%(ret.status)d Messaggio: %(ret.data)s." - -#, python-format -msgid "" -"Error moving volume: %(vol)s from source project: %(src)s to target project: " -"%(tgt)s Return code: %(ret.status)d Message: %(ret.data)s ." -msgstr "" -"Errore durante lo spostamento del volume: %(vol)s dal progetto di origine: " -"%(src)s al progetto di destinazione: %(tgt)s Codice di ritorno: " -"%(ret.status)d Messaggio: %(ret.data)s ." - -msgid "Error not a KeyError." -msgstr "L'errore non è un KeyError." - -msgid "Error not a TypeError." -msgstr "L'errore non è un TypeError." - -#, python-format -msgid "Error occurred when creating cgsnapshot %s." -msgstr "Si è verificato un errore durante la creazione di cgsnapshot %s." - -#, python-format -msgid "Error occurred when deleting cgsnapshot %s." -msgstr "Si è verificato un errore durante l'eliminazione di cgsnapshot %s." - -#, python-format -msgid "Error occurred when updating consistency group %s." -msgstr "" -"Si è verificato un errore durante l'aggiornamento del gruppo di coerenza %s." - -#, python-format -msgid "Error renaming volume %(vol)s: %(err)s." -msgstr "Errore durante la ridenominazione del volume %(vol)s: %(err)s." - -#, python-format -msgid "Error response: %s" -msgstr "Risposta errore: %s" - -msgid "Error retrieving volume size" -msgstr "Errore durante il richiamo della dimensione del volume " - -#, python-format -msgid "" -"Error sending replication update for action id: %(id)s . Return code: " -"%(ret.status)d Message: %(ret.data)s ." 
-msgstr "" -"Errore durante l'invio dell'aggiornamento di replica per l'id azione: " -"%(id)s. Codice di ritorno: %(ret.status)d Messaggio: %(ret.data)s." - -#, python-format -msgid "" -"Error sending replication update. Returned error: %(err)s. Action: %(id)s." -msgstr "" -"Errore durante l'invio dell'aggiornamento di replica. Errore restituito: " -"%(err)s. Azione: %(id)s. " - -#, python-format -msgid "" -"Error setting replication inheritance to %(set)s for volume: %(vol)s project " -"%(project)s Return code: %(ret.status)d Message: %(ret.data)s ." -msgstr "" -"Errore durante l'impostazione dell'eredità di di replica su %(set)s per il " -"volume: %(vol)s progetto %(project)s Codice di ritorno: %(ret.status)d " -"Messaggio: %(ret.data)s ." - -#, python-format -msgid "" -"Error severing the package: %(package)s from source: %(src)s Return code: " -"%(ret.status)d Message: %(ret.data)s ." -msgstr "" -"Errore staccando il pacchetto: %(package)s dall'origine: %(src)s Codice di " -"ritorno: %(ret.status)d Messaggio: %(ret.data)s." - -#, python-format -msgid "Error unbinding volume %(vol)s from pool. %(error)s." -msgstr "" -"Errore durante lo scollegamento del volume %(vol)s dal pool. %(error)s." - -#, python-format -msgid "Error while authenticating with switch: %s." -msgstr "Errore durante l'autenticazione con lo switch: %s." - -#, python-format -msgid "Error while changing VF context %s." -msgstr "Errore durante la modifica del contesto VF %s." - -#, python-format -msgid "Error while checking the firmware version %s." -msgstr "Errore durante il controllo della versione firmware %s." - -#, python-format -msgid "Error while checking transaction status: %s" -msgstr "Errore durante il controllo dello stato della transazione: %s" - -#, python-format -msgid "Error while checking whether VF is available for management %s." -msgstr "" -"Errore durante il controllo per verificare se VF è disponibile per la " -"gestione di %s." 
- -#, python-format -msgid "" -"Error while connecting the switch %(switch_id)s with protocol %(protocol)s. " -"Error: %(error)s." -msgstr "" -"Errore durante la connessione dello switch %(switch_id)s con protocollo " -"%(protocol)s. Errore: %(error)s." - -#, python-format -msgid "Error while creating authentication token: %s" -msgstr "Errore durante la creazione del token di autenticazione: %s" - -#, python-format -msgid "Error while creating snapshot [status] %(stat)s - [result] %(res)s." -msgstr "" -"Errore durante la creazione dell'istantanea [status] %(stat)s - [result] " -"%(res)s." - -#, python-format -msgid "Error while creating volume [status] %(stat)s - [result] %(res)s." -msgstr "" -"Errore durante la creazione del volume [status] %(stat)s - [result] %(res)s." - -#, python-format -msgid "Error while deleting snapshot [status] %(stat)s - [result] %(res)s" -msgstr "" -"Errore durante l'eliminazione dell'istantanea [status] %(stat)s - [result] " -"%(res)s" - -#, python-format -msgid "Error while deleting volume [status] %(stat)s - [result] %(res)s." -msgstr "" -"Errore durante l'eliminazione del volume [status] %(stat)s - [result] " -"%(res)s." - -#, python-format -msgid "Error while extending volume [status] %(stat)s - [result] %(res)s." -msgstr "" -"Errore durante l'estensione del volume [status] %(stat)s - [result] %(res)s." - -#, python-format -msgid "Error while getting %(op)s details, returned code: %(status)s." -msgstr "" -"Errore durante il richiamo dei dettagli %(op)s, codice restituito: " -"%(status)s." - -#, python-format -msgid "Error while getting data via ssh: (command=%(cmd)s error=%(err)s)." -msgstr "" -"Errore durante il ricevimento dei dati tramite ssh: (comando=%(cmd)s errore=" -"%(err)s)." - -#, python-format -msgid "Error while getting disco information [%s]." -msgstr "Errore durante il richiamo delle informazioni disco [%s]." - -#, python-format -msgid "Error while getting nvp value: %s." 
-msgstr "Errore durante il richiamo del valore nvp: %s." - -#, python-format -msgid "Error while getting session information %s." -msgstr "Errore durante il richiamo delle informazioni sulla sessione %s." - -#, python-format -msgid "Error while parsing the data: %s." -msgstr "Errore durante l'analisi dei dati: %s." - -#, python-format -msgid "Error while querying page %(url)s on the switch, reason %(error)s." -msgstr "" -"Errore durante la query della pagina %(url)s sullo switch, motivo %(error)s." - -#, python-format -msgid "" -"Error while removing the zones and cfgs in the zone string: %(description)s." -msgstr "" -"Errore durante la rimozione di zone e cfgs nella stringa di zona. " -"%(description)s." - -#, python-format -msgid "Error while requesting %(service)s API." -msgstr "Errore durante la richiesta dell'API di %(service)s." - -#, python-format -msgid "Error while running zoning CLI: (command=%(cmd)s error=%(err)s)." -msgstr "" -"Errore durante l'esecuzione di zoning CLI: (comando=%(cmd)s errore=%(err)s)." - -#, python-format -msgid "" -"Error while updating the new zones and cfgs in the zone string. Error " -"%(description)s." -msgstr "" -"Errore durante l'aggiornamento di nuove zone e cfgs nella stringa di zona. " -"Errore %(description)s." - -msgid "Error writing field to database" -msgstr "Errore durante la scrittura del campo nel database" - -#, python-format -msgid "Error[%(stat)s - %(res)s] while getting volume id." -msgstr "Errore [%(stat)s - %(res)s] durante il richiamo dell'id volume." - -#, python-format -msgid "" -"Error[%(stat)s - %(res)s] while restoring snapshot [%(snap_id)s] into volume " -"[%(vol)s]." -msgstr "" -"Errore [%(stat)s - %(res)s] durante il ripristino dell'istantanea " -"[%(snap_id)s] nel volume [%(vol)s]." - -#, python-format -msgid "Error[status] %(stat)s - [result] %(res)s] while getting volume id." -msgstr "" -"Errore [status] %(stat)s - [result] %(res)s] durante il richiamo dell'id " -"volume." 
- -#, python-format -msgid "" -"Exceeded max scheduling attempts %(max_attempts)d for volume %(volume_id)s" -msgstr "" -"Superamento numero max tentativi di pianificazione %(max_attempts)d per il " -"volume %(volume_id)s" - -msgid "Exceeded the limit of snapshots per volume" -msgstr "Superato il limite di istantanee per volume" - -#, python-format -msgid "Exception appending meta volume to target volume %(volumename)s." -msgstr "" -"Eccezione durante l'aggiunta del volume meta al volume di destinazione " -"%(volumename)s." - -#, python-format -msgid "" -"Exception during create element replica. Clone name: %(cloneName)s Source " -"name: %(sourceName)s Extra specs: %(extraSpecs)s " -msgstr "" -"Eccezione durante la creazione della replica elemento. Nome clone: " -"%(cloneName)s Nome origine: %(sourceName)s Spec supplementari: " -"%(extraSpecs)s " - -#, python-format -msgid "Exception in _select_ds_for_volume: %s." -msgstr "Eccezione in _select_ds_for_volume: %s." - -#, python-format -msgid "Exception while forming the zone string: %s." -msgstr "Eccezione durante la formazione della stringa di zona: %s." - -#, python-format -msgid "Exception: %s" -msgstr "Eccezione: %s" - -#, python-format -msgid "Expected a uuid but received %(uuid)s." -msgstr "Era previsto un uuid ma è stato ricevuto %(uuid)s." - -#, python-format -msgid "Expected exactly one node called \"%s\"" -msgstr "Previsto esattamente un solo nodo chiamato \"%s\"" - -#, python-format -msgid "Expected integer for node_count, svcinfo lsiogrp returned: %(node)s." -msgstr "" -"Previsto numero intero per node_count, restituito svcinfo lsiogrp: %(node)s." - -#, python-format -msgid "Expected no output from CLI command %(cmd)s, got %(out)s." -msgstr "" -"Non è previsto alcun output dal comando CLI %(cmd)s, è stato ricevuto " -"%(out)s." - -#, python-format -msgid "" -"Expected single vdisk returned from lsvdisk when filtering on vdisk_UID. " -"%(count)s were returned." 
-msgstr "" -"Prevista restituzione vdisk singolo da lsvdisk durante il filtro su " -"vdisk_UID. %(count)s restituito." - -#, python-format -msgid "Expected volume size was %d" -msgstr "La dimensione del volume prevista era %d" - -#, python-format -msgid "" -"Export backup aborted, expected backup status %(expected_status)s but got " -"%(actual_status)s." -msgstr "" -"Esportazione del backup interrotta, lo stato del backup previsto è " -"%(expected_status)s ma è stato ricevuto %(actual_status)s." - -#, python-format -msgid "" -"Export record aborted, the backup service currently configured " -"[%(configured_service)s] is not the backup service that was used to create " -"this backup [%(backup_service)s]." -msgstr "" -"Esportazione del record interrotta; il servizio di backup attualmente " -"configurato [%(configured_service)s] non è il servizio di backup utilizzato " -"per creare questo backup [%(backup_service)s]." - -msgid "Extend volume error." -msgstr "Errore di estensione del volume. " - -msgid "" -"Extend volume is only supported for this driver when no snapshots exist." -msgstr "" -"Estensione volume è supportata solo per questo driver quando non esiste " -"nessuna istantanea." - -msgid "Extend volume not implemented" -msgstr "Estensione volume non implementata" - -msgid "FAST is not supported on this array." -msgstr "FAST non supportato su questo array." - -msgid "FC is the protocol but wwpns are not supplied by OpenStack." -msgstr "FC è il protocollo ma i wwpn non sono forniti da OpenStack." - -#, python-format -msgid "Faield to unassign %(volume)s" -msgstr "Impossibile annullare l'assegnazione %(volume)s" - -#, python-format -msgid "Fail to create cache volume %(volume)s. Error: %(err)s" -msgstr "Impossibile creare il volume cache %(volume)s. 
Errore: %(err)s" - -#, python-format -msgid "Failed adding connection for fabric=%(fabric)s: Error: %(err)s" -msgstr "" -"Impossibile aggiungere la connessione per fabric=%(fabric)s: Errore:%(err)s" - -msgid "Failed cgsnapshot" -msgstr "cgsnapshot non riuscito" - -#, python-format -msgid "Failed creating snapshot for group: %(response)s." -msgstr "Impossibile creare l'istantanea per il gruppo: %(response)s." - -#, python-format -msgid "Failed creating snapshot for volume %(volname)s: %(response)s." -msgstr "" -"Impossibile creare l'istantanea per il volume %(volname)s: %(response)s." - -#, python-format -msgid "Failed getting active zone set from fabric %s." -msgstr "Impossibile ottenere la zona attiva impostata da fabric %s." - -#, python-format -msgid "Failed getting details for pool %s." -msgstr "Impossibile acquisire i dettagli per il pool %s." - -#, python-format -msgid "Failed removing connection for fabric=%(fabric)s: Error: %(err)s" -msgstr "" -"Impossibile rimuovere la connessione per fabric=%(fabric)s: Errore:%(err)s" - -#, python-format -msgid "Failed to Extend Volume %(volname)s" -msgstr "Errore di estensione del volume %(volname)s" - -#, python-format -msgid "Failed to Login to 3PAR (%(url)s) because %(err)s" -msgstr "Impossibile accedere a 3PAR (%(url)s) perché %(err)s" - -msgid "Failed to access active zoning configuration." -msgstr "Impossibile accedere alla configurazione di zonatura attiva. " - -#, python-format -msgid "Failed to access zoneset status:%s" -msgstr "Impossibile accedere allo stato zoneset:%s" - -#, python-format -msgid "" -"Failed to acquire a resource lock. (serial: %(serial)s, inst: %(inst)s, ret: " -"%(ret)s, stderr: %(err)s)" -msgstr "" -"Impossibile acquisire un blocco risorsa. (serial: %(serial)s, inst: " -"%(inst)s, ret: %(ret)s, stderr: %(err)s)" - -msgid "Failed to add the logical device." -msgstr "Impossibile aggiungere l'unità logica." 
- -#, python-format -msgid "" -"Failed to add volume %(volumeName)s to consistency group %(cgName)s. Return " -"code: %(rc)lu. Error: %(error)s." -msgstr "" -"Impossibile aggiungere il volume %(volumeName)s al gruppo di coerenza " -"%(cgName)s. Codice di ritorno: %(rc)lu. Errore: %(error)s." - -msgid "Failed to add zoning configuration." -msgstr "Impossibile aggiungere la configurazione di zonatura. " - -#, python-format -msgid "" -"Failed to assign the iSCSI initiator IQN. (port: %(port)s, reason: " -"%(reason)s)" -msgstr "" -"Impossibile assegnare l'iniziatore iSCSI IQN. (porta: %(port)s, motivo: " -"%(reason)s)" - -#, python-format -msgid "Failed to associate qos_specs: %(specs_id)s with type %(type_id)s." -msgstr "Impossibile associare qos_specs: %(specs_id)s con il tipo %(type_id)s." - -#, python-format -msgid "Failed to attach iSCSI target for volume %(volume_id)s." -msgstr "Impossibile collegare la destinazione per il volume %(volume_id)s." - -#, python-format -msgid "Failed to backup volume metadata - %s" -msgstr "Impossibile eseguire il backup dei metadati del volume - %s" - -#, python-format -msgid "" -"Failed to backup volume metadata - Metadata backup object 'backup.%s.meta' " -"already exists" -msgstr "" -"Impossibile eseguire il backup sui metadati di volume - Oggetto di backup " -"dei metadati 'backup.%s.meta' esiste già" - -#, python-format -msgid "Failed to clone volume from snapshot %s." -msgstr "Impossibile clonare il volume dall'istantanea %s." - -#, python-format -msgid "Failed to connect to %(vendor_name)s Array %(host)s: %(err)s" -msgstr "Impossibile connettersi a %(vendor_name)s Array %(host)s: %(err)s" - -msgid "Failed to connect to Dell REST API" -msgstr "Impossibile collegarsi all'API Dell REST" - -msgid "Failed to connect to array" -msgstr "Impossibile stabilire una connessione all'array" - -#, python-format -msgid "Failed to connect to sheep daemon. 
addr: %(addr)s, port: %(port)s" -msgstr "" -"Impossibile connettersi al daemon sheep, indirizzo: %(addr)s, porta: %(port)s" - -#, python-format -msgid "Failed to copy image to volume: %(reason)s" -msgstr "Impossibile copiare l'immagine nel volume: %(reason)s" - -#, python-format -msgid "Failed to copy metadata to volume: %(reason)s" -msgstr "Impossibile copiare i metadati nel volume: %(reason)s" - -msgid "Failed to copy volume, destination device unavailable." -msgstr "" -"Impossibile copiare il volume, dispositivo di destinazione non disponibile. " - -msgid "Failed to copy volume, source device unavailable." -msgstr "" -"Impossibile copiare il volume, dispositivo di origine non disponibile. " - -#, python-format -msgid "Failed to create CG %(cgName)s from snapshot %(cgSnapshot)s." -msgstr "Impossibile creare il GC %(cgName)s dall'istantanea %(cgSnapshot)s." - -#, python-format -msgid "Failed to create IG, %s" -msgstr "Impossibile creare IG, %s" - -#, python-format -msgid "Failed to create Volume Group: %(vg_name)s" -msgstr "Impossibile creare il gruppo volume: %(vg_name)s" - -#, python-format -msgid "" -"Failed to create a file. (file: %(file)s, ret: %(ret)s, stderr: %(err)s)" -msgstr "" -"Impossibile creare un file. (file: %(file)s, ret: %(ret)s, stderr: %(err)s)" - -#, python-format -msgid "Failed to create a temporary snapshot for volume %s." -msgstr "Impossibile creare un'istantanea temporanea per il volume %s. " - -msgid "Failed to create api volume flow." -msgstr "Impossibile creare il flusso del volume api." - -#, python-format -msgid "Failed to create cg snapshot %(id)s due to %(reason)s." -msgstr "" -"Impossibile creare l'istantanea del gruppo di coerenza %(id)s a causa di " -"%(reason)s." - -#, python-format -msgid "Failed to create consistency group %(id)s due to %(reason)s." -msgstr "Impossibile creare il gruppo di coerenza %(id)s a causa di %(reason)s." - -#, python-format -msgid "Failed to create consistency group %(id)s:%(ret)s." 
-msgstr "Impossibile creare il gruppo di coerenza %(id)s:%(ret)s." - -#, python-format -msgid "" -"Failed to create consistency group %s because VNX consistency group cannot " -"accept compressed LUNs as members." -msgstr "" -"Impossibile creare il gruppo di coerenza %s perché il gruppo di coerenza VNX " -"non può accettare LUN compresse come membri." - -#, python-format -msgid "Failed to create consistency group: %(cgName)s." -msgstr "Impossibile creare il gruppo di coerenza: %(cgName)s." - -#, python-format -msgid "Failed to create consistency group: %(cgid)s. Error: %(excmsg)s." -msgstr "" -"Impossibile creare il gruppo di coerenza: %(cgid)s. Errore: %(excmsg)s." - -#, python-format -msgid "" -"Failed to create consistency group: %(consistencyGroupName)s Return code: " -"%(rc)lu. Error: %(error)s." -msgstr "" -"Impossibile creare il gruppo di coerenza: %(consistencyGroupName)s Codice " -"di ritorno: %(rc)lu. Errore: %(error)s." - -#, python-format -msgid "Failed to create hardware id(s) on %(storageSystemName)s." -msgstr "Impossibile creare l'id hardware su %(storageSystemName)s." - -#, python-format -msgid "" -"Failed to create host: %(name)s. Please check if it exists on the array." -msgstr "" -"Impossibile creare l'host: %(name)s. Controllare se esiste nell'array. " - -#, python-format -msgid "Failed to create hostgroup: %(name)s. Check if it exists on the array." -msgstr "" -"Impossibile creare il gruppo host: %(name)s. Controllare se esiste " -"nell'array. " - -msgid "Failed to create iqn." -msgstr "Impossibile creare l'iqn." - -#, python-format -msgid "Failed to create iscsi target for volume %(volume_id)s." -msgstr "Impossibile creare la destinazione iscsi per il volume %(volume_id)s." - -msgid "Failed to create manage existing flow." -msgstr "Impossibile creare il flusso di gestione esistente." - -msgid "Failed to create manage_existing flow." -msgstr "Impossibile creare il flusso manage_existing." 
- -msgid "Failed to create map on mcs, no channel can map." -msgstr "Impossibile creare la mappa su mcs, nessun canale può associarsi. " - -msgid "Failed to create map." -msgstr "Impossibile creare la mappa." - -#, python-format -msgid "Failed to create metadata for volume: %(reason)s" -msgstr "Impossibile creare i metadati per il volume: %(reason)s" - -msgid "Failed to create partition." -msgstr "Impossibile creare la partizione. " - -#, python-format -msgid "Failed to create qos_specs: %(name)s with specs %(qos_specs)s." -msgstr "" -"Impossibile creare qos_specs: %(name)s con le specifiche %(qos_specs)s." - -msgid "Failed to create replica." -msgstr "Impossibile creare la replica. " - -msgid "Failed to create scheduler manager volume flow" -msgstr "Impossibile creare il flusso del volume del gestore scheduler" - -#, python-format -msgid "Failed to create snapshot %s" -msgstr "Impossibile creare istantanea %s" - -#, python-format -msgid "Failed to create snapshot for cg: %(cgName)s." -msgstr "Impossibile creare l'istantanea per cg: %(cgName)s." - -#, python-format -msgid "Failed to create snapshot for volume %s." -msgstr "Impossibile creare l'istantanea per il volume %s." - -#, python-format -msgid "Failed to create snapshot policy on volume %(vol)s: %(res)s." -msgstr "" -"impossibile creare la politica di istantanea sul volume %(vol)s: %(res)s." - -#, python-format -msgid "Failed to create snapshot resource area on volume %(vol)s: %(res)s." -msgstr "" -"Impossibile creare l'area risorsa istantanea sul volume %(vol)s:%(res)s. " - -msgid "Failed to create snapshot." -msgstr "Impossibile creare l'istantanea." - -#, python-format -msgid "" -"Failed to create snapshot. CloudByte volume information not found for " -"OpenStack volume [%s]." -msgstr "" -"Impossibile creare l'istantanea. Non sono state trovate le informazioni sul " -"volume CloudByte per il volume OpenStack [%s]." - -#, python-format -msgid "Failed to create south bound connector for %s." 
-msgstr "Impossibile creare il connettore south bound per %s." - -#, python-format -msgid "Failed to create storage group %(storageGroupName)s." -msgstr "Impossibile creare il gruppo di archiviazione %(storageGroupName)s." - -#, python-format -msgid "Failed to create thin pool, error message was: %s" -msgstr "Creazione del thin pool non riuscita, messaggio di errore: %s" - -#, python-format -msgid "Failed to create volume %s" -msgstr "Impossibile creare il volume %s" - -#, python-format -msgid "Failed to delete SI for volume_id: %(volume_id)s because it has pair." -msgstr "" -"Impossibile eliminare SI per volume_id: %(volume_id)s perché ha coppia. " - -#, python-format -msgid "Failed to delete a logical device. (LDEV: %(ldev)s, reason: %(reason)s)" -msgstr "" -"Impossibile eliminare un'unità logica. (LDEV: %(ldev)s, motivo: %(reason)s)" - -#, python-format -msgid "Failed to delete cgsnapshot %(id)s due to %(reason)s." -msgstr "" -"Impossibile eliminare l'istantanea del gruppo di coerenza %(id)s a causa di " -"%(reason)s." - -#, python-format -msgid "Failed to delete consistency group %(id)s due to %(reason)s." -msgstr "" -"Impossibile eliminare il gruppo di coerenza %(id)s a causa di %(reason)s." - -#, python-format -msgid "Failed to delete consistency group: %(cgName)s." -msgstr "Impossibile eliminare il gruppo di coerenza: %(cgName)s." - -#, python-format -msgid "" -"Failed to delete consistency group: %(consistencyGroupName)s Return code: " -"%(rc)lu. Error: %(error)s." -msgstr "" -"Impossibile eliminare il gruppo di coerenza: %(consistencyGroupName)s Codice " -"di ritorno: %(rc)lu. Errore: %(error)s." - -msgid "Failed to delete device." -msgstr "Impossibile eliminare il dispositivo." - -#, python-format -msgid "" -"Failed to delete fileset for consistency group %(cgname)s. Error: %(excmsg)s." -msgstr "" -"Impossibile eliminare il fileset per il gruppo di coerenza %(cgname)s. " -"Errore: %(excmsg)s." - -msgid "Failed to delete iqn." 
-msgstr "Impossibile eliminare l'iqn. " - -msgid "Failed to delete map." -msgstr "Impossibile eliminare la mappa. " - -msgid "Failed to delete partition." -msgstr "Impossibile eliminare la partizione. " - -msgid "Failed to delete replica." -msgstr "Impossibile eliminare la replica. " - -#, python-format -msgid "Failed to delete snapshot %s" -msgstr "Impossibile eliminare istantanea %s" - -#, python-format -msgid "Failed to delete snapshot for cg: %(cgId)s." -msgstr "Impossibile eliminare l'istantanea per cg: %(cgId)s." - -#, python-format -msgid "Failed to delete snapshot for snapshot_id: %s because it has pair." -msgstr "" -"Impossibile eliminare l'istantanea per snapshot_id: %s perché ha coppia. " - -msgid "Failed to delete snapshot." -msgstr "Impossibile eliminare l'istantanea." - -#, python-format -msgid "Failed to delete volume %(volumeName)s." -msgstr "Impossibile eliminare il volume %(volumeName)s." - -#, python-format -msgid "" -"Failed to delete volume for volume_id: %(volume_id)s because it has pair." -msgstr "" -"Impossibile eliminare il volume per volume_id: %(volume_id)s perché ha " -"coppia. " - -#, python-format -msgid "Failed to detach iSCSI target for volume %(volume_id)s." -msgstr "Impossibile scollegare la destinazione iSCSI del volume %(volume_id)s." - -msgid "Failed to determine blockbridge API configuration" -msgstr "Impossibile determinare la configurazione API blockbridge" - -msgid "Failed to disassociate qos specs." -msgstr "Impossibile annullare l'associazione delle specifiche (specs) qos." - -#, python-format -msgid "Failed to disassociate qos_specs: %(specs_id)s with type %(type_id)s." -msgstr "" -"Impossibile annullare l'associazione di qos_specs: %(specs_id)s con il tipo " -"%(type_id)s." 
- -#, python-format -msgid "" -"Failed to ensure snapshot resource area, could not locate volume for id %s" -msgstr "" -"Impossibile garantire l'area risorsa istantanea, impossibile individuare il " -"volume per l'ID %s " - -msgid "Failed to establish connection with Coho cluster" -msgstr "Impossibile stabilire la connessione con il cluster Coho" - -#, python-format -msgid "" -"Failed to execute CloudByte API [%(cmd)s]. Http status: %(status)s, Error: " -"%(error)s." -msgstr "" -"Impossibile eseguire API CloudByte [%(cmd)s]. Stato Http:%(status)s, Errore: " -"%(error)s." - -msgid "Failed to execute common command." -msgstr "Impossibile eseguire il comando comune. " - -#, python-format -msgid "Failed to export for volume: %(reason)s" -msgstr "Impossibile eseguire l'esportazione per il volume: %(reason)s" - -#, python-format -msgid "Failed to extend volume %(name)s, Error msg: %(msg)s." -msgstr "" -"Impossibile estendere il volume %(name)s, Messaggio di errore: %(msg)s. " - -msgid "Failed to find QoSnode" -msgstr "Impossibile trovare QoSnode" - -msgid "Failed to find Storage Center" -msgstr "Storage Center non trovato" - -msgid "Failed to find a vdisk copy in the expected pool." -msgstr "Impossibile trovare una copia del disco virtuale nel pool previsto." - -msgid "Failed to find account for volume." -msgstr "Impossibile trovare l'account per il volume. " - -#, python-format -msgid "Failed to find fileset for path %(path)s, command output: %(cmdout)s." -msgstr "" -"Impossibile rilevare il fileset per il percorso %(path)s, output del " -"comando: %(cmdout)s." - -#, python-format -msgid "Failed to find group snapshot named: %s" -msgstr "Impossibile trovare l'istantanea del gruppo denominata: %s" - -#, python-format -msgid "Failed to find host %s." -msgstr "Impossibile trovare l'host %s." - -#, python-format -msgid "Failed to find iSCSI initiator group containing %(initiator)s." -msgstr "" -"Impossibile trovare il gruppo di iniziatori iSCSI contenente %(initiator)s." 
- -#, python-format -msgid "Failed to get CloudByte account details for account [%s]." -msgstr "Non è stato possibile ottenere i dettagli dell'account CloudByte [%s]." - -#, python-format -msgid "Failed to get LUN target details for the LUN %s" -msgstr "Impossibile ottenere i dettagli della destinazione LUN per LUN %s" - -#, python-format -msgid "Failed to get LUN target details for the LUN %s." -msgstr "Impossibile ottenere i dettagli della destinazione LUN per la LUN %s" - -#, python-format -msgid "Failed to get LUN target list for the LUN %s" -msgstr "Impossibile acquisire l'elenco di destinazione LUN per LUN %s" - -#, python-format -msgid "Failed to get Partition ID for volume %(volume_id)s." -msgstr "Impossibile ottenere l'ID partizione per il volume %(volume_id)s." - -#, python-format -msgid "Failed to get Raid Snapshot ID from Snapshot %(snapshot_id)s." -msgstr "" -"Impossibile ottenere l'ID istantanea raid dall'istantanea %(snapshot_id)s. " - -#, python-format -msgid "Failed to get Raid Snapshot ID from snapshot: %(snapshot_id)s." -msgstr "" -"Impossibile ottenere l'ID istantanea raid dall'istantanea %(snapshot_id)s. " - -msgid "Failed to get SplitMirror." -msgstr "Impossibile ottenere SplitMirror." - -#, python-format -msgid "" -"Failed to get a storage resource. The system will attempt to get the storage " -"resource again. (resource: %(resource)s)" -msgstr "" -"Impossibile ottenere la risorsa di memoria. Il sistema tenterà di acquisire " -"di nuovo la risorsa di memoria. (risorsa: %(resource)s)" - -#, python-format -msgid "Failed to get all associations of qos specs %s" -msgstr "" -"Impossibile ottenere tutte le associazioni delle specifiche (specs) qos %s" - -msgid "Failed to get channel info." -msgstr "Impossibile ottenere le informazioni canale. " - -#, python-format -msgid "Failed to get code level (%s)." -msgstr "Impossibile acquisire il livello di codice (%s)." - -msgid "Failed to get device info." 
-msgstr "Impossibile ottenere le informazioni dispositivo. " - -#, python-format -msgid "Failed to get domain because CPG (%s) doesn't exist on array." -msgstr "Impossibile ottenere il dominio poiché CPG (%s) non esiste nell'array." - -msgid "Failed to get image snapshots." -msgstr "Impossibile ottenere le istantanee dell'immagine." - -#, python-format -msgid "Failed to get ip on Channel %(channel_id)s with volume: %(volume_id)s." -msgstr "" -"Impossibile richiamare l'ip sul canale %(channel_id)s con il volume: " -"%(volume_id)s." - -msgid "Failed to get iqn info." -msgstr "Impossibile ottenere le informazioni iqn. " - -msgid "Failed to get license info." -msgstr "Impossibile ottenere le informazioni licenza. " - -msgid "Failed to get lv info." -msgstr "Impossibile ottenere le informazioni lv. " - -msgid "Failed to get map info." -msgstr "Impossibile ottenere le informazioni mappa. " - -msgid "Failed to get migration task." -msgstr "Impossibile ottenere l'attività di migrazione." - -msgid "Failed to get model update from clone" -msgstr "Impossibile ottenere l'aggiornamento del modello dal clone" - -msgid "Failed to get name server info." -msgstr "Impossibile ottenere le informazioni sul server nomi. " - -msgid "Failed to get network info." -msgstr "Impossibile ottenere le informazioni rete. " - -#, python-format -msgid "Failed to get new part id in new pool: %(pool_id)s." -msgstr "Impossibile ottenere l'id nuova parte nel nuovo pool: %(pool_id)s. " - -msgid "Failed to get partition info." -msgstr "Impossibile ottenere le informazioni partizione. " - -#, python-format -msgid "Failed to get pool id with volume %(volume_id)s." -msgstr "Impossibile ottenere l'id pool con il volume %(volume_id)s." - -#, python-format -msgid "Failed to get remote copy information for %(volume)s due to %(err)s." -msgstr "" -"Impossibile ottenere le informazioni di copia remota per %(volume)s a causa " -"di %(err)s." 
- -#, python-format -msgid "" -"Failed to get remote copy information for %(volume)s. Exception: %(err)s." -msgstr "" -"Impossibile ottenere le informazioni di copia remota per %(volume)s. " -"Eccezione: %(err)s." - -msgid "Failed to get replica info." -msgstr "Impossibile ottenere le informazioni replica. " - -msgid "Failed to get show fcns database info." -msgstr "Impossibile visualizzare le informazioni sul database fcns. " - -#, python-format -msgid "Failed to get size of volume %s" -msgstr "Impossibile ottenere la dimensione del volume %s" - -#, python-format -msgid "Failed to get snapshot for volume %s." -msgstr "Impossibile ottenere l'istantanea per il volume %s." - -msgid "Failed to get snapshot info." -msgstr "Impossibile ottenere le informazioni istantanea. " - -#, python-format -msgid "Failed to get target IQN for the LUN %s" -msgstr "Impossibile ottenere IQN di destinazione per LUN %s" - -msgid "Failed to get target LUN of SplitMirror." -msgstr "Impossibile ottenere la LUN di destinazione di SplitMirror." - -#, python-format -msgid "Failed to get target portal for the LUN %s" -msgstr "Impossibile ottenere il portale di destinazione per LUN %s" - -msgid "Failed to get targets" -msgstr "Impossibile ottenere le destinazioni" - -msgid "Failed to get wwn info." -msgstr "Impossibile ottenere le informazioni wwn. " - -#, python-format -msgid "" -"Failed to get, create or add volume %(volumeName)s to masking view " -"%(maskingViewName)s. The error message received was %(errorMessage)s." -msgstr "" -"Impossibile ottenere, creare o aggiungere il volume %(volumeName)s alla " -"vista di mascheramento %(maskingViewName)s. Il messaggio di errore ricevuto " -"è %(errorMessage)s." - -msgid "Failed to identify volume backend." -msgstr "Impossibile identificare il backend del volume." - -#, python-format -msgid "Failed to link fileset for the share %(cgname)s. Error: %(excmsg)s." -msgstr "" -"Impossibile collegare il fileset per la condivisione %(cgname)s. 
Errore: " -"%(excmsg)s." - -#, python-format -msgid "Failed to log on %s Array (invalid login?)." -msgstr "Impossibile accedere a %s Array (accesso non valido?). " - -#, python-format -msgid "Failed to login for user %s." -msgstr "Impossibile eseguire l'accesso per l'utente %s. " - -msgid "Failed to login with all rest URLs." -msgstr "Impossibile accedere con tutti gli URL rest. " - -#, python-format -msgid "" -"Failed to make a request to Datera cluster endpoint due to the following " -"reason: %s" -msgstr "" -"Impossibile effettuare la richiesta all'endpoint del cluster Datera a causa " -"del seguente motivo: %s" - -msgid "Failed to manage api volume flow." -msgstr "Impossibile gestire il flusso del volume api." - -#, python-format -msgid "" -"Failed to manage existing %(type)s %(name)s, because reported size %(size)s " -"was not a floating-point number." -msgstr "" -"Impossibile gestire %(type)s %(name)s esistente, poiché la dimensione " -"%(size)s riportata non è un numero a virgola mobile." - -#, python-format -msgid "" -"Failed to manage existing volume %(name)s, because of error in getting " -"volume size." -msgstr "" -"Impossibile gestire il volume esistente %(name)s, a causa di un errore " -"durante l'acquisizione della dimensione del volume." - -#, python-format -msgid "" -"Failed to manage existing volume %(name)s, because rename operation failed: " -"Error msg: %(msg)s." -msgstr "" -"Impossibile gestire il volume esistente %(name)s, perché l'operazione di " -"ridenominazione ha avuto esito negativo: messaggio di errore: %(msg)s." - -#, python-format -msgid "" -"Failed to manage existing volume %(name)s, because reported size %(size)s " -"was not a floating-point number." -msgstr "" -"Impossibile gestire il volume esistente %(name)s, poiché la dimensione " -"%(size)s riportata non è un numero a virgola mobile." - -#, python-format -msgid "" -"Failed to manage existing volume due to I/O group mismatch. 
The I/O group of " -"the volume to be managed is %(vdisk_iogrp)s. I/O groupof the chosen type is " -"%(opt_iogrp)s." -msgstr "" -"Impossibile gestire il volume esistente a causa di una mancata " -"corrispondenza del gruppo I/O. Il gruppo I/O del volume da gestire è " -"%(vdisk_iogrp)s. Il gruppo I/O del tipo scelto è %(opt_iogrp)s." - -#, python-format -msgid "" -"Failed to manage existing volume due to the pool of the volume to be managed " -"does not match the backend pool. Pool of the volume to be managed is " -"%(vdisk_pool)s. Pool of the backend is %(backend_pool)s." -msgstr "" -"Impossibile gestire il volume esistente in quanto il pool del volume da " -"gestire non corrisponde al pool del backend. Il pool del volume da gestire è " -"%(vdisk_pool)s. Il pool del backend è %(backend_pool)s." - -msgid "" -"Failed to manage existing volume due to the volume to be managed is " -"compress, but the volume type chosen is not compress." -msgstr "" -"Impossibile gestire il volume esistente in quanto il volume da gestire è " -"compress, ma il tipo di volume scelto non è compress." - -msgid "" -"Failed to manage existing volume due to the volume to be managed is not " -"compress, but the volume type chosen is compress." -msgstr "" -"Impossibile gestire il volume esistente in quanto il volume da gestire non è " -"compress, ma il tipo di volume scelto è compress." - -msgid "" -"Failed to manage existing volume due to the volume to be managed is not in a " -"valid I/O group." -msgstr "" -"Impossibile gestire il volume esistente in quanto il volume da gestire non " -"si trova in un gruppo I/O valido." - -msgid "" -"Failed to manage existing volume due to the volume to be managed is thick, " -"but the volume type chosen is thin." -msgstr "" -"Impossibile gestire il volume esistente in quanto il volume da gestire è " -"thick ma il tipo di volume scelto è thin." 
- -msgid "" -"Failed to manage existing volume due to the volume to be managed is thin, " -"but the volume type chosen is thick." -msgstr "" -"Impossibile gestire il volume esistente in quanto il volume da gestire è " -"thin, ma il tipo di volume scelto è thick." - -#, python-format -msgid "Failed to manage volume %s." -msgstr "Impossibile gestire il volume %s." - -#, python-format -msgid "" -"Failed to map a logical device. (LDEV: %(ldev)s, LUN: %(lun)s, port: " -"%(port)s, id: %(id)s)" -msgstr "" -"Impossibile associare un'unità logica. (LDEV: %(ldev)s, LUN: %(lun)s, porta: " -"%(port)s, id: %(id)s)" - -msgid "Failed to migrate volume for the first time." -msgstr "Impossibile migrare il volume per la prima volta." - -msgid "Failed to migrate volume for the second time." -msgstr "Impossibile migrare il volume per la seconda volta." - -#, python-format -msgid "Failed to move LUN mapping. Return code: %s" -msgstr "Impossibile spostare l'associazione LUN. Codice di ritorno: %s " - -#, python-format -msgid "Failed to move volume %s." -msgstr "Impossibile spostare il volume %s." - -#, python-format -msgid "Failed to open a file. (file: %(file)s, ret: %(ret)s, stderr: %(err)s)" -msgstr "" -"Impossibile aprire un file. (file: %(file)s, ret: %(ret)s, stderr: %(err)s)" - -#, python-format -msgid "" -"Failed to parse CLI output:\n" -" command: %(cmd)s\n" -" stdout: %(out)s\n" -" stderr: %(err)s." -msgstr "" -"Impossibile analizzare l'output CLI:\n" -" comando: %(cmd)s\n" -" stdout: %(out)s\n" -" stderr: %(err)s." 
- -msgid "" -"Failed to parse the configuration option 'keystone_catalog_info', must be in " -"the form ::" -msgstr "" -"Impossibile analizzare l'opzione di configurazione 'keystone_catalog_info', " -"deve avere il formato ::" - -msgid "" -"Failed to parse the configuration option 'swift_catalog_info', must be in " -"the form ::" -msgstr "" -"Impossibile analizzare l'opzione di configurazione 'swift_catalog_info', " -"deve avere il formato ::" - -#, python-format -msgid "" -"Failed to perform a zero-page reclamation. (LDEV: %(ldev)s, reason: " -"%(reason)s)" -msgstr "" -"Impossibile eseguire una correzione a pagina zero. (LDEV: %(ldev)s, motivo: " -"%(reason)s)" - -#, python-format -msgid "Failed to remove export for volume %(volume)s: %(reason)s" -msgstr "" -"Impossibile rimuovere l'esportazione per il volume %(volume)s: %(reason)s" - -#, python-format -msgid "Failed to remove iscsi target for volume %(volume_id)s." -msgstr "" -"Impossibile rimuovere la destinazione iscsi per il volume %(volume_id)s." - -#, python-format -msgid "" -"Failed to remove volume %(volumeName)s from consistency group %(cgName)s. " -"Return code: %(rc)lu. Error: %(error)s." -msgstr "" -"Impossibile rimuovere il volume %(volumeName)s dal gruppo di coerenza " -"%(cgName)s. Codice di ritorno: %(rc)lu. Errore: %(error)s." - -#, python-format -msgid "Failed to remove volume %(volumeName)s from default SG." -msgstr "Impossibile rimuovere il volume %(volumeName)s dal GM predefinito. " - -#, python-format -msgid "Failed to remove volume %(volumeName)s from default SG: %(volumeName)s." -msgstr "" -"Impossibile rimuovere il volume %(volumeName)s da SG predefinito: " -"%(volumeName)s." - -#, python-format -msgid "" -"Failed to remove: %(volumename)s. from the default storage group for FAST " -"policy %(fastPolicyName)s." -msgstr "" -"Impossibile rimuovere: %(volumename)s. dal gruppo storage predefinito per la " -"politica FAST %(fastPolicyName)s." 
- -#, python-format -msgid "" -"Failed to rename logical volume %(name)s, error message was: %(err_msg)s" -msgstr "" -"Impossibile ridenominare il volume logico %(name)s; il messaggio di errore " -"è: %(err_msg)s" - -#, python-format -msgid "Failed to retrieve active zoning configuration %s" -msgstr "Impossibile ripristinare la configurazione di zona attiva %s" - -#, python-format -msgid "" -"Failed to set CHAP authentication for target IQN %(iqn)s. Details: %(ex)s" -msgstr "" -"Impossibile impostare l'autenticazione CHAP per la destinazione IQN %(iqn)s. " -"Dettagli: %(ex)s" - -#, python-format -msgid "Failed to set QoS for existing volume %(name)s, Error msg: %(msg)s." -msgstr "" -"Impossibile impostare QoS per il volume esistente %(name)s, Messaggio di " -"errore: %(msg)s. " - -msgid "Failed to set attribute 'Incoming user' for SCST target." -msgstr "" -"Impossibile impostare l'attributo 'Utente in entrata' per la destinazione " -"SCST." - -msgid "Failed to set partition." -msgstr "Impossibile impostare la partizione. " - -#, python-format -msgid "" -"Failed to set permissions for the consistency group %(cgname)s. Error: " -"%(excmsg)s." -msgstr "" -"Impossibile impostare le autorizzazioni per il gruppo di coerenza " -"%(cgname)s. Errore: %(excmsg)s." - -#, python-format -msgid "" -"Failed to specify a logical device for the volume %(volume_id)s to be " -"unmapped." -msgstr "" -"Impossibile specificare un'unità logica per il volume %(volume_id)s per cui " -"annullare l'associazione." - -#, python-format -msgid "" -"Failed to specify a logical device to be deleted. (method: %(method)s, id: " -"%(id)s)" -msgstr "" -"Impossibile specificare un'unità logica da eliminare. (metodo: %(method)s, " -"id: %(id)s)" - -msgid "Failed to terminate migrate session." -msgstr "Impossibile terminare la sessione di migrazione." 
- -#, python-format -msgid "Failed to unbind volume %(volume)s" -msgstr "Impossibile scollegare il volume %(volume)s" - -#, python-format -msgid "" -"Failed to unlink fileset for consistency group %(cgname)s. Error: %(excmsg)s." -msgstr "" -"Impossibile scollegare la fileset per il gruppo di coerenza %(cgname)s. " -"Errore: %(excmsg)s." - -#, python-format -msgid "Failed to unmap a logical device. (LDEV: %(ldev)s, reason: %(reason)s)" -msgstr "" -"Impossibile annullare l'associazione di un'unità logica. (LDEV: %(ldev)s, " -"motivo: %(reason)s)" - -#, python-format -msgid "Failed to update consistency group: %(cgName)s." -msgstr "Impossibile aggiornare il gruppo di coerenza: %(cgName)s." - -#, python-format -msgid "Failed to update metadata for volume: %(reason)s" -msgstr "Impossibile aggiornare i metadati per il volume: %(reason)s" - -msgid "Failed to update or delete zoning configuration" -msgstr "Impossibile aggiornare o eliminare la configurazione di zona" - -msgid "Failed to update or delete zoning configuration." -msgstr "Impossibile aggiornare o eliminare la configurazione di zona." - -#, python-format -msgid "Failed to update qos_specs: %(specs_id)s with specs %(qos_specs)s." -msgstr "" -"Impossibile aggiornare qos_specs: %(specs_id)s con le specifiche " -"%(qos_specs)s." - -msgid "Failed to update quota usage while retyping volume." -msgstr "" -"Impossibile aggiornare l'utilizzo della quota durante la riscrittura del " -"volume." - -msgid "Failed to update snapshot." -msgstr "Impossibile aggiornare l'istantanea." - -#, python-format -msgid "" -"Failed updating volume %(vol_id)s metadata using the provided %(src_type)s " -"%(src_id)s metadata" -msgstr "" -"Impossibile aggiornare i metadati del volume %(vol_id)s utilizzando i " -"metadati %(src_type)s %(src_id)s forniti" - -#, python-format -msgid "Failure creating volume %s." -msgstr "Errore nella creazione del volume %s." - -#, python-format -msgid "Failure getting LUN info for %s." 
-msgstr "Errore durante l'acquisizione delle informazioni sulla LUN per %s." - -#, python-format -msgid "Failure moving new cloned LUN to %s." -msgstr "Errore durante lo spostamento della nuova LUN clonata in %s." - -#, python-format -msgid "Failure staging LUN %s to tmp." -msgstr "Errore di trasferimento della LUN %s in tmp." - -#, python-format -msgid "Fexvisor failed to add volume %(id)s due to %(reason)s." -msgstr "" -"Fexvisor non è riuscito ad aggiungere il volume %(id)s a causa di %(reason)s." - -#, python-format -msgid "" -"Fexvisor failed to join the volume %(vol)s in the group %(group)s due to " -"%(ret)s." -msgstr "" -"Fexvisor non è riuscito ad unirsi al volume %(vol)s nel gruppo %(group)s a " -"causa di %(ret)s." - -#, python-format -msgid "" -"Fexvisor failed to remove the volume %(vol)s in the group %(group)s due to " -"%(ret)s." -msgstr "" -"Fexvisor non è riuscito a rimuovere il volume %(vol)s nel gruppo %(group)s a " -"causa di %(ret)s." - -#, python-format -msgid "Fexvisor failed to remove volume %(id)s due to %(reason)s." -msgstr "" -"Fexvisor non è riuscito a rimuovere il volume %(id)s a causa di %(reason)s." - -#, python-format -msgid "Fibre Channel SAN Lookup failure: %(reason)s" -msgstr "Errore di controllo SAN di Fibre Channel: %(reason)s" - -#, python-format -msgid "Fibre Channel Zone operation failed: %(reason)s" -msgstr "Operazione Fibre Channel Zone non riuscita: %(reason)s" - -#, python-format -msgid "Fibre Channel connection control failure: %(reason)s" -msgstr "Errore di controllo connessione di Fibre Channel: %(reason)s" - -#, python-format -msgid "File %(file_path)s could not be found." -msgstr "Impossibile trovare il file %(file_path)s." - -#, python-format -msgid "File %(path)s has invalid backing file %(bfile)s, aborting." -msgstr "" -"Il file %(path)s ha un file di backup %(bfile)s non valido, operazione " -"interrotta." - -#, python-format -msgid "File already exists at %s." -msgstr "Il file già esiste in %s." 
- -#, python-format -msgid "File already exists at: %s" -msgstr "Il file già esiste in: %s" - -msgid "Find host in hostgroup error." -msgstr "Errore di rilevamento host nel gruppo host." - -msgid "Find host lun id error." -msgstr "Errore di rilevamento id lun host." - -msgid "Find lun group from mapping view error." -msgstr "Errore di rilevamento gruppo lun dalla vista associazione." - -msgid "Find mapping view error." -msgstr "Errore di rilevamento vista associazione." - -msgid "Find portgroup error." -msgstr "Errore di rilevamento gruppo porte." - -msgid "Find portgroup from mapping view error." -msgstr "Errore di rilevamento gruppo porte dalla vista associazione." - -#, python-format -msgid "" -"Flash Cache Policy requires WSAPI version '%(fcache_version)s' version " -"'%(version)s' is installed." -msgstr "" -"La politica della cache flash richiede WSAPI versione '%(fcache_version)s', " -"è installata la versione '%(version)s'." - -#, python-format -msgid "Flexvisor assign volume failed.:%(id)s:%(status)s." -msgstr "Flexvisor assegnazione volume non riuscita: %(id)s:%(status)s." - -#, python-format -msgid "Flexvisor assign volume failed:%(id)s:%(status)s." -msgstr "Flexvisor errore di assegnazione volume:%(id)s:%(status)s." - -#, python-format -msgid "" -"Flexvisor could not find volume %(id)s snapshot in the group %(vgid)s " -"snapshot %(vgsid)s." -msgstr "" -"Flexvisor non è riuscito a trovare l'istantanea del volume %(id)s nel gruppo " -"%(vgid)s istantanea %(vgsid)s." - -#, python-format -msgid "Flexvisor create volume failed.:%(volumeid)s:%(status)s." -msgstr "Flexvisor non è riuscito a creare il volume.:%(volumeid)s:%(status)s." - -#, python-format -msgid "Flexvisor failed deleting volume %(id)s: %(status)s." -msgstr "Flexvisor non è riuscito ad eliminare il volume %(id)s: %(status)s." - -#, python-format -msgid "Flexvisor failed to add volume %(id)s to group %(cgid)s." 
-msgstr "" -"Flexvisor non è riuscito ad aggiungere il volume %(id)s al gruppo %(cgid)s." - -#, python-format -msgid "" -"Flexvisor failed to assign volume %(id)s due to unable to query status by " -"event id." -msgstr "" -"Flexvisor non è riuscito ad assegnare il volume %(id)s perché non riesce ad " -"eseguire la query dello stato utilizzando l'id evento." - -#, python-format -msgid "Flexvisor failed to assign volume %(id)s: %(status)s." -msgstr "Flexvisor non è riuscito ad assegnare il volume %(id)s: %(status)s." - -#, python-format -msgid "Flexvisor failed to assign volume %(volume)s iqn %(iqn)s." -msgstr "" -"Flexvisor non è riuscito ad assegnare il volume %(volume)s iqn %(iqn)s." - -#, python-format -msgid "Flexvisor failed to clone volume %(id)s: %(status)s." -msgstr "Flexvisor non è riuscito a clonare il volume %(id)s: %(status)s." - -#, python-format -msgid "Flexvisor failed to clone volume (failed to get event) %(id)s." -msgstr "" -"Flexvisor non è riuscito a clonare il volume (evento get non riuscito) " -"%(id)s." - -#, python-format -msgid "Flexvisor failed to create snapshot for volume %(id)s: %(status)s." -msgstr "" -"Flexvisor non è riuscito a creare l'istantanea per il volume %(id)s: " -"%(status)s." - -#, python-format -msgid "" -"Flexvisor failed to create snapshot for volume (failed to get event) %(id)s." -msgstr "" -"Flexvisor non è riuscito a creare l'istantanea per il (evento get non " -"riuscito) %(id)s." - -#, python-format -msgid "Flexvisor failed to create volume %(id)s in the group %(vgid)s." -msgstr "" -"Flexvisor non è riuscito a creare il volume %(id)s nel gruppo %(vgid)s." - -#, python-format -msgid "Flexvisor failed to create volume %(volume)s: %(status)s." -msgstr "Flexvisor non è riuscito a creare il volume %(volume)s: %(status)s." - -#, python-format -msgid "Flexvisor failed to create volume (get event) %s." -msgstr "Flexvisor non è riuscito a creare il volume (evento get) %s." 
- -#, python-format -msgid "Flexvisor failed to create volume from snapshot %(id)s: %(status)s." -msgstr "" -"Flexvisor non è riuscito a creare il volume da un'istantanea %(id)s: " -"%(status)s." - -#, python-format -msgid "Flexvisor failed to create volume from snapshot %(id)s:%(status)s." -msgstr "" -"Flexvisor non è riuscito a creare il volume da un'istantanea %(id)s: " -"%(status)s." - -#, python-format -msgid "" -"Flexvisor failed to create volume from snapshot (failed to get event) %(id)s." -msgstr "" -"Flexvisor non è riuscito a creare il volume da un'istantanea (evento get non " -"riuscito) %(id)s." - -#, python-format -msgid "Flexvisor failed to delete snapshot %(id)s: %(status)s." -msgstr "Flexvisor non è riuscito ad eliminare l'istantanea %(id)s: %(status)s." - -#, python-format -msgid "Flexvisor failed to delete snapshot (failed to get event) %(id)s." -msgstr "" -"Flexvisor non è riuscito ad eliminare l'istantanea (evento get non " -"riuscito) %(id)s." - -#, python-format -msgid "Flexvisor failed to delete volume %(id)s: %(status)s." -msgstr "Flexvisor non è riuscito ad eliminare il volume %(id)s: %(status)s." - -#, python-format -msgid "Flexvisor failed to extend volume %(id)s: %(status)s." -msgstr "Flexvisor non è riuscito ad estendere il volume %(id)s: %(status)s." - -#, python-format -msgid "Flexvisor failed to extend volume %(id)s:%(status)s." -msgstr "Flexvisor non è riuscito ad estendere il volume %(id)s: %(status)s." - -#, python-format -msgid "Flexvisor failed to extend volume (failed to get event) %(id)s." -msgstr "" -"Flexvisor non è riuscito ad estendere il volume (evento get non riuscito) " -"%(id)s." - -#, python-format -msgid "Flexvisor failed to get pool info %(id)s: %(status)s." -msgstr "" -"Flexvisor non è riuscito ad acquisire le informazioni sul pool %(id)s: " -"%(status)s." - -#, python-format -msgid "" -"Flexvisor failed to get snapshot id of volume %(id)s from group %(vgid)s." 
-msgstr "" -"Flexvisor non è riuscito ad ottenere l'ID istantanea del volume %(id)s dal " -"gruppo %(vgid)s." - -#, python-format -msgid "Flexvisor failed to remove volume %(id)s from group %(cgid)s." -msgstr "" -"Flexvisor non è riuscito a rimuovere il volume %(id)s dal gruppo %(cgid)s." - -#, python-format -msgid "Flexvisor failed to spawn volume from snapshot %(id)s:%(status)s." -msgstr "" -"Flexvisor non è riuscito a generare il volume da un'istantanea %(id)s:" -"%(status)s." - -#, python-format -msgid "" -"Flexvisor failed to spawn volume from snapshot (failed to get event) %(id)s." -msgstr "" -"Flexvisor non è riuscito a generare il volume da un'istantanea (evento get " -"non riuscito) %(id)s." - -#, python-format -msgid "Flexvisor failed to unassign volume %(id)s: %(status)s." -msgstr "" -"Flexvisor non è riuscito ad annullare l'assegnazione del volume %(id)s: " -"%(status)s." - -#, python-format -msgid "Flexvisor failed to unassign volume (get event) %(id)s." -msgstr "" -"Flexvisor non è riuscito ad annullare l'assegnazione del volume (evento get) " -"%(id)s." - -#, python-format -msgid "Flexvisor failed to unassign volume:%(id)s:%(status)s." -msgstr "" -"Flexvisor non è riuscito ad annullare l'assegnazione del volume: %(id)s: " -"%(status)s." - -#, python-format -msgid "Flexvisor unable to find the source volume %(id)s info." -msgstr "Flexvisor è riuscito a trovare l'origine del volume %(id)s." - -#, python-format -msgid "Flexvisor unassign volume failed:%(id)s:%(status)s." -msgstr "" -"Flexvisor errore annullando l'assegnazione del volume:%(id)s:%(status)s." - -#, python-format -msgid "Flexvisor volume %(id)s failed to join group %(vgid)s." -msgstr "" -"Il volume Flexvisor %(id)s non è riuscito ad unirsi al gruppo %(vgid)s." - -#, python-format -msgid "Folder %s does not exist in Nexenta Store appliance" -msgstr "La cartella %s non esiste nell'applicazione Nexenta Store" - -#, python-format -msgid "GPFS is not running, state: %s." 
-msgstr "GPFS non in esecuzione, stato: %s." - -msgid "Gateway VIP is not set" -msgstr "VIP Gateway non impostato " - -msgid "Get FC ports by port group error." -msgstr "Errore di richiamo porte FC per gruppo di porte." - -msgid "Get FC ports from array error." -msgstr "Errore di richiamo porte FC dall'array. " - -msgid "Get FC target wwpn error." -msgstr "Errore di richiamo wwpn di destinazione FC." - -msgid "Get HyperMetroPair error." -msgstr "Errore di richiamo HyperMetroPair." - -msgid "Get LUN group by view error." -msgstr "Errore di richiamo gruppo LUN per vista." - -msgid "Get LUNcopy information error." -msgstr "Errore di richiamo delle informazioni LUNcopy. " - -msgid "Get QoS id by lun id error." -msgstr "Errore di richiamo id QoS tramite id lun. " - -msgid "Get QoS information error." -msgstr "Errore di richiamo delle informazioni QoS. " - -msgid "Get QoS policy error." -msgstr "Errore di richiamo della politica QoS." - -msgid "Get SplitMirror error." -msgstr "Errore di richiamo SplitMirror." - -msgid "Get active client failed." -msgstr "Acquisizione del client attivo non riuscita." - -msgid "Get array info error." -msgstr "Errore di richiamo informazioni sull'array." - -msgid "Get cache by name error." -msgstr "Errore di richiamo cache per nome. " - -msgid "Get connected free FC wwn error." -msgstr "Errore di acquisizione wwn FC libero connesso. " - -msgid "Get engines error." -msgstr "Errore di richiamo motori. " - -msgid "Get host initiators info failed." -msgstr "Richiamo info iniziatori host non riuscito. " - -msgid "Get hostgroup information error." -msgstr "Errore di richiamo delle informazioni gruppo host. " - -msgid "" -"Get iSCSI port info error, please check the target IP configured in huawei " -"conf file." -msgstr "" -"Errore di richiama info porta iSCSI, controllare l'IP di destinazione " -"configurato nelfile conf huawei." - -msgid "Get iSCSI port information error." -msgstr "Errore di richiamo informazioni porta iscsi." 
- -msgid "Get iSCSI target port error." -msgstr "Errore di richiamo porta di destinazione iscsi." - -msgid "Get lun id by name error." -msgstr "Errore di richiamo dell'ID lun per nome. " - -msgid "Get lun migration task error." -msgstr "Errore di richiamo attività di migrazione lun. " - -msgid "Get lungroup id by lun id error." -msgstr "Errore di richiamo id gruppo lun tramite id lun. " - -msgid "Get lungroup information error." -msgstr "Errore di richiamo delle informazioni gruppo lun. " - -msgid "Get migration task error." -msgstr "Errore di richiamo attività di migrazione. " - -msgid "Get pair failed." -msgstr "Richiamo coppia non riuscito." - -msgid "Get partition by name error." -msgstr "Errore di richiamo partizione per nome. " - -msgid "Get partition by partition id error." -msgstr "Errore di richiamo partizione per id partizione. " - -msgid "Get port group by view error." -msgstr "Errore di richiamo gruppo di porte per vista." - -msgid "Get port group error." -msgstr "Errore di richiamo gruppo di porte." - -msgid "Get port groups by port error." -msgstr "Errore di richiamo gruppi di porte per porta." - -msgid "Get ports by port group error." -msgstr "Errore di richiamo porte per gruppo di porte." - -msgid "Get remote device info failed." -msgstr "Richiamo informazioni dispositivo remoto non riuscito." - -msgid "Get remote devices error." -msgstr "Errore di richiamo dispositivi remoti. " - -msgid "Get smartcache by cache id error." -msgstr "Errore di richiamo smartcache per id cache. " - -msgid "Get snapshot error." -msgstr "Errore di richiamo istantanea." - -msgid "Get snapshot id error." -msgstr "Errore di richiamo id istantanea." - -msgid "Get target IP error." -msgstr "Errore di richiamo IP di destinazione. " - -msgid "Get target LUN of SplitMirror error." -msgstr "Errore di richiamo LUN di destinazione di SplitMirror." - -msgid "Get views by port group error." -msgstr "Errore di richiamo viste per gruppo di porte." - -msgid "Get volume by name error." 
-msgstr "Errore di richiamo volume per nome. " - -msgid "Get volume error." -msgstr "Errore di richiamo volume. " - -#, python-format -msgid "" -"Glance metadata cannot be updated, key %(key)s exists for volume id " -"%(volume_id)s" -msgstr "" -"Impossibile aggiornare i metadati Glance, la chiave %(key)s esiste per l'id " -"volume %(volume_id)s" - -#, python-format -msgid "Glance metadata for volume/snapshot %(id)s cannot be found." -msgstr "" -"Impossibile trovare i metadati di Glance per il volume/istantanea %(id)s." - -#, python-format -msgid "Gluster config file at %(config)s doesn't exist" -msgstr "Il file di configurazione Gluster in %(config)s non esiste" - -#, python-format -msgid "Google Cloud Storage api failure: %(reason)s" -msgstr "Errore api di Google Cloud Storage: %(reason)s" - -#, python-format -msgid "Google Cloud Storage connection failure: %(reason)s" -msgstr "Errore di connessione di Google Cloud Storage: %(reason)s" - -#, python-format -msgid "Google Cloud Storage oauth2 failure: %(reason)s" -msgstr "Errore oauth2 di Google Cloud Storage: %(reason)s" - -#, python-format -msgid "Got bad path information from DRBDmanage! (%s)" -msgstr "" -"Ricevute informazioni relative al percorso non corrette da DRBDmanage! (%s)" - -msgid "HBSD error occurs." -msgstr "Si è verificato un errore HBSD." - -msgid "HPELeftHand url not found" -msgstr "URL HPELeftHand non trovato" - -#, python-format -msgid "" -"Hash block size has changed since the last backup. New hash block size: " -"%(new)s. Old hash block size: %(old)s. Do a full backup." -msgstr "" -"La dimensione del blocco hash è stata modificata dall'ultimo backup. Nuova " -"dimensione del blocco hash: %(new)s. Dimensione del blocco hash precedente: " -"%(old)s. Eseguire un backup completo." - -#, python-format -msgid "Have not created %(tier_levels)s tier(s)." -msgstr "Non sono stati creati livelli %(tier_levels)s. " - -#, python-format -msgid "Hint \"%s\" not supported." 
-msgstr "Suggerimento \"%s\" non supportato." - -msgid "Host" -msgstr "Host" - -#, python-format -msgid "Host %(host)s could not be found." -msgstr "Impossibile trovare l'host %(host)s." - -#, python-format -msgid "" -"Host %(host)s does not match x509 certificate contents: CommonName " -"%(commonName)s." -msgstr "" -"L'host %(host)s non corrisponde al contenuto del certificato x509: " -"CommonName %(commonName)s." - -#, python-format -msgid "Host %s has no FC initiators" -msgstr "L'host %s non ha iniziatori FC" - -#, python-format -msgid "Host group with name %s not found" -msgstr "Gruppo host con nome %s non trovato" - -#, python-format -msgid "Host group with ref %s not found" -msgstr "Gruppo host con riferimento %s non trovato" - -msgid "Host is NOT Frozen." -msgstr "L'host NON è bloccato." - -msgid "Host is already Frozen." -msgstr "L'host è già bloccato." - -#, python-format -msgid "Host not found. Failed to remove %(service)s on %(host)s." -msgstr "Host non trovato. Impossibile rimuovere %(service)s su %(host)s." - -#, python-format -msgid "Host replication_status must be %s to failover." -msgstr "L'host replication_status deve essere %s per eseguire il failover." - -#, python-format -msgid "Host type %s not supported." -msgstr "Tipo host %s non supportato." - -#, python-format -msgid "Host with ports %(ports)s not found." -msgstr "L'host con porte %(ports)s non trovato." - -msgid "Hypermetro and Replication can not be used in the same volume_type." -msgstr "" -"Hypermetro e Replica non possono essere utilizzati nello stesso tipo di " -"volume." - -#, python-format -msgid "I/O group %(iogrp)d is not valid; available I/O groups are %(avail)s." -msgstr "" -"Il gruppo I/O %(iogrp)d non è valido; i gruppi I/O disponibili sono " -"%(avail)s." - -msgid "ID" -msgstr "ID" - -msgid "" -"If compression is set to True, rsize must also be set (not equal to -1)." 
-msgstr "" -"Se la compressione è impostata su True, anche rsize deve essere impostato " -"(non uguale a -1)." - -msgid "If nofmtdisk is set to True, rsize must also be set to -1." -msgstr "" -"Se nofmtdisk è impostato su True, anche rsize deve essere impostato su -1." - -#, python-format -msgid "" -"Illegal value '%(prot)s' specified for flashsystem_connection_protocol: " -"valid value(s) are %(enabled)s." -msgstr "" -"Specificato valore non valido '%(prot)s' per " -"flashsystem_connection_protocol: i valori validi sono %(enabled)s." - -msgid "Illegal value specified for IOTYPE: 0, 1, or 2." -msgstr "Valore non consentito specificato per IOTYPE: 0, 1 o 2." - -msgid "Illegal value specified for smarttier: set to either 0, 1, 2, or 3." -msgstr "" -"Valore non consentito specificato per smarttier: impostare su 0, 1, 2, o 3." - -msgid "" -"Illegal value specified for storwize_svc_vol_grainsize: set to either 32, " -"64, 128, or 256." -msgstr "" -"Valore non consentito specificato per storwize_svc_vol_grainsize: impostare " -"su 32, 64, 128 o 256." - -msgid "" -"Illegal value specified for thin: Can not set thin and thick at the same " -"time." -msgstr "" -"Valore non consentito specificato per thin: Impossibile impostare thin e " -"thickcontemporaneamente." - -#, python-format -msgid "Image %(image_id)s could not be found." -msgstr "Impossibile trovare l'immagine %(image_id)s." - -#, python-format -msgid "Image %(image_id)s is not active." -msgstr "L'immagine %(image_id)s non è attiva." - -#, python-format -msgid "Image %(image_id)s is unacceptable: %(reason)s" -msgstr "L'immagine %(image_id)s non è accettabile: %(reason)s" - -msgid "Image location not present." -msgstr "Ubicazione di immagine non presente." - -#, python-format -msgid "" -"Image virtual size is %(image_size)dGB and doesn't fit in a volume of size " -"%(volume_size)dGB." -msgstr "" -"La dimensione virtuale immagine è %(image_size)d GB e non rientra in un " -"volume di dimensione %(volume_size)dGB." 
- -msgid "" -"ImageBusy error raised while deleting rbd volume. This may have been caused " -"by a connection from a client that has crashed and, if so, may be resolved " -"by retrying the delete after 30 seconds has elapsed." -msgstr "" -"Errore ImageBusy generato durante l'eliminazione del volume rbd. Ciò può " -"essere causato da una connessione da un client che si è interrotta e, in " -"questo caso, può essere risolto provando a ripetere l'eliminazione dopo 30 " -"secondi." - -#, python-format -msgid "" -"Import record failed, cannot find backup service to perform the import. " -"Request service %(service)s" -msgstr "" -"Importazione del record non riuscita; non è stato possibile trovare il " -"servizio di backup per eseguire l'importazione. Richiesta del servizio " -"%(service)s" - -msgid "Incorrect request body format" -msgstr "Il formato del corpo della richiesta non è corretto" - -msgid "Incorrect request body format." -msgstr "Il formato della struttura della richiesta non è corretto." - -msgid "Incremental backups exist for this backup." -msgstr "Per questo backup esistono backup incrementali." - -#, python-format -msgid "" -"Infortrend CLI exception: %(err)s Param: %(param)s (Return Code: %(rc)s) " -"(Output: %(out)s)" -msgstr "" -"Eccezione CLI Infortrend: %(err)s Param: %(param)s (Codice di ritorno: " -"%(rc)s) (Output: %(out)s)" - -msgid "Input volumes or snapshots are invalid." -msgstr "Istantanee o volumi di input non validi." - -msgid "Input volumes or source volumes are invalid." -msgstr "Volumi di input o di origine non validi." - -#, python-format -msgid "Instance %(uuid)s could not be found." -msgstr "Impossibile trovare l'istanza %(uuid)s." - -msgid "Insufficient free space available to extend volume." -msgstr "Spazio libero disponibile insufficiente per estendere il volume." 
- -msgid "Insufficient privileges" -msgstr "Privilegi insufficienti" - -#, python-format -msgid "Invalid 3PAR Domain: %(err)s" -msgstr "Dominio 3PAR non valido: %(err)s" - -msgid "Invalid ALUA value. ALUA value must be 1 or 0." -msgstr "Valore ALUA non valido. Il valore ALUA deve essere 1 o 0. " - -msgid "Invalid Ceph args provided for backup rbd operation" -msgstr "" -"Gli argomenti (args) Ceph forniti per l'operazione di backup rbd non sono " -"validi" - -#, python-format -msgid "Invalid CgSnapshot: %(reason)s" -msgstr "CgSnapshot non valido: %(reason)s" - -#, python-format -msgid "Invalid ConsistencyGroup: %(reason)s" -msgstr "ConsistencyGroup non valido: %(reason)s" - -msgid "Invalid ConsistencyGroup: No host to create consistency group" -msgstr "" -"ConsistencyGroup non valido: nessun host per la creazione del gruppo di " -"coerenza" - -#, python-format -msgid "" -"Invalid HPELeftHand API version found: %(found)s. Version %(minimum)s or " -"greater required for manage/unmanage support." -msgstr "" -"Trovata versione API HPEeftHand non valida: %(found)s. È richiesta la " -"versione %(minimum)s o superiore per gestire/annullare la gestione del " -"supporto." - -#, python-format -msgid "Invalid IP address format: '%s'" -msgstr "Formato indirizzo IP non valido: '%s'" - -#, python-format -msgid "" -"Invalid QoS specification detected while getting QoS policy for volume %s" -msgstr "" -"Specifica QoS non valida rilevata durante il richiamo della politica QoS per " -"il volume %s " - -#, python-format -msgid "Invalid Replication Target: %(reason)s" -msgstr "Destinazione di replica non valida: %(reason)s" - -#, python-format -msgid "" -"Invalid Virtuozzo Storage share specification: %r. Must be: [MDS1[," -"MDS2],...:/][:PASSWORD]." -msgstr "" -"Specifica di condivisione Virtuozzo Storage non valida: %r. Deve essere: " -"[MDS1[,MDS2],...:/][:PASSWORD]." 
- -#, python-format -msgid "Invalid XtremIO version %(cur)s, version %(min)s or up is required" -msgstr "" -"Versione XtremIO non valida %(cur)s, è richiesta la versione %(min)s o " -"successiva" - -#, python-format -msgid "Invalid allocated quotas defined for the following project quotas: %s" -msgstr "" -"Quote allocate non valide definite per le seguenti quote del progetto: %s" - -msgid "Invalid argument" -msgstr "Argomento non valido" - -msgid "Invalid argument - negative seek offset." -msgstr "Argomento non valido - offset di ricerca negativo." - -#, python-format -msgid "Invalid argument - whence=%s not supported" -msgstr "Argomento non valido - whence=%s non supportato" - -#, python-format -msgid "Invalid argument - whence=%s not supported." -msgstr "Argomento non valido - whence=%s non supportato." - -#, python-format -msgid "Invalid attaching mode '%(mode)s' for volume %(volume_id)s." -msgstr "" -"Modalità di collegamento non valida '%(mode)s' per il volume %(volume_id)s." - -#, python-format -msgid "Invalid auth key: %(reason)s" -msgstr "Chiave di autenticazione non valida: %(reason)s" - -#, python-format -msgid "Invalid backup: %(reason)s" -msgstr "Backup non valido: %(reason)s" - -msgid "Invalid chap user details found in CloudByte storage." -msgstr "Trovati dettagli utente chap non validi nella memoria CloudByte." - -#, python-format -msgid "Invalid connection initialization response of volume %(name)s" -msgstr "" -"Risposta di inizializzazione di connessione del volume non valida %(name)s" - -#, python-format -msgid "" -"Invalid connection initialization response of volume %(name)s: %(output)s" -msgstr "" -"Risposta di inizializzazione di connessione del volume non valida %(name)s: " -"%(output)s" - -#, python-format -msgid "Invalid content type %(content_type)s." -msgstr "Tipo di contenuto non valido%(content_type)s." 
- -msgid "Invalid credentials" -msgstr "Credenziali non valide" - -#, python-format -msgid "Invalid directory: %s" -msgstr "Directory non valida: %s" - -#, python-format -msgid "Invalid disk adapter type: %(invalid_type)s." -msgstr "Tipo adattatore disco non valido: %(invalid_type)s." - -#, python-format -msgid "Invalid disk backing: %s." -msgstr "Backup del disco non valido: %s." - -#, python-format -msgid "Invalid disk type: %(disk_type)s." -msgstr "Tipo disco non valido: %(disk_type)s." - -#, python-format -msgid "Invalid disk type: %s." -msgstr "Tipo disco non valido: %s." - -#, python-format -msgid "Invalid host: %(reason)s" -msgstr "Host non valido: %(reason)s" - -#, python-format -msgid "" -"Invalid hpe3parclient version found (%(found)s). Version %(minimum)s or " -"greater required. Run \"pip install --upgrade python-3parclient\" to upgrade " -"the hpe3parclient." -msgstr "" -"Trovata versione di hpe3parclient non valida (%(found)s). È richiesta la " -"versione %(minimum)s o versioni successive. Eseguire \"pip install --" -"upgrade python-3parclient\" per aggiornare hpe3parclient." - -#, python-format -msgid "" -"Invalid hpelefthandclient version found (%(found)s). Version %(minimum)s or " -"greater required. Run 'pip install --upgrade python-lefthandclient' to " -"upgrade the hpelefthandclient." -msgstr "" -"Trovata versione di hpelefthandclient non valida (%(found)s). È richiesta la " -"versione %(minimum)s o versioni successive. Eseguire \"pip install --" -"upgrade python-lefthandclient\" per aggiornare hpelefthandclient." - -#, python-format -msgid "Invalid image href %(image_href)s." -msgstr "href immagine %(image_href)s non valido." - -msgid "Invalid image identifier or unable to access requested image." -msgstr "" -"Identificativo dell'immagine non valido oppure non è possibile accedere " -"all'immagine richiesta." - -msgid "Invalid imageRef provided." -msgstr "imageRef specificato non è valido." 
- -msgid "Invalid input" -msgstr "Input non valido" - -#, python-format -msgid "Invalid input received: %(reason)s" -msgstr "Input ricevuto non valido: %(reason)s" - -#, python-format -msgid "Invalid is_public filter [%s]" -msgstr "Filtro is_public non valido [%s]" - -#, python-format -msgid "Invalid lun type %s is configured." -msgstr "Configurato tipo di lun non valido %s." - -#, python-format -msgid "Invalid metadata size: %(reason)s" -msgstr "Dimensione metadati non valida: %(reason)s" - -#, python-format -msgid "Invalid metadata: %(reason)s" -msgstr "Metadati non validi: %(reason)s" - -#, python-format -msgid "Invalid mount point base: %s" -msgstr "Base del punto di montaggio non valida: %s" - -#, python-format -msgid "Invalid mount point base: %s." -msgstr "Base del punto di montaggio non valida: %s" - -#, python-format -msgid "Invalid new snapCPG name for retype. new_snap_cpg='%s'." -msgstr "Nuovo nome snapCPG non valido per la riscrittura new_snap_cpg='%s'." - -#, python-format -msgid "Invalid port number %(config)s for Coho rpc port" -msgstr "Numero di porta non valido %(config)s per la porta RPC Coho" - -#, python-format -msgid "" -"Invalid prefetch type '%s' is configured. PrefetchType must be in 0,1,2,3." -msgstr "" -"Configurato tipo di prefetch non valido '%s'. PrefetchType deve essere " -"0,1,2,3." - -#, python-format -msgid "Invalid qos specs: %(reason)s" -msgstr "Specifiche qos non valide: %(reason)s" - -msgid "Invalid request to attach volume to an invalid target" -msgstr "" -"Richiesta non valida per collegare il volume a una destinazione non valida" - -msgid "" -"Invalid request to attach volume with an invalid mode. Attaching mode should " -"be 'rw' or 'ro'" -msgstr "" -"Richiesta non valida per collegare il volume con una modalità non valida. La " -"modalità di collegamento deve essere 'rw' o 'ro'" - -#, python-format -msgid "Invalid reservation expiration %(expire)s." -msgstr "Scadenza prenotazione non valida %(expire)s." 
- -msgid "Invalid response header from RPC server" -msgstr "Intestazione di risposta non valida dal server RPC" - -#, python-format -msgid "Invalid secondary id %s." -msgstr "L'id secondario %s e' invalido. " - -msgid "Invalid service catalog json." -msgstr "json del catalogo del servizio non è valido." - -msgid "Invalid sheepdog cluster status." -msgstr "Stato del cluster sheepdog non valido. " - -#, python-format -msgid "Invalid snapshot: %(reason)s" -msgstr "Istantanea non valida: %(reason)s" - -#, python-format -msgid "Invalid status: '%s'" -msgstr "Stato non valido: '%s'" - -#, python-format -msgid "Invalid storage pool %s requested. Retype failed." -msgstr "" -"Richiesto un pool dell'archivio %s non valido. Nuova immissione non riuscita." - -#, python-format -msgid "Invalid storage pool %s specificed." -msgstr "Specificato un pool dell'archivio %s non valido." - -msgid "Invalid storage pool is configured." -msgstr "Configurato pool di archiviazione non valido." - -msgid "Invalid transport type." -msgstr "Tipo di trasporto non valido." - -#, python-format -msgid "Invalid update setting: '%s'" -msgstr "Impostazione di aggiornamento non valida: '%s'" - -#, python-format -msgid "Invalid value '%s' for force." -msgstr "Valore non valido '%s' per force." - -#, python-format -msgid "Invalid value '%s' for force. " -msgstr "Valore non valido '%s' per force. " - -#, python-format -msgid "Invalid value '%s' for is_public. Accepted values: True or False." -msgstr "Valore non valido '%s' per is_public. Valori accettati: True o False. " - -#, python-format -msgid "Invalid value '%s' for skip_validation." -msgstr "Valore non valido '%s' per skip_validation. 
" - -#, python-format -msgid "Invalid value for 'bootable': '%s'" -msgstr "Valore non valido per 'bootable': '%s'" - -#, python-format -msgid "Invalid value for 'force': '%s'" -msgstr "Valore non valido per 'force': '%s'" - -#, python-format -msgid "Invalid value for 'readonly': '%s'" -msgstr "Valore non valido per 'readonly': '%s'" - -msgid "Invalid value for 'scheduler_max_attempts', must be >=1" -msgstr "Valore non valido per 'scheduler_max_attempts', deve essere >= 1" - -msgid "Invalid value for NetApp configuration option netapp_host_type." -msgstr "" -"Valore non valido per l'opzione di configurazione NetApp netapp_host_type." - -msgid "Invalid value for NetApp configuration option netapp_lun_ostype." -msgstr "" -"Valore non valido per l'opzione di configurazione NetApp netapp_lun_ostype." - -#, python-format -msgid "Invalid value for age, %(age)s" -msgstr "Valore non valido per l'età, %(age)s" - -#, python-format -msgid "Invalid value: \"%s\"" -msgstr "Valore non valido: \"%s\"" - -#, python-format -msgid "" -"Invalid volume size provided for create request: %s (size argument must be " -"an integer (or string representation of an integer) and greater than zero)." -msgstr "" -"Dimensione del volume fornita non valida per la richiesta di creazione: %s " -"(l'argomento size deve essere un numero intero (o la rappresentazione " -"stringa di un numero intero) e maggiore di zero)." - -#, python-format -msgid "Invalid volume type: %(reason)s" -msgstr "Tipo di volume non valido: %(reason)s" - -#, python-format -msgid "Invalid volume: %(reason)s" -msgstr "Volume non valido: %(reason)s" - -#, python-format -msgid "" -"Invalid volume: Cannot add volume %(volume_id)s to consistency group " -"%(group_id)s because volume is in an invalid state: %(status)s. Valid states " -"are: ('available', 'in-use')." 
-msgstr "" -"Volume non valido: impossibile aggiungere il volume %(volume_id)s al gruppo " -"di coerenza %(group_id)s perché il volume si trova in uno stato non valido: " -"%(status)s. Gli stati validi sono: ('available', 'in-use')." - -#, python-format -msgid "" -"Invalid volume: Cannot add volume %(volume_id)s to consistency group " -"%(group_id)s because volume type %(volume_type)s is not supported by the " -"group." -msgstr "" -"Volume non valido: impossibile aggiungere il volume %(volume_id)s al gruppo " -"di coerenza %(group_id)s perché il tipo di volume %(volume_type)s non è " -"supportato dal gruppo." - -#, python-format -msgid "" -"Invalid volume: Cannot add volume fake-volume-uuid to consistency group " -"%(group_id)s because volume cannot be found." -msgstr "" -"Volume non valido: impossibile aggiungere fake-volume-uuid del volume dal " -"gruppo di coerenza %(group_id)s perché non è possibile trovare il volume." - -#, python-format -msgid "" -"Invalid volume: Cannot remove volume fake-volume-uuid from consistency group " -"%(group_id)s because it is not in the group." -msgstr "" -"Volume non valido: impossibile rimuovere fake-volume-uuid del volume dal " -"gruppo di coerenza %(group_id)s perché non è presente nel gruppo." - -#, python-format -msgid "Invalid volume_type passed: %s." -msgstr "Passato volume_type non valido: %s." - -#, python-format -msgid "" -"Invalid volume_type provided: %s (requested type is not compatible; either " -"match source volume, or omit type argument)." -msgstr "" -"volume_type fornito non valido: %s (il tipo richiesto non è compatibile; far " -"corrisponde col volume di origine oppure omettere l'argomento tipo)." - -#, python-format -msgid "" -"Invalid volume_type provided: %s (requested type is not compatible; " -"recommend omitting the type argument)." -msgstr "" -"volume_type fornito non valido: %s (il tipo richiesto non è compatibile;si " -"consiglia di omettere l'argomento tipo)." 
- -#, python-format -msgid "" -"Invalid volume_type provided: %s (requested type must be supported by this " -"consistency group)." -msgstr "" -"volume_type fornito non valido: %s (il tipo richiesto deve essere supportato " -"da questo gruppo di coerenza)." - -#, python-format -msgid "Invalid wwpns format %(wwpns)s" -msgstr "Formato wwpn non valido %(wwpns)s" - -msgid "Invoking web service failed." -msgstr "Richiamo del servizio Web non riuscito." - -msgid "Issue encountered waiting for job." -msgstr "Si è verificato un problema in attesa del job." - -msgid "Issue encountered waiting for synchronization." -msgstr "Si è verificato un problema in attesa della sincronizzazione." - -msgid "" -"Issuing a fail-over failed because replication is not properly configured." -msgstr "" -"Esecuzione del failover non riuscita perché la replica non è configurata " -"correttamente." - -#, python-format -msgid "Job id not found in CloudByte's create volume [%s] response." -msgstr "" -"ID lavoro non trovato nella risposta alla creazione volume di CloudByte [%s]." - -#, python-format -msgid "Job id not found in CloudByte's delete volume [%s] response." -msgstr "" -"ID lavoro non trovato nella risposta all'eliminazione volume di CloudByte " -"[%s]." - -msgid "" -"Key names can only contain alphanumeric characters, underscores, periods, " -"colons and hyphens." -msgstr "" -"I nomi delle chiavi possono contenere solo caratteri alfanumerici, di " -"sottolineatura, punti, due punti e trattini." - -#, python-format -msgid "KeyError: %s" -msgstr "KeyError: %s" - -msgid "Keystone version 3 or greater must be used to get nested quota support." -msgstr "" -"Keystone versione 3 o successiva deve essere utilizzato per ottenere il " -"supporto delle quote nidificate." - -#, python-format -msgid "LU does not exist for volume: %s" -msgstr "LUN non esiste per il volume: %s" - -msgid "LUN export failed!" -msgstr "Esportazione LUN non riuscita. " - -msgid "LUN map overflow on every channel." 
-msgstr "Eccedenza mappa LUN su ogni canale. " - -#, python-format -msgid "LUN not found with given ref %s." -msgstr "LUN non trovata con il riferimento fornito %s." - -#, python-format -msgid "LUN number is out of bound on channel id: %(ch_id)s." -msgstr "Il numero LUN è fuori dai limiti sul canale id: %(ch_id)s. " - -#, python-format -msgid "Last %s cinder syslog entries:-" -msgstr "Ultime %s voci del syslog cinder:-" - -msgid "LeftHand cluster not found" -msgstr "Cluster LeftHand non trovato" - -msgid "License is unavailable." -msgstr "La licenza non è disponibile." - -#, python-format -msgid "Line %(dis)d : %(line)s" -msgstr "Riga %(dis)d : %(line)s" - -msgid "Link path already exists and its not a symlink" -msgstr "Il percorso di collegamento già esiste e non è un symlink" - -#, python-format -msgid "Linked clone of source volume not supported in state: %s." -msgstr "Clone collegato del volume di origine non supportato nello stato: %s." - -msgid "Lock acquisition failed." -msgstr "Acquisizione blocco non riuscita." - -msgid "Logout session error." -msgstr "Errore della sessione di logout." - -msgid "" -"Lookup service not configured. Config option for fc_san_lookup_service needs " -"to specify a concrete implementation of the lookup service." -msgstr "" -"Servizio di ricerca non configurato. L'opzione di configurazione per " -"fc_san_lookup_service deve specificare un'implementazione concreta del " -"servizio di ricerca." - -msgid "Lun migration error." -msgstr "Errore di migrazione Lun." - -#, python-format -msgid "" -"MD5 of object: %(object_name)s before: %(md5)s and after: %(etag)s is not " -"same." -msgstr "" -"MD5 dell'oggetto: %(object_name)s prima: %(md5)s e dopo: %(etag)s non è lo " -"stesso." 
- -#, python-format -msgid "MSG_DENIED: %r" -msgstr "MSG_DENIED: %r" - -#, python-format -msgid "MSG_DENIED: AUTH_ERROR: %r" -msgstr "MSG_DENIED: AUTH_ERROR: %r" - -#, python-format -msgid "MSG_DENIED: RPC_MISMATCH: %r" -msgstr "MSG_DENIED: RPC_MISMATCH: %r" - -#, python-format -msgid "Malformed fcns output string: %s" -msgstr "Stringa di output fcns non corretta: %s" - -#, python-format -msgid "Malformed message body: %(reason)s" -msgstr "Corpo del messaggio non valido: %(reason)s" - -#, python-format -msgid "Malformed nameserver string: %s" -msgstr "Stringa nameserver non formata correttamente: %s" - -msgid "Malformed request body" -msgstr "Corpo richiesta non corretto" - -msgid "Malformed request body." -msgstr "Formato del corpo della richiesta non corretto." - -msgid "Malformed request url" -msgstr "url richiesta non corretto" - -#, python-format -msgid "Malformed response to command %(cmd)s: %(reason)s" -msgstr "Risposta non valida per il comando %(cmd)s: %(reason)s" - -msgid "Malformed scheduler_hints attribute" -msgstr "Attributo scheduler_hints non corretto" - -#, python-format -msgid "Malformed show fcns database string: %s" -msgstr "Stringa comando show fcns database non corretta: %s" - -#, python-format -msgid "" -"Malformed zone configuration: (switch=%(switch)s zone_config=" -"%(zone_config)s)." -msgstr "" -"Configurazione di zona non corretta: (switch=%(switch)s zone_config=" -"%(zone_config)s)." - -#, python-format -msgid "Malformed zone status: (switch=%(switch)s zone_config=%(zone_config)s)." -msgstr "" -"Stato della zona non corretto: (switch=%(switch)s zone_config=" -"%(zone_config)s)." - -msgid "Manage existing get size requires 'id'." -msgstr "Acquisizione dimensione gestione esistente richiede 'id'." - -msgid "Manage existing snapshot not implemented." -msgstr "Gestione dell'istantanea esistente non implementata." 
- -#, python-format -msgid "" -"Manage existing volume failed due to invalid backend reference " -"%(existing_ref)s: %(reason)s" -msgstr "" -"La gestione del volume esistente non è riuscita a causa del riferimento di " -"backend non valido %(existing_ref)s: %(reason)s" - -#, python-format -msgid "Manage existing volume failed due to volume type mismatch: %(reason)s" -msgstr "" -"La gestione del volume esistente non è riuscita a causa della mancata " -"corrispondenza del tipo di volume: %(reason)s" - -msgid "Manage existing volume not implemented." -msgstr "Gestione del volume esistente non implementato." - -msgid "Manage existing volume requires 'source-id'." -msgstr "La gestione del volume esistente richiede 'source-id'. " - -#, python-format -msgid "" -"Manage volume is not supported if FAST is enable. FAST policy: " -"%(fastPolicyName)s." -msgstr "" -"Gestione del volume non supportata se è abilitato FAST. Politica FAST: " -"%(fastPolicyName)s." - -msgid "Managing of snapshots to failed-over volumes is not allowed." -msgstr "" -"La gestione di istantanee su volumi sottoposti a failover non è consentita." - -msgid "Map info is None due to array version not supporting hypermetro." -msgstr "" -"Le informazioni sulla mappa sono None a causa di una versione dell'array che " -"non supporta hypermetro." - -#, python-format -msgid "" -"Mapping %(id)s prepare failed to complete within theallotted %(to)d seconds " -"timeout. Terminating." -msgstr "" -"La preparazione dell'associazione %(id)s non è stata completata entro il " -"timeout di secondi %(to)d assegnati. Interruzione in corso." 
- -#, python-format -msgid "Masking view %(maskingViewName)s was not deleted successfully" -msgstr "Vista di mascheramento %(maskingViewName)s non eliminata correttamente" - -#, python-format -msgid "Maximum number of backups allowed (%(allowed)d) exceeded" -msgstr "Numero massimo di backup consentiti (%(allowed)d) superato" - -#, python-format -msgid "Maximum number of snapshots allowed (%(allowed)d) exceeded" -msgstr "Numero massimo di istantanee consentite (%(allowed)d) superato" - -#, python-format -msgid "" -"Maximum number of volumes allowed (%(allowed)d) exceeded for quota " -"'%(name)s'." -msgstr "" -"Numero massimo di volumi consentiti (%(allowed)d) superato per la quota " -"'%(name)s'." - -#, python-format -msgid "May specify only one of %s" -msgstr "È possibile specificare soltanto uno di %s" - -msgid "Metadata backup already exists for this volume" -msgstr "Il backup dei metadati esiste già per questo volume" - -#, python-format -msgid "Metadata backup object '%s' already exists" -msgstr "L'oggetto di backup di metadati '%s' esiste già" - -msgid "Metadata property key blank." -msgstr "Chiave della proprietà dei metadati vuota." - -msgid "Metadata restore failed due to incompatible version" -msgstr "" -"Il ripristino dei metadati non è riuscito a causa di una versione non " -"compatibile" - -msgid "Metadata restore failed due to incompatible version." -msgstr "" -"Ripristino dei metadati non riuscito a causa di una versione non compatibile." - -msgid "" -"Missing 'purestorage' python module, ensure the library is installed and " -"available." -msgstr "" -"Modulo python 'purestorage' non presente, accertarsi che la libreria sia " -"installata e disponibile." - -msgid "Missing Fibre Channel SAN configuration param - fc_fabric_names" -msgstr "" -"Manca parametro di configurazione di Fibre Channel SAN - fc_fabric_names" - -msgid "Missing request body" -msgstr "Manca il corpo della richiesta" - -msgid "Missing request body." 
-msgstr "Corpo della richiesta mancante." - -#, python-format -msgid "Missing required element '%s' in request body" -msgstr "Manca l'elemento '%s' richiesto nel corpo della richiesta" - -#, python-format -msgid "Missing required element '%s' in request body." -msgstr "Manca l'elemento '%s' richiesto nel corpo della richiesta. " - -msgid "Missing required element 'consistencygroup' in request body." -msgstr "" -"Manca l'elemento 'consistencygroup' richiesto nel corpo della richiesta. " - -msgid "Missing required element quota_class_set in request body." -msgstr "" -"Elemento quota_class_set obbligatorio mancante nel corpo della richiesta." - -msgid "Missing required element snapshot in request body." -msgstr "Elemento istantanea obbligatorio mancante nel corpo della richiesta." - -msgid "" -"Multiple SerialNumbers found, when only one was expected for this operation. " -"Please change your EMC config file." -msgstr "" -"Trovati più SerialNumber mentre ne era previsto uno solo per questa " -"operazione. Modificare il file di configurazione EMC. " - -#, python-format -msgid "Multiple copies of volume %s found." -msgstr "trovate più copie del volume %s." - -#, python-format -msgid "Multiple matches found for '%s', use an ID to be more specific." -msgstr "" -"Sono state rilevate più corrispondenze per '%s', utilizzare un ID per essere " -"più precisi." - -msgid "Multiple profiles found." -msgstr "Trovati più profili. 
" - -msgid "Must implement a fallback schedule" -msgstr "È necessario implementare una pianificazione fallback" - -msgid "Must implement find_retype_host" -msgstr "È necessario implementare find_retype_host" - -msgid "Must implement host_passes_filters" -msgstr "È necessario implementare host_passes_filters" - -msgid "Must implement schedule_create_consistencygroup" -msgstr "È necessario implementare schedule_create_consistencygroup" - -msgid "Must implement schedule_create_volume" -msgstr "È necessario implementare schedule_create_volume" - -msgid "Must implement schedule_get_pools" -msgstr "È necessario implementare schedule_get_pools" - -msgid "Must pass wwpn or host to lsfabric." -msgstr "È necessario passare wwpn o host a lsfabric." - -msgid "" -"Must run this command as cloud admin using a Keystone policy.json which " -"allows cloud admin to list and get any project." -msgstr "" -"Eseguire questo comando come admin cloud utilizzando un Keystone policy.json " -"che consenta all'admin cloud di visualizzare e ottenere qualsiasi progetto." - -msgid "Must specify 'connector'" -msgstr "È necessario specificare 'connector'" - -msgid "Must specify 'connector'." -msgstr "È necessario specificare 'connector'." - -msgid "Must specify 'host'." -msgstr "È necessario specificare 'host'." - -msgid "Must specify 'new_volume'" -msgstr "È necessario specificare 'new_volume'" - -msgid "Must specify 'status'" -msgstr "È necessario specificare 'status'" - -msgid "" -"Must specify 'status', 'attach_status' or 'migration_status' for update." -msgstr "" -"È necessario specificare 'status', 'attach_status' o 'migration_status' per " -"l'aggiornamento." - -msgid "Must specify a valid attach status" -msgstr "È necessario specificare uno stato di allegato valido" - -msgid "Must specify a valid migration status" -msgstr "È necessario specificare uno stato di migrazione valido" - -#, python-format -msgid "Must specify a valid persona %(valid)s,value '%(persona)s' is invalid." 
-msgstr "" -"È necessario specificare un utente tipo %(valid)s valido, il valore " -"'%(persona)s' non è valido." - -#, python-format -msgid "" -"Must specify a valid provisioning type %(valid)s, value '%(prov)s' is " -"invalid." -msgstr "" -"È necessario specificare un tipo di provisioning %(valid)s valido, il valore " -"'%(prov)s' non è valido." - -msgid "Must specify a valid status" -msgstr "È necessario specificare uno stato valido" - -msgid "Must specify an ExtensionManager class" -msgstr "È necessario specificare una classe ExtensionManager" - -msgid "Must specify bootable in request." -msgstr "È necessario specificare bootable nella richiesta." - -msgid "Must specify protection domain name or protection domain id." -msgstr "" -"È necessario specificare il nome dominio di protezione o l'ID del dominio di " -"protezione. " - -msgid "Must specify readonly in request." -msgstr "È necessario specificare readonly nella richiesta." - -msgid "Must specify snapshot source-name or source-id." -msgstr "È necessario specificare source-name o source-id dell'istantanea." - -msgid "Must specify source-name or source-id." -msgstr "È necessario specificare source-name o source-id." - -msgid "Must specify storage pool name or id." -msgstr "È necessario specificare il nome o l'ID del pool di memoria. " - -msgid "Must specify storage pools. Option: sio_storage_pools." -msgstr "" -"È necessario specificare i pool di archiviazione. Opzione: sio_storage_pools." - -msgid "Must supply a positive, non-zero value for age" -msgstr "È necessario fornire un valore positivo, diverso da zero per l'età" - -#, python-format -msgid "" -"NAS config '%(name)s=%(value)s' invalid. Must be 'auto', 'true', or 'false'" -msgstr "" -"Configurazione della NAS '%(name)s=%(value)s' non valida. 
Deve essere " -"'auto', 'true', o 'false'" - -#, python-format -msgid "NFS config file at %(config)s doesn't exist" -msgstr "Il file di configurazione NFS in %(config)s non esiste" - -#, python-format -msgid "NFS file %s not discovered." -msgstr "File NFS %s non rilevato." - -msgid "NFS file could not be discovered." -msgstr "Impossibile rilevare il file NFS." - -msgid "NaElement name cannot be null." -msgstr "Il nome NaElement non può essere null." - -msgid "Name" -msgstr "Nome" - -msgid "" -"Name, description, add_volumes, and remove_volumes can not be all empty in " -"the request body." -msgstr "" -"Il nome, la descrizione, add_volumes e remove_volumes non possono essere " -"vuoti nel corpo della richiesta." - -msgid "Need non-zero volume size" -msgstr "Necessaria dimensione volume diversa da zero" - -#, python-format -msgid "Neither MSG_DENIED nor MSG_ACCEPTED: %r" -msgstr "Né MSG_DENIED né MSG_ACCEPTED: %r" - -msgid "NetApp Cinder Driver exception." -msgstr "Eccezione del driver Cinder di NetApp." - -#, python-format -msgid "" -"New size for extend must be greater than current size. (current: %(size)s, " -"extended: %(new_size)s)." -msgstr "" -"La nuova dimensione per l'estensione deve essere superiore alla dimensione " -"corrente. (corrente: %(size)s, esteso: %(new_size)s)." - -#, python-format -msgid "" -"New size should be bigger than the real size from backend storage. realsize: " -"%(oldsize)s, newsize: %(newsize)s." -msgstr "" -"La nuova dimensione deve essere maggiore della dimensione reale " -"dell'archiviazione di backend. dimensione reale: %(oldsize)s, nuova " -"dimensione: %(newsize)s." - -msgid "New volume size must be specified as an integer." -msgstr "" -"La nuova dimensione di volume deve essere specificata come numero intero." - -msgid "New volume type must be specified." -msgstr "È necessario specificare il tipo del nuovo volume." - -msgid "New volume type not specified in request_spec." 
-msgstr "Nuovo tipo di volume non specificato in request_spec." - -msgid "Nimble Cinder Driver exception" -msgstr "Eccezione driver Nimble Cinder" - -msgid "No FC initiator can be added to host." -msgstr "Nessun iniziatore FC può essere aggiunto all'host." - -msgid "No FC port connected to fabric." -msgstr "Nessuna porta FC collegata a fabric." - -msgid "No FCP targets found" -msgstr "Nessuna destinazione FCP trovata" - -msgid "No Port Group elements found in config file." -msgstr "" -"Nessun elemento del gruppo di porte trovato nel file di configurazione." - -msgid "No VF ID is defined in the configuration file." -msgstr "Nessun ID VF definito nel file di configurazione." - -msgid "No active iSCSI portals with supplied iSCSI IPs" -msgstr "Nessun portale iSCSI attivo con gli IP iSCSI forniti " - -#, python-format -msgid "No available service named %s" -msgstr "Nessun servizio disponibile denominato %s" - -#, python-format -msgid "No backup with id %s" -msgstr "Nessun backup con id %s" - -msgid "No backups available to do an incremental backup." -msgstr "Nessun backup disponibile per eseguire un backup incrementale." - -msgid "No big enough free disk" -msgstr "Nessun disco disponibile è abbastanza grande" - -#, python-format -msgid "No cgsnapshot with id %s" -msgstr "Nessun cgsnapshot con id %s" - -msgid "No cinder entries in syslog!" -msgstr "Nessuna voce cinder nel syslog!" - -#, python-format -msgid "No cloned LUN named %s found on the filer" -msgstr "Nessuna LUN clonata chiamata %s è stata trovata nel filer" - -msgid "No config node found." -msgstr "Nessun nodo config trovato. " - -#, python-format -msgid "No consistency group with id %s" -msgstr "Nessun gruppo di coerenza con id %s" - -#, python-format -msgid "No element by given name %s." -msgstr "Nessun elemento dal nome specificato %s." - -msgid "No errors in logfiles!" -msgstr "Nessun errore nei file di log!" - -#, python-format -msgid "No file found with %s as backing file." 
-msgstr "Nessun file trovato con %s come file di backup." - -#, python-format -msgid "" -"No free LUN IDs left. Maximum number of volumes that can be attached to host " -"(%s) has been exceeded." -msgstr "" -"Nessun ID LUN rimasto libero. Il numero massimo di volumi che possono essere " -"collegati all'host (%s) è stato superato. " - -msgid "No free disk" -msgstr "Nessun disco disponibile" - -#, python-format -msgid "No good iscsi portal found in supplied list for %s." -msgstr "Nessun portale iscsi valido trovato nell'elenco fornito per %s." - -#, python-format -msgid "No good iscsi portals found for %s." -msgstr "Nessun portale iscsi valido trovato in %s." - -#, python-format -msgid "No host to create consistency group %s." -msgstr "Nessun host per creare il gruppo di coerenza %s." - -msgid "No iSCSI-enabled ports on target array." -msgstr "Nessuna porta abilitata a iSCSI nell'array di destinazione." - -msgid "No image_name was specified in request." -msgstr "Nessun image_name specificato nella richiesta." - -msgid "No initiator connected to fabric." -msgstr "Nessun iniziatore collegato a fabric." - -#, python-format -msgid "No initiator group found for initiator %s" -msgstr "Nessun gruppo iniziatore trovato per l'iniziatore %s" - -msgid "No initiators found, cannot proceed" -msgstr "Nessun iniziatore trovato, impossibile continuare" - -#, python-format -msgid "No interface found on cluster for ip %s" -msgstr "Nessuna interfaccia trovata nel cluster per l'ip %s" - -msgid "No ip address found." -msgstr "Nessun indirizzo IP rilevato. " - -msgid "No iscsi auth groups were found in CloudByte." -msgstr "Non è stato trovato alcun gruppo aut iscsi in CloudByte" - -msgid "No iscsi initiators were found in CloudByte." -msgstr "Non è stato trovato alcun iniziatore iscsi in CloudByte" - -#, python-format -msgid "No iscsi service found for CloudByte volume [%s]." -msgstr "Nessun servizio iscsi trovato per il volume CloudByte [%s]." 
-
-msgid "No iscsi services found in CloudByte storage."
-msgstr "Nessun servizio iscsi rilevato nell'archivio CloudByte."
-
-#, python-format
-msgid "No key file specified and unable to load key from %(cert)s %(e)s."
-msgstr ""
-"Nessun file di chiavi specificato e non è possibile caricare la chiave da "
-"%(cert)s %(e)s."
-
-msgid "No mounted Gluster shares found"
-msgstr "Non è stata trovata nessuna condivisione di Gluster montata"
-
-msgid "No mounted NFS shares found"
-msgstr "Non è stata trovata nessuna condivisione di NFS montata"
-
-msgid "No mounted SMBFS shares found."
-msgstr "Non è stata trovata nessuna condivisione SMBFS montata."
-
-msgid "No mounted Virtuozzo Storage shares found"
-msgstr "Non è stata trovata alcuna condivisione Virtuozzo Storage montata"
-
-msgid "No mounted shares found"
-msgstr "Non è stata trovata nessuna condivisione montata"
-
-#, python-format
-msgid "No node found in I/O group %(gid)s for volume %(vol)s."
-msgstr "Nessun nodo trovato nel gruppo I/O %(gid)s per il volume %(vol)s."
-
-msgid ""
-"No pools are available for provisioning volumes. Ensure that the "
-"configuration option netapp_pool_name_search_pattern is set correctly."
-msgstr ""
-"Nessun pool disponibile per i volumi di provisioning. Assicurarsi "
-"che l'opzione di configurazione netapp_pool_name_search_pattern sia impostata "
-"correttamente. "
-
-msgid ""
-"No response was received from CloudByte storage list iSCSI auth user API "
-"call."
-msgstr ""
-"Nessuna risposta ricevuta dalla chiamata API utente aut iSCSI dell'elenco "
-"della memoria CloudByte."
-
-msgid "No response was received from CloudByte storage list tsm API call."
-msgstr ""
-"Nessuna risposta ricevuta dalla chiamata API tsm dell'elenco dell'archivio "
-"CloudByte."
-
-msgid "No response was received from CloudByte's list filesystem api call."
-msgstr ""
-"Nessuna risposta ricevuta dalla chiamata API del file system dell'elenco "
-"CloudByte."
- -msgid "No service VIP configured and no nexenta_client_address" -msgstr "Nessun VIP di servizio configurato e nessun nexenta_client_address" - -#, python-format -msgid "No snap found with %s as backing file." -msgstr "Nessuna istantanea (snap) trovata con %s come file di backup." - -#, python-format -msgid "No snapshot image found in snapshot group %s." -msgstr "Nessuna immagine istantanea trovata nel gruppo di istantanee %s." - -#, python-format -msgid "No snapshots could be found on volume %s." -msgstr "Nessuna istantanea trovata sul volume %s." - -#, python-format -msgid "No source snapshots provided to create consistency group %s." -msgstr "" -"Nessuna istantanea origine fornita per creare il gruppo di coerenza %s." - -#, python-format -msgid "No storage path found for export path %s" -msgstr "" -"Nessun percorso di archiviazione trovato per il percorso di esportazione %s" - -#, python-format -msgid "No such QoS spec %(specs_id)s." -msgstr "Non esiste alcuna specifica QoS %(specs_id)s." - -msgid "No suitable discovery ip found" -msgstr "Non è stato trovato nessun IP di rilevamento adatto" - -#, python-format -msgid "No support to restore backup version %s" -msgstr "Nessun supporto per il ripristino della versione di backup %s" - -#, python-format -msgid "No target id found for volume %(volume_id)s." -msgstr "Nessun id destinazione è stato trovato per il volume %(volume_id)s." - -msgid "" -"No unused LUN IDs are available on the host; multiattach is enabled which " -"requires that all LUN IDs to be unique across the entire host group." -msgstr "" -"Nessun ID LUN non utilizzato disponibile sull'host; è abilitato il " -"multicollegamento cherichiede che tutti gli ID LUN siano univoci nell'intero " -"gruppo di host. " - -#, python-format -msgid "No valid host was found. %(reason)s" -msgstr "Non è stato trovato alcun host valido. 
%(reason)s" - -#, python-format -msgid "No valid hosts for volume %(id)s with type %(type)s" -msgstr "Nessun host valido per il volume %(id)s con tipo %(type)s" - -#, python-format -msgid "No vdisk with the UID specified by ref %s." -msgstr "Nessun vdisk con l'UID specificato da ref %s." - -#, python-format -msgid "No views found for LUN: %s" -msgstr "Nessuna vista trovata per LUN: %s" - -#, python-format -msgid "" -"No volume on cluster with vserver %(vserver)s and junction path %(junction)s " -msgstr "" -"Nessun volume nel cluster con vserver %(vserver)s e percorso di giunzione " -"%(junction)s " - -msgid "No volume service(s) started successfully, terminating." -msgstr "Nessun servizio volume avviato con esito positivo, si termina. " - -msgid "No volume was found at CloudByte storage." -msgstr "Nessun volume trovato nell'archivio CloudByte." - -msgid "No volume_type should be provided when creating test replica." -msgstr "" -"Non è necessario fornire alcun volume_type durante la creazione della " -"replica di test." - -msgid "No volumes found in CloudByte storage." -msgstr "Nessun volume rilevato nell'archivio CloudByte." - -msgid "No weighed hosts available" -msgstr "Nessun host pesato disponibile " - -#, python-format -msgid "Not a valid string: %s" -msgstr "Stringa non valida: %s" - -msgid "Not a valid value for NaElement." -msgstr "Non un valore valido per NaElement." - -#, python-format -msgid "Not able to find a suitable datastore for the volume: %s." -msgstr "Impossibile trovare un datastore adeguato per il volume: %s." - -msgid "Not an rbd snapshot" -msgstr "Non è un'istantanea rbd" - -#, python-format -msgid "Not authorized for image %(image_id)s." -msgstr "Non autorizzato per l'immagine %(image_id)s." - -msgid "Not authorized." -msgstr "Non autorizzato." 
- -#, python-format -msgid "Not enough space on backend (%(backend)s)" -msgstr "Spazio insufficiente sul backend (%(backend)s)" - -msgid "Not enough storage space in the ZFS share to perform this operation." -msgstr "" -"Spazio di archiviazione insufficiente nella condivisione ZFS per eseguire " -"questa operazione." - -msgid "Not stored in rbd" -msgstr "Non memorizzato in rbd" - -msgid "Nova returned \"error\" status while creating snapshot." -msgstr "" -"Nova ha restituito lo stato \"error\" durante la creazione dell'istantanea." - -msgid "Null response received from CloudByte's list filesystem." -msgstr "Ricevuta risposta Null dal file system dell'elenco di CloudByte." - -msgid "Null response received from CloudByte's list iscsi auth groups." -msgstr "Ricevuta risposta Null dei gruppi aut iscsi dell'elenco di CloudByte." - -msgid "Null response received from CloudByte's list iscsi initiators." -msgstr "" -"Ricevuta risposta Null dagli iniziatori iscsi dell'elenco di CloudByte." - -msgid "Null response received from CloudByte's list volume iscsi service." -msgstr "" -"Ricevuta risposta Null dal servizio iscsi del volume dell'elenco di " -"CloudByte." - -#, python-format -msgid "Null response received while creating volume [%s] at CloudByte storage." -msgstr "" -"Ricevuta risposta null durante la creazione del volume [%s] nell'archivio " -"CloudByte." - -#, python-format -msgid "Null response received while deleting volume [%s] at CloudByte storage." -msgstr "" -"Ricevuta risposta null durante l'eliminazione del volume [%s] nella memoria " -"CloudByte." - -#, python-format -msgid "" -"Null response received while querying for [%(operation)s] based job " -"[%(job)s] at CloudByte storage." -msgstr "" -"Ricevuta risposta null durante l'interrogazione per il lavoro basato su " -"[%(operation)s][%(job)s] nella memoria CloudByte." - -msgid "Object Count" -msgstr "Numero oggetti" - -msgid "Object Version" -msgstr "Versione oggetto" - -msgid "Object is not a NetApp LUN." 
-msgstr "L'oggetto non è un NetApp LUN." - -#, python-format -msgid "" -"On an Extend Operation, error adding volume to composite volume: " -"%(volumename)s." -msgstr "" -"In un'operazione di estensione si è verificato un errore durante l'aggiunta " -"di un volume al volume composito. %(volumename)s." - -msgid "One of the required inputs from host, port or scheme was not found." -msgstr "" -"Uno degli input richiesti dall'host, porta o schema non è stato trovato." - -#, python-format -msgid "" -"Only %(value)s %(verb)s request(s) can be made to %(uri)s every " -"%(unit_string)s." -msgstr "" -"Solo le richieste %(value)s %(verb)s request(s) possono essere effettuate a " -"%(uri)s ogni %(unit_string)s." - -msgid "Only one limit can be set in a QoS spec." -msgstr "In una specifica QoS può essere impostato un solo limite. " - -msgid "" -"Only users with token scoped to immediate parents or root projects are " -"allowed to see its children quotas." -msgstr "" -"Solo agli utenti con token nell'ambito dei parent immediati o progetti root " -"è consentito visualizzare le quote child." - -msgid "Only volumes managed by OpenStack can be unmanaged." -msgstr "" -"È possibile annullare la gestione solo dei volumi gestiti da OpenStack." - -#, python-format -msgid "Operation failed with status=%(status)s. Full dump: %(data)s" -msgstr "Operazione non riuscita con status=%(status)s. Dump completo: %(data)s" - -#, python-format -msgid "Operation not supported: %(operation)s." -msgstr "Operazione non supportata: %(operation)s." - -msgid "Option gpfs_images_dir is not set correctly." -msgstr "L'opzione gpfs_images_dir non è impostata correttamente." - -msgid "Option gpfs_images_share_mode is not set correctly." -msgstr "L'opzione gpfs_images_share_mode non è impostata correttamente." - -msgid "Option gpfs_mount_point_base is not set correctly." -msgstr "L'opzione gpfs_mount_point_base non è impostata correttamente." 
- -#, python-format -msgid "Originating %(res)s %(prop)s must be one of '%(vals)s' values" -msgstr "%(res)s %(prop)s di origine deve essere uno dei valori '%(vals)s'" - -#, python-format -msgid "ParseException: %s" -msgstr "ParseException: %s" - -msgid "Partition name is None, please set smartpartition:partitionname in key." -msgstr "" -"Il nome della partizione è Nessuno, impostare smartpartition:partitionname " -"nella chiave. " - -msgid "" -"Password or SSH private key is required for authentication: set either " -"san_password or san_private_key option." -msgstr "" -"La password o la chiave privata SSH è obbligatoria per l'autenticazione: " -"impostare l'opzione san_password o san_private_key." - -msgid "Path to REST server's certificate must be specified." -msgstr "È necessario specificare il percorso al certificato server REST. " - -#, python-format -msgid "Please create %(pool_list)s pool in advance!" -msgstr "Creare il pool %(pool_list)s in anticipo. " - -#, python-format -msgid "Please create %(tier_levels)s tier in pool %(pool)s in advance!" -msgstr "Creare il livello %(tier_levels)s nel pool %(pool)s in anticipo. " - -msgid "Please specify a name for QoS specs." -msgstr "Specificare un nome per le specifiche (specs) QoS." - -#, python-format -msgid "Policy doesn't allow %(action)s to be performed." -msgstr "La politica non consente di eseguire l'azione %(action)s." - -#, python-format -msgid "Pool %(poolNameInStr)s is not found." -msgstr "Il pool %(poolNameInStr)s non è stato trovato." - -#, python-format -msgid "Pool %s does not exist in Nexenta Store appliance" -msgstr "Il pool %s non esiste nell'applicazione Nexenta Store" - -#, python-format -msgid "Pool from volume['host'] %(host)s not found." -msgstr "Pool dal volume['host'] %(host)s non trovato." - -#, python-format -msgid "Pool from volume['host'] failed with: %(ex)s." -msgstr "Pool dal volume['host'] non riuscito con: %(ex)s." - -msgid "Pool is not available in the volume host field." 
-msgstr "Il pool non è disponibile nel campo dell'host del volume."
-
-msgid "Pool is not available in the volume host fields."
-msgstr "Il pool non è disponibile nei campi dell'host del volume."
-
-#, python-format
-msgid "Pool with name %(pool)s wasn't found in domain %(domain)s."
-msgstr "Impossibile trovare il pool con nome %(pool)s nel dominio %(domain)s."
-
-#, python-format
-msgid "Pool with name %(pool_name)s wasn't found in domain %(domain_id)s."
-msgstr ""
-"Impossibile trovare il pool con nome %(pool_name)s nel dominio %(domain_id)s."
-
-#, python-format
-msgid ""
-"Pool: %(poolName)s. is not associated to storage tier for fast policy "
-"%(fastPolicy)s."
-msgstr ""
-"Il pool: %(poolName)s. non è associato al livello di memoria per la politica "
-"FAST %(fastPolicy)s."
-
-#, python-format
-msgid "PoolName must be in the file %(fileName)s."
-msgstr "Il nome pool deve essere nel file %(fileName)s."
-
-#, python-format
-msgid "Pools %s does not exist"
-msgstr "I pool %s non esistono"
-
-msgid "Pools name is not set."
-msgstr "Il nome pool non è impostato. "
-
-#, python-format
-msgid "Primary copy status: %(status)s and synchronized: %(sync)s."
-msgstr "Stato della copia primaria: %(status)s e sincronizzata: %(sync)s."
-
-msgid "Project ID"
-msgstr "Identificativo del Progetto"
-
-#, python-format
-msgid "Project quotas are not properly setup for nested quotas: %(reason)s."
-msgstr ""
-"Le quote del progetto non sono configurate correttamente per le quote "
-"nidificate: %(reason)s."
-
-msgid "Protection Group not ready."
-msgstr "Gruppo di protezione non pronto."
-
-#, python-format
-msgid ""
-"Protocol %(storage_protocol)s is not supported for storage family "
-"%(storage_family)s."
-msgstr ""
-"Il protocollo %(storage_protocol)s non è supportato per la famiglia di "
-"archiviazione %(storage_family)s."
- -msgid "Provided backup record is missing an id" -msgstr "Nel record di backup fornito manca un id" - -#, python-format -msgid "" -"Provided snapshot status %(provided)s not allowed for snapshot with status " -"%(current)s." -msgstr "" -"Lo stato %(provided)s dell'istantanea fornito non è consentito per " -"un'istantanea con lo stato %(current)s." - -#, python-format -msgid "" -"Provider information w.r.t CloudByte storage was not found for OpenStack " -"volume [%s]." -msgstr "" -"Informazioni del provider sulla memoria CloudByte non trovate per il volume " -"OpenStack [%s]." - -#, python-format -msgid "Pure Storage Cinder driver failure: %(reason)s" -msgstr "Errore driver Pure Storage Cinder: %(reason)s" - -#, python-format -msgid "QoS Specs %(specs_id)s already exists." -msgstr "Le specifiche QoS %(specs_id)s esistono già." - -#, python-format -msgid "QoS Specs %(specs_id)s is still associated with entities." -msgstr "Le specifiche QoS %(specs_id)s sono ancora associate alle entità." - -#, python-format -msgid "QoS config is wrong. %s must > 0." -msgstr "La configurazione QoS è errata. %s deve essere > 0." - -#, python-format -msgid "" -"QoS policy must specify for IOTYPE and another qos_specs, QoS policy: " -"%(qos_policy)s." -msgstr "" -"La politica deve essere specificata per IOTYPE e un altro qos_specs, " -"politica QoS: %(qos_policy)s." - -#, python-format -msgid "" -"QoS policy must specify for IOTYPE: 0, 1, or 2, QoS policy: %(qos_policy)s " -msgstr "" -"La politica QoS deve essere specificata per IOTYPE: 0, 1 o 2, politica QoS: " -"%(qos_policy)s " - -#, python-format -msgid "" -"QoS policy upper_limit and lower_limit conflict, QoS policy: %(qos_policy)s." -msgstr "" -"Conflitto upper_limit e lower_limit della politica QoS, politica QoS: " -"%(qos_policy)s." - -#, python-format -msgid "QoS spec %(specs_id)s has no spec with key %(specs_key)s." -msgstr "" -"La specifica QoS %(specs_id)s non dispone di specifiche con la chiave " -"%(specs_key)s." 
-
-msgid "QoS specs are not supported on this storage family and ONTAP version."
-msgstr ""
-"Spec QoS non supportate in questa famiglia di memoria e versione ONTAP. "
-
-msgid "Qos specs still in use."
-msgstr "Le specifiche (specs) Qos sono ancora in uso."
-
-msgid ""
-"Query by service parameter is deprecated. Please use binary parameter "
-"instead."
-msgstr ""
-"La query tramite il parametro service è obsoleta. Utilizzare invece il "
-"parametro binary."
-
-msgid "Query resource pool error."
-msgstr "Errore di query del pool di risorse. "
-
-#, python-format
-msgid "Quota %s limit must be equal or greater than existing resources."
-msgstr ""
-"Il limite della quota %s deve essere uguale o maggiore delle risorse "
-"esistenti. "
-
-#, python-format
-msgid "Quota class %(class_name)s could not be found."
-msgstr "Impossibile trovare la classe di quota %(class_name)s."
-
-msgid "Quota could not be found"
-msgstr "Impossibile trovare la quota"
-
-#, python-format
-msgid "Quota exceeded for resources: %(overs)s"
-msgstr "Quota superata per le risorse: %(overs)s"
-
-#, python-format
-msgid "Quota exceeded: code=%(code)s"
-msgstr "Quota superata: code=%(code)s"
-
-#, python-format
-msgid "Quota for project %(project_id)s could not be found."
-msgstr "Impossibile trovare la quota per il progetto %(project_id)s."
-
-#, python-format
-msgid ""
-"Quota limit invalid for project '%(proj)s' for resource '%(res)s': limit of "
-"%(limit)d is less than in-use value of %(used)d"
-msgstr ""
-"Limite della quota non valido per il progetto '%(proj)s' per la risorsa "
-"'%(res)s': il limite di %(limit)d è inferiore al valore in uso di %(used)d"
-
-#, python-format
-msgid "Quota reservation %(uuid)s could not be found."
-msgstr "Impossibile trovare la prenotazione della quota %(uuid)s."
-
-#, python-format
-msgid "Quota usage for project %(project_id)s could not be found."
-msgstr ""
-"Impossibile trovare l'utilizzo della quota per il progetto %(project_id)s."
- -#, python-format -msgid "RBD diff op failed - (ret=%(ret)s stderr=%(stderr)s)" -msgstr "Operazione diff RBD non riuscita - (ret=%(ret)s stderr=%(stderr)s)" - -#, python-format -msgid "REST %(proxy_ver)s hpelefthandclient %(rest_ver)s" -msgstr "REST %(proxy_ver)s hpelefthandclient %(rest_ver)s" - -msgid "REST server IP must by specified." -msgstr "È necessario specificare l'IP del server REST. " - -msgid "REST server password must by specified." -msgstr "È necessario specificare la password del server REST. " - -msgid "REST server username must by specified." -msgstr "È necessario specificare il nome utente del server REST. " - -msgid "RPC Version" -msgstr "Versione RPC" - -msgid "RPC server response is incomplete" -msgstr "La risposta del server RPC è incompleta" - -msgid "Raid did not have MCS Channel." -msgstr "Il Raid non ha il canale MCS. " - -#, python-format -msgid "Received error string: %s" -msgstr "Ricevuta stringa di errore: %s" - -msgid "Reference must be for an unmanaged snapshot." -msgstr "Il riferimento deve essere per un'istantanea non gestita." - -msgid "Reference must be for an unmanaged virtual volume." -msgstr "Il riferimento deve essere per un volume virtuale non gestito." - -msgid "Reference must be the name of an unmanaged snapshot." -msgstr "Il riferimento deve essere il nome di un'istantanea non gestita." - -msgid "Reference must be the volume name of an unmanaged virtual volume." -msgstr "" -"Il riferimento deve essere il nome volume di un volume virtuale non gestito." - -msgid "Reference must contain either source-name or source-id element." -msgstr "Il riferimento deve contenere l'elemento source-name o source-id." - -msgid "Reference must contain source-id or source-name element." -msgstr "Il riferimento deve contenere l'elemento source-id o source-name." - -msgid "Reference must contain source-id or source-name key." -msgstr "Il riferimento deve contenere la chiave source-id o source-name." 
- -msgid "Reference must contain source-id or source-name." -msgstr "Il riferimento deve contenere source-id o source-name." - -msgid "Reference must contain source-id." -msgstr "Il riferimento deve contenere source-id." - -msgid "Reference must contain source-name element." -msgstr "Il riferimento deve contenere l'elemento source-name." - -msgid "Reference must contain source-name or source-id." -msgstr "Il riferimento deve contenere il nome e l'id dell'origine." - -msgid "Reference must contain source-name." -msgstr "Il riferimento deve contenere source-name." - -msgid "Reference to volume to be managed must contain source-name." -msgstr "Il riferimento al volume da gestire deve contenere source-name." - -#, python-format -msgid "Reference to volume: %s to be managed must contain source-name." -msgstr "Il riferimento al volume: %s da gestire deve contenere source-name." - -#, python-format -msgid "" -"Refusing to migrate volume ID: %(id)s. Please check your configuration " -"because source and destination are the same Volume Group: %(name)s." -msgstr "" -"Migrazione del volume con ID: %(id)s rifiutata. Verificare la configurazione " -"perché il Gruppo di volumi è sia l'origine che la destinazione: %(name)s." - -msgid "Remote pool cannot be found." -msgstr "Impossibile trovare il pool remoto." - -msgid "Remove CHAP error." -msgstr "Errore di rimozione CHAP." - -msgid "Remove fc from host error." -msgstr "Errore di rimozione fc dall'host. " - -msgid "Remove host from array error." -msgstr "Errore di rimozione host dall'array. " - -msgid "Remove host from hostgroup error." -msgstr "Errore di rimozione host dal gruppo host. " - -msgid "Remove iscsi from host error." -msgstr "Errore di rimozione iscsi dall'host. " - -msgid "Remove lun from QoS error." -msgstr "Errore di rimozione lun da QoS. " - -msgid "Remove lun from cache error." -msgstr "Errore di rimozione lun da cache." - -msgid "Remove lun from partition error." 
-msgstr "Errore di rimozione lun dalla partizione. "
-
-msgid "Remove port from port group error."
-msgstr "Errore di rimozione porta da gruppo di porte."
-
-msgid "Remove volume export failed."
-msgstr "Rimozione esportazione volume non riuscita. "
-
-msgid "Rename lun on array error."
-msgstr "Errore di ridenominazione lun sull'array. "
-
-msgid "Rename snapshot on array error."
-msgstr "Errore di ridenominazione istantanea sull'array. "
-
-#, python-format
-msgid "Replication %(name)s to %(ssn)s failed."
-msgstr "Replica %(name)s su %(ssn)s non riuscita."
-
-#, python-format
-msgid "Replication Service Capability not found on %(storageSystemName)s."
-msgstr "Funzione del servizio di replica non trovata in %(storageSystemName)s."
-
-#, python-format
-msgid "Replication Service not found on %(storageSystemName)s."
-msgstr "Replication Service non trovato in %(storageSystemName)s."
-
-msgid "Replication not allowed yet."
-msgstr "Replica non ancora consentita."
-
-msgid "Request body and URI mismatch"
-msgstr "Il corpo della richiesta e l'URI non corrispondono"
-
-msgid "Request body contains too many items"
-msgstr "Il corpo della richiesta contiene troppi elementi"
-
-msgid "Request body contains too many items."
-msgstr ""
-"Il corpo della richiesta contiene un numero troppo elevato di elementi."
-
-msgid "Request body empty"
-msgstr "Corpo della richiesta vuoto"
-
-#, python-format
-msgid "Request to Datera cluster returned bad status: %(status)s | %(reason)s"
-msgstr ""
-"La richiesta a Datera ha restituito uno stato non corretto: %(status)s | "
-"%(reason)s"
-
-#, python-format
-msgid ""
-"Requested backup exceeds allowed Backup gigabytes quota. Requested "
-"%(requested)sG, quota is %(quota)sG and %(consumed)sG has been consumed."
-msgstr ""
-"Il backup richiesto supera la quota di GB di backup consentita. Richiesto "
-"%(requested)sG, la quota è %(quota)sG e sono stati utilizzati %(consumed)sG."
-
-#, python-format
-msgid ""
-"Requested volume or snapshot exceeds allowed %(name)s quota. Requested "
-"%(requested)sG, quota is %(quota)sG and %(consumed)sG has been consumed."
-msgstr ""
-"Il volume o l'istantanea richiesti superano la quota consentita %(name)s. "
-"Richiesto %(requested)sG, la quota è %(quota)sG e sono stati utilizzati "
-"%(consumed)sG."
-
-#, python-format
-msgid ""
-"Requested volume size %(size)d is larger than maximum allowed limit "
-"%(limit)d."
-msgstr ""
-"La dimensione del volume richiesta %(size)d è maggiore del limite massimo "
-"consentito %(limit)d."
-
-msgid "Required configuration not found"
-msgstr "Configurazione richiesta non trovata"
-
-#, python-format
-msgid "Required flag %s is not set"
-msgstr "L'indicatore richiesto %s non è impostato"
-
-#, python-format
-msgid ""
-"Reset backup status aborted, the backup service currently configured "
-"[%(configured_service)s] is not the backup service that was used to create "
-"this backup [%(backup_service)s]."
-msgstr ""
-"La reimpostazione dello stato del backup è stata interrotta, il servizio di "
-"backup attualmente configurato [%(configured_service)s] non è il servizio di "
-"backup utilizzato per creare questo backup [%(backup_service)s]."
-
-#, python-format
-msgid "Resizing clone %s failed."
-msgstr "Ridimensionamento clone %s non riuscito. "
-
-msgid "Resizing image file failed."
-msgstr "Ridimensionamento del file immagine non riuscito."
-
-msgid "Resource could not be found."
-msgstr "Impossibile trovare la risorsa."
-
-msgid "Resource not ready."
-msgstr "Risorsa non pronta."
-
-#, python-format
-msgid "Response error - %s."
-msgstr "Errore di risposta - %s."
-
-msgid "Response error - The storage-system is offline."
-msgstr "Errore di risposta - Il sistema di archiviazione è offline."
-
-#, python-format
-msgid "Response error code - %s."
-msgstr "Codice di errore risposta - %s."
-
-msgid "RestURL is not configured."
-msgstr "RestURL non configurato."
- -#, python-format -msgid "" -"Restore backup aborted, expected volume status %(expected_status)s but got " -"%(actual_status)s." -msgstr "" -"Ripristino del backup interrotto, lo stato del volume previsto è " -"%(expected_status)s ma è stato ricevuto %(actual_status)s." - -#, python-format -msgid "" -"Restore backup aborted, the backup service currently configured " -"[%(configured_service)s] is not the backup service that was used to create " -"this backup [%(backup_service)s]." -msgstr "" -"Ripristino del backup interrotto, il servizio di backup attualmente " -"configurato [%(configured_service)s] non è il servizio di backup utilizzato " -"per creare questo backup [%(backup_service)s]." - -#, python-format -msgid "" -"Restore backup aborted: expected backup status %(expected_status)s but got " -"%(actual_status)s." -msgstr "" -"Ripristino del backup interrotto: lo stato del backup previsto è " -"%(expected_status)s ma è stato ricevuto %(actual_status)s." - -#, python-format -msgid "" -"Retrieved a different amount of SolidFire volumes for the provided Cinder " -"snapshots. Retrieved: %(ret)s Desired: %(des)s" -msgstr "" -"Richiamata una quantità diversa di volumi SolidFire per le istantanee Cinder " -"fornite. Richiamati: %(ret)s Desiderati: %(des)s" - -#, python-format -msgid "" -"Retrieved a different amount of SolidFire volumes for the provided Cinder " -"volumes. Retrieved: %(ret)s Desired: %(des)s" -msgstr "" -"Richiamata una quantità diversa di volumi SolidFire per i volumi Cinder " -"forniti. Richiamati: %(ret)s Desiderati: %(des)s" - -#, python-format -msgid "Retry count exceeded for command: %s" -msgstr "Conteggio dei tentativi superato per il comando: %s" - -msgid "Retryable SolidFire Exception encountered" -msgstr "Rilevata eccezione Retryable SolidFire" - -msgid "Retype requires migration but is not allowed." -msgstr "" -"L'assegnazione del nuovo tipo richiede la migrazione, ma questa operazione " -"non è consentita." 
- -#, python-format -msgid "Rolling back %(volumeName)s by deleting it." -msgstr "Esecuzione roll back %(volumeName)s mediante eliminazione." - -#, python-format -msgid "" -"Running Cinder with a VMware vCenter version less than %s is not allowed." -msgstr "" -"L'esecuzione di Cinder con una versione di VMware vCenter inferiore a %s non " -"è consentita." - -msgid "SAN product is not configured." -msgstr "Prodotto SAN non configurato." - -msgid "SAN protocol is not configured." -msgstr "Protocollo SAN non configurato." - -#, python-format -msgid "SMBFS config 'smbfs_oversub_ratio' invalid. Must be > 0: %s" -msgstr "" -"Configurazione SMBFS 'smbfs_oversub_ratio' non valida. Deve essere > 0: %s" - -#, python-format -msgid "SMBFS config 'smbfs_used_ratio' invalid. Must be > 0 and <= 1.0: %s" -msgstr "" -"Configurazione SMBFS 'smbfs_used_ratio' non valida. Deve essere > 0 and <= " -"1.0: %s" - -#, python-format -msgid "SMBFS config file at %(config)s doesn't exist." -msgstr "Il file di configurazione SMBFS in %(config)s non esiste." - -msgid "SMBFS config file not set (smbfs_shares_config)." -msgstr "Il file di configurazione SMBFS non è impostato (smbfs_shares_config)." - -#, python-format -msgid "SSH Command failed after '%(total_attempts)r' attempts : '%(command)s'" -msgstr "" -"Comando SSH non riuscito dopo '%(total_attempts)r' tentativi: '%(command)s'" - -#, python-format -msgid "SSH command injection detected: %(command)s" -msgstr "Rilevato inserimento comando SSH: %(command)s" - -#, python-format -msgid "SSH connection failed for %(fabric)s with error: %(err)s" -msgstr "Connessione SSH non riuscita per %(fabric)s con errore: %(err)s" - -#, python-format -msgid "SSL Certificate expired on %s." -msgstr "Certificato SSL scaduto il %s." - -#, python-format -msgid "SSL error: %(arg)s." -msgstr "Errore SSL: %(arg)s." - -#, python-format -msgid "Scheduler Host Filter %(filter_name)s could not be found." 
-msgstr "Impossibile trovare il filtro Scheduler Host %(filter_name)s." - -#, python-format -msgid "Scheduler Host Weigher %(weigher_name)s could not be found." -msgstr "Impossibile trovare Scheduler Host Weigher %(weigher_name)s." - -#, python-format -msgid "" -"Secondary copy status: %(status)s and synchronized: %(sync)s, sync progress " -"is: %(progress)s%%." -msgstr "" -"Stato della copia secondaria: %(status)s e sincronizzata: %(sync)s, " -"avanzamento sync è: %(progress)s%%." - -#, python-format -msgid "" -"Secondary id can not be the same as primary array, backend_id = " -"%(secondary)s." -msgstr "" -"L'id secondario non può essere uguale all'array primario, backend_id = " -"%(secondary)s." - -#, python-format -msgid "SerialNumber must be in the file %(fileName)s." -msgstr "SerialNumber deve essere nel file %(fileName)s." - -#, python-format -msgid "Service %(service)s on host %(host)s removed." -msgstr "Servizio %(service)s su host %(host)s rimosso." - -#, python-format -msgid "Service %(service_id)s could not be found on host %(host)s." -msgstr "Impossibile trovare il servizio %(service_id)s sull'host %(host)s." - -#, python-format -msgid "Service %(service_id)s could not be found." -msgstr "Impossibile trovare il servizio %(service_id)s." - -msgid "Service is too old to fulfil this request." -msgstr "Il servizio è troppo vecchio per soddisfare la richiesta." - -msgid "Service is unavailable at this time." -msgstr "Il servizio non è disponibile in questo momento." - -msgid "Set pair secondary access error." -msgstr "Errore di impostazione dell'accesso secondario alla coppia. " - -msgid "Sets thin provisioning." -msgstr "Imposta il thin provisioning." - -msgid "" -"Setting LUN QoS policy group is not supported on this storage family and " -"ONTAP version." -msgstr "" -"Impostazione del gruppo di politiche QoS della LUN non supportata su questa " -"famiglia di archiviazione e versione ONTAP." 
- -msgid "" -"Setting file qos policy group is not supported on this storage family and " -"ontap version." -msgstr "" -"Impostazione del gruppo di politiche qos del file non supportato su questa " -"famiglia di archiviazione e versione ontap." - -#, python-format -msgid "" -"Share at %(dir)s is not writable by the Cinder volume service. Snapshot " -"operations will not be supported." -msgstr "" -"La condivisione %(dir)s non è scrivibile dal servizio del volume Cinder. Le " -"operazioni dell'istantanea non saranno supportate." - -#, python-format -msgid "Sheepdog I/O Error, command was: \"%s\"." -msgstr "Errore I/O Sheepdog, il comando era: \"%s\". " - -msgid "" -"Show operations can only be made to projects in the same hierarchy of the " -"project in which users are scoped to." -msgstr "" -"Operazioni di visualizzazione possono essere eseguite solo per progetti " -"nella stessa gerarchia delprogetto che è l'ambito degli utenti. " - -msgid "Size" -msgstr "Dimensione" - -#, python-format -msgid "Size for volume: %s not found, cannot secure delete." -msgstr "" -"Dimensione per il volume: %s non trovato, impossibile eseguire eliminazione " -"protetta." - -#, python-format -msgid "" -"Size is %(image_size)dGB and doesn't fit in a volume of size " -"%(volume_size)dGB." -msgstr "" -"La dimensione è %(image_size)dGB e non è contenuta in un volume di " -"dimensione %(volume_size)dGB." - -#, python-format -msgid "" -"Size of specified image %(image_size)sGB is larger than volume size " -"%(volume_size)sGB." -msgstr "" -"La dimensione dell'immagine specificata %(image_size)sGB è maggiore della " -"dimensione del volume %(volume_size)sGB." - -#, python-format -msgid "" -"Snapshot %(id)s has been asked to be deleted while waiting for it to become " -"available. Perhaps a concurrent request was made." -msgstr "" -"È stato richiesto di eliminare l'istantanea %(id)s nell'attesa che " -"diventasse disponibile. È possibile che sia stata eseguita una richiesta " -"contemporanea." 
- -#, python-format -msgid "" -"Snapshot %(id)s was found in state %(state)s rather than 'deleting' during " -"cascade delete." -msgstr "" -"Istantanea %(id)s trovata nello stato %(state)s anziché nello stato " -"'deleting' durante l'eliminazione a catena." - -#, python-format -msgid "Snapshot %(snapshot_id)s could not be found." -msgstr "Impossibile trovare l'istantanea %(snapshot_id)s." - -#, python-format -msgid "Snapshot %(snapshot_id)s has no metadata with key %(metadata_key)s." -msgstr "" -"L'istantanea %(snapshot_id)s non contiene metadati con la chiave " -"%(metadata_key)s." - -#, python-format -msgid "Snapshot '%s' doesn't exist on array." -msgstr "L'istantanea '%s' non esiste sull'array." - -#, python-format -msgid "" -"Snapshot cannot be created because volume %(vol_id)s is not available, " -"current volume status: %(vol_status)s." -msgstr "" -"Impossibile creare l'istantanea perché il volume %(vol_id)s non è " -"disponibile, stato corrente del volume: %(vol_status)s." - -msgid "Snapshot cannot be created while volume is migrating." -msgstr "Impossibile creare l'istantanea durante la migrazione del volume." - -msgid "Snapshot of secondary replica is not allowed." -msgstr "Non è consentita l'istantanea della replica secondaria." - -#, python-format -msgid "Snapshot of volume not supported in state: %s." -msgstr "Istantanea del volume non supportata nello stato: %s." - -#, python-format -msgid "Snapshot res \"%s\" that is not deployed anywhere?" -msgstr "Risorsa dell'istantanea \"%s\" non distribuita?" - -#, python-format -msgid "Snapshot status %(cur)s not allowed for update_snapshot_status" -msgstr "" -"Lo stato dell'istantanea %(cur)s non è consentito per update_snapshot_status" - -msgid "Snapshot status must be \"available\" to clone." -msgstr "Lo stato dell'istantanea deve essere \"available\" per la clonazione." - -#, python-format -msgid "" -"Snapshot to be backed up must be available, but the current status is \"%s\"." 
-msgstr "" -"L'istantanea di cui deve essere eseguito il backup deve essere disponibile, " -"ma lo stato corrente è \"%s\"." - -#, python-format -msgid "Snapshot with id of %s could not be found." -msgstr "Impossibile trovare l'istantanea con id %s." - -#, python-format -msgid "" -"Snapshot='%(snap)s' does not exist in base image='%(base)s' - aborting " -"incremental backup" -msgstr "" -"Snapshot='%(snap)s' non esiste in image='%(base)s' di base - interruzione " -"del backup incrementale" - -#, python-format -msgid "Snapshots are not supported for this volume format: %s" -msgstr "Le istantanee non sono supportate per questo formato di volume: %s" - -#, python-format -msgid "Socket error: %(arg)s." -msgstr "Errore socket: %(arg)s." - -msgid "SolidFire Cinder Driver exception" -msgstr "Eccezione SolidFire Cinder Driver" - -msgid "Sort direction array size exceeds sort key array size." -msgstr "" -"La dimensione dell'array della direzione di ordinamento supera la dimensione " -"dell'array della chiave di ordinamento." - -msgid "Source CG is empty. No consistency group will be created." -msgstr "Il GC di origine è vuoto. Non verrà creato alcun gruppo di coerenza." - -msgid "Source host details not found." -msgstr "Dettagli sull'host di origine non trovati." - -msgid "Source volume device ID is required." -msgstr "L'ID periferica volume di origine è obbligatorio. " - -msgid "Source volume not mid-migration." -msgstr "Volume di origine non migrazione intermedia." - -msgid "SpaceInfo returned byarray is invalid" -msgstr "SpaceInfo restituito dall'array non è valido" - -#, python-format -msgid "" -"Specified host to map to volume %(vol)s is in unsupported host group with " -"%(group)s." -msgstr "" -"L'host specificato per l'associazione al volume %(vol)s è in un gruppo host " -"non supportato con%(group)s." - -msgid "Specified logical volume does not exist." -msgstr "Il volume logico specificato non esiste." 
- -#, python-format -msgid "Specified snapshot group with id %s could not be found." -msgstr "Impossibile trovare il gruppo di istantanee specificato con id %s. " - -msgid "Specify a password or private_key" -msgstr "Specificare una password o private_key" - -msgid "Specify san_password or san_private_key" -msgstr "Specifica san_password o san_private_key" - -msgid "" -"Specify volume type name, description, is_public or a combination thereof." -msgstr "" -"Specificare il nome, la descrizione, is_public o una combinazione dei " -"precedenti per il tipo di volume." - -msgid "Split pair error." -msgstr "Errore di divisione della coppia." - -msgid "Split replication failed." -msgstr "Replica divisione non riuscita." - -msgid "Start LUNcopy error." -msgstr "Errore di avvio LUNcopy." - -msgid "State" -msgstr "Stato" - -#, python-format -msgid "State of node is wrong. Current state is %s." -msgstr "Lo stato del nodo non è corretto. Lo stato corrente è %s. " - -msgid "Status" -msgstr "Stato" - -msgid "Stop snapshot error." -msgstr "Errore di arresto istantanea." - -#, python-format -msgid "Storage Configuration Service not found on %(storageSystemName)s." -msgstr "Storage Configuration Service non trovato in %(storageSystemName)s." - -#, python-format -msgid "Storage HardwareId mgmt Service not found on %(storageSystemName)s." -msgstr "Storage HardwareId mgmt Service non trovato in %(storageSystemName)s." - -#, python-format -msgid "Storage Profile %s not found." -msgstr "Profilo di memoria %s non trovato." - -#, python-format -msgid "Storage Relocation Service not found on %(storageSystemName)s." -msgstr "Storage Relocation Service non trovato in %(storageSystemName)s." - -#, python-format -msgid "Storage family %s is not supported." -msgstr "La famiglia di archiviazione %s non è supportata." 
- -#, python-format -msgid "Storage group %(storageGroupName)s was not deleted successfully" -msgstr "" -"Il gruppo di archiviazione %(storageGroupName)s non è stato eliminato " -"correttamente" - -#, python-format -msgid "Storage host %(svr)s not detected, verify name" -msgstr "Host di memoria %(svr)s non rilevato, verificare il nome " - -msgid "Storage pool is not configured." -msgstr "Pool di archiviazione non configurato." - -#, python-format -msgid "Storage profile: %(storage_profile)s not found." -msgstr "Profilo di archiviazione: %(storage_profile)s non trovato." - -msgid "Storage resource could not be found." -msgstr "Impossibile trovare la risorsa di memoria." - -msgid "Storage system id not set." -msgstr "Id sistema di archivio non impostato." - -#, python-format -msgid "Storage system not found for pool %(poolNameInStr)s." -msgstr "" -"Impossibile trovare il sistema di archiviazione per il pool " -"%(poolNameInStr)s." - -#, python-format -msgid "StorageSystem %(array)s is not found." -msgstr "StorageSystem %(array)s non è stato trovato" - -#, python-format -msgid "" -"Sum of child usage '%(sum)s' is greater than free quota of '%(free)s' for " -"project '%(proj)s' for resource '%(res)s'. Please lower the limit or usage " -"for one or more of the following projects: '%(child_ids)s'" -msgstr "" -"La somma dell'utilizzo child '%(sum)s' è maggiore della quota disponibile di " -"'%(free)s' per il progetto '%(proj)s' per la risorsa '%(res)s'. Ridurre il " -"limite o l'utilizzo per uno o più dei seguenti progetti: '%(child_ids)s'" - -msgid "Switch over pair error." -msgstr "Errore di passaggio alla coppia." - -msgid "Sync pair error." -msgstr "Errore di sincronizzazione della coppia." - -#, python-format -msgid "System %(id)s found with bad password status - %(pass_status)s." -msgstr "Sistema %(id)s trovato con stato password errata - %(pass_status)s." - -#, python-format -msgid "System %(id)s found with bad status - %(status)s." 
-msgstr "%(id)s di sistema trovati in stato non valido - %(status)s." - -msgid "System does not support compression." -msgstr "Il sistema non supporta la compressione," - -msgid "System is busy, retry operation." -msgstr "Il sistema è occupato, ritentare l'operazione. " - -#, python-format -msgid "" -"TSM [%(tsm)s] was not found in CloudByte storage for account [%(account)s]." -msgstr "" -"TSM [%(tsm)s] non trovato nella memoria CloudByte per l'account " -"[%(account)s]." - -msgid "Target volume type is still in use." -msgstr "Il tipo di volume di destinazione è ancora in uso." - -msgid "Terminate connection failed" -msgstr "Interrompi connessione non riuscito" - -msgid "Terminate connection unable to connect to backend." -msgstr "Interrompi connessione non riesce a collegarsi al backend." - -#, python-format -msgid "Terminate volume connection failed: %(err)s" -msgstr "Terminazione connessione volume non riuscita: %(err)s" - -#, python-format -msgid "The %(type)s %(id)s source to be replicated was not found." -msgstr "L'origine %(type)s %(id)s da replicare non è stata trovata." - -msgid "" -"The 'sort_key' and 'sort_dir' parameters are deprecated and cannot be used " -"with the 'sort' parameter." -msgstr "" -"I parametri 'sort_key' e 'sort_dir' sono obsoleti e non possono essere " -"utilizzati con il parametro 'sort'." - -msgid "The EQL array has closed the connection." -msgstr "L'array EQL ha chiuso la connessione." - -#, python-format -msgid "" -"The GPFS filesystem %(fs)s is not at the required release level. Current " -"level is %(cur)s, must be at least %(min)s." -msgstr "" -"Il filesystem %(fs)s GPFS non è del livello di release richiesto. Il " -"livello corrente è %(cur)s, deve essere almeno %(min)s." - -msgid "The IP Address was not found." -msgstr "L'indirizzo IP non è stato trovato." - -#, python-format -msgid "" -"The WebDAV request failed. 
Reason: %(msg)s, Return code/reason: %(code)s, " -"Source Volume: %(src)s, Destination Volume: %(dst)s, Method: %(method)s." -msgstr "" -"La richiesta WebDAV non è riuscita. Motivo: %(msg)s, Codice di ritorno/" -"motivo:%(code)s, Volume di origine: %(src)s, Volume di destinazione:%(dst)s, " -"Metodo:%(method)s." - -msgid "" -"The above error may show that the database has not been created.\n" -"Please create a database using 'cinder-manage db sync' before running this " -"command." -msgstr "" -"L'errore sopra riportato potrebbe mostrare che il database non è stato " -"creato.\n" -"Creare un database utilizzando 'cinder-manage db sync' prima di eseguire " -"questo comando." - -#, python-format -msgid "" -"The array does not support the storage pool setting for SLO %(slo)s and " -"workload %(workload)s. Please check the array for valid SLOs and workloads." -msgstr "" -"L'array non supporta l'impostazione del pool di archiviazione per SLO " -"%(slo)s e carico di lavoro %(workload)s. Controllare l'array per SLO e " -"carichi di lavoro validi." - -msgid "" -"The back-end where the volume is created does not have replication enabled." -msgstr "Il back-end in cui viene creato il volume non ha la replica abilitata." - -#, python-format -msgid "" -"The command %(cmd)s failed. (ret: %(ret)s, stdout: %(out)s, stderr: %(err)s)" -msgstr "" -"Il comando %(cmd)s non è riuscito. (ret: %(ret)s, stdout: %(out)s, stderr: " -"%(err)s)" - -msgid "The copy should be primary or secondary" -msgstr "La copia deve essere primaria o secondaria" - -#, python-format -msgid "" -"The creation of a logical device could not be completed. (LDEV: %(ldev)s)" -msgstr "" -"La creazione dell'unità logica non può essere completata. 
(LDEV: %(ldev)s)" - -msgid "The decorated method must accept either a volume or a snapshot object" -msgstr "Il metodo decorato deve accettare un oggetto volume o istantanea " - -#, python-format -msgid "The device in the path %(path)s is unavailable: %(reason)s" -msgstr "Il dispositivo nel percorso %(path)s non è disponibile: %(reason)s" - -#, python-format -msgid "The end time (%(end)s) must be after the start time (%(start)s)." -msgstr "" -"L'ora di fine (%(end)s) deve essere successiva all'ora di inizio (%(start)s)." - -#, python-format -msgid "The extraspec: %(extraspec)s is not valid." -msgstr "Specifica supplementare: %(extraspec)s non valida." - -#, python-format -msgid "The failed-over volume could not be deleted: %s" -msgstr "Impossibile eliminare il volume sottoposto a failover: %s" - -#, python-format -msgid "The following elements are required: %s" -msgstr "Sono richiesti i seguenti elementi: %s" - -msgid "The host group or iSCSI target could not be added." -msgstr "Impossibile aggiungere il gruppo di host o la destinazione iSCSI." - -msgid "The host group or iSCSI target was not found." -msgstr "Non è stato trovato il gruppo di host o la destinazione iSCSI." - -msgid "" -"The host is not ready to be failed back. Please resynchronize the volumes " -"and resume replication on the 3PAR backends." -msgstr "" -"L'host non è pronto per essere sottoposto a failback. Risincronizzare i " -"volumi e riprendere la replica sui backend 3PAR." - -msgid "" -"The host is not ready to be failed back. Please resynchronize the volumes " -"and resume replication on the LeftHand backends." -msgstr "" -"L'host non è pronto per essere sottoposto a failback. Risincronizzare i " -"volumi e riprendere la replica sui backend LeftHand." - -msgid "" -"The host is not ready to be failed back. Please resynchronize the volumes " -"and resume replication on the Storwize backends." -msgstr "" -"L'host non è pronto per essere sottoposto a failback. 
Risincronizzare i " -"volumi e riprendere la replica sui backend Storwize." - -#, python-format -msgid "The iSCSI CHAP user %(user)s does not exist." -msgstr "L'utente iSCSI CHAP %(user)s non esiste." - -msgid "The key cannot be None." -msgstr "La chiave non può essere Nessuno." - -#, python-format -msgid "The logical device for specified %(type)s %(id)s was already deleted." -msgstr "L'unità logica per %(type)s %(id)s specificato è già stata eliminata." - -#, python-format -msgid "The method %(method)s is timed out. (timeout value: %(timeout)s)" -msgstr "Il metodo %(method)s è scaduto. (valore timeout: %(timeout)s)" - -msgid "The method update_migrated_volume is not implemented." -msgstr "Il metodo update_migrated_volume non è implementato." - -#, python-format -msgid "" -"The mount %(mount_path)s is not a valid Quobyte USP volume. Error: %(exc)s" -msgstr "" -"Il montaggio %(mount_path)s non è un volume USP Quobyte valido. Errore: " -"%(exc)s" - -#, python-format -msgid "The parameter of the storage backend. (config_group: %(config_group)s)" -msgstr "Il parametro del backend di memoria. (config_group: %(config_group)s)" - -msgid "The parent backup must be available for incremental backup." -msgstr "Il backup parent deve essere disponibile per il backup incrementale." - -#, python-format -msgid "The provided snapshot '%s' is not a snapshot of the provided volume." -msgstr "L'istantanea fornita '%s' non è un'istantanea del volume fornito." - -msgid "" -"The reference to the volume in the backend should have the format " -"file_system/volume_name (volume_name cannot contain '/')" -msgstr "" -"Il riferimento al volume nel backend deve avere il formato file_system/" -"volume_name (volume_name non può contenere '/')" - -#, python-format -msgid "The remote retention count must be %s or less." -msgstr "Il conteggio memorizzazione remoto deve essere %s o inferiore." - -msgid "" -"The replication mode was not configured correctly in the volume type " -"extra_specs. 
If replication:mode is periodic, replication:sync_period must " -"also be specified and be between 300 and 31622400 seconds." -msgstr "" -"La modalità di replica non è stata configurata correttamente nel tipo di " -"volume extra_specs. Se replication:mode è periodic, replication:sync_period " -"deve essere specificato e deve essere compreso tra 300 e 31622400 secondi." - -#, python-format -msgid "The replication sync period must be at least %s seconds." -msgstr "" -"Il periodo di sincronizzazione replica deve essere di almeno %s secondi." - -#, python-format -msgid "" -"The requested size : %(requestedSize)s is not the same as resulting size: " -"%(resultSize)s." -msgstr "" -"La dimensione richiesta : %(requestedSize)s non è la stessa dimensione " -"risultante %(resultSize)s." - -#, python-format -msgid "The resource %(resource)s was not found." -msgstr "Impossibile trovare la risorsa %(resource)s." - -msgid "The results are invalid." -msgstr "I risultati non sono validi." - -#, python-format -msgid "The retention count must be %s or less." -msgstr "Il conteggio memorizzazione deve essere %s o inferiore." - -msgid "The snapshot cannot be created when the volume is in maintenance mode." -msgstr "" -"L'istantanea non può essere creata quando il volume è in modalità di " -"manutenzione. " - -msgid "The source volume for this WebDAV operation not found." -msgstr "Volume di origine per questa operazione WebDAV non trovato." - -#, python-format -msgid "" -"The source volume type '%(src)s' is different than the destination volume " -"type '%(dest)s'." -msgstr "" -"Il tipo volume origine '%(src)s' è diverso dal tipo di volume di " -"destinazione '%(dest)s'." - -#, python-format -msgid "The source volume type '%s' is not available." -msgstr "Il tipo volume origine '%s' non è disponibile." - -#, python-format -msgid "The specified %(desc)s is busy." -msgstr "Il %(desc)s specificato è occupato." 
- -#, python-format -msgid "The specified LUN does not belong to the given pool: %s." -msgstr "La LUN specificata non appartiene al pool indicato: %s." - -#, python-format -msgid "" -"The specified ldev %(ldev)s could not be managed. The ldev must not be " -"mapping." -msgstr "" -"L'ldev specificato %(ldev)s non può essere gestito. L'ldev non deve essere " -"associazione." - -#, python-format -msgid "" -"The specified ldev %(ldev)s could not be managed. The ldev must not be " -"paired." -msgstr "" -"L'ldev specificato %(ldev)s non può essere gestito. L'ldev non deve essere " -"associato." - -#, python-format -msgid "" -"The specified ldev %(ldev)s could not be managed. The ldev size must be in " -"multiples of gigabyte." -msgstr "" -"L'ldev specificato %(ldev)s non può essere gestito. La dimensione ldev deve " -"essere in multipli di gigabyte." - -#, python-format -msgid "" -"The specified ldev %(ldev)s could not be managed. The volume type must be DP-" -"VOL." -msgstr "" -"L'ldev specificato %(ldev)s non può essere gestito. Il volume deve essere " -"del tipo DP-VOL." - -#, python-format -msgid "" -"The specified operation is not supported. The volume size must be the same " -"as the source %(type)s. (volume: %(volume_id)s)" -msgstr "" -"L'operazione specificata non è supportata. La dimensione del volume deve " -"essere la stessa dell'origine %(type)s. (volume: %(volume_id)s)" - -msgid "The specified vdisk is mapped to a host." -msgstr "Il disco virtuale specificato è associato a un host." - -msgid "The specified volume is mapped to a host." -msgstr "Il volume specificato è associato a un host." - -#, python-format -msgid "" -"The storage array password for %s is incorrect, please update the configured " -"password." -msgstr "" -"La password dell'array di archiviazione per %s non è corretta, aggiornare la " -"password configurata." - -#, python-format -msgid "The storage backend can be used. 
(config_group: %(config_group)s)" -msgstr "" -"È possibile utilizzare il backend di memoria. (config_group: " -"%(config_group)s)" - -#, python-format -msgid "" -"The storage device does not support %(prot)s. Please configure the device to " -"support %(prot)s or switch to a driver using a different protocol." -msgstr "" -"Il dispositivo di archiviazione non supporta %(prot)s. Configurare il " -"dispositivo per supportare %(prot)s o passare a un driver che utilizza un " -"protocollo diverso." - -#, python-format -msgid "" -"The striped meta count of %(memberCount)s is too small for volume: " -"%(volumeName)s, with size %(volumeSize)s." -msgstr "" -"Il conteggio di metadati striped %(memberCount)s è troppo piccolo per il " -"volume: %(volumeName)s, con dimensione %(volumeSize)s." - -#, python-format -msgid "" -"The type of metadata: %(metadata_type)s for volume/snapshot %(id)s is " -"invalid." -msgstr "" -"Il tipo di metadati: %(metadata_type)s per volume/istantanea %(id)s non è " -"valido." - -#, python-format -msgid "" -"The volume %(volume_id)s could not be extended. The volume type must be " -"Normal." -msgstr "" -"Il volume %(volume_id)s non può essere esteso. Il tipo di volume deve essere " -"Normale." - -#, python-format -msgid "" -"The volume %(volume_id)s could not be unmanaged. The volume type must be " -"%(volume_type)s." -msgstr "" -"Il volume %(volume_id)s non può essere non gestito. Il tipo di volume deve " -"essere %(volume_type)s." - -#, python-format -msgid "The volume %(volume_id)s is managed successfully. (LDEV: %(ldev)s)" -msgstr "La volume %(volume_id)s viene gestito correttamente. (LDEV:%(ldev)s)" - -#, python-format -msgid "The volume %(volume_id)s is unmanaged successfully. (LDEV: %(ldev)s)" -msgstr "" -"La gestione del volume %(volume_id)s viene annullata correttamente. (LDEV:" -"%(ldev)s)" - -#, python-format -msgid "The volume %(volume_id)s to be mapped was not found." -msgstr "Il volume %(volume_id)s da associare non è stato trovato." 
- -msgid "The volume cannot accept transfer in maintenance mode." -msgstr "" -"Il volume non può accettare trasferimenti in modalità di manutenzione. " - -msgid "The volume cannot be attached in maintenance mode." -msgstr "Il volume non può essere collegato in modalità di manutenzione. " - -msgid "The volume cannot be detached in maintenance mode." -msgstr "Il volume non può essere scollegato in modalità di manutenzione. " - -msgid "The volume cannot be updated during maintenance." -msgstr "Il volume non può essere aggiornato durante la manutenzione. " - -msgid "The volume connection cannot be initialized in maintenance mode." -msgstr "" -"La connessione volume non può essere inizializzata in modalità di " -"manutenzione. " - -msgid "The volume driver requires the iSCSI initiator name in the connector." -msgstr "Il driver del volume richiede il nome iniziatore iSCSI nel connettore." - -msgid "" -"The volume is currently busy on the 3PAR and cannot be deleted at this time. " -"You can try again later." -msgstr "" -"Il volume è attualmente occupato su 3PAR e non può essere eliminato in " -"questo momento. È possibile ritentare successivamente." - -msgid "The volume label is required as input." -msgstr "L'etichetta volume è richiesta come input." - -#, python-format -msgid "There are no resources available for use. (resource: %(resource)s)" -msgstr "" -"Non sono presenti risorse disponibili per l'utilizzo. (risorsa: %(resource)s)" - -msgid "There are no valid ESX hosts." -msgstr "Non vi sono host ESX validi. " - -msgid "There are no valid datastores." -msgstr "Nessun archivio dati valido." - -#, python-format -msgid "" -"There is no designation of the %(param)s. The specified storage is essential " -"to manage the volume." -msgstr "" -"Non c'è alcuna designazione del %(param)s. L'archivio specificato è " -"indispensabile per gestire il volume." - -msgid "" -"There is no designation of the ldev. The specified ldev is essential to " -"manage the volume." 
-msgstr "" -"Non c'è alcuna designazione dell'ldev. L'ldev specificato è indispensabile " -"per gestire il volume." - -msgid "There is no metadata in DB object." -msgstr "Non sono presenti metadati nell'oggetto DB. " - -#, python-format -msgid "There is no share which can host %(volume_size)sG" -msgstr "Non esiste alcuna condivisione che può ospitare %(volume_size)sG" - -#, python-format -msgid "There is no share which can host %(volume_size)sG." -msgstr "Non esiste nessuna condivisione che possa ospitare %(volume_size)sG." - -#, python-format -msgid "There is no such action: %s" -msgstr "Non esiste alcuna azione simile: %s" - -msgid "There is no virtual disk device." -msgstr "Non esistono unità del disco virtuale." - -#, python-format -msgid "There was an error adding the volume to the remote copy group: %s." -msgstr "" -"Si è verificato un errore durante l'aggiunta del volume al gruppo di copie " -"remote: %s." - -#, python-format -msgid "There was an error creating the cgsnapshot: %s" -msgstr "Si è verificato un errore durante la creazione dell'istantanea cg: %s." - -#, python-format -msgid "There was an error creating the remote copy group: %s." -msgstr "" -"Si è verificato un errore durante la creazione del gruppo di copie remote: " -"%s." - -#, python-format -msgid "" -"There was an error setting the sync period for the remote copy group: %s." -msgstr "" -"Si è verificato un errore durante l'impostazione del periodo di " -"sincronizzazione per il gruppo di copie remoto: %s." - -#, python-format -msgid "" -"There was an error setting up a remote copy group on the 3PAR arrays: " -"('%s'). The volume will not be recognized as replication type." -msgstr "" -"Si è verificato un errore durante la configurazione di un gruppo di copie " -"remoto sugli array 3PAR:('%s'). Il volume non verrà riconosciuto come tipo " -"di replica." - -#, python-format -msgid "" -"There was an error setting up a remote schedule on the LeftHand arrays: " -"('%s'). 
The volume will not be recognized as replication type." -msgstr "" -"Si è verificato un errore durante la configurazione di una pianificazione " -"remota sugli array LeftHand:('%s'). Il volume non verrà riconosciuto come " -"tipo di replica." - -#, python-format -msgid "There was an error starting remote copy: %s." -msgstr "Si è verificato un errore durante l'avvio della copia remota: %s." - -#, python-format -msgid "There's no Gluster config file configured (%s)" -msgstr "Nessun file di configurazione Gluster configurato (%s)" - -#, python-format -msgid "There's no NFS config file configured (%s)" -msgstr "Nessun file di configurazione NFS configurato (%s)" - -#, python-format -msgid "" -"There's no Quobyte volume configured (%s). Example: quobyte:///" -"" -msgstr "" -"Non è configurato alcun volume Quobyte (%s). Esempio: quobyte:///" -"" - -msgid "Thin provisioning not supported on this version of LVM." -msgstr "Thin provisioning non supportato in questa versione di LVM." - -msgid "This driver does not support deleting in-use snapshots." -msgstr "Questo driver non supporta l'eliminazione di istantanee in uso." - -msgid "This driver does not support snapshotting in-use volumes." -msgstr "" -"Questo driver non supporta la creazione di istantanee nei volumi in uso." - -msgid "This request was rate-limited." -msgstr "Questa richiesta era rate-limited." - -#, python-format -msgid "" -"This system platform (%s) is not supported. This driver supports only Win32 " -"platforms." -msgstr "" -"Questa piattaforma del sistema (%s) non è supportata. Questo driver supporta " -"solo le piattaforme Win32." - -#, python-format -msgid "Tier Policy Service not found for %(storageSystemName)s." -msgstr "Tier Policy Service non trovato per %(storageSystemName)s." - -#, python-format -msgid "Timed out while waiting for Nova update for creation of snapshot %s." -msgstr "" -"Timeout in attesa dell'aggiornamento di Nova per la creazione " -"dell'istantanea %s." 
- -#, python-format -msgid "" -"Timed out while waiting for Nova update for deletion of snapshot %(id)s." -msgstr "" -"Timeout in attesa dell'aggiornamento di Nova per l'eliminazione " -"dell'istantanea %(id)s." - -#, python-format -msgid "Timeout while calling %s " -msgstr "Timeout durante la chiamata di %s " - -#, python-format -msgid "Timeout while requesting %(service)s API." -msgstr "Richiesta dell'API di %(service)s scaduta." - -#, python-format -msgid "Timeout while requesting capabilities from backend %(service)s." -msgstr "" -"Timeout durante la richiesta delle funzionalità dal backend %(service)s." - -#, python-format -msgid "Transfer %(transfer_id)s could not be found." -msgstr "Impossibile trovare il trasferimento %(transfer_id)s." - -#, python-format -msgid "" -"Transfer %(transfer_id)s: Volume id %(volume_id)s in unexpected state " -"%(status)s, expected awaiting-transfer" -msgstr "" -"Trasferimento %(transfer_id)s: id volume %(volume_id)s in uno stato " -"imprevisto %(status)s, previsto awaiting-transfer" - -#, python-format -msgid "" -"Trying to import backup metadata from id %(meta_id)s into backup %(id)s." -msgstr "" -"Tentativo di importare i metadati di backup dall'id %(meta_id)s nel backup " -"%(id)s." - -#, python-format -msgid "" -"Tune volume task stopped before it was done: volume_name=%(volume_name)s, " -"task-status=%(status)s." -msgstr "" -"Attività di regolazione del volume arrestata prima di essere completata: " -"volume_name=%(volume_name)s, task-status=%(status)s." - -#, python-format -msgid "" -"Type %(type_id)s is already associated with another qos specs: " -"%(qos_specs_id)s" -msgstr "" -"Il tipo %(type_id)s è già associato ad altre specifiche (specs) qos: " -"%(qos_specs_id)s" - -msgid "Type access modification is not applicable to public volume type." -msgstr "Modifica di accesso tipo non applicabile al tipo di volume pubblico. " - -msgid "Type cannot be converted into NaElement." 
-msgstr "Il tipo non può essere convertito in NaElement." - -#, python-format -msgid "TypeError: %s" -msgstr "TypeError: %s" - -#, python-format -msgid "UUIDs %s are in both add and remove volume list." -msgstr "" -"Gli UUID %s sono presenti nell'elenco dei volumi di aggiunta e rimozione." - -#, python-format -msgid "Unable to access the Storwize back-end for volume %s." -msgstr "Impossibile accedere al back-end Storwize per il volume %s." - -msgid "Unable to access the backend storage via file handle." -msgstr "Impossibile accedere alla memoria backend attraverso la gestione file." - -#, python-format -msgid "Unable to access the backend storage via the path %(path)s." -msgstr "" -"Impossibile accedere alla memoria backend attraverso il percorso %(path)s." - -#, python-format -msgid "Unable to add Cinder host to apphosts for space %(space)s" -msgstr "" -"Impossibile aggiungere l'host Cinder a apphosts per lo spazio %(space)s" - -#, python-format -msgid "Unable to complete failover of %s." -msgstr "Impossibile completare il failover di %s." - -msgid "Unable to connect or find connection to host" -msgstr "Impossibile connettersi o trovare la connessione all'host" - -#, python-format -msgid "Unable to create consistency group %s" -msgstr "Impossibile creare il gruppo di coerenza %s" - -msgid "Unable to create lock. Coordination backend not started." -msgstr "Impossibile creare il blocco. Backend di coordinazione non avviato." - -#, python-format -msgid "" -"Unable to create or get default storage group for FAST policy: " -"%(fastPolicyName)s." -msgstr "" -"Impossibile creare o ottenere il gruppo storage predefinito per la politica " -"FAST. %(fastPolicyName)s." - -#, python-format -msgid "Unable to create replica clone for volume %s." -msgstr "Impossibile creare il clone della replica per il volume %s." - -#, python-format -msgid "Unable to create the relationship for %s." -msgstr "Impossibile creare la relazione per %s." 
- -#, python-format -msgid "Unable to create volume %(name)s from %(snap)s." -msgstr "Impossibile creare il volume %(name)s da %(snap)s." - -#, python-format -msgid "Unable to create volume %(name)s from %(vol)s." -msgstr "Impossibile creare il volume %(name)s da %(vol)s." - -#, python-format -msgid "Unable to create volume %s" -msgstr "Impossibile creare il volume %s" - -msgid "Unable to create volume. Backend down." -msgstr "Impossibile creare il volume. Backend disattivo." - -#, python-format -msgid "Unable to delete Consistency Group snapshot %s" -msgstr "Impossibile eliminare l'istantanea del gruppo di coerenza %s " - -#, python-format -msgid "Unable to delete snapshot %(id)s, status: %(status)s." -msgstr "Impossibile eliminare l'istantanea %(id)s, stato: %(status)s." - -#, python-format -msgid "Unable to delete snapshot policy on volume %s." -msgstr "Impossibile eliminare la politica di istantanea sul volume %s. " - -#, python-format -msgid "" -"Unable to delete the target volume for volume %(vol)s. Exception: %(err)s." -msgstr "" -"Impossibile eliminare il volume di destinazione per il volume %(vol)s. " -"Eccezione: %(err)s." - -msgid "" -"Unable to detach volume. Volume status must be 'in-use' and attach_status " -"must be 'attached' to detach." -msgstr "" -"Impossibile scollegare il volume. Lo stato del volume deve essere 'in-use' e " -"attach_status deve essere 'attached' per scollegarlo. " - -#, python-format -msgid "" -"Unable to determine secondary_array from supplied secondary: %(secondary)s." -msgstr "" -"Impossibile determinare secondary_array dall'array secondario fornito: " -"%(secondary)s." - -#, python-format -msgid "Unable to determine snapshot name in Purity for snapshot %(id)s." -msgstr "" -"Impossibile determinare il nome dell'istantanea in Purity per l'istantanea " -"%(id)s." - -msgid "Unable to determine system id." -msgstr "Impossibile determinare l'id sistema." - -msgid "Unable to determine system name." 
-msgstr "Impossibile determinare il nome sistema." - -#, python-format -msgid "" -"Unable to do manage snapshot operations with Purity REST API version " -"%(api_version)s, requires %(required_versions)s." -msgstr "" -"Impossibile eseguire operazioni di gestione istantanea con la versione API " -"REST Purity %(api_version)s, richiesta %(required_versions)s." - -#, python-format -msgid "" -"Unable to do replication with Purity REST API version %(api_version)s, " -"requires one of %(required_versions)s." -msgstr "" -"Impossibile eseguire la replica con la versione API REST Purity " -"%(api_version)s, richiesta una tra le versioni %(required_versions)s." - -#, python-format -msgid "Unable to establish the partnership with the Storwize cluster %s." -msgstr "Impossibile stabilire la relazione con il cluster Storwize %s." - -#, python-format -msgid "Unable to extend volume %s" -msgstr "Impossibile estendere il volume %s." - -#, python-format -msgid "" -"Unable to fail-over the volume %(id)s to the secondary back-end, because the " -"replication relationship is unable to switch: %(error)s" -msgstr "" -"Impossibile eseguire il failover del volume %(id)s sul back-end secondario, " -"in quanto la relazione di replica non è in grado di eseguire il passaggio: " -"%(error)s" - -msgid "" -"Unable to failback to \"default\", this can only be done after a failover " -"has completed." -msgstr "" -"Impossibile eseguire il failback su \"default\", è possibile eseguire questa " -"operazione solo dopo il completamento di un failover." - -#, python-format -msgid "Unable to failover to replication target:%(reason)s)." -msgstr "" -"Impossibile eseguire il failover sulla destinazione di replica:%(reason)s)." - -msgid "Unable to fetch connection information from backend." -msgstr "Impossibile recuperare le informazioni sulla connessione dal backend." 
- -#, python-format -msgid "Unable to fetch connection information from backend: %(err)s" -msgstr "" -"Impossibile richiamare le informazioni sulla connessione dal backend: %(err)s" - -#, python-format -msgid "Unable to find Purity ref with name=%s" -msgstr "Impossibile trovare ref Purity con nome=%s" - -#, python-format -msgid "Unable to find Volume Group: %(vg_name)s" -msgstr "Impossibile trovare il gruppo volume: %(vg_name)s" - -msgid "Unable to find failover target, no secondary targets configured." -msgstr "" -"Impossibile trovare la destinazione di failover, nessuna destinazione " -"secondaria configurata." - -msgid "Unable to find iSCSI mappings." -msgstr "Impossibile trovare associazioni iSCSI. " - -#, python-format -msgid "Unable to find ssh_hosts_key_file: %s" -msgstr "Impossibile trovare ssh_hosts_key_file: %s" - -msgid "Unable to find system log file!" -msgstr "Impossibile trovare il file di log di sistema." - -#, python-format -msgid "" -"Unable to find viable pg snapshot to use forfailover on selected secondary " -"array: %(id)s." -msgstr "" -"Impossibile trovare l'istantanea pg utilizzabile da utilizzare per il " -"failover sull'array secondario selezionato: %(id)s." - -#, python-format -msgid "" -"Unable to find viable secondary array fromconfigured targets: %(targets)s." -msgstr "" -"Impossibile trovare l'array secondario utilizzabile da destinazioni " -"configurate: %(targets)s." - -#, python-format -msgid "Unable to find volume %s" -msgstr "Impossibile trovare il volume %s" - -#, python-format -msgid "Unable to get a block device for file '%s'" -msgstr "Impossibile ottenere un dispositivo di blocco per il file '%s'" - -#, python-format -msgid "" -"Unable to get configuration information necessary to create a volume: " -"%(errorMessage)s." -msgstr "" -"Impossibile ottenere le informazioni di configurazione necessarie per creare " -"un volume: %(errorMessage)s." - -msgid "Unable to get corresponding record for pool." 
-msgstr "Impossibile acquisire il record corrispondente per il pool. " - -#, python-format -msgid "" -"Unable to get information on space %(space)s, please verify that the cluster " -"is running and connected." -msgstr "" -"Impossibile ottenere le informazioni sullo spazio %(space)s, verificare che " -"il cluster sia in esecuzione e connesso. " - -msgid "" -"Unable to get list of IP addresses on this host, check permissions and " -"networking." -msgstr "" -"Impossibile ottenere l'elenco di indirizzi IP su questo host, controllare " -"autorizzazioni e rete." - -msgid "" -"Unable to get list of domain members, check that the cluster is running." -msgstr "" -"Impossibile ottenere un elenco di membri del dominio, verificare che il " -"cluster sia in esecuzione. " - -msgid "" -"Unable to get list of spaces to make new name. Please verify the cluster is " -"running." -msgstr "" -"Impossibile ottenere l'elenco di spazi per creare il nuovo nome. Verificare " -"che il cluster sia in esecuzione. " - -#, python-format -msgid "Unable to get stats for backend_name: %s" -msgstr "Impossibile richiamare le statistiche per backend_name: %s" - -msgid "Unable to get storage volume from job." -msgstr "Impossibile ottenere il volume di archiviazione dal job." - -#, python-format -msgid "Unable to get target endpoints for hardwareId %(hardwareIdInstance)s." -msgstr "" -"Impossibile acquisire gli endpoint di destinazione per hardwareId " -"%(hardwareIdInstance)s." - -msgid "Unable to get the name of the masking view." -msgstr "Impossibile ottenere il nome della vista di mascheramento." - -msgid "Unable to get the name of the portgroup." -msgstr "Impossibile ottenere il nome del gruppo di porte." - -#, python-format -msgid "Unable to get the replication relationship for volume %s." -msgstr "Impossibile ottenere la relazione di replica per il volume %s." - -#, python-format -msgid "" -"Unable to import volume %(deviceId)s to cinder. 
It is the source volume of "
-"replication session %(sync)s."
-msgstr ""
-"Impossibile importare il volume %(deviceId)s in cinder. È il volume "
-"origine della sessione di replica %(sync)s. "
-
-#, python-format
-msgid ""
-"Unable to import volume %(deviceId)s to cinder. The external volume is not "
-"in the pool managed by current cinder host."
-msgstr ""
-"Impossibile importare il volume %(deviceId)s in cinder. Il volume esterno "
-"non è nel pool gestito dall'host cinder corrente."
-
-#, python-format
-msgid ""
-"Unable to import volume %(deviceId)s to cinder. Volume is in masking view "
-"%(mv)s."
-msgstr ""
-"Impossibile importare il volume %(deviceId)s in cinder. Il volume è nella "
-"vista di mascheramento %(mv)s."
-
-#, python-format
-msgid "Unable to load CA from %(cert)s %(e)s."
-msgstr "Impossibile caricare CA da %(cert)s %(e)s."
-
-#, python-format
-msgid "Unable to load cert from %(cert)s %(e)s."
-msgstr "Impossibile caricare il certificato da %(cert)s %(e)s."
-
-#, python-format
-msgid "Unable to load key from %(cert)s %(e)s."
-msgstr "Impossibile caricare la chiave da %(cert)s %(e)s."
-
-#, python-format
-msgid "Unable to locate account %(account_name)s on Solidfire device"
-msgstr ""
-"Impossibile individuare l'account %(account_name)s nell'unità Solidfire"
-
-#, python-format
-msgid "Unable to locate an SVM that is managing the IP address '%s'"
-msgstr "Impossibile individuare un SVM che stia gestendo l'indirizzo IP '%s'"
-
-#, python-format
-msgid "Unable to locate specified replay profiles %s "
-msgstr "Impossibile individuare i profili di risposta specificati %s "
-
-#, python-format
-msgid ""
-"Unable to manage existing volume. Volume %(volume_ref)s already managed."
-msgstr ""
-"Impossibile gestire il volume esistente. Il volume %(volume_ref)s è già "
-"gestito. 
" - -#, python-format -msgid "Unable to manage volume %s" -msgstr "Impossibile gestire il volume %s" - -msgid "Unable to map volume" -msgstr "Impossibile associare il volume" - -msgid "Unable to map volume." -msgstr "Impossibile associare il volume." - -msgid "Unable to parse attributes." -msgstr "Impossibile analizzare gli attributi. " - -#, python-format -msgid "" -"Unable to promote replica to primary for volume %s. No secondary copy " -"available." -msgstr "" -"Impossibile promuovere la replica a primaria per il volume %s. Nessuna copia " -"secondaria disponibile." - -msgid "" -"Unable to re-use a host that is not managed by Cinder with " -"use_chap_auth=True," -msgstr "" -"Impossibile utilizzare nuovamente un host non gestito da Cinder con " -"use_chap_auth=True," - -msgid "Unable to re-use host with unknown CHAP credentials configured." -msgstr "" -"Impossibile utilizzare nuovamente un host con credenziali CHAP sconosciute " -"configurate." - -#, python-format -msgid "Unable to rename volume %(existing)s to %(newname)s" -msgstr "Impossibile ridenominare il volume %(existing)s in %(newname)s" - -#, python-format -msgid "Unable to retrieve snapshot group with id of %s." -msgstr "Impossibile richiamare il gruppo di istantanee con id %s." - -#, python-format -msgid "" -"Unable to retype %(specname)s, expected to receive current and requested " -"%(spectype)s values. Value received: %(spec)s" -msgstr "" -"Impossibile riscrivere %(specname)s, si prevedeva di ricevere i valori " -"%(spectype)s corrente e richiesto. Valore ricevuto: %(spec)s" - -#, python-format -msgid "" -"Unable to retype: A copy of volume %s exists. Retyping would exceed the " -"limit of 2 copies." -msgstr "" -"Impossibile eseguire la riscrittura: Una copia del volume %s esiste. La " -"riscrittura supererebbe il limite di 2 copie." - -#, python-format -msgid "" -"Unable to retype: Current action needs volume-copy, it is not allowed when " -"new type is replication. 
Volume = %s" -msgstr "" -"Impossibile eseguire la riscrittura: L'azione corrente richiede una copia " -"volume, non consentita quando il nuovo tipo è replica. Volume = %s" - -#, python-format -msgid "" -"Unable to set up mirror mode replication for %(vol)s. Exception: %(err)s." -msgstr "" -"Impossibile configurare la replica della modalità mirror per %(vol)s. " -"Eccezione: %(err)s." - -#, python-format -msgid "Unable to snap Consistency Group %s" -msgstr "Impossibile eseguire istantanea del gruppo di coerenza %s" - -msgid "Unable to terminate volume connection from backend." -msgstr "Impossibile terminare la connessione del volume dal backend." - -#, python-format -msgid "Unable to terminate volume connection: %(err)s" -msgstr "Impossibile terminare la connessione del volume: %(err)s" - -#, python-format -msgid "Unable to update consistency group %s" -msgstr "Impossibile aggiornare il gruppo di coerenza %s" - -#, python-format -msgid "" -"Unable to verify initiator group: %(igGroupName)s in masking view " -"%(maskingViewName)s. " -msgstr "" -"Impossibile verificare il gruppo iniziatore: %(igGroupName)s nella vista di " -"mascheramento %(maskingViewName)s. " - -msgid "Unacceptable parameters." -msgstr "Parametri inaccettabili." - -#, python-format -msgid "" -"Unexecpted mapping status %(status)s for mapping %(id)s. Attributes: " -"%(attr)s." -msgstr "" -"Stato associazione imprevisto %(status)s per l'associazione %(id)s. " -"Attributi: %(attr)s." - -#, python-format -msgid "" -"Unexpected CLI response: header/row mismatch. header: %(header)s, row: " -"%(row)s." -msgstr "" -"Risposta CLI non prevista: mancata corrispondenza intestazione/riga. " -"Intestazione: %(header)s, riga: %(row)s." - -#, python-format -msgid "" -"Unexpected mapping status %(status)s for mapping%(id)s. Attributes: %(attr)s." -msgstr "" -"Stato associazione imprevisto %(status)s per l'associazione %(id)s. " -"Attributi: %(attr)s." - -#, python-format -msgid "Unexpected output. 
Expected [%(expected)s] but received [%(output)s]" -msgstr "Output imprevisto. Previsto [%(expected)s] ma ricevuto [%(output)s]" - -msgid "Unexpected response from Nimble API" -msgstr "Risposta imprevista dall'API Nimble" - -msgid "Unexpected response from Tegile IntelliFlash API" -msgstr "Risposta imprevista dall'API Tegile IntelliFlash" - -msgid "Unexpected status code" -msgstr "Codice di stato imprevisto" - -#, python-format -msgid "" -"Unexpected status code from the switch %(switch_id)s with protocol " -"%(protocol)s for url %(page)s. Error: %(error)s" -msgstr "" -"Codice di stato imprevisto dallo switch %(switch_id)s con protocollo " -"%(protocol)s per url %(page)s. Errore: %(error)s" - -msgid "Unknown Gluster exception" -msgstr "Eccezione Gluster sconosciuta" - -msgid "Unknown NFS exception" -msgstr "Eccezione NFS sconosciuta" - -msgid "Unknown RemoteFS exception" -msgstr "Eccezione RemoteFS sconosciuta" - -msgid "Unknown SMBFS exception." -msgstr "Eccezione SMBFS sconosciuta." - -msgid "Unknown Virtuozzo Storage exception" -msgstr "Eccezione Virtuozzo Storage sconosciuta " - -msgid "Unknown action" -msgstr "Azione sconosciuta" - -#, python-format -msgid "" -"Unknown if the volume: %s to be managed is already being managed by Cinder. " -"Aborting manage volume. Please add 'cinder_managed' custom schema property " -"to the volume and set its value to False. Alternatively, Set the value of " -"cinder config policy 'zfssa_manage_policy' to 'loose' to remove this " -"restriction." -msgstr "" -"Si ignora se il volume: %s da gestire è già gestito da Cinder. Interruzione " -"della gestione del volume. Aggiungere la proprietà dello schema " -"personalizzato 'cinder_managed' al volume e impostare il relativo valore su " -"False. In alternativa, impostare il valore della politica di configurazione " -"di cinder 'zfssa_manage_policy' su 'loose' per rimuovere questa restrizione." 
- -#, python-format -msgid "" -"Unknown if the volume: %s to be managed is already being managed by Cinder. " -"Aborting manage volume. Please add 'cinder_managed' custom schema property " -"to the volume and set its value to False. Alternatively, set the value of " -"cinder config policy 'zfssa_manage_policy' to 'loose' to remove this " -"restriction." -msgstr "" -"Si ignora se il volume: %s da gestire è già gestito da Cinder. Interruzione " -"della gestione del volume. Aggiungere la proprietà dello schema " -"personalizzato 'cinder_managed' al volume e impostare il relativo valore su " -"False. In alternativa, impostare il valore della politica di configurazione " -"di cinder 'zfssa_manage_policy' su 'loose' per rimuovere questa restrizione." - -#, python-format -msgid "Unknown operation %s." -msgstr "Operazione %s sconosciuta." - -#, python-format -msgid "Unknown or unsupported command %(cmd)s" -msgstr "Comando %(cmd)s sconosciuto o non supportato" - -#, python-format -msgid "Unknown protocol: %(protocol)s." -msgstr "Protocollo sconosciuto: %(protocol)s. " - -#, python-format -msgid "Unknown quota resources %(unknown)s." -msgstr "Risorse quota sconosciute %(unknown)s." - -msgid "Unknown sort direction, must be 'desc' or 'asc'" -msgstr "Direzione ordinamento sconosciuta, deve essere 'desc' o 'asc'" - -msgid "Unknown sort direction, must be 'desc' or 'asc'." -msgstr "Direzione ordinamento sconosciuta, deve essere 'desc' o 'asc'." - -msgid "Unmanage and cascade delete options are mutually exclusive." -msgstr "" -"Le opzioni di disabilitazione ed eliminazione a catena si escludono " -"reciprocamente." - -msgid "Unmanage volume not implemented." -msgstr "Disabilitazione gestione volume non implementata." - -msgid "Unmanaging of snapshots from 'failed-over' volumes is not allowed." -msgstr "" -"L'annullamento della gestione di istantanee da volumi sottoposti a failover " -"non è consentito." - -msgid "Unmanaging of snapshots from failed-over volumes is not allowed." 
-msgstr "" -"L'annullamento della gestione di istantanee da volumi sottoposti a failover " -"non è consentito." - -#, python-format -msgid "Unrecognized QOS keyword: \"%s\"" -msgstr "Parola chiave QoS non riconosciuta: \"%s\" " - -#, python-format -msgid "Unrecognized backing format: %s" -msgstr "Formato backup non riconosciuto: %s" - -#, python-format -msgid "Unrecognized read_deleted value '%s'" -msgstr "Valore read_deleted non riconosciuto '%s'" - -#, python-format -msgid "Unset gcs options: %s" -msgstr "Annulla opzioni gcs: %s" - -msgid "Unsupported Content-Type" -msgstr "Tipo-contenuto non supportato" - -msgid "" -"Unsupported Data ONTAP version. Data ONTAP version 7.3.1 and above is " -"supported." -msgstr "" -"Versione Data ONTAP non supportata. Data ONTAP versione 7.3.1 e successive " -"sono supportate." - -#, python-format -msgid "Unsupported backup metadata version (%s)" -msgstr "La versione dei metadati di backup non è supportata (%s)" - -msgid "Unsupported backup metadata version requested" -msgstr "La versione dei metadati di backup richiesta non è supportata" - -msgid "Unsupported backup verify driver" -msgstr "Driver di verifica del backup non supportato" - -#, python-format -msgid "" -"Unsupported firmware on switch %s. Make sure switch is running firmware v6.4 " -"or higher" -msgstr "" -"Firmware non supportato per lo switch %s. Assicurarsi che lo switch sia in " -"esecuzione su firmware v6.4 o superiore" - -#, python-format -msgid "Unsupported volume format: %s " -msgstr "Formato del volume non supportato: %s " - -msgid "Update QoS policy error." -msgstr "Errore di aggiornamento della politica QoS." - -msgid "" -"Update and delete quota operations can only be made by an admin of immediate " -"parent or by the CLOUD admin." -msgstr "" -"Operazioni di aggiornamento e eliminazione quota possono essere eseguite " -"solo da un admin del parent immediato o dall'admin CLOUD." 
-
-msgid ""
-"Update and delete quota operations can only be made to projects in the same "
-"hierarchy of the project in which users are scoped to."
-msgstr ""
-"Operazioni di aggiornamento e eliminazione quota possono essere eseguite "
-"solo a progetti nella stessa gerarchia del progetto che è l'ambito degli "
-"utenti. "
-
-msgid "Update list, doesn't include volume_id"
-msgstr "L'elenco di aggiornamento non include volume_id"
-
-msgid "Updated At"
-msgstr "Aggiornato a"
-
-msgid "Upload to glance of attached volume is not supported."
-msgstr "Il caricamento in glance del volume collegato non è supportato."
-
-msgid "Use ALUA to associate initiator to host error."
-msgstr "Errore di utilizzo ALUA per associare l'iniziatore all'host. "
-
-msgid ""
-"Use CHAP to associate initiator to host error. Please check the CHAP "
-"username and password."
-msgstr ""
-"Errore di utilizzo CHAP per associare l'iniziatore all'host. Controllare "
-"nome utente e password CHAP. "
-
-msgid "User ID"
-msgstr "Identificativo Utente"
-
-msgid "User does not have admin privileges"
-msgstr "L'utente non ha i privilegi dell'amministratore"
-
-msgid "User not authorized to perform WebDAV operations."
-msgstr "L'utente non è autorizzato ad eseguire le operazioni WebDAV."
-
-msgid "UserName is not configured."
-msgstr "UserName non configurato."
-
-msgid "UserPassword is not configured."
-msgstr "UserPassword non configurato."
-
-msgid "V2 rollback, volume is not in any storage group."
-msgstr "Rollback V2, il volume non è in alcun gruppo di memoria. "
-
-msgid "V3 rollback"
-msgstr "Rollback V3 "
-
-msgid "VF is not enabled."
-msgstr "VF non è abilitato."
-
-#, python-format
-msgid "VV Set %s does not exist."
-msgstr "L'impostazione VV %s non esiste."
- -#, python-format -msgid "Valid consumer of QoS specs are: %s" -msgstr "Il consumer valido delle specifiche (specs) QoS è: %s" - -#, python-format -msgid "Valid control location are: %s" -msgstr "l'ubicazione di controllo valida è: %s" - -#, python-format -msgid "Validate volume connection failed (error: %(err)s)." -msgstr "Convalida connessione volume non riuscita (errore: %(err)s)." - -#, python-format -msgid "" -"Value \"%(value)s\" is not valid for configuration option \"%(option)s\"" -msgstr "" -"Il valore \"%(value)s\" non è valido per l'opzione di configurazione " -"\"%(option)s\"" - -#, python-format -msgid "Value %(param)s for %(param_string)s is not a boolean." -msgstr "Valore %(param)s per %(param_string)s non è un booleano." - -msgid "Value required for 'scality_sofs_config'" -msgstr "Valore richiesto per 'scality_sofs_config'" - -#, python-format -msgid "ValueError: %s" -msgstr "ValueError: %s" - -#, python-format -msgid "Vdisk %(name)s not involved in mapping %(src)s -> %(tgt)s." -msgstr "Vdisk %(name)s non coinvolto nell'associazione %(src)s -> %(tgt)s." - -#, python-format -msgid "" -"Version %(req_ver)s is not supported by the API. Minimum is %(min_ver)s and " -"maximum is %(max_ver)s." -msgstr "" -"La versione %(req_ver)s non è supportata dall'API. Il valore minimo è " -"%(min_ver)s ed il massimo è %(max_ver)s." - -#, python-format -msgid "VersionedObject %s cannot retrieve object by id." -msgstr "VersionedObject %s non può richiamare l'oggetto mediante l'id." - -#, python-format -msgid "VersionedObject %s does not support conditional update." -msgstr "VersionedObject %s non supporta l'aggiornamento condizionale." - -#, python-format -msgid "Virtual volume '%s' doesn't exist on array." -msgstr "Il volume virtuale '%s' non esiste nell'array." - -#, python-format -msgid "Vol copy job for dest %s failed." -msgstr "Lavoro di copia del volume per la destinazione %s non riuscito." - -#, python-format -msgid "Volume %(deviceID)s not found." 
-msgstr "Volume %(deviceID)s non trovato."
-
-#, python-format
-msgid ""
-"Volume %(name)s not found on the array. Cannot determine if there are "
-"volumes mapped."
-msgstr ""
-"Volume %(name)s non trovato nell'array. Impossibile determinare se vi sono "
-"volumi associati."
-
-#, python-format
-msgid "Volume %(name)s was created in VNX, but in %(state)s state."
-msgstr "Il volume %(name)s è stato creato in VNX, ma nello stato %(state)s."
-
-#, python-format
-msgid "Volume %(vol)s could not be created in pool %(pool)s."
-msgstr "Impossibile creare il volume %(vol)s nel pool %(pool)s."
-
-#, python-format
-msgid "Volume %(vol1)s does not match with snapshot.volume_id %(vol2)s."
-msgstr "Il volume %(vol1)s non e' uguale allo snapshot.volume_id %(vol2)s."
-
-#, python-format
-msgid ""
-"Volume %(vol_id)s status must be available to update readonly flag, but "
-"current status is: %(vol_status)s."
-msgstr ""
-"Lo stato del volume %(vol_id)s deve essere available per aggiornare "
-"l'indicatore di sola lettura, ma lo stato corrente è: %(vol_status)s."
-
-#, python-format
-msgid ""
-"Volume %(vol_id)s status must be available, but current status is: "
-"%(vol_status)s."
-msgstr ""
-"Lo stato del volume %(vol_id)s deve essere available, ma lo stato corrente "
-"è: %(vol_status)s."
-
-#, python-format
-msgid "Volume %(volume_id)s could not be found."
-msgstr "Impossibile trovare il volume %(volume_id)s."
-
-#, python-format
-msgid ""
-"Volume %(volume_id)s has no administration metadata with key "
-"%(metadata_key)s."
-msgstr ""
-"Il volume %(volume_id)s non contiene metadati di gestione con la chiave "
-"%(metadata_key)s."
-
-#, python-format
-msgid "Volume %(volume_id)s has no metadata with key %(metadata_key)s."
-msgstr ""
-"Volume %(volume_id)s non contiene metadati con la chiave %(metadata_key)s."
- -#, python-format -msgid "" -"Volume %(volume_id)s is currently mapped to unsupported host group %(group)s" -msgstr "" -"Il volume %(volume_id)s è attualmente associato al gruppo host non " -"supportato %(group)s" - -#, python-format -msgid "Volume %(volume_id)s is not currently mapped to host %(host)s" -msgstr "Il volume %(volume_id)s non è attualmente associato all'host %(host)s " - -#, python-format -msgid "Volume %(volume_id)s is still attached, detach volume first." -msgstr "Volume %(volume_id)s è ancora collegato, prima scollegare il volume." - -#, python-format -msgid "Volume %(volume_id)s replication error: %(reason)s" -msgstr "Errore di replica del volume %(volume_id)s : %(reason)s" - -#, python-format -msgid "Volume %(volume_name)s is busy." -msgstr "Volume %(volume_name)s occupato." - -#, python-format -msgid "Volume %s could not be created from source volume." -msgstr "Impossibile creare il volume %s dal volume di origine." - -#, python-format -msgid "Volume %s could not be created on shares." -msgstr "Impossibile creare il volume %s nelle condivisioni." - -#, python-format -msgid "Volume %s could not be created." -msgstr "Impossibile creare il volume %s." - -#, python-format -msgid "Volume %s does not exist in Nexenta SA" -msgstr "Il volume %s non esiste in Nexenta SA" - -#, python-format -msgid "Volume %s does not exist in Nexenta Store appliance" -msgstr "Il volume %s non esiste nell'applicazione Nexenta Store" - -#, python-format -msgid "Volume %s does not exist on the array." -msgstr "Il volume %s non esiste su questo array. " - -#, python-format -msgid "Volume %s does not have provider_location specified, skipping." -msgstr "Sul volume %s non è specificato provider_location; ignorato." - -#, python-format -msgid "Volume %s doesn't exist on array." -msgstr "Il volume %s non esiste nell'array." - -#, python-format -msgid "Volume %s doesn't exist on the ZFSSA backend." -msgstr "Il volume %s non esiste nel backend ZFSSA." 
- -#, python-format -msgid "Volume %s is already managed by OpenStack." -msgstr "Il volume %s è già gestito da OpenStack." - -#, python-format -msgid "" -"Volume %s is not of replicated type. This volume needs to be of a volume " -"type with the extra spec replication_enabled set to ' True' to support " -"replication actions." -msgstr "" -"Il volume %s non è di tipo replicato. Questo volume deve essere di un tipo " -"di volume con la specifica supplementare replication_enabled impostata su " -"' True' per supportare le azioni di replica." - -#, python-format -msgid "" -"Volume %s is online. Set volume to offline for managing using OpenStack." -msgstr "" -"Il volume %s è online. Impostare il volume su offline per la gestione " -"tramite OpenStack." - -#, python-format -msgid "Volume %s must not be part of a consistency group." -msgstr "Il volume %s non deve fare parte di un gruppo di coerenza." - -#, python-format -msgid "Volume %s not found." -msgstr "Volume %s non trovato." - -#, python-format -msgid "Volume %s: Error trying to extend volume" -msgstr "Volume %s: Errore durante il tentativo di estendere il volume" - -#, python-format -msgid "Volume (%s) already exists on array" -msgstr "Il volume (%s) esiste già nell'array" - -#, python-format -msgid "Volume (%s) already exists on array." -msgstr "Il volume (%s) esiste già sull'array." - -#, python-format -msgid "Volume Group %s does not exist" -msgstr "Il gruppo del volume %s non esiste" - -#, python-format -msgid "Volume Type %(id)s already exists." -msgstr "Il tipo di volume %(id)s esiste già." - -#, python-format -msgid "" -"Volume Type %(volume_type_id)s deletion is not allowed with volumes present " -"with the type." -msgstr "" -"L'eliminazione del tipo di volume %(volume_type_id)s non è consentita con i " -"volumi presenti con il tipo." - -#, python-format -msgid "" -"Volume Type %(volume_type_id)s has no extra specs with key " -"%(extra_specs_key)s." 
-msgstr "" -"Il tipo di volume %(volume_type_id)s non contiene specifiche supplementari " -"con la chiave %(extra_specs_key)s." - -msgid "Volume Type id must not be None." -msgstr "L'id del tipo di volume non deve essere None." - -#, python-format -msgid "" -"Volume [%(cb_vol)s] was not found at CloudByte storage corresponding to " -"OpenStack volume [%(ops_vol)s]." -msgstr "" -"Volume [%(cb_vol)s] non trovato nella memoria CloudByte corrispondente al " -"volume OpenStack [%(ops_vol)s]." - -#, python-format -msgid "Volume [%s] not found in CloudByte storage." -msgstr "Volume [%s] non rilevato nell'archivio CloudByte." - -#, python-format -msgid "Volume attachment could not be found with filter: %(filter)s ." -msgstr "" -"Impossibile trovare il collegamento del volume con il filtro: %(filter)s ." - -#, python-format -msgid "Volume backend config is invalid: %(reason)s" -msgstr "Configurazione backend del volume non valida: %(reason)s" - -msgid "Volume by this name already exists" -msgstr "Il volume con questo nome già esiste" - -msgid "Volume cannot be restored since it contains snapshots." -msgstr "Impossibile ripristinare il volume perché contiene delle istantanee." - -msgid "Volume create failed while extracting volume ref." -msgstr "" -"Creazione del volume non riuscita durante l'estrazione del riferimento del " -"volume." - -#, python-format -msgid "Volume device file path %s does not exist." -msgstr "Il percorso del file del dispositivo del volume %s non esiste." - -#, python-format -msgid "Volume device not found at %(device)s." -msgstr "Il dispositivo del volume non è stato trovato in %(device)s." - -#, python-format -msgid "Volume driver %s not initialized." -msgstr "Il driver di volume %s non è inizializzato." - -msgid "Volume driver not ready." -msgstr "Driver del volume non pronto." 
- -#, python-format -msgid "Volume driver reported an error: %(message)s" -msgstr "Il driver del volume ha riportato un errore: %(message)s" - -msgid "Volume has a temporary snapshot that can't be deleted at this time." -msgstr "" -"Il volume ha un'istantanea temporanea che non può essere eliminata in questo " -"momento." - -msgid "Volume has children and cannot be deleted!" -msgstr "Il volume ha elementi child e non può essere eliminato." - -#, python-format -msgid "Volume is attached to a server. (%s)" -msgstr "Il volume è collegato a un server. (%s) " - -msgid "Volume is in-use." -msgstr "Volume attualmente utilizzato." - -msgid "Volume is not available." -msgstr "Il volume non è disponibile. " - -msgid "Volume is not local to this node" -msgstr "Per questo nodo volume non è locale" - -msgid "Volume is not local to this node." -msgstr "Il volume non è locale rispetto a questo nodo." - -msgid "" -"Volume metadata backup requested but this driver does not yet support this " -"feature." -msgstr "" -"È stato richiesto il backup dei metadati di volume ma questo driver non " -"supporta ancora questa funzione." - -#, python-format -msgid "Volume migration failed: %(reason)s" -msgstr "Migrazione volume non riuscita: %(reason)s" - -msgid "Volume must be available" -msgstr "Il volume deve essere disponibile" - -msgid "Volume must be in the same availability zone as the snapshot" -msgstr "" -"Il volume deve trovarsi nell'area di disponibilità così come l'istantanea" - -msgid "Volume must be in the same availability zone as the source volume" -msgstr "" -"Il volume deve trovarsi nella stessa area di disponibilità così come il " -"volume di origine" - -msgid "Volume must have a volume type" -msgstr "Il volume deve avere un tipo di volume" - -msgid "Volume must not be replicated." -msgstr "Il volume non deve essere replicato." - -msgid "Volume must not have snapshots." -msgstr "Il volume non deve avere istantanee." 
- -#, python-format -msgid "Volume not found for instance %(instance_id)s." -msgstr "Volume non trovato per l'istanza %(instance_id)s." - -msgid "Volume not found on configured storage backend." -msgstr "Volume non trovato sul backend di archiviazione configurato." - -msgid "" -"Volume not found on configured storage backend. If your volume name contains " -"\"/\", please rename it and try to manage again." -msgstr "" -"Volume non trovato nel backend di archiviazione configurato. Se il nome " -"volume contiene \"/\", ridenominarlo e provare a gestirlo di nuovo." - -msgid "Volume not found on configured storage pools." -msgstr "Volume non trovato nei pool di archiviazione configurati." - -msgid "Volume not found." -msgstr "Volume non trovato." - -msgid "Volume not unique." -msgstr "Volume non univoco." - -msgid "Volume not yet assigned to host." -msgstr "Il volume non è stato ancora assegnato all'host." - -msgid "Volume reference must contain source-name element." -msgstr "Il riferimento al volume deve contenere l'elemento source-name." - -#, python-format -msgid "Volume replication for %(volume_id)s could not be found." -msgstr "Impossibile trovare la replica del volume per %(volume_id)s." - -#, python-format -msgid "Volume service %s failed to start." -msgstr "Avvio del servizio volume %s non riuscito. " - -msgid "Volume should have agent-type set as None." -msgstr "Il volume deve avere agent-type impostato su Nessuno. " - -#, python-format -msgid "" -"Volume size %(volume_size)sGB cannot be smaller than the image minDisk size " -"%(min_disk)sGB." -msgstr "" -"La dimensione del volume %(volume_size)sGB non può essere minore della " -"dimensione minDisk dell'immagine %(min_disk)sGB." 
- -#, python-format -msgid "Volume size '%(size)s' must be an integer and greater than 0" -msgstr "" -"La dimensione del volume '%(size)s' deve essere un numero intero e maggiore " -"di 0" - -#, python-format -msgid "" -"Volume size '%(size)s'GB cannot be smaller than original volume size " -"%(source_size)sGB. They must be >= original volume size." -msgstr "" -"La dimensione del volume '%(size)s'GB non può essere minore della dimensione " -"del volume originale %(source_size)sGB. Deve essere >= la dimensione del " -"volume originale." - -#, python-format -msgid "" -"Volume size '%(size)s'GB cannot be smaller than the snapshot size " -"%(snap_size)sGB. They must be >= original snapshot size." -msgstr "" -"La dimensione del volume '%(size)s'GB non può essere minore della dimensione " -"dell'istantanea %(snap_size)sGB. Deve essere >= la dimensione " -"dell'istantanea originale." - -msgid "Volume size increased since the last backup. Do a full backup." -msgstr "" -"La dimensione del volume è stata ridotta dall'ultimo backup. Eseguire un " -"backup completo." - -msgid "Volume size must be a multiple of 1 GB." -msgstr "La dimensione del volume deve essere un multiplo di 1 GB." - -msgid "Volume size must multiple of 1 GB." -msgstr "La dimensione del volume deve essere un multiplo di 1 GB. " - -#, python-format -msgid "Volume status must be \"available\" or \"in-use\" for snapshot. (is %s)" -msgstr "" -"Lo stato del volume deve essere \"available\" o \"in-use\" per l'istantanea. " -"(è %s)" - -msgid "Volume status must be \"available\" or \"in-use\"." -msgstr "Lo stato del volume deve essere \"available\" o \"in-use\"." - -#, python-format -msgid "Volume status must be %s to reserve." -msgstr "Lo stato del volume deve essere %s per eseguire la prenotazione." - -msgid "Volume status must be 'available'." -msgstr "Lo stato del volume deve essere 'available'." 
- -msgid "Volume to Initiator Group mapping already exists" -msgstr "L'associazione del volume al gruppo iniziatori già esiste" - -#, python-format -msgid "" -"Volume to be backed up must be available or in-use, but the current status " -"is \"%s\"." -msgstr "" -"Il volume di cui deve essere eseguito il backup deve essere disponibile o in " -"uso, ma lo stato corrente è \"%s\"." - -msgid "Volume to be restored to must be available" -msgstr "Il volume da ripristinare deve essere disponibile" - -#, python-format -msgid "Volume type %(volume_type_id)s could not be found." -msgstr "Impossibile trovare il tipo di volume %(volume_type_id)s." - -#, python-format -msgid "Volume type ID '%s' is invalid." -msgstr "L'ID tipo volume '%s' non è valido." - -#, python-format -msgid "" -"Volume type access for %(volume_type_id)s / %(project_id)s combination " -"already exists." -msgstr "" -"L'accesso di tipo Volume per la combinazione %(volume_type_id)s / " -"%(project_id)s già esiste." - -#, python-format -msgid "" -"Volume type access not found for %(volume_type_id)s / %(project_id)s " -"combination." -msgstr "" -"L'accesso di tipo Volume per la combinazione %(volume_type_id)s / " -"%(project_id)s non è stato trovato." - -#, python-format -msgid "Volume type encryption for type %(type_id)s already exists." -msgstr "La codifica del tipo di volume per il tipo %(type_id)s esiste già." - -#, python-format -msgid "Volume type encryption for type %(type_id)s does not exist." -msgstr "La codifica del tipo di volume per il tipo %(type_id)s non esiste." - -msgid "Volume type name can not be empty." -msgstr "Il nome tipo di volume non può essere vuoto." - -#, python-format -msgid "Volume type with name %(volume_type_name)s could not be found." -msgstr "" -"Impossibile trovare il tipo di volume con il nome %(volume_type_name)s." - -#, python-format -msgid "" -"Volume: %(volumeName)s is not a concatenated volume. You can only perform " -"extend on concatenated volume. Exiting..." 
-msgstr "" -"Il volume: %(volumeName)s non è un volume concatenato. È possibile solo " -"eseguire l'estensione su un volume concatenato. Uscire..." - -#, python-format -msgid "Volume: %(volumeName)s was not added to storage group %(sgGroupName)s." -msgstr "" -"Il volume: %(volumeName)s non è stato aggiunto al gruppo di archiviazione " -"%(sgGroupName)s." - -#, python-format -msgid "Volume: %s is already being managed by Cinder." -msgstr "Il volume: %s è già in fase di importazione da Cinder." - -msgid "" -"Volumes/account exceeded on both primary and secondary SolidFire accounts." -msgstr "Volumi/account superati sugli account SolidFire primario e secondario." - -#, python-format -msgid "" -"VzStorage config 'vzstorage_used_ratio' invalid. Must be > 0 and <= 1.0: %s." -msgstr "" -"Config VzStorage 'vzstorage_used_ratio' non valida, deve essere > 0 e <= " -"1.0: %s." - -#, python-format -msgid "VzStorage config file at %(config)s doesn't exist." -msgstr "File config VzStorage in %(config)s non esiste." - -msgid "Wait replica complete timeout." -msgstr "Timeout di attesa del completamento replica." - -#, python-format -msgid "Wait synchronize failed. Running status: %s." -msgstr "Sincronizzazione attesa non riuscita. Stato esecuzione: %s." - -msgid "" -"Waiting for all nodes to join cluster. Ensure all sheep daemons are running." -msgstr "" -"In attesa che tutti i nodi partecipino al cluster. Verificare che tutti i " -"daemon sheep siano in esecuzione. " - -msgid "We should not do switch over on primary array." -msgstr "Non è consigliabile passare all'array primario." - -msgid "X-IO Volume Driver exception!" -msgstr "Eccezione X-IO Volume Driver!" 
- -msgid "XtremIO not configured correctly, no iscsi portals found" -msgstr "XtremIO non configurato correttamente, nessun portale iscsi trovato" - -msgid "XtremIO not initialized correctly, no clusters found" -msgstr "" -"XtremIO non è inizializzato correttamente, non è stato trovato nessun cluster" - -msgid "You must implement __call__" -msgstr "È necessario implementare __call__" - -msgid "" -"You must install hpe3parclient before using 3PAR drivers. Run \"pip install " -"python-3parclient\" to install the hpe3parclient." -msgstr "" -"È necessario installare hpe3parclient prima di utilizzare i driver 3PAR. " -"Eseguire \"pip install python-3parclient\" per installare hpe3parclient." - -msgid "You must supply an array in your EMC configuration file." -msgstr "È necessario fornire un array nel file di configurazione EMC." - -#, python-format -msgid "" -"Your original size: %(originalVolumeSize)s GB is greater than: %(newSize)s " -"GB. Only Extend is supported. Exiting..." -msgstr "" -"La dimensione originale: %(originalVolumeSize)s GB è maggiore di %(newSize)s " -"GB. È supportata solo l'operazione di estensione. Uscire..." - -#, python-format -msgid "ZeroDivisionError: %s" -msgstr "ZeroDivisionError: %s" - -msgid "Zone" -msgstr "Zona" - -#, python-format -msgid "Zoning Policy: %s, not recognized" -msgstr "Politica di zona %s non riconosciuta" - -#, python-format -msgid "_create_and_copy_vdisk_data: Failed to get attributes for vdisk %s." -msgstr "" -"_create_and_copy_vdisk_data: Impossibile ottenere gli attributi per il vdisk " -"%s." - -msgid "_create_host failed to return the host name." -msgstr "_create_host non riuscito nella restituzione del nome host." - -msgid "" -"_create_host: Can not translate host name. Host name is not unicode or " -"string." -msgstr "" -"_create_host: impossibile convertire il nome host. Il nome host non è " -"unicode o stringa." - -msgid "_create_host: No connector ports." -msgstr "_create_host: Nessuna porta connettore." 
- -msgid "_create_local_cloned_volume, Replication Service not found." -msgstr "_create_local_cloned_volume, Servizio di replica non trovato." - -#, python-format -msgid "" -"_create_local_cloned_volume, volumename: %(volumename)s, sourcevolumename: " -"%(sourcevolumename)s, source volume instance: %(source_volume)s, target " -"volume instance: %(target_volume)s, Return code: %(rc)lu, Error: " -"%(errordesc)s." -msgstr "" -"_create_local_cloned_volume, nome volume: %(volumename)s, nome volume di " -"origine: %(sourcevolumename)s, istanza volume di origine: %(source_volume)s, " -"istanza volume di destinazione: %(target_volume)s, Codice di ritorno: " -"%(rc)lu, Errore: %(errordesc)s." - -#, python-format -msgid "" -"_create_vdisk %(name)s - did not find success message in CLI output.\n" -" stdout: %(out)s\n" -" stderr: %(err)s" -msgstr "" -"_create_vdisk %(name)s - non è stato trovato un messaggio di successo " -"nell'output CLI.\n" -" stdout: %(out)s\n" -" stderr: %(err)s" - -msgid "_create_volume_name, id_code is None." -msgstr "_create_volume_name, id_code è None." - -msgid "_delete_copysession, Cannot find Replication Service" -msgstr "_delete_copysession, Impossibile trovare il servizio di replica" - -#, python-format -msgid "" -"_delete_copysession, copy session type is undefined! copy session: " -"%(cpsession)s, copy type: %(copytype)s." -msgstr "" -"_delete_copysession, copy session type is undefined! sessione di copia: " -"%(cpsession)s, tipo di copia: %(copytype)s." - -#, python-format -msgid "" -"_delete_copysession, copysession: %(cpsession)s, operation: %(operation)s, " -"Return code: %(rc)lu, Error: %(errordesc)s." -msgstr "" -"_delete_copysession, copysession: %(cpsession)s, operation: %(operation)s, " -"Codice di ritorno: %(rc)lu, Errore: %(errordesc)s." - -#, python-format -msgid "" -"_delete_volume, volumename: %(volumename)s, Return code: %(rc)lu, Error: " -"%(errordesc)s." 
-msgstr "" -"_delete_volume, volumename: %(volumename)s, Codice di ritorno: %(rc)lu, " -"Errore: %(errordesc)s." - -#, python-format -msgid "" -"_delete_volume, volumename: %(volumename)s, Storage Configuration Service " -"not found." -msgstr "" -"_delete_volume, volumename: %(volumename)s, Storage Configuration Service " -"non trovato." - -#, python-format -msgid "" -"_exec_eternus_service, classname: %(classname)s, InvokeMethod, cannot " -"connect to ETERNUS." -msgstr "" -"_exec_eternus_service, classname: %(classname)s, InvokeMethod, impossibile " -"connettersi a ETERNUS." - -msgid "_extend_volume_op: Extending a volume with snapshots is not supported." -msgstr "" -"_extend_volume_op: l'estensione di un volume con le istantanee non è " -"supportata." - -#, python-format -msgid "" -"_find_affinity_group, connector: %(connector)s, Associators: " -"FUJITSU_AuthorizedTarget, cannot connect to ETERNUS." -msgstr "" -"_find_affinity_group, connector: %(connector)s, Associatori: " -"FUJITSU_AuthorizedTarget, impossibile connettersi a ETERNUS." - -#, python-format -msgid "" -"_find_affinity_group, connector: %(connector)s, EnumerateInstanceNames, " -"cannot connect to ETERNUS." -msgstr "" -"_find_affinity_group, connector: %(connector)s, EnumerateInstanceNames, " -"impossibile connettersi a ETERNUS." - -#, python-format -msgid "" -"_find_affinity_group,connector: %(connector)s,AssocNames: " -"FUJITSU_ProtocolControllerForUnit, cannot connect to ETERNUS." -msgstr "" -"_find_affinity_group,connector: %(connector)s,AssocNames: " -"FUJITSU_ProtocolControllerForUnit, impossibile connettersi a ETERNUS." - -#, python-format -msgid "" -"_find_copysession, ReferenceNames, vol_instance: %(vol_instance_path)s, " -"Cannot connect to ETERNUS." -msgstr "" -"_find_copysession, ReferenceNames, vol_instance: %(vol_instance_path)s, " -"impossibile connettersi a ETERNUS." 
- -#, python-format -msgid "" -"_find_eternus_service, classname: %(classname)s, EnumerateInstanceNames, " -"cannot connect to ETERNUS." -msgstr "" -"_find_eternus_service, classname: %(classname)s, EnumerateInstanceNames, " -"impossibile connettersi a ETERNUS." - -#, python-format -msgid "_find_initiator_names, connector: %(connector)s, initiator not found." -msgstr "" -"_find_initiator_names, connector: %(connector)s, iniziatore non trovato." - -#, python-format -msgid "" -"_find_lun, volumename: %(volumename)s, EnumerateInstanceNames, cannot " -"connect to ETERNUS." -msgstr "" -"_find_lun, volumename: %(volumename)s, EnumerateInstanceNames, impossibile " -"connettersi a ETERNUS." - -#, python-format -msgid "" -"_find_pool, eternus_pool:%(eternus_pool)s, EnumerateInstances, cannot " -"connect to ETERNUS." -msgstr "" -"_find_pool, eternus_pool:%(eternus_pool)s, EnumerateInstances, impossibile " -"connettersi a ETERNUS." - -#, python-format -msgid "" -"_get_drvcfg, filename: %(filename)s, tagname: %(tagname)s, data is None!! " -"Please edit driver configuration file and correct." -msgstr "" -"_get_drvcfg, nome file: %(filename)s, tagname: %(tagname)s, i dati sono " -"None. Modificare il file di configurazione del driver e correggerlo." - -#, python-format -msgid "" -"_get_eternus_connection, filename: %(filename)s, ip: %(ip)s, port: %(port)s, " -"user: %(user)s, passwd: ****, url: %(url)s, FAILED!!." -msgstr "" -"_get_eternus_connection, nome file: %(filename)s, ip: %(ip)s, porta: " -"%(port)s, utente: %(user)s, password: ****, url: %(url)s, NON RIUSCITO." - -#, python-format -msgid "" -"_get_eternus_iscsi_properties, iscsiip list: %(iscsiip_list)s, iqn not found." -msgstr "" -"_get_eternus_iscsi_properties, iscsiip list: %(iscsiip_list)s, iqn non " -"trovato." - -#, python-format -msgid "" -"_get_eternus_iscsi_properties, iscsiip: %(iscsiip)s, AssociatorNames: " -"CIM_BindsTo, cannot connect to ETERNUS." 
-msgstr "" -"_get_eternus_iscsi_properties, iscsiip: %(iscsiip)s, AssociatorNames: " -"CIM_BindsTo, impossibile connettersi a ETERNUS." - -#, python-format -msgid "" -"_get_eternus_iscsi_properties, iscsiip: %(iscsiip)s, EnumerateInstanceNames, " -"cannot connect to ETERNUS." -msgstr "" -"_get_eternus_iscsi_properties, iscsiip: %(iscsiip)s, EnumerateInstanceNames, " -"impossibile connettersi a ETERNUS." - -#, python-format -msgid "" -"_get_eternus_iscsi_properties, iscsiip: %(iscsiip)s, GetInstance, cannot " -"connect to ETERNUS." -msgstr "" -"_get_eternus_iscsi_properties, iscsiip: %(iscsiip)s, GetInstance, " -"impossibile connettersi a ETERNUS." - -#, python-format -msgid "" -"_get_hdr_dic: attribute headers and values do not match.\n" -" Headers: %(header)s\n" -" Values: %(row)s." -msgstr "" -"_get_hdr_dic: le intestazioni e i valori dell'attributo non corrispondono.\n" -" Intestazioni: %(header)s\n" -" Valori: %(row)s." - -msgid "_get_host_from_connector failed to return the host name for connector." -msgstr "" -"_get_host_from_connector non è riuscito a restituire il nome host per il " -"connettore." - -#, python-format -msgid "" -"_get_mapdata_fc, getting host-affinity from aglist/vol_instance failed, " -"affinitygroup: %(ag)s, ReferenceNames, cannot connect to ETERNUS." -msgstr "" -"_get_mapdata_fc, richiamo host-affinity da aglist/vol_instance non riuscito, " -"affinitygroup: %(ag)s, ReferenceNames, impossibile connettersi a ETERNUS." - -#, python-format -msgid "" -"_get_mapdata_fc, getting host-affinity instance failed, volmap: %(volmap)s, " -"GetInstance, cannot connect to ETERNUS." -msgstr "" -"_get_mapdata_fc, richiamo istanza host-affinity non riuscito, volmap: " -"%(volmap)s, GetInstance, impossibile connettersi a ETERNUS." - -msgid "" -"_get_mapdata_iscsi, Associators: FUJITSU_SAPAvailableForElement, cannot " -"connect to ETERNUS." -msgstr "" -"_get_mapdata_iscsi, Associatori: FUJITSU_SAPAvailableForElement, impossibile " -"connettersi a ETERNUS." 
- -#, python-format -msgid "" -"_get_mapdata_iscsi, affinitygroup: %(ag)s, ReferenceNames, cannot connect to " -"ETERNUS." -msgstr "" -"_get_mapdata_iscsi, affinitygroup: %(ag)s, ReferenceNames, impossibile " -"connettersi a ETERNUS." - -#, python-format -msgid "" -"_get_mapdata_iscsi, vol_instance: %(vol_instance)s, ReferenceNames: " -"CIM_ProtocolControllerForUnit, cannot connect to ETERNUS." -msgstr "" -"_get_mapdata_iscsi, vol_instance: %(vol_instance)s, ReferenceNames: " -"CIM_ProtocolControllerForUnit, impossibile connettersi a ETERNUS." - -#, python-format -msgid "" -"_get_mapdata_iscsi, volmap: %(volmap)s, GetInstance, cannot connect to " -"ETERNUS." -msgstr "" -"_get_mapdata_iscsi, volmap: %(volmap)s, GetInstance, impossibile connettersi " -"a ETERNUS." - -msgid "_get_target_port, EnumerateInstances, cannot connect to ETERNUS." -msgstr "" -"_get_target_port, EnumerateInstances, impossibile connettersi a ETERNUS." - -#, python-format -msgid "_get_target_port, protcol: %(protocol)s, target_port not found." -msgstr "" -"_get_target_port, protcol: %(protocol)s, porta di destinazione non trovata." - -#, python-format -msgid "_get_unmanaged_replay: Cannot find snapshot named %s" -msgstr "_get_unmanaged_replay: Impossibile trovare l'istantanea denominata %s" - -#, python-format -msgid "_get_unmanaged_replay: Cannot find volume id %s" -msgstr "_get_unmanaged_replay: Impossibile trovare l'id volume %s" - -msgid "_get_unmanaged_replay: Must specify source-name." -msgstr "_get_unmanaged_replay: È necessario specificare source-name." - -msgid "" -"_get_vdisk_map_properties: Could not get FC connection information for the " -"host-volume connection. Is the host configured properly for FC connections?" -msgstr "" -"_get_vdisk_map_properties: Impossibile ottenere le informazioni di " -"connessione FC per la connessione di host-volume. L'host per le connessioni " -"FC è configurato correttamente?" 
- -#, python-format -msgid "" -"_get_vdisk_map_properties: No node found in I/O group %(gid)s for volume " -"%(vol)s." -msgstr "" -"_get_vdisk_map_properties: Nessun nodo trovato nel gruppo I/O %(gid)s per il " -"volume %(vol)s." - -#, python-format -msgid "" -"_map_lun, vol_instance.path:%(vol)s, volumename: %(volumename)s, volume_uid: " -"%(uid)s, initiator: %(initiator)s, target: %(tgt)s, aglist: %(aglist)s, " -"Storage Configuration Service not found." -msgstr "" -"_map_lun, vol_instance.path:%(vol)s, volumename: %(volumename)s, volume_uid: " -"%(uid)s, iniziatore: %(initiator)s, destinazione: %(tgt)s, aglist: " -"%(aglist)s, Storage Configuration Service non trovato." - -#, python-format -msgid "" -"_unmap_lun, vol_instance.path: %(volume)s, volumename: %(volumename)s, " -"volume_uid: %(uid)s, aglist: %(aglist)s, Controller Configuration Service " -"not found." -msgstr "" -"_unmap_lun, vol_instance.path: %(volume)s, volumename: %(volumename)s, " -"volume_uid: %(uid)s, aglist: %(aglist)s, Controller Configuration Service " -"non trovato." - -#, python-format -msgid "" -"_unmap_lun, volumename: %(volumename)s, volume_uid: %(volume_uid)s, " -"AffinityGroup: %(ag)s, Return code: %(rc)lu, Error: %(errordesc)s." -msgstr "" -"_unmap_lun, volumename: %(volumename)s, volume_uid: %(volume_uid)s, " -"AffinityGroup: %(ag)s, Codice di ritorno: %(rc)lu, Errore: %(errordesc)s." - -#, python-format -msgid "" -"_unmap_lun,vol_instance.path: %(volume)s, AssociatorNames: " -"CIM_ProtocolControllerForUnit, cannot connect to ETERNUS." -msgstr "" -"_unmap_lun,vol_instance.path: %(volume)s, AssociatorNames: " -"CIM_ProtocolControllerForUnit, impossibile connettersi a ETERNUS." - -msgid "_update_volume_stats: Could not get storage pool data." -msgstr "" -"_update_volume_stats: Impossibile ottenere i dati del pool dell'archivio." - -#, python-format -msgid "" -"_wait_for_copy_complete, cpsession: %(cpsession)s, copysession state is " -"BROKEN." 
-msgstr "" -"_wait_for_copy_complete, cpsession: %(cpsession)s, lo stato della sessione " -"di copia è BROKEN." - -#, python-format -msgid "" -"add_vdisk_copy failed: A copy of volume %s exists. Adding another copy would " -"exceed the limit of 2 copies." -msgstr "" -"add_vdisk_copy non riuscito: Esiste una copia del volume %s. L'aggiunta di " -"un'altra copia eccede il limite di 2 copie." - -msgid "add_vdisk_copy started without a vdisk copy in the expected pool." -msgstr "" -"add_vdisk_copy avviato senza una copia del disco virtuale nel pool previsto." - -#, python-format -msgid "all_tenants must be a boolean, got '%s'." -msgstr "all_tenants deve essere un booleano, ricevuto '%s'." - -msgid "already created" -msgstr "già creato" - -msgid "already_created" -msgstr "already_created" - -msgid "attach snapshot from remote node" -msgstr "collegamento istantanea dal nodo remoto " - -#, python-format -msgid "attribute %s not lazy-loadable" -msgstr "l'attributo %s non è caricabile" - -#, python-format -msgid "" -"backup: %(vol_id)s failed to create device hardlink from %(vpath)s to " -"%(bpath)s.\n" -"stdout: %(out)s\n" -" stderr: %(err)s" -msgstr "" -"backup: %(vol_id)s non è riuscito a creare un collegamento reale del " -"dispositivo da %(vpath)s a %(bpath)s.\n" -"stdout: %(out)s\n" -" stderr: %(err)s" - -#, python-format -msgid "" -"backup: %(vol_id)s failed to obtain backup success notification from " -"server.\n" -"stdout: %(out)s\n" -" stderr: %(err)s" -msgstr "" -"backup: %(vol_id)s non ha ricevuto la notifica di backup riuscito dal " -"server.\n" -"stdout: %(out)s\n" -" stderr: %(err)s" - -#, python-format -msgid "" -"backup: %(vol_id)s failed to run dsmc due to invalid arguments on " -"%(bpath)s.\n" -"stdout: %(out)s\n" -" stderr: %(err)s" -msgstr "" -"backup: %(vol_id)s non ha eseguito dsmc a causa di argomenti non validi " -"presenti su %(bpath)s.\n" -"stdout: %(out)s\n" -" stderr: %(err)s" - -#, python-format -msgid "" -"backup: %(vol_id)s failed to run dsmc 
on %(bpath)s.\n" -"stdout: %(out)s\n" -" stderr: %(err)s" -msgstr "" -"backup: %(vol_id)s non ha eseguito dsmc su %(bpath)s.\n" -"stdout: %(out)s\n" -" stderr: %(err)s" - -#, python-format -msgid "backup: %(vol_id)s failed. %(path)s is not a file." -msgstr "backup: %(vol_id)s non riuscito. %(path)s non è un file." - -#, python-format -msgid "" -"backup: %(vol_id)s failed. %(path)s is unexpected file type. Block or " -"regular files supported, actual file mode is %(vol_mode)s." -msgstr "" -"backup: %(vol_id)s non riuscito. %(path)s è un tipo file non previsto. I " -"file di blocco o regolari sono supportati; il modo file reale è %(vol_mode)s." - -#, python-format -msgid "" -"backup: %(vol_id)s failed. Cannot obtain real path to volume at %(path)s." -msgstr "" -"backup: %(vol_id)s non riuscito. Impossibile ottenere il percorso reale del " -"volume su %(path)s." - -msgid "being attached by different mode" -msgstr "in fase di collegamento tramite una modalità differente" - -#, python-format -msgid "call failed: %r" -msgstr "chiamata non riuscita: %r" - -msgid "call failed: GARBAGE_ARGS" -msgstr "chiamata non riuscita: GARBAGE_ARGS" - -msgid "call failed: PROC_UNAVAIL" -msgstr "chiamata non riuscita: PROC_UNAVAIL" - -#, python-format -msgid "call failed: PROG_MISMATCH: %r" -msgstr "chiamata non riuscita: PROG_MISMATCH: %r" - -msgid "call failed: PROG_UNAVAIL" -msgstr "chiamata non riuscita: PROG_UNAVAIL" - -#, python-format -msgid "can't find lun-map, ig:%(ig)s vol:%(vol)s" -msgstr "impossibile trovare lun-map, ig:%(ig)s vol:%(vol)s" - -msgid "can't find the volume to extend" -msgstr "impossibile trovare il volume da estendere" - -msgid "can't handle both name and index in req" -msgstr "impossibile gestire entrambi il nome e l'indice nella richiesta" - -msgid "cannot understand JSON" -msgstr "impossibile riconoscere JSON" - -#, python-format -msgid "cg-%s" -msgstr "gc-%s" - -msgid "cgsnapshot assigned" -msgstr "istantanea cg assegnata" - -msgid "cgsnapshot changed" 
-msgstr "istantanea cg modificata" - -msgid "cgsnapshots assigned" -msgstr "istantanee cg assegnate" - -msgid "cgsnapshots changed" -msgstr "istantanee cg modificate" - -msgid "" -"check_for_setup_error: Password or SSH private key is required for " -"authentication: set either san_password or san_private_key option." -msgstr "" -"check_for_setup_error: La password o la chiave privata SSH è obbligatoria " -"per l'autenticazione: impostare l'opzione san_password o san_private_key." - -msgid "check_for_setup_error: Unable to determine system id." -msgstr "check_for_setup_error: Impossibile determinare l'ID del sistema." - -msgid "check_for_setup_error: Unable to determine system name." -msgstr "check_for_setup_error: Impossibile determinare il nome del sistema." - -msgid "check_hypermetro_exist error." -msgstr "Errore check_hypermetro_exist." - -#, python-format -msgid "clone depth exceeds limit of %s" -msgstr "la profondità del clone supera il limite di %s" - -msgid "consistencygroup assigned" -msgstr "gruppo di coerenza assegnato" - -msgid "consistencygroup changed" -msgstr "gruppo di coerenza modificato" - -msgid "control_location must be defined" -msgstr "control_location deve essere definito" - -msgid "create_cloned_volume, Source Volume does not exist in ETERNUS." -msgstr "create_cloned_volume, Il volume di origine non esiste in ETERNUS." - -#, python-format -msgid "" -"create_cloned_volume, target volume instancename: %(volume_instancename)s, " -"Get Instance Failed." -msgstr "" -"create_cloned_volume, nome istanza volume di destinazione: " -"%(volume_instancename)s, Richiamo istanza non riuscito." - -msgid "create_cloned_volume: Source and destination size differ." -msgstr "" -"create_cloned_volume: la dimensione dell'origine e della destinazione sono " -"differenti." - -#, python-format -msgid "" -"create_cloned_volume: source volume %(src_vol)s size is %(src_size)dGB and " -"doesn't fit in target volume %(tgt_vol)s of size %(tgt_size)dGB." 
-msgstr "" -"create_cloned_volume: la dimensione del volume di origine %(src_vol)s è " -"%(src_size)dGB e non si adatta al volume di destinazione %(tgt_vol)s di " -"dimensione %(tgt_size)dGB." - -msgid "" -"create_consistencygroup_from_src must be creating from a CG snapshot, or a " -"source CG." -msgstr "" -"create_consistencygroup_from_src deve essere creato da un'istantanea CG o da " -"un CG di origine." - -msgid "" -"create_consistencygroup_from_src only supports a cgsnapshot source or a " -"consistency group source. Multiple sources cannot be used." -msgstr "" -"create_consistencygroup_from_src supporta solo un'origine istantanea cg o " -"un'origine gruppo di coerenza. Non possono essere utilizzate più origini." - -#, python-format -msgid "create_copy: Source vdisk %(src)s (%(src_id)s) does not exist." -msgstr "create_copy: vdisk origine %(src)s (%(src_id)s) non esiste." - -#, python-format -msgid "create_copy: Source vdisk %(src)s does not exist." -msgstr "create_copy: vdisk origine %(src)s non esiste." - -msgid "create_host: Host name is not unicode or string." -msgstr "create_host: il nome host non è unicode o stringa." - -msgid "create_host: No initiators or wwpns supplied." -msgstr "create_host: Nessun iniziatore o wwpns fornito." - -msgid "create_hypermetro_pair error." -msgstr "Errore create_hypermetro_pair." - -#, python-format -msgid "create_snapshot, eternus_pool: %(eternus_pool)s, pool not found." -msgstr "create_snapshot, eternus_pool: %(eternus_pool)s, pool non trovato." - -#, python-format -msgid "" -"create_snapshot, snapshotname: %(snapshotname)s, source volume name: " -"%(volumename)s, vol_instance.path: %(vol_instance)s, dest volume name: " -"%(d_volumename)s, pool: %(pool)s, Return code: %(rc)lu, Error: %(errordesc)s." 
-msgstr "" -"create_snapshot, snapshotname: %(snapshotname)s, nome volume di origine: " -"%(volumename)s, vol_instance.path: %(vol_instance)s, nome volume di " -"destinazione: %(d_volumename)s, pool: %(pool)s, Codice di ritorno: %(rc)lu, " -"Errore: %(errordesc)s." - -#, python-format -msgid "" -"create_snapshot, volumename: %(s_volumename)s, source volume not found on " -"ETERNUS." -msgstr "" -"create_snapshot, volumename: %(s_volumename)s, volume di origine non trovato " -"su ETERNUS." - -#, python-format -msgid "" -"create_snapshot, volumename: %(volumename)s, Replication Service not found." -msgstr "" -"create_snapshot, volumename: %(volumename)s, Server di replica non trovato." - -#, python-format -msgid "" -"create_snapshot: Volume status must be \"available\" or \"in-use\" for " -"snapshot. The invalid status is %s." -msgstr "" -"create_snapshot: Lo stato del volume deve essere \"available\" o \"in-use\" " -"per l'istantanea. Lo stato non valido è %s." - -msgid "create_snapshot: get source volume failed." -msgstr "create_snapshot: richiesta del volume di origine non riuscita." - -#, python-format -msgid "" -"create_volume, volume: %(volume)s, EnumerateInstances, cannot connect to " -"ETERNUS." -msgstr "" -"create_volume, volume: %(volume)s, EnumerateInstances, impossibile " -"connettersi a ETERNUS." - -#, python-format -msgid "" -"create_volume, volume: %(volume)s, volumename: %(volumename)s, eternus_pool: " -"%(eternus_pool)s, Storage Configuration Service not found." -msgstr "" -"create_volume, volume: %(volume)s, volumename: %(volumename)s, eternus_pool: " -"%(eternus_pool)s, Storage Configuration Service non trovato." - -#, python-format -msgid "" -"create_volume, volumename: %(volumename)s, poolname: %(eternus_pool)s, " -"Return code: %(rc)lu, Error: %(errordesc)s." -msgstr "" -"create_volume, volumename: %(volumename)s, poolname: %(eternus_pool)s, " -"Codice di ritorno: %(rc)lu, Errore: %(errordesc)s." 
- -msgid "create_volume_from_snapshot, Source Volume does not exist in ETERNUS." -msgstr "" -"create_volume_from_snapshot, Il volume di origine non esiste in ETERNUS." - -#, python-format -msgid "" -"create_volume_from_snapshot, target volume instancename: " -"%(volume_instancename)s, Get Instance Failed." -msgstr "" -"create_volume_from_snapshot, nome istanza volume di destinazione: " -"%(volume_instancename)s, Richiamo istanza non riuscito." - -#, python-format -msgid "create_volume_from_snapshot: Snapshot %(name)s does not exist." -msgstr "create_volume_from_snapshot: l'istantanea %(name)s non esiste." - -#, python-format -msgid "" -"create_volume_from_snapshot: Snapshot status must be \"available\" for " -"creating volume. The invalid status is: %s." -msgstr "" -"create_volume_from_snapshot: Lo stato dell'istantanea deve essere \"available" -"\" per la creazione del volume. Lo stato non valido è: %s." - -msgid "" -"create_volume_from_snapshot: Volume size is different from snapshot based " -"volume." -msgstr "" -"create_volume_from_snapshot: Dimensione del volume diversa dal volume basato " -"sull'istantanea." - -#, python-format -msgid "" -"delete: %(vol_id)s failed to run dsmc due to invalid arguments with stdout: " -"%(out)s\n" -" stderr: %(err)s" -msgstr "" -"eliminazione: %(vol_id)s non ha eseguito dsmc a causa di argomenti non " -"validi con stdout: %(out)s\n" -" stderr: %(err)s" - -#, python-format -msgid "" -"delete: %(vol_id)s failed to run dsmc with stdout: %(out)s\n" -" stderr: %(err)s" -msgstr "" -"eliminazione: %(vol_id)s non ha eseguito dsmc con stdout: %(out)s\n" -" stderr: %(err)s" - -msgid "delete_hypermetro error." -msgstr "Errore delete_hypermetro." - -#, python-format -msgid "delete_initiator: %s ACL not found. Continuing." -msgstr "delete_initiator: ACL %s non trovato. Continuare." - -msgid "delete_replication error." -msgstr "Errore delete_replication." 
- -#, python-format -msgid "deleting snapshot %(snapshot_name)s that has dependent volumes" -msgstr "" -"eliminazione dell'istantanea %(snapshot_name)s che contiene dei volume " -"dipendenti" - -#, python-format -msgid "deleting volume %(volume_name)s that has snapshot" -msgstr "eliminazione del volume %(volume_name)s che contiene l'istantanea" - -msgid "detach snapshot from remote node" -msgstr "scollegamento istantanea dal nodo remoto " - -msgid "do_setup: No configured nodes." -msgstr "do_setup: Nessun nodo configurato." - -#, python-format -msgid "" -"error writing object to swift, MD5 of object in swift %(etag)s is not the " -"same as MD5 of object sent to swift %(md5)s" -msgstr "" -"errore di scrittura dell'oggetto in swift, MD5 dell'oggetto in swift " -"%(etag)s non è uguale a MD5 dell'oggetto inviato a swift %(md5)s" - -#, python-format -msgid "extend_volume, eternus_pool: %(eternus_pool)s, pool not found." -msgstr "extend_volume, eternus_pool: %(eternus_pool)s, pool non trovato." - -#, python-format -msgid "" -"extend_volume, volume: %(volume)s, volumename: %(volumename)s, eternus_pool: " -"%(eternus_pool)s, Storage Configuration Service not found." -msgstr "" -"extend_volume, volume: %(volume)s, volumename: %(volumename)s, eternus_pool: " -"%(eternus_pool)s, Storage Configuration Service non trovato." - -#, python-format -msgid "" -"extend_volume, volumename: %(volumename)s, Return code: %(rc)lu, Error: " -"%(errordesc)s, PoolType: %(pooltype)s." -msgstr "" -"extend_volume, volumename: %(volumename)s, Codice di ritorno: %(rc)lu, " -"Errore: %(errordesc)s, PoolType: %(pooltype)s." - -#, python-format -msgid "extend_volume, volumename: %(volumename)s, volume not found." -msgstr "extend_volume, volumename: %(volumename)s, volume non trovato." 
- -msgid "failed to create new_volume on destination host" -msgstr "impossibile creare new_volume nell'host di destinazione" - -msgid "fake" -msgstr "fake" - -#, python-format -msgid "file already exists at %s" -msgstr "il file esiste già in %s" - -msgid "fileno is not supported by SheepdogIOWrapper" -msgstr "fileno non è supportato da SheepdogIOWrapper " - -msgid "fileno() not supported by RBD()" -msgstr "fileno() non supportato da RBD()" - -#, python-format -msgid "filesystem %s does not exist in Nexenta Store appliance" -msgstr "Il filesystem %s non esiste nell'applicazione Nexenta Store" - -msgid "" -"flashsystem_multihostmap_enabled is set to False, not allow multi host " -"mapping. CMMVC6071E The VDisk-to-host mapping was not created because the " -"VDisk is already mapped to a host." -msgstr "" -"flashsystem_multihostmap_enabled è impostato su False, associazione di più " -"host non consentita. CMMVC6071E L'associazione VDisk a host non è stata " -"creata perché il il VDisk è già associato ad un host." - -msgid "flush() not supported in this version of librbd" -msgstr "flush() non è supportato in questa versione di librbd" - -#, python-format -msgid "fmt=%(fmt)s backed by: %(backing_file)s" -msgstr "fmt=%(fmt)s sottoposto a backup da: %(backing_file)s" - -#, python-format -msgid "fmt=%(fmt)s backed by:%(backing_file)s" -msgstr "fmt=%(fmt)s sottoposto a backup da: %(backing_file)s" - -msgid "force delete" -msgstr "forzare eliminazione " - -msgid "get_hyper_domain_id error." -msgstr "Errore get_hyper_domain_id." - -msgid "get_hypermetro_by_id error." -msgstr "Errore get_hypermetro_by_id." - -#, python-format -msgid "" -"get_iscsi_params: Failed to get target IP for initiator %(ini)s, please " -"check config file." -msgstr "" -"get_iscsi_params: impossibile ottenere l'IP di destinazione per l'iniziatore " -"%(ini)s, controllare il file di configurazione." 
- -#, python-format -msgid "get_pool: Failed to get attributes for volume %s" -msgstr "get_pool: Impossibile ottenere gli attributi per il volume %s" - -msgid "glance_metadata changed" -msgstr "glance_metadata modificato" - -#, python-format -msgid "" -"gpfs_images_share_mode is set to copy_on_write, but %(vol)s and %(img)s " -"belong to different file systems." -msgstr "" -"gpfs_images_share_mode è impostato su copy_on_write, ma %(vol)s e %(img)s " -"appartengono a file system differenti." - -#, python-format -msgid "" -"gpfs_images_share_mode is set to copy_on_write, but %(vol)s and %(img)s " -"belong to different filesets." -msgstr "" -"gpfs_images_share_mode è impostato su copy_on_write, ma %(vol)s e %(img)s " -"appartengono a fileset differenti." - -#, python-format -msgid "" -"hgst_group %(grp)s and hgst_user %(usr)s must map to valid users/groups in " -"cinder.conf" -msgstr "" -"hgst_group %(grp)s e hgst_user %(usr)s devono corrispondere a utenti/gruppi " -"validi in cinder.conf" - -#, python-format -msgid "hgst_net %(net)s specified in cinder.conf not found in cluster" -msgstr "hgst_net %(net)s specificato in cinder.conf non trovato nel cluster" - -msgid "hgst_redundancy must be set to 0 (non-HA) or 1 (HA) in cinder.conf." -msgstr "" -"hgst_redundancy deve essere impostato su 0 (non-HA) o 1 (HA) in cinder.conf. " - -msgid "hgst_space_mode must be an octal/int in cinder.conf" -msgstr "hgst_space_mode deve essere un octal/int in cinder.conf" - -#, python-format -msgid "hgst_storage server %(svr)s not of format :" -msgstr "Server hgst_storage %(svr)s non nel formato :" - -msgid "hgst_storage_servers must be defined in cinder.conf" -msgstr "hgst_storage_servers deve essere definito in cinder.conf" - -msgid "" -"http service may have been abruptly disabled or put to maintenance state in " -"the middle of this operation." 
-msgstr "" -"Il servizio http potrebbe essere stato bruscamente disabilitato o inserito " -"in stato di manutenzione durante questa operazione." - -msgid "id cannot be None" -msgstr "l'id non può essere None" - -#, python-format -msgid "image %s not found" -msgstr "impossibile trovare l'immagine %s" - -#, python-format -msgid "initialize_connection, volume: %(volume)s, Volume not found." -msgstr "initialize_connection, volume: %(volume)s, Volume non trovato." - -#, python-format -msgid "initialize_connection: Failed to get attributes for volume %s." -msgstr "" -"initialize_connection: impossibile ottenere gli attributi per il volume %s." - -#, python-format -msgid "initialize_connection: Missing volume attribute for volume %s." -msgstr "initialize_connection: manca l'attributo volume per il volume %s." - -#, python-format -msgid "" -"initialize_connection: No node found in I/O group %(gid)s for volume %(vol)s." -msgstr "" -"initialize_connection: Nessun nodo trovato nel gruppo I/O %(gid)s per il " -"volume %(vol)s." - -#, python-format -msgid "initialize_connection: vdisk %s is not defined." -msgstr "initialize_connection: vdisk %s non definito." - -#, python-format -msgid "invalid user '%s'" -msgstr "utente non valido '%s'" - -#, python-format -msgid "iscsi portal, %s, not found" -msgstr "portale iscsi, %s, non trovato" - -msgid "" -"iscsi_ip_address must be set in config file when using protocol 'iSCSI'." -msgstr "" -"iscsi_ip_address deve essere impostato nel file di configurazione quando si " -"usa il protocollo 'iSCSI'." - -#, python-format -msgid "key manager error: %(reason)s" -msgstr "errore gestore chiavi: %(reason)s" - -msgid "limit param must be an integer" -msgstr "parametro limite deve essere un numero intero" - -msgid "limit param must be positive" -msgstr "parametro limite deve essere positivo" - -msgid "manage_existing requires a 'name' key to identify an existing volume." 
-msgstr "" -"manage_existing richiede una chiave 'name' per identificare un volume " -"esistente." - -#, python-format -msgid "" -"manage_existing_snapshot: Error managing existing replay %(ss)s on volume " -"%(vol)s" -msgstr "" -"manage_existing_snapshot: Errore durante la gestione della risposta " -"esistente %(ss)s sul volume %(vol)s" - -#, python-format -msgid "marker [%s] not found" -msgstr "indicatore [%s] non trovato" - -#, python-format -msgid "mdiskgrp missing quotes %s" -msgstr "apici mancanti per mdiskgrp %s" - -#, python-format -msgid "migration_policy must be 'on-demand' or 'never', passed: %s" -msgstr "" -"migration_policy deve essere 'on-demand' o 'never', è stato passato: %s" - -#, python-format -msgid "mkfs failed on volume %(vol)s, error message was: %(err)s." -msgstr "" -"mkfs non riuscito sul volume %(vol)s, il messaggio di errore è: %(err)s." - -msgid "mock" -msgstr "mock" - -msgid "mount.glusterfs is not installed" -msgstr "mount.glusterfs non è installato" - -#, python-format -msgid "multiple resources with name %s found by drbdmanage" -msgstr "drbdmanage ha trovato più risorse con il nome %s" - -#, python-format -msgid "multiple resources with snapshot ID %s found" -msgstr "Rilevate più risorse con ID istantanea %s" - -msgid "name cannot be None" -msgstr "il nome non può essere None" - -#, python-format -msgid "no REPLY but %r" -msgstr "nessuna RISPOSTA ma %r" - -#, python-format -msgid "no snapshot with id %s found in drbdmanage" -msgstr "in drbdmanage non è stata trovata alcuna istantanea con id %s" - -#, python-format -msgid "not exactly one snapshot with id %s" -msgstr "non esattamente una sola istantanea con id %s" - -#, python-format -msgid "not exactly one volume with id %s" -msgstr "non esattamente un solo volume con id %s" - -#, python-format -msgid "obj missing quotes %s" -msgstr "apici mancanti per obj %s" - -msgid "open_access_enabled is not off." -msgstr "open_access_enabled non è disattivo." 
- -msgid "progress must be an integer percentage" -msgstr "l'avanzamento deve essere una percentuale a numero intero" - -msgid "provider must be defined" -msgstr "il provider deve essere definito" - -#, python-format -msgid "" -"qemu-img %(minimum_version)s or later is required by this volume driver. " -"Current qemu-img version: %(current_version)s" -msgstr "" -"qemu-img %(minimum_version)s o versione successiva richiesto da questo " -"driver del volume. Versione qemu-img corrente: %(current_version)s" - -#, python-format -msgid "" -"qemu-img is not installed and image is of type %s. Only RAW images can be " -"used if qemu-img is not installed." -msgstr "" -"qemu-img non è installata e l'immagine è di tipo %s. È possibile utilizzare " -"solo le immagini RAW se qemu-img non è installata." - -msgid "" -"qemu-img is not installed and the disk format is not specified. Only RAW " -"images can be used if qemu-img is not installed." -msgstr "" -"qemu-img non è installata e il formato del disco non è specificato. È " -"possibile utilizzare solo le immagini RAW se qemu-img non è installata." - -msgid "rados and rbd python libraries not found" -msgstr "le librerie python rados e rbd non sono state trovate" - -#, python-format -msgid "read_deleted can only be one of 'no', 'yes' or 'only', not %r" -msgstr "read_deleted può essere solo 'no', 'yes' o 'only', non %r" - -#, python-format -msgid "replication_failover failed. %s not found." -msgstr "replication_failover non riuscito. %s non trovato." - -msgid "replication_failover failed. Backend not configured for failover" -msgstr "" -"replication_failover non riuscito. 
Backend non configurato per il failover" - -#, python-format -msgid "" -"restore: %(vol_id)s failed to run dsmc due to invalid arguments on " -"%(bpath)s.\n" -"stdout: %(out)s\n" -" stderr: %(err)s" -msgstr "" -"ripristino: %(vol_id)s non ha eseguito dsmc a causa di argomenti non validi " -"presenti su %(bpath)s.\n" -"stdout: %(out)s\n" -" stderr: %(err)s" - -#, python-format -msgid "" -"restore: %(vol_id)s failed to run dsmc on %(bpath)s.\n" -"stdout: %(out)s\n" -" stderr: %(err)s" -msgstr "" -"ripristino: %(vol_id)s non ha eseguito dsmc su %(bpath)s.\n" -"stdout: %(out)s\n" -" stderr: %(err)s" - -#, python-format -msgid "" -"restore: %(vol_id)s failed.\n" -"stdout: %(out)s\n" -" stderr: %(err)s." -msgstr "" -"restore: %(vol_id)s non riuscito.\n" -"stdout: %(out)s\n" -" stderr: %(err)s." - -msgid "" -"restore_backup aborted, actual object list does not match object list stored " -"in metadata." -msgstr "" -"restore_backup interrotto, l'elenco di oggetti effettivo non corrisponde " -"all'elenco di oggetti archiviato nei metadati." - -#, python-format -msgid "rtslib_fb is missing member %s: You may need a newer python-rtslib-fb." -msgstr "" -"Per rtslib_fb manca il membro %s: potrebbe essere necessario un python-" -"rtslib-fb più recente." - -msgid "san_ip is not set." -msgstr "san_ip non impostato." - -msgid "san_ip must be set" -msgstr "san_ip deve essere impostato" - -msgid "" -"san_login and/or san_password is not set for Datera driver in the cinder." -"conf. Set this information and start the cinder-volume service again." -msgstr "" -"san_login e/o san_password non impostati per il driver Datera in cinder." -"conf. Impostare tali informazioni ed avviare il servizio cinder-volume " -"nuovamente." 
- -msgid "serve() can only be called once" -msgstr "il servizio() può essere chiamato solo una volta" - -#, python-format -msgid "snapshot-%s" -msgstr "istantanea-%s " - -msgid "snapshots assigned" -msgstr "istantanee assegnate" - -msgid "snapshots changed" -msgstr "istantanee modificate" - -#, python-format -msgid "source volume id:%s is not replicated" -msgstr "ID volume origine:%s non replicato" - -msgid "source-name cannot be empty." -msgstr "source-name non può essere vuoto." - -msgid "source-name format should be: 'vmdk_path@vm_inventory_path'." -msgstr "Il formato di source-name deve essere : 'vmdk_path@vm_inventory_path'." - -#, python-format -msgid "status must be %s and" -msgstr "lo stato deve essere %s e" - -msgid "status must be available" -msgstr "lo stato deve essere available" - -msgid "stop_hypermetro error." -msgstr "Errore stop_hypermetro." - -msgid "sync_hypermetro error." -msgstr "Errore sync_hypermetro." - -#, python-format -msgid "" -"targetcli not installed and could not create default directory " -"(%(default_path)s): %(exc)s" -msgstr "" -"targetcli non è installato e non è stato possibile creare la directory " -"predefinita (%(default_path)s): %(exc)s" - -msgid "terminate_connection: Failed to get host name from connector." -msgstr "" -"terminate_connection: Impossibile acquisire il nome host dal connettore." 
- -msgid "timeout creating new_volume on destination host" -msgstr "timeout durante la creazione di new_volume nell'host di destinazione" - -msgid "too many body keys" -msgstr "troppe chiavi del corpo" - -#, python-format -msgid "umount: %s: not mounted" -msgstr "smontaggio: %s: non montato" - -#, python-format -msgid "umount: %s: target is busy" -msgstr "smontaggio: %s: la destinazione è occupata" - -msgid "umount: : some other error" -msgstr "smontaggio: : qualche altro errore" - -msgid "umount: : target is busy" -msgstr "smontaggio: : la destinazione è occupata" - -#, python-format -msgid "unmanage_snapshot: Cannot find snapshot named %s" -msgstr "unmanage_snapshot: Impossibile trovare l'istantanea denominata %s" - -#, python-format -msgid "unmanage_snapshot: Cannot find volume id %s" -msgstr "unmanage_snapshot: Impossibile trovare l'id volume %s" - -#, python-format -msgid "unrecognized argument %s" -msgstr "argomento non riconosciuto %s" - -#, python-format -msgid "unsupported compression algorithm: %s" -msgstr "algoritmo di compressione non supportato: %s" - -msgid "valid iqn needed for show_target" -msgstr "iqn valido necessario per show_target" - -#, python-format -msgid "vdisk %s is not defined." -msgstr "vdisk %s non definito." - -msgid "vmemclient python library not found" -msgstr "Libreria python vmemclient non trovata " - -#, python-format -msgid "volume %s not found in drbdmanage" -msgstr "Il volume %s non è stato trovato in drbdmanage" - -msgid "volume assigned" -msgstr "volume assegnato" - -msgid "volume changed" -msgstr "volume modificato" - -msgid "volume is already attached" -msgstr "il volume è già collegato" - -msgid "volume is not local to this node" -msgstr "Per questo nodo, volume non è locale" - -#, python-format -msgid "" -"volume size %(volume_size)d is too small to restore backup of size %(size)d." -msgstr "" -"La dimensione del volume %(volume_size)d è troppo piccola per il ripristino " -"del backup la cui dimensione è %(size)d." 
- -#, python-format -msgid "volume size %d is invalid." -msgstr "la dimensione del volume %d non è valida." - -msgid "" -"volume_type must be provided when creating a volume in a consistency group." -msgstr "" -"è necessario fornire volume_type quando si crea un volume in un gruppo di " -"gruppo." - -msgid "volume_type_id cannot be None" -msgstr "volume_type_id non può essere None" - -#, python-format -msgid "volume_types must be provided to create consistency group %(name)s." -msgstr "" -"Per creare il gruppo di coerenza %(name)s è necessario fornire volume_types." - -#, python-format -msgid "volume_types must be provided to create consistency group %s." -msgstr "è necessario fornire volume_types per creare un gruppo di coerenza %s." - -msgid "volumes assigned" -msgstr "volumi assegnati" - -msgid "volumes changed" -msgstr "volumi modificati" - -#, python-format -msgid "wait_for_condition: %s timed out." -msgstr "wait_for_condition: %s scaduto." - -#, python-format -msgid "" -"zfssa_manage_policy property needs to be set to 'strict' or 'loose'. Current " -"value is: %s." -msgstr "" -"La proprietà zfssa_manage_policy deve essere impostata su 'strict' o " -"'loose'. Il valore corrente è : %s." diff --git a/cinder/locale/ja/LC_MESSAGES/cinder.po b/cinder/locale/ja/LC_MESSAGES/cinder.po deleted file mode 100644 index 243780333..000000000 --- a/cinder/locale/ja/LC_MESSAGES/cinder.po +++ /dev/null @@ -1,12212 +0,0 @@ -# Translations template for cinder. -# Copyright (C) 2015 ORGANIZATION -# This file is distributed under the same license as the cinder project. -# -# Translators: -# FIRST AUTHOR , 2011 -# Ray Akimoto , 2015 -# Ryo Fujita , 2013 -# Tomoyuki KATO , 2013 -# Andreas Jaeger , 2016. #zanata -# Yoshiki Eguchi , 2016. 
#zanata -msgid "" -msgstr "" -"Project-Id-Version: cinder 9.0.0.0rc2.dev243\n" -"Report-Msgid-Bugs-To: https://bugs.launchpad.net/openstack-i18n/\n" -"POT-Creation-Date: 2016-10-14 23:32+0000\n" -"MIME-Version: 1.0\n" -"Content-Type: text/plain; charset=UTF-8\n" -"Content-Transfer-Encoding: 8bit\n" -"PO-Revision-Date: 2016-10-11 03:43+0000\n" -"Last-Translator: Yoshiki Eguchi \n" -"Language: ja\n" -"Plural-Forms: nplurals=1; plural=0;\n" -"Generated-By: Babel 2.0\n" -"X-Generator: Zanata 3.7.3\n" -"Language-Team: Japanese\n" - -#, python-format -msgid "\t%s" -msgstr "\t%s" - -#, python-format -msgid "" -"\n" -"CoprHD Exception: %(msg)s\n" -msgstr "" -"\n" -"CoprHD の例外: %(msg)s\n" - -#, python-format -msgid "" -"\n" -"General Exception: %(exec_info)s\n" -msgstr "" -"\n" -"一般的な例外: %(exec_info)s\n" - -#, python-format -msgid "" -"\n" -"OpenStack Cinder version: %(version)s\n" -msgstr "" -"\n" -"OpenStack Cinder バージョン: %(version)s\n" - -#, python-format -msgid " but size is now %d" -msgstr "しかし、現在のサイズは %d です" - -#, python-format -msgid " but size is now %d." -msgstr "しかし、現在のサイズは %d です。" - -msgid " or " -msgstr "または" - -#, python-format -msgid "%(attr)s is not set." -msgstr "%(attr)s が設定されていません。" - -#, python-format -msgid "" -"%(driver)s manage_existing cannot manage a volume connected to hosts. Please " -"disconnect this volume from existing hosts before importing" -msgstr "" -"%(driver)s manage_existing がホストに接続したボリュームを管理できません。イン" -"ポート前に既存のホストからこのボリュームの接続を解除してください。" - -#, python-format -msgid "%(err)s" -msgstr "%(err)s" - -#, python-format -msgid "" -"%(err)s\n" -"result: %(res)s." 
-msgstr "" -"%(err)s\n" -"結果: %(res)s。" - -#, python-format -msgid "%(error_message)s" -msgstr "%(error_message)s" - -#, python-format -msgid "%(error_msg)s Error description: %(error_description)s" -msgstr "%(error_msg)s エラーの説明: %(error_description)s" - -#, python-format -msgid "%(error_msg)s Error details: %(error_details)s" -msgstr "%(error_msg)s エラー詳細: %(error_details)s" - -#, python-format -msgid "%(exception)s: %(explanation)s" -msgstr "%(exception)s: %(explanation)s" - -#, python-format -msgid "%(file)s: Permission denied." -msgstr "%(file)s: アクセス権が拒否されました。" - -#, python-format -msgid "" -"%(fun)s: Failed with unexpected CLI output.\n" -" Command: %(cmd)s\n" -" stdout: %(out)s\n" -" stderr: %(err)s" -msgstr "" -"%(fun)s: 予期しない CLI 出力により失敗しました。\n" -"コマンド: %(cmd)s\n" -" stdout: %(out)s\n" -" stderr: %(err)s" - -#, python-format -msgid "%(host)-25s\t%(availability_zone)-15s" -msgstr "%(host)-25s\t%(availability_zone)-15s" - -#, python-format -msgid "%(host)-25s\t%(zone)-15s" -msgstr "%(host)-25s\t%(zone)-15s" - -#, python-format -msgid "%(message)s" -msgstr "%(message)s" - -#, python-format -msgid "" -"%(message)s\n" -"Status Code: %(_status)s\n" -"Body: %(_body)s" -msgstr "" -"%(message)s\n" -"ステータスコード: %(_status)s\n" -"本体: %(_body)s" - -#, python-format -msgid "%(message)s, subjectAltName: %(sanList)s." -msgstr "%(message)s、subjectAltName: %(sanList)s。" - -#, python-format -msgid "%(msg)s And %(num)s services from the cluster were also removed." -msgstr "クラスターの %(msg)s と %(num)s サービスも削除されました。" - -#, python-format -msgid "" -"%(msg_type)s: creating NetworkPortal: ensure port %(port)d on ip %(ip)s is " -"not in use by another service." -msgstr "" -"NetworkPortal の作成に関する %(msg_type)s: 他のサービスが IP %(ip)s 上のポー" -"ト %(port)d を使用していないことを確認してください。" - -#, python-format -msgid "%(name)s cannot be all spaces." 
-msgstr "%(name)s は全て空白にはできません。" - -#, python-format -msgid "%(new_size)s < current size %(size)s" -msgstr "%(new_size)s < 現在のサイズ %(size)s" - -#, python-format -msgid "" -"%(op)s: backup %(bck_id)s, volume %(vol_id)s failed. Backup object has " -"unexpected mode. Image or file backups supported, actual mode is " -"%(vol_mode)s." -msgstr "" -"%(op)s: バックアップ %(bck_id)s、ボリューム %(vol_id)s が失敗しました。バック" -"アップオブジェクトが予期しないモードです。イメージまたはファイルのバックアッ" -"プがサポートされています。実際のモードは %(vol_mode)s です。" - -#, python-format -msgid "%(reason)s" -msgstr "%(reason)s" - -#, python-format -msgid "%(service)s Service is not %(status)s on storage appliance: %(host)s" -msgstr "" -"%(service)s サービスはストレージアプライアンス %(host)s で %(status)s になっ" -"ていません" - -#, python-format -msgid "" -"%(type)s with id %(id)s is already being cleaned up or another host has " -"taken over it." -msgstr "" -" %(type)s のid %(id)s はすでにクリーンアップされているか、他のホストに引き" -"継がれています。" - -#, python-format -msgid "%(value_name)s must be <= %(max_value)d" -msgstr "%(value_name)s は %(max_value)d 以下である必要があります。" - -#, python-format -msgid "%(value_name)s must be >= %(min_value)d" -msgstr "%(value_name)s は %(min_value)d 以上である必要があります。" - -#, python-format -msgid "" -"%(worker_name)s value of %(workers)d is invalid, must be greater than 0." -msgstr "" -"%(workers)d の %(worker_name)s 値が無効です。0 より大きい値にしなければなりま" -"せん。" - -#, python-format -msgid "%s" -msgstr "%s" - -#, python-format -msgid "%s \"data\" is not in result." -msgstr "結果内に %s \"data\" がありません。" - -#, python-format -msgid "%s assigned" -msgstr "割り当てられた %s" - -#, python-format -msgid "" -"%s cannot be accessed. Verify that GPFS is active and file system is mounted." -msgstr "" -"%s にアクセスできません。GPFS がアクティブであること、およびファイルシステム" -"がマウントされていることを確認してください。" - -#, python-format -msgid "%s cannot be resized using clone operation as it contains no blocks." 
-msgstr "" -"%s はブロックを含んでいないため、複製操作を使用してサイズ変更できません。" - -#, python-format -msgid "" -"%s cannot be resized using clone operation as it is hosted on compressed " -"volume" -msgstr "" -"%s は圧縮ボリューム上でホストされているため、複製操作を使用してサイズ変更する" -"ことはできません" - -#, python-format -msgid "%s changed" -msgstr "変更された %s" - -#, python-format -msgid "%s configuration option is not set." -msgstr "%s の設定オプションが設定されていません。" - -#, python-format -msgid "%s does not exist." -msgstr "%s は存在しません。" - -#, python-format -msgid "%s is not a clone!" -msgstr "%s はクローンではありません。" - -#, python-format -msgid "%s is not a directory." -msgstr "%s はディレクトリーではありません。" - -#, python-format -msgid "%s is not installed" -msgstr "%s がインストールされていません。" - -#, python-format -msgid "%s is not installed." -msgstr "%s がインストールされていません。" - -#, python-format -msgid "%s is not set" -msgstr "%s が設定されていません" - -#, python-format -msgid "%s is not set and is required for the replication device to be valid." -msgstr "" -"%s が設定されていません。これはレプリケーションデバイスを有効にするために必要" -"です。" - -#, python-format -msgid "%s is not set." -msgstr "%s が設定されていません。" - -#, python-format -msgid "%s must be a valid raw or qcow2 image." -msgstr "%s は有効な raw または qcow2 イメージでなければなりません。" - -#, python-format -msgid "%s must be an absolute path." -msgstr "%s は絶対パスである必要があります。" - -#, python-format -msgid "%s must be an integer." -msgstr "%s は整数である必要があります。" - -#, python-format -msgid "%s not set in cinder.conf" -msgstr "%s が cinder.conf に設定されていません" - -#, python-format -msgid "%s not set." -msgstr "%s が設定されていません。" - -#, python-format -msgid "'%(key)s = %(value)s'" -msgstr "'%(key)s = %(value)s'" - -#, python-format -msgid "" -"'%(prot)s' is invalid for flashsystem_connection_protocol in config file. " -"valid value(s) are %(enabled)s." -msgstr "" -"設定ファイルの flashsystem_connection_protocol で '%(prot)s' は無効です。有効" -"な値は %(enabled)s です。" - -msgid "'active' must be present when writing snap_info." 
-msgstr "snap_info の書き込み時には 'active' が存在しなければなりません。" - -msgid "'consistencygroup_id' must be specified" -msgstr "'consistencygroup_id' を指定する必要があります。" - -msgid "'group_id' must be specified" -msgstr "'group_id' の指定は必須です" - -msgid "'qemu-img info' parsing failed." -msgstr "'qemu-img info' の解析に失敗しました。" - -msgid "'status' must be specified." -msgstr "'status' を指定する必要があります。" - -msgid "'success' not found" -msgstr "'success' が見つかりません。" - -msgid "'volume_id' must be specified" -msgstr "'volume_id' を指定する必要があります" - -#, python-format -msgid "" -"(Command: %(cmd)s) (Return Code: %(exit_code)s) (Stdout: %(stdout)s) " -"(Stderr: %(stderr)s)" -msgstr "" -"(Command: %(cmd)s) (Return Code: %(exit_code)s) (Stdout: %(stdout)s) " -"(Stderr: %(stderr)s)" - -msgid "400 Bad Request" -msgstr "413 Request entity too large" - -msgid "401 Unauthorized Error" -msgstr "401 Unauthorized エラー" - -msgid "404 Not Found Error" -msgstr "404 Not Found エラー" - -msgid "413 Request entity too large" -msgstr "413 Request entity too large" - -#, python-format -msgid "A LUN (HLUN) was not found. (LDEV: %(ldev)s)" -msgstr "LUN (HLUN) が見つかりませんでした。(LDEV: %(ldev)s)" - -msgid "A concurrent, possibly contradictory, request has been made." -msgstr "矛盾する可能性のある同時実行リクエストが行われました。 " - -#, python-format -msgid "" -"A free LUN (HLUN) was not found. Add a different host group. (LDEV: %(ldev)s)" -msgstr "" -"フリー LUN (HLUN) が見つかりませんでした。異なるホストグループを追加してくだ" -"さい。(LDEV: %(ldev)s)" - -#, python-format -msgid "A host group could not be added. (port: %(port)s, name: %(name)s)" -msgstr "" -"ホストグループを追加できませんでした。(ポート: %(port)s、名前: %(name)s)" - -#, python-format -msgid "" -"A host group could not be deleted. (port: %(port)s, gid: %(gid)s, name: " -"%(name)s)" -msgstr "" -"ホストグループを削除できませんでした。(ポート: %(port)s、gid: %(gid)s、名前: " -"%(name)s)" - -#, python-format -msgid "A host group is invalid. (host group: %(gid)s)" -msgstr "ホストグループが無効です。(ホストグループ: %(gid)s)" - -#, python-format -msgid "A pair cannot be deleted. 
(P-VOL: %(pvol)s, S-VOL: %(svol)s)" -msgstr "ペアを削除できません。(P-VOL: %(pvol)s、S-VOL: %(svol)s)" - -#, python-format -msgid "" -"A pair could not be created. The maximum number of pair is exceeded. (copy " -"method: %(copy_method)s, P-VOL: %(pvol)s)" -msgstr "" -"ペアを作成できませんでした。ペアの最大数を超過しています。(コピーメソッド: " -"%(copy_method)s、P-VOL: %(pvol)s)" - -#, python-format -msgid "A parameter is invalid. (%(param)s)" -msgstr "パラメーターが無効です。(%(param)s)" - -#, python-format -msgid "A parameter value is invalid. (%(meta)s)" -msgstr "パラメーター値が無効です。(%(meta)s)" - -#, python-format -msgid "A pool could not be found. (pool id: %(pool_id)s)" -msgstr "プールが見つかりませんでした。(プール ID: %(pool_id)s)" - -msgid "A readonly volume must be attached as readonly." -msgstr "読み取り専用ボリュームは、読み取り専用として接続する必要があります。" - -#, python-format -msgid "A snapshot status is invalid. (status: %(status)s)" -msgstr "スナップショットの状態況が無効です。(状況: %(status)s)" - -msgid "A valid secondary target MUST be specified in order to failover." -msgstr "" -"フェイルオーバーを行うために、有効なセカンダリーターゲットを指定する必要があ" -"ります。" - -msgid "A volume ID or share was not specified." -msgstr "ボリューム ID またはシェアが指定されませんでした。" - -#, python-format -msgid "A volume status is invalid. (status: %(status)s)" -msgstr "ボリュームの状態が無効です。(状況: %(status)s)" - -#, python-format -msgid "API %(name)s failed with error string %(err)s" -msgstr "API %(name)s がエラー文字列 %(err)s で失敗しました。" - -#, python-format -msgid "" -"API Version String %(version)s is of invalid format. Must be of format " -"MajorNum.MinorNum." -msgstr "" -"API バージョンの文字列 %(version)s が無効な形式です。MajorNum.MinorNum の形式" -"である必要があります。" - -msgid "API key is missing for CloudByte driver." -msgstr "CloudByte ドライバーの API キーがありません。" - -#, python-format -msgid "API response: %(response)s" -msgstr "API レスポンス: %(response)s" - -#, python-format -msgid "API response: %s" -msgstr "API 応答: %s" - -#, python-format -msgid "API version %(version)s is not supported on this method." 
-msgstr "このメソッドでは API バージョン %(version)s はサポートされていません。" - -msgid "API version could not be determined." -msgstr "API バージョンを判別できませんでした。" - -msgid "" -"About to delete child projects having non-zero quota. This should not be " -"performed" -msgstr "" -"ゼロでないクォータを持つ子プロジェクトを削除しようとしています。これは実施す" -"べきではありません。" - -msgid "Access forbidden: Authentication required" -msgstr "アクセス不許可: 認証が必要です。" - -msgid "" -"Access forbidden: You don't have sufficient privileges to perform this " -"operation" -msgstr "アクセス不許可: このオペレーションを行うための十分な権限がありません。" - -msgid "Access list not available for public volume types." -msgstr "パブリックボリュームタイプではアクセスリストを使用できません。" - -msgid "Activate or deactivate QoS error." -msgstr "QoS のアクティブ化またはアクティブ化解除のエラー。" - -msgid "Activate snapshot error." -msgstr "スナップショットのアクティブ化のエラー。" - -msgid "Add FC port to host error." -msgstr "ホストへの FC ポート追加のエラー。" - -msgid "Add fc initiator to array error." -msgstr "アレイへの FC イニシエーター追加のエラー。" - -msgid "Add hypermetro to metrogroup error." -msgstr "metrogroup への hypermetro 追加エラー。" - -msgid "Add initiator to array error." -msgstr "アレイへのイニシエーター追加のエラー。" - -msgid "Add lun to cache error." -msgstr "キャッシュへの LUN 追加のエラー。" - -msgid "Add lun to partition error." -msgstr "パーティションへの LUN 追加のエラー。" - -msgid "Add mapping view error." -msgstr "マッピングビュー追加のエラー。" - -msgid "Add new host error." -msgstr "新規ホスト追加のエラー。" - -msgid "Add port to port group error." -msgstr "ポートグループへのポート追加のエラー。" - -#, python-format -msgid "" -"All the specified storage pools to be managed do not exist. Please check " -"your configuration. Non-existent pools: %s" -msgstr "" -"管理対象となる指定されたすべてのストレージプールが存在しません。設定を確認し" -"てください。存在しないプール: %s" - -msgid "An API version request must be compared to a VersionedMethod object." -msgstr "" -"API バージョンのリクエストは VersionedMethod オブジェクトと比較する必要があり" -"ます。" - -msgid "An error has occurred during backup operation" -msgstr "バックアップ操作中にエラーが発生しました。" - -#, python-format -msgid "An error has occurred in SheepdogDriver. 
(Reason: %(reason)s)" -msgstr "SheepdogDriver でエラーが発生しました。(理由: %(reason)s)" - -#, python-format -msgid "" -"An error occurred during the LUNcopy operation. LUNcopy name: " -"%(luncopyname)s. LUNcopy status: %(luncopystatus)s. LUNcopy state: " -"%(luncopystate)s." -msgstr "" -"LUNcopy 操作中にエラーが発生しました。LUNcopy 名: %(luncopyname)s。LUNcopy 状" -"況: %(luncopystatus)s。LUNcopy 状態: %(luncopystate)s。" - -#, python-format -msgid "An error occurred while attempting to modify Snapshot '%s'." -msgstr "スナップショット '%s' を変更しようとした際にエラーが発生しました。" - -#, python-format -msgid "An error occurred while reading volume \"%s\"." -msgstr "ボリューム \"%s\" の読み取り中にエラーが発生しました。" - -#, python-format -msgid "An error occurred while seeking for volume \"%s\"." -msgstr "ボリューム \"%s\" のシーク中にエラーが発生しました。" - -#, python-format -msgid "An error occurred while writing to volume \"%s\"." -msgstr "ボリューム \"%s\" への書き込み中にエラーが発生しました。" - -#, python-format -msgid "An iSCSI CHAP user could not be added. (username: %(user)s)" -msgstr "iSCSI CHAP ユーザーを追加できませんでした。(ユーザー名: %(user)s)" - -#, python-format -msgid "An iSCSI CHAP user could not be deleted. (username: %(user)s)" -msgstr "iSCSI CHAP ユーザーを削除できませんでした。(ユーザー名: %(user)s)" - -#, python-format -msgid "" -"An iSCSI target could not be added. (port: %(port)s, alias: %(alias)s, " -"reason: %(reason)s)" -msgstr "" -"iSCSI ターゲットを追加できませんでした。(ポート: %(port)s、別名: %(alias)s、" -"理由: %(reason)s)" - -#, python-format -msgid "" -"An iSCSI target could not be deleted. (port: %(port)s, tno: %(tno)s, alias: " -"%(alias)s)" -msgstr "" -"iSCSI ターゲットを削除できませんでした。(ポート: %(port)s、tno: %(tno)s、別" -"名: %(alias)s)" - -msgid "An unknown error occurred." -msgstr "不明なエラーが発生しました。" - -msgid "An unknown exception occurred." -msgstr "不明な例外が発生しました。" - -msgid "" -"An user with a token scoped to a subproject is not allowed to see the quota " -"of its parents." -msgstr "" -"サブプロジェクトに割り当てられたトークンを持つユーザーは、親のクォータを参照" -"することはできません。" - -msgid "Append port group description error." 
-msgstr "ポートグループの説明追加のエラー。" - -#, python-format -msgid "" -"Applying the zones and cfgs to the switch failed (error code=%(err_code)s " -"error msg=%(err_msg)s." -msgstr "" -"スイッチへのゾーンおよび cfgs の適用が失敗しました (エラーコード =" -"%(err_code)s エラーメッセージ =%(err_msg)s。" - -#, python-format -msgid "Array does not exist or is offline. Current status of array is %s." -msgstr "" -"アレイが存在しないかオフラインになっています。現在のアレイの状態は %s です。" - -msgid "Associate host to hostgroup error." -msgstr "ホストグループへのホストの関連付けのエラー。" - -msgid "Associate host to mapping view error." -msgstr "マッピングビューへのホストの関連付けのエラー。" - -msgid "Associate initiator to host error." -msgstr "ホストへのイニシエーターの関連付けのエラー。" - -msgid "Associate lun to QoS error." -msgstr "QoS への LUN の関連付けのエラー。" - -msgid "Associate lun to lungroup error." -msgstr "LUN グループへの LUN の関連付けのエラー。" - -msgid "Associate lungroup to mapping view error." -msgstr "マッピングビューへの LUN グループの関連付けのエラー。" - -msgid "Associate portgroup to mapping view error." -msgstr "マッピングビューへのポートグループの関連付けのエラー。" - -#, python-format -msgid "Async error: Unable to retrieve %(obj)s method %(method)s result" -msgstr "" -"非同期エラー: オブジェクト %(obj)s 、メソッド %(method)s の結果を取得でき" -"ません。" - -msgid "At least one valid iSCSI IP address must be set." -msgstr "有効な iSCSI IP アドレスを 1 つ以上設定する必要があります。" - -#, python-format -msgid "" -"Attach volume (%(name)s) to host (%(hostname)s) initiator " -"(%(initiatorport)s) failed:\n" -"%(err)s" -msgstr "" -"ホスト (%(hostname)s) 、イニシエーター (%(initiatorport)s) へのボリュー" -"ム (%(name)s) の接続が失敗しました。\n" -"%(err)s" - -#, python-format -msgid "Attach volume to host failed in copy volume to image, retcode: %s." -msgstr "" -"ホストへのボリュームの接続が、イメージからのボリュームのコピー中に失敗しまし" -"た。retcode: %s" - -msgid "Attach_volume failed." -msgstr "ボリュームの接続に失敗しました。" - -#, python-format -msgid "Attempt to transfer %s with invalid auth key." -msgstr "無効な認証キーを使用して %s を転送しようとしています。" - -#, python-format -msgid "Attribute: %s not found." 
-msgstr "属性 %s が見つかりません。" - -#, python-format -msgid "Auth group [%s] details not found in CloudByte storage." -msgstr "CloudByte のストレージで認証グループ [%s] の詳細が見つかりません。" - -msgid "Auth user details not found in CloudByte storage." -msgstr "CloudByte のストレージで認証ユーザーの詳細が見つかりません。" - -#, python-format -msgid "Authentication failed, verify the switch credentials, error code %s." -msgstr "" -"認証が失敗しました。スイッチのクレデンシャルを検証してください。エラーコード " -"%s。" - -#, python-format -msgid "Availability zone '%(s_az)s' is invalid." -msgstr "アベイラビリティーゾーン '%(s_az)s' は無効です。" - -msgid "Available categories:" -msgstr "使用可能カテゴリー:" - -msgid "" -"Back-end QoS specs are not supported on this storage family and ONTAP " -"version." -msgstr "" -"バックエンド QoS 仕様はこのストレージファミリーおよび ONTAP バージョンでサ" -"ポートされません。" - -#, python-format -msgid "Backend doesn't exist (%(backend)s)" -msgstr "バックエンドが存在しません(%(backend)s)" - -#, python-format -msgid "Backend reports: %(message)s" -msgstr "バックエンドレポート: %(message)s" - -msgid "Backend reports: item already exists" -msgstr "バックエンドレポート: 項目は既に存在します" - -msgid "Backend reports: item not found" -msgstr "バックエンドレポート: 項目が見つかりません" - -#, python-format -msgid "Backend service retry timeout hit: %(timeout)s sec" -msgstr "" -"バックエンドサービスで再試行のタイムアウトが発生する時間: %(timeout)s 秒" - -msgid "Backend storage did not configure fiber channel target." -msgstr "" -"バックエンドストレージによってファイバーチャネルターゲットは設定されませんで" -"した。" - -msgid "Backing up an in-use volume must use the force flag." -msgstr "" -"使用中のボリュームのバックアップを行う際は、force フラグを使用する必要があり" -"ます。" - -#, python-format -msgid "Backup %(backup_id)s could not be found." -msgstr "バックアップ %(backup_id)s が見つかりませんでした。" - -msgid "Backup RBD operation failed" -msgstr "バックアップ RBD 操作が失敗しました。" - -msgid "Backup already exists in database." 
-msgstr "データベースのバックアップが既に存在しています。" - -#, python-format -msgid "Backup driver reported an error: %(message)s" -msgstr "バックアップドライバーがエラーを報告しました: %(message)s" - -msgid "Backup id required" -msgstr "バックアップ ID が必要です" - -msgid "Backup is not supported for GlusterFS volumes with snapshots." -msgstr "" -"スナップショットが含まれる GlusterFS ボリュームのバックアップはサポートされて" -"いません。" - -msgid "Backup is only supported for SOFS volumes without backing file." -msgstr "" -"バックアップはバッキングファイルのない SOFS ボリュームでのみサポートされま" -"す。" - -msgid "Backup is only supported for raw-formatted GlusterFS volumes." -msgstr "" -"バックアップは、ロー形式の GlusterFS ボリュームに対してのみサポートされます。" - -msgid "Backup is only supported for raw-formatted SOFS volumes." -msgstr "" -"バックアップは、ロー形式の SOFS ボリュームに対してのみサポートされます。" - -msgid "Backup operation of an encrypted volume failed." -msgstr "暗号化ボリュームのバックアップ操作が失敗しました。" - -#, python-format -msgid "" -"Backup service %(configured_service)s does not support verify. Backup id " -"%(id)s is not verified. Skipping verify." -msgstr "" -"バックアップサービス %(configured_service)s では検査がサポートされていませ" -"ん。バックアップ ID %(id)s は検査されません。検査をスキップします。" - -#, python-format -msgid "" -"Backup service %(service)s does not support verify. Backup id %(id)s is not " -"verified. Skipping reset." -msgstr "" -"バックアップサービス %(service)s では検査がサポートされていません。バックアッ" -"プ ID %(id)s は検査されません。リセットをスキップします。" - -#, python-format -msgid "Backup should only have one snapshot but instead has %s" -msgstr "" -"バックアップに含まれるスナップショットは 1 つのみでなければなりませんが、%s " -"個含まれています" - -msgid "Backup status must be available" -msgstr "バックアップの状態は「使用可能」でなければなりません。" - -#, python-format -msgid "Backup status must be available and not %s." 
-msgstr "バックアップの状態は %s ではなく「使用可能」でなければなりません。" - -msgid "Backup status must be available or error" -msgstr "バックアップの状態は「使用可能」または「エラー」でなければなりません。" - -msgid "Backup to be restored has invalid size" -msgstr "復元するバックアップのサイズが無効です。" - -#, python-format -msgid "Bad HTTP response status %(status)s" -msgstr "不正な HTTP レスポンスステータス : %(status)s" - -#, python-format -msgid "Bad Status line returned: %(arg)s." -msgstr "正しくないステータス表示行が返されました: %(arg)s。" - -#, python-format -msgid "Bad key(s) in quota set: %s" -msgstr "無効なキーがクォータセット 内にあります: %s" - -#, python-format -msgid "" -"Bad or unexpected response from the storage volume backend API: %(data)s" -msgstr "" -"ストレージボリュームバックエンド API からの不正な応答または想定しない応答: " -"%(data)s" - -#, python-format -msgid "Bad project format: project is not in proper format (%s)" -msgstr "不正なプロジェクト形式: プロジェクトの形式が正しくありません (%s)" - -msgid "Bad response from Datera API" -msgstr "Datera API からの正しくない応答" - -msgid "Bad response from SolidFire API" -msgstr "SolidFire API からの正しくない応答" - -#, python-format -msgid "Bad response from XMS, %s" -msgstr "XMS からの正しくない応答、%s" - -msgid "Binary" -msgstr "バイナリー" - -msgid "Blank components" -msgstr "空白コンポーネント" - -msgid "Blockbridge api host not configured" -msgstr "Blockbridge API のホストが設定されていません。" - -#, python-format -msgid "Blockbridge configured with invalid auth scheme '%(auth_scheme)s'" -msgstr "" -"Blockbridge に無効な認証スキーム '%(auth_scheme)s' が設定されています。" - -msgid "Blockbridge default pool does not exist" -msgstr "Blockbridge のデフォルトプールが存在しません。" - -msgid "" -"Blockbridge password not configured (required for auth scheme 'password')" -msgstr "" -"Blockbridge のパスワードが設定されていません ('password' の認証スキームに必" -"要)" - -msgid "Blockbridge pools not configured" -msgstr "Blockbridge プールが設定されていません。" - -msgid "Blockbridge token not configured (required for auth scheme 'token')" -msgstr "" -"Blockbridge のトークンが設定されていません ('token' の認証スキームに必要)" - -msgid "Blockbridge user not configured (required for auth scheme 'password')" -msgstr "" -"Blockbridge のユーザーが設定されていません 
('password' の認証スキームに必要)" - -msgid "Bourne internal server error" -msgstr "Bourne 内部サーバーエラー" - -#, python-format -msgid "Brocade Fibre Channel Zoning CLI error: %(reason)s" -msgstr "BrocadeファイバーチャネルゾーニングCLIエラー:%(reason)s" - -#, python-format -msgid "Brocade Fibre Channel Zoning HTTP error: %(reason)s" -msgstr "BrocadeファイバーチャネルゾーニングHTTPエラー:%(reason)s" - -msgid "CHAP secret should be 12-16 bytes." -msgstr "CHAP 秘密は 12 バイトから 16 バイトである必要があります。" - -#, python-format -msgid "" -"CLI Exception output:\n" -" command: %(cmd)s\n" -" stdout: %(out)s\n" -" stderr: %(err)s" -msgstr "" -"CLI 例外出力:\n" -"コマンド: %(cmd)s\n" -" stdout: %(out)s\n" -" stderr: %(err)s" - -#, python-format -msgid "" -"CLI Exception output:\n" -" command: %(cmd)s\n" -" stdout: %(out)s\n" -" stderr: %(err)s." -msgstr "" -"CLI 例外出力:\n" -"コマンド: %(cmd)s\n" -" stdout: %(out)s\n" -" stderr: %(err)s。" - -msgid "" -"CMMVC6071E The VDisk-to-host mapping was not created because the VDisk is " -"already mapped to a host.\n" -"\"" -msgstr "" -"CMMVC6071E VDisk は既にホストにマッピングされているため、VDisk からホストへの" -"マッピングは作成されませんでした。\n" -"\"" - -msgid "CONCERTO version is not supported" -msgstr "CONCERTO バージョンはサポートされません" - -#, python-format -msgid "CPG (%s) doesn't exist on array" -msgstr "CPG (%s) がアレイ上に存在しません" - -msgid "Cache name is None, please set smartcache:cachename in key." -msgstr "" -"キャッシュ名がありません。キーで smartcache:cachename を設定してください。" - -#, python-format -msgid "Cache volume %s does not have required properties" -msgstr "キャッシュボリューム %s に必須のプロパティーがありません" - -msgid "Call returned a None object" -msgstr "呼び出しが None オブジェクトを返しました。" - -msgid "Can not add FC port to host." -msgstr "ホストに FC ポートを追加できません。" - -#, python-format -msgid "Can not find cache id by cache name %(name)s." -msgstr "" -"キャッシュ名 %(name)s によってキャッシュ ID を見つけることができません。" - -#, python-format -msgid "Can not find cinder volume - %(volumeName)s" -msgstr "Cinder ボリュームが見つかりません - %(volumeName)s" - -#, python-format -msgid "Can not find cinder volume - %(volumeName)s." 
-msgstr "Cinder ボリュームが見つかりません - %(volumeName)s" - -#, python-format -msgid "Can not find cinder volume - %s" -msgstr "Cinder ボリュームが見つかりません - %s" - -#, python-format -msgid "Can not find cinder volume - %s." -msgstr "Cinder ボリュームが見つかりません - %s" - -#, python-format -msgid "Can not find client id. The connection target name is %s." -msgstr "クライアント ID が見つかりません。接続ターゲット名は %s です。" - -#, python-format -msgid "Can not find consistency group: %s." -msgstr "整合性グループが見つかりません: %s" - -#, python-format -msgid "Can not find partition id by name %(name)s." -msgstr "名前 %(name)s によってパーティション ID を見つけることができません。" - -#, python-format -msgid "Can not find this error code:%s." -msgstr "エラーコード %s が見つかりません。" - -#, python-format -msgid "Can not get pool info. pool: %s" -msgstr "プール情報を取得できません。プール: %s" - -msgid "Can not get target ip address. " -msgstr "ターゲット IP アドレスを取得できません。" - -msgid "" -"Can not set tiering policy for a deduplicated volume. Set the tiering policy " -"on the pool where the deduplicated volume locates." -msgstr "" -"重複排除されたボリュームには階層化ポリシーを設定できません。重複排除されたボ" -"リュームのあるプールに対して階層化ポリシーを設定します。" - -#, python-format -msgid "Can not translate %s to integer." -msgstr "%s を整数に変換できません。" - -#, python-format -msgid "Can't access 'scality_sofs_config': %s" -msgstr "'scality_sofs_config' にアクセスできません: %s" - -msgid "Can't decode backup record." -msgstr "バックアップレコードを復号化できません。" - -#, python-format -msgid "Can't extend replication volume, volume: %(id)s" -msgstr "レプリケーションボリュームを拡張できません。ボリューム: %(id)s" - -#, python-format -msgid "" -"Can't find HNAS configurations on cinder.conf neither on the path %(xml)s." -msgstr "" -"パス %(xml)s 上のいずれの cinder.conf にも、 HNAS 設定が見つかりません。" - -msgid "Can't find LUN on the array, please check the source-name or source-id." -msgstr "" -"アレイで LUN を見つけることができません。source-name または source-id を確認" -"してください。" - -#, python-format -msgid "Can't find cache name on the array, cache name is: %(name)s." 
-msgstr "" -"アレイでキャッシュ名を見つけることができません。キャッシュ名は %(name)s で" -"す。" - -#, python-format -msgid "Can't find lun info on the array. volume: %(id)s, lun name: %(name)s." -msgstr "" -"アレイで LUN 情報が見つかりません。ボリューム: %(id)s、LUN 名: %(name)s。" - -#, python-format -msgid "Can't find partition name on the array, partition name is: %(name)s." -msgstr "" -"アレイでパーティション名を見つけることができません。パーティション名は " -"%(name)s です。" - -#, python-format -msgid "Can't find service: %s" -msgstr "サービスが見つかりません: %s" - -msgid "" -"Can't find snapshot on array, please check the source-name or source-id." -msgstr "" -"アレイでスナップショットを見つけることができません。source-name または " -"source-id を確認してください。" - -msgid "Can't find the same host id from arrays." -msgstr "アレイから同一のホスト ID が見つかりません。" - -msgid "Can't find valid IP from rest, please check it on storage." -msgstr "" -"rest で有効な IP を見つけることができません。ストーレジ上で確認を行ってくださ" -"い。" - -#, python-format -msgid "Can't get volume id from snapshot, snapshot: %(id)s" -msgstr "" -"スナップショットからボリューム ID を取得できません。スナップショット: %(id)s" - -#, python-format -msgid "Can't get volume id. Volume name: %s." -msgstr "ボリューム ID を取得できません。ボリューム名: %s。" - -#, python-format -msgid "Can't import LUN %(lun_id)s to Cinder. LUN type mismatched." -msgstr "" -"Cinder に LUN %(lun_id)s をインポートできません。LUN タイプが一致しません。" - -#, python-format -msgid "Can't import LUN %s to Cinder. Already exists in a HyperMetroPair." -msgstr "" -"Cinder に LUN %s をインポートできません。すでに HyperMetroPair 内に存在しま" -"す。" - -#, python-format -msgid "Can't import LUN %s to Cinder. Already exists in a LUN copy task." -msgstr "" -"Cinder に LUN %s をインポートできません。すでに LUN コピータスク内に存在しま" -"す。" - -#, python-format -msgid "Can't import LUN %s to Cinder. Already exists in a LUN group." -msgstr "" -"Cinder に LUN %s をインポートできません。すでに LUN グループ内に存在します。" - -#, python-format -msgid "Can't import LUN %s to Cinder. Already exists in a LUN mirror." -msgstr "" -"Cinder に LUN %s をインポートできません。すでに LUN ミラー内に存在します。" - -#, python-format -msgid "Can't import LUN %s to Cinder. Already exists in a SplitMirror." 
-msgstr "" -"Cinder に LUN %s をインポートできません。既に SplitMirror 内に存在します。" - -#, python-format -msgid "Can't import LUN %s to Cinder. Already exists in a migration task." -msgstr "" -"Cinder に LUN %s をインポートできません。すでにマイグレーションタスク内に存在" -"します。" - -#, python-format -msgid "" -"Can't import LUN %s to Cinder. Already exists in a remote replication task." -msgstr "" -"Cinder に LUN %s をインポートできません。すでにリモートのレプリケーションタス" -"ク内に存在します。" - -#, python-format -msgid "Can't import LUN %s to Cinder. LUN status is not normal." -msgstr "" -"Cinder にスナップショット%s をインポートできません。LUN 状態が正常ではありま" -"せん。" - -#, python-format -msgid "Can't import snapshot %s to Cinder. Snapshot doesn't belong to volume." -msgstr "" -"Cinder にスナップショット%s をインポートできません。スナップショットはボ" -"リュームに属していません。" - -#, python-format -msgid "Can't import snapshot %s to Cinder. Snapshot is exposed to initiator." -msgstr "" -"Cinder にスナップショット%s をインポートできません。スナップショットはイニシ" -"エーターに公開されています。" - -#, python-format -msgid "" -"Can't import snapshot %s to Cinder. Snapshot status is not normal or running " -"status is not online." -msgstr "" -"Cinder にスナップショット %s をインポートできません。スナップショットの状態が" -"正常ではないか、実行状態がオンラインではありません。" - -msgid "Can't parse backup record." -msgstr "バックアップレコードを解析できません。" - -#, python-format -msgid "Can't support cache on the array, cache name is: %(name)s." -msgstr "アレイでキャッシュをサポートできません。キャッシュ名は %(name)s です。" - -#, python-format -msgid "Can't support partition on the array, partition name is: %(name)s." -msgstr "" -"アレイでパーティションがサポートできません、パーティション名は %(name)s で" -"す。" - -msgid "Can't support qos on the array" -msgstr "アレイで QoS がサポートできません。" - -msgid "Can't support qos on the array." -msgstr "アレイで QoS がサポートできません。" - -msgid "Can't support tier on the array." -msgstr "アレイでティアがサポートできません。" - -#, python-format -msgid "" -"Cannot add volume %(volume_id)s to consistency group %(group_id)s because it " -"has no volume type." 
-msgstr "" -"ボリューム %(volume_id)s にはボリュームタイプがないため、このボリュームを整合" -"性グループ %(group_id)s に追加できません。" - -#, python-format -msgid "" -"Cannot add volume %(volume_id)s to consistency group %(group_id)s because it " -"is already in consistency group %(orig_group)s." -msgstr "" -"ボリューム %(volume_id)s が既に整合性グループ %(orig_group)s 内に存在するた" -"め、このボリュームを整合性グループ %(group_id)s に追加することはできません。" - -#, python-format -msgid "" -"Cannot add volume %(volume_id)s to consistency group %(group_id)s because " -"volume cannot be found." -msgstr "" -"ボリューム %(volume_id)s は見つからないため、整合性グループ %(group_id)s に追" -"加できません。" - -#, python-format -msgid "" -"Cannot add volume %(volume_id)s to consistency group %(group_id)s because " -"volume does not exist." -msgstr "" -"ボリューム %(volume_id)s は存在しないため、整合性グループ %(group_id)s に追加" -"できません。" - -#, python-format -msgid "" -"Cannot add volume %(volume_id)s to consistency group %(group_id)s because " -"volume is in an invalid state: %(status)s. Valid states are: %(valid)s." -msgstr "" -"ボリューム %(volume_id)s は無効な状態 %(status)s であるため、整合性グルー" -"プ%(group_id)s に追加できません。有効な状態は %(valid)s です。" - -#, python-format -msgid "" -"Cannot add volume %(volume_id)s to consistency group %(group_id)s because " -"volume type %(volume_type)s is not supported by the group." -msgstr "" -"ボリュームタイプ %(volume_type)s は整合性グループ %(group_id)s ではサポートさ" -"れていないため、ボリューム %(volume_id)s をこの整合性グループに追加できませ" -"ん。" - -#, python-format -msgid "" -"Cannot add volume %(volume_id)s to group %(group_id)s because it has no " -"volume type." -msgstr "" -"ボリューム %(volume_id)s にはボリュームタイプがないため、このボリュームをグ" -"ループ %(group_id)s に追加できません。" - -#, python-format -msgid "" -"Cannot add volume %(volume_id)s to group %(group_id)s because it is already " -"in group %(orig_group)s." -msgstr "" -"ボリューム %(volume_id)s が既にグループ %(orig_group)s 内に存在するため、この" -"ボリュームをグループ %(group_id)s に追加することはできません。" - -#, python-format -msgid "" -"Cannot add volume %(volume_id)s to group %(group_id)s because volume cannot " -"be found." 
-msgstr "" -"ボリューム %(volume_id)s は見つからないため、グループ %(group_id)s に追加でき" -"ません。" - -#, python-format -msgid "" -"Cannot add volume %(volume_id)s to group %(group_id)s because volume does " -"not exist." -msgstr "" -"ボリューム %(volume_id)s は存在しないため、グループ %(group_id)s に追加できま" -"せん。" - -#, python-format -msgid "" -"Cannot add volume %(volume_id)s to group %(group_id)s because volume is in " -"an invalid state: %(status)s. Valid states are: %(valid)s." -msgstr "" -"ボリューム %(volume_id)s は無効な状態 %(status)s であるため、グルー" -"プ%(group_id)s に追加できません。有効な状態は %(valid)s です。" - -#, python-format -msgid "" -"Cannot add volume %(volume_id)s to group %(group_id)s because volume type " -"%(volume_type)s is not supported by the group." -msgstr "" -"ボリュームタイプ %(volume_type)s はグループ %(group_id)s ではサポートされてい" -"ないため、ボリューム %(volume_id)s をこのグループに追加できません。" - -#, python-format -msgid "" -"Cannot attach already attached volume %s; multiattach is disabled via the " -"'netapp_enable_multiattach' configuration option." -msgstr "" -"すでに接続されているボリューム%s を接続できません。マルチ接続は " -"'netapp_enable_multiattach' 設定オプションにより無効になっています。" - -msgid "Cannot change VF context in the session." -msgstr "VF コンテキストをセッション内で変更できません。" - -#, python-format -msgid "" -"Cannot change VF context, specified VF is not available in the manageable VF " -"list %(vf_list)s." -msgstr "" -"VF コンテキストを変更できません。指定された VF は管理可能な VF リスト " -"%(vf_list)s で使用可能ではありません。" - -msgid "Cannot connect to ECOM server." -msgstr "ECOM サーバーに接続できません。" - -#, python-format -msgid "" -"Cannot create consistency group %(group)s because snapshot %(snap)s is not " -"in a valid state. Valid states are: %(valid)s." -msgstr "" -"スナップショット %(snap)s は有効な状態ではないため、整合性グループ%(group)s " -"を作成できません。有効な状態は %(valid)s です。" - -#, python-format -msgid "" -"Cannot create consistency group %(group)s because source volume " -"%(source_vol)s is not in a valid state. Valid states are: %(valid)s." 
-msgstr "" -"ソースボリューム %(source_vol)s が有効な状態にないため、整合性グループ " -"%(group)s を作成できません。有効な状態は %(valid)s です。" - -#, python-format -msgid "Cannot create directory %s." -msgstr "ディレクトリー %s を作成できません。" - -msgid "Cannot create encryption specs. Volume type in use." -msgstr "暗号化仕様を作成できません。ボリュームタイプは使用中です。" - -#, python-format -msgid "" -"Cannot create group %(group)s because snapshot %(snap)s is not in a valid " -"state. Valid states are: %(valid)s." -msgstr "" -"スナップショット %(snap)s は有効な状態ではないため、グループ%(group)s を作成" -"できません。有効な状態は %(valid)s です。" - -#, python-format -msgid "" -"Cannot create group %(group)s because source volume %(source_vol)s is not in " -"a valid state. Valid states are: %(valid)s." -msgstr "" -"ソースボリューム %(source_vol)s が有効な状態にないため、グループ %(group)s を" -"作成できません。有効な状態は %(valid)s です。" - -#, python-format -msgid "Cannot create group_type with name %(name)s and specs %(group_specs)s" -msgstr "" -"名前 %(name)s および仕様 %(group_specs)s を使用して group_type を作成できませ" -"ん。" - -#, python-format -msgid "" -"Cannot create image of disk format: %s. Only vmdk disk format is accepted." -msgstr "" -"ディスク形式 %s のイメージを作成できません。vmdk ディスク形式のみが受け入れら" -"れます。" - -#, python-format -msgid "Cannot create masking view: %(maskingViewName)s. " -msgstr "マスキングビュー %(maskingViewName)s を作成できません。" - -#, python-format -msgid "" -"Cannot create more than %(req)s volumes on the ESeries array when " -"'netapp_enable_multiattach' is set to true." -msgstr "" -" 'netapp_enable_multiattach' が true に設定されている場合、%(req)s 以上のボ" -"リュームを ESeries アレイに作成できません。" - -#, python-format -msgid "Cannot create or find an storage group with name %(sgGroupName)s." -msgstr "" -"名前が %(sgGroupName)s のストレージグループを作成または検出できません。" - -#, python-format -msgid "Cannot create volume of size %s: not multiple of 8GB." 
-msgstr "サイズが %s のボリュームを作成できません: 8GB の倍数ではありません。" - -#, python-format -msgid "Cannot create volume_type with name %(name)s and specs %(extra_specs)s" -msgstr "" -"名前 %(name)s および仕様 %(extra_specs)s を使用して volume_type を作成できま" -"せん。" - -#, python-format -msgid "Cannot delete LUN %s while snapshots exist." -msgstr "スナップショットが存在する間は、LUN %s は削除できません。" - -#, python-format -msgid "" -"Cannot delete cache volume: %(cachevol_name)s. It was updated at " -"%(updated_at)s and currently has %(numclones)d volume instances." -msgstr "" -"キャッシュボリューム: %(cachevol_name)s を削除できません。%(updated_at)s に更" -"新されたこのキャッシュボリュームには現在 %(numclones)d のボリュームインスタン" -"スがあります。" - -#, python-format -msgid "" -"Cannot delete cache volume: %(cachevol_name)s. It was updated at " -"%(updated_at)s and currently has %(numclones)s volume instances." -msgstr "" -"キャッシュボリューム: %(cachevol_name)s を削除できません。%(updated_at)s に更" -"新されたこのキャッシュボリュームには 現在 %(numclones)s のボリュームインスタ" -"ンスがあります。" - -#, python-format -msgid "" -"Cannot delete consistency group %(id)s. %(reason)s, and it cannot be the " -"source for an ongoing CG or CG Snapshot creation." -msgstr "" -"整合性グループ %(id)s を削除できません。理由 : %(reason)s 。 これを進行中の " -"CG または CG スナップショットのソースとすることはできません。" - -msgid "Cannot delete encryption specs. Volume type in use." -msgstr "暗号化仕様を削除できません。ボリュームタイプは使用中です。" - -msgid "Cannot determine storage pool settings." -msgstr "ストレージプールの設定を決定できません。" - -msgid "Cannot execute /sbin/mount.sofs" -msgstr "/sbin/mount.sofs を実行できません。" - -#, python-format -msgid "Cannot find CG group %s." -msgstr "CG グループ %s が見つかりません。" - -#, python-format -msgid "" -"Cannot find Controller Configuration Service for storage system " -"%(storage_system)s." -msgstr "" -"ストレージシステム %(storage_system)s のコントローラー構成サービスが見つかり" -"ません。" - -#, python-format -msgid "Cannot find Replication Service to create volume for snapshot %s." -msgstr "" -"スナップショット %s のボリュームを作成するための複製サービスが見つかりませ" -"ん。" - -#, python-format -msgid "Cannot find Replication Service to delete snapshot %s." 
-msgstr "" -"スナップショット %s を削除するレプリケーションサービスが見つかりません。" - -#, python-format -msgid "Cannot find Replication service on system %s." -msgstr "複製サービスがシステム %s に見つかりません。" - -#, python-format -msgid "Cannot find Volume: %(id)s. unmanage operation. Exiting..." -msgstr "" -"ボリュームが見つかりません: %(id)s。処理の管理を解除します。処理を終了しま" -"す。" - -#, python-format -msgid "Cannot find Volume: %(volumename)s. Extend operation. Exiting...." -msgstr "ボリューム: %(volumename)s が見つかりません。拡張操作。終了中..." - -#, python-format -msgid "Cannot find device number for volume %(volumeName)s." -msgstr "ボリューム %(volumeName)s の装置番号が見つかりません。" - -msgid "Cannot find migration task." -msgstr "マイグレーションタスクを見つけることができません。" - -#, python-format -msgid "Cannot find replication service on system %s." -msgstr "システム %s でレプリケーションサービスが見つかりません。" - -#, python-format -msgid "Cannot find source CG instance. consistencygroup_id: %s." -msgstr "ソース CG のインスタンスが見つかりません。consistencygroup_id: %s。" - -#, python-format -msgid "Cannot get iSCSI ipaddresses or multipath flag. Exception is %(ex)s. " -msgstr "iSCSI IP アドレスか、マルチパスフラグを取得できません。例外: %(ex)s" - -#, python-format -msgid "Cannot get mcs_id by channel id: %(channel_id)s." -msgstr "チャンネル ID %(channel_id)s によって mcs_id を取得できません。" - -msgid "Cannot get necessary pool or storage system information." -msgstr "必要なプールまたはストレージシステムの情報を取得できません。" - -#, python-format -msgid "" -"Cannot get or create a storage group: %(sgGroupName)s for volume " -"%(volumeName)s " -msgstr "" -"ボリューム %(volumeName)s のストレージグループ %(sgGroupName)s を取得または作" -"成できません " - -#, python-format -msgid "Cannot get or create initiator group: %(igGroupName)s. " -msgstr "イニシエーターグループ %(igGroupName)s を取得または作成できません。" - -#, python-format -msgid "Cannot get port group: %(pgGroupName)s. " -msgstr "ポートグループ %(pgGroupName)s を取得できません。" - -#, python-format -msgid "" -"Cannot get storage group: %(sgGroupName)s from masking view " -"%(maskingViewInstanceName)s. 
" -msgstr "" -"ストレージグループ %(sgGroupName)s をマスキングビュー " -"%(maskingViewInstanceName)s から取得できません。" - -#, python-format -msgid "" -"Cannot get supported size range for %(sps)s Return code: %(rc)lu. Error: " -"%(error)s." -msgstr "" -"%(sps)s でサポートされるサイズ範囲を取得できません。戻りコード: %(rc)lu。エ" -"ラー: %(error)s。" - -#, python-format -msgid "" -"Cannot get the default storage group for FAST policy: %(fastPolicyName)s." -msgstr "" -"FAST ポリシー %(fastPolicyName)s のデフォルトのストレージグループを取得できま" -"せん。" - -msgid "Cannot get the portgroup from the masking view." -msgstr "マスキングビューからポートグループを取得できません。" - -msgid "Cannot mount Scality SOFS, check syslog for errors" -msgstr "" -"Scality SOFS をマウントできません。syslog でエラーについて確認してください。" - -msgid "Cannot ping DRBDmanage backend" -msgstr "DRBDmanage のバックエンドに ping を送信できません。" - -#, python-format -msgid "Cannot place volume %(id)s on %(host)s" -msgstr "ボリューム %(id)s をホスト %(host)s 上に配置できません" - -#, python-format -msgid "" -"Cannot provide both 'cgsnapshot_id' and 'source_cgid' to create consistency " -"group %(name)s from source." -msgstr "" -"ソースから整合性グループ %(name)s を作成するために、'cgsnapshot_id' または " -"'source_cgid' の両方を提供することができません。" - -#, python-format -msgid "" -"Cannot provide both 'group_snapshot_id' and 'source_group_id' to create " -"group %(name)s from source." -msgstr "" -"ソースからグループ %(name)s を作成するために、 'group_snapshot_id' または " -"'source_group_id' の両方を提供することができません。" - -msgid "Cannot register resource" -msgstr "リソースを登録できません。" - -msgid "Cannot register resources" -msgstr "リソースを登録できません。" - -#, python-format -msgid "" -"Cannot remove volume %(volume_id)s from consistency group %(group_id)s " -"because it is not in the group." -msgstr "" -"ボリューム %(volume_id)s は整合性グループ %(group_id)s にないため、このグルー" -"プから削除できません。" - -#, python-format -msgid "" -"Cannot remove volume %(volume_id)s from consistency group %(group_id)s " -"because volume is in an invalid state: %(status)s. Valid states are: " -"%(valid)s." 
-msgstr "" -"ボリューム %(volume_id)s は無効な状態 %(status)s であるため、整合性グルー" -"プ%(group_id)s から削除できません。有効な状態は %(valid)s です。" - -#, python-format -msgid "" -"Cannot remove volume %(volume_id)s from group %(group_id)s because it is not " -"in the group." -msgstr "" -"ボリューム %(volume_id)s はグループ %(group_id)s 内に存在しないため、このグ" -"ループから削除できません。" - -#, python-format -msgid "" -"Cannot remove volume %(volume_id)s from group %(group_id)s because volume is " -"in an invalid state: %(status)s. Valid states are: %(valid)s." -msgstr "" -"ボリューム %(volume_id)s は無効な状態 %(status)s であるため、グループ " -"%(group_id)s から削除できません。有効な状態は %(valid)s です。" - -#, python-format -msgid "Cannot retype from HPE3PARDriver to %s." -msgstr "HPE3PARDriver から %s にタイプ変更することはできません。" - -msgid "Cannot retype from one 3PAR array to another." -msgstr "3PAR アレイから別のアレイにタイプ変更することはできません。" - -msgid "Cannot retype to a CPG in a different domain." -msgstr "別のドメインの CPG にタイプ変更できません。" - -msgid "Cannot retype to a snap CPG in a different domain." -msgstr "別のドメインのスナップ CPG にタイプ変更できません。" - -msgid "" -"Cannot run vgc-cluster command, please ensure software is installed and " -"permissions are set properly." -msgstr "" -"vgc-cluster コマンドを実行できません。ソフトウェアが実装済みで、権限が適切に" -"設定されていることを確認してください。" - -msgid "Cannot save group_snapshots changes in group object update." -msgstr "" -"グループオブジェクトの更新で、 group_snapshots の変更を保存できません。" - -msgid "Cannot save volume_types changes in group object update." -msgstr "グループオブジェクトの更新で、 volume_types の変更を保存できません。" - -msgid "Cannot save volumes changes in group object update." -msgstr "グループオブジェクトの更新で、 ボリュームの変更を保存できません。" - -msgid "Cannot set both hitachi_serial_number and hitachi_unit_name." -msgstr "" -"hitachi_serial_number と hitachi_unit_name の両方を設定することはできません。" - -msgid "Cannot specify both protection domain name and protection domain id." -msgstr "保護ドメイン名と保護ドメイン ID の両方を指定することはできません。" - -msgid "Cannot specify both storage pool name and storage pool id." 
-msgstr "" -"ストレージプール名とストレージプール ID の両方を指定することはできません。" - -#, python-format -msgid "" -"Cannot update consistency group %(group_id)s because no valid name, " -"description, add_volumes, or remove_volumes were provided." -msgstr "" -"有効な名前、説明、add_volumes、または remove_volumes が指定されなかったため、" -"整合性グループ %(group_id)s を更新できません。" - -#, python-format -msgid "" -"Cannot update consistency group %s, status must be available, and it cannot " -"be the source for an ongoing CG or CG Snapshot creation." -msgstr "" -"整合性グループ %s を更新できません。状態は「使用可能」である必要があります。 " -"これを進行中の CG または CG スナップショットのソースとすることはできません。" - -msgid "Cannot update encryption specs. Volume type in use." -msgstr "暗号化仕様を更新できません。ボリュームタイプは使用中です。" - -#, python-format -msgid "" -"Cannot update group %(group_id)s because no valid name, description, " -"add_volumes, or remove_volumes were provided." -msgstr "" -"有効な名前、説明、add_volumes 、または remove_volumes が指定されなかったた" -"め、グループ %(group_id)s を更新できません。" - -#, python-format -msgid "Cannot update group_type %(id)s" -msgstr "group_type %(id)s を更新できません。" - -#, python-format -msgid "Cannot update volume_type %(id)s" -msgstr "volume_type %(id)s を更新できません。" - -#, python-format -msgid "Cannot verify the existence of object:%(instanceName)s." -msgstr "オブジェクト %(instanceName)s の存在を確認できません。" - -#, python-format -msgid "CgSnapshot %(cgsnapshot_id)s could not be found." -msgstr "CgSnapshot %(cgsnapshot_id)s が見つかりませんでした。" - -msgid "" -"CgSnapshot status must be available or error, and no CG can be currently " -"using it as source for its creation." -msgstr "" -"cgsnapshot の状態は「使用可能」または「エラー」でなければなりません。またこれ" -"をソースとして用いて CG を作成することは現在できません。" - -msgid "Cgsnahost is empty. No consistency group will be created." -msgstr "Cgsnahost が空です。整合性グループは作成されません。" - -msgid "Change hostlun id error." -msgstr "hostlun ID 変更のエラー。" - -msgid "Change lun priority error." -msgstr "LUN 優先順位変更のエラー。" - -msgid "Change lun smarttier policy error." 
-msgstr "LUN smarttier ポリシー変更のエラー。" - -#, python-format -msgid "" -"Change would make usage less than 0 for the following resources: %(unders)s" -msgstr "変更によって、次のリソースの使用量が 0 未満になります: %(unders)s" - -msgid "Check access permissions for the ZFS share assigned to this driver." -msgstr "" -"このドライバーに割り当てられている ZFS 共有のアクセス権を確認してください。" - -msgid "Check hostgroup associate error." -msgstr "ホストグループ関連付けの確認のエラー。" - -msgid "Check initiator added to array error." -msgstr "アレイに追加されたイニシエーターの確認のエラー。" - -msgid "Check initiator associated to host error." -msgstr "ホストに関連付けられたイニシエーターの確認のエラー。" - -msgid "Check lungroup associate error." -msgstr "LUN グループ関連付けの確認のエラー。" - -msgid "Check portgroup associate error." -msgstr "ポートグループ関連付けの確認のエラー。" - -msgid "" -"Check the state of the http service. Also ensure that the https port number " -"is the same as the one specified in cinder.conf." -msgstr "" -"HTTP サービスの状態を確認してください。また、HTTPS ポート番号が cinder.conf " -"に指定されている番号と同じであることも確認してください。" - -msgid "Chunk size is not multiple of block size for creating hash." -msgstr "" -"チャンクサイズが、ハッシュを作成するためのブロックサイズの倍数ではありませ" -"ん。" - -#, python-format -msgid "Cisco Fibre Channel Zoning CLI error: %(reason)s" -msgstr "CiscoファイバーチャネルゾーニングCLIエラー:%(reason)s" - -#, python-format -msgid "Client with ip %s wasn't found " -msgstr "IP %s のクライアントが見つかりませんでした。" - -msgid "" -"Clone can't be taken individually on a volume that is part of a Consistency " -"Group" -msgstr "" -"整合性グループの一部のボリュームのクローンを個別に取得することはできません。" - -#, python-format -msgid "Clone feature is not licensed on %(storageSystem)s." -msgstr "%(storageSystem)s では複製フィーチャーはライセンス交付されていません。" - -#, python-format -msgid "" -"Clone type '%(clone_type)s' is invalid; valid values are: '%(full_clone)s' " -"and '%(linked_clone)s'." -msgstr "" -"クローンタイプ '%(clone_type)s' は無効です。有効な値は '%(full_clone)s' およ" -"び '%(linked_clone)s' です。" - -#, python-format -msgid "Clone volume %(new_volume_name)s failed, volume status is: %(status)s." 
-msgstr "" -"ボリューム %(new_volume_name)s のクローンが失敗しました。ボリュームのステータ" -"ス: %(status)s" - -#, python-format -msgid "Clone volume %s failed while waiting for success." -msgstr "ボリューム %s のクローンが、成功を待っている間に失敗しました。" - -msgid "Cluster" -msgstr "クラスター" - -#, python-format -msgid "Cluster %(id)s could not be found." -msgstr "クラスター %(id)s が見つかりませんでした。" - -#, python-format -msgid "Cluster %(id)s still has hosts." -msgstr "クラスター %(id)s はまだホストを持っています。" - -#, python-format -msgid "Cluster %(name)s already exists." -msgstr "クラスター %(name)s は既に存在します。" - -#, python-format -msgid "Cluster %s successfully removed." -msgstr "クラスター %s は正常に削除されました。" - -msgid "" -"Cluster is not formatted. You should probably perform \"dog cluster format\"." -msgstr "" -"クラスターの形式が正しく設定されていません。ドッグクラスター形式を実行する必" -"要があるかもしれません。" - -#, python-format -msgid "Coho Data Cinder driver failure: %(message)s" -msgstr "Coho Data Cinder ドライバーの失敗: %(message)s" - -msgid "Coho rpc port is not configured" -msgstr "Coho の rpc ポートが設定されていません。" - -#, python-format -msgid "Command %(cmd)s blocked in the CLI and was cancelled" -msgstr "CLI でブロックされたコマンド %(cmd)s を取り消しました。" - -#, python-format -msgid "CommandLineHelper._wait_for_condition: %s timeout." -msgstr "CommandLineHelper._wait_for_condition: %s タイムアウト。" - -msgid "Compression Enabler is not installed. Can not create compressed volume." -msgstr "" -"圧縮イネーブラーがインストールされていません。圧縮されたボリュームを作成でき" -"ません。" - -#, python-format -msgid "Compute cluster: %(cluster)s not found." -msgstr "コンピュートクラスター: %(cluster)s が見つかりません。" - -msgid "Condition has no field." -msgstr "条件にフィールドがありません。" - -#, python-format -msgid "Config 'max_over_subscription_ratio' invalid. Must be > 0: %s" -msgstr "" -"設定 'max_over_subscription_ratio' は無効です。0 より大きくなければなりませ" -"ん: %s" - -msgid "Configuration error: dell_sc_ssn not set." -msgstr "設定エラー: dell_sc_ssn not が設定されていません。" - -#, python-format -msgid "Configuration file %(configurationFile)s does not exist." 
-msgstr "構成ファイル %(configurationFile)s が存在しません。" - -msgid "Configuration is not found." -msgstr "設定が見つかりません。" - -#, python-format -msgid "Configuration value %s is not set." -msgstr "構成値 %s が設定されていません。" - -#, python-format -msgid "" -"Conflicting QoS specifications in volume type %s: when QoS spec is " -"associated to volume type, legacy \"netapp:qos_policy_group\" is not allowed " -"in the volume type extra specs." -msgstr "" -"ボリューム種別 %s に競合する QoS 仕様があります。QoS 仕様がボリューム種別に関" -"連付けられている場合、レガシーの \"netapp:qos_policy_group\" はボリューム種別" -"の追加仕様で許可されません。" - -#, python-format -msgid "Connection to glance failed: %(reason)s" -msgstr "Glance との接続に失敗しました: %(reason)s" - -#, python-format -msgid "Connection to swift failed: %(reason)s" -msgstr "Swift との接続に失敗しました: %(reason)s" - -#, python-format -msgid "Connector does not provide: %s" -msgstr "コネクターが %s を提供しません。" - -#, python-format -msgid "Connector doesn't have required information: %(missing)s" -msgstr "コネクターは必要な情報を持っていません: %(missing)s" - -#, python-format -msgid "" -"Consistency Group %(cg_uri)s: update failed\n" -"%(err)s" -msgstr "" -"整合性グループ %(cg_uri)s: 更新に失敗しました。\n" -"%(err)s" - -#, python-format -msgid "" -"Consistency Group %(name)s: create failed\n" -"%(err)s" -msgstr "" -"整合性グループ %(name)s: 作成に失敗しました。\n" -"%(err)s" - -#, python-format -msgid "" -"Consistency Group %(name)s: delete failed\n" -"%(err)s" -msgstr "" -"整合性グループ %(name)s: 削除に失敗しました。\n" -"%(err)s" - -#, python-format -msgid "Consistency Group %s not found" -msgstr "整合性グループ %s が見つかりません。" - -#, python-format -msgid "Consistency Group %s: not found" -msgstr "整合性グループ %s: 見つかりませんでした。" - -msgid "Consistency group is empty. No cgsnapshot will be created." 
-msgstr "整合性グループが空です。cgsnapshot は作成されません。" - -msgid "" -"Consistency group must not have attached volumes, volumes with snapshots, or " -"dependent cgsnapshots" -msgstr "" -"整合性グループは、接続されたボリューム、スナップショットが含まれるボリュー" -"ム、従属 cgsnapshot を持つことができません。" - -msgid "" -"Consistency group status must be available or error and must not have " -"volumes or dependent cgsnapshots" -msgstr "" -"整合性グループの状態は「使用可能」または「エラー」でなければならず、ボリュー" -"ムや従属 cgsnapshot を持つことができません。" - -#, python-format -msgid "ConsistencyGroup %(consistencygroup_id)s could not be found." -msgstr "ConsistencyGroup %(consistencygroup_id)s が見つかりませんでした。" - -msgid "Container" -msgstr "コンテナー" - -msgid "Container size smaller than required file size." -msgstr "コンテナーサイズが必要なファイルサイズを下回っています。" - -msgid "Content type not supported." -msgstr "コンテンツタイプはサポートされていません。" - -#, python-format -msgid "Controller Configuration Service not found on %(storageSystemName)s." -msgstr "コントローラー構成サービスが %(storageSystemName)s に見つかりません。" - -#, python-format -msgid "Controller IP '%(host)s' could not be resolved: %(e)s." -msgstr "コントローラー IP「%(host)s」を解決できませんでした: %(e)s。" - -msgid "Controller IP is missing for ZTE driver." -msgstr "ZTE ドライバーのコントローラー IP がありません。" - -#, python-format -msgid "Converted to %(f1)s, but format is now %(f2)s" -msgstr "%(f1)s に変換されましたが、現在の形式は %(f2)s です" - -#, python-format -msgid "Converted to %(vol_format)s, but format is now %(file_format)s" -msgstr "%(vol_format)s に変換されましたが、現在の形式は %(file_format)s です" - -#, python-format -msgid "Converted to raw, but format is now %s" -msgstr "ローに変換されましたが、現在の形式は %s です" - -#, python-format -msgid "Converted to raw, but format is now %s." -msgstr "ローに変換されましたが、現在の形式は %s です。" - -msgid "Coordinator uninitialized." -msgstr "初期化されていないコーディネーター。" - -#, python-format -msgid "CoprHD internal server error. Error details: %s" -msgstr "CoprHD 内部サーバーエラー。エラー詳細: %s" - -#, python-format -msgid "" -"Copy volume task failed: convert_to_base_volume: id=%(id)s, status=" -"%(status)s." 
-msgstr "" -"ボリュームのコピータスクが失敗しました: convert_to_base_volume: id=%(id)s、" -"status=%(status)s。" - -#, python-format -msgid "" -"Copy volume task failed: create_cloned_volume id=%(id)s, status=%(status)s." -msgstr "" -"ボリュームタスクのコピーが失敗しました: create_cloned_volume id=%(id)s、状態 " -"=%(status)s。" - -#, python-format -msgid "Copying metadata from %(src_type)s %(src_id)s to %(vol_id)s." -msgstr "" -"%(src_type)s %(src_id)s から %(vol_id)s にメタデータをコピーしています。" - -#, python-format -msgid "Could not complete failover: %s" -msgstr "フェイルオーバーが完了しませんでした: %s" - -msgid "" -"Could not determine which Keystone endpoint to use. This can either be set " -"in the service catalog or with the cinder.conf config option " -"'backup_swift_auth_url'." -msgstr "" -"どの Swift のエンドポイントを使用すべきか決定できませんでした。これを設定する" -"には、サービスカタログまたは cinder.conf config のオプションである " -"'backup_swift_auth_url' を使用します。" - -msgid "" -"Could not determine which Swift endpoint to use. This can either be set in " -"the service catalog or with the cinder.conf config option 'backup_swift_url'." -msgstr "" -"どの Swift のエンドポイントを使用すべきか決定できませんでした。これを設定する" -"には、サービスカタログまたは cinder.conf config のオプションである " -"'backup_swift_url' を使用します。" - -msgid "Could not find DISCO wsdl file." -msgstr "DISCO の wsdl ファイルが見つかりませんでした。" - -#, python-format -msgid "Could not find GPFS cluster id: %s." -msgstr "GPFS クラスター ID が見つかりませんでした: %s。" - -#, python-format -msgid "Could not find GPFS file system device: %s." -msgstr "GPFS ファイルシステムデバイスが見つかりませんでした: %s。" - -#, python-format -msgid "Could not find config at %(path)s" -msgstr "%(path)s で config が見つかりませんでした。" - -#, python-format -msgid "Could not find iSCSI export for volume %s" -msgstr "ボリューム %s の iSCSI エクスポートが見つかりませんでした。" - -#, python-format -msgid "Could not find iSCSI target for volume: %(volume_id)s." -msgstr "" -"ボリューム: %(volume_id)s の iSCSI ターゲットを見つけることができませんでし" -"た。" - -#, python-format -msgid "Could not find key in output of command %(cmd)s: %(out)s." 
-msgstr "" -"コマンド %(cmd)s: %(out)sの出力でキーを見つけることができませんでした。" - -#, python-format -msgid "Could not find parameter %(param)s" -msgstr "パラメーター %(param)s が見つかりませんでした。" - -#, python-format -msgid "Could not find target %s" -msgstr "ターゲット %s が見つかりませんでした。" - -#, python-format -msgid "Could not find the parent volume for Snapshot '%s' on array." -msgstr "" -"スナップショット '%s' の親ボリュームをアレイで見つけることができませんでし" -"た。" - -#, python-format -msgid "Could not find unique snapshot %(snap)s on volume %(vol)s." -msgstr "" -"ボリューム %(vol)s で一意のスナップショット %(snap)s を見つけることができませ" -"んでした。" - -#, python-format -msgid "Could not find unique volume %(vol)s." -msgstr "ユニークボリューム %(vol)s が見つかりません。" - -msgid "Could not get system name." -msgstr "システム名を取得できませんでした。" - -#, python-format -msgid "Could not load paste app '%(name)s' from %(path)s" -msgstr "" -"paste アプリケーション '%(name)s' を %(path)s からロードできませんでした。" - -#, python-format -msgid "" -"Could not read information for snapshot %(name)s. Code: %(code)s. Reason: " -"%(reason)s" -msgstr "" -"スナップショット %(name)s の情報を読み取ることができませんでした。コード: " -"%(code)s。理由: %(reason)s" - -#, python-format -msgid "Could not restore configuration file %(file_path)s: %(exc)s" -msgstr "設定ファイル %(file_path)s をリストアできませんでした: %(exc)s" - -#, python-format -msgid "Could not save configuration to %(file_path)s: %(exc)s" -msgstr "設定を %(file_path)s に保存できませんでした: %(exc)s" - -#, python-format -msgid "Could not start consistency group snapshot %s." -msgstr "整合性グループのスナップショット %s を開始できませんでした。" - -#, python-format -msgid "Couldn't find ORM model for Persistent Versioned Object %s." -msgstr "" -"バージョンが設定された永続オブジェクト %s 用のORMモデルが見つかりません。" - -#, python-format -msgid "Couldn't remove cluster %s because it doesn't exist." -msgstr "クラスター %s は存在しないため削除できませんでした。" - -#, python-format -msgid "Couldn't remove cluster %s because it still has hosts." -msgstr "クラスター %s はまだホストを持っているため削除できませんでした。" - -#, python-format -msgid "Counter %s not found" -msgstr "カウンター %s が見つかりません" - -msgid "Create QoS policy error." 
-msgstr "QoS ポリシー作成のエラー。" - -#, python-format -msgid "" -"Create backup aborted, expected backup status %(expected_status)s but got " -"%(actual_status)s." -msgstr "" -"バックアップの作成が中止しました。予期していたバックアップの状態は " -"%(expected_status)s ですが、%(actual_status)s を受け取りました。" - -#, python-format -msgid "" -"Create backup aborted, expected volume status %(expected_status)s but got " -"%(actual_status)s." -msgstr "" -"バックアップの作成が中止しました。予期していたボリュームの状態は " -"%(expected_status)s ですが、%(actual_status)s を受け取りました。" - -msgid "Create export for volume failed." -msgstr "ボリュームのエクスポートの作成に失敗しました。" - -msgid "Create group failed." -msgstr "グループの作成に失敗しました。" - -msgid "Create hostgroup error." -msgstr "ホストグループ作成のエラー。" - -#, python-format -msgid "Create hypermetro error. %s." -msgstr "hypermetro 作成のエラー。%s。" - -msgid "Create lun error." -msgstr "LUN 作成のエラー。" - -msgid "Create lun migration error." -msgstr "LUN マイグレーション作成のエラー。" - -msgid "Create luncopy error." -msgstr "LUN コピー作成のエラー。" - -msgid "Create lungroup error." -msgstr "LUN グループ作成のエラー。" - -msgid "Create manager volume flow failed." -msgstr "マネージャーボリュームフローの作成が失敗しました。" - -msgid "Create port group error." -msgstr "ポートグループ作成のエラー。" - -msgid "Create replication error." -msgstr "レプリケーション作成のエラー。" - -#, python-format -msgid "Create replication pair failed. Error: %s." -msgstr "レプリケーションペアの作成が失敗しました。エラー: %s。" - -msgid "Create snapshot error." -msgstr "スナップショット作成のエラー。" - -#, python-format -msgid "Create volume error. Because %s." -msgstr "ボリューム作成のエラー。理由 %s。" - -msgid "Create volume failed." -msgstr "ボリュームの作成に失敗しました。" - -#, python-format -msgid "Create volume failed. Volume name: %(name)s. Return code: %(ret)s." -msgstr "" -"ボリュームの作成に失敗しました。ボリューム名: %(name)s 戻りコード: %(ret)s" - -msgid "Creating a consistency group from a source is not currently supported." -msgstr "ソースからの整合性グループの作成は現在サポートされません。" - -#, python-format -msgid "" -"Creating and activating zone set failed: (Zone set=%(cfg_name)s error=" -"%(err)s)." 
-msgstr "" -"ゾーンセットの作成およびアクティブ化に失敗しました: (Zone set=%(cfg_name)s " -"error=%(err)s)。" - -#, python-format -msgid "" -"Creating and activating zone set failed: (Zone set=%(zoneset)s error=" -"%(err)s)." -msgstr "" -"ゾーンセットの作成およびアクティブ化に失敗しました: (Zone set=%(zoneset)s " -"error=%(err)s)。" - -#, python-format -msgid "Creating usages for %(begin_period)s until %(end_period)s" -msgstr "%(begin_period)s から %(end_period)s までの使用状況を作成中" - -msgid "" -"Credentials configuration parameters missing: you need to set hnas_password " -"or hnas_ssh_private_key in the cinder.conf." -msgstr "" -"認証設定パラメーターがありません: hnas_password または hnas_ssh_private_key " -"を cinder.conf に設定する必要があります。" - -msgid "Current host isn't part of HGST domain." -msgstr "現在のホストが HGST ドメインに含まれません。" - -#, python-format -msgid "" -"Current host not valid for volume %(id)s with type %(type)s, migration not " -"allowed" -msgstr "" -"現在のホストは、タイプ %(type)s のボリューム %(id)s に対して無効です。マイグ" -"レーションは許可されません" - -#, python-format -msgid "" -"Currently mapped host for volume %(vol)s is in unsupported host group with " -"%(group)s." -msgstr "" -"現在、ボリューム %(vol)s のマップ済みホストは、サポート対象ではない " -"%(group)s のホストグループ内にあります。" - -#, python-format -msgid "" -"DRBDmanage driver error: expected key \"%s\" not in answer, wrong DRBDmanage " -"version?" -msgstr "" -"DRBDmanage ドライバーのエラー: 予期されたキー \"%s\" が答えに含まれていませ" -"ん。DRBDmanage のバージョンが間違っている可能性があります。" - -msgid "" -"DRBDmanage driver setup error: some required libraries (dbus, drbdmanage.*) " -"not found." 
-msgstr "" -"DRBDmanage ドライバーの設定エラー: 必要なライブラリー (dbus や drbdmanage.* " -"など) が見つかりません。" - -#, python-format -msgid "DRBDmanage expected one resource (\"%(res)s\"), got %(n)d" -msgstr "" -"DRBDmanage が 1 つのリソース (\"%(res)s\") を予期しましたが、%(n)d が得られま" -"した。" - -#, python-format -msgid "" -"DRBDmanage timeout waiting for new volume after snapshot restore; resource " -"\"%(res)s\", volume \"%(vol)s\"" -msgstr "" -"DRBDmanage でスナップショットのリストア後新規のボリュームの待機のタイムアウト" -"が発生しました。リソース \"%(res)s\"、ボリューム \"%(vol)s\"" - -#, python-format -msgid "" -"DRBDmanage timeout waiting for snapshot creation; resource \"%(res)s\", " -"snapshot \"%(sn)s\"" -msgstr "" -"DRBDmanage でスナップショットの作成のタイムアウトが発生しました。リソース " -"\"%(res)s\"、スナップショット \"%(sn)s\"" - -#, python-format -msgid "" -"DRBDmanage timeout waiting for volume creation; resource \"%(res)s\", volume " -"\"%(vol)s\"" -msgstr "" -"DRBDmanage でボリューム作成の待機のタイムアウトが発生しました。リソース " -"\"%(res)s\"、ボリューム \"%(vol)s\"" - -#, python-format -msgid "" -"DRBDmanage timeout waiting for volume size; volume ID \"%(id)s\" (res " -"\"%(res)s\", vnr %(vnr)d)" -msgstr "" -"DRBDmanage でボリュームサイズの待機のタイムアウトが発生しました。ボリューム " -"ID \"%(id)s\" (res \"%(res)s\"、vnr %(vnr)d)" - -msgid "Data ONTAP API version could not be determined." -msgstr "データ ONTAP API バージョンを判別できませんでした。" - -msgid "Data ONTAP operating in 7-Mode does not support QoS policy groups." -msgstr "" -"7-Mode で動作する Data ONTAP は QoS ポリシーグループをサポートしません。" - -msgid "Database schema downgrade is not allowed." -msgstr "データベーススキーマのダウングレードはできません。" - -#, python-format -msgid "Dataset %s is not shared in Nexenta Store appliance" -msgstr "データセット %s は Nexenta Store アプライアンスで共有されません" - -#, python-format -msgid "Dataset group %s not found at Nexenta SA" -msgstr "データセットグループ %s が Nexenta SA で見つかりません" - -#, python-format -msgid "" -"Dedup is a valid provisioning type, but requires WSAPI version " -"'%(dedup_version)s' version '%(version)s' is installed." 
-msgstr "" -"Dedup は有効なプロビジョニングタイプですが、WSAPI バージョン" -"「%(dedup_version)s」バージョン「%(version)s」がインストールされていることを" -"必要としています。" - -msgid "Dedup luns cannot be extended" -msgstr "Dedup luns は拡張できません" - -msgid "" -"Deduplication Enabler is not installed. Can not create deduplicated volume." -msgstr "" -"重複排除イネーブラーがインストールされていません。重複排除されたボリュームを" -"作成できません。" - -msgid "Default group type can not be found." -msgstr "デフォルトのグループ種別が見つかりません。" - -#, python-format -msgid "" -"Default quota for resource: %(res)s is set by the default quota flag: quota_" -"%(res)s, it is now deprecated. Please use the default quota class for " -"default quota." -msgstr "" -"リソース %(res)s のデフォルトのクォータは、デフォルトのクォータフラグ: quota_" -"%(res)s によって設定されていますが、これは現在推奨されていません。デフォルト" -"のクォータにデフォルトのクォータクラスを使用してください。" - -msgid "Default volume type can not be found." -msgstr "デフォルトのボリュームタイプが見つかりません。" - -msgid "Delete LUNcopy error." -msgstr "LUN コピー削除のエラー。" - -msgid "Delete QoS policy error." -msgstr "QoS ポリシー削除のエラー。" - -msgid "Delete associated lun from lungroup error." -msgstr "LUN グループからの関連付けされた LUN 削除のエラー。" - -#, python-format -msgid "" -"Delete backup aborted, the backup service currently configured " -"[%(configured_service)s] is not the backup service that was used to create " -"this backup [%(backup_service)s]." -msgstr "" -"バックアップの削除が中止しました。現在構成されているバックアップサービス " -"[%(configured_service)s] は、このバックアップの作成に使用されたバックアップ" -"サービス [%(backup_service)s] ではありません。" - -msgid "Delete consistency group failed." -msgstr "整合性グループの削除に失敗しました。" - -msgid "Delete group failed." -msgstr "グループの削除に失敗しました。" - -msgid "Delete hostgroup error." -msgstr "ホストグループ削除のエラー。" - -msgid "Delete hostgroup from mapping view error." -msgstr "マッピングビューからのホストグループ削除のエラー。" - -msgid "Delete hypermetro from metrogroup error." -msgstr "metrogroup からの hypermetro 削除エラー。" - -msgid "Delete hypermetro group error." -msgstr "hypermetro グループ削除エラー。" - -msgid "Delete lun error." -msgstr "LUN 削除のエラー。" - -msgid "Delete lun migration error." 
-msgstr "LUN マイグレーション削除のエラー。" - -msgid "Delete lungroup error." -msgstr "LUN グループ削除のエラー。" - -msgid "Delete lungroup from mapping view error." -msgstr "マッピングビューからの LUN グループ削除のエラー。" - -msgid "Delete mapping view error." -msgstr "マッピングビュー削除のエラー。" - -msgid "Delete port group error." -msgstr "ポートグループ削除のエラー。" - -msgid "Delete portgroup from mapping view error." -msgstr "マッピングビューからのポートグループ削除のエラー。" - -msgid "Delete snapshot error." -msgstr "スナップショット削除のエラー。" - -#, python-format -msgid "Delete snapshot of volume not supported in state: %s." -msgstr "状態 %s でのボリュームのスナップショット削除はサポートされていません。" - -#, python-format -msgid "Delete volume failed. Clone name: %(name)s. Return code: %(ret)s." -msgstr "" -"ボリュームの削除に失敗しました。クローン名: %(name)s 戻りコード: %(ret)s" - -#, python-format -msgid "Delete volume failed. Volume name: %(name)s.Return code: %(ret)s." -msgstr "" -"ボリュームの削除に失敗しました。ボリューム名: %(name)s 戻りコード: %(ret)s" - -#, python-format -msgid "" -"Delete_backup aborted, expected backup status %(expected_status)s but got " -"%(actual_status)s." -msgstr "" -"バックアップの削除が中止しました。予期していたバックアップの状態は " -"%(expected_status)s ですが、%(actual_status)s を受け取りました。" - -msgid "Deleting volume from database and skipping rpc." -msgstr "データベースからボリュームを作成中。rpc をスキップします。" - -#, python-format -msgid "Deleting volume metadata is not allowed for volumes in %s status." -msgstr "" -"ボリュームの状態が %s である場合は、ボリュームメタデータの削除は許可されませ" -"ん。" - -#, python-format -msgid "Deleting zones failed: (command=%(cmd)s error=%(err)s)." -msgstr "ゾーンの削除に失敗しました: (command=%(cmd)s error=%(err)s)。" - -msgid "Dell API 2.1 or later required for Consistency Group support" -msgstr "整合性グループをサポートするには Dell API 2.1 以降が必要です" - -msgid "" -"Dell Cinder driver configuration error replication not supported with direct " -"connect." 
-msgstr "" -"直接接続では Dell Cinder ドライバーの設定エラーの複製を行うことはできません。" - -#, python-format -msgid "Dell Cinder driver configuration error replication_device %s not found" -msgstr "" -"Dell Cinder ドライバーの設定エラー の replication_device %s が見つかりませ" -"ん。" - -msgid "Describe-resource is admin only functionality" -msgstr "Describe-resource は管理者専用の機能です" - -#, python-format -msgid "Destination has migration_status %(stat)s, expected %(exp)s." -msgstr "" -"宛先の migration_status は %(stat)s ですが、予期されたのは %(exp)s です。" - -msgid "Destination volume not mid-migration." -msgstr "宛先ボリュームはマイグレーション中ではありません" - -msgid "" -"Detach volume failed: More than one attachment, but no attachment_id " -"provided." -msgstr "" -"ボリュームの切り離しが失敗しました: 2 つ以上の接続が存在するものの、" -"attachment_id が提供されていません。" - -msgid "Detach volume from instance and then try again." -msgstr "ボリュームをインスタンスから切り離して、再試行してください。" - -#, python-format -msgid "Detaching volume %(volumename)s from host %(hostname)s failed: %(err)s" -msgstr "" -"ホスト %(hostname)s からの ボリューム%(volumename)s の切断に失敗しました : " -"%(err)s" - -#, python-format -msgid "Detected more than one volume with name %(vol_name)s" -msgstr "名前 %(vol_name)s を持つ複数のボリュームが検出されました。" - -#, python-format -msgid "Did not find expected column in %(fun)s: %(hdr)s." -msgstr "予期された列が %(fun)s で見つかりませんでした: %(hdr)s" - -#, python-format -msgid "Did not find the expected key %(key)s in %(fun)s: %(raw)s." -msgstr "予期されるキー %(key)s が %(fun)s: %(raw)s で見つかりませんでした。" - -msgid "Disabled reason contains invalid characters or is too long" -msgstr "「無効理由」に無効な文字が含まれているか、理由が長すぎます" - -#, python-format -msgid "Domain with name %s wasn't found." -msgstr "名前が %s のドメインが見つかりませんでした。" - -msgid "Down Hosts" -msgstr "停止中のホスト" - -#, python-format -msgid "" -"Downlevel GPFS Cluster Detected. GPFS Clone feature not enabled in cluster " -"daemon level %(cur)s - must be at least at level %(min)s." 
-msgstr "" -"ダウンレベル GPFS クラスターが検出されました。クラスターデーモンレベル " -"%(cur)s で GPFS 複製フィーチャーが有効になっていません。レベル %(min)s 以上は" -"必要です。" - -#, python-format -msgid "Driver initialize connection failed (error: %(err)s)." -msgstr "ドライバーの初期化接続に失敗しました (エラー: %(err)s)。" - -msgid "Driver must implement initialize_connection" -msgstr "ドライバーは initialize_connection を実装する必要があります。" - -#, python-format -msgid "" -"Driver successfully decoded imported backup data, but there are missing " -"fields (%s)." -msgstr "" -"ドライバーがインポートされたバックアップデータを正常に復号化しましたが、欠け" -"ているフィールド (%s) があります。" - -#, python-format -msgid "Dsware Create Snapshot failed! Result: %s." -msgstr "Dsware でのスナップショットの作成に失敗しました。結果: %s" - -msgid "Dsware clone volume failed: volume can not be found from Dsware." -msgstr "" -"Dsware でのボリュームのクローンが失敗しました: ボリュームが Dsware 内に見つか" -"りません。" - -#, python-format -msgid "" -"Dsware clone volume time out. Volume: %(new_volume_name)s, status: %(status)s" -msgstr "" -"Dsware での ボリューム %(new_volume_name)s のクローンがタイムアウトしました。" -"ステータス: %(status)s" - -msgid "Dsware config file not exists!" -msgstr "Dsware 設定ファイルが見つかりません。" - -#, python-format -msgid "Dsware create volume failed! Result is: %s." -msgstr "Dsware でボリュームの作成に失敗しました。 結果は %s です。" - -#, python-format -msgid "Dsware delete volume failed: %s!" -msgstr "Dsware でのボリュームの削除に失敗しました: %s" - -#, python-format -msgid "Dsware detach volume from host failed: %s!" -msgstr "Dsware でホストからのボリュームの切り離しに失敗しました: %s " - -#, python-format -msgid "" -"Dsware extend Volume failed! New size %(new_size)s should be greater than " -"old size %(old_size)s!" -msgstr "" -"Dsware で、ボリュームの拡張に失敗しました。変更後のサイズ %(new_size)s は現在" -"のサイズ %(old_size)s より大きくなければいけません。" - -#, python-format -msgid "Dsware extend Volume failed! Result:%s." -msgstr "Dsware でボリュームの拡張に失敗しました。 結果: %s " - -#, python-format -msgid "Dsware fails to start cloning volume %s." -msgstr "Dsware がボリューム %s のクローニングの開始に失敗しました。" - -msgid "Dsware get manager ip failed, volume provider_id is None!" 
-msgstr "" -"Dsware が管理 ip の取得に失敗しました。 volume provider_id が None です。" - -msgid "Dsware get snapshot failed!" -msgstr "Dsware でのスナップショットの取得に失敗しました。" - -msgid "Dsware query Dsware version failed!" -msgstr "Dsware で、 Dsware バージョンの問い合わせに失敗しました。" - -#, python-format -msgid "Dsware query volume %s failed!" -msgstr "Dsware でのボリューム %s の問い合わせに失敗しました。" - -#, python-format -msgid "Dsware: create volume from snap failed. Result: %s." -msgstr "" -"Dsware: スナップショットからのボリュームの作成に失敗しました。 結果: %s" - -msgid "Dsware: volume size can not be less than snapshot size." -msgstr "" -"Dsware: ボリュームのサイズはスナップショットのサイズより小さくはできません。" - -#, python-format -msgid "" -"E-series proxy API version %(current_version)s does not support full set of " -"SSC extra specs. The proxy version must be at at least %(min_version)s." -msgstr "" -"E シリーズのプロキシー API バージョン %(current_version)s は SSC の追加仕様を" -"すべてサポートするわけではありません。このプロキシーバージョンは少なくとも " -"%(min_version)s でなければなりません。" - -#, python-format -msgid "" -"EMC VNX Cinder Driver CLI exception: %(cmd)s (Return Code: %(rc)s) (Output: " -"%(out)s)." -msgstr "" -"EMC VNX Cinder ドライバー CLI 例外: %(cmd)s (戻りコード: %(rc)s)(出力: " -"%(out)s)。" - -#, python-format -msgid "" -"EMC VNX Cinder Driver SPUnavailableException: %(cmd)s (Return Code: %(rc)s) " -"(Output: %(out)s)." -msgstr "" -"EMC VNX Cinder ドライバーの SP が使用できない例外: %(cmd)s (戻りコード: " -"%(rc)s)(出力: %(out)s)。" - -msgid "" -"EcomServerIp, EcomServerPort, EcomUserName, EcomPassword must have valid " -"values." -msgstr "" -"EcomServerIp、EcomServerPort、EcomUserName、EcomPassword に有効な値を設定する" -"必要があります。" - -#, python-format -msgid "" -"Either 'cgsnapshot_id' or 'source_cgid' must be provided to create " -"consistency group %(name)s from source." -msgstr "" -"ソースから整合性グループ %(name)s を作成するには、'cgsnapshot_id' または " -"'source_cgid' を指定する必要があります。" - -#, python-format -msgid "" -"Either 'group_snapshot_id' or 'source_group_id' must be provided to create " -"group %(name)s from source." 
-msgstr "" -"ソースからグループ %(name)s を作成するには、 'group_snapshot_id' または " -"'source_group_id' を指定する必要があります。" - -#, python-format -msgid "" -"Either SLO: %(slo)s or workload %(workload)s is invalid. Examine previous " -"error statement for valid values." -msgstr "" -"SLO %(slo)s またはワークロード %(workload)s のいずれかが無効です。以前のエ" -"ラーステートメントで有効な値を調べてください。" - -msgid "Either hitachi_serial_number or hitachi_unit_name is required." -msgstr "hitachi_serial_number または hitachi_unit_name のいずれかが必要です。" - -#, python-format -msgid "Element Composition Service not found on %(storageSystemName)s." -msgstr "要素構成サービスが %(storageSystemName)s に見つかりません。" - -msgid "Enables QoS." -msgstr "QoS を有効化します。" - -msgid "Enables compression." -msgstr "圧縮を有効化します。" - -msgid "Enables replication." -msgstr "レプリケーションを有効化します。" - -msgid "Ensure that configfs is mounted at /sys/kernel/config." -msgstr "" -"configfs が /sys/kernel/config でマウントされていることを確認してください。" - -#, python-format -msgid "" -"Error Adding Initiator: %(initiator)s on groupInitiatorGroup: " -"%(initiatorgroup)s Return code: %(ret.status)d Message: %(ret.data)s ." -msgstr "" -"groupInitiatorGroup %(initiatorgroup)s でのイニシエーター %(initiator)s の追" -"加中にエラーが発生しました。戻りコード: %(ret.status)d メッセージ: " -"%(ret.data)s。" - -#, python-format -msgid "" -"Error Adding to TargetGroup: %(targetgroup)s withIQN: %(iqn)sReturn code: " -"%(ret.status)d Message: %(ret.data)s." -msgstr "" -"IQN %(iqn)s でターゲットグループ %(targetgroup)s への追加中にエラーが発生しま" -"した。戻りコード: %(ret.status)d、メッセージ: %(ret.data)s。" - -#, python-format -msgid "Error Attaching volume %(vol)s." -msgstr " ボリューム %(vol)s の追加に失敗しました。" - -#, python-format -msgid "" -"Error Cloning Snapshot: %(snapshot)s on Volume: %(lun)s of Pool: %(pool)s " -"Project: %(project)s Clone project: %(clone_proj)s Return code: " -"%(ret.status)d Message: %(ret.data)s." 
-msgstr "" -"プール: %(pool)s プロジェクト: %(project)s 複製プロジェクト: %(clone_proj)s " -"のボリューム: %(lun)s でのスナップショット: %(snapshot)s の複製中にエラーが発" -"生しました。戻りコード: %(ret.status)d メッセージ: %(ret.data)s。" - -#, python-format -msgid "" -"Error Create Cloned Volume: %(cloneName)s Return code: %(rc)lu. Error: " -"%(error)s." -msgstr "" -"複製ボリュームの作成エラー: %(cloneName)s。戻りコード: %(rc)lu。エラー: " -"%(error)s。" - -#, python-format -msgid "" -"Error Create Cloned Volume: Volume: %(cloneName)s Source Volume:" -"%(sourceName)s. Return code: %(rc)lu. Error: %(error)s." -msgstr "" -"複製ボリュームの作成エラー: ボリューム: %(cloneName)s、ソースボリューム: " -"%(sourceName)s。戻りコード: %(rc)lu。エラー: %(error)s。" - -#, python-format -msgid "" -"Error Create Group: %(groupName)s. Return code: %(rc)lu. Error: %(error)s." -msgstr "" -"グループの作成エラー: %(groupName)s。戻りコード: %(rc)lu。エラー: %(error)s。" - -#, python-format -msgid "" -"Error Create Masking View: %(groupName)s. Return code: %(rc)lu. Error: " -"%(error)s." -msgstr "" -"マスキングビューの作成エラー: %(groupName)s。戻りコード: %(rc)lu。エラー: " -"%(error)s。" - -#, python-format -msgid "" -"Error Create Volume: %(volumeName)s. Return code: %(rc)lu. Error: %(error)s." -msgstr "" -"ボリュームの作成エラー: %(volumeName)s。戻りコード: %(rc)lu。エラー: " -"%(error)s。" - -#, python-format -msgid "" -"Error Create Volume: %(volumename)s. Return code: %(rc)lu. Error: %(error)s." -msgstr "" -"ボリュームの作成エラー: %(volumename)s。戻りコード: %(rc)lu。エラー: " -"%(error)s。" - -#, python-format -msgid "" -"Error CreateGroupReplica: source: %(source)s target: %(target)s. Return " -"code: %(rc)lu. Error: %(error)s." -msgstr "" -"CreateGroupReplica エラー: ソース: %(source)s ターゲット: %(target)s。戻り" -"コード: %(rc)lu。エラー: %(error)s。" - -#, python-format -msgid "" -"Error Creating Initiator: %(initiator)s on Alias: %(alias)s Return code: " -"%(ret.status)d Message: %(ret.data)s ." 
-msgstr "" -"エイリアス %(alias)s でのイニシエーター %(initiator)s の作成中にエラーが発生" -"しました。戻りコード: %(ret.status)d、メッセージ: %(ret.data)s" - -#, python-format -msgid "" -"Error Creating Project: %(project)s on Pool: %(pool)s Return code: " -"%(ret.status)d Message: %(ret.data)s ." -msgstr "" -"プール %(pool)s でのプロジェクト %(project)s の作成中にエラーが発生しました。" -"戻りコード: %(ret.status)d、メッセージ: %(ret.data)s。" - -#, python-format -msgid "" -"Error Creating Property: %(property)s Type: %(type)s Description: " -"%(description)s Return code: %(ret.status)d Message: %(ret.data)s." -msgstr "" -"プロパティー: %(property)s タイプ: %(type)s 説明: %(description)s の作成中に" -"エラーが発生しました。戻りコード: %(ret.status)d メッセージ: %(ret.data)s。" - -#, python-format -msgid "" -"Error Creating Share: %(name)s Return code: %(ret.status)d Message: " -"%(ret.data)s." -msgstr "" -"共有 %(name)s の作成中にエラーが発生しました。戻りコード: %(ret.status)d、" -"メッセージ: %(ret.data)s。" - -#, python-format -msgid "" -"Error Creating Snapshot: %(snapshot)s onVolume: %(lun)s to Pool: %(pool)s " -"Project: %(project)s Return code: %(ret.status)d Message: %(ret.data)s." -msgstr "" -"ボリューム %(lun)s でスナップショット %(snapshot)s をプール %(pool)s に作成し" -"ているときにエラーが発生しました。プロジェクト: %(project)s、戻りコード: " -"%(ret.status)d、メッセージ: %(ret.data)s。" - -#, python-format -msgid "" -"Error Creating Snapshot: %(snapshot)s onshare: %(share)s to Pool: %(pool)s " -"Project: %(project)s Return code: %(ret.status)d Message: %(ret.data)s." -msgstr "" -"共有 %(share)s でスナップショット %(snapshot)s をプール %(pool)s に作成中にエ" -"ラーが発生しました。プロジェクト: %(project)s 戻りコード: %(ret.status)d " -"メッセージ: %(ret.data)s。" - -#, python-format -msgid "" -"Error Creating Target: %(alias)sReturn code: %(ret.status)d Message: " -"%(ret.data)s ." -msgstr "" -"ターゲットの作成中にエラーが発生しました: %(alias)s。戻りコード: " -"%(ret.status)d、メッセージ: %(ret.data)s。" - -#, python-format -msgid "" -"Error Creating TargetGroup: %(targetgroup)s withIQN: %(iqn)sReturn code: " -"%(ret.status)d Message: %(ret.data)s ." 
-msgstr "" -"IQN %(iqn)s でターゲットグループ %(targetgroup)s の作成中にエラーが発生しまし" -"た。戻りコード: %(ret.status)d、メッセージ: %(ret.data)s。" - -#, python-format -msgid "" -"Error Creating Volume: %(lun)s Size: %(size)s Return code: %(ret.status)d " -"Message: %(ret.data)s." -msgstr "" -"ボリュームの作成中にエラーが発生しました: %(lun)s。サイズ: %(size)s、戻りコー" -"ド: %(ret.status)d、メッセージ: %(ret.data)s。 " - -#, python-format -msgid "" -"Error Creating new composite Volume Return code: %(rc)lu. Error: %(error)s." -msgstr "" -"新規複合ボリュームの作成エラー。戻りコード: %(rc)lu。エラー: %(error)s。" - -#, python-format -msgid "" -"Error Creating replication action on: pool: %(pool)s Project: %(proj)s " -"volume: %(vol)s for target: %(tgt)s and pool: %(tgt_pool)sReturn code: " -"%(ret.status)d Message: %(ret.data)s ." -msgstr "" -"プール: %(pool)s でのレプリケーションアクションの作成中にエラーが発生しまし" -"た。プロジェクト: %(proj)s ターゲット: %(tgt)s のボリューム: %(vol)s および" -"プール: %(tgt_pool)s 戻りコード: %(ret.status)d メッセージ: %(ret.data)s。" - -msgid "Error Creating unbound volume on an Extend operation." -msgstr "拡張操作でのアンバインド済みボリュームの作成エラーです。" - -msgid "Error Creating unbound volume." -msgstr "アンバインドボリュームの作成エラーです。" - -#, python-format -msgid "" -"Error Delete Volume: %(volumeName)s. Return code: %(rc)lu. Error: %(error)s." -msgstr "" -"ボリュームの削除エラー: %(volumeName)s。戻りコード: %(rc)lu。エラー: " -"%(error)s。" - -#, python-format -msgid "" -"Error Deleting Group: %(storageGroupName)s. Return code: %(rc)lu. Error: " -"%(error)s" -msgstr "" -"グループの削除のエラー: %(storageGroupName)s。戻りコード: %(rc)lu。エラー: " -"%(error)s" - -#, python-format -msgid "" -"Error Deleting Initiator Group: %(initiatorGroupName)s. Return code: " -"%(rc)lu. Error: %(error)s" -msgstr "" -"イニシエーターグループの削除のエラー: %(initiatorGroupName)s。戻りコード: " -"%(rc)lu。エラー: %(error)s" - -#, python-format -msgid "" -"Error Deleting Snapshot: %(snapshot)s on Share: %(share)s to Pool: %(pool)s " -"Project: %(project)s Return code: %(ret.status)d Message: %(ret.data)s." 
-msgstr "" -"共有 %(share)s でスナップショット %(snapshot)s をプール %(pool)s から削除中に" -"エラーが発生しました。プロジェクト: %(project)s、戻りコード: %(ret.status)d、" -"メッセージ: %(ret.data)s。" - -#, python-format -msgid "" -"Error Deleting Snapshot: %(snapshot)s on Volume: %(lun)s to Pool: %(pool)s " -"Project: %(project)s Return code: %(ret.status)d Message: %(ret.data)s." -msgstr "" -"ボリューム %(lun)s でスナップショット %(snapshot)s をプール %(pool)s から削除" -"しているときにエラーが発生しました。プロジェクト: %(project)s、戻りコード: " -"%(ret.status)d、メッセージ: %(ret.data)s。" - -#, python-format -msgid "" -"Error Deleting Volume: %(lun)s from Pool: %(pool)s, Project: %(project)s. " -"Return code: %(ret.status)d, Message: %(ret.data)s." -msgstr "" -"プール: %(pool)s、プロジェクト: %(project)s からのボリューム: %(lun)s の削除" -"中にエラーが発生しました。戻りコード: %(ret.status)d、メッセージ: " -"%(ret.data)s。" - -#, python-format -msgid "" -"Error Deleting project: %(project)s on pool: %(pool)s Return code: " -"%(ret.status)d Message: %(ret.data)s." -msgstr "" -"プール: %(pool)s のプロジェクト: %(project)s の削除中にエラーが発生しました。" -"戻りコード: %(ret.status)d メッセージ: %(ret.data)s。" - -#, python-format -msgid "" -"Error Deleting replication action: %(id)s Return code: %(ret.status)d " -"Message: %(ret.data)s." -msgstr "" -"レプリケーションアクション: %(id)s の削除中にエラーが発生しました。戻りコー" -"ド: %(ret.status)d メッセージ: %(ret.data)s。" - -#, python-format -msgid "" -"Error Extend Volume: %(volumeName)s. Return code: %(rc)lu. Error: %(error)s." -msgstr "" -"拡張ボリュームのエラー: %(volumeName)s。戻りコード: %(rc)lu。エラー: " -"%(error)s。" - -#, python-format -msgid "" -"Error Getting Initiators: InitiatorGroup: %(initiatorgroup)s Return code: " -"%(ret.status)d Message: %(ret.data)s ." -msgstr "" -"イニシエーターの取得中にエラーが発生しました。InitiatorGroup: " -"%(initiatorgroup)s 戻りコード: %(ret.status)d メッセージ: %(ret.data)s。" - -#, python-format -msgid "" -"Error Getting Pool Stats: Pool: %(pool)s Return code: %(status)d Message: " -"%(data)s." 
-msgstr "" -"プール統計: プール: %(pool)sの取得中にエラーが発生しました 戻りコード: " -"%(status)d メッセージ: %(data)s。" - -#, python-format -msgid "" -"Error Getting Project Stats: Pool: %(pool)s Project: %(project)s Return " -"code: %(ret.status)d Message: %(ret.data)s." -msgstr "" -"プロジェクト統計: プール: %(pool)s プロジェクト: %(project)s の取得中にエラー" -"が発生しました。戻りコード: %(ret.status)d メッセージ: %(ret.data)s。" - -#, python-format -msgid "" -"Error Getting Share: %(share)s on Pool: %(pool)s Project: %(project)s Return " -"code: %(ret.status)d Message: %(ret.data)s." -msgstr "" -"プール %(pool)s での共有 %(share)s の取得中にエラーが発生しました。プロジェク" -"ト: %(project)s 戻りコード: %(ret.status)d メッセージ: %(ret.data)s。" - -#, python-format -msgid "" -"Error Getting Snapshot: %(snapshot)s on Volume: %(lun)s to Pool: %(pool)s " -"Project: %(project)s Return code: %(ret.status)d Message: %(ret.data)s." -msgstr "" -"ボリューム %(lun)s からプール %(pool)s へのスナップショット %(snapshot)s の取" -"得中にエラーが発生しました。プロジェクト: %(project)s、戻りコード: " -"%(ret.status)d、メッセージ: %(ret.data)s。" - -#, python-format -msgid "" -"Error Getting Target: %(alias)sReturn code: %(ret.status)d Message: " -"%(ret.data)s ." -msgstr "" -"ターゲットの取得中にエラーが発生しました: %(alias)s。戻りコード: " -"%(ret.status)d、メッセージ: %(ret.data)s。" - -#, python-format -msgid "" -"Error Getting Volume: %(lun)s on Pool: %(pool)s Project: %(project)s Return " -"code: %(ret.status)d Message: %(ret.data)s." -msgstr "" -"プール %(pool)s でのボリューム %(lun)s の取得中にエラーが発生しました。プロ" -"ジェクト: %(project)s、戻りコード: %(ret.status)d、メッセージ: %(ret.data)s。" - -#, python-format -msgid "" -"Error Migrating volume from one pool to another. Return code: %(rc)lu. " -"Error: %(error)s." -msgstr "" -"あるプールから別のプールへのボリュームのマイグレーション中にエラーが発生しま" -"した。戻りコード: %(rc)lu。 エラー: %(error)s。" - -#, python-format -msgid "" -"Error Modifying masking view : %(groupName)s. Return code: %(rc)lu. Error: " -"%(error)s." -msgstr "" -"マスキングビューの変更エラー: %(groupName)s。戻りコード: %(rc)lu。 エラー: " -"%(error)s。" - -#, python-format -msgid "Error Pool ownership: Pool %(pool)s is not owned by %(host)s." 
-msgstr "" -"プール所有権のエラーが発生しました: %(host)s はプール %(pool)s を所有していま" -"せん。" - -#, python-format -msgid "" -"Error Setting props Props: %(props)s on Volume: %(lun)s of Pool: %(pool)s " -"Project: %(project)s Return code: %(ret.status)d Message: %(ret.data)s." -msgstr "" -"プール %(pool)s のボリューム %(lun)s でのプロパティー %(props)s の設定中にエ" -"ラーが発生しました。プロジェクト: %(project)s、戻りコード: %(ret.status)d、" -"メッセージ: %(ret.data)s。" - -#, python-format -msgid "" -"Error Terminating migrate session. Return code: %(rc)lu. Error: %(error)s." -msgstr "" -"マイグレーションセッションの終了エラー。戻りコード: %(rc)lu。エラー: " -"%(error)s。" - -#, python-format -msgid "" -"Error Verifying Initiator: %(iqn)s Return code: %(ret.status)d Message: " -"%(ret.data)s." -msgstr "" -"イニシエーターの検査中にエラーが発生しました: %(iqn)s。戻りコード: " -"%(ret.status)d、メッセージ: %(ret.data)s。" - -#, python-format -msgid "" -"Error Verifying Pool: %(pool)s Return code: %(ret.status)d Message: " -"%(ret.data)s." -msgstr "" -"プールの検査中にエラーが発生しました: %(pool)s。戻りコード: %(ret.status)d、" -"メッセージ: %(ret.data)s。" - -#, python-format -msgid "" -"Error Verifying Project: %(project)s on Pool: %(pool)s Return code: " -"%(ret.status)d Message: %(ret.data)s." -msgstr "" -"プール %(pool)s でのプロジェクト %(project)s の検査中にエラーが発生しました。" -"戻りコード: %(ret.status)d、メッセージ: %(ret.data)s。" - -#, python-format -msgid "" -"Error Verifying Service: %(service)s Return code: %(ret.status)d Message: " -"%(ret.data)s." -msgstr "" -"サービス %(service)s の検査中にエラーが発生しました。戻りコード: " -"%(ret.status)d、メッセージ: %(ret.data)s。" - -#, python-format -msgid "" -"Error Verifying Target: %(alias)s Return code: %(ret.status)d Message: " -"%(ret.data)s." -msgstr "" -"ターゲットの検査中にエラーが発生しました: %(alias)s。戻りコード: " -"%(ret.status)d、メッセージ: %(ret.data)s。" - -#, python-format -msgid "" -"Error Verifying share: %(share)s on Project: %(project)s and Pool: %(pool)s " -"Return code: %(ret.status)d Message: %(ret.data)s." 
-msgstr "" -"プロジェクト %(project)s およびプール %(pool)s で共有 %(share)s を検証中にエ" -"ラーが発生しました。戻りコード: %(ret.status)d メッセージ: %(ret.data)s。" - -#, python-format -msgid "" -"Error adding Volume: %(volumeName)s with instance path: " -"%(volumeInstancePath)s." -msgstr "" -"インスタンスパス %(volumeInstancePath)s によるボリューム %(volumeName)s の追" -"加エラーです。" - -#, python-format -msgid "" -"Error adding initiator to group : %(groupName)s. Return code: %(rc)lu. " -"Error: %(error)s." -msgstr "" -"イニシエーターのグループへの追加エラー: %(groupName)s。戻りコード: " -"%(rc)lu。 エラー: %(error)s。" - -#, python-format -msgid "Error adding volume %(vol)s to %(sg)s. %(error)s." -msgstr " %(sg)s へのボリューム %(vol)s の追加エラー。%(error)s。" - -#, python-format -msgid "Error adding volume to composite volume. Error is: %(error)s." -msgstr "複合ボリュームへのボリュームの追加エラー。エラー: %(error)s。" - -#, python-format -msgid "Error appending volume %(volumename)s to target base volume." -msgstr "" -"ボリューム %(volumename)s のターゲット基本ボリュームへの追加エラーです。" - -#, python-format -msgid "" -"Error associating storage group : %(storageGroupName)s. To fast Policy: " -"%(fastPolicyName)s with error description: %(errordesc)s." -msgstr "" -"ストレージグループ %(storageGroupName)s の FAST ポリシー %(fastPolicyName)sへ" -"の関連付けエラーです。エラーの説明: %(errordesc)s。" - -#, python-format -msgid "Error attaching volume %(vol)s. Target limit might be reached!" -msgstr "ボリューム %(vol)s の接続エラー。ターゲットの制限に達します。" - -#, python-format -msgid "" -"Error break clone relationship: Sync Name: %(syncName)s Return code: " -"%(rc)lu. Error: %(error)s." -msgstr "" -"複製関係の切断エラー: 同期名: %(syncName)s。戻りコード: %(rc)lu。エラー: " -"%(error)s。" - -msgid "Error connecting to ceph cluster." -msgstr "ceph クラスターへの接続エラーです。" - -#, python-format -msgid "Error connecting via ssh: %s" -msgstr "ssh を介した接続中にエラーが発生しました: %s" - -#, python-format -msgid "Error creating volume: %s." -msgstr "ボリュームの作成中にエラーが発生しました: %s。" - -msgid "Error deleting replay profile." 
-msgstr "リプレープロファイルの削除でエラーが発生しました。" - -#, python-format -msgid "Error deleting volume %(ssn)s: %(volume)s" -msgstr "ボリューム %(ssn)s の削除でエラーが発生しました: %(volume)s" - -#, python-format -msgid "Error deleting volume %(vol)s: %(err)s." -msgstr "ボリューム %(vol)s の削除中にエラーが発生しました: %(err)s。" - -#, python-format -msgid "Error during evaluator parsing: %(reason)s" -msgstr "エバリュエーター構文解析中にエラーが発生しました: %(reason)s" - -#, python-format -msgid "" -"Error editing share: %(share)s on Pool: %(pool)s Return code: %(ret.status)d " -"Message: %(ret.data)s ." -msgstr "" -"プール %(pool)s での共有 %(share)s の編集中にエラーが発生しました。戻りコー" -"ド: %(ret.status)d メッセージ: %(ret.data)s。" - -#, python-format -msgid "" -"Error enabling iSER for NetworkPortal: please ensure that RDMA is supported " -"on your iSCSI port %(port)d on ip %(ip)s." -msgstr "" -"NetworkPortal の iSER の有効化に関するエラー: IP %(ip)s 上の iSCSI ポート " -"%(port)d で RDMA がサポートされていることを確認してください。" - -#, python-format -msgid "Error encountered during cleanup of a failed attach: %(ex)s" -msgstr "失敗した接続のクリーンアップ中にエラーが検出されました: %(ex)s" - -#, python-format -msgid "Error executing CloudByte API [%(cmd)s], Error: %(err)s." -msgstr "" -"CloudByte API [%(cmd)s] の実行中にエラーが発生しました。エラー: %(err)s。" - -msgid "Error executing EQL command" -msgstr "EQL コマンドを実行するときにエラーが発生しました。" - -#, python-format -msgid "Error executing command via ssh: %s" -msgstr "ssh を介したコマンドの実行エラー: %s" - -#, python-format -msgid "Error extending volume %(vol)s: %(err)s." -msgstr "ボリューム %(vol)s の拡張中にエラーが発生しました: %(err)s。" - -#, python-format -msgid "Error extending volume: %(reason)s" -msgstr "ボリュームの拡張エラーです: %(reason)s" - -#, python-format -msgid "Error finding %(name)s." -msgstr "%(name)s の検索中にエラーが発生しました。" - -#, python-format -msgid "Error finding %s." -msgstr "%s の検索中にエラーが発生しました。" - -#, python-format -msgid "" -"Error getting ReplicationSettingData. Return code: %(rc)lu. Error: %(error)s." -msgstr "" -"ReplicationSettingData の取得エラー。戻りコード: %(rc)lu。エラー: %(error)s。" - -msgid "" -"Error getting appliance version details. 
Return code: %(ret.status)d " -"Message: %(ret.data)s ." -msgstr "" -"アプライアンスバージョンの詳細の取得中にエラーが発生しました。戻りコード: " -"%(ret.status)d メッセージ: %(ret.data)s。" - -#, python-format -msgid "Error getting domain id from name %(name)s: %(err)s." -msgstr "" -"名前 %(name)s からドメイン ID を取得中にエラーが発生しました: %(err)s。" - -#, python-format -msgid "Error getting domain id from name %(name)s: %(id)s." -msgstr "名前 %(name)s からドメイン ID を取得中にエラーが発生しました: %(id)s。" - -msgid "Error getting initiator groups." -msgstr "イニシエーターグループの取得エラー。" - -#, python-format -msgid "Error getting pool id from name %(pool)s: %(err)s." -msgstr "名前 %(pool)s からプール ID を取得中にエラーが発生しました: %(err)s。" - -#, python-format -msgid "Error getting pool id from name %(pool_name)s: %(err_msg)s." -msgstr "" -"名前 %(pool_name)s からプール ID を取得中にエラーが発生しました: " -"%(err_msg)s。" - -#, python-format -msgid "" -"Error getting replication action: %(id)s. Return code: %(ret.status)d " -"Message: %(ret.data)s ." -msgstr "" -"レプリケーションアクション: %(id)s の取得中にエラーが発生しました。戻りコー" -"ド: %(ret.status)d メッセージ: %(ret.data)s。" - -msgid "" -"Error getting replication source details. Return code: %(ret.status)d " -"Message: %(ret.data)s ." -msgstr "" -"レプリケーションソースの詳細の取得中にエラーが発生しました。戻りコード: " -"%(ret.status)d メッセージ: %(ret.data)s。" - -msgid "" -"Error getting replication target details. Return code: %(ret.status)d " -"Message: %(ret.data)s ." -msgstr "" -"レプリケーションターゲットの詳細の取得中にエラーが発生しました。戻りコード: " -"%(ret.status)d メッセージ: %(ret.data)s。" - -#, python-format -msgid "Error getting sdc id from ip %(sdc_ip)s: %(sdc_id_message)s" -msgstr "" -"IP %(sdc_ip)s からの sdc id の取得でエラーが発生しました: %(sdc_id_message)s" - -#, python-format -msgid "" -"Error getting version: svc: %(svc)s.Return code: %(ret.status)d Message: " -"%(ret.data)s." -msgstr "" -"バージョンの取得中にエラーが発生しました: svc: %(svc)s。戻りコード: " -"%(ret.status)d、メッセージ: %(ret.data)s。" - -#, python-format -msgid "" -"Error in Operation [%(operation)s] for volume [%(cb_volume)s] in CloudByte " -"storage: [%(cb_error)s], error code: [%(error_code)s]." 
-msgstr "" -"CloudByte のストレージでボリューム [%(cb_volume)s] に関する処理 " -"[%(operation)s] でエラーが発生しました: [%(cb_error)s]。エラーコード: " -"[%(error_code)s]。" - -#, python-format -msgid "Error in SolidFire API response: data=%(data)s" -msgstr "SolidFire API 応答にエラーがあります: data=%(data)s" - -msgid "Error in processing live migration file." -msgstr "ライブマイグレーションファイルの処理中にエラーが発生しました。" - -#, python-format -msgid "Error in space-create for %(space)s of size %(size)d GB" -msgstr "%(size)d GB のサイズの %(space)s のスペースの作成のエラーです。" - -#, python-format -msgid "Error in space-extend for volume %(space)s with %(size)d additional GB" -msgstr "追加の %(size)d GB のボリューム %(space)s のスペース拡張のエラー" - -#, python-format -msgid "Error managing volume: %s." -msgstr "ボリュームの管理中にエラーが発生しました: %s。" - -#, python-format -msgid "" -"Error modify replica synchronization: %(sv)s operation: %(operation)s. " -"Return code: %(rc)lu. Error: %(error)s." -msgstr "" -"レプリカ同期の変更エラー: %(sv)s。操作: %(operation)s。戻りコード: %(rc)lu。" -"エラー: %(error)s。" - -#, python-format -msgid "" -"Error modifying Service: %(service)s Return code: %(ret.status)d Message: " -"%(ret.data)s." -msgstr "" -"サービス %(service)s の変更中にエラーが発生しました。戻りコード: " -"%(ret.status)d、メッセージ: %(ret.data)s。" - -#, python-format -msgid "" -"Error moving volume: %(vol)s from source project: %(src)s to target project: " -"%(tgt)s Return code: %(ret.status)d Message: %(ret.data)s ." -msgstr "" -"ソースプロジェクト: %(src)s からターゲットプロジェクト: %(tgt)s へのボリュー" -"ム: %(vol)s の移動中にエラーが発生しました。戻りコード: %(ret.status)d メッ" -"セージ: %(ret.data)s。 " - -msgid "Error not a KeyError." -msgstr "KeyError ではなくエラーです。" - -msgid "Error not a TypeError." -msgstr "TypeError ではなくエラーです。" - -#, python-format -msgid "Error occurred when creating cgsnapshot %s." -msgstr "cgsnapshot %s を作成中にエラーが発生しました。" - -#, python-format -msgid "Error occurred when creating group_snapshot %s." -msgstr "group_snapshot %s を作成中にエラーが発生しました。" - -#, python-format -msgid "Error occurred when deleting cgsnapshot %s." 
-msgstr "cgsnapshot %s を削除中にエラーが発生しました。" - -#, python-format -msgid "Error occurred when deleting group snapshot %s." -msgstr "グループスナップショット %s を削除中にエラーが発生しました。" - -#, python-format -msgid "Error occurred when deleting group_snapshot %s." -msgstr "group_snapshot %s を削除中にエラーが発生しました。" - -#, python-format -msgid "Error occurred when updating consistency group %s." -msgstr "整合性グループ %s を更新中にエラーが発生しました。" - -#, python-format -msgid "Error occurred when updating group %s." -msgstr "グループ %s を更新中にエラーが発生しました。" - -#, python-format -msgid "Error parsing config file: %(xml_config_file)s" -msgstr "構成ファイルの解析エラー: %(xml_config_file)s" - -#, python-format -msgid "Error removing volume %(vol)s from %(sg)s. %(error)s." -msgstr "%(sg)s からのボリューム %(vol)s の削除エラー。%(error)s。" - -#, python-format -msgid "Error renaming volume %(vol)s: %(err)s." -msgstr "ボリューム %(vol)s の名前を変更中にエラーが発生しました: %(err)s。" - -#, python-format -msgid "Error response: %s" -msgstr "エラー応答: %s" - -msgid "Error retrieving volume size" -msgstr "ボリュームサイズの抽出でエラーが発生しました" - -#, python-format -msgid "" -"Error sending replication update for action id: %(id)s . Return code: " -"%(ret.status)d Message: %(ret.data)s ." -msgstr "" -"アクション ID: %(id)s のレプリケーション更新の送信中にエラーが発生しました。" -"戻りコード: %(ret.status)d メッセージ: %(ret.data)s。" - -#, python-format -msgid "" -"Error sending replication update. Returned error: %(err)s. Action: %(id)s." -msgstr "" -"レプリケーション更新の送信中にエラーが発生しました。戻されたエラー: %(err)s。" -"アクション: %(id)s。" - -#, python-format -msgid "" -"Error setting replication inheritance to %(set)s for volume: %(vol)s project " -"%(project)s Return code: %(ret.status)d Message: %(ret.data)s ." -msgstr "" -"ボリューム: %(vol)s プロジェクト %(project)s の %(set)s へのレプリケーション" -"継承の設定中にエラーが発生しました。戻りコード: %(ret.status)d メッセージ: " -"%(ret.data)s。" - -#, python-format -msgid "" -"Error severing the package: %(package)s from source: %(src)s Return code: " -"%(ret.status)d Message: %(ret.data)s ." 
-msgstr "" -"ソース: %(src)s からのパッケージ: %(package)s の提供中にエラーが発生しまし" -"た。戻りコード: %(ret.status)d メッセージ: %(ret.data)s。" - -#, python-format -msgid "Error unbinding volume %(vol)s from pool. %(error)s." -msgstr "プールからのボリューム %(vol)s のアンバインドエラー。%(error)s。" - -#, python-format -msgid "Error while authenticating with switch: %s." -msgstr "スイッチによる認証中にエラーが発生しました: %s。" - -#, python-format -msgid "Error while changing VF context %s." -msgstr "VF コンテキスト %s の変更中にエラーが発生しました。" - -#, python-format -msgid "Error while checking the firmware version %s." -msgstr "ファームウェアバージョン %s の検査中にエラーが発生しました。" - -#, python-format -msgid "Error while checking transaction status: %s" -msgstr "トランザクション状態の検査中にエラーが発生しました: %s" - -#, python-format -msgid "Error while checking whether VF is available for management %s." -msgstr "VF が管理 %s に対して使用可能かどうかを検査中にエラーが発生しました。" - -#, python-format -msgid "" -"Error while connecting the switch %(switch_id)s with protocol %(protocol)s. " -"Error: %(error)s." -msgstr "" -"プロトコル %(protocol)s を指定したスイッチ %(switch_id)s の接続中にエラーが発" -"生しました。エラー: %(error)s。" - -#, python-format -msgid "Error while creating authentication token: %s" -msgstr "認証トークンの作成中にエラーが発生しました: %s" - -#, python-format -msgid "Error while creating snapshot [status] %(stat)s - [result] %(res)s." -msgstr "" -"スナップショットの作成中 [status] %(stat)s にエラーが発生しました: [result] " -"%(res)s。" - -#, python-format -msgid "Error while creating volume [status] %(stat)s - [result] %(res)s." -msgstr "" -"ボリュームの作成中 [status] %(stat)s にエラーが発生しました: [result] " -"%(res)s。" - -#, python-format -msgid "Error while deleting snapshot [status] %(stat)s - [result] %(res)s" -msgstr "" -"スナップショットの削除中 [status] %(stat)s にエラーが発生しました: [result] " -"%(res)s" - -#, python-format -msgid "Error while deleting volume [status] %(stat)s - [result] %(res)s." -msgstr "" -"ボリュームの削除中 [status] %(stat)s にエラーが発生しました: [result] " -"%(res)s。" - -#, python-format -msgid "Error while extending volume [status] %(stat)s - [result] %(res)s." 
-msgstr "" -"ボリュームの拡張中 [status] %(stat)s にエラーが発生しました: [result] " -"%(res)s。" - -#, python-format -msgid "Error while getting %(op)s details, returned code: %(status)s." -msgstr "%(op)s の詳細の取得中にエラーが発生しました。戻りコード: %(status)s。" - -#, python-format -msgid "Error while getting data via ssh: (command=%(cmd)s error=%(err)s)." -msgstr "" -"ssh を介してデータを取得中にエラーが発生しました: (command=%(cmd)s error=" -"%(err)s)。" - -#, python-format -msgid "Error while getting disco information [%s]." -msgstr "disco の情報 [%s] の取得中にエラーが発生しました。" - -#, python-format -msgid "Error while getting nvp value: %s." -msgstr "nvp 値の取得中にエラーが発生しました: %s。" - -#, python-format -msgid "Error while getting session information %s." -msgstr "セッション情報 %s の取得中にエラーが発生しました。" - -#, python-format -msgid "Error while parsing the data: %s." -msgstr "データの解析中にエラーが発生しました: %s。" - -#, python-format -msgid "Error while querying page %(url)s on the switch, reason %(error)s." -msgstr "" -"スイッチでのページ %(url)s の照会中にエラーが発生しました。理由 %(error)s。" - -#, python-format -msgid "" -"Error while removing the zones and cfgs in the zone string: %(description)s." -msgstr "" -"ゾーン文字列でのゾーンおよび cfgs の削除中にエラーが発生しました: " -"%(description)s。" - -#, python-format -msgid "Error while requesting %(service)s API." -msgstr "%(service)s API の要求中にエラーが発生しました。" - -#, python-format -msgid "Error while running zoning CLI: (command=%(cmd)s error=%(err)s)." -msgstr "" -"ゾーニング CLI の実行中にエラーが発生しました: (command=%(cmd)s error=" -"%(err)s)。" - -#, python-format -msgid "" -"Error while updating the new zones and cfgs in the zone string. Error " -"%(description)s." -msgstr "" -"ゾーン文字列での新規ゾーンおよび cfgs の更新中にエラーが発生しました。エラー " -"%(description)s。" - -msgid "Error writing field to database" -msgstr "データベースへのフィールドの書き込みに失敗しました" - -#, python-format -msgid "Error: Failed to %(operation_type)s %(component)s" -msgstr "エラー: 失敗しました。%(operation_type)s %(component)s" - -#, python-format -msgid "Error[%(stat)s - %(res)s] while getting volume id." 
-msgstr "ボリューム ID の取得中にエラーが発生しました [%(stat)s - %(res)s]。" - -#, python-format -msgid "" -"Error[%(stat)s - %(res)s] while restoring snapshot [%(snap_id)s] into volume " -"[%(vol)s]." -msgstr "" -"スナップショット[%(snap_id)s] のボリューム [%(vol)s] へのリストア中にエラーが" -"発生しました [%(stat)s - %(res)s]。" - -#, python-format -msgid "Error[status] %(stat)s - [result] %(res)s] while getting volume id." -msgstr "" -"ボリュームの取得中にエラーが発生しました [status] %(stat)s - [result] " -"%(res)s]。" - -#, python-format -msgid "" -"Exceeded max scheduling attempts %(max_attempts)d for volume %(volume_id)s" -msgstr "" -"ボリューム %(volume_id)s のスケジュールの最大試行回数 %(max_attempts)d を超過" -"しました" - -msgid "Exceeded the limit of snapshots per volume" -msgstr "ボリュームごとのスナップショットの制限を超えました。" - -#, python-format -msgid "Exception appending meta volume to target volume %(volumename)s." -msgstr " ターゲットボリューム %(volumename)s へのメタボリュームの追加の例外。" - -#, python-format -msgid "" -"Exception during create element replica. Clone name: %(cloneName)s Source " -"name: %(sourceName)s Extra specs: %(extraSpecs)s " -msgstr "" -"要素のレプリカの作成中の例外。クローン名: %(cloneName)s、ソース名: " -"%(sourceName)s、追加の仕様: %(extraSpecs)s " - -#, python-format -msgid "Exception in _select_ds_for_volume: %s." -msgstr "_select_ds_for_volume で例外が発生しました: %s。" - -#, python-format -msgid "Exception while forming the zone string: %s." -msgstr "ゾーン文字列の形成中に例外が発生しました: %s。" - -#, python-format -msgid "Exception: %s" -msgstr "例外: %s" - -#, python-format -msgid "Expected a uuid but received %(uuid)s." -msgstr "UUID が必要ですが、%(uuid)s を受け取りました。" - -#, python-format -msgid "Expected exactly one node called \"%s\"" -msgstr "1 つのノードの呼び出しを予期していました \"%s\"" - -#, python-format -msgid "Expected higher file exists for snapshot %s" -msgstr "" -"スナップショット %s には、上位のファイルが存在することが期待されています。" - -#, python-format -msgid "Expected integer for node_count, svcinfo lsiogrp returned: %(node)s." 
-msgstr "" -"node_count に対して整数が予期され、svcinfo lsiogrp が返されました: %(node)s。" - -#, python-format -msgid "Expected no output from CLI command %(cmd)s, got %(out)s." -msgstr "" -"CLI コマンド %(cmd)s からの出力がないことが予期されます。%(out)s を受け取りま" -"す。" - -#, python-format -msgid "" -"Expected single vdisk returned from lsvdisk when filtering on vdisk_UID. " -"%(count)s were returned." -msgstr "" -"vdisk_UID でフィルタリングする場合、lsvdisk から 1 つの vdisk が返されること" -"が予期されます。%(count)s が返されました。" - -#, python-format -msgid "Expected volume size was %d" -msgstr "予期されたボリュームサイズは %d でした。" - -#, python-format -msgid "Export Group %s: not found" -msgstr "エクスポートグループ %s: 見つかりませんでした。" - -#, python-format -msgid "" -"Export backup aborted, expected backup status %(expected_status)s but got " -"%(actual_status)s." -msgstr "" -"バックアップのエクスポートが中止しました。予期していたバックアップの状態は " -"%(expected_status)s ですが、%(actual_status)s を受け取りました。" - -#, python-format -msgid "Export group with name %s already exists" -msgstr "名前 %s を持つエクスポートグループはすでに存在します。" - -#, python-format -msgid "" -"Export record aborted, the backup service currently configured " -"[%(configured_service)s] is not the backup service that was used to create " -"this backup [%(backup_service)s]." -msgstr "" -"レコードのエクスポートが中止しました。現在構成されているバックアップサービス " -"[%(configured_service)s] は、このバックアップの作成に使用されたバックアップ" -"サービス [%(backup_service)s] ではありません。" - -msgid "Extend volume error." -msgstr "ボリューム拡張のエラー。" - -msgid "" -"Extend volume is only supported for this driver when no snapshots exist." -msgstr "" -"ボリュームの拡張は、スナップショットが存在しない場合にのみ、このドライバーに" -"対してサポートされます。" - -msgid "Extend volume not implemented" -msgstr "ボリュームの拡張が実装されていません。" - -msgid "" -"FAST VP Enabler is not installed. Can not set tiering policy for the volume." -msgstr "" -"FAST VP イネーブラーがインストールされていません。ボリュームの階層化ポリシー" -"を設定できません。" - -msgid "FAST is not supported on this array." -msgstr "FAST はこのアレイでサポートされていません。" - -msgid "FC is the protocol but wwpns are not supplied by OpenStack." 
-msgstr "FC がプロトコルですが、wwpns が OpenStack によって提供されていません。" - -#, python-format -msgid "FS label: %(fs_label)s" -msgstr "FS ラベル: %(fs_label)s" - -msgid "FSS cinder volume driver not ready: Unable to determine session id." -msgstr "" -"FSS cinder ボリュームドライバーが準備できていません: セッション ID を判別でき" -"ません。" - -msgid "FSS do not support multipathing." -msgstr "FSS はマルチパスをサポートしていません。" - -#, python-format -msgid "FSS get mirror sync timeout on vid: %s " -msgstr "FSS でミラー同期がタイムアウトしました。vid : %s " - -#, python-format -msgid "FSS get timemark copy timeout on vid: %s" -msgstr "FSS でタイムマークコピーがタイムアウトしました。vid : %s " - -#, python-format -msgid "" -"FSS rest api return failed, method=%(method)s, uri=%(url)s, response=" -"%(response)s" -msgstr "" -"FSS rest API が失敗を返しました。メソッド = %(method)s 、 url = %(url)s 、 " -"レスポンス = %(response)s" - -msgid "" -"FSSISCSIDriver manage_existing requires vid to identify an existing volume." -msgstr "" -"FSSISCSI ドライバーで既存のボリュームを特定するには、 manage_existing で vid " -"が必要です。" - -#, python-format -msgid "Faield to unassign %(volume)s" -msgstr "%(volume)s の割り当て解除に失敗しました" - -#, python-format -msgid "Fail to create cache volume %(volume)s. Error: %(err)s" -msgstr "キャッシュボリューム %(volume)s の作成に失敗しました。エラー: %(err)s" - -#, python-format -msgid "Failed adding connection for fabric=%(fabric)s: Error: %(err)s" -msgstr "ファブリック %(fabric)s の接続の追加に失敗しました。エラー: %(err)s" - -msgid "Failed cgsnapshot" -msgstr "cgsnapshot が失敗しました。" - -#, python-format -msgid "Failed creating snapshot for group: %(response)s." -msgstr "グループのスナップショットの作成に失敗しました: %(response)s。" - -#, python-format -msgid "Failed creating snapshot for volume %(volname)s: %(response)s." -msgstr "" -"ボリューム %(volname)s のスナップショットの作成に失敗しました: %(response)s。" - -#, python-format -msgid "Failed getting active zone set from fabric %s." -msgstr "ファブリック %s からのアクティブなゾーンセットの取得に失敗しました。" - -#, python-format -msgid "Failed getting details for pool %s." 
-msgstr "プール %s の詳細の取得に失敗しました。" - -#, python-format -msgid "Failed removing connection for fabric=%(fabric)s: Error: %(err)s" -msgstr "ファブリック %(fabric)s の接続の削除に失敗しました。エラー: %(err)s" - -#, python-format -msgid "Failed to Extend Volume %(volname)s" -msgstr "ボリューム %(volname)s を拡張できませんでした。" - -#, python-format -msgid "Failed to Login to 3PAR (%(url)s) because %(err)s" -msgstr "3PAR (%(url)s) へのログインに失敗しました。理由: %(err)s" - -msgid "Failed to _get_node_uuid." -msgstr "_get_node_uuid が失敗しました。" - -msgid "Failed to access active zoning configuration." -msgstr "アクティブなゾーニング設定へのアクセスに失敗しました。" - -#, python-format -msgid "Failed to access zoneset status:%s" -msgstr "ゾーンセットステータスにアクセスできませんでした: %s" - -#, python-format -msgid "" -"Failed to acquire a resource lock. (serial: %(serial)s, inst: %(inst)s, ret: " -"%(ret)s, stderr: %(err)s)" -msgstr "" -"リソースロックを獲得できませんでした。(シリアル: %(serial)s、inst: %(inst)s、" -"ret: %(ret)s、stderr: %(err)s)" - -msgid "Failed to add or update zoning configuration." -msgstr "ゾーニング設定の追加または更新に失敗しました。" - -msgid "Failed to add the logical device." -msgstr "論理デバイスを追加できませんでした。" - -#, python-format -msgid "" -"Failed to add volume %(volumeName)s to consistency group %(cgName)s. Return " -"code: %(rc)lu. Error: %(error)s." -msgstr "" -"整合性グループ %(cgName)s に ボリューム %(volumeName)s を追加できませんでし" -"た。戻りコード: %(rc)lu。エラー: %(error)s。" - -msgid "Failed to add zoning configuration." -msgstr "ゾーニング設定の追加に失敗しました。" - -#, python-format -msgid "" -"Failed to assign the iSCSI initiator IQN. (port: %(port)s, reason: " -"%(reason)s)" -msgstr "" -"iSCSI イニシエーター IQN を割り当てることができませんでした。(ポート: " -"%(port)s、理由: %(reason)s)" - -#, python-format -msgid "Failed to associate qos_specs: %(specs_id)s with type %(type_id)s." -msgstr "" -"qos_specs %(specs_id)s をタイプ %(type_id)s に関連付けることができませんでし" -"た。" - -#, python-format -msgid "Failed to attach iSCSI target for volume %(volume_id)s." 
-msgstr "ボリューム %(volume_id)s の iSCSI ターゲットの接続に失敗しました。" - -#, python-format -msgid "Failed to backup volume metadata - %s" -msgstr "ボリュームメタデータのバックアップに失敗しました: %s" - -#, python-format -msgid "" -"Failed to backup volume metadata - Metadata backup object 'backup.%s.meta' " -"already exists" -msgstr "" -"ボリュームメタデータのバックアップに失敗しました - メタデータバックアップオブ" -"ジェクト 'backup.%s.meta' は既に存在します" - -#, python-format -msgid "Failed to clone volume from snapshot %s." -msgstr "スナップショット %s のボリュームの複製に失敗しました。" - -#, python-format -msgid "Failed to connect to %(vendor_name)s Array %(host)s: %(err)s" -msgstr "%(vendor_name)s 配列 %(host)s への接続に失敗しました: %(err)s" - -msgid "Failed to connect to Dell REST API" -msgstr "Dell REST API への接続に失敗しました" - -msgid "Failed to connect to array" -msgstr "アレイへの接続に失敗しました" - -#, python-format -msgid "Failed to connect to sheep daemon. addr: %(addr)s, port: %(port)s" -msgstr "" -"シープデーモンへの接続に失敗しました。アドレス: %(addr)s、 ポート: %(port)s" - -#, python-format -msgid "Failed to copy image to volume: %(reason)s" -msgstr "イメージをボリュームにコピーできませんでした: %(reason)s" - -#, python-format -msgid "Failed to copy metadata to volume: %(reason)s" -msgstr "メタデータをボリュームにコピーできませんでした: %(reason)s" - -msgid "" -"Failed to copy volume to image as image quota has been met. Please delete " -"images or have your limit increased, then try again." -msgstr "" -"イメージのクォータに到達したため、ボリュームのイメージへのコピーが失敗しまし" -"た。イメージを削除するか、上限値を増やして再試行してください。" - -msgid "Failed to copy volume, destination device unavailable." -msgstr "ボリュームのコピーに失敗しました。宛先デバイスが使用できません。" - -msgid "Failed to copy volume, source device unavailable." -msgstr "ボリュームのコピーに失敗しました。ソースデバイスが使用できません。" - -#, python-format -msgid "Failed to create CG %(cgName)s from snapshot %(cgSnapshot)s." 
-msgstr "" -"スナップショット %(cgSnapshot)s から CG %(cgName)s の作成に失敗しました。" - -#, python-format -msgid "Failed to create IG, %s" -msgstr "IG を作成できませんでした。%s" - -#, python-format -msgid "Failed to create Volume Group: %(vg_name)s" -msgstr "ボリュームグループを作成できませんでした: %(vg_name)s" - -#, python-format -msgid "" -"Failed to create a file. (file: %(file)s, ret: %(ret)s, stderr: %(err)s)" -msgstr "" -"ファイルを作成できませんでした。(ファイル: %(file)s、ret: %(ret)s, stderr: " -"%(err)s)" - -#, python-format -msgid "Failed to create a temporary snapshot for volume %s." -msgstr "%s の一時スナップショットの作成に失敗しました。" - -msgid "Failed to create api volume flow." -msgstr "API ボリュームフローの作成に失敗しました。" - -#, python-format -msgid "Failed to create cg snapshot %(id)s due to %(reason)s." -msgstr "%(reason)s が原因で cg スナップショット %(id)s の作成に失敗しました。" - -#, python-format -msgid "Failed to create consistency group %(id)s due to %(reason)s." -msgstr "%(reason)s が原因で整合性グループ %(id)s の作成に失敗しました。" - -#, python-format -msgid "Failed to create consistency group %(id)s:%(ret)s." -msgstr "整合性グループ %(id)s の作成に失敗しました: %(ret)s。" - -#, python-format -msgid "" -"Failed to create consistency group %s because VNX consistency group cannot " -"accept compressed LUNs as members." -msgstr "" -"VNX 整合性グループは圧縮された LUN をメンバーとして受け入れられないため、整合" -"性グループ %s の作成に失敗しました。" - -#, python-format -msgid "Failed to create consistency group: %(cgName)s." -msgstr "整合性グループ %(cgName)s の作成に失敗しました。" - -#, python-format -msgid "Failed to create consistency group: %(cgid)s. Error: %(excmsg)s." -msgstr "整合性グループ %(cgid)s の作成に失敗しました。エラー: %(excmsg)s。" - -#, python-format -msgid "" -"Failed to create consistency group: %(consistencyGroupName)s Return code: " -"%(rc)lu. Error: %(error)s." -msgstr "" -"整合性グループ %(consistencyGroupName)s の作成に失敗しました。戻りコード: " -"%(rc)lu。エラー: %(error)s。" - -#, python-format -msgid "Failed to create hardware id(s) on %(storageSystemName)s." -msgstr "%(storageSystemName)s 上でハードウェア ID を作成できませんでした。" - -#, python-format -msgid "" -"Failed to create host: %(name)s. 
Please check if it exists on the array." -msgstr "" -"ホスト: %(name)s の作成に失敗しました。このホストがアレイに存在しているかどう" -"か確認してください。" - -#, python-format -msgid "Failed to create hostgroup: %(name)s. Check if it exists on the array." -msgstr "" -"ホストグループ: %(name)s の作成に失敗しました。このホストグループがアレイに存" -"在しているかどうか確認してください。" - -msgid "Failed to create iqn." -msgstr "iqn の作成に失敗しました。" - -msgid "Failed to create iscsi target" -msgstr "iSCSI ターゲットの作成に失敗しました。" - -#, python-format -msgid "Failed to create iscsi target for volume %(volume_id)s." -msgstr "ボリューム %(volume_id)s の iSCSI ターゲットの作成に失敗しました。" - -msgid "Failed to create manage existing flow." -msgstr "既存の管理フローの作成に失敗しました。" - -msgid "Failed to create manage_existing flow." -msgstr "manage_existing フローの作成に失敗しました。" - -msgid "Failed to create map on mcs, no channel can map." -msgstr "" -"MCS でのマップ作成に失敗しました。チャンネルはマップを行うことができません。" - -msgid "Failed to create map." -msgstr "マップの作成に失敗しました。" - -#, python-format -msgid "Failed to create metadata for volume: %(reason)s" -msgstr "ボリュームのメタデータの作成に失敗しました: %(reason)s" - -msgid "Failed to create partition." -msgstr "パーティションの作成に失敗しました。" - -#, python-format -msgid "Failed to create qos_specs: %(name)s with specs %(qos_specs)s." -msgstr "" -"仕様 %(qos_specs)s を使用して qos_specs %(name)s を作成することができませんで" -"した。" - -msgid "Failed to create replica." -msgstr "複製の作成に失敗しました。" - -msgid "Failed to create scheduler manager volume flow" -msgstr "スケジューラーマネージャーのボリュームフローを作成できませんでした" - -#, python-format -msgid "" -"Failed to create snap.snap name: %(snapname)s,srvol name :%(srv)s with " -"Return code: %(ret)s. " -msgstr "" -"スナップショットの作成に失敗しました。スナップショット名: %(snapname)s, " -"srvol 名: %(srv)s , 戻りコード: %(ret)s" - -#, python-format -msgid "Failed to create snapshot %s" -msgstr "スナップショット %s の作成に失敗しました。" - -#, python-format -msgid "Failed to create snapshot for cg: %(cgName)s." -msgstr "整合性グループ %(cgName)s のスナップショット作成に失敗しました。" - -#, python-format -msgid "Failed to create snapshot for volume %s." 
-msgstr "ボリューム %s のスナップショットの作成に失敗しました。" - -#, python-format -msgid "Failed to create snapshot policy on volume %(vol)s: %(res)s." -msgstr "" -"ボリューム %(vol)s 上でのスナップショットポリシーの作成に失敗しました: " -"%(res)s。" - -#, python-format -msgid "Failed to create snapshot resource area on volume %(vol)s: %(res)s." -msgstr "" -"ボリューム %(vol)s 上でのスナップショットリソースエリアの作成に失敗しました: " -"%(res)s。" - -msgid "Failed to create snapshot." -msgstr "スナップショットの作成に失敗しました。" - -#, python-format -msgid "" -"Failed to create snapshot. CloudByte volume information not found for " -"OpenStack volume [%s]." -msgstr "" -"スナップショットの作成に失敗しました。OpenStack ボリューム [%s] に関して" -"CloudByte のボリューム情報が見つかりませんでした。" - -#, python-format -msgid "Failed to create south bound connector for %s." -msgstr "%s のサウスバウンドコネクターの作成に失敗しました。" - -#, python-format -msgid "Failed to create storage group %(storageGroupName)s." -msgstr "ストレージグループ %(storageGroupName)s を作成できませんでした。" - -#, python-format -msgid "Failed to create thin pool, error message was: %s" -msgstr "シンプールの作成に失敗しました。エラーメッセージ: %s" - -#, python-format -msgid "Failed to create volume %s" -msgstr "ボリューム %s の作成に失敗しました。" - -#, python-format -msgid "Failed to delete SI for volume_id: %(volume_id)s because it has pair." -msgstr "" -"volume_id: %(volume_id)s の SI にペアが存在するため、その削除に失敗しました。" - -#, python-format -msgid "Failed to delete a logical device. (LDEV: %(ldev)s, reason: %(reason)s)" -msgstr "論理デバイスを削除できませんでした。(LDEV: %(ldev)s、理由: %(reason)s)" - -#, python-format -msgid "Failed to delete cgsnapshot %(id)s due to %(reason)s." -msgstr "%(reason)s が原因で cgsnapshot %(id)s の削除に失敗しました。" - -#, python-format -msgid "Failed to delete consistency group %(id)s due to %(reason)s." -msgstr "%(reason)s が原因で整合性グループ %(id)s の削除に失敗しました。" - -#, python-format -msgid "Failed to delete consistency group: %(cgName)s." -msgstr "整合性グループ %(cgName)s の削除に失敗しました。" - -#, python-format -msgid "" -"Failed to delete consistency group: %(consistencyGroupName)s Return code: " -"%(rc)lu. Error: %(error)s." 
-msgstr "" -"整合性グループ %(consistencyGroupName)s の削除に失敗しました。戻りコード: " -"%(rc)lu。エラー: %(error)s。" - -msgid "Failed to delete device." -msgstr "デバイスの削除に失敗しました。" - -#, python-format -msgid "" -"Failed to delete fileset for consistency group %(cgname)s. Error: %(excmsg)s." -msgstr "" -"整合性グループ %(cgname)s のファイルセットの削除に失敗しました。エラー: " -"%(excmsg)s。" - -msgid "Failed to delete iqn." -msgstr "iqn の削除に失敗しました。" - -msgid "Failed to delete map." -msgstr "マップの削除に失敗しました。" - -msgid "Failed to delete partition." -msgstr "パーティションの削除に失敗しました。" - -msgid "Failed to delete replica." -msgstr "複製の削除に失敗しました。" - -#, python-format -msgid "Failed to delete snapshot %s" -msgstr "スナップショット %s の削除に失敗しました。" - -#, python-format -msgid "Failed to delete snapshot for cg: %(cgId)s." -msgstr "整合性グループ %(cgId)s のスナップショット削除に失敗しました。" - -#, python-format -msgid "Failed to delete snapshot for snapshot_id: %s because it has pair." -msgstr "" -"snapshot_id: %s のスナップショットにペアが存在するため、その削除に失敗しまし" -"た。" - -msgid "Failed to delete snapshot." -msgstr "スナップショットの削除に失敗しました。" - -#, python-format -msgid "Failed to delete volume %(volumeName)s." -msgstr "ボリューム %(volumeName)s の削除に失敗しました。" - -#, python-format -msgid "" -"Failed to delete volume for volume_id: %(volume_id)s because it has pair." -msgstr "" -"volume_id: %(volume_id)s のボリュームにペアが存在するため、その削除に失敗しま" -"した。" - -#, python-format -msgid "Failed to detach iSCSI target for volume %(volume_id)s." -msgstr "ボリューム %(volume_id)s の iSCSI ターゲットの切り離しに失敗しました。" - -msgid "Failed to determine blockbridge API configuration" -msgstr "Blockbridge API の設定を決定できませんでした。" - -msgid "Failed to disassociate qos specs." -msgstr "qos 仕様の関連付けを解除できませんでした。" - -#, python-format -msgid "Failed to disassociate qos_specs: %(specs_id)s with type %(type_id)s." 
-msgstr "" -"qos_specs %(specs_id)s とタイプ %(type_id)s の関連付けを解除できませんでし" -"た。" - -#, python-format -msgid "" -"Failed to ensure snapshot resource area, could not locate volume for id %s" -msgstr "" -"スナップショットリソースエリアの確認に失敗しました。ID %s のボリュームを見つ" -"けることができませんでした" - -msgid "Failed to establish SSC connection!" -msgstr "SSC 接続の確立に失敗しました。" - -msgid "Failed to establish a stable connection" -msgstr "安定した接続の確立に失敗しました。" - -msgid "Failed to establish connection with Coho cluster" -msgstr "Coho クラスターとの接続に失敗しました。" - -#, python-format -msgid "" -"Failed to execute CloudByte API [%(cmd)s]. Http status: %(status)s, Error: " -"%(error)s." -msgstr "" -"CloudByte API [%(cmd)s] の実行に失敗しました。HTTP ステータス: %(status)s、エ" -"ラー: %(error)s。" - -msgid "Failed to execute common command." -msgstr "共通のコマンドの実行に失敗しました。" - -#, python-format -msgid "Failed to export for volume: %(reason)s" -msgstr "ボリュームのエクスポートに失敗しました: %(reason)s" - -#, python-format -msgid "Failed to extend volume %(name)s, Error msg: %(msg)s." -msgstr "ボリューム %(name)s の拡張に失敗しました。エラーメッセージ: %(msg)s。" - -msgid "Failed to find QoSnode" -msgstr "QoSNode が見つかりません。" - -msgid "Failed to find Storage Center" -msgstr "ストレージセンターが見つかりませんでした" - -msgid "Failed to find a vdisk copy in the expected pool." -msgstr "予期されるプールに vdisk コピーが見つかりませんでした。" - -msgid "Failed to find account for volume." -msgstr "ボリュームのアカウントが見つかりませんでした。" - -#, python-format -msgid "Failed to find available FC targets for %s." -msgstr "%s で利用可能な FCターゲットを見つけることに失敗しました。" - -#, python-format -msgid "Failed to find available iSCSI targets for %s." -msgstr "%s で利用可能な iSCSI ターゲットを見つけることに失敗しました。" - -#, python-format -msgid "Failed to find fileset for path %(path)s, command output: %(cmdout)s." -msgstr "" -"パス %(path)s のファイルセットが見つかりませんでした。コマンド出力: " -"%(cmdout)s。" - -#, python-format -msgid "Failed to find group snapshot named: %s" -msgstr "%s という名前のグループスナップショットが見つかりませんでした。" - -#, python-format -msgid "Failed to find host %s." 
-msgstr "ホスト %s を見つけることに失敗しました。" - -#, python-format -msgid "Failed to find iSCSI initiator group containing %(initiator)s." -msgstr "" -"%(initiator)s を含む iSCSI イニシエーターグループを見つけることに失敗しまし" -"た。" - -#, python-format -msgid "Failed to get CloudByte account details for account [%s]." -msgstr "アカウント [%s] の CloudByte アカウント詳細を取得できませんでした。" - -#, python-format -msgid "Failed to get LUN target details for the LUN %s" -msgstr "LUN %s の LUN ターゲット詳細の取得に失敗しました" - -#, python-format -msgid "Failed to get LUN target details for the LUN %s." -msgstr "LUN %s の LUN ターゲット詳細の取得に失敗しました。" - -#, python-format -msgid "Failed to get LUN target list for the LUN %s" -msgstr "LUN %s の LUN ターゲットリストを取得できませんでした" - -#, python-format -msgid "Failed to get Partition ID for volume %(volume_id)s." -msgstr "ボリューム %(volume_id)s のパーティション ID の取得に失敗しました。" - -#, python-format -msgid "Failed to get Raid Snapshot ID from Snapshot %(snapshot_id)s." -msgstr "" -"スナップショット %(snapshot_id)s からの RAID スナップショット ID の取得に失敗" -"しました。" - -#, python-format -msgid "Failed to get Raid Snapshot ID from snapshot: %(snapshot_id)s." -msgstr "" -"スナップショット: %(snapshot_id)s からの RAID スナップショット ID の取得に失" -"敗しました。" - -msgid "Failed to get SplitMirror." -msgstr "SplitMirror の取得に失敗しました。" - -#, python-format -msgid "" -"Failed to get a storage resource. The system will attempt to get the storage " -"resource again. (resource: %(resource)s)" -msgstr "" -"ストレージリソースを取得できませんでした。システムは、もう一度ストレージリ" -"ソースの取得を試みます。(リソース: %(resource)s)" - -#, python-format -msgid "Failed to get all associations of qos specs %s" -msgstr "qos 仕様 %s のすべての関連付けは取得できませんでした。" - -msgid "Failed to get channel info." -msgstr "チャンネル情報の取得に失敗しました。" - -#, python-format -msgid "Failed to get code level (%s)." -msgstr "コードレベル (%s) を取得できませんでした。" - -msgid "Failed to get device info." -msgstr "デバイス情報の取得に失敗しました。" - -#, python-format -msgid "Failed to get domain because CPG (%s) doesn't exist on array." 
-msgstr "CPG (%s) がアレイ上に存在しないため、ドメインを取得できませんでした。" - -#, python-format -msgid "Failed to get iSCSI target info for the LUN: %s" -msgstr "LUN の iSCSI ターゲット情報の取得に失敗しました: %s" - -#, python-format -msgid "Failed to get iSCSI target info for the LUN: %s." -msgstr "LUN の iSCSI ターゲット情報の取得に失敗しました: %s" - -msgid "Failed to get image snapshots." -msgstr "イメージ のスナップショットの獲得に失敗しました。" - -#, python-format -msgid "Failed to get ip on Channel %(channel_id)s with volume: %(volume_id)s." -msgstr "" -" ボリューム: %(volume_id)s を持つチャンネル %(channel_id)s の IP の取得に失敗" -"しました。" - -msgid "Failed to get iqn info." -msgstr "iqn 情報の取得に失敗しました。" - -msgid "Failed to get license info." -msgstr "ライセンス情報の取得に失敗しました。" - -msgid "Failed to get lv info." -msgstr "Iv 情報の取得に失敗しました。" - -msgid "Failed to get map info." -msgstr "マップ情報の取得に失敗しました。" - -msgid "Failed to get migration task." -msgstr "マイグレーションタスクの取得に失敗しました。" - -msgid "Failed to get model update from clone" -msgstr "複製からのモデル更新の取得に失敗しました。" - -msgid "Failed to get name server info." -msgstr "ネームサーバー情報の取得に失敗しました。" - -msgid "Failed to get network info." -msgstr "ネットワーク情報の取得に失敗しました。" - -#, python-format -msgid "Failed to get new part id in new pool: %(pool_id)s." -msgstr "新規プール: %(pool_id)s での新規パート ID の取得に失敗しました。" - -msgid "Failed to get partition info." -msgstr "パーティション情報の取得に失敗しました。" - -#, python-format -msgid "Failed to get pool id with volume %(volume_id)s." -msgstr "ボリューム %(volume_id)s を持つプール ID の取得に失敗しました。" - -#, python-format -msgid "Failed to get remote copy information for %(volume)s due to %(err)s." -msgstr "" -"%(err)s が原因で、%(volume)s のリモートコピー情報の取得に失敗しました。" - -#, python-format -msgid "" -"Failed to get remote copy information for %(volume)s. Exception: %(err)s." -msgstr "%(volume)s のリモートコピー情報の取得に失敗しました。例外: %(err)s。" - -msgid "Failed to get replica info." -msgstr "レプリカ情報の取得に失敗しました。" - -msgid "Failed to get show fcns database info." 
-msgstr "fcns データベース情報の表示に失敗しました。" - -#, python-format -msgid "Failed to get size of volume %s" -msgstr "ボリューム %s のサイズを得ることに失敗しました。" - -#, python-format -msgid "Failed to get snapshot for volume %s." -msgstr "ボリューム %s のスナップショットの獲得に失敗しました。" - -msgid "Failed to get snapshot info." -msgstr "スナップショット情報の取得に失敗しました。" - -#, python-format -msgid "Failed to get target IQN for the LUN %s" -msgstr "LUN %s のターゲット IQN の取得に失敗しました" - -msgid "Failed to get target LUN of SplitMirror." -msgstr "SplitMirror のターゲット LUN の取得に失敗しました。" - -#, python-format -msgid "Failed to get target portal for the LUN %s" -msgstr "LUN %s のターゲットポータルの取得に失敗しました" - -#, python-format -msgid "Failed to get target_id of target [%s]" -msgstr "ターゲット [%s] の target_id の取得に失敗しました。" - -msgid "Failed to get targets" -msgstr "ターゲットを取得できませんでした" - -msgid "Failed to get wwn info." -msgstr "wwn 情報の取得に失敗しました。" - -#, python-format -msgid "" -"Failed to get, create or add volume %(volumeName)s to masking view " -"%(maskingViewName)s. The error message received was %(errorMessage)s." -msgstr "" -"ボリューム %(volumeName)s の取得、作成、またはマスキングビュー " -"%(maskingViewName)s への追加が失敗しました。受け取ったエラーメッセージは " -"%(errorMessage)s です。" - -msgid "Failed to identify volume backend." -msgstr "ボリュームバックエンドを識別できませんでした。" - -#, python-format -msgid "Failed to link fileset for the share %(cgname)s. Error: %(excmsg)s." -msgstr "" -"共有 %(cgname)s のファイルセットへのリンクに失敗しました。エラー: " -"%(excmsg)s。" - -#, python-format -msgid "Failed to log on %s Array (invalid login?)." -msgstr "%s 配列へのログインに失敗しました (無効なログイン?)。" - -#, python-format -msgid "Failed to login for user %s." -msgstr "ユーザー %s のログインに失敗しました。" - -msgid "Failed to login with all rest URLs." -msgstr "すべての rest URL のログインに失敗しました。" - -#, python-format -msgid "Failed to login. Return code: %(ret)s." 
-msgstr "ログインが失敗しました。戻りコード: %(ret)s" - -#, python-format -msgid "" -"Failed to make a request to Datera cluster endpoint due to the following " -"reason: %s" -msgstr "" -"次の理由で、Datera クラスターエンドポイントへの要求を実行できませんでした: %s" - -msgid "Failed to manage api volume flow." -msgstr "API ボリュームフローの管理に失敗しました。" - -#, python-format -msgid "" -"Failed to manage existing %(type)s %(name)s, because reported size %(size)s " -"was not a floating-point number." -msgstr "" -"報告されたサイズ %(size)s が浮動小数点値でないため、既存の %(type)s %(name)s " -"の管理に失敗しました。" - -#, python-format -msgid "" -"Failed to manage existing snapshot %(name)s, because rename operation " -"failed: Error msg: %(msg)s." -msgstr "" -"名前変更操作が失敗したため、既存スナップショット %(name)s の管理に失敗しまし" -"た: エラーメッセージ: %(msg)s。" - -#, python-format -msgid "" -"Failed to manage existing volume %(name)s, because of error in getting " -"volume size." -msgstr "" -"ボリュームサイズ取得エラーのため、既存ボリューム %(name)s の管理に失敗しまし" -"た。" - -#, python-format -msgid "" -"Failed to manage existing volume %(name)s, because rename operation failed: " -"Error msg: %(msg)s." -msgstr "" -"名前変更操作が失敗したため、既存ボリューム %(name)s の管理に失敗しました: エ" -"ラーメッセージ: %(msg)s。" - -#, python-format -msgid "" -"Failed to manage existing volume %(name)s, because reported size %(size)s " -"was not a floating-point number." -msgstr "" -"変更されたサイズ %(size)s が浮動小数点数ではなかったため、既存ボリューム " -"%(name)s の管理に失敗しました。" - -#, python-format -msgid "" -"Failed to manage existing volume because the pool %(pool)s of the volume " -"type chosen does not match the file system %(fs_label)s passed in the volume " -"reference." -msgstr "" -"選択されたボリューム種別のプール %(pool)s が、ボリューム参照で渡されたファイ" -"ルシステム %(fs_label)s と一致しないため、既存のボリュームの管理に失敗しまし" -"た。" - -#, python-format -msgid "" -"Failed to manage existing volume because the pool %(pool)s of the volume " -"type chosen does not match the pool %(pool_host)s of the host." 
-msgstr "" -"選択されたボリューム種別のプール %(pool)s がホストのプール %(pool_host)s と一" -"致しないため、既存のボリュームの管理に失敗しました。" - -#, python-format -msgid "" -"Failed to manage existing volume because the pool of the volume type chosen " -"(%(pool)s) does not match the NFS share passed in the volume reference " -"(%(share)s)." -msgstr "" -"選択されたボリューム種別のプール (%(pool)s) がボリューム参照で渡された NFS 共" -"有 (%(share)s) と一致しないため、既存のボリュームの管理に失敗しました。" - -#, python-format -msgid "" -"Failed to manage existing volume because the pool of the volume type chosen " -"(%(pool)s) does not match the pool of the host %(pool_host)s" -msgstr "" -"選択されたボリューム種別のプール (%(pool)s) がホストのプール %(pool_host)s と" -"一致しないため、既存のボリュームの管理に失敗しました。" - -#, python-format -msgid "" -"Failed to manage existing volume due to I/O group mismatch. The I/O group of " -"the volume to be managed is %(vdisk_iogrp)s. I/O groupof the chosen type is " -"%(opt_iogrp)s." -msgstr "" -"入出力グループの不一致が原因で既存のボリュームの管理に失敗しました。管理対象" -"となるボリュームの入出力グループは %(vdisk_iogrp)s です。選択された種別の入出" -"力グループは %(opt_iogrp)s です。" - -#, python-format -msgid "" -"Failed to manage existing volume due to the pool of the volume to be managed " -"does not match the backend pool. Pool of the volume to be managed is " -"%(vdisk_pool)s. Pool of the backend is %(backend_pool)s." -msgstr "" -"管理対象となるボリュームのプールがバックエンドプールと一致しないことが原因" -"で、既存のボリュームの管理に失敗しました。管理対象となるボリュームのプールは " -"%(vdisk_pool)s です。バックエンドのプールは %(backend_pool)s です。" - -msgid "" -"Failed to manage existing volume due to the volume to be managed is " -"compress, but the volume type chosen is not compress." -msgstr "" -"管理対象となるボリュームは compress ですが、選択されたボリューム種別は " -"compress でないことが原因で、既存のボリュームの管理に失敗しました。" - -msgid "" -"Failed to manage existing volume due to the volume to be managed is not " -"compress, but the volume type chosen is compress." 
-msgstr "" -"管理対象となるボリュームは compress ではありませんが、選択されたボリューム種" -"別は compress であることが原因で、既存のボリュームの管理に失敗しました。" - -msgid "" -"Failed to manage existing volume due to the volume to be managed is not in a " -"valid I/O group." -msgstr "" -"管理対象となるボリュームが有効な入出力グループになかったことが原因で、既存の" -"ボリュームの管理に失敗しました。" - -msgid "" -"Failed to manage existing volume due to the volume to be managed is thick, " -"but the volume type chosen is thin." -msgstr "" -"管理対象となるボリュームは thick ですが、選択されたボリューム種別は thin であ" -"ることが原因で、既存のボリュームの管理に失敗しました。" - -msgid "" -"Failed to manage existing volume due to the volume to be managed is thin, " -"but the volume type chosen is thick." -msgstr "" -"管理対象となるボリュームは thin ですが、選択されたボリューム種別は thick であ" -"ることが原因で、既存のボリュームの管理に失敗しました。" - -#, python-format -msgid "" -"Failed to manage existing volume/snapshot %(name)s, because of error in " -"getting its size." -msgstr "" -"サイズ取得エラーのため、既存ボリューム/スナップショット %(name)s の管理に失敗" -"しました。" - -#, python-format -msgid "Failed to manage volume %s." -msgstr "ボリューム %s の管理に失敗しました。" - -#, python-format -msgid "" -"Failed to map a logical device. (LDEV: %(ldev)s, LUN: %(lun)s, port: " -"%(port)s, id: %(id)s)" -msgstr "" -"論理デバイスをマッピングできませんでした。(LDEV: %(ldev)s、LUN: %(lun)s、ポー" -"ト: %(port)s、ID: %(id)s)" - -#, python-format -msgid "Failed to migrate volume %(src)s." -msgstr "ボリューム %(src)s のマイグレーションに失敗しました。" - -#, python-format -msgid "" -"Failed to migrate volume between source vol %(src)s and dest vol %(dst)s." -msgstr "" -"ソースボリューム %(src)s と宛先ボリューム %(dst)s の間のボリュームのマイグ" -"レーションが失敗しました。" - -msgid "Failed to migrate volume for the first time." -msgstr "ボリュームのマイグレーションに失敗しました (初回)。" - -msgid "Failed to migrate volume for the second time." -msgstr "ボリュームのマイグレーションに失敗しました (2 回目)。" - -#, python-format -msgid "Failed to move LUN mapping. Return code: %s" -msgstr "LUN マッピングの移動に失敗しました。戻りコード: %s" - -#, python-format -msgid "Failed to move volume %s." -msgstr "ボリューム %s の移動に失敗しました。" - -#, python-format -msgid "Failed to open a file. 
(file: %(file)s, ret: %(ret)s, stderr: %(err)s)" -msgstr "" -"ファイルを開くことができませんでした。(ファイル: %(file)s、ret: %(ret)s、" -"stderr: %(err)s)" - -#, python-format -msgid "" -"Failed to parse CLI output:\n" -" command: %(cmd)s\n" -" stdout: %(out)s\n" -" stderr: %(err)s." -msgstr "" -"CLI 出力の解析に失敗しました:\n" -"コマンド: %(cmd)s\n" -" stdout: %(out)s\n" -" stderr: %(err)s。" - -msgid "" -"Failed to parse the configuration option 'glance_catalog_info', must be in " -"the form ::" -msgstr "" -"構成オプション 'glance_catalog_info' の解析に失敗しました。:" -": という形式でなければなりません。" - -msgid "" -"Failed to parse the configuration option 'keystone_catalog_info', must be in " -"the form ::" -msgstr "" -"設定オプションの 'keystone_catalog_info' を解析できませんでした。本オプション" -"は、:: の形式を持つ必要がありま" -"す。" - -msgid "" -"Failed to parse the configuration option 'swift_catalog_info', must be in " -"the form ::" -msgstr "" -"構成オプション 'swift_catalog_info' の解析に失敗しました。:" -": という形式でなければなりません" - -#, python-format -msgid "" -"Failed to perform a zero-page reclamation. (LDEV: %(ldev)s, reason: " -"%(reason)s)" -msgstr "" -"ゼロページレクラメーションを実行できませんでした。(LDEV: %(ldev)s、理由: " -"%(reason)s)" - -#, python-format -msgid "Failed to read configuration file(s): %s" -msgstr "設定ファイルの読み込みに失敗しました: %s " - -#, python-format -msgid "" -"Failed to recognize JSON payload:\n" -"[%s]" -msgstr "" -"JSON ペイロードの認識に失敗しました:\n" -"[%s]" - -#, python-format -msgid "Failed to remove export for volume %(volume)s: %(reason)s" -msgstr "ボリューム %(volume)s のエクスポートを削除できませんでした: %(reason)s" - -#, python-format -msgid "Failed to remove iscsi target for volume %(volume_id)s." -msgstr "ボリューム %(volume_id)s の iSCSI ターゲットの削除に失敗しました。" - -#, python-format -msgid "" -"Failed to remove volume %(volumeName)s from consistency group %(cgName)s. " -"Return code: %(rc)lu. Error: %(error)s." -msgstr "" -"整合性グループ %(cgName)s から ボリューム %(volumeName)s を削除できませんでし" -"た。戻りコード: %(rc)lu。エラー: %(error)s。" - -#, python-format -msgid "Failed to remove volume %(volumeName)s from default SG." 
-msgstr "" -"デフォルトの SG からボリューム %(volumeName)s を削除できませんでした。 " - -#, python-format -msgid "Failed to remove volume %(volumeName)s from default SG: %(volumeName)s." -msgstr "" -"ボリューム %(volumeName)s をデフォルト SG %(volumeName)s から削除できませんで" -"した。" - -#, python-format -msgid "" -"Failed to remove: %(volumename)s. from the default storage group for FAST " -"policy %(fastPolicyName)s." -msgstr "" -"FAST ポリシー %(fastPolicyName)s のデフォルトストレージグループから " -"%(volumename)s を削除できませんでした。" - -#, python-format -msgid "" -"Failed to rename logical volume %(name)s, error message was: %(err_msg)s" -msgstr "" -"論理ボリューム %(name)s の名前変更に失敗しました。エラーメッセージ: " -"%(err_msg)s" - -#, python-format -msgid "Failed to retrieve active zoning configuration %s" -msgstr "アクティブなゾーニング構成 %s の取得に失敗しました" - -#, python-format -msgid "Failed to retrieve attachments for volume %(name)s" -msgstr "ボリューム %(name)s の接続を取得できません。" - -#, python-format -msgid "" -"Failed to set CHAP authentication for target IQN %(iqn)s. Details: %(ex)s" -msgstr "ターゲット IQN %(iqn)s の CHAP 認証の設定に失敗しました。詳細: %(ex)s" - -#, python-format -msgid "Failed to set QoS for existing volume %(name)s, Error msg: %(msg)s." -msgstr "" -"既存のボリューム %(name)s 用の QoS の設定に失敗しました。エラーメッセージ: " -"%(msg)s。" - -msgid "Failed to set attribute 'Incoming user' for SCST target." -msgstr "SCST ターゲットの「着信ユーザー」属性の設定に失敗しました。" - -msgid "Failed to set partition." -msgstr "パーティションの設定に失敗しました。" - -#, python-format -msgid "" -"Failed to set permissions for the consistency group %(cgname)s. Error: " -"%(excmsg)s." -msgstr "" -"整合性グループ %(cgname)s に対する許可の設定に失敗しました。エラー: " -"%(excmsg)s。" - -#, python-format -msgid "" -"Failed to specify a logical device for the volume %(volume_id)s to be " -"unmapped." -msgstr "" -"マッピングを解除するボリューム %(volume_id)s の論理デバイスを指定できませんで" -"した。" - -#, python-format -msgid "" -"Failed to specify a logical device to be deleted. 
(method: %(method)s, id: " -"%(id)s)" -msgstr "" -"削除する論理デバイスを指定できませんでした。(メソッド: %(method)s、ID: " -"%(id)s)" - -msgid "Failed to terminate migrate session." -msgstr "マイグレーションセッションの終了に失敗しました。" - -#, python-format -msgid "Failed to unbind volume %(volume)s" -msgstr "ボリューム %(volume)s のアンバインドに失敗しました" - -#, python-format -msgid "" -"Failed to unlink fileset for consistency group %(cgname)s. Error: %(excmsg)s." -msgstr "" -"整合性グループ %(cgname)s のファイルセットのリンク解除に失敗しました。エ" -"ラー: %(excmsg)s。" - -#, python-format -msgid "Failed to unmap a logical device. (LDEV: %(ldev)s, reason: %(reason)s)" -msgstr "" -"論理デバイスのマッピングを解除できませんでした。(LDEV: %(ldev)s、理由: " -"%(reason)s)" - -#, python-format -msgid "Failed to update consistency group: %(cgName)s." -msgstr "整合性グループの更新に失敗しました: %(cgName)s。" - -#, python-format -msgid "Failed to update metadata for volume: %(reason)s" -msgstr "ボリュームのメタデータの更新に失敗しました: %(reason)s" - -msgid "Failed to update or delete zoning configuration" -msgstr "ゾーニング構成の更新または削除に失敗しました" - -msgid "Failed to update or delete zoning configuration." -msgstr "ゾーニング設定の更新または削除に失敗しました。" - -#, python-format -msgid "Failed to update qos_specs: %(specs_id)s with specs %(qos_specs)s." -msgstr "" -"仕様 %(qos_specs)s を使用して qos_specs %(specs_id)s を更新することができませ" -"んでした。" - -msgid "Failed to update quota usage while retyping volume." -msgstr "ボリュームのタイプを変更中にクォータの使用量を更新できませんでした。" - -msgid "Failed to update snapshot." -msgstr "スナップショットの更新に失敗しました。" - -#, python-format -msgid "" -"Failed updating volume %(vol_id)s metadata using the provided %(src_type)s " -"%(src_id)s metadata" -msgstr "" -"指定の %(src_type)s %(src_id)s メタデータを使用してボリューム %(vol_id)s メタ" -"データを更新することができませんでした" - -msgid "Failover requested on non replicated backend." -msgstr "" -"複製されていないバックエンド上でフェイルオーバーがリクエストされました。" - -#, python-format -msgid "Failure creating volume %s." -msgstr "ボリューム %s の作成に失敗しました。" - -#, python-format -msgid "Failure getting LUN info for %s." 
-msgstr "%s の LUN 情報の取得中に障害が発生しました。" - -#, python-format -msgid "Failure moving new cloned LUN to %s." -msgstr "複製された新規 LUN を %s へ移動中に障害が発生しました。" - -#, python-format -msgid "Failure staging LUN %s to tmp." -msgstr "LUN %s を一時 lun へステージング中に障害が発生しました。" - -#, python-format -msgid "Fexvisor failed to add volume %(id)s due to %(reason)s." -msgstr "" -"Fexvisor は %(reason)s が原因でボリューム %(id)s の追加に失敗しました。" - -#, python-format -msgid "" -"Fexvisor failed to join the volume %(vol)s in the group %(group)s due to " -"%(ret)s." -msgstr "" -"Fexvisor は %(ret)s が原因でグループ %(group)s でのボリューム %(vol)s の結合" -"に失敗しました。" - -#, python-format -msgid "" -"Fexvisor failed to remove the volume %(vol)s in the group %(group)s due to " -"%(ret)s." -msgstr "" -"Fexvisor は %(ret)s が原因で グループ %(group)s でのボリューム %(vol)s の削" -"除に失敗しました。" - -#, python-format -msgid "Fexvisor failed to remove volume %(id)s due to %(reason)s." -msgstr "" -"Fexvisor は %(reason)s が原因でボリューム %(id)s の削除に失敗しました。" - -#, python-format -msgid "Fibre Channel SAN Lookup failure: %(reason)s" -msgstr "ファイバーチャネル SAN ルックアップ障害: %(reason)s" - -msgid "Fibre Channel Zone Manager not initialized" -msgstr "ファイバーチャネルゾーンマネージャーが初期化されていません。" - -#, python-format -msgid "Fibre Channel Zone operation failed: %(reason)s" -msgstr "ファイバーチャネルゾーン操作が失敗しました: %(reason)s" - -#, python-format -msgid "Fibre Channel connection control failure: %(reason)s" -msgstr "ファイバーチャネル接続制御障害: %(reason)s" - -#, python-format -msgid "File %(file_path)s could not be found." -msgstr "ファイル %(file_path)s が見つかりませんでした。" - -#, python-format -msgid "File %(path)s has invalid backing file %(bfile)s, aborting." -msgstr "" -"ファイル %(path)s に無効なバッキングファイル %(bfile)s があります。打ち切りま" -"す。" - -#, python-format -msgid "File already exists at %s." 
-msgstr "ファイルは %s に既に存在します。" - -#, python-format -msgid "File already exists at: %s" -msgstr "ファイルは既に存在します: %s" - -#, python-format -msgid "File system not found or not mounted: %(fs)s" -msgstr "ファイルシステムが見つからないか、マウントされていません: %(fs)s" - -msgid "Find host in hostgroup error." -msgstr "ホストグループでのホスト検索のエラー。" - -msgid "Find host lun id error." -msgstr "ホスト LUN ID 検索のエラー。" - -msgid "Find lun group from mapping view error." -msgstr "マッピングビューからの LUN グループ検索のエラー。" - -msgid "Find mapping view error." -msgstr "マッピングビュー検索のエラー。" - -msgid "Find obj number error." -msgstr "オブジェクト番号検索のエラー。" - -msgid "Find portgroup error." -msgstr "ポートグループ検索のエラー。" - -msgid "Find portgroup from mapping view error." -msgstr "マッピングビューからのポートグループ検索のエラー。" - -#, python-format -msgid "" -"Flash Cache Policy requires WSAPI version '%(fcache_version)s' version " -"'%(version)s' is installed." -msgstr "" -"Flash キャッシュポリシーは、WSAPI バージョン「%(fcache_version)s」バージョン" -"「%(version)s」がインストールされていることを必要としています。" - -#, python-format -msgid "Flexvisor assign volume failed.:%(id)s:%(status)s." -msgstr "" -"Flexvisor によるボリュームの割り当てが失敗しました: %(id)s:%(status)s。" - -#, python-format -msgid "Flexvisor assign volume failed:%(id)s:%(status)s." -msgstr "" -"Flexvisor によるボリュームの割り当てが失敗しました: %(id)s:%(status)s。" - -#, python-format -msgid "" -"Flexvisor could not find volume %(id)s snapshot in the group %(vgid)s " -"snapshot %(vgsid)s." -msgstr "" -"Flexvisor はグループ %(vgid)s スナップショット %(vgsid)s でボリューム %(id)s " -"スナップショットを見つけられませんでした。" - -#, python-format -msgid "Flexvisor create volume failed.:%(volumeid)s:%(status)s." -msgstr "" -"Flexvisor によるボリュームの作成が失敗しました: %(volumeid)s:%(status)s。" - -#, python-format -msgid "Flexvisor failed deleting volume %(id)s: %(status)s." -msgstr "Flexvisor はボリューム %(id)s の削除に失敗しました: %(status)s。" - -#, python-format -msgid "Flexvisor failed to add volume %(id)s to group %(cgid)s." 
-msgstr "" -"Flexvisor はボリューム %(id)s をグループ %(cgid)s に追加できませんでした。" - -#, python-format -msgid "" -"Flexvisor failed to assign volume %(id)s due to unable to query status by " -"event id." -msgstr "" -"Flexvisor は、イベント ID でステータスを照会できないことが原因でボリューム " -"%(id)s を割り当てることに失敗しました。" - -#, python-format -msgid "Flexvisor failed to assign volume %(id)s: %(status)s." -msgstr "Flexvisor はボリューム %(id)s の割り当てに失敗しました: %(status)s。" - -#, python-format -msgid "Flexvisor failed to assign volume %(volume)s iqn %(iqn)s." -msgstr "" -"Flexvisor はボリューム %(volume)s iqn %(iqn)s の割り当てに失敗しました。" - -#, python-format -msgid "Flexvisor failed to clone volume %(id)s: %(status)s." -msgstr "Flexvisor はボリューム %(id)s の複製に失敗しました: %(status)s。" - -#, python-format -msgid "Flexvisor failed to clone volume (failed to get event) %(id)s." -msgstr "" -"Flexvisor はボリューム %(id)s の複製に失敗しました (イベントの取得に失敗しま" -"した)。" - -#, python-format -msgid "Flexvisor failed to create snapshot for volume %(id)s: %(status)s." -msgstr "" -"Flexvisor はボリューム %(id)s のスナップショットを作成することに失敗しまし" -"た: %(status)s。" - -#, python-format -msgid "" -"Flexvisor failed to create snapshot for volume (failed to get event) %(id)s." -msgstr "" -"Flexvisor はボリューム %(id)s のスナップショットを作成することに失敗しました " -"(イベントの取得に失敗しました)。" - -#, python-format -msgid "Flexvisor failed to create volume %(id)s in the group %(vgid)s." -msgstr "" -"Flexvisor はグループ %(vgid)s 内でボリューム %(id)s を作成できませんでした。" - -#, python-format -msgid "Flexvisor failed to create volume %(volume)s: %(status)s." -msgstr "Flexvisor はボリューム %(volume)s の作成に失敗しました: %(status)s。" - -#, python-format -msgid "Flexvisor failed to create volume (get event) %s." -msgstr "Flexvisor はボリューム %s の作成 (イベントの取得) に失敗しました。" - -#, python-format -msgid "Flexvisor failed to create volume from snapshot %(id)s: %(status)s." -msgstr "" -"Flexvisor はスナップショット %(id)s からのボリュームの作成に失敗しました: " -"%(status)s。" - -#, python-format -msgid "Flexvisor failed to create volume from snapshot %(id)s:%(status)s." 
-msgstr "" -"Flexvisor はスナップショット %(id)s からのボリュームの作成に失敗しました: " -"%(status)s。" - -#, python-format -msgid "" -"Flexvisor failed to create volume from snapshot (failed to get event) %(id)s." -msgstr "" -"Flexvisor はスナップショット %(id)s からのボリュームの作成に失敗しました (イ" -"ベントの取得に失敗しました)。" - -#, python-format -msgid "Flexvisor failed to delete snapshot %(id)s: %(status)s." -msgstr "Flexvisor はスナップショット %(id)s の削除に失敗しました: %(status)s。" - -#, python-format -msgid "Flexvisor failed to delete snapshot (failed to get event) %(id)s." -msgstr "" -"Flexvisor はスナップショット %(id)s の削除に失敗しました (イベントの取得に失" -"敗しました)。" - -#, python-format -msgid "Flexvisor failed to delete volume %(id)s: %(status)s." -msgstr "Flexvisor はボリューム %(id)s の削除に失敗しました: %(status)s。" - -#, python-format -msgid "Flexvisor failed to extend volume %(id)s: %(status)s." -msgstr "Flexvisor はボリューム %(id)s の拡張に失敗しました: %(status)s。" - -#, python-format -msgid "Flexvisor failed to extend volume %(id)s:%(status)s." -msgstr "Flexvisor はボリューム %(id)s の拡張に失敗しました: %(status)s。" - -#, python-format -msgid "Flexvisor failed to extend volume (failed to get event) %(id)s." -msgstr "" -"Flexvisor はボリューム %(id)s の拡張に失敗しました (イベントの取得に失敗しま" -"した)。" - -#, python-format -msgid "Flexvisor failed to get pool info %(id)s: %(status)s." -msgstr "Flexvisor はプール情報 %(id)s の取得に失敗しました: %(status)s。" - -#, python-format -msgid "" -"Flexvisor failed to get snapshot id of volume %(id)s from group %(vgid)s." -msgstr "" -"Flexvisor はグループ %(vgid)s からボリューム %(id)s のスナップショット ID を" -"取得できませんでした。" - -#, python-format -msgid "Flexvisor failed to remove volume %(id)s from group %(cgid)s." -msgstr "" -"Flexvisor はグループ %(cgid)s からのボリューム %(id)s の削除に失敗しました。" - -#, python-format -msgid "Flexvisor failed to spawn volume from snapshot %(id)s:%(status)s." -msgstr "" -"Flexvisor はスナップショット %(id)s からのボリュームの作成に失敗しました: " -"%(status)s。" - -#, python-format -msgid "" -"Flexvisor failed to spawn volume from snapshot (failed to get event) %(id)s." 
-msgstr "" -"Flexvisor はスナップショット %(id)s からのボリュームの作成に失敗しました (イ" -"ベントの取得に失敗しました)。" - -#, python-format -msgid "Flexvisor failed to unassign volume %(id)s: %(status)s." -msgstr "" -"Flexvisor はボリューム %(id)s の割り当て解除に失敗しました: %(status)s。" - -#, python-format -msgid "Flexvisor failed to unassign volume (get event) %(id)s." -msgstr "" -"Flexvisor はボリューム %(id)s の割り当て解除 (イベントの取得) に失敗しまし" -"た。" - -#, python-format -msgid "Flexvisor failed to unassign volume:%(id)s:%(status)s." -msgstr "Flexvisor はボリューム %(id)s の割り当て解除に失敗しました: %(status)s" - -#, python-format -msgid "Flexvisor unable to find the source volume %(id)s info." -msgstr "Flexvisor はソースボリューム %(id)s 情報を検出できません。" - -#, python-format -msgid "Flexvisor unassign volume failed:%(id)s:%(status)s." -msgstr "" -"Flexvisor によるボリュームの割り当て解除が失敗しました: %(id)s:%(status)s。" - -#, python-format -msgid "Flexvisor volume %(id)s failed to join group %(vgid)s." -msgstr "Flexvisor ボリューム %(id)s はグループ %(vgid)s の結合に失敗しました。" - -#, python-format -msgid "Folder %s does not exist in Nexenta Store appliance" -msgstr "フォルダー %s は Nexenta Store アプライアンスに存在しません" - -#, python-format -msgid "GET method is not supported by resource: %s" -msgstr "GET メソッドはリソースではサポートされていません: %s" - -#, python-format -msgid "GPFS is not running, state: %s." -msgstr "GPFS が実行されていません。状態: %s。" - -msgid "Gateway VIP is not set" -msgstr "ゲートウェイ VIP が設定されていません" - -msgid "Get FC ports by port group error." -msgstr "ポートグループによる FC ポート取得のエラー。" - -msgid "Get FC ports from array error." -msgstr "アレイからの FC ポート取得のエラー。" - -msgid "Get FC target wwpn error." -msgstr "FC ターゲット wwpn 取得のエラー。" - -msgid "Get HyperMetroPair error." -msgstr "HyperMetroPair 取得のエラー。" - -msgid "Get LUN group by view error." -msgstr "ビューによる LUN グループ取得のエラー。" - -msgid "Get LUNcopy information error." -msgstr "LUN コピーの情報取得のエラー。" - -msgid "Get QoS id by lun id error." -msgstr "LUN による QoS ID 取得のエラー。" - -msgid "Get QoS information error." -msgstr "QoS 情報取得のエラー。 " - -msgid "Get QoS policy error." 
-msgstr "QoS ポリシー取得のエラー。" - -msgid "Get SplitMirror error." -msgstr "SplitMirror 取得のエラー。" - -msgid "Get active client failed." -msgstr "アクティブなクライアントの取得が失敗しました。" - -msgid "Get array info error." -msgstr "アレイ情報取得のエラー。" - -msgid "Get cache by name error." -msgstr "名前によるキャッシュ取得のエラー。" - -msgid "Get connected free FC wwn error." -msgstr "空き FC wwn 接続のエラー。" - -msgid "Get engines error." -msgstr "エンジン取得のエラー。" - -msgid "Get host initiators info failed." -msgstr "ホストイニシエーター情報の取得が失敗しました。" - -msgid "Get hostgroup information error." -msgstr "ホストグループの情報取得のエラー。" - -msgid "Get hypermetro group by id error." -msgstr "idによる hypermetro グループの取得エラー。" - -msgid "Get hypermetro group by name error." -msgstr "名前による hypermetro グループの取得エラー。" - -msgid "Get hypermetro group error." -msgstr "hypermetro グループ取得エラー。" - -msgid "" -"Get iSCSI port info error, please check the target IP configured in huawei " -"conf file." -msgstr "" -"iSCSI ポート情報取得のエラー。huawei 設定ファイルに設定されたターゲット IP を" -"確認してください。" - -msgid "Get iSCSI port information error." -msgstr "iSCSI ポート情報取得のエラー。" - -msgid "Get iSCSI target port error." -msgstr "iSCSI ターゲットポート取得のエラー。" - -msgid "Get lun id by name error." -msgstr "名前による LUN ID 取得のエラー。" - -msgid "Get lun migration task error." -msgstr "LUN マイグレーションタスク取得のエラー。" - -msgid "Get lungroup id by lun id error." -msgstr "LUN ID による LUN グループ ID 取得のエラー。" - -msgid "Get lungroup information error." -msgstr "LUN グループの情報取得のエラー。" - -msgid "Get manageable snapshots not implemented." -msgstr "管理可能スナップショットの取得は実装されていません。" - -msgid "Get manageable volumes not implemented." -msgstr "管理可能ボリュームの取得は実装されていません。" - -msgid "Get migration task error." -msgstr "マイグレーションタスク取得のエラー。" - -msgid "Get pair failed." -msgstr "ペアの取得が失敗しました。" - -msgid "Get partition by name error." -msgstr "名前によるパーティション取得のエラー。" - -msgid "Get partition by partition id error." -msgstr "パーティション ID によるパーティション取得のエラー。" - -msgid "Get port group by view error." -msgstr "ビューによるポートグループ取得のエラー。" - -msgid "Get port group error." 
-msgstr "ポートグループ取得のエラー。" - -msgid "Get port groups by port error." -msgstr "ポートによるポートグループ取得のエラー。" - -msgid "Get ports by port group error." -msgstr "ポートグループによるポート取得のエラー。" - -msgid "Get remote device info failed." -msgstr "リモートデバイス情報の取得が失敗しました。" - -msgid "Get remote devices error." -msgstr "リモートデバイス取得のエラー。" - -msgid "Get smartcache by cache id error." -msgstr "キャッシュ ID によるスマートキャッシュ取得のエラー。" - -msgid "Get snapshot error." -msgstr "スナップショット取得のエラー。" - -msgid "Get snapshot id error." -msgstr "スナップショット ID 取得のエラー。" - -msgid "Get target IP error." -msgstr "ターゲット IP 取得のエラー。" - -msgid "Get target LUN of SplitMirror error." -msgstr "SplitMirror のターゲット LUN 取得のエラー。" - -msgid "Get views by port group error." -msgstr "ポートグループによるビュー取得のエラー。" - -msgid "Get volume by name error." -msgstr "名前によるボリューム取得のエラー。" - -msgid "Get volume error." -msgstr "ボリューム取得のエラー。" - -#, python-format -msgid "" -"Glance metadata cannot be updated, key %(key)s exists for volume id " -"%(volume_id)s" -msgstr "" -"Glance メタデータを更新できません。ボリューム ID %(volume_id)s に対するキー " -"%(key)s が存在します" - -#, python-format -msgid "Glance metadata for volume/snapshot %(id)s cannot be found." -msgstr "" -"ボリューム/スナップショット %(id)s の Glance メタデータが見つかりません。" - -#, python-format -msgid "Gluster config file at %(config)s doesn't exist" -msgstr "Gluster 構成ファイルが %(config)s に存在しません" - -#, python-format -msgid "Google Cloud Storage api failure: %(reason)s" -msgstr "Google Cloud Storage の API エラー: %(reason)s" - -#, python-format -msgid "Google Cloud Storage connection failure: %(reason)s" -msgstr "Google Cloud Storage の接続エラー: %(reason)s" - -#, python-format -msgid "Google Cloud Storage oauth2 failure: %(reason)s" -msgstr "Google Cloud Storage の oauth2 エラー: %(reason)s" - -#, python-format -msgid "Got bad path information from DRBDmanage! (%s)" -msgstr "DRBDmanage (%s) から不正なパスの情報が提供されました。" - -#, python-format -msgid "Group %(group_id)s could not be found." 
-msgstr "グループ %(group_id)s が見つかりませんでした。" - -#, python-format -msgid "" -"Group %s still contains volumes. The delete-volumes flag is required to " -"delete it." -msgstr "" -"グループ %s にはまだボリュームがあります。これを削除するには delete-volumes " -"フラグが必要です。" - -#, python-format -msgid "" -"Group Type %(group_type_id)s deletion is not allowed with groups present " -"with the type." -msgstr "" -"グループ種別 %(group_type_id)s を持つグループでは、そのグループ種別は削除でき" -"ません。" - -#, python-format -msgid "Group Type %(group_type_id)s has no specs with key %(group_specs_key)s." -msgstr "" -"グループ種別 %(group_type_id)s にはキー %(group_specs_key)s を持つスペックは" -"ありません。" - -#, python-format -msgid "Group Type %(id)s already exists." -msgstr "グループ種別 %(id)s は既に存在します。" - -#, python-format -msgid "Group Type %(type_id)s has no extra spec with key %(id)s." -msgstr "" -"グループ種別 %(type_id)s には、キー %(id)s に関する追加の仕様がありません。" - -msgid "Group snapshot is empty. No group will be created." -msgstr "グループスナップショット が空です。グループは作成されません。" - -#, python-format -msgid "Group status must be available or error, but current status is: %s" -msgstr "" -"グループの状態は「使用可能」または「エラー」でなければなりませんが、現在の状" -"態は %s です。" - -#, python-format -msgid "Group status must be available, but current status is: %s." -msgstr "" -"グループの状態は「使用可能」でなければなりませんが、現在の状態は %s です。" - -#, python-format -msgid "Group type %(group_type_id)s could not be found." -msgstr "グループ種別 %(group_type_id)s が見つかりませんでした。" - -#, python-format -msgid "" -"Group type access for %(group_type_id)s / %(project_id)s combination already " -"exists." -msgstr "" -"%(group_type_id)s / %(project_id)s の組み合わせのグループ種別アクセスは既に存" -"在します。" - -#, python-format -msgid "" -"Group type access not found for %(group_type_id)s / %(project_id)s " -"combination." -msgstr "" -"%(group_type_id)s / %(project_id)s の組み合わせのグループ種別アクセスが見つか" -"りません。" - -#, python-format -msgid "Group type encryption for type %(type_id)s already exists." 
-msgstr "タイプ %(type_id)s のグループ種別暗号化は既に存在します。" - -#, python-format -msgid "Group type encryption for type %(type_id)s does not exist." -msgstr "タイプ %(type_id)s に対するグループ種別暗号化は存在しません。" - -msgid "Group type name can not be empty." -msgstr "グループ種別名を空にすることはできません" - -#, python-format -msgid "Group type with name %(group_type_name)s could not be found." -msgstr "名前 %(group_type_name)s を持つグループ種別が見つかりませんでした。" - -#, python-format -msgid "" -"Group volume type mapping for %(group_id)s / %(volume_type_id)s combination " -"already exists." -msgstr "" -" %(group_id)s / %(volume_type_id)s の組み合わせのグループボリューム種別のマッ" -"ピングはすでに存在します。" - -#, python-format -msgid "GroupSnapshot %(group_snapshot_id)s could not be found." -msgstr "" -"グループスナップショット %(group_snapshot_id)s は見つかりませんでした。" - -msgid "" -"GroupSnapshot status must be available or error, and no Group can be " -"currently using it as source for its creation." -msgstr "" -"GroupSnapshot の状態は「使用可能」または「エラー」でなければなりません。また" -"これをソースとして用いてグループを作成することは現在できません。" - -msgid "HBSD error occurs." -msgstr "HBSD エラーが発生しました。" - -msgid "HNAS connection reset!" -msgstr "HNAS の接続がリセットされました。" - -msgid "HPELeftHand url not found" -msgstr "HPELeftHand url が見つかりません" - -#, python-format -msgid "HTTP code: %(status_code)s, %(reason)s [%(error_msg)s]" -msgstr "HTTP コード: %(status_code)s, %(reason)s [%(error_msg)s]" - -#, python-format -msgid "HTTP code: %(status_code)s, response: %(reason)s [%(error_msg)s]" -msgstr "HTTP コード: %(status_code)s 、レスポンス: %(reason)s [%(error_msg)s]" - -#, python-format -msgid "HTTP exit code: [%(code)s]" -msgstr "HTTP 終了コード : [%(code)s]" - -#, python-format -msgid "" -"Hash block size has changed since the last backup. New hash block size: " -"%(new)s. Old hash block size: %(old)s. Do a full backup." -msgstr "" -"最後のバックアップ以降にハッシュブロックサイズが変更されました。新規ハッシュ" -"ブロックサイズ: %(new)s。旧ハッシュブロックサイズ: %(old)s。フルバックアップ" -"を実行してください。" - -#, python-format -msgid "Have not created %(tier_levels)s tier(s)." 
-msgstr "%(tier_levels)s 層が作成されていません。" - -msgid "Heartbeat" -msgstr "ハートビート" - -#, python-format -msgid "Hint \"%s\" not supported." -msgstr "ヒント「%s」はサポートされていません。" - -msgid "Host" -msgstr "ホスト" - -#, python-format -msgid "Host %(host)s could not be found." -msgstr "ホスト %(host)s が見つかりませんでした。" - -#, python-format -msgid "" -"Host %(host)s does not match x509 certificate contents: CommonName " -"%(commonName)s." -msgstr "" -"ホスト %(host)s は x509 証明書の内容に一致しません: CommonName " -"%(commonName)s。" - -#, python-format -msgid "Host %s has no FC initiators" -msgstr "ホスト %s に FC イニシエーターがありません" - -msgid "Host attach volume failed!" -msgstr "ホストへのボリュームの接続に失敗しました。" - -#, python-format -msgid "Host group with name %s not found" -msgstr "名前が %s のホストグループが見つかりません" - -#, python-format -msgid "Host group with ref %s not found" -msgstr "参照 %s が指定されたホストグループが見つかりません" - -msgid "Host is NOT Frozen." -msgstr "ホストは固定化されていません。" - -msgid "Host is already Frozen." -msgstr "ホストは既に固定化されています。" - -msgid "Host must be specified in query parameters" -msgstr "クエリーパラメーターではホストの指定が必要です。" - -#, python-format -msgid "Host not found. Failed to remove %(service)s on %(host)s." -msgstr "" -"ホストが見つかりません。%(host)s 上で %(service)s の削除に失敗しました。" - -#, python-format -msgid "Host replication_status must be %s to failover." -msgstr "" -"フェイルオーバーを行うにはホストの replication_status が %s である必要があり" -"ます。" - -#, python-format -msgid "Host type %s not supported." -msgstr "ホストタイプ %s はサポートされていません。" - -#, python-format -msgid "Host with name: %s not found" -msgstr "名前 %s を持つホスト: 見つかりませんでした。" - -#, python-format -msgid "Host with ports %(ports)s not found." -msgstr "ポート %(ports)s が設定されたホストが見つかりません。" - -msgid "Hosts" -msgstr "ホスト" - -msgid "Hypermetro and Replication can not be used in the same volume_type." -msgstr "Hypermetro とレプリケーションは、同一の volume_type で使用できません。" - -#, python-format -msgid "I/O group %(iogrp)d is not valid; available I/O groups are %(avail)s." 
-msgstr "" -"入出力グループ %(iogrp)d は無効です。使用できる入出力グループは %(avail)s で" -"す。" - -msgid "ID" -msgstr "ID" - -msgid "" -"If compression is set to True, rsize must also be set (not equal to -1)." -msgstr "" -"圧縮が True に設定される場合、rsize も (-1 以外の値に) 設定しなければなり" -"ません。" - -msgid "If nofmtdisk is set to True, rsize must also be set to -1." -msgstr "" -"nofmtdisk が True に設定される場合、rsize も -1 に設定しなければなりません。" - -msgid "" -"If you want to create a thin provisioning volume, this param must be True." -msgstr "" -"シンプロビジョニングボリュームを作成したい場合は、このパラメーターは True で" -"なければいけません。" - -msgid "" -"Illegal provisioning type. The supported provisioning types are 'thick' or " -"'thin'." -msgstr "" -"正しくないプロビジョニングタイプです。サポートされているプロビジョニングのタ" -"イプは 'thick' か 'thin' です。" - -#, python-format -msgid "" -"Illegal value '%(prot)s' specified for flashsystem_connection_protocol: " -"valid value(s) are %(enabled)s." -msgstr "" -"正しくない値 '%(prot)s' が flashsystem_connection_protocol に指定されていま" -"す。有効な値は %(enabled)s です。" - -msgid "Illegal value specified for IOTYPE: 0, 1, or 2." -msgstr "IOTYPE に正しくない値が指定されています: 0、1、または 2。" - -msgid "Illegal value specified for smarttier: set to either 0, 1, 2, or 3." -msgstr "" -"smarttier に正しくない値が指定されています: 0、1、2、または 3 のいずれかに設" -"定してください。" - -msgid "" -"Illegal value specified for storwize_svc_vol_grainsize: set to either 32, " -"64, 128, or 256." -msgstr "" -"正しくない値が storwize_svc_vol_grainsize に指定されています。32、64、128、" -"256 のいずれかに設定してください。" - -msgid "" -"Illegal value specified for thin: Can not set thin and thick at the same " -"time." -msgstr "" -"thin に正しくない値が指定されています: thin と thick を同時に設定することはで" -"きません。" - -#, python-format -msgid "Image %(image_id)s could not be found." -msgstr "イメージ %(image_id)s が見つかりませんでした。" - -#, python-format -msgid "Image %(image_id)s is not active." -msgstr "イメージ %(image_id)s はアクティブではありません。" - -#, python-format -msgid "Image %(image_id)s is unacceptable: %(reason)s" -msgstr "イメージ %(image_id)s は受け入れられません: %(reason)s" - -msgid "Image location not present." 
-msgstr "イメージロケーションが存在しません。" - -msgid "Image quota exceeded" -msgstr "イメージのクォータを超えました。" - -#, python-format -msgid "" -"Image virtual size is %(image_size)dGB and doesn't fit in a volume of size " -"%(volume_size)dGB." -msgstr "" -"イメージの仮想サイズは %(image_size)dGB であり、サイズ %(volume_size)dGB のボ" -"リュームに適合しません。" - -msgid "" -"ImageBusy error raised while deleting rbd volume. This may have been caused " -"by a connection from a client that has crashed and, if so, may be resolved " -"by retrying the delete after 30 seconds has elapsed." -msgstr "" -"rbd ボリュームの削除中に ImageBusy エラーが発生しました。これは、異常終了した" -"クライアントからの接続が原因である可能性があります。その場合、30 秒経過後に削" -"除を再試行すると、解決できることがあります。" - -#, python-format -msgid "" -"Import record failed, cannot find backup service to perform the import. " -"Request service %(service)s" -msgstr "" -"レコードのインポートに失敗しました。インポートを実行するバックアップサービス" -"が見つかりません。要求サービス %(service)s" - -#, python-format -msgid "" -"Incorrect port number. Load balanced port is: %(lb_api_port)s, api service " -"port is: %(apisvc_port)s" -msgstr "" -"正しくないポート番号です。ロードバランス用ポート: %(lb_api_port)s 、APIサービ" -"ス用ポート: %(apisvc_port)s" - -msgid "Incorrect request body format" -msgstr "要求本体の形式が正しくありません。" - -msgid "Incorrect request body format." -msgstr "要求本体の形式が正しくありません。" - -msgid "Incremental backups exist for this backup." -msgstr "このバックアップには増分バックアップが存在します。" - -#, python-format -msgid "" -"Infortrend CLI exception: %(err)s Param: %(param)s (Return Code: %(rc)s) " -"(Output: %(out)s)" -msgstr "" -"Infortrend CLI の例外: %(err)s。パラメーター: %(param)s (戻りコード: %(rc)s) " -"(出力: %(out)s)" - -msgid "Initiators of host cannot be empty." -msgstr "ホストのイニシエーターは空にできません。" - -msgid "Input volumes or snapshots are invalid." -msgstr "入力ボリュームまたはスナップショットが無効です。" - -msgid "Input volumes or source volumes are invalid." -msgstr "入力ボリュームまたはソースボリュームが無効です。" - -#, python-format -msgid "Instance %(uuid)s could not be found." -msgstr "インスタンス %(uuid)s が見つかりませんでした。" - -msgid "Insufficient free space available to extend volume." 
-msgstr "ボリュームを拡張するために十分な空きスペースがありません。" - -msgid "Insufficient privileges" -msgstr "不十分な権限" - -#, python-format -msgid "Invalid 3PAR Domain: %(err)s" -msgstr "無効な 3PAR ドメイン: %(err)s" - -msgid "Invalid ALUA value. ALUA value must be 1 or 0." -msgstr "無効な ALUA 値。ALUA 値は、1 または 0 でなければなりません。" - -msgid "Invalid Ceph args provided for backup rbd operation" -msgstr "バックアップ RBD 操作に指定された Ceph 引数が無効です。" - -#, python-format -msgid "Invalid CgSnapshot: %(reason)s" -msgstr "無効な CgSnapshot: %(reason)s" - -#, python-format -msgid "Invalid ConsistencyGroup: %(reason)s" -msgstr "無効な ConsistencyGroup: %(reason)s" - -#, python-format -msgid "" -"Invalid ConsistencyGroup: Cannot delete consistency group %(id)s. " -"%(reason)s, and it cannot be the source for an ongoing CG or CG Snapshot " -"creation." -msgstr "" -"無効な整合性グループ : 整合性グループ %(id)s を削除できません。理由 : " -"%(reason)s 。 これを進行中の CG または CG スナップショットのソースとすること" -"はできません。" - -#, python-format -msgid "" -"Invalid ConsistencyGroup: Cannot update consistency group %s, status must be " -"available, and it cannot be the source for an ongoing CG or CG Snapshot " -"creation." -msgstr "" -"無効な整合性グループ : 整合性グループ %s を更新できません。状態は「使用可能」" -"である必要があります。 これを進行中の CG または CG スナップショットのソースと" -"することはできません。" - -msgid "Invalid ConsistencyGroup: No host to create consistency group" -msgstr "" -"無効な整合性グループ : 整合性グループを作成するためのホストがありません。" - -#, python-format -msgid "Invalid Group: %(reason)s" -msgstr "無効なグループ: %(reason)s" - -#, python-format -msgid "Invalid GroupSnapshot: %(reason)s" -msgstr "無効なグループスナップショット: %(reason)s" - -#, python-format -msgid "" -"Invalid HPELeftHand API version found: %(found)s. Version %(minimum)s or " -"greater required for manage/unmanage support." 
-msgstr "" -"無効な HPELeftHand API バージョンが見つかりました: %(found)s。管理/非管理のサ" -"ポートには、バージョン %(minimum)s 以上が必要です。" - -#, python-format -msgid "Invalid IP address format: '%s'" -msgstr "無効な IP アドレスの形式: '%s'" - -#, python-format -msgid "" -"Invalid QoS specification detected while getting QoS policy for volume %s" -msgstr "" -"ボリューム %s の QoS ポリシーの取得中に 無効な QoS 仕様が検出されました" - -#, python-format -msgid "Invalid Replication Target: %(reason)s" -msgstr "無効なレプリケーションターゲット: %(reason)s" - -#, python-format -msgid "" -"Invalid Virtuozzo Storage share specification: %r. Must be: [MDS1[," -"MDS2],...:/][:PASSWORD]." -msgstr "" -"Virtuozzo Storage のシェアの指定が無効です: %r。[MDS1[,MDS2],...:/][:PASSWORD] である必要があります。" - -#, python-format -msgid "Invalid XtremIO version %(cur)s, version %(min)s or up is required" -msgstr "" -"XtremIO バージョン %(cur)s は無効です。バージョン %(min)s 以上が必要です" - -#, python-format -msgid "Invalid allocated quotas defined for the following project quotas: %s" -msgstr "" -"以下のプロジェクトのクォータに定義した割り当て済みのクォータが無効です: %s" - -msgid "Invalid argument" -msgstr "引数が無効です。" - -msgid "Invalid argument - negative seek offset." -msgstr "引数が無効です。シークオフセットが負の値です。" - -#, python-format -msgid "Invalid argument - whence=%s not supported" -msgstr "引数が無効です: whence=%s はサポートされていません。" - -#, python-format -msgid "Invalid argument - whence=%s not supported." -msgstr "引数が無効です。whence=%s はサポートされません。" - -#, python-format -msgid "Invalid attaching mode '%(mode)s' for volume %(volume_id)s." -msgstr "接続モード '%(mode)s' はボリューム %(volume_id)s には無効です。" - -#, python-format -msgid "Invalid attachment info for volume %(name)s: %(reason)s" -msgstr "ボリューム %(name)s の接続が無効です : %(reason)s" - -#, python-format -msgid "Invalid auth key: %(reason)s" -msgstr "認証キーが無効です: %(reason)s" - -#, python-format -msgid "Invalid backup: %(reason)s" -msgstr "無効なバックアップ: %(reason)s" - -#, python-format -msgid "Invalid body provided for creating volume. Request API version: %s." 
-msgstr "" -"ボリューム作成に、無効な本文が指定されました。リクエストされた API のバージョ" -"ン: %s" - -msgid "Invalid chap user details found in CloudByte storage." -msgstr "CloudByte のストレージで無効な chap ユーザーの詳細が見つかりました。" - -#, python-format -msgid "Invalid connection initialization response of volume %(name)s" -msgstr "ボリューム %(name)s の接続初期化応答が無効です" - -#, python-format -msgid "" -"Invalid connection initialization response of volume %(name)s: %(output)s" -msgstr "ボリューム %(name)s の接続初期化応答が無効です: %(output)s" - -#, python-format -msgid "Invalid content type %(content_type)s." -msgstr "無効なコンテンツタイプ %(content_type)s。" - -msgid "Invalid credentials" -msgstr "無効な認証情報" - -#, python-format -msgid "Invalid directory: %s" -msgstr "無効なディレクトリー: %s" - -#, python-format -msgid "Invalid disk adapter type: %(invalid_type)s." -msgstr "ディスクアダプタータイプが無効です: %(invalid_type)s。" - -#, python-format -msgid "Invalid disk backing: %s." -msgstr "ディスクバッキングが無効です: %s。" - -#, python-format -msgid "Invalid disk type: %(disk_type)s." -msgstr "ディスクタイプが無効です: %(disk_type)s。" - -#, python-format -msgid "Invalid disk type: %s." -msgstr "ディスクタイプが無効です: %s。" - -#, python-format -msgid "" -"Invalid disk-format '%(disk_format)s' is specified. Allowed disk-formats are " -"%(allowed_disk_formats)s." -msgstr "" -"無効なディスクフォーマット '%(disk_format)s' が指定されました。許可される" -"ディスクフォーマットは %(allowed_disk_formats)s です。" - -#, python-format -msgid "Invalid filter keys: %s" -msgstr "無効なフィルターキー : %s" - -#, python-format -msgid "Invalid group type: %(reason)s" -msgstr "無効なグループ種別: %(reason)s" - -#, python-format -msgid "Invalid host: %(reason)s" -msgstr "無効なホスト: %(reason)s" - -#, python-format -msgid "" -"Invalid hpe3parclient version found (%(found)s). Version %(minimum)s or " -"greater required. Run \"pip install --upgrade python-3parclient\" to upgrade " -"the hpe3parclient." 
-msgstr "" -"無効な hpe3parclient バージョンが見つかりました (%(found)s)。バージョン " -"%(minimum)s 以上が必要です。 \"pip install --upgrade python-3parclient\" を実" -"行して hpe3parclient をアップグレードしてください。" - -#, python-format -msgid "" -"Invalid hpelefthandclient version found (%(found)s). Version %(minimum)s or " -"greater required. Run 'pip install --upgrade python-lefthandclient' to " -"upgrade the hpelefthandclient." -msgstr "" -"無効な hpelefthandclient バージョンが見つかりました (%(found)s)。バージョン " -"%(minimum)s 以上が必要です。 'pip install --upgrade python-lefthandclient' " -"を実行して hpelefthandclient をアップグレードしてください。" - -#, python-format -msgid "Invalid image href %(image_href)s." -msgstr "無効なイメージ href %(image_href)s。" - -msgid "Invalid image identifier or unable to access requested image." -msgstr "イメージ ID が無効か、要求されたイメージにアクセスできません。" - -msgid "Invalid imageRef provided." -msgstr "無効な imageRef が指定されました。" - -msgid "Invalid input" -msgstr "無効な入力" - -#, python-format -msgid "Invalid input received: %(reason)s" -msgstr "無効な入力を受信しました: %(reason)s" - -#, python-format -msgid "Invalid is_public filter [%s]" -msgstr "無効な is_public フィルター [%s]" - -#, python-format -msgid "Invalid lun type %s is configured." -msgstr "無効な LUN タイプ %s が設定されています。" - -#, python-format -msgid "Invalid metadata size: %(reason)s" -msgstr "無効なメタデータサイズ: %(reason)s" - -#, python-format -msgid "Invalid metadata: %(reason)s" -msgstr "メタデータが無効です: %(reason)s" - -#, python-format -msgid "Invalid mount point base: %s" -msgstr "無効なマウントポイントベース: %s" - -#, python-format -msgid "Invalid mount point base: %s." -msgstr "無効なマウントポイントベース: %s" - -#, python-format -msgid "Invalid new snapCPG name for retype. new_snap_cpg='%s'." -msgstr "新規 snapCPG 名がタイプ変更には無効です。new_snap_cpg='%s'。" - -#, python-format -msgid "Invalid port number %(config)s for Coho rpc port" -msgstr "Coho の rpc ポートの無効なポート番号 %(config)s" - -#, python-format -msgid "" -"Invalid prefetch type '%s' is configured. PrefetchType must be in 0,1,2,3." 
-msgstr "" -"無効なプリフェッチタイプ '%s' が設定されています。プリフェッチタイプは 0、1、" -"2、3 でなければなりません。" - -#, python-format -msgid "Invalid qos specs: %(reason)s" -msgstr "qos 仕様が無効です: %(reason)s" - -msgid "Invalid request to attach volume to an invalid target" -msgstr "ボリュームを無効なターゲットに接続する要求は無効です" - -msgid "" -"Invalid request to attach volume with an invalid mode. Attaching mode should " -"be 'rw' or 'ro'" -msgstr "" -"無効なモードでボリュームを接続しようとしているため、要求は無効です。接続モー" -"ドは 'rw' または 'ro' でなければなりません" - -#, python-format -msgid "Invalid reservation expiration %(expire)s." -msgstr "予約の有効期限 %(expire)s が無効です。" - -msgid "Invalid response header from RPC server" -msgstr "RPC サーバーからの無効な応答ヘッダー" - -#, python-format -msgid "Invalid secondary id %s." -msgstr "無効なセカンダリー ID %s。" - -msgid "Invalid service catalog json." -msgstr "無効なサービスカタログ JSON。" - -msgid "Invalid sheepdog cluster status." -msgstr "シープドッグクラスターの状態が無効です。" - -#, python-format -msgid "Invalid snapshot: %(reason)s" -msgstr "無効なスナップショット: %(reason)s" - -#, python-format -msgid "Invalid sort dirs passed: %s" -msgstr "無効なソート方向が渡されました : %s" - -#, python-format -msgid "Invalid sort keys passed: %s" -msgstr "無効なソートキーが渡されました : %s" - -#, python-format -msgid "Invalid status: '%s'" -msgstr "無効な状態: '%s'" - -#, python-format -msgid "Invalid storage pool %s requested. Retype failed." -msgstr "無効なストレージプール %s が要求されました。再入力は失敗しました。" - -#, python-format -msgid "Invalid storage pool %s specificed." -msgstr "無効なストレージプール %s が指定されました。" - -msgid "Invalid storage pool is configured." -msgstr "無効なストレージプールが設定されています。" - -msgid "Invalid transport type." -msgstr "無効なトランスポートタイプ。" - -#, python-format -msgid "Invalid update setting: '%s'" -msgstr "無効な更新設定: '%s'" - -#, python-format -msgid "Invalid value '%s' for delete-volumes flag." -msgstr "delete-volumes フラッグの値 '%s' が無効です。" - -#, python-format -msgid "Invalid value '%s' for force." -msgstr "force の値 '%s' は無効です。" - -#, python-format -msgid "Invalid value '%s' for force. 
" -msgstr "force の値 '%s' は無効です。" - -#, python-format -msgid "Invalid value '%s' for is_public. Accepted values: True or False." -msgstr "is_publicの値 '%s' が無効です。許容される値は True または False です。" - -#, python-format -msgid "Invalid value '%s' for skip_validation." -msgstr "skip_validation の値 '%s' が無効です。" - -#, python-format -msgid "Invalid value for 'bootable': '%s'" -msgstr " 'bootable' の値 '%s' は無効です。" - -#, python-format -msgid "Invalid value for 'force': '%s'" -msgstr "'force' の値 '%s' は無効です。" - -#, python-format -msgid "Invalid value for 'readonly': '%s'" -msgstr " 'readonly' の値 '%s' は無効です。" - -msgid "Invalid value for 'scheduler_max_attempts', must be >=1" -msgstr "'scheduler_max_attempts' の値が無効です。1 以上でなければなりません" - -msgid "Invalid value for NetApp configuration option netapp_host_type." -msgstr "NetApp の設定オプション netapp_host_type の値が無効です。" - -msgid "Invalid value for NetApp configuration option netapp_lun_ostype." -msgstr "NetApp の設定オプション netapp_lun_ostype の値が無効です。" - -#, python-format -msgid "Invalid value for age, %(age)s" -msgstr "年齢 %(age)s の値が無効です" - -#, python-format -msgid "Invalid value: \"%s\"" -msgstr "無効な値: \"%s\"" - -#, python-format -msgid "" -"Invalid volume size provided for create request: %s (size argument must be " -"an integer (or string representation of an integer) and greater than zero)." -msgstr "" -"作成要求に指定されたボリュームサイズ %s は無効です (size 引数は整数(または整" -"数の文字列表記) でなければならず、またゼロより大きくなければなりません)。" - -#, python-format -msgid "Invalid volume type: %(reason)s" -msgstr "無効なボリューム種別: %(reason)s" - -#, python-format -msgid "Invalid volume: %(reason)s" -msgstr "無効なボリューム: %(reason)s" - -#, python-format -msgid "" -"Invalid volume: Cannot add volume %(volume_id)s to consistency group " -"%(group_id)s because volume is in an invalid state: %(status)s. Valid states " -"are: ('available', 'in-use')." 
-msgstr "" -"無効なボリューム: ボリューム %(volume_id)s は無効な状態 %(status)s であるた" -"め、整合性グループ %(group_id)s に追加できません。有効な状態は次のとおりで" -"す: (「使用可能」、「使用中」)。" - -#, python-format -msgid "" -"Invalid volume: Cannot add volume %(volume_id)s to consistency group " -"%(group_id)s because volume type %(volume_type)s is not supported by the " -"group." -msgstr "" -"無効なボリューム: ボリュームタイプ %(volume_type)s は整合性グルー" -"プ%(group_id)s ではサポートされていないため、ボリューム %(volume_id)s をこの" -"整合性グループに追加できません。" - -#, python-format -msgid "" -"Invalid volume: Cannot add volume fake-volume-uuid to consistency group " -"%(group_id)s because volume cannot be found." -msgstr "" -"無効なボリューム: ボリューム fake-volume-uuid は見つからないため、整合性グ" -"ループ %(group_id)s に追加できません。" - -#, python-format -msgid "" -"Invalid volume: Cannot remove volume fake-volume-uuid from consistency group " -"%(group_id)s because it is not in the group." -msgstr "" -"無効なボリューム: ボリューム fake-volume-uuid は整合性グループ%(group_id)s に" -"存在しないため、このグループから削除できません。" - -#, python-format -msgid "Invalid volume_type passed: %s." -msgstr "無効な volume_type が渡されました: %s。" - -#, python-format -msgid "" -"Invalid volume_type provided: %s (requested type is not compatible; either " -"match source volume, or omit type argument)." -msgstr "" -"無効な volume_type が指定されました: %s (要求されるタイプはこれと互換性があり" -"ません。ソースボリュームと合致するか、 タイプの引数を排除する必要がありま" -"す)。" - -#, python-format -msgid "" -"Invalid volume_type provided: %s (requested type is not compatible; " -"recommend omitting the type argument)." -msgstr "" -"無効な volume_type が指定されました: %s (要求されるタイプはこれと互換性があり" -"ません。タイプの引数を排除することを推奨します)。" - -#, python-format -msgid "" -"Invalid volume_type provided: %s (requested type must be supported by this " -"consistency group)." -msgstr "" -"無効な volume_type %s が指定されました (要求するタイプは、この整合性グループ" -"でサポートされていなければなりません)。" - -#, python-format -msgid "" -"Invalid volume_type provided: %s (requested type must be supported by this " -"group)." 
-msgstr "" -"無効な volume_type %s が指定されました (要求するタイプは、このグループでサ" -"ポートされていなければなりません)。" - -#, python-format -msgid "Invalid wwpns format %(wwpns)s" -msgstr "wwpn 形式 %(wwpns)s は無効です" - -msgid "Invoking web service failed." -msgstr "Web サービスの呼び出しが失敗しました。" - -msgid "Issue encountered waiting for job." -msgstr "ジョブの待機中に問題が発生しました。" - -msgid "Issue encountered waiting for synchronization." -msgstr "同期の待機中に問題が発生しました。" - -msgid "" -"Issuing a fail-over failed because replication is not properly configured." -msgstr "" -"レプリケーションが適切に設定されていないため、fail-over の発行が失敗しまし" -"た。" - -#, python-format -msgid "Job id not found in CloudByte's create volume [%s] response." -msgstr "" -"CloudByte のボリューム [%s] 作成に関するレスポンスにジョブ ID が見つかりませ" -"ん。" - -#, python-format -msgid "Job id not found in CloudByte's delete volume [%s] response." -msgstr "" -"CloudByte のボリューム [%s] 削除に関するレスポンスにジョブ ID が見つかりませ" -"ん。" - -msgid "" -"K2 driver does not support clone of a attached volume. To get this done, " -"create a snapshot from the attached volume and then create a volume from the " -"snapshot." -msgstr "" -"K2 ドライバーは接続されたボリュームのクローンをサポートしていません。クローン" -"を行うには、接続されたボリュームのスナップショットを作成して、そのスナップ" -"ショットからボリュームを作成してください。" - -msgid "K2 rest api version search failed." -msgstr "K2 REST API バージョンの検索に失敗しました。" - -#, python-format -msgid "K2 rest api version should be >= %s." -msgstr "K2 REST API バージョンは %s 以上でなければいけません。" - -#, python-format -msgid "Kaminario retryable exception: %(reason)s" -msgstr "再試行可能な Kaminario 例外: %(reason)s" - -#, python-format -msgid "KaminarioCinderDriver failure: %(reason)s" -msgstr "Kaminario Cinder ドライバー障害です: %(reason)s" - -msgid "" -"Key names can only contain alphanumeric characters, underscores, periods, " -"colons and hyphens." -msgstr "" -"鍵の名前に含めることができるのは、英数字、アンダースコアー、ピリオド、コロ" -"ン、およびハイフンのみです。" - -#, python-format -msgid "KeyError: %s" -msgstr "KeyError: %s" - -msgid "Keystone version 3 or greater must be used to get nested quota support." 
-msgstr "" -"ネストされたクォータを使用するには、Keystone のバージョン 3 以降を使用する必" -"要があります。" - -#, python-format -msgid "LU does not exist for volume: %s" -msgstr "ボリューム: %s 用の LU は存在しません" - -msgid "LUN doesn't exist." -msgstr "LUN が存在しません。" - -msgid "LUN export failed!" -msgstr "LUN のエクスポートが失敗しました。" - -msgid "LUN map overflow on every channel." -msgstr "すべてのチャンネルでの LUN マップのオーバーフロー。" - -#, python-format -msgid "LUN not found by UUID: %(uuid)s." -msgstr "UUID %(uuid)s の LUN が見つかりません。" - -#, python-format -msgid "LUN not found with given ref %s." -msgstr "指定された参照 %s を持つ LUN が見つかりません。" - -#, python-format -msgid "LUN number is out of bound on channel id: %(ch_id)s." -msgstr "LUN 番号がチャンネル ID: %(ch_id)s の境界を越えています。" - -msgid "LUN unexport failed" -msgstr "LUN のアンエクスポートが失敗しました。" - -#, python-format -msgid "Last %s cinder syslog entries:-" -msgstr "最後の %s cinder syslog 項目:-" - -msgid "LeftHand cluster not found" -msgstr "LeftHand クラスターが見つかりません" - -msgid "License is unavailable." -msgstr "ライセンスが使用できません。" - -#, python-format -msgid "Line %(dis)d : %(line)s" -msgstr "行 %(dis)d : %(line)s" - -msgid "Link path already exists and its not a symlink" -msgstr "リンクパスは既に存在しますが、symlink ではありません" - -#, python-format -msgid "Linked clone of source volume not supported in state: %s." -msgstr "" -"状態 %s でのソースボリュームのリンクされた複製はサポートされていません。" - -msgid "Lock acquisition failed." -msgstr "ロックの取得に失敗しました。" - -msgid "Login failed." -msgstr "ログインに失敗しました。" - -#, python-format -msgid "Login failure code: %(statuscode)s Error: %(responsetext)s" -msgstr "ログイン失敗 コード: %(statuscode)s エラー: %(responsetext)s" - -msgid "Logout session error." -msgstr "ログアウトセッションのエラー。" - -msgid "" -"Lookup service not configured. Config option for fc_san_lookup_service needs " -"to specify a concrete implementation of the lookup service." -msgstr "" -"ルックアップサービスが構成されていません。fc_san_lookup_service の構成オプ" -"ションはルックアップサービスの具体的実装の指定を必要とします。" - -msgid "Lun migration error." 
-msgstr "LUN マイグレーションのエラー。" - -#, python-format -msgid "" -"MD5 of object: %(object_name)s before: %(md5)s and after: %(etag)s is not " -"same." -msgstr "" -"オブジェクトの MD5: %(md5)s の前と %(etag)s の後の %(object_name)s が同じで" -"はありません。" - -#, python-format -msgid "MSG_DENIED: %r" -msgstr "MSG_DENIED: %r" - -#, python-format -msgid "MSG_DENIED: AUTH_ERROR: %r" -msgstr "MSG_DENIED: AUTH_ERROR: %r" - -#, python-format -msgid "MSG_DENIED: RPC_MISMATCH: %r" -msgstr "MSG_DENIED: RPC_MISMATCH: %r" - -#, python-format -msgid "Malformed fcns output string: %s" -msgstr "fcns 出力ストリングの形式が誤っています: %s" - -#, python-format -msgid "Malformed message body: %(reason)s" -msgstr "メッセージ本体の形式に誤りがあります: %(reason)s" - -#, python-format -msgid "Malformed nameserver string: %s" -msgstr "誤った形式のネームサーバー文字列: %s" - -msgid "Malformed request body" -msgstr "誤った形式の要求本体" - -msgid "Malformed request body." -msgstr "誤った形式のリクエスト本文。" - -msgid "Malformed request url" -msgstr "誤った形式の要求 URL" - -#, python-format -msgid "Malformed response to command %(cmd)s: %(reason)s" -msgstr "コマンド %(cmd)s への応答の形式が誤っています: %(reason)s" - -msgid "Malformed scheduler_hints attribute" -msgstr "scheduler_hints 属性の形式に誤りがあります" - -#, python-format -msgid "Malformed show fcns database string: %s" -msgstr "fcns データベース文字列の形式が誤っています: %s" - -#, python-format -msgid "" -"Malformed zone configuration: (switch=%(switch)s zone_config=" -"%(zone_config)s)." -msgstr "" -"誤った形式のゾーン構成: (switch=%(switch)s zone_config=%(zone_config)s)。" - -#, python-format -msgid "Malformed zone status: (switch=%(switch)s zone_config=%(zone_config)s)." -msgstr "" -"ゾーンステータスの形式が誤っています: (switch=%(switch)s zone_config=" -"%(zone_config)s)" - -msgid "Manage existing get size requires 'id'." -msgstr "既存の get サイズを管理するには 'id' が必要です。" - -msgid "Manage existing snapshot not implemented." 
-msgstr "既存のスナップショットの管理が実装されていません。" - -#, python-format -msgid "" -"Manage existing volume failed due to invalid backend reference " -"%(existing_ref)s: %(reason)s" -msgstr "" -"無効なバックエンド参照 %(existing_ref)s のため、既存ボリュームの管理に失敗し" -"ました: %(reason)s" - -#, python-format -msgid "Manage existing volume failed due to volume type mismatch: %(reason)s" -msgstr "" -"ボリューム種別の不一致のため、既存ボリュームの管理に失敗しました: %(reason)s" - -msgid "Manage existing volume not implemented." -msgstr "既存ボリュームの管理は実装されていません。" - -msgid "Manage existing volume requires 'source-id'." -msgstr "既存のボリュームを管理するには 'source-id' が必要です。" - -#, python-format -msgid "" -"Manage volume is not supported if FAST is enable. FAST policy: " -"%(fastPolicyName)s." -msgstr "" -"FAST が有効化されている場合、ボリュームの管理はサポートされません。FAST ポリ" -"シー: %(fastPolicyName)s。" - -msgid "Manage volume type invalid." -msgstr "管理対象のボリューム種別が不正です。" - -msgid "Managing of snapshots to failed-over volumes is not allowed." -msgstr "" -"フェイルオーバーされたボリュームへのスナップショットを管理対象にすることは許" -"可されません。" - -msgid "Map info is None due to array version not supporting hypermetro." -msgstr "" -"アレイのバージョンが hypermetro をサポートしないことが原因で、マップ情報があ" -"りません。" - -#, python-format -msgid "" -"Mapping %(id)s prepare failed to complete within theallotted %(to)d seconds " -"timeout. Terminating." -msgstr "" -"マッピング %(id)s の準備を、割り当てられたタイムアウトの %(to)d 秒以内に完了" -"できませんでした。終了します。" - -#, python-format -msgid "Masking view %(maskingViewName)s was not deleted successfully" -msgstr "マスキングビュー %(maskingViewName)s は正常に削除されませんでした" - -#, python-format -msgid "" -"Max over subscription is configured to %(ratio)1f while ScaleIO support up " -"to %(sio_ratio)s." 
-msgstr "" -"最大値を超過したサプスクリプションが %(ratio)1f に設定されました。 Scale IO " -"は %(sio_ratio)s までサポートしています。" - -msgid "Max read bandwidth setting for volume qos, use 0 for unlimited" -msgstr "" -"ボリューム QoS の最大読み出し帯域幅設定。無制限にする場合は0を指定します。" - -msgid "Max read iops setting for volume qos, use 0 for unlimited" -msgstr "ボリューム QoS の読み出し iops 設定。無制限にする場合は0を指定します。" - -msgid "Max total bandwidth setting for volume qos, use 0 for unlimited" -msgstr "" -"ボリューム QoS のトータルの最大帯域幅設定。無制限にする場合は0を指定します。" - -msgid "Max total iops setting for volume qos, use 0 for unlimited" -msgstr "" -"ボリューム QoS のトータルの iops 設定。無制限にする場合は0を指定します。" - -msgid "Max write bandwidth setting for volume qos, use 0 for unlimited" -msgstr "" -"ボリューム QoS の最大書き込み帯域幅設定。無制限にする場合は0を指定します。" - -msgid "Max write iops setting for volume qos, use 0 for unlimited" -msgstr "ボリューム QoS の書き込み iops 設定。無制限にする場合は0を指定します。" - -msgid "Maximum age is count of days since epoch." -msgstr "最長存続時間は、エポック以降の日数です。" - -#, python-format -msgid "Maximum number of backups allowed (%(allowed)d) exceeded" -msgstr "バックアップの許容最大数 (%(allowed)d) を超えました。" - -#, python-format -msgid "Maximum number of snapshots allowed (%(allowed)d) exceeded" -msgstr "スナップショットの許容最大数 (%(allowed)d) を超えました。" - -#, python-format -msgid "" -"Maximum number of volumes allowed (%(allowed)d) exceeded for quota " -"'%(name)s'." -msgstr "" -"許容されるボリュームの最大数 (%(allowed)d) がクォータ '%(name)s' を超えまし" -"た。" - -#, python-format -msgid "May specify only one of %s" -msgstr "指定できる %s は 1 つのみです" - -#, python-format -msgid "Message %(message_id)s could not be found." -msgstr "メッセージ %(message_id)s が見つかりませんでした。" - -msgid "Metadata backup already exists for this volume" -msgstr "このボリュームのメタデータバックアップは既に存在します。" - -#, python-format -msgid "Metadata backup object '%s' already exists" -msgstr "メタデータのバックアップオブジェクト '%s' は既に存在します" - -#, python-format -msgid "Metadata property key %s greater than 255 characters." 
-msgstr "メタデータプロパティーのキー %s の文字数が255文字を超えています。" - -#, python-format -msgid "Metadata property key %s value greater than 255 characters." -msgstr "メタデータプロパティーのキー %s の値の文字数が255文字を超えています。" - -msgid "Metadata property key blank." -msgstr "メタデータプロパティーのキーが空です。" - -msgid "Metadata restore failed due to incompatible version" -msgstr "バージョンの非互換のため、メタデータのリストアに失敗しました" - -msgid "Metadata restore failed due to incompatible version." -msgstr "バージョンの非互換のため、メタデータのリストアに失敗しました。" - -#, python-format -msgid "Method %(method)s is not defined" -msgstr "メソッド %(method)s は定義されていません。" - -msgid "" -"Missing 'purestorage' python module, ensure the library is installed and " -"available." -msgstr "" -"'purestorage' の python モジュールがありません。ライブラリーがインストールさ" -"れ、使用可能であることを確認してください。" - -msgid "Missing Fibre Channel SAN configuration param - fc_fabric_names" -msgstr "" -"Fibre Channel の SAN 構成パラメーターの fc_fabric_names が欠落しています" - -#, python-format -msgid "Missing attributes list for API %s." -msgstr "API %s の属性リストがありません。" - -msgid "Missing authentication option (passw or private key file)." -msgstr "認証オプションがありません (パスワードまたは秘密鍵ファイル)" - -msgid "Missing record count for NetApp iterator API invocation." -msgstr "NetApp のイテレーター API 起動記録カウントがありません。" - -msgid "Missing request body" -msgstr "要求本体がありません。" - -msgid "Missing request body." -msgstr "要求本体がありません。" - -#, python-format -msgid "Missing required element '%(element)s' in request body." -msgstr "リクエストの本文に必要な要素 '%(element)s' がありません。" - -#, python-format -msgid "Missing required element '%s' in request body" -msgstr "要求本体に必須要素 '%s' がありません。" - -#, python-format -msgid "Missing required element '%s' in request body." -msgstr "リクエストの本文に必要な要素 '%s' がありません。" - -msgid "Missing required element 'consistencygroup' in request body." -msgstr "リクエストの本文に必要な要素の 'consistencygroup' がありません。" - -msgid "Missing required element 'delete' in request body." -msgstr "リクエストの本文に必要な要素の 'delete' がありません。" - -msgid "Missing required element quota_class_set in request body." 
-msgstr "要求本体に必須要素 quota_class_set がありません。" - -msgid "Missing required element snapshot in request body." -msgstr "リクエストの本文に必要な要素のスナップショットがありません。" - -msgid "" -"Multiple SerialNumbers found, when only one was expected for this operation. " -"Please change your EMC config file." -msgstr "" -"この処理では 1 つの SerialNumber を予期していたものの、複数の SerialNumber が" -"見つかりました。EMC の設定ファイルを変更してください。" - -#, python-format -msgid "Multiple copies of volume %s found." -msgstr "ボリューム %s の複数のコピーが見つかりました。" - -#, python-format -msgid "Multiple matches found for '%s', use an ID to be more specific." -msgstr "" -"'%s' に関して複数の一致が見つかりました。ID を使用して絞り込んでください。" - -msgid "Multiple profiles found." -msgstr "複数のプロファイルが見つかりました。" - -msgid "Must implement a fallback schedule" -msgstr "フォールバックスケジューラーを実装する必要があります。" - -msgid "Must implement find_retype_host" -msgstr "find_retype_host を実装する必要があります" - -msgid "Must implement host_passes_filters" -msgstr "host_passes_filters を実装する必要があります。" - -msgid "Must implement schedule_create_consistencygroup" -msgstr "schedule_create_consistencygroup を実装する必要があります" - -msgid "Must implement schedule_create_group" -msgstr "schedule_create_group の実装が必要です。" - -msgid "Must implement schedule_create_volume" -msgstr "schedule_create_volume を実装する必要があります。" - -msgid "Must implement schedule_get_pools" -msgstr "schedule_get_pools を実装する必要があります" - -msgid "Must pass wwpn or host to lsfabric." -msgstr "wwpn またはホストを lsfabric に渡す必要があります。" - -msgid "" -"Must run this command as cloud admin using a Keystone policy.json which " -"allows cloud admin to list and get any project." -msgstr "" -"クラウド管理者があらゆるプロジェクトのリストと取得を行える Keystone の " -"policy.json を使用して、クラウド管理者としてこのコマンドを実行する必要があり" -"ます。" - -msgid "Must specify 'connector'" -msgstr "'connector' を指定する必要があります" - -msgid "Must specify 'connector'." -msgstr "'connector' を指定する必要があります。" - -msgid "Must specify 'host'." 
-msgstr "'host' を指定する必要があります。" - -msgid "Must specify 'new_volume'" -msgstr "'new_volume' を指定する必要があります。" - -msgid "Must specify 'status'" -msgstr "'status' を指定する必要があります。" - -msgid "" -"Must specify 'status', 'attach_status' or 'migration_status' for update." -msgstr "" -"更新のために 'status'、'attach_status'、または 'migration_status' を指定する" -"必要があります。" - -msgid "Must specify a valid attach status" -msgstr "有効な接続状態を指定してください。" - -msgid "Must specify a valid migration status" -msgstr "有効なマイグレーション状態を指定してください。" - -#, python-format -msgid "Must specify a valid persona %(valid)s,value '%(persona)s' is invalid." -msgstr "" -"有効な個人 %(valid)s を指定する必要があります。値 '%(persona)s' は無効です。" - -#, python-format -msgid "" -"Must specify a valid provisioning type %(valid)s, value '%(prov)s' is " -"invalid." -msgstr "" -"有効なプロビジョニングタイプ %(valid)s を指定する必要があります。値 " -"'%(prov)s' は無効です。" - -msgid "Must specify a valid status" -msgstr "有効な状態を指定してください。" - -msgid "Must specify an ExtensionManager class" -msgstr "ExtensionManager クラスを指定する必要があります" - -msgid "Must specify bootable in request." -msgstr "要求にブート可能を指定する必要があります。" - -msgid "" -"Must specify one or more of the following keys to update: name, description, " -"add_volumes, remove_volumes." -msgstr "" -"更新を行うには、次のキーを一つ以上指定する必要があります : 名前、説明、 " -"add_volumes 、 remove_volumes" - -msgid "Must specify protection domain name or protection domain id." -msgstr "保護ドメインの名前か ID を指定しなければなりません。" - -msgid "Must specify readonly in request." -msgstr "要求内で読み取り専用を指定する必要があります。" - -msgid "Must specify snapshot source-name or source-id." -msgstr "" -"スナップショットの source-name または source-id を指定する必要があります。" - -msgid "Must specify source-name or source-id." -msgstr "ソース名またはソース ID を指定する必要があります。" - -msgid "Must specify storage pool name or id." -msgstr "ストレージプールの名前か ID を指定しなければなりません。" - -msgid "Must specify storage pools. Option: sio_storage_pools." 
-msgstr "" -"ストレージプールを指定しなければなりません。オプション: sio_storage_pools。" - -msgid "Must supply a positive, non-zero value for age" -msgstr "存続期間には正の非ゼロ値を指定してください" - -#, python-format -msgid "" -"NAS config '%(name)s=%(value)s' invalid. Must be 'auto', 'true', or 'false'" -msgstr "" -"NAS 構成「%(name)s=%(value)s」は無効です。「auto」、「true」、「false」のいず" -"れかでなければなりません。" - -#, python-format -msgid "NFS config file at %(config)s doesn't exist" -msgstr "NFS 構成ファイルが %(config)s に存在しません。" - -#, python-format -msgid "NFS file %s not discovered." -msgstr "NFS ファイル %s は検出されていません。" - -msgid "NFS file could not be discovered." -msgstr "NFS ファイルを検出できませんでした。" - -msgid "NULL host not allowed for volume backend lookup." -msgstr "NULL ホストはボリューム・バックエンド検索では許可されません。" - -msgid "NaElement name cannot be null." -msgstr "NaElement 名は NULL にできません。" - -msgid "Name" -msgstr "名前" - -msgid "" -"Name, description, add_volumes, and remove_volumes can not be all empty in " -"the request body." -msgstr "" -"要求本体で、名前、説明、add_volumes、および remove_volumes をすべて空にするこ" -"とはできません。" - -msgid "Need non-zero volume size" -msgstr "ゼロでないボリュームサイズが必要です" - -#, python-format -msgid "Neither MSG_DENIED nor MSG_ACCEPTED: %r" -msgstr "MSG_DENIED でも MSG_ACCEPTED でもありません: %r" - -msgid "NetApp Cinder Driver exception." -msgstr "NetApp Cinder Driver 例外です。" - -#, python-format -msgid "" -"New size for extend must be greater than current size. (current: %(size)s, " -"extended: %(new_size)s)." -msgstr "" -"拡張用に指定する新しいサイズは、現行サイズより大きくなければなりません。(現" -"行: %(size)s、拡張用: %(new_size)s)。" - -#, python-format -msgid "" -"New size should be bigger than the real size from backend storage. realsize: " -"%(oldsize)s, newsize: %(newsize)s." -msgstr "" -"新規サイズはバックエンドストレージの実サイズよりも大きくなければなりません。" -"実サイズ: %(oldsize)s、新規サイズ: %(newsize)s。" - -msgid "New volume size must be specified as an integer." -msgstr "新しいボリュームサイズを整数で指定する必要があります。" - -msgid "New volume type must be specified." 
-msgstr "新規ボリュームタイプを指定する必要があります。" - -msgid "New volume type not specified in request_spec." -msgstr "新規のボリュームタイプが要求仕様に指定されていません。" - -msgid "Nimble Cinder Driver exception" -msgstr "Nimble Cinder ドライバー例外" - -msgid "No FC initiator can be added to host." -msgstr "ホストには FC イニシエーターを追加できません。" - -#, python-format -msgid "No FC port can be used for LUN %s." -msgstr "LUN %s に利用できる FC ポートがありません。" - -msgid "No FC port connected to fabric." -msgstr "ファブリックに接続された FC ポートはありません。" - -msgid "No FC targets found" -msgstr "FC ターゲットが見つかりません。" - -msgid "No FCP targets found" -msgstr "FCP ターゲットが見つかりません" - -msgid "No Port Group elements found in config file." -msgstr "設定ファイルにポートグループの要素が見つかりません。" - -msgid "No VF ID is defined in the configuration file." -msgstr "設定ファイルに VF ID が定義されていません。" - -msgid "No active iSCSI portals with supplied iSCSI IPs" -msgstr "提供された iSCSI IP の iSCSI ポータルがアクティブではありません。" - -#, python-format -msgid "No available service named %s" -msgstr "%s という名前の使用可能なサービスはありません" - -#, python-format -msgid "No backup with id %s" -msgstr "ID %s のバックアップがありません" - -msgid "No backups available to do an incremental backup." -msgstr "増分バックアップを実行するために使用可能なバックアップがありません。" - -msgid "No big enough free disk" -msgstr "十分な大きさの空きディスクがありません。" - -#, python-format -msgid "No cgsnapshot with id %s" -msgstr "ID %s の cgsnapshot は存在しません" - -msgid "No cinder entries in syslog!" -msgstr "cinder 項目が syslog にありません。" - -msgid "No clients in vdev information." -msgstr "vdev 情報にクライアント情報がありません。" - -#, python-format -msgid "No cloned LUN named %s found on the filer" -msgstr "複製された %s という名前の LUN はファイラーで見つかりません" - -msgid "No config node found." -msgstr "設定ノードが見つかりません。" - -#, python-format -msgid "No consistency group with id %s" -msgstr "ID %s の整合性グループは存在しません" - -msgid "No data information in return info." -msgstr "返された情報の中に、データ情報がありません。" - -#, python-format -msgid "No element by given name %s." -msgstr "指定された名前 %s の要素はありません。" - -msgid "No errors in logfiles!" 
-msgstr "ログファイル内にエラーはありません。" - -msgid "No fcdevices in given data." -msgstr "与えられたデータに FC デバイス情報がありません。" - -msgid "No fcdevices information in given data." -msgstr "与えられたデータに fc デバイス情報がありません。" - -#, python-format -msgid "No file found with %s as backing file." -msgstr "バッキングファイルとして %s を持つファイルが見つかりません。" - -msgid "No free FC initiator can be assigned to host." -msgstr "ホストに割り当て可能な、空いている FC イニシエーターがありません。" - -#, python-format -msgid "" -"No free LUN IDs left. Maximum number of volumes that can be attached to host " -"(%s) has been exceeded." -msgstr "" -"空いている LUN ID が残っていません。ホスト (%s) に接続できるボリュームの最大" -"数を超過しています。" - -msgid "No free disk" -msgstr "空きディスクはありません。" - -#, python-format -msgid "No good iscsi portal found in supplied list for %s." -msgstr "正しい iscsi ポータルが %s の指定されたリストに見つかりません。" - -#, python-format -msgid "No good iscsi portals found for %s." -msgstr "%s の正しい iscsi ポータルが見つかりません。" - -#, python-format -msgid "No group snapshot with id %s" -msgstr "id %s のグループスナップショットは存在しません。" - -#, python-format -msgid "No group with id %s" -msgstr "ID %s のグループは存在しません。" - -#, python-format -msgid "No host to create consistency group %s." -msgstr "整合性グループ %s を作成するためのホストがありません。" - -#, python-format -msgid "No host to create group %s." -msgstr "グループ %s を作成するためのホストがありません。" - -msgid "No iSCSI-enabled ports on target array." -msgstr "ターゲット配列に iSCSI に対応するポートがありません。" - -msgid "No image_name was specified in request." -msgstr "要求に image_name が指定されていませんでした。" - -msgid "No initiator connected to fabric." -msgstr "ファブリックに接続されたイニシエーターはありません。" - -#, python-format -msgid "No initiator group found for initiator %s" -msgstr "イニシエーター %s のイニシエーターグループが見つかりません。" - -msgid "No initiators found, cannot proceed" -msgstr "イニシエーターが見つからないため、続行できません" - -#, python-format -msgid "No interface found on cluster for ip %s" -msgstr "ip %s のクラスター上にインターフェースが見つかりませんでした" - -msgid "No iocluster information in given data." -msgstr "与えられたデータに iocluster 情報がありません。" - -msgid "No ip address found." 
-msgstr "IP アドレスが見つかりません。" - -msgid "No iscsi auth groups were found in CloudByte." -msgstr "CloudByte で iscsi 認証グループが見つかりませんでした。" - -msgid "No iscsi initiators were found in CloudByte." -msgstr "iscsi イニシエーターが CloudByte に見つかりませんでした。" - -#, python-format -msgid "No iscsi service found for CloudByte volume [%s]." -msgstr "CloudByte ボリューム [%s] の iscsi サービスが見つかりません。" - -msgid "No iscsi services found in CloudByte storage." -msgstr "iscsi サービスが CloudByte ストレージに見つかりません。" - -msgid "No iscsidevices information in given data." -msgstr "与えられたデータに iSCSI デバイス情報がありません。" - -msgid "No iscsitargets for target." -msgstr "ターゲット用の iSCSI ターゲットがありません、" - -msgid "No iscsitargets in return info." -msgstr "返された情報の中に、iSCSI ターゲット情報がありません。" - -#, python-format -msgid "No key file specified and unable to load key from %(cert)s %(e)s." -msgstr "" -"鍵ファイルが指定されていないため、%(cert)s %(e)s から鍵をロードできません。" - -msgid "No mounted Gluster shares found" -msgstr "マウントされた Gluster 共有が見つかりません" - -msgid "No mounted NFS shares found" -msgstr "マウントされた NFS 共有が見つかりません" - -msgid "No mounted SMBFS shares found." -msgstr "マウントされた SMBFS 共有が見つかりません。" - -msgid "No mounted Virtuozzo Storage shares found" -msgstr "マウントされた Virtuozzo Storage 共有が見つかりません" - -msgid "No mounted shares found" -msgstr "マウントされた共有が見つかりません" - -msgid "No new vlun(s) were created" -msgstr "新しい vlun が作成されていません。" - -#, python-format -msgid "No node found in I/O group %(gid)s for volume %(vol)s." -msgstr "ボリューム %(vol)s の入出力グループ %(gid)s でノードが見つかりません。" - -msgid "No pool available." -msgstr "利用可能なプールがありません。" - -msgid "" -"No pools are available for provisioning volumes. Ensure that the " -"configuration option netapp_pool_name_search_pattern is set correctly." -msgstr "" -"ボリュームのプロビジョニングに使用できるプールがありません。設定オプション " -"netapp_pool_name_search_pattern が正しく設定されていることを確認してくださ" -"い。" - -msgid "" -"No response was received from CloudByte storage list iSCSI auth user API " -"call." 
-msgstr "" -"CloudByte ストレージのリスト iSCSI 認証ユーザーの API の呼び出しからレスポン" -"スがありませんでした。" - -msgid "No response was received from CloudByte storage list tsm API call." -msgstr "" -"CloudByte ストレージのリスト tsm API の呼び出しから応答を受け取りませんでし" -"た。" - -msgid "No response was received from CloudByte's list filesystem api call." -msgstr "" -"CloudByte のリストファイルシステムの API 呼び出しから応答を受け取りませんでし" -"た。" - -msgid "No service VIP configured and no nexenta_client_address" -msgstr "サービス VIP が設定されておらず、nexenta_client_address がありません" - -#, python-format -msgid "No snap found with %s as backing file." -msgstr "バッキングファイルとして %s を持つスナップが見つかりません。" - -#, python-format -msgid "No snapshot image found in snapshot group %s." -msgstr "" -"スナップショットグループ %s にスナップショットイメージが見つかりません。" - -#, python-format -msgid "No snapshots could be found on volume %s." -msgstr "ボリューム %s でスナップショットを見つけることができませんでした。" - -#, python-format -msgid "No source snapshots provided to create consistency group %s." -msgstr "" -"整合性グループ %s を作成するためのソーススナップショットが提供されていませ" -"ん。" - -msgid "" -"No storage could be allocated for this volume request. You may be able to " -"try another size or volume type." -msgstr "" -"このボリュームのリクエストに対して、ストレージを割り当てられませんでした。サ" -"イズやボリューム種別を変更して試すことができるかもしれません。" - -#, python-format -msgid "No storage path found for export path %s" -msgstr "エクスポートパス %s 用のストレージパスが見つかりません" - -#, python-format -msgid "No such QoS spec %(specs_id)s." -msgstr "そのような QoS 仕様 %(specs_id)s は存在しません。" - -msgid "No suitable discovery ip found" -msgstr "適切なディスカバリー ip が見つかりません。" - -msgid "No suitable host was found to failover." -msgstr "フェイルオーバーに適したホストが見つかりませんでした。" - -#, python-format -msgid "No support to restore backup version %s" -msgstr "バックアップバージョン %s をリストアすることができません" - -#, python-format -msgid "No target id found for volume %(volume_id)s." -msgstr "ボリューム %(volume_id)s のターゲット ID が見つかりません。" - -msgid "No target in given data." -msgstr "与えられたデータにターゲット情報がありません。" - -msgid "No target information in given data." 
-msgstr "与えられたデータに ターゲット情報がありません。" - -msgid "" -"No unused LUN IDs are available on the host; multiattach is enabled which " -"requires that all LUN IDs to be unique across the entire host group." -msgstr "" -"ホスト上に使用可能な未使用の LUN ID がありません。すべての LUN ID がホストグ" -"ループ全体で一意である必要のある、マルチ接続が有効になっています。" - -#, python-format -msgid "No valid host was found. %(reason)s" -msgstr "有効なホストが見つかりませんでした。%(reason)s" - -#, python-format -msgid "No valid hosts for volume %(id)s with type %(type)s" -msgstr "タイプ %(type)s のボリューム %(id)s に対して有効なホストがありません" - -msgid "No valid ports." -msgstr "有効なポートがありません。" - -msgid "No vdev information in given data" -msgstr "与えられたデータに vdev 情報がありません。" - -msgid "No vdev information in given data." -msgstr "与えられたデータに vdev 情報がありません。" - -msgid "No vdev sizemb in given data." -msgstr "与えられたデータに vdev sizemb 情報がありません。" - -#, python-format -msgid "No vdisk with the ID specified by ref %s." -msgstr "参照 %s によって指定された ID を持つ vdisk がありません。" - -#, python-format -msgid "No vdisk with the UID specified by ref %s." -msgstr "参照 %s によって指定された UID を持つ vdisk がありません。" - -#, python-format -msgid "No views found for LUN: %s" -msgstr "LUN: %s 用のビューが見つかりませんでした" - -#, python-format -msgid "" -"No volume on cluster with vserver %(vserver)s and junction path %(junction)s " -msgstr "" -"仮想サーバー %(vserver)s および接合パス %(junction)s を含むボリュームはクラス" -"ターにありません" - -msgid "No volume service(s) started successfully, terminating." -msgstr "どのボリュームサービス も正常に起動しませんでした。処理を終了します。" - -msgid "No volume was found at CloudByte storage." -msgstr "CloudByte ストレージでボリュームが見つかりませんでした。" - -msgid "No volume_type should be provided when creating test replica." -msgstr "テストレプリカの作成時に volume_type を指定してはなりません。" - -msgid "No volumes found in CloudByte storage." -msgstr "ボリュームが CloudByte ストレージに見つかりません。" - -#, python-format -msgid "No volumes or consistency groups exist in cluster %(current)s." 
-msgstr "クラスター %(current)s にはボリュームも整合性グループも存在しません。" - -msgid "No weighed hosts available" -msgstr "重み付けを設定したホストが存在しません" - -msgid "No wwpns found in host connector." -msgstr "ホストコネクターで wwpn が見つかりませんでした。" - -msgid "Non-getter API passed to API test method." -msgstr "API テストメソッドに非ゲッター API が渡されました。" - -msgid "None numeric BWS QoS limitation" -msgstr "BWS QoS 制限の数値がありません。" - -msgid "None numeric IOPS QoS limitation" -msgstr "IOPS QoS 制限の数値がありません。" - -#, python-format -msgid "Not a valid string: %s" -msgstr "有効な文字列ではありません: %s" - -msgid "Not a valid value for NaElement." -msgstr "NaElement に無効な値です。" - -#, python-format -msgid "Not able to find a suitable datastore for the volume: %s." -msgstr "ボリューム %s に適したデータストアが見つかりません。" - -msgid "Not an rbd snapshot" -msgstr "rbd スナップショットではありません。" - -#, python-format -msgid "Not authorized for image %(image_id)s." -msgstr "イメージ %(image_id)s では許可されません。" - -msgid "Not authorized." -msgstr "許可されていません。" - -#, python-format -msgid "Not enough space on backend (%(backend)s)" -msgstr "バックエンド容量が不十分です (%(backend)s)" - -msgid "Not enough storage space in the ZFS share to perform this operation." -msgstr "" -"この操作を実行するために十分なストレージスペースが ZFS 共有にありません。" - -msgid "Not stored in rbd" -msgstr "rbd 内に保管されていません。" - -msgid "Nova returned \"error\" status while creating snapshot." -msgstr "" -"スナップショットの作成時に Nova から「エラー」ステータスが返されました。" - -msgid "Null response received from CloudByte's list filesystem." -msgstr "" -"CloudByte のリストファイルシステムから Null のレスポンスを受信しました。" - -msgid "Null response received from CloudByte's list iscsi auth groups." -msgstr "" -"CloudByte の リスト iscsi 認証グループから Null のレスポンスを受信しました。" - -msgid "Null response received from CloudByte's list iscsi initiators." -msgstr "CloudByte のリスト iscsi イニシエーターからヌル応答を受け取りました。" - -msgid "Null response received from CloudByte's list volume iscsi service." -msgstr "" -"CloudByte のリストボリューム iscsi サービスからヌル応答を受け取りました。" - -#, python-format -msgid "Null response received while creating volume [%s] at CloudByte storage." 
-msgstr "" -"CloudByte のストレージでボリューム [%s] を作成中に Null のレスポンスを受信し" -"ました。" - -#, python-format -msgid "Null response received while deleting volume [%s] at CloudByte storage." -msgstr "" -"CloudByte のストレージでボリューム [%s] を削除中に Null のレスポンスを受信し" -"ました。" - -#, python-format -msgid "" -"Null response received while querying for [%(operation)s] based job " -"[%(job)s] at CloudByte storage." -msgstr "" -"CloudByte のストレージで [%(operation)s] に 関するジョブ [%(job)s] の検索中" -"に Null のレスポンスを受信しました。" - -msgid "Object Count" -msgstr "オブジェクト数" - -msgid "Object Version" -msgstr "オブジェクトのバージョン" - -msgid "Object is not a NetApp LUN." -msgstr "オブジェクトは NetApp LUN ではありません。" - -#, python-format -msgid "" -"On an Extend Operation, error adding volume to composite volume: " -"%(volumename)s." -msgstr "" -"拡張操作時に複合ボリューム %(volumename)s へのボリュームの追加中にエラーが発" -"生しました。" - -msgid "One of the required inputs from host, port or scheme was not found." -msgstr "" -"ホスト、ポート、またはスキーマからの必須の入力の 1 つが見つかりませんでした。" - -msgid "" -"One of the services is in Liberty version. We do not provide backward " -"compatibility with Liberty now, you need to upgrade to Mitaka first." -msgstr "" -"サービスの一つが Liberty バージョンです。現在、 Liberty への後方互換性は提供" -"していません。 先に Mitaka へのアップグレードが必要です。" - -#, python-format -msgid "" -"Only %(value)s %(verb)s request(s) can be made to %(uri)s every " -"%(unit_string)s." -msgstr "" -"%(uri)s に対して実行できる要求は、%(unit_string)s につき %(value)s %(verb)s " -"要求に限られます。" - -msgid "Only one limit can be set in a QoS spec." -msgstr "QoS 仕様に設定できる制限は 1 つのみです。" - -msgid "" -"Only users with token scoped to immediate parents or root projects are " -"allowed to see its children quotas." -msgstr "" -"直近の親またはルートプロジェクトに割り当てられたトークンを持つユーザーのみ" -"が、子のクォータを参照することができます。" - -msgid "Only volumes managed by OpenStack can be unmanaged." -msgstr "非管理に設定できるのは、OpenStack が管理するボリュームのみです。" - -#, python-format -msgid "Operation failed with status=%(status)s. 
Full dump: %(data)s" -msgstr "状態=%(status)s で操作が失敗しました。フルダンプ: %(data)s" - -#, python-format -msgid "Operation not supported: %(operation)s." -msgstr "操作はサポートされていません: %(operation)s." - -msgid "Option gpfs_images_dir is not set correctly." -msgstr "gpfs_images_dir オプションが正しく設定されていません。" - -msgid "Option gpfs_images_share_mode is not set correctly." -msgstr "gpfs_images_share_mode オプションが正しく設定されていません。" - -msgid "Option gpfs_mount_point_base is not set correctly." -msgstr "gpfs_mount_point_base オプションが正しく設定されていません。" - -#, python-format -msgid "Originating %(res)s %(prop)s must be one of '%(vals)s' values" -msgstr "" -"作成元の %(res)s %(prop)s は '%(vals)s' 値のいずれかでなければなりません。" - -msgid "Param [identifier] is invalid." -msgstr "パラメーター [identifier] が無効です。" - -msgid "Param [lun_name] is invalid." -msgstr "パラメーター [lun_name] が無効です。" - -msgid "Param [snapshot_uuid] is invalid." -msgstr "パラメーター [snapshot_uuid] が無効です。" - -#, python-format -msgid "ParseException: %s" -msgstr "ParseException: %s" - -msgid "Partition name is None, please set smartpartition:partitionname in key." -msgstr "" -"パーティション名がありません。キーで smartpartition:partitionname を設定して" -"ください。" - -msgid "Password has expired or has been reset, please change the password." -msgstr "" -"パスワードの期限が切れているか、リセットされました。パスワードを変更してくだ" -"さい。" - -msgid "" -"Password or SSH private key is required for authentication: set either " -"san_password or san_private_key option." -msgstr "" -"認証にはパスワードまたは SSH 秘密鍵が必要です: san_password または " -"san_private_key オプションを設定してください。" - -msgid "Path to REST server's certificate must be specified." -msgstr "REST サーバーの証明書へのパスを指定しなければなりません。" - -#, python-format -msgid "Please create %(pool_list)s pool in advance!" -msgstr "事前に %(pool_list)s プールを作成してください。" - -#, python-format -msgid "Please create %(tier_levels)s tier in pool %(pool)s in advance!" 
-msgstr "事前にプール %(pool)s に %(tier_levels)s 層を作成してください。" - -#, python-format -msgid "Please provide at least one volume for parameter %s" -msgstr "パラメーター %s には、少なくとも1つのボリューム を指定してください。" - -#, python-format -msgid "Please provide valid format volume: lun for parameter %s" -msgstr "パラメーター %s に、正しい形式で ボリューム: lun を指定してください。" - -msgid "Please specify a name for QoS specs." -msgstr "QoS 仕様の名前を指定してください。" - -#, python-format -msgid "Policy doesn't allow %(action)s to be performed." -msgstr "ポリシーは %(action)s の実行を許可していません。" - -#, python-format -msgid "Pool %(poolNameInStr)s is not found." -msgstr "プール %(poolNameInStr)s が見つかりません。" - -#, python-format -msgid "Pool %s does not exist in Nexenta Store appliance" -msgstr "プール %s は Nexenta Store アプライアンスに存在しません" - -#, python-format -msgid "Pool from volume['host'] %(host)s not found." -msgstr "ボリュームのプール ['host'] %(host)s が見つかりません。" - -#, python-format -msgid "Pool from volume['host'] failed with: %(ex)s." -msgstr "ボリュームのプール ['host'] が以下のため失敗しました: %(ex)s。" - -msgid "Pool is not available in the cinder configuration fields." -msgstr "プールが cinder 設定フィールドにありません。" - -msgid "Pool is not available in the volume host field." -msgstr "プールがボリュームホストフィールドにありません。" - -msgid "Pool is not available in the volume host fields." -msgstr "プールがボリュームホストフィールドにありません。" - -#, python-format -msgid "Pool with name %(pool)s wasn't found in domain %(domain)s." -msgstr "名前が %(pool)s のプールがドメイン %(domain)s で見つかりませんでした。" - -#, python-format -msgid "Pool with name %(pool_name)s wasn't found in domain %(domain_id)s." -msgstr "" -"名前が %(pool_name)s のプールがドメイン %(domain_id)s で見つかりませんでし" -"た。" - -#, python-format -msgid "" -"Pool: %(poolName)s. is not associated to storage tier for fast policy " -"%(fastPolicy)s." -msgstr "" -"プール %(poolName)s は、FAST ポリシー %(fastPolicy)s のストレージ層に関連付け" -"られていません。" - -#, python-format -msgid "PoolName must be in the file %(fileName)s." 
-msgstr "PoolName は %(fileName)s ファイル内に存在する必要があります。" - -#, python-format -msgid "Pools %s does not exist" -msgstr "プール %s は存在しません" - -msgid "Pools name is not set." -msgstr "プール名が設定されていません。" - -#, python-format -msgid "Primary copy status: %(status)s and synchronized: %(sync)s." -msgstr "プライマリーコピーの状態: %(status)s および同期済み: %(sync)s。" - -#, python-format -msgid "Programming error in Cinder: %(reason)s" -msgstr "Cinder でのプログラミングエラー : %(reason)s" - -msgid "Project ID" -msgstr "プロジェクト ID" - -msgid "Project name not specified" -msgstr "プロジェクト名が指定されていません。" - -#, python-format -msgid "Project quotas are not properly setup for nested quotas: %(reason)s." -msgstr "" -"ネストされたクォータに対してプロジェクトのクォータが適切に設定されていませ" -"ん: %(reason)s" - -#, python-format -msgid "Project: %s not found" -msgstr "プロジェクト %s: 見つかりませんでした。" - -msgid "Protection Group not ready." -msgstr "保護グループの準備ができていません。" - -#, python-format -msgid "" -"Protocol %(storage_protocol)s is not supported for storage family " -"%(storage_family)s." -msgstr "" -"ストレージファミリー %(storage_family)s ではプロトコル %(storage_protocol)s " -"はサポートされません。" - -msgid "Provided backup record is missing an id" -msgstr "提供されたバックアップレコードに ID がありません" - -#, python-format -msgid "" -"Provided snapshot status %(provided)s not allowed for snapshot with status " -"%(current)s." -msgstr "" -"指定されたスナップショットステータス %(provided)s は、ステータスが " -"%(current)s となっているスナップショットには許可されません。" - -#, python-format -msgid "" -"Provider information w.r.t CloudByte storage was not found for OpenStack " -"volume [%s]." -msgstr "" -"OpenStack のボリューム [%s] について、CloudByte のストレージに関するプロバイ" -"ダー情報が見つかりませんでした。" - -#, python-format -msgid "Pure Storage Cinder driver failure: %(reason)s" -msgstr "Pure Storage Cinder ドライバー障害です: %(reason)s" - -msgid "Purge command failed, check cinder-manage logs for more details." -msgstr "Purge コマンドが失敗しました。詳細はログを確認して下さい。" - -#, python-format -msgid "QoS Specs %(specs_id)s already exists." 
-msgstr "QoS 仕様 %(specs_id)s は既に存在します。" - -#, python-format -msgid "QoS Specs %(specs_id)s is still associated with entities." -msgstr "QoS 仕様 %(specs_id)s はまだエンティティーと関連付けられています。" - -#, python-format -msgid "QoS config is wrong. %s must > 0." -msgstr "QoS 設定が誤っています。%s は 0 より大きくなければなりません。" - -#, python-format -msgid "" -"QoS policy must specify for IOTYPE and another qos_specs, QoS policy: " -"%(qos_policy)s." -msgstr "" -"IOTYPE ともう 1 つの qos_specs に QoS ポリシーを指定する必要があります。QoS " -"ポリシー: %(qos_policy)s。" - -#, python-format -msgid "" -"QoS policy must specify for IOTYPE: 0, 1, or 2, QoS policy: %(qos_policy)s " -msgstr "" -"IOTYPE に QoS ポリシーを指定する必要があります: 0、1、または 2、QoS ポリ" -"シー: %(qos_policy)s " - -#, python-format -msgid "" -"QoS policy upper_limit and lower_limit conflict, QoS policy: %(qos_policy)s." -msgstr "" -"QoS ポリシー upper_limit と lower_limit が競合しています。QoS ポリシー: " -"%(qos_policy)s。" - -#, python-format -msgid "QoS spec %(specs_id)s has no spec with key %(specs_key)s." -msgstr "" -"QoS 仕様 %(specs_id)s には、キー %(specs_key)s を持つ仕様はありません。" - -msgid "QoS specs are not supported on this storage family and ONTAP version." -msgstr "" -"QoS 仕様はこのストレージファミリーおよび ONTAP バージョンでサポートされませ" -"ん。" - -msgid "Qos specs still in use." -msgstr "Qos 仕様はまだ使用中です。" - -#, python-format -msgid "Query Dsware version failed! Retcode is %s." -msgstr "Dsware でバージョンのクエリーに失敗しました。 Retcode は %s です。" - -msgid "" -"Query by service parameter is deprecated. Please use binary parameter " -"instead." -msgstr "" -"サービスパラメーターによる照会は推奨されません。代わりにバイナリーパラメー" -"ターを使用してください。" - -msgid "Query resource pool error." -msgstr "リソースプール照会のエラー。" - -#, python-format -msgid "Query volume attach failed, result=%s." -msgstr "Query ボリュームの接続に失敗しました。結果 = %s" - -#, python-format -msgid "Quota %s limit must be equal or greater than existing resources." -msgstr "クォータ %s の制限は既存のリソース以上である必要があります。" - -#, python-format -msgid "Quota class %(class_name)s could not be found." 
-msgstr "クォータクラス %(class_name)s が見つかりませんでした。" - -msgid "Quota could not be found" -msgstr "クォータが見つかりませんでした。" - -#, python-format -msgid "Quota exceeded for resources: %(overs)s" -msgstr "リソースのクォータを超過しました: %(overs)s" - -#, python-format -msgid "Quota exceeded: code=%(code)s" -msgstr "クォータを超過しました: code=%(code)s" - -#, python-format -msgid "Quota for project %(project_id)s could not be found." -msgstr "プロジェクト %(project_id)s のクォータが見つかりませんでした。" - -#, python-format -msgid "" -"Quota limit invalid for project '%(proj)s' for resource '%(res)s': limit of " -"%(limit)d is less than in-use value of %(used)d" -msgstr "" -"リソース '%(res)s' のプロジェクト'%(proj)s' に関するクォーター上限が無効で" -"す : %(limit)d の上限が %(used)d の使用中の値よりも小さくなっています" - -#, python-format -msgid "Quota reservation %(uuid)s could not be found." -msgstr "クォータの予約 %(uuid)s が見つかりませんでした。" - -#, python-format -msgid "Quota usage for project %(project_id)s could not be found." -msgstr "プロジェクト %(project_id)s のクォータ使用率が見つかりませんでした。" - -#, python-format -msgid "RBD diff op failed - (ret=%(ret)s stderr=%(stderr)s)" -msgstr "RBD diff 操作が失敗しました: (ret=%(ret)s stderr=%(stderr)s)" - -#, python-format -msgid "REST %(proxy_ver)s hpelefthandclient %(rest_ver)s" -msgstr "REST %(proxy_ver)s hpelefthandclient %(rest_ver)s" - -msgid "REST Async Error: Command not accepted." -msgstr "REST 非同期エラー: コマンドが許可されていません。" - -msgid "REST server IP must by specified." -msgstr "REST サーバーの IP を指定しなければなりません。" - -msgid "REST server password must by specified." -msgstr "REST サーバーのパスワードを指定しなければなりません。" - -msgid "REST server username must by specified." -msgstr "REST サーバーのユーザー名を指定しなければなりません。" - -msgid "RPC Version" -msgstr "RPC のバージョン" - -msgid "RPC server response is incomplete" -msgstr "RPC サーバーの応答が完了していません。" - -msgid "Raid did not have MCS Channel." -msgstr "RAID に MCS チャンネルがありません。" - -#, python-format -msgid "Received error string: %s" -msgstr "エラー文字列を受信しました: %s" - -msgid "Reference must be for an unmanaged snapshot." 
-msgstr "参照は非管理対象のスナップショットに対するものでなければなりません。" - -msgid "Reference must be for an unmanaged virtual volume." -msgstr "非管理対象の仮想ボリュームに対する参照でなければなりません。" - -msgid "Reference must be the name of an unmanaged snapshot." -msgstr "参照は非管理対象のスナップショットの名前でなければなりません。" - -msgid "Reference must be the volume name of an unmanaged virtual volume." -msgstr "参照は非管理対象仮想ボリュームのボリューム名でなければなりません。" - -msgid "Reference must contain either source-name or source-id element." -msgstr "" -"参照には source-name または source-id のいずれかの要素が含まれていなければな" -"りません。" - -msgid "Reference must contain source-id or source-name element." -msgstr "" -"参照には source-id または source-name の要素が含まれていなければなりません。" - -msgid "Reference must contain source-id or source-name key." -msgstr "" -"参照には source-id または source-name キーが含まれていなければなりません。" - -msgid "Reference must contain source-id or source-name." -msgstr "参照には source-id または source-name が含まれていなければなりません。" - -msgid "Reference must contain source-id." -msgstr "参照には source-id が含まれていなければなりません。" - -msgid "Reference must contain source-name element." -msgstr "参照には source-name 要素が含まれていなければなりません。" - -msgid "Reference must contain source-name or source-id." -msgstr "参照には source-name または source-id が含まれていなければなりません。" - -msgid "Reference must contain source-name." -msgstr "参照には source-name が含まれていなければなりません。" - -msgid "Reference to volume to be managed must contain source-name." -msgstr "" -"管理対象のボリュームへの参照には source-name が含まれていなければなりません。" - -#, python-format -msgid "Reference to volume: %s to be managed must contain source-name." -msgstr "" -"管理対象のボリューム: %s への参照には source-name が含まれていなければなりま" -"せん。" - -#, python-format -msgid "" -"Refusing to migrate volume ID: %(id)s. Please check your configuration " -"because source and destination are the same Volume Group: %(name)s." -msgstr "" -"ボリューム ID %(id)s のマイグレーションを拒否中。ソースと宛先が同じボリューム" -"グループ %(name)s であるため、構成を確認してください。" - -msgid "Remote pool cannot be found." -msgstr "リモートプールが見つかりません。" - -msgid "Remove CHAP error." 
-msgstr "CHAP 削除のエラー。" - -msgid "Remove fc from host error." -msgstr "ホストからの FC 削除のエラー。" - -msgid "Remove host from array error." -msgstr "アレイからのホスト削除のエラー。" - -msgid "Remove host from hostgroup error." -msgstr "ホストグループからのホスト削除のエラー。" - -msgid "Remove iscsi from host error." -msgstr "ホストからの iSCSI 削除のエラー。" - -msgid "Remove lun from QoS error." -msgstr "QoS からの LUN 削除のエラー。" - -msgid "Remove lun from cache error." -msgstr "キャッシュからの LUN 削除のエラー。" - -msgid "Remove lun from partition error." -msgstr "パーティションからの LUN 削除のエラー" - -msgid "Remove port from port group error." -msgstr "ポートグループからのポート削除のエラー。" - -msgid "Remove volume export failed." -msgstr "ボリュームのエクスポートの削除に失敗しました。" - -msgid "Rename lun on array error." -msgstr "アレイでの LUN 名前変更のエラー。" - -msgid "Rename snapshot on array error." -msgstr "アレイでのスナップショット名前変更のエラー。" - -#, python-format -msgid "Replication %(name)s to %(ssn)s failed." -msgstr "%(name)s の %(ssn)s へのレプリケーションが失敗しました。" - -#, python-format -msgid "Replication Service Capability not found on %(storageSystemName)s." -msgstr "複製サービス機能が %(storageSystemName)s に見つかりません。" - -#, python-format -msgid "Replication Service not found on %(storageSystemName)s." -msgstr "複製サービスが %(storageSystemName)s に見つかりません。" - -msgid "" -"Replication is configured, but no MirrorView/S enabler installed on VNX." -msgstr "" -"レプリケーションが設定されていますが、 VNX にMirrorView/S イネーブラーがイン" -"ストールされていません。" - -msgid "Replication not allowed yet." -msgstr "まだレプリケーションを行うことはできません。" - -msgid "" -"Replication setup failure: replication has been enabled but no replication " -"target has been specified for this backend." -msgstr "" -"レプリケーションセットアップ失敗: レプリケーションが有効になっていますが、レ" -"プリケーションターゲットがこのバックエンド用に指定されていません。" - -msgid "" -"Replication setup failure: replication:livevolume has been enabled but more " -"than one replication target has been specified for this backend." 
-msgstr "" -"レプリケーションセットアップ失敗: ライブボリュームが有効になっていますが、1つ" -"以上のレプリケーションターゲットがこのバックエンド用に指定されています。" - -msgid "Request body and URI mismatch" -msgstr "要求本体と URI の不一致" - -msgid "Request body contains too many items" -msgstr "要求本体に含まれる項目が多すぎます" - -msgid "Request body contains too many items." -msgstr "要求本体に含まれる項目が多すぎます。" - -msgid "Request body empty" -msgstr "要求本体が空です" - -#, python-format -msgid "Request to Datera cluster returned bad status: %(status)s | %(reason)s" -msgstr "" -"Datera クラスターに対する要求から、正しくない状態が返されました: %(status)s " -"| %(reason)s" - -#, python-format -msgid "" -"Requested backup exceeds allowed Backup gigabytes quota. Requested " -"%(requested)sG, quota is %(quota)sG and %(consumed)sG has been consumed." -msgstr "" -"要求されたバックアップが許容バックアップクォータ (ギガバイト) を超えていま" -"す。要求量 %(requested)s G、クォータ %(quota)s G、消費量 %(consumed)s. G。" - -msgid "Requested resource is currently unavailable" -msgstr "要求されたリソースは現在利用できません。" - -#, python-format -msgid "" -"Requested volume or snapshot exceeds allowed %(name)s quota. Requested " -"%(requested)sG, quota is %(quota)sG and %(consumed)sG has been consumed." -msgstr "" -"スナップショットにリクエストされたボリュームが許容される %(name)s のクォータ" -"を超えています。%(requested)sG がリクエストされ、%(quota)sG のクォータが設定" -"され、%(consumed)sG が使用されています。" - -#, python-format -msgid "" -"Requested volume size %(size)d is larger than maximum allowed limit " -"%(limit)d." -msgstr "" -"リクエストされたボリュームサイズ %(size)d が許容される最大サイズ %(limit)d を" -"超えています。" - -msgid "Required configuration not found" -msgstr "必要な構成が見つかりません" - -#, python-format -msgid "Required flag %s is not set" -msgstr "必須フラグ %s が設定されていません" - -#, python-format -msgid "" -"Reset backup status aborted, the backup service currently configured " -"[%(configured_service)s] is not the backup service that was used to create " -"this backup [%(backup_service)s]." 
-msgstr "" -"バックアップの状態のリセットを中止しました。現在構成されているバックアップ" -"サービス [%(configured_service)s] は、このバックアップの作成に使用されたバッ" -"クアップサービス [%(backup_service)s] ではありません。" - -#, python-format -msgid "Resizing clone %s failed." -msgstr "複製 %s のリサイズが失敗しました。" - -msgid "Resizing image file failed." -msgstr "イメージファイルのサイズ変更が失敗しました。" - -msgid "Resource could not be found." -msgstr "リソースが見つかりませんでした。" - -msgid "Resource not ready." -msgstr "リソースが作動不能です。" - -#, python-format -msgid "Response error - %s." -msgstr "応答エラー - %s。" - -msgid "Response error - The storage-system is offline." -msgstr "応答エラー - 該当の storage-system はオフラインです。" - -#, python-format -msgid "Response error code - %s." -msgstr "応答エラーコード - %s。" - -msgid "RestURL is not configured." -msgstr "Rest URL は設定されていません。" - -#, python-format -msgid "" -"Restore backup aborted, expected volume status %(expected_status)s but got " -"%(actual_status)s." -msgstr "" -"バックアップのリストアが中止しました。予期していたボリュームの状態は " -"%(expected_status)s ですが、%(actual_status)s を受け取りました。" - -#, python-format -msgid "" -"Restore backup aborted, the backup service currently configured " -"[%(configured_service)s] is not the backup service that was used to create " -"this backup [%(backup_service)s]." -msgstr "" -"バックアップのリストアが中止しました。現在構成されているバックアップサービス " -"[%(configured_service)s] は、このバックアップの作成に使用されたバックアップ" -"サービス [%(backup_service)s] ではありません。" - -#, python-format -msgid "" -"Restore backup aborted: expected backup status %(expected_status)s but got " -"%(actual_status)s." -msgstr "" -"バックアップのリストアが中止しました。予期していたバックアップの状態は " -"%(expected_status)s ですが、%(actual_status)s を受け取りました。" - -#, python-format -msgid "" -"Retrieved a different amount of SolidFire volumes for the provided Cinder " -"snapshots. Retrieved: %(ret)s Desired: %(des)s" -msgstr "" -"指定された Cinder スナップショットについて異なる量の SolidFire ボリュームを検" -"出しました。%(ret)s を検出しましたが、%(des)s を期待していました" - -#, python-format -msgid "" -"Retrieved a different amount of SolidFire volumes for the provided Cinder " -"volumes. 
Retrieved: %(ret)s Desired: %(des)s" -msgstr "" -"指定された Cinder ボリュームについて異なる量の SolidFire ボリュームを検出しま" -"した。%(ret)s を検出しましたが、%(des)s を期待していました。" - -#, python-format -msgid "Retry count exceeded for command: %s" -msgstr "コマンドの再試行回数を超過しました: %s" - -msgid "Retryable Dell Exception encountered" -msgstr "再試行可能な Dell 例外が発生しました" - -msgid "Retryable Pure Storage Exception encountered" -msgstr "再試行可能な Pure Storage 例外が発生しました" - -msgid "Retryable SolidFire Exception encountered" -msgstr "再試行可能な SolidFire 例外が発生しました" - -msgid "" -"Retype needs volume to be in available or in-use state, not be part of an " -"active migration or a consistency group, requested type has to be different " -"that the one from the volume, and for in-use volumes front-end qos specs " -"cannot change." -msgstr "" -"タイプ変更では、ボリュームが利用可能か使用中の状態であること、アクティブマイ" -"グレーションや整合性グループの一部ではないこと、要求したタイプが現在のボ" -"リュームの一つと異なることが必要です。また、使用中のボリュームのフロントエン" -"ド qos 仕様は変更できません。" - -msgid "Retype requires migration but is not allowed." -msgstr "タイプ変更するにはマイグレーションが必要ですが、許可されていません。" - -msgid "" -"Rollback - Volume in another storage group besides default storage group." -msgstr "" -"ロールバック - デフォルトのストレージグループとは別のストレージグループのボ" -"リューム。" - -#, python-format -msgid "" -"Rollback for Volume: %(volumeName)s has failed. Please contact your system " -"administrator to manually return your volume to the default storage group " -"for fast policy/ slo." -msgstr "" -"ボリューム %(volumeName)s のロールバックに失敗しました。システム管理者に連絡" -"して、ボリュームを失敗した FAST ポリシー / slo のデフォルトのストレージグルー" -"プに手動で戻してください。" - -#, python-format -msgid "Rolling back %(volumeName)s by deleting it." -msgstr "%(volumeName)s を削除してロールバックしています。" - -#, python-format -msgid "" -"Running Cinder with a VMware vCenter version less than %s is not allowed." -msgstr "" -"Cinder をバージョン %s 以前の VMware vCenter と共に実行することは許可されてい" -"ません。" - -msgid "SAN product is not configured." -msgstr "SAN 製品は設定されていません。" - -msgid "SAN protocol is not configured." 
-msgstr "SAN プロトコルは設定されていません。" - -#, python-format -msgid "SMBFS config 'smbfs_oversub_ratio' invalid. Must be > 0: %s" -msgstr "" -"SMBFS 構成 'smbfs_oversub_ratio' は無効です。0 より大きくなければなりません: " -"%s" - -#, python-format -msgid "SMBFS config 'smbfs_used_ratio' invalid. Must be > 0 and <= 1.0: %s" -msgstr "" -"SMBFS 構成 'smbfs_used_ratio' は無効です。0 より大きく、1.0 以下でなければな" -"りません: %s" - -#, python-format -msgid "SMBFS config file at %(config)s doesn't exist." -msgstr "%(config)s の SMBFS 構成ファイルは存在しません。" - -msgid "SMBFS config file not set (smbfs_shares_config)." -msgstr "SMBFS 構成ファイルが設定されていません (smbfs_shares_config)。" - -#, python-format -msgid "SSH Command failed after '%(total_attempts)r' attempts : '%(command)s'" -msgstr "" -"'%(total_attempts)r' 回の試行後に SSH コマンドが失敗しました: '%(command)s'" - -#, python-format -msgid "SSH command injection detected: %(command)s" -msgstr "SSH コマンド注入が検出されました: %(command)s" - -#, python-format -msgid "SSH connection failed for %(fabric)s with error: %(err)s" -msgstr "%(fabric)s の SSH 接続がエラー %(err)s で失敗しました" - -#, python-format -msgid "SSL Certificate expired on %s." -msgstr "%s で SSL 証明書の有効期限が切れました。" - -#, python-format -msgid "SSL error: %(arg)s." -msgstr "SSL エラー: %(arg)s。" - -#, python-format -msgid "Scheduler Host Filter %(filter_name)s could not be found." -msgstr "" -"スケジューラーホストフィルター %(filter_name)s が見つかりませんでした。" - -#, python-format -msgid "Scheduler Host Weigher %(weigher_name)s could not be found." -msgstr "スケジューラーホスト Weigher %(weigher_name)s が見つかりませんでした。" - -#, python-format -msgid "" -"Search URI %s is not in the expected format, it should end with ?tag={0}" -msgstr "" -"検索 URI %s は期待される形式ではありません。 URI は ?tag={0} で終わる必要が" -"あります。" - -#, python-format -msgid "" -"Secondary copy status: %(status)s and synchronized: %(sync)s, sync progress " -"is: %(progress)s%%." 
-msgstr "" -"セカンダリーコピーの状態: %(status)s および同期済み: %(sync)s、同期が進行中: " -"%(progress)s%%。" - -#, python-format -msgid "" -"Secondary id can not be the same as primary array, backend_id = " -"%(secondary)s." -msgstr "" -"セカンダリー ID はプライマリー配列と同じであってはいけません。backend_id は " -"%(secondary)s です。" - -#, python-format -msgid "Section: %(svc_name)s" -msgstr "セクション: %(svc_name)s" - -#, python-format -msgid "SerialNumber must be in the file %(fileName)s." -msgstr "SerialNumber は %(fileName)s ファイル内に存在する必要があります。" - -#, python-format -msgid "Service %(service)s on host %(host)s removed." -msgstr "ホスト %(host)s 上のサービス %(service)s を削除しました。" - -#, python-format -msgid "Service %(service_id)s could not be found on host %(host)s." -msgstr "ホスト %(host)s でサービス%(service_id)s が見つかりませんでした。" - -#, python-format -msgid "Service %(service_id)s could not be found." -msgstr "サービス %(service_id)s が見つかりませんでした。" - -msgid "Service is too old to fulfil this request." -msgstr "サービスが古すぎるため、このリクエストに対応できません。" - -msgid "Service is unavailable at this time." -msgstr "現在サービスは使用できません。" - -msgid "" -"Service temporarily unavailable: The server is temporarily unable to service " -"your request" -msgstr "" -"サービス一時利用不可: サーバーは一時的にリクエストに対するサービスを提供でき" -"ません。" - -msgid "Session might have expired." -msgstr "セッションが期限切れになった可能性があります。" - -msgid "Set pair secondary access error." -msgstr "ペアのセカンダリーアクセス設定のエラー。" - -msgid "Sets thin provisioning." -msgstr "シンプロビジョニングを設定します。" - -msgid "" -"Setting LUN QoS policy group is not supported on this storage family and " -"ONTAP version." -msgstr "" -"LUN QoS ポリシーグループの設定は、このストレージファミリーおよび ONTAP バー" -"ジョンではサポートされていません。" - -msgid "" -"Setting file qos policy group is not supported on this storage family and " -"ontap version." -msgstr "" -"ファイル qos ポリシーグループの設定は、このストレージファミリーおよびontap " -"バージョンではサポートされていません。" - -msgid "" -"Setup is incomplete. Device mapping not found from FC network. Cannot " -"perform VLUN creation." 
-msgstr "" -"セットアップは未完了です。 FC ネットワークでデバイスマッピングが見つかりませ" -"んでした。 VLUN の作成を実行できません。" - -#, python-format -msgid "" -"Share %s ignored due to invalid format. Must be of form address:/export. " -"Please check the nas_host and nas_share_path settings." -msgstr "" -"形式が無効であるため、共有 %s は無視されました。address:/export 形式でなけれ" -"ばなりません。nas_host および nas_share_path の設定を確認してください。" - -#, python-format -msgid "" -"Share at %(dir)s is not writable by the Cinder volume service. Snapshot " -"operations will not be supported." -msgstr "" -"%(dir)s にある共有は、Cinder ボリュームサービスによって書き込み可能ではありま" -"せん。スナップショット操作はサポートされません。" - -#, python-format -msgid "Sheepdog I/O Error, command was: \"%s\"." -msgstr "シープドッグの I/O エラー。実行されたコマンド: \"%s\"." - -msgid "" -"Show operations can only be made to projects in the same hierarchy of the " -"project in which users are scoped to." -msgstr "" -"処理の表示を行えるのは、ユーザーが割り当てられたプロジェクトと同じ階層にある" -"プロジェクトに限られます。" - -msgid "Size" -msgstr "サイズ" - -#, python-format -msgid "Size for volume: %s not found, cannot secure delete." -msgstr "ボリュームのサイズ %s が見つかりません、セキュアな削除ができません。" - -#, python-format -msgid "" -"Size is %(image_size)dGB and doesn't fit in a volume of size " -"%(volume_size)dGB." -msgstr "" -"サイズは %(image_size)d GB で、サイズ %(volume_size)d GB のボリュームに適合し" -"ません。" - -#, python-format -msgid "" -"Size of specified image %(image_size)sGB is larger than volume size " -"%(volume_size)sGB." -msgstr "" -"指定されたイメージのサイズ %(image_size)s GB がボリュームサイズ " -"%(volume_size)s GB を上回っています。" - -msgid "SnapMirror features require Data ONTAP 8.2 or later." -msgstr "SnapMirror 機能を利用するには Data ONTAP 8.2 以上が必要です。" - -msgid "SnapMirror relationship is not quiesced." 
-msgstr "SnapMirror relationship は静止されていません。" - -#, python-format -msgid "" -"Snapshot %(cgsnapshot_id)s: for Consistency Group %(cg_name)s: delete " -"failed\n" -"%(err)s" -msgstr "" -"整合性グループ %(cg_name)s のスナップショット %(cgsnapshot_id)s: 作成に失敗し" -"ました。\n" -"%(err)s" - -#, python-format -msgid "" -"Snapshot %(id)s has been asked to be deleted while waiting for it to become " -"available. Perhaps a concurrent request was made." -msgstr "" -"スナップショット ID %(id)s が使用可能になるよう待機している途中に削除対象に" -"指定されました。同時実行リクエストが行われた可能性があります。" - -#, python-format -msgid "" -"Snapshot %(id)s was found in state %(state)s rather than 'deleting' during " -"cascade delete." -msgstr "" -"カスケードの削除中に、「削除中」ではなく %(state)s の状態でスナップショット " -"%(id)s が見つかりました。" - -#, python-format -msgid "Snapshot %(snapshot_id)s could not be found." -msgstr "スナップショット %(snapshot_id)s が見つかりませんでした。" - -#, python-format -msgid "Snapshot %(snapshot_id)s has no metadata with key %(metadata_key)s." -msgstr "" -"スナップショット %(snapshot_id)s にはキー %(metadata_key)s を持つメタデータは" -"ありません。" - -#, python-format -msgid "" -"Snapshot %(src_snapshot_name)s: clone failed\n" -"%(err)s" -msgstr "" -"スナップショット %(src_snapshot_name)s: クローンが失敗しました。\n" -"%(err)s" - -#, python-format -msgid "Snapshot %s : Delete Failed\n" -msgstr "スナップショット %s : 削除が失敗しました。\n" - -#, python-format -msgid "Snapshot %s must not be part of a group." -msgstr "スナップショット %s はグループの一部であってはなりません。" - -#, python-format -msgid "Snapshot '%s' doesn't exist on array." -msgstr "スナップショット '%s' はアレイに存在しません。" - -msgid "Snapshot ID must be in UUID form." -msgstr "スナップショット ID は UUID 形式でなければいけません。" - -msgid "Snapshot already managed." -msgstr "スナップショットはすでに管理されています。" - -msgid "" -"Snapshot can't be taken individually on a volume that is part of a " -"Consistency Group" -msgstr "" -"整合性グループの一部のボリュームのスナップショットを個別に取得することはでき" -"ません。" - -#, python-format -msgid "" -"Snapshot cannot be created because volume %(vol_id)s is not available, " -"current volume status: %(vol_status)s." 
-msgstr "" -"ボリューム %(vol_id)s が「使用可能」ではないため、スナップショットを作成でき" -"ません。現在のボリュームの状態は %(vol_status)s です。" - -msgid "Snapshot cannot be created while volume is migrating." -msgstr "" -"ボリュームのマイグレーション中にスナップショットを作成することはできません。" - -msgid "" -"Snapshot delete can't be done individually on a volume that is part of a " -"Consistency Group" -msgstr "" -"整合性グループの一部のボリュームのスナップショットを個別に削除することはでき" -"ません。" - -#, python-format -msgid "" -"Snapshot for Consistency Group %(cg_name)s: create failed\n" -"%(err)s" -msgstr "" -"整合性グループ %(cg_name)s のスナップショット: 作成に失敗しました。\n" -"%(err)s" - -msgid "Snapshot has a temporary snapshot that can't be deleted at this time." -msgstr "" -"スナップショットには今回削除できない一時的なスナップショットが含まれていま" -"す。" - -msgid "Snapshot has children and cannot be deleted!" -msgstr "スナップショットには子が含まれており、削除できません。" - -msgid "Snapshot of secondary replica is not allowed." -msgstr "2 次レプリカのスナップショットは許可されません。" - -#, python-format -msgid "Snapshot of volume not supported in state: %s." -msgstr "状態 %s でのボリュームのスナップショットはサポートされていません。" - -#, python-format -msgid "Snapshot res \"%s\" that is not deployed anywhere?" -msgstr "" -"スナップショットリソース \"%s\" がどこかにデプロイされていないかを確認してく" -"ださい。" - -#, python-format -msgid "Snapshot status %(cur)s not allowed for update_snapshot_status" -msgstr "" -"スナップショットの状態 %(cur)s は update_snapshot_status には許可されません。" - -msgid "Snapshot status must be \"available\" to clone." -msgstr "" -"複製を行うには、スナップショットの状態が「使用可能」でなければなりません。" - -#, python-format -msgid "" -"Snapshot to be backed up must be available, but the current status is \"%s\"." -msgstr "" -"バックアップ対象のスナップショットが利用可能である必要がありますが、現在の状" -"態は \"%s\" です。" - -#, python-format -msgid "Snapshot with id of %s could not be found." 
-msgstr "%s の ID を持つスナップショットを見つけることができませんでした。" - -#, python-format -msgid "Snapshot with name %(snaplabel)s already exists under %(typename)s" -msgstr "" -"名前 %(snaplabel)s を持つスナップショットは、 %(typename)s の下にすでに存在し" -"ます。" - -#, python-format -msgid "" -"Snapshot: %(snapshotname)s, create failed\n" -"%(err)s" -msgstr "" -"スナップショット %(snapshotname)s: 作成が失敗しました。\n" -"%(err)s" - -#, python-format -msgid "Snapshot: %s search failed in K2." -msgstr "K2 でスナップショット %s の検索に失敗しました。" - -#, python-format -msgid "" -"Snapshot='%(snap)s' does not exist in base image='%(base)s' - aborting " -"incremental backup" -msgstr "" -"スナップショット '%(snap)s' は基本イメージ '%(base)s' 内に存在しません: 増分" -"バックアップを打ち切ります" - -#, python-format -msgid "Snapshots are not supported for this volume format: %s" -msgstr "このボリューム形式では、スナップショットはサポートされていません: %s" - -#, python-format -msgid "Socket error: %(arg)s." -msgstr "ソケットエラー: %(arg)s。" - -msgid "SolidFire Cinder Driver exception" -msgstr "SolidFire Cinder Driver 例外" - -msgid "Sort direction array size exceeds sort key array size." -msgstr "ソート方向の配列サイズがソートキーの配列サイズを超えています。" - -msgid "" -"Source CG cannot be empty or in 'creating' or 'updating' state. No " -"cgsnapshot will be created." -msgstr "" -"ソース CG が空もしくは 'creating' や 'updating' の状態ではいけません。 " -"cgsnapshot は作成されません。" - -msgid "Source CG is empty. No consistency group will be created." -msgstr "ソース CG が空です。整合性グループは作成されません。" - -msgid "Source Group is empty. No group will be created." -msgstr "ソースグループが空です。グループは作成されません。" - -msgid "" -"Source group cannot be empty or in 'creating' or 'updating' state. No group " -"snapshot will be created." -msgstr "" -"ソース グループ が空もしくは 'creating' や 'updating' の状態ではいけませ" -"ん。 グループスナップショット は作成されません。" - -msgid "Source host details not found." -msgstr "ソースホスト詳細が見つかりません" - -msgid "Source volume device ID is required." -msgstr "ソースボリュームのデバイス ID が必要です。" - -msgid "Source volume not mid-migration." 
-msgstr "ソースボリュームはマイグレーション中ではありません" - -msgid "SpaceInfo returned byarray is invalid" -msgstr "アレイによって返された SpaceInfo が無効です。" - -#, python-format -msgid "" -"Specified host to map to volume %(vol)s is in unsupported host group with " -"%(group)s." -msgstr "" -"ボリューム %(vol)s にマップする指定されたホストは、サポート対象ではない " -"%(group)s のホストグループ内にあります。" - -msgid "Specified logical volume does not exist." -msgstr "指定された論理ボリュームは存在しません。" - -#, python-format -msgid "Specified snapshot group with id %s could not be found." -msgstr "" -"ID %s の指定されたスナップショットグループを見つけることができませんでした。" - -msgid "Specifies IP pool to use for volume" -msgstr "ボリュームに用いる IP プールを指定します。" - -msgid "" -"Specifies number of replicas for each volume. Can only be increased once " -"volume is created" -msgstr "" -"各ボリュームで作成するレプリカ数を指定します。ボリュームが一度作成されると、" -"値は増やすことのみ可能です。" - -msgid "Specify a password or private_key" -msgstr "パスワードまたは private_key を指定してください。" - -msgid "Specify group type name, description or a combination thereof." -msgstr "グループ種別の名前、説明、またはこれらの組み合わせを指定してください。" - -msgid "Specify san_password or san_private_key" -msgstr "san_password または san_private_key を指定してください" - -msgid "" -"Specify volume type name, description, is_public or a combination thereof." -msgstr "" -"ボリュームタイプの名前、説明、is_public、またはこれらの組み合わせを指定してく" -"ださい。" - -msgid "Split pair error." -msgstr "ペア分割のエラー。" - -msgid "Split replication failed." -msgstr "レプリケーションの分割が失敗しました。" - -msgid "Start LUNcopy error." -msgstr "LUN コピー開始のエラー。" - -msgid "State" -msgstr "状態" - -#, python-format -msgid "State of node is wrong. Current state is %s." -msgstr "ノードの状態が誤っています。現在の状態は %s です。" - -msgid "Status" -msgstr "ステータス" - -msgid "Stop snapshot error." -msgstr "スナップショット停止のエラー。" - -#, python-format -msgid "Storage Configuration Service not found on %(storageSystemName)s." -msgstr "ストレージ構成サービスが %(storageSystemName)s に見つかりません。" - -#, python-format -msgid "Storage HardwareId mgmt Service not found on %(storageSystemName)s." 
-msgstr "" -"ストレージハードウェア ID 管理サービスが %(storageSystemName)s に見つかりませ" -"ん。" - -#, python-format -msgid "Storage Profile %s not found." -msgstr "ストレージプロファイル %s が見つかりません。" - -#, python-format -msgid "Storage Relocation Service not found on %(storageSystemName)s." -msgstr "ストレージ再配置サービスが %(storageSystemName)s に見つかりません。" - -#, python-format -msgid "Storage family %s is not supported." -msgstr "ストレージファミリー %s はサポートされていません。" - -#, python-format -msgid "Storage group %(storageGroupName)s was not deleted successfully" -msgstr "ストレージグループ %(storageGroupName)s は正常に削除されませんでした" - -#, python-format -msgid "Storage host %(svr)s not detected, verify name" -msgstr "ストレージホスト %(svr)s が検出されません。名前を検証してください。" - -msgid "Storage pool is not configured." -msgstr "ストレージプールが設定されていません。" - -#, python-format -msgid "Storage profile: %(storage_profile)s not found." -msgstr "ストレージプロファイル %(storage_profile)s が見つかりません。" - -msgid "Storage resource could not be found." -msgstr "ストレージリソースが見つかりませんでした。" - -msgid "Storage system id not set." -msgstr "ストレージシステム ID が設定されていません。" - -#, python-format -msgid "Storage system not found for pool %(poolNameInStr)s." -msgstr "プール %(poolNameInStr)s のストレージシステムが見つかりません。" - -#, python-format -msgid "StorageSystem %(array)s is not found." -msgstr "ストレージシステム %(array)s が見つかりません。" - -#, python-format -msgid "" -"Successfully renamed %(num_vols)s volumes and %(num_cgs)s consistency groups " -"from cluster %(current)s to %(new)s" -msgstr "" -"%(num_vols)s ボリュームと %(num_cgs)s 整合性グループの、クラスター " -"%(current)s から %(new)s への名前変更が正常に行われました。" - -#, python-format -msgid "" -"Sum of child usage '%(sum)s' is greater than free quota of '%(free)s' for " -"project '%(proj)s' for resource '%(res)s'. Please lower the limit or usage " -"for one or more of the following projects: '%(child_ids)s'" -msgstr "" -"子の使用量の合計 '%(sum)s' がリソース '%(res)s' のプロジェクト'%(proj)s' に" -"関するクォータ '%(free)s' よりも大きくなっています。以下のプロジェクトの 1 つ" -"以上について上限または使用量を減らしてください: '%(child_ids)s'" - -msgid "Switch over pair error." 
-msgstr "ペア切り替えのエラー。" - -msgid "Sync pair error." -msgstr "ペア同期のエラー。" - -#, python-format -msgid "Synology driver authentication failed: %(reason)s." -msgstr "Synology ドライバーの認証が失敗しました: %(reason)s" - -#, python-format -msgid "System %(id)s found with bad password status - %(pass_status)s." -msgstr "正しくない状態 - %(pass_status)s のシステム %(id)s が見つかりました。" - -#, python-format -msgid "System %(id)s found with bad status - %(status)s." -msgstr "システム %(id)s で正しくない状態 %(status)s が見つかりました。" - -msgid "System does not support compression." -msgstr "システムは圧縮をサポートしません。" - -msgid "System is busy, retry operation." -msgstr "システムがビジー状態です。再試行してください。" - -#, python-format -msgid "" -"TSM [%(tsm)s] was not found in CloudByte storage for account [%(account)s]." -msgstr "" -"アカウント [%(account)s] の CloudByte ストレージで、TSM [%(tsm)s] が見つかり" -"ませんでした。" - -msgid "Target group type is still in use." -msgstr "ターゲットグループ種別はまだ使用中です。" - -msgid "Target volume type is still in use." -msgstr "ターゲットボリュームタイプはまだ使用中です。" - -#, python-format -msgid "" -"Task did not complete in %d secs. Operation timed out. Task in CoprHD will " -"continue" -msgstr "" -"タスクが %d 秒以内に完了しませんでした。オペレーションはタイムアウトしまし" -"た。 CoprHD 内のタスクは継続されます。" - -#, python-format -msgid "Task: %(task_id)s is failed with error: %(error_message)s" -msgstr "タスク: %(task_id)s が失敗しました。エラー: %(error_message)s" - -#, python-format -msgid "Tenant %s: not found" -msgstr "テナント %s: 見つかりませんでした。" - -msgid "Terminate connection failed" -msgstr "接続を強制終了できませんでした。" - -msgid "Terminate connection unable to connect to backend." -msgstr "バックエンドに接続できない接続を強制終了します。" - -#, python-format -msgid "Terminate volume connection failed: %(err)s" -msgstr "ボリューム接続の終了に失敗しました: %(err)s" - -#, python-format -msgid "The %(type)s %(id)s source to be replicated was not found." -msgstr "複製する %(type)s %(id)s ソースが見つかりませんでした。" - -msgid "" -"The 'sort_key' and 'sort_dir' parameters are deprecated and cannot be used " -"with the 'sort' parameter." 
-msgstr "" -"'sort_key' パラメーターおよび 'sort_dir' パラメーターは非推奨であり、'sort' " -"パラメーターと併用することはできません。" - -msgid "The CG does not exist on array." -msgstr "CG はアレイに存在しません。" - -msgid "The EQL array has closed the connection." -msgstr "EQL アレイが接続を閉じました。" - -#, python-format -msgid "" -"The GPFS filesystem %(fs)s is not at the required release level. Current " -"level is %(cur)s, must be at least %(min)s." -msgstr "" -"GPFS ファイルシステム %(fs)s は必要なリリースレベルに達していません。現在のレ" -"ベルは %(cur)s です。%(min)s 以上は必要です。" - -msgid "The IP Address was not found." -msgstr "IP アドレスが見つかりませんでした。" - -#, python-format -msgid "" -"The WebDAV request failed. Reason: %(msg)s, Return code/reason: %(code)s, " -"Source Volume: %(src)s, Destination Volume: %(dst)s, Method: %(method)s." -msgstr "" -"WebDAV 要求が失敗しました。理由: %(msg)s、戻りコード/理由: %(code)s、ソースボ" -"リューム: %(src)s、宛先ボリューム: %(dst)s、メソッド: %(method)s。" - -msgid "" -"The above error may show that the database has not been created.\n" -"Please create a database using 'cinder-manage db sync' before running this " -"command." -msgstr "" -"上のエラーは、データベースが作成されなかったことを示している可能性がありま" -"す。\n" -"このコマンドを実行する前に、'cinder-manage db sync' を使用してデータベースを" -"作成してください。" - -msgid "The allocated size must less than total size." -msgstr "割り当てサイズは合計のサイズよりも少ない必要があります。" - -#, python-format -msgid "" -"The array does not support the storage pool setting for SLO %(slo)s and " -"workload %(workload)s. Please check the array for valid SLOs and workloads." -msgstr "" -"配列が SLO %(slo)s とワークロード %(workload)s のストレージプール設置をサポー" -"トしません。配列で有効な SLO とワークロードを確認してください。" - -#, python-format -msgid "" -"The array does not support the storage pool setting for SLO %(slo)s or " -"workload %(workload)s. Please check the array for valid SLOs and workloads." 
-msgstr "" -"アレイが SLO %(slo)s またはワークロード %(workload)s のストレージプール設置を" -"サポートしません。アレイで有効な SLO とワークロードを確認してください。" - -msgid "" -"The authentication service failed to provide the location of the service URI " -"when redirecting back" -msgstr "" -"Authentication サービスで、リダイレクトで戻るサービス URI の提供に失敗しまし" -"た。" - -msgid "The authentication service failed to reply with 401" -msgstr "Authentication サービスが失敗し、 401 エラーが返されました。" - -msgid "" -"The back-end where the volume is created does not have replication enabled." -msgstr "" -"ボリュームが作成されるバックエンドに有効になっているレプリケーションがありま" -"せん。" - -#, python-format -msgid "" -"The command %(cmd)s failed. (ret: %(ret)s, stdout: %(out)s, stderr: %(err)s)" -msgstr "" -"コマンド %(cmd)s が失敗しました。(ret: %(ret)s、stdout: %(out)s、stderr: " -"%(err)s)" - -msgid "The config parameter hnas_mgmt_ip0 is not set in the cinder.conf." -msgstr "設定パラメーター hnas_mgmt_ip0 が cinder.conf に設定されていません。" - -#, python-format -msgid "" -"The config parameter hnas_svc%(idx)s_hdp is not set in the cinder.conf. Note " -"that you need to have at least one pool configured." -msgstr "" -"設定パラメーター hnas_svc%(idx)s_hdp が cinder.conf に設定されていません。少" -"なくとも一つのプールの設定が必要です。" - -#, python-format -msgid "" -"The config parameter hnas_svc%(idx)s_iscsi_ip is not set in the cinder.conf. " -"Note that you need to have at least one pool configured." -msgstr "" -"設定パラメーター hnas_svc%(idx)s_iscsi_ip が cinder.conf に設定されていませ" -"ん。少なくとも一つのプールの設定が必要です。" - -#, python-format -msgid "" -"The config parameter hnas_svc%(idx)s_volume_type is not set in the cinder." -"conf. Note that you need to have at least one pool configured." -msgstr "" -"設定パラメーター hnas_svc%(idx)s_volume_type が cinder.conf に設定されていま" -"せん。少なくとも一つのプールの設定が必要です。" - -msgid "The config parameter hnas_username is not set in the cinder.conf." 
-msgstr "設定パラメーター hnas_username が cinder.conf に設定されていません。" - -msgid "The copy should be primary or secondary" -msgstr "コピーは 1 次または 2 次であることが必要です" - -#, python-format -msgid "" -"The creation of a logical device could not be completed. (LDEV: %(ldev)s)" -msgstr "論理デバイスの作成を完了できませんでした。(LDEV: %(ldev)s)" - -msgid "The decorated method must accept either a volume or a snapshot object" -msgstr "" -"装飾されたメソッドは、ボリュームとスナップショットオブジェクトのいずれもを受" -"け付けることができません。" - -msgid "The decorated method must accept image_meta." -msgstr "装飾されたメソッドは、 image_meta を受け入れる必要があります。" - -#, python-format -msgid "The device in the path %(path)s is unavailable: %(reason)s" -msgstr "パス %(path)s のデバイスは使用不可です: %(reason)s" - -msgid "The domain_name config in cinder.conf is wrong." -msgstr "cinder.conf 内の domain_name 設定が誤っています。" - -#, python-format -msgid "The end time (%(end)s) must be after the start time (%(start)s)." -msgstr "終了時刻 (%(end)s) は開始時刻 (%(start)s) より後でなければなりません。" - -#, python-format -msgid "The extraspec: %(extraspec)s is not valid." -msgstr "追加仕様: %(extraspec)s は無効です。" - -#, python-format -msgid "The failed-over volume could not be deleted: %s" -msgstr "フェイルオーバーされたボリュームを削除することができませんでした: %s" - -#, python-format -msgid "The following elements are required: %s" -msgstr "次の要素が必要です: %s" - -msgid "" -"The given pool info must include the storage pool and naming start with " -"OpenStack-" -msgstr "" -"与えられるプール情報にはストレージプールが含まれていなければならず、名前は " -"Openstack から始まる必要があります。" - -msgid "The host group or iSCSI target could not be added." -msgstr "ホストグループまたは iSCSI ターゲットを追加できませんでした。" - -msgid "The host group or iSCSI target was not found." -msgstr "ホストグループまたは iSCSI ターゲットが見つかりませんでした。" - -msgid "" -"The host is not ready to be failed back. Please resynchronize the volumes " -"and resume replication on the 3PAR backends." -msgstr "" -"ホストはフェイルバックを行う準備ができていません。3PAR バックエンドでボリュー" -"ムを再同期し、レプリケーションを再開してください。" - -msgid "" -"The host is not ready to be failed back. 
Please resynchronize the volumes " -"and resume replication on the LeftHand backends." -msgstr "" -"ホストはフェイルバックを行う準備ができていません。LeftHand バックエンドでボ" -"リュームを再同期し、複製を再開してください。" - -msgid "" -"The host is not ready to be failed back. Please resynchronize the volumes " -"and resume replication on the Storwize backends." -msgstr "" -"ホストはフェイルバックを行う準備ができていません。Storwize バックエンドでボ" -"リュームを再同期し、複製を再開してください。" - -#, python-format -msgid "The iSCSI CHAP user %(user)s does not exist." -msgstr "iSCSI CHAP ユーザー %(user)s は存在しません。" - -#, python-format -msgid "" -"The imported lun is in pool %(lun_pool)s which is not managed by the host " -"%(host)s." -msgstr "" -"インポートした LUN はホスト %(host)s に管理されていないプール %(lun_pool)s に" -"あります。" - -#, python-format -msgid "The job has not completed and is in a %(state)s state." -msgstr "ジョブが完了していません。状態は %(state)s です。" - -msgid "The key cannot be None." -msgstr "キーは None に設定することはできません。" - -#, python-format -msgid "The logical device for specified %(type)s %(id)s was already deleted." -msgstr "指定された %(type)s %(id)s の論理デバイスは既に削除されました。" - -#, python-format -msgid "The method %(method)s is timed out. (timeout value: %(timeout)s)" -msgstr "" -"メソッド %(method)s がタイムアウトになりました。(タイムアウト値: %(timeout)s)" - -msgid "The method update_migrated_volume is not implemented." -msgstr "update_migrated_volume メソッドが実装されていません。" - -#, python-format -msgid "" -"The mount %(mount_path)s is not a valid Quobyte USP volume. Error: %(exc)s" -msgstr "" -"マウント %(mount_path)s は有効な Quobyte USP ボリュームではありません。エ" -"ラー: %(exc)s" - -msgid "The name to use for storage instances created" -msgstr "作成したストレージインスタンスにも用いる名前" - -msgid "The name to use for volumes created" -msgstr "作成したボリュームに用いる名前" - -#, python-format -msgid "The parameter of the storage backend. (config_group: %(config_group)s)" -msgstr "ストレージバックエンドのパラメーター。(config_group: %(config_group)s)" - -msgid "The parent backup must be available for incremental backup." 
-msgstr "増分バックアップでは親バックアップが使用可能でなければなりません。" - -#, python-format -msgid "The provided snapshot '%s' is not a snapshot of the provided volume." -msgstr "" -"指定されたスナップショット '%s' は指定されたボリュームのスナップショットでは" -"ありません。" - -msgid "The redirect location of the authentication service is not provided" -msgstr "Authentication サービスのリダイレクトの場所が指定されていません。" - -msgid "" -"The reference to the volume in the backend should have the format " -"file_system/volume_name (volume_name cannot contain '/')" -msgstr "" -"バックエンドのボリュームへの参照の形式は file_system/volume_name " -"(volume_name には '/' を使用できません) でなければなりません" - -#, python-format -msgid "The remote retention count must be %s or less." -msgstr "リモートの保存数は %s 以下でなければなりません。" - -msgid "" -"The replication mode was not configured correctly in the volume type " -"extra_specs. If replication:mode is periodic, replication:sync_period must " -"also be specified and be between 300 and 31622400 seconds." -msgstr "" -"ボリューム種別の extra_specs でレプリケーションモードが正しく構成されていませ" -"ん。replication:mode が periodic の場合、replication:sync_period must も 300 " -"秒から 31622400 秒の間に設定しなければなりません。" - -#, python-format -msgid "The replication sync period must be at least %s seconds." -msgstr "レプリケーション同期期間は少なくとも %s 秒でなければなりません。" - -#, python-format -msgid "" -"The requested size : %(requestedSize)s is not the same as resulting size: " -"%(resultSize)s." -msgstr "" -"要求されたサイズ %(requestedSize)s が、結果として作成されたサイ" -"ズ%(resultSize)s と同一ではありません。" - -#, python-format -msgid "The resource %(resource)s was not found." -msgstr "リソース %(resource)s が見つかりませんでした。" - -msgid "The resource is a FSS thin device, minimum size is 10240 MB." -msgstr "リソースは FSS ボリュームです。最小のサイズは 10240 MB です。" - -msgid "The resource is a thin device, thin size is invalid." -msgstr "リソースは thin デバイスです。 thin サイズが正しくありません。" - -msgid "The results are invalid." -msgstr "結果が無効です。" - -#, python-format -msgid "The retention count must be %s or less." -msgstr "保存数は %s 以下でなければなりません。" - -msgid "The san_secondary_ip param is null." 
-msgstr " san_secondary_ip パラメーターが null です。" - -msgid "The snapshot cannot be created when the volume is in error status." -msgstr "" -"ボリュームの状態が「エラー」である場合は、スナップショットを作成できません。" - -msgid "The snapshot cannot be created when the volume is in maintenance mode." -msgstr "" -"ボリュームがメンテナンスモードの場合は、スナップショットを作成できません。" - -#, python-format -msgid "The snapshot is unavailable: %(data)s" -msgstr "スナップショットは使用できません : %(data)s" - -#, python-format -msgid "The snapshot's parent in ScaleIO is %(ancestor)s and not %(volume)s." -msgstr "" -"Scale IO 内のこのスナップショットの親は %(ancestor)s で、 %(volume)s ではあ" -"りません。" - -msgid "" -"The snapshot's parent is not the original parent due to deletion or revert " -"action, therefore this snapshot cannot be managed." -msgstr "" -"このスナップショットの親は、削除中やアクションの取り消し中などの理由で、オリ" -"ジナルのものではありません。このため、このスナップショットは管理できません。" - -msgid "The source volume for this WebDAV operation not found." -msgstr "この WebDAV 操作のソースボリュームが見つかりません。" - -#, python-format -msgid "" -"The source volume type '%(src)s' is different than the destination volume " -"type '%(dest)s'." -msgstr "" -"ソースのボリュームタイプ '%(src)s' が宛先のボリュームタイプ '%(dest)s' と異な" -"ります。" - -#, python-format -msgid "The source volume type '%s' is not available." -msgstr "ソースボリュームタイプ '%s' は使用できません。" - -#, python-format -msgid "The specified %(desc)s is busy." -msgstr "指定された %(desc)s は使用中です。" - -#, python-format -msgid "The specified LUN does not belong to the given pool: %s." -msgstr "指定された LUN は指定のプールに属していません: %s。" - -#, python-format -msgid "" -"The specified ldev %(ldev)s could not be managed. The ldev must not be " -"mapping." -msgstr "" -"指定された ldev %(ldev)s を管理できませんでした。ldev をマッピングしてはなり" -"ません。" - -#, python-format -msgid "" -"The specified ldev %(ldev)s could not be managed. The ldev must not be " -"paired." -msgstr "" -"指定された ldev %(ldev)s を管理できませんでした。ldev をペアにしてはなりませ" -"ん。" - -#, python-format -msgid "" -"The specified ldev %(ldev)s could not be managed. The ldev size must be in " -"multiples of gigabyte." 
-msgstr "" -"指定された ldev %(ldev)s を管理できませんでした。ldev サイズはギガバイトの倍" -"数でなければなりません。" - -#, python-format -msgid "" -"The specified ldev %(ldev)s could not be managed. The volume type must be DP-" -"VOL." -msgstr "" -"指定された ldev %(ldev)s を管理できませんでした。ボリュームタイプは DP-VOL で" -"なければなりません。" - -#, python-format -msgid "" -"The specified operation is not supported. The volume size must be the same " -"as the source %(type)s. (volume: %(volume_id)s)" -msgstr "" -"指定された操作はサポートされていません。ボリュームサイズはソース %(type)s と" -"同じでなければなりません。(ボリューム: %(volume_id)s)" - -msgid "The specified vdisk is mapped to a host." -msgstr "指定された vdisk はホストにマッピングされています。" - -msgid "The specified volume is mapped to a host." -msgstr "指定されたボリュームはホストにマッピングされています。" - -#, python-format -msgid "" -"The storage array password for %s is incorrect, please update the configured " -"password." -msgstr "" -"%s のストレージアレイのパスワードが正しくありません。設定されたパスワードを更" -"新してください。" - -#, python-format -msgid "The storage backend can be used. (config_group: %(config_group)s)" -msgstr "ストレージバックエンドを使用できます。(config_group: %(config_group)s)" - -#, python-format -msgid "" -"The storage device does not support %(prot)s. Please configure the device to " -"support %(prot)s or switch to a driver using a different protocol." -msgstr "" -"ストレージデバイスは %(prot)s をサポートしません。デバイスが %(prot)s をサ" -"ポートするように設定するか、別のプロトコルを使用するドライバーに切り替えてく" -"ださい。" - -msgid "The storage pool information is empty or not correct" -msgstr "ストレージプールの情報が空か、正しくありません。" - -#, python-format -msgid "" -"The striped meta count of %(memberCount)s is too small for volume: " -"%(volumeName)s, with size %(volumeSize)s." -msgstr "" -"%(memberCount)s のストライプメタ数がボリュームに対して小さすぎます: " -"%(volumeSize)s のサイズを持つ %(volumeName)s。" - -#, python-format -msgid "The token is not generated by authentication service. 
%s" -msgstr "トークンが authentication サービスで生成されませんでした。 %s" - -#, python-format -msgid "The token is not generated by authentication service.%s" -msgstr "トークンが authentication サービスで生成されませんでした。 %s" - -#, python-format -msgid "" -"The type of metadata: %(metadata_type)s for volume/snapshot %(id)s is " -"invalid." -msgstr "" -"メタデータのタイプ: ボリューム/スナップショット %(id)s の %(metadata_type)s " -"が無効です。" - -#, python-format -msgid "The value %(value)s for key %(key)s in extra specs is invalid." -msgstr "追加スペックのキー %(key)s の値 %(value)s が無効です。" - -#, python-format -msgid "" -"The volume %(volume_id)s could not be extended. The volume type must be " -"Normal." -msgstr "" -"ボリューム %(volume_id)s を拡張できませんでした。ボリュームタイプは「標準」で" -"なければなりません。" - -#, python-format -msgid "" -"The volume %(volume_id)s could not be unmanaged. The volume type must be " -"%(volume_type)s." -msgstr "" -"ボリューム %(volume_id)s を管理解除できませんでした。ボリュームタイプは " -"%(volume_type)s でなければなりません。" - -#, python-format -msgid "The volume %(volume_id)s is managed successfully. (LDEV: %(ldev)s)" -msgstr "ボリューム %(volume_id)s は正常に管理されています。(LDEV: %(ldev)s)" - -#, python-format -msgid "The volume %(volume_id)s is unmanaged successfully. (LDEV: %(ldev)s)" -msgstr "" -"ボリューム %(volume_id)s は正常に管理解除されています。(LDEV: %(ldev)s)" - -#, python-format -msgid "The volume %(volume_id)s to be mapped was not found." -msgstr "マッピングするボリューム %(volume_id)s が見つかりませんでした。" - -msgid "The volume cannot accept transfer in maintenance mode." -msgstr "メンテナンスモードではボリュームを転送できません。" - -msgid "The volume cannot be attached in maintenance mode." -msgstr "メンテナンスモードではボリュームを追加できません。" - -msgid "The volume cannot be detached in maintenance mode." -msgstr "メンテナンスモードではボリュームを切り離すことができません。" - -msgid "The volume cannot be updated during maintenance." -msgstr "メンテナンス中にはボリュームを更新することはできません。" - -msgid "The volume connection cannot be initialized in maintenance mode." -msgstr "メンテナンスモードではボリューム接続を初期化できません。" - -msgid "The volume driver requires the iSCSI initiator name in the connector." 
-msgstr "" -"ボリュームドライバーには、コネクター内の iSCSI イニシエーター名が必要です。" - -msgid "" -"The volume is currently busy on the 3PAR and cannot be deleted at this time. " -"You can try again later." -msgstr "" -"ボリュームは 3PAR 上で現在使用中のため、この時点では削除できません。後で再試" -"行できます。" - -msgid "The volume label is required as input." -msgstr "入力としてボリュームラベルが必要です。" - -#, python-format -msgid "" -"The volume to be managed is a %(provision)s LUN and the tiering setting is " -"%(tier)s. This doesn't match with the type %(type)s." -msgstr "" -"管理対象となるボリュームは %(provision)s LUN で、ティアリングの設定は " -"%(tier)s です。これはタイプ %(type)s と適合していません。" - -#, python-format -msgid "There are no resources available for use. (resource: %(resource)s)" -msgstr "使用できるリソースがありません。(リソース: %(resource)s)" - -msgid "There are no valid ESX hosts." -msgstr "有効な ESX ホストがありません。" - -msgid "There are no valid datastores." -msgstr "有効なデータストアがありません。" - -#, python-format -msgid "" -"There is no designation of the %(param)s. The specified storage is essential " -"to manage the volume." -msgstr "" -"%(param)s の宛先がありません。指定されたストレージは、ボリュームを管理するた" -"めに不可欠です。" - -msgid "" -"There is no designation of the ldev. The specified ldev is essential to " -"manage the volume." -msgstr "" -"ldev の宛先がありません。指定された ldev は、ボリュームを管理するために不可欠" -"です。" - -msgid "There is no metadata in DB object." -msgstr "DB オブジェクトの中にメタデータがありません。" - -#, python-format -msgid "There is no share which can host %(volume_size)sG" -msgstr "%(volume_size)sG をホストできる共有がありません" - -#, python-format -msgid "There is no share which can host %(volume_size)sG." -msgstr "%(volume_size)sG をホストできる共有がありません。" - -#, python-format -msgid "There is no such action: %s" -msgstr "このようなアクションはありません: %s" - -msgid "There is no virtual disk device." -msgstr "仮想ディスクデバイスがありません。" - -#, python-format -msgid "There was an error adding the volume to the remote copy group: %s." 
-msgstr "" -"リモートコピーグループへのボリュームの追加中にエラーが発生しました: %s。" - -#, python-format -msgid "There was an error creating the cgsnapshot: %s" -msgstr "cgsnapshot の作成中にエラーが発生しました: %s" - -#, python-format -msgid "There was an error creating the remote copy group: %s." -msgstr "リモートコピーグループの作成中にエラーが発生しました: %s。" - -#, python-format -msgid "" -"There was an error setting the sync period for the remote copy group: %s." -msgstr "リモートコピーグループの同期期間の設定中にエラーが発生しました: %s。" - -#, python-format -msgid "" -"There was an error setting up a remote copy group on the 3PAR arrays: " -"('%s'). The volume will not be recognized as replication type." -msgstr "" -"3PAR アレイでのリモートコピーグループのセットアップ中にエラーが発生しました: " -"('%s')。ボリュームはレプリケーションタイプとして認識されません。" - -#, python-format -msgid "" -"There was an error setting up a remote schedule on the LeftHand arrays: " -"('%s'). The volume will not be recognized as replication type." -msgstr "" -"LeftHand アレイのリモートスケジュールのセットアップ中にエラーが発生しました: " -"('%s')。ボリュームはレプリケーションタイプとして認識されません。" - -#, python-format -msgid "There was an error starting remote copy: %s." -msgstr "リモートコピーの開始中にエラーが発生しました: %s。" - -#, python-format -msgid "There's no Gluster config file configured (%s)" -msgstr "Gluster 構成ファイルが構成されていません (%s)" - -#, python-format -msgid "There's no NFS config file configured (%s)" -msgstr "NFS 構成ファイルが構成されていません (%s)" - -#, python-format -msgid "" -"There's no Quobyte volume configured (%s). Example: quobyte:///" -"" -msgstr "" -"設定済みの Quobyte ボリューム (%s) が存在しません。 例: quobyte:///" -"" - -msgid "Thin provisioning not supported on this version of LVM." -msgstr "" -"このバージョンの LVM ではシンプロビジョニングはサポートされていません。" - -msgid "ThinProvisioning Enabler is not installed. Can not create thin volume." -msgstr "" -"シンプロビジョニングイネーブラーがインストールされていません。シンボリューム" -"を作成できません。" - -msgid "This driver does not support deleting in-use snapshots." -msgstr "" -"このドライバーは、使用中のスナップショットの削除をサポートしていません。" - -msgid "This driver does not support snapshotting in-use volumes." 
-msgstr "" -"このドライバーは、使用中のボリュームのスナップショット作成をサポートしていま" -"せん。" - -msgid "This request was rate-limited." -msgstr "このリクエストは一定時間内の実行回数に制限があります。" - -#, python-format -msgid "This snapshot %(snap)s doesn't belong to the volume parent %(vol)s." -msgstr "" -"このスナップショット %(snap)s は親ボリューム %(vol)s に属していません。" - -#, python-format -msgid "" -"This system platform (%s) is not supported. This driver supports only Win32 " -"platforms." -msgstr "" -"このシステムプラットフォーム (%s) はサポートされていません。このドライバー" -"は、Win32 プラットフォームのみサポートします。" - -#, python-format -msgid "Tier Policy Service not found for %(storageSystemName)s." -msgstr "%(storageSystemName)s の層ポリシーサービスが見つかりません。" - -#, python-format -msgid "Timed out while waiting for Nova update for creation of snapshot %s." -msgstr "" -"スナップショット %s を作成するために Nova の更新を待機している間にタイムアウ" -"トになりました。" - -#, python-format -msgid "" -"Timed out while waiting for Nova update for deletion of snapshot %(id)s." -msgstr "" -"スナップショット %(id)s を削除するために Nova の更新を待機している間にタイム" -"アウトになりました。" - -#, python-format -msgid "Timeout waiting for %(condition_name)s in wait_until." -msgstr "wait_until 中の %(condition_name)s のタイムアウトを待っています。" - -#, python-format -msgid "Timeout while calling %s " -msgstr " %s の呼び出し中にタイムアウトが発生しました。" - -#, python-format -msgid "Timeout while requesting %(service)s API." -msgstr "%(service)s API の要求中にタイムアウトになりました。" - -#, python-format -msgid "Timeout while requesting capabilities from backend %(service)s." -msgstr "" -"バックエンドの %(service)s から機能をリクエストする際にタイムアウトが発生しま" -"した。" - -#, python-format -msgid "Transfer %(transfer_id)s could not be found." -msgstr "転送 %(transfer_id)s が見つかりませんでした。" - -#, python-format -msgid "" -"Transfer %(transfer_id)s: Volume id %(volume_id)s in unexpected state " -"%(status)s, expected awaiting-transfer" -msgstr "" -"転送 %(transfer_id)s: ボリューム ID %(volume_id)s が予期しない状態%(status)s " -"です。awaiting-transfer が予期されていました" - -msgid "" -"True to set acl 'allow_all' on volumes created. 
Cannot be changed on volume " -"once set" -msgstr "" -"ボリューム作成時に acl 'allow_all' を True に設定します。一度ボリュームに設" -"定すると変更することはできません。" - -#, python-format -msgid "" -"Trying to import backup metadata from id %(meta_id)s into backup %(id)s." -msgstr "" -"ID %(meta_id)s からバックアップ %(id)s にバックアップのメタデータをインポート" -"しようとしています。" - -#, python-format -msgid "" -"Tune volume task stopped before it was done: volume_name=%(volume_name)s, " -"task-status=%(status)s." -msgstr "" -"ボリュームタスクの調整が完了前に停止しました: volume_name=%(volume_name)s、" -"task-status=%(status)s。" - -#, python-format -msgid "" -"Type %(type_id)s is already associated with another qos specs: " -"%(qos_specs_id)s" -msgstr "" -"タイプ %(type_id)s は既に別の qos 仕様 %(qos_specs_id)s に関連付けられていま" -"す。" - -msgid "Type access modification is not applicable to public group type." -msgstr "" -"パブリックなグループタイプでは、タイプアクセスの変更を行うことはできません。" - -msgid "Type access modification is not applicable to public volume type." -msgstr "" -"パブリックなボリュームタイプでは、タイプアクセスの変更を行うことはできませ" -"ん。" - -msgid "Type cannot be converted into NaElement." -msgstr "タイプは NaElement に変換できません。" - -#, python-format -msgid "TypeError: %s" -msgstr "TypeError: %s" - -msgid "URI should end with /tag" -msgstr "URIは /tag で終わらなければいけません。" - -#, python-format -msgid "UUIDs %s are in both add and remove volume list." -msgstr "UUID %s が、ボリュームの追加リストと削除リストの両方に存在します。" - -#, python-format -msgid "Unable to access the Storwize back-end for volume %s." -msgstr "ボリューム %s の Storwize バックエンドにアクセスできません。" - -msgid "Unable to access the backend storage via file handle." -msgstr "ファイルハンドル経由でバックエンドストレージにアクセスできません。" - -#, python-format -msgid "Unable to access the backend storage via the path %(path)s." -msgstr "パス %(path)s を介してバックエンドストレージにアクセスできません。" - -#, python-format -msgid "Unable to add Cinder host to apphosts for space %(space)s" -msgstr "" -"スペース %(space)s のスナップショットに Cinder のホストを追加できません。" - -#, python-format -msgid "Unable to complete failover of %s." 
-msgstr "%s のフェイルオーバーを完了できません。" - -msgid "Unable to connect or find connection to host" -msgstr "ホストに接続できないか、ホストへの接続が見つかりません。" - -#, python-format -msgid "Unable to create consistency group %s" -msgstr "整合性グループ %s を作成できません。" - -msgid "Unable to create lock. Coordination backend not started." -msgstr "" -"ロックを作成できません。コーディネーションバックエンドがスタートしていませ" -"ん。" - -#, python-format -msgid "" -"Unable to create or get default storage group for FAST policy: " -"%(fastPolicyName)s." -msgstr "" -"FAST ポリシー %(fastPolicyName)s のデフォルトストレージグループを作成または取" -"得できません。" - -#, python-format -msgid "Unable to create replica clone for volume %s." -msgstr "ボリューム %s のレプリカ複製を作成できません。" - -#, python-format -msgid "Unable to create server object for initiator %(name)s" -msgstr "イニシエーター %(name)s 用にサーバーオブジェクトを作成できません。" - -#, python-format -msgid "Unable to create the relationship for %s." -msgstr "%s の関係を作成できません。" - -#, python-format -msgid "Unable to create volume %(name)s from %(snap)s." -msgstr "%(snap)s からボリューム %(name)s を作成できません" - -#, python-format -msgid "Unable to create volume %(name)s from %(vol)s." -msgstr "%(vol)s からボリューム %(name)s を作成できません。" - -#, python-format -msgid "Unable to create volume %s" -msgstr "ボリューム %s を作成できません。" - -msgid "Unable to create volume. Backend down." -msgstr "ボリュームを作成できません。バックエンドがダウンしています。" - -#, python-format -msgid "Unable to delete Consistency Group snapshot %s" -msgstr "整合性グループ %s を削除できません。" - -#, python-format -msgid "Unable to delete snapshot %(id)s, status: %(status)s." -msgstr "スナップショット %(id)s を削除できません。状態: %(status)s。" - -#, python-format -msgid "Unable to delete snapshot policy on volume %s." -msgstr "ボリューム %s 上のスナップショットポリシーを削除できません。" - -#, python-format -msgid "" -"Unable to delete the target volume for volume %(vol)s. Exception: %(err)s." -msgstr "" -"ボリューム %(vol)s のターゲットボリュームを削除できません。例外: %(err)s。" - -msgid "" -"Unable to detach volume. Volume status must be 'in-use' and attach_status " -"must be 'attached' to detach." 
-msgstr "" -"ボリュームを切り離すことができません。切り離すには、ボリュームの状態が「使用" -"中」で、attach_status が「接続済み」でなければなりません。" - -#, python-format -msgid "" -"Unable to determine secondary_array from supplied secondary: %(secondary)s." -msgstr "" -"提供されたセカンダリー配列から secondary_array を検出できません: " -"%(secondary)s" - -#, python-format -msgid "Unable to determine snapshot name in Purity for snapshot %(id)s." -msgstr "" -"Purity でスナップショット %(id)s のスナップショット名を判別できません。" - -msgid "Unable to determine system id." -msgstr "システム ID を判別できません。" - -msgid "Unable to determine system name." -msgstr "システム名を判別できません。" - -#, python-format -msgid "" -"Unable to do manage snapshot operations with Purity REST API version " -"%(api_version)s, requires %(required_versions)s." -msgstr "" -"Purity REST API のバージョン %(api_version)s でスナップショット処理を管理でき" -"ません。%(required_versions)s が必要です。" - -#, python-format -msgid "" -"Unable to do replication with Purity REST API version %(api_version)s, " -"requires one of %(required_versions)s." -msgstr "" -"Purity REST API のバージョン %(api_version)s ではレプリケーションを行うことが" -"できません。%(required_versions)s のうちのいずれかのバージョンが必要です。" - -#, python-format -msgid "Unable to establish the partnership with the Storwize cluster %s." -msgstr "Storwize クラスター %s とのパートナーシップを確立できません。" - -#, python-format -msgid "Unable to extend volume %s" -msgstr "ボリューム %s を拡張できません。" - -#, python-format -msgid "" -"Unable to fail-over the volume %(id)s to the secondary back-end, because the " -"replication relationship is unable to switch: %(error)s" -msgstr "" -"レプリケーション関係を切り替えられないため、ボリューム %(id)s をセカンダリー" -"バックエンドにフェイルオーバーできません: %(error)s" - -msgid "" -"Unable to failback to \"default\", this can only be done after a failover " -"has completed." -msgstr "" -"デフォルトにフェイルバックすることできません。フェイルバックができるのは、" -"フェイルオーバーの完了後に限られます。" - -msgid "Unable to failback. Backend is misconfigured." -msgstr "フェイルバックを行えません。バックエンドの設定に誤りがあります。" - -#, python-format -msgid "Unable to failover to replication target:%(reason)s)." 
-msgstr "" -"レプリケーションターゲットへのフェイルオーバーが有効ではありません:" -"%(reason)s)" - -msgid "Unable to fetch connection information from backend." -msgstr "バックエンドから接続情報を取り出すことができません。" - -#, python-format -msgid "Unable to fetch connection information from backend: %(err)s" -msgstr "バックエンドから接続情報を取り出すことができません: %(err)s" - -msgid "Unable to find K2peer in source K2:" -msgstr "ソース K2 内で K2peer が見つかりません。" - -#, python-format -msgid "Unable to find Purity ref with name=%s" -msgstr "名前 %s を持つ Purity 参照が見つかりません。" - -#, python-format -msgid "Unable to find Volume Group: %(vg_name)s" -msgstr "ボリュームグループが見つかりません: %(vg_name)s" - -msgid "Unable to find any active VPSA controller" -msgstr "アクティブな VPSA コントローラーが見つかりません。" - -msgid "Unable to find failover target, no secondary targets configured." -msgstr "" -"フェイルオーバーのターゲットが見つかりません。セカンダリーターゲットが設定さ" -"れていません。" - -msgid "Unable to find iSCSI mappings." -msgstr "iSCSI のマッピングが見つかりません。" - -#, python-format -msgid "Unable to find server object for initiator %(name)s" -msgstr "イニシエーター %(name)s 用のサーバーオブジェクトが見つかりません。" - -#, python-format -msgid "Unable to find ssh_hosts_key_file: %s" -msgstr "ssh_hosts_key_file が見つかりません: %s" - -msgid "Unable to find system log file!" -msgstr "システムログファイルが見つかりません。" - -#, python-format -msgid "" -"Unable to find viable pg snapshot to use forfailover on selected secondary " -"array: %(id)s." -msgstr "" -"特定のセカンダリー配列のフェイルオーバーで使用すべき適切な pg スナップショッ" -"トを見つけることができません: %(id)s" - -#, python-format -msgid "" -"Unable to find viable secondary array fromconfigured targets: %(targets)s." -msgstr "" -"設定されたターゲットから適切なセカンダリー配列を見つけることができません: " -"%(targets)s" - -#, python-format -msgid "Unable to find volume %s" -msgstr "ボリューム %s が見つかりません。" - -#, python-format -msgid "Unable to find volume with FSS vid =%s." -msgstr "FSS vid が %s のボリュームが見つかりませんでした。" - -#, python-format -msgid "Unable to find volume: %s from K2." -msgstr "K2でボリューム %s が見つかりません。" - -msgid "Unable to get FC target wwpns from K2." 
-msgstr "K2 から FC ターゲット wwpn を取得できませんでした。" - -msgid "Unable to get ISCSI IP address from K2." -msgstr "K2 から iSCSI IP アドレスを取得できませんでした。" - -#, python-format -msgid "Unable to get a block device for file '%s'" -msgstr "ファイル '%s' のブロックデバイスを取得できません。" - -#, python-format -msgid "" -"Unable to get configuration information necessary to create a volume: " -"%(errorMessage)s." -msgstr "ボリュームの作成に必要な設定情報を取得できません: %(errorMessage)s。" - -msgid "Unable to get corresponding record for pool." -msgstr "プールに該当するレコードを取得できません。" - -#, python-format -msgid "" -"Unable to get information on space %(space)s, please verify that the cluster " -"is running and connected." -msgstr "" -"スペース %(space)s に関する情報が得られません。クラスターが稼働中であることを" -"確認し、是正してください。" - -msgid "" -"Unable to get list of IP addresses on this host, check permissions and " -"networking." -msgstr "" -"このホスト上の IP アドレスのリストが得られません。権限とネットワークを確認し" -"てください。" - -msgid "" -"Unable to get list of domain members, check that the cluster is running." -msgstr "" -"ドメインメンバーのリストが得られません。クラスターが稼働していることを確認し" -"てください。" - -msgid "" -"Unable to get list of spaces to make new name. Please verify the cluster is " -"running." -msgstr "" -"新規の名前を作成するためのスペースのリストが得られません。クラスターが稼働し" -"ていることを確認してください。" - -msgid "Unable to get size of manage volume." -msgstr "管理ボリュームのサイズを取得できません。" - -#, python-format -msgid "Unable to get stats for backend_name: %s" -msgstr "backend_name の統計情報を取得できません: %s" - -msgid "Unable to get storage volume from job." -msgstr "ジョブからストレージボリュームを取得できません。" - -msgid "Unable to get target endpoints for any hardwareIds." -msgstr "どのハードウェア ID のターゲットエンドポイントも取得できません。" - -#, python-format -msgid "Unable to get target endpoints for hardwareId %(hardwareIdInstance)s." -msgstr "" -"ハードウェア ID %(hardwareIdInstance)s のターゲットエンドポイントを取得できま" -"せん。" - -msgid "Unable to get target iqn from K2." -msgstr "K2 から ターゲット iqn を取得できませんでした。" - -msgid "Unable to get the name of the masking view." 
-msgstr "マスキングビューの名前を取得できません。" - -msgid "Unable to get the name of the portgroup." -msgstr "ポートグループの名前を取得できません。" - -msgid "Unable to get the name of the storage group" -msgstr "ストレージグループの名前を取得できません。" - -msgid "Unable to get the name of the storage group." -msgstr "ストレージグループの名前を取得できません。" - -#, python-format -msgid "Unable to get the replication relationship for volume %s." -msgstr "ボリューム %s のレプリケーション関係を取得できません。" - -msgid "Unable to import 'krest' python module." -msgstr "'krest' python モジュールをインポートできません。" - -#, python-format -msgid "" -"Unable to import volume %(deviceId)s to cinder. It is the source volume of " -"replication session %(sync)s." -msgstr "" -"Cinder にボリューム %(deviceId)s をインポートできません。これはレプリケーショ" -"ンセッション %(sync)s のソースボリュームです。" - -#, python-format -msgid "" -"Unable to import volume %(deviceId)s to cinder. The external volume is not " -"in the pool managed by current cinder host." -msgstr "" -"Uinder にボリューム %(deviceId)s をインポートできません。外部ボリュームは、現" -"在の cinder ホストが管理するプールに含まれません。" - -#, python-format -msgid "" -"Unable to import volume %(deviceId)s to cinder. Volume is in masking view " -"%(mv)s." -msgstr "" -"Cinder にボリューム %(deviceId)s を追加できません。マスキングビューのボリュー" -"ムは %(mv)s です。" - -#, python-format -msgid "Unable to load CA from %(cert)s %(e)s." -msgstr "%(cert)s %(e)s から認証局をロードできません。" - -#, python-format -msgid "Unable to load cert from %(cert)s %(e)s." -msgstr "%(cert)s %(e)s から証明書をロードできません。" - -#, python-format -msgid "Unable to load key from %(cert)s %(e)s." 
-msgstr "%(cert)s %(e)s から鍵をロードできません。" - -#, python-format -msgid "Unable to locate account %(account_name)s on Solidfire device" -msgstr "" -"Solidfire デバイス上でアカウント %(account_name)s を見つけることができませ" -"ん。" - -#, python-format -msgid "Unable to locate an SVM that is managing the IP address '%s'" -msgstr "IP アドレス「%s」を管理している SVM が見つかりません" - -#, python-format -msgid "Unable to locate specified replay profiles %s " -msgstr "指定されたリプレープロファイル %s を特定できません" - -#, python-format -msgid "" -"Unable to manage existing volume. Volume %(volume_ref)s already managed." -msgstr "" -"既に管理されているボリュームが存在するため、ボリューム %(volume_ref)s の管理" -"に失敗しました。" - -#, python-format -msgid "Unable to manage volume %s" -msgstr "ボリューム %s を管理できません" - -msgid "Unable to map volume" -msgstr "ボリュームをマッピングできません" - -msgid "Unable to map volume." -msgstr "ボリュームのマッピングができません。" - -msgid "Unable to parse attributes." -msgstr "属性を解析できません。" - -#, python-format -msgid "" -"Unable to promote replica to primary for volume %s. No secondary copy " -"available." -msgstr "" -"レプリカをボリューム %s のプライマリーにプロモートできません。セカンダリーコ" -"ピーがありません。" - -msgid "" -"Unable to re-use a host that is not managed by Cinder with " -"use_chap_auth=True," -msgstr "" -"use_chap_auth=True が指定されている、Cinder で管理されていないホストを再使用" -"することはできません。" - -msgid "Unable to re-use host with unknown CHAP credentials configured." -msgstr "" -"不明な CHAP 資格情報が構成されているホストを再使用することはできません。" - -#, python-format -msgid "Unable to rename volume %(existing)s to %(newname)s" -msgstr "ボリューム %(existing)s の名前を %(newname)s に変更できません。" - -#, python-format -msgid "Unable to retrieve snapshot group with id of %s." -msgstr "%s の ID を持つスナップショットグループを取得できません。" - -#, python-format -msgid "" -"Unable to retype %(specname)s, expected to receive current and requested " -"%(spectype)s values. Value received: %(spec)s" -msgstr "" -"%(specname)s を再入力できません。要求した最新の %(spectype)s の値を受信するこ" -"とを予期していたものの、%(spec)s の値を受信しました。" - -#, python-format -msgid "" -"Unable to retype: A copy of volume %s exists. 
Retyping would exceed the " -"limit of 2 copies." -msgstr "" -"種別変更ができません。ボリューム %s のコピーが存在します。タイプ変更を行う" -"と、コピー数 2 という制限を超えます。" - -#, python-format -msgid "" -"Unable to retype: Current action needs volume-copy, it is not allowed when " -"new type is replication. Volume = %s" -msgstr "" -"種別変更ができません: 現行アクションにはボリュームコピーが必要ですが、新しい" -"タイプが複製の場合は許可されません。ボリューム = %s" - -#, python-format -msgid "Unable to send requests: %s" -msgstr "リクエストを送信できません: %s" - -#, python-format -msgid "" -"Unable to set up mirror mode replication for %(vol)s. Exception: %(err)s." -msgstr "" -"%(vol)s のミラーモードレプリケーションをセットアップできません。例外: " -"%(err)s。" - -#, python-format -msgid "Unable to snap Consistency Group %s" -msgstr "整合性グループ %s を移動できません。" - -msgid "Unable to terminate volume connection from backend." -msgstr "バックエンドからのボリューム接続を終了することができません。" - -#, python-format -msgid "Unable to terminate volume connection: %(err)s" -msgstr "ボリューム接続を終了することができません: %(err)s" - -#, python-format -msgid "Unable to update consistency group %s" -msgstr "整合性グループ %s を更新できません。" - -#, python-format -msgid "" -"Unable to verify initiator group: %(igGroupName)s in masking view " -"%(maskingViewName)s. " -msgstr "" -"マスキングビュー %(maskingViewName)s のイニシエーターグループ " -"%(igGroupName)s を検査できません。" - -msgid "Unacceptable parameters." -msgstr "受け入れられないパラメーター。" - -#, python-format -msgid "" -"Unexecpted mapping status %(status)s for mapping %(id)s. Attributes: " -"%(attr)s." -msgstr "" -" マッピング %(id)s の予期されないマッピング状態 %(status)s。属性: %(attr)s。" - -msgid "Unexpected 'disabled_reason' found on enable request." -msgstr "enable request に予期しない 'disabled_reason' が見つかりました。" - -#, python-format -msgid "" -"Unexpected CLI response: header/row mismatch. header: %(header)s, row: " -"%(row)s." -msgstr "" -"予期しない CLI 応答: ヘッダー/行の不一致。ヘッダー: %(header)s、行: %(row)s。" - -msgid "Unexpected exception during get pools info." -msgstr "プール情報の取得中に予期しない例外が発生しました。" - -msgid "Unexpected exception during pool checking." 
-msgstr "プールのチェック中に予期しない例外が発生しました。" - -#, python-format -msgid "" -"Unexpected mapping status %(status)s for mapping%(id)s. Attributes: %(attr)s." -msgstr "" -"マッピング %(id)s の予期されないマッピング状態 %(status)s。属性: %(attr)s。" - -#, python-format -msgid "Unexpected output. Expected [%(expected)s] but received [%(output)s]" -msgstr "" -"予期しない出力。[%(expected)s] が予期されましたが、[%(output)s] を受け取りま" -"した" - -#, python-format -msgid "Unexpected over quota on %(name)s." -msgstr "%(name)s で、予期せずクォータを超過しました。" - -msgid "Unexpected response from Nimble API" -msgstr "Nimble API からの予期しない応答" - -msgid "Unexpected response from Tegile IntelliFlash API" -msgstr "Tegile IntelliFlash API からの予期しない応答" - -msgid "Unexpected status code" -msgstr "予期しないステータスコード" - -#, python-format -msgid "" -"Unexpected status code from the switch %(switch_id)s with protocol " -"%(protocol)s for url %(page)s. Error: %(error)s" -msgstr "" -"URL %(page)s 用にプロトコル %(protocol)s を指定したスイッチ%(switch_id)s から" -"返された予期しないステータスコード。エラー: %(error)s" - -msgid "Unknown Gluster exception" -msgstr "不明な Gluster 例外" - -msgid "Unknown NFS exception" -msgstr "不明な NFS 例外" - -msgid "Unknown RemoteFS exception" -msgstr "不明な RemoteFS 例外" - -msgid "Unknown SMBFS exception." -msgstr "不明な SMBFS 例外。" - -msgid "Unknown Virtuozzo Storage exception" -msgstr "Virtuozzo Storage で不明な例外が発生しました" - -msgid "Unknown action" -msgstr "不明なアクション" - -#, python-format -msgid "" -"Unknown if the volume: %s to be managed is already being managed by Cinder. " -"Aborting manage volume. Please add 'cinder_managed' custom schema property " -"to the volume and set its value to False. Alternatively, Set the value of " -"cinder config policy 'zfssa_manage_policy' to 'loose' to remove this " -"restriction." 
-msgstr "" -"管理対象のボリューム: %s がすでに Cinder によって管理されている場合は不明で" -"す。ボリュームの管理を中止します。 'cinder_managed' カスタムスキーマプロパ" -"ティーをそのボリュームに追加し、その値を False に設定してください。あるいは、" -"Cinder の設定ポリシーの値 'zfssa_manage_policy' を 'loose' に変更してこの制限" -"を取り除きます。" - -#, python-format -msgid "" -"Unknown if the volume: %s to be managed is already being managed by Cinder. " -"Aborting manage volume. Please add 'cinder_managed' custom schema property " -"to the volume and set its value to False. Alternatively, set the value of " -"cinder config policy 'zfssa_manage_policy' to 'loose' to remove this " -"restriction." -msgstr "" -"管理対象のボリューム: %s がすでに Cinder によって管理されている場合は不明で" -"す。ボリュームの管理を中止します。 'cinder_managed' カスタムスキーマプロパ" -"ティーをそのボリュームに追加し、その値を False に設定してください。あるいは、" -"Cinder の設定ポリシーの値 'zfssa_manage_policy' を 'loose' に変更してこの制限" -"を取り除きます。" - -#, python-format -msgid "Unknown operation %s." -msgstr "不明な処理 %s。" - -#, python-format -msgid "Unknown or unsupported command %(cmd)s" -msgstr "不明またはサポートされないコマンド (%(cmd)s) です。" - -#, python-format -msgid "Unknown protocol: %(protocol)s." -msgstr "不明なプロトコル: %(protocol)s。" - -#, python-format -msgid "Unknown quota resources %(unknown)s." -msgstr "不明なクォータリソース %(unknown)s。" - -msgid "Unknown sort direction, must be 'desc' or 'asc'" -msgstr "ソート方向が不明です。'desc' または 'asc' でなければなりません" - -msgid "Unknown sort direction, must be 'desc' or 'asc'." -msgstr "ソート方向が不明です。'desc' または 'asc' でなければなりません。" - -#, python-format -msgid "Unknown/Unsupported HTTP method: %s" -msgstr "未知/未サポートの HTTP メソッド: %s" - -msgid "Unmanage and cascade delete options are mutually exclusive." -msgstr "削除オプションの非管理とカスケーディングを同時に行うことはできません。" - -msgid "Unmanage volume not implemented." -msgstr "ボリュームの非管理が実装されていません。" - -msgid "Unmanaging of snapshots from 'failed-over' volumes is not allowed." -msgstr "" -"'failed-over' ボリュームからのスナップショットを非管理対象にすることは許可さ" -"れません。" - -msgid "Unmanaging of snapshots from failed-over volumes is not allowed." 
-msgstr "" -"フェイルオーバーされたボリュームからのスナップショットを非管理対象にすること" -"は許可されません。" - -#, python-format -msgid "Unrecognized QOS keyword: \"%s\"" -msgstr "認識されない QoS キーワード: \"%s\"" - -#, python-format -msgid "Unrecognized backing format: %s" -msgstr "認識されないバッキングフォーマット: %s" - -#, python-format -msgid "Unrecognized read_deleted value '%s'" -msgstr "認識されない read_deleted 値 '%s'" - -#, python-format -msgid "" -"Unrecoverable Error: Versioned Objects in DB are capped to unknown version " -"%(version)s." -msgstr "" -"リカバリー不可能なエラー : DB 内のバージョニングされたオブジェクトは、不明な" -"バージョン %(version)s でキャップされています。" - -#, python-format -msgid "Unset gcs options: %s" -msgstr "gcs オプションの設定を解除します: %s" - -msgid "Unsupported Content-Type" -msgstr "サポートされない Content-Type" - -msgid "" -"Unsupported Data ONTAP version. Data ONTAP version 7.3.1 and above is " -"supported." -msgstr "" -"サポートされない Data ONTAP バージョンです。Data ONTAP バージョン 7.3.1 以上" -"がサポートされています。" - -#, python-format -msgid "Unsupported backup metadata version (%s)" -msgstr "サポートされないバックアップのメタデータバージョン (%s)" - -msgid "Unsupported backup metadata version requested" -msgstr "サポートされないバックアップメタデータバージョンが要求されました。" - -msgid "Unsupported backup verify driver" -msgstr "サポートされないバックアップ検証ドライバーです。" - -#, python-format -msgid "Unsupported fields %s." -msgstr "サポートされないフィールド: %s" - -#, python-format -msgid "" -"Unsupported firmware on switch %s. Make sure switch is running firmware v6.4 " -"or higher" -msgstr "" -"スイッチ %s でサポートされないファームウェアです。スイッチでファームウェア " -"v6.4 以上が実行されていることを確認してください" - -#, python-format -msgid "Unsupported method: %s" -msgstr "サポートされないメソッド: %s" - -#, python-format -msgid "Unsupported volume format %s" -msgstr "ボリューム形式はサポートされていません: %s " - -#, python-format -msgid "Unsupported volume format: %s " -msgstr "ボリューム形式はサポートされていません: %s " - -msgid "Update QoS policy error." -msgstr "QoS ポリシー更新のエラー。" - -msgid "" -"Update and delete quota operations can only be made by an admin of immediate " -"parent or by the CLOUD admin." 
-msgstr "" -"クォータ処理の更新と削除を行えるのは、直近の親の管理者または CLOUD 管理者のい" -"ずれかです。" - -msgid "" -"Update and delete quota operations can only be made to projects in the same " -"hierarchy of the project in which users are scoped to." -msgstr "" -"クォータ処理の更新と削除を行えるのは、ユーザーが割り当てられたプロジェクトと" -"同じ階層にあるプロジェクトに限られます。" - -msgid "Update list, doesn't include volume_id" -msgstr "リストを更新します。volume_id が含まれません。" - -msgid "Updated At" -msgstr "最終更新" - -#, python-format -msgid "Updating volume metadata is not allowed for volumes in %s status." -msgstr "" -"ボリュームの状態が %s である場合は、ボリュームメタデータの更新は許可されませ" -"ん。" - -msgid "Upload to glance of attached volume is not supported." -msgstr "" -"接続されたボリュームの glance へのアップロードはサポートされていません。" - -msgid "Use ALUA to associate initiator to host error." -msgstr "ALUA を使用したホストへのイニシエーターの関連付けのエラー。" - -msgid "" -"Use CHAP to associate initiator to host error. Please check the CHAP " -"username and password." -msgstr "" -"CHAP を使用したホストへのイニシエーターの関連付けのエラー。CHAP のユーザー名" -"とパスワードを確認してください。" - -msgid "User ID" -msgstr "ユーザー ID" - -msgid "User Name is missing for ZTE driver." -msgstr "ZTE ドライバー用のユーザー名 がありません。" - -msgid "User Password is missing for ZTE driver." -msgstr "ZTE ドライバー用のユーザーパスワード がありません。" - -msgid "User does not have admin privileges" -msgstr "ユーザーに管理者特権がありません。" - -msgid "User not authorized to perform WebDAV operations." -msgstr "ユーザーは WebDAV 操作の実行が許可されていません。" - -msgid "User not permitted to query Data ONTAP volumes." -msgstr "ユーザーには Data ONTAP ボリュームの照会が許可されていません。" - -msgid "UserName is not configured." -msgstr "ユーザー名は設定されていません。" - -msgid "UserPassword is not configured." -msgstr "ユーザーパスワードは設定されていません。" - -msgid "V2 rollback, volume is not in any storage group." -msgstr "" -"V2 のロールバック。どのストレージグループにもボリュームが存在しません。" - -msgid "V3 rollback" -msgstr "V3 のロールバック" - -msgid "VF is not enabled." -msgstr "VF は有効になっていません。" - -msgid "VNX Cinder driver does not support multiple replication targets." 
-msgstr "" -"VNX Cinder ドライバーは複数のレプリケーションターゲットをサポートしていませ" -"ん。" - -#, python-format -msgid "VPool %(name)s ( %(vpooltype)s ) : not found" -msgstr "Pool %(name)s ( %(vpooltype)s ) : 見つかりませんでした。" - -#, python-format -msgid "VV Set %s does not exist." -msgstr "VV セット %s は存在しません。" - -#, python-format -msgid "Valid consumer of QoS specs are: %s" -msgstr "QoS 仕様の有効なコンシューマー: %s" - -#, python-format -msgid "Valid control location are: %s" -msgstr "有効な制御ロケーション: %s" - -#, python-format -msgid "Validate volume connection failed (error: %(err)s)." -msgstr "ボリューム接続の検証に失敗しました (エラー: %(err)s)." - -#, python-format -msgid "" -"Value \"%(value)s\" is not valid for configuration option \"%(option)s\"" -msgstr "構成オプション \"%(option)s\" の値 \"%(value)s\" は無効です。" - -#, python-format -msgid "Value %(param)s for %(param_string)s is not a boolean." -msgstr "%(param_string)s の値 %(param)s がブール値ではありません。" - -msgid "Value required for 'scality_sofs_config'" -msgstr "'scality_sofs_config' の値が必要です。" - -#, python-format -msgid "ValueError: %s" -msgstr "ValueError: %s" - -#, python-format -msgid "Vdisk %(name)s not involved in mapping %(src)s -> %(tgt)s." -msgstr "%(src)s から %(tgt)s へのマッピングに関連しない Vdisk %(name)s。" - -#, python-format -msgid "" -"Version %(req_ver)s is not supported by the API. Minimum is %(min_ver)s and " -"maximum is %(max_ver)s." -msgstr "" -"バージョン %(req_ver)s はこのAPIではサポートされていません。最大値は " -"%(min_ver)s で、最小値は %(max_ver)s です。" - -#, python-format -msgid "VersionedObject %s cannot retrieve object by id." -msgstr "VersionedObject %s が ID ごとのオブジェクトを抽出できません。" - -#, python-format -msgid "VersionedObject %s does not support conditional update." -msgstr "VersionedObject %s が条件の変更を行うことができません。" - -#, python-format -msgid "Virtual volume '%s' doesn't exist on array." -msgstr "仮想ボリューム '%s' がアレイに存在しません。" - -#, python-format -msgid "Vol copy job for dest %s failed." -msgstr "宛先 %s のボリュームコピージョブが失敗しました。" - -#, python-format -msgid "Volume %(deviceID)s not found." 
-msgstr "ボリューム %(deviceID)s が見つかりません。" - -#, python-format -msgid "Volume %(name)s could not be found. It might be already deleted" -msgstr "" -"ボリューム %(name)s が見つかりませんでした。すでに削除されている可能性があり" -"ます。" - -#, python-format -msgid "Volume %(name)s not found" -msgstr "ボリューム %(name)s が見つかりません。" - -#, python-format -msgid "" -"Volume %(name)s not found on the array. Cannot determine if there are " -"volumes mapped." -msgstr "" -"ボリューム %(name)s がアレイ上に見つかりません。マッピングされるボリュームが" -"あるかどうかを判別できません。" - -#, python-format -msgid "Volume %(name)s was created in VNX, but in %(state)s state." -msgstr "ボリューム %(name)s は VNX で作成されましたが、%(state)s 状態です。" - -#, python-format -msgid "Volume %(name)s was not deactivated in time." -msgstr "ボリューム %(name)s は時間内に非アクティブになりませんでした。" - -#, python-format -msgid "" -"Volume %(name)s: clone failed\n" -"%(err)s" -msgstr "" -"ボリューム %(name)s: クローンが失敗しました。\n" -"%(err)s" - -#, python-format -msgid "" -"Volume %(name)s: create failed\n" -"%(err)s" -msgstr "" -"ボリューム %(name)s: 作成に失敗しました。\n" -"%(err)s" - -#, python-format -msgid "" -"Volume %(name)s: delete failed\n" -"%(err)s" -msgstr "" -"ボリューム %(name)s: 削除が失敗しました。\n" -"%(err)s" - -#, python-format -msgid "Volume %(vol)s could not be created in pool %(pool)s." -msgstr "ボリューム %(vol)s をプール %(pool)s に作成できませんでした。" - -#, python-format -msgid "Volume %(vol1)s does not match with snapshot.volume_id %(vol2)s." -msgstr "ボリューム%(vol1)s が snapshot.volume_id %(vol2)s と一致しません。" - -#, python-format -msgid "Volume %(vol_id)s is not local to this node %(host)s" -msgstr "" -"ボリューム %(vol_id)s は、このノード %(host)s に対してローカルではありませ" -"ん。" - -#, python-format -msgid "Volume %(vol_id)s status must be %(statuses)s" -msgstr "ボリューム %(vol_id)s の状態は %(statuses)s でなければいけません。" - -#, python-format -msgid "Volume %(vol_id)s status must be available to extend." 
-msgstr "" -"ボリューム %(vol_id)s を拡張するには、状態が「利用可能」でなければいけませ" -"ん。" - -#, python-format -msgid "" -"Volume %(vol_id)s status must be available to update readonly flag, but " -"current status is: %(vol_status)s." -msgstr "" -"読み取り専用フラグを更新するには、ボリューム %(vol_id)s の状態が「使用可能」" -"でなければなりませんが、現在の状態は %(vol_status)s です。" - -#, python-format -msgid "" -"Volume %(vol_id)s status must be available, but current status is: " -"%(vol_status)s." -msgstr "" -"ボリューム %(vol_id)s の状態は「使用可能」でなければなりませんが、現在の状態" -"は %(vol_status)s です。" - -#, python-format -msgid "Volume %(volume_id)s could not be found." -msgstr "ボリューム %(volume_id)s が見つかりませんでした。" - -#, python-format -msgid "" -"Volume %(volume_id)s has no administration metadata with key " -"%(metadata_key)s." -msgstr "" -"ボリューム %(volume_id)s には、キー %(metadata_key)s を持つ管理メタデータがあ" -"りません。" - -#, python-format -msgid "Volume %(volume_id)s has no metadata with key %(metadata_key)s." -msgstr "" -"ボリューム %(volume_id)s にはキー %(metadata_key)s を持つメタデータはありませ" -"ん。" - -#, python-format -msgid "" -"Volume %(volume_id)s is currently mapped to unsupported host group %(group)s" -msgstr "" -"ボリューム %(volume_id)s は現在、サポート対象ではないホストグループ " -"%(group)s にマップされています" - -#, python-format -msgid "Volume %(volume_id)s is not currently mapped to host %(host)s" -msgstr "" -"ボリューム %(volume_id)s は現在、ホスト %(host)s にマッピングされていません" - -#, python-format -msgid "Volume %(volume_id)s is still attached, detach volume first." -msgstr "" -"ボリューム %(volume_id)s はまだ接続されています。最初にボリュームを切り離して" -"ください。" - -#, python-format -msgid "Volume %(volume_id)s replication error: %(reason)s" -msgstr "ボリューム %(volume_id)s 複製エラー: %(reason)s" - -#, python-format -msgid "Volume %(volume_name)s is busy." 
-msgstr "ボリューム %(volume_name)s は使用中です。" - -#, python-format -msgid "" -"Volume %(volume_name)s: expand failed\n" -"%(err)s" -msgstr "" -"ボリューム %(volume_name)s: 拡張が失敗しました。\n" -"%(err)s" - -#, python-format -msgid "" -"Volume %(volume_name)s: update failed\n" -"%(err)s" -msgstr "" -"ボリューム %(volume_name)s: 更新が失敗しました。\n" -"%(err)s" - -#, python-format -msgid "Volume %s : not found" -msgstr "ボリューム %s: 見つかりませんでした。" - -#, python-format -msgid "Volume %s could not be created from source volume." -msgstr "ボリューム %s をソースボリュームから作成できませんでした。" - -#, python-format -msgid "Volume %s could not be created on shares." -msgstr "共有上でボリューム %s を作成できませんでした。" - -#, python-format -msgid "Volume %s could not be created." -msgstr "ボリューム %s を作成できませんでした。" - -#, python-format -msgid "Volume %s does not exist in Nexenta SA" -msgstr "ボリューム %s は Nexenta SA に存在しません" - -#, python-format -msgid "Volume %s does not exist in Nexenta Store appliance" -msgstr "ボリューム %s は Nexenta Store アプライアンスに存在しません" - -#, python-format -msgid "Volume %s does not exist on the array." -msgstr "ボリューム %s does はアレイに存在しません。" - -#, python-format -msgid "Volume %s does not have provider_location specified, skipping." -msgstr "" -"ボリューム %s で provider_location が指定されていません。スキップします。" - -#, python-format -msgid "Volume %s doesn't exist on array." -msgstr "ボリューム %s がアレイに存在しません。" - -#, python-format -msgid "Volume %s doesn't exist on the ZFSSA backend." -msgstr "ボリューム %s が ZFSSA バックエンドに存在しません。" - -#, python-format -msgid "Volume %s is already managed by OpenStack." -msgstr "ボリューム %s は OpenStack により既に管理されています。" - -#, python-format -msgid "" -"Volume %s is not of replicated type. This volume needs to be of a volume " -"type with the extra spec replication_enabled set to ' True' to support " -"replication actions." -msgstr "" -"ボリューム %s は複製された種別のものではありません。このボリュームは、レプリ" -"ケーションアクションをサポートするために追加仕様 replication_enabled を " -"' True' に設定したボリューム種別のものでなければなりません。" - -#, python-format -msgid "" -"Volume %s is online. 
Set volume to offline for managing using OpenStack." -msgstr "" -"ボリューム %s がオンラインです。OpenStack を使用して管理するために、ボリュー" -"ムをオフラインに設定してください。" - -#, python-format -msgid "" -"Volume %s must not be migrating, attached, belong to a group or have " -"snapshots." -msgstr "" -"ボリューム %s の移行と追加を行うことはできず、グループに含めることはできず、" -"スナップショットを持つこともできません。" - -#, python-format -msgid "Volume %s must not be part of a consistency group." -msgstr "ボリューム %s は整合性グループの一部であってはなりません。" - -#, python-format -msgid "Volume %s not found" -msgstr "ボリューム %s が見つかりません。" - -#, python-format -msgid "Volume %s not found." -msgstr "ボリューム %s が見つかりません。" - -#, python-format -msgid "" -"Volume %s status must be available or in-use, must not be migrating, have " -"snapshots, be replicated, be part of a group and destination host must be " -"different than the current host" -msgstr "" -"ボリューム %s の状態は「利用可能」もしくは「使用中」である必要があります。ま" -"た、マイグレーション中でないこと、スナップショットを持たないこと、複製されて" -"いないこと、グループの一部ではないことも必要で、移行先ホストは現在とは異なる" -"ホストでなければいけません。" - -#, python-format -msgid "Volume %s: Error trying to extend volume" -msgstr "ボリューム %s: ボリュームの拡張を試行中にエラーが発生しました" - -#, python-format -msgid "Volume (%s) already exists on array" -msgstr "ボリューム (%s) はすでにアレイ上に存在します" - -#, python-format -msgid "Volume (%s) already exists on array." -msgstr "ボリューム (%s) は既にアレイ上にあります。" - -#, python-format -msgid "Volume Group %s does not exist" -msgstr "ボリュームグループ %s は存在しません。" - -#, python-format -msgid "Volume Type %(id)s already exists." -msgstr "ボリューム種別 %(id)s は既に存在します。" - -#, python-format -msgid "" -"Volume Type %(volume_type_id)s deletion is not allowed with volumes present " -"with the type." -msgstr "" -"ボリューム種別 %(volume_type_id)s を持つボリュームでは、そのボリューム種別は" -"削除できません。" - -#, python-format -msgid "" -"Volume Type %(volume_type_id)s has no extra specs with key " -"%(extra_specs_key)s." -msgstr "" -"ボリューム種別 %(volume_type_id)s にはキー %(extra_specs_key)s を持つ追加の仕" -"様はありません。" - -msgid "Volume Type id must not be None." 
-msgstr "ボリュームタイプ ID を None に設定することはできません。" - -#, python-format -msgid "" -"Volume [%(cb_vol)s] was not found at CloudByte storage corresponding to " -"OpenStack volume [%(ops_vol)s]." -msgstr "" -"OpenStack のボリューム [%(ops_vol)s] に相当する CloudByte のストレージでボ" -"リューム [%(cb_vol)s] が見つかりませんでした。" - -#, python-format -msgid "Volume [%s] not found in CloudByte storage." -msgstr "ボリューム [%s] が CloudByte ストレージに見つかりません。" - -msgid "Volume already managed." -msgstr "ボリュームはすでに管理されています。" - -#, python-format -msgid "Volume attachment could not be found with filter: %(filter)s ." -msgstr "ボリューム接続がフィルター %(filter)s で見つかりませんでした。" - -#, python-format -msgid "Volume backend config is invalid: %(reason)s" -msgstr "ボリュームバックエンド構成が無効です: %(reason)s" - -msgid "Volume by this name already exists" -msgstr "この名前のボリュームは既に存在します" - -msgid "" -"Volume cannot be created individually from a snapshot that is part of a " -"Consistency Group" -msgstr "" -"整合性グループの一部であるスナップショットから、ボリュームを個別に作成するこ" -"とはできません。" - -msgid "Volume cannot be restored since it contains snapshots." -msgstr "スナップショットが含まれているため、ボリュームを復元できません。" - -#, python-format -msgid "Volume connected to host %s." -msgstr "ボリュームがホスト %s に接続されました。" - -msgid "Volume create failed while extracting volume ref." -msgstr "ボリューム参照の抽出中にボリュームの作成に失敗しました。" - -#, python-format -msgid "Volume device file path %s does not exist." -msgstr "ボリュームデバイスのファイルパス %s が存在しません。" - -#, python-format -msgid "Volume device not found at %(device)s." -msgstr "%(device)s でボリュームデバイスが見つかりません。" - -#, python-format -msgid "Volume does not exists %s." -msgstr "ボリュームが存在しません: %s" - -#, python-format -msgid "Volume driver %s not initialized." -msgstr "ボリュームドライバー %s が初期化されていません。" - -msgid "Volume driver not ready." -msgstr "ボリュームドライバーが準備できていません。" - -#, python-format -msgid "Volume driver reported an error: %(message)s" -msgstr "ボリュームドライバーがエラーを報告しました: %(message)s" - -msgid "Volume has a temporary snapshot that can't be deleted at this time." 
-msgstr "" -"ボリュームには今回削除できない一時的なスナップショットが含まれています。" - -msgid "Volume has children and cannot be deleted!" -msgstr "ボリュームには子が含まれており、削除できません。" - -#, python-format -msgid "Volume in group %s is attached. Need to detach first." -msgstr "グループ %s のボリュームが接続されています。まず切り離してください。" - -msgid "Volume in group still has dependent snapshots." -msgstr "グループ内のボリュームには、まだ従属スナップショットがあります。" - -#, python-format -msgid "Volume is attached to a server. (%s)" -msgstr "ボリュームがサーバーに追加されています (%s)。" - -msgid "Volume is in-use." -msgstr "ボリュームが使用中です。" - -msgid "Volume is not available." -msgstr "ボリュームが利用できません。" - -msgid "Volume is not local to this node" -msgstr "ボリュームは、このノードに対してローカルではありません。" - -msgid "Volume is not local to this node." -msgstr "ボリュームがこのノードに対してローカルではありません。" - -msgid "Volume manage failed." -msgstr "ボリュームの管理に失敗しました。" - -msgid "" -"Volume manage identifier must contain either source-id or source-name " -"element." -msgstr "" -"ボリューム管理識別子には、要素 source-id か source-name のいずれかが含まれて" -"いる必要があります。" - -msgid "" -"Volume manage identifier with source-id is only supported with clustered " -"Data ONTAP." -msgstr "" -"source-id を含むボリューム管理識別子は、クラスタリングされた Data ONTAP での" -"みサポートされています。" - -#, python-format -msgid "Volume manager for backend '%s' does not exist." -msgstr "バックエンド '%s' のボリューム・マネージャーは存在しません。" - -msgid "" -"Volume metadata backup requested but this driver does not yet support this " -"feature." -msgstr "" -"ボリュームメタデータバックアップが要求されましたが、このドライバーではまだこ" -"の機能はサポートされていません。" - -#, python-format -msgid "Volume migration failed: %(reason)s" -msgstr "ボリュームマイグレーションが失敗しました: %(reason)s" - -msgid "Volume must be available" -msgstr "ボリュームは使用可能である必要があります" - -msgid "Volume must be in the same availability zone as the snapshot" -msgstr "ボリュームはスナップショットと同じ可用性ゾーンになければなりません" - -msgid "Volume must be in the same availability zone as the source volume" -msgstr "ボリュームはソースボリュームと同じ可用性ゾーンになければなりません" - -msgid "Volume must have a volume type" -msgstr "ボリュームにはボリューム種別が必要です" - -msgid "Volume must not be replicated." 
-msgstr "ボリュームを複製することはできません。" - -msgid "Volume must not have snapshots." -msgstr "ボリュームにスナップショットがあってはなりません。" - -#, python-format -msgid "Volume not found for instance %(instance_id)s." -msgstr "インスタンス %(instance_id)s のボリュームが見つかりませんでした。" - -msgid "Volume not found on configured storage backend." -msgstr "ボリュームが構成済みストレージバックエンドに見つかりません。" - -msgid "" -"Volume not found on configured storage backend. If your volume name contains " -"\"/\", please rename it and try to manage again." -msgstr "" -"設定されたストレージバックエンドでボリュームが見つかりません。ボリューム名に " -"\"/\" が使用されている場合、名前を変更し、再度管理を試行してください。" - -msgid "Volume not found on configured storage pools." -msgstr "ボリュームが構成済みストレージプールに見つかりません。" - -msgid "Volume not found." -msgstr "ボリュームが見つかりません。" - -msgid "Volume not unique." -msgstr "ボリュームが一意ではありません。" - -msgid "Volume not yet assigned to host." -msgstr "ボリュームがまだホストに割り当てられていません。" - -msgid "Volume reference must contain source-name element." -msgstr "ボリュームの参照にはソース名の要素が含まれる必要があります。" - -#, python-format -msgid "Volume replication for %(volume_id)s could not be found." -msgstr "%(volume_id)s のボリューム複製が見つかりませんでした。" - -#, python-format -msgid "Volume service %s failed to start." -msgstr "ボリュームサービス %s が起動できませんでした。" - -msgid "Volume should have agent-type set as None." -msgstr "ボリュームには agent-type として None を設定する必要があります。" - -#, python-format -msgid "" -"Volume size %(volume_size)sGB cannot be smaller than the image minDisk size " -"%(min_disk)sGB." -msgstr "" -"ボリュームサイズ %(volume_size)s GB をイメージの minDisk サイズ %(min_disk)s " -"GB より小さくすることはできません。" - -#, python-format -msgid "Volume size '%(size)s' must be an integer and greater than 0" -msgstr "" -"ボリュームサイズ '%(size)s' は、整数であり、かつ 0 より大きくなければなりませ" -"ん" - -#, python-format -msgid "" -"Volume size '%(size)s'GB cannot be smaller than original volume size " -"%(source_size)sGB. They must be >= original volume size." 
-msgstr "" -"ボリュームサイズ \"%(size)s\" GB を元のボリュームサイズ %(source_size)s GB よ" -"り小さくすることはできません。このサイズは元のボリュームサイズ以上でなければ" -"なりません。" - -#, python-format -msgid "" -"Volume size '%(size)s'GB cannot be smaller than the snapshot size " -"%(snap_size)sGB. They must be >= original snapshot size." -msgstr "" -"ボリュームサイズ「%(size)s」GB をスナップショットサイズ %(snap_size)s GB より" -"小さくすることはできません。このサイズは元のスナップショットサイズ以上でなけ" -"ればなりません。" - -msgid "Volume size increased since the last backup. Do a full backup." -msgstr "" -"最後のバックアップ以降にボリュームサイズが増加しました。フルバックアップを実" -"行してください。" - -msgid "Volume size must be a multiple of 1 GB." -msgstr "ボリュームサイズは 1 GB の倍数である必要があります。" - -msgid "Volume size must multiple of 1 GB." -msgstr "ボリュームサイズは 1 GB の倍数である必要があります" - -#, python-format -msgid "Volume status must be \"available\" or \"in-use\" for snapshot. (is %s)" -msgstr "" -"スナップショットに関しては、ボリュームの状態が「使用可能」または「使用中」で" -"なければなりません (現在は %s です)" - -msgid "Volume status must be \"available\" or \"in-use\"." -msgstr "ボリュームの状態は「使用可能」または「使用中」でなければなりません。" - -#, python-format -msgid "Volume status must be %s to reserve." -msgstr "ボリュームを予約するにはボリューム状態が %s である必要があります。" - -msgid "Volume status must be 'available'." -msgstr "ボリュームの状態は「使用可能」でなければなりません。" - -#, python-format -msgid "Volume status must be available for snapshot %(id)s. (is %(status)s)" -msgstr "" -"スナップショット %(id)s に関しては、ボリュームの状態が「利用可能」でなければ" -"いけません。 (現在は %(status)s です)" - -msgid "Volume to Initiator Group mapping already exists" -msgstr "ボリュームからイニシエーターグループへのマッピングは既に存在します。" - -#, python-format -msgid "" -"Volume to be backed up must be available or in-use, but the current status " -"is \"%s\"." -msgstr "" -"バックアップ対象のボリュームが利用可能か使用中である必要がありますが、現在の" -"状態は \"%s\" です。" - -msgid "Volume to be restored to must be available" -msgstr "復元するボリュームは「使用可能」でなければなりません。" - -#, python-format -msgid "Volume type %(volume_type_id)s could not be found." -msgstr "ボリューム種別 %(volume_type_id)s が見つかりませんでした。" - -#, python-format -msgid "Volume type ID '%s' is invalid." 
-msgstr "ボリュームタイプ ID '%s' は無効です。" - -#, python-format -msgid "" -"Volume type access for %(volume_type_id)s / %(project_id)s combination " -"already exists." -msgstr "" -"%(volume_type_id)s / %(project_id)s の組み合わせのボリューム種別アクセスは既" -"に存在します。" - -#, python-format -msgid "" -"Volume type access not found for %(volume_type_id)s / %(project_id)s " -"combination." -msgstr "" -"%(volume_type_id)s / %(project_id)s の組み合わせのボリューム種別アクセスが見" -"つかりません。" - -#, python-format -msgid "Volume type encryption for type %(type_id)s already exists." -msgstr "タイプ %(type_id)s のボリューム種別暗号化は既に存在します。" - -#, python-format -msgid "Volume type encryption for type %(type_id)s does not exist." -msgstr "タイプ %(type_id)s に対するボリューム種別暗号化は存在しません。" - -msgid "Volume type name can not be empty." -msgstr "ボリューム種別名を空にすることはできません" - -#, python-format -msgid "Volume type with name %(volume_type_name)s could not be found." -msgstr "名前 %(volume_type_name)s を持つボリューム種別が見つかりませんでした。" - -#, python-format -msgid "Volume%s: not found" -msgstr "ボリューム %s: 見つかりませんでした。" - -msgid "Volume/Snapshot not found on configured storage backend." -msgstr "" -"ボリューム/スナップショットが構成済みストレージバックエンドに見つかりません。" - -#, python-format -msgid "" -"Volume: %(volumeName)s is not a concatenated volume. You can only perform " -"extend on concatenated volume. Exiting..." -msgstr "" -"ボリューム %(volumeName)s は連結されたボリュームではありません。拡張を実行で" -"きる対象は、連結されたボリュームのみです。終了中..." - -#, python-format -msgid "Volume: %(volumeName)s was not added to storage group %(sgGroupName)s." -msgstr "" -"ボリューム %(volumeName)s がストレージグループ %(sgGroupName)s に追加されませ" -"んでした。" - -#, python-format -msgid "Volume: %s is already being managed by Cinder." -msgstr "ボリューム: %s はすでに Cinder によって管理されています。" - -msgid "" -"Volumes/account exceeded on both primary and secondary SolidFire accounts." -msgstr "" -" プライマリーとセカンダリーの SolidFire アカウント上で、ボリュームとアカウン" -"トの数量が超過しました。" - -#, python-format -msgid "" -"VzStorage config 'vzstorage_used_ratio' invalid. Must be > 0 and <= 1.0: %s." 
-msgstr "" -"VzStorage の設定の 'vzstorage_used_ratio' が無効です。0 より大きく 1.0 以下で" -"ある必要があります: %s。" - -#, python-format -msgid "VzStorage config file at %(config)s doesn't exist." -msgstr "%(config)s の VzStorage のコンフィグファイルが存在しません" - -msgid "Wait replica complete timeout." -msgstr "レプリカの完了を待機するタイムアウト。" - -#, python-format -msgid "Wait synchronize failed. Running status: %s." -msgstr "同期の待機が失敗しました。実行状態: %s。" - -msgid "" -"Waiting for all nodes to join cluster. Ensure all sheep daemons are running." -msgstr "" -"すべてのノードがクラスターに接続するのを待機しています。すべてのシープデーモ" -"ンが稼働中であることを確認してください。" - -msgid "We should not do switch over on primary array." -msgstr "プライマリーアレイで切り替えを行ってはなりません。" - -#, python-format -msgid "Worker for %(type)s %(id)s already exists." -msgstr "%(type)s のワーカー %(id)s はすでに存在します。" - -#, python-format -msgid "Worker with %s could not be found." -msgstr "%s の ID を持つワーカーを見つけることができませんでした。" - -msgid "X-IO Volume Driver exception!" -msgstr "X-IO ボリュームドライバー例外が発生しました。" - -msgid "XtremIO not configured correctly, no iscsi portals found" -msgstr "XtremIO は正しく設定されていません。iscsi ポータルが見つかりません" - -msgid "XtremIO not initialized correctly, no clusters found" -msgstr "XtremIO は正しく初期化されていません。クラスターが見つかりません" - -msgid "You must implement __call__" -msgstr "__call__ を実装しなければなりません" - -msgid "" -"You must install hpe3parclient before using 3PAR drivers. Run \"pip install " -"python-3parclient\" to install the hpe3parclient." -msgstr "" -"3PAR ドライバーを使用するには hpe3parclient をインストールしておく必要があり" -"ます。 \"pip install python-3parclient\" を実行して hpe3parclient をインス" -"トールしてください。" - -msgid "You must supply an array in your EMC configuration file." -msgstr "EMC 構成ファイルにアレイを指定する必要があります。" - -#, python-format -msgid "" -"Your original size: %(originalVolumeSize)s GB is greater than: %(newSize)s " -"GB. Only Extend is supported. Exiting..." -msgstr "" -"元のサイズ %(originalVolumeSize)s GB が、%(newSize)s GB より大きくなっていま" -"す。拡張のみがサポートされます。終了中..." 
- -#, python-format -msgid "ZeroDivisionError: %s" -msgstr "ZeroDivisionError: %s" - -msgid "Zone" -msgstr "ゾーン" - -#, python-format -msgid "Zoning Policy: %s, not recognized" -msgstr "ゾーニングポリシー %s は認識されていません" - -#, python-format -msgid "" -"[%(group)s] Invalid %(protocol)s ports %(port)s specified for io_port_list." -msgstr "" -"[%(group)s] 無効な %(protocol)s ポート %(port)s が io_port_list に指定されま" -"した。" - -msgid "_call failed." -msgstr "_call が失敗しました。" - -#, python-format -msgid "" -"_cloned_volume: Failed to clone vol. vol name: %(name)s with Return code: " -"%(ret)s. " -msgstr "" -"_cloned_volume: ボリュームのクローンに失敗しました。ボリューム名: %(name)s, " -"戻りコード: %(ret)s" - -#, python-format -msgid "_create_and_copy_vdisk_data: Failed to get attributes for vdisk %s." -msgstr "_create_and_copy_vdisk_data: vdisk %s の属性を取得できませんでした。" - -msgid "_create_host failed to return the host name." -msgstr "_create_host でホスト名を返すことができませんでした。" - -msgid "" -"_create_host: Can not translate host name. Host name is not unicode or " -"string." -msgstr "" -"_create_host: ホスト名を変換できません。ホスト名は Unicode でもバイト文字列で" -"もありません。" - -msgid "_create_host: No connector ports." -msgstr "_create_host: コネクターポートがありません。" - -msgid "_create_local_cloned_volume, Replication Service not found." -msgstr "" -"_create_local_cloned_volume、レプリケーションサービスが見つかりません。" - -#, python-format -msgid "" -"_create_local_cloned_volume, volumename: %(volumename)s, sourcevolumename: " -"%(sourcevolumename)s, source volume instance: %(source_volume)s, target " -"volume instance: %(target_volume)s, Return code: %(rc)lu, Error: " -"%(errordesc)s." 
-msgstr "" -"_create_local_cloned_volume、ボリューム名: %(volumename)s、ソースボリューム" -"名: %(sourcevolumename)s、ソースボリュームインスタンス: %(source_volume)s、" -"ターゲットボリュームインスタンス: %(target_volume)s、戻りコード: %(rc)lu、エ" -"ラー: %(errordesc)s。" - -#, python-format -msgid "" -"_create_vdisk %(name)s - did not find success message in CLI output.\n" -" stdout: %(out)s\n" -" stderr: %(err)s" -msgstr "" -"_create_vdisk %(name)s - 成功メッセージが CLI 出力内に見つかりませんでし" -"た。\n" -" stdout: %(out)s\n" -" stderr: %(err)s" - -msgid "_create_volume_name, id_code is None." -msgstr "_create_volume_name、id_code がありません。" - -msgid "_delete_copysession, Cannot find Replication Service" -msgstr "" -"_delete_copysession、レプリケーションサービスを見つけることができません" - -#, python-format -msgid "" -"_delete_copysession, copy session type is undefined! copy session: " -"%(cpsession)s, copy type: %(copytype)s." -msgstr "" -"_delete_copysession、コピーセッションのタイプが定義されていません。コピーセッ" -"ション: %(cpsession)s、コピータイプ: %(copytype)s。" - -#, python-format -msgid "" -"_delete_copysession, copysession: %(cpsession)s, operation: %(operation)s, " -"Return code: %(rc)lu, Error: %(errordesc)s." -msgstr "" -"_delete_copysession、コピーセッション: %(cpsession)s、操作: %(operation)s、戻" -"りコード: %(rc)lu、エラー: %(errordesc)s。" - -#, python-format -msgid "" -"_delete_cvol: Failed to delete clone vol. cloned name: %(name)s with Return " -"code: %(ret)s." -msgstr "" -"_delete_cvol: クローンボリュームの削除に失敗しました。クローン名: %(name)s , " -"戻りコード: %(ret)s." - -#, python-format -msgid "" -"_delete_snapshot:Failed to delete snap.snap name: %(snapname)s with Return " -"code: %(ret)s." -msgstr "" -"_delete_snapshot: スナップショットの削除に失敗しました。スナップショット名: " -"%(snapname)s, 戻りコード: %(ret)s" - -#, python-format -msgid "" -"_delete_volume, volumename: %(volumename)s, Return code: %(rc)lu, Error: " -"%(errordesc)s." -msgstr "" -"_delete_volume、ボリューム名: %(volumename)s、戻りコード: %(rc)lu、エラー: " -"%(errordesc)s。" - -#, python-format -msgid "" -"_delete_volume, volumename: %(volumename)s, Storage Configuration Service " -"not found." 
-msgstr "" -"_delete_volume、ボリューム名: %(volumename)s、ストレージ設定サービスが見つか" -"りません。" - -#, python-format -msgid "" -"_exec_eternus_service, classname: %(classname)s, InvokeMethod, cannot " -"connect to ETERNUS." -msgstr "" -"_exec_eternus_service、クラス名: %(classname)s、InvokeMethod、ETERNUS に接続" -"できません。" - -#, python-format -msgid "" -"_extend_volume:Failed to extend vol.vol name:%(name)s with Return code: " -"%(ret)s." -msgstr "" -"_extend_volume: ボリュームの拡張に失敗しました。ボリューム名: %(name)s, 戻り" -"コード: %(ret)s" - -msgid "_extend_volume_op: Extending a volume with snapshots is not supported." -msgstr "" -"_extend_volume_op: スナップショットを持つボリュームの拡張はサポートされていま" -"せん。" - -#, python-format -msgid "" -"_find_affinity_group, connector: %(connector)s, Associators: " -"FUJITSU_AuthorizedTarget, cannot connect to ETERNUS." -msgstr "" -"_find_affinity_group、コネクター: %(connector)s、アソシエーター: " -"FUJITSU_AuthorizedTarget を ETERNUS に接続できません。" - -#, python-format -msgid "" -"_find_affinity_group, connector: %(connector)s, EnumerateInstanceNames, " -"cannot connect to ETERNUS." -msgstr "" -"_find_affinity_group、コネクター: %(connector)s、EnumerateInstanceNames を " -"ETERNUS に接続できません。" - -#, python-format -msgid "" -"_find_affinity_group,connector: %(connector)s,AssocNames: " -"FUJITSU_ProtocolControllerForUnit, cannot connect to ETERNUS." -msgstr "" -"_find_affinity_group、コネクター: %(connector)s、AssocNames: " -"FUJITSU_ProtocolControllerForUnit を ETERNUS に接続できません。" - -#, python-format -msgid "" -"_find_copysession, ReferenceNames, vol_instance: %(vol_instance_path)s, " -"Cannot connect to ETERNUS." -msgstr "" -"_find_copysession、ReferenceNames、vol_instance: %(vol_instance_path)s、" -"ETERNUS に接続できません。" - -#, python-format -msgid "" -"_find_eternus_service, classname: %(classname)s, EnumerateInstanceNames, " -"cannot connect to ETERNUS." 
-msgstr "" -"_find_eternus_service、クラス名: %(classname)s、EnumerateInstanceNames、" -"ETERNUS に接続できません。" - -#, python-format -msgid "_find_initiator_names, connector: %(connector)s, initiator not found." -msgstr "" -"_find_initiator_names、コネクター: %(connector)s、イニシエーターが見つかりま" -"せん。" - -#, python-format -msgid "" -"_find_lun, volumename: %(volumename)s, EnumerateInstanceNames, cannot " -"connect to ETERNUS." -msgstr "" -"_find_lun、ボリューム名: %(volumename)s、EnumerateInstanceNames、ETERNUS に接" -"続できません。" - -#, python-format -msgid "" -"_find_pool, eternus_pool:%(eternus_pool)s, EnumerateInstances, cannot " -"connect to ETERNUS." -msgstr "" -"_find_pool、eternus_pool:%(eternus_pool)s、EnumerateInstances、ETERNUS に接続" -"できません。" - -msgid "_get_async_url: Invalid URL." -msgstr "_get_async_url: 正しくないURLです。" - -#, python-format -msgid "" -"_get_drvcfg, filename: %(filename)s, tagname: %(tagname)s, data is None!! " -"Please edit driver configuration file and correct." -msgstr "" -"_get_drvcfg、ファイル名: %(filename)s、tagname: %(tagname)s、データがありませ" -"ん。ドライバー設定ファイルを編集して修正してください。" - -#, python-format -msgid "" -"_get_eternus_connection, filename: %(filename)s, ip: %(ip)s, port: %(port)s, " -"user: %(user)s, passwd: ****, url: %(url)s, FAILED!!." -msgstr "" -"_get_eternus_connection、ファイル名: %(filename)s、ip: %(ip)s、ポート: " -"%(port)s、ユーザー: %(user)s、パスワード: ****、url: %(url)s、失敗しました。" - -#, python-format -msgid "" -"_get_eternus_iscsi_properties, iscsiip list: %(iscsiip_list)s, iqn not found." -msgstr "" -"_get_eternus_iscsi_properties、iscsiip list: %(iscsiip_list)s、iqn が見つかり" -"ません。" - -#, python-format -msgid "" -"_get_eternus_iscsi_properties, iscsiip: %(iscsiip)s, AssociatorNames: " -"CIM_BindsTo, cannot connect to ETERNUS." -msgstr "" -"_get_eternus_iscsi_properties、iscsiip: %(iscsiip)s、AssociatorNames: " -"CIM_BindsTo を ETERNUS に接続できません。" - -#, python-format -msgid "" -"_get_eternus_iscsi_properties, iscsiip: %(iscsiip)s, EnumerateInstanceNames, " -"cannot connect to ETERNUS." 
-msgstr "" -"_get_eternus_iscsi_properties、iscsiip: %(iscsiip)s、EnumerateInstanceNames " -"を ETERNUS に接続できません。" - -#, python-format -msgid "" -"_get_eternus_iscsi_properties, iscsiip: %(iscsiip)s, GetInstance, cannot " -"connect to ETERNUS." -msgstr "" -"_get_eternus_iscsi_properties、iscsiip: %(iscsiip)s、GetInstance を ETERNUS " -"に接続できません。" - -#, python-format -msgid "" -"_get_hdr_dic: attribute headers and values do not match.\n" -" Headers: %(header)s\n" -" Values: %(row)s." -msgstr "" -"_get_hdr_dic: 属性のヘッダーと値が適合していません。\n" -" ヘッダー: %(header)s\n" -" 値: %(row)s。" - -msgid "_get_host_from_connector failed to return the host name for connector." -msgstr "" -"_get_host_from_connector がコネクターのホスト名を返すことができませんでした。" - -#, python-format -msgid "" -"_get_mapdata_fc, getting host-affinity from aglist/vol_instance failed, " -"affinitygroup: %(ag)s, ReferenceNames, cannot connect to ETERNUS." -msgstr "" -"_get_mapdata_fc、aglist/vol_instance からの host-affinity の取得が失敗しまし" -"た。affinitygroup: %(ag)s、ReferenceNames を ETERNUS に接続できません。" - -#, python-format -msgid "" -"_get_mapdata_fc, getting host-affinity instance failed, volmap: %(volmap)s, " -"GetInstance, cannot connect to ETERNUS." -msgstr "" -"_get_mapdata_fc、host-affinity インスタンスの取得が失敗しました。volmap: " -"%(volmap)s、GetInstance、ETERNUS に接続できません。" - -msgid "" -"_get_mapdata_iscsi, Associators: FUJITSU_SAPAvailableForElement, cannot " -"connect to ETERNUS." -msgstr "" -"_get_mapdata_iscsi、アソシエーター: FUJITSU_SAPAvailableForElement、ETERNUS " -"に接続できません。" - -#, python-format -msgid "" -"_get_mapdata_iscsi, affinitygroup: %(ag)s, ReferenceNames, cannot connect to " -"ETERNUS." -msgstr "" -"_get_mapdata_iscsi、affinitygroup: %(ag)s, ReferenceNames、ETERNUS に接続でき" -"ません。" - -#, python-format -msgid "" -"_get_mapdata_iscsi, vol_instance: %(vol_instance)s, ReferenceNames: " -"CIM_ProtocolControllerForUnit, cannot connect to ETERNUS." 
-msgstr "" -"_get_mapdata_iscsi、vol_instance: %(vol_instance)s、ReferenceNames: " -"CIM_ProtocolControllerForUnit、ETERNUS に接続できません。" - -#, python-format -msgid "" -"_get_mapdata_iscsi, volmap: %(volmap)s, GetInstance, cannot connect to " -"ETERNUS." -msgstr "" -"_get_mapdata_iscsi、volmap: %(volmap)s、GetInstance、ETERNUS に接続できませ" -"ん。" - -msgid "_get_target_port, EnumerateInstances, cannot connect to ETERNUS." -msgstr "_get_target_port、EnumerateInstances を ETERNUS に接続できません。" - -#, python-format -msgid "_get_target_port, protcol: %(protocol)s, target_port not found." -msgstr "" -"_get_target_port、プロトコル: %(protocol)s、target_port が見つかりません。" - -#, python-format -msgid "_get_unmanaged_replay: Cannot find snapshot named %s" -msgstr "_get_unmanaged_replay: %s という名前のスナップショットが見つかりません" - -#, python-format -msgid "_get_unmanaged_replay: Cannot find volume id %s" -msgstr "_get_unmanaged_replay: ボリューム ID %s が見つかりません。" - -msgid "_get_unmanaged_replay: Must specify source-name." -msgstr "_get_unmanaged_replay: ソース名を指定する必要があります。" - -msgid "" -"_get_vdisk_map_properties: Could not get FC connection information for the " -"host-volume connection. Is the host configured properly for FC connections?" -msgstr "" -"_get_vdisk_map_properties: ホストとボリュームの接続について FC 接続情報を取得" -"できませんでした。ホストが FC 接続用に正しく構成されていますか。" - -#, python-format -msgid "" -"_get_vdisk_map_properties: No node found in I/O group %(gid)s for volume " -"%(vol)s." -msgstr "" -"_get_vdisk_map_properties: ボリューム %(vol)s の入出力グループ %(gid)s でノー" -"ドが見つかりませんでした。" - -#, python-format -msgid "" -"_map_lun, vol_instance.path:%(vol)s, volumename: %(volumename)s, volume_uid: " -"%(uid)s, initiator: %(initiator)s, target: %(tgt)s, aglist: %(aglist)s, " -"Storage Configuration Service not found." 
-msgstr "" -"_map_lun、vol_instance.path:%(vol)s、ボリューム名: %(volumename)s、" -"volume_uid: %(uid)s、イニシエーター: %(initiator)s、ターゲット: %(tgt)s、" -"aglist: %(aglist)s、ストレージ設定サービスが見つかりません。" - -#, python-format -msgid "" -"_unmap_lun, vol_instance.path: %(volume)s, volumename: %(volumename)s, " -"volume_uid: %(uid)s, aglist: %(aglist)s, Controller Configuration Service " -"not found." -msgstr "" -"_unmap_lun、vol_instance.path: %(volume)s、ボリューム名: %(volumename)s、" -"volume_uid: %(uid)s、aglist: %(aglist)s、コントローラー設定サービスが見つかり" -"ません。" - -#, python-format -msgid "" -"_unmap_lun, volumename: %(volumename)s, volume_uid: %(volume_uid)s, " -"AffinityGroup: %(ag)s, Return code: %(rc)lu, Error: %(errordesc)s." -msgstr "" -"_unmap_lun、ボリューム名 %(volumename)s、volume_uid: %(volume_uid)s、" -"AffinityGroup: %(ag)s、戻りコード: %(rc)lu、エラー: %(errordesc)s。" - -#, python-format -msgid "" -"_unmap_lun,vol_instance.path: %(volume)s, AssociatorNames: " -"CIM_ProtocolControllerForUnit, cannot connect to ETERNUS." -msgstr "" -"_unmap_lun、vol_instance.path: %(volume)s、AssociatorNames: " -"CIM_ProtocolControllerForUnit を ETERNUS に接続できません。" - -msgid "_update_volume_stats: Could not get storage pool data." -msgstr "_update_volume_stats: ストレージプールデータを取得できませんでした。" - -#, python-format -msgid "" -"_wait_for_copy_complete, cpsession: %(cpsession)s, copysession state is " -"BROKEN." -msgstr "" -"_wait_for_copy_complete、cpsession: %(cpsession)s、コピーセッション状態は " -"BROKEN です。" - -msgid "action_locked not found" -msgstr "action_locked が見つかりません。" - -#, python-format -msgid "" -"add_vdisk_copy failed: A copy of volume %s exists. Adding another copy would " -"exceed the limit of 2 copies." -msgstr "" -"add_vdisk_copy が失敗しました。ボリューム %s のコピーが存在します。別のコピー" -"を追加すると、コピー数 2 という制限を超えます。" - -msgid "add_vdisk_copy started without a vdisk copy in the expected pool." -msgstr "" -"期待されたプール内の vdisk コピーなしで add_vdisk_copy が開始されました。" - -#, python-format -msgid "all_tenants must be a boolean, got '%s'." 
-msgstr "all_tenants はブール値である必要がありますが、'%s' が得られました。" - -msgid "already created" -msgstr "既に作成済み" - -msgid "already_created" -msgstr "already_created" - -msgid "attach snapshot from remote node" -msgstr "リモートノードにスナップショットを追加します。" - -#, python-format -msgid "attribute %s not lazy-loadable" -msgstr "属性 %s は遅延ロードできません" - -#, python-format -msgid "" -"backup: %(vol_id)s failed to create device hardlink from %(vpath)s to " -"%(bpath)s.\n" -"stdout: %(out)s\n" -" stderr: %(err)s" -msgstr "" -"バックアップ: %(vol_id)s での %(vpath)s から %(bpath)s へのデバイスハードリン" -"クの作成に失敗しました。\n" -" stdout: %(out)s\n" -" stderr: %(err)s" - -#, python-format -msgid "" -"backup: %(vol_id)s failed to obtain backup success notification from " -"server.\n" -"stdout: %(out)s\n" -" stderr: %(err)s" -msgstr "" -"バックアップ: %(vol_id)s サーバーからバックアップ成功通知を取得できませんでし" -"た。\n" -" stdout: %(out)s\n" -" stderr: %(err)s" - -#, python-format -msgid "" -"backup: %(vol_id)s failed to run dsmc due to invalid arguments on " -"%(bpath)s.\n" -"stdout: %(out)s\n" -" stderr: %(err)s" -msgstr "" -"バックアップ: %(vol_id)s で、%(bpath)s 上の無効な引数のため、dsmc の実行に失" -"敗しました。\n" -" stdout: %(out)s\n" -" stderr: %(err)s" - -#, python-format -msgid "" -"backup: %(vol_id)s failed to run dsmc on %(bpath)s.\n" -"stdout: %(out)s\n" -" stderr: %(err)s" -msgstr "" -"バックアップ: %(vol_id)s %(bpath)s で dsmc を実行できませんでした。\n" -" stdout: %(out)s\n" -" stderr: %(err)s" - -#, python-format -msgid "backup: %(vol_id)s failed. %(path)s is not a file." -msgstr "" -"バックアップ: %(vol_id)s に障害が発生しました。%(path)s はファイルではありま" -"せん。" - -#, python-format -msgid "" -"backup: %(vol_id)s failed. %(path)s is unexpected file type. Block or " -"regular files supported, actual file mode is %(vol_mode)s." -msgstr "" -"バックアップ: %(vol_id)s に障害が発生しました。%(path)s は予期されていない" -"ファイルタイプです。ブロック化または通常のファイルがサポートされています。実" -"際のファイルモードは %(vol_mode)s です。" - -#, python-format -msgid "" -"backup: %(vol_id)s failed. Cannot obtain real path to volume at %(path)s." 
-msgstr "" -"バックアップ: %(vol_id)s に障害が発生しました。ボリュームへの実際のパス " -"%(path)s を取得できません。" - -msgid "being attached by different mode" -msgstr "別のモードで接続しています。" - -#, python-format -msgid "build_ini_targ_map fails. %s" -msgstr "build_ini_targ_map が失敗しました。 %s" - -#, python-format -msgid "call failed: %r" -msgstr "呼び出しが失敗しました: %r" - -msgid "call failed: GARBAGE_ARGS" -msgstr "呼び出しが失敗しました: GARBAGE_ARGS" - -msgid "call failed: PROC_UNAVAIL" -msgstr "呼び出しが失敗しました: PROC_UNAVAIL" - -#, python-format -msgid "call failed: PROG_MISMATCH: %r" -msgstr "呼び出しが失敗しました: PROG_MISMATCH: %r" - -msgid "call failed: PROG_UNAVAIL" -msgstr "呼び出しが失敗しました: PROG_UNAVAIL" - -#, python-format -msgid "can't find lun-map, ig:%(ig)s vol:%(vol)s" -msgstr "lun-map を見つけることができません。ig:%(ig)s vol:%(vol)s" - -msgid "can't find the volume to extend" -msgstr "拡張するボリュームが見つかりません" - -msgid "can't handle both name and index in req" -msgstr "req にある名前とインデックスはどちらも処理できません" - -msgid "cannot understand JSON" -msgstr "JSON を解釈できません" - -#, python-format -msgid "cg-%s" -msgstr "cg: %s" - -msgid "" -"cg_creating_from_src must be called with cg_id or cgsnapshot_id parameter." -msgstr "" -"cg_creating_from_src は cg_id または cgsnapshot_id パラメーターと共に呼び出す" -"必要があります。" - -msgid "cgsnapshot assigned" -msgstr "割り当てられた cgsnapshot" - -msgid "cgsnapshot changed" -msgstr "変更された cgsnapshot" - -msgid "cgsnapshots assigned" -msgstr "割り当てられた cgsnapshot" - -msgid "cgsnapshots changed" -msgstr "変更された cgsnapshot" - -msgid "" -"check_for_setup_error: Password or SSH private key is required for " -"authentication: set either san_password or san_private_key option." -msgstr "" -"check_for_setup_error: 認証にはパスワードまたは SSH 秘密鍵が必要です: " -"san_password または san_private_key オプションを設定してください。" - -msgid "check_for_setup_error: Unable to determine system id." -msgstr "check_for_setup_error: システム ID を判別できません。" - -msgid "check_for_setup_error: Unable to determine system name." -msgstr "check_for_setup_error: システム名を判別できません。" - -msgid "check_hypermetro_exist error." 
-msgstr "check_hypermetro_exist エラー。" - -#, python-format -msgid "clone depth exceeds limit of %s" -msgstr "複製の深さが限度 %s を超えています" - -msgid "cluster assigned" -msgstr "割り当てられたクラスター" - -msgid "cluster changed" -msgstr "変更されたクラスター" - -msgid "config option key_manager.fixed_key is not defined" -msgstr "設定オプション key_manager.fixed_key は定義されていません。" - -#, python-format -msgid "consistency group with name: %s already exists" -msgstr "名前 %s を持つ整合性グループはすでに存在します。" - -msgid "consistencygroup assigned" -msgstr "割り当てられた整合性グループ" - -msgid "consistencygroup changed" -msgstr "変更された整合性グループ" - -msgid "control_location must be defined" -msgstr "control_location を定義する必要があります" - -msgid "coprhd_hostname is not set in cinder configuration" -msgstr "cinder 設定で coprhd_hostname が設定されていません。" - -msgid "coprhd_password is not set in cinder configuration" -msgstr "cinder 設定で coprhd_password が設定されていません。" - -msgid "coprhd_port is not set in cinder configuration" -msgstr "cinder 設定で coprhd_port が設定されていません。" - -msgid "coprhd_project is not set in cinder configuration" -msgstr "cinder 設定で coprhd_project が設定されていません。" - -msgid "coprhd_tenant is not set in cinder configuration" -msgstr "cinder 設定で coprhd_tenant が設定されていません。" - -msgid "coprhd_username is not set in cinder configuration" -msgstr "cinder 設定で coprhd_username が設定されていません。" - -msgid "coprhd_varray is not set in cinder configuration" -msgstr "cinder 設定で coprhd_varray が設定されていません。" - -msgid "create hypermetro group error." -msgstr "hypermetro グループ作成エラー。" - -msgid "create_cloned_volume, Source Volume does not exist in ETERNUS." -msgstr "create_cloned_volume、ETERNUS にソースボリュームが存在しません。" - -#, python-format -msgid "" -"create_cloned_volume, target volume instancename: %(volume_instancename)s, " -"Get Instance Failed." -msgstr "" -"create_cloned_volume、ターゲットボリュームインスタンス名: " -"%(volume_instancename)s、インスタンスの取得が失敗しました。" - -msgid "create_cloned_volume: Source and destination size differ." 
-msgstr "create_cloned_volume: ソースと宛先のサイズが異なっています。" - -#, python-format -msgid "" -"create_cloned_volume: source volume %(src_vol)s size is %(src_size)dGB and " -"doesn't fit in target volume %(tgt_vol)s of size %(tgt_size)dGB." -msgstr "" -"create_cloned_volume: ソースボリューム %(src_vol)s のサイズは %(src_size)dGB " -"で、サイズ %(tgt_size)dGBand のターゲットボリューム %(tgt_vol)s に適合しませ" -"ん。" - -msgid "" -"create_consistencygroup_from_src must be creating from a CG snapshot, or a " -"source CG." -msgstr "" -"create_consistencygroup_from_src は CG スナップショットまたはソース CG から作" -"成しなければなりません。" - -msgid "" -"create_consistencygroup_from_src only supports a cgsnapshot source or a " -"consistency group source. Multiple sources cannot be used." -msgstr "" -"create_consistencygroup_from_src は 1 つの cgsnapshot ソースまたは整合性グ" -"ループソースのみをサポートします。複数ソースは使用できません。" - -#, python-format -msgid "create_copy: Source vdisk %(src)s (%(src_id)s) does not exist." -msgstr "create_copy: ソース vdisk %(src)s (%(src_id)s) は存在しません。" - -#, python-format -msgid "create_copy: Source vdisk %(src)s does not exist." -msgstr "create_copy: ソース vdisk %(src)s は存在しません。" - -msgid "create_host: Host name is not unicode or string." -msgstr "create_host: ホスト名は Unicode でもバイト文字列でもありません。" - -msgid "create_host: No initiators or wwpns supplied." -msgstr "create_host: イニシエーターも wwpn も指定されていません。" - -msgid "create_hypermetro_pair error." -msgstr "create_hypermetro_pair エラー。" - -#, python-format -msgid "create_snapshot, eternus_pool: %(eternus_pool)s, pool not found." -msgstr "" -"create_snapshot、eternus_pool: %(eternus_pool)s、プールが見つかりません。" - -#, python-format -msgid "" -"create_snapshot, snapshotname: %(snapshotname)s, source volume name: " -"%(volumename)s, vol_instance.path: %(vol_instance)s, dest volume name: " -"%(d_volumename)s, pool: %(pool)s, Return code: %(rc)lu, Error: %(errordesc)s." 
-msgstr "" -"create_snapshot、スナップショット名: %(snapshotname)s、ソースボリューム名: " -"%(volumename)s、vol_instance.path: %(vol_instance)s、宛先ボリューム名: " -"%(d_volumename)s、プール: %(pool)s、戻りコード: %(rc)lu、エラー: " -"%(errordesc)s。" - -#, python-format -msgid "" -"create_snapshot, volumename: %(s_volumename)s, source volume not found on " -"ETERNUS." -msgstr "" -"create_snapshot、ボリューム名: %(s_volumename)s、ETERNUS でソースボリュームが" -"見つかりません。" - -#, python-format -msgid "" -"create_snapshot, volumename: %(volumename)s, Replication Service not found." -msgstr "" -"create_snapshot、ボリューム名: %(volumename)s、レプリケーションサービスが見つ" -"かりません。" - -#, python-format -msgid "" -"create_snapshot: Volume status must be \"available\" or \"in-use\" for " -"snapshot. The invalid status is %s." -msgstr "" -"create_snapshot: スナップショットのボリュームの状態は「使用可能」または「使用" -"中」でなければなりません。無効な状態は %s です。" - -msgid "create_snapshot: get source volume failed." -msgstr "create_snapshot: ソースボリュームの取得に失敗しました。" - -#, python-format -msgid "" -"create_volume, volume: %(volume)s, EnumerateInstances, cannot connect to " -"ETERNUS." -msgstr "" -"create_volume、ボリューム: %(volume)s、EnumerateInstances、ETERNUS に接続でき" -"ません。" - -#, python-format -msgid "" -"create_volume, volume: %(volume)s, volumename: %(volumename)s, eternus_pool: " -"%(eternus_pool)s, Storage Configuration Service not found." -msgstr "" -"create_volume、ボリューム: %(volume)s、ボリューム名: %(volumename)s、" -"eternus_pool: %(eternus_pool)s、ストレージ設定サービスが見つかりません。" - -#, python-format -msgid "" -"create_volume, volumename: %(volumename)s, poolname: %(eternus_pool)s, " -"Return code: %(rc)lu, Error: %(errordesc)s." -msgstr "" -"create_volume、ボリューム名: %(volumename)s、プール名: %(eternus_pool)s、戻り" -"コード: %(rc)lu、エラー: %(errordesc)s。" - -msgid "create_volume_from_snapshot, Source Volume does not exist in ETERNUS." -msgstr "" -"create_volume_from_snapshot、ETERNUS にソースボリュームが存在しません。" - -#, python-format -msgid "" -"create_volume_from_snapshot, target volume instancename: " -"%(volume_instancename)s, Get Instance Failed." 
-msgstr "" -"create_volume_from_snapshot、ターゲットボリュームインスタンス名: " -"%(volume_instancename)s、インスタンスの取得が失敗しました。" - -#, python-format -msgid "create_volume_from_snapshot: Snapshot %(name)s does not exist." -msgstr "" -"create_volume_from_snapshot: スナップショット %(name)s は存在しません。" - -#, python-format -msgid "" -"create_volume_from_snapshot: Snapshot status must be \"available\" for " -"creating volume. The invalid status is: %s." -msgstr "" -"create_volume_from_snapshot: ボリュームを作成するには、スナップショットの状態" -"が「使用可能」でなければなりません。無効な状態は %s です。" - -msgid "" -"create_volume_from_snapshot: Volume size is different from snapshot based " -"volume." -msgstr "" -"create_volume_from_snapshot: ボリュームサイズが、スナップショットベースボ" -"リュームと異なります。" - -#, python-format -msgid "" -"create_volume_from_snapshot: snapshot %(snapshot_name)s size is " -"%(snapshot_size)dGB and doesn't fit in target volume %(volume_name)s of size " -"%(volume_size)dGB." -msgstr "" -"create_volume_from_snapshot: スナップショット %(snapshot_name)s のサイズは " -"%(snapshot_size)dGB で、ターゲットボリューム %(volume_name)s のサイズ " -"%(volume_size)dGB に適合しません。" - -msgid "data not found" -msgstr "データが見つかりません。" - -msgid "delete host from group failed. " -msgstr "グループからのホスト削除に失敗しました。" - -#, python-format -msgid "" -"delete: %(vol_id)s failed to run dsmc due to invalid arguments with stdout: " -"%(out)s\n" -" stderr: %(err)s" -msgstr "" -"削除: %(vol_id)s で、以下の無効な引数のため、dsmc の実行に失敗しました。 " -"stdout: %(out)s\n" -" stderr: %(err)s" - -#, python-format -msgid "" -"delete: %(vol_id)s failed to run dsmc with stdout: %(out)s\n" -" stderr: %(err)s" -msgstr "" -"削除: %(vol_id)s dsmc を実行できませんでした。stdout: %(out)s\n" -" stderr: %(err)s" - -msgid "delete_hypermetro error." -msgstr "delete_hypermetro エラー。" - -#, python-format -msgid "delete_initiator: %s ACL not found. Continuing." -msgstr "delete_initiator: %s ACL が見つかりません。処理を続行します。" - -msgid "delete_replication error." 
-msgstr "delete_replication エラー。" - -#, python-format -msgid "deleting snapshot %(snapshot_name)s that has dependent volumes" -msgstr "従属ボリュームを持つスナップショット %(snapshot_name)s の削除中" - -#, python-format -msgid "deleting volume %(volume_name)s that has snapshot" -msgstr "スナップショットを含むボリューム %(volume_name)s の削除中" - -msgid "detach snapshot from remote node" -msgstr "リモートノードからスナップショットを切断します。" - -msgid "do_setup: No configured nodes." -msgstr "do_setup: 構成されたノードがありません。" - -#, python-format -msgid "" -"error writing object to swift, MD5 of object in swift %(etag)s is not the " -"same as MD5 of object sent to swift %(md5)s" -msgstr "" -"Swift へのオブジェクトの書き込み中にエラーが発生しました。Swift 内のオブジェ" -"クトの MD5 %(etag)s が Swift に送信されたオブジェクトの MD5 %(md5)s と同じで" -"はありません" - -#, python-format -msgid "" -"error: Incorrect value of new size: %(new_size_in_gb)s GB\n" -"New size must be greater than current size: %(current_size)s GB" -msgstr "" -"エラー: 新しいサイズの値が正しくありません: %(new_size_in_gb)s GB\n" -"新しいサイズは現在のサイズより大きくなければいけません。現在のサイズ: " -"%(current_size)s GB" - -msgid "error: task list is empty, no task response found" -msgstr "エラー: タスクリストが空です。タスクの応答が見つかりませんでした。" - -msgid "" -"existing_ref argument must be of this format:app_inst_name:storage_inst_name:" -"vol_name" -msgstr "" -"引数 existing_ref は 次の形式でなければいけません: app_inst_name:" -"storage_inst_name:vol_name" - -#, python-format -msgid "extend_volume, eternus_pool: %(eternus_pool)s, pool not found." -msgstr "" -"extend_volume、eternus_pool: %(eternus_pool)s、プールが見つかりません。" - -#, python-format -msgid "" -"extend_volume, volume: %(volume)s, volumename: %(volumename)s, eternus_pool: " -"%(eternus_pool)s, Storage Configuration Service not found." -msgstr "" -"extend_volume、ボリューム: %(volume)s、ボリューム名: %(volumename)s、" -"eternus_pool: %(eternus_pool)s、ストレージ設定サービスが見つかりません。" - -#, python-format -msgid "" -"extend_volume, volumename: %(volumename)s, Return code: %(rc)lu, Error: " -"%(errordesc)s, PoolType: %(pooltype)s." 
-msgstr "" -"extend_volume、ボリューム名: %(volumename)s、戻りコード: %(rc)lu、エラー: " -"%(errordesc)s、プールタイプ: %(pooltype)s。" - -#, python-format -msgid "extend_volume, volumename: %(volumename)s, volume not found." -msgstr "" -"extend_volume、ボリューム名: %(volumename)s、ボリュームが見つかりません。" - -msgid "failed to create new_volume on destination host" -msgstr "宛先ホスト上に new_volume を作成できませんでした" - -msgid "fake" -msgstr "偽" - -#, python-format -msgid "file already exists at %s" -msgstr "ファイルは %s に既に存在します。" - -msgid "fileno is not supported by SheepdogIOWrapper" -msgstr "SheepdogIOWrapper は fileno をサポートしません。" - -msgid "fileno() not supported by RBD()" -msgstr "fileno() は RBD() でサポートされていません。" - -#, python-format -msgid "filesystem %s does not exist in Nexenta Store appliance" -msgstr "ファイルシステム%s は Nexenta Store アプライアンスに存在しません" - -msgid "" -"flashsystem_multihostmap_enabled is set to False, not allow multi host " -"mapping. CMMVC6071E The VDisk-to-host mapping was not created because the " -"VDisk is already mapped to a host." -msgstr "" -"flashsystem_multihostmap_enabled は False に設定されています。マルチホスト" -"マッピングは許可されていません。CMMVC6071E VDisk は既にホストにマッピングされ" -"ているため、VDisk からホストへのマッピングは作成されませんでした。" - -msgid "flush() not supported in this version of librbd" -msgstr "このバージョンの librbd では flush() はサポートされていません" - -#, python-format -msgid "fmt=%(fmt)s backed by: %(backing_file)s" -msgstr "fmt=%(fmt)s の基盤: %(backing_file)s" - -#, python-format -msgid "fmt=%(fmt)s backed by:%(backing_file)s" -msgstr "fmt=%(fmt)s は %(backing_file)s でサポートされています" - -msgid "force delete" -msgstr "強制削除" - -msgid "get_hyper_domain_id error." -msgstr "get_hyper_domain_id エラー。" - -msgid "get_hypermetro_by_id error." -msgstr "get_hypermetro_by_id エラー。" - -#, python-format -msgid "" -"get_iscsi_params: Failed to get target IP for initiator %(ini)s, please " -"check config file." -msgstr "" -"get_iscsi_params: イニシエーター %(ini)s のターゲット IP の取得に失敗しまし" -"た。設定ファイルを確認してください。" - -#, python-format -msgid "" -"get_iscsi_params: No valid port in portgroup. 
portgroup_id: %(id)s, please " -"check it on storage." -msgstr "" -"get_iscsi_params: 有効なポートがポートグループ内に見つかりません。ストレージ" -"上で確認を行って下さい。 portgroup_id: %(id)s" - -#, python-format -msgid "get_pool: Failed to get attributes for volume %s" -msgstr "get_pool: ボリューム %s の属性の取得に失敗しました。" - -msgid "gid is null. FSS failed to delete cgsnapshot." -msgstr "gid が null です。 FSS は cgsnapshot の削除に失敗しました。" - -msgid "glance_metadata changed" -msgstr "変更された glance_metadata" - -#, python-format -msgid "" -"gpfs_images_share_mode is set to copy_on_write, but %(vol)s and %(img)s " -"belong to different file systems." -msgstr "" -"gpfs_images_share_mode は copy_on_write に設定されていますが、%(vol)s と " -"%(img)s は異なるファイルシステムに属しています。" - -#, python-format -msgid "" -"gpfs_images_share_mode is set to copy_on_write, but %(vol)s and %(img)s " -"belong to different filesets." -msgstr "" -"gpfs_images_share_mode は copy_on_write に設定されていますが、%(vol)s と " -"%(img)s は異なるファイルセットに属しています。" - -msgid "group assigned" -msgstr "割り当てられたグループ" - -msgid "group changed" -msgstr "変更されたグループ" - -#, python-format -msgid "group-%s" -msgstr "グループ - %s" - -msgid "" -"group_creating_from_src must be called with group_id or group_snapshot_id " -"parameter." -msgstr "" -"group_creating_from_src は group_id または group_snapshot_id パラメーターと共" -"に呼び出す必要があります。" - -msgid "group_snapshot assigned" -msgstr "割り当てられた group_snapshot " - -msgid "group_snapshot changed" -msgstr "変更された group_snapshot " - -msgid "group_snapshots assigned" -msgstr "割り当てられたグループスナップショット" - -#, python-format -msgid "group_type must be provided to create group %(name)s." 
-msgstr "" -"グループ %(name)s を作成するには、group_type を指定する必要があります。" - -msgid "group_type_id cannot be None" -msgstr "group_type_id を None に設定することはできません。" - -#, python-format -msgid "" -"hgst_group %(grp)s and hgst_user %(usr)s must map to valid users/groups in " -"cinder.conf" -msgstr "" -"cinder.conf で hgst_group %(grp)s と hgst_user %(usr)s が適切なユーザーとグ" -"ループに合致する必要があります。" - -#, python-format -msgid "hgst_net %(net)s specified in cinder.conf not found in cluster" -msgstr "cinder.conf で指定した hgst_net %(net)s がクラスターで見つかりません。" - -msgid "hgst_redundancy must be set to 0 (non-HA) or 1 (HA) in cinder.conf." -msgstr "" -"cinder.conf で hgst_redundancy を 0 (HA でない) または 1 (HA) に設定する必要" -"があります。" - -msgid "hgst_space_mode must be an octal/int in cinder.conf" -msgstr " cinder.conf で hgst_space_mode は octal/int である必要があります。" - -#, python-format -msgid "hgst_storage server %(svr)s not of format :" -msgstr "" -"hgst_storage サーバー %(svr)s で : の形式が設定されていません。" - -msgid "hgst_storage_servers must be defined in cinder.conf" -msgstr "cinder.conf で hgst_storage_servers を定義する必要があります。" - -msgid "" -"http service may have been abruptly disabled or put to maintenance state in " -"the middle of this operation." -msgstr "" -"この操作の途中で HTTP サービスが急に無効または保守状態になった可能性がありま" -"す。" - -msgid "id cannot be None" -msgstr "ID を None にすることはできません。" - -#, python-format -msgid "image %s not found" -msgstr "イメージ %s が見つかりません" - -#, python-format -msgid "initialize_connection, volume: %(volume)s, Volume not found." -msgstr "" -"initialize_connection、ボリューム: %(volume)s、ボリュームが見つかりません。" - -#, python-format -msgid "initialize_connection: Failed to get attributes for volume %s." -msgstr "initialize_connection: ボリューム %s の属性の取得に失敗しました。" - -#, python-format -msgid "initialize_connection: Missing volume attribute for volume %s." -msgstr "" -"initialize_connection: ボリューム %s のボリューム属性が欠落しています。" - -#, python-format -msgid "" -"initialize_connection: No node found in I/O group %(gid)s for volume %(vol)s." 
-msgstr "" -"initialize_connection: ボリューム %(vol)s の入出力グループ %(gid)s でノードが" -"見つかりませんでした。" - -#, python-format -msgid "initialize_connection: vdisk %s is not defined." -msgstr "initialize_connection: vdisk %s が定義されていません。" - -#, python-format -msgid "invalid user '%s'" -msgstr "ユーザー '%s' は無効です" - -#, python-format -msgid "iscsi portal, %s, not found" -msgstr "iscsi ポータル %s が見つかりません" - -msgid "" -"iscsi_ip_address must be set in config file when using protocol 'iSCSI'." -msgstr "" -"プロトコル 'iSCSI' を使用する場合、設定ファイルに iscsi_ip_address を設定しな" -"ければなりません。" - -#, python-format -msgid "key manager error: %(reason)s" -msgstr "鍵マネージャーのエラー: %(reason)s" - -msgid "limit param must be an integer" -msgstr "limit パラメーターは整数でなければなりません。" - -msgid "limit param must be positive" -msgstr "limit パラメーターは正でなければなりません。" - -msgid "lun info not found" -msgstr "LUN 情報が見つかりません。" - -msgid "" -"manage_existing cannot manage a volume connected to hosts. Please disconnect " -"this volume from existing hosts before importing." -msgstr "" -"manage_existing はホストに接続されたボリュームを管理できません。インポートを" -"行う前にこのボリュームを既存のホストから切断してください。" - -msgid "manage_existing requires a 'name' key to identify an existing volume." -msgstr "" -"既存のボリュームを特定するには、manage_existing で 'name' キーが必要です。" - -#, python-format -msgid "" -"manage_existing_snapshot: Error managing existing replay %(ss)s on volume " -"%(vol)s" -msgstr "" -"manage_existing_snapshot: ボリューム %(vol)s 上で既存のリプレー %(ss)s の管理" -"でエラーが発生しました。" - -#, python-format -msgid "marker [%s] not found" -msgstr "マーカー [%s] が見つかりません。" - -#, python-format -msgid "marker not found: %s" -msgstr "マーカーが見つかりません: %s" - -#, python-format -msgid "mdiskgrp missing quotes %s" -msgstr "mdiskgrp に引用符 %s がありません" - -#, python-format -msgid "migration_policy must be 'on-demand' or 'never', passed: %s" -msgstr "" -"migration_policy は 'on-demand' または 'never' でなければなりません。%s が渡" -"されました" - -#, python-format -msgid "mkfs failed on volume %(vol)s, error message was: %(err)s." 
-msgstr "" -"ボリューム %(vol)s 上で mkfs が失敗しました。エラーメッセージ: %(err)s。" - -#, python-format -msgid "" -"mkvdiskhostmap error:\n" -" command: %(cmd)s\n" -" lun: %(lun)s\n" -" result_lun: %(result_lun)s" -msgstr "" -"mkvdiskhostmap エラー:\n" -" コマンド: %(cmd)s\n" -" lun: %(lun)s\n" -" result_lun: %(result_lun)s" - -msgid "mock" -msgstr "モック" - -msgid "mount.glusterfs is not installed" -msgstr "mount.glusterfs がインストールされていません。" - -#, python-format -msgid "multiple resources with name %s found by drbdmanage" -msgstr "drbdmanage が名前 %s を持つ複数のリソースを発見しました。" - -#, python-format -msgid "multiple resources with snapshot ID %s found" -msgstr "スナップショット ID %s を持つ複数のリソースが見つかりました。" - -msgid "name cannot be None" -msgstr "名前を None に設定することはできません。" - -msgid "no \"access-key\" field" -msgstr " \"access-key\" フィールドがありません。" - -msgid "no \"user\" field" -msgstr " \"user\" フィールドがありません。" - -#, python-format -msgid "no REPLY but %r" -msgstr "REPLY がないものの %r があります" - -msgid "no data found" -msgstr "データが見つかりません。" - -msgid "no error code found" -msgstr "エラーコードが見つかりません。" - -msgid "no readonly found" -msgstr "読み取り専用が見つかりません。" - -#, python-format -msgid "no snapshot with id %s found in drbdmanage" -msgstr "drbdmanage で ID %s を持つスナップショットが見つかりません。" - -#, python-format -msgid "not exactly one snapshot with id %s" -msgstr "ID %s を持つスナップショットは 1つだけではありません。" - -#, python-format -msgid "not exactly one volume with id %s" -msgstr "ID %s を持つボリュームは 1 つだけではありません。" - -#, python-format -msgid "obj missing quotes %s" -msgstr "obj に引用符 %s がありません" - -msgid "open_access_enabled is not off." -msgstr "open_access_enabled がオフになっていません。" - -#, python-format -msgid "pool [%s] is not writable" -msgstr "プール [%s] は書き込み不可です。" - -msgid "progress must be an integer percentage" -msgstr "進行状況は整数のパーセンテージでなければなりません。" - -msgid "provider must be defined" -msgstr "プロバイダーを定義する必要があります" - -#, python-format -msgid "" -"qemu-img %(minimum_version)s or later is required by this volume driver. 
" -"Current qemu-img version: %(current_version)s" -msgstr "" -"qemu-img %(minimum_version)s 以降がこのボリュームドライバーに必要です。現在" -"の qemu-img バージョン: %(current_version)s" - -#, python-format -msgid "" -"qemu-img is not installed and image is of type %s. Only RAW images can be " -"used if qemu-img is not installed." -msgstr "" -"qemu-img がインストールされていません。また、イメージのタイプは %s です。" -"qemu-img がインストールされていない場合は、RAW イメージのみが使用可能です。" - -msgid "" -"qemu-img is not installed and the disk format is not specified. Only RAW " -"images can be used if qemu-img is not installed." -msgstr "" -"qemu-img がインストールされておらず、ディスク形式が指定されていません。qemu-" -"img がインストールされていない場合は、RAW イメージのみが使用可能です。" - -msgid "rados and rbd python libraries not found" -msgstr "" -"rados python ライブラリーおよび rbd python ライブラリーが見つかりません。" - -msgid "rawtimestamp is null. FSS failed to create_volume_from_snapshot." -msgstr "" -"rawtimestamp が null です。 FSS は create_volume_from_snapshot に失敗しまし" -"た。" - -#, python-format -msgid "read_deleted can only be one of 'no', 'yes' or 'only', not %r" -msgstr "" -"read_deleted には 'no', 'yes', 'only' のいずれかのみを指定できます。%r は指定" -"できません" - -#, python-format -msgid "replication_device %s is not set." -msgstr "replication_device %s が設定されていません。" - -#, python-format -msgid "replication_failover failed. %s not found." -msgstr "replication_failover が失敗しました。%s が見つかりません。" - -msgid "replication_failover failed. 
Backend not configured for failover" -msgstr "" -"replication_failover が失敗しました。バックエンドがフェイルオーバーのために設" -"定されていません。" - -#, python-format -msgid "" -"restore: %(vol_id)s failed to run dsmc due to invalid arguments on " -"%(bpath)s.\n" -"stdout: %(out)s\n" -" stderr: %(err)s" -msgstr "" -"復元: %(vol_id)s で、%(bpath)s 上の無効な引数のため、dsmc の実行に失敗しまし" -"た。\n" -" stdout: %(out)s\n" -" stderr: %(err)s" - -#, python-format -msgid "" -"restore: %(vol_id)s failed to run dsmc on %(bpath)s.\n" -"stdout: %(out)s\n" -" stderr: %(err)s" -msgstr "" -"復元: %(vol_id)s %(bpath)s で dsmc を実行できませんでした。\n" -" stdout: %(out)s\n" -" stderr: %(err)s" - -#, python-format -msgid "" -"restore: %(vol_id)s failed.\n" -"stdout: %(out)s\n" -" stderr: %(err)s." -msgstr "" -"復元: %(vol_id)s 失敗しました。\n" -" stdout: %(out)s\n" -"stderr: %(err)s." - -msgid "" -"restore_backup aborted, actual object list does not match object list stored " -"in metadata." -msgstr "" -"restore_backup が打ち切られました。実際のオブジェクトリストが、メタデータ内に" -"保管されているオブジェクトリストと一致しません。" - -#, python-format -msgid "rtslib_fb is missing member %s: You may need a newer python-rtslib-fb." -msgstr "" -"rtslib_fb にメンバー %s がありません。より新しい python-rtslib-fb が必要かも" -"しれません。" - -msgid "san_ip is not set." -msgstr "san_ip が設定されていません。" - -msgid "san_ip must be set" -msgstr "san_ip を設定する必要があります" - -msgid "" -"san_login and/or san_password is not set for Datera driver in the cinder." -"conf. Set this information and start the cinder-volume service again." 
-msgstr "" -"san_login と san_password のいずれかまたは両方が cinder.conf の Dateraドライ" -"バーに設定されていません。この情報を設定して、cinder-volume サービスを再開し" -"てください。" - -msgid "" -"scaleio_verify_server_certificate is True but " -"scaleio_server_certificate_path is not provided in cinder configuration" -msgstr "" -"scaleio_verify_server_certificate が True ですが、Cinder設定で " -"scaleio_server_certificate_path が指定されていません。" - -msgid "serve() can only be called once" -msgstr "serve() は一度しか呼び出せません。" - -msgid "size not found" -msgstr "サイズが見つかりません。" - -msgid "snapshot info not found" -msgstr "スナップショット情報が見つかりません。" - -#, python-format -msgid "snapshot with the name: %s Not Found" -msgstr "名前 %s を持つスナップショット: 見つかりませんでした。" - -#, python-format -msgid "snapshot-%s" -msgstr "スナップショット: %s" - -msgid "snapshots assigned" -msgstr "割り当てられたスナップショット" - -msgid "snapshots changed" -msgstr "変更されたスナップショット" - -#, python-format -msgid "source volume id:%s is not replicated" -msgstr "ソースボリューム ID %s が複製されていません。" - -msgid "source-name cannot be empty." -msgstr "source-name は空にできません。" - -msgid "source-name format should be: 'vmdk_path@vm_inventory_path'." -msgstr "" -"source-name 形式は 'vmdk_path@vm_inventory_path' でなければなりません。" - -msgid "specs must be a dictionary." -msgstr "スペックはディクショナリーである必要があります。" - -#, python-format -msgid "status must be %s and" -msgstr "状態は %s である必要があります" - -msgid "status must be available" -msgstr "状態は「使用可能」でなければなりません。" - -msgid "status not found" -msgstr "ステータスが見つかりません。" - -msgid "stop hypermetro group error." -msgstr "hypermetro グループ停止エラー。" - -msgid "stop_hypermetro error." -msgstr "stop_hypermetro エラー。" - -msgid "storops Python library is not installed." -msgstr "strops Python ライブラリーがインストールされていません。" - -msgid "sync hypermetro group error." -msgstr "hypermetro グループ同期エラー。" - -msgid "sync_hypermetro error." 
-msgstr "sync_hypermetro エラー。" - -#, python-format -msgid "target=%(target)s, lun=%(lun)s" -msgstr "target=%(target)s, lun=%(lun)s" - -#, python-format -msgid "" -"targetcli not installed and could not create default directory " -"(%(default_path)s): %(exc)s" -msgstr "" -"targetcli がインストールされておらず、デフォルトのディレクトリー " -"(%(default_path)s) を作成できませんでした: %(exc)s" - -msgid "terminate_connection: Failed to get host name from connector." -msgstr "terminate_connection: コネクターからホスト名を取得できませんでした。" - -msgid "timeout creating new_volume on destination host" -msgstr "" -"宛先ホスト上に new_volume を作成しているときにタイムアウトになりました。" - -msgid "too many body keys" -msgstr "本体キーが多すぎます" - -#, python-format -msgid "trg_id is invalid: %d." -msgstr "trg_id が無効です: %d" - -#, python-format -msgid "umount: %s: not mounted" -msgstr "アンマウント: %s: マウントされていません" - -#, python-format -msgid "umount: %s: target is busy" -msgstr "アンマウント: %s: ターゲットが使用中です" - -msgid "umount: : some other error" -msgstr "アンマウント: : その他のエラー" - -msgid "umount: : target is busy" -msgstr "アンマウント: : ターゲットが使用中です" - -#, python-format -msgid "unmanage_snapshot: Cannot find snapshot named %s" -msgstr "unmanage_snapshot: %s という名前のスナップショットが見つかりません。" - -#, python-format -msgid "unmanage_snapshot: Cannot find volume id %s" -msgstr "unmanage_snapshot: ボリューム ID %s が見つかりません。" - -#, python-format -msgid "unrecognized argument %s" -msgstr "認識されない引数 %s" - -#, python-format -msgid "unsupported compression algorithm: %s" -msgstr "サポートされない圧縮アルゴリズム: %s" - -msgid "uuid not found" -msgstr "UUID が見つかりません。" - -msgid "valid iqn needed for show_target" -msgstr "show_target に必要とされる有効な iqn" - -#, python-format -msgid "varray %s: not found" -msgstr "varray %s: 見つかりませんでした。" - -#, python-format -msgid "vdisk %s is not defined." -msgstr "vdisk %s が定義されていません。" - -msgid "vid is null. FSS failed to create snapshot." -msgstr "vid が null です。 FSS はスナップショットの作成に失敗しました。" - -msgid "vid is null. FSS failed to create_volume_from_snapshot." 
-msgstr "vid が null です。 FSS は create_volume_from_snapshot に失敗しました。" - -msgid "vid is null. FSS failed to delete snapshot" -msgstr "vid が null です。 FSS はスナップショットの削除に失敗しました。" - -msgid "vid is null. FSS failed to delete volume." -msgstr "vid が null です。 FSS はボリュームの削除に失敗しました。" - -msgid "vmemclient python library not found" -msgstr "vmemclient python ライブラリーが見つかりません" - -#, python-format -msgid "volume %s not found in drbdmanage" -msgstr "drbdmanage でボリューム %s が見つかりません。" - -msgid "volume assigned" -msgstr "割り当てられたボリューム" - -msgid "volume changed" -msgstr "変更されたボリューム" - -msgid "volume is already attached" -msgstr "ボリュームは既に接続されています。" - -msgid "volume is not local to this node" -msgstr "ボリュームは、このノードに対してローカルではありません。" - -#, python-format -msgid "" -"volume size %(volume_size)d is too small to restore backup of size %(size)d." -msgstr "" -"ボリュームサイズ %(volume_size)d は、サイズ %(size)d のバックアップを復元する" -"には小さすぎます。" - -#, python-format -msgid "volume size %d is invalid." -msgstr "ボリュームサイズ %d は無効です。" - -msgid "" -"volume_type must be provided when creating a volume in a consistency group." -msgstr "" -"ボリュームを整合性グループに作成する場合は、volume_type を指定する必要があり" -"ます。" - -msgid "volume_type must be provided when creating a volume in a group." -msgstr "" -"ボリュームをグループに作成する場合は、volume_type を指定する必要があります。" - -msgid "volume_type_id cannot be None" -msgstr "volume_type_id を None に設定することはできません。" - -msgid "volume_types assigned" -msgstr "割り当てられたボリュームタイプ" - -#, python-format -msgid "volume_types must be provided to create consistency group %(name)s." -msgstr "" -"整合性グループ %(name)s を作成するには、volume_types を指定する必要がありま" -"す。" - -#, python-format -msgid "volume_types must be provided to create consistency group %s." -msgstr "" -"整合性グループ %s を作成するには、volume_types を指定する必要があります。" - -#, python-format -msgid "volume_types must be provided to create group %(name)s." 
-msgstr "" -"グループ %(name)s を作成するには、volume_types を指定する必要があります。" - -msgid "volumes assigned" -msgstr "割り当てられたボリューム" - -msgid "volumes changed" -msgstr "変更されたボリューム" - -#, python-format -msgid "wait_for_condition: %s timed out." -msgstr "wait_for_condition: %s はタイムアウトしました。" - -#, python-format -msgid "" -"zfssa_manage_policy property needs to be set to 'strict' or 'loose'. Current " -"value is: %s." -msgstr "" -"zfssa_manage_policy プロパティーは 'strict' または 'loose' に設定する必要があ" -"ります。現在の値は %s です。" diff --git a/cinder/locale/ko_KR/LC_MESSAGES/cinder-log-error.po b/cinder/locale/ko_KR/LC_MESSAGES/cinder-log-error.po deleted file mode 100644 index 8401deddd..000000000 --- a/cinder/locale/ko_KR/LC_MESSAGES/cinder-log-error.po +++ /dev/null @@ -1,3247 +0,0 @@ -# Translations template for cinder. -# Copyright (C) 2015 ORGANIZATION -# This file is distributed under the same license as the cinder project. -# -# Translators: -# OpenStack Infra , 2015. #zanata -# Andreas Jaeger , 2016. #zanata -msgid "" -msgstr "" -"Project-Id-Version: cinder 9.0.0.0rc2.dev178\n" -"Report-Msgid-Bugs-To: https://bugs.launchpad.net/openstack-i18n/\n" -"POT-Creation-Date: 2016-10-06 03:19+0000\n" -"MIME-Version: 1.0\n" -"Content-Type: text/plain; charset=UTF-8\n" -"Content-Transfer-Encoding: 8bit\n" -"PO-Revision-Date: 2016-03-25 02:49+0000\n" -"Last-Translator: SeYeon Lee \n" -"Language: ko-KR\n" -"Plural-Forms: nplurals=1; plural=0;\n" -"Generated-By: Babel 2.0\n" -"X-Generator: Zanata 3.7.3\n" -"Language-Team: Korean (South Korea)\n" - -#, python-format -msgid "" -"%(exception)s: Exception during revert of retype for volume %(volume_name)s. " -"Failed to remove from new volume set %(new_vvs)s." -msgstr "" -"%(exception)s: 볼륨 %(volume_name)s의 다시 입력 되돌리기 중에 예외가 발생했습" -"니다. 새 볼륨 세트 %(new_vvs)s에서 제거하는 데 실패했습니다." - -#, python-format -msgid "" -"%(exception)s: Exception during revert of retype for volume %(volume_name)s. " -"Original volume set/QOS settings may not have been fully restored." 
-msgstr "" -"%(exception)s: 볼륨 %(volume_name)s의 다시 입력을 되돌리는 중에 예외가 발생했" -"습니다. 원래 볼륨 세트/QOS 설정이 완전히 복구되지 않았을 수 있습니다." - -#, python-format -msgid "" -"%(fun)s: Failed with unexpected CLI output.\n" -" Command: %(cmd)s\n" -"stdout: %(out)s\n" -"stderr: %(err)s\n" -msgstr "" -"%(fun)s: 예상치 못한 CLI 출력과 함께 실패했습니다. \n" -"명령: %(cmd)s\n" -"stdout: %(out)s\n" -"stderr: %(err)s\n" - -#, python-format -msgid "" -"%(method)s %(url)s unexpected response status: %(response)s (expects: " -"%(expects)s)." -msgstr "" -"%(method)s %(url)s 예상치 못한 응답 상태: %(response)s (expects: " -"%(expects)s)." - -#, python-format -msgid "%(name)s: %(value)s" -msgstr "%(name)s: %(value)s" - -#, python-format -msgid "%s" -msgstr "%s" - -#, python-format -msgid "'%(value)s' is an invalid value for extra spec '%(key)s'" -msgstr "'%(value)s'이(가) 추가 사양 '%(key)s'에 올바르지 않은 값임" - -msgid "A valid secondary target MUST be specified in order to failover." -msgstr "장애 복구하려면 올바른 보조 대상을 지정해야 합니다." - -#, python-format -msgid "" -"Account for Volume ID %s was not found on the SolidFire Cluster while " -"attempting create_snapshot operation!" -msgstr "" -"create_snapshot 조작을 시도하는 중에 SolidFire Cluster에서 볼륨 ID %s의 계정" -"을 찾을 수 없습니다." - -#, python-format -msgid "" -"Account for Volume ID %s was not found on the SolidFire Cluster while " -"attempting delete_volume operation!" -msgstr "" -"delete_volume 조작을 시도하는 중에 SolidFire Cluster에서 볼륨 ID %s의 계정을 " -"찾을 수 없습니다." - -#, python-format -msgid "" -"Account for Volume ID %s was not found on the SolidFire Cluster while " -"attempting unmanage operation!" -msgstr "" -"관리 취소 조작을 시도하는 중에 SolidFire Cluster에서 볼륨 ID %s의 계정을 찾" -"을 수 없습니다." - -#, python-format -msgid "Array Serial Number must be in the file %(fileName)s." -msgstr "배열 일련 번호가 %(fileName)s 파일에 있어야 합니다." - -#, python-format -msgid "Array query failed - No response (%d)!" -msgstr "배열 쿼리 실패 - 응답이 없음(%d)!" - -msgid "Array query failed. No capabilities in response!" -msgstr "배열 쿼리 실패. 응답에 기능이 없습니다." 
- -msgid "Array query failed. No controllers in response!" -msgstr "배열 쿼리 실패. 응답에 제어기가 없습니다." - -msgid "Array query failed. No global id in XML response!" -msgstr "배열 쿼리 실패. XML 응답에 글로벌 id가 없습니다." - -msgid "Attaching snapshot from a remote node is not supported." -msgstr "원격 노드에서 스냅샷을 연결하는 기능은 지원되지 않습니다." - -#, python-format -msgid "Authorizing request: %(zfssaurl)s retry: %(retry)d ." -msgstr "요청 승인: %(zfssaurl)s 재시도: %(retry)d ." - -msgid "Backend returned err for lun export." -msgstr "백엔드에서 lun 내보내기 오류를 리턴했습니다." - -#, python-format -msgid "Backup id %s is not invalid. Skipping reset." -msgstr "백업 id %s이(가) 올바르지 않습니다. 재설정을 건너뜁니다." - -#, python-format -msgid "" -"Backup service %(configured_service)s does not support verify. Backup id " -"%(id)s is not verified. Skipping verify." -msgstr "" -"백업 서비스 %(configured_service)s이(가) 확인을 지원하지 않습니다. 백업 ID " -"%(id)s이(가) 확인되지 않습니다. 확인을 건너뜁니다." - -#, python-format -msgid "Backup volume metadata failed: %s." -msgstr "백업 볼륨 메타데이터 실패: %s." - -#, python-format -msgid "Bad response from server: %(url)s. Error: %(err)s" -msgstr "서버의 잘못된 응답: %(url)s. 오류: %(err)s" - -#, python-format -msgid "" -"CG snapshot %(cgsnap)s not found when creating consistency group %(cg)s from " -"source." -msgstr "" -"소스에서 일관성 그룹 %(cg)s을(를) 작성할 때 CG 스냅샷 %(cgsnap)s을(를) 찾을 " -"수 없습니다." - -#, python-format -msgid "" -"CLI fail: '%(cmd)s' = %(code)s\n" -"out: %(stdout)s\n" -"err: %(stderr)s" -msgstr "" -"CLI 실패: '%(cmd)s' = %(code)s\n" -"출력: %(stdout)s\n" -"오류: %(stderr)s" - -msgid "Call to Nova delete snapshot failed" -msgstr "Nova를 호출하여 스냅샷을 삭제하는 데 실패" - -msgid "Call to Nova to create snapshot failed" -msgstr "Nova를 호출하여 스냅샷을 작성하는 데 실패" - -#, python-format -msgid "Call to json.loads() raised an exception: %s." -msgstr "json.loads() 호출에서 예외 발생: %s." - -#, python-format -msgid "Can not discovery in %(target_ip)s with %(target_iqn)s." -msgstr "%(target_iqn)s(으)로 %(target_ip)s에서 검색할 수 없습니다." - -msgid "Can not open the recent url, login again." 
-msgstr "최신 url을 열 수 없습니다. 다시 로그인하십시오." - -#, python-format -msgid "Can't find volume to map %(key)s, %(msg)s" -msgstr "%(key)s을(를) 맵핑할 볼륨을 찾을 수 없음, %(msg)s" - -msgid "Can't open the recent url, relogin." -msgstr "최신 url을 열 수 없음, 다시 로그인" - -#, python-format -msgid "" -"Cannot add and verify tier policy association for storage group : " -"%(storageGroupName)s to FAST policy : %(fastPolicyName)s." -msgstr "" -"스토리지 그룹: %(storageGroupName)s의 계층 정책 연관을 확인하고 FAST 정책: " -"%(fastPolicyName)s에 추가할 수 없습니다." - -#, python-format -msgid "Cannot clone image %(image)s to volume %(volume)s. Error: %(error)s." -msgstr "" -"이미지 %(image)s을(를) 볼륨 %(volume)s에 복제할 수 없습니다. 오류: %(error)s." - -#, python-format -msgid "Cannot create or find an initiator group with name %(igGroupName)s." -msgstr "이름이 %(igGroupName)s인 개시자 그룹을 작성하거나 찾을 수 없습니다." - -#, python-format -msgid "Cannot delete file %s." -msgstr "%s 파일을 삭제할 수 없습니다." - -msgid "Cannot detect replica status." -msgstr "복제본 상태를 발견할 수 없습니다." - -msgid "Cannot determine if Tiering Policies are supported." -msgstr "계층 지정 정책이 지원되는지 판별할 수 없습니다." - -msgid "Cannot determine whether Tiering Policy is supported on this array." -msgstr "이 배열에서 계층 지정 정책이 지원되는지 판별할 수 없습니다." - -#, python-format -msgid "Cannot find Consistency Group %s" -msgstr "일관성 그룹 %s을(를) 찾을 수 없음" - -#, python-format -msgid "" -"Cannot find a portGroup with name %(pgGroupName)s. The port group for a " -"masking view must be pre-defined." -msgstr "" -"이름이 %(pgGroupName)s인 portGroup을 찾을 수 없습니다. 마스킹 보기의 포트 그" -"룹을 사전 정의해야 합니다." - -#, python-format -msgid "Cannot find the fast policy %(fastPolicyName)s." -msgstr "빠른 정책 %(fastPolicyName)s을(를) 찾을 수 없습니다." - -#, python-format -msgid "" -"Cannot find the new masking view just created with name %(maskingViewName)s." -msgstr "" -"방금 작성했으며 이름이 %(maskingViewName)s인 새 마스킹 보기를 찾을 수 없습니" -"다." - -#, python-format -msgid "Cannot get QoS spec for volume %s." -msgstr "볼륨 %s의 QoS 사양을 가져올 수 없습니다." 
- -#, python-format -msgid "Cannot get port group from masking view: %(maskingViewName)s. " -msgstr "마스킹 보기 %(maskingViewName)s에서 포트 그룹을 가져올 수 없습니다. " - -msgid "Cannot get port group name." -msgstr "포트 그룹 이름을 가져올 수 없습니다." - -#, python-format -msgid "Cannot get storage Group from job : %(storageGroupName)s." -msgstr "작업에서 스토리지 그룹을 가져올 수 없음: %(storageGroupName)s." - -msgid "Cannot get storage system." -msgstr "스토리지 시스템을 가져올 수 없습니다." - -#, python-format -msgid "Caught error: %(type)s %(error)s" -msgstr "오류 발견: %(type)s %(error)s" - -#, python-format -msgid "" -"Changing the volume name from %(tmp)s to %(orig)s failed because %(reason)s" -msgstr "" -"%(reason)s(으)로 인해 볼륨 이름을 %(tmp)s에서 %(orig)s(으)로 변경하는 데 실패" - -#, python-format -msgid "" -"Changing the volume name from %(tmp)s to %(orig)s failed because %(reason)s." -msgstr "" -"%(reason)s(으)로 인해 볼륨 이름을 %(tmp)s에서 %(orig)s(으)로 변경하는 데 실패" -"했습니다." - -#, python-format -msgid "Clone %s not in prepared state!" -msgstr "복제본 %s이(가) 준비된 상태가 아닙니다." - -#, python-format -msgid "" -"Clone volume \"%s\" already exists. Please check the results of \"dog vdi " -"list\"." -msgstr "" -"복제 볼륨 \"%s\"이(가) 이미 있습니다. \"dog vdi list\"의 결과를 확인하십시오." - -#, python-format -msgid "Cloning of volume %s failed." -msgstr "볼륨 %s 복제에 실패했습니다." - -#, python-format -msgid "" -"CloudByte does not have a volume corresponding to OpenStack volume [%s]." -msgstr "CloudByte에 OpenStack 볼륨 [%s]에 해당하는 볼륨이 없습니다." - -#, python-format -msgid "" -"CloudByte operation [%(operation)s] failed for volume [%(vol)s]. Exhausted " -"all [%(max)s] attempts." -msgstr "" -"볼륨 [%(vol)s]의 CloudByte 조작 [%(operation)s]에 실패했습니다. [%(max)s] 시" -"도수가 모두 소진되었습니다." - -#, python-format -msgid "" -"CloudByte snapshot information is not available for OpenStack volume [%s]." -msgstr "OpenStack 볼륨 [%s]의 CloudByte 스냅샷 정보를 사용할 수 없습니다." - -#, python-format -msgid "CloudByte volume information not available for OpenStack volume [%s]." -msgstr "OpenStack 볼륨 [%s]의 CloudByte 볼륨 정보를 사용할 수 없습니다." 
- -#, python-format -msgid "Cmd :%s" -msgstr "Cmd :%s" - -#, python-format -msgid "Commit clone failed: %(name)s (%(status)d)!" -msgstr "복제본 커밋에 실패: %(name)s (%(status)d)!" - -#, python-format -msgid "Commit failed for %s!" -msgstr "%s의 커밋에 실패했습니다." - -#, python-format -msgid "Compute cluster: %s not found." -msgstr "컴퓨트 클러스터: %s을(를) 찾을 수 없음." - -#, python-format -msgid "Configuration value %s is not set." -msgstr "구성 값 %s을(를) 설정하지 않았습니다." - -#, python-format -msgid "Conflict detected in Virtual Volume Set %(volume_set)s: %(error)s" -msgstr "가상 볼륨 세트 %(volume_set)s에서 충돌 발견: %(error)s" - -#, python-format -msgid "Connect to Flexvisor error: %s." -msgstr "Flexvisor에 연결 오류: %s." - -#, python-format -msgid "Connect to Flexvisor failed: %s." -msgstr "Flexvisor에 연결 실패: %s." - -msgid "Connection error while sending a heartbeat to coordination backend." -msgstr "하트비트를 조정 백엔드에 보내는 중에 연결 오류가 발생했습니다." - -#, python-format -msgid "Connection to %s failed and no secondary!" -msgstr "%s에 연결에 실패했으며 보조가 없습니다." - -#, python-format -msgid "Controller GET failed (%d)" -msgstr "제어기 GET 실패(%d)" - -#, python-format -msgid "Copy offload workflow unsuccessful. %s" -msgstr "오프로드 워크플로 복사에 실패했습니다. %s" - -#, python-format -msgid "Copy snapshot to volume for snapshot %(snap)s volume %(vol)s failed!" -msgstr "스냅샷 %(snap)s 볼륨 %(vol)s의 볼륨에 스냅샷을 복사하는 데 실패" - -#, python-format -msgid "Could not GET allocation information (%d)!" -msgstr "할당 정보를 가져올 수 없습니다(%d)." - -#, python-format -msgid "Could not calculate node utilization for node %s." -msgstr "노드 %s의 노드 활용도를 계산할 수 없습니다." - -#, python-format -msgid "Could not connect to %(primary)s or %(secondary)s!" -msgstr "%(primary)s 또는 %(secondary)s에 연결할 수 없습니다." - -#, python-format -msgid "Could not create snapshot set. Error: '%s'" -msgstr "스냅샷 세트를 작성할 수 없습니다. 오류: '%s'" - -msgid "Could not decode scheduler options." -msgstr "스케줄러 옵션을 디코딩할 수 없습니다." - -#, python-format -msgid "Could not delete failed image volume %(id)s." 
-msgstr "이미지 볼륨 %(id)s을(를) 삭제할 수 없습니다." - -#, python-format -msgid "Could not delete the image volume %(id)s." -msgstr "이미지 볼륨 %(id)s을(를) 삭제할 수 없습니다." - -#, python-format -msgid "Could not find a host for consistency group %(group_id)s." -msgstr "일관성 그룹 %(group_id)s의 호스트를 찾을 수 없습니다." - -#, python-format -msgid "Could not find any hosts (%s)" -msgstr "호스트(%s)를 찾을 수 없음" - -#, python-format -msgid "" -"Could not find port group : %(portGroupName)s. Check that the EMC " -"configuration file has the correct port group name." -msgstr "" -"포트 그룹: %(portGroupName)s을(를) 찾을 수 없습니다. EMC 구성 파일에 올바른 " -"포트 그룹 이름이 있는지 확인하십시오." - -#, python-format -msgid "Could not find volume with name %(name)s. Error: %(error)s" -msgstr "이름이 %(name)s인 볼륨을 찾을 수 없습니다. 오류: %(error)s" - -msgid "" -"Could not get performance base counter name. Performance-based scheduler " -"functions may not be available." -msgstr "" -"성능 기본 카운터 이름을 가져올 수 없습니다. 성능 기반 스케줄러 기능을 사용할 " -"수 없습니다." - -#, python-format -msgid "Could not get utilization counters from node %s" -msgstr "노드 %s에서 활용도 카운터를 가져올 수 없음" - -#, python-format -msgid "Could not log in to 3PAR array (%s) with the provided credentials." -msgstr "제공된 자격 증명으로 3PAR 배열(%s)에 로그인할 수 없습니다." - -#, python-format -msgid "Could not log in to LeftHand array (%s) with the provided credentials." -msgstr "제공된 자격 증명으로 LeftHand 배열(%s)에 로그인할 수 없습니다." - -#, python-format -msgid "Could not stat scheduler options file %(filename)s." -msgstr "스케줄러 옵션 파일 %(filename)s의 통계를 낼 수 없습니다." - -#, python-format -msgid "Could not validate device %s" -msgstr "%s 장치를 검증할 수 없음" - -#, python-format -msgid "" -"Create clone_image_volume: %(volume_id)sfor image %(image_id)s, failed " -"(Exception: %(except)s)" -msgstr "" -"이미지 %(image_id)s의 clone_image_volume: %(volume_id)s 작성 실패(예외: " -"%(except)s)" - -#, python-format -msgid "" -"Create consistency group from snapshot-%(snap)s failed: SnapshotNotFound." -msgstr "스냅샷-%(snap)s에서 일관성 그룹 작성 실패." 
- -#, python-format -msgid "Create consistency group from source %(source)s failed." -msgstr "소스 %(source)s에서 일관성 그룹을 작성하는 데 실패했습니다." - -#, python-format -msgid "" -"Create consistency group from source cg-%(cg)s failed: " -"ConsistencyGroupNotFound." -msgstr "소스 cg-%(cg)s에서 일관성 그룹 작성 실패: ConsistencyGroupNotFound." - -#, python-format -msgid "Create hypermetro error: %s." -msgstr "hypermetro 작성 오류: %s." - -#, python-format -msgid "" -"Create new lun from lun for source %(src)s => destination %(dest)s failed!" -msgstr "소스 %(src)s => 대상 %(dest)s의 lun에서 새 lun을 작성하는 데 실패" - -#, python-format -msgid "Create pair failed. Error: %s." -msgstr "쌍 작성 실패. 오류: %s." - -msgid "Create replication volume error." -msgstr "복제 볼륨 작성 오류." - -#, python-format -msgid "Create snapshot notification failed: %s" -msgstr "스냅샷 작성 알림 실패: %s" - -#, python-format -msgid "Create volume failed from snapshot: %s" -msgstr "스냅샷에서 볼륨 작성 실패: %s" - -#, python-format -msgid "Create volume notification failed: %s" -msgstr "볼륨 작성 알림 실패: %s" - -#, python-format -msgid "Creation of snapshot failed for volume: %s" -msgstr "볼륨의 스냅샷 작성 실패: %s" - -#, python-format -msgid "Creation of volume %s failed." -msgstr "볼륨 %s 작성에 실패했습니다." - -msgid "" -"Creation request failed. Please verify the extra-specs set for your volume " -"types are entered correctly." -msgstr "" -"작성 요청에 실패했습니다. 볼륨 유형의 추가 사양 세트가 올바르게 입력되었는지 " -"확인하십시오." - -msgid "DB error:" -msgstr "DB 오류:" - -msgid "DBError encountered: " -msgstr "DBError 발생: " - -msgid "DRBDmanage: too many assignments returned." -msgstr "DRBDmanage: 너무 많은 할당이 리턴되었습니다." - -msgid "Default Storage Profile was not found." -msgstr "기본 스토리지 프로파일을 찾을 수 없습니다." - -msgid "" -"Default volume type is not found. Please check default_volume_type config:" -msgstr "" -"기본 볼륨 유형을 찾을 수 없습니다. default_volume_type 구성을 확인하십시오." - -msgid "Delete consistency group failed to update usages." -msgstr "일관성 그룹을 삭제하는 중 사용법을 업데이트하지 못했습니다." - -#, python-format -msgid "Delete hypermetro error: %s." 
-msgstr "hypermetro 삭제 오류: %s." - -msgid "Delete replication error." -msgstr "복제 삭제 오류." - -msgid "Delete snapshot failed, due to snapshot busy." -msgstr "사용 중인 스냅샷으로 인해 스냅샷을 삭제하는 데 실패했습니다." - -#, python-format -msgid "Delete snapshot notification failed: %s" -msgstr "스냅샷 삭제 알림 실패: %s" - -#, python-format -msgid "Delete volume notification failed: %s" -msgstr "볼륨 삭제 알림 실패: %s" - -#, python-format -msgid "Deleting snapshot %s failed" -msgstr "스냅샷 %s 삭제 실패" - -#, python-format -msgid "Deleting zone failed %s" -msgstr "구역 삭제 실패 %s" - -#, python-format -msgid "Deletion of volume %s failed." -msgstr "볼륨 %s 삭제에 실패했습니다." - -#, python-format -msgid "Destination Volume Group %s does not exist" -msgstr "대상 볼륨 그룹 %s이(가) 없음" - -#, python-format -msgid "Detach attachment %(attach_id)s failed." -msgstr "첨부 파일 %(attach_id)s의 연결을 끊는 데 실패했습니다." - -#, python-format -msgid "Detach migration source volume failed: %(err)s" -msgstr "마이그레이션 소스 볼륨의 연결을 해제하는 데 실패: %(err)s" - -msgid "Detach volume failed, due to remove-export failure." -msgstr "내보내기 제거 실패로 인해 볼륨 연결 해제에 실패했습니다." - -msgid "Detach volume failed, due to uninitialized driver." -msgstr "초기화되지 않는 드라이버로 인해 볼륨 연결 해제에 실패했습니다." - -msgid "Detaching snapshot from a remote node is not supported." -msgstr "원격 노드에서 스냅샷의 연결을 해제하는 기능은 지원되지 않습니다." - -#, python-format -msgid "Did not find expected column name in lsvdisk: %s." -msgstr "lsvdisk에 예상 열 이름을 찾지 못함: %s." - -msgid "Differential restore failed, trying full restore" -msgstr "차등 복원 실패, 전체 복원 시도" - -#, python-format -msgid "Disable replication on volume failed with message: %s" -msgstr "볼륨에서 복제를 비활성화하는 데 실패하고 다음 메시지가 표시됨: %s" - -#, python-format -msgid "Disconnection failed with message: %(msg)s." -msgstr "연결 해제에 실패하고 다음 메시지가 표시됨: %(msg)s." - -msgid "Driver reported error during replication failover." -msgstr "복제 장애 복구 중에 드라이버에서 오류를 보고했습니다." - -#, python-format -msgid "" -"Driver-based migration of volume %(vol)s failed. Move from %(src)s to " -"%(dst)s failed with error: %(error)s." 
-msgstr "" -"볼륨 %(vol)s의 드라이버 기반 마이그레이션에 실패했습니다. %(src)s에서 " -"%(dst)s(으)로 이동이 오류로 인해 실패: %(error)s." - -#, python-format -msgid "Error Attaching volume %(vol)s." -msgstr "볼륨 %(vol)s을(를) 연결하는 중에 오류가 발생했습니다. " - -#, python-format -msgid "" -"Error Create Group: %(groupName)s. Return code: %(rc)lu. Error: %(error)s." -msgstr "그룹 작성 오류: %(groupName)s. 리턴 코드: %(rc)lu. 오류: %(error)s." - -#, python-format -msgid "" -"Error Getting Snapshot: %(snapshot)s of Volume: %(lun)s in Pool: %(pool)s, " -"Project: %(project)s Return code: %(ret.status)d, Message: %(ret.data)s." -msgstr "" -"%(pool)s 풀에 있는 %(project)s 프로젝트의 %(lun)s 볼륨에서 %(snapshot)s 스냅" -"샷을 가져오는 중 오류 발생. 리턴 코드: %(ret.status)d 메시지: %(ret.data)s." - -#, python-format -msgid "Error JSONDecodeError. %s" -msgstr "JSONDecodeError 오류. %s" - -#, python-format -msgid "" -"Error Setting Volume: %(lun)s to InitiatorGroup: %(initiatorgroup)s Pool: " -"%(pool)s Project: %(project)s Return code: %(ret.status)d Message: " -"%(ret.data)s." -msgstr "" -"%(pool)s 풀 %(project)s 프로젝트에서 InitiatorGroup: %(initiatorgroup)s으로 " -"볼륨 %(lun)s을 설정하는 중 오류 발생. 리턴 코드: %(ret.status)d 메시지: " -"%(ret.data)s." - -#, python-format -msgid "Error TypeError. %s" -msgstr "TypeError 오류. %s" - -msgid "Error activating LV" -msgstr "LV 활성화 오류" - -#, python-format -msgid "Error changing Storage Profile for volume %(original)s to %(name)s" -msgstr "" -"볼륨의 스토리지 프로파일을 %(original)s에서 %(name)s(으)로 변경 중 오류 발생" - -#, python-format -msgid "Error cleaning up failed volume creation. Msg - %s." -msgstr "실패한 볼륨 작성을 정리하는 중에 오류가 발생했습니다. 메시지 - %s." - -msgid "Error cloning volume" -msgstr "볼륨 복제 오류" - -msgid "Error closing channel." -msgstr "채널 닫기 오류." - -#, python-format -msgid "" -"Error contacting glance server '%(netloc)s' for '%(method)s', %(extra)s." -msgstr "" -"'%(method)s', %(extra)s의 Glance 서버 '%(netloc)s'에 접속하는 중 오류가 발생" -"했습니다." 
- -#, python-format -msgid "Error creating QOS rule %s" -msgstr "QOS 규칙 %s 작성 오류" - -msgid "Error creating Volume" -msgstr "볼륨 작성 오류" - -msgid "Error creating Volume Group" -msgstr "볼륨 그룹 작성 오류" - -msgid "Error creating chap record." -msgstr "chap 레코드를 작성하는 중에 오류가 발생했습니다." - -msgid "Error creating cloned volume" -msgstr "복제된 볼륨 작성 오류" - -msgid "Error creating snapshot" -msgstr "스냅샷 작성 오류" - -msgid "Error creating volume" -msgstr "볼륨 작성 오류" - -#, python-format -msgid "Error creating volume. Msg - %s." -msgstr "볼륨을 작성하는 중에 오류가 발생했습니다. 메시지 - %s." - -msgid "Error deactivating LV" -msgstr "LV 비활성화 오류" - -msgid "Error deleting snapshot" -msgstr "스냅샷 삭제 에러" - -#, python-format -msgid "Error detaching snapshot %(snapshot)s, due to remove export failure." -msgstr "" -"내보내기 제거 실패로 인해 스냅샷 %(snapshot)s의 연결을 해제하는 중 오류가 발" -"생했습니다." - -#, python-format -msgid "Error detaching volume %(volume)s, due to remove export failure." -msgstr "" -"내보내기 제거 실패로 인해 볼륨 %(volume)s의 연결을 해제하는 중 오류가 발생했" -"습니다." - -#, python-format -msgid "Error detaching volume %s" -msgstr "볼륨 %s 연결 해제 오류" - -#, python-format -msgid "Error disassociating storage group from policy: %s." -msgstr "정책에서 스토리지 그룹의 연관을 해제하는 중 오류 발생: %s." - -msgid "Error during re-export on driver init." -msgstr "드라이버 초기화 시 다시 내보내는 중에 오류가 발생했습니다." - -msgid "" -"Error encountered on Cinder backend during thaw operation, service will " -"remain frozen." -msgstr "" -"thaw 조작 중에 Cinder 백엔드에서 오류 발생, 서비스가 동결된 상태로 남습니다." - -msgid "Error executing SSH command." -msgstr "SSH 명령 실행 오류." - -msgid "Error executing command via ssh." -msgstr "ssh를 통해 명령 실행 중에 오류 발생." - -#, python-format -msgid "Error executing command via ssh: %s" -msgstr "ssh를 통해 명령 실행 중에 오류 발생: %s" - -#, python-format -msgid "Error expanding volume %s." -msgstr "볼륨 %s 확장 오류." - -msgid "Error extending Volume" -msgstr "볼륨 확장 오류" - -msgid "Error extending volume" -msgstr "볼륨 확장 오류" - -#, python-format -msgid "Error extending volume %(id)s. Ex: %(ex)s" -msgstr "볼륨 %(id)s 확장 오류. 
예: %(ex)s" - -#, python-format -msgid "Error extending volume: %(vol)s. Exception: %(ex)s" -msgstr "볼륨 확장 오류: %(vol)s. 예외: %(ex)s" - -#, python-format -msgid "Error finding replicated pg snapshot on %(secondary)s." -msgstr "%(secondary)s에서 복제된 pg 스냅샷을 찾는 중 오류가 발생했습니다." - -#, python-format -msgid "Error finding target pool instance name for pool: %(targetPoolName)s." -msgstr "풀의 대상 풀 인스턴스 이름을 찾는 중 오류 발생: %(targetPoolName)s." - -#, python-format -msgid "Error getting FaultDomainList for %s" -msgstr "%s의 FaultDomainList 가져오기 오류" - -#, python-format -msgid "Error getting LUN attribute. Exception: %s" -msgstr "LUN 속성 가져오기 오류. 실행: %s" - -msgid "Error getting active FC target ports." -msgstr "활성 FC 대상 포트를 가져오는 중 오류가 발생했습니다." - -msgid "Error getting active ISCSI target iqns." -msgstr "활성 ISCSI 대상 iqns를 가져오는 중 오류가 발생했습니다." - -msgid "Error getting active ISCSI target portals." -msgstr "활성 ISCSI 대상 포털을 가져오는 중 오류가 발생했습니다." - -msgid "Error getting array, pool, SLO and workload." -msgstr "배열, 풀, SLO 및 워크로드를 가져오는 중 오류가 발생했습니다." - -msgid "Error getting chap record." -msgstr "chap 레코드를 가져오는 중에 오류가 발생했습니다." - -msgid "Error getting name server info." -msgstr "이름 서버 정보 가져오기 오류." - -msgid "Error getting show fcns database info." -msgstr "표시 fcns 데이터베이스 정보 가져오기 오류." - -msgid "Error getting target pool name and array." -msgstr "대상 풀 이름과 배열을 가져오는 중 오류가 발생했습니다." - -#, python-format -msgid "Error has occurred: %s" -msgstr "오류 발생: %s" - -#, python-format -msgid "Error in copying volume: %s" -msgstr "볼륨 복사 오류: %s" - -#, python-format -msgid "" -"Error in extending volume size: Volume: %(volume)s Vol_Size: %(vol_size)d " -"with Snapshot: %(snapshot)s Snap_Size: %(snap_size)d" -msgstr "" -"볼륨 크기 확장 오류: 볼륨: %(volume)s Vol_Size: %(vol_size)d 포함된 스냅샷: " -"%(snapshot)s Snap_Size: %(snap_size)d" - -#, python-format -msgid "Error in workflow copy from cache. %s." -msgstr "캐시에서 워크플로우를 복사하는 데 실패했습니다. %s." 
- -#, python-format -msgid "Error invalid json: %s" -msgstr "올바르지 않은 JSON 오류: %s" - -msgid "Error manage existing get volume size." -msgstr "기존 볼륨 크기 가져오기를 관리하는 중 오류가 발생했습니다." - -msgid "Error manage existing volume." -msgstr "기존 볼륨 관리 오류." - -#, python-format -msgid "Error managing replay %s" -msgstr "재생 관리 오류 %s" - -msgid "Error mapping VDisk-to-host" -msgstr " VDisk-호스트 맵핑 오류" - -#, python-format -msgid "Error mapping volume: %s" -msgstr "볼륨 맵핑 오류: %s" - -#, python-format -msgid "" -"Error migrating volume: %(volumename)s. to target pool %(targetPoolName)s." -msgstr "" -"볼륨 %(volumename)s을(를) 대상 풀 %(targetPoolName)s(으)로 마이그레이션하는 " -"중에 오류가 발생했습니다." - -#, python-format -msgid "Error migrating volume: %s" -msgstr "볼륨 마이그레이션 오류: %s" - -#, python-format -msgid "" -"Error occurred in the volume driver when updating consistency group " -"%(group_id)s." -msgstr "" -"일관성 그룹 %(group_id)s을(를) 업데이트할 때 볼륨 드라이버에서 오류가 발생했" -"습니다." - -msgid "" -"Error occurred when adding hostgroup and lungroup to view. Remove lun from " -"lungroup now." -msgstr "" -"볼 hostgroup 및 lungroup을 추가할 때 오류가 발생했습니다. 이제 lungroup에서 " -"lun을 제거합니다." - -#, python-format -msgid "" -"Error occurred when building request spec list for consistency group %s." -msgstr "일관성 그룹 %s의 요청 사양 목록을 빌드할 때 오류가 발생했습니다." - -#, python-format -msgid "Error occurred when creating cgsnapshot %s." -msgstr "cg 스냅샷 %s을(를) 작성하는 중에 오류가 발생했습니다." - -#, python-format -msgid "" -"Error occurred when creating cloned volume in the process of creating " -"consistency group %(group)s from source CG %(source_cg)s." -msgstr "" -"소스 CG %(source_cg)s에서 일관성 그룹 %(group)s을(를) 작성하는 프로세스 중에 " -"복제된 볼륨을 작성할 때 오류가 발생했습니다." - -#, python-format -msgid "" -"Error occurred when creating consistency group %(cg)s from cgsnapshot " -"%(cgsnap)s." -msgstr "" -"cgsnapshot %(cgsnap)s에서 일관성 그룹 %(cg)s을(를) 작성하는 중에 오류가 발생" -"했습니다. " - -#, python-format -msgid "" -"Error occurred when creating consistency group %(group)s from cgsnapshot " -"%(cgsnap)s." 
-msgstr "" -"cgsnapshot %(cgsnap)s에서 일관성 그룹 %(group)s을(를) 작성하는 중에 오류가 발" -"생했습니다. " - -#, python-format -msgid "" -"Error occurred when creating consistency group %(group)s from source CG " -"%(source_cg)s." -msgstr "" -"소스 CG %(source_cg)s에서 일관성 그룹 %(group)s을(를) 작성할 때 오류가 발생했" -"습니다." - -#, python-format -msgid "Error occurred when creating consistency group %s." -msgstr "일관성 그룹 %s을(를) 작성하는 중에 오류가 발생했습니다. " - -#, python-format -msgid "" -"Error occurred when creating volume entry from snapshot in the process of " -"creating consistency group %(group)s from cgsnapshot %(cgsnap)s." -msgstr "" -"cgsnapshot %(cgsnap)s에서 일관성 그룹 %(group)s을(를) 작성하는 프로세스 중에 " -"스냅샷에서 볼륨 항목을 작성할 때 오류가 발생했습니다." - -#, python-format -msgid "Error occurred when updating consistency group %(group_id)s." -msgstr "일관성 그룹 %(group_id)s을(를) 업데이트하는 중에 오류가 발생했습니다. " - -#, python-format -msgid "Error occurred while cloning backing: %s during retype." -msgstr "다시 입력 중에 지원 %s을(를) 복제하는 중에 오류가 발생했습니다." - -#, python-format -msgid "Error occurred while copying %(src)s to %(dst)s." -msgstr "%(src)s을(를) %(dst)s(으)로 복사하는 중에 오류가 발생했습니다." - -#, python-format -msgid "Error occurred while copying image: %(id)s to volume: %(vol)s." -msgstr "" -"이미지 %(id)s을(를) 볼륨 %(vol)s(으)로 복사하는 중에 오류가 발생했습니다." - -#, python-format -msgid "Error occurred while copying image: %(image_id)s to %(path)s." -msgstr "" -"이미지: %(image_id)s을(를) %(path)s에 복사하는 중에 오류가 발생했습니다." - -msgid "Error occurred while creating temporary backing." -msgstr "임시 지원을 작성하는 중에 오류가 발생했습니다." - -#, python-format -msgid "Error occurred while creating volume: %(id)s from image: %(image_id)s." -msgstr "" -"이미지 %(image_id)s에서 볼륨 %(id)s을(를) 작성하는 중에 오류가 발생했습니다." - -#, python-format -msgid "" -"Error on execute %(command)s. Error code: %(exit_code)d Error msg: %(result)s" -msgstr "" -"%(command)s 실행 오류. 오류 코드: %(exit_code)d 오류 메시지: %(result)s" - -#, python-format -msgid "" -"Error on execute command. Error code: %(exit_code)d Error msg: %(result)s" -msgstr "실행 명령 오류. 
오류 코드: %(exit_code)d 오류 메시지: %(result)s" - -msgid "Error parsing array from host capabilities." -msgstr "호스트 기능에서 배열을 구문 분석하는 중 오류가 발생했습니다." - -msgid "Error parsing array, pool, SLO and workload." -msgstr "배열, 풀, SLO 및 워크로드를 구문 분석하는 중 오류가 발생했습니다." - -msgid "Error parsing target pool name, array, and fast policy." -msgstr "" -"대상 풀 이름, 배열 및 빠른 정책을 구문 분석하는 중 오류가 발생했습니다." - -#, python-format -msgid "" -"Error provisioning volume %(lun_name)s on %(volume_name)s. Details: %(ex)s" -msgstr "" -"%(volume_name)s에서 볼륨 %(lun_name)s을(를) 프로비저닝하는 중에 오류가 발생했" -"습니다. 세부 사항: %(ex)s" - -msgid "Error querying thin pool about data_percent" -msgstr "data_percent에 대한 thin 풀 쿼리 오류" - -msgid "Error renaming logical volume" -msgstr "논리 볼륨의 이름 변경 오류" - -#, python-format -msgid "Error renaming volume %(original)s to %(name)s" -msgstr "볼륨의 이름을 %(original)s에서 %(name)s(으)로 변경하는 중 오류 발생" - -#, python-format -msgid "Error resolving host %(host)s. Error - %(e)s." -msgstr "호스트 %(host)s을(를) 분석하는 중에 오류가 발생했습니다. 오류 - %(e)s." - -#, python-format -msgid "Error retrieving LUN %(vol)s number" -msgstr "LUN %(vol)s 번호 검색 오류" - -#, python-format -msgid "Error running SSH command: \"%s\"." -msgstr "SSH 명령 실행 중 오류: \"%s\"" - -#, python-format -msgid "Error running SSH command: %s" -msgstr "SSH 명령 실행 중 오류: %s" - -msgid "Error running command." -msgstr "명령 실행 오류." - -#, python-format -msgid "" -"Error scheduling %(volume_id)s from last vol-service: %(last_host)s : %(exc)s" -msgstr "" -"볼륨-서비스 %(last_host)s : %(exc)s에서 %(volume_id)s을(를) 스케줄링하는 중 " -"오류 발생" - -msgid "Error sending a heartbeat to coordination backend." -msgstr "하트비트를 조정 백엔드에 보내는 중에 오류가 발생했습니다." - -#, python-format -msgid "Error setting Flash Cache policy to %s - exception" -msgstr "플래시 캐시 정책을 %s(으)로 설정하는 중 오류 발생 - 예외" - -msgid "Error starting coordination backend." -msgstr "조정 백엔드를 시작하는 중에 오류가 발생했습니다." 
- -#, python-format -msgid "Error trying to change %(opt)s from %(old)s to %(new)s" -msgstr "%(opt)s을(를) %(old)s에서 %(new)s(으)로 변경하려는 중 오류 발생" - -#, python-format -msgid "Error unmanaging replay %s" -msgstr "재생 %s 관리 취소 오류" - -#, python-format -msgid "Error unmapping volume: %s" -msgstr "볼륨 맵핑 해제 오류: %s" - -#, python-format -msgid "Error verifying LUN container %(bkt)s" -msgstr "LUN 컨테이너 %(bkt)s 확인 오류" - -#, python-format -msgid "Error verifying iSCSI service %(serv)s on host %(hst)s" -msgstr "호스트 %(hst)s에서 iSCSI 서비스 %(serv)s을(를) 확인하는 중 오류 발생" - -msgid "Error: unable to snap replay" -msgstr "오류: 재생을 스냅할 수 없음" - -#, python-format -msgid "Exception cloning volume %(name)s from source volume %(source)s." -msgstr "" -"소스 볼륨 %(source)s에서 볼륨 %(name)s을(를) 복제하는 중 예외가 발생했습니다." - -#, python-format -msgid "Exception creating LUN %(name)s in pool %(pool)s." -msgstr "풀 %(pool)s에서 LUN %(name)s을(를) 작성하는 중 예외가 발생했습니다." - -#, python-format -msgid "Exception creating vol %(name)s on pool %(pool)s." -msgstr "풀 %(pool)s에서 볼륨 %(name)s을(를) 작성하는 중 예외가 발생했습니다." - -#, python-format -msgid "" -"Exception creating volume %(name)s from source %(source)s on share %(share)s." -msgstr "" -"공유 %(share)s하는 소스 %(source)s에서 볼륨 %(name)s을(를) 작성하는 중 예외" -"가 발생했습니다." - -#, python-format -msgid "Exception details: %s" -msgstr "예외 세부 사항: %s" - -#, python-format -msgid "Exception during mounting %s" -msgstr "%s 마운트 중 예외" - -#, python-format -msgid "Exception during mounting %s." -msgstr "%s 마운트 중 예외 발생." - -msgid "Exception during mounting." -msgstr "마운트 중에 예외 발생" - -#, python-format -msgid "Exception during snapCPG revert: %s" -msgstr "snapCPG 되돌리기 중에 예외 발생: %s" - -msgid "Exception encountered: " -msgstr "예외 발생:" - -#, python-format -msgid "Exception handling resource: %s" -msgstr "자원 처리 예외: %s" - -msgid "Exception in string format operation" -msgstr "문자열 형식화 오퍼레이션의 예외" - -msgid "Exception loading extension." -msgstr "확장을 로드하는 중에 예외가 발생했습니다." 
- -#, python-format -msgid "Exception: %(ex)s" -msgstr "예외: %(ex)s" - -#, python-format -msgid "Exception: %s" -msgstr "예외: %s" - -#, python-format -msgid "Exception: %s." -msgstr "예외: %s." - -#, python-format -msgid "Exec of \"rm\" command on backing file for %s was unsuccessful." -msgstr "%s 의 지원 파일에서 \"rm\" 명령을 실행하는 데 실패했습니다." - -#, python-format -msgid "Exists snapshot notification failed: %s" -msgstr "스냅샷 존재 알림 실패: %s" - -#, python-format -msgid "Exists volume notification failed: %s" -msgstr "볼륨 존재 알림 실패: %s" - -msgid "Extend volume failed." -msgstr "볼륨 확장에 실패했습니다." - -#, python-format -msgid "Extension of volume %s failed." -msgstr "볼륨 %s 확장에 실패했습니다." - -msgid "" -"Extra spec replication:mode must be set and must be either 'sync' or " -"'periodic'." -msgstr "" -"추가 사양 replication:mode를 설정해야 하며 'sync' 또는 'periodic'이어야 합니" -"다." - -msgid "" -"Extra spec replication:sync_period must be greater than 299 and less than " -"31622401 seconds." -msgstr "" -"추가 사양 replication:sync_period는 299 이상 31622401초 미만이어야 합니다." - -#, python-format -msgid "Extra specs must be specified as capabilities:%s=' True'." -msgstr "추가 사양은 capabilities:%s=' True'로 지정해야 합니다" - -msgid "" -"Extra specs must be specified as replication_type=' sync' or ' " -"async'." -msgstr "" -"추가 사양은 replication_type=' sync' 또는 ' async'로 지정해야 합니다." - -msgid "FAST is not supported on this array." -msgstr "이 배열에서는 FAST가 지원되지 않습니다." 
- -#, python-format -msgid "Failed collecting fcns database info for fabric %s" -msgstr "패브릭 %s의 fcns 데이터베이스 정보 수집 실패" - -#, python-format -msgid "Failed collecting name server info from fabric %s" -msgstr "패브릭 %s에서 이름 서버 정보를 수집하는 데 실패" - -#, python-format -msgid "Failed collecting nsshow info for fabric %s" -msgstr "패브릭 %s의 nsshow 정보를 수집하는 데 실패" - -msgid "Failed collecting show fcns database for fabric" -msgstr "패브릭의 표시 fcns 데이터베이스 수집 실패" - -#, python-format -msgid "Failed destroying volume entry %s" -msgstr "볼륨 항목 %s 영구 삭제 실패" - -#, python-format -msgid "Failed destroying volume entry: %s." -msgstr "볼륨 항목 영구 삭제 실패: %s" - -#, python-format -msgid "" -"Failed fetching snapshot %(snapshot_id)s bootable flag using the provided " -"glance snapshot %(snapshot_ref_id)s volume reference" -msgstr "" -"제공된 glance 스냅샷 %(snapshot_ref_id)s 볼륨 참조를 사용하여 스냅샷 " -"%(snapshot_id)s 부트 가능 플래그를 가져오는 데 실패" - -#, python-format -msgid "Failed getting active zone set from fabric %s" -msgstr "패브릭 %s에서 활성 구역 세트를 가져오는 데 실패" - -#, python-format -msgid "Failed getting zone status from fabric %s" -msgstr "패브릭 %s의 구역 상태 가져오기 실패" - -#, python-format -msgid "Failed image conversion during cache creation: %s" -msgstr "캐시 작성 중에 이미지 전환 실패: %s" - -#, python-format -msgid "" -"Failed notifying about the snapshot action %(event)s for snapshot %(snp_id)s." -msgstr "" -"스냅샷 %(snp_id)s의 스냅샷 작업 %(event)s에 대해 알리는 데 실패했습니다." 
- -#, python-format -msgid "" -"Failed notifying about the volume action %(event)s for volume %(volume_id)s" -msgstr "볼륨 %(volume_id)s의 볼륨 작업 %(event)s에 대해 알리는 데 실패" - -#, python-format -msgid "Failed notifying on %(topic)s payload %(payload)s" -msgstr "%(topic)s 페이로드 %(payload)s에 대한 알림 실패" - -#, python-format -msgid "" -"Failed recovery attempt to create iscsi backing lun for Volume ID:" -"%(vol_id)s: %(e)s" -msgstr "" -"볼륨 ID:%(vol_id)s의 iscsi 지원 lun을 작성하기 위한 복구 시도에 실패: %(e)s" - -#, python-format -msgid "Failed rolling back quota for %s reservations" -msgstr "%s 예약 할당량을 롤백하는 데 실패" - -#, python-format -msgid "Failed rolling back quota for %s reservations." -msgstr "%s 예약 할당량을 롤백하는 데 실패." - -#, python-format -msgid "" -"Failed setting source volume %(source_volid)s back to its initial " -"%(source_status)s status" -msgstr "" -"소스 볼륨 %(source_volid)s을(를) 초기 %(source_status)s 상태로 다시 설정하는 " -"데 실패" - -#, python-format -msgid "" -"Failed to Roll back to re-add volume %(volumeName)s to default storage group " -"for fast policy %(fastPolicyName)s. Please contact your sysadmin to get the " -"volume returned to the default storage group." -msgstr "" -"빠른 정책 %(fastPolicyName)s의 기본 스토리지 그룹에 볼륨 %(volumeName)s을" -"(를) 다시 추가하기 위한 롤백에 실패했습니다. 기본 스토리지 그룹에 리턴된 볼륨" -"을 가져오려면 sysadmin에 문의하십시오." - -#, python-format -msgid "" -"Failed to Roll back to re-add volume %(volumeName)s to default storage group " -"for fast policy %(fastPolicyName)s: Please contact your sys admin to get the " -"volume re-added manually." -msgstr "" -"빠른 정책 %(fastPolicyName)s의 기본 스토리지 그룹에 볼륨 %(volumeName)s을" -"(를) 다시 추가하기 위한 롤백에 실패했습니다. 수동으로 다시 추가한 볼륨을 가져" -"오려면 sysadmin에 문의하십시오." - -#, python-format -msgid "" -"Failed to add %(volumeName)s to default storage group for fast policy " -"%(fastPolicyName)s." -msgstr "" -"빠른 정책 %(fastPolicyName)s에 대한 기본 스토리지 그룹에 %(volumeName)s 을" -"(를) 추가하지 못했습니다." - -#, python-format -msgid "Failed to add %s to cg." -msgstr "%s을(를) cg에 추가하는 데 실패했습니다." 
- -#, python-format -msgid "Failed to add device to handler %s" -msgstr "핸들러 %s에 장치를 추가하는 데 실패" - -#, python-format -msgid "Failed to add initiator iqn %s to target" -msgstr "개시자 iqn %s을(를) 대상에 추가하지 못함" - -#, python-format -msgid "Failed to add initiator to group for SCST target %s" -msgstr "SCST 대상 %s의 그룹에 개시자를 추가하는 데 실패" - -#, python-format -msgid "Failed to add lun to SCST target id:%(vol_id)s: %(e)s" -msgstr "SCST 대상 id:%(vol_id)s에 lun 추가 실패: %(e)s" - -#, python-format -msgid "Failed to add multihost-access for volume \"%s\"." -msgstr "볼륨 \"%s\"의 다중 호스트 액세스를 추가하는 데 실패했습니다." - -#, python-format -msgid "" -"Failed to add storage group %(storageGroupInstanceName)s to tier policy rule " -"%(tierPolicyRuleInstanceName)s." -msgstr "" -"스토리지 그룹 %(storageGroupInstanceName)s을(를) 계층 정책 규칙 " -"%(tierPolicyRuleInstanceName)s에 추가하지 못했습니다." - -#, python-format -msgid "Failed to add target(port: %s)" -msgstr "대상(포트: %s)을 추가하는 데 실패" - -msgid "Failed to apply replication:activereplay setting" -msgstr "replication:activereplay 설정을 적용하는 데 실패" - -msgid "Failed to attach source volume for copy." -msgstr "복사할 소스 볼륨을 연결하는 데 실패했습니다." - -#, python-format -msgid "Failed to attach volume %(vol)s." -msgstr "볼륨 %(vol)s에 연결하는 데 실패했습니다." - -msgid "Failed to authenticate user." -msgstr "사용자를 인증하지 못했습니다." - -#, python-format -msgid "Failed to check cluster status.(command: %s)" -msgstr "클러스터 상태를 확인하는 데 실패했습니다(명령: %s)." - -#, python-format -msgid "Failed to clone image volume %(id)s." -msgstr "이미지 볼륨 %(id)s을(를) 복제하는 데 실패." - -#, python-format -msgid "Failed to clone volume %(volume_id)s for image %(image_id)s." -msgstr "" -"이미지 %(image_id)s의 볼륨 %(volume_id)s을(를) 복제하는 데 실패했습니다." - -#, python-format -msgid "Failed to clone volume.(command: %s)" -msgstr "볼륨을 복제하는 데 실패했습니다(명령: %s)." - -#, python-format -msgid "Failed to close disk device %s" -msgstr "디스크 장치 %s 닫기 실패" - -#, python-format -msgid "" -"Failed to collect return properties for volume %(vol)s and connector " -"%(conn)s." 
-msgstr "" -"%(vol)s 볼륨 및 %(conn)s 커넥터에 대한 리턴 특성을 수집하지 못했습니다." - -#, python-format -msgid "Failed to commit reservations %s" -msgstr "%s 예약을 커밋하는 데 실패" - -#, python-format -msgid "Failed to copy %(src)s to %(dest)s." -msgstr "%(src)s을(를) %(dest)s에 복사하는 데 실패했습니다." - -#, python-format -msgid "Failed to copy image %(image_id)s to volume: %(volume_id)s" -msgstr "이미지 %(image_id)s을(를) 볼륨: %(volume_id)s에 복사하는 데 실패" - -#, python-format -msgid "Failed to copy image to volume: %(volume_id)s" -msgstr "볼륨: %(volume_id)s에 이미지를 복사하는 데 실패" - -#, python-format -msgid "Failed to copy volume %(src)s to %(dest)s." -msgstr "볼륨 %(src)s을(를) %(dest)s에 복사하는 데 실패했습니다." - -#, python-format -msgid "Failed to copy volume %(vol1)s to %(vol2)s" -msgstr "볼륨 %(vol1)s을(를) %(vol2)s에 복사하는 데 실패" - -#, python-format -msgid "Failed to create %(conf)s for volume id:%(vol_id)s" -msgstr "볼륨 id:%(vol_id)s의 %(conf)s을(를) 작성하는 데 실패" - -#, python-format -msgid "Failed to create CG from CGsnapshot. Exception: %s" -msgstr "CGSnapshot에서 CG 작성 실패. 예외: %s." - -#, python-format -msgid "Failed to create CGSnapshot. Exception: %s." -msgstr "CGSnapshot 작성 실패. 예외: %s." - -msgid "" -"Failed to create SOAP client.Check san_ip, username, password and make sure " -"the array version is compatible" -msgstr "" -"SOAP 클라이언트 작성에 실패했습니다. san_ip, 사용자 이름, 비밀번호를 확인하" -"고 배열 버전이 호환되는지 확인하십시오." - -#, python-format -msgid "" -"Failed to create a first volume for storage group : %(storageGroupName)s." -msgstr "" -"스토리지 그룹: %(storageGroupName)s의 첫 번째 볼륨을 작성하는 데 실패했습니" -"다." - -#, python-format -msgid "Failed to create blkio cgroup '%(name)s'." -msgstr "blkio cgroup '%(name)s'을(를) 작성하는 데 실패했습니다." - -#, python-format -msgid "Failed to create clone of volume \"%s\"." -msgstr "볼륨 \"%s\"의 복제본을 작성하는 데 실패했습니다." - -#, python-format -msgid "Failed to create cloned volume %s." -msgstr "복제된 볼륨 %s을(를) 작성하지 못했습니다." - -#, python-format -msgid "Failed to create consistency group %(group_id)s." -msgstr "일관성 그룹 %(group_id)s을(를) 작성하는 데 실패했습니다." 
- -#, python-format -msgid "" -"Failed to create default storage group for FAST policy : %(fastPolicyName)s." -msgstr "" -"FAST 정책: %(fastPolicyName)s에 대한 기본 스토리지 그룹을 작성하지 못했습니" -"다." - -#, python-format -msgid "Failed to create group to SCST target %s" -msgstr "SCST 대상 %s에 그룹을 작성하는 데 실패" - -#, python-format -msgid "Failed to create hardware id(s) on %(storageSystemName)s." -msgstr "%(storageSystemName)s에서 하드웨어 ID를 작성하지 못했습니다. " - -#, python-format -msgid "" -"Failed to create iscsi target for Volume ID: %(vol_id)s. Please ensure your " -"tgtd config file contains 'include %(volumes_dir)s/*'" -msgstr "" -"볼륨 ID: %(vol_id)s의 iscsi 대상을 작성하지 못했습니다. tgtd 구성 파일에 " -"'include %(volumes_dir)s/*'가 포함되었는지 확인하십시오." - -#, python-format -msgid "Failed to create iscsi target for Volume ID: %(vol_id)s: %(e)s" -msgstr "볼륨 ID:%(vol_id)s:에 대한 iscsi 대상을 작성하지 못함: %(e)s" - -#, python-format -msgid "" -"Failed to create iscsi target for volume id:%(vol_id)s. Please verify your " -"configuration in %(volumes_dir)s'" -msgstr "" -"볼륨 id:%(vol_id)s에 대한 iscsi 대상을 작성하지 못했습니다. " -"%(volumes_dir)s'에서 구성을 확인하십시오." - -#, python-format -msgid "Failed to create iscsi target for volume id:%(vol_id)s: %(e)s" -msgstr "볼륨 id:%(vol_id)s: %(e)s에 대한 iscsi 대상을 작성하지 못했습니다. " - -#, python-format -msgid "Failed to create iscsi target for volume id:%s" -msgstr "볼륨 id:%s에 대한 iscsi 대상을 작성하지 못했습니다. " - -#, python-format -msgid "Failed to create iscsi target for volume id:%s." -msgstr "볼륨 id:%s에 대한 iscsi 대상을 작성하지 못했습니다. " - -#, python-format -msgid "Failed to create manage_existing flow: %(object_type)s %(object_id)s." -msgstr "" -"manage_existing 흐르을 작성하는 데 실패: %(object_type)s %(object_id)s." - -#, python-format -msgid "Failed to create snapshot of volume \"%s\"." -msgstr "볼륨 \"%s\"에 대한 스냅샷을 작성하지 못했습니다. " - -#, python-format -msgid "Failed to create snapshot. (command: %s)" -msgstr "스냅샷을 작성하는 데 실패했습니다(명령: %s)." 
- -#, python-format -msgid "Failed to create transfer record for %s" -msgstr "%s의 전송 레코드를 작성하는 데 실패" - -#, python-format -msgid "Failed to create volume \"%s\"." -msgstr "볼륨 \"%s\"을(를) 작성하지 못함 " - -#, python-format -msgid "Failed to create volume %s" -msgstr "%s 볼륨을 작성하지 못함 " - -#, python-format -msgid "Failed to create volume %s." -msgstr "볼륨 %s을(를) 작성하지 못했습니다. " - -#, python-format -msgid "Failed to create volume from snapshot \"%s\"." -msgstr "스냅샷 \"%s\"에서 볼륨을 작성하는 데 실패했습니다." - -#, python-format -msgid "Failed to create volume. %s" -msgstr "볼륨을 작성하지 못했습니다. %s" - -#, python-format -msgid "Failed to create volume: %(name)s (%(status)s)" -msgstr "볼륨을 작성하는 데 실패: %(name)s (%(status)s)" - -#, python-format -msgid "Failed to created Cinder secure environment indicator file: %s" -msgstr "Cinder 보안 환경 표시기 파일을 작성하는 데 실패: %s" - -#, python-format -msgid "Failed to delete initiator iqn %s from target." -msgstr "대상에서 개시자 iqn %s을(를) 삭제하지 못했습니다." - -#, python-format -msgid "Failed to delete snapshot %(snap)s of volume %(vol)s." -msgstr "볼륨 %(vol)s의 스냅샷 %(snap)s을(를) 삭제하는 데 실패했습니다." - -#, python-format -msgid "Failed to delete snapshot. (command: %s)" -msgstr "스냅샷을 삭제하는 데 실패했습니다(명령: %s)." - -#, python-format -msgid "" -"Failed to delete the snapshot %(snap)s of CGSnapshot. Exception: " -"%(exception)s." -msgstr "" -"CGSnapshot의 스냅샷 %(snap)s을(를) 삭제하는 데 실패했습니다. 예외: " -"%(exception)s." - -#, python-format -msgid "" -"Failed to delete the snapshot %(snap)s of cgsnapshot: %(cgsnapshot_id)s. " -"Exception: %(exception)s." -msgstr "" -"cgsnapshot: %(cgsnapshot_id)s의 스냅샷 %(snap)s을(를) 삭제하는 데 실패했습니" -"다. 예외: %(exception)s." - -#, python-format -msgid "Failed to delete the volume %(vol)s of CG. Exception: %(exception)s." -msgstr "CG의 볼륨 %(vol)s을(를) 삭제하는 데 실패했습니다. 예외: %(exception)s." - -#, python-format -msgid "Failed to delete volume \"%s\"." -msgstr "볼륨 \"%s\"을(를) 삭제하는 데 실패했습니다. " - -#, python-format -msgid "Failed to delete volume %s" -msgstr "볼륨 %s을(를) 삭제하는 데 실패했습니다." 
- -#, python-format -msgid "Failed to delete volume. %s" -msgstr "볼륨을 삭제하는 데 실패했습니다. %s" - -#, python-format -msgid "Failed to ensure export of volume \"%s\"." -msgstr "볼륨 \"%s\" 내보내기를 확인하는 데 실패했습니다." - -#, python-format -msgid "Failed to ensure export of volume %s" -msgstr "볼륨 %s 내보내기를 확인하는 데 실패했습니다." - -#, python-format -msgid "Failed to export fiber channel target due to %s" -msgstr "%s(으)로 인해 파이버 채널 대상을 내보내는 데 실패" - -#, python-format -msgid "Failed to extend volume: %(vol)s to size: %(size)s GB." -msgstr "볼륨 %(vol)s을(를) %(size)sGB 크기로 확장하는 데 실패했습니다." - -#, python-format -msgid "" -"Failed to extend_volume %(name)s from %(current_size)sGB to %(new_size)sGB." -msgstr "" -"%(current_size)sGB에서 %(new_size)sGB(으)로 extend_volume %(name)s을(를) 수행" -"하는 데 실패했습니다." - -#, python-format -msgid "Failed to failover volume %(volume_id)s to %(target)s: %(error)s." -msgstr "" -"볼륨 %(volume_id)s을(를) %(target)s(으)로 장애 복구하는 데 실패: %(error)s." - -#, python-format -msgid "Failed to find %(s)s. Result %(r)s" -msgstr "%(s)s을(를) 찾지 못했습니다. 결과 %(r)s" - -msgid "Failed to get IQN!" -msgstr "IQN을 가져오는 데 실패" - -msgid "Failed to get LUN information!" -msgstr "LUN 정보를 가져오는 데 실패했습니다." - -#, python-format -msgid "Failed to get allocation information (%d)!" -msgstr "할당 정보를 가져오는 데 실패했습니다(%d)." - -#, python-format -msgid "Failed to get allocation information: %(host)s (%(status)d)!" -msgstr "할당 정보: %(host)s (%(status)d)을(를) 가져오는 데 실패했습니다." 
- -#, python-format -msgid "Failed to get device number for throttling: %(error)s" -msgstr "조절을 위해 장치 번호를 가져오는 데 실패: %(error)s" - -#, python-format -msgid "" -"Failed to get driver initiator data for initiator %(initiator)s and " -"namespace %(namespace)s" -msgstr "" -"개시자 %(initiator)s 및 네임스페이스 %(namespace)s의 드라이버 개시자 데이터" -"를 가져오는 데 실패" - -#, python-format -msgid "Failed to get fiber channel info from storage due to %(stat)s" -msgstr "%(stat)s(으)로 인해 스토리지에서 파이버 채널 정보를 가져오는 데 실패" - -#, python-format -msgid "Failed to get fiber channel target from storage server due to %(stat)s" -msgstr "" -"%(stat)s(으)로 인해 스토리지 서버에서 파이버 채널 대상을 가져오는 데 실패" - -#, python-format -msgid "Failed to get or create storage group %(storageGroupName)s." -msgstr "" -"스토리지 그룹 %(storageGroupName)s을(를) 가져오거나 작성하지 못했습니다." - -#, python-format -msgid "Failed to get response: %s." -msgstr "응답 가져오기 실패: %s." - -#, python-format -msgid "Failed to get server info due to %(state)s." -msgstr "%(state)s(으)로 인해 서버 정보를 가져오는 데 실패했습니다." - -msgid "Failed to get sns table" -msgstr "sns 테이블 가져오기 실패" - -#, python-format -msgid "Failed to get target wwpns from storage due to %(stat)s" -msgstr "%(stat)s(으)로 인해 스토리지에서 대상 wwpns를 가져오는 데 실패" - -msgid "Failed to get updated stats from Datera Cluster." -msgstr "Datera 클러스터에서 업데이트된 통계를 가져오는 데 실패했습니다." - -msgid "Failed to get updated stats from Datera cluster." -msgstr "Datera 클러스터에서 업데이트된 통계를 가져오는 데 실패했습니다." - -#, python-format -msgid "Failed to get volume status. %s" -msgstr "볼륨 상태를 가져오는 데 실패했습니다. %s" - -msgid "Failed to initialize connection" -msgstr "연결 초기화 실패" - -#, python-format -msgid "Failed to initialize connection to volume \"%s\"." -msgstr "볼륨 \"%s\"에 대한 연결을 초기화하는 데 실패했습니다." - -msgid "Failed to initialize connection." -msgstr "연결을 초기화하는 데 실패했습니다." - -msgid "Failed to initialize driver." -msgstr "드라이버를 초기화하는 데 실패했습니다." - -#, python-format -msgid "Failed to issue df command for path %(path)s, error: %(error)s." -msgstr "경로 %(path)s에서 df 명령 실행 실패, 오류: %(error)s." 
- -#, python-format -msgid "Failed to issue mmgetstate command, error: %s." -msgstr "mmgetstate 명령 실행 실패, 오류: %s." - -#, python-format -msgid "Failed to issue mmlsattr command for path %(path)s, error: %(error)s." -msgstr "경로 %(path)s에서 mmlsattr 명령 실행 실패, 오류: %(error)s." - -#, python-format -msgid "Failed to issue mmlsattr command on path %(path)s, error: %(error)s" -msgstr "경로 %(path)s에서 mmlsattr 명령 실행 실패, 오류: %(error)s." - -#, python-format -msgid "Failed to issue mmlsconfig command, error: %s." -msgstr "mmlsconfig 명령 실행 실패, 오류: %s." - -#, python-format -msgid "Failed to issue mmlsfs command for path %(path)s, error: %(error)s." -msgstr "경로 %(path)s에서 mmlsfs 명령 실행 실패, 오류: %(error)s." - -#, python-format -msgid "Failed to issue mmlsfs command, error: %s." -msgstr "mmlsfs 명령 실행 실패, 오류: %s." - -#, python-format -msgid "Failed to open iet session list for %s" -msgstr "%s의 iet 세션 목록을 여는 데 실패" - -#, python-format -msgid "Failed to open volume from %(path)s." -msgstr "%(path)s에서 볼륨을 여는 데 실패했습니다." - -msgid "Failed to perform replication failover" -msgstr "복제 장애 복구 수행 실패" - -#, python-format -msgid "Failed to present volume %(name)s (%(status)d)!" -msgstr "볼륨 %(name)s (%(status)d)을(를) 표시하는 데 실패했습니다." - -msgid "Failed to re-export volume, setting to ERROR." -msgstr "볼륨을 다시 내보내는 데 실패했습니다. ERROR로 설정합니다." - -#, python-format -msgid "Failed to register image volume location %(uri)s." -msgstr "이미지 볼륨 위치 %(uri)s을(를) 등록하는 데 실패했습니다." - -#, python-format -msgid "" -"Failed to remove %(volumeName)s from the default storage group for the FAST " -"Policy." -msgstr "" -"FAST 정책에 대한 기본 스토리지 그룹에서 %(volumeName)s 볼륨을 제거하지 못했습" -"니다." - -#, python-format -msgid "Failed to remove %s from cg." -msgstr "cg에서 %s을(를) 제거하는 데 실패했습니다." 
- -#, python-format -msgid "Failed to remove LUN %s" -msgstr "LUN %s 제거 실패" - -#, python-format -msgid "Failed to remove iscsi target for Volume ID: %(vol_id)s: %(e)s" -msgstr "볼륨 ID:%(vol_id)s:에 대한 iscsi 대상을 제거하지 못함: %(e)s" - -#, python-format -msgid "Failed to remove iscsi target for volume id:%(vol_id)s: %(e)s" -msgstr "볼륨 id:%(vol_id)s: %(e)s에 대한 iscsi 대상을 제거하지 못했습니다. " - -#, python-format -msgid "Failed to remove iscsi target for volume id:%s" -msgstr "볼륨 id:%s에 대한 iscsi 대상을 제거하지 못했습니다. " - -#, python-format -msgid "Failed to remove iscsi target for volume id:%s." -msgstr "볼륨 id:%s에 대한 iscsi 대상을 제거하지 못했습니다. " - -#, python-format -msgid "Failed to rename %(new_volume)s into %(volume)s." -msgstr "%(new_volume)s의 이름을 %(volume)s(으)로 바꾸지 못했습니다." - -msgid "Failed to rename the created snapshot, reverting." -msgstr "작성된 스냅샷의 이름을 바꾸는 데 실패, 되돌리는 중입니다." - -#, python-format -msgid "Failed to rename volume %(existing)s to %(new)s. Volume manage failed." -msgstr "" -"볼륨 %(existing)s의 이름을 %(new)s(으)로 변경하지 못했습니다. 볼륨 관리에 실" -"패했습니다." - -#, python-format -msgid "" -"Failed to rename volume %(existing)s to %(new)s. Volume unmanage failed." -msgstr "" -"볼륨 %(existing)s의 이름을 %(new)s(으)로 변경하는 데 실패했습니다. 볼륨 관리 " -"취소에 실패했습니다." - -#, python-format -msgid "Failed to request async delete of migration source vol %(vol)s: %(err)s" -msgstr "" -"마이그레이션 소스 볼륨 %(vol)s의 비동기 삭제를 요청하는 데 실패: %(err)s" - -#, python-format -msgid "" -"Failed to resize vdi. Shrinking vdi not supported. vdi: %(vdiname)s new " -"size: %(size)s" -msgstr "" -"vdi의 크기를 조정하는 데 실패했습니다. vdi 축소는 지원되지 않습니다. vdi: " -"%(vdiname)s 새 크기: %(size)s" - -#, python-format -msgid "" -"Failed to resize vdi. Too large volume size. vdi: %(vdiname)s new size: " -"%(size)s" -msgstr "" -"vdi의 크기를 조정하는 데 실패했습니다. 볼륨 크기가 너무 큽니다. vdi: " -"%(vdiname)s 새 크기: %(size)s" - -#, python-format -msgid "Failed to resize vdi. vdi not found. %s" -msgstr "vdi의 크기를 조정하는 데 실패했습니다. vdi를 찾을 수 없습니다. %s" - -#, python-format -msgid "Failed to resize vdi. 
vdi: %(vdiname)s new size: %(size)s" -msgstr "" -"vdi의 크기를 조정하는 데 실패했습니다. vdi: %(vdiname)s 새 크기: %(size)s" - -#, python-format -msgid "Failed to resize volume %(volume_id)s, error: %(error)s." -msgstr "볼륨 %(volume_id)s 크기 조정 실패, 오류: %(error)s." - -#, python-format -msgid "Failed to retrieve volume SolidFire-ID: %s in get_by_account!" -msgstr "%s get_by_account!에서 SolidFire-ID 볼륨을 검색하지 못했습니다. " - -#, python-format -msgid "" -"Failed to return volume %(volumeName)s to original storage pool. Please " -"contact your system administrator to return it to the correct location." -msgstr "" -"볼륨 %(volumeName)s을(를) 원래 스토리지 풀로 리턴하는 데 실패했습니다. 시스" -"템 관리자에게 문의하여 올바른 위치로 리턴하십시오." - -#, python-format -msgid "Failed to roll back reservations %s" -msgstr "%s 예약을 철회하는 데 실패" - -#, python-format -msgid "Failed to run task %(name)s: %(cause)s" -msgstr "작업 %(name)s 실행 실패: %(cause)s" - -#, python-format -msgid "Failed to schedule_%(method)s: %(ex)s" -msgstr "schedule_%(method)s 실패: %(ex)s" - -#, python-format -msgid "Failed to send request: %s." -msgstr "요청 보내기 실패: %s." - -#, python-format -msgid "Failed to set 'enable' attribute for SCST target %s" -msgstr "SCST 대상 %s의 'enable' 속성을 설정하는 데 실패" - -#, python-format -msgid "Failed to set attribute for enable target driver %s" -msgstr "대상 드라이버 %s을(를) 활성화하는 속성을 설정하는 데 실패" - -#, python-format -msgid "Failed to set properties for volume %(existing)s. Volume manage failed." -msgstr "" -"볼륨 %(existing)s의 특성을 설정하지 못했습니다. 볼륨 관리에 실패했습니다." - -#, python-format -msgid "" -"Failed to set properties for volume %(existing)s. Volume unmanage failed." -msgstr "" -"볼륨 %(existing)s의 특성을 설정하지 못했습니다. 볼륨 관리 취소에 실패했습니" -"다." - -msgid "Failed to setup the Dell EqualLogic driver." -msgstr "Dell EqualLogic 드라이버를 설정하는 데 실패했습니다." - -msgid "Failed to shutdown horcm." -msgstr "horcm을 시스템 종료하는 데 실패했습니다." - -#, python-format -msgid "Failed to snap Consistency Group %s" -msgstr "일관성 그룹 %s을(를) 맞추는 데 실패" - -msgid "Failed to start horcm." -msgstr "horcm을 시작하지 못했습니다." 
- -msgid "Failed to terminate connection" -msgstr "연결 종료 실패" - -#, python-format -msgid "Failed to terminate connection %(initiator)s %(vol)s" -msgstr "연결 %(initiator)s %(vol)s을(를) 종료하는 데 실패" - -#, python-format -msgid "Failed to terminate connection to volume \"%s\"." -msgstr "볼륨 \"%s\"에 대한 연결을 종료하는 데 실패했습니다." - -#, python-format -msgid "Failed to umount %(share)s, reason=%(stderr)s" -msgstr "%(share)s을(를) umount하는 데 실패, 이유=%(stderr)s" - -#, python-format -msgid "" -"Failed to update %(conf)s for volume id %(vol_id)s after removing iscsi " -"target" -msgstr "" -"iscsi 대상을 제거한 후 볼륨 id %(vol_id)s의 %(conf)s을(를) 업데이트하는 데 실" -"패" - -#, python-format -msgid "Failed to update %(conf)s for volume id:%(vol_id)s" -msgstr "볼륨 id:%(vol_id)s의 %(conf)s을(를) 업데이트하는 데 실패" - -#, python-format -msgid "" -"Failed to update %(volume_id)s metadata using the provided snapshot " -"%(snapshot_id)s metadata." -msgstr "" -"제공된 스냅샷 %(snapshot_id)s 메타데이터를 사용하여 %(volume_id)s 메타데이터" -"를 업데이트하는 데 실패했습니다." - -#, python-format -msgid "Failed to update quota donating volume transfer id %s" -msgstr "볼륨 전송 id %s를 제공하는 할당량을 업데이트하는 데 실패" - -#, python-format -msgid "Failed to update quota for consistency group %s." -msgstr "일관성 그룹 %s의 할당량을 업데이트하는 데 실패했습니다." - -#, python-format -msgid "Failed to update quota for deleting volume: %s" -msgstr "볼륨 삭제를 위해 할당량을 업데이트하는 데 실패: %s" - -#, python-format -msgid "Failed to update quota while deleting snapshots: %s" -msgstr "스냅샷을 삭제하는 중에 할당량 업데이트 실패: %s" - -msgid "Failed to update quota while deleting volume." -msgstr "볼륨을 삭제하는 동안 할당량을 업데이트하는 데 실패했습니다." - -msgid "Failed to update replay profiles" -msgstr "재생 프로파일을 업데이트하는 데 실패" - -msgid "Failed to update storage profile" -msgstr "스토리지 프로파일을 업데이트하는 데 실패" - -msgid "Failed to update usages deleting backup" -msgstr "백업을 삭제하는 중 사용법을 업데이트하지 못함" - -msgid "Failed to update usages deleting snapshot" -msgstr "스냅샷을 삭제하는 중 사용법을 업데이트하지 못함" - -msgid "Failed to update usages deleting volume." -msgstr "볼륨을 삭제하는 중 사용법을 업데이트하지 못했습니다." 
- -#, python-format -msgid "Failed to update volume status: %s" -msgstr "볼륨 상태를 업데이트하는 데 실패: %s" - -#, python-format -msgid "" -"Failed to verify that volume was added to storage group for FAST policy: " -"%(fastPolicyName)s." -msgstr "" -"FAST 정책의 스토리지 그룹에 볼륨이 추가되었는지 확인하는 데 실패: " -"%(fastPolicyName)s." - -msgid "Failed to write in /etc/scst.conf." -msgstr "/etc/scst.conf에 쓰지 못했습니다." - -#, python-format -msgid "Failed to write persistence file: %(path)s." -msgstr "영구 파일을 쓰는 데 실패: %(path)s." - -#, python-format -msgid "" -"Failed updating %(snapshot_id)s metadata using the provided volumes " -"%(volume_id)s metadata" -msgstr "" -"제공된 %(volume_id)s 메타데이터를 사용하여 %(snapshot_id)s 메타데이터를 업데" -"이트하는 데 실패" - -#, python-format -msgid "" -"Failed updating model of snapshot %(snapshot_id)s with creation provided " -"model %(model)s." -msgstr "" -"스냅샷 %(snapshot_id)s의 모델을 작성 제공 모델 %(model)s(으)로 업데이트하는 " -"데 실패했습니다." - -#, python-format -msgid "" -"Failed updating model of snapshot %(snapshot_id)s with driver provided model " -"%(model)s." -msgstr "" -"스냅샷 %(snapshot_id)s의 모델을 드라이버 제공 모델 %(model)s(으)로 업데이트하" -"는 데 실패했습니다." - -#, python-format -msgid "" -"Failed updating model of volume %(volume_id)s with creation provided model " -"%(model)s" -msgstr "" -"볼륨 %(volume_id)s의 모델을 작성 제공 모델 %(model)s(으)로 업데이트하는 데 실" -"패" - -#, python-format -msgid "" -"Failed updating model of volume %(volume_id)s with driver provided model " -"%(model)s" -msgstr "" -"볼륨 %(volume_id)s의 모델을 드라이버 제공 모델 %(model)s(으)로 업데이트하는 " -"데 실패" - -#, python-format -msgid "Failed updating snapshot %(snapshot_id)s with %(update)s." -msgstr "스냅샷 %(snapshot_id)s을(를) %(update)s(으)로 업데이트하는 데 실패." 
- -#, python-format -msgid "" -"Failed updating snapshot metadata using the provided volumes %(volume_id)s " -"metadata" -msgstr "" -"제공된 %(volume_id)s 메타데이터를 사용하여 스냅샷 메타데이터를 업데이트하는 " -"데 실패" - -#, python-format -msgid "Failed updating volume %(volume_id)s bootable flag to true" -msgstr "볼륨 %(volume_id)s 부트 가능 플래그를 true로 업데이트하는 데 실패" - -#, python-format -msgid "Failed updating volume %(volume_id)s with %(update)s" -msgstr "볼륨 %(volume_id)s을(를) %(update)s(으)로 업데이트하는 데 실패" - -#, python-format -msgid "Failed updating volume %(volume_id)s with %(updates)s" -msgstr "볼륨 %(volume_id)s을(를) %(updates)s(으)로 업데이트하는 데 실패" - -#, python-format -msgid "Failure deleting staged tmp LUN %s." -msgstr "스테이징된 임시 LUN %s 삭제 실패." - -#, python-format -msgid "Failure restarting snap vol. Error: %s." -msgstr "스냅 볼륨을 다시 시작하는 데 실패. 오류: %s." - -msgid "Fetch volume pool name failed." -msgstr "볼륨 풀 이름을 가져오는 데 실패했습니다." - -#, python-format -msgid "" -"FibreChannelDriver validate_connector failed. No '%(setting)s'. Make sure " -"HBA state is Online." -msgstr "" -"FibreChannelDriver validate_connector가 실패했습니다. '%(setting)s'가 없습니" -"다. HBA 상태가 온라인인지 확인하십시오." - -#, python-format -msgid "Flexvisor failed to get event %(volume)s (%(status)s)." -msgstr "" -"Flexviso에서 이벤트 %(volume)s (%(status)s)을(를) 가져오는 데 실패했습니다." - -#, python-format -msgid "Flexvisor failed to get pool %(id)s info." -msgstr "Flexvisor에서 풀 %(id)s 정보를 가져오는 데 실패했습니다." - -#, python-format -msgid "Flexvisor failed to get pool list due to %s." -msgstr "%s(으)로 인해 Flexvisor에서 풀 목록을 가져오는 데 실패했습니다." - -#, python-format -msgid "Flexvisor failed to get pool list.(Error: %d)" -msgstr "Flexvisor에서 풀 목록을 가져오는 데 실패했습니다(오류: %d)" - -#, python-format -msgid "Found %(count)s volumes mapped to id: %(uuid)s." -msgstr "id: %(uuid)s에 맵핑된 %(count)s개의 볼륨을 찾을 수 없습니다. " - -msgid "Free capacity not set: volume node info collection broken." -msgstr "여유 공간을 설정하지 않음: 볼륨 노드 정보 콜렉션이 중단되었습니다. " - -#, python-format -msgid "GPFS is not active. Detailed output: %s." 
-msgstr "GPFS가 활성이 아닙니다. 자세한 결과: %s." - -msgid "Get LUN migration error." -msgstr "LUN 마이그레이션 가져오기 오류." - -msgid "Get method error." -msgstr "메소드 가져오기 오류." - -#, python-format -msgid "Host PUT failed (%s)." -msgstr "호스트 PUT 실패(%s)." - -msgid "Host could not be found!" -msgstr "호스트를 찾을 수 없습니다." - -#, python-format -msgid "ISCSI discovery attempt failed for:%s" -msgstr "%s에 대한 ISCSI 검색 시도 실패" - -msgid "ISE FW version is not compatible with OpenStack!" -msgstr "ISE FW 버전이 OpenStack과 호환되지 않습니다." - -msgid "ISE globalid not set!" -msgstr "ISE globalid가 설정되지 않습니다." - -#, python-format -msgid "Image size %(img_size)dGB is larger than volume size %(vol_size)dGB." -msgstr "이미지 크기 %(img_size)dGB가 볼륨 크기 %(vol_size)dGB보다 큽니다." - -#, python-format -msgid "Invalid API object: %s" -msgstr "올바르지 않은 API 오브젝트: %s" - -#, python-format -msgid "Invalid JSON: %s" -msgstr "올바르지 않은 JSON: %s" - -#, python-format -msgid "Invalid ReplayList return: %s" -msgstr "올바르지 않은 ReplayList 리턴: %s" - -#, python-format -msgid "Invalid hostname %(host)s" -msgstr "올바르지 않은 호스트 이름 %(host)s" - -msgid "Invalid replication target specified for failover" -msgstr "장애 복구를 위해 올바르지 않은 복제 대상이 지정됨" - -msgid "" -"Issuing a fail-over failed because replication is not properly configured." -msgstr "복제가 적절하게 구성되지 않았으므로 장애 복구 실행에 실패했습니다." - -#, python-format -msgid "JSON encode params %(param)s error: %(status)s." -msgstr "JSON 인코드 매개 변수 %(param)s 오류: %(status)s." - -#, python-format -msgid "JSON transfer data error. %s." -msgstr "JSON 전송 데이터 오류. %s." - -#, python-format -msgid "JSON transfer error: %s." -msgstr "JSON 전송 오류: %s." - -#, python-format -msgid "LUN %(path)s geometry failed. Message - %(msg)s" -msgstr "LUN %(path)s 지오메트리에 실패했습니다. 메시지 - %(msg)s" - -msgid "LUN extend failed!" -msgstr "LUN 확장 실패" - -msgid "LUN unexport failed!" -msgstr "LUN 내보내기 취소 실패" - -#, python-format -msgid "" -"Location info needed for backend enabled volume migration not in correct " -"format: %s. Continuing with generic volume migration." 
-msgstr "" -"백엔드 사용 볼륨 마이그레이션에 필요한 위치 정보가 올바른 형식: %s이(가) 아닙" -"니다. 일반 볼륨 마이그레이션을 계속합니다." - -msgid "" -"Logging into the Datera cluster failed. Please check your username and " -"password set in the cinder.conf and start the cinder-volume service again." -msgstr "" -"Datera 클러스터에 로그인하는 데 실패했습니다. cinder.conf에 설정된 사용자 이" -"름과 비밀번호를 확인하고 cinder-volume 서비스를 다시 시작하십시오." - -#, python-format -msgid "" -"Login error. URL: %(url)s\n" -"Reason: %(reason)s." -msgstr "" -"로그인 오류. URL: %(url)s\n" -"이유: %(reason)s." - -#, python-format -msgid "Looks like masking view: %(maskingViewName)s has recently been deleted." -msgstr "마스킹 보기: %(maskingViewName)s이(가) 최근에 삭제된 것으로 보입니다." - -#, python-format -msgid "Lun %s has dependent snapshots, skipping lun deletion." -msgstr "Lun %s에 종속 스냅샷이 있으므로 lun 삭제를 건너뜁니다." - -#, python-format -msgid "Lun create for %s failed!" -msgstr "%s의 Lun 작성 실패" - -#, python-format -msgid "Lun create snapshot for volume %(vol)s snapshot %(snap)s failed!" -msgstr "볼륨 %(vol)s 스냅샷 %(snap)s의 Lun 작성 스냅샷 실패" - -#, python-format -msgid "Lun delete for %s failed!" -msgstr "%s의 Lun 삭제 실패" - -msgid "Lun mapping returned null!" -msgstr "Lun 맵핑에서 널(null)을 리턴했습니다!" - -#, python-format -msgid "MSGID%(id)04d-E: %(msg)s" -msgstr "MSGID%(id)04d-E: %(msg)s" - -msgid "Manage exist volume failed." -msgstr "기존 볼륨 관리에 실패했습니다." - -#, python-format -msgid "" -"Manager for service %(binary)s %(host)s is reporting problems, not sending " -"heartbeat. Service will appear \"down\"." -msgstr "" -"서비스 %(binary)s %(host)s의 관리자가 하트비트를 보내지 않고 문제점을 보고합" -"니다. 서비스가 \"작동 중지\"로 표시됩니다." - -#, python-format -msgid "" -"Masking View creation or retrieval was not successful for masking view " -"%(maskingViewName)s. Attempting rollback." -msgstr "" -"마스킹 보기 %(maskingViewName)s의 마스킹 보기 작성 또는 검색에 성공하지 못했" -"습니다. 롤백을 시도합니다." - -#, python-format -msgid "" -"Max retries reached deleting backup %(basename)s image of volume %(volume)s." 
-msgstr "" -"볼륨 %(volume)s의 백업 %(basename)s 이미지를 삭제하도록 허용된 최대 재시도 수" -"에 도달했습니다." - -#, python-format -msgid "Message: %s" -msgstr "메시지: %s" - -msgid "Model update failed." -msgstr "모델 업데이트에 실패했습니다." - -#, python-format -msgid "Modify volume PUT failed: %(name)s (%(status)d)." -msgstr "수정 볼륨 PUT 실패: %(name)s (%(status)d)." - -#, python-format -msgid "Mount failure for %(share)s after %(count)d attempts." -msgstr "%(count)d번의 시도 후에 %(share)s의 마운트 실패." - -#, python-format -msgid "Mount failure for %(share)s." -msgstr "%(share)s의 마운트 실패." - -#, python-format -msgid "Multiple replay profiles under name %s" -msgstr "%s(이)라는 이름의 여러 재생 프로파일" - -msgid "No CLI output for firmware version check" -msgstr "펌웨어 버전 확인을 위한 CLI 출력이 없음" - -#, python-format -msgid "No VIP configured for service %s" -msgstr "서비스 %s의 VIP가 구성되지 않음" - -#, python-format -msgid "" -"No action required. Volume: %(volumeName)s is already part of pool: %(pool)s." -msgstr "" -"작업이 필요하지 않습니다. 볼륨: %(volumeName)s이(가) 이미 풀: %(pool)s의 일부" -"가 아닙니다." - -#, python-format -msgid "" -"No action required. Volume: %(volumeName)s is already part of slo/workload " -"combination: %(targetCombination)s." -msgstr "" -"작업이 필요하지 않습니다. 볼륨: %(volumeName)s이(가) 이미 slo/워크로드 조합: " -"%(targetCombination)s의 일부입니다." - -#, python-format -msgid "" -"No snapshots found in database, but %(path)s has backing file " -"%(backing_file)s!" -msgstr "" -"데이터베이스에서 스냅샷을 찾을 수 없지만t %(path)s에 지원 파일 " -"%(backing_file)s이(가) 있습니다." - -#, python-format -msgid "Not able to configure PBM for vCenter server: %s" -msgstr "vCenter 서버의 PBM을 구성할 수 없음: %s" - -#, python-format -msgid "OSError: command is %(cmd)s." -msgstr "OSError: 명령이 %(cmd)s입니다." - -#, python-format -msgid "OSError: command is %s." -msgstr "OSError: 명령이 %s입니다." - -#, python-format -msgid "" -"One of the components of the original masking view %(maskingViewName)s " -"cannot be retrieved so please contact your system administrator to check " -"that the correct initiator(s) are part of masking." 
-msgstr "" -"원래 마스킹 보기 %(maskingViewName)s의 구성 요소 중 하나를 검색할 수 없으므" -"로, 올바른 개시자가 마스킹의 일부인지 확인하도록 시스템 관리자에게 요청하십시" -"오." - -#, python-format -msgid "" -"Only SLO/workload migration within the same SRP Pool is supported in this " -"version The source pool : %(sourcePoolName)s does not match the target " -"array: %(targetPoolName)s. Skipping storage-assisted migration." -msgstr "" -"이 버전에서는 동일한 SRP 풀의 SLO/워크로드 마이그레이션만 지원됩니다. 소스 " -"풀: %(sourcePoolName)s이(가) 대상 배열: %(targetPoolName)s과(와) 일치하지 않" -"습니다. 스토리지 지원 마이그레이션을 건너뜁니다." - -msgid "Only available volumes can be migrated between different protocols." -msgstr "서로 다른 프로토콜 간에 사용 가능한 볼륨만 마이그레이션할 수 있습니다." - -#, python-format -msgid "POST for host create failed (%s)!" -msgstr "호스트의 POST 작성에 실패했습니다(%s)." - -#, python-format -msgid "Pipe1 failed - %s " -msgstr "Pipe1 실패 - %s " - -#, python-format -msgid "Pipe2 failed - %s " -msgstr "Pipe2 실패 - %s " - -msgid "" -"Please check your xml for format or syntax errors. Please see documentation " -"for more details." -msgstr "" -"xml에서 형식 또는 구문 오류를 확인하십시오. 자세한 내용은 문서를 참조하십시" -"오." - -#, python-format -msgid "PoolName must be in the file %(fileName)s." -msgstr "PoolName은 파일 %(fileName)s에 있어야 합니다. " - -#, python-format -msgid "Prepare clone failed for %s." -msgstr "%s의 복제본 준비에 실패했습니다." - -msgid "Primary IP must be set!" -msgstr "기본 IP를 설정해야 합니다!" - -msgid "Problem cleaning incomplete backup operations." -msgstr "불완전한 백업 조작 정리 문제." - -#, python-format -msgid "Problem cleaning temp volumes and snapshots for backup %(bkup)s." -msgstr "백업 %(bkup)s의 임시 볼륨 및 스냅샷 정리 문제." - -#, python-format -msgid "Problem cleaning up backup %(bkup)s." -msgstr "백업 %(bkup)s 정리 문제." - -#, python-format -msgid "" -"Purity host %(host_name)s is managed by Cinder but CHAP credentials could " -"not be retrieved from the Cinder database." -msgstr "" -"Cinder에서 Purity 호스트 %(host_name)s을(를) 관리하지만 Cinder 데이터베이스에" -"서 CHAP 자격 증명을 검색할 수 없습니다." 
- -#, python-format -msgid "" -"Purity host %(host_name)s is not managed by Cinder and can't have CHAP " -"credentials modified. Remove IQN %(iqn)s from the host to resolve this issue." -msgstr "" -"Cinder에서 Purity 호스트 %(host_name)s을(를) 관리하지 않으므로 CHAP 자격 증명" -"을 수정할 수 없습니다. 이 문제를 해결하려면 호스트에서 IQN %(iqn)s을(를) 제거" -"하십시오." - -#, python-format -msgid "Qemu-img is not installed. OSError: command is %(cmd)s." -msgstr "Qemu-img가 설치되지 않았습니다. OSError: 명령이 %(cmd)s입니다." - -#, python-format -msgid "" -"Quota exceeded for %(s_pid)s, tried to extend volume by %(s_size)sG, " -"(%(d_consumed)dG of %(d_quota)dG already consumed)." -msgstr "" -"%(s_pid)s에 대한 할당량을 초과하여 %(s_size)sG만큼 볼륨을 확장하려고 시도, " -"(%(d_quota)dG 중 %(d_consumed)dG는 이미 사용됨)" - -#, python-format -msgid "REST Not Available: %s" -msgstr "REST를 사용할 수 없음: %s" - -#, python-format -msgid "Re-throwing Exception %s" -msgstr "%s 예외가 다시 발생" - -#, python-format -msgid "Read response raised an exception: %s." -msgstr "읽기 응답에서 예외 발생: %s." - -msgid "Recovered model server connection!" -msgstr "모델 서버 연결을 복구했습니다!" - -#, python-format -msgid "Recovering from a failed execute. Try number %s" -msgstr "실패한 실행에서 복구 중입니다. 번호 %s 시도" - -msgid "Replication must be specified as ' True' or ' False'." -msgstr "복제는 ' True'나 ' False'로 지정되어야 합니다." - -msgid "" -"Requested to setup thin provisioning, however current LVM version does not " -"support it." -msgstr "" -"씬 프로비저닝을 설정하도록 요청했지만 현재 LVM 버전에서 지원하지 않습니다." - -#, python-format -msgid "Resizing %s failed. Cleaning volume." -msgstr "%s 크기 조정에 실패했습니다. 볼륨을 정리합니다." - -#, python-format -msgid "Restore to volume %(volume)s finished with error - %(error)s." -msgstr "" -"볼륨 %(volume)s(으)로 복원이 완료되었지만 %(error)s 오류가 발생했습니다." - -#, python-format -msgid "Retry %(retry)s times: %(method)s Failed %(rc)s: %(reason)s" -msgstr "%(retry)s번 재시도: %(method)s 실패 %(rc)s: %(reason)s" - -#, python-format -msgid "Retype unable to find volume %s." -msgstr "재입력해도 볼륨 %s을(를) 찾을 수 없습니다." - -msgid "Retype volume error." 
-msgstr "볼륨 다시 입력 오류." - -msgid "Retype volume error. Create replication failed." -msgstr "" -"볼륨을 다시 입력하는 중에 오류가 발생했습니다. 복제 작성에 실패했습니다." - -msgid "Retype volume error. Delete replication failed." -msgstr "" -"볼륨을 다시 입력하는 중에 오류가 발생했습니다. 복제 삭제에 실패했습니다." - -#, python-format -msgid "" -"SLO: %(slo)s is not valid. Valid values are Bronze, Silver, Gold, Platinum, " -"Diamond, Optimized, NONE." -msgstr "" -"SLO: %(slo)s이(가) 올바르지 않습니다. 올바른 값은 Bronze, Silver, Gold, " -"Platinum, Diamond, Optimized, NONE입니다." - -msgid "" -"ScVolume returned success with empty payload. Attempting to locate volume" -msgstr "" -"ScVolume에서 빈 페이로드와 함께 성공을 리턴했습니다. 볼륨을 찾으려고 시도합니" -"다." - -#, python-format -msgid "Server Busy retry request: %s" -msgstr "서버 사용 중 재시도 요청: %s" - -msgid "Service not found for updating replication_status." -msgstr "replication_status를 업데이트하는 서비스를 찾을 수 없습니다." - -#, python-format -msgid "Setting QoS for %s failed" -msgstr "%s의 QoS 설정 실패" - -#, python-format -msgid "" -"Share %s ignored due to invalid format. Must be of form address:/export." -msgstr "" -"올바르지 않은 형식으로 인해 공유 %s이(가) 무시됩니다. format. address:/" -"export 형식이어야 합니다." - -#, python-format -msgid "Sheepdog is not installed. OSError: command is %s." -msgstr "Sheepdog가 설치되지 않았습니다. OSError: 명령이 %s입니다." - -#, python-format -msgid "" -"Skipping remove_export. No iscsi_target ispresently exported for volume: %s" -msgstr "" -"remove_export를 건너뜁니다. 현재 %s 볼륨에 대한 iscsi_target을 내보내지 않았" -"습니다. " - -#, python-format -msgid "Snapshot \"%s\" already exists." -msgstr "스냅샷 \"%s\"이(가) 이미 있습니다." - -#, python-format -msgid "" -"Snapshot \"%s\" not found. Please check the results of \"dog vdi list\"." -msgstr "" -"스냅샷 \"%s\"을(를) 찾을 수 없습니다. \"dog vdi list\"의 결과를 확인하십시오." - -#, python-format -msgid "Snapshot %(snapshotname)s not found on the array. No volume to delete." -msgstr "" -"배열에서 스냅샷 %(snapshotname)s을(를) 찾을 수 없습니다. 삭제할 볼륨이 없습니" -"다. 
" - -#, python-format -msgid "Snapshot %s: create failed" -msgstr "스냅샷 %s: 작성 실패" - -#, python-format -msgid "Snapshot %s: has clones" -msgstr "스냅샷 %s에 복제본이 있음" - -msgid "Snapshot did not exist. It will not be deleted" -msgstr "스냅샷이 없으므로 삭제되지 않음" - -#, python-format -msgid "" -"Source CG %(source_cg)s not found when creating consistency group %(cg)s " -"from source." -msgstr "" -"소스에서 일관성 그룹 %(cg)s을(를) 작성할 때 소스 CG CG %(source_cg)s을(를) " -"찾을 수 없습니다." - -#, python-format -msgid "Source snapshot %(snapshot_id)s cannot be found." -msgstr "소스 스냅샷 %(snapshot_id)s을(를) 찾을 수 없습니다." - -#, python-format -msgid "Source snapshot cannot be found for target volume %(volume_id)s." -msgstr "대상 볼륨 %(volume_id)s의 소스 스냅샷을 찾을 수 없습니다." - -#, python-format -msgid "Source volume %s not ready!" -msgstr "소스 볼륨 %s이(가) 준비되지 않았습니다." - -#, python-format -msgid "Source volumes cannot be found for target volume %(volume_id)s." -msgstr "대상 볼륨 %(volume_id)s의 소스 볼륨을 찾을 수 없습니다." - -#, python-format -msgid "" -"Src Volume \"%s\" not found. Please check the results of \"dog vdi list\"." -msgstr "" -"소스 볼륨 \"%s\"을(를) 찾을 수 없습니다. \"dog vdi list\"의 결과를 확인하십시" -"오." - -#, python-format -msgid "Start synchronization failed. Error: %s." -msgstr "동기화 시작 실패. 오류: %s." - -#, python-format -msgid "StdErr :%s" -msgstr "StdErr :%s" - -#, python-format -msgid "StdOut :%s" -msgstr "StdOut: %s" - -#, python-format -msgid "Storage Profile %s was not found." -msgstr "스토리지 프로파일 %s을(를) 찾을 수 없습니다. " - -#, python-format -msgid "Storage profile: %s cannot be found in vCenter." -msgstr "스토리지 프로파일: vCenter에서 %s을(를) 찾을 수 없습니다." - -#, python-format -msgid "TSM [%s] not found in CloudByte storage." -msgstr "CloudByte 스토리지에서 TSM [%s]을(를) 찾을 수 없습니다." - -msgid "The Flexvisor service is unavailable." -msgstr "Flexvisor 서비스를 사용할 수 없습니다." - -#, python-format -msgid "The NFS Volume %(cr)s does not exist." -msgstr "NFS 볼륨 %(cr)s이(가) 없습니다." - -msgid "The connector does not contain the required information." -msgstr "커넥터에 필수 정보가 없습니다." 
- -msgid "" -"The connector does not contain the required information: initiator is missing" -msgstr "커넥터에 필수 정보가 없음: 개시자가 누락되어 있음" - -msgid "" -"The connector does not contain the required information: wwpns is missing" -msgstr "커넥터에 필수 정보가 없음: wwpns가 누락되어 있음" - -#, python-format -msgid "" -"The primary array must have an API version of %(min_ver)s or higher, but is " -"only on %(current_ver)s, therefore replication is not supported." -msgstr "" -"기본 배열에 %(min_ver)s 이상의 API 버전이 있어야 하지만, %(current_ver)s만 있" -"으므로 복제가 지원되지 않습니다." - -#, python-format -msgid "" -"The replication mode of %(type)s has not successfully established " -"partnership with the replica Storwize target %(stor)s." -msgstr "" -"%(type)s의 복제 모드가 복제본 Storwize 대상 %(stor)s과(와) 성공적으로 파트너" -"십을 맺을 수 없습니다." - -msgid "The snapshot cannot be deleted because it is a clone point." -msgstr "스냅샷이 복제 지점이므로 삭제할 수 없습니다." - -#, python-format -msgid "" -"The source array : %(sourceArraySerialNumber)s does not match the target " -"array: %(targetArraySerialNumber)s skipping storage-assisted migration." -msgstr "" -"소스 배열: %(sourceArraySerialNumber)s이(가) 대상 배열: " -"%(targetArraySerialNumber)s과(와) 일치하지 않아 스토리지 지원 마이그레이션을 " -"건너뜁니다." - -#, python-format -msgid "" -"The source array : %(sourceArraySerialNumber)s does not match the target " -"array: %(targetArraySerialNumber)s, skipping storage-assisted migration." -msgstr "" -"소스 배열: %(sourceArraySerialNumber)s이(가) 대상 배열: " -"%(targetArraySerialNumber)s과(와) 일치하지 않아 스토리지 지원 마이그레이션을 " -"건너뜁니다." - -#, python-format -msgid "The source volume %(volume_id)s cannot be found." -msgstr "소스 볼륨 %(volume_id)s을(를) 찾을 수 없습니다." - -#, python-format -msgid "The volume driver requires %(data)s in the connector." -msgstr "볼륨 드라이버는 커넥터에 %(data)s이(가) 필요합니다." - -msgid "The volume driver requires the iSCSI initiator name in the connector." -msgstr "볼륨 드라이버는 커넥터에 iSCSI 개시자 이름이 필요합니다. " - -#, python-format -msgid "There are no valid hosts available in configured cluster(s): %s." 
-msgstr "구성된 클러스터에서 사용할 수 있는 올바른 호스트가 없음: %s." - -#, python-format -msgid "There is no valid datastore satisfying requirements: %s." -msgstr "요구 사항을 만족하는 올바른 데이터 저장소가 없음: %s." - -msgid "There must be at least one valid replication device configured." -msgstr "하나 이상의 올바른 복제 장치가 구성되어야 합니다." - -#, python-format -msgid "" -"There was a problem with the failover (%(error)s) and it was unsuccessful. " -"Volume '%(volume)s will not be available on the failed over target." -msgstr "" -"장애 복구에 문제점이 있어(%(error)s) 실패했습니다. 장애 복구된 대상에서 볼륨 " -"'%(volume)s을(를) 사용할 수 없습니다." - -#, python-format -msgid "There was an error deleting volume %(id)s: %(error)s." -msgstr "볼륨 %(id)s 삭제 오류: %(error)s." - -msgid "This usually means the volume was never successfully created." -msgstr "일반적으로 볼륨이 성공적으로 작성된 적이 없음을 나타냅니다." - -msgid "Tiering Policy is not supported on this array." -msgstr "이 배열에서 계층 지정 정책이 지원되지 않습니다." - -#, python-format -msgid "Timed out deleting %s!" -msgstr "%s을(를) 삭제하는 중에 제한시간이 종료되었습니다." - -#, python-format -msgid "Trying to create snapshot by non-existent LV: %s" -msgstr "존재하지 않는 LV에서 스냅샷을 작성하려고 시도: %s" - -#, python-format -msgid "URLError: %s" -msgstr "URLError: %s" - -#, python-format -msgid "Unable to access the Storwize back-end for volume %s." -msgstr "볼륨 %s의 Storwize 백엔드에 액세스할 수 없습니다." - -#, python-format -msgid "Unable to create folder path %s" -msgstr "폴더 경로 %s을(를) 작성할 수 없음" - -#, python-format -msgid "" -"Unable to create or get default storage group for FAST policy: " -"%(fastPolicyName)s." -msgstr "" -"FAST 정책 %(fastPolicyName)s에 대한 기본 스토리지 그룹을 작성하거나 확인하지 " -"못했습니다." - -#, python-format -msgid "Unable to create volume %s from replay" -msgstr "재생에서 볼륨 %s을(를) 작성할 수 없습니다." - -#, python-format -msgid "Unable to create volume on SC: %s" -msgstr "SC에서 볼륨을 작성할 수 없음: %s" - -#, python-format -msgid "Unable to create volume. Volume driver %s not initialized" -msgstr "볼륨을 작성할 수 없습니다. 볼륨 드라이버 %s이(가) 초기화되지 않음" - -msgid "Unable to delete busy volume." 
-msgstr "사용 중인 볼륨을 삭제할 수 없습니다." - -#, python-format -msgid "Unable to delete due to existing snapshot for volume: %s" -msgstr "볼륨의 기존 스냅샷으로 인해 삭제할 수 없음: %s" - -#, python-format -msgid "Unable to delete profile %s." -msgstr "%s 프로파일을 삭제할 수 없습니다." - -#, python-format -msgid "Unable to delete replication for %(vol)s to %(dest)s." -msgstr "%(dest)s(으)로 복제된 %(vol)s을(를) 삭제할 수 없습니다." - -msgid "" -"Unable to delete the destination volume during volume migration, (NOTE: " -"database record needs to be deleted)." -msgstr "" -"볼륨 마이그레이션 중에 대상 볼륨을 삭제할 수 없습니다(참고: 데이터베이스 레코" -"드를 삭제해야 함)." - -#, python-format -msgid "Unable to determine whether %(volumeName)s is composite or not." -msgstr "%(volumeName)s의 복합 여부를 판별할 수 없습니다." - -msgid "Unable to disconnect host from volume, could not determine Purity host" -msgstr "" -"볼륨에서 호스트의 연결을 해제할 수 없으므로, Purity 호스트를 판별할 수 없음" - -msgid "" -"Unable to failover to the secondary. Please make sure that the secondary " -"back-end is ready." -msgstr "" -"보조로 장애 복구할 수 없습니다. 보조 백엔드가 준비되었는지 확인하십시오." - -#, python-format -msgid "Unable to find VG: %s" -msgstr "VG에서 찾을 수 없습니다: %s" - -#, python-format -msgid "" -"Unable to find default storage group for FAST policy : %(fastPolicyName)s." -msgstr "" -"FAST 정책 %(fastPolicyName)s에 대한 기본 스토리지 그룹을 찾을 수 없습니다." - -#, python-format -msgid "Unable to find disk folder %(name)s on %(ssn)s" -msgstr "%(ssn)s에서 디스크 폴더 %(name)s을(를) 찾을 수 없음" - -#, python-format -msgid "Unable to find mapping profiles: %s" -msgstr "맵핑 프로파일을 찾을 수 없음: %s" - -#, python-format -msgid "Unable to find or create QoS Node named %s" -msgstr "%s(이)라는 QoS 노드를 찾거나 작성할 수 없음" - -#, python-format -msgid "Unable to find service: %(service)s for given host: %(host)s." -msgstr "제공된 호스트: %(host)s의 서비스: %(service)s을(를) 찾을 수 없습니다." - -msgid "Unable to get associated pool of volume." -msgstr "볼륨의 연관된 풀을 가져올 수 없습니다." - -#, python-format -msgid "Unable to get default storage group %(defaultSgName)s." -msgstr "기본 스토리지 그룹 %(defaultSgName)s을(를) 가져올 수 없습니다." 
- -msgid "Unable to get device mapping from network." -msgstr "네트워크에서 장치 맵핑을 가져올 수 없습니다." - -#, python-format -msgid "Unable to get policy rule for fast policy: %(fastPolicyName)s." -msgstr "빠른 정책: %(fastPolicyName)s의 정책 규칙을 가져올 수 없습니다." - -#, python-format -msgid "Unable to locate Volume Group %s" -msgstr "볼륨그룹 %s를 찾을 수 없음" - -#, python-format -msgid "Unable to locate snapshot %s" -msgstr "스냅샷 %s을(를) 찾을 수 없음" - -#, python-format -msgid "Unable to manage existing snapshot. Volume driver %s not initialized." -msgstr "" -"기존 스냅샷을 관리할 수 없습니다. 볼륨 드라이버 %s이(가) 초기화되지 않았습니" -"다." - -#, python-format -msgid "Unable to manage existing volume. Volume driver %s not initialized." -msgstr "" -"기존 볼륨을 관리할 수 없습니다. 볼륨 드라이버 %s이(가) 초기화되지 않았습니다." - -#, python-format -msgid "Unable to map %(vol)s to %(srv)s" -msgstr "%(vol)s을(를) %(srv)s에 맵핑할 수 없음" - -#, python-format -msgid "Unable to rename lun %s on array." -msgstr "배열에서 lun %s의 이름을 바꿀 수 없습니다." - -#, python-format -msgid "Unable to rename the logical volume for volume %s." -msgstr "볼륨 %s의 논리 볼륨 이름을 변경할 수 없습니다." - -#, python-format -msgid "Unable to rename the logical volume for volume: %s" -msgstr "볼륨의 논리 볼륨 이름을 변경할 수 없음: %s" - -#, python-format -msgid "Unable to replicate %(volname)s to %(destsc)s" -msgstr "%(volname)s을(를) %(destsc)s에 복제할 수 없음" - -#, python-format -msgid "Unable to retrieve VolumeConfiguration: %s" -msgstr "VolumeConfiguration을 찾을 수 없음: %s" - -#, python-format -msgid "Unable to retrieve pool instance of %(poolName)s on array %(array)s." -msgstr "배열 %(array)s에서 %(poolName)s의 풀 인스턴스를 검색할 수 없습니다." - -#, python-format -msgid "Unable to terminate volume connection: %(err)s." -msgstr "볼륨 연결을 종료할 수 없음: %(err)s" - -msgid "Unexpected build error:" -msgstr "예상치 못한 빌드 오류:" - -msgid "Unexpected error occurs in horcm." -msgstr "horcm에서 예상치 못한 오류가 발생합니다." - -msgid "Unexpected error occurs in snm2." -msgstr "snm2에서 예상치 못한 오류가 발생합니다." 
- -#, python-format -msgid "Unexpected error when retype() revert tried to deleteVolumeSet(%s)" -msgstr "" -"retype() 되돌리기에서 VolumeSet(%s)을(를) 삭제하는 중에 예상치 못한 오류 발생" - -#, python-format -msgid "Unexpected error when retype() tried to deleteVolumeSet(%s)" -msgstr "retype()에서 VolumeSet(%s)을(를) 삭제하는 중에 예상치 못한 오류 발생" - -#, python-format -msgid "Unexpected error while invoking web service. Error - %s." -msgstr "웹 서비스를 호출하는 중에 예상치 못한 오류가 발생했습니다. 오류 - %s." - -#, python-format -msgid "Unexpected exception during cache cleanup of snapshot %s" -msgstr "스냅샷 %s의 캐시 정리 중에 예상치 못한 예외 발생" - -#, python-format -msgid "Unknown exception in post clone resize LUN %s." -msgstr "사후 복제 크기 조정 LUN %s에서 알 수 없는 예외가 발생했습니다." - -#, python-format -msgid "Unrecognized Login Response: %s" -msgstr "인식되지 않은 로그인 응답: %s" - -#, python-format -msgid "" -"Update consistency group failed to add volume-%(volume_id)s: VolumeNotFound." -msgstr "" -"일관성 그룹을 업데이트하는 중 볼륨-%(volume_id)s 추가 실패: VolumeNotFound." - -#, python-format -msgid "" -"Update consistency group failed to remove volume-%(volume_id)s: " -"VolumeNotFound." -msgstr "" -"일관성 그룹을 업데이트하는 중 볼륨-%(volume_id)s 제거 실패: VolumeNotFound." - -msgid "Update snapshot usages failed." -msgstr "스냅샷 사용법을 업데이트하는 데 실패했습니다." - -msgid "Update volume model for transfer operation failed." -msgstr "전송 조작을 위한 볼륨 모델 업데이트에 실패했습니다." - -#, python-format -msgid "Upload volume to image encountered an error (image-id: %(image_id)s)." -msgstr "이미지에 볼륨을 업로드하는 중 오류 발생(image-id: %(image_id)s)." - -msgid "User does not have permission to change Storage Profile selection." -msgstr "사용자에게 스토리지 프로파일 선택을 변경할 권한이 없습니다." - -msgid "VGC-CLUSTER command blocked and cancelled." -msgstr "VGC-CLUSTER 명령이 차단되어 취소되었습니다." - -#, python-format -msgid "Version string '%s' is not parseable" -msgstr "버전 문자열 '%s'을(를) 구문 분석할 수 없음" - -#, python-format -msgid "Virtual Volume Set %s does not exist." -msgstr "가상 볼륨 세트 %s이(가) 없습니다." - -#, python-format -msgid "Virtual disk device of backing: %s not found." 
-msgstr "지원 가상 디스크 장치: %s을(를) 찾을 수 없습니다." - -#, python-format -msgid "Vol copy job status %s." -msgstr "볼륨 복사 작업 상태 %s." - -#, python-format -msgid "Volume \"%s\" not found. Please check the results of \"dog vdi list\"." -msgstr "" -"볼륨 \"%s\"을(를) 찾을 수 없습니다. \"dog vdi list\"의 결과를 확인하십시오." - -#, python-format -msgid "" -"Volume %(name)s is not suitable for storage assisted migration using retype." -msgstr "" -"볼륨 %(name)s이(가) 재입력을 사용하는 스토리지 지원 마이그레이션에 적합하지 " -"않습니다." - -#, python-format -msgid "Volume %(name)s not found on the array." -msgstr "배열에서 볼륨 %(name)s을(를) 찾을 수 없습니다." - -#, python-format -msgid "Volume %(name)s not found on the array. No volume to delete." -msgstr "배열에서 %(name)s 볼륨을 찾을 수 없습니다. 삭제할 볼륨이 없습니다. " - -#, python-format -msgid "" -"Volume %(name)s not found on the array. No volume to migrate using retype." -msgstr "" -"배열에서 %(name)s 볼륨을 찾을 수 없습니다. 다시 입력을 사용하여 마이그레이션" -"할 볼륨이 없습니다. " - -#, python-format -msgid "Volume %(vol)s in the consistency group could not be deleted." -msgstr "일관성 그룹의 볼륨 %(vol)s을(를) 삭제할 수 없습니다." - -#, python-format -msgid "" -"Volume %(volumeid)s failed to send assign command, ret: %(status)s output: " -"%(output)s" -msgstr "" -"볼륨 %(volumeid)s에서 지정 명령을 보내는 데 실패, ret: %(status)s 출력: " -"%(output)s" - -#, python-format -msgid "Volume %s doesn't exist on array." -msgstr "배열에 %s 볼륨이 없습니다." - -#, python-format -msgid "Volume %s, not found on SF Cluster." -msgstr "SF 클러스터에서 %s 볼륨을 찾을 수 없습니다. " - -#, python-format -msgid "Volume %s: create failed" -msgstr "볼륨 %s: 작성 실패" - -#, python-format -msgid "" -"Volume %s: driver error when trying to retype, falling back to generic " -"mechanism." -msgstr "" -"다시 입력하려고 할 때 볼륨 %s: 드라이버 오류 발생, 일반 메커니즘으로 장애 복" -"구." - -#, python-format -msgid "Volume %s: manage failed." -msgstr "볼륨 %s: 관리 실패." - -#, python-format -msgid "Volume %s: rescheduling failed" -msgstr "볼륨 %s: 재스케줄링 실패" - -#, python-format -msgid "Volume %s: update volume state failed." -msgstr "볼륨 %s: 볼륨 상태 업데이트 실패." 
- -#, python-format -msgid "" -"Volume : %(volumeName)s has not been added to target storage group " -"%(storageGroup)s." -msgstr "" -"대상 스토리지 그룹 %(storageGroup)s에 볼륨: %(volumeName)s이(가) 추가되지 않" -"았습니다." - -#, python-format -msgid "" -"Volume : %(volumeName)s has not been removed from source storage group " -"%(storageGroup)s." -msgstr "" -"소스 스토리지 그룹 %(storageGroup)s에서 볼륨: %(volumeName)s이(가) 제거되지 " -"않았습니다." - -#, python-format -msgid "" -"Volume : %(volumeName)s. was not successfully migrated to target pool " -"%(targetPoolName)s." -msgstr "" -"볼륨: %(volumeName)s이(가) 대상 풀 %(targetPoolName)s에 성공적으로 마이그레이" -"션되지 않았습니다." - -#, python-format -msgid "" -"Volume ID %s was not found on the SolidFire Cluster while attempting " -"accept_transfer operation!" -msgstr "" -"accept_transfer 조작을 시도하는 중에 SolidFire Cluster에서 볼륨 ID %s을(를) " -"찾을 수 없습니다." - -#, python-format -msgid "" -"Volume ID %s was not found on the SolidFire Cluster while attempting " -"attach_volume operation!" -msgstr "" -"attach_volume 조작을 시도하는 중에 SolidFire Cluster에서 볼륨 ID %s을(를) 찾" -"을 수 없습니다." - -#, python-format -msgid "" -"Volume ID %s was not found on the SolidFire Cluster while attempting " -"delete_volume operation!" -msgstr "" -"delete_volume 조작을 시도하는 중에 SolidFire Cluster에서 볼륨 ID %s을(를) 찾" -"을 수 없습니다." - -#, python-format -msgid "" -"Volume ID %s was not found on the SolidFire Cluster while attempting " -"detach_volume operation!" -msgstr "" -"detach_volume 조작을 시도하는 중에 SolidFire Cluster에서 볼륨 ID %s을(를) 찾" -"을 수 없습니다." - -#, python-format -msgid "" -"Volume ID %s was not found on the SolidFire Cluster while attempting " -"extend_volume operation!" -msgstr "" -"extend_volume 조작을 시도하는 중에 SolidFire Cluster에서 볼륨 ID %s을(를) 찾" -"을 수 없습니다." - -#, python-format -msgid "" -"Volume ID %s was not found on the zfssa device while attempting " -"delete_volume operation." -msgstr "" -"delete_volume 조작을 시도하는 동안 zfssa 장치에서 볼륨 ID %s을(를) 찾을 수 없" -"습니다." - -#, python-format -msgid "Volume already exists. 
%s" -msgstr "볼륨이 이미 있습니다. %s" - -#, python-format -msgid "" -"Volume creation failed, deleting created snapshot %(volume_name)s@%(name)s" -msgstr "볼륨 작성에 실패하여, 작성된 스냅샷 %(volume_name)s@%(name)s 삭제" - -#, python-format -msgid "Volume creation failed, deleting created snapshot %s" -msgstr "볼륨 작성에 실패하여, 작성된 스냅샷 %s 삭제" - -msgid "Volume did not exist. It will not be deleted" -msgstr "볼륨이 없어 삭제되지 않음" - -#, python-format -msgid "Volume driver %s not initialized" -msgstr "볼륨 드라이버 %s이(가) 초기화되지 않음" - -msgid "Volume in unexpected state" -msgstr "예상치 못한 상태의 볼륨" - -#, python-format -msgid "Volume in unexpected state %s, expected awaiting-transfer" -msgstr "예상치 못한 상태 %s의 볼륨, 예상된 상태는 전송 대기임" - -#, python-format -msgid "Volume migration failed due to exception: %(reason)s." -msgstr "예외로 인해 볼륨 마이그레이션에 실패: %(reason)s." - -msgid "Volume must be detached for clone operation." -msgstr "복제 조작을 수행하려면 볼륨의 연결을 해제해야 합니다." - -#, python-format -msgid "Volume size \"%sG\" is too large." -msgstr "볼륨 크기 \"%sG\"이(가) 너무 큽니다." - -#, python-format -msgid "VolumeType %s deletion failed, VolumeType in use." -msgstr "VolumeType %s 삭제 실패, VolumeType이 사용 중입니다." - -#, python-format -msgid "" -"WebDAV operation failed with error code: %(code)s reason: %(reason)s Retry " -"attempt %(retry)s in progress." -msgstr "" -"WebDAV 조작에 실패, 오류 코드: %(code)s 이유: %(reason)s 재시도 시도 " -"%(retry)s이(가) 진행 중입니다." - -#, python-format -msgid "WebDAV returned with %(code)s error during %(method)s call." -msgstr "%(method)s 호출 중에 WebDAV에서 %(code)s을(를) 리턴했습니다." - -#, python-format -msgid "" -"Workload: %(workload)s is not valid. Valid values are DSS_REP, DSS, OLTP, " -"OLTP_REP, NONE." -msgstr "" -"워크로드: %(workload)s이(가) 올바르지 않습니다. 올바른 값은 DSS_REP, DSS, " -"OLTP, OLTP_REP, NONE입니다." - -msgid "_check_version_fail: Parsing error." -msgstr "_check_version_fail: 구문 분석 오류." 
- -msgid "_find_mappings: volume is not active" -msgstr "_find_mappings: 볼륨이 활성이 아님" - -#, python-format -msgid "" -"_rm_vdisk_copy_op: Volume %(vol)s does not have the specified vdisk copy " -"operation: orig=%(orig)s new=%(new)s." -msgstr "" -"_rm_vdisk_copy_op: 볼륨 %(vol)s에 지정된 vdisk 복사 조작이 없음: orig=" -"%(orig)s new=%(new)s." - -#, python-format -msgid "" -"_rm_vdisk_copy_op: Volume %(vol)s metadata does not have the specified vdisk " -"copy operation: orig=%(orig)s new=%(new)s." -msgstr "" -"_rm_vdisk_copy_op: 볼륨 %(vol)s 메타데이터에 지정된 vdisk 복사 조작이 없음: " -"orig=%(orig)s new=%(new)s." - -#, python-format -msgid "" -"_rm_vdisk_copy_op: Volume %s does not have any registered vdisk copy " -"operations." -msgstr "_rm_vdisk_copy_op: 볼륨 %s에 등록된 vdisk 복사 조작이 없습니다." - -#, python-format -msgid "" -"_rm_vdisk_copy_op: Volume metadata %s does not have any registered vdisk " -"copy operations." -msgstr "" -"_rm_vdisk_copy_op: 볼륨 메타데이터 %s에 등록된 vdisk 복사 조작이 없습니다." - -#, python-format -msgid "" -"_unmap_vdisk_from_host: No mapping of volume %(vol_name)s to host " -"%(host_name)s found." -msgstr "" -"_unmap_vdisk_from_host: 볼륨 %(vol_name)s과(와) 호스트 %(host_name)s의 맵핑" -"을 찾을 수 없습니다." - -#, python-format -msgid "_wait_for_job_complete failed after %(retries)d tries." -msgstr "_wait_for_job_complete가 %(retries)d번의 재시도 후에 실패했습니다." - -#, python-format -msgid "_wait_for_job_complete, failed after %(retries)d tries." -msgstr "_wait_for_job_complete가 %(retries)d번의 재시도 후에 실패했습니다." - -#, python-format -msgid "_wait_for_sync failed after %(retries)d tries." -msgstr "_wait_for_sync가 %(retries)d번의 재시도 후에 실패했습니다." - -#, python-format -msgid "" -"backup: %(vol_id)s failed to remove backup hardlink from %(vpath)s to " -"%(bpath)s.\n" -"stdout: %(out)s\n" -" stderr: %(err)s." -msgstr "" -"백업: %(vol_id)s이(가) %(vpath)s에서 %(bpath)s으로 백업 하드 링크를 제거하지 " -"못했습니다.\n" -"stdout: %(out)s\n" -" stderr: %(err)s." 
- -#, python-format -msgid "can't create 2 volumes with the same name, %s" -msgstr "동일한 이름 %s의 두 볼륨을 작성할 수 없음" - -msgid "cinder-rtstool is not installed correctly" -msgstr "cinder-rtstool이 올바르게 설치되지 않음" - -#, python-format -msgid "" -"delete: %(vol_id)s failed with stdout: %(out)s\n" -" stderr: %(err)s" -msgstr "" -"삭제: %(vol_id)s이(가) stdout에 실패함. stdout: %(out)s\n" -" stderr: %(err)s" - -#, python-format -msgid "ensure_export: Volume %s not found on storage." -msgstr "ensure_export: 스토리지에 볼륨 %s을(를) 찾을 수 없습니다." - -#, python-format -msgid "error opening rbd image %s" -msgstr "rbd 이미지 %s 열기 오류" - -msgid "error refreshing volume stats" -msgstr "볼륨 상태를 새로 고치는 중 오류 발생" - -msgid "horcm command timeout." -msgstr "horcm 명령의 제한시간이 초과되었습니다." - -msgid "import pywbem failed!! pywbem is necessary for this volume driver." -msgstr "" -"pywbem 가져오기에 실패했습니다. 이 볼륨 드라이버에 pywbem이 필요합니다." - -#, python-format -msgid "" -"initialize_connection: Failed to collect return properties for volume " -"%(vol)s and connector %(conn)s." -msgstr "" -"initialize_connection: %(vol)s 볼륨 및 %(conn)s 커넥터에 대한 리턴 특성을 수" -"집하지 못했습니다." - -#, python-format -msgid "" -"initialize_connection: Failed to collect return properties for volume " -"%(vol)s and connector %(conn)s.\n" -msgstr "" -"initialize_connection: %(vol)s 볼륨 및 %(conn)s 커넥터에 대한 리턴 특성을 수" -"집하지 못함.\n" - -msgid "iscsi_ip_address must be set!" -msgstr "iscsi_ip_address를 설정해야 합니다." - -msgid "manage_existing: No source-name in ref!" -msgstr "manage_existing: ref에 source-name이 없습니다." - -#, python-format -msgid "manage_existing_get_size: %s does not exist!" -msgstr "manage_existing_get_size: %s이(가) 없습니다." - -msgid "manage_existing_get_size: No source-name in ref!" -msgstr "manage_existing_get_size: ref에 source-name이 없습니다." - -msgid "model server went away" -msgstr "모델 서버가 사라졌음" - -#, python-format -msgid "modify volume: %s does not exist!" -msgstr "수정 볼륨: %s(이)가 없습니다." - -msgid "san ip must be configured!" -msgstr "san ip를 구성해야 합니다." 
- -msgid "san_login must be configured!" -msgstr "san_login을 구성해야 합니다." - -msgid "san_password must be configured!" -msgstr "san_password를 구성해야 합니다." - -#, python-format -msgid "single_user auth mode enabled, but %(param)s not set" -msgstr "single_user 인증 모드를 사용하지만 %(param)s이(가) 설정되지 않음" - -msgid "snm2 command timeout." -msgstr "snm2 명령의 제한시간이 초과되었습니다." - -msgid "" -"storwize_svc_multihostmap_enabled is set to False, not allowing multi host " -"mapping." -msgstr "" -"storwize_svc_multihostmap_enabled가 False로 설정되어 있어 다중 호스트 맵핑이 " -"허용되지 않습니다." - -#, python-format -msgid "unmanage: Volume %s does not exist!" -msgstr "관리 취소: %s 볼륨이 없습니다." - -msgid "zfssa_initiator cannot be empty when creating a zfssa_initiator_group." -msgstr "" -"zfssa_initiator_group을(를) 작성할 때 zfssa_initiator가 비어 있지 않아야 합니" -"다." - -msgid "" -"zfssa_replication_ip not set in cinder.conf. zfssa_replication_ip is needed " -"for backend enabled volume migration. Continuing with generic volume " -"migration." -msgstr "" -"cinder.conf. zfssa_replication_ip에 설정되지 않은 zfssa_replication_ip가 백엔" -"드 사용 볼륨 마이그레이션에 필요합니다. 일반 볼륨 마이그레이션을 계속합니다." diff --git a/cinder/locale/ko_KR/LC_MESSAGES/cinder-log-info.po b/cinder/locale/ko_KR/LC_MESSAGES/cinder-log-info.po deleted file mode 100644 index 683cdede0..000000000 --- a/cinder/locale/ko_KR/LC_MESSAGES/cinder-log-info.po +++ /dev/null @@ -1,3211 +0,0 @@ -# Translations template for cinder. -# Copyright (C) 2015 ORGANIZATION -# This file is distributed under the same license as the cinder project. -# -# Translators: -# OpenStack Infra , 2015. #zanata -# Andreas Jaeger , 2016. 
#zanata -msgid "" -msgstr "" -"Project-Id-Version: cinder 9.0.0.0rc2.dev178\n" -"Report-Msgid-Bugs-To: https://bugs.launchpad.net/openstack-i18n/\n" -"POT-Creation-Date: 2016-10-06 03:18+0000\n" -"MIME-Version: 1.0\n" -"Content-Type: text/plain; charset=UTF-8\n" -"Content-Transfer-Encoding: 8bit\n" -"PO-Revision-Date: 2016-03-26 07:43+0000\n" -"Last-Translator: SeYeon Lee \n" -"Language: ko-KR\n" -"Plural-Forms: nplurals=1; plural=0;\n" -"Generated-By: Babel 2.0\n" -"X-Generator: Zanata 3.7.3\n" -"Language-Team: Korean (South Korea)\n" - -#, python-format -msgid "\t%(name)-35s : %(value)s" -msgstr "\t%(name)-35s : %(value)s" - -#, python-format -msgid "\t%(param)-35s : %(value)s" -msgstr "\t%(param)-35s : %(value)s" - -#, python-format -msgid "\t%(prefix)-35s : %(version)s" -msgstr "\t%(prefix)-35s : %(version)s" - -#, python-format -msgid "\t%(request)-35s : %(value)s" -msgstr "\t%(request)-35s : %(value)s" - -#, python-format -msgid "" -"\n" -"\n" -"\n" -"\n" -"Request URL: %(url)s\n" -"\n" -"Call Method: %(method)s\n" -"\n" -"Request Data: %(data)s\n" -"\n" -"Response Data:%(res)s\n" -"\n" -msgstr "" -"\n" -"\n" -"\n" -"\n" -"요청 URL: %(url)s\n" -"\n" -"호출 메소드: %(method)s\n" -"\n" -"요청 데이터: %(data)s\n" -"\n" -"응답 데이터:%(res)s\n" -"\n" - -#, python-format -msgid "%(method)s %(url)s" -msgstr "%(method)s %(url)s" - -#, python-format -msgid "%(url)s returned a fault: %(e)s" -msgstr "%(url)s이(가) 결함을 리턴함: %(e)s" - -#, python-format -msgid "%(url)s returned with HTTP %(status)d" -msgstr "%(url)s이(가) HTTP %(status)d(으)로 리턴되었음" - -#, python-format -msgid "%(volume)s assign type fibre_channel, properties %(properties)s" -msgstr "%(volume)s에서 fibre_channel 유형 할당, 특성 %(properties)s" - -#, python-format -msgid "%s is already umounted" -msgstr "%s이(가) 이미 umount됨" - -#, python-format -msgid "3PAR driver cannot perform migration. Retype exception: %s" -msgstr "" -"3PAR 드라이버에서 마이그레이션을 수행할 수 없습니다. 
다시 입력 예외: %s" - -#, python-format -msgid "3PAR vlun %(name)s not found on host %(host)s" -msgstr "3PAR vlun %(name)s을(를) %(host)s 호스트에서 찾을 수 없음" - -#, python-format -msgid "" -"3PAR vlun for volume '%(name)s' was deleted, but the host '%(host)s' was not " -"deleted because: %(reason)s" -msgstr "" -"볼륨 '%(name)s' 의 3PAR vlun이 삭제되었지만, 다음 이유로 인해 호스트 " -"'%(host)s'이(가) 삭제됨: %(reason)s" - -#, python-format -msgid "AUTH properties: %s." -msgstr "AUTH 특성: %s." - -#, python-format -msgid "Accepting transfer %s" -msgstr "전송 %s 승인" - -msgid "Activate Flexvisor cinder volume driver." -msgstr "Flexvisor cinder 볼륨 드라이버를 활성화합니다." - -msgid "Add connection: finished iterating over all target list" -msgstr "연결 추가: 모든 대상 목록 반복 완료" - -#, python-format -msgid "Add volume response: %s" -msgstr "볼륨 응답 추가: %s" - -#, python-format -msgid "Added %s to cg." -msgstr "%s이(가) cg에 추가되었습니다." - -#, python-format -msgid "Added volume: %(volumeName)s to existing storage group %(sgGroupName)s." -msgstr "" -"볼륨 %(volumeName)s이(가) 기존 스토리지 그룹 %(sgGroupName)s에 추가되었습니" -"다." - -#, python-format -msgid "Adding ACL to volume=%(vol)s with initiator group name %(igrp)s" -msgstr "개시자 그룹 이름 %(igrp)s(으)로 볼륨=%(vol)s에 ACL 추가" - -#, python-format -msgid "Adding volume %(v)s to consistency group %(cg)s." -msgstr "일관성 그룹 %(cg)s에 볼륨 %(v)s 추가" - -#, python-format -msgid "" -"Adding volume: %(volumeName)s to default storage group for FAST policy: " -"%(fastPolicyName)s." -msgstr "" -"FAST 정책: %(fastPolicyName)s의 기본 스토리지 그룹에 볼륨: %(volumeName)s 추" -"가." - -#, python-format -msgid "Adding volumes to cg %s." -msgstr "cg %s에 볼륨 추가." - -#, python-format -msgid "Already mounted: %s" -msgstr "이미 마운트됨: %s" - -msgid "Attach volume completed successfully." -msgstr "볼륨 연결이 성공적으로 완료되었습니다." - -#, python-format -msgid "" -"Automatically selected %(binary)s RPC version %(version)s as minimum service " -"version." -msgstr "" -"자동으로 %(binary)s RPC 버전 %(version)s을(를) 최소 서비스 버전으로 선택했습" -"니다." 
- -#, python-format -msgid "" -"Automatically selected %(binary)s objects version %(version)s as minimum " -"service version." -msgstr "" -"자동으로 %(binary)s 오브젝트 버전 %(version)s을(를) 최소 서비스 버전으로 선택" -"했습니다." - -msgid "Availability Zones retrieved successfully." -msgstr "가용 구역이 성공적으로 검색되었습니다." - -#, python-format -msgid "Backend name is %s." -msgstr "백엔드 이름이 %s입니다." - -#, python-format -msgid "Backing VM: %(backing)s renamed to %(new_name)s." -msgstr "백업 VM: %(backing)s의 이름이 %(new_name)s(으)로 변경되었습니다." - -msgid "Backing not available, no operation to be performed." -msgstr "백업을 사용할 수 없습니다. 작업이 수행되지 않습니다." - -#, python-format -msgid "Backing not found, creating for volume: %s" -msgstr "백업을 찾을 수 없으므로, 볼륨 작성: %s" - -#, python-format -msgid "" -"Backup base image of volume %(volume)s still has %(snapshots)s snapshots so " -"skipping base image delete." -msgstr "" -"볼륨 %(volume)s의 백업 기본 이미지에 여전히 %(snapshots)s 스냅샷이 있으므로, " -"기본 이미지 삭제를 건너뜁니다." - -#, python-format -msgid "" -"Backup image of volume %(volume)s is busy, retrying %(retries)s more time(s) " -"in %(delay)ss." -msgstr "" -"볼륨 %(volume)s의 백업 이미지가 사용 중입니다. %(delay)ss 안에 %(retries)s" -"번 더 재시도" - -#, python-format -msgid "Backup service: %s." -msgstr "백업 서비스: %s." - -#, python-format -msgid "Begin backup of volume %s." -msgstr "볼륨 %s의 백업을 시작하십시오." - -msgid "Begin detaching volume completed successfully." -msgstr "볼륨 연결 해제가 성공적으로 시작되었습니다." - -#, python-format -msgid "" -"BrcdFCZoneDriver - Add connection for fabric %(fabric)s for I-T map: " -"%(i_t_map)s" -msgstr "BrcdFCZoneDriver - I-T 맵: %(i_t_map)s의 패브릭 %(fabric)s 연결 추가" - -#, python-format -msgid "" -"BrcdFCZoneDriver - Delete connection for fabric %(fabric)s for I-T map: " -"%(i_t_map)s" -msgstr "BrcdFCZoneDriver - I-T 맵: %(i_t_map)s의 패브릭 %(fabric)s 연결 삭제" - -msgid "CHAP authentication disabled." -msgstr "CHAP 인증을 사용하지 않습니다." - -#, python-format -msgid "CONCERTO version: %s" -msgstr "CONCERTO 버전: %s" - -msgid "Calling os-brick to detach ScaleIO volume." 
-msgstr "ScaleIO 볼륨의 연결을 해제하기 위해 os-brick 호출." - -#, python-format -msgid "" -"Cannot provide backend assisted migration for volume: %s because cluster " -"exists in different management group." -msgstr "" -"클러스터가 다른 관리 그룹에 있으므로 볼륨 %s에 대해 백엔드 지원 마이그레이션" -"을 제공할 수 없습니다." - -#, python-format -msgid "" -"Cannot provide backend assisted migration for volume: %s because the volume " -"has been exported." -msgstr "" -"볼륨을 내보냈으므로 볼륨 %s에 대해 백엔드 지원 마이그레이션을 제공할 수 없습" -"니다." - -#, python-format -msgid "" -"Cannot provide backend assisted migration for volume: %s because the volume " -"has snapshots." -msgstr "" -"볼륨에 스냅샷이 있으므로 볼륨 %s에 대해 백엔드 지원 마이그레이션을 제공할 수 " -"없습니다." - -#, python-format -msgid "" -"Cannot provide backend assisted migration for volume: %s because volume does " -"not exist in this management group." -msgstr "" -"볼륨이 이 관리 그룹에 없으므로 볼륨 %s에 대해 백엔드 지원 마이그레이션을 제공" -"할 수 없습니다." - -#, python-format -msgid "" -"Cannot provide backend assisted migration for volume: %s because volume is " -"from a different backend." -msgstr "" -"볼륨이 다른 백엔드에 있으므로 볼륨 %s에 대해 백엔드 지원 마이그레이션을 제공" -"할 수 없습니다." - -#, python-format -msgid "Cgsnapshot %s: creating." -msgstr "Cgsnapshot %s: 작성 중." - -#, python-format -msgid "Change volume capacity request: %s." -msgstr "볼륨 용량 변경 요청: %s." - -#, python-format -msgid "Checking image clone %s from glance share." -msgstr "Glance 공유의 이미지 복제 %s 확인." - -#, python-format -msgid "Checking origin %(origin)s of volume %(volume)s." -msgstr "볼륨 %(volume)s의 원본 %(origin)s 확인." - -#, python-format -msgid "" -"Cinder NFS volume with current path \"%(cr)s\" is no longer being managed." -msgstr "현재 경로가 \"%(cr)s\"인 Cinder NFS 볼륨이 더 이상 관리되지 않습니다." - -msgid "Cinder secure environment indicator file exists." -msgstr "Cinder 보안 환경 표시기 파일이 있습니다." 
- -#, python-format -msgid "CiscoFCZoneDriver - Add connection for I-T map: %s" -msgstr "CiscoFCZoneDriver - I-T map의 연결 추가: %s" - -#, python-format -msgid "CiscoFCZoneDriver - Delete connection for I-T map: %s" -msgstr "CiscoFCZoneDriver - I-T map의 연결 삭제: %s" - -#, python-format -msgid "Cleaning cache for share %s." -msgstr "공유 %s의 캐시 정리." - -msgid "Cleaning up incomplete backup operations." -msgstr "불완전한 백업 조작 정리." - -#, python-format -msgid "Clone %s created." -msgstr "복제 %s이(가) 작성되었습니다." - -#, python-format -msgid "Cloning from cache to destination %s" -msgstr "캐시에서 대상 %s(으)로 복제" - -#, python-format -msgid "Cloning from snapshot to destination %s" -msgstr "스냅샷에서 대상 %s(으)로 복제" - -#, python-format -msgid "Cloning image %s from cache" -msgstr "캐시에서 이미지 %s 복제." - -#, python-format -msgid "Cloning image %s from snapshot." -msgstr "스냅샷에서 이미지 %s 복제." - -#, python-format -msgid "Cloning volume %(src)s to volume %(dst)s" -msgstr "볼륨 %(src)s을(를) 볼륨 %(dst)s(으)로 복제" - -#, python-format -msgid "" -"Cloning volume from snapshot volume=%(vol)s snapshot=%(snap)s clone=" -"%(clone)s snap_size=%(size)s reserve=%(reserve)sagent-type=%(agent-type)s " -"perfpol-name=%(perfpol-name)s encryption=%(encryption)s cipher=%(cipher)s " -"multi-initiator=%(multi-initiator)s" -msgstr "" -"스냅샷 볼륨=%(vol)s 스냅샷=%(snap)s 복제=%(clone)s snap_size=%(size)s 예약=" -"%(reserve)sagent-type=%(agent-type)s perfpol-name=%(perfpol-name)s 암호화=" -"%(encryption)s 암호=%(cipher)s 다중 개시자r=%(multi-initiator)s에서 볼륨 복제" - -#, python-format -msgid "CloudByte API executed successfully for command [%s]." -msgstr "명령 [%s]에 대해 CloudByte API가 성공적으로 실행되었습니다." - -#, python-format -msgid "" -"CloudByte operation [%(operation)s] succeeded for volume [%(cb_volume)s]." -msgstr "" -"볼륨 [%(cb_volume)s]에 대해 CloudByte 조작 [%(operation)s]에 성공했습니다." - -msgid "Complete-Migrate volume completed successfully." -msgstr "볼륨 전체 마이그레이션이 성공적으로 완료되었습니다." - -#, python-format -msgid "Completed: convert_to_base_volume: id=%s." 
-msgstr "완료됨: convert_to_base_volume: id=%s." - -#, python-format -msgid "" -"Connect initialization info: {driver_volume_type: fibre_channel, data: " -"%(properties)s" -msgstr "" -"초기화 정보 연결: {driver_volume_type: fibre_channel, 데이터: %(properties)s" - -#, python-format -msgid "Connecting to host: %s." -msgstr "호스트에 연결: %s." - -#, python-format -msgid "Connecting to target host: %s for backend enabled migration." -msgstr "백엔드 사용 마이그레이션의 대상 호스트 %s에 연결." - -#, python-format -msgid "Connector returning fcnsinfo-%s" -msgstr "커넥터에서 fcnsinfo-%s 리턴" - -#, python-format -msgid "Converted %(sz).2f MB image at %(mbps).2f MB/s" -msgstr "%(mbps).2f MB/s에서 %(sz).2f MB 이미지 전환 " - -#, python-format -msgid "" -"Converting %(volume_name)s to full provisioning with userCPG=%(new_cpg)s" -msgstr "%(volume_name)s을(를) userCPG=%(new_cpg)s인 전체 프로비저닝으로 변환" - -#, python-format -msgid "" -"Converting %(volume_name)s to thin dedup provisioning with userCPG=" -"%(new_cpg)s" -msgstr "" -"%(volume_name)s을(를) userCPG=%(new_cpg)s인 씬 중복 제거 프로비저닝으로 변환" - -#, python-format -msgid "" -"Converting %(volume_name)s to thin provisioning with userCPG=%(new_cpg)s" -msgstr "%(volume_name)s을(를) userCPG=%(new_cpg)s인 씬 프로비저닝으로 변환" - -msgid "Coordination backend started successfully." -msgstr "조정 백엔드가 성공적으로 시작되었습니다." - -#, python-format -msgid "Copied image %(img)s to volume %(vol)s using copy offload workflow." -msgstr "" -"복사 오프로드 워크플로우를 사용하여 이미지 %(img)s을(를) 볼륨 %(vol)s(으)로 " -"복사했습니다." - -#, python-format -msgid "Copied image %(img)s to volume %(vol)s using local image cache." -msgstr "" -"로컬 이미지 캐시를 사용하여 이미지 %(img)s을(를) 볼륨 %(vol)s(으)로 복사했습" -"니다." - -#, python-format -msgid "Copied image to volume %s using regular download." -msgstr "일반 다운로드를 사용하여 볼륨 %s에 이미지를 복사합니다." - -#, python-format -msgid "Copy job to dest vol %s completed." -msgstr "대상 볼륨 %s(으)로 작업 복사가 완료되었습니다." - -msgid "Copy volume to image completed successfully." -msgstr "이미지에 볼륨 복사가 성공적으로 완료되었습니다." 
- -#, python-format -msgid "Copying src vol %(src)s to dest vol %(dst)s." -msgstr "소스 볼륨 %(src)s을(를) 대상 볼륨 %(dst)s에 복사합니다." - -#, python-format -msgid "Could not find replica to delete of volume %(vol)s." -msgstr "볼륨 %(vol)s을(를) 삭제하기 위한 복제본을 찾을 수 없습니다." - -#, python-format -msgid "Could not run dpkg-query command: %(msg)s." -msgstr "dpkg-query 명령을 실행할 수 없음: %(msg)s." - -#, python-format -msgid "Could not run rpm command: %(msg)s." -msgstr "rpm 명령을 실행할 수 없음: %(msg)s." - -#, python-format -msgid "" -"Could not update storage pool with mmchattr to %(pool)s, error: %(error)s" -msgstr "" -"mmchattr을 사용하여 스토리지 풀을 %(pool)s(으)로 업데이트할 수 없음, 오류: " -"%(error)s" - -#, python-format -msgid "" -"Couldn't find destination volume %(vol)s in the database. The entry might be " -"successfully deleted during migration completion phase." -msgstr "" -"데이터베이스에서 대상 볼륨 %(vol)s을(를) 찾을 수 없습니다. 마이그레이션 완료 " -"단계 중에 항목이 성공적으로 삭제되었을 가능성이 있습니다." - -#, python-format -msgid "" -"Couldn't find the temporary volume %(vol)s in the database. There is no need " -"to clean up this volume." -msgstr "" -"데이터베이스에서 임시 볼륨 %(vol)s을(를) 찾을 수 없습니다.이 볼륨을 정리하지 " -"않아도 됩니다." - -#, python-format -msgid "Create Cloned Volume %(volume_id)s completed." -msgstr "복제된 볼륨 %(volume_id)s 작성이 완료되었습니다." - -#, python-format -msgid "Create Consistency Group: %(group)s." -msgstr "일관성 그룹 %(group)s 작성." - -#, python-format -msgid "Create Volume %(volume_id)s completed." -msgstr "볼륨 %(volume_id)s 작성이 완료되었습니다." - -#, python-format -msgid "Create Volume %(volume_id)s from snapshot %(snapshot_id)s completed." -msgstr "스냅샷 %(snapshot_id)s에서 볼륨 %(volume_id)s 작성이 완료되었습니다." - -#, python-format -msgid "" -"Create a replica from Volume: Clone Volume: %(cloneName)s Source Volume: " -"%(sourceName)s." -msgstr "" -"볼륨에서 복제본 작성: 볼륨 복제: %(cloneName)s 소스 볼륨: %(sourceName)s." - -#, python-format -msgid "Create backup finished. backup: %s." -msgstr "백업 작성이 완료됨. 백업: %s." 
- -#, python-format -msgid "Create backup started, backup: %(backup_id)s volume: %(volume_id)s." -msgstr "백업 작성이 시작됨, 백업: %(backup_id)s 볼륨: %(volume_id)s." - -#, python-format -msgid "Create consistency group from source-%(source)s completed successfully." -msgstr "source-%(source)s에서 일관성 그룹 생성이 성공적으로 완료되었습니다." - -#, python-format -msgid "Create export done from Volume %(volume_id)s." -msgstr "볼륨 %(volume_id)s에서 내보내기 작성이 수행되었습니다." - -msgid "Create snapshot completed successfully" -msgstr "스냅샷 작성이 성공적으로 완료되었습니다." - -#, python-format -msgid "" -"Create snapshot for Consistency Group %(cgId)s cgsnapshotID: %(cgsnapshot)s." -msgstr "일관성 그룹 %(cgId)s의 스냅샷 cgsnapshotID: %(cgsnapshot)s 작성." - -#, python-format -msgid "Create snapshot from volume %s" -msgstr "%s 볼륨에서 스냅샷 작성" - -#, python-format -msgid "" -"Create success. Snapshot: %(snapshot)s, Snapshot ID in raid: " -"%(raid_snapshot_id)s, volume: %(volume)s." -msgstr "" -"작성 성공, 스냅샷: %(snapshot)s, RAID의 스냅샷 ID: %(raid_snapshot_id)s, 볼" -"륨: %(volume)s." - -#, python-format -msgid "Create target consistency group %(targetCg)s." -msgstr "대상 일관성 그룹 %(targetCg)s 작성." - -#, python-format -msgid "Create volume of %s GB" -msgstr "%s GB의 볼륨 작성" - -#, python-format -msgid "CreateReplay success %s" -msgstr "CreateReplay 성공 %s" - -#, python-format -msgid "" -"Created CloudByte snapshot [%(cb_snap)s] w.r.t CloudByte volume [%(cb_vol)s] " -"and OpenStack volume [%(stack_vol)s]." -msgstr "" -"CloudByte 스냅샷 [%(cb_snap)s] w.r.t CloudByte 볼륨 [%(cb_vol)s] 및 " -"OpenStack 볼륨 [%(stack_vol)s]이(가) 작성되었습니다." - -#, python-format -msgid "Created Consistency Group %s" -msgstr "일관성 그룹 %s 작성" - -#, python-format -msgid "" -"Created a clone [%(cb_clone)s] at CloudByte snapshot path [%(cb_snap)s] w.r." -"t parent OpenStack volume [%(stack_vol)s]." -msgstr "" -"CloudByte 스냅샷 경로 [%(cb_snap)s] w.r.t 상위 OpenStack 볼륨 [%(stack_vol)s]" -"에서 복제본 [%(cb_clone)s]이(가) 작성되었습니다." - -#, python-format -msgid "Created datastore folder: %s." 
-msgstr "데이터 저장소 폴더 작성: %s." - -#, python-format -msgid "" -"Created lun-map:\n" -"%s" -msgstr "" -"lun-map 작성:\n" -"%s" - -#, python-format -msgid "" -"Created multi-attach E-Series host group %(label)s with clusterRef " -"%(clusterRef)s" -msgstr "" -"clusterRef가 %(clusterRef)s인 다중 연결 E-Series 호스트 그룹 %(label)s이(가) " -"작성됨 " - -#, python-format -msgid "Created new initiator group name: %(igGroupName)s." -msgstr "새 개시자 그룹 이름 %(igGroupName)s이(가) 작성되었습니다." - -#, python-format -msgid "Created new masking view : %(maskingViewName)s." -msgstr "새 마스킹 보기 %(maskingViewName)s이(가) 작성되었습니다." - -#, python-format -msgid "Created new storage group: %(storageGroupName)s." -msgstr "새 스토리지 그룹 작성: %(storageGroupName)s." - -#, python-format -msgid "Created snap grp with label %s." -msgstr "레이블이 %s인 스냅 grp가 작성되었습니다." - -#, python-format -msgid "Created volume %(instanceId)s: %(name)s" -msgstr "볼륨 %(instanceId)s이(가) 작성됨: %(name)s" - -#, python-format -msgid "Created volume %(volname)s, volume id %(volid)s." -msgstr "볼륨 %(volname)s, 볼륨 id %(volid)s이(가) 작성되었습니다." - -msgid "Created volume successfully." -msgstr "볼륨이 성공적으로 작성되었습니다." - -#, python-format -msgid "Created volume with label %s." -msgstr "레이블이 %s인 볼륨이 작성되었습니다." - -#, python-format -msgid "Creating %(volume)s on %(device)s" -msgstr "%(device)s에서 %(volume)s 작성" - -msgid "Creating Consistency Group" -msgstr "일관성 그룹 작성" - -#, python-format -msgid "Creating backup of volume %(volume_id)s in container %(container)s" -msgstr "%(container)s 컨테이너에서 %(volume_id)s 볼륨의 백업 작성" - -#, python-format -msgid "Creating cgsnapshot %(name)s." -msgstr "cgsnapshot %(name)s 작성." - -#, python-format -msgid "Creating clone of volume: %s" -msgstr "%s 볼륨의 복제 작성" - -#, python-format -msgid "Creating clone of volume: %s." -msgstr "볼륨의 복제 작성: %s" - -#, python-format -msgid "Creating consistency group %(name)s from cgsnapshot %(snap)s." -msgstr "cgsnapshot %(snap)s에서 일관성 그룹 %(name)s 작성." 
- -#, python-format -msgid "" -"Creating consistency group %(name)s from source consistency group " -"%(source_cgid)s." -msgstr "소스 일관성 그룹 %(source_cgid)s에서 일관성 그룹 %(name)s 작성." - -#, python-format -msgid "Creating consistency group %(name)s." -msgstr "일관성 그룹 %(name)s 작성." - -#, python-format -msgid "Creating host object %(host_name)r with IQN: %(iqn)s." -msgstr "IQN: %(iqn)s(으)로 호스트 오브젝트 %(host_name)r 작성" - -#, python-format -msgid "Creating host object %(host_name)r with WWN: %(wwn)s." -msgstr "WWN: %(wwn)s(으)로 호스트 오브젝트 %(host_name)r 작성" - -#, python-format -msgid "Creating host with ports %s." -msgstr "포트 %s(으)로 호스트 작성." - -#, python-format -msgid "Creating image snapshot %s" -msgstr "이미지 스냅샷 \"%s\" 작성" - -#, python-format -msgid "Creating initiator group %(grp)s with initiator %(iname)s" -msgstr "개시자 %(iname)s(으)로 개시자 그룹 %(grp)s 작성" - -#, python-format -msgid "Creating initiator group %(igrp)s with one initiator %(iname)s" -msgstr "한 개시자 %(iname)s(으)로 개시자 그룹 %(igrp)s 작성" - -#, python-format -msgid "Creating iscsi_target for volume: %s" -msgstr "%s 볼륨에 대한 iscsi_target 작성" - -#, python-format -msgid "Creating regular file: %s.This may take some time." -msgstr "일반 파일 작성%s.이 작업을 수행하는 데 다소 시간이 걸릴 수 있습니다." - -#, python-format -msgid "Creating server %s" -msgstr "서버 %s 작성" - -#, python-format -msgid "Creating snapshot %(snap)s of volume %(vol)s" -msgstr "볼륨 %(vol)s의 스냅샷 %(snap)s 작성" - -#, python-format -msgid "" -"Creating snapshot for volume_name=%(vol)s snap_name=%(name)s " -"snap_description=%(desc)s" -msgstr "" -"volume_name=%(vol)s snap_name=%(name)s snap_description=%(desc)s의 스냅샷 작" -"성" - -#, python-format -msgid "Creating snapshot: %s" -msgstr "스냅샷 작성: %s." - -#, python-format -msgid "Creating temp snapshot %(snap)s from volume %(vol)s" -msgstr "볼륨 %(vol)s에서 임시 스냅샷 %(snap)s 작성" - -#, python-format -msgid "Creating transfer of volume %s" -msgstr "볼륨 %s의 전송 작성" - -#, python-format -msgid "Creating volume %s from snapshot." -msgstr "스냅샷에서 볼륨 %s 작성." 
- -#, python-format -msgid "Creating volume from snapshot: %s" -msgstr "스냅샷에서 볼륨 작성: %s." - -#, python-format -msgid "Creating volume of %(size)s GB for restore of backup %(backup_id)s." -msgstr "백업 %(backup_id)s의 복원을 위한 %(size)sGB의 볼륨 작성." - -#, python-format -msgid "Creating volume snapshot: %s." -msgstr "볼륨 스냅샷 작성: %s." - -#, python-format -msgid "Creatng volume from snapshot. volume: %s" -msgstr "스냅샷에서 볼륨 작성, 볼륨: %s" - -#, python-format -msgid "DRBD connection for %s already removed" -msgstr "%s의 DRBD 연결이 이미 제거됨" - -#, python-format -msgid "Delete Consistency Group: %(group)s." -msgstr "일관성 그룹 %(group)s 삭제." - -#, python-format -msgid "Delete Snapshot %(snapshot_id)s completed." -msgstr "스냅샷 %(snapshot_id)s 삭제가 완료되었습니다." - -#, python-format -msgid "Delete Snapshot: %(snapshot)s." -msgstr "스냅샷 %(snapshot)s 삭제" - -#, python-format -msgid "Delete Snapshot: %(snapshotName)s." -msgstr "스냅샷 삭제: %(snapshotName)s." - -#, python-format -msgid "Delete Volume %(volume_id)s completed." -msgstr "볼륨 %(volume_id)s 삭제가 완료되었습니다." - -#, python-format -msgid "Delete backup finished, backup %s deleted." -msgstr "백업 삭제가 완료됨, 백업 %s이(가) 삭제되었습니다." - -#, python-format -msgid "Delete backup started, backup: %s." -msgstr "백업 삭제가 시작됨, 백업: %s." - -#, python-format -msgid "Delete backup with id: %s" -msgstr "ID가 %s인 백업 삭제" - -#, python-format -msgid "Delete cgsnapshot %(snap_name)s for consistency group: %(group_name)s" -msgstr "일관성 그룹 %(group_name)s의 cgsnapshot %(snap_name)s 삭제" - -#, python-format -msgid "Delete cgsnapshot with id: %s" -msgstr "ID가 %s인 cgsnapshot 삭제" - -#, python-format -msgid "Delete connection target list: %(targets)s" -msgstr "연결 대상 목록 삭제: %(targets)s" - -msgid "Delete consistency group completed successfully." -msgstr "일관성 그룹 삭제가 성공적으로 완료되었습니다." - -#, python-format -msgid "Delete consistency group with id: %s" -msgstr "ID가 %s인 일관성 그룹 삭제" - -#, python-format -msgid "" -"Delete of backup '%(backup)s' for volume '%(volume)s' finished with warning." 
-msgstr "" -"볼륨 '%(volume)s'의 백업 '%(backup)s' 삭제가 완료되었으며 경고가 표시됩니다." - -msgid "Delete snapshot completed successfully" -msgstr "스냅샷 삭제가 성공적으로 완료되었습니다." - -#, python-format -msgid "Delete snapshot for source CG %(cgId)s cgsnapshotID: %(cgsnapshot)s." -msgstr "소스 CG %(cgId)s의 스냅샷 cgsnapshotID: %(cgsnapshot)s 삭제." - -msgid "Delete snapshot metadata completed successfully." -msgstr "스냅샷 메타데이터 삭제가 성공적으로 완료되었습니다." - -#, python-format -msgid "Delete snapshot with id: %s" -msgstr "ID가 %s인 스냅샷 삭제" - -#, python-format -msgid "Delete transfer with id: %s" -msgstr "ID가 %s인 전송 삭제" - -msgid "Delete volume metadata completed successfully." -msgstr "볼륨 메타데이터 삭제가 성공적으로 완료되었습니다." - -msgid "Delete volume request issued successfully." -msgstr "볼륨 삭제 요청이 성공적으로 실행되었습니다." - -#, python-format -msgid "Delete volume with id: %s" -msgstr "ID가 %s인 볼륨 삭제" - -#, python-format -msgid "Deleted %(row)d rows from table=%(table)s" -msgstr "테이블=%(table)s에서 %(row)d 행 삭제" - -#, python-format -msgid "" -"Deleted CloudByte snapshot [%(snap)s] w.r.t parent CloudByte volume " -"[%(cb_vol)s] and parent OpenStack volume [%(stack_vol)s]." -msgstr "" -"CloudByte 스냅샷 [%(snap)s] w.r.t 상위 CloudByte 볼륨 [%(cb_vol)s] 및 상위 " -"OpenStack 볼륨 [%(stack_vol)s]이(가) 삭제되었습니다." - -#, python-format -msgid "Deleted the VM backing: %s." -msgstr "VM 백업 삭제: %s." - -#, python-format -msgid "Deleted vmdk file: %s." -msgstr "vmdk 파일이 삭제됨: %s." - -msgid "Deleted volume successfully." -msgstr "볼륨이 성공적으로 삭제되었습니다." - -msgid "Deleting Consistency Group" -msgstr "일관성 그룹 삭제" - -#, python-format -msgid "Deleting Volume: %(volume)s" -msgstr "볼륨 삭제: %(volume)s" - -#, python-format -msgid "Deleting backup base image='%(basename)s' of volume %(volume)s." -msgstr "볼륨 %(volume)s의 백업 기본 이미지='%(basename)s' 삭제." 
- -#, python-format -msgid "Deleting deleteInitiatorGrp %s " -msgstr "deleteInitiatorGrp %s 삭제" - -#, python-format -msgid "Deleting snapshot %(ss)s from %(pro)s" -msgstr "%(pro)s에서 스냅샷 %(ss)s 삭제" - -#, python-format -msgid "Deleting snapshot %s " -msgstr "스냅샷 %s 삭제" - -#, python-format -msgid "Deleting snapshot: %s" -msgstr "스냅샷 삭제: %s" - -#, python-format -msgid "Deleting stale snapshot: %s" -msgstr "시간이 경과된 스냅샷 삭제: %s" - -#, python-format -msgid "Deleting volume %s " -msgstr "볼륨 %s 삭제" - -#, python-format -msgid "Deleting volume %s." -msgstr "볼륨 %s 삭제." - -#, python-format -msgid "Detach Volume, metadata is: %s." -msgstr "볼륨 연결 해제, 메타데이터: %s." - -msgid "Detach volume completed successfully." -msgstr "볼륨 연결 해제가 성공적으로 완료되었습니다." - -msgid "Determined volume DB was empty at startup." -msgstr "시작 시 볼륨 DB가 비어 있는 것으로 판별되었습니다." - -msgid "Determined volume DB was not empty at startup." -msgstr "시작 시 볼륨 DB가 비어 있지 않은 것으로 판별되었습니다" - -#, python-format -msgid "" -"Did not find the snapshot: %(name)s for backing: %(backing)s. Need not " -"delete anything." -msgstr "" -"백업 %(backing)s의 스냅샷 %(name)s을(를) 찾을 수 없습니다. 삭제할 내용이 없습" -"니다." - -#, python-format -msgid "Discovery ip %(disc_ip)s is found on mgmt+data subnet %(net_label)s" -msgstr "검색 ip %(disc_ip)s을(를) mgmt+data 서브넷 %(net_label)s에서 발견함" - -#, python-format -msgid "Discovery ip %(disc_ip)s is used on data subnet %(net_label)s" -msgstr "검색 ip %(disc_ip)s을(를) 데이터 서브넷 %(net_label)s에서 사용" - -#, python-format -msgid "Discovery ip %(disc_ip)s is used on subnet %(net_label)s" -msgstr "검색 ip %(disc_ip)s을(를) 서브넷 %(net_label)s에서 사용" - -#, python-format -msgid "Discovery ip %s is used on mgmt+data subnet" -msgstr "검색 ip %s을(를) mgmt+data 서브넷에서 사용" - -#, python-format -msgid "Dissociating volume %s " -msgstr "볼륨 %s 연관 해제" - -#, python-format -msgid "Domain id is %s." -msgstr "도메인 id는 %s입니다." - -#, python-format -msgid "Done copying image: %(id)s to volume: %(vol)s." -msgstr "이미지 %(id)s을(를) 볼륨 %(vol)s(으)로 복사하는 작업이 완료되었습니다." 
- -#, python-format -msgid "Done copying volume %(vol)s to a new image %(img)s" -msgstr "볼륨 %(vol)s을(를) 새 이미지 %(img)s(으)로 복사 완료" - -#, python-format -msgid "" -"Downlevel GPFS Cluster Detected. GPFS encryption-at-rest feature not enabled " -"in cluster daemon level %(cur)s - must be at least at level %(min)s." -msgstr "" -"하위 레벨 GPFS 클러스터가 발견됨. GPFS encryption-at-rest 기능을 클러스터 디" -"먼 레벨 %(cur)s에서 사용할 수 없음 - 최소 %(min)s 레벨이어야 합니다. " - -msgid "Driver initialization completed successfully." -msgstr "드라이버 초기화가 성공적으로 완료되었습니다." - -msgid "Driver post RPC initialization completed successfully." -msgstr "드라이버 post RPC 초기화가 성공적으로 완료되었습니다." - -#, python-format -msgid "" -"E-series proxy API version %(version)s does not support full set of SSC " -"extra specs. The proxy version must be at at least %(min_version)s." -msgstr "" -"E-series 프록시 API 버전 %(version)s이(가) SSC 추가 스펙의 전체 세트를 지원하" -"지 않습니다. 프록시 버전은 %(min_version)s 이상이어야 합니다. " - -#, python-format -msgid "E-series proxy API version %s does not support autosupport logging." -msgstr "E-series 프록시 API 버전 %s에서 자동 지원 로깅을 지원하지 않습니다." - -#, python-format -msgid "EQL-driver: Setup is complete, group IP is \"%s\"." -msgstr "EQL-드라이버: 설정이 완료되었습니다, 그룹 IP는 \"%s\"입니다." - -#, python-format -msgid "EQL-driver: executing \"%s\"." -msgstr "EQL-드라이버: \"%s\" 실행." - -#, python-format -msgid "Editing Volume %(vol)s with mask %(mask)s" -msgstr "마스크 %(mask)s(으)로 볼륨 %(vol)s 편집" - -msgid "Embedded mode detected." -msgstr "임베드된 모드가 발견되었습니다." - -msgid "Enabling LVM thin provisioning by default because a thin pool exists." -msgstr "씬 풀이 있으므로 기본적으로 LVM 씬 프로비저닝을 사용합니다." - -msgid "Enabling LVM thin provisioning by default because no LVs exist." -msgstr "LV가 없으므로 기본적으로 LVM 씬 프로비저닝을 사용합니다." 
- -#, python-format -msgid "Entering extend_volume volume=%(vol)s new_size=%(size)s" -msgstr "extend_volume 볼륨=%(vol)s new_size=%(size)s 입력" - -#, python-format -msgid "" -"Entering initialize_connection volume=%(vol)s connector=%(conn)s location=" -"%(loc)s" -msgstr "initialize_connection 볼륨=%(vol)s 커넥터=%(conn)s 위치=%(loc)s 입력" - -#, python-format -msgid "" -"Entering terminate_connection volume=%(vol)s connector=%(conn)s location=" -"%(loc)s." -msgstr "terminate_connection 볼륨=%(vol)s 커넥터=%(conn)s 위치=%(loc)s 입력." - -#, python-format -msgid "Entering unmanage_volume volume = %s" -msgstr "unmanage_volume 볼륨 = %s 입력" - -#, python-format -msgid "Exploring array subnet label %s" -msgstr "배열 서브넷 레이블 %s 탐색" - -#, python-format -msgid "Export record finished, backup %s exported." -msgstr "레코드 내보내기가 완료됨, 백업 %s을(를) 내보냈습니다." - -#, python-format -msgid "Export record started, backup: %s." -msgstr "레코드 내보내기가 시작됨, 백업: %s." - -#, python-format -msgid "Exported lun %(vol_id)s on lun_id %(lun_id)s." -msgstr "lun_id %(lun_id)s에서 lun %(vol_id)s을(를) 내보냈습니다." - -msgid "Extend volume completed successfully." -msgstr "볼륨 확장이 성공적으로 완료되었습니다." - -msgid "Extend volume request issued successfully." -msgstr "볼륨 확장 요청이 성공적으로 실행되었습니다." - -#, python-format -msgid "" -"Extend volume: %(volumename)s, oldsize: %(oldsize)s, newsize: %(newsize)s." -msgstr "볼륨 확장 %(volumename)s, oldsize: %(oldsize)s, newsize: %(newsize)s." - -#, python-format -msgid "Extending volume %s." -msgstr "볼륨 %s 확장." - -#, python-format -msgid "Extending volume: %(id)s New size: %(size)s GB" -msgstr "볼륨 확장: %(id)s 새 크기: %(size)sGB" - -#, python-format -msgid "" -"FAST: capacity stats for policy %(fastPolicyName)s on array %(arrayName)s. " -"total_capacity_gb=%(total_capacity_gb)lu, free_capacity_gb=" -"%(free_capacity_gb)lu." -msgstr "" -"FAST: 배열 %(arrayName)s.의 정책 %(fastPolicyName)s에 대한 용량 통계 " -"total_capacity_gb=%(total_capacity_gb)lu, free_capacity_gb=" -"%(free_capacity_gb)lu." 
- -msgid "Failed over to replication target successfully." -msgstr "복제 대상으로 장애 복구되었습니다." - -#, python-format -msgid "Failed to create host: %(name)s. Check if it exists on the array." -msgstr "호스트 작성 실패: %(name)s. 배열에 있는지 확인하십시오." - -#, python-format -msgid "" -"Failed to create hostgroup: %(name)s. Please check if it exists on the array." -msgstr "hostgroup 작성 실패: %(name)s. 배열에 있는지 확인하십시오." - -#, python-format -msgid "Failed to open iet session list for %(vol_id)s: %(e)s" -msgstr "%(vol_id)s의 iet 세션 목록을 여는 데 실패: %(e)s" - -#, python-format -msgid "Failing backend to %s" -msgstr "%s의 백엔드 실패" - -#, python-format -msgid "Fault thrown: %s" -msgstr "처리된 결함: %s" - -#, python-format -msgid "Fetched vCenter server version: %s" -msgstr "가져온 vCenter 서버 버전: %s" - -#, python-format -msgid "Filtered targets for SAN is: %(targets)s" -msgstr "SAN의 필터링된 대상: %(targets)s" - -#, python-format -msgid "Filtered targets for SAN is: %s" -msgstr "SAN의 필터링된 대상: %s" - -#, python-format -msgid "Final filtered map for delete connection: %(i_t_map)s" -msgstr "연결 삭제를 위해 최종으로 필터링된 맵: %(i_t_map)s" - -#, python-format -msgid "Final filtered map for fabric: %(i_t_map)s" -msgstr "패브릭의 최종 필터링된 맵: %(i_t_map)s" - -#, python-format -msgid "Fixing previous mount %s which was not unmounted correctly." -msgstr "올바르게 마운트 해제되지 않은 이전 마운트 %s을(를) 수정합니다." - -#, python-format -msgid "Flash Cache policy set to %s" -msgstr "플래시 캐시 정책이 %s(으)로 설정됨" - -#, python-format -msgid "Flexvisor already unassigned volume %(id)s." -msgstr "Flexvisor에서 볼륨 %(id)s의 할당을 이미 해제했습니다." - -#, python-format -msgid "Flexvisor snapshot %(id)s not existed." -msgstr "Flexvisor 스냅샷 %(id)s이(가) 없습니다." - -#, python-format -msgid "Flexvisor succeeded to add volume %(id)s to group %(cgid)s." -msgstr "Flexvisor에서 볼륨 %(id)s을(를) 그룹 %(cgid)s에 추가했습니다." - -#, python-format -msgid "Flexvisor succeeded to clone volume %(id)s." -msgstr "Flexvisor에서 볼륨 %(id)s을(를) 복제하는 데 성공했습니다." 
- -#, python-format -msgid "Flexvisor succeeded to create volume %(id)s from snapshot." -msgstr "Flexvisor가 스냅샷에서 볼륨 %(id)s을(를) 작성하는 데 성공했습니다." - -#, python-format -msgid "Flexvisor succeeded to create volume %(id)s." -msgstr "Flexvisor에서 볼륨 %(id)s을(를) 작성하는 데 성공했습니다." - -#, python-format -msgid "Flexvisor succeeded to delete snapshot %(id)s." -msgstr "Flexvisor에서 스냅샷 %(id)s을(를) 삭제하는 데 성공했습니다." - -#, python-format -msgid "Flexvisor succeeded to extend volume %(id)s." -msgstr "Flexvisor에서 볼륨 %(id)s을(를) 확장하는 데 성공했습니다." - -#, python-format -msgid "Flexvisor succeeded to remove volume %(id)s from group %(cgid)s." -msgstr "Flexvisor가 그룹 %(cgid)s에서 볼륨 %(id)s을(를) 제거했습니다. " - -#, python-format -msgid "Flexvisor succeeded to unassign volume %(id)s." -msgstr "Flexvisor에서 볼륨 %(id)s의 할당을 해제하는 데 성공했습니다." - -#, python-format -msgid "Flexvisor volume %(id)s does not exist." -msgstr "Flexvisor 볼륨 %(id)s이(가) 없습니다." - -#, python-format -msgid "Folder %s does not exist, it was already deleted." -msgstr "폴더 %s이(가) 없습니다. 이미 삭제되었습니다." - -msgid "Force upload to image is disabled, Force option will be ignored." -msgstr "이미지에 강제 업로드는 사용하지 않습니다. 강제 적용 옵션이 무시됩니다." - -#, python-format -msgid "Found a temporary snapshot %(name)s" -msgstr "임시 스냅샷 %(name)s 발견" - -#, python-format -msgid "Free capacity for backend is: %(free)s, total capacity: %(total)s." -msgstr "백엔드의 사용 가능 용량: %(free)s, 총 용량: %(total)s." - -#, python-format -msgid "Friendly zone name after forming: %(zonename)s" -msgstr "구성 후의 선호 구역 이름: %(zonename)s" - -#, python-format -msgid "Generating transfer record for volume %s" -msgstr "볼륨 %s의 전송 레코드 생성" - -msgid "Get all snapshots completed successfully." -msgstr "모든 스냅샷 가져오기가 성공적으로 완료되었습니다." - -msgid "Get all volumes completed successfully." -msgstr "모든 볼륨 가져오기가 성공적으로 완료되었습니다." - -#, python-format -msgid "Get domain by name response: %s" -msgstr "이름별 도메인 가져오기 응답: %s" - -msgid "Get snapshot metadata completed successfully." -msgstr "스냅샷 메타데이터 가져오기가 성공적으로 완료되었습니다." 
- -msgid "Get snapshot metadata value not implemented." -msgstr "스냅샷 메타데이터 값 가져오기가 구현되지 않았습니다." - -#, python-format -msgid "Get the default ip: %s." -msgstr "기본 ip 가져오기: %s." - -msgid "Get volume admin metadata completed successfully." -msgstr "볼륨 관리 메타데이터 가져오기가 성공적으로 완료되었습니다." - -msgid "Get volume image-metadata completed successfully." -msgstr "볼륨 이미지 메타데이터 가져오기가 성공적으로 완료되었습니다." - -msgid "Get volume metadata completed successfully." -msgstr "볼륨 메타데이터 가져오기가 성공적으로 완료되었습니다." - -msgid "Getting getInitiatorGrpList" -msgstr "getInitiatorGrpList 가져오기" - -#, python-format -msgid "Getting volume information for vol_name=%s" -msgstr "vol_name=%s의 볼륨 정보 가져오기" - -#, python-format -msgid "Going to perform request again %s with valid token." -msgstr "올바른 토큰으로 %s 요청을 다시 수행합니다." - -#, python-format -msgid "HPE3PARCommon %(common_ver)s,hpe3parclient %(rest_ver)s" -msgstr "HPE3PARCommon %(common_ver)s,hpe3parclient %(rest_ver)s" - -#, python-format -msgid "HPELeftHand API version %s" -msgstr "HPELeftHand API 버전 %s" - -#, python-format -msgid "HTTP exception thrown: %s" -msgstr "HTTP 예외 처리: %s" - -#, python-format -msgid "Hypermetro id: %(metro_id)s. Remote lun id: %(remote_lun_id)s." -msgstr "Hypermetro id: %(metro_id)s. 원격 lun id: %(remote_lun_id)s." - -#, python-format -msgid "Ignored LU creation error \"%s\" while ensuring export." -msgstr "내보내기를 확인하는 중에 LU 작성 오류 \"%s\"이(가) 무시되었습니다." - -#, python-format -msgid "Ignored LUN mapping entry addition error \"%s\" while ensuring export." -msgstr "" -"내보내기를 확인하는 동안 LUN 맵핑 항목 추가 오류 \"%s\"이(가) 무시되었습니다." - -#, python-format -msgid "Ignored target creation error \"%s\" while ensuring export." -msgstr "내보내기를 확인하는 중에 대상 작성 오류 \"%s\"이(가) 무시되었습니다." - -#, python-format -msgid "Ignored target group creation error \"%s\" while ensuring export." -msgstr "" -"내보내기를 확인하는 중에 대상 그룹 작성 오류 \"%s\"이(가) 무시되었습니다." - -#, python-format -msgid "" -"Ignored target group member addition error \"%s\" while ensuring export." 
-msgstr "" -"내보내기를 확인하는 중에 대상 그룹 멤버 추가 오류 \"%s\"이(가) 무시되었습니" -"다." - -#, python-format -msgid "Image %(pool)s/%(image)s is dependent on the snapshot %(snap)s." -msgstr "이미지 %(pool)s/%(image)s은(는) 스냅샷 %(snap)s에 종속됩니다." - -#, python-format -msgid "Image cloning unsuccessful for image %(image_id)s. Message: %(msg)s" -msgstr "이미지 %(image_id)s에 대한 이미지 복제에 실패. 메시지: %(msg)s" - -#, python-format -msgid "Image download %(sz).2f MB at %(mbps).2f MB/s" -msgstr "%(mbps).2f MB/s에서 이미지 다운로드 %(sz).2f MB" - -#, python-format -msgid "Image will locally be converted to raw %s" -msgstr "로컬에서 이미지가 원시 %s(으)로 변환됨" - -#, python-format -msgid "Image-volume cache disabled for host %(host)s." -msgstr "호스트 %(host)s에 대해 이미지-볼륨 캐시가 사용되지 않습니다." - -#, python-format -msgid "Image-volume cache enabled for host %(host)s." -msgstr "호스트 %(host)s에 대해 이미지-볼륨 캐시가 사용되었습니다." - -#, python-format -msgid "Import record id %s metadata from driver finished." -msgstr "드라이버에서 레코드 id %s 메타데이터 가져오기가 완료되었습니다." - -#, python-format -msgid "Import record started, backup_url: %s." -msgstr "레코드 가져오기가 시작됨, backup_url: %s." - -#, python-format -msgid "Imported %(fail)s to %(guid)s." -msgstr "%(fail)s을(를) %(guid)s에 가져왔습니다." - -#, python-format -msgid "Initialize connection: %(volume)s." -msgstr "연결 초기화: %(volume)s." - -msgid "Initialize volume connection completed successfully." -msgstr "볼륨 연결 초기화가 성공적으로 완료되었습니다." - -#, python-format -msgid "Initialized driver %(name)s version: %(vers)s" -msgstr "초기화된 드라이버 %(name)s 버전: %(vers)s" - -#, python-format -msgid "" -"Initializing RPC dependent components of volume driver %(driver_name)s " -"(%(version)s)" -msgstr "" -"볼륨 드라이버 %(driver_name)s (%(version)s)의 RPC 종속 구성 요소 초기화" - -msgid "Initializing extension manager." -msgstr "확장기능 관리자를 초기화 중입니다. " - -#, python-format -msgid "" -"Initiator Name(s) %(initiatorNames)s are not on array %(storageSystemName)s." -msgstr "" -"개시자 이름 %(initiatorNames)s이(가) 배열 %(storageSystemName)s에 없습니다." 
- -#, python-format -msgid "" -"Initiator Name(s) %(initiatorNames)s are not on array %(storageSystemName)s. " -msgstr "" -"개시자 이름 %(initiatorNames)s이(가) 배열 %(storageSystemName)s에 없습니다." - -#, python-format -msgid "Initiator group name is %(grp)s for initiator %(iname)s" -msgstr "개시자 %(iname)s의 개시자 그룹 이름이 %(grp)s입니다." - -#, python-format -msgid "LUN %(id)s extended to %(size)s GB." -msgstr "LUN %(id)s이(가) %(size)sGB로 확장되었습니다." - -#, python-format -msgid "LUN with given ref %s need not be renamed during manage operation." -msgstr "" -"지정된 ref %s이(가) 있는 LUN은 관리 조작 중에 이름을 바꾸지 않아도 됩니다." - -#, python-format -msgid "" -"Leaving create_volume: %(volumeName)s Return code: %(rc)lu volume dict: " -"%(name)s." -msgstr "" -"create_volume 종료: %(volumeName)s 리턴 코드: %(rc)lu 볼륨 dict: %(name)s." - -#, python-format -msgid "Leaving delete_volume: %(volumename)s Return code: %(rc)lu." -msgstr "delete_volume 종료: %(volumename)s 리턴 코드: %(rc)lu." - -#, python-format -msgid "Leaving initialize_connection: %s" -msgstr "initialize_connection 종료: %s" - -#, python-format -msgid "Loaded extension: %s" -msgstr "로드된 확장: %s" - -#, python-format -msgid "" -"Logical Volume not found when querying LVM info. (vg_name=%(vg)s, lv_name=" -"%(lv)s" -msgstr "" -"LVM 정보를 쿼리할 때 논리 볼륨을 찾을 수 없습니다. (vg_name=%(vg)s, lv_name=" -"%(lv)s" - -msgid "Manage existing volume completed successfully." -msgstr "기존 볼륨 관리가 성공적으로 완료되었습니다." - -#, python-format -msgid "" -"Manage operation completed for LUN with new path %(path)s and uuid %(uuid)s." -msgstr "" -"새 경로 %(path)s 및 uuid %(uuid)s(으)로 LUN의 관리 조작이 완료되었습니다." - -#, python-format -msgid "" -"Manage operation completed for volume with new label %(label)s and wwn " -"%(wwn)s." -msgstr "" -"새 레이블 %(label)s 및 wwn %(wwn)s의 볼륨에 대한 관리 조작이 완료되었습니다." - -#, python-format -msgid "Manage volume %s" -msgstr "볼륨 %s 관리" - -msgid "Manage volume request issued successfully." -msgstr "볼륨 요청 관리가 성공적으로 실행되었습니다." 
- -#, python-format -msgid "Masking view %(maskingViewName)s successfully deleted." -msgstr "마스킹 보기 %(maskingViewName)s이(가) 성공적으로 삭제되었습니다." - -#, python-format -msgid "Migrate Volume %(volume_id)s completed." -msgstr "볼륨 %(volume_id)s 마이그레이션이 완료되었습니다." - -msgid "Migrate volume completed successfully." -msgstr "볼륨 마이그레이션이 성공적으로 완료되었습니다." - -msgid "Migrate volume completion issued successfully." -msgstr "볼륨 마이그레이션 완료가 성공적으로 실행되었습니다." - -msgid "Migrate volume request issued successfully." -msgstr "볼륨 마이그레이션 요청이 성공적으로 실행되었습니다." - -#, python-format -msgid "Migrating using retype Volume: %(volume)s." -msgstr "볼륨: %(volume)s 재입력을 사용하여 마이그레이션." - -#, python-format -msgid "" -"Modifying %(volume_name)s snap_cpg from %(old_snap_cpg)s to %(new_snap_cpg)s." -msgstr "" -"%(volume_name)s snap_cpg를 %(old_snap_cpg)s에서 %(new_snap_cpg)s(으)로 수정." - -#, python-format -msgid "Modifying %(volume_name)s userCPG from %(old_cpg)s to %(new_cpg)s" -msgstr "%(old_cpg)s에서 %(new_cpg)s(으)로 %(volume_name)s userCPG 수정" - -#, python-format -msgid "Modifying %s comments." -msgstr "%s 주석 수정." - -msgid "" -"Module PyWBEM not installed. Install PyWBEM using the python-pywbem package." -msgstr "" -"PyWBEM 모듈이 설치되지 않았습니다. python-pywbem 패키지를 사용하여 PyWBEM을 " -"설치하십시오. " - -msgid "" -"Module PyWBEM not installed. Install PyWBEM using the python-pywbem package." -msgstr "" -"모듈 PyWBEM이 설치되지 않았습니다. python-pywbem 패키지를 사용하여 PyWBEM을 " -"설치하십시오." - -#, python-format -msgid "Mounting volume: %s ..." -msgstr "볼륨 마운트: %s ..." - -#, python-format -msgid "Mounting volume: %s succeeded" -msgstr "볼륨 마운트: %s 성공" - -#, python-format -msgid "" -"NON-FAST: capacity stats for pool %(poolName)s on array %(arrayName)s " -"total_capacity_gb=%(total_capacity_gb)lu, free_capacity_gb=" -"%(free_capacity_gb)lu." -msgstr "" -"NON-FAST: 배열 %(arrayName)s의 풀 %(poolName)s에 대한 용량 통계 " -"total_capacity_gb=%(total_capacity_gb)lu, free_capacity_gb=" -"%(free_capacity_gb)lu." 
- -msgid "Need to remove FC Zone, building initiator target map" -msgstr "FC Zone을 제거해야 하며, 개시자 대상 맵 빌드" - -msgid "Need to remove FC Zone, building initiator target map." -msgstr "FC Zone을 제거해야 하며, 개시자 대상 맵 빌드." - -#, python-format -msgid "" -"NetApp driver of family %(storage_family)s and protocol %(storage_protocol)s " -"loaded." -msgstr "" -"제품군 %(storage_family)s 및 프로토콜 %(storage_protocol)s의 NetApp 드라이버" -"가 로드되었습니다." - -#, python-format -msgid "New Cinder secure environment indicator file created at path %s." -msgstr "새 Cinder 보안 환경 표시기 파일이 %s 경로에 작성되었습니다." - -#, python-format -msgid "" -"New size is equal to the real size from backend storage, no need to extend. " -"realsize: %(oldsize)s, newsize: %(newsize)s." -msgstr "" -"새 크기는 백엔드 스토리지의 실제 크기와 같으므로 확장하지 않아도 됩니다. " -"realsize: %(oldsize)s, newsize: %(newsize)s." - -#, python-format -msgid "New str info is: %s." -msgstr "새 str 정보: %s." - -#, python-format -msgid "No dpkg-query info found for %(pkg)s package." -msgstr "%(pkg)s 패키지의 dpkg-query 정보를 찾을 수 없습니다." - -#, python-format -msgid "No igroup found for initiator %s" -msgstr "%s 개시자의 igroup을 찾을 수 없음" - -#, python-format -msgid "No iscsi target present for volume id:%(vol_id)s: %(e)s" -msgstr "볼륨 id %(vol_id)s의 iscsi 대상이 없음: %(e)s" - -#, python-format -msgid "No need to extend volume %s as it is already the requested new size." -msgstr "볼륨 %s이(가) 이미 요청된 새 크기이므로 볼륨을 확장하지 않아도 됩니다." - -#, python-format -msgid "" -"No replication synchronization session found associated with source volume " -"%(source)s on %(storageSystem)s." -msgstr "" -"발견된 복제 동기화 세션이 %(storageSystem)s의 소스 볼륨 %(source)s과(와) 연관" -"되지 않았습니다." - -#, python-format -msgid "" -"No restore point found for backup='%(backup)s' of volume %(volume)s although " -"base image is found - forcing full copy." -msgstr "" -"기본 이미지가 있어도 볼륨 %(volume)s의 백업='%(backup)s'의 복원 지점을 찾을 " -"수 없음 - 전체 복사 시행." - -#, python-format -msgid "No rpm info found for %(pkg)s package." -msgstr "%(pkg)s 패키지의 rpm 정보를 찾을 수 없습니다." 
- -#, python-format -msgid "No targets to add or remove connection for initiator: %(init_wwn)s" -msgstr "개시자의 연결을 추가하거나 제거할 대상이 없음: %(init_wwn)s" - -#, python-format -msgid "No volume found for CG: %(cg)s." -msgstr "CG의 볼륨을 찾을 수 없음: %(cg)s." - -#, python-format -msgid "Non fatal cleanup error: %s." -msgstr "치명적이지 않은 정리 오류: %s." - -#, python-format -msgid "OpenStack OS Version Info: %(info)s" -msgstr "OpenStack OS 버전 정보: %(info)s" - -#, python-format -msgid "" -"Origin volume %s appears to be removed, try to remove it from backend if it " -"is there." -msgstr "" -"원래 볼륨 %s이(가) 제거된 것으로 보입니다. 볼륨이 백엔드에 있는 경우 제거하십" -"시오." - -#, python-format -msgid "Overwriting volume %(volume_id)s with restore of backup %(backup_id)s" -msgstr "%(volume_id)s 볼륨을 %(backup_id)s 백업의 복원으로 겹쳐씀" - -#, python-format -msgid "Params for add volume request: %s." -msgstr "볼륨 요청 추가 매개 변수: %s." - -#, python-format -msgid "Performing post clone for %s" -msgstr "%s의 사후 복제 수행" - -#, python-format -msgid "Performing secure delete on volume: %s" -msgstr "%s 볼륨에서 보안 삭제 수행" - -#, python-format -msgid "Pool id is %s." -msgstr "풀 id는 %s입니다." - -#, python-format -msgid "Port group instance name is %(foundPortGroupInstanceName)s." -msgstr "포트 그룹 인스턴스 이름이 %(foundPortGroupInstanceName)s입니다." - -#, python-format -msgid "Post clone resize LUN %s" -msgstr "사후 복제 크기 조정 LUN %s" - -#, python-format -msgid "Prefer use target wwpn %(wwpn)s" -msgstr "대상 wwpn %(wwpn)s 사용 선호" - -#, python-format -msgid "Profile %s has been deleted." -msgstr "프로파일 %s이(가) 삭제되었습니다." - -#, python-format -msgid "Protection domain id: %(domain_id)s." -msgstr "보호 도메인 id: %(domain_id)s." - -#, python-format -msgid "Protection domain name: %(domain_name)s." -msgstr "보호 도메인 이름: %(domain_name)s." - -msgid "Proxy mode detected." -msgstr "프록시 모드가 발견되었습니다." - -#, python-format -msgid "Purging deleted rows older than age=%(age)d days from table=%(table)s" -msgstr "테이블=%(table)s에서 기간=%(age)d일보다 오래된 삭제 행 제거" - -#, python-format -msgid "QoS: %s." 
-msgstr "QoS: %s." - -#, python-format -msgid "Query capacity stats response: %s." -msgstr "용량 통계 쿼리 응답: %s." - -msgid "" -"RBD striping not supported - ignoring configuration settings for rbd striping" -msgstr "RBD 스트리핑이 지원되지 않음 - rbd 스트리핑의 구성 설정 무시" - -#, python-format -msgid "RBD volume %s not found, allowing delete operation to proceed." -msgstr "RBD 볼륨 %s을(를) 찾을 수 없으므로, 삭제 조작을 계속할 수 없습니다." - -#, python-format -msgid "" -"REST server IP: %(ip)s, port: %(port)s, username: %(user)s. Verify server's " -"certificate: %(verify_cert)s." -msgstr "" -"REST 서버 IP: %(ip)s, 포트: %(port)s, 사용자 이름: %(user)s. 서버의 인증서 확" -"인: %(verify_cert)s." - -#, python-format -msgid "Re-using existing purity host %(host_name)r" -msgstr "기존 purity 호스트 %(host_name)r 재사용" - -msgid "Reconnected to coordination backend." -msgstr "조정 백엔드에 다시 연결되었습니다." - -msgid "Reconnecting to coordination backend." -msgstr "조정 백엔드에 다시 연결 중입니다." - -#, python-format -msgid "Registering image in cache %s" -msgstr "캐시 %s에 이미지 등록" - -#, python-format -msgid "Regular file: %s created." -msgstr "일반 파일: %s이(가) 작성되었습니다." - -#, python-format -msgid "" -"Relocating volume: %s to a different datastore due to insufficient disk " -"space on current datastore." -msgstr "" -"현재 데이터 저장소의 디스크 공간이 부족하므로 볼륨 %s을(를) 다른 데이터 저장" -"소로 재배치." - -#, python-format -msgid "Remote return FC info is: %s." -msgstr "원격 리턴 FC 정보: %s." - -msgid "Remove volume export completed successfully." -msgstr "볼륨 내보내기 제거가 성공적으로 완료되었습니다." - -#, python-format -msgid "Removed %s from cg." -msgstr "cg에서 %s이(가) 제거되었습니다." 
- -#, python-format -msgid "Removing ACL from volume=%(vol)s for initiator group %(igrp)s" -msgstr "개시자 그룹 이름 %(igrp)s의 볼륨=%(vol)s에서 ACL 제거" - -#, python-format -msgid "Removing iscsi_target for Volume ID: %s" -msgstr "볼륨 ID %s에 대한 iscsi_target 제거" - -#, python-format -msgid "Removing iscsi_target for volume: %s" -msgstr "%s 볼륨에 대한 iscsi_target 제거" - -#, python-format -msgid "Removing iscsi_target for: %s" -msgstr "%s에 대한 iscsi_target 제거" - -#, python-format -msgid "Removing iscsi_target: %s" -msgstr "iscsi_target 제거: %s" - -#, python-format -msgid "Removing non-active host: %(host)s from scheduler cache." -msgstr "스케줄러 캐시에서 비활성 호스트: %(host)s 제거." - -#, python-format -msgid "Removing volume %(v)s from consistency group %(cg)s." -msgstr "일관성 그룹 %(cg)s에서 볼륨 %(v)s 제거 " - -#, python-format -msgid "Removing volumes from cg %s." -msgstr "cg %s에서 볼륨 제거." - -#, python-format -msgid "Rename Volume %(volume_id)s completed." -msgstr "볼륨 %(volume_id)s 이름 변경이 완료되었습니다." - -#, python-format -msgid "Renaming %(id)s from %(current_name)s to %(new_name)s." -msgstr "%(id)s의 이름을 %(current_name)s에서 %(new_name)s(으)로 변경." - -#, python-format -msgid "Renaming backing VM: %(backing)s to %(new_name)s." -msgstr "백업 VM %(backing)s의 이름을 %(new_name)s(으)로 변경." - -#, python-format -msgid "Renaming existing snapshot %(ref_name)s to %(new_name)s" -msgstr "기본 스냅샷의 이름을 %(ref_name)s에서 %(new_name)s(으)로 변경" - -#, python-format -msgid "Renaming existing volume %(ref_name)s to %(new_name)s" -msgstr "기본 볼륨의 이름을 %(ref_name)s에서 %(new_name)s(으)로 변경" - -#, python-format -msgid "Replication %(vol)s to %(dest)s." -msgstr "%(vol)s을(를) %(dest)s에 복제." - -#, python-format -msgid "Replication created for %(volname)s to %(destsc)s" -msgstr "%(destsc)s에 %(volname)s에 대한 복제 작성" - -#, python-format -msgid "Replication is not configured on backend: %s." -msgstr "복제가 백엔드에 구성되지 않음: %s." - -#, python-format -msgid "Requested image %(id)s is not in raw format." -msgstr "요청된 이미지 %(id)s이(가) 원시 형식이 아닙니다." 
- -#, python-format -msgid "Requested unified config: %(storage_family)s and %(storage_protocol)s." -msgstr "요청된 통합 구성: %(storage_family)s 및 %(storage_protocol)s." - -msgid "Reserve volume completed successfully." -msgstr "볼륨 예약이 성공적으로 완료되었습니다." - -#, python-format -msgid "" -"Reset backup status started, backup_id: %(backup_id)s, status: %(status)s." -msgstr "백업 상태 재설정이 시작됨, backup_id: %(backup_id)s, 상태: %(status)s." - -#, python-format -msgid "Resetting backup %s to available (was restoring)." -msgstr "백업 %s을(를) 사용 가능으로 재설정(복원)." - -#, python-format -msgid "Resetting backup %s to error (was creating)." -msgstr "백업 %s을(를) 오류로 재설정(작성)." - -msgid "Resetting cached RPC version pins." -msgstr "캐시된 RPC 버전 핀을 재설정하는 중입니다." - -#, python-format -msgid "" -"Resetting volume %(vol_id)s to previous status %(status)s (was backing-up)." -msgstr "볼륨 %(vol_id)s을(를) 이전 상태 %(status)s(으)로 재설정(백업)." - -#, python-format -msgid "Resizing LUN %s directly to new size." -msgstr "LUN %s의 크기를 직접 새 크기로 조정합니다." - -#, python-format -msgid "Resizing LUN %s using clone operation." -msgstr "복제 조작을 사용하여 LUN %s 크기 조정." - -#, python-format -msgid "Resizing file to %sG" -msgstr "파일의 크기를 %sG로 조정" - -#, python-format -msgid "Resizing file to %sG..." -msgstr "파일의 크기를 %sG(으)로 조정..." - -#, python-format -msgid "" -"Restore backup finished, backup %(backup_id)s restored to volume " -"%(volume_id)s." -msgstr "" -"백업 복원이 완료됨, 백업 %(backup_id)s이(가) 볼륨 %(volume_id)s(으)로 복원됩" -"니다." - -#, python-format -msgid "Restore backup started, backup: %(backup_id)s volume: %(volume_id)s." -msgstr "백업 복원이 시작됨, 백업: %(backup_id)s 볼륨: %(volume_id)s." - -#, python-format -msgid "Restoring backup %(backup)s to volume %(volume)s." -msgstr "백업 %(backup)s을(를) 볼륨 %(volume)s(으)로 복원 중입니다." 
- -#, python-format -msgid "Restoring backup %(backup_id)s to volume %(volume_id)s" -msgstr "%(backup_id)s 백업을 %(volume_id)s 볼륨으로 복원" - -msgid "Restoring iSCSI target from configuration file" -msgstr "구성 파일에서 iSCSI 대상 복원" - -msgid "Resume volume delete completed successfully." -msgstr "볼륨 삭제 재개가 성공적으로 완료되었습니다." - -#, python-format -msgid "Resuming delete on backup: %s." -msgstr "백업에서 삭제 재개: %s." - -#, python-format -msgid "Return FC info is: %s." -msgstr "리턴 FC 정보: %s." - -#, python-format -msgid "Returning random Port Group: %(portGroupName)s." -msgstr "임의의 포트 그룹 %(portGroupName)s(으)로 리턴합니다." - -#, python-format -msgid "" -"Retype LUN(id: %(lun_id)s) smartcache from (name: %(old_name)s, id: " -"%(old_id)s) to (name: %(new_name)s, id: %(new_id)s) successfully." -msgstr "" -"LUN(id: %(lun_id)s) smartcache를 (name: %(old_name)s, id: %(old_id)s)에서 " -"(name: %(new_name)s, id: %(new_id)s)(으)로 다시 입력하는 데 성공했습니다." - -#, python-format -msgid "" -"Retype LUN(id: %(lun_id)s) smartpartition from (name: %(old_name)s, id: " -"%(old_id)s) to (name: %(new_name)s, id: %(new_id)s) success." -msgstr "" -"LUN(id: %(lun_id)s) smartpartition을 (name: %(old_name)s, id: %(old_id)s)에" -"서 (name: %(new_name)s, id: %(new_id)s)(으)로 다시 입력하는 데 성공했습니다." - -#, python-format -msgid "" -"Retype LUN(id: %(lun_id)s) smartqos from %(old_qos_value)s to %(new_qos)s " -"success." -msgstr "" -"LUN(id: %(lun_id)s) smartqos를 %(old_qos_value)s에서 %(new_qos)s(으)로 다시 " -"입력하는 데 성공했습니다." - -#, python-format -msgid "" -"Retype LUN(id: %(lun_id)s) smarttier policy from %(old_policy)s to " -"%(new_policy)s success." -msgstr "" -"LUN(id: %(lun_id)s) smarttier 정책을 %(old_policy)s에서 %(new_policy)s(으)로 " -"다시 입력하는 데 성공했습니다." - -#, python-format -msgid "Retype Volume %(volume_id)s is completed." -msgstr "볼륨 %(volume_id)s 다시 입력이 완료되었습니다." - -#, python-format -msgid "Retype Volume %(volume_id)s is done and migrated to pool %(pool_id)s." -msgstr "" -"볼륨 %(volume_id)s 다시 입력이 수행되었으며 풀 %(pool_id)s(으)로 마이그레이션" -"되었습니다." 
- -#, python-format -msgid "" -"Retype revert %(volume_name)s snap_cpg from %(new_snap_cpg)s back to " -"%(old_snap_cpg)s." -msgstr "" -"다시 입력에서 %(volume_name)s snap_cpg를 %(new_snap_cpg)s에서 다시 " -"%(old_snap_cpg)s(으)로 되돌립니다." - -msgid "Retype volume completed successfully." -msgstr "볼륨 다시 입력이 성공적으로 완료되었습니다." - -msgid "Retype volume request issued successfully." -msgstr "볼륨 다시 입력 요청이 성공적으로 실행되었습니다." - -msgid "Retype was to same Storage Profile." -msgstr "동일한 스토리지 프로파일에 다시 입력되었습니다." - -msgid "Roll detaching of volume completed successfully." -msgstr "볼륨 연결 해제가 성공적으로 롤링되었습니다." - -#, python-format -msgid "Running with vmemclient version: %s" -msgstr "vmemclient 버전으로 실행: %s" - -#, python-format -msgid "SC server created %s" -msgstr "SC 서버 작성 %s" - -#, python-format -msgid "" -"ScaleIO copy_image_to_volume volume: %(vol)s image service: %(service)s " -"image id: %(id)s." -msgstr "" -"ScaleIO copy_image_to_volume 볼륨: %(vol)s 이미지 서비스: %(service)s 이미지 " -"id: %(id)s." - -#, python-format -msgid "" -"ScaleIO copy_volume_to_image volume: %(vol)s image service: %(service)s " -"image meta: %(meta)s." -msgstr "" -"ScaleIO copy_volume_to_image 볼륨: %(vol)s 이미지 서비스: %(service)s 이미지 " -"메타: %(meta)s." - -#, python-format -msgid "" -"ScaleIO create cloned volume: source volume %(src)s to target volume %(tgt)s." -msgstr "" -"ScaleIO가 소스 볼륨 %(src)s에서 대상 볼륨 %(tgt)s(으)로 복제된 볼륨을 작성합" -"니다." - -#, python-format -msgid "" -"ScaleIO create volume from snapshot: snapshot %(snapname)s to volume " -"%(volname)s." -msgstr "" -"스냅샷에서 ScaleIO가 볼륨 작성: 볼륨 %(volname)s의 스냅샷 %(snapname)s." - -msgid "ScaleIO delete snapshot." -msgstr "ScaleIO에서 스냅샷 삭제." - -#, python-format -msgid "ScaleIO extend volume: volume %(volname)s to size %(new_size)s." -msgstr "ScaleIO에서 볼륨 %(volname)s을(를) 크기 %(new_size)s(으)로 확장합니다." - -#, python-format -msgid "ScaleIO get domain id by name request: %s." -msgstr "ScaleIO 이름별 도메인 가져오기 요청: %s." - -#, python-format -msgid "ScaleIO get pool id by name request: %s." 
-msgstr "ScaleIO에서 이름별 풀 가져오기 요청: %s." - -#, python-format -msgid "ScaleIO get volume by id request: %s." -msgstr "ScaleIO의 id별 볼륨 가져오기 요청: %s." - -#, python-format -msgid "ScaleIO rename volume request: %s." -msgstr "ScaleIO 볼륨 이름 변경 요청: %s." - -msgid "ScaleIO snapshot group of volumes" -msgstr "볼륨의 ScaleIO 스냅샷 그룹" - -#, python-format -msgid "ScaleIO volume %(vol)s was renamed to %(new_name)s." -msgstr "ScaleIO 볼륨 %(vol)s의 이름이 %(new_name)s(으)로 변경되었습니다." - -#, python-format -msgid "" -"Secondary ssh hosts key file %(kwargs)s will be loaded along with %(conf)s " -"from /etc/cinder.conf." -msgstr "" -"보조 ssh 호스트 키 파일 %(kwargs)s이(가) /etc/cinder.conf의 %(conf)s과(와) 함" -"께 로드됩니다." - -msgid "" -"Service not found for updating active_backend_id, assuming default for " -"driver init." -msgstr "" -"active_backend_id 업데이트를 위한 서비스를 찾을 수 없으므로, 드라이버 초기화 " -"기본값을 사용합니다." - -msgid "Session might have expired. Trying to relogin" -msgstr "세션이 만기되었을 수 있습니다. 다시 로그인을 시도합니다." - -msgid "Set backend status to frozen successfully." -msgstr "백엔드 상태가 동결로 설정되었습니다." - -#, python-format -msgid "Set newly managed Cinder volume name to %(name)s." -msgstr "새로 관리된 Cinder 볼륨 이름을 %(name)s(으)로 설정합니다." - -#, python-format -msgid "Setting host %(host)s to %(state)s." -msgstr "%(host)s 호스트를 %(state)s(으)로 설정 중입니다. " - -#, python-format -msgid "Setting snapshot %(snap)s to online_flag %(flag)s" -msgstr "스냅샷 %(snap)s을(를) online_flag %(flag)s(으)로 설정" - -#, python-format -msgid "Setting volume %(vol)s to online_flag %(flag)s" -msgstr "볼륨 %(vol)s을(를) online_flag %(flag)s(으)로 설정" - -#, python-format -msgid "" -"Skipping add target %(target_array)s to protection group %(pgname)s since " -"it's already added." -msgstr "" -"대상 %(target_array)s이(가) 이미 보호 그룹 %(pgname)s에 추가되었으므로 이 작" -"업을 건너뜁니다." - -#, python-format -msgid "" -"Skipping allow pgroup %(pgname)s on target array %(target_array)s since it " -"is already allowed." -msgstr "" -"대상 배열 %(target_array)s에서 pgroup %(pgname)s이(가) 이미 허용되었으므로, " -"이 작업을 건너뜁니다." 
- -#, python-format -msgid "Skipping deletion of volume %s as it does not exist." -msgstr "볼륨 %s이(가) 없으므로 삭제를 건너뜁니다." - -msgid "Skipping ensure_export. Found existing iSCSI target." -msgstr "ensure_export를 건너뜁니다. 기존 iSCSI 대상이 발견되었습니다." - -#, python-format -msgid "" -"Skipping image volume %(id)s because it is not accessible by current Tenant." -msgstr "" -"현재 테넌트에서 액세스할 수 없으므로 이미지 볼륨 %(id)s을(를) 건너뜁니다." - -#, python-format -msgid "" -"Skipping remove_export. No iscsi_target is presently exported for volume: %s" -msgstr "" -"remove_export를 건너뜀. 현재 %s 볼륨에 대한 iscsi_target이 내보내지지 않았습" -"니다. " - -#, python-format -msgid "Skipping remove_export. No iscsi_target provisioned for volume: %s" -msgstr "" -"remove_export를 건너뛰고 있습니다. 볼륨에 대해 프로비저닝된 iscsi_target이 없" -"음: %s" - -#, python-format -msgid "Smb share %(share)s Total size %(size)s Total allocated %(allocated)s" -msgstr "Smb 공유 %(share)s 총 크기 %(size)s 할당된 총계 %(allocated)s" - -#, python-format -msgid "Snapshot %(disp)s '%(new)s' is now being managed." -msgstr "스냅샷 %(disp)s '%(new)s'을(를) 지금 관리 중입니다." - -#, python-format -msgid "" -"Snapshot %(disp)s '%(vol)s' is no longer managed. Snapshot renamed to " -"'%(new)s'." -msgstr "" -"스냅샷 %(disp)s '%(vol)s'이(가) 더 이상 관리되지 않습니다. 스냅샷의 이름이 " -"'%(new)s'(으)로 변경됩니다." - -#, python-format -msgid "" -"Snapshot %(folder)s@%(snapshot)s does not exist, it was already deleted." -msgstr "스냅샷 %(folder)s@%(snapshot)s이(가) 없습니다. 이미 삭제되었습니다." - -#, python-format -msgid "" -"Snapshot %(folder)s@%(snapshot)s has dependent clones, it will be deleted " -"later." -msgstr "" -"스냅샷 %(folder)s@%(snapshot)s에 종속 복제본이 있으므로 나중에 삭제됩니다." - -#, python-format -msgid "Snapshot %s created successfully." -msgstr "스냅샷 %s이(가) 성공적으로 작성되었습니다." - -#, python-format -msgid "Snapshot %s does not exist in backend." -msgstr "백엔드에 스냅샷 %s이(가) 없습니다." - -#, python-format -msgid "Snapshot %s does not exist, it seems it was already deleted." -msgstr "스냅샷 %s이(가) 없습니다. 이미 삭제된 것으로 보입니다." 
- -#, python-format -msgid "Snapshot %s does not exist, it was already deleted." -msgstr "스냅샷 %s이(가) 없습니다. 이미 삭제되었습니다." - -#, python-format -msgid "Snapshot %s has dependent clones, will be deleted later." -msgstr "스냅샷 %s에 종속 복제본이 있으므로 나중에 삭제됩니다." - -#, python-format -msgid "Snapshot %s not found" -msgstr "스냅샷 %s을(를) 찾을 수 없음" - -#, python-format -msgid "Snapshot '%(ref)s' renamed to '%(new)s'." -msgstr "스냅샷 '%(ref)s'의 이름이 '%(new)s'(으)로 변경됩니다." - -msgid "Snapshot create request issued successfully." -msgstr "스냅샷 작성 요청이 성공적으로 실행되었습니다." - -#, python-format -msgid "" -"Snapshot creation %(cloneName)s completed. Source Volume: %(sourceName)s." -msgstr "" -"스냅샷 작성 %(cloneName)s이(가) 완료되었습니다. 소스 볼륨: %(sourceName)s." - -msgid "Snapshot delete request issued successfully." -msgstr "스냅샷 삭제 요청이 성공적으로 실행되었습니다." - -msgid "Snapshot force create request issued successfully." -msgstr "스냅샷 강제 작성 요청이 성공적으로 실행되었습니다." - -#, python-format -msgid "" -"Snapshot record for %s is not present, allowing snapshot_delete to proceed." -msgstr "%s의 스냅샷 레코드가 없으므로, snapshot_delete를 계속할 수 있습니다." - -msgid "Snapshot retrieved successfully." -msgstr "스냅샷이 성공적으로 검색되었습니다." - -#, python-format -msgid "Snapshot volume %(vol)s into snapshot %(id)s." -msgstr "볼륨 %(vol)s의 스냅샷을 스냅샷 %(id)s(으)로 작성합니다." - -#, python-format -msgid "Snapshot volume response: %s." -msgstr "스냅샷 볼륨 응답: %s." - -#, python-format -msgid "Snapshot: %(snapshot)s: not found on the array." -msgstr "스냅샷 %(snapshot)s이(가) 배열에 없음" - -#, python-format -msgid "Source Snapshot: %s" -msgstr "소스 스냅샷: %s" - -#, python-format -msgid "" -"Source and destination ZFSSA shares are the same. Do nothing. volume: %s" -msgstr "" -"소스 및 대상 ZFSSA 공유가 동일합니다. 아무 작업도 수행하지 않습니다. 
볼륨: %s" - -#, python-format -msgid "Start to create cgsnapshot for consistency group: %(group_name)s" -msgstr "일관성 그룹의 cgsnapshot 작성 시작: %(group_name)s" - -#, python-format -msgid "Start to create consistency group: %(group_name)s id: %(id)s" -msgstr "일관성 그룹 작성 시작: %(group_name)s id: %(id)s" - -#, python-format -msgid "Start to delete consistency group: %(cg_name)s" -msgstr "일관성 그룹 삭제 시작: %(cg_name)s" - -#, python-format -msgid "Starting %(topic)s node (version %(version_string)s)" -msgstr "%(topic)s 노드(버전 %(version_string)s) 시작 중" - -#, python-format -msgid "Starting volume driver %(driver_name)s (%(version)s)" -msgstr "볼륨 드라이버 %(driver_name)s (%(version)s) 시작" - -#, python-format -msgid "Storage Group %(storageGroupName)s successfully deleted." -msgstr "스토리지 그룹 %(storageGroupName)s이(가) 성공적으로 삭제되었습니다." - -#, python-format -msgid "Storage group not associated with the policy. Exception is %s." -msgstr "스토리지 그룹이 정책과 연관되지 않았습니다. 예외는 %s입니다." - -#, python-format -msgid "" -"Storage pools names: %(pools)s, storage pool name: %(pool)s, pool id: " -"%(pool_id)s." -msgstr "" -"스토리지 풀 이름: %(pools)s, 스토리지 풀 이름: %(pool)s, 풀 id: %(pool_id)s." - -#, python-format -msgid "Successful login by user %s" -msgstr "%s 사용자로 로그인 완료" - -#, python-format -msgid "Successfully added %(volumeName)s to %(sgGroupName)s." -msgstr "%(volumeName)s이(가) %(sgGroupName)s에 성공적으로 추가되었습니다." - -#, python-format -msgid "Successfully copied disk at: %(src)s to: %(dest)s." -msgstr " %(src)s의 디스크를 %(dest)s(으)로 복사했습니다." - -#, python-format -msgid "Successfully create volume %s" -msgstr "볼륨 %s을(를) 성공적으로 작성" - -#, python-format -msgid "" -"Successfully created a CloudByte volume [%(cb_vol)s] w.r.t OpenStack volume " -"[%(stack_vol)s]." -msgstr "" -"CloudByte 볼륨 [%(cb_vol)s] w.r.t OpenStack 볼륨 [%(stack_vol)s]이(가) 성공적" -"으로 작성되었습니다." - -#, python-format -msgid "Successfully created clone: %s." -msgstr "성공적으로 복제본이 작성됨: %s." 
- -#, python-format -msgid "" -"Successfully created snapshot: %(snap)s for volume backing: %(backing)s." -msgstr "볼륨 백업 %(backing)s의 스냅샷 %(snap)s이(가) 작성되었습니다." - -#, python-format -msgid "Successfully created snapshot: %s." -msgstr "성공적으로 스냅샷이 작성됨: %s." - -#, python-format -msgid "Successfully created volume backing: %s." -msgstr "성공적으로 볼륨 백업이 작성됨: %s" - -#, python-format -msgid "Successfully deleted %s." -msgstr "%s이(가) 성공적으로 삭제되었습니다." - -#, python-format -msgid "Successfully deleted file: %s." -msgstr "성공적으로 파일 삭제: %s." - -#, python-format -msgid "Successfully deleted snapshot: %(name)s of backing: %(backing)s." -msgstr "백업 %(backing)s의 스냅샷 %(name)s이(가) 삭제되었습니다." - -#, python-format -msgid "Successfully deleted snapshot: %s" -msgstr "성공적으로 스냅샷 삭제: %s" - -#, python-format -msgid "Successfully deleted snapshot: %s." -msgstr "성공적으로 스냅샷 삭제: %s." - -#, python-format -msgid "" -"Successfully deleted volume [%(cb_vol)s] at CloudByte corresponding to " -"OpenStack volume [%(stack_vol)s]." -msgstr "" -"OpenStack volume [%(stack_vol)s]에 해당하는 CloudByte의 볼륨 [%(cb_vol)s]이" -"(가) 성공적으로 삭제되었습니다." - -#, python-format -msgid "Successfully deleted volume: %s" -msgstr "성공적으로 볼륨 삭제: %s" - -#, python-format -msgid "Successfully extended virtual disk: %(path)s to %(size)s GB." -msgstr "가상 디스크 %(path)s을(를) %(size)sGB(으)로 확장했습니다." - -#, python-format -msgid "Successfully extended volume %(volume_id)s to size %(size)s." -msgstr "볼륨 %(volume_id)s을(를) 크기 %(size)s(으)로 확장하는 데 성공했습니다." - -#, python-format -msgid "Successfully extended volume: %(vol)s to size: %(size)s GB." -msgstr "볼륨 %(vol)s을(를) %(size)sGB 크기로 확장했습니다." - -#, python-format -msgid "Successfully got volume information for volume %s" -msgstr "볼륨 %s의 볼륨 정보를 성공적으로 가져옴" - -#, python-format -msgid "Successfully initialized connection with volume: %(volume_id)s." -msgstr "볼륨 %(volume_id)s과(와) 연결이 성공적으로 시작되었습니다." - -#, python-format -msgid "" -"Successfully initialized connection. 
target_wwn: %(target_wwn)s, " -"initiator_target_map: %(initiator_target_map)s, lun: %(target_lun)s." -msgstr "" -"연결이 성공적으로 시작되었습니다. target_wwn: %(target_wwn)s, " -"initiator_target_map: %(initiator_target_map)s, lun: %(target_lun)s." - -#, python-format -msgid "" -"Successfully moved volume backing: %(backing)s into the folder: %(fol)s." -msgstr "볼륨 백업 %(backing)s을(를) 폴더 %(fol)s(으)로 이동했습니다." - -#, python-format -msgid "" -"Successfully relocated volume backing: %(backing)s to datastore: %(ds)s and " -"resource pool: %(rp)s." -msgstr "" -"볼륨 백업 %(backing)s을(를) 데이터 저장소 %(ds)s 및 자원 풀 %(rp)s(으)로 재배" -"치했습니다." - -msgid "Successfully retrieved InitiatorGrpList" -msgstr "성공적으로 InitiatorGrpList 검색" - -#, python-format -msgid "Successfully setup driver: %(driver)s for server: %(ip)s." -msgstr "%(ip)s 서버의 드라이버 %(driver)s을(를) 성공적으로 설정했습니다." - -#, python-format -msgid "Successfully setup replication for %s." -msgstr "%s의 설정이 성공적으로 복제되었습니다." - -#, python-format -msgid "Successfully terminated connection for volume: %(volume_id)s." -msgstr "볼륨 %(volume_id)s의 연결이 성공적으로 종료되었습니다." - -#, python-format -msgid "" -"Successfully update volume stats. backend: %(volume_backend_name)s, vendor: " -"%(vendor_name)s, driver version: %(driver_version)s, storage protocol: " -"%(storage_protocol)s." -msgstr "" -"볼륨 통계가 성공적으로 업데이트되었습니다. 백엔드: %(volume_backend_name)s, " -"공급자: %(vendor_name)s, 드라이버 버전: %(driver_version)s, 스토리지 프로토" -"콜: %(storage_protocol)s." - -#, python-format -msgid "" -"Successfully updated CloudByte volume [%(cb_vol)s] corresponding to " -"OpenStack volume [%(ops_vol)s]." -msgstr "" -"OpenStack 볼륨 [%(ops_vol)s]에 해당하는 CloudByte 볼륨 [%(cb_vol)s]이(가) 성" -"공적으로 업데이트되었습니다." - -#, python-format -msgid "Switching volume %(vol)s to profile %(prof)s." -msgstr "볼륨 %(vol)s을(를) 프로파일 %(prof)s에 전환." - -#, python-format -msgid "System %(id)s has %(status)s status." -msgstr "시스템 %(id)s의 상태가 %(status)s입니다." 
- -#, python-format -msgid "" -"System with controller addresses [%s] is not registered with web service." -msgstr "제어기 주소가 [%s]인 시스템이 웹 서비스에 등록되지 않았습니다." - -#, python-format -msgid "Target wwns in masking view %(maskingView)s: %(targetWwns)s." -msgstr "마스킹 보기 %(maskingView)s의 wwns: %(targetWwns)s." - -#, python-format -msgid "Terminate connection: %(volume)s." -msgstr "연결 종료: %(volume)s." - -msgid "Terminate volume connection completed successfully." -msgstr "볼륨 연결 종료가 성공적으로 완료되었습니다." - -msgid "Thawed backend successfully." -msgstr "백엔드의 동결이 성공적으로 해제되었습니다." - -msgid "" -"The NAS file operations will be run as non privileged user in secure mode. " -"Please ensure your libvirtd settings have been configured accordingly (see " -"section 'OpenStack' in the Quobyte Manual." -msgstr "" -"보안 모드에서 권한이 없는 사용자로 NAS 파일 조작이 실행됩니다. libvirtd 설정" -"이 적절하게 구성되었는지 확인하십시오(Quobyte 매뉴얼에서 'OpenStack' 섹션 참" -"조." - -#, python-format -msgid "The QoS sepcs is: %s." -msgstr "QoS 사양: %s." - -#, python-format -msgid "" -"The image was successfully converted, but image size is unavailable. src " -"%(src)s, dest %(dest)s. %(error)s" -msgstr "" -"이미지를 성공적으로 전환했지만 이미지 크기를 사용할 수 없습니다. src " -"%(src)s, dest %(dest)s. %(error)s" - -#, python-format -msgid "" -"The multi-attach E-Series host group '%(label)s' already exists with " -"clusterRef %(clusterRef)s" -msgstr "" -"clusterRef가 %(clusterRef)s인 다중 연결 E-Series 호스트 그룹 '%(label)s'이" -"(가) 이미 있음" - -#, python-format -msgid "The pool_name from extraSpecs is %(pool)s." -msgstr "extraSpecs의 pool_name은 %(pool)s입니다." - -#, python-format -msgid "The same hostid is: %s." -msgstr "동일한 hostid: %s." - -#, python-format -msgid "The storage group found is %(foundStorageGroupInstanceName)s." -msgstr "발견한 스토리지 그룹이 %(foundStorageGroupInstanceName)s입니다." - -#, python-format -msgid "The target instance device id is: %(deviceid)s." -msgstr "대상 인스턴스 장치 id: %(deviceid)s." - -#, python-format -msgid "" -"The volume belongs to more than one storage group. 
Returning storage group " -"%(sgName)s." -msgstr "" -"볼륨이 두 개 이상의 스토리지 그룹에 속합니다. 스토리지 그룹 %(sgName)s을(를) " -"리턴합니다." - -#, python-format -msgid "" -"There is no backing for the snapshotted volume: %(snap)s. Not creating any " -"backing for the volume: %(vol)s." -msgstr "" -"스냅샷이 작성된 볼륨 %(snap)s의 백업이 없습니다. 볼륨 %(vol)s의 백업을 작성하" -"지 않습니다." - -#, python-format -msgid "" -"There is no backing for the source volume: %(src)s. Not creating any backing " -"for volume: %(vol)s." -msgstr "" -"소스 볼륨 %(src)s의 백업이 없습니다. 볼륨 %(vol)s의 백업을 작성하지 않습니다." - -#, python-format -msgid "There is no backing for the volume: %s. Need to create one." -msgstr "볼륨 %s의 백업이 없습니다. 하나를 작성해야 합니다." - -#, python-format -msgid "There is no backing for volume: %s; no need to extend the virtual disk." -msgstr "볼륨 %s의 백업이 없으므로 가상 디스크를 확장하지 않아도 됩니다." - -#, python-format -msgid "There is no backing, and so there is no snapshot: %s." -msgstr "백업이 없으므로 스냅샷이 없음: %s." - -#, python-format -msgid "There is no backing, so will not create snapshot: %s." -msgstr "백업이 없으므로 스냅샷이 작성되지 않음: %s." - -#, python-format -msgid "" -"There is no snapshot point for the snapshotted volume: %(snap)s. Not " -"creating any backing for the volume: %(vol)s." -msgstr "" -"스냅샷이 작성된 볼륨 %(snap)s의 스냅샷 지점이 없습니다. 볼륨 %(vol)s의 백업" -"을 작성하지 않습니다." - -msgid "Token is invalid, going to re-login and get a new one." -msgstr "토큰이 올바르지 않으므로, 다시 로그인하여 새 토큰을 얻으십시오." - -msgid "Transfer volume completed successfully." -msgstr "볼륨 전송이 성공적으로 완료되었습니다." - -#, python-format -msgid "Tried to delete non-existent vdisk %s." -msgstr "존재하지 않는 vdisk %s을(를) 삭제하려고 했습니다." - -#, python-format -msgid "" -"Tried to delete snapshot %s, but was not found in Datera cluster. Continuing " -"with delete." -msgstr "" -"스냅샷 %s을(를) 삭제하려고 시도했지만 Datera 클러스터에서 찾을 수 없습니다. " -"삭제를 계속합니다." - -#, python-format -msgid "" -"Tried to delete volume %s, but it was not found in the Datera cluster. " -"Continuing with delete." -msgstr "" -"볼륨 %s을(를) 삭제하려고 시도했지만 Datera 클러스터에서 찾을 수 없습니다. 
삭" -"제를 계속합니다." - -#, python-format -msgid "" -"Tried to detach volume %s, but it was not found in the Datera cluster. " -"Continuing with detach." -msgstr "" -"볼륨 %s의 연결을 해제하려고 시도했지만 Datera 클러스터에서 찾을 수 없습니다. " -"연결 해제를 계속합니다." - -#, python-format -msgid "Trying to unmap volume from all sdcs before deletion: %s." -msgstr "삭제 전에 모든 sdcs에서 볼륨의 맵핑 해제 시도: %s." - -msgid "Unable to accept transfer for volume, because it is in maintenance." -msgstr "볼륨이 유지보수 중이므로 전송을 승인할 수 없습니다." - -msgid "Unable to attach volume, because it is in maintenance." -msgstr "볼륨이 유지보수 중이므로 연결할 수 없습니다." - -msgid "Unable to create the snapshot for volume, because it is in maintenance." -msgstr "볼륨이 유지보수 중이므로 스냅샷을 작성할 수 없습니다." - -msgid "Unable to detach volume, because it is in maintenance." -msgstr "볼륨이 유지보수 중이므로 연결을 해제할 수 없습니다." - -msgid "Unable to get Cinder internal context, will not use image-volume cache." -msgstr "" -"Cinder 내부 컨텍스트를 가져올 수 없음, 이미지-볼륨 캐시를 사용하지 않습니다." - -#, python-format -msgid "Unable to get remote copy information for volume %s" -msgstr "볼륨 %s의 원격 복사 정보를 가져올 수 없음" - -msgid "" -"Unable to initialize the connection for volume, because it is in maintenance." -msgstr "볼륨이 유지보수 중이므로 볼륨의 연결을 초기화할 수 없습니다." - -#, python-format -msgid "Unable to serialize field '%s' - excluding from backup" -msgstr "필드 '%s'을(를) 직렬화할 수 없음 - 백업에서 제외" - -msgid "Unable to update volume, because it is in maintenance." -msgstr "볼륨이 유지보수 중이므로 업데이트할 수 없습니다." - -#, python-format -msgid "Unexporting lun %s." -msgstr "lun %s 내보내기 취소." - -#, python-format -msgid "Unmanage snapshot with id: %s" -msgstr "ID가 %s인 스냅샷 관리 취소" - -#, python-format -msgid "Unmanage volume %(volume_id)s completed." -msgstr "볼륨 %(volume_id)s 관리 취소가 완료되었습니다." - -#, python-format -msgid "Unmanage volume %s" -msgstr "볼륨 %s 관리 취소" - -#, python-format -msgid "Unmanage volume with id: %s" -msgstr "ID가 %s인 볼륨 관리 취소" - -#, python-format -msgid "Unmanaged LUN with current path %(path)s and uuid %(uuid)s." 
-msgstr "현재 경로가 %(path)s이고 uuid가 %(uuid)s인 관리 취소된 LUN." - -#, python-format -msgid "Unmanaged volume with current label %(label)s and wwn %(wwn)s." -msgstr "현재 레이블 %(label)s 및 wwn %(wwn)s의 볼륨 관리가 취소되었습니다." - -#, python-format -msgid "Unmap volume: %(volume)s." -msgstr "볼륨 맵핑 해제: %(volume)s." - -msgid "Unreserve volume completed successfully." -msgstr "볼륨 예약 취소가 성공적으로 완료되었습니다." - -#, python-format -msgid "" -"Update Consistency Group: %(group)s. This adds and/or removes volumes from a " -"CG." -msgstr "" -"일관성 그룹 %(group)s을(를) 업데이트하십시오. 그러면 CG에서 볼륨을 추가/또" -"는 제거합니다." - -msgid "Update consistency group completed successfully." -msgstr "일관성 그룹 업데이트가 성공적으로 완료되었습니다." - -#, python-format -msgid "Update migrated volume %(new_volume)s completed." -msgstr "마이그레이션된 볼륨 %(new_volume)s의 업데이트가 완료되었습니다." - -msgid "Update readonly setting on volume completed successfully." -msgstr "볼륨에서 읽기 전용 설정을 업데이트하는 작업이 공적으로 완료되었습니다." - -msgid "Update snapshot metadata completed successfully." -msgstr "스냅샷 메타데이터 업데이트가 성공적으로 완료되었습니다." - -msgid "Update volume admin metadata completed successfully." -msgstr "볼륨 관리 메타데이터 업데이트가 성공적으로 완료되었습니다." - -msgid "Update volume metadata completed successfully." -msgstr "볼륨 메타데이터 업데이트가 성공적으로 완료되었습니다." - -#, python-format -msgid "Updated Consistency Group %s" -msgstr "일관성 그룹 %s이(가) 업데이트됨" - -#, python-format -msgid "" -"Updating consistency group %(id)s with name %(name)s description: " -"%(description)s add_volumes: %(add_volumes)s remove_volumes: " -"%(remove_volumes)s." -msgstr "" -"일관성 그룹 %(id)s을(를) 이름 %(name)s으로 업데이트. 설명: %(description)s " -"add_volumes: %(add_volumes)s remove_volumes: %(remove_volumes)s." - -#, python-format -msgid "Updating snapshot %(id)s with info %(dict)s" -msgstr "스냅샷 %(id)s을(를) 정보 %(dict)s(으)로 업데이트" - -#, python-format -msgid "Updating status for CG: %(id)s." -msgstr "CG의 상태 업데이트: %(id)s." 
- -#, python-format -msgid "Updating storage service catalog information for backend '%s'" -msgstr "백엔드 '%s'의 스토리지 서비스 카탈로그 정보 업데이트" - -msgid "Use ALUA when adding initiator to host." -msgstr "개시자를 호스트에 추가할 때 ALUA를 사용하십시오." - -msgid "Use CHAP when adding initiator to host." -msgstr "개시자를 호스트에 추가할 때 CHAP을 사용하십시오." - -#, python-format -msgid "" -"Using FC Zone Manager %(zm_version)s, Driver %(drv_name)s %(drv_version)s." -msgstr "" -"FC Zone 관리자 사용 %(zm_version)s, 드라이버 %(drv_name)s %(drv_version)s." - -#, python-format -msgid "Using FC lookup service %s." -msgstr "FC 검색 서비스 %s 사용." - -#, python-format -msgid "Using compute cluster(s): %s." -msgstr "컴퓨트 클러스터 사용: %s." - -#, python-format -msgid "Using existing initiator group name: %(igGroupName)s." -msgstr "기존 개시자 그룹 이름 사용: %(igGroupName)s." - -msgid "" -"Using extra_specs for defining QoS specs will be deprecated in the N release " -"of OpenStack. Please use QoS specs." -msgstr "" -"QoS 사양을 정의하는 데 extra_specs을 사용하는 기능은 OpenStack의 N 릴리스에" -"서 더 이상 사용되지 않습니다. QoS 사양을 사용하십시오." - -#, python-format -msgid "Using overridden vmware_host_version from config: %s" -msgstr "구성에서 겹쳐쓴 vmware_host_version 사용: %s" - -#, python-format -msgid "Using pool %(pool)s instead of %(cpg)s" -msgstr "%(cpg)s 대신 풀 %(pool)s 사용" - -msgid "VF context is changed in the session." -msgstr "VF 컨텍스트가 세션에서 변경되었습니다." - -#, python-format -msgid "Value with type=%s is not serializable" -msgstr "type=%s인 값은 직렬화되지 않음" - -#, python-format -msgid "Virtual volume %(disp)s '%(new)s' is being retyped." -msgstr "가상 볼륨 %(disp)s '%(new)s'을(를) 다시 입력 중입니다." - -#, python-format -msgid "Virtual volume %(disp)s '%(new)s' is now being managed." -msgstr "가상 볼륨 %(disp)s '%(new)s'을(를) 지금 관리 중입니다." - -#, python-format -msgid "" -"Virtual volume %(disp)s '%(new)s' snapCPG is empty so it will be set to: " -"%(cpg)s" -msgstr "" -"가상 볼륨 %(disp)s '%(new)s' snapCPG가 비어 있으므로, %(cpg)s(으)로 설정됩니" -"다." 
- -#, python-format -msgid "" -"Virtual volume %(disp)s '%(vol)s' is no longer managed. Volume renamed to " -"'%(new)s'." -msgstr "" -"가상 볼륨 %(disp)s '%(vol)s'이(가) 더 이상 관리되지 않습니다. 볼륨의 이름이 " -"'%(new)s'(으)로 변경됩니다." - -#, python-format -msgid "Virtual volume %(disp)s successfully retyped to %(new_type)s." -msgstr "" -"가상 볼륨 %(disp)s이(가) 성공적으로 %(new_type)s(으)로 다시 입력되었습니다." - -#, python-format -msgid "Virtual volume '%(ref)s' renamed to '%(new)s'." -msgstr "가상 볼륨 '%(ref)s'의 이름이 '%(new)s'(으)로 변경됩니다." - -#, python-format -msgid "Vol copy job completed for dest %s." -msgstr "대상 %s에 대한 볼륨 복사 작업이 완료되었습니다." - -#, python-format -msgid "Volume %(volume)s does not have meta device members." -msgstr "볼륨 %(volume)s에 메타 장치 멤버가 없습니다." - -#, python-format -msgid "" -"Volume %(volume)s is already mapped. The device number is %(deviceNumber)s." -msgstr "" -"볼륨 %(volume)s이(가) 이미 맵핑되었습니다. 장치 번호는 %(deviceNumber)s입니" -"다." - -#, python-format -msgid "Volume %(volumeName)s not in any storage group." -msgstr "스토리지 그룹에 볼륨 %(volumeName)s이(가) 없습니다." - -#, python-format -msgid "" -"Volume %(volume_id)s: being created as %(create_type)s with specification: " -"%(volume_spec)s" -msgstr "" -"볼륨 %(volume_id)s: 사양이 %(volume_spec)s인 %(create_type)s(으)로 작성됨" - -#, python-format -msgid "Volume %(volume_name)s (%(volume_id)s): created successfully" -msgstr "볼륨 %(volume_name)s (%(volume_id)s): 성공적으로 작성됨" - -#, python-format -msgid "Volume %s converted." -msgstr "볼륨 %s이(가) 전환되었습니다." - -#, python-format -msgid "Volume %s created" -msgstr "볼륨 %s이(가) 작성됨" - -#, python-format -msgid "Volume %s does not exist, it seems it was already deleted." -msgstr "볼륨 %s이(가) 없습니다. 이미 삭제된 것으로 보입니다." - -#, python-format -msgid "Volume %s has been transferred." -msgstr "볼륨 %s이(가) 전송되었습니다." - -#, python-format -msgid "Volume %s is mapping to multiple hosts." -msgstr "볼륨 %s이(가) 여러 호스트에 맵핑됩니다." - -#, python-format -msgid "Volume %s is not mapped. No volume to unmap." -msgstr "%s 볼륨이 맵핑되지 않았습니다. 맵핑 해제할 볼륨이 없습니다. 
" - -#, python-format -msgid "Volume %s presented." -msgstr "볼륨 %s이(가) 제공되었습니다." - -#, python-format -msgid "Volume %s retyped." -msgstr "볼륨 %s이(가) 다시 입력되었습니다." - -#, python-format -msgid "Volume %s unmanaged." -msgstr "볼륨 %s이(가) 관리 취소되었습니다." - -#, python-format -msgid "Volume %s will be deleted later." -msgstr "볼륨 %s이(가) 나중에 삭제됩니다." - -#, python-format -msgid "Volume %s: retyped successfully" -msgstr "볼륨 %s이(가) 성공적으로 다시 입력됨" - -#, python-format -msgid "Volume already mapped, retrieving %(ig)s, %(vol)s" -msgstr "볼륨이 이미 맵핑됨, %(ig)s, %(vol)s 검색" - -#, python-format -msgid "Volume copy %(size_in_m).2f MB at %(mbps).2f MB/s" -msgstr "%(mbps).2f MB/s에서 볼륨 복사 %(size_in_m).2f MB" - -#, python-format -msgid "Volume copy completed (%(size_in_m).2f MB at %(mbps).2f MB/s)." -msgstr " %(mbps).2f MB/s)에서 볼륨 복사 완료 (%(size_in_m).2f MB." - -msgid "Volume created successfully." -msgstr "볼륨이 성공적으로 작성되었습니다." - -msgid "Volume detach called, but volume not attached." -msgstr "볼륨 연결 해제가 호출되었지만, 볼륨이 연결되어 있지 않습니다." - -msgid "Volume info retrieved successfully." -msgstr "볼륨 정보가 성공적으로 검색되었습니다." - -#, python-format -msgid "Volume mappings for %(name)s: %(mappings)s" -msgstr "%(name)s의 볼륨 맵핑: %(mappings)s" - -#, python-format -msgid "Volume name changed from %(tmp)s to %(orig)s" -msgstr "%(tmp)s에서 %(orig)s(으)로 볼륨 이름 변경" - -#, python-format -msgid "Volume name changed from %(tmp)s to %(orig)s." -msgstr "%(tmp)s에서 %(orig)s(으)로 볼륨 이름이 변경되었습니다." - -msgid "Volume retrieved successfully." -msgstr "볼륨이 성공적으로 검색되었습니다." - -#, python-format -msgid "Volume service: %(label)s. Casted to: %(loc)s" -msgstr "볼륨 서비스: %(label)s. 캐스트: %(loc)s" - -#, python-format -msgid "Volume status is: %s." -msgstr "볼륨 통계: %s." - -#, python-format -msgid "Volume type is %s." -msgstr "볼륨 유형이 %s입니다." - -#, python-format -msgid "" -"Volume type: %(volume_type)s, storage pool name: %(pool_name)s, storage pool " -"id: %(pool_id)s, protection domain id: %(domain_id)s, protection domain " -"name: %(domain_name)s." 
-msgstr "" -"볼륨 유형: %(volume_type)s, 스토리지 풀 이름: %(pool_name)s, 스토리지 풀 id: " -"%(pool_id)s, 보호 도메인 id: %(domain_id)s, 보호 도메인 이름: " -"%(domain_name)s." - -msgid "Volume updated successfully." -msgstr "볼륨이 성공적으로 업데이트되었습니다." - -#, python-format -msgid "Volume with given ref %s need not be renamed during manage operation." -msgstr "" -"지정된 ref %s이(가) 있는 볼륨은 관리 조작 중에 이름을 바꾸지 않아도 됩니다." - -#, python-format -msgid "" -"Volume: %(vol_id)s, size: %(vol_size)d is larger than backup: %(backup_id)s, " -"size: %(backup_size)d, continuing with restore." -msgstr "" -"볼륨: %(vol_id)s, 크기: %(vol_size)d이(가) 백업: %(backup_id)s, 크기: " -"%(backup_size)d보다 큽니다. 복원을 계속합니다." - -#, python-format -msgid "WWPN on node %(node)s: %(wwpn)s." -msgstr "노드 %(node)s의 WWPN: %(wwpn)s." - -#, python-format -msgid "" -"Waiting for volume expansion of %(vol)s to complete, current remaining " -"actions are %(action)s. ETA: %(eta)s mins." -msgstr "" -"%(vol)s의 볼륨 확장이 완료될 때까지 대기 중, 현재 나머지 작업은 %(action)s입" -"니다. ETA: %(eta)s분." - -msgid "Waiting for web service array communication." -msgstr "웹 서비스 배열 통신에 대기합니다." - -msgid "Waiting for web service to validate the configured password." -msgstr "웹 서비스에서 구성된 비밀번호를 검증하는 동안 대기합니다." - -#, python-format -msgid "Will clone a volume from the image volume %(id)s." -msgstr "이미지 볼륨 %(id)s에서 볼륨을 복제합니다." - -#, python-format -msgid "XtremIO SW version %s" -msgstr "XtremIO SW 버전 %s" - -#, python-format -msgid "ZFSSA version: %s" -msgstr "ZFSSA 버전: %s" - -#, python-format -msgid "Zone exists in I-T mode. Skipping zone creation %s" -msgstr "I-T 모드에 구역이 있습니다. %s의 구역 작성을 건너뜁니다." - -#, python-format -msgid "Zone exists in I-T mode. Skipping zone creation for %(zonename)s" -msgstr "I-T 모드에 구역이 있습니다. %(zonename)s의 구역 작성을 건너뜁니다." - -#, python-format -msgid "Zone map to add: %s" -msgstr "추가할 구역 맵: %s" - -msgid "" -"Zone name created using prefix because either host name or storage system is " -"none." -msgstr "" -"호스트 이름이나 스토리지 시스템이 없으므로 접두어를 사용하여 구역 이름이 작성" -"되었습니다." 
- -msgid "Zone name created using prefix because host name is none." -msgstr "호스트 이름이 없으므로 접두어를 사용하여 구역 이름이 작성되었습니다." - -#, python-format -msgid "Zoning Policy: %s, not recognized" -msgstr "구역 지정 정책: %s, 인식되지 않음" - -#, python-format -msgid "Zoning policy for Fabric %(policy)s" -msgstr "패브릭 %(policy)s의 구역 지정 정책" - -#, python-format -msgid "Zoning policy for Fabric %s" -msgstr "패브릭 %s의 구역 지정 정책" - -#, python-format -msgid "Zoning policy for fabric %(policy)s" -msgstr "패브릭 %(policy)s의 구역 지정 정책" - -#, python-format -msgid "Zoning policy for fabric %s" -msgstr "패브릭 %s의 구역 지정 정책" - -msgid "Zoning policy is not valid, no zoning will be performed." -msgstr "구역 지정 정책이 올바르지 않음, 구역 지정이 수행되지 않습니다." - -#, python-format -msgid "" -"_check_volume_copy_ops: Volume %(vol)s does not have the specified vdisk " -"copy operation: orig=%(orig)s new=%(new)s." -msgstr "" -"_check_volume_copy_ops:: 볼륨 %(vol)s에 지정된 vdisk 복사 조작이 없음: orig=" -"%(orig)s new=%(new)s." - -msgid "_delete_copysession, The copysession was already completed." -msgstr "_delete_copysession, copysession이 이미 완료되었습니다." - -#, python-format -msgid "" -"_delete_volume_setting, volumename:%(volumename)s, volume not found on " -"ETERNUS. " -msgstr "" -"_delete_volume_setting, volumename:%(volumename)s, ETERNUS에 볼륨이 없습니" -"다. " - -#, python-format -msgid "_get_tgt_ip_from_portgroup: Get ip: %s." -msgstr "_get_tgt_ip_from_portgroup: ip 가져오기: %s." - -#, python-format -msgid "_get_tgt_iqn: iSCSI target iqn is: %s." -msgstr "_get_tgt_iqn: iSCSI 대상 iqn: %s." - -#, python-format -msgid "_unmap_lun, volumename: %(volumename)s, volume is not mapped." -msgstr "_unmap_lun, volumename: %(volumename)s, 볼륨이 맵핑되지 않았습니다." - -#, python-format -msgid "_unmap_lun, volumename:%(volumename)s, volume not found." -msgstr "_unmap_lun, volumename:%(volumename)s, 볼륨을 찾을 수 없습니다." - -#, python-format -msgid "" -"add_host_with_check. create host success. host name: %(name)s, host id: " -"%(id)s" -msgstr "" -"add_host_with_check.호스트 작성 성공. 
호스트 이름: %(name)s, 호스트 id: " -"%(id)s" - -#, python-format -msgid "add_host_with_check. host name: %(name)s, host id: %(id)s" -msgstr "add_host_with_check. 호스트 이름: %(name)s, 호스트 id: %(id)s" - -#, python-format -msgid "casted to %s" -msgstr "%s(으)로 캐스트" - -#, python-format -msgid "cgsnapshot %s: created successfully" -msgstr "cgsnapshot %s: 성공적으로 작성됨" - -#, python-format -msgid "cgsnapshot %s: deleted successfully" -msgstr "cgsnapshot %s: 성공적으로 삭제됨" - -#, python-format -msgid "cgsnapshot %s: deleting" -msgstr "cgsnapshot %s: 삭제" - -#, python-format -msgid "create_cloned_volume, info: %s, Exit method." -msgstr "create_cloned_volume, 정보: %s, 메소드를 종료합니다." - -#, python-format -msgid "" -"create_cloned_volume, target volume id: %(tid)s, source volume id: %(sid)s, " -"Enter method." -msgstr "" -"create_cloned_volume, 대상 볼륨 id: %(tid)s, 소스 볼륨 id: %(sid)s, 메소드를 " -"입력합니다." - -#, python-format -msgid "" -"create_hostgroup_with_check. Create hostgroup success. hostgroup name: " -"%(name)s, hostgroup id: %(id)s" -msgstr "" -"create_hostgroup_with_check. hostgroup 작성 성공. hostgroup 이름: %(name)s, " -"hostgroup id: %(id)s" - -#, python-format -msgid "" -"create_hostgroup_with_check. hostgroup name: %(name)s, hostgroup id: %(id)s" -msgstr "" -"create_hostgroup_with_check. hostgroup 이름: %(name)s, hostgroup id: %(id)s" - -#, python-format -msgid "create_snapshot, info: %s, Exit method." -msgstr "create_snapshot, 정보: %s, 메소드를 종료합니다." - -#, python-format -msgid "create_snapshot, snap id: %(sid)s, volume id: %(vid)s, Enter method." -msgstr "" -"create_snapshot, 스냅 id: %(sid)s, 볼륨 id: %(vid)s, 메소드를 입력합니다." - -#, python-format -msgid "create_volume, info: %s, Exit method." -msgstr "create_volume, 정보: %s, 메소드를 종료합니다." - -#, python-format -msgid "create_volume, volume id: %s, Enter method." -msgstr "create_volume, 볼륨 id: %s, 메소드를 입력합니다." - -#, python-format -msgid "create_volume_from_snapshot, info: %s, Exit method." -msgstr "create_volume_from_snapshot, 정보: %s, 메소드를 종료합니다." 
- -#, python-format -msgid "" -"create_volume_from_snapshot, volume id: %(vid)s, snap id: %(sid)s, Enter " -"method." -msgstr "" -"create_volume_from_snapshot, 볼륨 id: %(vid)s, 스냅 id: %(sid)s, 메소드를 입" -"력합니다." - -#, python-format -msgid "" -"create_volume_from_snapshot: src_lun_id: %(src_lun_id)s, tgt_lun_id: " -"%(tgt_lun_id)s, copy_name: %(copy_name)s." -msgstr "" -"create_volume_from_snapshot: src_lun_id: %(src_lun_id)s, tgt_lun_id: " -"%(tgt_lun_id)s, copy_name: %(copy_name)s." - -#, python-format -msgid "delete_snapshot, delete: %s, Exit method." -msgstr "delete_snapshot, 삭제: %s, 메소드를 종료합니다." - -#, python-format -msgid "delete_snapshot, snap id: %(sid)s, volume id: %(vid)s, Enter method." -msgstr "" -"delete_snapshot, 스냅 id: %(sid)s, 볼륨 id: %(vid)s, 메소드를 입력합니다." - -#, python-format -msgid "delete_volume, delete: %s, Exit method." -msgstr "delete_volume, 삭제: %s, 메소드를 종료합니다." - -#, python-format -msgid "delete_volume, volume id: %s, Enter method." -msgstr "delete_volume, 볼륨 id: %s, 메소드를 입력합니다." - -#, python-format -msgid "" -"do_mapping, lun_group: %(lun_group)s, view_id: %(view_id)s, lun_id: " -"%(lun_id)s." -msgstr "" -"do_mapping, lun_group: %(lun_group)s, view_id: %(view_id)s, lun_id: " -"%(lun_id)s." - -#, python-format -msgid "extend_volume, used pool name: %s, Exit method." -msgstr "extend_volume, 사용된 풀 이름: %s, 메소드를 종료합니다." - -#, python-format -msgid "extend_volume, volume id: %s, Enter method." -msgstr "extend_volume, 볼륨 id: %s, 메소드를 입력합니다." - -#, python-format -msgid "igroup %(grp)s found for initiator %(iname)s" -msgstr "개시자 %(iname)s의 igroup %(grp)s을(를) 찾을 수 없음" - -#, python-format -msgid "initialize_connection success. Return data: %s." -msgstr "initialize_connection 성공: 데이터 리턴: %s." - -#, python-format -msgid "initialize_connection volume: %(volume)s, connector: %(connector)s" -msgstr "initialize_connection 볼륨: %(volume)s, 커넥터: %(connector)s" - -#, python-format -msgid "initialize_connection, host lun id is: %s." 
-msgstr "initialize_connection, 호스트 lun id: %s." - -#, python-format -msgid "initialize_connection, info: %s, Exit method." -msgstr "initialize_connection, 정보: %s, 메소드를 종료합니다." - -#, python-format -msgid "initialize_connection, initiator: %(wwpns)s, LUN ID: %(lun_id)s." -msgstr "initialize_connection, 개시자: %(wwpns)s, LUN ID: %(lun_id)s." - -#, python-format -msgid "" -"initialize_connection, iscsi_iqn: %(iscsi_iqn)s, target_ip: %(target_ip)s, " -"portgroup_id: %(portgroup_id)s." -msgstr "" -"initialize_connection, iscsi_iqn: %(iscsi_iqn)s, target_ip: %(target_ip)s, " -"portgroup_id: %(portgroup_id)s." - -#, python-format -msgid "initialize_connection, metadata is: %s." -msgstr "initialize_connection, 메타데이터: %s." - -#, python-format -msgid "" -"initialize_connection, volume id: %(vid)s, initiator: %(initiator)s, Enter " -"method." -msgstr "" -"initialize_connection, 볼륨 id: %(vid)s, 개시자: %(initiator)s, 메소드를 입력" -"합니다." - -#, python-format -msgid "" -"initialize_connection, volume: %(volume)s, target_lun: %(target_lun)s, " -"target_luns: %(target_luns)s, Volume is already mapped." -msgstr "" -"initialize_connection, 볼륨: %(volume)s, target_lun: %(target_lun)s, " -"target_luns: %(target_luns)s, 볼륨이 이미 맵핑되었습니다." - -#, python-format -msgid "" -"initialize_connection_fc, initiator: %(wwpns)s, volume name: %(volume)s." -msgstr "initialize_connection_fc, 개시자: %(wwpns)s, 볼륨 이름: %(volume)s." - -msgid "initiator has no password while using chap,adding it" -msgstr "chap을 사용하는 동안 개시자의 비밀번호가 없음, 추가" - -#, python-format -msgid "initiator name: %(initiator_name)s, LUN ID: %(lun_id)s." -msgstr "개시자 이름: %(initiator_name)s, LUN ID: %(lun_id)s." - -#, python-format -msgid "" -"manage_existing_snapshot: snapshot %(exist)s on volume %(volume)s has been " -"renamed to %(id)s and is now managed by Cinder." -msgstr "" -"manage_existing_snapshot: 볼륨 %(volume)s의 스냅샷 %(exist)s 이름이 " -"%(id)s(으)로 변경되었으며 이제 Cinder에서 관리됩니다." 
- -#, python-format -msgid "" -"migrate_volume_completion is cleaning up an error for volume %(vol1)s " -"(temporary volume %(vol2)s" -msgstr "" -"migrate_volume_completion이 볼륨 %(vol1)s의 오류 정리(임시 볼륨 %(vol2)s" - -#, python-format -msgid "new cloned volume: %s" -msgstr "새로 복제된 볼륨: %s" - -#, python-format -msgid "open_connection to %(ssn)s at %(ip)s" -msgstr "%(ip)s에서 %(ssn)s에 대한 open_connection" - -#, python-format -msgid "open_connection: Updating API version to %s" -msgstr "open_connection: API 버전을 %s(으)로 업데이트" - -#, python-format -msgid "replication failover secondary is %(ssn)s" -msgstr "복제 장애 복구 보조가 %(ssn)s임" - -#, python-format -msgid "setting volume %s to error_restoring (was restoring-backup)." -msgstr "볼륨 %s을(를) error_restoring(으)로 설정(복원-백업)." - -#, python-format -msgid "snapshot %s doesn't exist" -msgstr "%s 스냅샷이 없습니다." - -#, python-format -msgid "source volume for cloning: %s" -msgstr "복제할 소스 볼륨: %s" - -#, python-format -msgid "stop_snapshot: snapshot name: %(snapshot)s, volume name: %(volume)s." -msgstr "stop_snapshot: 스냅샷 이름: %(snapshot)s, 볼륨 이름: %(volume)s." - -#, python-format -msgid "terminate_connection volume: %(volume)s, connector: %(con)s" -msgstr "terminate_connection 볼륨: %(volume)s, 커넥터: %(con)s" - -#, python-format -msgid "terminate_connection, return data is: %s." -msgstr "terminate_connection, 리턴 데이터: %s." - -#, python-format -msgid "terminate_connection, unmap: %s, Exit method." -msgstr "terminate_connection, 맵핑 해제: %s, 메소드를 종료합니다." - -#, python-format -msgid "" -"terminate_connection, volume id: %(vid)s, initiator: %(initiator)s, Enter " -"method." -msgstr "" -"terminate_connection, 볼륨 id: %(vid)s, 개시자: %(initiator)s, 메소드를 입력" -"합니다." - -#, python-format -msgid "terminate_connection: initiator name: %(ini)s, LUN ID: %(lunid)s." -msgstr "terminate_connection: 개시자 이름: %(ini)s, LUN ID: %(lunid)s." - -#, python-format -msgid "terminate_connection: wwpns: %(wwns)s, LUN ID: %(lun_id)s." 
-msgstr "terminate_connection: wwpns: %(wwns)s, LUN ID: %(lun_id)s." - -#, python-format -msgid "" -"terminate_connection_fc: volume name: %(volume)s, wwpns: %(wwns)s, lun_id: " -"%(lunid)s." -msgstr "" -"terminate_connection_fc: 볼륨 이름: %(volume)s, wwpns: %(wwns)s, lun_id: " -"%(lunid)s." - -#, python-format -msgid "tunevv failed because the volume '%s' has snapshots." -msgstr "볼륨 '%s'에 스냅샷이 있으므로 tunevv에 실패했습니다." - -#, python-format -msgid "username: %(username)s, verify_cert: %(verify)s." -msgstr "사용자 이름: %(username)s, verify_cert: %(verify)s." - -#, python-format -msgid "vol=%s" -msgstr "vol=%s" - -#, python-format -msgid "vol_name=%(name)s provider_location=%(loc)s" -msgstr "vol_name=%(name)s provider_location=%(loc)s" - -#, python-format -msgid "volume %(name)s extended to %(size)d." -msgstr "볼륨 %(name)s이(가) %(size)d(으)로 확장되었습니다." - -#, python-format -msgid "volume %s doesn't exist" -msgstr "%s 볼륨이 없습니다." - -#, python-format -msgid "volume %s no longer exists in backend" -msgstr "백엔드에 더 이상 볼륨 %s이(가) 없음" - -#, python-format -msgid "volume: %(volume)s, lun params: %(params)s." -msgstr "볼륨: %(volume)s, lun 매개 변수: %(params)s." - -msgid "volume_file does not support fileno() so skipping fsync()" -msgstr "volume_file에서 fileno()를 지원하지 않으므로 fsync()를 건너뜀" diff --git a/cinder/locale/ko_KR/LC_MESSAGES/cinder.po b/cinder/locale/ko_KR/LC_MESSAGES/cinder.po deleted file mode 100644 index cfb8863db..000000000 --- a/cinder/locale/ko_KR/LC_MESSAGES/cinder.po +++ /dev/null @@ -1,9430 +0,0 @@ -# Translations template for cinder. -# Copyright (C) 2015 ORGANIZATION -# This file is distributed under the same license as the cinder project. -# -# Translators: -# bluejay , 2013 -# NaleeJang , 2013 -# Sungjin Kang , 2013 -# Yongbok Kim , 2014 -# Andreas Jaeger , 2016. 
#zanata -msgid "" -msgstr "" -"Project-Id-Version: cinder 9.0.0.0rc2.dev202\n" -"Report-Msgid-Bugs-To: https://bugs.launchpad.net/openstack-i18n/\n" -"POT-Creation-Date: 2016-10-07 03:25+0000\n" -"MIME-Version: 1.0\n" -"Content-Type: text/plain; charset=UTF-8\n" -"Content-Transfer-Encoding: 8bit\n" -"PO-Revision-Date: 2016-03-25 06:57+0000\n" -"Last-Translator: SeYeon Lee \n" -"Language: ko-KR\n" -"Plural-Forms: nplurals=1; plural=0;\n" -"Generated-By: Babel 2.0\n" -"X-Generator: Zanata 3.7.3\n" -"Language-Team: Korean (South Korea)\n" - -#, python-format -msgid "\t%s" -msgstr "\t%s" - -#, python-format -msgid "" -"\n" -"OpenStack Cinder version: %(version)s\n" -msgstr "" -"\n" -"OpenStack Cinder 버전: %(version)s\n" - -#, python-format -msgid " but size is now %d" -msgstr "현재 크기는 %d 입니다. " - -#, python-format -msgid " but size is now %d." -msgstr " 그러나 현재 크기는 %d입니다." - -msgid " or " -msgstr "또는" - -#, python-format -msgid "%(attr)s is not set." -msgstr "%(attr)s이(가) 설정되지 않았습니다." - -#, python-format -msgid "" -"%(driver)s manage_existing cannot manage a volume connected to hosts. Please " -"disconnect this volume from existing hosts before importing" -msgstr "" -"%(driver)s manage_existing은 호스트에 연결된 볼륨을 관리할 수 없습니다. 가져" -"오기 전에 이 볼륨과 기존 호스트의 연결을 끊으십시오. " - -#, python-format -msgid "%(err)s" -msgstr "%(err)s" - -#, python-format -msgid "" -"%(err)s\n" -"result: %(res)s." -msgstr "" -"%(err)s\n" -"결과: %(res)s." - -#, python-format -msgid "%(error_message)s" -msgstr "%(error_message)s" - -#, python-format -msgid "%(exception)s: %(explanation)s" -msgstr "%(exception)s: %(explanation)s" - -#, python-format -msgid "%(file)s: Permission denied." -msgstr "%(file)s: 권한이 거부됩니다." - -#, python-format -msgid "" -"%(fun)s: Failed with unexpected CLI output.\n" -" Command: %(cmd)s\n" -" stdout: %(out)s\n" -" stderr: %(err)s" -msgstr "" -"%(fun)s: 예상치 못한 CLI 출력과 함께 실패했습니다. 
\n" -"명령: %(cmd)s\n" -" stdout: %(out)s\n" -" stderr: %(err)s" - -#, python-format -msgid "%(host)-25s\t%(availability_zone)-15s" -msgstr "%(host)-25s\t%(availability_zone)-15s" - -#, python-format -msgid "%(host)-25s\t%(zone)-15s" -msgstr "%(host)-25s\t%(zone)-15s" - -#, python-format -msgid "%(message)s" -msgstr "%(message)s" - -#, python-format -msgid "" -"%(message)s\n" -"Status Code: %(_status)s\n" -"Body: %(_body)s" -msgstr "" -"%(message)s\n" -"상태 코드: %(_status)s\n" -"본문: %(_body)s" - -#, python-format -msgid "%(message)s, subjectAltName: %(sanList)s." -msgstr "%(message)s, subjectAltName: %(sanList)s." - -#, python-format -msgid "" -"%(msg_type)s: creating NetworkPortal: ensure port %(port)d on ip %(ip)s is " -"not in use by another service." -msgstr "" -"%(msg_type)s: NetworkPortal 작성: 다른 서비스가 ip %(ip)s의 포트 %(port)d을" -"(를) 사용하고 있지 않은지 확인하십시오. " - -#, python-format -msgid "" -"%(op)s: backup %(bck_id)s, volume %(vol_id)s failed. Backup object has " -"unexpected mode. Image or file backups supported, actual mode is " -"%(vol_mode)s." -msgstr "" -"%(op)s: 백업 %(bck_id)s, 볼륨 %(vol_id)s에 실패했습니다. 백업 오브젝트가 예상" -"치 못한 모드에 있습니다. 이미지 또는 파일 백업이 지원되었습니다. 실제 모드는 " -"%(vol_mode)s입니다." - -#, python-format -msgid "%(service)s Service is not %(status)s on storage appliance: %(host)s" -msgstr "" -"%(service)s 서비스가 스토리지 어플라이언스 %(host)s에서 %(status)s이(가) 아님" - -#, python-format -msgid "%(value_name)s must be <= %(max_value)d" -msgstr "%(value_name)s은(는) %(max_value)d보다 작거나 같아야 함" - -#, python-format -msgid "%(value_name)s must be >= %(min_value)d" -msgstr "%(value_name)s은(는) >= %(min_value)d이어야 함. " - -#, python-format -msgid "" -"%(worker_name)s value of %(workers)d is invalid, must be greater than 0." -msgstr "" -"%(workers)d의 %(worker_name)s 값이 올바르지 않습니다. 이 값은 0보다 커야 합니" -"다." - -#, python-format -msgid "%s" -msgstr "%s" - -#, python-format -msgid "%s \"data\" is not in result." -msgstr "%s \"data\"가 결과에 없습니다." - -#, python-format -msgid "" -"%s cannot be accessed. 
Verify that GPFS is active and file system is mounted." -msgstr "" -"%s에 액세스할 수 없습니다. GPFS가 활성이고 파일 시스템이 마운트되었는지 확인" -"하십시오." - -#, python-format -msgid "%s cannot be resized using clone operation as it contains no blocks." -msgstr "" -"블록이 포함되어 있지 않아서 복제 조작을 사용하여 %s 크기를 조정할 수 없습니" -"다." - -#, python-format -msgid "" -"%s cannot be resized using clone operation as it is hosted on compressed " -"volume" -msgstr "" -"압축된 볼륨에 호스트되었으므로 복제 조작을 사용하여 %s의 크기를 조정할 수 없" -"음" - -#, python-format -msgid "%s configuration option is not set." -msgstr "%s 구성 옵션이 설정되지 않았습니다. " - -#, python-format -msgid "%s does not exist." -msgstr "%s이(가) 없습니다." - -#, python-format -msgid "%s is not a directory." -msgstr "%s이(가) 디렉토리가 아닙니다. " - -#, python-format -msgid "%s is not installed" -msgstr "%s이(가) 설치되지 않음" - -#, python-format -msgid "%s is not installed." -msgstr "%s이(가) 설치되어 있지 않습니다. " - -#, python-format -msgid "%s is not set" -msgstr "%s이(가) 설정되지 않았음" - -#, python-format -msgid "%s is not set and is required for the replication device to be valid." -msgstr "" -"%s이(가) 설정되지 않았지만, 복제 장치가 유효하게 되려면 설정해야 합니다." - -#, python-format -msgid "%s is not set." -msgstr "%s이(가) 설정되지 않았습니다." - -#, python-format -msgid "%s must be a valid raw or qcow2 image." -msgstr "%s은(는) 유효한 원시 또는 qcow2 이미지여야 합니다. " - -#, python-format -msgid "%s must be an absolute path." -msgstr "%s은(는) 절대 경로여야 합니다. " - -#, python-format -msgid "%s must be an integer." -msgstr "%s은(는) 정수여야 합니다." - -#, python-format -msgid "%s not set in cinder.conf" -msgstr "cinder.conf에 %s이(가) 설정되지 않음" - -#, python-format -msgid "%s not set." -msgstr "%s이(가) 설정되지 않음. " - -#, python-format -msgid "" -"'%(prot)s' is invalid for flashsystem_connection_protocol in config file. " -"valid value(s) are %(enabled)s." -msgstr "" -"'%(prot)s'이(가) 구성 파일의 flashsystem_connection_protocol에 대해 올바르지 " -"않습니다. 올바른 값은 %(enabled)s입니다." - -msgid "'active' must be present when writing snap_info." -msgstr "스냅샷 정보를 기록할 때 '활성'이 있어야 합니다. 
" - -msgid "'consistencygroup_id' must be specified" -msgstr "'consistencygroup_id'를 지정해야 함" - -msgid "'qemu-img info' parsing failed." -msgstr "'qemu-img info' 구문 분석에 실패했습니다. " - -msgid "'status' must be specified." -msgstr "'상태'가 지정되어야 합니다." - -msgid "'volume_id' must be specified" -msgstr "'volume_id'를 지정해야 함" - -#, python-format -msgid "" -"(Command: %(cmd)s) (Return Code: %(exit_code)s) (Stdout: %(stdout)s) " -"(Stderr: %(stderr)s)" -msgstr "" -"(명령: %(cmd)s) (리턴 코드: %(exit_code)s) (Stdout: %(stdout)s) (Stderr: " -"%(stderr)s)" - -#, python-format -msgid "A LUN (HLUN) was not found. (LDEV: %(ldev)s)" -msgstr "A LUN(HLUN)을 찾을 수 없습니다.(LDEV: %(ldev)s)" - -msgid "A concurrent, possibly contradictory, request has been made." -msgstr "동시에 모순된 요청이 수행되었습니다." - -#, python-format -msgid "" -"A free LUN (HLUN) was not found. Add a different host group. (LDEV: %(ldev)s)" -msgstr "" -"사용 가능한 LUN(HLUN)을 찾을 수 없습니다. 다른 호스트 그룹을 추가하십시오." -"(LDEV: %(ldev)s)" - -#, python-format -msgid "A host group could not be added. (port: %(port)s, name: %(name)s)" -msgstr "호스트 그룹을 추가할 수 없습니다.(포트: %(port)s, 이름e: %(name)s)" - -#, python-format -msgid "" -"A host group could not be deleted. (port: %(port)s, gid: %(gid)s, name: " -"%(name)s)" -msgstr "" -"호스트 그룹을 삭제할 수 없습니다.(포트: %(port)s, gid: %(gid)s, 이름: " -"%(name)s)" - -#, python-format -msgid "A host group is invalid. (host group: %(gid)s)" -msgstr "호스트 그룹이 올바르지 않습니다.(호스트 그룹: %(gid)s)" - -#, python-format -msgid "A pair cannot be deleted. (P-VOL: %(pvol)s, S-VOL: %(svol)s)" -msgstr "쌍을 삭제할 수 없습니다.(P-VOL: %(pvol)s, S-VOL: %(svol)s)" - -#, python-format -msgid "" -"A pair could not be created. The maximum number of pair is exceeded. (copy " -"method: %(copy_method)s, P-VOL: %(pvol)s)" -msgstr "" -"쌍을 작성할 수 없습니다. 쌍의 최대 수가 초과되었습니다.(복사 메소드: " -"%(copy_method)s, P-VOL: %(pvol)s)" - -#, python-format -msgid "A parameter is invalid. (%(param)s)" -msgstr "매개변수가 올바르지 않습니다.(%(param)s)" - -#, python-format -msgid "A parameter value is invalid. 
(%(meta)s)" -msgstr "매개변수값이 올바르지 않습니다.(%(meta)s)" - -#, python-format -msgid "A pool could not be found. (pool id: %(pool_id)s)" -msgstr "풀을 찾을 수 없습니다.(pool id: %(pool_id)s)" - -#, python-format -msgid "A snapshot status is invalid. (status: %(status)s)" -msgstr "스냅샷 상태가 올바르지 않습니다.(상태: %(status)s)" - -msgid "A valid secondary target MUST be specified in order to failover." -msgstr "장애 복구하려면 올바른 보조 대상을 지정해야 합니다." - -msgid "A volume ID or share was not specified." -msgstr "볼륨 ID 또는 공유가 지정되지 않았습니다. " - -#, python-format -msgid "A volume status is invalid. (status: %(status)s)" -msgstr "볼륨 상태가 올바르지 않습니다.(상태: %(status)s)" - -#, python-format -msgid "API %(name)s failed with error string %(err)s" -msgstr "API %(name)s이(가) 실패했으며 오류 문자열은 %(err)s임" - -#, python-format -msgid "" -"API Version String %(version)s is of invalid format. Must be of format " -"MajorNum.MinorNum." -msgstr "" -"API 버전 문자열 %(version)s 형식이 올바르지 않습니다. 형식은 MajorNum." -"MinorNum 이어야 합니다." - -msgid "API key is missing for CloudByte driver." -msgstr "CloudByte 드라이버의 API 키가 누락되었습니다." - -#, python-format -msgid "API response: %(response)s" -msgstr "API 응답: %(response)s" - -#, python-format -msgid "API response: %s" -msgstr "API 응답: %s" - -#, python-format -msgid "API version %(version)s is not supported on this method." -msgstr "API 버전 %(version)s에서는 이 메소드를 지원하지 않습니다.." - -msgid "API version could not be determined." -msgstr "API 버전을 판별할 수 없습니다." - -msgid "" -"About to delete child projects having non-zero quota. This should not be " -"performed" -msgstr "" -"할당량이 0(영)이 아닌 하위 프로젝트를 삭제하려고 합니다. 이는 수행해서는 안 " -"됩니다. " - -msgid "Access list not available for public volume types." -msgstr "액세스 목록을 공용 볼륨 유형에 사용할 수 없습니다. " - -msgid "Activate or deactivate QoS error." -msgstr "QoS 활성화 또는 비활성화 오류입니다. " - -msgid "Activate snapshot error." -msgstr "스냅샷 활성화 오류입니다. " - -msgid "Add FC port to host error." -msgstr "호스트에 FC 포트 추가 오류입니다. " - -msgid "Add fc initiator to array error." -msgstr "배열에 fc 개시자 추가 오류입니다. 
" - -msgid "Add initiator to array error." -msgstr "배열에 개시자 추가 오류입니다. " - -msgid "Add lun to cache error." -msgstr "캐시에 lun 추가 오류입니다. " - -msgid "Add lun to partition error." -msgstr "파티션에 lun 추가 오류입니다. " - -msgid "Add mapping view error." -msgstr "맵핑 보기 추가 오류입니다. " - -msgid "Add new host error." -msgstr "새 호스트 추가 오류입니다. " - -msgid "Add port to port group error." -msgstr "포트 그룹에 포트 추가 오류." - -#, python-format -msgid "" -"All the specified storage pools to be managed do not exist. Please check " -"your configuration. Non-existent pools: %s" -msgstr "" -"관리될 모든 지정된 스토리지 풀이 존재하지 않습니다. 구성을 확인하십시오. 존재" -"하지 않는 풀: %s" - -msgid "An API version request must be compared to a VersionedMethod object." -msgstr "API 버전 요청은 VersionedMethod 오브젝트와 비교해야 합니다." - -msgid "An error has occurred during backup operation" -msgstr "백업 조작 중에 오류가 발생함 " - -#, python-format -msgid "" -"An error occurred during the LUNcopy operation. LUNcopy name: " -"%(luncopyname)s. LUNcopy status: %(luncopystatus)s. LUNcopy state: " -"%(luncopystate)s." -msgstr "" -"LUNcopy 조작 중 오류가 발생했습니다. LUNcopy 이름: %(luncopyname)s. LUNcopy " -"상태: %(luncopystatus)s. LUNcopy 상태: %(luncopystate)s." - -#, python-format -msgid "An error occurred while reading volume \"%s\"." -msgstr "볼륨 \"%s\"을(를) 읽는 중에 오류가 발생했습니다. " - -#, python-format -msgid "An error occurred while writing to volume \"%s\"." -msgstr "볼륨 \"%s\"에 쓰는 중에 오류가 발생했습니다. " - -#, python-format -msgid "An iSCSI CHAP user could not be added. (username: %(user)s)" -msgstr "iSCSI CHAP 사용자를 추가할 수 없습니다.(사용자 이름: %(user)s)" - -#, python-format -msgid "An iSCSI CHAP user could not be deleted. (username: %(user)s)" -msgstr "iSCSI CHAP 사용자를 삭제할 수 없습니다.(사용자 이름: %(user)s)" - -#, python-format -msgid "" -"An iSCSI target could not be added. (port: %(port)s, alias: %(alias)s, " -"reason: %(reason)s)" -msgstr "" -"iSCSI 대상을 추가할 수 없습니다.(포트: %(port)s, 별명: %(alias)s, 이유: " -"%(reason)s)" - -#, python-format -msgid "" -"An iSCSI target could not be deleted. 
(port: %(port)s, tno: %(tno)s, alias: " -"%(alias)s)" -msgstr "" -"iSCSI 대상을 삭제할 수 없습니다.(포트: %(port)s, tno: %(tno)s, 별명: " -"%(alias)s)" - -msgid "An unknown exception occurred." -msgstr "알 수 없는 예외가 발생했습니다. " - -msgid "" -"An user with a token scoped to a subproject is not allowed to see the quota " -"of its parents." -msgstr "" -"하위 프로젝트로 범위 지정된 토큰을 가진 사용자는 상위의 할당량을 볼 수 없습니" -"다. " - -msgid "Append port group description error." -msgstr "포트 그룹 설명 추가 오류." - -#, python-format -msgid "" -"Applying the zones and cfgs to the switch failed (error code=%(err_code)s " -"error msg=%(err_msg)s." -msgstr "" -"구역과 cfgs를 스위치에 적용하는 데 실패했습니다(오류 코드=%(err_code)s 오류 " -"메시지=%(err_msg)s." - -#, python-format -msgid "Array does not exist or is offline. Current status of array is %s." -msgstr "" -"배열이 존재하지 않거나 오프라인 상태입니다. 배열의 현재 상태는 %s입니다. " - -msgid "Associate host to hostgroup error." -msgstr "호스트 그룹에 호스트 연관 오류입니다. " - -msgid "Associate host to mapping view error." -msgstr "맵핑 보기에 호스트 연관 오류입니다. " - -msgid "Associate initiator to host error." -msgstr "호스트에 개시자 연관 오류입니다. " - -msgid "Associate lun to QoS error." -msgstr "lun을 QoS에 연결 오류." - -msgid "Associate lun to lungroup error." -msgstr "Lun 그룹에 lun 연관 오류입니다. " - -msgid "Associate lungroup to mapping view error." -msgstr "맵핑 보기에 lun 그룹 연관 오류입니다. " - -msgid "Associate portgroup to mapping view error." -msgstr "맵핑 보기에 포트 그룹 연관 오류입니다. " - -msgid "At least one valid iSCSI IP address must be set." -msgstr "최소한 하나의 올바른 iSCSI IP 주소를 설정해야 합니다. " - -#, python-format -msgid "Attempt to transfer %s with invalid auth key." -msgstr "올바르지 않은 인증 키로 %s 전송을 시도했습니다. " - -#, python-format -msgid "Auth group [%s] details not found in CloudByte storage." -msgstr "CloudByte 스토리지에서 인증 그룹 [%s] 세부사항을 찾을 수 없습니다. " - -msgid "Auth user details not found in CloudByte storage." -msgstr "CloudByte 스토리지에서 인증 사용자 세부사항을 찾을 수 없습니다. " - -#, python-format -msgid "Authentication failed, verify the switch credentials, error code %s." 
-msgstr "인증 실패, 스위치 자격 증명 확인, 오류 코드 %s." - -#, python-format -msgid "Availability zone '%(s_az)s' is invalid." -msgstr "가용성 구역 '%(s_az)s'이(가) 올바르지 않습니다. " - -msgid "Available categories:" -msgstr "사용 가능한 카테고리:" - -msgid "" -"Back-end QoS specs are not supported on this storage family and ONTAP " -"version." -msgstr "" -"백엔드 QoS 스펙이 이 스토리지 제품군 및 ONTAP 버전에서 지원되지 않습니다. " - -#, python-format -msgid "Backend doesn't exist (%(backend)s)" -msgstr "백엔드가 존재하지 않음(%(backend)s)" - -#, python-format -msgid "Backend reports: %(message)s" -msgstr "백엔드 보고서: %(message)s" - -msgid "Backend reports: item already exists" -msgstr "백엔드 보고서: 항목이 이미 존재함" - -msgid "Backend reports: item not found" -msgstr "백엔드 보고서: 항목을 찾을 수 없음" - -#, python-format -msgid "Backend service retry timeout hit: %(timeout)s sec" -msgstr "백엔드 서비스 재시도 제한시간 도달: %(timeout)s초" - -msgid "Backend storage did not configure fiber channel target." -msgstr "백엔드 스토리지가 파이버 채널 대상을 구성하지 않았습니다." - -msgid "Backing up an in-use volume must use the force flag." -msgstr "사용 중인 볼륨을 백업하려면 강제 실행 플래그를 사용해야 합니다. " - -#, python-format -msgid "Backup %(backup_id)s could not be found." -msgstr "%(backup_id)s 백업을 찾을 수 없습니다. " - -msgid "Backup RBD operation failed" -msgstr "백업 RBD 조작이 실패함 " - -msgid "Backup already exists in database." -msgstr "데이터베이스에 이미 백업이 있습니다. " - -#, python-format -msgid "Backup driver reported an error: %(message)s" -msgstr "백업 드라이버 오류 보고서: %(message)s" - -msgid "Backup id required" -msgstr "백업 ID 필요" - -msgid "Backup is not supported for GlusterFS volumes with snapshots." -msgstr "스냅샷이 있는 GlusterFS 볼륨에 대한 백업이 지원되지 않습니다." - -msgid "Backup is only supported for SOFS volumes without backing file." -msgstr "백업 파일 없는 SOFS 볼륨에 대해서만 백업이 지원됩니다." - -msgid "Backup is only supported for raw-formatted GlusterFS volumes." -msgstr "백업은 원시 형식화 GlusterFS 볼륨에 대해서만 지원됩니다." - -msgid "Backup is only supported for raw-formatted SOFS volumes." -msgstr "원시 형식화 SOFS 볼륨에 대해서만 백업이 지원됩니다." 
- -msgid "Backup operation of an encrypted volume failed." -msgstr "암호화된 볼륨의 백업 조작이 실패했습니다." - -#, python-format -msgid "" -"Backup service %(configured_service)s does not support verify. Backup id " -"%(id)s is not verified. Skipping verify." -msgstr "" -"백업 서비스 %(configured_service)s이(가) 확인을 지원하지 않습니다. 백업 ID " -"%(id)s이(가) 확인되지 않습니다. 확인을 건너뜁니다." - -#, python-format -msgid "" -"Backup service %(service)s does not support verify. Backup id %(id)s is not " -"verified. Skipping reset." -msgstr "" -"백업 서비스 %(service)s이(가) 확인을 지원하지 않습니다. 백업 ID %(id)s이(가) " -"확인되지 않습니다. 재설정을 건너뜁니다." - -#, python-format -msgid "Backup should only have one snapshot but instead has %s" -msgstr "백업에 하나의 스냅샷만 있어야 하지만 대신 %s개가 있음" - -msgid "Backup status must be available" -msgstr "백업 상태가 사용 가능해야 함" - -#, python-format -msgid "Backup status must be available and not %s." -msgstr "백업 상태는 사용 가능해야 하며 %s이(가) 아니어야 합니다." - -msgid "Backup status must be available or error" -msgstr "백업 상태는 사용 가능 또는 오류여야 함" - -msgid "Backup to be restored has invalid size" -msgstr "복원할 백업이 올바르지 않은 크기임" - -#, python-format -msgid "Bad Status line returned: %(arg)s." -msgstr "잘못된 상태 표시줄이 리턴됨: %(arg)s." 
- -#, python-format -msgid "Bad key(s) in quota set: %s" -msgstr "할당량 세트의 잘못된 키: %s" - -#, python-format -msgid "" -"Bad or unexpected response from the storage volume backend API: %(data)s" -msgstr "스토리지 볼륨 백엔드 API로부터 잘못되었거나 예상치 못한 응답: %(data)s" - -#, python-format -msgid "Bad project format: project is not in proper format (%s)" -msgstr "잘못된 프로젝트 형식: 프로젝트 형식이 올바르지 않음(%s)" - -msgid "Bad response from Datera API" -msgstr "Datera API의 잘못된 응답" - -msgid "Bad response from SolidFire API" -msgstr "SolidFire API의 잘못된 응답" - -#, python-format -msgid "Bad response from XMS, %s" -msgstr "XMS의 잘못된 응답, %s" - -msgid "Binary" -msgstr "2진" - -msgid "Blank components" -msgstr "비어 있는 구성요소" - -msgid "Blockbridge api host not configured" -msgstr "Blockbridge api 호스트가 구성되지 않음" - -#, python-format -msgid "Blockbridge configured with invalid auth scheme '%(auth_scheme)s'" -msgstr "Blockbridge가 올바르지 않은 인증 스킴 '%(auth_scheme)s'(으)로 구성됨" - -msgid "Blockbridge default pool does not exist" -msgstr "Blockbridge 기본 풀이 존재하지 않음" - -msgid "" -"Blockbridge password not configured (required for auth scheme 'password')" -msgstr "Blockbridge 비밀번호가 구성되지 않음(인증 스킴 '비밀번호'의 경우 필수)" - -msgid "Blockbridge pools not configured" -msgstr "Blockbridge 풀이 구성되지 않음" - -msgid "Blockbridge token not configured (required for auth scheme 'token')" -msgstr "Blockbridge 토큰이 구성되지 않음(인증 스킴 '토큰'의 경우 필수)" - -msgid "Blockbridge user not configured (required for auth scheme 'password')" -msgstr "Blockbridge 사용자가 구성되지 않음(인증 스킴 '비밀번호'의 경우 필수)" - -#, python-format -msgid "Brocade Fibre Channel Zoning CLI error: %(reason)s" -msgstr "Brocade Fibre Channel Zoning CLI 오류: %(reason)s" - -#, python-format -msgid "Brocade Fibre Channel Zoning HTTP error: %(reason)s" -msgstr "Brocade Fibre Channel Zoning HTTP 오류: %(reason)s" - -msgid "CHAP secret should be 12-16 bytes." -msgstr "CHAP 본인확인정보는 12바이트 - 16바이트여야 합니다. 
" - -#, python-format -msgid "" -"CLI Exception output:\n" -" command: %(cmd)s\n" -" stdout: %(out)s\n" -" stderr: %(err)s" -msgstr "" -"CLI 실행 출력: \n" -" command: %(cmd)s\n" -" stdout: %(out)s\n" -" stderr: %(err)s" - -#, python-format -msgid "" -"CLI Exception output:\n" -" command: %(cmd)s\n" -" stdout: %(out)s\n" -" stderr: %(err)s." -msgstr "" -"CLI 실행 출력: \n" -" command: %(cmd)s\n" -"stdout: %(out)s\n" -" stderr: %(err)s." - -msgid "" -"CMMVC6071E The VDisk-to-host mapping was not created because the VDisk is " -"already mapped to a host.\n" -"\"" -msgstr "" -"CMMVC6071E VDisk가 이미 호스트에 맵핑되어 있기 때문에 VDisk에서 호스트로의 맵" -"핑이 작성되지 않았습니다. \n" -"\"" - -msgid "CONCERTO version is not supported" -msgstr "CONCERTO 버전이 지원되지 않음" - -#, python-format -msgid "CPG (%s) doesn't exist on array" -msgstr "배열에 CPG(%s)가 없음" - -msgid "Cache name is None, please set smartcache:cachename in key." -msgstr "캐시 이름이 None입니다. 키에서 smartcache:cachename을 설정하십시오. " - -#, python-format -msgid "Cache volume %s does not have required properties" -msgstr "캐시 볼륨 %s에 필수 특성이 없음" - -msgid "Call returned a None object" -msgstr "호출에서 None 오브젝트를 리턴함" - -msgid "Can not add FC port to host." -msgstr "호스트에 FC 포트를 추가할 수 없습니다." - -#, python-format -msgid "Can not find cache id by cache name %(name)s." -msgstr "캐시 이름 %(name)s을(를) 사용하여 캐시 id를 찾을 수 없습니다. " - -#, python-format -msgid "Can not find partition id by name %(name)s." -msgstr "이름 %(name)s을(를) 사용하여 파티션 id를 찾을 수 없습니다. " - -#, python-format -msgid "Can not get pool info. pool: %s" -msgstr "풀 정보를 가져올 수 없습니다. 풀: %s" - -#, python-format -msgid "Can not translate %s to integer." -msgstr "%s을(를) 정수로 변환할 수 없습니다. " - -#, python-format -msgid "Can't access 'scality_sofs_config': %s" -msgstr "'scality_sofs_config'에 액세스할 수 없음: %s" - -msgid "Can't decode backup record." -msgstr "백업 레코드를 디코드할 수 없습니다. 
" - -#, python-format -msgid "Can't extend replication volume, volume: %(id)s" -msgstr "복제 볼륨을 확장할 수 없음, 볼륨: %(id)s" - -msgid "Can't find LUN on the array, please check the source-name or source-id." -msgstr "" -"배열에서 LUN을 찾을 수 없습니다. source-name 또는 source-id를 확인하십시오." - -#, python-format -msgid "Can't find cache name on the array, cache name is: %(name)s." -msgstr "배열에서 캐시 이름을 찾을 수 없음, 캐시 이름: %(name)s." - -#, python-format -msgid "Can't find lun info on the array. volume: %(id)s, lun name: %(name)s." -msgstr "" -"배열에서 lun 정보를 찾을 수 없습니다. 볼륨: %(id)s, lun 이름: %(name)s." - -#, python-format -msgid "Can't find partition name on the array, partition name is: %(name)s." -msgstr "" -"배열에서 파티션 이름을 찾을 수 없습니다. 파티션 이름은 %(name)s입니다. " - -#, python-format -msgid "Can't find service: %s" -msgstr "서비스를 찾을 수 없음: %s" - -msgid "" -"Can't find snapshot on array, please check the source-name or source-id." -msgstr "" -"배열에서 스냅샷을 찾을 수 없습니다. source-name 또는 source-id를 확인하십시" -"오." - -msgid "Can't find the same host id from arrays." -msgstr "배열에서 동일한 호스트 id를 찾을 수 없습니다." - -#, python-format -msgid "Can't get volume id from snapshot, snapshot: %(id)s" -msgstr "스냅샷에서 볼륨 id를 가져올 수 없음, 스냅샷: %(id)s" - -#, python-format -msgid "Can't get volume id. Volume name: %s." -msgstr "볼륨 id를 가져올 수 없습니다. 볼륨 이름: %s." - -#, python-format -msgid "Can't import LUN %(lun_id)s to Cinder. LUN type mismatched." -msgstr "" -"LUN %(lun_id)s을(를) Cinder에 가져올 수 없습니다. LUN 유형이 일치하지 않습니" -"다." - -#, python-format -msgid "Can't import LUN %s to Cinder. Already exists in a HyperMetroPair." -msgstr "" -"LUN %s을(를) Cinder에 가져올 수 없습니다. 이미 HyperMetroPair에 있습니다." - -#, python-format -msgid "Can't import LUN %s to Cinder. Already exists in a LUN copy task." -msgstr "" -"LUN %s을(를) Cinder에 가져올 수 없습니다. 이미 LUN 복사 작업에 있습니다." - -#, python-format -msgid "Can't import LUN %s to Cinder. Already exists in a LUN group." -msgstr "LUN %s을(를) Cinder에 가져올 수 없습니다. 이미 LUN 그룹에 있습니다." - -#, python-format -msgid "Can't import LUN %s to Cinder. 
Already exists in a LUN mirror." -msgstr "LUN %s을(를) Cinder에 가져올 수 없습니다. 이미 LUN 미러에 있습니다." - -#, python-format -msgid "Can't import LUN %s to Cinder. Already exists in a SplitMirror." -msgstr "LUN %s을(를) Cinder에 가져올 수 없습니다. 이미 SplitMirror에 있습니다." - -#, python-format -msgid "Can't import LUN %s to Cinder. Already exists in a migration task." -msgstr "" -"LUN %s을(를) Cinder에 가져올 수 없습니다. 이미 마이그레이션 작업에 있습니다." - -#, python-format -msgid "" -"Can't import LUN %s to Cinder. Already exists in a remote replication task." -msgstr "" -"LUN %s을(를) Cinder에 가져올 수 없습니다. 이미 원격 복제 작업에 있습니다." - -#, python-format -msgid "Can't import LUN %s to Cinder. LUN status is not normal." -msgstr "LUN %s을(를) Cinder에 가져올 수 없습니다. LUN 상태가 정상이 아닙니다." - -#, python-format -msgid "Can't import snapshot %s to Cinder. Snapshot doesn't belong to volume." -msgstr "" -"스냅샷 %s을(를) Cinder에 가져올 수 없습니다. 스냅샷이 볼륨에 속하지 않습니다." - -#, python-format -msgid "Can't import snapshot %s to Cinder. Snapshot is exposed to initiator." -msgstr "" -"스냅샷 %s을(를) Cinder에 가져올 수 없습니다. 스냅샷이 개시자에 공개되어 있습" -"니다." - -#, python-format -msgid "" -"Can't import snapshot %s to Cinder. Snapshot status is not normal or running " -"status is not online." -msgstr "" -"스냅샷 %s을(를) Cinder에 가져올 수 없습니다. 스냅샷 상태가 정상이 아니거나 실" -"행 상태가 온라인이 아닙니다." - -msgid "Can't parse backup record." -msgstr "백업 레코드를 구문 분석할 수 없습니다. " - -#, python-format -msgid "" -"Cannot add volume %(volume_id)s to consistency group %(group_id)s because it " -"has no volume type." -msgstr "" -"볼륨 %(volume_id)s을(를) 일관성 그룹 %(group_id)s에 추가할 수 없습니다. 그룹" -"에 볼륨 유형이 없습니다. " - -#, python-format -msgid "" -"Cannot add volume %(volume_id)s to consistency group %(group_id)s because it " -"is already in consistency group %(orig_group)s." -msgstr "" -"이미 일관성 그룹 %(orig_group)s에 있기 때문에 볼륨 %(volume_id)s을(를) 일관" -"성 그룹 %(group_id)s에 추가할 수 없습니다. " - -#, python-format -msgid "" -"Cannot add volume %(volume_id)s to consistency group %(group_id)s because " -"volume cannot be found." 
-msgstr "" -"볼륨 %(volume_id)s을(를) 일관성 그룹 %(group_id)s에 추가할 수 없습니다. 볼륨" -"을 찾을 수 없습니다. " - -#, python-format -msgid "" -"Cannot add volume %(volume_id)s to consistency group %(group_id)s because " -"volume does not exist." -msgstr "" -"볼륨 %(volume_id)s을(를) 일관성 그룹 %(group_id)s에 추가할 수 없습니다. 볼륨" -"이 없습니다. " - -#, python-format -msgid "" -"Cannot add volume %(volume_id)s to consistency group %(group_id)s because " -"volume is in an invalid state: %(status)s. Valid states are: %(valid)s." -msgstr "" -"볼륨 %(volume_id)s을(를) 일관성 그룹 %(group_id)s에 추가할 수 없습니다. 볼륨 " -"상태 %(status)s이(가) 올바르지 않은 상태입니다. 올바른 상태는 %(valid)s입니" -"다. " - -#, python-format -msgid "" -"Cannot add volume %(volume_id)s to consistency group %(group_id)s because " -"volume type %(volume_type)s is not supported by the group." -msgstr "" -"볼륨 %(volume_id)s을(를) 일관성 그룹 %(group_id)s에 추가할 수 없습니다. 해당 " -"그룹에서 볼륨 유형 %(volume_type)s을(를) 지원하지 않습니다. " - -#, python-format -msgid "" -"Cannot attach already attached volume %s; multiattach is disabled via the " -"'netapp_enable_multiattach' configuration option." -msgstr "" -"이미 연결된 볼륨 %s은(는) 연결할 수 없습니다. 다중 연결은 " -"'netapp_enable_multiattach' 구성 옵션을 통해 사용 안함으로 설정됩니다. " - -msgid "Cannot change VF context in the session." -msgstr "세션에서 VF 컨텍스트를 변경할 수 없습니다." - -#, python-format -msgid "" -"Cannot change VF context, specified VF is not available in the manageable VF " -"list %(vf_list)s." -msgstr "" -"VF 컨텍스트를 변경할 수 없음, 관리할 수 있는 VF 목록 %(vf_list)s에서 지정된 " -"VF를 사용할 수 없습니다." - -msgid "Cannot connect to ECOM server." -msgstr "ECOM 서버를 연결할 수 없습니다. " - -#, python-format -msgid "" -"Cannot create consistency group %(group)s because snapshot %(snap)s is not " -"in a valid state. Valid states are: %(valid)s." -msgstr "" -"스냅샷 %(snap)s이(가) 올바른 상태가 아니어서 일관성 그룹 %(group)s을(를)작성" -"할 수 없습니다. 올바른 상태는 %(valid)s입니다. " - -#, python-format -msgid "" -"Cannot create consistency group %(group)s because source volume " -"%(source_vol)s is not in a valid state. Valid states are: %(valid)s." 
-msgstr "" -"소스 볼륨 %(source_vol)s이(가) 올바른 세부 상태에 있지 않으므로 일관성 그룹 " -"%(group)s을(를) 작성할 수 없습니다. 올바른 세부 상태는 %(valid)s입니다. " - -#, python-format -msgid "Cannot create directory %s." -msgstr "디렉토리 %s을(를) 작성할 수 없습니다. " - -msgid "Cannot create encryption specs. Volume type in use." -msgstr "암호화 스펙을 작성할 수 없습니다. 볼륨 유형이 사용 중입니다." - -#, python-format -msgid "" -"Cannot create image of disk format: %s. Only vmdk disk format is accepted." -msgstr "" -"디스크 형식 %s의 이미지를 작성할 수 없습니다. vmdk 디스크 형식만 허용됩니다. " - -#, python-format -msgid "Cannot create masking view: %(maskingViewName)s. " -msgstr "마스킹 보기 %(maskingViewName)s을(를) 작성할 수 없습니다. " - -#, python-format -msgid "" -"Cannot create more than %(req)s volumes on the ESeries array when " -"'netapp_enable_multiattach' is set to true." -msgstr "" -"'netapp_enable_multiattach'가 true로 설정되면 ESeries 배열에서 %(req)s개를 초" -"과하는 볼륨을 작성할 수 없습니다. " - -#, python-format -msgid "Cannot create or find an storage group with name %(sgGroupName)s." -msgstr "이름이 %(sgGroupName)s인 스토리지 그룹을 작성하거나 찾을 수 없습니다. " - -#, python-format -msgid "Cannot create volume of size %s: not multiple of 8GB." -msgstr "크기가 %s인 볼륨을 작성할 수 없습니다. 8GB의 배수가 아닙니다. " - -#, python-format -msgid "Cannot create volume_type with name %(name)s and specs %(extra_specs)s" -msgstr "" -"이름이 %(name)s이고 스펙이 %(extra_specs)s인 volume_type을 작성할 수 없음" - -#, python-format -msgid "Cannot delete LUN %s while snapshots exist." -msgstr "스냅샷이 존재하는 동안 LUN %s을(를) 삭제할 수 없습니다. " - -#, python-format -msgid "" -"Cannot delete cache volume: %(cachevol_name)s. It was updated at " -"%(updated_at)s and currently has %(numclones)d volume instances." -msgstr "" -"캐시 볼륨: %(cachevol_name)s을(를) 삭제할 수 없습니다. %(updated_at)s에 업데" -"이트되었으며 현재 %(numclones)d개 볼륨 인스턴스를 가지고 있습니다. " - -#, python-format -msgid "" -"Cannot delete cache volume: %(cachevol_name)s. It was updated at " -"%(updated_at)s and currently has %(numclones)s volume instances." -msgstr "" -"캐시 볼륨: %(cachevol_name)s을(를) 삭제할 수 없습니다. 
%(updated_at)s에 업데" -"이트되었으며 현재 %(numclones)s개 볼륨 인스턴스를 가지고 있습니다. " - -msgid "Cannot delete encryption specs. Volume type in use." -msgstr "암호화 스펙을 삭제할 수 없습니다. 볼륨 유형이 사용 중입니다." - -msgid "Cannot determine storage pool settings." -msgstr "스토리지 풀 설정을 판별할 수 없습니다." - -msgid "Cannot execute /sbin/mount.sofs" -msgstr "/sbin/mount.sofs를 실행할 수 없음" - -#, python-format -msgid "Cannot find CG group %s." -msgstr "CG 그룹 %s을(를) 찾을 수 없습니다." - -#, python-format -msgid "" -"Cannot find Controller Configuration Service for storage system " -"%(storage_system)s." -msgstr "" -"스토리지 시스템 %(storage_system)s에 대한 제어기 구성 서비스를 찾을 수 없습니" -"다." - -#, python-format -msgid "Cannot find Replication Service to create volume for snapshot %s." -msgstr "스냅샷 %s에 대한 볼륨을 작성할 복제 서비스를 찾을 수 없습니다. " - -#, python-format -msgid "Cannot find Replication Service to delete snapshot %s." -msgstr "스냅샷 %s을(를) 삭제할 복제 서비스를 찾을 수 없습니다. " - -#, python-format -msgid "Cannot find Replication service on system %s." -msgstr "%s 시스템에서 복제 서비스를 찾을 수 없습니다. " - -#, python-format -msgid "Cannot find Volume: %(id)s. unmanage operation. Exiting..." -msgstr "볼륨: %(id)s을(를) 찾을 수 없습니다. 관리 해제 조작. 종료 중..." - -#, python-format -msgid "Cannot find Volume: %(volumename)s. Extend operation. Exiting...." -msgstr "%(volumename)s 볼륨을 찾을 수 없습니다. 확장 조작. 종료 중..." - -#, python-format -msgid "Cannot find device number for volume %(volumeName)s." -msgstr "%(volumeName)s 볼륨에 대한 디바이스 번호를 찾을 수 없습니다. " - -msgid "Cannot find migration task." -msgstr "마이그레이션 태스크를 찾을 수 없습니다. " - -#, python-format -msgid "Cannot find replication service on system %s." -msgstr "시스템 %s에서 복제 서비스를 찾을 수 없습니다. " - -#, python-format -msgid "Cannot find source CG instance. consistencygroup_id: %s." -msgstr "소스 CG 인스턴스를 찾을 수 없습니다. consistencygroup_id: %s." - -#, python-format -msgid "Cannot get mcs_id by channel id: %(channel_id)s." -msgstr "채널 id: %(channel_id)s을(를) 사용하여 mcs_id를 가져올 수 없습니다. " - -msgid "Cannot get necessary pool or storage system information." 
-msgstr "필요한 풀 또는 스토리지 시스템 정보를 가져올 수 없습니다. " - -#, python-format -msgid "" -"Cannot get or create a storage group: %(sgGroupName)s for volume " -"%(volumeName)s " -msgstr "" -"%(volumeName)s 볼륨의 스토리지 그룹 %(sgGroupName)s을(를) 가져오거나 작성할 " -"수 없음 " - -#, python-format -msgid "Cannot get or create initiator group: %(igGroupName)s. " -msgstr "개시자 그룹 %(igGroupName)s을(를) 가져오거나 작성할 수 없습니다. " - -#, python-format -msgid "Cannot get port group: %(pgGroupName)s. " -msgstr "포트 그룹 %(pgGroupName)s을(를) 가져올 수 없습니다. " - -#, python-format -msgid "" -"Cannot get storage group: %(sgGroupName)s from masking view " -"%(maskingViewInstanceName)s. " -msgstr "" -"마스킹 보기에서 스토리지 그룹: %(sgGroupName)s을(를) 가져오지 못" -"함%(maskingViewInstanceName)s." - -#, python-format -msgid "" -"Cannot get supported size range for %(sps)s Return code: %(rc)lu. Error: " -"%(error)s." -msgstr "" -"%(sps)s에 지원되는 크기 범위를 가져올 수 없음. 리턴 코드: %(rc)lu. 오류: " -"%(error)s." - -#, python-format -msgid "" -"Cannot get the default storage group for FAST policy: %(fastPolicyName)s." -msgstr "" -"FAST 정책 %(fastPolicyName)s에 대한 기본 스토리지 그룹을 가져올 수 없습니다." - -msgid "Cannot get the portgroup from the masking view." -msgstr "마스킹 보기에서 portgroup을 가져올 수 없습니다." - -msgid "Cannot mount Scality SOFS, check syslog for errors" -msgstr "스케일 SOFS를 마운트할 수 없음. syslog에서 오류 확인" - -msgid "Cannot ping DRBDmanage backend" -msgstr "DRBDmanage 백엔드에 대해 ping을 실행할 수 없음" - -#, python-format -msgid "Cannot place volume %(id)s on %(host)s" -msgstr "%(id)s 볼륨을 %(host)s에 배치할 수 없음 " - -#, python-format -msgid "" -"Cannot provide both 'cgsnapshot_id' and 'source_cgid' to create consistency " -"group %(name)s from source." -msgstr "" -"소스에서 일관성 그룹 %(name)s을(를) 작성하는 데 필요한 'cgsnapshot_id'와 " -"'source_cgid'를 모두 제공할 수 없습니다. " - -msgid "Cannot register resource" -msgstr "자원을 등록할 수 없습니다. " - -msgid "Cannot register resources" -msgstr "자원을 등록할 수 없습니다. 
" - -#, python-format -msgid "" -"Cannot remove volume %(volume_id)s from consistency group %(group_id)s " -"because it is not in the group." -msgstr "" -"일관성 그룹 %(group_id)s에서 볼륨 %(volume_id)s을(를) 제거할 수 없습니다. 해" -"당 볼륨이 그룹에 있지 않습니다. " - -#, python-format -msgid "" -"Cannot remove volume %(volume_id)s from consistency group %(group_id)s " -"because volume is in an invalid state: %(status)s. Valid states are: " -"%(valid)s." -msgstr "" -"일관성 그룹 %(group_id)s에서 볼륨 %(volume_id)s을(를) 제거할 수 없습니다. 볼" -"륨 상태 %(status)s이(가) 올바르지 않은 상태입니다. 올바른 상태: %(valid)s." - -#, python-format -msgid "Cannot retype from HPE3PARDriver to %s." -msgstr "HPE3PARDriver에서 %s(으)로 재입력할 수 없습니다." - -msgid "Cannot retype from one 3PAR array to another." -msgstr "하나의 3PAR 배열에서 다른 3PAR 배열로 재입력할 수 없습니다." - -msgid "Cannot retype to a CPG in a different domain." -msgstr "다른 도메인의 CPG로 재입력할 수 없습니다." - -msgid "Cannot retype to a snap CPG in a different domain." -msgstr "다른 도메인의 snap CPG로 재입력할 수 없습니다." - -msgid "" -"Cannot run vgc-cluster command, please ensure software is installed and " -"permissions are set properly." -msgstr "" -"vgc-cluster 명령을 실행할 수 없습니다. 소프트웨어가 설치되어 있으며 권한이 적" -"절하게 설정되어 있는지 확인하십시오. " - -msgid "Cannot set both hitachi_serial_number and hitachi_unit_name." -msgstr "hitachi_serial_number 및 hitachi_unit_name 모두 설정할 수 없습니다." - -msgid "Cannot specify both protection domain name and protection domain id." -msgstr "보호 도메인 이름과 보호 도메인 ID를 모두 지정할 수 없습니다. " - -msgid "Cannot specify both storage pool name and storage pool id." -msgstr "스토리지 풀 이름과 스토리지 풀 ID를 모두 지정할 수 없습니다. " - -#, python-format -msgid "" -"Cannot update consistency group %(group_id)s because no valid name, " -"description, add_volumes, or remove_volumes were provided." -msgstr "" -"일관성 그룹 %(group_id)s을(를) 업데이트할 수 없습니다. 올바른 이름, 설명, " -"add_volumes 또는 remove_volumes를 제공하지 않았습니다. " - -msgid "Cannot update encryption specs. Volume type in use." -msgstr "암호화 스펙을 업데이트할 수 없습니다. 볼륨 유형이 사용 중입니다." 
- -#, python-format -msgid "Cannot update volume_type %(id)s" -msgstr "volume_type %(id)s을(를) 업데이트할 수 없음" - -#, python-format -msgid "Cannot verify the existence of object:%(instanceName)s." -msgstr "오브젝트의 존재를 확인할 수 없음: %(instanceName)s." - -#, python-format -msgid "CgSnapshot %(cgsnapshot_id)s could not be found." -msgstr "Cg 스냅샷 %(cgsnapshot_id)s을(를) 찾을 수 없습니다. " - -msgid "Cgsnahost is empty. No consistency group will be created." -msgstr "Cgsnahost가 비어 있습니다. 일관성 그룹이 작성되지 않습니다. " - -msgid "Change hostlun id error." -msgstr "hostlun id 변경 오류." - -msgid "Change lun priority error." -msgstr "Lun 우선순위 변경 오류입니다. " - -msgid "Change lun smarttier policy error." -msgstr "Lun smarttier 정책 변경 오류입니다. " - -#, python-format -msgid "" -"Change would make usage less than 0 for the following resources: %(unders)s" -msgstr "이 변경으로 인해 %(unders)s 자원의 사용량이 0보다 적게 됩니다. " - -msgid "Check access permissions for the ZFS share assigned to this driver." -msgstr "이 드라이버에 지정된 ZFS 공유에 대한 액세스 권한을 확인하십시오." - -msgid "Check hostgroup associate error." -msgstr "호스트 그룹 연관 확인 오류입니다. " - -msgid "Check initiator added to array error." -msgstr "배열에 추가된 개시자 확인 오류입니다. " - -msgid "Check initiator associated to host error." -msgstr "호스트에 연관된 개시자 확인 오류입니다. " - -msgid "Check lungroup associate error." -msgstr "Lun 그룹 연관 확인 오류입니다. " - -msgid "Check portgroup associate error." -msgstr "포트 그룹 연관 확인 오류입니다. " - -msgid "" -"Check the state of the http service. Also ensure that the https port number " -"is the same as the one specified in cinder.conf." -msgstr "" -"http 서비스 상태를 확인하십시오. 또한 https 포트 번호가 cinder.conf에 지정된 " -"번호 중 하나인지도 확인하십시오." - -msgid "Chunk size is not multiple of block size for creating hash." -msgstr "청크 크기가 해시 작성을 위한 블록 크기의 배수가 아닙니다. " - -#, python-format -msgid "Cisco Fibre Channel Zoning CLI error: %(reason)s" -msgstr "Cisco Fibre Channel Zoning CLI 오류: %(reason)s" - -#, python-format -msgid "Clone feature is not licensed on %(storageSystem)s." -msgstr "%(storageSystem)s에 복제 기능 라이센스가 없습니다." 
- -#, python-format -msgid "" -"Clone type '%(clone_type)s' is invalid; valid values are: '%(full_clone)s' " -"and '%(linked_clone)s'." -msgstr "" -"복제 유형 '%(clone_type)s'이(가) 올바르지 않습니다. 올바른 값: " -"'%(full_clone)s' 및 '%(linked_clone)s'." - -msgid "" -"Cluster is not formatted. You should probably perform \"dog cluster format\"." -msgstr "" -"클러스터가 형식화되지 않았습니다. \"dog 클러스터 형식화\"를 수행해야 할 수 있" -"습니다. " - -#, python-format -msgid "Coho Data Cinder driver failure: %(message)s" -msgstr "Coho Data Cinder 드라이버 실패: %(message)s" - -msgid "Coho rpc port is not configured" -msgstr "Coho rpc 포트가 구성되지 않음" - -#, python-format -msgid "Command %(cmd)s blocked in the CLI and was cancelled" -msgstr "명령 %(cmd)s이(가) CLI에서 차단되어 취소됨" - -#, python-format -msgid "CommandLineHelper._wait_for_condition: %s timeout." -msgstr "CommandLineHelper._wait_for_condition: %s 제한시간" - -msgid "Compression Enabler is not installed. Can not create compressed volume." -msgstr "" -"압축 인에이블러가 설치되어 있지 않습니다. 압축된 볼륨을 작성할 수 없습니다." - -#, python-format -msgid "Compute cluster: %(cluster)s not found." -msgstr "계산 클러스터: %(cluster)s을(를) 찾을 수 없습니다. " - -msgid "Condition has no field." -msgstr "조건에 필드가 없습니다." - -#, python-format -msgid "Config 'max_over_subscription_ratio' invalid. Must be > 0: %s" -msgstr "" -"구성 'max_over_subscription_ratio'가 올바르지 않습니다. 0보다 커야 함: %s" - -msgid "Configuration error: dell_sc_ssn not set." -msgstr "구성 오류: dell_sc_ssn이 설정되지 않았습니다." - -#, python-format -msgid "Configuration file %(configurationFile)s does not exist." -msgstr "구성 파일 %(configurationFile)s이(가) 없습니다. " - -msgid "Configuration is not found." -msgstr "구성을 찾을 수 없습니다." - -#, python-format -msgid "Configuration value %s is not set." -msgstr "구성 값 %s을(를) 설정하지 않았습니다." - -#, python-format -msgid "" -"Conflicting QoS specifications in volume type %s: when QoS spec is " -"associated to volume type, legacy \"netapp:qos_policy_group\" is not allowed " -"in the volume type extra specs." 
-msgstr "" -"볼륨 유형 %s에서 QoS 스펙 충돌: QoS 스펙이 볼륨 유형과 연관된 경우에는 볼륨 " -"유형 추가 스펙에서 레거시 \"netapp:qos_policy_group\"이 허용되지 않습니다. " - -#, python-format -msgid "Connection to glance failed: %(reason)s" -msgstr "glance 연결 실패: %(reason)s" - -#, python-format -msgid "Connection to swift failed: %(reason)s" -msgstr "Swift 연결 실패: %(reason)s" - -#, python-format -msgid "Connector does not provide: %s" -msgstr "커넥터가 제공되지 않음: %s" - -#, python-format -msgid "Connector doesn't have required information: %(missing)s" -msgstr "커넥터에 필수 정보가 없음: %(missing)s" - -msgid "Consistency group is empty. No cgsnapshot will be created." -msgstr "일관성 그룹이 비어 있습니다. 작성된 cg 스냅샷이 없습니다." - -#, python-format -msgid "ConsistencyGroup %(consistencygroup_id)s could not be found." -msgstr "일관성 그룹 %(consistencygroup_id)s을(를) 찾을 수 없습니다. " - -msgid "Container" -msgstr "컨테이너" - -msgid "Container size smaller than required file size." -msgstr "컨테이너 크기가 필요한 파일 크기보다 작습니다." - -msgid "Content type not supported." -msgstr "컨텐츠 유형이 지원되지 않습니다." - -#, python-format -msgid "Controller Configuration Service not found on %(storageSystemName)s." -msgstr "%(storageSystemName)s에서 제어기 구성 서비스를 찾을 수 없습니다. " - -#, python-format -msgid "Controller IP '%(host)s' could not be resolved: %(e)s." -msgstr "제어기 IP '%(host)s'을(를) 분석할 수 없음: %(e)s." - -#, python-format -msgid "Converted to %(f1)s, but format is now %(f2)s" -msgstr "%(f1)s(으)로 변환되었지만 이제 형식이 %(f2)s임" - -#, python-format -msgid "Converted to %(vol_format)s, but format is now %(file_format)s" -msgstr "%(vol_format)s(으)로 변환되었지만 이제 형식이 %(file_format)s임" - -#, python-format -msgid "Converted to raw, but format is now %s" -msgstr "원시로 변환되었지만 형식은 지금 %s임" - -#, python-format -msgid "Converted to raw, but format is now %s." -msgstr "원시로 변환되었지만 형식은 현재 %s입니다." - -msgid "Coordinator uninitialized." -msgstr "조정자가 초기화되지 않습니다." - -#, python-format -msgid "" -"Copy volume task failed: convert_to_base_volume: id=%(id)s, status=" -"%(status)s." 
-msgstr "" -"볼륨 복사 태스크에 실패: convert_to_base_volume: id=%(id)s,설정 상태=" -"%(status)s을(를) 무시합니다. " - -#, python-format -msgid "" -"Copy volume task failed: create_cloned_volume id=%(id)s, status=%(status)s." -msgstr "" -"복사 볼륨 태스크에 실패: create_cloned_volume id=%(id)s, status=%(status)s." - -#, python-format -msgid "Copying metadata from %(src_type)s %(src_id)s to %(vol_id)s." -msgstr "%(src_type)s %(src_id)s에서 %(vol_id)s(으)로 메타데이터를 복사하는 중" - -msgid "" -"Could not determine which Keystone endpoint to use. This can either be set " -"in the service catalog or with the cinder.conf config option " -"'backup_swift_auth_url'." -msgstr "" -"사용할 Keystone 엔드포인트를 판별하지 못했습니다. 서비스 카탈로그에서 또는 " -"cinder.conf 구성 옵션 'backup_swift_auth_url'을 사용하여 Swift 엔드포인트를 " -"설정할 수 있습니다." - -msgid "" -"Could not determine which Swift endpoint to use. This can either be set in " -"the service catalog or with the cinder.conf config option 'backup_swift_url'." -msgstr "" -"사용할 Swift 엔드포인트를 판별하지 못했습니다. 서비스 카탈로그에서 또는 " -"cinder.conf 구성 옵션 'backup_swift_url'을 사용하여 Swift 엔드포인트를 설정" -"할 수 있습니다." - -msgid "Could not find DISCO wsdl file." -msgstr "DISCO wsdl 파일을 찾을 수 없습니다." - -#, python-format -msgid "Could not find GPFS cluster id: %s." -msgstr "GPFS 클러스터 ID를 찾을 수 없음: %s." - -#, python-format -msgid "Could not find GPFS file system device: %s." -msgstr "GPFS 파일 시스템 디바이스를 찾을 수 없음: %s." - -#, python-format -msgid "Could not find config at %(path)s" -msgstr "%(path)s에서 구성을 찾을 수 없음" - -#, python-format -msgid "Could not find iSCSI export for volume %s" -msgstr "%s 볼륨에 대한 iSCSI 내보내기를 찾을 수 없음" - -#, python-format -msgid "Could not find iSCSI target for volume: %(volume_id)s." -msgstr "볼륨: %(volume_id)s에 대한 iSCSI 대상을 찾을 수 없습니다. " - -#, python-format -msgid "Could not find key in output of command %(cmd)s: %(out)s." -msgstr "명령 %(cmd)s의 출력에서 키를 찾을 수 없음: %(out)s." 
- -#, python-format -msgid "Could not find parameter %(param)s" -msgstr "파라메터를 찾을수 없습니다: %(param)s" - -#, python-format -msgid "Could not find target %s" -msgstr "대상 %s을(를) 찾을 수 없음" - -#, python-format -msgid "Could not find the parent volume for Snapshot '%s' on array." -msgstr "배열에서 스냅샷 '%s'의 상위 볼륨을 찾을 수 없습니다." - -#, python-format -msgid "Could not find unique snapshot %(snap)s on volume %(vol)s." -msgstr "볼륨 %(vol)s에서 고유 스냅샷 %(snap)s을(를) 찾을 수 없습니다." - -msgid "Could not get system name." -msgstr "시스템 이름을 가져올 수 없습니다. " - -#, python-format -msgid "Could not load paste app '%(name)s' from %(path)s" -msgstr "%(path)s에서 페이스트 앱 '%(name)s'을(를) 로드할 수 없음" - -#, python-format -msgid "" -"Could not read information for snapshot %(name)s. Code: %(code)s. Reason: " -"%(reason)s" -msgstr "" -"스냅샷 %(name)s의 정보를 읽을 수 없습니다. 코드: %(code)s. 이유: %(reason)s" - -#, python-format -msgid "Could not restore configuration file %(file_path)s: %(exc)s" -msgstr "구성을 파일 %(file_path)s에 복원할 수 없음: %(exc)s" - -#, python-format -msgid "Could not save configuration to %(file_path)s: %(exc)s" -msgstr "구성을 %(file_path)s에 저장할 수 없음: %(exc)s" - -#, python-format -msgid "Could not start consistency group snapshot %s." -msgstr "일관성 그룹 스냅샷 %s을(를) 시작할 수 없습니다." - -#, python-format -msgid "Counter %s not found" -msgstr "카운터 %s을(를) 찾을 수 없음" - -msgid "Create QoS policy error." -msgstr "QoS 정책 작성 오류입니다. " - -#, python-format -msgid "" -"Create backup aborted, expected backup status %(expected_status)s but got " -"%(actual_status)s." -msgstr "" -"백업 작성 중단. 예상 백업 상태는 %(expected_status)s이지만 %(actual_status)s" -"인 동안에는 인스턴스 연관을 변경할 수 없습니다." - -#, python-format -msgid "" -"Create backup aborted, expected volume status %(expected_status)s but got " -"%(actual_status)s." -msgstr "" -"백업 작성 중단. 예상 볼륨 상태는 %(expected_status)s이지만 %(actual_status)s" -"인 동안에는 인스턴스 연관을 변경할 수 없습니다." - -msgid "Create export for volume failed." -msgstr "볼륨에 대한 내보내기 작성에 실패했습니다. " - -msgid "Create hostgroup error." -msgstr "호스트 그룹 작성 오류입니다. 
" - -#, python-format -msgid "Create hypermetro error. %s." -msgstr "hypermetro 작성 오류. %s." - -msgid "Create lun error." -msgstr "lun 작성 오류." - -msgid "Create lun migration error." -msgstr "Lun 마이그레이션 작성 오류입니다. " - -msgid "Create luncopy error." -msgstr "luncopy 작성 오류입니다. " - -msgid "Create lungroup error." -msgstr "Lun 그룹 작성 오류입니다. " - -msgid "Create manager volume flow failed." -msgstr "관리자 볼륨 플로우 작성에 실패했습니다. " - -msgid "Create port group error." -msgstr "포트 그룹 작성 오류." - -msgid "Create replication error." -msgstr "복제 작성 오류." - -#, python-format -msgid "Create replication pair failed. Error: %s." -msgstr "복제 쌍 작성 실패. 오류: %s." - -msgid "Create snapshot error." -msgstr "스냅샷 작성 오류입니다. " - -#, python-format -msgid "Create volume error. Because %s." -msgstr "볼륨 작성 오류입니다. 원인은 %s입니다. " - -msgid "Create volume failed." -msgstr "볼륨 작성에 실패했습니다. " - -msgid "Creating a consistency group from a source is not currently supported." -msgstr "소스에서 일관성 그룹을 생성하는 기능은 현재 지원되지 않습니다." - -#, python-format -msgid "" -"Creating and activating zone set failed: (Zone set=%(cfg_name)s error=" -"%(err)s)." -msgstr "" -"구역 세트 작성 및 활성화에 실패: (구역 세트=%(cfg_name)s 오류=%(err)s)." - -#, python-format -msgid "" -"Creating and activating zone set failed: (Zone set=%(zoneset)s error=" -"%(err)s)." -msgstr "구역 세트 작성 및 활성화에 실패: (구역 세트=%(zoneset)s 오류=%(err)s)." - -#, python-format -msgid "Creating usages for %(begin_period)s until %(end_period)s" -msgstr "%(begin_period)s에서 %(end_period)s까지의 기간에 대한 사용 내역 작성" - -msgid "Current host isn't part of HGST domain." -msgstr "현재 호스트가 HGST 도메인의 일부가 아닙니다. " - -#, python-format -msgid "" -"Current host not valid for volume %(id)s with type %(type)s, migration not " -"allowed" -msgstr "" -"현재 호스트가 유형이 %(type)s인 볼륨 %(id)s에 대해 올바르지 않음. 마이그레이" -"션이 허용되지 않음" - -#, python-format -msgid "" -"Currently mapped host for volume %(vol)s is in unsupported host group with " -"%(group)s." -msgstr "" -"볼륨 %(vol)s에 대해 현재 맵핑된 호스트가 %(group)s이(가) 포함된 지원되지 않" -"는 호스트 그룹에 있습니다. 
" - -#, python-format -msgid "" -"DRBDmanage driver error: expected key \"%s\" not in answer, wrong DRBDmanage " -"version?" -msgstr "" -"DRBDmanage 드라이버 오류: 예상 키 \"%s\"이(가) 응답하지 않음, 잘못된 " -"DRBDmanage 버전입니까?" - -msgid "" -"DRBDmanage driver setup error: some required libraries (dbus, drbdmanage.*) " -"not found." -msgstr "" -"DRBDmanage 드라이버 설정 오류: 일부 필수 라이브러리(dbus, drbdmanage.*)를 찾" -"을 수 없습니다. " - -#, python-format -msgid "DRBDmanage expected one resource (\"%(res)s\"), got %(n)d" -msgstr "DRBDmanage에서 하나의 자원(\"%(res)s\")을 예상했지만 %(n)d개를 얻음" - -#, python-format -msgid "" -"DRBDmanage timeout waiting for new volume after snapshot restore; resource " -"\"%(res)s\", volume \"%(vol)s\"" -msgstr "" -"스냅샷 복원 후에 새 볼륨을 기다리는 중에 DRBDmanage 제한시간이 초과됨, 리소" -"스 \"%(res)s\", 볼륨 \"%(vol)s\"" - -#, python-format -msgid "" -"DRBDmanage timeout waiting for snapshot creation; resource \"%(res)s\", " -"snapshot \"%(sn)s\"" -msgstr "" -"스냅샷이 생성될 때까지 기다리는 동안 DRBDmanage 제한시간이 초과됨, 자원 " -"\"%(res)s\", 스냅샷 \"%(sn)s\"" - -#, python-format -msgid "" -"DRBDmanage timeout waiting for volume creation; resource \"%(res)s\", volume " -"\"%(vol)s\"" -msgstr "" -"볼륨이 생성될 때까지 기다리는 동안 DRBDmanage 제한시간이 초과됨, 자원 " -"\"%(res)s\", 볼륨 \"%(vol)s\"" - -#, python-format -msgid "" -"DRBDmanage timeout waiting for volume size; volume ID \"%(id)s\" (res " -"\"%(res)s\", vnr %(vnr)d)" -msgstr "" -"볼륨 크기를 기다리는 동안 DRBDmanage 제한시간이 초과됨, 볼륨 ID \"%(id)s" -"\" (res \"%(res)s\", vnr %(vnr)d)" - -msgid "Data ONTAP API version could not be determined." -msgstr "Data ONTAP API 버전을 판별할 수 없습니다." - -msgid "Data ONTAP operating in 7-Mode does not support QoS policy groups." -msgstr "" -"7 모드에서 작동 중인 데이터 ONTAP이 QoS 정책 그룹을 지원하지 않습니다. " - -msgid "Database schema downgrade is not allowed." -msgstr "데이터베이스 스키마 다운그레이드가 허용되지 않습니다." 
- -#, python-format -msgid "Dataset %s is not shared in Nexenta Store appliance" -msgstr "데이터 세트 %s이(가) Nexenta Store appliance에서 공유되지 않음" - -#, python-format -msgid "Dataset group %s not found at Nexenta SA" -msgstr "Nexenta SA에서 데이터 세트 그룹 %s을(를) 찾을 수 없음" - -#, python-format -msgid "" -"Dedup is a valid provisioning type, but requires WSAPI version " -"'%(dedup_version)s' version '%(version)s' is installed." -msgstr "" -"Dedup는 올바른 프로비저닝 유형이지만 WSAPI 버전 '%(dedup_version)s' 버전 " -"'%(version)s'이(가) 설치되어 있어야 합니다. " - -msgid "Dedup luns cannot be extended" -msgstr "Dedup lun을 확장할 수 없음" - -#, python-format -msgid "" -"Default quota for resource: %(res)s is set by the default quota flag: quota_" -"%(res)s, it is now deprecated. Please use the default quota class for " -"default quota." -msgstr "" -"%(res)s 자원에 대한 기본 할당량은 기본 할당량 플래그 quota_%(res)s에 의해 설" -"정됩니다. 이제 이 플래그는 더 이상 사용되지 않습니다. 기본 할당량에 기본 할당" -"량 클래스를 사용하십시오." - -msgid "Default volume type can not be found." -msgstr "기본 볼륨 유형을 찾을 수 없습니다." - -msgid "Delete LUNcopy error." -msgstr "LUNcopy 삭제 오류입니다. " - -msgid "Delete QoS policy error." -msgstr "QoS 정책 삭제 오류입니다. " - -msgid "Delete associated lun from lungroup error." -msgstr "Lun 그룹에서 연관된 lun 삭제 오류입니다. " - -#, python-format -msgid "" -"Delete backup aborted, the backup service currently configured " -"[%(configured_service)s] is not the backup service that was used to create " -"this backup [%(backup_service)s]." -msgstr "" -"백업 삭제가 중단되었습니다. 현재 구성된 백업 서비스 [%(configured_service)s]" -"은(는) 이 백업 [%(backup_service)s]을(를) 작성하기 위해 사용된 백업 서비스가 " -"아닙니다." - -msgid "Delete consistency group failed." -msgstr "일관성 그룹 삭제에 실패했습니다. " - -msgid "Delete hostgroup error." -msgstr "호스트 그룹 삭제 오류입니다. " - -msgid "Delete hostgroup from mapping view error." -msgstr "맵핑 보기에서 호스트 그룹 삭제 오류입니다. " - -msgid "Delete lun error." -msgstr "LUN 삭제 오류입니다. " - -msgid "Delete lun migration error." -msgstr "Lun 마이그레이션 삭제 오류입니다. " - -msgid "Delete lungroup error." -msgstr "Lun 그룹 삭제 오류입니다. 
" - -msgid "Delete lungroup from mapping view error." -msgstr "맵핑 보기에에서 lun 그룹 삭제 오류입니다. " - -msgid "Delete mapping view error." -msgstr "맵핑 보기 삭제 오류입니다. " - -msgid "Delete port group error." -msgstr "포트 그룹 삭제 오류." - -msgid "Delete portgroup from mapping view error." -msgstr "맵핑 보기에서 포트 그룹 삭제 오류입니다. " - -msgid "Delete snapshot error." -msgstr "스냅샷 삭제 오류입니다. " - -#, python-format -msgid "Delete snapshot of volume not supported in state: %s." -msgstr "볼륨의 스냅샷 삭제가 다음 상태에서 지원되지 않음: %s" - -#, python-format -msgid "" -"Delete_backup aborted, expected backup status %(expected_status)s but got " -"%(actual_status)s." -msgstr "" -"Delete_backup 중단. 예상 백업 상태는 %(expected_status)s이지만 " -"%(actual_status)s인 동안에는 인스턴스 연관을 변경할 수 없습니다." - -msgid "Deleting volume from database and skipping rpc." -msgstr "데이터베이스에서 볼륨을 삭제하고 rpc를 건너뜁니다." - -#, python-format -msgid "Deleting zones failed: (command=%(cmd)s error=%(err)s)." -msgstr "구역 삭제 실패: (명령=%(cmd)s 오류=%(err)s)." - -msgid "Dell API 2.1 or later required for Consistency Group support" -msgstr "일관성 그룹 지원을 위해서는 Dell API 2.1 이상이 필요함" - -msgid "" -"Dell Cinder driver configuration error replication not supported with direct " -"connect." -msgstr "" -"직접 연결에서는 Dell Cinder 드라이버 구성 오류 복제가 지원되지 않습니다." - -#, python-format -msgid "Dell Cinder driver configuration error replication_device %s not found" -msgstr "" -"Dell Cinder 드라이버 구성 오류 replication_device %s을(를) 찾을 수 없음" - -msgid "Describe-resource is admin only functionality" -msgstr "Describe-resource 기능은 관리자만 사용가능" - -#, python-format -msgid "Destination has migration_status %(stat)s, expected %(exp)s." -msgstr "%(exp)s을(를) 예상했지만 대상에 migration_status %(stat)s이(가) 있음. " - -msgid "Destination volume not mid-migration." -msgstr "대상 볼륨이 마이그레이션에 포함되지 않음 " - -msgid "" -"Detach volume failed: More than one attachment, but no attachment_id " -"provided." -msgstr "" -"볼륨 분리 실패: 둘 이상의 첨부가 있지만 attachment_id가 제공되지 않았습니다. " - -msgid "Detach volume from instance and then try again." 
-msgstr "인스턴스에서 볼륨을 분리하고 다시 시도하십시오." - -#, python-format -msgid "Detected more than one volume with name %(vol_name)s" -msgstr "이름이 %(vol_name)s인 둘 이상의 볼륨을 발견했음" - -#, python-format -msgid "Did not find expected column in %(fun)s: %(hdr)s." -msgstr "%(fun)s에서 예상 열을 찾을 수 없음: %(hdr)s." - -#, python-format -msgid "Did not find the expected key %(key)s in %(fun)s: %(raw)s." -msgstr "%(fun)s에서 예상 키 %(key)s을(를) 찾을 수 없음: %(raw)s." - -msgid "Disabled reason contains invalid characters or is too long" -msgstr "문자가 올바르지 않거나 너무 긴 이유로 사용되지 않습니다. " - -#, python-format -msgid "Domain with name %s wasn't found." -msgstr "이름이 %s인 도메인을 찾을 수 없습니다. " - -#, python-format -msgid "" -"Downlevel GPFS Cluster Detected. GPFS Clone feature not enabled in cluster " -"daemon level %(cur)s - must be at least at level %(min)s." -msgstr "" -"하위 레벨 GPFS 클러스터가 발견됨. GPFS 복제 기능을 클러스터 디먼 레벨 %(cur)s" -"에서 사용할 수 없음 - 최소 %(min)s 레벨이어야 합니다. " - -#, python-format -msgid "Driver initialize connection failed (error: %(err)s)." -msgstr "드라이버 연결 초기화에 실패했습니다(오류: %(err)s). " - -msgid "Driver must implement initialize_connection" -msgstr "드라이버가 initialize_connection을 구현해야 함" - -#, python-format -msgid "" -"Driver successfully decoded imported backup data, but there are missing " -"fields (%s)." -msgstr "" -"드라이버가 가져온 백업 데이터를 디코드했지만 필드(%s)가 누락되었습니다. " - -#, python-format -msgid "" -"E-series proxy API version %(current_version)s does not support full set of " -"SSC extra specs. The proxy version must be at at least %(min_version)s." -msgstr "" -"E-series 프록시 API 버전 %(current_version)s이(가) SSC 추가 스펙의 전체 세트" -"를 지원하지 않습니다. 프록시 버전은 %(min_version)s 이상이어야 합니다. " - -#, python-format -msgid "" -"EMC VNX Cinder Driver CLI exception: %(cmd)s (Return Code: %(rc)s) (Output: " -"%(out)s)." -msgstr "" -"EMC VNX Cinder 드라이버 CLI 예외: %(cmd)s(리턴 코드: %(rc)s) (출력: %(out)s)." - -#, python-format -msgid "" -"EMC VNX Cinder Driver SPUnavailableException: %(cmd)s (Return Code: %(rc)s) " -"(Output: %(out)s)." 
-msgstr "" -"EMC VNX Cinder Driver SPUnavailableException: %(cmd)s (리턴 코드: %(rc)s) (출" -"력: %(out)s)." - -msgid "" -"EcomServerIp, EcomServerPort, EcomUserName, EcomPassword must have valid " -"values." -msgstr "" -"EcomServerIp, EcomServerPort, EcomUserName, EcomPassword는 올바른 값을 가지" -"고 있어야 합니다. " - -#, python-format -msgid "" -"Either 'cgsnapshot_id' or 'source_cgid' must be provided to create " -"consistency group %(name)s from source." -msgstr "" -"소스에서 일관성 그룹 %(name)s을(를) 작성하려면 'cgsnapshot_id' 또는 " -"'source_cgid'를 제공해야 합니다. " - -#, python-format -msgid "" -"Either SLO: %(slo)s or workload %(workload)s is invalid. Examine previous " -"error statement for valid values." -msgstr "" -"SLO: %(slo)s 또는 워크로드 %(workload)s이(가) 올바르지 않습니다. 올바른 값은" -"이전 오류문을 확인하십시오. " - -msgid "Either hitachi_serial_number or hitachi_unit_name is required." -msgstr "hitachi_serial_number 또는 hitachi_unit_name이 필요합니다." - -#, python-format -msgid "Element Composition Service not found on %(storageSystemName)s." -msgstr "%(storageSystemName)s에서 요소 컴포지션 서비스를 찾을 수 없습니다. " - -msgid "Enables QoS." -msgstr "QoS를 사용으로 설정합니다. " - -msgid "Enables compression." -msgstr "압축을 사용으로 설정합니다. " - -msgid "Enables replication." -msgstr "복제를 사용으로 설정합니다. " - -msgid "Ensure that configfs is mounted at /sys/kernel/config." -msgstr "configfs가 /sys/kernel/config에 마운트되는지 확인하십시오." - -#, python-format -msgid "" -"Error Adding Initiator: %(initiator)s on groupInitiatorGroup: " -"%(initiatorgroup)s Return code: %(ret.status)d Message: %(ret.data)s ." -msgstr "" -"groupInitiatorGroup에 개시자 %(initiator)s을(를) 추가하는 중 오류 발생: " -"%(initiatorgroup)s. 리턴 코드: %(ret.status)d 메시지: %(ret.data)s ." - -#, python-format -msgid "" -"Error Adding to TargetGroup: %(targetgroup)s withIQN: %(iqn)sReturn code: " -"%(ret.status)d Message: %(ret.data)s." -msgstr "" -"IQN이 %(iqn)s인 대상 그룹 %(targetgroup)s에 추가하는 중 오류 발생. 리턴 코" -"드: %(ret.status)d 메시지: %(ret.data)s." - -#, python-format -msgid "Error Attaching volume %(vol)s." 
-msgstr "볼륨 %(vol)s을(를) 연결하는 중에 오류가 발생했습니다. " - -#, python-format -msgid "" -"Error Cloning Snapshot: %(snapshot)s on Volume: %(lun)s of Pool: %(pool)s " -"Project: %(project)s Clone project: %(clone_proj)s Return code: " -"%(ret.status)d Message: %(ret.data)s." -msgstr "" -"풀: %(pool)s의 볼륨: %(lun)s에서 스냅샷: %(snapshot)s 복제 중 오류 발생 프로" -"젝트: %(project)s 복제 프로젝트: %(clone_proj)s 리턴 코드: %(ret.status)d 메" -"시지: %(ret.data)s." - -#, python-format -msgid "" -"Error Create Cloned Volume: %(cloneName)s Return code: %(rc)lu. Error: " -"%(error)s." -msgstr "" -"복제된 볼륨 작성 오류: %(cloneName)s 리턴 코드: %(rc)lu. 오류: %(error)s." - -#, python-format -msgid "" -"Error Create Cloned Volume: Volume: %(cloneName)s Source Volume:" -"%(sourceName)s. Return code: %(rc)lu. Error: %(error)s." -msgstr "" -"복제된 볼륨 작성 중 오류: 볼륨: %(cloneName)s 소스 볼륨: %(sourceName)s. 리" -"턴 코드: %(rc)lu. 오류: %(error)s." - -#, python-format -msgid "" -"Error Create Group: %(groupName)s. Return code: %(rc)lu. Error: %(error)s." -msgstr "그룹 작성 오류: %(groupName)s. 리턴 코드: %(rc)lu. 오류: %(error)s." - -#, python-format -msgid "" -"Error Create Masking View: %(groupName)s. Return code: %(rc)lu. Error: " -"%(error)s." -msgstr "" -"마스킹 보기 작성 오류: %(groupName)s. 리턴 코드: %(rc)lu. 오류: %(error)s." - -#, python-format -msgid "" -"Error Create Volume: %(volumeName)s. Return code: %(rc)lu. Error: %(error)s." -msgstr "볼륨 작성 오류: %(volumeName)s. 리턴 코드: %(rc)lu. 오류: %(error)s." - -#, python-format -msgid "" -"Error Create Volume: %(volumename)s. Return code: %(rc)lu. Error: %(error)s." -msgstr "볼륨 작성 오류: %(volumename)s. 리턴 코드: %(rc)lu. 오류: %(error)s." - -#, python-format -msgid "" -"Error CreateGroupReplica: source: %(source)s target: %(target)s. Return " -"code: %(rc)lu. Error: %(error)s." -msgstr "" -"CreateGroupReplica 오류: 소스: %(source)s 대상: %(target)s. 리턴 코드: " -"%(rc)lu. 오류: %(error)s." - -#, python-format -msgid "" -"Error Creating Initiator: %(initiator)s on Alias: %(alias)s Return code: " -"%(ret.status)d Message: %(ret.data)s ." 
-msgstr "" -"%(alias)s 별명에 %(initiator)s 개시자를 작성하는 중 오류 발생. 리턴 코드: " -"%(ret.status)d 메시지: %(ret.data)s." - -#, python-format -msgid "" -"Error Creating Project: %(project)s on Pool: %(pool)s Return code: " -"%(ret.status)d Message: %(ret.data)s ." -msgstr "" -"%(pool)s 풀에 %(project)s 프로젝트를 작성하는 중 오류 발생. 리턴 코드: " -"%(ret.status)d 메시지: %(ret.data)s." - -#, python-format -msgid "" -"Error Creating Property: %(property)s Type: %(type)s Description: " -"%(description)s Return code: %(ret.status)d Message: %(ret.data)s." -msgstr "" -"특성: %(property)s 유형: %(type)s 설명: %(description)s 작성 중 오류 발생, 리" -"턴 코드: %(ret.status)d 메시지: %(ret.data)s." - -#, python-format -msgid "" -"Error Creating Share: %(name)s Return code: %(ret.status)d Message: " -"%(ret.data)s." -msgstr "" -"공유 작성 중 오류 발생: %(name)s 리턴 코드: %(ret.status)d 메시지: " -"%(ret.data)s." - -#, python-format -msgid "" -"Error Creating Snapshot: %(snapshot)s onVolume: %(lun)s to Pool: %(pool)s " -"Project: %(project)s Return code: %(ret.status)d Message: %(ret.data)s." -msgstr "" -"%(pool)s 풀 %(project)s 프로젝트의 %(lun)s 볼륨에 %(snapshot)s 스냅샷을 가져" -"오는 중 오류 발생. 리턴 코드: %(ret.status)d 메시지: %(ret.data)s." - -#, python-format -msgid "" -"Error Creating Snapshot: %(snapshot)s onshare: %(share)s to Pool: %(pool)s " -"Project: %(project)s Return code: %(ret.status)d Message: %(ret.data)s." -msgstr "" -"스냅샷 작성 오류: %(snapshot)s 공유: %(share)s 대상 풀: %(pool)s 프로젝트: " -"%(project)s 리턴 코드: %(ret.status)d 메시지: %(ret.data)s." - -#, python-format -msgid "" -"Error Creating Target: %(alias)sReturn code: %(ret.status)d Message: " -"%(ret.data)s ." -msgstr "" -"%(alias)s 대상 작성 중 오류 발생. 리턴 코드: %(ret.status)d 메시지: " -"%(ret.data)s." - -#, python-format -msgid "" -"Error Creating TargetGroup: %(targetgroup)s withIQN: %(iqn)sReturn code: " -"%(ret.status)d Message: %(ret.data)s ." -msgstr "" -"IQN %(iqn)s(으)로 대상 그룹 %(targetgroup)s을(를) 작성하는 중 오류 발생. 리" -"턴 코드: %(ret.status)d 메시지: %(ret.data)s." 
- -#, python-format -msgid "" -"Error Creating Volume: %(lun)s Size: %(size)s Return code: %(ret.status)d " -"Message: %(ret.data)s." -msgstr "" -"%(lun)s 볼륨(크기: %(size)s)을 작성하는 중 오류 발생. 리턴 코드: " -"%(ret.status)d 메시지: %(ret.data)s." - -#, python-format -msgid "" -"Error Creating new composite Volume Return code: %(rc)lu. Error: %(error)s." -msgstr "새 컴포지트 볼륨 작성 중 오류. 리턴 코드: %(rc)lu. 오류: %(error)s." - -#, python-format -msgid "" -"Error Creating replication action on: pool: %(pool)s Project: %(proj)s " -"volume: %(vol)s for target: %(tgt)s and pool: %(tgt_pool)sReturn code: " -"%(ret.status)d Message: %(ret.data)s ." -msgstr "" -"대상: %(tgt)s 및 풀: %(tgt_pool)s에 대해 풀: %(pool)s 프로젝트: %(proj)s 볼" -"륨: %(vol)s에서 복제 조치 작성 중 오류 발생, 리턴 코드: %(ret.status)d 메시" -"지: %(ret.data)s." - -msgid "Error Creating unbound volume on an Extend operation." -msgstr "확장 조작 시 언바운드 볼륨을 작성하는 중에 오류가 발생했습니다. " - -msgid "Error Creating unbound volume." -msgstr "언바운드 볼륨을 작성하는 중 오류가 발생했습니다." - -#, python-format -msgid "" -"Error Delete Volume: %(volumeName)s. Return code: %(rc)lu. Error: %(error)s." -msgstr "볼륨 삭제 오류: %(volumeName)s. 리턴 코드: %(rc)lu. 오류: %(error)s." - -#, python-format -msgid "" -"Error Deleting Group: %(storageGroupName)s. Return code: %(rc)lu. Error: " -"%(error)s" -msgstr "" -"그룹 삭제 중 오류: %(storageGroupName)s. 리턴 코드: %(rc)lu. 오류:%(error)s" - -#, python-format -msgid "" -"Error Deleting Initiator Group: %(initiatorGroupName)s. Return code: " -"%(rc)lu. Error: %(error)s" -msgstr "" -"개시자 그룹 삭제 오류: %(initiatorGroupName)s. 리턴 코드: %(rc)lu. 오류: " -"%(error)s" - -#, python-format -msgid "" -"Error Deleting Snapshot: %(snapshot)s on Share: %(share)s to Pool: %(pool)s " -"Project: %(project)s Return code: %(ret.status)d Message: %(ret.data)s." -msgstr "" -"스냅샷: %(snapshot)s 삭제 오류, 공유: %(share)s, 대상 풀: %(pool)s 프로젝트: " -"%(project)s 리턴 코드: %(ret.status)d 메시지: %(ret.data)s." 
- -#, python-format -msgid "" -"Error Deleting Snapshot: %(snapshot)s on Volume: %(lun)s to Pool: %(pool)s " -"Project: %(project)s Return code: %(ret.status)d Message: %(ret.data)s." -msgstr "" -"%(pool)s 풀 %(project)s 프로젝트의 %(lun)s 볼륨에서 %(snapshot)s 스냅샷을 삭" -"제하는 중 오류 발생. 리턴 코드: %(ret.status)d 메시지: %(ret.data)s." - -#, python-format -msgid "" -"Error Deleting Volume: %(lun)s from Pool: %(pool)s, Project: %(project)s. " -"Return code: %(ret.status)d, Message: %(ret.data)s." -msgstr "" -"풀: %(pool)s에서 볼륨: %(lun)s 삭제 중 오류 발생, 프로젝트: %(project)s. 리" -"턴 코드: %(ret.status)d, 메시지: %(ret.data)s." - -#, python-format -msgid "" -"Error Deleting project: %(project)s on pool: %(pool)s Return code: " -"%(ret.status)d Message: %(ret.data)s." -msgstr "" -"풀: %(pool)s에서 프로젝트: %(project)s을(를) 삭제하는 중 오류 발생, 리턴 코" -"드: %(ret.status)d 메시지: %(ret.data)s." - -#, python-format -msgid "" -"Error Deleting replication action: %(id)s Return code: %(ret.status)d " -"Message: %(ret.data)s." -msgstr "" -"복제 조치: %(id)s 삭제 중 오류 발생, 리턴 코드: %(ret.status)d 메시지: " -"%(ret.data)s." - -#, python-format -msgid "" -"Error Extend Volume: %(volumeName)s. Return code: %(rc)lu. Error: %(error)s." -msgstr "볼륨 확장 오류: %(volumeName)s. 리턴 코드: %(rc)lu. 오류: %(error)s." - -#, python-format -msgid "" -"Error Getting Initiators: InitiatorGroup: %(initiatorgroup)s Return code: " -"%(ret.status)d Message: %(ret.data)s ." -msgstr "" -"개시자를 가져오는 중 오류 발생: 개시자 그룹: %(initiatorgroup)s 리턴 코드:" -"%(ret.status)d 메시지: %(ret.data)s." - -#, python-format -msgid "" -"Error Getting Pool Stats: Pool: %(pool)s Return code: %(status)d Message: " -"%(data)s." -msgstr "" -"풀 통계 가져오기 오류: 풀: %(pool)s 리턴 코드: %(status)d 메시지: %(data)s." - -#, python-format -msgid "" -"Error Getting Project Stats: Pool: %(pool)s Project: %(project)s Return " -"code: %(ret.status)d Message: %(ret.data)s." -msgstr "" -"프로젝트 통계를 가져오는 중 오류 발생: 풀: %(pool)s 프로젝트: %(project)s 리" -"턴 코드: %(ret.status)d 메시지: %(ret.data)s." 
- -#, python-format -msgid "" -"Error Getting Share: %(share)s on Pool: %(pool)s Project: %(project)s Return " -"code: %(ret.status)d Message: %(ret.data)s." -msgstr "" -"공유 가져오기 중 오류 발생: %(share)s 풀: %(pool)s 프로젝트: %(project)s 리" -"턴 코드: %(ret.status)d 메시지: %(ret.data)s." - -#, python-format -msgid "" -"Error Getting Snapshot: %(snapshot)s on Volume: %(lun)s to Pool: %(pool)s " -"Project: %(project)s Return code: %(ret.status)d Message: %(ret.data)s." -msgstr "" -"%(pool)s 풀 %(project)s 프로젝트의 %(lun)s 볼륨에서 %(snapshot)s 스냅샷을 가" -"져오는 중 오류 발생. 리턴 코드: %(ret.status)d 메시지: %(ret.data)s." - -#, python-format -msgid "" -"Error Getting Target: %(alias)sReturn code: %(ret.status)d Message: " -"%(ret.data)s ." -msgstr "" -"%(alias)s 대상을 가져오는 중 오류 발생. 리턴 코드: %(ret.status)d 메시지: " -"%(ret.data)s." - -#, python-format -msgid "" -"Error Getting Volume: %(lun)s on Pool: %(pool)s Project: %(project)s Return " -"code: %(ret.status)d Message: %(ret.data)s." -msgstr "" -"%(pool)s 풀 %(project)s 프로젝트에서 %(lun)s 볼륨을 가져오는 중 오류 발생. 리" -"턴 코드: %(ret.status)d 메시지: %(ret.data)s." - -#, python-format -msgid "" -"Error Migrating volume from one pool to another. Return code: %(rc)lu. " -"Error: %(error)s." -msgstr "" -"하나의 풀에서 다른 풀로 볼륨을 마이그레이션하는 중 오류 발생. 리턴 코드: " -"%(rc)lu. 오류: %(error)s. " - -#, python-format -msgid "" -"Error Modifying masking view : %(groupName)s. Return code: %(rc)lu. Error: " -"%(error)s." -msgstr "" -"마스킹 보기 수정 중 오류 발생: %(groupName)s. 리턴 코드: %(rc)lu. 오류: " -"%(error)s. " - -#, python-format -msgid "Error Pool ownership: Pool %(pool)s is not owned by %(host)s." -msgstr "풀 소유권 오류: %(host)s에서 풀 %(pool)s을(를) 소유하지 않습니다." - -#, python-format -msgid "" -"Error Setting props Props: %(props)s on Volume: %(lun)s of Pool: %(pool)s " -"Project: %(project)s Return code: %(ret.status)d Message: %(ret.data)s." -msgstr "" -"%(pool)s 풀 %(project)s 프로젝트의 %(lun)s 볼륨에서 %(props)s 특성을 가져오" -"는 중 오류 발생. 리턴 코드: %(ret.status)d 메시지: %(ret.data)s." 
- -#, python-format -msgid "" -"Error Terminating migrate session. Return code: %(rc)lu. Error: %(error)s." -msgstr "마이그레이션 세션 종료 중 오류. 리턴 코드: %(rc)lu. 오류: %(error)s." - -#, python-format -msgid "" -"Error Verifying Initiator: %(iqn)s Return code: %(ret.status)d Message: " -"%(ret.data)s." -msgstr "" -"%(iqn)s 개시자를 확인하는 중 오류 발생. 리턴 코드: %(ret.status)d 메시지: " -"%(ret.data)s." - -#, python-format -msgid "" -"Error Verifying Pool: %(pool)s Return code: %(ret.status)d Message: " -"%(ret.data)s." -msgstr "" -"%(pool)s 풀을 확인하는 중 오류 발생. 리턴 코드: %(ret.status)d 메시지: " -"%(ret.data)s." - -#, python-format -msgid "" -"Error Verifying Project: %(project)s on Pool: %(pool)s Return code: " -"%(ret.status)d Message: %(ret.data)s." -msgstr "" -"%(pool)s 풀에서 %(project)s 프로젝트를 확인하는 중 오류 발생. 리턴 코드: " -"%(ret.status)d 메시지: %(ret.data)s." - -#, python-format -msgid "" -"Error Verifying Service: %(service)s Return code: %(ret.status)d Message: " -"%(ret.data)s." -msgstr "" -"서비스 %(service)s 확인 중 오류 발생. 리턴 코드: %(ret.status)d 메시지:" -"%(ret.data)s인 동안에는 인스턴스 연관을 변경할 수 없습니다." - -#, python-format -msgid "" -"Error Verifying Target: %(alias)s Return code: %(ret.status)d Message: " -"%(ret.data)s." -msgstr "" -"%(alias)s 대상을 확인하는 중 오류 발생. 리턴 코드: %(ret.status)d 메시지: " -"%(ret.data)s." - -#, python-format -msgid "" -"Error Verifying share: %(share)s on Project: %(project)s and Pool: %(pool)s " -"Return code: %(ret.status)d Message: %(ret.data)s." -msgstr "" -"공유 확인 중 오류 발생: %(share)s, 프로젝트: %(project)s, 풀: %(pool)s, 리턴 " -"코드: %(ret.status)d 메시지: %(ret.data)s." - -#, python-format -msgid "" -"Error adding Volume: %(volumeName)s with instance path: " -"%(volumeInstancePath)s." -msgstr "" -"다음 인스턴스 경로를 사용하여 %(volumeName)s 볼륨을 추가하는 중에 오류가 발생" -"했습니다. %(volumeInstancePath)s." - -#, python-format -msgid "" -"Error adding initiator to group : %(groupName)s. Return code: %(rc)lu. " -"Error: %(error)s." -msgstr "" -"그룹에 개시자 추가 중 오류 발생: %(groupName)s. 리턴 코드: %(rc)lu. 오류: " -"%(error)s. 
" - -#, python-format -msgid "Error adding volume to composite volume. Error is: %(error)s." -msgstr "컴포지트 볼륨에 볼륨 추가 중 오류: %(error)s." - -#, python-format -msgid "Error appending volume %(volumename)s to target base volume." -msgstr "" -"%(volumename)s 볼륨을 대상 기본 볼륨에 추가하는 중에 오류가 발생했습니다. " - -#, python-format -msgid "" -"Error associating storage group : %(storageGroupName)s. To fast Policy: " -"%(fastPolicyName)s with error description: %(errordesc)s." -msgstr "" -"%(storageGroupName)s 스토리지 그룹을 fast 정책 %(fastPolicyName)s과(와) 연관" -"시키는 중 오류 발생. 오류 설명: %(errordesc)s." - -#, python-format -msgid "" -"Error break clone relationship: Sync Name: %(syncName)s Return code: " -"%(rc)lu. Error: %(error)s." -msgstr "" -"복제 관계 중단 오류: 동기화 이름: %(syncName)s 리턴 코드: %(rc)lu. 오류: " -"%(error)s." - -msgid "Error connecting to ceph cluster." -msgstr "ceph 클러스터 연결 중에 오류가 발생했습니다. " - -#, python-format -msgid "Error connecting via ssh: %s" -msgstr "ssh를 통해 연결하는 중 오류 발생: %s" - -#, python-format -msgid "Error creating volume: %s." -msgstr "볼륨 작성 중 오류 발생: %s." - -msgid "Error deleting replay profile." -msgstr "재생 프로파일을 삭제하는 중에 오류가 발생했습니다. " - -#, python-format -msgid "Error deleting volume %(ssn)s: %(volume)s" -msgstr "%(ssn)s 볼륨: %(volume)s을(를) 삭제하는 중 오류 발생" - -#, python-format -msgid "Error deleting volume %(vol)s: %(err)s." -msgstr "볼륨 %(vol)s 삭제 중 오류 발생: %(err)s." - -#, python-format -msgid "Error during evaluator parsing: %(reason)s" -msgstr "평가자 구문 분석 중 오류 발생: %(reason)s" - -#, python-format -msgid "" -"Error editing share: %(share)s on Pool: %(pool)s Return code: %(ret.status)d " -"Message: %(ret.data)s ." -msgstr "" -"공유 편집 중 오류 발생: %(share)s 풀: %(pool)s 리턴 코드: %(ret.status)d 메시" -"지: %(ret.data)s." - -#, python-format -msgid "" -"Error enabling iSER for NetworkPortal: please ensure that RDMA is supported " -"on your iSCSI port %(port)d on ip %(ip)s." -msgstr "" -"NetworkPortal에 대해 iSER을 사용으로 설정하는 중 오류 발생: ip %(ip)s의 " -"iSCSI 포트 %(port)d에서 RDMA가 지원되는지 확인하십시오. 
" - -#, python-format -msgid "Error encountered during cleanup of a failed attach: %(ex)s" -msgstr "실패한 접속을 정리하는 중에 오류 발생: %(ex)s" - -#, python-format -msgid "Error executing CloudByte API [%(cmd)s], Error: %(err)s." -msgstr "CloudByte API 실행 오류 [%(cmd)s], 오류: %(err)s." - -msgid "Error executing EQL command" -msgstr "EQL 명령 실행 중 오류 발생 " - -#, python-format -msgid "Error executing command via ssh: %s" -msgstr "ssh를 통해 명령 실행 중에 오류 발생: %s" - -#, python-format -msgid "Error extending volume %(vol)s: %(err)s." -msgstr "볼륨 %(vol)s 확장 중 오류 발생: %(err)s." - -#, python-format -msgid "Error extending volume: %(reason)s" -msgstr "볼륨 확장 중 오류 발생: %(reason)s" - -#, python-format -msgid "Error finding %(name)s." -msgstr "%(name)s을(를) 찾는 중에 오류가 발생했습니다. " - -#, python-format -msgid "Error finding %s." -msgstr "%s을(를) 찾는 중에 오류가 발생했습니다. " - -#, python-format -msgid "" -"Error getting ReplicationSettingData. Return code: %(rc)lu. Error: %(error)s." -msgstr "" -"ReplicationSettingData 가져오기 오류. 리턴 코드: %(rc)lu. 오류: %(error)s." - -msgid "" -"Error getting appliance version details. Return code: %(ret.status)d " -"Message: %(ret.data)s ." -msgstr "" -"어플라이언스 버전 세부사항을 가져오는 중에 오류가 발생했습니다. 리턴 코드: " -"%(ret.status)d 메시지: %(ret.data)s." - -#, python-format -msgid "Error getting domain id from name %(name)s: %(err)s." -msgstr "이름 %(name)s에서 도메인 ID를 가져오는 중에 오류 발생: %(err)s." - -#, python-format -msgid "Error getting domain id from name %(name)s: %(id)s." -msgstr "이름 %(name)s에서 도메인 ID를 가져오는 중에 오류 발생: %(id)s." - -msgid "Error getting initiator groups." -msgstr "개시자 그룹을 가져오는 중 오류가 발생했습니다." - -#, python-format -msgid "Error getting pool id from name %(pool)s: %(err)s." -msgstr "이름 %(pool)s에서 풀 ID를 가져오는 중에 오류 발생: %(err)s." - -#, python-format -msgid "Error getting pool id from name %(pool_name)s: %(err_msg)s." -msgstr "이름 %(pool_name)s에서 풀 ID를 가져오는 중에 오류 발생: %(err_msg)s." - -#, python-format -msgid "" -"Error getting replication action: %(id)s. Return code: %(ret.status)d " -"Message: %(ret.data)s ." 
-msgstr "" -"복제 조치: %(id)s을(를) 가져오는 중에 오류가 발생했습니다. 리턴 코드: " -"%(ret.status)d 메시지: %(ret.data)s." - -msgid "" -"Error getting replication source details. Return code: %(ret.status)d " -"Message: %(ret.data)s ." -msgstr "" -"복제 소스 세부사항을 가져오는 중에 오류가 발생했습니다. 리턴 코드: %(ret." -"status)d 메시지: %(ret.data)s." - -msgid "" -"Error getting replication target details. Return code: %(ret.status)d " -"Message: %(ret.data)s ." -msgstr "" -"복제 대상 세부사항을 가져오는 중에 오류가 발생했습니다. 리턴 코드: %(ret." -"status)d 메시지: %(ret.data)s." - -#, python-format -msgid "" -"Error getting version: svc: %(svc)s.Return code: %(ret.status)d Message: " -"%(ret.data)s." -msgstr "" -"버전을 가져오는 중 오류 발생: svc: %(svc)s. 리턴 코드: %(ret.status)d 메시" -"지: %(ret.data)s." - -#, python-format -msgid "" -"Error in Operation [%(operation)s] for volume [%(cb_volume)s] in CloudByte " -"storage: [%(cb_error)s], error code: [%(error_code)s]." -msgstr "" -"CloudByte 스토리지의 볼륨 [%(cb_volume)s]에 대한 조작 [%(operation)s] 중 오" -"류 발생: [%(cb_error)s], 오류 코드: [%(error_code)s]." - -#, python-format -msgid "Error in SolidFire API response: data=%(data)s" -msgstr "SolidFire API 응답의 오류: 데이터=%(data)s" - -#, python-format -msgid "Error in space-create for %(space)s of size %(size)d GB" -msgstr "크기가 %(size)dGB인 %(space)s에 대한 공간 작성 시 오류 발생" - -#, python-format -msgid "Error in space-extend for volume %(space)s with %(size)d additional GB" -msgstr "추가로 %(size)dGB를 가진 볼륨 %(space)s에 대한 공간 확장 시 오류 발생" - -#, python-format -msgid "Error managing volume: %s." -msgstr "볼륨 관리 중 오류 발생: %s." - -#, python-format -msgid "" -"Error modify replica synchronization: %(sv)s operation: %(operation)s. " -"Return code: %(rc)lu. Error: %(error)s." -msgstr "" -"복제본 동기화 수정 오류: %(sv)s 조작: %(operation)s. 리턴 코드: %(rc)lu. 오" -"류: %(error)s." - -#, python-format -msgid "" -"Error modifying Service: %(service)s Return code: %(ret.status)d Message: " -"%(ret.data)s." -msgstr "" -"서비스 수정 중 오류 발생: %(service)s 리턴 코드: %(ret.status)d 메시지:" -"%(ret.data)s인 동안에는 인스턴스 연관을 변경할 수 없습니다." 
- -#, python-format -msgid "" -"Error moving volume: %(vol)s from source project: %(src)s to target project: " -"%(tgt)s Return code: %(ret.status)d Message: %(ret.data)s ." -msgstr "" -"볼륨: %(vol)s을(를) 소스 프로젝트: %(src)s에서 대상 프로젝트: %(tgt)s(으)로 " -"이동하는 중에 오류가 발생했습니다. 리턴 코드: %(ret.status)d 메시지: " -"%(ret.data)s." - -msgid "Error not a KeyError." -msgstr "KeyError가 아닌 오류입니다." - -msgid "Error not a TypeError." -msgstr "TypeError가 아닌 오류입니다." - -#, python-format -msgid "Error occurred when creating cgsnapshot %s." -msgstr "cg 스냅샷 %s을(를) 작성하는 중에 오류가 발생했습니다." - -#, python-format -msgid "Error occurred when deleting cgsnapshot %s." -msgstr "cg 스냅샷 %s을(를) 삭제하는 중에 오류가 발생했습니다." - -#, python-format -msgid "Error occurred when updating consistency group %s." -msgstr "일관성 그룹 %s을(를) 업데이트하는 중에 오류가 발생했습니다. " - -#, python-format -msgid "Error renaming volume %(vol)s: %(err)s." -msgstr "볼륨 %(vol)s 이름 변경 중 오류 발생: %(err)s." - -#, python-format -msgid "Error response: %s" -msgstr "오류 응답: %s" - -msgid "Error retrieving volume size" -msgstr "볼륨 크기 검색 시 오류 발생" - -#, python-format -msgid "" -"Error sending replication update for action id: %(id)s . Return code: " -"%(ret.status)d Message: %(ret.data)s ." -msgstr "" -"조치 id: %(id)s에 대한 복제 업데이트를 전송하는 중에 오류가 발생했습니다. 리" -"턴 코드: %(ret.status)d 메시지: %(ret.data)s." - -#, python-format -msgid "" -"Error sending replication update. Returned error: %(err)s. Action: %(id)s." -msgstr "" -"복제 업데이트를 전송하는 중에 오류가 발생했습니다. 리턴된 오류: %(err)s. 조" -"치: %(id)s." - -#, python-format -msgid "" -"Error setting replication inheritance to %(set)s for volume: %(vol)s project " -"%(project)s Return code: %(ret.status)d Message: %(ret.data)s ." -msgstr "" -"볼륨: %(vol)s 프로젝트 %(project)s에 대해 복제 상속을 %(set)s(으)로 설정하는 " -"중에 오류 발생, 리턴 코드: %(ret.status)d 메시지: %(ret.data)s ." - -#, python-format -msgid "" -"Error severing the package: %(package)s from source: %(src)s Return code: " -"%(ret.status)d Message: %(ret.data)s ." 
-msgstr "" -"소스: %(src)s에서 패키지: %(package)s을(를) 제공하는 중 오류 발생, 리턴 코" -"드: %(ret.status)d 메시지: %(ret.data)s." - -#, python-format -msgid "Error unbinding volume %(vol)s from pool. %(error)s." -msgstr "풀에서 %(vol)s 볼륨 바인드 해제 중 오류: %(error)s." - -#, python-format -msgid "Error while authenticating with switch: %s." -msgstr "스위치로 인증하는 동안 오류 발생: %s." - -#, python-format -msgid "Error while changing VF context %s." -msgstr "VF 컨텍스트 %s을(를) 변경하는 중에 오류가 발생했습니다." - -#, python-format -msgid "Error while checking the firmware version %s." -msgstr "펌웨어 버전 %s을(를) 확인하는 중에 오류가 발생했습니다." - -#, python-format -msgid "Error while checking transaction status: %s" -msgstr "트랜잭션 상태 검사 중에 오류 발생: %s" - -#, python-format -msgid "Error while checking whether VF is available for management %s." -msgstr "VF가 관리 %s에 사용 가능한지 확인하는 중 오류가 발생했습니다." - -#, python-format -msgid "" -"Error while connecting the switch %(switch_id)s with protocol %(protocol)s. " -"Error: %(error)s." -msgstr "" -"프로토콜이 %(protocol)s인 스위치 %(switch_id)s에 연결하는 중에 오류 발생. 오" -"류: %(error)s." - -#, python-format -msgid "Error while creating authentication token: %s" -msgstr "인증 토큰을 작성하는 중에 오류 발생: %s" - -#, python-format -msgid "Error while creating snapshot [status] %(stat)s - [result] %(res)s." -msgstr "" -"스냅샷 [status] %(stat)s - [result] %(res)s을(를) 생성하는 중에 오류가 발생했" -"습니다." - -#, python-format -msgid "Error while creating volume [status] %(stat)s - [result] %(res)s." -msgstr "" -"볼륨 [status] %(stat)s - [result] %(res)s을(를) 생성하는 중에 오류가 발생했습" -"니다." - -#, python-format -msgid "Error while deleting snapshot [status] %(stat)s - [result] %(res)s" -msgstr "" -"스냅샷 [status] %(stat)s - [result] %(res)s을(를) 삭제하는 중에 오류가 발생했" -"습니다." - -#, python-format -msgid "Error while deleting volume [status] %(stat)s - [result] %(res)s." -msgstr "" -"볼륨 [status] %(stat)s - [result] %(res)s을(를) 삭제하는 중에 오류가 발생했습" -"니다." - -#, python-format -msgid "Error while extending volume [status] %(stat)s - [result] %(res)s." 
-msgstr "" -"볼륨 [status] %(stat)s - [result] %(res)s을(를) 확장하는 중에 오류가 발생했습" -"니다." - -#, python-format -msgid "Error while getting %(op)s details, returned code: %(status)s." -msgstr "%(op)s 세부 사항을 가져오는 중에 오류 발생, 리턴 코드: %(status)s." - -#, python-format -msgid "Error while getting data via ssh: (command=%(cmd)s error=%(err)s)." -msgstr "" -"ssh를 통해 데이터를 가져오는 동안 오류 발생: (명령=%(cmd)s 오류=%(err)s)." - -#, python-format -msgid "Error while getting disco information [%s]." -msgstr "disco 정보 [%s]을(를) 가져오는 중에 오류가 발생했습니다." - -#, python-format -msgid "Error while getting nvp value: %s." -msgstr "nvp 값을 가져오는 중 오류 발생: %s." - -#, python-format -msgid "Error while getting session information %s." -msgstr "세션 정보 %s을(를) 가져오는 중에 오류가 발생했습니다." - -#, python-format -msgid "Error while parsing the data: %s." -msgstr "데이터 구문 분석 중 오류 발생: %s." - -#, python-format -msgid "Error while querying page %(url)s on the switch, reason %(error)s." -msgstr "" -"스위치에서 페이지 %(url)s을(를) 쿼리하는 중에 오류 발생, 이유 %(error)s." - -#, python-format -msgid "" -"Error while removing the zones and cfgs in the zone string: %(description)s." -msgstr "" -"구역 문자열에서 구역과 cfgs를 제거하는 중에 오류 발생: %(description)s." - -#, python-format -msgid "Error while requesting %(service)s API." -msgstr "%(service)s API를 요청하는 중 오류가 발생했습니다." - -#, python-format -msgid "Error while running zoning CLI: (command=%(cmd)s error=%(err)s)." -msgstr "구역 지정 CLI 실행 중에 오류 발생: (명령=%(cmd)s 오류=%(err)s)." - -#, python-format -msgid "" -"Error while updating the new zones and cfgs in the zone string. Error " -"%(description)s." -msgstr "" -"구역 설정에서 새 구역과 cfgs를 업데이트하는 중에 오류가 발생했습니다. 오류 " -"%(description)s." - -msgid "Error writing field to database" -msgstr "데이터베이스에 필드 쓰기 오류" - -#, python-format -msgid "Error[%(stat)s - %(res)s] while getting volume id." -msgstr "볼륨 id를 가져오는 동안 오류[%(stat)s - %(res)s]이(가) 발생했습니다." - -#, python-format -msgid "" -"Error[%(stat)s - %(res)s] while restoring snapshot [%(snap_id)s] into volume " -"[%(vol)s]." 
-msgstr "" -"스냅샷 [%(snap_id)s]을(를) 볼륨 [%(vol)s]에 복원하는 동안 오류[%(stat)s - " -"%(res)s]이(가) 발생했습니다." - -#, python-format -msgid "Error[status] %(stat)s - [result] %(res)s] while getting volume id." -msgstr "" -"볼륨 id를 가져오는 동안 오류[status] %(stat)s - [result] %(res)s]이(가) 발생" -"했습니다." - -#, python-format -msgid "" -"Exceeded max scheduling attempts %(max_attempts)d for volume %(volume_id)s" -msgstr "" -"%(volume_id)s 볼륨에 대한 최대 스케줄링 시도 횟수 %(max_attempts)d을(를) 초과" -"함" - -msgid "Exceeded the limit of snapshots per volume" -msgstr "볼륨당 스냅샷 한계를 초과함" - -#, python-format -msgid "Exception appending meta volume to target volume %(volumename)s." -msgstr "" -"대상 볼륨 %(volumename)s에 메타 볼륨을 추가하는 중에 예외가 발생했습니다. " - -#, python-format -msgid "" -"Exception during create element replica. Clone name: %(cloneName)s Source " -"name: %(sourceName)s Extra specs: %(extraSpecs)s " -msgstr "" -"요소 복제본을 작성하는 중에 예외가 발생했습니다. 복제본 이름: %(cloneName)s " -"소스 이름: %(sourceName)s 추가 스펙: %(extraSpecs)s " - -#, python-format -msgid "Exception in _select_ds_for_volume: %s." -msgstr "_select_ds_for_volume의 예외: %s" - -#, python-format -msgid "Exception while forming the zone string: %s." -msgstr "구역 문자열을 형성하는 중에 예외 발생: %s." - -#, python-format -msgid "Exception: %s" -msgstr "예외: %s" - -#, python-format -msgid "Expected a uuid but received %(uuid)s." -msgstr "uuid를 예상했지만 %(uuid)s을(를) 수신했습니다. " - -#, python-format -msgid "Expected exactly one node called \"%s\"" -msgstr "\"%s\"(이)라는 정확히 하나의 노드만 필요" - -#, python-format -msgid "Expected integer for node_count, svcinfo lsiogrp returned: %(node)s." -msgstr "" -"node_count에 대해 정수를 예상했지만 svcinfo lsiogrp가 리턴됨: %(node)s." - -#, python-format -msgid "Expected no output from CLI command %(cmd)s, got %(out)s." -msgstr "" -"CLI 명령 %(cmd)s에서 출력을 예상하지 않았는데 %(out)s이(가) 생성되었습니다. " - -#, python-format -msgid "" -"Expected single vdisk returned from lsvdisk when filtering on vdisk_UID. " -"%(count)s were returned." -msgstr "" -"vdisk_UID에서 필터링할 때 lsvdisk에서 예상 단일 vdisk가 리턴되었습니다." 
-"%(count)s이(가) 리턴되었습니다. " - -#, python-format -msgid "Expected volume size was %d" -msgstr "예상된 볼륨 크기는 %d이지만" - -#, python-format -msgid "" -"Export backup aborted, expected backup status %(expected_status)s but got " -"%(actual_status)s." -msgstr "" -"백업 내보내기가 중단되었습니다. 예상 백업 상태는 %(expected_status)s이지만 " -"%(actual_status)s인 동안에는 인스턴스 연관을 변경할 수 없습니다." - -#, python-format -msgid "" -"Export record aborted, the backup service currently configured " -"[%(configured_service)s] is not the backup service that was used to create " -"this backup [%(backup_service)s]." -msgstr "" -"레코드 내보내기가 중단되었습니다. 현재 구성된 백업 서비스 " -"[%(configured_service)s]은(는) 이 백업 [%(backup_service)s]을(를) 작성하기 위" -"해 사용된 백업 서비스가 아닙니다." - -msgid "Extend volume error." -msgstr "볼륨 확장 오류입니다. " - -msgid "" -"Extend volume is only supported for this driver when no snapshots exist." -msgstr "스냅샷이 없는 경우에만 이 드라이버에 대해 확장 볼륨이 지원됩니다. " - -msgid "Extend volume not implemented" -msgstr "볼륨 확장이 구현되지 않음" - -msgid "FAST is not supported on this array." -msgstr "이 배열에서는 FAST가 지원되지 않습니다." - -msgid "FC is the protocol but wwpns are not supplied by OpenStack." -msgstr "FC는 프로토콜이지만 OpenStack에서 wwpns를 제공하지 않습니다. " - -#, python-format -msgid "Faield to unassign %(volume)s" -msgstr "%(volume)s을(를) 지정 취소하지 못함" - -#, python-format -msgid "Fail to create cache volume %(volume)s. Error: %(err)s" -msgstr "캐시 볼륨 %(volume)s을(를) 작성하지 못했습니다. 오류: %(err)s" - -#, python-format -msgid "Failed adding connection for fabric=%(fabric)s: Error: %(err)s" -msgstr "패브릭=%(fabric)s에 대한 연결 추가 실패: 오류:%(err)s" - -msgid "Failed cgsnapshot" -msgstr "cg 스냅샷 실패" - -#, python-format -msgid "Failed creating snapshot for group: %(response)s." -msgstr "그룹의 스냅샷 작성 실패: %(response)s." - -#, python-format -msgid "Failed creating snapshot for volume %(volname)s: %(response)s." -msgstr "볼륨 %(volname)s에 대한 스냅샷 작성 실패: %(response)s." - -#, python-format -msgid "Failed getting active zone set from fabric %s." -msgstr "패브릭 %s에서 활성 구역 세트를 가져오는 데 실패했습니다." 
- -#, python-format -msgid "Failed getting details for pool %s." -msgstr "풀 %s에 대한 세부사항을 가져오지 못했습니다. " - -#, python-format -msgid "Failed removing connection for fabric=%(fabric)s: Error: %(err)s" -msgstr "패브릭=%(fabric)s에 대한 연결 제거 실패: 오류:%(err)s" - -#, python-format -msgid "Failed to Extend Volume %(volname)s" -msgstr "볼륨 %(volname)s을(를) 확장하지 못했습니다. " - -#, python-format -msgid "Failed to Login to 3PAR (%(url)s) because %(err)s" -msgstr "%(err)s(으)로 인해 3PAR(%(url)s)로의 로그인 실패" - -msgid "Failed to access active zoning configuration." -msgstr "활성 구역 지정 구성에 액세스하지 못했습니다. " - -#, python-format -msgid "Failed to access zoneset status:%s" -msgstr "구역 세트 상태 액세스 실패: %s" - -#, python-format -msgid "" -"Failed to acquire a resource lock. (serial: %(serial)s, inst: %(inst)s, ret: " -"%(ret)s, stderr: %(err)s)" -msgstr "" -"자원 잠금을 획득하는 데 실패했습니다.(serial: %(serial)s, inst: %(inst)s, " -"ret: %(ret)s, stderr: %(err)s)" - -msgid "Failed to add the logical device." -msgstr "논리 디바이스를 추가하는 데 실패했습니다." - -#, python-format -msgid "" -"Failed to add volume %(volumeName)s to consistency group %(cgName)s. Return " -"code: %(rc)lu. Error: %(error)s." -msgstr "" -"볼륨 %(volumeName)s을(를) 일관성 그룹 %(cgName)s에 추가하지 못했습니다. 리턴 " -"코드: %(rc)lu. 오류: %(error)s." - -msgid "Failed to add zoning configuration." -msgstr "구역 지정 구성을 추가하지 못했습니다. " - -#, python-format -msgid "" -"Failed to assign the iSCSI initiator IQN. (port: %(port)s, reason: " -"%(reason)s)" -msgstr "" -"iSCSI 게시자 IQN을 지정하는 데 실패했습니다.(포트: %(port)s, 이유: " -"%(reason)s)" - -#, python-format -msgid "Failed to associate qos_specs: %(specs_id)s with type %(type_id)s." -msgstr "qos_specs %(specs_id)s을(를) %(type_id)s 유형과 연관시키지 못했습니다." - -#, python-format -msgid "Failed to attach iSCSI target for volume %(volume_id)s." -msgstr "%(volume_id)s 볼륨에 대한 iSCSI 대상을 첨부하지 못했습니다. 
" - -#, python-format -msgid "Failed to backup volume metadata - %s" -msgstr "볼륨 메타데이터를 백업하지 못함 - %s" - -#, python-format -msgid "" -"Failed to backup volume metadata - Metadata backup object 'backup.%s.meta' " -"already exists" -msgstr "" -"볼륨 메타데이터를 백업하지 못함 - 메타데이터 백업 오브젝트 'backup.%s." -"meta'가 이미 존재함" - -#, python-format -msgid "Failed to clone volume from snapshot %s." -msgstr "스냅샷 %s에서 볼륨을 복제하지 못했습니다. " - -#, python-format -msgid "Failed to connect to %(vendor_name)s Array %(host)s: %(err)s" -msgstr "%(vendor_name)s 배열 %(host)s에 연결 실패: %(err)s" - -msgid "Failed to connect to Dell REST API" -msgstr "Dell REST API에 연결하는 데 실패" - -msgid "Failed to connect to array" -msgstr "배열에 연결 실패" - -#, python-format -msgid "Failed to connect to sheep daemon. addr: %(addr)s, port: %(port)s" -msgstr "sheep 디먼에 연결하지 못했습니다. 주소: %(addr)s, 포트: %(port)s" - -#, python-format -msgid "Failed to copy image to volume: %(reason)s" -msgstr "볼륨에 이미지를 복사할 수 없음: %(reason)s" - -#, python-format -msgid "Failed to copy metadata to volume: %(reason)s" -msgstr "볼륨에 메타데이터를 복사하지 못함: %(reason)s" - -msgid "Failed to copy volume, destination device unavailable." -msgstr "볼륨 복사에 실패했습니다. 대상 디바이스를 사용할 수 없습니다. " - -msgid "Failed to copy volume, source device unavailable." -msgstr "볼륨 복사에 실패했습니다. 소스 디바이스를 사용할 수 없습니다. " - -#, python-format -msgid "Failed to create CG %(cgName)s from snapshot %(cgSnapshot)s." -msgstr "스냅샷 %(cgSnapshot)s에서 CG %(cgName)s을(를) 작성하지 못했습니다. " - -#, python-format -msgid "Failed to create IG, %s" -msgstr "IG를 작성하지 못함, %s" - -#, python-format -msgid "Failed to create Volume Group: %(vg_name)s" -msgstr "볼륨 그룹을 작성할 수 없음: %(vg_name)s" - -#, python-format -msgid "" -"Failed to create a file. (file: %(file)s, ret: %(ret)s, stderr: %(err)s)" -msgstr "" -"파일을 작성하는 데 실패했습니다.(파일: %(file)s, ret: %(ret)s, stderr: " -"%(err)s)" - -#, python-format -msgid "Failed to create a temporary snapshot for volume %s." -msgstr "볼륨 %s에 대한 임시 스냅샷을 작성하지 못했습니다. " - -msgid "Failed to create api volume flow." 
-msgstr "api 볼륨 플로우 작성에 실패했습니다. " - -#, python-format -msgid "Failed to create cg snapshot %(id)s due to %(reason)s." -msgstr "%(reason)s(으)로 인해 cg 스냅샷 %(id)s을(를) 작성할 수 없습니다." - -#, python-format -msgid "Failed to create consistency group %(id)s due to %(reason)s." -msgstr "%(reason)s(으)로 인해 일관성 그룹 %(id)s을(를) 작성할 수 없습니다." - -#, python-format -msgid "Failed to create consistency group %(id)s:%(ret)s." -msgstr "일관성 그룹 %(id)s 작성 실패: %(ret)s." - -#, python-format -msgid "" -"Failed to create consistency group %s because VNX consistency group cannot " -"accept compressed LUNs as members." -msgstr "" -"일관성 그룹 %s을(를) 작성할 수 없습니다. VNX 일관성 그룹에서 압축된 LUN를 멤" -"버로 승인할 수 없습니다. " - -#, python-format -msgid "Failed to create consistency group: %(cgName)s." -msgstr "일관성 그룹 작성 실패: %(cgName)s." - -#, python-format -msgid "Failed to create consistency group: %(cgid)s. Error: %(excmsg)s." -msgstr "일관성 그룹 작성 실패: %(cgid)s. 오류: %(excmsg)s." - -#, python-format -msgid "" -"Failed to create consistency group: %(consistencyGroupName)s Return code: " -"%(rc)lu. Error: %(error)s." -msgstr "" -"일관성 그룹 작성 실패: %(consistencyGroupName)s 리턴 코드: %(rc)lu. 오류: " -"%(error)s." - -#, python-format -msgid "Failed to create hardware id(s) on %(storageSystemName)s." -msgstr "%(storageSystemName)s에서 하드웨어 ID를 작성하지 못했습니다. " - -#, python-format -msgid "" -"Failed to create host: %(name)s. Please check if it exists on the array." -msgstr "호스트 작성 실패: %(name)s. 배열에 있는지 확인하십시오. " - -#, python-format -msgid "Failed to create hostgroup: %(name)s. Check if it exists on the array." -msgstr "호스트 그룹 작성 실패: %(name)s. 배열에 있는지 확인하십시오. " - -msgid "Failed to create iqn." -msgstr "IQN 작성에 실패했습니다. " - -#, python-format -msgid "Failed to create iscsi target for volume %(volume_id)s." -msgstr "%(volume_id)s 볼륨에 대한 iscsi 대상을 작성하지 못했습니다. " - -msgid "Failed to create manage existing flow." -msgstr "기존 플로우 관리를 작성하지 못했습니다." - -msgid "Failed to create manage_existing flow." -msgstr "manage_existing 플로우를 작성하지 못했습니다." 
- -msgid "Failed to create map on mcs, no channel can map." -msgstr "MCS에서 맵 작성에 실패했습니다. 맵핑할 수 있는 채널이 없습니다. " - -msgid "Failed to create map." -msgstr "맵 작성에 실패했습니다. " - -#, python-format -msgid "Failed to create metadata for volume: %(reason)s" -msgstr "볼륨에 대한 메타데이터를 작성하지 못함: %(reason)s" - -msgid "Failed to create partition." -msgstr "파티션 작성에 실패했습니다. " - -#, python-format -msgid "Failed to create qos_specs: %(name)s with specs %(qos_specs)s." -msgstr "" -"%(qos_specs)s 스펙을 가진 qos_specs %(name)s을(를) 작성하지 못했습니다. " - -msgid "Failed to create replica." -msgstr "복제본 작성에 실패했습니다. " - -msgid "Failed to create scheduler manager volume flow" -msgstr "스케줄러 관리자 볼륨 플로우 작성 실패" - -#, python-format -msgid "Failed to create snapshot %s" -msgstr "스냅샷 %s 작성 실패" - -#, python-format -msgid "Failed to create snapshot for cg: %(cgName)s." -msgstr "cg의 스냅샷 작성 실패: %(cgName)s. " - -#, python-format -msgid "Failed to create snapshot for volume %s." -msgstr "볼륨 %s에 대한 스냅샷을 작성하지 못했습니다. " - -#, python-format -msgid "Failed to create snapshot policy on volume %(vol)s: %(res)s." -msgstr "볼륨 %(vol)s에서 스냅샷 정책을 작성하지 못함: %(res)s." - -#, python-format -msgid "Failed to create snapshot resource area on volume %(vol)s: %(res)s." -msgstr "볼륨 %(vol)s에서 스냅샷 자원 영역을 작성하지 못함: %(res)s." - -msgid "Failed to create snapshot." -msgstr "스냅샷 작성에 실패했습니다. " - -#, python-format -msgid "" -"Failed to create snapshot. CloudByte volume information not found for " -"OpenStack volume [%s]." -msgstr "" -"스냅샷을 작성하지 못했습니다. OpenStack 볼륨 [%s]에 대한 CloudByte 볼륨 정보" -"를 찾을 수 없습니다." - -#, python-format -msgid "Failed to create south bound connector for %s." -msgstr "%s의 남쪽 방향 커텍터를 작성하지 못했습니다." - -#, python-format -msgid "Failed to create storage group %(storageGroupName)s." -msgstr "스토리지 그룹 %(storageGroupName)s을(를) 생성하지 못했습니다." 
- -#, python-format -msgid "Failed to create thin pool, error message was: %s" -msgstr "씬 풀을 작성하지 못함, 오류 메시지: %s" - -#, python-format -msgid "Failed to create volume %s" -msgstr "%s 볼륨을 작성하지 못함 " - -#, python-format -msgid "Failed to delete SI for volume_id: %(volume_id)s because it has pair." -msgstr "" -"쌍이 있기 때문에 volume_id: %(volume_id)s에 대한 SI를 삭제하지 못했습니다. " - -#, python-format -msgid "Failed to delete a logical device. (LDEV: %(ldev)s, reason: %(reason)s)" -msgstr "" -"논리 디바이스를 삭제하는 데 실패했습니다.(LDEV: %(ldev)s, 이유: %(reason)s)" - -#, python-format -msgid "Failed to delete cgsnapshot %(id)s due to %(reason)s." -msgstr "%(reason)s(으)로 인해 cg 스냅샷 %(id)s을(를) 삭제할 수 없습니다." - -#, python-format -msgid "Failed to delete consistency group %(id)s due to %(reason)s." -msgstr "%(reason)s(으)로 인해 일관성 그룹 %(id)s을(를) 삭제할 수 없습니다." - -#, python-format -msgid "Failed to delete consistency group: %(cgName)s." -msgstr "일관성 그룹 삭제 실패: %(cgName)s." - -#, python-format -msgid "" -"Failed to delete consistency group: %(consistencyGroupName)s Return code: " -"%(rc)lu. Error: %(error)s." -msgstr "" -"일관성 그룹 삭제 실패: %(consistencyGroupName)s 리턴 코드: %(rc)lu. 오류: " -"%(error)s." - -msgid "Failed to delete device." -msgstr "장치 삭제에 실패했습니다. " - -#, python-format -msgid "" -"Failed to delete fileset for consistency group %(cgname)s. Error: %(excmsg)s." -msgstr "일관성 그룹 %(cgname)s에 대한 파일 세트 삭제 실패. 오류: %(excmsg)s." - -msgid "Failed to delete iqn." -msgstr "IQN 삭제에 실패했습니다. " - -msgid "Failed to delete map." -msgstr "맵 삭제에 실패했습니다. " - -msgid "Failed to delete partition." -msgstr "파티션 삭제에 실패했습니다. " - -msgid "Failed to delete replica." -msgstr "복제본 삭제에 실패했습니다. " - -#, python-format -msgid "Failed to delete snapshot %s" -msgstr "스냅샷 %s 삭제 실패" - -#, python-format -msgid "Failed to delete snapshot for cg: %(cgId)s." -msgstr "cg의 스냅샷 삭제 실패: %(cgId)s. " - -#, python-format -msgid "Failed to delete snapshot for snapshot_id: %s because it has pair." -msgstr "쌍이 있기 때문에 snapshot_id: %s에 대한 스냅샷을 삭제하지 못했습니다. 
" - -msgid "Failed to delete snapshot." -msgstr "스냅샷 삭제에 실패했습니다. " - -#, python-format -msgid "Failed to delete volume %(volumeName)s." -msgstr "%(volumeName)s 볼륨을 삭제하는 데 실패했습니다. " - -#, python-format -msgid "" -"Failed to delete volume for volume_id: %(volume_id)s because it has pair." -msgstr "" -"쌍이 있기 때문에 volume_id: %(volume_id)s에 대한 볼륨을 삭제하지 못했습니다. " - -#, python-format -msgid "Failed to detach iSCSI target for volume %(volume_id)s." -msgstr "%(volume_id)s 볼륨에 대한 iSCSI 대상을 분리하지 못했습니다. " - -msgid "Failed to determine blockbridge API configuration" -msgstr "Blockbridge API 구성 판별 실패" - -msgid "Failed to disassociate qos specs." -msgstr "qos 스펙의 연관을 해제하지 못했습니다. " - -#, python-format -msgid "Failed to disassociate qos_specs: %(specs_id)s with type %(type_id)s." -msgstr "" -"qos_specs %(specs_id)s을(를) %(type_id)s 유형과 연관 해제시키지 못했습니다. " - -#, python-format -msgid "" -"Failed to ensure snapshot resource area, could not locate volume for id %s" -msgstr "스냅샷 자원 영역 보장 실패, id %s에 대한 볼륨을 찾을 수 없음" - -msgid "Failed to establish connection with Coho cluster" -msgstr "Coho 클러스터와 연결하는 데 실패" - -#, python-format -msgid "" -"Failed to execute CloudByte API [%(cmd)s]. Http status: %(status)s, Error: " -"%(error)s." -msgstr "" -"CloudByte API 실행 실패 [%(cmd)s]. Http 상태: %(status)s, 오류: %(error)s. " - -msgid "Failed to execute common command." -msgstr "공통 명령을 실행하지 못했습니다. " - -#, python-format -msgid "Failed to export for volume: %(reason)s" -msgstr "볼륨에 대한 내보내기가 실패함: %(reason)s" - -#, python-format -msgid "Failed to extend volume %(name)s, Error msg: %(msg)s." -msgstr "볼륨 %(name)s을(를) 확장하는 데 실패했습니다. 오류 메시지: %(msg)s." - -msgid "Failed to find QoSnode" -msgstr "QoSnode을 찾는 데 실패" - -msgid "Failed to find Storage Center" -msgstr "Storage Center 찾기 실패" - -msgid "Failed to find a vdisk copy in the expected pool." -msgstr "예상 풀에서 vdisk 사본을 찾는 데 실패했습니다." - -msgid "Failed to find account for volume." -msgstr "볼륨에 대한 계정을 찾지 못했습니다. 
" - -#, python-format -msgid "Failed to find fileset for path %(path)s, command output: %(cmdout)s." -msgstr "경로 %(path)s에 대한 파일 세트를 찾지 못함, 명령 출력: %(cmdout)s." - -#, python-format -msgid "Failed to find group snapshot named: %s" -msgstr "%s(이)라는 그룹 스냅샷을 찾지 못했습니다." - -#, python-format -msgid "Failed to find host %s." -msgstr "호스트 %s을(를) 찾지 못했습니다. " - -#, python-format -msgid "Failed to find iSCSI initiator group containing %(initiator)s." -msgstr "%(initiator)s을(를) 포함하는 iSCSI 개시자 그룹을 찾지 못했습니다." - -#, python-format -msgid "Failed to get CloudByte account details for account [%s]." -msgstr "계정 [%s]의 CloudByte 계정 세부사항을 가져오지 못했습니다." - -#, python-format -msgid "Failed to get LUN target details for the LUN %s" -msgstr "LUN %s에 대한 LUN 대상 세부사항을 가져오지 못함" - -#, python-format -msgid "Failed to get LUN target details for the LUN %s." -msgstr "LUN %s에 대한 LUN 대상 세부사항을 가져오지 못했습니다. " - -#, python-format -msgid "Failed to get LUN target list for the LUN %s" -msgstr "LUN %s에 대한 LUN 대상 목록을 가져오지 못함" - -#, python-format -msgid "Failed to get Partition ID for volume %(volume_id)s." -msgstr "볼륨 %(volume_id)s에 대한 파티션 ID를 가져오지 못했습니다. " - -#, python-format -msgid "Failed to get Raid Snapshot ID from Snapshot %(snapshot_id)s." -msgstr "스냅샷 %(snapshot_id)s에서 Raid 스냅샷 ID를 가져오지 못했습니다. " - -#, python-format -msgid "Failed to get Raid Snapshot ID from snapshot: %(snapshot_id)s." -msgstr "스냅샷: %(snapshot_id)s에서 Raid 스냅샷 ID를 가져오지 못했습니다. " - -msgid "Failed to get SplitMirror." -msgstr "SplitMirror를 가져오지 못했습니다." - -#, python-format -msgid "" -"Failed to get a storage resource. The system will attempt to get the storage " -"resource again. (resource: %(resource)s)" -msgstr "" -"스토리지 자원을 가져오는 데 실패했습니다. 시스템은 스토리지 자원을 다시 가져" -"오기 위해 시도합니다.(자원: %(resource)s)" - -#, python-format -msgid "Failed to get all associations of qos specs %s" -msgstr "qos 스펙 %s의 모든 연관을 가져오지 못함 " - -msgid "Failed to get channel info." -msgstr "채널 정보를 가져오지 못했습니다. " - -#, python-format -msgid "Failed to get code level (%s)." 
-msgstr "코드 레벨을 가져오는 데 실패했습니다(%s)." - -msgid "Failed to get device info." -msgstr "디바이스 정보를 가져오지 못했습니다. " - -#, python-format -msgid "Failed to get domain because CPG (%s) doesn't exist on array." -msgstr "배열에 CPG(%s)가 없기 때문에 도메인을 가져오지 못했습니다." - -msgid "Failed to get image snapshots." -msgstr "이미지 스냅샷을 가져오지 못했습니다." - -#, python-format -msgid "Failed to get ip on Channel %(channel_id)s with volume: %(volume_id)s." -msgstr "" -"볼륨이 %(volume_id)s인 채널 %(channel_id)s의 ip를 가져오지 못했습니다. " - -msgid "Failed to get iqn info." -msgstr "IQN 정보를 가져오지 못했습니다. " - -msgid "Failed to get license info." -msgstr "라이센스 정보를 가져오지 못했습니다. " - -msgid "Failed to get lv info." -msgstr "lv 정보를 가져오지 못했습니다. " - -msgid "Failed to get map info." -msgstr "맵 정보를 가져오지 못했습니다. " - -msgid "Failed to get migration task." -msgstr "마이그레이션 태스크를 가져오지 못했습니다." - -msgid "Failed to get model update from clone" -msgstr "복제에서 모델 업데이트를 가져오지 못함" - -msgid "Failed to get name server info." -msgstr "이름 서버 정보를 가져오지 못했습니다. " - -msgid "Failed to get network info." -msgstr "네트워크 정보를 가져오지 못했습니다. " - -#, python-format -msgid "Failed to get new part id in new pool: %(pool_id)s." -msgstr "새 풀: %(pool_id)s에서 새 파트 id를 가져오지 못했습니다. " - -msgid "Failed to get partition info." -msgstr "파티션 정보를 가져오지 못했습니다. " - -#, python-format -msgid "Failed to get pool id with volume %(volume_id)s." -msgstr "볼륨이 %(volume_id)s인 풀 id를 가져오지 못했습니다. " - -#, python-format -msgid "Failed to get remote copy information for %(volume)s due to %(err)s." -msgstr "" -"%(err)s(으)로 인해 %(volume)s의 원격 복사 정보를 가져오는 데 실패했습니다." - -#, python-format -msgid "" -"Failed to get remote copy information for %(volume)s. Exception: %(err)s." -msgstr "%(volume)s의 원격 복사 정보를 가져오는 데 실패했습니다. 예외: %(err)s." - -msgid "Failed to get replica info." -msgstr "복제본 정보를 가져오지 못했습니다. " - -msgid "Failed to get show fcns database info." -msgstr "표시 fcns 데이터베이스 정보를 가져오지 못했습니다. 
" - -#, python-format -msgid "Failed to get size of volume %s" -msgstr "볼륨 %s의 크기 가져오기 실패" - -#, python-format -msgid "Failed to get snapshot for volume %s." -msgstr "볼륨 %s에 대한 스냅샷을 가져오지 못했습니다. " - -msgid "Failed to get snapshot info." -msgstr "스냅샷 정보를 가져오지 못했습니다. " - -#, python-format -msgid "Failed to get target IQN for the LUN %s" -msgstr "LUN %s에 대한 대상 IQN을 가져오지 못함" - -msgid "Failed to get target LUN of SplitMirror." -msgstr "SplitMirror의 대상 LUN을 가져오지 못했습니다." - -#, python-format -msgid "Failed to get target portal for the LUN %s" -msgstr "LUN %s에 대한 대상 포털을 가져오지 못함" - -msgid "Failed to get targets" -msgstr "대상을 가져오지 못함" - -msgid "Failed to get wwn info." -msgstr "WWN 정보를 가져오지 못했습니다. " - -#, python-format -msgid "" -"Failed to get, create or add volume %(volumeName)s to masking view " -"%(maskingViewName)s. The error message received was %(errorMessage)s." -msgstr "" -"볼륨 %(volumeName)s을(를) 가져오거나 작성하거나 마스킹 보기 " -"%(maskingViewName)s에 추가하지 못함. 오류 메시지 %(errorMessage)s을(를) 수신" -"했습니다. " - -msgid "Failed to identify volume backend." -msgstr "볼륨 백엔드 식별 실패" - -#, python-format -msgid "Failed to link fileset for the share %(cgname)s. Error: %(excmsg)s." -msgstr "공유 %(cgname)s에 대한 파일 세트 링크 실패. 오류: %(excmsg)s." - -#, python-format -msgid "Failed to log on %s Array (invalid login?)." -msgstr "%s 배열에 로그온하지 못했습니다(올바르지 않은 로그인?). " - -#, python-format -msgid "Failed to login for user %s." -msgstr "사용자 %s에 대한 로그인에 실패했습니다. " - -msgid "Failed to login with all rest URLs." -msgstr "모든 나머지 URL로 로그인하지 못했습니다. " - -#, python-format -msgid "" -"Failed to make a request to Datera cluster endpoint due to the following " -"reason: %s" -msgstr "다음 이유로 Datera 클러스터 엔드포인트에 요청을 작성하지 못함: %s" - -msgid "Failed to manage api volume flow." -msgstr "API 볼륨 플로우를 관리하지 못했습니다. " - -#, python-format -msgid "" -"Failed to manage existing %(type)s %(name)s, because reported size %(size)s " -"was not a floating-point number." 
-msgstr "" -"보고된 크기 %(size)s이(가) 부동 소수점 숫자가 아니기 때문에 기존 %(type)s " -"%(name)s을(를) 관리하지 못했습니다. " - -#, python-format -msgid "" -"Failed to manage existing volume %(name)s, because of error in getting " -"volume size." -msgstr "" -"볼륨 크기를 가져오는 중에 오류가 발생하여 기존 볼륨 %(name)s을(를) 관리하는 " -"데 실패했습니다. " - -#, python-format -msgid "" -"Failed to manage existing volume %(name)s, because rename operation failed: " -"Error msg: %(msg)s." -msgstr "" -"이름 바꾸기 조작이 실패하여 기존 볼륨 %(name)s을(를) 관리하는 데 실패했습니" -"다. 오류 메시지: %(msg)s." - -#, python-format -msgid "" -"Failed to manage existing volume %(name)s, because reported size %(size)s " -"was not a floating-point number." -msgstr "" -"보고된 크기 %(size)s이(가) 부동 소수점 숫자가 아니므로 기존 볼륨 %(name)s을" -"(를) 관리하지 못했습니다." - -#, python-format -msgid "" -"Failed to manage existing volume due to I/O group mismatch. The I/O group of " -"the volume to be managed is %(vdisk_iogrp)s. I/O groupof the chosen type is " -"%(opt_iogrp)s." -msgstr "" -"I/O 그룹 불일치로 인해 기존 볼륨을 관리하는 데 실패했습니다. 관리할 볼륨의 I/" -"O 그룹은 %(vdisk_iogrp)s입니다 선택한 유형의 I/O 그룹은 %(opt_iogrp)s입니다." - -#, python-format -msgid "" -"Failed to manage existing volume due to the pool of the volume to be managed " -"does not match the backend pool. Pool of the volume to be managed is " -"%(vdisk_pool)s. Pool of the backend is %(backend_pool)s." -msgstr "" -"관리할 볼륨 풀이 백엔드 풀과 일치하지 않으므로 기존 볼륨을 관리하는 데 실패했" -"습니다. 관리할 볼륨의 풀은 %(vdisk_pool)s입니다. 백엔드의 풀은 " -"%(backend_pool)s입니다." - -msgid "" -"Failed to manage existing volume due to the volume to be managed is " -"compress, but the volume type chosen is not compress." -msgstr "" -"관리할 볼륨은 압축이지만 선택한 볼륨은 압축이 아니므로 기존 볼륨을 관리하는 " -"데 실패했습니다." - -msgid "" -"Failed to manage existing volume due to the volume to be managed is not " -"compress, but the volume type chosen is compress." -msgstr "" -"관리할 볼륨은 압축이 아니지만 선택한 볼륨은 압축이므로 기존 볼륨을 관리하는 " -"데 실패했습니다." - -msgid "" -"Failed to manage existing volume due to the volume to be managed is not in a " -"valid I/O group." 
-msgstr "" -"관리할 볼륨이 올바른 I/O 그룹에 없으므로 기존 볼륨을 관리하는 데 실패했습니" -"다." - -msgid "" -"Failed to manage existing volume due to the volume to be managed is thick, " -"but the volume type chosen is thin." -msgstr "" -"관리할 볼륨은 thick이지만 선택한 볼륨은 thin이므로 기존 볼륨을 관리하는 데 실" -"패했습니다." - -msgid "" -"Failed to manage existing volume due to the volume to be managed is thin, " -"but the volume type chosen is thick." -msgstr "" -"관리할 볼륨은 thin이지만 선택한 볼륨은 think이므로 기존 볼륨을 관리하는 데 실" -"패했습니다." - -#, python-format -msgid "Failed to manage volume %s." -msgstr "볼륨 %s을(를) 관리하지 못했습니다. " - -#, python-format -msgid "" -"Failed to map a logical device. (LDEV: %(ldev)s, LUN: %(lun)s, port: " -"%(port)s, id: %(id)s)" -msgstr "" -"논리 디바이스를 맵핑하는 데 실패했습니다.(LDEV: %(ldev)s, LUN: %(lun)s, 포" -"트: %(port)s, id: %(id)s)" - -msgid "Failed to migrate volume for the first time." -msgstr "볼륨 마이그레이션 첫 번째 실패. " - -msgid "Failed to migrate volume for the second time." -msgstr "볼륨 마이그레이션 두 번째 실패. " - -#, python-format -msgid "Failed to move LUN mapping. Return code: %s" -msgstr "LUN 맵핑 이동에 실패했습니다. 리턴 코드: %s" - -#, python-format -msgid "Failed to move volume %s." -msgstr "볼륨 %s을(를) 이동하지 못했습니다. " - -#, python-format -msgid "Failed to open a file. (file: %(file)s, ret: %(ret)s, stderr: %(err)s)" -msgstr "" -"파일을 여는 데 실패했습니다.(파일: %(file)s, ret: %(ret)s, stderr: %(err)s)" - -#, python-format -msgid "" -"Failed to parse CLI output:\n" -" command: %(cmd)s\n" -" stdout: %(out)s\n" -" stderr: %(err)s." -msgstr "" -"CLI 출력 구문 분석 실패:\n" -" command: %(cmd)s\n" -"stdout: %(out)s\n" -" stderr: %(err)s." - -msgid "" -"Failed to parse the configuration option 'keystone_catalog_info', must be in " -"the form ::" -msgstr "" -"구성 옵션 'keystone_catalog_info'를 구문 분석하지 못함. :" -": 양식이어야 함" - -msgid "" -"Failed to parse the configuration option 'swift_catalog_info', must be in " -"the form ::" -msgstr "" -"구성 옵션 'swift_catalog_info'를 구문 분석하지 못함. :" -": 양식이어야 함" - -#, python-format -msgid "" -"Failed to perform a zero-page reclamation. 
(LDEV: %(ldev)s, reason: " -"%(reason)s)" -msgstr "" -"0 페이지 교정을 수행하는 데 실패했습니다.(LDEV: %(ldev)s, 이유: %(reason)s)" - -#, python-format -msgid "Failed to remove export for volume %(volume)s: %(reason)s" -msgstr "볼륨 %(volume)s의 내보내기 제거 실패: %(reason)s" - -#, python-format -msgid "Failed to remove iscsi target for volume %(volume_id)s." -msgstr "%(volume_id)s 볼륨에 대한 iscsi 대상을 제거하지 못했습니다. " - -#, python-format -msgid "" -"Failed to remove volume %(volumeName)s from consistency group %(cgName)s. " -"Return code: %(rc)lu. Error: %(error)s." -msgstr "" -"볼륨 %(volumeName)s을(를) 일관성 그룹 %(cgName)s에서 제거하지 못했습니다. 리" -"턴 코드: %(rc)lu. 오류: %(error)s." - -#, python-format -msgid "Failed to remove volume %(volumeName)s from default SG." -msgstr "기본 SG에서 볼륨 %(volumeName)s을(를) 제거하지 못했습니다. " - -#, python-format -msgid "Failed to remove volume %(volumeName)s from default SG: %(volumeName)s." -msgstr "" -"기본 SG(%(volumeName)s)에서 %(volumeName)s 볼륨을 제거하지 못했습니다. " - -#, python-format -msgid "" -"Failed to remove: %(volumename)s. from the default storage group for FAST " -"policy %(fastPolicyName)s." -msgstr "" -"FAST 정책 %(fastPolicyName)s에 대한 기본 스토리지 그룹에서 %(volumename)s 볼" -"륨을 제거하지 못했습니다." - -#, python-format -msgid "" -"Failed to rename logical volume %(name)s, error message was: %(err_msg)s" -msgstr "논리적 볼륨 %(name)s의 이름을 바꾸지 못함, 오류 메시지: %(err_msg)s" - -#, python-format -msgid "Failed to retrieve active zoning configuration %s" -msgstr "활성 구역 지정 구성 %s을(를) 검색하지 못함" - -#, python-format -msgid "" -"Failed to set CHAP authentication for target IQN %(iqn)s. Details: %(ex)s" -msgstr "대상 IQN %(iqn)s의 CHAP 인증을 설정하지 못했습니다. 세부 사항: %(ex)s" - -#, python-format -msgid "Failed to set QoS for existing volume %(name)s, Error msg: %(msg)s." -msgstr "" -"기존 볼륨 %(name)s에 대한 QoS 설정에 실패했습니다. 오류 메시지: %(msg)s." - -msgid "Failed to set attribute 'Incoming user' for SCST target." -msgstr "SCST 대상에 대해 '수신 사용자' 속성을 설정하는 데 실패했습니다. " - -msgid "Failed to set partition." -msgstr "파티션 설정에 실패했습니다. 
" - -#, python-format -msgid "" -"Failed to set permissions for the consistency group %(cgname)s. Error: " -"%(excmsg)s." -msgstr "일관성 그룹 %(cgname)s에 대한 권한 설정 실패. 오류: %(excmsg)s." - -#, python-format -msgid "" -"Failed to specify a logical device for the volume %(volume_id)s to be " -"unmapped." -msgstr "" -"맵핑되지 않는 볼륨 %(volume_id)s에 대한 논리 디바이스를 지정하는 데 실패했습" -"니다." - -#, python-format -msgid "" -"Failed to specify a logical device to be deleted. (method: %(method)s, id: " -"%(id)s)" -msgstr "" -"삭제할 논리 디바이스를 지정하는 데 실패했습니다.(메소드: %(method)s, id: " -"%(id)s)" - -msgid "Failed to terminate migrate session." -msgstr "마이그레이션 세션을 종료하는 데 실패했습니다. " - -#, python-format -msgid "Failed to unbind volume %(volume)s" -msgstr "볼륨 %(volume)s 바인드 해제 실패" - -#, python-format -msgid "" -"Failed to unlink fileset for consistency group %(cgname)s. Error: %(excmsg)s." -msgstr "" -"일관성 그룹 %(cgname)s에 대한 파일 세트 링크 해제 실패. 오류: %(excmsg)s." - -#, python-format -msgid "Failed to unmap a logical device. (LDEV: %(ldev)s, reason: %(reason)s)" -msgstr "" -"논리 디바이스를 맵핑 해제하는 데 실패했습니다.(LDEV: %(ldev)s, 이유: " -"%(reason)s)" - -#, python-format -msgid "Failed to update consistency group: %(cgName)s." -msgstr "일관성 그룹 업데이트 실패: %(cgName)s." - -#, python-format -msgid "Failed to update metadata for volume: %(reason)s" -msgstr "볼륨에 대한 메타데이터를 업데이트하지 못함: %(reason)s" - -msgid "Failed to update or delete zoning configuration" -msgstr "구역 지정 구성 업데이트 또는 삭제에 실패" - -msgid "Failed to update or delete zoning configuration." -msgstr "구역 지정 구성 업데이트 또는 삭제에 실패했습니다." - -#, python-format -msgid "Failed to update qos_specs: %(specs_id)s with specs %(qos_specs)s." -msgstr "" -"%(qos_specs)s 스펙을 가진 qos_specs %(specs_id)s을(를) 업데이트하지 못했습니" -"다. " - -msgid "Failed to update quota usage while retyping volume." -msgstr "볼륨을 다시 입력하는 동안 할당량 사용을 업데이트하는 데 실패했습니다." - -msgid "Failed to update snapshot." -msgstr "스냅샷을 업데이트하는 데 실패했습니다." 
- -#, python-format -msgid "" -"Failed updating volume %(vol_id)s metadata using the provided %(src_type)s " -"%(src_id)s metadata" -msgstr "" -"볼륨 %(vol_id)s 메타데이터 업데이트 실패함(제공된 %(src_type)s %(src_id)s 메" -"타데이터 사용) " - -#, python-format -msgid "Failure creating volume %s." -msgstr "%s 볼륨을 작성하지 못했습니다." - -#, python-format -msgid "Failure getting LUN info for %s." -msgstr "%s에 대한 LUN 정보를 가져오지 못했습니다." - -#, python-format -msgid "Failure moving new cloned LUN to %s." -msgstr "복제된 새 LUN을 %s(으)로 이동하지 못했습니다." - -#, python-format -msgid "Failure staging LUN %s to tmp." -msgstr "LUN %s을(를) tmp로 스테이징하지 못했습니다." - -#, python-format -msgid "Fexvisor failed to add volume %(id)s due to %(reason)s." -msgstr "" -"%(reason)s(으)로 인해 Fexvisor에서 볼륨 %(id)s을(를) 추가하지 못했습니다." - -#, python-format -msgid "" -"Fexvisor failed to join the volume %(vol)s in the group %(group)s due to " -"%(ret)s." -msgstr "" -"%(ret)s(으)로 인해 Fexvisor가 그룹 %(group)s에서 볼륨 %(vol)s을(를) 결합하지 " -"못했습니다. " - -#, python-format -msgid "" -"Fexvisor failed to remove the volume %(vol)s in the group %(group)s due to " -"%(ret)s." -msgstr "" -"%(ret)s(으)로 인해 Fexvisor가 그룹 %(group)s에서 볼륨 %(vol)s을(를) 제거하지 " -"못했습니다. " - -#, python-format -msgid "Fexvisor failed to remove volume %(id)s due to %(reason)s." -msgstr "" -"%(reason)s(으)로 인해 Fexvisor가 볼륨 %(id)s을(를) 제거하지 못했습니다. " - -#, python-format -msgid "Fibre Channel SAN Lookup failure: %(reason)s" -msgstr "파이버 채널 SAN 검색 실패: %(reason)s" - -#, python-format -msgid "Fibre Channel Zone operation failed: %(reason)s" -msgstr "파이버 채널 구역 조작 실패: %(reason)s" - -#, python-format -msgid "Fibre Channel connection control failure: %(reason)s" -msgstr "파이버 채널 연결 제어 실패: %(reason)s" - -#, python-format -msgid "File %(file_path)s could not be found." -msgstr "%(file_path)s 파일을 찾을 수 없습니다. " - -#, python-format -msgid "File %(path)s has invalid backing file %(bfile)s, aborting." -msgstr "" -"파일 %(path)s에 올바르지 않은 백업 파일 %(bfile)s이(가) 있어 중단합니다." - -#, python-format -msgid "File already exists at %s." 
-msgstr "%s에 파일이 이미 있습니다. " - -#, python-format -msgid "File already exists at: %s" -msgstr "%s에 파일이 이미 있음" - -msgid "Find host in hostgroup error." -msgstr "호스트 그룹에서 호스트 찾기 오류입니다. " - -msgid "Find host lun id error." -msgstr "호스트 lun id 찾기 오류입니다. " - -msgid "Find lun group from mapping view error." -msgstr "맵핑 보기에서 lun 그룹 찾기 오류입니다. " - -msgid "Find mapping view error." -msgstr "맵핑 보기 찾기 오류입니다. " - -msgid "Find portgroup error." -msgstr "포트 그룹 찾기 오류입니다. " - -msgid "Find portgroup from mapping view error." -msgstr "맵핑 보기에서 포트 그룹 찾기 오류입니다. " - -#, python-format -msgid "" -"Flash Cache Policy requires WSAPI version '%(fcache_version)s' version " -"'%(version)s' is installed." -msgstr "" -"플래시 캐시 정책에는 WSAPI 버전 '%(fcache_version)s' 버전 '%(version)s'이" -"(가) 설치되어 있어야 합니다. " - -#, python-format -msgid "Flexvisor assign volume failed.:%(id)s:%(status)s." -msgstr "Flexvisor 볼륨 지정 실패:%(id)s:%(status)s." - -#, python-format -msgid "Flexvisor assign volume failed:%(id)s:%(status)s." -msgstr "Flexvisor 볼륨 지정 실패:%(id)s:%(status)s." - -#, python-format -msgid "" -"Flexvisor could not find volume %(id)s snapshot in the group %(vgid)s " -"snapshot %(vgsid)s." -msgstr "" -"Flexvisor에서 볼륨 %(id)s 스냅샷을 그룹 %(vgid)s 스냅샷 %(vgsid)s에서 찾을 " -"수 없습니다." - -#, python-format -msgid "Flexvisor create volume failed.:%(volumeid)s:%(status)s." -msgstr "Flexvisor 볼륨 작성 실패:%(volumeid)s:%(status)s." - -#, python-format -msgid "Flexvisor failed deleting volume %(id)s: %(status)s." -msgstr "Flexvisor가 %(id)s 볼륨을 삭제하지 못함: %(status)s." - -#, python-format -msgid "Flexvisor failed to add volume %(id)s to group %(cgid)s." -msgstr "Flexvisor에서 볼륨 %(id)s을(를) 그룹 %(cgid)s에 추가하지 못했습니다." - -#, python-format -msgid "" -"Flexvisor failed to assign volume %(id)s due to unable to query status by " -"event id." -msgstr "" -"이벤트 ID로 상태를 조회할 수 없어 Flexvisor가 %(id)s 볼륨을 지정하지 못했습니" -"다." - -#, python-format -msgid "Flexvisor failed to assign volume %(id)s: %(status)s." -msgstr "Flexvisor가 %(id)s 볼륨을 지정하지 못함: %(status)s." 
- -#, python-format -msgid "Flexvisor failed to assign volume %(volume)s iqn %(iqn)s." -msgstr "Flexvisor에서 볼륨 %(volume)s에 iqn %(iqn)s을(를) 지정하지 못했습니다." - -#, python-format -msgid "Flexvisor failed to clone volume %(id)s: %(status)s." -msgstr "Flexvisor가 %(id)s 볼륨을 복제하지 못함: %(status)s." - -#, python-format -msgid "Flexvisor failed to clone volume (failed to get event) %(id)s." -msgstr "Flexvisor가 %(id)s 볼륨을 복제하지(이벤트를 가져오지) 못했습니다." - -#, python-format -msgid "Flexvisor failed to create snapshot for volume %(id)s: %(status)s." -msgstr "Flexvisor가 %(id)s 볼륨의 스냅샷을 작성하지 못함: %(status)s." - -#, python-format -msgid "" -"Flexvisor failed to create snapshot for volume (failed to get event) %(id)s." -msgstr "" -"Flexvisor가 %(id)s 볼륨의 스냅샷을 작성하지(이벤트를 가져오지) 못했습니다." - -#, python-format -msgid "Flexvisor failed to create volume %(id)s in the group %(vgid)s." -msgstr "Flexvisor에서 볼륨 %(id)s을(를) 그룹 %(vgid)s에 작성하지 못했습니다." - -#, python-format -msgid "Flexvisor failed to create volume %(volume)s: %(status)s." -msgstr "Flexvisor가 %(volume)s 볼륨을 작성하지 못함: %(status)s." - -#, python-format -msgid "Flexvisor failed to create volume (get event) %s." -msgstr "Flexvisor가 %s 볼륨을 작성하지(이벤트를 가져오지) 못했습니다." - -#, python-format -msgid "Flexvisor failed to create volume from snapshot %(id)s: %(status)s." -msgstr "Flexvisor가 %(id)s 스냅샷에서 볼륨을 작성하지 못함: %(status)s." - -#, python-format -msgid "Flexvisor failed to create volume from snapshot %(id)s:%(status)s." -msgstr "Flexvisor가 %(id)s 스냅샷에서 볼륨을 작성하지 못함: %(status)s." - -#, python-format -msgid "" -"Flexvisor failed to create volume from snapshot (failed to get event) %(id)s." -msgstr "" -"Flexvisor가 %(id)s 스냅샷에서 볼륨을 작성하지(이벤트를 가져오지) 못했습니다." - -#, python-format -msgid "Flexvisor failed to delete snapshot %(id)s: %(status)s." -msgstr "Flexvisor가 %(id)s 스냅샷을 삭제하지 못함: %(status)s." - -#, python-format -msgid "Flexvisor failed to delete snapshot (failed to get event) %(id)s." -msgstr "Flexvisor가 %(id)s 스냅샷을 삭제하지(이벤트를 가져오지) 못했습니다." 
- -#, python-format -msgid "Flexvisor failed to delete volume %(id)s: %(status)s." -msgstr "Flexvisor가 %(id)s 볼륨을 삭제하지 못함: %(status)s." - -#, python-format -msgid "Flexvisor failed to extend volume %(id)s: %(status)s." -msgstr "Flexvisor가 %(id)s 볼륨을 확장하지 못함: %(status)s." - -#, python-format -msgid "Flexvisor failed to extend volume %(id)s:%(status)s." -msgstr "Flexvisor가 %(id)s 볼륨을 확장하지 못함: %(status)s." - -#, python-format -msgid "Flexvisor failed to extend volume (failed to get event) %(id)s." -msgstr "Flexvisor가 %(id)s 볼륨을 확장하지(이벤트를 가져오지) 못했습니다." - -#, python-format -msgid "Flexvisor failed to get pool info %(id)s: %(status)s." -msgstr "Flexvisor가 풀 정보 %(id)s을(를) 가져오지 못함: %(status)s." - -#, python-format -msgid "" -"Flexvisor failed to get snapshot id of volume %(id)s from group %(vgid)s." -msgstr "" -"Flexvisor가 그룹 %(vgid)s에서 볼륨 %(id)s의 스냅샷 ID를 가져오지 못했습니다." - -#, python-format -msgid "Flexvisor failed to remove volume %(id)s from group %(cgid)s." -msgstr "Flexvisor가 그룹 %(cgid)s에서 볼륨 %(id)s을(를) 제거하지 못했습니다. " - -#, python-format -msgid "Flexvisor failed to spawn volume from snapshot %(id)s:%(status)s." -msgstr "Flexvisor가 %(id)s 스냅샷에서 볼륨을 파생하지 못함: %(status)s." - -#, python-format -msgid "" -"Flexvisor failed to spawn volume from snapshot (failed to get event) %(id)s." -msgstr "" -"Flexvisor가 %(id)s 스냅샷에서 볼륨을 파생하지(이벤트를 가져오지) 못했습니다." - -#, python-format -msgid "Flexvisor failed to unassign volume %(id)s: %(status)s." -msgstr "Flexvisor가 %(id)s 볼륨을 지정 취소하지 못함: %(status)s." - -#, python-format -msgid "Flexvisor failed to unassign volume (get event) %(id)s." -msgstr "Flexvisor가 %(id)s 볼륨을 지정 취소하지(이벤트를 가져오지) 못했습니다." - -#, python-format -msgid "Flexvisor failed to unassign volume:%(id)s:%(status)s." -msgstr "Flexvisor에서 볼륨 %(id)s을(를) 지정 취소하지 못함: %(status)s." - -#, python-format -msgid "Flexvisor unable to find the source volume %(id)s info." -msgstr "Flexvisor에서 소스 볼륨 %(id)s 정보를 찾을 수 없습니다." - -#, python-format -msgid "Flexvisor unassign volume failed:%(id)s:%(status)s." 
-msgstr "Flexvisor 볼륨 지정 취소 실패:%(id)s:%(status)s." - -#, python-format -msgid "Flexvisor volume %(id)s failed to join group %(vgid)s." -msgstr "Flexvisor 볼륨 %(id)s에서 그룹 %(vgid)s을(를) 결합하지 못했습니다." - -#, python-format -msgid "Folder %s does not exist in Nexenta Store appliance" -msgstr "폴더 %s이(가) Nexenta Store 어플라이언스에 없음" - -#, python-format -msgid "GPFS is not running, state: %s." -msgstr "GPFS가 실행 중이 아닙니다. 상태: %s." - -msgid "Gateway VIP is not set" -msgstr "게이트웨이 VIP가 설정되지 않음" - -msgid "Get FC ports by port group error." -msgstr "포트 그룹별로 FC 포트 가져오기 오류." - -msgid "Get FC ports from array error." -msgstr "배열에서 FC 포트 가져오기 오류입니다. " - -msgid "Get FC target wwpn error." -msgstr "FC 대상 wwpn 가져오기 오류입니다. " - -msgid "Get HyperMetroPair error." -msgstr "HyperMetroPair 가져오기 오류." - -msgid "Get LUN group by view error." -msgstr "보기별로 LUN 그룹 가져오기 오류." - -msgid "Get LUNcopy information error." -msgstr "LUNcopy 정보 가져오기 오류입니다. " - -msgid "Get QoS id by lun id error." -msgstr "Lun id별 QoS id 가져오기 오류입니다. " - -msgid "Get QoS information error." -msgstr "QoS 정보 가져오기 오류입니다. " - -msgid "Get QoS policy error." -msgstr "QoS 정책 가져오기 오류입니다. " - -msgid "Get SplitMirror error." -msgstr "SplitMirror 가져오기 오류." - -msgid "Get active client failed." -msgstr "활성 클라이언트를 가져오는 데 실패했습니다." - -msgid "Get array info error." -msgstr "배열 정보 가져오기 오류." - -msgid "Get cache by name error." -msgstr "이름별 캐시 가져오기 오류입니다. " - -msgid "Get connected free FC wwn error." -msgstr "연결된 사용 가능한 FC wwn 가져오기 오류입니다. " - -msgid "Get engines error." -msgstr "엔진 가져오기 오류." - -msgid "Get host initiators info failed." -msgstr "호스트 개시자 정보 가져오기에 실패했습니다. " - -msgid "Get hostgroup information error." -msgstr "호스트 그룹 정보 가져오기 오류입니다. " - -msgid "" -"Get iSCSI port info error, please check the target IP configured in huawei " -"conf file." -msgstr "" -"iSCSI 포트 정보 가져오기 오류입니다. huawei conf 파일에서 대상 IP가 구성되어 " -"있는지 확인하십시오. " - -msgid "Get iSCSI port information error." -msgstr "iSCSI 포트 정보 가져오기 오류입니다. " - -msgid "Get iSCSI target port error." 
-msgstr "iSCSI 대상 포트 가져오기 오류입니다. " - -msgid "Get lun id by name error." -msgstr "이름별 lun id 가져오기 오류." - -msgid "Get lun migration task error." -msgstr "Lun 마이그레이션 태스크 가져오기 오류입니다. " - -msgid "Get lungroup id by lun id error." -msgstr "Lun id별 lun 그룹 id 가져오기 오류입니다. " - -msgid "Get lungroup information error." -msgstr "Lun 그룹 정보 가져오기 오류입니다. " - -msgid "Get migration task error." -msgstr "마이그레이션 작업 가져오기 오류." - -msgid "Get pair failed." -msgstr "쌍 가져오기 오류." - -msgid "Get partition by name error." -msgstr "이름별 파티션 가져오기 오류입니다. " - -msgid "Get partition by partition id error." -msgstr "파티션 id별 파티션 가져오기 오류입니다. " - -msgid "Get port group by view error." -msgstr "보기별로 포트 그룹 가져오기 오류." - -msgid "Get port group error." -msgstr "포트 그룹 가져오기 오류." - -msgid "Get port groups by port error." -msgstr "포트별로 포트 그룹 가져오기 오류." - -msgid "Get ports by port group error." -msgstr "포트 그룹별로 포트 가져오기 오류." - -msgid "Get remote device info failed." -msgstr "원격 장치 정보를 가져오는 데 실패했습니다." - -msgid "Get remote devices error." -msgstr "원격 장치 가져오기 오류." - -msgid "Get smartcache by cache id error." -msgstr "캐시 id별 스마트 캐시 가져오기 오류입니다. " - -msgid "Get snapshot error." -msgstr "스냅샷 가져오기 오류." - -msgid "Get snapshot id error." -msgstr "스냅샷 ID 가져오기 오류입니다. " - -msgid "Get target IP error." -msgstr "대상 IP 가져오기 오류입니다. " - -msgid "Get target LUN of SplitMirror error." -msgstr "SplitMirror의 대상 LUN 가져오기 오류." - -msgid "Get views by port group error." -msgstr "포트 그룹별로 보기 가져오기 오류." - -msgid "Get volume by name error." -msgstr "이름별 볼륨 가져오기 오류입니다. " - -msgid "Get volume error." -msgstr "볼륨 가져오기 오류입니다. " - -#, python-format -msgid "" -"Glance metadata cannot be updated, key %(key)s exists for volume id " -"%(volume_id)s" -msgstr "" -"글랜스 메타데이터를 업데이트할 수 없음. 볼륨 ID %(volume_id)s에 대해 %(key)s " -"키가 있음" - -#, python-format -msgid "Glance metadata for volume/snapshot %(id)s cannot be found." -msgstr "%(id)s 볼륨/스냅샷에 대한 글랜스 메타데이터를 찾을 수 없습니다. 
" - -#, python-format -msgid "Gluster config file at %(config)s doesn't exist" -msgstr "Gluster config 파일이 %(config)s에 없음" - -#, python-format -msgid "Google Cloud Storage api failure: %(reason)s" -msgstr "Google Cloud Storage api 실패: %(reason)s" - -#, python-format -msgid "Google Cloud Storage connection failure: %(reason)s" -msgstr "Google Cloud Storage 연결 실패: %(reason)s" - -#, python-format -msgid "Google Cloud Storage oauth2 failure: %(reason)s" -msgstr "Google Cloud Storage oauth2 실패: %(reason)s" - -#, python-format -msgid "Got bad path information from DRBDmanage! (%s)" -msgstr "DRBDmanage에서 잘못된 경로 정보를 가져왔습니다(%s)! " - -msgid "HBSD error occurs." -msgstr "HBSD 오류가 발생했습니다." - -msgid "HPELeftHand url not found" -msgstr "HPELeftHand url을 찾을 수 없음" - -#, python-format -msgid "" -"Hash block size has changed since the last backup. New hash block size: " -"%(new)s. Old hash block size: %(old)s. Do a full backup." -msgstr "" -"마지막 백업 이후 해시 블록 크기가 변경되었습니다. 새 해시 블록 크기:%(new)s. " -"이전 해시 블록 크기: %(old)s. 전체 백업을 수행하십시오. " - -#, python-format -msgid "Have not created %(tier_levels)s tier(s)." -msgstr "%(tier_levels)s 티어를 작성하지 않았습니다. " - -#, python-format -msgid "Hint \"%s\" not supported." -msgstr "힌트 \"%s\"이(가) 지원되지 않습니다." - -msgid "Host" -msgstr "호스트" - -#, python-format -msgid "Host %(host)s could not be found." -msgstr "%(host)s 호스트를 찾을 수 없습니다. " - -#, python-format -msgid "" -"Host %(host)s does not match x509 certificate contents: CommonName " -"%(commonName)s." -msgstr "" -"%(host)s 호스트가 x509 인증서 컨텐츠와 일치하지 않음: CommonName " -"%(commonName)s." - -#, python-format -msgid "Host %s has no FC initiators" -msgstr "%s 호스트에 FC 개시자가 없음" - -#, python-format -msgid "Host group with name %s not found" -msgstr "이름이 %s인 호스트 그룹을 찾을 수 없음" - -#, python-format -msgid "Host group with ref %s not found" -msgstr "ref %s을(를) 가진 호스트 그룹을 찾을 수 없음" - -msgid "Host is NOT Frozen." -msgstr "호스트가 동결되지 않았습니다." - -msgid "Host is already Frozen." -msgstr "호스트가 이미 동결되었습니다." 
- -#, python-format -msgid "Host not found. Failed to remove %(service)s on %(host)s." -msgstr "" -"호스트를 찾을 수 없습니다. %(host)s에서 %(service)s을(를) 제거하지 못했습니" -"다. " - -#, python-format -msgid "Host replication_status must be %s to failover." -msgstr "장애 보구하려면 호스트 replication_status가 %s이어야 합니다." - -#, python-format -msgid "Host type %s not supported." -msgstr "호스트 유형 %s이(가) 지원되지 않습니다." - -#, python-format -msgid "Host with ports %(ports)s not found." -msgstr "포트가 %(ports)s인 호스트를 찾을 수 없습니다. " - -msgid "Hypermetro and Replication can not be used in the same volume_type." -msgstr "Hypermetro와 복제를 동일한 volume_type에서 사용할 수 없습니다." - -#, python-format -msgid "I/O group %(iogrp)d is not valid; available I/O groups are %(avail)s." -msgstr "" -"I/O 그룹 %(iogrp)d이(가) 올바르지 않습니다. 사용 가능한 I/O 그룹은 %(avail)s" -"입니다. " - -msgid "ID" -msgstr "ID" - -msgid "" -"If compression is set to True, rsize must also be set (not equal to -1)." -msgstr "압축이 True로 설정되면 rsize도 설정해야 합니다(-1이 아님). " - -msgid "If nofmtdisk is set to True, rsize must also be set to -1." -msgstr "nofmtdisk가 True로 설정되면 rsize도 -1로 설정해야 합니다." - -#, python-format -msgid "" -"Illegal value '%(prot)s' specified for flashsystem_connection_protocol: " -"valid value(s) are %(enabled)s." -msgstr "" -"flashsystem_connection_protocol에 올바르지 않은 값 '%(prot)s'을(를) 지정함: " -"올바른 값은 %(enabled)s입니다." - -msgid "Illegal value specified for IOTYPE: 0, 1, or 2." -msgstr "IOTYPE에 대해 잘못된 값이 지정됨 : 0, 1 또는 2." - -msgid "Illegal value specified for smarttier: set to either 0, 1, 2, or 3." -msgstr "smarttier에 대해 잘못된 값이 지정됨: 0, 1, 2 또는 3으로 설정하십시오. " - -msgid "" -"Illegal value specified for storwize_svc_vol_grainsize: set to either 32, " -"64, 128, or 256." -msgstr "" -"storwize_svc_vol_grainsize에 잘못된 값이 지정됨:32, 64, 128 또는 256으로 설정" -"하십시오. " - -msgid "" -"Illegal value specified for thin: Can not set thin and thick at the same " -"time." -msgstr "" -"thin에 대해 잘못된 값이 지정됨: 동시에 thin과 thick을 설정할 수 없습니다. 
" - -#, python-format -msgid "Image %(image_id)s could not be found." -msgstr "%(image_id)s 이미지를 찾을 수 없습니다. " - -#, python-format -msgid "Image %(image_id)s is not active." -msgstr "%(image_id)s 이미지가 활성 상태가 아닙니다. " - -#, python-format -msgid "Image %(image_id)s is unacceptable: %(reason)s" -msgstr "%(image_id)s 이미지는 허용할 수 없음: %(reason)s" - -msgid "Image location not present." -msgstr "이미지 위치가 없습니다." - -#, python-format -msgid "" -"Image virtual size is %(image_size)dGB and doesn't fit in a volume of size " -"%(volume_size)dGB." -msgstr "" -"이미지 가상 크기가 %(image_size)dGB이므로 %(volume_size)dGB 크기의 볼륨에 맞" -"지 않습니다. " - -msgid "" -"ImageBusy error raised while deleting rbd volume. This may have been caused " -"by a connection from a client that has crashed and, if so, may be resolved " -"by retrying the delete after 30 seconds has elapsed." -msgstr "" -"rbd 볼륨 삭제 중 ImageBusy 오류가 발생했습니다. 이는 충돌한 클라이언트로부터" -"의 연결로 인해 발생했을 수 있습니다. 이러한 경우 30초 후 삭제를 재시도하여 문" -"제를 해결할 수도 있습니다." - -#, python-format -msgid "" -"Import record failed, cannot find backup service to perform the import. " -"Request service %(service)s" -msgstr "" -"레코드 가져오기에 실패했습니다. 가져오기를 수행할 백업 서비스를 찾을 수 없습" -"니다. 요청 서비스 %(service)s" - -msgid "Incorrect request body format" -msgstr "올바르지 않은 요청 본문 형식" - -msgid "Incorrect request body format." -msgstr "올바르지 않은 요청 본문 형식입니다." - -msgid "Incremental backups exist for this backup." -msgstr "이 백업에 대한 증분 백업이 있습니다. " - -#, python-format -msgid "" -"Infortrend CLI exception: %(err)s Param: %(param)s (Return Code: %(rc)s) " -"(Output: %(out)s)" -msgstr "" -"Infortrend CLI 예외: %(err)s 매개변수: %(param)s(리턴 코드: %(rc)s) (출력: " -"%(out)s)" - -msgid "Input volumes or snapshots are invalid." -msgstr "입력 볼륨 또는 스냅샷이 올바르지 않습니다. " - -msgid "Input volumes or source volumes are invalid." -msgstr "입력 볼륨 또는 소스 볼륨이 올바르지 않습니다. " - -#, python-format -msgid "Instance %(uuid)s could not be found." -msgstr "%(uuid)s 인스턴스를 찾을 수 없습니다. " - -msgid "Insufficient free space available to extend volume." 
-msgstr "볼륨을 확장하는 데 충분한 여유 공간이 없습니다." - -msgid "Insufficient privileges" -msgstr "권한이 충분하지 않음" - -#, python-format -msgid "Invalid 3PAR Domain: %(err)s" -msgstr "3PAR 도메인이 잘못되었습니다: %(err)s" - -msgid "Invalid ALUA value. ALUA value must be 1 or 0." -msgstr "ALUA 값이 올바르지 않습니다. ALUA 값은 1 또는 0이어야 합니다. " - -msgid "Invalid Ceph args provided for backup rbd operation" -msgstr "백업 rbd 조작에 올바르지 않은 Ceph 인수가 제공됨 " - -#, python-format -msgid "Invalid CgSnapshot: %(reason)s" -msgstr "올바르지 않은 Cg 스냅샷: %(reason)s" - -#, python-format -msgid "Invalid ConsistencyGroup: %(reason)s" -msgstr "올바르지 않은 일관성 그룹: %(reason)s" - -msgid "Invalid ConsistencyGroup: No host to create consistency group" -msgstr "올바르지 않은 일관성 그룹: 일관성 그룹을 작성할 호스트가 없음" - -#, python-format -msgid "" -"Invalid HPELeftHand API version found: %(found)s. Version %(minimum)s or " -"greater required for manage/unmanage support." -msgstr "" -"올바르지 않은 HPELeftHand API 버전 발견: %(found)s. 관리/비관리 지원을 위해서" -"는 %(minimum)s 이상이 필요합니다. " - -#, python-format -msgid "Invalid IP address format: '%s'" -msgstr "올바르지 않은 IP 주소 형식: '%s'" - -#, python-format -msgid "" -"Invalid QoS specification detected while getting QoS policy for volume %s" -msgstr "" -"볼륨 %s에 대한 QoS 정책을 가져오는 중에 올바르지 않은 QoS 스펙이 발견됨" - -#, python-format -msgid "Invalid Replication Target: %(reason)s" -msgstr "올바르지 않은 복제 대상: %(reason)s" - -#, python-format -msgid "" -"Invalid Virtuozzo Storage share specification: %r. Must be: [MDS1[," -"MDS2],...:/][:PASSWORD]." -msgstr "" -"올바르지 않은 Virtuozzo 스토리지 공유 스펙: %r. 다음이어야 함: [MDS1[," -"MDS2],...:/][:PASSWORD]." - -#, python-format -msgid "Invalid XtremIO version %(cur)s, version %(min)s or up is required" -msgstr "올바르지 않은 XtremIO 버전 %(cur)s, %(min)s 이상의 버전이 필요함" - -#, python-format -msgid "Invalid allocated quotas defined for the following project quotas: %s" -msgstr "다음 프로젝트 할당량에 잘못 할당된 할당량이 정의됨 : %s" - -msgid "Invalid argument" -msgstr "올바르지 않은 인수 " - -msgid "Invalid argument - negative seek offset." 
-msgstr "올바르지 않은 인수 - 음수 찾기 오프셋. " - -#, python-format -msgid "Invalid argument - whence=%s not supported" -msgstr "올바르지 않은 인수 - whence=%s은(는) 지원되지 않음 " - -#, python-format -msgid "Invalid argument - whence=%s not supported." -msgstr "올바르지 않은 인수 - whence=%s은(는) 지원되지 않습니다. " - -#, python-format -msgid "Invalid attaching mode '%(mode)s' for volume %(volume_id)s." -msgstr "볼륨 %(volume_id)s의 연결 모드 '%(mode)s'가 잘못 되었습니다." - -#, python-format -msgid "Invalid auth key: %(reason)s" -msgstr "잘못된 인증 키: %(reason)s" - -#, python-format -msgid "Invalid backup: %(reason)s" -msgstr "올바르지 않은 백업: %(reason)s" - -msgid "Invalid chap user details found in CloudByte storage." -msgstr "" -"CloudByte 스토리지에서 올바르지 않은 chap 사용자 세부사항이 발견되었습니다. " - -#, python-format -msgid "Invalid connection initialization response of volume %(name)s" -msgstr "볼륨 %(name)s의 올바르지 않은 연결 초기화 응답" - -#, python-format -msgid "" -"Invalid connection initialization response of volume %(name)s: %(output)s" -msgstr "볼륨 %(name)s의 올바르지 않은 연결 초기화 응답: %(output)s" - -#, python-format -msgid "Invalid content type %(content_type)s." -msgstr "올바르지 않은 컨텐츠 유형 %(content_type)s." - -msgid "Invalid credentials" -msgstr "올바르지 않은 신임 정보" - -#, python-format -msgid "Invalid directory: %s" -msgstr "올바르지 않은 디렉토리: %s" - -#, python-format -msgid "Invalid disk adapter type: %(invalid_type)s." -msgstr "올바르지 않은 디스크 어댑터 유형: %(invalid_type)s." - -#, python-format -msgid "Invalid disk backing: %s." -msgstr "올바르지 않은 디스크 백업: %s." - -#, python-format -msgid "Invalid disk type: %(disk_type)s." -msgstr "올바르지 않은 디스크 유형: %(disk_type)s." - -#, python-format -msgid "Invalid disk type: %s." -msgstr "올바르지 않은 디스크 유형: %s." - -#, python-format -msgid "Invalid host: %(reason)s" -msgstr "잘못된 호스트: %(reason)s" - -#, python-format -msgid "" -"Invalid hpe3parclient version found (%(found)s). Version %(minimum)s or " -"greater required. Run \"pip install --upgrade python-3parclient\" to upgrade " -"the hpe3parclient." 
-msgstr "" -"올바르지 않은 hpe3parclient 버전을 발견했습니다(%(found)s). 버전 %(minimum)s " -"이상이 필요합니다. \"pip install --upgrade python-3parclient\"를 실행하여 " -"hpe3parclient를 업그레이드하십시오." - -#, python-format -msgid "" -"Invalid hpelefthandclient version found (%(found)s). Version %(minimum)s or " -"greater required. Run 'pip install --upgrade python-lefthandclient' to " -"upgrade the hpelefthandclient." -msgstr "" -"올바르지 않은 hpelefthandclient 버전을 찾았습니다(%(found)s). 버전 " -"%(minimum)s 이상이 필요합니다. 'pip install --upgrade python-" -"lefthandclient'를 실행하여 hpelefthandclient를 업그레이드하십시오." - -#, python-format -msgid "Invalid image href %(image_href)s." -msgstr "올바르지 않은 이미지 href %(image_href)s." - -msgid "Invalid image identifier or unable to access requested image." -msgstr "이미지 ID가 올바르지 않거나 요청된 이미지에 액세스할 수 없습니다." - -msgid "Invalid imageRef provided." -msgstr "올바르지 않은 imageRef가 제공되었습니다. " - -msgid "Invalid input" -msgstr "올바르지 않은 입력" - -#, python-format -msgid "Invalid input received: %(reason)s" -msgstr "잘못된 입력을 받음: %(reason)s" - -#, python-format -msgid "Invalid is_public filter [%s]" -msgstr "올바르지 않은 is_public 필터 [%s]" - -#, python-format -msgid "Invalid lun type %s is configured." -msgstr "올바르지 않은 lun 유형 %s이(가) 구성되었습니다." - -#, python-format -msgid "Invalid metadata size: %(reason)s" -msgstr "잘못된 메타데이터 크기: %(reason)s" - -#, python-format -msgid "Invalid metadata: %(reason)s" -msgstr "잘못된 메타데이터: %(reason)s" - -#, python-format -msgid "Invalid mount point base: %s" -msgstr "올바르지 않은 마운트 지점 기반: %s" - -#, python-format -msgid "Invalid mount point base: %s." -msgstr "올바르지 않은 마운트 지점 기반: %s." - -#, python-format -msgid "Invalid new snapCPG name for retype. new_snap_cpg='%s'." -msgstr "" -"새 snapCPG 이름이 재입력을 수행하기에 올바르지 않습니다. new_snap_cpg='%s'." - -#, python-format -msgid "Invalid port number %(config)s for Coho rpc port" -msgstr "Coho rpc 포트의 올바르지 않은 포트 번호 %(config)s" - -#, python-format -msgid "" -"Invalid prefetch type '%s' is configured. PrefetchType must be in 0,1,2,3." 
-msgstr "" -"올바르지 않은 프리페치 유형 '%s'이(가) 구성되었습니다. PrefetchType은 0,1,2,3" -"이어야 합니다." - -#, python-format -msgid "Invalid qos specs: %(reason)s" -msgstr "올바르지 않은 qos 스펙: %(reason)s" - -msgid "Invalid request to attach volume to an invalid target" -msgstr "올바르지 않은 대상에 볼륨을 접속하는 요청이 올바르지 않습니다. " - -msgid "" -"Invalid request to attach volume with an invalid mode. Attaching mode should " -"be 'rw' or 'ro'" -msgstr "" -"올바르지 않은 모드로 볼륨을 접속하는 유효하지 않은 요청입니다. 접속 모드는 " -"'rw' 또는 'ro'여야 합니다. " - -#, python-format -msgid "Invalid reservation expiration %(expire)s." -msgstr "올바르지 않은 예약 만기 %(expire)s." - -msgid "Invalid response header from RPC server" -msgstr "RPC 서버의 올바르지 않은 응답 헤더" - -#, python-format -msgid "Invalid secondary id %s." -msgstr "올바르지 않은 보조 id %s." - -msgid "Invalid service catalog json." -msgstr "올바르지 않은 서비스 카탈로그 json입니다. " - -msgid "Invalid sheepdog cluster status." -msgstr "Sheepdog 클러스터 상태가 올바르지 않습니다. " - -#, python-format -msgid "Invalid snapshot: %(reason)s" -msgstr "잘못된 스냅샷: %(reason)s" - -#, python-format -msgid "Invalid status: '%s'" -msgstr "올바르지 않은 상태: '%s'" - -#, python-format -msgid "Invalid storage pool %s requested. Retype failed." -msgstr "" -"올바르지 않은 스토리지 풀 %s이(가) 요청되었습니다. 재입력에 실패했습니다." - -#, python-format -msgid "Invalid storage pool %s specificed." -msgstr "올바르지 않은 스토리지 풀 %s이(가) 지정되었습니다." - -msgid "Invalid storage pool is configured." -msgstr "올바르지 않은 스토리지 풀이 구성되었습니다." - -msgid "Invalid transport type." -msgstr "올바르지 않은 전송 유형입니다." - -#, python-format -msgid "Invalid update setting: '%s'" -msgstr "올바르지 않은 업데이트 설정: '%s'" - -#, python-format -msgid "Invalid value '%s' for force." -msgstr "강제 실행에 대한 올바르지 않은 값 '%s'입니다. " - -#, python-format -msgid "Invalid value '%s' for force. " -msgstr "강제 실행에 대한 올바르지 않은 값 '%s'입니다. " - -#, python-format -msgid "Invalid value '%s' for is_public. Accepted values: True or False." -msgstr "" -"is_public에 대해 값 '%s'이(가) 올바르지 않습니다. 승인된 값: True 또는 False." - -#, python-format -msgid "Invalid value '%s' for skip_validation." 
-msgstr "skip_validation에 대한 값 '%s'이(가) 올바르지 않습니다. " - -#, python-format -msgid "Invalid value for 'bootable': '%s'" -msgstr "'부트 가능'에 대한 값이 올바르지 않음: '%s'" - -#, python-format -msgid "Invalid value for 'force': '%s'" -msgstr "'강제 실행'에 대해 값이 올바르지 않음: '%s'" - -#, python-format -msgid "Invalid value for 'readonly': '%s'" -msgstr "'읽기 전용'에 대한 값이 올바르지 않음: '%s'" - -msgid "Invalid value for 'scheduler_max_attempts', must be >=1" -msgstr "'scheduler_max_attempts'에 대한 올바르지 않은 값, >= 1이어야 함" - -msgid "Invalid value for NetApp configuration option netapp_host_type." -msgstr "NetApp 구성 옵션 netapp_host_type에 대한 값이 올바르지 않습니다. " - -msgid "Invalid value for NetApp configuration option netapp_lun_ostype." -msgstr "NetApp 구성 옵션 netapp_lun_ostype에 대한 값이 올바르지 않습니다. " - -#, python-format -msgid "Invalid value for age, %(age)s" -msgstr "연령에 대한 값이 올바르지 않음, %(age)s" - -#, python-format -msgid "Invalid value: \"%s\"" -msgstr "올바르지 않은 값: \"%s\"" - -#, python-format -msgid "" -"Invalid volume size provided for create request: %s (size argument must be " -"an integer (or string representation of an integer) and greater than zero)." -msgstr "" -"작성 요청에 올바르지 않은 볼륨 크기가 제공됨: %s(크기 인수는정수(또는 정수의 " -"문자열 표시)이거나 0보다 커야함). " - -#, python-format -msgid "Invalid volume type: %(reason)s" -msgstr "잘못된 볼륨 종류: %(reason)s" - -#, python-format -msgid "Invalid volume: %(reason)s" -msgstr "잘못된 볼륨: %(reason)s" - -#, python-format -msgid "" -"Invalid volume: Cannot add volume %(volume_id)s to consistency group " -"%(group_id)s because volume is in an invalid state: %(status)s. Valid states " -"are: ('available', 'in-use')." -msgstr "" -"올바르지 않은 볼륨: %(volume_id)s 볼륨을 일관성 그룹 %(group_id)s에 추가할 " -"수 없습니다. 볼륨 상태 %(status)s(이)가 올바르지 않은 상태입니다. 올바른 상태" -"는 ('available', 'in-use')입니다. " - -#, python-format -msgid "" -"Invalid volume: Cannot add volume %(volume_id)s to consistency group " -"%(group_id)s because volume type %(volume_type)s is not supported by the " -"group." 
-msgstr "" -"올바르지 않은 볼륨: %(volume_id)s 볼륨을 일관성 그룹 %(group_id)s에 추가할 " -"수 없습니다. 이 그룹에서 볼륨 유형 %(volume_type)s을(를) 제공되어야 합니다." - -#, python-format -msgid "" -"Invalid volume: Cannot add volume fake-volume-uuid to consistency group " -"%(group_id)s because volume cannot be found." -msgstr "" -"올바르지 않은 볼륨: fake-volume-uuid 볼륨을 일관성 그룹 %(group_id)s에 추가" -"할 수 없습니다. 해당 볼륨을 찾을 수 없습니다. " - -#, python-format -msgid "" -"Invalid volume: Cannot remove volume fake-volume-uuid from consistency group " -"%(group_id)s because it is not in the group." -msgstr "" -"올바르지 않은 볼륨: fake-volume-uuid 볼륨을 일관성 그룹 %(group_id)s에서 제거" -"할 수 없습니다. 해당 볼륨이 그룹에 없습니다. " - -#, python-format -msgid "Invalid volume_type passed: %s." -msgstr "올바르지 않은 volume_type이 전달됨: %s." - -#, python-format -msgid "" -"Invalid volume_type provided: %s (requested type is not compatible; either " -"match source volume, or omit type argument)." -msgstr "" -"올바르지 않은 volume_type이 제공됨: %s(요청된 유형이 호환 가능하지 않음, 소" -"스 볼륨을 일치시키거나 유형 인수를 생략하십시오)" - -#, python-format -msgid "" -"Invalid volume_type provided: %s (requested type is not compatible; " -"recommend omitting the type argument)." -msgstr "" -"올바르지 않은 volume_type이 제공됨: %s(요청된 유형이 호환 가능하지 않음, 유" -"형 인수를 생략하도록 권장함)" - -#, python-format -msgid "" -"Invalid volume_type provided: %s (requested type must be supported by this " -"consistency group)." -msgstr "" -"올바르지 않은 volume_type이 제공됨: %s(이 일관성 그룹이 요청된 유형을지원해" -"야 함)." - -#, python-format -msgid "Invalid wwpns format %(wwpns)s" -msgstr "올바르지 않은 wwpn 형식 %(wwpns)s" - -msgid "Invoking web service failed." -msgstr "웹 서비스 호출에 실패했습니다." - -msgid "Issue encountered waiting for job." -msgstr "작업 대기 중에 문제가 발생했습니다." - -msgid "Issue encountered waiting for synchronization." -msgstr "동기화 대기 중에 문제가 발생했습니다." - -msgid "" -"Issuing a fail-over failed because replication is not properly configured." -msgstr "복제가 적절하게 구성되지 않았으므로 장애 복구 실행에 실패했습니다." - -#, python-format -msgid "Job id not found in CloudByte's create volume [%s] response." 
-msgstr "CloudByte의 볼륨 작성[%s] 응답에서 작업 ID를 찾을 수 없습니다. " - -#, python-format -msgid "Job id not found in CloudByte's delete volume [%s] response." -msgstr "CloudByte의 삭제 볼륨 [%s] 응답에서 작업 ID를 찾을 수 없습니다. " - -msgid "" -"Key names can only contain alphanumeric characters, underscores, periods, " -"colons and hyphens." -msgstr "" -"키 이름은 영숫자 문자, 밑줄, 마침표, 콜론, 하이픈만 포함할 수 있습니다." - -#, python-format -msgid "KeyError: %s" -msgstr "KeyError: %s" - -msgid "Keystone version 3 or greater must be used to get nested quota support." -msgstr "중첩 할당량 지원을 받기 위해 Keystone 버전 3 이상을 사용해야 합니다." - -#, python-format -msgid "LU does not exist for volume: %s" -msgstr "볼륨의 LU가 없음: %s" - -msgid "LUN export failed!" -msgstr "LUN 내보내기 실패! " - -msgid "LUN map overflow on every channel." -msgstr "모든 채널의 LUN 맵 오버플로우입니다. " - -#, python-format -msgid "LUN not found with given ref %s." -msgstr "주어진 ref %s을(를) 사용하여 LUN을 찾을 수 없습니다. " - -#, python-format -msgid "LUN number is out of bound on channel id: %(ch_id)s." -msgstr "LUN 번호가 채널 id에 대한 경계를 벗어남: %(ch_id)s." - -#, python-format -msgid "Last %s cinder syslog entries:-" -msgstr "마지막 %s cinder syslog 항목:-" - -msgid "LeftHand cluster not found" -msgstr "LeftHand 클러스터를 찾을 수 없음" - -msgid "License is unavailable." -msgstr "라이센스를 사용할 수 없습니다." - -#, python-format -msgid "Line %(dis)d : %(line)s" -msgstr "행 %(dis)d : %(line)s" - -msgid "Link path already exists and its not a symlink" -msgstr "링크 경로가 이미 존재하고 symlink가 아님" - -#, python-format -msgid "Linked clone of source volume not supported in state: %s." -msgstr "소스 볼륨의 링크된 복제본이 다음 상태에서 지원되지 않음: %s" - -msgid "Lock acquisition failed." -msgstr "잠금 확보에 실패했습니다." - -msgid "Logout session error." -msgstr "로그아웃 세션 오류." - -msgid "" -"Lookup service not configured. Config option for fc_san_lookup_service needs " -"to specify a concrete implementation of the lookup service." -msgstr "" -"검색 서비스가 구성되지 않았습니다. fc_san_lookup_service에 대한 구성 옵션이 " -"검색 서비스의 구체적인 구현을 지정해야 합니다. " - -msgid "Lun migration error." 
-msgstr "Lun 마이그레이션 오류입니다. " - -#, python-format -msgid "" -"MD5 of object: %(object_name)s before: %(md5)s and after: %(etag)s is not " -"same." -msgstr "" -"%(md5)s 전과 %(etag)s 후의 오브젝트 %(object_name)s이(가) 동일하지 않습니다." - -#, python-format -msgid "MSG_DENIED: %r" -msgstr "MSG_DENIED: %r" - -#, python-format -msgid "MSG_DENIED: AUTH_ERROR: %r" -msgstr "MSG_DENIED: AUTH_ERROR: %r" - -#, python-format -msgid "MSG_DENIED: RPC_MISMATCH: %r" -msgstr "MSG_DENIED: RPC_MISMATCH: %r" - -#, python-format -msgid "Malformed fcns output string: %s" -msgstr "올바르지 않은 형식의 fcns 출력 문자열: %s" - -#, python-format -msgid "Malformed message body: %(reason)s" -msgstr "잘못된 메시지 본문: %(reason)s" - -#, python-format -msgid "Malformed nameserver string: %s" -msgstr "올바르지 않은 형식의 이름 서버 문자열: %s" - -msgid "Malformed request body" -msgstr "형식이 틀린 요청 본문" - -msgid "Malformed request body." -msgstr "요청 본문의 형식이 잘못되었습니다. " - -msgid "Malformed request url" -msgstr "형식이 틀린 요청 URL" - -#, python-format -msgid "Malformed response to command %(cmd)s: %(reason)s" -msgstr "%(cmd)s 명령에 대해 양식이 잘못된 응답: %(reason)s" - -msgid "Malformed scheduler_hints attribute" -msgstr "형식이 틀린 scheduler_hints 속성" - -#, python-format -msgid "Malformed show fcns database string: %s" -msgstr "올바르지 않은 형식의 표시 fcns 데이터베이스 문자열: %s" - -#, python-format -msgid "" -"Malformed zone configuration: (switch=%(switch)s zone_config=" -"%(zone_config)s)." -msgstr "" -"올바르지 않은 형식의 구역 구성: (switch=%(switch)s zone_config=" -"%(zone_config)s)." - -#, python-format -msgid "Malformed zone status: (switch=%(switch)s zone_config=%(zone_config)s)." -msgstr "" -"올바르지 않은 형식의 구역 상태: (switch=%(switch)s zone_config=" -"%(zone_config)s)." - -msgid "Manage existing get size requires 'id'." -msgstr "기존 가져오기 크기를 관리하려면 'id'가 필요합니다. " - -msgid "Manage existing snapshot not implemented." -msgstr "기존 스냅샷 관리가 구현되지 않았습니다. 
" - -#, python-format -msgid "" -"Manage existing volume failed due to invalid backend reference " -"%(existing_ref)s: %(reason)s" -msgstr "" -"올바르지 않은 백엔드 참조로 인해 기존 볼륨 관리에 실패함 %(existing_ref)s: " -"%(reason)s" - -#, python-format -msgid "Manage existing volume failed due to volume type mismatch: %(reason)s" -msgstr "볼륨 유형 불일치로 인해 기존 볼륨 관리에 실패함: %(reason)s" - -msgid "Manage existing volume not implemented." -msgstr "기존 볼륨 관리가 구현되지 않았습니다." - -msgid "Manage existing volume requires 'source-id'." -msgstr "기존 볼륨을 관리하려면 'source-id'가 필요합니다. " - -#, python-format -msgid "" -"Manage volume is not supported if FAST is enable. FAST policy: " -"%(fastPolicyName)s." -msgstr "" -"FAST가 사용으로 설정된 경우 볼륨 관리가 지원되지 않습니다. FAST 정책: " -"%(fastPolicyName)s." - -msgid "Managing of snapshots to failed-over volumes is not allowed." -msgstr "장애 복구된 볼륨에서 스냅샷 관리는 허용되지 않습니다." - -msgid "Map info is None due to array version not supporting hypermetro." -msgstr "배열 버전이 hypermetro를 지원하지 않으므로 맵 정보가 없습니다." - -#, python-format -msgid "" -"Mapping %(id)s prepare failed to complete within theallotted %(to)d seconds " -"timeout. Terminating." -msgstr "" -"%(id)s 맵핑 준비가 할당된 %(to)d초(제한시간) 내에 완료되지 못했습니다. 종료됩" -"니다. " - -#, python-format -msgid "Masking view %(maskingViewName)s was not deleted successfully" -msgstr "마스킹 보기 %(maskingViewName)s이(가) 삭제되지 않음" - -#, python-format -msgid "Maximum number of backups allowed (%(allowed)d) exceeded" -msgstr "허용된 최대 백업 수(%(allowed)d)를 초과함" - -#, python-format -msgid "Maximum number of snapshots allowed (%(allowed)d) exceeded" -msgstr "허용된 최대 스냅샷 수 (%(allowed)d)을(를) 초과함" - -#, python-format -msgid "" -"Maximum number of volumes allowed (%(allowed)d) exceeded for quota " -"'%(name)s'." -msgstr "" -"할당량 '%(name)s'에 대해 허용된 최대 볼륨 수(%(allowed)d)가 초과되었습니다. 
" - -#, python-format -msgid "May specify only one of %s" -msgstr "%s 중 하나만 지정할 수 있음 " - -msgid "Metadata backup already exists for this volume" -msgstr "이 볼륨에 대한 메타데이터 백업이 이미 존재함" - -#, python-format -msgid "Metadata backup object '%s' already exists" -msgstr "메타데이터 백업 오브젝트 '%s'이(가) 이미 존재함" - -msgid "Metadata property key blank." -msgstr "메타데이터 특성 키가 공백입니다. " - -msgid "Metadata restore failed due to incompatible version" -msgstr "호환 불가능한 버전으로 인해 메타데이터 복원에 실패" - -msgid "Metadata restore failed due to incompatible version." -msgstr "호환 불가능한 버전으로 인해 메타데이터 복원 실패" - -msgid "" -"Missing 'purestorage' python module, ensure the library is installed and " -"available." -msgstr "" -"'purestorage' python 모듈 누락. 라이브러리가 설치되어 사용할 수 있는지 사용" -"할 수 없습니다." - -msgid "Missing Fibre Channel SAN configuration param - fc_fabric_names" -msgstr "파이버 채널 SAN 구성 매개변수 누락 - fc_fabric_names" - -msgid "Missing request body" -msgstr "요청 본문이 누락됨 " - -msgid "Missing request body." -msgstr "요청 본문이 누락되었습니다. " - -#, python-format -msgid "Missing required element '%s' in request body" -msgstr "요청 본문에서 '%s' 필수 요소가 누락됨 " - -#, python-format -msgid "Missing required element '%s' in request body." -msgstr "요청 본문에서 필수 요소 '%s'이(가) 누락되었습니다. " - -msgid "Missing required element 'consistencygroup' in request body." -msgstr "요청 본문에 필수 요소 'consistencygroup'이 누락되었습니다. " - -msgid "Missing required element quota_class_set in request body." -msgstr "요청 본문에서 필수 요소 quota_class_set가 누락되었습니다." - -msgid "Missing required element snapshot in request body." -msgstr "요청 본문에 필수 요소 스냅샷이 누락되었습니다. " - -msgid "" -"Multiple SerialNumbers found, when only one was expected for this operation. " -"Please change your EMC config file." -msgstr "" -"이 조작에 대해 하나의 SerialNumber만 예상되었을 때 다중 SerialNumber가 발견되" -"었습니다. EMC 구성 파일을 변경하십시오. " - -#, python-format -msgid "Multiple copies of volume %s found." -msgstr "볼륨 %s의 다중 사본이 발견되었습니다. " - -#, python-format -msgid "Multiple matches found for '%s', use an ID to be more specific." 
-msgstr "" -"'%s'에 대한 일치를 여러 개 찾았습니다. 더 구체적인 검색을 위해 ID를 사용하십" -"시오." - -msgid "Multiple profiles found." -msgstr "다중 프로파일이 발견되었습니다. " - -msgid "Must implement a fallback schedule" -msgstr "대체 스케줄을 구현해야 함" - -msgid "Must implement find_retype_host" -msgstr "find_retype_host를 구현해야 함" - -msgid "Must implement host_passes_filters" -msgstr "host_passes_filters를 구현해야 함 " - -msgid "Must implement schedule_create_consistencygroup" -msgstr "schedule_create_consistencygroup을 구현해야 함" - -msgid "Must implement schedule_create_volume" -msgstr "schedule_create_volume을 구현해야 함" - -msgid "Must implement schedule_get_pools" -msgstr "schedule_get_pools를 구현해야 함 " - -msgid "Must pass wwpn or host to lsfabric." -msgstr "lsfabric에 wwpn 또는 호스트를 전달해야 합니다." - -msgid "" -"Must run this command as cloud admin using a Keystone policy.json which " -"allows cloud admin to list and get any project." -msgstr "" -"클라우드 관리자가 프로젝트를 나열하고 가져올 수 있는 Keystone policy.json을 " -"사용하여 클라우드 관리자로 이 명령을 실행해야 합니다." - -msgid "Must specify 'connector'" -msgstr "'커넥터'를 지정해야 함" - -msgid "Must specify 'connector'." -msgstr "'커넥터'를 지정해야 합니다. " - -msgid "Must specify 'host'." -msgstr "'호스트'를 지정해야 합니다. " - -msgid "Must specify 'new_volume'" -msgstr "'new_volume'을 지정해야 함" - -msgid "Must specify 'status'" -msgstr "'상태'를 지정해야 함" - -msgid "" -"Must specify 'status', 'attach_status' or 'migration_status' for update." -msgstr "" -"업데이트하려면 'status', 'attach_status' 또는 'migration_status'를 지정해야 " -"합니다." - -msgid "Must specify a valid attach status" -msgstr "올바른 접속 상태를 지정해야 함" - -msgid "Must specify a valid migration status" -msgstr "올바른 마이그레이션 상태를 지정해야 함" - -#, python-format -msgid "Must specify a valid persona %(valid)s,value '%(persona)s' is invalid." -msgstr "" -"올바른 사용자 %(valid)s을(를) 지정해야 합니다. '%(persona)s' 값이 올바르지 않" -"습니다. " - -#, python-format -msgid "" -"Must specify a valid provisioning type %(valid)s, value '%(prov)s' is " -"invalid." -msgstr "" -"올바른 프로비저닝 유형 %(valid)s을(를) 지정해야 합니다. '%(prov)s' 값이 올바" -"르지 않습니다. 
" - -msgid "Must specify a valid status" -msgstr "올바른 상태를 지정해야 함" - -msgid "Must specify an ExtensionManager class" -msgstr "ExtensionManager 클래스를 지정해야 함" - -msgid "Must specify bootable in request." -msgstr "요청에서 부트 가능을 지정해야 합니다." - -msgid "Must specify protection domain name or protection domain id." -msgstr "보호 도메인 이름 또는 보호 도메인 ID를 지정해야 합니다. " - -msgid "Must specify readonly in request." -msgstr "요청에서 읽기 전용을 지정해야 합니다." - -msgid "Must specify snapshot source-name or source-id." -msgstr "스냅샷 source-name 또는 source-id를 지정해야 합니다." - -msgid "Must specify source-name or source-id." -msgstr "source-name 또는 source-id가 있어야 합니다." - -msgid "Must specify storage pool name or id." -msgstr "스토리지 풀 이름 또는 ID를 지정해야 합니다. " - -msgid "Must specify storage pools. Option: sio_storage_pools." -msgstr "스토리지 풀을 지정해야 합니다. 옵션: sio_storage_pools." - -msgid "Must supply a positive, non-zero value for age" -msgstr "기간에 0이 아닌 양수 값을 제공해야 함" - -#, python-format -msgid "" -"NAS config '%(name)s=%(value)s' invalid. Must be 'auto', 'true', or 'false'" -msgstr "" -"NAS 구성 '%(name)s=%(value)s'이(가) 올바르지 않습니다. 'auto', 'true' 또는 " -"'false'여야 합니다." - -#, python-format -msgid "NFS config file at %(config)s doesn't exist" -msgstr "NFS config 파일이 %(config)s에 없음" - -#, python-format -msgid "NFS file %s not discovered." -msgstr "NFS 파일 %s을(를) 찾을 수 없습니다." - -msgid "NFS file could not be discovered." -msgstr "NFS 파일을 찾을 수 없습니다. " - -msgid "NaElement name cannot be null." -msgstr "NaElement 이름은 널(null)일 수 없습니다." - -msgid "Name" -msgstr "이름" - -msgid "" -"Name, description, add_volumes, and remove_volumes can not be all empty in " -"the request body." -msgstr "" -"요청 본문에서 이름, 설명, add_volumes 및 remove_volumes이 모두비어 있을 수 없" -"습니다. " - -msgid "Need non-zero volume size" -msgstr "0이 아닌 볼륨 크기가 필요함" - -#, python-format -msgid "Neither MSG_DENIED nor MSG_ACCEPTED: %r" -msgstr "MSG_DENIED와 MSG_ACCEPTED가 모두 아님: %r" - -msgid "NetApp Cinder Driver exception." -msgstr "NetApp Cinder Driver 예외." 
- -#, python-format -msgid "" -"New size for extend must be greater than current size. (current: %(size)s, " -"extended: %(new_size)s)." -msgstr "" -"확장을 위한 새 크기는 현재 크기보다 커야 합니다. %(size)s, 확장됨: " -"%(new_size)s)." - -#, python-format -msgid "" -"New size should be bigger than the real size from backend storage. realsize: " -"%(oldsize)s, newsize: %(newsize)s." -msgstr "" -"새 크기는 백엔드 스토리지의 실제 크기보다 커야 함: realsize: %(oldsize)s, " -"newsize: %(newsize)s." - -msgid "New volume size must be specified as an integer." -msgstr "새 볼륨 크기를 정수로 지정해야 합니다. " - -msgid "New volume type must be specified." -msgstr "새 볼륨 유형을 지정해야 합니다." - -msgid "New volume type not specified in request_spec." -msgstr "새 볼륨 유형이 request_spec에서 지정되지 않았습니다." - -msgid "Nimble Cinder Driver exception" -msgstr "Nimble Cinder 드라이버 예외" - -msgid "No FC initiator can be added to host." -msgstr "FC 개시자를 호스트에 추가할 수 없습니다." - -msgid "No FC port connected to fabric." -msgstr "FC 포트가 패브릭에 연결되지 않았습니다." - -msgid "No FCP targets found" -msgstr "FCP 대상을 찾을 수 없음" - -msgid "No Port Group elements found in config file." -msgstr "구성 파일에서 포트 그룹 요소를 찾을 수 없습니다." - -msgid "No VF ID is defined in the configuration file." -msgstr "구성 파일에 VF ID가 정의되지 않았습니다." - -msgid "No active iSCSI portals with supplied iSCSI IPs" -msgstr "제공된 iSCSI IP를 가진 활성 iSCSI 포털이 없음" - -#, python-format -msgid "No available service named %s" -msgstr "%s(으)로 이름 지정된 사용 가능한 서비스가 없음 " - -#, python-format -msgid "No backup with id %s" -msgstr "ID가 %s인 백업이 없음" - -msgid "No backups available to do an incremental backup." -msgstr "증분 백업을 수행할 수 있는 백업이 없습니다. " - -msgid "No big enough free disk" -msgstr "충분한 여유 디스크 공간 없음" - -#, python-format -msgid "No cgsnapshot with id %s" -msgstr "ID가 %s인 cg 스냅샷이 없음" - -msgid "No cinder entries in syslog!" -msgstr "syslog에 cinder 항목이 없습니다!" - -#, python-format -msgid "No cloned LUN named %s found on the filer" -msgstr "이름이 %s인 복제 LUN을 파일러에서 찾을 수 없음" - -msgid "No config node found." -msgstr "구성 노드를 찾을 수 없습니다. 
" - -#, python-format -msgid "No consistency group with id %s" -msgstr "ID가 %s인 일관성 그룹이 없음" - -#, python-format -msgid "No element by given name %s." -msgstr "지정된 이름 %s의 요소가 없습니다." - -msgid "No errors in logfiles!" -msgstr "로그 파일에 오류가 없습니다!" - -#, python-format -msgid "No file found with %s as backing file." -msgstr "백업 파일로 %s을(를) 가진 파일을 찾을 수 없음 " - -#, python-format -msgid "" -"No free LUN IDs left. Maximum number of volumes that can be attached to host " -"(%s) has been exceeded." -msgstr "" -"사용 가능한 LUN ID가 남아 있지 않습니다. 호스트에 연결할 수 있는 최대 볼륨 수" -"(%s)가 초과되었습니다. " - -msgid "No free disk" -msgstr "여유 디스크 공간 없음" - -#, python-format -msgid "No good iscsi portal found in supplied list for %s." -msgstr "%s에 대한 제공된 목록에서 올바른 iscsi 포털을 찾을 수 없습니다." - -#, python-format -msgid "No good iscsi portals found for %s." -msgstr "%s에 대한 올바른 iscsi 포털을 찾을 수 없습니다." - -#, python-format -msgid "No host to create consistency group %s." -msgstr "일관성 그룹 %s을(를) 작성할 호스트가 없습니다. " - -msgid "No iSCSI-enabled ports on target array." -msgstr "대상 배열에 iSCSI 사용 포트가 없습니다. " - -msgid "No image_name was specified in request." -msgstr "요청에 image_name이 지정되지 않았습니다. " - -msgid "No initiator connected to fabric." -msgstr "개시자가 패브릭에 연결되지 않았습니다." - -#, python-format -msgid "No initiator group found for initiator %s" -msgstr "%s 개시자의 개시자 그룹을 찾을 수 없음" - -msgid "No initiators found, cannot proceed" -msgstr "개시자를 찾을 수 없음, 계속할 수 없음" - -#, python-format -msgid "No interface found on cluster for ip %s" -msgstr "ip %s에 대한 클러스터에서 인터페이스를 찾을 수 없음" - -msgid "No ip address found." -msgstr "IP 주소를 찾을 수 없습니다. " - -msgid "No iscsi auth groups were found in CloudByte." -msgstr "CloudByte에서 iscsi 인증 그룹을 찾을 수 없습니다. " - -msgid "No iscsi initiators were found in CloudByte." -msgstr "CloudByte에서 iscsi 개시자를 찾을 수 없습니다." - -#, python-format -msgid "No iscsi service found for CloudByte volume [%s]." -msgstr "CloudByte 볼륨 [%s]의 iscsi 서비스를 찾을 수 없습니다." - -msgid "No iscsi services found in CloudByte storage." 
-msgstr "CloudByte 스토리지에서 iscsi 서비스를 찾을 수 없습니다." - -#, python-format -msgid "No key file specified and unable to load key from %(cert)s %(e)s." -msgstr "지정된 키 파일이 없으며 %(cert)s %(e)s에서 키를 로드할 수 없습니다. " - -msgid "No mounted Gluster shares found" -msgstr "마운트된 Gluster 공유를 찾지 못함" - -msgid "No mounted NFS shares found" -msgstr "마운트된 NFS 공유를 찾지 못함" - -msgid "No mounted SMBFS shares found." -msgstr "마운트된 SMBFS 공유를 찾을 수 없습니다." - -msgid "No mounted Virtuozzo Storage shares found" -msgstr "마운트된 Virtuozzo 스토리지 공유를 찾을 수 없습니다. " - -msgid "No mounted shares found" -msgstr "마운트된 공유를 찾을 수 없음" - -#, python-format -msgid "No node found in I/O group %(gid)s for volume %(vol)s." -msgstr "볼륨 %(vol)s에 대해 I/O 그룹 %(gid)s에서 노드를 찾을 수 없습니다. " - -msgid "" -"No pools are available for provisioning volumes. Ensure that the " -"configuration option netapp_pool_name_search_pattern is set correctly." -msgstr "" -"볼륨 프로비저닝을 위해 사용할 수 있는 풀이 없습니다. 구성 옵션 " -"netapp_pool_name_search_pattern이 올바르게 설정되었는지 확인하십시오. " - -msgid "" -"No response was received from CloudByte storage list iSCSI auth user API " -"call." -msgstr "" -"CloudByte 스토리지 목록 iSCSI 인증 사용자 API 호출에서 응답이 수신되지 않았습" -"니다. " - -msgid "No response was received from CloudByte storage list tsm API call." -msgstr "CloudByte 스토리지 목록 tsm API 호출에서 응답을 수신하지 못했습니다." - -msgid "No response was received from CloudByte's list filesystem api call." -msgstr "CloudByte의 목록 파일 시스템 api 호출에서 응답을 수신하지 못했습니다." - -msgid "No service VIP configured and no nexenta_client_address" -msgstr "서비스 VIP가 구성되지 않았으며nexenta_client_address가 없음" - -#, python-format -msgid "No snap found with %s as backing file." -msgstr "백업 파일로 %s을(를) 가진 스냅샷을 찾을 수 없음 " - -#, python-format -msgid "No snapshot image found in snapshot group %s." -msgstr "스냅샷 그룹 %s에서 스냅샷 이미지를 찾을 수 없습니다. " - -#, python-format -msgid "No snapshots could be found on volume %s." -msgstr "볼륨 %s에서 스냅샷을 찾을 수 없습니다." - -#, python-format -msgid "No source snapshots provided to create consistency group %s." 
-msgstr "" -"일관성 그룹 %s을(를) 작성하는 데 필요한 소스 스냅샷이 제공되지 않았습니다. " - -#, python-format -msgid "No storage path found for export path %s" -msgstr "내보내기 경로 %s에 대해 스토리지 경로를 찾을 수 없음" - -#, python-format -msgid "No such QoS spec %(specs_id)s." -msgstr "해당하는 QoS 스펙 %(specs_id)s이(가) 없습니다. " - -msgid "No suitable discovery ip found" -msgstr "적합한 발견 ip를 찾을 수 없음" - -#, python-format -msgid "No support to restore backup version %s" -msgstr "백업 버전 %s 복원을 지원하지 않음" - -#, python-format -msgid "No target id found for volume %(volume_id)s." -msgstr "%(volume_id)s 볼륨에 대한 대상ID가 없습니다. " - -msgid "" -"No unused LUN IDs are available on the host; multiattach is enabled which " -"requires that all LUN IDs to be unique across the entire host group." -msgstr "" -"호스트에서 사용할 수 있는 사용하지 않은 LUN ID가 없습니다. 다중 연결이 사용으" -"로 설정되며 이를 위해서는 모든 LUN ID가 전체 호스트 그룹에서 고유해야 합니" -"다. " - -#, python-format -msgid "No valid host was found. %(reason)s" -msgstr "유효한 호스트가 없습니다. %(reason)s" - -#, python-format -msgid "No valid hosts for volume %(id)s with type %(type)s" -msgstr "유형이 %(type)s인 볼륨 %(id)s의 호스트가 올바르지 않음" - -#, python-format -msgid "No vdisk with the UID specified by ref %s." -msgstr "ref %s(으)로 지정된 UID를 갖는 vdisk가 없습니다." - -#, python-format -msgid "No views found for LUN: %s" -msgstr "LUN의 보기가 없음: %s" - -#, python-format -msgid "" -"No volume on cluster with vserver %(vserver)s and junction path %(junction)s " -msgstr "" -"vserver가 %(vserver)s이고 접합 경로가 %(junction)s인 클러스터에 볼륨이 없음" - -msgid "No volume service(s) started successfully, terminating." -msgstr "볼륨 서비스가 시작되지 않아서 종료합니다. " - -msgid "No volume was found at CloudByte storage." -msgstr "CloudByte 스토리지에서 볼륨을 찾을 수 없습니다." - -msgid "No volume_type should be provided when creating test replica." -msgstr "테스트 복제본을 작성할 때 volume_type을 제공하지 않아야 합니다. " - -msgid "No volumes found in CloudByte storage." -msgstr "CloudByte 스토리지에서 볼륨을 찾을 수 없습니다." 
- -msgid "No weighed hosts available" -msgstr "사용 가능한 적합한 호스트가 없음" - -#, python-format -msgid "Not a valid string: %s" -msgstr "올바른 문자열이 아님: %s" - -msgid "Not a valid value for NaElement." -msgstr "NaElement의 올바른 값이 아닙니다." - -#, python-format -msgid "Not able to find a suitable datastore for the volume: %s." -msgstr "볼륨: %s에 적합한 데이터베이스를 찾을 수 없습니다." - -msgid "Not an rbd snapshot" -msgstr "rbd 스냅샷이 아님" - -#, python-format -msgid "Not authorized for image %(image_id)s." -msgstr "%(image_id)s 이미지에 대한 권한이 없습니다. " - -msgid "Not authorized." -msgstr "권한이 없습니다. " - -#, python-format -msgid "Not enough space on backend (%(backend)s)" -msgstr "백엔드(%(backend)s)에 공간이 충분하지 않음" - -msgid "Not enough storage space in the ZFS share to perform this operation." -msgstr "ZFS 공유에서 이 조작을 수행하는 데 필요한 스토리지 공간이 부족합니다." - -msgid "Not stored in rbd" -msgstr "rbd에 저장되지 않음" - -msgid "Nova returned \"error\" status while creating snapshot." -msgstr "스냅샷을 작성하는 동안 Nova에서 \"오류\" 상태를 리턴함. " - -msgid "Null response received from CloudByte's list filesystem." -msgstr "CloudByte의 목록 파일 시스템에서 널 응답을 수신했습니다." - -msgid "Null response received from CloudByte's list iscsi auth groups." -msgstr "CloudByte의 목록 iscsi 인증 그룹에서 널 응답이 수신되었습니다. " - -msgid "Null response received from CloudByte's list iscsi initiators." -msgstr "CloudByte의 목록 iscsi 개시자로부터 널 응답을 수신했습니다." - -msgid "Null response received from CloudByte's list volume iscsi service." -msgstr "CloudByte의 목록 볼륨 iscsi 서비스에서 널 응답을 수신했습니다." - -#, python-format -msgid "Null response received while creating volume [%s] at CloudByte storage." -msgstr "" -"CloudByte 스토리지에서 볼륨 [%s]을(를) 작성하는 중 널 응답을 수신했습니다." - -#, python-format -msgid "Null response received while deleting volume [%s] at CloudByte storage." -msgstr "" -"CloudByte 스토리지에서 볼륨[%s]을 삭제하는 중에 널 응답이 수신되었습니다. " - -#, python-format -msgid "" -"Null response received while querying for [%(operation)s] based job " -"[%(job)s] at CloudByte storage." 
-msgstr "" -"CloudByte 스토리지에서 [%(operation)s] 기반 작업 [%(job)s]에 대해 조회하는 중" -"에 널 응답이 수신되었습니다. " - -msgid "Object Count" -msgstr "오브젝트 카운트" - -msgid "Object Version" -msgstr "오브젝트 버전" - -msgid "Object is not a NetApp LUN." -msgstr "오브젝트가 NetApp LUN이 아닙니다." - -#, python-format -msgid "" -"On an Extend Operation, error adding volume to composite volume: " -"%(volumename)s." -msgstr "" -"확장 조작에서 컴포지트 볼륨 %(volumename)s에 볼륨을 추가하는 중에 오류가 발생" -"했습니다." - -msgid "One of the required inputs from host, port or scheme was not found." -msgstr "호스트, 포트 또는 스키마에서 필요한 입력 중 하나를 찾을 수 없습니다." - -#, python-format -msgid "" -"Only %(value)s %(verb)s request(s) can be made to %(uri)s every " -"%(unit_string)s." -msgstr "" -"%(value)s %(verb)s 요청만이 %(unit_string)s마다 %(uri)s에 적용될 수 있습니" -"다. " - -msgid "Only one limit can be set in a QoS spec." -msgstr "QoS 스펙에서는 하나의 한계만 설정할 수 있습니다. " - -msgid "" -"Only users with token scoped to immediate parents or root projects are " -"allowed to see its children quotas." -msgstr "" -"직속 상위 또는 루트 프로젝트로 범위가 지정된 토큰을 가진 사용자만 하위 할당량" -"을 볼 수 있습니다. " - -msgid "Only volumes managed by OpenStack can be unmanaged." -msgstr "OpenStack에서 관리하는 볼류만 관리를 취소할 수 있습니다." - -#, python-format -msgid "Operation failed with status=%(status)s. Full dump: %(data)s" -msgstr "조작이 상태=%(status)s과(와) 함께 실패했습니다. 전체 덤프: %(data)s" - -#, python-format -msgid "Operation not supported: %(operation)s." -msgstr "조작이 지원되지 않음: %(operation)s." - -msgid "Option gpfs_images_dir is not set correctly." -msgstr "gpfs_images_dir 옵션이 올바르게 설정되지 않았습니다. " - -msgid "Option gpfs_images_share_mode is not set correctly." -msgstr "gpfs_images_share_mode 옵션이 올바르게 설정되지 않았습니다. " - -msgid "Option gpfs_mount_point_base is not set correctly." -msgstr "gpfs_mount_point_base 옵션이 올바르게 설정되지 않았습니다." 
- -#, python-format -msgid "Originating %(res)s %(prop)s must be one of '%(vals)s' values" -msgstr "시작 %(res)s %(prop)s은(는) '%(vals)s' 값 중 하나여야 함" - -#, python-format -msgid "ParseException: %s" -msgstr "ParseException: %s" - -msgid "Partition name is None, please set smartpartition:partitionname in key." -msgstr "" -"파티션 이름이 None입니다. 키에서 smartpartition:partitionname을 설정하십시" -"오. " - -msgid "" -"Password or SSH private key is required for authentication: set either " -"san_password or san_private_key option." -msgstr "" -"인증에 비밀번호 또는 SSH 개인용 키가 필요합니다. san_password 또는 " -"san_private_key 옵션을 설정하십시오. " - -msgid "Path to REST server's certificate must be specified." -msgstr "REST 서버 인증서의 경로를 지정해야 합니다. " - -#, python-format -msgid "Please create %(pool_list)s pool in advance!" -msgstr "미리 %(pool_list)s 풀을 작성하십시오! " - -#, python-format -msgid "Please create %(tier_levels)s tier in pool %(pool)s in advance!" -msgstr "미리 풀 %(pool)s에서 %(tier_levels)s 티어를 작성하십시오! " - -msgid "Please specify a name for QoS specs." -msgstr "QoS 스펙에 대한 이름을 지정하십시오. " - -#, python-format -msgid "Policy doesn't allow %(action)s to be performed." -msgstr "정책이 %(action)s이(가) 수행되도록 허용하지 않습니다. " - -#, python-format -msgid "Pool %(poolNameInStr)s is not found." -msgstr "%(poolNameInStr)s 풀을 찾을 수 없습니다. " - -#, python-format -msgid "Pool %s does not exist in Nexenta Store appliance" -msgstr "풀 %s이(가) Nexenta Store 어플라이언스에 없음" - -#, python-format -msgid "Pool from volume['host'] %(host)s not found." -msgstr "volume['host'] %(host)s의 풀을 찾을 수 없습니다. " - -#, python-format -msgid "Pool from volume['host'] failed with: %(ex)s." -msgstr "다음과 함께 volume['host']의 풀이 실패함: %(ex)s." - -msgid "Pool is not available in the volume host field." -msgstr "볼륨 호스트 필드에서 풀을 사용할 수 없습니다." - -msgid "Pool is not available in the volume host fields." -msgstr "볼륨 호스트 필드에서 풀을 사용할 수 없습니다." - -#, python-format -msgid "Pool with name %(pool)s wasn't found in domain %(domain)s." -msgstr "이름이 %(pool)s인 풀을 도메인 %(domain)s에서 찾을 수 없습니다. 
" - -#, python-format -msgid "Pool with name %(pool_name)s wasn't found in domain %(domain_id)s." -msgstr "" -"이름이 %(pool_name)s인 풀을 도메인 %(domain_id)s에서 찾을 수 없습니다. " - -#, python-format -msgid "" -"Pool: %(poolName)s. is not associated to storage tier for fast policy " -"%(fastPolicy)s." -msgstr "" -"%(poolName)s 풀이 fast 정책 %(fastPolicy)s에 대한 스토리지 티어와 연관되어 있" -"지 않습니다." - -#, python-format -msgid "PoolName must be in the file %(fileName)s." -msgstr "PoolName은 파일 %(fileName)s에 있어야 합니다. " - -#, python-format -msgid "Pools %s does not exist" -msgstr "Pools %s이(가) 존재하지 않음" - -msgid "Pools name is not set." -msgstr "풀 이름이 설정되지 않았습니다. " - -#, python-format -msgid "Primary copy status: %(status)s and synchronized: %(sync)s." -msgstr "1차 사본 상태: %(status)s 및 동기화됨: %(sync)s." - -msgid "Project ID" -msgstr "프로젝트 ID" - -#, python-format -msgid "Project quotas are not properly setup for nested quotas: %(reason)s." -msgstr "중첩된 할당량에 맞게 프로젝트 할당량이 설정되지 않음: %(reason)s." - -msgid "Protection Group not ready." -msgstr "보호 그룹이 준비되지 않았습니다." - -#, python-format -msgid "" -"Protocol %(storage_protocol)s is not supported for storage family " -"%(storage_family)s." -msgstr "" -"프로토콜 %(storage_protocol)s이(가) 스토리지 제품군%(storage_family)s입니다." - -msgid "Provided backup record is missing an id" -msgstr "제공된 백업 레코드에 ID가 누락됨" - -#, python-format -msgid "" -"Provided snapshot status %(provided)s not allowed for snapshot with status " -"%(current)s." -msgstr "" -"제공된 스냅샷 상태 %(provided)s이(가) %(current)s 상태의 스냅샷에 허용되지 않" -"습니다. " - -#, python-format -msgid "" -"Provider information w.r.t CloudByte storage was not found for OpenStack " -"volume [%s]." -msgstr "" -"OpenStack 볼륨 [%s]의 제공자 정보 w.r.t CloudByte 스토리지를 찾을 수 없습니" -"다." - -#, python-format -msgid "Pure Storage Cinder driver failure: %(reason)s" -msgstr "Pure Storage Cinder 드라이버 실패: %(reason)s" - -#, python-format -msgid "QoS Specs %(specs_id)s already exists." -msgstr "%(specs_id)s QoS 스펙이 이미 존재합니다. 
" - -#, python-format -msgid "QoS Specs %(specs_id)s is still associated with entities." -msgstr "QoS 스펙 %(specs_id)s이(가) 엔티티와 연관되어 있습니다. " - -#, python-format -msgid "QoS config is wrong. %s must > 0." -msgstr "QoS 구성이 잘못되었습니다. %s이(가) 0보다 커야 합니다." - -#, python-format -msgid "" -"QoS policy must specify for IOTYPE and another qos_specs, QoS policy: " -"%(qos_policy)s." -msgstr "" -"IOTYPE 및 다른 qos_specs에 대해 QoS 정책을 지정해야 함, QoS 정책: " -"%(qos_policy)s." - -#, python-format -msgid "" -"QoS policy must specify for IOTYPE: 0, 1, or 2, QoS policy: %(qos_policy)s " -msgstr "" -"IOTYPE의 QoS 정책을 지정해야 함: 0, 1 또는 2, QoS 정책: %(qos_policy)s " - -#, python-format -msgid "" -"QoS policy upper_limit and lower_limit conflict, QoS policy: %(qos_policy)s." -msgstr "QoS 정책 upper_limit 및 lower_limit 충돌, QoS 정책: %(qos_policy)s." - -#, python-format -msgid "QoS spec %(specs_id)s has no spec with key %(specs_key)s." -msgstr "QoS 스펙 %(specs_id)s에 %(specs_key)s 키를 갖는 스펙이 없습니다. " - -msgid "QoS specs are not supported on this storage family and ONTAP version." -msgstr "QoS 스펙이 이 스토리지 제품군 및 ONTAP 버전에서 지원되지 않습니다. " - -msgid "Qos specs still in use." -msgstr "Qos 스펙을 아직 사용 중입니다. " - -msgid "" -"Query by service parameter is deprecated. Please use binary parameter " -"instead." -msgstr "" -"서비스 매개변수에 의한 조회가 더 이상 사용되지 않습니다. 2진 매개변수를 사용" -"하십시오. 삭제 중입니다. " - -msgid "Query resource pool error." -msgstr "자원 풀 조회 오류입니다. " - -#, python-format -msgid "Quota %s limit must be equal or greater than existing resources." -msgstr "할당량 %s 한계는 기존 자원 이상이어야 합니다. " - -#, python-format -msgid "Quota class %(class_name)s could not be found." -msgstr "%(class_name)s 할당량 클래스를 찾을 수 없습니다. " - -msgid "Quota could not be found" -msgstr "할당량을 찾을 수 없음" - -#, python-format -msgid "Quota exceeded for resources: %(overs)s" -msgstr "자원에 대한 할당량 초과: %(overs)s" - -#, python-format -msgid "Quota exceeded: code=%(code)s" -msgstr "할당량 초과: 코드=%(code)s" - -#, python-format -msgid "Quota for project %(project_id)s could not be found." 
-msgstr "%(project_id)s 프로젝트에 대한 할당량을 찾을 수 없습니다. " - -#, python-format -msgid "" -"Quota limit invalid for project '%(proj)s' for resource '%(res)s': limit of " -"%(limit)d is less than in-use value of %(used)d" -msgstr "" -"'%(res)s' 자원의 프로젝트 '%(proj)s'에 대한 할당량 한계가 올바르지 않음: " -"%(limit)d의 한계가 사용 중인 값 %(used)d보다 적음" - -#, python-format -msgid "Quota reservation %(uuid)s could not be found." -msgstr "할당 예약 %(uuid)s을(를) 찾을 수 없습니다. " - -#, python-format -msgid "Quota usage for project %(project_id)s could not be found." -msgstr "%(project_id)s 프로젝트에 대한 할당 사용량을 찾을 수 없습니다. " - -#, python-format -msgid "RBD diff op failed - (ret=%(ret)s stderr=%(stderr)s)" -msgstr "RBD diff op 실패 - (ret=%(ret)s stderr=%(stderr)s)" - -#, python-format -msgid "REST %(proxy_ver)s hpelefthandclient %(rest_ver)s" -msgstr "REST %(proxy_ver)s hpelefthandclient %(rest_ver)s" - -msgid "REST server IP must by specified." -msgstr "REST 서버 IP를 지정해야 합니다. " - -msgid "REST server password must by specified." -msgstr "REST 서버 비밀번호를 지정해야 합니다. " - -msgid "REST server username must by specified." -msgstr "REST 서버 사용자 이름을 지정해야 합니다. " - -msgid "RPC Version" -msgstr "RPC 버전" - -msgid "RPC server response is incomplete" -msgstr "RPC 서버 응답이 완료되지 않음" - -msgid "Raid did not have MCS Channel." -msgstr "Raid에 MCS 채널이 없습니다. " - -#, python-format -msgid "Received error string: %s" -msgstr "오류 문자열이 수신됨: %s" - -msgid "Reference must be for an unmanaged snapshot." -msgstr "참조는 관리되지 않은 스냅샷용이어야 합니다." - -msgid "Reference must be for an unmanaged virtual volume." -msgstr "참조는 관리되지 않는 가상 볼륨에 대한 것이어야 합니다." - -msgid "Reference must be the name of an unmanaged snapshot." -msgstr "참조는 관리되지 않은 스냅샷의 이름이어야 합니다." - -msgid "Reference must be the volume name of an unmanaged virtual volume." -msgstr "참조는 관리되지 않는 가상 볼륨의 볼륨 이름이어야 합니다. " - -msgid "Reference must contain either source-name or source-id element." -msgstr "참조에는 source-name 또는 source-id 요소가 포함되어야 합니다. " - -msgid "Reference must contain source-id or source-name element." 
-msgstr "참조에는 source-id 또는 source-name 요소가 포함되어야 합니다. " - -msgid "Reference must contain source-id or source-name key." -msgstr "참조에는 source-id 또는 source-name 키가 포함되어 있어야 합니다. " - -msgid "Reference must contain source-id or source-name." -msgstr "참조에는 source-id 또는 source-name이 포함되어 있어야 합니다. " - -msgid "Reference must contain source-id." -msgstr "참조에는 source-id가 있어야 합니다." - -msgid "Reference must contain source-name element." -msgstr "참조에는 source-name 요소가 포함되어야 합니다." - -msgid "Reference must contain source-name or source-id." -msgstr "참조에는 source-name 또는 source-id가 있어야 합니다." - -msgid "Reference must contain source-name." -msgstr "참조에는 source-name이 포함되어야 합니다. " - -msgid "Reference to volume to be managed must contain source-name." -msgstr "관리할 볼륨에 대한 참조에 source-name이 포함되어야 합니다." - -#, python-format -msgid "Reference to volume: %s to be managed must contain source-name." -msgstr "관리할 볼륨: %s에 대한 참조에 source-name이 포함되어야 합니다." - -#, python-format -msgid "" -"Refusing to migrate volume ID: %(id)s. Please check your configuration " -"because source and destination are the same Volume Group: %(name)s." -msgstr "" -"볼륨 ID: %(id)s의 마이그레이션을 거부하고 있습니다. 소스 및 대상이 같은 볼륨 " -"그룹이므로 구성을 확인하십시오. %(name)s." - -msgid "Remote pool cannot be found." -msgstr "원격 풀을 찾을 수 없습니다." - -msgid "Remove CHAP error." -msgstr "CHAP 제거 오류입니다. " - -msgid "Remove fc from host error." -msgstr "호스트에서 fc 제거 오류입니다. " - -msgid "Remove host from array error." -msgstr "배열에서 호스트 제거 오류입니다. " - -msgid "Remove host from hostgroup error." -msgstr "호스트 그룹에서 호스트 제거 오류입니다. " - -msgid "Remove iscsi from host error." -msgstr "호스트에서 iscsi 제거 오류입니다. " - -msgid "Remove lun from QoS error." -msgstr "QoS에서 lun 제거 오류." - -msgid "Remove lun from cache error." -msgstr "캐시에서 lun 제거 오류입니다. " - -msgid "Remove lun from partition error." -msgstr "파티션에서 lun 제거 오류입니다. " - -msgid "Remove port from port group error." -msgstr "포트 그룹에서 포트 제거 오류." - -msgid "Remove volume export failed." -msgstr "볼륨 내보내기 제거에 실패했습니다. 
" - -msgid "Rename lun on array error." -msgstr "배열에서 lun 이름 바꾸기 오류입니다. " - -msgid "Rename snapshot on array error." -msgstr "배열에서 스냅샷 이름 변경 오류." - -#, python-format -msgid "Replication %(name)s to %(ssn)s failed." -msgstr "%(ssn)s에 %(name)s을(를) 복제하는 데 실패했습니다." - -#, python-format -msgid "Replication Service Capability not found on %(storageSystemName)s." -msgstr "%(storageSystemName)s에서 복제 서비스 기능을 찾을 수 없습니다. " - -#, python-format -msgid "Replication Service not found on %(storageSystemName)s." -msgstr "%(storageSystemName)s에서 복제 서비스를 찾을 수 없습니다. " - -msgid "Replication not allowed yet." -msgstr "복제가 아직 허용되지 않습니다." - -msgid "Request body and URI mismatch" -msgstr "요청 본문 및 URI 불일치" - -msgid "Request body contains too many items" -msgstr "요청 본문에 너무 많은 항목이 들어있음" - -msgid "Request body contains too many items." -msgstr "요청 본문이 너무 많은 항목을 포함합니다." - -msgid "Request body empty" -msgstr "요청 본문이 비어 있음" - -#, python-format -msgid "Request to Datera cluster returned bad status: %(status)s | %(reason)s" -msgstr "" -"Datera 클러스터에 대한 요청이 잘못된 상태를 리턴함: %(status)s | %(reason)s" - -#, python-format -msgid "" -"Requested backup exceeds allowed Backup gigabytes quota. Requested " -"%(requested)sG, quota is %(quota)sG and %(consumed)sG has been consumed." -msgstr "" -"요청된 백업이 허용된 백업 기가바이트 할당량을 초과합니다. 요청된 크기는 " -"%(requested)sG이고 할당량은 %(quota)sG이며 %(consumed)sG가 이용되었습니다." - -#, python-format -msgid "" -"Requested volume or snapshot exceeds allowed %(name)s quota. Requested " -"%(requested)sG, quota is %(quota)sG and %(consumed)sG has been consumed." -msgstr "" -"요청된 볼륨 또는 스냅샷이 허용된 %(name)s 할당량을 초과합니다. 요청된 크기는 " -"%(requested)sG이고 할당량은 %(quota)sG이며 %(consumed)sG가 이용되었습니다." - -#, python-format -msgid "" -"Requested volume size %(size)d is larger than maximum allowed limit " -"%(limit)d." -msgstr "" -"요청된 볼륨 크기 %(size)d이(가) 허용된 최대 한계 %(limit)d보다 큽니다. 
" - -msgid "Required configuration not found" -msgstr "필수 구성을 찾을 수 없음" - -#, python-format -msgid "Required flag %s is not set" -msgstr "필수 플래그 %s이(가) 설정되지 않음" - -#, python-format -msgid "" -"Reset backup status aborted, the backup service currently configured " -"[%(configured_service)s] is not the backup service that was used to create " -"this backup [%(backup_service)s]." -msgstr "" -"백업 상태 재설정이 중단되었습니다. 현재 구성된 백업 서비스 " -"[%(configured_service)s]은(는) 이 백업 [%(backup_service)s]을(를) 작성하기 위" -"해 사용된 백업 서비스가 아닙니다." - -#, python-format -msgid "Resizing clone %s failed." -msgstr "복제본 %s 크기 조정에 실패했습니다. " - -msgid "Resizing image file failed." -msgstr "이미지 파일의 크기 조정이 실패함. " - -msgid "Resource could not be found." -msgstr "자원을 찾을 수 없습니다. " - -msgid "Resource not ready." -msgstr "자원이 준비되지 않았습니다. " - -#, python-format -msgid "Response error - %s." -msgstr "응답 오류 - %s." - -msgid "Response error - The storage-system is offline." -msgstr "응답 오류 - 스토리지 시스템이 오프라인입니다." - -#, python-format -msgid "Response error code - %s." -msgstr "응답 오류 코드 - %s." - -msgid "RestURL is not configured." -msgstr "RestURL이 구성되지 않았습니다." - -#, python-format -msgid "" -"Restore backup aborted, expected volume status %(expected_status)s but got " -"%(actual_status)s." -msgstr "" -"백업 복원 중단, 예상 볼륨 상태는 %(expected_status)s이지만 %(actual_status)s" -"을(를) 가져옴" - -#, python-format -msgid "" -"Restore backup aborted, the backup service currently configured " -"[%(configured_service)s] is not the backup service that was used to create " -"this backup [%(backup_service)s]." -msgstr "" -"백업 복원이 중단되었습니다. 현재 구성된 백업 서비스 [%(configured_service)s]" -"은(는) 이 백업 [%(backup_service)s]을(를) 작성하기 위해 사용된 백업 서비스가 " -"아닙니다." - -#, python-format -msgid "" -"Restore backup aborted: expected backup status %(expected_status)s but got " -"%(actual_status)s." 
-msgstr "" -"백업 복원 중단: 예상 백업 상태는 %(expected_status)s이지만 %(actual_status)s" -"을(를) 가져옴" - -#, python-format -msgid "" -"Retrieved a different amount of SolidFire volumes for the provided Cinder " -"snapshots. Retrieved: %(ret)s Desired: %(des)s" -msgstr "" -"제공된 Cinder 스냅샷에 대해 다른 크기의 SolidFire 볼륨을 검색했습니다. 검색: " -"%(ret)s 필요: %(des)s" - -#, python-format -msgid "" -"Retrieved a different amount of SolidFire volumes for the provided Cinder " -"volumes. Retrieved: %(ret)s Desired: %(des)s" -msgstr "" -"제공된 Cinder 볼륨에 대해 다른 크기의 SolidFire 볼륨을 검색했습니다. 검색: " -"%(ret)s 필요: %(des)s" - -#, python-format -msgid "Retry count exceeded for command: %s" -msgstr "명령의 재시도 수 초과: %s" - -msgid "Retryable SolidFire Exception encountered" -msgstr "재시도할 수 있는 SolidFire 예외가 발생함" - -msgid "Retype requires migration but is not allowed." -msgstr "다시 입력에 마이그레이션이 필요하지만 허용되지 않습니다." - -#, python-format -msgid "Rolling back %(volumeName)s by deleting it." -msgstr "%(volumeName)s을(를) 삭제하여 롤백하는 중입니다. " - -#, python-format -msgid "" -"Running Cinder with a VMware vCenter version less than %s is not allowed." -msgstr "VMware vCenter 버전이 %s보다 낮은 Cinder는 실행할 수 없습니다." - -msgid "SAN product is not configured." -msgstr "SAN 제품이 구성되지 않았습니다." - -msgid "SAN protocol is not configured." -msgstr "SAN 프로토콜이 구성되지 않았습니다." - -#, python-format -msgid "SMBFS config 'smbfs_oversub_ratio' invalid. Must be > 0: %s" -msgstr "" -"SMBFS 구성 'smbfs_oversub_ratio'가 올바르지 않습니다. 0보다 커야 함: %s" - -#, python-format -msgid "SMBFS config 'smbfs_used_ratio' invalid. Must be > 0 and <= 1.0: %s" -msgstr "" -"SMBFS 구성 'smbfs_used_ratio'가 올바르지 않습니다. 0보다 크고 1.00 이하여야 " -"함: %s" - -#, python-format -msgid "SMBFS config file at %(config)s doesn't exist." -msgstr "SMBFS 구성 파일이 %(config)s에 없습니다." - -msgid "SMBFS config file not set (smbfs_shares_config)." -msgstr "SMBFS 구성 파일이 설정되지 않았습니다(smbfs_shares_config)." 
- -#, python-format -msgid "SSH Command failed after '%(total_attempts)r' attempts : '%(command)s'" -msgstr "" -"'%(command)s' 명령을 '%(total_attempts)r'번 시도한 후에 SSH 명령이 실패했습니" -"다. " - -#, python-format -msgid "SSH command injection detected: %(command)s" -msgstr "SSH 명령 인젝션 발견됨: %(command)s" - -#, python-format -msgid "SSH connection failed for %(fabric)s with error: %(err)s" -msgstr "오류: %(err)s과(와) 함께 %(fabric)s에 대한 SSH 연결 실패 " - -#, python-format -msgid "SSL Certificate expired on %s." -msgstr "SSL 인증서가 %s에 만료되었습니다. " - -#, python-format -msgid "SSL error: %(arg)s." -msgstr "SSL 오류: %(arg)s." - -#, python-format -msgid "Scheduler Host Filter %(filter_name)s could not be found." -msgstr "스케줄러 호스트 필터 %(filter_name)s을(를) 찾을 수 없습니다. " - -#, python-format -msgid "Scheduler Host Weigher %(weigher_name)s could not be found." -msgstr "Scheduler Host Weigher %(weigher_name)s을(를) 찾을 수 없습니다. " - -#, python-format -msgid "" -"Secondary copy status: %(status)s and synchronized: %(sync)s, sync progress " -"is: %(progress)s%%." -msgstr "" -"2차 사본 상태: %(status)s 및 동기화됨: %(sync)s, 동기화 프로세스는 " -"%(progress)s%%입니다. " - -#, python-format -msgid "" -"Secondary id can not be the same as primary array, backend_id = " -"%(secondary)s." -msgstr "보조 id는 기본 배열과 같지 않아야 합니다. backend_id = %(secondary)s." - -#, python-format -msgid "SerialNumber must be in the file %(fileName)s." -msgstr "SerialNumber는 파일 %(fileName)s에 있어야 합니다. " - -#, python-format -msgid "Service %(service)s on host %(host)s removed." -msgstr "호스트 %(host)s의 서비스 %(service)s이(가) 제거되었습니다. " - -#, python-format -msgid "Service %(service_id)s could not be found on host %(host)s." -msgstr "호스트 %(host)s에서 서비스 %(service_id)s을(를) 찾을 수 없습니다." - -#, python-format -msgid "Service %(service_id)s could not be found." -msgstr "%(service_id)s 서비스를 찾을 수 없습니다. " - -msgid "Service is too old to fulfil this request." -msgstr "서비스가 너무 오래되어 이 요청을 이행할 수 없습니다." - -msgid "Service is unavailable at this time." -msgstr "서비스가 지금 사용 불가능합니다. 
" - -msgid "Set pair secondary access error." -msgstr "쌍 보조 액세스 설정 오류." - -msgid "Sets thin provisioning." -msgstr "씬 프로비저닝을 설정합니다. " - -msgid "" -"Setting LUN QoS policy group is not supported on this storage family and " -"ONTAP version." -msgstr "" -"이 스토리지 제품군과 ONTAP 버전에서는 LUN QoS 정책 그룹 설정이지원되지 않습니" -"다. " - -msgid "" -"Setting file qos policy group is not supported on this storage family and " -"ontap version." -msgstr "" -"이 스토리지 제품군과 ONTAP 버전에서는 파일 qos 정책 그룹 설정이지원되지 않습" -"니다. " - -#, python-format -msgid "" -"Share at %(dir)s is not writable by the Cinder volume service. Snapshot " -"operations will not be supported." -msgstr "" -"%(dir)s에서의 공유를 Cinder 볼륨 서비스에서 쓸 수 없습니다. 스냅샷 조작이 지" -"원되지 않습니다." - -#, python-format -msgid "Sheepdog I/O Error, command was: \"%s\"." -msgstr "Sheepdog I/O 오류, 명령: \"%s\"." - -msgid "" -"Show operations can only be made to projects in the same hierarchy of the " -"project in which users are scoped to." -msgstr "" -"표시 조작은 사용자가 범위 지정되는 프로젝트의 동일한 계층에 있는 프로젝트에 " -"대해서만 작성될 수 있습니다. " - -msgid "Size" -msgstr "크기" - -#, python-format -msgid "Size for volume: %s not found, cannot secure delete." -msgstr "" -"볼륨 %s에 대한 크기를 찾을 수 없습니다. 보안 삭제를 수행할 수 없습니다." - -#, python-format -msgid "" -"Size is %(image_size)dGB and doesn't fit in a volume of size " -"%(volume_size)dGB." -msgstr "" -"크기는 %(image_size)dGB이며 크기 %(volume_size)dGB의 볼륨에 맞지 않습니다." - -#, python-format -msgid "" -"Size of specified image %(image_size)sGB is larger than volume size " -"%(volume_size)sGB." -msgstr "" -"지정된 이미지 크기 %(image_size)sGB는 다음 볼륨 크기보다 커야 합니다: " -"%(volume_size)sGB." - -#, python-format -msgid "" -"Snapshot %(id)s has been asked to be deleted while waiting for it to become " -"available. Perhaps a concurrent request was made." -msgstr "" -"사용 가능해질 때까지 기다리는 동안 스냅샷 %(id)s을(를) 삭제하도록 요청되었습" -"니다. 동시에 요청되었을 가능성이 있습니다." - -#, python-format -msgid "" -"Snapshot %(id)s was found in state %(state)s rather than 'deleting' during " -"cascade delete." 
-msgstr "" -"단계식 삭제 중에 \"삭제 중\"이 아니라 %(state)s 상태의 스냅샷 %(id)s이(가) 발" -"견되었습니다." - -#, python-format -msgid "Snapshot %(snapshot_id)s could not be found." -msgstr "%(snapshot_id)s 스냅샷을 찾을 수 없습니다. " - -#, python-format -msgid "Snapshot %(snapshot_id)s has no metadata with key %(metadata_key)s." -msgstr "" -"%(snapshot_id)s 스냅샷에 %(metadata_key)s 키를 갖는 메타데이터가 없습니다. " - -#, python-format -msgid "Snapshot '%s' doesn't exist on array." -msgstr "배열에 스냅샷 '%s'이(가) 없습니다." - -#, python-format -msgid "" -"Snapshot cannot be created because volume %(vol_id)s is not available, " -"current volume status: %(vol_status)s." -msgstr "" -"볼륨 %(vol_id)s이(가) 사용 가능 상태가 아니어서 스냅샷을 작성할 수 없습니다. " -"현재 볼륨 상태: %(vol_status)s." - -msgid "Snapshot cannot be created while volume is migrating." -msgstr "볼륨 마이그레이션 중에 스냅샷을 작성할 수 없습니다. " - -msgid "Snapshot of secondary replica is not allowed." -msgstr "2차 복제본의 스냅샷이 허용되지 않습니다." - -#, python-format -msgid "Snapshot of volume not supported in state: %s." -msgstr "볼륨의 스냅샷이 다음 상태에서 지원되지 않음: %s" - -#, python-format -msgid "Snapshot res \"%s\" that is not deployed anywhere?" -msgstr "배치되지 않은 스냅샷 res \"%s\"이(가) 있습니까? " - -#, python-format -msgid "Snapshot status %(cur)s not allowed for update_snapshot_status" -msgstr "" -"update_snapshot_status에 대해 스냅샷 상태 %(cur)s이(가) 허용되지 않습니다. " - -msgid "Snapshot status must be \"available\" to clone." -msgstr "스냅샷 상태가 복제 \"사용 가능\"이어야 합니다. " - -#, python-format -msgid "" -"Snapshot to be backed up must be available, but the current status is \"%s\"." -msgstr "백업할 스냅샷이 사용 가능해야 하지만 현재 상태가 \"%s\"입니다. " - -#, python-format -msgid "Snapshot with id of %s could not be found." -msgstr "id가 %s인 스냅샷을 찾을 수 없습니다." 
- -#, python-format -msgid "" -"Snapshot='%(snap)s' does not exist in base image='%(base)s' - aborting " -"incremental backup" -msgstr "" -"스냅샷='%(snap)s'이(가) 기본 이미지='%(base)s'에 존재하지 않음 - 중단 중증분 " -"백업" - -#, python-format -msgid "Snapshots are not supported for this volume format: %s" -msgstr "스냅샷이 이 볼륨 형식에 대해 지원되지 않음: %s" - -#, python-format -msgid "Socket error: %(arg)s." -msgstr "소켓 오류: %(arg)s." - -msgid "SolidFire Cinder Driver exception" -msgstr "SolidFire Cinder 드라이버 예외" - -msgid "Sort direction array size exceeds sort key array size." -msgstr "정렬 방향 배열 크기가 정렬 키 배열 크기를 초과합니다. " - -msgid "Source CG is empty. No consistency group will be created." -msgstr "소스 CG가 비어 있습니다. 일관성 그룹이 작성되지 않습니다. " - -msgid "Source host details not found." -msgstr "소스 호스트 세부사항을 찾을 수 없습니다." - -msgid "Source volume device ID is required." -msgstr "소스 볼륨 디바이스 ID가 필요합니다. " - -msgid "Source volume not mid-migration." -msgstr "소스 볼륨이 마이그레이션에 포함되지 않음 " - -msgid "SpaceInfo returned byarray is invalid" -msgstr "배열에서 리턴한 SpaceInfo가 올바르지 않음" - -#, python-format -msgid "" -"Specified host to map to volume %(vol)s is in unsupported host group with " -"%(group)s." -msgstr "" -"볼륨 %(vol)s에 맵핑할 지정된 호스트가 %(group)s이(가) 포함된 지원되지 않는 호" -"스트 그룹에 있습니다. " - -msgid "Specified logical volume does not exist." -msgstr "지정된 논리적 볼륨이 존재하지 않습니다. " - -#, python-format -msgid "Specified snapshot group with id %s could not be found." -msgstr "id가 %s인 지정된 스냅샷 그룹을 찾을 수 없습니다. " - -msgid "Specify a password or private_key" -msgstr "비밀번호 또는 private_key 지정" - -msgid "Specify san_password or san_private_key" -msgstr "san_password 또는 san_private_key 지정" - -msgid "" -"Specify volume type name, description, is_public or a combination thereof." -msgstr "볼륨 유형 이름, 설명, is_public 또는 조합을 지정하십시오." - -msgid "Split pair error." -msgstr "쌍 분할 오류." - -msgid "Split replication failed." -msgstr "복제 분할에 실패했습니다." - -msgid "Start LUNcopy error." -msgstr "LUNcopy 시작 오류입니다. 
" - -msgid "State" -msgstr "상태" - -#, python-format -msgid "State of node is wrong. Current state is %s." -msgstr "노드의 세부 상태가 잘못되었습니다. 현재 세부 상태는 %s입니다. " - -msgid "Status" -msgstr "상태" - -msgid "Stop snapshot error." -msgstr "스냅샷 중지 오류입니다. " - -#, python-format -msgid "Storage Configuration Service not found on %(storageSystemName)s." -msgstr "%(storageSystemName)s에서 스토리지 구성 서비스를 찾을 수 없습니다. " - -#, python-format -msgid "Storage HardwareId mgmt Service not found on %(storageSystemName)s." -msgstr "" -"%(storageSystemName)s에서 스토리지 하드웨어 ID mgmt 서비스를 찾을 수 없습니" -"다. " - -#, python-format -msgid "Storage Profile %s not found." -msgstr "스토리지 프로파일 %s을(를) 찾을 수 없습니다. " - -#, python-format -msgid "Storage Relocation Service not found on %(storageSystemName)s." -msgstr "" -"%(storageSystemName)s에서 스토리지 위치 재지정 서비스를 찾을 수 없습니다. " - -#, python-format -msgid "Storage family %s is not supported." -msgstr "스토리지 제품군 %s이(가) 지원되지 않습니다. " - -#, python-format -msgid "Storage group %(storageGroupName)s was not deleted successfully" -msgstr "스토리지 그룹 %(storageGroupName)s이(가) 삭제되지 않음" - -#, python-format -msgid "Storage host %(svr)s not detected, verify name" -msgstr "스토리지 호스트 %(svr)s이(가) 발견되지 않음, 이름 확인" - -msgid "Storage pool is not configured." -msgstr "스토리지 풀이 구성되지 않았습니다." - -#, python-format -msgid "Storage profile: %(storage_profile)s not found." -msgstr "스토리지 프로파일: %(storage_profile)s을(를) 찾을 수 없습니다." - -msgid "Storage resource could not be found." -msgstr "스토리지 자원을 찾을 수 없습니다. " - -msgid "Storage system id not set." -msgstr "스토리지 시스템 ID가 설정되지 않았습니다." - -#, python-format -msgid "Storage system not found for pool %(poolNameInStr)s." -msgstr "%(poolNameInStr)s 풀에 대한 스토리지 시스템을 찾을 수 없습니다. " - -#, python-format -msgid "StorageSystem %(array)s is not found." -msgstr "스토리지 시스템 %(array)s을(를) 찾을 수 없습니다." - -#, python-format -msgid "" -"Sum of child usage '%(sum)s' is greater than free quota of '%(free)s' for " -"project '%(proj)s' for resource '%(res)s'. 
Please lower the limit or usage " -"for one or more of the following projects: '%(child_ids)s'" -msgstr "" -"하위 사용량 합계 '%(sum)s'이(가) '%(res)s' 자원의 '%(proj)s' 프로젝트에 사용 " -"가능한 할당량 '%(free)s'보다 큽니다. 하나 이상의 '%(child_ids)s' 프로젝트에 " -"대한 한계 또는 사용량을 낮추십시오." - -msgid "Switch over pair error." -msgstr "쌍 전환 오류." - -msgid "Sync pair error." -msgstr "쌍 동기화 오류." - -#, python-format -msgid "System %(id)s found with bad password status - %(pass_status)s." -msgstr "시스템 %(id)s에 잘못된 비밀번호 상태가 있음 - %(pass_status)s." - -#, python-format -msgid "System %(id)s found with bad status - %(status)s." -msgstr "%(id)s 시스템에 잘못된 상태가 있음 - %(status)s." - -msgid "System does not support compression." -msgstr "시스템이 압축을 지원하지 않습니다. " - -msgid "System is busy, retry operation." -msgstr "시스템을 사용 중입니다. 조작을 재시도하십시오. " - -#, python-format -msgid "" -"TSM [%(tsm)s] was not found in CloudByte storage for account [%(account)s]." -msgstr "" -"TSM [%(tsm)s]이(가) 계정 [%(account)s]에 대한 CloudByte 스토리지에 없습니다. " - -msgid "Target volume type is still in use." -msgstr "대상 볼륨 유형이 아직 사용 중입니다." - -msgid "Terminate connection failed" -msgstr "연결 종료 실패" - -msgid "Terminate connection unable to connect to backend." -msgstr "연결 종료에서 백엔드에 연결하지 못했습니다." - -#, python-format -msgid "Terminate volume connection failed: %(err)s" -msgstr "볼륨 연결 종료 실패: %(err)s" - -#, python-format -msgid "The %(type)s %(id)s source to be replicated was not found." -msgstr "복제할 %(type)s %(id)s 소스를 찾을 수 없습니다." - -msgid "" -"The 'sort_key' and 'sort_dir' parameters are deprecated and cannot be used " -"with the 'sort' parameter." -msgstr "" -"'sort_key'와 'sort_dir' 매개변수는 더 이상 사용되지 않는 항목이므로'sort' 매" -"개변수에 이를 사용할 수 없습니다. " - -msgid "The EQL array has closed the connection." -msgstr "EQL 배열에서 연결을 종료했습니다." - -#, python-format -msgid "" -"The GPFS filesystem %(fs)s is not at the required release level. Current " -"level is %(cur)s, must be at least %(min)s." -msgstr "" -"GPFS 파일 시스템 %(fs)s이(가) 필수 릴리스 레벨이 아닙니다. 현재 레벨은 " -"%(cur)s이고, 최소 %(min)s이어야 합니다. 
" - -msgid "The IP Address was not found." -msgstr "IP 주소를 찾을 수 없습니다." - -#, python-format -msgid "" -"The WebDAV request failed. Reason: %(msg)s, Return code/reason: %(code)s, " -"Source Volume: %(src)s, Destination Volume: %(dst)s, Method: %(method)s." -msgstr "" -"WebDAV 요청 실패. 이유: %(msg)s, 리턴 코드/이유: %(code)s,소스 볼륨: " -"%(src)s, 대상 볼륨: %(dst)s, 메소드: %(method)s." - -msgid "" -"The above error may show that the database has not been created.\n" -"Please create a database using 'cinder-manage db sync' before running this " -"command." -msgstr "" -"위의 오류는 데이터베이스가 작성되지 않았음을 표시할 수 있습니다.\n" -"이 명령을 실행하기 전에 'cinder-manage db sync'를 사용하여 데이터베이스를 작" -"성하십시오." - -#, python-format -msgid "" -"The array does not support the storage pool setting for SLO %(slo)s and " -"workload %(workload)s. Please check the array for valid SLOs and workloads." -msgstr "" -"배열에서 SLO %(slo)s 및 워크로드 %(workload)s의 스토리지 풀 설정을 지원하지 " -"않습니다. 올바른 SLO 및 워크로드의 배열을 확인하십시오." - -msgid "" -"The back-end where the volume is created does not have replication enabled." -msgstr "볼륨이 작성된 백엔드에서 복제가 사용되지 않았습니다." - -#, python-format -msgid "" -"The command %(cmd)s failed. (ret: %(ret)s, stdout: %(out)s, stderr: %(err)s)" -msgstr "" -"명령 %(cmd)s에 실패했습니다.(ret: %(ret)s, stdout: %(out)s, stderr: %(err)s)" - -msgid "The copy should be primary or secondary" -msgstr "사본은 1차 또는 2차여야 함" - -#, python-format -msgid "" -"The creation of a logical device could not be completed. (LDEV: %(ldev)s)" -msgstr "논리 디바이스의 작성을 완료할 수 없습니다.(LDEV: %(ldev)s)" - -msgid "The decorated method must accept either a volume or a snapshot object" -msgstr "데코레이션된 메소드는 볼륨 또는 스냅샷 오브젝트를 승인해야 함" - -#, python-format -msgid "The device in the path %(path)s is unavailable: %(reason)s" -msgstr "경로 %(path)s에 있는 디바이스를 사용할 수 없음: %(reason)s" - -#, python-format -msgid "The end time (%(end)s) must be after the start time (%(start)s)." -msgstr "종료 시간(%(end)s)은 시작 시간(%(start)s) 이후여야 합니다." - -#, python-format -msgid "The extraspec: %(extraspec)s is not valid." 
-msgstr "추가 스펙: %(extraspec)s이(가) 올바르지 않습니다. " - -#, python-format -msgid "The failed-over volume could not be deleted: %s" -msgstr "장애 복구된 볼륨을 삭제할 수 없음: %s" - -#, python-format -msgid "The following elements are required: %s" -msgstr "다음 요소가 필요함: %s" - -msgid "The host group or iSCSI target could not be added." -msgstr "호스트 그룹 또는 iSCSI 대상을 추가할 수 없습니다." - -msgid "The host group or iSCSI target was not found." -msgstr "호스트 그룹 또는 iSCSI 대상을 찾을 수 없습니다." - -msgid "" -"The host is not ready to be failed back. Please resynchronize the volumes " -"and resume replication on the 3PAR backends." -msgstr "" -"호스트를 장애 복구할 준비가 되지 않았습니다. 볼륨을 다시 동기화하고 3PAR 백엔" -"드에서 복제를 재개하십시오." - -msgid "" -"The host is not ready to be failed back. Please resynchronize the volumes " -"and resume replication on the LeftHand backends." -msgstr "" -"호스트를 장애 복구할 준비가 되지 않았습니다. 볼륨을 다시 동기화하고 LeftHand " -"백엔드에서 복제를 재개하십시오." - -msgid "" -"The host is not ready to be failed back. Please resynchronize the volumes " -"and resume replication on the Storwize backends." -msgstr "" -"호스트를 장애 복구할 준비가 되지 않았습니다. 볼륨을 다시 동기화하고 Storwize " -"백엔드에서 복제를 재개하십시오." - -#, python-format -msgid "The iSCSI CHAP user %(user)s does not exist." -msgstr "iSCSI CHAP 사용자 %(user)s이(가) 존재하지 않습니다." - -msgid "The key cannot be None." -msgstr "키는 None이 되어서는 안 됩니다. " - -#, python-format -msgid "The logical device for specified %(type)s %(id)s was already deleted." -msgstr "지정된 %(type)s %(id)s의 논리 디바이스가 이미 삭제되었습니다." - -#, python-format -msgid "The method %(method)s is timed out. (timeout value: %(timeout)s)" -msgstr "" -"메소드 %(method)s이(가) 제한시간을 초과했습니다.(제한시간 값: %(timeout)s)" - -msgid "The method update_migrated_volume is not implemented." -msgstr "update_migrated_volume 메소드가 구현되지 않았습니다. " - -#, python-format -msgid "" -"The mount %(mount_path)s is not a valid Quobyte USP volume. Error: %(exc)s" -msgstr "" -"마운트 %(mount_path)s이(가) 올바른 Quobyte USP 볼륨이 아닙니다. 오류: %(exc)s" - -#, python-format -msgid "The parameter of the storage backend. 
(config_group: %(config_group)s)" -msgstr "스토리지 백엔드의 매개변수입니다.(config_group: %(config_group)s)" - -msgid "The parent backup must be available for incremental backup." -msgstr "상위 백업은 증분 백업을 수행할 수 있어야 합니다. " - -#, python-format -msgid "The provided snapshot '%s' is not a snapshot of the provided volume." -msgstr "제공된 스냅샷 '%s'이(가) 제공된 볼륨의 스냅샷이 아닙니다." - -msgid "" -"The reference to the volume in the backend should have the format " -"file_system/volume_name (volume_name cannot contain '/')" -msgstr "" -"백엔드의 볼륨 참조 형식은 file_system/volume_name이어야 합니다(volume_name에 " -"'/'가 포함될 수 없음)." - -#, python-format -msgid "The remote retention count must be %s or less." -msgstr "원격 보유 수는 %s 이하여야 합니다." - -msgid "" -"The replication mode was not configured correctly in the volume type " -"extra_specs. If replication:mode is periodic, replication:sync_period must " -"also be specified and be between 300 and 31622400 seconds." -msgstr "" -"볼륨 유형 extra_specs에 복제 모드가 올바르게 구성되지 않았습니다. " -"replication:mode가 주기적인 경우 replication:sync_period도 지정해야 하며 300 " -"- 31622400초여야 합니다." - -#, python-format -msgid "The replication sync period must be at least %s seconds." -msgstr "복제 동기화 기간은 최소 %s초여야 합니다." - -#, python-format -msgid "" -"The requested size : %(requestedSize)s is not the same as resulting size: " -"%(resultSize)s." -msgstr "" -"요청된 크기 %(requestedSize)s이(가) 결과로 얻어진 크기 %(resultSize)s인 동안" -"에는 인스턴스 연관을 변경할 수 없습니다." - -#, python-format -msgid "The resource %(resource)s was not found." -msgstr "자원 %(resource)s을(를) 찾을 수 없습니다." - -msgid "The results are invalid." -msgstr "결과가 올바르지 않습니다. " - -#, python-format -msgid "The retention count must be %s or less." -msgstr "보유 수는 %s 이하여야 합니다." - -msgid "The snapshot cannot be created when the volume is in maintenance mode." -msgstr "볼륨이 유지보수 모드에 있으면 스냅샷을 작성할 수 없습니다. " - -msgid "The source volume for this WebDAV operation not found." -msgstr "이 WebDAV 조작의 소스 볼륨을 찾을 수 없습니다." 
- -#, python-format -msgid "" -"The source volume type '%(src)s' is different than the destination volume " -"type '%(dest)s'." -msgstr "" -"소스 볼륨 유형 '%(src)s'이(가) 대상 볼륨 유형인 '%(dest)s'과(와) 다릅니다." - -#, python-format -msgid "The source volume type '%s' is not available." -msgstr "소스 볼륨 유형 '%s'은(는) 사용할 수 없습니다." - -#, python-format -msgid "The specified %(desc)s is busy." -msgstr "지정된 %(desc)s이(가) 사용 중입니다." - -#, python-format -msgid "The specified LUN does not belong to the given pool: %s." -msgstr "지정된 LUN이 제공된 풀에 속하지 않음: %s." - -#, python-format -msgid "" -"The specified ldev %(ldev)s could not be managed. The ldev must not be " -"mapping." -msgstr "" -"지정된 ldev %(ldev)s을(를) 관리할 수 없습니다. ldev는 쌍을 이루지 아니어야 합" -"니다." - -#, python-format -msgid "" -"The specified ldev %(ldev)s could not be managed. The ldev must not be " -"paired." -msgstr "" -"지정된 ldev %(ldev)s을(를) 관리할 수 없습니다. ldev는 쌍을 이루지 않아야 합니" -"다." - -#, python-format -msgid "" -"The specified ldev %(ldev)s could not be managed. The ldev size must be in " -"multiples of gigabyte." -msgstr "" -"지정된 ldev %(ldev)s을(를) 관리할 수 없습니다. ldev 크기는 기가바이트 단위여" -"야 합니다." - -#, python-format -msgid "" -"The specified ldev %(ldev)s could not be managed. The volume type must be DP-" -"VOL." -msgstr "" -"지정된 ldev %(ldev)s을(를) 관리할 수 없습니다. 볼륨 유형은 DP-VOL이어야 합니" -"다." - -#, python-format -msgid "" -"The specified operation is not supported. The volume size must be the same " -"as the source %(type)s. (volume: %(volume_id)s)" -msgstr "" -"지정된 조작이 지원되지 않습니다. 볼륨 크기는 소스 %(type)s과(와) 동일해야 합" -"니다.(볼륨: %(volume_id)s)" - -msgid "The specified vdisk is mapped to a host." -msgstr "지정된 vdisk가 호스트에 맵핑됩니다." - -msgid "The specified volume is mapped to a host." -msgstr "지정된 볼륨이 호스트에 맵핑되어 있습니다. " - -#, python-format -msgid "" -"The storage array password for %s is incorrect, please update the configured " -"password." -msgstr "" -"%s의 스토리지 배열 비밀번호가 올바르지 않습니다. 구성된 비밀번호를 업데이트하" -"십시오." - -#, python-format -msgid "The storage backend can be used. 
(config_group: %(config_group)s)" -msgstr "스토리지 백엔드를 사용할 수 있습니다.(config_group: %(config_group)s)" - -#, python-format -msgid "" -"The storage device does not support %(prot)s. Please configure the device to " -"support %(prot)s or switch to a driver using a different protocol." -msgstr "" -"스토리지 장치에서 %(prot)s을(를) 지원하지 않습니다. %(prot)s을(를) 지원하도" -"록 장치를 구성하거나 다른 프토토콜을 사용하는 드라이버로 전환하십시오." - -#, python-format -msgid "" -"The striped meta count of %(memberCount)s is too small for volume: " -"%(volumeName)s, with size %(volumeSize)s." -msgstr "" -"%(memberCount)s의 스트라이프 메타 개수가 크기가 %(volumeSize)s인 볼륨 " -"%(volumeName)s에 대해 너무 적습니다. " - -#, python-format -msgid "" -"The type of metadata: %(metadata_type)s for volume/snapshot %(id)s is " -"invalid." -msgstr "" -"볼륨/스냅샷 %(id)s에 대한 메타데이터의 유형: %(metadata_type)s이(가) 올바르" -"지 않습니다. " - -#, python-format -msgid "" -"The volume %(volume_id)s could not be extended. The volume type must be " -"Normal." -msgstr "" -"볼륨 %(volume_id)s을(를) 확장할 수 없습니다. 볼륨 유형은 보통이어야 합니다." - -#, python-format -msgid "" -"The volume %(volume_id)s could not be unmanaged. The volume type must be " -"%(volume_type)s." -msgstr "" -"볼륨 %(volume_id)s을(를) 관리 해제할 수 없습니다. 볼륨 유형은 " -"%(volume_type)s(이)어야 합니다." - -#, python-format -msgid "The volume %(volume_id)s is managed successfully. (LDEV: %(ldev)s)" -msgstr "볼륨 %(volume_id)s을(를) 정상적으로 관리합니다. (LDEV: %(ldev)s)" - -#, python-format -msgid "The volume %(volume_id)s is unmanaged successfully. (LDEV: %(ldev)s)" -msgstr "볼륨 %(volume_id)s을(를) 정상적으로 관리 해제합니다. (LDEV: %(ldev)s)" - -#, python-format -msgid "The volume %(volume_id)s to be mapped was not found." -msgstr "맵핑할 볼륨 %(volume_id)s을(를) 찾을 수 없습니다." - -msgid "The volume cannot accept transfer in maintenance mode." -msgstr "유지보수 모드에서는 볼륨이 전송을 승인할 수 없습니다. " - -msgid "The volume cannot be attached in maintenance mode." -msgstr "유지보수 모드에서는 볼륨에 연결할 수 없습니다. " - -msgid "The volume cannot be detached in maintenance mode." -msgstr "유지보수 모드에서는 볼륨을 분리할 수 없습니다. 
" - -msgid "The volume cannot be updated during maintenance." -msgstr "유지보수 중에는 볼륨을 업데이트할 수 없습니다. " - -msgid "The volume connection cannot be initialized in maintenance mode." -msgstr "유지보수 모드에서는 볼륨 연결을 초기화할 수 없습니다. " - -msgid "The volume driver requires the iSCSI initiator name in the connector." -msgstr "볼륨 드라이버는 커넥터에 iSCSI 개시자 이름이 필요합니다. " - -msgid "" -"The volume is currently busy on the 3PAR and cannot be deleted at this time. " -"You can try again later." -msgstr "" -"볼륨이 현재 3PAR에서 사용 중이므로 이번에는 삭제할 수 없습니다. 나중에 다시 " -"시도할 수 있습니다." - -msgid "The volume label is required as input." -msgstr "볼륨 레이블이 입력으로 필요합니다. " - -#, python-format -msgid "There are no resources available for use. (resource: %(resource)s)" -msgstr "사용 가능한 자원이 없습니다.(자원: %(resource)s)" - -msgid "There are no valid ESX hosts." -msgstr "올바른 ESX 호스트가 없습니다. " - -msgid "There are no valid datastores." -msgstr "올바른 데이터 저장소가 없습니다. " - -#, python-format -msgid "" -"There is no designation of the %(param)s. The specified storage is essential " -"to manage the volume." -msgstr "" -"%(param)s의 지정이 없습니다. 지정된 스토리지는 볼륨 관리에 꼭 필요합니다." - -msgid "" -"There is no designation of the ldev. The specified ldev is essential to " -"manage the volume." -msgstr "ldev의 지정이 없습니다. 지정된 ldev는 볼륨 관리에 꼭 필요합니다." - -msgid "There is no metadata in DB object." -msgstr "DB 오브젝트에 메타데이터가 없습니다. " - -#, python-format -msgid "There is no share which can host %(volume_size)sG" -msgstr "%(volume_size)sG를 보유할 수 있는 공유가 없음" - -#, python-format -msgid "There is no share which can host %(volume_size)sG." -msgstr "%(volume_size)sG를 호스트할 수 있는 공유가 없습니다." - -#, python-format -msgid "There is no such action: %s" -msgstr "해당 조치가 없음: %s" - -msgid "There is no virtual disk device." -msgstr "가상 디스크 디바이스가 없습니다." - -#, python-format -msgid "There was an error adding the volume to the remote copy group: %s." -msgstr "원격 복사 그룹에 볼륨을 추가하는 중 오류 발생: %s." 
- -#, python-format -msgid "There was an error creating the cgsnapshot: %s" -msgstr "cgsnapshot 작성 중 오류 발생: %s" - -#, python-format -msgid "There was an error creating the remote copy group: %s." -msgstr "원격 복사 그룹을 생성하는 중 오류 발생: %s." - -#, python-format -msgid "" -"There was an error setting the sync period for the remote copy group: %s." -msgstr "원격 복사 그룹의 동기화 기간을 설정하는 중 오류 발생: %s." - -#, python-format -msgid "" -"There was an error setting up a remote copy group on the 3PAR arrays: " -"('%s'). The volume will not be recognized as replication type." -msgstr "" -"3PAR 배열에서 원격 복사 그룹을 설정하는 중 오류 발생: ('%s'). 볼륨이 복제 유" -"형으로 인식되지 않습니다." - -#, python-format -msgid "" -"There was an error setting up a remote schedule on the LeftHand arrays: " -"('%s'). The volume will not be recognized as replication type." -msgstr "" -"LeftHand 배열에서 원격 스케줄을 설정하는 중 오류 발생: ('%s'). 볼륨이 복제 유" -"형으로 인식되지 않습니다." - -#, python-format -msgid "There was an error starting remote copy: %s." -msgstr "원격 복사를 시작하는 중 오류 발생: %s." - -#, python-format -msgid "There's no Gluster config file configured (%s)" -msgstr "Gluster config 파일이 구성되지 않음(%s)" - -#, python-format -msgid "There's no NFS config file configured (%s)" -msgstr "구성된 NFS config 파일이 없음(%s)" - -#, python-format -msgid "" -"There's no Quobyte volume configured (%s). Example: quobyte:///" -"" -msgstr "" -"구성된 Quobyte 볼륨이 없습니다(%s). 예제: quobyte:///" - -msgid "Thin provisioning not supported on this version of LVM." -msgstr "이 버전의 LVM에서는 씬 프로비저닝이 지원되지 않습니다." - -msgid "This driver does not support deleting in-use snapshots." -msgstr "이 드라이버는 스냅샵 삭제 중에 사용 중인 스냅샷을 지원하지 않습니다." - -msgid "This driver does not support snapshotting in-use volumes." -msgstr "이 드라이버는 스냅샵 작성 중에 사용 중인 볼륨을 지원하지 않습니다." - -msgid "This request was rate-limited." -msgstr "이 요청이 비율 제한되었습니다. " - -#, python-format -msgid "" -"This system platform (%s) is not supported. This driver supports only Win32 " -"platforms." -msgstr "" -"이 시스템 플랫폼 (%s)이(가) 지원되지 않습니다. 
이 드라이버는 Win32 플랫폼만 " -"지원합니다." - -#, python-format -msgid "Tier Policy Service not found for %(storageSystemName)s." -msgstr "%(storageSystemName)s에 대한 티어 정책 서비스를 찾을 수 없습니다. " - -#, python-format -msgid "Timed out while waiting for Nova update for creation of snapshot %s." -msgstr "스냅샷 %s 작성을 위해 Nova 업데이트를 대기하는 동안 제한시간이 초과됨." - -#, python-format -msgid "" -"Timed out while waiting for Nova update for deletion of snapshot %(id)s." -msgstr "" -"스냅샷 %(id)s 삭제를 위해 Nova 업데이트를 대기하는 동안 제한시간이 초과됨." - -#, python-format -msgid "Timeout while calling %s " -msgstr "%s을(를) 호출하는 동안 제한시간 초과" - -#, python-format -msgid "Timeout while requesting %(service)s API." -msgstr "%(service)s API를 요청하는 중 제한시간을 초과했습니다." - -#, python-format -msgid "Timeout while requesting capabilities from backend %(service)s." -msgstr "백엔드 %(service)s에서 기능을 요청하는 동안 제한시간이 초과됩니다." - -#, python-format -msgid "Transfer %(transfer_id)s could not be found." -msgstr "전송 %(transfer_id)s을(를) 찾을 수 없습니다. " - -#, python-format -msgid "" -"Transfer %(transfer_id)s: Volume id %(volume_id)s in unexpected state " -"%(status)s, expected awaiting-transfer" -msgstr "" -"전송 %(transfer_id)s: 볼륨 id %(volume_id)s이(가) 예상치 않은 상태%(status)s" -"입니다. 예상된 상태는 전송 대기입니다. " - -#, python-format -msgid "" -"Trying to import backup metadata from id %(meta_id)s into backup %(id)s." -msgstr "" -"ID %(meta_id)s의 백업 메타데이터를 백업 %(id)s(으)로 가져오려고 시도 중입니" -"다. " - -#, python-format -msgid "" -"Tune volume task stopped before it was done: volume_name=%(volume_name)s, " -"task-status=%(status)s." -msgstr "" -"볼륨 조정 태스크가 완료되기 전에 중지됨: volume_name=%(volume_name)s, task-" -"status=%(status)s." - -#, python-format -msgid "" -"Type %(type_id)s is already associated with another qos specs: " -"%(qos_specs_id)s" -msgstr "" -"%(type_id)s 유형이 이미 다른 qos 스펙과 연관되어 있음: %(qos_specs_id)s" - -msgid "Type access modification is not applicable to public volume type." -msgstr "유형 액세스 수정을 공용 볼륨 유형에 적용할 수 없습니다. " - -msgid "Type cannot be converted into NaElement." 
-msgstr "유형을 NaElement로 변환할 수 없습니다." - -#, python-format -msgid "TypeError: %s" -msgstr "TypeError: %s" - -#, python-format -msgid "UUIDs %s are in both add and remove volume list." -msgstr "UUID %s이(가) 추가 및 제거 볼륨 목록에 있습니다. " - -#, python-format -msgid "Unable to access the Storwize back-end for volume %s." -msgstr "볼륨 %s의 Storwize 백엔드에 액세스할 수 없습니다." - -msgid "Unable to access the backend storage via file handle." -msgstr "파일 핸들을 통해 백엔드 스토리지에 액세스할 수 없습니다. " - -#, python-format -msgid "Unable to access the backend storage via the path %(path)s." -msgstr "경로 %(path)s을(를) 통해 백엔드 스토리지에 액세스할 수 없음." - -#, python-format -msgid "Unable to add Cinder host to apphosts for space %(space)s" -msgstr "공간 %(space)s에 대한 apphosts에 Cinder 호스트를 추가할 수 없음" - -#, python-format -msgid "Unable to complete failover of %s." -msgstr "%s의 장애 복구를 완료할 수 없습니다." - -msgid "Unable to connect or find connection to host" -msgstr "호스트에 대한 연결을 설정하거나 찾을 수 없음" - -#, python-format -msgid "Unable to create consistency group %s" -msgstr "일관성 그룹 %s을(를) 작성할 수 없음" - -msgid "Unable to create lock. Coordination backend not started." -msgstr "잠금을 생성할 수 없습니다. 조정 백엔드가 시작되지 않았습니다." - -#, python-format -msgid "" -"Unable to create or get default storage group for FAST policy: " -"%(fastPolicyName)s." -msgstr "" -"FAST 정책 %(fastPolicyName)s에 대한 기본 스토리지 그룹을 작성하거나 확인하지 " -"못했습니다." - -#, python-format -msgid "Unable to create replica clone for volume %s." -msgstr "볼륨 %s에 대한 복제본을 작성할 수 없습니다. " - -#, python-format -msgid "Unable to create the relationship for %s." -msgstr "%s의 관계를 작성할 수 없습니다." - -#, python-format -msgid "Unable to create volume %(name)s from %(snap)s." -msgstr "%(snap)s에서 %(name)s 볼륨을 생성할 수 없습니다." - -#, python-format -msgid "Unable to create volume %(name)s from %(vol)s." -msgstr "%(vol)s에서 %(name)s 볼륨을 생성할 수 없습니다." - -#, python-format -msgid "Unable to create volume %s" -msgstr "볼륨 %s을(를) 생성할 수 없습니다." - -msgid "Unable to create volume. Backend down." -msgstr "볼륨을 생성할 수 없습니다. 백엔드가 종료되었습니다." 
- -#, python-format -msgid "Unable to delete Consistency Group snapshot %s" -msgstr "일관성 그룹 스냅샷 %s을(를) 삭제할 수 없음" - -#, python-format -msgid "Unable to delete snapshot %(id)s, status: %(status)s." -msgstr "스냅샷 %(id)s을(를) 삭제할 수 없음, 상태: %(status)s." - -#, python-format -msgid "Unable to delete snapshot policy on volume %s." -msgstr "볼륨 %s에 대한 스냅샷 정책을 삭제할 수 없습니다. " - -#, python-format -msgid "" -"Unable to delete the target volume for volume %(vol)s. Exception: %(err)s." -msgstr "볼륨 %(vol)s의 대상 볼륨을 삭제할 수 없습니다. 예외: %(err)s." - -msgid "" -"Unable to detach volume. Volume status must be 'in-use' and attach_status " -"must be 'attached' to detach." -msgstr "" -"볼륨의 연결을 해제할 수 없습니다. 연결을 해제하려면 볼륨 상태가 '사용 중'이어" -"야 하며 attach_status가 '연결됨'이어야 합니다." - -#, python-format -msgid "" -"Unable to determine secondary_array from supplied secondary: %(secondary)s." -msgstr "제공된 보조 %(secondary)s에서 secondary_array를 판별할 수 없습니다." - -#, python-format -msgid "Unable to determine snapshot name in Purity for snapshot %(id)s." -msgstr "스냅샷 %(id)s에 대한 Purity에서 스냅샷 이름을 판별할 수 없습니다. " - -msgid "Unable to determine system id." -msgstr "시스템 ID를 판별할 수 없습니다. " - -msgid "Unable to determine system name." -msgstr "시스템 이름을 판별할 수 없습니다. " - -#, python-format -msgid "" -"Unable to do manage snapshot operations with Purity REST API version " -"%(api_version)s, requires %(required_versions)s." -msgstr "" -"Purity REST API 버전 %(api_version)s을(를) 사용하여 스냅샷 조작 관리를 수행" -"할 수 없습니다. %(required_versions)s이(가) 필요합니다. " - -#, python-format -msgid "" -"Unable to do replication with Purity REST API version %(api_version)s, " -"requires one of %(required_versions)s." -msgstr "" -"Purity REST API 버전 %(api_version)s(으)로 복제를 수행할 수 없습니다. " -"%(required_versions)s 중 하나가 필요합니다." - -#, python-format -msgid "Unable to establish the partnership with the Storwize cluster %s." -msgstr "Storwize 클러스터 %s과(와) 파트너십을 설정할 수 없습니다." 
- -#, python-format -msgid "Unable to extend volume %s" -msgstr "볼륨 %s을(를) 확장할 수 없음" - -#, python-format -msgid "" -"Unable to fail-over the volume %(id)s to the secondary back-end, because the " -"replication relationship is unable to switch: %(error)s" -msgstr "" -"복제 관계를 전환할 수 없으므로 볼륨 %(id)s을(를) 보조 백엔드로 장애 복구할 " -"수 없음: %(error)s" - -msgid "" -"Unable to failback to \"default\", this can only be done after a failover " -"has completed." -msgstr "" -"\"default\"로 장애 복구할 수 없습니다. 장애 조치가 완료된 후에만 수행할 수 있" -"습니다." - -#, python-format -msgid "Unable to failover to replication target:%(reason)s)." -msgstr "복제 대상으로 장애 복구할 수 없음:%(reason)s)." - -msgid "Unable to fetch connection information from backend." -msgstr "백엔드에서 연결 정보를 페치할 수 없습니다." - -#, python-format -msgid "Unable to fetch connection information from backend: %(err)s" -msgstr "백엔드에서 연결 정보를 페치할 수 없음: %(err)s" - -#, python-format -msgid "Unable to find Purity ref with name=%s" -msgstr "이름이 %s인 Purity ref를 찾을 수 없음" - -#, python-format -msgid "Unable to find Volume Group: %(vg_name)s" -msgstr "볼륨 그룹을 찾을 수 없음: %(vg_name)s" - -msgid "Unable to find failover target, no secondary targets configured." -msgstr "장애 복구 대상을 찾을 수 없습니다. 보조 대상이 구성되지 않았습니다." - -msgid "Unable to find iSCSI mappings." -msgstr "iSCSI 맵핑을 찾을 수 없습니다. " - -#, python-format -msgid "Unable to find ssh_hosts_key_file: %s" -msgstr "ssh_hosts_key_file을 찾을 수 없음: %s" - -msgid "Unable to find system log file!" -msgstr "시스템 로그 파일을 찾을 수 없음!" - -#, python-format -msgid "" -"Unable to find viable pg snapshot to use forfailover on selected secondary " -"array: %(id)s." -msgstr "" -"선택한 보조 배열에서 장애 복구를 사용하기 위해 실행 가능한 pg 스냅샷을 찾을 " -"수 없음: %(id)s." - -#, python-format -msgid "" -"Unable to find viable secondary array fromconfigured targets: %(targets)s." -msgstr "구성된 대상 %(targets)s에서 실행 가능한 보조 배열을 찾을 수 없습니다." 
- -#, python-format -msgid "Unable to find volume %s" -msgstr "볼륨 %s을(를) 찾을 수 없음" - -#, python-format -msgid "Unable to get a block device for file '%s'" -msgstr "'%s' 파일의 블록 디바이스를 가져올 수 없음" - -#, python-format -msgid "" -"Unable to get configuration information necessary to create a volume: " -"%(errorMessage)s." -msgstr "" -"볼륨 작성에 필요한 구성 정보를 가져올 수 없음: %(errorMessage)s(이)어야 합니" -"다." - -msgid "Unable to get corresponding record for pool." -msgstr "풀에 대한 해당 레코드를 가져올 수 없습니다. " - -#, python-format -msgid "" -"Unable to get information on space %(space)s, please verify that the cluster " -"is running and connected." -msgstr "" -"공간 %(space)s에 대한 정보를 가져올 수 없습니다. 클러스터가 실행 중이며 연결" -"되어 있는지 확인하십시오. " - -msgid "" -"Unable to get list of IP addresses on this host, check permissions and " -"networking." -msgstr "" -"이 호스트에서 IP 주소의 목록을 가져올 수 없습니다. 권한 및 네트워킹을 확인하" -"십시오. " - -msgid "" -"Unable to get list of domain members, check that the cluster is running." -msgstr "" -"도메인 멤버의 목록을 가져올 수 없습니다. 클러스터가 실행 중인지 확인하십시" -"오. " - -msgid "" -"Unable to get list of spaces to make new name. Please verify the cluster is " -"running." -msgstr "" -"새 이름을 작성할 공간의 목록을 가져올 수 없습니다. 클러스터가 실행 중인지 확" -"인하십시오. " - -#, python-format -msgid "Unable to get stats for backend_name: %s" -msgstr "backend_name에 대한 통계를 가져올 수 없음: %s" - -msgid "Unable to get storage volume from job." -msgstr "작업에서 스토리지 볼륨을 가져올 수 없습니다." - -#, python-format -msgid "Unable to get target endpoints for hardwareId %(hardwareIdInstance)s." -msgstr "" -"하드웨어 ID %(hardwareIdInstance)s의 대상 엔드포인트를 가져올 수 없습니다." - -msgid "Unable to get the name of the masking view." -msgstr "마스킹 보기의 이름을 가져올 수 없습니다." - -msgid "Unable to get the name of the portgroup." -msgstr "portgroup의 이름을 가져올 수 없습니다." - -#, python-format -msgid "Unable to get the replication relationship for volume %s." -msgstr "볼륨 %s의 복제 관계를 가져올 수 없습니다." - -#, python-format -msgid "" -"Unable to import volume %(deviceId)s to cinder. 
It is the source volume of " -"replication session %(sync)s." -msgstr "" -"볼륨 %(deviceId)s을(를) cinder로 가져올 수 없습니다. 이는 복제 세션 %(sync)s" -"의 소스 볼륨입니다. " - -#, python-format -msgid "" -"Unable to import volume %(deviceId)s to cinder. The external volume is not " -"in the pool managed by current cinder host." -msgstr "" -"볼륨 %(deviceId)s을(를) cinder로 가져올 수 없습니다. 외부 볼륨이 현재 cinder " -"호스트에 의해 관리되는 풀에 없습니다. " - -#, python-format -msgid "" -"Unable to import volume %(deviceId)s to cinder. Volume is in masking view " -"%(mv)s." -msgstr "" -"볼륨 %(deviceId)s을(를) cinder로 가져올 수 없습니다. 볼륨이 마스킹 보기 " -"%(mv)s에 있습니다. " - -#, python-format -msgid "Unable to load CA from %(cert)s %(e)s." -msgstr "%(cert)s %(e)s에서 CA를 로드할 수 없습니다. " - -#, python-format -msgid "Unable to load cert from %(cert)s %(e)s." -msgstr "%(cert)s %(e)s에서 인증서를 로드할 수 없습니다." - -#, python-format -msgid "Unable to load key from %(cert)s %(e)s." -msgstr "%(cert)s %(e)s에서 키를 로드할 수 없습니다. " - -#, python-format -msgid "Unable to locate account %(account_name)s on Solidfire device" -msgstr "Solidfire 디바이스에서 %(account_name)s 계정을 찾을 수 없음" - -#, python-format -msgid "Unable to locate an SVM that is managing the IP address '%s'" -msgstr "IP 주소 '%s'을(를) 관리하는 SVM을 찾을 수 없음" - -#, python-format -msgid "Unable to locate specified replay profiles %s " -msgstr "지정된 재생 프로파일 %s을(를) 찾을 수 없음" - -#, python-format -msgid "" -"Unable to manage existing volume. Volume %(volume_ref)s already managed." -msgstr "" -"기존 볼륨을 관리할 수 없습니다. 볼륨 %(volume_ref)s이(가) 이미 관리되고 있습" -"니다. " - -#, python-format -msgid "Unable to manage volume %s" -msgstr "볼륨 %s을(를) 관리할 수 없음" - -msgid "Unable to map volume" -msgstr "볼륨을 맵핑할 수 없음" - -msgid "Unable to map volume." -msgstr "볼륨을 맵핑할 수 없습니다. " - -msgid "Unable to parse attributes." -msgstr "속성을 구문 분석할 수 없습니다. " - -#, python-format -msgid "" -"Unable to promote replica to primary for volume %s. No secondary copy " -"available." -msgstr "" -"%s 볼륨의 복제본을 1차로 승격할 수 없습니다. 2차 사본을 사용할 수 없습니다." 
- -msgid "" -"Unable to re-use a host that is not managed by Cinder with " -"use_chap_auth=True," -msgstr "" -"Cinder에서 관리하지 않는 호스트는 use_chap_auth=True를 사용하여재사용할 수 없" -"습니다. " - -msgid "Unable to re-use host with unknown CHAP credentials configured." -msgstr "알 수 없는 CHAP 신임 정보로 구성된 호스트를 재사용할 수 없습니다. " - -#, python-format -msgid "Unable to rename volume %(existing)s to %(newname)s" -msgstr "볼륨 %(existing)s의 이름을 %(newname)s(으)로 바꿀 수 없음" - -#, python-format -msgid "Unable to retrieve snapshot group with id of %s." -msgstr "id가 %s인 스냅샷 그룹을 검색할 수 없습니다." - -#, python-format -msgid "" -"Unable to retype %(specname)s, expected to receive current and requested " -"%(spectype)s values. Value received: %(spec)s" -msgstr "" -"%(specname)s을(를) 다시 입력할 수 없음, 현재 및 요청된 %(spectype)s 값을 수신" -"해야 합니다. 수신한 값: %(spec)s" - -#, python-format -msgid "" -"Unable to retype: A copy of volume %s exists. Retyping would exceed the " -"limit of 2 copies." -msgstr "" -"재입력할 수 없음: %s 볼륨의 사본이 있습니다. 재입력을 수행하면 한계값인 2개" -"의 사본을 초과합니다." - -#, python-format -msgid "" -"Unable to retype: Current action needs volume-copy, it is not allowed when " -"new type is replication. Volume = %s" -msgstr "" -"재입력할 수 없음: 현재 조치에 볼륨-사본이 필요함, 새 유형이 복제인 경우 이는 " -"허용되지 않습니다. 볼륨 = %s" - -#, python-format -msgid "" -"Unable to set up mirror mode replication for %(vol)s. Exception: %(err)s." -msgstr "%(vol)s의 미러 모드 복제를 설정할 수 없습니다. 예외: %(err)s." - -#, python-format -msgid "Unable to snap Consistency Group %s" -msgstr "일관성 그룹 %s의 스냅샷을 작성할 수 없음" - -msgid "Unable to terminate volume connection from backend." -msgstr "백엔드에서 볼륨 연결을 종료할 수 없습니다." - -#, python-format -msgid "Unable to terminate volume connection: %(err)s" -msgstr "볼륨 연결을 종료할 수 없음: %(err)s" - -#, python-format -msgid "Unable to update consistency group %s" -msgstr "일관성 그룹 %s을(를) 업데이트할 수 없음" - -#, python-format -msgid "" -"Unable to verify initiator group: %(igGroupName)s in masking view " -"%(maskingViewName)s. 
" -msgstr "" -"마스킹 보기 %(maskingViewName)s에서 개시자 그룹 %(igGroupName)s을(를) 확인할 " -"수 없습니다." - -msgid "Unacceptable parameters." -msgstr "허용할 수 없는 매개변수입니다. " - -#, python-format -msgid "" -"Unexecpted mapping status %(status)s for mapping %(id)s. Attributes: " -"%(attr)s." -msgstr "" -"%(id)s 맵핑에 대해 예상치 못한 맵핑 상태 %(status)s. 속성:%(attr)s(이)어야 합" -"니다." - -#, python-format -msgid "" -"Unexpected CLI response: header/row mismatch. header: %(header)s, row: " -"%(row)s." -msgstr "" -"예상치 못한 CLI 응답: 헤더/행 불일치. 헤더: %(header)s, 행: %(row)s(이)어야 " -"합니다." - -#, python-format -msgid "" -"Unexpected mapping status %(status)s for mapping%(id)s. Attributes: %(attr)s." -msgstr "" -"맵핑 %(id)s에 대해 예상치 않은 맵핑 상태 %(status)s이(가) 발생했습니다. 속" -"성: %(attr)s." - -#, python-format -msgid "Unexpected output. Expected [%(expected)s] but received [%(output)s]" -msgstr "" -"예상치 못한 결과입니다. [%(expected)s]을(를) 예상했지만 [%(output)s]을(를) 수" -"신했습니다." - -msgid "Unexpected response from Nimble API" -msgstr "Nimble API로부터의 예기치 않은 응답" - -msgid "Unexpected response from Tegile IntelliFlash API" -msgstr "Tegile IntelliFlash API에서 예상치 못한 응답" - -msgid "Unexpected status code" -msgstr "예기치 않은 상태 코드" - -#, python-format -msgid "" -"Unexpected status code from the switch %(switch_id)s with protocol " -"%(protocol)s for url %(page)s. Error: %(error)s" -msgstr "" -"url %(page)s에 대해 프로토콜 %(protocol)s을(를) 사용하는 스위치 %(switch_id)s" -"에서 예상치 못한 상태 코드 수신. 오류: %(error)s" - -msgid "Unknown Gluster exception" -msgstr "알 수 없는 Gluster 예외" - -msgid "Unknown NFS exception" -msgstr "알 수 없는 NFS 예외" - -msgid "Unknown RemoteFS exception" -msgstr "알 수 없는 RemoteFS 예외" - -msgid "Unknown SMBFS exception." -msgstr "알 수 없는 SMBFS 예외입니다." - -msgid "Unknown Virtuozzo Storage exception" -msgstr "알 수 없는 Virtuozzo 스토리지 예외" - -msgid "Unknown action" -msgstr "알 수 없는 조치" - -#, python-format -msgid "" -"Unknown if the volume: %s to be managed is already being managed by Cinder. " -"Aborting manage volume. 
Please add 'cinder_managed' custom schema property " -"to the volume and set its value to False. Alternatively, Set the value of " -"cinder config policy 'zfssa_manage_policy' to 'loose' to remove this " -"restriction." -msgstr "" -"관리할 볼륨: %s이(가) 이미 Cinder에서 관리 중인지 알 수 없습니다. 볼륨 관리" -"를 중단합니다. 'cinder_managed' 사용자 지정 스키마 특성을 볼륨에 추가하고 값" -"을 False로 설정합니다. 또는 cinder 구성 정책 'zfssa_manage_policy'의 값을 " -"'loose'로 설정하여 이 제한을 제거하십시오." - -#, python-format -msgid "" -"Unknown if the volume: %s to be managed is already being managed by Cinder. " -"Aborting manage volume. Please add 'cinder_managed' custom schema property " -"to the volume and set its value to False. Alternatively, set the value of " -"cinder config policy 'zfssa_manage_policy' to 'loose' to remove this " -"restriction." -msgstr "" -"관리할 볼륨: %s이(가) 이미 Cinder에서 관리 중인지 알 수 없습니다. 볼륨 관리" -"를 중단합니다. 'cinder_managed' 사용자 지정 스키마 특성을 볼륨에 추가하고 값" -"을 False로 설정합니다. 또는 cinder 구성 정책 'zfssa_manage_policy'의 값을 " -"'loose'로 설정하여 이 제한을 제거하십시오." - -#, python-format -msgid "Unknown operation %s." -msgstr "알 수 없는 조작 %s." - -#, python-format -msgid "Unknown or unsupported command %(cmd)s" -msgstr "알 수 없거나 지원되지 않는 명령 %(cmd)s" - -#, python-format -msgid "Unknown protocol: %(protocol)s." -msgstr "알 수 없는 프로토콜: %(protocol)s." - -#, python-format -msgid "Unknown quota resources %(unknown)s." -msgstr "알 수 없는 할당량 자원 %(unknown)s." - -msgid "Unknown sort direction, must be 'desc' or 'asc'" -msgstr "알 수 없는 정렬 방향입니다. 'desc' 또는 'asc'여야 함" - -msgid "Unknown sort direction, must be 'desc' or 'asc'." -msgstr "알 수 없는 정렬 방향입니다. 'desc' 또는 'asc'여야 합니다. " - -msgid "Unmanage and cascade delete options are mutually exclusive." -msgstr "관리 취소 및 계단식 삭제 옵션은 상호 배타적입니다." - -msgid "Unmanage volume not implemented." -msgstr "관리 취소 볼륨이 구현되지 않았습니다." - -msgid "Unmanaging of snapshots from 'failed-over' volumes is not allowed." -msgstr "" -"'장애 복구' 볼륨에서 스냅샷의 관리를 해제하는 기능은 허용되지 않습니다." - -msgid "Unmanaging of snapshots from failed-over volumes is not allowed." 
-msgstr "" -"장애 복구된 볼륨에서 스냅샷의 관리를 해제하는 기능은 허용되지 않습니다." - -#, python-format -msgid "Unrecognized QOS keyword: \"%s\"" -msgstr "인식되지 않는 QOS 키워드: \"%s\"" - -#, python-format -msgid "Unrecognized backing format: %s" -msgstr "인식할 수 없는 백업 형식: %s" - -#, python-format -msgid "Unrecognized read_deleted value '%s'" -msgstr "인식되지 않는 read_deleted 값 '%s'" - -#, python-format -msgid "Unset gcs options: %s" -msgstr "gcs 옵션 설정 해제: %s" - -msgid "Unsupported Content-Type" -msgstr "지원되지 않는 Content-Type" - -msgid "" -"Unsupported Data ONTAP version. Data ONTAP version 7.3.1 and above is " -"supported." -msgstr "" -"지원되지 않는 Data ONTAP 버전입니다. Data ONTAP 버전 7.3.1 이상이 지원됩니다." - -#, python-format -msgid "Unsupported backup metadata version (%s)" -msgstr "지원되지 않는 백업 메타데이터 버전(%s)" - -msgid "Unsupported backup metadata version requested" -msgstr "지원되지 않는 백업 메타데이터 버전이 요청됨" - -msgid "Unsupported backup verify driver" -msgstr "지원되지 않는 백업 확인 드라이버" - -#, python-format -msgid "" -"Unsupported firmware on switch %s. Make sure switch is running firmware v6.4 " -"or higher" -msgstr "" -"스위치 %s에 지원되지 않는 펌웨어가 있습니다. 스위치가 펌웨어 v6.4 이상에서 실" -"행 중인지 확인하십시오." - -#, python-format -msgid "Unsupported volume format: %s " -msgstr "지원되지 않는 볼륨 형식: %s" - -msgid "Update QoS policy error." -msgstr "QoS 정책 업데이트 오류입니다. " - -msgid "" -"Update and delete quota operations can only be made by an admin of immediate " -"parent or by the CLOUD admin." -msgstr "" -"할당량 업데이트 및 삭제 조작은 직속 상위의 관리자 또는 CLOUD 관리자만 작성할 " -"수 있습니다. " - -msgid "" -"Update and delete quota operations can only be made to projects in the same " -"hierarchy of the project in which users are scoped to." -msgstr "" -"할당량 업데이트 및 삭제 조작은 사용자가 범위 지정되는 프로젝트의 동일한 계층" -"에 있는 프로젝트에 대해서만 작성될 수 있습니다. " - -msgid "Update list, doesn't include volume_id" -msgstr "목록 업데이트, volume_id를 포함하지 않음" - -msgid "Updated At" -msgstr "업데이트" - -msgid "Upload to glance of attached volume is not supported." -msgstr "접속된 볼륨의 글랜스에 대한 업로드가 지원되지 않습니다." 
- -msgid "Use ALUA to associate initiator to host error." -msgstr "ALUA를 사용하여 호스트에 개시자 연관 오류입니다. " - -msgid "" -"Use CHAP to associate initiator to host error. Please check the CHAP " -"username and password." -msgstr "" -"CHAP를 사용하여 호스트에 개시자 연관 오류입니다. CHAP 사용자 이름 및 비밀번호" -"를 확인하십시오. " - -msgid "User ID" -msgstr "사용자 ID" - -msgid "User does not have admin privileges" -msgstr "사용자에게 관리자 권한이 없음" - -msgid "User not authorized to perform WebDAV operations." -msgstr "사용자에게 WebDAV 조작을 수행할 권한이 없습니다." - -msgid "UserName is not configured." -msgstr "UserName이 구성되지 않았습니다." - -msgid "UserPassword is not configured." -msgstr "UserPassword가 구성되지 않았습니다." - -msgid "V2 rollback, volume is not in any storage group." -msgstr "V2 롤백, 볼륨이 스토리지 그룹에 없습니다. " - -msgid "V3 rollback" -msgstr "V3 롤백" - -msgid "VF is not enabled." -msgstr "VF가 사용되지 않습니다." - -#, python-format -msgid "VV Set %s does not exist." -msgstr "VV 설정 %s이(가) 없습니다. " - -#, python-format -msgid "Valid consumer of QoS specs are: %s" -msgstr "QoS 스펙의 유효한 이용자: %s" - -#, python-format -msgid "Valid control location are: %s" -msgstr "유효한 제어 위치: %s" - -#, python-format -msgid "Validate volume connection failed (error: %(err)s)." -msgstr "볼륨 연결 유효성 검증에 실패했습니다(오류: %(err)s). " - -#, python-format -msgid "" -"Value \"%(value)s\" is not valid for configuration option \"%(option)s\"" -msgstr "값 \"%(value)s\"이(가) 구성 옵션 \"%(option)s\"에 대해 올바르지 않음" - -#, python-format -msgid "Value %(param)s for %(param_string)s is not a boolean." -msgstr "%(param_string)s에 대한 값 %(param)s이(가) 부울이 아닙니다. " - -msgid "Value required for 'scality_sofs_config'" -msgstr "'scality_sofs_config'에 필요한 값" - -#, python-format -msgid "ValueError: %s" -msgstr "ValueError: %s" - -#, python-format -msgid "Vdisk %(name)s not involved in mapping %(src)s -> %(tgt)s." -msgstr "Vdisk %(name)s이(가) 맵핑 %(src)s -> %(tgt)s에 포함되지 않았습니다. " - -#, python-format -msgid "" -"Version %(req_ver)s is not supported by the API. Minimum is %(min_ver)s and " -"maximum is %(max_ver)s." 
-msgstr "" -"API에서 %(req_ver)s 버전을 지원하지 않습니다. 최소 %(min_ver)s 이상, 최대 " -"%(max_ver)s 이하여야 합니다." - -#, python-format -msgid "VersionedObject %s cannot retrieve object by id." -msgstr "VersionedObject %s이(가) id별로 오브젝트를 검색할 수 없습니다." - -#, python-format -msgid "VersionedObject %s does not support conditional update." -msgstr "VersionedObject %s에서 조건부 업데이트를 지원하지 않습니다." - -#, python-format -msgid "Virtual volume '%s' doesn't exist on array." -msgstr "배열에 가상 볼륨 '%s'이(가) 없습니다." - -#, python-format -msgid "Vol copy job for dest %s failed." -msgstr "%s 대상에 대한 볼륨 복사 작업에 실패했습니다." - -#, python-format -msgid "Volume %(deviceID)s not found." -msgstr "볼륨 %(deviceID)s을(를) 찾을 수 없습니다. " - -#, python-format -msgid "" -"Volume %(name)s not found on the array. Cannot determine if there are " -"volumes mapped." -msgstr "" -"배열에서 %(name)s 볼륨을 찾을 수 없습니다. 맵핑된 볼륨이 있는지 여부를 판별" -"할 수 없습니다. " - -#, python-format -msgid "Volume %(name)s was created in VNX, but in %(state)s state." -msgstr "볼륨 %(name)s을(를) VNX에 작성했지만 %(state)s 상태입니다." - -#, python-format -msgid "Volume %(vol)s could not be created in pool %(pool)s." -msgstr "볼륨 %(vol)s을(를) 풀 %(pool)s에서 작성할 수 없습니다. " - -#, python-format -msgid "Volume %(vol1)s does not match with snapshot.volume_id %(vol2)s." -msgstr "%(vol1)s 볼륨이 snapshot.volume_id %(vol2)s과(와) 일치하지 않습니다." - -#, python-format -msgid "" -"Volume %(vol_id)s status must be available to update readonly flag, but " -"current status is: %(vol_status)s." -msgstr "" -"볼륨 %(vol_id)s 상태가 읽기 전용 플래그 업데이트에 사용 가능 상태여야 하지만 " -"현재 상태가 %(vol_status)s입니다. " - -#, python-format -msgid "" -"Volume %(vol_id)s status must be available, but current status is: " -"%(vol_status)s." -msgstr "" -"볼륨 %(vol_id)s 상태가 사용 가능이어야 합니다. 현재 상태: %(vol_status)s." - -#, python-format -msgid "Volume %(volume_id)s could not be found." -msgstr "%(volume_id)s 볼륨을 찾을 수 없습니다. " - -#, python-format -msgid "" -"Volume %(volume_id)s has no administration metadata with key " -"%(metadata_key)s." 
-msgstr "" -"%(volume_id)s 볼륨에 %(metadata_key)s 키를 갖는 관리 메타데이터가 없습니다. " - -#, python-format -msgid "Volume %(volume_id)s has no metadata with key %(metadata_key)s." -msgstr "" -"%(volume_id)s 볼륨에 %(metadata_key)s 키를 갖는 메타데이터가 없습니다. " - -#, python-format -msgid "" -"Volume %(volume_id)s is currently mapped to unsupported host group %(group)s" -msgstr "" -"볼륨 %(volume_id)s이(가) 지원되지 않는 호스트 그룹 %(group)s에 현재 맵핑되어 " -"있음" - -#, python-format -msgid "Volume %(volume_id)s is not currently mapped to host %(host)s" -msgstr "볼륨 %(volume_id)s이(가) 호스트 %(host)s에 현재 맵핑되어 있지 않음" - -#, python-format -msgid "Volume %(volume_id)s is still attached, detach volume first." -msgstr "" -"%(volume_id)s 볼륨이 여전히 첨부되어 있습니다. 먼저 불륨을 분리하십시오. " - -#, python-format -msgid "Volume %(volume_id)s replication error: %(reason)s" -msgstr "볼륨 %(volume_id)s 복제 오류: %(reason)s" - -#, python-format -msgid "Volume %(volume_name)s is busy." -msgstr "볼륨 %(volume_name)s을(를) 사용 중입니다." - -#, python-format -msgid "Volume %s could not be created from source volume." -msgstr "볼륨 %s을(를) 소스 볼륨에서 작성할 수 없습니다. " - -#, python-format -msgid "Volume %s could not be created on shares." -msgstr "%s 볼륨을 공유에서 작성할 수 없습니다. " - -#, python-format -msgid "Volume %s could not be created." -msgstr "볼륨 %s을(를) 작성할 수 없습니다. " - -#, python-format -msgid "Volume %s does not exist in Nexenta SA" -msgstr "볼륨 %s이(가) Nexenta SA에 없음" - -#, python-format -msgid "Volume %s does not exist in Nexenta Store appliance" -msgstr "볼륨 %s이(가) Nexenta Store 어플라이언스에 없음" - -#, python-format -msgid "Volume %s does not exist on the array." -msgstr "배열에 %s 볼륨이 없습니다." - -#, python-format -msgid "Volume %s does not have provider_location specified, skipping." -msgstr "%s 볼륨에 지정된 provider_location이 없습니다. 건너뜁니다." - -#, python-format -msgid "Volume %s doesn't exist on array." -msgstr "배열에 %s 볼륨이 없습니다." - -#, python-format -msgid "Volume %s doesn't exist on the ZFSSA backend." -msgstr "ZFSSA 백엔드에 %s 볼륨이 없습니다." 
- -#, python-format -msgid "Volume %s is already managed by OpenStack." -msgstr "OpenStack에서 이미 볼륨 %s을(를) 관리합니다." - -#, python-format -msgid "" -"Volume %s is not of replicated type. This volume needs to be of a volume " -"type with the extra spec replication_enabled set to ' True' to support " -"replication actions." -msgstr "" -"볼륨 %s이(가) 복제된 유형이 아닙니다. 복제 작업을 지원하려면 이 볼륨은 추가 " -"사양 replication_enabled가 ' True'로 설정된 볼륨 유형이어야 합니다." - -#, python-format -msgid "" -"Volume %s is online. Set volume to offline for managing using OpenStack." -msgstr "" -"볼륨 %s이(가) 온라인입니다. OpenStack을 사용하여 관리할 볼륨을 오프라인으로 " -"설정합니다." - -#, python-format -msgid "Volume %s must not be part of a consistency group." -msgstr "볼륨 %s이(가) 일관성 그룹의 일부가 아니어야 합니다. " - -#, python-format -msgid "Volume %s not found." -msgstr "볼륨 %s을(를) 찾을 수 없습니다. " - -#, python-format -msgid "Volume %s: Error trying to extend volume" -msgstr "볼륨 %s: 볼륨 확장 시도 중 오류 발생" - -#, python-format -msgid "Volume (%s) already exists on array" -msgstr "배열에 이미 볼륨(%s)이 있음" - -#, python-format -msgid "Volume (%s) already exists on array." -msgstr "볼륨(%s)이 배열에 이미 있습니다." - -#, python-format -msgid "Volume Group %s does not exist" -msgstr "볼륨 그룹 %s이(가) 없음" - -#, python-format -msgid "Volume Type %(id)s already exists." -msgstr "%(id)s 볼륨 유형이 이미 존재합니다. " - -#, python-format -msgid "" -"Volume Type %(volume_type_id)s deletion is not allowed with volumes present " -"with the type." -msgstr "" -"유형이 있는 볼륨에는 %(volume_type_id)s 볼륨 유형 삭제가 허용되지 않습니다." - -#, python-format -msgid "" -"Volume Type %(volume_type_id)s has no extra specs with key " -"%(extra_specs_key)s." -msgstr "" -"%(volume_type_id)s 볼륨 유형에 %(extra_specs_key)s 키를 갖는 추가 스펙이 없습" -"니다. " - -msgid "Volume Type id must not be None." -msgstr "볼륨 유형 ID가 있어야 합니다. " - -#, python-format -msgid "" -"Volume [%(cb_vol)s] was not found at CloudByte storage corresponding to " -"OpenStack volume [%(ops_vol)s]." 
-msgstr "" -"CloudByte 스토리지에서 OpenStack 볼륨 [%(ops_vol)s]에 해당하는 볼륨 " -"[%(cb_vol)s]을(를) 찾을 수 없습니다." - -#, python-format -msgid "Volume [%s] not found in CloudByte storage." -msgstr "CloudByte 스토리지에서 볼륨 [%s]을(를) 찾을 수 없습니다." - -#, python-format -msgid "Volume attachment could not be found with filter: %(filter)s ." -msgstr "%(filter)s 필터로 볼륨 접속을 찾을 수 없습니다. " - -#, python-format -msgid "Volume backend config is invalid: %(reason)s" -msgstr "볼륨 백엔드 구성이 올바르지 않음: %(reason)s" - -msgid "Volume by this name already exists" -msgstr "이 이름에 의한 볼륨이 이미 존재함" - -msgid "Volume cannot be restored since it contains snapshots." -msgstr "스냅샷이 포함되었기 때문에 볼륨을 복원할 수 없습니다." - -msgid "Volume create failed while extracting volume ref." -msgstr "볼륨 ref를 추출하는 동안 볼륨 작성에 실패했습니다." - -#, python-format -msgid "Volume device file path %s does not exist." -msgstr "볼륨 디바이스 파일 경로 %s이(가) 존재하지 않습니다." - -#, python-format -msgid "Volume device not found at %(device)s." -msgstr "%(device)s에서 볼륨 디바이스를 찾을 수 없음. " - -#, python-format -msgid "Volume driver %s not initialized." -msgstr "볼륨 드라이버 %s이(가) 초기화되지 않았습니다." - -msgid "Volume driver not ready." -msgstr "볼륨 드라이버가 준비 되어있지 않습니다." - -#, python-format -msgid "Volume driver reported an error: %(message)s" -msgstr "볼륨 드라이버 오류 보고서: %(message)s" - -msgid "Volume has a temporary snapshot that can't be deleted at this time." -msgstr "현재 삭제할 수 없는 임시 스냅샷이 볼륨에 있습니다." - -msgid "Volume has children and cannot be deleted!" -msgstr "볼륨에 하위가 있으므로 삭제할 수 없습니다!" - -#, python-format -msgid "Volume is attached to a server. (%s)" -msgstr "볼륨이 서버에 연결됩니다(%s). " - -msgid "Volume is in-use." -msgstr "볼륨을 사용 중입니다." - -msgid "Volume is not available." -msgstr "볼륨을 사용할 수 없습니다. " - -msgid "Volume is not local to this node" -msgstr "볼륨이 이 노드에 대해 로컬이 아님" - -msgid "Volume is not local to this node." -msgstr "볼륨이 이 노드의 로컬이 아닙니다. " - -msgid "" -"Volume metadata backup requested but this driver does not yet support this " -"feature." -msgstr "" -"볼륨 메타데이터 백업이 요청되었으나 이 드라이버는 아직 이 기능을 설정하십시" -"오." 
- -#, python-format -msgid "Volume migration failed: %(reason)s" -msgstr "볼륨 마이그레이션 실패: %(reason)s" - -msgid "Volume must be available" -msgstr "볼륨이 사용 가능해야 함" - -msgid "Volume must be in the same availability zone as the snapshot" -msgstr "볼륨이 스냅샷과 동일한 가용성 구역에 있어야 함 " - -msgid "Volume must be in the same availability zone as the source volume" -msgstr "볼륨이 소스 볼륨과 동일한 가용성 구역에 있어야 함 " - -msgid "Volume must have a volume type" -msgstr "볼륨에 볼륨 유형이 있어야 함" - -msgid "Volume must not be replicated." -msgstr "볼륨을 복제하지 않아야 합니다." - -msgid "Volume must not have snapshots." -msgstr "볼륨에 스냅샷이 없어야 합니다." - -#, python-format -msgid "Volume not found for instance %(instance_id)s." -msgstr "%(instance_id)s 인스턴스에 대한 볼륨을 찾을 수 없습니다. " - -msgid "Volume not found on configured storage backend." -msgstr "구성된 스토리지 백엔드에서 볼륨을 찾을 수 없습니다. " - -msgid "" -"Volume not found on configured storage backend. If your volume name contains " -"\"/\", please rename it and try to manage again." -msgstr "" -"구성된 스토리지 백엔드에 볼륨을 찾을 수 없습니다. 볼륨 이름에 \"/\"가 포함되" -"지 않은 경우 이름을 변경하고 다시 관리하십시오." - -msgid "Volume not found on configured storage pools." -msgstr "구성된 스토리지 풀에서 볼륨을 찾을 수 없습니다. " - -msgid "Volume not found." -msgstr "볼륨을 찾을 수 없습니다. " - -msgid "Volume not unique." -msgstr "볼륨이 고유하지 않습니다." - -msgid "Volume not yet assigned to host." -msgstr "볼륨을 아직 호스트에 지정하지 않았습니다." - -msgid "Volume reference must contain source-name element." -msgstr "볼륨 참조에는 source-name 요소가 포함되어 있어야 합니다. " - -#, python-format -msgid "Volume replication for %(volume_id)s could not be found." -msgstr "%(volume_id)s의 볼륨 복제를 찾을 수 없습니다. " - -#, python-format -msgid "Volume service %s failed to start." -msgstr "볼륨 서비스 %s을(를) 시작하는 데 실패했습니다. " - -msgid "Volume should have agent-type set as None." -msgstr "볼륨의 에이전트 유형은 None으로 설정되어야 합니다. " - -#, python-format -msgid "" -"Volume size %(volume_size)sGB cannot be smaller than the image minDisk size " -"%(min_disk)sGB." 
-msgstr "" -"볼륨 크기 %(volume_size)sGB는 다음 이미지 minDisk 크기보다 작을 수 없습니다: " -"%(min_disk)sGB." - -#, python-format -msgid "Volume size '%(size)s' must be an integer and greater than 0" -msgstr "볼륨 크기 '%(size)s'은(는) 0보다 큰 정수여야 함" - -#, python-format -msgid "" -"Volume size '%(size)s'GB cannot be smaller than original volume size " -"%(source_size)sGB. They must be >= original volume size." -msgstr "" -"볼륨 크기 '%(size)s'GB는 원래 볼륨 크기인 %(source_size)sGB보다 작을 수 없습" -"니다. 해당 크기가 원래 볼륨 크기보다 크거나 같아야 합니다." - -#, python-format -msgid "" -"Volume size '%(size)s'GB cannot be smaller than the snapshot size " -"%(snap_size)sGB. They must be >= original snapshot size." -msgstr "" -"볼륨 크기 '%(size)s'GB은(는) 스냅샷 크기인 %(snap_size)sGB보다 작을 수 없습니" -"다. 해당 크기가 원래 스냅샷 크기보다 크거나 같아야 합니다." - -msgid "Volume size increased since the last backup. Do a full backup." -msgstr "" -"마지막 백업 이후 볼륨 크기가 증가되었습니다. 전체 백업을 수행하십시오. " - -msgid "Volume size must be a multiple of 1 GB." -msgstr "볼륨 크기는 1GB의 배수여야 합니다. " - -msgid "Volume size must multiple of 1 GB." -msgstr "볼륨 크기는 1GB의 배수여야 합니다. " - -#, python-format -msgid "Volume status must be \"available\" or \"in-use\" for snapshot. (is %s)" -msgstr "" -"볼륨 상태가 스냅샷에 대해 \"사용 가능\" 또는 \"사용 중\"이어야 합니다(%s임)" - -msgid "Volume status must be \"available\" or \"in-use\"." -msgstr "볼륨 상태가 \"사용 가능\" 또는 \"사용 중\"이어야 합니다." - -#, python-format -msgid "Volume status must be %s to reserve." -msgstr "예약하려면 볼륨 상태가 %s 이어야 합니다. " - -msgid "Volume status must be 'available'." -msgstr "볼륨 상태가 '사용 가능'이어야 합니다. " - -msgid "Volume to Initiator Group mapping already exists" -msgstr "개시자 그룹에 대한 볼륨 맵핑이 이미 있음" - -#, python-format -msgid "" -"Volume to be backed up must be available or in-use, but the current status " -"is \"%s\"." -msgstr "" -"백업할 볼륨은 사용 가능하거나 사용 중이어야 하지만 현재 상태가 \"%s\"입니다. " - -msgid "Volume to be restored to must be available" -msgstr "복원할 볼륨이 사용 가능해야 함" - -#, python-format -msgid "Volume type %(volume_type_id)s could not be found." 
-msgstr "%(volume_type_id)s 볼륨 유형을 찾을 수 없습니다. " - -#, python-format -msgid "Volume type ID '%s' is invalid." -msgstr "볼륨 유형 ID '%s'이(가) 올바르지 않습니다." - -#, python-format -msgid "" -"Volume type access for %(volume_type_id)s / %(project_id)s combination " -"already exists." -msgstr "" -"%(volume_type_id)s / %(project_id)s 조합에 대한 볼륨 유형 액세스가 이미 존재" -"합니다." - -#, python-format -msgid "" -"Volume type access not found for %(volume_type_id)s / %(project_id)s " -"combination." -msgstr "" -"%(volume_type_id)s / %(project_id)s 조합에 대한 볼륨 유형 액세스를 찾을 수 없" -"습니다." - -#, python-format -msgid "Volume type encryption for type %(type_id)s already exists." -msgstr "%(type_id)s 유형에 대한 볼륨 유형 암호화가 이미 존재합니다. " - -#, python-format -msgid "Volume type encryption for type %(type_id)s does not exist." -msgstr "유형 %(type_id)s의 볼륨 유형 암호화가 존재하지 않습니다." - -msgid "Volume type name can not be empty." -msgstr "볼륨 유형 이름은 빈 상태로 둘 수 없습니다." - -#, python-format -msgid "Volume type with name %(volume_type_name)s could not be found." -msgstr "이름이 %(volume_type_name)s인 볼륨 유형을 찾을 수 없습니다. " - -#, python-format -msgid "" -"Volume: %(volumeName)s is not a concatenated volume. You can only perform " -"extend on concatenated volume. Exiting..." -msgstr "" -"%(volumeName)s 볼륨은 연결된 볼륨이 아닙니다. 연결된 볼륨에 대해서만 확장을 " -"수행할 수 있습니다. 종료 중..." - -#, python-format -msgid "Volume: %(volumeName)s was not added to storage group %(sgGroupName)s." -msgstr "" -"볼륨: %(volumeName)s이(가) 스토리지 그룹 %(sgGroupName)s에 추가되지 않았습니" -"다." - -#, python-format -msgid "Volume: %s is already being managed by Cinder." -msgstr "Cinder에서 이미 볼륨: %s을(를) 관리 중입니다." - -msgid "" -"Volumes/account exceeded on both primary and secondary SolidFire accounts." -msgstr "기본 및 보조 SolidFire 계정 모두에서 볼륨/계정이 초과되었습니다." - -#, python-format -msgid "" -"VzStorage config 'vzstorage_used_ratio' invalid. Must be > 0 and <= 1.0: %s." -msgstr "" -"VzStorage 구성 'vzstorage_used_ratio'가 올바르지 않습니다. 0보다 크고 1.0 이" -"하여야 함: %s." 
- -#, python-format -msgid "VzStorage config file at %(config)s doesn't exist." -msgstr "%(config)s의 VzStorage 구성 파일이 존재하지 않습니다. " - -msgid "Wait replica complete timeout." -msgstr "복제본 완료 대기 제한시간 초과" - -#, python-format -msgid "Wait synchronize failed. Running status: %s." -msgstr "동기화 대기 실패. 실행 상태: %s." - -msgid "" -"Waiting for all nodes to join cluster. Ensure all sheep daemons are running." -msgstr "" -"모든 노드가 클러스터를 결합할 때까지 기다리는 중입니다. 모든 sheep 디먼이 실" -"행 중인지 확인하십시오. " - -msgid "We should not do switch over on primary array." -msgstr "기본 배열로 전환할 수 없습니다." - -msgid "X-IO Volume Driver exception!" -msgstr "X-IO 볼륨 드라이버 예외 발생!" - -msgid "XtremIO not configured correctly, no iscsi portals found" -msgstr "XtremIO가 올바르게 구성되지 않음, iscsi 포털을 찾을 수 없음" - -msgid "XtremIO not initialized correctly, no clusters found" -msgstr "XtremIO가 올바르게 초기화되지 않음, 클러스터를 찾을 수 없음" - -msgid "You must implement __call__" -msgstr "__call__을 구현해야 합니다. " - -msgid "" -"You must install hpe3parclient before using 3PAR drivers. Run \"pip install " -"python-3parclient\" to install the hpe3parclient." -msgstr "" -"3PAR 드라이버를 사용하기 전에 hpe3parclient를 설치해야 합니다. \"pip install " -"python-3parclient\"를 실행하여 hpe3parclient를 설치하십시오." - -msgid "You must supply an array in your EMC configuration file." -msgstr "EMC 구성 파일에서 배열을 제공해야 합니다. " - -#, python-format -msgid "" -"Your original size: %(originalVolumeSize)s GB is greater than: %(newSize)s " -"GB. Only Extend is supported. Exiting..." -msgstr "" -"원래 크기 %(originalVolumeSize)sGB가 %(newSize)sGB보다 큽니다. 확장만 지원됩" -"니다. 종료 중..." - -#, python-format -msgid "ZeroDivisionError: %s" -msgstr "ZeroDivisionError: %s" - -msgid "Zone" -msgstr "영역" - -#, python-format -msgid "Zoning Policy: %s, not recognized" -msgstr "구역 지정 정책: %s, 인식되지 않음" - -#, python-format -msgid "_create_and_copy_vdisk_data: Failed to get attributes for vdisk %s." -msgstr "_create_and_copy_vdisk_data: vdisk %s의 속성을 가져오지 못했습니다." - -msgid "_create_host failed to return the host name." 
-msgstr "_create_host가 호스트 이름을 리턴하지 못함" - -msgid "" -"_create_host: Can not translate host name. Host name is not unicode or " -"string." -msgstr "" -"_create_host: 호스트 이름을 변환할 수 없습니다. 호스트 이름이 유니코드 또는 " -"문자열이 아닙니다. " - -msgid "_create_host: No connector ports." -msgstr "_create_host: 커넥터 포트가 없습니다." - -msgid "_create_local_cloned_volume, Replication Service not found." -msgstr "_create_local_cloned_volume, 복제 서비스를 찾을 수 없습니다." - -#, python-format -msgid "" -"_create_local_cloned_volume, volumename: %(volumename)s, sourcevolumename: " -"%(sourcevolumename)s, source volume instance: %(source_volume)s, target " -"volume instance: %(target_volume)s, Return code: %(rc)lu, Error: " -"%(errordesc)s." -msgstr "" -"_create_local_cloned_volume, volumename: %(volumename)s, sourcevolumename: " -"%(sourcevolumename)s, 소스 볼륨 인스턴스: %(source_volume)s, 대상 볼륨 인스턴" -"스: %(target_volume)s, 리턴 코드: %(rc)lu, 오류: %(errordesc)s." - -#, python-format -msgid "" -"_create_vdisk %(name)s - did not find success message in CLI output.\n" -" stdout: %(out)s\n" -" stderr: %(err)s" -msgstr "" -"_create_vdisk %(name)s - CLI 출력에서 성공 메시지를 찾을 수 없습니다. \n" -" stdout: %(out)s\n" -" stderr: %(err)s" - -msgid "_create_volume_name, id_code is None." -msgstr "_create_volume_name, id_code가 None입니다." - -msgid "_delete_copysession, Cannot find Replication Service" -msgstr "_delete_copysession, 복제 서비스를 찾을 수 없습니다." - -#, python-format -msgid "" -"_delete_copysession, copy session type is undefined! copy session: " -"%(cpsession)s, copy type: %(copytype)s." -msgstr "" -"_delete_copysession, 복사 세션 유형이 정의되지 않았습니다! 복사 세션: " -"%(cpsession)s, 복사 유형: %(copytype)s." - -#, python-format -msgid "" -"_delete_copysession, copysession: %(cpsession)s, operation: %(operation)s, " -"Return code: %(rc)lu, Error: %(errordesc)s." -msgstr "" -"_delete_copysession, copysession: %(cpsession)s, 조작: %(operation)s, 리턴 코" -"드: %(rc)lu, 오류: %(errordesc)s." 
- -#, python-format -msgid "" -"_delete_volume, volumename: %(volumename)s, Return code: %(rc)lu, Error: " -"%(errordesc)s." -msgstr "" -"_delete_volume, volumename: %(volumename)s, 리턴 코드: %(rc)lu, 오류: " -"%(errordesc)s." - -#, python-format -msgid "" -"_delete_volume, volumename: %(volumename)s, Storage Configuration Service " -"not found." -msgstr "" -"_delete_volume, volumename: %(volumename)s, 스토리지 구성 서비스를 찾을 수 없" -"습니다." - -#, python-format -msgid "" -"_exec_eternus_service, classname: %(classname)s, InvokeMethod, cannot " -"connect to ETERNUS." -msgstr "" -"_exec_eternus_service, classname: %(classname)s, InvokeMethod, ETERNUS에 연결" -"할 수 없습니다." - -msgid "_extend_volume_op: Extending a volume with snapshots is not supported." -msgstr "_extend_volume_op: 스냅샷이 포함된 볼륨 확장은 지원되지 않습니다. " - -#, python-format -msgid "" -"_find_affinity_group, connector: %(connector)s, Associators: " -"FUJITSU_AuthorizedTarget, cannot connect to ETERNUS." -msgstr "" -"_find_affinity_group, 커넥터: %(connector)s, 연관자: " -"FUJITSU_AuthorizedTarget, ETERNUS에 연결할 수 없습니다." - -#, python-format -msgid "" -"_find_affinity_group, connector: %(connector)s, EnumerateInstanceNames, " -"cannot connect to ETERNUS." -msgstr "" -"_find_affinity_group, 커넥터: %(connector)s, EnumerateInstanceNames, ETERNUS" -"에 연결할 수 없습니다." - -#, python-format -msgid "" -"_find_affinity_group,connector: %(connector)s,AssocNames: " -"FUJITSU_ProtocolControllerForUnit, cannot connect to ETERNUS." -msgstr "" -"_find_affinity_group, 커넥터: %(connector)s,AssocNames: " -"FUJITSU_ProtocolControllerForUnit, ETERNUS에 연결할 수 없습니다." - -#, python-format -msgid "" -"_find_copysession, ReferenceNames, vol_instance: %(vol_instance_path)s, " -"Cannot connect to ETERNUS." -msgstr "" -"_find_copysession, ReferenceNames, vol_instance: %(vol_instance_path)s, " -"ETERNUS에 연결할 수 없습니다." - -#, python-format -msgid "" -"_find_eternus_service, classname: %(classname)s, EnumerateInstanceNames, " -"cannot connect to ETERNUS." 
-msgstr "" -"_find_eternus_service, classname: %(classname)s, EnumerateInstanceNames, " -"ETERNUS에 연결할 수 없습니다." - -#, python-format -msgid "_find_initiator_names, connector: %(connector)s, initiator not found." -msgstr "" -"_find_initiator_names, 커넥터: %(connector)s, 개시자를 찾을 수 없습니다." - -#, python-format -msgid "" -"_find_lun, volumename: %(volumename)s, EnumerateInstanceNames, cannot " -"connect to ETERNUS." -msgstr "" -"_find_lun, volumename: %(volumename)s, EnumerateInstanceNames, ETERNUS에 연결" -"할 수 없습니다." - -#, python-format -msgid "" -"_find_pool, eternus_pool:%(eternus_pool)s, EnumerateInstances, cannot " -"connect to ETERNUS." -msgstr "" -"_find_pool, eternus_pool:%(eternus_pool)s, EnumerateInstances, ETERNUS에 연결" -"할 수 없습니다." - -#, python-format -msgid "" -"_get_drvcfg, filename: %(filename)s, tagname: %(tagname)s, data is None!! " -"Please edit driver configuration file and correct." -msgstr "" -"_get_drvcfg, filename: %(filename)s, tagname: %(tagname)s, 데이터가 없습니" -"다! 드라이버 구성 파일을 편집하고 정정하십시오." - -#, python-format -msgid "" -"_get_eternus_connection, filename: %(filename)s, ip: %(ip)s, port: %(port)s, " -"user: %(user)s, passwd: ****, url: %(url)s, FAILED!!." -msgstr "" -"_get_eternus_connection, 파일 이름: %(filename)s, ip: %(ip)s, 포트: " -"%(port)s, 사용자: %(user)s, passwd: ****, url: %(url)s, 실패!!." - -#, python-format -msgid "" -"_get_eternus_iscsi_properties, iscsiip list: %(iscsiip_list)s, iqn not found." -msgstr "" -"_get_eternus_iscsi_properties, iscsiip list: %(iscsiip_list)s, iqn을 찾을 수 " -"없습니다." - -#, python-format -msgid "" -"_get_eternus_iscsi_properties, iscsiip: %(iscsiip)s, AssociatorNames: " -"CIM_BindsTo, cannot connect to ETERNUS." -msgstr "" -"_get_eternus_iscsi_properties, iscsiip: %(iscsiip)s, AssociatorNames: " -"CIM_BindsTo, ETERNUS에 연결할 수 없습니다." - -#, python-format -msgid "" -"_get_eternus_iscsi_properties, iscsiip: %(iscsiip)s, EnumerateInstanceNames, " -"cannot connect to ETERNUS." 
-msgstr "" -"_get_eternus_iscsi_properties, iscsiip: %(iscsiip)s, EnumerateInstanceNames, " -"ETERNUS에 연결할 수 없습니다.." - -#, python-format -msgid "" -"_get_eternus_iscsi_properties, iscsiip: %(iscsiip)s, GetInstance, cannot " -"connect to ETERNUS." -msgstr "" -"_get_eternus_iscsi_properties, iscsiip: %(iscsiip)s, GetInstance, ETERNUS에 " -"연결할 수 없습니다." - -#, python-format -msgid "" -"_get_hdr_dic: attribute headers and values do not match.\n" -" Headers: %(header)s\n" -" Values: %(row)s." -msgstr "" -"_get_hdr_dic: 속성 헤더 및 값이 일치하지 않습니다. \n" -" 헤더: %(header)s\n" -" 값: %(row)s." - -msgid "_get_host_from_connector failed to return the host name for connector." -msgstr "" -"_get_host_from_connector에서 커넥터의 호스트 이름을 리턴하지 못했습니다." - -#, python-format -msgid "" -"_get_mapdata_fc, getting host-affinity from aglist/vol_instance failed, " -"affinitygroup: %(ag)s, ReferenceNames, cannot connect to ETERNUS." -msgstr "" -"_get_mapdata_fc, aglist/vol_instance에서 호스트 연관 관계를 가져오는 데 실" -"패, affinitygroup: %(ag)s, ReferenceNames, ETERNUS에 연결할 수 없습니다." - -#, python-format -msgid "" -"_get_mapdata_fc, getting host-affinity instance failed, volmap: %(volmap)s, " -"GetInstance, cannot connect to ETERNUS." -msgstr "" -"_get_mapdata_fc, 호스트 연관 관계 인스턴스를 가져오는 데 실패, volmap: " -"%(volmap)s, GetInstance, ETERNUS에 연결할 수 없습니다." - -msgid "" -"_get_mapdata_iscsi, Associators: FUJITSU_SAPAvailableForElement, cannot " -"connect to ETERNUS." -msgstr "" -"_get_mapdata_iscsi, Associators: FUJITSU_SAPAvailableForElement, ETERNUS에 연" -"결할 수 없습니다." - -#, python-format -msgid "" -"_get_mapdata_iscsi, affinitygroup: %(ag)s, ReferenceNames, cannot connect to " -"ETERNUS." -msgstr "" -"_get_mapdata_iscsi, affinitygroup: %(ag)s, ReferenceNames, ETERNUS에 연결할 " -"수 없습니다." - -#, python-format -msgid "" -"_get_mapdata_iscsi, vol_instance: %(vol_instance)s, ReferenceNames: " -"CIM_ProtocolControllerForUnit, cannot connect to ETERNUS." 
-msgstr "" -"_get_mapdata_iscsi, vol_instance: %(vol_instance)s, ReferenceNames: " -"CIM_ProtocolControllerForUnit, ETERNUS에 연결할 수 없습니다." - -#, python-format -msgid "" -"_get_mapdata_iscsi, volmap: %(volmap)s, GetInstance, cannot connect to " -"ETERNUS." -msgstr "" -"_get_mapdata_iscsi, volmap: %(volmap)s, GetInstance, ETERNUS에 연결할 수 없습" -"니다." - -msgid "_get_target_port, EnumerateInstances, cannot connect to ETERNUS." -msgstr "_get_target_port, EnumerateInstances, ETERNUS에 연결할 수 없습니다." - -#, python-format -msgid "_get_target_port, protcol: %(protocol)s, target_port not found." -msgstr "" -"_get_target_port, protcol: %(protocol)s, target_port를 찾을 수 없습니다." - -#, python-format -msgid "_get_unmanaged_replay: Cannot find snapshot named %s" -msgstr "_get_unmanaged_replay: %s(이)라는 스냅샷을 찾을 수 없음" - -#, python-format -msgid "_get_unmanaged_replay: Cannot find volume id %s" -msgstr "_get_unmanaged_replay: 볼륨 id %s을(를) 찾을 수 없음" - -msgid "_get_unmanaged_replay: Must specify source-name." -msgstr "_get_unmanaged_replay: source-name을 지정해야 합니다." - -msgid "" -"_get_vdisk_map_properties: Could not get FC connection information for the " -"host-volume connection. Is the host configured properly for FC connections?" -msgstr "" -"_get_vdisk_map_properties: 호스트-볼륨 연결에 대한 FC 연결 정보를 가져올 수 " -"없습니다. FC 연결에 사용하는 호스트가 올바르게 구성되었습니까?" - -#, python-format -msgid "" -"_get_vdisk_map_properties: No node found in I/O group %(gid)s for volume " -"%(vol)s." -msgstr "" -"_get_vdisk_map_properties: 다음 볼륨의 I/O 그룹 %(gid)s에서 노드를 찾을 수 없" -"습니다. 볼륨: %(vol)s." - -#, python-format -msgid "" -"_map_lun, vol_instance.path:%(vol)s, volumename: %(volumename)s, volume_uid: " -"%(uid)s, initiator: %(initiator)s, target: %(tgt)s, aglist: %(aglist)s, " -"Storage Configuration Service not found." -msgstr "" -"_map_lun, vol_instance.path:%(vol)s, volumename: %(volumename)s, volume_uid: " -"%(uid)s, 개시자: %(initiator)s, 대상: %(tgt)s, aglist: %(aglist)s, 스토리지 " -"구성 서비스를 찾을 수 없습니다." 
- -#, python-format -msgid "" -"_unmap_lun, vol_instance.path: %(volume)s, volumename: %(volumename)s, " -"volume_uid: %(uid)s, aglist: %(aglist)s, Controller Configuration Service " -"not found." -msgstr "" -"_unmap_lun, vol_instance.path: %(volume)s, volumename: %(volumename)s, " -"volume_uid: %(uid)s, aglist: %(aglist)s, 컨트롤러 구성 서비스를 찾을 수 없습" -"니다." - -#, python-format -msgid "" -"_unmap_lun, volumename: %(volumename)s, volume_uid: %(volume_uid)s, " -"AffinityGroup: %(ag)s, Return code: %(rc)lu, Error: %(errordesc)s." -msgstr "" -"_unmap_lun, volumename: %(volumename)s, volume_uid: %(volume_uid)s, " -"AffinityGroup: %(ag)s, 리턴 코드: %(rc)lu, 오류: %(errordesc)s." - -#, python-format -msgid "" -"_unmap_lun,vol_instance.path: %(volume)s, AssociatorNames: " -"CIM_ProtocolControllerForUnit, cannot connect to ETERNUS." -msgstr "" -"_unmap_lun,vol_instance.path: %(volume)s, AssociatorNames: " -"CIM_ProtocolControllerForUnit, ETERNUS에 연결할 수 없습니다." - -msgid "_update_volume_stats: Could not get storage pool data." -msgstr "_update_volume_stats: 스토리지 풀 데이터를 가져오지 못했습니다." - -#, python-format -msgid "" -"_wait_for_copy_complete, cpsession: %(cpsession)s, copysession state is " -"BROKEN." -msgstr "" -"_wait_for_copy_complete, cpsession: %(cpsession)s, copysession 상태가 손상되" -"었습니다." - -#, python-format -msgid "" -"add_vdisk_copy failed: A copy of volume %s exists. Adding another copy would " -"exceed the limit of 2 copies." -msgstr "" -"add_vdisk_copy 실패: %s 볼륨의 사본이 있습니다. 또 하나의 사본을 추가하면 한" -"계값인 2개의 사본을 초과합니다." - -msgid "add_vdisk_copy started without a vdisk copy in the expected pool." -msgstr "add_vdisk_copy가 예상 풀에서 vdisk 사본 없이 시작되었습니다." - -#, python-format -msgid "all_tenants must be a boolean, got '%s'." -msgstr "all_tenants는 부울이어야 하지만 '%s'이(가) 수신되었습니다. 
" - -msgid "already created" -msgstr "이미 작성됨" - -msgid "already_created" -msgstr "already_created" - -msgid "attach snapshot from remote node" -msgstr "원격 노드에서 스냅샷 연결" - -#, python-format -msgid "attribute %s not lazy-loadable" -msgstr "속성 %s이(가) lazy-loadable 속성이 아님" - -#, python-format -msgid "" -"backup: %(vol_id)s failed to create device hardlink from %(vpath)s to " -"%(bpath)s.\n" -"stdout: %(out)s\n" -" stderr: %(err)s" -msgstr "" -"백업: %(vol_id)s이(가) %(vpath)s에서 %(bpath)s으로 백업 하드 링크를 제거하지 " -"못했습니다.\n" -"stdout: %(out)s\n" -" stderr: %(err)s" - -#, python-format -msgid "" -"backup: %(vol_id)s failed to obtain backup success notification from " -"server.\n" -"stdout: %(out)s\n" -" stderr: %(err)s" -msgstr "" -"백업: %(vol_id)s이(가) 서버에서 백업 성공 알림을 얻지 못했습니다.\n" -"stdout: %(out)s\n" -" stderr: %(err)s" - -#, python-format -msgid "" -"backup: %(vol_id)s failed to run dsmc due to invalid arguments on " -"%(bpath)s.\n" -"stdout: %(out)s\n" -" stderr: %(err)s" -msgstr "" -"백업: %(vol_id)s이(가) %(bpath)s으로 백업 하드 링크를 제거하지 못했습니다.\n" -"stdout: %(out)s\n" -" stderr: %(err)s" - -#, python-format -msgid "" -"backup: %(vol_id)s failed to run dsmc on %(bpath)s.\n" -"stdout: %(out)s\n" -" stderr: %(err)s" -msgstr "" -"백업: %(vol_id)s이(가) %(bpath)s에서 dsmc를 실행하는 데 실패했습니다. \n" -"stdout: %(out)s\n" -" stderr: %(err)s" - -#, python-format -msgid "backup: %(vol_id)s failed. %(path)s is not a file." -msgstr "백업: %(vol_id)s이(가) 실패했습니다. %(path)s이(가) 파일이 아닙니다. " - -#, python-format -msgid "" -"backup: %(vol_id)s failed. %(path)s is unexpected file type. Block or " -"regular files supported, actual file mode is %(vol_mode)s." -msgstr "" -"백업: %(vol_id)s이(가) 실패했습니다. %(path)s이(가) 예상치 못한 파일 유형입니" -"다. 블록 또는 일반 파일이 지원되며 실제 파일 모드는 %(vol_mode)s입니다." - -#, python-format -msgid "" -"backup: %(vol_id)s failed. Cannot obtain real path to volume at %(path)s." -msgstr "" -"백업: %(vol_id)s이(가) 실패했습니다. %(path)s에서 볼륨에 대한 실제 경로를 가" -"져올 수 없습니다." 
- -msgid "being attached by different mode" -msgstr "다른 모드로 접속하는 중 " - -#, python-format -msgid "call failed: %r" -msgstr "호출 실패: %r" - -msgid "call failed: GARBAGE_ARGS" -msgstr "호출 실패: GARBAGE_ARGS" - -msgid "call failed: PROC_UNAVAIL" -msgstr "호출 실패: PROC_UNAVAIL" - -#, python-format -msgid "call failed: PROG_MISMATCH: %r" -msgstr "호출 실패: PROG_MISMATCH: %r" - -msgid "call failed: PROG_UNAVAIL" -msgstr "호출 실패: PROG_UNAVAIL" - -#, python-format -msgid "can't find lun-map, ig:%(ig)s vol:%(vol)s" -msgstr "lun 맵을 찾을 수 없음, ig:%(ig)s vol:%(vol)s" - -msgid "can't find the volume to extend" -msgstr "확장할 볼륨을 찾을 수 없음" - -msgid "can't handle both name and index in req" -msgstr "요청의 이름 및 색인을 둘 다 처리할 수 없음" - -msgid "cannot understand JSON" -msgstr "JSON을 이해할 수 없음" - -#, python-format -msgid "cg-%s" -msgstr "cg-%s" - -msgid "cgsnapshot assigned" -msgstr "cgsnapshot가 지정됨" - -msgid "cgsnapshot changed" -msgstr "cgsnapshot가 변경됨" - -msgid "cgsnapshots assigned" -msgstr "cgsnapshots가 지정됨" - -msgid "cgsnapshots changed" -msgstr "cgsnapshots가 변경됨" - -msgid "" -"check_for_setup_error: Password or SSH private key is required for " -"authentication: set either san_password or san_private_key option." -msgstr "" -"check_for_setup_error: 인증에 비밀번호 또는 SSH 개인 키가 필요함: " -"san_password 또는 san_private_key 옵션을 설정하십시오." - -msgid "check_for_setup_error: Unable to determine system id." -msgstr "check_for_setup_error: 시스템 ID를 판별할 수 없습니다." - -msgid "check_for_setup_error: Unable to determine system name." -msgstr "check_for_setup_error: 시스템 이름을 판별할 수 없습니다." - -msgid "check_hypermetro_exist error." -msgstr "check_hypermetro_exist 오류." 
- -#, python-format -msgid "clone depth exceeds limit of %s" -msgstr "복제 깊이가 %s 한계를 초과함 " - -msgid "consistencygroup assigned" -msgstr "consistencygroup이 지정됨" - -msgid "consistencygroup changed" -msgstr "consistencygroup이 변경되지 않음" - -msgid "control_location must be defined" -msgstr "control_location을 정의해야 함 " - -msgid "create_cloned_volume, Source Volume does not exist in ETERNUS." -msgstr "create_cloned_volume, 소스 볼륨이 ETERNUS에 없습니다." - -#, python-format -msgid "" -"create_cloned_volume, target volume instancename: %(volume_instancename)s, " -"Get Instance Failed." -msgstr "" -"create_cloned_volume, 대상 볼륨 instancename: %(volume_instancename)s, 인스턴" -"스 가져오기에 실패했습니다." - -msgid "create_cloned_volume: Source and destination size differ." -msgstr "create_cloned_volume: 소스 및 대상 크기가 다릅니다. " - -#, python-format -msgid "" -"create_cloned_volume: source volume %(src_vol)s size is %(src_size)dGB and " -"doesn't fit in target volume %(tgt_vol)s of size %(tgt_size)dGB." -msgstr "" -"create_cloned_volume: 소스 볼륨 %(src_vol)s 크기는 %(src_size)dGB이고 대상 볼" -"륨 %(tgt_vol)s의 크기 %(tgt_size)dGB에 맞지 않습니다." - -msgid "" -"create_consistencygroup_from_src must be creating from a CG snapshot, or a " -"source CG." -msgstr "" -"create_consistencygroup_from_src는 CG 스냅샷이나 소스 CG에서 작성해야 합니다." - -msgid "" -"create_consistencygroup_from_src only supports a cgsnapshot source or a " -"consistency group source. Multiple sources cannot be used." -msgstr "" -"create_consistencygroup_from_src에서는 cgsnapshot 소스나 일관성 그룹 소스만 " -"지원합니다. 여러 소스를 사용할 수 없습니다." - -#, python-format -msgid "create_copy: Source vdisk %(src)s (%(src_id)s) does not exist." -msgstr "create_copy: 소스 vdisk %(src)s(%(src_id)s)이(가) 없습니다. " - -#, python-format -msgid "create_copy: Source vdisk %(src)s does not exist." -msgstr "create_copy: 소스 vdisk %(src)s이(가) 없습니다. " - -msgid "create_host: Host name is not unicode or string." -msgstr "create_host: 호스트 이름이 유니코드 또는 문자열이 아닙니다. " - -msgid "create_host: No initiators or wwpns supplied." 
-msgstr "create_host: 개시자 또는 wwpn이 제공되지 않았습니다." - -msgid "create_hypermetro_pair error." -msgstr "create_hypermetro_pair 오류." - -#, python-format -msgid "create_snapshot, eternus_pool: %(eternus_pool)s, pool not found." -msgstr "" -"create_snapshot, eternus_pool: %(eternus_pool)s, 풀을 찾을 수 없습니다." - -#, python-format -msgid "" -"create_snapshot, snapshotname: %(snapshotname)s, source volume name: " -"%(volumename)s, vol_instance.path: %(vol_instance)s, dest volume name: " -"%(d_volumename)s, pool: %(pool)s, Return code: %(rc)lu, Error: %(errordesc)s." -msgstr "" -"create_snapshot, snapshotname: %(snapshotname)s, 소스 볼륨 이름: " -"%(volumename)s, vol_instance.path: %(vol_instance)s, 대상 볼륨 이름: " -"%(d_volumename)s, 풀: %(pool)s, 리턴 코드: %(rc)lu, 오류: %(errordesc)s." - -#, python-format -msgid "" -"create_snapshot, volumename: %(s_volumename)s, source volume not found on " -"ETERNUS." -msgstr "" -"create_snapshot, volumename: %(s_volumename)s, 소스 볼륨이 TERNUS에 없습니다." - -#, python-format -msgid "" -"create_snapshot, volumename: %(volumename)s, Replication Service not found." -msgstr "" -"create_snapshot, volumename: %(volumename)s, 복제 서비스를 찾을 수 없습니다." - -#, python-format -msgid "" -"create_snapshot: Volume status must be \"available\" or \"in-use\" for " -"snapshot. The invalid status is %s." -msgstr "" -"create_snapshot: 스냅샷에 대한 볼륨 상태는 \"사용 가능\" 또는 \"사용 중\"이어" -"야 합니다. 올바르지 않은 상태는 %s입니다." - -msgid "create_snapshot: get source volume failed." -msgstr "create_snapshot: 소스 볼륨 가져오기에 실패했습니다." - -#, python-format -msgid "" -"create_volume, volume: %(volume)s, EnumerateInstances, cannot connect to " -"ETERNUS." -msgstr "" -"create_volume, 볼륨: %(volume)s, EnumerateInstances, ETERNUS에 연결할 수 없습" -"니다." - -#, python-format -msgid "" -"create_volume, volume: %(volume)s, volumename: %(volumename)s, eternus_pool: " -"%(eternus_pool)s, Storage Configuration Service not found." 
-msgstr "" -"create_volume, 볼륨: %(volume)s, volumename: %(volumename)s, eternus_pool: " -"%(eternus_pool)s, 스토리지 구성 서비스를 찾을 수 없습니다." - -#, python-format -msgid "" -"create_volume, volumename: %(volumename)s, poolname: %(eternus_pool)s, " -"Return code: %(rc)lu, Error: %(errordesc)s." -msgstr "" -"create_volume, volumename: %(volumename)s, poolname: %(eternus_pool)s, 리턴 " -"코드: %(rc)lu, 오류: %(errordesc)s." - -msgid "create_volume_from_snapshot, Source Volume does not exist in ETERNUS." -msgstr "create_volume_from_snapshot, 소스 볼륨이 ETERNUS에 없습니다." - -#, python-format -msgid "" -"create_volume_from_snapshot, target volume instancename: " -"%(volume_instancename)s, Get Instance Failed." -msgstr "" -"create_volume_from_snapshot, 대상 볼륨 instancename: " -"%(volume_instancename)s, 인스턴스 가져오기에 실패했습니다." - -#, python-format -msgid "create_volume_from_snapshot: Snapshot %(name)s does not exist." -msgstr "create_volume_from_snapshot: %(name)s 스냅샷이 없습니다. " - -#, python-format -msgid "" -"create_volume_from_snapshot: Snapshot status must be \"available\" for " -"creating volume. The invalid status is: %s." -msgstr "" -"create_volume_from_snapshot: 볼륨 작성에 스냅샷 상태가 \"사용 가능\"해야 합니" -"다. 올바르지 않은 상태는 %s입니다." - -msgid "" -"create_volume_from_snapshot: Volume size is different from snapshot based " -"volume." -msgstr "create_volume_from_snapshot: 볼륨 크기가 스냅샷 기반 볼륨과 다릅니다." - -#, python-format -msgid "" -"delete: %(vol_id)s failed to run dsmc due to invalid arguments with stdout: " -"%(out)s\n" -" stderr: %(err)s" -msgstr "" -"삭제: %(vol_id)s이(가) 올바르지 않은 인수 때문에 dsmc를 실행하는 데 실패함. " -"stdout: %(out)s\n" -" stderr: %(err)s" - -#, python-format -msgid "" -"delete: %(vol_id)s failed to run dsmc with stdout: %(out)s\n" -" stderr: %(err)s" -msgstr "" -"삭제: %(vol_id)s이(가) dsmc를 실행하는 데 실패함. stdout: %(out)s\n" -" stderr: %(err)s" - -msgid "delete_hypermetro error." -msgstr "delete_hypermetro 오류." - -#, python-format -msgid "delete_initiator: %s ACL not found. Continuing." 
-msgstr "delete_initiator: %s ACL을 찾을 수 없습니다. 계속합니다." - -msgid "delete_replication error." -msgstr "delete_replication 오류." - -#, python-format -msgid "deleting snapshot %(snapshot_name)s that has dependent volumes" -msgstr "종속 볼륨을 갖는 %(snapshot_name)s 스냅샷 삭제 중" - -#, python-format -msgid "deleting volume %(volume_name)s that has snapshot" -msgstr "스냅샷을 갖는 %(volume_name)s 볼륨 삭제 중" - -msgid "detach snapshot from remote node" -msgstr "원격 노드에서 스냅샷 분리" - -msgid "do_setup: No configured nodes." -msgstr "do_setup: 구성된 노드가 없습니다." - -#, python-format -msgid "" -"error writing object to swift, MD5 of object in swift %(etag)s is not the " -"same as MD5 of object sent to swift %(md5)s" -msgstr "" -"오브젝트를 swift에 기록하는 중 오류 발생. %(etag)s swift에 있는 오브젝트의 " -"MD5는 %(md5)s swift로 보낸 오브젝트의 MD5와 같지 않음" - -#, python-format -msgid "extend_volume, eternus_pool: %(eternus_pool)s, pool not found." -msgstr "extend_volume, eternus_pool: %(eternus_pool)s, 풀을 찾을 수 없습니다." - -#, python-format -msgid "" -"extend_volume, volume: %(volume)s, volumename: %(volumename)s, eternus_pool: " -"%(eternus_pool)s, Storage Configuration Service not found." -msgstr "" -"extend_volume, 볼륨: %(volume)s, volumename: %(volumename)s, eternus_pool: " -"%(eternus_pool)s, 스토리지 구성 서비스를 찾을 수 없습니다." - -#, python-format -msgid "" -"extend_volume, volumename: %(volumename)s, Return code: %(rc)lu, Error: " -"%(errordesc)s, PoolType: %(pooltype)s." -msgstr "" -"extend_volume, volumename: %(volumename)s, 리턴 코드: %(rc)lu, 오류: " -"%(errordesc)s, PoolType: %(pooltype)s." - -#, python-format -msgid "extend_volume, volumename: %(volumename)s, volume not found." -msgstr "extend_volume, volumename: %(volumename)s, 볼륨을 찾을 수 없습니다." 
- -msgid "failed to create new_volume on destination host" -msgstr "대상 호스트에 new_volume을 작성하지 못함 " - -msgid "fake" -msgstr "fake" - -#, python-format -msgid "file already exists at %s" -msgstr "%s에 파일이 이미 있음 " - -msgid "fileno is not supported by SheepdogIOWrapper" -msgstr "SheepdogIOWrapper는 fileno를 지원하지 않음" - -msgid "fileno() not supported by RBD()" -msgstr "RBD()에서 fileno()를 지원하지 않음 " - -#, python-format -msgid "filesystem %s does not exist in Nexenta Store appliance" -msgstr "파일 시스템 %s이(가) Nexenta Store 어플라이언스에 없음" - -msgid "" -"flashsystem_multihostmap_enabled is set to False, not allow multi host " -"mapping. CMMVC6071E The VDisk-to-host mapping was not created because the " -"VDisk is already mapped to a host." -msgstr "" -"flashsystem_multihostmap_enabled가 False로 설정되었습니다. 멀티 호스트 맵핑" -"이 허용되지 않습니다. CMMVC6071E VDisk가 이미 호스트에 맵핑되었으므로 VDisk " -"대 호스트 맵핑이 작성되지 않았습니다." - -msgid "flush() not supported in this version of librbd" -msgstr "이 버전의 librbd에서는 flush()가 지원되지 않음 " - -#, python-format -msgid "fmt=%(fmt)s backed by: %(backing_file)s" -msgstr "fmt=%(fmt)s 백업: %(backing_file)s" - -#, python-format -msgid "fmt=%(fmt)s backed by:%(backing_file)s" -msgstr "fmt=%(fmt)s 백업: %(backing_file)s" - -msgid "force delete" -msgstr "삭제 강제 실행" - -msgid "get_hyper_domain_id error." -msgstr "get_hyper_domain_id 오류." - -msgid "get_hypermetro_by_id error." -msgstr "get_hypermetro_by_id 오류." - -#, python-format -msgid "" -"get_iscsi_params: Failed to get target IP for initiator %(ini)s, please " -"check config file." -msgstr "" -"get_iscsi_params: 개시자 %(ini)s에 대한 대상 IP를 가져오지 못했습니다. 구성 " -"파일을 확인하십시오. " - -#, python-format -msgid "get_pool: Failed to get attributes for volume %s" -msgstr "get_pool: %s 볼륨의 속성을 가져오는 데 실패" - -msgid "glance_metadata changed" -msgstr "glance_metadata가 변경됨" - -#, python-format -msgid "" -"gpfs_images_share_mode is set to copy_on_write, but %(vol)s and %(img)s " -"belong to different file systems." 
-msgstr "" -"gpfs_images_share_mode가 copy_on_write로 설정되었지만 %(vol)s과(와) %(img)s은" -"(는)다른 파일 시스템에 속합니다." - -#, python-format -msgid "" -"gpfs_images_share_mode is set to copy_on_write, but %(vol)s and %(img)s " -"belong to different filesets." -msgstr "" -"gpfs_images_share_mode가 copy_on_write로 설정되었지만 %(vol)s과(와) %(img)s은" -"(는)다른 파일 세트에 속합니다." - -#, python-format -msgid "" -"hgst_group %(grp)s and hgst_user %(usr)s must map to valid users/groups in " -"cinder.conf" -msgstr "" -"hgst_group %(grp)s 및 hgst_user %(usr)s은(는) cinder.conf의 올바른 사용자/그" -"룹에 맵핑되어야 함" - -#, python-format -msgid "hgst_net %(net)s specified in cinder.conf not found in cluster" -msgstr "" -"cinder.conf에서 지정된 hgst_net %(net)s을(를) 클러스터에서 찾을 수 없음" - -msgid "hgst_redundancy must be set to 0 (non-HA) or 1 (HA) in cinder.conf." -msgstr "" -"hgst_redundancy는 cinder.conf에서 0(비HA) 또는 1(HA)로 설정해야 합니다. " - -msgid "hgst_space_mode must be an octal/int in cinder.conf" -msgstr "hgst_space_mode는 cinder.conf의 octal/int여야 함" - -#, python-format -msgid "hgst_storage server %(svr)s not of format :" -msgstr "hgst_storage 서버 %(svr)s의 형식이 :가 아님" - -msgid "hgst_storage_servers must be defined in cinder.conf" -msgstr "hgst_storage_servers가 cinder.conf에서 정의되어야 함" - -msgid "" -"http service may have been abruptly disabled or put to maintenance state in " -"the middle of this operation." -msgstr "" -"http 서비스가 갑자기 사용 안함으로 설정되었거나 이 조작을 수행하는 중에 유지" -"보수 상태로 변경된 경우일 수 있습니다." - -msgid "id cannot be None" -msgstr "id는 None일 수 없음" - -#, python-format -msgid "image %s not found" -msgstr "%s 이미지를 찾을 수 없음 " - -#, python-format -msgid "initialize_connection, volume: %(volume)s, Volume not found." -msgstr "initialize_connection, 볼륨: %(volume)s, 볼륨을 찾을 수 없습니다." - -#, python-format -msgid "initialize_connection: Failed to get attributes for volume %s." -msgstr "initialize_connection: 볼륨 %s에 대한 속성을 가져오지 못했습니다. " - -#, python-format -msgid "initialize_connection: Missing volume attribute for volume %s." 
-msgstr "initialize_connection: 볼륨 %s에 대한 볼륨 속성이 누락되었습니다. " - -#, python-format -msgid "" -"initialize_connection: No node found in I/O group %(gid)s for volume %(vol)s." -msgstr "" -"initialize_connection: 볼륨 %(vol)s에 대한 I/O 그룹 %(gid)s에서 노드를 찾을 " -"수 없습니다. " - -#, python-format -msgid "initialize_connection: vdisk %s is not defined." -msgstr "initialize_connection: vdisk %s이(가) 정의되지 않았습니다." - -#, python-format -msgid "invalid user '%s'" -msgstr "사용자 '%s'이(가) 올바르지 않음 " - -#, python-format -msgid "iscsi portal, %s, not found" -msgstr "iscsi 포털, %s을(를) 찾을 수 없음" - -msgid "" -"iscsi_ip_address must be set in config file when using protocol 'iSCSI'." -msgstr "" -"프로토콜 'iSCSI' 사용 시 구성 파일에서 iscsi_ip_address를 설정해야 합니다. " - -#, python-format -msgid "key manager error: %(reason)s" -msgstr "주요 관리자 오류: %(reason)s" - -msgid "limit param must be an integer" -msgstr "limit 매개변수는 정수여야 함" - -msgid "limit param must be positive" -msgstr "limit 매개변수가 양수여야 함" - -msgid "manage_existing requires a 'name' key to identify an existing volume." -msgstr "manage_existing에는 기존 볼륨을 식별하기 위한 'name' 키가 필요합니다. " - -#, python-format -msgid "" -"manage_existing_snapshot: Error managing existing replay %(ss)s on volume " -"%(vol)s" -msgstr "" -"manage_existing_snapshot: 볼륨 %(vol)s에서 기존 재생 %(ss)s을(를) 관리하는 " -"중 오류 발생" - -#, python-format -msgid "marker [%s] not found" -msgstr "마커 [%s]을(를) 찾을 수 없음" - -#, python-format -msgid "mdiskgrp missing quotes %s" -msgstr "mdiskgrp에 따옴표 누락 %s" - -#, python-format -msgid "migration_policy must be 'on-demand' or 'never', passed: %s" -msgstr "migration_policy는 'on-demand' 또는 'never'이어야 함. 패스됨: %s" - -#, python-format -msgid "mkfs failed on volume %(vol)s, error message was: %(err)s." -msgstr "%(vol)s 볼륨에서 mkfs가 실패했습니다. 오류 메시지: %(err)s." 
- -msgid "mock" -msgstr "mock" - -msgid "mount.glusterfs is not installed" -msgstr "mount.glusterfs가 설치되지 않음" - -#, python-format -msgid "multiple resources with name %s found by drbdmanage" -msgstr "drbdmanage에서 이름이 %s인 다중 자원을 발견함" - -#, python-format -msgid "multiple resources with snapshot ID %s found" -msgstr "스냅샷 ID가 %s인 다중 자원이 발견됨" - -msgid "name cannot be None" -msgstr "이름은 None일 수 없음" - -#, python-format -msgid "no REPLY but %r" -msgstr "REPLY가 아니라 %r" - -#, python-format -msgid "no snapshot with id %s found in drbdmanage" -msgstr "drbdmanage에서 ID가 %s인 스냅샷을 찾을 수 없음" - -#, python-format -msgid "not exactly one snapshot with id %s" -msgstr "ID가 %s인 스냅샷이 정확하게 하나가 아님" - -#, python-format -msgid "not exactly one volume with id %s" -msgstr "ID가 %s인 볼륨이 정확하게 하나가 아님" - -#, python-format -msgid "obj missing quotes %s" -msgstr "오브젝트에 따옴표 누락 %s" - -msgid "open_access_enabled is not off." -msgstr "open_access_enabled가 해제되지 않았습니다. " - -msgid "progress must be an integer percentage" -msgstr "진행상태는 정수 백분율이어야 함" - -msgid "provider must be defined" -msgstr "제공자를 정의해야 함 " - -#, python-format -msgid "" -"qemu-img %(minimum_version)s or later is required by this volume driver. " -"Current qemu-img version: %(current_version)s" -msgstr "" -"이 볼륨 드라이버에서는 qemu-img %(minimum_version)s 이상이 필요합니다. 현재 " -"qemu-img 버전: %(current_version)s" - -#, python-format -msgid "" -"qemu-img is not installed and image is of type %s. Only RAW images can be " -"used if qemu-img is not installed." -msgstr "" -"qemu-img가 설치되지 않고 이미지가 %s 유형입니다. qemu-img가 설치되지 않은 경" -"우 RAW 이미지만 사용할 수 있습니다. " - -msgid "" -"qemu-img is not installed and the disk format is not specified. Only RAW " -"images can be used if qemu-img is not installed." -msgstr "" -"qemu-img가 설치되지 않고 디스크 형식이 지정되지 않았습니다. qemu-img가 설치되" -"지 않은 경우 RAW 이미지만 사용할 수 있습니다. 
" - -msgid "rados and rbd python libraries not found" -msgstr "rados 및 rbd python 라이브러리를 찾을 수 없음" - -#, python-format -msgid "read_deleted can only be one of 'no', 'yes' or 'only', not %r" -msgstr "" -"read_deleted는 'no', 'yes', 'only' 중에서 선택 가능하며, %r은(는) 불가능함" - -#, python-format -msgid "replication_failover failed. %s not found." -msgstr "replication_failover에 실패했습니다. %s을(를) 찾을 수 없습니다." - -msgid "replication_failover failed. Backend not configured for failover" -msgstr "" -"replication_failover에 실패했습니다. 백엔드를 장애 복구하도록 구성되지 않았습" -"니다." - -#, python-format -msgid "" -"restore: %(vol_id)s failed to run dsmc due to invalid arguments on " -"%(bpath)s.\n" -"stdout: %(out)s\n" -" stderr: %(err)s" -msgstr "" -"복원: %(vol_id)s이(가) %(bpath)s으로 백업 하드 링크를 제거하지 못했습니다.\n" -"stdout: %(out)s\n" -" stderr: %(err)s" - -#, python-format -msgid "" -"restore: %(vol_id)s failed to run dsmc on %(bpath)s.\n" -"stdout: %(out)s\n" -" stderr: %(err)s" -msgstr "" -"복원: %(vol_id)s이(가) %(bpath)s에서 dsmc를 실행하는 데 실패했습니다.\n" -"stdout: %(out)s\n" -" stderr: %(err)s" - -#, python-format -msgid "" -"restore: %(vol_id)s failed.\n" -"stdout: %(out)s\n" -" stderr: %(err)s." -msgstr "" -"복원: %(vol_id)s이(가) 실패했습니다.\n" -"stdout: %(out)s\n" -" stderr: %(err)s." - -msgid "" -"restore_backup aborted, actual object list does not match object list stored " -"in metadata." -msgstr "" -"restore_backup이 중단되었습니다. 실제 오브젝트 목록이 메타데이터에 저장된 오" -"브젝트 목록과일치하지 않습니다. " - -#, python-format -msgid "rtslib_fb is missing member %s: You may need a newer python-rtslib-fb." -msgstr "" -"rtslib_fb에 멤버 %s이(가) 누락됨: 새 python-rtslib-fb가 필요할 수 있습니다. " - -msgid "san_ip is not set." -msgstr "san_ip가 설정되지 않았습니다. " - -msgid "san_ip must be set" -msgstr "san_ip가 설정되어야 함" - -msgid "" -"san_login and/or san_password is not set for Datera driver in the cinder." -"conf. Set this information and start the cinder-volume service again." -msgstr "" -"cinder.conf의 Datera 드라이버에 대해 san_login 및/또는 san_password가설정되" -"지 않았습니다. 
이 정보를 설정하고 cinder-volume 서비스를 다시시작하십시오. " - -msgid "serve() can only be called once" -msgstr "serve()는 한 번만 호출할 수 있음" - -#, python-format -msgid "snapshot-%s" -msgstr "스냅샷-%s" - -msgid "snapshots assigned" -msgstr "스냅샷이 지정됨" - -msgid "snapshots changed" -msgstr "스냅샷이 변경됨" - -#, python-format -msgid "source volume id:%s is not replicated" -msgstr "소스 볼륨 id:%s을(를) 복제할 수 없음" - -msgid "source-name cannot be empty." -msgstr "source-name은 비어 있지 않아야 합니다." - -msgid "source-name format should be: 'vmdk_path@vm_inventory_path'." -msgstr "source-name 형식은 'vmdk_path@vm_inventory_path'이어야 합니다." - -#, python-format -msgid "status must be %s and" -msgstr "상태는 %s이어야 하며" - -msgid "status must be available" -msgstr "상태가 사용 가능해야 함" - -msgid "stop_hypermetro error." -msgstr "stop_hypermetro 오류." - -msgid "sync_hypermetro error." -msgstr "sync_hypermetro 오류." - -#, python-format -msgid "" -"targetcli not installed and could not create default directory " -"(%(default_path)s): %(exc)s" -msgstr "" -"targetcli가 설치되지 않아 기본 디렉토리(%(default_path)s)를 작성할 수 없음: " -"%(exc)s" - -msgid "terminate_connection: Failed to get host name from connector." -msgstr "" -"terminate_connection: 커넥터에서 호스트 이름을 가져오는 데 실패했습니다." 
- -msgid "timeout creating new_volume on destination host" -msgstr "대상 호스트에 new_volume을 작성하는 중에 제한시간이 초과됨 " - -msgid "too many body keys" -msgstr "본문 키가 너무 많음" - -#, python-format -msgid "umount: %s: not mounted" -msgstr "umount: %s: 마운트되지 않았음" - -#, python-format -msgid "umount: %s: target is busy" -msgstr "umount: %s: 대상이 사용 중임" - -msgid "umount: : some other error" -msgstr "umount: : 기타 특정 오류" - -msgid "umount: : target is busy" -msgstr "umount: : 대상이 사용 중임" - -#, python-format -msgid "unmanage_snapshot: Cannot find snapshot named %s" -msgstr "unmanage_snapshot: %s(이)라는 스냅샷을 찾을 수 없음" - -#, python-format -msgid "unmanage_snapshot: Cannot find volume id %s" -msgstr "unmanage_snapshot: 볼륨 id %s을(를) 찾을 수 없음" - -#, python-format -msgid "unrecognized argument %s" -msgstr "인식되지 않는 인수 %s" - -#, python-format -msgid "unsupported compression algorithm: %s" -msgstr "지원되지 않는 압축 알고리즘: %s" - -msgid "valid iqn needed for show_target" -msgstr "show_target에 필요한 올바른 iqn" - -#, python-format -msgid "vdisk %s is not defined." -msgstr "vdisk %s이(가) 정의되지 않았습니다. " - -msgid "vmemclient python library not found" -msgstr "vmemclient python 라이브러리를 찾을 수 없음" - -#, python-format -msgid "volume %s not found in drbdmanage" -msgstr "drbdmanage에서 볼륨 %s을(를) 찾을 수 없음" - -msgid "volume assigned" -msgstr "볼륨 지정됨" - -msgid "volume changed" -msgstr "볼륨 변경됨" - -msgid "volume is already attached" -msgstr "볼륨이 이미 접속됨" - -msgid "volume is not local to this node" -msgstr "볼륨이 이 노드에 대해 로컬이 아님" - -#, python-format -msgid "" -"volume size %(volume_size)d is too small to restore backup of size %(size)d." -msgstr "" -"볼륨 크기 %(volume_size)d은(는) %(size)d 크기의 백업을 복원하기에 너무 작습니" -"다. " - -#, python-format -msgid "volume size %d is invalid." -msgstr "볼륨 크기 %d이(가) 올바르지 않음" - -msgid "" -"volume_type must be provided when creating a volume in a consistency group." -msgstr "일관성 그룹에 볼륨을 작성할 때 volume_type이 제공되어야 합니다." 
- -msgid "volume_type_id cannot be None" -msgstr "volume_type_id는 None일 수 없음" - -#, python-format -msgid "volume_types must be provided to create consistency group %(name)s." -msgstr "일관성 그룹 %(name)s 작성에 volume_types가 제공되어야 합니다." - -#, python-format -msgid "volume_types must be provided to create consistency group %s." -msgstr "일관성 그룹 %s 작성에 volume_types가 제공되어야 합니다." - -msgid "volumes assigned" -msgstr "볼륨이 지정됨" - -msgid "volumes changed" -msgstr "볼륨이 변경됨" - -#, python-format -msgid "wait_for_condition: %s timed out." -msgstr "wait_for_condition: %s 제한시간 초과되었습니다. " - -#, python-format -msgid "" -"zfssa_manage_policy property needs to be set to 'strict' or 'loose'. Current " -"value is: %s." -msgstr "" -"zfssa_manage_policy 특성을 'strict' 또는 'loose'로 설정해야 합니다. 현재 값: " -"%s." diff --git a/cinder/locale/pt_BR/LC_MESSAGES/cinder.po b/cinder/locale/pt_BR/LC_MESSAGES/cinder.po deleted file mode 100644 index 6e50ffb2b..000000000 --- a/cinder/locale/pt_BR/LC_MESSAGES/cinder.po +++ /dev/null @@ -1,9827 +0,0 @@ -# Translations template for cinder. -# Copyright (C) 2015 ORGANIZATION -# This file is distributed under the same license as the cinder project. -# -# Translators: -# FIRST AUTHOR , 2011 -# Gabriel Wainer, 2013 -# Lucas Ribeiro , 2014 -# Rodrigo Felix de Almeida , 2014 -# Andreas Jaeger , 2016. 
#zanata -msgid "" -msgstr "" -"Project-Id-Version: cinder 9.0.0.0rc2.dev202\n" -"Report-Msgid-Bugs-To: https://bugs.launchpad.net/openstack-i18n/\n" -"POT-Creation-Date: 2016-10-07 03:25+0000\n" -"MIME-Version: 1.0\n" -"Content-Type: text/plain; charset=UTF-8\n" -"Content-Transfer-Encoding: 8bit\n" -"PO-Revision-Date: 2016-03-24 01:24+0000\n" -"Last-Translator: Carlos Marques \n" -"Language: pt-BR\n" -"Plural-Forms: nplurals=2; plural=(n > 1);\n" -"Generated-By: Babel 2.0\n" -"X-Generator: Zanata 3.7.3\n" -"Language-Team: Portuguese (Brazil)\n" - -#, python-format -msgid "\t%s" -msgstr "\t%s" - -#, python-format -msgid "" -"\n" -"OpenStack Cinder version: %(version)s\n" -msgstr "" -"\n" -"Versão do OpenStack Cinder: %(version)s\n" - -#, python-format -msgid " but size is now %d" -msgstr " mas o tamanho agora é %d" - -#, python-format -msgid " but size is now %d." -msgstr " porém, o tamanho atual é %d." - -msgid " or " -msgstr " ou " - -#, python-format -msgid "%(attr)s is not set." -msgstr "O %(attr)s não está configurado." - -#, python-format -msgid "" -"%(driver)s manage_existing cannot manage a volume connected to hosts. Please " -"disconnect this volume from existing hosts before importing" -msgstr "" -"%(driver)s manage_existing não pode gerenciar um volume conectado a hosts. " -"Desconecte esse volume dos hosts existentes antes de importar" - -#, python-format -msgid "%(err)s" -msgstr "%(err)s" - -#, python-format -msgid "" -"%(err)s\n" -"result: %(res)s." -msgstr "" -"%(err)s\n" -"resultado: %(res)s." - -#, python-format -msgid "%(error_message)s" -msgstr "%(error_message)s" - -#, python-format -msgid "%(exception)s: %(explanation)s" -msgstr "%(exception)s: %(explanation)s" - -#, python-format -msgid "%(file)s: Permission denied." -msgstr "%(file)s: Permissão negada." 
- -#, python-format -msgid "" -"%(fun)s: Failed with unexpected CLI output.\n" -" Command: %(cmd)s\n" -" stdout: %(out)s\n" -" stderr: %(err)s" -msgstr "" -"%(fun)s: Falha com saída da CLI inesperada.\n" -" Comando: %(cmd)s\n" -" stdout: %(out)s\n" -" stderr: %(err)s" - -#, python-format -msgid "%(host)-25s\t%(availability_zone)-15s" -msgstr "%(host)-25s\t%(availability_zone)-15s" - -#, python-format -msgid "%(host)-25s\t%(zone)-15s" -msgstr "%(host)-25s\t%(zone)-15s" - -#, python-format -msgid "%(message)s" -msgstr "%(message)s" - -#, python-format -msgid "" -"%(message)s\n" -"Status Code: %(_status)s\n" -"Body: %(_body)s" -msgstr "" -"%(message)s\n" -"Código de Status: %(_status)s\n" -"Corpo: %(_body)s" - -#, python-format -msgid "%(message)s, subjectAltName: %(sanList)s." -msgstr "%(message)s, subjectAltName: %(sanList)s." - -#, python-format -msgid "" -"%(msg_type)s: creating NetworkPortal: ensure port %(port)d on ip %(ip)s is " -"not in use by another service." -msgstr "" -"%(msg_type)s: criando NetworkPortal: assegure-se de que a porta %(port)d no " -"IP %(ip)s não esteja sendo usada por outro serviço." - -#, python-format -msgid "" -"%(op)s: backup %(bck_id)s, volume %(vol_id)s failed. Backup object has " -"unexpected mode. Image or file backups supported, actual mode is " -"%(vol_mode)s." -msgstr "" -"%(op)s: backup %(bck_id)s, volume %(vol_id)s falhou. Objeto de backup possui " -"modo inesperado. Backups de arquivo ou imagem suportados, modo real é " -"%(vol_mode)s." 
- -#, python-format -msgid "%(service)s Service is not %(status)s on storage appliance: %(host)s" -msgstr "" -"O Serviço %(service)s não é %(status)s no dispositivo de armazenamento: " -"%(host)s" - -#, python-format -msgid "%(value_name)s must be <= %(max_value)d" -msgstr "%(value_name)s deve ser <= %(max_value)d" - -#, python-format -msgid "%(value_name)s must be >= %(min_value)d" -msgstr "%(value_name)s deve ser >= %(min_value)d" - -#, python-format -msgid "" -"%(worker_name)s value of %(workers)d is invalid, must be greater than 0." -msgstr "Valor %(worker_name)s de %(workers)d é inválido, deve ser maior que 0." - -#, python-format -msgid "%s" -msgstr "%s" - -#, python-format -msgid "%s \"data\" is not in result." -msgstr "Os \"dados\" %s não estão no resultado." - -#, python-format -msgid "" -"%s cannot be accessed. Verify that GPFS is active and file system is mounted." -msgstr "" -"%s não pode ser acessado. Verifique se o GPFS está ativo e o sistema de " -"arquivos está montado." - -#, python-format -msgid "%s cannot be resized using clone operation as it contains no blocks." -msgstr "" -"%s não pode ser redimensionado usando a operação de clone, pois ele não " -"contém blocos." - -#, python-format -msgid "" -"%s cannot be resized using clone operation as it is hosted on compressed " -"volume" -msgstr "" -"%s não pode ser redimensionado usando a operação de clone, pois ele está " -"hospedado no volume compactado" - -#, python-format -msgid "%s configuration option is not set." -msgstr "A opção de configuração %s não está definida." - -#, python-format -msgid "%s does not exist." -msgstr "%s não existe." - -#, python-format -msgid "%s is not a directory." -msgstr "%s não é um diretório." - -#, python-format -msgid "%s is not installed" -msgstr "%s não está instalado" - -#, python-format -msgid "%s is not installed." -msgstr "%s não está instalado." 
- -#, python-format -msgid "%s is not set" -msgstr "%s não está configurado" - -#, python-format -msgid "%s is not set and is required for the replication device to be valid." -msgstr "" -"%s não está configurado e é necessário para que o dispositivo de replicação " -"seja válido. " - -#, python-format -msgid "%s is not set." -msgstr "%s não está configurado." - -#, python-format -msgid "%s must be a valid raw or qcow2 image." -msgstr "%s deve ser uma imagem bruta ou qcow2 válida." - -#, python-format -msgid "%s must be an absolute path." -msgstr "%s deve ser um caminho absoluto." - -#, python-format -msgid "%s must be an integer." -msgstr "%s deve ser um número inteiro." - -#, python-format -msgid "%s not set in cinder.conf" -msgstr "%s não configurado em cinder.conf" - -#, python-format -msgid "%s not set." -msgstr "%s não definido." - -#, python-format -msgid "" -"'%(prot)s' is invalid for flashsystem_connection_protocol in config file. " -"valid value(s) are %(enabled)s." -msgstr "" -"'%(prot)s' é inválido para flashsystem_connection_protocol no arquivo de " -"configuração. valor(es) válido(s) são %(enabled)s." - -msgid "'active' must be present when writing snap_info." -msgstr "'ativo' deve estar presente ao gravar snap_info." - -msgid "'consistencygroup_id' must be specified" -msgstr "'consistencygroup_id' deve ser especificado" - -msgid "'qemu-img info' parsing failed." -msgstr "Falha na análise de 'qemu-img info'." - -msgid "'status' must be specified." -msgstr "'status' deve ser especificado." - -msgid "'volume_id' must be specified" -msgstr "'volume_id' deve ser especificado" - -#, python-format -msgid "" -"(Command: %(cmd)s) (Return Code: %(exit_code)s) (Stdout: %(stdout)s) " -"(Stderr: %(stderr)s)" -msgstr "" -"(Comando: %(cmd)s) (Código de Retorno: %(exit_code)s) (Saída padrão: " -"%(stdout)s) (Erro padrão: %(stderr)s)" - -#, python-format -msgid "A LUN (HLUN) was not found. (LDEV: %(ldev)s)" -msgstr "Um LUN (HLUN) não foi localizado. 
(LDEV: %(ldev)s)" - -msgid "A concurrent, possibly contradictory, request has been made." -msgstr "Foi feita uma solicitação atual possivelmente contraditória." - -#, python-format -msgid "" -"A free LUN (HLUN) was not found. Add a different host group. (LDEV: %(ldev)s)" -msgstr "" -"Um LUN (HLUN) livre não foi localizado. Inclua um grupo de hosts diferente. " -"(LDEV: %(ldev)s)" - -#, python-format -msgid "A host group could not be added. (port: %(port)s, name: %(name)s)" -msgstr "" -"Um grupo de hosts não pôde ser incluído. (porta: %(port)s, nome: %(name)s)" - -#, python-format -msgid "" -"A host group could not be deleted. (port: %(port)s, gid: %(gid)s, name: " -"%(name)s)" -msgstr "" -"Um grupo de hosts não pôde ser excluído. (porta: %(port)s, GID: %(gid)s, " -"nome: %(name)s)" - -#, python-format -msgid "A host group is invalid. (host group: %(gid)s)" -msgstr "Um grupo de hosts é inválido. (grupo de hosts: %(gid)s)" - -#, python-format -msgid "A pair cannot be deleted. (P-VOL: %(pvol)s, S-VOL: %(svol)s)" -msgstr "Um par não pode ser excluído. (P-VOL: %(pvol)s, S-VOL: %(svol)s)" - -#, python-format -msgid "" -"A pair could not be created. The maximum number of pair is exceeded. (copy " -"method: %(copy_method)s, P-VOL: %(pvol)s)" -msgstr "" -"Um par não pôde ser criado. O número máximo do par é excedido. (método de " -"cópia: %(copy_method)s, P-VOL: %(pvol)s)" - -#, python-format -msgid "A parameter is invalid. (%(param)s)" -msgstr "Um parâmetro é inválido. (%(param)s)" - -#, python-format -msgid "A parameter value is invalid. (%(meta)s)" -msgstr "Um valor de parâmetro é inválido. (%(meta)s)" - -#, python-format -msgid "A pool could not be found. (pool id: %(pool_id)s)" -msgstr "Um conjunto não pôde ser localizado. (ID do conjunto: %(pool_id)s)" - -#, python-format -msgid "A snapshot status is invalid. (status: %(status)s)" -msgstr "Um status de captura instantânea é inválido. 
(status: %(status)s)" - -msgid "A valid secondary target MUST be specified in order to failover." -msgstr "" -"Um destino secundário válido DEVE ser especificado para executar failover." - -msgid "A volume ID or share was not specified." -msgstr "Um ID de volume ou compartilhamento não foi especificado." - -#, python-format -msgid "A volume status is invalid. (status: %(status)s)" -msgstr "Um status de volume é inválido. (status: %(status)s)" - -#, python-format -msgid "API %(name)s failed with error string %(err)s" -msgstr "API %(name)s falhou com sequência de erros %(err)s" - -#, python-format -msgid "" -"API Version String %(version)s is of invalid format. Must be of format " -"MajorNum.MinorNum." -msgstr "" -"Sequência de Versão de API %(version)s é de formato inválido. Ela deve " -"estar no formato MajorNum.MinorNum." - -msgid "API key is missing for CloudByte driver." -msgstr "A chave da API está ausente para o driver CloudByte." - -#, python-format -msgid "API response: %(response)s" -msgstr "Resposta da API: %(response)s" - -#, python-format -msgid "API response: %s" -msgstr "Resposta da API: %s" - -#, python-format -msgid "API version %(version)s is not supported on this method." -msgstr "A versão da API %(version)s não é suportada nesse método." - -msgid "API version could not be determined." -msgstr "A versão da API não pôde ser determinada." - -msgid "" -"About to delete child projects having non-zero quota. This should not be " -"performed" -msgstr "" -"Prestes a excluir projetos filhos que têm cota diferente de zero. Isso não " -"deve ser executado" - -msgid "Access list not available for public volume types." -msgstr "Lista de acesso não disponível para tipos de volume público." - -msgid "Activate or deactivate QoS error." -msgstr "Erro ao ativar ou desativar QoS" - -msgid "Activate snapshot error." -msgstr "Erro ao ativar captura instantânea." - -msgid "Add FC port to host error." -msgstr "Erro ao incluir porta FC no host." 
- -msgid "Add fc initiator to array error." -msgstr "Erro ao incluir inicializador de FC na matriz." - -msgid "Add initiator to array error." -msgstr "Erro ao incluir inicializador na matriz." - -msgid "Add lun to cache error." -msgstr "Erro ao incluir lun no cache." - -msgid "Add lun to partition error." -msgstr "Erro ao incluir LUN na partição." - -msgid "Add mapping view error." -msgstr "Erro ao incluir visualização de mapeamento." - -msgid "Add new host error." -msgstr "Erro ao incluir novo host." - -msgid "Add port to port group error." -msgstr "Erro ao incluir porta no grupo de portas." - -#, python-format -msgid "" -"All the specified storage pools to be managed do not exist. Please check " -"your configuration. Non-existent pools: %s" -msgstr "" -"Todos os conjuntos de armazenamento especificados para serem gerenciados não " -"existem. Verifique sua configuração. Não existem conjuntos: %s" - -msgid "An API version request must be compared to a VersionedMethod object." -msgstr "" -"Uma solicitação da versão da API deve ser comparada com um objeto " -"VersionedMethod object." - -msgid "An error has occurred during backup operation" -msgstr "Um erro ocorreu durante a operação de backup" - -#, python-format -msgid "" -"An error occurred during the LUNcopy operation. LUNcopy name: " -"%(luncopyname)s. LUNcopy status: %(luncopystatus)s. LUNcopy state: " -"%(luncopystate)s." -msgstr "" -"Ocorreu um erro durante a operação LUNcopy. Nome de LUNcopy: " -"%(luncopyname)s. O status de LUNcopy: %(luncopystatus)s. Estado de LUNcopy: " -"%(luncopystate)s." - -#, python-format -msgid "An error occurred while reading volume \"%s\"." -msgstr "Ocorreu um erro ao ler o volume \"%s\"." - -#, python-format -msgid "An error occurred while writing to volume \"%s\"." -msgstr "Ocorreu um erro ao gravar no volume \"%s\"." - -#, python-format -msgid "An iSCSI CHAP user could not be added. (username: %(user)s)" -msgstr "" -"Um usuário de CHAP iSCSI não pôde ser incluído. 
(nome de usuário: %(user)s)" - -#, python-format -msgid "An iSCSI CHAP user could not be deleted. (username: %(user)s)" -msgstr "" -"Um usuário do CHAP iSCSI não pôde ser excluído. (nome de usuário: %(user)s)" - -#, python-format -msgid "" -"An iSCSI target could not be added. (port: %(port)s, alias: %(alias)s, " -"reason: %(reason)s)" -msgstr "" -"Um destino de iSCSI não pôde ser incluído. (porta: %(port)s, alias: " -"%(alias)s, motivo: %(reason)s)" - -#, python-format -msgid "" -"An iSCSI target could not be deleted. (port: %(port)s, tno: %(tno)s, alias: " -"%(alias)s)" -msgstr "" -"Um destino de iSCSI não pôde ser excluído. (porta: %(port)s, tno: %(tno)s, " -"alias: %(alias)s)" - -msgid "An unknown exception occurred." -msgstr "Ocorreu uma exceção desconhecida." - -msgid "" -"An user with a token scoped to a subproject is not allowed to see the quota " -"of its parents." -msgstr "" -"Um usuário com escopo do token definido para um subprojeto não tem permissão " -"para ver a quota de seus pais." - -msgid "Append port group description error." -msgstr "Erro ao anexar descrição do grupo de portas." - -#, python-format -msgid "" -"Applying the zones and cfgs to the switch failed (error code=%(err_code)s " -"error msg=%(err_msg)s." -msgstr "" -"Falha ao aplicar as zonas e cfgs no comutador (error code=%(err_code)s error " -"msg=%(err_msg)s." - -#, python-format -msgid "Array does not exist or is offline. Current status of array is %s." -msgstr "A matriz não existe ou está off-line. O status atual da matriz é %s." - -msgid "Associate host to hostgroup error." -msgstr "Erro ao associar host ao grupo de hosts." - -msgid "Associate host to mapping view error." -msgstr "Erro ao associar host ao mapeamento." - -msgid "Associate initiator to host error." -msgstr "Erro ao associar inicializador ao host." - -msgid "Associate lun to QoS error." -msgstr "Erro ao associar LUN ao QoS." - -msgid "Associate lun to lungroup error." -msgstr "Erro ao associar LUN ao grupo de LUNs." 
- -msgid "Associate lungroup to mapping view error." -msgstr "Erro ao associar grupo de LUNs à visualização de mapeamento." - -msgid "Associate portgroup to mapping view error." -msgstr "Erro ao associar grupo de portas à visualização de mapeamento." - -msgid "At least one valid iSCSI IP address must be set." -msgstr "Pelo menos um endereço IP iSCSI válido deve ser configurado." - -#, python-format -msgid "Attempt to transfer %s with invalid auth key." -msgstr "Tentativa de transferir %s com chave de auth inválida." - -#, python-format -msgid "Auth group [%s] details not found in CloudByte storage." -msgstr "" -"Detalhes do grupo de autenticação [%s] não localizados no armazenamento " -"CloudByte." - -msgid "Auth user details not found in CloudByte storage." -msgstr "" -"Detalhes do usuário de autenticação não localizados no armazenamento do " -"CloudByte." - -#, python-format -msgid "Authentication failed, verify the switch credentials, error code %s." -msgstr "" -"A autenticação falhou, verifique as credenciais do comutador, código de erro " -"%s." - -#, python-format -msgid "Availability zone '%(s_az)s' is invalid." -msgstr "A zona de disponibilidade '%(s_az)s' é inválida." - -msgid "Available categories:" -msgstr "Categorias disponíveis:" - -msgid "" -"Back-end QoS specs are not supported on this storage family and ONTAP " -"version." -msgstr "" -"As especificações de QoS backend não são suportadas nesta família de " -"armazenamento e versão ONTAP. 
" - -#, python-format -msgid "Backend doesn't exist (%(backend)s)" -msgstr "Backend não existe (%(backend)s)" - -#, python-format -msgid "Backend reports: %(message)s" -msgstr "Relatórios de backend: %(message)s" - -msgid "Backend reports: item already exists" -msgstr "Relatórios de backend: o item já existe" - -msgid "Backend reports: item not found" -msgstr "Relatórios de backend: item não localizado" - -#, python-format -msgid "Backend service retry timeout hit: %(timeout)s sec" -msgstr "" -"Ocorrência de tempo limite de nova tentativa do serviço de backend: " -"%(timeout)s segundos" - -msgid "Backend storage did not configure fiber channel target." -msgstr "Armazenamento de backend não configurou o destino do Fiber Channel." - -msgid "Backing up an in-use volume must use the force flag." -msgstr "O backup de um volume em uso deve usar a sinalização de força." - -#, python-format -msgid "Backup %(backup_id)s could not be found." -msgstr "O backup %(backup_id)s não pôde ser localizado." - -msgid "Backup RBD operation failed" -msgstr "Operação RBD de backup falhou" - -msgid "Backup already exists in database." -msgstr "Backup já existe no banco de dados." - -#, python-format -msgid "Backup driver reported an error: %(message)s" -msgstr "O driver de Backup reportou um erro: %(message)s" - -msgid "Backup id required" -msgstr "ID de backup necessário" - -msgid "Backup is not supported for GlusterFS volumes with snapshots." -msgstr "" -"Backup não é suportado para volumes GlusterFS com capturas instantâneas." - -msgid "Backup is only supported for SOFS volumes without backing file." -msgstr "" -"O backup é suportado apenas para volumes SOFS sem fazer backup do arquivo." - -msgid "Backup is only supported for raw-formatted GlusterFS volumes." -msgstr "Backup é suportado apenas para volumes GlusterFS em formato bruto." - -msgid "Backup is only supported for raw-formatted SOFS volumes." -msgstr "O backup é suportado apenas para volumes SOFS não formatados." 
- -msgid "Backup operation of an encrypted volume failed." -msgstr "A operação de backup de um volume criptografado falhou." - -#, python-format -msgid "" -"Backup service %(configured_service)s does not support verify. Backup id " -"%(id)s is not verified. Skipping verify." -msgstr "" -"O serviço de backup %(configured_service)s não suporta verificação. O ID do " -"backup %(id)s não foi verificado. Ignorando verificação." - -#, python-format -msgid "" -"Backup service %(service)s does not support verify. Backup id %(id)s is not " -"verified. Skipping reset." -msgstr "" -"Serviço de backup %(service)s não suporta verificação. ID do backup %(id)s " -"não foi verificado. Ignorando reconfiguração." - -#, python-format -msgid "Backup should only have one snapshot but instead has %s" -msgstr "Backup deve ter apenas uma captura instantânea mas possui apenas %s" - -msgid "Backup status must be available" -msgstr "O status de backup deve ser disponível" - -#, python-format -msgid "Backup status must be available and not %s." -msgstr "Status de backup deve ser disponível e não %s." - -msgid "Backup status must be available or error" -msgstr "O status de backup deve ser disponível ou erro" - -msgid "Backup to be restored has invalid size" -msgstr "O backup a ser restaurado tem tamanho inválido" - -#, python-format -msgid "Bad Status line returned: %(arg)s." -msgstr "Linha de status inválido retornada: %(arg)s." 
- -#, python-format -msgid "Bad key(s) in quota set: %s" -msgstr "Chave(s) inválida(s) no conjunto de quota: %s" - -#, python-format -msgid "" -"Bad or unexpected response from the storage volume backend API: %(data)s" -msgstr "" -"Resposta inválida ou inesperada da API de backend do volume de " -"armazenamento: %(data)s" - -#, python-format -msgid "Bad project format: project is not in proper format (%s)" -msgstr "" -"Formato de projeto inválido: o projeto não está no formato adequado (%s)" - -msgid "Bad response from Datera API" -msgstr "Resposta inválida da API Datera" - -msgid "Bad response from SolidFire API" -msgstr "Resposta inválida da API SolidFire" - -#, python-format -msgid "Bad response from XMS, %s" -msgstr "Resposta inválida de XMS, %s" - -msgid "Binary" -msgstr "binário" - -msgid "Blank components" -msgstr "Componentes em branco" - -msgid "Blockbridge api host not configured" -msgstr "Host da API Blockbridge não configurado" - -#, python-format -msgid "Blockbridge configured with invalid auth scheme '%(auth_scheme)s'" -msgstr "" -"Blockbridge configurado com esquema de autenticação inválido " -"'%(auth_scheme)s'" - -msgid "Blockbridge default pool does not exist" -msgstr "O conjunto padrão Blockbridge não existe" - -msgid "" -"Blockbridge password not configured (required for auth scheme 'password')" -msgstr "" -"Senha Blockbridge não configurada (necessária para esquema de autenticação " -"'password')" - -msgid "Blockbridge pools not configured" -msgstr "Conjuntos Blockbridge não configurados" - -msgid "Blockbridge token not configured (required for auth scheme 'token')" -msgstr "" -"Token Blockbridge não configurado (necessário para esquema de autenticação " -"'token')" - -msgid "Blockbridge user not configured (required for auth scheme 'password')" -msgstr "" -"Usuário Blockbridge não configurado (necessário para esquema de autenticação " -"'password')" - -#, python-format -msgid "Brocade Fibre Channel Zoning CLI error: %(reason)s" -msgstr "Erro 
na CLI do Fibre Channel Zoning Brocade: %(reason)s" - -#, python-format -msgid "Brocade Fibre Channel Zoning HTTP error: %(reason)s" -msgstr "Erro HTTP do Fibre Channel Zoning Brocade: %(reason)s" - -msgid "CHAP secret should be 12-16 bytes." -msgstr "O segredo do CHAP deve ter de 12 a 16 bytes." - -#, python-format -msgid "" -"CLI Exception output:\n" -" command: %(cmd)s\n" -" stdout: %(out)s\n" -" stderr: %(err)s" -msgstr "" -"Saída de Exceção da CLI:\n" -" comando: %(cmd)s\n" -" stdout: %(out)s\n" -" stderr: %(err)s" - -#, python-format -msgid "" -"CLI Exception output:\n" -" command: %(cmd)s\n" -" stdout: %(out)s\n" -" stderr: %(err)s." -msgstr "" -"Saída de Exceção da CLI:\n" -" comando: %(cmd)s\n" -" stdout: %(out)s\n" -" stderr: %(err)s." - -msgid "" -"CMMVC6071E The VDisk-to-host mapping was not created because the VDisk is " -"already mapped to a host.\n" -"\"" -msgstr "" -"CMMVC6071E O mapeamento de VDisk para host não foi criado porque o VDisk já " -"está mapeado para um host.\n" -"\"" - -msgid "CONCERTO version is not supported" -msgstr "versão CONCERTO não é suportada" - -#, python-format -msgid "CPG (%s) doesn't exist on array" -msgstr "CPG (%s) não existe na matriz" - -msgid "Cache name is None, please set smartcache:cachename in key." -msgstr "O nome do cache é Nenhum; configure smartcache:cachename na chave." - -#, python-format -msgid "Cache volume %s does not have required properties" -msgstr "O volume de cache %s não possui as propriedades necessárias. " - -msgid "Call returned a None object" -msgstr "A chamada retornou um objeto Nenhum" - -msgid "Can not add FC port to host." -msgstr "Não é possível incluir a porta FC no host." - -#, python-format -msgid "Can not find cache id by cache name %(name)s." -msgstr "Não é possível localizar ID de cache pelo nome do cache %(name)s." - -#, python-format -msgid "Can not find partition id by name %(name)s." -msgstr "Não é possível localizar o ID da partição por nome %(name)s." 
- -#, python-format -msgid "Can not get pool info. pool: %s" -msgstr "Não é possível obter informações do conjunto. conjunto: %s" - -#, python-format -msgid "Can not translate %s to integer." -msgstr "Não foi possível converter %s para inteiro." - -#, python-format -msgid "Can't access 'scality_sofs_config': %s" -msgstr "Não é possível acessar 'scality_sofs_config': %s" - -msgid "Can't decode backup record." -msgstr "Não é possível decodificar registro de backup." - -#, python-format -msgid "Can't extend replication volume, volume: %(id)s" -msgstr "Não é possível estender o volume de replicação, volume: %(id)s" - -msgid "Can't find LUN on the array, please check the source-name or source-id." -msgstr "" -"Não é possível localizar a LUN na matriz, verifique o source-name ou o " -"source-id." - -#, python-format -msgid "Can't find cache name on the array, cache name is: %(name)s." -msgstr "" -"Não é possível localizar o nome do cache na matriz; o nome do cache é: " -"%(name)s." - -#, python-format -msgid "Can't find lun info on the array. volume: %(id)s, lun name: %(name)s." -msgstr "" -"Não é possível localizar as informações de LUN na matriz, volume: %(id)s, " -"nome do LUN: %(name)s." - -#, python-format -msgid "Can't find partition name on the array, partition name is: %(name)s." -msgstr "" -"Não é possível localizar o nome da partição na matriz; o nome da partição é: " -"%(name)s." - -#, python-format -msgid "Can't find service: %s" -msgstr "Não é possível localizar o serviço %s" - -msgid "" -"Can't find snapshot on array, please check the source-name or source-id." -msgstr "" -"Não é possível localizar a captura instantânea na matriz, verifique o nome " -"da origem ou o ID da origem." - -msgid "Can't find the same host id from arrays." -msgstr "Não é possível localizar o mesmo ID de host a partir das matrizes." 
- -#, python-format -msgid "Can't get volume id from snapshot, snapshot: %(id)s" -msgstr "" -"Não é possível obter ID de volume a partir da captura instantânea, captura " -"instantânea: %(id)s" - -#, python-format -msgid "Can't get volume id. Volume name: %s." -msgstr "Não é possível obter ID de volume . Nome do volume %s." - -#, python-format -msgid "Can't import LUN %(lun_id)s to Cinder. LUN type mismatched." -msgstr "" -"Não é possível importar a LUN %(lun_id)s no Cinder. Tipo de LUN incompatível." - -#, python-format -msgid "Can't import LUN %s to Cinder. Already exists in a HyperMetroPair." -msgstr "" -"Não é possível importar a LUN %s no Cinder. Já existe em um HyperMetroPair." - -#, python-format -msgid "Can't import LUN %s to Cinder. Already exists in a LUN copy task." -msgstr "" -"Não é possível importar a LUN %s no Cinder. Já existe em uma tarefa de cópia " -"da LUN." - -#, python-format -msgid "Can't import LUN %s to Cinder. Already exists in a LUN group." -msgstr "" -"Não é possível importar a LUN %s no Cinder. Já existe em um grupo de LUNs." - -#, python-format -msgid "Can't import LUN %s to Cinder. Already exists in a LUN mirror." -msgstr "" -"Não é possível importar a LUN %s no Cinder. Já existe em um espelho de LUN." - -#, python-format -msgid "Can't import LUN %s to Cinder. Already exists in a SplitMirror." -msgstr "" -"Não é possível importar a LUN %s no Cinder. Já existe em um SplitMirror." - -#, python-format -msgid "Can't import LUN %s to Cinder. Already exists in a migration task." -msgstr "" -"Não é possível importar a LUN %s no Cinder. Já existe em uma tarefa de " -"migração." - -#, python-format -msgid "" -"Can't import LUN %s to Cinder. Already exists in a remote replication task." -msgstr "" -"Não é possível importar a LUN %s no Cinder. Já existe em uma tarefa de " -"replicação remota." - -#, python-format -msgid "Can't import LUN %s to Cinder. LUN status is not normal." -msgstr "" -"Não é possível importar a LUN %s no Cinder. 
O status de LUN não é normal." - -#, python-format -msgid "Can't import snapshot %s to Cinder. Snapshot doesn't belong to volume." -msgstr "" -"Não é possível importar a captura instantânea %s no Cinder. A captura " -"instantânea não pertence ao volume." - -#, python-format -msgid "Can't import snapshot %s to Cinder. Snapshot is exposed to initiator." -msgstr "" -"Não é possível importar a captura instantânea %s no Cinder. A captura " -"instantânea é exporta para o iniciador." - -#, python-format -msgid "" -"Can't import snapshot %s to Cinder. Snapshot status is not normal or running " -"status is not online." -msgstr "" -"Não é possível importar a captura instantânea %s no Cinder. O status captura " -"instantânea não é normal ou o status de execução não é on-line." - -msgid "Can't parse backup record." -msgstr "Não é possível analisar o registro de backup." - -#, python-format -msgid "" -"Cannot add volume %(volume_id)s to consistency group %(group_id)s because it " -"has no volume type." -msgstr "" -"Não é possível incluir o volume %(volume_id)s no grupo de consistências " -"%(group_id)s porque ele não possui nenhum tipo de volume." - -#, python-format -msgid "" -"Cannot add volume %(volume_id)s to consistency group %(group_id)s because it " -"is already in consistency group %(orig_group)s." -msgstr "" -"Não é possível incluir o volume %(volume_id)s no grupo de consistências " -"%(group_id)s porque ele já está no grupo de consistências %(orig_group)s." - -#, python-format -msgid "" -"Cannot add volume %(volume_id)s to consistency group %(group_id)s because " -"volume cannot be found." -msgstr "" -"Não é possível incluir o volume %(volume_id)s no grupo de consistências " -"%(group_id)s porque o volume não pode ser localizado." - -#, python-format -msgid "" -"Cannot add volume %(volume_id)s to consistency group %(group_id)s because " -"volume does not exist." 
-msgstr "" -"Não é possível incluir o volume %(volume_id)s no grupo de consistências " -"%(group_id)s porque o volume não existe." - -#, python-format -msgid "" -"Cannot add volume %(volume_id)s to consistency group %(group_id)s because " -"volume is in an invalid state: %(status)s. Valid states are: %(valid)s." -msgstr "" -"Não é possível incluir o volume %(volume_id)s no grupo de consistências " -"%(group_id)s porque o volume está em um estado inválido: %(status)s. Os " -"estados válidos são: %(valid)s." - -#, python-format -msgid "" -"Cannot add volume %(volume_id)s to consistency group %(group_id)s because " -"volume type %(volume_type)s is not supported by the group." -msgstr "" -"Não é possível incluir o volume %(volume_id)s no grupo de consistências " -"%(group_id)s porque o tipo de volume %(volume_type)s não é suportado pelo " -"grupo." - -#, python-format -msgid "" -"Cannot attach already attached volume %s; multiattach is disabled via the " -"'netapp_enable_multiattach' configuration option." -msgstr "" -"Não é possível anexar o volume já anexado %s; a anexação múltipla está " -"desativada pela opção de configuração 'netapp_enable_multiattach'." - -msgid "Cannot change VF context in the session." -msgstr "Não é possível alterar o contexto do VF na sessão." - -#, python-format -msgid "" -"Cannot change VF context, specified VF is not available in the manageable VF " -"list %(vf_list)s." -msgstr "" -"Não é possível alterar o contexto do VF porque o VF especificado não está " -"disponível na lista de VFs gerenciáveis %(vf_list)s." - -msgid "Cannot connect to ECOM server." -msgstr "Não é possível conectar-se ao servidor ECOM." - -#, python-format -msgid "" -"Cannot create consistency group %(group)s because snapshot %(snap)s is not " -"in a valid state. Valid states are: %(valid)s." -msgstr "" -"Não é possível criar o grupo de consistências %(group)s porque a captura " -"instantânea %(snap)s não está em um estado válido. 
Os estados válidos são: " -"%(valid)s." - -#, python-format -msgid "" -"Cannot create consistency group %(group)s because source volume " -"%(source_vol)s is not in a valid state. Valid states are: %(valid)s." -msgstr "" -"Não é possível criar grupo de consistências %(group)s porque o volume de " -"origem %(source_vol)s não está em um estado válido. Os estados válidos são: " -"%(valid)s." - -#, python-format -msgid "Cannot create directory %s." -msgstr "Não é possível criar o diretório %s." - -msgid "Cannot create encryption specs. Volume type in use." -msgstr "" -"Não foi possível criar especificações de criptografia. Tipo de volume em uso." - -#, python-format -msgid "" -"Cannot create image of disk format: %s. Only vmdk disk format is accepted." -msgstr "" -"Não foi possível criar imagem do formato de disco: %s. Apenas o formato de " -"disco vmdk disk é aceito." - -#, python-format -msgid "Cannot create masking view: %(maskingViewName)s. " -msgstr "" -"Não é possível criar visualização de mascaramento: %(maskingViewName)s. " - -#, python-format -msgid "" -"Cannot create more than %(req)s volumes on the ESeries array when " -"'netapp_enable_multiattach' is set to true." -msgstr "" -"Não é possível criar mais de %(req)s volumes na matriz ESeries quando " -"'netapp_enable_multiattach' está configurado como true." - -#, python-format -msgid "Cannot create or find an storage group with name %(sgGroupName)s." -msgstr "" -"Não é possível criar ou localizar um grupo de armazenamentos com o nome " -"%(sgGroupName)s." - -#, python-format -msgid "Cannot create volume of size %s: not multiple of 8GB." -msgstr "Não é possível criar volume de tamanho %s: não é múltiplo de 8GB." - -#, python-format -msgid "Cannot create volume_type with name %(name)s and specs %(extra_specs)s" -msgstr "" -"Não é possível criar o volume_type com o nome %(name)s e as especificações " -"%(extra_specs)s" - -#, python-format -msgid "Cannot delete LUN %s while snapshots exist." 
-msgstr "" -"Não é possível excluir o LUN %s enquanto existem capturas instantâneas." - -#, python-format -msgid "" -"Cannot delete cache volume: %(cachevol_name)s. It was updated at " -"%(updated_at)s and currently has %(numclones)d volume instances." -msgstr "" -"Não é possível excluir o volume de cache: %(cachevol_name)s. Ele foi " -"atualizado em %(updated_at)s e atualmente tem %(numclones)d instâncias de " -"volume." - -#, python-format -msgid "" -"Cannot delete cache volume: %(cachevol_name)s. It was updated at " -"%(updated_at)s and currently has %(numclones)s volume instances." -msgstr "" -"Não é possível excluir o volume de cache: %(cachevol_name)s. Ele foi " -"atualizado em %(updated_at)s e atualmente tem %(numclones)s instâncias de " -"volume." - -msgid "Cannot delete encryption specs. Volume type in use." -msgstr "" -"Não é possível excluir especificações de criptografia. Tipo de volume em uso." - -msgid "Cannot determine storage pool settings." -msgstr "" -"Não é possível determinar as configurações do conjunto de armazenamentos." - -msgid "Cannot execute /sbin/mount.sofs" -msgstr "Não é possível executar /sbin/mount.sofs" - -#, python-format -msgid "Cannot find CG group %s." -msgstr "Não é possível localizar o grupo CG %s." - -#, python-format -msgid "" -"Cannot find Controller Configuration Service for storage system " -"%(storage_system)s." -msgstr "" -"Não é possível localizar o Serviço de Configuração do Controlador para o " -"sistema de armazenamento %(storage_system)s." - -#, python-format -msgid "Cannot find Replication Service to create volume for snapshot %s." -msgstr "" -"Não é possível localizar o Serviço de replicação para criar volume para a " -"captura instantânea %s." - -#, python-format -msgid "Cannot find Replication Service to delete snapshot %s." -msgstr "" -"Não é possível localizar o Serviço de replicação para excluir a captura " -"instantânea %s." - -#, python-format -msgid "Cannot find Replication service on system %s." 
-msgstr "Não é possível localizar o Serviço de replicação no sistema %s." - -#, python-format -msgid "Cannot find Volume: %(id)s. unmanage operation. Exiting..." -msgstr "" -"Não é possível localizar o Volume: %(id)s. operação cancelar gerenciamento. " -"Saindo..." - -#, python-format -msgid "Cannot find Volume: %(volumename)s. Extend operation. Exiting...." -msgstr "" -"Não é possível localizar Volume:%(volumename)s. Estender operação. Saindo...." - -#, python-format -msgid "Cannot find device number for volume %(volumeName)s." -msgstr "" -"Não é possível localizar o número do dispositivo para o volume " -"%(volumeName)s." - -msgid "Cannot find migration task." -msgstr "Não é possível localizar a tarefa de migração." - -#, python-format -msgid "Cannot find replication service on system %s." -msgstr "Não é possível localizar o serviço de replicação no sistema %s." - -#, python-format -msgid "Cannot find source CG instance. consistencygroup_id: %s." -msgstr "" -"Não é possível localizar a instância CG de origem. consistencygroup_id: %s." - -#, python-format -msgid "Cannot get mcs_id by channel id: %(channel_id)s." -msgstr "Não é possível obter mcs_id por ID de canal: %(channel_id)s." - -msgid "Cannot get necessary pool or storage system information." -msgstr "" -"Não é possível obter as informações necessárias do conjunto ou do sistema de " -"armazenamento." - -#, python-format -msgid "" -"Cannot get or create a storage group: %(sgGroupName)s for volume " -"%(volumeName)s " -msgstr "" -"Não é possível obter ou criar um grupo de armazenamento: %(sgGroupName)s " -"para o volume %(volumeName)s " - -#, python-format -msgid "Cannot get or create initiator group: %(igGroupName)s. " -msgstr "Não é possível obter ou criar o grupo inicializador: %(igGroupName)s. " - -#, python-format -msgid "Cannot get port group: %(pgGroupName)s. " -msgstr "Não é possível obter grupo da porta: %(pgGroupName)s. 
" - -#, python-format -msgid "" -"Cannot get storage group: %(sgGroupName)s from masking view " -"%(maskingViewInstanceName)s. " -msgstr "" -"Não é possível obter o grupo de armazenamento: %(sgGroupName)s da " -"visualização de mascaramento %(maskingViewInstanceName)s. " - -#, python-format -msgid "" -"Cannot get supported size range for %(sps)s Return code: %(rc)lu. Error: " -"%(error)s." -msgstr "" -"Não é possível obter o intervalo de tamanho suportado para %(sps)s Código de " -"retorno: %(rc)lu. Erro: %(error)s." - -#, python-format -msgid "" -"Cannot get the default storage group for FAST policy: %(fastPolicyName)s." -msgstr "" -"Não é possível obter o grupo de armazenamentos padrão para a política FAST: " -"%(fastPolicyName)s." - -msgid "Cannot get the portgroup from the masking view." -msgstr "" -"Não é possível obter o grupo de portas a partir da visualização de máscara." - -msgid "Cannot mount Scality SOFS, check syslog for errors" -msgstr "Não é possível montar Scality SOFS, verifique os erros no syslog" - -msgid "Cannot ping DRBDmanage backend" -msgstr "Não é possível executar ping do backend do DRBDmanage" - -#, python-format -msgid "Cannot place volume %(id)s on %(host)s" -msgstr "Não é possível colocar o volume %(id)s em %(host)s" - -#, python-format -msgid "" -"Cannot provide both 'cgsnapshot_id' and 'source_cgid' to create consistency " -"group %(name)s from source." -msgstr "" -"Não é possível fornecer 'cgsnapshot_id' e 'source_cgid' para criar o grupo " -"de consistências %(name)s da origem." - -msgid "Cannot register resource" -msgstr "Não foi possível registrar recurso" - -msgid "Cannot register resources" -msgstr "Não foi possível registrar recursos" - -#, python-format -msgid "" -"Cannot remove volume %(volume_id)s from consistency group %(group_id)s " -"because it is not in the group." -msgstr "" -"Não é possível remover o volume %(volume_id)s a partir do grupo de " -"consistências %(group_id)s porque ele não está no grupo." 
- -#, python-format -msgid "" -"Cannot remove volume %(volume_id)s from consistency group %(group_id)s " -"because volume is in an invalid state: %(status)s. Valid states are: " -"%(valid)s." -msgstr "" -"Não é possível remover o volume %(volume_id)s a partir do grupo de " -"consistências %(group_id)s porque o volume está em um estado inválido: " -"%(status)s. Os estados válidos são: %(valid)s." - -#, python-format -msgid "Cannot retype from HPE3PARDriver to %s." -msgstr "Não é possível redefinir de HPE3PARDriver para %s." - -msgid "Cannot retype from one 3PAR array to another." -msgstr "Não é possível digitar novamente de uma matriz 3PAR para outra." - -msgid "Cannot retype to a CPG in a different domain." -msgstr "Não é possível digitar novamente para um CPG em um domínio diferente." - -msgid "Cannot retype to a snap CPG in a different domain." -msgstr "" -"Não é possível digitar novamente em um CPG de snap em um domínio diferente." - -msgid "" -"Cannot run vgc-cluster command, please ensure software is installed and " -"permissions are set properly." -msgstr "" -"Não é possível executar o comando vgc-cluster; verifique se o software está " -"instalado e as permissões estão configuradas corretamente." - -msgid "Cannot set both hitachi_serial_number and hitachi_unit_name." -msgstr "Não é possível definir hitachi_serial_number e hitachi_unit_name." - -msgid "Cannot specify both protection domain name and protection domain id." -msgstr "Não é possível especificar o nome e o ID do domínio de proteção." - -msgid "Cannot specify both storage pool name and storage pool id." -msgstr "" -"Não é possível especificar o nome e o ID do conjunto de armazenamentos." - -#, python-format -msgid "" -"Cannot update consistency group %(group_id)s because no valid name, " -"description, add_volumes, or remove_volumes were provided." 
-msgstr "" -"Não é possível atualizar o grupo de consistências %(group_id)s porque nenhum " -"nome, descrição, add_volumes ou remove_volumes válido foi fornecido." - -msgid "Cannot update encryption specs. Volume type in use." -msgstr "" -"Não é possível atualizar especificações de criptografia. Tipo de volume em " -"uso." - -#, python-format -msgid "Cannot update volume_type %(id)s" -msgstr "Não é possível atualizar volume_type %(id)s" - -#, python-format -msgid "Cannot verify the existence of object:%(instanceName)s." -msgstr "Não é possível verificar a existência do objeto: %(instanceName)s." - -#, python-format -msgid "CgSnapshot %(cgsnapshot_id)s could not be found." -msgstr "CgSnapshot %(cgsnapshot_id)s não pôde ser encontrado." - -msgid "Cgsnahost is empty. No consistency group will be created." -msgstr "Cgsnahost está vazio. Nenhum grupo de consistências será criado." - -msgid "Change hostlun id error." -msgstr "Erro ao alterar o ID de hostlun." - -msgid "Change lun priority error." -msgstr "Erro ao mudar prioridade de LUN." - -msgid "Change lun smarttier policy error." -msgstr "Erro ao mudar política smarttier de LUN." - -#, python-format -msgid "" -"Change would make usage less than 0 for the following resources: %(unders)s" -msgstr "A mudança usaria menos de 0 para os recursos a seguir: %(unders)s" - -msgid "Check access permissions for the ZFS share assigned to this driver." -msgstr "" -"Verifique as permissões de acesso para o compartilhamento do ZFS designado a " -"este driver." - -msgid "Check hostgroup associate error." -msgstr "Verifique o erro de associação ao grupo de hosts." - -msgid "Check initiator added to array error." -msgstr "Erro ao verificar inicializador incluído na matriz." - -msgid "Check initiator associated to host error." -msgstr "Erro ao verificar inicializador associado ao host." - -msgid "Check lungroup associate error." -msgstr "Verifique o erro de associação ao grupo de LUNs." - -msgid "Check portgroup associate error." 
-msgstr "Verifique o erro de associação ao grupo de portas." - -msgid "" -"Check the state of the http service. Also ensure that the https port number " -"is the same as the one specified in cinder.conf." -msgstr "" -"Verifique o estado do serviço http. Além disso, assegure-se de que o número " -"da porta https seja o mesmo que o especificado em cinder.conf." - -msgid "Chunk size is not multiple of block size for creating hash." -msgstr "" -"O tamanho do chunk não é múltiplo do tamanho de bloco para criar o hash." - -#, python-format -msgid "Cisco Fibre Channel Zoning CLI error: %(reason)s" -msgstr "Erro na CLI do Fibre Channel Zoning Cisco: %(reason)s" - -#, python-format -msgid "Clone feature is not licensed on %(storageSystem)s." -msgstr "O recurso de clonagem não possui licença no %(storageSystem)s." - -#, python-format -msgid "" -"Clone type '%(clone_type)s' is invalid; valid values are: '%(full_clone)s' " -"and '%(linked_clone)s'." -msgstr "" -"O tipo de clone '%(clone_type)s' é inválido; os valores válidos são: " -"'%(full_clone)s' e '%(linked_clone)s'." - -msgid "" -"Cluster is not formatted. You should probably perform \"dog cluster format\"." -msgstr "" -"O cluster não está formatado. É provável que seja necessário executar \"dog " -"cluster format\"." - -#, python-format -msgid "Coho Data Cinder driver failure: %(message)s" -msgstr "Falha no driver Coho Data Cinder: %(message)s" - -msgid "Coho rpc port is not configured" -msgstr "A porta RPC Coho não está configurada" - -#, python-format -msgid "Command %(cmd)s blocked in the CLI and was cancelled" -msgstr "Comando %(cmd)s bloqueado na CLI e foi cancelado" - -#, python-format -msgid "CommandLineHelper._wait_for_condition: %s timeout." -msgstr "CommandLineHelper._wait_for_condition: tempo limite de %s." - -msgid "Compression Enabler is not installed. Can not create compressed volume." -msgstr "" -"Ativador de Deduplicação não está instalado. Não é possível criar volume " -"compactado." 
- -#, python-format -msgid "Compute cluster: %(cluster)s not found." -msgstr "Cluster de cálculo: %(cluster)s não localizado." - -msgid "Condition has no field." -msgstr "A condição não possui campo." - -#, python-format -msgid "Config 'max_over_subscription_ratio' invalid. Must be > 0: %s" -msgstr "" -"Configuração de 'max_over_subscription_ratio' inválida. Deve ser > 0: %s" - -msgid "Configuration error: dell_sc_ssn not set." -msgstr "Erro de configuração: dell_sc_ssn não configurado." - -#, python-format -msgid "Configuration file %(configurationFile)s does not exist." -msgstr "O arquivo de configuração %(configurationFile)s não existe." - -msgid "Configuration is not found." -msgstr "A configuração não foi localizada" - -#, python-format -msgid "Configuration value %s is not set." -msgstr "O valor de configuração %s não está configurado." - -#, python-format -msgid "" -"Conflicting QoS specifications in volume type %s: when QoS spec is " -"associated to volume type, legacy \"netapp:qos_policy_group\" is not allowed " -"in the volume type extra specs." -msgstr "" -"Conflito de especificações de QoS no tipo de volume %s: quando a " -"especificação de QoS é associada ao tipo de volume; \"netapp:qos_policy_group" -"\" não é permitido nas especificações extras de tipo de volume." - -#, python-format -msgid "Connection to glance failed: %(reason)s" -msgstr "Falha de conexão ao Glance: %(reason)s" - -#, python-format -msgid "Connection to swift failed: %(reason)s" -msgstr "Conexão com o swift falhou: %(reason)s" - -#, python-format -msgid "Connector does not provide: %s" -msgstr "O conector não fornece: %s" - -#, python-format -msgid "Connector doesn't have required information: %(missing)s" -msgstr "O conector não possui as informações necessárias: %(missing)s" - -msgid "Consistency group is empty. No cgsnapshot will be created." -msgstr "O grupo de consistências está vazio. Nenhuma cgsnapshot será criada." 
- -#, python-format -msgid "ConsistencyGroup %(consistencygroup_id)s could not be found." -msgstr "ConsistencyGroup %(consistencygroup_id)s não pôde ser encontrado." - -msgid "Container" -msgstr "Contêiner" - -msgid "Container size smaller than required file size." -msgstr "Tamanho do contêiner menor do que o tamanho do arquivo necessário." - -msgid "Content type not supported." -msgstr "Tipo de conteúdo não suportado." - -#, python-format -msgid "Controller Configuration Service not found on %(storageSystemName)s." -msgstr "" -"O Serviço de configuração do Controller não foi localizado em " -"%(storageSystemName)s." - -#, python-format -msgid "Controller IP '%(host)s' could not be resolved: %(e)s." -msgstr "O IP do controlador '%(host)s' não pôde ser resolvido: %(e)s." - -#, python-format -msgid "Converted to %(f1)s, but format is now %(f2)s" -msgstr "Convertido em %(f1)s, mas o formato agora é %(f2)s" - -#, python-format -msgid "Converted to %(vol_format)s, but format is now %(file_format)s" -msgstr "Convertido em %(vol_format)s, mas o formato agora é %(file_format)s" - -#, python-format -msgid "Converted to raw, but format is now %s" -msgstr "Convertido em bruto, mas o formato é agora %s" - -#, python-format -msgid "Converted to raw, but format is now %s." -msgstr "Convertida para raw, mas o formato agora é %s." - -msgid "Coordinator uninitialized." -msgstr "Coordenador não inicializado." - -#, python-format -msgid "" -"Copy volume task failed: convert_to_base_volume: id=%(id)s, status=" -"%(status)s." -msgstr "" -"Cópia da tarefa do volume falhou: convert_to_base_volume: id=%(id)s, " -"configuração=%(status)s." - -#, python-format -msgid "" -"Copy volume task failed: create_cloned_volume id=%(id)s, status=%(status)s." -msgstr "" -"A tarefa de cópia de volume falhou: create_cloned_volume id=%(id)s, status=" -"%(status)s." - -#, python-format -msgid "Copying metadata from %(src_type)s %(src_id)s to %(vol_id)s." 
-msgstr "Copiando metadados de %(src_type)s %(src_id)s para %(vol_id)s." - -msgid "" -"Could not determine which Keystone endpoint to use. This can either be set " -"in the service catalog or with the cinder.conf config option " -"'backup_swift_auth_url'." -msgstr "" -"Não foi possível determinar qual terminal Keystone usar. Isso pode ser " -"configurado no catálogo de serviços ou com a opção de configuração cinder." -"conf 'backup_swift_auth_url'." - -msgid "" -"Could not determine which Swift endpoint to use. This can either be set in " -"the service catalog or with the cinder.conf config option 'backup_swift_url'." -msgstr "" -"Não foi possível determinar qual terminal Swift usar. Isso pode ser " -"configurado no catálogo de serviços ou com a opção de configuração cinder." -"conf 'backup_swift_url'." - -msgid "Could not find DISCO wsdl file." -msgstr "Não foi possível localizar o arquivo wsdl DISCO." - -#, python-format -msgid "Could not find GPFS cluster id: %s." -msgstr "Não foi possível localizar o ID do cluster GPFS: %s." - -#, python-format -msgid "Could not find GPFS file system device: %s." -msgstr "" -"Não foi possível localizar o dispositivo do sistema de arquivos GPFS: %s." - -#, python-format -msgid "Could not find config at %(path)s" -msgstr "Não foi possível localizar a configuração em %(path)s" - -#, python-format -msgid "Could not find iSCSI export for volume %s" -msgstr "Não foi possível localizar iSCSI export para o volume %s" - -#, python-format -msgid "Could not find iSCSI target for volume: %(volume_id)s." -msgstr "" -"Não foi possível localizar o destino de iSCSI para o volume: %(volume_id)s." - -#, python-format -msgid "Could not find key in output of command %(cmd)s: %(out)s." -msgstr "" -"Não foi possível localizar a chave na saída do comando %(cmd)s: %(out)s." 
- -#, python-format -msgid "Could not find parameter %(param)s" -msgstr "Não foi possível encontrar o parâmetro %(param)s" - -#, python-format -msgid "Could not find target %s" -msgstr "Não foi possível localizar o destino %s" - -#, python-format -msgid "Could not find the parent volume for Snapshot '%s' on array." -msgstr "" -"Não foi possível localizar o volume pai para a Captura Instantânea %s na " -"matriz." - -#, python-format -msgid "Could not find unique snapshot %(snap)s on volume %(vol)s." -msgstr "" -"Não foi possível localizar a captura instantânea exclusiva %(snap)s no " -"volume %(vol)s." - -msgid "Could not get system name." -msgstr "Não foi possível obter o nome do sistema." - -#, python-format -msgid "Could not load paste app '%(name)s' from %(path)s" -msgstr "" -"Não foi possível carregar o aplicativo paste app '%(name)s' a partir do " -"%(path)s" - -#, python-format -msgid "" -"Could not read information for snapshot %(name)s. Code: %(code)s. Reason: " -"%(reason)s" -msgstr "" -"Não foi possível ler informações para a captura instantânea %(name)s. " -"Código: %(code)s. Razão: %(reason)s" - -#, python-format -msgid "Could not restore configuration file %(file_path)s: %(exc)s" -msgstr "" -"Não foi possível restaurar o arquivo de configuração %(file_path)s: %(exc)s" - -#, python-format -msgid "Could not save configuration to %(file_path)s: %(exc)s" -msgstr "Não foi possível salvar a configuração para %(file_path)s: %(exc)s" - -#, python-format -msgid "Could not start consistency group snapshot %s." -msgstr "" -"Não foi possível iniciar a captura instantânea %s do grupo de consistências." - -#, python-format -msgid "Counter %s not found" -msgstr "Contador %s não localizado" - -msgid "Create QoS policy error." -msgstr "Erro ao criar política de QoS." - -#, python-format -msgid "" -"Create backup aborted, expected backup status %(expected_status)s but got " -"%(actual_status)s." 
-msgstr "" -"Criação de backup interrompida, esperava-se o status de backup " -"%(expected_status)s, mas foi obtido %(actual_status)s." - -#, python-format -msgid "" -"Create backup aborted, expected volume status %(expected_status)s but got " -"%(actual_status)s." -msgstr "" -"Criação de backup interrompida, esperava-se o status de volume " -"%(expected_status)s, mas foi obtido %(actual_status)s." - -msgid "Create export for volume failed." -msgstr "Falha ao criar exportação para o volume." - -msgid "Create hostgroup error." -msgstr "Erro ao criar grupo de hosts." - -#, python-format -msgid "Create hypermetro error. %s." -msgstr "Erro ao criar hypermetro. %s." - -msgid "Create lun error." -msgstr "Erro de criação de LUN." - -msgid "Create lun migration error." -msgstr "Erro ao criar migração de LUN." - -msgid "Create luncopy error." -msgstr "Erro de criação de luncopy." - -msgid "Create lungroup error." -msgstr "Erro ao criar grupo de LUNs." - -msgid "Create manager volume flow failed." -msgstr "Falha ao criar fluxo de volume do gerenciador." - -msgid "Create port group error." -msgstr "Erro ao criar grupo de portas." - -msgid "Create replication error." -msgstr "Erro ao criar replicação." - -#, python-format -msgid "Create replication pair failed. Error: %s." -msgstr "Falha ao criar o par de replicação. Erro %s." - -msgid "Create snapshot error." -msgstr "Erro de criação de captura instantânea." - -#, python-format -msgid "Create volume error. Because %s." -msgstr "Erro ao criar volume. Porque %s." - -msgid "Create volume failed." -msgstr "Criar o volume falhou." - -msgid "Creating a consistency group from a source is not currently supported." -msgstr "" -"Criar um grupo de consistências a partir de uma origem não é atualmente " -"suportado." - -#, python-format -msgid "" -"Creating and activating zone set failed: (Zone set=%(cfg_name)s error=" -"%(err)s)." 
-msgstr "" -"Criação e ativação de conjunto de zonas falhou: (Conjunto de Zona=" -"%(cfg_name)s erro=%(err)s)." - -#, python-format -msgid "" -"Creating and activating zone set failed: (Zone set=%(zoneset)s error=" -"%(err)s)." -msgstr "" -"Criação e ativação de conjunto de zonas falhou: (Conjunto de Zona=" -"%(zoneset)s erro=%(err)s)." - -#, python-format -msgid "Creating usages for %(begin_period)s until %(end_period)s" -msgstr "Criando usos para %(begin_period)s até %(end_period)s" - -msgid "Current host isn't part of HGST domain." -msgstr "O host atual não faz parte do domínio HGST." - -#, python-format -msgid "" -"Current host not valid for volume %(id)s with type %(type)s, migration not " -"allowed" -msgstr "" -"O host atual não é válido para o volume %(id)s com tipo %(type)s, migração " -"não permitida" - -#, python-format -msgid "" -"Currently mapped host for volume %(vol)s is in unsupported host group with " -"%(group)s." -msgstr "" -"O host mapeado atualmente para o volume %(vol)s está no grupo de hosts não " -"suportado com %(group)s." - -#, python-format -msgid "" -"DRBDmanage driver error: expected key \"%s\" not in answer, wrong DRBDmanage " -"version?" -msgstr "" -"Erro do driver DRBDmanager: a chave esperada \"%s\" não está na resposta. A " -"versão de DRBDmanage está errada?" - -msgid "" -"DRBDmanage driver setup error: some required libraries (dbus, drbdmanage.*) " -"not found." -msgstr "" -"Erro de configuração do driver DRBDmanage: algumas bibliotecas necessárias " -"(dbus, drbdmanage.*) não encontrado." 
- -#, python-format -msgid "DRBDmanage expected one resource (\"%(res)s\"), got %(n)d" -msgstr "DRBDmanage esperava um recurso (\"%(res)s\"), foi obtido %(n)d" - -#, python-format -msgid "" -"DRBDmanage timeout waiting for new volume after snapshot restore; resource " -"\"%(res)s\", volume \"%(vol)s\"" -msgstr "" -"Tempo limite de DRBDmanage ao aguardar novo volume após restauração da " -"captura instantânea; recurso \"%(res)s\", volume \"%(vol)s\"" - -#, python-format -msgid "" -"DRBDmanage timeout waiting for snapshot creation; resource \"%(res)s\", " -"snapshot \"%(sn)s\"" -msgstr "" -"Tempo limite de DRBDmanage ao aguardar a criação da captura instantânea; " -"recurso \"%(res)s\", captura instantânea\"%(sn)s\"" - -#, python-format -msgid "" -"DRBDmanage timeout waiting for volume creation; resource \"%(res)s\", volume " -"\"%(vol)s\"" -msgstr "" -"Tempo limite de DRBDmanage ao aguardar a criação do volume; recurso \"%(res)s" -"\", volume \"%(vol)s\"" - -#, python-format -msgid "" -"DRBDmanage timeout waiting for volume size; volume ID \"%(id)s\" (res " -"\"%(res)s\", vnr %(vnr)d)" -msgstr "" -"Tempo limite de DRBDmanage ao aguardar o tamanho do volume; ID de volume " -"\"%(id)s\" (res \"%(res)s\", vnr %(vnr)d)" - -msgid "Data ONTAP API version could not be determined." -msgstr "A versão da API do Data ONTAP não pôde ser determinada." - -msgid "Data ONTAP operating in 7-Mode does not support QoS policy groups." -msgstr "" -"A operação ONTAP de dados em Modo 7 não suporta grupos de política de QoS." - -msgid "Database schema downgrade is not allowed." -msgstr "O downgrade do esquema do banco de dados não é permitido." - -#, python-format -msgid "Dataset %s is not shared in Nexenta Store appliance" -msgstr "" -"O conjunto de dados %s não está compartilhado no dispositivo Nexenta Store." - -#, python-format -msgid "Dataset group %s not found at Nexenta SA" -msgstr "Grupo de conjunto de dados %s não localizado no Nexenta SA." 
- -#, python-format -msgid "" -"Dedup is a valid provisioning type, but requires WSAPI version " -"'%(dedup_version)s' version '%(version)s' is installed." -msgstr "" -"A deduplicação é um tipo de fornecimento válido, mas requer a versão WSAPI " -"'%(dedup_version)s' versão '%(version)s' está instalada." - -msgid "Dedup luns cannot be extended" -msgstr "LUNs dedup não podem ser estendidos" - -#, python-format -msgid "" -"Default quota for resource: %(res)s is set by the default quota flag: quota_" -"%(res)s, it is now deprecated. Please use the default quota class for " -"default quota." -msgstr "" -"Quota padrão para recurso: %(res)s é definida pela flag de quota padrão: " -"quota_%(res)s, isso está deprecado agora. Por favor, use a classe de quota " -"padrão para definir a quota padrão." - -msgid "Default volume type can not be found." -msgstr "O tipo de volume padrão não pode ser localizado." - -msgid "Delete LUNcopy error." -msgstr "Erro ao excluir LUNcopy." - -msgid "Delete QoS policy error." -msgstr "Erro ao excluir política de QoS." - -msgid "Delete associated lun from lungroup error." -msgstr "Erro ao excluir LUN associado do grupo de LUNs." - -#, python-format -msgid "" -"Delete backup aborted, the backup service currently configured " -"[%(configured_service)s] is not the backup service that was used to create " -"this backup [%(backup_service)s]." -msgstr "" -"Exclusão de backup interrompida, o serviço de backup atualmente configurado " -"[%(configured_service)s] não é o serviço de backup que foi usado para criar " -"esse backup [%(backup_service)s]." - -msgid "Delete consistency group failed." -msgstr "Falha ao excluir grupo de consistências." - -msgid "Delete hostgroup error." -msgstr "Erro ao excluir grupo de hosts." - -msgid "Delete hostgroup from mapping view error." -msgstr "Erro ao excluir grupo de hosts do mapeamento." - -msgid "Delete lun error." -msgstr "Erro ao excluir lun." - -msgid "Delete lun migration error." 
-msgstr "Erro ao excluir migração de LUN." - -msgid "Delete lungroup error." -msgstr "Erro ao excluir grupo de LUNs." - -msgid "Delete lungroup from mapping view error." -msgstr "Erro ao excluir grupo de LUNs da visualização de mapeamento." - -msgid "Delete mapping view error." -msgstr "Erro ao excluir visualização de mapeamento." - -msgid "Delete port group error." -msgstr "Erro ao excluir grupo de portas." - -msgid "Delete portgroup from mapping view error." -msgstr "Erro ao excluir grupo de portas da visualização de mapeamento." - -msgid "Delete snapshot error." -msgstr "Erro ao excluir captura instantânea." - -#, python-format -msgid "Delete snapshot of volume not supported in state: %s." -msgstr "Remoção da captura instantânea do volume não suportada no estado: %s." - -#, python-format -msgid "" -"Delete_backup aborted, expected backup status %(expected_status)s but got " -"%(actual_status)s." -msgstr "" -"Delete_backup interrompida, esperava-se o status de backup " -"%(expected_status)s mas obteve %(actual_status)s." - -msgid "Deleting volume from database and skipping rpc." -msgstr "Excluindo volume do banco de dados e ignorando rpc." - -#, python-format -msgid "Deleting zones failed: (command=%(cmd)s error=%(err)s)." -msgstr "Exclusão de zonas falhou: (comando=%(cmd)s erro=%(err)s)." - -msgid "Dell API 2.1 or later required for Consistency Group support" -msgstr "" -"Dell API 2.1 ou mais recente é necessária para suporte do Grupo de " -"consistências" - -msgid "" -"Dell Cinder driver configuration error replication not supported with direct " -"connect." 
-msgstr "" -"Replicação de erro de configuração do driver Dell Cinder não suportada com " -"conexão direta" - -#, python-format -msgid "Dell Cinder driver configuration error replication_device %s not found" -msgstr "" -"replication_device %s de erro de configuração de driver Dell Cinder não " -"localizada" - -msgid "Describe-resource is admin only functionality" -msgstr "O Descrever-recurso é uma funcionalidade apenas administrativa" - -#, python-format -msgid "Destination has migration_status %(stat)s, expected %(exp)s." -msgstr "O destino possui migration_status %(stat)s, esperado %(exp)s." - -msgid "Destination volume not mid-migration." -msgstr "Volume de destino não de migração intermediária." - -msgid "" -"Detach volume failed: More than one attachment, but no attachment_id " -"provided." -msgstr "" -"Falha ao remover volume: mais de um anexo, mas nenhum attachment_id " -"fornecido." - -msgid "Detach volume from instance and then try again." -msgstr "Remova o volume da instância e, em seguida, tente novamente." - -#, python-format -msgid "Detected more than one volume with name %(vol_name)s" -msgstr "Detectado mais de um volume com o nome %(vol_name)s" - -#, python-format -msgid "Did not find expected column in %(fun)s: %(hdr)s." -msgstr "A coluna esperada não foi localizada em %(fun)s: %(hdr)s." - -#, python-format -msgid "Did not find the expected key %(key)s in %(fun)s: %(raw)s." -msgstr "" -"Não foi possível localizar a chave esperada %(key)s em %(fun)s: %(raw)s." - -msgid "Disabled reason contains invalid characters or is too long" -msgstr "Razão desativada contém caracteres inválidos ou é muito longa" - -#, python-format -msgid "Domain with name %s wasn't found." -msgstr "O domínio com o nome %s não foi localizado." - -#, python-format -msgid "" -"Downlevel GPFS Cluster Detected. GPFS Clone feature not enabled in cluster " -"daemon level %(cur)s - must be at least at level %(min)s." -msgstr "" -"Cluster GPFS de Nível Inferior Detectado. 
O recurso Clone do GPFS não está " -"ativado no nível de daemon do cluster %(cur)s - deve estar pelo menos no " -"nível %(min)s." - -#, python-format -msgid "Driver initialize connection failed (error: %(err)s)." -msgstr "Falha ao inicializar conexão do driver (erro: %(err)s)." - -msgid "Driver must implement initialize_connection" -msgstr "O driver deve implementar initialize_connection" - -#, python-format -msgid "" -"Driver successfully decoded imported backup data, but there are missing " -"fields (%s)." -msgstr "" -"Driver decodificado com êxito importou os dados de backup, mas há campos " -"ausentes (%s)." - -#, python-format -msgid "" -"E-series proxy API version %(current_version)s does not support full set of " -"SSC extra specs. The proxy version must be at at least %(min_version)s." -msgstr "" -"A versão %(current_version)s da API de proxy E-series não suporta o conjunto " -"integral de especificações extras SSC. A versão do proxy deve ser pelo menos " -"%(min_version)s." - -#, python-format -msgid "" -"EMC VNX Cinder Driver CLI exception: %(cmd)s (Return Code: %(rc)s) (Output: " -"%(out)s)." -msgstr "" -"Exceção da CLI do Driver Cinder de EMC VNX: %(cmd)s (Código de Retorno: " -"%(rc)s) (Saída: %(out)s)." - -#, python-format -msgid "" -"EMC VNX Cinder Driver SPUnavailableException: %(cmd)s (Return Code: %(rc)s) " -"(Output: %(out)s)." -msgstr "" -"SPUnavailableException do Driver EMC VNX Cinder: %(cmd)s (Código de Retorno: " -"%(rc)s) (Saída: %(out)s)." - -msgid "" -"EcomServerIp, EcomServerPort, EcomUserName, EcomPassword must have valid " -"values." -msgstr "" -"EcomServerIp, EcomServerPort, EcomUserName, EcomPassword devem ter valores " -"válidos." - -#, python-format -msgid "" -"Either 'cgsnapshot_id' or 'source_cgid' must be provided to create " -"consistency group %(name)s from source." -msgstr "" -"'cgsnapshot_id' ou 'source_cgid' deve ser fornecido para criar o grupo de " -"consistências %(name)s a partir da origem." 
-
-#, python-format
-msgid ""
-"Either SLO: %(slo)s or workload %(workload)s is invalid. Examine previous "
-"error statement for valid values."
-msgstr ""
-"O SLO: %(slo)s ou carga de trabalho %(workload)s é inválido. Examine a "
-"instrução de erro anterior para obter os valores válidos."
-
-msgid "Either hitachi_serial_number or hitachi_unit_name is required."
-msgstr ""
-"Apenas um dos campos hitachi_serial_number ou hitachi_unit_name é necessário."
-
-#, python-format
-msgid "Element Composition Service not found on %(storageSystemName)s."
-msgstr ""
-"O Serviço de composição do elemento não foi localizado em "
-"%(storageSystemName)s."
-
-msgid "Enables QoS."
-msgstr "Ativa QoS."
-
-msgid "Enables compression."
-msgstr "Ativa a compactação."
-
-msgid "Enables replication."
-msgstr "Permite replicação."
-
-msgid "Ensure that configfs is mounted at /sys/kernel/config."
-msgstr "Assegure-se de que configfs esteja montado em /sys/kernel/config."
-
-#, python-format
-msgid ""
-"Error Adding Initiator: %(initiator)s on groupInitiatorGroup: "
-"%(initiatorgroup)s Return code: %(ret.status)d Message: %(ret.data)s ."
-msgstr ""
-"Erro ao Incluir Inicializador: %(initiator)s em groupInitiatorGroup: "
-"%(initiatorgroup)s Código de retorno: %(ret.status)d Mensagem: %(ret.data)s."
-
-#, python-format
-msgid ""
-"Error Adding to TargetGroup: %(targetgroup)s withIQN: %(iqn)sReturn code: "
-"%(ret.status)d Message: %(ret.data)s."
-msgstr ""
-"Erro ao Incluir ao TargetGroup: %(targetgroup)s com IQN: %(iqn)s Código de "
-"retorno: %(ret.status)d Mensagem: %(ret.data)s."
-
-#, python-format
-msgid "Error Attaching volume %(vol)s."
-msgstr "Erro ao anexar o volume %(vol)s."
-
-#, python-format
-msgid ""
-"Error Cloning Snapshot: %(snapshot)s on Volume: %(lun)s of Pool: %(pool)s "
-"Project: %(project)s Clone project: %(clone_proj)s Return code: "
-"%(ret.status)d Message: %(ret.data)s."
-msgstr ""
-"Erro Clonando Captura Instantânea: %(snapshot)s no Volume: %(lun)s de "
-"Conjunto: %(pool)s Projeto: %(project)s Projeto clone: %(clone_proj)s Código "
-"de retorno: %(ret.status)d Mensagem: %(ret.data)s."
-
-#, python-format
-msgid ""
-"Error Create Cloned Volume: %(cloneName)s Return code: %(rc)lu. Error: "
-"%(error)s."
-msgstr ""
-"Erro ao criar volume clonado: %(cloneName)s Código de retorno: %(rc)lu. "
-"Erro: %(error)s."
-
-#, python-format
-msgid ""
-"Error Create Cloned Volume: Volume: %(cloneName)s Source Volume:"
-"%(sourceName)s. Return code: %(rc)lu. Error: %(error)s."
-msgstr ""
-"Erro ao Criar Volume Clonado: Volume: %(cloneName)s Volume de Origem: "
-"%(sourceName)s. Código de Retorno: %(rc)lu. Erro: %(error)s."
-
-#, python-format
-msgid ""
-"Error Create Group: %(groupName)s. Return code: %(rc)lu. Error: %(error)s."
-msgstr ""
-"Erro ao Criar Grupo: %(groupName)s. Código de retorno: %(rc)lu. Erro: "
-"%(error)s."
-
-#, python-format
-msgid ""
-"Error Create Masking View: %(groupName)s. Return code: %(rc)lu. Error: "
-"%(error)s."
-msgstr ""
-"Erro ao Criar Visualização de Mascaramento: %(groupName)s. Código de "
-"retorno: %(rc)lu. Erro: %(error)s."
-
-#, python-format
-msgid ""
-"Error Create Volume: %(volumeName)s. Return code: %(rc)lu. Error: %(error)s."
-msgstr ""
-"Erro ao Criar Volume: %(volumeName)s. Código de retorno: %(rc)lu. Erro: "
-"%(error)s."
-
-#, python-format
-msgid ""
-"Error Create Volume: %(volumename)s. Return code: %(rc)lu. Error: %(error)s."
-msgstr ""
-"Erro ao Criar Volume: %(volumename)s. Código de retorno: %(rc)lu. Erro: "
-"%(error)s."
-
-#, python-format
-msgid ""
-"Error CreateGroupReplica: source: %(source)s target: %(target)s. Return "
-"code: %(rc)lu. Error: %(error)s."
-msgstr ""
-"Erro CreateGroupReplica: origem: %(source)s destino: %(target)s. Código de "
-"retorno: %(rc)lu. Erro: %(error)s."
- -#, python-format -msgid "" -"Error Creating Initiator: %(initiator)s on Alias: %(alias)s Return code: " -"%(ret.status)d Message: %(ret.data)s ." -msgstr "" -"Erro ao Criar Inicializador: %(initiator)s em Alias: %(alias)s Código de " -"retorno: %(ret.status)d Mensagem: %(ret.data)s." - -#, python-format -msgid "" -"Error Creating Project: %(project)s on Pool: %(pool)s Return code: " -"%(ret.status)d Message: %(ret.data)s ." -msgstr "" -"Erro ao Criar Projeto: %(project)s no Conjunto: %(pool)s Código de retorno: " -"%(ret.status)d Mensagem: %(ret.data)s." - -#, python-format -msgid "" -"Error Creating Property: %(property)s Type: %(type)s Description: " -"%(description)s Return code: %(ret.status)d Message: %(ret.data)s." -msgstr "" -"Erro ao criar propriedade: %(property)s Tipo: %(type)s Descrição: " -"%(description)s Código de retorno: %(ret.status)d Mensagem: %(ret.data)s." - -#, python-format -msgid "" -"Error Creating Share: %(name)s Return code: %(ret.status)d Message: " -"%(ret.data)s." -msgstr "" -"Erro ao Criar Compartilhamento: %(name)s Código de retorno: %(ret.status)d " -"Mensagem: %(ret.data)s." - -#, python-format -msgid "" -"Error Creating Snapshot: %(snapshot)s onVolume: %(lun)s to Pool: %(pool)s " -"Project: %(project)s Return code: %(ret.status)d Message: %(ret.data)s." -msgstr "" -"Erro ao Criar Captura Instantânea: %(snapshot)s no Volume: %(lun)s para o " -"Conjunto: %(pool)s Projeto: %(project)s Código de retorno: %(ret.status)d " -"Mensagem: %(ret.data)s." - -#, python-format -msgid "" -"Error Creating Snapshot: %(snapshot)s onshare: %(share)s to Pool: %(pool)s " -"Project: %(project)s Return code: %(ret.status)d Message: %(ret.data)s." -msgstr "" -"Erro ao Criar Captura Instantânea: %(snapshot)s no Compartilhamento: " -"%(share)s para Conjunto: %(pool)s Projeto: %(project)s Código de retorno: " -"%(ret.status)d Mensagem: %(ret.data)s." 
- -#, python-format -msgid "" -"Error Creating Target: %(alias)sReturn code: %(ret.status)d Message: " -"%(ret.data)s ." -msgstr "" -"Erro ao Criar Destino: %(alias)s Código de Retorno: %(ret.status)d Mensagem: " -"%(ret.data)s." - -#, python-format -msgid "" -"Error Creating TargetGroup: %(targetgroup)s withIQN: %(iqn)sReturn code: " -"%(ret.status)d Message: %(ret.data)s ." -msgstr "" -"Erro ao Criar TargetGroup: %(targetgroup)s withIQN: %(iqn)s Código de " -"retorno: %(ret.status)d Mensagem: %(ret.data)s." - -#, python-format -msgid "" -"Error Creating Volume: %(lun)s Size: %(size)s Return code: %(ret.status)d " -"Message: %(ret.data)s." -msgstr "" -"Erro ao Criar Volume: %(lun)s Tamanho: %(size)s Código de retorno: " -"%(ret.status)d Mensagem: %(ret.data)s." - -#, python-format -msgid "" -"Error Creating new composite Volume Return code: %(rc)lu. Error: %(error)s." -msgstr "" -"Erro ao criar o novo código de retorno do volume composto: %(rc)lu. Erro: " -"%(error)s." - -#, python-format -msgid "" -"Error Creating replication action on: pool: %(pool)s Project: %(proj)s " -"volume: %(vol)s for target: %(tgt)s and pool: %(tgt_pool)sReturn code: " -"%(ret.status)d Message: %(ret.data)s ." -msgstr "" -"Erro ao criar ação de replicação em: Conjunto: %(pool)s Projeto: %(proj)s " -"volume: %(vol)s para destino: %(tgt)s e conjunto: %(tgt_pool)s Código de " -"retorno: %(ret.status)d Mensagem: %(ret.data)s." - -msgid "Error Creating unbound volume on an Extend operation." -msgstr "Erro ao criar o volume desvinculado em uma operação Estender." - -msgid "Error Creating unbound volume." -msgstr "Erro ao Criar volume desvinculado." - -#, python-format -msgid "" -"Error Delete Volume: %(volumeName)s. Return code: %(rc)lu. Error: %(error)s." -msgstr "" -"Erro ao Excluir Volume: %(volumeName)s. Código de retorno: %(rc)lu. Erro: " -"%(error)s." - -#, python-format -msgid "" -"Error Deleting Group: %(storageGroupName)s. Return code: %(rc)lu. 
Error: " -"%(error)s" -msgstr "" -"Erro ao excluir grupo: %(storageGroupName)s. Código de retorno:%(rc)lu. " -"Erro: %(error)s" - -#, python-format -msgid "" -"Error Deleting Initiator Group: %(initiatorGroupName)s. Return code: " -"%(rc)lu. Error: %(error)s" -msgstr "" -"Erro ao Excluir Grupo de Iniciadores: %(initiatorGroupName)s. Código de " -"retorno:%(rc)lu. Erro: %(error)s" - -#, python-format -msgid "" -"Error Deleting Snapshot: %(snapshot)s on Share: %(share)s to Pool: %(pool)s " -"Project: %(project)s Return code: %(ret.status)d Message: %(ret.data)s." -msgstr "" -"Erro ao Excluir Captura Instantânea: %(snapshot)s no Compartilhamento: " -"%(share)s para Conjunto: %(pool)s Projeto: %(project)s Código de retorno: " -"%(ret.status)d Mensagem: %(ret.data)s." - -#, python-format -msgid "" -"Error Deleting Snapshot: %(snapshot)s on Volume: %(lun)s to Pool: %(pool)s " -"Project: %(project)s Return code: %(ret.status)d Message: %(ret.data)s." -msgstr "" -"Erro ao Excluir Captura Instantânea: %(snapshot)s no Volume: %(lun)s para " -"Conjunto: %(pool)s Projeto: %(project)s Código de retorno: %(ret.status)d " -"Mensagem: %(ret.data)s." - -#, python-format -msgid "" -"Error Deleting Volume: %(lun)s from Pool: %(pool)s, Project: %(project)s. " -"Return code: %(ret.status)d, Message: %(ret.data)s." -msgstr "" -"Erro ao excluir volume: %(lun)s do Conjunto: %(pool)s, Projeto: %(project)s. " -"Código de retorno: %(ret.status)d, Mensagem: %(ret.data)s." - -#, python-format -msgid "" -"Error Deleting project: %(project)s on pool: %(pool)s Return code: " -"%(ret.status)d Message: %(ret.data)s." -msgstr "" -"Erro ao excluir projeto: %(project)s no pool: %(pool)s Código de retorno: " -"%(ret.status)d Mensagem: %(ret.data)s." - -#, python-format -msgid "" -"Error Deleting replication action: %(id)s Return code: %(ret.status)d " -"Message: %(ret.data)s." -msgstr "" -"Erro ao excluir ação de replicação: %(id)s Código de retorno: %(ret.status)d " -"Mensagem: %(ret.data)s." 
- -#, python-format -msgid "" -"Error Extend Volume: %(volumeName)s. Return code: %(rc)lu. Error: %(error)s." -msgstr "" -"Erro ao Estender Volume: %(volumeName)s. Código de retorno: %(rc)lu. Erro: " -"%(error)s." - -#, python-format -msgid "" -"Error Getting Initiators: InitiatorGroup: %(initiatorgroup)s Return code: " -"%(ret.status)d Message: %(ret.data)s ." -msgstr "" -"Erro ao Obter Inicializadores: InitiatorGroup: %(initiatorgroup)s Código de " -"retorno: %(ret.status)d Mensagem: %(ret.data)s." - -#, python-format -msgid "" -"Error Getting Pool Stats: Pool: %(pool)s Return code: %(status)d Message: " -"%(data)s." -msgstr "" -"Erro ao Obter Estatísticas do Conjunto: Conjunto %(pool)s, Código de " -"Retorno: %(status)d, Mensagem: %(data)s." - -#, python-format -msgid "" -"Error Getting Project Stats: Pool: %(pool)s Project: %(project)s Return " -"code: %(ret.status)d Message: %(ret.data)s." -msgstr "" -"Erro ao obter estatísticas do projeto: Conjunto: %(pool)s Projeto: " -"%(project)s Código de retorno: %(ret.status)d Mensagem: %(ret.data)s." - -#, python-format -msgid "" -"Error Getting Share: %(share)s on Pool: %(pool)s Project: %(project)s Return " -"code: %(ret.status)d Message: %(ret.data)s." -msgstr "" -"Erro ao Obter Compartilhamento: %(share)s no Conjunto: %(pool)s Projeto: " -"%(project)s Código de retorno: %(ret.status)d Mensagem: %(ret.data)s." - -#, python-format -msgid "" -"Error Getting Snapshot: %(snapshot)s on Volume: %(lun)s to Pool: %(pool)s " -"Project: %(project)s Return code: %(ret.status)d Message: %(ret.data)s." -msgstr "" -"Erro ao Obter Captura Instantânea: %(snapshot)s no Volume:%(lun)s para " -"Conjunto: %(pool)s Projeto: %(project)s Código de retorno: %(ret.status)d " -"Mensagem: %(ret.data)s." - -#, python-format -msgid "" -"Error Getting Target: %(alias)sReturn code: %(ret.status)d Message: " -"%(ret.data)s ." -msgstr "" -"Erro ao Obter Destino: %(alias)s Código de retorno: %(ret.status)d Mensagem: " -"%(ret.data)s." 
- -#, python-format -msgid "" -"Error Getting Volume: %(lun)s on Pool: %(pool)s Project: %(project)s Return " -"code: %(ret.status)d Message: %(ret.data)s." -msgstr "" -"Erro ao Obter Volume: %(lun)s em Conjunto: %(pool)s Projeto: %(project)s " -"Código de retorno: %(ret.status)d Mensagem: %(ret.data)s." - -#, python-format -msgid "" -"Error Migrating volume from one pool to another. Return code: %(rc)lu. " -"Error: %(error)s." -msgstr "" -"Erro ao Migrar volume de um conjunto para outro. Código de retorno: " -"%(rc)lu. Erro: %(error)s." - -#, python-format -msgid "" -"Error Modifying masking view : %(groupName)s. Return code: %(rc)lu. Error: " -"%(error)s." -msgstr "" -"Erro ao Modificar visualização de mascaramento: %(groupName)s. Código de " -"retorno: %(rc)lu. Erro: %(error)s." - -#, python-format -msgid "Error Pool ownership: Pool %(pool)s is not owned by %(host)s." -msgstr "" -"Erro de propriedade do Conjunto: O conjunto %(pool)s não é de propriedade do " -"%(host)s." - -#, python-format -msgid "" -"Error Setting props Props: %(props)s on Volume: %(lun)s of Pool: %(pool)s " -"Project: %(project)s Return code: %(ret.status)d Message: %(ret.data)s." -msgstr "" -"Erro ao Definir instalações do operador programável: %(props)s no Volume: " -"%(lun)s de Conjunto: %(pool)s Projeto: %(project)s Código de retorno: " -"%(ret.status)d Mensagem: %(ret.data)s." - -#, python-format -msgid "" -"Error Terminating migrate session. Return code: %(rc)lu. Error: %(error)s." -msgstr "" -"Erro ao finalizar a sessão de migração. Código de retorno: %(rc)lu. Erro: " -"%(error)s." - -#, python-format -msgid "" -"Error Verifying Initiator: %(iqn)s Return code: %(ret.status)d Message: " -"%(ret.data)s." -msgstr "" -"Erro ao Verificar Inicializador: %(iqn)s Código de retorno: %(ret.status)d " -"Mensagem: %(ret.data)s." - -#, python-format -msgid "" -"Error Verifying Pool: %(pool)s Return code: %(ret.status)d Message: " -"%(ret.data)s." 
-msgstr "" -"Erro ao Verificar o Conjunto: %(pool)s Código de retorno: %(ret.status)d " -"Mensagem: %(ret.data)s." - -#, python-format -msgid "" -"Error Verifying Project: %(project)s on Pool: %(pool)s Return code: " -"%(ret.status)d Message: %(ret.data)s." -msgstr "" -"Erro ao Verificar Projeto: %(project)s no Conjunto: %(pool)s Código de " -"retorno: %(ret.status)d Mensagem: %(ret.data)s." - -#, python-format -msgid "" -"Error Verifying Service: %(service)s Return code: %(ret.status)d Message: " -"%(ret.data)s." -msgstr "" -"Erro ao Verificar o Serviço: %(service)s Código de retorno: %(ret.status)d " -"Mensagem: %(ret.data)s." - -#, python-format -msgid "" -"Error Verifying Target: %(alias)s Return code: %(ret.status)d Message: " -"%(ret.data)s." -msgstr "" -"Erro ao Verificar Destino: %(alias)s Código de retorno: %(ret.status)d " -"Mensagem: %(ret.data)s." - -#, python-format -msgid "" -"Error Verifying share: %(share)s on Project: %(project)s and Pool: %(pool)s " -"Return code: %(ret.status)d Message: %(ret.data)s." -msgstr "" -"Erro ao verificar compartilhamento: %(share)s no Projeto: %(project)s e no " -"Conjunto: %(pool)s Código de retorno: %(ret.status)d Mensagem: %(ret.data)s." - -#, python-format -msgid "" -"Error adding Volume: %(volumeName)s with instance path: " -"%(volumeInstancePath)s." -msgstr "" -"Erro ao incluir o Volume: %(volumeName)s com o caminho de instância: " -"%(volumeInstancePath)s." - -#, python-format -msgid "" -"Error adding initiator to group : %(groupName)s. Return code: %(rc)lu. " -"Error: %(error)s." -msgstr "" -"Erro ao incluir inicializador para o grupo :%(groupName)s. Código de " -"retorno :%(rc)lu. Erro: %(error)s." - -#, python-format -msgid "Error adding volume to composite volume. Error is: %(error)s." -msgstr "Erro ao incluir o volume ao volume composto. O erro é: %(error)s." - -#, python-format -msgid "Error appending volume %(volumename)s to target base volume." 
-msgstr "Erro ao anexar o volume %(volumename)s ao volume de base de destino." - -#, python-format -msgid "" -"Error associating storage group : %(storageGroupName)s. To fast Policy: " -"%(fastPolicyName)s with error description: %(errordesc)s." -msgstr "" -"Erro de associação de grupo de armazenamento : %(storageGroupName)s. Para " -"Política FAST: %(fastPolicyName)s com descrição do erro: %(errordesc)s." - -#, python-format -msgid "" -"Error break clone relationship: Sync Name: %(syncName)s Return code: " -"%(rc)lu. Error: %(error)s." -msgstr "" -"Relacionamento de clone de quebra de erro: Nome da sincronização: " -"%(syncName)s Código de retorno: %(rc)lu. Erro: %(error)s." - -msgid "Error connecting to ceph cluster." -msgstr "Erro ao se conectar ao cluster ceph." - -#, python-format -msgid "Error connecting via ssh: %s" -msgstr "Erro ao conectar via ssh: %s" - -#, python-format -msgid "Error creating volume: %s." -msgstr "Erro ao criar volume: %s." - -msgid "Error deleting replay profile." -msgstr "Erro ao excluir perfil de reprodução." - -#, python-format -msgid "Error deleting volume %(ssn)s: %(volume)s" -msgstr "Erro ao excluir o volume %(ssn)s: %(volume)s." - -#, python-format -msgid "Error deleting volume %(vol)s: %(err)s." -msgstr "Erro ao excluir o volume %(vol)s: %(err)s." - -#, python-format -msgid "Error during evaluator parsing: %(reason)s" -msgstr "Erro durante a análise do avaliador: %(reason)s" - -#, python-format -msgid "" -"Error editing share: %(share)s on Pool: %(pool)s Return code: %(ret.status)d " -"Message: %(ret.data)s ." -msgstr "" -"Erro ao editar compartilhamento: %(share)s no Conjunto: %(pool)s Código de " -"retorno: %(ret.status)d Mensagem: %(ret.data)s." - -#, python-format -msgid "" -"Error enabling iSER for NetworkPortal: please ensure that RDMA is supported " -"on your iSCSI port %(port)d on ip %(ip)s." 
-msgstr "" -"Erro ao ativar iSER para NetworkPortal: assegure-se de que RDMA seja " -"suportado na porta iSCSI %(port)d no IP %(ip)s." - -#, python-format -msgid "Error encountered during cleanup of a failed attach: %(ex)s" -msgstr "Erro encontrado durante limpeza de um anexo com falha: %(ex)s" - -#, python-format -msgid "Error executing CloudByte API [%(cmd)s], Error: %(err)s." -msgstr "Erro ao executar a API do CloudByte [%(cmd)s], Erro: %(err)s." - -msgid "Error executing EQL command" -msgstr "Erro ao executar o comando EQL" - -#, python-format -msgid "Error executing command via ssh: %s" -msgstr "Erro ao executar comando via ssh: %s" - -#, python-format -msgid "Error extending volume %(vol)s: %(err)s." -msgstr "Erro ao estender o volume %(vol)s: %(err)s." - -#, python-format -msgid "Error extending volume: %(reason)s" -msgstr "Erro ao estender volume:%(reason)s" - -#, python-format -msgid "Error finding %(name)s." -msgstr "Erro ao localizar %(name)s." - -#, python-format -msgid "Error finding %s." -msgstr "Erro ao localizar %s." - -#, python-format -msgid "" -"Error getting ReplicationSettingData. Return code: %(rc)lu. Error: %(error)s." -msgstr "" -"Erro ao obter ReplicationSettingData. Código de retorno: %(rc)lu. Erro: " -"%(error)s." - -msgid "" -"Error getting appliance version details. Return code: %(ret.status)d " -"Message: %(ret.data)s ." -msgstr "" -"Erro ao obter detalhes da versão do dispositivo. Código de retorno: %(ret." -"status)d Mensagem: %(ret.data)s." - -#, python-format -msgid "Error getting domain id from name %(name)s: %(err)s." -msgstr "Erro ao obter o ID do domínio do nome %(name)s: %(err)s." - -#, python-format -msgid "Error getting domain id from name %(name)s: %(id)s." -msgstr "Erro ao obter ID do domínio do nome %(name)s: %(id)s." - -msgid "Error getting initiator groups." -msgstr "Erro ao obter grupos do inicializador." - -#, python-format -msgid "Error getting pool id from name %(pool)s: %(err)s." 
-msgstr "Erro ao obter ID do conjunto do nome %(pool)s: %(err)s." - -#, python-format -msgid "Error getting pool id from name %(pool_name)s: %(err_msg)s." -msgstr "Erro ao obter o ID do conjunto do nome %(pool_name)s: %(err_msg)s." - -#, python-format -msgid "" -"Error getting replication action: %(id)s. Return code: %(ret.status)d " -"Message: %(ret.data)s ." -msgstr "" -"Erro ao obter ação de replicação: %(id)s. Código de retorno: %(ret.status)d " -"Mensagem: %(ret.data)s." - -msgid "" -"Error getting replication source details. Return code: %(ret.status)d " -"Message: %(ret.data)s ." -msgstr "" -"Erro ao obter detalhes da origem de replicação. Código de retorno: %(ret." -"status)d Mensagem: %(ret.data)s." - -msgid "" -"Error getting replication target details. Return code: %(ret.status)d " -"Message: %(ret.data)s ." -msgstr "" -"Erro ao obter detalhes do destino de replicação. Código de retorno: %(ret." -"status)d Mensagem: %(ret.data)s." - -#, python-format -msgid "" -"Error getting version: svc: %(svc)s.Return code: %(ret.status)d Message: " -"%(ret.data)s." -msgstr "" -"Erro ao obter a versão: svc: %(svc)s. Código de retorno: %(ret.status)d " -"Mensagem: %(ret.data)s." - -#, python-format -msgid "" -"Error in Operation [%(operation)s] for volume [%(cb_volume)s] in CloudByte " -"storage: [%(cb_error)s], error code: [%(error_code)s]." -msgstr "" -"Erro na Operação [%(operation)s] para o volume [%(cb_volume)s] no " -"armazenamento CloudByte: [%(cb_error)s], código de erro: [%(error_code)s]." 
- -#, python-format -msgid "Error in SolidFire API response: data=%(data)s" -msgstr "Erro na resposta da API SolidFire: data=%(data)s" - -#, python-format -msgid "Error in space-create for %(space)s of size %(size)d GB" -msgstr "Erro na criação de espaço para %(space)s de tamanho %(size)d GB" - -#, python-format -msgid "Error in space-extend for volume %(space)s with %(size)d additional GB" -msgstr "Erro em space-extend para o volume %(space)s com %(size)d GB adicional" - -#, python-format -msgid "Error managing volume: %s." -msgstr "Erro ao gerenciar o volume: %s." - -#, python-format -msgid "" -"Error modify replica synchronization: %(sv)s operation: %(operation)s. " -"Return code: %(rc)lu. Error: %(error)s." -msgstr "" -"Erro ao modificar a sincronização de réplica: %(sv)s operação: " -"%(operation)s. Código de retorno: %(rc)lu. Erro: %(error)s." - -#, python-format -msgid "" -"Error modifying Service: %(service)s Return code: %(ret.status)d Message: " -"%(ret.data)s." -msgstr "" -"Erro ao modificar o Serviço: %(service)s Código de retorno: %(ret.status)d " -"Mensagem: %(ret.data)s." - -#, python-format -msgid "" -"Error moving volume: %(vol)s from source project: %(src)s to target project: " -"%(tgt)s Return code: %(ret.status)d Message: %(ret.data)s ." -msgstr "" -"Erro ao mover o volume: %(vol)s do projeto de origem: %(src)s para o projeto " -"de destino: %(tgt)s Código de retorno: %(ret.status)d Mensagem: %(ret.data)s." - -msgid "Error not a KeyError." -msgstr "O erro não é um KeyError." - -msgid "Error not a TypeError." -msgstr "O erro não é um TypeError" - -#, python-format -msgid "Error occurred when creating cgsnapshot %s." -msgstr "Ocorreu um erro ao criar cgsnapshot %s." - -#, python-format -msgid "Error occurred when deleting cgsnapshot %s." -msgstr "Ocorreu um erro ao excluir cgsnapshot %s." - -#, python-format -msgid "Error occurred when updating consistency group %s." -msgstr "Ocorreu um erro ao atualizar o grupo de consistências %s." 
- -#, python-format -msgid "Error renaming volume %(vol)s: %(err)s." -msgstr "Erro ao renomear o volume %(vol)s: %(err)s." - -#, python-format -msgid "Error response: %s" -msgstr "Erro de resposta: %s" - -msgid "Error retrieving volume size" -msgstr "Erro ao recuperar o tamanho do volume" - -#, python-format -msgid "" -"Error sending replication update for action id: %(id)s . Return code: " -"%(ret.status)d Message: %(ret.data)s ." -msgstr "" -"Erro ao enviar atualização de replicação para o ID de ação: %(id)s. Código " -"de retorno: %(ret.status)d Mensagem: %(ret.data)s." - -#, python-format -msgid "" -"Error sending replication update. Returned error: %(err)s. Action: %(id)s." -msgstr "" -"Erro ao enviar atualização de replicação. Erro retornado: %(err)s. Ação: " -"%(id)s." - -#, python-format -msgid "" -"Error setting replication inheritance to %(set)s for volume: %(vol)s project " -"%(project)s Return code: %(ret.status)d Message: %(ret.data)s ." -msgstr "" -"Erro ao configurar herança de replicação para %(set)s para o volume: %(vol)s " -"projeto %(project)s Código de retorno: %(ret.status)d Mensagem: %(ret.data)s." - -#, python-format -msgid "" -"Error severing the package: %(package)s from source: %(src)s Return code: " -"%(ret.status)d Message: %(ret.data)s ." -msgstr "" -"Erro ao separar o pacote: %(package)s da origem: %(src)s Código de retorno: " -"%(ret.status)d Mensagem: %(ret.data)s." - -#, python-format -msgid "Error unbinding volume %(vol)s from pool. %(error)s." -msgstr "Erro ao desvincular o volume %(vol)s do conjunto. %(error)s." - -#, python-format -msgid "Error while authenticating with switch: %s." -msgstr "Erro ao autenticar-se com o comutador: %s" - -#, python-format -msgid "Error while changing VF context %s." -msgstr "Erro ao alterar o contexto do VF %s." - -#, python-format -msgid "Error while checking the firmware version %s." -msgstr "Erro ao verificar a versão do firmware %s." 
- -#, python-format -msgid "Error while checking transaction status: %s" -msgstr "Erro ao verificar status da transação: %s" - -#, python-format -msgid "Error while checking whether VF is available for management %s." -msgstr "Erro ao verificar se o VF está disponível para gerenciamento %s." - -#, python-format -msgid "" -"Error while connecting the switch %(switch_id)s with protocol %(protocol)s. " -"Error: %(error)s." -msgstr "" -"Erro ao conectar o comutador %(switch_id)s com o protocolo %(protocol)s. " -"Erro: %(error)s." - -#, python-format -msgid "Error while creating authentication token: %s" -msgstr "Erro ao criar o token de autenticação: %s." - -#, python-format -msgid "Error while creating snapshot [status] %(stat)s - [result] %(res)s." -msgstr "" -"Erro ao criar a captura instantânea [status] %(stat)s - [result] %(res)s." - -#, python-format -msgid "Error while creating volume [status] %(stat)s - [result] %(res)s." -msgstr "Erro ao criar o volume [status] %(stat)s - [result] %(res)s." - -#, python-format -msgid "Error while deleting snapshot [status] %(stat)s - [result] %(res)s" -msgstr "" -"Erro ao excluir a captura instantânea [status] %(stat)s - [result] %(res)s." - -#, python-format -msgid "Error while deleting volume [status] %(stat)s - [result] %(res)s." -msgstr "Erro ao excluir o volume [status] %(stat)s - [result] %(res)s." - -#, python-format -msgid "Error while extending volume [status] %(stat)s - [result] %(res)s." -msgstr "Erro ao estender o volume [status] %(stat)s - [result] %(res)s." - -#, python-format -msgid "Error while getting %(op)s details, returned code: %(status)s." -msgstr "Erro ao obter detalhes do %(op)s, código retornado: %(status)s." - -#, python-format -msgid "Error while getting data via ssh: (command=%(cmd)s error=%(err)s)." -msgstr "Erro ao obter dados via ssh: (comando=%(cmd)s erro=%(err)s)." - -#, python-format -msgid "Error while getting disco information [%s]." -msgstr "Erro ao obter informações do disco [%s]." 
- -#, python-format -msgid "Error while getting nvp value: %s." -msgstr "Erro ao obter o valor de nvp: %s." - -#, python-format -msgid "Error while getting session information %s." -msgstr "Erro ao obter informações da sessão %s." - -#, python-format -msgid "Error while parsing the data: %s." -msgstr "Erro ao analisar os dados: %s" - -#, python-format -msgid "Error while querying page %(url)s on the switch, reason %(error)s." -msgstr "Erro ao consultar a página %(url)s no comutador, razão %(error)s." - -#, python-format -msgid "" -"Error while removing the zones and cfgs in the zone string: %(description)s." -msgstr "" -"Erro ao remover as novas zonas e cfgs na sequência de zonas. %(description)s." - -#, python-format -msgid "Error while requesting %(service)s API." -msgstr "Erro ao solicitar %(service)s da API." - -#, python-format -msgid "Error while running zoning CLI: (command=%(cmd)s error=%(err)s)." -msgstr "Erro ao executar CLI de zoneamento: (comando=%(cmd)s erro=%(err)s)." - -#, python-format -msgid "" -"Error while updating the new zones and cfgs in the zone string. Error " -"%(description)s." -msgstr "" -"Erro ao atualizar as novas zonas e cfgs na sequência de zonas. Erro " -"%(description)s." - -msgid "Error writing field to database" -msgstr "Erro ao gravar campo no banco de dados" - -#, python-format -msgid "Error[%(stat)s - %(res)s] while getting volume id." -msgstr "Erro[%(stat)s - %(res)s] ao obter o ID do volume." - -#, python-format -msgid "" -"Error[%(stat)s - %(res)s] while restoring snapshot [%(snap_id)s] into volume " -"[%(vol)s]." -msgstr "" -"Error[%(stat)s - %(res)s] ao restaurar captura instantânea [%(snap_id)s] " -"para o volume [%(vol)s]." - -#, python-format -msgid "Error[status] %(stat)s - [result] %(res)s] while getting volume id." -msgstr "Error[status] %(stat)s - [result] %(res)s] ao obter o ID do volume." 
- -#, python-format -msgid "" -"Exceeded max scheduling attempts %(max_attempts)d for volume %(volume_id)s" -msgstr "" -"Excedido o número máximo de tentativas de planejamento %(max_attempts)d para " -"o volume %(volume_id)s" - -msgid "Exceeded the limit of snapshots per volume" -msgstr "Limite de capturas instantâneas por volume excedido" - -#, python-format -msgid "Exception appending meta volume to target volume %(volumename)s." -msgstr "Exceção ao anexar metavolume ao volume de destino %(volumename)s." - -#, python-format -msgid "" -"Exception during create element replica. Clone name: %(cloneName)s Source " -"name: %(sourceName)s Extra specs: %(extraSpecs)s " -msgstr "" -"Exceção durante a criação da réplica do elemento. Nome do clone: " -"%(cloneName)s Nome da origem: %(sourceName)s Especificações extras: " -"%(extraSpecs)s " - -#, python-format -msgid "Exception in _select_ds_for_volume: %s." -msgstr "Exceção no _select_ds_for_volume: %s." - -#, python-format -msgid "Exception while forming the zone string: %s." -msgstr "Exceção ao formar a sequência de zonas: %s." - -#, python-format -msgid "Exception: %s" -msgstr "Exceção: %s" - -#, python-format -msgid "Expected a uuid but received %(uuid)s." -msgstr "Esperado um uuid, mas recebido %(uuid)s." - -#, python-format -msgid "Expected exactly one node called \"%s\"" -msgstr "Esperado exatamente um nó chamado \"%s\"" - -#, python-format -msgid "Expected integer for node_count, svcinfo lsiogrp returned: %(node)s." -msgstr "" -"Número inteiro esperado para node_count, svcinfo lsiogrp retornou: %(node)s." - -#, python-format -msgid "Expected no output from CLI command %(cmd)s, got %(out)s." -msgstr "Nenhuma saída esperada do comando da CLI %(cmd)s, obtido %(out)s." - -#, python-format -msgid "" -"Expected single vdisk returned from lsvdisk when filtering on vdisk_UID. " -"%(count)s were returned." -msgstr "" -"vdisk único esperado retornado de lsvdisk ao filtrar no vdisk_UID. " -"%(count)s foram retornados." 
- -#, python-format -msgid "Expected volume size was %d" -msgstr "Tamanho do volume esperado era %d" - -#, python-format -msgid "" -"Export backup aborted, expected backup status %(expected_status)s but got " -"%(actual_status)s." -msgstr "" -"Exportação de backup interrompida, esperava-se o status de backup " -"%(expected_status)s mas obteve %(actual_status)s." - -#, python-format -msgid "" -"Export record aborted, the backup service currently configured " -"[%(configured_service)s] is not the backup service that was used to create " -"this backup [%(backup_service)s]." -msgstr "" -"Exportação de registro interrompida, o serviço de backup atualmente " -"configurado [%(configured_service)s] não é o serviço de backup que foi usado " -"para criar esse backup [%(backup_service)s]." - -msgid "Extend volume error." -msgstr "Erro ao estender volume." - -msgid "" -"Extend volume is only supported for this driver when no snapshots exist." -msgstr "" -"Estender o volume é suportado para este driver apenas quando não existem " -"capturas instantâneas." - -msgid "Extend volume not implemented" -msgstr "Estender volume não implementado" - -msgid "FAST is not supported on this array." -msgstr "O FAST não é suportado nesta matriz." - -msgid "FC is the protocol but wwpns are not supplied by OpenStack." -msgstr "FC é o protocolo, mas wwpns não são fornecidos pelo OpenStack." - -#, python-format -msgid "Faield to unassign %(volume)s" -msgstr "Falha ao remover designação de %(volume)s" - -#, python-format -msgid "Fail to create cache volume %(volume)s. Error: %(err)s" -msgstr "Falha ao criar volume de cache %(volume)s. Erro: %(err)s" - -#, python-format -msgid "Failed adding connection for fabric=%(fabric)s: Error: %(err)s" -msgstr "Falha ao incluir conexão para a malha=%(fabric)s: Erro:%(err)s" - -msgid "Failed cgsnapshot" -msgstr "cgsnapshot falhou" - -#, python-format -msgid "Failed creating snapshot for group: %(response)s." 
-msgstr "Falha ao criar captura instantânea para o grupo %(response)s." - -#, python-format -msgid "Failed creating snapshot for volume %(volname)s: %(response)s." -msgstr "" -"Falha ao criar captura instantânea para o volume %(volname)s: %(response)s." - -#, python-format -msgid "Failed getting active zone set from fabric %s." -msgstr "Falha ao obter o conjunto de zonas ativas a partir da malha %s." - -#, python-format -msgid "Failed getting details for pool %s." -msgstr "Falha ao obter detalhes para o conjunto %s." - -#, python-format -msgid "Failed removing connection for fabric=%(fabric)s: Error: %(err)s" -msgstr "Falha ao remover conexão da malha=%(fabric)s: Erro:%(err)s" - -#, python-format -msgid "Failed to Extend Volume %(volname)s" -msgstr "Falha ao Estender Volume %(volname)s" - -#, python-format -msgid "Failed to Login to 3PAR (%(url)s) because %(err)s" -msgstr "Falha ao efetuar login para o 3PAR (%(url)s) porque %(err)s" - -msgid "Failed to access active zoning configuration." -msgstr "Falha ao acessar a configuração de zoneamento ativa." - -#, python-format -msgid "Failed to access zoneset status:%s" -msgstr "Falha ao acessar o status do conjunto de zonas:%s" - -#, python-format -msgid "" -"Failed to acquire a resource lock. (serial: %(serial)s, inst: %(inst)s, ret: " -"%(ret)s, stderr: %(err)s)" -msgstr "" -"Falha ao adquirir um bloqueio de recurso. (serial: %(serial)s, inst: " -"%(inst)s, ret: %(ret)s, erro padrão: %(err)s)" - -msgid "Failed to add the logical device." -msgstr "Falha ao incluir o dispositivo lógico." - -#, python-format -msgid "" -"Failed to add volume %(volumeName)s to consistency group %(cgName)s. Return " -"code: %(rc)lu. Error: %(error)s." -msgstr "" -"Falha ao incluir o volume %(volumeName)s no grupo de consistências " -"%(cgName)s. Código de retorno: %(rc)lu. Erro: %(error)s." - -msgid "Failed to add zoning configuration." -msgstr "Falha ao incluir a configuração de zoneamento." 
- -#, python-format -msgid "" -"Failed to assign the iSCSI initiator IQN. (port: %(port)s, reason: " -"%(reason)s)" -msgstr "" -"Falha ao designar o IQN do inicializador iSCSI. (porta: %(port)s, motivo: " -"%(reason)s)" - -#, python-format -msgid "Failed to associate qos_specs: %(specs_id)s with type %(type_id)s." -msgstr "Falha ao associar qos_specs: %(specs_id)s com tipo %(type_id)s." - -#, python-format -msgid "Failed to attach iSCSI target for volume %(volume_id)s." -msgstr "Falha ao anexar destino iSCSI para o volume %(volume_id)s." - -#, python-format -msgid "Failed to backup volume metadata - %s" -msgstr "Falha ao fazer o backup dos metadados do volume - %s" - -#, python-format -msgid "" -"Failed to backup volume metadata - Metadata backup object 'backup.%s.meta' " -"already exists" -msgstr "" -"Falha ao fazer backup de metadados de volume – Objeto de backup de metadados " -"'backup.%s.meta' já existe" - -#, python-format -msgid "Failed to clone volume from snapshot %s." -msgstr "Falha ao clonar volume da captura instantânea %s." - -#, python-format -msgid "Failed to connect to %(vendor_name)s Array %(host)s: %(err)s" -msgstr "Falha ao conectar-se ao %(vendor_name)s, Matriz %(host)s: %(err)s" - -msgid "Failed to connect to Dell REST API" -msgstr "Falha ao conectar-se com a API REST Dell" - -msgid "Failed to connect to array" -msgstr "Falha ao conectar-se à matriz" - -#, python-format -msgid "Failed to connect to sheep daemon. addr: %(addr)s, port: %(port)s" -msgstr "" -"Falha ao conectar-se ao daemon sheep. Endereço: %(addr)s, porta: %(port)s" - -#, python-format -msgid "Failed to copy image to volume: %(reason)s" -msgstr "Falha ao copiar imagem para o volume: %(reason)s" - -#, python-format -msgid "Failed to copy metadata to volume: %(reason)s" -msgstr "Falha ao copiar metadados para o volume: %(reason)s" - -msgid "Failed to copy volume, destination device unavailable." -msgstr "Falha ao copiar o volume; dispositivo de destino indisponível." 
- -msgid "Failed to copy volume, source device unavailable." -msgstr "Falha ao copiar o volume; dispositivo de origem indisponível." - -#, python-format -msgid "Failed to create CG %(cgName)s from snapshot %(cgSnapshot)s." -msgstr "Falha ao criar CG %(cgName)s da captura instantânea %(cgSnapshot)s." - -#, python-format -msgid "Failed to create IG, %s" -msgstr "Falha ao criar IG, %s" - -#, python-format -msgid "Failed to create Volume Group: %(vg_name)s" -msgstr "Falha ao criar Grupo de Volumes: %(vg_name)s" - -#, python-format -msgid "" -"Failed to create a file. (file: %(file)s, ret: %(ret)s, stderr: %(err)s)" -msgstr "" -"Falha ao criar um arquivo. (arquivo: %(file)s, ret: %(ret)s, erro padrão: " -"%(err)s)" - -#, python-format -msgid "Failed to create a temporary snapshot for volume %s." -msgstr "Falha ao criar uma captura instantânea temporária para o volume %s." - -msgid "Failed to create api volume flow." -msgstr "Falha ao criar o fluxo de volume da API." - -#, python-format -msgid "Failed to create cg snapshot %(id)s due to %(reason)s." -msgstr "Falha ao criar a captura instantânea cg %(id)s devido a %(reason)s." - -#, python-format -msgid "Failed to create consistency group %(id)s due to %(reason)s." -msgstr "Falha ao criar grupo de consistências %(id)s devido a %(reason)s." - -#, python-format -msgid "Failed to create consistency group %(id)s:%(ret)s." -msgstr "Falha ao criar grupo de consistências %(id)s:%(ret)s." - -#, python-format -msgid "" -"Failed to create consistency group %s because VNX consistency group cannot " -"accept compressed LUNs as members." -msgstr "" -"Falha ao criar o grupo de consistências %s porque o grupo de consistências " -"VNX não pode aceitar LUNs compactados como membros." - -#, python-format -msgid "Failed to create consistency group: %(cgName)s." -msgstr "Falha ao criar grupo de consistências: %(cgName)s." - -#, python-format -msgid "Failed to create consistency group: %(cgid)s. Error: %(excmsg)s." 
-msgstr "Falha ao criar grupo de consistências: %(cgid)s. Erro: %(excmsg)s." - -#, python-format -msgid "" -"Failed to create consistency group: %(consistencyGroupName)s Return code: " -"%(rc)lu. Error: %(error)s." -msgstr "" -"Falha ao criar grupo de consistências: %(consistencyGroupName)s Código de " -"retorno: %(rc)lu. Erro: %(error)s." - -#, python-format -msgid "Failed to create hardware id(s) on %(storageSystemName)s." -msgstr "Falha ao criar ID(s) de hardware em %(storageSystemName)s." - -#, python-format -msgid "" -"Failed to create host: %(name)s. Please check if it exists on the array." -msgstr "Falha ao criar o host: %(name)s. Verifique se ele existir na matriz." - -#, python-format -msgid "Failed to create hostgroup: %(name)s. Check if it exists on the array." -msgstr "" -"Falha ao criar grupo de hosts: %(name)s. Verifique se ele existe na matriz." - -msgid "Failed to create iqn." -msgstr "Falha ao criar iqn." - -#, python-format -msgid "Failed to create iscsi target for volume %(volume_id)s." -msgstr "Falha ao criar destino iscsi para o volume %(volume_id)s." - -msgid "Failed to create manage existing flow." -msgstr "Falha ao criar fluxo existente de gerenciamento." - -msgid "Failed to create manage_existing flow." -msgstr "Falha ao criar fluxo manage_existing." - -msgid "Failed to create map on mcs, no channel can map." -msgstr "Falha ao criar mapa no mcs; nenhum canal pode ser mapeado." - -msgid "Failed to create map." -msgstr "Falha ao criar mapa." - -#, python-format -msgid "Failed to create metadata for volume: %(reason)s" -msgstr "Falha ao criar metadados para o volume: %(reason)s" - -msgid "Failed to create partition." -msgstr "Falha ao criar a partição." - -#, python-format -msgid "Failed to create qos_specs: %(name)s with specs %(qos_specs)s." -msgstr "Falha ao criar qos_specs: %(name)s com especificações %(qos_specs)s." - -msgid "Failed to create replica." -msgstr "Falha ao criar réplica." 
- -msgid "Failed to create scheduler manager volume flow" -msgstr "Falha ao criar fluxo de volume de gerenciador de planejador" - -#, python-format -msgid "Failed to create snapshot %s" -msgstr "Falha ao criar a captura instantânea %s" - -#, python-format -msgid "Failed to create snapshot for cg: %(cgName)s." -msgstr "Falha ao criar a captura instantânea para cg: %(cgName)s." - -#, python-format -msgid "Failed to create snapshot for volume %s." -msgstr "Falha ao criar captura instantânea para o volume %s." - -#, python-format -msgid "Failed to create snapshot policy on volume %(vol)s: %(res)s." -msgstr "" -"Falha ao criar política de captura instantânea no volume %(vol)s: %(res)s." - -#, python-format -msgid "Failed to create snapshot resource area on volume %(vol)s: %(res)s." -msgstr "" -"Falha ao criar área de recursos de captura instantânea no volume %(vol)s: " -"%(res)s." - -msgid "Failed to create snapshot." -msgstr "Falha ao criar captura instantânea." - -#, python-format -msgid "" -"Failed to create snapshot. CloudByte volume information not found for " -"OpenStack volume [%s]." -msgstr "" -"Falha ao criar captura instantânea. As informações de volume do CloudByte " -"não foram localizadas para O volume OpenStack [%s]." - -#, python-format -msgid "Failed to create south bound connector for %s." -msgstr "Falha ao criar conector de ligação south para %s." - -#, python-format -msgid "Failed to create storage group %(storageGroupName)s." -msgstr "Falha ao criar grupo de armazenamentos %(storageGroupName)s." - -#, python-format -msgid "Failed to create thin pool, error message was: %s" -msgstr "Falha ao criar o conjunto thin, a mensagem de erro foi: %s" - -#, python-format -msgid "Failed to create volume %s" -msgstr "Falha ao criar o volume %s" - -#, python-format -msgid "Failed to delete SI for volume_id: %(volume_id)s because it has pair." -msgstr "Falha ao excluir SI para volume_id: %(volume_id)s porque ele tem par." 
- -#, python-format -msgid "Failed to delete a logical device. (LDEV: %(ldev)s, reason: %(reason)s)" -msgstr "" -"Falha ao excluir um dispositivo lógico. (LDEV: %(ldev)s, motivo: %(reason)s)" - -#, python-format -msgid "Failed to delete cgsnapshot %(id)s due to %(reason)s." -msgstr "Falha ao excluir cgsnapshot %(id)s devido a %(reason)s." - -#, python-format -msgid "Failed to delete consistency group %(id)s due to %(reason)s." -msgstr "Falha ao excluir o grupo de consistências %(id)s devido a %(reason)s." - -#, python-format -msgid "Failed to delete consistency group: %(cgName)s." -msgstr "Falha ao excluir o grupo de consistências: %(cgName)s." - -#, python-format -msgid "" -"Failed to delete consistency group: %(consistencyGroupName)s Return code: " -"%(rc)lu. Error: %(error)s." -msgstr "" -"Falha ao excluir o grupo de consistências: %(consistencyGroupName)s Código " -"de retorno: %(rc)lu. Erro: %(error)s." - -msgid "Failed to delete device." -msgstr "Falha ao excluir dispositivo." - -#, python-format -msgid "" -"Failed to delete fileset for consistency group %(cgname)s. Error: %(excmsg)s." -msgstr "" -"Falha ao excluir o conjunto de arquivos para o grupo de consistências " -"%(cgname)s Erro: %(excmsg)s." - -msgid "Failed to delete iqn." -msgstr "Falha ao excluir iqn." - -msgid "Failed to delete map." -msgstr "Falha ao excluir mapa." - -msgid "Failed to delete partition." -msgstr "Falha ao excluir a partição." - -msgid "Failed to delete replica." -msgstr "Falha ao excluir a réplica." - -#, python-format -msgid "Failed to delete snapshot %s" -msgstr "Falha ao excluir a captura instantânea %s" - -#, python-format -msgid "Failed to delete snapshot for cg: %(cgId)s." -msgstr "Falha ao excluir a captura instantânea para cg: %(cgId)s." - -#, python-format -msgid "Failed to delete snapshot for snapshot_id: %s because it has pair." -msgstr "" -"Falha ao excluir captura instantânea para snapshot_id: %s porque ela tem par." - -msgid "Failed to delete snapshot." 
-msgstr "Falha ao excluir captura instantânea." - -#, python-format -msgid "Failed to delete volume %(volumeName)s." -msgstr "Falha ao excluir o volume %(volumeName)s." - -#, python-format -msgid "" -"Failed to delete volume for volume_id: %(volume_id)s because it has pair." -msgstr "" -"Falha ao excluir volume para volume_id: %(volume_id)s porque ele tem par." - -#, python-format -msgid "Failed to detach iSCSI target for volume %(volume_id)s." -msgstr "Falha ao remover o destino de iSCSI para o volume %(volume_id)s." - -msgid "Failed to determine blockbridge API configuration" -msgstr "Falha ao determinar a configuração da API blockbridge" - -msgid "Failed to disassociate qos specs." -msgstr "Falha ao desassociar qos specs. " - -#, python-format -msgid "Failed to disassociate qos_specs: %(specs_id)s with type %(type_id)s." -msgstr "Falha ao desassociar qos_specs: %(specs_id)s com tipo %(type_id)s." - -#, python-format -msgid "" -"Failed to ensure snapshot resource area, could not locate volume for id %s" -msgstr "" -"Falha ao assegurar a área de recursos de captura instantânea; não foi " -"possível localizar o volume para o ID %s" - -msgid "Failed to establish connection with Coho cluster" -msgstr "Falha ao estabelecer a conexão com o cluster Coho" - -#, python-format -msgid "" -"Failed to execute CloudByte API [%(cmd)s]. Http status: %(status)s, Error: " -"%(error)s." -msgstr "" -"Falha ao executar a API do CloudByte [%(cmd)s]. Status de HTTP: %(status)s, " -"Erro: %(error)s." - -msgid "Failed to execute common command." -msgstr "Falha ao executar o comando comum." - -#, python-format -msgid "Failed to export for volume: %(reason)s" -msgstr "Falha ao exportar para o volume: %(reason)s" - -#, python-format -msgid "Failed to extend volume %(name)s, Error msg: %(msg)s." -msgstr "Falha ao estender o volume %(name)s, Mensagem. de erro: %(msg)s." 
- -msgid "Failed to find QoSnode" -msgstr "Falha ao localizar o QoSnode" - -msgid "Failed to find Storage Center" -msgstr "Falha ao localizar o Centro de Armazenamento" - -msgid "Failed to find a vdisk copy in the expected pool." -msgstr "Falha ao localizar uma cópia do disco virtual no conjunto esperado." - -msgid "Failed to find account for volume." -msgstr "Falha ao localizar a conta para o volume." - -#, python-format -msgid "Failed to find fileset for path %(path)s, command output: %(cmdout)s." -msgstr "" -"Falha ao localizar conjunto de arquivos para o caminho %(path)s, saída do " -"comando: %(cmdout)s." - -#, python-format -msgid "Failed to find group snapshot named: %s" -msgstr "Falha ao localizar a captura instantânea de grupo denominada: %s" - -#, python-format -msgid "Failed to find host %s." -msgstr "Falha ao localizar host %s." - -#, python-format -msgid "Failed to find iSCSI initiator group containing %(initiator)s." -msgstr "" -"Falha ao localizar o grupo de iniciadores iSCSI contendo %(initiator)s." - -#, python-format -msgid "Failed to get CloudByte account details for account [%s]." -msgstr "Falha ao obter detalhes da conta do CloudByte para a conta [%s]." - -#, python-format -msgid "Failed to get LUN target details for the LUN %s" -msgstr "Falha ao obter detalhes do destino de LUN para o LUN %s" - -#, python-format -msgid "Failed to get LUN target details for the LUN %s." -msgstr "Falha ao obter detalhes do destino do LUN %s." - -#, python-format -msgid "Failed to get LUN target list for the LUN %s" -msgstr "Falha ao obter a lista de destinos de LUN para o LUN %s" - -#, python-format -msgid "Failed to get Partition ID for volume %(volume_id)s." -msgstr "Falha ao obter o ID da partição para o volume %(volume_id)s." - -#, python-format -msgid "Failed to get Raid Snapshot ID from Snapshot %(snapshot_id)s." -msgstr "" -"Falha ao obter o ID de captura instantânea de RAID da captura instantânea " -"%(snapshot_id)s." 
- -#, python-format -msgid "Failed to get Raid Snapshot ID from snapshot: %(snapshot_id)s." -msgstr "" -"Falha ao obter ID de captura instantânea de RAID da captura instantânea: " -"%(snapshot_id)s." - -msgid "Failed to get SplitMirror." -msgstr "Falha ao obter SplitMirror" - -#, python-format -msgid "" -"Failed to get a storage resource. The system will attempt to get the storage " -"resource again. (resource: %(resource)s)" -msgstr "" -"Falha ao obter um recurso de armazenamento. O sistema tentará obter o " -"recurso de armazenamento novamente. (recurse: %(resource)s)" - -#, python-format -msgid "Failed to get all associations of qos specs %s" -msgstr "Falha ao obter todas as associações de qos specs %s" - -msgid "Failed to get channel info." -msgstr "Falha ao obter informações do canal." - -#, python-format -msgid "Failed to get code level (%s)." -msgstr "Falha ao obter nível de código (%s)." - -msgid "Failed to get device info." -msgstr "Falha ao obter informações do dispositivo." - -#, python-format -msgid "Failed to get domain because CPG (%s) doesn't exist on array." -msgstr "Falha ao obter o domínio porque o CPG (%s) não existe na matriz." - -msgid "Failed to get image snapshots." -msgstr "Falha ao obter capturas instantâneas da imagem" - -#, python-format -msgid "Failed to get ip on Channel %(channel_id)s with volume: %(volume_id)s." -msgstr "Falha ao obter IP no Canal %(channel_id)s com o volume: %(volume_id)s." - -msgid "Failed to get iqn info." -msgstr "Falha ao obter informações do iqn." - -msgid "Failed to get license info." -msgstr "Falha ao obter informações de licença." - -msgid "Failed to get lv info." -msgstr "Falha ao obter informações de lv." - -msgid "Failed to get map info." -msgstr "Falha ao obter informações do mapa." - -msgid "Failed to get migration task." -msgstr "Falha ao obter a tarefa de migração." 
- -msgid "Failed to get model update from clone" -msgstr "Falha ao obter atualização de modelo a partir do clone" - -msgid "Failed to get name server info." -msgstr "Falha ao obter informações do servidor de nomes." - -msgid "Failed to get network info." -msgstr "Falha ao obter informações de rede." - -#, python-format -msgid "Failed to get new part id in new pool: %(pool_id)s." -msgstr "Falha ao obter novo ID da parte no novo conjunto: %(pool_id)s." - -msgid "Failed to get partition info." -msgstr "Falha ao obter informações de partição." - -#, python-format -msgid "Failed to get pool id with volume %(volume_id)s." -msgstr "Falha ao obter ID do conjunto com o volume %(volume_id)s." - -#, python-format -msgid "Failed to get remote copy information for %(volume)s due to %(err)s." -msgstr "" -"Falha ao obter informações de cópia remota para o %(volume)s devido a " -"%(err)s." - -#, python-format -msgid "" -"Failed to get remote copy information for %(volume)s. Exception: %(err)s." -msgstr "" -"Falha ao obter informações de cópia remota para o %(volume)s. Exceção " -"%(err)s." - -msgid "Failed to get replica info." -msgstr "Falha ao obter informações de réplica." - -msgid "Failed to get show fcns database info." -msgstr "Falha ao obter/mostrar informações do banco de dados fcns." - -#, python-format -msgid "Failed to get size of volume %s" -msgstr "Falha ao obter o tamanho do volume %s" - -#, python-format -msgid "Failed to get snapshot for volume %s." -msgstr "Falha ao obter captura instantânea para o volume %s." - -msgid "Failed to get snapshot info." -msgstr "Falha ao obter informações de captura instantânea." - -#, python-format -msgid "Failed to get target IQN for the LUN %s" -msgstr "Falha ao obter o IQN de destino para o LUN %s" - -msgid "Failed to get target LUN of SplitMirror." -msgstr "Falha ao obter LUN de destino do SplitMirror." 
- -#, python-format -msgid "Failed to get target portal for the LUN %s" -msgstr "Falha ao obter o portal de destino para o LUN %s" - -msgid "Failed to get targets" -msgstr "Falha ao obter destinos" - -msgid "Failed to get wwn info." -msgstr "Falha ao obter informações de wwn." - -#, python-format -msgid "" -"Failed to get, create or add volume %(volumeName)s to masking view " -"%(maskingViewName)s. The error message received was %(errorMessage)s." -msgstr "" -"Falha ao obter, criar ou incluir o volume %(volumeName)s para visualização " -"de mascaramento %(maskingViewName)s. A mensagem de erro recebida foi " -"%(errorMessage)s." - -msgid "Failed to identify volume backend." -msgstr "Falha ao identificar backend do volume" - -#, python-format -msgid "Failed to link fileset for the share %(cgname)s. Error: %(excmsg)s." -msgstr "" -"Falha ao vincular o conjunto de arquivos para o compartilhamento %(cgname)s. " -"Erro: %(excmsg)s." - -#, python-format -msgid "Failed to log on %s Array (invalid login?)." -msgstr "Falha ao efetuar logon na Matriz %s (login inválido?)." - -#, python-format -msgid "Failed to login for user %s." -msgstr "Falha ao efetuar login para o usuário %s." - -msgid "Failed to login with all rest URLs." -msgstr "Falha ao efetuar login com todas as URLs REST." - -#, python-format -msgid "" -"Failed to make a request to Datera cluster endpoint due to the following " -"reason: %s" -msgstr "" -"Falha ao realizar uma solicitação ao terminal do cluster Datera devido ao " -"seguinte motivo: %s" - -msgid "Failed to manage api volume flow." -msgstr "Falha ao gerenciar fluxo de volume da API." - -#, python-format -msgid "" -"Failed to manage existing %(type)s %(name)s, because reported size %(size)s " -"was not a floating-point number." -msgstr "" -"Falha ao gerenciar %(type)s %(name)s existente, porque o tamanho relatado " -"%(size)s não era um número de vírgula flutuante." 
- -#, python-format -msgid "" -"Failed to manage existing volume %(name)s, because of error in getting " -"volume size." -msgstr "" -"Falha ao gerenciar o volume existente %(name)s devido a um erro na obtenção " -"do tamanho do volume." - -#, python-format -msgid "" -"Failed to manage existing volume %(name)s, because rename operation failed: " -"Error msg: %(msg)s." -msgstr "" -"Falha ao gerenciar o volume existente %(name)s porque a operação de " -"renomeação falhou: Mensagem de erro: %(msg)s." - -#, python-format -msgid "" -"Failed to manage existing volume %(name)s, because reported size %(size)s " -"was not a floating-point number." -msgstr "" -"Falha ao gerenciar volume existente %(name)s, porque o tamanho relatado " -"%(size)s não era um número de vírgula flutuante." - -#, python-format -msgid "" -"Failed to manage existing volume due to I/O group mismatch. The I/O group of " -"the volume to be managed is %(vdisk_iogrp)s. I/O groupof the chosen type is " -"%(opt_iogrp)s." -msgstr "" -"Falha ao gerenciar o volume existente devido a uma incompatibilidade de " -"grupo de E/S. O grupo de E/S do volume a ser gerenciado é %(vdisk_iogrp)s, e " -"o grupo de E/S do tipo escolhido é %(opt_iogrp)s." - -#, python-format -msgid "" -"Failed to manage existing volume due to the pool of the volume to be managed " -"does not match the backend pool. Pool of the volume to be managed is " -"%(vdisk_pool)s. Pool of the backend is %(backend_pool)s." -msgstr "" -"Falha ao gerenciar o volume existente porque o conjunto do volume a ser " -"gerenciado não corresponde ao conjunto de backend. O conjunto do volume a " -"ser gerenciado é %(vdisk_pool)s, e o conjunto do backend é %(backend_pool)s." - -msgid "" -"Failed to manage existing volume due to the volume to be managed is " -"compress, but the volume type chosen is not compress." 
-msgstr "" -"Falha ao gerenciar o volume existente porque o volume a ser gerenciado está " -"compactado, e o tipo de volume escolhido não está compactado." - -msgid "" -"Failed to manage existing volume due to the volume to be managed is not " -"compress, but the volume type chosen is compress." -msgstr "" -"Falha ao gerenciar o volume existente porque o volume a ser gerenciado não " -"está compactado, e o tipo de volume escolhido está compactado." - -msgid "" -"Failed to manage existing volume due to the volume to be managed is not in a " -"valid I/O group." -msgstr "" -"Falha ao gerenciar volume existente porque o volume a ser gerenciado não " -"está em um grupo de E/S válido." - -msgid "" -"Failed to manage existing volume due to the volume to be managed is thick, " -"but the volume type chosen is thin." -msgstr "" -"Falha ao gerenciar o volume existente porque o volume a ser gerenciado é " -"thick, e o tipo de volume escolhido é thin." - -msgid "" -"Failed to manage existing volume due to the volume to be managed is thin, " -"but the volume type chosen is thick." -msgstr "" -"Falha ao gerenciar o volume existente porque o volume a ser gerenciado é " -"thin, e o tipo de volume escolhido é thick." - -#, python-format -msgid "Failed to manage volume %s." -msgstr "Falha ao gerenciar o volume %s." - -#, python-format -msgid "" -"Failed to map a logical device. (LDEV: %(ldev)s, LUN: %(lun)s, port: " -"%(port)s, id: %(id)s)" -msgstr "" -"Falha ao mapear um dispositivo lógico. (LDEV: %(ldev)s, LUN: %(lun)s, porta: " -"%(port)s, ID: %(id)s)" - -msgid "Failed to migrate volume for the first time." -msgstr "Falha ao migrar o volume pela primeira vez." - -msgid "Failed to migrate volume for the second time." -msgstr "Falha ao migrar o volume pela segunda vez." - -#, python-format -msgid "Failed to move LUN mapping. Return code: %s" -msgstr "Falha ao mover mapeamento de LUN. Código de retorno: %s" - -#, python-format -msgid "Failed to move volume %s." 
-msgstr "Falha ao mover o volume %s." - -#, python-format -msgid "Failed to open a file. (file: %(file)s, ret: %(ret)s, stderr: %(err)s)" -msgstr "" -"Falha ao abrir um arquivo. (aquivo: %(file)s, ret: %(ret)s, erro padrão: " -"%(err)s)" - -#, python-format -msgid "" -"Failed to parse CLI output:\n" -" command: %(cmd)s\n" -" stdout: %(out)s\n" -" stderr: %(err)s." -msgstr "" -"Falha ao analisar saída da CLI \n" -" comando: %(cmd)s\n" -" stdout: %(out)s\n" -" stderr: %(err)s." - -msgid "" -"Failed to parse the configuration option 'keystone_catalog_info', must be in " -"the form ::" -msgstr "" -"Falha ao analisar a opção de configuração 'keystone_catalog_info', deve ser " -"na forma ::" - -msgid "" -"Failed to parse the configuration option 'swift_catalog_info', must be in " -"the form ::" -msgstr "" -"Falha ao analisar a opção de configuração 'swift_catalog_info', deve ser na " -"forma ::" - -#, python-format -msgid "" -"Failed to perform a zero-page reclamation. (LDEV: %(ldev)s, reason: " -"%(reason)s)" -msgstr "" -"Falha ao realizar uma reclamação com página zero. (LDEV: %(ldev)s, motivo: " -"%(reason)s)" - -#, python-format -msgid "Failed to remove export for volume %(volume)s: %(reason)s" -msgstr "Falha ao remover exportação para o volume %(volume)s: %(reason)s" - -#, python-format -msgid "Failed to remove iscsi target for volume %(volume_id)s." -msgstr "Falha ao remover destino iscsi para o volume %(volume_id)s." - -#, python-format -msgid "" -"Failed to remove volume %(volumeName)s from consistency group %(cgName)s. " -"Return code: %(rc)lu. Error: %(error)s." -msgstr "" -"Falha ao remover o volume %(volumeName)s do grupo de consistências " -"%(cgName)s. Código de retorno: %(rc)lu. Erro: %(error)s." - -#, python-format -msgid "Failed to remove volume %(volumeName)s from default SG." -msgstr "Falha ao remover o volume %(volumeName)s do SG padrão." - -#, python-format -msgid "Failed to remove volume %(volumeName)s from default SG: %(volumeName)s." 
-msgstr "Falha ao remover o volume %(volumeName)s do SG padrão: %(volumeName)s." - -#, python-format -msgid "" -"Failed to remove: %(volumename)s. from the default storage group for FAST " -"policy %(fastPolicyName)s." -msgstr "" -"Falha ao remover: %(volumename)s. do grupo de armazenamento padrão para " -"política FAST %(fastPolicyName)s." - -#, python-format -msgid "" -"Failed to rename logical volume %(name)s, error message was: %(err_msg)s" -msgstr "" -"Falha ao renomear volume lógico %(name)s, mensagem de erro foi: %(err_msg)s" - -#, python-format -msgid "Failed to retrieve active zoning configuration %s" -msgstr "Falha ao recuperar configuração de zoneamento ativo %s" - -#, python-format -msgid "" -"Failed to set CHAP authentication for target IQN %(iqn)s. Details: %(ex)s" -msgstr "" -"Falha ao configurar a autenticação CHAP para o IQN de destino %(iqn)s. " -"Detalhes: %(ex)s" - -#, python-format -msgid "Failed to set QoS for existing volume %(name)s, Error msg: %(msg)s." -msgstr "" -"Falha ao configurar QoS para o volume existente %(name)s. Mensagem de erro: " -"%(msg)s." - -msgid "Failed to set attribute 'Incoming user' for SCST target." -msgstr "Falha ao configurar atributo 'Incoming user' para SCST de destino." - -msgid "Failed to set partition." -msgstr "Falha ao configurar partição." - -#, python-format -msgid "" -"Failed to set permissions for the consistency group %(cgname)s. Error: " -"%(excmsg)s." -msgstr "" -"Falha ao configurar permissões para o grupo de consistências %(cgname)s " -"Erro: %(excmsg)s." - -#, python-format -msgid "" -"Failed to specify a logical device for the volume %(volume_id)s to be " -"unmapped." -msgstr "" -"Falha ao especificar um dispositivo lógico para o volume %(volume_id)s a ser " -"removido do mapeamento." - -#, python-format -msgid "" -"Failed to specify a logical device to be deleted. (method: %(method)s, id: " -"%(id)s)" -msgstr "" -"Falha ao especificar um dispositivo lógico a ser excluído. 
(método: " -"%(method)s, ID: %(id)s)" - -msgid "Failed to terminate migrate session." -msgstr "Falha ao finalizar a sessão de migração." - -#, python-format -msgid "Failed to unbind volume %(volume)s" -msgstr "Falha ao desvincular o volume %(volume)s" - -#, python-format -msgid "" -"Failed to unlink fileset for consistency group %(cgname)s. Error: %(excmsg)s." -msgstr "" -"Falha ao desvincular o conjunto de arquivos para o grupo de consistências " -"%(cgname)s. Erro: %(excmsg)s." - -#, python-format -msgid "Failed to unmap a logical device. (LDEV: %(ldev)s, reason: %(reason)s)" -msgstr "" -"Falha ao remover mapeamento de um dispositivo lógico. (LDEV: %(ldev)s, " -"motivo: %(reason)s)" - -#, python-format -msgid "Failed to update consistency group: %(cgName)s." -msgstr "Falha ao atualizar grupo de consistências: %(cgName)s." - -#, python-format -msgid "Failed to update metadata for volume: %(reason)s" -msgstr "Falha ao atualizar metadados para o volume: %(reason)s" - -msgid "Failed to update or delete zoning configuration" -msgstr "Falha ao atualizar ou excluir a configuração de zoneamento" - -msgid "Failed to update or delete zoning configuration." -msgstr "Falha ao atualizar ou excluir a configuração de zoneamento." - -#, python-format -msgid "Failed to update qos_specs: %(specs_id)s with specs %(qos_specs)s." -msgstr "" -"Falha ao atualizar qos_specs: %(specs_id)s com especificações %(qos_specs)s." - -msgid "Failed to update quota usage while retyping volume." -msgstr "Falha ao atualizar o uso de cota ao digitar novamente o volume." - -msgid "Failed to update snapshot." -msgstr "Falha ao atualizar captura instantânea." - -#, python-format -msgid "" -"Failed updating volume %(vol_id)s metadata using the provided %(src_type)s " -"%(src_id)s metadata" -msgstr "" -"Falha ao atualizar os metadados do volume %(vol_id)s usando os metadados " -"%(src_type)s %(src_id)s fornecidos" - -#, python-format -msgid "Failure creating volume %s." 
-msgstr "Falha ao criar o volume %s." - -#, python-format -msgid "Failure getting LUN info for %s." -msgstr "Falha ao obter informações de LUN para %s." - -#, python-format -msgid "Failure moving new cloned LUN to %s." -msgstr "Falha ao mover novo LUN clonado para %s." - -#, python-format -msgid "Failure staging LUN %s to tmp." -msgstr "Falha na preparação do LUN %s para tmp." - -#, python-format -msgid "Fexvisor failed to add volume %(id)s due to %(reason)s." -msgstr "O Fexvisor falhou ao incluir o volume %(id)s devido a %(reason)s." - -#, python-format -msgid "" -"Fexvisor failed to join the volume %(vol)s in the group %(group)s due to " -"%(ret)s." -msgstr "" -"Fexvisor falhou ao associar o volume %(vol)s no grupo %(group)s devido a " -"%(ret)s." - -#, python-format -msgid "" -"Fexvisor failed to remove the volume %(vol)s in the group %(group)s due to " -"%(ret)s." -msgstr "" -"Fexvisor falhou ao remover o volume %(vol)s no grupo %(group)s devido a " -"%(ret)s." - -#, python-format -msgid "Fexvisor failed to remove volume %(id)s due to %(reason)s." -msgstr "Fexvisor falhou ao remover o volume %(id)s devido a %(reason)s." - -#, python-format -msgid "Fibre Channel SAN Lookup failure: %(reason)s" -msgstr "Falha no Fibre Channel SAN Lookup: %(reason)s" - -#, python-format -msgid "Fibre Channel Zone operation failed: %(reason)s" -msgstr "Operação Fibre Channel Zone falhou: %(reason)s" - -#, python-format -msgid "Fibre Channel connection control failure: %(reason)s" -msgstr "Falha no controle de conexão Fibre Channel: %(reason)s" - -#, python-format -msgid "File %(file_path)s could not be found." -msgstr "O arquivo %(file_path)s não pôde ser localizado." - -#, python-format -msgid "File %(path)s has invalid backing file %(bfile)s, aborting." -msgstr "" -"O arquivo %(path)s tem arquivo de backup inválido %(bfile)s, interrompendo." - -#, python-format -msgid "File already exists at %s." -msgstr "O arquivo já existe em %s." 
- -#, python-format -msgid "File already exists at: %s" -msgstr "O arquivo já existe em: %s" - -msgid "Find host in hostgroup error." -msgstr "Erro ao localizar host no grupo de hosts." - -msgid "Find host lun id error." -msgstr "Erro ao localizar ID do LUN do host." - -msgid "Find lun group from mapping view error." -msgstr "Erro ao localizar grupo de LUNs da visualização de mapeamento." - -msgid "Find mapping view error." -msgstr "Erro ao localizar a visualização de mapeamento." - -msgid "Find portgroup error." -msgstr "Erro ao localizar grupo de portas." - -msgid "Find portgroup from mapping view error." -msgstr "Erro ao localizar grupo de portas da visualização de mapeamento." - -#, python-format -msgid "" -"Flash Cache Policy requires WSAPI version '%(fcache_version)s' version " -"'%(version)s' is installed." -msgstr "" -"A política de cache de atualização requer a versão WSAPI " -"'%(fcache_version)s' versão '%(version)s' está instalada." - -#, python-format -msgid "Flexvisor assign volume failed.:%(id)s:%(status)s." -msgstr "Volume de designação do Flexvisor com falha: %(id)s:%(status)s." - -#, python-format -msgid "Flexvisor assign volume failed:%(id)s:%(status)s." -msgstr "Volume de designação do Flexvisor falhou:%(id)s:%(status)s." - -#, python-format -msgid "" -"Flexvisor could not find volume %(id)s snapshot in the group %(vgid)s " -"snapshot %(vgsid)s." -msgstr "" -"O Flexvisor não pôde localizar a captura instantânea do volume %(id)s no " -"grupo %(vgid)s da captura instantânea %(vgsid)s." - -#, python-format -msgid "Flexvisor create volume failed.:%(volumeid)s:%(status)s." -msgstr "Flexvisor cria volume com falha.:%(volumeid)s:%(status)s." - -#, python-format -msgid "Flexvisor failed deleting volume %(id)s: %(status)s." -msgstr "O Flexvisor falhou ao excluir o volume %(id)s:%(status)s." - -#, python-format -msgid "Flexvisor failed to add volume %(id)s to group %(cgid)s." -msgstr "O Flexvisor falhou ao incluir o volume %(id)s ao grupo %(cgid)s." 
- -#, python-format -msgid "" -"Flexvisor failed to assign volume %(id)s due to unable to query status by " -"event id." -msgstr "" -"Flexvisor falhou ao designar o volume %(id)s devido a não poder consultar o " -"status pelo id de evento." - -#, python-format -msgid "Flexvisor failed to assign volume %(id)s: %(status)s." -msgstr "Flexvisor falhou ao designar volume %(id)s:%(status)s." - -#, python-format -msgid "Flexvisor failed to assign volume %(volume)s iqn %(iqn)s." -msgstr "O Flexvisor falhou ao designar o volume %(volume)s iqn %(iqn)s." - -#, python-format -msgid "Flexvisor failed to clone volume %(id)s: %(status)s." -msgstr "O Flexvisor falhou ao clonar o volume %(id)s:%(status)s." - -#, python-format -msgid "Flexvisor failed to clone volume (failed to get event) %(id)s." -msgstr "O Flexvisor falhou ao clonar o volume (falha ao obter evento) %(id)s." - -#, python-format -msgid "Flexvisor failed to create snapshot for volume %(id)s: %(status)s." -msgstr "" -"O Flexvisor falhou ao criar a captura instantânea para o volume %(id)s:" -"%(status)s." - -#, python-format -msgid "" -"Flexvisor failed to create snapshot for volume (failed to get event) %(id)s." -msgstr "" -"O Flexvisor falhou ao criar a captura instantânea para o volume (falha ao " -"obter evento) %(id)s." - -#, python-format -msgid "Flexvisor failed to create volume %(id)s in the group %(vgid)s." -msgstr "O Flexvisor falhou ao criar o volume %(id)s no grupo %(vgid)s." - -#, python-format -msgid "Flexvisor failed to create volume %(volume)s: %(status)s." -msgstr "Flexvisor falhou ao criar volume %(volume)s: %(status)s." - -#, python-format -msgid "Flexvisor failed to create volume (get event) %s." -msgstr "Flexvisor falhou ao criar volume (get event) %s." - -#, python-format -msgid "Flexvisor failed to create volume from snapshot %(id)s: %(status)s." -msgstr "" -"O Flexvisor falhou ao criar volume da captura instantânea %(id)s:%(status)s." 
- -#, python-format -msgid "Flexvisor failed to create volume from snapshot %(id)s:%(status)s." -msgstr "" -"O Flexvisor falhou ao criar volume da captura instantânea %(id)s:%(status)s." - -#, python-format -msgid "" -"Flexvisor failed to create volume from snapshot (failed to get event) %(id)s." -msgstr "" -"O Flexvisor falhou ao criar volume da captura instantânea (falha ao obter " -"evento) %(id)s." - -#, python-format -msgid "Flexvisor failed to delete snapshot %(id)s: %(status)s." -msgstr "O Flexvisor falhou ao excluir a captura instantânea %(id)s:%(status)s." - -#, python-format -msgid "Flexvisor failed to delete snapshot (failed to get event) %(id)s." -msgstr "" -"O Flexvisor falhou ao excluir a captura instantânea (falha ao obter " -"evento)%(id)s." - -#, python-format -msgid "Flexvisor failed to delete volume %(id)s: %(status)s." -msgstr "O Flexvisor falhou ao excluir o volume %(id)s: %(status)s." - -#, python-format -msgid "Flexvisor failed to extend volume %(id)s: %(status)s." -msgstr "O Flexvisor falhou ao estender o volume %(id)s:%(status)s." - -#, python-format -msgid "Flexvisor failed to extend volume %(id)s:%(status)s." -msgstr "O Flexvisor falhou ao estender o volume %(id)s:%(status)s." - -#, python-format -msgid "Flexvisor failed to extend volume (failed to get event) %(id)s." -msgstr "" -"O Flexvisor falhou ao estender o volume (falha ao obter evento) %(id)s." - -#, python-format -msgid "Flexvisor failed to get pool info %(id)s: %(status)s." -msgstr "" -"O Flexvisor falhou ao obter informações do conjunto %(id)s: %(status)s." - -#, python-format -msgid "" -"Flexvisor failed to get snapshot id of volume %(id)s from group %(vgid)s." -msgstr "" -"O Flexvisor falhou ao obter o id de captura instantânea do volume %(id)s do " -"grupo %(vgid)s." - -#, python-format -msgid "Flexvisor failed to remove volume %(id)s from group %(cgid)s." -msgstr "Flexvisor falhou ao remover o volume %(id)s do grupo %(cgid)s." 
- -#, python-format -msgid "Flexvisor failed to spawn volume from snapshot %(id)s:%(status)s." -msgstr "" -"O Flexvisor falhou ao efetuar spawn do volume de captura instantânea %(id)s:" -"%(status)s." - -#, python-format -msgid "" -"Flexvisor failed to spawn volume from snapshot (failed to get event) %(id)s." -msgstr "" -"Flexvisor falhou ao efetuar spawn do volume de captura instantânea (falha ao " -"obter evento) %(id)s." - -#, python-format -msgid "Flexvisor failed to unassign volume %(id)s: %(status)s." -msgstr "Flexvisor falhou ao remover a designação do volume %(id)s:%(status)s." - -#, python-format -msgid "Flexvisor failed to unassign volume (get event) %(id)s." -msgstr "" -"Flexvisor falhou ao remover designação do volume (obter evento) %(id)s." - -#, python-format -msgid "Flexvisor failed to unassign volume:%(id)s:%(status)s." -msgstr "" -"O Flexvisor falhou ao remover a designação do volume: %(id)s:%(status)s." - -#, python-format -msgid "Flexvisor unable to find the source volume %(id)s info." -msgstr "" -"O Flexvisor não conseguiu localizar as informações do volume de origem " -"%(id)s." - -#, python-format -msgid "Flexvisor unassign volume failed:%(id)s:%(status)s." -msgstr "" -"Falha na remoção de designação de volume pelo Flexvisor %(id)s:%(status)s." - -#, python-format -msgid "Flexvisor volume %(id)s failed to join group %(vgid)s." -msgstr "O volume do Flexvisor %(id)s falhou ao unir o grupo %(vgid)s." - -#, python-format -msgid "Folder %s does not exist in Nexenta Store appliance" -msgstr "O volume %s não existe no dispositivo Nexenta Store." - -#, python-format -msgid "GPFS is not running, state: %s." -msgstr "GPFS não está em execução, estado: %s." - -msgid "Gateway VIP is not set" -msgstr "Gateway VIP não está configurado" - -msgid "Get FC ports by port group error." -msgstr "Erro ao obter portas FC por grupo de portas." - -msgid "Get FC ports from array error." -msgstr "Erro ao obter portas FC da matriz." - -msgid "Get FC target wwpn error." 
-msgstr "Erro ao obter wwpn de destino do FC." - -msgid "Get HyperMetroPair error." -msgstr "Erro ao obter HyperMetroPair." - -msgid "Get LUN group by view error." -msgstr "Erro ao obter grupo de LUN por visualização." - -msgid "Get LUNcopy information error." -msgstr "Erro ao obter informações de LUNcopy." - -msgid "Get QoS id by lun id error." -msgstr "Erro ao obter ID de QoS por ID do LUN." - -msgid "Get QoS information error." -msgstr "Erro ao obter informações de QoS." - -msgid "Get QoS policy error." -msgstr "Erro ao obter política de QoS." - -msgid "Get SplitMirror error." -msgstr "Erro ao obter SplitMirror." - -msgid "Get active client failed." -msgstr "Falha ao ativar o cliente" - -msgid "Get array info error." -msgstr "Erro ao obter informações da matriz." - -msgid "Get cache by name error." -msgstr "Erro ao obter cache por nome." - -msgid "Get connected free FC wwn error." -msgstr "Erro ao obter wwn FC livre conectado." - -msgid "Get engines error." -msgstr "Erro ao obter mecanismos." - -msgid "Get host initiators info failed." -msgstr "Falha ao obter informações de inicializadores de host." - -msgid "Get hostgroup information error." -msgstr "Erro ao obter informações do grupo de hosts." - -msgid "" -"Get iSCSI port info error, please check the target IP configured in huawei " -"conf file." -msgstr "" -"Erro ao obter informações da porta iSCSI; verifique o IP de destino " -"configurada no arquivo conf huawei." - -msgid "Get iSCSI port information error." -msgstr "Erro ao obter informações da porta iSCSI." - -msgid "Get iSCSI target port error." -msgstr "Erro ao obter porta de destino iSCSI." - -msgid "Get lun id by name error." -msgstr "Erro ao obter ID de LUN pelo nome." - -msgid "Get lun migration task error." -msgstr "Erro ao obter tarefa de migração de LUN." - -msgid "Get lungroup id by lun id error." -msgstr "Erro ao obter ID do grupo de LUNs por ID do LUN." - -msgid "Get lungroup information error." 
-msgstr "Erro ao obter informações do grupo de LUNs." - -msgid "Get migration task error." -msgstr "Erro ao obter tarefa de migração." - -msgid "Get pair failed." -msgstr "Erro ao obter par." - -msgid "Get partition by name error." -msgstr "Erro ao obter partição por nome." - -msgid "Get partition by partition id error." -msgstr "Erro ao obter partição por ID da partição." - -msgid "Get port group by view error." -msgstr "Erro ao obter grupo de portas por visualização." - -msgid "Get port group error." -msgstr "Erro ao obter grupo de portas." - -msgid "Get port groups by port error." -msgstr "Erro ao obter grupos de porta por porta." - -msgid "Get ports by port group error." -msgstr "Erro ao obter portas por grupo de portas." - -msgid "Get remote device info failed." -msgstr "Falha ao obter informações do dispositivo remoto." - -msgid "Get remote devices error." -msgstr "Erro ao obter dispositivos remotos." - -msgid "Get smartcache by cache id error." -msgstr "Erro ao obter smartcache por ID de cache." - -msgid "Get snapshot error." -msgstr "Erro ao obter captura instantânea." - -msgid "Get snapshot id error." -msgstr "Erro ao obter ID de captura instantânea." - -msgid "Get target IP error." -msgstr "Erro ao obter IP de destino." - -msgid "Get target LUN of SplitMirror error." -msgstr "Erro ao obter LUN de destino do SplitMirror." - -msgid "Get views by port group error." -msgstr "Erro ao obter visualizações por grupo de portas." - -msgid "Get volume by name error." -msgstr "Erro ao obter volume por nome." - -msgid "Get volume error." -msgstr "Erro ao obter volume." - -#, python-format -msgid "" -"Glance metadata cannot be updated, key %(key)s exists for volume id " -"%(volume_id)s" -msgstr "" -"A visão de metadados não pode ser atualizada; existe a chave %(key)s para o " -"ID do volume %(volume_id)s" - -#, python-format -msgid "Glance metadata for volume/snapshot %(id)s cannot be found." 
-msgstr "" -"Metadados do Glance para o volume/captura instantânea %(id)s não pôde ser " -"encontrado." - -#, python-format -msgid "Gluster config file at %(config)s doesn't exist" -msgstr "O arquivo de configuração do Gluster em %(config)s não existe" - -#, python-format -msgid "Google Cloud Storage api failure: %(reason)s" -msgstr "Falha da API Google Cloud Storage: %(reason)s" - -#, python-format -msgid "Google Cloud Storage connection failure: %(reason)s" -msgstr "Falha de conexão do Google Cloud Storage: %(reason)s" - -#, python-format -msgid "Google Cloud Storage oauth2 failure: %(reason)s" -msgstr "Falha do oauth2 do Google Cloud Storage: %(reason)s" - -#, python-format -msgid "Got bad path information from DRBDmanage! (%s)" -msgstr "Informações de caminho inválido obtido do DRBDmanage! (%s)" - -msgid "HBSD error occurs." -msgstr "Erro HBSD ocorreu." - -msgid "HPELeftHand url not found" -msgstr "URL HPELeftHand não localizada" - -#, python-format -msgid "" -"Hash block size has changed since the last backup. New hash block size: " -"%(new)s. Old hash block size: %(old)s. Do a full backup." -msgstr "" -"O tamanho de bloco hash foi alterado desde o último backup. O novo tamanho " -"de bloco hash: %(new)s. Antigo tamanho de bloco hash: %(old)s. Execute um " -"backup completo." - -#, python-format -msgid "Have not created %(tier_levels)s tier(s)." -msgstr "Não tem camada(s) %(tier_levels)s criada(s)." - -#, python-format -msgid "Hint \"%s\" not supported." -msgstr "Sugestão \"%s\" não suportada." - -msgid "Host" -msgstr "Host" - -#, python-format -msgid "Host %(host)s could not be found." -msgstr "O host %(host)s não pôde ser localizado." - -#, python-format -msgid "" -"Host %(host)s does not match x509 certificate contents: CommonName " -"%(commonName)s." -msgstr "" -"O host %(host)s não corresponde ao conteúdo do certificado x509: CommonName " -"%(commonName)s." 
- -#, python-format -msgid "Host %s has no FC initiators" -msgstr "Host %s não possui inicializadores do FC" - -#, python-format -msgid "Host group with name %s not found" -msgstr "Grupo de hosts com o nome %s não localizado" - -#, python-format -msgid "Host group with ref %s not found" -msgstr "Grupo de hosts com ref %s não localizado" - -msgid "Host is NOT Frozen." -msgstr "O Host NÃO está Paralisado" - -msgid "Host is already Frozen." -msgstr "O Host já está Paralisado" - -#, python-format -msgid "Host not found. Failed to remove %(service)s on %(host)s." -msgstr "Host não localizado. Falha ao remover %(service)s no %(host)s." - -#, python-format -msgid "Host replication_status must be %s to failover." -msgstr "O replication_status do host deve ser %s para executar failover." - -#, python-format -msgid "Host type %s not supported." -msgstr "Tipo de host %s não suportado." - -#, python-format -msgid "Host with ports %(ports)s not found." -msgstr "Host com as portas %(ports)s não localizado." - -msgid "Hypermetro and Replication can not be used in the same volume_type." -msgstr "Hypermetro a Replicação não podem ser usados no mesmo volume_type." - -#, python-format -msgid "I/O group %(iogrp)d is not valid; available I/O groups are %(avail)s." -msgstr "" -"O grupo de E/S %(iogrp)d não é válido; os grupos de E/S disponíveis são " -"%(avail)s." - -msgid "ID" -msgstr "ID" - -msgid "" -"If compression is set to True, rsize must also be set (not equal to -1)." -msgstr "" -"Se a compactação estiver configurada como True, rsize também deverá ser " -"configurado (não igual a -1)." - -msgid "If nofmtdisk is set to True, rsize must also be set to -1." -msgstr "" -"Se nofmtdisk for configurado para True, rsize também deverá ser configurado " -"para -1." - -#, python-format -msgid "" -"Illegal value '%(prot)s' specified for flashsystem_connection_protocol: " -"valid value(s) are %(enabled)s." 
-msgstr "" -"Valor ilegal '%(prot)s' especificado para flashsystem_connection_protocol: " -"valor(es) válido(s) são %(enabled)s." - -msgid "Illegal value specified for IOTYPE: 0, 1, or 2." -msgstr "Valor ilegal especificado para IOTYPE: 0, 1 ou 2." - -msgid "Illegal value specified for smarttier: set to either 0, 1, 2, or 3." -msgstr "" -"Valor ilegal especificado para smarttier: configurado para 0, 1, 2 ou 3." - -msgid "" -"Illegal value specified for storwize_svc_vol_grainsize: set to either 32, " -"64, 128, or 256." -msgstr "" -"Valor ilegal especificado para storwize_svc_vol_grainsize: configurado como " -"32, 64, 128 ou 256." - -msgid "" -"Illegal value specified for thin: Can not set thin and thick at the same " -"time." -msgstr "" -"Valor ilegal especificado para thin: não é possível configurar thin e thick " -"ao mesmo tempo." - -#, python-format -msgid "Image %(image_id)s could not be found." -msgstr "A imagem %(image_id)s não pôde ser localizada." - -#, python-format -msgid "Image %(image_id)s is not active." -msgstr "A imagem %(image_id)s não está ativa." - -#, python-format -msgid "Image %(image_id)s is unacceptable: %(reason)s" -msgstr "A imagem %(image_id)s é inaceitável: %(reason)s" - -msgid "Image location not present." -msgstr "Local da imagem ausente." - -#, python-format -msgid "" -"Image virtual size is %(image_size)dGB and doesn't fit in a volume of size " -"%(volume_size)dGB." -msgstr "" -"O tamanho virtual da imagem é %(image_size)d GB e não se ajusta a um volume " -"de tamanho %(volume_size)dGB." - -msgid "" -"ImageBusy error raised while deleting rbd volume. This may have been caused " -"by a connection from a client that has crashed and, if so, may be resolved " -"by retrying the delete after 30 seconds has elapsed." -msgstr "" -"Erro ImageBusy ocorrido ao excluir volume rbd. Isso pode ter sido causado " -"por uma conexão de um cliente que travou e, em caso afirmativo, pode ser " -"resolvido tentando novamente a exclusão após 30 segundos." 
- -#, python-format -msgid "" -"Import record failed, cannot find backup service to perform the import. " -"Request service %(service)s" -msgstr "" -"Importação de registro falhou, não é possível localizar o serviço de backup " -"para executar a importação. Solicitar serviço %(service)s" - -msgid "Incorrect request body format" -msgstr "Formato do corpo da solicitação incorreta" - -msgid "Incorrect request body format." -msgstr "Formato do corpo da solicitação incorreto." - -msgid "Incremental backups exist for this backup." -msgstr "Os backups incrementais existem para esse backup." - -#, python-format -msgid "" -"Infortrend CLI exception: %(err)s Param: %(param)s (Return Code: %(rc)s) " -"(Output: %(out)s)" -msgstr "" -"Exceção da CLI Infortrend: %(err)s Parâmetro: %(param)s (Código de retorno: " -"%(rc)s) (Saída: %(out)s)" - -msgid "Input volumes or snapshots are invalid." -msgstr "Os volumes ou capturas instantâneas de entrada são inválidos." - -msgid "Input volumes or source volumes are invalid." -msgstr "Os volumes de entrada ou de origem são inválidos." - -#, python-format -msgid "Instance %(uuid)s could not be found." -msgstr "A instância %(uuid)s não pôde ser localizada." - -msgid "Insufficient free space available to extend volume." -msgstr "Espaço livre insuficiente disponível para o volume de extensão." - -msgid "Insufficient privileges" -msgstr "Privilégios insuficientes" - -#, python-format -msgid "Invalid 3PAR Domain: %(err)s" -msgstr "Inválido Domínio 3PAR: %(err)s" - -msgid "Invalid ALUA value. ALUA value must be 1 or 0." -msgstr "Valor ALUA inválido. O valor ALUA deve ser 1 ou 0." 
- -msgid "Invalid Ceph args provided for backup rbd operation" -msgstr "Argumentos fornecidos de Ceph inválidos para a operação rbd de backup" - -#, python-format -msgid "Invalid CgSnapshot: %(reason)s" -msgstr "CgSnapshot inválido: %(reason)s" - -#, python-format -msgid "Invalid ConsistencyGroup: %(reason)s" -msgstr "ConsistencyGroup inválido: %(reason)s" - -msgid "Invalid ConsistencyGroup: No host to create consistency group" -msgstr "" -"ConsistencyGroup inválido: Nenhum host para criar grupo de consistências" - -#, python-format -msgid "" -"Invalid HPELeftHand API version found: %(found)s. Version %(minimum)s or " -"greater required for manage/unmanage support." -msgstr "" -"Versão HPELeftHand da API inválida localizada: %(found)s. A versão " -"%(minimum)s ou maior são necessárias para gerenciar/não gerenciar o suporte." - -#, python-format -msgid "Invalid IP address format: '%s'" -msgstr "Formato de endereço IP inválido: '%s'" - -#, python-format -msgid "" -"Invalid QoS specification detected while getting QoS policy for volume %s" -msgstr "" -"Especificação de QoS inválida detectada ao obter política de QoS para o " -"volume %s" - -#, python-format -msgid "Invalid Replication Target: %(reason)s" -msgstr "Destino de Replicação Inválido: %(reason)s" - -#, python-format -msgid "" -"Invalid Virtuozzo Storage share specification: %r. Must be: [MDS1[," -"MDS2],...:/][:PASSWORD]." -msgstr "" -"Especificação de compartilhamento de armazenamento Virtuozzo inválido: %r. " -"Deve ser: [MDS1[,MDS2],...:/][:PASSWORD]." 
- -#, python-format -msgid "Invalid XtremIO version %(cur)s, version %(min)s or up is required" -msgstr "" -"Versão XtremIO %(cur)s inválida, versão %(min)s ou posterior é necessária" - -#, python-format -msgid "Invalid allocated quotas defined for the following project quotas: %s" -msgstr "" -"Cotas alocadas inválidas definidas para as cotas de projeto a seguir: %s" - -msgid "Invalid argument" -msgstr "Argumento inválido" - -msgid "Invalid argument - negative seek offset." -msgstr "Argumento inválido – deslocamento de busca negativo." - -#, python-format -msgid "Invalid argument - whence=%s not supported" -msgstr "Argumento inválido - whence=%s não suportado" - -#, python-format -msgid "Invalid argument - whence=%s not supported." -msgstr "Argumento inválido – whence=%s não suportado." - -#, python-format -msgid "Invalid attaching mode '%(mode)s' for volume %(volume_id)s." -msgstr "Modo de anexamento inválido '%(mode)s' para o volume %(volume_id)s." - -#, python-format -msgid "Invalid auth key: %(reason)s" -msgstr "Chave de autenticação inválida: %(reason)s" - -#, python-format -msgid "Invalid backup: %(reason)s" -msgstr "Backup inválido: %(reason)s" - -msgid "Invalid chap user details found in CloudByte storage." -msgstr "" -"Detalhes do usuário chap inválidos localizados no armazenamento do CloudByte." - -#, python-format -msgid "Invalid connection initialization response of volume %(name)s" -msgstr "Resposta de inicialização de conexão inválida %(name)s" - -#, python-format -msgid "" -"Invalid connection initialization response of volume %(name)s: %(output)s" -msgstr "" -"Resposta de inicialização de conexão inválida de volume %(name)s: %(output)s" - -#, python-format -msgid "Invalid content type %(content_type)s." -msgstr "Tipo de conteúdo inválido %(content_type)s." 
- -msgid "Invalid credentials" -msgstr "Credenciais inválidas" - -#, python-format -msgid "Invalid directory: %s" -msgstr "Diretório inválido: %s" - -#, python-format -msgid "Invalid disk adapter type: %(invalid_type)s." -msgstr "Tipo de adaptador de disco inválido: %(invalid_type)s." - -#, python-format -msgid "Invalid disk backing: %s." -msgstr "Disco inválido auxiliar: %s." - -#, python-format -msgid "Invalid disk type: %(disk_type)s." -msgstr "Tipo de disco inválido: %(disk_type)s." - -#, python-format -msgid "Invalid disk type: %s." -msgstr "Tipo de disco inválido: %s." - -#, python-format -msgid "Invalid host: %(reason)s" -msgstr "Host inválido: %(reason)s" - -#, python-format -msgid "" -"Invalid hpe3parclient version found (%(found)s). Version %(minimum)s or " -"greater required. Run \"pip install --upgrade python-3parclient\" to upgrade " -"the hpe3parclient." -msgstr "" -"Versão de hpe3parclient inválida localizada (%(found)s). Versão %(minimum)s " -"ou maior é necessária. Execute \"pip install --upgrade python-3parclient\" " -"para fazer upgrade do hpe3parclient." - -#, python-format -msgid "" -"Invalid hpelefthandclient version found (%(found)s). Version %(minimum)s or " -"greater required. Run 'pip install --upgrade python-lefthandclient' to " -"upgrade the hpelefthandclient." -msgstr "" -"Versão de hpelefthandclient inválida localizada (%(found)s). Versão " -"%(minimum)s ou maior é necessária. Execute 'pip install --upgrade python-" -"lefthandclient' para fazer upgrade do hpelefthandclient." - -#, python-format -msgid "Invalid image href %(image_href)s." -msgstr "Imagem inválida href %(image_href)s." - -msgid "Invalid image identifier or unable to access requested image." -msgstr "" -"O identificador da imagem inválido ou incapaz de acessar a imagem solicitada." - -msgid "Invalid imageRef provided." -msgstr "imageRef inválida fornecida." 
- -msgid "Invalid input" -msgstr "Entrada inválida" - -#, python-format -msgid "Invalid input received: %(reason)s" -msgstr "Entrada inválida recebida: %(reason)s" - -#, python-format -msgid "Invalid is_public filter [%s]" -msgstr "Filtro is_public inválido [%s]" - -#, python-format -msgid "Invalid lun type %s is configured." -msgstr "Um tipo de LUN inválido %s foi configurado." - -#, python-format -msgid "Invalid metadata size: %(reason)s" -msgstr "Tamanho de metadados inválido: %(reason)s" - -#, python-format -msgid "Invalid metadata: %(reason)s" -msgstr "Metadados inválidos: %(reason)s" - -#, python-format -msgid "Invalid mount point base: %s" -msgstr "Base de ponto de montagem inválido: %s" - -#, python-format -msgid "Invalid mount point base: %s." -msgstr "Base de ponto de montagem inválida: %s." - -#, python-format -msgid "Invalid new snapCPG name for retype. new_snap_cpg='%s'." -msgstr "Novo nome do snapCPG inválido para nova digitação. new_snap_cpg='%s'." - -#, python-format -msgid "Invalid port number %(config)s for Coho rpc port" -msgstr "Número de porta inválido %(config)s para a porta RPC do Coho." - -#, python-format -msgid "" -"Invalid prefetch type '%s' is configured. PrefetchType must be in 0,1,2,3." -msgstr "" -"Um tipo de pré-busca inválido '%s' está configurado. O PrefetchType deve ser " -"em 0,1,2,3." - -#, python-format -msgid "Invalid qos specs: %(reason)s" -msgstr "Inválidas qos specs: %(reason)s" - -msgid "Invalid request to attach volume to an invalid target" -msgstr "Solicitação inválida para anexar volume a um destino inválido" - -msgid "" -"Invalid request to attach volume with an invalid mode. Attaching mode should " -"be 'rw' or 'ro'" -msgstr "" -"Solicitação inválida para anexar o volume a um modo inválido. O modo de " -"anexação deve ser 'rw' ou 'ro'" - -#, python-format -msgid "Invalid reservation expiration %(expire)s." -msgstr "Expiração de reserva inválida %(expire)s." 
- -msgid "Invalid response header from RPC server" -msgstr "Cabeçalho de resposta inválido a partir do servidor RPC" - -#, python-format -msgid "Invalid secondary id %s." -msgstr "ID secundário inválido %s" - -msgid "Invalid service catalog json." -msgstr "Catálogo de serviço json inválido." - -msgid "Invalid sheepdog cluster status." -msgstr "Status do cluster sheepdog inválido." - -#, python-format -msgid "Invalid snapshot: %(reason)s" -msgstr "Snapshot inválido: %(reason)s" - -#, python-format -msgid "Invalid status: '%s'" -msgstr "Status inválido: '%s'" - -#, python-format -msgid "Invalid storage pool %s requested. Retype failed." -msgstr "" -"Conjunto de armazenamentos inválido %s solicitado. Digitar novamente com " -"falha." - -#, python-format -msgid "Invalid storage pool %s specificed." -msgstr "Conjunto de armazenamento inválido %s especificado." - -msgid "Invalid storage pool is configured." -msgstr "Um conjunto de armazenamento inválido foi configurado." - -msgid "Invalid transport type." -msgstr "Tipo de transporte inválido." - -#, python-format -msgid "Invalid update setting: '%s'" -msgstr "Configuração de atualização inválida: '%s'" - -#, python-format -msgid "Invalid value '%s' for force." -msgstr "Valor inválido '%s' para força." - -#, python-format -msgid "Invalid value '%s' for force. " -msgstr "Valor inválido '%s' para força. " - -#, python-format -msgid "Invalid value '%s' for is_public. Accepted values: True or False." -msgstr "" -"Valor inválido '%s' para valores is_public. Valores aceitos: True ou False." - -#, python-format -msgid "Invalid value '%s' for skip_validation." -msgstr "Valor inválido '%s' para skip_validation." 
- -#, python-format -msgid "Invalid value for 'bootable': '%s'" -msgstr "Valor inválido para 'inicializável': '%s'" - -#, python-format -msgid "Invalid value for 'force': '%s'" -msgstr "Valor inválido para 'force': '%s'" - -#, python-format -msgid "Invalid value for 'readonly': '%s'" -msgstr "Valor inválido para 'somente leitura': '%s'" - -msgid "Invalid value for 'scheduler_max_attempts', must be >=1" -msgstr "Valor inválido para 'scheduler_max_attempts'; deve ser >= 1" - -msgid "Invalid value for NetApp configuration option netapp_host_type." -msgstr "" -"Valor inválido para a opção de configuração netapp_host_type do NetApp." - -msgid "Invalid value for NetApp configuration option netapp_lun_ostype." -msgstr "" -"Valor inválido para a opção de configuração netapp_lun_ostype do NetApp." - -#, python-format -msgid "Invalid value for age, %(age)s" -msgstr "Valor inválido para a idade, %(age)s" - -#, python-format -msgid "Invalid value: \"%s\"" -msgstr "Valor inválido: \"%s\"" - -#, python-format -msgid "" -"Invalid volume size provided for create request: %s (size argument must be " -"an integer (or string representation of an integer) and greater than zero)." -msgstr "" -"Tamanho de volume inválido fornecido para a solicitação de criação: %s (o " -"tamanho do argumento deve ser um número inteiro (ou representação em " -"sequência de um número inteiro) e maior que zero)." - -#, python-format -msgid "Invalid volume type: %(reason)s" -msgstr "Tipo de volume inválido: %(reason)s" - -#, python-format -msgid "Invalid volume: %(reason)s" -msgstr "Volume inválido: %(reason)s" - -#, python-format -msgid "" -"Invalid volume: Cannot add volume %(volume_id)s to consistency group " -"%(group_id)s because volume is in an invalid state: %(status)s. Valid states " -"are: ('available', 'in-use')." -msgstr "" -"Volume inválido: Não é possível incluir o volume %(volume_id)s no grupo de " -"consistências %(group_id)s porque o volume está em um estado inválido: " -"%(status)s. 
Os estados válidos são: ('disponível', 'em uso')." - -#, python-format -msgid "" -"Invalid volume: Cannot add volume %(volume_id)s to consistency group " -"%(group_id)s because volume type %(volume_type)s is not supported by the " -"group." -msgstr "" -"Volume inválido: Não é possível incluir o volume %(volume_id)s no grupo de " -"consistências %(group_id)s porque o tipo de volume %(volume_type)s não é " -"suportado pelo grupo de consistências." - -#, python-format -msgid "" -"Invalid volume: Cannot add volume fake-volume-uuid to consistency group " -"%(group_id)s because volume cannot be found." -msgstr "" -"Volume inválido: Não é possível incluir o volume fake-volume-uuid no grupo " -"de consistências %(group_id)s porque o volume não pode ser localizado." - -#, python-format -msgid "" -"Invalid volume: Cannot remove volume fake-volume-uuid from consistency group " -"%(group_id)s because it is not in the group." -msgstr "" -"Volume inválido: Não é possível remover o volume fake-volume-uuid do grupo " -"de consistências %(group_id)s porque ele não está no grupo." - -#, python-format -msgid "Invalid volume_type passed: %s." -msgstr "volume_type inválido transmitido: %s." - -#, python-format -msgid "" -"Invalid volume_type provided: %s (requested type is not compatible; either " -"match source volume, or omit type argument)." -msgstr "" -"volume_type inválido fornecido: %s (o tipo solicitado não é compatível; " -"corresponda o volume de origem ou omita o argumento de tipo)." - -#, python-format -msgid "" -"Invalid volume_type provided: %s (requested type is not compatible; " -"recommend omitting the type argument)." -msgstr "" -"volume_type inválido fornecido: %s (o tipo solicitado não é compatível; " -"recomendar omitir o argumento de tipo)." - -#, python-format -msgid "" -"Invalid volume_type provided: %s (requested type must be supported by this " -"consistency group)." 
-msgstr "" -"volume_type inválido fornecido: %s (o tipo solicitado deve ser suportado por " -"este grupo de consistências)." - -#, python-format -msgid "Invalid wwpns format %(wwpns)s" -msgstr "Formato inválido de wwpns %(wwpns)s" - -msgid "Invoking web service failed." -msgstr "A chamada do serviço da web falhou." - -msgid "Issue encountered waiting for job." -msgstr "Emita espera encontrada para a tarefa." - -msgid "Issue encountered waiting for synchronization." -msgstr "Emita encontrado aguardando a sincronização." - -msgid "" -"Issuing a fail-over failed because replication is not properly configured." -msgstr "" -"Falha ao emitir um failover porque a replicação não está configurada " -"corretamente." - -#, python-format -msgid "Job id not found in CloudByte's create volume [%s] response." -msgstr "" -"ID da tarefa não localizado na resposta [%s] do volume de criação do " -"CloudByte." - -#, python-format -msgid "Job id not found in CloudByte's delete volume [%s] response." -msgstr "" -"ID da tarefa não localizado na resposta de exclusão de volume [%s] do " -"CloudByte." - -msgid "" -"Key names can only contain alphanumeric characters, underscores, periods, " -"colons and hyphens." -msgstr "" -"Nomes de chaves só podem conter caracteres alfanuméricos, sublinhados, " -"pontos, vírgulas e hifens." - -#, python-format -msgid "KeyError: %s" -msgstr "KeyError: %s" - -msgid "Keystone version 3 or greater must be used to get nested quota support." -msgstr "" -"O Keystone versão 3 ou maior deve ser usado para obter o suporte de cota " -"aninhado." - -#, python-format -msgid "LU does not exist for volume: %s" -msgstr "O LU não existe para o volume: %s" - -msgid "LUN export failed!" -msgstr "Falha ao exportar LUN!" - -msgid "LUN map overflow on every channel." -msgstr "Estouro do mapa de LUN em todos os canais." - -#, python-format -msgid "LUN not found with given ref %s." -msgstr "LUN não localizado com ref %s dada." 
- -#, python-format -msgid "LUN number is out of bound on channel id: %(ch_id)s." -msgstr "O número do LUN está fora do limite no ID de canal: %(ch_id)s." - -#, python-format -msgid "Last %s cinder syslog entries:-" -msgstr "Últimas %s entradas syslog do cinder:-" - -msgid "LeftHand cluster not found" -msgstr "cluster LeftHand não localizado" - -msgid "License is unavailable." -msgstr "A licença está indisponível." - -#, python-format -msgid "Line %(dis)d : %(line)s" -msgstr "Linha %(dis)d: %(line)s" - -msgid "Link path already exists and its not a symlink" -msgstr "Caminho do link já existe e não é um symlink" - -#, python-format -msgid "Linked clone of source volume not supported in state: %s." -msgstr "Clonar vinculado do volume de origem não suportado no estado: %s." - -msgid "Lock acquisition failed." -msgstr "A aquisição de bloqueio falhou." - -msgid "Logout session error." -msgstr "Erro de sessão de logout." - -msgid "" -"Lookup service not configured. Config option for fc_san_lookup_service needs " -"to specify a concrete implementation of the lookup service." -msgstr "" -"Serviço de consulta não configurado. Opção de configuração para " -"fc_san_lookup_service é necessária especificar uma implementação concreta do " -"serviço de consulta." - -msgid "Lun migration error." -msgstr "Erro de migração de lun." - -#, python-format -msgid "" -"MD5 of object: %(object_name)s before: %(md5)s and after: %(etag)s is not " -"same." -msgstr "" -"O MD5 do objeto: %(object_name)s antes: %(md5)s e depois: %(etag)s não é o " -"mesmo." 
- -#, python-format -msgid "MSG_DENIED: %r" -msgstr "MSG_DENIED: %r" - -#, python-format -msgid "MSG_DENIED: AUTH_ERROR: %r" -msgstr "MSG_DENIED: AUTH_ERROR: %r" - -#, python-format -msgid "MSG_DENIED: RPC_MISMATCH: %r" -msgstr "MSG_DENIED: RPC_MISMATCH: %r" - -#, python-format -msgid "Malformed fcns output string: %s" -msgstr "Sequência de saída de fcns malformada: %s" - -#, python-format -msgid "Malformed message body: %(reason)s" -msgstr "Corpo da mensagem malformado: %(reason)s" - -#, python-format -msgid "Malformed nameserver string: %s" -msgstr "Sequência de servidor de nomes mal formada: %s" - -msgid "Malformed request body" -msgstr "Corpo da solicitação malformado" - -msgid "Malformed request body." -msgstr "Corpo da solicitação malformado." - -msgid "Malformed request url" -msgstr "URL da solicitação malformada" - -#, python-format -msgid "Malformed response to command %(cmd)s: %(reason)s" -msgstr "Resposta malformada para o comando %(cmd)s: %(reason)s" - -msgid "Malformed scheduler_hints attribute" -msgstr "Atributo scheduler_hints malformado" - -#, python-format -msgid "Malformed show fcns database string: %s" -msgstr "Sequência de demonstração do banco de dados fcns malformada: %s" - -#, python-format -msgid "" -"Malformed zone configuration: (switch=%(switch)s zone_config=" -"%(zone_config)s)." -msgstr "" -"Configuração de zona mal formada: (switch=%(switch)s zone_config=" -"%(zone_config)s)." - -#, python-format -msgid "Malformed zone status: (switch=%(switch)s zone_config=%(zone_config)s)." -msgstr "" -"Status da zona malformado: (switch=%(switch)s zone_config=%(zone_config)s)." - -msgid "Manage existing get size requires 'id'." -msgstr "Gerenciar o tamanho da obtenção existente requer 'id'." - -msgid "Manage existing snapshot not implemented." -msgstr "" -"O gerenciamento de captura instantânea existente não está implementado." 
- -#, python-format -msgid "" -"Manage existing volume failed due to invalid backend reference " -"%(existing_ref)s: %(reason)s" -msgstr "" -"Falha ao gerenciar volume existente devido a uma referência de backend " -"inválido %(existing_ref)s: %(reason)s" - -#, python-format -msgid "Manage existing volume failed due to volume type mismatch: %(reason)s" -msgstr "" -"Falha ao gerenciar volume existente devido a incompatibilidade de tipo de " -"volume: %(reason)s" - -msgid "Manage existing volume not implemented." -msgstr "Gerenciar volume existente não implementado." - -msgid "Manage existing volume requires 'source-id'." -msgstr "Gerenciar volume existente requer 'source-id'." - -#, python-format -msgid "" -"Manage volume is not supported if FAST is enable. FAST policy: " -"%(fastPolicyName)s." -msgstr "" -"O gerenciamento de volume não será suportado se FAST estiver ativado. " -"Política FAST: %(fastPolicyName)s." - -msgid "Managing of snapshots to failed-over volumes is not allowed." -msgstr "" -"Não é permitido gerenciar capturas instantâneas para volumes com failover " -"executado. " - -msgid "Map info is None due to array version not supporting hypermetro." -msgstr "" -"As informações do mapa são Nenhum porque a versão da matriz não suporta " -"hypermetro." - -#, python-format -msgid "" -"Mapping %(id)s prepare failed to complete within theallotted %(to)d seconds " -"timeout. Terminating." -msgstr "" -"Preparar mapeamento de %(id)s falhou ao concluir dentro de theallotted " -"%(to)d segundos atribuído. Finalizando." 
- -#, python-format -msgid "Masking view %(maskingViewName)s was not deleted successfully" -msgstr "" -"A visualização de mascaramento %(maskingViewName)s não foi excluída com " -"sucesso" - -#, python-format -msgid "Maximum number of backups allowed (%(allowed)d) exceeded" -msgstr "Número máximo de backups permitidos (%(allowed)d) excedido" - -#, python-format -msgid "Maximum number of snapshots allowed (%(allowed)d) exceeded" -msgstr "" -"Número máximo de capturas instantâneas permitido (%(allowed)d) excedido" - -#, python-format -msgid "" -"Maximum number of volumes allowed (%(allowed)d) exceeded for quota " -"'%(name)s'." -msgstr "" -"Número máximo de volumes permitido (%(allowed)d) excedido para a cota " -"'%(name)s'." - -#, python-format -msgid "May specify only one of %s" -msgstr "Só é possível especificar um de %s" - -msgid "Metadata backup already exists for this volume" -msgstr "Backup de metadados já existe para esse volume" - -#, python-format -msgid "Metadata backup object '%s' already exists" -msgstr "Objeto de backup de metadados '%s' já existe" - -msgid "Metadata property key blank." -msgstr "A chave da propriedade de metadados está em branco." - -msgid "Metadata restore failed due to incompatible version" -msgstr "Restauração de metadados falhou devido à versão incompatível" - -msgid "Metadata restore failed due to incompatible version." -msgstr "A restauração de metadados falhou devido à versão incompatível." - -msgid "" -"Missing 'purestorage' python module, ensure the library is installed and " -"available." -msgstr "" -"Módulo python 'purestorage' ausente, assegure-se de que a biblioteca esteja " -"instalada e disponível." - -msgid "Missing Fibre Channel SAN configuration param - fc_fabric_names" -msgstr "" -"Parâmetro de configuração de SAN Fibre Channel ausente - fc_fabric_names" - -msgid "Missing request body" -msgstr "Corpo da solicitação ausente" - -msgid "Missing request body." -msgstr "Corpo da solicitação ausente." 
- -#, python-format -msgid "Missing required element '%s' in request body" -msgstr "Elemento obrigatório '%s' ausente no corpo da solicitação" - -#, python-format -msgid "Missing required element '%s' in request body." -msgstr "Elemento obrigatório '%s' ausente no corpo da solicitação." - -msgid "Missing required element 'consistencygroup' in request body." -msgstr "Elemento requerido ausente 'consistencygroup' no corpo da solicitação." - -msgid "Missing required element quota_class_set in request body." -msgstr "Faltando elemento obrigatório quota_class_set no corpo da requisição." - -msgid "Missing required element snapshot in request body." -msgstr "" -"Captura instantânea de elemento requerido ausente no corpo da solicitação." - -msgid "" -"Multiple SerialNumbers found, when only one was expected for this operation. " -"Please change your EMC config file." -msgstr "" -"Diversos SerialNumbers localizados, quando somente um era esperado para esta " -"operação. Mude o arquivo de configuração do EMC." - -#, python-format -msgid "Multiple copies of volume %s found." -msgstr "Várias cópias do volume %s localizadas." - -#, python-format -msgid "Multiple matches found for '%s', use an ID to be more specific." -msgstr "" -"Várias correspondências localizadas para '%s', use um ID para ser mais " -"específico." - -msgid "Multiple profiles found." -msgstr "Vários perfis localizados." 
- -msgid "Must implement a fallback schedule" -msgstr "Deve implementar um planejamento de fallback" - -msgid "Must implement find_retype_host" -msgstr "Deve implementar find_retype_host" - -msgid "Must implement host_passes_filters" -msgstr "Deve implementar host_passes_filters" - -msgid "Must implement schedule_create_consistencygroup" -msgstr "Deve implementar schedule_create_consistencygroup" - -msgid "Must implement schedule_create_volume" -msgstr "Deve implementar schedule_create_volume" - -msgid "Must implement schedule_get_pools" -msgstr "Deve implementar schedule_get_pools" - -msgid "Must pass wwpn or host to lsfabric." -msgstr "É necessário passar wwpn ou host para lsfabric." - -msgid "" -"Must run this command as cloud admin using a Keystone policy.json which " -"allows cloud admin to list and get any project." -msgstr "" -"Deve-se executar esse comando como um administrador em nuvem usando policy." -"json do Keystone, que permite que o administrador em nuvem liste e obtenha " -"qualquer projeto. " - -msgid "Must specify 'connector'" -msgstr "Deve especificar 'conector'" - -msgid "Must specify 'connector'." -msgstr "Deve especificar 'connector'." - -msgid "Must specify 'host'." -msgstr "Deve especificar 'host'." - -msgid "Must specify 'new_volume'" -msgstr "Deve especificar 'new_volume'" - -msgid "Must specify 'status'" -msgstr "Deve especificar 'status'" - -msgid "" -"Must specify 'status', 'attach_status' or 'migration_status' for update." -msgstr "" -"Deve especificar 'status', ' attach_status' ou 'migration_status' para " -"atualização." - -msgid "Must specify a valid attach status" -msgstr "Deve especificar um status de anexo válido" - -msgid "Must specify a valid migration status" -msgstr "Deve especificar um status de migração válido" - -#, python-format -msgid "Must specify a valid persona %(valid)s,value '%(persona)s' is invalid." -msgstr "" -"Deve especificar uma pessoa válida %(valid)s, o valor '%(persona)s' é " -"inválido." 
- -#, python-format -msgid "" -"Must specify a valid provisioning type %(valid)s, value '%(prov)s' is " -"invalid." -msgstr "" -"Deve especificar um tipo de fornecimento válido %(valid)s, o valor " -"'%(prov)s' é inválido." - -msgid "Must specify a valid status" -msgstr "Deve especificar um status válido" - -msgid "Must specify an ExtensionManager class" -msgstr "Deve especificar uma classe ExtensionManager" - -msgid "Must specify bootable in request." -msgstr "Deve especificar inicializável na solicitação." - -msgid "Must specify protection domain name or protection domain id." -msgstr "Deve especificar o nome ou o ID do domínio de proteção." - -msgid "Must specify readonly in request." -msgstr "Deve especificar somente leitura na solicitação." - -msgid "Must specify snapshot source-name or source-id." -msgstr "" -"Deve-se especificar o source-name ou o source-id da captura instantânea." - -msgid "Must specify source-name or source-id." -msgstr "Deve-se especificar o elemento source-name ou source-id." - -msgid "Must specify storage pool name or id." -msgstr "Deve especificar um nome ou ID do conjunto de armazenamentos." - -msgid "Must specify storage pools. Option: sio_storage_pools." -msgstr "" -"Deve-se especificar conjuntos de armazenamentos. Opções: sio_storage_pools." - -msgid "Must supply a positive, non-zero value for age" -msgstr "Deve fornecer um número positivo, diferente de zero para a idade" - -#, python-format -msgid "" -"NAS config '%(name)s=%(value)s' invalid. Must be 'auto', 'true', or 'false'" -msgstr "" -"Configuração de NAS ‘%(name)s=%(value)s' inválida. Deve ser ‘auto', 'true’ " -"ou 'false'" - -#, python-format -msgid "NFS config file at %(config)s doesn't exist" -msgstr "O arquivo de configuração do NFS em %(config)s não existe" - -#, python-format -msgid "NFS file %s not discovered." -msgstr "Arquivo NFS %s não descoberto." - -msgid "NFS file could not be discovered." -msgstr "O arquivo NFS não pôde ser descoberto." 
- -msgid "NaElement name cannot be null." -msgstr "O nome NaElement não pode ser nulo. " - -msgid "Name" -msgstr "Nome" - -msgid "" -"Name, description, add_volumes, and remove_volumes can not be all empty in " -"the request body." -msgstr "" -"Nome, descrição, add_volumes e remove_volumes não podem estar todos vazios " -"no corpo da solicitação." - -msgid "Need non-zero volume size" -msgstr "Necessário tamanho do volume diferente de zero" - -#, python-format -msgid "Neither MSG_DENIED nor MSG_ACCEPTED: %r" -msgstr "Nem MSG_DENIED nem MSG_ACCEPTED: %r" - -msgid "NetApp Cinder Driver exception." -msgstr "Exceção no driver NetApp Cinder." - -#, python-format -msgid "" -"New size for extend must be greater than current size. (current: %(size)s, " -"extended: %(new_size)s)." -msgstr "" -"Novo tamanho a ser estendido deve ser maior que o tamanho atual. (atual: " -"%(size)s, estendido: %(new_size)s)." - -#, python-format -msgid "" -"New size should be bigger than the real size from backend storage. realsize: " -"%(oldsize)s, newsize: %(newsize)s." -msgstr "" -"O novo tamanho deve ser maior que o tamanho real a partir do armazenamento " -"de backend. realsize: %(oldsize)s, newsize: %(newsize)s." - -msgid "New volume size must be specified as an integer." -msgstr "Novo tamanho do volume deve ser especificado como um número inteiro." - -msgid "New volume type must be specified." -msgstr "Novo tipo de volume deve ser especificado." - -msgid "New volume type not specified in request_spec." -msgstr "Tipo de volume novo não especificado em request_spec." - -msgid "Nimble Cinder Driver exception" -msgstr "Exceção Nimble Cinder Driver" - -msgid "No FC initiator can be added to host." -msgstr "Nenhum iniciador de FC pode ser incluído no host. " - -msgid "No FC port connected to fabric." -msgstr "Nenhuma porta FC conectada à malha." - -msgid "No FCP targets found" -msgstr "Nenhum destino do FCP localizado" - -msgid "No Port Group elements found in config file." 
-msgstr "" -"Nenhum elemento de Grupo de Portas localizado no arquivo de configuração." - -msgid "No VF ID is defined in the configuration file." -msgstr "Nenhum ID de VF está definido no arquivo de configuração." - -msgid "No active iSCSI portals with supplied iSCSI IPs" -msgstr "Nenhum portal iSCSI ativo com IPs de iSCSI fornecidos" - -#, python-format -msgid "No available service named %s" -msgstr "Nenhum serviço disponível denominado %s" - -#, python-format -msgid "No backup with id %s" -msgstr "Nenhum backup com o ID %s" - -msgid "No backups available to do an incremental backup." -msgstr "Não há backups disponíveis para fazer um backup incremental." - -msgid "No big enough free disk" -msgstr "Disco livre não é grande o suficiente" - -#, python-format -msgid "No cgsnapshot with id %s" -msgstr "Nenhuma cgsnapshot com o ID %s" - -msgid "No cinder entries in syslog!" -msgstr "Nenhuma entrada do cinder no syslog!" - -#, python-format -msgid "No cloned LUN named %s found on the filer" -msgstr "Nenhum LUN clonado denominado %s foi localizado no arquivador" - -msgid "No config node found." -msgstr "Nenhum nó de configuração localizado." - -#, python-format -msgid "No consistency group with id %s" -msgstr "Nenhum grupo de consistências com o ID %s" - -#, python-format -msgid "No element by given name %s." -msgstr "Nenhum elemento pelo nome fornecido %s." - -msgid "No errors in logfiles!" -msgstr "Sem erros nos arquivos de log!" - -#, python-format -msgid "No file found with %s as backing file." -msgstr "Nenhum arquivo localizado com %s como arquivo auxiliar." - -#, python-format -msgid "" -"No free LUN IDs left. Maximum number of volumes that can be attached to host " -"(%s) has been exceeded." -msgstr "" -"Não resta nenhum ID de LUN. O número máximo de volumes que pode ser anexado " -"ao host (%s) foi excedido." - -msgid "No free disk" -msgstr "Nenhum disco livre" - -#, python-format -msgid "No good iscsi portal found in supplied list for %s." 
-msgstr "Nenhum portal de iscsi bom localizado na lista fornecida para %s." - -#, python-format -msgid "No good iscsi portals found for %s." -msgstr "Nenhum portal de iscsi bom localizado para %s." - -#, python-format -msgid "No host to create consistency group %s." -msgstr "Nenhum host para criar o grupo de consistências %s." - -msgid "No iSCSI-enabled ports on target array." -msgstr "Nenhuma porta ativada para iSCSI na matriz de destino." - -msgid "No image_name was specified in request." -msgstr "Nenhum image_name foi especificado na solicitação." - -msgid "No initiator connected to fabric." -msgstr "Nenhum iniciador conectado à malha." - -#, python-format -msgid "No initiator group found for initiator %s" -msgstr "Nenhum grupo de inicializador localizado para o inicializador %s" - -msgid "No initiators found, cannot proceed" -msgstr "Nenhum inicializador localizado, não é possível continuar" - -#, python-format -msgid "No interface found on cluster for ip %s" -msgstr "Nenhuma interface localizada no cluster para o IP %s" - -msgid "No ip address found." -msgstr "Nenhum endereço IP localizado." - -msgid "No iscsi auth groups were found in CloudByte." -msgstr "Nenhum grupo de autenticação iscsi foi localizado no CloudByte." - -msgid "No iscsi initiators were found in CloudByte." -msgstr "Nenhum inicializador iscsi foi localizado no CloudByte." - -#, python-format -msgid "No iscsi service found for CloudByte volume [%s]." -msgstr "Nenhum serviço de iscsi localizado para o volume do CloudByte [%s]." - -msgid "No iscsi services found in CloudByte storage." -msgstr "Nenhum serviço de iscsi localizado no armazenamento CloudByte." - -#, python-format -msgid "No key file specified and unable to load key from %(cert)s %(e)s." -msgstr "" -"Nenhum arquivo-chave especificado e incapaz de carregar a chave a partir de " -"%(cert)s %(e)s." 
- -msgid "No mounted Gluster shares found" -msgstr "Nenhum compartilhamento de Gluster montado foi localizado" - -msgid "No mounted NFS shares found" -msgstr "Nenhum compartilhamento NFS montado foi localizado" - -msgid "No mounted SMBFS shares found." -msgstr "Nenhum compartilhamento SMBFS montado foi localizado." - -msgid "No mounted Virtuozzo Storage shares found" -msgstr "" -"Nenhum compartilhamento de armazenamento Virtuozzo montado foi localizado" - -msgid "No mounted shares found" -msgstr "Nenhum compartilhamento montado foi localizado" - -#, python-format -msgid "No node found in I/O group %(gid)s for volume %(vol)s." -msgstr "" -"Nenhum nó foi localizado no grupo de E/S %(gid)s para o volume %(vol)s." - -msgid "" -"No pools are available for provisioning volumes. Ensure that the " -"configuration option netapp_pool_name_search_pattern is set correctly." -msgstr "" -"Nenhum conjunto está disponível para volumes de fornecimento. Assegure-se de " -"que a opção de configuração netapp_pool_name_search_pattern esteja definida " -"corretamente." - -msgid "" -"No response was received from CloudByte storage list iSCSI auth user API " -"call." -msgstr "" -"Nenhuma resposta foi recebida da API do usuário de autenticação iSCSI da " -"lista de armazenamento do CloudByte chamada." - -msgid "No response was received from CloudByte storage list tsm API call." -msgstr "" -"Nenhuma resposta foi recebida da chamada API de tsm da lista de " -"armazenamento CloudByte." - -msgid "No response was received from CloudByte's list filesystem api call." -msgstr "" -"Nenhuma resposta foi recebida da chamada api do sistema de arquivos da lista " -"do CloudByte." - -msgid "No service VIP configured and no nexenta_client_address" -msgstr "Nenhum VIP de serviço configurado e nenhum nexenta_client_address" - -#, python-format -msgid "No snap found with %s as backing file." -msgstr "Nenhum snap localizado com %s como arquivo auxiliar." 
- -#, python-format -msgid "No snapshot image found in snapshot group %s." -msgstr "" -"Nenhuma imagem de captura instantânea localizada no grupo de capturas " -"instantâneas %s." - -#, python-format -msgid "No snapshots could be found on volume %s." -msgstr "Nenhuma captura instantânea pôde ser localizada no volume %s." - -#, python-format -msgid "No source snapshots provided to create consistency group %s." -msgstr "" -"Nenhuma captura instantânea de origem fornecida para criar o grupo de " -"consistências %s." - -#, python-format -msgid "No storage path found for export path %s" -msgstr "" -"Nenhum caminho do armazenamento localizado para o caminho de exportação %s" - -#, python-format -msgid "No such QoS spec %(specs_id)s." -msgstr "Sem spec QoS %(specs_id)s." - -msgid "No suitable discovery ip found" -msgstr "Nenhum IP de descoberta adequado foi localizado" - -#, python-format -msgid "No support to restore backup version %s" -msgstr "Não há suporte para restaurar a versão de backup %s" - -#, python-format -msgid "No target id found for volume %(volume_id)s." -msgstr "Nenhum ID de destino localizado para o volume %(volume_id)s." - -msgid "" -"No unused LUN IDs are available on the host; multiattach is enabled which " -"requires that all LUN IDs to be unique across the entire host group." -msgstr "" -"Nenhum ID de LUN não utilizado está disponível no host; a anexação múltipla " -"está ativada, o que requer que todos os IDs de LUN sejam exclusivos em todo " -"o grupo de hosts." - -#, python-format -msgid "No valid host was found. %(reason)s" -msgstr "Nenhum host válido localizado. %(reason)s" - -#, python-format -msgid "No valid hosts for volume %(id)s with type %(type)s" -msgstr "Nenhum host válido para o volume %(id)s com tipo %(type)s" - -#, python-format -msgid "No vdisk with the UID specified by ref %s." -msgstr "Nenhum vdisk com o UID especificado pela referência %s." 
-
-#, python-format
-msgid "No views found for LUN: %s"
-msgstr "Nenhuma visualização localizada para a LUN: %s"
-
-#, python-format
-msgid ""
-"No volume on cluster with vserver %(vserver)s and junction path %(junction)s "
-msgstr ""
-"Nenhum volume no cluster com vserver %(vserver)s e caminho de junção "
-"%(junction)s "
-
-msgid "No volume service(s) started successfully, terminating."
-msgstr "Nenhum serviço de volume iniciado com êxito; finalizando."
-
-msgid "No volume was found at CloudByte storage."
-msgstr "Nenhum volume foi localizado no armazenamento CloudByte."
-
-msgid "No volume_type should be provided when creating test replica."
-msgstr "Nenhum volume_type deve ser fornecido ao criar a réplica de teste."
-
-msgid "No volumes found in CloudByte storage."
-msgstr "Nenhum volume localizado no armazenamento CloudByte."
-
-msgid "No weighed hosts available"
-msgstr "Nenhum host ponderado disponível"
-
-#, python-format
-msgid "Not a valid string: %s"
-msgstr "Não é uma sequência válida: %s"
-
-msgid "Not a valid value for NaElement."
-msgstr "Não é um valor válido para NaElement."
-
-#, python-format
-msgid "Not able to find a suitable datastore for the volume: %s."
-msgstr "Não foi possível encontrar um datastore adequado para o volume: %s."
-
-msgid "Not an rbd snapshot"
-msgstr "Não é uma captura instantânea de rbd"
-
-#, python-format
-msgid "Not authorized for image %(image_id)s."
-msgstr "Não autorizado para a imagem %(image_id)s."
-
-msgid "Not authorized."
-msgstr "Não autorizado."
-
-#, python-format
-msgid "Not enough space on backend (%(backend)s)"
-msgstr "Não há espaço suficiente no backend (%(backend)s)"
-
-msgid "Not enough storage space in the ZFS share to perform this operation."
-msgstr ""
-"Não há espaço de armazenamento suficiente no compartilhamento do ZFS para "
-"executar essa operação."
-
-msgid "Not stored in rbd"
-msgstr "Não armazenado em rbd"
-
-msgid "Nova returned \"error\" status while creating snapshot."
-msgstr "Nova retornou o status \"erro\" ao criar a captura instantânea." - -msgid "Null response received from CloudByte's list filesystem." -msgstr "Resposta nula recebida do sistema de arquivos da lista de CloudByte." - -msgid "Null response received from CloudByte's list iscsi auth groups." -msgstr "" -"Resposta nula recebida dos grupos de autenticação iscsi da lista do " -"CloudByte." - -msgid "Null response received from CloudByte's list iscsi initiators." -msgstr "" -"Resposta nula recebida de inicializadores de iscsi da lista do CloudByte." - -msgid "Null response received from CloudByte's list volume iscsi service." -msgstr "" -"Resposta nula recebida do serviço de iscsi do volume da lista do CloudByte." - -#, python-format -msgid "Null response received while creating volume [%s] at CloudByte storage." -msgstr "" -"Resposta nula recebida ao criar volume [%s] no armazenamento CloudByte." - -#, python-format -msgid "Null response received while deleting volume [%s] at CloudByte storage." -msgstr "" -"Resposta nula recebida ao excluir o volume [%s] no armazenamento do " -"CloudByte." - -#, python-format -msgid "" -"Null response received while querying for [%(operation)s] based job " -"[%(job)s] at CloudByte storage." -msgstr "" -"Resposta nula recebida ao consultar a tarefa baseada em [%(operation)s] " -"[%(job)s] no armazenamento CloudByte." - -msgid "Object Count" -msgstr "Contagem de Objetos" - -msgid "Object Version" -msgstr "Versão do Objeto" - -msgid "Object is not a NetApp LUN." -msgstr "O objeto não é um LUN de NetApp." - -#, python-format -msgid "" -"On an Extend Operation, error adding volume to composite volume: " -"%(volumename)s." -msgstr "" -"Em uma operação Estender, o erro ao incluir o volume para compor o volume " -"%(volumename)s." - -msgid "One of the required inputs from host, port or scheme was not found." -msgstr "" -"Uma das entradas necessárias do host, porta ou esquema não foi localizada." 
- -#, python-format -msgid "" -"Only %(value)s %(verb)s request(s) can be made to %(uri)s every " -"%(unit_string)s." -msgstr "" -"Apenas solicitações %(value)s %(verb)s podem ser feitas ao %(uri)s a cada " -"%(unit_string)s." - -msgid "Only one limit can be set in a QoS spec." -msgstr "Somente um limite pode ser configurado em uma especificação de QoS." - -msgid "" -"Only users with token scoped to immediate parents or root projects are " -"allowed to see its children quotas." -msgstr "" -"Somente usuários com escopo do token definido para pais imediatos ou " -"projetos raiz têm permissão para ver suas cotas filhas." - -msgid "Only volumes managed by OpenStack can be unmanaged." -msgstr "Somente volumes gerenciados pelo OpenStack podem ser não gerenciados." - -#, python-format -msgid "Operation failed with status=%(status)s. Full dump: %(data)s" -msgstr "A operação falhou com o status=%(status)s. Dump completo: %(data)s" - -#, python-format -msgid "Operation not supported: %(operation)s." -msgstr "Operação não suportada: %(operation)s." - -msgid "Option gpfs_images_dir is not set correctly." -msgstr "A opção gpfs_images_dir não está configurada corretamente." - -msgid "Option gpfs_images_share_mode is not set correctly." -msgstr "A opção gpfs_images_share_mode não está configurada corretamente." - -msgid "Option gpfs_mount_point_base is not set correctly." -msgstr "A opção gpfs_mount_point_base não está configurada corretamente." - -#, python-format -msgid "Originating %(res)s %(prop)s must be one of '%(vals)s' values" -msgstr "A origem de %(res)s %(prop)s deve ser um dos valores '%(vals)s'" - -#, python-format -msgid "ParseException: %s" -msgstr "ParseException: %s" - -msgid "Partition name is None, please set smartpartition:partitionname in key." -msgstr "" -"O nome da partição é Nenhum; configure smartpartition:partitionname na chave." 
- -msgid "" -"Password or SSH private key is required for authentication: set either " -"san_password or san_private_key option." -msgstr "" -"A senha ou a chave privada SSH é requerida para autenticação: configure " -"opção san_password ou san_private_key." - -msgid "Path to REST server's certificate must be specified." -msgstr "O caminho para o certificado do servidor REST deve ser especificado." - -#, python-format -msgid "Please create %(pool_list)s pool in advance!" -msgstr "Crie o conjunto %(pool_list)s antecipadamente!" - -#, python-format -msgid "Please create %(tier_levels)s tier in pool %(pool)s in advance!" -msgstr "Crie antecipadamente a camada %(tier_levels)s no conjunto %(pool)s!" - -msgid "Please specify a name for QoS specs." -msgstr "Por favor, especifique o nome para as especificações QoS." - -#, python-format -msgid "Policy doesn't allow %(action)s to be performed." -msgstr "A política não permite que %(action)s sejam executadas." - -#, python-format -msgid "Pool %(poolNameInStr)s is not found." -msgstr "Pool %(poolNameInStr)s não foi encontrado." - -#, python-format -msgid "Pool %s does not exist in Nexenta Store appliance" -msgstr "O conjunto %s não existe no dispositivo Nexenta Store." - -#, python-format -msgid "Pool from volume['host'] %(host)s not found." -msgstr "Conjunto do volume ['host'] %(host)s não localizado." - -#, python-format -msgid "Pool from volume['host'] failed with: %(ex)s." -msgstr "O conjunto do volume ['host'] falhou com: %(ex)s." - -msgid "Pool is not available in the volume host field." -msgstr "O conjunto não está disponível no campo do host de volume." - -msgid "Pool is not available in the volume host fields." -msgstr "O conjunto não está disponível nos campos do host de volume." - -#, python-format -msgid "Pool with name %(pool)s wasn't found in domain %(domain)s." -msgstr "Conjunto com o nome %(pool)s não foi localizado no domínio %(domain)s." 
- -#, python-format -msgid "Pool with name %(pool_name)s wasn't found in domain %(domain_id)s." -msgstr "" -"O conjunto com o nome %(pool_name)s não foi localizado no domínio " -"%(domain_id)s." - -#, python-format -msgid "" -"Pool: %(poolName)s. is not associated to storage tier for fast policy " -"%(fastPolicy)s." -msgstr "" -"Conjunto: %(poolName)s. não está associado à camada de armazenamento para a " -"política fast %(fastPolicy)s." - -#, python-format -msgid "PoolName must be in the file %(fileName)s." -msgstr "PoolName deve estar no arquivo %(fileName)s." - -#, python-format -msgid "Pools %s does not exist" -msgstr "Conjuntos %s não existem" - -msgid "Pools name is not set." -msgstr "O nome dos conjuntos não está configurado." - -#, python-format -msgid "Primary copy status: %(status)s and synchronized: %(sync)s." -msgstr "Status da cópia primária: %(status)s e sincronizada: %(sync)s." - -msgid "Project ID" -msgstr "ID do Projeto" - -#, python-format -msgid "Project quotas are not properly setup for nested quotas: %(reason)s." -msgstr "" -"As Cotas de Projeto não estão configuradas corretamente para as cotas " -"aninhadas: %(reason)s." - -msgid "Protection Group not ready." -msgstr "Grupo de Proteção não pronto." - -#, python-format -msgid "" -"Protocol %(storage_protocol)s is not supported for storage family " -"%(storage_family)s." -msgstr "" -"Protocolo %(storage_protocol)s não é suportado para a família de " -"armazenamento %(storage_family)s." - -msgid "Provided backup record is missing an id" -msgstr "Registro de backup fornecido tem um ID ausente" - -#, python-format -msgid "" -"Provided snapshot status %(provided)s not allowed for snapshot with status " -"%(current)s." -msgstr "" -"Fornecido um status de captura instantânea %(provided)s não permitido para " -"captura instantânea com status %(current)s." - -#, python-format -msgid "" -"Provider information w.r.t CloudByte storage was not found for OpenStack " -"volume [%s]." 
-msgstr "" -"Informações do provedor no armazenamento w.r.t CloudByte não foram " -"localizadas para o volume [%s] do OpenStack." - -#, python-format -msgid "Pure Storage Cinder driver failure: %(reason)s" -msgstr "Falha no driver de Pure Storage do Cinder: %(reason)s" - -#, python-format -msgid "QoS Specs %(specs_id)s already exists." -msgstr "Especificações QoS %(specs_id)s já existem." - -#, python-format -msgid "QoS Specs %(specs_id)s is still associated with entities." -msgstr "Especificações QoS %(specs_id)s ainda estão associadas com entidades." - -#, python-format -msgid "QoS config is wrong. %s must > 0." -msgstr "A configuração de QoS está errada. %s deve ser > 0." - -#, python-format -msgid "" -"QoS policy must specify for IOTYPE and another qos_specs, QoS policy: " -"%(qos_policy)s." -msgstr "" -"A política de QoS deve ser especificada para o IOTYPE e para outras " -"qos_specs, política de QoS: %(qos_policy)s." - -#, python-format -msgid "" -"QoS policy must specify for IOTYPE: 0, 1, or 2, QoS policy: %(qos_policy)s " -msgstr "" -"A política de QoS deve ser especificada para IOTYPE: 0, 1 ou 2, política de " -"QoS: %(qos_policy)s " - -#, python-format -msgid "" -"QoS policy upper_limit and lower_limit conflict, QoS policy: %(qos_policy)s." -msgstr "" -"Conflito de upper_limit e lower_limit da política do QoS, política do QoS: " -"%(qos_policy)s." - -#, python-format -msgid "QoS spec %(specs_id)s has no spec with key %(specs_key)s." -msgstr "Especificação QoS %(specs_id)s não tem spec com chave %(specs_key)s." - -msgid "QoS specs are not supported on this storage family and ONTAP version." -msgstr "" -"Especificações de QoS não são suportadas nesta família de armazenamento e " -"versão de ONTAP." - -msgid "Qos specs still in use." -msgstr "Qos specs ainda em uso." - -msgid "" -"Query by service parameter is deprecated. Please use binary parameter " -"instead." -msgstr "" -"Parâmetro de consulta por serviço está depreciado. 
Por favor, use um "
-"parâmetro binário no lugar."
-
-msgid "Query resource pool error."
-msgstr "Erro ao consultar conjunto de recursos."
-
-#, python-format
-msgid "Quota %s limit must be equal or greater than existing resources."
-msgstr ""
-"O limite de cota %s deve ser igual ou maior que os recursos existentes."
-
-#, python-format
-msgid "Quota class %(class_name)s could not be found."
-msgstr "A classe da cota %(class_name)s não pôde ser localizada."
-
-msgid "Quota could not be found"
-msgstr "A cota não pôde ser localizada"
-
-#, python-format
-msgid "Quota exceeded for resources: %(overs)s"
-msgstr "Cota excedida para os recursos: %(overs)s"
-
-#, python-format
-msgid "Quota exceeded: code=%(code)s"
-msgstr "Cota excedida: código=%(code)s"
-
-#, python-format
-msgid "Quota for project %(project_id)s could not be found."
-msgstr "A cota para o projeto %(project_id)s não pôde ser localizada."
-
-#, python-format
-msgid ""
-"Quota limit invalid for project '%(proj)s' for resource '%(res)s': limit of "
-"%(limit)d is less than in-use value of %(used)d"
-msgstr ""
-"Limite de cota inválido para o projeto '%(proj)s' para o recurso '%(res)s': "
-"O limite de %(limit)d é menor que o valor em uso de %(used)d"
-
-#, python-format
-msgid "Quota reservation %(uuid)s could not be found."
-msgstr "A reserva da cota %(uuid)s não pôde ser localizada."
-
-#, python-format
-msgid "Quota usage for project %(project_id)s could not be found."
-msgstr "O uso da cota para o projeto %(project_id)s não pôde ser localizado."
-
-#, python-format
-msgid "RBD diff op failed - (ret=%(ret)s stderr=%(stderr)s)"
-msgstr "Op de Dif de RBD falhou – (ret=%(ret)s stderr=%(stderr)s)"
-
-#, python-format
-msgid "REST %(proxy_ver)s hpelefthandclient %(rest_ver)s"
-msgstr "REST %(proxy_ver)s hpelefthandclient %(rest_ver)s"
-
-msgid "REST server IP must by specified."
-msgstr "O IP do servidor REST deve ser especificado."
-
-msgid "REST server password must by specified."
-msgstr "A senha do servidor REST deve ser especificada."
-
-msgid "REST server username must by specified."
-msgstr "O nome do usuário do servidor REST deve ser especificado."
-
-msgid "RPC Version"
-msgstr "Versão do RPC"
-
-msgid "RPC server response is incomplete"
-msgstr "A resposta do servidor RPC está incompleta"
-
-msgid "Raid did not have MCS Channel."
-msgstr "O RAID não tinha o Canal MCS."
-
-#, python-format
-msgid "Received error string: %s"
-msgstr "Sequência de erros recebida: %s"
-
-msgid "Reference must be for an unmanaged snapshot."
-msgstr "A referência deve ser para uma captura instantânea não gerenciada."
-
-msgid "Reference must be for an unmanaged virtual volume."
-msgstr "A referência deve ser para um volume virtual não gerenciado."
-
-msgid "Reference must be the name of an unmanaged snapshot."
-msgstr "A referência deve ser o nome de uma captura instantânea não gerenciada."
-
-msgid "Reference must be the volume name of an unmanaged virtual volume."
-msgstr ""
-"A referência deve ser o nome do volume de um volume virtual não gerenciado."
-
-msgid "Reference must contain either source-name or source-id element."
-msgstr "A referência deve conter um elemento source-id ou source-name."
-
-msgid "Reference must contain source-id or source-name element."
-msgstr "A referência deve conter um elemento source-id ou source-name."
-
-msgid "Reference must contain source-id or source-name key."
-msgstr "A referência deve conter a chave source-id ou source-name."
-
-msgid "Reference must contain source-id or source-name."
-msgstr "A referência deve conter source-id ou source-name."
-
-msgid "Reference must contain source-id."
-msgstr "A referência deve conter o source-id."
-
-msgid "Reference must contain source-name element."
-msgstr "A referência deve conter o elemento de nome de origem."
-
-msgid "Reference must contain source-name or source-id."
-msgstr "A referência deve conter source-name ou source-id."
-
-msgid "Reference must contain source-name."
-msgstr "A referência deve conter o nome de origem." - -msgid "Reference to volume to be managed must contain source-name." -msgstr "A referência ao volume a ser gerenciado deve conter o source-name." - -#, python-format -msgid "Reference to volume: %s to be managed must contain source-name." -msgstr "A referência ao volume: %s a ser gerenciado deve conter o source-name." - -#, python-format -msgid "" -"Refusing to migrate volume ID: %(id)s. Please check your configuration " -"because source and destination are the same Volume Group: %(name)s." -msgstr "" -"Recusando migrar o ID do volume: %(id)s. Verifique sua configuração porque a " -"origem e o destino são o mesmo Grupo de Volume: %(name)s." - -msgid "Remote pool cannot be found." -msgstr "O conjunto remoto não pode ser localizado." - -msgid "Remove CHAP error." -msgstr "Erro ao remover CHAP." - -msgid "Remove fc from host error." -msgstr "Erro ao remover FC do host." - -msgid "Remove host from array error." -msgstr "Erro ao remover host da matriz." - -msgid "Remove host from hostgroup error." -msgstr "Erro ao remover host do grupo de hosts." - -msgid "Remove iscsi from host error." -msgstr "Erro ao remover iscsi do host." - -msgid "Remove lun from QoS error." -msgstr "Erro ao remover LUN do QoS." - -msgid "Remove lun from cache error." -msgstr "Erro ao remover LUN do cache." - -msgid "Remove lun from partition error." -msgstr "Erro ao remover LUN da partição." - -msgid "Remove port from port group error." -msgstr "Erro ao remover porta no grupo de portas." - -msgid "Remove volume export failed." -msgstr "Falha ao remover exportação de volume." - -msgid "Rename lun on array error." -msgstr "Erro ao renomear LUN na matriz." - -msgid "Rename snapshot on array error." -msgstr "Erro ao renomear a captura instantânea na matriz." - -#, python-format -msgid "Replication %(name)s to %(ssn)s failed." -msgstr "A replicação de %(name)s para %(ssn)s falhou." 
- -#, python-format -msgid "Replication Service Capability not found on %(storageSystemName)s." -msgstr "" -"Recurso de serviço de replicação não localizado em %(storageSystemName)s." - -#, python-format -msgid "Replication Service not found on %(storageSystemName)s." -msgstr "O Serviço de replicação não foi localizado em %(storageSystemName)s." - -msgid "Replication not allowed yet." -msgstr "Replicação ainda não permitida." - -msgid "Request body and URI mismatch" -msgstr "Corpo da solicitação e incompatibilidade de URI" - -msgid "Request body contains too many items" -msgstr "O corpo da solicitação contém excesso de itens" - -msgid "Request body contains too many items." -msgstr "Corpo da requisição contém itens demais." - -msgid "Request body empty" -msgstr "Corpo da solicitação vazio" - -#, python-format -msgid "Request to Datera cluster returned bad status: %(status)s | %(reason)s" -msgstr "" -"A solicitação ao cluster Datera retornou o status inválido: %(status)s | " -"%(reason)s" - -#, python-format -msgid "" -"Requested backup exceeds allowed Backup gigabytes quota. Requested " -"%(requested)sG, quota is %(quota)sG and %(consumed)sG has been consumed." -msgstr "" -"O backup solicitado excede a cota permitida de gigabytes de Backup. " -"Solicitados %(requested)sG, a cota é %(quota)sG e %(consumed)sG foi " -"consumido." - -#, python-format -msgid "" -"Requested volume or snapshot exceeds allowed %(name)s quota. Requested " -"%(requested)sG, quota is %(quota)sG and %(consumed)sG has been consumed." -msgstr "" -"O volume solicitado ou a captura instantânea excede a cota %(name)s " -"permitida. Solicitados %(requested)sG, a cota é %(quota)sG e %(consumed)sG " -"foi consumido." - -#, python-format -msgid "" -"Requested volume size %(size)d is larger than maximum allowed limit " -"%(limit)d." -msgstr "" -"O tamanho do volume solicitado %(size)d é maior que o limite máximo " -"permitido %(limit)d." 
- -msgid "Required configuration not found" -msgstr "Configuração necessária não localizada" - -#, python-format -msgid "Required flag %s is not set" -msgstr "A sinalização %s necessária não está configurada" - -#, python-format -msgid "" -"Reset backup status aborted, the backup service currently configured " -"[%(configured_service)s] is not the backup service that was used to create " -"this backup [%(backup_service)s]." -msgstr "" -"Reconfiguração do status do backup interrompida, o serviço de backup " -"atualmente configurado [%(configured_service)s] não é o serviço de backup " -"que foi usado para criar esse backup [%(backup_service)s]." - -#, python-format -msgid "Resizing clone %s failed." -msgstr "Falha ao redimensionar clone %s." - -msgid "Resizing image file failed." -msgstr "O redimensionamento do arquivo de imagem falhou." - -msgid "Resource could not be found." -msgstr "O recurso não pôde ser localizado." - -msgid "Resource not ready." -msgstr "O recurso não está pronto." - -#, python-format -msgid "Response error - %s." -msgstr "Erro de resposta - %s." - -msgid "Response error - The storage-system is offline." -msgstr "Erro de resposta - O sistema de armazenamento está off-line." - -#, python-format -msgid "Response error code - %s." -msgstr "Código de erro de resposta – %s." - -msgid "RestURL is not configured." -msgstr "RestURL não está configurado." - -#, python-format -msgid "" -"Restore backup aborted, expected volume status %(expected_status)s but got " -"%(actual_status)s." -msgstr "" -"Restauração de backup interrompida, esperava-se o status de volume " -"%(expected_status)s, mas obteve %(actual_status)s." - -#, python-format -msgid "" -"Restore backup aborted, the backup service currently configured " -"[%(configured_service)s] is not the backup service that was used to create " -"this backup [%(backup_service)s]." 
-msgstr "" -"Restauração de backup interrompida, o serviço de backup atualmente " -"configurado [%(configured_service)s] não é o serviço de backup que foi usado " -"para criar esse backup [%(backup_service)s]." - -#, python-format -msgid "" -"Restore backup aborted: expected backup status %(expected_status)s but got " -"%(actual_status)s." -msgstr "" -"Restauração de backup interrompida: esperava-se o status de backup " -"%(expected_status)s mas obteve %(actual_status)s." - -#, python-format -msgid "" -"Retrieved a different amount of SolidFire volumes for the provided Cinder " -"snapshots. Retrieved: %(ret)s Desired: %(des)s" -msgstr "" -"Recuperada uma quantia diferente de volumes SolidFire para as capturas " -"instantâneas Cinder fornecidas. Recuperados: %(ret)s, Desejados: %(des)s" - -#, python-format -msgid "" -"Retrieved a different amount of SolidFire volumes for the provided Cinder " -"volumes. Retrieved: %(ret)s Desired: %(des)s" -msgstr "" -"Recuperada uma quantia diferente de volumes SolidFire para os volumes Cinder " -"fornecidos. Recuperados: %(ret)s, Desejados: %(des)s" - -#, python-format -msgid "Retry count exceeded for command: %s" -msgstr "Contagem de novas tentativas excedida para o comando: %s" - -msgid "Retryable SolidFire Exception encountered" -msgstr "Exceção do Retryable SolidFire encontrada" - -msgid "Retype requires migration but is not allowed." -msgstr "Digitar novamente requer migração mas não é permitido." - -#, python-format -msgid "Rolling back %(volumeName)s by deleting it." -msgstr "Retroceder %(volumeName)s excluindo-o." - -#, python-format -msgid "" -"Running Cinder with a VMware vCenter version less than %s is not allowed." -msgstr "" -"Não é permitido executar o Cinder com uma versão do VMware vCenter inferior " -"a %s." - -msgid "SAN product is not configured." -msgstr "O produto SAN não está configurado." - -msgid "SAN protocol is not configured." -msgstr "O protocolo SAN não está configurado." 
- -#, python-format -msgid "SMBFS config 'smbfs_oversub_ratio' invalid. Must be > 0: %s" -msgstr "Configuração SMBFS 'smbfs_oversub_ratio' inválida. Deve ser > 0: %s" - -#, python-format -msgid "SMBFS config 'smbfs_used_ratio' invalid. Must be > 0 and <= 1.0: %s" -msgstr "" -"Configuração SMBFS 'smbfs_used_ratio' inválida. Deve ser > 0 e <= 1,0: %s" - -#, python-format -msgid "SMBFS config file at %(config)s doesn't exist." -msgstr "O arquivo de configuração SMBFS em %(config)s não existe." - -msgid "SMBFS config file not set (smbfs_shares_config)." -msgstr "Arquivo de configuração SMBFS não definido (smbfs_shares_config)." - -#, python-format -msgid "SSH Command failed after '%(total_attempts)r' attempts : '%(command)s'" -msgstr "Comando SSH falhou após '%(total_attempts)r' tentativas: '%(command)s'" - -#, python-format -msgid "SSH command injection detected: %(command)s" -msgstr "Injeção de comando SSH detectada: %(command)s" - -#, python-format -msgid "SSH connection failed for %(fabric)s with error: %(err)s" -msgstr "Conexão SSH falhou para %(fabric)s com erro: %(err)s" - -#, python-format -msgid "SSL Certificate expired on %s." -msgstr "Certificado SSL expirado em %s." - -#, python-format -msgid "SSL error: %(arg)s." -msgstr "Erro de SSL: %(arg)s." - -#, python-format -msgid "Scheduler Host Filter %(filter_name)s could not be found." -msgstr "" -"O Filtro do Host do Planejador %(filter_name)s não pôde ser localizado." - -#, python-format -msgid "Scheduler Host Weigher %(weigher_name)s could not be found." -msgstr "" -"O Ponderador %(weigher_name)s do Host do Planejador não pôde ser localizado." - -#, python-format -msgid "" -"Secondary copy status: %(status)s and synchronized: %(sync)s, sync progress " -"is: %(progress)s%%." -msgstr "" -"Status da cópia secundária: %(status)s e sincronizada: %(sync)s, o progresso " -"de sincronização é: %(progress)s%%." 
- -#, python-format -msgid "" -"Secondary id can not be the same as primary array, backend_id = " -"%(secondary)s." -msgstr "" -"O ID secundário não pode ser igual à matriz primária, backend_id = " -"%(secondary)s." - -#, python-format -msgid "SerialNumber must be in the file %(fileName)s." -msgstr "SerialNumber deve estar no arquivo %(fileName)s." - -#, python-format -msgid "Service %(service)s on host %(host)s removed." -msgstr "Serviço %(service)s no host %(host)s removido." - -#, python-format -msgid "Service %(service_id)s could not be found on host %(host)s." -msgstr "O serviço %(service_id)s não pôde ser localizado no host %(host)s." - -#, python-format -msgid "Service %(service_id)s could not be found." -msgstr "O serviço %(service_id)s não pôde ser localizado." - -msgid "Service is too old to fulfil this request." -msgstr "O serviço é muito antigo para preencher essa solicitação." - -msgid "Service is unavailable at this time." -msgstr "O serviço está indisponível neste momento." - -msgid "Set pair secondary access error." -msgstr "Erro de configuração de acesso secundário do par." - -msgid "Sets thin provisioning." -msgstr "Configura thin provisioning." - -msgid "" -"Setting LUN QoS policy group is not supported on this storage family and " -"ONTAP version." -msgstr "" -"Configurar o grupo de política de LUN QoS não é suportado nesta família de " -"armazenamento e versão de ONTAP." - -msgid "" -"Setting file qos policy group is not supported on this storage family and " -"ontap version." -msgstr "" -"Configurar o grupo de política de arquivo qos não é suportado nesta família " -"de armazenamento e versão de ONTAP." - -#, python-format -msgid "" -"Share at %(dir)s is not writable by the Cinder volume service. Snapshot " -"operations will not be supported." -msgstr "" -"O compartilhamento em %(dir)s não pode ser gravado pelo serviço de volume " -"Cinder. As operações de captura instantânea não serão suportadas." 
- -#, python-format -msgid "Sheepdog I/O Error, command was: \"%s\"." -msgstr "Erro de E/S de sheepdog, o comando foi: \"%s\"." - -msgid "" -"Show operations can only be made to projects in the same hierarchy of the " -"project in which users are scoped to." -msgstr "" -"As operações de exibição podem ser feitas somente para projetos na mesma " -"hierarquia do projeto no qual os usuários estão com escopo definido." - -msgid "Size" -msgstr "Tamanho" - -#, python-format -msgid "Size for volume: %s not found, cannot secure delete." -msgstr "Tamanho do volume: %s não localizado, não é seguro excluir." - -#, python-format -msgid "" -"Size is %(image_size)dGB and doesn't fit in a volume of size " -"%(volume_size)dGB." -msgstr "" -"O tamanho é de %(image_size)dGB e não se ajusta em um volume de tamanho de " -"%(volume_size)dGB." - -#, python-format -msgid "" -"Size of specified image %(image_size)sGB is larger than volume size " -"%(volume_size)sGB." -msgstr "" -"Tamanho de imagem especificada %(image_size)sGB é maior que o tamanho do " -"volume %(volume_size)sGB." - -#, python-format -msgid "" -"Snapshot %(id)s has been asked to be deleted while waiting for it to become " -"available. Perhaps a concurrent request was made." -msgstr "" -"A captura instantânea %(id)s foi solicitada para ser excluída enquanto " -"aguardava para tornar-se disponível. Uma solicitação simultânea pode ter " -"sido feita." - -#, python-format -msgid "" -"Snapshot %(id)s was found in state %(state)s rather than 'deleting' during " -"cascade delete." -msgstr "" -"A captura instantânea %(id)s foi localizada no estado %(state)s em vez de " -"'excluída' durante exclusão em cascata." - -#, python-format -msgid "Snapshot %(snapshot_id)s could not be found." -msgstr "A captura instantânea %(snapshot_id)s não pôde ser localizada." - -#, python-format -msgid "Snapshot %(snapshot_id)s has no metadata with key %(metadata_key)s." 
-msgstr "" -"A captura instantânea %(snapshot_id)s não tem metadados com a chave " -"%(metadata_key)s." - -#, python-format -msgid "Snapshot '%s' doesn't exist on array." -msgstr "A captura instantânea '%s' não existe na matriz." - -#, python-format -msgid "" -"Snapshot cannot be created because volume %(vol_id)s is not available, " -"current volume status: %(vol_status)s." -msgstr "" -"A captura instantânea não pode ser criada porque o volume %(vol_id)s não " -"está disponível, status atual do volume: %(vol_status)s." - -msgid "Snapshot cannot be created while volume is migrating." -msgstr "" -"A captura instantânea não pode ser criada enquanto o volume está migrando." - -msgid "Snapshot of secondary replica is not allowed." -msgstr "A captura instantânea da réplica secundária não é permitida." - -#, python-format -msgid "Snapshot of volume not supported in state: %s." -msgstr "Captura instantânea do volume não suportada no estado: %s." - -#, python-format -msgid "Snapshot res \"%s\" that is not deployed anywhere?" -msgstr "" -"Captura instantânea res \"%s\" que não é implementada em qualquer lugar." - -#, python-format -msgid "Snapshot status %(cur)s not allowed for update_snapshot_status" -msgstr "" -"Status de captura instantânea %(cur)s não permitido para " -"update_snapshot_status" - -msgid "Snapshot status must be \"available\" to clone." -msgstr "O status da captura instantânea deve ser \"disponível\" para clonar." - -#, python-format -msgid "" -"Snapshot to be backed up must be available, but the current status is \"%s\"." -msgstr "" -"A captura instantânea a ser feito o backup deve estar disponível, mas o " -"status atual é \"%s\"." - -#, python-format -msgid "Snapshot with id of %s could not be found." -msgstr "A captura instantânea com ID %s não pôde ser localizada." 
-
-#, python-format
-msgid ""
-"Snapshot='%(snap)s' does not exist in base image='%(base)s' - aborting "
-"incremental backup"
-msgstr ""
-"Captura instantânea='%(snap)s' não existe na imagem base='%(base)s' - "
-"interrompendo backup incremental"
-
-#, python-format
-msgid "Snapshots are not supported for this volume format: %s"
-msgstr ""
-"As capturas instantâneas não são suportadas para este formato de volume: %s"
-
-#, python-format
-msgid "Socket error: %(arg)s."
-msgstr "Erro de soquete: %(arg)s."
-
-msgid "SolidFire Cinder Driver exception"
-msgstr "Exceção no driver SolidFire Cinder"
-
-msgid "Sort direction array size exceeds sort key array size."
-msgstr ""
-"O tamanho da matriz de direção de classificação excede o tamanho da matriz "
-"de chave de classificação."
-
-msgid "Source CG is empty. No consistency group will be created."
-msgstr "CG de origem está vazio. Nenhum grupo de consistências será criado."
-
-msgid "Source host details not found."
-msgstr "Detalhes do host de origem não localizados."
-
-msgid "Source volume device ID is required."
-msgstr "ID do dispositivo de volume de origem é necessário."
-
-msgid "Source volume not mid-migration."
-msgstr "Volume de origem não de migração intermediária."
-
-msgid "SpaceInfo returned byarray is invalid"
-msgstr "SpaceInfo retornou byarray é inválido"
-
-#, python-format
-msgid ""
-"Specified host to map to volume %(vol)s is in unsupported host group with "
-"%(group)s."
-msgstr ""
-"O host especificado a ser mapeado para o volume %(vol)s está no grupo de "
-"hosts não suportado com %(group)s."
-
-msgid "Specified logical volume does not exist."
-msgstr "O volume lógico especificado não existe."
-
-#, python-format
-msgid "Specified snapshot group with id %s could not be found."
-msgstr ""
-"O grupo de capturas instantâneas especificado com o ID %s não pôde ser "
-"localizado."
- -msgid "Specify a password or private_key" -msgstr "Especifique uma senha ou private_key" - -msgid "Specify san_password or san_private_key" -msgstr "Especifique san_password ou san_private_key" - -msgid "" -"Specify volume type name, description, is_public or a combination thereof." -msgstr "" -"Especifique um nome de tipo de volume , a descrição, is_public ou uma " -"combinação deles." - -msgid "Split pair error." -msgstr "Erro ao dividir par." - -msgid "Split replication failed." -msgstr "Falha ao dividir replicação." - -msgid "Start LUNcopy error." -msgstr "Erro ao iniciar LUNcopy." - -msgid "State" -msgstr "Estado" - -#, python-format -msgid "State of node is wrong. Current state is %s." -msgstr "O estado do nó está errado. O estado atual é %s." - -msgid "Status" -msgstr "Status" - -msgid "Stop snapshot error." -msgstr "Erro ao parar captura instantânea." - -#, python-format -msgid "Storage Configuration Service not found on %(storageSystemName)s." -msgstr "" -"O Serviço de configuração de armazenamento não foi localizado em " -"%(storageSystemName)s." - -#, python-format -msgid "Storage HardwareId mgmt Service not found on %(storageSystemName)s." -msgstr "" -"O Serviço mgmt de HardwareId de armazenamento não foi localizado em " -"%(storageSystemName)s." - -#, python-format -msgid "Storage Profile %s not found." -msgstr "Perfil de armazenamento %s não localizado." - -#, python-format -msgid "Storage Relocation Service not found on %(storageSystemName)s." -msgstr "" -"O Serviço de realocação de armazenamento não foi localizado em " -"%(storageSystemName)s." - -#, python-format -msgid "Storage family %s is not supported." -msgstr "Família de armazenamento %s não é suportada." 
- -#, python-format -msgid "Storage group %(storageGroupName)s was not deleted successfully" -msgstr "" -"O grupo de armazenamentos %(storageGroupName)s não foi excluído com sucesso" - -#, python-format -msgid "Storage host %(svr)s not detected, verify name" -msgstr "Host de armazenamento %(svr)s não detectado; verifique o nome" - -msgid "Storage pool is not configured." -msgstr "O conjunto de armazenamento não está configurado." - -#, python-format -msgid "Storage profile: %(storage_profile)s not found." -msgstr "Perfil de armazenamento: %(storage_profile)s não encontrado." - -msgid "Storage resource could not be found." -msgstr "Recurso de armazenamento não pôde ser encontrado." - -msgid "Storage system id not set." -msgstr "ID do sistema de armazenamento não configurado." - -#, python-format -msgid "Storage system not found for pool %(poolNameInStr)s." -msgstr "Sistema de armazenamento não encontrado para pool %(poolNameInStr)s." - -#, python-format -msgid "StorageSystem %(array)s is not found." -msgstr "StorageSystem %(array)s não foi encontrado." - -#, python-format -msgid "" -"Sum of child usage '%(sum)s' is greater than free quota of '%(free)s' for " -"project '%(proj)s' for resource '%(res)s'. Please lower the limit or usage " -"for one or more of the following projects: '%(child_ids)s'" -msgstr "" -"A soma de uso filho '%(sum)s' é maior que a cota livre de '%(free)s' para o " -"projeto '%(proj)s' do recurso '%(res)s'. Diminua o limite ou o uso de um ou " -"mais dos projetos a seguir: '%(child_ids)s'" - -msgid "Switch over pair error." -msgstr "Erro ao executar switch over de par." - -msgid "Sync pair error." -msgstr "Erro de sincronização de par." - -#, python-format -msgid "System %(id)s found with bad password status - %(pass_status)s." -msgstr "" -"Sistema %(id)s localizado com um status de senha inválida - %(pass_status)s." - -#, python-format -msgid "System %(id)s found with bad status - %(status)s." 
-msgstr "Sistema %(id)s localizado com status inválido - %(status)s." - -msgid "System does not support compression." -msgstr "O sistema não suporta compactação." - -msgid "System is busy, retry operation." -msgstr "O sistema está ocupado, tente novamente a operação." - -#, python-format -msgid "" -"TSM [%(tsm)s] was not found in CloudByte storage for account [%(account)s]." -msgstr "" -"O TSM [%(tsm)s] não foi localizado no armazenamento CloudByte para a conta " -"[%(account)s]." - -msgid "Target volume type is still in use." -msgstr "Tipo de volume de destino ainda está em uso." - -msgid "Terminate connection failed" -msgstr "Finalização da conexão com falha" - -msgid "Terminate connection unable to connect to backend." -msgstr "A finalização da conexão não pode conectar-se ao backend." - -#, python-format -msgid "Terminate volume connection failed: %(err)s" -msgstr "Falha ao finalizar a conexão de volume: %(err)s" - -#, python-format -msgid "The %(type)s %(id)s source to be replicated was not found." -msgstr "A origem %(type)s %(id)s a ser replicada não foi localizada." - -msgid "" -"The 'sort_key' and 'sort_dir' parameters are deprecated and cannot be used " -"with the 'sort' parameter." -msgstr "" -"Os parâmetros 'sort_key' e 'sort_dir' foram descontinuados e não podem ser " -"usados com o parâmetro 'sort’." - -msgid "The EQL array has closed the connection." -msgstr "A matriz EQL fechou a conexão." - -#, python-format -msgid "" -"The GPFS filesystem %(fs)s is not at the required release level. Current " -"level is %(cur)s, must be at least %(min)s." -msgstr "" -"O sistema de arquivos GPFS %(fs)s não está no nível da liberação necessário. " -"O nível atual é %(cur)s, deve ser pelo menos %(min)s." - -msgid "The IP Address was not found." -msgstr "o Endereço IP não foi localizado." - -#, python-format -msgid "" -"The WebDAV request failed. Reason: %(msg)s, Return code/reason: %(code)s, " -"Source Volume: %(src)s, Destination Volume: %(dst)s, Method: %(method)s." 
-msgstr "" -"A solicitação WebDAV falhou. Motivo: %(msg)s, Código de retorno/razão: " -"%(code)s, Volume de Origem: %(src)s, Volume de Destino: %(dst)s, Método: " -"%(method)s." - -msgid "" -"The above error may show that the database has not been created.\n" -"Please create a database using 'cinder-manage db sync' before running this " -"command." -msgstr "" -"O erro acima pode mostrar que o banco de dados não foi criado.\n" -"Crie um banco de dados usando ‘cinder-manage db sync’ antes de executar esse " -"comando." - -#, python-format -msgid "" -"The array does not support the storage pool setting for SLO %(slo)s and " -"workload %(workload)s. Please check the array for valid SLOs and workloads." -msgstr "" -"A matriz não suporta a configuração do conjunto de armazenamentos para o SLO " -"%(slo)s e carga de trabalho %(workload)s. Verifique se se há SLOs e cargas " -"de trabalho válidos na matriz." - -msgid "" -"The back-end where the volume is created does not have replication enabled." -msgstr "O backend no qual o volume é criado não possui a replicação ativada. " - -#, python-format -msgid "" -"The command %(cmd)s failed. (ret: %(ret)s, stdout: %(out)s, stderr: %(err)s)" -msgstr "" -"Comando %(cmd)s com falha. (ret: %(ret)s, saída padrão: %(out)s, erro " -"padrão: %(err)s)" - -msgid "The copy should be primary or secondary" -msgstr "A cópia deve ser primária ou secundária" - -#, python-format -msgid "" -"The creation of a logical device could not be completed. (LDEV: %(ldev)s)" -msgstr "" -"A criação de um dispositivo lógico não pôde ser concluída. 
(LDEV: %(ldev)s)" - -msgid "The decorated method must accept either a volume or a snapshot object" -msgstr "" -"O método decorado deve aceitar um volume ou um objeto de captura instantânea" - -#, python-format -msgid "The device in the path %(path)s is unavailable: %(reason)s" -msgstr "O dispositivo no caminho %(path)s está indisponível: %(reason)s" - -#, python-format -msgid "The end time (%(end)s) must be after the start time (%(start)s)." -msgstr "" -"O horário de encerramento (%(end)s) deve ser posterior ao horário de início " -"(%(start)s)." - -#, python-format -msgid "The extraspec: %(extraspec)s is not valid." -msgstr "O extraspec: %(extraspec)s não é válido." - -#, python-format -msgid "The failed-over volume could not be deleted: %s" -msgstr "O volume com failover executado não pôde ser excluído: %s" - -#, python-format -msgid "The following elements are required: %s" -msgstr "Os seguintes elementos são necessários: %s" - -msgid "The host group or iSCSI target could not be added." -msgstr "O grupo de hosts ou destino de iSCSI não pôde ser incluído." - -msgid "The host group or iSCSI target was not found." -msgstr "O grupo de hosts ou destino iSCSI não foi localizado." - -msgid "" -"The host is not ready to be failed back. Please resynchronize the volumes " -"and resume replication on the 3PAR backends." -msgstr "" -"O host não está pronto para efetuar failback. Ressincronize os volumes e " -"continue a replicação nos backends 3PAR." - -msgid "" -"The host is not ready to be failed back. Please resynchronize the volumes " -"and resume replication on the LeftHand backends." -msgstr "" -"O host não está pronto para efetuar failback. Ressincronize os volumes e " -"continue a replicação nos backends LEftHand." - -msgid "" -"The host is not ready to be failed back. Please resynchronize the volumes " -"and resume replication on the Storwize backends." -msgstr "" -"O host não está pronto para efetuar failback. 
Ressincronize os volumes e " -"continue a replicação nos backends Storwize." - -#, python-format -msgid "The iSCSI CHAP user %(user)s does not exist." -msgstr "O usuário de CHAP iSCSI %(user)s não existe." - -msgid "The key cannot be None." -msgstr "A chave não pode ser Nenhum." - -#, python-format -msgid "The logical device for specified %(type)s %(id)s was already deleted." -msgstr "" -"O dispositivo lógico para o %(type)s %(id)s especificado já foi excluído." - -#, python-format -msgid "The method %(method)s is timed out. (timeout value: %(timeout)s)" -msgstr "" -"O método %(method)s atingiu o tempo limite. (valor de tempo limite: " -"%(timeout)s)" - -msgid "The method update_migrated_volume is not implemented." -msgstr "O método update_migrated_volume não está implementado." - -#, python-format -msgid "" -"The mount %(mount_path)s is not a valid Quobyte USP volume. Error: %(exc)s" -msgstr "" -"A montagem %(mount_path)s não é um volume Quobyte USP válido. Erro: %(exc)s" - -#, python-format -msgid "The parameter of the storage backend. (config_group: %(config_group)s)" -msgstr "" -"O parâmetro do backend de armazenamento. (config_group: %(config_group)s)" - -msgid "The parent backup must be available for incremental backup." -msgstr "O backup principal deve estar disponível para backup incremental." - -#, python-format -msgid "The provided snapshot '%s' is not a snapshot of the provided volume." -msgstr "" -"A captura instantânea fornecida '%s' não é uma captura instantânea do volume " -"fornecido." - -msgid "" -"The reference to the volume in the backend should have the format " -"file_system/volume_name (volume_name cannot contain '/')" -msgstr "" -"A referência ao volume no backend deve ter o formato file_system/volume_name " -"(volume_name não pode conter '/')" - -#, python-format -msgid "The remote retention count must be %s or less." -msgstr "A contagem de retenção remota deve ser %s ou menos." 
- -msgid "" -"The replication mode was not configured correctly in the volume type " -"extra_specs. If replication:mode is periodic, replication:sync_period must " -"also be specified and be between 300 and 31622400 seconds." -msgstr "" -"O modo de replicação não foi configurado corretamente no tipo de volume " -"extra_specs. Se replication:mode for periódico, replication:sync_period " -"também deverá ser especificado e estar entre 300 e 31622400 segundos." - -#, python-format -msgid "The replication sync period must be at least %s seconds." -msgstr "" -"O período de sincronização de replicação deve ser pelo menos %s segundos." - -#, python-format -msgid "" -"The requested size : %(requestedSize)s is not the same as resulting size: " -"%(resultSize)s." -msgstr "" -"O tamanho solicitado : %(requestedSize)s não é o mesmo que o tamanho " -"resultante: %(resultSize)s." - -#, python-format -msgid "The resource %(resource)s was not found." -msgstr "O recurso %(resource)s não foi localizado." - -msgid "The results are invalid." -msgstr "Os resultados são inválidos." - -#, python-format -msgid "The retention count must be %s or less." -msgstr "A contagem de retenção deve ser %s ou menos." - -msgid "The snapshot cannot be created when the volume is in maintenance mode." -msgstr "" -"A captura instantânea não pode ser criada quando o volume está no modo de " -"manutenção." - -msgid "The source volume for this WebDAV operation not found." -msgstr "O volume de origem para esta operação WebDAV não foi localizado." - -#, python-format -msgid "" -"The source volume type '%(src)s' is different than the destination volume " -"type '%(dest)s'." -msgstr "" -"O tipo de volume de origem '%(src)s' é diferente do tipo volume de destino " -"'%(dest)s'." - -#, python-format -msgid "The source volume type '%s' is not available." -msgstr "O tipo de volume de origem '%s' não está disponível." - -#, python-format -msgid "The specified %(desc)s is busy." 
-msgstr "O %(desc)s especificado está ocupado." - -#, python-format -msgid "The specified LUN does not belong to the given pool: %s." -msgstr "A LUN especificada não pertence ao conjunto fornecido: %s." - -#, python-format -msgid "" -"The specified ldev %(ldev)s could not be managed. The ldev must not be " -"mapping." -msgstr "" -"O ldev %(ldev)s especificado não pôde ser gerenciado. O ldev não deve ser de " -"mapeamento." - -#, python-format -msgid "" -"The specified ldev %(ldev)s could not be managed. The ldev must not be " -"paired." -msgstr "" -"O ldev %(ldev)s especificado não pôde ser gerenciado. O ldev não deve ser em " -"par." - -#, python-format -msgid "" -"The specified ldev %(ldev)s could not be managed. The ldev size must be in " -"multiples of gigabyte." -msgstr "" -"O ldev %(ldev)s especificado não pôde ser gerenciado. O tamanho do ldev deve " -"ser em múltiplos de gigabyte." - -#, python-format -msgid "" -"The specified ldev %(ldev)s could not be managed. The volume type must be DP-" -"VOL." -msgstr "" -"O ldev %(ldev)s especificado não pôde ser gerenciado. O tipo de volume deve " -"ser DP-VOL." - -#, python-format -msgid "" -"The specified operation is not supported. The volume size must be the same " -"as the source %(type)s. (volume: %(volume_id)s)" -msgstr "" -"A operação especificada não é suportada. O tamanho do volume deve ser igual " -"ao %(type)s da origem. (volume: %(volume_id)s)" - -msgid "The specified vdisk is mapped to a host." -msgstr "O vdisk especificado está mapeado para um host." - -msgid "The specified volume is mapped to a host." -msgstr "O volume especificado está mapeado para um host." - -#, python-format -msgid "" -"The storage array password for %s is incorrect, please update the configured " -"password." -msgstr "" -"A senha da matriz de armazenamento %s está incorreta, atualize a senha " -"configurada. " - -#, python-format -msgid "The storage backend can be used. 
(config_group: %(config_group)s)" -msgstr "" -"O backend de armazenamento pode ser utilizado. (config_group: " -"%(config_group)s)" - -#, python-format -msgid "" -"The storage device does not support %(prot)s. Please configure the device to " -"support %(prot)s or switch to a driver using a different protocol." -msgstr "" -"O dispositivo de armazenamento não suporta %(prot)s. Configure o dispositivo " -"para suportar %(prot)s ou alterne para um driver usando um protocolo " -"diferente." - -#, python-format -msgid "" -"The striped meta count of %(memberCount)s is too small for volume: " -"%(volumeName)s, with size %(volumeSize)s." -msgstr "" -"A contagem dividida de metadados de %(memberCount)s é muito pequena para o " -"volume: %(volumeName)s, com o tamanho %(volumeSize)s." - -#, python-format -msgid "" -"The type of metadata: %(metadata_type)s for volume/snapshot %(id)s is " -"invalid." -msgstr "" -"O tipo de metadados: %(metadata_type)s para volume/captura instantânea " -"%(id)s é inválido." - -#, python-format -msgid "" -"The volume %(volume_id)s could not be extended. The volume type must be " -"Normal." -msgstr "" -"O volume %(volume_id)s não pôde ser estendido. O tipo de volume deve ser " -"Normal." - -#, python-format -msgid "" -"The volume %(volume_id)s could not be unmanaged. The volume type must be " -"%(volume_type)s." -msgstr "" -"O volume %(volume_id)s não pôde ter o gerenciamento cancelado. O tipo de " -"volume deve ser %(volume_type)s." - -#, python-format -msgid "The volume %(volume_id)s is managed successfully. (LDEV: %(ldev)s)" -msgstr "O volume %(volume_id)s é gerenciado com êxito. (LDEV: %(ldev)s)" - -#, python-format -msgid "The volume %(volume_id)s is unmanaged successfully. (LDEV: %(ldev)s)" -msgstr "" -"O volume %(volume_id)s teve o gerenciamento cancelado com êxito. (LDEV: " -"%(ldev)s)" - -#, python-format -msgid "The volume %(volume_id)s to be mapped was not found." -msgstr "O volume %(volume_id)s a ser mapeado não foi localizado." 
- -msgid "The volume cannot accept transfer in maintenance mode." -msgstr "O volume não pode aceitar a transferência no modo de manutenção." - -msgid "The volume cannot be attached in maintenance mode." -msgstr "O volume não pode ser conectado no modo de manutenção." - -msgid "The volume cannot be detached in maintenance mode." -msgstr "O volume não pode ser removido no modo de manutenção." - -msgid "The volume cannot be updated during maintenance." -msgstr "O volume não pode ser atualizado durante a manutenção." - -msgid "The volume connection cannot be initialized in maintenance mode." -msgstr "A conexão do volume não pode ser inicializada no modo de manutenção." - -msgid "The volume driver requires the iSCSI initiator name in the connector." -msgstr "O driver do volume requer o nome do inicializador iSCSI no conector." - -msgid "" -"The volume is currently busy on the 3PAR and cannot be deleted at this time. " -"You can try again later." -msgstr "" -"O volume está atualmente ocupado no 3PAR e não pode ser excluído neste " -"momento. É possível tentar novamente mais tarde." - -msgid "The volume label is required as input." -msgstr "O rótulo de volume é necessário como entrada." - -#, python-format -msgid "There are no resources available for use. (resource: %(resource)s)" -msgstr "Não há recursos disponíveis para uso. (recurso: %(resource)s)" - -msgid "There are no valid ESX hosts." -msgstr "Não há hosts ESX válidos." - -msgid "There are no valid datastores." -msgstr "Não há nenhum datastore válido." - -#, python-format -msgid "" -"There is no designation of the %(param)s. The specified storage is essential " -"to manage the volume." -msgstr "" -"Não há designação dos %(param)s. O armazenamento especificado é essencial " -"para gerenciar o volume." - -msgid "" -"There is no designation of the ldev. The specified ldev is essential to " -"manage the volume." -msgstr "" -"Não há designação do ldev. O ldev especificado é essencial para gerenciar o " -"volume." 
- -msgid "There is no metadata in DB object." -msgstr "Não há metadados no objeto do BD." - -#, python-format -msgid "There is no share which can host %(volume_size)sG" -msgstr "Não há compartilhamento que possa hospedar %(volume_size)sG" - -#, python-format -msgid "There is no share which can host %(volume_size)sG." -msgstr "Não há nenhum compartilhamento que possa armazenar %(volume_size)sG." - -#, python-format -msgid "There is no such action: %s" -msgstr "Essa ação não existe: %s" - -msgid "There is no virtual disk device." -msgstr "Não há nenhum dispositivo de disco virtual." - -#, python-format -msgid "There was an error adding the volume to the remote copy group: %s." -msgstr "Ocorreu um erro ao incluir o volume no grupo de cópias remotas: %s." - -#, python-format -msgid "There was an error creating the cgsnapshot: %s" -msgstr "Ocorreu um erro ao criar o cgsnapshot %s." - -#, python-format -msgid "There was an error creating the remote copy group: %s." -msgstr "Ocorreu um erro ao criar o grupo de cópias remotas: %s." - -#, python-format -msgid "" -"There was an error setting the sync period for the remote copy group: %s." -msgstr "" -"Ocorreu um erro ao configurar o período de sincronização para o grupo de " -"cópias remotas: %s." - -#, python-format -msgid "" -"There was an error setting up a remote copy group on the 3PAR arrays: " -"('%s'). The volume will not be recognized as replication type." -msgstr "" -"Ocorreu um erro ao configurar um grupo de cópias remotas nas matrizes 3PAR:" -"('%s'). O volume não será reconhecido com um tipo de replicação." - -#, python-format -msgid "" -"There was an error setting up a remote schedule on the LeftHand arrays: " -"('%s'). The volume will not be recognized as replication type." -msgstr "" -"Ocorreu um erro ao configurar um planejamento remoto nas matrizes " -"LeftHand('%s'). O volume não será reconhecido com um tipo de replicação." - -#, python-format -msgid "There was an error starting remote copy: %s." 
-msgstr "Ocorreu um erro ao iniciar a cópia remota: %s" - -#, python-format -msgid "There's no Gluster config file configured (%s)" -msgstr "Não há nenhum arquivo de configuração do Gluster configurado (%s)" - -#, python-format -msgid "There's no NFS config file configured (%s)" -msgstr "Não há nenhum arquivo de configuração do NFS configurado (%s)" - -#, python-format -msgid "" -"There's no Quobyte volume configured (%s). Example: quobyte:///" -"" -msgstr "" -"Não há volume Quobyte configurado (%s). Exemplo: quobyte: ///" - -msgid "Thin provisioning not supported on this version of LVM." -msgstr "Thin provisioning não suportado nesta versão do LVM." - -msgid "This driver does not support deleting in-use snapshots." -msgstr "" -"Este driver não oferece suporte à exclusão de capturas instantâneas em uso." - -msgid "This driver does not support snapshotting in-use volumes." -msgstr "" -"Este driver não oferece suporte a capturas instantâneas de volumes em uso." - -msgid "This request was rate-limited." -msgstr "Essa solicitação estava limitada a taxa." - -#, python-format -msgid "" -"This system platform (%s) is not supported. This driver supports only Win32 " -"platforms." -msgstr "" -"Essa plataforma do sistema (%s) não é suportada. Esse driver oferece suporte " -"somente a plataformas Win32." - -#, python-format -msgid "Tier Policy Service not found for %(storageSystemName)s." -msgstr "" -"O Serviço da política de camada não foi localizado para " -"%(storageSystemName)s." - -#, python-format -msgid "Timed out while waiting for Nova update for creation of snapshot %s." -msgstr "" -"Tempo limite atingido ao aguardar atualização de Nova para criação de " -"captura instantânea %s." - -#, python-format -msgid "" -"Timed out while waiting for Nova update for deletion of snapshot %(id)s." -msgstr "" -"Tempo limite atingido ao aguardar atualização de Nova para exclusão de " -"captura instantânea %(id)s." 
- -#, python-format -msgid "Timeout while calling %s " -msgstr "Tempo limite ao chamar %s." - -#, python-format -msgid "Timeout while requesting %(service)s API." -msgstr "Tempo limite ao solicitar %(service)s da API." - -#, python-format -msgid "Timeout while requesting capabilities from backend %(service)s." -msgstr "Tempo limite ao solicitar do backend %(service)s." - -#, python-format -msgid "Transfer %(transfer_id)s could not be found." -msgstr "Transferência %(transfer_id)s não pôde ser encontrada." - -#, python-format -msgid "" -"Transfer %(transfer_id)s: Volume id %(volume_id)s in unexpected state " -"%(status)s, expected awaiting-transfer" -msgstr "" -"Transferência %(transfer_id)s: ID do volume %(volume_id)s em estado " -"inesperado %(status)s, esperava-se aguardando transferência" - -#, python-format -msgid "" -"Trying to import backup metadata from id %(meta_id)s into backup %(id)s." -msgstr "" -"Tentando importar metadados de backup do ID %(meta_id)s para o backup %(id)s." - -#, python-format -msgid "" -"Tune volume task stopped before it was done: volume_name=%(volume_name)s, " -"task-status=%(status)s." -msgstr "" -"O ajuste da tarefa do volume parou antes de ele ter sido feito: volume_name=" -"%(volume_name)s, task-status=%(status)s." - -#, python-format -msgid "" -"Type %(type_id)s is already associated with another qos specs: " -"%(qos_specs_id)s" -msgstr "" -"O tipo %(type_id)s já está associado a um outro qos specs: %(qos_specs_id)s" - -msgid "Type access modification is not applicable to public volume type." -msgstr "" -"A modificação de tipo de acesso não é aplicável ao tipo de volume público." - -msgid "Type cannot be converted into NaElement." -msgstr "O tipo não pode ser convertido em NaElement." - -#, python-format -msgid "TypeError: %s" -msgstr "TypeError: %s" - -#, python-format -msgid "UUIDs %s are in both add and remove volume list." -msgstr "UUIDs %s estão na lista de inclusão e remoção de volume." 
- -#, python-format -msgid "Unable to access the Storwize back-end for volume %s." -msgstr "Não é possível acessar o backend do Storwise para o volume %s." - -msgid "Unable to access the backend storage via file handle." -msgstr "" -"Não é possível acessar o armazenamento de backend por meio da manipulação de " -"arquivos." - -#, python-format -msgid "Unable to access the backend storage via the path %(path)s." -msgstr "" -"Não é possível acessar o armazenamento de backend por meio do caminho " -"%(path)s." - -#, python-format -msgid "Unable to add Cinder host to apphosts for space %(space)s" -msgstr "" -"Não é possível incluir o host Cinder nos apphosts para o espaço %(space)s" - -#, python-format -msgid "Unable to complete failover of %s." -msgstr "Não é possível concluir o failover de %s." - -msgid "Unable to connect or find connection to host" -msgstr "Não é possível conectar-se ou localizar a conexão com o host" - -#, python-format -msgid "Unable to create consistency group %s" -msgstr "Não foi possível criar o grupo de consistências %s" - -msgid "Unable to create lock. Coordination backend not started." -msgstr "Não é possível criar bloqueio. Backend de coordenação não iniciado." - -#, python-format -msgid "" -"Unable to create or get default storage group for FAST policy: " -"%(fastPolicyName)s." -msgstr "" -"Não é possível criar ou obter o grupo de armazenamento padrão para a " -"política FAST: %(fastPolicyName)s." - -#, python-format -msgid "Unable to create replica clone for volume %s." -msgstr "Não é possível criar clone de réplica para o volume %s." - -#, python-format -msgid "Unable to create the relationship for %s." -msgstr "Não é possível criar o relacionamento para %s." - -#, python-format -msgid "Unable to create volume %(name)s from %(snap)s." -msgstr "Não é possível criar o volume %(name)s a partir do %(snap)s." - -#, python-format -msgid "Unable to create volume %(name)s from %(vol)s." 
-msgstr "Não é possível criar o volume %(name)s a partir do %(vol)s." - -#, python-format -msgid "Unable to create volume %s" -msgstr "Não é possível criar o volume %s" - -msgid "Unable to create volume. Backend down." -msgstr "Não é possível criar o volume. Backend inativo." - -#, python-format -msgid "Unable to delete Consistency Group snapshot %s" -msgstr "" -"Não é possível excluir a captura instantânea %s do Grupo de consistências" - -#, python-format -msgid "Unable to delete snapshot %(id)s, status: %(status)s." -msgstr "" -"Não é possível excluir a captura instantânea %(id)s, status: %(status)s." - -#, python-format -msgid "Unable to delete snapshot policy on volume %s." -msgstr "Não é possível excluir política de captura instantânea no volume %s." - -#, python-format -msgid "" -"Unable to delete the target volume for volume %(vol)s. Exception: %(err)s." -msgstr "" -"Não é possível excluir o volume de destino para o volume %(vol)s. Exceção: " -"%(err)s." - -msgid "" -"Unable to detach volume. Volume status must be 'in-use' and attach_status " -"must be 'attached' to detach." -msgstr "" -"Não é possível separar o volume. O status do volume deve ser 'em uso' e " -"attach_status deve ser 'anexado' para separar. " - -#, python-format -msgid "" -"Unable to determine secondary_array from supplied secondary: %(secondary)s." -msgstr "" -"Não é possível determinar a secondary_array a partir da matriz secundária " -"fornecida: %(secondary)s." - -#, python-format -msgid "Unable to determine snapshot name in Purity for snapshot %(id)s." -msgstr "" -"Não é possível determinar o nome da captura instantânea na Pureza para " -"captura instantânea %(id)s." - -msgid "Unable to determine system id." -msgstr "Não é possível determinar o ID do sistema." - -msgid "Unable to determine system name." -msgstr "Não é possível determinar o nome do sistema." 
- -#, python-format -msgid "" -"Unable to do manage snapshot operations with Purity REST API version " -"%(api_version)s, requires %(required_versions)s." -msgstr "" -"Não é possível fazer operações de gerenciamento de captura instantânea com a " -"versão da API REST Purity %(api_version)s, requer %(required_versions)s." - -#, python-format -msgid "" -"Unable to do replication with Purity REST API version %(api_version)s, " -"requires one of %(required_versions)s." -msgstr "" -"Não é possível executar replicação com a versão da API REST Purity " -"%(api_version)s, requer uma das %(required_versions)s." - -#, python-format -msgid "Unable to establish the partnership with the Storwize cluster %s." -msgstr "Não é possível estabelecer a parceria com o cluster Storwize: %s" - -#, python-format -msgid "Unable to extend volume %s" -msgstr "Não é possível estender o volume %s" - -#, python-format -msgid "" -"Unable to fail-over the volume %(id)s to the secondary back-end, because the " -"replication relationship is unable to switch: %(error)s" -msgstr "" -"Não é possível executar failover do volume %(id)s para o backend secundário " -"porque o relacionamento da replicação é incapaz de alternar: %(error)s" - -msgid "" -"Unable to failback to \"default\", this can only be done after a failover " -"has completed." -msgstr "" -"Não é possível executar failover para \"padrão', isso pode ser feito somente " -"após um failover ter sido concluído." - -#, python-format -msgid "Unable to failover to replication target:%(reason)s)." -msgstr "" -"Não é possível executar failover para o destino de replicação:%(reason)s." - -msgid "Unable to fetch connection information from backend." -msgstr "Não foi possível buscar informações de conexão do backend." 
- -#, python-format -msgid "Unable to fetch connection information from backend: %(err)s" -msgstr "Não é possível buscar informações de conexão do backend: %(err)s" - -#, python-format -msgid "Unable to find Purity ref with name=%s" -msgstr "Não é possível localizar ref Pureza com name=%s" - -#, python-format -msgid "Unable to find Volume Group: %(vg_name)s" -msgstr "Não é possível localizar o Grupo de Volumes: %(vg_name)s" - -msgid "Unable to find failover target, no secondary targets configured." -msgstr "" -"Não é possível localizar o destino de failover, nenhum destino secundário " -"configurado." - -msgid "Unable to find iSCSI mappings." -msgstr "Não é possível localizar mapeamentos de iSCSI." - -#, python-format -msgid "Unable to find ssh_hosts_key_file: %s" -msgstr "Não foi possível encontrar ssh_hosts_key_file: %s" - -msgid "Unable to find system log file!" -msgstr "Não é possível encontrar o arquivo de log do sistema!" - -#, python-format -msgid "" -"Unable to find viable pg snapshot to use forfailover on selected secondary " -"array: %(id)s." -msgstr "" -"Não é possível localizar uma captura instantânea pg viável a ser utilizada " -"para failover na matriz secundária selecionada: %(id)s." - -#, python-format -msgid "" -"Unable to find viable secondary array fromconfigured targets: %(targets)s." -msgstr "" -"Não é possível localizar uma matriz secundária viável a partir dos destinos " -"configurados: %(targets)s." - -#, python-format -msgid "Unable to find volume %s" -msgstr "Não é possível localizar o volume %s" - -#, python-format -msgid "Unable to get a block device for file '%s'" -msgstr "Não foi possível obter um dispositivo de bloco para o arquivo '%s'" - -#, python-format -msgid "" -"Unable to get configuration information necessary to create a volume: " -"%(errorMessage)s." -msgstr "" -"Não é possível obter informações de configuração necessárias para criar um " -"volume: %(errorMessage)s." - -msgid "Unable to get corresponding record for pool." 
-msgstr "Não é possível obter registro correspondente para o pool." - -#, python-format -msgid "" -"Unable to get information on space %(space)s, please verify that the cluster " -"is running and connected." -msgstr "" -"Não é possível obter informações sobre %(space)s; verifique se o cluster " -"está em execução e conectado." - -msgid "" -"Unable to get list of IP addresses on this host, check permissions and " -"networking." -msgstr "" -"Não é possível obter lista de endereços IP neste host; verifique as " -"permissões e a rede." - -msgid "" -"Unable to get list of domain members, check that the cluster is running." -msgstr "" -"Não é possível obter lista de membros do domínio; verifique se o cluster " -"está em execução." - -msgid "" -"Unable to get list of spaces to make new name. Please verify the cluster is " -"running." -msgstr "" -"Não é possível obter lista de espaços para fazer novo nome. Verifique se o " -"cluster está em execução." - -#, python-format -msgid "Unable to get stats for backend_name: %s" -msgstr "Não é possível obter estatísticas para backend_name: %s" - -msgid "Unable to get storage volume from job." -msgstr "Não é possível obter volume de armazenamento a partir da tarefa. " - -#, python-format -msgid "Unable to get target endpoints for hardwareId %(hardwareIdInstance)s." -msgstr "" -"Não é possível obter terminais de destino para hardwareId " -"%(hardwareIdInstance)s." - -msgid "Unable to get the name of the masking view." -msgstr "Não é possível obter o nome da visualização de máscara." - -msgid "Unable to get the name of the portgroup." -msgstr "Não é possível obter o nome do grupo de portas." - -#, python-format -msgid "Unable to get the replication relationship for volume %s." -msgstr "Não é possível obter o relacionamento de replicação para o volume %s." - -#, python-format -msgid "" -"Unable to import volume %(deviceId)s to cinder. It is the source volume of " -"replication session %(sync)s." 
-msgstr "" -"Não é possível importar o volume %(deviceId)s para o cinder. É o volume de " -"origem da sessão de replicação %(sync)s." - -#, python-format -msgid "" -"Unable to import volume %(deviceId)s to cinder. The external volume is not " -"in the pool managed by current cinder host." -msgstr "" -"Não é possível importar o volume %(deviceId)s para o cinder. O volume " -"externo não está no conjunto gerenciado pelo host cinder atual." - -#, python-format -msgid "" -"Unable to import volume %(deviceId)s to cinder. Volume is in masking view " -"%(mv)s." -msgstr "" -"Não é possível importar o volume %(deviceId)s para o cinder. O volume está " -"na visualização de mascaramento %(mv)s." - -#, python-format -msgid "Unable to load CA from %(cert)s %(e)s." -msgstr "Não é possível carregar CA a partir de %(cert)s %(e)s." - -#, python-format -msgid "Unable to load cert from %(cert)s %(e)s." -msgstr "Não é possível carregar o certificado a partir de %(cert)s %(e)s." - -#, python-format -msgid "Unable to load key from %(cert)s %(e)s." -msgstr "Não é possível carregar chave a partir de %(cert)s %(e)s." - -#, python-format -msgid "Unable to locate account %(account_name)s on Solidfire device" -msgstr "" -"Não foi possível localizar a conta %(account_name)s no dispositivo Solidfire" - -#, python-format -msgid "Unable to locate an SVM that is managing the IP address '%s'" -msgstr "" -"Não é possível localizar um SVM que está gerenciando o endereço IP ‘%s'" - -#, python-format -msgid "Unable to locate specified replay profiles %s " -msgstr "Não é possível localizar perfis de reprodução especificados %s" - -#, python-format -msgid "" -"Unable to manage existing volume. Volume %(volume_ref)s already managed." -msgstr "" -"Não é possível gerenciar o volume existente. Volume %(volume_ref)s já " -"gerenciado." 
- -#, python-format -msgid "Unable to manage volume %s" -msgstr "Não é possível gerenciar o volume %s" - -msgid "Unable to map volume" -msgstr "Não é possível mapear o volume" - -msgid "Unable to map volume." -msgstr "Não é possível mapear o volume." - -msgid "Unable to parse attributes." -msgstr "Não é possível analisar atributos." - -#, python-format -msgid "" -"Unable to promote replica to primary for volume %s. No secondary copy " -"available." -msgstr "" -"Não é possível promover réplica primária para o volume %s. Nenhuma cópia " -"secundária disponível." - -msgid "" -"Unable to re-use a host that is not managed by Cinder with " -"use_chap_auth=True," -msgstr "" -"Não é possível reutilizar um host que não é gerenciado pelo Cinder com " -"use_chap_auth=True," - -msgid "Unable to re-use host with unknown CHAP credentials configured." -msgstr "" -"Não é possível reutilizar o host com credenciais CHAP desconhecidas " -"configuradas." - -#, python-format -msgid "Unable to rename volume %(existing)s to %(newname)s" -msgstr "Não é possível renomear o volume %(existing)s para %(newname)s" - -#, python-format -msgid "Unable to retrieve snapshot group with id of %s." -msgstr "" -"Não é possível recuperar o grupo de capturas instantâneas com ID de %s." - -#, python-format -msgid "" -"Unable to retype %(specname)s, expected to receive current and requested " -"%(spectype)s values. Value received: %(spec)s" -msgstr "" -"Não é possível redefinir %(specname)s, é esperado receber valores " -"%(spectype)s atuais e solicitados. Valor recebido: %(spec)s" - -#, python-format -msgid "" -"Unable to retype: A copy of volume %s exists. Retyping would exceed the " -"limit of 2 copies." -msgstr "" -"Não é possível digitar novamente: Uma cópia do volume %s existe. Digitar " -"novamente excederia o limite de 2 cópias." - -#, python-format -msgid "" -"Unable to retype: Current action needs volume-copy, it is not allowed when " -"new type is replication. 
Volume = %s" -msgstr "" -"Não é possível digitar novamente: A ação atual precisa de cópia de volume, " -"isso não é permitido quando a nova digitação for a replicação. Volume = %s" - -#, python-format -msgid "" -"Unable to set up mirror mode replication for %(vol)s. Exception: %(err)s." -msgstr "" -"Não é possível configurar a replicação de modo de espelho para %(vol)s. " -"Exceção: %(err)s." - -#, python-format -msgid "Unable to snap Consistency Group %s" -msgstr "Não é possível capturar o Grupo de consistências %s" - -msgid "Unable to terminate volume connection from backend." -msgstr "Não foi possível finalizar conexão do volume do backend." - -#, python-format -msgid "Unable to terminate volume connection: %(err)s" -msgstr "Não é possível terminar conexão do volume: %(err)s" - -#, python-format -msgid "Unable to update consistency group %s" -msgstr "Não é possível atualizar o grupo de consistência %s" - -#, python-format -msgid "" -"Unable to verify initiator group: %(igGroupName)s in masking view " -"%(maskingViewName)s. " -msgstr "" -"Não é possível verificar o grupo do inicializador: %(igGroupName)s na " -"visualização de mascaramento %(maskingViewName)s. " - -msgid "Unacceptable parameters." -msgstr "Parâmetros inaceitáveis." - -#, python-format -msgid "" -"Unexecpted mapping status %(status)s for mapping %(id)s. Attributes: " -"%(attr)s." -msgstr "" -"Status de mapeamento inesperado %(status)s para o mapeamento %(id)s. " -"Atributos: %(attr)s." - -#, python-format -msgid "" -"Unexpected CLI response: header/row mismatch. header: %(header)s, row: " -"%(row)s." -msgstr "" -"Resposta da CLI inesperada: incompatibilidade de cabeçalho/linha. cabeçalho: " -"%(header)s, linha: %(row)s." - -#, python-format -msgid "" -"Unexpected mapping status %(status)s for mapping%(id)s. Attributes: %(attr)s." -msgstr "" -"Status de mapeamento inesperado %(status)s para mapeamento %(id)s. " -"Atributos: %(attr)s." - -#, python-format -msgid "Unexpected output. 
Expected [%(expected)s] but received [%(output)s]" -msgstr "Saída inesperada. Esperada [%(expected)s], mas recebida [%(output)s]" - -msgid "Unexpected response from Nimble API" -msgstr "Resposta inesperada da API do Nimble" - -msgid "Unexpected response from Tegile IntelliFlash API" -msgstr "Resposta inesperada da API do Tegile IntelliFlash" - -msgid "Unexpected status code" -msgstr "Código de status inesperado" - -#, python-format -msgid "" -"Unexpected status code from the switch %(switch_id)s with protocol " -"%(protocol)s for url %(page)s. Error: %(error)s" -msgstr "" -"Código de status inesperado a partir do comutador %(switch_id)s com o " -"protocolo %(protocol)s para a URL %(page)s. Erro: %(error)s" - -msgid "Unknown Gluster exception" -msgstr "Exceção de Gluster desconhecida" - -msgid "Unknown NFS exception" -msgstr "Exceção NFS desconhecida" - -msgid "Unknown RemoteFS exception" -msgstr "Exceção RemoteFS desconhecida" - -msgid "Unknown SMBFS exception." -msgstr "Exceção SMBFS desconhecida." - -msgid "Unknown Virtuozzo Storage exception" -msgstr "Exceção de armazenamento Virtuozzo desconhecido" - -msgid "Unknown action" -msgstr "Ação desconhecida" - -#, python-format -msgid "" -"Unknown if the volume: %s to be managed is already being managed by Cinder. " -"Aborting manage volume. Please add 'cinder_managed' custom schema property " -"to the volume and set its value to False. Alternatively, Set the value of " -"cinder config policy 'zfssa_manage_policy' to 'loose' to remove this " -"restriction." -msgstr "" -"Desconhecido se o volume: %s a ser gerenciado estiver sendo gerenciado pelo " -"Cinder. Interrompendo gerenciamento de volume. Inclua a propriedade de " -"esquema customizado 'cinder_managed' no volume e configure seu valor para " -"False. Como alternativa, defina o valor da política de configuração do " -"Cinder 'zfssa_manage_policy' para 'loose' para remover essa restrição." 
- -#, python-format -msgid "" -"Unknown if the volume: %s to be managed is already being managed by Cinder. " -"Aborting manage volume. Please add 'cinder_managed' custom schema property " -"to the volume and set its value to False. Alternatively, set the value of " -"cinder config policy 'zfssa_manage_policy' to 'loose' to remove this " -"restriction." -msgstr "" -"Desconhecido se o volume: %s a ser gerenciado estiver sendo gerenciado pelo " -"Cinder. Interrompendo gerenciamento de volume. Inclua a propriedade de " -"esquema customizado 'cinder_managed' no volume e configure seu valor para " -"False. Como alternativa, defina o valor da política de configuração do " -"Cinder 'zfssa_manage_policy' para 'loose' para remover essa restrição." - -#, python-format -msgid "Unknown operation %s." -msgstr "Operação desconhecida %s." - -#, python-format -msgid "Unknown or unsupported command %(cmd)s" -msgstr "Comando desconhecido ou não suportado %(cmd)s" - -#, python-format -msgid "Unknown protocol: %(protocol)s." -msgstr "Protocolo desconhecido: %(protocol)s." - -#, python-format -msgid "Unknown quota resources %(unknown)s." -msgstr "Recursos da cota desconhecidos %(unknown)s." - -msgid "Unknown sort direction, must be 'desc' or 'asc'" -msgstr "Direção de classificação desconhecida; deve ser 'desc' ou 'asc'" - -msgid "Unknown sort direction, must be 'desc' or 'asc'." -msgstr "Direção de classificação desconhecida; deve ser 'desc' ou 'asc'." - -msgid "Unmanage and cascade delete options are mutually exclusive." -msgstr "" -"As opções de exclusão não gerenciadas e em cascata são mutuamente exclusivas." - -msgid "Unmanage volume not implemented." -msgstr "Volume não gerenciado não implementado" - -msgid "Unmanaging of snapshots from 'failed-over' volumes is not allowed." -msgstr "" -"Não é permitido remover gerenciamento de capturas instantâneas para volumes " -"'com failover executado'. " - -msgid "Unmanaging of snapshots from failed-over volumes is not allowed." 
-msgstr "" -"Não é permitido remover gerenciamento de capturas instantâneas para volumes " -"com failover executado. " - -#, python-format -msgid "Unrecognized QOS keyword: \"%s\"" -msgstr "Palavra-chave de QOS desconhecida: \"%s\"" - -#, python-format -msgid "Unrecognized backing format: %s" -msgstr "Formato de auxiliar não reconhecido: %s" - -#, python-format -msgid "Unrecognized read_deleted value '%s'" -msgstr "Valor read_deleted não reconhecido '%s'" - -#, python-format -msgid "Unset gcs options: %s" -msgstr "Desconfigurar opções gcs: %s" - -msgid "Unsupported Content-Type" -msgstr "Tipo de Conteúdo Não Suportado" - -msgid "" -"Unsupported Data ONTAP version. Data ONTAP version 7.3.1 and above is " -"supported." -msgstr "" -"Versão do Data ONTAP não suportada. Data ONTAP versão 7.3.1 e acima são " -"suportados." - -#, python-format -msgid "Unsupported backup metadata version (%s)" -msgstr "Versão de metadados de backup não suportada (%s)" - -msgid "Unsupported backup metadata version requested" -msgstr "Requisitada versão de metadados de backups não-suportados" - -msgid "Unsupported backup verify driver" -msgstr "Backup não suportado, verificar driver" - -#, python-format -msgid "" -"Unsupported firmware on switch %s. Make sure switch is running firmware v6.4 " -"or higher" -msgstr "" -"Firmware não suportado no comutador %s. Certifique-se de que o comutador " -"esteja executando firmware versão v6.4 ou superior" - -#, python-format -msgid "Unsupported volume format: %s " -msgstr "Formato de volume não suportado: %s " - -msgid "Update QoS policy error." -msgstr "Erro ao atualizar política de QoS." - -msgid "" -"Update and delete quota operations can only be made by an admin of immediate " -"parent or by the CLOUD admin." -msgstr "" -"As operações de atualização e exclusão de cota podem ser feitas somente por " -"um administrador de pai imediato ou pelo administrador CLOUD." 
- -msgid "" -"Update and delete quota operations can only be made to projects in the same " -"hierarchy of the project in which users are scoped to." -msgstr "" -"As operações de atualização e exclusão de cota podem ser feitas somente para " -"projetos na mesma hierarquia do projeto no qual os usuários estão com escopo " -"definido." - -msgid "Update list, doesn't include volume_id" -msgstr "A lista de atualização não inclui volume _id" - -msgid "Updated At" -msgstr "Atualizado em" - -msgid "Upload to glance of attached volume is not supported." -msgstr "Upload para glance do volume conectado não é suportado." - -msgid "Use ALUA to associate initiator to host error." -msgstr "Erro ao usar ALUA para associar o inicializador ao host." - -msgid "" -"Use CHAP to associate initiator to host error. Please check the CHAP " -"username and password." -msgstr "" -"Erro ao usar o CHAP para associar o inicializador ao host. Verifique o nome " -"do usuário e a senha do CHAP." - -msgid "User ID" -msgstr "ID de Usuário" - -msgid "User does not have admin privileges" -msgstr "O usuário não tem privilégios de administrador" - -msgid "User not authorized to perform WebDAV operations." -msgstr "O usuário não está autorizado a executar operações do WebDAV." - -msgid "UserName is not configured." -msgstr "UserName não está configurado." - -msgid "UserPassword is not configured." -msgstr "UserPassword não está configurado." - -msgid "V2 rollback, volume is not in any storage group." -msgstr "Retrocesso V2, o volume não está em nenhum grupo de armazenamentos." - -msgid "V3 rollback" -msgstr "Retrocesso V3" - -msgid "VF is not enabled." -msgstr "O VF não está ativado. " - -#, python-format -msgid "VV Set %s does not exist." -msgstr "O Conjunto VV %s não existe." 
- -#, python-format -msgid "Valid consumer of QoS specs are: %s" -msgstr "Os consumidores válidos de QoS specs são: %s" - -#, python-format -msgid "Valid control location are: %s" -msgstr "O local de controle válido é: %s" - -#, python-format -msgid "Validate volume connection failed (error: %(err)s)." -msgstr "Falha ao validar conexão do volume (erro: %(err)s)." - -#, python-format -msgid "" -"Value \"%(value)s\" is not valid for configuration option \"%(option)s\"" -msgstr "" -"O valor \"%(value)s\" não é válido para a opção de configuração \"%(option)s" -"\"" - -#, python-format -msgid "Value %(param)s for %(param_string)s is not a boolean." -msgstr "O valor %(param)s para %(param_string)s não é um booleano." - -msgid "Value required for 'scality_sofs_config'" -msgstr "Valor necessário para 'scality_sofs_config'" - -#, python-format -msgid "ValueError: %s" -msgstr "ValueError: %s" - -#, python-format -msgid "Vdisk %(name)s not involved in mapping %(src)s -> %(tgt)s." -msgstr "Vdisk %(name)s não envolvido no mapeamento %(src)s -> %(tgt)s." - -#, python-format -msgid "" -"Version %(req_ver)s is not supported by the API. Minimum is %(min_ver)s and " -"maximum is %(max_ver)s." -msgstr "" -"A versão %(req_ver)s não é suportada pela API. A versão mínima é " -"%(min_ver)s e a máxima é %(max_ver)s." - -#, python-format -msgid "VersionedObject %s cannot retrieve object by id." -msgstr "O VersionedObject %s não pode recuperar o objeto pelo ID." - -#, python-format -msgid "VersionedObject %s does not support conditional update." -msgstr "O VersionedObject %s não suporta atualização condicional." - -#, python-format -msgid "Virtual volume '%s' doesn't exist on array." -msgstr "O volume virtual '%s' não existe na matriz." - -#, python-format -msgid "Vol copy job for dest %s failed." -msgstr "Tarefa de cópia do vol para dest %s falhou." - -#, python-format -msgid "Volume %(deviceID)s not found." -msgstr "Volume %(deviceID)s não localizado." 
- -#, python-format -msgid "" -"Volume %(name)s not found on the array. Cannot determine if there are " -"volumes mapped." -msgstr "" -"Volume %(name)s não localizado na matriz. Não é possível determinar se há " -"volumes mapeados." - -#, python-format -msgid "Volume %(name)s was created in VNX, but in %(state)s state." -msgstr "O volume %(name)s foi criado no VNX, mas no estado %(state)s." - -#, python-format -msgid "Volume %(vol)s could not be created in pool %(pool)s." -msgstr "O volume %(vol)s não pôde ser criado no conjunto %(pool)s." - -#, python-format -msgid "Volume %(vol1)s does not match with snapshot.volume_id %(vol2)s." -msgstr "O volume %(vol1)s não corresponde ao snapshot.volume_id %(vol2)s." - -#, python-format -msgid "" -"Volume %(vol_id)s status must be available to update readonly flag, but " -"current status is: %(vol_status)s." -msgstr "" -"O status do volume %(vol_id)s deve estar disponível para atualizar a " -"sinalização somente leitura, mas o status atual é: %(vol_status)s." - -#, python-format -msgid "" -"Volume %(vol_id)s status must be available, but current status is: " -"%(vol_status)s." -msgstr "" -"O status do volume %(vol_id)s deve ser disponível, mas o status atual é: " -"%(vol_status)s." - -#, python-format -msgid "Volume %(volume_id)s could not be found." -msgstr "O volume %(volume_id)s não pôde ser localizado." - -#, python-format -msgid "" -"Volume %(volume_id)s has no administration metadata with key " -"%(metadata_key)s." -msgstr "" -"O volume %(volume_id)s não possui metadados de administração com chave " -"%(metadata_key)s." - -#, python-format -msgid "Volume %(volume_id)s has no metadata with key %(metadata_key)s." -msgstr "" -"O volume %(volume_id)s não possui metadados com a chave %(metadata_key)s." 
- -#, python-format -msgid "" -"Volume %(volume_id)s is currently mapped to unsupported host group %(group)s" -msgstr "" -"O volume %(volume_id)s está mapeado atualmente para o grupo de hosts não " -"suportado %(group)s" - -#, python-format -msgid "Volume %(volume_id)s is not currently mapped to host %(host)s" -msgstr "" -"O volume %(volume_id)s não está mapeado atualmente para o host %(host)s" - -#, python-format -msgid "Volume %(volume_id)s is still attached, detach volume first." -msgstr "O volume %(volume_id)s ainda está anexado, separe o volume primeiro." - -#, python-format -msgid "Volume %(volume_id)s replication error: %(reason)s" -msgstr "Erro de replicação do volume %(volume_id)s: %(reason)s" - -#, python-format -msgid "Volume %(volume_name)s is busy." -msgstr "O volume %(volume_name)s está ocupado." - -#, python-format -msgid "Volume %s could not be created from source volume." -msgstr "O volume %s não pôde ser criado a partir do volume de origem." - -#, python-format -msgid "Volume %s could not be created on shares." -msgstr "O volume %s não pôde ser criado em compartilhamentos." - -#, python-format -msgid "Volume %s could not be created." -msgstr "O volume %s não pôde ser criado." - -#, python-format -msgid "Volume %s does not exist in Nexenta SA" -msgstr "O volume %s não existe no Nexenta SA." - -#, python-format -msgid "Volume %s does not exist in Nexenta Store appliance" -msgstr "O volume %s não existe no dispositivo Nexenta Store." - -#, python-format -msgid "Volume %s does not exist on the array." -msgstr "O volume %s não existe na matriz." - -#, python-format -msgid "Volume %s does not have provider_location specified, skipping." -msgstr "O volume %s não possui provider_location especificado, ignorando." - -#, python-format -msgid "Volume %s doesn't exist on array." -msgstr "O volume %s não existe na matriz." - -#, python-format -msgid "Volume %s doesn't exist on the ZFSSA backend." -msgstr "O volume %s não existe no backend ZFSSA." 
- -#, python-format -msgid "Volume %s is already managed by OpenStack." -msgstr "O volume %s já é gerenciado pelo OpenStack." - -#, python-format -msgid "" -"Volume %s is not of replicated type. This volume needs to be of a volume " -"type with the extra spec replication_enabled set to ' True' to support " -"replication actions." -msgstr "" -"O volume %s não é do tipo replicado. Esse volume precisa ser de um tipo de " -"volume com replication_enabled de especificação extra configurado para ' " -"True'para suportar ações de replicação." - -#, python-format -msgid "" -"Volume %s is online. Set volume to offline for managing using OpenStack." -msgstr "" -"O volume %s está on-line. Configure o volume para off-line gerenciar o uso " -"do OpenStack." - -#, python-format -msgid "Volume %s must not be part of a consistency group." -msgstr "O volume %s não deve ser parte de um grupo de consistências." - -#, python-format -msgid "Volume %s not found." -msgstr "Volume %s não localizado." - -#, python-format -msgid "Volume %s: Error trying to extend volume" -msgstr "Volume %s: Erro ao tentar estender o volume" - -#, python-format -msgid "Volume (%s) already exists on array" -msgstr "O volume (%s) já existe na matriz" - -#, python-format -msgid "Volume (%s) already exists on array." -msgstr "Volume (%s) já existe na matriz." - -#, python-format -msgid "Volume Group %s does not exist" -msgstr "O Grupo de Volume %s não existe" - -#, python-format -msgid "Volume Type %(id)s already exists." -msgstr "O Tipo de Volume %(id)s já existe." - -#, python-format -msgid "" -"Volume Type %(volume_type_id)s deletion is not allowed with volumes present " -"with the type." -msgstr "" -"A exclusão do Tipo de Volume %(volume_type_id)s não é permitida com volumes " -"presentes com o tipo." - -#, python-format -msgid "" -"Volume Type %(volume_type_id)s has no extra specs with key " -"%(extra_specs_key)s." 
-msgstr "" -"O Tipo de Volume %(volume_type_id)s não tem specs extras com a chave " -"%(extra_specs_key)s." - -msgid "Volume Type id must not be None." -msgstr "Identificador do tipo de volume não pode ser None." - -#, python-format -msgid "" -"Volume [%(cb_vol)s] was not found at CloudByte storage corresponding to " -"OpenStack volume [%(ops_vol)s]." -msgstr "" -"O volume [%(cb_vol)s] não foi localizado no armazenamento CloudByte " -"correspondente ao volume OpenStack [%(ops_vol)s]." - -#, python-format -msgid "Volume [%s] not found in CloudByte storage." -msgstr "O volume [%s] não foi localizado no armazenamento CloudByte." - -#, python-format -msgid "Volume attachment could not be found with filter: %(filter)s ." -msgstr "O anexo do volume não pôde ser localizado com o filtro: %(filter)s." - -#, python-format -msgid "Volume backend config is invalid: %(reason)s" -msgstr "A configuração de backend de volume é inválida: %(reason)s" - -msgid "Volume by this name already exists" -msgstr "Já existe um volume com esse nome" - -msgid "Volume cannot be restored since it contains snapshots." -msgstr "" -"O volume não pode ser restaurado, uma vez que ele contém capturas " -"instantâneas." - -msgid "Volume create failed while extracting volume ref." -msgstr "A criação do volume falhou ao extrair a referência do volume." - -#, python-format -msgid "Volume device file path %s does not exist." -msgstr "Caminho do arquivo de dispositivo de volume %s não existe." - -#, python-format -msgid "Volume device not found at %(device)s." -msgstr "Dispositivo de volume não localizado em %(device)s." - -#, python-format -msgid "Volume driver %s not initialized." -msgstr "Driver do volume %s não inicializou." - -msgid "Volume driver not ready." -msgstr "O driver de volume ainda não está pronto." 
- -#, python-format -msgid "Volume driver reported an error: %(message)s" -msgstr "O driver do volume reportou um erro: %(message)s" - -msgid "Volume has a temporary snapshot that can't be deleted at this time." -msgstr "" -"O volume possui uma captura instantânea temporária que não pode ser excluída " -"no momento." - -msgid "Volume has children and cannot be deleted!" -msgstr "O volume possui filhos e não pode ser excluído!" - -#, python-format -msgid "Volume is attached to a server. (%s)" -msgstr "O volume está conectado a um servidor. (%s)" - -msgid "Volume is in-use." -msgstr "O volume está em uso." - -msgid "Volume is not available." -msgstr "Volume não está disponível." - -msgid "Volume is not local to this node" -msgstr "O volume não é local para este nó" - -msgid "Volume is not local to this node." -msgstr "O volume não é local para este nó." - -msgid "" -"Volume metadata backup requested but this driver does not yet support this " -"feature." -msgstr "" -"Backup de metadados do volume solicitado, mas este driver ainda não suporta " -"este recurso." - -#, python-format -msgid "Volume migration failed: %(reason)s" -msgstr "Migração de volume falhou: %(reason)s" - -msgid "Volume must be available" -msgstr "Volume deve estar disponível" - -msgid "Volume must be in the same availability zone as the snapshot" -msgstr "" -"O volume deve estar na mesma zona de disponibilidade que a captura " -"instantânea" - -msgid "Volume must be in the same availability zone as the source volume" -msgstr "" -"O volume deve estar na mesma zona de disponibilidade que o volume de origem" - -msgid "Volume must have a volume type" -msgstr "O volume deve ter um tipo de volume" - -msgid "Volume must not be replicated." -msgstr "O volume não deve ser replicado." - -msgid "Volume must not have snapshots." -msgstr "Volume não deve ter capturas instantâneas." - -#, python-format -msgid "Volume not found for instance %(instance_id)s." 
-msgstr "Volume não localizado para a instância %(instance_id)s." - -msgid "Volume not found on configured storage backend." -msgstr "Volume não localizado no backend de armazenamento configurado." - -msgid "" -"Volume not found on configured storage backend. If your volume name contains " -"\"/\", please rename it and try to manage again." -msgstr "" -"Volume não localizado no backend de armazenamento configurado. Se o nome do " -"seu volume contiver \"/\", renomeie-o e tente gerenciar novamente." - -msgid "Volume not found on configured storage pools." -msgstr "" -"O volume não foi localizado em conjuntos de armazenamentos configurados." - -msgid "Volume not found." -msgstr "Volume não localizado." - -msgid "Volume not unique." -msgstr "Volume não exclusivo" - -msgid "Volume not yet assigned to host." -msgstr "Volume ainda não designado para o host." - -msgid "Volume reference must contain source-name element." -msgstr "A referência de volume deve conter o elemento de nome de origem." - -#, python-format -msgid "Volume replication for %(volume_id)s could not be found." -msgstr "Replicação de volume %(volume_id)s não pôde ser encontrada." - -#, python-format -msgid "Volume service %s failed to start." -msgstr "Falha ao iniciar serviço de volume %s." - -msgid "Volume should have agent-type set as None." -msgstr "O volume deve ter agent-type configurado como Nenhum." - -#, python-format -msgid "" -"Volume size %(volume_size)sGB cannot be smaller than the image minDisk size " -"%(min_disk)sGB." -msgstr "" -"Tamanho do volume %(volume_size)sGB não pode ser menor do que o tamanho da " -"imagem de minDisk %(min_disk)sGB." - -#, python-format -msgid "Volume size '%(size)s' must be an integer and greater than 0" -msgstr "" -"O tamanho do volume '%(size)s' deve ser um número inteiro e maior que 0" - -#, python-format -msgid "" -"Volume size '%(size)s'GB cannot be smaller than original volume size " -"%(source_size)sGB. They must be >= original volume size." 
-msgstr "" -"O tamanho do volume ‘%(size)s‘ GB não pode ser menor que o tamanho do volume " -"original %(source_size)sGB. Deve ser >= ao tamanho do volume original." - -#, python-format -msgid "" -"Volume size '%(size)s'GB cannot be smaller than the snapshot size " -"%(snap_size)sGB. They must be >= original snapshot size." -msgstr "" -"O tamanho do volume ‘%(size)s' GB não pode ser menor que o tamanho da " -"captura instantânea %(snap_size)sGB. Deve ser >= tamanho da captura " -"instantânea original." - -msgid "Volume size increased since the last backup. Do a full backup." -msgstr "" -"O tamanho do volume aumentou desde o último backup. Execute um backup " -"completo." - -msgid "Volume size must be a multiple of 1 GB." -msgstr "O tamanho do volume deve ser múltiplo de 1 GB." - -msgid "Volume size must multiple of 1 GB." -msgstr "O tamanho do volume deve ser múltiplo de 1 GB." - -#, python-format -msgid "Volume status must be \"available\" or \"in-use\" for snapshot. (is %s)" -msgstr "" -"O status do volume deve ser \"disponível\" ou \"em uso\" para captura " -"instantânea. (é %s)" - -msgid "Volume status must be \"available\" or \"in-use\"." -msgstr "O status do volume deve ser \"disponível\" ou \"em uso\"." - -#, python-format -msgid "Volume status must be %s to reserve." -msgstr "O status do volume deve ser %s ara reservar." - -msgid "Volume status must be 'available'." -msgstr "O status do volume deve ser 'disponível'." - -msgid "Volume to Initiator Group mapping already exists" -msgstr "Já existe um mapeamento de grupos de volume para inicializador" - -#, python-format -msgid "" -"Volume to be backed up must be available or in-use, but the current status " -"is \"%s\"." -msgstr "" -"O volume a ser feito o backup deve estar disponível ou em uso, mas o status " -"atual é \"%s\"." 
- -msgid "Volume to be restored to must be available" -msgstr "O volume a ser restaurado deve estar disponível" - -#, python-format -msgid "Volume type %(volume_type_id)s could not be found." -msgstr "O tipo de volume %(volume_type_id)s não pôde ser localizado." - -#, python-format -msgid "Volume type ID '%s' is invalid." -msgstr "O ID do tipo '%s' é inválido." - -#, python-format -msgid "" -"Volume type access for %(volume_type_id)s / %(project_id)s combination " -"already exists." -msgstr "" -"O acesso do tipo de volume para combinações de %(volume_type_id)s / " -"%(project_id)s já existe." - -#, python-format -msgid "" -"Volume type access not found for %(volume_type_id)s / %(project_id)s " -"combination." -msgstr "" -"Acesso do tipo de volume não localizado para a combinação " -"%(volume_type_id)s / %(project_id)s ." - -#, python-format -msgid "Volume type encryption for type %(type_id)s already exists." -msgstr "Encriptação do tipo de volume para o tipo %(type_id)s já existe." - -#, python-format -msgid "Volume type encryption for type %(type_id)s does not exist." -msgstr "Encriptação do tipo de volume para o tipo %(type_id)s não existe." - -msgid "Volume type name can not be empty." -msgstr "Nome de tipo de volume não pode ser vazio." - -#, python-format -msgid "Volume type with name %(volume_type_name)s could not be found." -msgstr "" -"O tipo de volume com o nome %(volume_type_name)s não pôde ser localizado." - -#, python-format -msgid "" -"Volume: %(volumeName)s is not a concatenated volume. You can only perform " -"extend on concatenated volume. Exiting..." -msgstr "" -"Volume: %(volumeName)s não é um volume concatenado. É possível apenas " -"desempenhar a extensão no volume concatenado. Saindo..." - -#, python-format -msgid "Volume: %(volumeName)s was not added to storage group %(sgGroupName)s." -msgstr "" -"O volume: %(volumeName)s não foi incluído no grupo de armazenamentos " -"%(sgGroupName)s. 
" - -#, python-format -msgid "Volume: %s is already being managed by Cinder." -msgstr "O volume %s já está sendo gerenciado pelo Cinder." - -msgid "" -"Volumes/account exceeded on both primary and secondary SolidFire accounts." -msgstr "" -"Os volumes/contas excederam nas contas SolidFire primárias e secundárias." - -#, python-format -msgid "" -"VzStorage config 'vzstorage_used_ratio' invalid. Must be > 0 and <= 1.0: %s." -msgstr "" -"Configuração VzStorage 'vzstorage_used_ratio' inválida. Deve ser > 0 e <= " -"1.0: %s." - -#, python-format -msgid "VzStorage config file at %(config)s doesn't exist." -msgstr "Arquivo de configuração VzStorage em %(config)s não existe." - -msgid "Wait replica complete timeout." -msgstr "Tempo limite de espera da conclusão da réplica." - -#, python-format -msgid "Wait synchronize failed. Running status: %s." -msgstr "Falha ao aguardar sincronização. Status de execução: %s." - -msgid "" -"Waiting for all nodes to join cluster. Ensure all sheep daemons are running." -msgstr "" -"Aguardando que todos os nós se associem ao cluster. Assegure-se de que todos " -"os daemons sheep estejam em execução." - -msgid "We should not do switch over on primary array." -msgstr "Não é recomendável executar switch over na matriz primária." - -msgid "X-IO Volume Driver exception!" -msgstr "Exceção do Driver do Volume X-IO!" - -msgid "XtremIO not configured correctly, no iscsi portals found" -msgstr "XtremIO não configurado corretamente; nenhum portal iscsi localizado" - -msgid "XtremIO not initialized correctly, no clusters found" -msgstr "XtremIO não inicializado corretamente, nenhum cluster localizado" - -msgid "You must implement __call__" -msgstr "Você deve implementar __call__" - -msgid "" -"You must install hpe3parclient before using 3PAR drivers. Run \"pip install " -"python-3parclient\" to install the hpe3parclient." -msgstr "" -"Deve-se instalar o hpe3parclient antes de usar drivers 3PAR drivers. 
Execute " -"\"pip install python-3parclient\" para instalar o hpe3parclient." - -msgid "You must supply an array in your EMC configuration file." -msgstr "Deve-se fornecer uma matriz no arquivo de configuração do EMC." - -#, python-format -msgid "" -"Your original size: %(originalVolumeSize)s GB is greater than: %(newSize)s " -"GB. Only Extend is supported. Exiting..." -msgstr "" -"Seu tamanho original: %(originalVolumeSize)s GB é maior que: %(newSize)s GB. " -"Somente Estender é suportado. Saindo..." - -#, python-format -msgid "ZeroDivisionError: %s" -msgstr "ZeroDivisionError: %s" - -msgid "Zone" -msgstr "Zona" - -#, python-format -msgid "Zoning Policy: %s, not recognized" -msgstr "Política de Zoneamento: %s, não reconhecido" - -#, python-format -msgid "_create_and_copy_vdisk_data: Failed to get attributes for vdisk %s." -msgstr "_create_and_copy_vdisk_data: Falha ao obter atributos para o vdisk %s." - -msgid "_create_host failed to return the host name." -msgstr "_create_host falhou ao retornar o nome do host." - -msgid "" -"_create_host: Can not translate host name. Host name is not unicode or " -"string." -msgstr "" -"_create_host: Não é possível converter o nome do host. O nome do host não é " -"unicode ou vazia." - -msgid "_create_host: No connector ports." -msgstr "_create_host: Nenhuma porta de conector." - -msgid "_create_local_cloned_volume, Replication Service not found." -msgstr "_create_local_cloned_volume, Serviço de Replicação não localizado." - -#, python-format -msgid "" -"_create_local_cloned_volume, volumename: %(volumename)s, sourcevolumename: " -"%(sourcevolumename)s, source volume instance: %(source_volume)s, target " -"volume instance: %(target_volume)s, Return code: %(rc)lu, Error: " -"%(errordesc)s." 
-msgstr "" -"_create_local_cloned_volume, volumename: %(volumename)s, sourcevolumename: " -"%(sourcevolumename)s, instância do volume de serviço: %(source_volume)s, " -"instância do volume de destino: %(target_volume)s, Código de retorno: " -"%(rc)lu, Erro: %(errordesc)s." - -#, python-format -msgid "" -"_create_vdisk %(name)s - did not find success message in CLI output.\n" -" stdout: %(out)s\n" -" stderr: %(err)s" -msgstr "" -"_create_vdisk %(name)s - não foi localizada uma mensagem de êxito na saída " -"da CLI.\n" -" stdout: %(out)s\n" -" stderr: %(err)s" - -msgid "_create_volume_name, id_code is None." -msgstr "_create_volume_name, id_code é Nenhum." - -msgid "_delete_copysession, Cannot find Replication Service" -msgstr "_delete_copysession, Não é possível localizar o Serviço de Replicação" - -#, python-format -msgid "" -"_delete_copysession, copy session type is undefined! copy session: " -"%(cpsession)s, copy type: %(copytype)s." -msgstr "" -"_delete_copysession, tipo de sessão de cópia é indefinido! Sessão de cópia: " -"%(cpsession)s, tipo de cópia: %(copytype)s." - -#, python-format -msgid "" -"_delete_copysession, copysession: %(cpsession)s, operation: %(operation)s, " -"Return code: %(rc)lu, Error: %(errordesc)s." -msgstr "" -"_delete_copysession, copysession: %(cpsession)s, operação: %(operation)s, " -"Código de retorno: %(rc)lu, Erro: %(errordesc)s." - -#, python-format -msgid "" -"_delete_volume, volumename: %(volumename)s, Return code: %(rc)lu, Error: " -"%(errordesc)s." -msgstr "" -"_delete_volume, volumename: %(volumename)s, Código de Retorno: %(rc)lu, " -"Erro: %(errordesc)s." - -#, python-format -msgid "" -"_delete_volume, volumename: %(volumename)s, Storage Configuration Service " -"not found." -msgstr "" -"_delete_volume, volumename: %(volumename)s, Serviço de Configuração de " -"Armazenamento não localizado." - -#, python-format -msgid "" -"_exec_eternus_service, classname: %(classname)s, InvokeMethod, cannot " -"connect to ETERNUS." 
-msgstr "" -"_exec_eternus_service, classname: %(classname)s, InvokeMethod, não é " -"possível conectar-se ao ETERNUS." - -msgid "_extend_volume_op: Extending a volume with snapshots is not supported." -msgstr "" -"_extend_volume_op: Não é suportado estender um volume com capturas " -"instantâneas. " - -#, python-format -msgid "" -"_find_affinity_group, connector: %(connector)s, Associators: " -"FUJITSU_AuthorizedTarget, cannot connect to ETERNUS." -msgstr "" -"_find_affinity_group, conector: %(connector)s, Associadores: " -"FUJITSU_AuthorizedTarget, não é possível conectar-se ao ETERNUS." - -#, python-format -msgid "" -"_find_affinity_group, connector: %(connector)s, EnumerateInstanceNames, " -"cannot connect to ETERNUS." -msgstr "" -"_find_affinity_group, connector: %(connector)s, EnumerateInstanceNames, não " -"é possível conectar-se ao ETERNUS." - -#, python-format -msgid "" -"_find_affinity_group,connector: %(connector)s,AssocNames: " -"FUJITSU_ProtocolControllerForUnit, cannot connect to ETERNUS." -msgstr "" -"_find_affinity_group,connector: %(connector)s,AssocNames: " -"FUJITSU_ProtocolControllerForUnit, não é possível conectar-se ao ETERNUS." - -#, python-format -msgid "" -"_find_copysession, ReferenceNames, vol_instance: %(vol_instance_path)s, " -"Cannot connect to ETERNUS." -msgstr "" -"_find_copysession, ReferenceNames, vol_instance: %(vol_instance_path)s, Não " -"é possível conectar-se ao ETERNUS." - -#, python-format -msgid "" -"_find_eternus_service, classname: %(classname)s, EnumerateInstanceNames, " -"cannot connect to ETERNUS." -msgstr "" -"_find_eternus_service, classname: %(classname)s, EnumerateInstanceNames, não " -"é possível conectar-se ao ETERNUS." - -#, python-format -msgid "_find_initiator_names, connector: %(connector)s, initiator not found." -msgstr "" -"_find_initiator_names, connector: %(connector)s, iniciador não localizado." 
- -#, python-format -msgid "" -"_find_lun, volumename: %(volumename)s, EnumerateInstanceNames, cannot " -"connect to ETERNUS." -msgstr "" -"_find_lun, volumename: %(volumename)s, EnumerateInstanceNames, não é " -"possível conectar-se ao ETERNUS." - -#, python-format -msgid "" -"_find_pool, eternus_pool:%(eternus_pool)s, EnumerateInstances, cannot " -"connect to ETERNUS." -msgstr "" -"_find_pool, eternus_pool:%(eternus_pool)s, EnumerateInstances, não é " -"possível conectar-se ao ETERNUS." - -#, python-format -msgid "" -"_get_drvcfg, filename: %(filename)s, tagname: %(tagname)s, data is None!! " -"Please edit driver configuration file and correct." -msgstr "" -"_get_drvcfg, filename: %(filename)s, tagname: %(tagname)s, dados são Nenhum! " -"Edite o arquivo de configuração do driver e corrija." - -#, python-format -msgid "" -"_get_eternus_connection, filename: %(filename)s, ip: %(ip)s, port: %(port)s, " -"user: %(user)s, passwd: ****, url: %(url)s, FAILED!!." -msgstr "" -"_get_eternus_connection, nome do arquivo: %(filename)s, IP: %(ip)s, porta: " -"%(port)s, usuário: %(user)s, senha: ****, URL: %(url)s, FALHA!!." - -#, python-format -msgid "" -"_get_eternus_iscsi_properties, iscsiip list: %(iscsiip_list)s, iqn not found." -msgstr "" -"_get_eternus_iscsi_properties, iscsiip list: %(iscsiip_list)s, iqn não " -"localizado." - -#, python-format -msgid "" -"_get_eternus_iscsi_properties, iscsiip: %(iscsiip)s, AssociatorNames: " -"CIM_BindsTo, cannot connect to ETERNUS." -msgstr "" -"_get_eternus_iscsi_properties, iscsiip: %(iscsiip)s, AssociatorNames: " -"CIM_BindsTo, não é possível conectar-se ao ETERNUS." - -#, python-format -msgid "" -"_get_eternus_iscsi_properties, iscsiip: %(iscsiip)s, EnumerateInstanceNames, " -"cannot connect to ETERNUS." -msgstr "" -"_get_eternus_iscsi_properties, iscsiip: %(iscsiip)s, EnumerateInstanceNames, " -"não é possível conectar-se ao ETERNUS." 
- -#, python-format -msgid "" -"_get_eternus_iscsi_properties, iscsiip: %(iscsiip)s, GetInstance, cannot " -"connect to ETERNUS." -msgstr "" -"_get_eternus_iscsi_properties, iscsiip: %(iscsiip)s, GetInstance, não é " -"possível conectar-se ao ETERNUS." - -#, python-format -msgid "" -"_get_hdr_dic: attribute headers and values do not match.\n" -" Headers: %(header)s\n" -" Values: %(row)s." -msgstr "" -"_get_hdr_dic: cabeçalhos e valores de atributos não correspondem.\n" -" Cabeçalhos: %(header)s\n" -" Valores: %(row)s." - -msgid "_get_host_from_connector failed to return the host name for connector." -msgstr "" -"_get_host_from_connector falhou ao retornar o nome do host para o conector." - -#, python-format -msgid "" -"_get_mapdata_fc, getting host-affinity from aglist/vol_instance failed, " -"affinitygroup: %(ag)s, ReferenceNames, cannot connect to ETERNUS." -msgstr "" -"_get_mapdata_fc, falha ao obter afinidade de host a partir do aglist/" -"vol_instance, affinitygroup: %(ag)s, ReferenceNames, não é possível conectar-" -"se ao ETERNUS." - -#, python-format -msgid "" -"_get_mapdata_fc, getting host-affinity instance failed, volmap: %(volmap)s, " -"GetInstance, cannot connect to ETERNUS." -msgstr "" -"_get_mapdata_fc, falha ao obter instância de afinidade de host, volmap: " -"%(volmap)s, GetInstance, não é possível conectar-se ao ETERNUS." - -msgid "" -"_get_mapdata_iscsi, Associators: FUJITSU_SAPAvailableForElement, cannot " -"connect to ETERNUS." -msgstr "" -"_get_mapdata_iscsi, Associadores: FUJITSU_SAPAvailableForElement, não é " -"possível conectar-se ao ETERNUS." - -#, python-format -msgid "" -"_get_mapdata_iscsi, affinitygroup: %(ag)s, ReferenceNames, cannot connect to " -"ETERNUS." -msgstr "" -"_get_mapdata_iscsi, affinitygroup: %(ag)s, ReferenceNames, não é possível " -"conectar-se ao ETERNUS." - -#, python-format -msgid "" -"_get_mapdata_iscsi, vol_instance: %(vol_instance)s, ReferenceNames: " -"CIM_ProtocolControllerForUnit, cannot connect to ETERNUS." 
-msgstr "" -"_get_mapdata_iscsi, vol_instance: %(vol_instance)s, ReferenceNames: " -"CIM_ProtocolControllerForUnit, não é possível conectar-se ao ETERNUS." - -#, python-format -msgid "" -"_get_mapdata_iscsi, volmap: %(volmap)s, GetInstance, cannot connect to " -"ETERNUS." -msgstr "" -"_get_mapdata_iscsi, volmap: %(volmap)s, GetInstance, não é possível conectar-" -"se ao ETERNUS." - -msgid "_get_target_port, EnumerateInstances, cannot connect to ETERNUS." -msgstr "" -"_get_target_port, EnumerateInstances, não é possível conectar-se ao ETERNUS." - -#, python-format -msgid "_get_target_port, protcol: %(protocol)s, target_port not found." -msgstr "_get_target_port, protcolo: %(protocol)s, target_port não localizado." - -#, python-format -msgid "_get_unmanaged_replay: Cannot find snapshot named %s" -msgstr "" -"_get_unmanaged_replay: Não é possível localizar a captura instantânea " -"denominada %s" - -#, python-format -msgid "_get_unmanaged_replay: Cannot find volume id %s" -msgstr "_get_unmanaged_replay: Não é possível localizar o ID do volume %s" - -msgid "_get_unmanaged_replay: Must specify source-name." -msgstr "_get_unmanaged_replay: Deve-se especificar source-name." - -msgid "" -"_get_vdisk_map_properties: Could not get FC connection information for the " -"host-volume connection. Is the host configured properly for FC connections?" -msgstr "" -"_get_vdisk_map_properties: Não foi possível obter informações de conexão FC " -"para a conexão do volume do host. O host está configurado adequadamente para " -"as conexões FC?" - -#, python-format -msgid "" -"_get_vdisk_map_properties: No node found in I/O group %(gid)s for volume " -"%(vol)s." -msgstr "" -"_get_vdisk_map_properties: Nenhum nó localizado no grupo de E/S %(gid)s para " -"o volume %(vol)s." 
- -#, python-format -msgid "" -"_map_lun, vol_instance.path:%(vol)s, volumename: %(volumename)s, volume_uid: " -"%(uid)s, initiator: %(initiator)s, target: %(tgt)s, aglist: %(aglist)s, " -"Storage Configuration Service not found." -msgstr "" -"_map_lun, vol_instance.path:%(vol)s, volumename: %(volumename)s, volume_uid: " -"%(uid)s, iniciator: %(initiator)s, destino: %(tgt)s, aglist: %(aglist)s, " -"Serviço de Configuração de Armazenamento não localizado." - -#, python-format -msgid "" -"_unmap_lun, vol_instance.path: %(volume)s, volumename: %(volumename)s, " -"volume_uid: %(uid)s, aglist: %(aglist)s, Controller Configuration Service " -"not found." -msgstr "" -"_unmap_lun, vol_instance.path: %(volume)s, volumename: %(volumename)s, " -"volume_uid: %(uid)s, aglist: %(aglist)s, Serviço de Configuração do " -"Controlador não lcoalizado." - -#, python-format -msgid "" -"_unmap_lun, volumename: %(volumename)s, volume_uid: %(volume_uid)s, " -"AffinityGroup: %(ag)s, Return code: %(rc)lu, Error: %(errordesc)s." -msgstr "" -"_unmap_lun, volumename: %(volumename)s, volume_uid: %(volume_uid)s, " -"AffinityGroup: %(ag)s, Código de Retorno: %(rc)lu, Erro: %(errordesc)s." - -#, python-format -msgid "" -"_unmap_lun,vol_instance.path: %(volume)s, AssociatorNames: " -"CIM_ProtocolControllerForUnit, cannot connect to ETERNUS." -msgstr "" -"_unmap_lun,vol_instance.path: %(volume)s, AssociatorNames: " -"CIM_ProtocolControllerForUnit, não é possível conectar-se ao ETERNUS." - -msgid "_update_volume_stats: Could not get storage pool data." -msgstr "" -"_update_volume_stats: Não foi possível obter dados do conjunto de " -"armazenamento." - -#, python-format -msgid "" -"_wait_for_copy_complete, cpsession: %(cpsession)s, copysession state is " -"BROKEN." -msgstr "" -"_wait_for_copy_complete, cpsession: %(cpsession)s, o estado de sessão de " -"cópia é BROKEN." - -#, python-format -msgid "" -"add_vdisk_copy failed: A copy of volume %s exists. 
Adding another copy would " -"exceed the limit of 2 copies." -msgstr "" -"add_vdisk_copy falhou: Uma cópia de volume %s existe. Incluir outra cópia " -"excederia o limite de 2 cópias." - -msgid "add_vdisk_copy started without a vdisk copy in the expected pool." -msgstr "add_vdisk_copy iniciado sem uma cópia vdisk no conjunto esperado." - -#, python-format -msgid "all_tenants must be a boolean, got '%s'." -msgstr "all_tenants deve ser um booleano, obtido '%s'." - -msgid "already created" -msgstr "já criado" - -msgid "already_created" -msgstr "already_created" - -msgid "attach snapshot from remote node" -msgstr "anexar captura instantânea do nó remoto" - -#, python-format -msgid "attribute %s not lazy-loadable" -msgstr "o atributo %s não realiza carregamento demorado" - -#, python-format -msgid "" -"backup: %(vol_id)s failed to create device hardlink from %(vpath)s to " -"%(bpath)s.\n" -"stdout: %(out)s\n" -" stderr: %(err)s" -msgstr "" -"backup: %(vol_id)s falhou ao criar link físico do dispositivo a partir de " -"%(vpath)s para %(bpath)s.\n" -"stdout: %(out)s\n" -" stderr: %(err)s" - -#, python-format -msgid "" -"backup: %(vol_id)s failed to obtain backup success notification from " -"server.\n" -"stdout: %(out)s\n" -" stderr: %(err)s" -msgstr "" -"backup: %(vol_id)s falhou ao obter notificação de sucesso de backup do " -"servidor.\n" -"stdout: %(out)s\n" -" stderr: %(err)s" - -#, python-format -msgid "" -"backup: %(vol_id)s failed to run dsmc due to invalid arguments on " -"%(bpath)s.\n" -"stdout: %(out)s\n" -" stderr: %(err)s" -msgstr "" -"backup: %(vol_id)s falhou ao executar o dsmc devido a argumentos inválidos " -"em %(bpath)s.\n" -"stdout: %(out)s\n" -" stderr: %(err)s" - -#, python-format -msgid "" -"backup: %(vol_id)s failed to run dsmc on %(bpath)s.\n" -"stdout: %(out)s\n" -" stderr: %(err)s" -msgstr "" -"backup: %(vol_id)s falhou ao executar o dsmc em %(bpath)s.\n" -"stdout: %(out)s\n" -" stderr: %(err)s" - -#, python-format -msgid "backup: %(vol_id)s 
failed. %(path)s is not a file." -msgstr "backup: %(vol_id)s falhou. %(path)s não é um arquivo." - -#, python-format -msgid "" -"backup: %(vol_id)s failed. %(path)s is unexpected file type. Block or " -"regular files supported, actual file mode is %(vol_mode)s." -msgstr "" -"backup: %(vol_id)s falhou. %(path)s é tipo de arquivo inesperado. Bloco ou " -"arquivos regulares suportados, modo de arquivo real é %(vol_mode)s." - -#, python-format -msgid "" -"backup: %(vol_id)s failed. Cannot obtain real path to volume at %(path)s." -msgstr "" -"backup: %(vol_id)s falhou. Não é possível obter caminho real para o volume " -"em %(path)s." - -msgid "being attached by different mode" -msgstr "sendo anexado por modo diferente" - -#, python-format -msgid "call failed: %r" -msgstr "A chamada falhou: %r" - -msgid "call failed: GARBAGE_ARGS" -msgstr "A chamada falhou: GARBAGE_ARGS" - -msgid "call failed: PROC_UNAVAIL" -msgstr "A chamada falhou: PROC_UNAVAIL" - -#, python-format -msgid "call failed: PROG_MISMATCH: %r" -msgstr "A chamada falhou: PROG_MISMATCH: %r" - -msgid "call failed: PROG_UNAVAIL" -msgstr "A chamada falhou: PROG_UNAVAIL" - -#, python-format -msgid "can't find lun-map, ig:%(ig)s vol:%(vol)s" -msgstr "não é possível localizar mapa de lun, ig:%(ig)s vol:%(vol)s" - -msgid "can't find the volume to extend" -msgstr "não é possível localizar o volume a ser estendido" - -msgid "can't handle both name and index in req" -msgstr "não é possível lidar com o nome e o índice na solicitação" - -msgid "cannot understand JSON" -msgstr "não é possível entender JSON" - -#, python-format -msgid "cg-%s" -msgstr "cg-%s" - -msgid "cgsnapshot assigned" -msgstr "cgsnapshot designada" - -msgid "cgsnapshot changed" -msgstr "cgsnapshot alterada" - -msgid "cgsnapshots assigned" -msgstr "cgsnapshots designadas" - -msgid "cgsnapshots changed" -msgstr "cgsnapshots alteradas" - -msgid "" -"check_for_setup_error: Password or SSH private key is required for " -"authentication: set either 
san_password or san_private_key option." -msgstr "" -"check_for_setup_error: A senha ou a chave privada SSH é requerida para " -"autenticação: configure a opção san_password ou san_private_key." - -msgid "check_for_setup_error: Unable to determine system id." -msgstr "check_for_setup_error: Não é possível determinar o ID do sistema." - -msgid "check_for_setup_error: Unable to determine system name." -msgstr "check_for_setup_error: Não é possível determinar o nome do sistema." - -msgid "check_hypermetro_exist error." -msgstr "Erro de check_hypermetro_exist." - -#, python-format -msgid "clone depth exceeds limit of %s" -msgstr "a espessura do clone excede o limite de %s" - -msgid "consistencygroup assigned" -msgstr "consistencygroup designado" - -msgid "consistencygroup changed" -msgstr "consistencygroup alterado" - -msgid "control_location must be defined" -msgstr "control_location deve ser definido" - -msgid "create_cloned_volume, Source Volume does not exist in ETERNUS." -msgstr "create_cloned_volume, o Volume de Origem não existe no ETERNUS." - -#, python-format -msgid "" -"create_cloned_volume, target volume instancename: %(volume_instancename)s, " -"Get Instance Failed." -msgstr "" -"create_cloned_volume, nome da instância do volume de destino: " -"%(volume_instancename)s, Falha ao Obter Instância." - -msgid "create_cloned_volume: Source and destination size differ." -msgstr "create_cloned_volume: Os tamanhos de origem e destino diferem." - -#, python-format -msgid "" -"create_cloned_volume: source volume %(src_vol)s size is %(src_size)dGB and " -"doesn't fit in target volume %(tgt_vol)s of size %(tgt_size)dGB." -msgstr "" -"create_cloned_volume: O tamanho do volume de origem %(src_vol)s é " -"%(src_size)dGB e não cabe no volume de destino %(tgt_vol)s de tamanho " -"%(tgt_size)dGB." - -msgid "" -"create_consistencygroup_from_src must be creating from a CG snapshot, or a " -"source CG." 
-msgstr "" -"create_consistencygroup_from_src deve estar criando a partir de uma captura " -"instantânea CG, ou de uma origem CG." - -msgid "" -"create_consistencygroup_from_src only supports a cgsnapshot source or a " -"consistency group source. Multiple sources cannot be used." -msgstr "" -"create_consistencygroup_from_src suporta somente uma origem cgsnapshot ou " -"uma origem de grupo de consistências. Diversas origens não podem ser usadas." - -#, python-format -msgid "create_copy: Source vdisk %(src)s (%(src_id)s) does not exist." -msgstr "create_copy: O vdisk de origem %(src)s (%(src_id)s) não existe." - -#, python-format -msgid "create_copy: Source vdisk %(src)s does not exist." -msgstr "create_copy: Vdisk de origem %(src)s não existe." - -msgid "create_host: Host name is not unicode or string." -msgstr "create_host: Nome do host não é unicode ou sequência." - -msgid "create_host: No initiators or wwpns supplied." -msgstr "create_host: Nenhum inicializador ou wwpns fornecido." - -msgid "create_hypermetro_pair error." -msgstr "Erro de create_hypermetro_pair." - -#, python-format -msgid "create_snapshot, eternus_pool: %(eternus_pool)s, pool not found." -msgstr "" -"create_snapshot, eternus_pool: %(eternus_pool)s, conjunto não localizado." - -#, python-format -msgid "" -"create_snapshot, snapshotname: %(snapshotname)s, source volume name: " -"%(volumename)s, vol_instance.path: %(vol_instance)s, dest volume name: " -"%(d_volumename)s, pool: %(pool)s, Return code: %(rc)lu, Error: %(errordesc)s." -msgstr "" -"create_snapshot, snapshotname: %(snapshotname)s, nome do volume de origem: " -"%(volumename)s, vol_instance.path: %(vol_instance)s, nome do volume de " -"destino: %(d_volumename)s, conjunto: %(pool)s, Código de retorno: %(rc)lu, " -"Erro: %(errordesc)s." - -#, python-format -msgid "" -"create_snapshot, volumename: %(s_volumename)s, source volume not found on " -"ETERNUS." 
-msgstr "" -"create_snapshot, volumename: %(s_volumename)s, volume de origem não " -"localizado no ETERNUS." - -#, python-format -msgid "" -"create_snapshot, volumename: %(volumename)s, Replication Service not found." -msgstr "" -"create_snapshot, volumename: %(volumename)s, Serviço de Replicação não " -"localizado." - -#, python-format -msgid "" -"create_snapshot: Volume status must be \"available\" or \"in-use\" for " -"snapshot. The invalid status is %s." -msgstr "" -"create_snapshot: O status do volume deve ser \"disponível \" ou \"em uso\" " -"para captura instantânea. O status inválido é %s." - -msgid "create_snapshot: get source volume failed." -msgstr "create_snapshot: obter volume de origem falhou." - -#, python-format -msgid "" -"create_volume, volume: %(volume)s, EnumerateInstances, cannot connect to " -"ETERNUS." -msgstr "" -"create_volume, volume: %(volume)s, EnumerateInstances, não é possível " -"conectar-se ao ETERNUS." - -#, python-format -msgid "" -"create_volume, volume: %(volume)s, volumename: %(volumename)s, eternus_pool: " -"%(eternus_pool)s, Storage Configuration Service not found." -msgstr "" -"create_volume, volume: %(volume)s, volumename: %(volumename)s, eternus_pool: " -"%(eternus_pool)s, Serviço de Configuração de Armazenamento não localizado." - -#, python-format -msgid "" -"create_volume, volumename: %(volumename)s, poolname: %(eternus_pool)s, " -"Return code: %(rc)lu, Error: %(errordesc)s." -msgstr "" -"create_volume, volumename: %(volumename)s, poolname: %(eternus_pool)s, " -"Código de retorno: %(rc)lu, Erro: %(errordesc)s." - -msgid "create_volume_from_snapshot, Source Volume does not exist in ETERNUS." -msgstr "create_volume_from_snapshot, o Volume de Origem não existe no ETERNUS." - -#, python-format -msgid "" -"create_volume_from_snapshot, target volume instancename: " -"%(volume_instancename)s, Get Instance Failed." 
-msgstr "" -"create_volume_from_snapshot, nome da instância do volume de destino: " -"%(volume_instancename)s, Falha ao Obter Instância." - -#, python-format -msgid "create_volume_from_snapshot: Snapshot %(name)s does not exist." -msgstr "" -"create_volume_from_snapshot: A captura instantânea %(name)s não existe." - -#, python-format -msgid "" -"create_volume_from_snapshot: Snapshot status must be \"available\" for " -"creating volume. The invalid status is: %s." -msgstr "" -"create_volume_from_snapshot: O status da captura instantânea deve ser " -"\"disponível\" para volume de criação. O status inválido é: %s." - -msgid "" -"create_volume_from_snapshot: Volume size is different from snapshot based " -"volume." -msgstr "" -"create_volume_from_snapshot: O tamanho do volume é diferente do volume " -"baseado em captura instantânea." - -#, python-format -msgid "" -"delete: %(vol_id)s failed to run dsmc due to invalid arguments with stdout: " -"%(out)s\n" -" stderr: %(err)s" -msgstr "" -"exclusão: %(vol_id)s falhou ao executar o dsmc devido a argumentos inválidos " -"com stdout: %(out)s\n" -" stderr: %(err)s" - -#, python-format -msgid "" -"delete: %(vol_id)s failed to run dsmc with stdout: %(out)s\n" -" stderr: %(err)s" -msgstr "" -"exclusão: %(vol_id)s falhou ao executar o dsmc com stdout: %(out)s\n" -" stderr: %(err)s" - -msgid "delete_hypermetro error." -msgstr "Erro de delete_hypermetro." - -#, python-format -msgid "delete_initiator: %s ACL not found. Continuing." -msgstr "delete_initiator: ACL %s não localizada. Continuando." - -msgid "delete_replication error." -msgstr "Erro de delete_replication." 
- -#, python-format -msgid "deleting snapshot %(snapshot_name)s that has dependent volumes" -msgstr "" -"excluindo captura instantânea %(snapshot_name)s que possui volumes " -"dependentes" - -#, python-format -msgid "deleting volume %(volume_name)s that has snapshot" -msgstr "excluindo o volume %(volume_name)s que possui captura instantânea" - -msgid "detach snapshot from remote node" -msgstr "remover captura instantânea do nó remoto" - -msgid "do_setup: No configured nodes." -msgstr "do_setup: Nenhum nó configurado." - -#, python-format -msgid "" -"error writing object to swift, MD5 of object in swift %(etag)s is not the " -"same as MD5 of object sent to swift %(md5)s" -msgstr "" -"erro ao gravar objeto no swift; o MD5 do objeto no swift %(etag)s não é o " -"mesmo enviado ao swift %(md5)s" - -#, python-format -msgid "extend_volume, eternus_pool: %(eternus_pool)s, pool not found." -msgstr "" -"extend_volume, eternus_pool: %(eternus_pool)s, conjunto não localizado." - -#, python-format -msgid "" -"extend_volume, volume: %(volume)s, volumename: %(volumename)s, eternus_pool: " -"%(eternus_pool)s, Storage Configuration Service not found." -msgstr "" -"extend_volume, volume: %(volume)s, volumename: %(volumename)s, eternus_pool: " -"%(eternus_pool)s, Serviço de Configuração de Armazenamento não localizado." - -#, python-format -msgid "" -"extend_volume, volumename: %(volumename)s, Return code: %(rc)lu, Error: " -"%(errordesc)s, PoolType: %(pooltype)s." -msgstr "" -"extend_volume, volumename: %(volumename)s, Código de Retorno: %(rc)lu, Erro: " -"%(errordesc)s, PoolType: %(pooltype)s." - -#, python-format -msgid "extend_volume, volumename: %(volumename)s, volume not found." -msgstr "extend_volume, volumename: %(volumename)s, volume não localizado." 
- -msgid "failed to create new_volume on destination host" -msgstr "falha ao criar new_volume no host de destino" - -msgid "fake" -msgstr "falso" - -#, python-format -msgid "file already exists at %s" -msgstr "o arquivo já existe em %s" - -msgid "fileno is not supported by SheepdogIOWrapper" -msgstr "fileno não é suportado pelo SheepdogIOWrapper" - -msgid "fileno() not supported by RBD()" -msgstr "fileno() não suportado por RBD()" - -#, python-format -msgid "filesystem %s does not exist in Nexenta Store appliance" -msgstr "O sistema de arquivos %s não existe no dispositivo Nexenta Store." - -msgid "" -"flashsystem_multihostmap_enabled is set to False, not allow multi host " -"mapping. CMMVC6071E The VDisk-to-host mapping was not created because the " -"VDisk is already mapped to a host." -msgstr "" -"flashsystem_multihostmap_enabled está configurado para False, não permite " -"vários mapeamentos de host. CMMVC6071E O mapeamento de VDisk para host não " -"foi criado porque o VDisk já está mapeado para um host." - -msgid "flush() not supported in this version of librbd" -msgstr "flush() não suportado nesta versão de librbd" - -#, python-format -msgid "fmt=%(fmt)s backed by: %(backing_file)s" -msgstr "fmt=%(fmt)s retornado por: %(backing_file)s" - -#, python-format -msgid "fmt=%(fmt)s backed by:%(backing_file)s" -msgstr "fmt=%(fmt)s retornado por: %(backing_file)s" - -msgid "force delete" -msgstr "forçar exclusão" - -msgid "get_hyper_domain_id error." -msgstr "Erro de get_hyper_domain_id." - -msgid "get_hypermetro_by_id error." -msgstr "Erro de get_hypermetro_by_id." - -#, python-format -msgid "" -"get_iscsi_params: Failed to get target IP for initiator %(ini)s, please " -"check config file." -msgstr "" -"get_iscsi_params: Falha ao obter o IP de destino para o inicializador " -"%(ini)s, verifique o arquivo de configuração." 
- -#, python-format -msgid "get_pool: Failed to get attributes for volume %s" -msgstr "get_pool: Falha ao obter atributos para o volume %s" - -msgid "glance_metadata changed" -msgstr "glance_metadata alterado" - -#, python-format -msgid "" -"gpfs_images_share_mode is set to copy_on_write, but %(vol)s and %(img)s " -"belong to different file systems." -msgstr "" -"gpfs_images_share_mode está configurada para copy_on_write, mas %(vol)s e " -"%(img)s pertencem a sistemas de arquivos diferentes." - -#, python-format -msgid "" -"gpfs_images_share_mode is set to copy_on_write, but %(vol)s and %(img)s " -"belong to different filesets." -msgstr "" -"gpfs_images_share_mode está configurada para copy_on_write, mas %(vol)s e " -"%(img)s pertencem a diferentes conjuntos." - -#, python-format -msgid "" -"hgst_group %(grp)s and hgst_user %(usr)s must map to valid users/groups in " -"cinder.conf" -msgstr "" -"hgst_group %(grp)s e hgst_user %(usr)s devem ser mapeados para usuários/" -"grupos válidos em cinder.conf" - -#, python-format -msgid "hgst_net %(net)s specified in cinder.conf not found in cluster" -msgstr "hgst_net %(net)s especificado em cinder.conf não localizado no cluster" - -msgid "hgst_redundancy must be set to 0 (non-HA) or 1 (HA) in cinder.conf." -msgstr "" -"hgst_redundancy deve ser configurado para 0 (não HA) ou 1 (HA) em cinder." -"conf." - -msgid "hgst_space_mode must be an octal/int in cinder.conf" -msgstr "hgst_space_mode must deve ser um octal/int em cinder.conf" - -#, python-format -msgid "hgst_storage server %(svr)s not of format :" -msgstr "O servidor %(svr)s hgst_storage não é do formato :" - -msgid "hgst_storage_servers must be defined in cinder.conf" -msgstr "hgst_storage_servers deve ser definido em cinder.conf" - -msgid "" -"http service may have been abruptly disabled or put to maintenance state in " -"the middle of this operation." 
-msgstr "" -"O serviço http foi desativado abruptamente ou pode ter sido colocado em " -"estado de manutenção no meio dessa operação." - -msgid "id cannot be None" -msgstr "id não pode ser Nenhum" - -#, python-format -msgid "image %s not found" -msgstr "imagem %s não localizada" - -#, python-format -msgid "initialize_connection, volume: %(volume)s, Volume not found." -msgstr "initialize_connection, volume: %(volume)s, Volume não localizado." - -#, python-format -msgid "initialize_connection: Failed to get attributes for volume %s." -msgstr "initialize_connection: Falha ao obter atributos para o volume %s." - -#, python-format -msgid "initialize_connection: Missing volume attribute for volume %s." -msgstr "initialize_connection: Atributo de volume ausente para o volume %s." - -#, python-format -msgid "" -"initialize_connection: No node found in I/O group %(gid)s for volume %(vol)s." -msgstr "" -"initialize_connection: Nenhum nó localizado no grupo de E/S %(gid)s para o " -"volume %(vol)s." - -#, python-format -msgid "initialize_connection: vdisk %s is not defined." -msgstr "initialize_connection: vdisk %s não está definido." - -#, python-format -msgid "invalid user '%s'" -msgstr "usuário inválido '%s'" - -#, python-format -msgid "iscsi portal, %s, not found" -msgstr "portal iscsi, %s, não localizado" - -msgid "" -"iscsi_ip_address must be set in config file when using protocol 'iSCSI'." -msgstr "" -"iscsi_ip_address deve ser definido no arquivo de configuração ao usar " -"protocolo 'iSCSI'." - -#, python-format -msgid "key manager error: %(reason)s" -msgstr "Erro do gerenciador de chaves: %(reason)s" - -msgid "limit param must be an integer" -msgstr "o parâmetro limit deve ser um número inteiro" - -msgid "limit param must be positive" -msgstr "o parâmetro limit deve ser positivo" - -msgid "manage_existing requires a 'name' key to identify an existing volume." -msgstr "" -"manage_existing requer uma chave de 'nome' para identificar um volume " -"existente." 
- -#, python-format -msgid "" -"manage_existing_snapshot: Error managing existing replay %(ss)s on volume " -"%(vol)s" -msgstr "" -"manage_existing_snapshot: Erro ao gerenciar a reprodução existente %(ss)s no " -"volume %(vol)s" - -#, python-format -msgid "marker [%s] not found" -msgstr "marcador [%s] não localizado" - -#, python-format -msgid "mdiskgrp missing quotes %s" -msgstr "mdiskgrp não tem aspas %s" - -#, python-format -msgid "migration_policy must be 'on-demand' or 'never', passed: %s" -msgstr "migration_policy deve ser 'on-demand' ou 'never', transmitido: %s" - -#, python-format -msgid "mkfs failed on volume %(vol)s, error message was: %(err)s." -msgstr "mkfs falhou no volume %(vol)s, mensagem de erro foi: %(err)s." - -msgid "mock" -msgstr "simulado" - -msgid "mount.glusterfs is not installed" -msgstr "mount.glusterfs não está instalado" - -#, python-format -msgid "multiple resources with name %s found by drbdmanage" -msgstr "vários recursos com o nome %s localizado como drbdmanage" - -#, python-format -msgid "multiple resources with snapshot ID %s found" -msgstr "vários recursos com ID de captura instantânea %s localizado" - -msgid "name cannot be None" -msgstr "o nome não pode ser Nenhum" - -#, python-format -msgid "no REPLY but %r" -msgstr "Nenhuma REPLY, mas %r" - -#, python-format -msgid "no snapshot with id %s found in drbdmanage" -msgstr "nenhuma captura instantânea com ID %s localizada no drbdmanage" - -#, python-format -msgid "not exactly one snapshot with id %s" -msgstr "não exatamente uma captura instantânea com ID %s" - -#, python-format -msgid "not exactly one volume with id %s" -msgstr "não exatamente um volume com o ID %s" - -#, python-format -msgid "obj missing quotes %s" -msgstr "o objeto não tem aspas %s" - -msgid "open_access_enabled is not off." -msgstr "open_access_enabled não está desativado." 
- -msgid "progress must be an integer percentage" -msgstr "progresso deve ser uma porcentagem de número inteiro" - -msgid "provider must be defined" -msgstr "provider deve ser definido" - -#, python-format -msgid "" -"qemu-img %(minimum_version)s or later is required by this volume driver. " -"Current qemu-img version: %(current_version)s" -msgstr "" -"qemu-img %(minimum_version)s ou posterior é necessário para este driver de " -"volume. Qemu-img versão atual: %(current_version)s" - -#, python-format -msgid "" -"qemu-img is not installed and image is of type %s. Only RAW images can be " -"used if qemu-img is not installed." -msgstr "" -"qemu-img não está instalado e a imagem é do tipo %s. Apenas imagens RAW " -"podem ser usadas se qemu-img não estiver instalado." - -msgid "" -"qemu-img is not installed and the disk format is not specified. Only RAW " -"images can be used if qemu-img is not installed." -msgstr "" -"qemu-img não está instalado e o formato do disco não está especificado. " -"Apenas imagens RAW podem ser usadas se qemu-img não estiver instalado." - -msgid "rados and rbd python libraries not found" -msgstr "bibliotecas Python rados e rbd não localizadas" - -#, python-format -msgid "read_deleted can only be one of 'no', 'yes' or 'only', not %r" -msgstr "read_deleted pode ser apenas um de 'no', 'yes' ou 'only', não %r" - -#, python-format -msgid "replication_failover failed. %s not found." -msgstr "replication_failover falhou. %s não localizado." - -msgid "replication_failover failed. Backend not configured for failover" -msgstr "replication_failover falhou. 
Backend não configurado para failover" - -#, python-format -msgid "" -"restore: %(vol_id)s failed to run dsmc due to invalid arguments on " -"%(bpath)s.\n" -"stdout: %(out)s\n" -" stderr: %(err)s" -msgstr "" -"restauração: %(vol_id)s falhou ao executar o dsmc devido a argumentos " -"inválidos em %(bpath)s.\n" -"stdout: %(out)s\n" -" stderr: %(err)s" - -#, python-format -msgid "" -"restore: %(vol_id)s failed to run dsmc on %(bpath)s.\n" -"stdout: %(out)s\n" -" stderr: %(err)s" -msgstr "" -"restauração: %(vol_id)s falhou ao executar o dsmc em %(bpath)s.\n" -"stdout: %(out)s\n" -" stderr: %(err)s" - -#, python-format -msgid "" -"restore: %(vol_id)s failed.\n" -"stdout: %(out)s\n" -" stderr: %(err)s." -msgstr "" -"restauração: %(vol_id)s falhou.\n" -"stdout: %(out)s\n" -" stderr: %(err)s." - -msgid "" -"restore_backup aborted, actual object list does not match object list stored " -"in metadata." -msgstr "" -"restore_backup interrompido, a lista de objetos real não corresponde à lista " -"de objetos armazenada nos metadados." - -#, python-format -msgid "rtslib_fb is missing member %s: You may need a newer python-rtslib-fb." -msgstr "" -"rtslib_fb é um membro ausente %s: você pode precisar de um python-rtslib-fb " -"mais novo." - -msgid "san_ip is not set." -msgstr "san_ip não está configurado." - -msgid "san_ip must be set" -msgstr "san_ip deve ser configurado" - -msgid "" -"san_login and/or san_password is not set for Datera driver in the cinder." -"conf. Set this information and start the cinder-volume service again." -msgstr "" -"san_login e/ou san_password não está configurado para o driver Datera no " -"cinder.conf. Configure estas informações e inicie o serviço cinder-volume " -"novamente." 
- -msgid "serve() can only be called once" -msgstr "serve() pode ser chamado apenas uma vez" - -#, python-format -msgid "snapshot-%s" -msgstr "captura instantânea-%s" - -msgid "snapshots assigned" -msgstr "capturas instantâneas designadas" - -msgid "snapshots changed" -msgstr "capturas instantâneas alteradas" - -#, python-format -msgid "source volume id:%s is not replicated" -msgstr "ID do volume de origem:%s não é replicado" - -msgid "source-name cannot be empty." -msgstr "O source-name não pode estar vazio." - -msgid "source-name format should be: 'vmdk_path@vm_inventory_path'." -msgstr "O formato source-name deve ser: 'vmdk_path@vm_inventory_path'." - -#, python-format -msgid "status must be %s and" -msgstr "status deve ser %s e" - -msgid "status must be available" -msgstr "o status deve estar disponível" - -msgid "stop_hypermetro error." -msgstr "Erro de stop_hypermetro." - -msgid "sync_hypermetro error." -msgstr "Erro de sync_hypermetro." - -#, python-format -msgid "" -"targetcli not installed and could not create default directory " -"(%(default_path)s): %(exc)s" -msgstr "" -"targetcli não instalado e não pôde criar o diretório padrão " -"(%(default_path)s): %(exc)s" - -msgid "terminate_connection: Failed to get host name from connector." -msgstr "terminate_connection: Falha ao obter o nome do host do conector." 
- -msgid "timeout creating new_volume on destination host" -msgstr "tempo limite ao criar new_volume no host de destino" - -msgid "too many body keys" -msgstr "excesso de chaves de corpo" - -#, python-format -msgid "umount: %s: not mounted" -msgstr "umount: %s: não montado" - -#, python-format -msgid "umount: %s: target is busy" -msgstr "umount: %s: o destino está ocupado" - -msgid "umount: : some other error" -msgstr "umount: : algum outro erro" - -msgid "umount: : target is busy" -msgstr "umount: : o destino está ocupado" - -#, python-format -msgid "unmanage_snapshot: Cannot find snapshot named %s" -msgstr "" -"unmanage_snapshot: Não é possível localizar a captura instantânea denominada " -"%s" - -#, python-format -msgid "unmanage_snapshot: Cannot find volume id %s" -msgstr "unmanage_snapshot: Não é possível localizar o ID do volume %s" - -#, python-format -msgid "unrecognized argument %s" -msgstr "argumento não reconhecido %s" - -#, python-format -msgid "unsupported compression algorithm: %s" -msgstr "algoritmo de compressão não suportado: %s" - -msgid "valid iqn needed for show_target" -msgstr "iqn válido necessário para show_target" - -#, python-format -msgid "vdisk %s is not defined." -msgstr "o vdisk %s não está definido." - -msgid "vmemclient python library not found" -msgstr "biblioteca python vmemclient não localizada" - -#, python-format -msgid "volume %s not found in drbdmanage" -msgstr "volume %s não localizado no drbdmanage" - -msgid "volume assigned" -msgstr "volume designado" - -msgid "volume changed" -msgstr "volume alterado" - -msgid "volume is already attached" -msgstr "o volume já está conectado" - -msgid "volume is not local to this node" -msgstr "o volume não é local para este nó" - -#, python-format -msgid "" -"volume size %(volume_size)d is too small to restore backup of size %(size)d." -msgstr "" -"O tamanho do volume %(volume_size)d é muito pequeno para restaurar o backup " -"do tamanho %(size)d." 
- -#, python-format -msgid "volume size %d is invalid." -msgstr "O tamanho do volume %d é inválido." - -msgid "" -"volume_type must be provided when creating a volume in a consistency group." -msgstr "" -"volume_type deve ser fornecido ao criar um volume em um grupo de " -"consistências." - -msgid "volume_type_id cannot be None" -msgstr "volume_type_id não pode ser Nenhum" - -#, python-format -msgid "volume_types must be provided to create consistency group %(name)s." -msgstr "" -"volume_types deve ser fornecido para criar o grupo de consistência %(name)s." - -#, python-format -msgid "volume_types must be provided to create consistency group %s." -msgstr "" -"volume_types deve ser fornecido para criar o grupo de consistências %s." - -msgid "volumes assigned" -msgstr "volumes designados" - -msgid "volumes changed" -msgstr "volumes alterados" - -#, python-format -msgid "wait_for_condition: %s timed out." -msgstr "wait_for_condition: %s atingiu tempo limite." - -#, python-format -msgid "" -"zfssa_manage_policy property needs to be set to 'strict' or 'loose'. Current " -"value is: %s." -msgstr "" -"A propriedade zfssa_manage_policy precisa ser configurada para 'strict' ou " -"'loose'. O valor atual é: %s." diff --git a/cinder/locale/ru/LC_MESSAGES/cinder.po b/cinder/locale/ru/LC_MESSAGES/cinder.po deleted file mode 100644 index e8de937a5..000000000 --- a/cinder/locale/ru/LC_MESSAGES/cinder.po +++ /dev/null @@ -1,9690 +0,0 @@ -# Translations template for cinder. -# Copyright (C) 2015 ORGANIZATION -# This file is distributed under the same license as the cinder project. -# -# Translators: -# eshumakher, 2013 -# FIRST AUTHOR , 2011 -# Andreas Jaeger , 2016. 
#zanata -msgid "" -msgstr "" -"Project-Id-Version: cinder 9.0.0.0rc2.dev202\n" -"Report-Msgid-Bugs-To: https://bugs.launchpad.net/openstack-i18n/\n" -"POT-Creation-Date: 2016-10-07 03:25+0000\n" -"MIME-Version: 1.0\n" -"Content-Type: text/plain; charset=UTF-8\n" -"Content-Transfer-Encoding: 8bit\n" -"PO-Revision-Date: 2016-03-21 02:35+0000\n" -"Last-Translator: Grigory Mokhin \n" -"Language: ru\n" -"Plural-Forms: nplurals=4; plural=(n%10==1 && n%100!=11 ? 0 : n%10>=2 && n" -"%10<=4 && (n%100<12 || n%100>14) ? 1 : n%10==0 || (n%10>=5 && n%10<=9) || (n" -"%100>=11 && n%100<=14)? 2 : 3);\n" -"Generated-By: Babel 2.0\n" -"X-Generator: Zanata 3.7.3\n" -"Language-Team: Russian\n" - -#, python-format -msgid "\t%s" -msgstr "\t%s" - -#, python-format -msgid "" -"\n" -"OpenStack Cinder version: %(version)s\n" -msgstr "" -"\n" -"Версия OpenStack Cinder: %(version)s\n" - -#, python-format -msgid " but size is now %d" -msgstr " но сейчас размер: %d" - -#, python-format -msgid " but size is now %d." -msgstr " но сейчас размер %d." - -msgid " or " -msgstr "или " - -#, python-format -msgid "%(attr)s is not set." -msgstr "%(attr)s не задан." - -#, python-format -msgid "" -"%(driver)s manage_existing cannot manage a volume connected to hosts. Please " -"disconnect this volume from existing hosts before importing" -msgstr "" -"%(driver)s manage_existing не поддерживает управление томом, подключенным к " -"хостам. Отключите том от существующих хостов перед импортом" - -#, python-format -msgid "%(err)s" -msgstr "%(err)s" - -#, python-format -msgid "" -"%(err)s\n" -"result: %(res)s." -msgstr "" -"%(err)s\n" -"Результат: %(res)s." - -#, python-format -msgid "%(error_message)s" -msgstr "%(error_message)s" - -#, python-format -msgid "%(exception)s: %(explanation)s" -msgstr "%(exception)s: %(explanation)s" - -#, python-format -msgid "%(file)s: Permission denied." -msgstr "%(file)s: доступ запрещен." 
- -#, python-format -msgid "" -"%(fun)s: Failed with unexpected CLI output.\n" -" Command: %(cmd)s\n" -" stdout: %(out)s\n" -" stderr: %(err)s" -msgstr "" -"%(fun)s: Сбой с непредвиденным выводом CLI.\n" -" Команда: %(cmd)s\n" -" stdout: %(out)s\n" -" stderr: %(err)s" - -#, python-format -msgid "%(host)-25s\t%(availability_zone)-15s" -msgstr "%(host)-25s\t%(availability_zone)-15s" - -#, python-format -msgid "%(host)-25s\t%(zone)-15s" -msgstr "%(host)-25s\t%(zone)-15s" - -#, python-format -msgid "%(message)s" -msgstr "%(message)s" - -#, python-format -msgid "" -"%(message)s\n" -"Status Code: %(_status)s\n" -"Body: %(_body)s" -msgstr "" -"%(message)s\n" -"Код состояния: %(_status)s\n" -"Тело: %(_body)s" - -#, python-format -msgid "%(message)s, subjectAltName: %(sanList)s." -msgstr "%(message)s, subjectAltName: %(sanList)s." - -#, python-format -msgid "" -"%(msg_type)s: creating NetworkPortal: ensure port %(port)d on ip %(ip)s is " -"not in use by another service." -msgstr "" -"%(msg_type)s: создание NetworkPortal: убедитесь, что порт %(port)d на IP-" -"адресе %(ip)s не занят другой службой." - -#, python-format -msgid "" -"%(op)s: backup %(bck_id)s, volume %(vol_id)s failed. Backup object has " -"unexpected mode. Image or file backups supported, actual mode is " -"%(vol_mode)s." -msgstr "" -"%(op)s: не удалось создать резервную копию %(bck_id)s, том %(vol_id)s. " -"Объект резервной копии находится в неожиданном режиме. Поддерживаются " -"резервные копии файлов и образов. Фактический режим: %(vol_mode)s." 
- -#, python-format -msgid "%(service)s Service is not %(status)s on storage appliance: %(host)s" -msgstr "" -"Служба %(service)s не находится в состоянии %(status)s в устройстве хранения " -"%(host)s" - -#, python-format -msgid "%(value_name)s must be <= %(max_value)d" -msgstr "%(value_name)s должно быть <= %(max_value)d" - -#, python-format -msgid "%(value_name)s must be >= %(min_value)d" -msgstr "%(value_name)s должен быть >= %(min_value)d" - -#, python-format -msgid "" -"%(worker_name)s value of %(workers)d is invalid, must be greater than 0." -msgstr "" -"Значение %(worker_name)s %(workers)d недопустимо. Значение должно быть " -"больше 0." - -#, python-format -msgid "%s" -msgstr "%s" - -#, python-format -msgid "%s \"data\" is not in result." -msgstr "В результате нет элемента \"data\" %s." - -#, python-format -msgid "" -"%s cannot be accessed. Verify that GPFS is active and file system is mounted." -msgstr "" -"%s недоступен. Убедитесь, что GPFS активна и файловая система смонтирована." - -#, python-format -msgid "%s cannot be resized using clone operation as it contains no blocks." -msgstr "" -"Размер %s нельзя изменить с помощью операции дублирования, так как он не " -"содержит блоков." - -#, python-format -msgid "" -"%s cannot be resized using clone operation as it is hosted on compressed " -"volume" -msgstr "" -"Размер %s нельзя изменить с помощью операции дублирования, так как он " -"находится на сжатом томе" - -#, python-format -msgid "%s configuration option is not set." -msgstr "Не задан параметр конфигурации %s." - -#, python-format -msgid "%s does not exist." -msgstr "%s не существует." - -#, python-format -msgid "%s is not a directory." -msgstr "%s не является каталогом." - -#, python-format -msgid "%s is not installed" -msgstr "%s не установлен" - -#, python-format -msgid "%s is not installed." -msgstr "%s не установлен." 
- -#, python-format -msgid "%s is not set" -msgstr "%s - не множество" - -#, python-format -msgid "%s is not set and is required for the replication device to be valid." -msgstr "" -"Значение %s не задано. Оно необходимо для правильной работы устройства " -"репликации." - -#, python-format -msgid "%s is not set." -msgstr "%s не задан." - -#, python-format -msgid "%s must be a valid raw or qcow2 image." -msgstr "%s должно быть допустимым образом raw или qcow2." - -#, python-format -msgid "%s must be an absolute path." -msgstr "%s должен быть абсолютным путем." - -#, python-format -msgid "%s must be an integer." -msgstr "%s должен быть целым числом." - -#, python-format -msgid "%s not set in cinder.conf" -msgstr "%s не задан в cinder.conf" - -#, python-format -msgid "%s not set." -msgstr "%s не задан." - -#, python-format -msgid "" -"'%(prot)s' is invalid for flashsystem_connection_protocol in config file. " -"valid value(s) are %(enabled)s." -msgstr "" -"'%(prot)s' недопустим для flashsystem_connection_protocol в файле " -"конфигурации. Допустимые значения: %(enabled)s." - -msgid "'active' must be present when writing snap_info." -msgstr "Должно быть active при записи snap_info." - -msgid "'consistencygroup_id' must be specified" -msgstr "Необходимо указать consistencygroup_id" - -msgid "'qemu-img info' parsing failed." -msgstr "Ошибка анализа 'qemu-img info'." - -msgid "'status' must be specified." -msgstr "Должно быть указано значение status." - -msgid "'volume_id' must be specified" -msgstr "Должен быть указан параметр volume_id" - -#, python-format -msgid "" -"(Command: %(cmd)s) (Return Code: %(exit_code)s) (Stdout: %(stdout)s) " -"(Stderr: %(stderr)s)" -msgstr "" -"(Команда: %(cmd)s) (Код возврата: %(exit_code)s) (stdout: %(stdout)s) " -"(stderr: %(stderr)s)" - -#, python-format -msgid "A LUN (HLUN) was not found. (LDEV: %(ldev)s)" -msgstr "LUN (HLUN) не найден. (LDEV: %(ldev)s)" - -msgid "A concurrent, possibly contradictory, request has been made." 
-msgstr "Параллельно выполняется другой, вероятно, конфликтующий запрос." - -#, python-format -msgid "" -"A free LUN (HLUN) was not found. Add a different host group. (LDEV: %(ldev)s)" -msgstr "" -"Не найден свободный LUN (HLUN). Добавьте другую группу хостов. (LDEV: " -"%(ldev)s)" - -#, python-format -msgid "A host group could not be added. (port: %(port)s, name: %(name)s)" -msgstr "Не удалось добавить группу хостов. (порт: %(port)s, имя: %(name)s)" - -#, python-format -msgid "" -"A host group could not be deleted. (port: %(port)s, gid: %(gid)s, name: " -"%(name)s)" -msgstr "" -"Не удалось удалить группу хостов. (порт: %(port)s, ИД группы: %(gid)s, имя: " -"%(name)s)" - -#, python-format -msgid "A host group is invalid. (host group: %(gid)s)" -msgstr "Недопустимая группа хостов. (группа хостов: %(gid)s)" - -#, python-format -msgid "A pair cannot be deleted. (P-VOL: %(pvol)s, S-VOL: %(svol)s)" -msgstr "Не удалось удалить пару. (P-VOL: %(pvol)s, S-VOL: %(svol)s)" - -#, python-format -msgid "" -"A pair could not be created. The maximum number of pair is exceeded. (copy " -"method: %(copy_method)s, P-VOL: %(pvol)s)" -msgstr "" -"Не удалось создать пару. Превышено максимальное число пар. (метод " -"копирования: %(copy_method)s, P-VOL: %(pvol)s)" - -#, python-format -msgid "A parameter is invalid. (%(param)s)" -msgstr "Недопустимый параметр. (%(param)s)" - -#, python-format -msgid "A parameter value is invalid. (%(meta)s)" -msgstr "Недопустимое значение параметра. (%(meta)s)" - -#, python-format -msgid "A pool could not be found. (pool id: %(pool_id)s)" -msgstr "Не найден пул. (ИД пул: %(pool_id)s)" - -#, python-format -msgid "A snapshot status is invalid. (status: %(status)s)" -msgstr "Недопустимое состояние моментальной копии. (состояние: %(status)s)" - -msgid "A valid secondary target MUST be specified in order to failover." -msgstr "" -"Для переключения после сбоя необходимо указать допустимый вторичный целевой " -"объект." 
- -msgid "A volume ID or share was not specified." -msgstr "Не указан ИД тома или общий ресурс." - -#, python-format -msgid "A volume status is invalid. (status: %(status)s)" -msgstr "Недопустимое состояние тома. (состояние: %(status)s)" - -#, python-format -msgid "API %(name)s failed with error string %(err)s" -msgstr "Сбой API %(name)s, строка ошибки %(err)s" - -#, python-format -msgid "" -"API Version String %(version)s is of invalid format. Must be of format " -"MajorNum.MinorNum." -msgstr "" -"Недопустимый формат строки версии API %(version)s. Требуется формат: " -"MajorNum.MinorNum." - -msgid "API key is missing for CloudByte driver." -msgstr "Отсутствует ключ API для драйвера CloudByte." - -#, python-format -msgid "API response: %(response)s" -msgstr "Ответ API: %(response)s" - -#, python-format -msgid "API response: %s" -msgstr "Ответ API: %s" - -#, python-format -msgid "API version %(version)s is not supported on this method." -msgstr "Версия API %(version)s не поддерживается этим методом." - -msgid "API version could not be determined." -msgstr "Не удалось определить версию API." - -msgid "" -"About to delete child projects having non-zero quota. This should not be " -"performed" -msgstr "" -"Попытка удаления дочерних проектов с ненулевой квотой. Это не следует делать" - -msgid "Access list not available for public volume types." -msgstr "Список прав доступа недоступен для общедоступных типов томов." - -msgid "Activate or deactivate QoS error." -msgstr "Ошибка активации/деактивации QoS." - -msgid "Activate snapshot error." -msgstr "Ошибка активации моментальной копии." - -msgid "Add FC port to host error." -msgstr "Ошибка добавления порта Fibre Channel к хосту." - -msgid "Add fc initiator to array error." -msgstr "Ошибка добавления инициатора Fibre Channel в массив." - -msgid "Add initiator to array error." -msgstr "Ошибка добавления инициатора в массив." - -msgid "Add lun to cache error." -msgstr "Ошибка добавления LUN в кэш." 
- -msgid "Add lun to partition error." -msgstr "Ошибка добавления LUN в раздел." - -msgid "Add mapping view error." -msgstr "Ошибка добавления представления связей." - -msgid "Add new host error." -msgstr "Ошибка добавления нового хоста." - -msgid "Add port to port group error." -msgstr "Ошибка добавления порта в группу портов." - -#, python-format -msgid "" -"All the specified storage pools to be managed do not exist. Please check " -"your configuration. Non-existent pools: %s" -msgstr "" -"Все указанные пулы памяти для управления не существуют. Проверьте " -"конфигурацию. Несуществующие пулы: %s" - -msgid "An API version request must be compared to a VersionedMethod object." -msgstr "Запрос версии API должен сравниваться с объектом VersionedMethod." - -msgid "An error has occurred during backup operation" -msgstr "Ошибка операции резервного копирования" - -#, python-format -msgid "" -"An error occurred during the LUNcopy operation. LUNcopy name: " -"%(luncopyname)s. LUNcopy status: %(luncopystatus)s. LUNcopy state: " -"%(luncopystate)s." -msgstr "" -"Ошибка во время операции LUNcopy. Имя LUNcopy: %(luncopyname)s. Статус " -"LUNcopy: %(luncopystatus)s. Состояние LUNcopy: %(luncopystate)s." - -#, python-format -msgid "An error occurred while reading volume \"%s\"." -msgstr "Возникла ошибка при чтении тома \"%s\"." - -#, python-format -msgid "An error occurred while writing to volume \"%s\"." -msgstr "Возникла ошибка при записи на том \"%s\"." - -#, python-format -msgid "An iSCSI CHAP user could not be added. (username: %(user)s)" -msgstr "" -"Не удалось добавить пользователя CHAP iSCSI. (имя пользователя: %(user)s)" - -#, python-format -msgid "An iSCSI CHAP user could not be deleted. (username: %(user)s)" -msgstr "" -"Не удалось удалить пользователя CHAP iSCSI. (имя пользователя: %(user)s)" - -#, python-format -msgid "" -"An iSCSI target could not be added. 
(port: %(port)s, alias: %(alias)s, " -"reason: %(reason)s)" -msgstr "" -"Не удалось добавить целевое расположение iSCSI. (порт: %(port)s, псевдоним: " -"%(alias)s, причина: %(reason)s)" - -#, python-format -msgid "" -"An iSCSI target could not be deleted. (port: %(port)s, tno: %(tno)s, alias: " -"%(alias)s)" -msgstr "" -"Не удалось удалить целевое расположение iSCSI. (порт: %(port)s, номер " -"целевого расположения: %(tno)s, псевдоним: %(alias)s)" - -msgid "An unknown exception occurred." -msgstr "Обнаружено неизвестное исключение." - -msgid "" -"An user with a token scoped to a subproject is not allowed to see the quota " -"of its parents." -msgstr "" -"Пользователи с маркером, связанным с подпроектом, не могут видеть квоту " -"родительских объектов." - -msgid "Append port group description error." -msgstr "Ошибка добавления описания группы портов." - -#, python-format -msgid "" -"Applying the zones and cfgs to the switch failed (error code=%(err_code)s " -"error msg=%(err_msg)s." -msgstr "" -"Не удалось применить зоны и конфигурации для коммутатора (код ошибки=" -"%(err_code)s сообщение об ошибке=%(err_msg)s." - -#, python-format -msgid "Array does not exist or is offline. Current status of array is %s." -msgstr "Массив не существует или выключен. Текущее состояние массива: %s." - -msgid "Associate host to hostgroup error." -msgstr "Ошибка связывания хоста с группой хостов." - -msgid "Associate host to mapping view error." -msgstr "Ошибка связывания хоста с представлением связей." - -msgid "Associate initiator to host error." -msgstr "Ошибка связывания инициатора с хостом." - -msgid "Associate lun to QoS error." -msgstr "Ошибка связывания LUN с QoS." - -msgid "Associate lun to lungroup error." -msgstr "Ошибка связывания LUN с группой LUN." - -msgid "Associate lungroup to mapping view error." -msgstr "Ошибка связывания группы LUN с представлением связей." - -msgid "Associate portgroup to mapping view error." 
-msgstr "Ошибка связывания группы портов с представлением связей." - -msgid "At least one valid iSCSI IP address must be set." -msgstr "Необходимо указать хотя бы один допустимый IP-адрес iSCSI." - -#, python-format -msgid "Attempt to transfer %s with invalid auth key." -msgstr "Попытка передачи %s с недопустимым ключом авторизации." - -#, python-format -msgid "Auth group [%s] details not found in CloudByte storage." -msgstr "Не найдена информация группы идентификации [%s] в хранилище CloudByte." - -msgid "Auth user details not found in CloudByte storage." -msgstr "" -"В хранилище CloudByte не найдена информация о пользователе для идентификации." - -#, python-format -msgid "Authentication failed, verify the switch credentials, error code %s." -msgstr "" -"Ошибка идентификации. Проверьте идентификационные данные. Код ошибки %s." - -#, python-format -msgid "Availability zone '%(s_az)s' is invalid." -msgstr "Зона доступности %(s_az)s недопустима." - -msgid "Available categories:" -msgstr "Доступные категории:" - -msgid "" -"Back-end QoS specs are not supported on this storage family and ONTAP " -"version." -msgstr "" -"Спецификации QoS базовой системы не поддерживаются в этом семействе систем " -"хранения и версии ONTAP." - -#, python-format -msgid "Backend doesn't exist (%(backend)s)" -msgstr "Базовый сервер не существует (%(backend)s)" - -#, python-format -msgid "Backend reports: %(message)s" -msgstr "Отчеты базовой программы: %(message)s" - -msgid "Backend reports: item already exists" -msgstr "Отчеты базовой программы: элемент уже существует" - -msgid "Backend reports: item not found" -msgstr "Отчеты базовой программы: элемент не найден" - -#, python-format -msgid "Backend service retry timeout hit: %(timeout)s sec" -msgstr "Тайм-аут повторов службы базовой программы: %(timeout)s с" - -msgid "Backend storage did not configure fiber channel target." -msgstr "Память базовой системы не настроила цель оптоволоконного канала." 
- -msgid "Backing up an in-use volume must use the force flag." -msgstr "" -"Для резервного копирования используемого тома требуется флаг принудительного " -"выполнения." - -#, python-format -msgid "Backup %(backup_id)s could not be found." -msgstr "Не удалось найти резервную копию %(backup_id)s." - -msgid "Backup RBD operation failed" -msgstr "Сбой операции резервного копирования RBD" - -msgid "Backup already exists in database." -msgstr "Резервная копия уже есть в базе данных." - -#, python-format -msgid "Backup driver reported an error: %(message)s" -msgstr "Драйвер резервного копирования выдал ошибку: %(message)s" - -msgid "Backup id required" -msgstr "Требуется ИД резервной копии" - -msgid "Backup is not supported for GlusterFS volumes with snapshots." -msgstr "" -"Резервное копирование не поддерживается для томов GlusterFS с моментальными " -"копиями." - -msgid "Backup is only supported for SOFS volumes without backing file." -msgstr "" -"Резервное копирование поддерживается только для томов SOFS с форматированием " -"raw без базового файла." - -msgid "Backup is only supported for raw-formatted GlusterFS volumes." -msgstr "" -"Резервное копирование поддерживается только для томов GlusterFS с " -"форматированием raw." - -msgid "Backup is only supported for raw-formatted SOFS volumes." -msgstr "" -"Резервное копирование поддерживается только для томов SOFS с форматированием " -"raw." - -msgid "Backup operation of an encrypted volume failed." -msgstr "Операция резервного копирования зашифрованного тома не выполнена." - -#, python-format -msgid "" -"Backup service %(configured_service)s does not support verify. Backup id " -"%(id)s is not verified. Skipping verify." -msgstr "" -"Служба резервного копирования %(configured_service)s не поддерживает " -"проверку. ИД резервной копии %(id)s не проверен. Пропуск проверки." - -#, python-format -msgid "" -"Backup service %(service)s does not support verify. Backup id %(id)s is not " -"verified. Skipping reset." 
-msgstr "" -"Служба резервного копирования %(service)s не поддерживает проверку. ИД " -"резервной копии %(id)s не проверен. Пропуск сброса." - -#, python-format -msgid "Backup should only have one snapshot but instead has %s" -msgstr "" -"Резервная копия должна иметь только одну моментальную копию, а имеет %s" - -msgid "Backup status must be available" -msgstr "Состояние резервной копии должно быть доступным" - -#, python-format -msgid "Backup status must be available and not %s." -msgstr "Состояние резервной копии должно быть available, а не '%s'." - -msgid "Backup status must be available or error" -msgstr "Требуемое состояние резервной копии: доступен или ошибка" - -msgid "Backup to be restored has invalid size" -msgstr "Резервная копия для восстановления имеет недопустимый размер" - -#, python-format -msgid "Bad Status line returned: %(arg)s." -msgstr "Возвращена неверная строка состояния: %(arg)s." - -#, python-format -msgid "Bad key(s) in quota set: %s" -msgstr "Неверные ключи в наборе квот: %s" - -#, python-format -msgid "" -"Bad or unexpected response from the storage volume backend API: %(data)s" -msgstr "" -"Неправильный или непредвиденный ответ от API базовой программы тома " -"хранилища: %(data)s" - -#, python-format -msgid "Bad project format: project is not in proper format (%s)" -msgstr "Неправильный формат проекта: проект имеет неправильный формат (%s)" - -msgid "Bad response from Datera API" -msgstr "Неправильный ответ API Datera" - -msgid "Bad response from SolidFire API" -msgstr "Неправильный ответ от API SolidFire" - -#, python-format -msgid "Bad response from XMS, %s" -msgstr "Неверный ответ из XMS, %s" - -msgid "Binary" -msgstr "Двоичный" - -msgid "Blank components" -msgstr "Пустые компоненты" - -msgid "Blockbridge api host not configured" -msgstr "Не настроен хост API Blockbridge" - -#, python-format -msgid "Blockbridge configured with invalid auth scheme '%(auth_scheme)s'" -msgstr "" -"Blockbridge настроен с недопустимой схемой идентификации 
'%(auth_scheme)s'" - -msgid "Blockbridge default pool does not exist" -msgstr "Пул Blockbridge по умолчанию не существует" - -msgid "" -"Blockbridge password not configured (required for auth scheme 'password')" -msgstr "" -"Не настроен пароль Blockbridge (требуется для схемы идентификации 'password')" - -msgid "Blockbridge pools not configured" -msgstr "Пулы Blockbridge не настроены" - -msgid "Blockbridge token not configured (required for auth scheme 'token')" -msgstr "" -"Не настроен маркер Blockbridge (требуется для схемы идентификации 'token')" - -msgid "Blockbridge user not configured (required for auth scheme 'password')" -msgstr "" -"Не настроен пользователь Blockbridge (требуется для схемы идентификации " -"'password')" - -#, python-format -msgid "Brocade Fibre Channel Zoning CLI error: %(reason)s" -msgstr "Ошибка CLI зонирования Brocade Fibre Channel: %(reason)s" - -#, python-format -msgid "Brocade Fibre Channel Zoning HTTP error: %(reason)s" -msgstr "Ошибка HTTP зонирования Brocade Fibre Channel: %(reason)s" - -msgid "CHAP secret should be 12-16 bytes." -msgstr "Пароль CHAP должен быть от 12 до 16 байт." - -#, python-format -msgid "" -"CLI Exception output:\n" -" command: %(cmd)s\n" -" stdout: %(out)s\n" -" stderr: %(err)s" -msgstr "" -"Вывод исключительной ситуации CLI:\n" -" команда: %(cmd)s\n" -" stdout: %(out)s\n" -" stderr: %(err)s" - -#, python-format -msgid "" -"CLI Exception output:\n" -" command: %(cmd)s\n" -" stdout: %(out)s\n" -" stderr: %(err)s." -msgstr "" -"Вывод исключительной ситуации CLI:\n" -" команда: %(cmd)s\n" -" stdout: %(out)s\n" -" stderr: %(err)s." 
- -msgid "" -"CMMVC6071E The VDisk-to-host mapping was not created because the VDisk is " -"already mapped to a host.\n" -"\"" -msgstr "" -"CMMVC6071E Связь VDisk с хостом не создана, так как VDisk уже связан с " -"хостом.\n" -"\"" - -msgid "CONCERTO version is not supported" -msgstr "Версия CONCERTO не поддерживается" - -#, python-format -msgid "CPG (%s) doesn't exist on array" -msgstr "CPG (%s) не существует в массиве" - -msgid "Cache name is None, please set smartcache:cachename in key." -msgstr "Имя кэша - None. Укажите smartcache:cachename в ключе." - -#, python-format -msgid "Cache volume %s does not have required properties" -msgstr "Для тома кэша %s не заданы обязательные свойства" - -msgid "Call returned a None object" -msgstr "Вызов возвратил объект None" - -msgid "Can not add FC port to host." -msgstr "Не удалось добавить порт Fibre Channel в хост." - -#, python-format -msgid "Can not find cache id by cache name %(name)s." -msgstr "Не удалось найти ИД кэша по имени кэша %(name)s." - -#, python-format -msgid "Can not find partition id by name %(name)s." -msgstr "Не найден ИД раздела по имени %(name)s." - -#, python-format -msgid "Can not get pool info. pool: %s" -msgstr "Не удаётся получить информацию о пуле: %s" - -#, python-format -msgid "Can not translate %s to integer." -msgstr "Невозможно преобразовать %s в целое число." - -#, python-format -msgid "Can't access 'scality_sofs_config': %s" -msgstr "Нет доступа к scality_sofs_config: %s" - -msgid "Can't decode backup record." -msgstr "Не удалось декодировать запись резервной копии." - -#, python-format -msgid "Can't extend replication volume, volume: %(id)s" -msgstr "Невозможно расширить том репликации %(id)s" - -msgid "Can't find LUN on the array, please check the source-name or source-id." -msgstr "" -"Не найден LUN массива, проверьте правильность source-name или source-id." - -#, python-format -msgid "Can't find cache name on the array, cache name is: %(name)s." 
-msgstr "Не найдено имя кэша в массиве. Имя кэша: %(name)s." - -#, python-format -msgid "Can't find lun info on the array. volume: %(id)s, lun name: %(name)s." -msgstr "" -"Не удалось найти информацию о lun в массиве. Том: %(id)s, имя lun: %(name)s." - -#, python-format -msgid "Can't find partition name on the array, partition name is: %(name)s." -msgstr "Не найдено имя раздела в массиве. Имя раздела: %(name)s." - -#, python-format -msgid "Can't find service: %s" -msgstr "Служба не найдена: %s" - -msgid "" -"Can't find snapshot on array, please check the source-name or source-id." -msgstr "" -"Не найдена моментальная копия в массиве, проверьте правильность source-name " -"или source-id." - -msgid "Can't find the same host id from arrays." -msgstr "Не найден такой ИД хоста в массивах." - -#, python-format -msgid "Can't get volume id from snapshot, snapshot: %(id)s" -msgstr "Не удалось получить ИД тома из моментальной копии: %(id)s" - -#, python-format -msgid "Can't get volume id. Volume name: %s." -msgstr "Не удаётся получить ИД тома. Имя тома: %s." - -#, python-format -msgid "Can't import LUN %(lun_id)s to Cinder. LUN type mismatched." -msgstr "" -"Не удается импортировать LUN %(lun_id)s в Cinder. Несовпадение типов LUN." - -#, python-format -msgid "Can't import LUN %s to Cinder. Already exists in a HyperMetroPair." -msgstr "" -"Не удается импортировать LUN %s в Cinder. Он уже есть в HyperMetroPair." - -#, python-format -msgid "Can't import LUN %s to Cinder. Already exists in a LUN copy task." -msgstr "" -"Не удается импортировать LUN %s в Cinder. Он уже есть в задаче копирования " -"LUN." - -#, python-format -msgid "Can't import LUN %s to Cinder. Already exists in a LUN group." -msgstr "Не удается импортировать LUN %s в Cinder. Он уже есть в группе LUN." - -#, python-format -msgid "Can't import LUN %s to Cinder. Already exists in a LUN mirror." -msgstr "Не удается импортировать LUN %s в Cinder. Он уже есть в зеркале LUN." 
- -#, python-format -msgid "Can't import LUN %s to Cinder. Already exists in a SplitMirror." -msgstr "Не удается импортировать LUN %s в Cinder. Он уже есть в SplitMirror." - -#, python-format -msgid "Can't import LUN %s to Cinder. Already exists in a migration task." -msgstr "" -"Не удается импортировать LUN %s в Cinder. Он уже есть в задаче переноса." - -#, python-format -msgid "" -"Can't import LUN %s to Cinder. Already exists in a remote replication task." -msgstr "" -"Не удается импортировать LUN %s в Cinder. Он уже есть в задаче удаленной " -"репликации." - -#, python-format -msgid "Can't import LUN %s to Cinder. LUN status is not normal." -msgstr "" -"Не удается импортировать LUN %s в Cinder. Состояние LUN указывает на ошибку." - -#, python-format -msgid "Can't import snapshot %s to Cinder. Snapshot doesn't belong to volume." -msgstr "" -"Не удается импортировать моментальную копию %s в Cinder. Моментальная копия " -"не принадлежит тому." - -#, python-format -msgid "Can't import snapshot %s to Cinder. Snapshot is exposed to initiator." -msgstr "" -"Не удается импортировать моментальную копию %s в Cinder. Моментальная копия " -"экспортирована для инициатора." - -#, python-format -msgid "" -"Can't import snapshot %s to Cinder. Snapshot status is not normal or running " -"status is not online." -msgstr "" -"Не удается импортировать моментальную копию %s в Cinder. Состояние " -"моментальной копии указывает на ошибку или на то, что она недоступна." - -msgid "Can't parse backup record." -msgstr "Не удалось проанализировать запись резервной копии." - -#, python-format -msgid "" -"Cannot add volume %(volume_id)s to consistency group %(group_id)s because it " -"has no volume type." -msgstr "" -"Не удалось добавить том %(volume_id)s в группу согласования %(group_id)s: " -"том не имеет типа тома." - -#, python-format -msgid "" -"Cannot add volume %(volume_id)s to consistency group %(group_id)s because it " -"is already in consistency group %(orig_group)s." 
-msgstr "" -"Не удалось добавить том %(volume_id)s в группу согласования %(group_id)s: он " -"уже есть в группе согласования %(orig_group)s." - -#, python-format -msgid "" -"Cannot add volume %(volume_id)s to consistency group %(group_id)s because " -"volume cannot be found." -msgstr "" -"Не удалось добавить том %(volume_id)s в группу согласования %(group_id)s: " -"том не найден." - -#, python-format -msgid "" -"Cannot add volume %(volume_id)s to consistency group %(group_id)s because " -"volume does not exist." -msgstr "" -"Не удалось добавить том %(volume_id)s в группу согласования %(group_id)s: " -"том не существует." - -#, python-format -msgid "" -"Cannot add volume %(volume_id)s to consistency group %(group_id)s because " -"volume is in an invalid state: %(status)s. Valid states are: %(valid)s." -msgstr "" -"Не удалось добавить том %(volume_id)s в группу согласования %(group_id)s: " -"том находится в недопустимом состоянии %(status)s. Допустимые состояния: " -"%(valid)s." - -#, python-format -msgid "" -"Cannot add volume %(volume_id)s to consistency group %(group_id)s because " -"volume type %(volume_type)s is not supported by the group." -msgstr "" -"Не удалось добавить том %(volume_id)s в группу согласования %(group_id)s: " -"тип тома %(volume_type)s не поддерживается группой." - -#, python-format -msgid "" -"Cannot attach already attached volume %s; multiattach is disabled via the " -"'netapp_enable_multiattach' configuration option." -msgstr "" -"Невозможно подключить уже подключенный том %s. Множественное подключение " -"выключено в параметре конфигурации 'netapp_enable_multiattach'." - -msgid "Cannot change VF context in the session." -msgstr "Не удаётся изменить контекст VF в сеансе." - -#, python-format -msgid "" -"Cannot change VF context, specified VF is not available in the manageable VF " -"list %(vf_list)s." -msgstr "" -"Не удаётся изменить контекст VF, данный VF недоступен в списке управляемых " -"VF %(vf_list)s." 
- -msgid "Cannot connect to ECOM server." -msgstr "Не удалось подключиться к серверу ECOM." - -#, python-format -msgid "" -"Cannot create consistency group %(group)s because snapshot %(snap)s is not " -"in a valid state. Valid states are: %(valid)s." -msgstr "" -"Не удалось создать группу согласования %(group)s, поскольку моментальная " -"копия %(snap)s не находится в допустимом состоянии. Допустимые состояния: " -"%(valid)s." - -#, python-format -msgid "" -"Cannot create consistency group %(group)s because source volume " -"%(source_vol)s is not in a valid state. Valid states are: %(valid)s." -msgstr "" -"Не удалось создать группу согласования %(group)s, поскольку исходный том " -"%(source_vol)s не в допустимом состоянии. Допустимые состояния: %(valid)s." - -#, python-format -msgid "Cannot create directory %s." -msgstr "Не удалось создать каталог %s." - -msgid "Cannot create encryption specs. Volume type in use." -msgstr "Невозможно создать спецификацию шифрования. Тип тома используется." - -#, python-format -msgid "" -"Cannot create image of disk format: %s. Only vmdk disk format is accepted." -msgstr "" -"Невозможно создать образ формата диска %s. Принимается только формат диска " -"vmdk." - -#, python-format -msgid "Cannot create masking view: %(maskingViewName)s. " -msgstr "Создать маскирующее представление %(maskingViewName)s невозможно. " - -#, python-format -msgid "" -"Cannot create more than %(req)s volumes on the ESeries array when " -"'netapp_enable_multiattach' is set to true." -msgstr "" -"Нельзя создать больше %(req)s томов в массиве ESeries, когда параметр " -"'netapp_enable_multiattach' равен true." - -#, python-format -msgid "Cannot create or find an storage group with name %(sgGroupName)s." -msgstr "" -"Не удалось создать или найти группу носителей с именем %(sgGroupName)s." - -#, python-format -msgid "Cannot create volume of size %s: not multiple of 8GB." -msgstr "Не удалось создать том размера %s: не кратен 8 ГБ." 
- -#, python-format -msgid "Cannot create volume_type with name %(name)s and specs %(extra_specs)s" -msgstr "" -"Невозможно создать volume_type с именем %(name)s и спецификациями " -"%(extra_specs)s" - -#, python-format -msgid "Cannot delete LUN %s while snapshots exist." -msgstr "Нельзя удалить LUN %s, когда есть моментальные копии." - -#, python-format -msgid "" -"Cannot delete cache volume: %(cachevol_name)s. It was updated at " -"%(updated_at)s and currently has %(numclones)d volume instances." -msgstr "" -"Не удалось удалить том кэша %(cachevol_name)s. Он обновлен в%(updated_at)s и " -"сейчас содержит %(numclones)d экземпляры тома." - -#, python-format -msgid "" -"Cannot delete cache volume: %(cachevol_name)s. It was updated at " -"%(updated_at)s and currently has %(numclones)s volume instances." -msgstr "" -"Не удалось удалить том кэша %(cachevol_name)s. Он обновлен в %(updated_at)s " -"и сейчас содержит %(numclones)s экземпляры тома." - -msgid "Cannot delete encryption specs. Volume type in use." -msgstr "Невозможно удалить спецификацию шифрования. Тип тома используется." - -msgid "Cannot determine storage pool settings." -msgstr "Не удается определить параметры пула памяти." - -msgid "Cannot execute /sbin/mount.sofs" -msgstr "Не удалось выполнить /sbin/mount.sofs" - -#, python-format -msgid "Cannot find CG group %s." -msgstr "Не найдена группа согласования %s." - -#, python-format -msgid "" -"Cannot find Controller Configuration Service for storage system " -"%(storage_system)s." -msgstr "" -"Не удалось найти службу конфигурации контроллеров для системы хранения " -"%(storage_system)s." - -#, python-format -msgid "Cannot find Replication Service to create volume for snapshot %s." -msgstr "" -"Не найдена служба репликации для создания тома для моментальной копии %s." - -#, python-format -msgid "Cannot find Replication Service to delete snapshot %s." -msgstr "Не найдена служба репликации для удаления моментальной копии %s." 
- -#, python-format -msgid "Cannot find Replication service on system %s." -msgstr "Не найдена служба репликации в системе %s." - -#, python-format -msgid "Cannot find Volume: %(id)s. unmanage operation. Exiting..." -msgstr "Не найден том %(id)s. Операция удаления из управления. Выход..." - -#, python-format -msgid "Cannot find Volume: %(volumename)s. Extend operation. Exiting...." -msgstr "Том %(volumename)s не найден. Операция Extend. Выполняется выход..." - -#, python-format -msgid "Cannot find device number for volume %(volumeName)s." -msgstr "Не найден номер устройства для тома %(volumeName)s." - -msgid "Cannot find migration task." -msgstr "Не найдена задача переноса." - -#, python-format -msgid "Cannot find replication service on system %s." -msgstr "Не найдена служба репликации в системе %s." - -#, python-format -msgid "Cannot find source CG instance. consistencygroup_id: %s." -msgstr "" -"Не найден исходный экземпляр группы согласования. consistencygroup_id: %s." - -#, python-format -msgid "Cannot get mcs_id by channel id: %(channel_id)s." -msgstr "Не удалось получить mcs_id по ИД канала: %(channel_id)s." - -msgid "Cannot get necessary pool or storage system information." -msgstr "" -"Не удалось получить необходимую информацию о пуле или системе хранения." - -#, python-format -msgid "" -"Cannot get or create a storage group: %(sgGroupName)s for volume " -"%(volumeName)s " -msgstr "" -"Получить или создать группу носителей %(sgGroupName)s для тома " -"%(volumeName)s невозможно" - -#, python-format -msgid "Cannot get or create initiator group: %(igGroupName)s. " -msgstr "Cannot get or create initiator group: %(igGroupName)s. " - -#, python-format -msgid "Cannot get port group: %(pgGroupName)s. " -msgstr "Получить группу портов %(pgGroupName)s невозможно. " - -#, python-format -msgid "" -"Cannot get storage group: %(sgGroupName)s from masking view " -"%(maskingViewInstanceName)s. 
" -msgstr "" -"Не удалось получить группу носителей %(sgGroupName)s от маскирующего " -"представления %(maskingViewInstanceName)s. " - -#, python-format -msgid "" -"Cannot get supported size range for %(sps)s Return code: %(rc)lu. Error: " -"%(error)s." -msgstr "" -"Не удалось получить диапазон поддерживаемых размеров для %(sps)s. Код " -"возврата: %(rc)lu. Ошибка: %(error)s." - -#, python-format -msgid "" -"Cannot get the default storage group for FAST policy: %(fastPolicyName)s." -msgstr "" -"Не удалось получить группу носителей по умолчанию для стратегии FAST " -"%(fastPolicyName)s." - -msgid "Cannot get the portgroup from the masking view." -msgstr "Не удается получить группу портов из маскирующего представления." - -msgid "Cannot mount Scality SOFS, check syslog for errors" -msgstr "" -"Не удалось смонтировать Scality SOFS. Проверьте системный протокол на " -"наличие ошибок" - -msgid "Cannot ping DRBDmanage backend" -msgstr "Не удалось проверить связь с базовой программой DRBDmanage" - -#, python-format -msgid "Cannot place volume %(id)s on %(host)s" -msgstr "Не удалось поместить том %(id)s на %(host)s" - -#, python-format -msgid "" -"Cannot provide both 'cgsnapshot_id' and 'source_cgid' to create consistency " -"group %(name)s from source." -msgstr "" -"Нельзя одновременно указывать 'cgsnapshot_id' и 'source_cgid' для создания " -"группы согласования %(name)s из источника." - -msgid "Cannot register resource" -msgstr "Не удалось зарегистрировать ресурс" - -msgid "Cannot register resources" -msgstr "Не удалось зарегистрировать ресурсы" - -#, python-format -msgid "" -"Cannot remove volume %(volume_id)s from consistency group %(group_id)s " -"because it is not in the group." -msgstr "" -"Не удалось удалить том %(volume_id)s из группы согласования %(group_id)s: он " -"не находится в группе." - -#, python-format -msgid "" -"Cannot remove volume %(volume_id)s from consistency group %(group_id)s " -"because volume is in an invalid state: %(status)s. 
Valid states are: " -"%(valid)s." -msgstr "" -"Не удалось удалить том %(volume_id)s из группы согласования %(group_id)s: " -"том находится в недопустимом состоянии %(status)s. Допустимые состояния: " -"%(valid)s." - -#, python-format -msgid "Cannot retype from HPE3PARDriver to %s." -msgstr "Изменить тип с HPE3PARDriver на %s невозможно." - -msgid "Cannot retype from one 3PAR array to another." -msgstr "Изменить тип с одного массива 3PAR на другой невозможно." - -msgid "Cannot retype to a CPG in a different domain." -msgstr "Невозможно изменить тип на CPG из другого домена." - -msgid "Cannot retype to a snap CPG in a different domain." -msgstr "Невозможно изменить тип на snapCPG из другого домена." - -msgid "" -"Cannot run vgc-cluster command, please ensure software is installed and " -"permissions are set properly." -msgstr "" -"Не удалось выполнить команду vgc-cluster. Убедитесь, что программное " -"обеспечение установлено и права доступа настроены правильно." - -msgid "Cannot set both hitachi_serial_number and hitachi_unit_name." -msgstr "" -"hitachi_serial_number и hitachi_unit_name нельзя указывать одновременно." - -msgid "Cannot specify both protection domain name and protection domain id." -msgstr "Нельзя одновременно указывать имя домена защиты и ИД домена защиты." - -msgid "Cannot specify both storage pool name and storage pool id." -msgstr "Нельзя одновременно указывать имя пула памяти и ИД пула памяти." - -#, python-format -msgid "" -"Cannot update consistency group %(group_id)s because no valid name, " -"description, add_volumes, or remove_volumes were provided." -msgstr "" -"Не удалось обновить группу согласования %(group_id)s: недопустимое значение " -"параметра name, description, add_volumes или remove_volumes." - -msgid "Cannot update encryption specs. Volume type in use." -msgstr "Невозможно обновить спецификацию шифрования. Тип тома используется." 
- -#, python-format -msgid "Cannot update volume_type %(id)s" -msgstr "Не удалось обновить volume_type %(id)s" - -#, python-format -msgid "Cannot verify the existence of object:%(instanceName)s." -msgstr "Не удается проверить существование объекта %(instanceName)s." - -#, python-format -msgid "CgSnapshot %(cgsnapshot_id)s could not be found." -msgstr "Моментальная копия группы согласования %(cgsnapshot_id)s не найдена." - -msgid "Cgsnahost is empty. No consistency group will be created." -msgstr "Пустой cgsnahost. Группа согласования создана не будет." - -msgid "Change hostlun id error." -msgstr "Ошибка изменения ИД lun хоста." - -msgid "Change lun priority error." -msgstr "Ошибка изменения приоритета LUN." - -msgid "Change lun smarttier policy error." -msgstr "Ошибка изменения стратегии smarttier LUN." - -#, python-format -msgid "" -"Change would make usage less than 0 for the following resources: %(unders)s" -msgstr "" -"Изменение будет использовать менее 0 для следующих ресурсов: %(unders)s" - -msgid "Check access permissions for the ZFS share assigned to this driver." -msgstr "" -"Проверьте права доступа для общего раздела ZFS, присвоенного этому драйверу." - -msgid "Check hostgroup associate error." -msgstr "Ошибка проверки связывания группы хостов." - -msgid "Check initiator added to array error." -msgstr "Ошибка проверки инициатора, добавленного в массив." - -msgid "Check initiator associated to host error." -msgstr "Ошибка проверки инициатора, связанного с хостом." - -msgid "Check lungroup associate error." -msgstr "Ошибка проверки связывания группы LUN." - -msgid "Check portgroup associate error." -msgstr "Ошибка проверки связывания группы портов." - -msgid "" -"Check the state of the http service. Also ensure that the https port number " -"is the same as the one specified in cinder.conf." -msgstr "" -"Проверьте состояние службы http. Убедитесь также, что номер порта https " -"number совпадает с указанным в cinder.conf." 
- -msgid "Chunk size is not multiple of block size for creating hash." -msgstr "Размер фрагмента не кратен размеру блока для создания хэша." - -#, python-format -msgid "Cisco Fibre Channel Zoning CLI error: %(reason)s" -msgstr "Ошибка CLI зонирования Cisco Fibre Channel: %(reason)s" - -#, python-format -msgid "Clone feature is not licensed on %(storageSystem)s." -msgstr "Функция создания копии не лицензирована в %(storageSystem)s." - -#, python-format -msgid "" -"Clone type '%(clone_type)s' is invalid; valid values are: '%(full_clone)s' " -"and '%(linked_clone)s'." -msgstr "" -"Недопустимый тип дубликата '%(clone_type)s'. Допустимые типы: " -"'%(full_clone)s' и '%(linked_clone)s'." - -msgid "" -"Cluster is not formatted. You should probably perform \"dog cluster format\"." -msgstr "" -"Кластер не отформатирован. Необходимо выполнить команду \"dog cluster format" -"\"." - -#, python-format -msgid "Coho Data Cinder driver failure: %(message)s" -msgstr "Ошибка драйвера Coho Data Cinder: %(message)s" - -msgid "Coho rpc port is not configured" -msgstr "Не настроен порт RPC Coho" - -#, python-format -msgid "Command %(cmd)s blocked in the CLI and was cancelled" -msgstr "Команда %(cmd)s заблокирована в CLI и была отменена" - -#, python-format -msgid "CommandLineHelper._wait_for_condition: %s timeout." -msgstr "CommandLineHelper._wait_for_condition: тайм-аут %s." - -msgid "Compression Enabler is not installed. Can not create compressed volume." -msgstr "" -"Программа включения сжатия не установлена. Создать сжатый том невозможно." - -#, python-format -msgid "Compute cluster: %(cluster)s not found." -msgstr "Вычислительный кластер: не удалось найти %(cluster)s." - -msgid "Condition has no field." -msgstr "В условии нет поля." - -#, python-format -msgid "Config 'max_over_subscription_ratio' invalid. Must be > 0: %s" -msgstr "" -"Недопустимое значение 'max_over_subscription_ratio'. Должно быть > 0: %s" - -msgid "Configuration error: dell_sc_ssn not set." 
-msgstr "Ошибка конфигурации: dell_sc_ssn не задан." - -#, python-format -msgid "Configuration file %(configurationFile)s does not exist." -msgstr "Файл конфигурации %(configurationFile)s не существует." - -msgid "Configuration is not found." -msgstr "Конфигурация не найдена." - -#, python-format -msgid "Configuration value %s is not set." -msgstr "Значение конфигурации %s не задано." - -#, python-format -msgid "" -"Conflicting QoS specifications in volume type %s: when QoS spec is " -"associated to volume type, legacy \"netapp:qos_policy_group\" is not allowed " -"in the volume type extra specs." -msgstr "" -"Конфликт спецификаций QoS в типе тома %s: когда спецификация QoS связана с " -"типом тома, устаревшая спецификация \"netapp:qos_policy_group\" запрещена в " -"дополнительных спецификациях типа тома." - -#, python-format -msgid "Connection to glance failed: %(reason)s" -msgstr "Не удалось подключиться к glance: %(reason)s" - -#, python-format -msgid "Connection to swift failed: %(reason)s" -msgstr "Ошибка соединения с swift: %(reason)s" - -#, python-format -msgid "Connector does not provide: %s" -msgstr "Не передан коннектор: %s" - -#, python-format -msgid "Connector doesn't have required information: %(missing)s" -msgstr "Коннектор не содержит требуемую информацию: %(missing)s" - -msgid "Consistency group is empty. No cgsnapshot will be created." -msgstr "" -"Группа согласования пустая. Моментальные копии группы согласования " -"создаваться не будут." - -#, python-format -msgid "ConsistencyGroup %(consistencygroup_id)s could not be found." -msgstr "Группа согласования %(consistencygroup_id)s не найдена." - -msgid "Container" -msgstr "контейнер" - -msgid "Container size smaller than required file size." -msgstr "Размер контейнера меньше размера требуемого файла." - -msgid "Content type not supported." -msgstr "Тип содержимого не поддерживается." - -#, python-format -msgid "Controller Configuration Service not found on %(storageSystemName)s." 
-msgstr "Служба конфигурации контроллеров не найдена в %(storageSystemName)s." - -#, python-format -msgid "Controller IP '%(host)s' could not be resolved: %(e)s." -msgstr "Не удалось определить IP-адрес контроллера '%(host)s': %(e)s." - -#, python-format -msgid "Converted to %(f1)s, but format is now %(f2)s" -msgstr "Выполнено преобразование в %(f1)s, но в данный момент формат - %(f2)s" - -#, python-format -msgid "Converted to %(vol_format)s, but format is now %(file_format)s" -msgstr "" -"Выполнено преобразование в %(vol_format)s, но в данный момент формат - " -"%(file_format)s" - -#, python-format -msgid "Converted to raw, but format is now %s" -msgstr "Преобразование в необработанный, но текущий формат %s" - -#, python-format -msgid "Converted to raw, but format is now %s." -msgstr "Выполнено преобразование в raw, но в данный момент формат - %s." - -msgid "Coordinator uninitialized." -msgstr "Координатор деинициализирован." - -#, python-format -msgid "" -"Copy volume task failed: convert_to_base_volume: id=%(id)s, status=" -"%(status)s." -msgstr "" -"Сбой задачи копирования тома: convert_to_base_volume: id=%(id)s, status=" -"%(status)s." - -#, python-format -msgid "" -"Copy volume task failed: create_cloned_volume id=%(id)s, status=%(status)s." -msgstr "" -"Ошибка задачи копирования тома: create_cloned_volume id=%(id)s, состояние=" -"%(status)s." - -#, python-format -msgid "Copying metadata from %(src_type)s %(src_id)s to %(vol_id)s." -msgstr "Копирование метаданных из %(src_type)s %(src_id)s в %(vol_id)s." - -msgid "" -"Could not determine which Keystone endpoint to use. This can either be set " -"in the service catalog or with the cinder.conf config option " -"'backup_swift_auth_url'." -msgstr "" -"Не удается определить, какую конечную точку Keystone следует использовать. " -"Задать ее можно в каталоге служб или посредством опции конфигурации cinder." -"conf, 'backup_swift__auth_url'." - -msgid "" -"Could not determine which Swift endpoint to use. 
This can either be set in " -"the service catalog or with the cinder.conf config option 'backup_swift_url'." -msgstr "" -"Не удается определить, какую конечную точку Swift следует использовать. " -"Задать ее можно в каталоге служб или посредством опции конфигурации cinder." -"conf, 'backup_swift_url'." - -msgid "Could not find DISCO wsdl file." -msgstr "Не найден файл WSDL DISCO." - -#, python-format -msgid "Could not find GPFS cluster id: %s." -msgstr "Не найден ИД кластера GPFS: %s." - -#, python-format -msgid "Could not find GPFS file system device: %s." -msgstr "Не найдено устройство файловой системы GPFS: %s." - -#, python-format -msgid "Could not find config at %(path)s" -msgstr "Невозможно найти конфигурацию по адресу %(path)s" - -#, python-format -msgid "Could not find iSCSI export for volume %s" -msgstr "Не удалось найти экспорт iSCSI для тома %s" - -#, python-format -msgid "Could not find iSCSI target for volume: %(volume_id)s." -msgstr "Не найден целевой объект iSCSI для тома %(volume_id)s." - -#, python-format -msgid "Could not find key in output of command %(cmd)s: %(out)s." -msgstr "Не найден ключ в выводе команды %(cmd)s: %(out)s." - -#, python-format -msgid "Could not find parameter %(param)s" -msgstr "Не удалось найти параметр %(param)s" - -#, python-format -msgid "Could not find target %s" -msgstr "Не удалось найти целевой объект %s" - -#, python-format -msgid "Could not find the parent volume for Snapshot '%s' on array." -msgstr "Не найден родительский том для моментальной копии '%s' в массиве." - -#, python-format -msgid "Could not find unique snapshot %(snap)s on volume %(vol)s." -msgstr "Не найдена уникальная моментальная копия %(snap)s для тома %(vol)s." - -msgid "Could not get system name." -msgstr "Не удалось получить имя системы." 
- -#, python-format -msgid "Could not load paste app '%(name)s' from %(path)s" -msgstr "Невозможно загрузить приложение '%(name)s' из %(path)s" - -#, python-format -msgid "" -"Could not read information for snapshot %(name)s. Code: %(code)s. Reason: " -"%(reason)s" -msgstr "" -"Не удалось прочитать информацию для моментальной копии %(name)s. Код: " -"%(code)s. Причина: %(reason)s" - -#, python-format -msgid "Could not restore configuration file %(file_path)s: %(exc)s" -msgstr "Не удалось восстановить файл конфигурации %(file_path)s: %(exc)s" - -#, python-format -msgid "Could not save configuration to %(file_path)s: %(exc)s" -msgstr "Не удалось сохранить конфигурацию в %(file_path)s: %(exc)s" - -#, python-format -msgid "Could not start consistency group snapshot %s." -msgstr "" -"Не удалось запустить создание моментальной копии группы согласования %s." - -#, python-format -msgid "Counter %s not found" -msgstr "Счетчик %s не найден" - -msgid "Create QoS policy error." -msgstr "Ошибка создания стратегии QoS." - -#, python-format -msgid "" -"Create backup aborted, expected backup status %(expected_status)s but got " -"%(actual_status)s." -msgstr "" -"Создание резервной копии прервано: ожидалось состояние резервной копии " -"%(expected_status)s, получено %(actual_status)s." - -#, python-format -msgid "" -"Create backup aborted, expected volume status %(expected_status)s but got " -"%(actual_status)s." -msgstr "" -"Создание резервной копии прервано: ожидалось состояние тома " -"%(expected_status)s, получено %(actual_status)s." - -msgid "Create export for volume failed." -msgstr "Не удалось создать экспорт для тома." - -msgid "Create hostgroup error." -msgstr "Ошибка создания группы хостов." - -#, python-format -msgid "Create hypermetro error. %s." -msgstr "Ошибка создания hypermetro. %s." - -msgid "Create lun error." -msgstr "Ошибка создания LUN." - -msgid "Create lun migration error." -msgstr "Ошибка создания переноса LUN." - -msgid "Create luncopy error." 
-msgstr "Ошибка создания копии LUN." - -msgid "Create lungroup error." -msgstr "Ошибка создания группы LUN." - -msgid "Create manager volume flow failed." -msgstr "Не удалось создать поток тома администратора." - -msgid "Create port group error." -msgstr "Ошибка создания группы портов." - -msgid "Create replication error." -msgstr "Ошибка создания репликации." - -#, python-format -msgid "Create replication pair failed. Error: %s." -msgstr "Ошибка создания пары репликации: %s." - -msgid "Create snapshot error." -msgstr "Ошибка создания моментальной копии." - -#, python-format -msgid "Create volume error. Because %s." -msgstr "Ошибка создания тома. Причина: %s." - -msgid "Create volume failed." -msgstr "Сбой создания тома." - -msgid "Creating a consistency group from a source is not currently supported." -msgstr "Создание группы согласования из источника пока не поддерживается." - -#, python-format -msgid "" -"Creating and activating zone set failed: (Zone set=%(cfg_name)s error=" -"%(err)s)." -msgstr "" -"Не удалось создать и активировать набор зон: (набор зон=%(cfg_name)s ошибка=" -"%(err)s)." - -#, python-format -msgid "" -"Creating and activating zone set failed: (Zone set=%(zoneset)s error=" -"%(err)s)." -msgstr "" -"Не удалось создать и активировать набор зон: (набор зон=%(zoneset)s ошибка=" -"%(err)s)." - -#, python-format -msgid "Creating usages for %(begin_period)s until %(end_period)s" -msgstr "" -"Создаются сведения об использовании с %(begin_period)s по %(end_period)s" - -msgid "Current host isn't part of HGST domain." -msgstr "Текущий хост не является частью домена HGST." - -#, python-format -msgid "" -"Current host not valid for volume %(id)s with type %(type)s, migration not " -"allowed" -msgstr "" -"Текущий хост недопустим для тома %(id)s с типом %(type)s. Перенос запрещен" - -#, python-format -msgid "" -"Currently mapped host for volume %(vol)s is in unsupported host group with " -"%(group)s." 
-msgstr "" -"Связанный с томом %(vol)s хост находится в неподдерживаемой группе хостов " -"%(group)s." - -#, python-format -msgid "" -"DRBDmanage driver error: expected key \"%s\" not in answer, wrong DRBDmanage " -"version?" -msgstr "" -"Ошибка драйвера DRBDmanage: в ответе отсутствует ожидаемый ключ \"%s\", " -"неверная версия DRBDmanage?" - -msgid "" -"DRBDmanage driver setup error: some required libraries (dbus, drbdmanage.*) " -"not found." -msgstr "" -"Ошибка настройки драйвера DRBDmanage: некоторые необходимые библиотеки " -"(dbus, drbdmanage.*) не найдены." - -#, python-format -msgid "DRBDmanage expected one resource (\"%(res)s\"), got %(n)d" -msgstr "Программа DRBDmanage ожидала один ресурс (\"%(res)s\"), получила %(n)d" - -#, python-format -msgid "" -"DRBDmanage timeout waiting for new volume after snapshot restore; resource " -"\"%(res)s\", volume \"%(vol)s\"" -msgstr "" -"Тайм-аут ожидания DRBDmanage для нового тома после восстановления " -"моментальной копии; ресурс \"%(res)s\", том \"%(vol)s\"" - -#, python-format -msgid "" -"DRBDmanage timeout waiting for snapshot creation; resource \"%(res)s\", " -"snapshot \"%(sn)s\"" -msgstr "" -"Тайм-аут ожидания DRBDmanage при создании моментальной копии; ресурс " -"\"%(res)s\", моментальная копия \"%(sn)s\"" - -#, python-format -msgid "" -"DRBDmanage timeout waiting for volume creation; resource \"%(res)s\", volume " -"\"%(vol)s\"" -msgstr "" -"Тайм-аут ожидания DRBDmanage при создании тома; ресурс \"%(res)s\", том " -"\"%(vol)s\"" - -#, python-format -msgid "" -"DRBDmanage timeout waiting for volume size; volume ID \"%(id)s\" (res " -"\"%(res)s\", vnr %(vnr)d)" -msgstr "" -"Тайм-аут ожидания DRBDmanage для размера тома; ИД тома \"%(id)s\" (res " -"\"%(res)s\", vnr %(vnr)d)" - -msgid "Data ONTAP API version could not be determined." -msgstr "Не удалось определить версию API ONTAP данных." - -msgid "Data ONTAP operating in 7-Mode does not support QoS policy groups." 
-msgstr "" -"ONTAP данных, работающий в режиме 7, не поддерживает группы стратегий QoS." - -msgid "Database schema downgrade is not allowed." -msgstr "Понижение версии схемы базы данных не разрешено." - -#, python-format -msgid "Dataset %s is not shared in Nexenta Store appliance" -msgstr "Набор данных %s не является общим в устройстве Nexenta Store" - -#, python-format -msgid "Dataset group %s not found at Nexenta SA" -msgstr "Группа набора данных %s не найдена в Nexenta SA" - -#, python-format -msgid "" -"Dedup is a valid provisioning type, but requires WSAPI version " -"'%(dedup_version)s' version '%(version)s' is installed." -msgstr "" -"Дедупликация - допустимый тип предоставления ресурсов, но требуется, чтобы " -"версия WSAPI %(dedup_version)s %(version)s была установлена." - -msgid "Dedup luns cannot be extended" -msgstr "LUN с дедупликацией нельзя расширять" - -#, python-format -msgid "" -"Default quota for resource: %(res)s is set by the default quota flag: quota_" -"%(res)s, it is now deprecated. Please use the default quota class for " -"default quota." -msgstr "" -"Квота по умолчанию для ресурса %(res)s, заданная флагом квоты по умолчанию " -"quota_%(res)s, устарела. Используйте класс квоты по умолчанию для квоты по " -"умолчанию." - -msgid "Default volume type can not be found." -msgstr "Не удается найти тип тома по умолчанию." - -msgid "Delete LUNcopy error." -msgstr "Ошибка удаления LUNcopy." - -msgid "Delete QoS policy error." -msgstr "Ошибка удаления стратегии QoS." - -msgid "Delete associated lun from lungroup error." -msgstr "Ошибка удаления связанного LUN из группы LUN." - -#, python-format -msgid "" -"Delete backup aborted, the backup service currently configured " -"[%(configured_service)s] is not the backup service that was used to create " -"this backup [%(backup_service)s]." 
-msgstr "" -"Удаление резервной копии прервано: настроенная в данный момент служба " -"резервного копирования [%(configured_service)s] не является службой " -"резервного копирования, которая использовалась для создания этой резервной " -"копии [%(backup_service)s]." - -msgid "Delete consistency group failed." -msgstr "Не удалось удалить группу согласования." - -msgid "Delete hostgroup error." -msgstr "Ошибка удаления группы хостов." - -msgid "Delete hostgroup from mapping view error." -msgstr "Ошибка удаления группы хостов из представления связей." - -msgid "Delete lun error." -msgstr "Ошибка удаления LUN." - -msgid "Delete lun migration error." -msgstr "Ошибка удаления переноса LUN." - -msgid "Delete lungroup error." -msgstr "Ошибка удаления группы LUN." - -msgid "Delete lungroup from mapping view error." -msgstr "Ошибка удаления группы LUN из представления связей." - -msgid "Delete mapping view error." -msgstr "Ошибка удаления представления связей." - -msgid "Delete port group error." -msgstr "Ошибка удаления группы портов." - -msgid "Delete portgroup from mapping view error." -msgstr "Ошибка удаления группы портов из представления связей." - -msgid "Delete snapshot error." -msgstr "Ошибка удаления моментальной копии." - -#, python-format -msgid "Delete snapshot of volume not supported in state: %s." -msgstr "Удаление моментальной копии тома не поддерживается в состоянии %s." - -#, python-format -msgid "" -"Delete_backup aborted, expected backup status %(expected_status)s but got " -"%(actual_status)s." -msgstr "" -"Удаление резервной копии прервано: ожидалось состояние резервной копии " -"%(expected_status)s, получено %(actual_status)s." - -msgid "Deleting volume from database and skipping rpc." -msgstr "Удаление тома из базы данных и пропуск rpc." - -#, python-format -msgid "Deleting zones failed: (command=%(cmd)s error=%(err)s)." -msgstr "Не удалось удалить зоны: (команда=%(cmd)s ошибка=%(err)s)." 
- -msgid "Dell API 2.1 or later required for Consistency Group support" -msgstr "" -"Для поддержки групп согласования требуется Dell API версии 2.1 или выше" - -msgid "" -"Dell Cinder driver configuration error replication not supported with direct " -"connect." -msgstr "" -"Ошибка конфигурации драйвера Dell Cinder. Репликация не поддерживается для " -"прямого подключения." - -#, python-format -msgid "Dell Cinder driver configuration error replication_device %s not found" -msgstr "" -"Ошибка конфигурации драйвера Dell Cinder. replication_device %s не найден" - -msgid "Describe-resource is admin only functionality" -msgstr "Функция Describe-resource доступна только администраторам" - -#, python-format -msgid "Destination has migration_status %(stat)s, expected %(exp)s." -msgstr "migration_status целевого хоста - %(stat)s, ожидалось %(exp)s." - -msgid "Destination volume not mid-migration." -msgstr "Целевой том не в процессе переноса." - -msgid "" -"Detach volume failed: More than one attachment, but no attachment_id " -"provided." -msgstr "" -"Не удалось отключить том: подключений несколько, но не указан attachment_id." - -msgid "Detach volume from instance and then try again." -msgstr "Отключите том от экземпляра и повторите операцию." - -#, python-format -msgid "Detected more than one volume with name %(vol_name)s" -msgstr "Обнаружено больше одного тома с именем %(vol_name)s" - -#, python-format -msgid "Did not find expected column in %(fun)s: %(hdr)s." -msgstr "Не найден ожидаемый столбец в %(fun)s: %(hdr)s." - -#, python-format -msgid "Did not find the expected key %(key)s in %(fun)s: %(raw)s." -msgstr "Не найден ожидаемый ключ %(key)s в %(fun)s: %(raw)s." - -msgid "Disabled reason contains invalid characters or is too long" -msgstr "Причина выключения содержит неверные символы или слишком длинна" - -#, python-format -msgid "Domain with name %s wasn't found." -msgstr "Не найден домен с именем %s." - -#, python-format -msgid "" -"Downlevel GPFS Cluster Detected. 
GPFS Clone feature not enabled in cluster " -"daemon level %(cur)s - must be at least at level %(min)s." -msgstr "" -"Обнаружено понижение уровня кластера GPFS. Функция дублирования GPFS не " -"включена на уровне демона кластера %(cur)s: должен быть уровень не ниже " -"%(min)s." - -#, python-format -msgid "Driver initialize connection failed (error: %(err)s)." -msgstr "Драйверу не удалось инициализировать соединение (ошибка: %(err)s)." - -msgid "Driver must implement initialize_connection" -msgstr "Драйвер должен реализовать initialize_connection" - -#, python-format -msgid "" -"Driver successfully decoded imported backup data, but there are missing " -"fields (%s)." -msgstr "" -"Драйвер успешно декодировал импортированные данные резервной копии, но в них " -"нет полей (%s)." - -#, python-format -msgid "" -"E-series proxy API version %(current_version)s does not support full set of " -"SSC extra specs. The proxy version must be at at least %(min_version)s." -msgstr "" -"Версия API прокси-сервера E-Series %(current_version)s не поддерживает " -"полный набор дополнительных спецификаций SSC. Версия прокси-сервера должна " -"быть не ниже %(min_version)s." - -#, python-format -msgid "" -"EMC VNX Cinder Driver CLI exception: %(cmd)s (Return Code: %(rc)s) (Output: " -"%(out)s)." -msgstr "" -"Исключительная ситуация клиента драйвера EMC VNX Cinder: %(cmd)s (код " -"возврата: %(rc)s) (вывод: %(out)s)." - -#, python-format -msgid "" -"EMC VNX Cinder Driver SPUnavailableException: %(cmd)s (Return Code: %(rc)s) " -"(Output: %(out)s)." -msgstr "" -"Драйвер EMC VNX Cinder, SPUnavailableException: %(cmd)s (код возврата: " -"%(rc)s) (вывод: %(out)s)." - -msgid "" -"EcomServerIp, EcomServerPort, EcomUserName, EcomPassword must have valid " -"values." -msgstr "" -"EcomServerIp, EcomServerPort, EcomUserName, EcomPassword должны иметь " -"допустимые значения." 
- -#, python-format -msgid "" -"Either 'cgsnapshot_id' or 'source_cgid' must be provided to create " -"consistency group %(name)s from source." -msgstr "" -"Необходимо указать 'cgsnapshot_id' или 'source_cgid' для создания группы " -"согласования %(name)s из источника." - -#, python-format -msgid "" -"Either SLO: %(slo)s or workload %(workload)s is invalid. Examine previous " -"error statement for valid values." -msgstr "" -"SLO %(slo)s или рабочая схема %(workload)s недопустимы. См. предыдущее " -"сообщение об ошибке. Там указаны допустимые значения." - -msgid "Either hitachi_serial_number or hitachi_unit_name is required." -msgstr "Необходимо указать либо hitachi_serial_number, либо hitachi_unit_name." - -#, python-format -msgid "Element Composition Service not found on %(storageSystemName)s." -msgstr "Служба составления элементов не найдена в %(storageSystemName)s." - -msgid "Enables QoS." -msgstr "Включает QoS." - -msgid "Enables compression." -msgstr "Включает сжатие." - -msgid "Enables replication." -msgstr "Включает репликацию." - -msgid "Ensure that configfs is mounted at /sys/kernel/config." -msgstr "Убедитесь, что configfs смонтирована в /sys/kernel/config." - -#, python-format -msgid "" -"Error Adding Initiator: %(initiator)s on groupInitiatorGroup: " -"%(initiatorgroup)s Return code: %(ret.status)d Message: %(ret.data)s ." -msgstr "" -"Ошибка добавления инициатора: %(initiator)s в groupInitiatorGroup: " -"%(initiatorgroup)s, код возврата: %(ret.status)d, сообщение: %(ret.data)s." - -#, python-format -msgid "" -"Error Adding to TargetGroup: %(targetgroup)s withIQN: %(iqn)sReturn code: " -"%(ret.status)d Message: %(ret.data)s." -msgstr "" -"Ошибка при добавлении в целевую группу %(targetgroup)s с IQN: %(iqn)s, код " -"возврата: %(ret.status)d, сообщение: %(ret.data)s." - -#, python-format -msgid "Error Attaching volume %(vol)s." -msgstr "Ошибка подключения тома %(vol)s." 
- -#, python-format -msgid "" -"Error Cloning Snapshot: %(snapshot)s on Volume: %(lun)s of Pool: %(pool)s " -"Project: %(project)s Clone project: %(clone_proj)s Return code: " -"%(ret.status)d Message: %(ret.data)s." -msgstr "" -"Ошибка при дублировании моментальной копии %(snapshot)s, том: %(lun)s, пул: " -"%(pool)s, проект: %(project)s, дубликат проекта: %(clone_proj)s, код " -"возврата: %(ret.status)d, сообщение: %(ret.data)s." - -#, python-format -msgid "" -"Error Create Cloned Volume: %(cloneName)s Return code: %(rc)lu. Error: " -"%(error)s." -msgstr "" -"Ошибка создания копии тома %(cloneName)s. Код возврата: %(rc)lu. Ошибка: " -"%(error)s." - -#, python-format -msgid "" -"Error Create Cloned Volume: Volume: %(cloneName)s Source Volume:" -"%(sourceName)s. Return code: %(rc)lu. Error: %(error)s." -msgstr "" -"Ошибка при создании копии тома. Копия: %(cloneName)s Исходный том: " -"%(sourceName)s. Код возврата: %(rc)lu. Ошибка: %(error)s." - -#, python-format -msgid "" -"Error Create Group: %(groupName)s. Return code: %(rc)lu. Error: %(error)s." -msgstr "" -"Ошибка при создании группы: %(groupName)s. Код возврата: %(rc)lu. Ошибка: " -"%(error)s." - -#, python-format -msgid "" -"Error Create Masking View: %(groupName)s. Return code: %(rc)lu. Error: " -"%(error)s." -msgstr "" -"Ошибка при создании маскирующего представления: %(groupName)s. Код " -"возврата: %(rc)lu. Ошибка: %(error)s." - -#, python-format -msgid "" -"Error Create Volume: %(volumeName)s. Return code: %(rc)lu. Error: %(error)s." -msgstr "" -"Ошибка при создании тома: %(volumeName)s. Код возврата: %(rc)lu. Ошибка: " -"%(error)s." - -#, python-format -msgid "" -"Error Create Volume: %(volumename)s. Return code: %(rc)lu. Error: %(error)s." -msgstr "" -"Ошибка при создании тома: %(volumename)s. Код возврата: %(rc)lu. Ошибка: " -"%(error)s." - -#, python-format -msgid "" -"Error CreateGroupReplica: source: %(source)s target: %(target)s. Return " -"code: %(rc)lu. Error: %(error)s." 
-msgstr "" -"Ошибка функции CreateGroupReplica. Исходная группа: %(source)s. Целевая " -"группа: %(target)s. Код возврата: %(rc)lu. Ошибка: %(error)s." - -#, python-format -msgid "" -"Error Creating Initiator: %(initiator)s on Alias: %(alias)s Return code: " -"%(ret.status)d Message: %(ret.data)s ." -msgstr "" -"Ошибка при создании инициатора %(initiator)s для псевдонима %(alias)s. Код " -"возврата: %(ret.status)d, сообщение: %(ret.data)s." - -#, python-format -msgid "" -"Error Creating Project: %(project)s on Pool: %(pool)s Return code: " -"%(ret.status)d Message: %(ret.data)s ." -msgstr "" -"Ошибка при создании проекта %(project)s в пуле %(pool)s, код возврата: " -"%(ret.status)d, сообщение: %(ret.data)s." - -#, python-format -msgid "" -"Error Creating Property: %(property)s Type: %(type)s Description: " -"%(description)s Return code: %(ret.status)d Message: %(ret.data)s." -msgstr "" -"Ошибка создания свойства %(property)s, тип: %(type)s, описание: " -"%(description)s, код возврата: %(ret.status)d, сообщение: %(ret.data)s." - -#, python-format -msgid "" -"Error Creating Share: %(name)s Return code: %(ret.status)d Message: " -"%(ret.data)s." -msgstr "" -"Ошибка при создании общего ресурса: %(name)s, код возврата: %(ret.status)d, " -"сообщение: %(ret.data)s." - -#, python-format -msgid "" -"Error Creating Snapshot: %(snapshot)s onVolume: %(lun)s to Pool: %(pool)s " -"Project: %(project)s Return code: %(ret.status)d Message: %(ret.data)s." -msgstr "" -"Ошибка при создании моментальной копии: %(snapshot)s, том: %(lun)s, пул: " -"%(pool)s, проект: %(project)s, код возврата: %(ret.status)d, сообщение: " -"%(ret.data)s." - -#, python-format -msgid "" -"Error Creating Snapshot: %(snapshot)s onshare: %(share)s to Pool: %(pool)s " -"Project: %(project)s Return code: %(ret.status)d Message: %(ret.data)s." 
-msgstr "" -"Ошибка создания моментальной копии %(snapshot)s для общего ресурса %(share)s " -"в пуле: %(pool)s, проект: %(project)s, код возврата: %(ret.status)d, " -"сообщение: %(ret.data)s." - -#, python-format -msgid "" -"Error Creating Target: %(alias)sReturn code: %(ret.status)d Message: " -"%(ret.data)s ." -msgstr "" -"Ошибка при создании цели: %(alias)s, код возврата: %(ret.status)d, " -"сообщение: %(ret.data)s." - -#, python-format -msgid "" -"Error Creating TargetGroup: %(targetgroup)s withIQN: %(iqn)sReturn code: " -"%(ret.status)d Message: %(ret.data)s ." -msgstr "" -"Ошибка при создании целевой группы %(targetgroup)s с IQN: %(iqn)s, код " -"возврата: %(ret.status)d, сообщение: %(ret.data)s." - -#, python-format -msgid "" -"Error Creating Volume: %(lun)s Size: %(size)s Return code: %(ret.status)d " -"Message: %(ret.data)s." -msgstr "" -"Ошибка при создании тома %(lun)s, размер %(size)s, код возврата " -"%(ret.status)d Сообщение: %(ret.data)s." - -#, python-format -msgid "" -"Error Creating new composite Volume Return code: %(rc)lu. Error: %(error)s." -msgstr "" -"Ошибка при создании нового составного тома. Код возврата: %(rc)lu. Ошибка: " -"%(error)s." - -#, python-format -msgid "" -"Error Creating replication action on: pool: %(pool)s Project: %(proj)s " -"volume: %(vol)s for target: %(tgt)s and pool: %(tgt_pool)sReturn code: " -"%(ret.status)d Message: %(ret.data)s ." -msgstr "" -"Ошибка действия создания репликации в пуле %(pool)s, проект: %(proj)s, том: " -"%(vol)s, для целевого объекта %(tgt)s и пула %(tgt_pool)s. Код возврата: " -"%(ret.status)d, сообщение: %(ret.data)s." - -msgid "Error Creating unbound volume on an Extend operation." -msgstr "Ошибка при создании несвязанного тома в операции Extend." - -msgid "Error Creating unbound volume." -msgstr "Ошибка создания несвязанного тома." - -#, python-format -msgid "" -"Error Delete Volume: %(volumeName)s. Return code: %(rc)lu. Error: %(error)s." -msgstr "" -"Ошибка удаления тома: %(volumeName)s. 
Код возврата: %(rc)lu. Ошибка: " -"%(error)s." - -#, python-format -msgid "" -"Error Deleting Group: %(storageGroupName)s. Return code: %(rc)lu. Error: " -"%(error)s" -msgstr "" -"Ошибка удаления группы %(storageGroupName)s. Код возврата: %(rc)lu. Ошибка: " -"%(error)s" - -#, python-format -msgid "" -"Error Deleting Initiator Group: %(initiatorGroupName)s. Return code: " -"%(rc)lu. Error: %(error)s" -msgstr "" -"Ошибка удаления группы инициатора: %(initiatorGroupName)s. Код возврата: " -"%(rc)lu. Ошибка: %(error)s" - -#, python-format -msgid "" -"Error Deleting Snapshot: %(snapshot)s on Share: %(share)s to Pool: %(pool)s " -"Project: %(project)s Return code: %(ret.status)d Message: %(ret.data)s." -msgstr "" -"Ошибка при удалении моментальной копии %(snapshot)s, общий ресурс: " -"%(share)s, пул: %(pool)s, проект: %(project)s, код возврата: %(ret.status)d, " -"сообщение: %(ret.data)s." - -#, python-format -msgid "" -"Error Deleting Snapshot: %(snapshot)s on Volume: %(lun)s to Pool: %(pool)s " -"Project: %(project)s Return code: %(ret.status)d Message: %(ret.data)s." -msgstr "" -"Ошибка при удалении моментальной копии %(snapshot)s, том: %(lun)s, пул: " -"%(pool)s, проект: %(project)s, код возврата: %(ret.status)d, сообщение: " -"%(ret.data)s." - -#, python-format -msgid "" -"Error Deleting Volume: %(lun)s from Pool: %(pool)s, Project: %(project)s. " -"Return code: %(ret.status)d, Message: %(ret.data)s." -msgstr "" -"Ошибка удаления тома %(lun)s из пула %(pool)s, проект: %(project)s. Код " -"возврата: %(ret.status)d, сообщение: %(ret.data)s." - -#, python-format -msgid "" -"Error Deleting project: %(project)s on pool: %(pool)s Return code: " -"%(ret.status)d Message: %(ret.data)s." -msgstr "" -"Ошибка удаления проекта %(project)s в пуле %(pool)s. Код возврата: " -"%(ret.status)d, сообщение: %(ret.data)s." - -#, python-format -msgid "" -"Error Deleting replication action: %(id)s Return code: %(ret.status)d " -"Message: %(ret.data)s." 
-msgstr "" -"Ошибка действия удаления репликации %(id)s. Код возврата: %(ret.status)d " -"Сообщение: %(ret.data)s." - -#, python-format -msgid "" -"Error Extend Volume: %(volumeName)s. Return code: %(rc)lu. Error: %(error)s." -msgstr "" -"Ошибка расширения тома: %(volumeName)s. Код возврата: %(rc)lu. Ошибка: " -"%(error)s." - -#, python-format -msgid "" -"Error Getting Initiators: InitiatorGroup: %(initiatorgroup)s Return code: " -"%(ret.status)d Message: %(ret.data)s ." -msgstr "" -"Ошибка при получении инициаторов: InitiatorGroup: %(initiatorgroup)s, Код " -"возврата: %(ret.status)d, сообщение: %(ret.data)s." - -#, python-format -msgid "" -"Error Getting Pool Stats: Pool: %(pool)s Return code: %(status)d Message: " -"%(data)s." -msgstr "" -"Ошибка при получении статистики пула: пул: %(pool)s код возврата: %(status)d " -"сообщение: %(data)s." - -#, python-format -msgid "" -"Error Getting Project Stats: Pool: %(pool)s Project: %(project)s Return " -"code: %(ret.status)d Message: %(ret.data)s." -msgstr "" -"Ошибка получения статистики проекта: Пул: %(pool)s Проект: %(project)s Код " -"возврата: %(ret.status)d Сообщение: %(ret.data)s." - -#, python-format -msgid "" -"Error Getting Share: %(share)s on Pool: %(pool)s Project: %(project)s Return " -"code: %(ret.status)d Message: %(ret.data)s." -msgstr "" -"Ошибка получения общего ресурса: %(share)s, пул: %(pool)s, проект: " -"%(project)s код возврата: %(ret.status)d, сообщение: %(ret.data)s." - -#, python-format -msgid "" -"Error Getting Snapshot: %(snapshot)s on Volume: %(lun)s to Pool: %(pool)s " -"Project: %(project)s Return code: %(ret.status)d Message: %(ret.data)s." -msgstr "" -"Ошибка при получении моментальной копии %(snapshot)s, том: %(lun)s, пул: " -"%(pool)s, проект: %(project)s, код возврата: %(ret.status)d, сообщение: " -"%(ret.data)s." - -#, python-format -msgid "" -"Error Getting Target: %(alias)sReturn code: %(ret.status)d Message: " -"%(ret.data)s ." 
-msgstr "" -"Ошибка при получении цели: %(alias)s, код возврата: %(ret.status)d, " -"сообщение: %(ret.data)s." - -#, python-format -msgid "" -"Error Getting Volume: %(lun)s on Pool: %(pool)s Project: %(project)s Return " -"code: %(ret.status)d Message: %(ret.data)s." -msgstr "" -"Ошибка при получении тома %(lun)s в пуле %(pool)s, проект: %(project)s, код " -"возврата: %(ret.status)d, сообщение: %(ret.data)s." - -#, python-format -msgid "" -"Error Migrating volume from one pool to another. Return code: %(rc)lu. " -"Error: %(error)s." -msgstr "" -"Ошибка при переносе тома из одного пула в другой. Код возврата: %(rc)lu. " -"Ошибка: %(error)s." - -#, python-format -msgid "" -"Error Modifying masking view : %(groupName)s. Return code: %(rc)lu. Error: " -"%(error)s." -msgstr "" -"Ошибка при модификации маскирующего представления: %(groupName)s. Код " -"возврата: %(rc)lu. Ошибка: %(error)s." - -#, python-format -msgid "Error Pool ownership: Pool %(pool)s is not owned by %(host)s." -msgstr "Ошибка принадлежности пула: пул %(pool)s не принадлежит %(host)s." - -#, python-format -msgid "" -"Error Setting props Props: %(props)s on Volume: %(lun)s of Pool: %(pool)s " -"Project: %(project)s Return code: %(ret.status)d Message: %(ret.data)s." -msgstr "" -"Ошибка при задании свойств %(props)s, том: %(lun)s, пул: %(pool)s, проект: " -"%(project)s, код возврата: %(ret.status)d, сообщение: %(ret.data)s." - -#, python-format -msgid "" -"Error Terminating migrate session. Return code: %(rc)lu. Error: %(error)s." -msgstr "" -"Ошибка при завершении сеанса переноса. Код возврата: %(rc)lu. Ошибка: " -"%(error)s." - -#, python-format -msgid "" -"Error Verifying Initiator: %(iqn)s Return code: %(ret.status)d Message: " -"%(ret.data)s." -msgstr "" -"Ошибка при проверке инициатора: %(iqn)s, код возврата: %(ret.status)d, " -"сообщение: %(ret.data)s." - -#, python-format -msgid "" -"Error Verifying Pool: %(pool)s Return code: %(ret.status)d Message: " -"%(ret.data)s." 
-msgstr "" -"Ошибка при проверке пула: %(pool)s, код возврата: %(ret.status)d, сообщение: " -"%(ret.data)s." - -#, python-format -msgid "" -"Error Verifying Project: %(project)s on Pool: %(pool)s Return code: " -"%(ret.status)d Message: %(ret.data)s." -msgstr "" -"Ошибка при проверке проекта %(project)s в пуле %(pool)s, код возврата: " -"%(ret.status)d, сообщение: %(ret.data)s." - -#, python-format -msgid "" -"Error Verifying Service: %(service)s Return code: %(ret.status)d Message: " -"%(ret.data)s." -msgstr "" -"Ошибка при проверке службы: %(service)s, код возврата: %(ret.status)d, " -"сообщение: %(ret.data)s." - -#, python-format -msgid "" -"Error Verifying Target: %(alias)s Return code: %(ret.status)d Message: " -"%(ret.data)s." -msgstr "" -"Ошибка при проверке цели: %(alias)s, код возврата: %(ret.status)d, " -"сообщение: %(ret.data)s." - -#, python-format -msgid "" -"Error Verifying share: %(share)s on Project: %(project)s and Pool: %(pool)s " -"Return code: %(ret.status)d Message: %(ret.data)s." -msgstr "" -"Ошибка при проверке общего ресурса %(share)s в проекте %(project)s и пуле " -"%(pool)s, код возврата: %(ret.status)d, сообщение: %(ret.data)s." - -#, python-format -msgid "" -"Error adding Volume: %(volumeName)s with instance path: " -"%(volumeInstancePath)s." -msgstr "" -"Ошибка при добавлении тома %(volumeName)s с путем к экземпляру " -"%(volumeInstancePath)s." - -#, python-format -msgid "" -"Error adding initiator to group : %(groupName)s. Return code: %(rc)lu. " -"Error: %(error)s." -msgstr "" -"Ошибка при добавлении инициатора в группу: %(groupName)s. Код возврата: " -"%(rc)lu. Ошибка: %(error)s." - -#, python-format -msgid "Error adding volume to composite volume. Error is: %(error)s." -msgstr "Ошибка при добавлении тома в составной том. Ошибка: %(error)s." - -#, python-format -msgid "Error appending volume %(volumename)s to target base volume." -msgstr "Ошибка при добавлении тома %(volumename)s в целевой базовый том." 
- -#, python-format -msgid "" -"Error associating storage group : %(storageGroupName)s. To fast Policy: " -"%(fastPolicyName)s with error description: %(errordesc)s." -msgstr "" -"Ошибка при связывании группы носителей %(storageGroupName)s со стратегией " -"FAST %(fastPolicyName)s, описание ошибки: %(errordesc)s." - -#, python-format -msgid "" -"Error break clone relationship: Sync Name: %(syncName)s Return code: " -"%(rc)lu. Error: %(error)s." -msgstr "" -"Ошибка разрыва взаимосвязи копии. Имя синхронизации: %(syncName)s Код " -"возврата: %(rc)lu. Ошибка: %(error)s." - -msgid "Error connecting to ceph cluster." -msgstr "Ошибка подключения к кластеру ceph." - -#, python-format -msgid "Error connecting via ssh: %s" -msgstr "Ошибка при соединении посредством ssh: %s" - -#, python-format -msgid "Error creating volume: %s." -msgstr "Ошибка создания тома: %s." - -msgid "Error deleting replay profile." -msgstr "Ошибка удаления профайла повтора." - -#, python-format -msgid "Error deleting volume %(ssn)s: %(volume)s" -msgstr "Ошибка удаления тома %(ssn)s: %(volume)s" - -#, python-format -msgid "Error deleting volume %(vol)s: %(err)s." -msgstr "Ошибка удаления тома %(vol)s: %(err)s." - -#, python-format -msgid "Error during evaluator parsing: %(reason)s" -msgstr "Ошибка во время анализа вычислителя: %(reason)s" - -#, python-format -msgid "" -"Error editing share: %(share)s on Pool: %(pool)s Return code: %(ret.status)d " -"Message: %(ret.data)s ." -msgstr "" -"Ошибка при изменении общего ресурса: %(share)s, пул: %(pool)s, код возврата: " -"%(ret.status)d, сообщение: %(ret.data)s." - -#, python-format -msgid "" -"Error enabling iSER for NetworkPortal: please ensure that RDMA is supported " -"on your iSCSI port %(port)d on ip %(ip)s." -msgstr "" -"Ошибка включения iSER для NetworkPortal: убедитесь, что RDMA поддерживается " -"на порте iSCSI %(port)d по IP-адресу %(ip)s." 
- -#, python-format -msgid "Error encountered during cleanup of a failed attach: %(ex)s" -msgstr "Ошибка очистки после сбоя подключения: %(ex)s" - -#, python-format -msgid "Error executing CloudByte API [%(cmd)s], Error: %(err)s." -msgstr "Ошибка выполнения API CloudByte [%(cmd)s], ошибка: %(err)s." - -msgid "Error executing EQL command" -msgstr "Ошибка выполнения команды EQL" - -#, python-format -msgid "Error executing command via ssh: %s" -msgstr "Ошибка выполнения команды через ssh: %s" - -#, python-format -msgid "Error extending volume %(vol)s: %(err)s." -msgstr "Ошибка расширения тома %(vol)s: %(err)s." - -#, python-format -msgid "Error extending volume: %(reason)s" -msgstr "Ошибка расширения тома: %(reason)s" - -#, python-format -msgid "Error finding %(name)s." -msgstr "Ошибка при поиске %(name)s." - -#, python-format -msgid "Error finding %s." -msgstr "Ошибка при поиске %s." - -#, python-format -msgid "" -"Error getting ReplicationSettingData. Return code: %(rc)lu. Error: %(error)s." -msgstr "" -"Ошибка получения ReplicationSettingData. Код возврата: %(rc)lu. Ошибка: " -"%(error)s." - -msgid "" -"Error getting appliance version details. Return code: %(ret.status)d " -"Message: %(ret.data)s ." -msgstr "" -"Ошибка получения сведений о версии устройства. Код возврата: %(ret.status)d " -"Сообщение: %(ret.data)s." - -#, python-format -msgid "Error getting domain id from name %(name)s: %(err)s." -msgstr "Ошибка получения ИД домена из имени %(name)s: %(err)s." - -#, python-format -msgid "Error getting domain id from name %(name)s: %(id)s." -msgstr "Ошибка получения ИД домена из имени %(name)s: %(id)s." - -msgid "Error getting initiator groups." -msgstr "Ошибка при получении групп инициаторов." - -#, python-format -msgid "Error getting pool id from name %(pool)s: %(err)s." -msgstr "Ошибка получения ИД пула из имени %(pool)s: %(err)s." - -#, python-format -msgid "Error getting pool id from name %(pool_name)s: %(err_msg)s." 
-msgstr "Ошибка получения ИД пула из имени %(pool_name)s: %(err_msg)s." - -#, python-format -msgid "" -"Error getting replication action: %(id)s. Return code: %(ret.status)d " -"Message: %(ret.data)s ." -msgstr "" -"Ошибка действия получения репликации %(id)s. Код возврата: %(ret.status)d " -"Сообщение: %(ret.data)s." - -msgid "" -"Error getting replication source details. Return code: %(ret.status)d " -"Message: %(ret.data)s ." -msgstr "" -"Ошибка получения сведений об источнике репликации. Код возврата: %(ret." -"status)d Сообщение: %(ret.data)s." - -msgid "" -"Error getting replication target details. Return code: %(ret.status)d " -"Message: %(ret.data)s ." -msgstr "" -"Ошибка получения сведений о цели репликации. Код возврата: %(ret.status)d " -"Сообщение: %(ret.data)s." - -#, python-format -msgid "" -"Error getting version: svc: %(svc)s.Return code: %(ret.status)d Message: " -"%(ret.data)s." -msgstr "" -"Ошибка при получении версии svc %(svc)s. Код возврата: %(ret.status)d " -"Сообщение: %(ret.data)s." - -#, python-format -msgid "" -"Error in Operation [%(operation)s] for volume [%(cb_volume)s] in CloudByte " -"storage: [%(cb_error)s], error code: [%(error_code)s]." -msgstr "" -"Ошибка операции [%(operation)s] для тома [%(cb_volume)s] в хранилище " -"CloudByte: [%(cb_error)s], код ошибки: [%(error_code)s]." - -#, python-format -msgid "Error in SolidFire API response: data=%(data)s" -msgstr "Ошибка в ответе API SolidFire: data=%(data)s" - -#, python-format -msgid "Error in space-create for %(space)s of size %(size)d GB" -msgstr "" -"Ошибка в операции создания пространства для %(space)s размера %(size)d ГБ" - -#, python-format -msgid "Error in space-extend for volume %(space)s with %(size)d additional GB" -msgstr "" -"Ошибка в операции расширения пространства для тома %(space)s на %(size)d ГБ" - -#, python-format -msgid "Error managing volume: %s." -msgstr "Ошибка управления томом: %s." 
- -#, python-format -msgid "" -"Error modify replica synchronization: %(sv)s operation: %(operation)s. " -"Return code: %(rc)lu. Error: %(error)s." -msgstr "" -"Ошибка изменения синхронизации копии %(sv)s, операция: %(operation)s. Код " -"возврата: %(rc)lu. Ошибка: %(error)s." - -#, python-format -msgid "" -"Error modifying Service: %(service)s Return code: %(ret.status)d Message: " -"%(ret.data)s." -msgstr "" -"Ошибка изменения службы: %(service)s, код возврата: %(ret.status)d, " -"сообщение: %(ret.data)s." - -#, python-format -msgid "" -"Error moving volume: %(vol)s from source project: %(src)s to target project: " -"%(tgt)s Return code: %(ret.status)d Message: %(ret.data)s ." -msgstr "" -"Ошибка перемещения тома %(vol)s из исходного проекта %(src)s в целевой " -"проект %(tgt)s, код возврата: %(ret.status)d, сообщение: %(ret.data)s." - -msgid "Error not a KeyError." -msgstr "Тип ошибки отличается от KeyError." - -msgid "Error not a TypeError." -msgstr "Тип ошибки отличается от TypeError." - -#, python-format -msgid "Error occurred when creating cgsnapshot %s." -msgstr "Ошибка создания моментальной копии группы согласования %s." - -#, python-format -msgid "Error occurred when deleting cgsnapshot %s." -msgstr "Ошибка удаления моментальной копии группы согласования %s." - -#, python-format -msgid "Error occurred when updating consistency group %s." -msgstr "Ошибка изменения группы согласования %s." - -#, python-format -msgid "Error renaming volume %(vol)s: %(err)s." -msgstr "Ошибка переименования тома %(vol)s: %(err)s." - -#, python-format -msgid "Error response: %s" -msgstr "Сообщение об ошибке: %s" - -msgid "Error retrieving volume size" -msgstr "Ошибка получения размера тома" - -#, python-format -msgid "" -"Error sending replication update for action id: %(id)s . Return code: " -"%(ret.status)d Message: %(ret.data)s ." -msgstr "" -"Ошибка передачи обновления репликации для ИД действия %(id)s. Код возврата: " -"%(ret.status)d, сообщение: %(ret.data)s." 
- -#, python-format -msgid "" -"Error sending replication update. Returned error: %(err)s. Action: %(id)s." -msgstr "" -"Ошибка отправки обновления репликации. Возвращенная ошибка: %(err)s. " -"Действие: %(id)s." - -#, python-format -msgid "" -"Error setting replication inheritance to %(set)s for volume: %(vol)s project " -"%(project)s Return code: %(ret.status)d Message: %(ret.data)s ." -msgstr "" -"Ошибка установки значения наследования репликации %(set)s для тома %(vol)s. " -"Проект %(project)s, код возврата: %(ret.status)d, сообщение: %(ret.data)s." - -#, python-format -msgid "" -"Error severing the package: %(package)s from source: %(src)s Return code: " -"%(ret.status)d Message: %(ret.data)s ." -msgstr "" -"Ошибка преобразования пакета %(package)s из источника %(src)s в локальный " -"пакет. Код возврата: %(ret.status)d, сообщение: %(ret.data)s." - -#, python-format -msgid "Error unbinding volume %(vol)s from pool. %(error)s." -msgstr "Ошибка удаления привязки тома %(vol)s к пулу: %(error)s." - -#, python-format -msgid "Error while authenticating with switch: %s." -msgstr "Ошибка при идентификации на коммутаторе: %s." - -#, python-format -msgid "Error while changing VF context %s." -msgstr "Ошибка при изменении контекста VF %s." - -#, python-format -msgid "Error while checking the firmware version %s." -msgstr "Ошибка при проверке версии встроенного ПО %s." - -#, python-format -msgid "Error while checking transaction status: %s" -msgstr "Ошибка проверки состояния транзакции: %s" - -#, python-format -msgid "Error while checking whether VF is available for management %s." -msgstr "Ошибка при проверке доступности VF для управления %s." - -#, python-format -msgid "" -"Error while connecting the switch %(switch_id)s with protocol %(protocol)s. " -"Error: %(error)s." -msgstr "" -"Ошибка при подключении коммутатора %(switch_id)s по протоколу %(protocol)s. " -"Ошибка: %(error)s." 
- -#, python-format -msgid "Error while creating authentication token: %s" -msgstr "Ошибка при создании маркера идентификации: %s" - -#, python-format -msgid "Error while creating snapshot [status] %(stat)s - [result] %(res)s." -msgstr "" -"Ошибка создания моментальной копии [состояние] %(stat)s - [результат] " -"%(res)s." - -#, python-format -msgid "Error while creating volume [status] %(stat)s - [result] %(res)s." -msgstr "Ошибка создания тома [состояние] %(stat)s - [результат] %(res)s." - -#, python-format -msgid "Error while deleting snapshot [status] %(stat)s - [result] %(res)s" -msgstr "" -"Ошибка удаления моментальной копии [состояние] %(stat)s - [результат] " -"%(res)s." - -#, python-format -msgid "Error while deleting volume [status] %(stat)s - [result] %(res)s." -msgstr "Ошибка удаления тома [состояние] %(stat)s - [результат] %(res)s." - -#, python-format -msgid "Error while extending volume [status] %(stat)s - [result] %(res)s." -msgstr "Ошибка расширения тома [состояние] %(stat)s - [результат] %(res)s." - -#, python-format -msgid "Error while getting %(op)s details, returned code: %(status)s." -msgstr "Ошибка получения сведений о %(op)s , код возврата: %(status)s." - -#, python-format -msgid "Error while getting data via ssh: (command=%(cmd)s error=%(err)s)." -msgstr "Ошибка получения данных через ssh: (команда=%(cmd)s ошибка=%(err)s)." - -#, python-format -msgid "Error while getting disco information [%s]." -msgstr "Ошибка при получении информации disco [%s]." - -#, python-format -msgid "Error while getting nvp value: %s." -msgstr "Ошибка при получении значения nvp: %s." - -#, python-format -msgid "Error while getting session information %s." -msgstr "Ошибка при получении информации о сеансе %s." - -#, python-format -msgid "Error while parsing the data: %s." -msgstr "Ошибка при анализе данных: %s" - -#, python-format -msgid "Error while querying page %(url)s on the switch, reason %(error)s." 
-msgstr "Ошибка запроса страницы %(url)s на коммутаторе, причина %(error)s." - -#, python-format -msgid "" -"Error while removing the zones and cfgs in the zone string: %(description)s." -msgstr "" -"Ошибка при удалении зон и конфигураций из строки зоны: %(description)s." - -#, python-format -msgid "Error while requesting %(service)s API." -msgstr "Ошибка во время запроса API %(service)s." - -#, python-format -msgid "Error while running zoning CLI: (command=%(cmd)s error=%(err)s)." -msgstr "" -"Ошибка выполнения распределения по зонам через интерфейс командной строки: " -"(команда=%(cmd)s ошибка=%(err)s)." - -#, python-format -msgid "" -"Error while updating the new zones and cfgs in the zone string. Error " -"%(description)s." -msgstr "" -"Ошибка при обновлении новых зон и конфигураций в строке зоны. Ошибка: " -"%(description)s." - -msgid "Error writing field to database" -msgstr "Ошибка записи поля в базу данных" - -#, python-format -msgid "Error[%(stat)s - %(res)s] while getting volume id." -msgstr "Ошибка [%(stat)s - %(res)s] при получении ИД тома." - -#, python-format -msgid "" -"Error[%(stat)s - %(res)s] while restoring snapshot [%(snap_id)s] into volume " -"[%(vol)s]." -msgstr "" -"Ошибка [%(stat)s - %(res)s] при восстановлении моментальной копии " -"[%(snap_id)s] в том [%(vol)s]." - -#, python-format -msgid "Error[status] %(stat)s - [result] %(res)s] while getting volume id." -msgstr "" -"Ошибка [состояние] %(stat)s - [результат] %(res)s] при получении ИД тома." - -#, python-format -msgid "" -"Exceeded max scheduling attempts %(max_attempts)d for volume %(volume_id)s" -msgstr "" -"Превышено максимальное число попыток планирования %(max_attempts)d для тома " -"%(volume_id)s" - -msgid "Exceeded the limit of snapshots per volume" -msgstr "Превышено максимально допустимое число моментальных копий тома" - -#, python-format -msgid "Exception appending meta volume to target volume %(volumename)s." 
-msgstr "" -"Возникла исключительная ситуация при добавлении тома метаданных к целевому " -"тому %(volumename)s." - -#, python-format -msgid "" -"Exception during create element replica. Clone name: %(cloneName)s Source " -"name: %(sourceName)s Extra specs: %(extraSpecs)s " -msgstr "" -"Возникла исключительная ситуация при создании реплики элемента. Имя " -"дубликата %(cloneName)s Исходное имя: %(sourceName)s Дополнительные " -"спецификации: %(extraSpecs)s " - -#, python-format -msgid "Exception in _select_ds_for_volume: %s." -msgstr "Исключительная ситуация в _select_ds_for_volume: %s." - -#, python-format -msgid "Exception while forming the zone string: %s." -msgstr "Исключительная ситуация при формировании строки зоны: %s." - -#, python-format -msgid "Exception: %s" -msgstr "Исключительная ситуация: %s" - -#, python-format -msgid "Expected a uuid but received %(uuid)s." -msgstr "Ожидался uuid, а получен %(uuid)s." - -#, python-format -msgid "Expected exactly one node called \"%s\"" -msgstr "Ожидался ровно один узел с именем \"%s\"" - -#, python-format -msgid "Expected integer for node_count, svcinfo lsiogrp returned: %(node)s." -msgstr "" -"Ожидалось целочисленное значение для node_count, команда svcinfo lsiogrp " -"вернула %(node)s." - -#, python-format -msgid "Expected no output from CLI command %(cmd)s, got %(out)s." -msgstr "" -"Вывод из команды %(cmd)s интерфейса командной строки не ожидался. Получен " -"%(out)s." - -#, python-format -msgid "" -"Expected single vdisk returned from lsvdisk when filtering on vdisk_UID. " -"%(count)s were returned." -msgstr "" -"Во время фильтрации по vdisk_UID от команды lsvdisk ожидался один vdisk. " -"Возвращено %(count)s." - -#, python-format -msgid "Expected volume size was %d" -msgstr "Ожидаемый размер тома: %d" - -#, python-format -msgid "" -"Export backup aborted, expected backup status %(expected_status)s but got " -"%(actual_status)s." 
-msgstr "" -"Экспорт резервной копии прерван: ожидалось состояние резервной копии " -"%(expected_status)s, получено %(actual_status)s." - -#, python-format -msgid "" -"Export record aborted, the backup service currently configured " -"[%(configured_service)s] is not the backup service that was used to create " -"this backup [%(backup_service)s]." -msgstr "" -"Экспорт записи прерван: настроенная в данный момент служба резервного " -"копирования [%(configured_service)s] не является службой резервного " -"копирования, которая использовалась для создания этой резервной копии " -"[%(backup_service)s]." - -msgid "Extend volume error." -msgstr "Ошибка расширения тома." - -msgid "" -"Extend volume is only supported for this driver when no snapshots exist." -msgstr "" -"Расширение тома для этого драйвера поддерживается, только когда нет " -"моментальных копий." - -msgid "Extend volume not implemented" -msgstr "Том расширения не реализован" - -msgid "FAST is not supported on this array." -msgstr "FAST не поддерживается в этом массиве." - -msgid "FC is the protocol but wwpns are not supplied by OpenStack." -msgstr "Протокол - FC, но не получены WWPN от OpenStack." - -#, python-format -msgid "Faield to unassign %(volume)s" -msgstr "Отменить назначение %(volume)s не удалось" - -#, python-format -msgid "Fail to create cache volume %(volume)s. Error: %(err)s" -msgstr "Не удалось создать том кэша %(volume)s. Ошибка: %(err)s" - -#, python-format -msgid "Failed adding connection for fabric=%(fabric)s: Error: %(err)s" -msgstr "" -"Не удалось добавить соединение для коммутируемой сети=%(fabric)s. Ошибка:" -"%(err)s" - -msgid "Failed cgsnapshot" -msgstr "Сбойная моментальная копия группы согласования" - -#, python-format -msgid "Failed creating snapshot for group: %(response)s." -msgstr "Не удалось создать моментальную копию для группы %(response)s." - -#, python-format -msgid "Failed creating snapshot for volume %(volname)s: %(response)s." 
-msgstr "" -"Не удалось создать моментальную копию для тома %(volname)s: %(response)s." - -#, python-format -msgid "Failed getting active zone set from fabric %s." -msgstr "Не удалось получить набор активных зон из фабрики %s." - -#, python-format -msgid "Failed getting details for pool %s." -msgstr "Не удалось получить сведения для пула %s." - -#, python-format -msgid "Failed removing connection for fabric=%(fabric)s: Error: %(err)s" -msgstr "" -"Не удалось удалить соединение для коммутируемой сети=%(fabric)s. Ошибка:" -"%(err)s" - -#, python-format -msgid "Failed to Extend Volume %(volname)s" -msgstr "Не удалось расширить том %(volname)s" - -#, python-format -msgid "Failed to Login to 3PAR (%(url)s) because %(err)s" -msgstr "Не удалось войти в 3PAR (%(url)s), причина: %(err)s" - -msgid "Failed to access active zoning configuration." -msgstr "Нет доступа к активной конфигурации распределения по зонам." - -#, python-format -msgid "Failed to access zoneset status:%s" -msgstr "Не удалось получить доступ к состоянию набора областей: %s" - -#, python-format -msgid "" -"Failed to acquire a resource lock. (serial: %(serial)s, inst: %(inst)s, ret: " -"%(ret)s, stderr: %(err)s)" -msgstr "" -"Не удалось получить блокировку ресурса. (порядковый номер: %(serial)s, " -"экземпляр: %(inst)s, код возврата: %(ret)s, stderr: %(err)s)" - -msgid "Failed to add the logical device." -msgstr "Не удалось добавить логическое устройство." - -#, python-format -msgid "" -"Failed to add volume %(volumeName)s to consistency group %(cgName)s. Return " -"code: %(rc)lu. Error: %(error)s." -msgstr "" -"Не удалось добавить том %(volumeName)s в группу согласования %(cgName)s. Код " -"возврата: %(rc)lu. Ошибка: %(error)s." - -msgid "Failed to add zoning configuration." -msgstr "Не удалось добавить конфигурацию распределения по зонам." - -#, python-format -msgid "" -"Failed to assign the iSCSI initiator IQN. 
(port: %(port)s, reason: " -"%(reason)s)" -msgstr "" -"Не удалось присвоить IQN инициатора iSCSI. (порт: %(port)s, причина: " -"%(reason)s)" - -#, python-format -msgid "Failed to associate qos_specs: %(specs_id)s with type %(type_id)s." -msgstr "Не удалось связать qos_specs %(specs_id)s с типом %(type_id)s." - -#, python-format -msgid "Failed to attach iSCSI target for volume %(volume_id)s." -msgstr "Не удалось подключить целевой объект iSCSI для тома %(volume_id)s." - -#, python-format -msgid "Failed to backup volume metadata - %s" -msgstr "Не удалось создать резервную копию метаданных тома - %s" - -#, python-format -msgid "" -"Failed to backup volume metadata - Metadata backup object 'backup.%s.meta' " -"already exists" -msgstr "" -"Не удалось создать резервную копию метаданных тома: объект резервной копии " -"метаданных backup.%s.meta уже существует" - -#, python-format -msgid "Failed to clone volume from snapshot %s." -msgstr "Не удалось создать дубликат тома из моментальной копии %s." - -#, python-format -msgid "Failed to connect to %(vendor_name)s Array %(host)s: %(err)s" -msgstr "Не удалось подключиться к %(vendor_name)s Массив %(host)s: %(err)s" - -msgid "Failed to connect to Dell REST API" -msgstr "Не удается подключиться к API REST Dell" - -msgid "Failed to connect to array" -msgstr "Не удалось подключиться к массиву" - -#, python-format -msgid "Failed to connect to sheep daemon. addr: %(addr)s, port: %(port)s" -msgstr "" -"Не удалось подключиться к демону sheep. Адрес: %(addr)s, порт: %(port)s" - -#, python-format -msgid "Failed to copy image to volume: %(reason)s" -msgstr "Не удалось скопировать образ на том: %(reason)s" - -#, python-format -msgid "Failed to copy metadata to volume: %(reason)s" -msgstr "Не удалось скопировать метаданные на том: %(reason)s" - -msgid "Failed to copy volume, destination device unavailable." -msgstr "Не удалось скопировать том: недоступно целевое устройство." - -msgid "Failed to copy volume, source device unavailable." 
-msgstr "Не удалось скопировать том: недоступно исходное устройство." - -#, python-format -msgid "Failed to create CG %(cgName)s from snapshot %(cgSnapshot)s." -msgstr "" -"Не удалось создать группу согласования %(cgName)s из моментальной копии " -"%(cgSnapshot)s." - -#, python-format -msgid "Failed to create IG, %s" -msgstr "Не удалось создать группу инициаторов, %s" - -#, python-format -msgid "Failed to create Volume Group: %(vg_name)s" -msgstr "Не удалось создать группу тома %(vg_name)s" - -#, python-format -msgid "" -"Failed to create a file. (file: %(file)s, ret: %(ret)s, stderr: %(err)s)" -msgstr "" -"Не удалось создать файл. (файл: %(file)s, код возврата: %(ret)s, stderr: " -"%(err)s)" - -#, python-format -msgid "Failed to create a temporary snapshot for volume %s." -msgstr "Не удалось создать временную моментальную копию для тома %s." - -msgid "Failed to create api volume flow." -msgstr "Не удалось создать поток тома api." - -#, python-format -msgid "Failed to create cg snapshot %(id)s due to %(reason)s." -msgstr "Не удалось создать моментальную копию cg %(id)s, причина: %(reason)s." - -#, python-format -msgid "Failed to create consistency group %(id)s due to %(reason)s." -msgstr "Не удалось создать группу согласования %(id)s, причина: %(reason)s." - -#, python-format -msgid "Failed to create consistency group %(id)s:%(ret)s." -msgstr "Не удалось создать группу согласования %(id)s:%(ret)s." - -#, python-format -msgid "" -"Failed to create consistency group %s because VNX consistency group cannot " -"accept compressed LUNs as members." -msgstr "" -"Не удалось создать группу согласования %s, поскольку группа согласования VNX " -"не принимает сжатые LUN в качестве своих элементов." - -#, python-format -msgid "Failed to create consistency group: %(cgName)s." -msgstr "Не удалось создать группу согласования: %(cgName)s." - -#, python-format -msgid "Failed to create consistency group: %(cgid)s. Error: %(excmsg)s." 
-msgstr "Не удалось создать группу согласования %(cgid)s. Ошибка: %(excmsg)s." - -#, python-format -msgid "" -"Failed to create consistency group: %(consistencyGroupName)s Return code: " -"%(rc)lu. Error: %(error)s." -msgstr "" -"Не удалось создать группу согласования %(consistencyGroupName)s Код " -"возврата: %(rc)lu. Ошибка: %(error)s." - -#, python-format -msgid "Failed to create hardware id(s) on %(storageSystemName)s." -msgstr "Не удалось создать ИД аппаратного обеспечения в %(storageSystemName)s." - -#, python-format -msgid "" -"Failed to create host: %(name)s. Please check if it exists on the array." -msgstr "Не удалось создать хост: %(name)s. Проверьте, есть ли он в массиве." - -#, python-format -msgid "Failed to create hostgroup: %(name)s. Check if it exists on the array." -msgstr "" -"Не удалось создать группу хостов: %(name)s. Проверьте, есть ли она в массиве." - -msgid "Failed to create iqn." -msgstr "Не удалось создать iqn." - -#, python-format -msgid "Failed to create iscsi target for volume %(volume_id)s." -msgstr "Не удалось создать iscsi target для тома %(volume_id)s." - -msgid "Failed to create manage existing flow." -msgstr "Не удалось создать управление существующим потоком." - -msgid "Failed to create manage_existing flow." -msgstr "Не удалось создать поток manage_existing." - -msgid "Failed to create map on mcs, no channel can map." -msgstr "" -"Не удалось создать карту связей в mcs: нет канала поддерживающего карты " -"связей." - -msgid "Failed to create map." -msgstr "Не удалось создать карту." - -#, python-format -msgid "Failed to create metadata for volume: %(reason)s" -msgstr "Не удалось создать метаданные для тома: %(reason)s" - -msgid "Failed to create partition." -msgstr "Не удалось создать раздел." - -#, python-format -msgid "Failed to create qos_specs: %(name)s with specs %(qos_specs)s." -msgstr "Не удалось создать qos_specs %(name)s со спецификацией %(qos_specs)s." - -msgid "Failed to create replica." 
-msgstr "Не удалось создать реплику." - -msgid "Failed to create scheduler manager volume flow" -msgstr "Не удалось создать поток тома администратора планировщика" - -#, python-format -msgid "Failed to create snapshot %s" -msgstr "Не удалось создать моментальную копию %s" - -#, python-format -msgid "Failed to create snapshot for cg: %(cgName)s." -msgstr "" -"Не удалось создать моментальную копию для группы согласования %(cgName)s." - -#, python-format -msgid "Failed to create snapshot for volume %s." -msgstr "Не удалось создать моментальную копию для тома %s." - -#, python-format -msgid "Failed to create snapshot policy on volume %(vol)s: %(res)s." -msgstr "" -"Не удалось создать стратегию моментальной копии на томе %(vol)s: %(res)s." - -#, python-format -msgid "Failed to create snapshot resource area on volume %(vol)s: %(res)s." -msgstr "" -"Не удалось найти область ресурсов моментальной копии на томе %(vol)s: " -"%(res)s." - -msgid "Failed to create snapshot." -msgstr "Не удалось создать моментальную копию." - -#, python-format -msgid "" -"Failed to create snapshot. CloudByte volume information not found for " -"OpenStack volume [%s]." -msgstr "" -"Не удалось создать моментальную копию. Не найдена информация о томе " -"CloudByte для тома OpenStack [%s]." - -#, python-format -msgid "Failed to create storage group %(storageGroupName)s." -msgstr "Не удалось создать группу носителей %(storageGroupName)s." - -#, python-format -msgid "Failed to create thin pool, error message was: %s" -msgstr "Не удалось создать оперативный пул, сообщение об ошибке: %s" - -#, python-format -msgid "Failed to create volume %s" -msgstr "Не удалось создать том %s" - -#, python-format -msgid "Failed to delete SI for volume_id: %(volume_id)s because it has pair." -msgstr "" -"Не удалось удалить SI для volume_id %(volume_id)s, поскольку у него есть " -"пара." - -#, python-format -msgid "Failed to delete a logical device. 
(LDEV: %(ldev)s, reason: %(reason)s)" -msgstr "" -"Не удалось удалить логическое устройство. (LDEV: %(ldev)s, причина: " -"%(reason)s)" - -#, python-format -msgid "Failed to delete cgsnapshot %(id)s due to %(reason)s." -msgstr "" -"Не удалось удалить моментальную копию cgsnapshot %(id)s, причина: %(reason)s." - -#, python-format -msgid "Failed to delete consistency group %(id)s due to %(reason)s." -msgstr "Не удалось удалить группу согласования %(id)s, причина: %(reason)s." - -#, python-format -msgid "Failed to delete consistency group: %(cgName)s." -msgstr "Не удалось удалить группу согласования %(cgName)s." - -#, python-format -msgid "" -"Failed to delete consistency group: %(consistencyGroupName)s Return code: " -"%(rc)lu. Error: %(error)s." -msgstr "" -"Не удалось удалить группу согласования %(consistencyGroupName)s. Код " -"возврата %(rc)lu. Ошибка: %(error)s." - -msgid "Failed to delete device." -msgstr "Не удалось удалить устройство." - -#, python-format -msgid "" -"Failed to delete fileset for consistency group %(cgname)s. Error: %(excmsg)s." -msgstr "" -"Не удалось удалить набор файлов для группы согласования %(cgname)s. Ошибка: " -"%(excmsg)s." - -msgid "Failed to delete iqn." -msgstr "Не удалось удалить iqn." - -msgid "Failed to delete map." -msgstr "Не удалось удалить карту связей." - -msgid "Failed to delete partition." -msgstr "Не удалось удалить раздел." - -msgid "Failed to delete replica." -msgstr "Не удалось удалить реплику." - -#, python-format -msgid "Failed to delete snapshot %s" -msgstr "Не удалось удалить моментальную копию %s" - -#, python-format -msgid "Failed to delete snapshot for cg: %(cgId)s." -msgstr "" -"Не удалось удалить моментальную копию для группы согласования %(cgId)s." - -#, python-format -msgid "Failed to delete snapshot for snapshot_id: %s because it has pair." -msgstr "" -"Не удалось удалить моментальную копию для snapshot_id %s, поскольку у нее " -"есть пара." - -msgid "Failed to delete snapshot." 
-msgstr "Не удалось удалить снимок." - -#, python-format -msgid "Failed to delete volume %(volumeName)s." -msgstr "Не удалось удалить том %(volumeName)s." - -#, python-format -msgid "" -"Failed to delete volume for volume_id: %(volume_id)s because it has pair." -msgstr "" -"Не удалось удалить том для volume_id %(volume_id)s, поскольку у него есть " -"пара." - -#, python-format -msgid "Failed to detach iSCSI target for volume %(volume_id)s." -msgstr "Не удалось отключить целевой объект iSCSI для тома %(volume_id)s." - -msgid "Failed to determine blockbridge API configuration" -msgstr "Не удалось определить конфигурацию API Blockbridge" - -msgid "Failed to disassociate qos specs." -msgstr "Не удалось удалить связь спецификации QoS." - -#, python-format -msgid "Failed to disassociate qos_specs: %(specs_id)s with type %(type_id)s." -msgstr "Не удалось удалить связь qos_specs %(specs_id)s с типом %(type_id)s." - -#, python-format -msgid "" -"Failed to ensure snapshot resource area, could not locate volume for id %s" -msgstr "" -"Не удалось обеспечить область ресурсов моментальной копии. Не найден том с " -"ИД %s" - -msgid "Failed to establish connection with Coho cluster" -msgstr "Не удалось установить соединение с кластером Coho" - -#, python-format -msgid "" -"Failed to execute CloudByte API [%(cmd)s]. Http status: %(status)s, Error: " -"%(error)s." -msgstr "" -"Не удалось выполнить API CloudByte [%(cmd)s]. Состояние Http: %(status)s, " -"Ошибка: %(error)s." - -msgid "Failed to execute common command." -msgstr "Не удалось выполнить общую команду." - -#, python-format -msgid "Failed to export for volume: %(reason)s" -msgstr "Не удалось экспортировать для тома: %(reason)s" - -#, python-format -msgid "Failed to extend volume %(name)s, Error msg: %(msg)s." -msgstr "Не удалось расширить том %(name)s, ошибка: %(msg)s." 
- -msgid "Failed to find QoSnode" -msgstr "Не найден QoSnode" - -msgid "Failed to find Storage Center" -msgstr "Не удалось найти Storage Center" - -msgid "Failed to find a vdisk copy in the expected pool." -msgstr "Не найдена копия vdisk в ожидаемом пуле." - -msgid "Failed to find account for volume." -msgstr "Не найдена учетная запись для тома." - -#, python-format -msgid "Failed to find fileset for path %(path)s, command output: %(cmdout)s." -msgstr "Не найден набор файлов для пути %(path)s, вывод команды: %(cmdout)s." - -#, python-format -msgid "Failed to find group snapshot named: %s" -msgstr "Не найдена моментальная копия группы: %s" - -#, python-format -msgid "Failed to find host %s." -msgstr "Не найден хост %s." - -#, python-format -msgid "Failed to find iSCSI initiator group containing %(initiator)s." -msgstr "Не найдена группа инициаторов iSCSI, содержащая %(initiator)s." - -#, python-format -msgid "Failed to get CloudByte account details for account [%s]." -msgstr "Не удалось получить сведения об учетной записи CloudByte [%s]." - -#, python-format -msgid "Failed to get LUN target details for the LUN %s" -msgstr "Не удалось получить целевые сведения LUN для LUN %s" - -#, python-format -msgid "Failed to get LUN target details for the LUN %s." -msgstr "Не удалось получить сведения о целевом объекте LUN для LUN %s." - -#, python-format -msgid "Failed to get LUN target list for the LUN %s" -msgstr "Не удалось получить целевой список LUN для LUN %s" - -#, python-format -msgid "Failed to get Partition ID for volume %(volume_id)s." -msgstr "Не удалось получить ИД раздела для тома %(volume_id)s." - -#, python-format -msgid "Failed to get Raid Snapshot ID from Snapshot %(snapshot_id)s." -msgstr "" -"Не удалось получить ИД моментальной копии RAID из моментальной копии " -"%(snapshot_id)s." - -#, python-format -msgid "Failed to get Raid Snapshot ID from snapshot: %(snapshot_id)s." 
-msgstr "" -"Не удалось получить ИД моментальной копии RAID из моментальной копии " -"%(snapshot_id)s." - -msgid "Failed to get SplitMirror." -msgstr "Не удалось получить SplitMirror." - -#, python-format -msgid "" -"Failed to get a storage resource. The system will attempt to get the storage " -"resource again. (resource: %(resource)s)" -msgstr "" -"Не удалось получить ресурс памяти. Система попытается получить ресурс памяти " -"повторно. (ресурс: %(resource)s)" - -#, python-format -msgid "Failed to get all associations of qos specs %s" -msgstr "Не удалось получить все связи спецификаций QoS %s" - -msgid "Failed to get channel info." -msgstr "Не удалось получить информацию о канале." - -#, python-format -msgid "Failed to get code level (%s)." -msgstr "Не удалось получить уровень кода (%s)." - -msgid "Failed to get device info." -msgstr "Не удалось получить информацию об устройстве." - -#, python-format -msgid "Failed to get domain because CPG (%s) doesn't exist on array." -msgstr "Не удалось получить домен: в массиве нет CPG (%s)." - -msgid "Failed to get image snapshots." -msgstr "Не удалось получить моментальные копии образа." - -#, python-format -msgid "Failed to get ip on Channel %(channel_id)s with volume: %(volume_id)s." -msgstr "" -"Не удалось получить IP-адрес в канале %(channel_id)s с томом %(volume_id)s." - -msgid "Failed to get iqn info." -msgstr "Не удалось получить информацию о iqn." - -msgid "Failed to get license info." -msgstr "Не удалось получить информацию о лицензии." - -msgid "Failed to get lv info." -msgstr "Не удалось получить информацию о логическом томе." - -msgid "Failed to get map info." -msgstr "Не удалось получить информацию о карте связей." - -msgid "Failed to get migration task." -msgstr "Не удалось получить задачу переноса." - -msgid "Failed to get model update from clone" -msgstr "Не удалось получить обновление модели из копии" - -msgid "Failed to get name server info." -msgstr "Не удалось получить информацию о серверах имен." 
- -msgid "Failed to get network info." -msgstr "Не удалось получить информацию о сети." - -#, python-format -msgid "Failed to get new part id in new pool: %(pool_id)s." -msgstr "Не удалось получить ИД нового раздела в новом пуле %(pool_id)s." - -msgid "Failed to get partition info." -msgstr "Не удалось получить информацию о разделе." - -#, python-format -msgid "Failed to get pool id with volume %(volume_id)s." -msgstr "Не удалось получить ИД пула с томом %(volume_id)s." - -#, python-format -msgid "Failed to get remote copy information for %(volume)s due to %(err)s." -msgstr "" -"Не удаётся получить информацию удаленного копирования для %(volume)s, " -"причина: %(err)s." - -#, python-format -msgid "" -"Failed to get remote copy information for %(volume)s. Exception: %(err)s." -msgstr "" -"Не удаётся получить информацию удаленного копирования для %(volume)s. " -"Исключительная ситуация: %(err)s." - -msgid "Failed to get replica info." -msgstr "Не удалось получить информацию о реплике." - -msgid "Failed to get show fcns database info." -msgstr "Не удалось получить информацию из базы данных команды show fcns." - -#, python-format -msgid "Failed to get size of volume %s" -msgstr "Не удалось получить размер тома %s" - -#, python-format -msgid "Failed to get snapshot for volume %s." -msgstr "Не удалось получить моментальную копию для тома %s." - -msgid "Failed to get snapshot info." -msgstr "Не удалось получить информацию о моментальной копии." - -#, python-format -msgid "Failed to get target IQN for the LUN %s" -msgstr "Не удалось получить целевой IQN для LUN %s" - -msgid "Failed to get target LUN of SplitMirror." -msgstr "Не удалось получить целевой LUN SplitMirror." - -#, python-format -msgid "Failed to get target portal for the LUN %s" -msgstr "Не удалось получить целевой портал для LUN %s" - -msgid "Failed to get targets" -msgstr "Не удалось получить целевые объекты" - -msgid "Failed to get wwn info." -msgstr "Не удалось получить информацию о WWN." 
- -#, python-format -msgid "" -"Failed to get, create or add volume %(volumeName)s to masking view " -"%(maskingViewName)s. The error message received was %(errorMessage)s." -msgstr "" -"Не удалось получить, создать или добавить том %(volumeName)s для " -"маскирующего представления %(maskingViewName)s. Сообщение об ошибке: " -"%(errorMessage)s." - -msgid "Failed to identify volume backend." -msgstr "Не удалось идентифицировать базовую программу тома." - -#, python-format -msgid "Failed to link fileset for the share %(cgname)s. Error: %(excmsg)s." -msgstr "" -"Не удалось связать набор файлов для общей группы согласования %(cgname)s. " -"Ошибка: %(excmsg)s." - -#, python-format -msgid "Failed to log on %s Array (invalid login?)." -msgstr "Не удалось войти в массив %s (неправильное имя пользователя?)." - -#, python-format -msgid "Failed to login for user %s." -msgstr "Не удалось войти для пользователя %s." - -msgid "Failed to login with all rest URLs." -msgstr "Не удалось войти через все URL REST." - -#, python-format -msgid "" -"Failed to make a request to Datera cluster endpoint due to the following " -"reason: %s" -msgstr "" -"Не удалось создать запрос к конечной точке кластера Datera по следующей " -"причине: %s" - -msgid "Failed to manage api volume flow." -msgstr "Сбой управления потоком тома api." - -#, python-format -msgid "" -"Failed to manage existing %(type)s %(name)s, because reported size %(size)s " -"was not a floating-point number." -msgstr "" -"Сбой управления существующим %(type)s %(name)s: размер %(size)s не число с " -"плавающей точкой." - -#, python-format -msgid "" -"Failed to manage existing volume %(name)s, because of error in getting " -"volume size." -msgstr "" -"Управление существующим томом %(name)s невозможно из-за ошибки при получении " -"размера тома." - -#, python-format -msgid "" -"Failed to manage existing volume %(name)s, because rename operation failed: " -"Error msg: %(msg)s." 
-msgstr "" -"Управление существующим томом %(name)s невозможно, поскольку операцию " -"переименования выполнить не удалось. Сообщение об ошибке: %(msg)s." - -#, python-format -msgid "" -"Failed to manage existing volume %(name)s, because reported size %(size)s " -"was not a floating-point number." -msgstr "" -"Сбой управления существующего тома %(name)s: размер %(size)s не число с " -"плавающей точкой." - -#, python-format -msgid "" -"Failed to manage existing volume due to I/O group mismatch. The I/O group of " -"the volume to be managed is %(vdisk_iogrp)s. I/O groupof the chosen type is " -"%(opt_iogrp)s." -msgstr "" -"Не удаётся сделать управляемым существующий том из-за несовпадения группы " -"ввода-вывода. Группа ввода-вывода тома, передаваемого в управление, - это " -"%(vdisk_iogrp)s. Группа ввода-вывода выбранного типа - %(opt_iogrp)s." - -#, python-format -msgid "" -"Failed to manage existing volume due to the pool of the volume to be managed " -"does not match the backend pool. Pool of the volume to be managed is " -"%(vdisk_pool)s. Pool of the backend is %(backend_pool)s." -msgstr "" -"Не удаётся сделать управляемым существующий том из-за несовпадения пулов " -"тома и базовой системы. Пул тома, передаваемого в управление, - это " -"%(vdisk_pool)s. Пул базовой системы - %(backend_pool)s." - -msgid "" -"Failed to manage existing volume due to the volume to be managed is " -"compress, but the volume type chosen is not compress." -msgstr "" -"Не удаётся сделать управляемым существующий том, который является сжатым, но " -"тип которого указан как несжатый." - -msgid "" -"Failed to manage existing volume due to the volume to be managed is not " -"compress, but the volume type chosen is compress." -msgstr "" -"Не удаётся сделать управляемым существующий том, который является несжатым, " -"но тип которого указан как сжатый." - -msgid "" -"Failed to manage existing volume due to the volume to be managed is not in a " -"valid I/O group." 
-msgstr "" -"Не удаётся сделать управляемым существующий том, который не включен в " -"допустимую группу ввода-вывода." - -msgid "" -"Failed to manage existing volume due to the volume to be managed is thick, " -"but the volume type chosen is thin." -msgstr "" -"Не удаётся сделать управляемым существующий том, который является " -"расширенным, но тип которого указан как простой." - -msgid "" -"Failed to manage existing volume due to the volume to be managed is thin, " -"but the volume type chosen is thick." -msgstr "" -"Не удаётся сделать управляемым существующий том, который является простым, " -"но тип которого указан как расширенный." - -#, python-format -msgid "Failed to manage volume %s." -msgstr "Сбой управления томом %s." - -#, python-format -msgid "" -"Failed to map a logical device. (LDEV: %(ldev)s, LUN: %(lun)s, port: " -"%(port)s, id: %(id)s)" -msgstr "" -"Не удалось связать логическое устройство. (LDEV: %(ldev)s, LUN: %(lun)s, " -"порт: %(port)s, ИД: %(id)s)" - -msgid "Failed to migrate volume for the first time." -msgstr "Перенести том в первый раз не удалось." - -msgid "Failed to migrate volume for the second time." -msgstr "Перенести том во второй раз не удалось." - -#, python-format -msgid "Failed to move LUN mapping. Return code: %s" -msgstr "Не удалось переместить связи LUN. Код возврата: %s" - -#, python-format -msgid "Failed to move volume %s." -msgstr "Не удалось переместить том %s." - -#, python-format -msgid "Failed to open a file. (file: %(file)s, ret: %(ret)s, stderr: %(err)s)" -msgstr "" -"Не удалось открыть файл. (файл: %(file)s, код возврата: %(ret)s, stderr: " -"%(err)s)" - -#, python-format -msgid "" -"Failed to parse CLI output:\n" -" command: %(cmd)s\n" -" stdout: %(out)s\n" -" stderr: %(err)s." -msgstr "" -"Ошибка анализа вывода интерфейса командной строки:\n" -" команда: %(cmd)s\n" -" stdout: %(out)s\n" -" stderr: %(err)s." 
- -msgid "" -"Failed to parse the configuration option 'keystone_catalog_info', must be in " -"the form ::" -msgstr "" -"Выполнить синтаксический анализ опции конфигурации 'keystone_catalog_info' " -"не удалось, она должна быть в формате <тип_службы>:<имя_службы>:" -"<тип_конечной_точки>" - -msgid "" -"Failed to parse the configuration option 'swift_catalog_info', must be in " -"the form ::" -msgstr "" -"Выполнить синтаксический анализ опции конфигурации 'swift_catalog_info' не " -"удалось, она должна быть в формате <тип_службы>:<имя_службы>:" -"<тип_конечной_точки>" - -#, python-format -msgid "" -"Failed to perform a zero-page reclamation. (LDEV: %(ldev)s, reason: " -"%(reason)s)" -msgstr "" -"Не удалось затребовать обнуленные страницы. (LDEV: %(ldev)s, причина: " -"%(reason)s)" - -#, python-format -msgid "Failed to remove export for volume %(volume)s: %(reason)s" -msgstr "Не удалось удалить экспорт для тома %(volume)s: %(reason)s" - -#, python-format -msgid "Failed to remove iscsi target for volume %(volume_id)s." -msgstr "Не удалось удалить целевой объект iscsi для тома %(volume_id)s." - -#, python-format -msgid "" -"Failed to remove volume %(volumeName)s from consistency group %(cgName)s. " -"Return code: %(rc)lu. Error: %(error)s." -msgstr "" -"Не удалось удалить том %(volumeName)s из группы согласования %(cgName)s. Код " -"возврата: %(rc)lu. Ошибка: %(error)s." - -#, python-format -msgid "Failed to remove volume %(volumeName)s from default SG." -msgstr "" -"Не удалось удалить том %(volumeName)s из группы носителей по умолчанию." - -#, python-format -msgid "Failed to remove volume %(volumeName)s from default SG: %(volumeName)s." -msgstr "" -"Не удалось удалить том %(volumeName)s из группы носителей по умолчанию: " -"%(volumeName)s." - -#, python-format -msgid "" -"Failed to remove: %(volumename)s. from the default storage group for FAST " -"policy %(fastPolicyName)s." 
-msgstr "" -"Не удалось удалить том %(volumename)s из группы носителей по умолчанию для " -"стратегии FAST %(fastPolicyName)s." - -#, python-format -msgid "" -"Failed to rename logical volume %(name)s, error message was: %(err_msg)s" -msgstr "" -"Не удалось переименовать логический том %(name)s, сообщение об ошибке: " -"%(err_msg)s" - -#, python-format -msgid "Failed to retrieve active zoning configuration %s" -msgstr "Извлечь активную конфигурацию распределения по зонам, %s, не удалось" - -#, python-format -msgid "" -"Failed to set CHAP authentication for target IQN %(iqn)s. Details: %(ex)s" -msgstr "" -"Не удалось настроить идентификацию CHAP для целевого IQN %(iqn)s. " -"Подробности: %(ex)s" - -#, python-format -msgid "Failed to set QoS for existing volume %(name)s, Error msg: %(msg)s." -msgstr "" -"Не удалось задать QoS для существующего тома %(name)s. Сообщение об ошибке: " -"%(msg)s." - -msgid "Failed to set attribute 'Incoming user' for SCST target." -msgstr "" -"Не удалось задать атрибут Входящий пользователь для целевого объекта SCST." - -msgid "Failed to set partition." -msgstr "Не удалось задать раздел." - -#, python-format -msgid "" -"Failed to set permissions for the consistency group %(cgname)s. Error: " -"%(excmsg)s." -msgstr "" -"Не удалось настроить права доступа для группы согласования %(cgname)s. " -"Ошибка: %(excmsg)s." - -#, python-format -msgid "" -"Failed to specify a logical device for the volume %(volume_id)s to be " -"unmapped." -msgstr "" -"Не удалось указать логическое устройство для тома %(volume_id)s, связь " -"которого необходимо удалить." - -#, python-format -msgid "" -"Failed to specify a logical device to be deleted. (method: %(method)s, id: " -"%(id)s)" -msgstr "" -"Не удалось указать удаляемое логическое устройство. (метод: %(method)s, ИД: " -"%(id)s)" - -msgid "Failed to terminate migrate session." -msgstr "Завершить сеанс переноса не удалось." 
- -#, python-format -msgid "Failed to unbind volume %(volume)s" -msgstr "Не удалось удалить привязку тома %(volume)s" - -#, python-format -msgid "" -"Failed to unlink fileset for consistency group %(cgname)s. Error: %(excmsg)s." -msgstr "" -"Не удалось удалить связь набора файлов для группы согласования %(cgname)s. " -"Ошибка: %(excmsg)s." - -#, python-format -msgid "Failed to unmap a logical device. (LDEV: %(ldev)s, reason: %(reason)s)" -msgstr "" -"Не удалось удалить связь логического устройства. (LDEV: %(ldev)s, причина: " -"%(reason)s)" - -#, python-format -msgid "Failed to update consistency group: %(cgName)s." -msgstr "Не удалось изменить группу согласования: %(cgName)s." - -#, python-format -msgid "Failed to update metadata for volume: %(reason)s" -msgstr "Не удалось обновить метаданные для тома: %(reason)s" - -msgid "Failed to update or delete zoning configuration" -msgstr "Не удалось обновить или удалить конфигурацию распределения по зонам" - -msgid "Failed to update or delete zoning configuration." -msgstr "Не удалось обновить или удалить конфигурацию распределения по зонам." - -#, python-format -msgid "Failed to update qos_specs: %(specs_id)s with specs %(qos_specs)s." -msgstr "" -"Не удалось изменить qos_specs %(specs_id)s со спецификацией %(qos_specs)s." - -msgid "Failed to update quota usage while retyping volume." -msgstr "Не удалось обновить использование квот при изменении типа тома." - -msgid "Failed to update snapshot." -msgstr "Не удалось обновить моментальную копию." - -#, python-format -msgid "" -"Failed updating volume %(vol_id)s metadata using the provided %(src_type)s " -"%(src_id)s metadata" -msgstr "" -"Не удалось обновить метаданные тома %(vol_id)s с помощью указанных " -"метаданных %(src_type)s %(src_id)s" - -#, python-format -msgid "Failure creating volume %s." -msgstr "Сбой создания тома %s." - -#, python-format -msgid "Failure getting LUN info for %s." -msgstr "Сбой получения информации LUN для %s." 
- -#, python-format -msgid "Failure moving new cloned LUN to %s." -msgstr "Сбой при перемещении новой копии LUN в %s." - -#, python-format -msgid "Failure staging LUN %s to tmp." -msgstr "Сбой промежуточного копирования LUN %s в tmp." - -#, python-format -msgid "Fexvisor failed to add volume %(id)s due to %(reason)s." -msgstr "Fexvisor не удалось добавить том %(id)s, причина: %(reason)s." - -#, python-format -msgid "" -"Fexvisor failed to join the volume %(vol)s in the group %(group)s due to " -"%(ret)s." -msgstr "" -"Fexvisor не удалось добавить том %(vol)s в группу %(group)s. Причина: " -"%(ret)s." - -#, python-format -msgid "" -"Fexvisor failed to remove the volume %(vol)s in the group %(group)s due to " -"%(ret)s." -msgstr "" -"Fexvisor не удалось удалить том %(vol)s из группы %(group)s. Причина: " -"%(ret)s." - -#, python-format -msgid "Fexvisor failed to remove volume %(id)s due to %(reason)s." -msgstr "Fexvisor не удалось удалить том %(id)s. Причина: %(reason)s." - -#, python-format -msgid "Fibre Channel SAN Lookup failure: %(reason)s" -msgstr "Сбой поиска в SAN Fibre Channel: %(reason)s" - -#, python-format -msgid "Fibre Channel Zone operation failed: %(reason)s" -msgstr "Сбой операции зоны Fibre Channel: %(reason)s" - -#, python-format -msgid "Fibre Channel connection control failure: %(reason)s" -msgstr "Сбой управления соединением Fibre Channel: %(reason)s" - -#, python-format -msgid "File %(file_path)s could not be found." -msgstr "Файл %(file_path)s не может быть найден." - -#, python-format -msgid "File %(path)s has invalid backing file %(bfile)s, aborting." -msgstr "" -"Файл %(path)s содержит недопустимый базовый файл %(bfile)s, принудительное " -"завершение." - -#, python-format -msgid "File already exists at %s." -msgstr "Файл уже существует в %s." - -#, python-format -msgid "File already exists at: %s" -msgstr "Файл уже существует в %s" - -msgid "Find host in hostgroup error." -msgstr "Ошибка поиска хоста в группе хостов." 
- -msgid "Find host lun id error." -msgstr "Ошибка поиска ИД LUN хоста." - -msgid "Find lun group from mapping view error." -msgstr "Ошибка поиска группы LUN из представления связей." - -msgid "Find mapping view error." -msgstr "Ошибка поиска представления связей." - -msgid "Find portgroup error." -msgstr "Ошибка поиска группы портов." - -msgid "Find portgroup from mapping view error." -msgstr "Ошибка поиска группы портов из представления связей." - -#, python-format -msgid "" -"Flash Cache Policy requires WSAPI version '%(fcache_version)s' version " -"'%(version)s' is installed." -msgstr "" -"Для стратегии кэша во флэш-памяти требуется, чтобы версия WSAPI " -"%(fcache_version)s %(version)s была установлена." - -#, python-format -msgid "Flexvisor assign volume failed.:%(id)s:%(status)s." -msgstr "Flexvisor не удалось назначить том: %(id)s:%(status)s." - -#, python-format -msgid "Flexvisor assign volume failed:%(id)s:%(status)s." -msgstr "Flexvisor не удалось назначить том: %(id)s:%(status)s." - -#, python-format -msgid "" -"Flexvisor could not find volume %(id)s snapshot in the group %(vgid)s " -"snapshot %(vgsid)s." -msgstr "" -"Flexvisor не удалось найти моментальную копию тома %(id)s в группе %(vgid)s. " -"Моментальная копия: %(vgsid)s." - -#, python-format -msgid "Flexvisor create volume failed.:%(volumeid)s:%(status)s." -msgstr "Flexvisor не удалось создать том: %(volumeid)s:%(status)s." - -#, python-format -msgid "Flexvisor failed deleting volume %(id)s: %(status)s." -msgstr "Flexvisor не удалось удалить том %(id)s: %(status)s." - -#, python-format -msgid "Flexvisor failed to add volume %(id)s to group %(cgid)s." -msgstr "Flexvisor не удалось добавить том %(id)s в группу %(cgid)s." - -#, python-format -msgid "" -"Flexvisor failed to assign volume %(id)s due to unable to query status by " -"event id." -msgstr "" -"Flexvisor не удалось назначить том %(id)s из-за невозможности запросить " -"состояние по ИД события." 
- -#, python-format -msgid "Flexvisor failed to assign volume %(id)s: %(status)s." -msgstr "Flexvisor не удалось назначить том %(id)s: %(status)s." - -#, python-format -msgid "Flexvisor failed to assign volume %(volume)s iqn %(iqn)s." -msgstr "Flexvisor не удалось назначить том %(volume)s: %(iqn)s." - -#, python-format -msgid "Flexvisor failed to clone volume %(id)s: %(status)s." -msgstr "Flexvisor не удалось скопировать том %(id)s: %(status)s." - -#, python-format -msgid "Flexvisor failed to clone volume (failed to get event) %(id)s." -msgstr "" -"Flexvisor не удалось скопировать том (не удалось получить событие) %(id)s." - -#, python-format -msgid "Flexvisor failed to create snapshot for volume %(id)s: %(status)s." -msgstr "" -"Flexvisor не удалось создать моментальную копию тома %(id)s: %(status)s." - -#, python-format -msgid "" -"Flexvisor failed to create snapshot for volume (failed to get event) %(id)s." -msgstr "" -"Flexvisor не удалось создать моментальную копию тома (не удалось получить " -"событие) %(id)s." - -#, python-format -msgid "Flexvisor failed to create volume %(id)s in the group %(vgid)s." -msgstr "Flexvisor не удалось создать том %(id)s в группе %(vgid)s." - -#, python-format -msgid "Flexvisor failed to create volume %(volume)s: %(status)s." -msgstr "Flexvisor не удалось создать том %(volume)s: %(status)s." - -#, python-format -msgid "Flexvisor failed to create volume (get event) %s." -msgstr "Flexvisor не удалось создать том (получить событие) %s." - -#, python-format -msgid "Flexvisor failed to create volume from snapshot %(id)s: %(status)s." -msgstr "" -"Flexvisor не удалось создать том из моментальной копии %(id)s:%(status)s." - -#, python-format -msgid "Flexvisor failed to create volume from snapshot %(id)s:%(status)s." -msgstr "" -"Flexvisor не удалось создать том из моментальной копии %(id)s:%(status)s." - -#, python-format -msgid "" -"Flexvisor failed to create volume from snapshot (failed to get event) %(id)s." 
-msgstr "" -"Flexvisor не удалось создать том из моментальной копии (не удалось получить " -"событие) %(id)s." - -#, python-format -msgid "Flexvisor failed to delete snapshot %(id)s: %(status)s." -msgstr "Flexvisor не удалось удалить моментальную копию %(id)s: %(status)s." - -#, python-format -msgid "Flexvisor failed to delete snapshot (failed to get event) %(id)s." -msgstr "" -"Flexvisor не удалось удалить моментальную копию (не удалось получить " -"событие) %(id)s." - -#, python-format -msgid "Flexvisor failed to delete volume %(id)s: %(status)s." -msgstr "Flexvisor не удалось удалить том %(id)s: %(status)s." - -#, python-format -msgid "Flexvisor failed to extend volume %(id)s: %(status)s." -msgstr "Flexvisor не удалось расширить том %(id)s: %(status)s." - -#, python-format -msgid "Flexvisor failed to extend volume %(id)s:%(status)s." -msgstr "Flexvisor не удалось расширить том %(id)s: %(status)s." - -#, python-format -msgid "Flexvisor failed to extend volume (failed to get event) %(id)s." -msgstr "" -"Flexvisor не удалось расширить том (не удалось получить событие) %(id)s." - -#, python-format -msgid "Flexvisor failed to get pool info %(id)s: %(status)s." -msgstr "Flexvisor не удалось получить информацию о пуле %(id)s: %(status)s." - -#, python-format -msgid "" -"Flexvisor failed to get snapshot id of volume %(id)s from group %(vgid)s." -msgstr "" -"Flexvisor не удалось получить ИД моментальной копии тома %(id)s из группы " -"%(vgid)s." - -#, python-format -msgid "Flexvisor failed to remove volume %(id)s from group %(cgid)s." -msgstr "Flexvisor не удалось удалить том %(id)s из группы %(cgid)s." - -#, python-format -msgid "Flexvisor failed to spawn volume from snapshot %(id)s:%(status)s." -msgstr "" -"Flexvisor не удалось породить том из моментальной копии %(id)s:%(status)s." - -#, python-format -msgid "" -"Flexvisor failed to spawn volume from snapshot (failed to get event) %(id)s." 
-msgstr "" -"Flexvisor не удалось породить том из моментальной копии (не удалось получить " -"событие) %(id)s." - -#, python-format -msgid "Flexvisor failed to unassign volume %(id)s: %(status)s." -msgstr "Flexvisor не удалось отменить назначение тома %(id)s: %(status)s." - -#, python-format -msgid "Flexvisor failed to unassign volume (get event) %(id)s." -msgstr "" -"Flexvisor не удалось отменить назначение тома (получить событие) %(id)s." - -#, python-format -msgid "Flexvisor failed to unassign volume:%(id)s:%(status)s." -msgstr "Flexvisor не удалось отменить назначение тома %(id)s: %(status)s." - -#, python-format -msgid "Flexvisor unable to find the source volume %(id)s info." -msgstr "Flexvisor не удалось найти информацию об исходном томе %(id)s." - -#, python-format -msgid "Flexvisor unassign volume failed:%(id)s:%(status)s." -msgstr "Flexvisor не удалось отменить назначение тома: %(id)s:%(status)s." - -#, python-format -msgid "Flexvisor volume %(id)s failed to join group %(vgid)s." -msgstr "Тому Flexvisor %(id)s не удалось присоединить группу %(vgid)s." - -#, python-format -msgid "Folder %s does not exist in Nexenta Store appliance" -msgstr "Папка %s не существует в устройстве Nexenta Store" - -#, python-format -msgid "GPFS is not running, state: %s." -msgstr "GPFS не выполняется, состояние: %s." - -msgid "Gateway VIP is not set" -msgstr "Не задан VIP шлюза" - -msgid "Get FC ports by port group error." -msgstr "Ошибка получения портов FC по группе портов." - -msgid "Get FC ports from array error." -msgstr "Ошибка получения портов Fibre Channel из массива." - -msgid "Get FC target wwpn error." -msgstr "Ошибка получения целевого WWPN Fibre Channel." - -msgid "Get HyperMetroPair error." -msgstr "Ошибка получения HyperMetroPair." - -msgid "Get LUN group by view error." -msgstr "Ошибка получения группы LUN по представлению." - -msgid "Get LUNcopy information error." -msgstr "Ошибка получения информации LUNcopy." - -msgid "Get QoS id by lun id error." 
-msgstr "Ошибка получения ИД QoS по ИД LUN." - -msgid "Get QoS information error." -msgstr "Ошибка получения информации QoS." - -msgid "Get QoS policy error." -msgstr "Ошибка получения стратегии QoS." - -msgid "Get SplitMirror error." -msgstr "Ошибка получения SplitMirror." - -msgid "Get active client failed." -msgstr "Не удалось получить активного клиента." - -msgid "Get array info error." -msgstr "Ошибка получения информации о массиве." - -msgid "Get cache by name error." -msgstr "Ошибка получения кэша по имени." - -msgid "Get connected free FC wwn error." -msgstr "Ошибка получения подключенного свободного WWN Fibre Channel." - -msgid "Get engines error." -msgstr "Ошибка получения модулей." - -msgid "Get host initiators info failed." -msgstr "Ошибка получения информации об инициаторах хоста." - -msgid "Get hostgroup information error." -msgstr "Ошибка получения информации о группе хостов." - -msgid "" -"Get iSCSI port info error, please check the target IP configured in huawei " -"conf file." -msgstr "" -"Ошибка получения информации о портах iSCSI. Проверьте целевой IP-адрес, " -"настроенный в файле конфигурации huawei." - -msgid "Get iSCSI port information error." -msgstr "Ошибка получения информации о портах iSCSI." - -msgid "Get iSCSI target port error." -msgstr "Ошибка получения порта целевого объекта iSCSI." - -msgid "Get lun id by name error." -msgstr "Ошибка получения ИД LUN по имени." - -msgid "Get lun migration task error." -msgstr "Ошибка получения задачи переноса LUN." - -msgid "Get lungroup id by lun id error." -msgstr "Ошибка получения ИД группы LUN по ИД LUN." - -msgid "Get lungroup information error." -msgstr "Ошибка получения информации о группе LUN." - -msgid "Get migration task error." -msgstr "Ошибка получения задачи переноса." - -msgid "Get pair failed." -msgstr "Не удалось получить пару." - -msgid "Get partition by name error." -msgstr "Ошибка получения раздела по имени." - -msgid "Get partition by partition id error." 
-msgstr "Ошибка получения раздела по ИД раздела." - -msgid "Get port group by view error." -msgstr "Ошибка получения группы портов по представлению." - -msgid "Get port group error." -msgstr "Ошибка получения группы портов." - -msgid "Get port groups by port error." -msgstr "Ошибка получения группы портов по порту." - -msgid "Get ports by port group error." -msgstr "Ошибка получения портов по группе портов." - -msgid "Get remote device info failed." -msgstr "Ошибка получения удаленного устройства." - -msgid "Get remote devices error." -msgstr "Ошибка получения удаленных устройств." - -msgid "Get smartcache by cache id error." -msgstr "Ошибка получения smartcache по ИД кэша." - -msgid "Get snapshot error." -msgstr "Ошибка получения моментальной копии." - -msgid "Get snapshot id error." -msgstr "Ошибка получения ИД моментальной копии." - -msgid "Get target IP error." -msgstr "Ошибка получения целевого IP-адреса." - -msgid "Get target LUN of SplitMirror error." -msgstr "Ошибка получения целевого LUN или SplitMirror." - -msgid "Get views by port group error." -msgstr "Ошибка получения представлений по группе портов." - -msgid "Get volume by name error." -msgstr "Ошибка получения тома по имени." - -msgid "Get volume error." -msgstr "Ошибка получения тома." - -#, python-format -msgid "" -"Glance metadata cannot be updated, key %(key)s exists for volume id " -"%(volume_id)s" -msgstr "" -"Не удается обновить метаданные Glance, ключ %(key)s существует для ИД тома " -"%(volume_id)s" - -#, python-format -msgid "Glance metadata for volume/snapshot %(id)s cannot be found." -msgstr "Не найдены метаданные glance для тома/моментальной копии %(id)s." 
- -#, python-format -msgid "Gluster config file at %(config)s doesn't exist" -msgstr "Файл конфигурации Gluster в %(config)s не существует" - -#, python-format -msgid "Google Cloud Storage api failure: %(reason)s" -msgstr "Ошибка API Google Cloud Storage: %(reason)s" - -#, python-format -msgid "Google Cloud Storage connection failure: %(reason)s" -msgstr "Ошибка связи с Google Cloud Storage: %(reason)s" - -#, python-format -msgid "Google Cloud Storage oauth2 failure: %(reason)s" -msgstr "Ошибка oauth2 Google Cloud Storage: %(reason)s" - -#, python-format -msgid "Got bad path information from DRBDmanage! (%s)" -msgstr "Получена неправильная информация о пути от DRBDmanage! (%s)" - -msgid "HBSD error occurs." -msgstr "Ошибка HBSD." - -msgid "HPELeftHand url not found" -msgstr "URL HPELeftHand не найден" - -#, python-format -msgid "" -"Hash block size has changed since the last backup. New hash block size: " -"%(new)s. Old hash block size: %(old)s. Do a full backup." -msgstr "" -"Размер блока хэша изменился с момента последнего резервного копирования. " -"Новый размер блока хэша: %(new)s. Прежний размер блока хэша: %(old)s. " -"Необходимо полное резервное копирование." - -#, python-format -msgid "Have not created %(tier_levels)s tier(s)." -msgstr "Не созданы слои %(tier_levels)s." - -#, python-format -msgid "Hint \"%s\" not supported." -msgstr "Всплывающая подсказка \"%s\" не поддерживается." - -msgid "Host" -msgstr "Узел" - -#, python-format -msgid "Host %(host)s could not be found." -msgstr "Узел %(host)s не найден." - -#, python-format -msgid "" -"Host %(host)s does not match x509 certificate contents: CommonName " -"%(commonName)s." -msgstr "" -"Хост %(host)s не соответствует содержимому сертификата x509: CommonName " -"%(commonName)s." 
- -#, python-format -msgid "Host %s has no FC initiators" -msgstr "У хоста %s нет инициаторов FC" - -#, python-format -msgid "Host group with name %s not found" -msgstr "Не найдена группа хостов с именем %s" - -#, python-format -msgid "Host group with ref %s not found" -msgstr "Не найдена группа хостов со ссылкой %s" - -msgid "Host is NOT Frozen." -msgstr "Хост не заморожен." - -msgid "Host is already Frozen." -msgstr "Хост уже заморожен." - -#, python-format -msgid "Host not found. Failed to remove %(service)s on %(host)s." -msgstr "Хост не найден. Не удалось переместить %(service)s на %(host)s." - -#, python-format -msgid "Host replication_status must be %s to failover." -msgstr "Для аварийного переключения состояние репликации хоста должно быть %s." - -#, python-format -msgid "Host type %s not supported." -msgstr "Тип хоста %s не поддерживается." - -#, python-format -msgid "Host with ports %(ports)s not found." -msgstr "Не найден хост с портами %(ports)s." - -msgid "Hypermetro and Replication can not be used in the same volume_type." -msgstr "" -"Нельзя использовать Hypermetro и репликацию с одним и тем же типом тома." - -#, python-format -msgid "I/O group %(iogrp)d is not valid; available I/O groups are %(avail)s." -msgstr "" -"Группа ввода-вывода %(iogrp)d недопустимая. Доступные группы ввода-вывода: " -"%(avail)s." - -msgid "ID" -msgstr "ID" - -msgid "" -"If compression is set to True, rsize must also be set (not equal to -1)." -msgstr "" -"Если параметру сжатия присвоено значение True, необходимо также указать " -"значение rsize (не равное -1)." - -msgid "If nofmtdisk is set to True, rsize must also be set to -1." -msgstr "" -"Если параметру nofmtdisk присвоено значение True, то rsize должен быть равен " -"-1." - -#, python-format -msgid "" -"Illegal value '%(prot)s' specified for flashsystem_connection_protocol: " -"valid value(s) are %(enabled)s." 
-msgstr "" -"Для flashsystem_connection_protocol указано недопустимое значение " -"'%(prot)s': допустимые значения: %(enabled)s." - -msgid "Illegal value specified for IOTYPE: 0, 1, or 2." -msgstr "Указано недопустимое значение для IOTYPE: 0, 1 или 2." - -msgid "Illegal value specified for smarttier: set to either 0, 1, 2, or 3." -msgstr "" -"Указано недопустимое значение для smarttier. Допустимые значения: 0, 1, 2 и " -"3." - -msgid "" -"Illegal value specified for storwize_svc_vol_grainsize: set to either 32, " -"64, 128, or 256." -msgstr "" -"Для storwize_svc_vol_grainsize указано недопустимое значение: допустимые " -"значения: 32, 64, 128 и 256." - -msgid "" -"Illegal value specified for thin: Can not set thin and thick at the same " -"time." -msgstr "" -"Указано недопустимое значение для тонкого резервирования. Нельзя указывать " -"тонкое и толстое резервирование одновременно." - -#, python-format -msgid "Image %(image_id)s could not be found." -msgstr "Образ %(image_id)s не найден." - -#, python-format -msgid "Image %(image_id)s is not active." -msgstr "Образ %(image_id)s не активен." - -#, python-format -msgid "Image %(image_id)s is unacceptable: %(reason)s" -msgstr "Образ %(image_id)s недопустим: %(reason)s" - -msgid "Image location not present." -msgstr "Не указано расположение образа." - -#, python-format -msgid "" -"Image virtual size is %(image_size)dGB and doesn't fit in a volume of size " -"%(volume_size)dGB." -msgstr "" -"Виртуальный размер образа %(image_size)d ГБ, он слишком большой для тома " -"размером %(volume_size)d ГБ." - -msgid "" -"ImageBusy error raised while deleting rbd volume. This may have been caused " -"by a connection from a client that has crashed and, if so, may be resolved " -"by retrying the delete after 30 seconds has elapsed." -msgstr "" -"Возникла ошибка ImageBusy при удалении тома rbd. Причиной может быть сбой " -"клиентского соединения. В этом случае повторите удаление через 30 с." 
- -#, python-format -msgid "" -"Import record failed, cannot find backup service to perform the import. " -"Request service %(service)s" -msgstr "" -"Сбой импорта записи: не найдена служба резервного копирования для выполнения " -"импорта. Запрос службы %(service)s" - -msgid "Incorrect request body format" -msgstr "Неправильный формат тела запроса" - -msgid "Incorrect request body format." -msgstr "Недопустимый формат тела запроса." - -msgid "Incremental backups exist for this backup." -msgstr "Для этой резервной копии существует дополняющие резервные копии." - -#, python-format -msgid "" -"Infortrend CLI exception: %(err)s Param: %(param)s (Return Code: %(rc)s) " -"(Output: %(out)s)" -msgstr "" -"Исключительная ситуация CLI Infortrend: %(err)s. Параметр: %(param)s (Код " -"возврата: %(rc)s) (Вывод: %(out)s)" - -msgid "Input volumes or snapshots are invalid." -msgstr "Недопустимые входные тома или моментальные копии." - -msgid "Input volumes or source volumes are invalid." -msgstr "Недопустимые входные или выходные тома." - -#, python-format -msgid "Instance %(uuid)s could not be found." -msgstr "Не удалось найти экземпляр %(uuid)s." - -msgid "Insufficient free space available to extend volume." -msgstr "Недостаточно места для расширения тома." - -msgid "Insufficient privileges" -msgstr "Недостаточно прав доступа" - -#, python-format -msgid "Invalid 3PAR Domain: %(err)s" -msgstr "Недопустимый домен 3PAR: %(err)s" - -msgid "Invalid ALUA value. ALUA value must be 1 or 0." -msgstr "Недопустимое значение ALUA. Значение ALUA должно быть 1 или 0." 
- -msgid "Invalid Ceph args provided for backup rbd operation" -msgstr "" -"Для операции резервного копирования rbd указаны недопустимые аргументы Ceph" - -#, python-format -msgid "Invalid CgSnapshot: %(reason)s" -msgstr "Недопустимая моментальная копия группы согласования: %(reason)s" - -#, python-format -msgid "Invalid ConsistencyGroup: %(reason)s" -msgstr "Недопустимая группа согласования: %(reason)s" - -msgid "Invalid ConsistencyGroup: No host to create consistency group" -msgstr "" -"Недопустимая группа согласования: нет хоста для создания группы согласования" - -#, python-format -msgid "" -"Invalid HPELeftHand API version found: %(found)s. Version %(minimum)s or " -"greater required for manage/unmanage support." -msgstr "" -"Обнаружена недопустимая версия API HPELeftHand: %(found)s. Версия " -"%(minimum)s или выше требуется для поддержки управления/отмены управления." - -#, python-format -msgid "Invalid IP address format: '%s'" -msgstr "Недопустимый формат IP-адреса: '%s'" - -#, python-format -msgid "" -"Invalid QoS specification detected while getting QoS policy for volume %s" -msgstr "" -"Обнаружена недопустимая спецификация QoS при получении стратегии QoS для " -"тома %s" - -#, python-format -msgid "Invalid Replication Target: %(reason)s" -msgstr "Недопустимый целевой объект репликации: %(reason)s" - -#, python-format -msgid "" -"Invalid Virtuozzo Storage share specification: %r. Must be: [MDS1[," -"MDS2],...:/][:PASSWORD]." -msgstr "" -"Недопустимая спецификация общего ресурса Virtuozzo Storage: %r. Должно быть: " -"[MDS1[,MDS2],...:/]<ИМЯ-КЛАСТЕРА>[:ПАРОЛЬ]." 
- -#, python-format -msgid "Invalid XtremIO version %(cur)s, version %(min)s or up is required" -msgstr "Недопустимая версия XtremIO %(cur)s, требуется версия не ниже %(min)s" - -#, python-format -msgid "Invalid allocated quotas defined for the following project quotas: %s" -msgstr "Неправильно определены квоты для следующего проекта: %s" - -msgid "Invalid argument" -msgstr "Недопустимый аргумент" - -msgid "Invalid argument - negative seek offset." -msgstr "Недопустимый аргумент - отрицательное смещение функции seek." - -#, python-format -msgid "Invalid argument - whence=%s not supported" -msgstr "Недопустимый аргумент - whence=%s не поддерживается" - -#, python-format -msgid "Invalid argument - whence=%s not supported." -msgstr "Недопустимый аргумент - whence=%s не поддерживается." - -#, python-format -msgid "Invalid attaching mode '%(mode)s' for volume %(volume_id)s." -msgstr "Недопустимый режим подключения %(mode)s для тома %(volume_id)s." - -#, python-format -msgid "Invalid auth key: %(reason)s" -msgstr "Недопустимый ключ идентификации: %(reason)s" - -#, python-format -msgid "Invalid backup: %(reason)s" -msgstr "Недопустимая резервная копия: %(reason)s" - -msgid "Invalid chap user details found in CloudByte storage." -msgstr "" -"Обнаружена недопустимая информация о пользователе chap в хранилище CloudByte." - -#, python-format -msgid "Invalid connection initialization response of volume %(name)s" -msgstr "Недопустимый ответ об инициализации соединения от тома %(name)s:" - -#, python-format -msgid "" -"Invalid connection initialization response of volume %(name)s: %(output)s" -msgstr "" -"Недопустимый ответ об инициализации соединения от тома %(name)s: %(output)s" - -#, python-format -msgid "Invalid content type %(content_type)s." -msgstr "Недопустимый тип содержимого %(content_type)s." 
- -msgid "Invalid credentials" -msgstr "Недопустимые идентификационные данные" - -#, python-format -msgid "Invalid directory: %s" -msgstr "Недопустимый каталог: %s" - -#, python-format -msgid "Invalid disk adapter type: %(invalid_type)s." -msgstr "Недопустимый тип адаптера диска: %(invalid_type)s." - -#, python-format -msgid "Invalid disk backing: %s." -msgstr "Недопустимая база диска: %s." - -#, python-format -msgid "Invalid disk type: %(disk_type)s." -msgstr "Недопустимый тип диска: %(disk_type)s." - -#, python-format -msgid "Invalid disk type: %s." -msgstr "Недопустимый тип диска: %s." - -#, python-format -msgid "Invalid host: %(reason)s" -msgstr "Недопустимый хост: %(reason)s" - -#, python-format -msgid "" -"Invalid hpe3parclient version found (%(found)s). Version %(minimum)s or " -"greater required. Run \"pip install --upgrade python-3parclient\" to upgrade " -"the hpe3parclient." -msgstr "" -"Обнаружена недопустимая версия hpe3parclient (%(found)s). Требуется версия " -"%(minimum)s или более поздняя. Выполните команду \"pip install --upgrade " -"python-3parclient\" для обновления hpe3parclient." - -#, python-format -msgid "" -"Invalid hpelefthandclient version found (%(found)s). Version %(minimum)s or " -"greater required. Run 'pip install --upgrade python-lefthandclient' to " -"upgrade the hpelefthandclient." -msgstr "" -"Обнаружена недопустимая версия hpelefthandclient (%(found)s). Требуется " -"версия %(minimum)s или более поздняя. Выполните команду 'pip install --" -"upgrade python-lefthandclient' для обновления hpelefthandclient." - -#, python-format -msgid "Invalid image href %(image_href)s." -msgstr "Недопустимый образ href %(image_href)s." - -msgid "Invalid image identifier or unable to access requested image." -msgstr "" -"Недопустимый идентификатор образа или отсутствует доступ к запрошенному " -"образу." - -msgid "Invalid imageRef provided." -msgstr "Указан неверный imageRef." 
- -msgid "Invalid input" -msgstr "Недопустимый ввод" - -#, python-format -msgid "Invalid input received: %(reason)s" -msgstr "Получены недопустимые входные данные: %(reason)s" - -#, python-format -msgid "Invalid is_public filter [%s]" -msgstr "Неверный фильтр is_public [%s]" - -#, python-format -msgid "Invalid lun type %s is configured." -msgstr "Настроен недопустимый тип lun %s." - -#, python-format -msgid "Invalid metadata size: %(reason)s" -msgstr "Недопустимый размер метаданных: %(reason)s" - -#, python-format -msgid "Invalid metadata: %(reason)s" -msgstr "Недопустимые метаданные: %(reason)s" - -#, python-format -msgid "Invalid mount point base: %s" -msgstr "Недопустимая база точки монтирования: %s" - -#, python-format -msgid "Invalid mount point base: %s." -msgstr "Недопустимая база точки монтирования: %s." - -#, python-format -msgid "Invalid new snapCPG name for retype. new_snap_cpg='%s'." -msgstr "Недопустимое новое имя snapCPG для изменения типа. new_snap_cpg='%s'." - -#, python-format -msgid "Invalid port number %(config)s for Coho rpc port" -msgstr "Недопустимый порт %(config)s для порта RPC Coho" - -#, python-format -msgid "" -"Invalid prefetch type '%s' is configured. PrefetchType must be in 0,1,2,3." -msgstr "" -"Настроен недопустимый тип предварительной выборки '%s'. Тип PrefetchType " -"должен быть 0,1,2,3." - -#, python-format -msgid "Invalid qos specs: %(reason)s" -msgstr "Недопустимая спецификация QoS: %(reason)s" - -msgid "Invalid request to attach volume to an invalid target" -msgstr "" -"Недопустимый запрос на подключение тома к недопустимому целевому объекту" - -msgid "" -"Invalid request to attach volume with an invalid mode. Attaching mode should " -"be 'rw' or 'ro'" -msgstr "" -"Недопустимый запрос на подключение тома с недопустимым режимом. Режим " -"подключения должен быть 'rw' или 'ro'" - -#, python-format -msgid "Invalid reservation expiration %(expire)s." -msgstr "Неверный срок резервирования %(expire)s." 
- -msgid "Invalid response header from RPC server" -msgstr "Недопустимый заголовок ответа от сервера RPC" - -#, python-format -msgid "Invalid secondary id %s." -msgstr "Недопустимый вторичный ИД %s." - -msgid "Invalid service catalog json." -msgstr "Недопустимый json каталога службы." - -msgid "Invalid sheepdog cluster status." -msgstr "Недопустимое состояние кластера sheepdog." - -#, python-format -msgid "Invalid snapshot: %(reason)s" -msgstr "Недопустимая моментальная копия: %(reason)s" - -#, python-format -msgid "Invalid status: '%s'" -msgstr "Недопустимое состояние: '%s'" - -#, python-format -msgid "Invalid storage pool %s requested. Retype failed." -msgstr "Запрошен недопустимый пул памяти %s. Сбой изменения типа." - -#, python-format -msgid "Invalid storage pool %s specificed." -msgstr "Указан недопустимый пул памяти %s." - -msgid "Invalid storage pool is configured." -msgstr "Настроен недопустимый пул памяти." - -msgid "Invalid transport type." -msgstr "Недопустимый тип транспорта." - -#, python-format -msgid "Invalid update setting: '%s'" -msgstr "Недопустимый параметр обновления: '%s'" - -#, python-format -msgid "Invalid value '%s' for force." -msgstr "Неверное значение '%s' для принудительного применения." - -#, python-format -msgid "Invalid value '%s' for force. " -msgstr "Недопустимое значение '%s' для принудительного использования. " - -#, python-format -msgid "Invalid value '%s' for is_public. Accepted values: True or False." -msgstr "" -"Недопустимое значение '%s' для is_public. Допустимые значения: True или " -"False." - -#, python-format -msgid "Invalid value '%s' for skip_validation." -msgstr "Недопустимое значение '%s' для skip_validation." 
- -#, python-format -msgid "Invalid value for 'bootable': '%s'" -msgstr "Недопустимое значение для 'bootable': '%s'" - -#, python-format -msgid "Invalid value for 'force': '%s'" -msgstr "Недопустимое значение для 'force': '%s'" - -#, python-format -msgid "Invalid value for 'readonly': '%s'" -msgstr "Недопустимое значение для 'readonly': '%s'" - -msgid "Invalid value for 'scheduler_max_attempts', must be >=1" -msgstr "" -"Недопустимое значение для 'scheduler_max_attempts', значение должно быть >=1" - -msgid "Invalid value for NetApp configuration option netapp_host_type." -msgstr "Недопустимое значение параметра конфигурации netapp_host_type NetApp." - -msgid "Invalid value for NetApp configuration option netapp_lun_ostype." -msgstr "Недопустимое значение параметра конфигурации netapp_lun_ostype NetApp." - -#, python-format -msgid "Invalid value for age, %(age)s" -msgstr "Недопустимое значение возраста: %(age)s" - -#, python-format -msgid "Invalid value: \"%s\"" -msgstr "Недопустимое значение: \"%s\"" - -#, python-format -msgid "" -"Invalid volume size provided for create request: %s (size argument must be " -"an integer (or string representation of an integer) and greater than zero)." -msgstr "" -"В запросе на создание указан недопустимый размер тома: %s (аргумент размера " -"должен быть целым числом или строковым представлением числа и быть больше " -"нуля)." - -#, python-format -msgid "Invalid volume type: %(reason)s" -msgstr "Недопустимый тип тома: %(reason)s" - -#, python-format -msgid "Invalid volume: %(reason)s" -msgstr "Недопустимый том: %(reason)s" - -#, python-format -msgid "" -"Invalid volume: Cannot add volume %(volume_id)s to consistency group " -"%(group_id)s because volume is in an invalid state: %(status)s. Valid states " -"are: ('available', 'in-use')." -msgstr "" -"Недопустимый том: нельзя добавить том %(volume_id)s в группу согласования " -"%(group_id)s, поскольку он находится в недопустимом состоянии %(status)s. 
" -"Допустимые состояния: ('available', 'in-use')." - -#, python-format -msgid "" -"Invalid volume: Cannot add volume %(volume_id)s to consistency group " -"%(group_id)s because volume type %(volume_type)s is not supported by the " -"group." -msgstr "" -"Недопустимый том: нельзя добавить том %(volume_id)s в группу согласования " -"%(group_id)s, поскольку тип тома %(volume_type)s не поддерживается группой." - -#, python-format -msgid "" -"Invalid volume: Cannot add volume fake-volume-uuid to consistency group " -"%(group_id)s because volume cannot be found." -msgstr "" -"Недопустимый том: нельзя добавить том fake-volume-uuid в группу согласования " -"%(group_id)s, поскольку он не найден." - -#, python-format -msgid "" -"Invalid volume: Cannot remove volume fake-volume-uuid from consistency group " -"%(group_id)s because it is not in the group." -msgstr "" -"Недопустимый том: нельзя удалить том fake-volume-uuid из группы согласования " -"%(group_id)s, поскольку он отсутствует в этой группе." - -#, python-format -msgid "Invalid volume_type passed: %s." -msgstr "Передано недопустимое значение volume_type: %s." - -#, python-format -msgid "" -"Invalid volume_type provided: %s (requested type is not compatible; either " -"match source volume, or omit type argument)." -msgstr "" -"Указано недопустимое значение volume_type: %s (запрошенный тип несовместим; " -"либо укажите тип исходного тома, либо не указывайте аргумент типа)." - -#, python-format -msgid "" -"Invalid volume_type provided: %s (requested type is not compatible; " -"recommend omitting the type argument)." -msgstr "" -"Указано недопустимое значение volume_type: %s (запрошенный тип несовместим, " -"рекомендуется убрать аргумент типа)." - -#, python-format -msgid "" -"Invalid volume_type provided: %s (requested type must be supported by this " -"consistency group)." -msgstr "" -"Указано недопустимое значение volume_type: %s (запрошенный тип должен " -"поддерживаться данной группой согласования)." 
- -#, python-format -msgid "Invalid wwpns format %(wwpns)s" -msgstr "Недопустимый формат wwpns %(wwpns)s" - -msgid "Invoking web service failed." -msgstr "Ошибка вызова веб-службы." - -msgid "Issue encountered waiting for job." -msgstr "Во время ожидания задания обнаружена неполадка." - -msgid "Issue encountered waiting for synchronization." -msgstr "Во время ожидания синхронизации обнаружена неполадка." - -msgid "" -"Issuing a fail-over failed because replication is not properly configured." -msgstr "" -"Переключение после сбоя невозможно, так как репликация настроена неверно." - -#, python-format -msgid "Job id not found in CloudByte's create volume [%s] response." -msgstr "Не найден ИД задания в ответе на запрос создания тома CloudByte [%s]." - -#, python-format -msgid "Job id not found in CloudByte's delete volume [%s] response." -msgstr "Не найден ИД задания в ответе на запрос удаления тома CloudByte [%s]." - -msgid "" -"Key names can only contain alphanumeric characters, underscores, periods, " -"colons and hyphens." -msgstr "" -"Имена ключей могут содержать только алфавитно-цифровые символы, символы " -"подчеркивания, точки, двоеточия и дефисы." - -#, python-format -msgid "KeyError: %s" -msgstr "KeyError: %s" - -msgid "Keystone version 3 or greater must be used to get nested quota support." -msgstr "Поддержка вложенных квот доступна в keystone версии не ниже 3." - -#, python-format -msgid "LU does not exist for volume: %s" -msgstr "LU не существует для тома: %s" - -msgid "LUN export failed!" -msgstr "Сбой экспорта LUN!" - -msgid "LUN map overflow on every channel." -msgstr "Переполнение карты LUN на каждом канале." - -#, python-format -msgid "LUN not found with given ref %s." -msgstr "LUN не найден по данной ссылке %s." - -#, python-format -msgid "LUN number is out of bound on channel id: %(ch_id)s." -msgstr "" -"Номер LUN в ИД канала %(ch_id)s выходит за пределы диапазона допустимых " -"значений." 
- -#, python-format -msgid "Last %s cinder syslog entries:-" -msgstr "Последние записи системного протокола cinder %s:-" - -msgid "LeftHand cluster not found" -msgstr "Кластер LeftHand не найден" - -msgid "License is unavailable." -msgstr "Лицензия недоступна." - -#, python-format -msgid "Line %(dis)d : %(line)s" -msgstr "Строка %(dis)d : %(line)s" - -msgid "Link path already exists and its not a symlink" -msgstr "Путь ссылки уже существует и не является символьной ссылкой" - -#, python-format -msgid "Linked clone of source volume not supported in state: %s." -msgstr "Связанный дубликат исходного тома не поддерживается в состоянии %s." - -msgid "Lock acquisition failed." -msgstr "Не удается установить блокировку." - -msgid "Logout session error." -msgstr "Ошибка завершения сеанса." - -msgid "" -"Lookup service not configured. Config option for fc_san_lookup_service needs " -"to specify a concrete implementation of the lookup service." -msgstr "" -"Служба поиска не настроена. В параметре конфигурации fc_san_lookup_service " -"необходимо указать конкретную реализацию службы поиска." - -msgid "Lun migration error." -msgstr "Ошибка переноса LUN." - -#, python-format -msgid "" -"MD5 of object: %(object_name)s before: %(md5)s and after: %(etag)s is not " -"same." -msgstr "" -"MD5 объекта %(object_name)s до: %(md5)s и после: %(etag)s не совпадают." 
- -#, python-format -msgid "MSG_DENIED: %r" -msgstr "MSG_DENIED: %r" - -#, python-format -msgid "MSG_DENIED: AUTH_ERROR: %r" -msgstr "MSG_DENIED: AUTH_ERROR: %r" - -#, python-format -msgid "MSG_DENIED: RPC_MISMATCH: %r" -msgstr "MSG_DENIED: RPC_MISMATCH: %r" - -#, python-format -msgid "Malformed fcns output string: %s" -msgstr "Неправильный формат строки вывода fcns: %s" - -#, python-format -msgid "Malformed message body: %(reason)s" -msgstr "Неправильное тело сообщения: %(reason)s" - -#, python-format -msgid "Malformed nameserver string: %s" -msgstr "Неверный формат строки сервера имен: %s" - -msgid "Malformed request body" -msgstr "Неправильное тело запроса" - -msgid "Malformed request body." -msgstr "Неверный формат тела запроса." - -msgid "Malformed request url" -msgstr "Неправильный запрос url" - -#, python-format -msgid "Malformed response to command %(cmd)s: %(reason)s" -msgstr "Неправильный ответ на команду %(cmd)s: %(reason)s" - -msgid "Malformed scheduler_hints attribute" -msgstr "Неверный формат атрибута scheduler_hints" - -#, python-format -msgid "Malformed show fcns database string: %s" -msgstr "Неправильный формат строки базы данных команды show fcns: %s" - -#, python-format -msgid "" -"Malformed zone configuration: (switch=%(switch)s zone_config=" -"%(zone_config)s)." -msgstr "" -"Неверный формат конфигурации зон: (коммутатор=%(switch)s конфигурация зон=" -"%(zone_config)s)." - -#, python-format -msgid "Malformed zone status: (switch=%(switch)s zone_config=%(zone_config)s)." -msgstr "" -"Неправильный формат области: (коммутатор=%(switch)s конфигурация области=" -"%(zone_config)s)." - -msgid "Manage existing get size requires 'id'." -msgstr "Управление существующим - для получения размера требуется 'id'." - -msgid "Manage existing snapshot not implemented." -msgstr "Управление существующей моментальной копией не реализовано." 
- -#, python-format -msgid "" -"Manage existing volume failed due to invalid backend reference " -"%(existing_ref)s: %(reason)s" -msgstr "" -"Сбой управления существующим томом: недопустимая ссылка на основную " -"программу %(existing_ref)s: %(reason)s" - -#, python-format -msgid "Manage existing volume failed due to volume type mismatch: %(reason)s" -msgstr "" -"Сбой управления существующим томом из-за несоответствия типа тома: %(reason)s" - -msgid "Manage existing volume not implemented." -msgstr "Управление существующим томом не реализовано." - -msgid "Manage existing volume requires 'source-id'." -msgstr "Для управления существующим томом требуется 'source-id'." - -#, python-format -msgid "" -"Manage volume is not supported if FAST is enable. FAST policy: " -"%(fastPolicyName)s." -msgstr "" -"Управление томом не поддерживается, если включен режим FAST. Стратегия FAST: " -"%(fastPolicyName)s." - -msgid "Managing of snapshots to failed-over volumes is not allowed." -msgstr "" -"Передача управления моментальными копиями томам, переключенным после сбоя, " -"не разрешена." - -msgid "Map info is None due to array version not supporting hypermetro." -msgstr "Пустая информация карты: версия массива не поддерживает hypermetro." - -#, python-format -msgid "" -"Mapping %(id)s prepare failed to complete within theallotted %(to)d seconds " -"timeout. Terminating." -msgstr "" -"Подготовка преобразования %(id)s не выполнена за отведенный тайм-аут %(to)d " -"секунд. Завершается." 
- -#, python-format -msgid "Masking view %(maskingViewName)s was not deleted successfully" -msgstr "Маскирующее представление %(maskingViewName)s не было успешно удалено" - -#, python-format -msgid "Maximum number of backups allowed (%(allowed)d) exceeded" -msgstr "Превышено максимальное число резервных копий (%(allowed)d)" - -#, python-format -msgid "Maximum number of snapshots allowed (%(allowed)d) exceeded" -msgstr "" -"Превышено максимально разрешенное число моментальных копий (%(allowed)d)" - -#, python-format -msgid "" -"Maximum number of volumes allowed (%(allowed)d) exceeded for quota " -"'%(name)s'." -msgstr "" -"Превышено максимально разрешенное число томов (%(allowed)d) для квоты " -"'%(name)s'." - -#, python-format -msgid "May specify only one of %s" -msgstr "Можно указать только одно из следующих значений: %s" - -msgid "Metadata backup already exists for this volume" -msgstr "Резервная копия метаданных уже существует для этого тома" - -#, python-format -msgid "Metadata backup object '%s' already exists" -msgstr "Объект резервной копии метаданных %s уже существует" - -msgid "Metadata property key blank." -msgstr "Пустой ключ свойства метаданных." - -msgid "Metadata restore failed due to incompatible version" -msgstr "Не удалось восстановить метаданные: несовместимая версия" - -msgid "Metadata restore failed due to incompatible version." -msgstr "Не удалось восстановить метаданные: несовместимая версия." - -msgid "" -"Missing 'purestorage' python module, ensure the library is installed and " -"available." -msgstr "" -"Отсутствует модуль python 'purestorage', убедитесь, что библиотека " -"установлена и доступна." - -msgid "Missing Fibre Channel SAN configuration param - fc_fabric_names" -msgstr "Не указан параметр конфигурации SAN Fibre Channel - fc_fabric_names" - -msgid "Missing request body" -msgstr "Отсутствует тело запроса" - -msgid "Missing request body." -msgstr "Отсутствует тело запроса." 
- -#, python-format -msgid "Missing required element '%s' in request body" -msgstr "В теле запроса отсутствует обязательный элемент %s." - -#, python-format -msgid "Missing required element '%s' in request body." -msgstr "В теле запроса отсутствует обязательный элемент '%s'." - -msgid "Missing required element 'consistencygroup' in request body." -msgstr "В теле запроса отсутствует обязательный элемент 'consistencygroup'." - -msgid "Missing required element quota_class_set in request body." -msgstr "Отсутствует требуемый параметр quota_class_set в теле запроса." - -msgid "Missing required element snapshot in request body." -msgstr "В теле запроса отсутствует обязательный элемент snapshot." - -msgid "" -"Multiple SerialNumbers found, when only one was expected for this operation. " -"Please change your EMC config file." -msgstr "" -"Обнаружено несколько значений SerialNumber, но только одно ожидалось для " -"этой операции. Измените файл конфигурации EMC." - -#, python-format -msgid "Multiple copies of volume %s found." -msgstr "Обнаружено несколько копий тома %s." - -#, python-format -msgid "Multiple matches found for '%s', use an ID to be more specific." -msgstr "" -"Для '%s' найдено несколько совпадений, для более конкретного поиска " -"используйте ИД." - -msgid "Multiple profiles found." -msgstr "Обнаружено несколько профайлов." 
- -msgid "Must implement a fallback schedule" -msgstr "Необходимо реализовать резервное расписание" - -msgid "Must implement find_retype_host" -msgstr "Необходимо реализовать find_retype_host" - -msgid "Must implement host_passes_filters" -msgstr "Необходимо реализовать host_passes_filters" - -msgid "Must implement schedule_create_consistencygroup" -msgstr "Необходимо реализовать schedule_create_consistencygroup" - -msgid "Must implement schedule_create_volume" -msgstr "Необходимо реализовать schedule_create_volume" - -msgid "Must implement schedule_get_pools" -msgstr "Необходимо реализовать schedule_get_pools" - -msgid "Must pass wwpn or host to lsfabric." -msgstr "В lsfabric должно быть передано глобальное имя порта или хост." - -msgid "" -"Must run this command as cloud admin using a Keystone policy.json which " -"allows cloud admin to list and get any project." -msgstr "" -"Эту команду может выполнять только администратор облака с использованием " -"Keystone policy.json, где администратору облака предоставлены права на вывод " -"списка проектов и получение любого проекта." - -msgid "Must specify 'connector'" -msgstr "Необходимо указать 'connector'" - -msgid "Must specify 'connector'." -msgstr "Необходимо указать 'connector'." - -msgid "Must specify 'host'." -msgstr "Необходимо указать 'host'." - -msgid "Must specify 'new_volume'" -msgstr "Необходимо указать 'new_volume'" - -msgid "Must specify 'status'" -msgstr "Необходимо указать 'status'" - -msgid "" -"Must specify 'status', 'attach_status' or 'migration_status' for update." -msgstr "" -"Необходимо указать 'status', 'attach_status' или 'migration_status' для " -"обновления." - -msgid "Must specify a valid attach status" -msgstr "Необходимо указать допустимое состояние вложения" - -msgid "Must specify a valid migration status" -msgstr "Необходимо указать допустимое состояние переноса" - -#, python-format -msgid "Must specify a valid persona %(valid)s,value '%(persona)s' is invalid." 
-msgstr "" -"Необходимо указать допустимого пользователя %(valid)s, значение " -"'%(persona)s' недопустимо." - -#, python-format -msgid "" -"Must specify a valid provisioning type %(valid)s, value '%(prov)s' is " -"invalid." -msgstr "" -"Необходимо указать допустимый тип предоставления ресурсов %(valid)s, " -"значение '%(prov)s' недопустимо." - -msgid "Must specify a valid status" -msgstr "Необходимо указать допустимое состояние" - -msgid "Must specify an ExtensionManager class" -msgstr "Необходимо указать класс ExtensionManager" - -msgid "Must specify bootable in request." -msgstr "Необходимо указать загружаемый файл в запросе." - -msgid "Must specify protection domain name or protection domain id." -msgstr "Необходимо указать имя или ИД домена защиты." - -msgid "Must specify readonly in request." -msgstr "В запросе должен быть указан параметр readonly." - -msgid "Must specify snapshot source-name or source-id." -msgstr "Необходимо указать source-name или source-id моментальной копии." - -msgid "Must specify source-name or source-id." -msgstr "Необходимо указать source-name или source-id." - -msgid "Must specify storage pool name or id." -msgstr "Необходимо указать имя или ИД пула памяти." - -msgid "Must specify storage pools. Option: sio_storage_pools." -msgstr "Необходимо указать пулы памяти. Опция: sio_storage_pools." - -msgid "Must supply a positive, non-zero value for age" -msgstr "" -"В качестве возраста необходимо указать положительное число, не равное 0" - -#, python-format -msgid "" -"NAS config '%(name)s=%(value)s' invalid. Must be 'auto', 'true', or 'false'" -msgstr "" -"Недопустимая конфигурация NAS '%(name)s=%(value)s'. Допустимые значения: " -"'auto', 'true' или 'false'" - -#, python-format -msgid "NFS config file at %(config)s doesn't exist" -msgstr "Файл конфигурации NFS в %(config)s не существует" - -#, python-format -msgid "NFS file %s not discovered." -msgstr "Файл NFS %s не найден." - -msgid "NFS file could not be discovered." 
-msgstr "Не удалось найти файл NFS." - -msgid "NaElement name cannot be null." -msgstr "Имя NaElement не может быть пустым." - -msgid "Name" -msgstr "Имя" - -msgid "" -"Name, description, add_volumes, and remove_volumes can not be all empty in " -"the request body." -msgstr "" -"Параметры name, description, add_volumes и remove_volumes не могут быть все " -"пустыми в теле запроса." - -msgid "Need non-zero volume size" -msgstr "Требуется ненулевой размер тома" - -#, python-format -msgid "Neither MSG_DENIED nor MSG_ACCEPTED: %r" -msgstr "Ни MSG_DENIED, ни MSG_ACCEPTED: %r" - -msgid "NetApp Cinder Driver exception." -msgstr "Исключительная ситуация драйвера NetApp Cinder." - -#, python-format -msgid "" -"New size for extend must be greater than current size. (current: %(size)s, " -"extended: %(new_size)s)." -msgstr "" -"Новый размер расширения должен превышать текущий размер. (текущий размер: " -"%(size)s, расширенный: %(new_size)s)." - -#, python-format -msgid "" -"New size should be bigger than the real size from backend storage. realsize: " -"%(oldsize)s, newsize: %(newsize)s." -msgstr "" -"Новый размер должен быть больше фактического размера в базовой системе " -"хранения. Фактический размер: %(oldsize)s, новый размер: %(newsize)s." - -msgid "New volume size must be specified as an integer." -msgstr "Новый размер тома необходимо указывать в виде целого числа." - -msgid "New volume type must be specified." -msgstr "Должен быть указан тип нового тома." - -msgid "New volume type not specified in request_spec." -msgstr "Новый тип тома не указан в request_spec." - -msgid "Nimble Cinder Driver exception" -msgstr "Исключительная ситуация драйвера Nimble Cinder" - -msgid "No FC initiator can be added to host." -msgstr "Инициатор Fibre Channel не добавлен в хост." - -msgid "No FC port connected to fabric." -msgstr "Никакой порт Fibre Channel не подключен к фабрике." 
- -msgid "No FCP targets found" -msgstr "Не найдены целевые объекты FCP" - -msgid "No Port Group elements found in config file." -msgstr "Элементы группы портов не найдены в файле конфигурации." - -msgid "No VF ID is defined in the configuration file." -msgstr "В файле конфигурации не задан VF ID." - -msgid "No active iSCSI portals with supplied iSCSI IPs" -msgstr "Нет активных порталов iSCSI с указанными IP-адресами iSCSI" - -#, python-format -msgid "No available service named %s" -msgstr "Нет доступной службы с именем %s" - -#, python-format -msgid "No backup with id %s" -msgstr "Отсутствует резервная копия с ИД %s" - -msgid "No backups available to do an incremental backup." -msgstr "Нет резервных копий для создания дополняющей резервной копии." - -msgid "No big enough free disk" -msgstr "Отсутствуют достаточно большие свободные диски" - -#, python-format -msgid "No cgsnapshot with id %s" -msgstr "Нет моментальной копии группы согласования с ИД %s" - -msgid "No cinder entries in syslog!" -msgstr "Нет записей cinder в системном протоколе!" - -#, python-format -msgid "No cloned LUN named %s found on the filer" -msgstr "" -"В утилите для работы с файловой системой не найден дубликат LUN с именем %s" - -msgid "No config node found." -msgstr "Не найден узел конфигурации." - -#, python-format -msgid "No consistency group with id %s" -msgstr "Нет группы согласования с ИД %s" - -#, python-format -msgid "No element by given name %s." -msgstr "Не найден элемент с именем %s." - -msgid "No errors in logfiles!" -msgstr "Нет ошибок в файлах протоколов!" - -#, python-format -msgid "No file found with %s as backing file." -msgstr "Не найден файл с %s в качестве базового файла." - -#, python-format -msgid "" -"No free LUN IDs left. Maximum number of volumes that can be attached to host " -"(%s) has been exceeded." -msgstr "" -"Не осталось свободных ИД LUN. Максимальное число томов, которое можно " -"подключать к хосту (%s) превышено." 
- -msgid "No free disk" -msgstr "Отсутствуют свободные диски" - -#, python-format -msgid "No good iscsi portal found in supplied list for %s." -msgstr "Не найден правильный портал iscsi в указанном списке %s." - -#, python-format -msgid "No good iscsi portals found for %s." -msgstr "Не найдены правильные порталы iscsi для %s." - -#, python-format -msgid "No host to create consistency group %s." -msgstr "Нет хоста для создания группы согласования %s." - -msgid "No iSCSI-enabled ports on target array." -msgstr "В целевом массиве нет портов с поддержкой iSCSI." - -msgid "No image_name was specified in request." -msgstr "В запросе не указан параметр image_name." - -msgid "No initiator connected to fabric." -msgstr "Никакой инициатор не подключен к фабрике." - -#, python-format -msgid "No initiator group found for initiator %s" -msgstr "Группа инициатора не найдена для инициатора %s" - -msgid "No initiators found, cannot proceed" -msgstr "Инициаторы не найдены, продолжение работы невозможно" - -#, python-format -msgid "No interface found on cluster for ip %s" -msgstr "В кластере не найден интерфейс для ip-адреса %s" - -msgid "No ip address found." -msgstr "Не найден IP-адрес." - -msgid "No iscsi auth groups were found in CloudByte." -msgstr "Не найдены группы идентификации iSCSI в CloudByte." - -msgid "No iscsi initiators were found in CloudByte." -msgstr "В CloudByte не найдены инициаторы iscsi." - -#, python-format -msgid "No iscsi service found for CloudByte volume [%s]." -msgstr "Не найдена служба iscsi для тома CloudByte [%s]." - -msgid "No iscsi services found in CloudByte storage." -msgstr "Службы iscsi не найдены в хранилище CloudByte." - -#, python-format -msgid "No key file specified and unable to load key from %(cert)s %(e)s." -msgstr "Не указан файл ключа, и невозможно загрузить ключ из %(cert)s %(e)s." 
- -msgid "No mounted Gluster shares found" -msgstr "Не обнаружено смонтированных общих ресурсов Gluster" - -msgid "No mounted NFS shares found" -msgstr "Не обнаружено смонтированных общих ресурсов NFS" - -msgid "No mounted SMBFS shares found." -msgstr "Не обнаружено смонтированных общих ресурсов SMBFS." - -msgid "No mounted Virtuozzo Storage shares found" -msgstr "Не найдены смонтированные общие ресурсы Virtuozzo Storage" - -msgid "No mounted shares found" -msgstr "Не обнаружено смонтированных общих ресурсов" - -#, python-format -msgid "No node found in I/O group %(gid)s for volume %(vol)s." -msgstr "Не найден узел в группе ввода-вывода %(gid)s для тома %(vol)s." - -msgid "" -"No pools are available for provisioning volumes. Ensure that the " -"configuration option netapp_pool_name_search_pattern is set correctly." -msgstr "" -"Нет пулов для предоставления ресурсов томам. Убедитесь, что правильно " -"настроен параметр конфигурации netapp_pool_name_search_pattern." - -msgid "" -"No response was received from CloudByte storage list iSCSI auth user API " -"call." -msgstr "" -"Не получен ответ из вызова API идентификации пользователей iSCSI списка " -"хранилища CloudByte." - -msgid "No response was received from CloudByte storage list tsm API call." -msgstr "Вызов API tsm списка хранилища CloudByte не вернул ответа." - -msgid "No response was received from CloudByte's list filesystem api call." -msgstr "Вызов api файловой системы списка CloudByte не вернул ответа." - -msgid "No service VIP configured and no nexenta_client_address" -msgstr "Не настроен VIP службы и не задан nexenta_client_address" - -#, python-format -msgid "No snap found with %s as backing file." -msgstr "Не найдена моментальная копия с %s в качестве базового файла." - -#, python-format -msgid "No snapshot image found in snapshot group %s." -msgstr "Не найден образ моментальной копии в группе моментальных копий %s." - -#, python-format -msgid "No snapshots could be found on volume %s." 
-msgstr "Не найдены моментальные копии для тома %s." - -#, python-format -msgid "No source snapshots provided to create consistency group %s." -msgstr "" -"Не указаны исходные моментальные копии для создания группы согласования %s." - -#, python-format -msgid "No storage path found for export path %s" -msgstr "Не найден путь хранения для пути экспорта %s" - -#, python-format -msgid "No such QoS spec %(specs_id)s." -msgstr "Нет такой спецификации QoS: %(specs_id)s." - -msgid "No suitable discovery ip found" -msgstr "Подходящий поисковый IP не найден" - -#, python-format -msgid "No support to restore backup version %s" -msgstr "Нет поддержки восстановления резервной версии %s" - -#, python-format -msgid "No target id found for volume %(volume_id)s." -msgstr "Не найден target id для тома %(volume_id)s." - -msgid "" -"No unused LUN IDs are available on the host; multiattach is enabled which " -"requires that all LUN IDs to be unique across the entire host group." -msgstr "" -"На хосте нет неиспользуемых ИД LUN. Включены множественные подключения, а " -"это требует, чтобы все ИД LUN были уникальны в пределах всей группы хостов." - -#, python-format -msgid "No valid host was found. %(reason)s" -msgstr "Допустимый узел не найден. %(reason)s" - -#, python-format -msgid "No valid hosts for volume %(id)s with type %(type)s" -msgstr "Нет допустимых хостов для тома %(id)s с типом %(type)s" - -#, python-format -msgid "No vdisk with the UID specified by ref %s." -msgstr "Vdisk с UID, заданным по ссылке %s, отсутствует." - -#, python-format -msgid "No views found for LUN: %s" -msgstr "Не найдены представления для LUN: %s" - -#, python-format -msgid "" -"No volume on cluster with vserver %(vserver)s and junction path %(junction)s " -msgstr "" -"В кластере с vserver %(vserver)s и путем пересечения %(junction)s " -"отсутствует том " - -msgid "No volume service(s) started successfully, terminating." -msgstr "Не удалось запустить службы томов. Завершение." 
- -msgid "No volume was found at CloudByte storage." -msgstr "Не найден том в хранилище CloudByte." - -msgid "No volume_type should be provided when creating test replica." -msgstr "Нельзя указывать volume_type при создании пробной копии." - -msgid "No volumes found in CloudByte storage." -msgstr "В хранилище CloudByte не найдены тома." - -msgid "No weighed hosts available" -msgstr "Нет хостов с весами" - -#, python-format -msgid "Not a valid string: %s" -msgstr "Недопустимая строка: %s" - -msgid "Not a valid value for NaElement." -msgstr "Недопустимое значение для NaElement." - -#, python-format -msgid "Not able to find a suitable datastore for the volume: %s." -msgstr "Не найдено подходящее хранилище данных для тома %s." - -msgid "Not an rbd snapshot" -msgstr "Не является моментальной копией rbd" - -#, python-format -msgid "Not authorized for image %(image_id)s." -msgstr "Нет доступа к образу %(image_id)s." - -msgid "Not authorized." -msgstr "Не авторизировано." - -#, python-format -msgid "Not enough space on backend (%(backend)s)" -msgstr "Недостаточно места на базовом сервере (%(backend)s)" - -msgid "Not enough storage space in the ZFS share to perform this operation." -msgstr "" -"В общем ресурсе ZFS отсутствует свободное пространство для выполнения этой " -"операции." - -msgid "Not stored in rbd" -msgstr "Не сохранено в rbd" - -msgid "Nova returned \"error\" status while creating snapshot." -msgstr "" -"Во время создания моментальной копии из Nova возвращено состояние error." - -msgid "Null response received from CloudByte's list filesystem." -msgstr "Файловая система списка CloudByte вернула пустой ответ." - -msgid "Null response received from CloudByte's list iscsi auth groups." -msgstr "Получен пустой ответ из групп идентификации iSCSI списка CloudByte." - -msgid "Null response received from CloudByte's list iscsi initiators." -msgstr "Инициаторы iscsi списков CloudByte вернули пустой ответ." 
- -msgid "Null response received from CloudByte's list volume iscsi service." -msgstr "Служба iscsi списков томов CloudByte вернула пустой ответ." - -#, python-format -msgid "Null response received while creating volume [%s] at CloudByte storage." -msgstr "При создании тома [%s] в хранилище CloudByte получен пустой ответ." - -#, python-format -msgid "Null response received while deleting volume [%s] at CloudByte storage." -msgstr "Получен пустой ответ при удалении тома [%s] в хранилище CloudByte." - -#, python-format -msgid "" -"Null response received while querying for [%(operation)s] based job " -"[%(job)s] at CloudByte storage." -msgstr "" -"Получен пустой ответ на запрос выполнения операции [%(operation)s] с помощью " -"задания [%(job)s] в хранилище CloudByte." - -msgid "Object Count" -msgstr "Количество объектов" - -msgid "Object Version" -msgstr "Версия объекта" - -msgid "Object is not a NetApp LUN." -msgstr "Объект не находится в LUN NetApp." - -#, python-format -msgid "" -"On an Extend Operation, error adding volume to composite volume: " -"%(volumename)s." -msgstr "" -"Ошибка в операции Extend при добавлении тома в составной том %(volumename)s." - -msgid "One of the required inputs from host, port or scheme was not found." -msgstr "" -"Не найден один из обязательных входных параметров из хоста, порта или схемы." - -#, python-format -msgid "" -"Only %(value)s %(verb)s request(s) can be made to %(uri)s every " -"%(unit_string)s." -msgstr "" -"Только %(value)s %(verb)s запрос(ов) могут быть сделаны для %(uri)s, каждые " -"%(unit_string)s." - -msgid "Only one limit can be set in a QoS spec." -msgstr "В спецификации QoS можно указать только одно ограничение." - -msgid "" -"Only users with token scoped to immediate parents or root projects are " -"allowed to see its children quotas." -msgstr "" -"Только пользователи с маркером, связанным с непосредственными родительскими " -"объектами и корневыми проектами могут просматривать квоты дочерних элементов." 
- -msgid "Only volumes managed by OpenStack can be unmanaged." -msgstr "" -"Вывести из-под управления можно только тома, находящиеся под управлением " -"OpenStack." - -#, python-format -msgid "Operation failed with status=%(status)s. Full dump: %(data)s" -msgstr "Операция не выполнена, состояние=%(status)s. Полный дамп: %(data)s" - -#, python-format -msgid "Operation not supported: %(operation)s." -msgstr "Операция не поддерживается: %(operation)s." - -msgid "Option gpfs_images_dir is not set correctly." -msgstr "Опция gpfs_images_dir указана неправильно." - -msgid "Option gpfs_images_share_mode is not set correctly." -msgstr "Опция gpfs_images_share_mode указана неправильно." - -msgid "Option gpfs_mount_point_base is not set correctly." -msgstr "Опция gpfs_mount_point_base указана неправильно." - -#, python-format -msgid "Originating %(res)s %(prop)s must be one of '%(vals)s' values" -msgstr "Исходное значение %(res)s %(prop)s должно быть одно из '%(vals)s'" - -#, python-format -msgid "ParseException: %s" -msgstr "ParseException: %s" - -msgid "Partition name is None, please set smartpartition:partitionname in key." -msgstr "Имя раздела - None. Укажите smartpartition:partitionname в ключе." - -msgid "" -"Password or SSH private key is required for authentication: set either " -"san_password or san_private_key option." -msgstr "" -"Для идентификации требуется пароль или личный ключ SSH: задайте опцию " -"san_password или san_private_key." - -msgid "Path to REST server's certificate must be specified." -msgstr "Необходимо указать путь к сертификату сервера REST." - -#, python-format -msgid "Please create %(pool_list)s pool in advance!" -msgstr "Заранее создайте пул %(pool_list)s!" - -#, python-format -msgid "Please create %(tier_levels)s tier in pool %(pool)s in advance!" -msgstr "Заранее создайте слой %(tier_levels)s в пуле %(pool)s!" - -msgid "Please specify a name for QoS specs." -msgstr "Укажите имя спецификации QoS." 
-
-#, python-format
-msgid "Policy doesn't allow %(action)s to be performed."
-msgstr "Политика не допускает выполнения %(action)s."
-
-#, python-format
-msgid "Pool %(poolNameInStr)s is not found."
-msgstr "Пул %(poolNameInStr)s не найден."
-
-#, python-format
-msgid "Pool %s does not exist in Nexenta Store appliance"
-msgstr "Пул %s не существует в устройстве Nexenta Store"
-
-#, python-format
-msgid "Pool from volume['host'] %(host)s not found."
-msgstr "Не найден пул из volume['host'] %(host)s."
-
-#, python-format
-msgid "Pool from volume['host'] failed with: %(ex)s."
-msgstr "Сбой пула из volume['host']. Исключительная ситуация: %(ex)s."
-
-msgid "Pool is not available in the volume host field."
-msgstr "Не указан пул в поле хоста тома."
-
-msgid "Pool is not available in the volume host fields."
-msgstr "Пул недоступен в полях хоста тома."
-
-#, python-format
-msgid "Pool with name %(pool)s wasn't found in domain %(domain)s."
-msgstr "Не найден пул с именем %(pool)s в домене %(domain)s."
-
-#, python-format
-msgid "Pool with name %(pool_name)s wasn't found in domain %(domain_id)s."
-msgstr "Не найден пул с именем %(pool_name)s в домене %(domain_id)s."
-
-#, python-format
-msgid ""
-"Pool: %(poolName)s. is not associated to storage tier for fast policy "
-"%(fastPolicy)s."
-msgstr ""
-"Пул %(poolName)s. не связан со слоем памяти для быстрой стратегии "
-"%(fastPolicy)s."
-
-#, python-format
-msgid "PoolName must be in the file %(fileName)s."
-msgstr "Значение PoolName должно быть указано в файле %(fileName)s."
-
-#, python-format
-msgid "Pools %s does not exist"
-msgstr "Пулы %s не существуют"
-
-msgid "Pools name is not set."
-msgstr "Не задано имя пула."
-
-#, python-format
-msgid "Primary copy status: %(status)s and synchronized: %(sync)s."
-msgstr "Состояние главной копии: %(status)s, синхронизирована: %(sync)s."
-
-msgid "Project ID"
-msgstr "ID проекта"
-
-#, python-format
-msgid "Project quotas are not properly setup for nested quotas: %(reason)s."
-msgstr "В проекте неправильно настроена поддержка вложенных квот: %(reason)s." - -msgid "Protection Group not ready." -msgstr "Группа защиты не готова." - -#, python-format -msgid "" -"Protocol %(storage_protocol)s is not supported for storage family " -"%(storage_family)s." -msgstr "" -"Протокол %(storage_protocol)s не поддерживается для семейства памяти " -"%(storage_family)s." - -msgid "Provided backup record is missing an id" -msgstr "В указанной записи резервной копии отсутствует ИД" - -#, python-format -msgid "" -"Provided snapshot status %(provided)s not allowed for snapshot with status " -"%(current)s." -msgstr "" -"Указанное состояние моментальной копии %(provided)s запрещено для " -"моментальной копии с состоянием %(current)s." - -#, python-format -msgid "" -"Provider information w.r.t CloudByte storage was not found for OpenStack " -"volume [%s]." -msgstr "" -"Не найдена информация поставщика о хранилище CloudByte для тома OpenStack " -"[%s]." - -#, python-format -msgid "Pure Storage Cinder driver failure: %(reason)s" -msgstr "Сбой драйвера Pure Storage Cinder: %(reason)s" - -#, python-format -msgid "QoS Specs %(specs_id)s already exists." -msgstr "Спецификация QoS %(specs_id)s уже существует." - -#, python-format -msgid "QoS Specs %(specs_id)s is still associated with entities." -msgstr "Спецификация QoS %(specs_id)s еще связана с сущностями." - -#, python-format -msgid "QoS config is wrong. %s must > 0." -msgstr "Недопустимая конфигурация QoS. %s должен быть > 0." - -#, python-format -msgid "" -"QoS policy must specify for IOTYPE and another qos_specs, QoS policy: " -"%(qos_policy)s." 
-msgstr ""
-"В стратегии QoS необходимо указать IOTYPE и другой параметр qos_specs, "
-"стратегия QoS: %(qos_policy)s."
-
-#, python-format
-msgid ""
-"QoS policy must specify for IOTYPE: 0, 1, or 2, QoS policy: %(qos_policy)s "
-msgstr ""
-"В стратегии QoS необходимо указать IOTYPE: 0, 1 или 2, стратегия QoS: "
-"%(qos_policy)s "
-
-#, python-format
-msgid ""
-"QoS policy upper_limit and lower_limit conflict, QoS policy: %(qos_policy)s."
-msgstr ""
-"В стратегии QoS конфликтуют параметры upper_limit и lower_limit, стратегия "
-"QoS: %(qos_policy)s."
-
-#, python-format
-msgid "QoS spec %(specs_id)s has no spec with key %(specs_key)s."
-msgstr ""
-"Спецификация QoS %(specs_id)s не имеет спецификации с ключом %(specs_key)s."
-
-msgid "QoS specs are not supported on this storage family and ONTAP version."
-msgstr ""
-"Спецификации QoS не поддерживаются в этом семействе систем хранения и версии "
-"ONTAP."
-
-msgid "Qos specs still in use."
-msgstr "Спецификация QoS еще используется."
-
-msgid ""
-"Query by service parameter is deprecated. Please use binary parameter "
-"instead."
-msgstr ""
-"Запрос по параметру службы устарел. Вместо него следует использовать "
-"двоичный параметр."
-
-msgid "Query resource pool error."
-msgstr "Ошибка запроса пула ресурсов."
-
-#, python-format
-msgid "Quota %s limit must be equal or greater than existing resources."
-msgstr "Ограничение %s квоты должно быть не меньше существующих ресурсов."
-
-#, python-format
-msgid "Quota class %(class_name)s could not be found."
-msgstr "Класс квоты %(class_name)s не найден."
-
-msgid "Quota could not be found"
-msgstr "Квота не найдена"
-
-#, python-format
-msgid "Quota exceeded for resources: %(overs)s"
-msgstr "Квота превышена для ресурсов: %(overs)s"
-
-#, python-format
-msgid "Quota exceeded: code=%(code)s"
-msgstr "Квота превышена: код=%(code)s"
-
-#, python-format
-msgid "Quota for project %(project_id)s could not be found."
-msgstr "Квота проекта %(project_id)s не найдена."
- -#, python-format -msgid "" -"Quota limit invalid for project '%(proj)s' for resource '%(res)s': limit of " -"%(limit)d is less than in-use value of %(used)d" -msgstr "" -"Неверно задано ограничение квот в проекте '%(proj)s' для ресурса '%(res)s': " -"порог %(limit)d меньше, чем текущее использование %(used)d" - -#, python-format -msgid "Quota reservation %(uuid)s could not be found." -msgstr "Резервирование квоты %(uuid)s не найдено." - -#, python-format -msgid "Quota usage for project %(project_id)s could not be found." -msgstr "Использование квоты для проекта %(project_id)s не найдено." - -#, python-format -msgid "RBD diff op failed - (ret=%(ret)s stderr=%(stderr)s)" -msgstr "Сбой операции diff RBD - (код возврата=%(ret)s stderr=%(stderr)s)" - -#, python-format -msgid "REST %(proxy_ver)s hpelefthandclient %(rest_ver)s" -msgstr "REST %(proxy_ver)s hpelefthandclient %(rest_ver)s" - -msgid "REST server IP must by specified." -msgstr "Должен быть указан IP-адрес сервера REST." - -msgid "REST server password must by specified." -msgstr "Должен быть указан пароль сервера REST." - -msgid "REST server username must by specified." -msgstr "Должно быть указано имя пользователя сервера REST." - -msgid "RPC Version" -msgstr "Версия RPC" - -msgid "RPC server response is incomplete" -msgstr "Неполный ответ от сервера RPC" - -msgid "Raid did not have MCS Channel." -msgstr "У RAID нет канала MCS." - -#, python-format -msgid "Received error string: %s" -msgstr "Получена ошибочная строка: %s" - -msgid "Reference must be for an unmanaged snapshot." -msgstr "Ссылка должна быть указана для неуправляемой моментальной копии." - -msgid "Reference must be for an unmanaged virtual volume." -msgstr "Для неуправляемого виртуального тома должна быть ссылка." - -msgid "Reference must be the name of an unmanaged snapshot." -msgstr "Ссылка должна быть именем неуправляемой моментальной копии." - -msgid "Reference must be the volume name of an unmanaged virtual volume." 
-msgstr "Для неуправляемого виртуального тома ссылка должна быть именем тома."
-
-msgid "Reference must contain either source-name or source-id element."
-msgstr "Ссылка должна содержать элемент source-name или source-id."
-
-msgid "Reference must contain source-id or source-name element."
-msgstr "Ссылка должна содержать элемент source-id или source-name."
-
-msgid "Reference must contain source-id or source-name key."
-msgstr "Ссылка должна содержать ключ source-id или source-name."
-
-msgid "Reference must contain source-id or source-name."
-msgstr "Ссылка должна содержать source-id или source-name."
-
-msgid "Reference must contain source-id."
-msgstr "Ссылка должна содержать source-id."
-
-msgid "Reference must contain source-name element."
-msgstr "Ссылка должна содержать элемент имени источника."
-
-msgid "Reference must contain source-name or source-id."
-msgstr "Указатель должен содержать имя источника или ИД источника."
-
-msgid "Reference must contain source-name."
-msgstr "Ссылка должна содержать элемент source-name."
-
-msgid "Reference to volume to be managed must contain source-name."
-msgstr ""
-"Ссылка на том, передаваемый под управление, должна содержать элемент source-"
-"name."
-
-#, python-format
-msgid "Reference to volume: %s to be managed must contain source-name."
-msgstr ""
-"Ссылка на том %s, передаваемый под управление, должна содержать элемент "
-"source-name."
-
-#, python-format
-msgid ""
-"Refusing to migrate volume ID: %(id)s. Please check your configuration "
-"because source and destination are the same Volume Group: %(name)s."
-msgstr ""
-"Перенос ИД тома %(id)s отклонен. Проверьте конфигурацию, так как исходный и "
-"целевой объекты относятся к одной группе томов: %(name)s."
-
-msgid "Remote pool cannot be found."
-msgstr "Удаленный пул не найден."
-
-msgid "Remove CHAP error."
-msgstr "Ошибка удаления CHAP."
-
-msgid "Remove fc from host error."
-msgstr "Ошибка удаления Fibre Channel с хоста."
- -msgid "Remove host from array error." -msgstr "Ошибка удаления хоста из массива." - -msgid "Remove host from hostgroup error." -msgstr "Ошибка удаления хоста из группы хостов." - -msgid "Remove iscsi from host error." -msgstr "Ошибка удаления iSCSI с хоста." - -msgid "Remove lun from QoS error." -msgstr "Ошибка удаления LUN из QoS." - -msgid "Remove lun from cache error." -msgstr "Ошибка удаления LUN из кэша." - -msgid "Remove lun from partition error." -msgstr "Ошибка удаления LUN из раздела." - -msgid "Remove port from port group error." -msgstr "Ошибка удаления порта из группы портов." - -msgid "Remove volume export failed." -msgstr "Не удалось удалить экспорт тома." - -msgid "Rename lun on array error." -msgstr "Ошибка переименования LUN в массиве." - -msgid "Rename snapshot on array error." -msgstr "Ошибка переименования моментальной копии в массиве." - -#, python-format -msgid "Replication %(name)s to %(ssn)s failed." -msgstr "Репликация %(name)s в %(ssn)s не выполнена." - -#, python-format -msgid "Replication Service Capability not found on %(storageSystemName)s." -msgstr "Функция службы репликации не найдена в %(storageSystemName)s." - -#, python-format -msgid "Replication Service not found on %(storageSystemName)s." -msgstr "Служба репликации не найдена в %(storageSystemName)s." - -msgid "Replication not allowed yet." -msgstr "Репликация еще не разрешена." - -msgid "Request body and URI mismatch" -msgstr "Тело запроса и URI не совпадают" - -msgid "Request body contains too many items" -msgstr "Тело запроса содержит избыточное количество объектов" - -msgid "Request body contains too many items." -msgstr "Тело запроса содержит слишком много элементов." 
- -msgid "Request body empty" -msgstr "Пустое тело запроса" - -#, python-format -msgid "Request to Datera cluster returned bad status: %(status)s | %(reason)s" -msgstr "" -"Запрос к кластеру Datera вернул неверное состояние: %(status)s | %(reason)s" - -#, python-format -msgid "" -"Requested backup exceeds allowed Backup gigabytes quota. Requested " -"%(requested)sG, quota is %(quota)sG and %(consumed)sG has been consumed." -msgstr "" -"Размер запрошенной резервной копии превышает разрешенную квоту резервных " -"копий в ГБ. Запрошено: %(requested)s ГБ, квота %(quota)s ГБ, использовано " -"%(consumed)s ГБ." - -#, python-format -msgid "" -"Requested volume or snapshot exceeds allowed %(name)s quota. Requested " -"%(requested)sG, quota is %(quota)sG and %(consumed)sG has been consumed." -msgstr "" -"Запрошенный том или моментальная копия превышают разрешенную квоту %(name)s. " -"Запрошено %(requested)s ГБ, квота %(quota)s ГБ, использовано %(consumed)s ГБ." - -#, python-format -msgid "" -"Requested volume size %(size)d is larger than maximum allowed limit " -"%(limit)d." -msgstr "" -"Запрошенный размер тома %(size)d больше максимально допустимого (%(limit)d)." - -msgid "Required configuration not found" -msgstr "Не найдена требуемая конфигурация" - -#, python-format -msgid "Required flag %s is not set" -msgstr "Не указан требуемый флаг %s" - -#, python-format -msgid "" -"Reset backup status aborted, the backup service currently configured " -"[%(configured_service)s] is not the backup service that was used to create " -"this backup [%(backup_service)s]." -msgstr "" -"Сброс состояния резервной копии прерван. Настроенная служба резервного " -"копирования [%(configured_service)s] не является службой резервного " -"копирования, которая использовалась для создания этой резервной копии " -"[%(backup_service)s]." - -#, python-format -msgid "Resizing clone %s failed." -msgstr "Не удалось изменить размер дубликата %s." - -msgid "Resizing image file failed." 
-msgstr "Не удалось изменить размер файла образа." - -msgid "Resource could not be found." -msgstr "Ресурс не может быть найден." - -msgid "Resource not ready." -msgstr "Ресурс не готов." - -#, python-format -msgid "Response error - %s." -msgstr "Ошибка ответа: %s." - -msgid "Response error - The storage-system is offline." -msgstr "Ошибка ответа - система хранения выключена." - -#, python-format -msgid "Response error code - %s." -msgstr "Код ошибки ответа: %s." - -msgid "RestURL is not configured." -msgstr "RestURL не настроен." - -#, python-format -msgid "" -"Restore backup aborted, expected volume status %(expected_status)s but got " -"%(actual_status)s." -msgstr "" -"Восстановление резервной копии прервано: ожидалось состояние тома " -"%(expected_status)s, получено %(actual_status)s." - -#, python-format -msgid "" -"Restore backup aborted, the backup service currently configured " -"[%(configured_service)s] is not the backup service that was used to create " -"this backup [%(backup_service)s]." -msgstr "" -"Восстановление резервной копии прервано: настроенная в данный момент служба " -"резервного копирования [%(configured_service)s] не является службой " -"резервного копирования, которая использовалась для создания этой резервной " -"копии [%(backup_service)s]." - -#, python-format -msgid "" -"Restore backup aborted: expected backup status %(expected_status)s but got " -"%(actual_status)s." -msgstr "" -"Восстановление резервной копии прервано: ожидалось состояние резервной копии " -"%(expected_status)s, получено %(actual_status)s." - -#, python-format -msgid "" -"Retrieved a different amount of SolidFire volumes for the provided Cinder " -"snapshots. Retrieved: %(ret)s Desired: %(des)s" -msgstr "" -"Полученное непредвиденное число томов SolidFire для предоставленных " -"моментальных копий Cinder. Получено: %(ret)s Ожидалось: %(des)s" - -#, python-format -msgid "" -"Retrieved a different amount of SolidFire volumes for the provided Cinder " -"volumes. 
Retrieved: %(ret)s Desired: %(des)s" -msgstr "" -"Полученное непредвиденное число томов SolidFire для предоставленных томов " -"Cinder. Получено: %(ret)s Ожидалось: %(des)s" - -#, python-format -msgid "Retry count exceeded for command: %s" -msgstr "Превышено число попыток для команды: %s" - -msgid "Retryable SolidFire Exception encountered" -msgstr "Обнаружена повторяемая исключительная ситуация SolidFire" - -msgid "Retype requires migration but is not allowed." -msgstr "Для изменения типа требуется перенос, но он запрещен." - -#, python-format -msgid "Rolling back %(volumeName)s by deleting it." -msgstr "Выполняется откат %(volumeName)s путем его удаления." - -#, python-format -msgid "" -"Running Cinder with a VMware vCenter version less than %s is not allowed." -msgstr "Работа Cinder с версией VMware vCenter ниже %s невозможна." - -msgid "SAN product is not configured." -msgstr "Продукт SAN не настроен." - -msgid "SAN protocol is not configured." -msgstr "Протокол SAN не настроен." - -#, python-format -msgid "SMBFS config 'smbfs_oversub_ratio' invalid. Must be > 0: %s" -msgstr "" -"Недопустимая конфигурация SMBFS smbfs_oversub_ratio. Значение должно быть " -"больше 0: %s" - -#, python-format -msgid "SMBFS config 'smbfs_used_ratio' invalid. Must be > 0 and <= 1.0: %s" -msgstr "" -"Недопустимая конфигурация SMBFS smbfs_used_ratio. Значение должно быть " -"больше 0 и не больше 1,0: %s" - -#, python-format -msgid "SMBFS config file at %(config)s doesn't exist." -msgstr "Файл конфигурации SMBFS в %(config)s не существует." - -msgid "SMBFS config file not set (smbfs_shares_config)." -msgstr "Файл конфигурации SMBFS не указан (smbfs_shares_config)." 
- -#, python-format -msgid "SSH Command failed after '%(total_attempts)r' attempts : '%(command)s'" -msgstr "" -"После '%(total_attempts)r' попыток не выполнена команда SSH: '%(command)s'" - -#, python-format -msgid "SSH command injection detected: %(command)s" -msgstr "Обнаружено внедрение команды SSH: %(command)s" - -#, python-format -msgid "SSH connection failed for %(fabric)s with error: %(err)s" -msgstr "Сбой соединения SSH для %(fabric)s, ошибка: %(err)s" - -#, python-format -msgid "SSL Certificate expired on %s." -msgstr "Срок действия сертификата SSL истек %s." - -#, python-format -msgid "SSL error: %(arg)s." -msgstr "Ошибка SSL: %(arg)s." - -#, python-format -msgid "Scheduler Host Filter %(filter_name)s could not be found." -msgstr "Фильтр узлов диспетчера %(filter_name)s не найден" - -#, python-format -msgid "Scheduler Host Weigher %(weigher_name)s could not be found." -msgstr "" -"Не удалось найти определитель весовых коэффициентов хостов планировщика " -"%(weigher_name)s." - -#, python-format -msgid "" -"Secondary copy status: %(status)s and synchronized: %(sync)s, sync progress " -"is: %(progress)s%%." -msgstr "" -"Состояние вспомогательной копии: %(status)s, состояние синхронизации: " -"%(sync)s, ход синхронизации %(progress)s%%." - -#, python-format -msgid "" -"Secondary id can not be the same as primary array, backend_id = " -"%(secondary)s." -msgstr "" -"Вторичный ИД не может совпадать с основным массивом, backend_id = " -"%(secondary)s." - -#, python-format -msgid "SerialNumber must be in the file %(fileName)s." -msgstr "Значение SerialNumber должно быть указано в файле %(fileName)s." - -#, python-format -msgid "Service %(service)s on host %(host)s removed." -msgstr "Служба %(service)s удалена с хоста %(host)s." - -#, python-format -msgid "Service %(service_id)s could not be found on host %(host)s." -msgstr "Служба %(service_id)s не найдена на хосте %(host)s." - -#, python-format -msgid "Service %(service_id)s could not be found." 
-msgstr "Служба %(service_id)s не найдена." - -msgid "Service is too old to fulfil this request." -msgstr "Служба устарела и не поддерживает этот запрос." - -msgid "Service is unavailable at this time." -msgstr "В данный момент служба недоступна." - -msgid "Set pair secondary access error." -msgstr "Ошибка настройки вспомогательного доступа к паре." - -msgid "Sets thin provisioning." -msgstr "Задает тонкое резервирование." - -msgid "" -"Setting LUN QoS policy group is not supported on this storage family and " -"ONTAP version." -msgstr "" -"Установка группы стратегий QoS LUN не поддерживается в этом семействе памяти " -"и версии ONTAP." - -msgid "" -"Setting file qos policy group is not supported on this storage family and " -"ontap version." -msgstr "" -"Установка группы стратегий QoS для файлов не поддерживается в этом семействе " -"памяти и версии ontap." - -#, python-format -msgid "" -"Share at %(dir)s is not writable by the Cinder volume service. Snapshot " -"operations will not be supported." -msgstr "" -"Общий ресурс в %(dir)s недоступен на запись службе томов Cinder. Операции " -"для моментальных копий поддерживаться не будут." - -#, python-format -msgid "Sheepdog I/O Error, command was: \"%s\"." -msgstr "Ошибка ввода-вывода Sheepdog, команда: \"%s\"." - -msgid "" -"Show operations can only be made to projects in the same hierarchy of the " -"project in which users are scoped to." -msgstr "" -"Операции Показать доступны только для проектов в той же иерархии проекта, " -"где находятся пользователи." - -msgid "Size" -msgstr "Размер" - -#, python-format -msgid "Size for volume: %s not found, cannot secure delete." -msgstr "Размер для тома %s не найден, защищенное удаление невозможно." - -#, python-format -msgid "" -"Size is %(image_size)dGB and doesn't fit in a volume of size " -"%(volume_size)dGB." -msgstr "" -"Размер %(image_size)d ГБ не умещается на томе размером %(volume_size)d ГБ." 
- -#, python-format -msgid "" -"Size of specified image %(image_size)sGB is larger than volume size " -"%(volume_size)sGB." -msgstr "" -"Размер указанного образа (%(image_size)s ГБ) превышает размер тома " -"(%(volume_size)s ГБ)." - -#, python-format -msgid "" -"Snapshot %(id)s has been asked to be deleted while waiting for it to become " -"available. Perhaps a concurrent request was made." -msgstr "" -"Отправлен запрос на удаление моментальной копии %(id)s во время ожидания ее " -"готовности. Вероятно, параллельно выполняется другой запрос." - -#, python-format -msgid "" -"Snapshot %(id)s was found in state %(state)s rather than 'deleting' during " -"cascade delete." -msgstr "" -"В ходе каскадного удаления обнаружена моментальная копия %(id)s в состоянии " -"%(state)s вместо 'deleting'." - -#, python-format -msgid "Snapshot %(snapshot_id)s could not be found." -msgstr "Снимок %(snapshot_id)s не может быть найден." - -#, python-format -msgid "Snapshot %(snapshot_id)s has no metadata with key %(metadata_key)s." -msgstr "У снимка %(snapshot_id)s нет метаданных с ключом %(metadata_key)s." - -#, python-format -msgid "Snapshot '%s' doesn't exist on array." -msgstr "Моментальная копия '%s' не существует в массиве." - -#, python-format -msgid "" -"Snapshot cannot be created because volume %(vol_id)s is not available, " -"current volume status: %(vol_status)s." -msgstr "" -"Невозможно создать моментальную копию, поскольку том %(vol_id)s недоступен. " -"Текущее состояние тома: %(vol_status)s." - -msgid "Snapshot cannot be created while volume is migrating." -msgstr "Моментальную копию нельзя создать во время переноса тома." - -msgid "Snapshot of secondary replica is not allowed." -msgstr "Создавать моментальную копию вспомогательной копии не разрешено." - -#, python-format -msgid "Snapshot of volume not supported in state: %s." -msgstr "Моментальная копия тома не поддерживается в состоянии %s." - -#, python-format -msgid "Snapshot res \"%s\" that is not deployed anywhere?" 
-msgstr "" -"Создать моментальную копию для ресурса \"%s\", который нигде не был " -"развернут?" - -#, python-format -msgid "Snapshot status %(cur)s not allowed for update_snapshot_status" -msgstr "" -"Состояние моментальной копии %(cur)s запрещено для update_snapshot_status" - -msgid "Snapshot status must be \"available\" to clone." -msgstr "Для дублирования состояние моментальной копии должно быть available." - -#, python-format -msgid "" -"Snapshot to be backed up must be available, but the current status is \"%s\"." -msgstr "" -"Моментальная копия для резервного копирования должна быть доступна, но " -"текущее состояние - \"%s\"." - -#, python-format -msgid "Snapshot with id of %s could not be found." -msgstr "Моментальная копия с ИД %s не найдена." - -#, python-format -msgid "" -"Snapshot='%(snap)s' does not exist in base image='%(base)s' - aborting " -"incremental backup" -msgstr "" -"Моментальная копия='%(snap)s' не существует в базовом образе='%(base)s' - " -"прерывание дополняющего резервного копирования" - -#, python-format -msgid "Snapshots are not supported for this volume format: %s" -msgstr "Моментальные копии не поддерживаются для этого формата тома: %s" - -#, python-format -msgid "Socket error: %(arg)s." -msgstr "Ошибка сокета: %(arg)s." - -msgid "SolidFire Cinder Driver exception" -msgstr "Исключительная ситуация драйвера SolidFire Cinder" - -msgid "Sort direction array size exceeds sort key array size." -msgstr "" -"Размер массива значений направления сортировки превышает размер массива " -"ключей сортировки." - -msgid "Source CG is empty. No consistency group will be created." -msgstr "" -"Исходная группа согласования пустая. Группа согласования создана не будет." - -msgid "Source host details not found." -msgstr "Не найдены сведения об исходном хосте." - -msgid "Source volume device ID is required." -msgstr "Требуется ИД устройства исходного тома." - -msgid "Source volume not mid-migration." -msgstr "Исходный том не в процессе переноса." 
- -msgid "SpaceInfo returned byarray is invalid" -msgstr "SpaceInfo возвратил недопустимый byarray" - -#, python-format -msgid "" -"Specified host to map to volume %(vol)s is in unsupported host group with " -"%(group)s." -msgstr "" -"Указанный хост для связывания с томом %(vol)s находится в неподдерживаемой " -"группе хостов %(group)s." - -msgid "Specified logical volume does not exist." -msgstr "Указанный логический том не существует." - -#, python-format -msgid "Specified snapshot group with id %s could not be found." -msgstr "Указанная группа моментальных копий с ИД %s не найдена." - -msgid "Specify a password or private_key" -msgstr "Укажите пароль или личный_ключ" - -msgid "Specify san_password or san_private_key" -msgstr "Задайте san_password или san_private_key" - -msgid "" -"Specify volume type name, description, is_public or a combination thereof." -msgstr "" -"Укажите такие параметры, как тип тома, имя, описание, is_public или их " -"сочетание." - -msgid "Split pair error." -msgstr "Ошибка разделения пары." - -msgid "Split replication failed." -msgstr "Ошибка разделения репликации." - -msgid "Start LUNcopy error." -msgstr "Ошибка запуска LUNcopy." - -msgid "State" -msgstr "Состояние" - -#, python-format -msgid "State of node is wrong. Current state is %s." -msgstr "Неверное состояние узла. Текущее состояние: %s." - -msgid "Status" -msgstr "Статус" - -msgid "Stop snapshot error." -msgstr "Ошибка остановки моментальной копии." - -#, python-format -msgid "Storage Configuration Service not found on %(storageSystemName)s." -msgstr "Служба конфигурации носителей не найдена в %(storageSystemName)s." - -#, python-format -msgid "Storage HardwareId mgmt Service not found on %(storageSystemName)s." -msgstr "" -"Служба управления ИД аппаратного обеспечения носителей не найдена в " -"%(storageSystemName)s." - -#, python-format -msgid "Storage Profile %s not found." -msgstr "Не найден профайл хранилища %s." 
- -#, python-format -msgid "Storage Relocation Service not found on %(storageSystemName)s." -msgstr "Служба перемещения носителей не найдена в %(storageSystemName)s." - -#, python-format -msgid "Storage family %s is not supported." -msgstr "Семейство памяти %s не поддерживается." - -#, python-format -msgid "Storage group %(storageGroupName)s was not deleted successfully" -msgstr "Группа носителей %(storageGroupName)s не была удалена успешно" - -#, python-format -msgid "Storage host %(svr)s not detected, verify name" -msgstr "Хост памяти %(svr)s не обнаружен. Укажите имя" - -msgid "Storage pool is not configured." -msgstr "Пул памяти не настроен." - -#, python-format -msgid "Storage profile: %(storage_profile)s not found." -msgstr "Профайл хранилища: %(storage_profile)s не найден." - -msgid "Storage resource could not be found." -msgstr "Ресурс памяти не найден." - -msgid "Storage system id not set." -msgstr "Не указан ИД системы хранения." - -#, python-format -msgid "Storage system not found for pool %(poolNameInStr)s." -msgstr "Для пула %(poolNameInStr)s не найдена система памяти." - -#, python-format -msgid "StorageSystem %(array)s is not found." -msgstr "Система хранения %(array)s не найдена." - -#, python-format -msgid "" -"Sum of child usage '%(sum)s' is greater than free quota of '%(free)s' for " -"project '%(proj)s' for resource '%(res)s'. Please lower the limit or usage " -"for one or more of the following projects: '%(child_ids)s'" -msgstr "" -"Общее использование дочерним объектом '%(sum)s' превышает свободную квоту " -"'%(free)s' в проекте '%(proj)s' для ресурса '%(res)s'. Уменьшите требования " -"или использование для одного или нескольких следующих проектов: " -"'%(child_ids)s'" - -msgid "Switch over pair error." -msgstr "Ошибка переключения пары." - -msgid "Sync pair error." -msgstr "Ошибка синхронизации пары." - -#, python-format -msgid "System %(id)s found with bad password status - %(pass_status)s." 
-msgstr "Система %(id)s находится в состоянии ошибки пароля - %(pass_status)s." - -#, python-format -msgid "System %(id)s found with bad status - %(status)s." -msgstr "Система %(id)s находится в неверном состоянии (%(status)s)." - -msgid "System does not support compression." -msgstr "Система не поддерживает сжатие." - -msgid "System is busy, retry operation." -msgstr "Система занята. Повторите операцию." - -#, python-format -msgid "" -"TSM [%(tsm)s] was not found in CloudByte storage for account [%(account)s]." -msgstr "" -"TSM [%(tsm)s] не найден в хранилище CloudByte для учетной записи " -"[%(account)s]." - -msgid "Target volume type is still in use." -msgstr "Тип целевого тома еще используется." - -msgid "Terminate connection failed" -msgstr "Завершение соединения не выполнено" - -msgid "Terminate connection unable to connect to backend." -msgstr "" -"Операции Завершить соединение не удалось подключиться к базовому серверу." - -#, python-format -msgid "Terminate volume connection failed: %(err)s" -msgstr "Не удалось закрыть соединение с томом: %(err)s" - -#, python-format -msgid "The %(type)s %(id)s source to be replicated was not found." -msgstr "Реплицируемый исходный том %(type)s %(id)s не найден." - -msgid "" -"The 'sort_key' and 'sort_dir' parameters are deprecated and cannot be used " -"with the 'sort' parameter." -msgstr "" -"Параметры sort_key и sort_dir устарели и не могут использоваться с " -"параметром sort." - -msgid "The EQL array has closed the connection." -msgstr "Массив EQL закрыл соединение." - -#, python-format -msgid "" -"The GPFS filesystem %(fs)s is not at the required release level. Current " -"level is %(cur)s, must be at least %(min)s." -msgstr "" -"Уровень выпуска файловой системы GPFS %(fs)s не соответствует требуемому. " -"Текущий уровень - %(cur)s, должен быть %(min)s." - -msgid "The IP Address was not found." -msgstr "Не найден IP-адрес." - -#, python-format -msgid "" -"The WebDAV request failed. 
Reason: %(msg)s, Return code/reason: %(code)s, " -"Source Volume: %(src)s, Destination Volume: %(dst)s, Method: %(method)s." -msgstr "" -"Запрос WebDAV не выполнен. Причина: %(msg)s, Код возврата/причина: %(code)s, " -"Исходный том: %(src)s, Целевой том: %(dst)s, Метод: %(method)s." - -msgid "" -"The above error may show that the database has not been created.\n" -"Please create a database using 'cinder-manage db sync' before running this " -"command." -msgstr "" -"Приведенная выше ошибка могла показать, что база данных не была создана.\n" -"Перед выполнением этой команды создайте базу данных с помощью команды " -"'cinder-manage db sync'." - -#, python-format -msgid "" -"The array does not support the storage pool setting for SLO %(slo)s and " -"workload %(workload)s. Please check the array for valid SLOs and workloads." -msgstr "" -"Массив не поддерживает параметр пула памяти для SLO %(slo)s и нагрузки " -"%(workload)s. Проверьте правильность настройки SLO и нагрузок для массива." - -msgid "" -"The back-end where the volume is created does not have replication enabled." -msgstr "Репликация не включена в базовой системе, где создан том." - -#, python-format -msgid "" -"The command %(cmd)s failed. (ret: %(ret)s, stdout: %(out)s, stderr: %(err)s)" -msgstr "" -"Команда %(cmd)s не выполнена. (код возврата: %(ret)s, stdout: %(out)s, " -"stderr: %(err)s)" - -msgid "The copy should be primary or secondary" -msgstr "Копия должна быть первичной или вторичной" - -#, python-format -msgid "" -"The creation of a logical device could not be completed. (LDEV: %(ldev)s)" -msgstr "Не удалось создать логическое устройство. 
(LDEV: %(ldev)s)" - -msgid "The decorated method must accept either a volume or a snapshot object" -msgstr "" -"Метод с декоратором должен принимать либо том, либо объект моментальной копии" - -#, python-format -msgid "The device in the path %(path)s is unavailable: %(reason)s" -msgstr "Устройство в пути %(path)s недоступно: %(reason)s" - -#, python-format -msgid "The end time (%(end)s) must be after the start time (%(start)s)." -msgstr "" -"Конечное время (%(end)s) должно указывать на время после начального времени " -"(%(start)s)." - -#, python-format -msgid "The extraspec: %(extraspec)s is not valid." -msgstr "Недопустимая дополнительная спецификация %(extraspec)s." - -#, python-format -msgid "The failed-over volume could not be deleted: %s" -msgstr "Нельзя удалить том, для которого выполнено переключение после сбоя: %s" - -#, python-format -msgid "The following elements are required: %s" -msgstr "Требуются следующие элементы: %s" - -msgid "The host group or iSCSI target could not be added." -msgstr "Не удалось добавить группу хостов или целевое расположение iSCSI." - -msgid "The host group or iSCSI target was not found." -msgstr "Не найдена группа хостов или целевое расположение iSCSI." - -msgid "" -"The host is not ready to be failed back. Please resynchronize the volumes " -"and resume replication on the 3PAR backends." -msgstr "" -"Хост не готов к обратному переключению. Выполните повторную синхронизацию " -"томов и возобновите репликацию в базовых системах 3PAR." - -msgid "" -"The host is not ready to be failed back. Please resynchronize the volumes " -"and resume replication on the LeftHand backends." -msgstr "" -"Хост не готов к обратному переключению. Выполните повторную синхронизацию " -"томов и возобновите репликацию в базовых системах LeftHand." - -msgid "" -"The host is not ready to be failed back. Please resynchronize the volumes " -"and resume replication on the Storwize backends." -msgstr "" -"Хост не готов к обратному переключению. 
Выполните повторную синхронизацию " -"томов и возобновите репликацию в базовых системах Storwize." - -#, python-format -msgid "The iSCSI CHAP user %(user)s does not exist." -msgstr "Пользователь %(user)s CHAP iSCSI не существует." - -msgid "The key cannot be None." -msgstr "Ключ не может быть None." - -#, python-format -msgid "The logical device for specified %(type)s %(id)s was already deleted." -msgstr "Логическое устройство для указанного %(type)s %(id)s уже удалено." - -#, python-format -msgid "The method %(method)s is timed out. (timeout value: %(timeout)s)" -msgstr "Истек тайм-аут у метода %(method)s. (значение тайм-аута: %(timeout)s)" - -msgid "The method update_migrated_volume is not implemented." -msgstr "Метод update_migrated_volume не реализован." - -#, python-format -msgid "" -"The mount %(mount_path)s is not a valid Quobyte USP volume. Error: %(exc)s" -msgstr "" -"Путь монтирования %(mount_path)s не является допустимым томом USP Quobyte. " -"Ошибка: %(exc)s" - -#, python-format -msgid "The parameter of the storage backend. (config_group: %(config_group)s)" -msgstr "Параметр базовой системы хранения. (config_group: %(config_group)s)" - -msgid "The parent backup must be available for incremental backup." -msgstr "" -"Родительская резервная копия должна быть доступна для создания дополняющей " -"резервной копии." - -#, python-format -msgid "The provided snapshot '%s' is not a snapshot of the provided volume." -msgstr "Моментальная копия '%s' не является моментальной копией данного тома." - -msgid "" -"The reference to the volume in the backend should have the format " -"file_system/volume_name (volume_name cannot contain '/')" -msgstr "" -"Ссылка на том в базовой системе должна указываться в формате: файловая-" -"система/имя-тома. Имя тома не может содержать символ '/'" - -msgid "" -"The replication mode was not configured correctly in the volume type " -"extra_specs. 
If replication:mode is periodic, replication:sync_period must " -"also be specified and be between 300 and 31622400 seconds." -msgstr "" -"Режим репликации настроен неверно в параметрах extra_specs для типа тома. " -"Если replication:mode задан как periodic, то необходимо указать replication:" -"sync_period в пределах от 300 до 31622400 секунд." - -#, python-format -msgid "The replication sync period must be at least %s seconds." -msgstr "Минимальное значение периода репликации составляет %s секунд." - -#, python-format -msgid "" -"The requested size : %(requestedSize)s is not the same as resulting size: " -"%(resultSize)s." -msgstr "" -"Запрошенный размер %(requestedSize)s не совпадает с итоговым размером " -"%(resultSize)s." - -#, python-format -msgid "The resource %(resource)s was not found." -msgstr "Ресурс %(resource)s не найден." - -msgid "The results are invalid." -msgstr "Неверные результаты." - -msgid "The snapshot cannot be created when the volume is in maintenance mode." -msgstr "" -"Моментальную копию нельзя создать, когда том находится в режиме обслуживания." - -msgid "The source volume for this WebDAV operation not found." -msgstr "Не найден исходный том для этой операции WebDAV." - -#, python-format -msgid "" -"The source volume type '%(src)s' is different than the destination volume " -"type '%(dest)s'." -msgstr "" -"Тип исходного тома '%(src)s' отличается от типа целевого тома '%(dest)s'." - -#, python-format -msgid "The source volume type '%s' is not available." -msgstr "Тип исходного тома '%s' не доступен. " - -#, python-format -msgid "The specified %(desc)s is busy." -msgstr "Указанное %(desc)s занято." - -#, python-format -msgid "The specified LUN does not belong to the given pool: %s." -msgstr "LUN не принадлежит данному пулу: %s." - -#, python-format -msgid "" -"The specified ldev %(ldev)s could not be managed. The ldev must not be " -"mapping." -msgstr "" -"Не удалось включить управление для заданного ldev %(ldev)s. 
ldev не должен " -"преобразован." - -#, python-format -msgid "" -"The specified ldev %(ldev)s could not be managed. The ldev must not be " -"paired." -msgstr "" -"Не удалось включить управление для заданного ldev %(ldev)s. ldev не должен " -"иметь парное устройство." - -#, python-format -msgid "" -"The specified ldev %(ldev)s could not be managed. The ldev size must be in " -"multiples of gigabyte." -msgstr "" -"Не удалось включить управление для заданного ldev %(ldev)s. Размер ldev " -"должен указываться в единицах, кратных гигабайтам." - -#, python-format -msgid "" -"The specified ldev %(ldev)s could not be managed. The volume type must be DP-" -"VOL." -msgstr "" -"Не удалось включить управление для заданного ldev %(ldev)s. Требуемый тип " -"тома: DP-VOL." - -#, python-format -msgid "" -"The specified operation is not supported. The volume size must be the same " -"as the source %(type)s. (volume: %(volume_id)s)" -msgstr "" -"Указанная операция не поддерживается. Размер тома должен совпадать с " -"исходным %(type)s. (том: %(volume_id)s)" - -msgid "The specified vdisk is mapped to a host." -msgstr "Указанный vdisk связан с хостом." - -msgid "The specified volume is mapped to a host." -msgstr "Указанный том связан с хостом." - -#, python-format -msgid "" -"The storage array password for %s is incorrect, please update the configured " -"password." -msgstr "" -"Неверный пароль массива хранения для %s. Обновите пароль в конфигурации." - -#, python-format -msgid "The storage backend can be used. (config_group: %(config_group)s)" -msgstr "" -"Базовая система хранения пригодна для использования. (config_group: " -"%(config_group)s)" - -#, python-format -msgid "" -"The storage device does not support %(prot)s. Please configure the device to " -"support %(prot)s or switch to a driver using a different protocol." -msgstr "" -"Носитель не поддерживает %(prot)s. Настройте поддержку %(prot)s в устройстве " -"или переключитесь на драйвер с другим протоколом." 
- -#, python-format -msgid "" -"The striped meta count of %(memberCount)s is too small for volume: " -"%(volumeName)s, with size %(volumeSize)s." -msgstr "" -"Счетчик чередующихся метаданных %(memberCount)s слишком мал для тома " -"%(volumeName)s с размером %(volumeSize)s." - -#, python-format -msgid "" -"The type of metadata: %(metadata_type)s for volume/snapshot %(id)s is " -"invalid." -msgstr "" -"Тип метаданных %(metadata_type)s для тома/моментальной копии %(id)s " -"недопустимый." - -#, python-format -msgid "" -"The volume %(volume_id)s could not be extended. The volume type must be " -"Normal." -msgstr "Том %(volume_id)s не удалось расширить. Тип тома должен быть Normal." - -#, python-format -msgid "" -"The volume %(volume_id)s could not be unmanaged. The volume type must be " -"%(volume_type)s." -msgstr "" -"Не удалось отменить управление для тома %(volume_id)s. Требуемый тип тома: " -"%(volume_type)s." - -#, python-format -msgid "The volume %(volume_id)s is managed successfully. (LDEV: %(ldev)s)" -msgstr "Управление тома %(volume_id)s выполняется успешно. (LDEV: %(ldev)s)" - -#, python-format -msgid "The volume %(volume_id)s is unmanaged successfully. (LDEV: %(ldev)s)" -msgstr "Управление томом %(volume_id)s отменено успешно. (LDEV: %(ldev)s)" - -#, python-format -msgid "The volume %(volume_id)s to be mapped was not found." -msgstr "Связываемый том %(volume_id)s не найден." - -msgid "The volume cannot accept transfer in maintenance mode." -msgstr "Том не может принять передачу в режиме обслуживания." - -msgid "The volume cannot be attached in maintenance mode." -msgstr "Том невозможно подключить в режиме обслуживания." - -msgid "The volume cannot be detached in maintenance mode." -msgstr "Том невозможно отключить в режиме обслуживания." - -msgid "The volume cannot be updated during maintenance." -msgstr "Том невозможно обновить во время обслуживания." - -msgid "The volume connection cannot be initialized in maintenance mode." 
-msgstr "Невозможно инициализировать соединение с томом в режиме обслуживания." - -msgid "The volume driver requires the iSCSI initiator name in the connector." -msgstr "Для драйвера тома требуется имя инициатора iSCSI в коннекторе." - -msgid "" -"The volume is currently busy on the 3PAR and cannot be deleted at this time. " -"You can try again later." -msgstr "" -"Том в настоящее время занят в 3PAR и не может быть удален. Вы можете " -"повторить попытку позже." - -msgid "The volume label is required as input." -msgstr "Требуется метка тома во входных данных." - -#, python-format -msgid "There are no resources available for use. (resource: %(resource)s)" -msgstr "Нет доступных ресурсов. (ресурс: %(resource)s)" - -msgid "There are no valid ESX hosts." -msgstr "Нет допустимых хостов ESX." - -msgid "There are no valid datastores." -msgstr "Нет допустимых хранилищ данных." - -#, python-format -msgid "" -"There is no designation of the %(param)s. The specified storage is essential " -"to manage the volume." -msgstr "" -"Отсутствует обозначение %(param)s. Заданное хранилище требуется для " -"управления томом." - -msgid "" -"There is no designation of the ldev. The specified ldev is essential to " -"manage the volume." -msgstr "" -"Отсутствует обозначение для ldev. Указанное ldev требуется для управления " -"томом." - -msgid "There is no metadata in DB object." -msgstr "Нет метаданных в объекте базы данных." - -#, python-format -msgid "There is no share which can host %(volume_size)sG" -msgstr "Отсутствуют общие ресурсы, которые могут управлять %(volume_size)sG" - -#, python-format -msgid "There is no share which can host %(volume_size)sG." -msgstr "Нет общих ресурсов, способных вместить %(volume_size)s ГБ." - -#, python-format -msgid "There is no such action: %s" -msgstr "Не существует такого действия: %s" - -msgid "There is no virtual disk device." -msgstr "Нет устройства виртуального диска." 
- -#, python-format -msgid "There was an error adding the volume to the remote copy group: %s." -msgstr "Ошибка при добавлении тома в группу удаленного копирования: %s." - -#, python-format -msgid "There was an error creating the cgsnapshot: %s" -msgstr "При создании cgsnapshot возникла ошибка: %s" - -#, python-format -msgid "There was an error creating the remote copy group: %s." -msgstr "Ошибка при создании группы удаленного копирования: %s." - -#, python-format -msgid "" -"There was an error setting the sync period for the remote copy group: %s." -msgstr "" -"Ошибка при настройке периода синхронизации для группы удаленного " -"копирования: %s." - -#, python-format -msgid "" -"There was an error setting up a remote copy group on the 3PAR arrays: " -"('%s'). The volume will not be recognized as replication type." -msgstr "" -"Ошибка при настройке группы удаленного копирования в массивах 3PAR ('%s'). " -"Неизвестный тип репликации для тома." - -#, python-format -msgid "" -"There was an error setting up a remote schedule on the LeftHand arrays: " -"('%s'). The volume will not be recognized as replication type." -msgstr "" -"Ошибка при настройке группы удаленного расписания в массивах LeftHand " -"('%s'). Неизвестный тип репликации для тома." - -#, python-format -msgid "There was an error starting remote copy: %s." -msgstr "Ошибка при запуске удаленного копирования: %s." - -#, python-format -msgid "There's no Gluster config file configured (%s)" -msgstr "Отсутствует настроенный файл конфигурации Gluster (%s)" - -#, python-format -msgid "There's no NFS config file configured (%s)" -msgstr "Отсутствует настроенный файл конфигурации NFS (%s)" - -#, python-format -msgid "" -"There's no Quobyte volume configured (%s). Example: quobyte:///" -"" -msgstr "" -"Нет настроенного тома Quobyte (%s). Пример: quobyte:///<имя-тома>" - -msgid "Thin provisioning not supported on this version of LVM." -msgstr "Оперативное выделение ресурсов не поддерживается в этой версии LVM." 
-
-msgid "This driver does not support deleting in-use snapshots."
-msgstr "Этот драйвер не поддерживает удаление используемых моментальных копий."
-
-msgid "This driver does not support snapshotting in-use volumes."
-msgstr ""
-"Этот драйвер не поддерживает создание моментальных копий используемых томов."
-
-msgid "This request was rate-limited."
-msgstr "Этот запрос ограничен по частоте."
-
-#, python-format
-msgid ""
-"This system platform (%s) is not supported. This driver supports only Win32 "
-"platforms."
-msgstr ""
-"Платформа %s не поддерживается. Этот драйвер поддерживает только платформы "
-"Win32."
-
-#, python-format
-msgid "Tier Policy Service not found for %(storageSystemName)s."
-msgstr "Служба стратегии слоев для %(storageSystemName)s не найдена."
-
-#, python-format
-msgid "Timed out while waiting for Nova update for creation of snapshot %s."
-msgstr ""
-"Истек тайм-аут ожидания обновления Nova для создания моментальной копии %s."
-
-#, python-format
-msgid ""
-"Timed out while waiting for Nova update for deletion of snapshot %(id)s."
-msgstr ""
-"Истек тайм-аут ожидания обновления Nova для удаления моментальной копии "
-"%(id)s."
-
-#, python-format
-msgid "Timeout while calling %s "
-msgstr "Тайм-аут при вызове %s "
-
-#, python-format
-msgid "Timeout while requesting %(service)s API."
-msgstr "Тайм-аут во время запроса API %(service)s."
-
-#, python-format
-msgid "Timeout while requesting capabilities from backend %(service)s."
-msgstr ""
-"Тайм-аут во время запроса функциональности от базовой системы %(service)s."
-
-#, python-format
-msgid "Transfer %(transfer_id)s could not be found."
-msgstr "Не удалось найти передачу %(transfer_id)s."
- -#, python-format -msgid "" -"Transfer %(transfer_id)s: Volume id %(volume_id)s in unexpected state " -"%(status)s, expected awaiting-transfer" -msgstr "" -"Передача %(transfer_id)s: ИД тома %(volume_id)s в непредвиденном состоянии " -"%(status)s, ожидалось состояние Ожидание-передачи" - -#, python-format -msgid "" -"Trying to import backup metadata from id %(meta_id)s into backup %(id)s." -msgstr "" -"Попытка импортировать метаданные резервной копии из ИД %(meta_id)s в " -"резервную копию %(id)s." - -#, python-format -msgid "" -"Tune volume task stopped before it was done: volume_name=%(volume_name)s, " -"task-status=%(status)s." -msgstr "" -"Выполнение задачи тонкой настройки тома преждевременно прервалось: том " -"%(volume_name)s, состояние задачи %(status)s." - -#, python-format -msgid "" -"Type %(type_id)s is already associated with another qos specs: " -"%(qos_specs_id)s" -msgstr "" -"Тип %(type_id)s уже связан с другими спецификациями QoS: %(qos_specs_id)s" - -msgid "Type access modification is not applicable to public volume type." -msgstr "Изменение типа доступа неприменимо к типу общедоступного тома." - -msgid "Type cannot be converted into NaElement." -msgstr "Тип невозможно преобразовать в NaElement." - -#, python-format -msgid "TypeError: %s" -msgstr "TypeError: %s" - -#, python-format -msgid "UUIDs %s are in both add and remove volume list." -msgstr "" -"UUID %s находятся и в списке добавления томов, и в списке удаления томов." - -#, python-format -msgid "Unable to access the Storwize back-end for volume %s." -msgstr "Не удаётся получить доступ к базовой системе Storwize для тома %s." - -msgid "Unable to access the backend storage via file handle." -msgstr "Нет доступа к памяти базовой системы через ссылку на файл." - -#, python-format -msgid "Unable to access the backend storage via the path %(path)s." -msgstr "" -"Не удалось получить доступ к хранилищу непереданных сообщений с помощью пути " -"%(path)s." 
-
-#, python-format
-msgid "Unable to add Cinder host to apphosts for space %(space)s"
-msgstr "Не удалось добавить хост Cinder в apphosts для пространства %(space)s"
-
-#, python-format
-msgid "Unable to complete failover of %s."
-msgstr "Не удается выполнить переключение после сбоя %s."
-
-msgid "Unable to connect or find connection to host"
-msgstr "Не удалось подключиться к хосту или найти соединение с ним"
-
-#, python-format
-msgid "Unable to create consistency group %s"
-msgstr "Не удалось создать группу согласования %s"
-
-msgid "Unable to create lock. Coordination backend not started."
-msgstr ""
-"Не удается создать блокировку. Базовая система координации не запущена."
-
-#, python-format
-msgid ""
-"Unable to create or get default storage group for FAST policy: "
-"%(fastPolicyName)s."
-msgstr ""
-"Не удалось создать или получить группу носителей по умолчанию для стратегии "
-"FAST %(fastPolicyName)s."
-
-#, python-format
-msgid "Unable to create replica clone for volume %s."
-msgstr "Не удалось создать реплику дубликата для тома %s."
-
-#, python-format
-msgid "Unable to create the relationship for %s."
-msgstr "Не удаётся создать взаимосвязь для %s."
-
-#, python-format
-msgid "Unable to create volume %(name)s from %(snap)s."
-msgstr "Не удается создать том %(name)s из %(snap)s."
-
-#, python-format
-msgid "Unable to create volume %(name)s from %(vol)s."
-msgstr "Не удается создать том %(name)s из %(vol)s."
-
-#, python-format
-msgid "Unable to create volume %s"
-msgstr "Не удается создать том %s"
-
-msgid "Unable to create volume. Backend down."
-msgstr "Не удается создать том. Выключена базовая система."
-
-#, python-format
-msgid "Unable to delete Consistency Group snapshot %s"
-msgstr "Не удалось удалить моментальную копию группы согласования %s"
-
-#, python-format
-msgid "Unable to delete snapshot %(id)s, status: %(status)s."
-msgstr "Не удалось удалить моментальную копию %(id)s, состояние: %(status)s."
- -#, python-format -msgid "Unable to delete snapshot policy on volume %s." -msgstr "Не удалось удалить стратегию моментальной копии на томе %s." - -#, python-format -msgid "" -"Unable to delete the target volume for volume %(vol)s. Exception: %(err)s." -msgstr "" -"Не удаётся удалить целевой том для %(vol)s. Исключительная ситуация: %(err)s." - -msgid "" -"Unable to detach volume. Volume status must be 'in-use' and attach_status " -"must be 'attached' to detach." -msgstr "" -"Отсоединить том невозможно. Для отсоединения состояние должно быть " -"'используется', а attach_status должен быть 'подсоединен'." - -#, python-format -msgid "" -"Unable to determine secondary_array from supplied secondary: %(secondary)s." -msgstr "Не удается определить secondary_array из параметра: %(secondary)s." - -#, python-format -msgid "Unable to determine snapshot name in Purity for snapshot %(id)s." -msgstr "" -"Не удалось определить имя моментальной копии в Purity для моментальной копии " -"%(id)s." - -msgid "Unable to determine system id." -msgstr "Не удалось определить ИД системы." - -msgid "Unable to determine system name." -msgstr "Не удалось определить имя системы." - -#, python-format -msgid "" -"Unable to do manage snapshot operations with Purity REST API version " -"%(api_version)s, requires %(required_versions)s." -msgstr "" -"Невозможно выполнять операции управления моментальной копией через API REST " -"Purity версии %(api_version)s, требуется версия %(required_versions)s." - -#, python-format -msgid "" -"Unable to do replication with Purity REST API version %(api_version)s, " -"requires one of %(required_versions)s." -msgstr "" -"Невозможно выполнить репликацию с версией API %(api_version)s Purity REST, " -"требуется одна из версий %(required_versions)s." - -#, python-format -msgid "Unable to establish the partnership with the Storwize cluster %s." -msgstr "Не удаётся установить партнёрство с кластером Storwize %s." 
- -#, python-format -msgid "Unable to extend volume %s" -msgstr "Не удалось расширить том %s" - -#, python-format -msgid "" -"Unable to fail-over the volume %(id)s to the secondary back-end, because the " -"replication relationship is unable to switch: %(error)s" -msgstr "" -"Не удаётся переключить том %(id)s на вторичную базовую систему, так как " -"невозможно переключить взаимосвязь репликации: %(error)s" - -msgid "" -"Unable to failback to \"default\", this can only be done after a failover " -"has completed." -msgstr "" -"Не удается восстановить состояние \"default\", это возможно только по " -"завершении переключения после сбоя." - -#, python-format -msgid "Unable to failover to replication target:%(reason)s)." -msgstr "" -"Не удается переключиться после сбоя на целевой объект репликации: " -"%(reason)s)." - -msgid "Unable to fetch connection information from backend." -msgstr "Не удалось получить информацию о соединении из базовой программы." - -#, python-format -msgid "Unable to fetch connection information from backend: %(err)s" -msgstr "" -"Не удалось получить информацию о соединении из базовой программы: %(err)s" - -#, python-format -msgid "Unable to find Purity ref with name=%s" -msgstr "Не удалось найти ссылку на Purity с именем=%s" - -#, python-format -msgid "Unable to find Volume Group: %(vg_name)s" -msgstr "Не удалось найти группу томов %(vg_name)s" - -msgid "Unable to find failover target, no secondary targets configured." -msgstr "" -"Не найден целевой объект переключения в случае сбоя, не настроены " -"вспомогательные целевые объекты." - -msgid "Unable to find iSCSI mappings." -msgstr "Не найдены связи iSCSI." - -#, python-format -msgid "Unable to find ssh_hosts_key_file: %s" -msgstr "Не найден ssh_hosts_key_file: %s" - -msgid "Unable to find system log file!" -msgstr "Не найден файл системного протокола!" - -#, python-format -msgid "" -"Unable to find viable pg snapshot to use forfailover on selected secondary " -"array: %(id)s." 
-msgstr "" -"Не найдена пригодная моментальная копия группы защиты для переключения в " -"случае сбоя в выбранном вспомогательном массиве: %(id)s." - -#, python-format -msgid "" -"Unable to find viable secondary array fromconfigured targets: %(targets)s." -msgstr "" -"Среди настроенных целевых объектов не найден пригодный вспомогательный " -"массив: %(targets)s." - -#, python-format -msgid "Unable to find volume %s" -msgstr "Не найден том %s" - -#, python-format -msgid "Unable to get a block device for file '%s'" -msgstr "Получить блокирующее устройство для файла '%s' невозможно" - -#, python-format -msgid "" -"Unable to get configuration information necessary to create a volume: " -"%(errorMessage)s." -msgstr "" -"Извлечь информацию о конфигурации, необходимую для создания тома, " -"невозможно: %(errorMessage)s." - -msgid "Unable to get corresponding record for pool." -msgstr "Не удалось получить соответствующую запись для пула." - -#, python-format -msgid "" -"Unable to get information on space %(space)s, please verify that the cluster " -"is running and connected." -msgstr "" -"Не удалось получить информацию о пространстве %(space)s. Убедитесь, что " -"кластер работает и подключен." - -msgid "" -"Unable to get list of IP addresses on this host, check permissions and " -"networking." -msgstr "" -"Не удалось получить список IP-адресов на этом хосте. Проверьте права доступа " -"и работу сети." - -msgid "" -"Unable to get list of domain members, check that the cluster is running." -msgstr "" -"Не удалось получить список элементов домена. Убедитесь, что кластер работает." - -msgid "" -"Unable to get list of spaces to make new name. Please verify the cluster is " -"running." -msgstr "" -"Не удалось получить список пространств для создания нового имени. Убедитесь, " -"что кластер работает." - -#, python-format -msgid "Unable to get stats for backend_name: %s" -msgstr "Не удалось получить статистику для backend_name: %s" - -msgid "Unable to get storage volume from job." 
-msgstr "Не удается получить том носителя из задания." - -#, python-format -msgid "Unable to get target endpoints for hardwareId %(hardwareIdInstance)s." -msgstr "" -"Не удалось получить целевые конечные точки для ИД аппаратного обеспечения " -"%(hardwareIdInstance)s." - -msgid "Unable to get the name of the masking view." -msgstr "Не удается получить имя маскирующего представления." - -msgid "Unable to get the name of the portgroup." -msgstr "Не удается получить имя группы портов." - -#, python-format -msgid "Unable to get the replication relationship for volume %s." -msgstr "Не удаётся получить взаимосвязь репликации для тома %s." - -#, python-format -msgid "" -"Unable to import volume %(deviceId)s to cinder. It is the source volume of " -"replication session %(sync)s." -msgstr "" -"Не удалось импортировать том %(deviceId)s в Cinder. Это исходный том сеанса " -"репликации %(sync)s." - -#, python-format -msgid "" -"Unable to import volume %(deviceId)s to cinder. The external volume is not " -"in the pool managed by current cinder host." -msgstr "" -"Не удалось импортировать том %(deviceId)s в Cinder. Внешний том не в пуле, " -"управляемом текущим хостом Cinder." - -#, python-format -msgid "" -"Unable to import volume %(deviceId)s to cinder. Volume is in masking view " -"%(mv)s." -msgstr "" -"Не удалось импортировать том %(deviceId)s в Cinder. Том находится в " -"маскирующем представлении %(mv)s." - -#, python-format -msgid "Unable to load CA from %(cert)s %(e)s." -msgstr "Не удалось загрузить CA из %(cert)s %(e)s." - -#, python-format -msgid "Unable to load cert from %(cert)s %(e)s." -msgstr "Не удалось загрузить сертификат из %(cert)s %(e)s." - -#, python-format -msgid "Unable to load key from %(cert)s %(e)s." -msgstr "Не удалось загрузить ключ из %(cert)s %(e)s." 
- -#, python-format -msgid "Unable to locate account %(account_name)s on Solidfire device" -msgstr "" -"Не удалось найти учетную запись %(account_name)s на устройстве Solidfire" - -#, python-format -msgid "Unable to locate an SVM that is managing the IP address '%s'" -msgstr "Не удалось найти SVM, управляющую IP-адресом '%s'" - -#, python-format -msgid "Unable to locate specified replay profiles %s " -msgstr "Не найдены указанные профили повтора %s " - -#, python-format -msgid "" -"Unable to manage existing volume. Volume %(volume_ref)s already managed." -msgstr "" -"Невозможно управлять существующим томом. Том %(volume_ref)s уже находится " -"под управлением." - -#, python-format -msgid "Unable to manage volume %s" -msgstr "Невозможно управлять томом %s" - -msgid "Unable to map volume" -msgstr "Не удалось преобразовать том" - -msgid "Unable to map volume." -msgstr "Не удалось связать том." - -msgid "Unable to parse attributes." -msgstr "Не удалось проанализировать атрибуты." - -#, python-format -msgid "" -"Unable to promote replica to primary for volume %s. No secondary copy " -"available." -msgstr "" -"Не удалось сделать копию главной для тома %s. Вспомогательной копии нет." - -msgid "" -"Unable to re-use a host that is not managed by Cinder with " -"use_chap_auth=True," -msgstr "" -"Нельзя повторно использовать хост, который не управляется Cinder с " -"параметром use_chap_auth=True," - -msgid "Unable to re-use host with unknown CHAP credentials configured." -msgstr "" -"Нельзя повторно использовать хост с неизвестными идентификационными данными " -"CHAP." - -#, python-format -msgid "Unable to rename volume %(existing)s to %(newname)s" -msgstr "Не удалось переименовать том %(existing)s в %(newname)s" - -#, python-format -msgid "Unable to retrieve snapshot group with id of %s." -msgstr "Не удалось получить группу моментальных копий с ИД %s." 
-
-#, python-format
-msgid ""
-"Unable to retype %(specname)s, expected to receive current and requested "
-"%(spectype)s values. Value received: %(spec)s"
-msgstr ""
-"Не удается изменить тип %(specname)s, ожидалось получение текущих и "
-"запрошенных значений %(spectype)s. Полученное значение: %(spec)s"
-
-#, python-format
-msgid ""
-"Unable to retype: A copy of volume %s exists. Retyping would exceed the "
-"limit of 2 copies."
-msgstr ""
-"Изменить тип невозможно: существует копия тома %s. Изменение типа приведет к "
-"превышению ограничения в 2 копии."
-
-#, python-format
-msgid ""
-"Unable to retype: Current action needs volume-copy, it is not allowed when "
-"new type is replication. Volume = %s"
-msgstr ""
-"Не удалось изменить тип: для текущего действия требуется том-копия, что не "
-"разрешено, если в качестве нового типа указана репликация. Том: %s"
-
-#, python-format
-msgid ""
-"Unable to set up mirror mode replication for %(vol)s. Exception: %(err)s."
-msgstr ""
-"Не удаётся настроить зеркальную репликацию для %(vol)s. Исключительная "
-"ситуация: %(err)s."
-
-#, python-format
-msgid "Unable to snap Consistency Group %s"
-msgstr "Не удалось создать моментальную копию группы согласования %s"
-
-msgid "Unable to terminate volume connection from backend."
-msgstr "Не удалось разорвать соединение с томом из базовой программы."
-
-#, python-format
-msgid "Unable to terminate volume connection: %(err)s"
-msgstr "Не удалось закрыть соединение с томом: %(err)s"
-
-#, python-format
-msgid "Unable to update consistency group %s"
-msgstr "Не удалось изменить группу согласования %s"
-
-#, python-format
-msgid ""
-"Unable to verify initiator group: %(igGroupName)s in masking view "
-"%(maskingViewName)s. "
-msgstr ""
-"Не удалось проверить группу инициатора %(igGroupName)s в маскирующем "
-"представлении %(maskingViewName)s. "
-
-msgid "Unacceptable parameters."
-msgstr "Недопустимые параметры."
- -#, python-format -msgid "" -"Unexecpted mapping status %(status)s for mapping %(id)s. Attributes: " -"%(attr)s." -msgstr "" -"Непредвиденное состояние %(status)s для связывания %(id)s. Атрибуты: " -"%(attr)s." - -#, python-format -msgid "" -"Unexpected CLI response: header/row mismatch. header: %(header)s, row: " -"%(row)s." -msgstr "" -"Непредвиденный ответ интерфейса командной строки: несоответствие заголовка/" -"строки. Заголовок: %(header)s, строка: %(row)s." - -#, python-format -msgid "" -"Unexpected mapping status %(status)s for mapping%(id)s. Attributes: %(attr)s." -msgstr "" -"Непредвиденное состояние %(status)s для связывания %(id)s. Атрибуты: " -"%(attr)s." - -#, python-format -msgid "Unexpected output. Expected [%(expected)s] but received [%(output)s]" -msgstr "" -"Непредвиденный вывод. Вместо ожидаемого [%(expected)s] получен [%(output)s]" - -msgid "Unexpected response from Nimble API" -msgstr "Непредвиденный ответ от API Nimble" - -msgid "Unexpected response from Tegile IntelliFlash API" -msgstr "Непредвиденный ответ от Tegile IntelliFlash API" - -msgid "Unexpected status code" -msgstr "Непредвиденный код состояния" - -#, python-format -msgid "" -"Unexpected status code from the switch %(switch_id)s with protocol " -"%(protocol)s for url %(page)s. Error: %(error)s" -msgstr "" -"Непредвиденный код состояния коммутатора %(switch_id)s с протоколом " -"%(protocol)s для url %(page)s. Ошибка: %(error)s" - -msgid "Unknown Gluster exception" -msgstr "Неизвестная исключительная ситуация Gluster" - -msgid "Unknown NFS exception" -msgstr "Неизвестная исключительная ситуация NFS" - -msgid "Unknown RemoteFS exception" -msgstr "Неизвестная исключительная ситуация в RemoteFS" - -msgid "Unknown SMBFS exception." -msgstr "Неизвестная исключительная ситуация в SMBFS." 
- -msgid "Unknown Virtuozzo Storage exception" -msgstr "Неизвестная исключительная ситуация Virtuozzo Storage" - -msgid "Unknown action" -msgstr "Неизвестное действие" - -#, python-format -msgid "" -"Unknown if the volume: %s to be managed is already being managed by Cinder. " -"Aborting manage volume. Please add 'cinder_managed' custom schema property " -"to the volume and set its value to False. Alternatively, Set the value of " -"cinder config policy 'zfssa_manage_policy' to 'loose' to remove this " -"restriction." -msgstr "" -"Не удаётся установить, передан ли уже том %s под управление Cinder. Передача " -"под управление прервана. Добавьте пользовательское свойство схемы " -"'cinder_managed' для тома и присвойте ему значение False. Для устранения " -"этого ограничения также можно присвоить свойству 'zfssa_manage_policy' " -"значение 'loose'." - -#, python-format -msgid "" -"Unknown if the volume: %s to be managed is already being managed by Cinder. " -"Aborting manage volume. Please add 'cinder_managed' custom schema property " -"to the volume and set its value to False. Alternatively, set the value of " -"cinder config policy 'zfssa_manage_policy' to 'loose' to remove this " -"restriction." -msgstr "" -"Не удаётся установить, передан ли уже том %s под управление Cinder. Передача " -"под управление прервана. Добавьте пользовательское свойство схемы " -"'cinder_managed' для тома и присвойте ему значение False. Для устранения " -"этого ограничения также можно присвоить свойству 'zfssa_manage_policy' " -"значение 'loose'." - -#, python-format -msgid "Unknown operation %s." -msgstr "Неизвестная операция %s." - -#, python-format -msgid "Unknown or unsupported command %(cmd)s" -msgstr "Неизвестная или неподдерживаемая команда %(cmd)s" - -#, python-format -msgid "Unknown protocol: %(protocol)s." -msgstr "Неизвестный протокол: %(protocol)s." - -#, python-format -msgid "Unknown quota resources %(unknown)s." -msgstr "Неизвестные ресурсы квоты: %(unknown)s." 
-
-msgid "Unknown sort direction, must be 'desc' or 'asc'"
-msgstr "Неизвестное направление сортировки, должно быть 'desc' или 'asc'"
-
-msgid "Unknown sort direction, must be 'desc' or 'asc'."
-msgstr "Неизвестное направление сортировки. Оно должно быть 'desc' или 'asc'."
-
-msgid "Unmanage and cascade delete options are mutually exclusive."
-msgstr "Опции отмены управления и каскадного удаления несовместимы."
-
-msgid "Unmanage volume not implemented."
-msgstr "Отмена управления томом не реализована."
-
-msgid "Unmanaging of snapshots from 'failed-over' volumes is not allowed."
-msgstr ""
-"Отмена управления моментальными копиями томов, переключенных после сбоя, не "
-"разрешена."
-
-msgid "Unmanaging of snapshots from failed-over volumes is not allowed."
-msgstr ""
-"Отмена управления моментальными копиями томов, переключенных после сбоя, не "
-"разрешена."
-
-#, python-format
-msgid "Unrecognized QOS keyword: \"%s\""
-msgstr "Нераспознанное ключевое слово QoS: \"%s\""
-
-#, python-format
-msgid "Unrecognized backing format: %s"
-msgstr "Нераспознанный формат базового файла: %s"
-
-#, python-format
-msgid "Unrecognized read_deleted value '%s'"
-msgstr "Нераспознанное значение read_deleted '%s'"
-
-#, python-format
-msgid "Unset gcs options: %s"
-msgstr "Сбросить параметры gcs: %s"
-
-msgid "Unsupported Content-Type"
-msgstr "Неподдерживаемый тип содержимого"
-
-msgid ""
-"Unsupported Data ONTAP version. Data ONTAP version 7.3.1 and above is "
-"supported."
-msgstr ""
-"Неподдерживаемая версия ONTAP данных. Поддерживается версия ONTAP данных "
-"7.3.1 и выше."
-
-#, python-format
-msgid "Unsupported backup metadata version (%s)"
-msgstr "Неподдерживаемая версия метаданных резервной копии (%s)"
-
-msgid "Unsupported backup metadata version requested"
-msgstr "Запрошена неподдерживаемая версия метаданных резервной копии"
-
-msgid "Unsupported backup verify driver"
-msgstr "Неподдерживаемый драйвер проверки резервной копии"
-
-#, python-format
-msgid ""
-"Unsupported firmware on switch %s. Make sure switch is running firmware v6.4 "
-"or higher"
-msgstr ""
-"Неподдерживаемое встроенное ПО на коммутаторе %s. Убедитесь, что на "
-"коммутаторе работает встроенное ПО версии 6.4 или выше"
-
-#, python-format
-msgid "Unsupported volume format: %s "
-msgstr "Неподдерживаемый формат тома: %s "
-
-msgid "Update QoS policy error."
-msgstr "Ошибка изменения стратегии QoS."
-
-msgid ""
-"Update and delete quota operations can only be made by an admin of immediate "
-"parent or by the CLOUD admin."
-msgstr ""
-"Операции изменения и удаления квоты разрешены только для администратора "
-"непосредственного родительского объекта и для администратора облака."
-
-msgid ""
-"Update and delete quota operations can only be made to projects in the same "
-"hierarchy of the project in which users are scoped to."
-msgstr ""
-"Операции изменения и удаления квоты разрешены только для проектов в той же "
-"иерархии проекта, где находятся пользователи."
-
-msgid "Update list, doesn't include volume_id"
-msgstr "Список обновления не включает volume_id"
-
-msgid "Updated At"
-msgstr "Обновлено"
-
-msgid "Upload to glance of attached volume is not supported."
-msgstr "Передача в glance подключенного тома не поддерживается."
-
-msgid "Use ALUA to associate initiator to host error."
-msgstr "Ошибка использования ALUA для связывания инициатора с хостом."
-
-msgid ""
-"Use CHAP to associate initiator to host error. Please check the CHAP "
-"username and password."
-msgstr ""
-"Ошибка использования CHAP для связывания инициатора с хостом. 
Проверьте имя " -"пользователя и пароль CHAP." - -msgid "User ID" -msgstr "ID пользователя" - -msgid "User does not have admin privileges" -msgstr "Пользователь не имеет административных привилегий" - -msgid "User not authorized to perform WebDAV operations." -msgstr "У пользователя нет прав доступа для выполнения операций WebDAV." - -msgid "UserName is not configured." -msgstr "UserName не настроен." - -msgid "UserPassword is not configured." -msgstr "UserPassword не настроен." - -msgid "V2 rollback, volume is not in any storage group." -msgstr "Откат V2: том не принадлежит ни одной группе носителей." - -msgid "V3 rollback" -msgstr "Откат V3" - -msgid "VF is not enabled." -msgstr "VF не включен." - -#, python-format -msgid "VV Set %s does not exist." -msgstr "Набор VV %s не существует." - -#, python-format -msgid "Valid consumer of QoS specs are: %s" -msgstr "Допустимый приемник спецификаций QoS: %s" - -#, python-format -msgid "Valid control location are: %s" -msgstr "Допустимое расположение управления: %s" - -#, python-format -msgid "Validate volume connection failed (error: %(err)s)." -msgstr "Сбой проверки соединения с томом (ошибка: %(err)s)." - -#, python-format -msgid "" -"Value \"%(value)s\" is not valid for configuration option \"%(option)s\"" -msgstr "" -"Значение \"%(value)s\" недопустимо для параметра конфигурации \"%(option)s\"" - -#, python-format -msgid "Value %(param)s for %(param_string)s is not a boolean." -msgstr "Значение %(param)s для %(param_string)s не булевское." - -msgid "Value required for 'scality_sofs_config'" -msgstr "Требуется значение для 'scality_sofs_config'" - -#, python-format -msgid "ValueError: %s" -msgstr "ValueError: %s" - -#, python-format -msgid "Vdisk %(name)s not involved in mapping %(src)s -> %(tgt)s." -msgstr "Vdisk %(name)s не участвует в связи %(src)s -> %(tgt)s." - -#, python-format -msgid "" -"Version %(req_ver)s is not supported by the API. Minimum is %(min_ver)s and " -"maximum is %(max_ver)s." 
-msgstr "" -"Версия %(req_ver)s не поддерживается в API. Минимальная требуемая версия: " -"%(min_ver)s, максимальная: %(max_ver)s." - -#, python-format -msgid "VersionedObject %s cannot retrieve object by id." -msgstr "VersionedObject %s не удается получить объект по ИД." - -#, python-format -msgid "VersionedObject %s does not support conditional update." -msgstr "VersionedObject %s не поддерживает условное обновление." - -#, python-format -msgid "Virtual volume '%s' doesn't exist on array." -msgstr "Виртуальный том '%s' не существует в массиве." - -#, python-format -msgid "Vol copy job for dest %s failed." -msgstr "Не удалось выполнить задание копирования тома для целевого тома %s." - -#, python-format -msgid "Volume %(deviceID)s not found." -msgstr "Том %(deviceID)s не найден." - -#, python-format -msgid "" -"Volume %(name)s not found on the array. Cannot determine if there are " -"volumes mapped." -msgstr "" -"Том %(name)s не найден в массиве. Не удается определить, преобразованы ли " -"тома." - -#, python-format -msgid "Volume %(name)s was created in VNX, but in %(state)s state." -msgstr "Том %(name)s создан в VNX, но в состоянии %(state)s." - -#, python-format -msgid "Volume %(vol)s could not be created in pool %(pool)s." -msgstr "Не удалось создать том %(vol)s в пуле %(pool)s." - -#, python-format -msgid "Volume %(vol1)s does not match with snapshot.volume_id %(vol2)s." -msgstr "Том %(vol1)s не соответствует snapshot.volume_id %(vol2)s." - -#, python-format -msgid "" -"Volume %(vol_id)s status must be available to update readonly flag, but " -"current status is: %(vol_status)s." -msgstr "" -"Том %(vol_id)s должен находиться в состоянии available, чтобы можно было " -"изменить флаг readonly, однако текущее состояние - %(vol_status)s." - -#, python-format -msgid "" -"Volume %(vol_id)s status must be available, but current status is: " -"%(vol_status)s." -msgstr "" -"Состояние %(vol_id)s тома должно быть available, однако текущее состояние - " -"%(vol_status)s." 
- -#, python-format -msgid "Volume %(volume_id)s could not be found." -msgstr "Том %(volume_id)s не найден." - -#, python-format -msgid "" -"Volume %(volume_id)s has no administration metadata with key " -"%(metadata_key)s." -msgstr "" -"У тома %(volume_id)s нет метаданных администрирования с ключом " -"%(metadata_key)s." - -#, python-format -msgid "Volume %(volume_id)s has no metadata with key %(metadata_key)s." -msgstr "Том %(volume_id)s не имеет метаданных с ключом %(metadata_key)s." - -#, python-format -msgid "" -"Volume %(volume_id)s is currently mapped to unsupported host group %(group)s" -msgstr "Том %(volume_id)s связан с неподдерживаемой группой хостов %(group)s" - -#, python-format -msgid "Volume %(volume_id)s is not currently mapped to host %(host)s" -msgstr "Том %(volume_id)s не связан с хостом %(host)s" - -#, python-format -msgid "Volume %(volume_id)s is still attached, detach volume first." -msgstr "Том %(volume_id)s все еще присоединен, сначала отсоедините его." - -#, python-format -msgid "Volume %(volume_id)s replication error: %(reason)s" -msgstr "Ошибка репликации тома %(volume_id)s: %(reason)s" - -#, python-format -msgid "Volume %(volume_name)s is busy." -msgstr "Том %(volume_name)s занят." - -#, python-format -msgid "Volume %s could not be created from source volume." -msgstr "Не удалось создать том %s из исходного тома." - -#, python-format -msgid "Volume %s could not be created on shares." -msgstr "Не удалось создать том %s в общих ресурсах." - -#, python-format -msgid "Volume %s could not be created." -msgstr "Не удалось создать том %s." - -#, python-format -msgid "Volume %s does not exist in Nexenta SA" -msgstr "Том %s не существует в Nexenta SA" - -#, python-format -msgid "Volume %s does not exist in Nexenta Store appliance" -msgstr "Том %s не существует в устройстве Nexenta Store" - -#, python-format -msgid "Volume %s does not exist on the array." -msgstr "Том %s не существует в массиве." 
- -#, python-format -msgid "Volume %s does not have provider_location specified, skipping." -msgstr "Для тома %s не указан параметр provider_location - пропущено." - -#, python-format -msgid "Volume %s doesn't exist on array." -msgstr "Том %s не существует в массиве." - -#, python-format -msgid "Volume %s doesn't exist on the ZFSSA backend." -msgstr "Том %s не существует в базовой системе ZFSSA." - -#, python-format -msgid "Volume %s is already managed by OpenStack." -msgstr "Том %s уже находится под управлением OpenStack." - -#, python-format -msgid "" -"Volume %s is not of replicated type. This volume needs to be of a volume " -"type with the extra spec replication_enabled set to ' True' to support " -"replication actions." -msgstr "" -"Тип тома %s не поддерживает репликацию. Для поддержки репликации в " -"параметрах типа тома необходимо указать replication_enabled со значением " -"' True'." - -#, python-format -msgid "" -"Volume %s is online. Set volume to offline for managing using OpenStack." -msgstr "" -"Том %s включен. Выключите его, чтобы передать под управление OpenStack." - -#, python-format -msgid "Volume %s must not be part of a consistency group." -msgstr "Том %s не должен быть частью группы согласования." - -#, python-format -msgid "Volume %s not found." -msgstr "Том %s не найден." - -#, python-format -msgid "Volume %s: Error trying to extend volume" -msgstr "Том %s: ошибка расширения тома" - -#, python-format -msgid "Volume (%s) already exists on array" -msgstr "Том (%s) уже существует в массиве" - -#, python-format -msgid "Volume (%s) already exists on array." -msgstr "Том (%s) уже есть в массиве." - -#, python-format -msgid "Volume Group %s does not exist" -msgstr "Группа томов %s не существует" - -#, python-format -msgid "Volume Type %(id)s already exists." -msgstr "Тип тома %(id)s уже существует." - -#, python-format -msgid "" -"Volume Type %(volume_type_id)s deletion is not allowed with volumes present " -"with the type." 
-msgstr "" -"Удаление типа тома %(volume_type_id)s запрещено, если есть тома с таким " -"типом." - -#, python-format -msgid "" -"Volume Type %(volume_type_id)s has no extra specs with key " -"%(extra_specs_key)s." -msgstr "" -"Тип тома %(volume_type_id)s не имеет дополнительных особенностей с ключом " -"%(extra_specs_key)s." - -msgid "Volume Type id must not be None." -msgstr "ИД типа тома не должен быть None." - -#, python-format -msgid "" -"Volume [%(cb_vol)s] was not found at CloudByte storage corresponding to " -"OpenStack volume [%(ops_vol)s]." -msgstr "" -"Том [%(cb_vol)s] не найден в носителе CloudByte, соответствующем тому " -"OpenStack [%(ops_vol)s]." - -#, python-format -msgid "Volume [%s] not found in CloudByte storage." -msgstr "Том [%s] не найден в хранилище CloudByte." - -#, python-format -msgid "Volume attachment could not be found with filter: %(filter)s ." -msgstr "Подключение тома не удалось найти с фильтром %(filter)s ." - -#, python-format -msgid "Volume backend config is invalid: %(reason)s" -msgstr "Недопустимая конфигурация базовой программы тома: %(reason)s" - -msgid "Volume by this name already exists" -msgstr "Том с таким именем уже существует" - -msgid "Volume cannot be restored since it contains snapshots." -msgstr "Том невозможно восстановить, поскольку он содержит моментальные копии." - -msgid "Volume create failed while extracting volume ref." -msgstr "Ошибка создания тома при извлечении ссылки на том." - -#, python-format -msgid "Volume device file path %s does not exist." -msgstr "Путь к файлу устройства тома %s не существует." - -#, python-format -msgid "Volume device not found at %(device)s." -msgstr "Не найдено устройство тома в %(device)s." - -#, python-format -msgid "Volume driver %s not initialized." -msgstr "Драйвер тома %s не инициализирован." - -msgid "Volume driver not ready." -msgstr "Драйвер тома не готов." 
- -#, python-format -msgid "Volume driver reported an error: %(message)s" -msgstr "Драйвер тома выдал ошибку: %(message)s" - -msgid "Volume has a temporary snapshot that can't be deleted at this time." -msgstr "" -"У тома есть временная моментальная копия, которую невозможно удалить в " -"данный момент." - -msgid "Volume has children and cannot be deleted!" -msgstr "Невозможно удалить том, у которого есть дочерние объекты." - -#, python-format -msgid "Volume is attached to a server. (%s)" -msgstr "Том подключен к серверу. (%s)" - -msgid "Volume is in-use." -msgstr "Том используется." - -msgid "Volume is not available." -msgstr "Том недоступен." - -msgid "Volume is not local to this node" -msgstr "Том не является локальным для этого узла" - -msgid "Volume is not local to this node." -msgstr "Том не является локальным для этого узла." - -msgid "" -"Volume metadata backup requested but this driver does not yet support this " -"feature." -msgstr "" -"Запрошено резервное копирование метаданных тома, но этот драйвер еще не " -"поддерживает эту функцию." - -#, python-format -msgid "Volume migration failed: %(reason)s" -msgstr "Не удалось выполнить перенос тома: %(reason)s" - -msgid "Volume must be available" -msgstr "Том должен быть доступен" - -msgid "Volume must be in the same availability zone as the snapshot" -msgstr "Том должен находиться в одной зоне доступности с моментальной копией" - -msgid "Volume must be in the same availability zone as the source volume" -msgstr "Том должен находиться в одной зоне доступности с исходным томом" - -msgid "Volume must have a volume type" -msgstr "Должен быть задан тип тома" - -msgid "Volume must not be replicated." -msgstr "Репликация тома запрещена." - -msgid "Volume must not have snapshots." -msgstr "Том не должен иметь моментальных копий." - -#, python-format -msgid "Volume not found for instance %(instance_id)s." -msgstr "Не найден том для копии %(instance_id)s." - -msgid "Volume not found on configured storage backend." 
-msgstr "Том не найден в настроенной базовой системе хранения." - -msgid "" -"Volume not found on configured storage backend. If your volume name contains " -"\"/\", please rename it and try to manage again." -msgstr "" -"Том не найден в настроенной базовой системе хранения. Если имя тома содержит " -"\"/\", переименуйте его и повторите операцию." - -msgid "Volume not found on configured storage pools." -msgstr "Не найден том в настроенных пулах памяти." - -msgid "Volume not found." -msgstr "Том не найден." - -msgid "Volume not unique." -msgstr "Том не является уникальным." - -msgid "Volume not yet assigned to host." -msgstr "Том не связан с хостом." - -msgid "Volume reference must contain source-name element." -msgstr "Ссылка на том должна содержать элемент source-name." - -#, python-format -msgid "Volume replication for %(volume_id)s could not be found." -msgstr "Копия тома %(volume_id)s не найдена." - -#, python-format -msgid "Volume service %s failed to start." -msgstr "Не удалось запустить службу томов %s." - -msgid "Volume should have agent-type set as None." -msgstr "В параметре agent-type тома должно быть указано значение None." - -#, python-format -msgid "" -"Volume size %(volume_size)sGB cannot be smaller than the image minDisk size " -"%(min_disk)sGB." -msgstr "" -"Размер тома (%(volume_size)s ГБ) не может быть меньше minDisk образа " -"(%(min_disk)s ГБ)." - -#, python-format -msgid "Volume size '%(size)s' must be an integer and greater than 0" -msgstr "Размер тома '%(size)s' должен быть целым числом, превышающим 0" - -#, python-format -msgid "" -"Volume size '%(size)s'GB cannot be smaller than original volume size " -"%(source_size)sGB. They must be >= original volume size." -msgstr "" -"Размер тома ('%(size)s' ГБ) не может быть меньше размера исходного тома " -"(%(source_size)s ГБ). Размер тома должен быть не меньше размера исходного " -"тома." 
-
-#, python-format
-msgid ""
-"Volume size '%(size)s'GB cannot be smaller than the snapshot size "
-"%(snap_size)sGB. They must be >= original snapshot size."
-msgstr ""
-"Размер тома ('%(size)s' ГБ) не может быть меньше размера моментальной копии "
-"(%(snap_size)s ГБ). Размер тома должен быть не меньше размера исходной "
-"моментальной копии."
-
-msgid "Volume size increased since the last backup. Do a full backup."
-msgstr ""
-"Размер тома увеличился с момента последнего резервного копирования. "
-"Необходимо полное резервное копирование."
-
-msgid "Volume size must be a multiple of 1 GB."
-msgstr "Размер тома должен быть кратным 1 ГБ."
-
-msgid "Volume size must multiple of 1 GB."
-msgstr "Размер тома должен быть кратен 1 ГБ."
-
-#, python-format
-msgid "Volume status must be \"available\" or \"in-use\" for snapshot. (is %s)"
-msgstr ""
-"Состояние тома должно быть available или in-use для моментальной копии. "
-"(Фактическое состояние: %s)"
-
-msgid "Volume status must be \"available\" or \"in-use\"."
-msgstr "Состояние тома должно быть available или in-use."
-
-#, python-format
-msgid "Volume status must be %s to reserve."
-msgstr "Для резервирования том должен находиться в состоянии %s."
-
-msgid "Volume status must be 'available'."
-msgstr "Состояние тома должно быть available."
-
-msgid "Volume to Initiator Group mapping already exists"
-msgstr "Связь тома с группой инициаторов уже существует"
-
-#, python-format
-msgid ""
-"Volume to be backed up must be available or in-use, but the current status "
-"is \"%s\"."
-msgstr ""
-"Том для создания резервной копии должен быть доступным или используемым, но "
-"текущее состояние - \"%s\"."
-
-msgid "Volume to be restored to must be available"
-msgstr "Том для восстановления должен быть доступен"
-
-#, python-format
-msgid "Volume type %(volume_type_id)s could not be found."
-msgstr "Тип тома %(volume_type_id)s не может быть найден."
- -#, python-format -msgid "Volume type ID '%s' is invalid." -msgstr "ИД типа тома '%s' недопустим." - -#, python-format -msgid "" -"Volume type access for %(volume_type_id)s / %(project_id)s combination " -"already exists." -msgstr "" -"Права доступа к типу тома для комбинации %(volume_type_id)s / %(project_id)s " -"уже существуют." - -#, python-format -msgid "" -"Volume type access not found for %(volume_type_id)s / %(project_id)s " -"combination." -msgstr "" -"Не найдены права доступа к комбинации типа тома %(volume_type_id)s / " -"%(project_id)s ." - -#, python-format -msgid "Volume type encryption for type %(type_id)s already exists." -msgstr "Шифрование типа тома для типа %(type_id)s уже существует." - -#, python-format -msgid "Volume type encryption for type %(type_id)s does not exist." -msgstr "Шифрование типа тома для типа %(type_id)s не существует." - -msgid "Volume type name can not be empty." -msgstr "Имя типа тома не должно быть пустым." - -#, python-format -msgid "Volume type with name %(volume_type_name)s could not be found." -msgstr "Тип тома под названием %(volume_type_name)s не может быть найден." - -#, python-format -msgid "" -"Volume: %(volumeName)s is not a concatenated volume. You can only perform " -"extend on concatenated volume. Exiting..." -msgstr "" -"Том: %(volumeName)s не является объединенным томом. Выполнить Extend можно " -"только над объединенным томом. Выполняется выход..." - -#, python-format -msgid "Volume: %(volumeName)s was not added to storage group %(sgGroupName)s." -msgstr "" -"Том %(volumeName)s не был добавлен в группу носителей %(sgGroupName)s. " - -#, python-format -msgid "Volume: %s is already being managed by Cinder." -msgstr "Том %s уже находится под управлением Cinder." - -msgid "" -"Volumes/account exceeded on both primary and secondary SolidFire accounts." -msgstr "" -"Число томов превышено и для основной, и для вторичной учетной записи " -"SolidFire." 
-
-#, python-format
-msgid ""
-"VzStorage config 'vzstorage_used_ratio' invalid. Must be > 0 and <= 1.0: %s."
-msgstr ""
-"Конфигурация VzStorage 'vzstorage_used_ratio' недопустима. Значение должно "
-"быть больше 0 и не больше 1,0: %s."
-
-#, python-format
-msgid "VzStorage config file at %(config)s doesn't exist."
-msgstr "Файл конфигурации VzStorage в %(config)s не существует."
-
-msgid "Wait replica complete timeout."
-msgstr "Тайм-аут ожидания завершения репликации."
-
-#, python-format
-msgid "Wait synchronize failed. Running status: %s."
-msgstr "Ошибка ожидания синхронизации. Состояние выполнения: %s."
-
-msgid ""
-"Waiting for all nodes to join cluster. Ensure all sheep daemons are running."
-msgstr ""
-"Ожидание добавления всех узлов в кластер. Убедитесь, что работают все демоны "
-"sheep."
-
-msgid "We should not do switch over on primary array."
-msgstr "Переключение для основного массива не разрешено."
-
-msgid "X-IO Volume Driver exception!"
-msgstr "Исключительная ситуация драйвера тома X-IO!"
-
-msgid "XtremIO not configured correctly, no iscsi portals found"
-msgstr "Неправильно настроен XtremIO: не найдены порталы iSCSI"
-
-msgid "XtremIO not initialized correctly, no clusters found"
-msgstr "XtremIO неправильно инициализирован, кластеры не найдены"
-
-msgid "You must implement __call__"
-msgstr "Отсутствует реализация __call__"
-
-msgid ""
-"You must install hpe3parclient before using 3PAR drivers. Run \"pip install "
-"python-3parclient\" to install the hpe3parclient."
-msgstr ""
-"Необходимо установить hpe3parclient, прежде чем использовать драйверы 3PAR. "
-"Выполните команду \"pip install python-3parclient\" для установки "
-"hpe3parclient."
-
-msgid "You must supply an array in your EMC configuration file."
-msgstr "Необходимо указать массив в файле конфигурации EMC."
-
-#, python-format
-msgid ""
-"Your original size: %(originalVolumeSize)s GB is greater than: %(newSize)s "
-"GB. Only Extend is supported. Exiting..."
-msgstr "" -"Исходный размер, %(originalVolumeSize)s ГБ, превышает %(newSize)s ГБ. " -"Поддерживается только Extend. Выполняется выход..." - -#, python-format -msgid "ZeroDivisionError: %s" -msgstr "ZeroDivisionError: %s" - -msgid "Zone" -msgstr "Зона" - -#, python-format -msgid "Zoning Policy: %s, not recognized" -msgstr "Стратегия распределения по зонам %s не распознана" - -#, python-format -msgid "_create_and_copy_vdisk_data: Failed to get attributes for vdisk %s." -msgstr "" -"_create_and_copy_vdisk_data: не удалось получить атрибуты для vdisk %s." - -msgid "_create_host failed to return the host name." -msgstr "Функции _create_host не удалось вернуть имя хоста." - -msgid "" -"_create_host: Can not translate host name. Host name is not unicode or " -"string." -msgstr "" -"_create_host: Не удалось преобразовать имя хоста. Имя хоста не типа unicode " -"или string." - -msgid "_create_host: No connector ports." -msgstr "_create_host: отсутствуют порты коннекторов." - -msgid "_create_local_cloned_volume, Replication Service not found." -msgstr "_create_local_cloned_volume, служба репликации не найдена." - -#, python-format -msgid "" -"_create_local_cloned_volume, volumename: %(volumename)s, sourcevolumename: " -"%(sourcevolumename)s, source volume instance: %(source_volume)s, target " -"volume instance: %(target_volume)s, Return code: %(rc)lu, Error: " -"%(errordesc)s." -msgstr "" -"_create_local_cloned_volume, имя тома: %(volumename)s, имя исходного тома: " -"%(sourcevolumename)s, экземпляр исходного тома: %(source_volume)s, экземпляр " -"целевого тома: %(target_volume)s, код возврата: %(rc)lu, ошибка: " -"%(errordesc)s." - -#, python-format -msgid "" -"_create_vdisk %(name)s - did not find success message in CLI output.\n" -" stdout: %(out)s\n" -" stderr: %(err)s" -msgstr "" -"_create_vdisk %(name)s - в выводе CLI не найдено сообщение об успехе.\n" -" stdout: %(out)s\n" -" stderr: %(err)s" - -msgid "_create_volume_name, id_code is None." 
-msgstr "_create_volume_name, id_code равен None."
-
-msgid "_delete_copysession, Cannot find Replication Service"
-msgstr "_delete_copysession, не найдена служба репликации"
-
-#, python-format
-msgid ""
-"_delete_copysession, copy session type is undefined! copy session: "
-"%(cpsession)s, copy type: %(copytype)s."
-msgstr ""
-"_delete_copysession, не определен тип сеанса копирования, сеанс копирования: "
-"%(cpsession)s, тип копирования: %(copytype)s."
-
-#, python-format
-msgid ""
-"_delete_copysession, copysession: %(cpsession)s, operation: %(operation)s, "
-"Return code: %(rc)lu, Error: %(errordesc)s."
-msgstr ""
-"_delete_copysession, сеанс копирования: %(cpsession)s, операция: "
-"%(operation)s, код возврата: %(rc)lu, ошибка: %(errordesc)s."
-
-#, python-format
-msgid ""
-"_delete_volume, volumename: %(volumename)s, Return code: %(rc)lu, Error: "
-"%(errordesc)s."
-msgstr ""
-"_delete_volume, имя тома: %(volumename)s, код возврата: %(rc)lu, ошибка: "
-"%(errordesc)s."
-
-#, python-format
-msgid ""
-"_delete_volume, volumename: %(volumename)s, Storage Configuration Service "
-"not found."
-msgstr ""
-"_delete_volume, имя тома: %(volumename)s, служба настройки хранилища не "
-"найдена."
-
-#, python-format
-msgid ""
-"_exec_eternus_service, classname: %(classname)s, InvokeMethod, cannot "
-"connect to ETERNUS."
-msgstr ""
-"_exec_eternus_service, имя класса: %(classname)s, InvokeMethod, не удается "
-"подключиться к ETERNUS."
-
-msgid "_extend_volume_op: Extending a volume with snapshots is not supported."
-msgstr ""
-"_extend_volume_op: расширение тома с моментальными копиями не поддерживается."
-
-#, python-format
-msgid ""
-"_find_affinity_group, connector: %(connector)s, Associators: "
-"FUJITSU_AuthorizedTarget, cannot connect to ETERNUS."
-msgstr ""
-"_find_affinity_group, коннектор: %(connector)s, Associators: "
-"FUJITSU_AuthorizedTarget, не удается подключиться к ETERNUS."
- -#, python-format -msgid "" -"_find_affinity_group, connector: %(connector)s, EnumerateInstanceNames, " -"cannot connect to ETERNUS." -msgstr "" -"_find_affinity_group, коннектор: %(connector)s, EnumerateInstanceNames, не " -"удается подключиться к ETERNUS." - -#, python-format -msgid "" -"_find_affinity_group,connector: %(connector)s,AssocNames: " -"FUJITSU_ProtocolControllerForUnit, cannot connect to ETERNUS." -msgstr "" -"_find_affinity_group, коннектор: %(connector)s,AssocNames: " -"FUJITSU_ProtocolControllerForUnit, не удается подключиться к ETERNUS." - -#, python-format -msgid "" -"_find_copysession, ReferenceNames, vol_instance: %(vol_instance_path)s, " -"Cannot connect to ETERNUS." -msgstr "" -"_find_copysession, ReferenceNames, экземпляр тома: %(vol_instance_path)s, не " -"удается подключиться к ETERNUS." - -#, python-format -msgid "" -"_find_eternus_service, classname: %(classname)s, EnumerateInstanceNames, " -"cannot connect to ETERNUS." -msgstr "" -"_find_eternus_service, имя класса: %(classname)s, EnumerateInstanceNames, не " -"удается подключиться к ETERNUS." - -#, python-format -msgid "_find_initiator_names, connector: %(connector)s, initiator not found." -msgstr "_find_initiator_names, коннектор: %(connector)s, инициатор не найден." - -#, python-format -msgid "" -"_find_lun, volumename: %(volumename)s, EnumerateInstanceNames, cannot " -"connect to ETERNUS." -msgstr "" -"_find_lun, имя тома: %(volumename)s, EnumerateInstanceNames, не удается " -"подключиться к ETERNUS." - -#, python-format -msgid "" -"_find_pool, eternus_pool:%(eternus_pool)s, EnumerateInstances, cannot " -"connect to ETERNUS." -msgstr "" -"_find_pool, пул eternus:%(eternus_pool)s, EnumerateInstances, не удается " -"подключиться к ETERNUS." - -#, python-format -msgid "" -"_get_drvcfg, filename: %(filename)s, tagname: %(tagname)s, data is None!! " -"Please edit driver configuration file and correct." 
-msgstr "" -"_get_drvcfg, имя файла: %(filename)s, имя тега: %(tagname)s, пустые данные. " -"Исправьте ошибку в файле конфигурации драйвера." - -#, python-format -msgid "" -"_get_eternus_connection, filename: %(filename)s, ip: %(ip)s, port: %(port)s, " -"user: %(user)s, passwd: ****, url: %(url)s, FAILED!!." -msgstr "" -"_get_eternus_connection, имя файла: %(filename)s, ip: %(ip)s, порт: " -"%(port)s, пользователь: %(user)s, пароль: ****, url: %(url)s, ошибка." - -#, python-format -msgid "" -"_get_eternus_iscsi_properties, iscsiip list: %(iscsiip_list)s, iqn not found." -msgstr "" -"_get_eternus_iscsi_properties, iscsiip list: %(iscsiip_list)s, iqn не найден." - -#, python-format -msgid "" -"_get_eternus_iscsi_properties, iscsiip: %(iscsiip)s, AssociatorNames: " -"CIM_BindsTo, cannot connect to ETERNUS." -msgstr "" -"_get_eternus_iscsi_properties, iscsiip: %(iscsiip)s, AssociatorNames: " -"CIM_BindsTo, не удается подключиться к ETERNUS." - -#, python-format -msgid "" -"_get_eternus_iscsi_properties, iscsiip: %(iscsiip)s, EnumerateInstanceNames, " -"cannot connect to ETERNUS." -msgstr "" -"_get_eternus_iscsi_properties, iscsiip: %(iscsiip)s, EnumerateInstanceNames, " -"не удается подключиться к ETERNUS." - -#, python-format -msgid "" -"_get_eternus_iscsi_properties, iscsiip: %(iscsiip)s, GetInstance, cannot " -"connect to ETERNUS." -msgstr "" -"_get_eternus_iscsi_properties, iscsiip: %(iscsiip)s, GetInstance, не удается " -"подключиться к ETERNUS." - -#, python-format -msgid "" -"_get_hdr_dic: attribute headers and values do not match.\n" -" Headers: %(header)s\n" -" Values: %(row)s." -msgstr "" -"_get_hdr_dic: заголовки и значения атрибутов не совпадают.\n" -" Заголовки: %(header)s\n" -" Значения: %(row)s." - -msgid "_get_host_from_connector failed to return the host name for connector." -msgstr "" -"Функции _get_host_from_connector не удалось вернуть имя хоста для коннектора." 
- -#, python-format -msgid "" -"_get_mapdata_fc, getting host-affinity from aglist/vol_instance failed, " -"affinitygroup: %(ag)s, ReferenceNames, cannot connect to ETERNUS." -msgstr "" -"_get_mapdata_fc, ошибка получения host-affinity из aglist/vol_instance, " -"группа привязки: %(ag)s, ReferenceNames, не удается подключиться к ETERNUS." - -#, python-format -msgid "" -"_get_mapdata_fc, getting host-affinity instance failed, volmap: %(volmap)s, " -"GetInstance, cannot connect to ETERNUS." -msgstr "" -"_get_mapdata_fc, ошибка получения экземпляра host-affinity, volmap: " -"%(volmap)s, GetInstance, не удается подключиться к ETERNUS." - -msgid "" -"_get_mapdata_iscsi, Associators: FUJITSU_SAPAvailableForElement, cannot " -"connect to ETERNUS." -msgstr "" -"_get_mapdata_iscsi, Associators: FUJITSU_SAPAvailableForElement, не удается " -"подключиться к ETERNUS." - -#, python-format -msgid "" -"_get_mapdata_iscsi, affinitygroup: %(ag)s, ReferenceNames, cannot connect to " -"ETERNUS." -msgstr "" -"_get_mapdata_iscsi, группа привязки: %(ag)s, ReferenceNames, не удается " -"подключиться к ETERNUS." - -#, python-format -msgid "" -"_get_mapdata_iscsi, vol_instance: %(vol_instance)s, ReferenceNames: " -"CIM_ProtocolControllerForUnit, cannot connect to ETERNUS." -msgstr "" -"_get_mapdata_iscsi, экземпляр тома: %(vol_instance)s, ReferenceNames: " -"CIM_ProtocolControllerForUnit, не удается подключиться к ETERNUS." - -#, python-format -msgid "" -"_get_mapdata_iscsi, volmap: %(volmap)s, GetInstance, cannot connect to " -"ETERNUS." -msgstr "" -"_get_mapdata_iscsi, volmap: %(volmap)s, GetInstance, не удается подключиться " -"к ETERNUS." - -msgid "_get_target_port, EnumerateInstances, cannot connect to ETERNUS." -msgstr "" -"_get_target_port, EnumerateInstances, не удается подключиться к ETERNUS." - -#, python-format -msgid "_get_target_port, protcol: %(protocol)s, target_port not found." -msgstr "_get_target_port, протокол: %(protocol)s, целевой порт не найден." 
- -#, python-format -msgid "_get_unmanaged_replay: Cannot find snapshot named %s" -msgstr "_get_unmanaged_replay: не найдена моментальная копия %s" - -#, python-format -msgid "_get_unmanaged_replay: Cannot find volume id %s" -msgstr "_get_unmanaged_replay: не найден том с ИД %s" - -msgid "_get_unmanaged_replay: Must specify source-name." -msgstr "_get_unmanaged_replay: необходимо указать source-name." - -msgid "" -"_get_vdisk_map_properties: Could not get FC connection information for the " -"host-volume connection. Is the host configured properly for FC connections?" -msgstr "" -"_get_vdisk_map_properties: не удалось получить информацию о соединении FC " -"для соединения хост-том. Правильно ли настроен хост для соединений FC?" - -#, python-format -msgid "" -"_get_vdisk_map_properties: No node found in I/O group %(gid)s for volume " -"%(vol)s." -msgstr "" -"_get_vdisk_map_properties: не найдены узлы в группе ввода-вывода %(gid)s для " -"тома %(vol)s." - -#, python-format -msgid "" -"_map_lun, vol_instance.path:%(vol)s, volumename: %(volumename)s, volume_uid: " -"%(uid)s, initiator: %(initiator)s, target: %(tgt)s, aglist: %(aglist)s, " -"Storage Configuration Service not found." -msgstr "" -"_map_lun, vol_instance.path:%(vol)s, имя тома: %(volumename)s, uid тома: " -"%(uid)s, инициатор: %(initiator)s, цель: %(tgt)s, aglist: %(aglist)s, служба " -"настройки хранилища не найдена." - -#, python-format -msgid "" -"_unmap_lun, vol_instance.path: %(volume)s, volumename: %(volumename)s, " -"volume_uid: %(uid)s, aglist: %(aglist)s, Controller Configuration Service " -"not found." -msgstr "" -"_unmap_lun, vol_instance.path: %(volume)s, имя тома: %(volumename)s, uid " -"тома: %(uid)s, aglist: %(aglist)s, служба настройки контроллера не найдена." - -#, python-format -msgid "" -"_unmap_lun, volumename: %(volumename)s, volume_uid: %(volume_uid)s, " -"AffinityGroup: %(ag)s, Return code: %(rc)lu, Error: %(errordesc)s." 
-msgstr "" -"_unmap_lun, имя тома: %(volumename)s, uid тома: %(volume_uid)s, " -"AffinityGroup: %(ag)s, код возврата: %(rc)lu, ошибка: %(errordesc)s." - -#, python-format -msgid "" -"_unmap_lun,vol_instance.path: %(volume)s, AssociatorNames: " -"CIM_ProtocolControllerForUnit, cannot connect to ETERNUS." -msgstr "" -"_unmap_lun,vol_instance.path: %(volume)s, AssociatorNames: " -"CIM_ProtocolControllerForUnit, не удается подключиться к ETERNUS." - -msgid "_update_volume_stats: Could not get storage pool data." -msgstr "_update_volume_stats: не удалось получить данные пула памяти." - -#, python-format -msgid "" -"_wait_for_copy_complete, cpsession: %(cpsession)s, copysession state is " -"BROKEN." -msgstr "" -"_wait_for_copy_complete, cpsession: %(cpsession)s, состояние copysession: " -"BROKEN." - -#, python-format -msgid "" -"add_vdisk_copy failed: A copy of volume %s exists. Adding another copy would " -"exceed the limit of 2 copies." -msgstr "" -"Операция add_vdisk_copy не выполнена: существует копия тома %s. Добавление " -"другой копии приведет к превышению ограничения в 2 копии." - -msgid "add_vdisk_copy started without a vdisk copy in the expected pool." -msgstr "" -"Функция add_vdisk_copy начала выполнение без копии vdisk в ожидаемом пуле." - -#, python-format -msgid "all_tenants must be a boolean, got '%s'." -msgstr "Значение all_tenants должно быть булевским. Получено: %s." 
- -msgid "already created" -msgstr "уже создан" - -msgid "already_created" -msgstr "already_created" - -msgid "attach snapshot from remote node" -msgstr "подключить моментальную копию с удаленного узла" - -#, python-format -msgid "attribute %s not lazy-loadable" -msgstr "атрибут %s не с отложенной загрузкой" - -#, python-format -msgid "" -"backup: %(vol_id)s failed to create device hardlink from %(vpath)s to " -"%(bpath)s.\n" -"stdout: %(out)s\n" -" stderr: %(err)s" -msgstr "" -"При создании резервной копии %(vol_id)s не удалось создать жесткую ссылку на " -"устройство с %(vpath)s на %(bpath)s.\n" -"stdout: %(out)s\n" -" stderr: %(err)s" - -#, python-format -msgid "" -"backup: %(vol_id)s failed to obtain backup success notification from " -"server.\n" -"stdout: %(out)s\n" -" stderr: %(err)s" -msgstr "" -"При создании резервной копии %(vol_id)s не удалось получить уведомление об " -"успешности от сервера.\n" -"stdout: %(out)s\n" -" stderr: %(err)s" - -#, python-format -msgid "" -"backup: %(vol_id)s failed to run dsmc due to invalid arguments on " -"%(bpath)s.\n" -"stdout: %(out)s\n" -" stderr: %(err)s" -msgstr "" -"Во время выполнения резервного копирования %(vol_id)s не удалось выполнить " -"команду dsmc: недопустимые аргументы в %(bpath)s.\n" -"stdout: %(out)s\n" -" stderr: %(err)s" - -#, python-format -msgid "" -"backup: %(vol_id)s failed to run dsmc on %(bpath)s.\n" -"stdout: %(out)s\n" -" stderr: %(err)s" -msgstr "" -"Во время резервного копирования %(vol_id)s не удалось выполнить команду dsmc " -"в %(bpath)s.\n" -"stdout: %(out)s\n" -" stderr: %(err)s" - -#, python-format -msgid "backup: %(vol_id)s failed. %(path)s is not a file." -msgstr "Сбой создания резервной копии %(vol_id)s. %(path)s не файл." - -#, python-format -msgid "" -"backup: %(vol_id)s failed. %(path)s is unexpected file type. Block or " -"regular files supported, actual file mode is %(vol_mode)s." -msgstr "" -"Сбой создания резервной копии %(vol_id)s. %(path)s - неподдерживаемый тип " -"файла. 
Поддерживаются блоковые и обычные файлы. Фактический режим файла: " -"%(vol_mode)s." - -#, python-format -msgid "" -"backup: %(vol_id)s failed. Cannot obtain real path to volume at %(path)s." -msgstr "" -"Сбой создания резервной копии %(vol_id)s. Не удалось получить фактический " -"путь к тому в %(path)s." - -msgid "being attached by different mode" -msgstr "подключается с другим режимом" - -#, python-format -msgid "call failed: %r" -msgstr "вызов не выполнен: %r" - -msgid "call failed: GARBAGE_ARGS" -msgstr "вызов не выполнен: GARBAGE_ARGS" - -msgid "call failed: PROC_UNAVAIL" -msgstr "вызов не выполнен: PROC_UNAVAIL" - -#, python-format -msgid "call failed: PROG_MISMATCH: %r" -msgstr "вызов не выполнен: PROG_MISMATCH: %r" - -msgid "call failed: PROG_UNAVAIL" -msgstr "вызов не выполнен: PROG_UNAVAIL" - -#, python-format -msgid "can't find lun-map, ig:%(ig)s vol:%(vol)s" -msgstr "не найден элемент lun-map, группа инициаторов: %(ig)s, том: %(vol)s" - -msgid "can't find the volume to extend" -msgstr "не найден том для расширения" - -msgid "can't handle both name and index in req" -msgstr "нельзя обработать и имя и индекс в запросе" - -msgid "cannot understand JSON" -msgstr "невозможно понять JSON" - -#, python-format -msgid "cg-%s" -msgstr "cg-%s" - -msgid "cgsnapshot assigned" -msgstr "cgsnapshot присвоен" - -msgid "cgsnapshot changed" -msgstr "cgsnapshot изменен" - -msgid "cgsnapshots assigned" -msgstr "cgsnapshots присвоены" - -msgid "cgsnapshots changed" -msgstr "cgsnapshots изменены" - -msgid "" -"check_for_setup_error: Password or SSH private key is required for " -"authentication: set either san_password or san_private_key option." -msgstr "" -"check_for_setup_error: для идентификации требуется пароль или личный ключ " -"SSH: задайте опцию san_password или san_private_key." - -msgid "check_for_setup_error: Unable to determine system id." -msgstr "check_for_setup_error: не удалось определить ИД системы." 
- -msgid "check_for_setup_error: Unable to determine system name." -msgstr "check_for_setup_error: не удалось определить имя системы." - -msgid "check_hypermetro_exist error." -msgstr "Ошибка check_hypermetro_exist." - -#, python-format -msgid "clone depth exceeds limit of %s" -msgstr "Глубина дублирования превышает ограничение (%s)" - -msgid "consistencygroup assigned" -msgstr "consistencygroup присвоена" - -msgid "consistencygroup changed" -msgstr "consistencygroup изменена" - -msgid "control_location must be defined" -msgstr "Должен быть определен параметр control_location" - -msgid "create_cloned_volume, Source Volume does not exist in ETERNUS." -msgstr "create_cloned_volume, том источника не существует в ETERNUS." - -#, python-format -msgid "" -"create_cloned_volume, target volume instancename: %(volume_instancename)s, " -"Get Instance Failed." -msgstr "" -"create_cloned_volume, имя экземпляра целевого тома: %(volume_instancename)s, " -"ошибка получения экземпляра." - -msgid "create_cloned_volume: Source and destination size differ." -msgstr "create_cloned_volume: различаются исходный и целевой размеры." - -#, python-format -msgid "" -"create_cloned_volume: source volume %(src_vol)s size is %(src_size)dGB and " -"doesn't fit in target volume %(tgt_vol)s of size %(tgt_size)dGB." -msgstr "" -"create_cloned_volume: размер исходного тома %(src_vol)s равен %(src_size)d " -"ГБ и превышает размер целевого тома %(tgt_vol)s, %(tgt_size)d ГБ." - -msgid "" -"create_consistencygroup_from_src must be creating from a CG snapshot, or a " -"source CG." -msgstr "" -"create_consistencygroup_from_src: необходимо создавать из моментальной копии " -"группы согласования, а не их исходной группы согласования." - -msgid "" -"create_consistencygroup_from_src only supports a cgsnapshot source or a " -"consistency group source. Multiple sources cannot be used." -msgstr "" -"create_consistencygroup_from_src поддерживает только источник cgsnapshot или " -"источник группы согласования. 
Несколько источников использовать нельзя." - -#, python-format -msgid "create_copy: Source vdisk %(src)s (%(src_id)s) does not exist." -msgstr "create_copy: Исходный vdisk %(src)s (%(src_id)s) не существует." - -#, python-format -msgid "create_copy: Source vdisk %(src)s does not exist." -msgstr "create_copy: исходный vdisk %(src)s не существует." - -msgid "create_host: Host name is not unicode or string." -msgstr "create_host: Имя хоста не типа string или unicode." - -msgid "create_host: No initiators or wwpns supplied." -msgstr "create_host: Не переданы инициаторы или глобальные имена портов." - -msgid "create_hypermetro_pair error." -msgstr "Ошибка create_hypermetro_pair." - -#, python-format -msgid "create_snapshot, eternus_pool: %(eternus_pool)s, pool not found." -msgstr "create_snapshot, пул eternus: %(eternus_pool)s, пул не найден." - -#, python-format -msgid "" -"create_snapshot, snapshotname: %(snapshotname)s, source volume name: " -"%(volumename)s, vol_instance.path: %(vol_instance)s, dest volume name: " -"%(d_volumename)s, pool: %(pool)s, Return code: %(rc)lu, Error: %(errordesc)s." -msgstr "" -"create_snapshot, имя моментальной копии: %(snapshotname)s, имя исходного " -"тома: %(volumename)s, путь к экземпляру: %(vol_instance)s, имя целевого " -"тома: %(d_volumename)s, пул: %(pool)s, код возврата: %(rc)lu, ошибка: " -"%(errordesc)s." - -#, python-format -msgid "" -"create_snapshot, volumename: %(s_volumename)s, source volume not found on " -"ETERNUS." -msgstr "" -"create_snapshot, имя тома: %(s_volumename)s, исходный том не найден в " -"ETERNUS." - -#, python-format -msgid "" -"create_snapshot, volumename: %(volumename)s, Replication Service not found." -msgstr "" -"create_snapshot, имя тома: %(volumename)s, служба репликации не найдена." - -#, python-format -msgid "" -"create_snapshot: Volume status must be \"available\" or \"in-use\" for " -"snapshot. The invalid status is %s." 
-msgstr "" -"create_snapshot: Требуемое состояние тома для моментальной копии: \"available" -"\" или \"in-use\". Недопустимое состояние: %s." - -msgid "create_snapshot: get source volume failed." -msgstr "create_snapshot: не удалось получить исходный том." - -#, python-format -msgid "" -"create_volume, volume: %(volume)s, EnumerateInstances, cannot connect to " -"ETERNUS." -msgstr "" -"create_volume, том: %(volume)s, EnumerateInstances, не удается подключиться " -"к ETERNUS." - -#, python-format -msgid "" -"create_volume, volume: %(volume)s, volumename: %(volumename)s, eternus_pool: " -"%(eternus_pool)s, Storage Configuration Service not found." -msgstr "" -"create_volume, том: %(volume)s, имя тома: %(volumename)s, пул eternus: " -"%(eternus_pool)s, служба настройки хранилища не найдена." - -#, python-format -msgid "" -"create_volume, volumename: %(volumename)s, poolname: %(eternus_pool)s, " -"Return code: %(rc)lu, Error: %(errordesc)s." -msgstr "" -"create_volume, имя тома: %(volumename)s, имя пула: %(eternus_pool)s, код " -"возврата: %(rc)lu, ошибка: %(errordesc)s." - -msgid "create_volume_from_snapshot, Source Volume does not exist in ETERNUS." -msgstr "create_volume_from_snapshot, том источника не существует в ETERNUS." - -#, python-format -msgid "" -"create_volume_from_snapshot, target volume instancename: " -"%(volume_instancename)s, Get Instance Failed." -msgstr "" -"create_volume_from_snapshot, имя экземпляра целевого тома: " -"%(volume_instancename)s, ошибка получения экземпляра." - -#, python-format -msgid "create_volume_from_snapshot: Snapshot %(name)s does not exist." -msgstr "" -"create_volume_from_snapshot: Моментальная копия %(name)s не существует." - -#, python-format -msgid "" -"create_volume_from_snapshot: Snapshot status must be \"available\" for " -"creating volume. The invalid status is: %s." -msgstr "" -"create_volume_from_snapshot: Для состояние тома требуется состояние " -"моментальной копии \"available\". Недопустимое состояние: %s." 
- -msgid "" -"create_volume_from_snapshot: Volume size is different from snapshot based " -"volume." -msgstr "" -"create_volume_from_snapshot: размер тома отличается от размера тома на " -"основе моментальной копии." - -#, python-format -msgid "" -"delete: %(vol_id)s failed to run dsmc due to invalid arguments with stdout: " -"%(out)s\n" -" stderr: %(err)s" -msgstr "" -"Во время удаления %(vol_id)s не удалось выполнить команду dsmc: недопустимые " -"аргументы с stdout: %(out)s\n" -" stderr: %(err)s" - -#, python-format -msgid "" -"delete: %(vol_id)s failed to run dsmc with stdout: %(out)s\n" -" stderr: %(err)s" -msgstr "" -"Во время удаления %(vol_id)s не удалось выполнить команду dsmc, stdout: " -"%(out)s\n" -" stderr: %(err)s" - -msgid "delete_hypermetro error." -msgstr "Ошибка delete_hypermetro." - -#, python-format -msgid "delete_initiator: %s ACL not found. Continuing." -msgstr "delete_initiator: %s ACL не найден. Выполнение продолжается." - -msgid "delete_replication error." -msgstr "Ошибка delete_replication." - -#, python-format -msgid "deleting snapshot %(snapshot_name)s that has dependent volumes" -msgstr "удаляемый снимок %(snapshot_name)s имеет зависимые тома" - -#, python-format -msgid "deleting volume %(volume_name)s that has snapshot" -msgstr "удаление тома %(volume_name)s, который имеет снимок" - -msgid "detach snapshot from remote node" -msgstr "отключить моментальную копию от удаленного узла" - -msgid "do_setup: No configured nodes." -msgstr "do_setup: Нет настроенных узлов." - -#, python-format -msgid "" -"error writing object to swift, MD5 of object in swift %(etag)s is not the " -"same as MD5 of object sent to swift %(md5)s" -msgstr "" -"Ошибка записи объекта в swift, MD5 объекта в swift %(etag)s не совпадает с " -"MD5 объекта, отправленного в swift %(md5)s" - -#, python-format -msgid "extend_volume, eternus_pool: %(eternus_pool)s, pool not found." -msgstr "extend_volume, пул eternus: %(eternus_pool)s, пул не найден." 
- -#, python-format -msgid "" -"extend_volume, volume: %(volume)s, volumename: %(volumename)s, eternus_pool: " -"%(eternus_pool)s, Storage Configuration Service not found." -msgstr "" -"extend_volume, том: %(volume)s, имя тома: %(volumename)s, пул eternus: " -"%(eternus_pool)s, служба настройки хранилища не найдена." - -#, python-format -msgid "" -"extend_volume, volumename: %(volumename)s, Return code: %(rc)lu, Error: " -"%(errordesc)s, PoolType: %(pooltype)s." -msgstr "" -"extend_volume, имя тома: %(volumename)s, код возврата: %(rc)lu, ошибка: " -"%(errordesc)s, тип пула: %(pooltype)s." - -#, python-format -msgid "extend_volume, volumename: %(volumename)s, volume not found." -msgstr "extend_volume, имя тома: %(volumename)s, том не найден." - -msgid "failed to create new_volume on destination host" -msgstr "не удалось создать new_volume на целевом хосте" - -msgid "fake" -msgstr "поддельный" - -#, python-format -msgid "file already exists at %s" -msgstr "файл уже существует в %s" - -msgid "fileno is not supported by SheepdogIOWrapper" -msgstr "fileno не поддерживается SheepdogIOWrapper" - -msgid "fileno() not supported by RBD()" -msgstr "Функция fileno() не поддерживается RBD()" - -#, python-format -msgid "filesystem %s does not exist in Nexenta Store appliance" -msgstr "Файловая система %s не существует в устройстве Nexenta Store" - -msgid "" -"flashsystem_multihostmap_enabled is set to False, not allow multi host " -"mapping. CMMVC6071E The VDisk-to-host mapping was not created because the " -"VDisk is already mapped to a host." -msgstr "" -"Параметру flashsystem_multihostmap_enabled присвоено значение False, не " -"разрешать преобразование нескольких хостов. CMMVC6071E Преобразование VDisk-" -"в-хост не создано, так как VDisk уже преобразован в хост." 
- -msgid "flush() not supported in this version of librbd" -msgstr "Функция flush() не поддерживается в этой версии librbd" - -#, python-format -msgid "fmt=%(fmt)s backed by: %(backing_file)s" -msgstr "fmt=%(fmt)s backed by: %(backing_file)s" - -#, python-format -msgid "fmt=%(fmt)s backed by:%(backing_file)s" -msgstr "резервная копия fmt=%(fmt)s создана :%(backing_file)s" - -msgid "force delete" -msgstr "принудительно удалить" - -msgid "get_hyper_domain_id error." -msgstr "Ошибка get_hyper_domain_id." - -msgid "get_hypermetro_by_id error." -msgstr "Ошибка get_hypermetro_by_id." - -#, python-format -msgid "" -"get_iscsi_params: Failed to get target IP for initiator %(ini)s, please " -"check config file." -msgstr "" -"get_iscsi_params: Не удалось получить целевой IP-адрес для инициатора " -"%(ini)s. Проверьте файл конфигурации." - -#, python-format -msgid "get_pool: Failed to get attributes for volume %s" -msgstr "get_pool: не удалось получить атрибуты для тома %s." - -msgid "glance_metadata changed" -msgstr "glance_metadata изменены" - -#, python-format -msgid "" -"gpfs_images_share_mode is set to copy_on_write, but %(vol)s and %(img)s " -"belong to different file systems." -msgstr "" -"Параметру gpfs_images_share_mode присвоено значение copy_on_write, но " -"%(vol)s и %(img)s относятся к разным файловым системам." - -#, python-format -msgid "" -"gpfs_images_share_mode is set to copy_on_write, but %(vol)s and %(img)s " -"belong to different filesets." -msgstr "" -"Параметру gpfs_images_share_mode присвоено значение copy_on_write, но " -"%(vol)s и %(img)s относятся к разным наборам файлов." 
- -#, python-format -msgid "" -"hgst_group %(grp)s and hgst_user %(usr)s must map to valid users/groups in " -"cinder.conf" -msgstr "" -"hgst_group %(grp)s и hgst_user %(usr)s должны быть связаны с пользователями/" -"группами в файле cinder.conf" - -#, python-format -msgid "hgst_net %(net)s specified in cinder.conf not found in cluster" -msgstr "" -"Указанное в файле cinder.conf значение hgst_net %(net)s не найдено в кластере" - -msgid "hgst_redundancy must be set to 0 (non-HA) or 1 (HA) in cinder.conf." -msgstr "" -"Значение hgst_redundancy в файле cinder.conf должно быть 0 (не высокая " -"готовность) или 1 (высокая готовность)." - -msgid "hgst_space_mode must be an octal/int in cinder.conf" -msgstr "" -"Значение hgst_space_mode в файле cinder.conf должно быть восьмеричным/" -"целочисленным" - -#, python-format -msgid "hgst_storage server %(svr)s not of format :" -msgstr "Сервер hgst_storage %(svr)s не в формате <хост>:<устройство>" - -msgid "hgst_storage_servers must be defined in cinder.conf" -msgstr "Должно быть указано значение hgst_storage_servers в файле cinder.conf" - -msgid "" -"http service may have been abruptly disabled or put to maintenance state in " -"the middle of this operation." -msgstr "" -"Служба http может быть неожиданно отключена или переведена в режим " -"обслуживания посреди выполнения операции." - -msgid "id cannot be None" -msgstr "Недопустимое значение для ИД: None" - -#, python-format -msgid "image %s not found" -msgstr "не найден образ %s" - -#, python-format -msgid "initialize_connection, volume: %(volume)s, Volume not found." -msgstr "initialize_connection, том: %(volume)s, том не найден." - -#, python-format -msgid "initialize_connection: Failed to get attributes for volume %s." -msgstr "initialize_connection: Не удалось получить атрибуты для тома %s." - -#, python-format -msgid "initialize_connection: Missing volume attribute for volume %s." -msgstr "initialize_connection: Отсутствует атрибут тома для тома %s." 
- -#, python-format -msgid "" -"initialize_connection: No node found in I/O group %(gid)s for volume %(vol)s." -msgstr "" -"initialize_connection: Не найдены узлы в группе ввода-вывода %(gid)s для " -"тома %(vol)s." - -#, python-format -msgid "initialize_connection: vdisk %s is not defined." -msgstr "initialize_connection: vdisk %s не определен." - -#, python-format -msgid "invalid user '%s'" -msgstr "недопустимый пользователь %s" - -#, python-format -msgid "iscsi portal, %s, not found" -msgstr "портал iSCSI %s не найден" - -msgid "" -"iscsi_ip_address must be set in config file when using protocol 'iSCSI'." -msgstr "" -"В файле конфигурации должен быть указан параметр iscsi_ip_address, когда " -"используется протокол 'iSCSI'." - -#, python-format -msgid "key manager error: %(reason)s" -msgstr "ошибка администратора ключей: %(reason)s" - -msgid "limit param must be an integer" -msgstr "Параметр limit должен быть целым числом" - -msgid "limit param must be positive" -msgstr "Параметр limit должен быть положительным" - -msgid "manage_existing requires a 'name' key to identify an existing volume." -msgstr "" -"Для manage_existing требуется ключ 'name' для идентификации существующего " -"тома." - -#, python-format -msgid "" -"manage_existing_snapshot: Error managing existing replay %(ss)s on volume " -"%(vol)s" -msgstr "" -"manage_existing_snapshot: ошибка при обработке существующего повтора %(ss)s " -"для тома %(vol)s" - -#, python-format -msgid "marker [%s] not found" -msgstr "маркер [%s] не найден" - -#, python-format -msgid "mdiskgrp missing quotes %s" -msgstr "у mdiskgrp отсутствуют кавычки %s" - -#, python-format -msgid "migration_policy must be 'on-demand' or 'never', passed: %s" -msgstr "" -"Значение migration_policy должно быть on-demand или never, передано значение " -"%s" - -#, python-format -msgid "mkfs failed on volume %(vol)s, error message was: %(err)s." -msgstr "Сбой команды mkfs для тома %(vol)s, сообщение об ошибке: %(err)s." 
- -msgid "mock" -msgstr "ложный" - -msgid "mount.glusterfs is not installed" -msgstr "mount.glusterfs не установлена" - -#, python-format -msgid "multiple resources with name %s found by drbdmanage" -msgstr "drbdmanage обнаружил несколько ресурсов с именем %s" - -#, python-format -msgid "multiple resources with snapshot ID %s found" -msgstr "Обнаружено несколько ресурсов с ИД моментальной копии %s" - -msgid "name cannot be None" -msgstr "Недопустимое значение для имени: None" - -#, python-format -msgid "no REPLY but %r" -msgstr "нет REPLY за исключением %r" - -#, python-format -msgid "no snapshot with id %s found in drbdmanage" -msgstr "В drbdmanage не обнаружено моментальных копий с ИД %s" - -#, python-format -msgid "not exactly one snapshot with id %s" -msgstr "Обнаружено несколько моментальных копий с ИД %s" - -#, python-format -msgid "not exactly one volume with id %s" -msgstr "Обнаружено несколько томов с ИД %s" - -#, python-format -msgid "obj missing quotes %s" -msgstr "у obj отсутствуют кавычки %s" - -msgid "open_access_enabled is not off." -msgstr "Параметр open_access_enabled не отключен." - -msgid "progress must be an integer percentage" -msgstr "ход выполнения должен быть целым значением в процентах" - -msgid "provider must be defined" -msgstr "должен быть определен провайдер" - -#, python-format -msgid "" -"qemu-img %(minimum_version)s or later is required by this volume driver. " -"Current qemu-img version: %(current_version)s" -msgstr "" -"Этому драйверу тома требуется qemu-img версии %(minimum_version)s. Текущая " -"версия qemu-img: %(current_version)s" - -#, python-format -msgid "" -"qemu-img is not installed and image is of type %s. Only RAW images can be " -"used if qemu-img is not installed." -msgstr "" -"qemu-img не установлен, и образ имеет тип %s. Только образы RAW могут " -"использоваться, когда qemu-img не установлен." - -msgid "" -"qemu-img is not installed and the disk format is not specified. 
Only RAW " -"images can be used if qemu-img is not installed." -msgstr "" -"qemu-img не установлен, и не указан формат диска. Только образы RAW могут " -"использоваться, когда qemu-img не установлен." - -msgid "rados and rbd python libraries not found" -msgstr "Не найдены библиотеки python rados и rbd" - -#, python-format -msgid "read_deleted can only be one of 'no', 'yes' or 'only', not %r" -msgstr "" -"read_deleted может принимать значения 'no', 'yes' или 'only', значение %r " -"недопустимо" - -#, python-format -msgid "replication_failover failed. %s not found." -msgstr "Ошибка replication_failover. %s не найден." - -msgid "replication_failover failed. Backend not configured for failover" -msgstr "" -"Ошибка replication_failover. Базовая система не настроена для переключения " -"после сбоя" - -#, python-format -msgid "" -"restore: %(vol_id)s failed to run dsmc due to invalid arguments on " -"%(bpath)s.\n" -"stdout: %(out)s\n" -" stderr: %(err)s" -msgstr "" -"Во время выполнения восстановления %(vol_id)s не удалось выполнить команду " -"dsmc: недопустимые аргументы в %(bpath)s.\n" -"stdout: %(out)s\n" -" stderr: %(err)s" - -#, python-format -msgid "" -"restore: %(vol_id)s failed to run dsmc on %(bpath)s.\n" -"stdout: %(out)s\n" -" stderr: %(err)s" -msgstr "" -"Во время восстановления %(vol_id)s не удалось выполнить команду dsmc в " -"%(bpath)s.\n" -"stdout: %(out)s\n" -" stderr: %(err)s" - -#, python-format -msgid "" -"restore: %(vol_id)s failed.\n" -"stdout: %(out)s\n" -" stderr: %(err)s." -msgstr "" -"сбой восстановления %(vol_id)s.\n" -"stdout: %(out)s\n" -" stderr: %(err)s." - -msgid "" -"restore_backup aborted, actual object list does not match object list stored " -"in metadata." -msgstr "" -"Операция restore_backup прервана, список фактических объектов не совпадает " -"со списком объектов в метаданных." - -#, python-format -msgid "rtslib_fb is missing member %s: You may need a newer python-rtslib-fb." 
-msgstr "" -"В rtslib_fb отсутствует элемент %s: может потребоваться более новая версия " -"python-rtslib-fb." - -msgid "san_ip is not set." -msgstr "Не задано значение san_ip." - -msgid "san_ip must be set" -msgstr "san_ip должен быть назначен" - -msgid "" -"san_login and/or san_password is not set for Datera driver in the cinder." -"conf. Set this information and start the cinder-volume service again." -msgstr "" -"Не указано значение параметра san_login или san_password для драйвера Datera " -"в cinder.conf. Укажите эту информацию и запустите службу cinder-volume еще " -"раз." - -msgid "serve() can only be called once" -msgstr "serve() может быть вызван только один раз" - -#, python-format -msgid "snapshot-%s" -msgstr "snapshot-%s" - -msgid "snapshots assigned" -msgstr "моментальные копии присвоены" - -msgid "snapshots changed" -msgstr "моментальные копии изменены" - -#, python-format -msgid "source volume id:%s is not replicated" -msgstr "исходный том с ИД %s не скопирован" - -msgid "source-name cannot be empty." -msgstr "source-name не может быть пустым." - -msgid "source-name format should be: 'vmdk_path@vm_inventory_path'." -msgstr "" -"source-name должно указываться в формате: 'vmdk_path@vm_inventory_path'." - -#, python-format -msgid "status must be %s and" -msgstr " состоянием должно быть %s и " - -msgid "status must be available" -msgstr "Требуемое состояние: Доступен" - -msgid "stop_hypermetro error." -msgstr "Ошибка stop_hypermetro." - -msgid "sync_hypermetro error." -msgstr "Ошибка sync_hypermetro." - -#, python-format -msgid "" -"targetcli not installed and could not create default directory " -"(%(default_path)s): %(exc)s" -msgstr "" -"Программа targetcli не установлена, поэтому не удалось создать каталог по " -"умолчанию (%(default_path)s): %(exc)s" - -msgid "terminate_connection: Failed to get host name from connector." -msgstr "terminate_connection: Не удалось получить имя хоста из коннектора." 
- -msgid "timeout creating new_volume on destination host" -msgstr "истек тайм-аут создания new_volume на целевом хосте" - -msgid "too many body keys" -msgstr "слишком много ключей тела" - -#, python-format -msgid "umount: %s: not mounted" -msgstr "umount: %s: не смонтирован" - -#, python-format -msgid "umount: %s: target is busy" -msgstr "umount: %s: цель занята" - -msgid "umount: : some other error" -msgstr "umount: <путь_к_mnt>: другая ошибка" - -msgid "umount: : target is busy" -msgstr "umount: <путь_к_mnt>: цель занята" - -#, python-format -msgid "unmanage_snapshot: Cannot find snapshot named %s" -msgstr "unmanage_snapshot: не найдена моментальная копия %s" - -#, python-format -msgid "unmanage_snapshot: Cannot find volume id %s" -msgstr "unmanage_snapshot: не найден том с ИД %s" - -#, python-format -msgid "unrecognized argument %s" -msgstr "Нераспознанный аргумент %s" - -#, python-format -msgid "unsupported compression algorithm: %s" -msgstr "Неподдерживаемый алгоритм сжатия: %s" - -msgid "valid iqn needed for show_target" -msgstr "Для show_target требуется допустимый iqn" - -#, python-format -msgid "vdisk %s is not defined." -msgstr "vdisk %s не определен." - -msgid "vmemclient python library not found" -msgstr "Не найдена библиотека vmemclient для языка Python" - -#, python-format -msgid "volume %s not found in drbdmanage" -msgstr "Том %s не найден в drbdmanage" - -msgid "volume assigned" -msgstr "том присвоен" - -msgid "volume changed" -msgstr "том изменен" - -msgid "volume is already attached" -msgstr "том уже подключен" - -msgid "volume is not local to this node" -msgstr "Том не является локальным для этого узла" - -#, python-format -msgid "" -"volume size %(volume_size)d is too small to restore backup of size %(size)d." -msgstr "" -"Размер тома %(volume_size)d слишком мал для восстановления резервной копии " -"размером %(size)d." - -#, python-format -msgid "volume size %d is invalid." -msgstr "Недопустимый размер тома %d." 
- -msgid "" -"volume_type must be provided when creating a volume in a consistency group." -msgstr "" -"Должен быть указан volume_type, когда создается том в группе согласования ." - -msgid "volume_type_id cannot be None" -msgstr "Недопустимое значение для volume_type_id: None" - -#, python-format -msgid "volume_types must be provided to create consistency group %(name)s." -msgstr "" -"Необходимо указать volume_types для создания группы согласования %(name)s." - -#, python-format -msgid "volume_types must be provided to create consistency group %s." -msgstr "Необходимо указать volume_types для создания группы согласования %s." - -msgid "volumes assigned" -msgstr "тома присвоены" - -msgid "volumes changed" -msgstr "тома изменены" - -#, python-format -msgid "wait_for_condition: %s timed out." -msgstr "Истек тайм-аут wait_for_condition: %s." - -#, python-format -msgid "" -"zfssa_manage_policy property needs to be set to 'strict' or 'loose'. Current " -"value is: %s." -msgstr "" -"Свойству zfssa_manage_policy должно быть присвоено значение 'strict' или " -"'loose'. Текущее значение: %s." diff --git a/cinder/locale/tr_TR/LC_MESSAGES/cinder-log-error.po b/cinder/locale/tr_TR/LC_MESSAGES/cinder-log-error.po deleted file mode 100644 index daa5451e1..000000000 --- a/cinder/locale/tr_TR/LC_MESSAGES/cinder-log-error.po +++ /dev/null @@ -1,2186 +0,0 @@ -# Translations template for cinder. -# Copyright (C) 2015 ORGANIZATION -# This file is distributed under the same license as the cinder project. -# -# Translators: -# ADİL REŞİT DURSUN , 2015 -# OpenStack Infra , 2015. #zanata -# Andreas Jaeger , 2016. 
#zanata -msgid "" -msgstr "" -"Project-Id-Version: cinder 9.0.0.0rc2.dev178\n" -"Report-Msgid-Bugs-To: https://bugs.launchpad.net/openstack-i18n/\n" -"POT-Creation-Date: 2016-10-06 03:19+0000\n" -"MIME-Version: 1.0\n" -"Content-Type: text/plain; charset=UTF-8\n" -"Content-Transfer-Encoding: 8bit\n" -"PO-Revision-Date: 2015-09-16 08:22+0000\n" -"Last-Translator: Andreas Jaeger \n" -"Language: tr-TR\n" -"Plural-Forms: nplurals=1; plural=0;\n" -"Generated-By: Babel 2.0\n" -"X-Generator: Zanata 3.7.3\n" -"Language-Team: Turkish (Turkey)\n" - -#, python-format -msgid "" -"%(exception)s: Exception during revert of retype for volume %(volume_name)s. " -"Failed to remove from new volume set %(new_vvs)s." -msgstr "" -"%(exception)s: %(volume_name)s mantıksal sürücüsü için retype geri alınırken " -"olağandışı durum. Yeni mantıksal sürücüden %(new_vvs)s ayarı kaldırılamadı." - -#, python-format -msgid "" -"%(exception)s: Exception during revert of retype for volume %(volume_name)s. " -"Original volume set/QOS settings may not have been fully restored." -msgstr "" -"%(exception)s: %(volume_name)s mantıksal sürücüsü için retype geri alınırken " -"olağandışı durum. Özgün mantıksal sürücü küme/QOS ayarları tamamen geri " -"yüklenmiş olmayabilir." - -#, python-format -msgid "" -"%(fun)s: Failed with unexpected CLI output.\n" -" Command: %(cmd)s\n" -"stdout: %(out)s\n" -"stderr: %(err)s\n" -msgstr "" -"%(fun)s: Beklenmeyen CLI çıktısı ile başarısız oldu.\n" -" Komut: %(cmd)s\n" -"stdout: %(out)s\n" -"stderr: %(err)s\n" - -#, python-format -msgid "" -"%(method)s %(url)s unexpected response status: %(response)s (expects: " -"%(expects)s)." -msgstr "" -"%(method)s %(url)s beklenmeyen yanıt durumu: %(response)s (expects: " -"%(expects)s)." 
- -#, python-format -msgid "%(name)s: %(value)s" -msgstr "%(name)s: %(value)s" - -#, python-format -msgid "%s" -msgstr "%s" - -#, python-format -msgid "'%(value)s' is an invalid value for extra spec '%(key)s'" -msgstr "'%(value)s', '%(key)s' fazladan özelliği için geçersiz değerdir" - -#, python-format -msgid "" -"Account for Volume ID %s was not found on the SolidFire Cluster while " -"attempting create_snapshot operation!" -msgstr "" -"create_snapshot işlemi denenirken SolidFire Kümesinde %s Mantıksal Sürücü " -"Bilgisi için hesap bulunamadı!" - -#, python-format -msgid "" -"Account for Volume ID %s was not found on the SolidFire Cluster while " -"attempting unmanage operation!" -msgstr "" -"unmanage işlemi denenirken SolidFire Kümesinde Mantıksal Sürücü Kimliği %s " -"için hesap bulunamadı!" - -#, python-format -msgid "Authorizing request: %(zfssaurl)s retry: %(retry)d ." -msgstr "İstek yetkilendiriliyor: %(zfssaurl)s tekrar deneme: %(retry)d ." - -msgid "Backend returned err for lun export." -msgstr "Artalanda çalışan uygulama lun dışa aktarımı için hata döndürdü." - -#, python-format -msgid "Backup id %s is not invalid. Skipping reset." -msgstr "Yedekleme numarası %s geçersizdir. Sıfırlama atlanıyor." - -#, python-format -msgid "" -"Backup service %(configured_service)s does not support verify. Backup id " -"%(id)s is not verified. Skipping verify." -msgstr "" -"Yedekleme servisi %(configured_service)s doğrulamayı desteklemez. Yedekleme " -"numarası %(id)s doğrulanabilir değil. Doğrulama atlanıyor." - -#, python-format -msgid "Backup volume metadata failed: %s." -msgstr "Mantıksal sürücü metadata yedekleme başarısız oldu: %s." 
- -#, python-format -msgid "" -"CLI fail: '%(cmd)s' = %(code)s\n" -"out: %(stdout)s\n" -"err: %(stderr)s" -msgstr "" -"CLI başarısız: '%(cmd)s' = %(code)s\n" -"çıktı: %(stdout)s\n" -"hata: %(stderr)s" - -msgid "Call to Nova delete snapshot failed" -msgstr "Anlık sistem görüntüsü silmek için Nova çağrısı başarısız oldu" - -msgid "Call to Nova to create snapshot failed" -msgstr "Anlık sistem görüntüsü oluşturmak için Nova çağrısı başarısız oldu" - -#, python-format -msgid "Call to json.loads() raised an exception: %s." -msgstr "json.loads() çağrısı bir istisna oluşturdu: %s." - -#, python-format -msgid "Can not discovery in %(target_ip)s with %(target_iqn)s." -msgstr "%(target_iqn)s ile %(target_ip)s keşfedilemiyor." - -#, python-format -msgid "Can't find volume to map %(key)s, %(msg)s" -msgstr "%(key)s eşleştirmek için mantıksal sürücü bulunamıyor, %(msg)s" - -#, python-format -msgid "" -"Cannot add and verify tier policy association for storage group : " -"%(storageGroupName)s to FAST policy : %(fastPolicyName)s." -msgstr "" -"Depolama grubu için katman ilke ilişkisi doğrulanamaz ve eklenemez : FAST " -"ilkesine %(storageGroupName)s depolama grubu : %(fastPolicyName)s." - -#, python-format -msgid "Cannot create or find an initiator group with name %(igGroupName)s." -msgstr "" -"%(igGroupName)s adı ile başlatıcı bir grup bulunamıyor ya da oluşturulamıyor." - -msgid "Cannot detect replica status." -msgstr "Kopya durumu tespit edilemez." - -msgid "Cannot determine if Tiering Policies are supported." -msgstr "Eğer Katmanlama İlkeleri destekliyorsa tespit edilemez." - -msgid "Cannot determine whether Tiering Policy is supported on this array." -msgstr "" -"Bu dizide Katmanlama İlkesinin desteklenip desteklenmediği belirlenemiyor." - -#, python-format -msgid "Cannot find Consistency Group %s" -msgstr "Tutarlılık Grubu %s bulunamıyor" - -#, python-format -msgid "" -"Cannot find a portGroup with name %(pgGroupName)s. 
The port group for a " -"masking view must be pre-defined." -msgstr "" -"%(pgGroupName)s adında bir bağlantıNoktasıGrubu bulunamıyor. Maskeleme " -"görünümü için bağlantı noktası grubu önceden tanımlanmış olmalıdır." - -#, python-format -msgid "Cannot find the fast policy %(fastPolicyName)s." -msgstr "Fast ilkesi %(fastPolicyName)s bulunamadı." - -#, python-format -msgid "" -"Cannot find the new masking view just created with name %(maskingViewName)s." -msgstr "" -"%(maskingViewName)s adı ile az önce oluşturulan yeni maskeleme görünümü " -"bulunamıyor." - -#, python-format -msgid "Cannot get QoS spec for volume %s." -msgstr "%s mantıksal sürücüsü için QoS özelliği alınamadı." - -#, python-format -msgid "Cannot get storage Group from job : %(storageGroupName)s." -msgstr "İşten depolama Grubu alınamıyor : %(storageGroupName)s." - -msgid "Cannot get storage system." -msgstr "Depolama sistemi alınamaz." - -#, python-format -msgid "Cloning of volume %s failed." -msgstr "%s mantıksal sürücüsünün kopyalaması başarısız oldu." - -#, python-format -msgid "" -"CloudByte does not have a volume corresponding to OpenStack volume [%s]." -msgstr "" -"CloudByte, OpenStack mantıksal sürücüsüne [%s] uyumlu bir mantıksal sürücüye " -"sahip değil." - -#, python-format -msgid "" -"CloudByte snapshot information is not available for OpenStack volume [%s]." -msgstr "" -"CloudByte anlık sistem görüntü bilgisi OpenStack mantıksal sürücüsü [%s] " -"için kullanılamaz." - -#, python-format -msgid "CloudByte volume information not available for OpenStack volume [%s]." -msgstr "" -"CloudByte mantıksal sürücü bilgisi OpenStack mantıksal sürücüsü [%s] için " -"kullanılamaz." - -#, python-format -msgid "Cmd :%s" -msgstr "Cmd :%s" - -#, python-format -msgid "Configuration value %s is not set." -msgstr "Yapılandırma değeri %s ayarlanmamış." - -#, python-format -msgid "Connect to Flexvisor error: %s." -msgstr "Flexvisor hatasına bağlan: %s." 
- -#, python-format -msgid "Connect to Flexvisor failed: %s." -msgstr "Flexvisor'a bağlanılamadı: %s." - -#, python-format -msgid "Copy snapshot to volume for snapshot %(snap)s volume %(vol)s failed!" -msgstr "" -"%(snap)s anlık sistem görüntüsü %(vol)s mantıksal sürücüsü için mantıksal " -"sürücüye anlık sistem görüntüsü kopyalayamadı!" - -msgid "Could not decode scheduler options." -msgstr "Zamanlayıcı seçenekleri şifresi çözülemedi." - -#, python-format -msgid "Could not find a host for consistency group %(group_id)s." -msgstr "Tutarlılık grubu %(group_id)s için bir istemci bulunamadı." - -#, python-format -msgid "" -"Could not find port group : %(portGroupName)s. Check that the EMC " -"configuration file has the correct port group name." -msgstr "" -"Bağlantı noktası grubu bulunamadı : %(portGroupName)s. EMC yapılandırma " -"dosyasının doğru bağlantı grup adına sahip olup olmadığını kontrol edin." - -#, python-format -msgid "Could not stat scheduler options file %(filename)s." -msgstr "%(filename)s zamanlayıcı seçenek dosyalarının bilgileri gösterilemedi." - -#, python-format -msgid "" -"Create consistency group from snapshot-%(snap)s failed: SnapshotNotFound." -msgstr "" -"Anlık sistem görüntüsü-%(snap)s'den tutarlılık grubu oluşturma başarısız " -"oldu: AnlıkSistemGörüntüsüBulunamadı." - -#, python-format -msgid "" -"Create new lun from lun for source %(src)s => destination %(dest)s failed!" -msgstr "" -"Kaynak %(src)s => hedef %(dest)s için lun'dan yeni bir lun oluşturulamadı!" 
- -#, python-format -msgid "Create snapshot notification failed: %s" -msgstr "Anlık sistem görüntüsü bildirimi oluşturma başarısız oldu: %s" - -#, python-format -msgid "Create volume failed from snapshot: %s" -msgstr "" -"Anlık sistem görüntüsünden mantıksal sürücü oluşturma başarısız oldu: %s" - -#, python-format -msgid "Create volume notification failed: %s" -msgstr "Mantıksal sürücü bildirimi oluşturma başarısız oldu: %s" - -#, python-format -msgid "Creation of snapshot failed for volume: %s" -msgstr "" -"Mantıksal sürücü için anlık sistem görüntüsü oluşturma başarısız oldu: %s" - -#, python-format -msgid "Creation of volume %s failed." -msgstr "%s mantıksal sürücü oluşturma başarısız oldu." - -msgid "DB error:" -msgstr "DB hatası:" - -msgid "DBError encountered: " -msgstr "DBError ile karşılaşıldı: " - -msgid "Delete consistency group failed to update usages." -msgstr "Kullanımları güncellemek için tutarlılık grubu silme başarısız oldu." - -msgid "Delete snapshot failed, due to snapshot busy." -msgstr "Anlık sistem görüntüsü meşgul olduğundan silme başarısız oldu." - -#, python-format -msgid "Delete snapshot notification failed: %s" -msgstr "Anlık sistem görüntüsü bildirimi silme başarısız oldu: %s" - -#, python-format -msgid "Delete volume notification failed: %s" -msgstr "Mantıksal sürücü bildirimi silme başarısız oldu: %s" - -#, python-format -msgid "Deleting snapshot %s failed" -msgstr "Anlık sistem görüntüsü %s silme başarısız oldu" - -#, python-format -msgid "Deleting zone failed %s" -msgstr "Bölge silme başarısız oldu %s" - -#, python-format -msgid "Deletion of volume %s failed." -msgstr "%s mantıksal sürücüsünün silinmesi başarısız oldu." - -#, python-format -msgid "Destination Volume Group %s does not exist" -msgstr "Hedef Mantıksal Sürücü Grubu %s yok" - -msgid "Detach volume failed, due to remove-export failure." -msgstr "Mantıksal sürücü ayırma remove-export hatası nedeniyle başarısız oldu." 
- -msgid "Detach volume failed, due to uninitialized driver." -msgstr "Sürücü başlatılamadığından mantıksal sürücü ayırma başarısız oldu." - -#, python-format -msgid "Did not find expected column name in lsvdisk: %s." -msgstr "lsvdisk içinde beklenen sütun adı bulunamadı: %s." - -msgid "Differential restore failed, trying full restore" -msgstr "Kademeli geri yükleme başarısız oldu, tam geri yükleme deneniyor" - -#, python-format -msgid "Disconnection failed with message: %(msg)s." -msgstr "Bağlantıyı kesme şu ileti ile başarısız oldu: %(msg)s." - -#, python-format -msgid "" -"Driver-based migration of volume %(vol)s failed. Move from %(src)s to " -"%(dst)s failed with error: %(error)s." -msgstr "" -"%(vol)s mantıksal sürücüsünün, sürücü tabanlı göçü başarısız oldu. Kaynaktan " -"%(src)s %(dst)s hedefe taşıma şu hata ile başarısız oldu: %(error)s." - -#, python-format -msgid "Error Attaching volume %(vol)s." -msgstr "%(vol)s mantıksal sürücüsü eklenirken hata." - -#, python-format -msgid "" -"Error Create Group: %(groupName)s. Return code: %(rc)lu. Error: %(error)s." -msgstr "" -"Grup Oluşturma Hatası: %(groupName)s. Dönüş kodu: %(rc)lu. Hata: %(error)s." - -#, python-format -msgid "" -"Error Setting Volume: %(lun)s to InitiatorGroup: %(initiatorgroup)s Pool: " -"%(pool)s Project: %(project)s Return code: %(ret.status)d Message: " -"%(ret.data)s." -msgstr "" -"Mantıksal Sürücü Ayarlama Hatası: BaşlatıcıGrubu: %(initiatorgroup)s için " -"%(lun)s Havuz: %(pool)s Proje: %(project)s Dönüş kodu: %(ret.status)d " -"İleti: %(ret.data)s." - -msgid "Error activating LV" -msgstr "LV etkinleştirilirken hata" - -msgid "Error closing channel." -msgstr "Kanal kapatılırken hata." - -#, python-format -msgid "" -"Error contacting glance server '%(netloc)s' for '%(method)s', %(extra)s." -msgstr "" -"'%(method)s' için '%(netloc)s' glance sunucusuna bağlantı kurulurken hata, " -"%(extra)s. 
" - -#, python-format -msgid "Error creating QOS rule %s" -msgstr "QOS kuralı %s oluşturulurken hata" - -msgid "Error creating Volume" -msgstr "Mantıksal sürücü oluşturmada hata" - -msgid "Error creating Volume Group" -msgstr "Mantıksal Sürücü Grubu oluşturulurken hata" - -msgid "Error creating chap record." -msgstr "Chap kaydı oluşturulurken hata." - -msgid "Error creating snapshot" -msgstr "Anlık sistem görüntüsü oluşturma hatası" - -#, python-format -msgid "Error creating volume. Msg - %s." -msgstr "Mantıksal sürücü oluşturulurken hata. İleti - %s." - -#, python-format -msgid "Error detaching volume %(volume)s, due to remove export failure." -msgstr "" -"Dışa aktarma hatası kaldırma nedeniyle, %(volume)s mantıksal sürücüsü " -"ayrılırken hata." - -#, python-format -msgid "Error detaching volume %s" -msgstr "Mantıksal sürücü %s ayrılırken hata" - -#, python-format -msgid "Error disassociating storage group from policy: %s." -msgstr "İlkeden depolama grubu ayırırken hata: %s." - -msgid "Error during re-export on driver init." -msgstr "Sürücü init'inde yeniden dışa aktarma sırasında hata." - -msgid "Error executing SSH command." -msgstr "SSH komutu yürütülürken hata." - -msgid "Error executing command via ssh." -msgstr "ssh yoluyla komut yürütürken hata." - -#, python-format -msgid "Error executing command via ssh: %s" -msgstr "ssh üzerinden komut yürütülürken hata: %s" - -msgid "Error extending Volume" -msgstr "Mantıksal sürücü genişletilirken hata" - -#, python-format -msgid "Error extending volume %(id)s. Ex: %(ex)s" -msgstr "%(id)s mantıksal sürücüsü genişletilirken hata. Ex: %(ex)s" - -#, python-format -msgid "Error extending volume: %(vol)s. Exception: %(ex)s" -msgstr "Mantıksal sürücü genişletilirken hata: %(vol)s. İstisna: %(ex)s" - -#, python-format -msgid "Error finding target pool instance name for pool: %(targetPoolName)s." -msgstr "Havuz için hedef havuz örnek adı bulunurken hata: %(targetPoolName)s." 
- -#, python-format -msgid "Error getting LUN attribute. Exception: %s" -msgstr "LUN özniteliği alınırken hata. İstisna: %s" - -msgid "Error getting active FC target ports." -msgstr "Etkin FC hedef bağlantı noktaları alınırken hata." - -msgid "Error getting active ISCSI target iqns." -msgstr "Etkin ISCSI hedef iqns alınırken hata." - -msgid "Error getting active ISCSI target portals." -msgstr "Etkin ISCSI hedef kapısı alınırken hata." - -msgid "Error getting array, pool, SLO and workload." -msgstr "Dizi, havuz, SLO ve iş yükü alma hatası." - -msgid "Error getting chap record." -msgstr "Chap kaydı alınırken hata." - -msgid "Error getting name server info." -msgstr "Ad sunucu bilgisi alınırken hata." - -msgid "Error getting show fcns database info." -msgstr "fcns veritabanı bilgisini göster sonucu alınırken hata." - -msgid "Error getting target pool name and array." -msgstr "Hedef havuz adı ve dizisi alınırken hata." - -#, python-format -msgid "Error in copying volume: %s" -msgstr "Mantıksal sürücü kopyalamada hata: %s" - -#, python-format -msgid "" -"Error in extending volume size: Volume: %(volume)s Vol_Size: %(vol_size)d " -"with Snapshot: %(snapshot)s Snap_Size: %(snap_size)d" -msgstr "" -"Mantıksal sürücü boyutu genişletmede hata. Mantıksal sürücü: Anlık sistem " -"görüntüsü: %(snapshot)s Snap_Size: %(snap_size)d ile %(volume)s Vol_Size: " -"%(vol_size)d" - -#, python-format -msgid "Error in workflow copy from cache. %s." -msgstr "Önbellekten iş akışı kopyalamasında hata. %s." - -#, python-format -msgid "Error invalid json: %s" -msgstr "Geçersiz json hatası: %s" - -msgid "Error manage existing get volume size." -msgstr "Varolan mantıksal sürücü boyutu alma işleminde hata." - -msgid "Error manage existing volume." -msgstr "Varolan mantıksal sürücüyü yönetme hatası." - -#, python-format -msgid "Error mapping volume: %s" -msgstr "Mantıksal sürücü eşleştirme hatası: %s" - -#, python-format -msgid "" -"Error migrating volume: %(volumename)s. 
to target pool %(targetPoolName)s." -msgstr "" -"Mantıksal sürücü hedef havuza %(targetPoolName)s taşınırken hata: " -"%(volumename)s." - -#, python-format -msgid "Error migrating volume: %s" -msgstr "Mantıksal sürücü göç hatası: %s" - -#, python-format -msgid "" -"Error occurred in the volume driver when updating consistency group " -"%(group_id)s." -msgstr "" -"Tutarlılık grubu %(group_id)s güncellenirken, mantıksal sürücüde hata " -"meydana geldi." - -msgid "" -"Error occurred when adding hostgroup and lungroup to view. Remove lun from " -"lungroup now." -msgstr "" -"Görüntülenecek hostgroup ve lungroup eklenirken hata oluştu. Şimdi " -"lungroup'tan lun'u kaldır." - -#, python-format -msgid "" -"Error occurred when building request spec list for consistency group %s." -msgstr "" -"%s tutarlılık grubu için istek özellik listesi oluşturulurken hata oluştu." - -#, python-format -msgid "Error occurred when creating cgsnapshot %s." -msgstr "cgsnapshot %s oluşturulurken hata meydana geldi." - -#, python-format -msgid "" -"Error occurred when creating consistency group %(cg)s from cgsnapshot " -"%(cgsnap)s." -msgstr "" -"%(cgsnap)s cgsnapshot'ından %(cg)s tutarlılık grubu oluşturulurken hata " -"meydana geldi." - -#, python-format -msgid "" -"Error occurred when creating consistency group %(group)s from cgsnapshot " -"%(cgsnap)s." -msgstr "" -"%(cgsnap)s cgsnapshot'ından %(group)s tutarlılık grubu oluşturulurken hata " -"meydana geldi." - -#, python-format -msgid "Error occurred when creating consistency group %s." -msgstr "Tutarlılık grubu %s oluşturulurken hata meydana geldi." - -#, python-format -msgid "" -"Error occurred when creating volume entry from snapshot in the process of " -"creating consistency group %(group)s from cgsnapshot %(cgsnap)s." -msgstr "" -"%(cgsnap)s cgsnapshot'ından %(group)s tutarlılık grubu oluşturma sürecinde, " -"anlık sistem görüntüsünden mantıksal sürücü girdisi oluşturulurken hata " -"meydana geldi." 
- -#, python-format -msgid "Error occurred when updating consistency group %(group_id)s." -msgstr "Tutarlılık grubu %(group_id)s güncellenirken hata meydana geldi." - -#, python-format -msgid "Error occurred while cloning backing: %s during retype." -msgstr "Destekleme kopyalanırken hata oluştu: retype sırasında %s." - -#, python-format -msgid "Error occurred while copying %(src)s to %(dst)s." -msgstr "%(src)s kaynağı %(dst)s hedefine kopyalanırken hata oluştu." - -#, python-format -msgid "Error occurred while copying image: %(id)s to volume: %(vol)s." -msgstr "İmaj kopyalanırken hata oluştu: %(id)s mantıksal sürücüye: %(vol)s." - -#, python-format -msgid "Error occurred while copying image: %(image_id)s to %(path)s." -msgstr "İmaj kopyalanırken hata oluştu: %(image_id)s %(path)s." - -msgid "Error occurred while creating temporary backing." -msgstr "Geçici destekleme oluşturulurken hata oluştu." - -#, python-format -msgid "Error occurred while creating volume: %(id)s from image: %(image_id)s." -msgstr "" -"Mantıksal sürücü oluşturulurken hata oluştu: imajdan %(id)s: %(image_id)s." - -#, python-format -msgid "" -"Error on execute %(command)s. Error code: %(exit_code)d Error msg: %(result)s" -msgstr "" -"%(command)s yürütümünde hata. Hata kodu: %(exit_code)d Hata iletisi: " -"%(result)s" - -#, python-format -msgid "" -"Error on execute command. Error code: %(exit_code)d Error msg: %(result)s" -msgstr "" -"Komut yürütme hatası. Hata kodu: %(exit_code)d Hata iletisi: %(result)s" - -msgid "Error parsing array from host capabilities." -msgstr "İstemci yeteneklerinden dizi ayrıştırma hatası." - -msgid "Error parsing array, pool, SLO and workload." -msgstr "Dizi, havuz, SLO ve iş yükü ayrıştırma hatası." - -msgid "Error parsing target pool name, array, and fast policy." -msgstr "Hedef havuz adı, dizisi ve fast ilkesi ayrıştırma hatası." - -#, python-format -msgid "" -"Error provisioning volume %(lun_name)s on %(volume_name)s. 
Details: %(ex)s" -msgstr "" -"%(volume_name)s üzerinde %(lun_name)s mantıksal sürücü hazırlama hatası. " -"Ayrıntılar: %(ex)s" - -msgid "Error querying thin pool about data_percent" -msgstr "data_percent ile ilgili ince havuz sorgularken hata" - -msgid "Error renaming logical volume" -msgstr "Mantıksal sürücü yeniden adlandırılırken hata" - -#, python-format -msgid "Error resolving host %(host)s. Error - %(e)s." -msgstr "%(host)s istemci çözülürken hata. Hata - %(e)s." - -#, python-format -msgid "Error running SSH command: \"%s\"." -msgstr "SSH komutu çalıştırma hatası: \"%s\"." - -#, python-format -msgid "Error running SSH command: %s" -msgstr "SSH komutu çalıştırılırken hata: %s" - -msgid "Error running command." -msgstr "Komut çalıştırma hatası." - -#, python-format -msgid "" -"Error scheduling %(volume_id)s from last vol-service: %(last_host)s : %(exc)s" -msgstr "" -"Son vol-service'den %(volume_id)s zamanlaması yapılırken hata: " -"%(last_host)s : %(exc)s" - -#, python-format -msgid "Error setting Flash Cache policy to %s - exception" -msgstr "%s için Flash Cache ilkesi ayarlanırken hata - istisna" - -#, python-format -msgid "Error unmapping volume: %s" -msgstr "Mantıksal sürücü eşleştirmesi kaldırılırken hata: %s" - -#, python-format -msgid "Exception cloning volume %(name)s from source volume %(source)s." -msgstr "" -"%(source)s kaynak mantıksal sürücüsünden %(name)s mantıksal sürücüsü " -"kopyalanırken olağandışı durum." - -#, python-format -msgid "Exception creating LUN %(name)s in pool %(pool)s." -msgstr "Havuz %(pool)s içinde LUN %(name)s oluşturulurken olağandışı durum." - -#, python-format -msgid "Exception creating vol %(name)s on pool %(pool)s." -msgstr "" -"%(pool)s havuzu üzerinde %(name)s mantıksal sürücüsü oluşturulurken " -"olağandışı durum." - -#, python-format -msgid "" -"Exception creating volume %(name)s from source %(source)s on share %(share)s." 
-msgstr "" -"%(share)s paylaşımı üzerinde %(source)s kaynağından %(name)s mantıksal " -"sürücüsü oluşturulurken olağandışı durum." - -#, python-format -msgid "Exception details: %s" -msgstr "İstisna ayrıntıları: %s" - -#, python-format -msgid "Exception during mounting %s" -msgstr "%s bağlama sırasında olağandışı durum" - -#, python-format -msgid "Exception during mounting %s." -msgstr "%s bağlama sırasında istisna." - -#, python-format -msgid "Exception during snapCPG revert: %s" -msgstr "snapCPG geri alınırken olağandışı durum: %s" - -#, python-format -msgid "Exception handling resource: %s" -msgstr "Kaynak işlenirken olağandışı durum: %s" - -msgid "Exception in string format operation" -msgstr "Karakter dizisi biçimi işlemde olağandışı durum" - -msgid "Exception loading extension." -msgstr "Uzantı yüklenirken olağandışı durum." - -#, python-format -msgid "Exception: %s" -msgstr "İstisna: %s" - -#, python-format -msgid "Exception: %s." -msgstr "İstisna: %s." - -#, python-format -msgid "Exists snapshot notification failed: %s" -msgstr "Varolan anlık sistem görüntü bildirimi başarısız oldu: %s" - -#, python-format -msgid "Exists volume notification failed: %s" -msgstr "Mantıksal sürücü bildirimi başarısız oldu: %s" - -msgid "Extend volume failed." -msgstr "Disk bölümü genişletme başarısız oldu." - -#, python-format -msgid "Extension of volume %s failed." -msgstr "%s mantıksal sürücüsünü genişletme başarısız oldu." - -msgid "FAST is not supported on this array." -msgstr "Bu dizide FAST desteklenmiyor." 
- -#, python-format -msgid "Failed collecting fcns database info for fabric %s" -msgstr "Fabric %s için fcns veritabanı bilgisi toplanamadı" - -#, python-format -msgid "Failed collecting name server info from fabric %s" -msgstr "Fabric %s'den ad sunucu bilgisi toplanamadı" - -#, python-format -msgid "Failed collecting nsshow info for fabric %s" -msgstr "Fabric %s için nsshow bilgisi toplanamadı" - -msgid "Failed collecting show fcns database for fabric" -msgstr "Fabric için show fcns veritabanı toplanamadı" - -#, python-format -msgid "Failed destroying volume entry %s" -msgstr "Mantıksal sürücü girdisi %s silinemedi" - -#, python-format -msgid "" -"Failed fetching snapshot %(snapshot_id)s bootable flag using the provided " -"glance snapshot %(snapshot_ref_id)s volume reference" -msgstr "" -"Verilen glance anlık sistem görüntüsü %(snapshot_ref_id)s mantıksal sürücü " -"kaynağı kullanarak anlık sistem görüntüsü %(snapshot_id)s önyükleme bayrağı " -"getirilemedi" - -#, python-format -msgid "Failed getting active zone set from fabric %s" -msgstr "Fabric %s'den etkin bölge ayarı alınamadı" - -#, python-format -msgid "Failed getting zone status from fabric %s" -msgstr "Fabric %s'den bölge durumu alınamadı" - -#, python-format -msgid "Failed image conversion during cache creation: %s" -msgstr "Önbellek oluşturma sırasında imaj dönüşümü başarısız oldu: %s" - -#, python-format -msgid "" -"Failed notifying about the volume action %(event)s for volume %(volume_id)s" -msgstr "" -"%(volume_id)s mantıksal sürücüsü için %(event)s mantıksal sürücü eylemi " -"bildirilemedi" - -#, python-format -msgid "Failed notifying on %(topic)s payload %(payload)s" -msgstr "%(topic)s üzerindeki %(payload)s yük bildirilemedi" - -#, python-format -msgid "Failed rolling back quota for %s reservations" -msgstr "%s ayrılmışları için kota geri alınamadı" - -#, python-format -msgid "" -"Failed setting source volume %(source_volid)s back to its initial " -"%(source_status)s status" -msgstr "" -"Kaynak 
mantıksal sürücü %(source_volid)s başlangıç %(source_status)s " -"durumuna geri ayarlanamadı" - -#, python-format -msgid "" -"Failed to Roll back to re-add volume %(volumeName)s to default storage group " -"for fast policy %(fastPolicyName)s. Please contact your sysadmin to get the " -"volume returned to the default storage group." -msgstr "" -"Fast ilkesi %(fastPolicyName)s için öntanımlı depolama grubuna " -"%(volumeName)s mantıksal sürücü yeniden ekleme geri alınamadı. Lütfen " -"mantıksal sürücüyü öntanımlı depolama grubuna geri döndürmek için sistem " -"yöneticinize başvurun." - -#, python-format -msgid "" -"Failed to Roll back to re-add volume %(volumeName)s to default storage group " -"for fast policy %(fastPolicyName)s: Please contact your sys admin to get the " -"volume re-added manually." -msgstr "" -"Fast ilkesi %(fastPolicyName)s için öntanımlı depolama grubuna " -"%(volumeName)s mantıksal sürücüsünü yeniden ekleme geri alınamadı: Lütfen " -"elle eklenmiş mantıksal sürücüyü almak için sistem yöneticinize danışın." - -#, python-format -msgid "" -"Failed to add %(volumeName)s to default storage group for fast policy " -"%(fastPolicyName)s." -msgstr "" -"%(fastPolicyName)s fast ilkesi için öntanımlı depolama grubuna " -"%(volumeName)s eklenemedi." - -#, python-format -msgid "Failed to add %s to cg." -msgstr "cg'ye %s eklenemedi." - -#, python-format -msgid "Failed to add device to handler %s" -msgstr "%s işleyicisi için aygıt eklenemedi" - -#, python-format -msgid "Failed to add initiator iqn %s to target" -msgstr "Hedefe başlatıcı iqn %s eklenemedi" - -#, python-format -msgid "Failed to add initiator to group for SCST target %s" -msgstr "SCST hedef %s için gruba başlatıcı eklenemedi" - -#, python-format -msgid "Failed to add multihost-access for volume \"%s\"." -msgstr "\"%s\" mantıksal sürücüsü için multihost-access eklenemedi." 
- -#, python-format -msgid "" -"Failed to add storage group %(storageGroupInstanceName)s to tier policy rule " -"%(tierPolicyRuleInstanceName)s." -msgstr "" -"%(storageGroupInstanceName)s depolama grubu %(tierPolicyRuleInstanceName)s " -"katman ilke kuralına eklenemedi." - -#, python-format -msgid "Failed to add target(port: %s)" -msgstr "Hedef eklenemedi(bağlantı noktası: %s)" - -msgid "Failed to authenticate user." -msgstr "Kullanıcı kimlik doğrulaması yapılamadı." - -#, python-format -msgid "Failed to close disk device %s" -msgstr "Disk aygıtı %s kapatılamadı" - -#, python-format -msgid "" -"Failed to collect return properties for volume %(vol)s and connector " -"%(conn)s." -msgstr "" -"%(vol)s mantıksal sürücüsü ve %(conn)s bağlayıcısı için dönüş özellikleri " -"toplanamadı." - -#, python-format -msgid "Failed to commit reservations %s" -msgstr "%s rezervasyonları gönderilemedi" - -#, python-format -msgid "Failed to copy %(src)s to %(dest)s." -msgstr "%(src)s kaynaktan %(dest)s hedefe kopyalanamadı." - -#, python-format -msgid "Failed to copy image %(image_id)s to volume: %(volume_id)s" -msgstr "%(image_id)s imajı mantıksal sürücüye kopyalanamadı: %(volume_id)s" - -#, python-format -msgid "Failed to copy image to volume: %(volume_id)s" -msgstr "İmaj mantıksal sürücüye kopyalanamadı: %(volume_id)s" - -#, python-format -msgid "Failed to copy volume %(src)s to %(dest)s." -msgstr "Disk bölümü kaynağını %(src)s hedefe %(dest)s kopyalama başarısız." - -#, python-format -msgid "Failed to copy volume %(vol1)s to %(vol2)s" -msgstr "%(vol1)s disk bölümünün %(vol2)s'ye kopyalaması başarısız" - -#, python-format -msgid "Failed to create %(conf)s for volume id:%(vol_id)s" -msgstr "Mantıksal sürücü kimliği:%(vol_id)s için %(conf)s oluşturulamadı" - -#, python-format -msgid "Failed to create CGSnapshot. Exception: %s." -msgstr "CGSnapshot oluşturulamadı. İstisna: %s." 
- -msgid "" -"Failed to create SOAP client.Check san_ip, username, password and make sure " -"the array version is compatible" -msgstr "" -"SOAP istemcisi oluşturulamadı. san_ip, kullanıcı adı, parolayı kontrol edin " -"ve dizi sürümünün uyumlu olduğundan emin olun" - -#, python-format -msgid "" -"Failed to create a first volume for storage group : %(storageGroupName)s." -msgstr "" -"Depolama grubu için birinci mantıksal sürücü oluşturulamadı : " -"%(storageGroupName)s." - -#, python-format -msgid "Failed to create blkio cgroup '%(name)s'." -msgstr "blkio cgroup '%(name)s' oluşturma başarısız." - -#, python-format -msgid "Failed to create clone of volume \"%s\"." -msgstr "\"%s\" mantıksal sürücüsünün klonu oluşturulamadı." - -#, python-format -msgid "Failed to create cloned volume %s." -msgstr "Kopyalanmış %s mantıksal sürücüsü oluşturulamadı." - -#, python-format -msgid "Failed to create consistency group %(group_id)s." -msgstr "Tutarlılık grubu %(group_id)s oluşturulamadı." - -#, python-format -msgid "" -"Failed to create default storage group for FAST policy : %(fastPolicyName)s." -msgstr "" -"FAST ilkesi için öntanımlı depolama grubu oluşturulamadı: %(fastPolicyName)s." - -#, python-format -msgid "Failed to create group to SCST target %s" -msgstr "SCST hedef %s için grup oluşturulamadı" - -#, python-format -msgid "Failed to create hardware id(s) on %(storageSystemName)s." -msgstr "%(storageSystemName)s üzerinde donanım kimlik(leri) oluşturulamadı." - -#, python-format -msgid "" -"Failed to create iscsi target for Volume ID: %(vol_id)s. Please ensure your " -"tgtd config file contains 'include %(volumes_dir)s/*'" -msgstr "" -"Mantıksal Sürücü Kimliği: %(vol_id)s için iscsi hedefi oluşturulamadı. 
" -"Lütfen tgtd yapılandırma dosyanızın 'include %(volumes_dir)s/*' içerdiğine " -"emin olun" - -#, python-format -msgid "Failed to create iscsi target for Volume ID: %(vol_id)s: %(e)s" -msgstr "" -"Mantıksal Sürücü Kimliği: %(vol_id)s için iscsi hedefi oluşturulamadı: %(e)s" - -#, python-format -msgid "Failed to create iscsi target for volume id:%s" -msgstr "Mantıksal sürücü kimliği için iscsi hedefi oluşturulamadı:%s" - -#, python-format -msgid "Failed to create iscsi target for volume id:%s." -msgstr "Mantıksal sürücü kimliği için iscsi hedefi oluşturulamadı:%s." - -#, python-format -msgid "Failed to create snapshot of volume \"%s\"." -msgstr "\"%s\" mantıksal sürücüsünün anlık sistem görüntüsü oluşturulamadı." - -#, python-format -msgid "Failed to create transfer record for %s" -msgstr "%s için aktarım kaydı oluşturma başarısız" - -#, python-format -msgid "Failed to create volume \"%s\"." -msgstr "\"%s\" disk bölümü oluşturma başarısız." - -#, python-format -msgid "Failed to create volume %s" -msgstr "%s mantıksal sürücüsü oluşturulamadı" - -#, python-format -msgid "Failed to create volume from snapshot \"%s\"." -msgstr "\"%s\" anlık sistem görüntüsünden mantıksal sürücü oluşturulamadı." - -#, python-format -msgid "Failed to created Cinder secure environment indicator file: %s" -msgstr "Cinder güvenli ortam gösterge dosyası oluşturulamadı: %s" - -#, python-format -msgid "Failed to delete snapshot %(snap)s of volume %(vol)s." -msgstr "" -"%(vol)s mantıksal sürücüsünün %(snap)s anlık sistem görüntüsü silinemedi." - -#, python-format -msgid "" -"Failed to delete the snapshot %(snap)s of CGSnapshot. Exception: " -"%(exception)s." -msgstr "" -"CGSnapshot'ın %(snap)s anlık sistem görüntüsü silinemedi. İstisna: " -"%(exception)s." - -#, python-format -msgid "Failed to delete the volume %(vol)s of CG. Exception: %(exception)s." -msgstr "CG'nin %(vol)s mantıksal sürücüsü silinemedi. İstisna: %(exception)s." - -#, python-format -msgid "Failed to delete volume \"%s\"." 
-msgstr "\"%s\" disk bölümü silme başarısız." - -#, python-format -msgid "Failed to delete volume %s" -msgstr "%s mantıksal sürücüsü silinemedi" - -#, python-format -msgid "Failed to ensure export of volume \"%s\"." -msgstr "\"%s\" mantıksal sürücüsünün dışa aktarımı sağlanamadı." - -#, python-format -msgid "Failed to ensure export of volume %s" -msgstr "%s mantıksal sürücüsünün dışa aktarımı sağlanamadı" - -#, python-format -msgid "Failed to export fiber channel target due to %s" -msgstr "%s nedeniyle fiber kanal hedefi dışa aktarılamadı" - -#, python-format -msgid "" -"Failed to extend_volume %(name)s from %(current_size)sGB to %(new_size)sGB." -msgstr "" -"Mantıksal sürücü %(name)s için %(current_size)sGB mevcut boyutundan " -"%(new_size)sGB boyutuna extend_volume işlemi başarısız oldu." - -#, python-format -msgid "Failed to find %(s)s. Result %(r)s" -msgstr "%(s)s bulunamadı. Sonuç %(r)s" - -#, python-format -msgid "Failed to get device number for throttling: %(error)s" -msgstr "Ayarlama için aygıt numarası alınamadı: %(error)s" - -#, python-format -msgid "" -"Failed to get driver initiator data for initiator %(initiator)s and " -"namespace %(namespace)s" -msgstr "" -"%(initiator)s başlatıcısı ve %(namespace)s ad alanı için sürücü başlatıcı " -"verisi alınamadı" - -#, python-format -msgid "Failed to get fiber channel info from storage due to %(stat)s" -msgstr "%(stat)s nedeniyle depolamadan fiber kanal bilgisi alınamadı" - -#, python-format -msgid "Failed to get fiber channel target from storage server due to %(stat)s" -msgstr "%(stat)s nedeniyle depolama sunucusundan fiber kanal hedefi alınamadı" - -#, python-format -msgid "Failed to get or create storage group %(storageGroupName)s." -msgstr "%(storageGroupName)s depolama grubu oluşturulamadı ya da alınamadı." - -#, python-format -msgid "Failed to get response: %s." -msgstr "Yanıt alınamadı: %s." - -#, python-format -msgid "Failed to get server info due to %(state)s." 
-msgstr "%(state)s nedeniyle sunucu bilgisi alınamadı." - -msgid "Failed to get sns table" -msgstr "Sns tablosu alınamadı" - -#, python-format -msgid "Failed to get target wwpns from storage due to %(stat)s" -msgstr "%(stat)s nedeniyle depolamadan hedef wwpns alınamadı" - -msgid "Failed to get updated stats from Datera Cluster." -msgstr "Datera Kümesinden güncellenmiş durum bilgileri alınamadı." - -msgid "Failed to get updated stats from Datera cluster." -msgstr "Datera kümesinden güncellenmiş durum bilgisi alınamadı." - -#, python-format -msgid "Failed to initialize connection to volume \"%s\"." -msgstr "\"%s\" disk bölümü bağlantı başlatma başarısız." - -msgid "Failed to initialize connection." -msgstr "Bağlantı başlatılamadı." - -msgid "Failed to initialize driver." -msgstr "Sürücü başlatma başarısız." - -#, python-format -msgid "Failed to issue df command for path %(path)s, error: %(error)s." -msgstr "%(path)s yolu için df komutu sonuçlanmadı, hata: %(error)s." - -#, python-format -msgid "Failed to issue mmgetstate command, error: %s." -msgstr "mmgetstate komutu sonuçlanmadı, hata: %s." - -#, python-format -msgid "Failed to issue mmlsattr command for path %(path)s, error: %(error)s." -msgstr "%(path)s yolu için mmlsattr komutu sonuçlanmadı, hata: %(error)s." - -#, python-format -msgid "Failed to issue mmlsattr command on path %(path)s, error: %(error)s" -msgstr "%(path)s yolundaki mmlsattr komutu sonuçlanmadı, hata: %(error)s" - -#, python-format -msgid "Failed to issue mmlsconfig command, error: %s." -msgstr "mmlsconfig komutu sonuçlanmadı, hata: %s." - -#, python-format -msgid "Failed to issue mmlsfs command for path %(path)s, error: %(error)s." -msgstr "%(path)s yolu için mmlsfs komutu sonuçlanmadı, hata: %(error)s." - -#, python-format -msgid "Failed to open iet session list for %s" -msgstr "%s için iet oturum listesi açılamadı" - -msgid "Failed to re-export volume, setting to ERROR." 
-msgstr "Mantıksal sürücü yeniden dışa aktarılamadı, HATA durumuna ayarlıyor." - -#, python-format -msgid "" -"Failed to remove %(volumeName)s from the default storage group for the FAST " -"Policy." -msgstr "" -"FAST İlkesi için öntanımlı depolama grubundan %(volumeName)s kaldırılamadı." - -#, python-format -msgid "Failed to remove %s from cg." -msgstr "cg'den %s kaldırılamadı." - -#, python-format -msgid "Failed to remove LUN %s" -msgstr "LUN %s kaldırılamadı" - -#, python-format -msgid "Failed to remove iscsi target for Volume ID: %(vol_id)s: %(e)s" -msgstr "" -"Mantıksal Sürücü Kimliği: %(vol_id)s için iscsi hedefi kaldırılamadı: %(e)s" - -#, python-format -msgid "Failed to remove iscsi target for volume id:%(vol_id)s: %(e)s" -msgstr "" -"Mantıksal sürücü kimliği:%(vol_id)s için iscsi hedefi kaldırılamadı: %(e)s" - -#, python-format -msgid "Failed to remove iscsi target for volume id:%s" -msgstr "Mantıksal sürücü kimliği için iscsi hedefi kaldırılamadı:%s" - -#, python-format -msgid "Failed to remove iscsi target for volume id:%s." -msgstr "Mantıksal sürücü kimliği için iscsi hedefi kaldırılamadı:%s." - -#, python-format -msgid "Failed to rename %(new_volume)s into %(volume)s." -msgstr "%(volume)s %(new_volume)s olarak yeniden adlandırılamadı." - -msgid "Failed to rename the created snapshot, reverting." -msgstr "" -"Oluşturulan anlık sistem görüntüsü yeniden adlandırılamadı, eski haline " -"döndürülüyor." - -#, python-format -msgid "Failed to resize volume %(volume_id)s, error: %(error)s." -msgstr "" -"Mantıksal sürücü %(volume_id)s yeniden boyutlandırılamadı, hata: %(error)s." - -#, python-format -msgid "Failed to retrieve volume SolidFire-ID: %s in get_by_account!" -msgstr "SolidFire-ID mantıksal sürücüsü alınamadı: %s get_by_account! " - -#, python-format -msgid "" -"Failed to return volume %(volumeName)s to original storage pool. Please " -"contact your system administrator to return it to the correct location." 
-msgstr "" -"Mantıksal sürücü %(volumeName)s özgün depolama havuzuna dönemedi. Lütfen " -"doğru konuma döndürmek için sistem yöneticinize başvurun." - -#, python-format -msgid "Failed to roll back reservations %s" -msgstr "%s rezervasyonları geri alma başarısız" - -#, python-format -msgid "Failed to run task %(name)s: %(cause)s" -msgstr "%(name)s adındaki görevi çalıştırma başarısız: %(cause)s" - -#, python-format -msgid "Failed to schedule_%(method)s: %(ex)s" -msgstr "schedule_%(method)s başarısız oldu: %(ex)s" - -#, python-format -msgid "Failed to send request: %s." -msgstr "İstek gönderilemedi: %s." - -#, python-format -msgid "Failed to set 'enable' attribute for SCST target %s" -msgstr "SCST hedef %s öznitelik 'enable' ayarlanamadı" - -#, python-format -msgid "Failed to set attribute for enable target driver %s" -msgstr "%s hedef sürücüsünü etkinleştirmek için öznitelik ayarlanamadı" - -msgid "Failed to setup the Dell EqualLogic driver." -msgstr "Dell EqualLogic sürücüsü kurulamadı." - -msgid "Failed to shutdown horcm." -msgstr "Horcm kapatılamadı." - -#, python-format -msgid "Failed to snap Consistency Group %s" -msgstr "Tutarlılık Grubu %s anlık sistem görüntüsü alınamadı" - -msgid "Failed to start horcm." -msgstr "Horcm başlatılamadı." - -msgid "Failed to terminate connection" -msgstr "Bağlantı sonlandırılamadı" - -#, python-format -msgid "Failed to terminate connection %(initiator)s %(vol)s" -msgstr "%(initiator)s %(vol)s bağlantısı sonlandırılamadı" - -#, python-format -msgid "Failed to terminate connection to volume \"%s\"." -msgstr "\"%s\" disk bölümü bağlantı sonlandırma başarısız." 
- -#, python-format -msgid "Failed to umount %(share)s, reason=%(stderr)s" -msgstr "%(share)s ayırma başarısız, nedeni=%(stderr)s" - -#, python-format -msgid "" -"Failed to update %(conf)s for volume id %(vol_id)s after removing iscsi " -"target" -msgstr "" -"iscsi hedefi kaldırıldıktan sonra mantıksal sürücü kimliği %(vol_id)s için " -"%(conf)s güncellenemedi" - -#, python-format -msgid "Failed to update %(conf)s for volume id:%(vol_id)s" -msgstr "Mantıksal sürücü kimliği:%(vol_id)s için %(conf)s güncellenemedi" - -#, python-format -msgid "" -"Failed to update %(volume_id)s metadata using the provided snapshot " -"%(snapshot_id)s metadata." -msgstr "" -"Verilen anlık sistem görüntüsü %(snapshot_id)s metadata'sı kullanılarak " -"%(volume_id)s metadata'sı güncellenemedi." - -#, python-format -msgid "Failed to update quota donating volume transfer id %s" -msgstr "%s mantıksal sürücü aktarım kimliğine verilen kota güncellenemedi" - -#, python-format -msgid "Failed to update quota for consistency group %s." -msgstr "Tutarlılık grubu %s için kota güncellenemedi." - -#, python-format -msgid "Failed to update quota for deleting volume: %s" -msgstr "Mantıksal sürücüyü silmek için kota güncellenemedi: %s" - -msgid "Failed to update quota while deleting volume." -msgstr "Mantıksal sürücü silinirken kota güncellenemedi." - -msgid "Failed to update usages deleting backup" -msgstr "Kullanımları güncelleme başarısız yedek siliniyor" - -msgid "Failed to update usages deleting snapshot" -msgstr "Anlık sistem görüntüsü silinirken kullanımlar güncellenemedi" - -msgid "Failed to update usages deleting volume." -msgstr "Mantıksal sürücü silme kullanımları güncellenemedi." - -#, python-format -msgid "Failed to update volume status: %s" -msgstr "Mantıksal sürücü durumu güncellenemedi: %s" - -#, python-format -msgid "" -"Failed to verify that volume was added to storage group for FAST policy: " -"%(fastPolicyName)s." 
-msgstr "" -"FAST ilkesi için mantıksal sürücünün depolama grubuna eklenmesi " -"doğrulanamadı: %(fastPolicyName)s." - -msgid "Failed to write in /etc/scst.conf." -msgstr "/etc/scst.conf dosyasına yazılamadı." - -#, python-format -msgid "" -"Failed updating %(snapshot_id)s metadata using the provided volumes " -"%(volume_id)s metadata" -msgstr "" -"Verilen %(volume_id)s mantıksal sürücü metadata'sı kullanarak " -"%(snapshot_id)s metadata'sı güncellenemedi" - -#, python-format -msgid "" -"Failed updating model of volume %(volume_id)s with creation provided model " -"%(model)s" -msgstr "" -"Verilen %(model)s modelini oluşturma ile %(volume_id)s mantıksal sürücü " -"modeli güncellenemedi" - -#, python-format -msgid "" -"Failed updating model of volume %(volume_id)s with driver provided model " -"%(model)s" -msgstr "" -"Verilen %(model)s model sürücü ile %(volume_id)s mantıksal sürücü modeli " -"güncellemesi başarısız oldu" - -#, python-format -msgid "" -"Failed updating snapshot metadata using the provided volumes %(volume_id)s " -"metadata" -msgstr "" -"Verilen mantıksal sürücü %(volume_id)s metadata'sı kullanılarak anlık sistem " -"görüntü metadata güncellemesi başarısız oldu" - -#, python-format -msgid "Failed updating volume %(volume_id)s bootable flag to true" -msgstr "" -"Mantıksal sürücü %(volume_id)s önyükleme bayrağı doğru olarak güncellenemedi" - -#, python-format -msgid "Failed updating volume %(volume_id)s with %(update)s" -msgstr "%(update)s ile %(volume_id)s mantıksal sürücüsü güncellenemedi" - -#, python-format -msgid "Failed updating volume %(volume_id)s with %(updates)s" -msgstr "" -"%(updates)s güncellemeleri ile %(volume_id)s mantıksal sürücü güncellenemedi" - -#, python-format -msgid "Failure deleting staged tmp LUN %s." -msgstr "Hata silme tmp LUN %s hazırladı." - -msgid "Fetch volume pool name failed." -msgstr "Mantıksal sürücü havuz adı getirme başarısız oldu." - -#, python-format -msgid "" -"FibreChannelDriver validate_connector failed. 
No '%(setting)s'. Make sure " -"HBA state is Online." -msgstr "" -"FibreChannelDriver validate_connector başarısız oldu. '%(setting)s' yok. HBA " -"durumunun çevrim içi olduğuna emin olun." - -#, python-format -msgid "Flexvisor failed to get event %(volume)s (%(status)s)." -msgstr "Flexvisor %(volume)s (%(status)s) olayı alamadı." - -#, python-format -msgid "Flexvisor failed to get pool %(id)s info." -msgstr "Flexvisor havuz %(id)s bilgisini alamadı." - -#, python-format -msgid "Flexvisor failed to get pool list due to %s." -msgstr "Flexvisor %s nedeniyle havuz listesini alamadı." - -#, python-format -msgid "Flexvisor failed to get pool list.(Error: %d)" -msgstr "Flexvisor havuz listesini alamadı.(Hata: %d)" - -#, python-format -msgid "Found %(count)s volumes mapped to id: %(uuid)s." -msgstr "Şu kimliğe eşleştirilmiş %(count)s mantıksal sürücü bulundu: %(uuid)s." - -msgid "Free capacity not set: volume node info collection broken." -msgstr "" -"Boş kapasite ayarlı değil: mantıksal sürücü düğüm bilgisi koleksiyonu bozuk." - -#, python-format -msgid "GPFS is not active. Detailed output: %s." -msgstr "GPFS etkin değil. Detaylı çıktı: %s." - -msgid "Get method error." -msgstr "Get metodu hatası." - -#, python-format -msgid "ISCSI discovery attempt failed for:%s" -msgstr "ISCSI keşif girişimi başarısız oldu:%s" - -#, python-format -msgid "Invalid API object: %s" -msgstr "Geçersiz API nesnesi: %s" - -#, python-format -msgid "Invalid JSON: %s" -msgstr "Geçersiz JSON: %s" - -#, python-format -msgid "Invalid ReplayList return: %s" -msgstr "Geçersiz ReplayList dönüşü: %s" - -#, python-format -msgid "JSON encode params %(param)s error: %(status)s." -msgstr "JSON %(param)s parametre şifreleme hatası: %(status)s." - -#, python-format -msgid "JSON transfer error: %s." -msgstr "JSON aktarım hatası: %s." - -#, python-format -msgid "LUN %(path)s geometry failed. Message - %(msg)s" -msgstr "LUN %(path)s geometrisi başarısız oldu. İleti - %(msg)s" - -msgid "LUN extend failed!" 
-msgstr "LUN genişletme işlemi başarısız oldu!" - -#, python-format -msgid "Looks like masking view: %(maskingViewName)s has recently been deleted." -msgstr "" -"Maskeleme görünümü gibi görünür: %(maskingViewName)s yakın zamanda silindi." - -#, python-format -msgid "Lun %s has dependent snapshots, skipping lun deletion." -msgstr "Lun %s bağımlı anlık görüntülere sahip, lun silme işlemi atlanıyor." - -#, python-format -msgid "Lun create for %s failed!" -msgstr "%s için Lun oluşturma başarısız oldu!" - -#, python-format -msgid "Lun create snapshot for volume %(vol)s snapshot %(snap)s failed!" -msgstr "" -"Lun %(vol)s mantıksal sürücüsü %(snap)s anlık görüntüsü için anlık görüntü " -"oluşturamadı!" - -#, python-format -msgid "Lun delete for %s failed!" -msgstr "%s için Lun silme başarısız oldu!" - -msgid "Lun mapping returned null!" -msgstr "Lun eşleştirmesi boş değer döndü!" - -#, python-format -msgid "MSGID%(id)04d-E: %(msg)s" -msgstr "MSGID%(id)04d-E: %(msg)s" - -#, python-format -msgid "" -"Masking View creation or retrieval was not successful for masking view " -"%(maskingViewName)s. Attempting rollback." -msgstr "" -"%(maskingViewName)s maskeleme görünümü için Maskeleme Görünümü oluşturma ya " -"da alma başarılı değil. Geri alma deneniyor." - -#, python-format -msgid "" -"Max retries reached deleting backup %(basename)s image of volume %(volume)s." -msgstr "" -"%(volume)s mantıksal sürücüsünün %(basename)s yedek imajı silerken azami " -"yeniden denemeye ulaşıldı." - -#, python-format -msgid "Message: %s" -msgstr "İleti: %s" - -msgid "Model update failed." -msgstr "Model güncellemesi başarısız oldu." - -#, python-format -msgid "Mount failure for %(share)s after %(count)d attempts." -msgstr "%(share)s paylaşımları için %(count)d denemeden sonra bağlama hatası." - -#, python-format -msgid "Mount failure for %(share)s." -msgstr "%(share)s için bağlama hatası." 
- -#, python-format -msgid "Multiple replay profiles under name %s" -msgstr "%s adı altında birden fazla tekrar profilleri" - -msgid "No CLI output for firmware version check" -msgstr "Donanım yazılımı sürüm kontrolü için hiçbir CLI çıktısı yok" - -#, python-format -msgid "" -"No action required. Volume: %(volumeName)s is already part of pool: %(pool)s." -msgstr "" -"Hiçbir eylem gerekli değil. Mantıksal sürücü: %(volumeName)s zaten havuzun " -"bir parçasıdır: %(pool)s." - -#, python-format -msgid "" -"No action required. Volume: %(volumeName)s is already part of slo/workload " -"combination: %(targetCombination)s." -msgstr "" -"Hiçbir eylem gerekmez. Mantıksal sürücü: %(volumeName)s zaten slo/workload " -"birleşiminin parçasıdır: %(targetCombination)s." - -#, python-format -msgid "" -"No snapshots found in database, but %(path)s has backing file " -"%(backing_file)s!" -msgstr "" -"Veritabanında hiçbir anlık sistem görüntüsü bulunamadı, ancak %(path)s " -"yolunda destek dosyaları %(backing_file)s var!" - -#, python-format -msgid "" -"One of the components of the original masking view %(maskingViewName)s " -"cannot be retrieved so please contact your system administrator to check " -"that the correct initiator(s) are part of masking." -msgstr "" -"%(maskingViewName)s özgün maskeleme görünümünün bileşenlerinden biri " -"alınamıyor, bu yüzden lütfen doğru başlatıcıların maskelemenin bir parçası " -"olup olmadığını kontrol etmek için sistem yöneticinize başvurun." - -#, python-format -msgid "" -"Only SLO/workload migration within the same SRP Pool is supported in this " -"version The source pool : %(sourcePoolName)s does not match the target " -"array: %(targetPoolName)s. Skipping storage-assisted migration." -msgstr "" -"Bu sürümde aynı SRP havuzu içinde sadece SLO/iş yükü göçü destekleniyor " -"Kaynak havuz : %(sourcePoolName)s hedef dizi ile eşleşmez: " -"%(targetPoolName)s. Depolama destekli göç atlanıyor." 
- -msgid "Only available volumes can be migrated between different protocols." -msgstr "" -"Sadece mevcut mantıksal sürücüler farklı protokoller arasında taşınabilir." - -#, python-format -msgid "Pipe1 failed - %s " -msgstr "Pipe1 başarısız - %s " - -#, python-format -msgid "Pipe2 failed - %s " -msgstr "Pipe2 başarısız - %s " - -#, python-format -msgid "" -"Purity host %(host_name)s is managed by Cinder but CHAP credentials could " -"not be retrieved from the Cinder database." -msgstr "" -"Purity istemcisi %(host_name)s Cinder tarafından yönetilir ancak CHAP kimlik " -"bilgileri Cinder veritabanından alınamaz." - -#, python-format -msgid "" -"Purity host %(host_name)s is not managed by Cinder and can't have CHAP " -"credentials modified. Remove IQN %(iqn)s from the host to resolve this issue." -msgstr "" -"Purity istemcisi %(host_name)s Cinder tarafından yönetilemez ve CHAP kimlik " -"bilgileri değiştirilemez. Bu sorunu çözmek için istemciden IQN %(iqn)s'ini " -"kaldır." - -#, python-format -msgid "REST Not Available: %s" -msgstr "REST Kullanılamaz: %s" - -#, python-format -msgid "Re-throwing Exception %s" -msgstr "İstisna yeniden fırlatılıyor %s" - -#, python-format -msgid "Read response raised an exception: %s." -msgstr "Okuma yanıtı bir istisna oluşturdu: %s." - -msgid "Recovered model server connection!" -msgstr "Kurtarılmış model sunucu bağlantısı!" - -#, python-format -msgid "Recovering from a failed execute. Try number %s" -msgstr "Başarısız bir yürütme kurtarılıyor. %s numara dene" - -msgid "Replication must be specified as ' True' or ' False'." -msgstr "Kopyalama ' True' ya da ' False' olarak belirtilmiş olmalıdır." - -msgid "" -"Requested to setup thin provisioning, however current LVM version does not " -"support it." -msgstr "" -"İnce hazırlık kurulumu istendi, ancak mevcut LVM sürümü bunu desteklemiyor." - -#, python-format -msgid "Resizing %s failed. Cleaning volume." -msgstr "" -"%s'nin yeniden boyutlandırılması başarısız oldu. 
Mantıksal sürücü " -"temizleniyor." - -#, python-format -msgid "Restore to volume %(volume)s finished with error - %(error)s." -msgstr "%(volume)s mantıksal sürücü geri yüklemesi hata ile bitti - %(error)s." - -#, python-format -msgid "Retry %(retry)s times: %(method)s Failed %(rc)s: %(reason)s" -msgstr "" -"Yeniden deneme %(retry)s süreleri: %(method)s Başarısız oldu %(rc)s: " -"%(reason)s" - -#, python-format -msgid "" -"SLO: %(slo)s is not valid. Valid values are Bronze, Silver, Gold, Platinum, " -"Diamond, Optimized, NONE." -msgstr "" -"SLO: %(slo)s geçersiz. Geçerli değerler Bronz, Gümüş, Altın, Platin, Elmas, " -"Optimize Edilmiş, HİÇBİRİ." - -msgid "" -"ScVolume returned success with empty payload. Attempting to locate volume" -msgstr "" -"ScVolume boş yük ile başarı döndürdü. Mantıksal sürücü konumlandırma " -"deneniyor" - -#, python-format -msgid "Server Busy retry request: %s" -msgstr "Sunucu Meşgul tekrar deneme isteği: %s" - -#, python-format -msgid "Setting QoS for %s failed" -msgstr "%s için QoS ayarı başarısız oldu" - -#, python-format -msgid "" -"Share %s ignored due to invalid format. Must be of form address:/export." -msgstr "" -"%s paylaşımı geçersiz biçim nedeniyle yoksayılır. Form adresi olmalıdır:/" -"export." - -#, python-format -msgid "" -"Skipping remove_export. No iscsi_target ispresently exported for volume: %s" -msgstr "" -"remove_export atlanıyor. Mantıksal sürücü için şimdilik hiçbir iscsi_target " -"dışa aktarılmadı: %s" - -#, python-format -msgid "Snapshot %s: has clones" -msgstr "Anlık sistem görüntüsü %s: kopyalara sahip" - -msgid "Snapshot did not exist. It will not be deleted" -msgstr "Anlık sistem görüntüsü olmasaydı silinemeyecekti" - -#, python-format -msgid "Source snapshot %(snapshot_id)s cannot be found." -msgstr "Kaynak anlık sistem görüntüsü %(snapshot_id)s bulunamıyor." - -#, python-format -msgid "Source snapshot cannot be found for target volume %(volume_id)s." 
-msgstr "" -"Hedef mantıksal sürücü %(volume_id)s için kaynak anlık sistem görüntüsü " -"bulunamadı." - -#, python-format -msgid "StdErr :%s" -msgstr "StdErr :%s" - -#, python-format -msgid "StdOut :%s" -msgstr "StdOut :%s" - -#, python-format -msgid "Storage profile: %s cannot be found in vCenter." -msgstr "Depolama profili: %s vCenter'da bulunamıyor." - -#, python-format -msgid "TSM [%s] not found in CloudByte storage." -msgstr "CloudByte depolamasında TSM [%s] bulunamadı." - -msgid "The Flexvisor service is unavailable." -msgstr "Flexvisor servisi kullanılabilir değil." - -msgid "The connector does not contain the required information." -msgstr "Bağlayıcı gerekli bilgileri içermez." - -msgid "" -"The connector does not contain the required information: initiator is missing" -msgstr "Bağlayıcı gerekli bilgileri içermiyor: başlatıcı eksik" - -msgid "" -"The connector does not contain the required information: wwpns is missing" -msgstr "Bağlayıcı gerekli bilgileri içermiyor: wwpns eksik" - -#, python-format -msgid "" -"The source array : %(sourceArraySerialNumber)s does not match the target " -"array: %(targetArraySerialNumber)s skipping storage-assisted migration." -msgstr "" -"Kaynak dizi : %(sourceArraySerialNumber)s hedef dizi ile eşleşmiyor: " -"%(targetArraySerialNumber)s, depolama destekli göç atlanıyor." - -#, python-format -msgid "" -"The source array : %(sourceArraySerialNumber)s does not match the target " -"array: %(targetArraySerialNumber)s, skipping storage-assisted migration." -msgstr "" -"Kaynak dizi : %(sourceArraySerialNumber)s hedef dizi ile eşleşmiyor: " -"%(targetArraySerialNumber)s, depolama destekli göç atlanıyor." - -#, python-format -msgid "The source volume %(volume_id)s cannot be found." -msgstr "%(volume_id)s kaynak disk bölümü bulunamıyor." - -#, python-format -msgid "The volume driver requires %(data)s in the connector." -msgstr "Mantıksal sürücü bağlayıcıda %(data)s ister." 
- -msgid "The volume driver requires the iSCSI initiator name in the connector." -msgstr "Mantıksal sürücü bağlayıcıda iSCSI başlatıcı adı gerektirir." - -#, python-format -msgid "There is no valid datastore satisfying requirements: %s." -msgstr "Gereksinimleri karşılayacak geçerli hiçbir verideposu yok: %s." - -msgid "This usually means the volume was never successfully created." -msgstr "" -"Bu genellikle mantıksal sürücü asla başarılı bir şekilde oluşturulamaz " -"anlamına gelir." - -msgid "Tiering Policy is not supported on this array." -msgstr "Bu dizide Katmanlama İlkesi desteklenmiyor." - -#, python-format -msgid "Trying to create snapshot by non-existent LV: %s" -msgstr "Varolmayan LV ile anlık sistem görüntüsü oluşturma deneniyor: %s" - -#, python-format -msgid "URLError: %s" -msgstr "URLHata: %s" - -#, python-format -msgid "Unable to create folder path %s" -msgstr "%s klasör yolu oluşturulamadı" - -#, python-format -msgid "" -"Unable to create or get default storage group for FAST policy: " -"%(fastPolicyName)s." -msgstr "" -"FAST ilkesi için öntanımlı depolama grubu alınamadı ya da oluşturulamadı: " -"%(fastPolicyName)s." - -#, python-format -msgid "Unable to create volume %s from replay" -msgstr "Tekrardan %s mantıksal sürücüsü oluşturulamadı" - -#, python-format -msgid "Unable to create volume. Volume driver %s not initialized" -msgstr "Mantıksal sürücü oluşturulamadı. Mantıksal sürücü %s ilklendirilmemiş" - -msgid "Unable to delete busy volume." -msgstr "Kullanılan mantıksal sürücü silinemedi." - -#, python-format -msgid "Unable to delete due to existing snapshot for volume: %s" -msgstr "" -"Mantıksal sürücü için varolan anlık sistem görüntüsü nedeniyle silinemedi: %s" - -msgid "" -"Unable to delete the destination volume during volume migration, (NOTE: " -"database record needs to be deleted)." -msgstr "" -"Disk bölümü geçişi sırasında hedef disk bölümü silinemedi, (NOT: veritabanı " -"kaydının silinmesi gerekir)." 
- -#, python-format -msgid "Unable to determine whether %(volumeName)s is composite or not." -msgstr "" -"%(volumeName)s mantıksal sürücüsünün bileşik olup olmadığı belirlenemedi." - -#, python-format -msgid "Unable to find VG: %s" -msgstr "VG bulunamadı: %s" - -#, python-format -msgid "" -"Unable to find default storage group for FAST policy : %(fastPolicyName)s." -msgstr "" -"FAST ilkesi için öntanımlı depolama grubu bulunamadı : %(fastPolicyName)s." - -msgid "Unable to get associated pool of volume." -msgstr "Mantıksal sürücünün ilişkili olduğu havuz alınamadı." - -#, python-format -msgid "Unable to get default storage group %(defaultSgName)s." -msgstr "%(defaultSgName)s öntanımlı depolama grubu alınamadı." - -msgid "Unable to get device mapping from network." -msgstr "Ağdan aygıt eşleştirmesi alınamadı." - -#, python-format -msgid "Unable to get policy rule for fast policy: %(fastPolicyName)s." -msgstr "Fast ilkesi için ilke kuralı alınamadı: %(fastPolicyName)s." - -#, python-format -msgid "Unable to locate Volume Group %s" -msgstr "Mantıksal Sürücü Grubu %s yerleştirilemedi" - -#, python-format -msgid "Unable to manage existing volume. Volume driver %s not initialized." -msgstr "" -"Varolan mantıksal sürücü yönetilemedi. Mantıksal sürücü %s ilklendirilmemiş." - -#, python-format -msgid "Unable to map %(vol)s to %(srv)s" -msgstr "%(vol)s %(srv)s sunucusuna eşleştirilemedi" - -#, python-format -msgid "Unable to rename the logical volume for volume: %s" -msgstr "Mantıksal sürücü yeniden adlandırılamadı: %s" - -#, python-format -msgid "Unable to retrieve VolumeConfiguration: %s" -msgstr "Mantıksal Sürücü Yapılandırması alınamadı: %s" - -#, python-format -msgid "Unable to retrieve pool instance of %(poolName)s on array %(array)s." -msgstr "%(array)s dizisindeki %(poolName)s havuz örneği alınamadı." - -msgid "Unexpected build error:" -msgstr "Beklenmeyen inşa hatası:" - -msgid "Unexpected error occurs in horcm." -msgstr "Horcm'da beklenmeyen bir hata meydana geldi." 
- -msgid "Unexpected error occurs in snm2." -msgstr "snm2 komutunda beklenmeyen bir hata meydana geldi." - -#, python-format -msgid "Unexpected error when retype() revert tried to deleteVolumeSet(%s)" -msgstr "deleteVolumeSet(%s) için retype() geri alınırken beklenmeyen hata" - -#, python-format -msgid "Unexpected error when retype() tried to deleteVolumeSet(%s)" -msgstr "deleteVolumeSet(%s) için retype() denenirken beklenmeyen hata" - -#, python-format -msgid "Unknown exception in post clone resize LUN %s." -msgstr "Kopyalama sonrası LUN %s yeniden boyutlandırılırken bilinmeyen hata." - -#, python-format -msgid "" -"Update consistency group failed to add volume-%(volume_id)s: VolumeNotFound." -msgstr "" -"Mantıksal sürücü-%(volume_id)s eklemek için tutarlılık grubu güncellemesi " -"başarısız oldu: MantıksalSürücüBulunamadı." - -#, python-format -msgid "" -"Update consistency group failed to remove volume-%(volume_id)s: " -"VolumeNotFound." -msgstr "" -"Mantıksal Sürücü-%(volume_id)s kaldırmak için tutarlılık grubu güncellemesi " -"başarısız oldu: MantıksalSürücüBulunamadı." - -msgid "Update snapshot usages failed." -msgstr "Anlık sistem görüntü kullanımları güncellemesi başarısız oldu." - -msgid "Update volume model for transfer operation failed." -msgstr "" -"Aktarım işlemi için mantıksal sürücü modeli güncellemesi başarısız oldu." - -#, python-format -msgid "Upload volume to image encountered an error (image-id: %(image_id)s)." -msgstr "" -"İmaja mantıksal sürücü yükleme bir hata ile karşılaştı (imaj bilgisi: " -"%(image_id)s)." - -msgid "VGC-CLUSTER command blocked and cancelled." -msgstr "VGC-CLUSTER komutu bloklandı ve durduruldu." - -#, python-format -msgid "Version string '%s' is not parseable" -msgstr "'%s' sürüm karakter dizisi ayrıştırılabilir değildir" - -#, python-format -msgid "Virtual disk device of backing: %s not found." -msgstr "Desteklemenin sanal disk aygıtı: %s bulunamadı." - -#, python-format -msgid "Vol copy job status %s." 
-msgstr "Mantıksal sürücü kopyalama iş durumu %s." - -#, python-format -msgid "" -"Volume %(name)s is not suitable for storage assisted migration using retype." -msgstr "" -"Retype kullanılan depolama destekli göç için %(name)s mantıksal sürücüsü " -"uygun değil." - -#, python-format -msgid "Volume %(name)s not found on the array. No volume to delete." -msgstr "" -"Dizide mantıksal sürücü %(name)s bulunamadı. Silinecek hiçbir mantıksal " -"sürücü yok." - -#, python-format -msgid "" -"Volume %(name)s not found on the array. No volume to migrate using retype." -msgstr "" -"Dizide mantıksal sürücü %(name)s bulunamadı. Retype kullanarak göç için " -"hiçbir mantıksal sürücü yok." - -#, python-format -msgid "" -"Volume %(volumeid)s failed to send assign command, ret: %(status)s output: " -"%(output)s" -msgstr "" -"Mantıksal sürücü %(volumeid)s atama komutunu gönderemedi, dönüş: %(status)s " -"çıktı: %(output)s" - -#, python-format -msgid "Volume %s doesn't exist on array." -msgstr "Mantıksal sürücü %s dizide yok." - -#, python-format -msgid "Volume %s, not found on SF Cluster." -msgstr "Mantıksal %s, SF Kümesinde bulunamadı." - -#, python-format -msgid "Volume %s: create failed" -msgstr "Mantıksal sürücü %s: oluşturma başarısız oldu" - -#, python-format -msgid "" -"Volume %s: driver error when trying to retype, falling back to generic " -"mechanism." -msgstr "" -"%s Mantıksal Sürücü: retype denenirken sürücü hatası, genel mekanizmaya geri " -"dönülüyor." - -#, python-format -msgid "Volume %s: rescheduling failed" -msgstr "Mantıksal sürücü %s: yeniden zamanlama başarısız oldu" - -#, python-format -msgid "Volume %s: update volume state failed." -msgstr "" -"Mantıksal sürücü %s: mantıksal sürücü durumu güncelleme başarısız oldu." - -#, python-format -msgid "" -"Volume : %(volumeName)s has not been added to target storage group " -"%(storageGroup)s." -msgstr "" -"Mantıksal sürücü : %(volumeName)s %(storageGroup)s hedef depolama grubuna " -"eklenmemiş." 
- -#, python-format -msgid "" -"Volume : %(volumeName)s has not been removed from source storage group " -"%(storageGroup)s." -msgstr "" -"Mantıksal Sürücü : %(volumeName)s kaynak depolama grubundan %(storageGroup)s " -"kaldırılmadı." - -#, python-format -msgid "" -"Volume : %(volumeName)s. was not successfully migrated to target pool " -"%(targetPoolName)s." -msgstr "" -"Mantıksal sürücü %(volumeName)s. hedef havuza %(targetPoolName)s başarılı " -"bir şekilde taşınamadı." - -#, python-format -msgid "" -"Volume ID %s was not found on the SolidFire Cluster while attempting " -"accept_transfer operation!" -msgstr "" -"accept_transfer işlemi denenirken SolidFire Kümesinde %s Mantıksal Sürücü " -"Bilgisi bulunamadı!" - -#, python-format -msgid "" -"Volume ID %s was not found on the SolidFire Cluster while attempting " -"attach_volume operation!" -msgstr "" -"attach_volume işlemi denenirken SolidFire Kümesinde %s Mantıksal Sürücü " -"Bilgisi bulunamadı!" - -#, python-format -msgid "" -"Volume ID %s was not found on the SolidFire Cluster while attempting " -"delete_volume operation!" -msgstr "" -"delete_volume işlemi denenirken SolidFire Kümesinde Mantıksal Sürücü Bilgisi " -"%s bulunamadı!" - -#, python-format -msgid "" -"Volume ID %s was not found on the SolidFire Cluster while attempting " -"detach_volume operation!" -msgstr "" -"detach_volume işlemi denenirken SolidFire Kümesinde %s Mantıksal Sürücü " -"Bilgisi bulunamadı!" - -#, python-format -msgid "" -"Volume ID %s was not found on the SolidFire Cluster while attempting " -"extend_volume operation!" -msgstr "" -"extend_volume işlemi denenirken SolidFire Kümesinde %s Mantıksal Sürücü " -"Bilgisi bulunamadı!" - -msgid "Volume did not exist. 
It will not be deleted" -msgstr "Mantıksal sürücü olmasaydı silinemeyecekti" - -#, python-format -msgid "Volume driver %s not initialized" -msgstr "Mantıksal sürücü sürücüsü %s başlatılamadı" - -msgid "Volume in unexpected state" -msgstr "Beklenmeyen durumdaki mantıksal sürücü" - -#, python-format -msgid "Volume in unexpected state %s, expected awaiting-transfer" -msgstr "" -"Mantıksal sürücü beklenmeyen %s durumunda, beklenen durum bekleyen aktarım" - -msgid "Volume must be detached for clone operation." -msgstr "Mantıksal sürücü kopyalama işlemi için ayrılmış olmalıdır." - -#, python-format -msgid "VolumeType %s deletion failed, VolumeType in use." -msgstr "VolumeType %s silme başarısız oldu, VolumeType kullanımda." - -#, python-format -msgid "" -"WebDAV operation failed with error code: %(code)s reason: %(reason)s Retry " -"attempt %(retry)s in progress." -msgstr "" -"WebDAV işlemi hata kodu: %(code)s neden: %(reason)s ilerleme sırasındaki " -"tekrar deneme girişimi %(retry)s ile başarısız oldu." - -#, python-format -msgid "WebDAV returned with %(code)s error during %(method)s call." -msgstr "WebDAV %(method)s çağrısı sırasında %(code)s hatası döndürdü." - -#, python-format -msgid "" -"Workload: %(workload)s is not valid. Valid values are DSS_REP, DSS, OLTP, " -"OLTP_REP, NONE." -msgstr "" -"İşyükü: %(workload)s geçerli değil. Geçerli değerler DSS_REP, DSS, OLTP, " -"OLTP_REP, HİÇBİRİ." - -msgid "_find_mappings: volume is not active" -msgstr "_find_mappings: mantıksal sürücü etkin değil" - -#, python-format -msgid "" -"_rm_vdisk_copy_op: Volume %(vol)s does not have the specified vdisk copy " -"operation: orig=%(orig)s new=%(new)s." -msgstr "" -"_rm_vdisk_copy_op: %(vol)s mantıksal sürücü belirtilen vdisk kopyalama " -"işlemine sahip değil: orjinal=%(orig)s yeni=%(new)s." - -#, python-format -msgid "" -"_rm_vdisk_copy_op: Volume %(vol)s metadata does not have the specified vdisk " -"copy operation: orig=%(orig)s new=%(new)s." 
-msgstr "" -"_rm_vdisk_copy_op: Mantıksal sürücü %(vol)s metadata belirtilen vdisk " -"kopyalama işlemine sahip değil: orjinal=%(orig)s yeni=%(new)s." - -#, python-format -msgid "" -"_rm_vdisk_copy_op: Volume %s does not have any registered vdisk copy " -"operations." -msgstr "" -"_rm_vdisk_copy_op: %s mantıksal sürücü kayıtlı hiçbir vdisk kopyalama " -"işlemine sahip değil." - -#, python-format -msgid "" -"_rm_vdisk_copy_op: Volume metadata %s does not have any registered vdisk " -"copy operations." -msgstr "" -"_rm_vdisk_copy_op: Mantıksal sürücü metadata %s kayıtlı herhangi bir vdisk " -"kopyalama işlemine sahip değil." - -#, python-format -msgid "" -"_unmap_vdisk_from_host: No mapping of volume %(vol_name)s to host " -"%(host_name)s found." -msgstr "" -"_unmap_vdisk_from_host: %(host_name)s istemcisi için hiçbir mantıksal sürücü " -"%(vol_name)s eşleştirmesi bulunamadı." - -#, python-format -msgid "_wait_for_job_complete failed after %(retries)d tries." -msgstr "_wait_for_job_complete %(retries)d denemeden sonra başarısız oldu." - -#, python-format -msgid "_wait_for_sync failed after %(retries)d tries." -msgstr "_wait_for_sync %(retries)d denemeden sonra başarısız oldu." - -#, python-format -msgid "" -"backup: %(vol_id)s failed to remove backup hardlink from %(vpath)s to " -"%(bpath)s.\n" -"stdout: %(out)s\n" -" stderr: %(err)s." -msgstr "" -"yedek: %(vol_id)s yedek katı bağlantısı %(vpath)s den %(bpath)s e " -"silinemedi.\n" -"stdout: %(out)s\n" -" stderr: %(err)s." 
- -#, python-format -msgid "can't create 2 volumes with the same name, %s" -msgstr "aynı ad ile 2 mantıksal sürücü oluşturulamıyor, %s" - -msgid "cinder-rtstool is not installed correctly" -msgstr "cinder-rtstool doğru bir şekilde kurulu değil" - -#, python-format -msgid "" -"delete: %(vol_id)s failed with stdout: %(out)s\n" -" stderr: %(err)s" -msgstr "" -"sil: %(vol_id)s stdout ile başarısız oldu: %(out)s\n" -" stderr: %(err)s" - -#, python-format -msgid "ensure_export: Volume %s not found on storage." -msgstr "ensure_export: Mantıksal sürücü %s depolama üzerinde bulunamadı." - -#, python-format -msgid "error opening rbd image %s" -msgstr "rbd imajı %s açma hatası" - -msgid "error refreshing volume stats" -msgstr "mantıksal sürücü durum bilgisi tazeleme hatası" - -msgid "horcm command timeout." -msgstr "horcm komutu zaman aşımı." - -#, python-format -msgid "" -"initialize_connection: Failed to collect return properties for volume " -"%(vol)s and connector %(conn)s." -msgstr "" -"initialize_connection: %(vol)s mantıksal sürücüsü ve %(conn)s bağlayıcısı " -"için dönüş özellikleri toplanamadı." - -#, python-format -msgid "" -"initialize_connection: Failed to collect return properties for volume " -"%(vol)s and connector %(conn)s.\n" -msgstr "" -"initialize_connection: %(vol)s mantıksal sürücüsü ve %(conn)s bağlayıcısı " -"için dönüş özellikleri toplanamadı.\n" - -msgid "model server went away" -msgstr "Model sunucusu gitti." - -#, python-format -msgid "single_user auth mode enabled, but %(param)s not set" -msgstr "single_user kimlik doğrulama kipi etkin, fakat %(param)s ayarlı değil" - -msgid "snm2 command timeout." -msgstr "snm2 komutu zaman aşımı." - -msgid "" -"storwize_svc_multihostmap_enabled is set to False, not allowing multi host " -"mapping." -msgstr "" -"storwize_svc_multihostmap_enabled Yanlış olarak ayarlı, çoklu istemci " -"eşlemeye izin vermez." 
diff --git a/cinder/locale/tr_TR/LC_MESSAGES/cinder-log-info.po b/cinder/locale/tr_TR/LC_MESSAGES/cinder-log-info.po deleted file mode 100644 index d0ec9a50e..000000000 --- a/cinder/locale/tr_TR/LC_MESSAGES/cinder-log-info.po +++ /dev/null @@ -1,2147 +0,0 @@ -# Translations template for cinder. -# Copyright (C) 2015 ORGANIZATION -# This file is distributed under the same license as the cinder project. -# -# Translators: -# OpenStack Infra , 2015. #zanata -# Andreas Jaeger , 2016. #zanata -msgid "" -msgstr "" -"Project-Id-Version: cinder 9.0.0.0rc2.dev178\n" -"Report-Msgid-Bugs-To: https://bugs.launchpad.net/openstack-i18n/\n" -"POT-Creation-Date: 2016-10-06 03:18+0000\n" -"MIME-Version: 1.0\n" -"Content-Type: text/plain; charset=UTF-8\n" -"Content-Transfer-Encoding: 8bit\n" -"PO-Revision-Date: 2015-09-16 08:22+0000\n" -"Last-Translator: Andreas Jaeger \n" -"Language: tr-TR\n" -"Plural-Forms: nplurals=1; plural=0;\n" -"Generated-By: Babel 2.0\n" -"X-Generator: Zanata 3.7.3\n" -"Language-Team: Turkish (Turkey)\n" - -#, python-format -msgid "\t%(name)-35s : %(value)s" -msgstr "\t%(name)-35s : %(value)s" - -#, python-format -msgid "\t%(param)-35s : %(value)s" -msgstr "\t%(param)-35s : %(value)s" - -#, python-format -msgid "\t%(prefix)-35s : %(version)s" -msgstr "\t%(prefix)-35s : %(version)s" - -#, python-format -msgid "\t%(request)-35s : %(value)s" -msgstr "\t%(request)-35s : %(value)s" - -#, python-format -msgid "" -"\n" -"\n" -"\n" -"\n" -"Request URL: %(url)s\n" -"\n" -"Call Method: %(method)s\n" -"\n" -"Request Data: %(data)s\n" -"\n" -"Response Data:%(res)s\n" -"\n" -msgstr "" -"\n" -"\n" -"\n" -"\n" -"İstek URL'si: %(url)s\n" -"\n" -"İstek Metodu: %(method)s\n" -"\n" -"İstek Verisi: %(data)s\n" -"\n" -"Yanıt Verisi:%(res)s\n" -"\n" - -#, python-format -msgid "%(method)s %(url)s" -msgstr "%(method)s %(url)s" - -#, python-format -msgid "%(url)s returned a fault: %(e)s" -msgstr "%(url)s hata döndürdü: %(e)s" - -#, python-format -msgid "%(url)s returned with HTTP 
%(status)d" -msgstr "%(url)s HTTP %(status)d ile geri döndü" - -#, python-format -msgid "%(volume)s assign type fibre_channel, properties %(properties)s" -msgstr "%(volume)s atama türü fibre_channel, özellikler %(properties)s" - -#, python-format -msgid "%s is already umounted" -msgstr "%s zaten ayrılmış" - -#, python-format -msgid "3PAR driver cannot perform migration. Retype exception: %s" -msgstr "3PAR sürücüsü göçü gerçekleştiremiyor. Retype istisnası: %s" - -#, python-format -msgid "3PAR vlun %(name)s not found on host %(host)s" -msgstr "3PAR vlun %(name)s %(host)s istemcisinde bulunamadı" - -#, python-format -msgid "" -"3PAR vlun for volume '%(name)s' was deleted, but the host '%(host)s' was not " -"deleted because: %(reason)s" -msgstr "" -"'%(name)s' mantıksal sürücüsü için 3PAR vlun silindi, ama '%(host)s' " -"istemcisi silinmedi çünkü: %(reason)s" - -#, python-format -msgid "AUTH properties: %s." -msgstr "AUTH özellikleri: %s." - -#, python-format -msgid "Accepting transfer %s" -msgstr "%s aktarımı kabul ediliyor" - -msgid "Activate Flexvisor cinder volume driver." -msgstr "Flexvisor cinder mantıksal sürücü sürücüsünü etkinleştir." - -#, python-format -msgid "Add volume response: %s" -msgstr "Mantıksal sürücü ekleme yanıtı: %s" - -#, python-format -msgid "Added %s to cg." -msgstr "%s cg'ye eklendi." - -#, python-format -msgid "Added volume: %(volumeName)s to existing storage group %(sgGroupName)s." -msgstr "" -"Mantıksal sürücü: %(volumeName)s mevcut depolama grubuna %(sgGroupName)s " -"eklendi." - -#, python-format -msgid "Adding ACL to volume=%(vol)s with initiator group name %(igrp)s" -msgstr "" -"%(igrp)s başlatıcı grup ismine sahip mantıksal sürücüye=%(vol)s ACL ekleniyor" - -#, python-format -msgid "" -"Adding volume: %(volumeName)s to default storage group for FAST policy: " -"%(fastPolicyName)s." -msgstr "" -"Mantıksal sürücü: %(volumeName)s FAST ilkesi: %(fastPolicyName)s için " -"varsayılan depolama grubuna ekleniyor." 
- -#, python-format -msgid "Adding volumes to cg %s." -msgstr "Mantıksal sürücüler cg %s'e ekleniyor" - -msgid "Attach volume completed successfully." -msgstr "Mantıksal sürücü ekleme başarıyla tamamlandı." - -msgid "Availability Zones retrieved successfully." -msgstr "Kullanılabilir Bölgeler başarıyla alındı." - -#, python-format -msgid "Backend name is %s." -msgstr "Arka uç ismi %s." - -#, python-format -msgid "Backing VM: %(backing)s renamed to %(new_name)s." -msgstr "Destekleyen VM: %(backing)s %(new_name)s olarak yeniden adlandırıldı." - -msgid "Backing not available, no operation to be performed." -msgstr "Destek kullanılabilir değil, hiçbir işlem yapılmayacak." - -#, python-format -msgid "Backing not found, creating for volume: %s" -msgstr "Destek bulunamadı, mantıksal sürücü: %s için oluşturuluyor" - -#, python-format -msgid "" -"Backup base image of volume %(volume)s still has %(snapshots)s snapshots so " -"skipping base image delete." -msgstr "" -"%(volume)s mantıksal sürücüsünün yedek taban imajı hala %(snapshots)s anlık " -"görüntüye sahip bu yüzden taban imaj silme atlanıyor." - -#, python-format -msgid "" -"Backup image of volume %(volume)s is busy, retrying %(retries)s more time(s) " -"in %(delay)ss." -msgstr "" -"%(volume)s mantıksal sürücüsünün yedek imajı meşgul, %(delay)ss içinde " -"%(retries)s kere tekrar deneniyor." - -#, python-format -msgid "Backup service: %s." -msgstr "Yedek servisi: %s." - -#, python-format -msgid "Begin backup of volume %s." -msgstr "Mantıksal sürücü %s yedeğine başla." - -msgid "Begin detaching volume completed successfully." -msgstr "Mantıksal sürücünün ayrılmasına başlanması başarıyla tamamlandı." - -#, python-format -msgid "CONCERTO version: %s" -msgstr "CONCERTO sürümü: %s" - -#, python-format -msgid "" -"Cannot provide backend assisted migration for volume: %s because cluster " -"exists in different management group." 
-msgstr "" -"Mantıksal sürücü: %s için arka uç destekli göç sağlanamıyor çünkü küme " -"farklı bir yönetim grubunda." - -#, python-format -msgid "" -"Cannot provide backend assisted migration for volume: %s because the volume " -"has been exported." -msgstr "" -"Mantıksal sürücü: %s için arka uç destekli göç sağlanamıyor çünkü mantıksal " -"sürücü dışa aktarılmış." - -#, python-format -msgid "" -"Cannot provide backend assisted migration for volume: %s because the volume " -"has snapshots." -msgstr "" -"Mantıksal sürücü: %s için arka uç destekli göç sağlanamıyor çünkü mantıksal " -"sürücü anlık görüntülere sahip." - -#, python-format -msgid "" -"Cannot provide backend assisted migration for volume: %s because volume does " -"not exist in this management group." -msgstr "" -"Mantıksal sürücü: %s için arka uç destekli göç sağlanamıyor çünkü mantıksal " -"sürücü bu yönetim grubunda değil." - -#, python-format -msgid "" -"Cannot provide backend assisted migration for volume: %s because volume is " -"from a different backend." -msgstr "" -"Mantıksal sürücü: %s için arka uç destekli göç sağlanamıyor çünkü mantıksal " -"sürücü başka bir arka uçta." - -#, python-format -msgid "Cgsnapshot %s: creating." -msgstr "Cgsnapshot %s: oluşturuluyor." - -#, python-format -msgid "Change volume capacity request: %s." -msgstr "Mantıksal sürücü kapasite isteğini değiştir: %s." - -#, python-format -msgid "Checking image clone %s from glance share." -msgstr "İmaj klonu %s glance paylaşımından kontrol ediliyor." - -#, python-format -msgid "" -"Cinder NFS volume with current path \"%(cr)s\" is no longer being managed." -msgstr "" -"\"%(cr)s\" mevcut yoluna sahip Cinder NFS mantıksal sürücüsü artık " -"yönetilmiyor." - -msgid "Cinder secure environment indicator file exists." -msgstr "Cinder güvenli ortam göstergesi dosyası mevcut." 
- -#, python-format -msgid "CiscoFCZoneDriver - Add connection for I-T map: %s" -msgstr "CiscoFCZoneDriver - I-T haritası için bağlantı ekle: %s" - -#, python-format -msgid "CiscoFCZoneDriver - Delete connection for I-T map: %s" -msgstr "CiscoFCZoneDriver - I-T haritası için bağlantı sil: %s" - -#, python-format -msgid "Cleaning cache for share %s." -msgstr "%s paylaşımı için zula temizleniyor." - -msgid "Cleaning up incomplete backup operations." -msgstr "Tamamlanmamış yedek işlemleri temizleniyor." - -#, python-format -msgid "Cloning from cache to destination %s" -msgstr "Zuladan %s hedefine klonlanıyor" - -#, python-format -msgid "Cloning from snapshot to destination %s" -msgstr "Anlık görüntüden %s hedefine klonlanıyor" - -#, python-format -msgid "Cloning image %s from cache" -msgstr "%s imajı zuladan klonlanıyor" - -#, python-format -msgid "Cloning image %s from snapshot." -msgstr "İmaj %s anlık görüntüden klonlanıyor." - -#, python-format -msgid "Cloning volume %(src)s to volume %(dst)s" -msgstr "Mantıksal sürücü %(src)s %(dst)s mantıksal sürücüsüne klonlanıyor" - -#, python-format -msgid "CloudByte API executed successfully for command [%s]." -msgstr "CloudByte API'si [%s] komutu için başarıyla çalıştırıldı." - -msgid "Complete-Migrate volume completed successfully." -msgstr "Mantıksal sürücü göçü-tamamlama başarıyla tamamlandı." - -#, python-format -msgid "Completed: convert_to_base_volume: id=%s." -msgstr "Tamamlandı: convert_to_base_volume: id=%s." - -#, python-format -msgid "" -"Connect initialization info: {driver_volume_type: fibre_channel, data: " -"%(properties)s" -msgstr "" -"Bağlantı ilklendirme bilgisi: {driver_volume_type: fibre_channel, veri: " -"%(properties)s" - -#, python-format -msgid "Connecting to host: %s." -msgstr "İstemciye bağlanılıyor: %s." 
- -#, python-format -msgid "Connector returning fcnsinfo-%s" -msgstr "Bağlayıcı fcnsinfo-%s döndürüyor" - -#, python-format -msgid "Converted %(sz).2f MB image at %(mbps).2f MB/s" -msgstr "%(sz).2f MB imaj %(mbps).2f MB/s hızda dönüştürüldü" - -#, python-format -msgid "Copied image %(img)s to volume %(vol)s using copy offload workflow." -msgstr "" -"%(img)s imajı %(vol)s mantıksal sürücüsüne başkasına yükleme iş akışıyla " -"kopyalandı." - -#, python-format -msgid "Copied image to volume %s using regular download." -msgstr "Normal indirme kullanılarak imaj %s mantıksal sürücüsüne kopyalandı." - -#, python-format -msgid "Copy job to dest vol %s completed." -msgstr "%s hedef mantıksal sürücüsüne kopyalama işi tamamlandı." - -msgid "Copy volume to image completed successfully." -msgstr "Mantıksal sürücüyü imaja kopyalama başarıyla tamamlandı." - -#, python-format -msgid "Copying src vol %(src)s to dest vol %(dst)s." -msgstr "" -"%(src)s kaynak mantıksal sürücüsü %(dst)s hedef mantıksal sürücüsüne " -"kopyalanıyor." - -#, python-format -msgid "Could not find replica to delete of volume %(vol)s." -msgstr "%(vol)s mantıksal sürücüsünün silimi için kopya bulunamadı." - -#, python-format -msgid "Could not run dpkg-query command: %(msg)s." -msgstr "dpkg-query komutu çalıştırılamadı: %(msg)s." - -#, python-format -msgid "Could not run rpm command: %(msg)s." -msgstr "Rpm komutu çalıştırılamadı: %(msg)s." - -#, python-format -msgid "" -"Could not update storage pool with mmchattr to %(pool)s, error: %(error)s" -msgstr "" -"mmchattr'ye sahip depolama havuzu %(pool)s'e güncellenemedi, hata: %(error)s" - -#, python-format -msgid "" -"Couldn't find destination volume %(vol)s in the database. The entry might be " -"successfully deleted during migration completion phase." -msgstr "" -"Hedef mantıksal sürücü %(vol)s veri tabanında bulunamadı. Girdi göç " -"tamamlama aşamasında başarıyla silinmiş olabilir." 
- -#, python-format -msgid "" -"Couldn't find the temporary volume %(vol)s in the database. There is no need " -"to clean up this volume." -msgstr "" -"Geçici mantıksal sürücü %(vol)s veri tabanında bulunamadı. Bu mantıksal " -"sürücüyü temizlemeye gerek yok." - -#, python-format -msgid "Create Cloned Volume %(volume_id)s completed." -msgstr "%(volume_id)s Mantıksal sürücüsü klonlama bitti." - -#, python-format -msgid "Create Consistency Group: %(group)s." -msgstr "Tutarlılık Grubu Oluştur: %(group)s." - -#, python-format -msgid "Create Volume %(volume_id)s completed." -msgstr "%(volume_id)s Mantıksal sürücüsü oluşturuldu." - -#, python-format -msgid "Create Volume %(volume_id)s from snapshot %(snapshot_id)s completed." -msgstr "" -"%(snapshot_id)s anlık görüntüsünden %(volume_id)s mantıksal sürücüsü " -"oluşturma tamamlandı." - -#, python-format -msgid "" -"Create a replica from Volume: Clone Volume: %(cloneName)s Source Volume: " -"%(sourceName)s." -msgstr "" -"Mantıksal sürücüden bir kopya oluştur: Kopya Mantıksal Sürücü: %(cloneName)s " -"Kaynak Mantıksal Sürücü: %(sourceName)s." - -#, python-format -msgid "Create backup finished. backup: %s." -msgstr "Yedek oluşturma bitti. yedek: %s." - -#, python-format -msgid "Create backup started, backup: %(backup_id)s volume: %(volume_id)s." -msgstr "" -"Yedek oluşturma başlatıldı, yedek: %(backup_id)s mantıksal sürücü: " -"%(volume_id)s." - -#, python-format -msgid "Create export done from Volume %(volume_id)s." -msgstr "%(volume_id)s Mantıksal sürücüsünden dışa aktarma oluşturma yapıldı." - -msgid "Create snapshot completed successfully" -msgstr "Anlık görüntü oluşturma başarıyla tamamlandı" - -#, python-format -msgid "" -"Create snapshot for Consistency Group %(cgId)s cgsnapshotID: %(cgsnapshot)s." -msgstr "" -"Tutarlılık Grubu %(cgId)s cgsnapshotID: %(cgsnapshot)s için anlık görüntü " -"oluştur." 
- -#, python-format -msgid "Create snapshot from volume %s" -msgstr "%s biriminden sistem görüntüsü oluşturuluyor" - -#, python-format -msgid "" -"Create success. Snapshot: %(snapshot)s, Snapshot ID in raid: " -"%(raid_snapshot_id)s, volume: %(volume)s." -msgstr "" -"Oluşturma başarılı. Anlık görüntü: %(snapshot)s, Raid'deki anlık görüntü " -"ID'si: %(raid_snapshot_id)s, mantıksal sürücü: %(volume)s." - -#, python-format -msgid "Create target consistency group %(targetCg)s." -msgstr "Hedef tutarlılık grubu %(targetCg)s oluştur." - -#, python-format -msgid "Create volume of %s GB" -msgstr "%s GB'lık birim oluştur" - -#, python-format -msgid "" -"Created CloudByte snapshot [%(cb_snap)s] w.r.t CloudByte volume [%(cb_vol)s] " -"and OpenStack volume [%(stack_vol)s]." -msgstr "" -"CloudByte anlık görüntüsü [%(cb_snap)s] w.r.t CloudByte mantıksal sürücüsü " -"[%(cb_vol)s] ve OpenStack mantıksal sürücüsü [%(stack_vol)s] oluşturuldu." - -#, python-format -msgid "Created Consistency Group %s" -msgstr "Tutarlılık Grubu %s oluşturuldu" - -#, python-format -msgid "Created datastore folder: %s." -msgstr "Veri deposu dizini oluşturuldu: %s." - -#, python-format -msgid "" -"Created multi-attach E-Series host group %(label)s with clusterRef " -"%(clusterRef)s" -msgstr "" -"clusterRef %(clusterRef)s ile çoklu-ekleme E-serisi istemci grubu %(label)s " -"oluşturuldu" - -#, python-format -msgid "Created new initiator group name: %(igGroupName)s." -msgstr "Yeni başlatıcı grubu ismi oluşturuldu: %(igGroupName)s." - -#, python-format -msgid "Created new masking view : %(maskingViewName)s." -msgstr "Yeni maskeleme görünümü oluşturuldu : %(maskingViewName)s." - -#, python-format -msgid "Created new storage group: %(storageGroupName)s." -msgstr "Yeni depolama grubu oluşturuldu: %(storageGroupName)s." - -#, python-format -msgid "Created snap grp with label %s." -msgstr "%s etiketli anlık görüntü grubu oluşturuldu." 
- -#, python-format -msgid "Created volume %(instanceId)s: %(name)s" -msgstr "%(instanceId)s mantıksal sürücüsü oluşturuldu: %(name)s" - -#, python-format -msgid "Created volume %(volname)s, volume id %(volid)s." -msgstr "" -"Mantıksal sürücü %(volname)s oluşturuldu, mantıksal sürücü kimliği %(volid)s." - -msgid "Created volume successfully." -msgstr "Mantıksal sürücü başarıyla tamamlandı." - -#, python-format -msgid "Created volume with label %s." -msgstr "%s etiketine sahip mantıksal sürücü oluşturuldu." - -#, python-format -msgid "Creating backup of volume %(volume_id)s in container %(container)s" -msgstr "" -"%(container)s kabındaki %(volume_id)s mantıksal sürücüsünün yedeği " -"oluşturuluyor" - -#, python-format -msgid "Creating cgsnapshot %(name)s." -msgstr "Cgsnapshot %(name)s oluşturuluyor." - -#, python-format -msgid "Creating clone of volume: %s" -msgstr "Mantıksal sürücü klonu oluşturuluyor: %s" - -#, python-format -msgid "Creating consistency group %(name)s from cgsnapshot %(snap)s." -msgstr "%(name)s tutarlılık grubu %(snap)s cgsnapshot'dan oluşturuluyor." - -#, python-format -msgid "Creating consistency group %(name)s." -msgstr "%(name)s tutarlılık grubu oluşturuluyor." - -#, python-format -msgid "Creating host object %(host_name)r with IQN: %(iqn)s." -msgstr "IQN: %(iqn)s e sahip istemci nesnesi %(host_name)r oluşturuluyor." - -#, python-format -msgid "Creating host object %(host_name)r with WWN: %(wwn)s." -msgstr "WWN: %(wwn)s'e sahip %(host_name)r istemci nesnesi oluşturuluyor." - -#, python-format -msgid "Creating host with ports %s." -msgstr "%s bağlantı noktasına sahip istemci oluşturuluyor." 
- -#, python-format -msgid "Creating image snapshot %s" -msgstr "İmaj anlık görüntüsü %s oluşturuluyor" - -#, python-format -msgid "Creating initiator group %(grp)s with initiator %(iname)s" -msgstr "Başlatıcı grubu %(grp)s %(iname)s başlatıcısı ile oluşturuluyor" - -#, python-format -msgid "Creating initiator group %(igrp)s with one initiator %(iname)s" -msgstr "Başlatıcı grubu %(igrp)s bir başlatıcı ile oluşturuluyor %(iname)s" - -#, python-format -msgid "Creating iscsi_target for volume: %s" -msgstr "Mantıksal sürücü: %s için iscsi_target oluşturuluyor" - -#, python-format -msgid "" -"Creating snapshot for volume_name=%(vol)s snap_name=%(name)s " -"snap_description=%(desc)s" -msgstr "" -"volume_name=%(vol)s snap_name=%(name)s snap_description=%(desc)s için anlık " -"görüntü oluşturuluyor" - -#, python-format -msgid "Creating snapshot: %s" -msgstr "Anlık görüntü oluşturuluyor: %s" - -#, python-format -msgid "Creating transfer of volume %s" -msgstr "%s mantıksal sürücüsünün aktarımı oluşturuluyor" - -#, python-format -msgid "Creatng volume from snapshot. volume: %s" -msgstr "Anlık görüntüden mantıksal sürücü oluşturuluyor. mantıksal sürücü: %s" - -#, python-format -msgid "Delete Consistency Group: %(group)s." -msgstr "Tutarlılık Grubunu sil: %(group)s." - -#, python-format -msgid "Delete Snapshot %(snapshot_id)s completed." -msgstr "%(snapshot_id)s anlık görüntü silme tamamlandı." - -#, python-format -msgid "Delete Snapshot: %(snapshot)s." -msgstr "Anlık görüntü sil: %(snapshot)s." - -#, python-format -msgid "Delete Snapshot: %(snapshotName)s." -msgstr "Anlık görüntüyü sil: %(snapshotName)s." - -#, python-format -msgid "Delete Volume %(volume_id)s completed." -msgstr "%(volume_id)s Mantıksal sürücüsü silindi." - -#, python-format -msgid "Delete backup finished, backup %s deleted." -msgstr "Yedek silme bitti, yedek %s silindi." - -#, python-format -msgid "Delete backup started, backup: %s." -msgstr "Yedek silme başladı, yedek: %s." 
- -#, python-format -msgid "Delete backup with id: %s" -msgstr "Şu kimliğe sahip yedeği sil: %s" - -#, python-format -msgid "Delete cgsnapshot %(snap_name)s for consistency group: %(group_name)s" -msgstr "Tutarlılık grubu için %(snap_name)s cgsnapshot'ını sil: %(group_name)s" - -#, python-format -msgid "Delete cgsnapshot with id: %s" -msgstr "Şu kimliğe sahip cgsnapshot'u sil: %s" - -msgid "Delete consistency group completed successfully." -msgstr "Tutarlılık grubunun silinmesi başarıyla tamamlandı." - -#, python-format -msgid "Delete consistency group with id: %s" -msgstr "Şu kimliğe sahip tutarlılık grubunu sil: %s" - -#, python-format -msgid "" -"Delete of backup '%(backup)s' for volume '%(volume)s' finished with warning." -msgstr "" -"'%(volume)s' mantıksal sürücüsü için '%(backup)s' yedeği silme uyarıyla " -"bitti." - -msgid "Delete snapshot completed successfully" -msgstr "Anlık görüntü silme başarıyla tamamlandı" - -#, python-format -msgid "Delete snapshot for source CG %(cgId)s cgsnapshotID: %(cgsnapshot)s." -msgstr "" -"Kaynak CG %(cgId)s için anlık görüntüyü sil cgsnapshotID: %(cgsnapshot)s." - -msgid "Delete snapshot metadata completed successfully." -msgstr "Anlık görüntü metadata'sı sil başarıyla tamamlandı." - -#, python-format -msgid "Delete snapshot with id: %s" -msgstr "%s id'li sistem görüntüsü siliniyor" - -#, python-format -msgid "Delete transfer with id: %s" -msgstr "Şu kimliğe sahip aktarımı sil: %s" - -msgid "Delete volume metadata completed successfully." -msgstr "Mantıksal sürücü metadata'sı silme başarıyla tamamlandı." - -msgid "Delete volume request issued successfully." -msgstr "Mantıksal sürücü silme isteği başarıyla yapıldı." 
- -#, python-format -msgid "Delete volume with id: %s" -msgstr "%s id'li birim siliniyor" - -#, python-format -msgid "Deleted %(row)d rows from table=%(table)s" -msgstr "%(row)d satır table=%(table)s'den silindi" - -#, python-format -msgid "" -"Deleted CloudByte snapshot [%(snap)s] w.r.t parent CloudByte volume " -"[%(cb_vol)s] and parent OpenStack volume [%(stack_vol)s]." -msgstr "" -"CloudByte anlık görüntüsü [%(snap)s] w.r.t üst CloudByte mantıksal sürücüsü " -"[%(cb_vol)s] ve üst OpenStack mantıksal sürücüsü [%(stack_vol)s] silindi." - -#, python-format -msgid "Deleted the VM backing: %s." -msgstr "VM desteği: %s silindi." - -#, python-format -msgid "Deleted vmdk file: %s." -msgstr "Vmdk dosyası silindi: %s." - -msgid "Deleted volume successfully." -msgstr "Mantıksal sürücü başarıyla silindi." - -#, python-format -msgid "Deleting Volume: %(volume)s" -msgstr "Mantıksal sürücü siliniyor: %(volume)s" - -#, python-format -msgid "Deleting backup base image='%(basename)s' of volume %(volume)s." -msgstr "" -"%(volume)s mantıksal sürücüsünün yedek taban imajı='%(basename)s' siliniyor." - -#, python-format -msgid "Deleting deleteInitiatorGrp %s " -msgstr "deleteInitiatorGrp %s siliniyor " - -#, python-format -msgid "Deleting snapshot %(ss)s from %(pro)s" -msgstr "Anlık görüntü %(ss)s %(pro)s'den siliniyor" - -#, python-format -msgid "Deleting snapshot %s " -msgstr "Anlık görüntü %s siliniyor " - -#, python-format -msgid "Deleting snapshot: %s" -msgstr "Anlık görüntü siliniyor: %s" - -#, python-format -msgid "Deleting stale snapshot: %s" -msgstr "Eski anlık görüntü siliniyor: %s" - -#, python-format -msgid "Deleting volume %s " -msgstr "Mantıksal sürücü %s siliniyor " - -msgid "Detach volume completed successfully." -msgstr "Mantıksal sürücü ayır başarıyla tamamlandı." - -msgid "Determined volume DB was empty at startup." -msgstr "Tespit edilen mantıksal sürücü DB'si başlangıçta boştu." - -msgid "Determined volume DB was not empty at startup." 
-msgstr "Tespit edilen mantıksal sürücü DB'si başlangıçta boş değildi." - -#, python-format -msgid "" -"Did not find the snapshot: %(name)s for backing: %(backing)s. Need not " -"delete anything." -msgstr "" -"Destek: %(backing)s için anlık görüntü: %(name)s bulunamadı. Hiçbir şeyin " -"silinmesi gerekmiyor." - -#, python-format -msgid "Discovery ip %(disc_ip)s is found on mgmt+data subnet %(net_label)s" -msgstr "Keşif ip'si %(disc_ip)s %(net_label)s mgmt+veri alt ağında bulundu" - -#, python-format -msgid "Discovery ip %(disc_ip)s is used on data subnet %(net_label)s" -msgstr "Keşif ip'si %(disc_ip)s %(net_label)s veri alt ağında kullanılıyor" - -#, python-format -msgid "Discovery ip %(disc_ip)s is used on subnet %(net_label)s" -msgstr "Keşif ip'si %(disc_ip)s %(net_label)s alt ağında kullanılıyor" - -#, python-format -msgid "Discovery ip %s is used on mgmt+data subnet" -msgstr "Keşif ip'si %s mgmt+data alt ağında kullanılıyor" - -#, python-format -msgid "Dissociating volume %s " -msgstr "Mantıksal sürücü %s ilişiği kesiliyor " - -#, python-format -msgid "Domain id is %s." -msgstr "Alan id'si %s." - -#, python-format -msgid "Done copying image: %(id)s to volume: %(vol)s." -msgstr "İmaj %(id)s nin mantıksal sürücü: %(vol)s e kopyalanması bitti." - -#, python-format -msgid "Done copying volume %(vol)s to a new image %(img)s" -msgstr "%(vol)s mantıksal sürücüsünün %(img)s yeni imajına kopyalanması bitti" - -msgid "Driver initialization completed successfully." -msgstr "Sürücü ilklendirme başarıyla tamamlandı." - -#, python-format -msgid "EQL-driver: Setup is complete, group IP is \"%s\"." -msgstr "EQL-sürücüsü: Kurulum tamamlandı, grup IP'si \"%s\"." - -#, python-format -msgid "EQL-driver: executing \"%s\"." -msgstr "EQL-sürücüsü: \"%s\" çalıştırılıyor." - -#, python-format -msgid "Editing Volume %(vol)s with mask %(mask)s" -msgstr "%(mask)s maskesine sahip %(vol)s mantıksal sürücüsü düzenleniyor" - -msgid "Embedded mode detected." -msgstr "Gömülü kip algılandı." 
- -#, python-format -msgid "Entering extend_volume volume=%(vol)s new_size=%(size)s" -msgstr "extend_volume volume=%(vol)s new_size=%(size)s durumuna giriliyor" - -#, python-format -msgid "" -"Entering initialize_connection volume=%(vol)s connector=%(conn)s location=" -"%(loc)s" -msgstr "" -"initialize_connection'a giriliyor mantıksal sürücü=%(vol)s bağlayıcı=" -"%(conn)s konum=%(loc)s" - -#, python-format -msgid "" -"Entering terminate_connection volume=%(vol)s connector=%(conn)s location=" -"%(loc)s." -msgstr "" -"terminate_connection'a giriliyor mantıksal sürücü=%(vol)s bağlayıcı=%(conn)s " -"konum=%(loc)s." - -#, python-format -msgid "Exploring array subnet label %s" -msgstr "Dizi alt ağ etiketi %s keşfediliyor" - -#, python-format -msgid "Export record finished, backup %s exported." -msgstr "Kayıt dışa aktarma bitti, yedek %s dışa aktarıldı." - -#, python-format -msgid "Export record started, backup: %s." -msgstr "Kayıt dışa aktarma başladı, yedek: %s." - -#, python-format -msgid "Exported lun %(vol_id)s on lun_id %(lun_id)s." -msgstr "%(vol_id)s lun_id %(lun_id)s üzerine aktarıldı." - -msgid "Extend volume completed successfully." -msgstr "Mantıksal sürücü büyütme başarıyla tamamlandı." - -msgid "Extend volume request issued successfully." -msgstr "Mantıksal sürücü büyütme isteği başarıyla yapıldı." - -#, python-format -msgid "Extending volume %s." -msgstr "Mantıksal sürücü %s büyütülüyor." - -#, python-format -msgid "Failed to open iet session list for %(vol_id)s: %(e)s" -msgstr "%(vol_id)s için iet oturumu açılamadı: %(e)s" - -#, python-format -msgid "Fault thrown: %s" -msgstr "Hata fırlatıldı: %s" - -#, python-format -msgid "Filtered targets for SAN is: %s" -msgstr "SAN için filtrelenen hedefler: %s" - -#, python-format -msgid "Fixing previous mount %s which was not unmounted correctly." -msgstr "Düzgün bağı ayrılmamış önceki %s bağı düzeltiliyor." 
- -#, python-format -msgid "Flash Cache policy set to %s" -msgstr "Zula sıfırlama ilkesi %s olarak ayarlandı" - -#, python-format -msgid "Flexvisor already unassigned volume %(id)s." -msgstr "Flexvisor %(id)s mantıksal sürücü atamasını zaten kaldırdı." - -#, python-format -msgid "Flexvisor snapshot %(id)s not existed." -msgstr "Flexvisor anlık görüntüsü %(id)s mevcut değildi." - -#, python-format -msgid "Flexvisor succeeded to add volume %(id)s to group %(cgid)s." -msgstr "" -"Flexvisor %(id)s mantıksal sürücüsünü %(cgid)s grubuna başarıyla ekledi." - -#, python-format -msgid "Flexvisor succeeded to clone volume %(id)s." -msgstr "Flexvisor %(id)s mantıksal sürücüsünü klonlamayı başardı." - -#, python-format -msgid "Flexvisor succeeded to create volume %(id)s from snapshot." -msgstr "" -"Flexvisor başarıyla anlık görüntüden %(id)s mantıksal sürücüsünü oluşturdu." - -#, python-format -msgid "Flexvisor succeeded to create volume %(id)s." -msgstr "Flexvisor başarıyla %(id)s mantıksal sürücüsünü oluşturdu." - -#, python-format -msgid "Flexvisor succeeded to delete snapshot %(id)s." -msgstr "Flexvisor %(id)s anlık görüntüsünü silmeyi başardı." - -#, python-format -msgid "Flexvisor succeeded to extend volume %(id)s." -msgstr "Flexvisor %(id)s mantıksal sürücüsünü büyütmeyi başardı." - -#, python-format -msgid "Flexvisor succeeded to remove volume %(id)s from group %(cgid)s." -msgstr "" -"Flexvisor başarıyla %(id)s mantıksal sürücüsünü %(cgid)s grubundan çıkardı." - -#, python-format -msgid "Flexvisor succeeded to unassign volume %(id)s." -msgstr "Flexvisor %(id)s mantıksal sürücüsü atamasını kesebildi." - -#, python-format -msgid "Flexvisor volume %(id)s does not exist." -msgstr "Flexvisor mantıksal sürücüsü %(id)s mevcut değil." - -msgid "Force upload to image is disabled, Force option will be ignored." -msgstr "İmaja zorla yükleme kapalı, Zorlama seçeneği göz ardı edilecek." - -#, python-format -msgid "Free capacity for backend is: %(free)s, total capacity: %(total)s." 
-msgstr "Arka uç için boş kapasite: %(free)s, toplam kapasite: %(total)s." - -#, python-format -msgid "Generating transfer record for volume %s" -msgstr "Mantıksal sürücü %s için aktarım kaydı üretiliyor" - -msgid "Get all volumes completed successfully." -msgstr "Tüm mantıksal sürücülerin getirilmesi başarıyla bitti." - -#, python-format -msgid "Get domain by name response: %s" -msgstr "İsimle alan adı alma yanıtı: %s" - -msgid "Get snapshot metadata completed successfully." -msgstr "Anlık görüntü metadata'sı getir başarıyla tamamlandı." - -msgid "Get snapshot metadata value not implemented." -msgstr "Anlık görüntü metadata değeri getirme uygulanmadı." - -msgid "Get volume admin metadata completed successfully." -msgstr "Mantıksal sürücü yönetici metadata'sını getir başarıyla tamamlandı." - -msgid "Get volume image-metadata completed successfully." -msgstr "Mantıksal sürücü imaj-metadata'sı getir başarıyla tamamlandı." - -msgid "Get volume metadata completed successfully." -msgstr "Mantıksal sürücü metadata alma başarıyla tamamlandı." - -msgid "Getting getInitiatorGrpList" -msgstr "getInitiatorGrpList getiriliyor" - -#, python-format -msgid "Getting volume information for vol_name=%s" -msgstr "vol_name=%s için mantıksal sürücü bilgisi alınıyor" - -#, python-format -msgid "Going to perform request again %s with valid token." -msgstr "İstek %s geçerli jetonla tekrar gerçekleşecek." - -#, python-format -msgid "HTTP exception thrown: %s" -msgstr "HTTP istisnası fırlatıldı: %s" - -#, python-format -msgid "Image %(pool)s/%(image)s is dependent on the snapshot %(snap)s." -msgstr "İmaj %(pool)s/%(image)s %(snap)s anlık görüntüsüne bağımlı." - -#, python-format -msgid "Image cloning unsuccessful for image %(image_id)s. Message: %(msg)s" -msgstr "%(image_id)s imajı için imaj klonlama başarısız. 
İleti: %(msg)s"
-
-#, python-format
-msgid "Image download %(sz).2f MB at %(mbps).2f MB/s"
-msgstr "İmaj indirme %(sz).2f MB %(mbps).2f MB/s hızında"
-
-#, python-format
-msgid "Image will locally be converted to raw %s"
-msgstr "İmaj yerel olarak ham %s'e dönüştürülecek"
-
-#, python-format
-msgid "Import record id %s metadata from driver finished."
-msgstr "Sürücüden kayıt id %s metadata içe aktarma bitti."
-
-#, python-format
-msgid "Import record started, backup_url: %s."
-msgstr "Kayıt içe aktarma başladı, backup_url: %s."
-
-#, python-format
-msgid "Initialize connection: %(volume)s."
-msgstr "Bağlantıyı ilklendir: %(volume)s."
-
-msgid "Initialize volume connection completed successfully."
-msgstr "Mantıksal sürücü bağlantısını ilklendirme başarıyla tamamlandı."
-
-#, python-format
-msgid "Initialized driver %(name)s version: %(vers)s"
-msgstr "Sürücü %(name)s sürüm: %(vers)s ilklendirildi"
-
-msgid "Initializing extension manager."
-msgstr "Genişletme yöneticisi başlatılıyor"
-
-#, python-format
-msgid ""
-"Initiator Name(s) %(initiatorNames)s are not on array %(storageSystemName)s."
-msgstr ""
-"Başlatıcı İsim(ler)i %(initiatorNames)s %(storageSystemName)s dizisinde "
-"değil."
-
-#, python-format
-msgid ""
-"Initiator Name(s) %(initiatorNames)s are not on array %(storageSystemName)s. "
-msgstr ""
-"Başlatıcı İsim(ler)i %(initiatorNames)s %(storageSystemName)s dizisinde "
-"değil. "
-
-#, python-format
-msgid "Initiator group name is %(grp)s for initiator %(iname)s"
-msgstr "%(iname)s başlatıcısı için başlatıcı grup ismi %(grp)s"
-
-#, python-format
-msgid "LUN %(id)s extended to %(size)s GB."
-msgstr "LUN %(id)s %(size)s GB'ye büyütüldü."
-
-#, python-format
-msgid "LUN with given ref %s need not be renamed during manage operation."
-msgstr ""
-"Verilen referans %s e sahip LUN'un yönetim işlemi sırasında yeniden "
-"isimlendirilmesine gerek yoktur."
-
-#, python-format
-msgid ""
-"Leaving create_volume: %(volumeName)s Return code: %(rc)lu volume dict: "
-"%(name)s." 
-msgstr "" -"create_volume: %(volumeName)s terk ediliyor Dönüş kodu: %(rc)lu mantıksal " -"sürücü sözlüğü: %(name)s." - -#, python-format -msgid "Leaving delete_volume: %(volumename)s Return code: %(rc)lu." -msgstr "delete_volume: %(volumename)s terk ediliyor Dönüş kodu: %(rc)lu." - -#, python-format -msgid "Leaving initialize_connection: %s" -msgstr "initialize_connection: %s terk ediliyor" - -#, python-format -msgid "Loaded extension: %s" -msgstr "Yüklenen bölüm: %s" - -#, python-format -msgid "" -"Logical Volume not found when querying LVM info. (vg_name=%(vg)s, lv_name=" -"%(lv)s" -msgstr "" -"LVM bilgisi sorgulanırken Mantıksal Sürücü bulunamadı. (vg_name=%(vg)s, " -"lv_name=%(lv)s" - -msgid "Manage existing volume completed successfully." -msgstr "Mevcut mantıksal sürücüyü yönetme başarıyla tamamlandı." - -#, python-format -msgid "" -"Manage operation completed for LUN with new path %(path)s and uuid %(uuid)s." -msgstr "" -"LUN için yönetim işlemi yeni yol %(path)s ve uuid %(uuid)s ile tamamlandı." - -#, python-format -msgid "" -"Manage operation completed for volume with new label %(label)s and wwn " -"%(wwn)s." -msgstr "" -"%(label)s yeni etiketli ve %(wwn)s wwn'li mantıksal sürücü için yönetme " -"işlemi tamamlandı." - -#, python-format -msgid "Manage volume %s" -msgstr "Mantıksal sürücü %s'i yönet" - -msgid "Manage volume request issued successfully." -msgstr "Mantıksal sürücü yönetim isteği başarıyla yapıldı." - -#, python-format -msgid "Migrate Volume %(volume_id)s completed." -msgstr "%(volume_id)s mantıksal sürücü göçü tamamlandı." - -msgid "Migrate volume completed successfully." -msgstr "Mantıksal sürücü göçü başarıyla tamamlandı." - -msgid "Migrate volume completion issued successfully." -msgstr "Mantıksal sürücü göç tamamlama başarıyla yapıldı." - -msgid "Migrate volume request issued successfully." -msgstr "Mantıksal sürücü göç isteği başarıyla yapıldı." - -#, python-format -msgid "Migrating using retype Volume: %(volume)s." 
-msgstr "Retype kullanarak göç yapılıyor Mantıksal sürücü: %(volume)s." - -#, python-format -msgid "" -"Modifying %(volume_name)s snap_cpg from %(old_snap_cpg)s to %(new_snap_cpg)s." -msgstr "" -" %(volume_name)s snap_cpg %(old_snap_cpg)s den %(new_snap_cpg)s e " -"değiştiriliyor." - -#, python-format -msgid "Modifying %(volume_name)s userCPG from %(old_cpg)s to %(new_cpg)s" -msgstr "%(volume_name)s userCPG %(old_cpg)s den %(new_cpg)s e değiştiriliyor" - -#, python-format -msgid "Modifying %s comments." -msgstr "%s yorum değiştiriliyor." - -msgid "" -"Module PyWBEM not installed. Install PyWBEM using the python-pywbem package." -msgstr "" -"PyWBEM modülü kurulu değil. python-pywbem paketini kullanarak PyWBEM kur." - -msgid "" -"Module PyWBEM not installed. Install PyWBEM using the python-pywbem package." -msgstr "" -"PyWBEM Modülü kurulu değil. python-pywbem paketini kullanarak PyWBEM kurun." - -msgid "Need to remove FC Zone, building initiator target map" -msgstr "FC Bölgesi silinmeli, başlatıcı hedef haritası inşa ediliyor" - -msgid "Need to remove FC Zone, building initiator target map." -msgstr "FC Bölgesi kaldırılmalı, başlatıcı hedef haritası inşa ediliyor." - -#, python-format -msgid "" -"NetApp driver of family %(storage_family)s and protocol %(storage_protocol)s " -"loaded." -msgstr "" -"%(storage_family)s ailesi ve %(storage_protocol)s iletişim kuralının NetApp " -"sürücüsü yüklendi." - -#, python-format -msgid "New Cinder secure environment indicator file created at path %s." -msgstr "%s yolunda yeni Cinder güvenli ortam göstergesi dosyası oluşturuldu." - -#, python-format -msgid "New str info is: %s." -msgstr "Yeni str bilgisi: %s." - -#, python-format -msgid "No dpkg-query info found for %(pkg)s package." -msgstr "%(pkg)s paketi için dpkg-query bilgisi bulunamadı." 
- -#, python-format -msgid "No igroup found for initiator %s" -msgstr "%s başlatıcısı için igroup bulunamadı" - -#, python-format -msgid "No iscsi target present for volume id:%(vol_id)s: %(e)s" -msgstr "Mantıksal sürücü id:%(vol_id)s için iscsi hedefi mevcut değil: %(e)s" - -#, python-format -msgid "No need to extend volume %s as it is already the requested new size." -msgstr "" -"Mantıksal sürücü %s istenen yeni boyutta olduğundan mantıksal sürücüyü " -"büyütmeye gerek yok." - -#, python-format -msgid "" -"No replication synchronization session found associated with source volume " -"%(source)s on %(storageSystem)s." -msgstr "" -"%(storageSystem)s üzerinde %(source)s kaynak mantıksal sürücüsü ile ilişkili " -"kopyalama eş zamanlama oturumu bulunamadı." - -#, python-format -msgid "No rpm info found for %(pkg)s package." -msgstr "%(pkg)s paketi için rpm bilgisi bulunamadı." - -#, python-format -msgid "OpenStack OS Version Info: %(info)s" -msgstr "OpenStack OS Sürüm Bilgisi: %(info)s" - -#, python-format -msgid "Overwriting volume %(volume_id)s with restore of backup %(backup_id)s" -msgstr "" -"%(volume_id)s mantıksal sürücüsünün üstüne %(backup_id)s yedeğinin geri " -"yüklemesi yazılıyor" - -#, python-format -msgid "Params for add volume request: %s." -msgstr "Mantıksal sürücü ekleme isteği için parametreler: %s." - -#, python-format -msgid "Performing post clone for %s" -msgstr "%s için klon sonrası işler gerçekleştiriliyor" - -#, python-format -msgid "Performing secure delete on volume: %s" -msgstr "Mantıksal sürücü güvenle siliniyor: %s" - -#, python-format -msgid "Pool id is %s." -msgstr "Havuz id'si %s." - -#, python-format -msgid "Port group instance name is %(foundPortGroupInstanceName)s." -msgstr "Bağlantı noktası grubu sunucu ismi %(foundPortGroupInstanceName)s." 
- -#, python-format -msgid "Post clone resize LUN %s" -msgstr "LUN %s klon yeniden boyutlandırma sonrası" - -#, python-format -msgid "Prefer use target wwpn %(wwpn)s" -msgstr "hedef wwpn %(wwpn)s kullanmayı tercih et" - -#, python-format -msgid "Profile %s has been deleted." -msgstr "%s profili silindi." - -#, python-format -msgid "Protection domain name: %(domain_name)s." -msgstr "Koruma alan adı: %(domain_name)s." - -msgid "Proxy mode detected." -msgstr "Vekil kipi algılandı." - -#, python-format -msgid "Purging deleted rows older than age=%(age)d days from table=%(table)s" -msgstr "" -"tablo=%(table)s tablosundan yaş=%(age)d den büyük silinmiş satırlar siliniyor" - -#, python-format -msgid "Query capacity stats response: %s." -msgstr "Kapasite istatistikleri sorgusu yanıtı: %s." - -msgid "" -"RBD striping not supported - ignoring configuration settings for rbd striping" -msgstr "" -"RBD şeritleme desteklenmiyor - rbd şeritleme için yapılandırma ayarları göz " -"ardı ediliyor" - -#, python-format -msgid "RBD volume %s not found, allowing delete operation to proceed." -msgstr "" -"RBD mantıksal sürücüsü %s bulunamadı, devam etmek için silme işlemine izin " -"veriliyor." - -#, python-format -msgid "" -"REST server IP: %(ip)s, port: %(port)s, username: %(user)s. Verify server's " -"certificate: %(verify_cert)s." -msgstr "" -"REST sunucu IP'si: %(ip)s, bağlantı noktası: %(port)s, kullanıcıadı: " -"%(user)s. Sunucu sertifikasını doğrula: %(verify_cert)s." - -#, python-format -msgid "Re-using existing purity host %(host_name)r" -msgstr "Mevcut purity istemcisi %(host_name)r tekrar kullanılıyor" - -#, python-format -msgid "Registering image in cache %s" -msgstr "%s zulasında imaj kaydediliyor" - -#, python-format -msgid "Removed %s from cg." -msgstr "%s cg'den silindi." 
- -#, python-format -msgid "Removing ACL from volume=%(vol)s for initiator group %(igrp)s" -msgstr "" -"%(igrp)s başlatıcı grubu için mantıksal sürücü=%(vol)s'den ACL kaldırılıyor" - -#, python-format -msgid "Removing iscsi_target for Volume ID: %s" -msgstr "Mantıksal sürücü ID: %s için iscsi_target kaldırılıyor" - -#, python-format -msgid "Removing iscsi_target for volume: %s" -msgstr "Mantıksal sürücü: %s için iscsi_target kaldırılıyor" - -#, python-format -msgid "Removing iscsi_target for: %s" -msgstr "%s için iscsi_target kaldırılıyor" - -#, python-format -msgid "Removing iscsi_target: %s" -msgstr "iscsi_target kaldırılıyor: %s" - -#, python-format -msgid "Removing non-active host: %(host)s from scheduler cache." -msgstr "Etkin olmayan istemci:%(host)s zamanlayıcı zulasından siliniyor." - -#, python-format -msgid "Removing volumes from cg %s." -msgstr "Mantıksal sürücüler cg %s'den çıkarılıyor." - -#, python-format -msgid "Rename Volume %(volume_id)s completed." -msgstr "%(volume_id)s mantıksal sürücü yeniden isimlendirme tamamlandı." - -#, python-format -msgid "Renaming backing VM: %(backing)s to %(new_name)s." -msgstr "" -"Destekleyen VM: %(backing)s %(new_name)s olarak yeniden adlandırılıyor." - -#, python-format -msgid "Renaming existing volume %(ref_name)s to %(new_name)s" -msgstr "" -"Mevcut mantıksal sürücü %(ref_name)s %(new_name)s olarak yeniden " -"adlandırılıyor" - -#, python-format -msgid "Requested unified config: %(storage_family)s and %(storage_protocol)s." -msgstr "" -"Benzersizleştirilmiş yapılandırma istendi: %(storage_family)s ve " -"%(storage_protocol)s." - -msgid "Reserve volume completed successfully." -msgstr "Mantıksal sürücüyü rezerve etme başarıyla bitti." - -#, python-format -msgid "" -"Reset backup status started, backup_id: %(backup_id)s, status: %(status)s." -msgstr "" -"Yedek durumunu sıfırlama başlatıldı, yedek_id: %(backup_id)s, durum: " -"%(status)s." 
- -#, python-format -msgid "Resetting backup %s to available (was restoring)." -msgstr "Yedek %s kullanılabilir olarak sıfırlanıyor (geri yükleniyordu)." - -#, python-format -msgid "Resetting backup %s to error (was creating)." -msgstr "Yedek %s hataya sıfırlanıyor (oluşturuluyordu)." - -#, python-format -msgid "Resizing LUN %s directly to new size." -msgstr "LUN %s doğrudan yeni boyuta boyutlandırılıyor." - -#, python-format -msgid "Resizing file to %sG" -msgstr "Dosya %sG olarak yeniden boyutlanıyor" - -#, python-format -msgid "Resizing file to %sG..." -msgstr "Dosya %sG'ye yeniden boyutlanıyor..." - -#, python-format -msgid "" -"Restore backup finished, backup %(backup_id)s restored to volume " -"%(volume_id)s." -msgstr "" -"Yedek geri yükleme bitti, yedek %(backup_id)s %(volume_id)s mantıksal " -"sürücüsüne geri yüklendi." - -#, python-format -msgid "Restore backup started, backup: %(backup_id)s volume: %(volume_id)s." -msgstr "" -"Yedek geri yükleme başladı, yedek: %(backup_id)s mantıksal sürücü: " -"%(volume_id)s." - -#, python-format -msgid "Restoring backup %(backup)s to volume %(volume)s." -msgstr "%(backup)s yedeği %(volume)s mantıksal sürücüsüne geri yükleniyor." - -#, python-format -msgid "Restoring backup %(backup_id)s to volume %(volume_id)s" -msgstr "Yedek %(backup_id)s %(volume_id)s mantıksal sürücüsüne geri yükleniyor" - -msgid "Resume volume delete completed successfully." -msgstr "Mantıksal sürücü silmeyi sürdürme başarıyla tamamlandı." - -#, python-format -msgid "Resuming delete on backup: %s." -msgstr "Yedek üzerinde silme sürdürülüyor: %s." - -#, python-format -msgid "Retype Volume %(volume_id)s is completed." -msgstr "%(volume_id)s Mantıksal sürücüsü retype tamamlandı." - -#, python-format -msgid "Retype Volume %(volume_id)s is done and migrated to pool %(pool_id)s." -msgstr "" -"%(volume_id)s Mantıksal sürücüsü retype tamamlandı ve %(pool_id)s havuzuna " -"göç edildi." 
-
-#, python-format
-msgid ""
-"Retype revert %(volume_name)s snap_cpg from %(new_snap_cpg)s back to "
-"%(old_snap_cpg)s."
-msgstr ""
-"Retype %(volume_name)s snap_cpg %(new_snap_cpg)s den %(old_snap_cpg)s e geri "
-"al."
-
-msgid "Retype volume completed successfully."
-msgstr "Mantıksal sürücü retype başarıyla tamamlandı."
-
-msgid "Retype volume request issued successfully."
-msgstr "Mantıksal sürücü retype isteği başarıyla yapıldı."
-
-msgid "Roll detaching of volume completed successfully."
-msgstr "Mantıksal sürücünün ayrılmasının yuvarlanması başarıyla tamamlandı."
-
-#, python-format
-msgid "Running with vmemclient version: %s"
-msgstr "vmemclient sürüm: %s ile çalışıyor"
-
-#, python-format
-msgid ""
-"ScaleIO copy_image_to_volume volume: %(vol)s image service: %(service)s "
-"image id: %(id)s."
-msgstr ""
-"ScaleIO copy_image_to_volume mantıksal sürücü: %(vol)s imaj servisi: "
-"%(service)s imaj id: %(id)s."
-
-#, python-format
-msgid ""
-"ScaleIO copy_volume_to_image volume: %(vol)s image service: %(service)s "
-"image meta: %(meta)s."
-msgstr ""
-"ScaleIO copy_volume_to_image mantıksal sürücü: %(vol)s imaj servisi: "
-"%(service)s imaj meta: %(meta)s."
-
-#, python-format
-msgid ""
-"ScaleIO create cloned volume: source volume %(src)s to target volume %(tgt)s."
-msgstr ""
-"ScaleIO klonlanmış mantıksal sürücü oluştur: %(src)s kaynak mantıksal "
-"sürücüden %(tgt)s hedef mantıksal sürücüye."
-
-#, python-format
-msgid ""
-"ScaleIO create volume from snapshot: snapshot %(snapname)s to volume "
-"%(volname)s."
-msgstr ""
-"ScaleIO anlık görüntüden: %(snapname)s %(volname)s mantıksal sürücüsüne "
-"mantıksal sürücü oluştur."
-
-msgid "ScaleIO delete snapshot."
-msgstr "ScaleIO anlık görüntüyü sil."
-
-#, python-format
-msgid "ScaleIO extend volume: volume %(volname)s to size %(new_size)s."
-msgstr ""
-"ScaleIO mantıksal sürücü büyüt: %(volname)s mantıksal sürücüsünü "
-"%(new_size)s boyutuna." 
- -#, python-format -msgid "ScaleIO get domain id by name request: %s." -msgstr "ScaleIO isimle alan id'si getirme isteği: %s." - -#, python-format -msgid "ScaleIO get pool id by name request: %s." -msgstr "ScaleIO isimle havuz id'si getirme isteği: %s." - -#, python-format -msgid "" -"Secondary ssh hosts key file %(kwargs)s will be loaded along with %(conf)s " -"from /etc/cinder.conf." -msgstr "" -"İkincil ssh istemci anahtarı %(kwargs)s /etc/cinder.conf'dan gelen %(conf)s " -"ile birlikte yüklenecek." - -msgid "Session might have expired. Trying to relogin" -msgstr "Oturum sona ermiş olabilir. Tekrar giriş deneniyor" - -#, python-format -msgid "Setting host %(host)s to %(state)s." -msgstr "%(host)s istemcisi %(state)s olarak ayarlanıyor." - -#, python-format -msgid "Setting snapshot %(snap)s to online_flag %(flag)s" -msgstr "Anlık görüntü %(snap)s online_flag %(flag)s olarak ayarlanıyor" - -#, python-format -msgid "Setting volume %(vol)s to online_flag %(flag)s" -msgstr "%(vol)s mantıksal sürücüsü online_flag %(flag)s olarak ayarlanıyor" - -#, python-format -msgid "Skipping deletion of volume %s as it does not exist." -msgstr "%s mantıksal sürücüsünün silinmesi atlanıyor çünkü mevcut değil." - -#, python-format -msgid "" -"Skipping remove_export. No iscsi_target is presently exported for volume: %s" -msgstr "" -"remove_export atlanıyor. Mantıksal sürücü: %s için hiçbiri iscsi_target şu " -"an dışa aktarılmamış" - -#, python-format -msgid "Skipping remove_export. No iscsi_target provisioned for volume: %s" -msgstr "" -"remove_export atlanıyor. Mantıksal sürücü: %s için hiçbir iscsi_target " -"hazırlanmadı" - -#, python-format -msgid "Smb share %(share)s Total size %(size)s Total allocated %(allocated)s" -msgstr "" -"Smb paylaşımı %(share)s Toplam boyut %(size)s Toplam ayrılan %(allocated)s" - -msgid "Snapshot create request issued successfully." -msgstr "Anlık görüntü oluşturma isteği başarıyla yapıldı." 
- -#, python-format -msgid "" -"Snapshot creation %(cloneName)s completed. Source Volume: %(sourceName)s." -msgstr "" -"Anlık görüntü oluşturma %(cloneName)s tamamlandı. Kaynak Mantıksal sürücü: " -"%(sourceName)s." - -msgid "Snapshot delete request issued successfully." -msgstr "Anlık görüntü silme isteği başarıyla yapıldı." - -msgid "Snapshot force create request issued successfully." -msgstr "Anlık görüntü zorla oluşturma isteği başarıyla yapıldı." - -#, python-format -msgid "" -"Snapshot record for %s is not present, allowing snapshot_delete to proceed." -msgstr "" -"%s için anlık görüntü kaydı mevcut değil, devam etmek için " -"snapshot_delete'ye izin veriliyor." - -msgid "Snapshot retrieved successfully." -msgstr "Anlık görüntü başarıyla getirildi." - -#, python-format -msgid "Snapshot: %(snapshot)s: not found on the array." -msgstr "Anlık görüntü: %(snapshot)s: dizide bulunamadı." - -#, python-format -msgid "Source Snapshot: %s" -msgstr "Kaynak Anlık görüntü: %s" - -#, python-format -msgid "Start to create cgsnapshot for consistency group: %(group_name)s" -msgstr "Tutarlılık grubu için cgsnapshot oluşturmaya başla: %(group_name)s" - -#, python-format -msgid "Start to create consistency group: %(group_name)s id: %(id)s" -msgstr "Tutarlılık grubu oluşturmaya başla: %(group_name)s id: %(id)s" - -#, python-format -msgid "Start to delete consistency group: %(cg_name)s" -msgstr "Tutarlılık grubu silmeye başla: %(cg_name)s" - -#, python-format -msgid "Starting %(topic)s node (version %(version_string)s)" -msgstr "%(topic)s düğüm başlatılıyor (sürüm %(version_string)s)" - -#, python-format -msgid "Starting volume driver %(driver_name)s (%(version)s)" -msgstr "Mantıksal sürücü %(driver_name)s (%(version)s) başlatılıyor" - -#, python-format -msgid "Storage group not associated with the policy. Exception is %s." -msgstr "Depolama grubu ilke ile ilişkilendirilmemiş. İstisna %s." 
- -#, python-format -msgid "" -"Storage pools names: %(pools)s, storage pool name: %(pool)s, pool id: " -"%(pool_id)s." -msgstr "" -"Depolama havuzları isimleri: %(pools)s, depolama havuz ismi: %(pool)s, pool " -"id: %(pool_id)s." - -#, python-format -msgid "Successful login by user %s" -msgstr "%s kullanıcısı tarafından başarılı giriş" - -#, python-format -msgid "Successfully copied disk at: %(src)s to: %(dest)s." -msgstr "%(src)s konumundaki disk başarıyla %(dest)s konumuna kopyalandı." - -#, python-format -msgid "Successfully create volume %s" -msgstr "%s mantıksal sürücüsünü başarıyla oluştur" - -#, python-format -msgid "" -"Successfully created a CloudByte volume [%(cb_vol)s] w.r.t OpenStack volume " -"[%(stack_vol)s]." -msgstr "" -"Başarıyla bir CloudByte mantıksal sürücüsü [%(cb_vol)s] w.r.t OpenStack " -"mantıksal sürücüsü [%(stack_vol)s] oluşturuldu." - -#, python-format -msgid "Successfully created clone: %s." -msgstr "Klon başarıyla oluşturuldu: %s." - -#, python-format -msgid "" -"Successfully created snapshot: %(snap)s for volume backing: %(backing)s." -msgstr "" -"Anlık görüntü: %(snap)s başarıyla mantıksal sürücü desteği: %(backing)s için " -"oluşturuldu." - -#, python-format -msgid "Successfully created snapshot: %s." -msgstr "Anlık görüntü başarıyla oluşturuldu: %s." - -#, python-format -msgid "Successfully created volume backing: %s." -msgstr "Mantıksal sürücü desteği başarıyla oluşturuldu: %s." - -#, python-format -msgid "Successfully deleted file: %s." -msgstr "Dosya başarıyla silindi: %s." - -#, python-format -msgid "Successfully deleted snapshot: %(name)s of backing: %(backing)s." -msgstr "%(backing)s desteği için anlık görüntü: %(name)s başarıyla silindi." - -#, python-format -msgid "Successfully deleted snapshot: %s" -msgstr "Anlık görüntü başarıyla silindi: %s" - -#, python-format -msgid "Successfully deleted snapshot: %s." -msgstr "Anlık görüntü başarıyla silindi: %s." 
- -#, python-format -msgid "" -"Successfully deleted volume [%(cb_vol)s] at CloudByte corresponding to " -"OpenStack volume [%(stack_vol)s]." -msgstr "" -"[%(stack_vol)s] OpenStack mantıksal sürücüsüne karşılık gelen [%(cb_vol)s] " -"mantıksal sürücüsü CloudByte'da başarıyla silindi." - -#, python-format -msgid "Successfully deleted volume: %s" -msgstr "Mantıksal sürücü başarıyla silindi: %s" - -#, python-format -msgid "Successfully extended volume %(volume_id)s to size %(size)s." -msgstr "Mantıksal sürücü %(volume_id)s başarıyla %(size)s boyutuna büyütüldü." - -#, python-format -msgid "Successfully got volume information for volume %s" -msgstr "%s mantıksal sürücüsü için mantıksal sürücü bilgisi başarıyla alındı" - -#, python-format -msgid "Successfully initialized connection with volume: %(volume_id)s." -msgstr "Mantıksal sürücü: %(volume_id)s ile bağlantı başarıyla ilklendirildi." - -#, python-format -msgid "" -"Successfully initialized connection. target_wwn: %(target_wwn)s, " -"initiator_target_map: %(initiator_target_map)s, lun: %(target_lun)s." -msgstr "" -"Bağlantı başarıyla ilklendirildi. target_wwn: %(target_wwn)s, " -"initiator_target_map: %(initiator_target_map)s, lun: %(target_lun)s." - -#, python-format -msgid "" -"Successfully moved volume backing: %(backing)s into the folder: %(fol)s." -msgstr "" -"Mantıksal sürücü desteği: %(backing)s başarıyla %(fol)s dizinine taşındı." - -#, python-format -msgid "" -"Successfully relocated volume backing: %(backing)s to datastore: %(ds)s and " -"resource pool: %(rp)s." -msgstr "" -"Mantıksal sürücü desteği: %(backing)s başarıyla veri deposu: %(ds)s e ve " -"kaynak havuzu: %(rp)s ye konumlandırıldı." - -msgid "Successfully retrieved InitiatorGrpList" -msgstr "InitiatorGrpList başarıyla alındı" - -#, python-format -msgid "Successfully setup driver: %(driver)s for server: %(ip)s." -msgstr "%(ip)s sunucusu için sürücü: %(driver)s başarıyla kuruldu." 
- -#, python-format -msgid "Successfully terminated connection for volume: %(volume_id)s." -msgstr "Mantıksal sürücü: %(volume_id)s için bağlantı başarıyla sonlandırıldı." - -#, python-format -msgid "" -"Successfully update volume stats. backend: %(volume_backend_name)s, vendor: " -"%(vendor_name)s, driver version: %(driver_version)s, storage protocol: " -"%(storage_protocol)s." -msgstr "" -"Mantıksal sürücü istatistikleri başarıyla güncellendi: " -"%(volume_backend_name)s, satıcı: %(vendor_name)s, sürücü sürümü: " -"%(driver_version)s, depolama iletişim kuralı: %(storage_protocol)s." - -#, python-format -msgid "System %(id)s has %(status)s status." -msgstr "Sistem %(id)s %(status)s durumuna sahip." - -#, python-format -msgid "" -"System with controller addresses [%s] is not registered with web service." -msgstr "Kontrol adreslerine [%s] sahip sistem web servisine kayıtlı değil." - -#, python-format -msgid "Target wwns in masking view %(maskingView)s: %(targetWwns)s." -msgstr "%(maskingView)s maskeleme görünümündeki hedef wwn'ler: %(targetWwns)s." - -#, python-format -msgid "Terminate connection: %(volume)s." -msgstr "Bağlantıyı sonlandır: %(volume)s." - -msgid "Terminate volume connection completed successfully." -msgstr "Mantıksal sürücü bağlantısını sonlandırma başarıyla tamamlandı." - -#, python-format -msgid "The QoS sepcs is: %s." -msgstr "QoS özelliği: %s." - -#, python-format -msgid "" -"The multi-attach E-Series host group '%(label)s' already exists with " -"clusterRef %(clusterRef)s" -msgstr "" -"Çoklu-ekleme E-Serisi istemci grubu '%(label)s' zaten clusterRef " -"%(clusterRef)s ile mevcut" - -#, python-format -msgid "The storage group found is %(foundStorageGroupInstanceName)s." -msgstr "Bulunan depolama grubu %(foundStorageGroupInstanceName)s." - -#, python-format -msgid "" -"The volume belongs to more than one storage group. Returning storage group " -"%(sgName)s." -msgstr "" -"Mantıksal sürücü birden fazla depolama grubuna ait. 
%(sgName)s depolama " -"grubu döndürülüyor." - -#, python-format -msgid "" -"There is no backing for the snapshotted volume: %(snap)s. Not creating any " -"backing for the volume: %(vol)s." -msgstr "" -"Anlık görüntülenen mantıksal sürücü: %(snap)s için destek yok. Mantıksal " -"sürücü: %(vol)s için deste oluşturulmuyor." - -#, python-format -msgid "" -"There is no backing for the source volume: %(src)s. Not creating any backing " -"for volume: %(vol)s." -msgstr "" -"Kaynak mantıksal sürücü: %(src)s için destek yok. Mantıksal sürücü: %(vol)s " -"için herhangi bir destek oluşturulmuyor." - -#, python-format -msgid "There is no backing for the volume: %s. Need to create one." -msgstr "Mantıksal sürücü: %s için destek yok. Bir tane oluşturmalı." - -#, python-format -msgid "There is no backing, and so there is no snapshot: %s." -msgstr "Destek yok, o yüzden anlık görüntü de yok: %s." - -#, python-format -msgid "There is no backing, so will not create snapshot: %s." -msgstr "Destek yok, yani anlık görüntü oluşturulmayacak: %s." - -#, python-format -msgid "" -"There is no snapshot point for the snapshotted volume: %(snap)s. Not " -"creating any backing for the volume: %(vol)s." -msgstr "" -"Anlık görüntülenen mantıksal sürücü: %(snap)s için anlık görüntü noktası " -"yok. Mantıksal sürücü: %(vol)s için destek oluşturulmuyor." - -msgid "Token is invalid, going to re-login and get a new one." -msgstr "Jeton geçersiz, tekrar giriş yapıp yeni bir tane alınacak." - -msgid "Transfer volume completed successfully." -msgstr "Mantıksal sürücü aktarımı başarıyla tamamlandı." - -#, python-format -msgid "Tried to delete non-existent vdisk %s." -msgstr "Mevcut olmayan vdisk %s silinmeye çalışıldı." - -#, python-format -msgid "" -"Tried to delete snapshot %s, but was not found in Datera cluster. Continuing " -"with delete." -msgstr "" -"%s anlık görüntüsü silinmeye çalışıldı, ama Datera kümesinde bulunamadı. " -"Silmeye devam ediliyor." 
- -#, python-format -msgid "" -"Tried to delete volume %s, but it was not found in the Datera cluster. " -"Continuing with delete." -msgstr "" -"%s mantıksal sürücüsü silinmeye çalışıldı, ama Datera kümesinde bulunamadı. " -"Silmeye devam ediliyor." - -#, python-format -msgid "Trying to unmap volume from all sdcs before deletion: %s." -msgstr "" -"Silmeden önce tüm sdc'lerden mantıksal sürücü eşleştirmesini kaldırmaya " -"çalışılıyor: %s." - -#, python-format -msgid "Unable to serialize field '%s' - excluding from backup" -msgstr "'%s' alanı serileştirilemiyor - yedekten çıkarılıyor" - -#, python-format -msgid "Unexporting lun %s." -msgstr "lun %s aktarımı geri alınıyor" - -#, python-format -msgid "Unmanage volume %(volume_id)s completed." -msgstr "%(volume_id)s yönetimini bırakma tamamlandı." - -#, python-format -msgid "Unmanage volume %s" -msgstr "Mantıksal sürücü %s'i yönetmeyi durdur" - -#, python-format -msgid "Unmanage volume with id: %s" -msgstr "Şu kimliğe sahip mantıksal sürücü yönetimini bırak: %s" - -#, python-format -msgid "Unmanaged LUN with current path %(path)s and uuid %(uuid)s." -msgstr "" -"%(path)s mevcut yolu ve %(uuid)s uuid'ine sahip LUN yönetimi bırakıldı." - -#, python-format -msgid "Unmanaged volume with current label %(label)s and wwn %(wwn)s." -msgstr "" -"%(label)s etiketli ve %(wwn)s wwn'li mantıksal sürücünün yönetimi bırakıldı." - -#, python-format -msgid "Unmap volume: %(volume)s." -msgstr "Mantıksal sürücü eşleştirmesini kes: %(volume)s." - -msgid "Unreserve volume completed successfully." -msgstr "Mantıksal sürücünün rezervinin kaldırılması başarıyla bitti." - -#, python-format -msgid "Update migrated volume %(new_volume)s completed." -msgstr "Göç etmiş mantıksal sürücü %(new_volume)s güncellemesi tamamlandı." - -msgid "Update readonly setting on volume completed successfully." -msgstr "" -"Mantıksal sürücü üstünde yalnızca okunabilir ayarın güncellenmesi başarıyla " -"tamamlandı." 
- -msgid "Update snapshot metadata completed successfully." -msgstr "Anlık görüntü metadata'sı güncelle başarıyla tamamlandı." - -msgid "Update volume admin metadata completed successfully." -msgstr "Mantıksal sürücü yönetici metadata'sını güncelle başarıyla tamamlandı." - -msgid "Update volume metadata completed successfully." -msgstr "Mantıksal sürücü metadata güncellemesi başarıyla tamamlandı." - -#, python-format -msgid "Updated Consistency Group %s" -msgstr "Tutarlılık Grubu %s güncellendi" - -#, python-format -msgid "" -"Updating consistency group %(id)s with name %(name)s description: " -"%(description)s add_volumes: %(add_volumes)s remove_volumes: " -"%(remove_volumes)s." -msgstr "" -"%(name)s isimli %(id)s tutarlılık grubu güncelleniyor tanım: %(description)s " -"add_volumes: %(add_volumes)s remove_volumes: %(remove_volumes)s." - -#, python-format -msgid "Updating snapshot %(id)s with info %(dict)s" -msgstr "%(id)s anlık görüntüsü %(dict)s bilgisi ile güncelleniyor" - -#, python-format -msgid "Updating storage service catalog information for backend '%s'" -msgstr "'%s' arka ucu için depolama servisi kataloğu bilgisi güncelleniyor" - -#, python-format -msgid "" -"Using FC Zone Manager %(zm_version)s, Driver %(drv_name)s %(drv_version)s." -msgstr "" -"FC Zone Manager %(zm_version)s, Sürücü %(drv_name)s %(drv_version)s " -"kullanılıyor." - -#, python-format -msgid "Using existing initiator group name: %(igGroupName)s." -msgstr "Mevcut başlatıcı grubu ismi kullanılıyor: %(igGroupName)s." 
- -#, python-format -msgid "Using overridden vmware_host_version from config: %s" -msgstr "" -"%s yapılandırmasından üzerine yazılmış vmware_host_version kullanılıyor" - -#, python-format -msgid "Using pool %(pool)s instead of %(cpg)s" -msgstr "%(cpg)s yerine %(pool)s havuzu kullanılıyor" - -#, python-format -msgid "Value with type=%s is not serializable" -msgstr "type=%s sahip değer serileştirilemez" - -#, python-format -msgid "Virtual volume %(disp)s '%(new)s' is being retyped." -msgstr "Sanal mantıksal sürücü %(disp)s '%(new)s' retype ediliyor." - -#, python-format -msgid "Virtual volume %(disp)s '%(new)s' is now being managed." -msgstr "Sanal mantıksal sürücü %(disp)s '%(new)s' artık yönetiliyor." - -#, python-format -msgid "" -"Virtual volume %(disp)s '%(new)s' snapCPG is empty so it will be set to: " -"%(cpg)s" -msgstr "" -"Sanal mantıksal sürücü %(disp)s '%(new)s' snapCPG boş, bu yüzden %(cpg)s " -"olarak ayarlanacak" - -#, python-format -msgid "" -"Virtual volume %(disp)s '%(vol)s' is no longer managed. Volume renamed to " -"'%(new)s'." -msgstr "" -"Sanal mantıksal sürücü %(disp)s '%(vol)s' artık yönetilmiyor. Mantıksal " -"sürücü '%(new)s' olarak yeniden adlandırıldı." - -#, python-format -msgid "Virtual volume %(disp)s successfully retyped to %(new_type)s." -msgstr "" -"Sanal mantıksal sürücü %(disp)s başarıyla %(new_type)s olarak retype edildi." - -#, python-format -msgid "Virtual volume '%(ref)s' renamed to '%(new)s'." -msgstr "Sanal mantıksal sürücü '%(ref)s' '%(new)s' olarak adlandırıldı." - -#, python-format -msgid "Vol copy job completed for dest %s." -msgstr "Mantıksal sürücü kopyalama işi %s hedefi için tamamlandı." - -#, python-format -msgid "Volume %(volume)s does not have meta device members." -msgstr "Mantıksal sürücü %(volume)s meta aygıt üyelerine sahip değil." - -#, python-format -msgid "" -"Volume %(volume)s is already mapped. The device number is %(deviceNumber)s." -msgstr "" -"Mantıksal sürücü %(volume)s zaten eşleştirilmiş. 
Aygıt numarası " -"%(deviceNumber)s." - -#, python-format -msgid "Volume %(volumeName)s not in any storage group." -msgstr "Mantıksal sürücü %(volumeName)s hiçbir depolama grubunda değil." - -#, python-format -msgid "" -"Volume %(volume_id)s: being created as %(create_type)s with specification: " -"%(volume_spec)s" -msgstr "" -"Mantıksal sürücü %(volume_id)s: %(volume_spec)s özelliğine sahip " -"%(create_type)s olarak oluşturuluyor" - -#, python-format -msgid "Volume %(volume_name)s (%(volume_id)s): created successfully" -msgstr "" -"Mantıksal sürücü %(volume_name)s (%(volume_id)s): başarıyla oluşturuldu" - -#, python-format -msgid "Volume %s has been transferred." -msgstr "Mantıksal sürücü %s aktarıldı." - -#, python-format -msgid "Volume %s is mapping to multiple hosts." -msgstr "Mantıksal sürücü %s birden fazla istemciyle eşleşiyor." - -#, python-format -msgid "Volume %s is not mapped. No volume to unmap." -msgstr "" -"Mantıksal sürücü %s eşleştirilmemiş. Eşleştirmesi kesilecek bir mantıksal " -"sürücü yok." - -#, python-format -msgid "Volume %s: retyped successfully" -msgstr "Mantıksal sürücü %s: retype başarılı" - -#, python-format -msgid "Volume copy %(size_in_m).2f MB at %(mbps).2f MB/s" -msgstr "Mantıksal sürücü kopyalama %(size_in_m).2f MB %(mbps).2f MB/s hızında" - -msgid "Volume created successfully." -msgstr "Mantıksal sürücü başarıyla oluşturuldu." - -msgid "Volume info retrieved successfully." -msgstr "Mantıksal sürücü bilgisi başarıyla getirildi." - -msgid "Volume retrieved successfully." -msgstr "Mantıksal sürücü başarıyla alındı." - -#, python-format -msgid "Volume service: %(label)s. Casted to: %(loc)s" -msgstr "Mantıksal sürücü servisi: %(label)s. %(loc)s'a gönderildi" - -#, python-format -msgid "Volume status is: %s." -msgstr "Mantıksal sürücü durumu: %s." - -#, python-format -msgid "Volume type is %s." -msgstr "Mantıksal sürücü türü %s." 
- -#, python-format -msgid "" -"Volume type: %(volume_type)s, storage pool name: %(pool_name)s, storage pool " -"id: %(pool_id)s, protection domain id: %(domain_id)s, protection domain " -"name: %(domain_name)s." -msgstr "" -"Mantıksal sürücü türü: %(volume_type)s, depolama havuzu ismi: %(pool_name)s, " -"depolama havuzu kimliği: %(pool_id)s, koruma alan kimliği: %(domain_id)s, " -"koruma alan ismi: %(domain_name)s." - -msgid "Volume updated successfully." -msgstr "Mantıksal sürücü başarıyla güncellendi." - -#, python-format -msgid "Volume with given ref %s need not be renamed during manage operation." -msgstr "" -"Verilen %s referanslı mantıksal sürücü yönetme işlemi süresinde yeniden " -"adlandırılmamalı." - -#, python-format -msgid "" -"Volume: %(vol_id)s, size: %(vol_size)d is larger than backup: %(backup_id)s, " -"size: %(backup_size)d, continuing with restore." -msgstr "" -"Mantıksal sürücü: %(vol_id)s, boyut: %(vol_size)d yedek: %(backup_id)s, " -"boyut: %(backup_size)d'den büyük, geri yüklemeyle devam ediliyor." - -#, python-format -msgid "WWPN on node %(node)s: %(wwpn)s." -msgstr "%(node)s düğümü üzerinde WWPN: %(wwpn)s." - -msgid "Waiting for web service array communication." -msgstr "Web servis dizisi iletişimi bekleniyor." - -#, python-format -msgid "XtremIO SW version %s" -msgstr "XtremIO SW sürümü %s" - -#, python-format -msgid "ZFSSA version: %s" -msgstr "ZFSSA sürümü: %s" - -#, python-format -msgid "Zone exists in I-T mode. Skipping zone creation %s" -msgstr "Bölge I-T kipinde mevcut. 
Bölge oluşturma %s atlanıyor" - -#, python-format -msgid "Zone map to add: %s" -msgstr "Eklenecek bölge haritası: %s" - -#, python-format -msgid "Zoning Policy: %s, not recognized" -msgstr "Bölgeleme İlkesi: %s, tanınmıyor" - -#, python-format -msgid "Zoning policy for Fabric %s" -msgstr "Fabric %s için bölgeleme ilkesi" - -#, python-format -msgid "Zoning policy for fabric %s" -msgstr "Fabric %s için bölgeleme haritası" - -#, python-format -msgid "" -"_check_volume_copy_ops: Volume %(vol)s does not have the specified vdisk " -"copy operation: orig=%(orig)s new=%(new)s." -msgstr "" -"_check_volume_copy_ops: Mantıksal sürücü %(vol)s belirtilen vdisk kopyalama " -"işlemine sahip değil asıl=%(orig)s yeni=%(new)s." - -#, python-format -msgid "_get_tgt_iqn: iSCSI target iqn is: %s." -msgstr "_get_tgt_iqn: iSCSI hedef iqn: %s." - -#, python-format -msgid "casted to %s" -msgstr "%s'e gönderildi" - -#, python-format -msgid "cgsnapshot %s: created successfully" -msgstr "cgsnapshot %s: başarıyla oluşturuldu" - -#, python-format -msgid "cgsnapshot %s: deleted successfully" -msgstr "cgsnapshot %s: başarıyla silindi" - -#, python-format -msgid "cgsnapshot %s: deleting" -msgstr "cgsnapshot %s: siliniyor" - -#, python-format -msgid "igroup %(grp)s found for initiator %(iname)s" -msgstr "%(iname)s başlatıcısı için %(grp)s igroup bulundu" - -#, python-format -msgid "initialize_connection volume: %(volume)s, connector: %(connector)s" -msgstr "" -"initialize_connection mantıksal sürücü: %(volume)s, bağlayıcı: %(connector)s" - -#, python-format -msgid "" -"migrate_volume_completion is cleaning up an error for volume %(vol1)s " -"(temporary volume %(vol2)s" -msgstr "" -"migrate_volume_completion %(vol1)s mantıksal sürücüsü için bir hatayı " -"temizliyor (geçici mantıksal sürücü %(vol2)s" - -#, python-format -msgid "new cloned volume: %s" -msgstr "yeni klonlanan mantıksal sürücü: %s" - -#, python-format -msgid "open_connection to %(ssn)s at %(ip)s" -msgstr "%(ip)s ye %(ssn)s 
open_connection" - -#, python-format -msgid "setting volume %s to error_restoring (was restoring-backup)." -msgstr "" -"%s mantıksal sürücüsü error_restoring olarak ayarlanıyor (yedek geri " -"yükleniyordu)." - -#, python-format -msgid "snapshot %s doesn't exist" -msgstr "anlık görüntü %s mevcut değil" - -#, python-format -msgid "source volume for cloning: %s" -msgstr "klon için kaynak mantıksal sürücü: %s" - -#, python-format -msgid "terminate_connection volume: %(volume)s, connector: %(con)s" -msgstr "terminate_connection mantıksal sürücü: %(volume)s, bağlayıcı: %(con)s" - -#, python-format -msgid "tunevv failed because the volume '%s' has snapshots." -msgstr "tunew başarısız çünkü '%s' mantıksal sürücüsü anlık görüntülere sahip." - -#, python-format -msgid "username: %(username)s, verify_cert: %(verify)s." -msgstr "kullanıcı adı: %(username)s, verify_cert: %(verify)s." - -#, python-format -msgid "vol=%s" -msgstr "birim=%s" - -#, python-format -msgid "vol_name=%(name)s provider_location=%(loc)s" -msgstr "vol_name=%(name)s provider_location=%(loc)s" - -#, python-format -msgid "volume %s doesn't exist" -msgstr "mantıksal sürücü %s mevcut değil" - -#, python-format -msgid "volume %s no longer exists in backend" -msgstr "mantıksal sürücü %s artık arka uçta mevcut değil" - -msgid "volume_file does not support fileno() so skipping fsync()" -msgstr "volume_file fileno() desteklemiyor bu yüzden fsync() atlanıyor" diff --git a/cinder/locale/tr_TR/LC_MESSAGES/cinder-log-warning.po b/cinder/locale/tr_TR/LC_MESSAGES/cinder-log-warning.po deleted file mode 100644 index c7e40ff91..000000000 --- a/cinder/locale/tr_TR/LC_MESSAGES/cinder-log-warning.po +++ /dev/null @@ -1,1093 +0,0 @@ -# Translations template for cinder. -# Copyright (C) 2015 ORGANIZATION -# This file is distributed under the same license as the cinder project. -# -# Translators: -# Ying Chun Guo , 2015 -# OpenStack Infra , 2015. #zanata -# Andreas Jaeger , 2016. 
#zanata -msgid "" -msgstr "" -"Project-Id-Version: cinder 9.0.0.0rc2.dev161\n" -"Report-Msgid-Bugs-To: https://bugs.launchpad.net/openstack-i18n/\n" -"POT-Creation-Date: 2016-10-05 02:09+0000\n" -"MIME-Version: 1.0\n" -"Content-Type: text/plain; charset=UTF-8\n" -"Content-Transfer-Encoding: 8bit\n" -"PO-Revision-Date: 2015-08-26 03:31+0000\n" -"Last-Translator: openstackjenkins \n" -"Language: tr-TR\n" -"Plural-Forms: nplurals=1; plural=0;\n" -"Generated-By: Babel 2.0\n" -"X-Generator: Zanata 3.7.3\n" -"Language-Team: Turkish (Turkey)\n" - -#, python-format -msgid "%(path)s is being set with open permissions: %(perm)s" -msgstr "%(path)s açık izinlerle ayarlanıyor: %(perm)s" - -#, python-format -msgid "%s" -msgstr "%s" - -#, python-format -msgid "%s is already mounted" -msgstr "%s zaten bağlı" - -msgid "Attempted to delete a space that's not there." -msgstr "Orada olmayan bir alan silinmeye çalışıldı." - -#, python-format -msgid "" -"Attempting a rollback of: %(volumeName)s to original pool " -"%(sourcePoolInstanceName)s." -msgstr "" -"%(volumeName)s'in asıl havuz %(sourcePoolInstanceName)s'e geri alınması " -"deneniyor." - -msgid "Attempting recreate of backing lun..." -msgstr "Destekleyen lun tekrar oluşturulmaya çalışılıyor..." - -#, python-format -msgid "Availability zone '%s' is invalid" -msgstr "'%s' kullanılabilir bölgesi geçersiz" - -#, python-format -msgid "" -"Backup service %(service)s does not support verify. Backup id %(id)s is not " -"verified. Skipping verify." -msgstr "" -"Yedek servisi %(service)s doğrulamayı desteklemiyor. Yedek %(id)s " -"doğrulanmamış. Doğrulama atlanıyor." - -#, python-format -msgid "CHAP is enabled, but server secret not configured on server %s" -msgstr "CHAP etkin, ama %s sunucusu üzerinde sunucu gizi yapılandırılmamış" - -#, python-format -msgid "CHAP secret exists for host %s but CHAP is disabled" -msgstr "CHAP gizi %s istemcisi için mevcut ama CHAP kapalı" - -msgid "CHAP secret exists for host but CHAP is disabled." 
-msgstr "CHAP gizi istemci için mevcut ama CHAP kapatılmış." - -msgid "Can't find lun on the array." -msgstr "Dizide lun bulunamıyor." - -msgid "Can't find snapshot on the array." -msgstr "Dizide anlık görüntü bulunamıyor." - -msgid "Can't find target iqn from rest." -msgstr "Rest'den hedef iqn bulunamadı." - -msgid "Cannot determine the hardware type." -msgstr "Donanım türü algılanamadı." - -#, python-format -msgid "Cannot get volume status %(exc)s." -msgstr "Mantıksal sürücü durumu %(exc)s alınamıyor." - -#, python-format -msgid "" -"Cannot undo volume rename; old name was %(old_name)s and new name is " -"%(new_name)s." -msgstr "" -"Mantıksal sürücü yeniden adlandırma geri alınamaz; eski isim %(old_name)s " -"idi ve yeni isim %(new_name)s." - -#, python-format -msgid "Change will make usage less than 0 for the following resources: %s" -msgstr "Değişiklik, şu kaynaklar için kullanımı 0'ın altına düşürecek: %s" - -#, python-format -msgid "" -"Changing backing: %(backing)s name from %(new_name)s to %(old_name)s failed." -msgstr "" -"Destekleyici: %(backing)s ismi %(new_name)s'den %(old_name)s'e değişme " -"başarısız." - -#, python-format -msgid "" -"Clone failed on V3. Cleaning up the target volume. Clone name: %(cloneName)s " -msgstr "" -"V3 üzerinde kopya başarısız. Hedef mantıksal sürücü temizleniyor. Kopya " -"ismi: %(cloneName)s " - -#, python-format -msgid "Could not create target because it already exists for volume: %s" -msgstr "Hedef oluşturulamadı çünkü mantıksal sürücü: %s için zaten mevcut" - -#, python-format -msgid "Could not determine root volume name on %s." -msgstr "%s üzerinde kök mantıksal sürücü ismi belirlenemiyor." - -#, python-format -msgid "" -"CreateStorageHardwareID failed. initiator: %(initiator)s, rc=%(rc)d, ret=" -"%(ret)s." -msgstr "" -"CreateStorageHardwareID başarısız. başlatan: %(initiator)s, rc=%(rc)d, ret=" -"%(ret)s." - -#, python-format -msgid "Deadlock detected when running '%(func_name)s': Retrying..." 
-msgstr "" -"'%(func_name)s' çalıştırılırken ölükilit algılandı: Tekrar deneniyor..." - -#, python-format -msgid "Delete Snapshot id not found. Removing from cinder: %(id)s Ex: %(msg)s" -msgstr "" -"Anlık görüntü kimliği sil bulunamadı. Cinder'den kaldırılıyor: %(id)s " -"İstisna: %(msg)s" - -#, python-format -msgid "Delete volume id not found. Removing from cinder: %(id)s Ex: %(msg)s" -msgstr "" -"Mantıksal sürücü kimliği sil bulunamadı. Cinder'den kaldırılıyor: %(id)s " -"İstisna: %(msg)s" - -#, python-format -msgid "Deleting image in unexpected status: %(image_status)s." -msgstr "Beklenmedik durumdaki imaj siliniyor: %(image_status)s." - -#, python-format -msgid "Destination %s already exists." -msgstr "Hedef %s zaten mevcut." - -msgid "" -"Destination volume type is different from source volume type for an " -"encrypted volume. Encrypted backup restore has failed." -msgstr "" -"Şifreli bir mantıksal sürücü için hedef mantıksal sürücü türü kaynak " -"mantıksal sürücü türünden farklı. Şifreli yedeğin geri yüklenmesi başarısız." - -msgid "Detected snapshot stuck in creating status, setting to ERROR." -msgstr "" -"Oluşturma durumunda kalmış anlık görüntü algılandı, HATA olarak ayarlanıyor." - -msgid "Discover file retries exhausted." -msgstr "Dosya keşfi yeniden denemeleri tükendi." - -msgid "Driver didn't return connection info from terminate_connection call." -msgstr "Sürücü terminate_connection çağrısından bağlantı bilgisi döndürmedi." - -msgid "Driver didn't return connection info, can't add zone." -msgstr "Sürücü bağlantı bilgisi döndürmedi, bölge eklenemiyor." - -#, python-format -msgid "" -"Driver path %s is deprecated, update your configuration to the new path." -msgstr "" -"Sürücü yolu %s artık kullanılmıyor, yapılandırmanızı yeni yola göre " -"güncelleyin." - -#, python-format -msgid "Error finding LUNs for volume %s. Verify volume exists." -msgstr "" -"%s mantıksal sürücüsü için LUN bulmada hata. Mantıksal sürücünün varlığını " -"doğrula." 
- -#, python-format -msgid "" -"Error in filtering function '%(function)s' : '%(error)s' :: failing host" -msgstr "" -"'%(function)s' filtreleme fonksiyonunda hata : '%(error)s' :: failing host" - -#, python-format -msgid "" -"Error in goodness_function function '%(function)s' : '%(error)s' :: " -"Defaulting to a goodness of 0" -msgstr "" -"'%(function)s' goodness_function fonksiyonunda hata : '%(error)s' :: iyilik " -"0 olarak varsayılıyor" - -#, python-format -msgid "Error mapping LUN. Code :%(code)s, Message: %(message)s" -msgstr "LUN eşleştirmede hata. Kod:%(code)s, İleti: %(message)s" - -#, python-format -msgid "Error occurred while deleting backing: %s." -msgstr "Destekleyici silinirken hata oluştu: %s." - -#, python-format -msgid "Error occurred while deleting descriptor: %s." -msgstr "Tanımlayıcı silinirken hata oluştu: %s." - -#, python-format -msgid "Error occurred while deleting temporary disk: %s." -msgstr "Geçici disk silinirken hata oluştu: %s." - -#, python-format -msgid "Error refreshing volume info. Message: %s" -msgstr "Mantıksal sürücü bilgisi tazelenirken hata. İleti: %s" - -#, python-format -msgid "Error running SSH command: %s" -msgstr "SSH komutu çalıştırılırken hata: %s" - -#, python-format -msgid "Error unmapping LUN. Code :%(code)s, Message: %(message)s" -msgstr "LUN eşleştirmesi kaldırmada hata. Kod :%(code)s, İleti: %(message)s" - -#, python-format -msgid "Exception during cache cleaning %(share)s. Message - %(ex)s" -msgstr "%(share)s zula temizliği sırasında istisna. İleti - %(ex)s" - -#, python-format -msgid "Exception during deleting %s" -msgstr "%s silme sırasında istisna" - -#, python-format -msgid "Exception during mounting %s" -msgstr "%s bağlama sırasında olağandışı durum" - -#, python-format -msgid "Exception during unmounting %s" -msgstr "%s ayrılırken istisna" - -#, python-format -msgid "Exception moving file %(src)s. Message - %(e)s" -msgstr "%(src)s dosyası taşınırken istisna. 
İleti - %(e)s" - -#, python-format -msgid "Exception moving file %(src)s. Message: %(e)s" -msgstr "%(src)s dosyasının taşınması sırasında istisna. İleti: %(e)s" - -#, python-format -msgid "" -"Exception while creating image %(image_id)s snapshot. Exception: %(exc)s" -msgstr "" -"%(image_id)s imajı anlık görüntüsü oluşturulurken istisna. İstisna: %(exc)s" - -#, python-format -msgid "" -"Exception while registering image %(image_id)s in cache. Exception: %(exc)s" -msgstr "%(image_id)s imajı zulaya kaydedilirken istisna. İstisna: %(exc)s" - -#, python-format -msgid "" -"Extension %(ext_name)s: Cannot extend resource %(collection)s: No such " -"resource" -msgstr "" -"%(ext_name)s uzantısı: %(collection)s kaynağı genişletilemiyor: Böyle bir " -"kaynak yok" - -#, python-format -msgid "Extra spec %(old)s is deprecated. Use %(new)s instead." -msgstr "Ek özellik %(old)s artık kullanılmıyor. Yerine %(new)s kullanın." - -#, python-format -msgid "Extra spec %(old)s is obsolete. Use %(new)s instead." -msgstr "Ek özellik %(old)s artık kullanılmıyor. Yerine %(new)s kullanın." - -msgid "" -"Extra spec key 'storagetype:pool' is obsoleted since driver version 5.1.0. " -"This key will be ignored." -msgstr "" -"Ek özellik anahtarı 'storagetype:pool' sürücü sürümü 5.1.0'dan itibaren " -"kullanılmıyor. Bu anahtar atlanıyor." - -#, python-format -msgid "FAST is enabled. Policy: %(fastPolicyName)s." -msgstr "FAST etkin. İlke: %(fastPolicyName)s." - -#, python-format -msgid "" -"Failed target removal because target or ACL's couldn't be found for iqn: %s." -msgstr "Hedef silme başarısız veya iqn: %s için ACL'ler bulunamadı." - -#, python-format -msgid "" -"Failed terminating the connection of volume %(volume_id)s, but it is " -"acceptable." -msgstr "" -"%(volume_id)s mantıksal sürücüsü bağlantısının sonlandırılması başarısız, " -"ama bu kabul edilebilir." 
- -#, python-format -msgid "Failed to activate volume copy throttling: %(err)s" -msgstr "Mantıksal sürücü kopyalama daraltma etkinleştirilemedi: %(err)s" - -#, python-format -msgid "Failed to add host group: %s" -msgstr "İstemci grubu ekleme başarısız: %s" - -#, python-format -msgid "Failed to associate qos specs %(id)s with type: %(vol_type_id)s" -msgstr "%(vol_type_id)s türündeki %(id)s qos özellikleri ilişkilendirilemedi" - -#, python-format -msgid "Failed to create pair: %s" -msgstr "Çift oluşturma başarısız: %s" - -#, python-format -msgid "Failed to destroy Storage Group %s." -msgstr "Depolama Grubu %s silinemedi." - -#, python-format -msgid "Failed to disassociate qos specs %(id)s with type: %(vol_type_id)s" -msgstr "%(vol_type_id)s türündeki %(id)s qos özellikleri ilişkileri kesilemedi" - -#, python-format -msgid "Failed to disassociate qos specs %s." -msgstr "Qos özellikleri %s ilişkisi kesilemedi." - -#, python-format -msgid "Failed to discard zero page: %s" -msgstr "Sıfır sayfası atılamadı: %s" - -msgid "Failed to get Raid Snapshot ID and did not store in snapshot." -msgstr "" -"Raid Anlık Görüntü Kimliği alınamadı ve anlık görüntü içine kaydedilmedi." - -msgid "Failed to get target pool id." -msgstr "Hedef havuz kimliği alınamadı." - -#, python-format -msgid "Failed to invoke ems. Message : %s" -msgstr "ems başlatma başarısızı. İleti : %s" - -#, python-format -msgid "Failed to load extension %(classpath)s: %(exc)s" -msgstr "%(classpath)s uzantısı yüklemede hata: %(exc)s" - -#, python-format -msgid "Failed to load extension %(ext_factory)s: %(exc)s" -msgstr "%(ext_factory)s uzantısı yüklemede hata: %(exc)s" - -#, python-format -msgid "Failed to load extension %(ext_name)s: %(exc)s" -msgstr "%(ext_name)s eklentisi yüklenemedi: %(exc)s" - -#, python-format -msgid "Failed to manage virtual volume %(disp)s due to error during retype." -msgstr "" -"Retype sırasındaki hata sebebiyle %(disp)s sanal mantıksal sürücüsü " -"yönetilemedi." 
- -#, python-format -msgid "" -"Failed to migrate volume. The destination volume %(vol)s is not deleted " -"since the source volume may have been deleted." -msgstr "" -"Mantıksal sürücü göçü başarısız. Hedef mantıksal sürücü %(vol)s kaynak " -"mantıksal sürücü silinmiş olabileceğinden silinmiyor." - -#, python-format -msgid "" -"Failed to migrate: %(volumeName)s from default source storage group for FAST " -"policy: %(sourceFastPolicyName)s. Attempting cleanup... " -msgstr "" -"%(volumeName)s'in %(sourceFastPolicyName)s FAST ilkesi için varsayılan " -"kaynak depolama grubundan göçü başarısız. Temizlik deneniyor... " - -#, python-format -msgid "Failed to query pool %(id)s status %(ret)d." -msgstr "%(id)s havuzu sorgulanamadı durum %(ret)d." - -#, python-format -msgid "Failed to refresh mounts, reason=%s" -msgstr "Bağlar tazelenemedi, sebep=%s" - -#, python-format -msgid "Failed to restart horcm: %s" -msgstr "horcm yeniden başlatılamadı: %s" - -#, python-format -msgid "Failed to run command: %s." -msgstr "Komut çalıştırma başarısız: %s." - -#, python-format -msgid "" -"Failed to save iscsi LIO configuration when modifying volume id: %(vol_id)s." -msgstr "" -"Mantıksal sürücü: %(vol_id)s değiştirilirken iscsi LIO yapılandırması " -"kaydedilemedi." - -#, python-format -msgid "Failed to setup blkio cgroup to throttle the device '%(device)s'." -msgstr "'%(device)s' aygıtını daraltmak için blkio cgroup kurulumu başarısız." - -#, python-format -msgid "" -"Failed to update %(conf)s for volume id %(vol_id)s after removing iscsi " -"target. %(conf)s does not exist." -msgstr "" -"Iscsi hedefini kaldırdıktan sonra %(vol_id)s mantıksal sürücüsü kimliği için " -"%(conf)s güncellemesi başarısız. %(conf)s mevcut değil." - -#, python-format -msgid "Failure deleting job %s." -msgstr "%s işinin silinmesi başarısız." - -#, python-format -msgid "Failure deleting temp snapshot %s." -msgstr "Geçici anlık görüntü %s silinemedi." 
- -#, python-format -msgid "Failure deleting the snapshot %(snapshot_id)s of volume %(volume_id)s." -msgstr "" -"%(volume_id)s mantıksal sürücüsünün %(snapshot_id)s anlık görüntüsünün " -"silinmesi başarısız." - -#, python-format -msgid "" -"Flexvisor failed to delete volume %(id)s from group %(vgid)s due to " -"%(status)s." -msgstr "" -"Flexvisor %(id)s mantıksal sürücüsünü %(vgid)s grubundan %(status)s " -"sebebiyle silemedi." - -#, python-format -msgid "Flexvisor failed to delete volume %(id)s from the group %(vgid)s." -msgstr "Flexvisor %(id)s mantıksal sürücüsünü %(vgid)s grubundan silemedi." - -msgid "Goodness function not set :: defaulting to minimal goodness rating of 0" -msgstr "" -"İyilik fonksiyonu ayarlanmamış :: asgari iyilik değeri olan 0 varsayılıyor" - -#, python-format -msgid "Got disconnected; trying to reconnect. (%s)" -msgstr "Bağlantı kesildi; tekrar bağlanılmaya çalışılıyor. (%s)" - -#, python-format -msgid "" -"Group sync name not found for target group %(target)s on %(storageSystem)s." -msgstr "" -"%(storageSystem)s üzerindeki %(target)s hedef grubu için grup eş zamanlama " -"ismi bulunamadı." - -#, python-format -msgid "" -"Hint \"%s\" dropped because ExtendedServerAttributes not active in Nova." -msgstr "" -"ExtendedServerAttributes Nova'da etkin olmadığından \"%s\" ipucu düşürüldü." - -#, python-format -msgid "" -"Hint \"%s\" dropped because Nova did not return enough information. Either " -"Nova policy needs to be changed or a privileged account for Nova should be " -"specified in conf." -msgstr "" -"\"%s\" ipucu düşürüldü çünkü Nova yeterli bilgi döndürmedi. Nova ilkesinin " -"değiştirilmesi gerekiyor ya da yapılandırmada Nova için ayrıcalıklı bir " -"hesap belirtilmeli." - -msgid "" -"Host exists without CHAP credentials set and has iSCSI attachments but CHAP " -"is enabled. Updating host with new CHAP credentials." -msgstr "" -"İstemci CHAP kimlik bilgileri ayarlanmamış halde ve iSCSI eklentileri var " -"ama CHAP etkin. 
İstemci yeni CHAP kimlik bilgileriyle güncelleniyor." - -msgid "Host has no CHAP key, but CHAP is enabled." -msgstr "İstemcinin CHAP anahtarı yok, ama CHAP etkin." - -msgid "IQN already existed." -msgstr "IQN zaten mevcut." - -msgid "IQN has been used to create map." -msgstr "IQN eşleştirme oluşturmak için kullanılmış." - -msgid "ISCSI provider_location not stored, using discovery" -msgstr "ISCSI provider_location kaydedilmemiş, keşif kullanılıyor" - -msgid "Id not in sort_keys; is sort_keys unique?" -msgstr "Id sort_keys içinde değil, sort_keys benzersiz mi?" - -msgid "Image delete encountered an error." -msgstr "İmaj silme bir hatayla karşılaştı." - -msgid "Inconsistent Luns." -msgstr "Tutarsız Lun." - -#, python-format -msgid "" -"Incorrect value error: %(blocksize)s, it may indicate that " -"'volume_dd_blocksize' was configured incorrectly. Fall back to default." -msgstr "" -"Geçersiz değer hatası: %(blocksize)s, 'volume_dd_blocksize'nin yanlış " -"yapılandırıldığını gösteriyor olabilir. Varsayılana dön." - -#, python-format -msgid "" -"Insufficient free space for thin provisioning. The ratio of provisioned " -"capacity over total capacity %(provisioned_ratio).2f has exceeded the " -"maximum over subscription ratio %(oversub_ratio).2f on host %(host)s." -msgstr "" -"İnce hazırlığı için yetersiz boş alan. Hazırlık kapasitesinin toplam " -"kapasiteye oranı %(provisioned_ratio).2f %(host)s istemcisi üzerinde azami " -"aşım oranı %(oversub_ratio).2f değerini geçti." - -#, python-format -msgid "" -"Insufficient free space for volume creation on host %(host)s (requested / " -"avail): %(requested)s/%(available)s" -msgstr "" -"%(host)s istemcisi üzerinde mantıksal sürücü oluşturma için yetersiz boş " -"alan (istenen / kullanılabilir): %(requested)s/%(available)s" - -#, python-format -msgid "" -"Insufficient free space for volume creation. Total capacity is %(total).2f " -"on host %(host)s." -msgstr "" -"Mantıksal sürücü oluşturma için yetersiz boş alan. 
%(host)s istemcisi " -"üzerinde toplam kapasite %(total).2f." - -#, python-format -msgid "Invalid IP address format '%s'" -msgstr "Geçersiz IP adresi biçimi '%s'" - -#, python-format -msgid "" -"Invalid goodness result. Result must be between 0 and 100. Result " -"generated: '%s' :: Defaulting to a goodness of 0" -msgstr "" -"Geçersiz iyilik sonucu. Sonuç 0 ve 100 arasında olmalı. Üretilen sonuç: " -"'%s' :: İyilik değeri 0 olarak varsayılıyor" - -#, python-format -msgid "Invalid trace flag: %s" -msgstr "Geçersiz takip bayrağı: %s" - -msgid "" -"It is not the recommended way to use drivers by NetApp. Please use " -"NetAppDriver to achieve the functionality." -msgstr "" -"NetApp sürücüleri kullanmak önerilen bir yol değildir. Lütfen işlevselliğe " -"erişmek için NetAppDriver kullanın." - -#, python-format -msgid "" -"LUN misalignment may occur for current initiator group %(ig_nm)s) with host " -"OS type %(ig_os)s. Please configure initiator group manually according to " -"the type of the host OS." -msgstr "" -"%(ig_os)s istemci OS türlü %(ig_nm)s) başlatıcı grubu için LUN yanlış " -"hizalaması oluşabilir. Lütfen istemci OS türüne göre başlatıcı grubunu elle " -"ayarlayın." - -msgid "Least busy iSCSI port not found, using first iSCSI port in list." -msgstr "" -"En az meşgul iSCSI bağlantı noktası bulunamadı, listedeki ilk iSCSI bağlantı " -"noktası kullanılıyor." - -#, python-format -msgid "Message - %s." -msgstr "İleti - %s." - -#, python-format -msgid "More than one valid preset was detected, using %s" -msgstr "Birden fazla geçerli ön ayar algılandı, %s kullanılıyor" - -#, python-format -msgid "Mount point %(name)s already exists. Message: %(msg)s" -msgstr "Bağlantı noktası %(name)s zaten mevcut. İleti: %(msg)s" - -msgid "No VLUN contained CHAP credentials. Generating new CHAP key." -msgstr "" -"Hiçbir VLUN CHAP kimlik bilgileri içermiyor. Yeni CHAP anahtarı üretiliyor." - -#, python-format -msgid "No backing file found for %s, allowing snapshot to be deleted." 
-msgstr "" -"%s için destekleyen dosya bulunamadı, anlık görüntünün silinmesine izin " -"veriliyor." - -#, python-format -msgid "No entry in LUN table for volume/snapshot %(name)s." -msgstr "%(name)s mantıksal sürücü/anlık görüntü için LUN tablosunda girdi yok." - -msgid "No host or VLUNs exist. Generating new CHAP key." -msgstr "İstemci veya VLUN mevcut değil. Yeni CHAP anahtarı üretiliyor." - -msgid "No mapping." -msgstr "Eşleştirme yok." - -#, python-format -msgid "No port group found in masking view %(mv)s." -msgstr "%(mv)s maskeleme görünümünde bağlantı noktası grubu bulunamadı." - -msgid "No protection domain name or id was specified in configuration." -msgstr "Yapılandırmada herhangi bir koruma alan adı veya kimliği belirtilmedi." - -#, python-format -msgid "" -"No storage group found. Performing rollback on Volume: %(volumeName)s To " -"return it to the default storage group for FAST policy %(fastPolicyName)s." -msgstr "" -"Depolama grubu bulunamadı. %(fastPolicyName)s FAST ilkesi için varsayılan " -"depolama grubuna döndürmek için Mantıksal sürücü: %(volumeName)s üzerinde " -"geri döndürme yapılıyor." - -#, python-format -msgid "No storage pool found with available capacity %s." -msgstr "%s kullanılabilir kapasitesine sahip depolama havuzu bulunamadı." - -msgid "No storage pool name or id was found." -msgstr "Depolama havuzu ismi veya kimliği bulunamadı." - -msgid "No such host alias name." -msgstr "Böyle bir istemci rumuzu yok." - -#, python-format -msgid "No target ports found in masking view %(maskingView)s." -msgstr "" -"%(maskingView)s maskeleme görünümünde hedef bağlantı noktası bulunamadı." - -#, python-format -msgid "No weighed hosts found for volume with properties: %s" -msgstr "" -"Şu özelliklere sahip mantıksal sürücü için ağırlık verilmiş istemci " -"bulunamadı: %s" - -msgid "Non-iSCSI VLUN detected." -msgstr "iSCSI olmayan VLUN algılandı." 
- -#, python-format -msgid "Not deleting key %s" -msgstr "%s anahtarı silinmiyor" - -#, python-format -msgid "Persistence file already exists for volume, found file at: %s" -msgstr "" -"Kalıcılık dosyası mantıksal sürücü için zaten mevcut, dosya şurada bulundu: " -"%s" - -#, python-format -msgid "" -"Pre check for deletion. Volume: %(volumeName)s is part of a storage group. " -"Attempting removal from %(storageGroupInstanceNames)s." -msgstr "" -"Silme için ön kontrol. Mantıksal sürücü: %(volumeName)s bir depolama " -"grubunun parçası. %(storageGroupInstanceNames)s'den silme deneniyor." - -#, python-format -msgid "" -"Production use of \"%(backend)s\" backend requires the Cinder controller to " -"have multipathing properly set up and the configuration option \"%(mpflag)s" -"\" to be set to \"True\"." -msgstr "" -"\"%(backend)s\" in üretimde kullanılması Cinder kontrolcüsünün çokluyolunun " -"düzgün ayarlanması ve yapılandırma seçeneği \"%(mpflag)s\" in \"True\" " -"olarak ayarlanmasını gerektirir." - -#, python-format -msgid "" -"RBD image for backup %(backup)s of volume %(volume)s not found. Deleting " -"backup metadata." -msgstr "" -"%(volume)s mantıksal sürücüsü için %(backup)s yedeği için RBD imajı " -"bulunamadı. Metadata yedeği siliniyor." - -#, python-format -msgid "Rename failure in cleanup of cDOT QOS policy group %(name)s: %(ex)s" -msgstr "" -"cDOT QOS ilke grubu %(name)s temizliğinde yeniden adlandırma hatası: %(ex)s" - -#, python-format -msgid "" -"Report interval must be less than service down time. Current config " -"service_down_time: %(service_down_time)s, report_interval for this: service " -"is: %(report_interval)s. Setting global service_down_time to: " -"%(new_down_time)s" -msgstr "" -"Rapor aralığı servisin kapalı kaldığı süreden küçük olmalı. Mevcut " -"service_down_time yapılandırması: %(service_down_time)s, bu servis için " -"report_interval: %(report_interval)s. 
Genel service_down_time: " -"%(new_down_time)s olarak ayarlanıyor" - -msgid "Requested image is not accessible by current Tenant." -msgstr "İstenen imaj mevcut Kiracı tarafından erişilebilir değil." - -msgid "Returning as clean tmp vol job already running." -msgstr "Geçici mantıksal sürücü temizleme işi hala çalıştığından dönülüyor." - -#, python-format -msgid "" -"Share %s ignored due to invalid format. Must be of form address:/export." -msgstr "" -"%s paylaşımı geçersiz biçim nedeniyle yoksayılır. Form adresi olmalıdır:/" -"export." - -msgid "Silent failure of target removal detected, retry...." -msgstr "Hedef silmenin sessizce başarısız olduğu algılandı, tekrar dene..." - -#, python-format -msgid "Snapshot %(name)s already exists. Message: %(msg)s" -msgstr "Anlık görüntü %(name)s zaten mevcut. İleti: %(msg)s" - -#, python-format -msgid "Snapshot %s already deleted." -msgstr "Anlık görüntü %s zaten silinmiş." - -#, python-format -msgid "Snapshot still %(status)s Cannot delete snapshot." -msgstr "Anlık görüntü hala %(status)s Anlık görüntü silinemiyor." - -#, python-format -msgid "Storage group %(name)s already exists. Message: %(msg)s" -msgstr "Depolama grubu %(name)s zaten mevcut. İleti: %(msg)s" - -#, python-format -msgid "Storage sync name not found for target %(target)s on %(storageSystem)s." -msgstr "" -"%(storageSystem)s üzerindeki %(target)s hedefi için depolama eş zamanlama " -"ismi bulunamadı." - -msgid "Storage-assisted migration failed during retype." -msgstr "Depolama-destekli göç retype sırasında başarısız oldu." - -msgid "The MCS Channel is grouped." -msgstr "MCS Kanalı gruplandırılmış." - -#, python-format -msgid "" -"The NAS file operations will be run as root: allowing root level access at " -"the storage backend. This is considered an insecure NAS environment. Please " -"see %s for information on a secure NAS configuration." 
-msgstr "" -"NAS dosya işlemleri root olarak çalıştırılacak: depolama arka ucunda root " -"düzeyinde erişime izin verilecek. Bu güvensiz bir NAS ortamı olarak kabul " -"edilir. Güvenli bir NAS yapılandırması için lütfen %s'e göz atın." - -#, python-format -msgid "" -"The NAS file permissions mode will be 666 (allowing other/world read & write " -"access). This is considered an insecure NAS environment. Please see %s for " -"information on a secure NFS configuration." -msgstr "" -"NAS dosya izinleri kipi 666 olacak (diğer/dünya'ya okuma&yazma izni verir). " -"Bu güvensiz bir NAS ortamı olarak kabul edilir. Güvenli bir NFS " -"yapılandırması için lütfen %s'e göz atın." - -msgid "" -"The VMAX plugin only supports Retype. If a pool based migration is necessary " -"this will happen on a Retype From the command line: cinder --os-volume-api-" -"version 2 retype --migration-policy on-demand" -msgstr "" -"VMAX eklentisi yalnızca Retype destekler. Eğer havuz tabanlı göç gerekliyse " -"bu komut satırından bir Retype üzerinde gerçekleşir: cinder --os-volume-api-" -"version 2 retype --migration-policy on-demand" - -#, python-format -msgid "The provisioning: %(provisioning)s is not valid." -msgstr "Hazırlık: %(provisioning)s geçerli değil." - -#, python-format -msgid "" -"The volume: %(volumename)s was not first part of the default storage group " -"for FAST policy %(fastPolicyName)s." -msgstr "" -"Mantıksal sürücü: %(volumename)s FAST ilkesi %(fastPolicyName)s için " -"varsayılan depolama grubunun ilk bölümü değildi." - -#, python-format -msgid "" -"The volume: %(volumename)s. was not first part of the default storage group " -"for FAST policy %(fastPolicyName)s." -msgstr "" -"Mantıksal sürücü: %(volumename)s. %(fastPolicyName)s FAST ilkesi için " -"varsayılan depolama grubunun ilk bölümü değildi." - -#, python-format -msgid "" -"There are no datastores matching new requirements; can't retype volume: %s." 
-msgstr "" -"Yeni gereksinimlerle eşleşen veri deposu yok; mantıksal sürücü retype " -"edilemiyor: %s." - -#, python-format -msgid "Trying to boot from an empty volume: %s." -msgstr "Boş bir mantıksal sürücüden ön yükleme yapılmaya çalışılıyor: %s." - -#, python-format -msgid "Unable to create folder %s" -msgstr "%s dizini oluşturulamadı" - -#, python-format -msgid "Unable to create snapshot %s" -msgstr "%s anlık görüntüsü oluşturulamadı" - -#, python-format -msgid "Unable to delete Protection Group Snapshot: %s" -msgstr "Koruma Grubu Anlık Görüntüsü silinemiyor: %s" - -#, python-format -msgid "Unable to delete Protection Group: %s" -msgstr "Koruma Grubu silinemiyor: %s" - -#, python-format -msgid "Unable to delete space %(space)s" -msgstr "%(space)s alanı silinemedi" - -#, python-format -msgid "Unable to find Masking view: %(view)s." -msgstr "Maskeleme görünümü bulunamadı: %(view)s." - -#, python-format -msgid "Unable to find snapshot %s" -msgstr "%s anlık görüntüsü bulunamadı" - -msgid "Unable to get rados pool stats." -msgstr "Rados havuz istatistikleri alınamıyor." - -msgid "Unable to get storage tiers from tier policy rule." -msgstr "Depolama aşamaları aşama ilke kuralından alınamıyor." - -#, python-format -msgid "Unable to locate volume:%s" -msgstr "Mantıksal sürücü:%s bulunamadı" - -msgid "Unable to poll cluster free space." -msgstr "Küme boş alanı çekilemedi." - -#, python-format -msgid "Unable to update host type for host with label %(l)s. %(e)s" -msgstr "%(l)s etiketli istemci için istemci türü güncellenemiyor. %(e)s" - -#, python-format -msgid "Unable to update stats on non-initialized Volume Group: %s" -msgstr "" -"İlklendirilmemiş Mantıksal Sürücü Grubu üzerinde istatistikler " -"güncellenemedi: %s" - -#, python-format -msgid "Unexpected exception during image cloning in share %s" -msgstr "%s paylaşımında imaj kopyalanırken beklenmedik istisna" - -msgid "Unexpected exception while listing used share." 
-msgstr "Kullanılan paylaşım listelenirken beklenmedik istisna." - -msgid "Unexpected exception while short listing used share." -msgstr "Kullanılan paylaşım kısaca listelenirken beklenmedik istisna." - -#, python-format -msgid "Update driver status failed: %(config_group)s is uninitialized." -msgstr "Sürücü durumu güncelleme başarısız: %(config_group)s ilklendirilmemiş." - -msgid "Use of empty request context is deprecated" -msgstr "Boş istek içeriği kullanımı önerilmiyor" - -msgid "Verify certificate is not set, using default of False." -msgstr "" -"Sertifika doğrulama ayarlanmamış, varsayılan değer olan False kullanılıyor." - -#, python-format -msgid "Volume %(volume)s is not in any masking view." -msgstr "Mantıksal sürücü %(volume)s herhangi bir maskeleme görünümünde değil." - -#, python-format -msgid "" -"Volume %(volumeName)s was not first part of the default storage group for " -"the FAST Policy." -msgstr "" -"Mantıksal sürücü %(volumeName)s FAST İlkesi için varsayılan depolama " -"grubunun ilk bölümü değildi." - -#, python-format -msgid "Volume %(volume_id)s already deleted." -msgstr "Mantıksal sürücü %(volume_id)s zaten silinmiş." - -#, python-format -msgid "Volume %(volume_id)s cannot be retyped because it has snapshot." -msgstr "" -"Mantıksal sürücü %(volume_id)s retype edilemez çünkü anlık görüntüsü var." - -#, python-format -msgid "Volume %(volume_id)s cannot be retyped during attachment." -msgstr "Mantıksal sürücü %(volume_id)s ekleme sırasında retype edilemez." - -#, python-format -msgid "Volume %s does not exist." -msgstr "Mantıksal sürücü %s mevcut değil." - -#, python-format -msgid "Volume %s does not have provider_location specified, skipping" -msgstr "Mantıksal sürücü %s provider_location belirtmemiş, atlanıyor" - -#, python-format -msgid "Volume %s does not have provider_location specified, skipping." -msgstr "Mantıksal sürücü %s provider_location belirtmemiş, atlanıyor." 
- -#, python-format -msgid "Volume %s is not found!, it may have been deleted." -msgstr "Mantıksal sürücü %s bulunamadı!, silinmiş olabilir." - -#, python-format -msgid "Volume %s was not found while trying to delete it." -msgstr "Mantıksal sürücü %s silinmeye çalışılırken bulunamadı." - -#, python-format -msgid "" -"Volume : %(volumeName)s is not currently belonging to any storage group." -msgstr "" -"Mantıksal sürücü : %(volumeName)s şu an herhangi bir depolama grubuna ait " -"değil." - -#, python-format -msgid "Volume copy job for src vol %s not found." -msgstr "" -"%s kaynak mantıksal sürücüsü için mantıksal sürücü kopyalama işi bulunamadı." - -#, python-format -msgid "Volume deletion failed with message: %s" -msgstr "Mantıksal sürücü silme şu iletiyle başarısız oldu: %s" - -#, python-format -msgid "Volume is not writable. Please broaden the file permissions. Mount: %s" -msgstr "" -"Mantıksal sürücü yazılabilir değil. Lütfen dosya izinlerinin kapsamını " -"artırın. Bağlama yeri: %s" - -#, python-format -msgid "Volume path %s does not exist, nothing to remove." -msgstr "%s mantıksal sürücü yolu mevcut değil, kaldırılacak bir şey yok." - -msgid "Volume refresh job already running. Returning..." -msgstr "Mantıksal sürücü tazeleme işi zaten çalışıyor. Dönülüyor..." - -#, python-format -msgid "Volume still %(status)s Cannot delete volume." -msgstr "Mantıksal sürücü hala %(status)s Mantıksal sürücü silinemez." - -msgid "Volume type will be changed to be the same as the source volume." -msgstr "" -"Mantıksal sürücü türü kaynak mantıksal sürücüyle aynı olacak şekilde " -"değiştirilecek." - -#, python-format -msgid "" -"Volume: %(volumeName)s Does not belong to storage group %(defaultSgName)s." -msgstr "" -"Mantıksal sürücü: %(volumeName)s %(defaultSgName)s depolama grubuna ait " -"değil." - -#, python-format -msgid "" -"Volume: %(volumeName)s is already part of storage group %(sgGroupName)s." 
-msgstr "" -"Mantıksal sürücü: %(volumeName)s zaten %(sgGroupName)s depolama grubunun " -"parçası." - -#, python-format -msgid "Volume: %(volumeName)s is not currently belonging to any storage group." -msgstr "" -"Mantıksal sürücü: %(volumeName)s şu an herhangi bir depolama grubuna ait " -"değil." - -#, python-format -msgid "Volume: %s is in use, can't retype." -msgstr "Mantıksal sürücü: %s hala kullanımda, retype yapılamaz." - -#, python-format -msgid "_get_vdisk_map_properties: Did not find a preferred node for vdisk %s." -msgstr "" -"_get_vdisk_map_properties: %s vdisk'i için tercih edilen bir düğüm " -"bulunamadı." - -#, python-format -msgid "_migrate_cleanup on : %(volumeName)s." -msgstr "%(volumeName)s üzerinde _migrate_cleanup." - -#, python-format -msgid "_migrate_rollback on : %(volumeName)s." -msgstr "%(volumeName)s üzerinde _migrate_rollback." - -msgid "_remove_device: invalid properties or device." -msgstr "_remove_device: geçersiz özellik ya da aygıt." - -#, python-format -msgid "" -"_unmap_vdisk_from_host: Multiple mappings of volume %(vdisk_name)s found, no " -"host specified." -msgstr "" -"_unmap_vdisk_from_host: %(vdisk_name)s mantıksal sürücüsünün birden çok " -"eşleşmesi bulundu, istemci belirtilmedi." - -#, python-format -msgid "" -"_unmap_vdisk_from_host: No mapping of volume %(vol_name)s to any host found." -msgstr "" -"_unmap_vdisk_from_host: %(vol_name)s mantıksal sürücüsünün hiçbir istemciye " -"eşleşmesi bulunamadı." - -msgid "flush() not supported in this version of librbd" -msgstr "flush() librbd'nin bu sürümünde desteklenmiyor" - -#, python-format -msgid "initialize_connection: Did not find a preferred node for volume %s." -msgstr "" -"initialize_connection: %s mantıksal sürücüsü için tercih edilen düğüm " -"bulunamadı." 
- -#, python-format -msgid "ldev(%(ldev)d) is already mapped (hlun: %(hlu)d)" -msgstr "ldev(%(ldev)d) zaten eşleştirilmiş (hlun: %(hlu)d)" - -#, python-format -msgid "object %(key)s of type %(typ)s not found, %(err_msg)s" -msgstr "%(typ)s türündeki %(key)s nesnesi bulunamadı, %(err_msg)s" - -msgid "qemu-img is not installed." -msgstr "qemu-img kurulu değil." - -#, python-format -msgid "snapshot: %s not found, skipping delete operation" -msgstr "anlık görüntü: %s bulunamadı, silme işlemi atlanıyor" - -#, python-format -msgid "snapshot: %s not found, skipping delete operations" -msgstr "anlık görüntü: %s bulunamadı, silme işlemleri atlanıyor" - -msgid "terminate_connection: lun map not found" -msgstr "terminate_connection: lun eşleştirmesi bulunamadı" - -#, python-format -msgid "" -"unmap_vol_from_host: Multiple mappings of volume %(vol_name)s found, no host " -"specified." -msgstr "" -"unmap_vol_from_host: %(vol_name)s mantıksal sürücüsünün birden fazla " -"eşleşmesi var, istemci belirtilmedi." - -#, python-format -msgid "" -"unmap_vol_from_host: No mapping of volume %(vol_name)s to any host found." -msgstr "" -"unmap_vol_from_host: %(vol_name)s mantıksal sürücüsünün hiçbir istemciye " -"eşleşmesi bulunamadı." - -#, python-format -msgid "" -"unmap_vol_from_host: No mapping of volume %(vol_name)s to host %(host)s " -"found." -msgstr "" -"unmap_vol_from_host: %(vol_name)s mantıksal sürücüsünden %(host)s " -"istemcisine eşleştirme bulunamadı." - -#, python-format -msgid "volume service is down. (host: %s)" -msgstr "mantıksal sürücü servisi çalışmıyor. (host: %s)" - -msgid "volume_tmp_dir is now deprecated, please use image_conversion_dir." -msgstr "" -"volume_tmp_dir artık kullanılmıyor, lütfen image_conversion_dir kullanın." - -#, python-format -msgid "warning: Tried to delete vdisk %s but it does not exist." -msgstr "uyarı: vdisk %s silinmeye çalışıldı ama mevcut değil." 
- -#, python-format -msgid "" -"zfssa_initiator: %(ini)s wont be used on zfssa_initiator_group= %(inigrp)s." -msgstr "" -"zfssa_initiator: %(ini)s zfssa_initiator_group= %(inigrp)s üzerinde " -"kullanılmayacak." - -msgid "" -"zfssa_initiator_config not found. Using deprecated configuration options." -msgstr "" -"zfssa_initiator_config bulunamadı. Artık kullanılmayan yapılandırma " -"seçenekleri kullanılıyor." diff --git a/cinder/locale/tr_TR/LC_MESSAGES/cinder.po b/cinder/locale/tr_TR/LC_MESSAGES/cinder.po deleted file mode 100644 index 257594cd3..000000000 --- a/cinder/locale/tr_TR/LC_MESSAGES/cinder.po +++ /dev/null @@ -1,6491 +0,0 @@ -# Translations template for cinder. -# Copyright (C) 2015 ORGANIZATION -# This file is distributed under the same license as the cinder project. -# -# Translators: -# OpenStack Infra , 2015. #zanata -# evgin , 2015. #zanata -# Andreas Jaeger , 2016. #zanata -msgid "" -msgstr "" -"Project-Id-Version: cinder 9.0.0.0rc2.dev202\n" -"Report-Msgid-Bugs-To: https://bugs.launchpad.net/openstack-i18n/\n" -"POT-Creation-Date: 2016-10-07 03:25+0000\n" -"MIME-Version: 1.0\n" -"Content-Type: text/plain; charset=UTF-8\n" -"Content-Transfer-Encoding: 8bit\n" -"PO-Revision-Date: 2015-10-30 07:23+0000\n" -"Last-Translator: evgin \n" -"Language: tr-TR\n" -"Plural-Forms: nplurals=1; plural=0;\n" -"Generated-By: Babel 2.0\n" -"X-Generator: Zanata 3.7.3\n" -"Language-Team: Turkish (Turkey)\n" - -#, python-format -msgid "\t%s" -msgstr "\t%s" - -#, python-format -msgid "" -"\n" -"OpenStack Cinder version: %(version)s\n" -msgstr "" -"\n" -"OpenStack Cinder sürümü: %(version)s\n" - -#, python-format -msgid " but size is now %d" -msgstr " ancak boyut şu anda %d" - -#, python-format -msgid " but size is now %d." -msgstr " ama şimdiki boyut %d." - -#, python-format -msgid "" -"%(driver)s manage_existing cannot manage a volume connected to hosts. 
Please " -"disconnect this volume from existing hosts before importing" -msgstr "" -"%(driver)s manage_existing istemcilere bağlı bir mantıksal sürücü " -"yönetilemez. Lütfen içe aktarmadan önce bu mantıksal sürücüyü mevcut " -"istemcilerden ayırın" - -#, python-format -msgid "%(err)s" -msgstr "Hatalar: %(err)s" - -#, python-format -msgid "" -"%(err)s\n" -"result: %(res)s." -msgstr "" -"%(err)s\n" -"sonuç: %(res)s." - -#, python-format -msgid "%(error_message)s" -msgstr "%(error_message)s" - -#, python-format -msgid "%(exception)s: %(explanation)s" -msgstr "%(exception)s: %(explanation)s" - -#, python-format -msgid "%(file)s: Permission denied." -msgstr "%(file)s: İzin reddedildi." - -#, python-format -msgid "" -"%(fun)s: Failed with unexpected CLI output.\n" -" Command: %(cmd)s\n" -" stdout: %(out)s\n" -" stderr: %(err)s" -msgstr "" -"%(fun)s: Beklenmedik CLI çıktısı ile başarısız oldu.\n" -" Komut: %(cmd)s\n" -" stdout: %(out)s\n" -" stderr: %(err)s" - -#, python-format -msgid "%(host)-25s\t%(availability_zone)-15s" -msgstr "%(host)-25s\t%(availability_zone)-15s" - -#, python-format -msgid "%(host)-25s\t%(zone)-15s" -msgstr "%(host)-25s\t%(zone)-15s" - -#, python-format -msgid "%(message)s" -msgstr "%(message)s" - -#, python-format -msgid "" -"%(message)s\n" -"Status Code: %(_status)s\n" -"Body: %(_body)s" -msgstr "" -"%(message)s\n" -"Durum Kodu: %(_status)s\n" -"Gövde: %(_body)s" - -#, python-format -msgid "%(message)s, subjectAltName: %(sanList)s." -msgstr "%(message)s, subjectAltName: %(sanList)s." - -#, python-format -msgid "" -"%(msg_type)s: creating NetworkPortal: ensure port %(port)d on ip %(ip)s is " -"not in use by another service." -msgstr "" -"%(msg_type)s: NetworkPortal oluşturuluyor: %(ip)s ip üzerinde %(port)d " -"bağlantı noktasının başka bir servis tarafından kullanılmadığından emin olun." - -#, python-format -msgid "" -"%(op)s: backup %(bck_id)s, volume %(vol_id)s failed. Backup object has " -"unexpected mode. 
Image or file backups supported, actual mode is " -"%(vol_mode)s." -msgstr "" -"%(op)s: mantıksal sürücü %(vol_id)s yedekleme %(bck_id)s başarısız oldu. " -"Yedekleme nesnesi beklenmeyen kipe sahip. İmaj ya da dosya yedeklemeleri " -"destekleniyor, gerçek kip %(vol_mode)s." - -#, python-format -msgid "%(service)s Service is not %(status)s on storage appliance: %(host)s" -msgstr "%(service)s Servisi depolama aygıtında %(status)s değil: %(host)s" - -#, python-format -msgid "%(value_name)s must be <= %(max_value)d" -msgstr "%(value_name)s <= %(max_value)d olmalı" - -#, python-format -msgid "%(value_name)s must be >= %(min_value)d" -msgstr "%(value_name)s >= %(min_value)d olmalı" - -#, python-format -msgid "" -"%(worker_name)s value of %(workers)d is invalid, must be greater than 0." -msgstr "" -"%(workers)d'ın %(worker_name)s değeri geçersiz, 0'dan daha büyük olmalıdır." - -#, python-format -msgid "%s" -msgstr "%s" - -#, python-format -msgid "" -"%s cannot be accessed. Verify that GPFS is active and file system is mounted." -msgstr "" -"%s erişilebilir değil. GPFS'in etkin olduğunu ve dosya sisteminin bağlı " -"olduğunu doğrulayın." - -#, python-format -msgid "%s cannot be resized using clone operation as it contains no blocks." -msgstr "" -"%s klon işlemi kullanılarak yeniden boyutlandırılamaz çünkü blok içermiyor." - -#, python-format -msgid "" -"%s cannot be resized using clone operation as it is hosted on compressed " -"volume" -msgstr "" -"%s klon işlemi kullanılarak yeniden boyutlandırılamaz çünkü sıkıştırılmış " -"bir mantıksal sürücü üzerinde" - -#, python-format -msgid "%s configuration option is not set." -msgstr "%s yapılandırma seçeneği ayarlı değil." - -#, python-format -msgid "%s is not a directory." -msgstr "%s bir dizin değil." - -#, python-format -msgid "%s is not installed" -msgstr "%s kurulu değil" - -#, python-format -msgid "%s is not installed." -msgstr "%s kurulu değil." 
- -#, python-format -msgid "%s is not set" -msgstr "%s ayarlanmamış" - -#, python-format -msgid "%s is not set." -msgstr "%s ayarlı değil." - -#, python-format -msgid "%s must be a valid raw or qcow2 image." -msgstr "%s geçerli bir raw ya da qcow2 imajı olmalıdır." - -#, python-format -msgid "%s must be an absolute path." -msgstr "%s mutlak yol olmalı." - -#, python-format -msgid "%s must be an integer." -msgstr "%s bir tam sayı olmalıdır." - -#, python-format -msgid "%s not set in cinder.conf" -msgstr "%s cinder.conf'da ayarlanmamış" - -#, python-format -msgid "%s not set." -msgstr "%s ayarlanmamış." - -#, python-format -msgid "" -"'%(prot)s' is invalid for flashsystem_connection_protocol in config file. " -"valid value(s) are %(enabled)s." -msgstr "" -"'%(prot)s' yapılandırma dosyasında flashsystem_connection_protocol için " -"geçersiz. geçerli değer(ler) %(enabled)s." - -msgid "'active' must be present when writing snap_info." -msgstr "snap_info yazılırken 'active' olması gerekir." - -msgid "'consistencygroup_id' must be specified" -msgstr "'consistencygroup_id' belirtilmiş olmalıdır" - -msgid "'qemu-img info' parsing failed." -msgstr "'qemu-img info' ayrıştırması başarısız oldu." - -msgid "'status' must be specified." -msgstr "'status' belirtilmelidir." - -msgid "'volume_id' must be specified" -msgstr "'volume_id' belirtilmelidir" - -#, python-format -msgid "A LUN (HLUN) was not found. (LDEV: %(ldev)s)" -msgstr "Bir LUN (HLUN) bulunamadı. (LDEV: %(ldev)s)" - -#, python-format -msgid "" -"A free LUN (HLUN) was not found. Add a different host group. (LDEV: %(ldev)s)" -msgstr "" -"Boş bir LUN (HLUN) bulunamadı. Farklı bir istemci grubu ekleyin. (LDEV: " -"%(ldev)s)" - -#, python-format -msgid "A host group could not be added. (port: %(port)s, name: %(name)s)" -msgstr "" -"Bir istemci grubu eklenemedi. (bağlantı noktası: %(port)s, isim: %(name)s)" - -#, python-format -msgid "" -"A host group could not be deleted. 
(port: %(port)s, gid: %(gid)s, name: " -"%(name)s)" -msgstr "" -"Bir istemci grubu silinemedi. (bağlantı noktası: %(port)s, gid: %(gid)s, " -"isim: %(name)s)" - -#, python-format -msgid "A host group is invalid. (host group: %(gid)s)" -msgstr "Bir istemci grubu geçersiz. (istemci grubu: %(gid)s)" - -#, python-format -msgid "A pair cannot be deleted. (P-VOL: %(pvol)s, S-VOL: %(svol)s)" -msgstr "Bir çift silinemedi. (P-VOL: %(pvol)s, S-VOL: %(svol)s)" - -#, python-format -msgid "" -"A pair could not be created. The maximum number of pair is exceeded. (copy " -"method: %(copy_method)s, P-VOL: %(pvol)s)" -msgstr "" -"Bir çift oluşturulamadı. Azami çift sayısı aşıldı. (kopyalama yöntemi: " -"%(copy_method)s, P-VOL: %(pvol)s)" - -#, python-format -msgid "A parameter is invalid. (%(param)s)" -msgstr "Bir parametre geçersiz. (%(param)s)" - -#, python-format -msgid "A parameter value is invalid. (%(meta)s)" -msgstr "Bir parametre değeri geçersiz. (%(meta)s)" - -#, python-format -msgid "A pool could not be found. (pool id: %(pool_id)s)" -msgstr "Bir havuz bulunamadı. (havuz kimliğİ: %(pool_id)s)" - -#, python-format -msgid "A snapshot status is invalid. (status: %(status)s)" -msgstr "Bir anlık görüntü durumu geçersiz. (durum: %(status)s)" - -msgid "A volume ID or share was not specified." -msgstr "Bir mantıksal sürücü ID ya da paylaşım belirtilmemiş." - -#, python-format -msgid "A volume status is invalid. (status: %(status)s)" -msgstr "Bir mantıksal sürücünün durumu geçersiz. (durum: %(status)s)" - -#, python-format -msgid "API %(name)s failed with error string %(err)s" -msgstr "API %(name)s %(err)s hatası ile başarısız oldu" - -#, python-format -msgid "" -"API Version String %(version)s is of invalid format. Must be of format " -"MajorNum.MinorNum." -msgstr "" -"API Sürüm Karakter Dizisi %(version)s geçersiz bir biçimde. MajorNum." -"MinorNum biçiminde olmalı." - -msgid "API key is missing for CloudByte driver." -msgstr "CloudByte sürücüsü için API anahtarı eksik." 
- -#, python-format -msgid "API response: %s" -msgstr "API yanıtı: %s" - -#, python-format -msgid "API version %(version)s is not supported on this method." -msgstr "API sürümü %(version)s bu metodda desteklenmiyor." - -msgid "API version could not be determined." -msgstr "API sürümü belirlenemedi." - -msgid "" -"About to delete child projects having non-zero quota. This should not be " -"performed" -msgstr "" -"Kotası boş olmayan bir çocuk projeyi silmeye calisiyorsunuz. Bu işlem " -"gerçekleştirilemez." - -msgid "Access list not available for public volume types." -msgstr "" -"Ortak mantıksal sürücü türleri için erişim listesi kullanılabilir değil." - -#, python-format -msgid "" -"All the specified storage pools to be managed do not exist. Please check " -"your configuration. Non-existent pools: %s" -msgstr "" -"Belirtilen yönetilecek depolama havuzlarından hepsi mevcut değil. Lütfen " -"yapılandırmanızı kontrol edin. Mevcut olmayan havuzlar: %s" - -msgid "An error has occurred during backup operation" -msgstr "Yedekleme işlemi sırasında bir hata oluştu" - -#, python-format -msgid "" -"An error occurred during the LUNcopy operation. LUNcopy name: " -"%(luncopyname)s. LUNcopy status: %(luncopystatus)s. LUNcopy state: " -"%(luncopystate)s." -msgstr "" -"LUNcopy işlemi sırasında bir hata oluştu. LUNcopy ismi: %(luncopyname)s. " -"LUNcopy durumu: %(luncopystatus)s. LUNcopy durumu: %(luncopystate)s." - -#, python-format -msgid "An iSCSI CHAP user could not be added. (username: %(user)s)" -msgstr "Bir iSCSI CHAP kullanıcısı eklenemedi. (kullanıcı adı: %(user)s)" - -#, python-format -msgid "An iSCSI CHAP user could not be deleted. (username: %(user)s)" -msgstr "Bir iSCSI CHAP kullanıcısı silinemedi. (kullanıcı adı: %(user)s)" - -#, python-format -msgid "" -"An iSCSI target could not be added. (port: %(port)s, alias: %(alias)s, " -"reason: %(reason)s)" -msgstr "" -"Bir iSCSI hedefi eklenemedi. 
(bağlantı noktası: %(port)s, rumuz: %(alias)s, " -"sebep: %(reason)s)" - -#, python-format -msgid "" -"An iSCSI target could not be deleted. (port: %(port)s, tno: %(tno)s, alias: " -"%(alias)s)" -msgstr "" -"Bir iSCSI hedefi silinemedi. (bağlanıt noktası: %(port)s, tno: %(tno)s, " -"rumuz: %(alias)s)" - -msgid "An unknown exception occurred." -msgstr "Bilinmeyen bir istisna oluştu." - -#, python-format -msgid "Array does not exist or is offline. Current status of array is %s." -msgstr "Dizi mevcut değil ya da çevrim dışı. Dizinin mevcut durumu %s." - -msgid "At least one valid iSCSI IP address must be set." -msgstr "Geçerli en az bir iSCSI IP adresi ayarlamalıdır." - -#, python-format -msgid "Attempt to transfer %s with invalid auth key." -msgstr "Geçersiz kimlik doğrulama anahtarı ile %s aktarımı dene." - -#, python-format -msgid "Auth group [%s] details not found in CloudByte storage." -msgstr "" -"CloudByte depolamasında kimlik doğrulama grubu [%s] ayrıntıları bulunamadı." - -msgid "Auth user details not found in CloudByte storage." -msgstr "" -"CloudByte depolamada kimlik doğrulama kullanıcı ayrıntıları bulunamadı." - -msgid "Available categories:" -msgstr "Kullanılabilir kategoriler:" - -msgid "" -"Back-end QoS specs are not supported on this storage family and ONTAP " -"version." -msgstr "" -"Arka-uç QoS özellikleri bu depolama ailesi ve ONTAP sürümünde desteklenmiyor." 
- -#, python-format -msgid "Backend doesn't exist (%(backend)s)" -msgstr "Art alanda çalışan uygulama (%(backend)s) yok" - -#, python-format -msgid "Backend reports: %(message)s" -msgstr "Art alanda çalışan uygulama raporları: %(message)s" - -msgid "Backend reports: item already exists" -msgstr "Art alanda çalışan uygulama raporları: öge zaten mevcut" - -msgid "Backend reports: item not found" -msgstr "Art alanda çalışan uygulama raporları: öge bulunamadı" - -#, python-format -msgid "Backend service retry timeout hit: %(timeout)s sec" -msgstr "Arka uç servis yeniden deneme zaman aşımına erişildi: %(timeout)s sn" - -msgid "Backend storage did not configure fiber channel target." -msgstr "Arka uç depolama fiber kanal hedefini yapılandırmadı." - -#, python-format -msgid "Backup %(backup_id)s could not be found." -msgstr "%(backup_id)s yedekleme bulunamadı." - -msgid "Backup RBD operation failed" -msgstr "RBD Yedekleme işlemi başarısız oldu" - -msgid "Backup already exists in database." -msgstr "Yedek veritabanında zaten mevcut." - -#, python-format -msgid "Backup driver reported an error: %(message)s" -msgstr "Yedekleme sürücüsü bir hata bildirdi: %(message)s" - -msgid "Backup id required" -msgstr "Yedekleme kimliği gereklidir" - -msgid "Backup is not supported for GlusterFS volumes with snapshots." -msgstr "" -"Yedekleme anlık sistem görüntüleri ile GlusterFS mantıksal sürücüsü için " -"desteklenmiyor." - -msgid "Backup is only supported for SOFS volumes without backing file." -msgstr "" -"Yedekleme sadece destek dosyası olmayan SOFS mantıksal sürücüleri için " -"desteklenir." - -msgid "Backup is only supported for raw-formatted GlusterFS volumes." -msgstr "" -"Yedekleme sadece raw-biçimli GlusterFS mantıksal sürücüleri için desteklenir." - -msgid "Backup is only supported for raw-formatted SOFS volumes." -msgstr "" -"Yedekleme sadece raw-biçimli SOFS mantıksal sürücüleri için desteklenir." - -msgid "Backup operation of an encrypted volume failed." 
-msgstr "Şifreli mantıksal sürücünün yedekleme işlemi başarısız oldu." - -#, python-format -msgid "" -"Backup service %(configured_service)s does not support verify. Backup id " -"%(id)s is not verified. Skipping verify." -msgstr "" -"Yedekleme servisi %(configured_service)s doğrulamayı desteklemiyor. Yedek " -"kimliği %(id)s doğrulanmadı. Doğrulama atlanıyor." - -#, python-format -msgid "" -"Backup service %(service)s does not support verify. Backup id %(id)s is not " -"verified. Skipping reset." -msgstr "" -"Yedekleme servisi %(service)s doğrulamayı desteklemiyor. Yedek kimliği " -"%(id)s doğrulanmadı. Sıfırlama atlanıyor." - -#, python-format -msgid "Backup should only have one snapshot but instead has %s" -msgstr "" -"Yedekleme %s yerine sadece bir anlık sistem görüntüsüne sahip olmalıdır" - -msgid "Backup status must be available" -msgstr "Yedek durumu kullanılabilir olmalıdır" - -#, python-format -msgid "Backup status must be available and not %s." -msgstr "Yedekleme durumu kullanılabilir olmalı ve %s olmamalıdır." - -msgid "Backup status must be available or error" -msgstr "Yedek durumu kullanılabilir ya da hatalı olmalıdır" - -msgid "Backup to be restored has invalid size" -msgstr "Geri yüklenecek yedek geçersiz boyuta sahip" - -#, python-format -msgid "Bad Status line returned: %(arg)s." -msgstr "Bozuk Durum satırı döndürüldü: %(arg)s." 
- -#, python-format -msgid "Bad key(s) in quota set: %s" -msgstr "Kota kümesinde bozuk anahtar(lar): %s" - -#, python-format -msgid "" -"Bad or unexpected response from the storage volume backend API: %(data)s" -msgstr "" -"Depolama mantıksal sürücü art alanda çalışan uygulama API'sinden bozuk ya da " -"beklenmeyen yanıt: %(data)s" - -#, python-format -msgid "Bad project format: project is not in proper format (%s)" -msgstr "Bozuk proje biçimi: proje (%s) doğru biçiminde değil" - -msgid "Bad response from Datera API" -msgstr "Datera API'sinden bozuk yanıt" - -msgid "Bad response from SolidFire API" -msgstr "SolidFire API'den bozuk yanıt" - -#, python-format -msgid "Bad response from XMS, %s" -msgstr "XMS'den kötü yanıt, %s" - -msgid "Binary" -msgstr "İkili Değer" - -msgid "Blank components" -msgstr "Boş bileşenler" - -msgid "Blockbridge api host not configured" -msgstr "Blockbridge api istemcisi yapılandırılmadı" - -#, python-format -msgid "Blockbridge configured with invalid auth scheme '%(auth_scheme)s'" -msgstr "" -"Blockbridge geçersiz kimlik doğrulama şeması '%(auth_scheme)s' ile " -"yapılandırıldı" - -msgid "Blockbridge default pool does not exist" -msgstr "Blockbridge öntanımlı havuz yok" - -msgid "" -"Blockbridge password not configured (required for auth scheme 'password')" -msgstr "" -"Blockbridge parolası yapılandırılamadı (kimlik doğrulama şeması 'password' " -"için gerekli)" - -msgid "Blockbridge pools not configured" -msgstr "Blockbridge havuzları yapılandırılmamış" - -msgid "Blockbridge token not configured (required for auth scheme 'token')" -msgstr "" -"Blockbridge jetonu yapılandırılmamış (kimlik doğrulama şeması için gerekli " -"'token')" - -msgid "Blockbridge user not configured (required for auth scheme 'password')" -msgstr "" -"Blockbridge kullanıcısı yapılandırılmadı (kimlik doğrulama şeması 'password' " -"için gerekli)" - -msgid "CHAP secret should be 12-16 bytes." -msgstr "CHAP gizi 12-16 bayt olmalı." 
- -#, python-format -msgid "" -"CLI Exception output:\n" -" command: %(cmd)s\n" -" stdout: %(out)s\n" -" stderr: %(err)s" -msgstr "" -"CLI İstisnası çıktısı:\n" -" komut: %(cmd)s\n" -" stdout: %(out)s\n" -" stderr: %(err)s" - -#, python-format -msgid "" -"CLI Exception output:\n" -" command: %(cmd)s\n" -" stdout: %(out)s\n" -" stderr: %(err)s." -msgstr "" -"CLI İstisna çıktısı:\n" -" komut: %(cmd)s\n" -" stdout: %(out)s\n" -" stderr: %(err)s." - -msgid "" -"CMMVC6071E The VDisk-to-host mapping was not created because the VDisk is " -"already mapped to a host.\n" -"\"" -msgstr "" -"CMMVC6071E VDisk-to-host eşleştirmesi oluşturulmadı çünkü VDisk zaten bir " -"istemciye eşleştirilmiş.\n" -"\"" - -msgid "CONCERTO version is not supported" -msgstr "CONCERTO sürümü desteklenmiyor" - -#, python-format -msgid "CPG (%s) doesn't exist on array" -msgstr "CPG (%s) dizide mevcut değil" - -#, python-format -msgid "Can not translate %s to integer." -msgstr "%s, tam sayı değere çevrilemez." - -msgid "Can't decode backup record." -msgstr "Yedek kaydı çözülemedi." - -msgid "Can't parse backup record." -msgstr "Yedek kaydı ayrıştırılamadı." - -#, python-format -msgid "" -"Cannot add volume %(volume_id)s to consistency group %(group_id)s because it " -"has no volume type." -msgstr "" -"%(volume_id)s mantıksal sürücüsü %(group_id)s tutarlılık grubuna eklenemiyor " -"çünkü hiçbir mantıksal sürücü türüne sahip değil." - -#, python-format -msgid "" -"Cannot add volume %(volume_id)s to consistency group %(group_id)s because it " -"is already in consistency group %(orig_group)s." -msgstr "" -"Disk bölümü %(volume_id)s tutarlılık grubuna %(group_id)s eklenemedi çünkü " -"zaten içerisinde görünüyor. %(orig_group)s" - -#, python-format -msgid "" -"Cannot add volume %(volume_id)s to consistency group %(group_id)s because " -"volume cannot be found." -msgstr "" -"%(volume_id)s mantıksal sürücüsü %(group_id)s tutarlılık grubuna eklenemiyor " -"çünkü mantıksal sürücü bulunamıyor." 
- -#, python-format -msgid "" -"Cannot add volume %(volume_id)s to consistency group %(group_id)s because " -"volume does not exist." -msgstr "" -"%(volume_id)s mantıksal sürücüsü %(group_id)s tutarlılık grubuna eklenemiyor " -"çünkü mantıksal sürücü yok." - -#, python-format -msgid "" -"Cannot add volume %(volume_id)s to consistency group %(group_id)s because " -"volume is in an invalid state: %(status)s. Valid states are: %(valid)s." -msgstr "" -"%(volume_id)s mantıksal sürücüsü %(group_id)s tutarlılık grubuna eklenemiyor " -"çünkü mantıksal sürücü geçersiz bir durumda: %(status)s. Geçerli durumlar: " -"%(valid)s." - -#, python-format -msgid "" -"Cannot add volume %(volume_id)s to consistency group %(group_id)s because " -"volume type %(volume_type)s is not supported by the group." -msgstr "" -"%(volume_id)s mantıksal sürücüsü %(group_id)s tutarlılık grubuna eklenemiyor " -"çünkü %(volume_type)s mantıksal sürücü türü grup tarafından desteklenmiyor." - -#, python-format -msgid "" -"Cannot attach already attached volume %s; multiattach is disabled via the " -"'netapp_enable_multiattach' configuration option." -msgstr "" -"Zaten eklenmiş mantıksal sürücü %s eklenemez; çoklu ekleme " -"'netapp_enable_multiattach' yapılandırma seçeneğiyle kapatılmış." - -msgid "Cannot connect to ECOM server." -msgstr "ECOM sunucusuna bağlanılamıyor." - -#, python-format -msgid "" -"Cannot create consistency group %(group)s because snapshot %(snap)s is not " -"in a valid state. Valid states are: %(valid)s." -msgstr "" -"%(group)s tutarlılık grubu oluşturulamıyor çünkü %(snap)s anlık sistem " -"görüntüsü geçerli bir durumda değil. Geçerli durumlar: %(valid)s." - -msgid "Cannot create encryption specs. Volume type in use." -msgstr "" -"Şifreleme özellikleri oluşturulamıyor. Mantıksal sürücü türü kullanımda." - -#, python-format -msgid "" -"Cannot create image of disk format: %s. Only vmdk disk format is accepted." -msgstr "" -"%s disk biçiminin imajı oluşturulamıyor. 
Yalnızca vmdk disk biçimi kabul " -"edilir." - -#, python-format -msgid "Cannot create masking view: %(maskingViewName)s. " -msgstr "Maskeleme görünümü oluşturulamıyor: %(maskingViewName)s. " - -#, python-format -msgid "" -"Cannot create more than %(req)s volumes on the ESeries array when " -"'netapp_enable_multiattach' is set to true." -msgstr "" -"'netapp_enable_multiattach' true olarak ayarlandığında ESeries dizisinde " -"%(req)s mantıksal sürücüden fazlası oluşturulamaz." - -#, python-format -msgid "Cannot create or find an storage group with name %(sgGroupName)s." -msgstr "" -"%(sgGroupName)s ismine sahip bir depolama grubu oluşturulamıyor ya da " -"bulunamıyor." - -#, python-format -msgid "Cannot create volume of size %s: not multiple of 8GB." -msgstr "%s boyutunda mantıksal sürücü oluşturulamıyor: 8GB katı değil." - -#, python-format -msgid "Cannot create volume_type with name %(name)s and specs %(extra_specs)s" -msgstr "" -"%(name)s adı ve %(extra_specs)s özellikleri ile volume_type oluşturulamıyor" - -#, python-format -msgid "Cannot delete LUN %s while snapshots exist." -msgstr "Anlık görüntüler varken LUN %s silinemez." - -msgid "Cannot delete encryption specs. Volume type in use." -msgstr "Şifrelem özellikleri silinemez. Mantıksal sürücü türü kullanımda." - -msgid "Cannot execute /sbin/mount.sofs" -msgstr "/sbin/mount.sofs yürütülemiyor" - -#, python-format -msgid "Cannot find CG group %s." -msgstr "CG grubu %s bulunamadı." - -#, python-format -msgid "" -"Cannot find Controller Configuration Service for storage system " -"%(storage_system)s." -msgstr "" -"%(storage_system)s depolama sistemi için Kontrolcü Yapılandırma Servisi " -"bulunamıyor." - -#, python-format -msgid "Cannot find Replication Service to create volume for snapshot %s." -msgstr "" -"%s anlık görüntüsü için mantıksal sürücü oluşturmak için Çoğaltma Servisi " -"bulunamıyor." - -#, python-format -msgid "Cannot find Replication Service to delete snapshot %s." 
-msgstr "%s anlık görüntüsünü silmek için Çoğaltma Servisi bulunamıyor." - -#, python-format -msgid "Cannot find Replication service on system %s." -msgstr "%s sisteminde Çoğaltma servisi bulunamadı." - -#, python-format -msgid "Cannot find Volume: %(id)s. unmanage operation. Exiting..." -msgstr "" -"Mantıksal sürücü bulunamıyor: %(id)s. işlemi yönetmeyi bırak. Çıkılıyor..." - -#, python-format -msgid "Cannot find Volume: %(volumename)s. Extend operation. Exiting...." -msgstr "" -"Mantıksal sürücü bulunamıyor: %(volumename)s. İşlemi genişlet. Çıkılıyor..." - -#, python-format -msgid "Cannot find device number for volume %(volumeName)s." -msgstr "%(volumeName)s mantıksal sürücüsü için aygıt numarası bulunamıyor." - -#, python-format -msgid "Cannot get mcs_id by channel id: %(channel_id)s." -msgstr "mcs_id channel_id: %(channel_id)s tarafından alınamıyor." - -msgid "Cannot get necessary pool or storage system information." -msgstr "Gerekli havuz ya da depolama sistem bilgisi alınamıyor." - -#, python-format -msgid "" -"Cannot get or create a storage group: %(sgGroupName)s for volume " -"%(volumeName)s " -msgstr "" -"Depolama grubu oluşturulamıyor ya da alınamıyor: %(volumeName)s mantıksal " -"sürücüsü için %(sgGroupName)s " - -#, python-format -msgid "Cannot get or create initiator group: %(igGroupName)s. " -msgstr "%(igGroupName)s başlatıcı grubu alınamıyor ya da oluşturulamıyor. " - -#, python-format -msgid "Cannot get port group: %(pgGroupName)s. " -msgstr "Bağlantı noktası grubu alınamıyor: %(pgGroupName)s. " - -#, python-format -msgid "" -"Cannot get storage group: %(sgGroupName)s from masking view " -"%(maskingViewInstanceName)s. " -msgstr "" -"Depolama grubu: %(sgGroupName)s %(maskingViewInstanceName)s maskeleme " -"görünümünden alınamıyor. " - -#, python-format -msgid "" -"Cannot get supported size range for %(sps)s Return code: %(rc)lu. Error: " -"%(error)s." -msgstr "" -"%(sps)s için desteklenen boyut aralığı alınamıyor. Dönüş kodu: %(rc)lu. 
" -"Hata: %(error)s." - -#, python-format -msgid "" -"Cannot get the default storage group for FAST policy: %(fastPolicyName)s." -msgstr "" -"FAST ilkesi için varsayılan depolama grubu alınamıyor: %(fastPolicyName)s." - -msgid "Cannot mount Scality SOFS, check syslog for errors" -msgstr "Scality SOFS bağlanamıyor, hatalar için syslog dosyasını kontrol edin" - -msgid "Cannot ping DRBDmanage backend" -msgstr "DRBDmanage art alanda çalışan uygulamasına ping atılamıyor" - -#, python-format -msgid "Cannot place volume %(id)s on %(host)s" -msgstr "%(host)s üzerine %(id)s mantıksal sürücüsü yerleştirilemiyor" - -#, python-format -msgid "" -"Cannot provide both 'cgsnapshot_id' and 'source_cgid' to create consistency " -"group %(name)s from source." -msgstr "" -"Kaynak tutarlılık grubundan bir tutarlılık grubu %(name)s oluşturma işlemi " -"için 'cgsnapshot_id' ve 'source_cgid' sağlanamıyor." - -msgid "Cannot register resource" -msgstr "Kaynak kaydedilemez" - -msgid "Cannot register resources" -msgstr "Kaynaklar kaydedilemez" - -#, python-format -msgid "" -"Cannot remove volume %(volume_id)s from consistency group %(group_id)s " -"because it is not in the group." -msgstr "" -"%(group_id)s tutarlılık grubundan %(volume_id)s mantıksal sürücüsü " -"kaldırılamaz çünkü grup içinde değil." - -msgid "Cannot retype from one 3PAR array to another." -msgstr "Bir 3PAR dizisinden diğerine retype yapılamaz." - -msgid "Cannot retype to a CPG in a different domain." -msgstr "Bir CPG'ye farklı alanda retype yapılamaz." - -msgid "Cannot retype to a snap CPG in a different domain." -msgstr "Farklı bir alandaki bir kavrama CPG'ye retype yapılamaz." - -msgid "" -"Cannot run vgc-cluster command, please ensure software is installed and " -"permissions are set properly." -msgstr "" -"vgc-cluster komutu çalıştırılamıyor, lütfen yazılımın kurulu ve izinlerin " -"doğru ayarlanmış olduğundan emin olun." - -msgid "Cannot set both hitachi_serial_number and hitachi_unit_name." 
-msgstr "Hem hitachi_serial_number hem hitachi_unit_name ayarlanamaz." - -msgid "Cannot specify both protection domain name and protection domain id." -msgstr "Hem koruma alan ismi he koruma alan kimliği belirtilemez." - -msgid "Cannot specify both storage pool name and storage pool id." -msgstr "" -"Depolama havuzu ismi ve depolama havuzu kimliği aynı anda belirtilemez." - -#, python-format -msgid "" -"Cannot update consistency group %(group_id)s because no valid name, " -"description, add_volumes, or remove_volumes were provided." -msgstr "" -"%(group_id)s tutarlılık grubu güncellenemiyor çünkü hiçbir geçerli ad, " -"tanımlama, add_volumes ya da remove_volumes sağlanmadı." - -msgid "Cannot update encryption specs. Volume type in use." -msgstr "Şifreleme özellikleri güncellenemez. Mantıksal sürücü türü kullanımda." - -#, python-format -msgid "Cannot update volume_type %(id)s" -msgstr "volume_type %(id)s güncellenemiyor" - -#, python-format -msgid "Cannot verify the existence of object:%(instanceName)s." -msgstr "Nesnenin varlığı doğrulanamıyor:%(instanceName)s." - -#, python-format -msgid "CgSnapshot %(cgsnapshot_id)s could not be found." -msgstr "CgSnapshot %(cgsnapshot_id)s bulunamadı." - -msgid "Cgsnahost is empty. No consistency group will be created." -msgstr "Cgsnahost boş. Hiçbir tutarlılık grubu oluşturulamayacak." - -#, python-format -msgid "" -"Change would make usage less than 0 for the following resources: %(unders)s" -msgstr "" -"Aşağıdaki kaynaklar için değiştirme kullanımı 0'dan daha az yapacak:" -"%(unders)s" - -msgid "Check access permissions for the ZFS share assigned to this driver." -msgstr "Bu sürücüye atanmış ZFS paylaşımı için erişim izinlerini kontrol edin." - -msgid "" -"Check the state of the http service. Also ensure that the https port number " -"is the same as the one specified in cinder.conf." -msgstr "" -"Http servisinin durumunu kontrol et. 
Ayrıca https bağlantı noktası " -"numarasının cinder.conf'da belirtilenle aynı olduğundan emin olun." - -msgid "Chunk size is not multiple of block size for creating hash." -msgstr "Özet oluşturmak için parça boyutu blok boyutunun katı değil." - -#, python-format -msgid "Clone feature is not licensed on %(storageSystem)s." -msgstr "Çoğaltma özelliği %(storageSystem)s üzerinde lisanslı değil." - -#, python-format -msgid "Command %(cmd)s blocked in the CLI and was cancelled" -msgstr "CLI içindeki %(cmd)s komutu bloklandı ve iptal edildi" - -#, python-format -msgid "CommandLineHelper._wait_for_condition: %s timeout." -msgstr "CommandLineHelper._wait_for_condition: %s zaman aşımına uğradı." - -msgid "Compression Enabler is not installed. Can not create compressed volume." -msgstr "" -"Sıkıştırma Etkinleştirici kurulu değil. Sıkıştırılmış mantıksal sürücü " -"oluşturulamıyor." - -#, python-format -msgid "Configuration file %(configurationFile)s does not exist." -msgstr "Yapılandırma dosyası %(configurationFile)s bulunamıyor." - -#, python-format -msgid "Configuration value %s is not set." -msgstr "Yapılandırma değeri %s ayarlanmamış." - -#, python-format -msgid "" -"Conflicting QoS specifications in volume type %s: when QoS spec is " -"associated to volume type, legacy \"netapp:qos_policy_group\" is not allowed " -"in the volume type extra specs." -msgstr "" -"%s mantıksal sürücü türündeki QoS özellikleri çelişiyor: QoS özelliği " -"mantıksal sürücü türüne bağlı olduğunda, mantıksal sürücü türü ek " -"özelliklerinde eski \"netapp:qos_policy_group\" özelliğine izin verilmez." 
- -#, python-format -msgid "Connection to glance failed: %(reason)s" -msgstr "Glance bağlantısı başarısız oldu: %(reason)s" - -#, python-format -msgid "Connection to swift failed: %(reason)s" -msgstr "Swift bağlantısı başarısız oldu: %(reason)s" - -#, python-format -msgid "Connector does not provide: %s" -msgstr "Bağlayıcı şunu sağlamıyor: %s" - -#, python-format -msgid "Connector doesn't have required information: %(missing)s" -msgstr "Bağlayıcı gerekli bilgilere sahip değil: %(missing)s" - -msgid "Consistency group is empty. No cgsnapshot will be created." -msgstr "Tutarlılık grubu boş. Hiçbir cgsnapshot oluşturulamayacaktır." - -#, python-format -msgid "ConsistencyGroup %(consistencygroup_id)s could not be found." -msgstr "TutarlılıkGrubu %(consistencygroup_id)s bulunamadı." - -msgid "Container" -msgstr "Kap" - -msgid "Container size smaller than required file size." -msgstr "Kap boyutu gerekli dosya boyutundan küçük." - -msgid "Content type not supported." -msgstr "İçerik türü desteklenmiyor." - -#, python-format -msgid "Controller Configuration Service not found on %(storageSystemName)s." -msgstr "" -"Kontrolcü Yapılandırma Servisi %(storageSystemName)s üzerinde bulunamadı." - -#, python-format -msgid "Controller IP '%(host)s' could not be resolved: %(e)s." -msgstr "Kontrolcü IP '%(host)s' çözülemedi: %(e)s." - -#, python-format -msgid "Converted to %(f1)s, but format is now %(f2)s" -msgstr "%(f1)s e dönüştürüldü, ama biçim şu an %(f2)s" - -#, python-format -msgid "Converted to %(vol_format)s, but format is now %(file_format)s" -msgstr "" -"%(vol_format)s biçimine dönüştürüldü ancak şimdiki biçim %(file_format)s" - -#, python-format -msgid "Converted to raw, but format is now %s" -msgstr "Raw biçimine dönüştürüldü ancak şu anda biçim %s" - -#, python-format -msgid "Converted to raw, but format is now %s." -msgstr "Ham hale dönüştürüldü, ama biçim artık %s." 
- -#, python-format -msgid "" -"Copy volume task failed: convert_to_base_volume: id=%(id)s, status=" -"%(status)s." -msgstr "" -"Mantıksal sürücü kopyalama görevi başarısız: convert_to_base_volume: id=" -"%(id)s, durum=%(status)s." - -#, python-format -msgid "Copying metadata from %(src_type)s %(src_id)s to %(vol_id)s." -msgstr "Metadata %(src_type)s %(src_id)s den %(vol_id)s e kopyalanıyor." - -#, python-format -msgid "Could not find GPFS cluster id: %s." -msgstr "GPFS küme kimliği bulunamadı: %s." - -#, python-format -msgid "Could not find GPFS file system device: %s." -msgstr "GPFS dosya sistemi aygıtı bulunamadı: %s." - -#, python-format -msgid "Could not find config at %(path)s" -msgstr "%(path)s'deki yapılandırma bulunamadı" - -#, python-format -msgid "Could not find iSCSI export for volume %s" -msgstr "%s mantıksal sürücü için iSCSI dışa aktarımı bulunamadı" - -#, python-format -msgid "Could not find iSCSI target for volume: %(volume_id)s." -msgstr "%(volume_id)s mantıksal sürücüsü için iSCSI hedefi bulunamadı." - -#, python-format -msgid "Could not find key in output of command %(cmd)s: %(out)s." -msgstr "Anahtar %(cmd)s: %(out)s komutu çıktısında bulunamadı." - -#, python-format -msgid "Could not find parameter %(param)s" -msgstr "%(param)s parametresi bulunamadı" - -#, python-format -msgid "Could not find target %s" -msgstr "%s hedefi bulunamadı" - -msgid "Could not get system name." -msgstr "Sistem ismi alınamadı." - -#, python-format -msgid "Could not load paste app '%(name)s' from %(path)s" -msgstr "%(path)s den yapıştırma uygulaması '%(name)s' yüklenemedi" - -#, python-format -msgid "Could not save configuration to %(file_path)s: %(exc)s" -msgstr "Yapılandırma %(file_path)s yoluna kaydedilemedi: %(exc)s" - -#, python-format -msgid "" -"Create backup aborted, expected backup status %(expected_status)s but got " -"%(actual_status)s." 
-msgstr "" -"Yedek oluşturma durduruldu, beklenen yedek durumu %(expected_status)s ancak " -"mevcut yedek durumu %(actual_status)s." - -#, python-format -msgid "" -"Create backup aborted, expected volume status %(expected_status)s but got " -"%(actual_status)s." -msgstr "" -"Yedek oluşturma durduruldu, beklenen mantıksal sürücü durumu " -"%(expected_status)s ancak mevcut durum %(actual_status)s." - -msgid "Create export for volume failed." -msgstr "Mantıksal sürücü için dışa aktarım oluşturma başarısız oldu." - -msgid "Create manager volume flow failed." -msgstr "Yönetici mantıksal sürücü akışı oluşturma işlemi başarısız oldu." - -msgid "Create volume failed." -msgstr "Mantıksal sürücü oluşturma başarısız oldu." - -#, python-format -msgid "" -"Creating and activating zone set failed: (Zone set=%(cfg_name)s error=" -"%(err)s)." -msgstr "" -"Bölge kümesinin oluşturulması ve etkinleştirilmesi başarısız: (Bölge kümesi=" -"%(cfg_name)s hata=%(err)s)." - -#, python-format -msgid "" -"Creating and activating zone set failed: (Zone set=%(zoneset)s error=" -"%(err)s)." -msgstr "" -"Bölge kümesinin oluşturulup etkinleştirilmesi başarısız: (Bölge kümesi=" -"%(zoneset)s hata=%(err)s)." - -#, python-format -msgid "Creating usages for %(begin_period)s until %(end_period)s" -msgstr "%(begin_period)s %(end_period)s aralığı için kullanımlar oluşturuluyor" - -msgid "Current host isn't part of HGST domain." -msgstr "Mevcut istemci HGST alanının parçası değildir." - -#, python-format -msgid "" -"Current host not valid for volume %(id)s with type %(type)s, migration not " -"allowed" -msgstr "" -"%(type)s türü ile %(id)s mantıksal sürücüsü için mevcut istemci geçersizdir, " -"taşımaya izin verilmez" - -#, python-format -msgid "" -"Currently mapped host for volume %(vol)s is in unsupported host group with " -"%(group)s." -msgstr "" -"%(vol)s mantıksal sürücüsü için şu an eşleştirilmiş istemci %(group)s ile " -"desteklenmeyen istemci grubunda." 
- -msgid "" -"DRBDmanage driver setup error: some required libraries (dbus, drbdmanage.*) " -"not found." -msgstr "" -"DRBDmanage sürücü kurulum hatası: bazı gerekli kütüphaneler (dbus, " -"drbdmanage.*) bulunamadı." - -#, python-format -msgid "DRBDmanage expected one resource (\"%(res)s\"), got %(n)d" -msgstr "DRBDmanage bir kaynak (\"%(res)s\") bekledi, alınan %(n)d" - -msgid "Data ONTAP API version could not be determined." -msgstr "Veri ONTAP API sürümü belirlenemedi." - -msgid "Data ONTAP operating in 7-Mode does not support QoS policy groups." -msgstr "7-Kipte çalışan Veri ONTAP QoS ilke gruplarını desteklemiyor." - -#, python-format -msgid "" -"Dedup is a valid provisioning type, but requires WSAPI version " -"'%(dedup_version)s' version '%(version)s' is installed." -msgstr "" -"Dedup geçerli bir hazırlık türü, ama WSAPI sürümü '%(dedup_version)s' " -"gerektirir. '%(version)s' sürümü kurulu." - -msgid "Dedup luns cannot be extended" -msgstr "Dedup lun'lar büyütülemez" - -#, python-format -msgid "" -"Default quota for resource: %(res)s is set by the default quota flag: quota_" -"%(res)s, it is now deprecated. Please use the default quota class for " -"default quota." -msgstr "" -"Kaynak için öntanımlı kota: %(res)s öntanımlı kota bayrağı: quota_%(res)s " -"ile ayarlanır, ancak şu anda önerilmiyor. Lütfen öntanımlı kota için " -"öntanımlı kota sınıfı kullanın." - -msgid "Default volume type can not be found." -msgstr "Öntanımlı mantıksal sürücü türü bulunamadı." - -#, python-format -msgid "" -"Delete backup aborted, the backup service currently configured " -"[%(configured_service)s] is not the backup service that was used to create " -"this backup [%(backup_service)s]." -msgstr "" -"Yedek silme işlemi durduruldu, şu anda yapılandırılan yedekleme servisi " -"[%(configured_service)s], bu yedeğin [%(backup_service)s] oluşturulması için " -"kullanılan yedekleme servisi değildir." - -msgid "Delete consistency group failed." 
-msgstr "Tutarlılık grubu silme başarısız oldu." - -#, python-format -msgid "Delete snapshot of volume not supported in state: %s." -msgstr "" -"Mantıksal sürücünün anlık görüntüsünün silinmesi %s durumunda desteklenmiyor." - -#, python-format -msgid "" -"Delete_backup aborted, expected backup status %(expected_status)s but got " -"%(actual_status)s." -msgstr "" -"Delete_backup durduruldu, beklenen yedekleme durumu %(expected_status)s " -"ancak alınan %(actual_status)s." - -msgid "Deleting volume from database and skipping rpc." -msgstr "Veritabanından mantıksal sürücü siliniyor ve rpc atlanıyor." - -#, python-format -msgid "Deleting zones failed: (command=%(cmd)s error=%(err)s)." -msgstr "Bölgelerin silinmesi başarısız: (komut=%(cmd)s hata=%(err)s)." - -msgid "Describe-resource is admin only functionality" -msgstr "Kaynak-tanımla sadece yönetici işlevidir" - -#, python-format -msgid "Destination has migration_status %(stat)s, expected %(exp)s." -msgstr "Hedef migration_status %(stat)s durumuna sahip, beklenen %(exp)s." - -msgid "Destination volume not mid-migration." -msgstr "Hedef mantıksal sürücü taşıma ortasında değildir." - -msgid "" -"Detach volume failed: More than one attachment, but no attachment_id " -"provided." -msgstr "" -"Mantıksal sürücü ayırma başarısız oldu: Birden fazla ek sağlandı, ancak " -"hiçbir attachment_id sağlanamadı." - -msgid "Detach volume from instance and then try again." -msgstr "Sunucudan mantıksal sürücüyü ayırın ve sonrasında tekrar deneyin." - -#, python-format -msgid "Detected more than one volume with name %(vol_name)s" -msgstr "%(vol_name)s adında birden fazla mantıksal sürücü tespit edildi" - -#, python-format -msgid "Did not find expected column in %(fun)s: %(hdr)s." -msgstr "Beklenen sütun %(fun)s de bulunamadı: %(hdr)s." - -#, python-format -msgid "Did not find the expected key %(key)s in %(fun)s: %(raw)s." 
-msgstr "Beklenen anahtar %(key)s %(fun)s de bulunamadı: %(raw)s" - -msgid "Disabled reason contains invalid characters or is too long" -msgstr "" -"Devre dışı bırakılma nedeni geçersiz karakterler içermesi ya da çok uzun " -"olmasıdır" - -#, python-format -msgid "Domain with name %s wasn't found." -msgstr "%s ismine sahip alan bulunamadı." - -#, python-format -msgid "" -"Downlevel GPFS Cluster Detected. GPFS Clone feature not enabled in cluster " -"daemon level %(cur)s - must be at least at level %(min)s." -msgstr "" -"Downlevel GPFS Kümesi Algılandı. GPFS Çoğaltma özelliği %(cur)s küme art " -"alan işi seviyesinde etkin değil - en az %(min)s seviye olmalı." - -#, python-format -msgid "Driver initialize connection failed (error: %(err)s)." -msgstr "Sürücü bağlantı başlatamadı (hata: %(err)s)." - -msgid "Driver must implement initialize_connection" -msgstr "Sürücü initialize_connection gerçekleştirmelidir" - -#, python-format -msgid "" -"Driver successfully decoded imported backup data, but there are missing " -"fields (%s)." -msgstr "" -"İçeri aktarımı yapılan yedek datanın, sürücü tarafından başarılı olarak kodu " -"çözüldü ancak hatalı alanlar mevcut (%s)." - -#, python-format -msgid "" -"EMC VNX Cinder Driver CLI exception: %(cmd)s (Return Code: %(rc)s) (Output: " -"%(out)s)." -msgstr "" -"EMC VNX Cinder Sürücü CLI istisnası: %(cmd)s (Dönüş Kodu: %(rc)s)(Çıktı: " -"%(out)s)." - -#, python-format -msgid "" -"Either 'cgsnapshot_id' or 'source_cgid' must be provided to create " -"consistency group %(name)s from source." -msgstr "" -"Kaynak tutarlılık grubundan bir tutarlılık grubu %(name)s oluşturmak için " -"'cgsnapshot_id' veya 'source_cgid' ikilisinden biri mutlaka sağlanmalıdır." - -#, python-format -msgid "" -"Either SLO: %(slo)s or workload %(workload)s is invalid. Examine previous " -"error statement for valid values." -msgstr "" -"SLO: %(slo)s veya iş yükü %(workload)s geçersiz. Önceki hata ifadesini " -"geçerli değerler için inceleyin." 
- -msgid "Either hitachi_serial_number or hitachi_unit_name is required." -msgstr "Ya hitachi_serial_number ya da hitachi_unit_name gerekli." - -#, python-format -msgid "Element Composition Service not found on %(storageSystemName)s." -msgstr "Öğe Dizgi Servisi %(storageSystemName)s üzerinde bulunamadı." - -msgid "Enables QoS." -msgstr "QoS etkinleştir." - -msgid "Enables compression." -msgstr "Sıkıştırmayı etkinleştir." - -msgid "Enables replication." -msgstr "Replikasyonu etkinleştir." - -msgid "Ensure that configfs is mounted at /sys/kernel/config." -msgstr "" -"Configfs'in /sys/kernel/config yolunda bağlanmış olduğunu garantileyin." - -#, python-format -msgid "" -"Error Adding Initiator: %(initiator)s on groupInitiatorGroup: " -"%(initiatorgroup)s Return code: %(ret.status)d Message: %(ret.data)s ." -msgstr "" -"groupInitiatorGroup: %(initiatorgroup)s üzerinde Başlatıcı Eklemede Hata: " -"%(initiator)s Dönüş kodu: %(ret.status)d İleti: %(ret.data)s ." - -#, python-format -msgid "" -"Error Adding to TargetGroup: %(targetgroup)s withIQN: %(iqn)sReturn code: " -"%(ret.status)d Message: %(ret.data)s." -msgstr "" -"IQN: %(iqn)s e sahip TargetGroup: %(targetgroup)s a eklemede hata Dönüş " -"kodu: %(ret.status)d İleti: %(ret.data)s." - -#, python-format -msgid "Error Attaching volume %(vol)s." -msgstr "%(vol)s mantıksal sürücüsü eklenirken hata." - -#, python-format -msgid "" -"Error Create Cloned Volume: %(cloneName)s Return code: %(rc)lu. Error: " -"%(error)s." -msgstr "" -"Çoğaltılmış Mantıksal Sürücü Oluşturmada Hata: %(cloneName)s Dönüş kodu: " -"%(rc)lu. Hata: %(error)s." - -#, python-format -msgid "" -"Error Create Cloned Volume: Volume: %(cloneName)s Source Volume:" -"%(sourceName)s. Return code: %(rc)lu. Error: %(error)s." -msgstr "" -"Çoğaltılmış Mantıksal Sürüc Oluşturmada Hata: Mantıksal Sürücü: " -"%(cloneName)s Kaynak Mantıksal Sürücü:%(sourceName)s. Dönüş kodu: %(rc)lu. " -"Hata: %(error)s." 
- -#, python-format -msgid "" -"Error Create Group: %(groupName)s. Return code: %(rc)lu. Error: %(error)s." -msgstr "" -"Grup Oluşturmada Hata: %(groupName)s. Dönüş kodu: %(rc)lu. Hata: %(error)s." - -#, python-format -msgid "" -"Error Create Masking View: %(groupName)s. Return code: %(rc)lu. Error: " -"%(error)s." -msgstr "" -"Maskeleme Görünümü Oluşturmada Hata: %(groupName)s. Dönüş kodu: %(rc)lu. " -"Hata: %(error)s." - -#, python-format -msgid "" -"Error Create Volume: %(volumeName)s. Return code: %(rc)lu. Error: %(error)s." -msgstr "" -"Mantıksal Sürücü Oluşturma Hatası: %(volumeName)s. Dönüş kodu: %(rc)lu. " -"Hata: %(error)s." - -#, python-format -msgid "" -"Error Create Volume: %(volumename)s. Return code: %(rc)lu. Error: %(error)s." -msgstr "" -"%(volumename)s Mantıksal Sürücünün Oluşturulmasında hata. Dönüş kodu: " -"%(rc)lu. Hata: %(error)s." - -#, python-format -msgid "" -"Error CreateGroupReplica: source: %(source)s target: %(target)s. Return " -"code: %(rc)lu. Error: %(error)s." -msgstr "" -"CreateGroupReplica Hatası: kaynak: %(source)s hedef: %(target)s. Dönüş kodu: " -"%(rc)lu. Hata: %(error)s." - -#, python-format -msgid "" -"Error Creating Initiator: %(initiator)s on Alias: %(alias)s Return code: " -"%(ret.status)d Message: %(ret.data)s ." -msgstr "" -"Başlatıcı Oluşturmada Hata: %(initiator)s Rumuz: %(alias)s Dönüş kodu: " -"%(ret.status)d İleti: %(ret.data)s ." - -#, python-format -msgid "" -"Error Creating Project: %(project)s on Pool: %(pool)s Return code: " -"%(ret.status)d Message: %(ret.data)s ." -msgstr "" -"Proje Oluşturmada Hata: %(project)s Havuz: %(pool)s Dönüş kodu: " -"%(ret.status)d İleti: %(ret.data)s ." - -#, python-format -msgid "" -"Error Creating Share: %(name)s Return code: %(ret.status)d Message: " -"%(ret.data)s." -msgstr "" -"Paylaşım Oluşturmada Hata: %(name)s Dönüş kodu: %(ret.status)d İleti: " -"%(ret.data)s." 
-
-#, python-format
-msgid ""
-"Error Creating Snapshot: %(snapshot)s onVolume: %(lun)s to Pool: %(pool)s "
-"Project: %(project)s Return code: %(ret.status)d Message: %(ret.data)s."
-msgstr ""
-"Anlık Görüntü Oluşturmada Hata: %(snapshot)s Mantıksal Sürücü: %(lun)s "
-"Havuz: %(pool)s Proje: %(project)s Dönüş kodu: %(ret.status)d İleti: "
-"%(ret.data)s."
-
-#, python-format
-msgid ""
-"Error Creating Snapshot: %(snapshot)s onshare: %(share)s to Pool: %(pool)s "
-"Project: %(project)s Return code: %(ret.status)d Message: %(ret.data)s."
-msgstr ""
-"Anlık Görüntü Oluşturmada Hata: %(snapshot)s onshare: %(share)s Havuz: "
-"%(pool)s Proje: %(project)s Dönüş kodu: %(ret.status)d İleti: %(ret.data)s."
-
-#, python-format
-msgid ""
-"Error Creating Target: %(alias)sReturn code: %(ret.status)d Message: "
-"%(ret.data)s ."
-msgstr ""
-"Hedef Oluşturmada Hata: %(alias)s Dönüş kodu: %(ret.status)d İleti: "
-"%(ret.data)s ."
-
-#, python-format
-msgid ""
-"Error Creating TargetGroup: %(targetgroup)s withIQN: %(iqn)sReturn code: "
-"%(ret.status)d Message: %(ret.data)s ."
-msgstr ""
-"IQN: %(iqn)s e sahip TargetGroup: %(targetgroup)s oluşturmada hata Dönüş "
-"kodu: %(ret.status)d İleti: %(ret.data)s ."
-
-#, python-format
-msgid ""
-"Error Creating Volume: %(lun)s Size: %(size)s Return code: %(ret.status)d "
-"Message: %(ret.data)s."
-msgstr ""
-"Mantıksal Sürücü Oluşturmada Hata: %(lun)s Boyut: %(size)s Dönüş kodu: "
-"%(ret.status)d İleti: %(ret.data)s."
-
-#, python-format
-msgid ""
-"Error Creating new composite Volume Return code: %(rc)lu. Error: %(error)s."
-msgstr ""
-"Yeni bileşik Mantıksal sürücü oluşturmada hata Dönüş kodu: %(rc)lu. Hata: "
-"%(error)s."
-
-msgid "Error Creating unbound volume on an Extend operation."
-msgstr "Büyütme işleminde bağımsız mantıksal sürücü oluşturmada hata."
-
-msgid "Error Creating unbound volume."
-msgstr "Bağımsız mantıksal sürücü oluşturmada hata."
-
-#, python-format
-msgid ""
-"Error Delete Volume: %(volumeName)s. 
Return code: %(rc)lu. Error: %(error)s." -msgstr "" -"Mantıksal Sürücü Silme Hatası: %(volumeName)s. Dönüş kodu: %(rc)lu. Hata: " -"%(error)s." - -#, python-format -msgid "" -"Error Deleting Snapshot: %(snapshot)s on Volume: %(lun)s to Pool: %(pool)s " -"Project: %(project)s Return code: %(ret.status)d Message: %(ret.data)s." -msgstr "" -"Anlık Görüntü Silmede Hata: %(snapshot)s Mantıksal Sürücü: %(lun)s Havuz: " -"%(pool)s Proje: %(project)s Dönüş kodu: %(ret.status)d İleti: %(ret.data)s." - -#, python-format -msgid "" -"Error Getting Initiators: InitiatorGroup: %(initiatorgroup)s Return code: " -"%(ret.status)d Message: %(ret.data)s ." -msgstr "" -"Başlatıcıları Almada Hata: InitiatorGroup: %(initiatorgroup)s Dönüş kodu: " -"%(ret.status)d İleti: %(ret.data)s ." - -#, python-format -msgid "" -"Error Getting Share: %(share)s on Pool: %(pool)s Project: %(project)s Return " -"code: %(ret.status)d Message: %(ret.data)s." -msgstr "" -"Paylaşım Almada Hata: %(share)s %(pool)s Havuzunda Proje: %(project)s Dönüş " -"kodu: %(ret.status)d İleti: %(ret.data)s." - -#, python-format -msgid "" -"Error Getting Target: %(alias)sReturn code: %(ret.status)d Message: " -"%(ret.data)s ." -msgstr "" -"Hedef Almada Hata: %(alias)s Dönüş kodu: %(ret.status)d İleti: %(ret.data)s ." - -#, python-format -msgid "" -"Error Getting Volume: %(lun)s on Pool: %(pool)s Project: %(project)s Return " -"code: %(ret.status)d Message: %(ret.data)s." -msgstr "" -"Mantıksal Sürücü Almada Hata: %(lun)s Havuz: %(pool)s Proje: %(project)s " -"Dönüş kodu: %(ret.status)d İleti: %(ret.data)s." - -#, python-format -msgid "" -"Error Migrating volume from one pool to another. Return code: %(rc)lu. " -"Error: %(error)s." -msgstr "" -"Mantıksal sürücünün bir havuzdan diğerine göçünde hata. Dönüş kodu: " -"%(rc)lu. Hata: %(error)s." - -#, python-format -msgid "" -"Error Modifying masking view : %(groupName)s. Return code: %(rc)lu. Error: " -"%(error)s." 
-msgstr "" -"Maskeleme görünümünü değiştirmede hata : %(groupName)s. Dönüş kodu: " -"%(rc)lu. Hata: %(error)s." - -#, python-format -msgid "Error Pool ownership: Pool %(pool)s is not owned by %(host)s." -msgstr "" -"Havuz sahipliği Hatası: %(pool)s havuzu %(host)s istemcisine ait değildir." - -#, python-format -msgid "" -"Error Terminating migrate session. Return code: %(rc)lu. Error: %(error)s." -msgstr "" -"Göç oturumunu sonlandırmada hata. Dönüş kodu: %(rc)lu. Hata: %(error)s." - -#, python-format -msgid "" -"Error Verifying Initiator: %(iqn)s Return code: %(ret.status)d Message: " -"%(ret.data)s." -msgstr "" -"Başlatıcının Doğrulanmasında Hata: %(iqn)s Dönüş kodu: %(ret.status)d İleti: " -"%(ret.data)s." - -#, python-format -msgid "" -"Error Verifying Pool: %(pool)s Return code: %(ret.status)d Message: " -"%(ret.data)s." -msgstr "" -"Havuzu Doğrulamada Hata: %(pool)s Dönüş kodu: %(ret.status)d İleti: " -"%(ret.data)s." - -#, python-format -msgid "" -"Error Verifying Project: %(project)s on Pool: %(pool)s Return code: " -"%(ret.status)d Message: %(ret.data)s." -msgstr "" -"%(pool)s Havuzundaki %(project)s Projesinin Doğrulanmasında Hata Dönüş kodu: " -"%(ret.status)d İleti: %(ret.data)s." - -#, python-format -msgid "" -"Error Verifying Service: %(service)s Return code: %(ret.status)d Message: " -"%(ret.data)s." -msgstr "" -"Servis Doğrulamada Hata: %(service)s Dönüş kodu: %(ret.status)d İleti: " -"%(ret.data)s." - -#, python-format -msgid "" -"Error Verifying Target: %(alias)s Return code: %(ret.status)d Message: " -"%(ret.data)s." -msgstr "" -"Hedefin Doğrulanmasında Hata: %(alias)s Dönüş kodu: %(ret.status)d İleti: " -"%(ret.data)s." - -#, python-format -msgid "" -"Error Verifying share: %(share)s on Project: %(project)s and Pool: %(pool)s " -"Return code: %(ret.status)d Message: %(ret.data)s." -msgstr "" -"Paylaşımın Onaylanmasında Hata: %(share)s Proje: %(project)s ve Havuz: " -"%(pool)s Dönüş kodu: %(ret.status)d İleti: %(ret.data)s." 
- -#, python-format -msgid "" -"Error adding Volume: %(volumeName)s with instance path: " -"%(volumeInstancePath)s." -msgstr "" -"%(volumeInstancePath)s. sunucu yoluna sahip %(volumeName)s mantıksal " -"sürücüsünü eklemede hata." - -#, python-format -msgid "" -"Error adding initiator to group : %(groupName)s. Return code: %(rc)lu. " -"Error: %(error)s." -msgstr "" -"Başlatıcı, gruba eklenemedi: %(groupName)s. Dönüş kodu: %(rc)lu. Hata: " -"%(error)s." - -#, python-format -msgid "Error adding volume to composite volume. Error is: %(error)s." -msgstr "" -"Mantıksal sürücünün bileşik mantıksal sürücüye eklenmesinde hata. Hata: " -"%(error)s." - -#, python-format -msgid "Error appending volume %(volumename)s to target base volume." -msgstr "" -"%(volumename)s mantıksal sürücüsünün hedef taban mantıksal sürücüye " -"eklenmesinde hata." - -#, python-format -msgid "" -"Error associating storage group : %(storageGroupName)s. To fast Policy: " -"%(fastPolicyName)s with error description: %(errordesc)s." -msgstr "" -"Depolama grubuyla ilişkilendirmede hata : %(storageGroupName)s. Fast " -"İlkesine: %(fastPolicyName)s %(errordesc)s hata tanımıyla." - -#, python-format -msgid "" -"Error break clone relationship: Sync Name: %(syncName)s Return code: " -"%(rc)lu. Error: %(error)s." -msgstr "" -"Çoğaltma bağını kırmada hata: Eşzamanlama İsmi: %(syncName)s Dönüş kodu: " -"%(rc)lu. Hata: %(error)s." - -msgid "Error connecting to ceph cluster." -msgstr "Ceph kümesine bağlanırken hata." - -#, python-format -msgid "Error connecting via ssh: %s" -msgstr "Ssh yoluyla bağlanırken hata: %s" - -#, python-format -msgid "Error creating volume: %s." -msgstr "Mantıksal sürücü oluşturmada hata: %s." - -msgid "Error deleting replay profile." -msgstr "Yeniden oynatma profilinin silinmesinde hata." - -#, python-format -msgid "Error deleting volume %(vol)s: %(err)s." -msgstr "%(vol)s mantıksal sürücüsünün silinmesinde hata: %(err)s." 
- -#, python-format -msgid "Error during evaluator parsing: %(reason)s" -msgstr "Değerlendirici ayrıştırma sırasında hata: %(reason)s" - -#, python-format -msgid "" -"Error editing share: %(share)s on Pool: %(pool)s Return code: %(ret.status)d " -"Message: %(ret.data)s ." -msgstr "" -"Paylaşım düzenlemede hata: %(share)s %(pool)s Havuzunda Dönüş kodu: " -"%(ret.status)d İleti: %(ret.data)s ." - -#, python-format -msgid "" -"Error enabling iSER for NetworkPortal: please ensure that RDMA is supported " -"on your iSCSI port %(port)d on ip %(ip)s." -msgstr "" -"NetworkPortal için iSER etkinleştirilirken hata: lütfen %(ip)s üzerinde " -"iSCSI bağlantı noktanızın %(port)d RDMA tarafından desteklendiğine emin " -"olun." - -#, python-format -msgid "Error encountered during cleanup of a failed attach: %(ex)s" -msgstr "Başarısız eklemenin temizlenmesi sırasında hata oluştu: %(ex)s" - -#, python-format -msgid "Error executing CloudByte API [%(cmd)s], Error: %(err)s." -msgstr "CloudByte API [%(cmd)s] yürütülürken hata, Hata: %(err)s." - -msgid "Error executing EQL command" -msgstr "EQL komutu yürütülürken hata" - -#, python-format -msgid "Error executing command via ssh: %s" -msgstr "Ssh ile komut çalıştırmada hata: %s" - -#, python-format -msgid "Error extending volume %(vol)s: %(err)s." -msgstr "%(vol)s mantıksal sürücüsünün büyütülmesinde hata: %(err)s." - -#, python-format -msgid "Error extending volume: %(reason)s" -msgstr "Mantıksal sürücü genişletilirken hata: %(reason)s" - -#, python-format -msgid "Error finding %(name)s." -msgstr "%(name)s bulunurken hata." - -#, python-format -msgid "Error finding %s." -msgstr "%s bulunurken hata." - -#, python-format -msgid "Error getting domain id from name %(name)s: %(err)s." -msgstr "%(name)s isimden alan kimliği almada hata: %(err)s." - -#, python-format -msgid "Error getting domain id from name %(name)s: %(id)s." -msgstr "%(name)s isminden alan kimliği almada hata: %(id)s." - -msgid "Error getting initiator groups." 
-msgstr "Başlatıcı grupların alınmasında hata." - -#, python-format -msgid "Error getting pool id from name %(pool)s: %(err)s." -msgstr "%(pool)s isimden havuz kimliği almada hata: %(err)s." - -#, python-format -msgid "Error getting pool id from name %(pool_name)s: %(err_msg)s." -msgstr "Havuz kimliği %(pool_name)s isminden alınamadı: %(err_msg)s." - -#, python-format -msgid "" -"Error getting version: svc: %(svc)s.Return code: %(ret.status)d Message: " -"%(ret.data)s." -msgstr "" -"Sürüm almada hata: svc: %(svc)s.Dönüş kodu: %(ret.status)d İleti: " -"%(ret.data)s." - -#, python-format -msgid "Error in SolidFire API response: data=%(data)s" -msgstr "SolidFire API yanıtında hata: veri=%(data)s" - -#, python-format -msgid "Error in space-create for %(space)s of size %(size)d GB" -msgstr "%(size)d GB boyutundaki %(space)s için space-create sırasında hata" - -#, python-format -msgid "Error in space-extend for volume %(space)s with %(size)d additional GB" -msgstr "" -"%(size)d ek GB'ye sahip %(space)s mantıksal sürücüsü için space-extend " -"sırasında hata" - -#, python-format -msgid "" -"Error modify replica synchronization: %(sv)s operation: %(operation)s. " -"Return code: %(rc)lu. Error: %(error)s." -msgstr "" -"Kopyanın eşzamanlanmasının değiştirilmesinde hata: %(sv)s işlem: " -"%(operation)s. Dönüş kodu: %(rc)lu. Hata: %(error)s." - -#, python-format -msgid "" -"Error modifying Service: %(service)s Return code: %(ret.status)d Message: " -"%(ret.data)s." -msgstr "" -"Servisi değiştirmede hata: %(service)s Dönüş kodu: %(ret.status)d İleti: " -"%(ret.data)s." - -#, python-format -msgid "Error occurred when creating cgsnapshot %s." -msgstr "%s cgsnapshot oluşturulurken hata oluştu." - -#, python-format -msgid "Error occurred when deleting cgsnapshot %s." -msgstr "cgsnapshot %s silinirken hata oluştu." - -#, python-format -msgid "Error occurred when updating consistency group %s." -msgstr "%s tutarlılık grubu güncellenirken hata oluştu." 
- -#, python-format -msgid "Error unbinding volume %(vol)s from pool. %(error)s." -msgstr "" -"%(vol)s mantıksal sürücüsünün havuz bağlantısını ayırmada hata. %(error)s." - -#, python-format -msgid "Error while checking transaction status: %s" -msgstr "Aktarım durumu kontrol edilirken hata: %s" - -#, python-format -msgid "Error while getting data via ssh: (command=%(cmd)s error=%(err)s)." -msgstr "Ssh ile veri almada hata: (komut=%(cmd)s hata=%(err)s)." - -#, python-format -msgid "Error while requesting %(service)s API." -msgstr "API %(service)s istenirken hata." - -#, python-format -msgid "Error while running zoning CLI: (command=%(cmd)s error=%(err)s)." -msgstr "Bölgeleme CLI'si çalıştırılırken hata: (komut=%(cmd)s hata=%(err)s)." - -#, python-format -msgid "" -"Exceeded max scheduling attempts %(max_attempts)d for volume %(volume_id)s" -msgstr "" -"%(volume_id)s mantıksal sürücüsü için zamanlama denemeleri azami " -"%(max_attempts)d sınırı aşıldı" - -#, python-format -msgid "Exception appending meta volume to target volume %(volumename)s." -msgstr "" -"Meta mantıksal sürücünün %(volumename)s hedef mantıksal sürücüye " -"eklenmesinde istisna." - -#, python-format -msgid "" -"Exception during create element replica. Clone name: %(cloneName)s Source " -"name: %(sourceName)s Extra specs: %(extraSpecs)s " -msgstr "" -"Öge kopyası oluşturma sırasında istisna. Kopya adı: %(cloneName)s Kaynak " -"adı: %(sourceName)s Ek özellikler: %(extraSpecs)s " - -#, python-format -msgid "Exception in _select_ds_for_volume: %s." -msgstr "_select_ds_for_volume'de istisna: %s." - -#, python-format -msgid "Exception: %s" -msgstr "İstisna: %s" - -#, python-format -msgid "Expected a uuid but received %(uuid)s." -msgstr "Bir uuid bekleniyor ancak alınan %(uuid)s." - -#, python-format -msgid "Expected integer for node_count, svcinfo lsiogrp returned: %(node)s." -msgstr "" -"node_count için tam sayı beklendi, svcinfo lsiogrp şunu döndürdü: %(node)s." 
-
-#, python-format
-msgid "Expected no output from CLI command %(cmd)s, got %(out)s."
-msgstr "%(cmd)s CLI komutundan çıktı beklenmiyordu, %(out)s alındı."
-
-#, python-format
-msgid "Expected volume size was %d"
-msgstr "Beklenen mantıksal sürücü boyutu %d"
-
-#, python-format
-msgid ""
-"Export backup aborted, expected backup status %(expected_status)s but got "
-"%(actual_status)s."
-msgstr ""
-"Yedek dışa aktarımı durduruldu, beklenen yedekleme durumu "
-"%(expected_status)s ancak alınan %(actual_status)s."
-
-#, python-format
-msgid ""
-"Export record aborted, the backup service currently configured "
-"[%(configured_service)s] is not the backup service that was used to create "
-"this backup [%(backup_service)s]."
-msgstr ""
-"Kayıt dışa aktarımı durduruldu, şu anda yapılandırılan yedekleme servisi "
-"[%(configured_service)s], bu yedeğin [%(backup_service)s] oluşturulması için "
-"kullanılan yedekleme servisi değildir."
-
-msgid ""
-"Extend volume is only supported for this driver when no snapshots exist."
-msgstr ""
-"Bu sürücü için mantıksal sürücü genişletme sadece anlık sistem görüntüsü "
-"olmadığında desteklenir."
-
-msgid "Extend volume not implemented"
-msgstr "Mantıksal sürücü genişletme uygulanmadı"
-
-msgid "FAST is not supported on this array."
-msgstr "FAST bu dizi üzerinde desteklenmiyor."
-
-#, python-format
-msgid "Faield to unassign %(volume)s"
-msgstr "%(volume)s ataması kaldırılamadı"
-
-#, python-format
-msgid "Failed adding connection for fabric=%(fabric)s: Error: %(err)s"
-msgstr "fabric=%(fabric)s için bağlantı ekleme başarısız: Hata: %(err)s"
-
-msgid "Failed cgsnapshot"
-msgstr "cgsnapshot başarısız oldu"
-
-#, python-format
-msgid "Failed creating snapshot for volume %(volname)s: %(response)s."
-msgstr ""
-"%(volname)s mantıksal sürücüsü için anlık görüntü oluşturma başarısız: "
-"%(response)s."
-
-#, python-format
-msgid "Failed getting details for pool %s."
-msgstr "%s havuzu için detayların getirilmesi başarısız."
- -#, python-format -msgid "Failed removing connection for fabric=%(fabric)s: Error: %(err)s" -msgstr "fabric=%(fabric)s için bağlantı kaldırma başarısız: Hata: %(err)s" - -#, python-format -msgid "Failed to Extend Volume %(volname)s" -msgstr "Mantıksal Sürücü %(volname)s Genişletilemedi" - -#, python-format -msgid "Failed to Login to 3PAR (%(url)s) because %(err)s" -msgstr "3PAR'a giriş başarısız (%(url)s) çünkü %(err)s" - -msgid "Failed to access active zoning configuration." -msgstr "Etkin bölgeleme yapılandırmasına erişim başarısız." - -#, python-format -msgid "Failed to access zoneset status:%s" -msgstr "Bölge kümesi durumuna erişim başarısız: %s" - -#, python-format -msgid "" -"Failed to acquire a resource lock. (serial: %(serial)s, inst: %(inst)s, ret: " -"%(ret)s, stderr: %(err)s)" -msgstr "" -"Kaynak kilidi alma başarısız. (seri: %(serial)s, inst: %(inst)s, ret: " -"%(ret)s, stderr: %(err)s)" - -msgid "Failed to add the logical device." -msgstr "Mantıksal aygıt ekleme başarısız." - -msgid "Failed to add zoning configuration." -msgstr "Bölgeleme yapılandırması eklenmesi başarısız." - -#, python-format -msgid "" -"Failed to assign the iSCSI initiator IQN. (port: %(port)s, reason: " -"%(reason)s)" -msgstr "" -"iSCSI başlatıcı IQN atanamadı. (bağlantı noktası: %(port)s, sebep: " -"%(reason)s)" - -#, python-format -msgid "Failed to associate qos_specs: %(specs_id)s with type %(type_id)s." -msgstr "qos_specs ilişkilendirilemedi: %(type_id)s türü ile %(specs_id)s." - -#, python-format -msgid "Failed to attach iSCSI target for volume %(volume_id)s." -msgstr "%(volume_id)s mantıksal sürücüsü için iSCSI hedefi eklenemedi." 
- -#, python-format -msgid "Failed to backup volume metadata - %s" -msgstr "Mantıksal sürücü metadata'sı yedeklenemedi - %s" - -#, python-format -msgid "" -"Failed to backup volume metadata - Metadata backup object 'backup.%s.meta' " -"already exists" -msgstr "" -"Mantıksal sürücü metadata'sı yedeklenemedi - Metadata yedekleme nesnesi " -"'backup.%s.meta' zaten var" - -#, python-format -msgid "Failed to clone volume from snapshot %s." -msgstr "%s anlık sistem görüntüsünden mantıksal sürücü kopyalanamadı." - -#, python-format -msgid "Failed to connect to %(vendor_name)s Array %(host)s: %(err)s" -msgstr "%(vendor_name)s dizisine bağlantı başarısız %(host)s: %(err)s" - -msgid "Failed to connect to array" -msgstr "Diziye bağlanma başarısız" - -#, python-format -msgid "Failed to copy image to volume: %(reason)s" -msgstr "İmaj mantıksal sürücüye kopyalanamadı: %(reason)s" - -#, python-format -msgid "Failed to copy metadata to volume: %(reason)s" -msgstr "Metadata mantıksal sürücüye kopyalanamadı: %(reason)s" - -#, python-format -msgid "Failed to create IG, %s" -msgstr "IG oluşturma başarısız, %s" - -#, python-format -msgid "Failed to create Volume Group: %(vg_name)s" -msgstr "Mantıksal Sürücü Grubu oluşturulamadı: %(vg_name)s" - -#, python-format -msgid "" -"Failed to create a file. (file: %(file)s, ret: %(ret)s, stderr: %(err)s)" -msgstr "" -"Bir dosya oluşturma başarısız. (dosya: %(file)s, ret: %(ret)s, stderr: " -"%(err)s)" - -msgid "Failed to create api volume flow." -msgstr "Mantıksal sürücü api'si oluşturulamadı." - -#, python-format -msgid "Failed to create cg snapshot %(id)s due to %(reason)s." -msgstr "%(reason)s sebebiyle cg anlık görüntüsü %(id)s oluşturulamadı." - -#, python-format -msgid "Failed to create consistency group %(id)s due to %(reason)s." -msgstr "Tutarlılık grubu %(id)s %(reason)s sebebiyle oluşturulamadı." - -#, python-format -msgid "Failed to create consistency group %(id)s:%(ret)s." 
-msgstr "Tutarlılık grubu %(id)s oluşturma başarısız:%(ret)s." - -#, python-format -msgid "" -"Failed to create consistency group %s because VNX consistency group cannot " -"accept compressed LUNs as members." -msgstr "" -"%s tutarlılık grubu oluşturulamıyor çünkü VNX tutarlılık grubu sıkıştırılmış " -"LUN'ları üye olarak kabul edemiyor." - -#, python-format -msgid "Failed to create consistency group: %(cgName)s." -msgstr "Tutarlılık grubu oluşturulamadı: %(cgName)s." - -#, python-format -msgid "Failed to create consistency group: %(cgid)s. Error: %(excmsg)s." -msgstr "Tutarlılık grubu oluşturulamıyor: %(cgid)s. Hata: %(excmsg)s." - -#, python-format -msgid "" -"Failed to create consistency group: %(consistencyGroupName)s Return code: " -"%(rc)lu. Error: %(error)s." -msgstr "" -"%(consistencyGroupName)s tutarlılık grubunu oluşturma başarısız Dönüş kodu: " -"%(rc)lu. Hata: %(error)s." - -#, python-format -msgid "Failed to create hardware id(s) on %(storageSystemName)s." -msgstr "" -"%(storageSystemName)s üzerinde donanım kimlik(ler)i oluşturma başarısız." - -msgid "Failed to create iqn." -msgstr "Iqn oluşturulamadı." - -#, python-format -msgid "Failed to create iscsi target for volume %(volume_id)s." -msgstr "%(volume_id)s mantıksal sürücüsü için iscsi hedefi oluşturulamadı." - -msgid "Failed to create manage_existing flow." -msgstr "manage_existing akışı oluşturulamadı." - -msgid "Failed to create map on mcs, no channel can map." -msgstr "Mcs üzerinde eşleştirme oluşturma başarısız, hiçbir kanal eşleşemez." - -msgid "Failed to create map." -msgstr "Eşleştirme oluşturma başarısız." - -#, python-format -msgid "Failed to create metadata for volume: %(reason)s" -msgstr "Mantıksal sürücü için metadata oluşturulamadı: %(reason)s" - -msgid "Failed to create partition." -msgstr "Bölüm oluşturma başarısız." - -#, python-format -msgid "Failed to create qos_specs: %(name)s with specs %(qos_specs)s." -msgstr "qos_specs oluşturulamadı: %(qos_specs)s özellikleri ile %(name)s." 
- -msgid "Failed to create replica." -msgstr "Kopya oluşturulamadı." - -msgid "Failed to create scheduler manager volume flow" -msgstr "Zamanlayıcı yönetici mantıksal sürücü akışı oluşturma başarısız" - -#, python-format -msgid "Failed to create snapshot %s" -msgstr "%s anlık sistem görüntüsü oluşturulamadı" - -#, python-format -msgid "Failed to create snapshot for cg: %(cgName)s." -msgstr "cg: %(cgName)s için anlık görüntü oluşturma başarısız." - -#, python-format -msgid "Failed to create snapshot for volume %s." -msgstr "%s mantıksal sürücüsü için anlık sistem görüntüsü oluşturulamadı." - -#, python-format -msgid "Failed to create snapshot policy on volume %(vol)s: %(res)s." -msgstr "" -"%(vol)s mantıksal sürücüsü üzerinde anlık görüntü ilkesi oluşturma " -"başarısız: %(res)s." - -#, python-format -msgid "Failed to create snapshot resource area on volume %(vol)s: %(res)s." -msgstr "" -"%(vol)s mantıksal sürücüsü üzerinde anlık görüntü kaynak alanı oluşturma " -"başarısız: %(res)s." - -msgid "Failed to create snapshot." -msgstr "Anlık görüntü oluşturma başarısız." - -#, python-format -msgid "" -"Failed to create snapshot. CloudByte volume information not found for " -"OpenStack volume [%s]." -msgstr "" -"Anlık sistem görüntüsü oluşturulamadı. [%s] OpenStack mantıksal sürücüsü " -"için CloudByte mantıksal sürücü bilgisi bulunamadı." - -#, python-format -msgid "Failed to create thin pool, error message was: %s" -msgstr "İnce havuz oluşturma başarısız, hata iletisi: %s" - -#, python-format -msgid "Failed to create volume %s" -msgstr "%s mantıksal sürücüsü oluşturulamadı" - -#, python-format -msgid "Failed to delete SI for volume_id: %(volume_id)s because it has pair." -msgstr "volume_id: %(volume_id)s için SI silinemiyor çünkü bir çifti var." - -#, python-format -msgid "Failed to delete a logical device. (LDEV: %(ldev)s, reason: %(reason)s)" -msgstr "Bir mantıksal aygıt silinemedi. 
(LDEV: %(ldev)s, kaynak: %(reason)s)" - -#, python-format -msgid "Failed to delete cgsnapshot %(id)s due to %(reason)s." -msgstr "%(reason)s sebebiyle cg anlık görüntüsü %(id)s silinemedi." - -#, python-format -msgid "Failed to delete consistency group %(id)s due to %(reason)s." -msgstr "Tutarlılık grubu %(id)s %(reason)s sebebiyle silinemedi." - -#, python-format -msgid "Failed to delete consistency group: %(cgName)s." -msgstr "Tutarlılık grubu silme başarısız: %(cgName)s." - -#, python-format -msgid "" -"Failed to delete consistency group: %(consistencyGroupName)s Return code: " -"%(rc)lu. Error: %(error)s." -msgstr "" -"%(consistencyGroupName)s tutarlılık grubunu silme başarısız Dönüş kodu: " -"%(rc)lu. Hata: %(error)s." - -#, python-format -msgid "" -"Failed to delete fileset for consistency group %(cgname)s. Error: %(excmsg)s." -msgstr "" -"%(cgname)s tutarlılık grubu için dosya kümesi silinemedi. Hata: %(excmsg)s." - -msgid "Failed to delete iqn." -msgstr "Iqn silme başarısız." - -msgid "Failed to delete map." -msgstr "Eşleştirme silme başarısız." - -msgid "Failed to delete partition." -msgstr "Bölüm silme başarısız." - -msgid "Failed to delete replica." -msgstr "Kopya silinemedi." - -#, python-format -msgid "Failed to delete snapshot %s" -msgstr "%s anlık sistem görüntüsü silinemedi" - -#, python-format -msgid "Failed to delete snapshot for cg: %(cgId)s." -msgstr "cg: %(cgId)s için anlık görüntü silme başarısız." - -#, python-format -msgid "Failed to delete snapshot for snapshot_id: %s because it has pair." -msgstr "snapshot_id: %s için anlık görüntü silinemiyor çünkü çifti var." - -msgid "Failed to delete snapshot." -msgstr "Anlık görüntü silme başarısız." - -#, python-format -msgid "Failed to delete volume %(volumeName)s." -msgstr "%(volumeName)s mantıksal sürücüsü silinemedi." - -#, python-format -msgid "" -"Failed to delete volume for volume_id: %(volume_id)s because it has pair." 
-msgstr "" -"volume_id: %(volume_id)s için bir mantıksal sürücü silinemedi çünkü çifti " -"var." - -#, python-format -msgid "Failed to detach iSCSI target for volume %(volume_id)s." -msgstr "%(volume_id)s mantıksal sürücüsü için iSCSI hedefi ayrılamadı." - -msgid "Failed to determine blockbridge API configuration" -msgstr "Blockbridge API yapılandırması belirlenemedi" - -msgid "Failed to disassociate qos specs." -msgstr "Qos özellikleri ilişkisi kesilemedi." - -#, python-format -msgid "Failed to disassociate qos_specs: %(specs_id)s with type %(type_id)s." -msgstr "" -"%(type_id)s türündeki qos_specs: %(specs_id)s ilişiğini kesme başarısız." - -#, python-format -msgid "" -"Failed to ensure snapshot resource area, could not locate volume for id %s" -msgstr "" -"Anlık görüntü kaynak alanından emin olunamadı, id %s için mantıksal sürücü " -"bulunamadı" - -#, python-format -msgid "" -"Failed to execute CloudByte API [%(cmd)s]. Http status: %(status)s, Error: " -"%(error)s." -msgstr "" -"CloudByte API [%(cmd)s] yürütülemedi. Http durumu: %(status)s, Hata: " -"%(error)s." - -msgid "Failed to execute common command." -msgstr "Yaygın komutun çalıştırılması başarısız." - -#, python-format -msgid "Failed to export for volume: %(reason)s" -msgstr "Mantıksal sürücü dışa aktarılamadı: %(reason)s" - -msgid "Failed to find Storage Center" -msgstr "Depolama Merkezi bulunamadı" - -msgid "Failed to find a vdisk copy in the expected pool." -msgstr "Beklenen havuzda bir vdisk kopyası bulunamadı." - -msgid "Failed to find account for volume." -msgstr "Mantıksal sürücü için kullanıcı bulunamadı." - -#, python-format -msgid "Failed to find fileset for path %(path)s, command output: %(cmdout)s." -msgstr "" -"%(path)s yolu için dosya kümesi bulma başarısız, komut çıktısı: %(cmdout)s." - -#, python-format -msgid "Failed to find host %s." -msgstr "%s istemcisi bulunamadı." - -#, python-format -msgid "Failed to get CloudByte account details for account [%s]." 
-msgstr "[%s] hesabı için CloudByte hesap ayrıntıları alınamadı." - -#, python-format -msgid "Failed to get LUN target details for the LUN %s" -msgstr "LUN %s için LUN hedef detayları alma başarısız" - -#, python-format -msgid "Failed to get LUN target details for the LUN %s." -msgstr "LUN %s için LUN hedef detaylarını alma başarısız." - -#, python-format -msgid "Failed to get LUN target list for the LUN %s" -msgstr "LUN %s için LUN hedef listesi alınamadı" - -#, python-format -msgid "Failed to get Partition ID for volume %(volume_id)s." -msgstr "%(volume_id)s mantıksal sürücüsü için Bölüm ID'si alınamadı." - -#, python-format -msgid "Failed to get Raid Snapshot ID from Snapshot %(snapshot_id)s." -msgstr "" -"%(snapshot_id)s anlık görüntüsünden Raid Anlık Görüntü Kimliği alınamıyor." - -#, python-format -msgid "Failed to get Raid Snapshot ID from snapshot: %(snapshot_id)s." -msgstr "" -"%(snapshot_id)s anlık görüntüsünden Raid Anlık Görüntü Kimliği alınamadı." - -#, python-format -msgid "" -"Failed to get a storage resource. The system will attempt to get the storage " -"resource again. (resource: %(resource)s)" -msgstr "" -"Bir depolama kaynağı alınamadı. Sistem depolama kaynağını tekrar almaya " -"çalışacak. (kaynak: %(resource)s)" - -#, python-format -msgid "Failed to get all associations of qos specs %s" -msgstr "%s qos özelliklerinin bütün ilişkileri alınamadı" - -msgid "Failed to get channel info." -msgstr "Kanal bilgisi alınamadı." - -#, python-format -msgid "Failed to get code level (%s)." -msgstr "Kod seviyesi alınamadı (%s)." - -msgid "Failed to get device info." -msgstr "Aygıt bilgisi alınamadı." - -#, python-format -msgid "Failed to get domain because CPG (%s) doesn't exist on array." -msgstr "Alan alınamadı çünkü CPG (%s) dizide mevcut değil." - -#, python-format -msgid "Failed to get ip on Channel %(channel_id)s with volume: %(volume_id)s." -msgstr "" -"%(volume_id)s mantıksal sürücüsüne sahip %(channel_id)s kanalından ip " -"alınamadı." 
- -msgid "Failed to get iqn info." -msgstr "Iqn bilgisi alma başarısız." - -msgid "Failed to get license info." -msgstr "Lisans bilgisi alma başarısız." - -msgid "Failed to get lv info." -msgstr "lv bilgisi alınamadı." - -msgid "Failed to get map info." -msgstr "Eşleştirme bilgisi alma başarısız." - -msgid "Failed to get model update from clone" -msgstr "Kopyadan model güncellemesi alınamadı" - -msgid "Failed to get name server info." -msgstr "İsim sunucusu bilgisi alınamadı." - -msgid "Failed to get network info." -msgstr "Ağ bilgisi alma başarısız." - -#, python-format -msgid "Failed to get new part id in new pool: %(pool_id)s." -msgstr "Yeni havuzda yeni bölüm kimliği alınamadı: %(pool_id)s." - -msgid "Failed to get partition info." -msgstr "Bölüm bilgisi alınamadı." - -#, python-format -msgid "Failed to get pool id with volume %(volume_id)s." -msgstr "%(volume_id)s mantıksal sürücüsüne sahip havuz kimliği alınamadı." - -msgid "Failed to get replica info." -msgstr "Kopya bilgisi alma başarısız." - -msgid "Failed to get show fcns database info." -msgstr "Fcns veri tabanı bilgisi göstermeyi alma başarısız." - -#, python-format -msgid "Failed to get size of volume %s" -msgstr "%s mantıksal sürücü boyutu alınamadı" - -#, python-format -msgid "Failed to get snapshot for volume %s." -msgstr "%s mantıksal sürücüsü için anlık sistem görüntüsü alınamadı." - -msgid "Failed to get snapshot info." -msgstr "Anlık görüntü bilgisi alınamadı." - -#, python-format -msgid "Failed to get target IQN for the LUN %s" -msgstr "LUN %s için hedef IQN alınması başarısız" - -#, python-format -msgid "Failed to get target portal for the LUN %s" -msgstr "LUN %s için hedef portal alınması başarısız" - -msgid "Failed to get targets" -msgstr "Hedefler alınamadı" - -msgid "Failed to get wwn info." -msgstr "wwn bilgisi alma başarısız." - -#, python-format -msgid "" -"Failed to get, create or add volume %(volumeName)s to masking view " -"%(maskingViewName)s. 
The error message received was %(errorMessage)s." -msgstr "" -"%(volumeName)s mantıksal sürücüsünün %(maskingViewName)s maskeleme " -"görünümüne alınması, oluşturulması ya da eklenmesi başarısız. Alınan hata " -"iletisi %(errorMessage)s." - -msgid "Failed to identify volume backend." -msgstr "Mantıksal sürücü art alanda çalışan uygulama tanımlanamadı." - -#, python-format -msgid "Failed to link fileset for the share %(cgname)s. Error: %(excmsg)s." -msgstr "" -"%(cgname)s paylaşımı için dosya kümesi bağlantısı başarısız. Hata: " -"%(excmsg)s." - -#, python-format -msgid "Failed to log on %s Array (invalid login?)." -msgstr "%s Dizisine giriş başarısız (geçersiz giriş?)." - -#, python-format -msgid "Failed to login for user %s." -msgstr "%s kullanıcısı giriş yapamadı." - -#, python-format -msgid "" -"Failed to make a request to Datera cluster endpoint due to the following " -"reason: %s" -msgstr "" -"Aşağıdaki neden nedeniyle Datera kümesi uç noktasına bir istek yapılamadı: %s" - -#, python-format -msgid "" -"Failed to manage existing volume %(name)s, because of error in getting " -"volume size." -msgstr "" -"Mevcut mantıksal sürücü %(name)s yönetilemedi, çünkü mantıksal sürücü boyutu " -"alınamadı." - -#, python-format -msgid "" -"Failed to manage existing volume %(name)s, because rename operation failed: " -"Error msg: %(msg)s." -msgstr "" -"Mevcut mantıksal sürücü %(name)s yönetilemiyor, çünkü yeniden adlandırma " -"işlemi başarısız oldu: Hata iletisi: %(msg)s." - -#, python-format -msgid "" -"Failed to manage existing volume %(name)s, because reported size %(size)s " -"was not a floating-point number." -msgstr "" -"Varolan %(name)s mantıksal sürücüsü yönetilemedi çünkü bildirilen boyut " -"%(size)s kayan noktalı sayı değildi." - -#, python-format -msgid "Failed to manage volume %s." -msgstr "%s mantıksal sürücüsü yönetilemedi." - -#, python-format -msgid "" -"Failed to map a logical device. 
(LDEV: %(ldev)s, LUN: %(lun)s, port: " -"%(port)s, id: %(id)s)" -msgstr "" -"Bir mantıksal aygıt eşleştirilemedi. (LDEV: %(ldev)s, LUN: %(lun)s, bağlantı " -"noktası: %(port)s, id: %(id)s)" - -msgid "Failed to migrate volume for the first time." -msgstr "Mantıksal sürücüyü ilk defa göç ettirme başarısız." - -msgid "Failed to migrate volume for the second time." -msgstr "Bir ikinci defa mantıksal sürücüyü göç ettirme başarısız." - -#, python-format -msgid "Failed to move LUN mapping. Return code: %s" -msgstr "LUN eşleştirmesi taşınamadı. Dönüş kodu: %s" - -#, python-format -msgid "Failed to move volume %s." -msgstr "%s mantıksal sürücüsü taşınamadı." - -#, python-format -msgid "Failed to open a file. (file: %(file)s, ret: %(ret)s, stderr: %(err)s)" -msgstr "Bir dosya açılamadı. (dosya: %(file)s, ret: %(ret)s, stderr: %(err)s)" - -#, python-format -msgid "" -"Failed to parse CLI output:\n" -" command: %(cmd)s\n" -" stdout: %(out)s\n" -" stderr: %(err)s." -msgstr "" -"CLI çıktısı ayrıştırılamadı:\n" -" komut: %(cmd)s\n" -" stdout: %(out)s\n" -" stderr: %(err)s." - -msgid "" -"Failed to parse the configuration option 'swift_catalog_info', must be in " -"the form ::" -msgstr "" -":: biçeminde olması gereken " -"'swift_catalog_info' yapılandırma seçeneği ayrıştırılamadı" - -#, python-format -msgid "" -"Failed to perform a zero-page reclamation. (LDEV: %(ldev)s, reason: " -"%(reason)s)" -msgstr "" -"Sıfır-sayfa geri kazanımı başarısız. (LDEV: %(ldev)s, sebep: %(reason)s)" - -#, python-format -msgid "Failed to remove export for volume %(volume)s: %(reason)s" -msgstr "" -"%(volume)s mantıksal sürücüsü için dışa aktarım kaldırılamadı: %(reason)s" - -#, python-format -msgid "Failed to remove iscsi target for volume %(volume_id)s." -msgstr "%(volume_id)s mantıksal sürücüsü için iscsi hedefi kaldırılamadı." - -#, python-format -msgid "Failed to remove volume %(volumeName)s from default SG." -msgstr "%(volumeName)s mantıksal sürücüsü varsayılan SG'den kaldırılamadı." 
- -#, python-format -msgid "Failed to remove volume %(volumeName)s from default SG: %(volumeName)s." -msgstr "%(volumeName)s varsayılan SG'den kaldırılamadı: %(volumeName)s." - -#, python-format -msgid "" -"Failed to remove: %(volumename)s. from the default storage group for FAST " -"policy %(fastPolicyName)s." -msgstr "" -"FAST ilkesi %(fastPolicyName)s için öntanımlı depolama grubundan " -"%(volumename)s kaldırılamadı." - -#, python-format -msgid "" -"Failed to rename logical volume %(name)s, error message was: %(err_msg)s" -msgstr "" -"%(name)s adındaki mantıksal sürücü yeniden adlandırılamadı, hata iletisi: " -"%(err_msg)s" - -#, python-format -msgid "Failed to retrieve active zoning configuration %s" -msgstr "Etkin bölgeleme yapılandırması %s alınamadı" - -#, python-format -msgid "Failed to set QoS for existing volume %(name)s, Error msg: %(msg)s." -msgstr "" -"Mevcut mantıksal sürücü %(name)s için QoS ayarlanamadı, Hata iletisi: " -"%(msg)s." - -msgid "Failed to set attribute 'Incoming user' for SCST target." -msgstr "" -"SCST hedefi için 'Gelen kullanıcı' özniteliğinin ayarlanması başarısız." - -msgid "Failed to set partition." -msgstr "Bölüm ayarlama başarısız." - -#, python-format -msgid "" -"Failed to set permissions for the consistency group %(cgname)s. Error: " -"%(excmsg)s." -msgstr "" -"%(cgname)s tutarlılık grubu için izinler ayarlanamadı. Hata: %(excmsg)s." - -#, python-format -msgid "" -"Failed to specify a logical device for the volume %(volume_id)s to be " -"unmapped." -msgstr "" -"Eşleştirmesi kaldırılacak %(volume_id)s mantıksal sürücüsü için bir " -"mantıksal aygıt belirtme başarısız." - -#, python-format -msgid "" -"Failed to specify a logical device to be deleted. (method: %(method)s, id: " -"%(id)s)" -msgstr "" -"Silinecek mantıksal aygıt belirtme başarısız. (metod: %(method)s, id: %(id)s)" - -msgid "Failed to terminate migrate session." -msgstr "Göç oturumunu sonlandırma başarısız." 
- -#, python-format -msgid "" -"Failed to unlink fileset for consistency group %(cgname)s. Error: %(excmsg)s." -msgstr "" -"%(cgname)s tutarlılık grubu için dosya kümesi bağı ayırma başarısız. Hata: " -"%(excmsg)s." - -#, python-format -msgid "Failed to unmap a logical device. (LDEV: %(ldev)s, reason: %(reason)s)" -msgstr "" -"Bir mantıksal aygıtın eşleştirilmesi kaldırılamadı. (LDEV: %(ldev)s, sebep: " -"%(reason)s)" - -#, python-format -msgid "Failed to update metadata for volume: %(reason)s" -msgstr "Mantıksal sürücü için metadata güncellenemedi: %(reason)s" - -msgid "Failed to update or delete zoning configuration" -msgstr "Bölgeleme yapılandırması güncellemesi ya da silinmesi başarısız" - -#, python-format -msgid "Failed to update qos_specs: %(specs_id)s with specs %(qos_specs)s." -msgstr "qos_specs güncellenemedi: %(qos_specs)s özellikleri ile %(specs_id)s." - -#, python-format -msgid "" -"Failed updating volume %(vol_id)s metadata using the provided %(src_type)s " -"%(src_id)s metadata" -msgstr "" -"%(vol_id)s mantıksal sürücüsünün metadata'sının sağlanan %(src_type)s " -"%(src_id)s metadata ile güncellenmesi başarısız" - -#, python-format -msgid "Failure creating volume %s." -msgstr "%s mantıksal sürücüsünün oluşturulması başarısız." - -#, python-format -msgid "Failure getting LUN info for %s." -msgstr "%s için LUN bilgisi alınması başarısız." - -#, python-format -msgid "Failure moving new cloned LUN to %s." -msgstr "Yeni çoğaltılmış LUN'un %s e taşınması başarısız." - -#, python-format -msgid "Failure staging LUN %s to tmp." -msgstr "LUN %s'in tmp'ye hazırlanması başarısız." - -#, python-format -msgid "Fexvisor failed to add volume %(id)s due to %(reason)s." -msgstr "Flexvisor %(reason)s sebebiyle %(id)s mantıksal sürücüsünü ekleyemedi." - -#, python-format -msgid "" -"Fexvisor failed to join the volume %(vol)s in the group %(group)s due to " -"%(ret)s." -msgstr "" -"Flexvisor %(ret)s sebebiyle %(vol)s mantıksal sürücüsünü %(group)s grubuna " -"katamadı." 
- -#, python-format -msgid "" -"Fexvisor failed to remove the volume %(vol)s in the group %(group)s due to " -"%(ret)s." -msgstr "" -"Flexvisor %(ret)s sebebiyle %(vol)s mantıksal sürücüsünü %(group)s grubundan " -"çıkaramadı." - -#, python-format -msgid "Fexvisor failed to remove volume %(id)s due to %(reason)s." -msgstr "Flexvisor %(id)s mantıksal sürücüsnü %(reason)s sebebiyle kaldıramadı." - -#, python-format -msgid "Fibre Channel SAN Lookup failure: %(reason)s" -msgstr "Fiber Kanal SAN Arama başarısız: %(reason)s" - -#, python-format -msgid "Fibre Channel Zone operation failed: %(reason)s" -msgstr "Fiber Kanal Bölge işlemi başarısız oldu: %(reason)s" - -#, python-format -msgid "Fibre Channel connection control failure: %(reason)s" -msgstr "Fiber Kanal bağlantısı kontrol hatası: %(reason)s" - -#, python-format -msgid "File %(file_path)s could not be found." -msgstr "%(file_path)s dosyası bulunamadı." - -#, python-format -msgid "File %(path)s has invalid backing file %(bfile)s, aborting." -msgstr "" -"%(path)s dosyası geçersiz %(bfile)s destek dosyasına sahip, iptal ediliyor." - -#, python-format -msgid "File already exists at %s." -msgstr "%s konumunda dosya zaten var." - -#, python-format -msgid "File already exists at: %s" -msgstr "Dosya konumda zaten mevcut: %s" - -#, python-format -msgid "" -"Flash Cache Policy requires WSAPI version '%(fcache_version)s' version " -"'%(version)s' is installed." -msgstr "" -"Flash Zula İlkesi WSAPI sürümü '%(fcache_version)s' gerektirir, " -"'%(version)s' kurulu." - -#, python-format -msgid "Flexvisor assign volume failed.:%(id)s:%(status)s." -msgstr "Flexvisor mantıksal sürücü atama başarısız.:%(id)s:%(status)s." - -#, python-format -msgid "Flexvisor assign volume failed:%(id)s:%(status)s." -msgstr "Flexvisor mantıksal sürücü atama başarısız:%(id)s:%(status)s." - -#, python-format -msgid "" -"Flexvisor could not find volume %(id)s snapshot in the group %(vgid)s " -"snapshot %(vgsid)s." 
-msgstr "" -"Flexvisor %(id)s mantıksal sürücü anlık görüntüsünü %(vgid)s grubu %(vgsid)s " -"anlık görüntüsünde bulamadı." - -#, python-format -msgid "Flexvisor create volume failed.:%(volumeid)s:%(status)s." -msgstr "" -"Flexvisor mantıksal sürücü oluşturma başarısız.:%(volumeid)s:%(status)s." - -#, python-format -msgid "Flexvisor failed deleting volume %(id)s: %(status)s." -msgstr "Flexvisor %(id)s mantıksal sürücüsünü silmede başarısız: %(status)s." - -#, python-format -msgid "Flexvisor failed to add volume %(id)s to group %(cgid)s." -msgstr "Flexvisor %(id)s mantıksal sürücüsünü %(cgid)s grubuna ekleyemedi." - -#, python-format -msgid "" -"Flexvisor failed to assign volume %(id)s due to unable to query status by " -"event id." -msgstr "" -"Flexvisor durumu olay kimliğiyle sorgulayamadığından %(id)s mantıksal " -"sürücüsünü atayamadı." - -#, python-format -msgid "Flexvisor failed to assign volume %(id)s: %(status)s." -msgstr "Flexvisor %(id)s mantıksal sürücü atamasında başarısız: %(status)s." - -#, python-format -msgid "Flexvisor failed to assign volume %(volume)s iqn %(iqn)s." -msgstr "Flexvisor %(volume)s mantıksal sürücü %(iqn)s iqn ataması başarısız." - -#, python-format -msgid "Flexvisor failed to clone volume %(id)s: %(status)s." -msgstr "" -"Flexvisor %(id)s mantıksal sürücüsünü çoğaltmada başarısız: %(status)s." - -#, python-format -msgid "Flexvisor failed to clone volume (failed to get event) %(id)s." -msgstr "" -"Flexvisor mantıksal sürücüyü çoğaltmada başarısız (olay alma başarısız) " -"%(id)s." - -#, python-format -msgid "Flexvisor failed to create snapshot for volume %(id)s: %(status)s." -msgstr "" -"Flexvisor %(id)s mantıksal sürücüsü için anlık görüntü oluşturma başarısız: " -"%(status)s." - -#, python-format -msgid "" -"Flexvisor failed to create snapshot for volume (failed to get event) %(id)s." -msgstr "" -"Flexvisor %(id)s mantıksal sürücüsü için anlık görüntü oluşturmada başarısız " -"(olay alma başarısız)." 
- -#, python-format -msgid "Flexvisor failed to create volume %(id)s in the group %(vgid)s." -msgstr "" -"Flexvisor %(vgid)s grubunda %(id)s mantıksal sürücüsü oluşturmada başarısız." - -#, python-format -msgid "Flexvisor failed to create volume %(volume)s: %(status)s." -msgstr "" -"Flexvisor %(volume)s mantıksal sürücüsünü oluşturmada başarısız: %(status)s." - -#, python-format -msgid "Flexvisor failed to create volume (get event) %s." -msgstr "Flexvisor mantıksal sürücü oluşturmada başarısız (alma olayı) %s." - -#, python-format -msgid "Flexvisor failed to create volume from snapshot %(id)s: %(status)s." -msgstr "" -"Flexvisor anlık görüntüden mantıksal sürücü oluşturmada başarısız %(id)s: " -"%(status)s." - -#, python-format -msgid "Flexvisor failed to create volume from snapshot %(id)s:%(status)s." -msgstr "" -"Flexvisor %(id)s anlık görüntüsünden mantıksal sürücü oluşturamadı:" -"%(status)s." - -#, python-format -msgid "" -"Flexvisor failed to create volume from snapshot (failed to get event) %(id)s." -msgstr "" -"Flexvisor anlık görüntüden mantıksal sürücü oluşturma başarısız (olay alma " -"başarısız) %(id)s." - -#, python-format -msgid "Flexvisor failed to delete snapshot %(id)s: %(status)s." -msgstr "Flexvisr %(id)s anlık görüntüsünü silmede başarısız: %(status)s." - -#, python-format -msgid "Flexvisor failed to delete snapshot (failed to get event) %(id)s." -msgstr "" -"Flexvisor %(id)s anlık görüntüsünü silmede başarısız (olay alma başarısız)." - -#, python-format -msgid "Flexvisor failed to delete volume %(id)s: %(status)s." -msgstr "Flexvisor %(id)s mantıksal sürücüsünü silmede başarısız: %(status)s." - -#, python-format -msgid "Flexvisor failed to extend volume %(id)s: %(status)s." -msgstr "Flexvisor %(id)s mantıksal sürücüsünü büyütmede başarısız: %(status)s." - -#, python-format -msgid "Flexvisor failed to extend volume %(id)s:%(status)s." -msgstr "Flexvisor %(id)s mantıksal sürücüsünü büyütmede başarısız:%(status)s." 
- -#, python-format -msgid "Flexvisor failed to extend volume (failed to get event) %(id)s." -msgstr "" -"Flexvisor mantıksal sürücüsünü büyütme başarısız (olay almada başarısız) " -"%(id)s." - -#, python-format -msgid "Flexvisor failed to get pool info %(id)s: %(status)s." -msgstr "Flexvisor havuz bilgisi %(id)s alamadı: %(status)s." - -#, python-format -msgid "" -"Flexvisor failed to get snapshot id of volume %(id)s from group %(vgid)s." -msgstr "" -"Flexvisor %(id)s mantıksal sürücüsünün anlık görüntü kimliğini %(vgid)s " -"grubundan alamadı." - -#, python-format -msgid "Flexvisor failed to remove volume %(id)s from group %(cgid)s." -msgstr "Flexvisor %(id)s mantıksal sürücüsünü %(cgid)s grubundan kaldıramadı." - -#, python-format -msgid "Flexvisor failed to spawn volume from snapshot %(id)s:%(status)s." -msgstr "" -"Flexvisor anlık görüntüden mantıksal sürücü başlatamadı %(id)s:%(status)s." - -#, python-format -msgid "" -"Flexvisor failed to spawn volume from snapshot (failed to get event) %(id)s." -msgstr "" -"Flexvisor anlık görüntüden mantıksal sürücü başlatamadı (olay almada " -"başarısız) %(id)s." - -#, python-format -msgid "Flexvisor failed to unassign volume %(id)s: %(status)s." -msgstr "Flexvisor %(id)s mantıksal sürücü atamasını kaldıramadı: %(status)s." - -#, python-format -msgid "Flexvisor failed to unassign volume (get event) %(id)s." -msgstr "Flexvisor %(id)s mantıksal sürücü atamasını kaldıramadı (alma olayı)." - -#, python-format -msgid "Flexvisor failed to unassign volume:%(id)s:%(status)s." -msgstr "" -"Flexvisor mantıksal sürücü atamasını kaldırma başarısız:%(id)s:%(status)s." - -#, python-format -msgid "Flexvisor unable to find the source volume %(id)s info." -msgstr "Flexvisor %(id)s kaynak mantıksal sürücü bilgisini bulamadı." - -#, python-format -msgid "Flexvisor unassign volume failed:%(id)s:%(status)s." -msgstr "" -"Flexvisor mantıksal sürücü atamasını kaldırma başarısız:%(id)s:%(status)s." 
- -#, python-format -msgid "Flexvisor volume %(id)s failed to join group %(vgid)s." -msgstr "" -"Flexvisor mantıksal sürücüsü %(id)s %(vgid)s grubuna katılmada başarısız." - -#, python-format -msgid "GPFS is not running, state: %s." -msgstr "GPFS çalışmıyor, durum: %s." - -msgid "Gateway VIP is not set" -msgstr "Geçit VIP ayarlanmamış" - -#, python-format -msgid "" -"Glance metadata cannot be updated, key %(key)s exists for volume id " -"%(volume_id)s" -msgstr "" -"Glance metadata güncellenemez, mantıksal sürücü kimliği %(volume_id)s için " -"%(key)s anahtarı mevcuttur" - -#, python-format -msgid "Glance metadata for volume/snapshot %(id)s cannot be found." -msgstr "" -"Mantıksal sürücü/anlık sistem görüntüsü %(id)s için glance metadata " -"bulunamaz." - -#, python-format -msgid "Gluster config file at %(config)s doesn't exist" -msgstr "%(config)s konumunda Gluster yapılandırma dosyası yok" - -#, python-format -msgid "Got bad path information from DRBDmanage! (%s)" -msgstr "DRBDmanage'den kötü yol bilgisi alındı! (%s)" - -msgid "HBSD error occurs." -msgstr "HBSD hatası oluşur." - -#, python-format -msgid "" -"Hash block size has changed since the last backup. New hash block size: " -"%(new)s. Old hash block size: %(old)s. Do a full backup." -msgstr "" -"Hash blok boyutu son yedeklemeden bu yana değişti. Yeni hash blok boyutu: " -"%(new)s. Eski hash blok boyutu: %(old)s. Tam bir yedekleme yapın." - -#, python-format -msgid "Have not created %(tier_levels)s tier(s)." -msgstr "%(tier_levels)s aşama oluşturulmadı." - -#, python-format -msgid "Hint \"%s\" not supported." -msgstr "\"%s\" ipucu desteklenmiyor." - -msgid "Host" -msgstr "Host" - -#, python-format -msgid "Host %(host)s could not be found." -msgstr "%(host)s sunucusu bulunamadı." - -#, python-format -msgid "" -"Host %(host)s does not match x509 certificate contents: CommonName " -"%(commonName)s." -msgstr "" -"%(host)s istemcisi x509 sertifika içerikleriyle eşleşmiyor: CommonName " -"%(commonName)s." 
- -#, python-format -msgid "Host %s has no FC initiators" -msgstr "%s istemcisinin FC başlatıcısı yok" - -#, python-format -msgid "Host group with name %s not found" -msgstr "%s isimli istemci grubu bulunamadı" - -#, python-format -msgid "Host group with ref %s not found" -msgstr "%s başvurusuna sahip istemci grubu bulunamadı" - -#, python-format -msgid "Host not found. Failed to remove %(service)s on %(host)s." -msgstr "Host bulunamadı.%(host)s uzerindeki %(service)s silinemiyor." - -#, python-format -msgid "Host type %s not supported." -msgstr "İstemci türü %s desteklenmiyor." - -#, python-format -msgid "Host with ports %(ports)s not found." -msgstr "%(ports)s bağlantı noktalarına sahip istemci bulunamadı." - -#, python-format -msgid "I/O group %(iogrp)d is not valid; available I/O groups are %(avail)s." -msgstr "" -"I/O grubu %(iogrp)d geçerli değil; kullanılabilir I/O grupları %(avail)s." - -msgid "ID" -msgstr "KİMLİK" - -msgid "" -"If compression is set to True, rsize must also be set (not equal to -1)." -msgstr "" -"Sıkıştırma True olarak ayarlanırsa, rsize da ayrıca ayarlanmalı (-1 e eşit " -"değil)." - -#, python-format -msgid "" -"Illegal value '%(prot)s' specified for flashsystem_connection_protocol: " -"valid value(s) are %(enabled)s." -msgstr "" -"flashsystem_connection_protocol için geçersiz değer'%(prot)s' belirtilmiş: " -"geçerli değer(ler) %(enabled)s." - -msgid "" -"Illegal value specified for storwize_svc_vol_grainsize: set to either 32, " -"64, 128, or 256." -msgstr "" -"storwize_svc_vol_grainsize için geçersiz değer belirtildi: 32, 64, 128 veya " -"256 olarak ayarlayın." - -#, python-format -msgid "Image %(image_id)s could not be found." -msgstr "%(image_id)s imaj kaynak dosyası bulunamadı." - -#, python-format -msgid "Image %(image_id)s is not active." -msgstr "İmaj %(image_id)s etkin değil." 
- -#, python-format -msgid "Image %(image_id)s is unacceptable: %(reason)s" -msgstr "%(image_id)s imajı kabul edilemez: %(reason)s" - -msgid "Image location not present." -msgstr "İmaj konumu mevcut değil." - -msgid "" -"ImageBusy error raised while deleting rbd volume. This may have been caused " -"by a connection from a client that has crashed and, if so, may be resolved " -"by retrying the delete after 30 seconds has elapsed." -msgstr "" -"Rbd mantıksal sürücüsü silinirken ImageBusy hatası yükseldi. Bu çökmüş bir " -"istemciden gelen bir bağlantı yüzünden olabilir, bu durumda, 30 saniye " -"geçtikten sonra silmeyi tekrar denemek çözebilir." - -#, python-format -msgid "" -"Import record failed, cannot find backup service to perform the import. " -"Request service %(service)s" -msgstr "" -"Kayıt içe aktarımı başarısız oldu, içe aktarımı gerçekleştirecek yedekleme " -"servisi bulunamıyor. %(service)s servisini iste" - -msgid "Incorrect request body format" -msgstr "Geçersiz gövde biçimi isteği." - -msgid "Incorrect request body format." -msgstr "Hatalı istek gövde biçimi." - -msgid "Incremental backups exist for this backup." -msgstr "Bu yedek için artımlı yedeklemeler mevcut." - -#, python-format -msgid "" -"Infortrend CLI exception: %(err)s Param: %(param)s (Return Code: %(rc)s) " -"(Output: %(out)s)" -msgstr "" -"Infortrend CLI istisnası: %(err)s Param: %(param)s (Dönüş Kodu: %(rc)s) " -"(Çıktı: %(out)s)" - -msgid "Input volumes or snapshots are invalid." -msgstr "Girdi mantıksal sürücüleri ve anlık sistem görüntüleri geçersizdir." - -#, python-format -msgid "Instance %(uuid)s could not be found." -msgstr "%(uuid)s örneği bulunamadı." 
- -msgid "Insufficient privileges" -msgstr "Yetersiz ayrıcalıklar" - -#, python-format -msgid "Invalid 3PAR Domain: %(err)s" -msgstr "Geçersiz 3PAR Alanı: %(err)s" - -msgid "Invalid Ceph args provided for backup rbd operation" -msgstr "Rbd işlem yedeklemesi için verilen Ceph argümanları geçersiz" - -#, python-format -msgid "Invalid CgSnapshot: %(reason)s" -msgstr "Geçersiz CgSnapshot: %(reason)s" - -#, python-format -msgid "Invalid ConsistencyGroup: %(reason)s" -msgstr "Geçersiz TutarlılıkGrubu: %(reason)s" - -msgid "Invalid ConsistencyGroup: No host to create consistency group" -msgstr "Geçersiz TutarlılıkGrubu: Tutarlılık grubu oluşturmak için istemci yok" - -#, python-format -msgid "Invalid IP address format: '%s'" -msgstr "Geçersiz IP adres biçimi: '%s'" - -#, python-format -msgid "" -"Invalid QoS specification detected while getting QoS policy for volume %s" -msgstr "" -"%s mantıksal sürücüsü için QoS ilkesi alırken geçersiz QoS özellikleri " -"algılandı" - -#, python-format -msgid "" -"Invalid Virtuozzo Storage share specification: %r. Must be: [MDS1[," -"MDS2],...:/][:PASSWORD]." -msgstr "" -"Geçersiz Virtuozzo Depolama paylaşım belirtimi: %r. Şu şekilde olmalıdır: " -"[MDS1[,MDS2],...:/][:PASSWORD]." - -#, python-format -msgid "Invalid XtremIO version %(cur)s, version %(min)s or up is required" -msgstr "Geçersiz XtremIO sürümü %(cur)s, sürüm %(min)s veya yukarısı gerekli" - -msgid "Invalid argument" -msgstr "Geçersiz değişken" - -#, python-format -msgid "Invalid argument - whence=%s not supported" -msgstr "Geçersiz değişken - whence=%s desteklenmez" - -#, python-format -msgid "Invalid attaching mode '%(mode)s' for volume %(volume_id)s." -msgstr "%(volume_id)s mantıksal sürücüsü için geçersiz ekleme kipi '%(mode)s'." 
- -#, python-format -msgid "Invalid auth key: %(reason)s" -msgstr "Geçersiz kimlik doğrulama anahtarı: %(reason)s" - -#, python-format -msgid "Invalid backup: %(reason)s" -msgstr "Geçersiz yedekleme: %(reason)s" - -msgid "Invalid chap user details found in CloudByte storage." -msgstr "CloudByte depolamada geçersiz chap kullanıcı ayrıntıları bulundu." - -#, python-format -msgid "Invalid connection initialization response of volume %(name)s" -msgstr "%(name)s mantıksal sürücüsünün geçersiz bağlantı ilklendirme yanıtı" - -#, python-format -msgid "" -"Invalid connection initialization response of volume %(name)s: %(output)s" -msgstr "" -"%(name)s mantıksal sürücüsünün geçersiz bağlantı ilklendirme yanıtı: " -"%(output)s" - -#, python-format -msgid "Invalid content type %(content_type)s." -msgstr "Geçersiz içerik türü %(content_type)s." - -msgid "Invalid credentials" -msgstr "Geçersiz kimlik bilgileri" - -#, python-format -msgid "Invalid directory: %s" -msgstr "Geçersiz dizin: %s" - -#, python-format -msgid "Invalid disk adapter type: %(invalid_type)s." -msgstr "Geçersiz disk bağdaştırıcı türü: %(invalid_type)s." - -#, python-format -msgid "Invalid disk backing: %s." -msgstr "Geçersiz disk desteği: %s." - -#, python-format -msgid "Invalid disk type: %(disk_type)s." -msgstr "Geçersiz disk türü: %(disk_type)s." - -#, python-format -msgid "Invalid disk type: %s." -msgstr "Geçersiz disk türü: %s." - -#, python-format -msgid "Invalid host: %(reason)s" -msgstr "Geçersiz istemci: %(reason)s" - -#, python-format -msgid "Invalid image href %(image_href)s." -msgstr "Geçersiz %(image_href)s imaj kaynak dosyası." - -msgid "Invalid image identifier or unable to access requested image." -msgstr "Geçersiz imaj tanımlayıcı ya da istenen imaja erişilemedi." - -msgid "Invalid imageRef provided." -msgstr "Geçersiz imaj referansı verildi." 
- -msgid "Invalid input" -msgstr "Geçersiz girdi" - -#, python-format -msgid "Invalid input received: %(reason)s" -msgstr "Geçersiz girdi aldı: %(reason)s" - -#, python-format -msgid "Invalid is_public filter [%s]" -msgstr "Geçersiz is_public süzgeci [%s]" - -#, python-format -msgid "Invalid metadata size: %(reason)s" -msgstr "Geçersiz metadata boyutu: %(reason)s" - -#, python-format -msgid "Invalid metadata: %(reason)s" -msgstr "Geçersiz metadata: %(reason)s" - -#, python-format -msgid "Invalid mount point base: %s" -msgstr "Geçersiz bağlantı noktası tabanı: %s" - -#, python-format -msgid "Invalid mount point base: %s." -msgstr "Geçersiz paylaşım noktası tabanı: %s." - -#, python-format -msgid "Invalid new snapCPG name for retype. new_snap_cpg='%s'." -msgstr "Retype için geçersiz yeni snapCPG ismi. new_snap_cpg='%s'." - -#, python-format -msgid "Invalid qos specs: %(reason)s" -msgstr "Geçersiz qos özellikleri: %(reason)s" - -msgid "Invalid request to attach volume to an invalid target" -msgstr "Geçersiz bir hedefe geçersiz mantıksal sürücü ekleme isteği" - -msgid "" -"Invalid request to attach volume with an invalid mode. Attaching mode should " -"be 'rw' or 'ro'" -msgstr "" -"Geçersiz bir kip ile geçersiz mantıksal sürücü ekleme isteği. Ekleme kipi " -"'rw' ya da 'ro' olmalıdır" - -#, python-format -msgid "Invalid reservation expiration %(expire)s." -msgstr "Geçersiz koşul sonu %(expire)s." - -msgid "Invalid service catalog json." -msgstr "Geçersiz servis katalogu json." - -#, python-format -msgid "Invalid snapshot: %(reason)s" -msgstr "Geçersiz anlık sistem görüntüsü: %(reason)s" - -#, python-format -msgid "Invalid status: '%s'" -msgstr "Geçersiz durum: %s" - -#, python-format -msgid "Invalid storage pool %s requested. Retype failed." -msgstr "%s geçersiz depolama havuzu istendi. Retype başarısız." - -#, python-format -msgid "Invalid storage pool %s specificed." -msgstr "Geçersiz depolama havuzu %s belirtildi." 
- -#, python-format -msgid "Invalid update setting: '%s'" -msgstr "Geçersiz güncelleme ayarı: '%s'" - -#, python-format -msgid "Invalid value '%s' for force." -msgstr "Zorlama için geçersiz değer '%s'." - -#, python-format -msgid "Invalid value '%s' for force. " -msgstr "Zorlama için geçersiz değer '%s'. " - -#, python-format -msgid "Invalid value '%s' for is_public. Accepted values: True or False." -msgstr "" -"is_public için %s yanlış bir değerdir. Kabul edilebilir değerler: True yada " -"False. " - -#, python-format -msgid "Invalid value '%s' for skip_validation." -msgstr "skip_validation için geçersiz değer '%s' " - -#, python-format -msgid "Invalid value for 'bootable': '%s'" -msgstr "'bootable' için geçersiz değer: '%s'" - -#, python-format -msgid "Invalid value for 'force': '%s'" -msgstr "'force' için geçersiz değer: '%s'" - -#, python-format -msgid "Invalid value for 'readonly': '%s'" -msgstr "'readonly' için geçersiz değer: '%s'" - -msgid "Invalid value for 'scheduler_max_attempts', must be >=1" -msgstr "'scheduler_max_attempts' için geçersiz değer, değer >=1 olmalıdır" - -msgid "Invalid value for NetApp configuration option netapp_host_type." -msgstr "NetApp yapılandırma seçeneği netapp_host_type için geçersiz değer." - -msgid "Invalid value for NetApp configuration option netapp_lun_ostype." -msgstr "NetApp yapılandırma seçeneği netapp_lun_ostype için geçersiz değer." - -#, python-format -msgid "Invalid value for age, %(age)s" -msgstr "Yaş için geçersiz değer, %(age)s" - -#, python-format -msgid "" -"Invalid volume size provided for create request: %s (size argument must be " -"an integer (or string representation of an integer) and greater than zero)." -msgstr "" -"İstek oluşturmak için sağlanan geçersiz mantıksal sürücü boyutu: %s (boyut " -"değişkeni bir tam sayı (ya da bir tam sayının karakter dizisi gösterimi) ve " -"sıfırdan büyük olmalıdır)." 
- -#, python-format -msgid "Invalid volume type: %(reason)s" -msgstr "Geçersiz mantıksal sürücü türü: %(reason)s" - -#, python-format -msgid "Invalid volume: %(reason)s" -msgstr "Geçersiz mantıksal sürücü: %(reason)s" - -#, python-format -msgid "" -"Invalid volume: Cannot add volume %(volume_id)s to consistency group " -"%(group_id)s because volume is in an invalid state: %(status)s. Valid states " -"are: ('available', 'in-use')." -msgstr "" -"Geçersiz mantıksal sürücü: %(volume_id)s mantıksal sürücüsü geçersiz " -"durumda: %(status)s olduğundan dolayı %(group_id)s tutarlılık grubuna " -"eklenemiyor. Geçerli durumlar: ('available', 'in-use')." - -#, python-format -msgid "" -"Invalid volume: Cannot add volume %(volume_id)s to consistency group " -"%(group_id)s because volume type %(volume_type)s is not supported by the " -"group." -msgstr "" -"Geçersiz mantıksal sürücü: %(volume_id)s mantıksal sürücüsü, %(volume_type)s " -"mantıksal sürücü türü grup tarafından desteklenmediğinden dolayı " -"%(group_id)s tutarlılık grubuna eklenemiyor." - -#, python-format -msgid "" -"Invalid volume: Cannot add volume fake-volume-uuid to consistency group " -"%(group_id)s because volume cannot be found." -msgstr "" -"Geçersiz mantıksal sürücü: fake-volume-uuid mantıksal sürücüsü %(group_id)s " -"tutarlılık grubuna eklenemiyor çünkü mantıksal sürücü bulunamıyor." - -#, python-format -msgid "" -"Invalid volume: Cannot remove volume fake-volume-uuid from consistency group " -"%(group_id)s because it is not in the group." -msgstr "" -"Geçersiz mantıksal sürücü: fake-volume-uuid mantıksal sürücüsü grupta " -"olmadığından dolayı %(group_id)s tutarlılık grubundan kaldırılamıyor." - -#, python-format -msgid "Invalid volume_type passed: %s." -msgstr "Geçersiz volume_type değeri geçildi: %s." - -#, python-format -msgid "" -"Invalid volume_type provided: %s (requested type is not compatible; either " -"match source volume, or omit type argument)." 
-msgstr "" -"Sağlanan volume_type geçersiz: %s (istenen tür uyumlu değil; ya kaynak " -"mantıksal sürüyü eşleştirin ya da tür değişkenini ihmal edin)." - -#, python-format -msgid "" -"Invalid volume_type provided: %s (requested type is not compatible; " -"recommend omitting the type argument)." -msgstr "" -"Sağlanan volume_type geçersiz: %s (istenen tür uyumlu değil; tür " -"değişkeninin ihmal edilmesi önerilir)." - -#, python-format -msgid "" -"Invalid volume_type provided: %s (requested type must be supported by this " -"consistency group)." -msgstr "" -"Geçersiz volume_type verildi: %s (istenen tür bu tutarlılık grubu tarafından " -"desteklenmelidir)." - -#, python-format -msgid "Invalid wwpns format %(wwpns)s" -msgstr "Geçersiz wwpns biçimi %(wwpns)s" - -msgid "Issue encountered waiting for job." -msgstr "İş için beklenirken durumla karşılaşıldı." - -msgid "Issue encountered waiting for synchronization." -msgstr "Eşzamanlama için beklenirken durumla karşılaşıldı." - -msgid "" -"Key names can only contain alphanumeric characters, underscores, periods, " -"colons and hyphens." -msgstr "" -"Anahtar adları sadece alfanumerik karakter, altçizgi, virgül, iki nokta üst " -"üste ve tire içerebilir." - -#, python-format -msgid "KeyError: %s" -msgstr "AnahtarHatası: %s" - -msgid "LUN export failed!" -msgstr "LUN dışa aktarma başarısız!" - -msgid "LUN map overflow on every channel." -msgstr "Her kanalda LUN eşleştirme taşması." - -#, python-format -msgid "LUN not found with given ref %s." -msgstr "Verilen %s referansına sahip LUN bulunamadı." - -#, python-format -msgid "LUN number is out of bound on channel id: %(ch_id)s." -msgstr "LUN sayısı %(ch_id)s kanal kimliğinde sınırların dışında." 
- -#, python-format -msgid "Last %s cinder syslog entries:-" -msgstr "Son %s cinder syslog girdileri:-" - -msgid "LeftHand cluster not found" -msgstr "LeftHand kümesi bulunamadı" - -#, python-format -msgid "Line %(dis)d : %(line)s" -msgstr "Satır %(dis)d : %(line)s" - -msgid "Link path already exists and its not a symlink" -msgstr "Bağlantı yolu zaten mevcut ve sembolik bağlantı değil" - -#, python-format -msgid "Linked clone of source volume not supported in state: %s." -msgstr "" -"Kaynak mantıksal sürücüsünün bağlantılı klonu bu durumda desteklenmiyor: %s." - -msgid "" -"Lookup service not configured. Config option for fc_san_lookup_service needs " -"to specify a concrete implementation of the lookup service." -msgstr "" -"Arama servisi yapılandırılmamış. fc_san_lookup_service için yapılandırma " -"seçeneğinin arama servisinin somut bir uygulamasını belirtmesi gerekir." - -#, python-format -msgid "Malformed fcns output string: %s" -msgstr "Bozuk fcns çıktı karakter dizisi: %s" - -#, python-format -msgid "Malformed message body: %(reason)s" -msgstr "Hatalı biçimlendirilmiş mesaj gövdesi: %(reason)s" - -#, python-format -msgid "Malformed nameserver string: %s" -msgstr "Bozuk isim sunucusu karakter dizisi: %s" - -msgid "Malformed request body" -msgstr "Kusurlu istek gövdesi" - -msgid "Malformed request body." -msgstr "Bozuk biçimli istek gövdesi." - -msgid "Malformed request url" -msgstr "Bozuk istel adresi" - -#, python-format -msgid "Malformed response to command %(cmd)s: %(reason)s" -msgstr "%(cmd)s komutu için bozuk yanıt: %(reason)s" - -msgid "Malformed scheduler_hints attribute" -msgstr "Bozuk scheduler_hints özelliği" - -#, python-format -msgid "Malformed show fcns database string: %s" -msgstr "Bozuk fcns veri tabanı gösterme karakter dizisi: %s" - -#, python-format -msgid "" -"Malformed zone configuration: (switch=%(switch)s zone_config=" -"%(zone_config)s)." -msgstr "" -"Bozuk bölge yapılandırması: (anahtar=%(switch)s zone_config=%(zone_config)s)." 
- -#, python-format -msgid "Malformed zone status: (switch=%(switch)s zone_config=%(zone_config)s)." -msgstr "Bozuk bölge durumu: (anahtar=%(switch)s zone_config=%(zone_config)s)." - -msgid "Manage existing get size requires 'id'." -msgstr "Mevcut alma boyutunu yönetme 'id' gerektirir." - -#, python-format -msgid "" -"Manage existing volume failed due to invalid backend reference " -"%(existing_ref)s: %(reason)s" -msgstr "" -"Geçersiz art alanda çalışan uygulama kaynağı %(existing_ref)s nedeniyle " -"varolan mantıksal sürücü yönetimi başarısız oldu: %(reason)s" - -#, python-format -msgid "Manage existing volume failed due to volume type mismatch: %(reason)s" -msgstr "" -"Mantıksal sürücü türü uyumsuzluğu nedeniyle varolan mantıksal sürücü " -"yönetimi başarısız oldu: %(reason)s" - -msgid "Manage existing volume not implemented." -msgstr "Mevcut mantıksal sürücünün yönetimi henüz uygulanmadı." - -msgid "Manage existing volume requires 'source-id'." -msgstr "Mevcut mantıksal sürücü yönetimi 'source-id' gerektirir." - -#, python-format -msgid "" -"Manage volume is not supported if FAST is enable. FAST policy: " -"%(fastPolicyName)s." -msgstr "" -"FAST etkinse mantıksal sürücü yönetme desteklenmez. FAST ilkesi: " -"%(fastPolicyName)s." - -#, python-format -msgid "" -"Mapping %(id)s prepare failed to complete within theallotted %(to)d seconds " -"timeout. Terminating." -msgstr "" -"%(id)s eşleştirmesine hazırlık ayrılan %(to)d saniye zaman aşımını içinde " -"başarılamadı. Çıkılıyor." 
- -#, python-format -msgid "Masking view %(maskingViewName)s was not deleted successfully" -msgstr "Maskeleme görünümü %(maskingViewName)s başarıyla silinemedi" - -#, python-format -msgid "Maximum number of backups allowed (%(allowed)d) exceeded" -msgstr "İzin verilen yedeklemelerin azami sayısı (%(allowed)d) aşıldı" - -#, python-format -msgid "Maximum number of snapshots allowed (%(allowed)d) exceeded" -msgstr "" -"İzin verilen anlık sistem görüntülerinin azami sayısı (%(allowed)d) aşıldı" - -#, python-format -msgid "" -"Maximum number of volumes allowed (%(allowed)d) exceeded for quota " -"'%(name)s'." -msgstr "" -"İzin verilen maksimum disk bölümü (%(allowed)d) kota aşıldı '%(name)s'." - -#, python-format -msgid "May specify only one of %s" -msgstr "%s'nin sadece biri belirtilebilir" - -msgid "Metadata backup already exists for this volume" -msgstr "Bu mantıksal sürücü için metadata yedeği zaten var" - -#, python-format -msgid "Metadata backup object '%s' already exists" -msgstr "Metadata yedek nesnesi '%s' zaten var" - -msgid "Metadata property key blank." -msgstr "Metadata özellik anahtarı boş." - -msgid "Metadata restore failed due to incompatible version" -msgstr "Uyumsuz sürüm nedeniyle metadata geri yüklemesi başarısız oldu" - -msgid "Metadata restore failed due to incompatible version." -msgstr "Metadata geri yüklemesi uyumsuz sürüm nedeniyle başarısız oldu." - -msgid "" -"Missing 'purestorage' python module, ensure the library is installed and " -"available." -msgstr "" -"Eksik 'purestorage' python modülü, kütüphanenin kurulu ve kullanılabilir " -"olduğuna emin olun." - -msgid "Missing Fibre Channel SAN configuration param - fc_fabric_names" -msgstr "Fiber Kanal SAN yapılandırma parametresi eksik - fc_fabric_names" - -msgid "Missing request body" -msgstr "Eksik istek gövdesi" - -msgid "Missing request body." -msgstr "Eksik istek gövdesi." 
- -#, python-format -msgid "Missing required element '%s' in request body" -msgstr "İstek gövdesinde gerekli öge '%s' eksik" - -#, python-format -msgid "Missing required element '%s' in request body." -msgstr "İstek gövdesinde gerekli öge '%s' eksik." - -msgid "Missing required element 'consistencygroup' in request body." -msgstr "İstek gövdesinde gerekli öge 'consistencygroup' eksik." - -msgid "Missing required element quota_class_set in request body." -msgstr "İstek gövdesinde gerekli quota_class_set ögesi eksik." - -#, python-format -msgid "Multiple copies of volume %s found." -msgstr "%s mantıksal sürücüsünün birden fazla kopyası bulundu." - -#, python-format -msgid "Multiple matches found for '%s', use an ID to be more specific." -msgstr "" -"'%s' için birden fazla eşleşme bulundu, daha belirli olacak bir ID kullanın." - -msgid "Multiple profiles found." -msgstr "Birden fazla profil bulundu." - -msgid "Must implement a fallback schedule" -msgstr "Bir geri dönüş programı uygulanmalı" - -msgid "Must implement find_retype_host" -msgstr "find_retype_host uygulanmalıdır" - -msgid "Must implement host_passes_filters" -msgstr "host_passes_filters uygulanmalıdır" - -msgid "Must implement schedule_create_consistencygroup" -msgstr "schedule_create_consistencygroup uygulanmalıdır" - -msgid "Must implement schedule_create_volume" -msgstr "schedule_create_volume uygulanmalıdır" - -msgid "Must implement schedule_get_pools" -msgstr "schedule_get_pools uygulanmalıdır" - -msgid "Must pass wwpn or host to lsfabric." -msgstr "lsfabric'e wwpn veya istemci geçirilmeli." - -msgid "Must specify 'connector'" -msgstr "'connector' belirtilmelidir" - -msgid "Must specify 'connector'." -msgstr "Belirtilmelidir 'connector'." - -msgid "Must specify 'host'." -msgstr "Belirtilmelidir 'host'." 
- -msgid "Must specify 'new_volume'" -msgstr "'new_volume' belirtilmelidir" - -msgid "Must specify 'status'" -msgstr "'status' belirtilmelidir" - -msgid "" -"Must specify 'status', 'attach_status' or 'migration_status' for update." -msgstr "" -"Güncelleme için 'status', 'attach_status' ya da 'migration_status' " -"belirtilmelidir." - -msgid "Must specify a valid attach status" -msgstr "Geçerli bir ekleme durumu belirtmelisiniz" - -msgid "Must specify a valid migration status" -msgstr "Geçerli bir göç durumu belirtilmelidir" - -#, python-format -msgid "Must specify a valid persona %(valid)s,value '%(persona)s' is invalid." -msgstr "Geçerli bir kişi belirtilmeli %(valid)s, değer '%(persona)s' geçersiz." - -#, python-format -msgid "" -"Must specify a valid provisioning type %(valid)s, value '%(prov)s' is " -"invalid." -msgstr "" -"Geçerli bir hazırlık türü belirtilmeli %(valid)s, değer '%(prov)s' geçersiz." - -msgid "Must specify a valid status" -msgstr "Geçerli bir durum belirtilmelidir" - -msgid "Must specify an ExtensionManager class" -msgstr "UzantıYöneticisi sınıfı belirlenmek zorunda" - -msgid "Must specify bootable in request." -msgstr "İstekte önyüklenebilir belirtilmelidir." - -msgid "Must specify protection domain name or protection domain id." -msgstr "Koruma alan ismi veya koruma alan id'si belirtmeli." - -msgid "Must specify readonly in request." -msgstr "İstekte salt okunur belirtilmelidir." - -msgid "Must specify storage pool name or id." -msgstr "Depolama havuzu ismi veya id'si belirtmeli." - -msgid "Must supply a positive, non-zero value for age" -msgstr "Yaş için pozitif, sıfırdan farklı bir değer sağlanmalı" - -#, python-format -msgid "" -"NAS config '%(name)s=%(value)s' invalid. Must be 'auto', 'true', or 'false'" -msgstr "" -"NAS yapılandırması '%(name)s=%(value)s' geçersiz. 'auto', 'true' ya da " -"'false' olmalıdır." 
- -#, python-format -msgid "NFS config file at %(config)s doesn't exist" -msgstr "%(config)s konumunda NFS yapılandırma dosyası yok" - -#, python-format -msgid "NFS file %s not discovered." -msgstr "NFS dosyası %s keşfedilmemiş." - -msgid "NFS file could not be discovered." -msgstr "NFS dosyası keşfedilemedi." - -msgid "Name" -msgstr "Ad" - -msgid "" -"Name, description, add_volumes, and remove_volumes can not be all empty in " -"the request body." -msgstr "" -"İstek gövdesinde ad, tanımlama, add_volumes ve remove_volumes seçeneklerinin " -"tümü boş olamaz." - -msgid "Need non-zero volume size" -msgstr "Sıfır olmayan mantıksal sürücü boyutu gerekir" - -msgid "NetApp Cinder Driver exception." -msgstr "NetApp Cinder Sürücü istisnası." - -#, python-format -msgid "" -"New size for extend must be greater than current size. (current: %(size)s, " -"extended: %(new_size)s)." -msgstr "" -"Genişletmek için yeni boyut mevcut boyuttan daha fazla olmalıdır. (mevcut: " -"%(size)s, genişletilmiş: %(new_size)s)." - -msgid "New volume size must be specified as an integer." -msgstr "Yeni mantıksal sürücü boyutu bir tam sayı olarak belirtilmelidir." - -msgid "New volume type must be specified." -msgstr "Yeni mantıksal sürücü türü belirtilmelidir." - -msgid "New volume type not specified in request_spec." -msgstr "request_spec içinde yeni mantıksal sürücü türü belirtilmemiş." - -msgid "Nimble Cinder Driver exception" -msgstr "Nimble Cinder Sürücü hatası" - -msgid "No FCP targets found" -msgstr "FCP hedefi bulunamadı" - -msgid "No active iSCSI portals with supplied iSCSI IPs" -msgstr "Sağlanan iSCSI IP'lerine sahip etkin iSCSI portalı yok" - -#, python-format -msgid "No available service named %s" -msgstr "%s adında kullanılabilir servis yok" - -#, python-format -msgid "No backup with id %s" -msgstr "%s kimlikli yedekleme yok" - -msgid "No backups available to do an incremental backup." -msgstr "Artımlı yedekleme için kullanılabilir hiçbir yedek yok." 
- -msgid "No big enough free disk" -msgstr "Yeterince büyük boş disk yok" - -#, python-format -msgid "No cgsnapshot with id %s" -msgstr "%s kimlikli hiçbir cgsnapshot yok" - -msgid "No cinder entries in syslog!" -msgstr "syslog içinde hiçbir cinder girdisi yok!" - -#, python-format -msgid "No cloned LUN named %s found on the filer" -msgstr "Dosyalayıcıda %s isimli çoğaltılmış LUN bulunamadı" - -msgid "No config node found." -msgstr "Yapılandırma düğümü bulunamadı." - -#, python-format -msgid "No consistency group with id %s" -msgstr "%s kimlikli hiçbir tutarlılık grubu yok" - -msgid "No errors in logfiles!" -msgstr "logfiles dosyasında hiçbir hata yok!" - -#, python-format -msgid "No file found with %s as backing file." -msgstr "%s için destek dosyası olacak bir dosya bulunamadı." - -#, python-format -msgid "" -"No free LUN IDs left. Maximum number of volumes that can be attached to host " -"(%s) has been exceeded." -msgstr "" -"Boş LUN ID'si kalmadı. İstemciye (%s) eklenebilecek azami mantıksal sürücü " -"sayısı aşıldı." - -msgid "No free disk" -msgstr "Boş disk yok" - -#, python-format -msgid "No good iscsi portal found in supplied list for %s." -msgstr "%s için sağlanan listede iyi iscsi portalı bulunamadı." - -#, python-format -msgid "No good iscsi portals found for %s." -msgstr "%s için iyi iscsi portalı bulunamadı." - -#, python-format -msgid "No host to create consistency group %s." -msgstr "%s tutarlılık grubu oluşturulacak istemci yok." - -msgid "No image_name was specified in request." -msgstr "İstekte hiçbir image_name belirtilmemiş." - -#, python-format -msgid "No initiator group found for initiator %s" -msgstr "%s başlatıcısı için hiçbir başlatıcı grup bulunamadı" - -msgid "No initiators found, cannot proceed" -msgstr "Başlatıcı bulunamadı, devam edilemiyor" - -#, python-format -msgid "No interface found on cluster for ip %s" -msgstr "Kümede %s ip'si için arayüz bulunamadı" - -msgid "No ip address found." -msgstr "Ip adresi bulunamadı." 
- -msgid "No iscsi auth groups were found in CloudByte." -msgstr "CloudByte'da iscsi yetkilendirme grubu bulunamadı." - -msgid "No iscsi initiators were found in CloudByte." -msgstr "CloudByte içinde hiçbir iscsi başlatıcı bulunamadı." - -#, python-format -msgid "No iscsi service found for CloudByte volume [%s]." -msgstr "" -"[%s] CloudByte mantıksal sürücüsü için hiçbir iscsi servisi bulunamadı." - -msgid "No iscsi services found in CloudByte storage." -msgstr "CloudByte depolamada hiçbir iscsi servisi bulunamadı." - -#, python-format -msgid "No key file specified and unable to load key from %(cert)s %(e)s." -msgstr "" -"Bir anahtar dosya belirtilmemiş ve anahtar %(cert)s den yüklenemiyor %(e)s." - -msgid "No mounted Gluster shares found" -msgstr "Bağlı Gluster paylaşımı bulunamadı" - -msgid "No mounted NFS shares found" -msgstr "Bağlı NFS paylaşımı bulunamadı" - -msgid "No mounted SMBFS shares found." -msgstr "Bağlı SMBFS paylaşımı bulunamadı." - -msgid "No mounted Virtuozzo Storage shares found" -msgstr "Bağlı Virtuozzo Depolama paylaşımı bulunamadı" - -msgid "No mounted shares found" -msgstr "Bağlı paylaşım bulunamadı" - -#, python-format -msgid "No node found in I/O group %(gid)s for volume %(vol)s." -msgstr "%(vol)s mantıksal sürücüsü için %(gid)s I/O grubunda düğüm bulunamadı." - -msgid "" -"No response was received from CloudByte storage list iSCSI auth user API " -"call." -msgstr "" -"CloudByte depolama listesi iSCSI auth user API çağrısından yanıt alınamadı." - -msgid "No response was received from CloudByte storage list tsm API call." -msgstr "CloudByte storage list tsm API çağrısından yanıt alınamadı." - -msgid "No response was received from CloudByte's list filesystem api call." -msgstr "CloudByte'ın list filesystem api çağrısından bir yanıt alınmadı." - -#, python-format -msgid "No snap found with %s as backing file." -msgstr "%s destek dosyası olacak bir anlık görüntü bulunamadı." 
- -#, python-format -msgid "No snapshot image found in snapshot group %s." -msgstr "%s anlık görüntü grubundan anlık görüntü imajı bulunamadı." - -#, python-format -msgid "No storage path found for export path %s" -msgstr "Dışa aktarma yolu %s için depolama yolu bulunamadı" - -#, python-format -msgid "No such QoS spec %(specs_id)s." -msgstr "Böyle bir %(specs_id)s QoS özelliği yok." - -msgid "No suitable discovery ip found" -msgstr "Uygun ip keşfedilemedi" - -#, python-format -msgid "No support to restore backup version %s" -msgstr "%s yedekleme sürümünü geri yükleme desteklenmiyor" - -#, python-format -msgid "No target id found for volume %(volume_id)s." -msgstr "%(volume_id)s bölümü için hedef id bulunamadı." - -msgid "" -"No unused LUN IDs are available on the host; multiattach is enabled which " -"requires that all LUN IDs to be unique across the entire host group." -msgstr "" -"İstemci üzerinde kullanılabilir LUN ID'si yok; çoklu ekleme etkin ki bu tüm " -"LUN ID'lerinin tüm istemci grupları arasında benzersiz olmasını gerektirir." - -#, python-format -msgid "No valid host was found. %(reason)s" -msgstr "Geçerli bir sunucu bulunamadı: %(reason)s" - -#, python-format -msgid "No valid hosts for volume %(id)s with type %(type)s" -msgstr "%(type)s türü ile %(id)s mantıksal sürücüsü için geçerli istemci yok" - -#, python-format -msgid "" -"No volume on cluster with vserver %(vserver)s and junction path %(junction)s " -msgstr "" -"%(vserver)s vserver'e ve %(junction)s kavşağına sahip kümede mantıksal " -"sürücü yok " - -msgid "No volume service(s) started successfully, terminating." -msgstr "" -"Disk bölümü servis(leri)i başarılı olarak başlatılamadı, sonlandırılıyor." - -msgid "No volume was found at CloudByte storage." -msgstr "CloudByte depolamada hiçbir mantıksal sürücü bulunamadı." - -msgid "No volume_type should be provided when creating test replica." -msgstr "Test kopyası oluşturulurken volume_type verilmesi gerekmez." 
- -msgid "No volumes found in CloudByte storage." -msgstr "CloudByte depolamada hiçbir mantıksal sürücüsü bulunamadı." - -msgid "No weighed hosts available" -msgstr "Kullanılabilir ağırlıklı istemci yok" - -#, python-format -msgid "Not able to find a suitable datastore for the volume: %s." -msgstr "Mantıksal sürücü için uygun bir veri deposu bulunamadı: %s." - -msgid "Not an rbd snapshot" -msgstr "Bir rbd anlık sistem görüntüsü değildir" - -#, python-format -msgid "Not authorized for image %(image_id)s." -msgstr "%(image_id)s imajı için yetkilendirilemedi." - -msgid "Not authorized." -msgstr "Yetkiniz yok." - -#, python-format -msgid "Not enough space on backend (%(backend)s)" -msgstr "Art alanda çalışan uygulamada (%(backend)s) yeterli alan yok" - -msgid "Not enough storage space in the ZFS share to perform this operation." -msgstr "ZFS paylaşımında bu işlemi yapacak kadar depolama alanı yok." - -msgid "Not stored in rbd" -msgstr "rbd içinde depolanmıyor" - -msgid "Nova returned \"error\" status while creating snapshot." -msgstr " Nova anlık sistem görüntüsü oluşturulurken \"error\" durumu döndürdü." - -msgid "Null response received from CloudByte's list filesystem." -msgstr "CloudByte listesinde dosya sisteminden boş yanıt alındı." - -msgid "Null response received from CloudByte's list iscsi auth groups." -msgstr "CloudByte'ın iscsi yetkilendirme grubu listesinden yanıt dönmedi." - -msgid "Null response received from CloudByte's list iscsi initiators." -msgstr "CloudByte listesinde iscsi başlatıcılarından boş yanıt alındı." - -msgid "Null response received from CloudByte's list volume iscsi service." -msgstr "" -"CloudByte listesinde mantıksal sürücü iscsi servisinden boş yanıt alındı." - -#, python-format -msgid "Null response received while creating volume [%s] at CloudByte storage." -msgstr "" -"CloudByte depolamasında [%s] mantıksal sürücüsü oluşturulurken boş yanıt " -"alındı." 
- -msgid "Object Count" -msgstr "Nesne Sayısı" - -msgid "Object is not a NetApp LUN." -msgstr "Nesne bir NetApp LUN değil." - -#, python-format -msgid "" -"On an Extend Operation, error adding volume to composite volume: " -"%(volumename)s." -msgstr "" -"Bir Büyütme İşleminde, mantıksal sürücünün bileşik mantıksal sürücüye " -"eklenmesinde hata: %(volumename)s." - -#, python-format -msgid "" -"Only %(value)s %(verb)s request(s) can be made to %(uri)s every " -"%(unit_string)s." -msgstr "" -"Yalnızca %(value)s %(verb)s istek(ler)i %(uri)s ye her %(unit_string)s " -"yapılabilir." - -msgid "Only one limit can be set in a QoS spec." -msgstr "QoS özelliğinde yalnızca bir sınır ayarlanabilir." - -#, python-format -msgid "Operation failed with status=%(status)s. Full dump: %(data)s" -msgstr "İşlem şu durum ile başarısız oldu=%(status)s. Tam dökümü: %(data)s" - -#, python-format -msgid "Operation not supported: %(operation)s." -msgstr "İşlem desteklenmiyor: %(operation)s." - -msgid "Option gpfs_images_dir is not set correctly." -msgstr "gpfs_images_dir seçeneği doğru ayarlanmamış." - -msgid "Option gpfs_images_share_mode is not set correctly." -msgstr "gpfs_images_share_mode seçeneği doğru ayarlanmamış." - -msgid "Option gpfs_mount_point_base is not set correctly." -msgstr "gpfs_mount_point_base seçeneği doğru ayarlanmamış." - -#, python-format -msgid "ParseException: %s" -msgstr "ParseException: %s" - -msgid "" -"Password or SSH private key is required for authentication: set either " -"san_password or san_private_key option." -msgstr "" -"Yetkilendirme için parola veya SSH özel anahtarı gerekli: ya san_password ya " -"da san_private_key seçeneğini ayarlayın." - -msgid "Path to REST server's certificate must be specified." -msgstr "REST sunucusunun sertifikasına olan yol belirtilmeli." - -#, python-format -msgid "Please create %(pool_list)s pool in advance!" -msgstr "Lütfen %(pool_list)s havuzunu önceden oluşturun!" 
- -#, python-format -msgid "Please create %(tier_levels)s tier in pool %(pool)s in advance!" -msgstr "Lütfen önceden %(pool)s havuzunda %(tier_levels)s aşamasını oluşturun!" - -msgid "Please specify a name for QoS specs." -msgstr "Lütfen QoS özellikleri için bir ad belirtin." - -#, python-format -msgid "Policy doesn't allow %(action)s to be performed." -msgstr "%(action)s uygulanmasına izin verilmiyor." - -#, python-format -msgid "Pool %(poolNameInStr)s is not found." -msgstr "%(poolNameInStr)s havuzu bulunamadı." - -msgid "Pool is not available in the volume host field." -msgstr "Havzu mantıksal sürücü istemci alanında kullanılabilir değil." - -msgid "Pool is not available in the volume host fields." -msgstr "Havuz mantıksal sürücü istemci alanlarında kullanılabilir değil." - -#, python-format -msgid "Pool with name %(pool)s wasn't found in domain %(domain)s." -msgstr "%(pool)s ismine sahip havuz %(domain)s alanında bulunamadı." - -#, python-format -msgid "Pool with name %(pool_name)s wasn't found in domain %(domain_id)s." -msgstr "%(pool_name)s ismine sahip havuz %(domain_id)s alanında bulunamadı." - -#, python-format -msgid "" -"Pool: %(poolName)s. is not associated to storage tier for fast policy " -"%(fastPolicy)s." -msgstr "" -"Havuz: %(poolName)s. %(fastPolicy)s. fast ilkesi için depolama aşamasıyla " -"ilişkilendirilmemiş." - -#, python-format -msgid "Pools %s does not exist" -msgstr "Havuz %s mevcut değil" - -msgid "Pools name is not set." -msgstr "Havuz ismi ayarlanmamış." - -#, python-format -msgid "Primary copy status: %(status)s and synchronized: %(sync)s." -msgstr "Birincil kopyalama durumu: %(status)s ve eşzamanlanan: %(sync)s." - -msgid "Project ID" -msgstr "Proje ID" - -#, python-format -msgid "" -"Protocol %(storage_protocol)s is not supported for storage family " -"%(storage_family)s." -msgstr "" -"%(storage_protocol)s iletişim kuralı %(storage_family)s depolama ailesi için " -"desteklenmiyor." 
- -#, python-format -msgid "" -"Provided snapshot status %(provided)s not allowed for snapshot with status " -"%(current)s." -msgstr "" -"%(current)s durumundaki anlık sistem görüntüsünde verilen anlık sistem " -"görüntüsü durumuna %(provided)s izin verilmez." - -#, python-format -msgid "Pure Storage Cinder driver failure: %(reason)s" -msgstr "Pure Storage Cinder sürücü hatası: %(reason)s" - -#, python-format -msgid "QoS Specs %(specs_id)s already exists." -msgstr "QoS Özellikleri %(specs_id)s zaten var." - -#, python-format -msgid "QoS Specs %(specs_id)s is still associated with entities." -msgstr "%(specs_id)s QoS Özellikleri hala varlıklar ile ilişkilidir." - -#, python-format -msgid "QoS spec %(specs_id)s has no spec with key %(specs_key)s." -msgstr "" -"%(specs_id)s QoS özelliği, %(specs_key)s anahtarı ile hiçbir özelliğe sahip " -"değil." - -msgid "QoS specs are not supported on this storage family and ONTAP version." -msgstr "QoS özellikleri bu depolama ailesi ve ONTAP sürümünde desteklenmiyor." - -msgid "Qos specs still in use." -msgstr "Qos özellikleri hala kullanımda." - -msgid "" -"Query by service parameter is deprecated. Please use binary parameter " -"instead." -msgstr "" -"Servis parametreleri ile sorgu önerilmiyor. Lütfen bunun yerine ikili değer " -"parametrelerini kullanın." - -#, python-format -msgid "Quota %s limit must be equal or greater than existing resources." -msgstr "Kota %s limiti varolan kaynaklarla eşit veya daha büyük olmalıdır." - -#, python-format -msgid "Quota class %(class_name)s could not be found." -msgstr "Kota sınıfı %(class_name)s bulunamadı." - -msgid "Quota could not be found" -msgstr "Kota bulunamadı." - -#, python-format -msgid "Quota exceeded for resources: %(overs)s" -msgstr "Kota kaynaklar için aşıldı: %(overs)s" - -#, python-format -msgid "Quota exceeded: code=%(code)s" -msgstr "Kota aşıldı: kod=%(code)s" - -#, python-format -msgid "Quota for project %(project_id)s could not be found." 
-msgstr "%(project_id)s projesi için bir kota bulunamadı." - -#, python-format -msgid "Quota reservation %(uuid)s could not be found." -msgstr "Kota koşulu %(uuid)s bulunamadı." - -#, python-format -msgid "Quota usage for project %(project_id)s could not be found." -msgstr "%(project_id)s projesi için kota kullanımı bulunamadı." - -#, python-format -msgid "RBD diff op failed - (ret=%(ret)s stderr=%(stderr)s)" -msgstr "RBD diff işlemi başarısız oldu - (ret=%(ret)s stderr=%(stderr)s)" - -msgid "REST server IP must by specified." -msgstr "REST sunucu IP'si belirtilmelidir." - -msgid "REST server password must by specified." -msgstr "REST sunucu parolası belirtilmelidir." - -msgid "REST server username must by specified." -msgstr "REST sunucu kullanıcı adı belirtilmelidir." - -msgid "Raid did not have MCS Channel." -msgstr "Raid MCS Kanalına sahip değil." - -#, python-format -msgid "Received error string: %s" -msgstr "Alınan hata: %s" - -msgid "Reference must be for an unmanaged virtual volume." -msgstr "Başvuru yönetilmeyen bir sanal mantıksal sürücü için olmalı." - -msgid "Reference must be the volume name of an unmanaged virtual volume." -msgstr "" -"Referans yönetilmeyen bir sanal mantıksal sürücünün mantıksal sürücü ismi " -"olmalı." - -msgid "Reference must contain either source-name or source-id element." -msgstr "Başvuru ya kaynak-ismi ya da kaynak-kimliği öğelerini içermeli." - -msgid "Reference must contain source-id or source-name key." -msgstr "Başvuru source-id veya source-name anahtarını içermeli." - -msgid "Reference must contain source-id or source-name." -msgstr "Başvuru kaynak-id veya kaynak-ismi içermeli." - -msgid "Reference must contain source-name element." -msgstr "Kaynak kaynak-ad ögesi içermelidir." - -msgid "Reference must contain source-name or source-id." -msgstr "Başvuru kaynak-ismi veya kaynak-kimliği içermeli." - -msgid "Reference must contain source-name." -msgstr "Referans kaynak ismi içermeli." 
- -#, python-format -msgid "" -"Refusing to migrate volume ID: %(id)s. Please check your configuration " -"because source and destination are the same Volume Group: %(name)s." -msgstr "" -"Şu mantıksal sürücü göçü reddediliyor: %(id)s. Lütfen yapılandırma " -"ayarlarınızı kontrol edin çünkü kaynak ve hedef aynı Mantıksal Sürücü " -"Grubundalar: %(name)s." - -#, python-format -msgid "Replication Service Capability not found on %(storageSystemName)s." -msgstr "Çoğaltma Servisi Yeteneği %(storageSystemName)s üzerinde bulunamadı." - -#, python-format -msgid "Replication Service not found on %(storageSystemName)s." -msgstr "Çoğaltma Servisi %(storageSystemName)s üzerinde bulunamadı." - -msgid "Request body and URI mismatch" -msgstr "URI ve gövde isteği uyumsuz" - -msgid "Request body contains too many items" -msgstr "İstek gövdesi çok sayıda öğe içeriyor" - -msgid "Request body contains too many items." -msgstr "İstek gövdesi çok fazla öge içerir." - -msgid "Request body empty" -msgstr "İstek gövdesi boş" - -#, python-format -msgid "Request to Datera cluster returned bad status: %(status)s | %(reason)s" -msgstr "" -"Datera kümesine yapılan istek kötü durum döndürdü: %(status)s | %(reason)s" - -#, python-format -msgid "" -"Requested backup exceeds allowed Backup gigabytes quota. Requested " -"%(requested)sG, quota is %(quota)sG and %(consumed)sG has been consumed." -msgstr "" -"İstenen yedekleme izin verilen yedekleme kotasını aşıyor. İstenen " -"%(requested)sG, kota %(quota)sG ve tüketilen %(consumed)sG." - -#, python-format -msgid "" -"Requested volume or snapshot exceeds allowed %(name)s quota. Requested " -"%(requested)sG, quota is %(quota)sG and %(consumed)sG has been consumed." -msgstr "" -"İstenen mantıksal sürücü ya da anlık sistem görüntüsü izin verilen %(name)s " -"kotayı aşıyor. İstenen %(requested)sG, kota %(quota)sG ve tüketilen " -"%(consumed)sG." 
- -#, python-format -msgid "" -"Requested volume size %(size)d is larger than maximum allowed limit " -"%(limit)d." -msgstr "" -"İstenen mantıksal sürücü boyutu %(size)d izin verilen azami sınırdan " -"%(limit)d daha büyük." - -msgid "Required configuration not found" -msgstr "Gerekli yapılandırma bulunamadı" - -#, python-format -msgid "Required flag %s is not set" -msgstr "İstenen %s bayrağı ayarlı değil" - -#, python-format -msgid "" -"Reset backup status aborted, the backup service currently configured " -"[%(configured_service)s] is not the backup service that was used to create " -"this backup [%(backup_service)s]." -msgstr "" -"Yedekleme durumu sıfırlama durduruldu, şu anda yapılandırılan yedekleme " -"servisi [%(configured_service)s], bu yedeği [%(backup_service)s] oluşturmak " -"için kullanılan yedekleme servisi değildir." - -#, python-format -msgid "Resizing clone %s failed." -msgstr "%s klonunun yeniden boyutlandırılması başarısız." - -msgid "Resizing image file failed." -msgstr "İmaj dosyası yeniden boyutlandırma başarısız oldu." - -msgid "Resource could not be found." -msgstr "Kaynak bulunamadı." - -msgid "Resource not ready." -msgstr "Kaynak hazır değil." - -#, python-format -msgid "Response error - %s." -msgstr "Yanıt hatası - %s." - -#, python-format -msgid "Response error code - %s." -msgstr "Yanıt hata kodu - %s." - -#, python-format -msgid "" -"Restore backup aborted, expected volume status %(expected_status)s but got " -"%(actual_status)s." -msgstr "" -"Yedek geri yükleme durduruldu, beklenen mantıksal sürücü durumu " -"%(expected_status)s fakat mevcut durum %(actual_status)s." - -#, python-format -msgid "" -"Restore backup aborted, the backup service currently configured " -"[%(configured_service)s] is not the backup service that was used to create " -"this backup [%(backup_service)s]." 
-msgstr "" -"Yedek geri yükleme durduruldu, şu anda yapılandırılan yedekleme servisi " -"[%(configured_service)s] bu yedeğin [%(backup_service)s] oluşturulması için " -"kullanılan yedekleme servisi değildir." - -#, python-format -msgid "" -"Restore backup aborted: expected backup status %(expected_status)s but got " -"%(actual_status)s." -msgstr "" -"Yedek geri yükleme durduruldu: beklenen yedekleme durumu %(expected_status)s " -"ancak alınan %(actual_status)s." - -#, python-format -msgid "Retry count exceeded for command: %s" -msgstr "Komut için yeniden deneme sayısı aşıldı: %s" - -msgid "Retryable SolidFire Exception encountered" -msgstr "Yinelenebilir SolidFire İstisnası oluştu" - -msgid "Retype requires migration but is not allowed." -msgstr "Retype göçe ihtiyaç duyuyor ama izin verilmiyor." - -#, python-format -msgid "Rolling back %(volumeName)s by deleting it." -msgstr "%(volumeName)s silinerek geri alınıyor." - -#, python-format -msgid "SMBFS config 'smbfs_oversub_ratio' invalid. Must be > 0: %s" -msgstr "" -"SMBFS yapılandırması 'smbfs_oversub_ratio' geçersiz. Değer > 0 olmalıdır: %s" - -#, python-format -msgid "SMBFS config 'smbfs_used_ratio' invalid. Must be > 0 and <= 1.0: %s" -msgstr "" -"SMBFS yapılandırması 'smbfs_used_ratio' geçersiz. Değer > 0 ve <= 1.0 " -"olmalıdır: %s" - -#, python-format -msgid "SMBFS config file at %(config)s doesn't exist." -msgstr "%(config)s konumunda SMBFS yapılandırma dosyası yok." - -msgid "SMBFS config file not set (smbfs_shares_config)." -msgstr "SMBFS yapılandırma dosyası ayarlı değil (smbfs_shares_config)." 
- -#, python-format -msgid "SSH Command failed after '%(total_attempts)r' attempts : '%(command)s'" -msgstr "" -"SSH komutu '%(total_attempts)r' girişimden sonra başarısız oldu: " -"'%(command)s'" - -#, python-format -msgid "SSH command injection detected: %(command)s" -msgstr "SSH komut ekleme algılandı: %(command)s" - -#, python-format -msgid "SSH connection failed for %(fabric)s with error: %(err)s" -msgstr "%(fabric)s için SSH bağlantısı başarısız, hata: %(err)s" - -#, python-format -msgid "SSL Certificate expired on %s." -msgstr "SSL Sertifikasının süresi %s de doldu." - -#, python-format -msgid "SSL error: %(arg)s." -msgstr "SSL hatası: %(arg)s." - -#, python-format -msgid "Scheduler Host Filter %(filter_name)s could not be found." -msgstr "%(filter_name)s zamanlayıcı sunucu filtresi bulunamadı." - -#, python-format -msgid "Scheduler Host Weigher %(weigher_name)s could not be found." -msgstr "Zamanlayıcı İstemci Tartıcı %(weigher_name)s bulunamadı." - -#, python-format -msgid "" -"Secondary copy status: %(status)s and synchronized: %(sync)s, sync progress " -"is: %(progress)s%%." -msgstr "" -"İkincil kopyalama durumu: %(status)s ve eşzamanlanan: %(sync)s, eşzamanlama " -"ilerlemesi: %(progress)s%%." - -#, python-format -msgid "Service %(service)s on host %(host)s removed." -msgstr "host %(host)s üzerindeki servis %(service)s silindi. " - -#, python-format -msgid "Service %(service_id)s could not be found." -msgstr "%(service_id)s servisi bulunamadı." - -msgid "Service is unavailable at this time." -msgstr "Şu anda servis kullanılamıyor." - -msgid "" -"Setting LUN QoS policy group is not supported on this storage family and " -"ONTAP version." -msgstr "" -"LUN QoS ilke grubu ayarlama bu depolama ailesi ve ONTAP sürümünde " -"desteklenmiyor." - -msgid "" -"Setting file qos policy group is not supported on this storage family and " -"ontap version." -msgstr "" -"Dosya qos ilke grubu ayarlama bu depolama ailesi ve ontap sürümünde " -"desteklenmiyor." 
- -#, python-format -msgid "" -"Share at %(dir)s is not writable by the Cinder volume service. Snapshot " -"operations will not be supported." -msgstr "" -"%(dir)s dizinindeki paylaşım Cinder mantıksal sürücü servisi tarafından " -"yazılabilir değildir. Anlık sistem görüntüsü işlemleri desteklenmiyor." - -msgid "Size" -msgstr "Boyut" - -#, python-format -msgid "Size for volume: %s not found, cannot secure delete." -msgstr "Mantıksal sürücü boyutu: %s bulunamadı, güvenli silinemiyor." - -#, python-format -msgid "" -"Size is %(image_size)dGB and doesn't fit in a volume of size " -"%(volume_size)dGB." -msgstr "" -"Boyut %(image_size)dGB ve %(volume_size)dGB boyutundaki mantıksal sürücü ile " -"uymuyor." - -#, python-format -msgid "" -"Size of specified image %(image_size)sGB is larger than volume size " -"%(volume_size)sGB." -msgstr "" -"Belirtilen imajın boyutu %(image_size)sGB %(volume_size)sGB mantıksal sürücü " -"boyutundan büyük." - -#, python-format -msgid "Snapshot %(snapshot_id)s could not be found." -msgstr "%(snapshot_id)s sistem anlık görüntüsü bulunamadı." - -#, python-format -msgid "Snapshot %(snapshot_id)s has no metadata with key %(metadata_key)s." -msgstr "" -"%(snapshot_id)s anlık sistem görüntüsü %(metadata_key)s anahtarı ile hiçbir " -"metadata'ya sahip değil." - -#, python-format -msgid "" -"Snapshot cannot be created because volume %(vol_id)s is not available, " -"current volume status: %(vol_status)s." -msgstr "" -"Anlık sistem görüntüsü oluşturulamıyor çünkü %(vol_id)s mantıksal sürücüsü " -"kullanılabilir değil, mevcut mantıksal sürücü durumu: %(vol_status)s." - -msgid "Snapshot cannot be created while volume is migrating." -msgstr "Mantıksal sürücü taşınırken anlık sistem görüntüsü oluşturulamaz." - -msgid "Snapshot of secondary replica is not allowed." -msgstr "İkincil kopyanın anlık sistem görüntüsüne izin verilmez." - -#, python-format -msgid "Snapshot of volume not supported in state: %s." 
-msgstr "Mantıksal sürücünün anlık görüntüsü %s durumunda desteklenmiyor." - -#, python-format -msgid "Snapshot res \"%s\" that is not deployed anywhere?" -msgstr "Hiçbir yere yerleştirilmemiş anlık görüntü res \"%s\"?" - -#, python-format -msgid "Snapshot status %(cur)s not allowed for update_snapshot_status" -msgstr "" -"Anlık sistem görüntüsü durumuna %(cur)s update_snapshot_status için izin " -"verilmez" - -msgid "Snapshot status must be \"available\" to clone." -msgstr "" -"Kopyalamak için anlık sistem görüntüsü durumu \"kullanılabilir\" olmalıdır." - -#, python-format -msgid "" -"Snapshot='%(snap)s' does not exist in base image='%(base)s' - aborting " -"incremental backup" -msgstr "" -"Anlık sistem görüntüsü='%(snap)s' temel imaj='%(base)s' içinde mevcut değil " -"- artırımlı yedekleme durduruluyor" - -#, python-format -msgid "Snapshots are not supported for this volume format: %s" -msgstr "" -"Anlık sistem görüntüleri bu mantıksal sürücü biçimi için desteklenmiyor: %s" - -msgid "SolidFire Cinder Driver exception" -msgstr "SolidFire Cinder Sürücü istisnası" - -msgid "Sort direction array size exceeds sort key array size." -msgstr "Sıralama yönü dizi boyutu anahtar dizi boyutu sınırını aştı." - -msgid "Source CG is empty. No consistency group will be created." -msgstr "Kaynak CG boştur. Tutarlılık grubu oluşturulmayacak." - -msgid "Source host details not found." -msgstr "Kaynak istemci detayları bulunamadı." - -msgid "Source volume device ID is required." -msgstr "Kaynak mantıksal sürücü aygıt kimliği gerekli." - -msgid "Source volume not mid-migration." -msgstr "Kaynak mantıksal sürücü taşıma ortasında değildir." - -msgid "SpaceInfo returned byarray is invalid" -msgstr "Dizi tarafından döndürülen SpaceInfo geçersiz" - -#, python-format -msgid "" -"Specified host to map to volume %(vol)s is in unsupported host group with " -"%(group)s." 
-msgstr "" -"%(vol)s mantıksal sürücüsüne eşleştirilmesi için belirtilen istemci " -"%(group)s ile desteklenmeyen istemci grubunda." - -msgid "Specified logical volume does not exist." -msgstr "Belirtilen mantıksal sürücü mevcut değil." - -msgid "Specify a password or private_key" -msgstr "Bir parola ya da private_key belirtin" - -msgid "Specify san_password or san_private_key" -msgstr "san_password veya san_private_key belirtin" - -msgid "State" -msgstr "Durum" - -#, python-format -msgid "State of node is wrong. Current state is %s." -msgstr "Düğüm durumu yanlış. Mevcut durum %s." - -msgid "Status" -msgstr "Statü" - -#, python-format -msgid "Storage Configuration Service not found on %(storageSystemName)s." -msgstr "" -"Depolama Yapılandırma Servisi %(storageSystemName)s üzerinde bulunamadı." - -#, python-format -msgid "Storage HardwareId mgmt Service not found on %(storageSystemName)s." -msgstr "" -"Depolama HardwareId yönetim Servisi %(storageSystemName)s üzerinde " -"bulunamadı." - -#, python-format -msgid "Storage Profile %s not found." -msgstr "Depolama Profili %s bulunamadı." - -#, python-format -msgid "Storage Relocation Service not found on %(storageSystemName)s." -msgstr "" -"Depolama Yeniden Konumlandırma Servisi %(storageSystemName)s üzerinde " -"bulunamadı." - -#, python-format -msgid "Storage family %s is not supported." -msgstr "Depolama ailesi %s desteklenmiyor." - -#, python-format -msgid "Storage group %(storageGroupName)s was not deleted successfully" -msgstr "Depolama grubu %(storageGroupName)s başarılı bir şekilde silindi" - -#, python-format -msgid "Storage host %(svr)s not detected, verify name" -msgstr "Depolama sunucusu %(svr)s tespit edilemedi, adı doğrulayın" - -#, python-format -msgid "Storage profile: %(storage_profile)s not found." -msgstr "Depolama profili: %(storage_profile)s bulunamadı." - -msgid "Storage resource could not be found." -msgstr "Depolama kaynağı bulunamadı." - -msgid "Storage system id not set." 
-msgstr "Depolama sistemi kimliği ayarlanmamış." - -#, python-format -msgid "Storage system not found for pool %(poolNameInStr)s." -msgstr "%(poolNameInStr)s havuzu için depolama sistemi bulunamadı." - -#, python-format -msgid "StorageSystem %(array)s is not found." -msgstr "Depolama Sistemi %(array)s bulunamadı." - -#, python-format -msgid "System %(id)s found with bad status - %(status)s." -msgstr "Kötü durumda sistem %(id)s bulundu - %(status)s." - -msgid "System does not support compression." -msgstr "Sistem sıkıştırmayı desteklemiyor." - -msgid "System is busy, retry operation." -msgstr "Sistem meşgul, işlemi yeniden deneyin." - -#, python-format -msgid "" -"TSM [%(tsm)s] was not found in CloudByte storage for account [%(account)s]." -msgstr "" -"CloudByte depolamasında [%(account)s] hesabı için TSM [%(tsm)s] bulunamadı." - -msgid "Target volume type is still in use." -msgstr "Hedef mantıksal sürücü türü hala kullanımda." - -msgid "Terminate connection failed" -msgstr "Bağlantı sonlandırma başarısız oldu" - -msgid "Terminate connection unable to connect to backend." -msgstr "Bağlantıyı sonlandır arka uca bağlanılamıyor." - -#, python-format -msgid "Terminate volume connection failed: %(err)s" -msgstr "Mantıksal sürücü bağlantı sonlandırma başarısız oldu: %(err)s" - -#, python-format -msgid "The %(type)s %(id)s source to be replicated was not found." -msgstr "Çoğaltılacak %(type)s %(id)s kaynağı bulunamadı." - -msgid "" -"The 'sort_key' and 'sort_dir' parameters are deprecated and cannot be used " -"with the 'sort' parameter." -msgstr "" -"'sort_key' ve 'sort_dir' parametreleri önerilmiyor ve 'sort' parametresi ile " -"kullanılamaz." - -msgid "The EQL array has closed the connection." -msgstr "EQL dizisi bağlantıyı kapattı." - -#, python-format -msgid "" -"The GPFS filesystem %(fs)s is not at the required release level. Current " -"level is %(cur)s, must be at least %(min)s." -msgstr "" -"GPFS dosya sistemi %(fs)s gerekli sürüm seviyesinde değil. 
Mevcut seviye " -"%(cur)s, en az %(min)s olmalı." - -msgid "The IP Address was not found." -msgstr "IP Adresi bulunamadı." - -#, python-format -msgid "" -"The WebDAV request failed. Reason: %(msg)s, Return code/reason: %(code)s, " -"Source Volume: %(src)s, Destination Volume: %(dst)s, Method: %(method)s." -msgstr "" -"WebDAV isteği başarısız oldu. Neden: %(msg)s, Dönüş kodu/nedeni: %(code)s, " -"Kaynak Mantıksal Sürücü: %(src)s, Hedef Mantıksal Sürücü: %(dst)s, Yöntem: " -"%(method)s." - -msgid "" -"The above error may show that the database has not been created.\n" -"Please create a database using 'cinder-manage db sync' before running this " -"command." -msgstr "" -"Yukarıdaki hata veritabanının oluşturulamadığını gösterebilir.\n" -"Lütfen bu komutu çalıştırmadan önce 'cinder-manage db sync' kullanarak bir " -"veritabanı oluşturun." - -#, python-format -msgid "" -"The command %(cmd)s failed. (ret: %(ret)s, stdout: %(out)s, stderr: %(err)s)" -msgstr "" -"%(cmd)s komutu başarısız. (ret: %(ret)s, stdout: %(out)s, stderr: %(err)s)" - -msgid "The copy should be primary or secondary" -msgstr "Kopyalamanın birincil ya da ikincil olması gerekir" - -#, python-format -msgid "" -"The creation of a logical device could not be completed. (LDEV: %(ldev)s)" -msgstr "Mantıksal bir aygıtın oluşturulması tamamlanamadı. (LDEV: %(ldev)s)" - -msgid "The decorated method must accept either a volume or a snapshot object" -msgstr "" -"Dekore edilen metod ya bir mantıksal sürüc üya da anlık görüntü nesnesi " -"kabul etmeli" - -#, python-format -msgid "The device in the path %(path)s is unavailable: %(reason)s" -msgstr "%(path)s yolundaki aygıt kullanılabilir değil: %(reason)s" - -#, python-format -msgid "The end time (%(end)s) must be after the start time (%(start)s)." -msgstr "" -"Bitiş zamanı (%(end)s) başlangıç zamanından (%(start)s) sonra olmalıdır." - -#, python-format -msgid "The extraspec: %(extraspec)s is not valid." -msgstr "extraspec: %(extraspec)s geçersizdir." 
- -#, python-format -msgid "The following elements are required: %s" -msgstr "Aşağıdaki ögeler gereklidir: %s" - -msgid "The host group or iSCSI target could not be added." -msgstr "İstemci grubu veya iSCSI hedefi eklenemedi." - -msgid "The host group or iSCSI target was not found." -msgstr "İstemci grubu ya da iSCSI hedefi bulunamadı." - -#, python-format -msgid "The iSCSI CHAP user %(user)s does not exist." -msgstr "iSCSI CHAP kullanıcısı %(user)s mevcut değil." - -msgid "The key cannot be None." -msgstr "Anahtar boş olamaz." - -#, python-format -msgid "The logical device for specified %(type)s %(id)s was already deleted." -msgstr "Belirtilen %(type)s %(id)s için mantıksal aygıt zaten silinmiş." - -#, python-format -msgid "The method %(method)s is timed out. (timeout value: %(timeout)s)" -msgstr "" -"%(method)s metodu zaman aşımına uğradı. (zaman aşımı değeri: %(timeout)s)" - -msgid "The method update_migrated_volume is not implemented." -msgstr " update_migrated_volume metodu uygulanamadı." - -#, python-format -msgid "The parameter of the storage backend. (config_group: %(config_group)s)" -msgstr "Depolama arka ucunun parametresi. (config_group: %(config_group)s)" - -msgid "The parent backup must be available for incremental backup." -msgstr "Artımlı yedekleme için ana yedekleme kullanılabilir olmalıdır." - -#, python-format -msgid "" -"The requested size : %(requestedSize)s is not the same as resulting size: " -"%(resultSize)s." -msgstr "" -"İstenen boyut : %(requestedSize)s sonuçta oluşan boyut: %(resultSize)s ile " -"aynı değildir." - -#, python-format -msgid "The resource %(resource)s was not found." -msgstr "Kaynak %(resource)s bulunamadı." - -msgid "The results are invalid." -msgstr "Sonuçlar geçersizdir." - -msgid "The snapshot cannot be created when the volume is in maintenance mode." -msgstr "Disk bölümü bakım modunda iken anlık görüntü oluşturulamaz." - -msgid "The source volume for this WebDAV operation not found." 
-msgstr "Bu WebDAV işlemi için kaynak mantıksal sürücü bulunamadı." - -#, python-format -msgid "" -"The source volume type '%(src)s' is different than the destination volume " -"type '%(dest)s'." -msgstr "" -"Kaynak mantıksal sürücü türü '%(src)s' hedef mantıksal sürücü türünden " -"'%(dest)s' farklıdır." - -#, python-format -msgid "The source volume type '%s' is not available." -msgstr "Kaynak mantıksal sürücü türü '%s' kullanılabilir değil." - -#, python-format -msgid "The specified %(desc)s is busy." -msgstr "Belirtilen %(desc)s meşgul." - -#, python-format -msgid "" -"The specified ldev %(ldev)s could not be managed. The ldev must not be " -"mapping." -msgstr "Belirtilen ldev %(ldev)s yönetilemedi. ldev eşleşmiyor olmalı." - -#, python-format -msgid "" -"The specified ldev %(ldev)s could not be managed. The ldev must not be " -"paired." -msgstr "Belirtilen ldev %(ldev)s yönetilemedi. ldev çiftlenmemiş olmalı." - -#, python-format -msgid "" -"The specified ldev %(ldev)s could not be managed. The ldev size must be in " -"multiples of gigabyte." -msgstr "" -"Belirtilen ldev %(ldev)s yönetilemedi. ldev boyutu gigabayt'ın katı olmalı." - -#, python-format -msgid "" -"The specified ldev %(ldev)s could not be managed. The volume type must be DP-" -"VOL." -msgstr "" -"Belirtilen ldev %(ldev)s yönetilemedi. Mantıksal sürücü türü DP-VOL olmalı." - -#, python-format -msgid "" -"The specified operation is not supported. The volume size must be the same " -"as the source %(type)s. (volume: %(volume_id)s)" -msgstr "" -"Belirtilen işlem desteklenmiyor. Mantıksal sürücü boyutu kaynak %(type)s ile " -"aynı olmalı. (mantıksal sürücü: %(volume_id)s)" - -msgid "The specified vdisk is mapped to a host." -msgstr "Belirtilen vdisk bir istemciye eşleştirilmiş." - -msgid "The specified volume is mapped to a host." -msgstr "Belirtilen mantıksal sürücü bir istemciye eşleştirilmiş." - -#, python-format -msgid "The storage backend can be used. 
(config_group: %(config_group)s)" -msgstr "Depolama arka ucu kullanılabilir. (config_group: %(config_group)s)" - -#, python-format -msgid "" -"The striped meta count of %(memberCount)s is too small for volume: " -"%(volumeName)s, with size %(volumeSize)s." -msgstr "" -"%(memberCount)s biçimlenmiş meta sayısı %(volumeSize)s boyutundaki " -"%(volumeName)s mantıksal sürücüsü için çok küçük." - -#, python-format -msgid "" -"The type of metadata: %(metadata_type)s for volume/snapshot %(id)s is " -"invalid." -msgstr "volume/snapshot %(id)s için geçersiz metadata tipi %(metadata_type)s " - -#, python-format -msgid "" -"The volume %(volume_id)s could not be extended. The volume type must be " -"Normal." -msgstr "" -"%(volume_id)s mantıksal sürücüsü genişletilemedi. Mantıksal sürücü türü " -"Normal olmalı." - -#, python-format -msgid "" -"The volume %(volume_id)s could not be unmanaged. The volume type must be " -"%(volume_type)s." -msgstr "" -"%(volume_id)s mantıksal sürücüsünün yönetimi bırakılamadı. Mantıksal sürücü " -"türü %(volume_type)s olmalı." - -#, python-format -msgid "The volume %(volume_id)s is managed successfully. (LDEV: %(ldev)s)" -msgstr "%(volume_id)s mantıksal sürücüsü başarıyla yönetildi. (LDEV: %(ldev)s)" - -#, python-format -msgid "The volume %(volume_id)s is unmanaged successfully. (LDEV: %(ldev)s)" -msgstr "" -"%(volume_id)s mantıksal sürücüsünün yönetimi başarıyla bırakıldı. (LDEV: " -"%(ldev)s)" - -#, python-format -msgid "The volume %(volume_id)s to be mapped was not found." -msgstr "Eşleştirilecek %(volume_id)s mantıksal sürücüsü bulunamadı." - -msgid "The volume cannot accept transfer in maintenance mode." -msgstr "Disk bölümü bakım modunda iken transferi başlatamaz." - -msgid "The volume cannot be attached in maintenance mode." -msgstr "Disk bölümü bakım modunda iken bağlı olamaz." - -msgid "The volume cannot be detached in maintenance mode." -msgstr "Disk bölümü bakım modunda iken ayrılamaz." 
- -msgid "The volume cannot be updated during maintenance." -msgstr "Disk bölümü bakım sırasında güncellenemez." - -msgid "The volume connection cannot be initialized in maintenance mode." -msgstr "Disk bölümü bağlantısı bakım modunda iken başlatılamaz." - -msgid "The volume driver requires the iSCSI initiator name in the connector." -msgstr "" -"Mantıksal sürücü sürücüsü iSCSI ilklendirici ismini bağlayıcıda istiyor." - -msgid "" -"The volume is currently busy on the 3PAR and cannot be deleted at this time. " -"You can try again later." -msgstr "" -"Mantıksal sürücü şu an 3PAR üzerinde meşgul ve silinemez. Daha sonra tekrar " -"deneyebilirsiniz." - -#, python-format -msgid "There are no resources available for use. (resource: %(resource)s)" -msgstr "Kullanım için uygun kaynak yok. (kaynak: %(resource)s)" - -msgid "There are no valid datastores." -msgstr "Geçerli veri depoları yok." - -#, python-format -msgid "" -"There is no designation of the %(param)s. The specified storage is essential " -"to manage the volume." -msgstr "" -"%(param)s ın ataması yok. Belirtilen depolama mantıksal sürücüyü yönetmek " -"için gerekli." - -msgid "" -"There is no designation of the ldev. The specified ldev is essential to " -"manage the volume." -msgstr "" -"Ldev'in ataması yok. Belirtilen ldev mantıksal sürücüyü yönetmek için " -"gerekli." - -msgid "There is no metadata in DB object." -msgstr "Veritabanı nesnelerinde hiçbir metadata yok." - -#, python-format -msgid "There is no share which can host %(volume_size)sG" -msgstr "%(volume_size)sG sahibi paylaşım yok" - -#, python-format -msgid "There is no share which can host %(volume_size)sG." -msgstr "%(volume_size)sG sahibi paylaşım yok." - -#, python-format -msgid "There is no such action: %s" -msgstr "Böyle bir işlem yok: %s" - -msgid "There is no virtual disk device." -msgstr "Sanal disk aygıtı yok." 
- -#, python-format -msgid "There's no Gluster config file configured (%s)" -msgstr "Yapılandırılmış Gluster yapılandırma dosyası yok (%s)" - -#, python-format -msgid "There's no NFS config file configured (%s)" -msgstr "Yapılandırılmış NFS yapılandırma dosyası yok (%s)" - -msgid "Thin provisioning not supported on this version of LVM." -msgstr "LVM'in bu sürümünde ince hazırlık desteklenmiyor." - -msgid "This driver does not support deleting in-use snapshots." -msgstr "" -"Bu sürücü kullanımdaki anlık sistem görüntülerinin silinmesini desteklemez." - -msgid "This driver does not support snapshotting in-use volumes." -msgstr "" -"Bu sürücü kullanımdaki mantıksal sürücülerin anlık sistem görüntüsünü almayı " -"desteklemez." - -msgid "This request was rate-limited." -msgstr "Bu istek sayı limitlidir." - -#, python-format -msgid "" -"This system platform (%s) is not supported. This driver supports only Win32 " -"platforms." -msgstr "" -"Sistem platformu (%s) desteklenmiyor. Bu sürücü yalnızca Win32 platformunu " -"destekler." - -#, python-format -msgid "Tier Policy Service not found for %(storageSystemName)s." -msgstr "Aşama İlke Servisi %(storageSystemName)s için bulunamadı." - -#, python-format -msgid "Timed out while waiting for Nova update for creation of snapshot %s." -msgstr "" -"%s anlık sistem görüntüsü oluşumunda Nova güncellemesi beklenirken zaman " -"aşımı oluştu." - -#, python-format -msgid "" -"Timed out while waiting for Nova update for deletion of snapshot %(id)s." -msgstr "" -"%(id)s anlık sistem görüntüsünü silmek için Nova güncellemesi beklenirken " -"zaman aşımı oluştu." - -#, python-format -msgid "Timeout while requesting %(service)s API." -msgstr "API %(service)s istenirken zaman aşımı." - -#, python-format -msgid "Transfer %(transfer_id)s could not be found." -msgstr "%(transfer_id)s aktarımı bulunamadı." 
- -#, python-format -msgid "" -"Transfer %(transfer_id)s: Volume id %(volume_id)s in unexpected state " -"%(status)s, expected awaiting-transfer" -msgstr "" -"Aktarım %(transfer_id)s: %(volume_id)s mantıksal sürücü kimliği beklenmeyen " -"durumda %(status)s, beklenen bekleyen-aktarım" - -#, python-format -msgid "" -"Tune volume task stopped before it was done: volume_name=%(volume_name)s, " -"task-status=%(status)s." -msgstr "" -"Mantıksal sürücü ayarlama görevi bitmeden durduruldu: volume_name=" -"%(volume_name)s, task-status=%(status)s." - -#, python-format -msgid "" -"Type %(type_id)s is already associated with another qos specs: " -"%(qos_specs_id)s" -msgstr "" -"%(type_id)s türü zaten diğer qos özellikleri ile ilişkilidir:%(qos_specs_id)s" - -msgid "Type access modification is not applicable to public volume type." -msgstr "" -"Tür erişim değişiklikleri ortak mantıksal sürücü türü için uygulanamaz." - -#, python-format -msgid "TypeError: %s" -msgstr "TürHatası: %s" - -#, python-format -msgid "UUIDs %s are in both add and remove volume list." -msgstr "UUIDs %s, hem ekleme hem de kaldırma mantıksal sürücü listesindedir." - -#, python-format -msgid "Unable to access the backend storage via the path %(path)s." -msgstr "Art alanda çalışan depolamaya %(path)s yolu ile erişilemedi." - -#, python-format -msgid "Unable to add Cinder host to apphosts for space %(space)s" -msgstr "Cinder istemcisi %(space)s alanı için apphosts'a eklenemedi" - -msgid "Unable to connect or find connection to host" -msgstr "Bağlanılamadı ya da istemci için bağlantı bulunamadı" - -#, python-format -msgid "Unable to create consistency group %s" -msgstr "%s tutarlılık grubu oluşturulamadı" - -#, python-format -msgid "" -"Unable to create or get default storage group for FAST policy: " -"%(fastPolicyName)s." -msgstr "" -"FAST ilkesi için öntanımlı depolama grubu alınamadı ya da oluşturulamadı: " -"%(fastPolicyName)s." - -#, python-format -msgid "Unable to create replica clone for volume %s." 
-msgstr "%s mantıksal sürücüsü için kopya çoğaltma oluşturulamıyor." - -#, python-format -msgid "Unable to delete Consistency Group snapshot %s" -msgstr "%s Tutarlılık Grubu anlık görüntüsü silinemedi" - -#, python-format -msgid "Unable to delete snapshot %(id)s, status: %(status)s." -msgstr "%(id)s anlık sistem görüntüsü silinemedi, durum: %(status)s." - -#, python-format -msgid "Unable to delete snapshot policy on volume %s." -msgstr "%s mantıksal sürücüsü üzerindeki anlık görüntü ilkesi silinemedi." - -msgid "Unable to determine system id." -msgstr "Sistem kimliği belirlenemiyor." - -msgid "Unable to determine system name." -msgstr "Sistem ismi belirlenemiyor." - -#, python-format -msgid "Unable to extend volume %s" -msgstr "%s mantıksal sürücüsü genişletilemedi" - -msgid "Unable to fetch connection information from backend." -msgstr "Art alanda çalışan uygulamadan bağlantı bilgisi getirilemedi." - -#, python-format -msgid "Unable to fetch connection information from backend: %(err)s" -msgstr "Art alanda çalışan uygulamadan bağlantı bilgisi getirilemedi: %(err)s" - -#, python-format -msgid "Unable to find Volume Group: %(vg_name)s" -msgstr "Mantıksal Sürücü Grubu bulunamadı: %(vg_name)s" - -msgid "Unable to find iSCSI mappings." -msgstr "iSCSI eşleştirmeleri bulunamadı." - -#, python-format -msgid "Unable to find ssh_hosts_key_file: %s" -msgstr "ssh_hosts_key_file bulunamadı: %s" - -msgid "Unable to find system log file!" -msgstr "Sistem günlük dosyası bulunamadı!" - -#, python-format -msgid "Unable to find volume %s" -msgstr "%s mantıksal sürücüsü bulunamadı" - -#, python-format -msgid "Unable to get a block device for file '%s'" -msgstr "'%s' dosyası için bir blok aygıtı alınamadı" - -#, python-format -msgid "" -"Unable to get information on space %(space)s, please verify that the cluster " -"is running and connected." -msgstr "" -"%(space)s alanında bilgi alınamadı, lütfen kümenin bağlandığını ve " -"çalıştığını doğrulayın." 
- -msgid "" -"Unable to get list of IP addresses on this host, check permissions and " -"networking." -msgstr "" -"İstemci üzerindeki IP adreslerinin listesi alınamadı, ağ oluşturma ve " -"izinleri kontrol edin." - -msgid "" -"Unable to get list of domain members, check that the cluster is running." -msgstr "" -"Alan üyelerinin listesi alınamadı, kümenin çalışıp çalışmadığını kontrol " -"edin." - -msgid "" -"Unable to get list of spaces to make new name. Please verify the cluster is " -"running." -msgstr "" -"Yeni ad oluşturulacak alanların listesi alınamadı. Lütfen kümenin " -"çalıştığını doğrulayın." - -#, python-format -msgid "Unable to get stats for backend_name: %s" -msgstr "Backend_name için bilgiler alınamadı: %s" - -#, python-format -msgid "Unable to get target endpoints for hardwareId %(hardwareIdInstance)s." -msgstr "%(hardwareIdInstance)s hardwareid için hedef uç noktalar alınamıyor." - -#, python-format -msgid "" -"Unable to import volume %(deviceId)s to cinder. It is the source volume of " -"replication session %(sync)s." -msgstr "" -"%(deviceId)s mantıksal sürücüsü cinder'e aktarılamıyor. %(sync)s çoğaltma " -"oturumunun kaynak mantıksal sürücüsü." - -#, python-format -msgid "" -"Unable to import volume %(deviceId)s to cinder. The external volume is not " -"in the pool managed by current cinder host." -msgstr "" -"%(deviceId)s mantıksal sürücüsü cinder'e aktarılamıyor. Harici mantıksal " -"sürücü mevcut cinder istemcisi tarafından yönetilen havuzda değil." - -#, python-format -msgid "" -"Unable to import volume %(deviceId)s to cinder. Volume is in masking view " -"%(mv)s." -msgstr "" -"%(deviceId)s mantıksal sürücüsü cinder'e aktarılamıyor. Mantıksal sürücü " -"%(mv)s maskeleme görünümünde." - -#, python-format -msgid "Unable to load CA from %(cert)s %(e)s." -msgstr "CA %(cert)s den yüklenemedi %(e)s." - -#, python-format -msgid "Unable to load cert from %(cert)s %(e)s." -msgstr "Sertifika yüklenemedi %(cert)s %(e)s." 
-
-#, python-format
-msgid "Unable to load key from %(cert)s %(e)s."
-msgstr "Anahtar %(cert)s den yüklenemiyor %(e)s."
-
-#, python-format
-msgid "Unable to locate account %(account_name)s on Solidfire device"
-msgstr "Solidfire aygıtında %(account_name)s hesabı bulunamadı"
-
-#, python-format
-msgid "Unable to locate an SVM that is managing the IP address '%s'"
-msgstr "'%s' IP adresini yöneten bir SVM bulunamıyor"
-
-#, python-format
-msgid ""
-"Unable to manage existing volume. Volume %(volume_ref)s already managed."
-msgstr ""
-"Mevcut disk bölümü yönetilemiyor. Disk bölümü %(volume_ref)s zaten "
-"yönetiliyor."
-
-msgid "Unable to map volume"
-msgstr "Mantıksal sürücü eşleştirilemedi"
-
-msgid "Unable to map volume."
-msgstr "Mantıksal sürücü eşleştirilemedi."
-
-msgid "Unable to parse attributes."
-msgstr "Öznitelikler ayrıştırılamadı."
-
-#, python-format
-msgid ""
-"Unable to promote replica to primary for volume %s. No secondary copy "
-"available."
-msgstr ""
-"%s mantıksal sürücüsü için kopya birincil olarak terfi ettirilemiyor. "
-"Kullanılabilir ikincil kopya yok."
-
-msgid ""
-"Unable to re-use a host that is not managed by Cinder with "
-"use_chap_auth=True,"
-msgstr ""
-"use_chap_auth=True ile Cinder tarafından yönetilmeyen bir istemci yeniden "
-"kullanılamadı,"
-
-msgid "Unable to re-use host with unknown CHAP credentials configured."
-msgstr ""
-"Bilinmeyen yapılandırılmış CHAP kimlik bilgileri ile istemci yeniden "
-"kullanılamadı."
-
-#, python-format
-msgid ""
-"Unable to retype: A copy of volume %s exists. Retyping would exceed the "
-"limit of 2 copies."
-msgstr ""
-"Retype yapılamıyor: %s mantıksal sürücüsünün bir kopyası mevcut. Retype "
-"yapma 2 kopya limitinin aşılmasına sebep olur."
-
-#, python-format
-msgid ""
-"Unable to retype: Current action needs volume-copy, it is not allowed when "
-"new type is replication. 
Volume = %s" -msgstr "" -"Retype yapılamıyor: Mevcut eylem mantıksal sürücü kopyalamaya ihtiyaç duyar, " -"yeni tür çoğaltma olduğunda buna izin verilmez. Mantıksal sürücü = %s" - -#, python-format -msgid "Unable to snap Consistency Group %s" -msgstr "Tutarlılık Grubu %s kavranamıyor" - -msgid "Unable to terminate volume connection from backend." -msgstr "" -"Art alanda çalışan uygulamadan mantıksal sürücü bağlantısı sonlandırılamadı." - -#, python-format -msgid "Unable to terminate volume connection: %(err)s" -msgstr "Mantıksal sürücü bağlantısı sonlandırılamadı: %(err)s" - -#, python-format -msgid "Unable to update consistency group %s" -msgstr "%s tutarlılık grubu güncellenemedi" - -#, python-format -msgid "" -"Unable to verify initiator group: %(igGroupName)s in masking view " -"%(maskingViewName)s. " -msgstr "" -"%(maskingViewName)s maskeleme görünümündeki %(igGroupName)s başlatıcı grubu " -"doğrulanamıyor. " - -msgid "Unacceptable parameters." -msgstr "Kabul edilemez parametreler var." - -#, python-format -msgid "" -"Unexecpted mapping status %(status)s for mapping %(id)s. Attributes: " -"%(attr)s." -msgstr "" -"%(id)s eşleştirmesi için beklenmeyen eşleştirme durumu %(status)s. " -"Öznitelikler: %(attr)s." - -#, python-format -msgid "" -"Unexpected CLI response: header/row mismatch. header: %(header)s, row: " -"%(row)s." -msgstr "" -"Beklenmedik CLI yanıtı: başlık/satır eşleşmiyor. başlık: %(header)s, satır: " -"%(row)s." - -#, python-format -msgid "" -"Unexpected mapping status %(status)s for mapping%(id)s. Attributes: %(attr)s." -msgstr "" -"%(id)s eşleştirmesi için beklenmeyen eşleştirme durumu %(status)s. " -"Öznitelikler: %(attr)s." 
- -msgid "Unexpected response from Nimble API" -msgstr "Nimble API'sinden beklenmeyen yanıt" - -msgid "Unexpected status code" -msgstr "Beklenmeyen durum kodu" - -msgid "Unknown Gluster exception" -msgstr "Bilinmeyen Gluster istisnası" - -msgid "Unknown NFS exception" -msgstr "Bilinmeyen NFS istisnası" - -msgid "Unknown RemoteFS exception" -msgstr "Bilinmeyen RemoteFS istisnası" - -msgid "Unknown SMBFS exception." -msgstr "Bilinmeyen SMBFS istisnası." - -msgid "Unknown Virtuozzo Storage exception" -msgstr "Bilinmeyen Virtuozzo Depolama istisnası" - -msgid "Unknown action" -msgstr "Bilinmeyen eylem" - -#, python-format -msgid "Unknown or unsupported command %(cmd)s" -msgstr "Bilinmeyen ya da desteklenmeyen komut %(cmd)s" - -#, python-format -msgid "Unknown protocol: %(protocol)s." -msgstr "Bilinmeyen iletişim kuralı: %(protocol)s." - -#, python-format -msgid "Unknown quota resources %(unknown)s." -msgstr "%(unknown)s bilinmeyen kota kaynakları." - -msgid "Unknown sort direction, must be 'desc' or 'asc'" -msgstr "Bilinmeyen sıralama yönü, 'desc' ya da 'asc'" - -msgid "Unknown sort direction, must be 'desc' or 'asc'." -msgstr "Bilinmeyen sıralama yönü, 'desc' ya da 'asc' olmalıdır." - -msgid "Unmanage volume not implemented." -msgstr "Mantıksal sürücünün yönetimini bırakma uygulanmadı." - -#, python-format -msgid "Unrecognized QOS keyword: \"%s\"" -msgstr "Tanınmayan QOS anahtarı: \"%s\"" - -#, python-format -msgid "Unrecognized backing format: %s" -msgstr "Tanınmayan destekleme biçimi: %s " - -#, python-format -msgid "Unrecognized read_deleted value '%s'" -msgstr "Tanınmayan silinmiş okuma değeri '%s'" - -msgid "Unsupported Content-Type" -msgstr "Desteklenmeyen içerik türü" - -msgid "" -"Unsupported Data ONTAP version. Data ONTAP version 7.3.1 and above is " -"supported." -msgstr "" -"Desteklenmeyen Veri ONTAP sürümü. Veri ONTAP sürümü 7.3.1 ve yukarısı " -"destekleniyor." 
- -#, python-format -msgid "Unsupported backup metadata version (%s)" -msgstr "Desteklenmeyen yedekleme metadata sürümü (%s)" - -msgid "Unsupported backup metadata version requested" -msgstr "Desteklenmeyen bir yedekleme metadata sürümü isteniyor" - -msgid "Unsupported backup verify driver" -msgstr "Desteklenmeyen yedekleme doğrulama sürücüsü" - -#, python-format -msgid "" -"Unsupported firmware on switch %s. Make sure switch is running firmware v6.4 " -"or higher" -msgstr "" -"%s anahtarında desteklenmeyen üretici yazılımı. Anahtarın v6.4 ya da daha " -"yüksek üretici yazılımı kullandığından emin olun" - -#, python-format -msgid "Unsupported volume format: %s " -msgstr "Desteklenmeyen mantıksal sürücü biçimi: %s " - -msgid "" -"Update and delete quota operations can only be made by an admin of immediate " -"parent or by the CLOUD admin." -msgstr "" -"Kota güncelleme ve kota silme gibi işlemler ancak bir üst yönetici veya " -"CLOUD yöneticisi tarafindan yapilabilir." - -msgid "Updated At" -msgstr "Güncelleme saati" - -msgid "Upload to glance of attached volume is not supported." -msgstr "Eklenti mantıksal sürücüsü glance'ine yükleme desteklenmiyor." - -msgid "User ID" -msgstr "Kullanıcı ID" - -msgid "User does not have admin privileges" -msgstr "Kullanıcı yönetici ayrıcalıklarına sahip değil" - -msgid "User not authorized to perform WebDAV operations." -msgstr "Kullanıcı WebDAV işlemleri yapmaya yetkili değil." - -msgid "V2 rollback, volume is not in any storage group." -msgstr "V2 rollback, mantıksal sürücü herhangi bir depolama grubunda değil." - -msgid "V3 rollback" -msgstr "V3 geridönüş" - -#, python-format -msgid "VV Set %s does not exist." -msgstr "VV Kümesi %s mevcut değil." 
- -#, python-format -msgid "Valid consumer of QoS specs are: %s" -msgstr "QoS özelliklerinin geçerli tüketicisi: %s" - -#, python-format -msgid "Valid control location are: %s" -msgstr "Geçerli kontrol konumu: %s" - -#, python-format -msgid "" -"Value \"%(value)s\" is not valid for configuration option \"%(option)s\"" -msgstr "" -"\"%(option)s\" yapılandırma seçeneği için \"%(value)s\" değeri geçersizdir" - -#, python-format -msgid "Value %(param)s for %(param_string)s is not a boolean." -msgstr "%(param_string)s için %(param)s mantıksal bir değer değildir." - -msgid "Value required for 'scality_sofs_config'" -msgstr "'scality_sofs_config' için gerekli değer" - -#, python-format -msgid "ValueError: %s" -msgstr "ValueError: %s" - -#, python-format -msgid "Vdisk %(name)s not involved in mapping %(src)s -> %(tgt)s." -msgstr "%(name)s vdiski %(src)s -> %(tgt)s eşleştirmesiyle ilgili değil." - -#, python-format -msgid "" -"Version %(req_ver)s is not supported by the API. Minimum is %(min_ver)s and " -"maximum is %(max_ver)s." -msgstr "" -"Sürüm %(req_ver)s API tarafından desteklenmiyor. Asgari %(min_ver)s ve azami " -"%(max_ver)s." - -#, python-format -msgid "Virtual volume '%s' doesn't exist on array." -msgstr "Sanal mantıksal sürücü '%s' dizide mevcut değil." - -#, python-format -msgid "Vol copy job for dest %s failed." -msgstr "%s hedefi için mantıksal sürücü kopyalama işi başarısız." - -#, python-format -msgid "Volume %(deviceID)s not found." -msgstr "%(deviceID)s mantıksal sürücüsü bulunamadı." - -#, python-format -msgid "" -"Volume %(name)s not found on the array. Cannot determine if there are " -"volumes mapped." -msgstr "" -"Dizide %(name)s mantıksal sürücüsü bulunamadı. Eşleştirilmiş mantıksal " -"sürücünün olup olmadığı belirlenemiyor." - -#, python-format -msgid "Volume %(vol)s could not be created in pool %(pool)s." -msgstr "Mantıksal sürücü %(vol)s %(pool)s havuzunda oluşturulamadı." 
- -#, python-format -msgid "" -"Volume %(vol_id)s status must be available to update readonly flag, but " -"current status is: %(vol_status)s." -msgstr "" -" %(vol_id)s mantıksal sürücü durumu salt okunur bayrağını güncelleyebilmek " -"için kullanılabilir olmalıdır ancak mevcut durum: %(vol_status)s." - -#, python-format -msgid "" -"Volume %(vol_id)s status must be available, but current status is: " -"%(vol_status)s." -msgstr "" -"%(vol_id)s mantıksal sürücü durumu kullanılabilir olmalıdır ancak mevcut " -"durum: %(vol_status)s." - -#, python-format -msgid "Volume %(volume_id)s could not be found." -msgstr "%(volume_id)s bölümü bulunamadı." - -#, python-format -msgid "" -"Volume %(volume_id)s has no administration metadata with key " -"%(metadata_key)s." -msgstr "" -"%(volume_id)s mantıksal sürücüsü %(metadata_key)s anahtarı ile hiçbir " -"yönetici metadata'sına sahip değil." - -#, python-format -msgid "Volume %(volume_id)s has no metadata with key %(metadata_key)s." -msgstr "" -"%(volume_id)s mantıksal sürücüsü %(metadata_key)s anahtarı ile hiçbir " -"metadata'ya sahip değil." - -#, python-format -msgid "" -"Volume %(volume_id)s is currently mapped to unsupported host group %(group)s" -msgstr "" -"Mantıksal sürücü %(volume_id)s %(group)s desteklenmeyen istemci grubuyla " -"eşleştirilmiş" - -#, python-format -msgid "Volume %(volume_id)s is not currently mapped to host %(host)s" -msgstr "" -"%(volume_id)s mantıksal sürücüsü şu an %(host)s istemcisiyle eşleştirilmemiş" - -#, python-format -msgid "Volume %(volume_id)s is still attached, detach volume first." -msgstr "" -"Mantıksal sürücü %(volume_id)s hala ekli, ilk olarak mantıksal sürücüyü ayır." - -#, python-format -msgid "Volume %(volume_id)s replication error: %(reason)s" -msgstr "Mantıksal sürücü %(volume_id)s kopyalama hatası: %(reason)s" - -#, python-format -msgid "Volume %(volume_name)s is busy." -msgstr "Mantıksal sürücü %(volume_name)s meşgul." 
- -#, python-format -msgid "Volume %s could not be created from source volume." -msgstr "Mantıksal sürücü %s kaynak mantıksal sürücüden oluşturulamadı." - -#, python-format -msgid "Volume %s could not be created on shares." -msgstr "Mantıksal sürücü %s paylaşımlarda oluşturulamadı." - -#, python-format -msgid "Volume %s could not be created." -msgstr "Mantıksal sürücü %s oluşturulamadı." - -#, python-format -msgid "Volume %s does not have provider_location specified, skipping." -msgstr "" -"Mantıksal sürücü %s belirtilmiş provider_location değerine sahip değil, bu " -"adım geçiliyor." - -#, python-format -msgid "Volume %s doesn't exist on array." -msgstr "Mantıksal sürücü %s dizide mevcut değil." - -#, python-format -msgid "Volume %s must not be part of a consistency group." -msgstr "Mantıksal sürücü %s bir tutarlılık grubununun parçası olmamalıdır." - -#, python-format -msgid "Volume %s not found." -msgstr "%s mantıksal sürücüsü bulunamadı." - -#, python-format -msgid "Volume %s: Error trying to extend volume" -msgstr "Mantıksal Sürücü %s: Mantıksal sürücü genişletme denenirken hata" - -#, python-format -msgid "Volume (%s) already exists on array" -msgstr "Mantıksal sürücü (%s) zaten dizi üzerinde mevcut" - -#, python-format -msgid "Volume (%s) already exists on array." -msgstr "Mantıksal sürücü (%s) dizide zaten mevcut." - -#, python-format -msgid "Volume Group %s does not exist" -msgstr "Mantıksal Sürücü %s yok" - -#, python-format -msgid "Volume Type %(id)s already exists." -msgstr "Mantıksal Sürücü Türü %(id)s zaten var." - -#, python-format -msgid "" -"Volume Type %(volume_type_id)s deletion is not allowed with volumes present " -"with the type." -msgstr "" -"Türde mevcut mantıksal sürücü varsa %(volume_type_id)s mantıksal sürücü " -"silmeye izin verilmez." - -#, python-format -msgid "" -"Volume Type %(volume_type_id)s has no extra specs with key " -"%(extra_specs_key)s." 
-msgstr "" -"%(volume_type_id)s mantıksal sürücü türü %(extra_specs_key)s anahtarı ile " -"hiçbir ek özelliğe sahip değil." - -msgid "Volume Type id must not be None." -msgstr "Mantıksal Sürücü Türü bilgisi Hiçbiri olamaz." - -#, python-format -msgid "Volume [%s] not found in CloudByte storage." -msgstr "CloudByte depolamada [%s] mantıksal sürücüsü bulunamadı." - -#, python-format -msgid "Volume attachment could not be found with filter: %(filter)s ." -msgstr "Mantıksal sürücü eki şu süzgeç ile bulunamadı: %(filter)s ." - -#, python-format -msgid "Volume backend config is invalid: %(reason)s" -msgstr "" -"Mantıksal sürücü art alanda çalışan uygulama yapılandırması geçersizdir: " -"%(reason)s" - -msgid "Volume by this name already exists" -msgstr "Bu isimde mantıksal sürücü zaten mevcut" - -msgid "Volume cannot be restored since it contains snapshots." -msgstr "Mantıksal sürücü anlık görüntüler içerdiğinden geri yüklenemiyor." - -#, python-format -msgid "Volume device file path %s does not exist." -msgstr "Mantıksal sürücü dosya yolu %s yok." - -#, python-format -msgid "Volume device not found at %(device)s." -msgstr "Mantıksal sürücü aygıtı %(device)s'da bulunamadı." - -#, python-format -msgid "Volume driver %s not initialized." -msgstr "Mantıksal sürücü sürücüsü %s ilklendirilmemiş." - -msgid "Volume driver not ready." -msgstr "Mantıksal sürücü hazır değil." - -#, python-format -msgid "Volume driver reported an error: %(message)s" -msgstr "Mantıksal sürücü bir hata bildirdi: %(message)s" - -msgid "Volume is in-use." -msgstr "Mantıksal sürücü kullanımda." - -msgid "Volume is not available." -msgstr "Disj bölümü uygun değil." - -msgid "Volume is not local to this node" -msgstr "Mantıksal sürücü bu düğüme yerel değil" - -msgid "Volume is not local to this node." -msgstr "Mantıksal sürücü bu düğüme yerel değil." - -msgid "" -"Volume metadata backup requested but this driver does not yet support this " -"feature." 
-msgstr "" -"Mantıksal sürücü metadata yedekleme istedi ancak bu sürücü henüz bu özelliği " -"desteklemiyor." - -#, python-format -msgid "Volume migration failed: %(reason)s" -msgstr "Mantıksal sürücü göçü başarısız oldu: %(reason)s" - -msgid "Volume must be available" -msgstr "Mantıksal sürücü kullanılabilir olmalıdır" - -msgid "Volume must be in the same availability zone as the snapshot" -msgstr "Mantıksal sürücü anlık görüntüyle aynı kullanılabilir bölgede olmalı" - -msgid "Volume must be in the same availability zone as the source volume" -msgstr "" -"Mantıksal sürücü kaynak mantıksal sürücüyle aynı kullanılabilir bölgede " -"olmalı" - -msgid "Volume must not be replicated." -msgstr "Mantıksal sürücü çoğaltılmış olmamalı." - -msgid "Volume must not have snapshots." -msgstr "Mantıksal sürücü anlık sistem görüntülerine sahip olmamalıdır." - -#, python-format -msgid "Volume not found for instance %(instance_id)s." -msgstr "%(instance_id)s sunucusu için mantıksal sürücü bulunamadı." - -msgid "Volume not found on configured storage backend." -msgstr "Mantıksal sürücü yapılandırılmış depolama arka ucunda bulunamadı." - -msgid "Volume not found on configured storage pools." -msgstr "Mantıksal sürücü yapılandırılan depolama havuzlarında bulunamadı." - -msgid "Volume not found." -msgstr "Mantıksal sürücü bulunamadı." - -msgid "Volume not yet assigned to host." -msgstr "Mantıksal sürücü henüz bir istemciye atanmadı." - -msgid "Volume reference must contain source-name element." -msgstr "Mantıksal sürücü kaynağı kaynak-ad ögesi içermelidir." - -#, python-format -msgid "Volume replication for %(volume_id)s could not be found." -msgstr "%(volume_id)s için mantıksal sürücü kopyalaması bulunamadı." - -#, python-format -msgid "Volume service %s failed to start." -msgstr "Disk bölümü servisi %s başlatma hatası." - -#, python-format -msgid "" -"Volume size %(volume_size)sGB cannot be smaller than the image minDisk size " -"%(min_disk)sGB." 
-msgstr "" -"Mantıksal sürücü boyutu %(volume_size)sGB %(min_disk)sGB imaj asgari " -"boyutundan küçük olamaz." - -#, python-format -msgid "Volume size '%(size)s' must be an integer and greater than 0" -msgstr "Mantıksal sürücü boyutu '%(size)s' bir tam sayı ve 0'dan büyük olmalı" - -#, python-format -msgid "" -"Volume size '%(size)s'GB cannot be smaller than original volume size " -"%(source_size)sGB. They must be >= original volume size." -msgstr "" -"Mantıksal sürücü boyutu '%(size)s'GB %(source_size)sGB asıl mantıksal sürücü " -"boyutundan küçük olamaz. Asıl mantıksal sürücü boyutundan >= olmalıdırlar." - -#, python-format -msgid "" -"Volume size '%(size)s'GB cannot be smaller than the snapshot size " -"%(snap_size)sGB. They must be >= original snapshot size." -msgstr "" -"Mantıksal sürücü boyutu '%(size)s'GB %(snap_size)sGB anlık görüntü " -"boyutundan küçük olamaz. Asıl anlık görüntü boyutundan >= olmalıdırlar." - -msgid "Volume size increased since the last backup. Do a full backup." -msgstr "" -"Mantıksal sürücü boyutu son yedeklemeden bu yana arttı. Tam bir yedekleme " -"yapın." - -#, python-format -msgid "Volume status must be \"available\" or \"in-use\" for snapshot. (is %s)" -msgstr "" -"Mantıksal sürücü durumu \"available\" ya da \"in-use\" olmalıdır. (is %s)" - -msgid "Volume status must be \"available\" or \"in-use\"." -msgstr "Mantıksal sürücü durumu \"available\" ya da \"in-use\" olmalıdır." - -msgid "Volume status must be 'available'." -msgstr "Mantıksal sürücü durumu 'available' olmalıdır." - -msgid "Volume to Initiator Group mapping already exists" -msgstr "Başlatıcı Gruba mantıksal sürücü eşleme zaten var" - -msgid "Volume to be restored to must be available" -msgstr "Geri yüklenecek mantıksal sürücü kullanılabilir olmalıdır" - -#, python-format -msgid "Volume type %(volume_type_id)s could not be found." -msgstr "%(volume_type_id)s mantıksal sürücü türü bulunamadı." - -#, python-format -msgid "Volume type ID '%s' is invalid." 
-msgstr "Mantıksal sürücü tür kimliği '%s' geçersiz." - -#, python-format -msgid "" -"Volume type access for %(volume_type_id)s / %(project_id)s combination " -"already exists." -msgstr "" -"%(volume_type_id)s / %(project_id)s birleşimi için mantıksal sürücü türü " -"erişimi zaten var." - -#, python-format -msgid "" -"Volume type access not found for %(volume_type_id)s / %(project_id)s " -"combination." -msgstr "" -"%(volume_type_id)s / %(project_id)s birleşimi için mantıksal sürücü erişimi " -"bulunamadı." - -#, python-format -msgid "Volume type encryption for type %(type_id)s already exists." -msgstr "%(type_id)s türü için mantıksal sürücü şifreleme zaten var." - -#, python-format -msgid "Volume type encryption for type %(type_id)s does not exist." -msgstr "%(type_id)s türü için mantıksal sürücü türü yok." - -msgid "Volume type name can not be empty." -msgstr "Mantıksal sürücü türü boş olamaz." - -#, python-format -msgid "Volume type with name %(volume_type_name)s could not be found." -msgstr "%(volume_type_name)s adında mantıksal sürücü türü bulunamadı." - -#, python-format -msgid "" -"Volume: %(volumeName)s is not a concatenated volume. You can only perform " -"extend on concatenated volume. Exiting..." -msgstr "" -"Mantıksal sürücü: %(volumeName)s bitiştirilmiş bir mantıksal sürücü değil. " -"Büyütmeyi ancak bitiştirilmiş mantıksal sürücü üzerinde yapabilirsiniz. " -"Çıkılıyor..." - -#, python-format -msgid "" -"VzStorage config 'vzstorage_used_ratio' invalid. Must be > 0 and <= 1.0: %s." -msgstr "" -"VzStorage 'vzstorage_used_ratio' yapılandırması geçersiz. Değer > 0 ve <= " -"1.0 olmalıdır: %s." - -#, python-format -msgid "VzStorage config file at %(config)s doesn't exist." -msgstr "%(config)s konumunda VzStorage yapılandırma dosyası yok." - -msgid "Wait replica complete timeout." -msgstr "Kopyanın tamamlanmasını bekleme zaman aşımına uğradı." - -msgid "X-IO Volume Driver exception!" -msgstr "kural dışı X-IO disk dürücüsü!" 
- -msgid "XtremIO not configured correctly, no iscsi portals found" -msgstr "XtremIO düzgün yapılandırılmamış, iscsi portalı bulunamadı" - -msgid "XtremIO not initialized correctly, no clusters found" -msgstr "XtremIO düzgün ilklendirilmemiş, küme bulunamadı" - -msgid "You must implement __call__" -msgstr "__call__ fonksiyonunu uygulamalısınız." - -msgid "You must supply an array in your EMC configuration file." -msgstr "EMC yapılandırma dosyasında bir dizi sağlamalısınız." - -#, python-format -msgid "" -"Your original size: %(originalVolumeSize)s GB is greater than: %(newSize)s " -"GB. Only Extend is supported. Exiting..." -msgstr "" -"Özgün boyutunuz: %(originalVolumeSize)s GB yeni boyuttan daha büyük: " -"%(newSize)s GB. Sadece Genişletme destekleniyor. Çıkılıyor..." - -#, python-format -msgid "ZeroDivisionError: %s" -msgstr "ZeroDivisionError: %s" - -msgid "Zone" -msgstr "Bölge" - -#, python-format -msgid "Zoning Policy: %s, not recognized" -msgstr "Bölgeleme İlkesi: %s, tanınmıyor" - -#, python-format -msgid "_create_and_copy_vdisk_data: Failed to get attributes for vdisk %s." -msgstr "" -"_create_and_copy_vdisk_data: %s vdiski için özniteliklerin alınması " -"başarısız." - -msgid "_create_host failed to return the host name." -msgstr "_create_host istemci adını döndürmede başarısız." - -msgid "" -"_create_host: Can not translate host name. Host name is not unicode or " -"string." -msgstr "" -"_create_host: Makine adı dönüştürülemieyor. İstemci ismi evrensel kodda veya " -"karakter dizisi değil." - -msgid "_create_host: No connector ports." -msgstr "_create_host: Bağlayıcı bağlantı noktası yok." 
- -#, python-format -msgid "" -"_create_vdisk %(name)s - did not find success message in CLI output.\n" -" stdout: %(out)s\n" -" stderr: %(err)s" -msgstr "" -"_create_vdisk %(name)s - CLI çıktısında başarı iletisi bulunamadı.\n" -" stdout: %(out)s\n" -" stderr: %(err)s" - -#, python-format -msgid "" -"_get_hdr_dic: attribute headers and values do not match.\n" -" Headers: %(header)s\n" -" Values: %(row)s." -msgstr "" -"_get_hdr_dic: öznitelik başlıkları ve değerleri eşleşmiyor.\n" -" Başlıklar: %(header)s\n" -" Değerler: %(row)s." - -msgid "_get_host_from_connector failed to return the host name for connector." -msgstr "" -"_get_host_from_connector bağlayıcı için istemci adını getirmeyi başaramadı." - -msgid "" -"_get_vdisk_map_properties: Could not get FC connection information for the " -"host-volume connection. Is the host configured properly for FC connections?" -msgstr "" -"_get_vdisk_map_properties: İstemci-mantıksal sürücü bağlantısı için FC " -"bağlantı bilgisi alınamadı. İstemci FC bağlantıları için düzgün " -"yapılandırılmış mı?" - -#, python-format -msgid "" -"_get_vdisk_map_properties: No node found in I/O group %(gid)s for volume " -"%(vol)s." -msgstr "" -"_get_vdisk_map_properties: %(vol)s mantıksal sürücüsü için %(gid)s I/O " -"grubunda düğüm bulunamadı." - -msgid "_update_volume_stats: Could not get storage pool data." -msgstr "_update_volume_stats: Depolama havuzu verisi alınamadı." - -#, python-format -msgid "" -"add_vdisk_copy failed: A copy of volume %s exists. Adding another copy would " -"exceed the limit of 2 copies." -msgstr "" -"add_vdisk_copy başarısız: %s mantıksal sürücüsünün kopyası mevcut. Başka bir " -"kopya eklemek 2 kopya sınırının aşılmasına sebep olur." - -msgid "add_vdisk_copy started without a vdisk copy in the expected pool." -msgstr "add_vdisk_copy beklenen havuzda bir vdisk kopyası olmadan başlatıldı." 
- -msgid "already created" -msgstr "zaten oluşturuldu" - -msgid "attach snapshot from remote node" -msgstr "Uzak düğüme anlık görüntüyü ekle" - -#, python-format -msgid "attribute %s not lazy-loadable" -msgstr "%s özniteliği tembel-yüklenebilir değil" - -#, python-format -msgid "" -"backup: %(vol_id)s failed to create device hardlink from %(vpath)s to " -"%(bpath)s.\n" -"stdout: %(out)s\n" -" stderr: %(err)s" -msgstr "" -"yedek: %(vol_id)s %(vpath)s den %(bpath)s e aygıt linki oluşturmada " -"başarısız.\n" -"stdout: %(out)s\n" -" stderr: %(err)s" - -#, python-format -msgid "" -"backup: %(vol_id)s failed to obtain backup success notification from " -"server.\n" -"stdout: %(out)s\n" -" stderr: %(err)s" -msgstr "" -"backup: %(vol_id)s sunucudan yedekleme başarı bildirimini alamadı.\n" -"stdout: %(out)s\n" -" stderr: %(err)s" - -#, python-format -msgid "" -"backup: %(vol_id)s failed to run dsmc due to invalid arguments on " -"%(bpath)s.\n" -"stdout: %(out)s\n" -" stderr: %(err)s" -msgstr "" -"backup: %(vol_id)s geçersiz değişkenler nedeniyle %(bpath)s yolundaki dsmc " -"komutunu çalıştıramadı.\n" -"stdout: %(out)s\n" -" stderr: %(err)s" - -#, python-format -msgid "" -"backup: %(vol_id)s failed to run dsmc on %(bpath)s.\n" -"stdout: %(out)s\n" -" stderr: %(err)s" -msgstr "" -"backup: %(vol_id)s %(bpath)s yolundaki dsmc komutunu çalıştıramadı.\n" -"stdout: %(out)s\n" -" stderr: %(err)s" - -#, python-format -msgid "backup: %(vol_id)s failed. %(path)s is not a file." -msgstr "backup: %(vol_id)s başarısız oldu. %(path)s dosya değildir." - -#, python-format -msgid "" -"backup: %(vol_id)s failed. %(path)s is unexpected file type. Block or " -"regular files supported, actual file mode is %(vol_mode)s." -msgstr "" -"backup: %(vol_id)s başarısız oldu. %(path)s beklenmeyen dosya türü. Blok ya " -"da normal dosyalar desteklenir, mevcut dosya kipi %(vol_mode)s." - -#, python-format -msgid "" -"backup: %(vol_id)s failed. Cannot obtain real path to volume at %(path)s." 
-msgstr "" -"backup: %(vol_id)s başarısız oldu. %(path)s yolundaki mantıksal sürücüye " -"gerçek yol sağlanamıyor." - -msgid "being attached by different mode" -msgstr "farklı kipler ile eklenme" - -msgid "can't find the volume to extend" -msgstr "büyütülecek mantıksal sürücü bulunamadı" - -msgid "can't handle both name and index in req" -msgstr "istekte hem isim hem indis işlenemez" - -msgid "cannot understand JSON" -msgstr "JSON dosyası anlaşılamadı" - -msgid "" -"check_for_setup_error: Password or SSH private key is required for " -"authentication: set either san_password or san_private_key option." -msgstr "" -"check_for_setup_error: Yetkilendirme için parola veya SSH özel anahtarı " -"gerekiyor: san_password veya san_private_key seçeneklerinden birini " -"ayarlayın." - -msgid "check_for_setup_error: Unable to determine system id." -msgstr "check_for_setup_error: Sistem kimliği belirlenemiyor." - -msgid "check_for_setup_error: Unable to determine system name." -msgstr "check_for_setup_error: Sistem ismi belirlenemiyor." - -#, python-format -msgid "clone depth exceeds limit of %s" -msgstr "çoğaltma derinliği %s sınırını aşıyor" - -msgid "control_location must be defined" -msgstr "control_location tanımlanmalıdır" - -msgid "create_cloned_volume: Source and destination size differ." -msgstr "create_cloned_volume: Kaynak ve hedef boyutu farklı." - -#, python-format -msgid "create_copy: Source vdisk %(src)s (%(src_id)s) does not exist." -msgstr "create_copy: Kaynak vdisk %(src)s (%(src_id)s) mevcut değil." - -#, python-format -msgid "create_copy: Source vdisk %(src)s does not exist." -msgstr "create_copy: Kaynak vdisk %(src)s mevcut değil." - -msgid "create_host: Host name is not unicode or string." -msgstr "create_host: İstemci adı evrensel kod ya da karakter dizisi değil." - -msgid "create_host: No initiators or wwpns supplied." -msgstr "create_host: Başlatıcılar veya wwpn'ler sağlanmadı." 
- -#, python-format -msgid "" -"create_snapshot: Volume status must be \"available\" or \"in-use\" for " -"snapshot. The invalid status is %s." -msgstr "" -"create_snapshot: Anlık görüntü için mantıksal sürücü durumu \"kullanılabilir" -"\" veya \"kullanımda\" olmalı. Geçersiz durum %s." - -msgid "create_snapshot: get source volume failed." -msgstr "create_snapshot: kaynak mantıksal sürücüyü alma başarısız." - -#, python-format -msgid "create_volume_from_snapshot: Snapshot %(name)s does not exist." -msgstr "create_volume_from_snapshot: Anlık görüntü %(name)s mevcut değil." - -#, python-format -msgid "" -"create_volume_from_snapshot: Snapshot status must be \"available\" for " -"creating volume. The invalid status is: %s." -msgstr "" -"create_volume_from_snapshot: Mantıksal sürücü oluşturma için anlık görüntü " -"durumu \"kullanılabilir\" olmalı. Geçersiz durum: %s." - -msgid "" -"create_volume_from_snapshot: Volume size is different from snapshot based " -"volume." -msgstr "" -"create_volume_from_snapshot: Mantıksal sürücü boyutu anlık görüntü tabanlı " -"mantıksal sürücüden farklı." 
- -#, python-format -msgid "" -"delete: %(vol_id)s failed to run dsmc due to invalid arguments with stdout: " -"%(out)s\n" -" stderr: %(err)s" -msgstr "" -"delete: %(vol_id)s geçersiz değişkenler nedeniyle dsmc komutunu " -"çalıştıramadı stdout çıktısı ile: %(out)s\n" -" stderr: %(err)s" - -#, python-format -msgid "" -"delete: %(vol_id)s failed to run dsmc with stdout: %(out)s\n" -" stderr: %(err)s" -msgstr "" -"delete: %(vol_id)s dsmc komutunu çalıştıramadı stdout çıktısı ile: %(out)s\n" -" stderr: %(err)s" - -#, python-format -msgid "deleting snapshot %(snapshot_name)s that has dependent volumes" -msgstr "" -"bağımlı mantıksal sürücülere sahip olan %(snapshot_name)s anlık sistem " -"görüntüsü siliniyor" - -#, python-format -msgid "deleting volume %(volume_name)s that has snapshot" -msgstr "" -"anlık sistem görüntüsüne sahip olan %(volume_name)s mantıksal sürücüsü " -"siliniyor" - -msgid "detach snapshot from remote node" -msgstr "Uzak düğümden anlık görüntüyü çıkart." - -msgid "do_setup: No configured nodes." -msgstr "do_setup: Yapılandırılmış düğüm yok." - -#, python-format -msgid "" -"error writing object to swift, MD5 of object in swift %(etag)s is not the " -"same as MD5 of object sent to swift %(md5)s" -msgstr "" -"swift'e nesne yazarken hata, swift içindeki nesnenin MD5 değeri %(etag)s " -"swift'e gönderilen nesnenin MD5 %(md5)s değeri ile aynı değildir" - -msgid "failed to create new_volume on destination host" -msgstr "Hedef istemci üzerinde yeni mantıksal sürücü oluşturulamadı" - -msgid "fake" -msgstr "sahte" - -#, python-format -msgid "file already exists at %s" -msgstr "dosya %s konumunda zaten var" - -msgid "fileno() not supported by RBD()" -msgstr "fileno() RBD() tarafından desteklenmez" - -msgid "" -"flashsystem_multihostmap_enabled is set to False, not allow multi host " -"mapping. CMMVC6071E The VDisk-to-host mapping was not created because the " -"VDisk is already mapped to a host." 
-msgstr "" -"flashsystem_multihostmap_enabled False olarak ayarlanmış, çoklu istemci " -"eşleştirmeye izin verme. CMMVC6071E VDisk-to-host eşleştirmesi oluşturulmadı " -"çünkü VDisk zaten bir istemciye eşleştirilmiş." - -msgid "flush() not supported in this version of librbd" -msgstr "flush() librbd kütüphanesinin bu sürümünde desteklenmiyor" - -#, python-format -msgid "fmt=%(fmt)s backed by: %(backing_file)s" -msgstr "fmt=%(fmt)s şununla desteklenir: %(backing_file)s" - -#, python-format -msgid "fmt=%(fmt)s backed by:%(backing_file)s" -msgstr "fmt=%(fmt)s şununla desteklenir:%(backing_file)s" - -msgid "force delete" -msgstr "silmeye zorla" - -#, python-format -msgid "" -"gpfs_images_share_mode is set to copy_on_write, but %(vol)s and %(img)s " -"belong to different file systems." -msgstr "" -"gpfs_images_share_mode copy_on_write olarak ayarlanmış, ama %(vol)s ve " -"%(img)s farklı dosya sistemlerine ait." - -#, python-format -msgid "" -"gpfs_images_share_mode is set to copy_on_write, but %(vol)s and %(img)s " -"belong to different filesets." -msgstr "" -"gpfs_images_share_mode copy_on_write olarak ayarlanmış, ama %(vol)s ve " -"%(img)s farklı dosya kümelerine ait." - -#, python-format -msgid "" -"hgst_group %(grp)s and hgst_user %(usr)s must map to valid users/groups in " -"cinder.conf" -msgstr "" -"hgst_group %(grp)s ve hgst_user %(usr)s cinder.conf dosyasında geçerli " -"kullanıcılar/gruplar olarak eşlenmelidir" - -#, python-format -msgid "hgst_net %(net)s specified in cinder.conf not found in cluster" -msgstr "" -"cinder.conf dosyasında belirtilen hgst_net %(net)s küme içinde bulunamadı" - -msgid "hgst_redundancy must be set to 0 (non-HA) or 1 (HA) in cinder.conf." -msgstr "" -"hgst_redundancy cinder.conf dosyasında 0 (non-HA) ya da 1 (HA) olarak " -"ayarlanmalıdır." 
- -msgid "hgst_space_mode must be an octal/int in cinder.conf" -msgstr "hgst_space_mode cinder.conf dosyasında bir sekizli/tam sayı olmalıdır" - -#, python-format -msgid "hgst_storage server %(svr)s not of format :" -msgstr "hgst_storage sunucusu %(svr)s : biçiminde değil" - -msgid "hgst_storage_servers must be defined in cinder.conf" -msgstr "hgst_storage_servers cinder.conf dosyasında tanımlanmalıdır" - -msgid "" -"http service may have been abruptly disabled or put to maintenance state in " -"the middle of this operation." -msgstr "" -"http servisi bu işlemin ortasında ansızın kapatılmış ya da bakım durumuna " -"alınmış olabilir." - -msgid "id cannot be None" -msgstr "id Hiçbiri olamaz" - -#, python-format -msgid "image %s not found" -msgstr "imaj %s bulunamadı" - -#, python-format -msgid "initialize_connection: Failed to get attributes for volume %s." -msgstr "" -"initialize_connection: %s mantıksal sürücüsü için özniteliklerin alınması " -"başarısız." - -#, python-format -msgid "initialize_connection: Missing volume attribute for volume %s." -msgstr "" -"initialize_connection: %s mantıksal sürücüsü için mantıksal sürücü " -"özniteliği eksik." - -#, python-format -msgid "" -"initialize_connection: No node found in I/O group %(gid)s for volume %(vol)s." -msgstr "" -"initialize_connection: %(vol)s mantıksal sürücüsü için %(gid)s I/O grubundan " -"düğüm bulunamadı." - -#, python-format -msgid "initialize_connection: vdisk %s is not defined." -msgstr "initialize_connection: vdisk %s tanımlanmamış." - -#, python-format -msgid "invalid user '%s'" -msgstr "geçersiz kullanıcı '%s'" - -#, python-format -msgid "iscsi portal, %s, not found" -msgstr "iscsi portalı, %s bulunamadı" - -msgid "" -"iscsi_ip_address must be set in config file when using protocol 'iSCSI'." -msgstr "" -"'iSCSI' iletişim kuralı kullanılırken iscsi_ip_address yapılandırma " -"dosyasında ayarlanmalı." 
- -#, python-format -msgid "key manager error: %(reason)s" -msgstr "anahtar yönetici hatası: %(reason)s" - -msgid "limit param must be an integer" -msgstr "Sınır parametresi tam sayı olmak zorunda" - -msgid "limit param must be positive" -msgstr "Sınır parametresi pozitif olmak zorunda" - -msgid "manage_existing requires a 'name' key to identify an existing volume." -msgstr "" -"manage_existing varolan bir mantıksal sürücüyü tanımlamak için bir 'name' " -"anahtarı ister." - -#, python-format -msgid "marker [%s] not found" -msgstr " [%s] göstergesi bulunamadı" - -#, python-format -msgid "mdiskgrp missing quotes %s" -msgstr "mdiskgrp eksik kotalar %s" - -#, python-format -msgid "migration_policy must be 'on-demand' or 'never', passed: %s" -msgstr "migration_policy 'on-demand' ya da 'never' olmalıdır, geçilen: %s" - -#, python-format -msgid "mkfs failed on volume %(vol)s, error message was: %(err)s." -msgstr "" -"mkfs %(vol)s mantıksal sürücüsü üzerinde başarısız oldu, hata iletisi: " -"%(err)s." 
- -msgid "mock" -msgstr "sahte" - -msgid "mount.glusterfs is not installed" -msgstr "mount.glusterfs kurulu değil" - -#, python-format -msgid "multiple resources with name %s found by drbdmanage" -msgstr "drbdmanage tarafından %s ismine sahip birden fazla kaynak bulundu" - -#, python-format -msgid "multiple resources with snapshot ID %s found" -msgstr "%s anlık sistem görüntüsü ile birden fazla kaynak bulundu" - -msgid "name cannot be None" -msgstr "ad Hiçbiri olamaz" - -#, python-format -msgid "no snapshot with id %s found in drbdmanage" -msgstr "drbdmanage içinde %s anlık sistem görüntüsü bulunamadı" - -#, python-format -msgid "not exactly one snapshot with id %s" -msgstr "tam olarak %s kimliğine sahip tek bir anlık görüntü değil" - -#, python-format -msgid "not exactly one volume with id %s" -msgstr "tam olarak %s kimliğine sahip tek bir mantıksal sürücü değil" - -#, python-format -msgid "obj missing quotes %s" -msgstr "obj eksik kotalar %s" - -msgid "open_access_enabled is not off." -msgstr "open_access_enabled kapalı değil." - -msgid "progress must be an integer percentage" -msgstr "ilerleme bir tam sayı yüzdesi olmalıdır" - -msgid "provider must be defined" -msgstr "sağlayıcı tanımlanmalıdır" - -#, python-format -msgid "" -"qemu-img %(minimum_version)s or later is required by this volume driver. " -"Current qemu-img version: %(current_version)s" -msgstr "" -"qemu-img %(minimum_version)s ya da sonraki sürümler bu mantıksal sürücü için " -"gereklidir. Mevcut qemu-img sürümü: %(current_version)s" - -#, python-format -msgid "" -"qemu-img is not installed and image is of type %s. Only RAW images can be " -"used if qemu-img is not installed." -msgstr "" -"qemu-img kurulu değil ve imaj türü %s. Eğer qemu-img kurulu değilse, sadece " -"RAW imajlar kullanılabilir." - -msgid "" -"qemu-img is not installed and the disk format is not specified. Only RAW " -"images can be used if qemu-img is not installed." -msgstr "" -"qemu-img kurulu değil ve disk biçimi belirtilmemiş. 
Eğer qemu-img kurulu " -"değilse, sadece RAW imajlar kullanılabilir." - -msgid "rados and rbd python libraries not found" -msgstr "rados ve rbd python kütüphaneleri bulunamadı" - -#, python-format -msgid "read_deleted can only be one of 'no', 'yes' or 'only', not %r" -msgstr "" -"read_deleted sadece 'no', 'yes' ya da 'only', %r hariç seçeneklerinden biri " -"olabilir" - -#, python-format -msgid "" -"restore: %(vol_id)s failed to run dsmc due to invalid arguments on " -"%(bpath)s.\n" -"stdout: %(out)s\n" -" stderr: %(err)s" -msgstr "" -"restore: %(vol_id)s geçersiz değişkenler nedeniyle %(bpath)s yolundaki dsmc " -"komutunu çalıştıramadı.\n" -"stdout: %(out)s\n" -" stderr: %(err)s" - -#, python-format -msgid "" -"restore: %(vol_id)s failed to run dsmc on %(bpath)s.\n" -"stdout: %(out)s\n" -" stderr: %(err)s" -msgstr "" -"restore: %(vol_id)s %(bpath)s yolundaki dsmc komutunu çalıştıramadı.\n" -"stdout: %(out)s\n" -" stderr: %(err)s" - -#, python-format -msgid "" -"restore: %(vol_id)s failed.\n" -"stdout: %(out)s\n" -" stderr: %(err)s." -msgstr "" -"restore: %(vol_id)s başarısız oldu.\n" -"stdout: %(out)s\n" -" stderr: %(err)s." - -msgid "" -"restore_backup aborted, actual object list does not match object list stored " -"in metadata." -msgstr "" -"restore_backup durduruldu, gerçek nesne listesi metadata'da depolanan nesne " -"listesi ile eşleşmiyor." - -#, python-format -msgid "rtslib_fb is missing member %s: You may need a newer python-rtslib-fb." -msgstr "" -"rtslib_fb eksik %s üyesidir: Daha yeni bir python-rtslib-fb kütüphanesine " -"ihtiyacınız olabilir." - -msgid "san_ip is not set." -msgstr "san_ip ayarlanmamış." - -msgid "san_ip must be set" -msgstr "san_ip ayarlanmış olmalı" - -msgid "" -"san_login and/or san_password is not set for Datera driver in the cinder." -"conf. Set this information and start the cinder-volume service again." -msgstr "" -"cinder.conf dosyasında Datera sürücüsü için san_login ve/ya da san_password " -"ayarlı değil. 
Bu bilgileri ayarla ve cinder-volume servisini tekrar başlat." - -msgid "serve() can only be called once" -msgstr "serve() sadece bir kez çağrılabilir" - -#, python-format -msgid "source volume id:%s is not replicated" -msgstr "kaynak mantıksal sürücü kimliği:%s çoğaltılmamıştır" - -msgid "status must be available" -msgstr "durum kullanılabilir olmalıdır" - -#, python-format -msgid "" -"targetcli not installed and could not create default directory " -"(%(default_path)s): %(exc)s" -msgstr "" -"targetcli kurulu değil ve öntanımlı dizin (%(default_path)s) oluşturulamadı: " -"%(exc)s" - -msgid "terminate_connection: Failed to get host name from connector." -msgstr "terminate_connection: Bağlayıcıdan istemci adı alma başarısız." - -msgid "timeout creating new_volume on destination host" -msgstr "" -"Hedef istemci üzerinde yeni mantıksal sürücü oluşturulurken zaman aşımı" - -msgid "too many body keys" -msgstr "Çok sayıda gövde anahtarları" - -#, python-format -msgid "umount: %s: not mounted" -msgstr "umount: %s: bağlı değil" - -#, python-format -msgid "umount: %s: target is busy" -msgstr "umount: %s: hedef meşgul" - -msgid "umount: : some other error" -msgstr "umount: : bazı diğer hata" - -msgid "umount: : target is busy" -msgstr "umount: : hedef meşgul" - -#, python-format -msgid "unrecognized argument %s" -msgstr "tanınmayan değişken %s" - -#, python-format -msgid "unsupported compression algorithm: %s" -msgstr "desteklenmeyen sıkıştırma algoritması: %s" - -msgid "valid iqn needed for show_target" -msgstr "show_target için geçerli iqn gerekli" - -#, python-format -msgid "vdisk %s is not defined." -msgstr "vdisk %s tanımlanmamış." 
- -msgid "vmemclient python library not found" -msgstr "vmemclient python kitaplığı bulunamadı" - -#, python-format -msgid "volume %s not found in drbdmanage" -msgstr "drbdmanage içinde %s mantıksal sürücüsü bulunamadı" - -msgid "volume assigned" -msgstr "mantıksal sürücü atandı" - -msgid "volume changed" -msgstr "mantıksal sürücü değiştirildi" - -msgid "volume is already attached" -msgstr "mantıksal sürücü zaten ekli" - -msgid "volume is not local to this node" -msgstr "mantıksal sürücü bu düğüme yerel değil" - -#, python-format -msgid "" -"volume size %(volume_size)d is too small to restore backup of size %(size)d." -msgstr "" -"%(volume_size)d mantıksal sürücü boyutu %(size)d boyutundaki yedeği geri " -"yüklemek için çok küçük." - -#, python-format -msgid "volume size %d is invalid." -msgstr "Hatalı disk bölümü boyutu %d" - -msgid "" -"volume_type must be provided when creating a volume in a consistency group." -msgstr "" -"Tutarlılık grubunda bir mantıksal sürücü oluşturulurken volume_type " -"verilmelidir." - -msgid "volume_type_id cannot be None" -msgstr "volume_type_id Hiçbiri olamaz" - -#, python-format -msgid "volume_types must be provided to create consistency group %(name)s." -msgstr "volume_types, tutarlılık grubu %(name)s oluşturmak için verilmelidir." - -#, python-format -msgid "volume_types must be provided to create consistency group %s." -msgstr "%s tutarlılık grubu oluşturmak için volume_types verilmelidir." diff --git a/cinder/locale/zh_CN/LC_MESSAGES/cinder.po b/cinder/locale/zh_CN/LC_MESSAGES/cinder.po deleted file mode 100644 index 42cd58cf2..000000000 --- a/cinder/locale/zh_CN/LC_MESSAGES/cinder.po +++ /dev/null @@ -1,10174 +0,0 @@ -# Translations template for cinder. -# Copyright (C) 2015 ORGANIZATION -# This file is distributed under the same license as the cinder project. 
-# -# Translators: -# FIRST AUTHOR , 2011 -# Kai Zhang , 2013 -# Kai Zhang , 2013 -# openstack , 2013 -# Shuwen SUN , 2014 -# Tom Fifield , 2013 -# 颜海峰 , 2014 -# Yu Zhang, 2014 -# 颜海峰 , 2014 -# Andreas Jaeger , 2016. #zanata -# Eric Lei <1165970798@qq.com>, 2016. #zanata -# howard lee , 2016. #zanata -msgid "" -msgstr "" -"Project-Id-Version: cinder 9.0.0.0rc2.dev243\n" -"Report-Msgid-Bugs-To: https://bugs.launchpad.net/openstack-i18n/\n" -"POT-Creation-Date: 2016-10-14 23:32+0000\n" -"MIME-Version: 1.0\n" -"Content-Type: text/plain; charset=UTF-8\n" -"Content-Transfer-Encoding: 8bit\n" -"PO-Revision-Date: 2016-09-28 06:21+0000\n" -"Last-Translator: Eric Lei <1165970798@qq.com>\n" -"Language: zh-CN\n" -"Plural-Forms: nplurals=1; plural=0;\n" -"Generated-By: Babel 2.0\n" -"X-Generator: Zanata 3.7.3\n" -"Language-Team: Chinese (China)\n" - -#, python-format -msgid "\t%s" -msgstr "\t%s" - -#, python-format -msgid "" -"\n" -"CoprHD Exception: %(msg)s\n" -msgstr "" -"\n" -"CoprHD 异常: %(msg)s\n" - -#, python-format -msgid "" -"\n" -"General Exception: %(exec_info)s\n" -msgstr "" -"\n" -"一般异常: %(exec_info)s\n" - -#, python-format -msgid "" -"\n" -"OpenStack Cinder version: %(version)s\n" -msgstr "" -"\n" -"OpenStack Cinder 版本:%(version)s\n" - -#, python-format -msgid " but size is now %d" -msgstr "但现在大小为 %d" - -#, python-format -msgid " but size is now %d." -msgstr "但现在大小为 %d。" - -msgid " or " -msgstr "或者" - -#, python-format -msgid "%(attr)s is not set." -msgstr "未设置 %(attr)s。" - -#, python-format -msgid "" -"%(driver)s manage_existing cannot manage a volume connected to hosts. Please " -"disconnect this volume from existing hosts before importing" -msgstr "" -"%(driver)s manage_existing 无法管理已连接至主机的卷。在进行导入之前,请从现有" -"主机断开与此卷的连接" - -#, python-format -msgid "%(err)s" -msgstr "%(err)s" - -#, python-format -msgid "" -"%(err)s\n" -"result: %(res)s." 
-msgstr "" -"%(err)s\n" -"结果:%(res)s。" - -#, python-format -msgid "%(error_message)s" -msgstr "%(error_message)s" - -#, python-format -msgid "%(error_msg)s Error description: %(error_description)s" -msgstr "%(error_msg)s 错误描述: %(error_description)s" - -#, python-format -msgid "%(error_msg)s Error details: %(error_details)s" -msgstr "%(error_msg)s 错误详细信息: %(error_details)s" - -#, python-format -msgid "%(exception)s: %(explanation)s" -msgstr "发生异常 %(exception)s:原因 %(explanation)s" - -#, python-format -msgid "%(file)s: Permission denied." -msgstr "%(file)s:许可权被拒绝。" - -#, python-format -msgid "" -"%(fun)s: Failed with unexpected CLI output.\n" -" Command: %(cmd)s\n" -" stdout: %(out)s\n" -" stderr: %(err)s" -msgstr "" -"%(fun)s:失败,产生了意外 CLI 输出。\n" -"命令:%(cmd)s\n" -"标准输出:%(out)s\n" -"标准错误:%(err)s" - -#, python-format -msgid "%(host)-25s\t%(availability_zone)-15s" -msgstr "%(host)-25s\t%(availability_zone)-15s" - -#, python-format -msgid "%(host)-25s\t%(zone)-15s" -msgstr "%(host)-25s\t%(zone)-15s" - -#, python-format -msgid "%(message)s" -msgstr "%(message)s" - -#, python-format -msgid "" -"%(message)s\n" -"Status Code: %(_status)s\n" -"Body: %(_body)s" -msgstr "" -"%(message)s\n" -"状态码: %(_status)s\n" -"主体: %(_body)s" - -#, python-format -msgid "%(message)s, subjectAltName: %(sanList)s." -msgstr "%(message)s,subjectAltName:%(sanList)s。" - -#, python-format -msgid "%(msg)s And %(num)s services from the cluster were also removed." -msgstr "源于集群的 %(msg)s 和 %(num)s 服务也被删除。" - -#, python-format -msgid "" -"%(msg_type)s: creating NetworkPortal: ensure port %(port)d on ip %(ip)s is " -"not in use by another service." -msgstr "" -"%(msg_type)s:创建 NetworkPortal:请确保 IP %(ip)s 上的端口 %(port)d未被另一" -"项服务使用。" - -#, python-format -msgid "%(name)s cannot be all spaces." -msgstr "%(name)s不能是所有空间" - -#, python-format -msgid "%(new_size)s < current size %(size)s" -msgstr "%(new_size)s < 当前大小 %(size)s" - -#, python-format -msgid "" -"%(op)s: backup %(bck_id)s, volume %(vol_id)s failed. 
Backup object has " -"unexpected mode. Image or file backups supported, actual mode is " -"%(vol_mode)s." -msgstr "" -"%(op)s:备份 %(bck_id)s(针对卷 %(vol_id)s)失败。备份对象具有意外方式。支持" -"映像或文件备份,实际方式为 %(vol_mode)s。" - -#, python-format -msgid "%(reason)s" -msgstr "%(reason)s" - -#, python-format -msgid "%(service)s Service is not %(status)s on storage appliance: %(host)s" -msgstr "在以下存储设备上,%(service)s 服务并非处于 %(status)s 状态:%(host)s" - -#, python-format -msgid "%(value_name)s must be <= %(max_value)d" -msgstr "%(value_name)s 必须小于或等于 %(max_value)d" - -#, python-format -msgid "%(value_name)s must be >= %(min_value)d" -msgstr "%(value_name)s 必须大于或等于 %(min_value)d" - -#, python-format -msgid "" -"%(worker_name)s value of %(workers)d is invalid, must be greater than 0." -msgstr "%(workers)d 的 %(worker_name)s 值无效,必须大于 0。" - -#, python-format -msgid "%s" -msgstr "%s" - -#, python-format -msgid "%s \"data\" is not in result." -msgstr "结果中没有 %s “数据”。" - -#, python-format -msgid "%s assigned" -msgstr "%s 已分配" - -#, python-format -msgid "" -"%s cannot be accessed. Verify that GPFS is active and file system is mounted." -msgstr "%s 无法访问。请验证 GPFS 是否处于活动状态并且文件系统是否已安装。" - -#, python-format -msgid "%s cannot be resized using clone operation as it contains no blocks." -msgstr "%s 无法使用克隆操作来调整大小,因为它未包含任何块。" - -#, python-format -msgid "" -"%s cannot be resized using clone operation as it is hosted on compressed " -"volume" -msgstr "%s 无法使用克隆操作来调整大小,因为它托管于压缩卷上" - -#, python-format -msgid "%s changed" -msgstr "%s 已更改" - -#, python-format -msgid "%s configuration option is not set." -msgstr "未设置 %s 配置选项。" - -#, python-format -msgid "%s does not exist." -msgstr "%s 不存在。" - -#, python-format -msgid "%s is not a directory." -msgstr "%s 不是一个目录。" - -#, python-format -msgid "%s is not installed" -msgstr "未安装 %s" - -#, python-format -msgid "%s is not installed." 
-msgstr "未安装 %s。" - -#, python-format -msgid "%s is not set" -msgstr "未设置 %s " - -#, python-format -msgid "%s is not set and is required for the replication device to be valid." -msgstr "未设置 %s,它是使复制设备生效所必需的。" - -#, python-format -msgid "%s is not set." -msgstr "未设置 %s。" - -#, python-format -msgid "%s must be a valid raw or qcow2 image." -msgstr "%s 必须为有效的 raw 映像或 qcow2 映像。" - -#, python-format -msgid "%s must be an absolute path." -msgstr "%s 必须为绝对路径。" - -#, python-format -msgid "%s must be an integer." -msgstr "%s 必须为整数。" - -#, python-format -msgid "%s not set in cinder.conf" -msgstr "cinder.conf 中未设置 %s" - -#, python-format -msgid "%s not set." -msgstr "未设置 %s。" - -#, python-format -msgid "'%(key)s = %(value)s'" -msgstr "'%(key)s = %(value)s'" - -#, python-format -msgid "" -"'%(prot)s' is invalid for flashsystem_connection_protocol in config file. " -"valid value(s) are %(enabled)s." -msgstr "" -"对于配置文件中的 flashsystem_connection_protocol,“%(prot)s”无效。有效值为 " -"%(enabled)s。" - -msgid "'active' must be present when writing snap_info." -msgstr "写入 snap_info 时,状态必须为“活动”。" - -msgid "'consistencygroup_id' must be specified" -msgstr "必须指定“consistencygroup_id”" - -msgid "'group_id' must be specified" -msgstr "必须指定\"group_id\"" - -msgid "'qemu-img info' parsing failed." -msgstr "'qemu-img info'解析失败" - -msgid "'status' must be specified." -msgstr "必须指定“status”。" - -msgid "'volume_id' must be specified" -msgstr "必须指定“volume_id”" - -#, python-format -msgid "" -"(Command: %(cmd)s) (Return Code: %(exit_code)s) (Stdout: %(stdout)s) " -"(Stderr: %(stderr)s)" -msgstr "" -"(命令:%(cmd)s)(返回码:%(exit_code)s)(标准输出:%(stdout)s)(标准错" -"误:%(stderr)s)" - -msgid "400 Bad Request" -msgstr "400 错误请求" - -msgid "401 Unauthorized Error" -msgstr "401 未授权错误" - -msgid "404 Not Found Error" -msgstr "404 资源未找到错误" - -msgid "413 Request entity too large" -msgstr "413 请求实体过大" - -#, python-format -msgid "A LUN (HLUN) was not found. 
(LDEV: %(ldev)s)" -msgstr "找不到 LUN (HLUN)。(逻辑设备:%(ldev)s)" - -msgid "A concurrent, possibly contradictory, request has been made." -msgstr "发出了并行的可能对立的请求。" - -#, python-format -msgid "" -"A free LUN (HLUN) was not found. Add a different host group. (LDEV: %(ldev)s)" -msgstr "找不到可用的 LUN (HLUN)。请添加另一主机组。(逻辑设备:%(ldev)s)" - -#, python-format -msgid "A host group could not be added. (port: %(port)s, name: %(name)s)" -msgstr "未能添加主机组。(端口为 %(port)s,名称为 %(name)s)" - -#, python-format -msgid "" -"A host group could not be deleted. (port: %(port)s, gid: %(gid)s, name: " -"%(name)s)" -msgstr "未能删除主机组。(端口为 %(port)s,组标识为 %(gid)s,名称为 %(name)s)" - -#, python-format -msgid "A host group is invalid. (host group: %(gid)s)" -msgstr "主机组无效。(主机组:%(gid)s)" - -#, python-format -msgid "A pair cannot be deleted. (P-VOL: %(pvol)s, S-VOL: %(svol)s)" -msgstr "无法删除对。(P-VOL 为 %(pvol)s,S-VOL 为 %(svol)s)" - -#, python-format -msgid "" -"A pair could not be created. The maximum number of pair is exceeded. (copy " -"method: %(copy_method)s, P-VOL: %(pvol)s)" -msgstr "" -"未能创建对。超过最大对数。(复制方法为 %(copy_method)s,P-VOL 为 %(pvol)s)" - -#, python-format -msgid "A parameter is invalid. (%(param)s)" -msgstr "参数无效。(%(param)s)" - -#, python-format -msgid "A parameter value is invalid. (%(meta)s)" -msgstr "参数值无效。(%(meta)s)" - -#, python-format -msgid "A pool could not be found. (pool id: %(pool_id)s)" -msgstr "找不到池。(池标识:%(pool_id)s)" - -msgid "A readonly volume must be attached as readonly." -msgstr "只读卷必须以只读方式进行挂载。" - -#, python-format -msgid "A snapshot status is invalid. (status: %(status)s)" -msgstr "快照状态无效。(状态:%(status)s)" - -msgid "A valid secondary target MUST be specified in order to failover." -msgstr "必须指定有效辅助以进行故障转移。" - -msgid "A volume ID or share was not specified." -msgstr "未指定卷标识或者共享。" - -#, python-format -msgid "A volume status is invalid. 
(status: %(status)s)" -msgstr "卷状态无效。(状态:%(status)s)" - -#, python-format -msgid "API %(name)s failed with error string %(err)s" -msgstr "API %(name)s 失败,带有错误字符串 %(err)s" - -#, python-format -msgid "" -"API Version String %(version)s is of invalid format. Must be of format " -"MajorNum.MinorNum." -msgstr "" -"API 版本字符串 %(version)s 为无效格式。必须为以下格式:MajorNum.MinorNum。" - -msgid "API key is missing for CloudByte driver." -msgstr "CloudByte 驱动程序缺少 API 键。" - -#, python-format -msgid "API response: %(response)s" -msgstr "API 响应:%(response)s" - -#, python-format -msgid "API response: %s" -msgstr "API 响应:%s" - -#, python-format -msgid "API version %(version)s is not supported on this method." -msgstr "API 版本 %(version)s 在此方法上不受支持。" - -msgid "API version could not be determined." -msgstr "未能确定 API 版本。" - -msgid "" -"About to delete child projects having non-zero quota. This should not be " -"performed" -msgstr "将删除具有非零配额的子项目。不应执行此操作" - -msgid "Access forbidden: Authentication required" -msgstr "禁止访问:需要认证" - -msgid "" -"Access forbidden: You don't have sufficient privileges to perform this " -"operation" -msgstr "禁止访问:你没有足够的权限来执行该操作" - -msgid "Access list not available for public volume types." -msgstr "对于公用卷类型,未提供访问列表。" - -msgid "Activate or deactivate QoS error." -msgstr "激活或者取消激活 QoS 时发生错误。" - -msgid "Activate snapshot error." -msgstr "激活快照时发生错误。" - -#, python-format -msgid "Activating zone set failed: (Zone set=%(cfg_name)s error=%(err)s)." -msgstr "激活区域集失败(区域集为 %(cfg_name)s,发生的错误为 %(err)s)。" - -msgid "Add FC port to host error." -msgstr "将 FC 端口添加至主机时发生错误。" - -msgid "Add fc initiator to array error." -msgstr "将 FC 启动程序添加至阵列时发生错误。" - -msgid "Add initiator to array error." -msgstr "将启动程序添加至阵列时发生错误。" - -msgid "Add lun to cache error." -msgstr "将 LUN 添加至高速缓存时发生错误。" - -msgid "Add lun to partition error." -msgstr "将 LUN 添加至分区时发生错误。" - -msgid "Add mapping view error." -msgstr "添加映射视图时发生错误。" - -msgid "Add new host error." 
-msgstr "添加新主机时发生错误。" - -msgid "Add port to port group error." -msgstr "向端口组添加端口时出错。" - -#, python-format -msgid "" -"All the specified storage pools to be managed do not exist. Please check " -"your configuration. Non-existent pools: %s" -msgstr "所指定的要管理的所有存储池都不存在。请检查配置。不存在的池:%s" - -msgid "An API version request must be compared to a VersionedMethod object." -msgstr "必须将 API 版本请求与 VersionedMethod 对象进行比较。" - -msgid "An error has occurred during backup operation" -msgstr "在备份过程中出现一个错误" - -#, python-format -msgid "An error has occurred in SheepdogDriver. (Reason: %(reason)s)" -msgstr "SheepdogDriver中发生了错误。(原因:%(reason)s)" - -#, python-format -msgid "" -"An error occurred during the LUNcopy operation. LUNcopy name: " -"%(luncopyname)s. LUNcopy status: %(luncopystatus)s. LUNcopy state: " -"%(luncopystate)s." -msgstr "" -"在 LUNcopy 操作期间发生错误。LUNcopy 名称为 %(luncopyname)s。LUNcopy 状态为 " -"%(luncopystatus)s。LUNcopy 状态为 %(luncopystate)s。" - -#, python-format -msgid "An error occurred while reading volume \"%s\"." -msgstr "读取卷“%s”时发生错误。" - -#, python-format -msgid "An error occurred while seeking for volume \"%s\"." -msgstr "查找卷“%s”时发生错误。" - -#, python-format -msgid "An error occurred while writing to volume \"%s\"." -msgstr "写入卷“%s”时发生错误。" - -#, python-format -msgid "An iSCSI CHAP user could not be added. (username: %(user)s)" -msgstr "未能添加 iSCSI CHAP 用户。(用户名:%(user)s)" - -#, python-format -msgid "An iSCSI CHAP user could not be deleted. (username: %(user)s)" -msgstr "未能删除 iSCSI CHAP 用户。(用户名:%(user)s)" - -#, python-format -msgid "" -"An iSCSI target could not be added. (port: %(port)s, alias: %(alias)s, " -"reason: %(reason)s)" -msgstr "" -"未能添加 iSCSI 目标。(端口为 %(port)s,别名为 %(alias)s,原因为 %(reason)s)" - -#, python-format -msgid "" -"An iSCSI target could not be deleted. (port: %(port)s, tno: %(tno)s, alias: " -"%(alias)s)" -msgstr "" -"未能删除 iSCSI 目标。(端口为 %(port)s,目标号为 %(tno)s,别名为 %(alias)s)" - -msgid "An unknown error occurred." 
-msgstr "发生未知错误。" - -msgid "An unknown exception occurred." -msgstr "发生未知异常。" - -msgid "" -"An user with a token scoped to a subproject is not allowed to see the quota " -"of its parents." -msgstr "不允许令牌作用域仅限于子项目的用户查看其父代的配额。" - -msgid "Append port group description error." -msgstr "附加端口组描述时出错。" - -#, python-format -msgid "" -"Applying the zones and cfgs to the switch failed (error code=%(err_code)s " -"error msg=%(err_msg)s." -msgstr "" -"对交换机应用 zones 和 cfgs 失败(错误代码为 %(err_code)s,错误消息为 " -"%(err_msg)s)。" - -#, python-format -msgid "Array does not exist or is offline. Current status of array is %s." -msgstr "阵列不存在或者处于脱机状态。阵列的当前状态为 %s。" - -msgid "Associate host to hostgroup error." -msgstr "使主机与主机组关联时发生错误。" - -msgid "Associate host to mapping view error." -msgstr "使主机与映射视图关联时发生错误。" - -msgid "Associate initiator to host error." -msgstr "使启动程序与主机相关联时发生错误。" - -msgid "Associate lun to QoS error." -msgstr "将 LUN 关联至 QoS 时出错。" - -msgid "Associate lun to lungroup error." -msgstr "使 LUN 与 LUN 组关联时发生错误。" - -msgid "Associate lungroup to mapping view error." -msgstr "使 LUN 组与映射视图关联时发生错误。" - -msgid "Associate portgroup to mapping view error." -msgstr "使端口组与映射视图关联时发生错误。" - -msgid "At least one valid iSCSI IP address must be set." -msgstr "必须至少设置一个有效 iSCSI IP 地址。" - -#, python-format -msgid "" -"Attach volume (%(name)s) to host (%(hostname)s) initiator " -"(%(initiatorport)s) failed:\n" -"%(err)s" -msgstr "" -"将卷 (%(name)s) 连接到主机 (%(hostname)s) 的初始化器 (%(initiatorport)s) 失" -"败:\n" -"%(err)s" - -#, python-format -msgid "Attempt to transfer %s with invalid auth key." -msgstr "请尝试使用有效的认证密钥传输 %s。" - -#, python-format -msgid "Attribute: %s not found." -msgstr "属性: %s 未找到" - -#, python-format -msgid "Auth group [%s] details not found in CloudByte storage." -msgstr "在 CloudByte 存储器中找不到认证组 [%s] 详细信息。" - -msgid "Auth user details not found in CloudByte storage." 
-msgstr "在 CloudByte 存储器中找不到认证用户详细信息。" - -#, python-format -msgid "Authentication failed, verify the switch credentials, error code %s." -msgstr "认证失败,请验证交换机凭证,错误代码:%s。" - -#, python-format -msgid "Availability zone '%(s_az)s' is invalid." -msgstr "可用性区域“%(s_az)s”无效。" - -msgid "Available categories:" -msgstr "可用的类别:" - -msgid "" -"Back-end QoS specs are not supported on this storage family and ONTAP " -"version." -msgstr "在此存储器系列和 ONTAP 版本上,后端 QoS 规范不受支持。" - -#, python-format -msgid "Backend doesn't exist (%(backend)s)" -msgstr "后端不存在 (%(backend)s)" - -#, python-format -msgid "Backend reports: %(message)s" -msgstr "后端报告:%(message)s" - -msgid "Backend reports: item already exists" -msgstr "后端报告:项已存在" - -msgid "Backend reports: item not found" -msgstr "后端报告:找不到项" - -#, python-format -msgid "Backend service retry timeout hit: %(timeout)s sec" -msgstr "后端服务重试超时匹配项:%(timeout)s 秒" - -msgid "Backend storage did not configure fiber channel target." -msgstr "后端存储器未配置光纤通道目标。" - -msgid "Backing up an in-use volume must use the force flag." -msgstr "备份一个正在使用的卷时必须使用强制标志。" - -#, python-format -msgid "Backup %(backup_id)s could not be found." -msgstr "找不到备份 %(backup_id)s。" - -msgid "Backup RBD operation failed" -msgstr "备份RBD操作失败" - -msgid "Backup already exists in database." -msgstr "数据库中已存在备份。" - -#, python-format -msgid "Backup driver reported an error: %(message)s" -msgstr "备份驱动程序已报告错误:%(message)s" - -msgid "Backup id required" -msgstr "需要备份标识" - -msgid "Backup is not supported for GlusterFS volumes with snapshots." -msgstr "具有快照的 GlusterFS 卷不支持备份。" - -msgid "Backup is only supported for SOFS volumes without backing file." -msgstr "仅那些不带支持文件的 SOFS 卷支持备份。" - -msgid "Backup is only supported for raw-formatted GlusterFS volumes." -msgstr "仅原始格式的 GlusterFS 卷支持备份。" - -msgid "Backup is only supported for raw-formatted SOFS volumes." -msgstr "仅原始格式的 SOFS 卷支持备份。" - -msgid "Backup operation of an encrypted volume failed." 
-msgstr "已加密卷的备份操作失败。" - -#, python-format -msgid "" -"Backup service %(configured_service)s does not support verify. Backup id " -"%(id)s is not verified. Skipping verify." -msgstr "" -"备份服务 %(configured_service)s 不支持验证。未验证备份标识 %(id)s。正在跳过验" -"证。" - -#, python-format -msgid "" -"Backup service %(service)s does not support verify. Backup id %(id)s is not " -"verified. Skipping reset." -msgstr "备份服务 %(service)s 不支持验证。未验证备份标识 %(id)s。正在跳过重置。" - -#, python-format -msgid "Backup should only have one snapshot but instead has %s" -msgstr "备份应该仅具有一个快照,但是具有 %s 个快照" - -msgid "Backup status must be available" -msgstr "备份状态必须为“可用”" - -#, python-format -msgid "Backup status must be available and not %s." -msgstr "备份状态必须为“可用”,不能是 %s。" - -msgid "Backup status must be available or error" -msgstr "备份状态必须为“可用”或“错误”" - -msgid "Backup to be restored has invalid size" -msgstr "要复原的备份具有无效大小" - -#, python-format -msgid "Bad HTTP response status %(status)s" -msgstr "错误的HTTP响应状态 %(status)s" - -#, python-format -msgid "Bad Status line returned: %(arg)s." 
-msgstr "返回的状态行不正确:%(arg)s。" - -#, python-format -msgid "Bad key(s) in quota set: %s" -msgstr "配额集中的键不正确:%s" - -#, python-format -msgid "" -"Bad or unexpected response from the storage volume backend API: %(data)s" -msgstr "从存储卷后端 API 返回了不正确或意外的响应:%(data)s" - -#, python-format -msgid "Bad project format: project is not in proper format (%s)" -msgstr "项目格式不正确:项目没有采用正确格式 (%s)" - -msgid "Bad response from Datera API" -msgstr "来自 Datera API 的响应不正确" - -msgid "Bad response from SolidFire API" -msgstr "来自SolidFire API的错误响应" - -#, python-format -msgid "Bad response from XMS, %s" -msgstr "来自 XMS 的响应不正确,%s" - -msgid "Binary" -msgstr "二进制" - -msgid "Blank components" -msgstr "空组件" - -msgid "Blockbridge api host not configured" -msgstr "未配置 Blockbridge API 主机" - -#, python-format -msgid "Blockbridge configured with invalid auth scheme '%(auth_scheme)s'" -msgstr "为 Blockbridge 配置了无效认证方案“%(auth_scheme)s”" - -msgid "Blockbridge default pool does not exist" -msgstr "Blockbridge 缺省池不存在" - -msgid "" -"Blockbridge password not configured (required for auth scheme 'password')" -msgstr "未配置 Blockbridge 密码(对于认证方案“密码”,这是必需的)" - -msgid "Blockbridge pools not configured" -msgstr "未配置 Blockbridge 池" - -msgid "Blockbridge token not configured (required for auth scheme 'token')" -msgstr "未配置 Blockbridge 令牌(对于认证方案“令牌”,这是必需的)" - -msgid "Blockbridge user not configured (required for auth scheme 'password')" -msgstr "未配置 Blockbridge 用户(对于认证方案“密码”,这是必需的)" - -msgid "Bourne internal server error" -msgstr "Bourne 内部服务器错误" - -#, python-format -msgid "Brocade Fibre Channel Zoning CLI error: %(reason)s" -msgstr "Brocade 光纤通道分区 CLI 错误:%(reason)s" - -#, python-format -msgid "Brocade Fibre Channel Zoning HTTP error: %(reason)s" -msgstr "Brocade 光纤通道分区 HTTP 错误:%(reason)s" - -msgid "CHAP secret should be 12-16 bytes." 
-msgstr "CHAP 密钥应为 12 到 16 个字节。" - -#, python-format -msgid "" -"CLI Exception output:\n" -" command: %(cmd)s\n" -" stdout: %(out)s\n" -" stderr: %(err)s" -msgstr "" -"CLI 异常输出:\n" -"命令:%(cmd)s\n" -"标准输出:%(out)s\n" -"标准错误:%(err)s" - -#, python-format -msgid "" -"CLI Exception output:\n" -" command: %(cmd)s\n" -" stdout: %(out)s\n" -" stderr: %(err)s." -msgstr "" -"CLI 异常输出:\n" -"命令:%(cmd)s\n" -"标准输出:%(out)s\n" -"标准错误:%(err)s。" - -msgid "" -"CMMVC6071E The VDisk-to-host mapping was not created because the VDisk is " -"already mapped to a host.\n" -"\"" -msgstr "" -"CMMVC6071E 未创建 VDisk 至主机的映射,因为该 VDisk 已映射至主机。\n" -"\"" - -msgid "CONCERTO version is not supported" -msgstr "不支持 CONCERTO 版本" - -#, python-format -msgid "CPG (%s) doesn't exist on array" -msgstr "数组中不存在 CPG (%s)" - -msgid "Cache name is None, please set smartcache:cachename in key." -msgstr "高速缓存名称为 None,请在键中设置 smartcache:cachename。" - -#, python-format -msgid "Cache volume %s does not have required properties" -msgstr "高速缓存卷 %s 没有必需属性。" - -msgid "Call returned a None object" -msgstr "调用返回了 None 对象" - -msgid "Can not add FC port to host." -msgstr "无法将 FC 端口添加至主机。" - -#, python-format -msgid "Can not find cache id by cache name %(name)s." -msgstr "按高速缓存名称 %(name)s 找不到高速缓存标识。" - -#, python-format -msgid "Can not find partition id by name %(name)s." -msgstr "按名称 %(name)s 找不到分区标识。" - -#, python-format -msgid "Can not get pool info. pool: %s" -msgstr "无法获取池信息。池:%s" - -msgid "Can not get target ip address. " -msgstr "无法获取目标ip地址。" - -#, python-format -msgid "Can not translate %s to integer." -msgstr "无法把 %s 转换成整数" - -#, python-format -msgid "Can't access 'scality_sofs_config': %s" -msgstr "无法访问“scality_sofs_config”:%s" - -msgid "Can't decode backup record." -msgstr "无法将备份记录解码。" - -#, python-format -msgid "Can't extend replication volume, volume: %(id)s" -msgstr "无法扩展复制卷,卷:%(id)s" - -msgid "Can't find LUN on the array, please check the source-name or source-id." 
-msgstr "在阵列上找不到 LUN,请检查 source-name 或 source-id。" - -#, python-format -msgid "Can't find cache name on the array, cache name is: %(name)s." -msgstr "在阵列上找不到高速缓存名称,高速缓存名称为 %(name)s。" - -#, python-format -msgid "Can't find lun info on the array. volume: %(id)s, lun name: %(name)s." -msgstr "在阵列上找不到 LUN 信息。卷:%(id)s。LUN 名称:%(name)s。" - -#, python-format -msgid "Can't find partition name on the array, partition name is: %(name)s." -msgstr "在阵列上找不到分区名称,分区名称为 %(name)s。" - -#, python-format -msgid "Can't find service: %s" -msgstr "找不到以下服务:%s" - -msgid "" -"Can't find snapshot on array, please check the source-name or source-id." -msgstr "在阵列上找不到快照,请检查 source-name 或 source-id。" - -msgid "Can't find the same host id from arrays." -msgstr "在阵列中找不到同一主机标识。" - -#, python-format -msgid "Can't get volume id from snapshot, snapshot: %(id)s" -msgstr "无法通过快照获取卷,快照:%(id)s" - -#, python-format -msgid "Can't get volume id. Volume name: %s." -msgstr "无法获取卷标识,卷名:%s。" - -#, python-format -msgid "Can't import LUN %(lun_id)s to Cinder. LUN type mismatched." -msgstr "无法将 LUN %(lun_id)s 导入至 Cinder。LUN 类型不匹配。" - -#, python-format -msgid "Can't import LUN %s to Cinder. Already exists in a HyperMetroPair." -msgstr "无法将 LUN %s 导入至 Cinder。它在 HyperMetroPair 中已存在。" - -#, python-format -msgid "Can't import LUN %s to Cinder. Already exists in a LUN copy task." -msgstr "无法将 LUN %s 导入至 Cinder。它在 LUN 复制任务中已存在。" - -#, python-format -msgid "Can't import LUN %s to Cinder. Already exists in a LUN group." -msgstr "无法将 LUN %s 导入至 Cinder。它在 LUN 组中已存在。" - -#, python-format -msgid "Can't import LUN %s to Cinder. Already exists in a LUN mirror." -msgstr "无法将 LUN %s 导入至 Cinder。它在 LUN 镜像中已存在。" - -#, python-format -msgid "Can't import LUN %s to Cinder. Already exists in a SplitMirror." -msgstr "无法将 LUN %s 导入至 Cinder。它在 SplitMirror 中已存在。" - -#, python-format -msgid "Can't import LUN %s to Cinder. Already exists in a migration task." 
-msgstr "无法将 LUN %s 导入至 Cinder。它在迁移任务中已存在。" - -#, python-format -msgid "" -"Can't import LUN %s to Cinder. Already exists in a remote replication task." -msgstr "无法将 LUN %s 导入至 Cinder。它在远程复制任务中已存在。" - -#, python-format -msgid "Can't import LUN %s to Cinder. LUN status is not normal." -msgstr "无法将 LUN %s 导入至 Cinder。LUN 状态异常。" - -#, python-format -msgid "Can't import snapshot %s to Cinder. Snapshot doesn't belong to volume." -msgstr "无法将快照 %s 导入至 Cinder。快照不属于卷。" - -#, python-format -msgid "Can't import snapshot %s to Cinder. Snapshot is exposed to initiator." -msgstr "无法将快照 %s 导入至 Cinder。快照已展示给启动程序。" - -#, python-format -msgid "" -"Can't import snapshot %s to Cinder. Snapshot status is not normal or running " -"status is not online." -msgstr "无法将 LUN %s 导入至 Cinder。快照状态异常或运行状态并非“在线”。" - -msgid "Can't parse backup record." -msgstr "无法解析备份记录。" - -#, python-format -msgid "" -"Cannot add volume %(volume_id)s to consistency group %(group_id)s because it " -"has no volume type." -msgstr "" -"无法将卷 %(volume_id)s 添加至一致性组 %(group_id)s,因为它没有任何卷类型。" - -#, python-format -msgid "" -"Cannot add volume %(volume_id)s to consistency group %(group_id)s because it " -"is already in consistency group %(orig_group)s." -msgstr "" -"无法将卷 %(volume_id)s 添加至一致性组 %(group_id)s,因为它已经存在于一致性组 " -"%(orig_group)s 中。" - -#, python-format -msgid "" -"Cannot add volume %(volume_id)s to consistency group %(group_id)s because " -"volume cannot be found." -msgstr "无法将卷 %(volume_id)s 添加至一致性组 %(group_id)s,因为找不到该卷。" - -#, python-format -msgid "" -"Cannot add volume %(volume_id)s to consistency group %(group_id)s because " -"volume does not exist." -msgstr "无法将卷 %(volume_id)s 添加至一致性组 %(group_id)s,因为该卷不存在。" - -#, python-format -msgid "" -"Cannot add volume %(volume_id)s to consistency group %(group_id)s because " -"volume is in an invalid state: %(status)s. Valid states are: %(valid)s." 
-msgstr "" -"无法将卷 %(volume_id)s 添加至一致性组 %(group_id)s,因为该卷处于无效状态:" -"%(status)s。以下是有效状态:%(valid)s。" - -#, python-format -msgid "" -"Cannot add volume %(volume_id)s to consistency group %(group_id)s because " -"volume type %(volume_type)s is not supported by the group." -msgstr "" -"无法将卷 %(volume_id)s 添加至一致性组 %(group_id)s,因为该组不支持卷类型 " -"%(volume_type)s。" - -#, python-format -msgid "" -"Cannot add volume %(volume_id)s to group %(group_id)s because it has no " -"volume type." -msgstr "无法将卷 %(volume_id)s 添加至组 %(group_id)s,因为它没有任何卷类型。" - -#, python-format -msgid "" -"Cannot add volume %(volume_id)s to group %(group_id)s because it is already " -"in group %(orig_group)s." -msgstr "" -"无法将卷 %(volume_id)s 添加至组 %(group_id)s,因为它已经存在于组 " -"%(orig_group)s 中。" - -#, python-format -msgid "" -"Cannot add volume %(volume_id)s to group %(group_id)s because volume cannot " -"be found." -msgstr "无法将卷 %(volume_id)s 添加至组 %(group_id)s,因为找不到该卷。" - -#, python-format -msgid "" -"Cannot add volume %(volume_id)s to group %(group_id)s because volume does " -"not exist." -msgstr "无法将卷 %(volume_id)s 添加至组 %(group_id)s,因为该卷不存在。" - -#, python-format -msgid "" -"Cannot add volume %(volume_id)s to group %(group_id)s because volume is in " -"an invalid state: %(status)s. Valid states are: %(valid)s." -msgstr "" -"无法将卷 %(volume_id)s 添加至组 %(group_id)s,因为该卷处于无效状态:" -"%(status)s。以下是有效状态:%(valid)s。" - -#, python-format -msgid "" -"Cannot add volume %(volume_id)s to group %(group_id)s because volume type " -"%(volume_type)s is not supported by the group." -msgstr "" -"无法将卷 %(volume_id)s 添加至组 %(group_id)s,因为该组不支持卷类型 " -"%(volume_type)s。" - -#, python-format -msgid "" -"Cannot attach already attached volume %s; multiattach is disabled via the " -"'netapp_enable_multiattach' configuration option." -msgstr "" -"无法连接已经连接的卷 %s;通过“netapp_enable_multiattach”配置选项禁用了多个连" -"接。" - -msgid "Cannot change VF context in the session." 
-msgstr "无法更改会话中的 VF 上下文。" - -#, python-format -msgid "" -"Cannot change VF context, specified VF is not available in the manageable VF " -"list %(vf_list)s." -msgstr "无法更改 VF 上下文,指定的 VF 在管理 VF 列表 %(vf_list)s 中不可用。" - -msgid "Cannot connect to ECOM server." -msgstr "无法连接至 ECOM 服务器。" - -#, python-format -msgid "" -"Cannot create consistency group %(group)s because snapshot %(snap)s is not " -"in a valid state. Valid states are: %(valid)s." -msgstr "" -"无法创建一致性组 %(group)s,因为快照 %(snap)s 未处于有效状态。以下是有效状" -"态:%(valid)s。" - -#, python-format -msgid "" -"Cannot create consistency group %(group)s because source volume " -"%(source_vol)s is not in a valid state. Valid states are: %(valid)s." -msgstr "" -"无法创建一致性组 %(group)s,因为源卷 %(source_vol)s 未处于有效状态。有效状态" -"为 %(valid)s。" - -#, python-format -msgid "Cannot create directory %s." -msgstr "无法创建目录 %s。" - -msgid "Cannot create encryption specs. Volume type in use." -msgstr "无法创建加密规范。卷类型在使用中。" - -#, python-format -msgid "" -"Cannot create group %(group)s because snapshot %(snap)s is not in a valid " -"state. Valid states are: %(valid)s." -msgstr "" -"无法创建组 %(group)s,因为快照 %(snap)s 未处于有效状态。以下是有效状态:" -"%(valid)s。" - -#, python-format -msgid "" -"Cannot create group %(group)s because source volume %(source_vol)s is not in " -"a valid state. Valid states are: %(valid)s." -msgstr "" -"无法创建组 %(group)s,因为源卷 %(source_vol)s 未处于有效状态。有效状态为 " -"%(valid)s。" - -#, python-format -msgid "Cannot create group_type with name %(name)s and specs %(group_specs)s" -msgstr "无法创建名称为 %(name)s 且规格为 %(group_specs)s 的组类型。" - -#, python-format -msgid "" -"Cannot create image of disk format: %s. Only vmdk disk format is accepted." -msgstr "无法创建磁盘格式为 %s 映像。仅接受 vmdk 磁盘格式。" - -#, python-format -msgid "Cannot create masking view: %(maskingViewName)s. " -msgstr "无法创建掩码视图:%(maskingViewName)s。" - -#, python-format -msgid "" -"Cannot create more than %(req)s volumes on the ESeries array when " -"'netapp_enable_multiattach' is set to true." 
-msgstr "" -"当“netapp_enable_multiattach”设置为 true 时,无法在 ESeries 阵列上创建多个 " -"%(req)s 卷。" - -#, python-format -msgid "Cannot create or find an storage group with name %(sgGroupName)s." -msgstr "无法创建或找到名称为 %(sgGroupName)s 的存储器组。" - -#, python-format -msgid "Cannot create volume of size %s: not multiple of 8GB." -msgstr "无法创建大小为 %s 的卷:该大小不是 8GB 的倍数。" - -#, python-format -msgid "Cannot create volume_type with name %(name)s and specs %(extra_specs)s" -msgstr "无法创建名称为 %(name)s 规格为 %(extra_specs)s 的卷类型。" - -#, python-format -msgid "Cannot delete LUN %s while snapshots exist." -msgstr "存在快照时,无法删除 LUN %s。" - -#, python-format -msgid "" -"Cannot delete cache volume: %(cachevol_name)s. It was updated at " -"%(updated_at)s and currently has %(numclones)d volume instances." -msgstr "" -"无法删除高速缓存卷:%(cachevol_name)s。在 %(updated_at)s 对其进行了更新,它当" -"前具有 %(numclones)d 卷实例。" - -#, python-format -msgid "" -"Cannot delete cache volume: %(cachevol_name)s. It was updated at " -"%(updated_at)s and currently has %(numclones)s volume instances." -msgstr "" -"无法删除高速缓存卷:%(cachevol_name)s。在 %(updated_at)s 对其进行了更新,它当" -"前具有 %(numclones)s 卷实例。" - -#, python-format -msgid "" -"Cannot delete consistency group %(id)s. %(reason)s, and it cannot be the " -"source for an ongoing CG or CG Snapshot creation." -msgstr "" -"无法删除一致性组%(id)s,%(reason)s,它不能作为创建运行CG或者CG快照的源。" - -msgid "Cannot delete encryption specs. Volume type in use." -msgstr "无法删除加密规范。卷类型在使用中。" - -msgid "Cannot determine storage pool settings." -msgstr "无法确定存储池设置。" - -msgid "Cannot execute /sbin/mount.sofs" -msgstr "无法执行 /sbin/mount.sofs" - -#, python-format -msgid "Cannot find CG group %s." -msgstr "找不到 CG 组 %s。" - -#, python-format -msgid "" -"Cannot find Controller Configuration Service for storage system " -"%(storage_system)s." -msgstr "找不到对应存储系统 %(storage_system)s 的控制器配置服务。" - -#, python-format -msgid "Cannot find Replication Service to create volume for snapshot %s." 
-msgstr "找不到复制服务,无法为快照 %s 创建卷。" - -#, python-format -msgid "Cannot find Replication Service to delete snapshot %s." -msgstr "找不到复制服务,无法删除快照 %s。" - -#, python-format -msgid "Cannot find Replication service on system %s." -msgstr "在系统 %s 上找不到复制服务。" - -#, python-format -msgid "Cannot find Volume: %(id)s. unmanage operation. Exiting..." -msgstr "找不到卷:%(id)s。取消管理操作。正在退出..." - -#, python-format -msgid "Cannot find Volume: %(volumename)s. Extend operation. Exiting...." -msgstr "找不到卷 %(volumename)s。扩展操作。正在退出...." - -#, python-format -msgid "Cannot find device number for volume %(volumeName)s." -msgstr "找不到卷 %(volumeName)s 的设备号。" - -msgid "Cannot find migration task." -msgstr "找不到迁移任务。" - -#, python-format -msgid "Cannot find replication service on system %s." -msgstr "在系统 %s 上找不到复制服务。" - -#, python-format -msgid "Cannot find source CG instance. consistencygroup_id: %s." -msgstr "找不到源 CG 实例。consistencygroup_id:%s。" - -#, python-format -msgid "Cannot get mcs_id by channel id: %(channel_id)s." -msgstr "无法通过通道标识 %(channel_id)s 获取 mcs_id。" - -msgid "Cannot get necessary pool or storage system information." -msgstr "无法获取必需池或存储系统信息。" - -#, python-format -msgid "" -"Cannot get or create a storage group: %(sgGroupName)s for volume " -"%(volumeName)s " -msgstr "无法获取或创建对应卷 %(volumeName)s 的存储器组:%(sgGroupName)s " - -#, python-format -msgid "Cannot get or create initiator group: %(igGroupName)s. " -msgstr "无法获取或创建启动程序组:%(igGroupName)s。" - -#, python-format -msgid "Cannot get port group: %(pgGroupName)s. " -msgstr "无法获取端口组:%(pgGroupName)s。" - -#, python-format -msgid "" -"Cannot get storage group: %(sgGroupName)s from masking view " -"%(maskingViewInstanceName)s. " -msgstr "" -"无法从掩码视图 %(maskingViewInstanceName)s 获取存储器组 %(sgGroupName)s。" - -#, python-format -msgid "" -"Cannot get supported size range for %(sps)s Return code: %(rc)lu. Error: " -"%(error)s." 
-msgstr "" -"对于 %(sps)s,无法获取受支持的大小范围。返回码为 %(rc)lu。错误为 %(error)s。" - -#, python-format -msgid "" -"Cannot get the default storage group for FAST policy: %(fastPolicyName)s." -msgstr "无法获取对应快速策略 %(fastPolicyName)s 的缺省存储器组。" - -msgid "Cannot get the portgroup from the masking view." -msgstr "无法通过掩码视图获取端口组。" - -msgid "Cannot mount Scality SOFS, check syslog for errors" -msgstr "无法安装 Scality SOFS,请检查系统日志以获取错误" - -msgid "Cannot ping DRBDmanage backend" -msgstr "无法对 DRBDmanage 后端执行 ping 操作" - -#, python-format -msgid "Cannot place volume %(id)s on %(host)s" -msgstr "无法将卷 %(id)s 置于 %(host)s 上" - -#, python-format -msgid "" -"Cannot provide both 'cgsnapshot_id' and 'source_cgid' to create consistency " -"group %(name)s from source." -msgstr "" -"无法同时提供“cgsnapshot_id”和“source_cgid”以从源创建一致性组 %(name)s。" - -#, python-format -msgid "" -"Cannot provide both 'group_snapshot_id' and 'source_group_id' to create " -"group %(name)s from source." -msgstr "" -"无法同时提供“group_snapshot_id”和“source_group_id”以通过源创建组 %(name)s。" - -msgid "Cannot register resource" -msgstr "无法注册资源" - -msgid "Cannot register resources" -msgstr "无法注册多个资源" - -#, python-format -msgid "" -"Cannot remove volume %(volume_id)s from consistency group %(group_id)s " -"because it is not in the group." -msgstr "无法从一致性组 %(group_id)s 移除卷 %(volume_id)s因为它没有在该组中。" - -#, python-format -msgid "" -"Cannot remove volume %(volume_id)s from consistency group %(group_id)s " -"because volume is in an invalid state: %(status)s. Valid states are: " -"%(valid)s." -msgstr "" -"无法从一致性组 %(group_id)s 移除卷 %(volume_id)s因为该卷处于无效状态:" -"%(status)s。以下是有效状态:%(valid)s。" - -#, python-format -msgid "" -"Cannot remove volume %(volume_id)s from group %(group_id)s because it is not " -"in the group." -msgstr "无法从组 %(group_id)s 移除卷 %(volume_id)s,因为它不在该组中。" - -#, python-format -msgid "" -"Cannot remove volume %(volume_id)s from group %(group_id)s because volume is " -"in an invalid state: %(status)s. Valid states are: %(valid)s." 
-msgstr "" -"无法从组 %(group_id)s 移除卷 %(volume_id)s,因为该卷处于无效状态:" -"%(status)s。以下是有效状态:%(valid)s。" - -#, python-format -msgid "Cannot retype from HPE3PARDriver to %s." -msgstr "无法将 HPE3PARDriver 转型为 %s。" - -msgid "Cannot retype from one 3PAR array to another." -msgstr "一个 3PAR 阵列无法通过 retype 操作变为另一个阵列。" - -msgid "Cannot retype to a CPG in a different domain." -msgstr "无法执行 retype 操作,以变为另一个域中的 CPG。" - -msgid "Cannot retype to a snap CPG in a different domain." -msgstr "无法执行 retype 操作,以变为另一个域中的 SNAP CPG。" - -msgid "" -"Cannot run vgc-cluster command, please ensure software is installed and " -"permissions are set properly." -msgstr "无法运行 vgc-cluster 命令,请确保已安装软件,并且正确设置了许可权。" - -msgid "Cannot save group_snapshots changes in group object update." -msgstr "组对象更新时无法保存组快照变更。" - -msgid "Cannot save volume_types changes in group object update." -msgstr "组对象更新时无法保存卷类型变更。" - -msgid "Cannot save volumes changes in group object update." -msgstr "组对象更新时无法保存卷变更。" - -msgid "Cannot set both hitachi_serial_number and hitachi_unit_name." -msgstr "无法同时设置 hitachi_serial_number 和 hitachi_unit_name。" - -msgid "Cannot specify both protection domain name and protection domain id." -msgstr "无法同时指定保护域名和保护域标识。" - -msgid "Cannot specify both storage pool name and storage pool id." -msgstr "无法同时指定存储池名称和存储池标识。" - -#, python-format -msgid "" -"Cannot update consistency group %(group_id)s because no valid name, " -"description, add_volumes, or remove_volumes were provided." -msgstr "" -"无法更新一致性组 %(group_id)s,因为未提供任何有效名称、描述、add_volumes 或 " -"remove_volumes。" - -#, python-format -msgid "" -"Cannot update consistency group %s, status must be available, and it cannot " -"be the source for an ongoing CG or CG Snapshot creation." -msgstr "" -"不能更新一致性组%s,状态必须可获得,它不能作为创建运行CG或者CG快照的源。" - -msgid "Cannot update encryption specs. Volume type in use." 
-msgstr "无法更新加密规范。卷类型在使用中。" - -#, python-format -msgid "" -"Cannot update group %(group_id)s because no valid name, description, " -"add_volumes, or remove_volumes were provided." -msgstr "" -"无法更新组 %(group_id)s,因为未提供任何有效名称、描述、add_volumes 或 " -"remove_volumes。" - -#, python-format -msgid "Cannot update group_type %(id)s" -msgstr "无法更新组类型 %(id)s。" - -#, python-format -msgid "Cannot update volume_type %(id)s" -msgstr "无法更新 volume_type %(id)s" - -#, python-format -msgid "Cannot verify the existence of object:%(instanceName)s." -msgstr "无法验证对象 %(instanceName)s 的存在。" - -#, python-format -msgid "CgSnapshot %(cgsnapshot_id)s could not be found." -msgstr "找不到 Cg 快照 %(cgsnapshot_id)s。" - -msgid "" -"CgSnapshot status must be available or error, and no CG can be currently " -"using it as source for its creation." -msgstr "" -"一致性组快照状态必须是可获得的或错误,目前没有一致性组可以被用作创建它的源。" - -msgid "Cgsnahost is empty. No consistency group will be created." -msgstr "Cg 快照为空。将不创建任何一致性组。" - -msgid "Change hostlun id error." -msgstr "更改 hostlun 标识时出错。" - -msgid "Change lun priority error." -msgstr "更改 LUN 优先级时发生错误。" - -msgid "Change lun smarttier policy error." -msgstr "更改 LUN smarttier 策略时发生错误。" - -#, python-format -msgid "" -"Change would make usage less than 0 for the following resources: %(unders)s" -msgstr "对于下列资源,更改将导致使用量小于 0:%(unders)s" - -msgid "Check access permissions for the ZFS share assigned to this driver." -msgstr "请检查分配给此驱动程序的 ZFS 共享项的访问许可权。" - -msgid "Check hostgroup associate error." -msgstr "检查主机组关联时发生错误。" - -msgid "Check initiator added to array error." -msgstr "检查已添加至阵列的启动程序时发生错误。" - -msgid "Check initiator associated to host error." -msgstr "检查与主机相关联的启动程序时发生错误。" - -msgid "Check lungroup associate error." -msgstr "检查 LUN 组关联时发生错误。" - -msgid "Check portgroup associate error." -msgstr "检查端口组关联时发生错误。" - -msgid "" -"Check the state of the http service. Also ensure that the https port number " -"is the same as the one specified in cinder.conf." 
-msgstr "" -"请检查 HTTP 服务的状态。另外,请确保 HTTPS 端口号与 cinder.conf 中指定的 " -"HTTPS 端口号相同。" - -msgid "Chunk size is not multiple of block size for creating hash." -msgstr "区块大小不是用于创建散列的块大小的倍数。" - -#, python-format -msgid "Cisco Fibre Channel Zoning CLI error: %(reason)s" -msgstr "Cisco 光纤通道分区 CLI 错误:%(reason)s" - -#, python-format -msgid "Client with ip %s wasn't found " -msgstr "未找到ip为 %s 的客户端" - -msgid "" -"Clone can't be taken individually on a volume that is part of a Consistency " -"Group" -msgstr "不能对卷进行单独克隆,因为它是一致性组的一部分" - -#, python-format -msgid "Clone feature is not licensed on %(storageSystem)s." -msgstr "克隆功能在 %(storageSystem)s 上未获许可。" - -#, python-format -msgid "" -"Clone type '%(clone_type)s' is invalid; valid values are: '%(full_clone)s' " -"and '%(linked_clone)s'." -msgstr "" -"克隆“%(clone_type)s”无效;有效值为:“%(full_clone)s”和“%(linked_clone)s”。" - -msgid "Cluster" -msgstr "集群" - -#, python-format -msgid "Cluster %(id)s could not be found." -msgstr "无法找到标识为 %(id)s 的集群。" - -#, python-format -msgid "Cluster %(id)s still has hosts." -msgstr "集群 %(id)s 仍存在主机。" - -#, python-format -msgid "Cluster %(name)s already exists." -msgstr "集群 %(name)s 已存在。" - -#, python-format -msgid "Cluster %s successfully removed." -msgstr "成功删除集群 %s。" - -msgid "" -"Cluster is not formatted. You should probably perform \"dog cluster format\"." -msgstr "集群未格式化。您可能应该执行“dog cluster format”。" - -#, python-format -msgid "Coho Data Cinder driver failure: %(message)s" -msgstr "Coho Data Cinder 驱动程序故障:%(message)s" - -msgid "Coho rpc port is not configured" -msgstr "未配置 Coho rpc 端口" - -#, python-format -msgid "Command %(cmd)s blocked in the CLI and was cancelled" -msgstr "命令 %(cmd)s 在 CLI 中被阻塞,并且已取消" - -#, python-format -msgid "CommandLineHelper._wait_for_condition: %s timeout." -msgstr "CommandLineHelper._wait_for_condition:%s 超时。" - -msgid "Compression Enabler is not installed. Can not create compressed volume." 
-msgstr "未安装压缩启用程序。无法创建压缩卷。" - -#, python-format -msgid "Compute cluster: %(cluster)s not found." -msgstr "找不到计算集群 %(cluster)s。" - -msgid "Condition has no field." -msgstr "条件没有任何字段。" - -#, python-format -msgid "Config 'max_over_subscription_ratio' invalid. Must be > 0: %s" -msgstr "配置“max_over_subscription_ratio”无效。必须大于 0:%s" - -msgid "Configuration error: dell_sc_ssn not set." -msgstr "配置错误:未设置 dell_sc_ssn。" - -#, python-format -msgid "Configuration file %(configurationFile)s does not exist." -msgstr "配置文件 %(configurationFile)s 不存在。" - -msgid "Configuration is not found." -msgstr "找不到配置。" - -#, python-format -msgid "Configuration value %s is not set." -msgstr "未设置配置值 %s。" - -#, python-format -msgid "" -"Conflicting QoS specifications in volume type %s: when QoS spec is " -"associated to volume type, legacy \"netapp:qos_policy_group\" is not allowed " -"in the volume type extra specs." -msgstr "" -"卷类型 %s 中存在冲突的 QoS 规范:当 QoS 规范与卷类型相关联时,不允许卷类型额" -"外规范中存在旧的“netapp:qos_policy_group”。" - -#, python-format -msgid "Connection to glance failed: %(reason)s" -msgstr "连接glance失败: %(reason)s" - -#, python-format -msgid "Connection to swift failed: %(reason)s" -msgstr "连接 Swift 失败:%(reason)s" - -#, python-format -msgid "Connector does not provide: %s" -msgstr "连接器未提供:%s" - -#, python-format -msgid "Connector doesn't have required information: %(missing)s" -msgstr "连接器没有必需信息:%(missing)s" - -#, python-format -msgid "" -"Consistency Group %(cg_uri)s: update failed\n" -"%(err)s" -msgstr "" -"一致性组 %(cg_uri)s: 更新失败\n" -"%(err)s" - -#, python-format -msgid "" -"Consistency Group %(name)s: create failed\n" -"%(err)s" -msgstr "" -"一致性组 %(name)s: 创建失败\n" -"%(err)s" - -#, python-format -msgid "" -"Consistency Group %(name)s: delete failed\n" -"%(err)s" -msgstr "" -"一致性组 %(name)s: 删除失败\n" -"%(err)s" - -#, python-format -msgid "Consistency Group %s not found" -msgstr "找不到一致性组 %s" - -#, python-format -msgid "Consistency Group %s: not found" -msgstr "找不到一致性组: %s" - -msgid "Consistency group 
is empty. No cgsnapshot will be created." -msgstr "一致性组为空组。将不创建任何 cg 快照。" - -msgid "" -"Consistency group must not have attached volumes, volumes with snapshots, or " -"dependent cgsnapshots" -msgstr "一致性组一定不能包含已被挂载的卷、具有快照的卷或者从属的cgsnapshots。" - -msgid "" -"Consistency group status must be available or error and must not have " -"volumes or dependent cgsnapshots" -msgstr "" -"一致性组状态必须为“available”或“error”,而且一定不能包含卷或者从属的" -"cgsnapshots。" - -#, python-format -msgid "ConsistencyGroup %(consistencygroup_id)s could not be found." -msgstr "找不到一致性组 %(consistencygroup_id)s。" - -msgid "Container" -msgstr "容器" - -msgid "Container size smaller than required file size." -msgstr "容器大小小于所需文件大小。" - -msgid "Content type not supported." -msgstr "不支持内容类型。" - -#, python-format -msgid "Controller Configuration Service not found on %(storageSystemName)s." -msgstr "在 %(storageSystemName)s 上找不到控制器配置服务。" - -#, python-format -msgid "Controller IP '%(host)s' could not be resolved: %(e)s." -msgstr "未能解析控制器 IP“%(host)s”:%(e)s。" - -#, python-format -msgid "Converted to %(f1)s, but format is now %(f2)s" -msgstr "已转换为 %(f1)s,但现在格式为 %(f2)s" - -#, python-format -msgid "Converted to %(vol_format)s, but format is now %(file_format)s" -msgstr "已转换为 %(vol_format)s,但现在格式为 %(file_format)s" - -#, python-format -msgid "Converted to raw, but format is now %s" -msgstr "转化为裸格式,但目前格式是 %s" - -#, python-format -msgid "Converted to raw, but format is now %s." -msgstr "已转换为原始文件,但现在格式为 %s。" - -msgid "Coordinator uninitialized." -msgstr "协调程序未初始化。" - -#, python-format -msgid "CoprHD internal server error. Error details: %s" -msgstr "CoprHD内部服务器错误. 错误详细信息: %s" - -#, python-format -msgid "" -"Copy volume task failed: convert_to_base_volume: id=%(id)s, status=" -"%(status)s." -msgstr "" -"“复制卷”任务失败:convert_to_base_volume:id=%(id)s,status=%(status)s。" - -#, python-format -msgid "" -"Copy volume task failed: create_cloned_volume id=%(id)s, status=%(status)s." 
-msgstr "复制卷任务失败:create_cloned_volume id=%(id)s,status=%(status)s。" - -#, python-format -msgid "Copying metadata from %(src_type)s %(src_id)s to %(vol_id)s." -msgstr "正在将元数据从 %(src_type)s %(src_id)s 复制到 %(vol_id)s。" - -msgid "" -"Could not determine which Keystone endpoint to use. This can either be set " -"in the service catalog or with the cinder.conf config option " -"'backup_swift_auth_url'." -msgstr "" -"无法确定要使用的 Keystone 端点。可在服务目录中设置此项,也可使用 cinder.conf " -"配置选项 “backup_swift_auth_url”设置此项。" - -msgid "" -"Could not determine which Swift endpoint to use. This can either be set in " -"the service catalog or with the cinder.conf config option 'backup_swift_url'." -msgstr "" -"无法确定要使用的 Swift 端点。可在服务目录中设置此项,也可使用 cinder.conf 配" -"置选项 “backup_swift_url”设置此项。" - -msgid "Could not find DISCO wsdl file." -msgstr "找不到 DISCO wsdl 文件。" - -#, python-format -msgid "Could not find GPFS cluster id: %s." -msgstr "找不到 GPFS 集群标识:%s。" - -#, python-format -msgid "Could not find GPFS file system device: %s." -msgstr "找不到 GPFS 文件系统设备:%s。" - -#, python-format -msgid "Could not find config at %(path)s" -msgstr "在 %(path)s 找不到配置文件。" - -#, python-format -msgid "Could not find iSCSI export for volume %s" -msgstr "对于卷 %s,找不到 iSCSI 导出" - -#, python-format -msgid "Could not find iSCSI target for volume: %(volume_id)s." -msgstr "找不到卷 %(volume_id)s 的 iSCSI 目标。" - -#, python-format -msgid "Could not find key in output of command %(cmd)s: %(out)s." -msgstr "在命令 %(cmd)s 的输出 %(out)s 中找不到键。" - -#, python-format -msgid "Could not find parameter %(param)s" -msgstr "找不到参数 %(param)s" - -#, python-format -msgid "Could not find target %s" -msgstr "找不到目标 %s" - -#, python-format -msgid "Could not find the parent volume for Snapshot '%s' on array." -msgstr "在阵列上找不到快照“%s”的父卷。" - -#, python-format -msgid "Could not find unique snapshot %(snap)s on volume %(vol)s." -msgstr "在卷 %(vol)s 上找不到唯一快照 %(snap)s。" - -msgid "Could not get system name." 
-msgstr "未能获取系统名称。" - -#, python-format -msgid "Could not load paste app '%(name)s' from %(path)s" -msgstr "无法从路径 %(path)s 中加载应用 '%(name)s'" - -#, python-format -msgid "" -"Could not read information for snapshot %(name)s. Code: %(code)s. Reason: " -"%(reason)s" -msgstr "无法读取快照 %(name)s 的信息。代码:%(code)s。原因:%(reason)s" - -#, python-format -msgid "Could not restore configuration file %(file_path)s: %(exc)s" -msgstr "无法复原配置文件 %(file_path)s:%(exc)s" - -#, python-format -msgid "Could not save configuration to %(file_path)s: %(exc)s" -msgstr "未能将配置保存到 %(file_path)s:%(exc)s" - -#, python-format -msgid "Could not start consistency group snapshot %s." -msgstr "无法启动一致性组快照 %s。" - -#, python-format -msgid "Couldn't find ORM model for Persistent Versioned Object %s." -msgstr "无法为持久版本对象%s找到ORM模型。" - -#, python-format -msgid "Couldn't remove cluster %s because it doesn't exist." -msgstr "无法删除集群 %s,因为它不存在。" - -#, python-format -msgid "Couldn't remove cluster %s because it still has hosts." -msgstr "无法删除集群 %s,因为它仍存在主机。" - -#, python-format -msgid "Counter %s not found" -msgstr "找不到计数器 %s" - -msgid "Create QoS policy error." -msgstr "创建 QoS 策略时发生错误。" - -#, python-format -msgid "" -"Create backup aborted, expected backup status %(expected_status)s but got " -"%(actual_status)s." -msgstr "" -"备份创建已异常中止,需要的备份状态为 %(expected_status)s,但实际为 " -"%(actual_status)s。" - -#, python-format -msgid "" -"Create backup aborted, expected volume status %(expected_status)s but got " -"%(actual_status)s." -msgstr "" -"备份创建已异常中止,需要的卷状态为 %(expected_status)s,但实际为 " -"%(actual_status)s。" - -msgid "Create export for volume failed." -msgstr "为卷创建导出失败。" - -msgid "Create group failed." -msgstr "创建组失败。" - -msgid "Create hostgroup error." -msgstr "创建主机组时发生错误。" - -#, python-format -msgid "Create hypermetro error. %s." -msgstr "创建 hypermetro 错误。%s。" - -msgid "Create lun error." -msgstr "创建 LUN 时出错。" - -msgid "Create lun migration error." -msgstr "创建 LUN 迁移时发生错误。" - -msgid "Create luncopy error." 
-msgstr "创建 LUNcopy 时发生错误。" - -msgid "Create lungroup error." -msgstr "创建 LUN 组时发生错误。" - -msgid "Create manager volume flow failed." -msgstr "创建管理器卷流失败。" - -msgid "Create port group error." -msgstr "创建端口组时出错。" - -msgid "Create replication error." -msgstr "创建复制错误。" - -#, python-format -msgid "Create replication pair failed. Error: %s." -msgstr "创建复制对失败。错误:%s。" - -msgid "Create snapshot error." -msgstr "创建快照时发生错误。" - -#, python-format -msgid "Create volume error. Because %s." -msgstr "创建卷错误。因为 %s。" - -msgid "Create volume failed." -msgstr "创建卷失败。" - -msgid "Creating a consistency group from a source is not currently supported." -msgstr "当前不支持通过源创建一致性组。" - -#, python-format -msgid "" -"Creating and activating zone set failed: (Zone set=%(cfg_name)s error=" -"%(err)s)." -msgstr "创建并激活区域集失败(区域集为 %(cfg_name)s,发生的错误为 %(err)s)。" - -#, python-format -msgid "" -"Creating and activating zone set failed: (Zone set=%(zoneset)s error=" -"%(err)s)." -msgstr "创建并激活区域集失败(区域集为 %(zoneset)s,发生的错误为 %(err)s)。" - -#, python-format -msgid "Creating usages for %(begin_period)s until %(end_period)s" -msgstr "正在为 %(begin_period)s 直到 %(end_period)s 创建使用情况" - -msgid "Current host isn't part of HGST domain." -msgstr "当前主机不存在于 HGST 域中。" - -#, python-format -msgid "" -"Current host not valid for volume %(id)s with type %(type)s, migration not " -"allowed" -msgstr "对于类型为 %(type)s 的卷 %(id)s,当前主机无效,不允许迁移" - -#, python-format -msgid "" -"Currently mapped host for volume %(vol)s is in unsupported host group with " -"%(group)s." -msgstr "卷 %(vol)s 的当前已映射的主机位于具有 %(group)s 的不受支持的主机组中。" - -#, python-format -msgid "" -"DRBDmanage driver error: expected key \"%s\" not in answer, wrong DRBDmanage " -"version?" -msgstr "" -"DRBDmanage 驱动程序错误:回复中没有预期关键字“%s”,DRBDmanage 版本是否正确?" - -msgid "" -"DRBDmanage driver setup error: some required libraries (dbus, drbdmanage.*) " -"not found." 
-msgstr "" -"DRBDmanage 驱动程序设置错误:找不到某些必需的库(dbus 和 drbdmanage.*)。" - -#, python-format -msgid "DRBDmanage expected one resource (\"%(res)s\"), got %(n)d" -msgstr "DRBDmanage 期望一个资源 (\"%(res)s\"),但是获得了 %(n)d" - -#, python-format -msgid "" -"DRBDmanage timeout waiting for new volume after snapshot restore; resource " -"\"%(res)s\", volume \"%(vol)s\"" -msgstr "快照复原后 DRBDmanage 等待新卷时超时;资源“%(res)s”,卷“%(vol)s”" - -#, python-format -msgid "" -"DRBDmanage timeout waiting for snapshot creation; resource \"%(res)s\", " -"snapshot \"%(sn)s\"" -msgstr "DRBDmanage 等待创建快照时超时;资源“%(res)s”,快照“%(sn)s”" - -#, python-format -msgid "" -"DRBDmanage timeout waiting for volume creation; resource \"%(res)s\", volume " -"\"%(vol)s\"" -msgstr "DRBDmanage 等待创建卷时超时;资源“%(res)s”,卷“%(vol)s”" - -#, python-format -msgid "" -"DRBDmanage timeout waiting for volume size; volume ID \"%(id)s\" (res " -"\"%(res)s\", vnr %(vnr)d)" -msgstr "" -"DRBDmanage 等待卷大小时超时;卷标识“%(id)s”(res \"%(res)s\",vnr %(vnr)d)" - -msgid "Data ONTAP API version could not be determined." -msgstr "未能确定数据 ONTAP API 版本。" - -msgid "Data ONTAP operating in 7-Mode does not support QoS policy groups." -msgstr "以 7 方式运行的 Data ONTAP 不支持 QoS 策略组。" - -msgid "Database schema downgrade is not allowed." -msgstr "不允许对数据库模式进行降级。" - -#, python-format -msgid "Dataset %s is not shared in Nexenta Store appliance" -msgstr "数据集 %s 在 Nexenta 存储设备中未共享" - -#, python-format -msgid "Dataset group %s not found at Nexenta SA" -msgstr "在 Nexenta SA 中找不到数据库组 %s" - -#, python-format -msgid "" -"Dedup is a valid provisioning type, but requires WSAPI version " -"'%(dedup_version)s' version '%(version)s' is installed." -msgstr "" -"去重是有效的供应类型,但是要求安装了 WSAPI 版本“%(dedup_version)s”版" -"本“%(version)s”。" - -msgid "Dedup luns cannot be extended" -msgstr "无法扩展 Dedup lun" - -msgid "Default group type can not be found." 
-msgstr "找不到缺省组类型。" - -#, python-format -msgid "" -"Default quota for resource: %(res)s is set by the default quota flag: quota_" -"%(res)s, it is now deprecated. Please use the default quota class for " -"default quota." -msgstr "" -"资源 %(res)s 的缺省配额由缺省配额标记 quota_%(res)s 设置,现在不推荐使用。请" -"对缺省配额使用缺省配额类。 " - -msgid "Default volume type can not be found." -msgstr "找不到缺省卷类型。" - -msgid "Delete LUNcopy error." -msgstr "删除 LUNcopy 时发生错误。" - -msgid "Delete QoS policy error." -msgstr "删除 QoS 策略时发生错误。" - -msgid "Delete associated lun from lungroup error." -msgstr "从 LUN 组中删除相关联的 LUN 时发生错误。" - -#, python-format -msgid "" -"Delete backup aborted, the backup service currently configured " -"[%(configured_service)s] is not the backup service that was used to create " -"this backup [%(backup_service)s]." -msgstr "" -"备份删除已异常中止,当前配置的备份服务 [%(configured_service)s] 不是已用来创" -"建此备份的备份服务 [%(backup_service)s]。" - -msgid "Delete consistency group failed." -msgstr "删除一致性组失败。" - -msgid "Delete group failed." -msgstr "删除组失败。" - -msgid "Delete hostgroup error." -msgstr "删除主机组时发生错误。" - -msgid "Delete hostgroup from mapping view error." -msgstr "从映射视图删除主机组时发生错误。" - -msgid "Delete lun error." -msgstr "删除 LUN 时发生错误。" - -msgid "Delete lun migration error." -msgstr "删除 LUN 迁移时发生错误。" - -msgid "Delete lungroup error." -msgstr "删除 LUN 组时发生错误。" - -msgid "Delete lungroup from mapping view error." -msgstr "从映射视图删除 LUN 组时发生错误。" - -msgid "Delete mapping view error." -msgstr "删除映射视图时发生错误。" - -msgid "Delete port group error." -msgstr "删除端口组时出错。" - -msgid "Delete portgroup from mapping view error." -msgstr "从映射视图删除端口组时发生错误。" - -msgid "Delete snapshot error." -msgstr "删除快照时发生错误。" - -#, python-format -msgid "Delete snapshot of volume not supported in state: %s." -msgstr "不支持对处于以下状态的卷删除快照:%s。" - -#, python-format -msgid "" -"Delete_backup aborted, expected backup status %(expected_status)s but got " -"%(actual_status)s." 
-msgstr "" -"Delete_backup 已异常中止,需要的备份状态为 %(expected_status)s,但实际为 " -"%(actual_status)s。" - -msgid "Deleting volume from database and skipping rpc." -msgstr "正在从数据库删除卷并跳过 RPC。" - -#, python-format -msgid "Deleting volume metadata is not allowed for volumes in %s status." -msgstr "当卷状态为 %s 时,不允许删除该卷的元数据。" - -#, python-format -msgid "Deleting zones failed: (command=%(cmd)s error=%(err)s)." -msgstr "删除区域失败:(命令为 %(cmd)s,发生的错误为 %(err)s)。" - -msgid "Dell API 2.1 or later required for Consistency Group support" -msgstr "要提供“一致性组”支持,需要 Dell API 2.1 或更高版本" - -msgid "" -"Dell Cinder driver configuration error replication not supported with direct " -"connect." -msgstr "直接连接不支持 Dell Cinder 驱动程序配置错误复制。" - -#, python-format -msgid "Dell Cinder driver configuration error replication_device %s not found" -msgstr "找不到 Dell Cinder 驱动程序配置错误 replication_device %s" - -msgid "Describe-resource is admin only functionality" -msgstr "Describe-resource是只有管理员才能执行的功能。" - -#, python-format -msgid "Destination has migration_status %(stat)s, expected %(exp)s." -msgstr "目标具有 migration_status %(stat)s,原应为 %(exp)s。" - -msgid "Destination volume not mid-migration." -msgstr "目标卷未在迁移中。" - -msgid "" -"Detach volume failed: More than one attachment, but no attachment_id " -"provided." -msgstr "拆离卷失败:存在多个连接,但是未提供 attachment_id。" - -msgid "Detach volume from instance and then try again." -msgstr "请断开卷与实例的连接,然后再次进行尝试。" - -#, python-format -msgid "Detaching volume %(volumename)s from host %(hostname)s failed: %(err)s" -msgstr "将卷 %(volumename)s 从主机 %(hostname)s 上分离失败: %(err)s" - -#, python-format -msgid "Detected more than one volume with name %(vol_name)s" -msgstr "检测到多个具有名称 %(vol_name)s 的卷" - -#, python-format -msgid "Did not find expected column in %(fun)s: %(hdr)s." -msgstr "%(fun)s 中找不到需要的列:%(hdr)s。" - -#, python-format -msgid "Did not find the expected key %(key)s in %(fun)s: %(raw)s." 
-msgstr "在 %(fun)s 中找不到期望的键 %(key)s:%(raw)s。" - -msgid "Disabled reason contains invalid characters or is too long" -msgstr "禁用的原因包含无效字符或太长" - -#, python-format -msgid "Domain with name %s wasn't found." -msgstr "找不到名称为 %s 的域。" - -msgid "Down Hosts" -msgstr "关闭主机" - -#, python-format -msgid "" -"Downlevel GPFS Cluster Detected. GPFS Clone feature not enabled in cluster " -"daemon level %(cur)s - must be at least at level %(min)s." -msgstr "" -"检测到下层 GPFS 集群。在集群守护程序级别 %(cur)s 中未启用“GPFS 克隆”功能 - 必" -"须至少处于级别 %(min)s。" - -#, python-format -msgid "Driver initialize connection failed (error: %(err)s)." -msgstr "驱动程序初始化连接失败(错误:%(err)s)。" - -msgid "Driver must implement initialize_connection" -msgstr "驱动程序必须实现 initialize_connection" - -#, python-format -msgid "" -"Driver successfully decoded imported backup data, but there are missing " -"fields (%s)." -msgstr "驱动程序已成功将所导入的备份数据解码,但是缺少字段 (%s)。" - -#, python-format -msgid "" -"E-series proxy API version %(current_version)s does not support full set of " -"SSC extra specs. The proxy version must be at at least %(min_version)s." -msgstr "" -"E-series 代理 API 版本 %(current_version)s 不支持完整的一组 SSC 额外规范。代" -"理版本必须至少为 %(min_version)s。" - -#, python-format -msgid "" -"EMC VNX Cinder Driver CLI exception: %(cmd)s (Return Code: %(rc)s) (Output: " -"%(out)s)." -msgstr "" -"发生 EMC VNX Cinder 驱动程序 CLI 异常:%(cmd)s(返回码为 %(rc)s)(输出为 " -"%(out)s)。" - -#, python-format -msgid "" -"EMC VNX Cinder Driver SPUnavailableException: %(cmd)s (Return Code: %(rc)s) " -"(Output: %(out)s)." -msgstr "" -"EMC VNX Cinder 驱动程序 SPUnavailableException:%(cmd)s(返回码:%(rc)s)(输" -"出:%(out)s)。" - -msgid "" -"EcomServerIp, EcomServerPort, EcomUserName, EcomPassword must have valid " -"values." -msgstr "" -"EcomServerIp、EcomServerPort、EcomUserName 和 EcomPassword 必须具有有效值。" - -#, python-format -msgid "" -"Either 'cgsnapshot_id' or 'source_cgid' must be provided to create " -"consistency group %(name)s from source." 
-msgstr "" -"必须提供“cgsnapshot_id”或者“source_cgid”,以从源创建一致性组 %(name)s。" - -#, python-format -msgid "" -"Either 'group_snapshot_id' or 'source_group_id' must be provided to create " -"group %(name)s from source." -msgstr "" -"必须提供“group_snapshot_id”或者“source_group_id”,才能通过源创建组 %(name)s。" - -#, python-format -msgid "" -"Either SLO: %(slo)s or workload %(workload)s is invalid. Examine previous " -"error statement for valid values." -msgstr "" -"SLO %(slo)s 或工作负载 %(workload)s 无效。请查看先前的错误说明以了解有效值。" - -msgid "Either hitachi_serial_number or hitachi_unit_name is required." -msgstr "需要 hitachi_serial_number 或 hitachi_unit_name。" - -#, python-format -msgid "Element Composition Service not found on %(storageSystemName)s." -msgstr "在 %(storageSystemName)s 上找不到元素组合服务。" - -msgid "Enables QoS." -msgstr "启用 QoS。" - -msgid "Enables compression." -msgstr "启用压缩。" - -msgid "Enables replication." -msgstr "启用复制。" - -msgid "Ensure that configfs is mounted at /sys/kernel/config." -msgstr "请确保 configfs 安装在 /sys/kernel/config 处。" - -#, python-format -msgid "" -"Error Adding Initiator: %(initiator)s on groupInitiatorGroup: " -"%(initiatorgroup)s Return code: %(ret.status)d Message: %(ret.data)s ." -msgstr "" -"在 groupInitiatorGroup %(initiatorgroup)s 上添加启动程序 %(initiator)s 时出" -"错。返回码:%(ret.status)d 消息:%(ret.data)s。" - -#, python-format -msgid "" -"Error Adding to TargetGroup: %(targetgroup)s withIQN: %(iqn)sReturn code: " -"%(ret.status)d Message: %(ret.data)s." -msgstr "" -"添加至带有 IQN %(iqn)s 的目标组 %(targetgroup)s 时出错。返回码:" -"%(ret.status)d,消息:%(ret.data)s。" - -#, python-format -msgid "Error Attaching volume %(vol)s." -msgstr "连接卷 %(vol)s 时出错。" - -#, python-format -msgid "" -"Error Cloning Snapshot: %(snapshot)s on Volume: %(lun)s of Pool: %(pool)s " -"Project: %(project)s Clone project: %(clone_proj)s Return code: " -"%(ret.status)d Message: %(ret.data)s." 
-msgstr "" -"在池 %(pool)s 的卷 %(lun)s 上克隆快照 %(snapshot)s 时出错。项目:%(project)s " -"克隆项目:%(clone_proj)s返回码:%(ret.status)d 消息:%(ret.data)s。" - -#, python-format -msgid "" -"Error Create Cloned Volume: %(cloneName)s Return code: %(rc)lu. Error: " -"%(error)s." -msgstr "创建克隆卷 %(cloneName)s 时出错。返回码为 %(rc)lu。错误为 %(error)s。" - -#, python-format -msgid "" -"Error Create Cloned Volume: Volume: %(cloneName)s Source Volume:" -"%(sourceName)s. Return code: %(rc)lu. Error: %(error)s." -msgstr "" -"创建克隆卷时出错:卷:%(cloneName)s 源卷为 %(sourceName)s。返回码为 " -"%(rc)lu。错误为 %(error)s。" - -#, python-format -msgid "" -"Error Create Group: %(groupName)s. Return code: %(rc)lu. Error: %(error)s." -msgstr "创建组 %(groupName)s 时出错。返回码:%(rc)lu。错误:%(error)s。" - -#, python-format -msgid "" -"Error Create Masking View: %(groupName)s. Return code: %(rc)lu. Error: " -"%(error)s." -msgstr "创建掩码视图 %(groupName)s 时出错。返回码:%(rc)lu。错误:%(error)s。" - -#, python-format -msgid "" -"Error Create Volume: %(volumeName)s. Return code: %(rc)lu. Error: %(error)s." -msgstr "创建卷 %(volumeName)s 时出错。返回码:%(rc)lu。错误:%(error)s。" - -#, python-format -msgid "" -"Error Create Volume: %(volumename)s. Return code: %(rc)lu. Error: %(error)s." -msgstr "创建卷 %(volumename)s 时出错。返回码:%(rc)lu。错误:%(error)s。" - -#, python-format -msgid "" -"Error CreateGroupReplica: source: %(source)s target: %(target)s. Return " -"code: %(rc)lu. Error: %(error)s." -msgstr "" -"创建组副本时出错:源为 %(source)s,目标为 %(target)s。返回码为 %(rc)lu。错误" -"为 %(error)s。" - -#, python-format -msgid "" -"Error Creating Initiator: %(initiator)s on Alias: %(alias)s Return code: " -"%(ret.status)d Message: %(ret.data)s ." -msgstr "" -"对别名 %(alias)s 创建发起方 %(initiator)s 时出错。返回码:%(ret.status)d 消" -"息:%(ret.data)s。" - -#, python-format -msgid "" -"Error Creating Project: %(project)s on Pool: %(pool)s Return code: " -"%(ret.status)d Message: %(ret.data)s ." 
-msgstr "" -"在池 %(pool)s 上创建项目 %(project)s 时出错。返回码:%(ret.status)d 消息:" -"%(ret.data)s。" - -#, python-format -msgid "" -"Error Creating Property: %(property)s Type: %(type)s Description: " -"%(description)s Return code: %(ret.status)d Message: %(ret.data)s." -msgstr "" -"创建属性 %(property)s、类型 %(type)s 和描述 %(description)s 时出错。返回码:" -"%(ret.status)d 消息:%(ret.data)s。" - -#, python-format -msgid "" -"Error Creating Share: %(name)s Return code: %(ret.status)d Message: " -"%(ret.data)s." -msgstr "" -"创建共享项 %(name)s 时出错。返回码:%(ret.status)d 消息:%(ret.data)s。" - -#, python-format -msgid "" -"Error Creating Snapshot: %(snapshot)s onVolume: %(lun)s to Pool: %(pool)s " -"Project: %(project)s Return code: %(ret.status)d Message: %(ret.data)s." -msgstr "" -"在卷 %(lun)s 上对池 %(pool)s 创建快照 %(snapshot)s 时出错。项目:%(project)s " -"返回码:%(ret.status)d 消息:%(ret.data)s。" - -#, python-format -msgid "" -"Error Creating Snapshot: %(snapshot)s onshare: %(share)s to Pool: %(pool)s " -"Project: %(project)s Return code: %(ret.status)d Message: %(ret.data)s." -msgstr "" -"在共享项 %(share)s 上对池 %(pool)s 创建快照 %(snapshot)s 时出错。项目:" -"%(project)s 返回码:%(ret.status)d 消息:%(ret.data)s。" - -#, python-format -msgid "" -"Error Creating Target: %(alias)sReturn code: %(ret.status)d Message: " -"%(ret.data)s ." -msgstr "创建目标 %(alias)s 时出错。返回码:%(ret.status)d 消息:%(ret.data)s。" - -#, python-format -msgid "" -"Error Creating TargetGroup: %(targetgroup)s withIQN: %(iqn)sReturn code: " -"%(ret.status)d Message: %(ret.data)s ." -msgstr "" -"创建带有 IQN %(iqn)s 的目标组 %(targetgroup)s 时出错。返回码:" -"%(ret.status)d,消息:%(ret.data)s。" - -#, python-format -msgid "" -"Error Creating Volume: %(lun)s Size: %(size)s Return code: %(ret.status)d " -"Message: %(ret.data)s." -msgstr "" -"创建大小为 %(size)s 的卷 %(lun)s 时出错。返回码:%(ret.status)d消息:" -"%(ret.data)s。" - -#, python-format -msgid "" -"Error Creating new composite Volume Return code: %(rc)lu. Error: %(error)s." 
-msgstr "创建新的组合卷时出错。返回码:%(rc)lu。错误:%(error)s。" - -#, python-format -msgid "" -"Error Creating replication action on: pool: %(pool)s Project: %(proj)s " -"volume: %(vol)s for target: %(tgt)s and pool: %(tgt_pool)sReturn code: " -"%(ret.status)d Message: %(ret.data)s ." -msgstr "" -"对于目标 %(tgt)s 和池 %(tgt_pool)s,在池 %(pool)s、项目 %(proj)s 和卷 " -"%(vol)s 上创建复制操作时出错。返回码:%(ret.status)d 消息:%(ret.data)s。" - -msgid "Error Creating unbound volume on an Extend operation." -msgstr "对扩展操作创建未绑定卷时出错。" - -msgid "Error Creating unbound volume." -msgstr "创建未绑定卷时出错。" - -#, python-format -msgid "" -"Error Delete Volume: %(volumeName)s. Return code: %(rc)lu. Error: %(error)s." -msgstr "删除卷 %(volumeName)s 时出错。返回码:%(rc)lu。错误:%(error)s。" - -#, python-format -msgid "" -"Error Deleting Group: %(storageGroupName)s. Return code: %(rc)lu. Error: " -"%(error)s" -msgstr "删除组 %(storageGroupName)s 时出错。返回码:%(rc)lu。错误:%(error)s" - -#, python-format -msgid "" -"Error Deleting Initiator Group: %(initiatorGroupName)s. Return code: " -"%(rc)lu. Error: %(error)s" -msgstr "" -"删除启动程序组 %(initiatorGroupName)s 时出错。返回码:%(rc)lu。错误:" -"%(error)s" - -#, python-format -msgid "" -"Error Deleting Snapshot: %(snapshot)s on Share: %(share)s to Pool: %(pool)s " -"Project: %(project)s Return code: %(ret.status)d Message: %(ret.data)s." -msgstr "" -"在共享项 %(share)s 上对池 %(pool)s 删除快照 %(snapshot)s 时出错。项目:" -"%(project)s 返回码:%(ret.status)d 消息:%(ret.data)s。" - -#, python-format -msgid "" -"Error Deleting Snapshot: %(snapshot)s on Volume: %(lun)s to Pool: %(pool)s " -"Project: %(project)s Return code: %(ret.status)d Message: %(ret.data)s." -msgstr "" -"在卷 %(lun)s 上对池 %(pool)s 删除快照 %(snapshot)s 时出错。项目:%(project)s " -"返回码:%(ret.status)d 消息:%(ret.data)s。" - -#, python-format -msgid "" -"Error Deleting Volume: %(lun)s from Pool: %(pool)s, Project: %(project)s. " -"Return code: %(ret.status)d, Message: %(ret.data)s." 
-msgstr "" -"从池 %(pool)s 中删除卷 %(lun)s 时出错,项目:%(project)s。返回码:" -"%(ret.status)d,消息:%(ret.data)s。" - -#, python-format -msgid "" -"Error Deleting project: %(project)s on pool: %(pool)s Return code: " -"%(ret.status)d Message: %(ret.data)s." -msgstr "" -"删除池 %(pool)s 上的项目 %(project)s 时出错。返回码:%(ret.status)d 消息:" -"%(ret.data)s。" - -#, python-format -msgid "" -"Error Deleting replication action: %(id)s Return code: %(ret.status)d " -"Message: %(ret.data)s." -msgstr "" -"删除复制操作 %(id)s 时出错。返回码:%(ret.status)d 消息:%(ret.data)s。" - -#, python-format -msgid "" -"Error Extend Volume: %(volumeName)s. Return code: %(rc)lu. Error: %(error)s." -msgstr "扩展卷 %(volumeName)s 出错。返回码:%(rc)lu。错误:%(error)s。" - -#, python-format -msgid "" -"Error Getting Initiators: InitiatorGroup: %(initiatorgroup)s Return code: " -"%(ret.status)d Message: %(ret.data)s ." -msgstr "" -"获取发起方时出错:发起方组为 %(initiatorgroup)s,返回码为 %(ret.status)d 消" -"息:%(ret.data)s。" - -#, python-format -msgid "" -"Error Getting Pool Stats: Pool: %(pool)s Return code: %(status)d Message: " -"%(data)s." -msgstr "获取池统计信息时出错:池:%(pool)s 返回码:%(status)d 消息:%(data)s。" - -#, python-format -msgid "" -"Error Getting Project Stats: Pool: %(pool)s Project: %(project)s Return " -"code: %(ret.status)d Message: %(ret.data)s." -msgstr "" -"获取项目状态时出错:池:%(pool)s 项目:%(project)s返回码:%(ret.status)d 消" -"息:%(ret.data)s。" - -#, python-format -msgid "" -"Error Getting Share: %(share)s on Pool: %(pool)s Project: %(project)s Return " -"code: %(ret.status)d Message: %(ret.data)s." -msgstr "" -"在池 %(pool)s 上获取共享项 %(share)s 时出错。项目:%(project)s 返回码:" -"%(ret.status)d 消息:%(ret.data)s。" - -#, python-format -msgid "" -"Error Getting Snapshot: %(snapshot)s on Volume: %(lun)s to Pool: %(pool)s " -"Project: %(project)s Return code: %(ret.status)d Message: %(ret.data)s." 
-msgstr "" -"在卷 %(lun)s 上对池 %(pool)s 创建快照 %(snapshot)s 时出错。项目:%(project)s " -"返回码:%(ret.status)d 消息:%(ret.data)s。" - -#, python-format -msgid "" -"Error Getting Target: %(alias)sReturn code: %(ret.status)d Message: " -"%(ret.data)s ." -msgstr "获取目标 %(alias)s 时出错。返回码:%(ret.status)d 消息:%(ret.data)s。" - -#, python-format -msgid "" -"Error Getting Volume: %(lun)s on Pool: %(pool)s Project: %(project)s Return " -"code: %(ret.status)d Message: %(ret.data)s." -msgstr "" -"在池 %(pool)s 上获取卷 %(lun)s 时出错。项目:%(project)s 返回码:" -"%(ret.status)d 消息:%(ret.data)s。" - -#, python-format -msgid "" -"Error Migrating volume from one pool to another. Return code: %(rc)lu. " -"Error: %(error)s." -msgstr "在池之间迁移卷时出错。返回码:%(rc)lu。错误为 %(error)s。" - -#, python-format -msgid "" -"Error Modifying masking view : %(groupName)s. Return code: %(rc)lu. Error: " -"%(error)s." -msgstr "修改掩码视图 %(groupName)s 时出错。返回码:%(rc)lu。错误为 %(error)s。" - -#, python-format -msgid "Error Pool ownership: Pool %(pool)s is not owned by %(host)s." -msgstr "池所有权出错:池 %(pool)s 并非归 %(host)s 所有。" - -#, python-format -msgid "" -"Error Setting props Props: %(props)s on Volume: %(lun)s of Pool: %(pool)s " -"Project: %(project)s Return code: %(ret.status)d Message: %(ret.data)s." -msgstr "" -"在池 %(pool)s 的卷 %(lun)s 上设置属性 Props %(props)s 时出错。项目:" -"%(project)s 返回码:%(ret.status)d 消息:%(ret.data)s。" - -#, python-format -msgid "" -"Error Terminating migrate session. Return code: %(rc)lu. Error: %(error)s." -msgstr "终止迁移会话时出错。返回码:%(rc)lu。错误:%(error)s。" - -#, python-format -msgid "" -"Error Verifying Initiator: %(iqn)s Return code: %(ret.status)d Message: " -"%(ret.data)s." -msgstr "" -"验证启动程序 %(iqn)s 时出错。返回码:%(ret.status)d 消息:%(ret.data)s。" - -#, python-format -msgid "" -"Error Verifying Pool: %(pool)s Return code: %(ret.status)d Message: " -"%(ret.data)s." 
-msgstr "验证池 %(pool)s 时出错。返回码:%(ret.status)d 消息:%(ret.data)s。" - -#, python-format -msgid "" -"Error Verifying Project: %(project)s on Pool: %(pool)s Return code: " -"%(ret.status)d Message: %(ret.data)s." -msgstr "" -"在池 %(pool)s 上验证项目 %(project)s 时出错。返回码:%(ret.status)d,消息:" -"%(ret.data)s。" - -#, python-format -msgid "" -"Error Verifying Service: %(service)s Return code: %(ret.status)d Message: " -"%(ret.data)s." -msgstr "" -"验证服务 %(service)s 时出错。返回码:%(ret.status)d 消息:%(ret.data)s。" - -#, python-format -msgid "" -"Error Verifying Target: %(alias)s Return code: %(ret.status)d Message: " -"%(ret.data)s." -msgstr "验证目标 %(alias)s 时出错。返回码:%(ret.status)d 消息:%(ret.data)s。" - -#, python-format -msgid "" -"Error Verifying share: %(share)s on Project: %(project)s and Pool: %(pool)s " -"Return code: %(ret.status)d Message: %(ret.data)s." -msgstr "" -"在项目 %(project)s 和池 %(pool)s 上验证共享项 %(share)s 时出错。返回码为 " -"%(ret.status)d,消息为 %(ret.data)s。" - -#, python-format -msgid "" -"Error adding Volume: %(volumeName)s with instance path: " -"%(volumeInstancePath)s." -msgstr "使用以下实例路径添加卷 %(volumeName)s 时出错:%(volumeInstancePath)s。" - -#, python-format -msgid "" -"Error adding initiator to group : %(groupName)s. Return code: %(rc)lu. " -"Error: %(error)s." -msgstr "" -"向组 %(groupName)s 添加启动程序时出错。返回码:%(rc)lu。错误为 %(error)s。" - -#, python-format -msgid "Error adding volume to composite volume. Error is: %(error)s." -msgstr "向组合卷添加卷时出错。错误为:%(error)s。" - -#, python-format -msgid "Error appending volume %(volumename)s to target base volume." -msgstr "将卷 %(volumename)s 追加至目标基本卷时出错。" - -#, python-format -msgid "" -"Error associating storage group : %(storageGroupName)s. To fast Policy: " -"%(fastPolicyName)s with error description: %(errordesc)s." -msgstr "" -"将存储器组 %(storageGroupName)s 关联至快速策略 %(fastPolicyName)s 时出错,错" -"误描述:%(errordesc)s。" - -#, python-format -msgid "" -"Error break clone relationship: Sync Name: %(syncName)s Return code: " -"%(rc)lu. Error: %(error)s." 
-msgstr "" -"破坏克隆关系时出错:同步名称为 %(syncName)s。返回码为 %(rc)lu。错误为 " -"%(error)s。" - -msgid "Error connecting to ceph cluster." -msgstr "连接至 ceph 集群时出错。" - -#, python-format -msgid "Error connecting via ssh: %s" -msgstr "通过 ssh 进行连接时出错:%s" - -#, python-format -msgid "Error creating volume: %s." -msgstr "创建卷时出错:%s。" - -msgid "Error deleting replay profile." -msgstr "删除重放概要文件时出错。" - -#, python-format -msgid "Error deleting volume %(ssn)s: %(volume)s" -msgstr "删除卷 %(ssn)s 时出错:%(volume)s " - -#, python-format -msgid "Error deleting volume %(vol)s: %(err)s." -msgstr "删除卷 %(vol)s 时出错:%(err)s。" - -#, python-format -msgid "Error during evaluator parsing: %(reason)s" -msgstr "在评估程序解析期间,发生错误:%(reason)s" - -#, python-format -msgid "" -"Error editing share: %(share)s on Pool: %(pool)s Return code: %(ret.status)d " -"Message: %(ret.data)s ." -msgstr "" -"在池 %(pool)s 上编辑共享项 %(share)s 时出错。返回码:%(ret.status)d,消息:" -"%(ret.data)s。" - -#, python-format -msgid "" -"Error enabling iSER for NetworkPortal: please ensure that RDMA is supported " -"on your iSCSI port %(port)d on ip %(ip)s." -msgstr "" -"为 NetworkPortal 启用 iSER 时出错:请确保 RDMA 在 IP %(ip)s 上的 iSCSI 端口 " -"%(port)d 中受支持。" - -#, python-format -msgid "Error encountered during cleanup of a failed attach: %(ex)s" -msgstr "在清除失败的连接期间遇到错误:%(ex)s" - -#, python-format -msgid "Error executing CloudByte API [%(cmd)s], Error: %(err)s." -msgstr "执行 CloudByte API [%(cmd)s] 时出错,错误为 %(err)s。" - -msgid "Error executing EQL command" -msgstr "执行 EQL 命令时出错" - -#, python-format -msgid "Error executing command via ssh: %s" -msgstr "通过 ssh 执行命令时发生错误:%s" - -#, python-format -msgid "Error extending volume %(vol)s: %(err)s." -msgstr "扩展卷 %(vol)s 时出错:%(err)s。" - -#, python-format -msgid "Error extending volume: %(reason)s" -msgstr "扩展卷时出错:%(reason)s" - -#, python-format -msgid "Error finding %(name)s." -msgstr "查找 %(name)s 时出错。" - -#, python-format -msgid "Error finding %s." 
-msgstr "查找 %s 时出错。" - -#, python-format -msgid "" -"Error getting ReplicationSettingData. Return code: %(rc)lu. Error: %(error)s." -msgstr "获取 ReplicationSettingData 时出错。返回码:%(rc)lu。错误:%(error)s。" - -msgid "" -"Error getting appliance version details. Return code: %(ret.status)d " -"Message: %(ret.data)s ." -msgstr "" -"获取设备版本详细信息时出错。返回码:%(ret.status)d 消息:%(ret.data)s。" - -#, python-format -msgid "Error getting domain id from name %(name)s: %(err)s." -msgstr "从名称 %(name)s 中获取域标识时出错:%(err)s。" - -#, python-format -msgid "Error getting domain id from name %(name)s: %(id)s." -msgstr "从名称 %(name)s 中获取域标识 %(id)s 时出错。" - -msgid "Error getting initiator groups." -msgstr "获取发起方组时,发生错误。" - -#, python-format -msgid "Error getting pool id from name %(pool)s: %(err)s." -msgstr "从名称 %(pool)s 中获取池标识时出错:%(err)s。" - -#, python-format -msgid "Error getting pool id from name %(pool_name)s: %(err_msg)s." -msgstr "从名称 %(pool_name)s 中获取池标识时出错:%(err_msg)s。" - -#, python-format -msgid "" -"Error getting replication action: %(id)s. Return code: %(ret.status)d " -"Message: %(ret.data)s ." -msgstr "" -"获取复制操作 %(id)s 时出错。返回码:%(ret.status)d 消息:%(ret.data)s。" - -msgid "" -"Error getting replication source details. Return code: %(ret.status)d " -"Message: %(ret.data)s ." -msgstr "获取复制源详细信息时出错。返回码:%(ret.status)d 消息:%(ret.data)s。" - -msgid "" -"Error getting replication target details. Return code: %(ret.status)d " -"Message: %(ret.data)s ." -msgstr "" -"获取复制目标详细信息时出错。返回码:%(ret.status)d 消息:%(ret.data)s。" - -#, python-format -msgid "Error getting sdc id from ip %(sdc_ip)s: %(sdc_id_message)s" -msgstr "从地址 %(sdc_ip)s 获取sdc标识时出错: %(sdc_id_message)s" - -#, python-format -msgid "" -"Error getting version: svc: %(svc)s.Return code: %(ret.status)d Message: " -"%(ret.data)s." 
-msgstr "" -"获取版本时出错:svc:%(svc)s。返回码:%(ret.status)d 消息:%(ret.data)s。" - -#, python-format -msgid "" -"Error in Operation [%(operation)s] for volume [%(cb_volume)s] in CloudByte " -"storage: [%(cb_error)s], error code: [%(error_code)s]." -msgstr "" -"对 CloudByte 存储器中的卷 [%(cb_volume)s] 执行操作 [%(operation)s] 时出错:" -"[%(cb_error)s],错误代码:[%(error_code)s]。" - -#, python-format -msgid "Error in SolidFire API response: data=%(data)s" -msgstr "SolidFire API响应里发生错误:data=%(data)s" - -#, python-format -msgid "Error in space-create for %(space)s of size %(size)d GB" -msgstr "对大小为 %(size)d GB 的 %(space)s 进行空间创建时出错" - -#, python-format -msgid "Error in space-extend for volume %(space)s with %(size)d additional GB" -msgstr "对卷 %(space)s 进行空间扩充,额外扩充 %(size)d GB 时出错" - -#, python-format -msgid "Error managing volume: %s." -msgstr "管理卷 %s 时出错。" - -#, python-format -msgid "" -"Error modify replica synchronization: %(sv)s operation: %(operation)s. " -"Return code: %(rc)lu. Error: %(error)s." -msgstr "" -"修改副本同步 %(sv)s 操作 %(operation)s 时出错。返回码为 %(rc)lu。错误为 " -"%(error)s。" - -#, python-format -msgid "" -"Error modifying Service: %(service)s Return code: %(ret.status)d Message: " -"%(ret.data)s." -msgstr "" -"修改服务 %(service)s 时出错。返回码:%(ret.status)d 消息:%(ret.data)s。" - -#, python-format -msgid "" -"Error moving volume: %(vol)s from source project: %(src)s to target project: " -"%(tgt)s Return code: %(ret.status)d Message: %(ret.data)s ." -msgstr "" -"将卷 %(vol)s 从源项目 %(src)s 移至目标项目 %(tgt)s 时出错。返回码:" -"%(ret.status)d 消息:%(ret.data)s。" - -msgid "Error not a KeyError." -msgstr "错误并非 KeyError。" - -msgid "Error not a TypeError." -msgstr "错误并非 TypeError。" - -#, python-format -msgid "Error occurred when creating cgsnapshot %s." -msgstr "创建 cg 快照 %s 时发生了错误。" - -#, python-format -msgid "Error occurred when creating group_snapshot %s." -msgstr "创建 group_snapshot %s 时发生了错误。" - -#, python-format -msgid "Error occurred when deleting cgsnapshot %s." 
-msgstr "删除 cg 快照 %s 时发生了错误。" - -#, python-format -msgid "Error occurred when deleting group snapshot %s." -msgstr "删除组快照%s 时出现错误。" - -#, python-format -msgid "Error occurred when deleting group_snapshot %s." -msgstr "删除 group_snapshot %s 时发生了错误。" - -#, python-format -msgid "Error occurred when updating consistency group %s." -msgstr "更新一致性组 %s 时发生了错误。" - -#, python-format -msgid "Error occurred when updating group %s." -msgstr "更新组 %s 时发生了错误。" - -#, python-format -msgid "Error renaming volume %(vol)s: %(err)s." -msgstr "重命名卷 %(vol)s 时出错:%(err)s。" - -#, python-format -msgid "Error response: %s" -msgstr "错误响应:%s" - -msgid "Error retrieving volume size" -msgstr "检索卷大小时出错" - -#, python-format -msgid "" -"Error sending replication update for action id: %(id)s . Return code: " -"%(ret.status)d Message: %(ret.data)s ." -msgstr "" -"为操作标识 %(id)s 发送复制更新时出错。返回码:%(ret.status)d 消息:" -"%(ret.data)s。" - -#, python-format -msgid "" -"Error sending replication update. Returned error: %(err)s. Action: %(id)s." -msgstr "发送复制更新时出错。所返回的错误:%(err)s。操作:%(id)s。" - -#, python-format -msgid "" -"Error setting replication inheritance to %(set)s for volume: %(vol)s project " -"%(project)s Return code: %(ret.status)d Message: %(ret.data)s ." -msgstr "" -"对于卷 %(vol)s,将复制继承设置为 %(set)s 时出错。项目:%(project)s 返回码:" -"%(ret.status)d 消息:%(ret.data)s。" - -#, python-format -msgid "" -"Error severing the package: %(package)s from source: %(src)s Return code: " -"%(ret.status)d Message: %(ret.data)s ." -msgstr "" -"从源 %(src)s 分割软件包 %(package)s 时出错。返回码:%(ret.status)d 消息:" -"%(ret.data)s。" - -#, python-format -msgid "Error unbinding volume %(vol)s from pool. %(error)s." -msgstr "从池取消绑定卷 %(vol)s 时出错。%(error)s。" - -#, python-format -msgid "Error while authenticating with switch: %s." -msgstr "向交换机认证时出错:%s。" - -#, python-format -msgid "Error while changing VF context %s." -msgstr "更改 VF 上下文 %s 时出错。" - -#, python-format -msgid "Error while checking the firmware version %s." 
-msgstr "检查固件版本 %s 时出错。" - -#, python-format -msgid "Error while checking transaction status: %s" -msgstr "检查事务状态时发生错误:%s" - -#, python-format -msgid "Error while checking whether VF is available for management %s." -msgstr "检查 VF 对管理 %s 是否可用时出错。" - -#, python-format -msgid "" -"Error while connecting the switch %(switch_id)s with protocol %(protocol)s. " -"Error: %(error)s." -msgstr "" -"连接带有协议 %(protocol)s 的交换机 %(switch_id)s 时出错。错误:%(error)s。" - -#, python-format -msgid "Error while creating authentication token: %s" -msgstr "创建认证令牌时出错:%s" - -#, python-format -msgid "Error while creating snapshot [status] %(stat)s - [result] %(res)s." -msgstr "创建快照 [status] %(stat)s - [result] %(res)s 时出错。" - -#, python-format -msgid "Error while creating volume [status] %(stat)s - [result] %(res)s." -msgstr "创建卷 [status] %(stat)s - [result] %(res)s 时出错。" - -#, python-format -msgid "Error while deleting snapshot [status] %(stat)s - [result] %(res)s" -msgstr "删除快照 [status] %(stat)s - [result] %(res)s 时出错。" - -#, python-format -msgid "Error while deleting volume [status] %(stat)s - [result] %(res)s." -msgstr "删除卷 [status] %(stat)s - [result] %(res)s 时出错。" - -#, python-format -msgid "Error while extending volume [status] %(stat)s - [result] %(res)s." -msgstr "扩展卷 [status] %(stat)s - [result] %(res)s 时出错。" - -#, python-format -msgid "Error while getting %(op)s details, returned code: %(status)s." -msgstr "获取 %(op)s 详细信息时出错,返回码:%(status)s。" - -#, python-format -msgid "Error while getting data via ssh: (command=%(cmd)s error=%(err)s)." -msgstr "" -"通过 ssh 获取数据时发生错误:(命令为 %(cmd)s,发生的错误为 %(err)s)。" - -#, python-format -msgid "Error while getting disco information [%s]." -msgstr "获取 disco 信息 [%s] 时出错。" - -#, python-format -msgid "Error while getting nvp value: %s." -msgstr "获取 nvp 值时出错:%s。" - -#, python-format -msgid "Error while getting session information %s." -msgstr "获取会话信息 %s 时出错。" - -#, python-format -msgid "Error while parsing the data: %s." 
-msgstr "解析数据时出错:%s。" - -#, python-format -msgid "Error while querying page %(url)s on the switch, reason %(error)s." -msgstr "在交换机上查询页面 %(url)s 时出错,原因:%(error)s。" - -#, python-format -msgid "" -"Error while removing the zones and cfgs in the zone string: %(description)s." -msgstr "移除区域字符串中的 zones 和 cgfs 时出错:%(description)s。" - -#, python-format -msgid "Error while requesting %(service)s API." -msgstr "请求 %(service)s API 时出错。" - -#, python-format -msgid "Error while running zoning CLI: (command=%(cmd)s error=%(err)s)." -msgstr "运行分区 CLI 时发生错误:(命令为 %(cmd)s,发生的错误为 %(err)s)。" - -#, python-format -msgid "" -"Error while updating the new zones and cfgs in the zone string. Error " -"%(description)s." -msgstr "更新区域字符串中的新 zones 和 cgfs 时出错。错误:%(description)s。" - -#, python-format -msgid "" -"Error while updating the zones in the zone string. Error %(description)s." -msgstr "更新区域字符串中的 zones 时出错。错误:%(description)s。" - -msgid "Error writing field to database" -msgstr "将字段写至数据库时出错。" - -#, python-format -msgid "Error: Failed to %(operation_type)s %(component)s" -msgstr "错误: %(operation_type)s %(component)s 失败" - -#, python-format -msgid "Error[%(stat)s - %(res)s] while getting volume id." -msgstr "获取卷标识时发生错误 [%(stat)s - %(res)s]。" - -#, python-format -msgid "" -"Error[%(stat)s - %(res)s] while restoring snapshot [%(snap_id)s] into volume " -"[%(vol)s]." -msgstr "" -"将快照 [%(snap_id)s] 复原至卷 [%(vol)s] 时发生错误 [%(stat)s - %(res)s]。" - -#, python-format -msgid "Error[status] %(stat)s - [result] %(res)s] while getting volume id." -msgstr "获取卷标识时发生错误 [status] %(stat)s - [result] %(res)s]。" - -#, python-format -msgid "" -"Exceeded max scheduling attempts %(max_attempts)d for volume %(volume_id)s" -msgstr "对于卷 %(volume_id)s,已超过最大调度尝试次数 %(max_attempts)d" - -msgid "Exceeded the limit of snapshots per volume" -msgstr "超出每个卷的快照数限制" - -#, python-format -msgid "Exception appending meta volume to target volume %(volumename)s." 
-msgstr "将元卷追加到目标卷 %(volumename)s 时发生异常。" - -#, python-format -msgid "" -"Exception during create element replica. Clone name: %(cloneName)s Source " -"name: %(sourceName)s Extra specs: %(extraSpecs)s " -msgstr "" -"创建元素副本时发生异常。克隆名称:%(cloneName)s,源名称:%(sourceName)s,额外" -"规范:%(extraSpecs)s " - -#, python-format -msgid "Exception in _select_ds_for_volume: %s." -msgstr "_select_ds_for_volume %s 中发生异常。" - -#, python-format -msgid "Exception while forming the zone string: %s." -msgstr "构建区域字符串时发生异常:%s。" - -#, python-format -msgid "Exception: %s" -msgstr "异常:%s" - -#, python-format -msgid "Expected a uuid but received %(uuid)s." -msgstr "期望 uuid,但是接收到 %(uuid)s。" - -#, python-format -msgid "Expected exactly one node called \"%s\"" -msgstr "刚好需要一个名为“%s”的节点" - -#, python-format -msgid "Expected integer for node_count, svcinfo lsiogrp returned: %(node)s." -msgstr "期望 node_count 的值为整数,已返回 svcinfo lsiogrp:%(node)s。" - -#, python-format -msgid "Expected no output from CLI command %(cmd)s, got %(out)s." -msgstr "期望 CLI 命令 %(cmd)s 没有任何输出,但是获得了 %(out)s。" - -#, python-format -msgid "" -"Expected single vdisk returned from lsvdisk when filtering on vdisk_UID. " -"%(count)s were returned." -msgstr "" -"在 vdisk_UID 上进行过滤时,从 lsvdisk 返回了所需的单个 vdisk。返回了 " -"%(count)s。" - -#, python-format -msgid "Expected volume size was %d" -msgstr "需要的卷大小为 %d" - -#, python-format -msgid "" -"Export backup aborted, expected backup status %(expected_status)s but got " -"%(actual_status)s." -msgstr "" -"备份导出已异常中止,需要的备份状态为 %(expected_status)s,但实际为 " -"%(actual_status)s。" - -#, python-format -msgid "" -"Export record aborted, the backup service currently configured " -"[%(configured_service)s] is not the backup service that was used to create " -"this backup [%(backup_service)s]." -msgstr "" -"记录导出已异常中止,当前配置的备份服务 [%(configured_service)s] 不是已用来创" -"建此备份的备份服务 [%(backup_service)s]。" - -msgid "Extend volume error." 
-msgstr "扩展卷时发生错误。" - -msgid "" -"Extend volume is only supported for this driver when no snapshots exist." -msgstr "不存在任何快照时,扩展卷仅受此驱动程序支持。" - -msgid "Extend volume not implemented" -msgstr "扩展卷未实现" - -msgid "FAST is not supported on this array." -msgstr "快速策略在此阵列上不受支持。" - -msgid "FC is the protocol but wwpns are not supplied by OpenStack." -msgstr "FC 为协议,但 OpenStack 未提供 wwpns。" - -#, python-format -msgid "Faield to unassign %(volume)s" -msgstr "无法取消分配 %(volume)s" - -#, python-format -msgid "Fail to create cache volume %(volume)s. Error: %(err)s" -msgstr "未能创建高速缓存卷 %(volume)s。错误:%(err)s" - -#, python-format -msgid "Failed adding connection for fabric=%(fabric)s: Error: %(err)s" -msgstr "对于光纤网 %(fabric)s,未能添加连接:发生错误:%(err)s" - -msgid "Failed cgsnapshot" -msgstr "已使 cg 快照失效" - -#, python-format -msgid "Failed creating snapshot for group: %(response)s." -msgstr "为组创建快照失败:%(response)s。" - -#, python-format -msgid "Failed creating snapshot for volume %(volname)s: %(response)s." -msgstr "为卷 %(volname)s 创建快照失败:%(response)s。" - -#, python-format -msgid "Failed getting active zone set from fabric %s." -msgstr "通过光纤网络 %s 获取活动区域集失败。" - -#, python-format -msgid "Failed getting details for pool %s." -msgstr "获取池 %s 的详细信息失败。" - -#, python-format -msgid "Failed removing connection for fabric=%(fabric)s: Error: %(err)s" -msgstr "对于光纤网 %(fabric)s,未能移除连接:发生错误:%(err)s" - -#, python-format -msgid "Failed to Extend Volume %(volname)s" -msgstr "未能扩展卷 %(volname)s" - -#, python-format -msgid "Failed to Login to 3PAR (%(url)s) because %(err)s" -msgstr "未能登录到 3PAR (%(url)s),因为存在 %(err)s" - -msgid "Failed to access active zoning configuration." -msgstr "未能访问活动分区配置。" - -#, python-format -msgid "Failed to access zoneset status:%s" -msgstr "未能访问区域集状态:%s" - -#, python-format -msgid "" -"Failed to acquire a resource lock. 
(serial: %(serial)s, inst: %(inst)s, ret: " -"%(ret)s, stderr: %(err)s)" -msgstr "" -"未能获取资源锁定。(序列为 %(serial)s,实例为 %(inst)s,返回为 %(ret)s,标准" -"错误为 %(err)s)" - -msgid "Failed to add or update zoning configuration." -msgstr "未能添加或更新分区配置。" - -msgid "Failed to add the logical device." -msgstr "未能添加逻辑设备。" - -#, python-format -msgid "" -"Failed to add volume %(volumeName)s to consistency group %(cgName)s. Return " -"code: %(rc)lu. Error: %(error)s." -msgstr "" -"未能将卷 %(volumeName)s 添加至一致性组 %(cgName)s。返回码为 %(rc)lu。错误为 " -"%(error)s。" - -msgid "Failed to add zoning configuration." -msgstr "未能添加分区配置。" - -#, python-format -msgid "" -"Failed to assign the iSCSI initiator IQN. (port: %(port)s, reason: " -"%(reason)s)" -msgstr "未能分配 iSCSI 发起方 IQN。(端口为 %(port)s,原因为 %(reason)s)" - -#, python-format -msgid "Failed to associate qos_specs: %(specs_id)s with type %(type_id)s." -msgstr "未能使 qos_specs %(specs_id)s 与类型 %(type_id)s 关联。" - -#, python-format -msgid "Failed to attach iSCSI target for volume %(volume_id)s." -msgstr "未能针对卷 %(volume_id)s 连接 iSCSI 目标。" - -#, python-format -msgid "Failed to backup volume metadata - %s" -msgstr "未能备份卷元数据 - %s" - -#, python-format -msgid "" -"Failed to backup volume metadata - Metadata backup object 'backup.%s.meta' " -"already exists" -msgstr "未能备份卷元数据 - 元数据备份对象“backup.%s.meta”已存在" - -#, python-format -msgid "Failed to clone volume from snapshot %s." -msgstr "未能从快照 %s 克隆卷。" - -#, python-format -msgid "Failed to connect to %(vendor_name)s Array %(host)s: %(err)s" -msgstr "未能连接至 %(vendor_name)s 阵列 %(host)s:%(err)s" - -msgid "Failed to connect to Dell REST API" -msgstr "无法连接至 Dell REST API" - -msgid "Failed to connect to array" -msgstr "未能连接至阵列" - -#, python-format -msgid "Failed to connect to sheep daemon. 
addr: %(addr)s, port: %(port)s" -msgstr "未能连接至 sheep 守护程序。地址:%(addr)s,端口:%(port)s" - -#, python-format -msgid "Failed to copy image to volume: %(reason)s" -msgstr "未能将映像复制到卷:%(reason)s" - -#, python-format -msgid "Failed to copy metadata to volume: %(reason)s" -msgstr "未能复制元数据到卷:%(reason)s" - -msgid "" -"Failed to copy volume to image as image quota has been met. Please delete " -"images or have your limit increased, then try again." -msgstr "" -"由于镜像配额限制,将卷拷贝到镜像中的操作失败。请删除镜像或增加镜像配额,然后" -"再尝试。" - -msgid "Failed to copy volume, destination device unavailable." -msgstr "未能复制卷,目标设备不可用。" - -msgid "Failed to copy volume, source device unavailable." -msgstr "未能复制卷,源设备不可用。" - -#, python-format -msgid "Failed to create CG %(cgName)s from snapshot %(cgSnapshot)s." -msgstr "未能从快照 %(cgSnapshot)s 创建 CG %(cgName)s。" - -#, python-format -msgid "Failed to create IG, %s" -msgstr "未能创建映像 %s" - -#, python-format -msgid "Failed to create Volume Group: %(vg_name)s" -msgstr "未能创建卷组: %(vg_name)s" - -#, python-format -msgid "" -"Failed to create a file. (file: %(file)s, ret: %(ret)s, stderr: %(err)s)" -msgstr "未能创建文件。(文件为 %(file)s,返回为 %(ret)s,标准错误为 %(err)s)" - -#, python-format -msgid "Failed to create a temporary snapshot for volume %s." -msgstr "未能为卷 %s 创建临时快照。" - -msgid "Failed to create api volume flow." -msgstr "未能创建 api 卷流。" - -#, python-format -msgid "Failed to create cg snapshot %(id)s due to %(reason)s." -msgstr "由于 %(reason)s,未能创建 cg 快照 %(id)s。" - -#, python-format -msgid "Failed to create consistency group %(id)s due to %(reason)s." -msgstr "由于 %(reason)s,未能创建一致性组 %(id)s。" - -#, python-format -msgid "Failed to create consistency group %(id)s:%(ret)s." -msgstr "未能创建一致性组 %(id)s:%(ret)s。" - -#, python-format -msgid "" -"Failed to create consistency group %s because VNX consistency group cannot " -"accept compressed LUNs as members." -msgstr "未能创建一致性组 %s,因为 VNX 一致性组无法接受压缩的 LUN 作为成员。" - -#, python-format -msgid "Failed to create consistency group: %(cgName)s." 
-msgstr "未能创建一致性组:%(cgName)s。" - -#, python-format -msgid "Failed to create consistency group: %(cgid)s. Error: %(excmsg)s." -msgstr "未能创建一致性组:%(cgid)s。错误为 %(excmsg)s。" - -#, python-format -msgid "" -"Failed to create consistency group: %(consistencyGroupName)s Return code: " -"%(rc)lu. Error: %(error)s." -msgstr "" -"未能创建一致性组 %(consistencyGroupName)s。返回码为 %(rc)lu。错误为 " -"%(error)s。" - -#, python-format -msgid "Failed to create hardware id(s) on %(storageSystemName)s." -msgstr "未能在 %(storageSystemName)s 上创建硬件标识。" - -#, python-format -msgid "" -"Failed to create host: %(name)s. Please check if it exists on the array." -msgstr "未能创建主机:%(name)s。请检查它在阵列上是否存在。" - -#, python-format -msgid "Failed to create hostgroup: %(name)s. Check if it exists on the array." -msgstr "未能创建主机组:%(name)s。请检查它在阵列上是否存在。" - -msgid "Failed to create iqn." -msgstr "未能创建 IQN。" - -#, python-format -msgid "Failed to create iscsi target for volume %(volume_id)s." -msgstr "未能针对卷 %(volume_id)s 创建 iscsi 目标。" - -msgid "Failed to create manage existing flow." -msgstr "未能创建 manage_existing 流。" - -msgid "Failed to create manage_existing flow." -msgstr "未能创建 manage_existing 流。" - -msgid "Failed to create map on mcs, no channel can map." -msgstr "未能在 MCS 上创建映射,没有通道可以映射。" - -msgid "Failed to create map." -msgstr "未能创建映射。" - -#, python-format -msgid "Failed to create metadata for volume: %(reason)s" -msgstr "未能为卷创建元数据:%(reason)s" - -msgid "Failed to create partition." -msgstr "未能创建分区。" - -#, python-format -msgid "Failed to create qos_specs: %(name)s with specs %(qos_specs)s." -msgstr "未能通过规范 %(qos_specs)s 创建 qos_specs:%(name)s。" - -msgid "Failed to create replica." -msgstr "未能创建副本。" - -msgid "Failed to create scheduler manager volume flow" -msgstr "未能创建调度程序管理器卷流" - -#, python-format -msgid "Failed to create snapshot %s" -msgstr "未能创建快照 %s" - -#, python-format -msgid "Failed to create snapshot for cg: %(cgName)s." 
-msgstr "未能针对 cg %(cgName)s 创建快照。" - -#, python-format -msgid "Failed to create snapshot for volume %s." -msgstr "未能为卷 %s 创建快照。" - -#, python-format -msgid "Failed to create snapshot policy on volume %(vol)s: %(res)s." -msgstr "未能在卷 %(vol)s 上创建快照策略:%(res)s。" - -#, python-format -msgid "Failed to create snapshot resource area on volume %(vol)s: %(res)s." -msgstr "未能在卷 %(vol)s 上创建快照资源区域:%(res)s。" - -msgid "Failed to create snapshot." -msgstr "未能创建快照。" - -#, python-format -msgid "" -"Failed to create snapshot. CloudByte volume information not found for " -"OpenStack volume [%s]." -msgstr "未能创建快照。对于 OpenStack 卷 [%s],找不到 CloudByte 卷信息。" - -#, python-format -msgid "Failed to create south bound connector for %s." -msgstr "无法为 %s 创建南向连接器。" - -#, python-format -msgid "Failed to create storage group %(storageGroupName)s." -msgstr "无法创建存储器组 %(storageGroupName)s。" - -#, python-format -msgid "Failed to create thin pool, error message was: %s" -msgstr "未能创建瘦池,错误消息如下:%s" - -#, python-format -msgid "Failed to create volume %s" -msgstr "未能创建卷 %s" - -#, python-format -msgid "Failed to delete SI for volume_id: %(volume_id)s because it has pair." -msgstr "未能删除卷标识 %(volume_id)s 的 SI,因为它有配对。" - -#, python-format -msgid "Failed to delete a logical device. (LDEV: %(ldev)s, reason: %(reason)s)" -msgstr "未能删除逻辑设备。(逻辑设备为 %(ldev)s,原因为 %(reason)s)" - -#, python-format -msgid "Failed to delete cgsnapshot %(id)s due to %(reason)s." -msgstr "由于 %(reason)s,未能删除 cg 快照 %(id)s。" - -#, python-format -msgid "Failed to delete consistency group %(id)s due to %(reason)s." -msgstr "由于 %(reason)s,未能删除一致性组 %(id)s。" - -#, python-format -msgid "Failed to delete consistency group: %(cgName)s." -msgstr "未能删除一致性组:%(cgName)s。" - -#, python-format -msgid "" -"Failed to delete consistency group: %(consistencyGroupName)s Return code: " -"%(rc)lu. Error: %(error)s." -msgstr "" -"未能删除一致性组 %(consistencyGroupName)s。返回码为 %(rc)lu。错误为 " -"%(error)s。" - -msgid "Failed to delete device." 
-msgstr "无法删除设备。" - -#, python-format -msgid "" -"Failed to delete fileset for consistency group %(cgname)s. Error: %(excmsg)s." -msgstr "未能针对一致性组 %(cgname)s 删除文件集。错误为 %(excmsg)s。" - -msgid "Failed to delete iqn." -msgstr "未能删除 IQN。" - -msgid "Failed to delete map." -msgstr "未能删除映射。" - -msgid "Failed to delete partition." -msgstr "未能删除分区。" - -msgid "Failed to delete replica." -msgstr "未能删除副本。" - -#, python-format -msgid "Failed to delete snapshot %s" -msgstr "未能删除快照 %s" - -#, python-format -msgid "Failed to delete snapshot for cg: %(cgId)s." -msgstr "未能针对 cg %(cgId)s 删除快照。" - -#, python-format -msgid "Failed to delete snapshot for snapshot_id: %s because it has pair." -msgstr "未能删除快照标识 %s 的快照,因为它有配对。" - -msgid "Failed to delete snapshot." -msgstr "未能删除快照。" - -#, python-format -msgid "Failed to delete volume %(volumeName)s." -msgstr "未能删除卷 %(volumeName)s。" - -#, python-format -msgid "" -"Failed to delete volume for volume_id: %(volume_id)s because it has pair." -msgstr "未能删除卷标识 %(volume_id)s 的卷,因为它有配对。" - -#, python-format -msgid "Failed to detach iSCSI target for volume %(volume_id)s." -msgstr "未能针对卷 %(volume_id)s 与 iSCSI 目标断开连接。" - -msgid "Failed to determine blockbridge API configuration" -msgstr "未能确定 Blockbridge API 配置" - -msgid "Failed to disassociate qos specs." -msgstr "未能取消关联 Qos 规范。" - -#, python-format -msgid "Failed to disassociate qos_specs: %(specs_id)s with type %(type_id)s." -msgstr "未能使 qos_specs %(specs_id)s 与类型 %(type_id)s 取消关联。" - -#, python-format -msgid "" -"Failed to ensure snapshot resource area, could not locate volume for id %s" -msgstr "未能确保快照资源区域,找不到标识 %s 的卷" - -msgid "Failed to establish a stable connection" -msgstr "未能建立稳定的链接" - -msgid "Failed to establish connection with Coho cluster" -msgstr "无法建立与 Coho 集群的连接。" - -#, python-format -msgid "" -"Failed to execute CloudByte API [%(cmd)s]. Http status: %(status)s, Error: " -"%(error)s." 
-msgstr "" -"未能执行 CloudByte API [%(cmd)s]。Http 状态为 %(status)s,错误为 %(error)s。" - -msgid "Failed to execute common command." -msgstr "未能执行常见命令。" - -#, python-format -msgid "Failed to export for volume: %(reason)s" -msgstr "输出卷失败:%(reason)s" - -#, python-format -msgid "Failed to extend volume %(name)s, Error msg: %(msg)s." -msgstr "无法扩展卷 %(name)s,错误消息:%(msg)s。" - -msgid "Failed to find QoSnode" -msgstr "找不到 QoSnode" - -msgid "Failed to find Storage Center" -msgstr "找不到存储中心" - -msgid "Failed to find a vdisk copy in the expected pool." -msgstr "在所需池中找不到 vdisk 副本。" - -msgid "Failed to find account for volume." -msgstr "未能查找卷的帐户。" - -#, python-format -msgid "Failed to find fileset for path %(path)s, command output: %(cmdout)s." -msgstr "对于路径 %(path)s,未能找到文件集,命令输出:%(cmdout)s。" - -#, python-format -msgid "Failed to find group snapshot named: %s" -msgstr "找不到名为 %s 的组快照" - -#, python-format -msgid "Failed to find host %s." -msgstr "未能找到主机 %s。" - -#, python-format -msgid "Failed to find iSCSI initiator group containing %(initiator)s." -msgstr "找不到包含 %(initiator)s 的 iSCSI 启动程序组。" - -#, python-format -msgid "Failed to get CloudByte account details for account [%s]." -msgstr "对于帐户 [%s],未能获取 CloudByte 帐户详细信息。" - -#, python-format -msgid "Failed to get LUN target details for the LUN %s" -msgstr "未能获取 LUN %s 的 LUN 目标详细信息" - -#, python-format -msgid "Failed to get LUN target details for the LUN %s." -msgstr "未能获取 LUN %s 的 LUN 目标详细信息。" - -#, python-format -msgid "Failed to get LUN target list for the LUN %s" -msgstr "未能获取 LUN %s 的 LUN 目标列表" - -#, python-format -msgid "Failed to get Partition ID for volume %(volume_id)s." -msgstr "未能获取卷 %(volume_id)s 的分区标识。" - -#, python-format -msgid "Failed to get Raid Snapshot ID from Snapshot %(snapshot_id)s." -msgstr "未能从快照 %(snapshot_id)s 获取 RAID 快照标识。" - -#, python-format -msgid "Failed to get Raid Snapshot ID from snapshot: %(snapshot_id)s." -msgstr "未能从快照 %(snapshot_id)s 获取 RAID 快照标识。" - -msgid "Failed to get SplitMirror." 
-msgstr "无法获取 SplitMirror。" - -#, python-format -msgid "" -"Failed to get a storage resource. The system will attempt to get the storage " -"resource again. (resource: %(resource)s)" -msgstr "未能获取存储资源。系统将再次尝试获取该存储资源。(资源:%(resource)s)" - -#, python-format -msgid "Failed to get all associations of qos specs %s" -msgstr "未能获取 qos 规范 %s 的所有关联" - -msgid "Failed to get channel info." -msgstr "未能获取通道信息。" - -#, python-format -msgid "Failed to get code level (%s)." -msgstr "未能获取代码级别 (%s)。" - -msgid "Failed to get device info." -msgstr "未能获取设备信息。" - -#, python-format -msgid "Failed to get domain because CPG (%s) doesn't exist on array." -msgstr "未能获取域,因为阵列上不存在 CPG (%s)。" - -msgid "Failed to get image snapshots." -msgstr "无法获取映像快照。" - -#, python-format -msgid "Failed to get ip on Channel %(channel_id)s with volume: %(volume_id)s." -msgstr "未能获取具有卷 %(volume_id)s 的通道 %(channel_id)s 上的 IP。" - -msgid "Failed to get iqn info." -msgstr "未能获取 IQN 信息。" - -msgid "Failed to get license info." -msgstr "未能获取许可证信息。" - -msgid "Failed to get lv info." -msgstr "未能获取 lv 信息。" - -msgid "Failed to get map info." -msgstr "未能获取映射信息。" - -msgid "Failed to get migration task." -msgstr "无法获取迁移任务。" - -msgid "Failed to get model update from clone" -msgstr "未能从克隆获取模型更新" - -msgid "Failed to get name server info." -msgstr "未能获取名称服务器信息。" - -msgid "Failed to get network info." -msgstr "未能获取网络信息。" - -#, python-format -msgid "Failed to get new part id in new pool: %(pool_id)s." -msgstr "未能在新池 %(pool_id)s 中获取新的部件标识。" - -msgid "Failed to get partition info." -msgstr "未能获取分区信息。" - -#, python-format -msgid "Failed to get pool id with volume %(volume_id)s." -msgstr "未能获取具有卷 %(volume_id)s 的池标识。" - -#, python-format -msgid "Failed to get remote copy information for %(volume)s due to %(err)s." -msgstr "无法获取 %(volume)s 的远程复制信息,因为发生了 %(err)s。" - -#, python-format -msgid "" -"Failed to get remote copy information for %(volume)s. Exception: %(err)s." 
-msgstr "无法获取 %(volume)s 的远程复制信息。异常:%(err)s。" - -msgid "Failed to get replica info." -msgstr "未能获取副本信息。" - -msgid "Failed to get show fcns database info." -msgstr "未能获取显示 fcns 数据库信息。" - -#, python-format -msgid "Failed to get size of volume %s" -msgstr "未能获取卷 %s 的大小" - -#, python-format -msgid "Failed to get snapshot for volume %s." -msgstr "未能获取卷 %s 的快照。" - -msgid "Failed to get snapshot info." -msgstr "未能获取快照信息。" - -#, python-format -msgid "Failed to get target IQN for the LUN %s" -msgstr "未能获取 LUN %s 的目标 IQN" - -msgid "Failed to get target LUN of SplitMirror." -msgstr "无法获取 SplitMirror 的目标 LUN。" - -#, python-format -msgid "" -"Failed to get target ip or iqn for initiator %(ini)s, please check config " -"file." -msgstr "为初始化器 %(ini)s 获取目标ip或iqn失败,请检查配置文件。" - -#, python-format -msgid "Failed to get target portal for the LUN %s" -msgstr "未能获取 LUN %s 的目标门户网站" - -msgid "Failed to get targets" -msgstr "未能获取目标" - -msgid "Failed to get wwn info." -msgstr "未能获取 WWN 信息。" - -#, python-format -msgid "" -"Failed to get, create or add volume %(volumeName)s to masking view " -"%(maskingViewName)s. The error message received was %(errorMessage)s." -msgstr "" -"未能获取或创建掩码视图 %(maskingViewName)s,或者未能对该掩码视图添加卷 " -"%(volumeName)s。接收到的错误消息为 %(errorMessage)s。" - -msgid "Failed to identify volume backend." -msgstr "未能识别卷后端。" - -#, python-format -msgid "Failed to link fileset for the share %(cgname)s. Error: %(excmsg)s." -msgstr "未能针对共享项 %(cgname)s 链接文件集。错误为 %(excmsg)s。" - -#, python-format -msgid "Failed to log on %s Array (invalid login?)." -msgstr "未能登录到 %s 阵列(无效登录?)。" - -#, python-format -msgid "Failed to login for user %s." -msgstr "未能让用户 %s 登录。" - -msgid "Failed to login with all rest URLs." -msgstr "未能使用所有 REST URL 进行登录。" - -#, python-format -msgid "" -"Failed to make a request to Datera cluster endpoint due to the following " -"reason: %s" -msgstr "由于以下原因,未能对 Datera 集群端点进行请求:%s" - -msgid "Failed to manage api volume flow." 
-msgstr "未能管理 API 卷流。" - -#, python-format -msgid "" -"Failed to manage existing %(type)s %(name)s, because reported size %(size)s " -"was not a floating-point number." -msgstr "未能管理现有 %(type)s %(name)s,因为所报告的大小 %(size)s不是浮点数。" - -#, python-format -msgid "" -"Failed to manage existing volume %(name)s, because of error in getting " -"volume size." -msgstr "未能管理现有卷 %(name)s,因为获取卷大小时出错。" - -#, python-format -msgid "" -"Failed to manage existing volume %(name)s, because rename operation failed: " -"Error msg: %(msg)s." -msgstr "未能管理现有卷 %(name)s,因为重命名操作失败:错误消息为 %(msg)s。" - -#, python-format -msgid "" -"Failed to manage existing volume %(name)s, because reported size %(size)s " -"was not a floating-point number." -msgstr "未能管理现有卷 %(name)s,因为已报告的大小 %(size)s 不是浮点数。" - -#, python-format -msgid "" -"Failed to manage existing volume due to I/O group mismatch. The I/O group of " -"the volume to be managed is %(vdisk_iogrp)s. I/O groupof the chosen type is " -"%(opt_iogrp)s." -msgstr "" -"无法管理现有卷,因为 I/O 组不匹配。要管理的卷的 I/O 组为 %(vdisk_iogrp)s。所" -"选类型的 I/O 组为 %(opt_iogrp)s。" - -#, python-format -msgid "" -"Failed to manage existing volume due to the pool of the volume to be managed " -"does not match the backend pool. Pool of the volume to be managed is " -"%(vdisk_pool)s. Pool of the backend is %(backend_pool)s." -msgstr "" -"无法管理现有卷,因为要管理的卷的池与后端池不匹配。要管理的卷的池为 " -"%(vdisk_pool)s。后端的池为 %(backend_pool)s。" - -msgid "" -"Failed to manage existing volume due to the volume to be managed is " -"compress, but the volume type chosen is not compress." -msgstr "无法管理现有卷,要管理的卷为压缩卷,但所选卷类型并非压缩卷。" - -msgid "" -"Failed to manage existing volume due to the volume to be managed is not " -"compress, but the volume type chosen is compress." -msgstr "无法管理现有卷,要管理的卷并非压缩卷,但所选卷类型为压缩卷。" - -msgid "" -"Failed to manage existing volume due to the volume to be managed is not in a " -"valid I/O group." 
-msgstr "无法管理现有卷,因为要管理的卷未包含在有效 I/O 组中。"
-
-msgid ""
-"Failed to manage existing volume due to the volume to be managed is thick, "
-"but the volume type chosen is thin."
-msgstr "无法管理现有卷,因为要管理的卷为厚卷,但所选卷类型为薄卷。"
-
-msgid ""
-"Failed to manage existing volume due to the volume to be managed is thin, "
-"but the volume type chosen is thick."
-msgstr "无法管理现有卷,要管理的卷为薄卷,但所选卷类型为厚卷。"
-
-#, python-format
-msgid "Failed to manage volume %s."
-msgstr "未能管理卷 %s。"
-
-#, python-format
-msgid ""
-"Failed to map a logical device. (LDEV: %(ldev)s, LUN: %(lun)s, port: "
-"%(port)s, id: %(id)s)"
-msgstr ""
-"未能映射逻辑设备。(逻辑设备为 %(ldev)s,LUN 为 %(lun)s,端口为 %(port)s,标"
-"识为 %(id)s)"
-
-msgid "Failed to migrate volume for the first time."
-msgstr "第一次迁移卷失败。"
-
-msgid "Failed to migrate volume for the second time."
-msgstr "第二次迁移卷失败。"
-
-#, python-format
-msgid "Failed to move LUN mapping. Return code: %s"
-msgstr "未能移动 LUN 映射。返回码:%s"
-
-#, python-format
-msgid "Failed to move volume %s."
-msgstr "未能移动卷 %s。"
-
-#, python-format
-msgid "Failed to open a file. (file: %(file)s, ret: %(ret)s, stderr: %(err)s)"
-msgstr "未能打开文件。(文件为 %(file)s,返回为 %(ret)s,标准错误为 %(err)s)"
-
-#, python-format
-msgid ""
-"Failed to parse CLI output:\n"
-" command: %(cmd)s\n"
-" stdout: %(out)s\n"
-" stderr: %(err)s."
-msgstr ""
-"未能解析 CLI 输出:\n"
-"命令:%(cmd)s\n"
-"标准输出:%(out)s\n"
-"标准错误:%(err)s。"
-
-msgid ""
-"Failed to parse the configuration option 'glance_catalog_info', must be in "
-"the form ::"
-msgstr ""
-"解析配置选项“glance_catalog_info”失败,必须为以下格式::"
-":"
-
-msgid ""
-"Failed to parse the configuration option 'keystone_catalog_info', must be in "
-"the form ::"
-msgstr ""
-"解析配置选项“keystone_catalog_info”失败,必须为以下格式::"
-":"
-
-msgid ""
-"Failed to parse the configuration option 'swift_catalog_info', must be in "
-"the form ::"
-msgstr ""
-"解析配置选项“swift_catalog_info”失败,必须为以下格式::"
-":"
-
-#, python-format
-msgid ""
-"Failed to perform a zero-page reclamation. 
(LDEV: %(ldev)s, reason: "
-"%(reason)s)"
-msgstr "未能执行零页面回收。(逻辑设备为 %(ldev)s,原因为 %(reason)s)"
-
-#, python-format
-msgid "Failed to remove export for volume %(volume)s: %(reason)s"
-msgstr "未能针对卷 %(volume)s 移除导出:%(reason)s"
-
-#, python-format
-msgid "Failed to remove iscsi target for volume %(volume_id)s."
-msgstr "未能针对卷 %(volume_id)s 除去 iscsi 目标。"
-
-#, python-format
-msgid ""
-"Failed to remove volume %(volumeName)s from consistency group %(cgName)s. "
-"Return code: %(rc)lu. Error: %(error)s."
-msgstr ""
-"未能将卷 %(volumeName)s 从一致性组 %(cgName)s 中移除。返回码为 %(rc)lu。错误"
-"为 %(error)s。"
-
-#, python-format
-msgid "Failed to remove volume %(volumeName)s from default SG."
-msgstr "未能从缺省 SG 中移除卷 %(volumeName)s。"
-
-#, python-format
-msgid "Failed to remove volume %(volumeName)s from default SG: %(volumeName)s."
-msgstr "无法移除卷 %(volumeName)s(从缺省 SG %(volumeName)s)。"
-
-#, python-format
-msgid ""
-"Failed to remove: %(volumename)s. from the default storage group for FAST "
-"policy %(fastPolicyName)s."
-msgstr ""
-"无法从快速策略 %(fastPolicyName)s 的缺省存储器组中移除 %(volumename)s。 "
-
-#, python-format
-msgid ""
-"Failed to rename logical volume %(name)s, error message was: %(err_msg)s"
-msgstr "未能重命名逻辑卷 %(name)s,错误消息如下:%(err_msg)s"
-
-#, python-format
-msgid "Failed to retrieve active zoning configuration %s"
-msgstr "无法检索处于活动状态的分区配置 %s"
-
-#, python-format
-msgid "Failed to retrieve attachments for volume %(name)s"
-msgstr "获取卷 %(name)s 的挂载信息失败。"
-
-#, python-format
-msgid ""
-"Failed to set CHAP authentication for target IQN %(iqn)s. Details: %(ex)s"
-msgstr "无法为目标 IQN %(iqn)s 设置 CHAP 认证。详细信息:%(ex)s"
-
-#, python-format
-msgid "Failed to set QoS for existing volume %(name)s, Error msg: %(msg)s."
-msgstr "未能对现有卷 %(name)s 设置 QoS,错误消息:%(msg)s。"
-
-msgid "Failed to set attribute 'Incoming user' for SCST target."
-msgstr "未能对 SCST 目标设置属性“传入用户”。"
-
-msgid "Failed to set partition."
-msgstr "未能设置分区。" - -#, python-format -msgid "" -"Failed to set permissions for the consistency group %(cgname)s. Error: " -"%(excmsg)s." -msgstr "未能针对一致性组 %(cgname)s 设置许可权。错误为 %(excmsg)s。" - -#, python-format -msgid "" -"Failed to specify a logical device for the volume %(volume_id)s to be " -"unmapped." -msgstr "未能针对要取消映射的卷 %(volume_id)s 指定逻辑设备。" - -#, python-format -msgid "" -"Failed to specify a logical device to be deleted. (method: %(method)s, id: " -"%(id)s)" -msgstr "未能指定要删除的逻辑设备。(方法为 %(method)s,标识为 %(id)s)" - -msgid "Failed to terminate migrate session." -msgstr "未能终止迁移会话。" - -#, python-format -msgid "Failed to unbind volume %(volume)s" -msgstr "未能解除绑定卷 %(volume)s" - -#, python-format -msgid "" -"Failed to unlink fileset for consistency group %(cgname)s. Error: %(excmsg)s." -msgstr "未能针对一致性组 %(cgname)s 取消链接文件集。错误为 %(excmsg)s。" - -#, python-format -msgid "Failed to unmap a logical device. (LDEV: %(ldev)s, reason: %(reason)s)" -msgstr "未能取消映射逻辑设备。(逻辑设备为 %(ldev)s,原因为 %(reason)s)" - -#, python-format -msgid "Failed to update consistency group: %(cgName)s." -msgstr "未能更新一致性组:%(cgName)s。" - -#, python-format -msgid "Failed to update metadata for volume: %(reason)s" -msgstr "未能更新卷的元数据:%(reason)s" - -msgid "Failed to update or delete zoning configuration" -msgstr "未能更新或删除分区配置" - -msgid "Failed to update or delete zoning configuration." -msgstr "无法更新或删除分区配置。" - -#, python-format -msgid "Failed to update qos_specs: %(specs_id)s with specs %(qos_specs)s." -msgstr "未能通过规范 %(qos_specs)s 更新 qos_specs:%(specs_id)s。" - -msgid "Failed to update quota usage while retyping volume." -msgstr "对卷进行转型时,更新配额使用率失败" - -msgid "Failed to update snapshot." -msgstr "无法更新快照。" - -#, python-format -msgid "" -"Failed updating volume %(vol_id)s metadata using the provided %(src_type)s " -"%(src_id)s metadata" -msgstr "未能使用提供的 %(src_type)s %(src_id)s 元数据更新卷 %(vol_id)s 元数据" - -#, python-format -msgid "Failure creating volume %s." 
-msgstr "创建卷 %s 时发生故障。" - -#, python-format -msgid "Failure getting LUN info for %s." -msgstr "针对 %s 获取 LUN 信息时发生故障。" - -#, python-format -msgid "Failure moving new cloned LUN to %s." -msgstr "将新克隆的 LUN 移至 %s 时发生故障。" - -#, python-format -msgid "Failure staging LUN %s to tmp." -msgstr "将 LUN %s 登台至临时文件夹时发生故障。" - -#, python-format -msgid "Fexvisor failed to add volume %(id)s due to %(reason)s." -msgstr "由于 %(reason)s,Fexvisor 未能添加卷 %(id)s。" - -#, python-format -msgid "" -"Fexvisor failed to join the volume %(vol)s in the group %(group)s due to " -"%(ret)s." -msgstr "由于 %(ret)s,Fexvisor 未能将卷 %(vol)s加入组 %(group)s。" - -#, python-format -msgid "" -"Fexvisor failed to remove the volume %(vol)s in the group %(group)s due to " -"%(ret)s." -msgstr "由于 %(ret)s,Fexvisor 未能移除组 %(group)s中的卷 %(vol)s。" - -#, python-format -msgid "Fexvisor failed to remove volume %(id)s due to %(reason)s." -msgstr "由于 %(reason)s,Fexvisor 未能移除卷 %(id)s。" - -#, python-format -msgid "Fibre Channel SAN Lookup failure: %(reason)s" -msgstr "光纤通道 SAN 查找失败:%(reason)s" - -msgid "Fibre Channel Zone Manager not initialized" -msgstr "光纤通道区域管理器未初始化。" - -#, python-format -msgid "Fibre Channel Zone operation failed: %(reason)s" -msgstr "“光纤通道区域”操作失败:%(reason)s" - -#, python-format -msgid "Fibre Channel connection control failure: %(reason)s" -msgstr "光纤通道连接控制失败:%(reason)s" - -#, python-format -msgid "File %(file_path)s could not be found." -msgstr "找不到文件 %(file_path)s。" - -#, python-format -msgid "File %(path)s has invalid backing file %(bfile)s, aborting." -msgstr "文件 %(path)s 具有无效支持文件 %(bfile)s,正在异常中止。" - -#, python-format -msgid "File already exists at %s." -msgstr "%s 处已存在文件。" - -#, python-format -msgid "File already exists at: %s" -msgstr "在以下位置处,已存在文件:%s" - -msgid "Find host in hostgroup error." -msgstr "在主机组中查找主机时发生错误。" - -msgid "Find host lun id error." -msgstr "查找主机 LUN 标识时发生错误。" - -msgid "Find lun group from mapping view error." -msgstr "从映射视图查找 LUN 组时发生错误。" - -msgid "Find mapping view error." 
-msgstr "查找映射视图时发生错误。" - -msgid "Find portgroup error." -msgstr "查找端口组时发生错误。" - -msgid "Find portgroup from mapping view error." -msgstr "从映射视图查找端口组时发生错误。" - -#, python-format -msgid "" -"Flash Cache Policy requires WSAPI version '%(fcache_version)s' version " -"'%(version)s' is installed." -msgstr "" -"闪存高速缓存策略要求安装了 WSAPI 版本“%(fcache_version)s”版本“%(version)s”。" - -#, python-format -msgid "Flexvisor assign volume failed.:%(id)s:%(status)s." -msgstr "Flexvisor 分配卷失败:%(id)s:%(status)s。" - -#, python-format -msgid "Flexvisor assign volume failed:%(id)s:%(status)s." -msgstr "Flexvisor 分配卷失败:%(id)s:%(status)s。" - -#, python-format -msgid "" -"Flexvisor could not find volume %(id)s snapshot in the group %(vgid)s " -"snapshot %(vgsid)s." -msgstr "Flexvisor 在组 %(vgid)s 快照 %(vgsid)s 中找不到卷 %(id)s 快照。" - -#, python-format -msgid "Flexvisor create volume failed.:%(volumeid)s:%(status)s." -msgstr "Flexvisor 创建卷失败:%(volumeid)s:%(status)s。" - -#, python-format -msgid "Flexvisor failed deleting volume %(id)s: %(status)s." -msgstr "Flexvisor 无法删除卷 %(id)s:%(status)s。" - -#, python-format -msgid "Flexvisor failed to add volume %(id)s to group %(cgid)s." -msgstr "Flexvisor 未能将卷 %(id)s 添加至组 %(cgid)s。" - -#, python-format -msgid "" -"Flexvisor failed to assign volume %(id)s due to unable to query status by " -"event id." -msgstr "Flexvisor 无法分配卷 %(id)s,因为无法按事件标识查询状态。 " - -#, python-format -msgid "Flexvisor failed to assign volume %(id)s: %(status)s." -msgstr "Flexvisor 无法分配卷 %(id)s:%(status)s。" - -#, python-format -msgid "Flexvisor failed to assign volume %(volume)s iqn %(iqn)s." -msgstr "Flexvisor 未能分配卷 %(volume)s iqn %(iqn)s。" - -#, python-format -msgid "Flexvisor failed to clone volume %(id)s: %(status)s." -msgstr "Flexvisor 无法克隆卷 %(id)s:%(status)s。" - -#, python-format -msgid "Flexvisor failed to clone volume (failed to get event) %(id)s." -msgstr "Flexvisor 无法克隆卷(无法获取事件)%(id)s。" - -#, python-format -msgid "Flexvisor failed to create snapshot for volume %(id)s: %(status)s." 
-msgstr "Flexvisor 无法对卷 %(id)s 创建快照:%(status)s。" - -#, python-format -msgid "" -"Flexvisor failed to create snapshot for volume (failed to get event) %(id)s." -msgstr "Flexvisor 无法对卷 %(id)s 创建快照(无法获取事件)。 " - -#, python-format -msgid "Flexvisor failed to create volume %(id)s in the group %(vgid)s." -msgstr "Flexvisor 未能在组 %(vgid)s 中创建卷 %(id)s。" - -#, python-format -msgid "Flexvisor failed to create volume %(volume)s: %(status)s." -msgstr "Flexvisor 无法创建卷 %(volume)s:%(status)s。" - -#, python-format -msgid "Flexvisor failed to create volume (get event) %s." -msgstr "Flexvisor 无法创建卷(获取事件)%s。" - -#, python-format -msgid "Flexvisor failed to create volume from snapshot %(id)s: %(status)s." -msgstr "Flexvisor 未能从快照 %(id)s 创建卷:%(status)s。" - -#, python-format -msgid "Flexvisor failed to create volume from snapshot %(id)s:%(status)s." -msgstr "Flexvisor 无法从快照 %(id)s 创建卷:%(status)s。" - -#, python-format -msgid "" -"Flexvisor failed to create volume from snapshot (failed to get event) %(id)s." -msgstr "Flexvisor 无法从快照创建卷(无法获取事件)%(id)s。 " - -#, python-format -msgid "Flexvisor failed to delete snapshot %(id)s: %(status)s." -msgstr "Flexvisor 无法删除快照 %(id)s:%(status)s。" - -#, python-format -msgid "Flexvisor failed to delete snapshot (failed to get event) %(id)s." -msgstr "Flexvisor 无法删除快照(无法获取事件)%(id)s。" - -#, python-format -msgid "Flexvisor failed to delete volume %(id)s: %(status)s." -msgstr "Flexvisor 无法删除卷 %(id)s:%(status)s。" - -#, python-format -msgid "Flexvisor failed to extend volume %(id)s: %(status)s." -msgstr "Flexvisor 未能扩展卷 %(id)s:%(status)s。" - -#, python-format -msgid "Flexvisor failed to extend volume %(id)s:%(status)s." -msgstr "Flexvisor 无法扩展卷 %(id)s:%(status)s。" - -#, python-format -msgid "Flexvisor failed to extend volume (failed to get event) %(id)s." -msgstr "Flexvisor 无法扩展卷(无法获取事件)%(id)s。" - -#, python-format -msgid "Flexvisor failed to get pool info %(id)s: %(status)s." 
-msgstr "Flexvisor 无法获取池信息 %(id)s:%(status)s。" - -#, python-format -msgid "" -"Flexvisor failed to get snapshot id of volume %(id)s from group %(vgid)s." -msgstr "Flexvisor 未能从组 %(vgid)s 获取卷 %(id)s 的快照标识。" - -#, python-format -msgid "Flexvisor failed to remove volume %(id)s from group %(cgid)s." -msgstr "Flexvisor 未能从组 %(cgid)s 中移除卷 %(id)s。" - -#, python-format -msgid "Flexvisor failed to spawn volume from snapshot %(id)s:%(status)s." -msgstr "Flexvisor 无法从快照 %(id)s 衍生卷:%(status)s。" - -#, python-format -msgid "" -"Flexvisor failed to spawn volume from snapshot (failed to get event) %(id)s." -msgstr "Flexvisor 无法从快照衍生卷(无法获取事件)%(id)s。 " - -#, python-format -msgid "Flexvisor failed to unassign volume %(id)s: %(status)s." -msgstr "Flexvisor 无法取消分配卷 %(id)s:%(status)s。" - -#, python-format -msgid "Flexvisor failed to unassign volume (get event) %(id)s." -msgstr "Flexvisor 无法取消分配卷(获取事件)%(id)s。" - -#, python-format -msgid "Flexvisor failed to unassign volume:%(id)s:%(status)s." -msgstr "Flexvisor 未能取消分配卷 %(id)s:%(status)s。" - -#, python-format -msgid "Flexvisor unable to find the source volume %(id)s info." -msgstr "Flexvisor 找不到源卷 %(id)s 信息。" - -#, python-format -msgid "Flexvisor unassign volume failed:%(id)s:%(status)s." -msgstr "Flexvisor 取消分配卷失败:%(id)s:%(status)s。" - -#, python-format -msgid "Flexvisor volume %(id)s failed to join group %(vgid)s." -msgstr "Flexvisor 卷 %(id)s 未能加入组 %(vgid)s。" - -#, python-format -msgid "Folder %s does not exist in Nexenta Store appliance" -msgstr "文件夹 %s 在 Nexenta 存储设备中不存在" - -#, python-format -msgid "GET method is not supported by resource: %s" -msgstr "资源: %s 不支持GET方法" - -#, python-format -msgid "GPFS is not running, state: %s." -msgstr "GPFS 没有在运行,状态:%s。" - -msgid "Gateway VIP is not set" -msgstr "未设置网关 VIP" - -msgid "Get FC ports by port group error." -msgstr "按端口组获取 FC 端口时出错。" - -msgid "Get FC ports from array error." -msgstr "从阵列中获取 FC 端口时发生错误。" - -msgid "Get FC target wwpn error." 
-msgstr "获取 FC 目标 WWPN 时发生错误。" - -msgid "Get HyperMetroPair error." -msgstr "获取 HyperMetroPair 时出错。" - -msgid "Get LUN group by view error." -msgstr "按视图获取 LUN 组时出错。" - -msgid "Get LUNcopy information error." -msgstr "获取 LUNcopy 信息时发生错误。" - -msgid "Get QoS id by lun id error." -msgstr "通过 LUN 标识获取 QoS 标识时发生错误。" - -msgid "Get QoS information error." -msgstr "获取 QoS 信息时发生错误。" - -msgid "Get QoS policy error." -msgstr "获取 QoS 策略时发生错误。" - -msgid "Get SplitMirror error." -msgstr "获取 SplitMirror 时出错。" - -msgid "Get active client failed." -msgstr "获取活动客户机失败。" - -msgid "Get array info error." -msgstr "获取阵列信息时出错。" - -msgid "Get cache by name error." -msgstr "按名称获取高速缓存时发生错误。" - -msgid "Get connected free FC wwn error." -msgstr "获取已连接的空闲 FC wwn 时发生错误。" - -msgid "Get engines error." -msgstr "获取引擎时出错。" - -msgid "Get host initiators info failed." -msgstr "获取主机启动程序信息失败。" - -msgid "Get hostgroup information error." -msgstr "获取主机组信息时发生错误。" - -msgid "" -"Get iSCSI port info error, please check the target IP configured in huawei " -"conf file." -msgstr "" -"获取 iSCSI 端口信息时发生错误,请检查 huawei conf 文件中所配置的目标 IP。" - -msgid "Get iSCSI port information error." -msgstr "获取 iSCSI 端口信息时发生错误。" - -msgid "Get iSCSI target port error." -msgstr "获取 iSCSI 目标端口时发生错误。" - -msgid "Get lun id by name error." -msgstr "通过名称获取 LUN 标识时出错。" - -msgid "Get lun migration task error." -msgstr "获取 LUN 迁移任务时发生错误。" - -msgid "Get lungroup id by lun id error." -msgstr "通过 LUN 标识获取 LUN 组标识时发生错误。" - -msgid "Get lungroup information error." -msgstr "获取 LUN 组信息时发生错误。" - -msgid "Get manageable snapshots not implemented." -msgstr "获取易管理快照的功能未实现。" - -msgid "Get manageable volumes not implemented." -msgstr "获取易管理卷的功能未实现。" - -msgid "Get migration task error." -msgstr "获取迁移任务时出错。" - -msgid "Get pair failed." -msgstr "获取对失败。" - -msgid "Get partition by name error." -msgstr "按名称获取分区时发生错误。" - -msgid "Get partition by partition id error." -msgstr "按分区标识获取分区时发生错误。" - -msgid "Get port group by view error." 
-msgstr "按视图获取端口组时出错。" - -msgid "Get port group error." -msgstr "获取端口组时出错。" - -msgid "Get port groups by port error." -msgstr "按端口获取端口组时出错。" - -msgid "Get ports by port group error." -msgstr "按端口组获取端口时出错。" - -msgid "Get remote device info failed." -msgstr "获取远程设备信息失败。" - -msgid "Get remote devices error." -msgstr "获取远程设备时出错。" - -msgid "Get smartcache by cache id error." -msgstr "按高速缓存标识获取 smartcache 时发生错误。" - -msgid "Get snapshot error." -msgstr "获取快照时出错。" - -msgid "Get snapshot id error." -msgstr "获取快照标识时发生错误。" - -msgid "Get target IP error." -msgstr "获取目标 IP 时发生错误。" - -msgid "Get target LUN of SplitMirror error." -msgstr "获取 SplitMirror 的目标 LUN 时出错。" - -msgid "Get views by port group error." -msgstr "按端口组获取视图时出错。" - -msgid "Get volume by name error." -msgstr "按名称获取卷时发生错误。" - -msgid "Get volume error." -msgstr "获取卷时发生错误。" - -#, python-format -msgid "" -"Glance metadata cannot be updated, key %(key)s exists for volume id " -"%(volume_id)s" -msgstr "无法更新 Glance 元数据,对于卷标识 %(volume_id)s,键 %(key)s 存在" - -#, python-format -msgid "Glance metadata for volume/snapshot %(id)s cannot be found." -msgstr "Glance中无法找到卷/镜像 %(id)s 的元数据" - -#, python-format -msgid "Gluster config file at %(config)s doesn't exist" -msgstr "%(config)s 处不存在 Gluster 配置文件" - -#, python-format -msgid "Google Cloud Storage api failure: %(reason)s" -msgstr "Google 云存储器 API 故障:%(reason)s" - -#, python-format -msgid "Google Cloud Storage connection failure: %(reason)s" -msgstr "Google 云存储器连接故障:%(reason)s" - -#, python-format -msgid "Google Cloud Storage oauth2 failure: %(reason)s" -msgstr "Google 云存储器 oauth2 故障:%(reason)s" - -#, python-format -msgid "Got bad path information from DRBDmanage! (%s)" -msgstr "从 DRBDmanage 中获得了错误路径信息!(%s)" - -#, python-format -msgid "Group %(group_id)s could not be found." -msgstr "无法找到名为 %(group_id)s 的组。" - -#, python-format -msgid "" -"Group %s still contains volumes. The delete-volumes flag is required to " -"delete it." 
-msgstr "组 %s 仍然包含卷。需要 delete-volumes 标记,以将其删除。" - -#, python-format -msgid "" -"Group Type %(group_type_id)s deletion is not allowed with groups present " -"with the type." -msgstr "当存在类型为 %(group_type_id)s 的组时,不允许删除该组类型。" - -#, python-format -msgid "Group Type %(group_type_id)s has no specs with key %(group_specs_key)s." -msgstr "组类型 %(group_type_id)s 没有键 %(group_specs_key)s 对应的规格说明。" - -#, python-format -msgid "Group Type %(id)s already exists." -msgstr "组类型 %(id)s 已存在。" - -#, python-format -msgid "Group Type %(type_id)s has no extra spec with key %(id)s." -msgstr "组类型%(type_id)s没有与键%(id)s对应的额外规格。" - -msgid "Group snapshot is empty. No group will be created." -msgstr "组快照为空。将不会创建任何组。" - -#, python-format -msgid "Group status must be available or error, but current status is: %s" -msgstr "组状态必须为“available”或“error”,但当前状态为:%s" - -#, python-format -msgid "Group status must be available, but current status is: %s." -msgstr "组状态必须为“available”,但当前状态为:%s。" - -#, python-format -msgid "Group type %(group_type_id)s could not be found." -msgstr "组类型 %(group_type_id)s 无法找到。" - -#, python-format -msgid "" -"Group type access for %(group_type_id)s / %(project_id)s combination already " -"exists." -msgstr "已存在针对 %(group_type_id)s / %(project_id)s 组合的组类型权限。" - -#, python-format -msgid "" -"Group type access not found for %(group_type_id)s / %(project_id)s " -"combination." -msgstr "使用 %(group_type_id)s / %(project_id)s 组合无法访问组类型。" - -#, python-format -msgid "Group type encryption for type %(type_id)s already exists." -msgstr "针对类型为 %(type_id)s 的组类型加密方式已存在。" - -#, python-format -msgid "Group type encryption for type %(type_id)s does not exist." -msgstr "针对组类型 %(type_id)s 加密方式不存在。" - -msgid "Group type name can not be empty." -msgstr "组类型名称不能为空。" - -#, python-format -msgid "Group type with name %(group_type_name)s could not be found." 
-msgstr "名为 %(group_type_name)s 的组类型无法找到。" - -#, python-format -msgid "" -"Group volume type mapping for %(group_id)s / %(volume_type_id)s combination " -"already exists." -msgstr "已存在映射 %(group_id)s / %(volume_type_id)s 组合的卷组类型。" - -#, python-format -msgid "GroupSnapshot %(group_snapshot_id)s could not be found." -msgstr "无法找到组快照 %(group_snapshot_id)s。" - -msgid "" -"GroupSnapshot status must be available or error, and no Group can be " -"currently using it as source for its creation." -msgstr "" -"组快照状态必须为“avaliable”或“error”,而且当前没有任何组可以使用该快照作为源" -"来创建。" - -msgid "HBSD error occurs." -msgstr "发生 HBSD 错误。" - -msgid "HPELeftHand url not found" -msgstr "找不到 HPELeftHand URL" - -#, python-format -msgid "HTTP code: %(status_code)s, %(reason)s [%(error_msg)s]" -msgstr "HTTP状态码: %(status_code)s, %(reason)s [%(error_msg)s]" - -#, python-format -msgid "HTTP code: %(status_code)s, response: %(reason)s [%(error_msg)s]" -msgstr "HTTP状态码: %(status_code)s, 响应: %(reason)s [%(error_msg)s]" - -#, python-format -msgid "HTTP exit code: [%(code)s]" -msgstr "HTTP退出码:[%(code)s]" - -#, python-format -msgid "" -"Hash block size has changed since the last backup. New hash block size: " -"%(new)s. Old hash block size: %(old)s. Do a full backup." -msgstr "" -"自从最近一次备份以来,散列块大小已更改。新的散列块大小:%(new)s。旧的散列块大" -"小:%(old)s。请执行完全备份。" - -#, python-format -msgid "Have not created %(tier_levels)s tier(s)." -msgstr "尚未创建 %(tier_levels)s 层。" - -msgid "Heartbeat" -msgstr "心跳" - -#, python-format -msgid "Hint \"%s\" not supported." -msgstr "提示“%s”不受支持。" - -msgid "Host" -msgstr "主机" - -#, python-format -msgid "Host %(host)s could not be found." -msgstr "主机 %(host)s 没有找到。" - -#, python-format -msgid "" -"Host %(host)s does not match x509 certificate contents: CommonName " -"%(commonName)s." 
-msgstr "主机 %(host)s 与 x509 证书内容不匹配:CommonName %(commonName)s。" - -#, python-format -msgid "Host %s has no FC initiators" -msgstr "主机 %s 没有 FC 启动程序" - -#, python-format -msgid "Host group with name %s not found" -msgstr "找不到名称为 %s 的主机组" - -#, python-format -msgid "Host group with ref %s not found" -msgstr "找不到具有 ref %s 的主机组" - -msgid "Host is NOT Frozen." -msgstr "主机未冻结。" - -msgid "Host is already Frozen." -msgstr "主机已冻结。" - -msgid "Host must be specified in query parameters" -msgstr "主机必须在查询参数中指定" - -#, python-format -msgid "Host not found. Failed to remove %(service)s on %(host)s." -msgstr "找不到主机。未能在 %(host)s 上移除 %(service)s。" - -#, python-format -msgid "Host replication_status must be %s to failover." -msgstr "主机 replication_status 必须为 %s 才能进行故障转移。" - -#, python-format -msgid "Host type %s not supported." -msgstr "不支持主机类型 %s。" - -#, python-format -msgid "Host with name: %s not found" -msgstr "找不到名为 %s 的主机" - -#, python-format -msgid "Host with ports %(ports)s not found." -msgstr "找不到具有端口 %(ports)s 的主机。" - -msgid "Hosts" -msgstr "主机" - -msgid "Hypermetro and Replication can not be used in the same volume_type." -msgstr "Hypermetro 和复制不能用于同一 volume_type。" - -#, python-format -msgid "I/O group %(iogrp)d is not valid; available I/O groups are %(avail)s." -msgstr "I/O 组 %(iogrp)d 无效;可用的 I/O 组为 %(avail)s。" - -msgid "ID" -msgstr "ID" - -msgid "" -"If compression is set to True, rsize must also be set (not equal to -1)." -msgstr "如果 compression 设置为 True,那么还必须设置 rsize(不等于 -1)。" - -msgid "If nofmtdisk is set to True, rsize must also be set to -1." -msgstr "如果 nofmtdisk 设置为 True,rsize 必须也设置为 -1。" - -#, python-format -msgid "" -"Illegal value '%(prot)s' specified for flashsystem_connection_protocol: " -"valid value(s) are %(enabled)s." -msgstr "" -"为 flashsystem_connection_protocol 指定的值“%(prot)s”非法:有效值为 " -"%(enabled)s。" - -msgid "Illegal value specified for IOTYPE: 0, 1, or 2." 
-msgstr "对 IOTYPE 指定了非法值:0、1 或 2。" - -msgid "Illegal value specified for smarttier: set to either 0, 1, 2, or 3." -msgstr "为 smarttier 指定了非法值:请将值设置为 0、1、2 或者 3。" - -msgid "" -"Illegal value specified for storwize_svc_vol_grainsize: set to either 32, " -"64, 128, or 256." -msgstr "" -"为 storwize_svc_vol_grainsize 指定了非法值:请将值设置为 32、64、128 或 256。" - -msgid "" -"Illegal value specified for thin: Can not set thin and thick at the same " -"time." -msgstr "为 thin 指定了非法值:不能同时设置 thin 和 thick。" - -#, python-format -msgid "Image %(image_id)s could not be found." -msgstr "找不到映像 %(image_id)s。" - -#, python-format -msgid "Image %(image_id)s is not active." -msgstr "映像 %(image_id)s 处于不活动状态。" - -#, python-format -msgid "Image %(image_id)s is unacceptable: %(reason)s" -msgstr "映像 %(image_id)s 无法接受,原因是: %(reason)s" - -msgid "Image location not present." -msgstr "映像位置不存在。" - -msgid "Image quota exceeded" -msgstr "镜像超出配额。" - -#, python-format -msgid "" -"Image virtual size is %(image_size)dGB and doesn't fit in a volume of size " -"%(volume_size)dGB." -msgstr "" -"映像虚拟大小为 %(image_size)dGB,在大小为 %(volume_size)dGB 的卷中将无法容" -"纳。" - -msgid "" -"ImageBusy error raised while deleting rbd volume. This may have been caused " -"by a connection from a client that has crashed and, if so, may be resolved " -"by retrying the delete after 30 seconds has elapsed." -msgstr "" -"删除 rbd 卷时,发生 ImageBusy 错误。这可能是由于客户机的已崩溃连接导致,如果" -"是这样,那么可通过在 30 秒之后重试该删除来解决问题。" - -#, python-format -msgid "" -"Import record failed, cannot find backup service to perform the import. " -"Request service %(service)s" -msgstr "记录导入失败,找不到要执行导入的备份服务。请求服务 %(service)s" - -#, python-format -msgid "" -"Incorrect port number. Load balanced port is: %(lb_api_port)s, api service " -"port is: %(apisvc_port)s" -msgstr "" -"不正确的端口号. 负载均衡端口号为: %(lb_api_port)s, api服务端口号为: " -"%(apisvc_port)s" - -msgid "Incorrect request body format" -msgstr "不正确的请求主体格式" - -msgid "Incorrect request body format." 
-msgstr "请求主体格式不正确。" - -msgid "Incremental backups exist for this backup." -msgstr "对于此备份,存在增量备份。" - -#, python-format -msgid "" -"Infortrend CLI exception: %(err)s Param: %(param)s (Return Code: %(rc)s) " -"(Output: %(out)s)" -msgstr "" -"Infortrend CLI 异常:%(err)s 参数:%(param)s(返回码:%(rc)s)(输出:" -"%(out)s)" - -msgid "Input volumes or snapshots are invalid." -msgstr "输入卷或快照无效。" - -msgid "Input volumes or source volumes are invalid." -msgstr "输入卷或源卷无效。" - -#, python-format -msgid "Instance %(uuid)s could not be found." -msgstr "找不到实例 %(uuid)s。" - -msgid "Insufficient free space available to extend volume." -msgstr "可用空间不足,无法扩展卷。" - -msgid "Insufficient privileges" -msgstr "特权不足" - -#, python-format -msgid "Invalid 3PAR Domain: %(err)s" -msgstr "3PAR 域无效:%(err)s" - -msgid "Invalid ALUA value. ALUA value must be 1 or 0." -msgstr "ALUA 值无效。ALUA 值必须为 1 或 0。" - -msgid "Invalid Ceph args provided for backup rbd operation" -msgstr "为备份rbd操作提供的Ceph参数无效" - -#, python-format -msgid "Invalid CgSnapshot: %(reason)s" -msgstr "Cg 快照无效:%(reason)s" - -#, python-format -msgid "Invalid ConsistencyGroup: %(reason)s" -msgstr "一致性组无效:%(reason)s" - -#, python-format -msgid "" -"Invalid ConsistencyGroup: Cannot delete consistency group %(id)s. " -"%(reason)s, and it cannot be the source for an ongoing CG or CG Snapshot " -"creation." -msgstr "" -"无效的一致性组:不能删除一致性组%(id)s.%(reason)s,它不能作为创建运行CG或者CG" -"快照的源。" - -msgid "Invalid ConsistencyGroup: No host to create consistency group" -msgstr "一致性组无效:没有任何主机用于创建一致性组" - -#, python-format -msgid "Invalid Group: %(reason)s" -msgstr "无效的组: %(reason)s" - -#, python-format -msgid "Invalid GroupSnapshot: %(reason)s" -msgstr "无效的组快照: %(reason)s" - -#, python-format -msgid "" -"Invalid HPELeftHand API version found: %(found)s. Version %(minimum)s or " -"greater required for manage/unmanage support." 
-msgstr "" -"发现无效 HPELeftHand API 版本 (%(found)s)。需要版本 %(minimum)s 或更高版本以" -"获取管理/取消管理支持。" - -#, python-format -msgid "Invalid IP address format: '%s'" -msgstr "IP 地址格式“%s”无效" - -#, python-format -msgid "" -"Invalid QoS specification detected while getting QoS policy for volume %s" -msgstr "获取卷 %s 的 QoS 策略时,检测到无效 QoS 规范" - -#, python-format -msgid "Invalid Replication Target: %(reason)s" -msgstr "无效复制目标:%(reason)s" - -#, python-format -msgid "" -"Invalid Virtuozzo Storage share specification: %r. Must be: [MDS1[," -"MDS2],...:/][:PASSWORD]." -msgstr "" -"无效 Virtuozzo 存储器共享规范:%r。必须为 [MDS1[,MDS2],...:/][:" -"PASSWORD]。" - -#, python-format -msgid "Invalid XtremIO version %(cur)s, version %(min)s or up is required" -msgstr "XtremIO V%(cur)s 无效,需要 V%(min)s 或更高版本" - -#, python-format -msgid "Invalid allocated quotas defined for the following project quotas: %s" -msgstr "对以下项目配额定义的已分配配额无效:%s" - -msgid "Invalid argument" -msgstr "自变量无效" - -msgid "Invalid argument - negative seek offset." -msgstr "无效参数 - 查找偏移量为负数。" - -#, python-format -msgid "Invalid argument - whence=%s not supported" -msgstr "自变量无效 - whence=%s 不受支持" - -#, python-format -msgid "Invalid argument - whence=%s not supported." -msgstr "无效参数 - whence=%s 不受支持。" - -#, python-format -msgid "Invalid attaching mode '%(mode)s' for volume %(volume_id)s." -msgstr "加载模式 '%(mode)s' 对于卷 %(volume_id)s 无效。" - -#, python-format -msgid "Invalid attachment info for volume %(name)s: %(reason)s" -msgstr "卷 %(name)s 的无效挂载信息: %(reason)s" - -#, python-format -msgid "Invalid auth key: %(reason)s" -msgstr "认证密钥无效:%(reason)s" - -#, python-format -msgid "Invalid backup: %(reason)s" -msgstr "备份无效:%(reason)s" - -#, python-format -msgid "Invalid body provided for creating volume. Request API version: %s." -msgstr "创建卷时提供了无效请求主体。请求API版本:%s。" - -msgid "Invalid chap user details found in CloudByte storage." 
-msgstr "在 CloudByte 存储器中找到了无效 chap 用户详细信息。" - -#, python-format -msgid "Invalid connection initialization response of volume %(name)s" -msgstr "卷 %(name)s 的连接初始化响应无效" - -#, python-format -msgid "" -"Invalid connection initialization response of volume %(name)s: %(output)s" -msgstr "卷 %(name)s 的连接初始化响应无效:%(output)s" - -#, python-format -msgid "Invalid content type %(content_type)s." -msgstr "无效的内容类型 %(content_type)s。" - -msgid "Invalid credentials" -msgstr "无效凭证" - -#, python-format -msgid "Invalid directory: %s" -msgstr "无效目录:%s" - -#, python-format -msgid "Invalid disk adapter type: %(invalid_type)s." -msgstr "无效磁盘适配器类型:%(invalid_type)s。" - -#, python-format -msgid "Invalid disk backing: %s." -msgstr "无效磁盘备份:%s。" - -#, python-format -msgid "Invalid disk type: %(disk_type)s." -msgstr "无效磁盘类型:%(disk_type)s。" - -#, python-format -msgid "Invalid disk type: %s." -msgstr "无效磁盘类型:%s。" - -#, python-format -msgid "" -"Invalid disk-format '%(disk_format)s' is specified. Allowed disk-formats are " -"%(allowed_disk_formats)s." -msgstr "" -"指定的磁盘格式\"%(disk_format)s\"无效。允许的磁盘格式" -"为%(allowed_disk_formats)s。" - -#, python-format -msgid "Invalid filter keys: %s" -msgstr "无效的筛选键:%s" - -#, python-format -msgid "Invalid group type: %(reason)s" -msgstr "无效的组类型:%(reason)s" - -#, python-format -msgid "Invalid host: %(reason)s" -msgstr "主机无效:%(reason)s" - -#, python-format -msgid "" -"Invalid hpe3parclient version found (%(found)s). Version %(minimum)s or " -"greater required. Run \"pip install --upgrade python-3parclient\" to upgrade " -"the hpe3parclient." -msgstr "" -"发现无效 hpe3parclient 版本 (%(found)s)。需要版本 %(minimum)s 或更高版本。请" -"运行“pip install --upgrade python-3parclient”以升级 hpe3parclient。" - -#, python-format -msgid "" -"Invalid hpelefthandclient version found (%(found)s). Version %(minimum)s or " -"greater required. Run 'pip install --upgrade python-lefthandclient' to " -"upgrade the hpelefthandclient." 
-msgstr "" -"发现无效 hpelefthandclient 版本 (%(found)s)。需要版本 %(minimum)s 或更高版" -"本。请运行“pip install --upgrade python-lefthandclient”以升级 " -"hpelefthandclient。" - -#, python-format -msgid "Invalid image href %(image_href)s." -msgstr "无效映像 href %(image_href)s。" - -msgid "Invalid image identifier or unable to access requested image." -msgstr "映像标识无效,或无法访问所请求映像。" - -msgid "Invalid imageRef provided." -msgstr "提供了无效的imageRef。" - -msgid "Invalid input" -msgstr "输入无效" - -#, python-format -msgid "Invalid input received: %(reason)s" -msgstr "输入无效: %(reason)s" - -#, python-format -msgid "Invalid is_public filter [%s]" -msgstr "is_public 过滤器 [%s] 无效" - -#, python-format -msgid "Invalid lun type %s is configured." -msgstr "配置了无效 LUN 类型 %s。" - -#, python-format -msgid "Invalid metadata size: %(reason)s" -msgstr "元数据大小无效: %(reason)s" - -#, python-format -msgid "Invalid metadata: %(reason)s" -msgstr "元数据无效: %(reason)s" - -#, python-format -msgid "Invalid mount point base: %s" -msgstr "安装点基准无效:%s" - -#, python-format -msgid "Invalid mount point base: %s." -msgstr "安装点基准无效:%s。" - -#, python-format -msgid "Invalid new snapCPG name for retype. new_snap_cpg='%s'." -msgstr "新 snapCPG 名称对执行 retype 操作无效。new_snap_cpg='%s'。" - -#, python-format -msgid "Invalid port number %(config)s for Coho rpc port" -msgstr "Coho rpc 端口的端口号 %(config)s 无效" - -#, python-format -msgid "" -"Invalid prefetch type '%s' is configured. PrefetchType must be in 0,1,2,3." -msgstr "配置了无效预取类型“%s”。PrefetchType 必须为 0、1、2 和 3 的其中之一。" - -#, python-format -msgid "Invalid qos specs: %(reason)s" -msgstr "qos 规范无效:%(reason)s" - -msgid "Invalid request to attach volume to an invalid target" -msgstr "关于将卷连接至无效目标的请求无效" - -msgid "" -"Invalid request to attach volume with an invalid mode. Attaching mode should " -"be 'rw' or 'ro'" -msgstr "以无效方式连接卷的请求无效。连接方式应该为“rw”或“ro”" - -#, python-format -msgid "Invalid reservation expiration %(expire)s." 
-msgstr "预留到期 %(expire)s 无效。" - -msgid "Invalid response header from RPC server" -msgstr "RPC 服务器发送的响应头无效" - -#, python-format -msgid "Invalid secondary id %s." -msgstr "无效辅助标识 %s。" - -msgid "Invalid service catalog json." -msgstr "服务目录 json 无效。" - -msgid "Invalid sheepdog cluster status." -msgstr "sheepdog 集群状态无效。" - -#, python-format -msgid "Invalid snapshot: %(reason)s" -msgstr "快照无效: %(reason)s" - -#, python-format -msgid "Invalid sort dirs passed: %s" -msgstr "传递的无效排序目录:%s" - -#, python-format -msgid "Invalid sort keys passed: %s" -msgstr "传递的无效排序码:%s" - -#, python-format -msgid "Invalid status: '%s'" -msgstr "无效的状态:'%s'" - -#, python-format -msgid "Invalid storage pool %s requested. Retype failed." -msgstr "请求的存储池 %s 无效。转型失败。" - -#, python-format -msgid "Invalid storage pool %s specificed." -msgstr "指定的存储池 %s 无效。" - -msgid "Invalid storage pool is configured." -msgstr "配置了无效存储池。" - -msgid "Invalid transport type." -msgstr "无效传输类型。" - -#, python-format -msgid "Invalid update setting: '%s'" -msgstr "无效的更新设置:'%s'" - -#, python-format -msgid "Invalid value '%s' for delete-volumes flag." -msgstr "删除卷标志的值\"%s\"无效。" - -#, python-format -msgid "Invalid value '%s' for force." -msgstr "值“%s”对于 force 无效。" - -#, python-format -msgid "Invalid value '%s' for force. " -msgstr "值“%s”对于 force 无效。" - -#, python-format -msgid "Invalid value '%s' for is_public. Accepted values: True or False." -msgstr "is_public 的值“%s”无效。接受的值:True 或 False。" - -#, python-format -msgid "Invalid value '%s' for skip_validation." 
-msgstr "skip_validation 的值“%s”无效。" - -#, python-format -msgid "Invalid value for 'bootable': '%s'" -msgstr "“bootable”的值无效:“%s”" - -#, python-format -msgid "Invalid value for 'force': '%s'" -msgstr "“force”的值无效:“%s”" - -#, python-format -msgid "Invalid value for 'readonly': '%s'" -msgstr "“readonly”的值无效:“%s”" - -msgid "Invalid value for 'scheduler_max_attempts', must be >=1" -msgstr "值对于“scheduler_max_attempts”无效,必须 >= 1" - -msgid "Invalid value for NetApp configuration option netapp_host_type." -msgstr "NetApp 配置选项 netapp_host_type 的值无效。" - -msgid "Invalid value for NetApp configuration option netapp_lun_ostype." -msgstr "NetApp 配置选项 netapp_lun_ostype 的值无效。" - -#, python-format -msgid "Invalid value for age, %(age)s" -msgstr "age 的值 %(age)s 无效" - -#, python-format -msgid "Invalid value: \"%s\"" -msgstr "无效值:“%s”" - -#, python-format -msgid "" -"Invalid volume size provided for create request: %s (size argument must be " -"an integer (or string representation of an integer) and greater than zero)." -msgstr "" -"针对创建请求提供的以下卷大小无效:%s(自变量 size 必须是整数(也可以是整数的" -"字符串表示法)并且大于零)。" - -#, python-format -msgid "Invalid volume type: %(reason)s" -msgstr "卷类型无效:%(reason)s" - -#, python-format -msgid "Invalid volume: %(reason)s" -msgstr "卷无效: %(reason)s" - -#, python-format -msgid "" -"Invalid volume: Cannot add volume %(volume_id)s to consistency group " -"%(group_id)s because volume is in an invalid state: %(status)s. Valid states " -"are: ('available', 'in-use')." -msgstr "" -"卷无效:无法将卷 %(volume_id)s 添加至一致性组 %(group_id)s,因为该卷处于无效" -"状态:%(status)s。以下是有效状态:(“可用”、“正在使用”)。" - -#, python-format -msgid "" -"Invalid volume: Cannot add volume %(volume_id)s to consistency group " -"%(group_id)s because volume type %(volume_type)s is not supported by the " -"group." 
-msgstr "" -"卷无效:无法将卷 %(volume_id)s 添加至一致性组 %(group_id)s,因为卷类型 " -"%(volume_type)s 不受该组支持。" - -#, python-format -msgid "" -"Invalid volume: Cannot add volume fake-volume-uuid to consistency group " -"%(group_id)s because volume cannot be found." -msgstr "" -"卷无效:无法将卷 fake-volume-uuid 添加至一致性组 %(group_id)s,因为找不到该" -"卷。" - -#, python-format -msgid "" -"Invalid volume: Cannot remove volume fake-volume-uuid from consistency group " -"%(group_id)s because it is not in the group." -msgstr "" -"卷无效:无法将卷 fake-volume-uuid 从一致性组 %(group_id)s 移除,因为它没有在" -"该组中。" - -#, python-format -msgid "Invalid volume_type passed: %s." -msgstr "已传递的 volume_type 无效:%s。" - -#, python-format -msgid "" -"Invalid volume_type provided: %s (requested type is not compatible; either " -"match source volume, or omit type argument)." -msgstr "" -"所提供的以下 volume_type 无效:%s(所请求的类型不兼容;要么与源卷相匹配,要么" -"省略类型参数)。" - -#, python-format -msgid "" -"Invalid volume_type provided: %s (requested type is not compatible; " -"recommend omitting the type argument)." -msgstr "所提供的 volume_type %s 无效(所请求的类型不兼容;建议省略类型参数)。" - -#, python-format -msgid "" -"Invalid volume_type provided: %s (requested type must be supported by this " -"consistency group)." -msgstr "提供的以下 volume_type 无效:%s(所请求类型必须受此一致性组支持)。" - -#, python-format -msgid "" -"Invalid volume_type provided: %s (requested type must be supported by this " -"group)." -msgstr "提供的以下 volume_type 无效:%s(所请求类型必须被该组支持)。" - -#, python-format -msgid "Invalid wwpns format %(wwpns)s" -msgstr "无效 WWPN 格式 %(wwpns)s" - -msgid "Invoking web service failed." -msgstr "调用 Web Service 失败。" - -msgid "Issue encountered waiting for job." -msgstr "等待作业时遇到问题。" - -msgid "Issue encountered waiting for synchronization." -msgstr "等待同步时遇到问题。" - -msgid "" -"Issuing a fail-over failed because replication is not properly configured." -msgstr "发出故障转移失败,因为未正确配置复制。" - -#, python-format -msgid "Job id not found in CloudByte's create volume [%s] response." 
-msgstr "在 CloudByte 的创建卷 [%s] 响应中找不到作业标识。"
-
-#, python-format
-msgid "Job id not found in CloudByte's delete volume [%s] response."
-msgstr "在 CloudByte 的删除卷 [%s] 响应中找不到作业标识。"
-
-#, python-format
-msgid "Kaminario retryable exception: %(reason)s"
-msgstr "Kaminario可重试异常:%(reason)s"
-
-#, python-format
-msgid "KaminarioCinderDriver failure: %(reason)s"
-msgstr "Kaminario Cinder 驱动程序故障:%(reason)s"
-
-msgid ""
-"Key names can only contain alphanumeric characters, underscores, periods, "
-"colons and hyphens."
-msgstr "键名只能包含字母数字字符、下划线、句点、冒号和连字符。"
-
-#, python-format
-msgid "KeyError: %s"
-msgstr "KeyError:%s"
-
-msgid "Keystone version 3 or greater must be used to get nested quota support."
-msgstr "必须使用 Keystone 版本 3 或更高版本来获取嵌套配额支持。"
-
-#, python-format
-msgid "LU does not exist for volume: %s"
-msgstr "卷 %s 没有 LU"
-
-msgid "LUN export failed!"
-msgstr "LUN 导出失败!"
-
-msgid "LUN map overflow on every channel."
-msgstr "LUN 映射在每个通道上溢出。"
-
-#, python-format
-msgid "LUN not found by UUID: %(uuid)s."
-msgstr "无法通过UUID:%(uuid)s找到LUN。"
-
-#, python-format
-msgid "LUN not found with given ref %s."
-msgstr "找不到具有给定引用 %s 的 LUN。"
-
-#, python-format
-msgid "LUN number is out of bound on channel id: %(ch_id)s."
-msgstr "LUN 号超出了通道标识 %(ch_id)s 的范围。"
-
-#, python-format
-msgid "Last %s cinder syslog entries:-"
-msgstr "以下是最后 %s 个 cinder 系统日志条目:-"
-
-msgid "LeftHand cluster not found"
-msgstr "找不到 LeftHand 集群"
-
-msgid "License is unavailable."
-msgstr "许可证不可用。"
-
-#, python-format
-msgid "Line %(dis)d : %(line)s"
-msgstr "行 %(dis)d:%(line)s"
-
-msgid "Link path already exists and its not a symlink"
-msgstr "链接路径已存在,并且它不是符号链接"
-
-#, python-format
-msgid "Linked clone of source volume not supported in state: %s."
-msgstr "不支持处于以下状态的源卷的已链接克隆:%s。"
-
-msgid "Lock acquisition failed."
-msgstr "锁定获取失败。"
-
-#, python-format
-msgid "Login failure code: %(statuscode)s Error: %(responsetext)s"
-msgstr "登录失败状态码: %(statuscode)s 错误: %(responsetext)s"
-
-msgid "Logout session error."
-msgstr "注销会话错误。" - -msgid "" -"Lookup service not configured. Config option for fc_san_lookup_service needs " -"to specify a concrete implementation of the lookup service." -msgstr "" -"未配置查找服务。fc_san_lookup_service 的配置选项需要指定查找服务的具体实现。" - -msgid "Lun migration error." -msgstr "Lun 迁移错误。" - -#, python-format -msgid "" -"MD5 of object: %(object_name)s before: %(md5)s and after: %(etag)s is not " -"same." -msgstr "对象 %(object_name)s 的 MD5 在 %(md5)s 之前和 %(etag)s 之后不相同。" - -#, python-format -msgid "MSG_DENIED: %r" -msgstr "MSG_DENIED:%r" - -#, python-format -msgid "MSG_DENIED: AUTH_ERROR: %r" -msgstr "MSG_DENIED:AUTH_ERROR:%r" - -#, python-format -msgid "MSG_DENIED: RPC_MISMATCH: %r" -msgstr "MSG_DENIED:RPC_MISMATCH:%r" - -#, python-format -msgid "Malformed fcns output string: %s" -msgstr "以下 fcns 输出字符串的格式不正确:%s" - -#, python-format -msgid "Malformed message body: %(reason)s" -msgstr "错误格式的消息体: %(reason)s" - -#, python-format -msgid "Malformed nameserver string: %s" -msgstr "以下名称服务器字符串的格式不正确:%s" - -msgid "Malformed request body" -msgstr "错误格式的请求主体" - -msgid "Malformed request body." -msgstr "请求主体的格式不正确。" - -msgid "Malformed request url" -msgstr "错误格式的请求url" - -#, python-format -msgid "Malformed response to command %(cmd)s: %(reason)s" -msgstr "对命令 %(cmd)s 的响应的格式不正确:%(reason)s" - -msgid "Malformed scheduler_hints attribute" -msgstr "错误格式的 scheduler_hints 属性" - -#, python-format -msgid "Malformed show fcns database string: %s" -msgstr "以下显示 fcns 数据库字符串的格式不正确:%s" - -#, python-format -msgid "" -"Malformed zone configuration: (switch=%(switch)s zone_config=" -"%(zone_config)s)." -msgstr "" -"区域配置的格式不正确:(switch=%(switch)s zone_config=%(zone_config)s)。" - -#, python-format -msgid "Malformed zone status: (switch=%(switch)s zone_config=%(zone_config)s)." -msgstr "" -"区域状态的格式不正确:(交换机为 %(switch)s,zone_config 为 " -"%(zone_config)s)。" - -msgid "Manage existing get size requires 'id'." -msgstr "管理现有 get 大小需要“id”。" - -msgid "Manage existing snapshot not implemented." 
-msgstr "未实现对现有快照的管理。" - -#, python-format -msgid "" -"Manage existing volume failed due to invalid backend reference " -"%(existing_ref)s: %(reason)s" -msgstr "由于后端引用 %(existing_ref)s 无效,管理现有卷失败:%(reason)s" - -#, python-format -msgid "Manage existing volume failed due to volume type mismatch: %(reason)s" -msgstr "由于卷类型不匹配,管理现有卷失败:%(reason)s" - -msgid "Manage existing volume not implemented." -msgstr "未实现对现有卷的管理。" - -msgid "Manage existing volume requires 'source-id'." -msgstr "管理现有卷将需要“source-id”。" - -#, python-format -msgid "" -"Manage volume is not supported if FAST is enable. FAST policy: " -"%(fastPolicyName)s." -msgstr "如果启用了 FAST,那么不支持管理卷。快速策略:%(fastPolicyName)s。" - -msgid "Managing of snapshots to failed-over volumes is not allowed." -msgstr "不允许管理到达已故障转移的卷的快照。" - -msgid "Map info is None due to array version not supporting hypermetro." -msgstr "无映射信息,因为阵列版本不支持 hypermetro。" - -#, python-format -msgid "" -"Mapping %(id)s prepare failed to complete within theallotted %(to)d seconds " -"timeout. Terminating." -msgstr "映射 %(id)s 准备未能在已分配的 %(to)d 秒超时内完成。正在终止。" - -#, python-format -msgid "Masking view %(maskingViewName)s was not deleted successfully" -msgstr "未成功删除掩码视图 %(maskingViewName)s" - -msgid "Max read iops setting for volume qos, use 0 for unlimited" -msgstr "为卷的qos设置最大读iops,0表示无限制。" - -msgid "Max total iops setting for volume qos, use 0 for unlimited" -msgstr "为卷的qos设置最大总量iops,0表示无限制。" - -msgid "Max write iops setting for volume qos, use 0 for unlimited" -msgstr "为卷的qos设置最大写iops,0表示无限制。" - -msgid "Maximum age is count of days since epoch." -msgstr "最大年龄是自新纪元开始计算的天数。" - -#, python-format -msgid "Maximum number of backups allowed (%(allowed)d) exceeded" -msgstr "已超过允许的最大备份数 (%(allowed)d)" - -#, python-format -msgid "Maximum number of snapshots allowed (%(allowed)d) exceeded" -msgstr "已超过允许的最大快照数 (%(allowed)d)" - -#, python-format -msgid "" -"Maximum number of volumes allowed (%(allowed)d) exceeded for quota " -"'%(name)s'." 
-msgstr "对于定额“%(name)s”,超过了允许的最大卷数 (%(allowed)d)。" - -#, python-format -msgid "May specify only one of %s" -msgstr "只能指定 %s 中的一个" - -#, python-format -msgid "Message %(message_id)s could not be found." -msgstr "信息 %(message_id)s 无法找到。" - -msgid "Metadata backup already exists for this volume" -msgstr "对于此卷,已存在元数据备份" - -#, python-format -msgid "Metadata backup object '%s' already exists" -msgstr "元数据备份对象“%s”已存在" - -#, python-format -msgid "Metadata property key %s greater than 255 characters." -msgstr "元数据属性关键字%s超过255个字符。" - -#, python-format -msgid "Metadata property key %s value greater than 255 characters." -msgstr "元数据属性关键字%s值超过255个字符。" - -msgid "Metadata property key blank." -msgstr "元数据属性关键字为空白。" - -msgid "Metadata restore failed due to incompatible version" -msgstr "由于版本不兼容,元数据复原失败" - -msgid "Metadata restore failed due to incompatible version." -msgstr "由于版本不兼容,元数据复原失败。" - -#, python-format -msgid "Method %(method)s is not defined" -msgstr "方法 %(method)s 未被定义" - -msgid "" -"Missing 'purestorage' python module, ensure the library is installed and " -"available." -msgstr "缺少“purestorage”python 模块,请确保库已安装并且可用。" - -msgid "Missing Fibre Channel SAN configuration param - fc_fabric_names" -msgstr "缺少光纤通道 SAN 配置参数 - fc_fabric_names" - -msgid "Missing request body" -msgstr "缺少请求主体" - -msgid "Missing request body." -msgstr "缺少请求主体。" - -#, python-format -msgid "Missing required element '%(element)s' in request body." -msgstr "请求主体中缺少必要的元素'%(element)s'" - -#, python-format -msgid "Missing required element '%s' in request body" -msgstr "在请求主体中缺少必需元素“%s”" - -#, python-format -msgid "Missing required element '%s' in request body." -msgstr "请求主体中缺少必需元素“%s”。" - -msgid "Missing required element 'consistencygroup' in request body." -msgstr "请求主体中缺少必需元素“consistencygroup”。" - -msgid "Missing required element 'delete' in request body." -msgstr "请求主体中缺少必需元素\"delete\"。" - -msgid "Missing required element quota_class_set in request body." 
-msgstr "在请求主体中缺少必需元素 quota_class_set。" - -msgid "Missing required element snapshot in request body." -msgstr "请求主体中缺少必需元素 snapshot。" - -msgid "" -"Multiple SerialNumbers found, when only one was expected for this operation. " -"Please change your EMC config file." -msgstr "" -"在期望此操作只有一个序列号时,却找到了多个序列号。请更改 EMC 配置文件。" - -#, python-format -msgid "Multiple copies of volume %s found." -msgstr "找到了卷 %s 的多个副本。" - -#, python-format -msgid "Multiple matches found for '%s', use an ID to be more specific." -msgstr "对于“%s”,找到多个匹配项,请使用标识以更具体地进行查找。" - -msgid "Multiple profiles found." -msgstr "找到了多个概要文件。" - -msgid "Must implement a fallback schedule" -msgstr "必须实现一个回滚 schedule" - -msgid "Must implement find_retype_host" -msgstr "必须实现 find_retype_host" - -msgid "Must implement host_passes_filters" -msgstr "必须实现 host_passes_filters" - -msgid "Must implement schedule_create_consistencygroup" -msgstr "必须实现 schedule_create_consistencygroup" - -msgid "Must implement schedule_create_group" -msgstr "必须实现 schedule_create_group" - -msgid "Must implement schedule_create_volume" -msgstr "必须实现 schedule_create_volume" - -msgid "Must implement schedule_get_pools" -msgstr "必须实现 schedule_get_pools" - -msgid "Must pass wwpn or host to lsfabric." -msgstr "必须将 wwpn 或 host 传递给 lsfabric。" - -msgid "" -"Must run this command as cloud admin using a Keystone policy.json which " -"allows cloud admin to list and get any project." -msgstr "" -"必须以云管理员身份使用 Keystone policy.json(它允许云管理员列示和获取任何项" -"目)运行此命令。" - -msgid "Must specify 'connector'" -msgstr "必须指定“connector”" - -msgid "Must specify 'connector'." -msgstr "必须指定“connector”。" - -msgid "Must specify 'host'." -msgstr "必须指定“host”。" - -msgid "Must specify 'new_volume'" -msgstr "必须指定“new_volume”" - -msgid "Must specify 'status'" -msgstr "必须指定“status”" - -msgid "" -"Must specify 'status', 'attach_status' or 'migration_status' for update." 
-msgstr "必须指定“status”、“attach_status”或“migration_status”以进行更新。" - -msgid "Must specify a valid attach status" -msgstr "必须指定有效连接状态" - -msgid "Must specify a valid migration status" -msgstr "必须指定有效迁移状态" - -#, python-format -msgid "Must specify a valid persona %(valid)s,value '%(persona)s' is invalid." -msgstr "必须指定有效角色 %(valid)s,值“%(persona)s”无效。" - -#, python-format -msgid "" -"Must specify a valid provisioning type %(valid)s, value '%(prov)s' is " -"invalid." -msgstr "指定有效供应类型 %(valid)s,值“%(prov)s”无效。" - -msgid "Must specify a valid status" -msgstr "必须指定有效状态" - -msgid "Must specify an ExtensionManager class" -msgstr "必须明确一个ExtensionManager类" - -msgid "Must specify bootable in request." -msgstr "必须在请求中指定 bootable。" - -msgid "" -"Must specify one or more of the following keys to update: name, description, " -"add_volumes, remove_volumes." -msgstr "更新时必须指定如下的一个或多个键:名称,描述,添加卷,删除卷。" - -msgid "Must specify protection domain name or protection domain id." -msgstr "必须指定保护域名或者保护域标识。" - -msgid "Must specify readonly in request." -msgstr "必须在请求中指定 readonly。" - -msgid "Must specify snapshot source-name or source-id." -msgstr "必须指定快照 source-name 或 source-id。" - -msgid "Must specify source-name or source-id." -msgstr "必须指定 source-name 或 source-id。" - -msgid "Must specify storage pool name or id." -msgstr "必须指定存储池名称或标识。" - -msgid "Must specify storage pools. Option: sio_storage_pools." -msgstr "必须指定存储库。选项:sio_storage_pools。" - -msgid "Must supply a positive, non-zero value for age" -msgstr "必须为时效提供非零正值" - -#, python-format -msgid "" -"NAS config '%(name)s=%(value)s' invalid. Must be 'auto', 'true', or 'false'" -msgstr "NAS 配置“%(name)s=%(value)s”无效。必须为“auto”、“true”或“false”" - -#, python-format -msgid "NFS config file at %(config)s doesn't exist" -msgstr " %(config)s 处不存在 NFS 配置文件" - -#, python-format -msgid "NFS file %s not discovered." -msgstr "未发现 NFS 文件 %s。" - -msgid "NFS file could not be discovered." 
-msgstr "未能发现 NFS 文件。" - -msgid "NULL host not allowed for volume backend lookup." -msgstr "对于卷后端查找,主机不允许为NULL。" - -msgid "NaElement name cannot be null." -msgstr "NaElement 名称不能为空。" - -msgid "Name" -msgstr "名称" - -msgid "" -"Name, description, add_volumes, and remove_volumes can not be all empty in " -"the request body." -msgstr "在请求主体中,名称、描述、add_volumes 和 remove_volumes 不能全部为空。" - -msgid "Need non-zero volume size" -msgstr "需要非零卷大小" - -#, python-format -msgid "Neither MSG_DENIED nor MSG_ACCEPTED: %r" -msgstr "既无 MSG_DENIED,也无 MSG_ACCEPTED:%r" - -msgid "NetApp Cinder Driver exception." -msgstr "发生“NetApp Cinder 驱动程序”异常。" - -#, python-format -msgid "" -"New size for extend must be greater than current size. (current: %(size)s, " -"extended: %(new_size)s)." -msgstr "" -"用于扩展的新大小必须大于当前大小。(当前:%(size)s,已扩展:%(new_size)s)。" - -#, python-format -msgid "" -"New size should be bigger than the real size from backend storage. realsize: " -"%(oldsize)s, newsize: %(newsize)s." -msgstr "" -"新大小应该大于后端存储器中的实际大小。realsize:%(oldsize)s,newsize:" -"%(newsize)s。" - -msgid "New volume size must be specified as an integer." -msgstr "新卷的大小必须指定为整数。" - -msgid "New volume type must be specified." -msgstr "必须指定新的卷类型。" - -msgid "New volume type not specified in request_spec." -msgstr "在 request_spec 中,未指定新的卷类型。" - -msgid "Nimble Cinder Driver exception" -msgstr "Nimble Cinder 驱动程序异常" - -msgid "No FC initiator can be added to host." -msgstr "无法将任何 FC 启动程序添加至主机。" - -msgid "No FC port connected to fabric." -msgstr "没有任何 FC 端口连接至光纤网络。" - -msgid "No FCP targets found" -msgstr "找不到任何 FCP 目标" - -msgid "No Port Group elements found in config file." -msgstr "配置文件中找不到端口组。" - -msgid "No VF ID is defined in the configuration file." 
-msgstr "未在配置文件中定义 VF 标识。" - -msgid "No active iSCSI portals with supplied iSCSI IPs" -msgstr "不存在具有所提供 iSCSI IP 的活动 iSCSI 门户网站" - -#, python-format -msgid "No available service named %s" -msgstr "不存在任何名为 %s 的可用服务" - -#, python-format -msgid "No backup with id %s" -msgstr "不存在任何具有标识 %s 的备份" - -msgid "No backups available to do an incremental backup." -msgstr "没有任何备份可用于执行增量备份。" - -msgid "No big enough free disk" -msgstr "不存在任何足够大的可用磁盘" - -#, python-format -msgid "No cgsnapshot with id %s" -msgstr "不存在任何具有标识 %s 的 cg 快照" - -msgid "No cinder entries in syslog!" -msgstr "系统日志中没有任何 cinder 条目!" - -#, python-format -msgid "No cloned LUN named %s found on the filer" -msgstr "在文件管理器上,找不到名为 %s 的已克隆 LUN" - -msgid "No config node found." -msgstr "找不到配置节点。" - -#, python-format -msgid "No consistency group with id %s" -msgstr "不存在任何具有标识 %s 的一致性组" - -#, python-format -msgid "No element by given name %s." -msgstr "没有具备给定名称 %s 的元素。" - -msgid "No errors in logfiles!" -msgstr "日志文件中没有任何错误!" - -#, python-format -msgid "No file found with %s as backing file." -msgstr "在将 %s 作为支持文件的情况下,找不到任何文件。" - -#, python-format -msgid "" -"No free LUN IDs left. Maximum number of volumes that can be attached to host " -"(%s) has been exceeded." -msgstr "未剩余可用 LUN 标识。已超过可以连接至主机的最大卷数 (%s)。" - -msgid "No free disk" -msgstr "不存在任何可用磁盘" - -#, python-format -msgid "No good iscsi portal found in supplied list for %s." -msgstr "对于 %s,在所提供列表中,找不到任何有用的 iSCSI 门户网站。" - -#, python-format -msgid "No good iscsi portals found for %s." -msgstr "对于 %s,找不到任何有用的 iSCSI 门户网站。" - -#, python-format -msgid "No group snapshot with id %s" -msgstr "不存在任何具有标识 %s 的组快照" - -#, python-format -msgid "No group with id %s" -msgstr "不存在任何具有标识 %s 的组" - -#, python-format -msgid "No host to create consistency group %s." -msgstr "没有任何主机用于创建一致性组 %s。" - -#, python-format -msgid "No host to create group %s." -msgstr "没有任何主机用于创建组 %s。" - -msgid "No iSCSI-enabled ports on target array." 
-msgstr "目标阵列上没有可支持 iSCSI 的端口。" - -msgid "No image_name was specified in request." -msgstr "未在请求中指定任何 image_name。" - -msgid "No initiator connected to fabric." -msgstr "没有任何启动程序连接至光纤网络。" - -#, python-format -msgid "No initiator group found for initiator %s" -msgstr "找不到对应启动程序 %s 的启动程序组" - -msgid "No initiators found, cannot proceed" -msgstr "找不到任何发起方,无法继续" - -#, python-format -msgid "No interface found on cluster for ip %s" -msgstr "集群中找不到 IP %s 的接口" - -msgid "No ip address found." -msgstr "找不到 IP 地址。" - -msgid "No iscsi auth groups were found in CloudByte." -msgstr "在 CloudByte 中,找不到任何 iscsi 认证组。" - -msgid "No iscsi initiators were found in CloudByte." -msgstr "在 CloudByte 中,找不到任何 iscsi 发起方。" - -#, python-format -msgid "No iscsi service found for CloudByte volume [%s]." -msgstr "对于 CloudByte 卷 [%s],找不到任何 iscsi 服务。" - -msgid "No iscsi services found in CloudByte storage." -msgstr "在 CloudByte 存储器中,找不到任何 iscsi 服务。" - -#, python-format -msgid "No key file specified and unable to load key from %(cert)s %(e)s." -msgstr "没有指定任何密钥文件,无法从 %(cert)s %(e)s 装入密钥。" - -msgid "No mounted Gluster shares found" -msgstr "找不到任何已安装的 Gluster 共享项" - -msgid "No mounted NFS shares found" -msgstr "找不到任何已安装的 NFS 共享项" - -msgid "No mounted SMBFS shares found." -msgstr "找不到任何已安装的 SMBFS 共享项。" - -msgid "No mounted Virtuozzo Storage shares found" -msgstr "找不到任何已安装的 Virtuozzo 存储器共享项" - -msgid "No mounted shares found" -msgstr "找不到任何已安装的共享项" - -#, python-format -msgid "No node found in I/O group %(gid)s for volume %(vol)s." -msgstr "在卷 %(vol)s 的 I/O 组 %(gid)s 中找不到节点。" - -msgid "" -"No pools are available for provisioning volumes. Ensure that the " -"configuration option netapp_pool_name_search_pattern is set correctly." -msgstr "" -"没有池可用于提供卷。请确保正确设置了 netapp_pool_name_search_pattern 配置选" -"项。" - -msgid "" -"No response was received from CloudByte storage list iSCSI auth user API " -"call." 
-msgstr "未从 CloudByte 存储器列表 iSCSI 认证用户 API 调用接收到任何响应。" - -msgid "No response was received from CloudByte storage list tsm API call." -msgstr "未从 CloudByte 存储器列表 tsm API 调用接收到任何响应。" - -msgid "No response was received from CloudByte's list filesystem api call." -msgstr "未从 CloudByte 的列表文件系统 API 调用接收到任何响应。" - -msgid "No service VIP configured and no nexenta_client_address" -msgstr "未配置服务 VIP 并且没有 nexenta_client_address" - -#, python-format -msgid "No snap found with %s as backing file." -msgstr "在将 %s 作为支持文件的情况下,找不到任何 snap。" - -#, python-format -msgid "No snapshot image found in snapshot group %s." -msgstr "在快照组 %s 中,找不到快照映像。" - -#, python-format -msgid "No snapshots could be found on volume %s." -msgstr "在卷 %s 上找不到快照。" - -#, python-format -msgid "No source snapshots provided to create consistency group %s." -msgstr "未提供源快照,无法创建一致性组 %s。" - -msgid "" -"No storage could be allocated for this volume request. You may be able to " -"try another size or volume type." -msgstr "无法为该卷请求分配存储。可以尝试其他大小或卷类型。" - -#, python-format -msgid "No storage path found for export path %s" -msgstr "对于导出路径 %s,找不到存储路径" - -#, python-format -msgid "No such QoS spec %(specs_id)s." -msgstr "不存在任何此类 QoS 规范 %(specs_id)s。" - -msgid "No suitable discovery ip found" -msgstr "找不到合适的发现 IP" - -#, python-format -msgid "No support to restore backup version %s" -msgstr "不支持复原备份版本 %s" - -#, python-format -msgid "No target id found for volume %(volume_id)s." -msgstr "没有为卷 %(volume_id)s 找到目标id。" - -msgid "" -"No unused LUN IDs are available on the host; multiattach is enabled which " -"requires that all LUN IDs to be unique across the entire host group." -msgstr "" -"主机中没有可用的未使用 LUN 标识;已启用多个连接,这要求所有 LUN 标识在整个主" -"机组中唯一。" - -#, python-format -msgid "No valid host was found. %(reason)s" -msgstr "找不到有效主机,原因是 %(reason)s。" - -#, python-format -msgid "No valid hosts for volume %(id)s with type %(type)s" -msgstr "对于类型为 %(type)s 的卷 %(id)s,不存在任何有效主机" - -#, python-format -msgid "No vdisk with the UID specified by ref %s." 
-msgstr "没有具备引用 %s 指定的 UID 的 vdisk。" - -#, python-format -msgid "No views found for LUN: %s" -msgstr "找不到 LUN %s 的视图" - -#, python-format -msgid "" -"No volume on cluster with vserver %(vserver)s and junction path %(junction)s " -msgstr "集群上不存在任何具有 vserver %(vserver)s 和结点路径 %(junction)s 的卷" - -msgid "No volume service(s) started successfully, terminating." -msgstr "未成功启动卷服务,正在终止。" - -msgid "No volume was found at CloudByte storage." -msgstr "在 CloudByte 存储器上,找不到任何卷。" - -msgid "No volume_type should be provided when creating test replica." -msgstr "当创建测试副本时,不应该提供任何 volume_type。" - -msgid "No volumes found in CloudByte storage." -msgstr "在 CloudByte 存储器中找不到任何卷。" - -#, python-format -msgid "No volumes or consistency groups exist in cluster %(current)s." -msgstr "集群 %(current)s 中不存在卷或者一致性组。" - -msgid "No weighed hosts available" -msgstr "没有加权主机可用" - -#, python-format -msgid "Not a valid string: %s" -msgstr "无效字符串:%s" - -msgid "Not a valid value for NaElement." -msgstr "此值对 NaElement 无效。" - -#, python-format -msgid "Not able to find a suitable datastore for the volume: %s." -msgstr "无法找到适合卷 %s 的数据存储器。" - -msgid "Not an rbd snapshot" -msgstr "不是 rbd 快照" - -#, python-format -msgid "Not authorized for image %(image_id)s." -msgstr "未针对映像 %(image_id)s 授权。" - -msgid "Not authorized." -msgstr "未授权。" - -#, python-format -msgid "Not enough space on backend (%(backend)s)" -msgstr "后端 (%(backend)s) 上没有足够的空间" - -msgid "Not enough storage space in the ZFS share to perform this operation." -msgstr "ZFS 共享项中的存储空间不足,无法执行此操作。" - -msgid "Not stored in rbd" -msgstr "未存储在 rbd 中" - -msgid "Nova returned \"error\" status while creating snapshot." -msgstr "在创建快照时,Nova 返回了“错误”状态。" - -msgid "Null response received from CloudByte's list filesystem." -msgstr "从 CloudByte 的列表文件系统接收到空响应。" - -msgid "Null response received from CloudByte's list iscsi auth groups." -msgstr "从 CloudByte 的列表 iscsi 认证组接收到空响应。" - -msgid "Null response received from CloudByte's list iscsi initiators." 
-msgstr "从 CloudByte 的列表 iscsi 发起方接收到空响应。" - -msgid "Null response received from CloudByte's list volume iscsi service." -msgstr "从 CloudByte 的列表卷 iscsi 服务接收到空响应。" - -#, python-format -msgid "Null response received while creating volume [%s] at CloudByte storage." -msgstr "在 CloudByte 存储器上创建卷 [%s] 时,接收到空响应。" - -#, python-format -msgid "Null response received while deleting volume [%s] at CloudByte storage." -msgstr "删除 CloudByte 存储器上的卷 [%s] 时,接收到空响应。" - -#, python-format -msgid "" -"Null response received while querying for [%(operation)s] based job " -"[%(job)s] at CloudByte storage." -msgstr "" -"在 CloudByte 存储器中查询基于 [%(operation)s] 的作业[%(job)s] 时接收到空响" -"应。" - -msgid "Object Count" -msgstr "对象计数" - -msgid "Object Version" -msgstr "对象版本" - -msgid "Object is not a NetApp LUN." -msgstr "对象不是 NetApp LUN。" - -#, python-format -msgid "" -"On an Extend Operation, error adding volume to composite volume: " -"%(volumename)s." -msgstr "执行扩展操作期间,向组合卷 %(volumename)s 添加卷时出错。 " - -msgid "One of the required inputs from host, port or scheme was not found." -msgstr "找不到来自主机、端口或方案的必需输入之一。" - -msgid "" -"One of the services is in Liberty version. We do not provide backward " -"compatibility with Liberty now, you need to upgrade to Mitaka first." -msgstr "" -"某一服务属于Liberty版本。目前不提供支持Liberty的后向兼容,请首先升级至" -"Mitaka。" - -#, python-format -msgid "" -"Only %(value)s %(verb)s request(s) can be made to %(uri)s every " -"%(unit_string)s." -msgstr "" -"只能有 %(value)s 个 %(verb)s 请求发送给 %(uri)s 限定是每一个 " -"%(unit_string)s。" - -msgid "Only one limit can be set in a QoS spec." -msgstr "在 QoS 规范中只能设置一个限制。" - -msgid "" -"Only users with token scoped to immediate parents or root projects are " -"allowed to see its children quotas." -msgstr "只允许令牌作用域仅限于直系父代或者根项目的用户查看其子代配额。" - -msgid "Only volumes managed by OpenStack can be unmanaged." -msgstr "只有 OpenStack 管理的卷才能为非受管卷。" - -#, python-format -msgid "Operation failed with status=%(status)s. 
Full dump: %(data)s" -msgstr "操作失败,并且 status=%(status)s。完全转储:%(data)s" - -#, python-format -msgid "Operation not supported: %(operation)s." -msgstr "操作 %(operation)s 不受支持。" - -msgid "Option gpfs_images_dir is not set correctly." -msgstr "选项 gpfs_images_dir 未正确设置。" - -msgid "Option gpfs_images_share_mode is not set correctly." -msgstr "选项 gpfs_images_share_mode 未正确设置。" - -msgid "Option gpfs_mount_point_base is not set correctly." -msgstr "选项 gpfs_mount_point_base 未正确设置。" - -#, python-format -msgid "Originating %(res)s %(prop)s must be one of '%(vals)s' values" -msgstr "始发 %(res)s %(prop)s 必须为其中一个“%(vals)s”值" - -#, python-format -msgid "ParseException: %s" -msgstr "ParseException:%s" - -msgid "Partition name is None, please set smartpartition:partitionname in key." -msgstr "分区名称为 None,请在键中设置 smartpartition:partitionname。" - -msgid "" -"Password or SSH private key is required for authentication: set either " -"san_password or san_private_key option." -msgstr "" -"进行认证需要密码或 SSH 专用密钥:请设置 san_password 或 san_private_key 选" -"项。" - -msgid "Path to REST server's certificate must be specified." -msgstr "必须指定 REST 服务器的证书的路径。" - -#, python-format -msgid "Please create %(pool_list)s pool in advance!" -msgstr "请提前创建 %(pool_list)s 池!" - -#, python-format -msgid "Please create %(tier_levels)s tier in pool %(pool)s in advance!" -msgstr "请提前在池 %(pool)s 中创建 %(tier_levels)s 层!" - -#, python-format -msgid "Please provide at least one volume for parameter %s" -msgstr "请为参数 %s 提供至少一个卷" - -msgid "Please specify a name for QoS specs." -msgstr "请为 QoS 规范指定名称。" - -#, python-format -msgid "Policy doesn't allow %(action)s to be performed." -msgstr "政策不允许 %(action)s 被执行。" - -#, python-format -msgid "Pool %(poolNameInStr)s is not found." -msgstr "未能找到池 %(poolNameInStr)s。" - -#, python-format -msgid "Pool %s does not exist in Nexenta Store appliance" -msgstr "池 %s 在 Nexenta 存储设备中不存在" - -#, python-format -msgid "Pool from volume['host'] %(host)s not found." 
-msgstr "找不到 volume['host'] %(host)s 中的池。" - -#, python-format -msgid "Pool from volume['host'] failed with: %(ex)s." -msgstr "volume['host'] 中的池失败,产生了异常:%(ex)s。" - -msgid "Pool is not available in the volume host field." -msgstr "在卷主机字段中,未提供池。" - -msgid "Pool is not available in the volume host fields." -msgstr "在卷主机字段中,未提供池。" - -#, python-format -msgid "Pool with name %(pool)s wasn't found in domain %(domain)s." -msgstr "在域 %(domain)s 中找不到名称为 %(pool)s 的池。" - -#, python-format -msgid "Pool with name %(pool_name)s wasn't found in domain %(domain_id)s." -msgstr "在域 %(domain_id)s 中找不到名称为 %(pool_name)s 的池。" - -#, python-format -msgid "" -"Pool: %(poolName)s. is not associated to storage tier for fast policy " -"%(fastPolicy)s." -msgstr "池 %(poolName)s 与快速策略 %(fastPolicy)s 的存储层无关联。 " - -#, python-format -msgid "PoolName must be in the file %(fileName)s." -msgstr "池名称必须存在于 %(fileName)s 文件中。" - -#, python-format -msgid "Pools %s does not exist" -msgstr "池 %s 不存在" - -msgid "Pools name is not set." -msgstr "未设置池名称。" - -#, python-format -msgid "Primary copy status: %(status)s and synchronized: %(sync)s." -msgstr "主副本状态为 %(status)s,并且已同步:%(sync)s" - -#, python-format -msgid "Programming error in Cinder: %(reason)s" -msgstr "Cinder程序错误:%(reason)s" - -msgid "Project ID" -msgstr "项目ID" - -msgid "Project name not specified" -msgstr "未指定项目名称" - -#, python-format -msgid "Project quotas are not properly setup for nested quotas: %(reason)s." -msgstr "未正确设置要用作嵌套配额的项目配额:%(reason)s。" - -#, python-format -msgid "Project: %s not found" -msgstr "找不到项目:%s" - -msgid "Protection Group not ready." -msgstr "保护组未就绪。" - -#, python-format -msgid "" -"Protocol %(storage_protocol)s is not supported for storage family " -"%(storage_family)s." -msgstr "存储器系列 %(storage_family)s 不支持协议 %(storage_protocol)s。" - -msgid "Provided backup record is missing an id" -msgstr "所提供的备份记录缺少标识" - -#, python-format -msgid "" -"Provided snapshot status %(provided)s not allowed for snapshot with status " -"%(current)s." 
-msgstr "对于状态为 %(current)s 的快照,不允许提供的快照状态 %(provided)s。" - -#, python-format -msgid "" -"Provider information w.r.t CloudByte storage was not found for OpenStack " -"volume [%s]." -msgstr "提供程序信息 w.r.t:找不到对应 OpenStack 卷 [%s] 的 CloudByte 存储器。" - -#, python-format -msgid "Pure Storage Cinder driver failure: %(reason)s" -msgstr "Pure Storage Cinder 驱动程序故障:%(reason)s" - -msgid "Purge command failed, check cinder-manage logs for more details." -msgstr "Pure命令执行失败,更多详细信息请查看cinder-manage日志。" - -#, python-format -msgid "QoS Specs %(specs_id)s already exists." -msgstr "QoS 规范 %(specs_id)s 已存在。" - -#, python-format -msgid "QoS Specs %(specs_id)s is still associated with entities." -msgstr "QoS 规范 %(specs_id)s 仍然与实体关联。" - -#, python-format -msgid "QoS config is wrong. %s must > 0." -msgstr "QoS 配置不正确。%s 必须大于 0。" - -#, python-format -msgid "" -"QoS policy must specify for IOTYPE and another qos_specs, QoS policy: " -"%(qos_policy)s." -msgstr "QoS 策略必须指定 IOTYPE 和另一 qos_specs,QoS 策略:%(qos_policy)s。" - -#, python-format -msgid "" -"QoS policy must specify for IOTYPE: 0, 1, or 2, QoS policy: %(qos_policy)s " -msgstr "QoS 策略必须指定 IOTYPE:0、1 或 2。QoS 策略:%(qos_policy)s " - -#, python-format -msgid "" -"QoS policy upper_limit and lower_limit conflict, QoS policy: %(qos_policy)s." -msgstr "" -"QoS 策略 upper_limit 和 lower_limit 存在冲突,QoS 策略:%(qos_policy)s。" - -#, python-format -msgid "QoS spec %(specs_id)s has no spec with key %(specs_key)s." -msgstr "QoS 规范 %(specs_id)s 没有任何具有键 %(specs_key)s 的规范。" - -msgid "QoS specs are not supported on this storage family and ONTAP version." -msgstr "在此存储器系列和 ONTAP 版本上,QoS 规范不受支持。" - -msgid "Qos specs still in use." -msgstr "Qos 规范仍在使用中。" - -msgid "" -"Query by service parameter is deprecated. Please use binary parameter " -"instead." -msgstr "建议不要按 service 参数进行查询。请改为使用 binary 参数。" - -msgid "Query resource pool error." -msgstr "查询资源池时发生错误。" - -#, python-format -msgid "Quota %s limit must be equal or greater than existing resources." 
-msgstr "配额 %s 限制必须大于或等于现有资源。" - -#, python-format -msgid "Quota class %(class_name)s could not be found." -msgstr "找不到配额类 %(class_name)s。" - -msgid "Quota could not be found" -msgstr "配额没有找到。" - -#, python-format -msgid "Quota exceeded for resources: %(overs)s" -msgstr "对于资源,已超过配额:%(overs)s" - -#, python-format -msgid "Quota exceeded: code=%(code)s" -msgstr "配额用尽:code=%(code)s" - -#, python-format -msgid "Quota for project %(project_id)s could not be found." -msgstr "没有为项目 %(project_id)s 找到配额。" - -#, python-format -msgid "" -"Quota limit invalid for project '%(proj)s' for resource '%(res)s': limit of " -"%(limit)d is less than in-use value of %(used)d" -msgstr "" -"资源“%(res)s”的项目“%(proj)s”的配合限制无效:限制 %(limit)d 小于“in-use”值 " -"%(used)d" - -#, python-format -msgid "Quota reservation %(uuid)s could not be found." -msgstr "找不到配额预留 %(uuid)s。" - -#, python-format -msgid "Quota usage for project %(project_id)s could not be found." -msgstr "找不到项目 %(project_id)s 的配额使用量。" - -#, python-format -msgid "RBD diff op failed - (ret=%(ret)s stderr=%(stderr)s)" -msgstr "RBD 差集操作失败 - (ret=%(ret)s stderr=%(stderr)s)" - -#, python-format -msgid "REST %(proxy_ver)s hpelefthandclient %(rest_ver)s" -msgstr "REST %(proxy_ver)s hpelefthandclient %(rest_ver)s" - -msgid "REST server IP must by specified." -msgstr "必须指定 REST 服务器 IP。" - -msgid "REST server password must by specified." -msgstr "必须指定 REST 服务器密码。" - -msgid "REST server username must by specified." -msgstr "必须指定 REST 服务器用户名。" - -msgid "RPC Version" -msgstr "RPC 版本" - -msgid "RPC server response is incomplete" -msgstr "PRC 服务器响应不完整" - -msgid "Raid did not have MCS Channel." -msgstr "RAID 不具备 MCS 通道。" - -#, python-format -msgid "Received error string: %s" -msgstr "接收到错误字符串:%s" - -msgid "Reference must be for an unmanaged snapshot." -msgstr "引用必须针对非受管快照。" - -msgid "Reference must be for an unmanaged virtual volume." -msgstr "引用必须对应非受管虚拟卷。" - -msgid "Reference must be the name of an unmanaged snapshot." 
-msgstr "引用必须是非受管快照的名称。" - -msgid "Reference must be the volume name of an unmanaged virtual volume." -msgstr "引用必须是非受管虚拟卷的卷名称。" - -msgid "Reference must contain either source-name or source-id element." -msgstr "引用必须包含 source-name 或 source-id 元素。" - -msgid "Reference must contain source-id or source-name element." -msgstr "引用必须包含 source-id 或 source-name 元素。" - -msgid "Reference must contain source-id or source-name key." -msgstr "引用必须包含 source-id 或 source-name 键。" - -msgid "Reference must contain source-id or source-name." -msgstr "引用必须包含 source-id 或 source-name。" - -msgid "Reference must contain source-id." -msgstr "引用必须包含 source-id。" - -msgid "Reference must contain source-name element." -msgstr "引用必须包含 source-name 元素。" - -msgid "Reference must contain source-name or source-id." -msgstr "引用必须包含 source-name 或 source-id。" - -msgid "Reference must contain source-name." -msgstr "引用必须包含源名称。" - -msgid "Reference to volume to be managed must contain source-name." -msgstr "对要管理的卷的引用必须包含 source-name。" - -#, python-format -msgid "Reference to volume: %s to be managed must contain source-name." -msgstr "对要管理的卷 %s 的引用必须包含 source-name。" - -#, python-format -msgid "" -"Refusing to migrate volume ID: %(id)s. Please check your configuration " -"because source and destination are the same Volume Group: %(name)s." -msgstr "" -"正在拒绝迁移卷标识:%(id)s。请检查配置因为源和目标是同一卷组:%(name)s。" - -msgid "Remote pool cannot be found." -msgstr "找不到远程池。" - -msgid "Remove CHAP error." -msgstr "移除 CHAP 时发生错误。" - -msgid "Remove fc from host error." -msgstr "从主机中移除 FC 时发生错误。" - -msgid "Remove host from array error." -msgstr "从阵列中移除主机时发生错误。" - -msgid "Remove host from hostgroup error." -msgstr "从主机组中移除主机时发生错误。" - -msgid "Remove iscsi from host error." -msgstr "从主机中移除 iSCSI 时发生错误。" - -msgid "Remove lun from QoS error." -msgstr "从 QoS 移除 LUN 时出错。" - -msgid "Remove lun from cache error." -msgstr "从高速缓存移除 LUN 时发生错误。" - -msgid "Remove lun from partition error." 
-msgstr "从分区移除 LUN 时发生错误。" - -msgid "Remove port from port group error." -msgstr "从端口组移除端口时出错。" - -msgid "Remove volume export failed." -msgstr "除去卷导出失败。" - -msgid "Rename lun on array error." -msgstr "在阵列上重命名 LUN 时发生错误。" - -msgid "Rename snapshot on array error." -msgstr "在阵列上重命名快照时出错。" - -#, python-format -msgid "Replication %(name)s to %(ssn)s failed." -msgstr "将 %(name)s 复制到 %(ssn)s 失败。" - -#, python-format -msgid "Replication Service Capability not found on %(storageSystemName)s." -msgstr "在 %(storageSystemName)s 上找不到复制服务功能。" - -#, python-format -msgid "Replication Service not found on %(storageSystemName)s." -msgstr "在 %(storageSystemName)s 上找不到复制服务。" - -msgid "Replication not allowed yet." -msgstr "尚不允许复制。" - -msgid "Request body and URI mismatch" -msgstr "请求主体和URI不匹配" - -msgid "Request body contains too many items" -msgstr "请求主体包含太多items" - -msgid "Request body contains too many items." -msgstr "请求主体包含太多项。" - -msgid "Request body empty" -msgstr "请求主体是空的" - -#, python-format -msgid "Request to Datera cluster returned bad status: %(status)s | %(reason)s" -msgstr "针对 Datera 集群的请求返回了不正确的状态:%(status)s | %(reason)s" - -#, python-format -msgid "" -"Requested backup exceeds allowed Backup gigabytes quota. Requested " -"%(requested)sG, quota is %(quota)sG and %(consumed)sG has been consumed." -msgstr "" -"所请求备份超过允许的备份千兆字节配额。已请求 %(requested)sG,配额为 " -"%(quota)sG,并且已耗用 %(consumed)sG。" - -msgid "Requested resource is currently unavailable" -msgstr "请求资源目前不可用" - -#, python-format -msgid "" -"Requested volume or snapshot exceeds allowed %(name)s quota. Requested " -"%(requested)sG, quota is %(quota)sG and %(consumed)sG has been consumed." -msgstr "" -"所请求的卷或快照超过允许的 %(name)s 配额。已请求 %(requested)sG,配额为 " -"%(quota)sG,已耗用 %(consumed)sG。" - -#, python-format -msgid "" -"Requested volume size %(size)d is larger than maximum allowed limit " -"%(limit)d." 
-msgstr "所请求的卷大小 %(size)d超过了允许的最大限制 %(limit)d。" - -msgid "Required configuration not found" -msgstr "找不到必需的配置选项" - -#, python-format -msgid "Required flag %s is not set" -msgstr "未设置必需标记 %s" - -#, python-format -msgid "" -"Reset backup status aborted, the backup service currently configured " -"[%(configured_service)s] is not the backup service that was used to create " -"this backup [%(backup_service)s]." -msgstr "" -"重置备份状态已异常中止,当前配置的备份服务 [%(configured_service)s] 不是已用" -"来创建此备份的备份服务 [%(backup_service)s]。" - -#, python-format -msgid "Resizing clone %s failed." -msgstr "调整克隆 %s 的大小失败。" - -msgid "Resizing image file failed." -msgstr "对映像文件调整大小失败。" - -msgid "Resource could not be found." -msgstr "资源没有找到。" - -msgid "Resource not ready." -msgstr "资源未就绪。" - -#, python-format -msgid "Response error - %s." -msgstr "响应错误 - %s。" - -msgid "Response error - The storage-system is offline." -msgstr "响应错误 - 存储器系统已脱机。" - -#, python-format -msgid "Response error code - %s." -msgstr "响应错误代码 - %s。" - -msgid "RestURL is not configured." -msgstr "未配置 RestURL。" - -#, python-format -msgid "" -"Restore backup aborted, expected volume status %(expected_status)s but got " -"%(actual_status)s." -msgstr "" -"备份复原已异常中止,需要的卷状态为 %(expected_status)s,但获得的是 " -"%(actual_status)s。" - -#, python-format -msgid "" -"Restore backup aborted, the backup service currently configured " -"[%(configured_service)s] is not the backup service that was used to create " -"this backup [%(backup_service)s]." -msgstr "" -"备份复原已异常中止,当前配置的备份服务 [%(configured_service)s] 不是已用来创" -"建此备份的备份服务 [%(backup_service)s]。" - -#, python-format -msgid "" -"Restore backup aborted: expected backup status %(expected_status)s but got " -"%(actual_status)s." -msgstr "" -"备份复原已异常中止:需要的备份状态为 %(expected_status)s,但获得的是 " -"%(actual_status)s。" - -#, python-format -msgid "" -"Retrieved a different amount of SolidFire volumes for the provided Cinder " -"snapshots. 
Retrieved: %(ret)s Desired: %(des)s" -msgstr "" -"检索到所提供 Cinder 快照的不同 SolidFire 卷量。已检索到:%(ret)s 期望:" -"%(des)s" - -#, python-format -msgid "" -"Retrieved a different amount of SolidFire volumes for the provided Cinder " -"volumes. Retrieved: %(ret)s Desired: %(des)s" -msgstr "" -"检索到所提供 Cinder 卷的不同 SolidFire 卷量。已检索到:%(ret)s 期望:%(des)s" - -#, python-format -msgid "Retry count exceeded for command: %s" -msgstr "对于命令,超过重试次数:%s" - -msgid "Retryable Dell Exception encountered" -msgstr "遇到可重试的Dell异常。" - -msgid "Retryable Pure Storage Exception encountered" -msgstr "遇到可重试的Pure Storage异常。" - -msgid "Retryable SolidFire Exception encountered" -msgstr "遇到可重试的 SolidFire 异常" - -msgid "Retype requires migration but is not allowed." -msgstr "转型需要迁移,但是不允许。" - -msgid "" -"Rollback - Volume in another storage group besides default storage group." -msgstr "回滚 - 卷在缺省存储器组之外的另一个存储器组中。" - -#, python-format -msgid "Rolling back %(volumeName)s by deleting it." -msgstr "正在通过删除 %(volumeName)s 而对其进行回滚。" - -#, python-format -msgid "" -"Running Cinder with a VMware vCenter version less than %s is not allowed." -msgstr "不允许运行 VMware vCenter 版本低于 %s 的 Cinder。" - -msgid "SAN product is not configured." -msgstr "未配置 SAN 产品。" - -msgid "SAN protocol is not configured." -msgstr "未配置 SAN 协议。" - -#, python-format -msgid "SMBFS config 'smbfs_oversub_ratio' invalid. Must be > 0: %s" -msgstr "SMBFS 配置“smbfs_oversub_ratio”无效。必须大于 0:%s" - -#, python-format -msgid "SMBFS config 'smbfs_used_ratio' invalid. Must be > 0 and <= 1.0: %s" -msgstr "SMBFS 配置“smbfs_used_ratio”无效。必须大于 0 并且小于或等于 1.0:%s" - -#, python-format -msgid "SMBFS config file at %(config)s doesn't exist." -msgstr "%(config)s 处不存在 SMBFS 配置文件。" - -msgid "SMBFS config file not set (smbfs_shares_config)." 
-msgstr "SMBFS 配置文件未设置 (smbfs_shares_config)。" - -#, python-format -msgid "SSH Command failed after '%(total_attempts)r' attempts : '%(command)s'" -msgstr "SSH 命令在“%(total_attempts)r”之后失败,尝试次数:“%(command)s”" - -#, python-format -msgid "SSH command injection detected: %(command)s" -msgstr "检测到 SSH 命令注入:%(command)s" - -#, python-format -msgid "SSH connection failed for %(fabric)s with error: %(err)s" -msgstr "对于 %(fabric)s,SSH 连接失败,发生错误:%(err)s" - -#, python-format -msgid "SSL Certificate expired on %s." -msgstr "在 %s 上,SSL 证书已到期。" - -#, python-format -msgid "SSL error: %(arg)s." -msgstr "SSL 错误:%(arg)s。" - -#, python-format -msgid "Scheduler Host Filter %(filter_name)s could not be found." -msgstr "调度器主机过滤器 %(filter_name)s 没有找到。" - -#, python-format -msgid "Scheduler Host Weigher %(weigher_name)s could not be found." -msgstr "找不到调度程序主机衡量器 %(weigher_name)s。" - -#, python-format -msgid "" -"Search URI %s is not in the expected format, it should end with ?tag={0}" -msgstr "查询URI %s 的格式不是预期的,它应该以 ?tag={0} 结尾" - -#, python-format -msgid "" -"Secondary copy status: %(status)s and synchronized: %(sync)s, sync progress " -"is: %(progress)s%%." -msgstr "" -"辅助副本状态为 %(status)s,并且已同步:%(sync)s,同步进度为:%(progress)s%%。" - -#, python-format -msgid "" -"Secondary id can not be the same as primary array, backend_id = " -"%(secondary)s." -msgstr "辅助标识不能与主阵列相同,backend_id = %(secondary)s。" - -#, python-format -msgid "SerialNumber must be in the file %(fileName)s." -msgstr "序列号必须存在于 %(fileName)s 文件中。" - -#, python-format -msgid "Service %(service)s on host %(host)s removed." -msgstr "已移除主机 %(host)s 上的服务 %(service)s。" - -#, python-format -msgid "Service %(service_id)s could not be found on host %(host)s." -msgstr "在主机 %(host)s 上找不到服务 %(service_id)s。" - -#, python-format -msgid "Service %(service_id)s could not be found." -msgstr "服务 %(service_id)s 没有找到。" - -msgid "Service is too old to fulfil this request." -msgstr "服务太旧,无法实现此请求。" - -msgid "Service is unavailable at this time." 
-msgstr "该时刻服务无法使用。" - -msgid "" -"Service temporarily unavailable: The server is temporarily unable to service " -"your request" -msgstr "服务暂时不可用:服务器暂时无法提供请求服务" - -msgid "Set pair secondary access error." -msgstr "设置对辅助访问时出错。" - -msgid "Sets thin provisioning." -msgstr "设置自动精简配置。" - -msgid "" -"Setting LUN QoS policy group is not supported on this storage family and " -"ONTAP version." -msgstr "不支持对此存储器系列和 ONTAP 版本设置 LUN QoS 策略组。" - -msgid "" -"Setting file qos policy group is not supported on this storage family and " -"ontap version." -msgstr "不支持对此存储器系列和 ONTAP 版本设置文件 QoS 策略组。" - -#, python-format -msgid "" -"Share %s ignored due to invalid format. Must be of form address:/export. " -"Please check the nas_host and nas_share_path settings." -msgstr "" -"由于格式无效,已忽略共享项 %s。格式必须为 address:/export。请检查 nas_host " -"和 nas_share_path 设置。" - -#, python-format -msgid "" -"Share at %(dir)s is not writable by the Cinder volume service. Snapshot " -"operations will not be supported." -msgstr "对于 Cinder 卷服务,%(dir)s 处的共享项不可写。快照操作将不受支持。" - -#, python-format -msgid "Sheepdog I/O Error, command was: \"%s\"." -msgstr "Sheepdog I/O 错误,命令为:\"%s\"。" - -msgid "" -"Show operations can only be made to projects in the same hierarchy of the " -"project in which users are scoped to." -msgstr "只能对与用户作用域仅限于的项目位于同一层次结构中的项目执行显示操作。" - -msgid "Size" -msgstr "配置" - -#, python-format -msgid "Size for volume: %s not found, cannot secure delete." -msgstr "找不到卷 %s 的大小,无法进行安全删除。" - -#, python-format -msgid "" -"Size is %(image_size)dGB and doesn't fit in a volume of size " -"%(volume_size)dGB." -msgstr "大小为 %(image_size)dGB,无法容纳在大小为 %(volume_size)dGB 的卷中。" - -#, python-format -msgid "" -"Size of specified image %(image_size)sGB is larger than volume size " -"%(volume_size)sGB." 
-msgstr "所指定映像的大小 %(image_size)sGB 大于卷大小 %(volume_size)sGB。" - -#, python-format -msgid "" -"Snapshot %(cgsnapshot_id)s: for Consistency Group %(cg_name)s: delete " -"failed\n" -"%(err)s" -msgstr "" -"一致性组 %(cg_name)s 的快照 %(cgsnapshot_id)s: 删除失败\n" -"%(err)s" - -#, python-format -msgid "" -"Snapshot %(id)s has been asked to be deleted while waiting for it to become " -"available. Perhaps a concurrent request was made." -msgstr "在等待快照 %(id)s 变为可用时请求删除该快照。可能发出了并行请求。" - -#, python-format -msgid "" -"Snapshot %(id)s was found in state %(state)s rather than 'deleting' during " -"cascade delete." -msgstr "" -"级联删除期间,发现快照 %(id)s 处于 %(state)s 状态而不是“deleting”状态。" - -#, python-format -msgid "Snapshot %(snapshot_id)s could not be found." -msgstr "快照 %(snapshot_id)s 没有找到。" - -#, python-format -msgid "Snapshot %(snapshot_id)s has no metadata with key %(metadata_key)s." -msgstr "快照 %(snapshot_id)s 没有任何具有键 %(metadata_key)s 的元数据。" - -#, python-format -msgid "" -"Snapshot %(src_snapshot_name)s: clone failed\n" -"%(err)s" -msgstr "" -"快照 %(src_snapshot_name)s: 克隆失败\n" -"%(err)s" - -#, python-format -msgid "Snapshot %s : Delete Failed\n" -msgstr "快照 %s : 删除失败\n" - -#, python-format -msgid "Snapshot %s must not be part of a group." -msgstr "快照 %s 不能属于某个组。" - -#, python-format -msgid "Snapshot '%s' doesn't exist on array." -msgstr "快照“%s”在阵列上不存在。" - -msgid "Snapshot already managed." -msgstr "快照已管理。" - -msgid "" -"Snapshot can't be taken individually on a volume that is part of a " -"Consistency Group" -msgstr "不能对卷单独地进行快照操作,因为它是一致性组的一部分" - -#, python-format -msgid "" -"Snapshot cannot be created because volume %(vol_id)s is not available, " -"current volume status: %(vol_status)s." -msgstr "无法创建快照,因为卷 %(vol_id)s 不可用,当前卷状态为 %(vol_status)s。" - -msgid "Snapshot cannot be created while volume is migrating." 
-msgstr "无法在迁移卷时创建快照。" - -msgid "" -"Snapshot delete can't be done individually on a volume that is part of a " -"Consistency Group" -msgstr "无法完成对卷单独进行快照删除的操作,因为它是一致性组的一部分" - -#, python-format -msgid "" -"Snapshot for Consistency Group %(cg_name)s: create failed\n" -"%(err)s" -msgstr "" -"一致性组快照 %(cg_name)s: 创建失败\n" -"%(err)s" - -msgid "Snapshot of secondary replica is not allowed." -msgstr "不允许获取辅助副本的快照。" - -#, python-format -msgid "Snapshot of volume not supported in state: %s." -msgstr "不支持对处于以下状态的卷生成快照:%s。" - -#, python-format -msgid "Snapshot res \"%s\" that is not deployed anywhere?" -msgstr "未在任何位置部署的快照资源“%s”?" - -#, python-format -msgid "Snapshot status %(cur)s not allowed for update_snapshot_status" -msgstr "对于 update_snapshot_status,不允许快照状态 %(cur)s" - -msgid "Snapshot status must be \"available\" to clone." -msgstr "快照状态必须为“可用”,才能进行克隆。" - -#, python-format -msgid "" -"Snapshot to be backed up must be available, but the current status is \"%s\"." -msgstr "要备份的快照必须可用,但当前状态为“%s”。" - -#, python-format -msgid "Snapshot with id of %s could not be found." -msgstr "找不到标识为 %s 的快照。" - -#, python-format -msgid "" -"Snapshot: %(snapshotname)s, create failed\n" -"%(err)s" -msgstr "" -"快照: %(snapshotname)s, 创建失败\n" -"%(err)s" - -#, python-format -msgid "" -"Snapshot='%(snap)s' does not exist in base image='%(base)s' - aborting " -"incremental backup" -msgstr "基本映像“%(base)s”中不存在快照“%(snap)s”- 正在异常中止增量备份" - -#, python-format -msgid "Snapshots are not supported for this volume format: %s" -msgstr "以下卷格式不支持快照:%s" - -#, python-format -msgid "Socket error: %(arg)s." -msgstr "套接字错误:%(arg)s。" - -msgid "SolidFire Cinder Driver exception" -msgstr "发生“SolidFire Cinder 驱动程序”异常" - -msgid "Sort direction array size exceeds sort key array size." -msgstr "排序方向阵列大小超过排序键阵列大小。" - -msgid "" -"Source CG cannot be empty or in 'creating' or 'updating' state. No " -"cgsnapshot will be created." -msgstr "" -"源CG不能为空,也不能是“creating”或“updating”状态。将不会创建cgsnapshot。" - -msgid "Source CG is empty. 
No consistency group will be created." -msgstr "源 CG 为空。将不会创建任何一致性组。" - -msgid "Source Group is empty. No group will be created." -msgstr "源组为空。将不会创建任何组。" - -msgid "" -"Source group cannot be empty or in 'creating' or 'updating' state. No group " -"snapshot will be created." -msgstr "源组不能为空,也不能是“creating”或“updating”状态。将不会创建组快照。" - -msgid "Source host details not found." -msgstr "找不到源主机详细信息。" - -msgid "Source volume device ID is required." -msgstr "需要源卷设备标识。" - -msgid "Source volume not mid-migration." -msgstr "源卷未在迁移中。" - -msgid "SpaceInfo returned byarray is invalid" -msgstr "阵列返回的 SpaceInfo 无效" - -#, python-format -msgid "" -"Specified host to map to volume %(vol)s is in unsupported host group with " -"%(group)s." -msgstr "" -"要映射至卷 %(vol)s 的所指定主机位于具有 %(group)s 的不受支持的主机组中。" - -msgid "Specified logical volume does not exist." -msgstr "所指定的逻辑卷不存在。" - -#, python-format -msgid "Specified snapshot group with id %s could not be found." -msgstr "找不到标识为 %s 的指定快照组。" - -msgid "Specifies IP pool to use for volume" -msgstr "为使用卷指定IP池。" - -msgid "" -"Specifies number of replicas for each volume. Can only be increased once " -"volume is created" -msgstr "为每一个卷指定副本的数量。一旦卷被创建,其副本数量只能被增加。" - -msgid "Specify a password or private_key" -msgstr "请指定密码或 private_key" - -msgid "Specify group type name, description or a combination thereof." -msgstr "在其中指定组类型的名称、描述或者两者的组合。" - -msgid "Specify san_password or san_private_key" -msgstr "指定san_password或者san_private_key" - -msgid "" -"Specify volume type name, description, is_public or a combination thereof." -msgstr "指定卷类型名称、描述、is_public 或它们的组合。" - -msgid "Split pair error." -msgstr "拆分对时出错。" - -msgid "Split replication failed." -msgstr "拆分复制失败。" - -msgid "Start LUNcopy error." -msgstr "启动 LUNcopy 时发生错误。" - -msgid "State" -msgstr "状态" - -#, python-format -msgid "State of node is wrong. Current state is %s." -msgstr "节点的状态错误。当前状态为 %s。" - -msgid "Status" -msgstr "状态" - -msgid "Stop snapshot error." 
-msgstr "停止快照时发生错误。" - -#, python-format -msgid "Storage Configuration Service not found on %(storageSystemName)s." -msgstr "在 %(storageSystemName)s 上,找不到存储器配置服务。" - -#, python-format -msgid "Storage HardwareId mgmt Service not found on %(storageSystemName)s." -msgstr "在 %(storageSystemName)s 上找不到存储器硬件标识管理服务。" - -#, python-format -msgid "Storage Profile %s not found." -msgstr "找不到存储器概要文件 %s。" - -#, python-format -msgid "Storage Relocation Service not found on %(storageSystemName)s." -msgstr "在 %(storageSystemName)s 上找不到存储器重定位服务。" - -#, python-format -msgid "Storage family %s is not supported." -msgstr "不支持存储器系列 %s。" - -#, python-format -msgid "Storage group %(storageGroupName)s was not deleted successfully" -msgstr "未成功删除存储器组 %(storageGroupName)s" - -#, python-format -msgid "Storage host %(svr)s not detected, verify name" -msgstr "未检测到存储器主机 %(svr)s,请验证名称" - -msgid "Storage pool is not configured." -msgstr "未配置存储池。" - -#, python-format -msgid "Storage profile: %(storage_profile)s not found." -msgstr "找不到存储器概要文件 %(storage_profile)s。" - -msgid "Storage resource could not be found." -msgstr "找不到存储资源。" - -msgid "Storage system id not set." -msgstr "未设置存储系统标识。" - -#, python-format -msgid "Storage system not found for pool %(poolNameInStr)s." -msgstr "找不到池 %(poolNameInStr)s 的存储系统。" - -#, python-format -msgid "StorageSystem %(array)s is not found." -msgstr "找不到存储系统 %(array)s。" - -#, python-format -msgid "" -"Successfully renamed %(num_vols)s volumes and %(num_cgs)s consistency groups " -"from cluster %(current)s to %(new)s" -msgstr "" -"成功重命名%(num_vols)s卷和 %(num_cgs)s一致性组从集群%(current)s到%(new)s" - -#, python-format -msgid "" -"Sum of child usage '%(sum)s' is greater than free quota of '%(free)s' for " -"project '%(proj)s' for resource '%(res)s'. 
Please lower the limit or usage " -"for one or more of the following projects: '%(child_ids)s'" -msgstr "" -"子代使用率之和“%(sum)s”大于资源“%(res)s”的项目“%(proj)s”的可用配" -"额“%(free)s”。请降低以下一个或多个项目的限制或使用率“%(child_ids)s”" - -msgid "Switch over pair error." -msgstr "切换对时出错。" - -msgid "Sync pair error." -msgstr "同步对时出错。" - -#, python-format -msgid "Synology driver authentication failed: %(reason)s." -msgstr "Synology驱动认证失败:%(reason)s。" - -#, python-format -msgid "System %(id)s found with bad password status - %(pass_status)s." -msgstr "发现系统 %(id)s 的密码状态无效 - %(pass_status)s。" - -#, python-format -msgid "System %(id)s found with bad status - %(status)s." -msgstr "找到具有以下不正确状态的系统 %(id)s:%(status)s。" - -msgid "System does not support compression." -msgstr "系统不支持压缩。" - -msgid "System is busy, retry operation." -msgstr "系统繁忙,请重试操作。" - -#, python-format -msgid "" -"TSM [%(tsm)s] was not found in CloudByte storage for account [%(account)s]." -msgstr "在帐户 [%(account)s] 的 CloudByte 存储器中找不到 TSM [%(tsm)s]。" - -msgid "Target group type is still in use." -msgstr "目标组类型仍在使用中。" - -msgid "Target volume type is still in use." -msgstr "目标卷类型仍在使用中。" - -#, python-format -msgid "" -"Task did not complete in %d secs. Operation timed out. Task in CoprHD will " -"continue" -msgstr "任务在 %d 秒内没有完成,操作超时,CoprHD 中的任务将继续" - -#, python-format -msgid "Task: %(task_id)s is failed with error: %(error_message)s" -msgstr "任务 %(task_id)s 失败,错误信息为: %(error_message)s" - -#, python-format -msgid "Tenant %s: not found" -msgstr "找不到租户:%s" - -msgid "Terminate connection failed" -msgstr "终止连接发生故障" - -msgid "Terminate connection unable to connect to backend." -msgstr "终止连接无法连接至后端。" - -#, python-format -msgid "Terminate volume connection failed: %(err)s" -msgstr "终止卷连接失败:%(err)s" - -#, python-format -msgid "The %(type)s %(id)s source to be replicated was not found." -msgstr "找不到要复制的 %(type)s %(id)s 源。" - -msgid "" -"The 'sort_key' and 'sort_dir' parameters are deprecated and cannot be used " -"with the 'sort' parameter." 
-msgstr "" -"“sort_key”和“sort_dir”参数已建议不要使用,并且不能与“sort”参数配合使用。" - -msgid "The EQL array has closed the connection." -msgstr "EQL 阵列已关闭连接。" - -#, python-format -msgid "" -"The GPFS filesystem %(fs)s is not at the required release level. Current " -"level is %(cur)s, must be at least %(min)s." -msgstr "" -"GPFS 文件系统 %(fs)s 未处于所要求的发行版级别。当前级别为 %(cur)s,而要求的级" -"别必须至少为 %(min)s。" - -msgid "The IP Address was not found." -msgstr "找不到 IP 地址。" - -#, python-format -msgid "" -"The WebDAV request failed. Reason: %(msg)s, Return code/reason: %(code)s, " -"Source Volume: %(src)s, Destination Volume: %(dst)s, Method: %(method)s." -msgstr "" -"WebDAV 请求失败。原因为 %(msg)s,返回码/原因码为 %(code)s,源卷为 %(src)s,目" -"标卷为 %(dst)s,方法为 %(method)s。" - -msgid "" -"The above error may show that the database has not been created.\n" -"Please create a database using 'cinder-manage db sync' before running this " -"command." -msgstr "" -"上面的错误可能指示尚未创建数据库。\n" -"在运行此命令之前,请使用“cinder-manage db sync”来创建数据库。" - -#, python-format -msgid "" -"The array does not support the storage pool setting for SLO %(slo)s and " -"workload %(workload)s. Please check the array for valid SLOs and workloads." -msgstr "" -"该阵列不支持 SLO %(slo)s 和工作负载 %(workload)s 的存储池设置。请检查该阵列以" -"获取有效 SLO 和工作负载。" - -msgid "The authentication service failed to reply with 401" -msgstr "认证服务失败,返回401" - -msgid "" -"The back-end where the volume is created does not have replication enabled." -msgstr "创建该卷的后端未启用复制。" - -#, python-format -msgid "" -"The command %(cmd)s failed. (ret: %(ret)s, stdout: %(out)s, stderr: %(err)s)" -msgstr "" -"命令 %(cmd)s 失败。(返回为 %(ret)s,标准输出为 %(out)s,标准错误为 %(err)s)" - -msgid "The copy should be primary or secondary" -msgstr "副本应为主副本或者辅助副本" - -#, python-format -msgid "" -"The creation of a logical device could not be completed. 
(LDEV: %(ldev)s)" -msgstr "未能完成逻辑设备的创建。(逻辑设备:%(ldev)s)" - -msgid "The decorated method must accept either a volume or a snapshot object" -msgstr "已装饰的方法必须接受卷或快照对象" - -msgid "The decorated method must accept image_meta." -msgstr "已装饰的方法必须接受 image_meta。" - -#, python-format -msgid "The device in the path %(path)s is unavailable: %(reason)s" -msgstr "路径%(path)s 指向的设备不可用:%(reason)s" - -#, python-format -msgid "The end time (%(end)s) must be after the start time (%(start)s)." -msgstr "结束时间 (%(end)s) 必须在开始时间 (%(start)s) 之后。" - -#, python-format -msgid "The extraspec: %(extraspec)s is not valid." -msgstr "额外规范 %(extraspec)s 无效。" - -#, python-format -msgid "The failed-over volume could not be deleted: %s" -msgstr "无法删除已故障转移的卷:%s" - -#, python-format -msgid "The following elements are required: %s" -msgstr "需要下列元素:%s" - -msgid "The host group or iSCSI target could not be added." -msgstr "未能添加主机组或 iSCSI 目标。" - -msgid "The host group or iSCSI target was not found." -msgstr "找不到主机组或 iSCSI 目标。" - -msgid "" -"The host is not ready to be failed back. Please resynchronize the volumes " -"and resume replication on the 3PAR backends." -msgstr "主机未准备好故障返回。请重新同步卷并在 3PAR 后端上继续进行复制。" - -msgid "" -"The host is not ready to be failed back. Please resynchronize the volumes " -"and resume replication on the LeftHand backends." -msgstr "主机未准备好故障返回。请重新同步卷并在 LeftHand 后端上继续进行复制。" - -msgid "" -"The host is not ready to be failed back. Please resynchronize the volumes " -"and resume replication on the Storwize backends." -msgstr "主机未准备好故障返回。请重新同步卷并在 Storwize 后端上继续进行复制。" - -#, python-format -msgid "The iSCSI CHAP user %(user)s does not exist." -msgstr "iSCSI CHAP 用户 %(user)s 不存在。" - -msgid "The key cannot be None." -msgstr "键不能为“无”。" - -#, python-format -msgid "The logical device for specified %(type)s %(id)s was already deleted." -msgstr "已删除所指定 %(type)s %(id)s 的逻辑设备。" - -#, python-format -msgid "The method %(method)s is timed out. 
(timeout value: %(timeout)s)" -msgstr "方法 %(method)s 超时。(超时值:%(timeout)s)" - -msgid "The method update_migrated_volume is not implemented." -msgstr "未实现方法 update_migrated_volume。" - -#, python-format -msgid "" -"The mount %(mount_path)s is not a valid Quobyte USP volume. Error: %(exc)s" -msgstr "安装 %(mount_path)s 不是有效 Quobyte USP 卷。发生错误:%(exc)s" - -msgid "The name to use for storage instances created" -msgstr "存储实例使用的名称已创建。" - -msgid "The name to use for volumes created" -msgstr "卷使用的名称已创建。" - -#, python-format -msgid "The parameter of the storage backend. (config_group: %(config_group)s)" -msgstr "存储器后端的参数。(config_group:%(config_group)s)" - -msgid "The parent backup must be available for incremental backup." -msgstr "父备份必须可用于增量备份。" - -#, python-format -msgid "The provided snapshot '%s' is not a snapshot of the provided volume." -msgstr "所提供快照“%s”并非所提供卷的快照。" - -msgid "The redirect location of the authentication service is not provided" -msgstr "未提供认证服务的重定向地址" - -msgid "" -"The reference to the volume in the backend should have the format " -"file_system/volume_name (volume_name cannot contain '/')" -msgstr "" -"对后端中的卷的引用应具有以下格式:file_system/volume_name(volume_name 不能包" -"含“/”)" - -#, python-format -msgid "The remote retention count must be %s or less." -msgstr "远程保留计数不得高于 %s。" - -msgid "" -"The replication mode was not configured correctly in the volume type " -"extra_specs. If replication:mode is periodic, replication:sync_period must " -"also be specified and be between 300 and 31622400 seconds." -msgstr "" -"未在卷类型 extra_specs 中正确配置复制方式。如果 replication:mode 为 " -"periodic,那么必须同时指定 replication:sync_period 并且周期必须介于 300 秒到 " -"31622400 秒之间。" - -#, python-format -msgid "The replication sync period must be at least %s seconds." -msgstr "同步复制周期必须至少为 %s 秒。" - -#, python-format -msgid "" -"The requested size : %(requestedSize)s is not the same as resulting size: " -"%(resultSize)s." 
-msgstr "所请求大小 %(requestedSize)s 与生成的大小 %(resultSize)s 不同。" - -#, python-format -msgid "The resource %(resource)s was not found." -msgstr "找不到资源 %(resource)s。" - -msgid "The results are invalid." -msgstr "结果无效。" - -#, python-format -msgid "The retention count must be %s or less." -msgstr "保留计数不得高于 %s。" - -msgid "The snapshot cannot be created when the volume is in error status." -msgstr "当卷处于错误状态时,无法创建快照。" - -msgid "The snapshot cannot be created when the volume is in maintenance mode." -msgstr "当卷处于维护方式时,无法创建快照。" - -#, python-format -msgid "The snapshot is unavailable: %(data)s" -msgstr "快照不可用:%(data)s" - -msgid "The source volume for this WebDAV operation not found." -msgstr "找不到此 WebDAV 操作的源卷。" - -#, python-format -msgid "" -"The source volume type '%(src)s' is different than the destination volume " -"type '%(dest)s'." -msgstr "原卷类型'%(src)s'与目标卷'%(dest)s'不一致。" - -#, python-format -msgid "The source volume type '%s' is not available." -msgstr "原卷类型'%s'不可用。" - -#, python-format -msgid "The specified %(desc)s is busy." -msgstr "指定的 %(desc)s 处于繁忙状态。" - -#, python-format -msgid "The specified LUN does not belong to the given pool: %s." -msgstr "指定的 LUN 不属于给定池:%s。" - -#, python-format -msgid "" -"The specified ldev %(ldev)s could not be managed. The ldev must not be " -"mapping." -msgstr "未能管理指定的逻辑设备 %(ldev)s。该逻辑设备不能是映射。" - -#, python-format -msgid "" -"The specified ldev %(ldev)s could not be managed. The ldev must not be " -"paired." -msgstr "未能管理指定的逻辑设备 %(ldev)s。该逻辑设备不能成对。" - -#, python-format -msgid "" -"The specified ldev %(ldev)s could not be managed. The ldev size must be in " -"multiples of gigabyte." -msgstr "未能管理指定的逻辑设备 %(ldev)s。逻辑设备大小必须为千兆字节的倍数。" - -#, python-format -msgid "" -"The specified ldev %(ldev)s could not be managed. The volume type must be DP-" -"VOL." -msgstr "未能管理指定的逻辑设备 %(ldev)s。卷类型必须为 DP-VOL。" - -#, python-format -msgid "" -"The specified operation is not supported. The volume size must be the same " -"as the source %(type)s. 
(volume: %(volume_id)s)" -msgstr "" -"指定的操作不受支持。卷大小必须与源 %(type)s 大小相同。(卷:%(volume_id)s)" - -msgid "The specified vdisk is mapped to a host." -msgstr "指定的 vdisk 已映射到主机。" - -msgid "The specified volume is mapped to a host." -msgstr "所指定的卷已映射至主机。" - -#, python-format -msgid "" -"The storage array password for %s is incorrect, please update the configured " -"password." -msgstr "%s 的存储阵列密码不正确,请更新所配置密码。" - -#, python-format -msgid "The storage backend can be used. (config_group: %(config_group)s)" -msgstr "可使用存储器后端。(config_group:%(config_group)s)" - -#, python-format -msgid "" -"The storage device does not support %(prot)s. Please configure the device to " -"support %(prot)s or switch to a driver using a different protocol." -msgstr "" -"存储器设备不支持 %(prot)s。请配置该设备以支持 %(prot)s 或切换至使用另一协议的" -"驱动程序。" - -#, python-format -msgid "" -"The striped meta count of %(memberCount)s is too small for volume: " -"%(volumeName)s, with size %(volumeSize)s." -msgstr "" -"分割元计数 %(memberCount)s 对于卷%(volumeName)s 太小,大小为 %(volumeSize)s。" - -#, python-format -msgid "The token is not generated by authentication service. %s" -msgstr "认证服务未生成令牌. %s" - -#, python-format -msgid "The token is not generated by authentication service.%s" -msgstr "认证服务未生成令牌.%s" - -#, python-format -msgid "" -"The type of metadata: %(metadata_type)s for volume/snapshot %(id)s is " -"invalid." -msgstr "卷/快照 %(id)s 的元数据类型 %(metadata_type)s无效。" - -#, python-format -msgid "" -"The volume %(volume_id)s could not be extended. The volume type must be " -"Normal." -msgstr "未能扩展卷 %(volume_id)s。卷类型必须为“常规”。" - -#, python-format -msgid "" -"The volume %(volume_id)s could not be unmanaged. The volume type must be " -"%(volume_type)s." -msgstr "未能取消管理卷 %(volume_id)s。卷类型必须为 %(volume_type)s。" - -#, python-format -msgid "The volume %(volume_id)s is managed successfully. (LDEV: %(ldev)s)" -msgstr "已成功管理卷 %(volume_id)s。(逻辑设备:%(ldev)s)" - -#, python-format -msgid "The volume %(volume_id)s is unmanaged successfully. 
(LDEV: %(ldev)s)" -msgstr "已成功取消管理卷 %(volume_id)s。(逻辑设备:%(ldev)s)" - -#, python-format -msgid "The volume %(volume_id)s to be mapped was not found." -msgstr "找不到要映射的卷 %(volume_id)s。" - -msgid "The volume cannot accept transfer in maintenance mode." -msgstr "在维护方式下卷无法接受传输。" - -msgid "The volume cannot be attached in maintenance mode." -msgstr "在维护方式下无法连接卷。" - -msgid "The volume cannot be detached in maintenance mode." -msgstr "在维护方式下无法拆离卷。" - -msgid "The volume cannot be updated during maintenance." -msgstr "维护期间无法更新卷。" - -msgid "The volume connection cannot be initialized in maintenance mode." -msgstr "在维护方式下无法初始化卷连接。" - -msgid "The volume driver requires the iSCSI initiator name in the connector." -msgstr "卷驱动程序在连接器中需要 iSCSI 发起方名称。" - -msgid "" -"The volume is currently busy on the 3PAR and cannot be deleted at this time. " -"You can try again later." -msgstr "该卷当前在 3PAR 上繁忙,此时无法删除。可稍后重试。" - -msgid "The volume label is required as input." -msgstr "需要卷标作为输入。" - -#, python-format -msgid "There are no resources available for use. (resource: %(resource)s)" -msgstr "不存在任何可供使用的资源。(资源:%(resource)s)" - -msgid "There are no valid ESX hosts." -msgstr "不存在有效的 ESX 主机。" - -msgid "There are no valid datastores." -msgstr "不存在任何有效数据存储器。" - -#, python-format -msgid "" -"There is no designation of the %(param)s. The specified storage is essential " -"to manage the volume." -msgstr "不存在任何对 %(param)s 的指定。在管理卷时,必须使用指定的存储器。" - -msgid "" -"There is no designation of the ldev. The specified ldev is essential to " -"manage the volume." -msgstr "不存在任何对逻辑设备的指定。在管理卷时,必须使用指定的逻辑设备。" - -msgid "There is no metadata in DB object." -msgstr "数据库对象中没有元数据。" - -#, python-format -msgid "There is no share which can host %(volume_size)sG" -msgstr "不存在任何可主管 %(volume_size)sG 的共享项" - -#, python-format -msgid "There is no share which can host %(volume_size)sG." 
-msgstr "不存在任何可托管 %(volume_size)sG 的共享项。" - -#, python-format -msgid "There is no such action: %s" -msgstr "没有该动作:%s" - -msgid "There is no virtual disk device." -msgstr "不存在任何虚拟盘设备。" - -#, python-format -msgid "There was an error adding the volume to the remote copy group: %s." -msgstr "将该卷添加至远程复制组时发生了错误:%s。" - -#, python-format -msgid "There was an error creating the cgsnapshot: %s" -msgstr "创建 cgsnapshot 时发生错误:%s" - -#, python-format -msgid "There was an error creating the remote copy group: %s." -msgstr "创建远程复制组时发生了错误:%s。" - -#, python-format -msgid "" -"There was an error setting the sync period for the remote copy group: %s." -msgstr "为远程复制组设置同步周期时发生了错误:%s。" - -#, python-format -msgid "" -"There was an error setting up a remote copy group on the 3PAR arrays: " -"('%s'). The volume will not be recognized as replication type." -msgstr "" -"在 3PAR 阵列上设置远程复制组时发生了错误:(“%s”)。该卷未被识别为复制类型。" - -#, python-format -msgid "" -"There was an error setting up a remote schedule on the LeftHand arrays: " -"('%s'). The volume will not be recognized as replication type." -msgstr "" -"在 LeftHand 阵列上设置远程调度时发生了错误:(“%s”)。该卷未被识别为复制类" -"型。" - -#, python-format -msgid "There was an error starting remote copy: %s." -msgstr "启动远程复制时发生了错误:%s。" - -#, python-format -msgid "There's no Gluster config file configured (%s)" -msgstr "未配置 Gluster 配置文件 (%s)" - -#, python-format -msgid "There's no NFS config file configured (%s)" -msgstr "未配置 NFS 配置文件 (%s)" - -#, python-format -msgid "" -"There's no Quobyte volume configured (%s). Example: quobyte:///" -"" -msgstr "未配置 Quobyte 卷 (%s)。示例:quobyte:///" - -msgid "Thin provisioning not supported on this version of LVM." -msgstr "在此版本的 LVM 上,不支持瘦供应。" - -msgid "This driver does not support deleting in-use snapshots." -msgstr "此驱动程序不支持对正在使用的快照进行删除。" - -msgid "This driver does not support snapshotting in-use volumes." -msgstr "此驱动程序不支持对正在使用的卷生成快照。" - -msgid "This request was rate-limited." 
-msgstr "这个请求受到频率限制。" - -#, python-format -msgid "" -"This system platform (%s) is not supported. This driver supports only Win32 " -"platforms." -msgstr "此系统平台 (%s) 不受支持。此驱动程序仅支持 Win32 平台。" - -#, python-format -msgid "Tier Policy Service not found for %(storageSystemName)s." -msgstr "对于 %(storageSystemName)s,找不到分层策略服务。" - -#, python-format -msgid "Timed out while waiting for Nova update for creation of snapshot %s." -msgstr "等待 Nova 更新(以便创建快照 %s)时超时。" - -#, python-format -msgid "" -"Timed out while waiting for Nova update for deletion of snapshot %(id)s." -msgstr "等待 Nova 更新(以便删除快照 %(id)s)时超时。" - -#, python-format -msgid "Timeout while calling %s " -msgstr "调用 %s 时超时 " - -#, python-format -msgid "Timeout while requesting %(service)s API." -msgstr "请求 %(service)s API 时超时。" - -#, python-format -msgid "Timeout while requesting capabilities from backend %(service)s." -msgstr "从后端请求 %(service)s 功能时超时。" - -#, python-format -msgid "Transfer %(transfer_id)s could not be found." -msgstr "无法找到转换器%(transfer_id)s" - -#, python-format -msgid "" -"Transfer %(transfer_id)s: Volume id %(volume_id)s in unexpected state " -"%(status)s, expected awaiting-transfer" -msgstr "" -"传输 %(transfer_id)s:具有标识 %(volume_id)s 的卷处于意外状态 %(status)s,需" -"要的状态为正在等待传输" - -#, python-format -msgid "" -"Trying to import backup metadata from id %(meta_id)s into backup %(id)s." -msgstr "正在尝试将备份元数据从标识 %(meta_id)s 导入到备份 %(id)s。" - -#, python-format -msgid "" -"Tune volume task stopped before it was done: volume_name=%(volume_name)s, " -"task-status=%(status)s." -msgstr "" -"调整卷任务未完成就已停止:volume_name=%(volume_name)s, task-status=" -"%(status)s。" - -#, python-format -msgid "" -"Type %(type_id)s is already associated with another qos specs: " -"%(qos_specs_id)s" -msgstr "已使类型 %(type_id)s 与另一 qos 规范 %(qos_specs_id)s 关联" - -msgid "Type access modification is not applicable to public group type." -msgstr "类型访问修改不适用于公共组类型。" - -msgid "Type access modification is not applicable to public volume type." 
-msgstr "类型访问修改不适用于公共卷类型。" - -msgid "Type cannot be converted into NaElement." -msgstr "此类型不能转换为 NaElement。" - -#, python-format -msgid "TypeError: %s" -msgstr "TypeError:%s" - -msgid "URI should end with /tag" -msgstr "URI 应该以 /tag 结尾" - -#, python-format -msgid "UUIDs %s are in both add and remove volume list." -msgstr "UUID %s 同时位于“添加卷”和“移除卷”列表中。" - -#, python-format -msgid "Unable to access the Storwize back-end for volume %s." -msgstr "无法访问卷 %s 的 Storwize 后端。" - -msgid "Unable to access the backend storage via file handle." -msgstr "通过文件句柄无法访问后端存储器。" - -#, python-format -msgid "Unable to access the backend storage via the path %(path)s." -msgstr "无法通过路径 %(path)s 访问后端存储器。" - -#, python-format -msgid "Unable to add Cinder host to apphosts for space %(space)s" -msgstr "无法将 Cinder 主机添加至空间 %(space)s 的 apphosts" - -#, python-format -msgid "Unable to complete failover of %s." -msgstr "无法完成 %s 的故障转移。" - -msgid "Unable to connect or find connection to host" -msgstr "无法连接至主机,或找不到与主机的连接" - -#, python-format -msgid "Unable to create consistency group %s" -msgstr "无法创建一致性组 %s" - -msgid "Unable to create lock. Coordination backend not started." -msgstr "无法创建锁定。协调后端未启动。" - -#, python-format -msgid "" -"Unable to create or get default storage group for FAST policy: " -"%(fastPolicyName)s." -msgstr "无法创建或获取快速策略 %(fastPolicyName)s 的缺省存储器组。 " - -#, python-format -msgid "Unable to create replica clone for volume %s." -msgstr "无法为卷 %s 创建副本克隆。" - -#, python-format -msgid "Unable to create server object for initiator %(name)s" -msgstr "无法为发起者 %(name)s 创建服务器对象" - -#, python-format -msgid "Unable to create the relationship for %s." -msgstr "无法为 %s 创建该关系。" - -#, python-format -msgid "Unable to create volume %(name)s from %(snap)s." -msgstr "无法通过 %(snap)s 创建卷 %(name)s。" - -#, python-format -msgid "Unable to create volume %(name)s from %(vol)s." 
-msgstr "无法通过 %(vol)s 创建卷 %(name)s。" - -#, python-format -msgid "Unable to create volume %s" -msgstr "无法创建卷 %s" - -msgid "Unable to create volume. Backend down." -msgstr "无法创建卷。后端已关闭。" - -#, python-format -msgid "Unable to delete Consistency Group snapshot %s" -msgstr "无法删除一致性组快照 %s" - -#, python-format -msgid "Unable to delete snapshot %(id)s, status: %(status)s." -msgstr "无法删除快照 %(id)s,状态:%(status)s。" - -#, python-format -msgid "Unable to delete snapshot policy on volume %s." -msgstr "无法删除卷 %s 上的快照策略。" - -#, python-format -msgid "" -"Unable to delete the target volume for volume %(vol)s. Exception: %(err)s." -msgstr "无法删除卷 %(vol)s 的目标卷。异常:%(err)s。" - -msgid "" -"Unable to detach volume. Volume status must be 'in-use' and attach_status " -"must be 'attached' to detach." -msgstr "" -"无法拆离卷。卷状态必须为“in-use”,并且 attach_status 必须为“attached”才能拆" -"离。" - -#, python-format -msgid "" -"Unable to determine secondary_array from supplied secondary: %(secondary)s." -msgstr "无法根据所提供辅助项来确定 secondary_array:%(secondary)s。" - -#, python-format -msgid "Unable to determine snapshot name in Purity for snapshot %(id)s." -msgstr "在快照 %(id)s 的 Purity 中无法确定快照名称。" - -msgid "Unable to determine system id." -msgstr "无法确定系统标识。" - -msgid "Unable to determine system name." -msgstr "无法确定系统名称。" - -#, python-format -msgid "" -"Unable to do manage snapshot operations with Purity REST API version " -"%(api_version)s, requires %(required_versions)s." -msgstr "" -"无法对 Purity REST API 版本%(api_version)s 执行“管理快照”操作,需要版本 " -"%(required_versions)s。" - -#, python-format -msgid "" -"Unable to do replication with Purity REST API version %(api_version)s, " -"requires one of %(required_versions)s." -msgstr "" -"无法使用 Purity REST API 版本 %(api_version)s 执行复制,需要 " -"%(required_versions)s 的其中之一。" - -#, python-format -msgid "Unable to establish the partnership with the Storwize cluster %s." 
-msgstr "无法建立与 Storwize 集群 %s 的伙伴关系。" - -#, python-format -msgid "Unable to extend volume %s" -msgstr "无法扩展卷 %s" - -#, python-format -msgid "" -"Unable to fail-over the volume %(id)s to the secondary back-end, because the " -"replication relationship is unable to switch: %(error)s" -msgstr "无法将卷 %(id)s 故障转移至辅助后端,因为复制关系无法切换:%(error)s" - -msgid "" -"Unable to failback to \"default\", this can only be done after a failover " -"has completed." -msgstr "无法故障返回至“default”,此操作只能在故障转换完成后进行。" - -#, python-format -msgid "Unable to failover to replication target:%(reason)s)." -msgstr "无法故障转移至复制目标:%(reason)s)。" - -msgid "Unable to fetch connection information from backend." -msgstr "无法从后端访存连接信息。" - -#, python-format -msgid "Unable to fetch connection information from backend: %(err)s" -msgstr "无法从后端访存连接信息:%(err)s" - -#, python-format -msgid "Unable to find Purity ref with name=%s" -msgstr "找不到名称为 %s 的 Purity ref" - -#, python-format -msgid "Unable to find Volume Group: %(vg_name)s" -msgstr "找不到卷组: %(vg_name)s" - -msgid "Unable to find any active VPSA controller" -msgstr "无法找到任何活跃的VPSA控制器。" - -msgid "Unable to find failover target, no secondary targets configured." -msgstr "找不到故障转移目标,未配置辅助目标。" - -msgid "Unable to find iSCSI mappings." -msgstr "找不到 iSCSI 映射。" - -#, python-format -msgid "Unable to find server object for initiator %(name)s" -msgstr "无法找到发起者为 %(name)s 的服务器对象。" - -#, python-format -msgid "Unable to find ssh_hosts_key_file: %s" -msgstr "找不到 ssh_hosts_key_file:%s" - -msgid "Unable to find system log file!" -msgstr "不能发现系统日志文件" - -#, python-format -msgid "" -"Unable to find viable pg snapshot to use forfailover on selected secondary " -"array: %(id)s." -msgstr "找不到可行 pg 快照,无法在所选辅助阵列上使用故障转移:%(id)s。" - -#, python-format -msgid "" -"Unable to find viable secondary array fromconfigured targets: %(targets)s." 
-msgstr "无法根据所配置目标找到可行辅助阵列:%(targets)s。" - -#, python-format -msgid "Unable to find volume %s" -msgstr "找不到卷 %s" - -#, python-format -msgid "Unable to get a block device for file '%s'" -msgstr "无法获取对应文件“%s”的块设备" - -#, python-format -msgid "" -"Unable to get configuration information necessary to create a volume: " -"%(errorMessage)s." -msgstr "无法获取创建卷所需要的配置信息:%(errorMessage)s。" - -msgid "Unable to get corresponding record for pool." -msgstr "无法为池获取相应的记录。" - -#, python-format -msgid "" -"Unable to get information on space %(space)s, please verify that the cluster " -"is running and connected." -msgstr "无法获取有关空间 %(space)s 的信息,请验证集群是否正在运行并且已连接。" - -msgid "" -"Unable to get list of IP addresses on this host, check permissions and " -"networking." -msgstr "无法获取此主机上的 IP 地址列表,请检查许可权和联网。" - -msgid "" -"Unable to get list of domain members, check that the cluster is running." -msgstr "无法获取域成员列表,请检查集群是否正在运行。" - -msgid "" -"Unable to get list of spaces to make new name. Please verify the cluster is " -"running." -msgstr "无法获取用于生成新名称的空间的列表。请验证集群是否正在运行。" - -#, python-format -msgid "Unable to get stats for backend_name: %s" -msgstr "无法获取 backend_name %s 的统计信息" - -msgid "Unable to get storage volume from job." -msgstr "无法通过作业获取存储器卷。" - -#, python-format -msgid "Unable to get target endpoints for hardwareId %(hardwareIdInstance)s." -msgstr "无法获取对应硬件标识 %(hardwareIdInstance)s 的目标端点。" - -msgid "Unable to get the name of the masking view." -msgstr "无法获取掩码视图的名称。" - -msgid "Unable to get the name of the portgroup." -msgstr "无法获取端口组的名称。" - -#, python-format -msgid "Unable to get the replication relationship for volume %s." -msgstr "无法获取卷 %s 的复制关系。" - -#, python-format -msgid "" -"Unable to import volume %(deviceId)s to cinder. It is the source volume of " -"replication session %(sync)s." -msgstr "无法将卷 %(deviceId)s 导入到 Cinder。它是复制会话 %(sync)s 的源卷。" - -#, python-format -msgid "" -"Unable to import volume %(deviceId)s to cinder. 
The external volume is not " -"in the pool managed by current cinder host." -msgstr "" -"无法将卷 %(deviceId)s 导入到 Cinder。外部卷不在由当前 Cinder 主机管理的池中。" - -#, python-format -msgid "" -"Unable to import volume %(deviceId)s to cinder. Volume is in masking view " -"%(mv)s." -msgstr "无法将卷 %(deviceId)s 导入到 Cinder。该卷位于掩码视图 %(mv)s 中。" - -#, python-format -msgid "Unable to load CA from %(cert)s %(e)s." -msgstr "无法从 %(cert)s %(e)s 装入 CA。" - -#, python-format -msgid "Unable to load cert from %(cert)s %(e)s." -msgstr "无法从 %(cert)s %(e)s 装入证书。" - -#, python-format -msgid "Unable to load key from %(cert)s %(e)s." -msgstr "无法从 %(cert)s %(e)s 装入密钥。" - -#, python-format -msgid "Unable to locate account %(account_name)s on Solidfire device" -msgstr "无法在 Solidfire 设备上找到帐户 %(account_name)s" - -#, python-format -msgid "Unable to locate an SVM that is managing the IP address '%s'" -msgstr "无法找到正在管理 IP 地址“%s”的 SVM" - -#, python-format -msgid "Unable to locate specified replay profiles %s " -msgstr "找不到指定重放概要文件 %s " - -#, python-format -msgid "" -"Unable to manage existing volume. Volume %(volume_ref)s already managed." -msgstr "无法管理现有卷。已管理卷 %(volume_ref)s。" - -#, python-format -msgid "Unable to manage volume %s" -msgstr "无法管理卷 %s" - -msgid "Unable to map volume" -msgstr "无法映射卷" - -msgid "Unable to map volume." -msgstr "无法映射卷。" - -msgid "Unable to parse attributes." -msgstr "无法解析属性。" - -#, python-format -msgid "" -"Unable to promote replica to primary for volume %s. No secondary copy " -"available." -msgstr "对于卷 %s,无法将副本升级为主副本。没有任何辅助副本可用。" - -msgid "" -"Unable to re-use a host that is not managed by Cinder with " -"use_chap_auth=True," -msgstr "无法在 use_chap_auth=True 的情况下复用并非由 Cinder 管理的主机," - -msgid "Unable to re-use host with unknown CHAP credentials configured." 
-msgstr "无法在配置了未知 CHAP 凭证的情况下复用主机。" - -#, python-format -msgid "Unable to rename volume %(existing)s to %(newname)s" -msgstr "无法将卷 %(existing)s 重命名为 %(newname)s" - -#, python-format -msgid "Unable to retrieve snapshot group with id of %s." -msgstr "无法检索标识为 %s 的快照组。" - -#, python-format -msgid "" -"Unable to retype %(specname)s, expected to receive current and requested " -"%(spectype)s values. Value received: %(spec)s" -msgstr "" -"无法对 %(specname)s 进行转型,需要接收当前的和请求的 %(spectype)s 值。接收到" -"的值:%(spec)s" - -#, python-format -msgid "" -"Unable to retype: A copy of volume %s exists. Retyping would exceed the " -"limit of 2 copies." -msgstr "" -"无法执行 retype:卷 %s 的副本已存在。执行 retype 将超过2 个副本的限制。" - -#, python-format -msgid "" -"Unable to retype: Current action needs volume-copy, it is not allowed when " -"new type is replication. Volume = %s" -msgstr "无法转型:当前操作需要卷拷贝,当新类型为复制时,不允许卷拷贝。卷为 %s" - -#, python-format -msgid "Unable to send requests: %s" -msgstr "不能发送请求:%s" - -#, python-format -msgid "" -"Unable to set up mirror mode replication for %(vol)s. Exception: %(err)s." -msgstr "无法对 %(vol)s 设置镜像方式复制。异常:%(err)s。" - -#, python-format -msgid "Unable to snap Consistency Group %s" -msgstr "无法为一致性组 %s 创建快照" - -msgid "Unable to terminate volume connection from backend." -msgstr "无法从后端终止卷连接。" - -#, python-format -msgid "Unable to terminate volume connection: %(err)s" -msgstr "无法终止卷连接:%(err)s" - -#, python-format -msgid "Unable to update consistency group %s" -msgstr "无法更新一致性组 %s" - -#, python-format -msgid "" -"Unable to verify initiator group: %(igGroupName)s in masking view " -"%(maskingViewName)s. " -msgstr "无法在掩码视图 %(maskingViewName)s 中验证发起方组 %(igGroupName)s。" - -msgid "Unacceptable parameters." -msgstr "无法接受的参数。" - -#, python-format -msgid "" -"Unexecpted mapping status %(status)s for mapping %(id)s. Attributes: " -"%(attr)s." -msgstr "映射 %(id)s 的意外映射状态 %(status)s。属性:%(attr)s。" - -#, python-format -msgid "" -"Unexpected CLI response: header/row mismatch. 
header: %(header)s, row: " -"%(row)s." -msgstr "出现意外 CLI 响应:头/行不匹配。头:%(header)s,行:%(row)s。" - -#, python-format -msgid "" -"Unexpected mapping status %(status)s for mapping%(id)s. Attributes: %(attr)s." -msgstr "映射 %(id)s 的意外映射状态 %(status)s。属性:%(attr)s。" - -#, python-format -msgid "Unexpected output. Expected [%(expected)s] but received [%(output)s]" -msgstr "意外输出。需要 [%(expected)s],但接收到 [%(output)s]" - -#, python-format -msgid "Unexpected over quota on %(name)s." -msgstr "%(name)s 超过预期配额。" - -msgid "Unexpected response from Nimble API" -msgstr "来自 Nimble API 的意外响应" - -msgid "Unexpected response from Tegile IntelliFlash API" -msgstr "Tegile IntelliFlash API 给出了意外响应" - -msgid "Unexpected status code" -msgstr "意外的状态码" - -#, python-format -msgid "" -"Unexpected status code from the switch %(switch_id)s with protocol " -"%(protocol)s for url %(page)s. Error: %(error)s" -msgstr "" -"对于 url %(page)s,带有协议 %(protocol)s 的交换机 %(switch_id)s 发出意外状态" -"码。错误:%(error)s" - -msgid "Unknown Gluster exception" -msgstr "Gluster 异常未知" - -msgid "Unknown NFS exception" -msgstr "NFS 异常未知" - -msgid "Unknown RemoteFS exception" -msgstr "RemoteFS 异常未知" - -msgid "Unknown SMBFS exception." -msgstr "SMBFS 异常未知。" - -msgid "Unknown Virtuozzo Storage exception" -msgstr "未知 Virtuozzo 存储器异常" - -msgid "Unknown action" -msgstr "操作未知" - -#, python-format -msgid "" -"Unknown if the volume: %s to be managed is already being managed by Cinder. " -"Aborting manage volume. Please add 'cinder_managed' custom schema property " -"to the volume and set its value to False. Alternatively, Set the value of " -"cinder config policy 'zfssa_manage_policy' to 'loose' to remove this " -"restriction." -msgstr "" -"不知道要管理的卷 %s 是否已由 Cinder 管理。正在异常中止管理卷。请" -"将“cinder_managed”定制模式属性添加至该卷,并将其值设置为 False。或者,将 " -"Cinder 配置策略“zfssa_manage_policy”的值设置为“loose”以移除此限制。" - -#, python-format -msgid "" -"Unknown if the volume: %s to be managed is already being managed by Cinder. " -"Aborting manage volume. 
Please add 'cinder_managed' custom schema property " -"to the volume and set its value to False. Alternatively, set the value of " -"cinder config policy 'zfssa_manage_policy' to 'loose' to remove this " -"restriction." -msgstr "" -"不知道要管理的卷 %s 是否已由 Cinder 管理。正在异常中止管理卷。请" -"将“cinder_managed”定制模式属性添加至该卷,并将其值设置为 False。或者,将 " -"Cinder 配置策略“zfssa_manage_policy”的值设置为“loose”以移除此限制。" - -#, python-format -msgid "Unknown operation %s." -msgstr "未知操作 %s。" - -#, python-format -msgid "Unknown or unsupported command %(cmd)s" -msgstr "命令 %(cmd)s 未知或不受支持" - -#, python-format -msgid "Unknown protocol: %(protocol)s." -msgstr "未知协议:%(protocol)s。" - -#, python-format -msgid "Unknown quota resources %(unknown)s." -msgstr "配额资源 %(unknown)s 未知。" - -msgid "Unknown sort direction, must be 'desc' or 'asc'" -msgstr "排序方向未知,必须为“降序”或“升序”" - -msgid "Unknown sort direction, must be 'desc' or 'asc'." -msgstr "排序方向未知,必须为“降序”或“升序”。" - -#, python-format -msgid "Unknown/Unsupported HTTP method: %s" -msgstr "未知/不支持的HTTP方法: %s" - -msgid "Unmanage and cascade delete options are mutually exclusive." -msgstr "非管理选项与级联删除选项互斥。" - -msgid "Unmanage volume not implemented." -msgstr "未实现非管理卷。" - -msgid "Unmanaging of snapshots from 'failed-over' volumes is not allowed." -msgstr "不允许取消管理来自“已故障转移”的卷的快照。" - -msgid "Unmanaging of snapshots from failed-over volumes is not allowed." -msgstr "不允许取消管理来自已故障转移的卷的快照。" - -#, python-format -msgid "Unrecognized QOS keyword: \"%s\"" -msgstr "不识别 QOS 关键字:“%s”" - -#, python-format -msgid "Unrecognized backing format: %s" -msgstr "无法识别支持格式:%s" - -#, python-format -msgid "Unrecognized read_deleted value '%s'" -msgstr "无法识别的 read_deleted 取值”%s“" - -#, python-format -msgid "" -"Unrecoverable Error: Versioned Objects in DB are capped to unknown version " -"%(version)s." 
-msgstr "不可恢复错误:数据库中的版本对象被未知版本%(version)s覆盖。" - -#, python-format -msgid "Unset gcs options: %s" -msgstr "取消设置 gcs 选项:%s" - -msgid "Unsupported Content-Type" -msgstr "不支持的Content-Type" - -msgid "" -"Unsupported Data ONTAP version. Data ONTAP version 7.3.1 and above is " -"supported." -msgstr "不支持该数据 ONTAP 版本。支持数据 ONTAP V7.3.1 和更高版本。" - -#, python-format -msgid "Unsupported backup metadata version (%s)" -msgstr "不支持备份元数据版本 (%s)" - -msgid "Unsupported backup metadata version requested" -msgstr "不支持请求的备份元数据版本" - -msgid "Unsupported backup verify driver" -msgstr "不支持备份验证驱动程序" - -#, python-format -msgid "Unsupported fields %s." -msgstr "不支持的域%s。" - -#, python-format -msgid "" -"Unsupported firmware on switch %s. Make sure switch is running firmware v6.4 " -"or higher" -msgstr "" -"在交换机 %s 上存在不受支持的固件。请确保交换机正在运行固件 V6.4 或更高版本" - -#, python-format -msgid "Unsupported volume format %s" -msgstr "不支持的卷格式:%s" - -#, python-format -msgid "Unsupported volume format: %s " -msgstr "以下卷格式不受支持:%s " - -msgid "Update QoS policy error." -msgstr "更新 QoS 策略时发生错误。" - -msgid "" -"Update and delete quota operations can only be made by an admin of immediate " -"parent or by the CLOUD admin." -msgstr "只能由直系父代的管理员或者云管理员执行更新和删除配额操作。" - -msgid "" -"Update and delete quota operations can only be made to projects in the same " -"hierarchy of the project in which users are scoped to." -msgstr "" -"只能对与用户作用域仅限于的项目位于同一层次结构中的项目执行更新和删除配额操" -"作。" - -msgid "Update list, doesn't include volume_id" -msgstr "更新列表未包含 volume_id" - -msgid "Updated At" -msgstr "已更新于" - -#, python-format -msgid "Updating volume metadata is not allowed for volumes in %s status." -msgstr "当卷状态为 %s 时,不允许更新该卷的元数据。" - -msgid "Upload to glance of attached volume is not supported." -msgstr "不支持上载至所连接卷的 Glance。" - -msgid "Use ALUA to associate initiator to host error." -msgstr "使用 ALUA 使启动程序与主机相关联时发生错误。" - -msgid "" -"Use CHAP to associate initiator to host error. Please check the CHAP " -"username and password." 
-msgstr "使用 CHAP 使启动程序与主机相关联时发生错误。请检查 CHAP 用户名和密码。" - -msgid "User ID" -msgstr "用户ID" - -msgid "User does not have admin privileges" -msgstr "用户没有管理员权限" - -msgid "User not authorized to perform WebDAV operations." -msgstr "用户无权执行 WebDAV 操作。" - -msgid "UserName is not configured." -msgstr "未配置 UserName。" - -msgid "UserPassword is not configured." -msgstr "未配置 UserPassword。" - -msgid "V2 rollback, volume is not in any storage group." -msgstr "V2 回滚,卷不在任何存储器组中。" - -msgid "V3 rollback" -msgstr "V3 回滚" - -msgid "VF is not enabled." -msgstr "未启用 VF。" - -#, python-format -msgid "VPool %(name)s ( %(vpooltype)s ) : not found" -msgstr "找不到VPool:%(name)s ( %(vpooltype)s ) " - -#, python-format -msgid "VV Set %s does not exist." -msgstr "VV 集 %s 不存在。" - -#, python-format -msgid "Valid consumer of QoS specs are: %s" -msgstr "以下是 QoS 规范的有效使用者:%s" - -#, python-format -msgid "Valid control location are: %s" -msgstr "以下是有效控制位置:%s" - -#, python-format -msgid "Validate volume connection failed (error: %(err)s)." -msgstr "验证卷连接失败(错误:%(err)s)。" - -#, python-format -msgid "" -"Value \"%(value)s\" is not valid for configuration option \"%(option)s\"" -msgstr "值“%(value)s”对于配置选项“%(option)s”无效" - -#, python-format -msgid "Value %(param)s for %(param_string)s is not a boolean." -msgstr "%(param_string)s 的值 %(param)s 不是布尔值。" - -msgid "Value required for 'scality_sofs_config'" -msgstr "“scality_sofs_config”的必需值" - -#, python-format -msgid "ValueError: %s" -msgstr "ValueError:%s" - -#, python-format -msgid "Vdisk %(name)s not involved in mapping %(src)s -> %(tgt)s." -msgstr "从 %(src)s 到 %(tgt)s 的映射中未涉及到 Vdisk %(name)s。" - -#, python-format -msgid "" -"Version %(req_ver)s is not supported by the API. Minimum is %(min_ver)s and " -"maximum is %(max_ver)s." -msgstr "" -"此 API 不支持版本 %(req_ver)s。最低版本为 %(min_ver)s,最高版本为 " -"%(max_ver)s。" - -#, python-format -msgid "VersionedObject %s cannot retrieve object by id." 
-msgstr "VersionedObject %s 无法按标识检索对象。" - -#, python-format -msgid "VersionedObject %s does not support conditional update." -msgstr "VersionedObject %s 不支持带条件更新。" - -#, python-format -msgid "Virtual volume '%s' doesn't exist on array." -msgstr "虚拟卷“%s”在阵列上不存在。" - -#, python-format -msgid "Vol copy job for dest %s failed." -msgstr "针对目标 %s 的卷复制作业失败。" - -#, python-format -msgid "Volume %(deviceID)s not found." -msgstr "找不到卷 %(deviceID)s。" - -#, python-format -msgid "Volume %(name)s could not be found. It might be already deleted" -msgstr "找不到卷 %(name)s。该卷可能已被删除" - -#, python-format -msgid "Volume %(name)s not found" -msgstr "找不到卷 %(name)s" - -#, python-format -msgid "" -"Volume %(name)s not found on the array. Cannot determine if there are " -"volumes mapped." -msgstr "数组中找不到卷 %(name)s。无法确定是否存在已映射的卷。" - -#, python-format -msgid "Volume %(name)s was created in VNX, but in %(state)s state." -msgstr "在 VNX 中创建了卷 %(name)s,但此卷处于 %(state)s 状态。" - -#, python-format -msgid "Volume %(name)s was not deactivated in time." -msgstr "卷 %(name)s 没有被及时释放。" - -#, python-format -msgid "" -"Volume %(name)s: clone failed\n" -"%(err)s" -msgstr "" -"卷 %(name)s: 克隆失败\n" -"%(err)s" - -#, python-format -msgid "" -"Volume %(name)s: create failed\n" -"%(err)s" -msgstr "" -"卷 %(name)s: 创建失败\n" -"%(err)s" - -#, python-format -msgid "" -"Volume %(name)s: delete failed\n" -"%(err)s" -msgstr "" -"卷 %(name)s: 删除失败\n" -"%(err)s" - -#, python-format -msgid "Volume %(vol)s could not be created in pool %(pool)s." -msgstr "未能在池 %(pool)s 中创建卷 %(vol)s。" - -#, python-format -msgid "Volume %(vol1)s does not match with snapshot.volume_id %(vol2)s." 
-msgstr "卷 %(vol1)s 与 snapshot.volume_id %(vol2)s 不匹配。" - -#, python-format -msgid "Volume %(vol_id)s is not local to this node %(host)s" -msgstr "卷 %(vol_id)s 不是节点 %(host)s 的本地卷" - -#, python-format -msgid "Volume %(vol_id)s status must be %(statuses)s" -msgstr "卷 %(vol_id)s 的状态必须为 %(statuses)s" - -#, python-format -msgid "Volume %(vol_id)s status must be available to extend." -msgstr "卷 %(vol_id)s 的状态必须是可扩展的。" - -#, python-format -msgid "" -"Volume %(vol_id)s status must be available to update readonly flag, but " -"current status is: %(vol_status)s." -msgstr "" -"卷 %(vol_id)s 状态必须为“可用”,才能更新只读标记,但当前状态为:" -"%(vol_status)s。" - -#, python-format -msgid "" -"Volume %(vol_id)s status must be available, but current status is: " -"%(vol_status)s." -msgstr "卷 %(vol_id)s 状态必须为“可用”,但当前状态为:%(vol_status)s。" - -#, python-format -msgid "Volume %(volume_id)s could not be found." -msgstr "卷 %(volume_id)s 没有找到。" - -#, python-format -msgid "" -"Volume %(volume_id)s has no administration metadata with key " -"%(metadata_key)s." -msgstr "卷 %(volume_id)s 没有任何具有键 %(metadata_key)s 的元数据。" - -#, python-format -msgid "Volume %(volume_id)s has no metadata with key %(metadata_key)s." -msgstr "卷 %(volume_id)s 没有含键 %(metadata_key)s 的元数据。" - -#, python-format -msgid "" -"Volume %(volume_id)s is currently mapped to unsupported host group %(group)s" -msgstr "卷 %(volume_id)s 当前已映射至不受支持的主机组 %(group)s" - -#, python-format -msgid "Volume %(volume_id)s is not currently mapped to host %(host)s" -msgstr "卷 %(volume_id)s 当前未映射至主机 %(host)s" - -#, python-format -msgid "Volume %(volume_id)s is still attached, detach volume first." -msgstr "卷 %(volume_id)s 仍然处于连接状态,请先从卷断开连接。" - -#, python-format -msgid "Volume %(volume_id)s replication error: %(reason)s" -msgstr "卷 %(volume_id)s 复制错误:%(reason)s" - -#, python-format -msgid "Volume %(volume_name)s is busy." 
-msgstr "卷 %(volume_name)s 处于繁忙状态。" - -#, python-format -msgid "" -"Volume %(volume_name)s: expand failed\n" -"%(err)s" -msgstr "" -"卷 %(volume_name)s: 扩展失败\n" -"%(err)s" - -#, python-format -msgid "" -"Volume %(volume_name)s: update failed\n" -"%(err)s" -msgstr "" -"卷 %(volume_name)s: 更新失败\n" -"%(err)s" - -#, python-format -msgid "Volume %s : not found" -msgstr "找不到卷:%s" - -#, python-format -msgid "Volume %s could not be created from source volume." -msgstr "未能从源卷创建卷 %s。" - -#, python-format -msgid "Volume %s could not be created on shares." -msgstr "在共享项上,未能创建卷 %s。" - -#, python-format -msgid "Volume %s could not be created." -msgstr "未能创建卷 %s。" - -#, python-format -msgid "Volume %s does not exist in Nexenta SA" -msgstr "卷 %s 在 Nexenta SA 中不存在" - -#, python-format -msgid "Volume %s does not exist in Nexenta Store appliance" -msgstr "卷 %s 在 Nexenta 存储设备中不存在" - -#, python-format -msgid "Volume %s does not exist on the array." -msgstr "卷 %s 在阵列上不存在。" - -#, python-format -msgid "Volume %s does not have provider_location specified, skipping." -msgstr "卷 %s 没有指定 provider_location,正在跳过。" - -#, python-format -msgid "Volume %s doesn't exist on array." -msgstr "卷 %s 在阵列上不存在。" - -#, python-format -msgid "Volume %s doesn't exist on the ZFSSA backend." -msgstr "卷 %s 在 ZFSSA 后端上不存在。" - -#, python-format -msgid "Volume %s is already managed by OpenStack." -msgstr "卷 %s 已由 OpenStack 管理。" - -#, python-format -msgid "" -"Volume %s is not of replicated type. This volume needs to be of a volume " -"type with the extra spec replication_enabled set to ' True' to support " -"replication actions." -msgstr "" -"卷 %s 并非被复制类型。此卷必须为 extra spec replication_enabled 设置为“ " -"True”的卷类型以支持复制操作。" - -#, python-format -msgid "" -"Volume %s is online. Set volume to offline for managing using OpenStack." -msgstr "卷 %s 已联机。将该卷设置为脱机以便使用 OpenStack 进行管理。" - -#, python-format -msgid "" -"Volume %s must not be migrating, attached, belong to a group or have " -"snapshots." 
-msgstr "卷 %s 不能正在迁移、已挂载、属于某个组或具有快照。" - -#, python-format -msgid "Volume %s must not be part of a consistency group." -msgstr "卷 %s 不得是一致性组的一部分。" - -#, python-format -msgid "Volume %s not found" -msgstr "找不到卷 %s" - -#, python-format -msgid "Volume %s not found." -msgstr "找不到卷 %s。" - -#, python-format -msgid "" -"Volume %s status must be available or in-use, must not be migrating, have " -"snapshots, be replicated, be part of a group and destination host must be " -"different than the current host" -msgstr "" -"卷 %s 的状态必须是可用的或正在使用中的,而不能是正在迁移、具有快照、重复的或" -"者属于一个组,同时目标机器也应该不同于目前的机器。" - -#, python-format -msgid "Volume %s: Error trying to extend volume" -msgstr "卷 %s:尝试扩展卷时出错" - -#, python-format -msgid "Volume (%s) already exists on array" -msgstr "数组中已存在卷 (%s)" - -#, python-format -msgid "Volume (%s) already exists on array." -msgstr "阵列上已存在卷 (%s)。" - -#, python-format -msgid "Volume Group %s does not exist" -msgstr "卷组 %s 不存在" - -#, python-format -msgid "Volume Type %(id)s already exists." -msgstr "卷类型 %(id)s 已存在。" - -#, python-format -msgid "" -"Volume Type %(volume_type_id)s deletion is not allowed with volumes present " -"with the type." -msgstr "当存在类型为 %(volume_type_id)s 的卷时,不允许删除该卷类型。" - -#, python-format -msgid "" -"Volume Type %(volume_type_id)s has no extra specs with key " -"%(extra_specs_key)s." -msgstr "卷类型 %(volume_type_id)s 没有额外说明键 %(extra_specs_key)s 。" - -msgid "Volume Type id must not be None." -msgstr "卷类型不能为空。" - -#, python-format -msgid "" -"Volume [%(cb_vol)s] was not found at CloudByte storage corresponding to " -"OpenStack volume [%(ops_vol)s]." -msgstr "" -"在对应 OpenStack 卷 [%(ops_vol)s] 的 CloudByte 存储器上找不到卷 " -"[%(cb_vol)s]。" - -#, python-format -msgid "Volume [%s] not found in CloudByte storage." -msgstr "在 CloudByte 存储器中找不到卷 [%s]。" - -msgid "Volume already managed." -msgstr "卷已管理。" - -#, python-format -msgid "Volume attachment could not be found with filter: %(filter)s ." 
-msgstr "在使用过滤器 %(filter)s 的情况下,找不到卷连接。" - -#, python-format -msgid "Volume backend config is invalid: %(reason)s" -msgstr "卷后端配置无效:%(reason)s" - -msgid "Volume by this name already exists" -msgstr "使用此名称的卷已存在" - -msgid "" -"Volume cannot be created individually from a snapshot that is part of a " -"Consistency Group" -msgstr "无法单独地从快照创建卷,因为它是一致性组的一部分" - -msgid "Volume cannot be restored since it contains snapshots." -msgstr "卷无法复原,因为它包含快照。" - -#, python-format -msgid "Volume connected to host %s." -msgstr "连接到主机 %s 上的卷。" - -msgid "Volume create failed while extracting volume ref." -msgstr "抽取卷引用时创建卷失败。" - -#, python-format -msgid "Volume device file path %s does not exist." -msgstr "卷设备文件路径 %s 不存在。" - -#, python-format -msgid "Volume device not found at %(device)s." -msgstr "在 %(device)s 上找不到卷设备。" - -#, python-format -msgid "Volume driver %s not initialized." -msgstr "卷驱动程序 %s 未初始化。" - -msgid "Volume driver not ready." -msgstr "卷驱动未准备好。" - -#, python-format -msgid "Volume driver reported an error: %(message)s" -msgstr "卷驱动程序已报告错误:%(message)s" - -msgid "Volume has a temporary snapshot that can't be deleted at this time." -msgstr "卷具有此时不能删除的临时快照。" - -msgid "Volume has children and cannot be deleted!" -msgstr "卷具有子代,不能删除!" - -#, python-format -msgid "Volume in group %s is attached. Need to detach first." -msgstr "已连接组 %s 中的卷。需要先拆离。" - -msgid "Volume in group still has dependent snapshots." -msgstr "组中的卷仍然具有从属快照。" - -#, python-format -msgid "Volume is attached to a server. (%s)" -msgstr "卷已连接至服务器。(%s)" - -msgid "Volume is in-use." -msgstr "卷在使用中。" - -msgid "Volume is not available." -msgstr "卷不可用。" - -msgid "Volume is not local to this node" -msgstr "卷不是此节点的本地卷" - -msgid "Volume is not local to this node." -msgstr "该卷不是此节点的本地卷。" - -msgid "Volume manage failed." -msgstr "管理卷失败。" - -#, python-format -msgid "Volume manager for backend '%s' does not exist." 
-msgstr "用于后端'%s'的卷管理器不存在。" - -msgid "" -"Volume metadata backup requested but this driver does not yet support this " -"feature." -msgstr "已请求卷元数据备份,但此驱动程序尚不支持此功能。" - -#, python-format -msgid "Volume migration failed: %(reason)s" -msgstr "卷迁移失败:%(reason)s" - -msgid "Volume must be available" -msgstr "卷必须可用" - -msgid "Volume must be in the same availability zone as the snapshot" -msgstr "卷必须与快照位于同一可用性区域中" - -msgid "Volume must be in the same availability zone as the source volume" -msgstr "卷必须与源卷位于同一可用性区域中" - -msgid "Volume must have a volume type" -msgstr "卷必须具有卷类型" - -msgid "Volume must not be replicated." -msgstr "不得复制卷。" - -msgid "Volume must not have snapshots." -msgstr "卷不能具有快照。" - -#, python-format -msgid "Volume not found for instance %(instance_id)s." -msgstr "没有为实例 %(instance_id)s 找到卷。" - -msgid "Volume not found on configured storage backend." -msgstr "在已配置的存储器后端上找不到卷。" - -msgid "" -"Volume not found on configured storage backend. If your volume name contains " -"\"/\", please rename it and try to manage again." -msgstr "在所配置存储器后端找不到卷。如果卷名包含“/”,请重命名并重试管理。" - -msgid "Volume not found on configured storage pools." -msgstr "在已配置的存储池上找不到卷。" - -msgid "Volume not found." -msgstr "找不到卷。" - -msgid "Volume not unique." -msgstr "卷并非唯一。" - -msgid "Volume not yet assigned to host." -msgstr "卷尚未分配给主机。" - -msgid "Volume reference must contain source-name element." -msgstr "卷引用必须包含 source-name 元素。" - -#, python-format -msgid "Volume replication for %(volume_id)s could not be found." -msgstr "找不到 %(volume_id)s 的卷复制。" - -#, python-format -msgid "Volume service %s failed to start." -msgstr "卷服务 %s 未能启动。" - -msgid "Volume should have agent-type set as None." -msgstr "卷应该将 agent-type 设置为“无”。" - -#, python-format -msgid "" -"Volume size %(volume_size)sGB cannot be smaller than the image minDisk size " -"%(min_disk)sGB." 
-msgstr "卷大小 %(volume_size)sGB 不能小于映像 minDisk 大小 %(min_disk)sGB。" - -#, python-format -msgid "Volume size '%(size)s' must be an integer and greater than 0" -msgstr "卷大小“%(size)s”必须为正整数" - -#, python-format -msgid "" -"Volume size '%(size)s'GB cannot be smaller than original volume size " -"%(source_size)sGB. They must be >= original volume size." -msgstr "" -"卷大小“%(size)s”GB 不能小于原始卷大小 %(source_size)sGB。它们必须不小于原始卷" -"大小。" - -#, python-format -msgid "" -"Volume size '%(size)s'GB cannot be smaller than the snapshot size " -"%(snap_size)sGB. They must be >= original snapshot size." -msgstr "" -"卷大小“%(size)s”GB 不能小于快照大小 %(snap_size)sGB。它们必须不小于原始快照大" -"小。" - -msgid "Volume size increased since the last backup. Do a full backup." -msgstr "自从最近一次备份以来,卷大小已增加。请执行完全备份。" - -msgid "Volume size must be a multiple of 1 GB." -msgstr "卷大小必须为 1 GB 的倍数。" - -msgid "Volume size must multiple of 1 GB." -msgstr "卷大小必须是 1 GB 的倍数。" - -#, python-format -msgid "Volume status must be \"available\" or \"in-use\" for snapshot. (is %s)" -msgstr "对于快照,卷状态必须为“available”或“in-use”。(卷状态现在为 %s)" - -msgid "Volume status must be \"available\" or \"in-use\"." -msgstr "卷状态必须为“available”或“in-use”。" - -#, python-format -msgid "Volume status must be %s to reserve." -msgstr "卷状态必须为 %s 才能保留。" - -msgid "Volume status must be 'available'." -msgstr "卷状态必须为“可用”。" - -#, python-format -msgid "Volume status must be available for snapshot %(id)s. (is %(status)s)" -msgstr "对于快照 %(id)s,卷状态必须为“available”。(卷状态现在为 %(status)s)" - -msgid "Volume to Initiator Group mapping already exists" -msgstr "卷至发起方组的映射已存在" - -#, python-format -msgid "" -"Volume to be backed up must be available or in-use, but the current status " -"is \"%s\"." -msgstr "要备份的卷必须可用或者正在使用,但是当前状态为“%s”。" - -msgid "Volume to be restored to must be available" -msgstr "要复原至的卷必须可用" - -#, python-format -msgid "Volume type %(volume_type_id)s could not be found." -msgstr "卷类型 %(volume_type_id)s 没有找到。" - -#, python-format -msgid "Volume type ID '%s' is invalid." 
-msgstr "卷类型标识“%s”无效。" - -#, python-format -msgid "" -"Volume type access for %(volume_type_id)s / %(project_id)s combination " -"already exists." -msgstr "已存在针对 %(volume_type_id)s / %(project_id)s 组合的卷类型访问权限。" - -#, python-format -msgid "" -"Volume type access not found for %(volume_type_id)s / %(project_id)s " -"combination." -msgstr "对于 %(volume_type_id)s / %(project_id)s 组合,找不到卷类型访问权限。" - -#, python-format -msgid "Volume type encryption for type %(type_id)s already exists." -msgstr "类型 %(type_id)s 的卷类型加密已存在。" - -#, python-format -msgid "Volume type encryption for type %(type_id)s does not exist." -msgstr "类型 %(type_id)s 的卷类型加密不存在。" - -msgid "Volume type name can not be empty." -msgstr "卷类型名称不能为 空." - -#, python-format -msgid "Volume type with name %(volume_type_name)s could not be found." -msgstr "名为 %(volume_type_name)s 的卷类型没有找到。" - -#, python-format -msgid "Volume%s: not found" -msgstr "找不到卷%s" - -#, python-format -msgid "" -"Volume: %(volumeName)s is not a concatenated volume. You can only perform " -"extend on concatenated volume. Exiting..." -msgstr "卷 %(volumeName)s 不是并置卷。只能对并置卷执行扩展。正在退出..." - -#, python-format -msgid "Volume: %(volumeName)s was not added to storage group %(sgGroupName)s." -msgstr "卷 %(volumeName)s 未添加至存储器组 %(sgGroupName)s。" - -#, python-format -msgid "Volume: %s is already being managed by Cinder." -msgstr "卷 %s 已由 Cinder 管理。" - -msgid "" -"Volumes/account exceeded on both primary and secondary SolidFire accounts." -msgstr "卷/帐户同时超出主 SolidFire 帐户和辅助 SolidFire 帐户的限制。" - -#, python-format -msgid "" -"VzStorage config 'vzstorage_used_ratio' invalid. Must be > 0 and <= 1.0: %s." -msgstr "" -"VzStorage 配置“vzstorage_used_ratio”无效。必须大于 0 并且小于或等于 1.0:%s。" - -#, python-format -msgid "VzStorage config file at %(config)s doesn't exist." -msgstr "%(config)s 处不存在 VzStorage 配置文件。" - -msgid "Wait replica complete timeout." -msgstr "等待副本完成时发生超时。" - -#, python-format -msgid "Wait synchronize failed. Running status: %s." 
-msgstr "等待同步失败。运行状态:%s。" - -msgid "" -"Waiting for all nodes to join cluster. Ensure all sheep daemons are running." -msgstr "正在等待所有节点加入集群。请确保所有 sheep 守护程序都在运行。" - -msgid "We should not do switch over on primary array." -msgstr "不应在主阵列上切换。" - -#, python-format -msgid "Worker for %(type)s %(id)s already exists." -msgstr "标识为%(type)s %(id)s的生产者已经存在。" - -#, python-format -msgid "Worker with %s could not be found." -msgstr "无法找到标识为 %s 的生产者。" - -msgid "X-IO Volume Driver exception!" -msgstr "发生 X-IO 卷驱动程序异常!" - -msgid "XtremIO not configured correctly, no iscsi portals found" -msgstr "未正确配置 XtremIO,找不到任何 iSCSI 门户网站" - -msgid "XtremIO not initialized correctly, no clusters found" -msgstr "XtremIO 未正确初始化,找不到任何集群" - -msgid "You must implement __call__" -msgstr "你必须执行 __call__" - -msgid "" -"You must install hpe3parclient before using 3PAR drivers. Run \"pip install " -"python-3parclient\" to install the hpe3parclient." -msgstr "" -"使用 3PAR 驱动程序之前,必须安装 hpe3parclient。运行“pip install " -"python-3parclient”以安装 hpe3parclient。" - -msgid "You must supply an array in your EMC configuration file." -msgstr "必须在 EMC 配置文件中提供阵列。" - -#, python-format -msgid "" -"Your original size: %(originalVolumeSize)s GB is greater than: %(newSize)s " -"GB. Only Extend is supported. Exiting..." -msgstr "" -"原始大小 %(originalVolumeSize)s GB 大于%(newSize)s GB。仅支持扩展。正在退" -"出..." - -#, python-format -msgid "ZeroDivisionError: %s" -msgstr "ZeroDivisionError:%s" - -msgid "Zone" -msgstr "域" - -#, python-format -msgid "Zoning Policy: %s, not recognized" -msgstr "分区策略:%s,无法识别" - -#, python-format -msgid "_create_and_copy_vdisk_data: Failed to get attributes for vdisk %s." -msgstr "_create_and_copy_vdisk_data:未能获取 vdisk %s 的属性。" - -#, python-format -msgid "" -"_create_group:add port failed. Port name: %(name)s with Return code: " -"%(ret)s." -msgstr "_create_group:添加名为 %(name)s 的端口失败,返回的状态码为:%(ret)s。" - -msgid "_create_host failed to return the host name." 
-msgstr "_create_host 未能返回主机名。" - -msgid "" -"_create_host: Can not translate host name. Host name is not unicode or " -"string." -msgstr "_create_host:无法翻译主机名。主机名不是 Unicode或字符串。" - -msgid "_create_host: No connector ports." -msgstr "_create_host:不存在任何连接器端口。" - -msgid "_create_local_cloned_volume, Replication Service not found." -msgstr "_create_local_cloned_volume,找不到复制服务。" - -#, python-format -msgid "" -"_create_local_cloned_volume, volumename: %(volumename)s, sourcevolumename: " -"%(sourcevolumename)s, source volume instance: %(source_volume)s, target " -"volume instance: %(target_volume)s, Return code: %(rc)lu, Error: " -"%(errordesc)s." -msgstr "" -"_create_local_cloned_volume,volumenam:%(volumename)s,sourcevolumename:" -"%(sourcevolumename)s,源卷实例:%(source_volume)s,目标卷实例:" -"%(target_volume)s,返回码:%(rc)lu,错误:%(errordesc)s。" - -#, python-format -msgid "" -"_create_vdisk %(name)s - did not find success message in CLI output.\n" -" stdout: %(out)s\n" -" stderr: %(err)s" -msgstr "" -"_create_vdisk %(name)s - 找不到 CLI 输出形式的成功消息。\n" -"标准输出:%(out)s\n" -"标准错误:%(err)s" - -msgid "_create_volume_name, id_code is None." -msgstr "_create_volume_name,id_code 为 None。" - -msgid "_delete_copysession, Cannot find Replication Service" -msgstr "_delete_copysession,找不到复制服务" - -#, python-format -msgid "" -"_delete_copysession, copy session type is undefined! copy session: " -"%(cpsession)s, copy type: %(copytype)s." -msgstr "" -"_delete_copysession,未定义复制会话类型!复制会话:%(cpsession)s,复制类型:" -"%(copytype)s。" - -#, python-format -msgid "" -"_delete_copysession, copysession: %(cpsession)s, operation: %(operation)s, " -"Return code: %(rc)lu, Error: %(errordesc)s." -msgstr "" -"_delete_copysession,copysession:%(cpsession)s,操作:%(operation)s,返回" -"码:%(rc)lu,错误:%(errordesc)s。" - -#, python-format -msgid "" -"_delete_volume, volumename: %(volumename)s, Return code: %(rc)lu, Error: " -"%(errordesc)s." 
-msgstr "" -"_delete_volume,volumename:%(volumename)s,返回码:%(rc)lu,错误:" -"%(errordesc)s。" - -#, python-format -msgid "" -"_delete_volume, volumename: %(volumename)s, Storage Configuration Service " -"not found." -msgstr "_delete_volume,volumename:%(volumename)s,找不到存储器配置服务。" - -#, python-format -msgid "" -"_exec_eternus_service, classname: %(classname)s, InvokeMethod, cannot " -"connect to ETERNUS." -msgstr "" -"_exec_eternus_service,classname:%(classname)s,InvokeMethod,无法连接至 " -"ETERNUS。" - -msgid "_extend_volume_op: Extending a volume with snapshots is not supported." -msgstr "_extend_volume_op:不支持扩展带有快照的卷。" - -#, python-format -msgid "" -"_find_affinity_group, connector: %(connector)s, Associators: " -"FUJITSU_AuthorizedTarget, cannot connect to ETERNUS." -msgstr "" -"_find_affinity_group,连接器:%(connector)s,关联者:" -"FUJITSU_AuthorizedTarget,无法连接至 ETERNUS。" - -#, python-format -msgid "" -"_find_affinity_group, connector: %(connector)s, EnumerateInstanceNames, " -"cannot connect to ETERNUS." -msgstr "" -"_find_affinity_group,连接器:%(connector)s,EnumerateInstanceNames,无法连接" -"至 ETERNUS。" - -#, python-format -msgid "" -"_find_affinity_group,connector: %(connector)s,AssocNames: " -"FUJITSU_ProtocolControllerForUnit, cannot connect to ETERNUS." -msgstr "" -"_find_affinity_group,连接器:%(connector)s,AssocNames: " -"FUJITSU_ProtocolControllerForUnit,无法连接至 ETERNUS。" - -#, python-format -msgid "" -"_find_copysession, ReferenceNames, vol_instance: %(vol_instance_path)s, " -"Cannot connect to ETERNUS." -msgstr "" -"_find_copysession,ReferenceNames,vol_instance:%(vol_instance_path)s,无法" -"连接至 ETERNUS。" - -#, python-format -msgid "" -"_find_eternus_service, classname: %(classname)s, EnumerateInstanceNames, " -"cannot connect to ETERNUS." -msgstr "" -"_find_eternus_service,classname:%(classname)s,EnumerateInstanceNames,无法" -"连接至 ETERNUS。" - -#, python-format -msgid "_find_initiator_names, connector: %(connector)s, initiator not found." 
-msgstr "_find_initiator_names,连接器:%(connector)s,找不到启动程序。" - -#, python-format -msgid "" -"_find_lun, volumename: %(volumename)s, EnumerateInstanceNames, cannot " -"connect to ETERNUS." -msgstr "" -"_find_lun,volumename:%(volumename)s,EnumerateInstanceNames,无法连接至 " -"ETERNUS。" - -#, python-format -msgid "" -"_find_pool, eternus_pool:%(eternus_pool)s, EnumerateInstances, cannot " -"connect to ETERNUS." -msgstr "" -"_find_pool,eternus_pool:%(eternus_pool)s,EnumerateInstances,无法连接至 " -"ETERNUS。" - -msgid "_get_async_url: Invalid URL." -msgstr "_get_async_url: 无效的 URL." - -#, python-format -msgid "" -"_get_drvcfg, filename: %(filename)s, tagname: %(tagname)s, data is None!! " -"Please edit driver configuration file and correct." -msgstr "" -"_get_drvcfg,filename:%(filename)s,tagname:%(tagname)s,没有数据!请编辑驱" -"动配置文件并更正。" - -#, python-format -msgid "" -"_get_eternus_connection, filename: %(filename)s, ip: %(ip)s, port: %(port)s, " -"user: %(user)s, passwd: ****, url: %(url)s, FAILED!!." -msgstr "" -"_get_eternus_connection,filename:%(filename)s,ip:%(ip)s,端口:%(port)s," -"用户:%(user)s,密码:****,URL:%(url)s,失败!" - -#, python-format -msgid "" -"_get_eternus_iscsi_properties, iscsiip list: %(iscsiip_list)s, iqn not found." -msgstr "" -"_get_eternus_iscsi_properties,iscsiip 列表:%(iscsiip_list)s,找不到 iqn。" - -#, python-format -msgid "" -"_get_eternus_iscsi_properties, iscsiip: %(iscsiip)s, AssociatorNames: " -"CIM_BindsTo, cannot connect to ETERNUS." -msgstr "" -"_get_eternus_iscsi_properties,iscsiip:%(iscsiip)s,AssociatorName:" -"CIM_BindsTo,无法连接至 ETERNUS。" - -#, python-format -msgid "" -"_get_eternus_iscsi_properties, iscsiip: %(iscsiip)s, EnumerateInstanceNames, " -"cannot connect to ETERNUS." -msgstr "" -"_get_eternus_iscsi_properties,iscsiip:%(iscsiip)s,EnumerateInstanceNames," -"无法连接至 ETERNUS。" - -#, python-format -msgid "" -"_get_eternus_iscsi_properties, iscsiip: %(iscsiip)s, GetInstance, cannot " -"connect to ETERNUS." 
-msgstr "" -"_get_eternus_iscsi_properties,iscsiip:%(iscsiip)s,GetInstance,无法连接至 " -"ETERNUS。" - -#, python-format -msgid "" -"_get_hdr_dic: attribute headers and values do not match.\n" -" Headers: %(header)s\n" -" Values: %(row)s." -msgstr "" -"_get_hdr_dic:属性头和值不匹配。\n" -"头为 %(header)s\n" -"值为 %(row)s。" - -msgid "_get_host_from_connector failed to return the host name for connector." -msgstr "_get_host_from_connector 未能返回连接器的主机名。" - -#, python-format -msgid "" -"_get_mapdata_fc, getting host-affinity from aglist/vol_instance failed, " -"affinitygroup: %(ag)s, ReferenceNames, cannot connect to ETERNUS." -msgstr "" -"_get_mapdata_fc,从 aglist/vol_instance 获取主机亲缘关系失败,affinitygroup:" -"%(ag)s,ReferenceNames,无法连接至 ETERNUS。" - -#, python-format -msgid "" -"_get_mapdata_fc, getting host-affinity instance failed, volmap: %(volmap)s, " -"GetInstance, cannot connect to ETERNUS." -msgstr "" -"_get_mapdata_fc,获取主机亲缘关系实例失败,volmap:%(volmap)s,GetInstance," -"无法连接至 ETERNUS。" - -msgid "" -"_get_mapdata_iscsi, Associators: FUJITSU_SAPAvailableForElement, cannot " -"connect to ETERNUS." -msgstr "" -"_get_mapdata_iscsi,关联者:FUJITSU_SAPAvailableForElement,无法连接至 " -"ETERNUS。" - -#, python-format -msgid "" -"_get_mapdata_iscsi, affinitygroup: %(ag)s, ReferenceNames, cannot connect to " -"ETERNUS." -msgstr "" -"_get_mapdata_iscsi,affinitygroup:%(ag)s,ReferenceNames,无法连接至 " -"ETERNUS。" - -#, python-format -msgid "" -"_get_mapdata_iscsi, vol_instance: %(vol_instance)s, ReferenceNames: " -"CIM_ProtocolControllerForUnit, cannot connect to ETERNUS." -msgstr "" -"_get_mapdata_iscsi,vol_instance:%(vol_instance)s,ReferenceNames: " -"CIM_ProtocolControllerForUnit,无法连接至 ETERNUS。" - -#, python-format -msgid "" -"_get_mapdata_iscsi, volmap: %(volmap)s, GetInstance, cannot connect to " -"ETERNUS." -msgstr "" -"_get_mapdata_iscsi,volmap:%(volmap)s,GetInstance,无法连接至 ETERNUS。" - -#, python-format -msgid "_get_sysinfo:get sys info failed. Return code: %(ret)s." 
-msgstr "_get_sysinfo:获取系统信息失败。返回的状态码为: %(ret)s。" - -#, python-format -msgid "" -"_get_target_ip_ctrl:get iscsi port list fail. with Return code: %(ret)s." -msgstr "_get_target_ip_ctrl:获取iscsi端口列表失败,返回的状态码为:%(ret)s。" - -msgid "_get_target_port, EnumerateInstances, cannot connect to ETERNUS." -msgstr "_get_target_port,EnumerateInstances,无法连接至 ETERNUS。" - -#, python-format -msgid "_get_target_port, protcol: %(protocol)s, target_port not found." -msgstr "_get_target_port,协议:%(protocol)s,找不到 target_port。" - -#, python-format -msgid "_get_unmanaged_replay: Cannot find snapshot named %s" -msgstr "_get_unmanaged_replay:找不到名为 %s 的快照" - -#, python-format -msgid "_get_unmanaged_replay: Cannot find volume id %s" -msgstr "_get_unmanaged_replay:找不到卷标识 %s" - -msgid "_get_unmanaged_replay: Must specify source-name." -msgstr "_get_unmanaged_replay:必须指定 source-name。" - -msgid "" -"_get_vdisk_map_properties: Could not get FC connection information for the " -"host-volume connection. Is the host configured properly for FC connections?" -msgstr "" -"_get_vdisk_map_properties:对于主机/卷连接,未能获取 FC 连接信息。已针对 FC " -"连接正确配置主机吗?" - -#, python-format -msgid "" -"_get_vdisk_map_properties: No node found in I/O group %(gid)s for volume " -"%(vol)s." -msgstr "" -"_get_vdisk_map_properties:在卷 %(vol)s 的 I/O 组 %(gid)s 中找不到任何节点。" - -#, python-format -msgid "" -"_map_delete_host: delete host failed. host name:%(name)s with Return code: " -"%(ret)s" -msgstr "_map_delete_host:删除主机 %(name)s 失败,返回的状态码为: %(ret)s" - -#, python-format -msgid "" -"_map_delete_host:get host info failed. host name:%(name)s with Return code: " -"%(ret)s." -msgstr "" -"_map_delete_host:获取主机 %(name)s 的信息失败,返回的状态码为:%(ret)s。" - -#, python-format -msgid "" -"_map_delete_host:get map group info failed. group name:%(name)s with Return " -"code: %(ret)s." 
-msgstr "" -"_map_delete_host:从名为 %(name)s 的组中获取映射组信息失败,返回的状态码为:" -"%(ret)s。" - -#, python-format -msgid "" -"_map_lun, vol_instance.path:%(vol)s, volumename: %(volumename)s, volume_uid: " -"%(uid)s, initiator: %(initiator)s, target: %(tgt)s, aglist: %(aglist)s, " -"Storage Configuration Service not found." -msgstr "" -"_map_lun,vol_instance.path:%(vol)s,volumename:%(volumename)s," -"volume_uid:%(uid)s,启动程序:%(initiator)s,目标:%(tgt)s,aglist:" -"%(aglist)s,找不到存储器配置服务。" - -#, python-format -msgid "" -"_map_lun:delete lunid from group failed. group name:%(name)s lunid : %(lun)s " -"with Return code: %(ret)s." -msgstr "" -"_map_lun:从名为 %(name)s 的组中删除名为 %(lun)s 的lunid时失败,返回的错误码" -"为:%(ret)s。" - -#, python-format -msgid "" -"_unmap_lun, vol_instance.path: %(volume)s, volumename: %(volumename)s, " -"volume_uid: %(uid)s, aglist: %(aglist)s, Controller Configuration Service " -"not found." -msgstr "" -"_unmap_lun,vol_instance.path:%(volume)s,volumename:%(volumename)s," -"volume_uid:%(uid)s,aglist:%(aglist)s,找不到控制器配置服务。" - -#, python-format -msgid "" -"_unmap_lun, volumename: %(volumename)s, volume_uid: %(volume_uid)s, " -"AffinityGroup: %(ag)s, Return code: %(rc)lu, Error: %(errordesc)s." -msgstr "" -"_unmap_lun,volumename:%(volumename)s,volume_uid:%(volume_uid)s," -"AffinityGroup:%(ag)s,返回码:%(rc)lu,错误:%(errordesc)s。" - -#, python-format -msgid "" -"_unmap_lun,vol_instance.path: %(volume)s, AssociatorNames: " -"CIM_ProtocolControllerForUnit, cannot connect to ETERNUS." -msgstr "" -"_unmap_lun,vol_instance.path:%(volume)s,AssociatorName:" -"CIM_ProtocolControllerForUnit,无法连接至 ETERNUS。" - -msgid "_update_volume_stats: Could not get storage pool data." -msgstr "_update_volume_stats:未能获取存储池数据。" - -#, python-format -msgid "" -"_wait_for_copy_complete, cpsession: %(cpsession)s, copysession state is " -"BROKEN." -msgstr "" -"_wait_for_copy_complete,cpsession:%(cpsession)s,copysession 状态为 " -"BROKEN。" - -#, python-format -msgid "" -"add_vdisk_copy failed: A copy of volume %s exists. 
Adding another copy would " -"exceed the limit of 2 copies." -msgstr "" -"add_vdisk_copy 失败:卷 %s 的副本已存在。添加另一个副本将超过 2 个副本的限" -"制。" - -msgid "add_vdisk_copy started without a vdisk copy in the expected pool." -msgstr "在所需池中没有 vdisk 副本的情况下,add_vdisk_copy 已开始。" - -#, python-format -msgid "all_tenants must be a boolean, got '%s'." -msgstr "all_tenants 必须为布尔值,但是获得了“%s”。" - -msgid "already created" -msgstr "已创建" - -msgid "already_created" -msgstr "already_created" - -msgid "attach snapshot from remote node" -msgstr "从远程节点连接快照" - -#, python-format -msgid "attribute %s not lazy-loadable" -msgstr "属性 %s 不可延迟装入" - -#, python-format -msgid "" -"backup: %(vol_id)s failed to create device hardlink from %(vpath)s to " -"%(bpath)s.\n" -"stdout: %(out)s\n" -" stderr: %(err)s" -msgstr "" -"备份:%(vol_id)s 未能创建从 %(vpath)s 至 %(bpath)s 的设备硬链接。\n" -"标准输出:%(out)s\n" -"标准错误:%(err)s" - -#, python-format -msgid "" -"backup: %(vol_id)s failed to obtain backup success notification from " -"server.\n" -"stdout: %(out)s\n" -" stderr: %(err)s" -msgstr "" -"备份:%(vol_id)s 未能从服务器获取“备份成功”通知。\n" -"标准输出:%(out)s\n" -"标准错误:%(err)s" - -#, python-format -msgid "" -"backup: %(vol_id)s failed to run dsmc due to invalid arguments on " -"%(bpath)s.\n" -"stdout: %(out)s\n" -" stderr: %(err)s" -msgstr "" -"备份:由于 %(bpath)s 上的自变量无效,使得 %(vol_id)s 未能运行 dsmc。\n" -"标准输出:%(out)s\n" -"标准错误:%(err)s" - -#, python-format -msgid "" -"backup: %(vol_id)s failed to run dsmc on %(bpath)s.\n" -"stdout: %(out)s\n" -" stderr: %(err)s" -msgstr "" -"备份:%(vol_id)s 未能对 %(bpath)s 运行 dsmc。\n" -"标准输出:%(out)s\n" -"标准错误:%(err)s" - -#, python-format -msgid "backup: %(vol_id)s failed. %(path)s is not a file." -msgstr "备份:%(vol_id)s 失败。%(path)s 不是文件。" - -#, python-format -msgid "" -"backup: %(vol_id)s failed. %(path)s is unexpected file type. Block or " -"regular files supported, actual file mode is %(vol_mode)s." 
-msgstr "" -"备份:%(vol_id)s 失败。%(path)s 是意外的文件类型。支持块文件或常规文件,实际" -"文件方式为 %(vol_mode)s。" - -#, python-format -msgid "" -"backup: %(vol_id)s failed. Cannot obtain real path to volume at %(path)s." -msgstr "备份:%(vol_id)s 失败。无法获取 %(path)s 处卷的实际路径。" - -msgid "being attached by different mode" -msgstr "正在通过另一方式连接" - -#, python-format -msgid "call failed: %r" -msgstr "调用失败:%r" - -msgid "call failed: GARBAGE_ARGS" -msgstr "调用失败:GARBAGE_ARGS" - -msgid "call failed: PROC_UNAVAIL" -msgstr "调用失败:PROC_UNAVAIL" - -#, python-format -msgid "call failed: PROG_MISMATCH: %r" -msgstr "调用失败:PROG_MISMATCH:%r" - -msgid "call failed: PROG_UNAVAIL" -msgstr "调用失败:PROG_UNAVAIL" - -#, python-format -msgid "can't find lun-map, ig:%(ig)s vol:%(vol)s" -msgstr "找不到 LUN 映射,ig:%(ig)s 卷:%(vol)s" - -msgid "can't find the volume to extend" -msgstr "找不到要扩展的卷" - -msgid "can't handle both name and index in req" -msgstr "无法同时处理请求中的名称和索引" - -msgid "cannot understand JSON" -msgstr "无法理解JSON" - -#, python-format -msgid "cg-%s" -msgstr "cg-%s" - -msgid "" -"cg_creating_from_src must be called with cg_id or cgsnapshot_id parameter." -msgstr "cg_creating_from_src必须通过cg_id or cgsnapshot_id参数调用。" - -msgid "cgsnapshot assigned" -msgstr "已分配 cgsnapshot" - -msgid "cgsnapshot changed" -msgstr "已更改 cgsnapshot" - -msgid "cgsnapshots assigned" -msgstr "已分配 cgsnapshot" - -msgid "cgsnapshots changed" -msgstr "已更改 cgsnapshot" - -msgid "" -"check_for_setup_error: Password or SSH private key is required for " -"authentication: set either san_password or san_private_key option." -msgstr "" -"check_for_setup_error:认证需要密码或 SSH 专用密钥:请设置 san_password 或 " -"san_private_key 选项。" - -msgid "check_for_setup_error: Unable to determine system id." -msgstr "check_for_setup_error:无法确定系统标识。" - -msgid "check_for_setup_error: Unable to determine system name." -msgstr "check_for_setup_error:无法确定系统名称。" - -msgid "check_hypermetro_exist error." 
-msgstr "check_hypermetro_exist 错误。" - -#, python-format -msgid "clone depth exceeds limit of %s" -msgstr "克隆深度超过 %s 的限制" - -msgid "cluster assigned" -msgstr "已分配集群" - -msgid "cluster changed" -msgstr "已更改集群" - -msgid "config option key_manager.fixed_key is not defined" -msgstr "配置选项 key_manager.fixed_key 为定义。" - -#, python-format -msgid "consistency group with name: %s already exists" -msgstr "名称为: %s 的一致性组已存在" - -msgid "consistencygroup assigned" -msgstr "已分配 consistencygroup" - -msgid "consistencygroup changed" -msgstr "已更改 consistencygroup" - -msgid "control_location must be defined" -msgstr "必须定义 control_location" - -msgid "coprhd_hostname is not set in cinder configuration" -msgstr "在cinder配置中coprhd_hostname未被设置" - -msgid "coprhd_password is not set in cinder configuration" -msgstr "在cinder配置中coprhd_password未被设置" - -msgid "coprhd_port is not set in cinder configuration" -msgstr "在cinder配置中coprhd_port未被设置" - -msgid "coprhd_project is not set in cinder configuration" -msgstr "在cinder配置中coprhd_project未被设置" - -msgid "coprhd_tenant is not set in cinder configuration" -msgstr "在cinder配置中coprhd_tenant未被设置" - -msgid "coprhd_username is not set in cinder configuration" -msgstr "在cinder配置中coprhd_username未被设置" - -msgid "coprhd_varray is not set in cinder configuration" -msgstr "在cinder配置中coprhd_varray未被设置" - -#, python-format -msgid "create group failed. Group name:%(name)s with Return code: %(ret)s." -msgstr "创建名为 %(name)s 的组失败,返回的状态码为:%(ret)s。" - -#, python-format -msgid "create host failed. Host name:%(name)s with Return code: %(ret)s." -msgstr "创建名为 %(name)s 的主机失败,返回的状态码为:%(ret)s。" - -msgid "create_cloned_volume, Source Volume does not exist in ETERNUS." -msgstr "create_cloned_volume,源卷在 ETERNUS 中不存在。" - -#, python-format -msgid "" -"create_cloned_volume, target volume instancename: %(volume_instancename)s, " -"Get Instance Failed." 
-msgstr "" -"create_cloned_volume,目标卷实例名:%(volume_instancename)s,获取实例失败。" - -msgid "create_cloned_volume: Source and destination size differ." -msgstr "create_cloned_volume:源和目标大小不同。" - -#, python-format -msgid "" -"create_cloned_volume: source volume %(src_vol)s size is %(src_size)dGB and " -"doesn't fit in target volume %(tgt_vol)s of size %(tgt_size)dGB." -msgstr "" -"create_cloned_volume:源卷 %(src_vol)s 大小为 %(src_size)dGB,无法拟合大小为 " -"%(tgt_size)dGB 的目标卷 %(tgt_vol)s。" - -msgid "" -"create_consistencygroup_from_src must be creating from a CG snapshot, or a " -"source CG." -msgstr "create_consistencygroup_from_src 必须为通过 CG 快照或源 CG 创建。" - -msgid "" -"create_consistencygroup_from_src only supports a cgsnapshot source or a " -"consistency group source. Multiple sources cannot be used." -msgstr "" -"create_consistencygroup_from_src 仅支持 cgsnapshot 源或一致性组源。不能使用多" -"个源。" - -#, python-format -msgid "create_copy: Source vdisk %(src)s (%(src_id)s) does not exist." -msgstr "create_copy:源 vdisk %(src)s (%(src_id)s) 不存在。" - -#, python-format -msgid "create_copy: Source vdisk %(src)s does not exist." -msgstr "create_copy:源 vdisk %(src)s 不存在。" - -msgid "create_host: Host name is not unicode or string." -msgstr "create_host:主机名不是 Unicode 或字符串。" - -msgid "create_host: No initiators or wwpns supplied." -msgstr "create_host:未提供任何发起方或 wwpn。" - -msgid "create_hypermetro_pair error." -msgstr "create_hypermetro_pair 错误。" - -#, python-format -msgid "create_snapshot, eternus_pool: %(eternus_pool)s, pool not found." -msgstr "create_snapshot,eternus_pool:%(eternus_pool)s,找不到池。" - -#, python-format -msgid "" -"create_snapshot, snapshotname: %(snapshotname)s, source volume name: " -"%(volumename)s, vol_instance.path: %(vol_instance)s, dest volume name: " -"%(d_volumename)s, pool: %(pool)s, Return code: %(rc)lu, Error: %(errordesc)s." 
-msgstr "" -"create_snapshot,snapshotname:%(snapshotname)s,源卷名:%(volumename)s," -"vol_instance.path: %(vol_instance)s,目标卷名:%(d_volumename)s,池:" -"%(pool)s,返回码:%(rc)lu,错误:%(errordesc)s。" - -#, python-format -msgid "" -"create_snapshot, volumename: %(s_volumename)s, source volume not found on " -"ETERNUS." -msgstr "" -"create_snapshot,volumename:%(s_volumename)s,在 ETERNUS 上找不到源卷。" - -#, python-format -msgid "" -"create_snapshot, volumename: %(volumename)s, Replication Service not found." -msgstr "create_snapshot,volumename:%(volumename)s,找不到复制服务。" - -#, python-format -msgid "" -"create_snapshot: Volume status must be \"available\" or \"in-use\" for " -"snapshot. The invalid status is %s." -msgstr "" -"create_snapshot:对于快照,卷状态必须为“available”或“in-use”。无效状态为 %s。" - -msgid "create_snapshot: get source volume failed." -msgstr "create_snapshot:获取源卷失败。" - -#, python-format -msgid "" -"create_volume, volume: %(volume)s, EnumerateInstances, cannot connect to " -"ETERNUS." -msgstr "" -"create_volume,卷:%(volume)s,EnumerateInstances,无法连接至 ETERNUS。" - -#, python-format -msgid "" -"create_volume, volume: %(volume)s, volumename: %(volumename)s, eternus_pool: " -"%(eternus_pool)s, Storage Configuration Service not found." -msgstr "" -"create_volume,卷:%(volume)s,卷名:%(volumename)s,eternus_pool:" -"%(eternus_pool)s,找不到存储器配置服务。" - -#, python-format -msgid "" -"create_volume, volumename: %(volumename)s, poolname: %(eternus_pool)s, " -"Return code: %(rc)lu, Error: %(errordesc)s." -msgstr "" -"create_volume,volumename:%(volumename)s,poolname:%(eternus_pool)s,返回" -"码:%(rc)lu,错误:%(errordesc)s。" - -msgid "create_volume_from_snapshot, Source Volume does not exist in ETERNUS." -msgstr "create_volume_from_snapshot,源卷在 ETERNUS 中不存在。" - -#, python-format -msgid "" -"create_volume_from_snapshot, target volume instancename: " -"%(volume_instancename)s, Get Instance Failed." 
-msgstr "" -"create_volume_from_snapshot,目标卷实例名:%(volume_instancename)s,获取实例" -"失败。" - -#, python-format -msgid "create_volume_from_snapshot: Snapshot %(name)s does not exist." -msgstr "create_volume_from_snapshot:快照 %(name)s 不存在。" - -#, python-format -msgid "" -"create_volume_from_snapshot: Snapshot status must be \"available\" for " -"creating volume. The invalid status is: %s." -msgstr "" -"create_volume_from_snapshot:快照状态必须为“可用”,以便创建卷。无效状态为 " -"%s。" - -msgid "" -"create_volume_from_snapshot: Volume size is different from snapshot based " -"volume." -msgstr "create_volume_from_snapshot:卷大小不同于基于快照的卷。" - -msgid "delete host from group failed. " -msgstr "从组中删除主机失败。" - -#, python-format -msgid "" -"delete port from host failed. host name:%(name)s, port name:%(port)s with " -"Return code: %(ret)s." -msgstr "从主机 %(name)s 中删除端口 %(port)s 失败,返回的状态码为:%(ret)s。" - -#, python-format -msgid "" -"delete: %(vol_id)s failed to run dsmc due to invalid arguments with stdout: " -"%(out)s\n" -" stderr: %(err)s" -msgstr "" -"删除:%(vol_id)s 由于自变量无效而未能运行 dsmc,标准输出:%(out)s\n" -"标准错误:%(err)s" - -#, python-format -msgid "" -"delete: %(vol_id)s failed to run dsmc with stdout: %(out)s\n" -" stderr: %(err)s" -msgstr "" -"删除:%(vol_id)s 未能运行 dsmc,标准输出:%(out)s\n" -"标准错误:%(err)s" - -msgid "delete_hypermetro error." -msgstr "delete_hypermetro 错误。" - -#, python-format -msgid "delete_initiator: %s ACL not found. Continuing." -msgstr "找不到 delete_initiator: %s ACL。正在继续。" - -msgid "delete_replication error." -msgstr "delete_replication 错误。" - -#, python-format -msgid "deleting snapshot %(snapshot_name)s that has dependent volumes" -msgstr "正在删除具有从属卷的快照 %(snapshot_name)s" - -#, python-format -msgid "deleting volume %(volume_name)s that has snapshot" -msgstr "正在删除有快照的卷 %(volume_name)s" - -msgid "detach snapshot from remote node" -msgstr "从远程节点拆离快照" - -msgid "do_setup: No configured nodes." 
-msgstr "do_setup:不存在任何已配置的节点。" - -#, python-format -msgid "" -"error writing object to swift, MD5 of object in swift %(etag)s is not the " -"same as MD5 of object sent to swift %(md5)s" -msgstr "" -"将对象写入 swift 时出错,swift %(etag)s 中对象的 MD5 与发送至 swift %(md5)s " -"的对象的 MD5 不同" - -#, python-format -msgid "" -"error: Incorrect value of new size: %(new_size_in_gb)s GB\n" -"New size must be greater than current size: %(current_size)s GB" -msgstr "" -"错误: 新大小的值不正确: %(new_size_in_gb)s GB\n" -"新大小必须大于当前大小: %(current_size)s GB" - -msgid "error: task list is empty, no task response found" -msgstr "错误:任务列表为空,找不到任何任务响应" - -msgid "" -"existing_ref argument must be of this format:app_inst_name:storage_inst_name:" -"vol_name" -msgstr "" -"参数 existing_ref 的格式必须为:app_inst_name:storage_inst_name:vol_name。" - -#, python-format -msgid "extend_volume, eternus_pool: %(eternus_pool)s, pool not found." -msgstr "extend_volume,eternus_pool:%(eternus_pool)s,找不到池。" - -#, python-format -msgid "" -"extend_volume, volume: %(volume)s, volumename: %(volumename)s, eternus_pool: " -"%(eternus_pool)s, Storage Configuration Service not found." -msgstr "" -"extend_volume,卷:%(volume)s, volumename:%(volumename)s,eternus_pool:" -"%(eternus_pool)s,找不到存储器配置服务。" - -#, python-format -msgid "" -"extend_volume, volumename: %(volumename)s, Return code: %(rc)lu, Error: " -"%(errordesc)s, PoolType: %(pooltype)s." -msgstr "" -"extend_volume,volumename:%(volumename)s,返回码:%(rc)lu,错误:" -"%(errordesc)s,池类型:%(pooltype)s。" - -#, python-format -msgid "extend_volume, volumename: %(volumename)s, volume not found." 
-msgstr "extend_volume,volumename:%(volumename)s,找不到卷。" - -msgid "failed to create new_volume on destination host" -msgstr "未能在目标主机上创建新卷" - -msgid "fake" -msgstr "fake" - -#, python-format -msgid "file already exists at %s" -msgstr "文件已存在在 %s" - -msgid "fileno is not supported by SheepdogIOWrapper" -msgstr "SheepdogIOWrapper 不支持 fileno" - -msgid "fileno() not supported by RBD()" -msgstr "fileno() 不受 RBD() 支持" - -#, python-format -msgid "filesystem %s does not exist in Nexenta Store appliance" -msgstr "文件系统 %s 在 Nexenta 存储设备中不存在" - -msgid "" -"flashsystem_multihostmap_enabled is set to False, not allow multi host " -"mapping. CMMVC6071E The VDisk-to-host mapping was not created because the " -"VDisk is already mapped to a host." -msgstr "" -"flashsystem_multihostmap_enabled 设置为 False,因而不允许多主机映射。" -"CMMVC6071E 该 VDisk 至主机的映射未创建,因为该 VDisk 已映射到某个主机。" - -msgid "flush() not supported in this version of librbd" -msgstr "在 librbd 的此版本中,flush() 不受支持" - -#, python-format -msgid "fmt=%(fmt)s backed by: %(backing_file)s" -msgstr "fmt=%(fmt)s 由 %(backing_file)s 支持" - -#, python-format -msgid "fmt=%(fmt)s backed by:%(backing_file)s" -msgstr "fmt=%(fmt)s 受以下项支持:%(backing_file)s" - -msgid "force delete" -msgstr "强制删除" - -#, python-format -msgid "get_Net_Cfg failed. Return code: %(ret)s." -msgstr "get_Net_Cfg失败。返回的状态码为: %(ret)s." - -msgid "get_hyper_domain_id error." -msgstr "get_hyper_domain_id 错误。" - -msgid "get_hypermetro_by_id error." -msgstr "get_hypermetro_by_id 错误。" - -#, python-format -msgid "" -"get_iscsi_params: Failed to get target IP for initiator %(ini)s, please " -"check config file." -msgstr "get_iscsi_params:未能获取发起方 %(ini)s 的目标 IP,请检查配置文件。" - -#, python-format -msgid "get_pool: Failed to get attributes for volume %s" -msgstr "get_pool:无法获取卷 %s 的属性" - -msgid "glance_metadata changed" -msgstr "已更改 glance_metadata" - -#, python-format -msgid "" -"gpfs_images_share_mode is set to copy_on_write, but %(vol)s and %(img)s " -"belong to different file systems." 
-msgstr "" -"gpfs_images_share_mode 已设置为 copy_on_write,但 %(vol)s 和 %(img)s 属于不同" -"文件系统。" - -#, python-format -msgid "" -"gpfs_images_share_mode is set to copy_on_write, but %(vol)s and %(img)s " -"belong to different filesets." -msgstr "" -"gpfs_images_share_mode 已设置为 copy_on_write,但 %(vol)s 和 %(img)s 属于不同" -"文件集。" - -msgid "group assigned" -msgstr "已分配组" - -msgid "group changed" -msgstr "已更改组" - -#, python-format -msgid "group-%s" -msgstr "组 %s" - -msgid "" -"group_creating_from_src must be called with group_id or group_snapshot_id " -"parameter." -msgstr "" -"group_creating_from_src必须被调用,参数为group_id或者group_snapshot_id。" - -msgid "group_snapshot assigned" -msgstr "已分配组快照" - -msgid "group_snapshot changed" -msgstr "已更改组快照" - -msgid "group_snapshots assigned" -msgstr "已分配组快照" - -#, python-format -msgid "group_type must be provided to create group %(name)s." -msgstr "创建组%(name)s时必须提供group_type。" - -msgid "group_type_id cannot be None" -msgstr "group_type_id 不能为None" - -#, python-format -msgid "" -"hgst_group %(grp)s and hgst_user %(usr)s must map to valid users/groups in " -"cinder.conf" -msgstr "" -"在 cinder.conf 中,hgst_group %(grp)s 和 hgst_user %(usr)s必须映射至有效用户/" -"组" - -#, python-format -msgid "hgst_net %(net)s specified in cinder.conf not found in cluster" -msgstr "在集群中找不到 cinder.conf 中所指定的 hgst_net %(net)s" - -msgid "hgst_redundancy must be set to 0 (non-HA) or 1 (HA) in cinder.conf." -msgstr "在 cinder.conf 中,hgst_redundancy 必须设置为 0(非 HA)或者 1 (HA)。" - -msgid "hgst_space_mode must be an octal/int in cinder.conf" -msgstr "在 cinder.conf 中,hgst_space_mode 必须为 octal/int" - -#, python-format -msgid "hgst_storage server %(svr)s not of format :" -msgstr "hgst_storage 服务器 %(svr)s 不是 : 格式" - -msgid "hgst_storage_servers must be defined in cinder.conf" -msgstr "必须在 cinder.conf 中定义 hgst_storage_servers" - -msgid "" -"http service may have been abruptly disabled or put to maintenance state in " -"the middle of this operation." 
-msgstr "HTTP 服务可能已突然禁用,或在此操作的中途进入维护状态。" - -msgid "id cannot be None" -msgstr "id不能是None" - -#, python-format -msgid "image %s not found" -msgstr "找不到映像 %s " - -#, python-format -msgid "initialize_connection, volume: %(volume)s, Volume not found." -msgstr "initialize_connection,卷:%(volume)s,找不到卷。" - -#, python-format -msgid "initialize_connection: Failed to get attributes for volume %s." -msgstr "initialize_connection:未能获取卷 %s 的属性。" - -#, python-format -msgid "initialize_connection: Missing volume attribute for volume %s." -msgstr "initialize_connection:缺少卷 %s 的卷属性。" - -#, python-format -msgid "" -"initialize_connection: No node found in I/O group %(gid)s for volume %(vol)s." -msgstr "initialize_connection:在卷 %(vol)s 的 I/O 组 %(gid)s 中找不到节点。" - -#, python-format -msgid "initialize_connection: vdisk %s is not defined." -msgstr "initialize_connection:未定义 vdisk %s。" - -#, python-format -msgid "invalid user '%s'" -msgstr "用户 '%s' 无效" - -#, python-format -msgid "iscsi portal, %s, not found" -msgstr "找不到 iscsi 门户网站 %s" - -msgid "" -"iscsi_ip_address must be set in config file when using protocol 'iSCSI'." -msgstr "使用“iSCSI”协议时,必须在配置文件中设置 iscsi_ip_address。" - -#, python-format -msgid "key manager error: %(reason)s" -msgstr "发生密钥管理器错误:%(reason)s" - -msgid "limit param must be an integer" -msgstr "limit 参数必须是整数" - -msgid "limit param must be positive" -msgstr "limit参数必须是正数" - -msgid "manage_existing requires a 'name' key to identify an existing volume." 
-msgstr "manage_existing 需要“name”键以标识现有卷。" - -#, python-format -msgid "" -"manage_existing_snapshot: Error managing existing replay %(ss)s on volume " -"%(vol)s" -msgstr "manage_existing_snapshot:管理卷 %(vol)s 上的现有重放 %(ss)s 时出错" - -#, python-format -msgid "marker [%s] not found" -msgstr "没有找到标记 [%s]" - -#, python-format -msgid "marker not found: %s" -msgstr "没有找到标记: %s" - -#, python-format -msgid "mdiskgrp missing quotes %s" -msgstr "Mdisk 组缺少引号 %s" - -#, python-format -msgid "migration_policy must be 'on-demand' or 'never', passed: %s" -msgstr "migration_policy 必须为“on-demand”或“never”,已传递:%s" - -#, python-format -msgid "mkfs failed on volume %(vol)s, error message was: %(err)s." -msgstr "mkfs 在卷 %(vol)s 上发生故障,错误消息如下:%(err)s。" - -msgid "mock" -msgstr "mock" - -msgid "mount.glusterfs is not installed" -msgstr "未安装 mount.glusterfs" - -#, python-format -msgid "multiple resources with name %s found by drbdmanage" -msgstr "drbdmanage 找到多个名称为 %s 的资源" - -#, python-format -msgid "multiple resources with snapshot ID %s found" -msgstr "找到多个快照标识为 %s 的资源" - -msgid "name cannot be None" -msgstr "name不能是None" - -msgid "no \"access-key\" field" -msgstr "不存在\"access-key\"域" - -msgid "no \"user\" field" -msgstr "不存在“user”域" - -#, python-format -msgid "no REPLY but %r" -msgstr "无回复,但收到 %r" - -#, python-format -msgid "no snapshot with id %s found in drbdmanage" -msgstr "在 drbdmanage 中找不到标识为 %s 的快照" - -#, python-format -msgid "not exactly one snapshot with id %s" -msgstr "而不是刚好只有一个标识为 %s 的快照" - -#, python-format -msgid "not exactly one volume with id %s" -msgstr "而不是刚好只有一个标识为 %s 的卷" - -#, python-format -msgid "obj missing quotes %s" -msgstr "对象缺少引号 %s" - -msgid "open_access_enabled is not off." -msgstr "open_access_enabled 未关闭。" - -msgid "progress must be an integer percentage" -msgstr "进度必须为整数百分比" - -msgid "provider must be defined" -msgstr "必须定义提供程序" - -#, python-format -msgid "" -"qemu-img %(minimum_version)s or later is required by this volume driver. 
" -"Current qemu-img version: %(current_version)s" -msgstr "" -"此卷驱动程序需要 qemu-img %(minimum_version)s 或更高版本。当前 qemu-img 版" -"本:%(current_version)s" - -#, python-format -msgid "" -"qemu-img is not installed and image is of type %s. Only RAW images can be " -"used if qemu-img is not installed." -msgstr "" -"qemu-img 未安装,并且映像的类型为 %s。仅当 qemu-img 未安装时,才能使用原始映" -"像。" - -msgid "" -"qemu-img is not installed and the disk format is not specified. Only RAW " -"images can be used if qemu-img is not installed." -msgstr "" -"qemu-img 未安装,并且磁盘格式未指定。仅当 qemu-img 未安装时,才能使用原始映" -"像。" - -msgid "rados and rbd python libraries not found" -msgstr "找不到 rados 和 rbd python 库" - -#, python-format -msgid "read_deleted can only be one of 'no', 'yes' or 'only', not %r" -msgstr "read_deleted 只能是“no”、“yes”或“only”其中一项,而不能是 %r" - -#, python-format -msgid "replication_failover failed. %s not found." -msgstr "replication_failover 失败。找不到 %s。" - -msgid "replication_failover failed. Backend not configured for failover" -msgstr "replication_failover 失败。未配置后端,无法进行故障转移" - -#, python-format -msgid "" -"restore: %(vol_id)s failed to run dsmc due to invalid arguments on " -"%(bpath)s.\n" -"stdout: %(out)s\n" -" stderr: %(err)s" -msgstr "" -"复原:%(vol_id)s 由于 %(bpath)s 上的自变量无效而未能运行 dsmc。\n" -"标准输出:%(out)s\n" -"标准错误:%(err)s" - -#, python-format -msgid "" -"restore: %(vol_id)s failed to run dsmc on %(bpath)s.\n" -"stdout: %(out)s\n" -" stderr: %(err)s" -msgstr "" -"复原:%(vol_id)s 未能对 %(bpath)s 运行 dsmc。\n" -"标准输出:%(out)s\n" -"标准错误:%(err)s" - -#, python-format -msgid "" -"restore: %(vol_id)s failed.\n" -"stdout: %(out)s\n" -" stderr: %(err)s." -msgstr "" -"复原:%(vol_id)s 失败。\n" -"标准输出:%(out)s\n" -"标准错误:%(err)s。" - -msgid "" -"restore_backup aborted, actual object list does not match object list stored " -"in metadata." -msgstr "" -"restore_backup 已异常中止,实际的对象列表与存储在元数据中的对象列表不匹配。" - -#, python-format -msgid "rtslib_fb is missing member %s: You may need a newer python-rtslib-fb." 
-msgstr "rtslib_fb 缺少成员 %s:您可能需要较新的 python-rtslib-fb。" - -msgid "san_ip is not set." -msgstr "未设置 san_ip。" - -msgid "san_ip must be set" -msgstr "san_ip必须设置" - -msgid "" -"san_login and/or san_password is not set for Datera driver in the cinder." -"conf. Set this information and start the cinder-volume service again." -msgstr "" -"没有在 cinder.conf 中为 Datera 驱动程序设置 san_login 和/或 san_password。请" -"设置此信息并再次启动 cinder-volume服务。" - -msgid "" -"scaleio_verify_server_certificate is True but " -"scaleio_server_certificate_path is not provided in cinder configuration" -msgstr "" -"在cinder配置中scaleio_verify_server_certificate的值为True,但是未提供" -"scaleio_server_certificate_path" - -msgid "serve() can only be called once" -msgstr "serve() 只能调用一次" - -#, python-format -msgid "snapshot with the name: %s Not Found" -msgstr "找不到名称为 %s 的快照" - -#, python-format -msgid "snapshot-%s" -msgstr "快照 - %s" - -msgid "snapshots assigned" -msgstr "已更改快照" - -msgid "snapshots changed" -msgstr "已更改快照" - -#, python-format -msgid "source volume id:%s is not replicated" -msgstr "未复制源卷标识 %s" - -msgid "source-name cannot be empty." -msgstr "source-name 不能为空。" - -msgid "source-name format should be: 'vmdk_path@vm_inventory_path'." -msgstr "source-name 格式应为“vmdk_path@vm_inventory_path”。" - -msgid "specs must be a dictionary." -msgstr "规格说明必须是字典。" - -#, python-format -msgid "status must be %s and" -msgstr "状态必须为 %s,并且" - -msgid "status must be available" -msgstr "状态必须可用" - -msgid "stop_hypermetro error." -msgstr "stop_hypermetro 错误。" - -msgid "sync_hypermetro error." -msgstr "sync_hypermetro 错误。" - -#, python-format -msgid "target=%(target)s, lun=%(lun)s" -msgstr "目标=%(target)s, lun=%(lun)s" - -#, python-format -msgid "" -"targetcli not installed and could not create default directory " -"(%(default_path)s): %(exc)s" -msgstr "未安装 targetcli,并且未能创建缺省目录(%(default_path)s):%(exc)s" - -msgid "terminate_connection: Failed to get host name from connector." 
-msgstr "terminate_connection:未能从连接器获取主机名。" - -msgid "timeout creating new_volume on destination host" -msgstr "在目标主机上创建新卷超时" - -msgid "too many body keys" -msgstr "过多主体密钥" - -#, python-format -msgid "umount: %s: not mounted" -msgstr "umount: %s:未安装" - -#, python-format -msgid "umount: %s: target is busy" -msgstr "umount: %s:目标正忙" - -msgid "umount: : some other error" -msgstr "umount: :某个其他错误" - -msgid "umount: : target is busy" -msgstr "umount: :目标正忙" - -#, python-format -msgid "unmanage_snapshot: Cannot find snapshot named %s" -msgstr "unmanage_snapshot:找不到名为 %s 的快照" - -#, python-format -msgid "unmanage_snapshot: Cannot find volume id %s" -msgstr "unmanage_snapshot:找不到卷标识 %s" - -#, python-format -msgid "unrecognized argument %s" -msgstr "无法识别自变量 %s" - -#, python-format -msgid "unsupported compression algorithm: %s" -msgstr "以下压缩算法不受支持:%s" - -msgid "valid iqn needed for show_target" -msgstr "show_target 需要有效 iqn" - -#, python-format -msgid "varray %s: not found" -msgstr "找不到varray:%s" - -#, python-format -msgid "vdisk %s is not defined." -msgstr "未定义 vdisk %s。" - -msgid "vmemclient python library not found" -msgstr "找不到 vmemclient python 库" - -#, python-format -msgid "volume %s not found in drbdmanage" -msgstr "在 drbdmanage 中找不到卷 %s" - -msgid "volume assigned" -msgstr "卷已分配" - -msgid "volume changed" -msgstr "卷已更改" - -msgid "volume is already attached" -msgstr "卷已连接" - -msgid "volume is not local to this node" -msgstr "卷不是此节点的本地卷" - -#, python-format -msgid "" -"volume size %(volume_size)d is too small to restore backup of size %(size)d." -msgstr "卷大小 %(volume_size)d 太小,无法复原大小为 %(size)d 的备份。" - -#, python-format -msgid "volume size %d is invalid." -msgstr "卷大小 %d 无效。" - -msgid "" -"volume_type must be provided when creating a volume in a consistency group." -msgstr "在一致性组中创建卷时,必须提供 volume_type。" - -msgid "volume_type must be provided when creating a volume in a group." 
-msgstr "在组中创建卷时,必须提供 volume_type。" - -msgid "volume_type_id cannot be None" -msgstr "volume_type_id 不能为“无”" - -msgid "volume_types assigned" -msgstr "已分配卷类型" - -#, python-format -msgid "volume_types must be provided to create consistency group %(name)s." -msgstr "必须提供 volume_types,才能创建一致性组 %(name)s。" - -#, python-format -msgid "volume_types must be provided to create consistency group %s." -msgstr "必须提供 volume_types,才能创建一致性组 %s。" - -#, python-format -msgid "volume_types must be provided to create group %(name)s." -msgstr "必须提供volume_types,才能创建组 %(name)s。" - -msgid "volumes assigned" -msgstr "已分配卷" - -msgid "volumes changed" -msgstr "已更改卷" - -#, python-format -msgid "wait_for_condition: %s timed out." -msgstr "wait_for_condition:%s 已超时。" - -#, python-format -msgid "" -"zfssa_manage_policy property needs to be set to 'strict' or 'loose'. Current " -"value is: %s." -msgstr "zfssa_manage_policy 属性需要设置为“strict”或“loose”。当前值为:%s。" diff --git a/cinder/locale/zh_TW/LC_MESSAGES/cinder.po b/cinder/locale/zh_TW/LC_MESSAGES/cinder.po deleted file mode 100644 index 846ecf3e1..000000000 --- a/cinder/locale/zh_TW/LC_MESSAGES/cinder.po +++ /dev/null @@ -1,8965 +0,0 @@ -# Translations template for cinder. -# Copyright (C) 2015 ORGANIZATION -# This file is distributed under the same license as the cinder project. -# -# Translators: -# FIRST AUTHOR , 2011 -# Andreas Jaeger , 2016. 
#zanata -msgid "" -msgstr "" -"Project-Id-Version: cinder 9.0.0.0rc2.dev202\n" -"Report-Msgid-Bugs-To: https://bugs.launchpad.net/openstack-i18n/\n" -"POT-Creation-Date: 2016-10-07 03:25+0000\n" -"MIME-Version: 1.0\n" -"Content-Type: text/plain; charset=UTF-8\n" -"Content-Transfer-Encoding: 8bit\n" -"PO-Revision-Date: 2016-03-18 12:46+0000\n" -"Last-Translator: Jennifer \n" -"Language: zh-TW\n" -"Plural-Forms: nplurals=1; plural=0;\n" -"Generated-By: Babel 2.0\n" -"X-Generator: Zanata 3.7.3\n" -"Language-Team: Chinese (Taiwan)\n" - -#, python-format -msgid "\t%s" -msgstr "\t%s" - -#, python-format -msgid "" -"\n" -"OpenStack Cinder version: %(version)s\n" -msgstr "" -"\n" -"OpenStack Cinder 版本:%(version)s\n" - -#, python-format -msgid " but size is now %d" -msgstr "但大小現在為 %d" - -#, python-format -msgid " but size is now %d." -msgstr "但是,大小現在卻是 %d。" - -msgid " or " -msgstr "或者" - -#, python-format -msgid "%(attr)s is not set." -msgstr "未設定 %(attr)s。" - -#, python-format -msgid "" -"%(driver)s manage_existing cannot manage a volume connected to hosts. Please " -"disconnect this volume from existing hosts before importing" -msgstr "" -"%(driver)s manage_existing 無法管理已連接至主機的磁區。請先斷開此磁區與現有主" -"機的連線,然後再匯入" - -#, python-format -msgid "%(err)s" -msgstr "%(err)s" - -#, python-format -msgid "" -"%(err)s\n" -"result: %(res)s." -msgstr "" -"%(err)s\n" -"結果:%(res)s。" - -#, python-format -msgid "%(error_message)s" -msgstr "%(error_message)s" - -#, python-format -msgid "%(exception)s: %(explanation)s" -msgstr "發生 %(exception)s:%(explanation)s" - -#, python-format -msgid "%(file)s: Permission denied." 
-msgstr "%(file)s:許可權遭拒。" - -#, python-format -msgid "" -"%(fun)s: Failed with unexpected CLI output.\n" -" Command: %(cmd)s\n" -" stdout: %(out)s\n" -" stderr: %(err)s" -msgstr "" -"%(fun)s:失敗,CLI 輸出不符合預期。\n" -"指令:%(cmd)s\n" -" 標準輸出:%(out)s\n" -" 標準錯誤:%(err)s" - -#, python-format -msgid "%(host)-25s\t%(availability_zone)-15s" -msgstr "%(host)-25s\t%(availability_zone)-15s" - -#, python-format -msgid "%(host)-25s\t%(zone)-15s" -msgstr "%(host)-25s\t%(zone)-15s" - -#, python-format -msgid "%(message)s" -msgstr "%(message)s" - -#, python-format -msgid "" -"%(message)s\n" -"Status Code: %(_status)s\n" -"Body: %(_body)s" -msgstr "" -"%(message)s\n" -"狀態碼:%(_status)s\n" -"主體:%(_body)s" - -#, python-format -msgid "%(message)s, subjectAltName: %(sanList)s." -msgstr "%(message)s,subjectAltName:%(sanList)s。" - -#, python-format -msgid "" -"%(msg_type)s: creating NetworkPortal: ensure port %(port)d on ip %(ip)s is " -"not in use by another service." -msgstr "" -"%(msg_type)s:正在建立 NetworkPortal:確保 IP %(ip)s 上的埠 %(port)d未在由另" -"一個服務使用。" - -#, python-format -msgid "" -"%(op)s: backup %(bck_id)s, volume %(vol_id)s failed. Backup object has " -"unexpected mode. Image or file backups supported, actual mode is " -"%(vol_mode)s." -msgstr "" -"%(op)s:備份 %(bck_id)s,磁區 %(vol_id)s 失敗。備份物件具有非預期的模式。支援" -"映像檔或檔案備份,實際模式為%(vol_mode)s。" - -#, python-format -msgid "%(service)s Service is not %(status)s on storage appliance: %(host)s" -msgstr "%(service)s 服務在儲存體軟體驅動裝置 %(host)s 上不是處於 %(status)s" - -#, python-format -msgid "%(value_name)s must be <= %(max_value)d" -msgstr "%(value_name)s 必須 <= %(max_value)d" - -#, python-format -msgid "%(value_name)s must be >= %(min_value)d" -msgstr "%(value_name)s 必須 >= %(min_value)d" - -#, python-format -msgid "" -"%(worker_name)s value of %(workers)d is invalid, must be greater than 0." -msgstr "%(workers)d 的 %(worker_name)s 值無效,必須大於 0。" - -#, python-format -msgid "%s" -msgstr "%s" - -#, python-format -msgid "%s \"data\" is not in result." 
-msgstr "結果中沒有 %s \"data\"。" - -#, python-format -msgid "" -"%s cannot be accessed. Verify that GPFS is active and file system is mounted." -msgstr "無法存取 %s。請確認 GPFS 在作用中且檔案系統已裝載。" - -#, python-format -msgid "%s cannot be resized using clone operation as it contains no blocks." -msgstr "無法使用複製作業來調整 %s 的大小,因為它不包含任何區塊。" - -#, python-format -msgid "" -"%s cannot be resized using clone operation as it is hosted on compressed " -"volume" -msgstr "無法使用複製作業來調整 %s 的大小,因為它是在壓縮磁區上進行管理" - -#, python-format -msgid "%s configuration option is not set." -msgstr "未設定 %s 配置選項。" - -#, python-format -msgid "%s does not exist." -msgstr "%s 不存在。" - -#, python-format -msgid "%s is not a directory." -msgstr "%s 不是目錄。" - -#, python-format -msgid "%s is not installed" -msgstr "%s 未安裝" - -#, python-format -msgid "%s is not installed." -msgstr "未安裝 %s。" - -#, python-format -msgid "%s is not set" -msgstr "未設定 %s" - -#, python-format -msgid "%s is not set and is required for the replication device to be valid." -msgstr "%s 未設定,並且是抄寫裝置變成有效所必需的。" - -#, python-format -msgid "%s is not set." -msgstr "未設定 %s。" - -#, python-format -msgid "%s must be a valid raw or qcow2 image." -msgstr "%s 必須是有效的原始映像檔或 qcow2 映像檔。" - -#, python-format -msgid "%s must be an absolute path." -msgstr "%s 必須是絕對路徑。" - -#, python-format -msgid "%s must be an integer." -msgstr "%s 必須是整數。" - -#, python-format -msgid "%s not set in cinder.conf" -msgstr "%s 未在 cinder.conf 中設定" - -#, python-format -msgid "%s not set." -msgstr "未設定 %s。" - -#, python-format -msgid "" -"'%(prot)s' is invalid for flashsystem_connection_protocol in config file. " -"valid value(s) are %(enabled)s." -msgstr "" -"在配置檔中,'%(prot)s' 不適用於 flashsystem_connection_protocol。有效值為 " -"%(enabled)s。" - -msgid "'active' must be present when writing snap_info." -msgstr "寫入 snap_info 時,狀態必須是「作用中」。" - -msgid "'consistencygroup_id' must be specified" -msgstr "必須指定 'consistencygroup_id'" - -msgid "'qemu-img info' parsing failed." 
-msgstr "'qemu-img info' 剖析失敗。" - -msgid "'status' must be specified." -msgstr "必須指定 'status'。" - -msgid "'volume_id' must be specified" -msgstr "必須指定 'volume_id'" - -#, python-format -msgid "" -"(Command: %(cmd)s) (Return Code: %(exit_code)s) (Stdout: %(stdout)s) " -"(Stderr: %(stderr)s)" -msgstr "" -"(指令:%(cmd)s)(回覆碼:%(exit_code)s)(標準輸出:%(stdout)s)(標準錯" -"誤:%(stderr)s)" - -#, python-format -msgid "A LUN (HLUN) was not found. (LDEV: %(ldev)s)" -msgstr "找不到 LUN (HLUN)。(LDEV:%(ldev)s)" - -msgid "A concurrent, possibly contradictory, request has been made." -msgstr "已發出並行(可能矛盾)的要求。" - -#, python-format -msgid "" -"A free LUN (HLUN) was not found. Add a different host group. (LDEV: %(ldev)s)" -msgstr "找不到可用的 LUN (HLUN)。請新增不同的主機群組。(LDEV:%(ldev)s)" - -#, python-format -msgid "A host group could not be added. (port: %(port)s, name: %(name)s)" -msgstr "無法新增主機群組。(埠:%(port)s,名稱:%(name)s)" - -#, python-format -msgid "" -"A host group could not be deleted. (port: %(port)s, gid: %(gid)s, name: " -"%(name)s)" -msgstr "無法刪除主機群組。(埠:%(port)s,GID:%(gid)s,名稱:%(name)s)" - -#, python-format -msgid "A host group is invalid. (host group: %(gid)s)" -msgstr "主機群組無效。(主機群組:%(gid)s)" - -#, python-format -msgid "A pair cannot be deleted. (P-VOL: %(pvol)s, S-VOL: %(svol)s)" -msgstr "無法刪除配對。(P-VOL:%(pvol)s,S-VOL:%(svol)s)" - -#, python-format -msgid "" -"A pair could not be created. The maximum number of pair is exceeded. (copy " -"method: %(copy_method)s, P-VOL: %(pvol)s)" -msgstr "" -"無法建立配對。已超出配對數目上限。(複製方法:%(copy_method)s,P-VOL:" -"%(pvol)s)" - -#, python-format -msgid "A parameter is invalid. (%(param)s)" -msgstr "參數無效。(%(param)s)" - -#, python-format -msgid "A parameter value is invalid. (%(meta)s)" -msgstr "參數值無效。(%(meta)s)" - -#, python-format -msgid "A pool could not be found. (pool id: %(pool_id)s)" -msgstr "找不到儲存區。(儲存區 ID:%(pool_id)s)" - -#, python-format -msgid "A snapshot status is invalid. 
(status: %(status)s)" -msgstr "Snapshot 狀態無效。(狀態:%(status)s)" - -msgid "A valid secondary target MUST be specified in order to failover." -msgstr "必須指定有效的次要目標,才能進行失效接手。" - -msgid "A volume ID or share was not specified." -msgstr "未指定磁區 ID 或共用項目。" - -#, python-format -msgid "A volume status is invalid. (status: %(status)s)" -msgstr "磁區狀態無效。(狀態:%(status)s)" - -#, python-format -msgid "API %(name)s failed with error string %(err)s" -msgstr "API %(name)s 失敗,錯誤字串為 %(err)s" - -#, python-format -msgid "" -"API Version String %(version)s is of invalid format. Must be of format " -"MajorNum.MinorNum." -msgstr "API 版本字串 %(version)s 的格式無效。格式必須是 MajorNum.MinorNum。" - -msgid "API key is missing for CloudByte driver." -msgstr "CloudByte 驅動程式遺漏 API 索引鍵。" - -#, python-format -msgid "API response: %(response)s" -msgstr "API 回應:%(response)s" - -#, python-format -msgid "API response: %s" -msgstr "API 回應:%s" - -#, python-format -msgid "API version %(version)s is not supported on this method." -msgstr "此方法不支援 API %(version)s 版。" - -msgid "API version could not be determined." -msgstr "無法判定 API 版本。" - -msgid "" -"About to delete child projects having non-zero quota. This should not be " -"performed" -msgstr "即將刪除具有非零配額的子項專案。不應執行此動作" - -msgid "Access list not available for public volume types." -msgstr "存取清單不可用於公用磁區類型。" - -msgid "Activate or deactivate QoS error." -msgstr "啟動或關閉服務品質時發生錯誤。" - -msgid "Activate snapshot error." -msgstr "啟動 Snapshot 時發生錯誤。" - -msgid "Add FC port to host error." -msgstr "將 FC 埠新增至主機時發生錯誤。" - -msgid "Add fc initiator to array error." -msgstr "將 FC 起始器新增至陣列時發生錯誤。" - -msgid "Add initiator to array error." -msgstr "將起始器新增至陣列時發生錯誤。" - -msgid "Add lun to cache error." -msgstr "將 LUN 新增至快取時發生錯誤。" - -msgid "Add lun to partition error." -msgstr "將 LUN 新增至分割區時發生錯誤。" - -msgid "Add mapping view error." -msgstr "新增對映視圖時發生錯誤。" - -msgid "Add new host error." -msgstr "新增主機時發生錯誤。" - -msgid "Add port to port group error." 
-msgstr "將埠新增至埠群組時發生錯誤。" - -#, python-format -msgid "" -"All the specified storage pools to be managed do not exist. Please check " -"your configuration. Non-existent pools: %s" -msgstr "要管理的所有指定儲存區都不存在。請檢查配置。不存在的儲存區:%s" - -msgid "An API version request must be compared to a VersionedMethod object." -msgstr "API 版本要求必須與 VersionedMethod 物件進行比較。" - -msgid "An error has occurred during backup operation" -msgstr "執行備份作業期間發生錯誤" - -#, python-format -msgid "" -"An error occurred during the LUNcopy operation. LUNcopy name: " -"%(luncopyname)s. LUNcopy status: %(luncopystatus)s. LUNcopy state: " -"%(luncopystate)s." -msgstr "" -"LUNcopy 作業期間發生錯誤。LUNcopy 名稱:%(luncopyname)s。LUNcopy 狀態 " -"(status):%(luncopystatus)s。LUNcopy 狀態 (state):%(luncopystate)s。" - -#, python-format -msgid "An error occurred while reading volume \"%s\"." -msgstr "讀取磁區 \"%s\" 時發生錯誤。" - -#, python-format -msgid "An error occurred while writing to volume \"%s\"." -msgstr "寫入磁區 \"%s\" 時發生錯誤。" - -#, python-format -msgid "An iSCSI CHAP user could not be added. (username: %(user)s)" -msgstr "無法新增 iSCSI CHAP 使用者。(使用者名稱:%(user)s)" - -#, python-format -msgid "An iSCSI CHAP user could not be deleted. (username: %(user)s)" -msgstr "無法刪除 iSCSI CHAP 使用者。(使用者名稱:%(user)s)" - -#, python-format -msgid "" -"An iSCSI target could not be added. (port: %(port)s, alias: %(alias)s, " -"reason: %(reason)s)" -msgstr "" -"無法新增 iSCSI 目標。(埠:%(port)s,別名:%(alias)s,原因:%(reason)s)" - -#, python-format -msgid "" -"An iSCSI target could not be deleted. (port: %(port)s, tno: %(tno)s, alias: " -"%(alias)s)" -msgstr "無法刪除 iSCSI 目標。(埠:%(port)s,TNO:%(tno)s,別名:%(alias)s)" - -msgid "An unknown exception occurred." -msgstr "發生一個未知例外" - -msgid "" -"An user with a token scoped to a subproject is not allowed to see the quota " -"of its parents." -msgstr "不容許具備已限定為某個子專案之記號的使用者來查看其母項的配額。" - -msgid "Append port group description error." 
-msgstr "附加埠群組說明時發生錯誤。" - -#, python-format -msgid "" -"Applying the zones and cfgs to the switch failed (error code=%(err_code)s " -"error msg=%(err_msg)s." -msgstr "" -"將區域和配置套用至交換器失敗(錯誤碼 = %(err_code)s,錯誤訊息 = " -"%(err_msg)s)。" - -#, python-format -msgid "Array does not exist or is offline. Current status of array is %s." -msgstr "陣列不存在或者已離線。陣列的現行狀態為 %s。" - -msgid "Associate host to hostgroup error." -msgstr "將主機關聯至主機群組時發生錯誤。" - -msgid "Associate host to mapping view error." -msgstr "將主機關聯至對映視圖時發生錯誤。" - -msgid "Associate initiator to host error." -msgstr "將起始器關聯至主機時發生錯誤。" - -msgid "Associate lun to QoS error." -msgstr "將 LUN 與服務品質建立關聯時發生錯誤。" - -msgid "Associate lun to lungroup error." -msgstr "將 LUN 關聯至 LUN 群組時發生錯誤。" - -msgid "Associate lungroup to mapping view error." -msgstr "將 LUN 群組關聯至對映視圖時發生錯誤。" - -msgid "Associate portgroup to mapping view error." -msgstr "將埠群組關聯至對映視圖時發生錯誤。" - -msgid "At least one valid iSCSI IP address must be set." -msgstr "必須至少設定一個有效的 iSCSI IP 位址。" - -#, python-format -msgid "Attempt to transfer %s with invalid auth key." -msgstr "嘗試傳送 %s,但使用的鑑別金鑰無效。" - -#, python-format -msgid "Auth group [%s] details not found in CloudByte storage." -msgstr "在 CloudByte 儲存體中找不到鑑別群組 [%s] 詳細資料。" - -msgid "Auth user details not found in CloudByte storage." -msgstr "在 CloudByte 儲存體中找不到鑑別使用者詳細資料。" - -#, python-format -msgid "Authentication failed, verify the switch credentials, error code %s." -msgstr "鑑別失敗,請驗證交換器認證,錯誤碼:%s。" - -#, python-format -msgid "Availability zone '%(s_az)s' is invalid." -msgstr "可用性區域 '%(s_az)s' 無效。" - -msgid "Available categories:" -msgstr "可用的種類:" - -msgid "" -"Back-end QoS specs are not supported on this storage family and ONTAP " -"version." 
-msgstr "後端服務品質規格在此儲存體系列和 ONTAP 版本上不受支援。" - -#, python-format -msgid "Backend doesn't exist (%(backend)s)" -msgstr "後端不存在 (%(backend)s)" - -#, python-format -msgid "Backend reports: %(message)s" -msgstr "後端報告:%(message)s" - -msgid "Backend reports: item already exists" -msgstr "後端報告:項目已存在" - -msgid "Backend reports: item not found" -msgstr "後端報告:找不到項目" - -#, python-format -msgid "Backend service retry timeout hit: %(timeout)s sec" -msgstr "後端服務重試逾時命中:%(timeout)s 秒" - -msgid "Backend storage did not configure fiber channel target." -msgstr "後端儲存體未配置光纖通道目標。" - -msgid "Backing up an in-use volume must use the force flag." -msgstr "備份使用中的磁區必須使用強制旗標。" - -#, python-format -msgid "Backup %(backup_id)s could not be found." -msgstr "找不到備份 %(backup_id)s。" - -msgid "Backup RBD operation failed" -msgstr "執行備份 RBD 作業時失敗" - -msgid "Backup already exists in database." -msgstr "備份已經存在於資料庫中。" - -#, python-format -msgid "Backup driver reported an error: %(message)s" -msgstr "備份驅動程式報告了錯誤:%(message)s" - -msgid "Backup id required" -msgstr "需要備份 ID" - -msgid "Backup is not supported for GlusterFS volumes with snapshots." -msgstr "具有 Snapshot 的 GlusterFS 磁區不支援備份。" - -msgid "Backup is only supported for SOFS volumes without backing file." -msgstr "僅支援備份沒有支援檔案的 SOFS 磁區。" - -msgid "Backup is only supported for raw-formatted GlusterFS volumes." -msgstr "原始格式的 GlusterFS 磁區僅支援備份。" - -msgid "Backup is only supported for raw-formatted SOFS volumes." -msgstr "僅支援備份原始格式的 SOFS 磁區。" - -msgid "Backup operation of an encrypted volume failed." -msgstr "已加密磁區的備份作業失敗。" - -#, python-format -msgid "" -"Backup service %(configured_service)s does not support verify. Backup id " -"%(id)s is not verified. Skipping verify." -msgstr "" -"備份服務 %(configured_service)s 不支援驗證。未驗證備份 ID%(id)s。正在跳過驗" -"證。" - -#, python-format -msgid "" -"Backup service %(service)s does not support verify. Backup id %(id)s is not " -"verified. Skipping reset." 
-msgstr "備份服務 %(service)s 不支援驗證。未驗證備份 ID %(id)s。正在跳過重設。" - -#, python-format -msgid "Backup should only have one snapshot but instead has %s" -msgstr "備份應該只有一個 Snapshot,但具有 %s 個" - -msgid "Backup status must be available" -msgstr "備份狀態必須為可用" - -#, python-format -msgid "Backup status must be available and not %s." -msgstr "備份狀態必須為可用,且不是 %s。" - -msgid "Backup status must be available or error" -msgstr "備份狀態必須為可用或者錯誤" - -msgid "Backup to be restored has invalid size" -msgstr "要還原的備份大小無效" - -#, python-format -msgid "Bad Status line returned: %(arg)s." -msgstr "傳回了不當的狀態行:%(arg)s。" - -#, python-format -msgid "Bad key(s) in quota set: %s" -msgstr "配額集中的索引鍵錯誤:%s" - -#, python-format -msgid "" -"Bad or unexpected response from the storage volume backend API: %(data)s" -msgstr "來自儲存磁區後端 API 的回應錯誤或不符合預期:%(data)s" - -#, python-format -msgid "Bad project format: project is not in proper format (%s)" -msgstr "專案格式不當:專案未採取適當格式 (%s)" - -msgid "Bad response from Datera API" -msgstr "Datera API 傳回錯誤的回應" - -msgid "Bad response from SolidFire API" -msgstr "SolidFire API 傳回錯誤的回應" - -#, python-format -msgid "Bad response from XMS, %s" -msgstr "XMS 傳回錯誤的回應,%s" - -msgid "Binary" -msgstr "二進位" - -msgid "Blank components" -msgstr "空白元件" - -msgid "Blockbridge api host not configured" -msgstr "未配置 Blockbridge API 主機" - -#, python-format -msgid "Blockbridge configured with invalid auth scheme '%(auth_scheme)s'" -msgstr "已使用無效的鑑別方法 '%(auth_scheme)s' 配置了 Blockbridge" - -msgid "Blockbridge default pool does not exist" -msgstr "Blockbridge 預設儲存區不存在" - -msgid "" -"Blockbridge password not configured (required for auth scheme 'password')" -msgstr "未配置 Blockbridge 密碼(鑑別方法「密碼」所需的項目)" - -msgid "Blockbridge pools not configured" -msgstr "未配置 Blockbridge 儲存區" - -msgid "Blockbridge token not configured (required for auth scheme 'token')" -msgstr "未配置 Blockbridge 記號(鑑別方法「記號」所需的項目)" - -msgid "Blockbridge user not configured (required for auth scheme 'password')" -msgstr "未配置 Blockbridge 使用者(鑑別方法「密碼」所需的項目)" 
- -#, python-format -msgid "Brocade Fibre Channel Zoning CLI error: %(reason)s" -msgstr "Brocade 光纖通道分區 CLI 錯誤:%(reason)s" - -#, python-format -msgid "Brocade Fibre Channel Zoning HTTP error: %(reason)s" -msgstr "Brocade 光纖通道分區 HTTP 錯誤:%(reason)s" - -msgid "CHAP secret should be 12-16 bytes." -msgstr "CHAP 密碼應該是 12-16 位元組。" - -#, python-format -msgid "" -"CLI Exception output:\n" -" command: %(cmd)s\n" -" stdout: %(out)s\n" -" stderr: %(err)s" -msgstr "" -"CLI 異常狀況輸出:\n" -" 指令:%(cmd)s\n" -" 標準輸出:%(out)s\n" -" 標準錯誤:%(err)s" - -#, python-format -msgid "" -"CLI Exception output:\n" -" command: %(cmd)s\n" -" stdout: %(out)s\n" -" stderr: %(err)s." -msgstr "" -"CLI 異常狀況輸出:\n" -" 指令:%(cmd)s\n" -" 標準輸出:%(out)s\n" -"標準錯誤:%(err)s。" - -msgid "" -"CMMVC6071E The VDisk-to-host mapping was not created because the VDisk is " -"already mapped to a host.\n" -"\"" -msgstr "" -"CMMVC6071E 未建立 VDisk 至主機對映,因為該 VDisk 已經對映至某個主機。\n" -"\"" - -msgid "CONCERTO version is not supported" -msgstr "CONCERTO 版本不受支援" - -#, python-format -msgid "CPG (%s) doesn't exist on array" -msgstr "陣列上不存在 CPG (%s)" - -msgid "Cache name is None, please set smartcache:cachename in key." -msgstr "快取名稱為「無」,請在索引鍵中設定 smartcache:cachename。" - -#, python-format -msgid "Cache volume %s does not have required properties" -msgstr "快取磁區 %s 沒有必需的內容" - -msgid "Call returned a None object" -msgstr "呼叫傳回了 None 物件。" - -msgid "Can not add FC port to host." -msgstr "無法將 FC 埠新增至主機。" - -#, python-format -msgid "Can not find cache id by cache name %(name)s." -msgstr "依快取名稱 %(name)s 找不到快取 ID。" - -#, python-format -msgid "Can not find partition id by name %(name)s." -msgstr "依名稱 %(name)s 找不到分割區 ID。" - -#, python-format -msgid "Can not get pool info. pool: %s" -msgstr "無法取得儲存區資訊。儲存區:%s" - -#, python-format -msgid "Can not translate %s to integer." -msgstr "無法將 %s 轉換為整數。" - -#, python-format -msgid "Can't access 'scality_sofs_config': %s" -msgstr "無法存取 'scality_sofs_config':%s" - -msgid "Can't decode backup record." 
-msgstr "無法將備份記錄解碼。" - -#, python-format -msgid "Can't extend replication volume, volume: %(id)s" -msgstr "無法延伸抄寫磁區,磁區:%(id)s" - -msgid "Can't find LUN on the array, please check the source-name or source-id." -msgstr "在陣列上找不到 LUN,請檢查 source-name 或 source-id。" - -#, python-format -msgid "Can't find cache name on the array, cache name is: %(name)s." -msgstr "在陣列上找不到快取名稱,快取名稱為:%(name)s。" - -#, python-format -msgid "Can't find lun info on the array. volume: %(id)s, lun name: %(name)s." -msgstr "在陣列上找不到 LUN 資訊,磁區:%(id)s,LUN 名稱:%(name)s。" - -#, python-format -msgid "Can't find partition name on the array, partition name is: %(name)s." -msgstr "在陣列上找不到分割區名稱,分割區名稱為:%(name)s。" - -#, python-format -msgid "Can't find service: %s" -msgstr "找不到服務:%s" - -msgid "" -"Can't find snapshot on array, please check the source-name or source-id." -msgstr "在陣列上找不到 Snapshot,請檢查 source-name 或 source-id。" - -msgid "Can't find the same host id from arrays." -msgstr "從陣列中找不到相同的主機 ID。" - -#, python-format -msgid "Can't get volume id from snapshot, snapshot: %(id)s" -msgstr "無法從 Snapshot 取得磁區 ID,Snapshot:%(id)s" - -#, python-format -msgid "Can't get volume id. Volume name: %s." -msgstr "無法取得磁區 ID。磁區名稱:%s。" - -#, python-format -msgid "Can't import LUN %(lun_id)s to Cinder. LUN type mismatched." -msgstr "無法將 LUN %(lun_id)s 匯入 Cinder。LUN 類型不符。" - -#, python-format -msgid "Can't import LUN %s to Cinder. Already exists in a HyperMetroPair." -msgstr "無法將 LUN %s 匯入 Cinder。它已經存在於 HyperMetroPair 中。" - -#, python-format -msgid "Can't import LUN %s to Cinder. Already exists in a LUN copy task." -msgstr "無法將 LUN %s 匯入 Cinder。它已經存在於 LUN 複製作業中。" - -#, python-format -msgid "Can't import LUN %s to Cinder. Already exists in a LUN group." -msgstr "無法將 LUN %s 匯入 Cinder。它已經存在於某個 LUN 群組中。" - -#, python-format -msgid "Can't import LUN %s to Cinder. Already exists in a LUN mirror." -msgstr "無法將 LUN %s 匯入 Cinder。它已經存在於某個 LUN 鏡映中。" - -#, python-format -msgid "Can't import LUN %s to Cinder. 
Already exists in a SplitMirror." -msgstr "無法將 LUN %s 匯入 Cinder。它已經存在於 SplitMirror 中。" - -#, python-format -msgid "Can't import LUN %s to Cinder. Already exists in a migration task." -msgstr "無法將 LUN %s 匯入 Cinder。它已經存在於移轉作業中。" - -#, python-format -msgid "" -"Can't import LUN %s to Cinder. Already exists in a remote replication task." -msgstr "無法將 LUN %s 匯入 Cinder。它已經存在於遠端抄寫作業中。" - -#, python-format -msgid "Can't import LUN %s to Cinder. LUN status is not normal." -msgstr "無法將 LUN %s 匯入 Cinder。LUN 未處於正常狀態。" - -#, python-format -msgid "Can't import snapshot %s to Cinder. Snapshot doesn't belong to volume." -msgstr "無法將 Snapshot %s 匯入 Cinder。Snapshot 不屬於磁區。" - -#, python-format -msgid "Can't import snapshot %s to Cinder. Snapshot is exposed to initiator." -msgstr "無法將 Snapshot %s 匯入 Cinder。已將 Snapshot 向起始器公開。" - -#, python-format -msgid "" -"Can't import snapshot %s to Cinder. Snapshot status is not normal or running " -"status is not online." -msgstr "" -"無法將 Snapshot %s 匯入 Cinder。Snapshot 未處於正常狀態,或者執行中狀態不在線" -"上。" - -msgid "Can't parse backup record." -msgstr "無法剖析備份記錄。" - -#, python-format -msgid "" -"Cannot add volume %(volume_id)s to consistency group %(group_id)s because it " -"has no volume type." -msgstr "" -"無法將磁區 %(volume_id)s 新增至一致性群組 %(group_id)s,因為該磁區沒有磁區類" -"型。" - -#, python-format -msgid "" -"Cannot add volume %(volume_id)s to consistency group %(group_id)s because it " -"is already in consistency group %(orig_group)s." -msgstr "" -"無法將磁區 %(volume_id)s 新增至一致性群組 %(group_id)s,因為該磁區已經位於一" -"致性群組 %(orig_group)s 中。" - -#, python-format -msgid "" -"Cannot add volume %(volume_id)s to consistency group %(group_id)s because " -"volume cannot be found." -msgstr "" -"無法將磁區 %(volume_id)s 新增至一致性群組 %(group_id)s,因為找不到該磁區。" - -#, python-format -msgid "" -"Cannot add volume %(volume_id)s to consistency group %(group_id)s because " -"volume does not exist." 
-msgstr "" -"無法將磁區 %(volume_id)s 新增至一致性群組 %(group_id)s,因為該磁區不存在。" - -#, python-format -msgid "" -"Cannot add volume %(volume_id)s to consistency group %(group_id)s because " -"volume is in an invalid state: %(status)s. Valid states are: %(valid)s." -msgstr "" -"無法將磁區 %(volume_id)s 新增至一致性群組 %(group_id)s,因為磁區處於無效的狀" -"態:%(status)s。有效的狀態為:%(valid)s。" - -#, python-format -msgid "" -"Cannot add volume %(volume_id)s to consistency group %(group_id)s because " -"volume type %(volume_type)s is not supported by the group." -msgstr "" -"無法將磁區 %(volume_id)s 新增至一致性群組 %(group_id)s,因為此群組不支援磁區" -"類型 %(volume_type)s。" - -#, python-format -msgid "" -"Cannot attach already attached volume %s; multiattach is disabled via the " -"'netapp_enable_multiattach' configuration option." -msgstr "" -"無法連接已經連接的磁區 %s;已透過'netapp_enable_multiattach' 配置選項停用了多" -"重連接。" - -msgid "Cannot change VF context in the session." -msgstr "無法變更階段作業中的 VF 環境定義。" - -#, python-format -msgid "" -"Cannot change VF context, specified VF is not available in the manageable VF " -"list %(vf_list)s." -msgstr "" -"無法變更 VF 環境定義,在可管理的 VF 清單 %(vf_list)s 中無法使用指定的 VF。" - -msgid "Cannot connect to ECOM server." -msgstr "無法連接至 ECOM 伺服器。" - -#, python-format -msgid "" -"Cannot create consistency group %(group)s because snapshot %(snap)s is not " -"in a valid state. Valid states are: %(valid)s." -msgstr "" -"無法建立一致性群組 %(group)s,因為 Snapshot %(snap)s 不是處於有效的狀態。有效" -"的狀態為:%(valid)s。" - -#, python-format -msgid "" -"Cannot create consistency group %(group)s because source volume " -"%(source_vol)s is not in a valid state. Valid states are: %(valid)s." -msgstr "" -"無法建立一致性群組 %(group)s,因為來源磁區%(source_vol)s 未處於有效狀態。有效" -"狀態為:%(valid)s。" - -#, python-format -msgid "Cannot create directory %s." -msgstr "無法建立目錄 %s。" - -msgid "Cannot create encryption specs. Volume type in use." -msgstr "無法建立加密規格。磁區類型在使用中。" - -#, python-format -msgid "" -"Cannot create image of disk format: %s. Only vmdk disk format is accepted." 
-msgstr "無法建立磁碟格式為 %s 的映像檔。僅接受 VMDK 磁碟格式。" - -#, python-format -msgid "Cannot create masking view: %(maskingViewName)s. " -msgstr "無法建立遮罩視圖:%(maskingViewName)s。" - -#, python-format -msgid "" -"Cannot create more than %(req)s volumes on the ESeries array when " -"'netapp_enable_multiattach' is set to true." -msgstr "" -"在下列情況下,無法在 E 系列陣列上建立超過 %(req)s 個磁" -"區:'netapp_enable_multiattach' 設定為 true。" - -#, python-format -msgid "Cannot create or find an storage group with name %(sgGroupName)s." -msgstr "無法建立或找不到名稱為 %(sgGroupName)s 的儲存體群組。" - -#, python-format -msgid "Cannot create volume of size %s: not multiple of 8GB." -msgstr "無法建立大小為 %s 的磁區:不是 8 GB 的倍數。" - -#, python-format -msgid "Cannot create volume_type with name %(name)s and specs %(extra_specs)s" -msgstr "無法使用名稱 %(name)s 及規格 %(extra_specs)s 來建立 volume_type" - -#, python-format -msgid "Cannot delete LUN %s while snapshots exist." -msgstr "存在 Snapshot 時,無法刪除 LUN %s。" - -#, python-format -msgid "" -"Cannot delete cache volume: %(cachevol_name)s. It was updated at " -"%(updated_at)s and currently has %(numclones)d volume instances." -msgstr "" -"無法刪除快取磁區:%(cachevol_name)s。該快取磁區已在 %(updated_at)s 得到更新," -"且目前具有 %(numclones)d 個磁區實例。" - -#, python-format -msgid "" -"Cannot delete cache volume: %(cachevol_name)s. It was updated at " -"%(updated_at)s and currently has %(numclones)s volume instances." -msgstr "" -"無法刪除快取磁區:%(cachevol_name)s。該快取磁區已在 %(updated_at)s 得到更新," -"且目前具有 %(numclones)s 個磁區實例。" - -msgid "Cannot delete encryption specs. Volume type in use." -msgstr "無法刪除加密規格。磁區類型在使用中。" - -msgid "Cannot determine storage pool settings." -msgstr "無法判定儲存區設定。" - -msgid "Cannot execute /sbin/mount.sofs" -msgstr "無法執行 /sbin/mount.sofs" - -#, python-format -msgid "Cannot find CG group %s." -msgstr "找不到 CG 群組 %s。" - -#, python-format -msgid "" -"Cannot find Controller Configuration Service for storage system " -"%(storage_system)s." 
-msgstr "找不到下列儲存體系統的「控制器配置服務」:%(storage_system)s。" - -#, python-format -msgid "Cannot find Replication Service to create volume for snapshot %s." -msgstr "找不到「抄寫服務」,無法建立 Snapshot %s 的磁區。" - -#, python-format -msgid "Cannot find Replication Service to delete snapshot %s." -msgstr "找不到「抄寫服務」以刪除 Snapshot %s。" - -#, python-format -msgid "Cannot find Replication service on system %s." -msgstr "在系統 %s 上找不到「抄寫服務」。" - -#, python-format -msgid "Cannot find Volume: %(id)s. unmanage operation. Exiting..." -msgstr "找不到磁區:%(id)s。取消管理作業。正在結束..." - -#, python-format -msgid "Cannot find Volume: %(volumename)s. Extend operation. Exiting...." -msgstr "找不到磁區:%(volumename)s。「延伸」作業。正在結束..." - -#, python-format -msgid "Cannot find device number for volume %(volumeName)s." -msgstr "找不到磁區 %(volumeName)s 的裝置號碼。" - -msgid "Cannot find migration task." -msgstr "找不到移轉作業。" - -#, python-format -msgid "Cannot find replication service on system %s." -msgstr "在系統 %s 上找不到抄寫服務。" - -#, python-format -msgid "Cannot find source CG instance. consistencygroup_id: %s." -msgstr "找不到來源 CG 實例。consistencygroup_id:%s。" - -#, python-format -msgid "Cannot get mcs_id by channel id: %(channel_id)s." -msgstr "無法依下列通道 ID 取得 mcs_id:%(channel_id)s。" - -msgid "Cannot get necessary pool or storage system information." -msgstr "無法取得必要的儲存區或儲存體系統資訊。" - -#, python-format -msgid "" -"Cannot get or create a storage group: %(sgGroupName)s for volume " -"%(volumeName)s " -msgstr "無法取得或建立下列磁區的儲存體群組 %(sgGroupName)s:%(volumeName)s " - -#, python-format -msgid "Cannot get or create initiator group: %(igGroupName)s. " -msgstr "無法取得或建立起始器群組:%(igGroupName)s。" - -#, python-format -msgid "Cannot get port group: %(pgGroupName)s. " -msgstr "無法取得埠群組:%(pgGroupName)s。" - -#, python-format -msgid "" -"Cannot get storage group: %(sgGroupName)s from masking view " -"%(maskingViewInstanceName)s. 
" -msgstr "" -"無法從下列遮罩視圖取得儲存體群組 %(sgGroupName)s:" -"%(maskingViewInstanceName)s。" - -#, python-format -msgid "" -"Cannot get supported size range for %(sps)s Return code: %(rc)lu. Error: " -"%(error)s." -msgstr "無法取得 %(sps)s 的受支援大小範圍。回覆碼:%(rc)lu。錯誤:%(error)s。" - -#, python-format -msgid "" -"Cannot get the default storage group for FAST policy: %(fastPolicyName)s." -msgstr "無法取得 FAST 原則 %(fastPolicyName)s 的預設儲存體群組。" - -msgid "Cannot get the portgroup from the masking view." -msgstr "無法取得遮罩視圖中的埠群組。" - -msgid "Cannot mount Scality SOFS, check syslog for errors" -msgstr "無法裝載 Scality SOFS,請檢查系統日誌以找出錯誤" - -msgid "Cannot ping DRBDmanage backend" -msgstr "無法對 DRBDmanage 後端進行連線測試" - -#, python-format -msgid "Cannot place volume %(id)s on %(host)s" -msgstr "無法將磁區 %(id)s 置於 %(host)s 上" - -#, python-format -msgid "" -"Cannot provide both 'cgsnapshot_id' and 'source_cgid' to create consistency " -"group %(name)s from source." -msgstr "" -"無法同時提供 'cgsnapshot_id' 和 'source_cgid',以從來源建立一致性群組 " -"%(name)s。" - -msgid "Cannot register resource" -msgstr "無法登錄資源" - -msgid "Cannot register resources" -msgstr "無法登錄資源" - -#, python-format -msgid "" -"Cannot remove volume %(volume_id)s from consistency group %(group_id)s " -"because it is not in the group." -msgstr "" -"無法將磁區 %(volume_id)s 從一致性群組 %(group_id)s 中移除,因為該磁區不在此群" -"組中。" - -#, python-format -msgid "" -"Cannot remove volume %(volume_id)s from consistency group %(group_id)s " -"because volume is in an invalid state: %(status)s. Valid states are: " -"%(valid)s." -msgstr "" -"無法將磁區 %(volume_id)s 從一致性群組 %(group_id)s 中移除,因為磁區處於無效的" -"狀態:%(status)s。有效的狀態為:%(valid)s。" - -#, python-format -msgid "Cannot retype from HPE3PARDriver to %s." -msgstr "HPE3PARDriver 無法透過執行 Retype 動作變為 %s。" - -msgid "Cannot retype from one 3PAR array to another." -msgstr "一個 3PAR 陣列無法透過執行 Retype 動作變為另一個陣列。" - -msgid "Cannot retype to a CPG in a different domain." 
-msgstr "無法執行 Retype 動作,以變為不同網域中的 CPG。" - -msgid "Cannot retype to a snap CPG in a different domain." -msgstr "無法執行 Retype 動作,以變為不同網域中的 snapCPG。" - -msgid "" -"Cannot run vgc-cluster command, please ensure software is installed and " -"permissions are set properly." -msgstr "" -"無法執行 vgc-cluster 指令,請確保已安裝了軟體,並且已經妥善設定許可權。" - -msgid "Cannot set both hitachi_serial_number and hitachi_unit_name." -msgstr "無法設定 hitachi_serial_number 及 hitachi_unit_name。" - -msgid "Cannot specify both protection domain name and protection domain id." -msgstr "不能同時指定保護網域名稱及保護網域 ID。" - -msgid "Cannot specify both storage pool name and storage pool id." -msgstr "不能同時指定儲存區名稱及儲存區 ID。" - -#, python-format -msgid "" -"Cannot update consistency group %(group_id)s because no valid name, " -"description, add_volumes, or remove_volumes were provided." -msgstr "" -"無法更新一致性群組 %(group_id)s,因為未提供有效的名稱、說明、add_volumes 或 " -"remove_volumes。" - -msgid "Cannot update encryption specs. Volume type in use." -msgstr "無法更新加密規格。磁區類型在使用中。" - -#, python-format -msgid "Cannot update volume_type %(id)s" -msgstr "無法更新 volume_type %(id)s" - -#, python-format -msgid "Cannot verify the existence of object:%(instanceName)s." -msgstr "無法驗證物件 %(instanceName)s 是否存在。" - -#, python-format -msgid "CgSnapshot %(cgsnapshot_id)s could not be found." -msgstr "找不到 CgSnapshot %(cgsnapshot_id)s。" - -msgid "Cgsnahost is empty. No consistency group will be created." -msgstr "Cgsnahost 是空的。將不會建立一致性群組。" - -msgid "Change hostlun id error." -msgstr "變更主機 LUN ID 時發生錯誤。" - -msgid "Change lun priority error." -msgstr "變更 LUN 優先順序時發生錯誤。" - -msgid "Change lun smarttier policy error." -msgstr "變更 LUN smarttier 原則時發生錯誤。" - -#, python-format -msgid "" -"Change would make usage less than 0 for the following resources: %(unders)s" -msgstr "變更會使下列資源的用量小於 0:%(unders)s" - -msgid "Check access permissions for the ZFS share assigned to this driver." -msgstr "請檢查指派給此驅動程式之 ZFS 共用的存取權限。" - -msgid "Check hostgroup associate error." 
-msgstr "檢查主機群組關聯時發生錯誤。" - -msgid "Check initiator added to array error." -msgstr "檢查已新增至陣列的起始器時發生錯誤。" - -msgid "Check initiator associated to host error." -msgstr "檢查與主機相關聯的起始器時發生錯誤。" - -msgid "Check lungroup associate error." -msgstr "檢查 LUN 群組關聯時發生錯誤。" - -msgid "Check portgroup associate error." -msgstr "檢查埠群組關聯時發生錯誤。" - -msgid "" -"Check the state of the http service. Also ensure that the https port number " -"is the same as the one specified in cinder.conf." -msgstr "" -"請檢查 HTTP 服務的狀態。同時確保 HTTPS 埠號與 cinder.conf 中指定的埠號相同。" - -msgid "Chunk size is not multiple of block size for creating hash." -msgstr "片段大小不是用於建立雜湊之區塊大小的倍數。" - -#, python-format -msgid "Cisco Fibre Channel Zoning CLI error: %(reason)s" -msgstr "Cisco 光纖通道分區 CLI 錯誤:%(reason)s" - -#, python-format -msgid "Clone feature is not licensed on %(storageSystem)s." -msgstr "複製功能未在 %(storageSystem)s 上獲得授權。" - -#, python-format -msgid "" -"Clone type '%(clone_type)s' is invalid; valid values are: '%(full_clone)s' " -"and '%(linked_clone)s'." -msgstr "" -"複製類型 '%(clone_type)s' 無效;有效值為:'%(full_clone)s' 和 " -"'%(linked_clone)s'。" - -msgid "" -"Cluster is not formatted. You should probably perform \"dog cluster format\"." -msgstr "叢集未格式化。您可能應該執行 \"dog cluster format\"。" - -#, python-format -msgid "Coho Data Cinder driver failure: %(message)s" -msgstr "Coho Data Cinder 驅動程式失敗:%(message)s" - -msgid "Coho rpc port is not configured" -msgstr "未配置 Coho RPC 埠" - -#, python-format -msgid "Command %(cmd)s blocked in the CLI and was cancelled" -msgstr "指令 %(cmd)s 在 CLI 中遭到封鎖,且已取消" - -#, python-format -msgid "CommandLineHelper._wait_for_condition: %s timeout." -msgstr "CommandLineHelper._wait_for_condition:%s 逾時。" - -msgid "Compression Enabler is not installed. Can not create compressed volume." -msgstr "未安裝「壓縮啟用程式」。無法建立已壓縮的磁區。" - -#, python-format -msgid "Compute cluster: %(cluster)s not found." -msgstr "找不到計算叢集:%(cluster)s。" - -msgid "Condition has no field." 
-msgstr "條件不含任何欄位。" - -#, python-format -msgid "Config 'max_over_subscription_ratio' invalid. Must be > 0: %s" -msgstr "配置 'max_over_subscription_ratio' 無效。必需大於 0:%s" - -msgid "Configuration error: dell_sc_ssn not set." -msgstr "配置錯誤:未設定 dell_sc_ssn。" - -#, python-format -msgid "Configuration file %(configurationFile)s does not exist." -msgstr "配置檔 %(configurationFile)s 不存在。" - -msgid "Configuration is not found." -msgstr "找不到配置。" - -#, python-format -msgid "Configuration value %s is not set." -msgstr "未設定配置值 %s。" - -#, python-format -msgid "" -"Conflicting QoS specifications in volume type %s: when QoS spec is " -"associated to volume type, legacy \"netapp:qos_policy_group\" is not allowed " -"in the volume type extra specs." -msgstr "" -"磁區類型 %s 中存在衝突的服務品質規格:將服務品質規格關聯至磁區類型時,不容許" -"在磁區類型額外規格中使用舊式 \"netapp:qos_policy_group\"。" - -#, python-format -msgid "Connection to glance failed: %(reason)s" -msgstr "Glance 連線失敗:%(reason)s" - -#, python-format -msgid "Connection to swift failed: %(reason)s" -msgstr "Swift 連線失敗:%(reason)s" - -#, python-format -msgid "Connector does not provide: %s" -msgstr "連接器未提供:%s" - -#, python-format -msgid "Connector doesn't have required information: %(missing)s" -msgstr "連接器沒有必要資訊:%(missing)s" - -msgid "Consistency group is empty. No cgsnapshot will be created." -msgstr "一致性群組是空的。將不建立 CgSnapshot。" - -#, python-format -msgid "ConsistencyGroup %(consistencygroup_id)s could not be found." -msgstr "找不到 ConsistencyGroup %(consistencygroup_id)s。" - -msgid "Container" -msgstr "容器" - -msgid "Container size smaller than required file size." -msgstr "儲存器大小小於必要的檔案大小。" - -msgid "Content type not supported." -msgstr "內容類型不受支援。" - -#, python-format -msgid "Controller Configuration Service not found on %(storageSystemName)s." -msgstr "在 %(storageSystemName)s 上找不到「控制器配置服務」。" - -#, python-format -msgid "Controller IP '%(host)s' could not be resolved: %(e)s." 
-msgstr "無法解析控制器 IP '%(host)s':%(e)s。" - -#, python-format -msgid "Converted to %(f1)s, but format is now %(f2)s" -msgstr "已轉換為 %(f1)s,但格式現在為 %(f2)s" - -#, python-format -msgid "Converted to %(vol_format)s, but format is now %(file_format)s" -msgstr "已轉換為 %(vol_format)s,但格式現在為 %(file_format)s" - -#, python-format -msgid "Converted to raw, but format is now %s" -msgstr "已轉換為原始,但格式現在為 %s" - -#, python-format -msgid "Converted to raw, but format is now %s." -msgstr "已轉換為原始,但格式現在為 %s。" - -msgid "Coordinator uninitialized." -msgstr "協調者未起始設定。" - -#, python-format -msgid "" -"Copy volume task failed: convert_to_base_volume: id=%(id)s, status=" -"%(status)s." -msgstr "" -"複製磁區作業失敗:convert_to_base_volume:ID = %(id)s、狀態 = %(status)s。" - -#, python-format -msgid "" -"Copy volume task failed: create_cloned_volume id=%(id)s, status=%(status)s." -msgstr "" -"複製磁區作業失敗:create_cloned_volume ID = %(id)s,狀態 = %(status)s。" - -#, python-format -msgid "Copying metadata from %(src_type)s %(src_id)s to %(vol_id)s." -msgstr "正在將 meta 資料從 %(src_type)s %(src_id)s 複製到 %(vol_id)s。" - -msgid "" -"Could not determine which Keystone endpoint to use. This can either be set " -"in the service catalog or with the cinder.conf config option " -"'backup_swift_auth_url'." -msgstr "" -"無法判定要使用的 Keystone 端點。這可以在服務型錄中進行設定,也可以使用 " -"cinder.conf 配置選項 'backup_swift_auth_url' 進行設定。" - -msgid "" -"Could not determine which Swift endpoint to use. This can either be set in " -"the service catalog or with the cinder.conf config option 'backup_swift_url'." -msgstr "" -"無法判定要使用的 Swift 端點。這可以在服務型錄中進行設定,也可以使用 cinder." -"conf 配置選項 'backup_swift_url' 進行設定。" - -msgid "Could not find DISCO wsdl file." -msgstr "找不到 DISCO WSDL 檔。" - -#, python-format -msgid "Could not find GPFS cluster id: %s." -msgstr "找不到 GPFS 叢集 ID:%s。" - -#, python-format -msgid "Could not find GPFS file system device: %s." 
-msgstr "找不到 GPFS 檔案系統裝置:%s。" - -#, python-format -msgid "Could not find config at %(path)s" -msgstr "在 %(path)s 處找不到配置" - -#, python-format -msgid "Could not find iSCSI export for volume %s" -msgstr "找不到磁區 %s 的 iSCSI 匯出" - -#, python-format -msgid "Could not find iSCSI target for volume: %(volume_id)s." -msgstr "找不到磁區 %(volume_id)s 的 iSCSI 目標。" - -#, python-format -msgid "Could not find key in output of command %(cmd)s: %(out)s." -msgstr "在指令 %(cmd)s 的輸出 %(out)s 中找不到索引鍵。" - -#, python-format -msgid "Could not find parameter %(param)s" -msgstr "找不到參數 %(param)s" - -#, python-format -msgid "Could not find target %s" -msgstr "找不到目標 %s" - -#, python-format -msgid "Could not find the parent volume for Snapshot '%s' on array." -msgstr "在陣列上,找不到 Snapshot '%s' 的母項磁區。" - -#, python-format -msgid "Could not find unique snapshot %(snap)s on volume %(vol)s." -msgstr "在磁區 %(vol)s 上找不到唯一的 Snapshot %(snap)s。" - -msgid "Could not get system name." -msgstr "無法取得系統名稱。" - -#, python-format -msgid "Could not load paste app '%(name)s' from %(path)s" -msgstr "無法從 %(path)s 載入 paste 應用程式 '%(name)s'" - -#, python-format -msgid "" -"Could not read information for snapshot %(name)s. Code: %(code)s. Reason: " -"%(reason)s" -msgstr "無法讀取 Snapshot %(name)s 的資訊。程式碼:%(code)s。原因:%(reason)s" - -#, python-format -msgid "Could not restore configuration file %(file_path)s: %(exc)s" -msgstr "無法還原配置檔 %(file_path)s:%(exc)s" - -#, python-format -msgid "Could not save configuration to %(file_path)s: %(exc)s" -msgstr "無法將配置儲存至 %(file_path)s:%(exc)s" - -#, python-format -msgid "Could not start consistency group snapshot %s." -msgstr "無法啟動一致性群組 Snapshot %s。" - -#, python-format -msgid "Counter %s not found" -msgstr "找不到計數器 %s" - -msgid "Create QoS policy error." -msgstr "建立服務品質原則時發生錯誤。" - -#, python-format -msgid "" -"Create backup aborted, expected backup status %(expected_status)s but got " -"%(actual_status)s." 
-msgstr "" -"已中止建立備份,預期備份狀態 %(expected_status)s,但取得 %(actual_status)s。" - -#, python-format -msgid "" -"Create backup aborted, expected volume status %(expected_status)s but got " -"%(actual_status)s." -msgstr "" -"已中斷建立備份,預期磁區狀態 %(expected_status)s,但取得 %(actual_status)s。" - -msgid "Create export for volume failed." -msgstr "針對磁區建立匯出失敗。" - -msgid "Create hostgroup error." -msgstr "建立主機群組時發生錯誤。" - -#, python-format -msgid "Create hypermetro error. %s." -msgstr "建立 hypermetro 時發生錯誤。%s。" - -msgid "Create lun error." -msgstr "建立 LUN 時發生錯誤。" - -msgid "Create lun migration error." -msgstr "建立 LUN 移轉時發生錯誤。" - -msgid "Create luncopy error." -msgstr "建立 LUNcopy 時發生錯誤。" - -msgid "Create lungroup error." -msgstr "建立 LUN 群組時發生錯誤。" - -msgid "Create manager volume flow failed." -msgstr "建立管理程式磁區流程失敗。" - -msgid "Create port group error." -msgstr "建立埠群組時發生錯誤。" - -msgid "Create replication error." -msgstr "建立抄寫時發生錯誤。" - -#, python-format -msgid "Create replication pair failed. Error: %s." -msgstr "建立抄寫配對失敗。錯誤:%s。" - -msgid "Create snapshot error." -msgstr "建立 Snapshot 時發生錯誤。" - -#, python-format -msgid "Create volume error. Because %s." -msgstr "建立磁區時發生錯誤。因為 %s。" - -msgid "Create volume failed." -msgstr "建立磁區失敗。" - -msgid "Creating a consistency group from a source is not currently supported." -msgstr "目前,不支援從來源建立一致性群組。" - -#, python-format -msgid "" -"Creating and activating zone set failed: (Zone set=%(cfg_name)s error=" -"%(err)s)." -msgstr "建立及啟動區域集時失敗:(區域集 = %(cfg_name)s 錯誤 = %(err)s)。" - -#, python-format -msgid "" -"Creating and activating zone set failed: (Zone set=%(zoneset)s error=" -"%(err)s)." -msgstr "建立及啟動區域集時失敗:(區域集 = %(zoneset)s 錯誤 = %(err)s)。" - -#, python-format -msgid "Creating usages for %(begin_period)s until %(end_period)s" -msgstr "正在建立從 %(begin_period)s 至 %(end_period)s 的使用情形" - -msgid "Current host isn't part of HGST domain." 
-msgstr "現行主機不是 HGST 網域的一部分。" - -#, python-format -msgid "" -"Current host not valid for volume %(id)s with type %(type)s, migration not " -"allowed" -msgstr "現行主機不適用於類型為 %(type)s 的磁區 %(id)s,不容許移轉" - -#, python-format -msgid "" -"Currently mapped host for volume %(vol)s is in unsupported host group with " -"%(group)s." -msgstr "磁區 %(vol)s 目前對映的主機位於不受支援的主機群組%(group)s 中。" - -#, python-format -msgid "" -"DRBDmanage driver error: expected key \"%s\" not in answer, wrong DRBDmanage " -"version?" -msgstr "" -"DRBDmanage 驅動程式錯誤:應答中沒有預期的索引鍵 \"%s\",DRBDmanage 版本是否錯" -"誤?" - -msgid "" -"DRBDmanage driver setup error: some required libraries (dbus, drbdmanage.*) " -"not found." -msgstr "" -"DRBDmanage 驅動程式設定錯誤:找不到部分必要程式庫(dbus、drbdmanage.*)。" - -#, python-format -msgid "DRBDmanage expected one resource (\"%(res)s\"), got %(n)d" -msgstr "DRBDmanage 預期一個資源 (\"%(res)s\"),但卻取得 %(n)d" - -#, python-format -msgid "" -"DRBDmanage timeout waiting for new volume after snapshot restore; resource " -"\"%(res)s\", volume \"%(vol)s\"" -msgstr "" -"在 Snapshot 還原之後等待新磁區時,DRBDmanage 逾時;資源 \"%(res)s\",磁區 " -"\"%(vol)s\"" - -#, python-format -msgid "" -"DRBDmanage timeout waiting for snapshot creation; resource \"%(res)s\", " -"snapshot \"%(sn)s\"" -msgstr "" -"等待建立 Snapshot 時,DRBDmanage 逾時;資源 \"%(res)s\",Snapshot \"%(sn)s\"" - -#, python-format -msgid "" -"DRBDmanage timeout waiting for volume creation; resource \"%(res)s\", volume " -"\"%(vol)s\"" -msgstr "等待建立磁區時,DRBDmanage 逾時;資源 \"%(res)s\",磁區 \"%(vol)s\"" - -#, python-format -msgid "" -"DRBDmanage timeout waiting for volume size; volume ID \"%(id)s\" (res " -"\"%(res)s\", vnr %(vnr)d)" -msgstr "" -"等待磁區大小時,DRBDmanage 逾時;磁區 ID \"%(id)s\"(資源 \"%(res)s\",VNR " -"%(vnr)d)" - -msgid "Data ONTAP API version could not be determined." -msgstr "無法判定資料 ONTAP API 版本。" - -msgid "Data ONTAP operating in 7-Mode does not support QoS policy groups." -msgstr "在 7 模式下運作的資料 ONTAP 不支援服務品質原則群組。" - -msgid "Database schema downgrade is not allowed." 
-msgstr "不容許將資料庫綱目降級。" - -#, python-format -msgid "Dataset %s is not shared in Nexenta Store appliance" -msgstr "資料集 %s 未在「Nexenta 儲存庫」軟體驅動裝置中共用" - -#, python-format -msgid "Dataset group %s not found at Nexenta SA" -msgstr "在 Nexenta SA 中,找不到資料集群組 %s" - -#, python-format -msgid "" -"Dedup is a valid provisioning type, but requires WSAPI version " -"'%(dedup_version)s' version '%(version)s' is installed." -msgstr "" -"Dedup 是有效的供應類型,但需要 WSAPI '%(dedup_version)s' 版,已安裝 " -"'%(version)s' 版。" - -msgid "Dedup luns cannot be extended" -msgstr "無法延伸 Dedup LUN" - -#, python-format -msgid "" -"Default quota for resource: %(res)s is set by the default quota flag: quota_" -"%(res)s, it is now deprecated. Please use the default quota class for " -"default quota." -msgstr "" -"資源 %(res)s 的預設配額是由預設配額旗標quota_%(res)s 所設定,但它現已淘汰。請" -"將預設配額類別用於預設配額。" - -msgid "Default volume type can not be found." -msgstr "找不到預設磁區類型。" - -msgid "Delete LUNcopy error." -msgstr "刪除 LUNcopy 時發生錯誤。" - -msgid "Delete QoS policy error." -msgstr "刪除服務品質原則時發生錯誤。" - -msgid "Delete associated lun from lungroup error." -msgstr "從 LUN 群組中刪除相關聯的 LUN 時發生錯誤。" - -#, python-format -msgid "" -"Delete backup aborted, the backup service currently configured " -"[%(configured_service)s] is not the backup service that was used to create " -"this backup [%(backup_service)s]." -msgstr "" -"已中止刪除備份,目前配置的備份服務[%(configured_service)s] 不是建立此備份所使" -"用的備份服務 [%(backup_service)s]。" - -msgid "Delete consistency group failed." -msgstr "刪除一致性群組失敗。" - -msgid "Delete hostgroup error." -msgstr "刪除主機群組時發生錯誤。" - -msgid "Delete hostgroup from mapping view error." -msgstr "從對映視圖中刪除主機群組時發生錯誤。" - -msgid "Delete lun error." -msgstr "刪除 LUN 時發生錯誤。" - -msgid "Delete lun migration error." -msgstr "刪除 LUN 移轉時發生錯誤。" - -msgid "Delete lungroup error." -msgstr "刪除 LUN 群組時發生錯誤。" - -msgid "Delete lungroup from mapping view error." -msgstr "從對映視圖中刪除 LUN 群組時發生錯誤。" - -msgid "Delete mapping view error." 
-msgstr "刪除對映視圖時發生錯誤。" - -msgid "Delete port group error." -msgstr "刪除埠群組時發生錯誤。" - -msgid "Delete portgroup from mapping view error." -msgstr "從對映視圖中刪除埠群組時發生錯誤。" - -msgid "Delete snapshot error." -msgstr "刪除 Snapshot 時發生錯誤。" - -#, python-format -msgid "Delete snapshot of volume not supported in state: %s." -msgstr "狀態 %s 不支援刪除磁區的 Snapshot。" - -#, python-format -msgid "" -"Delete_backup aborted, expected backup status %(expected_status)s but got " -"%(actual_status)s." -msgstr "" -"Delete_backup 已中止,預期備份狀態 %(expected_status)s,但取得 " -"%(actual_status)s。" - -msgid "Deleting volume from database and skipping rpc." -msgstr "正在從資料庫刪除磁區並跳過 RPC。" - -#, python-format -msgid "Deleting zones failed: (command=%(cmd)s error=%(err)s)." -msgstr "刪除區域時失敗:(指令 = %(cmd)s 錯誤 = %(err)s)。" - -msgid "Dell API 2.1 or later required for Consistency Group support" -msgstr "一致性群組支援需要 Dell API 2.1 或更新版本" - -msgid "" -"Dell Cinder driver configuration error replication not supported with direct " -"connect." -msgstr "Dell Cinder 驅動程式配置錯誤,直接連接不支援抄寫。" - -#, python-format -msgid "Dell Cinder driver configuration error replication_device %s not found" -msgstr "Dell Cinder 驅動程式配置錯誤,找不到 replication_device %s。" - -msgid "Describe-resource is admin only functionality" -msgstr "Describe-resource 是管理者專用功能" - -#, python-format -msgid "Destination has migration_status %(stat)s, expected %(exp)s." -msgstr "目的地具有 migration_status %(stat)s,預期狀態為 %(exp)s。" - -msgid "Destination volume not mid-migration." -msgstr "移轉期間找不到目的地磁區。" - -msgid "" -"Detach volume failed: More than one attachment, but no attachment_id " -"provided." -msgstr "分離磁區失敗:存在多個連接,但卻未提供attachment_id。" - -msgid "Detach volume from instance and then try again." -msgstr "請從實例分離磁區,然後再試一次。" - -#, python-format -msgid "Detected more than one volume with name %(vol_name)s" -msgstr "已偵測到多個名稱為 %(vol_name)s 的磁區" - -#, python-format -msgid "Did not find expected column in %(fun)s: %(hdr)s." 
-msgstr "在 %(fun)s 中找不到預期的直欄:%(hdr)s。" - -#, python-format -msgid "Did not find the expected key %(key)s in %(fun)s: %(raw)s." -msgstr "在 %(fun)s 中找不到預期的索引鍵 %(key)s:%(raw)s。" - -msgid "Disabled reason contains invalid characters or is too long" -msgstr "停用原因包含無效的字元,或者太長" - -#, python-format -msgid "Domain with name %s wasn't found." -msgstr "找不到名稱為 %s 的網域。" - -#, python-format -msgid "" -"Downlevel GPFS Cluster Detected. GPFS Clone feature not enabled in cluster " -"daemon level %(cur)s - must be at least at level %(min)s." -msgstr "" -"偵測到舊版 GPFS 叢集。「GPFS 副本」特性未在叢集常駐程式層次 %(cur)s 啟用 - 必" -"須至少是層次 %(min)s。" - -#, python-format -msgid "Driver initialize connection failed (error: %(err)s)." -msgstr "驅動程式起始設定連線失敗(錯誤:%(err)s)。" - -msgid "Driver must implement initialize_connection" -msgstr "驅動程式必須實作 initialize_connection" - -#, python-format -msgid "" -"Driver successfully decoded imported backup data, but there are missing " -"fields (%s)." -msgstr "驅動程式已順利將匯入的備份資料解碼,但是有遺漏的欄位 (%s)。" - -#, python-format -msgid "" -"E-series proxy API version %(current_version)s does not support full set of " -"SSC extra specs. The proxy version must be at at least %(min_version)s." -msgstr "" -"E 系列 Proxy API 版本 %(current_version)s 不支援完整的SSC 額外規格集。Proxy " -"版本必須至少是 %(min_version)s。" - -#, python-format -msgid "" -"EMC VNX Cinder Driver CLI exception: %(cmd)s (Return Code: %(rc)s) (Output: " -"%(out)s)." -msgstr "" -"EMC VNX Cinder 驅動程式 CLI 異常狀況:%(cmd)s(回覆碼:%(rc)s)(輸出:" -"%(out)s)。" - -#, python-format -msgid "" -"EMC VNX Cinder Driver SPUnavailableException: %(cmd)s (Return Code: %(rc)s) " -"(Output: %(out)s)." -msgstr "" -"EMC VNX Cinder 驅動程式 SPUnavailableException:%(cmd)s(回覆碼:%(rc)s)(輸" -"出:%(out)s)。" - -msgid "" -"EcomServerIp, EcomServerPort, EcomUserName, EcomPassword must have valid " -"values." 
-msgstr "" -"EcomServerIp、EcomServerPort、EcomUserName、EcomPassword 必須具有有效的值。" - -#, python-format -msgid "" -"Either 'cgsnapshot_id' or 'source_cgid' must be provided to create " -"consistency group %(name)s from source." -msgstr "" -"必須提供 'cgsnapshot_id' 或 'source_cgid',才能從來源建立一致性群組 " -"%(name)s。" - -#, python-format -msgid "" -"Either SLO: %(slo)s or workload %(workload)s is invalid. Examine previous " -"error statement for valid values." -msgstr "" -"SLO %(slo)s 或工作量 %(workload)s 無效。請檢查前一個錯誤陳述式以取得有效的" -"值。" - -msgid "Either hitachi_serial_number or hitachi_unit_name is required." -msgstr "需要 hitachi_serial_number 或 hitachi_unit_name。" - -#, python-format -msgid "Element Composition Service not found on %(storageSystemName)s." -msgstr "在 %(storageSystemName)s 上找不到「元素組合服務」。" - -msgid "Enables QoS." -msgstr "啟用服務品質。" - -msgid "Enables compression." -msgstr "啟用壓縮。" - -msgid "Enables replication." -msgstr "啟用抄寫。" - -msgid "Ensure that configfs is mounted at /sys/kernel/config." -msgstr "請確保 configfs 已裝載於 /sys/kernel/config 中。" - -#, python-format -msgid "" -"Error Adding Initiator: %(initiator)s on groupInitiatorGroup: " -"%(initiatorgroup)s Return code: %(ret.status)d Message: %(ret.data)s ." -msgstr "" -"在下列 groupInitiatorGroup 上新增起始器 %(initiator)s 時發生錯誤:" -"%(initiatorgroup)s。回覆碼:%(ret.status)d。訊息:%(ret.data)s。" - -#, python-format -msgid "" -"Error Adding to TargetGroup: %(targetgroup)s withIQN: %(iqn)sReturn code: " -"%(ret.status)d Message: %(ret.data)s." -msgstr "" -"新增至帶 IQN %(iqn)s 的目標群組 %(targetgroup)s 時發生錯誤。回覆碼:" -"%(ret.status)d。訊息:%(ret.data)s。" - -#, python-format -msgid "Error Attaching volume %(vol)s." -msgstr "連接磁區 %(vol)s 時發生錯誤。" - -#, python-format -msgid "" -"Error Cloning Snapshot: %(snapshot)s on Volume: %(lun)s of Pool: %(pool)s " -"Project: %(project)s Clone project: %(clone_proj)s Return code: " -"%(ret.status)d Message: %(ret.data)s." 
-msgstr "" -"在儲存區 %(pool)s 的磁區 %(lun)s 上複製 Snapshot:%(snapshot)s 時發生錯誤專" -"案:%(project)s,副本專案:%(clone_proj)s,回覆碼:%(ret.status)d。訊息:" -"%(ret.data)s。" - -#, python-format -msgid "" -"Error Create Cloned Volume: %(cloneName)s Return code: %(rc)lu. Error: " -"%(error)s." -msgstr "" -"建立副本磁區 %(cloneName)s 時發生錯誤。回覆碼:%(rc)lu。錯誤:%(error)s。" - -#, python-format -msgid "" -"Error Create Cloned Volume: Volume: %(cloneName)s Source Volume:" -"%(sourceName)s. Return code: %(rc)lu. Error: %(error)s." -msgstr "" -"建立副本磁區時發生錯誤:磁區:%(cloneName)s 來源磁區:%(sourceName)s。回覆" -"碼:%(rc)lu。錯誤:%(error)s。" - -#, python-format -msgid "" -"Error Create Group: %(groupName)s. Return code: %(rc)lu. Error: %(error)s." -msgstr "建立群組 %(groupName)s 時發生錯誤。回覆碼:%(rc)lu。錯誤:%(error)s。" - -#, python-format -msgid "" -"Error Create Masking View: %(groupName)s. Return code: %(rc)lu. Error: " -"%(error)s." -msgstr "" -"建立遮罩視圖 %(groupName)s 時發生錯誤。回覆碼:%(rc)lu。錯誤:%(error)s。" - -#, python-format -msgid "" -"Error Create Volume: %(volumeName)s. Return code: %(rc)lu. Error: %(error)s." -msgstr "建立磁區 %(volumeName)s 時發生錯誤。回覆碼:%(rc)lu。錯誤:%(error)s。" - -#, python-format -msgid "" -"Error Create Volume: %(volumename)s. Return code: %(rc)lu. Error: %(error)s." -msgstr "建立磁區 %(volumename)s 時發生錯誤。回覆碼:%(rc)lu。錯誤:%(error)s。" - -#, python-format -msgid "" -"Error CreateGroupReplica: source: %(source)s target: %(target)s. Return " -"code: %(rc)lu. Error: %(error)s." -msgstr "" -"建立群組抄本時發生錯誤:來源:%(source)s 目標:%(target)s。回覆碼:%(rc)lu。" -"錯誤:%(error)s。" - -#, python-format -msgid "" -"Error Creating Initiator: %(initiator)s on Alias: %(alias)s Return code: " -"%(ret.status)d Message: %(ret.data)s ." -msgstr "" -"在別名 %(alias)s 上建立起始器 %(initiator)s 時發生錯誤。回覆碼:" -"%(ret.status)d。訊息:%(ret.data)s。" - -#, python-format -msgid "" -"Error Creating Project: %(project)s on Pool: %(pool)s Return code: " -"%(ret.status)d Message: %(ret.data)s ." 
-msgstr "" -"在儲存區 %(pool)s 上建立專案 %(project)s 時發生錯誤。回覆碼:%(ret.status)d。" -"訊息:%(ret.data)s。" - -#, python-format -msgid "" -"Error Creating Property: %(property)s Type: %(type)s Description: " -"%(description)s Return code: %(ret.status)d Message: %(ret.data)s." -msgstr "" -"建立內容 %(property)s 類型 %(type)s 說明%(description)s 時發生錯誤。回覆碼:" -"%(ret.status)d。訊息:%(ret.data)s。" - -#, python-format -msgid "" -"Error Creating Share: %(name)s Return code: %(ret.status)d Message: " -"%(ret.data)s." -msgstr "" -"建立共用 %(name)s 時發生錯誤。回覆碼:%(ret.status)d。訊息:%(ret.data)s。" - -#, python-format -msgid "" -"Error Creating Snapshot: %(snapshot)s onVolume: %(lun)s to Pool: %(pool)s " -"Project: %(project)s Return code: %(ret.status)d Message: %(ret.data)s." -msgstr "" -"在儲存區 %(pool)s 的磁區 %(lun)s 上建立 Snapshot:%(snapshot)s 時發生錯誤專" -"案:%(project)s。回覆碼:%(ret.status)d。訊息:%(ret.data)s。" - -#, python-format -msgid "" -"Error Creating Snapshot: %(snapshot)s onshare: %(share)s to Pool: %(pool)s " -"Project: %(project)s Return code: %(ret.status)d Message: %(ret.data)s." -msgstr "" -"在下列儲存區的共用 %(share)s 上建立 Snapshot %(snapshot)s 時發生錯誤:" -"%(pool)s 專案:%(project)s 回覆碼:%(ret.status)d 訊息:%(ret.data)s。" - -#, python-format -msgid "" -"Error Creating Target: %(alias)sReturn code: %(ret.status)d Message: " -"%(ret.data)s ." -msgstr "" -"建立目標 %(alias)s 時發生錯誤。回覆碼:%(ret.status)d。訊息:%(ret.data)s。" - -#, python-format -msgid "" -"Error Creating TargetGroup: %(targetgroup)s withIQN: %(iqn)sReturn code: " -"%(ret.status)d Message: %(ret.data)s ." -msgstr "" -"建立帶 IQN %(iqn)s 的目標群組 %(targetgroup)s 時發生錯誤。回覆碼:" -"%(ret.status)d。訊息:%(ret.data)s。" - -#, python-format -msgid "" -"Error Creating Volume: %(lun)s Size: %(size)s Return code: %(ret.status)d " -"Message: %(ret.data)s." -msgstr "" -"建立磁區 %(lun)s 時發生錯誤。大小:%(size)s。回覆碼:%(ret.status)d。訊息:" -"%(ret.data)s。" - -#, python-format -msgid "" -"Error Creating new composite Volume Return code: %(rc)lu. Error: %(error)s." 
-msgstr "建立新的複合磁區時發生錯誤。回覆碼:%(rc)lu。錯誤:%(error)s。" - -#, python-format -msgid "" -"Error Creating replication action on: pool: %(pool)s Project: %(proj)s " -"volume: %(vol)s for target: %(tgt)s and pool: %(tgt_pool)sReturn code: " -"%(ret.status)d Message: %(ret.data)s ." -msgstr "" -"針對目標 %(tgt)s 和儲存區 %(tgt_pool)s,在儲存區 %(pool)s 專案 %(proj)s磁區 " -"%(vol)s 上建立抄寫動作時發生錯誤。回覆碼:%(ret.status)d。訊息:" -"%(ret.data)s。" - -msgid "Error Creating unbound volume on an Extend operation." -msgstr "在「延伸」作業上建立未連結的磁區時發生錯誤。" - -msgid "Error Creating unbound volume." -msgstr "建立未連結的磁區時發生錯誤。" - -#, python-format -msgid "" -"Error Delete Volume: %(volumeName)s. Return code: %(rc)lu. Error: %(error)s." -msgstr "刪除磁區 %(volumeName)s 時發生錯誤。回覆碼:%(rc)lu。錯誤:%(error)s。" - -#, python-format -msgid "" -"Error Deleting Group: %(storageGroupName)s. Return code: %(rc)lu. Error: " -"%(error)s" -msgstr "" -"刪除群組 %(storageGroupName)s 時發生錯誤。回覆碼:%(rc)lu。錯誤:%(error)s" - -#, python-format -msgid "" -"Error Deleting Initiator Group: %(initiatorGroupName)s. Return code: " -"%(rc)lu. Error: %(error)s" -msgstr "" -"刪除起始器群組 %(initiatorGroupName)s 時發生錯誤。回覆碼:%(rc)lu。錯誤:" -"%(error)s" - -#, python-format -msgid "" -"Error Deleting Snapshot: %(snapshot)s on Share: %(share)s to Pool: %(pool)s " -"Project: %(project)s Return code: %(ret.status)d Message: %(ret.data)s." -msgstr "" -"在下列儲存區的共用 %(share)s 上刪除 Snapshot %(snapshot)s 時發生錯誤:" -"%(pool)s 專案:%(project)s 回覆碼:%(ret.status)d。訊息:%(ret.data)s。" - -#, python-format -msgid "" -"Error Deleting Snapshot: %(snapshot)s on Volume: %(lun)s to Pool: %(pool)s " -"Project: %(project)s Return code: %(ret.status)d Message: %(ret.data)s." -msgstr "" -"在下列儲存區的磁區 %(lun)s 上刪除 Snapshot:%(snapshot)s 時發生錯誤:" -"%(pool)s 專案:%(project)s 回覆碼:%(ret.status)d 訊息:%(ret.data)s。" - -#, python-format -msgid "" -"Error Deleting Volume: %(lun)s from Pool: %(pool)s, Project: %(project)s. " -"Return code: %(ret.status)d, Message: %(ret.data)s." 
-msgstr "" -"從儲存區 %(pool)s 刪除磁區 %(lun)s 時發生錯誤,專案 %(project)s。回覆碼:" -"%(ret.status)d,訊息:%(ret.data)s。" - -#, python-format -msgid "" -"Error Deleting project: %(project)s on pool: %(pool)s Return code: " -"%(ret.status)d Message: %(ret.data)s." -msgstr "" -"刪除儲存區 %(pool)s 上的專案 %(project)s 時發生錯誤。回覆碼:%(ret.status)d。" -"訊息:%(ret.data)s。" - -#, python-format -msgid "" -"Error Deleting replication action: %(id)s Return code: %(ret.status)d " -"Message: %(ret.data)s." -msgstr "" -"刪除抄寫動作 %(id)s 時發生錯誤。回覆碼:%(ret.status)d。訊息:%(ret.data)s。" - -#, python-format -msgid "" -"Error Extend Volume: %(volumeName)s. Return code: %(rc)lu. Error: %(error)s." -msgstr "延伸磁區 %(volumeName)s 時發生錯誤。回覆碼:%(rc)lu。錯誤:%(error)s。" - -#, python-format -msgid "" -"Error Getting Initiators: InitiatorGroup: %(initiatorgroup)s Return code: " -"%(ret.status)d Message: %(ret.data)s ." -msgstr "" -"取得起始器時發生錯誤:InitiatorGroup:%(initiatorgroup)s 回覆碼:" -"%(ret.status)d。訊息:%(ret.data)s。" - -#, python-format -msgid "" -"Error Getting Pool Stats: Pool: %(pool)s Return code: %(status)d Message: " -"%(data)s." -msgstr "" -"取得儲存區統計資料時發生錯誤:儲存區:%(pool)s,回覆碼:%(status)d,訊息:" -"%(data)s。" - -#, python-format -msgid "" -"Error Getting Project Stats: Pool: %(pool)s Project: %(project)s Return " -"code: %(ret.status)d Message: %(ret.data)s." -msgstr "" -"取得專案統計資料時發生錯誤:儲存區:%(pool)s,專案:%(project)s,回覆碼:" -"%(ret.status)d,訊息:%(ret.data)s。" - -#, python-format -msgid "" -"Error Getting Share: %(share)s on Pool: %(pool)s Project: %(project)s Return " -"code: %(ret.status)d Message: %(ret.data)s." -msgstr "" -"取得儲存區 %(pool)s 上的共用 %(share)s 時發生錯誤。專案:%(project)s回覆碼:" -"%(ret.status)d。訊息:%(ret.data)s。" - -#, python-format -msgid "" -"Error Getting Snapshot: %(snapshot)s on Volume: %(lun)s to Pool: %(pool)s " -"Project: %(project)s Return code: %(ret.status)d Message: %(ret.data)s." 
-msgstr "" -"在儲存區 %(pool)s 的磁區 %(lun)s 上取得 Snapshot:%(snapshot)s 時發生錯誤專" -"案:%(project)s。回覆碼:%(ret.status)d。訊息:%(ret.data)s。" - -#, python-format -msgid "" -"Error Getting Target: %(alias)sReturn code: %(ret.status)d Message: " -"%(ret.data)s ." -msgstr "" -"取得目標 %(alias)s 時發生錯誤。回覆碼:%(ret.status)d。訊息:%(ret.data)s。" - -#, python-format -msgid "" -"Error Getting Volume: %(lun)s on Pool: %(pool)s Project: %(project)s Return " -"code: %(ret.status)d Message: %(ret.data)s." -msgstr "" -"取得儲存區 %(pool)s 上的磁區 %(lun)s 時發生錯誤。專案:%(project)s。回覆碼:" -"%(ret.status)d。訊息:%(ret.data)s。" - -#, python-format -msgid "" -"Error Migrating volume from one pool to another. Return code: %(rc)lu. " -"Error: %(error)s." -msgstr "" -"將磁區從一個儲存區移轉至另一個儲存區時發生錯誤。回覆碼:%(rc)lu。錯誤:" -"%(error)s。" - -#, python-format -msgid "" -"Error Modifying masking view : %(groupName)s. Return code: %(rc)lu. Error: " -"%(error)s." -msgstr "" -"修改遮罩視圖 %(groupName)s 時發生錯誤。回覆碼:%(rc)lu。錯誤:%(error)s。" - -#, python-format -msgid "Error Pool ownership: Pool %(pool)s is not owned by %(host)s." -msgstr "錯誤的儲存區所有權:儲存區 %(pool)s 不歸 %(host)s 擁有。" - -#, python-format -msgid "" -"Error Setting props Props: %(props)s on Volume: %(lun)s of Pool: %(pool)s " -"Project: %(project)s Return code: %(ret.status)d Message: %(ret.data)s." -msgstr "" -"在儲存區 %(pool)s 的磁區 %(lun)s 上設定內容 Props:%(props)s 時發生錯誤專案:" -"%(project)s。回覆碼:%(ret.status)d。訊息:%(ret.data)s。" - -#, python-format -msgid "" -"Error Terminating migrate session. Return code: %(rc)lu. Error: %(error)s." -msgstr "終止移轉階段作業時發生錯誤。回覆碼:%(rc)lu。錯誤:%(error)s。" - -#, python-format -msgid "" -"Error Verifying Initiator: %(iqn)s Return code: %(ret.status)d Message: " -"%(ret.data)s." -msgstr "" -"驗證起始器 %(iqn)s 時發生錯誤。回覆碼:%(ret.status)d。訊息:%(ret.data)s。" - -#, python-format -msgid "" -"Error Verifying Pool: %(pool)s Return code: %(ret.status)d Message: " -"%(ret.data)s." 
-msgstr "" -"驗證儲存區 %(pool)s 時發生錯誤。回覆碼:%(ret.status)d。訊息:%(ret.data)s。" - -#, python-format -msgid "" -"Error Verifying Project: %(project)s on Pool: %(pool)s Return code: " -"%(ret.status)d Message: %(ret.data)s." -msgstr "" -"在儲存區 %(pool)s 上驗證專案 %(project)s 時發生錯誤。回覆碼:%(ret.status)d。" -"訊息:%(ret.data)s。" - -#, python-format -msgid "" -"Error Verifying Service: %(service)s Return code: %(ret.status)d Message: " -"%(ret.data)s." -msgstr "" -"驗證服務 %(service)s 時發生錯誤。回覆碼:%(ret.status)d。訊息:%(ret.data)s。" - -#, python-format -msgid "" -"Error Verifying Target: %(alias)s Return code: %(ret.status)d Message: " -"%(ret.data)s." -msgstr "" -"驗證目標 %(alias)s 時發生錯誤。回覆碼:%(ret.status)d。訊息:%(ret.data)s。" - -#, python-format -msgid "" -"Error Verifying share: %(share)s on Project: %(project)s and Pool: %(pool)s " -"Return code: %(ret.status)d Message: %(ret.data)s." -msgstr "" -"在專案 %(project)s 及儲存區 %(pool)s 上驗證共用 %(share)s 時發生錯誤。回覆" -"碼:%(ret.status)d,訊息:%(ret.data)s。" - -#, python-format -msgid "" -"Error adding Volume: %(volumeName)s with instance path: " -"%(volumeInstancePath)s." -msgstr "" -"使用下列實例路徑新增磁區 %(volumeName)s 時發生錯誤:%(volumeInstancePath)s。" - -#, python-format -msgid "" -"Error adding initiator to group : %(groupName)s. Return code: %(rc)lu. " -"Error: %(error)s." -msgstr "" -"將起始器新增至群組 %(groupName)s 時發生錯誤。回覆碼:%(rc)lu。錯誤:" -"%(error)s。" - -#, python-format -msgid "Error adding volume to composite volume. Error is: %(error)s." -msgstr "將磁區新增至複合磁區時發生錯誤。錯誤:%(error)s。" - -#, python-format -msgid "Error appending volume %(volumename)s to target base volume." -msgstr "將磁區 %(volumename)s 附加至目標基本磁區時發生錯誤。" - -#, python-format -msgid "" -"Error associating storage group : %(storageGroupName)s. To fast Policy: " -"%(fastPolicyName)s with error description: %(errordesc)s." 
-msgstr "" -"將儲存體群組 %(storageGroupName)s 與下列 FAST 原則建立關聯時發生錯誤:" -"%(fastPolicyName)s,錯誤說明:%(errordesc)s。" - -#, python-format -msgid "" -"Error break clone relationship: Sync Name: %(syncName)s Return code: " -"%(rc)lu. Error: %(error)s." -msgstr "" -"岔斷複製關係時發生錯誤:同步名稱:%(syncName)s 回覆碼:%(rc)lu。錯誤:" -"%(error)s。" - -msgid "Error connecting to ceph cluster." -msgstr "連接至 ceph 叢集時發生錯誤。" - -#, python-format -msgid "Error connecting via ssh: %s" -msgstr "透過 SSH 進行連接時發生錯誤:%s" - -#, python-format -msgid "Error creating volume: %s." -msgstr "建立磁區時發生錯誤:%s。" - -msgid "Error deleting replay profile." -msgstr "刪除重播設定檔時發生錯誤。" - -#, python-format -msgid "Error deleting volume %(ssn)s: %(volume)s" -msgstr "刪除磁區 %(ssn)s 時發生錯誤:%(volume)s" - -#, python-format -msgid "Error deleting volume %(vol)s: %(err)s." -msgstr "刪除磁區 %(vol)s 時發生錯誤:%(err)s。" - -#, python-format -msgid "Error during evaluator parsing: %(reason)s" -msgstr "評估器剖析期間發生錯誤:%(reason)s" - -#, python-format -msgid "" -"Error editing share: %(share)s on Pool: %(pool)s Return code: %(ret.status)d " -"Message: %(ret.data)s ." -msgstr "" -"編輯儲存區 %(pool)s 上的共用 %(share)s 時發生錯誤。回覆碼:%(ret.status)d。訊" -"息:%(ret.data)s。" - -#, python-format -msgid "" -"Error enabling iSER for NetworkPortal: please ensure that RDMA is supported " -"on your iSCSI port %(port)d on ip %(ip)s." -msgstr "" -"對 NetworkPortal 啟用 iSER 時發生錯誤:請確保 RDMA 在 IP %(ip)s 的 iSCSI 埠 " -"%(port)d 上受支援。" - -#, python-format -msgid "Error encountered during cleanup of a failed attach: %(ex)s" -msgstr "清除失敗連接期間發生錯誤:%(ex)s" - -#, python-format -msgid "Error executing CloudByte API [%(cmd)s], Error: %(err)s." -msgstr "執行 CloudByte API [%(cmd)s] 時發生錯誤,錯誤:%(err)s。" - -msgid "Error executing EQL command" -msgstr "執行 EQL 指令時發生錯誤" - -#, python-format -msgid "Error executing command via ssh: %s" -msgstr "透過 SSH 來執行指令時發生錯誤:%s" - -#, python-format -msgid "Error extending volume %(vol)s: %(err)s." 
-msgstr "延伸磁區 %(vol)s 時發生錯誤:%(err)s。" - -#, python-format -msgid "Error extending volume: %(reason)s" -msgstr "延伸磁區時發生錯誤:%(reason)s" - -#, python-format -msgid "Error finding %(name)s." -msgstr "尋找 %(name)s 時發生錯誤。" - -#, python-format -msgid "Error finding %s." -msgstr "尋找 %s 時發生錯誤。" - -#, python-format -msgid "" -"Error getting ReplicationSettingData. Return code: %(rc)lu. Error: %(error)s." -msgstr "" -"取得 ReplicationSettingData 時發生錯誤。回覆碼:%(rc)lu。錯誤:%(error)s。" - -msgid "" -"Error getting appliance version details. Return code: %(ret.status)d " -"Message: %(ret.data)s ." -msgstr "" -"取得軟體驅動裝置版本詳細資料時發生錯誤。回覆碼:%(ret.status)d。訊息:%(ret." -"data)s。" - -#, python-format -msgid "Error getting domain id from name %(name)s: %(err)s." -msgstr "從名稱 %(name)s 取得網域 ID 時發生錯誤:%(err)s。" - -#, python-format -msgid "Error getting domain id from name %(name)s: %(id)s." -msgstr "從名稱 %(name)s 取得網域 ID 時發生錯誤:%(id)s。" - -msgid "Error getting initiator groups." -msgstr "取得起始器群組時發生錯誤。" - -#, python-format -msgid "Error getting pool id from name %(pool)s: %(err)s." -msgstr "從名稱 %(pool)s 取得儲存區 ID 時發生錯誤:%(err)s。" - -#, python-format -msgid "Error getting pool id from name %(pool_name)s: %(err_msg)s." -msgstr "從名稱 %(pool_name)s 取得儲存區 ID 時發生錯誤:%(err_msg)s。" - -#, python-format -msgid "" -"Error getting replication action: %(id)s. Return code: %(ret.status)d " -"Message: %(ret.data)s ." -msgstr "" -"取得抄寫動作 %(id)s 時發生錯誤。回覆碼:%(ret.status)d。訊息:%(ret.data)s。" - -msgid "" -"Error getting replication source details. Return code: %(ret.status)d " -"Message: %(ret.data)s ." -msgstr "" -"取得抄寫來源詳細資料時發生錯誤。回覆碼:%(ret.status)d。訊息:%(ret.data)s。" - -msgid "" -"Error getting replication target details. Return code: %(ret.status)d " -"Message: %(ret.data)s ." -msgstr "" -"取得抄寫目標詳細資料時發生錯誤。回覆碼:%(ret.status)d。訊息:%(ret.data)s。" - -#, python-format -msgid "" -"Error getting version: svc: %(svc)s.Return code: %(ret.status)d Message: " -"%(ret.data)s." 
-msgstr "" -"取得版本時發生錯誤:SVC:%(svc)s。回覆碼:%(ret.status)d 訊息:%(ret.data)s。" - -#, python-format -msgid "" -"Error in Operation [%(operation)s] for volume [%(cb_volume)s] in CloudByte " -"storage: [%(cb_error)s], error code: [%(error_code)s]." -msgstr "" -"在 CloudByte 儲存體中,對磁區 [%(cb_volume)s] 執行的作業 [%(operation)s] 發生" -"錯誤:[%(cb_error)s],錯誤碼:[%(error_code)s]。" - -#, python-format -msgid "Error in SolidFire API response: data=%(data)s" -msgstr "SolidFire API 回應發生錯誤:資料 = %(data)s" - -#, python-format -msgid "Error in space-create for %(space)s of size %(size)d GB" -msgstr "為 %(space)s 建立大小為 %(size)d GB 的空間時發生錯誤" - -#, python-format -msgid "Error in space-extend for volume %(space)s with %(size)d additional GB" -msgstr "將磁區 %(space)s 的空間額外延伸 %(size)d GB 時發生錯誤" - -#, python-format -msgid "Error managing volume: %s." -msgstr "管理磁區時發生錯誤:%s。" - -#, python-format -msgid "" -"Error modify replica synchronization: %(sv)s operation: %(operation)s. " -"Return code: %(rc)lu. Error: %(error)s." -msgstr "" -"修改抄本同步化時發生錯誤:%(sv)s 作業:%(operation)s。回覆碼:%(rc)lu。錯誤:" -"%(error)s。" - -#, python-format -msgid "" -"Error modifying Service: %(service)s Return code: %(ret.status)d Message: " -"%(ret.data)s." -msgstr "" -"修改服務 %(service)s 時發生錯誤。回覆碼:%(ret.status)d。訊息:%(ret.data)s。" - -#, python-format -msgid "" -"Error moving volume: %(vol)s from source project: %(src)s to target project: " -"%(tgt)s Return code: %(ret.status)d Message: %(ret.data)s ." -msgstr "" -"將磁區 %(vol)s 從來源專案 %(src)s 移至下列目標專案時發生錯誤:%(tgt)s。回覆" -"碼:%(ret.status)d。訊息:%(ret.data)s。" - -msgid "Error not a KeyError." -msgstr "此錯誤不是 KeyError。" - -msgid "Error not a TypeError." -msgstr "此錯誤不是 TypeError。" - -#, python-format -msgid "Error occurred when creating cgsnapshot %s." -msgstr "建立 CgSnapshot %s 時發生錯誤。" - -#, python-format -msgid "Error occurred when deleting cgsnapshot %s." -msgstr "刪除 CgSnapshot %s 時發生錯誤。" - -#, python-format -msgid "Error occurred when updating consistency group %s." 
-msgstr "更新一致性群組 %s 時發生錯誤。" - -#, python-format -msgid "Error renaming volume %(vol)s: %(err)s." -msgstr "重命名磁區 %(vol)s 時發生錯誤:%(err)s。" - -#, python-format -msgid "Error response: %s" -msgstr "錯誤的回應:%s" - -msgid "Error retrieving volume size" -msgstr "擷取磁區大小時發生錯誤" - -#, python-format -msgid "" -"Error sending replication update for action id: %(id)s . Return code: " -"%(ret.status)d Message: %(ret.data)s ." -msgstr "" -"傳送動作識別碼 %(id)s 的抄寫更新時發生錯誤。回覆碼:%(ret.status)d。訊息:" -"%(ret.data)s。" - -#, python-format -msgid "" -"Error sending replication update. Returned error: %(err)s. Action: %(id)s." -msgstr "傳送抄寫更新時發生錯誤。傳回的錯誤:%(err)s。動作:%(id)s。" - -#, python-format -msgid "" -"Error setting replication inheritance to %(set)s for volume: %(vol)s project " -"%(project)s Return code: %(ret.status)d Message: %(ret.data)s ." -msgstr "" -"針對磁區 %(vol)s,將抄寫繼承設定為 %(set)s 時發生錯誤,專案:%(project)s。回" -"覆碼:%(ret.status)d。訊息:%(ret.data)s。" - -#, python-format -msgid "" -"Error severing the package: %(package)s from source: %(src)s Return code: " -"%(ret.status)d Message: %(ret.data)s ." -msgstr "" -"將套件 %(package)s 從來源 %(src)s 分離時發生錯誤。回覆碼:%(ret.status)d。訊" -"息:%(ret.data)s。" - -#, python-format -msgid "Error unbinding volume %(vol)s from pool. %(error)s." -msgstr "從儲存區解除連結磁區 %(vol)s 時發生錯誤。%(error)s。" - -#, python-format -msgid "Error while authenticating with switch: %s." -msgstr "對交換器進行鑑別時發生錯誤:%s。" - -#, python-format -msgid "Error while changing VF context %s." -msgstr "變更 VF 環境定義 %s 時發生錯誤。" - -#, python-format -msgid "Error while checking the firmware version %s." -msgstr "檢查韌體版本 %s 時發生錯誤。" - -#, python-format -msgid "Error while checking transaction status: %s" -msgstr "檢查交易狀態時發生錯誤:%s" - -#, python-format -msgid "Error while checking whether VF is available for management %s." -msgstr "檢查 VF 是否適用於管理 %s 時發生錯誤。" - -#, python-format -msgid "" -"Error while connecting the switch %(switch_id)s with protocol %(protocol)s. " -"Error: %(error)s." 
-msgstr "" -"連接使用通訊協定 %(protocol)s 的交換器 %(switch_id)s 時發生錯誤。錯誤:" -"%(error)s。" - -#, python-format -msgid "Error while creating authentication token: %s" -msgstr "建立鑑別記號時發生錯誤:%s" - -#, python-format -msgid "Error while creating snapshot [status] %(stat)s - [result] %(res)s." -msgstr "建立 Snapshot [狀態] %(stat)s 時發生錯誤 - [結果] %(res)s。" - -#, python-format -msgid "Error while creating volume [status] %(stat)s - [result] %(res)s." -msgstr "建立磁區 [狀態] %(stat)s 時發生錯誤 - [結果] %(res)s。" - -#, python-format -msgid "Error while deleting snapshot [status] %(stat)s - [result] %(res)s" -msgstr "刪除 Snapshot [狀態] %(stat)s 時發生錯誤 - [結果] %(res)s。" - -#, python-format -msgid "Error while deleting volume [status] %(stat)s - [result] %(res)s." -msgstr "刪除磁區 [狀態] %(stat)s 時發生錯誤 - [結果] %(res)s。" - -#, python-format -msgid "Error while extending volume [status] %(stat)s - [result] %(res)s." -msgstr "延伸磁區 [狀態] %(stat)s 時發生錯誤 - [結果] %(res)s。" - -#, python-format -msgid "Error while getting %(op)s details, returned code: %(status)s." -msgstr "取得 %(op)s 詳細資料時發生錯誤,回覆碼:%(status)s。" - -#, python-format -msgid "Error while getting data via ssh: (command=%(cmd)s error=%(err)s)." -msgstr "透過 SSH 取得資料時發生錯誤:(指令 = %(cmd)s 錯誤 = %(err)s)。" - -#, python-format -msgid "Error while getting disco information [%s]." -msgstr "取得 DISCO 資訊 [%s] 時發生錯誤。" - -#, python-format -msgid "Error while getting nvp value: %s." -msgstr "取得 NVP 值時發生錯誤:%s。" - -#, python-format -msgid "Error while getting session information %s." -msgstr "取得階段作業資訊 %s 時發生錯誤。" - -#, python-format -msgid "Error while parsing the data: %s." -msgstr "剖析資料時發生錯誤:%s。" - -#, python-format -msgid "Error while querying page %(url)s on the switch, reason %(error)s." -msgstr "在交換器上查詢頁面 %(url)s 時發生錯誤,原因:%(error)s。" - -#, python-format -msgid "" -"Error while removing the zones and cfgs in the zone string: %(description)s." -msgstr "移除區域字串中的區域和配置時發生錯誤:%(description)s。" - -#, python-format -msgid "Error while requesting %(service)s API." 
-msgstr "要求 %(service)s API 時發生錯誤。" - -#, python-format -msgid "Error while running zoning CLI: (command=%(cmd)s error=%(err)s)." -msgstr "執行分區 CLI 時發生錯誤:(指令 = %(cmd)s 錯誤 = %(err)s)。" - -#, python-format -msgid "" -"Error while updating the new zones and cfgs in the zone string. Error " -"%(description)s." -msgstr "更新區域字串中的新區域和配置時發生錯誤。錯誤:%(description)s。" - -msgid "Error writing field to database" -msgstr "將欄位寫入資料庫時發生錯誤" - -#, python-format -msgid "Error[%(stat)s - %(res)s] while getting volume id." -msgstr "取得磁區 ID 時發生錯誤 [%(stat)s - %(res)s]。" - -#, python-format -msgid "" -"Error[%(stat)s - %(res)s] while restoring snapshot [%(snap_id)s] into volume " -"[%(vol)s]." -msgstr "" -"將 Snapshot [%(snap_id)s] 還原到磁區 [%(vol)s] 中時,發生錯誤 [%(stat)s - " -"%(res)s]。" - -#, python-format -msgid "Error[status] %(stat)s - [result] %(res)s] while getting volume id." -msgstr "取得磁區 ID 時發生錯誤 [狀態] %(stat)s - [結果] %(res)s。" - -#, python-format -msgid "" -"Exceeded max scheduling attempts %(max_attempts)d for volume %(volume_id)s" -msgstr "已超出磁區 %(volume_id)s 的排程嘗試次數上限 %(max_attempts)d" - -msgid "Exceeded the limit of snapshots per volume" -msgstr "已超過每個磁區的 Snapshot 數目限制" - -#, python-format -msgid "Exception appending meta volume to target volume %(volumename)s." -msgstr "將 meta 磁區附加到目標磁區 %(volumename)s 時發生異常狀況。" - -#, python-format -msgid "" -"Exception during create element replica. Clone name: %(cloneName)s Source " -"name: %(sourceName)s Extra specs: %(extraSpecs)s " -msgstr "" -"建立元素抄本期間發生異常狀況。副本名稱:%(cloneName)s,來源名稱:" -"%(sourceName)s,額外規格:%(extraSpecs)s " - -#, python-format -msgid "Exception in _select_ds_for_volume: %s." -msgstr "_select_ds_for_volume 發生異常狀況:%s。" - -#, python-format -msgid "Exception while forming the zone string: %s." -msgstr "對區域字串進行格式化時發生異常狀況:%s。" - -#, python-format -msgid "Exception: %s" -msgstr "異常狀況:%s" - -#, python-format -msgid "Expected a uuid but received %(uuid)s." 
-msgstr "需要 UUID,但收到 %(uuid)s。" - -#, python-format -msgid "Expected exactly one node called \"%s\"" -msgstr "預期只有一個節點稱為 \"%s\"" - -#, python-format -msgid "Expected integer for node_count, svcinfo lsiogrp returned: %(node)s." -msgstr "node_count 預期整數,傳回了 svcinfo lsiogrp:%(node)s。" - -#, python-format -msgid "Expected no output from CLI command %(cmd)s, got %(out)s." -msgstr "預期 CLI 指令 %(cmd)s 沒有任何輸出,但卻取得 %(out)s。" - -#, python-format -msgid "" -"Expected single vdisk returned from lsvdisk when filtering on vdisk_UID. " -"%(count)s were returned." -msgstr "" -"根據 vdisk_UID 來過濾時,預期從 lsvdisk 傳回單一 vdisk。傳回了 %(count)s 個。" - -#, python-format -msgid "Expected volume size was %d" -msgstr "預期磁區大小為 %d" - -#, python-format -msgid "" -"Export backup aborted, expected backup status %(expected_status)s but got " -"%(actual_status)s." -msgstr "" -"已中止匯出備份,預期備份狀態 %(expected_status)s,但取得 %(actual_status)s。" - -#, python-format -msgid "" -"Export record aborted, the backup service currently configured " -"[%(configured_service)s] is not the backup service that was used to create " -"this backup [%(backup_service)s]." -msgstr "" -"已中止匯出記錄,目前配置的備份服務[%(configured_service)s] 不是建立此備份所使" -"用的備份服務 [%(backup_service)s]。" - -msgid "Extend volume error." -msgstr "延伸磁區時發生錯誤。" - -msgid "" -"Extend volume is only supported for this driver when no snapshots exist." -msgstr "僅當不存在 Snapshot 時,此驅動程式才支援延伸磁區。" - -msgid "Extend volume not implemented" -msgstr "未實作延伸磁區" - -msgid "FAST is not supported on this array." -msgstr "此陣列不支援 FAST 原則。" - -msgid "FC is the protocol but wwpns are not supplied by OpenStack." -msgstr "FC 是通訊協定,但 OpenStack 未提供 WWPN。" - -#, python-format -msgid "Faield to unassign %(volume)s" -msgstr "無法取消指派 %(volume)s" - -#, python-format -msgid "Fail to create cache volume %(volume)s. 
Error: %(err)s" -msgstr "無法建立快取磁區 %(volume)s。錯誤:%(err)s" - -#, python-format -msgid "Failed adding connection for fabric=%(fabric)s: Error: %(err)s" -msgstr "無法給架構 %(fabric)s 新增連線:錯誤:%(err)s" - -msgid "Failed cgsnapshot" -msgstr "失敗的 CgSnapshot" - -#, python-format -msgid "Failed creating snapshot for group: %(response)s." -msgstr "無法為群組建立 Snapshot:%(response)s。" - -#, python-format -msgid "Failed creating snapshot for volume %(volname)s: %(response)s." -msgstr "無法為磁區 %(volname)s 建立 Snapshot:%(response)s。" - -#, python-format -msgid "Failed getting active zone set from fabric %s." -msgstr "無法從光纖 %s 取得作用中的區域集。" - -#, python-format -msgid "Failed getting details for pool %s." -msgstr "無法取得儲存區 %s 的詳細資料。" - -#, python-format -msgid "Failed removing connection for fabric=%(fabric)s: Error: %(err)s" -msgstr "無法移除架構 %(fabric)s 的連線:錯誤:%(err)s" - -#, python-format -msgid "Failed to Extend Volume %(volname)s" -msgstr "無法延伸磁區 %(volname)s" - -#, python-format -msgid "Failed to Login to 3PAR (%(url)s) because %(err)s" -msgstr "無法登入 3PAR (%(url)s),原因:%(err)s" - -msgid "Failed to access active zoning configuration." -msgstr "無法存取作用中的分區配置。" - -#, python-format -msgid "Failed to access zoneset status:%s" -msgstr "無法存取區域集狀態:%s" - -#, python-format -msgid "" -"Failed to acquire a resource lock. (serial: %(serial)s, inst: %(inst)s, ret: " -"%(ret)s, stderr: %(err)s)" -msgstr "" -"無法獲得資源鎖定。(序列:%(serial)s,實例:%(inst)s,ret:%(ret)s,標準錯" -"誤:%(err)s)" - -msgid "Failed to add the logical device." -msgstr "無法新增邏輯裝置。" - -#, python-format -msgid "" -"Failed to add volume %(volumeName)s to consistency group %(cgName)s. Return " -"code: %(rc)lu. Error: %(error)s." -msgstr "" -"無法將磁區 %(volumeName)s 新增至一致性群組 %(cgName)s。回覆碼:%(rc)lu。錯" -"誤:%(error)s。" - -msgid "Failed to add zoning configuration." -msgstr "無法新增分區配置。" - -#, python-format -msgid "" -"Failed to assign the iSCSI initiator IQN. 
(port: %(port)s, reason: " -"%(reason)s)" -msgstr "無法指派 iSCSI 起始器 IQN。(埠:%(port)s,原因:%(reason)s)" - -#, python-format -msgid "Failed to associate qos_specs: %(specs_id)s with type %(type_id)s." -msgstr "無法使 qos_specs:%(specs_id)s 與類型 %(type_id)s 產生關聯。" - -#, python-format -msgid "Failed to attach iSCSI target for volume %(volume_id)s." -msgstr "無法給磁區 %(volume_id)s 連接 iSCSI 目標。" - -#, python-format -msgid "Failed to backup volume metadata - %s" -msgstr "無法備份磁區 meta 資料 - %s" - -#, python-format -msgid "" -"Failed to backup volume metadata - Metadata backup object 'backup.%s.meta' " -"already exists" -msgstr "無法備份磁區 meta 資料 - meta 資料備份物件'backup.%s.meta' 已存在" - -#, python-format -msgid "Failed to clone volume from snapshot %s." -msgstr "無法從 Snapshot %s 複製磁區。" - -#, python-format -msgid "Failed to connect to %(vendor_name)s Array %(host)s: %(err)s" -msgstr "無法連接 %(vendor_name)s 陣列 %(host)s:%(err)s" - -msgid "Failed to connect to Dell REST API" -msgstr "無法連接至 Dell REST API" - -msgid "Failed to connect to array" -msgstr "無法連接陣列" - -#, python-format -msgid "Failed to connect to sheep daemon. addr: %(addr)s, port: %(port)s" -msgstr "無法連接 sheep 常駐程式。位址:%(addr)s,埠:%(port)s" - -#, python-format -msgid "Failed to copy image to volume: %(reason)s" -msgstr "無法將映像檔複製到磁區:%(reason)s" - -#, python-format -msgid "Failed to copy metadata to volume: %(reason)s" -msgstr "無法將 meta 資料複製到磁區:%(reason)s" - -msgid "Failed to copy volume, destination device unavailable." -msgstr "未能複製磁區,無法使用目的地裝置。" - -msgid "Failed to copy volume, source device unavailable." -msgstr "未能複製磁區,無法使用來源裝置。" - -#, python-format -msgid "Failed to create CG %(cgName)s from snapshot %(cgSnapshot)s." -msgstr "無法從 Snapshot %(cgSnapshot)s 建立 CG %(cgName)s。" - -#, python-format -msgid "Failed to create IG, %s" -msgstr "無法建立 IG %s" - -#, python-format -msgid "Failed to create Volume Group: %(vg_name)s" -msgstr "無法建立磁區群組:%(vg_name)s" - -#, python-format -msgid "" -"Failed to create a file. 
(file: %(file)s, ret: %(ret)s, stderr: %(err)s)" -msgstr "無法建立檔案。(檔案:%(file)s,ret:%(ret)s,標準錯誤:%(err)s)" - -#, python-format -msgid "Failed to create a temporary snapshot for volume %s." -msgstr "無法建立磁區 %s 的暫用 Snapshot。" - -msgid "Failed to create api volume flow." -msgstr "無法建立 API 磁區流程。" - -#, python-format -msgid "Failed to create cg snapshot %(id)s due to %(reason)s." -msgstr "由於 %(reason)s,無法建立 cg Snapshot %(id)s。" - -#, python-format -msgid "Failed to create consistency group %(id)s due to %(reason)s." -msgstr "由於 %(reason)s,無法建立一致性群組 %(id)s。" - -#, python-format -msgid "Failed to create consistency group %(id)s:%(ret)s." -msgstr "無法建立一致性群組 %(id)s:%(ret)s。" - -#, python-format -msgid "" -"Failed to create consistency group %s because VNX consistency group cannot " -"accept compressed LUNs as members." -msgstr "" -"無法建立一致性群組 %s,因為 VNX 一致性群組無法接受壓縮的 LUN 作為成員。" - -#, python-format -msgid "Failed to create consistency group: %(cgName)s." -msgstr "無法建立一致性群組 %(cgName)s。" - -#, python-format -msgid "Failed to create consistency group: %(cgid)s. Error: %(excmsg)s." -msgstr "無法建立一致性群組 %(cgid)s。錯誤:%(excmsg)s。" - -#, python-format -msgid "" -"Failed to create consistency group: %(consistencyGroupName)s Return code: " -"%(rc)lu. Error: %(error)s." -msgstr "" -"無法建立一致性群組:%(consistencyGroupName)s回覆碼:%(rc)lu。錯誤:" -"%(error)s。" - -#, python-format -msgid "Failed to create hardware id(s) on %(storageSystemName)s." -msgstr "無法在 %(storageSystemName)s 上建立硬體 ID。" - -#, python-format -msgid "" -"Failed to create host: %(name)s. Please check if it exists on the array." -msgstr "無法建立主機:%(name)s。請檢查該主機在陣列上是否存在。" - -#, python-format -msgid "Failed to create hostgroup: %(name)s. Check if it exists on the array." -msgstr "無法建立主機群組:%(name)s。請檢查該主機群組在陣列上是否存在。" - -msgid "Failed to create iqn." -msgstr "無法建立 IQN。" - -#, python-format -msgid "Failed to create iscsi target for volume %(volume_id)s." -msgstr "無法給磁區 %(volume_id)s 建立 iSCSI 目標。" - -msgid "Failed to create manage existing flow." 
-msgstr "無法建立管理現有流程。" - -msgid "Failed to create manage_existing flow." -msgstr "無法建立 manage_existing 流程。" - -msgid "Failed to create map on mcs, no channel can map." -msgstr "無法在 MCS 上建立對映,沒有通道可對映。" - -msgid "Failed to create map." -msgstr "無法建立對映。" - -#, python-format -msgid "Failed to create metadata for volume: %(reason)s" -msgstr "無法給磁區建立 meta 資料:%(reason)s" - -msgid "Failed to create partition." -msgstr "無法建立分割區。" - -#, python-format -msgid "Failed to create qos_specs: %(name)s with specs %(qos_specs)s." -msgstr "無法使用規格 %(qos_specs)s 來建立 qos_specs:%(name)s。" - -msgid "Failed to create replica." -msgstr "無法建立抄本。" - -msgid "Failed to create scheduler manager volume flow" -msgstr "無法建立排定器管理程式磁區流程" - -#, python-format -msgid "Failed to create snapshot %s" -msgstr "無法建立 Snapshot %s" - -#, python-format -msgid "Failed to create snapshot for cg: %(cgName)s." -msgstr "無法建立 CG %(cgName)s 的 Snapshot。" - -#, python-format -msgid "Failed to create snapshot for volume %s." -msgstr "無法建立磁區 %s 的 Snapshot。" - -#, python-format -msgid "Failed to create snapshot policy on volume %(vol)s: %(res)s." -msgstr "無法在磁區 %(vol)s 上建立 Snapshot 原則:%(res)s。" - -#, python-format -msgid "Failed to create snapshot resource area on volume %(vol)s: %(res)s." -msgstr "無法在磁區 %(vol)s 上建立 Snapshot 資源區域:%(res)s。" - -msgid "Failed to create snapshot." -msgstr "無法建立 Snapshot。" - -#, python-format -msgid "" -"Failed to create snapshot. CloudByte volume information not found for " -"OpenStack volume [%s]." -msgstr "無法建立 Snapshot。找不到 OpenStack 磁區[%s] 的 CloudByte 磁區資訊。" - -#, python-format -msgid "Failed to create south bound connector for %s." -msgstr "無法建立 %s 的南行連接器。" - -#, python-format -msgid "Failed to create storage group %(storageGroupName)s." 
-msgstr "無法建立儲存體群組 %(storageGroupName)s。" - -#, python-format -msgid "Failed to create thin pool, error message was: %s" -msgstr "無法建立小型儲存區,錯誤訊息為:%s" - -#, python-format -msgid "Failed to create volume %s" -msgstr "無法建立磁區 %s" - -#, python-format -msgid "Failed to delete SI for volume_id: %(volume_id)s because it has pair." -msgstr "無法刪除 volume_id %(volume_id)s 的 SI,因為它具有配對。" - -#, python-format -msgid "Failed to delete a logical device. (LDEV: %(ldev)s, reason: %(reason)s)" -msgstr "無法刪除邏輯裝置。(LDEV:%(ldev)s,原因:%(reason)s)" - -#, python-format -msgid "Failed to delete cgsnapshot %(id)s due to %(reason)s." -msgstr "由於 %(reason)s,無法刪除 cg Snapshot %(id)s。" - -#, python-format -msgid "Failed to delete consistency group %(id)s due to %(reason)s." -msgstr "由於 %(reason)s,無法刪除一致性群組 %(id)s。" - -#, python-format -msgid "Failed to delete consistency group: %(cgName)s." -msgstr "無法刪除一致性群組 %(cgName)s。" - -#, python-format -msgid "" -"Failed to delete consistency group: %(consistencyGroupName)s Return code: " -"%(rc)lu. Error: %(error)s." -msgstr "" -"無法刪除一致性群組:%(consistencyGroupName)s回覆碼:%(rc)lu。錯誤:" -"%(error)s。" - -msgid "Failed to delete device." -msgstr "無法刪除裝置。" - -#, python-format -msgid "" -"Failed to delete fileset for consistency group %(cgname)s. Error: %(excmsg)s." -msgstr "無法刪除一致性群組 %(cgname)s 的檔案集。錯誤:%(excmsg)s。" - -msgid "Failed to delete iqn." -msgstr "無法刪除 IQN。" - -msgid "Failed to delete map." -msgstr "無法刪除對映。" - -msgid "Failed to delete partition." -msgstr "無法刪除分割區。" - -msgid "Failed to delete replica." -msgstr "無法刪除抄本。" - -#, python-format -msgid "Failed to delete snapshot %s" -msgstr "無法刪除 Snapshot %s" - -#, python-format -msgid "Failed to delete snapshot for cg: %(cgId)s." -msgstr "無法刪除 CG %(cgId)s 的 Snapshot。" - -#, python-format -msgid "Failed to delete snapshot for snapshot_id: %s because it has pair." -msgstr "無法刪除 snapshot_id %s 的 Snapshot,因為它具有配對。" - -msgid "Failed to delete snapshot." 
-msgstr "無法刪除 Snapshot。" - -#, python-format -msgid "Failed to delete volume %(volumeName)s." -msgstr "無法刪除磁區 %(volumeName)s。" - -#, python-format -msgid "" -"Failed to delete volume for volume_id: %(volume_id)s because it has pair." -msgstr "無法刪除 volume_id %(volume_id)s 的磁區,因為它具有配對。" - -#, python-format -msgid "Failed to detach iSCSI target for volume %(volume_id)s." -msgstr "無法分離磁區 %(volume_id)s 的 iSCSI 目標。" - -msgid "Failed to determine blockbridge API configuration" -msgstr "無法判定 Blockbridge API 配置。" - -msgid "Failed to disassociate qos specs." -msgstr "無法解除與服務品質規格的關聯。" - -#, python-format -msgid "Failed to disassociate qos_specs: %(specs_id)s with type %(type_id)s." -msgstr "無法解除 qos_specs:%(specs_id)s 與類型 %(type_id)s 的關聯。" - -#, python-format -msgid "" -"Failed to ensure snapshot resource area, could not locate volume for id %s" -msgstr "無法確保 Snapshot 資源區域,找不到 ID %s 的磁區" - -msgid "Failed to establish connection with Coho cluster" -msgstr "無法建立與 Coho 叢集的連線。" - -#, python-format -msgid "" -"Failed to execute CloudByte API [%(cmd)s]. Http status: %(status)s, Error: " -"%(error)s." -msgstr "" -"無法執行 CloudByte API [%(cmd)s]。HTTP 狀態:%(status)s,錯誤:%(error)s。" - -msgid "Failed to execute common command." -msgstr "無法執行一般指令。" - -#, python-format -msgid "Failed to export for volume: %(reason)s" -msgstr "無法針對磁區進行匯出:%(reason)s" - -#, python-format -msgid "Failed to extend volume %(name)s, Error msg: %(msg)s." -msgstr "無法延伸現有磁區 %(name)s,錯誤訊息:%(msg)s。" - -msgid "Failed to find QoSnode" -msgstr "找不到 QoSnode" - -msgid "Failed to find Storage Center" -msgstr "找不到 Storage Center" - -msgid "Failed to find a vdisk copy in the expected pool." -msgstr "在預期儲存區中找不到 vdisk 副本。" - -msgid "Failed to find account for volume." -msgstr "找不到磁區的帳戶。" - -#, python-format -msgid "Failed to find fileset for path %(path)s, command output: %(cmdout)s." 
-msgstr "找不到路徑 %(path)s 的檔案集,指令輸出:%(cmdout)s。" - -#, python-format -msgid "Failed to find group snapshot named: %s" -msgstr "找不到名為 %s 的群組 Snapshot" - -#, python-format -msgid "Failed to find host %s." -msgstr "找不到主機 %s。" - -#, python-format -msgid "Failed to find iSCSI initiator group containing %(initiator)s." -msgstr "找不到包含 %(initiator)s 的 iSCSI 起始器群組。" - -#, python-format -msgid "Failed to get CloudByte account details for account [%s]." -msgstr "無法取得帳戶 [%s] 的 CloudByte 帳戶詳細資料。" - -#, python-format -msgid "Failed to get LUN target details for the LUN %s" -msgstr "無法取得 LUN %s 的 LUN 目標詳細資料" - -#, python-format -msgid "Failed to get LUN target details for the LUN %s." -msgstr "無法取得 LUN %s 的 LUN 目標詳細資料。" - -#, python-format -msgid "Failed to get LUN target list for the LUN %s" -msgstr "無法取得 LUN %s 的 LUN 目標清單" - -#, python-format -msgid "Failed to get Partition ID for volume %(volume_id)s." -msgstr "無法取得磁區 %(volume_id)s 的分割區 ID。" - -#, python-format -msgid "Failed to get Raid Snapshot ID from Snapshot %(snapshot_id)s." -msgstr "無法從 Snapshot %(snapshot_id)s 取得 Raid Snapshot ID。" - -#, python-format -msgid "Failed to get Raid Snapshot ID from snapshot: %(snapshot_id)s." -msgstr "無法從 Snapshot %(snapshot_id)s 取得 Raid Snapshot ID。" - -msgid "Failed to get SplitMirror." -msgstr "無法取得 SplitMirror。" - -#, python-format -msgid "" -"Failed to get a storage resource. The system will attempt to get the storage " -"resource again. (resource: %(resource)s)" -msgstr "" -"無法取得儲存體資源。系統將嘗試再次取得儲存體資源。(資源:%(resource)s)" - -#, python-format -msgid "Failed to get all associations of qos specs %s" -msgstr "無法取得服務品質規格 %s 的所有關聯" - -msgid "Failed to get channel info." -msgstr "無法取得通道資訊。" - -#, python-format -msgid "Failed to get code level (%s)." -msgstr "無法取得程式碼層次 (%s)。" - -msgid "Failed to get device info." -msgstr "無法取得裝置資訊。" - -#, python-format -msgid "Failed to get domain because CPG (%s) doesn't exist on array." 
-msgstr "無法取得網域,因為 CPG (%s) 不存在於陣列上。" - -msgid "Failed to get image snapshots." -msgstr "無法取得映像檔 Snapshot。" - -#, python-format -msgid "Failed to get ip on Channel %(channel_id)s with volume: %(volume_id)s." -msgstr "無法在具有磁區 %(volume_id)s 的通道 %(channel_id)s 上取得 IP。" - -msgid "Failed to get iqn info." -msgstr "無法取得 IQN 資訊。" - -msgid "Failed to get license info." -msgstr "無法取得軟體使用權資訊。" - -msgid "Failed to get lv info." -msgstr "無法取得 LV 資訊。" - -msgid "Failed to get map info." -msgstr "無法取得對映資訊。" - -msgid "Failed to get migration task." -msgstr "無法取得移轉作業。" - -msgid "Failed to get model update from clone" -msgstr "無法從複本取得模型更新" - -msgid "Failed to get name server info." -msgstr "無法取得名稱伺服器資訊。" - -msgid "Failed to get network info." -msgstr "無法取得網路資訊。" - -#, python-format -msgid "Failed to get new part id in new pool: %(pool_id)s." -msgstr "無法取得新儲存區 %(pool_id)s 中的新組件 ID。" - -msgid "Failed to get partition info." -msgstr "無法取得分割區資訊。" - -#, python-format -msgid "Failed to get pool id with volume %(volume_id)s." -msgstr "無法取得具有磁區 %(volume_id)s 的儲存區 ID。" - -#, python-format -msgid "Failed to get remote copy information for %(volume)s due to %(err)s." -msgstr "無法取得 %(volume)s 的遠端複製資訊,原因:%(err)s。" - -#, python-format -msgid "" -"Failed to get remote copy information for %(volume)s. Exception: %(err)s." -msgstr "無法取得 %(volume)s 的遠端複製資訊。異常狀況:%(err)s。" - -msgid "Failed to get replica info." -msgstr "無法取得抄本資訊。" - -msgid "Failed to get show fcns database info." -msgstr "無法取得「顯示 fcns」資料庫資訊。" - -#, python-format -msgid "Failed to get size of volume %s" -msgstr "無法取得磁區 %s 的大小" - -#, python-format -msgid "Failed to get snapshot for volume %s." -msgstr "無法取得磁區 %s 的 Snapshot。" - -msgid "Failed to get snapshot info." -msgstr "無法取得 Snapshot 資訊。" - -#, python-format -msgid "Failed to get target IQN for the LUN %s" -msgstr "無法取得 LUN %s 的目標 IQN" - -msgid "Failed to get target LUN of SplitMirror." 
-msgstr "無法取得 SplitMirror 的目標 LUN。" - -#, python-format -msgid "Failed to get target portal for the LUN %s" -msgstr "無法取得 LUN %s 的目標入口網站" - -msgid "Failed to get targets" -msgstr "無法取得目標" - -msgid "Failed to get wwn info." -msgstr "無法取得 WWN 資訊。" - -#, python-format -msgid "" -"Failed to get, create or add volume %(volumeName)s to masking view " -"%(maskingViewName)s. The error message received was %(errorMessage)s." -msgstr "" -"無法取得、建立磁區 %(volumeName)s,或將其新增至遮罩視圖%(maskingViewName)s。" -"接收到的錯誤訊息為 %(errorMessage)s。" - -msgid "Failed to identify volume backend." -msgstr "無法識別磁區後端。" - -#, python-format -msgid "Failed to link fileset for the share %(cgname)s. Error: %(excmsg)s." -msgstr "無法鏈結共用 %(cgname)s 的檔案集。錯誤:%(excmsg)s。" - -#, python-format -msgid "Failed to log on %s Array (invalid login?)." -msgstr "無法登入 %s 陣列(無效登入?)。" - -#, python-format -msgid "Failed to login for user %s." -msgstr "無法以使用者 %s 身分登入。" - -msgid "Failed to login with all rest URLs." -msgstr "無法使用所有其餘 URL 進行登入。" - -#, python-format -msgid "" -"Failed to make a request to Datera cluster endpoint due to the following " -"reason: %s" -msgstr "無法對 Datera 叢集端點發出要求,原因如下:%s" - -msgid "Failed to manage api volume flow." -msgstr "無法管理 API 磁區流程。" - -#, python-format -msgid "" -"Failed to manage existing %(type)s %(name)s, because reported size %(size)s " -"was not a floating-point number." -msgstr "無法管理現有 %(type)s %(name)s,因為報告的大小 %(size)s 不是浮點數字。" - -#, python-format -msgid "" -"Failed to manage existing volume %(name)s, because of error in getting " -"volume size." -msgstr "無法管理現有磁區 %(name)s,因為取得磁區大小時發生錯誤。" - -#, python-format -msgid "" -"Failed to manage existing volume %(name)s, because rename operation failed: " -"Error msg: %(msg)s." -msgstr "無法管理現有磁區 %(name)s,因為重新命名作業失敗:錯誤訊息:%(msg)s。" - -#, python-format -msgid "" -"Failed to manage existing volume %(name)s, because reported size %(size)s " -"was not a floating-point number." 
-msgstr "無法管理現有磁區 %(name)s,因為所報告的大小 %(size)s不是浮點數字。" - -#, python-format -msgid "" -"Failed to manage existing volume due to I/O group mismatch. The I/O group of " -"the volume to be managed is %(vdisk_iogrp)s. I/O groupof the chosen type is " -"%(opt_iogrp)s." -msgstr "" -"因為 I/O 群組不符,所以無法管理現有磁區。要管理之磁區的 I/O 群組是 " -"%(vdisk_iogrp)s。所選類型的 I/O 群組是 %(opt_iogrp)s。" - -#, python-format -msgid "" -"Failed to manage existing volume due to the pool of the volume to be managed " -"does not match the backend pool. Pool of the volume to be managed is " -"%(vdisk_pool)s. Pool of the backend is %(backend_pool)s." -msgstr "" -"因為要管理之磁區的通訊協定與後端通訊協定不符,所以無法管理現有磁區。要管理之" -"磁區的通訊協定是 %(vdisk_pool)s。後端的通訊協定是 %(backend_pool)s。" - -msgid "" -"Failed to manage existing volume due to the volume to be managed is " -"compress, but the volume type chosen is not compress." -msgstr "" -"因為要管理的磁區是壓縮磁區,但所選磁區類型卻是未壓縮磁區,所以無法管理現有磁" -"區。" - -msgid "" -"Failed to manage existing volume due to the volume to be managed is not " -"compress, but the volume type chosen is compress." -msgstr "" -"因為要管理的磁區是未壓縮磁區,但所選磁區類型卻是壓縮磁區,所以無法管理現有磁" -"區。" - -msgid "" -"Failed to manage existing volume due to the volume to be managed is not in a " -"valid I/O group." -msgstr "因為要管理的磁區不在有效的 I/O 群組中,所以無法管理現有磁區。" - -msgid "" -"Failed to manage existing volume due to the volume to be managed is thick, " -"but the volume type chosen is thin." -msgstr "" -"因為要管理的磁區是豐富磁區,但所選磁區類型卻是精簡磁區,所以無法管理現有磁" -"區。" - -msgid "" -"Failed to manage existing volume due to the volume to be managed is thin, " -"but the volume type chosen is thick." -msgstr "" -"因為要管理的磁區是精簡磁區,但所選磁區類型卻是豐富磁區,所以無法管理現有磁" -"區。" - -#, python-format -msgid "Failed to manage volume %s." -msgstr "無法管理磁區 %s。" - -#, python-format -msgid "" -"Failed to map a logical device. (LDEV: %(ldev)s, LUN: %(lun)s, port: " -"%(port)s, id: %(id)s)" -msgstr "" -"無法對映邏輯裝置。(LDEV:%(ldev)s,LUN:%(lun)s,埠:%(port)s,ID:%(id)s)" - -msgid "Failed to migrate volume for the first time." 
-msgstr "第一次移轉磁區失敗。" - -msgid "Failed to migrate volume for the second time." -msgstr "第二次移轉磁區失敗。" - -#, python-format -msgid "Failed to move LUN mapping. Return code: %s" -msgstr "無法移動 LUN 對映。回覆碼:%s" - -#, python-format -msgid "Failed to move volume %s." -msgstr "無法移動磁區 %s。" - -#, python-format -msgid "Failed to open a file. (file: %(file)s, ret: %(ret)s, stderr: %(err)s)" -msgstr "無法開啟檔案。(檔案:%(file)s,ret:%(ret)s,標準錯誤:%(err)s)" - -#, python-format -msgid "" -"Failed to parse CLI output:\n" -" command: %(cmd)s\n" -" stdout: %(out)s\n" -" stderr: %(err)s." -msgstr "" -"無法剖析 CLI 輸出:\n" -" 指令:%(cmd)s\n" -" 標準輸出:%(out)s\n" -"標準錯誤:%(err)s。" - -msgid "" -"Failed to parse the configuration option 'keystone_catalog_info', must be in " -"the form ::" -msgstr "" -"無法剖析配置選項 'keystone_catalog_info',必須採用下列格式::" -":" - -msgid "" -"Failed to parse the configuration option 'swift_catalog_info', must be in " -"the form ::" -msgstr "" -"無法剖析配置選項 'swift_catalog_info',必須採用下列格式::" -":" - -#, python-format -msgid "" -"Failed to perform a zero-page reclamation. (LDEV: %(ldev)s, reason: " -"%(reason)s)" -msgstr "無法執行 0 個頁面收回。(LDEV:%(ldev)s,原因:%(reason)s)" - -#, python-format -msgid "Failed to remove export for volume %(volume)s: %(reason)s" -msgstr "無法移除磁區 %(volume)s 的匯出項目:%(reason)s" - -#, python-format -msgid "Failed to remove iscsi target for volume %(volume_id)s." -msgstr "無法移除磁區 %(volume_id)s 的 iSCSI 目標。" - -#, python-format -msgid "" -"Failed to remove volume %(volumeName)s from consistency group %(cgName)s. " -"Return code: %(rc)lu. Error: %(error)s." -msgstr "" -"無法從一致性群組 %(cgName)s 中移除磁區 %(volumeName)s。回覆碼:%(rc)lu。錯" -"誤:%(error)s。" - -#, python-format -msgid "Failed to remove volume %(volumeName)s from default SG." -msgstr "無法從預設 SG 中移除磁區 %(volumeName)s。" - -#, python-format -msgid "Failed to remove volume %(volumeName)s from default SG: %(volumeName)s." -msgstr "無法從下列預設 SG 中移除磁區 %(volumeName)s:%(volumeName)s。" - -#, python-format -msgid "" -"Failed to remove: %(volumename)s. 
from the default storage group for FAST " -"policy %(fastPolicyName)s." -msgstr "" -"無法將 %(volumename)s 從下列 FAST 原則的預設儲存體群組中移除:" -"%(fastPolicyName)s。" - -#, python-format -msgid "" -"Failed to rename logical volume %(name)s, error message was: %(err_msg)s" -msgstr "無法重新命名邏輯磁區 %(name)s,錯誤訊息為:%(err_msg)s" - -#, python-format -msgid "Failed to retrieve active zoning configuration %s" -msgstr "無法擷取作用中的分區配置 %s" - -#, python-format -msgid "" -"Failed to set CHAP authentication for target IQN %(iqn)s. Details: %(ex)s" -msgstr "無法設定目標 IQN %(iqn)s 的 CHAP 鑑別。詳細資料:%(ex)s" - -#, python-format -msgid "Failed to set QoS for existing volume %(name)s, Error msg: %(msg)s." -msgstr "無法設定現有磁區 %(name)s 的服務品質,錯誤訊息:%(msg)s。" - -msgid "Failed to set attribute 'Incoming user' for SCST target." -msgstr "無法設定 SCST 目標的「送入使用者」屬性。" - -msgid "Failed to set partition." -msgstr "無法設定分割區。" - -#, python-format -msgid "" -"Failed to set permissions for the consistency group %(cgname)s. Error: " -"%(excmsg)s." -msgstr "無法設定一致性群組 %(cgname)s 的許可權。錯誤:%(excmsg)s。" - -#, python-format -msgid "" -"Failed to specify a logical device for the volume %(volume_id)s to be " -"unmapped." -msgstr "無法指定要取消對映之磁區 %(volume_id)s 的邏輯裝置。" - -#, python-format -msgid "" -"Failed to specify a logical device to be deleted. (method: %(method)s, id: " -"%(id)s)" -msgstr "無法指定要刪除的邏輯裝置。(方法:%(method)s,ID:%(id)s)" - -msgid "Failed to terminate migrate session." -msgstr "無法終止移轉階段作業。" - -#, python-format -msgid "Failed to unbind volume %(volume)s" -msgstr "無法將磁區 %(volume)s 解除連結" - -#, python-format -msgid "" -"Failed to unlink fileset for consistency group %(cgname)s. Error: %(excmsg)s." -msgstr "無法解除鏈結一致性群組 %(cgname)s 的檔案集。錯誤:%(excmsg)s。" - -#, python-format -msgid "Failed to unmap a logical device. (LDEV: %(ldev)s, reason: %(reason)s)" -msgstr "無法取消對映邏輯裝置。(LDEV:%(ldev)s,原因:%(reason)s)" - -#, python-format -msgid "Failed to update consistency group: %(cgName)s." 
-msgstr "無法更新一致性群組:%(cgName)s。" - -#, python-format -msgid "Failed to update metadata for volume: %(reason)s" -msgstr "無法更新磁區的 meta 資料:%(reason)s" - -msgid "Failed to update or delete zoning configuration" -msgstr "無法更新或刪除分區配置" - -msgid "Failed to update or delete zoning configuration." -msgstr "無法更新或刪除分區配置。" - -#, python-format -msgid "Failed to update qos_specs: %(specs_id)s with specs %(qos_specs)s." -msgstr "無法使用規格 %(qos_specs)s 來更新 qos_specs:%(specs_id)s。" - -msgid "Failed to update quota usage while retyping volume." -msgstr "對磁區執行 Retype 作業時,無法更新配額用量。" - -msgid "Failed to update snapshot." -msgstr "無法更新 Snapshot。" - -#, python-format -msgid "" -"Failed updating volume %(vol_id)s metadata using the provided %(src_type)s " -"%(src_id)s metadata" -msgstr "" -"使用提供的 %(src_type)s %(src_id)s meta 資料來更新磁區 %(vol_id)s meta 資料時" -"失敗" - -#, python-format -msgid "Failure creating volume %s." -msgstr "建立磁區 %s 時失敗。" - -#, python-format -msgid "Failure getting LUN info for %s." -msgstr "取得 %s 的 LUN 資訊時失敗。" - -#, python-format -msgid "Failure moving new cloned LUN to %s." -msgstr "將新複製的 LUN 移至 %s 時失敗。" - -#, python-format -msgid "Failure staging LUN %s to tmp." -msgstr "將 LUN %s 暫置到 tmp 時失敗。" - -#, python-format -msgid "Fexvisor failed to add volume %(id)s due to %(reason)s." -msgstr "由於 %(reason)s,Fexvisor 無法新增磁區 %(id)s。" - -#, python-format -msgid "" -"Fexvisor failed to join the volume %(vol)s in the group %(group)s due to " -"%(ret)s." -msgstr "Fexvisor 無法結合群組 %(group)s 中的磁區 %(vol)s,原因:%(ret)s。" - -#, python-format -msgid "" -"Fexvisor failed to remove the volume %(vol)s in the group %(group)s due to " -"%(ret)s." -msgstr "Fexvisor 無法移除群組 %(group)s 中的磁區 %(vol)s,原因:%(ret)s。" - -#, python-format -msgid "Fexvisor failed to remove volume %(id)s due to %(reason)s." 
-msgstr "Fexvisor 無法移除磁區 %(id)s,原因:%(reason)s。" - -#, python-format -msgid "Fibre Channel SAN Lookup failure: %(reason)s" -msgstr "「光纖通道」SAN 查閱失敗:%(reason)s" - -#, python-format -msgid "Fibre Channel Zone operation failed: %(reason)s" -msgstr "「光纖通道」區域作業失敗:%(reason)s" - -#, python-format -msgid "Fibre Channel connection control failure: %(reason)s" -msgstr "「光纖通道」連線控制失敗:%(reason)s" - -#, python-format -msgid "File %(file_path)s could not be found." -msgstr "找不到檔案 %(file_path)s。" - -#, python-format -msgid "File %(path)s has invalid backing file %(bfile)s, aborting." -msgstr "檔案 %(path)s 具有無效的支援檔案 %(bfile)s,正在中斷。" - -#, python-format -msgid "File already exists at %s." -msgstr "%s 處已存在檔案。" - -#, python-format -msgid "File already exists at: %s" -msgstr "%s 處已存在檔案" - -msgid "Find host in hostgroup error." -msgstr "在主機群組中尋找主機時發生錯誤。" - -msgid "Find host lun id error." -msgstr "尋找主機 LUN ID 時發生錯誤。" - -msgid "Find lun group from mapping view error." -msgstr "從對映視圖中尋找 LUN 群組時發生錯誤。" - -msgid "Find mapping view error." -msgstr "尋找對映視圖時發生錯誤。" - -msgid "Find portgroup error." -msgstr "尋找埠群組時發生錯誤。" - -msgid "Find portgroup from mapping view error." -msgstr "從對映視圖中尋找埠群組時發生錯誤。" - -#, python-format -msgid "" -"Flash Cache Policy requires WSAPI version '%(fcache_version)s' version " -"'%(version)s' is installed." -msgstr "" -"「快閃記憶體快取原則」需要 WSAPI '%(fcache_version)s' 版,已安裝 " -"'%(version)s' 版。" - -#, python-format -msgid "Flexvisor assign volume failed.:%(id)s:%(status)s." -msgstr "Flexvisor 指派磁區失敗:%(id)s:%(status)s。" - -#, python-format -msgid "Flexvisor assign volume failed:%(id)s:%(status)s." -msgstr "Flexvisor 指派磁區失敗:%(id)s:%(status)s。" - -#, python-format -msgid "" -"Flexvisor could not find volume %(id)s snapshot in the group %(vgid)s " -"snapshot %(vgsid)s." -msgstr "" -"Flexvisor 在群組 %(vgid)s Snapshot %(vgsid)s 中找不到磁區 %(id)s Snapshot。" - -#, python-format -msgid "Flexvisor create volume failed.:%(volumeid)s:%(status)s." 
-msgstr "Flexvisor 建立磁區失敗:%(volumeid)s:%(status)s。" - -#, python-format -msgid "Flexvisor failed deleting volume %(id)s: %(status)s." -msgstr "Flexvisor 無法刪除磁區 %(id)s:%(status)s。" - -#, python-format -msgid "Flexvisor failed to add volume %(id)s to group %(cgid)s." -msgstr "Flexvisor 無法將磁區 %(id)s 新增至群組 %(cgid)s。" - -#, python-format -msgid "" -"Flexvisor failed to assign volume %(id)s due to unable to query status by " -"event id." -msgstr "Flexvisor 無法指派磁區 %(id)s,原因是無法依事件 ID 來查詢狀態。" - -#, python-format -msgid "Flexvisor failed to assign volume %(id)s: %(status)s." -msgstr "Flexvisor 無法指派磁區 %(id)s:%(status)s。" - -#, python-format -msgid "Flexvisor failed to assign volume %(volume)s iqn %(iqn)s." -msgstr "Flexvisor 無法指派磁區 %(volume)s iqn %(iqn)s。" - -#, python-format -msgid "Flexvisor failed to clone volume %(id)s: %(status)s." -msgstr "Flexvisor 無法複製磁區 %(id)s:%(status)s。" - -#, python-format -msgid "Flexvisor failed to clone volume (failed to get event) %(id)s." -msgstr "Flexvisor 無法複製磁區(無法取得事件)%(id)s。" - -#, python-format -msgid "Flexvisor failed to create snapshot for volume %(id)s: %(status)s." -msgstr "Flexvisor 無法建立磁區 %(id)s 的 Snapshot:%(status)s。" - -#, python-format -msgid "" -"Flexvisor failed to create snapshot for volume (failed to get event) %(id)s." -msgstr "Flexvisor 無法建立下列磁區的 Snapshot(無法取得事件):%(id)s。" - -#, python-format -msgid "Flexvisor failed to create volume %(id)s in the group %(vgid)s." -msgstr "Flexvisor 無法在群組 %(vgid)s 中建立磁區 %(id)s。" - -#, python-format -msgid "Flexvisor failed to create volume %(volume)s: %(status)s." -msgstr "Flexvisor 無法建立磁區 %(volume)s:%(status)s。" - -#, python-format -msgid "Flexvisor failed to create volume (get event) %s." -msgstr "Flexvisor 無法建立磁區(取得事件)%s。" - -#, python-format -msgid "Flexvisor failed to create volume from snapshot %(id)s: %(status)s." -msgstr "Flexvisor 無法從 Snapshot %(id)s 建立磁區:%(status)s。" - -#, python-format -msgid "Flexvisor failed to create volume from snapshot %(id)s:%(status)s." 
-msgstr "Flexvisor 無法從 Snapshot %(id)s 建立磁區:%(status)s。" - -#, python-format -msgid "" -"Flexvisor failed to create volume from snapshot (failed to get event) %(id)s." -msgstr "Flexvisor 無法從下列 Snapshot 建立磁區(無法取得事件):%(id)s。" - -#, python-format -msgid "Flexvisor failed to delete snapshot %(id)s: %(status)s." -msgstr "Flexvisor 無法刪除 Snapshot %(id)s:%(status)s。" - -#, python-format -msgid "Flexvisor failed to delete snapshot (failed to get event) %(id)s." -msgstr "Flexvisor 無法刪除 Snapshot(無法取得事件)%(id)s。" - -#, python-format -msgid "Flexvisor failed to delete volume %(id)s: %(status)s." -msgstr "Flexvisor 無法刪除磁區 %(id)s:%(status)s。" - -#, python-format -msgid "Flexvisor failed to extend volume %(id)s: %(status)s." -msgstr "Flexvisor 無法延伸磁區 %(id)s:%(status)s。" - -#, python-format -msgid "Flexvisor failed to extend volume %(id)s:%(status)s." -msgstr "Flexvisor 無法延伸磁區 %(id)s:%(status)s。" - -#, python-format -msgid "Flexvisor failed to extend volume (failed to get event) %(id)s." -msgstr "Flexvisor 無法延伸磁區(無法取得事件)%(id)s。" - -#, python-format -msgid "Flexvisor failed to get pool info %(id)s: %(status)s." -msgstr "Flexvisor 無法取得儲存區資訊 %(id)s:%(status)s。" - -#, python-format -msgid "" -"Flexvisor failed to get snapshot id of volume %(id)s from group %(vgid)s." -msgstr "Flexvisor 無法從群組 %(vgid)s 取得磁區 %(id)s 的 Snapshot ID。" - -#, python-format -msgid "Flexvisor failed to remove volume %(id)s from group %(cgid)s." -msgstr "Flexvisor 無法從群組 %(cgid)s 中移除磁區 %(id)s。" - -#, python-format -msgid "Flexvisor failed to spawn volume from snapshot %(id)s:%(status)s." -msgstr "Flexvisor 無法從 Snapshot %(id)s 大量產生磁區:%(status)s。" - -#, python-format -msgid "" -"Flexvisor failed to spawn volume from snapshot (failed to get event) %(id)s." -msgstr "Flexvisor 無法從下列 Snapshot 大量產生磁區(無法取得事件):%(id)s。" - -#, python-format -msgid "Flexvisor failed to unassign volume %(id)s: %(status)s." 
-msgstr "Flexvisor 無法取消指派磁區 %(id)s:%(status)s。" - -#, python-format -msgid "Flexvisor failed to unassign volume (get event) %(id)s." -msgstr "Flexvisor 無法取消指派磁區(取得事件)%(id)s。" - -#, python-format -msgid "Flexvisor failed to unassign volume:%(id)s:%(status)s." -msgstr "Flexvisor 無法取消指派磁區 %(id)s:%(status)s。" - -#, python-format -msgid "Flexvisor unable to find the source volume %(id)s info." -msgstr "Flexvisor 找不到來源磁區 %(id)s 資訊。" - -#, python-format -msgid "Flexvisor unassign volume failed:%(id)s:%(status)s." -msgstr "Flexvisor 取消指派磁區失敗:%(id)s:%(status)s。" - -#, python-format -msgid "Flexvisor volume %(id)s failed to join group %(vgid)s." -msgstr "Flexvisor 磁區 %(id)s 無法加入群組 %(vgid)s 中。" - -#, python-format -msgid "Folder %s does not exist in Nexenta Store appliance" -msgstr "資料夾 %s 不存在於「Nexenta 儲存庫」軟體驅動裝置中" - -#, python-format -msgid "GPFS is not running, state: %s." -msgstr "GPFS 不在執行中,狀態:%s。" - -msgid "Gateway VIP is not set" -msgstr "未設定閘道 VIP" - -msgid "Get FC ports by port group error." -msgstr "依埠群組取得 FC 埠時發生錯誤。" - -msgid "Get FC ports from array error." -msgstr "從陣列中取得 FC 埠時發生錯誤。" - -msgid "Get FC target wwpn error." -msgstr "取得 FC 目標 WWPN 時發生錯誤。" - -msgid "Get HyperMetroPair error." -msgstr "取得 HyperMetroPair 時發生錯誤。" - -msgid "Get LUN group by view error." -msgstr "依視圖取得 LUN 群組時發生錯誤。" - -msgid "Get LUNcopy information error." -msgstr "取得 LUNcopy 資訊時發生錯誤。" - -msgid "Get QoS id by lun id error." -msgstr "依 LUN ID 取得服務品質 ID 時發生錯誤。" - -msgid "Get QoS information error." -msgstr "取得服務品質資訊時發生錯誤。" - -msgid "Get QoS policy error." -msgstr "取得服務品質原則時發生錯誤。" - -msgid "Get SplitMirror error." -msgstr "取得 SplitMirror 時發生錯誤。" - -msgid "Get active client failed." -msgstr "取得作用中的用戶端失敗。" - -msgid "Get array info error." -msgstr "取得陣列資訊時發生錯誤。" - -msgid "Get cache by name error." -msgstr "依名稱取得快取時發生錯誤。" - -msgid "Get connected free FC wwn error." -msgstr "取得已連接的可用 FC WWN 時發生錯誤。" - -msgid "Get engines error." -msgstr "取得引擎時發生錯誤。" - -msgid "Get host initiators info failed." 
-msgstr "取得主機起始器資訊時失敗。" - -msgid "Get hostgroup information error." -msgstr "取得主機群組資訊時發生錯誤。" - -msgid "" -"Get iSCSI port info error, please check the target IP configured in huawei " -"conf file." -msgstr "取得 iSCSI 埠資訊時發生錯誤,請檢查 huawei 配置檔中配置的目標 IP。" - -msgid "Get iSCSI port information error." -msgstr "取得 iSCSI 埠資訊時發生錯誤。" - -msgid "Get iSCSI target port error." -msgstr "取得 iSCSI 目標埠時發生錯誤。" - -msgid "Get lun id by name error." -msgstr "依名稱取得 LUN ID 時發生錯誤。" - -msgid "Get lun migration task error." -msgstr "取得 LUN 移轉作業時發生錯誤。" - -msgid "Get lungroup id by lun id error." -msgstr "依 LUN ID 取得 LUN 群組 ID 時發生錯誤。" - -msgid "Get lungroup information error." -msgstr "取得 LUN 群組資訊時發生錯誤。" - -msgid "Get migration task error." -msgstr "取得移轉作業時發生錯誤。" - -msgid "Get pair failed." -msgstr "取得配對失敗。" - -msgid "Get partition by name error." -msgstr "依名稱取得分割區時發生錯誤。" - -msgid "Get partition by partition id error." -msgstr "依分割區 ID 取得分割區時發生錯誤。" - -msgid "Get port group by view error." -msgstr "依視圖取得埠群組時發生錯誤。" - -msgid "Get port group error." -msgstr "取得埠群組時發生錯誤。" - -msgid "Get port groups by port error." -msgstr "依埠取得埠群組時發生錯誤。" - -msgid "Get ports by port group error." -msgstr "依埠群組取得埠時發生錯誤。" - -msgid "Get remote device info failed." -msgstr "取得遠端裝置資訊失敗。" - -msgid "Get remote devices error." -msgstr "取得遠端裝置時發生錯誤。" - -msgid "Get smartcache by cache id error." -msgstr "依快取 ID 取得 smartcache 時發生錯誤。" - -msgid "Get snapshot error." -msgstr "取得 Snapshot 時發生錯誤。" - -msgid "Get snapshot id error." -msgstr "取得 Snapshot ID 時發生錯誤。" - -msgid "Get target IP error." -msgstr "取得目標 IP 時發生錯誤。" - -msgid "Get target LUN of SplitMirror error." -msgstr "取得 SplitMirror 的目標 LUN 時發生錯誤。" - -msgid "Get views by port group error." -msgstr "依埠群組取得視圖時發生錯誤。" - -msgid "Get volume by name error." -msgstr "依名稱取得磁區時發生錯誤。" - -msgid "Get volume error." 
-msgstr "取得磁區時發生錯誤。" - -#, python-format -msgid "" -"Glance metadata cannot be updated, key %(key)s exists for volume id " -"%(volume_id)s" -msgstr "無法更新 Glance meta 資料,磁區 ID %(volume_id)s 已存在索引鍵 %(key)s" - -#, python-format -msgid "Glance metadata for volume/snapshot %(id)s cannot be found." -msgstr "找不到磁區/Snapshot %(id)s 的 Glance meta 資料。" - -#, python-format -msgid "Gluster config file at %(config)s doesn't exist" -msgstr "%(config)s 處不存在 Gluster 配置檔" - -#, python-format -msgid "Google Cloud Storage api failure: %(reason)s" -msgstr "Google Cloud Storage API 失敗:%(reason)s" - -#, python-format -msgid "Google Cloud Storage connection failure: %(reason)s" -msgstr "Google Cloud Storage 連線失敗:%(reason)s" - -#, python-format -msgid "Google Cloud Storage oauth2 failure: %(reason)s" -msgstr "Google Cloud Storage oauth2 失敗:%(reason)s" - -#, python-format -msgid "Got bad path information from DRBDmanage! (%s)" -msgstr "從 DRBDmanage 取得不正確的路徑資訊!(%s)" - -msgid "HBSD error occurs." -msgstr "發生 HBSD 錯誤。" - -msgid "HPELeftHand url not found" -msgstr "找不到 HPELeftHand URL" - -#, python-format -msgid "" -"Hash block size has changed since the last backup. New hash block size: " -"%(new)s. Old hash block size: %(old)s. Do a full backup." -msgstr "" -"雜湊區塊大小自前次備份以來已變更。新的雜湊區塊大小:%(new)s。舊的雜湊區塊大" -"小:%(old)s。請執行完整備份。" - -#, python-format -msgid "Have not created %(tier_levels)s tier(s)." -msgstr "尚未建立 %(tier_levels)s 層級。" - -#, python-format -msgid "Hint \"%s\" not supported." -msgstr "不支援提示 \"%s\"。" - -msgid "Host" -msgstr "主機" - -#, python-format -msgid "Host %(host)s could not be found." -msgstr "找不到主機 %(host)s。" - -#, python-format -msgid "" -"Host %(host)s does not match x509 certificate contents: CommonName " -"%(commonName)s." 
-msgstr "主機 %(host)s 不符合 x509 憑證內容:CommonName %(commonName)s。" - -#, python-format -msgid "Host %s has no FC initiators" -msgstr "主機 %s 沒有 FC 起始器" - -#, python-format -msgid "Host group with name %s not found" -msgstr "找不到名稱為 %s 的主機群組" - -#, python-format -msgid "Host group with ref %s not found" -msgstr "找不到參照為 %s 的主機群組" - -msgid "Host is NOT Frozen." -msgstr "主機未處於「凍結」狀態。" - -msgid "Host is already Frozen." -msgstr "主機已經處於「凍結」狀態。" - -#, python-format -msgid "Host not found. Failed to remove %(service)s on %(host)s." -msgstr "找不到主機。無法移除 %(host)s 上的 %(service)s。" - -#, python-format -msgid "Host replication_status must be %s to failover." -msgstr "主機 replication_status 必須是 %s 才能失效接手。" - -#, python-format -msgid "Host type %s not supported." -msgstr "不支援主機類型 %s。" - -#, python-format -msgid "Host with ports %(ports)s not found." -msgstr "找不到具有埠 %(ports)s 的主機。" - -msgid "Hypermetro and Replication can not be used in the same volume_type." -msgstr "不能在同一 volume_type 中使用 Hypermetro 和抄寫。" - -#, python-format -msgid "I/O group %(iogrp)d is not valid; available I/O groups are %(avail)s." -msgstr "I/O 群組 %(iogrp)d 無效;可用的 I/O 群組數目是 %(avail)s。" - -msgid "ID" -msgstr "識別號" - -msgid "" -"If compression is set to True, rsize must also be set (not equal to -1)." -msgstr "如果壓縮設為 True,則也必須設定調整大小(不等於 -1)。" - -msgid "If nofmtdisk is set to True, rsize must also be set to -1." -msgstr "如果 nofmtdisk 設為 True,則 rsize 也必須設為 -1。" - -#, python-format -msgid "" -"Illegal value '%(prot)s' specified for flashsystem_connection_protocol: " -"valid value(s) are %(enabled)s." -msgstr "" -"為 flashsystem_connection_protocol 指定的值 '%(prot)s' 不正確:有效值為 " -"%(enabled)s。" - -msgid "Illegal value specified for IOTYPE: 0, 1, or 2." -msgstr "為 IOTYPE 指定的值無效:0、1 或 2。" - -msgid "Illegal value specified for smarttier: set to either 0, 1, 2, or 3." -msgstr "指定給 smarttier 的值不正確:設定為 0、1、2 或 3。" - -msgid "" -"Illegal value specified for storwize_svc_vol_grainsize: set to either 32, " -"64, 128, or 256." 
-msgstr "" -"指定給 storwize_svc_vol_grainsize 的值不正確:應設為 32、64、128 或 256。" - -msgid "" -"Illegal value specified for thin: Can not set thin and thick at the same " -"time." -msgstr "指定給 thin 的值不正確:無法同時設定thin 和 thick。" - -#, python-format -msgid "Image %(image_id)s could not be found." -msgstr "找不到映像檔 %(image_id)s。" - -#, python-format -msgid "Image %(image_id)s is not active." -msgstr "映像檔 %(image_id)s 不在作用中。" - -#, python-format -msgid "Image %(image_id)s is unacceptable: %(reason)s" -msgstr "無法接受映像檔 %(image_id)s:%(reason)s" - -msgid "Image location not present." -msgstr "映像檔位置不存在。" - -#, python-format -msgid "" -"Image virtual size is %(image_size)dGB and doesn't fit in a volume of size " -"%(volume_size)dGB." -msgstr "" -"映像檔虛擬大小為 %(image_size)d GB,不適合大小為%(volume_size)d GB 的磁區。" - -msgid "" -"ImageBusy error raised while deleting rbd volume. This may have been caused " -"by a connection from a client that has crashed and, if so, may be resolved " -"by retrying the delete after 30 seconds has elapsed." -msgstr "" -"刪除 rbd 磁區時發出 ImageBusy 錯誤。造成此問題的原因可能是從已損毀的用戶端進" -"行連線,如果是這樣,則可以在30 秒後,透過重試刪除來解決。" - -#, python-format -msgid "" -"Import record failed, cannot find backup service to perform the import. " -"Request service %(service)s" -msgstr "匯入記錄時失敗,找不到備份服務來執行匯入。要求服務 %(service)s" - -msgid "Incorrect request body format" -msgstr "要求內文的格式不正確" - -msgid "Incorrect request body format." -msgstr "要求內文的格式不正確。" - -msgid "Incremental backups exist for this backup." -msgstr "此備份的增量備份已存在。" - -#, python-format -msgid "" -"Infortrend CLI exception: %(err)s Param: %(param)s (Return Code: %(rc)s) " -"(Output: %(out)s)" -msgstr "" -"Infortrend CLI 異常狀況:%(err)s,參數:%(param)s(回覆碼:%(rc)s)(輸出:" -"%(out)s)" - -msgid "Input volumes or snapshots are invalid." -msgstr "輸入磁區或 Snapshot 無效。" - -msgid "Input volumes or source volumes are invalid." -msgstr "輸入磁區或來源磁區無效。" - -#, python-format -msgid "Instance %(uuid)s could not be found." 
-msgstr "找不到實例 %(uuid)s。" - -msgid "Insufficient free space available to extend volume." -msgstr "可用空間不足,無法延伸磁區。" - -msgid "Insufficient privileges" -msgstr "專用權不足" - -#, python-format -msgid "Invalid 3PAR Domain: %(err)s" -msgstr "無效的 3PAR 網域:%(err)s" - -msgid "Invalid ALUA value. ALUA value must be 1 or 0." -msgstr "無效的 ALUA 值。ALUA 值必須是 1 或 0。" - -msgid "Invalid Ceph args provided for backup rbd operation" -msgstr "提供給備份 rbd 作業的 Ceph 引數無效" - -#, python-format -msgid "Invalid CgSnapshot: %(reason)s" -msgstr "無效的 CgSnapshot:%(reason)s" - -#, python-format -msgid "Invalid ConsistencyGroup: %(reason)s" -msgstr "無效的 ConsistencyGroup:%(reason)s" - -msgid "Invalid ConsistencyGroup: No host to create consistency group" -msgstr "無效的一致性群組:沒有用來建立一致性群組的主機" - -#, python-format -msgid "" -"Invalid HPELeftHand API version found: %(found)s. Version %(minimum)s or " -"greater required for manage/unmanage support." -msgstr "" -"找到的 HPELeftHand API 版本 %(found)s 無效。管理/取消管理支援需要 " -"%(minimum)s 版或更高版本。" - -#, python-format -msgid "Invalid IP address format: '%s'" -msgstr "無效的 IP 位址格式:'%s'" - -#, python-format -msgid "" -"Invalid QoS specification detected while getting QoS policy for volume %s" -msgstr "取得磁區 %s 的服務品質原則時,偵測到無效的服務品質規格" - -#, python-format -msgid "Invalid Replication Target: %(reason)s" -msgstr "無效的抄寫目標:%(reason)s" - -#, python-format -msgid "" -"Invalid Virtuozzo Storage share specification: %r. Must be: [MDS1[," -"MDS2],...:/][:PASSWORD]." -msgstr "" -"Virtuozzo 儲存體共用項目規格無效:%r。必須是:[MDS1[,MDS2],...:/][:PASSWORD]。" - -#, python-format -msgid "Invalid XtremIO version %(cur)s, version %(min)s or up is required" -msgstr "XtremIO %(cur)s 版無效,需要 %(min)s 版或更高版本" - -#, python-format -msgid "Invalid allocated quotas defined for the following project quotas: %s" -msgstr "為下列專案配額定義的已配置配額無效:%s" - -msgid "Invalid argument" -msgstr "無效的引數" - -msgid "Invalid argument - negative seek offset." 
-msgstr "無效的引數 - 負數探查偏移。" - -#, python-format -msgid "Invalid argument - whence=%s not supported" -msgstr "無效的引數 - whence = %s 不受支援" - -#, python-format -msgid "Invalid argument - whence=%s not supported." -msgstr "無效的引數 - 不支援 whence=%s。" - -#, python-format -msgid "Invalid attaching mode '%(mode)s' for volume %(volume_id)s." -msgstr "連接模式 '%(mode)s' 不適用於磁區 %(volume_id)s。" - -#, python-format -msgid "Invalid auth key: %(reason)s" -msgstr "無效的鑑別金鑰:%(reason)s" - -#, python-format -msgid "Invalid backup: %(reason)s" -msgstr "無效的備份:%(reason)s" - -msgid "Invalid chap user details found in CloudByte storage." -msgstr "在 CloudByte 儲存體中找到無效的 CHAP 使用者詳細資料。" - -#, python-format -msgid "Invalid connection initialization response of volume %(name)s" -msgstr "磁區 %(name)s 的連線起始設定回應無效" - -#, python-format -msgid "" -"Invalid connection initialization response of volume %(name)s: %(output)s" -msgstr "磁區 %(name)s 的連線起始設定回應無效:%(output)s" - -#, python-format -msgid "Invalid content type %(content_type)s." -msgstr "無效的內容類型 %(content_type)s。" - -msgid "Invalid credentials" -msgstr "認證無效" - -#, python-format -msgid "Invalid directory: %s" -msgstr "無效的目錄:%s" - -#, python-format -msgid "Invalid disk adapter type: %(invalid_type)s." -msgstr "無效的磁碟配接卡類型:%(invalid_type)s。" - -#, python-format -msgid "Invalid disk backing: %s." -msgstr "無效的磁碟備用項目:%s。" - -#, python-format -msgid "Invalid disk type: %(disk_type)s." -msgstr "無效的磁碟類型:%(disk_type)s。" - -#, python-format -msgid "Invalid disk type: %s." -msgstr "無效的磁碟類型:%s。" - -#, python-format -msgid "Invalid host: %(reason)s" -msgstr "無效的主機:%(reason)s" - -#, python-format -msgid "" -"Invalid hpe3parclient version found (%(found)s). Version %(minimum)s or " -"greater required. Run \"pip install --upgrade python-3parclient\" to upgrade " -"the hpe3parclient." 
-msgstr "" -"找到的 hpe3parclient 版本 (%(found)s) 無效。需要 %(minimum)s 版或更高版本。請" -"執行 \"pip install --upgrade python-3parclient\" 來升級 hpe3parclient。" - -#, python-format -msgid "" -"Invalid hpelefthandclient version found (%(found)s). Version %(minimum)s or " -"greater required. Run 'pip install --upgrade python-lefthandclient' to " -"upgrade the hpelefthandclient." -msgstr "" -"找到的 hpelefthandclient 版本 (%(found)s) 無效。需要 %(minimum)s 版或更高版" -"本。請執行 'pip install --upgrade python-lefthandclient' 來升級 " -"hpelefthandclient。" - -#, python-format -msgid "Invalid image href %(image_href)s." -msgstr "無效的映像檔 href %(image_href)s。" - -msgid "Invalid image identifier or unable to access requested image." -msgstr "映像檔 ID 無效,或無法存取所要求的映像檔。" - -msgid "Invalid imageRef provided." -msgstr "提供的 imageRef 無效。" - -msgid "Invalid input" -msgstr "無效的輸入" - -#, python-format -msgid "Invalid input received: %(reason)s" -msgstr "收到的輸入無效:%(reason)s" - -#, python-format -msgid "Invalid is_public filter [%s]" -msgstr "無效的 is_public 過濾器 [%s]" - -#, python-format -msgid "Invalid lun type %s is configured." -msgstr "所配置的 LUN 類型 %s 無效。" - -#, python-format -msgid "Invalid metadata size: %(reason)s" -msgstr "無效的 meta 資料大小:%(reason)s" - -#, python-format -msgid "Invalid metadata: %(reason)s" -msgstr "無效的 meta 資料:%(reason)s" - -#, python-format -msgid "Invalid mount point base: %s" -msgstr "無效的裝載點基本程式:%s" - -#, python-format -msgid "Invalid mount point base: %s." -msgstr "無效的裝載點基本程式:%s。" - -#, python-format -msgid "Invalid new snapCPG name for retype. new_snap_cpg='%s'." -msgstr "新 snapCPG 名稱無效,無法執行 Retype 動作。new_snap_cpg='%s'。" - -#, python-format -msgid "Invalid port number %(config)s for Coho rpc port" -msgstr "Coho RPC 埠的埠號 %(config)s 無效" - -#, python-format -msgid "" -"Invalid prefetch type '%s' is configured. PrefetchType must be in 0,1,2,3." 
-msgstr "所配置的預先提取類型 %s 無效。PrefetchType 必須位於 0、1、2 和 3 中。" - -#, python-format -msgid "Invalid qos specs: %(reason)s" -msgstr "無效的服務品質規格:%(reason)s" - -msgid "Invalid request to attach volume to an invalid target" -msgstr "將磁區連接至無效目標的要求無效" - -msgid "" -"Invalid request to attach volume with an invalid mode. Attaching mode should " -"be 'rw' or 'ro'" -msgstr "以無效模式來連接磁區的要求無效。連接模式應該是 'rw' 或 'ro'" - -#, python-format -msgid "Invalid reservation expiration %(expire)s." -msgstr "無效的預約有效期限 %(expire)s。" - -msgid "Invalid response header from RPC server" -msgstr "來自 RPC 伺服器的回應標頭無效" - -#, python-format -msgid "Invalid secondary id %s." -msgstr "次要 ID %s 無效。" - -msgid "Invalid service catalog json." -msgstr "無效的服務型錄 JSON。" - -msgid "Invalid sheepdog cluster status." -msgstr "sheepdog 叢集狀態無效。" - -#, python-format -msgid "Invalid snapshot: %(reason)s" -msgstr "無效的 Snapshot:%(reason)s" - -#, python-format -msgid "Invalid status: '%s'" -msgstr "無效的狀態:'%s'" - -#, python-format -msgid "Invalid storage pool %s requested. Retype failed." -msgstr "所要求的儲存區 %s 無效。執行 Retype 動作失敗。" - -#, python-format -msgid "Invalid storage pool %s specificed." -msgstr "所指定的儲存區 %s 無效。" - -msgid "Invalid storage pool is configured." -msgstr "所配置的儲存區無效。" - -msgid "Invalid transport type." -msgstr "傳輸類型無效。" - -#, python-format -msgid "Invalid update setting: '%s'" -msgstr "無效的更新設定:'%s'" - -#, python-format -msgid "Invalid value '%s' for force." -msgstr "force 的值 '%s' 無效。" - -#, python-format -msgid "Invalid value '%s' for force. " -msgstr "force 的值 '%s' 無效。" - -#, python-format -msgid "Invalid value '%s' for is_public. Accepted values: True or False." -msgstr "is_public 的值 '%s' 無效。接受值:True 或 False。" - -#, python-format -msgid "Invalid value '%s' for skip_validation." 
-msgstr "skip_validation 的值 '%s' 無效。" - -#, python-format -msgid "Invalid value for 'bootable': '%s'" -msgstr "'bootable' 的值無效:'%s'" - -#, python-format -msgid "Invalid value for 'force': '%s'" -msgstr "'force' 的值 '%s' 無效" - -#, python-format -msgid "Invalid value for 'readonly': '%s'" -msgstr "'readonly' 的值無效:'%s'" - -msgid "Invalid value for 'scheduler_max_attempts', must be >=1" -msgstr "'scheduler_max_attempts' 的值無效,必須 >= 1" - -msgid "Invalid value for NetApp configuration option netapp_host_type." -msgstr "NetApp 配置選項 netapp_host_type 的值無效。" - -msgid "Invalid value for NetApp configuration option netapp_lun_ostype." -msgstr "NetApp 配置選項 netapp_lun_ostype 的值無效。" - -#, python-format -msgid "Invalid value for age, %(age)s" -msgstr "經歷時間的值 %(age)s 無效" - -#, python-format -msgid "Invalid value: \"%s\"" -msgstr "無效的值:\"%s\"" - -#, python-format -msgid "" -"Invalid volume size provided for create request: %s (size argument must be " -"an integer (or string representation of an integer) and greater than zero)." -msgstr "" -"提供給建立要求的磁區大小無效:%s(大小引數必須為整數或整數的字串表示法且大於" -"零)。" - -#, python-format -msgid "Invalid volume type: %(reason)s" -msgstr "無效的磁區類型:%(reason)s" - -#, python-format -msgid "Invalid volume: %(reason)s" -msgstr "無效的磁區:%(reason)s" - -#, python-format -msgid "" -"Invalid volume: Cannot add volume %(volume_id)s to consistency group " -"%(group_id)s because volume is in an invalid state: %(status)s. Valid states " -"are: ('available', 'in-use')." -msgstr "" -"無效的磁區:無法將磁區 %(volume_id)s 新增至一致性群組 %(group_id)s,因為磁區" -"處於無效的狀態:%(status)s。有效的狀態為:(「可用」、「使用中」)。" - -#, python-format -msgid "" -"Invalid volume: Cannot add volume %(volume_id)s to consistency group " -"%(group_id)s because volume type %(volume_type)s is not supported by the " -"group." 
-msgstr "" -"無效的磁區:無法將磁區 %(volume_id)s 新增至一致性群組 %(group_id)s,因為磁區" -"類型 %(volume_type)s 不受 該群組支援。" - -#, python-format -msgid "" -"Invalid volume: Cannot add volume fake-volume-uuid to consistency group " -"%(group_id)s because volume cannot be found." -msgstr "" -"無效的磁區:無法將磁區 fake-volume-uuid 新增至一致性群組 %(group_id)s,因為找" -"不到該磁區。" - -#, python-format -msgid "" -"Invalid volume: Cannot remove volume fake-volume-uuid from consistency group " -"%(group_id)s because it is not in the group." -msgstr "" -"無效的磁區:無法將磁區 fake-volume-uuid 從一致性群組 %(group_id)s 中移除,因" -"為該磁區不在此群組中。" - -#, python-format -msgid "Invalid volume_type passed: %s." -msgstr "傳遞的 volume_type 無效:%s。" - -#, python-format -msgid "" -"Invalid volume_type provided: %s (requested type is not compatible; either " -"match source volume, or omit type argument)." -msgstr "" -"提供的 volume_type 無效:%s(所要求的類型不相容;符合來源磁區,或省略 type 引" -"數)。" - -#, python-format -msgid "" -"Invalid volume_type provided: %s (requested type is not compatible; " -"recommend omitting the type argument)." -msgstr "" -"提供的 volume_type 無效:%s(所要求的類型不相容;建議省略該類型引數)。" - -#, python-format -msgid "" -"Invalid volume_type provided: %s (requested type must be supported by this " -"consistency group)." -msgstr "提供的 volume_type 無效:%s(所要求的類型必須受此一致性群組支援)。" - -#, python-format -msgid "Invalid wwpns format %(wwpns)s" -msgstr "無效的 WWPN 格式 %(wwpns)s" - -msgid "Invoking web service failed." -msgstr "呼叫 Web 服務失敗。" - -msgid "Issue encountered waiting for job." -msgstr "等待工作時遇到問題。" - -msgid "Issue encountered waiting for synchronization." -msgstr "等待同步時遇到問題。" - -msgid "" -"Issuing a fail-over failed because replication is not properly configured." -msgstr "發出失效接手失敗,因為未正確配置抄寫。" - -#, python-format -msgid "Job id not found in CloudByte's create volume [%s] response." -msgstr "在 CloudByte 的建立磁區 [%s] 回應中找不到工作 ID。" - -#, python-format -msgid "Job id not found in CloudByte's delete volume [%s] response." 
-msgstr "在 CloudByte 的刪除磁區 [%s] 回應中找不到工作 ID。" - -msgid "" -"Key names can only contain alphanumeric characters, underscores, periods, " -"colons and hyphens." -msgstr "索引鍵名稱只能包含英數字元、底線、句點、冒號及連字號。" - -#, python-format -msgid "KeyError: %s" -msgstr "KeyError:%s" - -msgid "Keystone version 3 or greater must be used to get nested quota support." -msgstr "Keystone 第 3 版或更高版本必須用於取得巢狀配額支援。" - -#, python-format -msgid "LU does not exist for volume: %s" -msgstr "磁區不存在 LU:%s" - -msgid "LUN export failed!" -msgstr "LUN 匯出失敗!" - -msgid "LUN map overflow on every channel." -msgstr "在每個通道上,LUN 對映溢位。" - -#, python-format -msgid "LUN not found with given ref %s." -msgstr "找不到具有給定參照 %s 的 LUN。" - -#, python-format -msgid "LUN number is out of bound on channel id: %(ch_id)s." -msgstr "LUN 號碼已超出通道 ID %(ch_id)s 的範圍。" - -#, python-format -msgid "Last %s cinder syslog entries:-" -msgstr "最後 %s 個 Cinder Syslog 項目:-" - -msgid "LeftHand cluster not found" -msgstr "找不到 LeftHand 叢集" - -msgid "License is unavailable." -msgstr "無法使用授權。" - -#, python-format -msgid "Line %(dis)d : %(line)s" -msgstr "第 %(dis)d 行:%(line)s" - -msgid "Link path already exists and its not a symlink" -msgstr "鏈結路徑已經存在,並且該鏈結路徑不是符號鏈結" - -#, python-format -msgid "Linked clone of source volume not supported in state: %s." -msgstr "狀態 %s 不支援來源磁區的鏈結複本。" - -msgid "Lock acquisition failed." -msgstr "鎖定獲得失敗。" - -msgid "Logout session error." -msgstr "登出階段作業錯誤。" - -msgid "" -"Lookup service not configured. Config option for fc_san_lookup_service needs " -"to specify a concrete implementation of the lookup service." -msgstr "" -"未配置查閱服務。fc_san_lookup_service 的配置選項需要指定查閱服務的具體實作。" - -msgid "Lun migration error." -msgstr "Lun 移轉錯誤。" - -#, python-format -msgid "" -"MD5 of object: %(object_name)s before: %(md5)s and after: %(etag)s is not " -"same." 
-msgstr "之前物件 %(object_name)s 的 MD5 %(md5)s 與之後的 %(etag)s 不同。" - -#, python-format -msgid "MSG_DENIED: %r" -msgstr "MSG_DENIED:%r" - -#, python-format -msgid "MSG_DENIED: AUTH_ERROR: %r" -msgstr "MSG_DENIED:AUTH_ERROR:%r" - -#, python-format -msgid "MSG_DENIED: RPC_MISMATCH: %r" -msgstr "MSG_DENIED:RPC_MISMATCH:%r" - -#, python-format -msgid "Malformed fcns output string: %s" -msgstr "形態異常的 fcns 輸出字串:%s" - -#, python-format -msgid "Malformed message body: %(reason)s" -msgstr "訊息內文的格式不正確:%(reason)s" - -#, python-format -msgid "Malformed nameserver string: %s" -msgstr "格式不正確的名稱伺服器字串:%s" - -msgid "Malformed request body" -msgstr "要求內文的格式不正確" - -msgid "Malformed request body." -msgstr "要求內文形態異常。" - -msgid "Malformed request url" -msgstr "要求 URL 的格式不正確" - -#, python-format -msgid "Malformed response to command %(cmd)s: %(reason)s" -msgstr "對指令 %(cmd)s 的回應格式不正確:%(reason)s" - -msgid "Malformed scheduler_hints attribute" -msgstr "scheduler_hints 屬性的格式不正確" - -#, python-format -msgid "Malformed show fcns database string: %s" -msgstr "形態異常的「顯示 fcns」資料庫字串:%s" - -#, python-format -msgid "" -"Malformed zone configuration: (switch=%(switch)s zone_config=" -"%(zone_config)s)." -msgstr "" -"格式不正確的區域配置:(交換器 = %(switch)szone_config = %(zone_config)s)。" - -#, python-format -msgid "Malformed zone status: (switch=%(switch)s zone_config=%(zone_config)s)." -msgstr "" -"形態異常的區域狀態:(交換器 = %(switch)s,zone_config = %(zone_config)s)。" - -msgid "Manage existing get size requires 'id'." -msgstr "管理現有取得大小需要 'id'。" - -msgid "Manage existing snapshot not implemented." -msgstr "未實作管理現有 Snapshot。" - -#, python-format -msgid "" -"Manage existing volume failed due to invalid backend reference " -"%(existing_ref)s: %(reason)s" -msgstr "管理現有磁區時失敗,因為後端參照%(existing_ref)s 無效:%(reason)s" - -#, python-format -msgid "Manage existing volume failed due to volume type mismatch: %(reason)s" -msgstr "管理現有磁區時失敗,因為磁區類型不符:%(reason)s" - -msgid "Manage existing volume not implemented." 
-msgstr "未實作管理現有磁區。" - -msgid "Manage existing volume requires 'source-id'." -msgstr "管理現有磁區需要 'source-id'。" - -#, python-format -msgid "" -"Manage volume is not supported if FAST is enable. FAST policy: " -"%(fastPolicyName)s." -msgstr "如果啟用了 FAST,則不支援管理磁區。FAST 原則:%(fastPolicyName)s。" - -msgid "Managing of snapshots to failed-over volumes is not allowed." -msgstr "不容許對已失效接手之磁區的 Snapshot 進行管理。" - -msgid "Map info is None due to array version not supporting hypermetro." -msgstr "由於陣列版本不支援 Hypermetro,對映資訊為「無」。" - -#, python-format -msgid "" -"Mapping %(id)s prepare failed to complete within theallotted %(to)d seconds " -"timeout. Terminating." -msgstr "無法在所分配的 %(to)d 秒逾時時間內完成對映 %(id)s準備。終止中。" - -#, python-format -msgid "Masking view %(maskingViewName)s was not deleted successfully" -msgstr "未順利刪除遮罩視圖 %(maskingViewName)s" - -#, python-format -msgid "Maximum number of backups allowed (%(allowed)d) exceeded" -msgstr "已超出容許的備份數目上限 (%(allowed)d)" - -#, python-format -msgid "Maximum number of snapshots allowed (%(allowed)d) exceeded" -msgstr "已超出容許的 Snapshot 數目上限 (%(allowed)d)" - -#, python-format -msgid "" -"Maximum number of volumes allowed (%(allowed)d) exceeded for quota " -"'%(name)s'." -msgstr "已超出下列配額容許的磁區數目上限 (%(allowed)d):'%(name)s'。" - -#, python-format -msgid "May specify only one of %s" -msgstr "只能指定 %s 的其中之一" - -msgid "Metadata backup already exists for this volume" -msgstr "此磁區已存在 meta 資料備份" - -#, python-format -msgid "Metadata backup object '%s' already exists" -msgstr "meta 資料備份物件 '%s' 已存在" - -msgid "Metadata property key blank." -msgstr "meta 資料內容索引鍵空白。" - -msgid "Metadata restore failed due to incompatible version" -msgstr "meta 資料還原失敗,因為版本不相容" - -msgid "Metadata restore failed due to incompatible version." -msgstr "由於版本不相容,meta 資料還原失敗。" - -msgid "" -"Missing 'purestorage' python module, ensure the library is installed and " -"available." 
-msgstr "遺漏 'purestorage' Python 模組,請確保該程式庫已安裝且可用。" - -msgid "Missing Fibre Channel SAN configuration param - fc_fabric_names" -msgstr "遺漏了「光纖通道」SAN 配置參數 - fc_fabric_names" - -msgid "Missing request body" -msgstr "遺漏了要求內文" - -msgid "Missing request body." -msgstr "遺漏要求內文。" - -#, python-format -msgid "Missing required element '%s' in request body" -msgstr "要求內文中遺漏了必要元素 '%s'" - -#, python-format -msgid "Missing required element '%s' in request body." -msgstr "要求內文遺漏了必要元素 '%s'。" - -msgid "Missing required element 'consistencygroup' in request body." -msgstr "要求內文中遺漏了必要元素 'consistencygroup'。" - -msgid "Missing required element quota_class_set in request body." -msgstr "要求內文中遺漏了必要元素 quota_class_set。" - -msgid "Missing required element snapshot in request body." -msgstr "要求內文中遺漏了必要元素 Snapshot。" - -msgid "" -"Multiple SerialNumbers found, when only one was expected for this operation. " -"Please change your EMC config file." -msgstr "找到多個 SerialNumber,但這項作業僅預期一個。請變更 EMC 配置檔。" - -#, python-format -msgid "Multiple copies of volume %s found." -msgstr "找到磁區 %s 的多個副本。" - -#, python-format -msgid "Multiple matches found for '%s', use an ID to be more specific." -msgstr "找到 '%s' 的多個相符項,請使用 ID 以更具體地進行尋找。" - -msgid "Multiple profiles found." -msgstr "找到多個設定檔。" - -msgid "Must implement a fallback schedule" -msgstr "必須實作撤回排程" - -msgid "Must implement find_retype_host" -msgstr "必須實作 find_retype_host" - -msgid "Must implement host_passes_filters" -msgstr "必須實作 host_passes_filters" - -msgid "Must implement schedule_create_consistencygroup" -msgstr "必須實作 schedule_create_consistencygroup" - -msgid "Must implement schedule_create_volume" -msgstr "必須實作 schedule_create_volume" - -msgid "Must implement schedule_get_pools" -msgstr "必須實作 schedule_get_pools" - -msgid "Must pass wwpn or host to lsfabric." -msgstr "必須將 WWPN 或主機傳遞給 lsfabric。" - -msgid "" -"Must run this command as cloud admin using a Keystone policy.json which " -"allows cloud admin to list and get any project." 
-msgstr "" -"必須以雲端管理者身分使用 Keystone policy.json 來執行此指令,此 policy.json 容" -"許雲端管理者列出和取得任何專案。" - -msgid "Must specify 'connector'" -msgstr "必須指定 'connector'" - -msgid "Must specify 'connector'." -msgstr "必須指定 'connector'。" - -msgid "Must specify 'host'." -msgstr "必須指定 'host'。" - -msgid "Must specify 'new_volume'" -msgstr "必須指定 'new_volume'" - -msgid "Must specify 'status'" -msgstr "必須指定 'status'" - -msgid "" -"Must specify 'status', 'attach_status' or 'migration_status' for update." -msgstr "必須指定 'status'、'attach_status' 或 'migration_status' 進行更新。" - -msgid "Must specify a valid attach status" -msgstr "必須指定有效的連接狀態" - -msgid "Must specify a valid migration status" -msgstr "必須指定有效的移轉狀態" - -#, python-format -msgid "Must specify a valid persona %(valid)s,value '%(persona)s' is invalid." -msgstr "必須指定有效的 persona %(valid)s,值 '%(persona)s' 無效。" - -#, python-format -msgid "" -"Must specify a valid provisioning type %(valid)s, value '%(prov)s' is " -"invalid." -msgstr "必須指定有效的供應類型 %(valid)s,值 '%(prov)s' 無效。" - -msgid "Must specify a valid status" -msgstr "必須指定有效的狀態" - -msgid "Must specify an ExtensionManager class" -msgstr "必須指定 ExtensionManager 類別" - -msgid "Must specify bootable in request." -msgstr "必須在要求中指定 bootable。" - -msgid "Must specify protection domain name or protection domain id." -msgstr "必須指定保護網域名稱或保護網域 ID。" - -msgid "Must specify readonly in request." -msgstr "必須在要求中指定 readonly。" - -msgid "Must specify snapshot source-name or source-id." -msgstr "必須指定 Snapshot source-name 或 source-id。" - -msgid "Must specify source-name or source-id." -msgstr "必須修改 source-name 或 source-id。" - -msgid "Must specify storage pool name or id." -msgstr "必須指定儲存區名稱或 ID。" - -msgid "Must specify storage pools. Option: sio_storage_pools." -msgstr "必須指定儲存區。選項:sio_storage_pools。" - -msgid "Must supply a positive, non-zero value for age" -msgstr "必須為經歷時間提供非零正數值" - -#, python-format -msgid "" -"NAS config '%(name)s=%(value)s' invalid. 
Must be 'auto', 'true', or 'false'" -msgstr "NAS 配置 '%(name)s=%(value)s' 無效。必須為 'auto'、'true' 或'false'" - -#, python-format -msgid "NFS config file at %(config)s doesn't exist" -msgstr "%(config)s 處不存在 NFS 配置檔" - -#, python-format -msgid "NFS file %s not discovered." -msgstr "未探索到 NFS 檔 %s。" - -msgid "NFS file could not be discovered." -msgstr "無法探索 NFS 檔案。" - -msgid "NaElement name cannot be null." -msgstr "NaElement 名稱不能是空值。" - -msgid "Name" -msgstr "名稱" - -msgid "" -"Name, description, add_volumes, and remove_volumes can not be all empty in " -"the request body." -msgstr "要求內文中的名稱、說明、add_volumes 和 remove_volumes 不能全部都為空。" - -msgid "Need non-zero volume size" -msgstr "需要非零磁區大小" - -#, python-format -msgid "Neither MSG_DENIED nor MSG_ACCEPTED: %r" -msgstr "MSG_DENIED 或 MSG_ACCEPTED 均不 %r" - -msgid "NetApp Cinder Driver exception." -msgstr "NetApp Cinder 驅動程式異常狀況。" - -#, python-format -msgid "" -"New size for extend must be greater than current size. (current: %(size)s, " -"extended: %(new_size)s)." -msgstr "" -"用於延伸的新大小必須大於現行大小。(現行大小:%(size)s,延伸後大小:" -"%(new_size)s)。" - -#, python-format -msgid "" -"New size should be bigger than the real size from backend storage. realsize: " -"%(oldsize)s, newsize: %(newsize)s." -msgstr "" -"新大小應該大於後端儲存體中的實際大小。實際大小:%(oldsize)s,新大小:" -"%(newsize)s。" - -msgid "New volume size must be specified as an integer." -msgstr "必須將新的磁區大小指定為整數。" - -msgid "New volume type must be specified." -msgstr "必須指定新的磁區類型。" - -msgid "New volume type not specified in request_spec." -msgstr "request_spec 中沒有指定新的磁區類型。" - -msgid "Nimble Cinder Driver exception" -msgstr "Nimble Cinder 驅動程式異常狀況" - -msgid "No FC initiator can be added to host." -msgstr "任何 FC 起始器均無法新增至主機。" - -msgid "No FC port connected to fabric." -msgstr "沒有 FC 埠已連接至光纖。" - -msgid "No FCP targets found" -msgstr "找不到 FCP 目標" - -msgid "No Port Group elements found in config file." -msgstr "在配置檔中找不到「埠群組」元素。" - -msgid "No VF ID is defined in the configuration file." 
-msgstr "配置檔中未定義 VF ID。"
-msgstr "沒有起始器已連接至光纖。" - -#, python-format -msgid "No initiator group found for initiator %s" -msgstr "找不到起始器 %s 的起始器群組" - -msgid "No initiators found, cannot proceed" -msgstr "找不到起始器,無法繼續進行" - -#, python-format -msgid "No interface found on cluster for ip %s" -msgstr "在叢集上找不到 IP %s 的介面" - -msgid "No ip address found." -msgstr "找不到任何 IP 位址。" - -msgid "No iscsi auth groups were found in CloudByte." -msgstr "在 CloudByte 中找不到 iscsi 鑑別群組。" - -msgid "No iscsi initiators were found in CloudByte." -msgstr "在 CloudByte 中找不到 iscsi 起始器。" - -#, python-format -msgid "No iscsi service found for CloudByte volume [%s]." -msgstr "找不到 CloudByte 磁區 [%s] 的 iscsi 服務。" - -msgid "No iscsi services found in CloudByte storage." -msgstr "在 CloudByte 儲存體中找不到 iscsi 服務。" - -#, python-format -msgid "No key file specified and unable to load key from %(cert)s %(e)s." -msgstr "未指定金鑰檔,且無法從 %(cert)s %(e)s 載入金鑰。" - -msgid "No mounted Gluster shares found" -msgstr "找不到已裝載的 Gluster 共用" - -msgid "No mounted NFS shares found" -msgstr "找不到已裝載的 NFS 共用" - -msgid "No mounted SMBFS shares found." -msgstr "找不到已裝載的 SMBFS 共用項目。" - -msgid "No mounted Virtuozzo Storage shares found" -msgstr "找不到已裝載的 Virtuozzo 儲存體共用項目" - -msgid "No mounted shares found" -msgstr "找不到已裝載的共用項目" - -#, python-format -msgid "No node found in I/O group %(gid)s for volume %(vol)s." -msgstr "在磁區 %(vol)s 的 I/O 群組 %(gid)s 中找不到任何節點。" - -msgid "" -"No pools are available for provisioning volumes. Ensure that the " -"configuration option netapp_pool_name_search_pattern is set correctly." -msgstr "" -"沒有儲存區可用於供應磁區。請確保已正確設定配置選項 " -"netapp_pool_name_search_pattern。" - -msgid "" -"No response was received from CloudByte storage list iSCSI auth user API " -"call." -msgstr "未從 CloudByte 儲存體清單 iSCSI 鑑別使用者 API 呼叫接收到任何回應。" - -msgid "No response was received from CloudByte storage list tsm API call." -msgstr "為了列出 tsm API 呼叫,未從 CloudByte 儲存體中接收到回應。" - -msgid "No response was received from CloudByte's list filesystem api call." 
-msgstr "未從 CloudByte 的清單檔案系統 API 呼叫中接收到回應。" - -msgid "No service VIP configured and no nexenta_client_address" -msgstr "未配置服務 VIP,且沒有 nexenta_client_address" - -#, python-format -msgid "No snap found with %s as backing file." -msgstr "找不到含有 %s 的 Snapshot 來作為備用檔。" - -#, python-format -msgid "No snapshot image found in snapshot group %s." -msgstr "在 Snapshot 群組 %s 中找不到 Snapshot 映像檔。" - -#, python-format -msgid "No snapshots could be found on volume %s." -msgstr "在磁區 %s 上找不到 Snapshot。" - -#, python-format -msgid "No source snapshots provided to create consistency group %s." -msgstr "沒有提供來源 Snapshot 以建立一致性群組 %s。" - -#, python-format -msgid "No storage path found for export path %s" -msgstr "找不到匯出路徑 %s 的儲存體路徑" - -#, python-format -msgid "No such QoS spec %(specs_id)s." -msgstr "沒有這類服務品質規格 %(specs_id)s。" - -msgid "No suitable discovery ip found" -msgstr "找不到適當的探索 IP" - -#, python-format -msgid "No support to restore backup version %s" -msgstr "不支援還原備份版本 %s" - -#, python-format -msgid "No target id found for volume %(volume_id)s." -msgstr "找不到磁區 %(volume_id)s 的目標 ID。" - -msgid "" -"No unused LUN IDs are available on the host; multiattach is enabled which " -"requires that all LUN IDs to be unique across the entire host group." -msgstr "" -"主機上沒有未用的 LUN ID 可供使用;已啟用多重連接,這需要所有 LUN ID 在整個主" -"機群組中都是唯一的。" - -#, python-format -msgid "No valid host was found. %(reason)s" -msgstr "找不到有效的主機。%(reason)s" - -#, python-format -msgid "No valid hosts for volume %(id)s with type %(type)s" -msgstr "類型為 %(type)s 的磁區 %(id)s 不具有有效主機" - -#, python-format -msgid "No vdisk with the UID specified by ref %s." -msgstr "不存在具有由參照 %s 所指定之 UID 的 vdisk。" - -#, python-format -msgid "No views found for LUN: %s" -msgstr "找不到 LUN 的視圖:%s" - -#, python-format -msgid "" -"No volume on cluster with vserver %(vserver)s and junction path %(junction)s " -msgstr "叢集上沒有含 vserver %(vserver)s 及接合路徑%(junction)s 的磁區" - -msgid "No volume service(s) started successfully, terminating." 
-msgstr "所有磁區服務均未順利啟動,終止中。" - -msgid "No volume was found at CloudByte storage." -msgstr "在 CloudByte 儲存體中找不到磁區。" - -msgid "No volume_type should be provided when creating test replica." -msgstr "建立測試抄本時,不應提供 volume_type。" - -msgid "No volumes found in CloudByte storage." -msgstr "在 CloudByte 儲存體中找不到磁區。" - -msgid "No weighed hosts available" -msgstr "沒有加權主機可用" - -#, python-format -msgid "Not a valid string: %s" -msgstr "不是有效的字串:%s" - -msgid "Not a valid value for NaElement." -msgstr "不是 NaElement 的有效值。" - -#, python-format -msgid "Not able to find a suitable datastore for the volume: %s." -msgstr "找不到適合磁區 %s 的資料儲存庫。" - -msgid "Not an rbd snapshot" -msgstr "不是 rbd Snapshot" - -#, python-format -msgid "Not authorized for image %(image_id)s." -msgstr "未獲映像檔 %(image_id)s 的授權。" - -msgid "Not authorized." -msgstr "未被授權" - -#, python-format -msgid "Not enough space on backend (%(backend)s)" -msgstr "後端 (%(backend)s) 上空間不足" - -msgid "Not enough storage space in the ZFS share to perform this operation." -msgstr "ZFS 共用中的儲存體空間不足,無法執行此作業。" - -msgid "Not stored in rbd" -msgstr "未儲存在 rbd 中" - -msgid "Nova returned \"error\" status while creating snapshot." -msgstr "建立 Snapshot 時,Nova 傳回了「錯誤」狀態。" - -msgid "Null response received from CloudByte's list filesystem." -msgstr "從 CloudByte 的清單檔案系統中接收到空值回應。" - -msgid "Null response received from CloudByte's list iscsi auth groups." -msgstr "從 CloudByte 的清單 iscsi 鑑別群組接收到空值回應。" - -msgid "Null response received from CloudByte's list iscsi initiators." -msgstr "從 CloudByte 的清單 iscsi 起始器中接收到空值回應。" - -msgid "Null response received from CloudByte's list volume iscsi service." -msgstr "從 CloudByte 的清單磁區 iscsi 服務中接收到空值回應。" - -#, python-format -msgid "Null response received while creating volume [%s] at CloudByte storage." -msgstr "在 CloudByte 儲存體中建立磁區 [%s] 時接收到空值回應。" - -#, python-format -msgid "Null response received while deleting volume [%s] at CloudByte storage." 
-msgstr "在 CloudByte 儲存體中刪除磁區 [%s] 時接收到空值回應。" - -#, python-format -msgid "" -"Null response received while querying for [%(operation)s] based job " -"[%(job)s] at CloudByte storage." -msgstr "" -"在 CloudByte 儲存體中查詢 [%(operation)s] 型工作[%(job)s] 時接收到空值回應。" - -msgid "Object Count" -msgstr "物件計數" - -msgid "Object Version" -msgstr "物件版本" - -msgid "Object is not a NetApp LUN." -msgstr "物件不是 NetApp LUN。" - -#, python-format -msgid "" -"On an Extend Operation, error adding volume to composite volume: " -"%(volumename)s." -msgstr "在「延伸」作業上,將磁區新增至下列複合磁區時發生錯誤:%(volumename)s。" - -msgid "One of the required inputs from host, port or scheme was not found." -msgstr "找不到來自主機的其中一個必需輸入、埠或綱目。" - -#, python-format -msgid "" -"Only %(value)s %(verb)s request(s) can be made to %(uri)s every " -"%(unit_string)s." -msgstr "只能每隔 %(unit_string)s 向 %(uri)s 提出 %(value)s 個 %(verb)s 要求。" - -msgid "Only one limit can be set in a QoS spec." -msgstr "在一個服務品質規格中,只能設定一個限制。" - -msgid "" -"Only users with token scoped to immediate parents or root projects are " -"allowed to see its children quotas." -msgstr "" -"只容許具備已限定為原生母項或 root 專案範圍之記號的使用者來查看其子項配額。" - -msgid "Only volumes managed by OpenStack can be unmanaged." -msgstr "只能取消管理由 OpenStack 進行管理的磁區。" - -#, python-format -msgid "Operation failed with status=%(status)s. Full dump: %(data)s" -msgstr "作業失敗,狀態 = %(status)s。完整傾出:%(data)s" - -#, python-format -msgid "Operation not supported: %(operation)s." -msgstr "不受支援的作業:%(operation)s。" - -msgid "Option gpfs_images_dir is not set correctly." -msgstr "未正確設定選項 gpfs_images_dir。" - -msgid "Option gpfs_images_share_mode is not set correctly." -msgstr "未正確設定選項 gpfs_images_share_mode。" - -msgid "Option gpfs_mount_point_base is not set correctly." 
-msgstr "未正確設定選項 gpfs_mount_point_base。" - -#, python-format -msgid "Originating %(res)s %(prop)s must be one of '%(vals)s' values" -msgstr "原始 %(res)s %(prop)s 必須是值 %(vals)s 的其中之一" - -#, python-format -msgid "ParseException: %s" -msgstr "ParseException:%s" - -msgid "Partition name is None, please set smartpartition:partitionname in key." -msgstr "分割區名稱為「無」,請在索引鍵中設定 smartpartition:partitionname。" - -msgid "" -"Password or SSH private key is required for authentication: set either " -"san_password or san_private_key option." -msgstr "" -"鑑別需要密碼或 SSH 私密金鑰:請設定san_password 或 san_private_key 選項。" - -msgid "Path to REST server's certificate must be specified." -msgstr "必須指定 REST 伺服器憑證的路徑。" - -#, python-format -msgid "Please create %(pool_list)s pool in advance!" -msgstr "請預先建立 %(pool_list)s 儲存區!" - -#, python-format -msgid "Please create %(tier_levels)s tier in pool %(pool)s in advance!" -msgstr "請預先在儲存區 %(pool)s 中建立 %(tier_levels)s 層級!" - -msgid "Please specify a name for QoS specs." -msgstr "請為服務品質規格指定名稱。" - -#, python-format -msgid "Policy doesn't allow %(action)s to be performed." -msgstr "原則不容許執行 %(action)s。" - -#, python-format -msgid "Pool %(poolNameInStr)s is not found." -msgstr "找不到儲存區 %(poolNameInStr)s。" - -#, python-format -msgid "Pool %s does not exist in Nexenta Store appliance" -msgstr "儲存區 %s 不存在於「Nexenta 儲存庫」軟體驅動裝置中" - -#, python-format -msgid "Pool from volume['host'] %(host)s not found." -msgstr "找不到 volume['host'] %(host)s 的儲存區。" - -#, python-format -msgid "Pool from volume['host'] failed with: %(ex)s." -msgstr "volume['host'] 中的儲存區失敗:%(ex)s。" - -msgid "Pool is not available in the volume host field." -msgstr "在磁區主機欄位中無法使用儲存區。" - -msgid "Pool is not available in the volume host fields." -msgstr "在磁區主機欄位中無法使用儲存區。" - -#, python-format -msgid "Pool with name %(pool)s wasn't found in domain %(domain)s." -msgstr "在網域 %(domain)s 中找不到名稱為 %(pool)s 的儲存區。" - -#, python-format -msgid "Pool with name %(pool_name)s wasn't found in domain %(domain_id)s." 
-msgstr "在網域 %(domain_id)s 中找不到名稱為 %(pool_name)s 的儲存區。" - -#, python-format -msgid "" -"Pool: %(poolName)s. is not associated to storage tier for fast policy " -"%(fastPolicy)s." -msgstr "" -"儲存區 %(poolName)s 未與下列 FAST 原則的儲存體層級相關聯:%(fastPolicy)s。" - -#, python-format -msgid "PoolName must be in the file %(fileName)s." -msgstr "PoolName 必須在檔案 %(fileName)s 中。" - -#, python-format -msgid "Pools %s does not exist" -msgstr "儲存區 %s 不存在" - -msgid "Pools name is not set." -msgstr "未設定儲存區名稱。" - -#, python-format -msgid "Primary copy status: %(status)s and synchronized: %(sync)s." -msgstr "主要副本狀態:%(status)s,已同步:%(sync)s。" - -msgid "Project ID" -msgstr "專案識別號" - -#, python-format -msgid "Project quotas are not properly setup for nested quotas: %(reason)s." -msgstr "未針對巢狀配額正確設定專案配額:%(reason)s。" - -msgid "Protection Group not ready." -msgstr "保護群組尚未備妥。" - -#, python-format -msgid "" -"Protocol %(storage_protocol)s is not supported for storage family " -"%(storage_family)s." -msgstr "通訊協定 %(storage_protocol)s 不受儲存體系列 %(storage_family)s 支援。" - -msgid "Provided backup record is missing an id" -msgstr "所提供的備份記錄遺漏了 ID" - -#, python-format -msgid "" -"Provided snapshot status %(provided)s not allowed for snapshot with status " -"%(current)s." -msgstr "" -"提供的 Snapshot 狀態 %(provided)s,不為 Snapshot(狀態 = %(current)s)所接" -"受。" - -#, python-format -msgid "" -"Provider information w.r.t CloudByte storage was not found for OpenStack " -"volume [%s]." -msgstr "找不到 OpenStack 磁區 [%s] 的提供者資訊 w.r.t CloudByte 儲存體。" - -#, python-format -msgid "Pure Storage Cinder driver failure: %(reason)s" -msgstr "Pure Storage Cinder 驅動程式失敗:%(reason)s" - -#, python-format -msgid "QoS Specs %(specs_id)s already exists." -msgstr "服務品質規格 %(specs_id)s 已存在。" - -#, python-format -msgid "QoS Specs %(specs_id)s is still associated with entities." -msgstr "服務品質規格 %(specs_id)s 仍與實體相關聯。" - -#, python-format -msgid "QoS config is wrong. %s must > 0." 
-msgstr "服務品質配置錯誤。%s 必須大於 0。" - -#, python-format -msgid "" -"QoS policy must specify for IOTYPE and another qos_specs, QoS policy: " -"%(qos_policy)s." -msgstr "" -"必須為 IOTYPE 指定服務品質原則及另一個 qos_specs,服務品質原則:" -"%(qos_policy)s。" - -#, python-format -msgid "" -"QoS policy must specify for IOTYPE: 0, 1, or 2, QoS policy: %(qos_policy)s " -msgstr "" -"必須為 IOTYPE 指定服務品質原則:0、1 或 2,服務品質原則:%(qos_policy)s " - -#, python-format -msgid "" -"QoS policy upper_limit and lower_limit conflict, QoS policy: %(qos_policy)s." -msgstr "" -"服務品質原則 upper_limit 與 lower_limit 相衝突,服務品質原則:" -"%(qos_policy)s。" - -#, python-format -msgid "QoS spec %(specs_id)s has no spec with key %(specs_key)s." -msgstr "服務品質規格 %(specs_id)s 沒有索引鍵為 %(specs_key)s 的規格。" - -msgid "QoS specs are not supported on this storage family and ONTAP version." -msgstr "服務品質規格在此儲存體系列和 ONTAP 版本上不受支援。" - -msgid "Qos specs still in use." -msgstr "服務品質規格仍在使用中。" - -msgid "" -"Query by service parameter is deprecated. Please use binary parameter " -"instead." -msgstr "「依服務查詢」參數已淘汰。請改用二進位參數。" - -msgid "Query resource pool error." -msgstr "查詢資源儲存區時發生錯誤。" - -#, python-format -msgid "Quota %s limit must be equal or greater than existing resources." -msgstr "配額 %s 限制必須等於或大於現有資源數目。" - -#, python-format -msgid "Quota class %(class_name)s could not be found." -msgstr "找不到配額類別 %(class_name)s。" - -msgid "Quota could not be found" -msgstr "找不到配額" - -#, python-format -msgid "Quota exceeded for resources: %(overs)s" -msgstr "資源已超出配額:%(overs)s" - -#, python-format -msgid "Quota exceeded: code=%(code)s" -msgstr "已超出配額:錯誤碼 = %(code)s" - -#, python-format -msgid "Quota for project %(project_id)s could not be found." 
-msgstr "找不到專案 %(project_id)s 的配額。" - -#, python-format -msgid "" -"Quota limit invalid for project '%(proj)s' for resource '%(res)s': limit of " -"%(limit)d is less than in-use value of %(used)d" -msgstr "" -"針對資源 '%(res)s',專案 '%(proj)s' 的配額限制無效:限制 %(limit)d 小於使用中" -"的值 %(used)d" - -#, python-format -msgid "Quota reservation %(uuid)s could not be found." -msgstr "找不到配額預約 %(uuid)s。" - -#, python-format -msgid "Quota usage for project %(project_id)s could not be found." -msgstr "找不到專案 %(project_id)s 的配額用量。" - -#, python-format -msgid "RBD diff op failed - (ret=%(ret)s stderr=%(stderr)s)" -msgstr "RBD 差異 op 失敗 -(ret = %(ret)s 標準錯誤 = %(stderr)s)" - -#, python-format -msgid "REST %(proxy_ver)s hpelefthandclient %(rest_ver)s" -msgstr "REST %(proxy_ver)s hpelefthandclient %(rest_ver)s" - -msgid "REST server IP must by specified." -msgstr "必須指定 REST 伺服器 IP。" - -msgid "REST server password must by specified." -msgstr "必須指定 REST 伺服器密碼。" - -msgid "REST server username must by specified." -msgstr "必須指定 REST 伺服器使用者名稱。" - -msgid "RPC Version" -msgstr "RPC 版本" - -msgid "RPC server response is incomplete" -msgstr "RPC 伺服器回應不完整" - -msgid "Raid did not have MCS Channel." -msgstr "Raid 沒有 MCS 通道。" - -#, python-format -msgid "Received error string: %s" -msgstr "接收到錯誤字串:%s" - -msgid "Reference must be for an unmanaged snapshot." -msgstr "參照必須是針對未受管理的 Snapshot 進行的。" - -msgid "Reference must be for an unmanaged virtual volume." -msgstr "參照必須針對未受管理的虛擬磁區。" - -msgid "Reference must be the name of an unmanaged snapshot." -msgstr "參照必須是受管理之 Snapshot 的名稱。" - -msgid "Reference must be the volume name of an unmanaged virtual volume." -msgstr "參照必須是未受管理虛擬磁區的磁區名稱。" - -msgid "Reference must contain either source-name or source-id element." -msgstr "參照必須包含 source-name 或 source-id 元素。" - -msgid "Reference must contain source-id or source-name element." -msgstr "參照必須包含 source-id 或 source-name 元素。" - -msgid "Reference must contain source-id or source-name key." 
-msgstr "參照必須包含 source-id 或 source-name 索引鍵。" - -msgid "Reference must contain source-id or source-name." -msgstr "參照必須包含 source-id 或 source-name。" - -msgid "Reference must contain source-id." -msgstr "參照必須包含 source-id。" - -msgid "Reference must contain source-name element." -msgstr "參照必須包含 source-name 元素。" - -msgid "Reference must contain source-name or source-id." -msgstr "參照必須包含 source-name 或 source-id。" - -msgid "Reference must contain source-name." -msgstr "參照必須包含 source-name。" - -msgid "Reference to volume to be managed must contain source-name." -msgstr "對要管理之磁區的參照必需包含 source-name。" - -#, python-format -msgid "Reference to volume: %s to be managed must contain source-name." -msgstr "對要管理之磁區 %s 的參照必需包含 source-name。" - -#, python-format -msgid "" -"Refusing to migrate volume ID: %(id)s. Please check your configuration " -"because source and destination are the same Volume Group: %(name)s." -msgstr "" -"正在拒絕移轉磁區 ID:%(id)s。請檢查您的配置,因為來源和目的地是相同的磁區群" -"組:%(name)s。" - -msgid "Remote pool cannot be found." -msgstr "找不到遠端儲存區。" - -msgid "Remove CHAP error." -msgstr "移除 CHAP 時發生錯誤。" - -msgid "Remove fc from host error." -msgstr "從主機中移除 FC 時發生錯誤。" - -msgid "Remove host from array error." -msgstr "從陣列中移除主機時發生錯誤。" - -msgid "Remove host from hostgroup error." -msgstr "從主機群組中移除主機時發生錯誤。" - -msgid "Remove iscsi from host error." -msgstr "從主機中移除 iSCSI 時發生錯誤。" - -msgid "Remove lun from QoS error." -msgstr "從服務品質中移除 LUN 時發生錯誤。" - -msgid "Remove lun from cache error." -msgstr "從快取中移除 LUN 時發生錯誤。" - -msgid "Remove lun from partition error." -msgstr "從分割區中移除 LUN 時發生錯誤。" - -msgid "Remove port from port group error." -msgstr "從埠群組中移除埠時發生錯誤。" - -msgid "Remove volume export failed." -msgstr "移除磁區匯出失敗。" - -msgid "Rename lun on array error." -msgstr "重新命名陣列上的 LUN 時發生錯誤。" - -msgid "Rename snapshot on array error." -msgstr "重新命名陣列上的 Snapshot 時發生錯誤。" - -#, python-format -msgid "Replication %(name)s to %(ssn)s failed." 
-msgstr "將 %(name)s 抄寫至 %(ssn)s 失敗。" - -#, python-format -msgid "Replication Service Capability not found on %(storageSystemName)s." -msgstr "在 %(storageSystemName)s 上找不到「抄寫服務功能」。" - -#, python-format -msgid "Replication Service not found on %(storageSystemName)s." -msgstr "在 %(storageSystemName)s 上找不到「抄寫服務」。" - -msgid "Replication not allowed yet." -msgstr "尚未容許抄寫。" - -msgid "Request body and URI mismatch" -msgstr "要求內文與 URI 不符" - -msgid "Request body contains too many items" -msgstr "要求內文包含太多項目" - -msgid "Request body contains too many items." -msgstr "要求內文包含太多項目。" - -msgid "Request body empty" -msgstr "要求內文是空的" - -#, python-format -msgid "Request to Datera cluster returned bad status: %(status)s | %(reason)s" -msgstr "傳送至 Datera 叢集的要求,傳回了不正確的狀態:%(status)s | %(reason)s" - -#, python-format -msgid "" -"Requested backup exceeds allowed Backup gigabytes quota. Requested " -"%(requested)sG, quota is %(quota)sG and %(consumed)sG has been consumed." -msgstr "" -"所要求的備份超出容許的備份 GB 數配額。要求 %(requested)s G,配額為 %(quota)s " -"G,並且已耗用 %(consumed)s G。" - -#, python-format -msgid "" -"Requested volume or snapshot exceeds allowed %(name)s quota. Requested " -"%(requested)sG, quota is %(quota)sG and %(consumed)sG has been consumed." -msgstr "" -"所要求的磁區或 Snapshot 超出容許的 %(name)s 配額。要求 %(requested)s G,配額" -"為 %(quota)s G,並且已耗用 %(consumed)s G。" - -#, python-format -msgid "" -"Requested volume size %(size)d is larger than maximum allowed limit " -"%(limit)d." -msgstr "所要求的磁區大小 %(size)d 大於所容許的上限:%(limit)d。" - -msgid "Required configuration not found" -msgstr "找不到必要的配置" - -#, python-format -msgid "Required flag %s is not set" -msgstr "未設定必要旗標 %s" - -#, python-format -msgid "" -"Reset backup status aborted, the backup service currently configured " -"[%(configured_service)s] is not the backup service that was used to create " -"this backup [%(backup_service)s]." 
-msgstr "" -"已中止重設備份狀態,目前配置的備份服務[%(configured_service)s] 不是建立此備份" -"所使用的備份服務 [%(backup_service)s]。" - -#, python-format -msgid "Resizing clone %s failed." -msgstr "調整副本 %s 的大小失敗。" - -msgid "Resizing image file failed." -msgstr "調整映像檔大小時失敗。" - -msgid "Resource could not be found." -msgstr "找不到資源。" - -msgid "Resource not ready." -msgstr "資源未備妥。" - -#, python-format -msgid "Response error - %s." -msgstr "回應錯誤 - %s。" - -msgid "Response error - The storage-system is offline." -msgstr "回應錯誤 - 儲存體系統在線上。" - -#, python-format -msgid "Response error code - %s." -msgstr "回應錯誤碼 - %s。" - -msgid "RestURL is not configured." -msgstr "未配置 RestURL。" - -#, python-format -msgid "" -"Restore backup aborted, expected volume status %(expected_status)s but got " -"%(actual_status)s." -msgstr "" -"已中止還原備份,預期磁區狀態 %(expected_status)s,但取得 %(actual_status)s。" - -#, python-format -msgid "" -"Restore backup aborted, the backup service currently configured " -"[%(configured_service)s] is not the backup service that was used to create " -"this backup [%(backup_service)s]." -msgstr "" -"已中止還原備份,目前配置的備份服務[%(configured_service)s] 不是建立此備份所使" -"用的備份服務 [%(backup_service)s]。" - -#, python-format -msgid "" -"Restore backup aborted: expected backup status %(expected_status)s but got " -"%(actual_status)s." -msgstr "" -"已中止還原備份:預期備份狀態 %(expected_status)s,但取得 %(actual_status)s。" - -#, python-format -msgid "" -"Retrieved a different amount of SolidFire volumes for the provided Cinder " -"snapshots. Retrieved: %(ret)s Desired: %(des)s" -msgstr "" -"針對所提供的 Cinder Snapshot,已擷取不同數量的 SolidFire 磁區。已擷取數目:" -"%(ret)s,所需數目:%(des)s" - -#, python-format -msgid "" -"Retrieved a different amount of SolidFire volumes for the provided Cinder " -"volumes. 
Retrieved: %(ret)s Desired: %(des)s" -msgstr "" -"針對所提供的 Cinder 磁區,已擷取不同數量的 SolidFire 磁區。已擷取數目:" -"%(ret)s,所需數目:%(des)s" - -#, python-format -msgid "Retry count exceeded for command: %s" -msgstr "已超出指令 %s 的重試次數" - -msgid "Retryable SolidFire Exception encountered" -msgstr "發生「可重試的 SolidFire 異常狀況」" - -msgid "Retype requires migration but is not allowed." -msgstr "Retype 需要移轉,但系統不容許。" - -#, python-format -msgid "Rolling back %(volumeName)s by deleting it." -msgstr "正在透過刪除 %(volumeName)s 來將其回復。" - -#, python-format -msgid "" -"Running Cinder with a VMware vCenter version less than %s is not allowed." -msgstr "不容許使用低於 %s 的 VMware vCenter 版本來執行 Cinder。" - -msgid "SAN product is not configured." -msgstr "未配置 SAN 產品。" - -msgid "SAN protocol is not configured." -msgstr "未配置 SAN 通訊協定。" - -#, python-format -msgid "SMBFS config 'smbfs_oversub_ratio' invalid. Must be > 0: %s" -msgstr "SMBFS 配置 'smbfs_oversub_ratio' 無效。必須大於 0:%s" - -#, python-format -msgid "SMBFS config 'smbfs_used_ratio' invalid. Must be > 0 and <= 1.0: %s" -msgstr "SMBFS 配置 'smbfs_used_ratio' 無效。必須大於 0 且小於或等於 1.0:%s" - -#, python-format -msgid "SMBFS config file at %(config)s doesn't exist." -msgstr "%(config)s 處的 SMBFS 配置檔不存在。" - -msgid "SMBFS config file not set (smbfs_shares_config)." -msgstr "未設定 SMBFS 配置檔 (smbfs_shares_config)。" - -#, python-format -msgid "SSH Command failed after '%(total_attempts)r' attempts : '%(command)s'" -msgstr "嘗試 '%(total_attempts)r' 次之後 SSH 指令仍失敗:'%(command)s'" - -#, python-format -msgid "SSH command injection detected: %(command)s" -msgstr "偵測到 SSH 指令注入:%(command)s" - -#, python-format -msgid "SSH connection failed for %(fabric)s with error: %(err)s" -msgstr "%(fabric)s 進行 SSH 連線失敗,發生錯誤:%(err)s" - -#, python-format -msgid "SSL Certificate expired on %s." -msgstr "SSL 憑證已在 %s 過期。" - -#, python-format -msgid "SSL error: %(arg)s." -msgstr "SSL 錯誤:%(arg)s。" - -#, python-format -msgid "Scheduler Host Filter %(filter_name)s could not be found." 
-msgstr "找不到「排程器主機過濾器」%(filter_name)s。" - -#, python-format -msgid "Scheduler Host Weigher %(weigher_name)s could not be found." -msgstr "找不到「排程器主機稱量程式」%(weigher_name)s。" - -#, python-format -msgid "" -"Secondary copy status: %(status)s and synchronized: %(sync)s, sync progress " -"is: %(progress)s%%." -msgstr "次要副本狀態:%(status)s,已同步:%(sync)s,同步進度:%(progress)s%%。" - -#, python-format -msgid "" -"Secondary id can not be the same as primary array, backend_id = " -"%(secondary)s." -msgstr "次要 ID 不能與主要陣列相同,backend_id = %(secondary)s。" - -#, python-format -msgid "SerialNumber must be in the file %(fileName)s." -msgstr "SerialNumber 必須在檔案 %(fileName)s 中。" - -#, python-format -msgid "Service %(service)s on host %(host)s removed." -msgstr "已移除主機 %(host)s 上的服務 %(service)s。" - -#, python-format -msgid "Service %(service_id)s could not be found on host %(host)s." -msgstr "在主機 %(host)s 上找不到服務 %(service_id)s。" - -#, python-format -msgid "Service %(service_id)s could not be found." -msgstr "找不到服務 %(service_id)s。" - -msgid "Service is too old to fulfil this request." -msgstr "服務太舊,無法滿足此要求。" - -msgid "Service is unavailable at this time." -msgstr "此時無法使用服務。" - -msgid "Set pair secondary access error." -msgstr "設定配對次要存取時發生錯誤。" - -msgid "Sets thin provisioning." -msgstr "設定精簡供應。" - -msgid "" -"Setting LUN QoS policy group is not supported on this storage family and " -"ONTAP version." -msgstr "此儲存體系列及 ONTAP 版本不支援設定 LUN 服務品質原則群組。" - -msgid "" -"Setting file qos policy group is not supported on this storage family and " -"ontap version." -msgstr "此儲存體系列及 ONTAP 版本不支援設定檔案服務品質原則群組。" - -#, python-format -msgid "" -"Share at %(dir)s is not writable by the Cinder volume service. Snapshot " -"operations will not be supported." -msgstr "Cinder 磁區服務無法寫入 %(dir)s 處的共用項目。將不支援 Snapshot 作業。" - -#, python-format -msgid "Sheepdog I/O Error, command was: \"%s\"." 
-msgstr "Sheepdog I/O 錯誤,指令為:\"%s\"。" - -msgid "" -"Show operations can only be made to projects in the same hierarchy of the " -"project in which users are scoped to." -msgstr "顯示作業只能對將使用者限定範圍之相同專案階層中的專案執行。" - -msgid "Size" -msgstr "容量" - -#, python-format -msgid "Size for volume: %s not found, cannot secure delete." -msgstr "找不到磁區 %s 的大小,無法安全刪除。" - -#, python-format -msgid "" -"Size is %(image_size)dGB and doesn't fit in a volume of size " -"%(volume_size)dGB." -msgstr "大小為 %(image_size)d GB,無法容納大小為%(volume_size)d GB 的磁區。" - -#, python-format -msgid "" -"Size of specified image %(image_size)sGB is larger than volume size " -"%(volume_size)sGB." -msgstr "所指定映像檔的大小 %(image_size)s GB 大於磁區大小%(volume_size)s GB。" - -#, python-format -msgid "" -"Snapshot %(id)s has been asked to be deleted while waiting for it to become " -"available. Perhaps a concurrent request was made." -msgstr "" -"在等待 Snapshot %(id)s 變成可用時,已經要求將其刪除。可能發出了並行要求。" - -#, python-format -msgid "" -"Snapshot %(id)s was found in state %(state)s rather than 'deleting' during " -"cascade delete." -msgstr "" -"在連鎖刪除期間,發現 Snapshot %(id)s 處於 %(state)s 狀態,而不是「刪除中」狀" -"態。" - -#, python-format -msgid "Snapshot %(snapshot_id)s could not be found." -msgstr "找不到 Snapshot %(snapshot_id)s。" - -#, python-format -msgid "Snapshot %(snapshot_id)s has no metadata with key %(metadata_key)s." -msgstr "Snapshot %(snapshot_id)s 沒有索引鍵為 %(metadata_key)s 的 meta 資料。" - -#, python-format -msgid "Snapshot '%s' doesn't exist on array." -msgstr "陣列上不存在 Snapshot '%s'。" - -#, python-format -msgid "" -"Snapshot cannot be created because volume %(vol_id)s is not available, " -"current volume status: %(vol_status)s." -msgstr "" -"無法建立 Snapshot,因為磁區 %(vol_id)s 無法使用,現行磁區狀態:" -"%(vol_status)s。" - -msgid "Snapshot cannot be created while volume is migrating." -msgstr "移轉磁區時無法建立 Snapshot。" - -msgid "Snapshot of secondary replica is not allowed." 
-msgstr "不容許使用次要抄本的 Snapshot。" - -#, python-format -msgid "Snapshot of volume not supported in state: %s." -msgstr "狀態 %s 不支援取得磁區的 Snapshot。" - -#, python-format -msgid "Snapshot res \"%s\" that is not deployed anywhere?" -msgstr "未在任何位置部署 Snapshot 資源 \"%s\"?" - -#, python-format -msgid "Snapshot status %(cur)s not allowed for update_snapshot_status" -msgstr "Snapshot 狀態 %(cur)s,不為 update_snapshot_status 所接受" - -msgid "Snapshot status must be \"available\" to clone." -msgstr "Snapshot 狀態必須為「可用」才能複製。" - -#, python-format -msgid "" -"Snapshot to be backed up must be available, but the current status is \"%s\"." -msgstr "要備份的 Snapshot 必須可用,但現行狀態卻是 \"%s\"。" - -#, python-format -msgid "Snapshot with id of %s could not be found." -msgstr "找不到 ID 為 %s 的 Snapshot。" - -#, python-format -msgid "" -"Snapshot='%(snap)s' does not exist in base image='%(base)s' - aborting " -"incremental backup" -msgstr "" -"Snapshot = '%(snap)s' 不存在於基本映像檔 = '%(base)s' 中 - 正在中止增量備份" - -#, python-format -msgid "Snapshots are not supported for this volume format: %s" -msgstr "此磁區格式不支援 Snapshot:%s" - -#, python-format -msgid "Socket error: %(arg)s." -msgstr "Socket 錯誤:%(arg)s。" - -msgid "SolidFire Cinder Driver exception" -msgstr "SolidFire Cinder 驅動程式異常狀況" - -msgid "Sort direction array size exceeds sort key array size." -msgstr "排序方向陣列大小超過排序鍵陣列大小。" - -msgid "Source CG is empty. No consistency group will be created." -msgstr "來源 CG 是空的。將不會建立一致性群組。" - -msgid "Source host details not found." -msgstr "找不到來源主機詳細資料。" - -msgid "Source volume device ID is required." -msgstr "需要來源磁區裝置 ID。" - -msgid "Source volume not mid-migration." -msgstr "移轉期間找不到來源磁區。" - -msgid "SpaceInfo returned byarray is invalid" -msgstr "陣列傳回的 SpaceInfo 無效" - -#, python-format -msgid "" -"Specified host to map to volume %(vol)s is in unsupported host group with " -"%(group)s." -msgstr "指定要對映至磁區 %(vol)s 的主機位於不受支援的主機群組%(group)s 中。" - -msgid "Specified logical volume does not exist." 
-msgstr "指定的邏輯磁區不存在。" - -#, python-format -msgid "Specified snapshot group with id %s could not be found." -msgstr "找不到 ID 為 %s 的指定 Snapshot 群組。" - -msgid "Specify a password or private_key" -msgstr "指定密碼或 private_key" - -msgid "Specify san_password or san_private_key" -msgstr "指定 san_password 或 san_private_key" - -msgid "" -"Specify volume type name, description, is_public or a combination thereof." -msgstr "指定磁區類型名稱、說明、is_public 或這些的組合。" - -msgid "Split pair error." -msgstr "分割配對時發生錯誤。" - -msgid "Split replication failed." -msgstr "分割抄寫失敗。" - -msgid "Start LUNcopy error." -msgstr "啟動 LUNcopy 時發生錯誤。" - -msgid "State" -msgstr "狀態" - -#, python-format -msgid "State of node is wrong. Current state is %s." -msgstr "節點的狀態是錯誤的。現行狀態為 %s。" - -msgid "Status" -msgstr "狀態" - -msgid "Stop snapshot error." -msgstr "停止 Snapshot 時發生錯誤。" - -#, python-format -msgid "Storage Configuration Service not found on %(storageSystemName)s." -msgstr "在 %(storageSystemName)s 上找不到「儲存體配置服務」。" - -#, python-format -msgid "Storage HardwareId mgmt Service not found on %(storageSystemName)s." -msgstr "在 %(storageSystemName)s 上找不到「儲存體硬體 ID 管理服務」。" - -#, python-format -msgid "Storage Profile %s not found." -msgstr "找不到儲存體設定檔 %s。" - -#, python-format -msgid "Storage Relocation Service not found on %(storageSystemName)s." -msgstr "在 %(storageSystemName)s 上找不到「儲存體重新定位服務」。" - -#, python-format -msgid "Storage family %s is not supported." -msgstr "儲存體系列 %s 不受支援。" - -#, python-format -msgid "Storage group %(storageGroupName)s was not deleted successfully" -msgstr "未順利刪除儲存體群組 %(storageGroupName)s" - -#, python-format -msgid "Storage host %(svr)s not detected, verify name" -msgstr "未偵測到儲存體主機 %(svr)s,請驗證名稱" - -msgid "Storage pool is not configured." -msgstr "未配置儲存區。" - -#, python-format -msgid "Storage profile: %(storage_profile)s not found." -msgstr "找不到儲存體設定檔 %(storage_profile)s。" - -msgid "Storage resource could not be found." -msgstr "找不到儲存體資源。" - -msgid "Storage system id not set." 
-msgstr "未設定儲存體系統 ID。" - -#, python-format -msgid "Storage system not found for pool %(poolNameInStr)s." -msgstr "找不到儲存區 %(poolNameInStr)s 的儲存體系統。" - -#, python-format -msgid "StorageSystem %(array)s is not found." -msgstr "找不到儲存體系統 %(array)s。" - -#, python-format -msgid "" -"Sum of child usage '%(sum)s' is greater than free quota of '%(free)s' for " -"project '%(proj)s' for resource '%(res)s'. Please lower the limit or usage " -"for one or more of the following projects: '%(child_ids)s'" -msgstr "" -"針對資源 '%(res)s',子項總用量 '%(sum)s' 大於專案 '%(proj)s' 的可用配額 " -"'%(free)s'。請降低下列一個以上專案的限制或用量:'%(child_ids)s'" - -msgid "Switch over pair error." -msgstr "切換配對時發生錯誤。" - -msgid "Sync pair error." -msgstr "同步配對時發生錯誤。" - -#, python-format -msgid "System %(id)s found with bad password status - %(pass_status)s." -msgstr "找到具有不正確密碼狀態 %(pass_status)s 的系統 %(id)s。" - -#, python-format -msgid "System %(id)s found with bad status - %(status)s." -msgstr "發現系統 %(id)s 的狀態 %(status)s 不正確。" - -msgid "System does not support compression." -msgstr "系統不支援壓縮。" - -msgid "System is busy, retry operation." -msgstr "系統忙碌中,請重試作業。" - -#, python-format -msgid "" -"TSM [%(tsm)s] was not found in CloudByte storage for account [%(account)s]." -msgstr "在帳戶 [%(account)s] 的 CloudByte 儲存體中找不到 TSM [%(tsm)s]。" - -msgid "Target volume type is still in use." -msgstr "目標磁區類型仍在使用中。" - -msgid "Terminate connection failed" -msgstr "終止連線失敗" - -msgid "Terminate connection unable to connect to backend." -msgstr "終止連線無法連接至後端。" - -#, python-format -msgid "Terminate volume connection failed: %(err)s" -msgstr "終止磁區連線失敗:%(err)s" - -#, python-format -msgid "The %(type)s %(id)s source to be replicated was not found." -msgstr "找不到要抄寫的 %(type)s %(id)s 來源。" - -msgid "" -"The 'sort_key' and 'sort_dir' parameters are deprecated and cannot be used " -"with the 'sort' parameter." -msgstr "" -"'sort_key' 和 'sort_dir' 參數已遭淘汰,因此無法與 'sort' 參數搭配使用。" - -msgid "The EQL array has closed the connection." 
-msgstr "EQL 陣列已關閉連線。" - -#, python-format -msgid "" -"The GPFS filesystem %(fs)s is not at the required release level. Current " -"level is %(cur)s, must be at least %(min)s." -msgstr "" -"GPFS 檔案系統 %(fs)s 不是必要的版次。現行層次是 %(cur)s,必須至少是 %(min)s。" - -msgid "The IP Address was not found." -msgstr "找不到 IP 位址。" - -#, python-format -msgid "" -"The WebDAV request failed. Reason: %(msg)s, Return code/reason: %(code)s, " -"Source Volume: %(src)s, Destination Volume: %(dst)s, Method: %(method)s." -msgstr "" -"WebDAV 要求失敗。原因:%(msg)s,回覆碼/原因:%(code)s,來源磁區:%(src)s,目" -"的地磁區:%(dst)s,方法:%(method)s。" - -msgid "" -"The above error may show that the database has not been created.\n" -"Please create a database using 'cinder-manage db sync' before running this " -"command." -msgstr "" -"上述錯誤可能會顯示尚未建立資料庫。\n" -"請使用 'cinder-manage db sync' 來建立資料庫,然後再執行此指令。" - -#, python-format -msgid "" -"The array does not support the storage pool setting for SLO %(slo)s and " -"workload %(workload)s. Please check the array for valid SLOs and workloads." -msgstr "" -"陣列不支援 SLO %(slo)s 及工作量 %(workload)s 的儲存區設定。請檢查陣列中的有" -"效 SLO 及工作量。" - -msgid "" -"The back-end where the volume is created does not have replication enabled." -msgstr "在其中建立該磁區的後端尚未啟用抄寫。" - -#, python-format -msgid "" -"The command %(cmd)s failed. (ret: %(ret)s, stdout: %(out)s, stderr: %(err)s)" -msgstr "" -"指令 %(cmd)s 失敗。(ret:%(ret)s,標準輸出:%(out)s,標準錯誤:%(err)s)" - -msgid "The copy should be primary or secondary" -msgstr "副本應為主要或次要副本" - -#, python-format -msgid "" -"The creation of a logical device could not be completed. (LDEV: %(ldev)s)" -msgstr "無法完成建立邏輯裝置。(LDEV:%(ldev)s)" - -msgid "The decorated method must accept either a volume or a snapshot object" -msgstr "裝飾方法必須接受磁區或 Snapshot 物件" - -#, python-format -msgid "The device in the path %(path)s is unavailable: %(reason)s" -msgstr "路徑 %(path)s 中的裝置無法使用:%(reason)s" - -#, python-format -msgid "The end time (%(end)s) must be after the start time (%(start)s)." 
-msgstr "結束時間 (%(end)s) 必須晚於開始時間 (%(start)s)。" - -#, python-format -msgid "The extraspec: %(extraspec)s is not valid." -msgstr "額外規格 %(extraspec)s 無效。" - -#, python-format -msgid "The failed-over volume could not be deleted: %s" -msgstr "無法刪除已失效接手的磁區:%s" - -#, python-format -msgid "The following elements are required: %s" -msgstr "需要下列元素:%s" - -msgid "The host group or iSCSI target could not be added." -msgstr "無法新增主機群組或 iSCSI 目標。" - -msgid "The host group or iSCSI target was not found." -msgstr "找不到主機群組或 iSCSI 目標。" - -msgid "" -"The host is not ready to be failed back. Please resynchronize the volumes " -"and resume replication on the 3PAR backends." -msgstr "主機尚未備妥以進行失效回復。請重新同步磁區,並回復 3PAR 後端上的抄寫。" - -msgid "" -"The host is not ready to be failed back. Please resynchronize the volumes " -"and resume replication on the LeftHand backends." -msgstr "" -"主機尚未備妥以進行失效回復。請重新同步磁區,並回復 LeftHand 後端上的抄寫。" - -msgid "" -"The host is not ready to be failed back. Please resynchronize the volumes " -"and resume replication on the Storwize backends." -msgstr "" -"主機尚未備妥以進行失效回復。請重新同步磁區,並回復 Storwize 後端上的抄寫。" - -#, python-format -msgid "The iSCSI CHAP user %(user)s does not exist." -msgstr "iSCSI CHAP 使用者 %(user)s 不存在。" - -msgid "The key cannot be None." -msgstr "金鑰不能是「無」。" - -#, python-format -msgid "The logical device for specified %(type)s %(id)s was already deleted." -msgstr "已經刪除指定 %(type)s %(id)s 的邏輯裝置。" - -#, python-format -msgid "The method %(method)s is timed out. (timeout value: %(timeout)s)" -msgstr "方法 %(method)s 已逾時。(逾時值:%(timeout)s)" - -msgid "The method update_migrated_volume is not implemented." -msgstr "未實作方法 update_migrated_volume。" - -#, python-format -msgid "" -"The mount %(mount_path)s is not a valid Quobyte USP volume. Error: %(exc)s" -msgstr "裝載 %(mount_path)s 不是有效的 Quobyte USP 磁區。錯誤:%(exc)s" - -#, python-format -msgid "The parameter of the storage backend. 
(config_group: %(config_group)s)" -msgstr "儲存體後端的參數。(config_group:%(config_group)s)" - -msgid "The parent backup must be available for incremental backup." -msgstr "增量備份的母項備份必須可用。" - -#, python-format -msgid "The provided snapshot '%s' is not a snapshot of the provided volume." -msgstr "所提供的 Snapshot '%s' 不是所提供之磁區的 Snapshot。" - -msgid "" -"The reference to the volume in the backend should have the format " -"file_system/volume_name (volume_name cannot contain '/')" -msgstr "" -"對後端中磁區的參照應該具有下列格式:file_system/volume_name(磁區名稱不得包" -"含 '/')" - -#, python-format -msgid "The remote retention count must be %s or less." -msgstr "遠端保留計數必須小於或等於 %s。" - -msgid "" -"The replication mode was not configured correctly in the volume type " -"extra_specs. If replication:mode is periodic, replication:sync_period must " -"also be specified and be between 300 and 31622400 seconds." -msgstr "" -"在磁區類型 extra_specs 中,未正確配置抄寫模式。如果 replication:mode 是定期" -"的,則 replication:sync_period 也必須予以指定且介於 300 和 31622400 秒之間。" - -#, python-format -msgid "The replication sync period must be at least %s seconds." -msgstr "抄寫同步週期必須至少是 %s 秒。" - -#, python-format -msgid "" -"The requested size : %(requestedSize)s is not the same as resulting size: " -"%(resultSize)s." -msgstr "" -"所要求的大小 %(requestedSize)s 與產生的下列大小不相同:%(resultSize)s。" - -#, python-format -msgid "The resource %(resource)s was not found." -msgstr "找不到資源 %(resource)s。" - -msgid "The results are invalid." -msgstr "結果無效。" - -#, python-format -msgid "The retention count must be %s or less." -msgstr "保留計數必須小於或等於 %s。" - -msgid "The snapshot cannot be created when the volume is in maintenance mode." -msgstr "當磁區處於維護模式時,無法建立 Snapshot。" - -msgid "The source volume for this WebDAV operation not found." -msgstr "找不到此 WebDAV 作業的來源磁區。" - -#, python-format -msgid "" -"The source volume type '%(src)s' is different than the destination volume " -"type '%(dest)s'." 
-msgstr "來源磁區類型 '%(src)s' 與目的地磁區類型'%(dest)s' 不同。" - -#, python-format -msgid "The source volume type '%s' is not available." -msgstr "來源磁區類型 '%s' 無法使用。" - -#, python-format -msgid "The specified %(desc)s is busy." -msgstr "指定的 %(desc)s 忙碌中。" - -#, python-format -msgid "The specified LUN does not belong to the given pool: %s." -msgstr "指定的 LUN 不屬於給定的儲存區:%s。" - -#, python-format -msgid "" -"The specified ldev %(ldev)s could not be managed. The ldev must not be " -"mapping." -msgstr "無法管理指定的 LDEV %(ldev)s。不得對映該 LDEV。" - -#, python-format -msgid "" -"The specified ldev %(ldev)s could not be managed. The ldev must not be " -"paired." -msgstr "無法管理指定的 LDEV %(ldev)s。不得對該 LDEV進行配對。" - -#, python-format -msgid "" -"The specified ldev %(ldev)s could not be managed. The ldev size must be in " -"multiples of gigabyte." -msgstr "無法管理指定的 LDEV %(ldev)s。LDEV 大小必須為GB 的倍數。" - -#, python-format -msgid "" -"The specified ldev %(ldev)s could not be managed. The volume type must be DP-" -"VOL." -msgstr "無法管理指定的 LDEV %(ldev)s。磁區類型必須為 DP-VOL。" - -#, python-format -msgid "" -"The specified operation is not supported. The volume size must be the same " -"as the source %(type)s. (volume: %(volume_id)s)" -msgstr "" -"指定的作業不受支援。磁區大小必須與來源 %(type)s 的大小相同。(磁區:" -"%(volume_id)s)" - -msgid "The specified vdisk is mapped to a host." -msgstr "所指定的 vdisk 已對映至主機。" - -msgid "The specified volume is mapped to a host." -msgstr "指定的磁區已對映至主機。" - -#, python-format -msgid "" -"The storage array password for %s is incorrect, please update the configured " -"password." -msgstr "%s 的儲存體陣列密碼不正確,請更新所配置的密碼。" - -#, python-format -msgid "The storage backend can be used. (config_group: %(config_group)s)" -msgstr "可以使用儲存體後端。(config_group:%(config_group)s)" - -#, python-format -msgid "" -"The storage device does not support %(prot)s. Please configure the device to " -"support %(prot)s or switch to a driver using a different protocol." 
-msgstr "" -"儲存裝置不支援 %(prot)s。請配置該裝置以支援 %(prot)s,或者切換至使用其他通訊" -"協定的驅動程式。" - -#, python-format -msgid "" -"The striped meta count of %(memberCount)s is too small for volume: " -"%(volumeName)s, with size %(volumeSize)s." -msgstr "" -"已分段的 meta 計數 %(memberCount)s 對下列磁區而言太小:%(volumeName)s,大小" -"為 %(volumeSize)s。" - -#, python-format -msgid "" -"The type of metadata: %(metadata_type)s for volume/snapshot %(id)s is " -"invalid." -msgstr "磁區/Snapshot %(id)s 的 meta 資料類型 %(metadata_type)s無效。" - -#, python-format -msgid "" -"The volume %(volume_id)s could not be extended. The volume type must be " -"Normal." -msgstr "無法延伸磁區 %(volume_id)s。磁區類型必須是「一般」。" - -#, python-format -msgid "" -"The volume %(volume_id)s could not be unmanaged. The volume type must be " -"%(volume_type)s." -msgstr "無法取消管理磁區 %(volume_id)s。磁區類型必須為%(volume_type)s。" - -#, python-format -msgid "The volume %(volume_id)s is managed successfully. (LDEV: %(ldev)s)" -msgstr "已順利管理磁區 %(volume_id)s。(LDEV:%(ldev)s)" - -#, python-format -msgid "The volume %(volume_id)s is unmanaged successfully. (LDEV: %(ldev)s)" -msgstr "已順利取消管理磁區 %(volume_id)s。(LDEV:%(ldev)s)" - -#, python-format -msgid "The volume %(volume_id)s to be mapped was not found." -msgstr "找不到要對映的磁區 %(volume_id)s。" - -msgid "The volume cannot accept transfer in maintenance mode." -msgstr "磁區無法在維護模式下接受傳送。" - -msgid "The volume cannot be attached in maintenance mode." -msgstr "無法在維護模式下連接磁區。" - -msgid "The volume cannot be detached in maintenance mode." -msgstr "無法在維護模式下分離磁區。" - -msgid "The volume cannot be updated during maintenance." -msgstr "維護期間,無法更新磁區。" - -msgid "The volume connection cannot be initialized in maintenance mode." -msgstr "無法在維護模式下起始設定磁區連線。" - -msgid "The volume driver requires the iSCSI initiator name in the connector." -msgstr "磁區驅動程式需要連接器中的 iSCSI 起始器名稱。" - -msgid "" -"The volume is currently busy on the 3PAR and cannot be deleted at this time. " -"You can try again later." 
-msgstr "磁區目前在 3PAR 上正忙,因此此時無法將其刪除。您可以稍後重試。" - -msgid "The volume label is required as input." -msgstr "需要磁區標籤作為輸入。" - -#, python-format -msgid "There are no resources available for use. (resource: %(resource)s)" -msgstr "沒有資源可供使用。(資源:%(resource)s)" - -msgid "There are no valid ESX hosts." -msgstr "沒有有效的 ESX 主機。" - -msgid "There are no valid datastores." -msgstr "沒有有效的資料儲存庫。" - -#, python-format -msgid "" -"There is no designation of the %(param)s. The specified storage is essential " -"to manage the volume." -msgstr "未指定 %(param)s。指定的儲存體對於管理磁區必不可少。" - -msgid "" -"There is no designation of the ldev. The specified ldev is essential to " -"manage the volume." -msgstr "未指定 LDEV。指定的 LDEV對於管理磁區必不可少。" - -msgid "There is no metadata in DB object." -msgstr "資料庫物件中沒有 meta 資料。" - -#, python-format -msgid "There is no share which can host %(volume_size)sG" -msgstr "沒有共用可以管理 %(volume_size)sG" - -#, python-format -msgid "There is no share which can host %(volume_size)sG." -msgstr "沒有共用項目可以管理 %(volume_size)s G。" - -#, python-format -msgid "There is no such action: %s" -msgstr "沒有這樣的動作:%s" - -msgid "There is no virtual disk device." -msgstr "沒有虛擬磁碟裝置。" - -#, python-format -msgid "There was an error adding the volume to the remote copy group: %s." -msgstr "將磁區新增至遠端複製群組時發生錯誤:%s。" - -#, python-format -msgid "There was an error creating the cgsnapshot: %s" -msgstr "建立 cgsnapshot 時發生錯誤:%s" - -#, python-format -msgid "There was an error creating the remote copy group: %s." -msgstr "建立遠端複製群組時發生錯誤:%s。" - -#, python-format -msgid "" -"There was an error setting the sync period for the remote copy group: %s." -msgstr "設定遠端複製群組的同步週期時發生錯誤:%s。" - -#, python-format -msgid "" -"There was an error setting up a remote copy group on the 3PAR arrays: " -"('%s'). The volume will not be recognized as replication type." -msgstr "" -"在 3PAR 陣列上設定遠端複製群組時發生錯誤:('%s')。該磁區將不作為抄寫類型予以" -"辨識。" - -#, python-format -msgid "" -"There was an error setting up a remote schedule on the LeftHand arrays: " -"('%s'). 
The volume will not be recognized as replication type." -msgstr "" -"在 LeftHand 陣列上設定遠端排程時發生錯誤:('%s')。該磁區將不作為抄寫類型予以" -"辨識。" - -#, python-format -msgid "There was an error starting remote copy: %s." -msgstr "啟動遠端複製時發生錯誤:%s。" - -#, python-format -msgid "There's no Gluster config file configured (%s)" -msgstr "未配置任何 Gluster 配置檔 (%s)" - -#, python-format -msgid "There's no NFS config file configured (%s)" -msgstr "未配置任何 NFS 配置檔 (%s)" - -#, python-format -msgid "" -"There's no Quobyte volume configured (%s). Example: quobyte:///" -"" -msgstr "" -"沒有已配置的 Quobyte 磁區 (%s)。範例:quobyte:///" - -msgid "Thin provisioning not supported on this version of LVM." -msgstr "此版本的 LVM 不支援精簡供應。" - -msgid "This driver does not support deleting in-use snapshots." -msgstr "此驅動程式不支援刪除使用中 Snapshot。" - -msgid "This driver does not support snapshotting in-use volumes." -msgstr "此驅動程式不支援對使用中磁區建立 Snapshot。" - -msgid "This request was rate-limited." -msgstr "此要求存在頻率限制。" - -#, python-format -msgid "" -"This system platform (%s) is not supported. This driver supports only Win32 " -"platforms." -msgstr "此系統平台 (%s) 不受支援。此驅動程式僅支援Win32 平台。" - -#, python-format -msgid "Tier Policy Service not found for %(storageSystemName)s." -msgstr "找不到 %(storageSystemName)s 的「層級原則服務」。" - -#, python-format -msgid "Timed out while waiting for Nova update for creation of snapshot %s." -msgstr "等待 Nova 更新以建立 Snapshot %s 時發生逾時。" - -#, python-format -msgid "" -"Timed out while waiting for Nova update for deletion of snapshot %(id)s." -msgstr "等待 Nova 更新以刪除 Snapshot %(id)s 時發生逾時。" - -#, python-format -msgid "Timeout while calling %s " -msgstr "呼叫 %s 時逾時" - -#, python-format -msgid "Timeout while requesting %(service)s API." -msgstr "要求 %(service)s API 時發生逾時。" - -#, python-format -msgid "Timeout while requesting capabilities from backend %(service)s." -msgstr "要求後端 %(service)s 中的功能時逾時。" - -#, python-format -msgid "Transfer %(transfer_id)s could not be found." 
-msgstr "找不到傳送 %(transfer_id)s。" - -#, python-format -msgid "" -"Transfer %(transfer_id)s: Volume id %(volume_id)s in unexpected state " -"%(status)s, expected awaiting-transfer" -msgstr "" -"傳送 %(transfer_id)s:磁區 ID %(volume_id)s 處於非預期的狀態%(status)s,預期" -"狀態為 awaiting-transfer" - -#, python-format -msgid "" -"Trying to import backup metadata from id %(meta_id)s into backup %(id)s." -msgstr "正在嘗試將備份 meta 資料從 ID %(meta_id)s 匯入到備份 %(id)s 中。" - -#, python-format -msgid "" -"Tune volume task stopped before it was done: volume_name=%(volume_name)s, " -"task-status=%(status)s." -msgstr "" -"調整磁區作業已停止,因為該作業已完成:volume_name=%(volume_name)s,task-" -"status=%(status)s。" - -#, python-format -msgid "" -"Type %(type_id)s is already associated with another qos specs: " -"%(qos_specs_id)s" -msgstr "類型 %(type_id)s 已經與另一個服務品質規格%(qos_specs_id)s 產生關聯" - -msgid "Type access modification is not applicable to public volume type." -msgstr "類型存取修訂不適用公用磁區類型。" - -msgid "Type cannot be converted into NaElement." -msgstr "無法將類型轉換為 NaElement。" - -#, python-format -msgid "TypeError: %s" -msgstr "TypeError:%s" - -#, python-format -msgid "UUIDs %s are in both add and remove volume list." -msgstr "UUID %s 同時位於新增和移除磁區清單中。" - -#, python-format -msgid "Unable to access the Storwize back-end for volume %s." -msgstr "無法存取磁區 %s 的 Storwize 後端。" - -msgid "Unable to access the backend storage via file handle." -msgstr "無法透過檔案控點來存取後端儲存體。" - -#, python-format -msgid "Unable to access the backend storage via the path %(path)s." -msgstr "無法透過路徑 %(path)s 來存取後端儲存體。" - -#, python-format -msgid "Unable to add Cinder host to apphosts for space %(space)s" -msgstr "無法將 Cinder 主機新增至空間 %(space)s 的應用程式主機" - -#, python-format -msgid "Unable to complete failover of %s." -msgstr "無法完成 %s 的失效接手。" - -msgid "Unable to connect or find connection to host" -msgstr "無法連接至主機或找不到與主機的連線" - -#, python-format -msgid "Unable to create consistency group %s" -msgstr "無法建立一致性群組 %s" - -msgid "Unable to create lock. 
Coordination backend not started." -msgstr "無法建立鎖定。協調後端未啟動。" - -#, python-format -msgid "" -"Unable to create or get default storage group for FAST policy: " -"%(fastPolicyName)s." -msgstr "無法建立或取得下列 FAST 原則的預設儲存體群組:%(fastPolicyName)s。" - -#, python-format -msgid "Unable to create replica clone for volume %s." -msgstr "無法建立磁區 %s 的抄本副本。" - -#, python-format -msgid "Unable to create the relationship for %s." -msgstr "無法建立 %s 的關係。" - -#, python-format -msgid "Unable to create volume %(name)s from %(snap)s." -msgstr "無法從 %(snap)s 建立磁區 %(name)s。" - -#, python-format -msgid "Unable to create volume %(name)s from %(vol)s." -msgstr "無法從 %(vol)s 建立磁區 %(name)s。" - -#, python-format -msgid "Unable to create volume %s" -msgstr "無法建立磁區 %s" - -msgid "Unable to create volume. Backend down." -msgstr "無法建立磁區。後端已關閉。" - -#, python-format -msgid "Unable to delete Consistency Group snapshot %s" -msgstr "無法刪除一致性群組 Snapshot %s" - -#, python-format -msgid "Unable to delete snapshot %(id)s, status: %(status)s." -msgstr "無法刪除 Snapshot %(id)s,狀態:%(status)s。" - -#, python-format -msgid "Unable to delete snapshot policy on volume %s." -msgstr "無法刪除磁區 %s 上的 Snapshot 原則。" - -#, python-format -msgid "" -"Unable to delete the target volume for volume %(vol)s. Exception: %(err)s." -msgstr "無法刪除磁區 %(vol)s 的目標磁區。異常狀況:%(err)s。" - -msgid "" -"Unable to detach volume. Volume status must be 'in-use' and attach_status " -"must be 'attached' to detach." -msgstr "" -"無法分離磁區。磁區狀態必須是「使用中」,並且 attach_status必須是「已連接」才" -"能進行分離。" - -#, python-format -msgid "" -"Unable to determine secondary_array from supplied secondary: %(secondary)s." -msgstr "無法判定來自所提供之次要項目的 secondary_array:%(secondary)s。" - -#, python-format -msgid "Unable to determine snapshot name in Purity for snapshot %(id)s." -msgstr "針對 Snapshot %(id)s,無法判定純度中的 Snapshot 名稱。" - -msgid "Unable to determine system id." -msgstr "無法判定系統 ID。" - -msgid "Unable to determine system name." 
-msgstr "無法判定系統名稱。" - -#, python-format -msgid "" -"Unable to do manage snapshot operations with Purity REST API version " -"%(api_version)s, requires %(required_versions)s." -msgstr "" -"無法使用純度 REST API 版本 %(api_version)s 來執行管理 Snapshot 作業需要 " -"%(required_versions)s。" - -#, python-format -msgid "" -"Unable to do replication with Purity REST API version %(api_version)s, " -"requires one of %(required_versions)s." -msgstr "" -"無法使用純度 REST API %(api_version)s 版來執行抄寫作業,需要下列其中一個:" -"%(required_versions)s。" - -#, python-format -msgid "Unable to establish the partnership with the Storwize cluster %s." -msgstr "無法與 Storwize 叢集 %s 建立夥伴關係。" - -#, python-format -msgid "Unable to extend volume %s" -msgstr "無法延伸磁區 %s" - -#, python-format -msgid "" -"Unable to fail-over the volume %(id)s to the secondary back-end, because the " -"replication relationship is unable to switch: %(error)s" -msgstr "無法將磁區 %(id)s 失效接手至次要後端,因為抄寫關係無法切換:%(error)s" - -msgid "" -"Unable to failback to \"default\", this can only be done after a failover " -"has completed." -msgstr "無法失效回復至「預設」,只有在失效接手完成之後才能執行此作業。" - -#, python-format -msgid "Unable to failover to replication target:%(reason)s)." -msgstr "無法失效接手至抄寫目標:%(reason)s。" - -msgid "Unable to fetch connection information from backend." -msgstr "無法從後端提取連線資訊。" - -#, python-format -msgid "Unable to fetch connection information from backend: %(err)s" -msgstr "無法從後端提取連線資訊:%(err)s" - -#, python-format -msgid "Unable to find Purity ref with name=%s" -msgstr "找不到名稱為 %s 的純度參照" - -#, python-format -msgid "Unable to find Volume Group: %(vg_name)s" -msgstr "找不到磁區群組:%(vg_name)s" - -msgid "Unable to find failover target, no secondary targets configured." -msgstr "找不到失效接手目標,未配置次要目標。" - -msgid "Unable to find iSCSI mappings." -msgstr "找不到 iSCSI 對映。" - -#, python-format -msgid "Unable to find ssh_hosts_key_file: %s" -msgstr "找不到 ssh_hosts_key_file:%s" - -msgid "Unable to find system log file!" -msgstr "找不到系統日誌檔!" 
- -#, python-format -msgid "" -"Unable to find viable pg snapshot to use forfailover on selected secondary " -"array: %(id)s." -msgstr "找不到可行的 pg Snapshot 以用於選定次要陣列上的失效接手:%(id)s。" - -#, python-format -msgid "" -"Unable to find viable secondary array fromconfigured targets: %(targets)s." -msgstr "找不到來自已配置目標的可行次要陣列:%(targets)s。" - -#, python-format -msgid "Unable to find volume %s" -msgstr "找不到磁區 %s" - -#, python-format -msgid "Unable to get a block device for file '%s'" -msgstr "無法取得檔案 '%s' 的區塊裝置" - -#, python-format -msgid "" -"Unable to get configuration information necessary to create a volume: " -"%(errorMessage)s." -msgstr "無法取得建立磁區所需的配置資訊:%(errorMessage)s。" - -msgid "Unable to get corresponding record for pool." -msgstr "無法取得儲存區的對應記錄。" - -#, python-format -msgid "" -"Unable to get information on space %(space)s, please verify that the cluster " -"is running and connected." -msgstr "無法取得空間 %(space)s 的相關資訊,請驗證叢集是否在執行中且已連接。" - -msgid "" -"Unable to get list of IP addresses on this host, check permissions and " -"networking." -msgstr "無法取得此主機上 IP 位址的清單,請檢查許可權和網路。" - -msgid "" -"Unable to get list of domain members, check that the cluster is running." -msgstr "無法取得網域成員的清單,請檢查叢集是否在執行中。" - -msgid "" -"Unable to get list of spaces to make new name. Please verify the cluster is " -"running." -msgstr "無法取得空間清單以建立新名稱。請驗證叢集是否在執行中。" - -#, python-format -msgid "Unable to get stats for backend_name: %s" -msgstr "無法取得 backend_name 的統計資料:%s" - -msgid "Unable to get storage volume from job." -msgstr "無法從工作中取得儲存磁區。" - -#, python-format -msgid "Unable to get target endpoints for hardwareId %(hardwareIdInstance)s." -msgstr "無法取得 hardwareId %(hardwareIdInstance)s 的目標端點。" - -msgid "Unable to get the name of the masking view." -msgstr "無法取得遮罩視圖的名稱。" - -msgid "Unable to get the name of the portgroup." -msgstr "無法取得埠群組的名稱。" - -#, python-format -msgid "Unable to get the replication relationship for volume %s." 
-msgstr "無法取得磁區 %s 的抄寫關係。" - -#, python-format -msgid "" -"Unable to import volume %(deviceId)s to cinder. It is the source volume of " -"replication session %(sync)s." -msgstr "" -"無法將磁區 %(deviceId)s 匯入 Cinder。該磁區是下列抄寫階段作業的來源磁區:" -"%(sync)s。" - -#, python-format -msgid "" -"Unable to import volume %(deviceId)s to cinder. The external volume is not " -"in the pool managed by current cinder host." -msgstr "" -"無法將磁區 %(deviceId)s 匯入 Cinder。外部磁區不在由現行 Cinder 主機管理的儲存" -"區中。" - -#, python-format -msgid "" -"Unable to import volume %(deviceId)s to cinder. Volume is in masking view " -"%(mv)s." -msgstr "無法將磁區 %(deviceId)s 匯入 Cinder。磁區正在遮罩視圖%(mv)s。" - -#, python-format -msgid "Unable to load CA from %(cert)s %(e)s." -msgstr "無法從 %(cert)s %(e)s 載入 CA。" - -#, python-format -msgid "Unable to load cert from %(cert)s %(e)s." -msgstr "無法從 %(cert)s %(e)s 載入憑證。" - -#, python-format -msgid "Unable to load key from %(cert)s %(e)s." -msgstr "無法從 %(cert)s %(e)s 載入金鑰。" - -#, python-format -msgid "Unable to locate account %(account_name)s on Solidfire device" -msgstr "在 SolidFire 裝置上找不到帳戶 %(account_name)s" - -#, python-format -msgid "Unable to locate an SVM that is managing the IP address '%s'" -msgstr "找不到管理 IP 位址 '%s' 的 SVM" - -#, python-format -msgid "Unable to locate specified replay profiles %s " -msgstr "找不到指定的重播設定檔 %s " - -#, python-format -msgid "" -"Unable to manage existing volume. Volume %(volume_ref)s already managed." -msgstr "無法管理現有磁區。磁區 %(volume_ref)s 已經受管理。" - -#, python-format -msgid "Unable to manage volume %s" -msgstr "無法管理磁區 %s" - -msgid "Unable to map volume" -msgstr "無法對映磁區" - -msgid "Unable to map volume." -msgstr "無法對映磁區。" - -msgid "Unable to parse attributes." -msgstr "無法剖析屬性。" - -#, python-format -msgid "" -"Unable to promote replica to primary for volume %s. No secondary copy " -"available." 
-msgstr "無法將磁區 %s 抄本提升為主要副本。沒有次要副本可用。" - -msgid "" -"Unable to re-use a host that is not managed by Cinder with " -"use_chap_auth=True," -msgstr "無法重複使用未受 Cinder 管理且use_chap_auth=True 的主機。" - -msgid "Unable to re-use host with unknown CHAP credentials configured." -msgstr "無法重複使用配置有不明 CHAP 認證的主機。" - -#, python-format -msgid "Unable to rename volume %(existing)s to %(newname)s" -msgstr "無法將磁區 %(existing)s 重新命名為 %(newname)s" - -#, python-format -msgid "Unable to retrieve snapshot group with id of %s." -msgstr "無法擷取 ID 為 %s 的 Snapshot 群組。" - -#, python-format -msgid "" -"Unable to retype %(specname)s, expected to receive current and requested " -"%(spectype)s values. Value received: %(spec)s" -msgstr "" -"無法對 %(specname)s 執行 Retype 作業,預期接收現行及要求的 %(spectype)s 值。" -"接收到的值:%(spec)s" - -#, python-format -msgid "" -"Unable to retype: A copy of volume %s exists. Retyping would exceed the " -"limit of 2 copies." -msgstr "" -"無法執行 Retype 動作:存在磁區 %s 的副本。如果執行 Retype 動作,則將超過2 份" -"副本的限制。" - -#, python-format -msgid "" -"Unable to retype: Current action needs volume-copy, it is not allowed when " -"new type is replication. Volume = %s" -msgstr "" -"無法執行 Retype 動作:現行動作需要磁區複製,但當新類型為抄寫時,不容許這樣" -"做。磁區為 %s" - -#, python-format -msgid "" -"Unable to set up mirror mode replication for %(vol)s. Exception: %(err)s." -msgstr "無法設定 %(vol)s 的鏡映模式抄寫。異常狀況:%(err)s。" - -#, python-format -msgid "Unable to snap Consistency Group %s" -msgstr "無法貼齊一致性群組 %s" - -msgid "Unable to terminate volume connection from backend." -msgstr "無法從後端終止磁區連線。" - -#, python-format -msgid "Unable to terminate volume connection: %(err)s" -msgstr "無法終止磁區連線:%(err)s" - -#, python-format -msgid "Unable to update consistency group %s" -msgstr "無法更新一致性群組 %s" - -#, python-format -msgid "" -"Unable to verify initiator group: %(igGroupName)s in masking view " -"%(maskingViewName)s. " -msgstr "" -"無法在下列遮罩視圖中驗證起始器群組 %(igGroupName)s:%(maskingViewName)s。" - -msgid "Unacceptable parameters." 
-msgstr "不可接受的參數值" - -#, python-format -msgid "" -"Unexecpted mapping status %(status)s for mapping %(id)s. Attributes: " -"%(attr)s." -msgstr "對映 %(id)s 的對映狀態 %(status)s 不符合預期。屬性:%(attr)s。" - -#, python-format -msgid "" -"Unexpected CLI response: header/row mismatch. header: %(header)s, row: " -"%(row)s." -msgstr "非預期的 CLI 回應:標頭/列不符。標頭:%(header)s、列:%(row)s。" - -#, python-format -msgid "" -"Unexpected mapping status %(status)s for mapping%(id)s. Attributes: %(attr)s." -msgstr "對映 %(id)s 的對映狀態 %(status)s 不符合預期。屬性:%(attr)s。" - -#, python-format -msgid "Unexpected output. Expected [%(expected)s] but received [%(output)s]" -msgstr "非預期的輸出。預期 [%(expected)s],但卻接收到 [%(output)s]" - -msgid "Unexpected response from Nimble API" -msgstr "來自 Nimble API 的非預期回應" - -msgid "Unexpected response from Tegile IntelliFlash API" -msgstr "來自 Tegile IntelliFlash API 的非預期回應" - -msgid "Unexpected status code" -msgstr "非預期的狀態碼" - -#, python-format -msgid "" -"Unexpected status code from the switch %(switch_id)s with protocol " -"%(protocol)s for url %(page)s. Error: %(error)s" -msgstr "" -"針對 URL %(page)s,從使用通訊協定 %(protocol)s 的交換器 %(switch_id)s 傳回非" -"預期的狀態碼。錯誤:%(error)s" - -msgid "Unknown Gluster exception" -msgstr "不明的 Gluster 異常狀況" - -msgid "Unknown NFS exception" -msgstr "不明的 NFS 異常狀況" - -msgid "Unknown RemoteFS exception" -msgstr "不明的 RemoteFS 異常狀況" - -msgid "Unknown SMBFS exception." -msgstr "不明的 SMBFS 異常狀況。" - -msgid "Unknown Virtuozzo Storage exception" -msgstr "「不明 Virtuozzo 儲存體」異常狀況" - -msgid "Unknown action" -msgstr "不明動作" - -#, python-format -msgid "" -"Unknown if the volume: %s to be managed is already being managed by Cinder. " -"Aborting manage volume. Please add 'cinder_managed' custom schema property " -"to the volume and set its value to False. Alternatively, Set the value of " -"cinder config policy 'zfssa_manage_policy' to 'loose' to remove this " -"restriction." 
-msgstr "" -"如果要管理的磁區 %s 已經由 Cinder 進行管理,則是不明情況。正在終止管理磁區。" -"請將 'cinder_managed' 自訂綱目內容新增至該磁區,並將它的值設為 False。或者," -"將 Cinder 配置原則 'zfssa_manage_policy' 的值設為 'loose',以移除此限制。" - -#, python-format -msgid "" -"Unknown if the volume: %s to be managed is already being managed by Cinder. " -"Aborting manage volume. Please add 'cinder_managed' custom schema property " -"to the volume and set its value to False. Alternatively, set the value of " -"cinder config policy 'zfssa_manage_policy' to 'loose' to remove this " -"restriction." -msgstr "" -"如果要管理的磁區 %s 已經由 Cinder 進行管理,則是不明情況。正在終止管理磁區。" -"請將 'cinder_managed' 自訂綱目內容新增至該磁區,並將它的值設為 False。或者," -"將 Cinder 配置原則 'zfssa_manage_policy' 的值設為 'loose',以移除此限制。" - -#, python-format -msgid "Unknown operation %s." -msgstr "不明作業 %s。" - -#, python-format -msgid "Unknown or unsupported command %(cmd)s" -msgstr "不明或不支援的指令 %(cmd)s" - -#, python-format -msgid "Unknown protocol: %(protocol)s." -msgstr "不明的通訊協定:%(protocol)s。" - -#, python-format -msgid "Unknown quota resources %(unknown)s." -msgstr "不明的配額資源 %(unknown)s。" - -msgid "Unknown sort direction, must be 'desc' or 'asc'" -msgstr "不明的排序方向,必須為 'desc' 或 'asc'" - -msgid "Unknown sort direction, must be 'desc' or 'asc'." -msgstr "不明的排序方向,必須為 'desc' 或 'asc'。" - -msgid "Unmanage and cascade delete options are mutually exclusive." -msgstr "取消管理與連鎖刪除選項是互斥的。" - -msgid "Unmanage volume not implemented." -msgstr "未實作取消管理磁區。" - -msgid "Unmanaging of snapshots from 'failed-over' volumes is not allowed." -msgstr "不容許取消管理「已失效接手」之磁區中的 Snapshot。" - -msgid "Unmanaging of snapshots from failed-over volumes is not allowed." 
-msgstr "不容許取消管理已失效接手之磁區中的 Snapshot。" - -#, python-format -msgid "Unrecognized QOS keyword: \"%s\"" -msgstr "無法辨識的服務品質關鍵字:\"%s\"" - -#, python-format -msgid "Unrecognized backing format: %s" -msgstr "無法辨識的備用格式:%s" - -#, python-format -msgid "Unrecognized read_deleted value '%s'" -msgstr "無法辨識 read_deleted 值 '%s'" - -#, python-format -msgid "Unset gcs options: %s" -msgstr "取消設定 gcs 選項:%s" - -msgid "Unsupported Content-Type" -msgstr "不支援的內容類型" - -msgid "" -"Unsupported Data ONTAP version. Data ONTAP version 7.3.1 and above is " -"supported." -msgstr "資料 ONTAP 版本不受支援。支援資料 ONTAP 7.3.1 版以及更高版本。" - -#, python-format -msgid "Unsupported backup metadata version (%s)" -msgstr "不支援的備份 meta 資料版本 (%s)" - -msgid "Unsupported backup metadata version requested" -msgstr "所要求的備份 meta 資料版本不受支援" - -msgid "Unsupported backup verify driver" -msgstr "不受支援的備份驗證驅動程式" - -#, python-format -msgid "" -"Unsupported firmware on switch %s. Make sure switch is running firmware v6.4 " -"or higher" -msgstr "交換器 %s 上的韌體不受支援。請確保交換器正在執行韌體6.4 版或更高版本" - -#, python-format -msgid "Unsupported volume format: %s " -msgstr "不受支援的磁區格式:%s" - -msgid "Update QoS policy error." -msgstr "更新服務品質原則時發生錯誤。" - -msgid "" -"Update and delete quota operations can only be made by an admin of immediate " -"parent or by the CLOUD admin." -msgstr "更新和刪除配額作業只能由原生母項的管理者或CLOUD 管理者來執行。" - -msgid "" -"Update and delete quota operations can only be made to projects in the same " -"hierarchy of the project in which users are scoped to." -msgstr "更新和刪除配額作業只能對將使用者限定範圍之相同專案階層中的專案執行。" - -msgid "Update list, doesn't include volume_id" -msgstr "更新清單,不包含 volume_id" - -msgid "Updated At" -msgstr "已更新" - -msgid "Upload to glance of attached volume is not supported." -msgstr "不支援將所連接的磁區上傳至 Glance。" - -msgid "Use ALUA to associate initiator to host error." -msgstr "使用 ALUA 將起始器關聯至主機時發生錯誤。" - -msgid "" -"Use CHAP to associate initiator to host error. Please check the CHAP " -"username and password." 
-msgstr "使用 CHAP 將起始器關聯至主機時發生錯誤。請檢查 CHAP 使用者名稱及密碼。" - -msgid "User ID" -msgstr "使用者識別號" - -msgid "User does not have admin privileges" -msgstr "使用者並沒有管理者權力" - -msgid "User not authorized to perform WebDAV operations." -msgstr "使用者未獲授權來執行 WebDAV 作業。" - -msgid "UserName is not configured." -msgstr "未配置 UserName。" - -msgid "UserPassword is not configured." -msgstr "未配置 UserPassword。" - -msgid "V2 rollback, volume is not in any storage group." -msgstr "第 2 版回復,磁區不在任何儲存體群組中。" - -msgid "V3 rollback" -msgstr "第 3 版回復" - -msgid "VF is not enabled." -msgstr "未啟用 VF。" - -#, python-format -msgid "VV Set %s does not exist." -msgstr "「VV 集」%s 不存在。" - -#, python-format -msgid "Valid consumer of QoS specs are: %s" -msgstr "服務品質規格的有效消費者為:%s" - -#, python-format -msgid "Valid control location are: %s" -msgstr "有效的控制項位置為:%s" - -#, python-format -msgid "Validate volume connection failed (error: %(err)s)." -msgstr "驗證磁區連線失敗(錯誤:%(err)s)。" - -#, python-format -msgid "" -"Value \"%(value)s\" is not valid for configuration option \"%(option)s\"" -msgstr "值 \"%(value)s\" 不適用於配置選項 \"%(option)s\"" - -#, python-format -msgid "Value %(param)s for %(param_string)s is not a boolean." -msgstr "%(param_string)s 的值 %(param)s 不是布林值。" - -msgid "Value required for 'scality_sofs_config'" -msgstr "'scality_sofs_config' 需要值" - -#, python-format -msgid "ValueError: %s" -msgstr "ValueError:%s" - -#, python-format -msgid "Vdisk %(name)s not involved in mapping %(src)s -> %(tgt)s." -msgstr "從 %(src)s 到 %(tgt)s 的對映未涉及到 vdisk %(name)s。" - -#, python-format -msgid "" -"Version %(req_ver)s is not supported by the API. Minimum is %(min_ver)s and " -"maximum is %(max_ver)s." -msgstr "API 不支援 %(req_ver)s 版。下限為 %(min_ver)s,上限為 %(max_ver)s。" - -#, python-format -msgid "VersionedObject %s cannot retrieve object by id." -msgstr "VersionedObject %s 無法依 ID 擷取物件。" - -#, python-format -msgid "VersionedObject %s does not support conditional update." 
-msgstr "VersionedObject %s 不支援條件式更新。" - -#, python-format -msgid "Virtual volume '%s' doesn't exist on array." -msgstr "虛擬磁區 '%s' 不在陣列上。" - -#, python-format -msgid "Vol copy job for dest %s failed." -msgstr "對目的地 %s 執行磁區複製工作時失敗。" - -#, python-format -msgid "Volume %(deviceID)s not found." -msgstr "找不到磁區 %(deviceID)s。" - -#, python-format -msgid "" -"Volume %(name)s not found on the array. Cannot determine if there are " -"volumes mapped." -msgstr "在陣列上找不到磁區 %(name)s。無法判定是否有已對映的磁區。" - -#, python-format -msgid "Volume %(name)s was created in VNX, but in %(state)s state." -msgstr "已在 VNX 中建立磁區 %(name)s,但該磁區處於 %(state)s 狀態。" - -#, python-format -msgid "Volume %(vol)s could not be created in pool %(pool)s." -msgstr "無法在儲存區 %(pool)s 中建立磁區 %(vol)s。" - -#, python-format -msgid "Volume %(vol1)s does not match with snapshot.volume_id %(vol2)s." -msgstr "磁區 %(vol1)s 與 snapshot.volume_id %(vol2)s 不符。" - -#, python-format -msgid "" -"Volume %(vol_id)s status must be available to update readonly flag, but " -"current status is: %(vol_status)s." -msgstr "" -"磁區 %(vol_id)s 狀態必須為可用,才能更新唯讀旗標,但現行狀態為:" -"%(vol_status)s。" - -#, python-format -msgid "" -"Volume %(vol_id)s status must be available, but current status is: " -"%(vol_status)s." -msgstr "磁區 %(vol_id)s 狀態必須為可用,但是現行狀態為:%(vol_status)s。" - -#, python-format -msgid "Volume %(volume_id)s could not be found." -msgstr "找不到磁區 %(volume_id)s。" - -#, python-format -msgid "" -"Volume %(volume_id)s has no administration metadata with key " -"%(metadata_key)s." -msgstr "磁區 %(volume_id)s 沒有索引鍵為%(metadata_key)s 的管理 meta 資料。" - -#, python-format -msgid "Volume %(volume_id)s has no metadata with key %(metadata_key)s." 
-msgstr "磁區 %(volume_id)s 沒有索引鍵為 %(metadata_key)s 的 meta 資料。" - -#, python-format -msgid "" -"Volume %(volume_id)s is currently mapped to unsupported host group %(group)s" -msgstr "磁區 %(volume_id)s 目前已對映至不受支援的主機群組 %(group)s" - -#, python-format -msgid "Volume %(volume_id)s is not currently mapped to host %(host)s" -msgstr "磁區 %(volume_id)s 目前未對映至主機 %(host)s" - -#, python-format -msgid "Volume %(volume_id)s is still attached, detach volume first." -msgstr "磁區 %(volume_id)s 仍處於連接狀態,請先將磁區分離。" - -#, python-format -msgid "Volume %(volume_id)s replication error: %(reason)s" -msgstr "磁區 %(volume_id)s 抄寫錯誤:%(reason)s" - -#, python-format -msgid "Volume %(volume_name)s is busy." -msgstr "磁區 %(volume_name)s 繁忙。" - -#, python-format -msgid "Volume %s could not be created from source volume." -msgstr "無法從來源磁區建立磁區 %s。" - -#, python-format -msgid "Volume %s could not be created on shares." -msgstr "無法在共用上建立磁區 %s。" - -#, python-format -msgid "Volume %s could not be created." -msgstr "無法建立磁區 %s。" - -#, python-format -msgid "Volume %s does not exist in Nexenta SA" -msgstr "磁區 %s 不存在於 Nexenta SA 中" - -#, python-format -msgid "Volume %s does not exist in Nexenta Store appliance" -msgstr "磁區 %s 不存在於「Nexenta 儲存庫」軟體驅動裝置中" - -#, python-format -msgid "Volume %s does not exist on the array." -msgstr "磁區 %s 不在陣列上。" - -#, python-format -msgid "Volume %s does not have provider_location specified, skipping." -msgstr "沒有為磁區 %s 指定 provider_location,正在跳過。" - -#, python-format -msgid "Volume %s doesn't exist on array." -msgstr "磁區 %s 不在陣列上。" - -#, python-format -msgid "Volume %s doesn't exist on the ZFSSA backend." -msgstr "磁區 %s 不存在於 ZFSSA 後端上。" - -#, python-format -msgid "Volume %s is already managed by OpenStack." -msgstr "磁區 %s 已經由 OpenStack 進行管理。" - -#, python-format -msgid "" -"Volume %s is not of replicated type. This volume needs to be of a volume " -"type with the extra spec replication_enabled set to ' True' to support " -"replication actions." 
-msgstr "" -"磁區 %s 不是已抄寫的類型。這個磁區需要是額外規格 replication_enabled 設為 " -"' True' 的磁區類型,才能支援抄寫動作。" - -#, python-format -msgid "" -"Volume %s is online. Set volume to offline for managing using OpenStack." -msgstr "磁區 %s 在線上。請將磁區設為離線,以使用 OpenStack 進行管理。" - -#, python-format -msgid "Volume %s must not be part of a consistency group." -msgstr "磁區 %s 不得為一致性群組的一部分。" - -#, python-format -msgid "Volume %s not found." -msgstr "找不到磁區 %s。" - -#, python-format -msgid "Volume %s: Error trying to extend volume" -msgstr "磁區 %s:嘗試延伸磁區時發生錯誤" - -#, python-format -msgid "Volume (%s) already exists on array" -msgstr "陣列上已存在磁區 (%s)" - -#, python-format -msgid "Volume (%s) already exists on array." -msgstr "陣列上已存在磁區 (%s)。" - -#, python-format -msgid "Volume Group %s does not exist" -msgstr "磁區群組 %s 不存在" - -#, python-format -msgid "Volume Type %(id)s already exists." -msgstr "磁區類型 %(id)s 已存在。" - -#, python-format -msgid "" -"Volume Type %(volume_type_id)s deletion is not allowed with volumes present " -"with the type." -msgstr "磁區類型 %(volume_type_id)s 刪除作業,不為該類型的磁區所接受。" - -#, python-format -msgid "" -"Volume Type %(volume_type_id)s has no extra specs with key " -"%(extra_specs_key)s." -msgstr "" -"磁區類型 %(volume_type_id)s 沒有索引鍵為 %(extra_specs_key)s 的額外規格。" - -msgid "Volume Type id must not be None." -msgstr "磁區類型 ID 不得為 None。" - -#, python-format -msgid "" -"Volume [%(cb_vol)s] was not found at CloudByte storage corresponding to " -"OpenStack volume [%(ops_vol)s]." -msgstr "" -"在對應於 OpenStack 磁區 [%(ops_vol)s] 的 CloudByte 儲存體處,找不到磁區 " -"[%(cb_vol)s]。" - -#, python-format -msgid "Volume [%s] not found in CloudByte storage." -msgstr "在 CloudByte 儲存體中找不到磁區 [%s]。" - -#, python-format -msgid "Volume attachment could not be found with filter: %(filter)s ." 
-msgstr "使用過濾器 %(filter)s 找不到磁區附件。" - -#, python-format -msgid "Volume backend config is invalid: %(reason)s" -msgstr "磁區後端配置無效:%(reason)s" - -msgid "Volume by this name already exists" -msgstr "具有此名稱的磁區已經存在" - -msgid "Volume cannot be restored since it contains snapshots." -msgstr "無法還原磁區,因為該磁區包含 Snapshot。" - -msgid "Volume create failed while extracting volume ref." -msgstr "擷取磁區參照時,磁區建立失敗。" - -#, python-format -msgid "Volume device file path %s does not exist." -msgstr "磁區裝置檔案路徑 %s 不存在。" - -#, python-format -msgid "Volume device not found at %(device)s." -msgstr "在 %(device)s 處找不到磁區裝置。" - -#, python-format -msgid "Volume driver %s not initialized." -msgstr "未起始設定磁區驅動程式 %s。" - -msgid "Volume driver not ready." -msgstr "磁區驅動程式未備妥。" - -#, python-format -msgid "Volume driver reported an error: %(message)s" -msgstr "磁區驅動程式報告了錯誤:%(message)s" - -msgid "Volume has a temporary snapshot that can't be deleted at this time." -msgstr "磁區具有目前無法刪除的暫時 Snapshot。" - -msgid "Volume has children and cannot be deleted!" -msgstr "磁區具有子項,且無法予以刪除!" - -#, python-format -msgid "Volume is attached to a server. (%s)" -msgstr "已將磁區連接至伺服器。(%s)" - -msgid "Volume is in-use." -msgstr "磁區在使用中。" - -msgid "Volume is not available." -msgstr "無法使用磁區。" - -msgid "Volume is not local to this node" -msgstr "磁區不是此節點的本端磁區" - -msgid "Volume is not local to this node." -msgstr "磁區不是此節點的本端磁區。" - -msgid "" -"Volume metadata backup requested but this driver does not yet support this " -"feature." -msgstr "已要求磁區 meta 資料備份,但此驅動程式尚不支援此功能。" - -#, python-format -msgid "Volume migration failed: %(reason)s" -msgstr "移轉磁區失敗:%(reason)s" - -msgid "Volume must be available" -msgstr "磁區必須可用" - -msgid "Volume must be in the same availability zone as the snapshot" -msgstr "磁區和 Snapshot 必須位在同一個可用性區域中" - -msgid "Volume must be in the same availability zone as the source volume" -msgstr "磁區和來源磁區必須位在同一個可用性區域中" - -msgid "Volume must have a volume type" -msgstr "磁區必須具有磁區類型" - -msgid "Volume must not be replicated." 
-msgstr "不得抄寫磁區。" - -msgid "Volume must not have snapshots." -msgstr "磁區不得具有 Snapshot。" - -#, python-format -msgid "Volume not found for instance %(instance_id)s." -msgstr "找不到實例 %(instance_id)s 的磁區。" - -msgid "Volume not found on configured storage backend." -msgstr "在所配置的儲存體後端系統上找不到磁區。" - -msgid "" -"Volume not found on configured storage backend. If your volume name contains " -"\"/\", please rename it and try to manage again." -msgstr "" -"在配置的儲存體後端上找不到磁區。如果磁區名稱包含 \"/\",請將其重命名,然後再" -"次嘗試進行管理。" - -msgid "Volume not found on configured storage pools." -msgstr "在所配置的儲存區上找不到磁區。" - -msgid "Volume not found." -msgstr "找不到磁區。" - -msgid "Volume not unique." -msgstr "磁區不是唯一的。" - -msgid "Volume not yet assigned to host." -msgstr "尚未將磁區指派給主機。" - -msgid "Volume reference must contain source-name element." -msgstr "磁區參照必須包含 source-name 元素。" - -#, python-format -msgid "Volume replication for %(volume_id)s could not be found." -msgstr "找不到 %(volume_id)s 的磁區抄寫。" - -#, python-format -msgid "Volume service %s failed to start." -msgstr "磁區服務 %s 無法啟動。" - -msgid "Volume should have agent-type set as None." -msgstr "磁區應該將代理程式類型設定為「無」。" - -#, python-format -msgid "" -"Volume size %(volume_size)sGB cannot be smaller than the image minDisk size " -"%(min_disk)sGB." -msgstr "" -"磁區大小 %(volume_size)s GB 不能小於映像檔 minDisk 大小 %(min_disk)s GB。" - -#, python-format -msgid "Volume size '%(size)s' must be an integer and greater than 0" -msgstr "磁區大小 '%(size)s' 必須是大於 0 的整數" - -#, python-format -msgid "" -"Volume size '%(size)s'GB cannot be smaller than original volume size " -"%(source_size)sGB. They must be >= original volume size." -msgstr "" -"磁區大小 '%(size)s' GB 不能小於原始磁區大小%(source_size)s GB。它們必須大於或" -"等於原始磁區大小。" - -#, python-format -msgid "" -"Volume size '%(size)s'GB cannot be smaller than the snapshot size " -"%(snap_size)sGB. They must be >= original snapshot size." 
-msgstr "" -"磁區大小 '%(size)s' GB 不能小於 Snapshot 大小%(snap_size)s GB。它們必須大於或" -"等於原始 Snapshot 大小。" - -msgid "Volume size increased since the last backup. Do a full backup." -msgstr "磁區大小自前次備份以來已增加。請執行完整備份。" - -msgid "Volume size must be a multiple of 1 GB." -msgstr "磁區大小必須是 1 GB 的倍數。" - -msgid "Volume size must multiple of 1 GB." -msgstr "磁區大小必須是 1 GB 的倍數。" - -#, python-format -msgid "Volume status must be \"available\" or \"in-use\" for snapshot. (is %s)" -msgstr "對於 Snapshot,磁區狀態必須為「可用」或「使用中」。(是 %s)" - -msgid "Volume status must be \"available\" or \"in-use\"." -msgstr "磁區狀態必須是「可用」或「使用中」。" - -#, python-format -msgid "Volume status must be %s to reserve." -msgstr "磁區狀態必須為 %s 才能保留。" - -msgid "Volume status must be 'available'." -msgstr "磁區狀態必須為「可用」。" - -msgid "Volume to Initiator Group mapping already exists" -msgstr "磁區至起始器群組的對映已存在" - -#, python-format -msgid "" -"Volume to be backed up must be available or in-use, but the current status " -"is \"%s\"." -msgstr "要備份的磁區必須處於可用或使用中狀態,但是現行狀態是 \"%s\"。" - -msgid "Volume to be restored to must be available" -msgstr "要還原至的磁區必須可用" - -#, python-format -msgid "Volume type %(volume_type_id)s could not be found." -msgstr "找不到磁區類型 %(volume_type_id)s。" - -#, python-format -msgid "Volume type ID '%s' is invalid." -msgstr "磁區類型 ID '%s' 無效。" - -#, python-format -msgid "" -"Volume type access for %(volume_type_id)s / %(project_id)s combination " -"already exists." -msgstr "%(volume_type_id)s / %(project_id)s 組合的磁區類型存取已存在。" - -#, python-format -msgid "" -"Volume type access not found for %(volume_type_id)s / %(project_id)s " -"combination." -msgstr "找不到 %(volume_type_id)s / %(project_id)s 組合的磁區類型存取。" - -#, python-format -msgid "Volume type encryption for type %(type_id)s already exists." -msgstr "類型 %(type_id)s 的磁區類型加密已存在。" - -#, python-format -msgid "Volume type encryption for type %(type_id)s does not exist." -msgstr "類型 %(type_id)s 的磁區類型加密不存在。" - -msgid "Volume type name can not be empty." 
-msgstr "磁區類型名稱不能為空。" - -#, python-format -msgid "Volume type with name %(volume_type_name)s could not be found." -msgstr "找不到名稱為 %(volume_type_name)s 的磁區類型。" - -#, python-format -msgid "" -"Volume: %(volumeName)s is not a concatenated volume. You can only perform " -"extend on concatenated volume. Exiting..." -msgstr "" -"磁區 %(volumeName)s 不是所連結的磁區。您只能對所連結的磁區執行「延伸」作業。" -"正在結束..." - -#, python-format -msgid "Volume: %(volumeName)s was not added to storage group %(sgGroupName)s." -msgstr "未將磁區 %(volumeName)s 新增至儲存體群組 %(sgGroupName)s。" - -#, python-format -msgid "Volume: %s is already being managed by Cinder." -msgstr "磁區 %s 已經由 Cinder 進行管理。" - -msgid "" -"Volumes/account exceeded on both primary and secondary SolidFire accounts." -msgstr "超過了主要及次要 SolidFire 帳戶上的磁區/帳戶。" - -#, python-format -msgid "" -"VzStorage config 'vzstorage_used_ratio' invalid. Must be > 0 and <= 1.0: %s." -msgstr "" -"VzStorage 配置 'vzstorage_used_ratio' 無效。必須大於 0 且小於或等於 1.0:%s。" - -#, python-format -msgid "VzStorage config file at %(config)s doesn't exist." -msgstr "%(config)s 處的 VzStorage 配置檔不存在。" - -msgid "Wait replica complete timeout." -msgstr "等待抄本完成時逾時。" - -#, python-format -msgid "Wait synchronize failed. Running status: %s." -msgstr "等待同步失敗。執行中狀態:%s。" - -msgid "" -"Waiting for all nodes to join cluster. Ensure all sheep daemons are running." -msgstr "正在等待所有節點結合叢集。請確保所有 sheep 常駐程式都在執行中。" - -msgid "We should not do switch over on primary array." -msgstr "我們不應切換到主要陣列上。" - -msgid "X-IO Volume Driver exception!" -msgstr "X-IO 磁區驅動程式異常狀況!" - -msgid "XtremIO not configured correctly, no iscsi portals found" -msgstr "未正確地配置 XtremIO,找不到 iSCSI 入口網站" - -msgid "XtremIO not initialized correctly, no clusters found" -msgstr "未正確地起始設定 XtremIO,找不到叢集" - -msgid "You must implement __call__" -msgstr "必須實作 __call__" - -msgid "" -"You must install hpe3parclient before using 3PAR drivers. Run \"pip install " -"python-3parclient\" to install the hpe3parclient." 
-msgstr "" -"在使用 3PAR 驅動程式之前,必須先安裝 hpe3parclient。請執行 \"pip install " -"python-3parclient\" 來安裝 hpe3parclient。" - -msgid "You must supply an array in your EMC configuration file." -msgstr "您必須在 EMC 配置檔中提供一個陣列。" - -#, python-format -msgid "" -"Your original size: %(originalVolumeSize)s GB is greater than: %(newSize)s " -"GB. Only Extend is supported. Exiting..." -msgstr "" -"原始大小 %(originalVolumeSize)s GB 大於:%(newSize)s GB。僅支援「延伸」作業。" -"正在結束..." - -#, python-format -msgid "ZeroDivisionError: %s" -msgstr "ZeroDivisionError:%s" - -msgid "Zone" -msgstr "區域" - -#, python-format -msgid "Zoning Policy: %s, not recognized" -msgstr "分區原則:%s,無法辨識" - -#, python-format -msgid "_create_and_copy_vdisk_data: Failed to get attributes for vdisk %s." -msgstr "_create_and_copy_vdisk_data:無法取得 vdisk %s 的屬性。" - -msgid "_create_host failed to return the host name." -msgstr "_create_host 無法傳回主機名稱。" - -msgid "" -"_create_host: Can not translate host name. Host name is not unicode or " -"string." -msgstr "_create_host:無法轉換主機名稱。主機名稱不是 Unicode 或字串。" - -msgid "_create_host: No connector ports." -msgstr "_create_host:無連接器埠。" - -msgid "_create_local_cloned_volume, Replication Service not found." -msgstr "_create_local_cloned_volume,找不到「抄寫服務」。" - -#, python-format -msgid "" -"_create_local_cloned_volume, volumename: %(volumename)s, sourcevolumename: " -"%(sourcevolumename)s, source volume instance: %(source_volume)s, target " -"volume instance: %(target_volume)s, Return code: %(rc)lu, Error: " -"%(errordesc)s." -msgstr "" -"_create_local_cloned_volume,磁區名稱:%(volumename)s,來源磁區名稱:" -"%(sourcevolumename)s,來源磁區實例:%(source_volume)s,目標磁區實例:" -"%(target_volume)s,回覆碼:%(rc)lu,錯誤:%(errordesc)s。" - -#, python-format -msgid "" -"_create_vdisk %(name)s - did not find success message in CLI output.\n" -" stdout: %(out)s\n" -" stderr: %(err)s" -msgstr "" -"_create_vdisk %(name)s - 在 CLI 輸出中找不到成功訊息。\n" -" 標準輸出:%(out)s\n" -" 標準錯誤:%(err)s" - -msgid "_create_volume_name, id_code is None." 
-msgstr "_create_volume_name,id_code 為「無」。" - -msgid "_delete_copysession, Cannot find Replication Service" -msgstr "_delete_copysession,找不到「抄寫服務」" - -#, python-format -msgid "" -"_delete_copysession, copy session type is undefined! copy session: " -"%(cpsession)s, copy type: %(copytype)s." -msgstr "" -"_delete_copysession,未定義複製階段作業類型!複製階段作業:%(cpsession)s,複" -"製類型:%(copytype)s。" - -#, python-format -msgid "" -"_delete_copysession, copysession: %(cpsession)s, operation: %(operation)s, " -"Return code: %(rc)lu, Error: %(errordesc)s." -msgstr "" -"_delete_copysession,複製階段作業:%(cpsession)s,作業:%(operation)s,回覆" -"碼:%(rc)lu,錯誤:%(errordesc)s。" - -#, python-format -msgid "" -"_delete_volume, volumename: %(volumename)s, Return code: %(rc)lu, Error: " -"%(errordesc)s." -msgstr "" -"_delete_volume,磁區名稱:%(volumename)s,回覆碼:%(rc)lu,錯誤:" -"%(errordesc)s。" - -#, python-format -msgid "" -"_delete_volume, volumename: %(volumename)s, Storage Configuration Service " -"not found." -msgstr "_delete_volume,磁區名稱:%(volumename)s,找不到「儲存體配置服務」。" - -#, python-format -msgid "" -"_exec_eternus_service, classname: %(classname)s, InvokeMethod, cannot " -"connect to ETERNUS." -msgstr "" -"_exec_eternus_service,類別名稱:%(classname)s,呼叫方法,無法連接至 " -"ETERNUS。" - -msgid "_extend_volume_op: Extending a volume with snapshots is not supported." -msgstr "_extend_volume_op:不支援延伸具有 Snapshot 的磁區。" - -#, python-format -msgid "" -"_find_affinity_group, connector: %(connector)s, Associators: " -"FUJITSU_AuthorizedTarget, cannot connect to ETERNUS." -msgstr "" -"_find_affinity_group,連接器:%(connector)s,協助程式:" -"FUJITSU_AuthorizedTarget,無法連接至 ETERNUS。" - -#, python-format -msgid "" -"_find_affinity_group, connector: %(connector)s, EnumerateInstanceNames, " -"cannot connect to ETERNUS." -msgstr "" -"_find_affinity_group,連接器:%(connector)s,列舉實例名稱,無法連接至 " -"ETERNUS。" - -#, python-format -msgid "" -"_find_affinity_group,connector: %(connector)s,AssocNames: " -"FUJITSU_ProtocolControllerForUnit, cannot connect to ETERNUS." 
-msgstr "" -"_find_affinity_group,連接器:%(connector)s,協助程式名稱:" -"FUJITSU_ProtocolControllerForUnit,無法連接至 ETERNUS。" - -#, python-format -msgid "" -"_find_copysession, ReferenceNames, vol_instance: %(vol_instance_path)s, " -"Cannot connect to ETERNUS." -msgstr "" -"_find_copysession,參照名稱,vol_instance:%(vol_instance_path)s,無法連接至 " -"ETERNUS。" - -#, python-format -msgid "" -"_find_eternus_service, classname: %(classname)s, EnumerateInstanceNames, " -"cannot connect to ETERNUS." -msgstr "" -"_find_eternus_service,類別名稱:%(classname)s,列舉實例名稱,無法連接至 " -"ETERNUS。" - -#, python-format -msgid "_find_initiator_names, connector: %(connector)s, initiator not found." -msgstr "_find_initiator_names,連接器:%(connector)s,找不到起始器。" - -#, python-format -msgid "" -"_find_lun, volumename: %(volumename)s, EnumerateInstanceNames, cannot " -"connect to ETERNUS." -msgstr "" -"_find_lun,磁區名稱:%(volumename)s,列舉實例名稱,無法連接至 ETERNUS。" - -#, python-format -msgid "" -"_find_pool, eternus_pool:%(eternus_pool)s, EnumerateInstances, cannot " -"connect to ETERNUS." -msgstr "" -"_find_pool,eternus_pool:%(eternus_pool)s,列舉實例,無法連接至 ETERNUS。" - -#, python-format -msgid "" -"_get_drvcfg, filename: %(filename)s, tagname: %(tagname)s, data is None!! " -"Please edit driver configuration file and correct." -msgstr "" -"_get_drvcfg,檔名:%(filename)s,標記名稱:%(tagname)s,資料為「無」!請編輯" -"驅動程式配置檔並更正。" - -#, python-format -msgid "" -"_get_eternus_connection, filename: %(filename)s, ip: %(ip)s, port: %(port)s, " -"user: %(user)s, passwd: ****, url: %(url)s, FAILED!!." -msgstr "" -"_get_eternus_connection,檔名:%(filename)s,IP:%(ip)s,埠:%(port)s,使用" -"者:%(user)s,密碼:****,URL:%(url)s,失敗!" - -#, python-format -msgid "" -"_get_eternus_iscsi_properties, iscsiip list: %(iscsiip_list)s, iqn not found." -msgstr "" -"_get_eternus_iscsi_properties,ISCSI IP 清單:%(iscsiip_list)s,找不到 IQN。" - -#, python-format -msgid "" -"_get_eternus_iscsi_properties, iscsiip: %(iscsiip)s, AssociatorNames: " -"CIM_BindsTo, cannot connect to ETERNUS." 
-msgstr "" -"_get_eternus_iscsi_properties,ISCSI IP:%(iscsiip)s,協助程式名稱:" -"CIM_BindsTo,無法連接至 ETERNUS。" - -#, python-format -msgid "" -"_get_eternus_iscsi_properties, iscsiip: %(iscsiip)s, EnumerateInstanceNames, " -"cannot connect to ETERNUS." -msgstr "" -"_get_eternus_iscsi_properties,ISCSI IP:%(iscsiip)s,列舉實例名稱,無法連接" -"至 ETERNUS。" - -#, python-format -msgid "" -"_get_eternus_iscsi_properties, iscsiip: %(iscsiip)s, GetInstance, cannot " -"connect to ETERNUS." -msgstr "" -"_get_eternus_iscsi_properties,ISCSI IP:%(iscsiip)s,取得實例,無法連接至 " -"ETERNUS。" - -#, python-format -msgid "" -"_get_hdr_dic: attribute headers and values do not match.\n" -" Headers: %(header)s\n" -" Values: %(row)s." -msgstr "" -"_get_hdr_dic:屬性標頭與值不相符。\n" -"標頭:%(header)s\n" -"值:%(row)s。" - -msgid "_get_host_from_connector failed to return the host name for connector." -msgstr "_get_host_from_connector 無法傳回連接器的主機名稱。" - -#, python-format -msgid "" -"_get_mapdata_fc, getting host-affinity from aglist/vol_instance failed, " -"affinitygroup: %(ag)s, ReferenceNames, cannot connect to ETERNUS." -msgstr "" -"_get_mapdata_fc,從 aglist/vol_instance 取得 host-affinity 失敗,親緣性群組:" -"%(ag)s,參照名稱,無法連接至 ETERNUS。" - -#, python-format -msgid "" -"_get_mapdata_fc, getting host-affinity instance failed, volmap: %(volmap)s, " -"GetInstance, cannot connect to ETERNUS." -msgstr "" -"_get_mapdata_fc,取得 host-affinity 實例失敗,磁區對映:%(volmap)s,取得實" -"例,無法連接至 ETERNUS。" - -msgid "" -"_get_mapdata_iscsi, Associators: FUJITSU_SAPAvailableForElement, cannot " -"connect to ETERNUS." -msgstr "" -"_get_mapdata_iscsi,協助程式:FUJITSU_SAPAvailableForElement,無法連接至 " -"ETERNUS。" - -#, python-format -msgid "" -"_get_mapdata_iscsi, affinitygroup: %(ag)s, ReferenceNames, cannot connect to " -"ETERNUS." -msgstr "_get_mapdata_iscsi,親緣性群組:%(ag)s,參照名稱,無法連接至 ETERNUS。" - -#, python-format -msgid "" -"_get_mapdata_iscsi, vol_instance: %(vol_instance)s, ReferenceNames: " -"CIM_ProtocolControllerForUnit, cannot connect to ETERNUS." 
-msgstr "" -"_get_mapdata_iscsi,vol_instance:%(vol_instance)s,參照名稱:" -"CIM_ProtocolControllerForUnit,無法連接至 ETERNUS。" - -#, python-format -msgid "" -"_get_mapdata_iscsi, volmap: %(volmap)s, GetInstance, cannot connect to " -"ETERNUS." -msgstr "" -"_get_mapdata_iscsi,磁區對映:%(volmap)s,取得實例,無法連接至 ETERNUS。" - -msgid "_get_target_port, EnumerateInstances, cannot connect to ETERNUS." -msgstr "_get_target_port,列舉實例,無法連接至 ETERNUS。" - -#, python-format -msgid "_get_target_port, protcol: %(protocol)s, target_port not found." -msgstr "_get_target_port,通訊協定:%(protocol)s,找不到 target_port。" - -#, python-format -msgid "_get_unmanaged_replay: Cannot find snapshot named %s" -msgstr "_get_unmanaged_replay:找不到名為 %s 的 Snapshot" - -#, python-format -msgid "_get_unmanaged_replay: Cannot find volume id %s" -msgstr "_get_unmanaged_replay:找不到磁區 ID %s" - -msgid "_get_unmanaged_replay: Must specify source-name." -msgstr "_get_unmanaged_replay:必須指定 source-name。" - -msgid "" -"_get_vdisk_map_properties: Could not get FC connection information for the " -"host-volume connection. Is the host configured properly for FC connections?" -msgstr "" -"_get_vdisk_map_properties:無法取得主機-磁區連線的 FC 連線資訊。是否已針對 " -"FC 連線適當地配置主機?" - -#, python-format -msgid "" -"_get_vdisk_map_properties: No node found in I/O group %(gid)s for volume " -"%(vol)s." -msgstr "" -"_get_vdisk_map_properties:在下列磁區的 I/O 群組 %(gid)s 中找不到節點:" -"%(vol)s。" - -#, python-format -msgid "" -"_map_lun, vol_instance.path:%(vol)s, volumename: %(volumename)s, volume_uid: " -"%(uid)s, initiator: %(initiator)s, target: %(tgt)s, aglist: %(aglist)s, " -"Storage Configuration Service not found." -msgstr "" -"_map_lun,vol_instance.path:%(vol)s,磁區名稱:%(volumename)s,volume_uid:" -"%(uid)s,起始器:%(initiator)s,目標:%(tgt)s,親緣性群組清單:%(aglist)s,找" -"不到「儲存體配置服務」。" - -#, python-format -msgid "" -"_unmap_lun, vol_instance.path: %(volume)s, volumename: %(volumename)s, " -"volume_uid: %(uid)s, aglist: %(aglist)s, Controller Configuration Service " -"not found." 
-msgstr "" -"_unmap_lun,vol_instance.path:%(volume)s,磁區名稱:%(volumename)s," -"volume_uid:%(uid)s,親緣性群組清單:%(aglist)s,找不到「控制器配置服務」。" - -#, python-format -msgid "" -"_unmap_lun, volumename: %(volumename)s, volume_uid: %(volume_uid)s, " -"AffinityGroup: %(ag)s, Return code: %(rc)lu, Error: %(errordesc)s." -msgstr "" -"_unmap_lun,磁區名稱:%(volumename)s,volume_uid:%(volume_uid)s,親緣性群" -"組:%(ag)s,回覆碼:%(rc)lu,錯誤:%(errordesc)s。" - -#, python-format -msgid "" -"_unmap_lun,vol_instance.path: %(volume)s, AssociatorNames: " -"CIM_ProtocolControllerForUnit, cannot connect to ETERNUS." -msgstr "" -"_unmap_lun,vol_instance.path: %(volume)s,協助程式名稱:" -"CIM_ProtocolControllerForUnit,無法連接至 ETERNUS。" - -msgid "_update_volume_stats: Could not get storage pool data." -msgstr "_update_volume_stats:無法取得儲存區資料。" - -#, python-format -msgid "" -"_wait_for_copy_complete, cpsession: %(cpsession)s, copysession state is " -"BROKEN." -msgstr "" -"_wait_for_copy_complete,cpsession:%(cpsession)s,copysession 狀態為 " -"BROKEN。" - -#, python-format -msgid "" -"add_vdisk_copy failed: A copy of volume %s exists. Adding another copy would " -"exceed the limit of 2 copies." -msgstr "" -"add_vdisk_copy 失敗:存在磁區 %s 的副本。如果新增另一份副本,則將超過 2 份副" -"本的限制。" - -msgid "add_vdisk_copy started without a vdisk copy in the expected pool." -msgstr "已開始 add_vdisk_copy,但預期儲存區中沒有 vdisk 副本。" - -#, python-format -msgid "all_tenants must be a boolean, got '%s'." 
-msgstr "all_tenants 必須是布林值,但卻取得 '%s'。" - -msgid "already created" -msgstr "已建立" - -msgid "already_created" -msgstr "already_created" - -msgid "attach snapshot from remote node" -msgstr "從遠端節點連接 Snapshot" - -#, python-format -msgid "attribute %s not lazy-loadable" -msgstr "屬性 %s 無法延遲載入" - -#, python-format -msgid "" -"backup: %(vol_id)s failed to create device hardlink from %(vpath)s to " -"%(bpath)s.\n" -"stdout: %(out)s\n" -" stderr: %(err)s" -msgstr "" -"備份:%(vol_id)s 無法建立從 %(vpath)s 到%(bpath)s 的裝置固定鏈結。\n" -" 標準輸出:%(out)s\n" -" 標準錯誤:%(err)s" - -#, python-format -msgid "" -"backup: %(vol_id)s failed to obtain backup success notification from " -"server.\n" -"stdout: %(out)s\n" -" stderr: %(err)s" -msgstr "" -"備份:%(vol_id)s 無法從伺服器取得備份成功通知。\n" -" 標準輸出:%(out)s\n" -" 標準錯誤:%(err)s" - -#, python-format -msgid "" -"backup: %(vol_id)s failed to run dsmc due to invalid arguments on " -"%(bpath)s.\n" -"stdout: %(out)s\n" -" stderr: %(err)s" -msgstr "" -"備份:%(vol_id)s 無法執行 dsmc,因為%(bpath)s 的裝置固定鏈結。\n" -" 標準輸出:%(out)s\n" -" 標準錯誤:%(err)s" - -#, python-format -msgid "" -"backup: %(vol_id)s failed to run dsmc on %(bpath)s.\n" -"stdout: %(out)s\n" -" stderr: %(err)s" -msgstr "" -"備份:%(vol_id)s 無法在 %(bpath)s 上執行 dsmc。\n" -" 標準輸出:%(out)s\n" -" 標準錯誤:%(err)s" - -#, python-format -msgid "backup: %(vol_id)s failed. %(path)s is not a file." -msgstr "備份:%(vol_id)s 失敗。%(path)s 不是檔案。" - -#, python-format -msgid "" -"backup: %(vol_id)s failed. %(path)s is unexpected file type. Block or " -"regular files supported, actual file mode is %(vol_mode)s." -msgstr "" -"備份:%(vol_id)s 失敗。%(path)s 為非預期的檔案類型。支援區塊或一般檔案,實際" -"檔案模式為 %(vol_mode)s。" - -#, python-format -msgid "" -"backup: %(vol_id)s failed. Cannot obtain real path to volume at %(path)s." 
-msgstr "備份:%(vol_id)s 失敗。無法取得 %(path)s 處磁區的實際路徑。" - -msgid "being attached by different mode" -msgstr "正在以不同的模式進行連接" - -#, python-format -msgid "call failed: %r" -msgstr "呼叫失敗:%r" - -msgid "call failed: GARBAGE_ARGS" -msgstr "呼叫失敗:GARBAGE_ARGS" - -msgid "call failed: PROC_UNAVAIL" -msgstr "呼叫失敗:PROC_UNAVAIL" - -#, python-format -msgid "call failed: PROG_MISMATCH: %r" -msgstr "呼叫失敗:PROG_MISMATCH:%r" - -msgid "call failed: PROG_UNAVAIL" -msgstr "呼叫失敗:PROG_UNAVAIL" - -#, python-format -msgid "can't find lun-map, ig:%(ig)s vol:%(vol)s" -msgstr "找不到 lun-map,ig:%(ig)s 磁區:%(vol)s" - -msgid "can't find the volume to extend" -msgstr "找不到要延伸的磁區" - -msgid "can't handle both name and index in req" -msgstr "無法處理要求中的名稱及索引" - -msgid "cannot understand JSON" -msgstr "無法理解 JSON" - -#, python-format -msgid "cg-%s" -msgstr "cg-%s" - -msgid "cgsnapshot assigned" -msgstr "已指派 cgsnapshot" - -msgid "cgsnapshot changed" -msgstr "已變更 cgsnapshot" - -msgid "cgsnapshots assigned" -msgstr "已指派 cgsnapshot" - -msgid "cgsnapshots changed" -msgstr "已變更 cgsnapshot" - -msgid "" -"check_for_setup_error: Password or SSH private key is required for " -"authentication: set either san_password or san_private_key option." -msgstr "" -"check_for_setup_error:需要密碼或 SSH 私密金鑰以進行鑑別:請設定 " -"san_password 或 san_private_key 選項。" - -msgid "check_for_setup_error: Unable to determine system id." -msgstr "check_for_setup_error:無法判定系統 ID。" - -msgid "check_for_setup_error: Unable to determine system name." -msgstr "check_for_setup_error:無法判定系統名稱。" - -msgid "check_hypermetro_exist error." -msgstr "check_hypermetro_exist 錯誤。" - -#, python-format -msgid "clone depth exceeds limit of %s" -msgstr "複製深度超出了限制 (%s)" - -msgid "consistencygroup assigned" -msgstr "已指派 consistencygroup" - -msgid "consistencygroup changed" -msgstr "已變更 consistencygroup" - -msgid "control_location must be defined" -msgstr "必須定義 control_location" - -msgid "create_cloned_volume, Source Volume does not exist in ETERNUS." 
-msgstr "create_cloned_volume,ETERNUS 中不存在「來源磁區」。" - -#, python-format -msgid "" -"create_cloned_volume, target volume instancename: %(volume_instancename)s, " -"Get Instance Failed." -msgstr "" -"create_cloned_volume,目標磁區實例名稱:%(volume_instancename)s,取得實例失" -"敗。" - -msgid "create_cloned_volume: Source and destination size differ." -msgstr "create_cloned_volume:來源及目的地大小不同。" - -#, python-format -msgid "" -"create_cloned_volume: source volume %(src_vol)s size is %(src_size)dGB and " -"doesn't fit in target volume %(tgt_vol)s of size %(tgt_size)dGB." -msgstr "" -"create_cloned_volume:來源磁區 %(src_vol)s 大小為 %(src_size)dGB,且不適合大" -"小為 %(tgt_size)dGB 的目標磁區 %(tgt_vol)s。" - -msgid "" -"create_consistencygroup_from_src must be creating from a CG snapshot, or a " -"source CG." -msgstr "" -"create_consistencygroup_from_src 必須是從 CG Snapshot 或來源 CG 進行建立。" - -msgid "" -"create_consistencygroup_from_src only supports a cgsnapshot source or a " -"consistency group source. Multiple sources cannot be used." -msgstr "" -"create_consistencygroup_from_src 僅支援一個 cgsnapshot 來源或一個一致性群組來" -"源。不能使用多個來源。" - -#, python-format -msgid "create_copy: Source vdisk %(src)s (%(src_id)s) does not exist." -msgstr "create_copy:來源 vdisk %(src)s (%(src_id)s) 不存在。" - -#, python-format -msgid "create_copy: Source vdisk %(src)s does not exist." -msgstr "create_copy:來源 vdisk %(src)s 不存在。" - -msgid "create_host: Host name is not unicode or string." -msgstr "create_host:主機名稱不是 Unicode 或字串。" - -msgid "create_host: No initiators or wwpns supplied." -msgstr "create_host:未提供任何起始器或 WWPN。" - -msgid "create_hypermetro_pair error." -msgstr "create_hypermetro_pair 錯誤。" - -#, python-format -msgid "create_snapshot, eternus_pool: %(eternus_pool)s, pool not found." 
-msgstr "create_snapshot,eternus_pool:%(eternus_pool)s,找不到儲存區。" - -#, python-format -msgid "" -"create_snapshot, snapshotname: %(snapshotname)s, source volume name: " -"%(volumename)s, vol_instance.path: %(vol_instance)s, dest volume name: " -"%(d_volumename)s, pool: %(pool)s, Return code: %(rc)lu, Error: %(errordesc)s." -msgstr "" -"create_snapshot,Snapshot 名稱:%(snapshotname)s,來源磁區名稱:" -"%(volumename)s,vol_instance.path:%(vol_instance)s,目的地磁區名稱:" -"%(d_volumename)s,儲存區:%(pool)s,回覆碼:%(rc)lu,錯誤:%(errordesc)s。" - -#, python-format -msgid "" -"create_snapshot, volumename: %(s_volumename)s, source volume not found on " -"ETERNUS." -msgstr "" -"create_snapshot,磁區名稱:%(s_volumename)s,在 ETERNUS 上找不到來源磁區。" - -#, python-format -msgid "" -"create_snapshot, volumename: %(volumename)s, Replication Service not found." -msgstr "create_snapshot,磁區名稱:%(volumename)s,找不到「抄寫服務」。" - -#, python-format -msgid "" -"create_snapshot: Volume status must be \"available\" or \"in-use\" for " -"snapshot. The invalid status is %s." -msgstr "" -"create_snapshot:Snapshot 的磁區狀態必須為「可用」或「使用中」。無效狀態為 " -"%s。" - -msgid "create_snapshot: get source volume failed." -msgstr "create_snapshot:取得來源磁區時失敗。" - -#, python-format -msgid "" -"create_volume, volume: %(volume)s, EnumerateInstances, cannot connect to " -"ETERNUS." -msgstr "create_volume,磁區:%(volume)s,列舉實例,無法連接至 ETERNUS。" - -#, python-format -msgid "" -"create_volume, volume: %(volume)s, volumename: %(volumename)s, eternus_pool: " -"%(eternus_pool)s, Storage Configuration Service not found." -msgstr "" -"create_volume,磁區:%(volume)s,磁區名稱:%(volumename)s,eternus_pool:" -"%(eternus_pool)s,找不到「儲存體配置服務」。" - -#, python-format -msgid "" -"create_volume, volumename: %(volumename)s, poolname: %(eternus_pool)s, " -"Return code: %(rc)lu, Error: %(errordesc)s." -msgstr "" -"create_volume,磁區名稱:%(volumename)s,儲存區名稱:%(eternus_pool)s,回覆" -"碼:%(rc)lu,錯誤:%(errordesc)s。" - -msgid "create_volume_from_snapshot, Source Volume does not exist in ETERNUS." 
-msgstr "create_volume_from_snapshot,ETERNUS 中不存在「來源磁區」。" - -#, python-format -msgid "" -"create_volume_from_snapshot, target volume instancename: " -"%(volume_instancename)s, Get Instance Failed." -msgstr "" -"create_volume_from_snapshot,目標磁區實例名稱:%(volume_instancename)s,取得" -"實例失敗。" - -#, python-format -msgid "create_volume_from_snapshot: Snapshot %(name)s does not exist." -msgstr "create_volume_from_snapshot:Snapshot %(name)s 不存在。" - -#, python-format -msgid "" -"create_volume_from_snapshot: Snapshot status must be \"available\" for " -"creating volume. The invalid status is: %s." -msgstr "" -"create_volume_from_snapshot:Snapshot 狀態必須為「可用」,才能建立磁區。無效" -"的狀態為:%s。" - -msgid "" -"create_volume_from_snapshot: Volume size is different from snapshot based " -"volume." -msgstr "create_volume_from_snapshot:磁區大小與 Snapshot 型磁區不同。" - -#, python-format -msgid "" -"delete: %(vol_id)s failed to run dsmc due to invalid arguments with stdout: " -"%(out)s\n" -" stderr: %(err)s" -msgstr "" -"刪除:%(vol_id)s 無法執行 dsmc,因為引數無效,標準輸出:%(out)s\n" -" 標準錯誤:%(err)s" - -#, python-format -msgid "" -"delete: %(vol_id)s failed to run dsmc with stdout: %(out)s\n" -" stderr: %(err)s" -msgstr "" -"刪除:%(vol_id)s 無法執行 dsmc,標準輸出:%(out)s\n" -" 標準錯誤:%(err)s" - -msgid "delete_hypermetro error." -msgstr "delete_hypermetro 錯誤。" - -#, python-format -msgid "delete_initiator: %s ACL not found. Continuing." -msgstr "delete_initiator:找不到 %s。將繼續。" - -msgid "delete_replication error." -msgstr "delete_replication 錯誤。" - -#, python-format -msgid "deleting snapshot %(snapshot_name)s that has dependent volumes" -msgstr "正在刪除具有相依磁區的 Snapshot %(snapshot_name)s" - -#, python-format -msgid "deleting volume %(volume_name)s that has snapshot" -msgstr "正在刪除具有 Snapshot 的磁區 %(volume_name)s" - -msgid "detach snapshot from remote node" -msgstr "將 Snapshot 從遠端節點分離" - -msgid "do_setup: No configured nodes." 
-msgstr "do_setup:未配置節點。" - -#, python-format -msgid "" -"error writing object to swift, MD5 of object in swift %(etag)s is not the " -"same as MD5 of object sent to swift %(md5)s" -msgstr "" -"將物件寫入 Swift 時發生錯誤,Swift 中物件的 MD5 %(etag)s,與傳送至 Swift 的物" -"件 MD5 %(md5)s 不同" - -#, python-format -msgid "extend_volume, eternus_pool: %(eternus_pool)s, pool not found." -msgstr "extend_volume,eternus_pool:%(eternus_pool)s,找不到儲存區。" - -#, python-format -msgid "" -"extend_volume, volume: %(volume)s, volumename: %(volumename)s, eternus_pool: " -"%(eternus_pool)s, Storage Configuration Service not found." -msgstr "" -"extend_volume,磁區:%(volume)s,磁區名稱:%(volumename)s,eternus_pool:" -"%(eternus_pool)s,找不到「儲存體配置服務」。" - -#, python-format -msgid "" -"extend_volume, volumename: %(volumename)s, Return code: %(rc)lu, Error: " -"%(errordesc)s, PoolType: %(pooltype)s." -msgstr "" -"extend_volume,磁區名稱:%(volumename)s,回覆碼:%(rc)lu,錯誤:" -"%(errordesc)s,儲存區類型:%(pooltype)s。" - -#, python-format -msgid "extend_volume, volumename: %(volumename)s, volume not found." -msgstr "extend_volume,磁區名稱:%(volumename)s,找不到磁區。" - -msgid "failed to create new_volume on destination host" -msgstr "無法在目的地主機上建立 new_volume" - -msgid "fake" -msgstr "偽造" - -#, python-format -msgid "file already exists at %s" -msgstr "%s 處已存在檔案" - -msgid "fileno is not supported by SheepdogIOWrapper" -msgstr "SheepdogIOWrapper 不支援 fileno" - -msgid "fileno() not supported by RBD()" -msgstr "RBD() 不支援 fileno()" - -#, python-format -msgid "filesystem %s does not exist in Nexenta Store appliance" -msgstr "檔案系統 %s 不存在於「Nexenta 儲存庫」軟體驅動裝置中" - -msgid "" -"flashsystem_multihostmap_enabled is set to False, not allow multi host " -"mapping. CMMVC6071E The VDisk-to-host mapping was not created because the " -"VDisk is already mapped to a host." 
-msgstr "" -"flashsystem_multihostmap_enabled 已設定為 False,不容許多重主機對映。" -"CMMVC6071E 未建立 VDisk 至主機的對映,因為VDisk 已對映至主機。" - -msgid "flush() not supported in this version of librbd" -msgstr "此版本的 librbd 中不支援 flush()" - -#, python-format -msgid "fmt=%(fmt)s backed by: %(backing_file)s" -msgstr "fmt = %(fmt)s 受 %(backing_file)s 支援" - -#, python-format -msgid "fmt=%(fmt)s backed by:%(backing_file)s" -msgstr "fmt = %(fmt)s 受 %(backing_file)s 支援" - -msgid "force delete" -msgstr "強制刪除" - -msgid "get_hyper_domain_id error." -msgstr "get_hyper_domain_id 錯誤。" - -msgid "get_hypermetro_by_id error." -msgstr "get_hypermetro_by_id 錯誤。" - -#, python-format -msgid "" -"get_iscsi_params: Failed to get target IP for initiator %(ini)s, please " -"check config file." -msgstr "get_iscsi_params:無法取得起始器 %(ini)s 的目標 IP,請檢查配置檔。" - -#, python-format -msgid "get_pool: Failed to get attributes for volume %s" -msgstr "get_pool:無法取得磁區 %s 的屬性" - -msgid "glance_metadata changed" -msgstr "已變更 glance_metadata" - -#, python-format -msgid "" -"gpfs_images_share_mode is set to copy_on_write, but %(vol)s and %(img)s " -"belong to different file systems." -msgstr "" -"gpfs_images_share_mode 已設為 copy_on_write,但 %(vol)s 及 %(img)s 屬於不同的" -"檔案系統。" - -#, python-format -msgid "" -"gpfs_images_share_mode is set to copy_on_write, but %(vol)s and %(img)s " -"belong to different filesets." -msgstr "" -"gpfs_images_share_mode 已設為 copy_on_write,但 %(vol)s 及 %(img)s 屬於不同的" -"檔案集。" - -#, python-format -msgid "" -"hgst_group %(grp)s and hgst_user %(usr)s must map to valid users/groups in " -"cinder.conf" -msgstr "" -"在 cinder.conf 中,hgst_group %(grp)s 和 hgst_user %(usr)s 必須對映至有效的使" -"用者/群組" - -#, python-format -msgid "hgst_net %(net)s specified in cinder.conf not found in cluster" -msgstr "在叢集中找不到 cinder.conf 內指定的 hgst_net %(net)s" - -msgid "hgst_redundancy must be set to 0 (non-HA) or 1 (HA) in cinder.conf." 
-msgstr "在 cinder.conf 中,hgst_redundancy 必須設定為 0(非 HA)或 1 (HA)。" - -msgid "hgst_space_mode must be an octal/int in cinder.conf" -msgstr "在 cinder.conf 中,hgst_space_mode 必須是八進位/整數" - -#, python-format -msgid "hgst_storage server %(svr)s not of format :" -msgstr "hgst_storage 伺服器 %(svr)s 的格式不是 :" - -msgid "hgst_storage_servers must be defined in cinder.conf" -msgstr "在 cinder.conf 中,必須定義 hgst_storage_servers" - -msgid "" -"http service may have been abruptly disabled or put to maintenance state in " -"the middle of this operation." -msgstr "HTTP 服務可能已在執行此作業的中途意外停用或置於維護狀態。" - -msgid "id cannot be None" -msgstr "ID 不能為 None" - -#, python-format -msgid "image %s not found" -msgstr "找不到映像檔 %s" - -#, python-format -msgid "initialize_connection, volume: %(volume)s, Volume not found." -msgstr "initialize_connection,磁區:%(volume)s,找不到「磁區」。" - -#, python-format -msgid "initialize_connection: Failed to get attributes for volume %s." -msgstr "initialize_connection:無法取得磁區 %s 的屬性。" - -#, python-format -msgid "initialize_connection: Missing volume attribute for volume %s." -msgstr "initialize_connection:遺漏磁區 %s 的磁區屬性。" - -#, python-format -msgid "" -"initialize_connection: No node found in I/O group %(gid)s for volume %(vol)s." -msgstr "" -"initialize_connection:在磁區 %(vol)s 的 I/O 群組 %(gid)s 中找不到節點。" - -#, python-format -msgid "initialize_connection: vdisk %s is not defined." -msgstr "initialize_connection:未定義 vdisk %s。" - -#, python-format -msgid "invalid user '%s'" -msgstr "無效的使用者 '%s'" - -#, python-format -msgid "iscsi portal, %s, not found" -msgstr "找不到 iSCSI 入口網站 %s" - -msgid "" -"iscsi_ip_address must be set in config file when using protocol 'iSCSI'." 
-msgstr "使用通訊協定 'iSCSI' 時,必須在配置檔中設定 iscsi_ip_address。" - -#, python-format -msgid "key manager error: %(reason)s" -msgstr "金鑰管理程式錯誤:%(reason)s" - -msgid "limit param must be an integer" -msgstr "限制參數必須是整數" - -msgid "limit param must be positive" -msgstr "限制參數必須是正數" - -msgid "manage_existing requires a 'name' key to identify an existing volume." -msgstr "manage_existing 需要 'name' 索引鍵來確認現有磁區。" - -#, python-format -msgid "" -"manage_existing_snapshot: Error managing existing replay %(ss)s on volume " -"%(vol)s" -msgstr "" -"manage_existing_snapshot:管理磁區 %(vol)s 上的現有重播 %(ss)s 時發生錯誤" - -#, python-format -msgid "marker [%s] not found" -msgstr "找不到標記 [%s]" - -#, python-format -msgid "mdiskgrp missing quotes %s" -msgstr "mdiskgrp 遺漏引用 %s" - -#, python-format -msgid "migration_policy must be 'on-demand' or 'never', passed: %s" -msgstr "migration_policy 必須是 'on-demand' 或 'never',已傳遞:%s" - -#, python-format -msgid "mkfs failed on volume %(vol)s, error message was: %(err)s." -msgstr "mkfs 在磁區 %(vol)s 上執行時失敗,錯誤訊息為:%(err)s。" - -msgid "mock" -msgstr "模擬" - -msgid "mount.glusterfs is not installed" -msgstr "未安裝 mount.glusterfs" - -#, python-format -msgid "multiple resources with name %s found by drbdmanage" -msgstr "DRBDmanage 找到多個資源具有名稱 %s" - -#, python-format -msgid "multiple resources with snapshot ID %s found" -msgstr "找到多個資源具有 Snapshot ID %s" - -msgid "name cannot be None" -msgstr "名稱不能為 None" - -#, python-format -msgid "no REPLY but %r" -msgstr "沒有回覆,但 %r" - -#, python-format -msgid "no snapshot with id %s found in drbdmanage" -msgstr "在 DRBDmanage 中找不到 ID 為 %s 的 Snapshot" - -#, python-format -msgid "not exactly one snapshot with id %s" -msgstr "不止一個 Snapshot 具有 ID %s" - -#, python-format -msgid "not exactly one volume with id %s" -msgstr "不止一個磁區具有 ID %s" - -#, python-format -msgid "obj missing quotes %s" -msgstr "obj 遺漏引用 %s" - -msgid "open_access_enabled is not off." 
-msgstr "未關閉 open_access_enabled。" - -msgid "progress must be an integer percentage" -msgstr "進度必須是整數百分比" - -msgid "provider must be defined" -msgstr "必須定義提供者" - -#, python-format -msgid "" -"qemu-img %(minimum_version)s or later is required by this volume driver. " -"Current qemu-img version: %(current_version)s" -msgstr "" -"此磁區驅動程式需要 qemu-img %(minimum_version)s 或更高版本。現行 qemu-img 版" -"本:%(current_version)s" - -#, python-format -msgid "" -"qemu-img is not installed and image is of type %s. Only RAW images can be " -"used if qemu-img is not installed." -msgstr "" -"未安裝 qemu-img,且映像檔的類型是 %s。如果未安裝 qemu-img,則只能使用原始映像" -"檔。" - -msgid "" -"qemu-img is not installed and the disk format is not specified. Only RAW " -"images can be used if qemu-img is not installed." -msgstr "" -"未安裝 qemu-img,且未指定磁碟格式。如果未安裝qemu-img,則只能使用原始映像檔。" - -msgid "rados and rbd python libraries not found" -msgstr "找不到 rados 及 rbd python 程式庫" - -#, python-format -msgid "read_deleted can only be one of 'no', 'yes' or 'only', not %r" -msgstr "read_deleted 只能是 'no'、'yes' 或 'only' 其中之一,不能是 %r" - -#, python-format -msgid "replication_failover failed. %s not found." -msgstr "replication_failover 失敗。找不到 %s。" - -msgid "replication_failover failed. Backend not configured for failover" -msgstr "replication_failover 失敗。未配置後端以進行失效接手" - -#, python-format -msgid "" -"restore: %(vol_id)s failed to run dsmc due to invalid arguments on " -"%(bpath)s.\n" -"stdout: %(out)s\n" -" stderr: %(err)s" -msgstr "" -"還原:%(vol_id)s 無法執行 dsmc,因為%(bpath)s 上的引數無效。\n" -" 標準輸出:%(out)s\n" -" 標準錯誤:%(err)s" - -#, python-format -msgid "" -"restore: %(vol_id)s failed to run dsmc on %(bpath)s.\n" -"stdout: %(out)s\n" -" stderr: %(err)s" -msgstr "" -"還原:%(vol_id)s 無法在 %(bpath)s 上執行 dsmc。\n" -" 標準輸出:%(out)s\n" -" 標準錯誤:%(err)s" - -#, python-format -msgid "" -"restore: %(vol_id)s failed.\n" -"stdout: %(out)s\n" -" stderr: %(err)s." 
-msgstr "" -"還原:%(vol_id)s 失敗。\n" -" 標準輸出:%(out)s\n" -"標準錯誤:%(err)s。" - -msgid "" -"restore_backup aborted, actual object list does not match object list stored " -"in metadata." -msgstr "restore_backup 已中斷,實際物件清單與meta 資料中儲存的物件清單不相符。" - -#, python-format -msgid "rtslib_fb is missing member %s: You may need a newer python-rtslib-fb." -msgstr "rtslib_fb 遺漏成員 %s:您可能需要更新的 python-rtslib-fb。" - -msgid "san_ip is not set." -msgstr "未設定 san_ip。" - -msgid "san_ip must be set" -msgstr "必須設定 san_ip" - -msgid "" -"san_login and/or san_password is not set for Datera driver in the cinder." -"conf. Set this information and start the cinder-volume service again." -msgstr "" -"未在 cinder.conf 中設定 Datera 驅動程式的 san_login 及/或 san_password。請設" -"定此資訊並重新啟動 cinder-volume 服務。" - -msgid "serve() can only be called once" -msgstr "只能呼叫 serve() 一次" - -#, python-format -msgid "snapshot-%s" -msgstr "snapshot-%s" - -msgid "snapshots assigned" -msgstr "已指派 Snapshot" - -msgid "snapshots changed" -msgstr "已變更 Snapshot" - -#, python-format -msgid "source volume id:%s is not replicated" -msgstr "未抄寫來源磁區 ID:%s" - -msgid "source-name cannot be empty." -msgstr "source-name 不能是空的。" - -msgid "source-name format should be: 'vmdk_path@vm_inventory_path'." -msgstr "source-name 格式應該是:'vmdk_path@vm_inventory_path'。" - -#, python-format -msgid "status must be %s and" -msgstr " 狀態必須是 %s,並且" - -msgid "status must be available" -msgstr "狀態必須可用" - -msgid "stop_hypermetro error." -msgstr "stop_hypermetro 錯誤。" - -msgid "sync_hypermetro error." -msgstr "sync_hypermetro 錯誤。" - -#, python-format -msgid "" -"targetcli not installed and could not create default directory " -"(%(default_path)s): %(exc)s" -msgstr "targetcli 尚未安裝,無法建立預設目錄(%(default_path)s):%(exc)s" - -msgid "terminate_connection: Failed to get host name from connector." 
-msgstr "terminate_connection:無法從連接器取得主機名稱。" - -msgid "timeout creating new_volume on destination host" -msgstr "在目的地主機上建立 new_volume 時發生逾時" - -msgid "too many body keys" -msgstr "主體金鑰太多" - -#, python-format -msgid "umount: %s: not mounted" -msgstr "卸載:%s:未裝載" - -#, python-format -msgid "umount: %s: target is busy" -msgstr "卸載:%s:目標在忙碌中" - -msgid "umount: : some other error" -msgstr "卸載::其他某個錯誤" - -msgid "umount: : target is busy" -msgstr "卸載::目標在忙碌中" - -#, python-format -msgid "unmanage_snapshot: Cannot find snapshot named %s" -msgstr "unmanage_snapshot:找不到名為 %s 的 Snapshot" - -#, python-format -msgid "unmanage_snapshot: Cannot find volume id %s" -msgstr "unmanage_snapshot:找不到磁區 ID %s" - -#, python-format -msgid "unrecognized argument %s" -msgstr "無法辨識的引數 %s" - -#, python-format -msgid "unsupported compression algorithm: %s" -msgstr "不支援的壓縮演算法:%s" - -msgid "valid iqn needed for show_target" -msgstr "show_target 需要有效的 IQN" - -#, python-format -msgid "vdisk %s is not defined." -msgstr "未定義 vdisk %s。" - -msgid "vmemclient python library not found" -msgstr "找不到 vmemclient Python 程式庫" - -#, python-format -msgid "volume %s not found in drbdmanage" -msgstr "在 DRBDmanage 中找不到磁區 %s" - -msgid "volume assigned" -msgstr "已指派磁區" - -msgid "volume changed" -msgstr "已變更磁區" - -msgid "volume is already attached" -msgstr "已連接磁區" - -msgid "volume is not local to this node" -msgstr "磁區不是此節點的本端磁區" - -#, python-format -msgid "" -"volume size %(volume_size)d is too small to restore backup of size %(size)d." -msgstr "磁區大小 %(volume_size)d 太小,無法還原大小為 %(size)d 的備份。" - -#, python-format -msgid "volume size %d is invalid." -msgstr "磁區大小 %d 無效。" - -msgid "" -"volume_type must be provided when creating a volume in a consistency group." -msgstr "在一致性群組中建立磁區時,必須提供volume_type。" - -msgid "volume_type_id cannot be None" -msgstr "volume_type_id 不能為 None" - -#, python-format -msgid "volume_types must be provided to create consistency group %(name)s." 
-msgstr "必須提供 volume_types,才能建立一致性群組 %(name)s。" - -#, python-format -msgid "volume_types must be provided to create consistency group %s." -msgstr "必須提供 volume_types,才能建立一致性群組 %s。" - -msgid "volumes assigned" -msgstr "已指派磁區" - -msgid "volumes changed" -msgstr "已變更磁區" - -#, python-format -msgid "wait_for_condition: %s timed out." -msgstr "wait_for_condition:%s 已逾時。" - -#, python-format -msgid "" -"zfssa_manage_policy property needs to be set to 'strict' or 'loose'. Current " -"value is: %s." -msgstr "zfssa_manage_policy 內容需要設為 'strict' 或 'loose'。現行值為:%s。" diff --git a/cinder/manager.py b/cinder/manager.py deleted file mode 100644 index ba38476df..000000000 --- a/cinder/manager.py +++ /dev/null @@ -1,310 +0,0 @@ -# Copyright 2010 United States Government as represented by the -# Administrator of the National Aeronautics and Space Administration. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -"""Base Manager class. - -Managers are responsible for a certain aspect of the system. It is a logical -grouping of code relating to a portion of the system. In general other -components should be using the manager to make changes to the components that -it is responsible for. - -For example, other components that need to deal with volumes in some way, -should do so by calling methods on the VolumeManager instead of directly -changing fields in the database. This allows us to keep all of the code -relating to volumes in the same place. 
- -We have adopted a basic strategy of Smart managers and dumb data, which means -rather than attaching methods to data objects, components should call manager -methods that act on the data. - -Methods on managers that can be executed locally should be called directly. If -a particular method must execute on a remote host, this should be done via rpc -to the service that wraps the manager - -Managers should be responsible for most of the db access, and -non-implementation specific data. Anything implementation specific that can't -be generalized should be done by the Driver. - -In general, we prefer to have one manager with multiple drivers for different -implementations, but sometimes it makes sense to have multiple managers. You -can think of it this way: Abstract different overall strategies at the manager -level(FlatNetwork vs VlanNetwork), and different implementations at the driver -level(LinuxNetDriver vs CiscoNetDriver). - -Managers will often provide methods for initial setup of a host or periodic -tasks to a wrapping service. - -This module provides Manager, a base class for managers. - -""" - - -from oslo_config import cfg -from oslo_log import log as logging -import oslo_messaging as messaging -from oslo_service import periodic_task -from oslo_utils import timeutils - -from cinder import context -from cinder import db -from cinder.db import base -from cinder import exception -from cinder import objects -from cinder import rpc -from cinder.scheduler import rpcapi as scheduler_rpcapi -from cinder import utils - -from eventlet import greenpool - - -CONF = cfg.CONF -LOG = logging.getLogger(__name__) - - -class PeriodicTasks(periodic_task.PeriodicTasks): - def __init__(self): - super(PeriodicTasks, self).__init__(CONF) - - -class Manager(base.Base, PeriodicTasks): - # Set RPC API version to 1.0 by default. 
- RPC_API_VERSION = '1.0' - - target = messaging.Target(version=RPC_API_VERSION) - - def __init__(self, host=None, db_driver=None, cluster=None, **kwargs): - if not host: - host = CONF.host - self.host = host - self.cluster = cluster - self.additional_endpoints = [] - self.availability_zone = CONF.storage_availability_zone - super(Manager, self).__init__(db_driver) - - @property - def service_topic_queue(self): - return self.cluster or self.host - - def init_host(self, service_id=None, added_to_cluster=None): - """Handle initialization if this is a standalone service. - - A hook point for services to execute tasks before the services are made - available (i.e. showing up on RPC and starting to accept RPC calls) to - other components. Child classes should override this method. - - :param service_id: ID of the service where the manager is running. - :param added_to_cluster: True when a host's cluster configuration has - changed from not being defined or being '' to - any other value and the DB service record - reflects this new value. - """ - pass - - def init_host_with_rpc(self): - """A hook for service to do jobs after RPC is ready. - - Like init_host(), this method is a hook where services get a chance - to execute tasks that *need* RPC. Child classes should override - this method. - - """ - pass - - def is_working(self): - """Method indicating if service is working correctly. - - This method is supposed to be overridden by subclasses and return if - manager is working correctly. - """ - return True - - def reset(self): - """Method executed when SIGHUP is caught by the process. - - We're utilizing it to reset RPC API version pins to avoid restart of - the service when rolling upgrade is completed. 
- """ - LOG.info('Resetting cached RPC version pins.') - rpc.LAST_OBJ_VERSIONS = {} - rpc.LAST_RPC_VERSIONS = {} - - def set_log_levels(self, context, log_request): - utils.set_log_levels(log_request.prefix, log_request.level) - - def get_log_levels(self, context, log_request): - levels = utils.get_log_levels(log_request.prefix) - log_levels = [objects.LogLevel(context, prefix=prefix, level=level) - for prefix, level in levels.items()] - return objects.LogLevelList(context, objects=log_levels) - - -class ThreadPoolManager(Manager): - def __init__(self, *args, **kwargs): - self._tp = greenpool.GreenPool() - super(ThreadPoolManager, self).__init__(*args, **kwargs) - - def _add_to_threadpool(self, func, *args, **kwargs): - self._tp.spawn_n(func, *args, **kwargs) - - -class SchedulerDependentManager(ThreadPoolManager): - """Periodically send capability updates to the Scheduler services. - - Services that need to update the Scheduler of their capabilities - should derive from this class. Otherwise they can derive from - manager.Manager directly. Updates are only sent after - update_service_capabilities is called with non-None values. 
- - """ - - def __init__(self, host=None, db_driver=None, service_name='undefined', - cluster=None): - self.last_capabilities = None - self.service_name = service_name - self.scheduler_rpcapi = scheduler_rpcapi.SchedulerAPI() - super(SchedulerDependentManager, self).__init__(host, db_driver, - cluster=cluster) - - def update_service_capabilities(self, capabilities): - """Remember these capabilities to send on next periodic update.""" - self.last_capabilities = capabilities - - @periodic_task.periodic_task - def _publish_service_capabilities(self, context): - """Pass data back to the scheduler at a periodic interval.""" - if self.last_capabilities: - LOG.debug('Notifying Schedulers of capabilities ...') - self.scheduler_rpcapi.update_service_capabilities( - context, - self.service_name, - self.host, - self.last_capabilities, - self.cluster) - try: - self.scheduler_rpcapi.notify_service_capabilities( - context, - self.service_name, - self.service_topic_queue, - self.last_capabilities) - except exception.ServiceTooOld as e: - # This means we have Newton's c-sch in the deployment, so - # rpcapi cannot send the message. We can safely ignore the - # error. Log it because it shouldn't happen after upgrade. - msg = ("Failed to notify about cinder-volume service " - "capabilities for host %(host)s. This is normal " - "during a live upgrade. Error: %(e)s") - LOG.warning(msg, {'host': self.host, 'e': e}) - - def reset(self): - super(SchedulerDependentManager, self).reset() - self.scheduler_rpcapi = scheduler_rpcapi.SchedulerAPI() - - -class CleanableManager(object): - def do_cleanup(self, context, cleanup_request): - LOG.info('Initiating service %s cleanup', - cleanup_request.service_id) - - # If the 'until' field in the cleanup request is not set, we default to - # this very moment. 
- until = cleanup_request.until or timeutils.utcnow() - keep_entry = False - - to_clean = db.worker_get_all( - context, - resource_type=cleanup_request.resource_type, - resource_id=cleanup_request.resource_id, - service_id=cleanup_request.service_id, - until=until) - - for clean in to_clean: - original_service_id = clean.service_id - original_time = clean.updated_at - # Try to do a soft delete to mark the entry as being cleaned up - # by us (setting service id to our service id). - res = db.worker_claim_for_cleanup(context, - claimer_id=self.service_id, - orm_worker=clean) - - # Claim may fail if entry is being cleaned by another service, has - # been removed (finished cleaning) by another service or the user - # started a new cleanable operation. - # In any of these cases we don't have to do cleanup or remove the - # worker entry. - if not res: - continue - - # Try to get versioned object for resource we have to cleanup - try: - vo_cls = getattr(objects, clean.resource_type) - vo = vo_cls.get_by_id(context, clean.resource_id) - # Set the worker DB entry in the VO and mark it as being a - # clean operation - clean.cleaning = True - vo.worker = clean - except exception.NotFound: - LOG.debug('Skipping cleanup for non existent %(type)s %(id)s.', - {'type': clean.resource_type, - 'id': clean.resource_id}) - else: - # Resource status should match - if vo.status != clean.status: - LOG.debug('Skipping cleanup for mismatching work on ' - '%(type)s %(id)s: %(exp_sts)s <> %(found_sts)s.', - {'type': clean.resource_type, - 'id': clean.resource_id, - 'exp_sts': clean.status, - 'found_sts': vo.status}) - else: - LOG.info('Cleaning %(type)s with id %(id)s and status ' - '%(status)s', - {'type': clean.resource_type, - 'id': clean.resource_id, - 'status': clean.status}, - resource=vo) - try: - # Some cleanup jobs are performed asynchronously, so - # we don't delete the worker entry, they'll take care - # of it - keep_entry = self._do_cleanup(context, vo) - except Exception: - 
LOG.exception('Could not perform cleanup.') - # Return the worker DB entry to the original service - db.worker_update(context, clean.id, - service_id=original_service_id, - updated_at=original_time) - continue - - # The resource either didn't exist or was properly cleaned, either - # way we can remove the entry from the worker table if the cleanup - # method doesn't want to keep the entry (for example for delayed - # deletion). - if not keep_entry and not db.worker_destroy(context, id=clean.id): - LOG.warning('Could not remove worker entry %s.', clean.id) - - LOG.info('Service %s cleanup completed.', cleanup_request.service_id) - - def _do_cleanup(self, ctxt, vo_resource): - return False - - def init_host(self, service_id, **kwargs): - ctxt = context.get_admin_context() - self.service_id = service_id - # TODO(geguileo): Once we don't support MySQL 5.5 anymore we can remove - # call to workers_init. - db.workers_init() - cleanup_request = objects.CleanupRequest(service_id=service_id) - self.do_cleanup(ctxt, cleanup_request) diff --git a/cinder/message/__init__.py b/cinder/message/__init__.py deleted file mode 100644 index e69de29bb..000000000 diff --git a/cinder/message/api.py b/cinder/message/api.py deleted file mode 100644 index 553ff8bab..000000000 --- a/cinder/message/api.py +++ /dev/null @@ -1,96 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -""" -Handles all requests related to user facing messages. 
-""" -import datetime - -from oslo_config import cfg -from oslo_log import log as logging -from oslo_utils import timeutils - -from cinder.db import base -from cinder.message import message_field - - -messages_opts = [ - cfg.IntOpt('message_ttl', default=2592000, - help='message minimum life in seconds.'), - cfg.IntOpt('message_reap_interval', default=86400, - help='interval between periodic task runs to clean expired ' - 'messages in seconds.') -] - - -CONF = cfg.CONF -CONF.register_opts(messages_opts) - -LOG = logging.getLogger(__name__) - - -class API(base.Base): - """API for handling user messages.""" - - def create(self, context, action, - resource_type=message_field.Resource.VOLUME, - resource_uuid=None, exception=None, detail=None, level="ERROR"): - """Create a message with the specified information.""" - LOG.info("Creating message record for request_id = %s", - context.request_id) - # Updates expiry time for message as per message_ttl config. - expires_at = (timeutils.utcnow() + datetime.timedelta( - seconds=CONF.message_ttl)) - - detail_id = message_field.translate_detail_id(exception, detail) - message_record = {'project_id': context.project_id, - 'request_id': context.request_id, - 'resource_type': resource_type, - 'resource_uuid': resource_uuid, - 'action_id': action[0] if action else '', - 'message_level': level, - 'event_id': "VOLUME_%s_%s_%s" % (resource_type, - action[0], - detail_id), - 'detail_id': detail_id, - 'expires_at': expires_at} - try: - self.db.message_create(context, message_record) - except Exception: - LOG.exception("Failed to create message record " - "for request_id %s", context.request_id) - - def get(self, context, id): - """Return message with the specified id.""" - return self.db.message_get(context, id) - - def get_all(self, context, filters=None, marker=None, - limit=None, offset=None, sort_keys=None, - sort_dirs=None): - """Return all messages for the given context.""" - - filters = filters or {} - - messages = 
self.db.message_get_all(context, filters=filters, - marker=marker, limit=limit, - offset=offset, sort_keys=sort_keys, - sort_dirs=sort_dirs) - return messages - - def delete(self, context, id): - """Delete message with the specified id.""" - ctx = context.elevated() - return self.db.message_destroy(ctx, id) - - def cleanup_expired_messages(self, context): - ctx = context.elevated() - count = self.db.cleanup_expired_messages(ctx) - LOG.info("Deleted %s expired messages.", count) diff --git a/cinder/message/defined_messages.py b/cinder/message/defined_messages.py deleted file mode 100644 index 3d6844234..000000000 --- a/cinder/message/defined_messages.py +++ /dev/null @@ -1,47 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -"""Event ID and user visible message mapping. - -Event IDs are used to look up the message to be displayed for an API Message -object. All defined messages should be appropriate for any API user to see -and not contain any sensitive information. A good rule-of-thumb is to be very -general in error messages unless the issue is due to a bad user action, then be -specific. 
-""" - -from cinder.i18n import _ - - -class EventIds(object): - UNKNOWN_ERROR = 'VOLUME_000001' - UNABLE_TO_ALLOCATE = 'VOLUME_000002' - ATTACH_READONLY_VOLUME = 'VOLUME_000003' - IMAGE_FROM_VOLUME_OVER_QUOTA = 'VOLUME_000004' - - -event_id_message_map = { - EventIds.UNKNOWN_ERROR: _("An unknown error occurred."), - EventIds.UNABLE_TO_ALLOCATE: _( - "No storage could be allocated for this volume " - "request. You may be able to try another size or" - " volume type."), - EventIds.ATTACH_READONLY_VOLUME: _( - "A readonly volume must be attached as readonly."), - EventIds.IMAGE_FROM_VOLUME_OVER_QUOTA: _( - "Failed to copy volume to image as image quota has been met. Please " - "delete images or have your limit increased, then try again."), -} - - -def get_message_text(event_id): - return event_id_message_map[event_id] diff --git a/cinder/message/message_field.py b/cinder/message/message_field.py deleted file mode 100644 index dac75aad4..000000000 --- a/cinder/message/message_field.py +++ /dev/null @@ -1,105 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -"""Message Resource, Action, Detail and user visible message. - -Use Resource, Action and Detail's combination to indicate the Event -in the format of: - -EVENT: VOLUME_RESOURCE_ACTION_DETAIL - -Also, use exception-to-detail mapping to decrease the workload of -classifying event in cinder's task code. 
-""" - -from cinder.i18n import _ - - -class Resource(object): - - VOLUME = 'VOLUME' - - -class Action(object): - - SCHEDULE_ALLOCATE_VOLUME = ('001', _('schedule allocate volume')) - ATTACH_VOLUME = ('002', _('attach volume')) - COPY_VOLUME_TO_IMAGE = ('003', _('copy volume to image')) - UPDATE_ATTACHMENT = ('004', _('update attachment')) - COPY_IMAGE_TO_VOLUME = ('005', _('copy image to volume')) - - ALL = (SCHEDULE_ALLOCATE_VOLUME, - ATTACH_VOLUME, - COPY_VOLUME_TO_IMAGE, - UPDATE_ATTACHMENT, - COPY_IMAGE_TO_VOLUME) - - -class Detail(object): - - UNKNOWN_ERROR = ('001', _('An unknown error occurred.')) - DRIVER_NOT_INITIALIZED = ('002', - _('Driver is not initialized at present.')) - NO_BACKEND_AVAILABLE = ('003', - _('Could not found any available ' - 'weighted backend.')) - FAILED_TO_UPLOAD_VOLUME = ('004', - _("Failed to upload volume to image " - "at backend.")) - VOLUME_ATTACH_MODE_INVALID = ('005', - _("Volume's attach mode is invalid.")) - QUOTA_EXCEED = ('006', - _("Not enough quota resource for operation.")) - NOT_ENOUGH_SPACE_FOR_IMAGE = ('007', - _("Image used for creating volume exceeds " - "available space.")) - - ALL = (UNKNOWN_ERROR, - DRIVER_NOT_INITIALIZED, - NO_BACKEND_AVAILABLE, - FAILED_TO_UPLOAD_VOLUME, - VOLUME_ATTACH_MODE_INVALID, - QUOTA_EXCEED, - NOT_ENOUGH_SPACE_FOR_IMAGE) - - # Exception and detail mappings - EXCEPTION_DETAIL_MAPPINGS = { - DRIVER_NOT_INITIALIZED: ['DriverNotInitialized'], - NO_BACKEND_AVAILABLE: ['NoValidBackend'], - VOLUME_ATTACH_MODE_INVALID: ['InvalidVolumeAttachMode'], - QUOTA_EXCEED: ['ImageLimitExceeded', - 'BackupLimitExceeded', - 'SnapshotLimitExceeded'], - NOT_ENOUGH_SPACE_FOR_IMAGE: ['ImageTooBig'] - } - - -def translate_action(action_id): - action_message = next((action[1] for action in Action.ALL - if action[0] == action_id), None) - return action_message or 'unknown action' - - -def translate_detail(detail_id): - detail_message = next((action[1] for action in Detail.ALL - if action[0] == detail_id), None) 
- return detail_message or Detail.UNKNOWN_ERROR[1] - - -def translate_detail_id(exception, detail): - if exception is not None and isinstance(exception, Exception): - for key, value in Detail.EXCEPTION_DETAIL_MAPPINGS.items(): - if exception.__class__.__name__ in value: - return key[0] - if detail in Detail.ALL: - return detail[0] - return Detail.UNKNOWN_ERROR[0] diff --git a/cinder/objects/__init__.py b/cinder/objects/__init__.py deleted file mode 100644 index 041462721..000000000 --- a/cinder/objects/__init__.py +++ /dev/null @@ -1,44 +0,0 @@ -# Copyright 2015 IBM Corp. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -# NOTE(comstud): You may scratch your head as you see code that imports -# this module and then accesses attributes for objects such as Instance, -# etc, yet you do not see these attributes in here. Never fear, there is -# a little bit of magic. When objects are registered, an attribute is set -# on this module automatically, pointing to the newest/latest version of -# the object. - - -def register_all(): - # NOTE(danms): You must make sure your object gets imported in this - # function in order for it to be registered by services that may - # need to receive it via RPC. 
- __import__('cinder.objects.backup') - # NOTE(geguileo): Don't include cleanable to prevent circular imports - __import__('cinder.objects.cleanup_request') - __import__('cinder.objects.cgsnapshot') - __import__('cinder.objects.cluster') - __import__('cinder.objects.consistencygroup') - __import__('cinder.objects.qos_specs') - __import__('cinder.objects.request_spec') - __import__('cinder.objects.service') - __import__('cinder.objects.snapshot') - __import__('cinder.objects.volume') - __import__('cinder.objects.volume_attachment') - __import__('cinder.objects.volume_type') - __import__('cinder.objects.group_type') - __import__('cinder.objects.group') - __import__('cinder.objects.group_snapshot') - __import__('cinder.objects.manageableresources') - __import__('cinder.objects.dynamic_log') diff --git a/cinder/objects/backup.py b/cinder/objects/backup.py deleted file mode 100644 index 0dfbad6e8..000000000 --- a/cinder/objects/backup.py +++ /dev/null @@ -1,276 +0,0 @@ -# Copyright 2015 Intel Corporation -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -from oslo_config import cfg -from oslo_serialization import base64 -from oslo_serialization import jsonutils -from oslo_utils import versionutils -from oslo_versionedobjects import fields - -from cinder import db -from cinder import exception -from cinder.i18n import _ -from cinder import objects -from cinder.objects import base -from cinder.objects import fields as c_fields - - -CONF = cfg.CONF - - -@base.CinderObjectRegistry.register -class Backup(base.CinderPersistentObject, base.CinderObject, - base.CinderObjectDictCompat): - # Version 1.0: Initial version - # Version 1.1: Add new field num_dependent_backups and extra fields - # is_incremental and has_dependent_backups. - # Version 1.2: Add new field snapshot_id and data_timestamp. - # Version 1.3: Changed 'status' field to use BackupStatusField - # Version 1.4: Add restore_volume_id - VERSION = '1.4' - - fields = { - 'id': fields.UUIDField(), - - 'user_id': fields.StringField(), - 'project_id': fields.StringField(), - - 'volume_id': fields.UUIDField(), - 'host': fields.StringField(nullable=True), - 'availability_zone': fields.StringField(nullable=True), - 'container': fields.StringField(nullable=True), - 'parent_id': fields.StringField(nullable=True), - 'status': c_fields.BackupStatusField(nullable=True), - 'fail_reason': fields.StringField(nullable=True), - 'size': fields.IntegerField(nullable=True), - - 'display_name': fields.StringField(nullable=True), - 'display_description': fields.StringField(nullable=True), - - # NOTE(dulek): Metadata field is used to store any strings by backup - # drivers, that's why it can't be DictOfStringsField. 
- 'service_metadata': fields.StringField(nullable=True), - 'service': fields.StringField(nullable=True), - - 'object_count': fields.IntegerField(nullable=True), - - 'temp_volume_id': fields.StringField(nullable=True), - 'temp_snapshot_id': fields.StringField(nullable=True), - 'num_dependent_backups': fields.IntegerField(nullable=True), - 'snapshot_id': fields.StringField(nullable=True), - 'data_timestamp': fields.DateTimeField(nullable=True), - 'restore_volume_id': fields.StringField(nullable=True), - } - - obj_extra_fields = ['name', 'is_incremental', 'has_dependent_backups'] - - @property - def name(self): - return CONF.backup_name_template % self.id - - @property - def is_incremental(self): - return bool(self.parent_id) - - @property - def has_dependent_backups(self): - return bool(self.num_dependent_backups) - - def obj_make_compatible(self, primitive, target_version): - """Make an object representation compatible with a target version.""" - super(Backup, self).obj_make_compatible(primitive, target_version) - target_version = versionutils.convert_version_to_tuple(target_version) - - @staticmethod - def _from_db_object(context, backup, db_backup): - for name, field in backup.fields.items(): - value = db_backup.get(name) - if isinstance(field, fields.IntegerField): - value = value if value is not None else 0 - backup[name] = value - - backup._context = context - backup.obj_reset_changes() - return backup - - def create(self): - if self.obj_attr_is_set('id'): - raise exception.ObjectActionError(action='create', - reason='already created') - updates = self.cinder_obj_get_changes() - - db_backup = db.backup_create(self._context, updates) - self._from_db_object(self._context, self, db_backup) - - def save(self): - updates = self.cinder_obj_get_changes() - if updates: - db.backup_update(self._context, self.id, updates) - - self.obj_reset_changes() - - def destroy(self): - with self.obj_as_admin(): - updated_values = db.backup_destroy(self._context, self.id) - 
self.update(updated_values) - self.obj_reset_changes(updated_values.keys()) - - @staticmethod - def decode_record(backup_url): - """Deserialize backup metadata from string into a dictionary. - - :raises InvalidInput: - """ - try: - return jsonutils.loads(base64.decode_as_text(backup_url)) - except TypeError: - msg = _("Can't decode backup record.") - except ValueError: - msg = _("Can't parse backup record.") - raise exception.InvalidInput(reason=msg) - - def encode_record(self, **kwargs): - """Serialize backup object, with optional extra info, into a string.""" - # We don't want to export extra fields and we want to force lazy - # loading, so we can't use dict(self) or self.obj_to_primitive - record = {name: field.to_primitive(self, name, getattr(self, name)) - for name, field in self.fields.items()} - # We must update kwargs instead of record to ensure we don't overwrite - # "real" data from the backup - kwargs.update(record) - retval = jsonutils.dump_as_bytes(kwargs) - return base64.encode_as_text(retval) - - -@base.CinderObjectRegistry.register -class BackupList(base.ObjectListBase, base.CinderObject): - VERSION = '1.0' - - fields = { - 'objects': fields.ListOfObjectsField('Backup'), - } - - @classmethod - def get_all(cls, context, filters=None, marker=None, limit=None, - offset=None, sort_keys=None, sort_dirs=None): - backups = db.backup_get_all(context, filters, marker, limit, offset, - sort_keys, sort_dirs) - return base.obj_make_list(context, cls(context), objects.Backup, - backups) - - @classmethod - def get_all_by_host(cls, context, host): - backups = db.backup_get_all_by_host(context, host) - return base.obj_make_list(context, cls(context), objects.Backup, - backups) - - @classmethod - def get_all_by_project(cls, context, project_id, filters=None, - marker=None, limit=None, offset=None, - sort_keys=None, sort_dirs=None): - backups = db.backup_get_all_by_project(context, project_id, filters, - marker, limit, offset, - sort_keys, sort_dirs) - return 
base.obj_make_list(context, cls(context), objects.Backup, - backups) - - @classmethod - def get_all_by_volume(cls, context, volume_id, filters=None): - backups = db.backup_get_all_by_volume(context, volume_id, filters) - return base.obj_make_list(context, cls(context), objects.Backup, - backups) - - @classmethod - def get_all_active_by_window(cls, context, begin, end): - backups = db.backup_get_all_active_by_window(context, begin, end) - return base.obj_make_list(context, cls(context), objects.Backup, - backups) - - -@base.CinderObjectRegistry.register -class BackupImport(Backup): - """Special object for Backup Imports. - - This class should not be used for anything but Backup creation when - importing backups to the DB. - - On creation it allows to specify the ID for the backup, since it's the - reference used in parent_id it is imperative that this is preserved. - - Backup Import objects get promoted to standard Backups when the import is - completed. - """ - - def create(self): - updates = self.cinder_obj_get_changes() - - db_backup = db.backup_create(self._context, updates) - self._from_db_object(self._context, self, db_backup) - - -@base.CinderObjectRegistry.register -class BackupDeviceInfo(base.CinderObject, base.CinderObjectDictCompat, - base.CinderComparableObject): - # Version 1.0: Initial version - VERSION = '1.0' - fields = { - 'volume': fields.ObjectField('Volume', nullable=True), - 'snapshot': fields.ObjectField('Snapshot', nullable=True), - 'secure_enabled': fields.BooleanField(default=False), - } - obj_extra_fields = ['is_snapshot', 'device_obj'] - - @property - def is_snapshot(self): - if self.obj_attr_is_set('snapshot') == self.obj_attr_is_set('volume'): - msg = _("Either snapshot or volume field should be set.") - raise exception.ProgrammingError(message=msg) - return self.obj_attr_is_set('snapshot') - - @property - def device_obj(self): - return self.snapshot if self.is_snapshot else self.volume - - # FIXME(sborkows): This should go away in early 
O as we stop supporting - # backward compatibility with M. - @classmethod - def from_primitive(cls, primitive, context, expected_attrs=None): - backup_device = BackupDeviceInfo() - if primitive['is_snapshot']: - if isinstance(primitive['backup_device'], objects.Snapshot): - backup_device.snapshot = primitive['backup_device'] - else: - backup_device.snapshot = objects.Snapshot._from_db_object( - context, objects.Snapshot(), primitive['backup_device'], - expected_attrs=expected_attrs) - else: - if isinstance(primitive['backup_device'], objects.Volume): - backup_device.volume = primitive['backup_device'] - else: - backup_device.volume = objects.Volume._from_db_object( - context, objects.Volume(), primitive['backup_device'], - expected_attrs=expected_attrs) - backup_device.secure_enabled = primitive['secure_enabled'] - return backup_device - - # FIXME(sborkows): This should go away in early O as we stop supporting - # backward compatibility with M. - def to_primitive(self, context): - backup_device = (db.snapshot_get(context, self.snapshot.id) - if self.is_snapshot - else db.volume_get(context, self.volume.id)) - primitive = {'backup_device': backup_device, - 'secure_enabled': self.secure_enabled, - 'is_snapshot': self.is_snapshot} - return primitive diff --git a/cinder/objects/base.py b/cinder/objects/base.py deleted file mode 100644 index 11e66ef7f..000000000 --- a/cinder/objects/base.py +++ /dev/null @@ -1,545 +0,0 @@ -# Copyright 2015 IBM Corp. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the -# License for the specific language governing permissions and limitations -# under the License. - -"""Cinder common internal object model""" - -import contextlib -import datetime - -from oslo_log import log as logging -from oslo_utils import versionutils -from oslo_versionedobjects import base -from oslo_versionedobjects import fields - -from cinder import db -from cinder import exception -from cinder.i18n import _ -from cinder import objects - - -LOG = logging.getLogger('object') -obj_make_list = base.obj_make_list - - -class CinderObjectVersionsHistory(dict): - """Helper class that maintains objects version history. - - Current state of object versions is aggregated in a single version number - that explicitly identifies a set of object versions. That way a service - is able to report what objects it supports using a single string and all - the newer services will know exactly what that mean for a single object. - """ - - def __init__(self): - super(CinderObjectVersionsHistory, self).__init__() - # NOTE(dulek): This is our pre-history and a starting point - Liberty. - # We want Mitaka to be able to talk to Liberty services, so we need to - # handle backporting to these objects versions (although I don't expect - # we've made a lot of incompatible changes inside the objects). - # - # If an object doesn't exist in Liberty, RPC API compatibility layer - # shouldn't send it or convert it to a dictionary. - # - # Please note that we do not need to add similar entires for each - # release. Liberty is here just for historical reasons. 
- self.versions = ['liberty'] - self['liberty'] = { - 'Backup': '1.1', - 'BackupImport': '1.1', - 'BackupList': '1.0', - 'ConsistencyGroup': '1.1', - 'ConsistencyGroupList': '1.0', - 'Service': '1.0', - 'ServiceList': '1.0', - 'Snapshot': '1.0', - 'SnapshotList': '1.0', - 'Volume': '1.1', - 'VolumeAttachment': '1.0', - 'VolumeAttachmentList': '1.0', - 'VolumeList': '1.1', - 'VolumeType': '1.0', - 'VolumeTypeList': '1.0', - } - - def get_current(self): - return self.versions[-1] - - def get_current_versions(self): - return self[self.get_current()] - - def add(self, ver, updates): - if ver in self.versions: - msg = 'Version %s already exists in history.' % ver - raise exception.ProgrammingError(reason=msg) - - self[ver] = self[self.get_current()].copy() - self.versions.append(ver) - self[ver].update(updates) - - -OBJ_VERSIONS = CinderObjectVersionsHistory() -# NOTE(dulek): You should add a new version here each time you bump a version -# of any object. As a second parameter you need to specify only what changed. -# -# When dropping backward compatibility with an OpenStack release we can rework -# this and remove some history while keeping the versions order. 
-OBJ_VERSIONS.add('1.0', {'Backup': '1.3', 'BackupImport': '1.3', - 'CGSnapshot': '1.0', 'CGSnapshotList': '1.0', - 'ConsistencyGroup': '1.2', - 'ConsistencyGroupList': '1.1', 'Service': '1.1', - 'Volume': '1.3', 'VolumeTypeList': '1.1'}) -OBJ_VERSIONS.add('1.1', {'Service': '1.2', 'ServiceList': '1.1'}) -OBJ_VERSIONS.add('1.2', {'Backup': '1.4', 'BackupImport': '1.4'}) -OBJ_VERSIONS.add('1.3', {'Service': '1.3'}) -OBJ_VERSIONS.add('1.4', {'Snapshot': '1.1'}) -OBJ_VERSIONS.add('1.5', {'VolumeType': '1.1'}) -OBJ_VERSIONS.add('1.6', {'QualityOfServiceSpecs': '1.0', - 'QualityOfServiceSpecsList': '1.0', - 'VolumeType': '1.2'}) -OBJ_VERSIONS.add('1.7', {'Cluster': '1.0', 'ClusterList': '1.0', - 'Service': '1.4', 'Volume': '1.4', - 'ConsistencyGroup': '1.3'}) -OBJ_VERSIONS.add('1.8', {'RequestSpec': '1.0', 'VolumeProperties': '1.0'}) -OBJ_VERSIONS.add('1.9', {'GroupType': '1.0', 'GroupTypeList': '1.0'}) -OBJ_VERSIONS.add('1.10', {'Group': '1.0', 'GroupList': '1.0', 'Volume': '1.5', - 'RequestSpec': '1.1', 'VolumeProperties': '1.1'}) -OBJ_VERSIONS.add('1.11', {'GroupSnapshot': '1.0', 'GroupSnapshotList': '1.0', - 'Group': '1.1'}) -OBJ_VERSIONS.add('1.12', {'VolumeType': '1.3'}) -OBJ_VERSIONS.add('1.13', {'CleanupRequest': '1.0'}) -OBJ_VERSIONS.add('1.14', {'VolumeAttachmentList': '1.1'}) -OBJ_VERSIONS.add('1.15', {'Volume': '1.6', 'Snapshot': '1.2'}) -OBJ_VERSIONS.add('1.16', {'BackupDeviceInfo': '1.0'}) -OBJ_VERSIONS.add('1.17', {'VolumeAttachment': '1.1'}) -OBJ_VERSIONS.add('1.18', {'Snapshot': '1.3'}) -OBJ_VERSIONS.add('1.19', {'ConsistencyGroup': '1.4', 'CGSnapshot': '1.1'}) -OBJ_VERSIONS.add('1.20', {'Cluster': '1.1'}) -OBJ_VERSIONS.add('1.21', {'ManageableSnapshot': '1.0', - 'ManageableVolume': '1.0', - 'ManageableVolumeList': '1.0', - 'ManageableSnapshotList': '1.0'}) -OBJ_VERSIONS.add('1.22', {'Snapshot': '1.4'}) -OBJ_VERSIONS.add('1.23', {'VolumeAttachment': '1.2'}) -OBJ_VERSIONS.add('1.24', {'LogLevel': '1.0', 'LogLevelList': '1.0'}) -OBJ_VERSIONS.add('1.25', 
{'Group': '1.2'}) -OBJ_VERSIONS.add('1.26', {'Snapshot': '1.5'}) - - -class CinderObjectRegistry(base.VersionedObjectRegistry): - def registration_hook(self, cls, index): - """Hook called when registering a class. - - This method takes care of adding the class to cinder.objects namespace. - - Should registering class have a method called cinder_ovo_cls_init it - will be called to support class initialization. This is convenient - for all persistent classes that need to register their models. - """ - setattr(objects, cls.obj_name(), cls) - - # If registering class has a callable initialization method, call it. - if callable(getattr(cls, 'cinder_ovo_cls_init', None)): - cls.cinder_ovo_cls_init() - - -class CinderObject(base.VersionedObject): - # NOTE(thangp): OBJ_PROJECT_NAMESPACE needs to be set so that nova, - # cinder, and other objects can exist on the same bus and be distinguished - # from one another. - OBJ_PROJECT_NAMESPACE = 'cinder' - - def cinder_obj_get_changes(self): - """Returns a dict of changed fields with tz unaware datetimes. - - Any timezone aware datetime field will be converted to UTC timezone - and returned as timezone unaware datetime. - - This will allow us to pass these fields directly to a db update - method as they can't have timezone information. - """ - # Get dirtied/changed fields - changes = self.obj_get_changes() - - # Look for datetime objects that contain timezone information - for k, v in changes.items(): - if isinstance(v, datetime.datetime) and v.tzinfo: - # Remove timezone information and adjust the time according to - # the timezone information's offset. 
- changes[k] = v.replace(tzinfo=None) - v.utcoffset() - - # Return modified dict - return changes - - def obj_make_compatible(self, primitive, target_version): - _log_backport(self, target_version) - super(CinderObject, self).obj_make_compatible(primitive, - target_version) - - def __contains__(self, name): - # We're using obj_extra_fields to provide aliases for some fields while - # in transition period. This override is to make these aliases pass - # "'foo' in obj" tests. - return name in self.obj_extra_fields or super(CinderObject, - self).__contains__(name) - - -class CinderObjectDictCompat(base.VersionedObjectDictCompat): - """Mix-in to provide dictionary key access compat. - - If an object needs to support attribute access using - dictionary items instead of object attributes, inherit - from this class. This should only be used as a temporary - measure until all callers are converted to use modern - attribute access. - - NOTE(berrange) This class will eventually be deleted. - """ - - def get(self, key, value=base._NotSpecifiedSentinel): - """For backwards-compatibility with dict-based objects. - - NOTE(danms): May be removed in the future. - """ - if key not in self.obj_fields: - # NOTE(jdg): There are a number of places where we rely on the - # old dictionary version and do a get(xxx, None). - # The following preserves that compatibility but in - # the future we'll remove this shim altogether so don't - # rely on it. 
- LOG.debug('Cinder object %(object_name)s has no ' - 'attribute named: %(attribute_name)s', - {'object_name': self.__class__.__name__, - 'attribute_name': key}) - return None - if (value != base._NotSpecifiedSentinel and - key not in self.obj_extra_fields and - not self.obj_attr_is_set(key)): - return value - else: - try: - return getattr(self, key) - except (exception.ObjectActionError, NotImplementedError): - # Exception when haven't set a value for non-lazy - # loadable attribute, but to mimic typical dict 'get' - # behavior we should still return None - return None - - -class CinderPersistentObject(object): - """Mixin class for Persistent objects. - - This adds the fields that we use in common for all persistent objects. - """ - OPTIONAL_FIELDS = [] - - Not = db.Not - Case = db.Case - - fields = { - 'created_at': fields.DateTimeField(nullable=True), - 'updated_at': fields.DateTimeField(nullable=True), - 'deleted_at': fields.DateTimeField(nullable=True), - 'deleted': fields.BooleanField(default=False, - nullable=True), - } - - @classmethod - def cinder_ovo_cls_init(cls): - """This method is called on OVO registration and sets the DB model.""" - # Persistent Versioned Objects Classes should have a DB model, and if - # they don't, then we have a problem and we must raise an exception on - # registration. - try: - cls.model = db.get_model_for_versioned_object(cls) - except (ImportError, AttributeError): - msg = _("Couldn't find ORM model for Persistent Versioned " - "Object %s.") % cls.obj_name() - raise exception.ProgrammingError(reason=msg) - - @contextlib.contextmanager - def obj_as_admin(self): - """Context manager to make an object call as an admin. - - This temporarily modifies the context embedded in an object to - be elevated() and restores it after the call completes. 
Example - usage: - - with obj.obj_as_admin(): - obj.save() - """ - if self._context is None: - raise exception.OrphanedObjectError(method='obj_as_admin', - objtype=self.obj_name()) - - original_context = self._context - self._context = self._context.elevated() - try: - yield - finally: - self._context = original_context - - @classmethod - def _get_expected_attrs(cls, context, *args, **kwargs): - return None - - @classmethod - def get_by_id(cls, context, id, *args, **kwargs): - # To get by id we need to have a model and for the model to - # have an id field - if 'id' not in cls.fields: - msg = (_('VersionedObject %s cannot retrieve object by id.') % - (cls.obj_name())) - raise NotImplementedError(msg) - - orm_obj = db.get_by_id(context, cls.model, id, *args, **kwargs) - # We pass parameters because fields to expect may depend on them - expected_attrs = cls._get_expected_attrs(context, *args, **kwargs) - kargs = {} - if expected_attrs: - kargs = {'expected_attrs': expected_attrs} - return cls._from_db_object(context, cls(context), orm_obj, **kargs) - - def update_single_status_where(self, new_status, - expected_status, filters=()): - values = {'status': new_status} - expected_status = {'status': expected_status} - return self.conditional_update(values, expected_status, filters) - - def conditional_update(self, values, expected_values=None, filters=(), - save_all=False, session=None, reflect_changes=True, - order=None): - """Compare-and-swap update. - - A conditional object update that, unlike normal update, will SAVE the - contents of the update to the DB. - - Update will only occur in the DB and the object if conditions are met. - - If no expected_values are passed in we will default to make sure that - all fields have not been changed in the DB. Since we cannot know the - original value in the DB for dirty fields in the object those will be - excluded. 
- - We have 4 different condition types we can use in expected_values: - - Equality: {'status': 'available'} - - Inequality: {'status': vol_obj.Not('deleting')} - - In range: {'status': ['available', 'error'] - - Not in range: {'status': vol_obj.Not(['in-use', 'attaching']) - - Method accepts additional filters, which are basically anything that - can be passed to a sqlalchemy query's filter method, for example: - - .. code-block:: python - - [~sql.exists().where(models.Volume.id == models.Snapshot.volume_id)] - - We can select values based on conditions using Case objects in the - 'values' argument. For example: - - .. code-block:: python - - has_snapshot_filter = sql.exists().where( - models.Snapshot.volume_id == models.Volume.id) - case_values = volume.Case([(has_snapshot_filter, 'has-snapshot')], - else_='no-snapshot') - volume.conditional_update({'status': case_values}, - {'status': 'available'})) - - And we can use DB fields using model class attribute for example to - store previous status in the corresponding field even though we don't - know which value is in the db from those we allowed: - - .. code-block:: python - - volume.conditional_update({'status': 'deleting', - 'previous_status': volume.model.status}, - {'status': ('available', 'error')}) - - :param values: Dictionary of key-values to update in the DB. - :param expected_values: Dictionary of conditions that must be met for - the update to be executed. - :param filters: Iterable with additional filters - :param save_all: Object may have changes that are not in the DB, this - will say whether we want those changes saved as well. - :param session: Session to use for the update - :param reflect_changes: If we want changes made in the database to be - reflected in the versioned object. This may - mean in some cases that we have to reload the - object from the database. 
- :param order: Specific order of fields in which to update the values - :returns: number of db rows that were updated, which can be used as a - boolean, since it will be 0 if we couldn't update the DB and - 1 if we could, because we are using unique index id. - """ - if 'id' not in self.fields: - msg = (_('VersionedObject %s does not support conditional update.') - % (self.obj_name())) - raise NotImplementedError(msg) - - # If no conditions are set we will require object in DB to be unchanged - if expected_values is None: - changes = self.obj_what_changed() - - expected = {key: getattr(self, key) - for key in self.fields.keys() - if self.obj_attr_is_set(key) and key not in changes and - key not in self.OPTIONAL_FIELDS} - else: - # Set the id in expected_values to limit conditional update to only - # change this object - expected = expected_values.copy() - expected['id'] = self.id - - # If we want to save any additional changes the object has besides the - # ones referred in values - if save_all: - changes = self.cinder_obj_get_changes() - changes.update(values) - values = changes - - result = db.conditional_update(self._context, self.model, values, - expected, filters, order=order) - - # If we were able to update the DB then we need to update this object - # as well to reflect new DB contents and clear the object's dirty flags - # for those fields. 
- if result and reflect_changes: - # If we have used a Case, a db field or an expression in values we - # don't know which value was used, so we need to read the object - # back from the DB - if any(isinstance(v, self.Case) or db.is_orm_value(v) - for v in values.values()): - # Read back object from DB - obj = type(self).get_by_id(self._context, self.id) - db_values = obj.obj_to_primitive()['versioned_object.data'] - # Only update fields were changes were requested - values = {field: db_values[field] - for field, value in values.items()} - - # NOTE(geguileo): We don't use update method because our objects - # will eventually move away from VersionedObjectDictCompat - for key, value in values.items(): - setattr(self, key, value) - self.obj_reset_changes(values.keys()) - return result - - def refresh(self): - # To refresh we need to have a model and for the model to have an id - # field - if 'id' not in self.fields: - msg = (_('VersionedObject %s cannot retrieve object by id.') % - (self.obj_name())) - raise NotImplementedError(msg) - - current = self.get_by_id(self._context, self.id) - - # Copy contents retrieved from the DB into self - my_data = vars(self) - my_data.clear() - my_data.update(vars(current)) - - @classmethod - def exists(cls, context, id_): - return db.resource_exists(context, cls.model, id_) - - -class CinderComparableObject(base.ComparableVersionedObject): - def __eq__(self, obj): - if hasattr(obj, 'obj_to_primitive'): - return self.obj_to_primitive() == obj.obj_to_primitive() - return False - - def __ne__(self, other): - return not self.__eq__(other) - - -class ObjectListBase(base.ObjectListBase): - def obj_make_compatible(self, primitive, target_version): - _log_backport(self, target_version) - super(ObjectListBase, self).obj_make_compatible(primitive, - target_version) - - -class ClusteredObject(object): - @property - def service_topic_queue(self): - return self.cluster_name or self.host - - @property - def is_clustered(self): - return 
bool(self.cluster_name) - - def assert_not_frozen(self): - ctxt = self._context.elevated() - if db.is_backend_frozen(ctxt, self.host, self.cluster_name): - msg = _('Modification operations are not allowed on frozen ' - 'storage backends.') - raise exception.InvalidInput(reason=msg) - - -class CinderObjectSerializer(base.VersionedObjectSerializer): - OBJ_BASE_CLASS = CinderObject - - def __init__(self, version_cap=None): - super(CinderObjectSerializer, self).__init__() - self.version_cap = version_cap - - # NOTE(geguileo): During upgrades we will use a manifest to ensure that - # all objects are properly backported. This allows us to properly - # backport child objects to the right version even if parent version - # has not been bumped. - if not version_cap or version_cap == OBJ_VERSIONS.get_current(): - self.manifest = None - else: - if version_cap not in OBJ_VERSIONS: - raise exception.CappedVersionUnknown(version=version_cap) - self.manifest = OBJ_VERSIONS[version_cap] - - def _get_capped_obj_version(self, obj): - objname = obj.obj_name() - version_dict = OBJ_VERSIONS.get(self.version_cap, {}) - version_cap = version_dict.get(objname, None) - - if version_cap: - cap_tuple = versionutils.convert_version_to_tuple(version_cap) - obj_tuple = versionutils.convert_version_to_tuple(obj.VERSION) - if cap_tuple > obj_tuple: - # NOTE(dulek): Do not set version cap to be higher than actual - # object version as we don't support "forwardporting" of - # objects. If service will receive an object that's too old it - # should handle it explicitly. - version_cap = None - - return version_cap - - def serialize_entity(self, context, entity): - if isinstance(entity, (tuple, list, set, dict)): - entity = self._process_iterable(context, self.serialize_entity, - entity) - elif (hasattr(entity, 'obj_to_primitive') and - callable(entity.obj_to_primitive)): - # NOTE(dulek): Backport outgoing object to the capped version. 
- backport_ver = self._get_capped_obj_version(entity) - entity = entity.obj_to_primitive(backport_ver, self.manifest) - return entity - - -def _log_backport(ovo, target_version): - """Log backported versioned objects.""" - if target_version and target_version != ovo.VERSION: - LOG.debug('Backporting %(obj_name)s from version %(src_vers)s ' - 'to version %(dst_vers)s', - {'obj_name': ovo.obj_name(), - 'src_vers': ovo.VERSION, - 'dst_vers': target_version}) diff --git a/cinder/objects/cgsnapshot.py b/cinder/objects/cgsnapshot.py deleted file mode 100644 index fc48aba28..000000000 --- a/cinder/objects/cgsnapshot.py +++ /dev/null @@ -1,169 +0,0 @@ -# Copyright 2015 Intel Corporation -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -from cinder import db -from cinder import exception -from cinder.i18n import _ -from cinder import objects -from cinder.objects import base -from oslo_versionedobjects import fields - - -@base.CinderObjectRegistry.register -class CGSnapshot(base.CinderPersistentObject, base.CinderObject, - base.CinderObjectDictCompat, base.ClusteredObject): - # Version 1.0: Initial version - # Version 1.1: Added from_group_snapshot - VERSION = '1.1' - - OPTIONAL_FIELDS = ['consistencygroup', 'snapshots'] - - fields = { - 'id': fields.UUIDField(), - 'consistencygroup_id': fields.UUIDField(nullable=True), - 'project_id': fields.StringField(), - 'user_id': fields.StringField(), - 'name': fields.StringField(nullable=True), - 'description': fields.StringField(nullable=True), - 'status': fields.StringField(nullable=True), - 'consistencygroup': fields.ObjectField('ConsistencyGroup', - nullable=True), - 'snapshots': fields.ObjectField('SnapshotList', nullable=True), - } - - @property - def host(self): - return self.consistencygroup.host - - @property - def cluster_name(self): - return self.consistencygroup.cluster_name - - @classmethod - def _from_db_object(cls, context, cgsnapshot, db_cgsnapshots, - expected_attrs=None): - expected_attrs = expected_attrs or [] - for name, field in cgsnapshot.fields.items(): - if name in cls.OPTIONAL_FIELDS: - continue - value = db_cgsnapshots.get(name) - setattr(cgsnapshot, name, value) - - if 'consistencygroup' in expected_attrs: - consistencygroup = objects.ConsistencyGroup(context) - consistencygroup._from_db_object(context, consistencygroup, - db_cgsnapshots[ - 'consistencygroup']) - cgsnapshot.consistencygroup = consistencygroup - - if 'snapshots' in expected_attrs: - snapshots = base.obj_make_list( - context, objects.SnapshotsList(context), - objects.Snapshots, - db_cgsnapshots['snapshots']) - cgsnapshot.snapshots = snapshots - - cgsnapshot._context = context - cgsnapshot.obj_reset_changes() - return cgsnapshot - - def create(self): - if 
self.obj_attr_is_set('id'): - raise exception.ObjectActionError(action='create', - reason=_('already_created')) - updates = self.cinder_obj_get_changes() - - if 'consistencygroup' in updates: - raise exception.ObjectActionError( - action='create', reason=_('consistencygroup assigned')) - - db_cgsnapshots = db.cgsnapshot_create(self._context, updates) - self._from_db_object(self._context, self, db_cgsnapshots) - - def from_group_snapshot(self, group_snapshot): - """Convert a generic volume group object to a cg object.""" - self.id = group_snapshot.id - self.consistencygroup_id = group_snapshot.group_id - self.user_id = group_snapshot.user_id - self.project_id = group_snapshot.project_id - self.name = group_snapshot.name - self.description = group_snapshot.description - self.status = group_snapshot.status - - def obj_load_attr(self, attrname): - if attrname not in self.OPTIONAL_FIELDS: - raise exception.ObjectActionError( - action='obj_load_attr', - reason=_('attribute %s not lazy-loadable') % attrname) - if not self._context: - raise exception.OrphanedObjectError(method='obj_load_attr', - objtype=self.obj_name()) - - if attrname == 'consistencygroup': - self.consistencygroup = objects.ConsistencyGroup.get_by_id( - self._context, self.consistencygroup_id) - - if attrname == 'snapshots': - self.snapshots = objects.SnapshotList.get_all_for_cgsnapshot( - self._context, self.id) - - self.obj_reset_changes(fields=[attrname]) - - def save(self): - updates = self.cinder_obj_get_changes() - if updates: - if 'consistencygroup' in updates: - raise exception.ObjectActionError( - action='save', reason=_('consistencygroup changed')) - if 'snapshots' in updates: - raise exception.ObjectActionError( - action='save', reason=_('snapshots changed')) - db.cgsnapshot_update(self._context, self.id, updates) - self.obj_reset_changes() - - def destroy(self): - with self.obj_as_admin(): - updated_values = db.cgsnapshot_destroy(self._context, self.id) - self.update(updated_values) - 
self.obj_reset_changes(updated_values.keys()) - - -@base.CinderObjectRegistry.register -class CGSnapshotList(base.ObjectListBase, base.CinderObject): - VERSION = '1.0' - - fields = { - 'objects': fields.ListOfObjectsField('CGSnapshot') - } - - @classmethod - def get_all(cls, context, filters=None): - cgsnapshots = db.cgsnapshot_get_all(context, filters) - return base.obj_make_list(context, cls(context), objects.CGSnapshot, - cgsnapshots) - - @classmethod - def get_all_by_project(cls, context, project_id, filters=None): - cgsnapshots = db.cgsnapshot_get_all_by_project(context, project_id, - filters) - return base.obj_make_list(context, cls(context), objects.CGSnapshot, - cgsnapshots) - - @classmethod - def get_all_by_group(cls, context, group_id, filters=None): - cgsnapshots = db.cgsnapshot_get_all_by_group(context, group_id, - filters) - return base.obj_make_list(context, cls(context), - objects.CGSnapshot, - cgsnapshots) diff --git a/cinder/objects/cleanable.py b/cinder/objects/cleanable.py deleted file mode 100644 index dc7964258..000000000 --- a/cinder/objects/cleanable.py +++ /dev/null @@ -1,237 +0,0 @@ -# Copyright (c) 2016 Red Hat, Inc. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -import inspect - -import decorator -from oslo_utils import versionutils - -from cinder import db -from cinder import exception -from cinder.objects import base -from cinder import service -from cinder.volume import rpcapi as vol_rpcapi - - -class CinderCleanableObject(base.CinderPersistentObject): - """Base class for cleanable OVO resources. - - All cleanable objects must have a host property/attribute. - """ - worker = None - - cleanable_resource_types = set() - - @classmethod - def get_rpc_api(cls): - # By default assume all resources are handled by c-vol services - return vol_rpcapi.VolumeAPI - - @classmethod - def cinder_ovo_cls_init(cls): - """Called on OVO registration, sets set of cleanable resources.""" - # First call persistent object method to store the DB model - super(CinderCleanableObject, cls).cinder_ovo_cls_init() - - # Add this class to the set of resources - cls.cleanable_resource_types.add(cls.obj_name()) - - @classmethod - def get_pinned_version(cls): - # We pin the version by the last service that gets updated, which is - # c-vol or c-bak - min_obj_vers_str = cls.get_rpc_api().determine_obj_version_cap() - - # Get current pinned down version for this object - version = base.OBJ_VERSIONS[min_obj_vers_str][cls.__name__] - return versionutils.convert_version_to_int(version) - - @staticmethod - def _is_cleanable(status, obj_version): - """Check if a specific status for a specific OBJ version is cleanable. - - Each CinderCleanableObject class should implement this method and - return True for cleanable status for versions equal or higher to the - ones where the functionality was added. - - :returns: Whether to create a workers DB entry or not - :param obj_version: Min object version running in the cloud or None if - current version. - :type obj_version: float - """ - return False - - def is_cleanable(self, pinned=False): - """Check if cleanable VO status is cleanable. 
- - :param pinned: If we should check against pinned version or current - version. - :type pinned: bool - :returns: Whether this needs a workers DB entry or not - """ - if pinned: - obj_version = self.get_pinned_version() - else: - obj_version = None - return self._is_cleanable(self.status, obj_version) - - def create_worker(self, pinned=True): - """Create a worker entry at the API.""" - # This method is mostly called from the rpc layer, therefore it checks - # if it's cleanable given current pinned version. - if not self.is_cleanable(pinned): - return False - - resource_type = self.__class__.__name__ - - entry_in_db = False - - # This will only loop on very rare race conditions - while not entry_in_db: - try: - # On the common case there won't be an entry in the DB, that's - # why we try to create first. - db.worker_create(self._context, status=self.status, - resource_type=resource_type, - resource_id=self.id) - entry_in_db = True - except exception.WorkerExists: - try: - db.worker_update(self._context, None, - filters={'resource_type': resource_type, - 'resource_id': self.id}, - service_id=None, - status=self.status) - entry_in_db = True - except exception.WorkerNotFound: - pass - return entry_in_db - - def set_worker(self): - worker = self.worker - - service_id = service.Service.service_id - resource_type = self.__class__.__name__ - - if worker: - if worker.cleaning: - return - else: - try: - worker = db.worker_get(self._context, - resource_type=resource_type, - resource_id=self.id) - except exception.WorkerNotFound: - # If the call didn't come from an RPC call we still have to - # create the entry in the DB. 
- try: - self.worker = db.worker_create(self._context, - status=self.status, - resource_type=resource_type, - resource_id=self.id, - service_id=service_id) - return - except exception.WorkerExists: - # If 2 cleanable operations are competing for this resource - # and the other one created the entry first that one won - raise exception.CleanableInUse(type=resource_type, - id=self.id) - - # If we have to claim this work or if the status has changed we have - # to update DB. - if (worker.service_id != service_id or worker.status != self.status): - try: - db.worker_update( - self._context, worker.id, - filters={'service_id': worker.service_id, - 'status': worker.status, - 'race_preventer': worker.race_preventer, - 'updated_at': worker.updated_at}, - service_id=service_id, - status=self.status, - orm_worker=worker) - except exception.WorkerNotFound: - self.worker = None - raise exception.CleanableInUse(type=self.__class__.__name__, - id=self.id) - self.worker = worker - - def unset_worker(self): - if self.worker: - db.worker_destroy(self._context, id=self.worker.id, - status=self.worker.status, - service_id=self.worker.service_id) - self.worker = None - - # NOTE(geguileo): To be compatible with decorate v3.4.x and v4.0.x - decorate = staticmethod(getattr(decorator, 'decorate', - lambda f, w: decorator.decorator(w, f))) - - @staticmethod - def set_workers(*decorator_args): - """Decorator that adds worker DB rows for cleanable versioned objects. - - By default will take care of all cleanable objects, but we can limit - which objects we want by passing the name of the arguments we want - to be added. 
- """ - def _decorator(f): - def wrapper(f, *args, **kwargs): - if decorator_args: - call_args = inspect.getcallargs(f, *args, **kwargs) - candidates = [call_args[obj] for obj in decorator_args] - else: - candidates = list(args) - candidates.extend(kwargs.values()) - cleanables = [cand for cand in candidates - if (isinstance(cand, CinderCleanableObject) - and cand.is_cleanable(pinned=False))] - try: - # Create the entries in the workers table - for cleanable in cleanables: - cleanable.set_worker() - - # Call the function - result = f(*args, **kwargs) - finally: - # Remove entries from the workers table - for cleanable in cleanables: - # NOTE(geguileo): We check that the status has changed - # to avoid removing the worker entry when we finished - # the operation due to an unexpected exception and also - # when this process stops because the main process has - # stopped. - if (cleanable.worker and - cleanable.status != cleanable.worker.status): - try: - cleanable.unset_worker() - except Exception: - pass - return result - return CinderCleanableObject.decorate(f, wrapper) - - # If we don't have optional decorator arguments the argument in - # decorator_args is the function we have to decorate - if len(decorator_args) == 1 and callable(decorator_args[0]): - function = decorator_args[0] - decorator_args = None - return _decorator(function) - return _decorator - - def refresh(self): - # We want to keep the worker entry on refresh - worker = self.worker - super(CinderCleanableObject, self).refresh() - self.worker = worker diff --git a/cinder/objects/cleanup_request.py b/cinder/objects/cleanup_request.py deleted file mode 100644 index 30c1ee7a0..000000000 --- a/cinder/objects/cleanup_request.py +++ /dev/null @@ -1,49 +0,0 @@ -# Copyright (c) 2016 Red Hat, Inc. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. 
You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from oslo_versionedobjects import fields - -from cinder.objects import base - - -@base.CinderObjectRegistry.register -class CleanupRequest(base.CinderObject, base.ClusteredObject): - """Versioned Object to send cleanup requests.""" - # Version 1.0: Initial version - VERSION = '1.0' - - fields = { - 'service_id': fields.IntegerField(nullable=True), - 'cluster_name': fields.StringField(nullable=True), - 'host': fields.StringField(nullable=True), - 'binary': fields.StringField(nullable=True), - 'is_up': fields.BooleanField(default=False, nullable=True), - 'disabled': fields.BooleanField(nullable=True), - 'resource_id': fields.UUIDField(nullable=True), - 'resource_type': fields.StringField(nullable=True), - 'until': fields.DateTimeField(nullable=True), - } - - def __init__(self, context=None, **kwargs): - super(CleanupRequest, self).__init__(**kwargs) - - # Set non initialized fields with default or None values - for field_name in self.fields: - if not self.obj_attr_is_set(field_name): - field = self.fields[field_name] - if field.default != fields.UnspecifiedDefault: - setattr(self, field_name, field.default) - elif field.nullable: - setattr(self, field_name, None) diff --git a/cinder/objects/cluster.py b/cinder/objects/cluster.py deleted file mode 100644 index c72598efd..000000000 --- a/cinder/objects/cluster.py +++ /dev/null @@ -1,205 +0,0 @@ -# Copyright (c) 2016 Red Hat, Inc. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. 
You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from oslo_versionedobjects import fields - -from cinder import db -from cinder import exception -from cinder.i18n import _ -from cinder import objects -from cinder.objects import base -from cinder.objects import fields as c_fields -from cinder import utils - - -@base.CinderObjectRegistry.register -class Cluster(base.CinderPersistentObject, base.CinderObject, - base.CinderComparableObject): - """Cluster Versioned Object. - - Method get_by_id supports as additional named arguments: - - get_services: If we want to load all services from this cluster. - - services_summary: If we want to load num_nodes and num_down_nodes - fields. - - is_up: Boolean value to filter based on the cluster's up status. - - read_deleted: Filtering based on delete status. Default value "no". - - Any other cluster field will be used as a filter. - """ - # Version 1.0: Initial version - # Version 1.1: Add replication fields - VERSION = '1.1' - OPTIONAL_FIELDS = ('num_hosts', 'num_down_hosts', 'services') - - # NOTE(geguileo): We don't want to expose race_preventer field at the OVO - # layer since it is only meant for the DB layer internal mechanism to - # prevent races. 
- fields = { - 'id': fields.IntegerField(), - 'name': fields.StringField(nullable=False), - 'binary': fields.StringField(nullable=False), - 'disabled': fields.BooleanField(default=False, nullable=True), - 'disabled_reason': fields.StringField(nullable=True), - 'num_hosts': fields.IntegerField(default=0, read_only=True), - 'num_down_hosts': fields.IntegerField(default=0, read_only=True), - 'last_heartbeat': fields.DateTimeField(nullable=True, read_only=True), - 'services': fields.ObjectField('ServiceList', nullable=True, - read_only=True), - # Replication properties - 'replication_status': c_fields.ReplicationStatusField(nullable=True), - 'frozen': fields.BooleanField(default=False), - 'active_backend_id': fields.StringField(nullable=True), - } - - def obj_make_compatible(self, primitive, target_version): - """Make a cluster representation compatible with a target version.""" - # Convert all related objects - super(Cluster, self).obj_make_compatible(primitive, target_version) - - # Before v1.1 we didn't have relication fields so we have to remove - # them. - if target_version == '1.0': - for obj_field in ('replication_status', 'frozen', - 'active_backend_id'): - primitive.pop(obj_field, None) - - @classmethod - def _get_expected_attrs(cls, context, *args, **kwargs): - """Return expected attributes when getting a cluster. - - Expected attributes depend on whether we are retrieving all related - services as well as if we are getting the services summary. 
- """ - expected_attrs = [] - if kwargs.get('get_services'): - expected_attrs.append('services') - if kwargs.get('services_summary'): - expected_attrs.extend(('num_hosts', 'num_down_hosts')) - return expected_attrs - - @staticmethod - def _from_db_object(context, cluster, db_cluster, expected_attrs=None): - """Fill cluster OVO fields from cluster ORM instance.""" - expected_attrs = expected_attrs or tuple() - for name, field in cluster.fields.items(): - # The only field that cannot be assigned using setattr is services, - # because it is an ObjectField. So we don't assign the value if - # it's a non expected optional field or if it's services field. - if ((name in Cluster.OPTIONAL_FIELDS - and name not in expected_attrs) or name == 'services'): - continue - value = getattr(db_cluster, name) - setattr(cluster, name, value) - - cluster._context = context - if 'services' in expected_attrs: - cluster.services = base.obj_make_list( - context, - objects.ServiceList(context), - objects.Service, - db_cluster.services) - - cluster.obj_reset_changes() - return cluster - - def obj_load_attr(self, attrname): - """Lazy load services attribute.""" - # NOTE(geguileo): We only allow lazy loading services to raise - # awareness of the high cost of lazy loading num_hosts and - # num_down_hosts, so if we are going to need this information we should - # be certain we really need it and it should loaded when retrieving the - # data from the DB the first time we read the OVO. 
- if attrname != 'services': - raise exception.ObjectActionError( - action='obj_load_attr', - reason=_('attribute %s not lazy-loadable') % attrname) - if not self._context: - raise exception.OrphanedObjectError(method='obj_load_attr', - objtype=self.obj_name()) - - self.services = objects.ServiceList.get_all( - self._context, {'cluster_name': self.name}) - - self.obj_reset_changes(fields=('services',)) - - def create(self): - if self.obj_attr_is_set('id'): - raise exception.ObjectActionError(action='create', - reason=_('already created')) - updates = self.cinder_obj_get_changes() - if updates: - for field in self.OPTIONAL_FIELDS: - if field in updates: - raise exception.ObjectActionError( - action='create', reason=_('%s assigned') % field) - - db_cluster = db.cluster_create(self._context, updates) - self._from_db_object(self._context, self, db_cluster) - - def save(self): - updates = self.cinder_obj_get_changes() - if updates: - for field in self.OPTIONAL_FIELDS: - if field in updates: - raise exception.ObjectActionError( - action='save', reason=_('%s changed') % field) - db.cluster_update(self._context, self.id, updates) - self.obj_reset_changes() - - def destroy(self): - with self.obj_as_admin(): - updated_values = db.cluster_destroy(self._context, self.id) - for field, value in updated_values.items(): - setattr(self, field, value) - self.obj_reset_changes(updated_values.keys()) - - @property - def is_up(self): - return (self.last_heartbeat and - self.last_heartbeat >= utils.service_expired_time(True)) - - -@base.CinderObjectRegistry.register -class ClusterList(base.ObjectListBase, base.CinderObject): - # Version 1.0: Initial version - VERSION = '1.0' - - fields = {'objects': fields.ListOfObjectsField('Cluster')} - - @classmethod - def get_all(cls, context, is_up=None, get_services=False, - services_summary=False, read_deleted='no', **filters): - """Get all clusters that match the criteria. 
- - :param is_up: Boolean value to filter based on the cluster's up status. - :param get_services: If we want to load all services from this cluster. - :param services_summary: If we want to load num_nodes and - num_down_nodes fields. - :param read_deleted: Filtering based on delete status. Default value is - "no". - :param filters: Field based filters in the form of key/value. - """ - - expected_attrs = Cluster._get_expected_attrs( - context, - get_services=get_services, - services_summary=services_summary) - - clusters = db.cluster_get_all(context, is_up=is_up, - get_services=get_services, - services_summary=services_summary, - read_deleted=read_deleted, - **filters) - return base.obj_make_list(context, cls(context), Cluster, clusters, - expected_attrs=expected_attrs) diff --git a/cinder/objects/consistencygroup.py b/cinder/objects/consistencygroup.py deleted file mode 100644 index 05979cd65..000000000 --- a/cinder/objects/consistencygroup.py +++ /dev/null @@ -1,258 +0,0 @@ -# Copyright 2015 Yahoo Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -from oslo_utils import versionutils - -from cinder import db -from cinder import exception -from cinder.i18n import _ -from cinder import objects -from cinder.objects import base -from cinder.objects import fields as c_fields -from oslo_versionedobjects import fields - - -@base.CinderObjectRegistry.register -class ConsistencyGroup(base.CinderPersistentObject, base.CinderObject, - base.CinderObjectDictCompat, base.ClusteredObject): - # Version 1.0: Initial version - # Version 1.1: Added cgsnapshots and volumes relationships - # Version 1.2: Changed 'status' field to use ConsistencyGroupStatusField - # Version 1.3: Added cluster fields - # Version 1.4: Added from_group - VERSION = '1.4' - - OPTIONAL_FIELDS = ('cgsnapshots', 'volumes', 'cluster') - - fields = { - 'id': fields.UUIDField(), - 'user_id': fields.StringField(), - 'project_id': fields.StringField(), - 'cluster_name': fields.StringField(nullable=True), - 'cluster': fields.ObjectField('Cluster', nullable=True, - read_only=True), - 'host': fields.StringField(nullable=True), - 'availability_zone': fields.StringField(nullable=True), - 'name': fields.StringField(nullable=True), - 'description': fields.StringField(nullable=True), - 'volume_type_id': fields.StringField(nullable=True), - 'status': c_fields.ConsistencyGroupStatusField(nullable=True), - 'cgsnapshot_id': fields.UUIDField(nullable=True), - 'source_cgid': fields.UUIDField(nullable=True), - 'cgsnapshots': fields.ObjectField('CGSnapshotList', nullable=True), - 'volumes': fields.ObjectField('VolumeList', nullable=True), - } - - def obj_make_compatible(self, primitive, target_version): - """Make a CG representation compatible with a target version.""" - # Convert all related objects - super(ConsistencyGroup, self).obj_make_compatible(primitive, - target_version) - - target_version = versionutils.convert_version_to_tuple(target_version) - # Before v1.3 we didn't have cluster fields so we have to remove them. 
- if target_version < (1, 3): - for obj_field in ('cluster', 'cluster_name'): - primitive.pop(obj_field, None) - - @classmethod - def _from_db_object(cls, context, consistencygroup, db_consistencygroup, - expected_attrs=None): - if expected_attrs is None: - expected_attrs = [] - for name, field in consistencygroup.fields.items(): - if name in cls.OPTIONAL_FIELDS: - continue - value = db_consistencygroup.get(name) - setattr(consistencygroup, name, value) - - if 'cgsnapshots' in expected_attrs: - cgsnapshots = base.obj_make_list( - context, objects.CGSnapshotList(context), - objects.CGSnapshot, - db_consistencygroup['cgsnapshots']) - consistencygroup.cgsnapshots = cgsnapshots - - if 'volumes' in expected_attrs: - volumes = base.obj_make_list( - context, objects.VolumeList(context), - objects.Volume, - db_consistencygroup['volumes']) - consistencygroup.volumes = volumes - - if 'cluster' in expected_attrs: - db_cluster = db_consistencygroup.get('cluster') - # If this consistency group doesn't belong to a cluster the cluster - # field in the ORM instance will have value of None. - if db_cluster: - consistencygroup.cluster = objects.Cluster(context) - objects.Cluster._from_db_object(context, - consistencygroup.cluster, - db_cluster) - else: - consistencygroup.cluster = None - - consistencygroup._context = context - consistencygroup.obj_reset_changes() - return consistencygroup - - def create(self, cg_snap_id=None, cg_id=None): - """Create a consistency group. - - If cg_snap_id or cg_id are specified then volume_type_id, - availability_zone, and host will be taken from the source Consistency - Group. 
- """ - if self.obj_attr_is_set('id'): - raise exception.ObjectActionError(action='create', - reason=_('already_created')) - updates = self.cinder_obj_get_changes() - - if 'cgsnapshots' in updates: - raise exception.ObjectActionError(action='create', - reason=_('cgsnapshots assigned')) - - if 'volumes' in updates: - raise exception.ObjectActionError(action='create', - reason=_('volumes assigned')) - - if 'cluster' in updates: - raise exception.ObjectActionError( - action='create', reason=_('cluster assigned')) - - db_consistencygroups = db.consistencygroup_create(self._context, - updates, - cg_snap_id, - cg_id) - self._from_db_object(self._context, self, db_consistencygroups) - - def from_group(self, group): - """Convert a generic volume group object to a cg object.""" - self.id = group.id - self.user_id = group.user_id - self.project_id = group.project_id - self.cluster_name = group.cluster_name - self.host = group.host - self.availability_zone = group.availability_zone - self.name = group.name - self.description = group.description - self.volume_type_id = "" - for v_type in group.volume_types: - self.volume_type_id += v_type.id + "," - self.status = group.status - self.cgsnapshot_id = group.group_snapshot_id - self.source_cgid = group.source_group_id - - def obj_load_attr(self, attrname): - if attrname not in self.OPTIONAL_FIELDS: - raise exception.ObjectActionError( - action='obj_load_attr', - reason=_('attribute %s not lazy-loadable') % attrname) - if not self._context: - raise exception.OrphanedObjectError(method='obj_load_attr', - objtype=self.obj_name()) - - if attrname == 'cgsnapshots': - self.cgsnapshots = objects.CGSnapshotList.get_all_by_group( - self._context, self.id) - - if attrname == 'volumes': - self.volumes = objects.VolumeList.get_all_by_group(self._context, - self.id) - - # If this consistency group doesn't belong to a cluster (cluster_name - # is empty), then cluster field will be None. 
- if attrname == 'cluster': - if self.cluster_name: - self.cluster = objects.Cluster.get_by_id( - self._context, name=self.cluster_name) - else: - self.cluster = None - - self.obj_reset_changes(fields=[attrname]) - - def save(self): - updates = self.cinder_obj_get_changes() - if updates: - if 'cgsnapshots' in updates: - raise exception.ObjectActionError( - action='save', reason=_('cgsnapshots changed')) - if 'volumes' in updates: - raise exception.ObjectActionError( - action='save', reason=_('volumes changed')) - if 'cluster' in updates: - raise exception.ObjectActionError( - action='save', reason=_('cluster changed')) - - db.consistencygroup_update(self._context, self.id, updates) - self.obj_reset_changes() - - def destroy(self): - with self.obj_as_admin(): - updated_values = db.consistencygroup_destroy(self._context, - self.id) - self.update(updated_values) - self.obj_reset_changes(updated_values.keys()) - - -@base.CinderObjectRegistry.register -class ConsistencyGroupList(base.ObjectListBase, base.CinderObject): - # Version 1.0: Initial version - # Version 1.1: Add pagination support to consistency group - VERSION = '1.1' - - fields = { - 'objects': fields.ListOfObjectsField('ConsistencyGroup') - } - - @staticmethod - def include_in_cluster(context, cluster, partial_rename=True, **filters): - """Include all consistency groups matching the filters into a cluster. - - When partial_rename is set we will not set the cluster_name with - cluster parameter value directly, we'll replace provided cluster_name - or host filter value with cluster instead. - - This is useful when we want to replace just the cluster name but leave - the backend and pool information as it is. If we are using - cluster_name to filter, we'll use that same DB field to replace the - cluster value and leave the rest as it is. Likewise if we use the host - to filter. - - Returns the number of consistency groups that have been changed. 
- """ - return db.consistencygroup_include_in_cluster(context, cluster, - partial_rename, - **filters) - - @classmethod - def get_all(cls, context, filters=None, marker=None, limit=None, - offset=None, sort_keys=None, sort_dirs=None): - consistencygroups = db.consistencygroup_get_all( - context, filters=filters, marker=marker, limit=limit, - offset=offset, sort_keys=sort_keys, sort_dirs=sort_dirs) - return base.obj_make_list(context, cls(context), - objects.ConsistencyGroup, - consistencygroups) - - @classmethod - def get_all_by_project(cls, context, project_id, filters=None, marker=None, - limit=None, offset=None, sort_keys=None, - sort_dirs=None): - consistencygroups = db.consistencygroup_get_all_by_project( - context, project_id, filters=filters, marker=marker, limit=limit, - offset=offset, sort_keys=sort_keys, sort_dirs=sort_dirs) - return base.obj_make_list(context, cls(context), - objects.ConsistencyGroup, - consistencygroups) diff --git a/cinder/objects/dynamic_log.py b/cinder/objects/dynamic_log.py deleted file mode 100644 index b72e82789..000000000 --- a/cinder/objects/dynamic_log.py +++ /dev/null @@ -1,51 +0,0 @@ -# Copyright (c) 2017 Red Hat, Inc. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -from oslo_versionedobjects import fields - -from cinder.objects import base - - -@base.CinderObjectRegistry.register -class LogLevel(base.CinderObject): - """Versioned Object to send log change requests.""" - # Version 1.0: Initial version - VERSION = '1.0' - - fields = { - 'prefix': fields.StringField(nullable=True), - 'level': fields.StringField(nullable=True), - } - - def __init__(self, context=None, **kwargs): - super(LogLevel, self).__init__(**kwargs) - - # Set non initialized fields with default or None values - for field_name in self.fields: - if not self.obj_attr_is_set(field_name): - field = self.fields[field_name] - if field.default != fields.UnspecifiedDefault: - setattr(self, field_name, field.default) - elif field.nullable: - setattr(self, field_name, None) - - -@base.CinderObjectRegistry.register -class LogLevelList(base.ObjectListBase, base.CinderObject): - VERSION = '1.0' - - fields = { - 'objects': fields.ListOfObjectsField('LogLevel'), - } diff --git a/cinder/objects/fields.py b/cinder/objects/fields.py deleted file mode 100644 index 19645823f..000000000 --- a/cinder/objects/fields.py +++ /dev/null @@ -1,169 +0,0 @@ -# Copyright 2015 IBM Corp. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -"""Custom fields for Cinder objects.""" - -from oslo_versionedobjects import fields - - -BaseEnumField = fields.BaseEnumField -Enum = fields.Enum -Field = fields.Field -FieldType = fields.FieldType - - -class BaseCinderEnum(Enum): - def __init__(self): - super(BaseCinderEnum, self).__init__(valid_values=self.__class__.ALL) - - -class BackupStatus(BaseCinderEnum): - ERROR = 'error' - ERROR_DELETING = 'error_deleting' - CREATING = 'creating' - AVAILABLE = 'available' - DELETING = 'deleting' - DELETED = 'deleted' - RESTORING = 'restoring' - - ALL = (ERROR, ERROR_DELETING, CREATING, AVAILABLE, DELETING, DELETED, - RESTORING) - - -class BackupStatusField(BaseEnumField): - AUTO_TYPE = BackupStatus() - - -class ConsistencyGroupStatus(BaseCinderEnum): - ERROR = 'error' - AVAILABLE = 'available' - CREATING = 'creating' - DELETING = 'deleting' - DELETED = 'deleted' - UPDATING = 'updating' - ERROR_DELETING = 'error_deleting' - - ALL = (ERROR, AVAILABLE, CREATING, DELETING, DELETED, - UPDATING, ERROR_DELETING) - - -class ConsistencyGroupStatusField(BaseEnumField): - AUTO_TYPE = ConsistencyGroupStatus() - - -class GroupStatus(BaseCinderEnum): - ERROR = 'error' - AVAILABLE = 'available' - CREATING = 'creating' - DELETING = 'deleting' - DELETED = 'deleted' - UPDATING = 'updating' - IN_USE = 'in-use' - ERROR_DELETING = 'error_deleting' - - ALL = (ERROR, AVAILABLE, CREATING, DELETING, DELETED, - UPDATING, IN_USE, ERROR_DELETING) - - -class GroupStatusField(BaseEnumField): - AUTO_TYPE = GroupStatus() - - -class GroupSnapshotStatus(BaseCinderEnum): - ERROR = 'error' - AVAILABLE = 'available' - CREATING = 'creating' - DELETING = 'deleting' - DELETED = 'deleted' - UPDATING = 'updating' - ERROR_DELETING = 'error_deleting' - - ALL = (ERROR, AVAILABLE, CREATING, DELETING, DELETED, - UPDATING, ERROR_DELETING) - - -class GroupSnapshotStatusField(BaseEnumField): - AUTO_TYPE = GroupSnapshotStatus() - - -class ReplicationStatus(BaseCinderEnum): - ERROR = 'error' - ENABLED = 'enabled' - 
DISABLED = 'disabled' - NOT_CAPABLE = 'not-capable' - FAILING_OVER = 'failing-over' - FAILOVER_ERROR = 'failover-error' - FAILED_OVER = 'failed-over' - ENABLING = 'enabling' - DISABLING = 'disabling' - - ALL = (ERROR, ENABLED, DISABLED, NOT_CAPABLE, FAILOVER_ERROR, FAILING_OVER, - FAILED_OVER, ENABLING, DISABLING) - - -class ReplicationStatusField(BaseEnumField): - AUTO_TYPE = ReplicationStatus() - - -class SnapshotStatus(BaseCinderEnum): - ERROR = 'error' - AVAILABLE = 'available' - CREATING = 'creating' - DELETING = 'deleting' - DELETED = 'deleted' - UPDATING = 'updating' - ERROR_DELETING = 'error_deleting' - UNMANAGING = 'unmanaging' - BACKING_UP = 'backing-up' - RESTORING = 'restoring' - - ALL = (ERROR, AVAILABLE, CREATING, DELETING, DELETED, - UPDATING, ERROR_DELETING, UNMANAGING, BACKING_UP, RESTORING) - - -class SnapshotStatusField(BaseEnumField): - AUTO_TYPE = SnapshotStatus() - - -class QoSConsumerValues(BaseCinderEnum): - BACK_END = 'back-end' - FRONT_END = 'front-end' - BOTH = 'both' - - ALL = (BACK_END, FRONT_END, BOTH) - - -class QoSConsumerField(BaseEnumField): - AUTO_TYPE = QoSConsumerValues() - - -class VolumeAttachStatus(BaseCinderEnum): - ATTACHED = 'attached' - ATTACHING = 'attaching' - DETACHED = 'detached' - RESERVED = 'reserved' - ERROR_ATTACHING = 'error_attaching' - ERROR_DETACHING = 'error_detaching' - DELETED = 'deleted' - - ALL = (ATTACHED, ATTACHING, DETACHED, ERROR_ATTACHING, - ERROR_DETACHING, RESERVED, DELETED) - - -class VolumeAttachStatusField(BaseEnumField): - AUTO_TYPE = VolumeAttachStatus() - - -class DictOfNullableField(fields.AutoTypedField): - AUTO_TYPE = fields.Dict(fields.FieldType(), nullable=True) diff --git a/cinder/objects/group.py b/cinder/objects/group.py deleted file mode 100644 index 120438bae..000000000 --- a/cinder/objects/group.py +++ /dev/null @@ -1,233 +0,0 @@ -# Copyright 2016 EMC Corporation -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance 
with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from oslo_utils import versionutils -from oslo_versionedobjects import fields - -from cinder import db -from cinder import exception -from cinder.i18n import _ -from cinder import objects -from cinder.objects import base -from cinder.objects import fields as c_fields -from cinder.volume import utils as vol_utils - - -@base.CinderObjectRegistry.register -class Group(base.CinderPersistentObject, base.CinderObject, - base.CinderObjectDictCompat, base.ClusteredObject): - # Version 1.0: Initial version - # Version 1.1: Added group_snapshots, group_snapshot_id, and - # source_group_id - # Version 1.2: Added replication_status - VERSION = '1.2' - - OPTIONAL_FIELDS = ['volumes', 'volume_types', 'group_snapshots'] - - fields = { - 'id': fields.UUIDField(), - 'user_id': fields.StringField(), - 'project_id': fields.StringField(), - 'cluster_name': fields.StringField(nullable=True), - 'host': fields.StringField(nullable=True), - 'availability_zone': fields.StringField(nullable=True), - 'name': fields.StringField(nullable=True), - 'description': fields.StringField(nullable=True), - 'group_type_id': fields.StringField(), - 'volume_type_ids': fields.ListOfStringsField(nullable=True), - 'status': c_fields.GroupStatusField(nullable=True), - 'group_snapshot_id': fields.UUIDField(nullable=True), - 'source_group_id': fields.UUIDField(nullable=True), - 'replication_status': c_fields.ReplicationStatusField(nullable=True), - 'volumes': fields.ObjectField('VolumeList', nullable=True), - 'volume_types': fields.ObjectField('VolumeTypeList', - nullable=True), - 
'group_snapshots': fields.ObjectField('GroupSnapshotList', - nullable=True), - } - - def obj_make_compatible(self, primitive, target_version): - """Make an object representation compatible with target version.""" - super(Group, self).obj_make_compatible(primitive, target_version) - target_version = versionutils.convert_version_to_tuple(target_version) - if target_version < (1, 1): - for key in ('group_snapshot_id', 'source_group_id', - 'group_snapshots'): - primitive.pop(key, None) - if target_version < (1, 2): - primitive.pop('replication_status', None) - - @staticmethod - def _from_db_object(context, group, db_group, - expected_attrs=None): - if expected_attrs is None: - expected_attrs = [] - for name, field in group.fields.items(): - if name in Group.OPTIONAL_FIELDS: - continue - value = db_group.get(name) - setattr(group, name, value) - - if 'volumes' in expected_attrs: - volumes = base.obj_make_list( - context, objects.VolumeList(context), - objects.Volume, - db_group['volumes']) - group.volumes = volumes - - if 'volume_types' in expected_attrs: - volume_types = base.obj_make_list( - context, objects.VolumeTypeList(context), - objects.VolumeType, - db_group['volume_types']) - group.volume_types = volume_types - - if 'group_snapshots' in expected_attrs: - group_snapshots = base.obj_make_list( - context, objects.GroupSnapshotList(context), - objects.GroupSnapshot, - db_group['group_snapshots']) - group.group_snapshots = group_snapshots - - group._context = context - group.obj_reset_changes() - return group - - def create(self, group_snapshot_id=None, source_group_id=None): - if self.obj_attr_is_set('id'): - raise exception.ObjectActionError(action='create', - reason=_('already_created')) - updates = self.cinder_obj_get_changes() - - if 'volume_types' in updates: - raise exception.ObjectActionError( - action='create', - reason=_('volume_types assigned')) - - if 'volumes' in updates: - raise exception.ObjectActionError(action='create', - reason=_('volumes 
assigned')) - - if 'group_snapshots' in updates: - raise exception.ObjectActionError( - action='create', - reason=_('group_snapshots assigned')) - - db_groups = db.group_create(self._context, - updates, - group_snapshot_id, - source_group_id) - self._from_db_object(self._context, self, db_groups) - - def obj_load_attr(self, attrname): - if attrname not in Group.OPTIONAL_FIELDS: - raise exception.ObjectActionError( - action='obj_load_attr', - reason=_('attribute %s not lazy-loadable') % attrname) - if not self._context: - raise exception.OrphanedObjectError(method='obj_load_attr', - objtype=self.obj_name()) - - if attrname == 'volume_types': - self.volume_types = objects.VolumeTypeList.get_all_by_group( - self._context, self.id) - - if attrname == 'volumes': - self.volumes = objects.VolumeList.get_all_by_generic_group( - self._context, self.id) - - if attrname == 'group_snapshots': - self.group_snapshots = objects.GroupSnapshotList.get_all_by_group( - self._context, self.id) - - self.obj_reset_changes(fields=[attrname]) - - def save(self): - updates = self.cinder_obj_get_changes() - if updates: - if 'volume_types' in updates: - msg = _('Cannot save volume_types changes in group object ' - 'update.') - raise exception.ObjectActionError( - action='save', reason=msg) - if 'volumes' in updates: - msg = _('Cannot save volumes changes in group object update.') - raise exception.ObjectActionError( - action='save', reason=msg) - if 'group_snapshots' in updates: - msg = _('Cannot save group_snapshots changes in group object ' - 'update.') - raise exception.ObjectActionError( - action='save', reason=msg) - - db.group_update(self._context, self.id, updates) - self.obj_reset_changes() - - def destroy(self): - with self.obj_as_admin(): - db.group_destroy(self._context, self.id) - - @property - def is_replicated(self): - if (vol_utils.is_group_a_type(self, "group_replication_enabled") or - vol_utils.is_group_a_type( - self, "consistent_group_replication_enabled")): - return True 
- return False - - -@base.CinderObjectRegistry.register -class GroupList(base.ObjectListBase, base.CinderObject): - # Version 1.0: Initial version - VERSION = '1.0' - - fields = { - 'objects': fields.ListOfObjectsField('Group') - } - - @classmethod - def get_all(cls, context, filters=None, marker=None, limit=None, - offset=None, sort_keys=None, sort_dirs=None): - groups = db.group_get_all( - context, filters=filters, marker=marker, limit=limit, - offset=offset, sort_keys=sort_keys, sort_dirs=sort_dirs) - return base.obj_make_list(context, cls(context), - objects.Group, - groups) - - @classmethod - def get_all_by_project(cls, context, project_id, filters=None, marker=None, - limit=None, offset=None, sort_keys=None, - sort_dirs=None): - groups = db.group_get_all_by_project( - context, project_id, filters=filters, marker=marker, limit=limit, - offset=offset, sort_keys=sort_keys, sort_dirs=sort_dirs) - return base.obj_make_list(context, cls(context), - objects.Group, - groups) - - @classmethod - def get_all_replicated(cls, context, filters=None, marker=None, limit=None, - offset=None, sort_keys=None, sort_dirs=None): - groups = db.group_get_all( - context, filters=filters, marker=marker, limit=limit, - offset=offset, sort_keys=sort_keys, sort_dirs=sort_dirs) - grp_obj_list = base.obj_make_list(context, cls(context), - objects.Group, - groups) - - out_groups = [grp for grp in grp_obj_list - if grp.is_replicated] - - return out_groups diff --git a/cinder/objects/group_snapshot.py b/cinder/objects/group_snapshot.py deleted file mode 100644 index 9baca5b4a..000000000 --- a/cinder/objects/group_snapshot.py +++ /dev/null @@ -1,169 +0,0 @@ -# Copyright 2016 EMC Corporation -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. 
You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from cinder import db -from cinder import exception -from cinder.i18n import _ -from cinder import objects -from cinder.objects import base -from oslo_versionedobjects import fields - - -@base.CinderObjectRegistry.register -class GroupSnapshot(base.CinderPersistentObject, base.CinderObject, - base.CinderObjectDictCompat, base.ClusteredObject): - VERSION = '1.0' - - OPTIONAL_FIELDS = ['group', 'snapshots'] - - fields = { - 'id': fields.UUIDField(), - 'group_id': fields.UUIDField(nullable=False), - 'project_id': fields.StringField(nullable=True), - 'user_id': fields.StringField(nullable=True), - 'name': fields.StringField(nullable=True), - 'description': fields.StringField(nullable=True), - 'status': fields.StringField(nullable=True), - 'group_type_id': fields.UUIDField(nullable=True), - 'group': fields.ObjectField('Group', nullable=True), - 'snapshots': fields.ObjectField('SnapshotList', nullable=True), - } - - @property - def host(self): - return self.group.host - - @property - def cluster_name(self): - return self.group.cluster_name - - @classmethod - def _from_db_object(cls, context, group_snapshot, db_group_snapshots, - expected_attrs=None): - expected_attrs = expected_attrs or [] - for name, field in group_snapshot.fields.items(): - if name in cls.OPTIONAL_FIELDS: - continue - value = db_group_snapshots.get(name) - setattr(group_snapshot, name, value) - - if 'group' in expected_attrs: - group = objects.Group(context) - group._from_db_object(context, group, - db_group_snapshots['group']) - group_snapshot.group = group - - if 'snapshots' in 
expected_attrs: - snapshots = base.obj_make_list( - context, objects.SnapshotsList(context), - objects.Snapshots, - db_group_snapshots['snapshots']) - group_snapshot.snapshots = snapshots - - group_snapshot._context = context - group_snapshot.obj_reset_changes() - return group_snapshot - - def create(self): - if self.obj_attr_is_set('id'): - raise exception.ObjectActionError(action='create', - reason=_('already_created')) - updates = self.cinder_obj_get_changes() - - if 'group' in updates: - raise exception.ObjectActionError( - action='create', reason=_('group assigned')) - - db_group_snapshots = db.group_snapshot_create(self._context, updates) - self._from_db_object(self._context, self, db_group_snapshots) - - def obj_load_attr(self, attrname): - if attrname not in self.OPTIONAL_FIELDS: - raise exception.ObjectActionError( - action='obj_load_attr', - reason=_('attribute %s not lazy-loadable') % attrname) - if not self._context: - raise exception.OrphanedObjectError(method='obj_load_attr', - objtype=self.obj_name()) - - if attrname == 'group': - self.group = objects.Group.get_by_id( - self._context, self.group_id) - - if attrname == 'snapshots': - self.snapshots = objects.SnapshotList.get_all_for_group_snapshot( - self._context, self.id) - - self.obj_reset_changes(fields=[attrname]) - - def save(self): - updates = self.cinder_obj_get_changes() - if updates: - if 'group' in updates: - raise exception.ObjectActionError( - action='save', reason=_('group changed')) - if 'snapshots' in updates: - raise exception.ObjectActionError( - action='save', reason=_('snapshots changed')) - db.group_snapshot_update(self._context, self.id, updates) - self.obj_reset_changes() - - def destroy(self): - with self.obj_as_admin(): - updated_values = db.group_snapshot_destroy(self._context, self.id) - self.update(updated_values) - self.obj_reset_changes(updated_values.keys()) - - -@base.CinderObjectRegistry.register -class GroupSnapshotList(base.ObjectListBase, base.CinderObject): - 
VERSION = '1.0' - - fields = { - 'objects': fields.ListOfObjectsField('GroupSnapshot') - } - - @classmethod - def get_all(cls, context, filters=None, marker=None, limit=None, - offset=None, sort_keys=None, sort_dirs=None): - group_snapshots = db.group_snapshot_get_all(context, - filters=filters, - marker=marker, - limit=limit, - offset=offset, - sort_keys=sort_keys, - sort_dirs=sort_dirs) - return base.obj_make_list(context, cls(context), objects.GroupSnapshot, - group_snapshots) - - @classmethod - def get_all_by_project(cls, context, project_id, filters=None, marker=None, - limit=None, offset=None, sort_keys=None, - sort_dirs=None): - group_snapshots = db.group_snapshot_get_all_by_project( - context, project_id, filters=filters, marker=marker, - limit=limit, offset=offset, sort_keys=sort_keys, - sort_dirs=sort_dirs) - return base.obj_make_list(context, cls(context), objects.GroupSnapshot, - group_snapshots) - - @classmethod - def get_all_by_group(cls, context, group_id, filters=None, marker=None, - limit=None, offset=None, sort_keys=None, - sort_dirs=None): - group_snapshots = db.group_snapshot_get_all_by_group( - context, group_id, filters=filters, marker=marker, limit=limit, - offset=offset, sort_keys=sort_keys, sort_dirs=sort_dirs) - return base.obj_make_list(context, cls(context), objects.GroupSnapshot, - group_snapshots) diff --git a/cinder/objects/group_type.py b/cinder/objects/group_type.py deleted file mode 100644 index be66ee027..000000000 --- a/cinder/objects/group_type.py +++ /dev/null @@ -1,116 +0,0 @@ -# Copyright 2016 EMC Corporation -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. 
You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from oslo_versionedobjects import fields - -from cinder import exception -from cinder.i18n import _ -from cinder import objects -from cinder.objects import base -from cinder.volume import group_types - - -@base.CinderObjectRegistry.register -class GroupType(base.CinderPersistentObject, base.CinderObject, - base.CinderObjectDictCompat, base.CinderComparableObject): - # Version 1.0: Initial version - VERSION = '1.0' - - OPTIONAL_FIELDS = ['group_specs', 'projects'] - - fields = { - 'id': fields.UUIDField(), - 'name': fields.StringField(nullable=True), - 'description': fields.StringField(nullable=True), - 'is_public': fields.BooleanField(default=True, nullable=True), - 'projects': fields.ListOfStringsField(nullable=True), - 'group_specs': fields.DictOfNullableStringsField(nullable=True), - } - - @classmethod - def _get_expected_attrs(cls, context): - return 'group_specs', 'projects' - - @classmethod - def _from_db_object(cls, context, type, db_type, expected_attrs=None): - if expected_attrs is None: - expected_attrs = [] - for name, field in type.fields.items(): - if name in cls.OPTIONAL_FIELDS: - continue - value = db_type[name] - if isinstance(field, fields.IntegerField): - value = value or 0 - type[name] = value - - # Get data from db_type object that was queried by joined query - # from DB - if 'group_specs' in expected_attrs: - type.group_specs = {} - specs = db_type.get('group_specs') - if specs and isinstance(specs, list): - type.group_specs = {item['key']: item['value'] - for item in specs} - elif specs and isinstance(specs, dict): - 
type.group_specs = specs - if 'projects' in expected_attrs: - type.projects = db_type.get('projects', []) - - type._context = context - type.obj_reset_changes() - return type - - def create(self): - if self.obj_attr_is_set('id'): - raise exception.ObjectActionError(action='create', - reason=_('already created')) - db_group_type = group_types.create(self._context, self.name, - self.group_specs, - self.is_public, self.projects, - self.description) - self._from_db_object(self._context, self, db_group_type) - - def save(self): - updates = self.cinder_obj_get_changes() - if updates: - group_types.update(self._context, self.id, self.name, - self.description) - self.obj_reset_changes() - - def destroy(self): - with self.obj_as_admin(): - group_types.destroy(self._context, self.id) - - -@base.CinderObjectRegistry.register -class GroupTypeList(base.ObjectListBase, base.CinderObject): - # Version 1.0: Initial version - VERSION = '1.0' - - fields = { - 'objects': fields.ListOfObjectsField('GroupType'), - } - - @classmethod - def get_all(cls, context, inactive=0, filters=None, marker=None, - limit=None, sort_keys=None, sort_dirs=None, offset=None): - types = group_types.get_all_group_types(context, inactive, filters, - marker=marker, limit=limit, - sort_keys=sort_keys, - sort_dirs=sort_dirs, - offset=offset) - expected_attrs = GroupType._get_expected_attrs(context) - return base.obj_make_list(context, cls(context), - objects.GroupType, types.values(), - expected_attrs=expected_attrs) diff --git a/cinder/objects/manageableresources.py b/cinder/objects/manageableresources.py deleted file mode 100644 index 3ccb38f97..000000000 --- a/cinder/objects/manageableresources.py +++ /dev/null @@ -1,103 +0,0 @@ -# Copyright 2016 Intel Corporation -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. 
You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from oslo_versionedobjects import fields - -from cinder.objects import base - - -class ManageableObject(object): - - fields = { - 'reference': fields.DictOfNullableStringsField(nullable=False), - 'size': fields.IntegerField(nullable=True), - 'safe_to_manage': fields.BooleanField(default=False, nullable=True), - 'reason_not_safe': fields.StringField(nullable=True), - 'cinder_id': fields.UUIDField(nullable=True), - 'extra_info': fields.DictOfNullableStringsField(nullable=True), - } - - @classmethod - def from_primitives(cls, context, dict_resource): - resource = cls() - driverkeys = set(dict_resource.keys()) - set(cls.fields.keys()) - for name, field in cls.fields.items(): - value = dict_resource.get(name) - resource[name] = value - - for key in driverkeys: - if resource['extra_info'] is None: - resource['extra_info'] = {key: dict_resource[key]} - - resource._context = context - resource.obj_reset_changes() - return resource - - -@base.CinderObjectRegistry.register -class ManageableVolume(base.CinderObject, base.CinderObjectDictCompat, - base.CinderComparableObject, ManageableObject): - # Version 1.0: Initial version - VERSION = '1.0' - - -@base.CinderObjectRegistry.register -class ManageableSnapshot(base.CinderObject, base.CinderObjectDictCompat, - ManageableObject): - # Version 1.0: Initial version - VERSION = '1.0' - - fields = { - 'source_reference': fields.DictOfNullableStringsField(), - } - - -@base.CinderObjectRegistry.register -class ManageableVolumeList(base.ObjectListBase, base.CinderObject): - # Version 1.0: Initial version - VERSION = '1.0' - - 
fields = { - 'objects': fields.ListOfObjectsField('ManageableVolume'), - } - - @classmethod - def from_primitives(cls, context, data): - ManageableVolumeList.objects = [] - - for item in data: - manage_vol_obj = ManageableVolume.from_primitives(context, item) - ManageableVolumeList.objects.append(manage_vol_obj) - ManageableVolumeList._context = context - return ManageableVolumeList.objects - - -@base.CinderObjectRegistry.register -class ManageableSnapshotList(base.ObjectListBase, base.CinderObject): - # Version 1.0: Initial version - VERSION = '1.0' - - fields = { - 'objects': fields.ListOfObjectsField('ManageableSnapshot'), - } - - @classmethod - def from_primitives(cls, context, data): - ManageableSnapshotList.objects = [] - - for item in data: - manage_snap_obj = ManageableSnapshot.from_primitives(context, item) - ManageableSnapshotList.objects.append(manage_snap_obj) - ManageableSnapshotList._context = context - return ManageableSnapshotList.objects diff --git a/cinder/objects/qos_specs.py b/cinder/objects/qos_specs.py deleted file mode 100644 index fb82a527b..000000000 --- a/cinder/objects/qos_specs.py +++ /dev/null @@ -1,200 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -from oslo_db import exception as db_exc -from oslo_log import log as logging - -from cinder import db -from cinder import exception -from cinder.i18n import _ -from cinder import objects -from cinder.objects import base -from cinder.objects import fields as c_fields -from oslo_versionedobjects import fields - -LOG = logging.getLogger(__name__) - - -@base.CinderObjectRegistry.register -class QualityOfServiceSpecs(base.CinderPersistentObject, - base.CinderObject, - base.CinderObjectDictCompat, - base.CinderComparableObject): - # Version - # 1.0: Initial version - VERSION = "1.0" - - OPTIONAL_FIELDS = ['volume_types'] - - fields = { - 'id': fields.UUIDField(), - 'name': fields.StringField(), - 'consumer': c_fields.QoSConsumerField( - default=c_fields.QoSConsumerValues.BACK_END), - 'specs': fields.DictOfNullableStringsField(nullable=True), - 'volume_types': fields.ObjectField('VolumeTypeList', nullable=True), - } - - def __init__(self, *args, **kwargs): - super(QualityOfServiceSpecs, self).__init__(*args, **kwargs) - self._init_specs = {} - - def __setattr__(self, name, value): - try: - super(QualityOfServiceSpecs, self).__setattr__(name, value) - except ValueError: - if name == 'consumer': - # Give more descriptive error message for invalid 'consumer' - msg = (_("Valid consumer of QoS specs are: %s") % - c_fields.QoSConsumerField()) - raise exception.InvalidQoSSpecs(reason=msg) - else: - raise - - def obj_reset_changes(self, fields=None, recursive=False): - super(QualityOfServiceSpecs, self).obj_reset_changes(fields, recursive) - if fields is None or 'specs' in fields: - self._init_specs = self.specs.copy() if self.specs else {} - - def obj_what_changed(self): - changes = super(QualityOfServiceSpecs, self).obj_what_changed() - - # Do comparison of what's in the dict vs. 
reference to the specs object - if self.obj_attr_is_set('id'): - if self.specs != self._init_specs: - changes.add('specs') - else: - # If both dicts are equal don't consider anything gets changed - if 'specs' in changes: - changes.remove('specs') - - return changes - - def obj_get_changes(self): - changes = super(QualityOfServiceSpecs, self).obj_get_changes() - if 'specs' in changes: - # For specs, we only want what has changed in the dictionary, - # because otherwise we'll individually overwrite the DB value for - # every key in 'specs' even if it hasn't changed - specs_changes = {} - for key, val in self.specs.items(): - if val != self._init_specs.get(key): - specs_changes[key] = val - changes['specs'] = specs_changes - - specs_keys_removed = (set(self._init_specs.keys()) - - set(self.specs.keys())) - if specs_keys_removed: - # Special key notifying which specs keys have been deleted - changes['specs_keys_removed'] = specs_keys_removed - - return changes - - def obj_load_attr(self, attrname): - if attrname not in self.OPTIONAL_FIELDS: - raise exception.ObjectActionError( - action='obj_load_attr', - reason=_('attribute %s not lazy-loadable') % attrname) - if not self._context: - raise exception.OrphanedObjectError(method='obj_load_attr', - objtype=self.obj_name()) - - if attrname == 'volume_types': - self.volume_types = objects.VolumeTypeList.get_all_types_for_qos( - self._context, self.id) - - @classmethod - def _from_db_object(cls, context, qos_spec, db_qos_spec, - expected_attrs=None): - if expected_attrs is None: - expected_attrs = [] - - for name, field in qos_spec.fields.items(): - if name not in cls.OPTIONAL_FIELDS: - value = db_qos_spec.get(name) - # 'specs' could be null if only a consumer is given, so make - # it an empty dict instead of None - if not value and isinstance(field, fields.DictOfStringsField): - value = {} - setattr(qos_spec, name, value) - - if 'volume_types' in expected_attrs: - volume_types = objects.VolumeTypeList.get_all_types_for_qos( 
- context, db_qos_spec['id']) - qos_spec.volume_types = volume_types - - qos_spec._context = context - qos_spec.obj_reset_changes() - return qos_spec - - def create(self): - if self.obj_attr_is_set('id'): - raise exception.ObjectActionError(action='create', - reason='already created') - updates = self.cinder_obj_get_changes() - - try: - create_ret = db.qos_specs_create(self._context, updates) - except db_exc.DBDataError: - msg = _('Error writing field to database') - LOG.exception(msg) - raise exception.Invalid(msg) - except db_exc.DBError: - LOG.exception('DB error occurred when creating QoS specs.') - raise exception.QoSSpecsCreateFailed(name=self.name, - qos_specs=self.specs) - # Save ID with the object - updates['id'] = create_ret['id'] - self._from_db_object(self._context, self, updates) - - def save(self): - updates = self.cinder_obj_get_changes() - if updates: - if 'specs_keys_removed' in updates.keys(): - for specs_key_to_remove in updates['specs_keys_removed']: - db.qos_specs_item_delete( - self._context, self.id, specs_key_to_remove) - del updates['specs_keys_removed'] - db.qos_specs_update(self._context, self.id, updates) - - self.obj_reset_changes() - - def destroy(self, force=False): - """Deletes the QoS spec. - - :param force: when force is True, all volume_type mappings for this QoS - are deleted. 
When force is False and volume_type - mappings still exist, a QoSSpecsInUse exception is thrown - """ - if self.volume_types: - if not force: - raise exception.QoSSpecsInUse(specs_id=self.id) - # remove all association - db.qos_specs_disassociate_all(self._context, self.id) - updated_values = db.qos_specs_delete(self._context, self.id) - self.update(updated_values) - self.obj_reset_changes(updated_values.keys()) - - -@base.CinderObjectRegistry.register -class QualityOfServiceSpecsList(base.ObjectListBase, base.CinderObject): - VERSION = '1.0' - - fields = { - 'objects': fields.ListOfObjectsField('QualityOfServiceSpecs'), - } - - @classmethod - def get_all(cls, context, *args, **kwargs): - specs = db.qos_specs_get_all(context, *args, **kwargs) - return base.obj_make_list(context, cls(context), - objects.QualityOfServiceSpecs, specs) diff --git a/cinder/objects/request_spec.py b/cinder/objects/request_spec.py deleted file mode 100644 index b60c08534..000000000 --- a/cinder/objects/request_spec.py +++ /dev/null @@ -1,127 +0,0 @@ -# Copyright 2016 Intel Corporation -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -from oslo_versionedobjects import fields - -from cinder import objects -from cinder.objects import base - - -@base.CinderObjectRegistry.register -class RequestSpec(base.CinderObject, base.CinderObjectDictCompat, - base.CinderComparableObject): - # Version 1.0: Initial version - # Version 1.1: Added group_id and group_backend - VERSION = '1.1' - - fields = { - 'consistencygroup_id': fields.UUIDField(nullable=True), - 'group_id': fields.UUIDField(nullable=True), - 'cgsnapshot_id': fields.UUIDField(nullable=True), - 'image_id': fields.UUIDField(nullable=True), - 'snapshot_id': fields.UUIDField(nullable=True), - 'source_replicaid': fields.UUIDField(nullable=True), - 'source_volid': fields.UUIDField(nullable=True), - 'volume_id': fields.UUIDField(nullable=True), - 'volume': fields.ObjectField('Volume', nullable=True), - 'volume_type': fields.ObjectField('VolumeType', nullable=True), - 'volume_properties': fields.ObjectField('VolumeProperties', - nullable=True), - 'CG_backend': fields.StringField(nullable=True), - 'group_backend': fields.StringField(nullable=True), - } - - obj_extra_fields = ['resource_properties'] - - @property - def resource_properties(self): - # TODO(dulek): This is to maintain compatibility with filters from - # oslo-incubator. As we've moved them into our codebase we should adapt - # them to use volume_properties and remove this shim. - return self.volume_properties - - @classmethod - def from_primitives(cls, spec): - """Returns RequestSpec object creating it from legacy dictionary. - - FIXME(dulek): This should go away in early O as we stop supporting - backward compatibility with M. 
- """ - spec = spec.copy() - spec_obj = cls() - - vol_props = spec.pop('volume_properties', {}) - if vol_props is not None: - vol_props = VolumeProperties(**vol_props) - spec_obj.volume_properties = vol_props - - if 'volume' in spec: - vol = spec.pop('volume', {}) - vol.pop('name', None) - if vol is not None: - vol = objects.Volume(**vol) - spec_obj.volume = vol - - if 'volume_type' in spec: - vol_type = spec.pop('volume_type', {}) - if vol_type is not None: - vol_type = objects.VolumeType(**vol_type) - spec_obj.volume_type = vol_type - - spec.pop('resource_properties', None) - - for k, v in spec.items(): - setattr(spec_obj, k, v) - - return spec_obj - - -@base.CinderObjectRegistry.register -class VolumeProperties(base.CinderObject, base.CinderObjectDictCompat): - # Version 1.0: Initial version - # Version 1.1: Added group_id and group_type_id - VERSION = '1.1' - - # TODO(dulek): We should add this to initially move volume_properites to - # ovo, but this should be removed as soon as possible. Most of the data - # here is already in request_spec and volume there. Outstanding ones would - # be reservation, and qos_specs. First one may be moved to request_spec and - # second added as relationship in volume_type field and whole - # volume_properties (and resource_properties) in request_spec won't be - # needed. 
- - fields = { - 'attach_status': fields.StringField(nullable=True), - 'availability_zone': fields.StringField(nullable=True), - 'cgsnapshot_id': fields.UUIDField(nullable=True), - 'consistencygroup_id': fields.UUIDField(nullable=True), - 'group_id': fields.UUIDField(nullable=True), - 'display_description': fields.StringField(nullable=True), - 'display_name': fields.StringField(nullable=True), - 'encryption_key_id': fields.UUIDField(nullable=True), - 'metadata': fields.DictOfStringsField(nullable=True), - 'multiattach': fields.BooleanField(nullable=True), - 'project_id': fields.StringField(nullable=True), - 'qos_specs': fields.DictOfStringsField(nullable=True), - 'replication_status': fields.StringField(nullable=True), - 'reservations': fields.ListOfStringsField(nullable=True), - 'size': fields.IntegerField(nullable=True), - 'snapshot_id': fields.UUIDField(nullable=True), - 'source_replicaid': fields.UUIDField(nullable=True), - 'source_volid': fields.UUIDField(nullable=True), - 'status': fields.StringField(nullable=True), - 'user_id': fields.StringField(nullable=True), - 'volume_type_id': fields.UUIDField(nullable=True), - 'group_type_id': fields.UUIDField(nullable=True), - } diff --git a/cinder/objects/service.py b/cinder/objects/service.py deleted file mode 100644 index a1706184f..000000000 --- a/cinder/objects/service.py +++ /dev/null @@ -1,228 +0,0 @@ -# Copyright 2015 Intel Corp. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -from oslo_utils import versionutils -from oslo_versionedobjects import fields - -from cinder import db -from cinder import exception -from cinder.i18n import _ -from cinder import objects -from cinder.objects import base -from cinder.objects import fields as c_fields -from cinder import utils - - -@base.CinderObjectRegistry.register -class Service(base.CinderPersistentObject, base.CinderObject, - base.CinderObjectDictCompat, base.CinderComparableObject, - base.ClusteredObject): - # Version 1.0: Initial version - # Version 1.1: Add rpc_current_version and object_current_version fields - # Version 1.2: Add get_minimum_rpc_version() and get_minimum_obj_version() - # Version 1.3: Add replication fields - # Version 1.4: Add cluster fields - VERSION = '1.4' - - OPTIONAL_FIELDS = ('cluster',) - - fields = { - 'id': fields.IntegerField(), - 'host': fields.StringField(nullable=True), - 'binary': fields.StringField(nullable=True), - 'cluster_name': fields.StringField(nullable=True), - 'cluster': fields.ObjectField('Cluster', nullable=True, - read_only=True), - 'topic': fields.StringField(nullable=True), - 'report_count': fields.IntegerField(default=0), - 'disabled': fields.BooleanField(default=False, nullable=True), - 'availability_zone': fields.StringField(nullable=True, - default='cinder'), - 'disabled_reason': fields.StringField(nullable=True), - - 'modified_at': fields.DateTimeField(nullable=True), - 'rpc_current_version': fields.StringField(nullable=True), - 'object_current_version': fields.StringField(nullable=True), - - # Replication properties - 'replication_status': c_fields.ReplicationStatusField(nullable=True), - 'frozen': fields.BooleanField(default=False), - 'active_backend_id': fields.StringField(nullable=True), - } - - def obj_make_compatible(self, primitive, target_version): - """Make a service representation compatible with a target version.""" - # Convert all related objects - super(Service, self).obj_make_compatible(primitive, target_version) - - 
target_version = versionutils.convert_version_to_tuple(target_version) - # Before v1.4 we didn't have cluster fields so we have to remove them. - if target_version < (1, 4): - for obj_field in ('cluster', 'cluster_name'): - primitive.pop(obj_field, None) - - @staticmethod - def _from_db_object(context, service, db_service, expected_attrs=None): - expected_attrs = expected_attrs or [] - for name, field in service.fields.items(): - if name in Service.OPTIONAL_FIELDS: - continue - value = db_service.get(name) - if isinstance(field, fields.IntegerField): - value = value or 0 - elif isinstance(field, fields.DateTimeField): - value = value or None - service[name] = value - - service._context = context - if 'cluster' in expected_attrs: - db_cluster = db_service.get('cluster') - # If this service doesn't belong to a cluster the cluster field in - # the ORM instance will have value of None. - if db_cluster: - service.cluster = objects.Cluster(context) - objects.Cluster._from_db_object(context, service.cluster, - db_cluster) - else: - service.cluster = None - - service.obj_reset_changes() - return service - - def obj_load_attr(self, attrname): - if attrname not in self.OPTIONAL_FIELDS: - raise exception.ObjectActionError( - action='obj_load_attr', - reason=_('attribute %s not lazy-loadable') % attrname) - if not self._context: - raise exception.OrphanedObjectError(method='obj_load_attr', - objtype=self.obj_name()) - - # NOTE(geguileo): We only have 1 optional field, so we don't need to - # confirm that we are loading the cluster. - # If this service doesn't belong to a cluster (cluster_name is empty), - # then cluster field will be None. 
- if self.cluster_name: - self.cluster = objects.Cluster.get_by_id(self._context, None, - name=self.cluster_name) - else: - self.cluster = None - self.obj_reset_changes(fields=(attrname,)) - - @classmethod - def get_by_host_and_topic(cls, context, host, topic): - db_service = db.service_get(context, disabled=False, host=host, - topic=topic) - return cls._from_db_object(context, cls(context), db_service) - - @classmethod - def get_by_args(cls, context, host, binary_key): - db_service = db.service_get(context, host=host, binary=binary_key) - return cls._from_db_object(context, cls(context), db_service) - - def create(self): - if self.obj_attr_is_set('id'): - raise exception.ObjectActionError(action='create', - reason=_('already created')) - updates = self.cinder_obj_get_changes() - if 'cluster' in updates: - raise exception.ObjectActionError( - action='create', reason=_('cluster assigned')) - db_service = db.service_create(self._context, updates) - self._from_db_object(self._context, self, db_service) - - def save(self): - updates = self.cinder_obj_get_changes() - if 'cluster' in updates: - raise exception.ObjectActionError( - action='save', reason=_('cluster changed')) - if updates: - db.service_update(self._context, self.id, updates) - self.obj_reset_changes() - - def destroy(self): - with self.obj_as_admin(): - updated_values = db.service_destroy(self._context, self.id) - self.update(updated_values) - self.obj_reset_changes(updated_values.keys()) - - @classmethod - def _get_minimum_version(cls, attribute, context, binary): - services = ServiceList.get_all_by_binary(context, binary) - min_ver = None - min_ver_str = None - for s in services: - ver_str = getattr(s, attribute) - if ver_str is None: - # NOTE(dulek) None in *_current_version means that this - # service is in Liberty version, which we now don't provide - # backward compatibility to. - msg = _('Service %s is in Liberty version. 
We do not provide ' - 'backward compatibility with Liberty now, so you ' - 'need to upgrade it, release by release if live ' - 'upgrade is required. After upgrade you may need to ' - 'remove any stale service records via ' - '"cinder-manage service remove".') % s.binary - raise exception.ServiceTooOld(msg) - ver = versionutils.convert_version_to_int(ver_str) - if min_ver is None or ver < min_ver: - min_ver = ver - min_ver_str = ver_str - - return min_ver_str - - @classmethod - def get_minimum_rpc_version(cls, context, binary): - return cls._get_minimum_version('rpc_current_version', context, binary) - - @classmethod - def get_minimum_obj_version(cls, context, binary=None): - return cls._get_minimum_version('object_current_version', context, - binary) - - @property - def is_up(self): - """Check whether a service is up based on last heartbeat.""" - return (self.updated_at and - self.updated_at >= utils.service_expired_time(True)) - - -@base.CinderObjectRegistry.register -class ServiceList(base.ObjectListBase, base.CinderObject): - # Version 1.0: Initial version - # Version 1.1: Service object 1.2 - VERSION = '1.1' - - fields = { - 'objects': fields.ListOfObjectsField('Service'), - } - - @classmethod - def get_all(cls, context, filters=None): - services = db.service_get_all(context, **(filters or {})) - return base.obj_make_list(context, cls(context), objects.Service, - services) - - @classmethod - def get_all_by_topic(cls, context, topic, disabled=None): - services = db.service_get_all(context, topic=topic, disabled=disabled) - return base.obj_make_list(context, cls(context), objects.Service, - services) - - @classmethod - def get_all_by_binary(cls, context, binary, disabled=None): - services = db.service_get_all(context, binary=binary, - disabled=disabled) - return base.obj_make_list(context, cls(context), objects.Service, - services) diff --git a/cinder/objects/snapshot.py b/cinder/objects/snapshot.py deleted file mode 100644 index 4233b60dd..000000000 --- 
a/cinder/objects/snapshot.py +++ /dev/null @@ -1,360 +0,0 @@ -# Copyright 2015 SimpliVity Corp. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from oslo_config import cfg -from oslo_utils import versionutils -from oslo_versionedobjects import fields - -from cinder import db -from cinder import exception -from cinder.i18n import _ -from cinder import objects -from cinder.objects import base -from cinder.objects import cleanable -from cinder.objects import fields as c_fields - - -CONF = cfg.CONF - - -@base.CinderObjectRegistry.register -class Snapshot(cleanable.CinderCleanableObject, base.CinderObject, - base.CinderObjectDictCompat, base.CinderComparableObject, - base.ClusteredObject): - # Version 1.0: Initial version - # Version 1.1: Changed 'status' field to use SnapshotStatusField - # Version 1.2: This object is now cleanable (adds rows to workers table) - # Version 1.3: SnapshotStatusField now includes "unmanaging" - # Version 1.4: SnapshotStatusField now includes "backing-up" - # Version 1.5: SnapshotStatusField now includes "restoring" - VERSION = '1.5' - - # NOTE(thangp): OPTIONAL_FIELDS are fields that would be lazy-loaded. They - # are typically the relationship in the sqlalchemy object. 
- OPTIONAL_FIELDS = ('volume', 'metadata', 'cgsnapshot', 'group_snapshot') - - fields = { - 'id': fields.UUIDField(), - - 'user_id': fields.StringField(nullable=True), - 'project_id': fields.StringField(nullable=True), - - 'volume_id': fields.UUIDField(nullable=True), - 'cgsnapshot_id': fields.UUIDField(nullable=True), - 'group_snapshot_id': fields.UUIDField(nullable=True), - 'status': c_fields.SnapshotStatusField(nullable=True), - 'progress': fields.StringField(nullable=True), - 'volume_size': fields.IntegerField(nullable=True), - - 'display_name': fields.StringField(nullable=True), - 'display_description': fields.StringField(nullable=True), - - 'encryption_key_id': fields.UUIDField(nullable=True), - 'volume_type_id': fields.UUIDField(nullable=True), - - 'provider_location': fields.StringField(nullable=True), - 'provider_id': fields.StringField(nullable=True), - 'metadata': fields.DictOfStringsField(), - 'provider_auth': fields.StringField(nullable=True), - - 'volume': fields.ObjectField('Volume', nullable=True), - 'cgsnapshot': fields.ObjectField('CGSnapshot', nullable=True), - 'group_snapshot': fields.ObjectField('GroupSnapshot', nullable=True), - } - - @property - def cluster_name(self): - return self.volume.cluster_name - - @classmethod - def _get_expected_attrs(cls, context, *args, **kwargs): - return 'metadata', - - # NOTE(thangp): obj_extra_fields is used to hold properties that are not - # usually part of the model - obj_extra_fields = ['name', 'volume_name'] - - @property - def name(self): - return CONF.snapshot_name_template % self.id - - @property - def volume_name(self): - return self.volume.name - - def __init__(self, *args, **kwargs): - super(Snapshot, self).__init__(*args, **kwargs) - self._orig_metadata = {} - - self._reset_metadata_tracking() - - def obj_reset_changes(self, fields=None): - super(Snapshot, self).obj_reset_changes(fields) - self._reset_metadata_tracking(fields=fields) - - def _reset_metadata_tracking(self, fields=None): - if fields 
is None or 'metadata' in fields: - self._orig_metadata = (dict(self.metadata) - if self.obj_attr_is_set('metadata') else {}) - - def obj_what_changed(self): - changes = super(Snapshot, self).obj_what_changed() - if hasattr(self, 'metadata') and self.metadata != self._orig_metadata: - changes.add('metadata') - - return changes - - def obj_make_compatible(self, primitive, target_version): - """Make an object representation compatible with a target version.""" - super(Snapshot, self).obj_make_compatible(primitive, target_version) - target_version = versionutils.convert_version_to_tuple(target_version) - - backport_statuses = (((1, 3), - (c_fields.SnapshotStatus.UNMANAGING, - c_fields.SnapshotStatus.DELETING)), - ((1, 4), - (c_fields.SnapshotStatus.BACKING_UP, - c_fields.SnapshotStatus.AVAILABLE)), - ((1, 5), - (c_fields.SnapshotStatus.RESTORING, - c_fields.SnapshotStatus.AVAILABLE))) - for version, status in backport_statuses: - if target_version < version: - if primitive.get('status') == status[0]: - primitive['status'] = status[1] - - @classmethod - def _from_db_object(cls, context, snapshot, db_snapshot, - expected_attrs=None): - if expected_attrs is None: - expected_attrs = [] - for name, field in snapshot.fields.items(): - if name in cls.OPTIONAL_FIELDS: - continue - value = db_snapshot.get(name) - if isinstance(field, fields.IntegerField): - value = value if value is not None else 0 - setattr(snapshot, name, value) - - if 'volume' in expected_attrs: - volume = objects.Volume(context) - volume._from_db_object(context, volume, db_snapshot['volume']) - snapshot.volume = volume - if snapshot.cgsnapshot_id and 'cgsnapshot' in expected_attrs: - cgsnapshot = objects.CGSnapshot(context) - cgsnapshot._from_db_object(context, cgsnapshot, - db_snapshot['cgsnapshot']) - snapshot.cgsnapshot = cgsnapshot - if snapshot.group_snapshot_id and 'group_snapshot' in expected_attrs: - group_snapshot = objects.GroupSnapshot(context) - group_snapshot._from_db_object(context, 
group_snapshot, - db_snapshot['group_snapshot']) - snapshot.group_snapshot = group_snapshot - - if 'metadata' in expected_attrs: - metadata = db_snapshot.get('snapshot_metadata') - if metadata is None: - raise exception.MetadataAbsent() - snapshot.metadata = {item['key']: item['value'] - for item in metadata} - snapshot._context = context - snapshot.obj_reset_changes() - return snapshot - - def create(self): - if self.obj_attr_is_set('id'): - raise exception.ObjectActionError(action='create', - reason=_('already created')) - updates = self.cinder_obj_get_changes() - - if 'volume' in updates: - raise exception.ObjectActionError(action='create', - reason=_('volume assigned')) - if 'cgsnapshot' in updates: - raise exception.ObjectActionError(action='create', - reason=_('cgsnapshot assigned')) - if 'cluster' in updates: - raise exception.ObjectActionError( - action='create', reason=_('cluster assigned')) - if 'group_snapshot' in updates: - raise exception.ObjectActionError( - action='create', - reason=_('group_snapshot assigned')) - - db_snapshot = db.snapshot_create(self._context, updates) - self._from_db_object(self._context, self, db_snapshot) - - def save(self): - updates = self.cinder_obj_get_changes() - if updates: - if 'volume' in updates: - raise exception.ObjectActionError(action='save', - reason=_('volume changed')) - if 'cgsnapshot' in updates: - # NOTE(xyang): Allow this to pass if 'cgsnapshot' is - # set to None. This is to support backward compatibility. 
- if updates.get('cgsnapshot'): - raise exception.ObjectActionError( - action='save', reason=_('cgsnapshot changed')) - if 'group_snapshot' in updates: - raise exception.ObjectActionError( - action='save', reason=_('group_snapshot changed')) - - if 'cluster' in updates: - raise exception.ObjectActionError( - action='save', reason=_('cluster changed')) - - if 'metadata' in updates: - # Metadata items that are not specified in the - # self.metadata will be deleted - metadata = updates.pop('metadata', None) - self.metadata = db.snapshot_metadata_update(self._context, - self.id, metadata, - True) - - db.snapshot_update(self._context, self.id, updates) - - self.obj_reset_changes() - - def destroy(self): - updated_values = db.snapshot_destroy(self._context, self.id) - self.update(updated_values) - self.obj_reset_changes(updated_values.keys()) - - def obj_load_attr(self, attrname): - if attrname not in self.OPTIONAL_FIELDS: - raise exception.ObjectActionError( - action='obj_load_attr', - reason=_('attribute %s not lazy-loadable') % attrname) - if not self._context: - raise exception.OrphanedObjectError(method='obj_load_attr', - objtype=self.obj_name()) - - if attrname == 'volume': - self.volume = objects.Volume.get_by_id(self._context, - self.volume_id) - - if attrname == 'cgsnapshot': - if self.cgsnapshot_id is None: - self.cgsnapshot = None - else: - self.cgsnapshot = objects.CGSnapshot.get_by_id( - self._context, self.cgsnapshot_id) - if attrname == 'group_snapshot': - if self.group_snapshot_id is None: - self.group_snapshot = None - else: - self.group_snapshot = objects.GroupSnapshot.get_by_id( - self._context, - self.group_snapshot_id) - - self.obj_reset_changes(fields=[attrname]) - - def delete_metadata_key(self, context, key): - db.snapshot_metadata_delete(context, self.id, key) - md_was_changed = 'metadata' in self.obj_what_changed() - - del self.metadata[key] - self._orig_metadata.pop(key, None) - - if not md_was_changed: - self.obj_reset_changes(['metadata']) - 
- @classmethod - def snapshot_data_get_for_project(cls, context, project_id, - volume_type_id=None): - return db.snapshot_data_get_for_project(context, project_id, - volume_type_id) - - @staticmethod - def _is_cleanable(status, obj_version): - # Before 1.2 we didn't have workers table, so cleanup wasn't supported. - if obj_version and obj_version < 1.2: - return False - return status == 'creating' - - @property - def host(self): - """All cleanable VO must have a host property/attribute.""" - return self.volume.host - - -@base.CinderObjectRegistry.register -class SnapshotList(base.ObjectListBase, base.CinderObject): - VERSION = '1.0' - - fields = { - 'objects': fields.ListOfObjectsField('Snapshot'), - } - - @classmethod - def get_all(cls, context, filters, marker=None, limit=None, - sort_keys=None, sort_dirs=None, offset=None): - """Get all snapshot given some search_opts (filters). - - Special filters accepted are host and cluster_name, that refer to the - volume's fields. - """ - snapshots = db.snapshot_get_all(context, filters, marker, limit, - sort_keys, sort_dirs, offset) - expected_attrs = Snapshot._get_expected_attrs(context) - return base.obj_make_list(context, cls(context), objects.Snapshot, - snapshots, expected_attrs=expected_attrs) - - @classmethod - def get_by_host(cls, context, host, filters=None): - snapshots = db.snapshot_get_all_by_host(context, host, filters) - expected_attrs = Snapshot._get_expected_attrs(context) - return base.obj_make_list(context, cls(context), objects.Snapshot, - snapshots, expected_attrs=expected_attrs) - - @classmethod - def get_all_by_project(cls, context, project_id, search_opts, marker=None, - limit=None, sort_keys=None, sort_dirs=None, - offset=None): - snapshots = db.snapshot_get_all_by_project( - context, project_id, search_opts, marker, limit, sort_keys, - sort_dirs, offset) - expected_attrs = Snapshot._get_expected_attrs(context) - return base.obj_make_list(context, cls(context), objects.Snapshot, - snapshots, 
expected_attrs=expected_attrs) - - @classmethod - def get_all_for_volume(cls, context, volume_id): - snapshots = db.snapshot_get_all_for_volume(context, volume_id) - expected_attrs = Snapshot._get_expected_attrs(context) - return base.obj_make_list(context, cls(context), objects.Snapshot, - snapshots, expected_attrs=expected_attrs) - - @classmethod - def get_all_active_by_window(cls, context, begin, end): - snapshots = db.snapshot_get_all_active_by_window(context, begin, end) - expected_attrs = Snapshot._get_expected_attrs(context) - return base.obj_make_list(context, cls(context), objects.Snapshot, - snapshots, expected_attrs=expected_attrs) - - @classmethod - def get_all_for_cgsnapshot(cls, context, cgsnapshot_id): - snapshots = db.snapshot_get_all_for_cgsnapshot(context, cgsnapshot_id) - expected_attrs = Snapshot._get_expected_attrs(context) - return base.obj_make_list(context, cls(context), objects.Snapshot, - snapshots, expected_attrs=expected_attrs) - - @classmethod - def get_all_for_group_snapshot(cls, context, group_snapshot_id): - snapshots = db.snapshot_get_all_for_group_snapshot( - context, group_snapshot_id) - expected_attrs = Snapshot._get_expected_attrs(context) - return base.obj_make_list(context, cls(context), objects.Snapshot, - snapshots, expected_attrs=expected_attrs) diff --git a/cinder/objects/volume.py b/cinder/objects/volume.py deleted file mode 100644 index 50be64e2a..000000000 --- a/cinder/objects/volume.py +++ /dev/null @@ -1,657 +0,0 @@ -# Copyright 2015 SimpliVity Corp. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the -# License for the specific language governing permissions and limitations -# under the License. - -from oslo_config import cfg -from oslo_utils import versionutils -from oslo_versionedobjects import fields - -from cinder import db -from cinder import exception -from cinder.i18n import _ -from cinder import objects -from cinder.objects import base -from cinder.objects import cleanable -from cinder.objects import fields as c_fields - -CONF = cfg.CONF - - -class MetadataObject(dict): - # This is a wrapper class that simulates SQLAlchemy (.*)Metadata objects to - # maintain compatibility with older representations of Volume that some - # drivers rely on. This is helpful in transition period while some driver - # methods are invoked with volume versioned object and some SQLAlchemy - # object or dict. - def __init__(self, key=None, value=None): - super(MetadataObject, self).__init__() - self.key = key - self.value = value - - def __getattr__(self, name): - if name in self: - return self[name] - else: - raise AttributeError("No such attribute: " + name) - - def __setattr__(self, name, value): - self[name] = value - - -@base.CinderObjectRegistry.register -class Volume(cleanable.CinderCleanableObject, base.CinderObject, - base.CinderObjectDictCompat, base.CinderComparableObject, - base.ClusteredObject): - # Version 1.0: Initial version - # Version 1.1: Added metadata, admin_metadata, volume_attachment, and - # volume_type - # Version 1.2: Added glance_metadata, consistencygroup and snapshots - # Version 1.3: Added finish_volume_migration() - # Version 1.4: Added cluster fields - # Version 1.5: Added group - # Version 1.6: This object is now cleanable (adds rows to workers table) - VERSION = '1.6' - - OPTIONAL_FIELDS = ('metadata', 'admin_metadata', 'glance_metadata', - 'volume_type', 'volume_attachment', 'consistencygroup', - 'snapshots', 'cluster', 'group') - - fields = { - 'id': fields.UUIDField(), - '_name_id': fields.UUIDField(nullable=True), - 'ec2_id': 
fields.UUIDField(nullable=True), - 'user_id': fields.StringField(nullable=True), - 'project_id': fields.StringField(nullable=True), - - 'snapshot_id': fields.UUIDField(nullable=True), - - 'cluster_name': fields.StringField(nullable=True), - 'cluster': fields.ObjectField('Cluster', nullable=True, - read_only=True), - 'host': fields.StringField(nullable=True), - 'size': fields.IntegerField(nullable=True), - 'availability_zone': fields.StringField(nullable=True), - 'status': fields.StringField(nullable=True), - 'attach_status': c_fields.VolumeAttachStatusField(nullable=True), - 'migration_status': fields.StringField(nullable=True), - - 'scheduled_at': fields.DateTimeField(nullable=True), - 'launched_at': fields.DateTimeField(nullable=True), - 'terminated_at': fields.DateTimeField(nullable=True), - - 'display_name': fields.StringField(nullable=True), - 'display_description': fields.StringField(nullable=True), - - 'provider_id': fields.StringField(nullable=True), - 'provider_location': fields.StringField(nullable=True), - 'provider_auth': fields.StringField(nullable=True), - 'provider_geometry': fields.StringField(nullable=True), - - 'volume_type_id': fields.UUIDField(nullable=True), - 'source_volid': fields.UUIDField(nullable=True), - 'encryption_key_id': fields.UUIDField(nullable=True), - - 'consistencygroup_id': fields.UUIDField(nullable=True), - 'group_id': fields.UUIDField(nullable=True), - - 'deleted': fields.BooleanField(default=False, nullable=True), - 'bootable': fields.BooleanField(default=False, nullable=True), - 'multiattach': fields.BooleanField(default=False, nullable=True), - - 'replication_status': fields.StringField(nullable=True), - 'replication_extended_status': fields.StringField(nullable=True), - 'replication_driver_data': fields.StringField(nullable=True), - - 'previous_status': fields.StringField(nullable=True), - - 'metadata': fields.DictOfStringsField(nullable=True), - 'admin_metadata': fields.DictOfStringsField(nullable=True), - 
'glance_metadata': fields.DictOfStringsField(nullable=True), - 'volume_type': fields.ObjectField('VolumeType', nullable=True), - 'volume_attachment': fields.ObjectField('VolumeAttachmentList', - nullable=True), - 'consistencygroup': fields.ObjectField('ConsistencyGroup', - nullable=True), - 'snapshots': fields.ObjectField('SnapshotList', nullable=True), - 'group': fields.ObjectField('Group', nullable=True), - } - - # NOTE(thangp): obj_extra_fields is used to hold properties that are not - # usually part of the model - obj_extra_fields = ['name', 'name_id', 'volume_metadata', - 'volume_admin_metadata', 'volume_glance_metadata'] - - @classmethod - def _get_expected_attrs(cls, context, *args, **kwargs): - expected_attrs = ['metadata', 'volume_type', 'volume_type.extra_specs'] - if context.is_admin: - expected_attrs.append('admin_metadata') - - return expected_attrs - - @property - def name_id(self): - return self.id if not self._name_id else self._name_id - - @name_id.setter - def name_id(self, value): - self._name_id = value - - @property - def name(self): - return CONF.volume_name_template % self.name_id - - # TODO(dulek): Three properties below are for compatibility with dict - # representation of volume. The format there is different (list of - # SQLAlchemy models) so we need a conversion. Anyway - these should be - # removed when we stop this class from deriving from DictObjectCompat. 
- @property - def volume_metadata(self): - md = [MetadataObject(k, v) for k, v in self.metadata.items()] - return md - - @volume_metadata.setter - def volume_metadata(self, value): - md = {d['key']: d['value'] for d in value} - self.metadata = md - - @property - def volume_admin_metadata(self): - md = [MetadataObject(k, v) for k, v in self.admin_metadata.items()] - return md - - @volume_admin_metadata.setter - def volume_admin_metadata(self, value): - md = {d['key']: d['value'] for d in value} - self.admin_metadata = md - - @property - def volume_glance_metadata(self): - md = [MetadataObject(k, v) for k, v in self.glance_metadata.items()] - return md - - @volume_glance_metadata.setter - def volume_glance_metadata(self, value): - md = {d['key']: d['value'] for d in value} - self.glance_metadata = md - - def __init__(self, *args, **kwargs): - super(Volume, self).__init__(*args, **kwargs) - self._orig_metadata = {} - self._orig_admin_metadata = {} - self._orig_glance_metadata = {} - - self._reset_metadata_tracking() - - def obj_reset_changes(self, fields=None): - super(Volume, self).obj_reset_changes(fields) - self._reset_metadata_tracking(fields=fields) - - @classmethod - def _obj_from_primitive(cls, context, objver, primitive): - obj = super(Volume, Volume)._obj_from_primitive(context, objver, - primitive) - obj._reset_metadata_tracking() - return obj - - def _reset_metadata_tracking(self, fields=None): - if fields is None or 'metadata' in fields: - self._orig_metadata = (dict(self.metadata) - if 'metadata' in self else {}) - if fields is None or 'admin_metadata' in fields: - self._orig_admin_metadata = (dict(self.admin_metadata) - if 'admin_metadata' in self - else {}) - if fields is None or 'glance_metadata' in fields: - self._orig_glance_metadata = (dict(self.glance_metadata) - if 'glance_metadata' in self - else {}) - - def obj_what_changed(self): - changes = super(Volume, self).obj_what_changed() - if 'metadata' in self and self.metadata != self._orig_metadata: 
- changes.add('metadata') - if ('admin_metadata' in self and - self.admin_metadata != self._orig_admin_metadata): - changes.add('admin_metadata') - if ('glance_metadata' in self and - self.glance_metadata != self._orig_glance_metadata): - changes.add('glance_metadata') - - return changes - - def obj_make_compatible(self, primitive, target_version): - """Make a Volume representation compatible with a target version.""" - added_fields = (((1, 4), ('cluster', 'cluster_name')), - ((1, 5), ('group', 'group_id'))) - - # Convert all related objects - super(Volume, self).obj_make_compatible(primitive, target_version) - - target_version = versionutils.convert_version_to_tuple(target_version) - for version, remove_fields in added_fields: - if target_version < version: - for obj_field in remove_fields: - primitive.pop(obj_field, None) - - @classmethod - def _from_db_object(cls, context, volume, db_volume, expected_attrs=None): - if expected_attrs is None: - expected_attrs = [] - for name, field in volume.fields.items(): - if name in cls.OPTIONAL_FIELDS: - continue - value = db_volume.get(name) - if isinstance(field, fields.IntegerField): - value = value or 0 - volume[name] = value - - # Get data from db_volume object that was queried by joined query - # from DB - if 'metadata' in expected_attrs: - metadata = db_volume.get('volume_metadata', []) - volume.metadata = {item['key']: item['value'] for item in metadata} - if 'admin_metadata' in expected_attrs: - metadata = db_volume.get('volume_admin_metadata', []) - volume.admin_metadata = {item['key']: item['value'] - for item in metadata} - if 'glance_metadata' in expected_attrs: - metadata = db_volume.get('volume_glance_metadata', []) - volume.glance_metadata = {item['key']: item['value'] - for item in metadata} - if 'volume_type' in expected_attrs: - db_volume_type = db_volume.get('volume_type') - if db_volume_type: - vt_expected_attrs = [] - if 'volume_type.extra_specs' in expected_attrs: - 
vt_expected_attrs.append('extra_specs') - volume.volume_type = objects.VolumeType._from_db_object( - context, objects.VolumeType(), db_volume_type, - expected_attrs=vt_expected_attrs) - if 'volume_attachment' in expected_attrs: - attachments = base.obj_make_list( - context, objects.VolumeAttachmentList(context), - objects.VolumeAttachment, - db_volume.get('volume_attachment')) - volume.volume_attachment = attachments - if volume.consistencygroup_id and 'consistencygroup' in expected_attrs: - consistencygroup = objects.ConsistencyGroup(context) - consistencygroup._from_db_object(context, - consistencygroup, - db_volume['consistencygroup']) - volume.consistencygroup = consistencygroup - if 'snapshots' in expected_attrs: - snapshots = base.obj_make_list( - context, objects.SnapshotList(context), - objects.Snapshot, - db_volume['snapshots']) - volume.snapshots = snapshots - if 'cluster' in expected_attrs: - db_cluster = db_volume.get('cluster') - # If this volume doesn't belong to a cluster the cluster field in - # the ORM instance will have value of None. 
- if db_cluster: - volume.cluster = objects.Cluster(context) - objects.Cluster._from_db_object(context, volume.cluster, - db_cluster) - else: - volume.cluster = None - if volume.group_id and 'group' in expected_attrs: - group = objects.Group(context) - group._from_db_object(context, - group, - db_volume['group']) - volume.group = group - - volume._context = context - volume.obj_reset_changes() - return volume - - def create(self): - if self.obj_attr_is_set('id'): - raise exception.ObjectActionError(action='create', - reason=_('already created')) - updates = self.cinder_obj_get_changes() - - if 'consistencygroup' in updates: - raise exception.ObjectActionError( - action='create', reason=_('consistencygroup assigned')) - if 'snapshots' in updates: - raise exception.ObjectActionError( - action='create', reason=_('snapshots assigned')) - if 'cluster' in updates: - raise exception.ObjectActionError( - action='create', reason=_('cluster assigned')) - if 'group' in updates: - raise exception.ObjectActionError( - action='create', reason=_('group assigned')) - - db_volume = db.volume_create(self._context, updates) - self._from_db_object(self._context, self, db_volume) - - def save(self): - updates = self.cinder_obj_get_changes() - if updates: - # NOTE(xyang): Allow this to pass if 'consistencygroup' is - # set to None. This is to support backward compatibility. - # Also remove 'consistencygroup' from updates because - # consistencygroup is the name of a relationship in the ORM - # Volume model, so SQLA tries to do some kind of update of - # the foreign key based on the provided updates if - # 'consistencygroup' is in updates. 
- if updates.pop('consistencygroup', None): - raise exception.ObjectActionError( - action='save', reason=_('consistencygroup changed')) - if 'group' in updates: - raise exception.ObjectActionError( - action='save', reason=_('group changed')) - if 'glance_metadata' in updates: - raise exception.ObjectActionError( - action='save', reason=_('glance_metadata changed')) - if 'snapshots' in updates: - raise exception.ObjectActionError( - action='save', reason=_('snapshots changed')) - if 'cluster' in updates: - raise exception.ObjectActionError( - action='save', reason=_('cluster changed')) - if 'metadata' in updates: - # Metadata items that are not specified in the - # self.metadata will be deleted - metadata = updates.pop('metadata', None) - self.metadata = db.volume_metadata_update(self._context, - self.id, metadata, - True) - if self._context.is_admin and 'admin_metadata' in updates: - metadata = updates.pop('admin_metadata', None) - self.admin_metadata = db.volume_admin_metadata_update( - self._context, self.id, metadata, True) - - # When we are creating a volume and we change from 'creating' - # status to 'downloading' status we have to change the worker entry - # in the DB to reflect this change, otherwise the cleanup will - # not be performed as it will be mistaken for a volume that has - # been somehow changed (reset status, forced operation...) - if updates.get('status') == 'downloading': - self.set_worker() - - # updates are changed after popping out metadata. 
- if updates: - db.volume_update(self._context, self.id, updates) - self.obj_reset_changes() - - def destroy(self): - with self.obj_as_admin(): - updated_values = db.volume_destroy(self._context, self.id) - self.update(updated_values) - self.obj_reset_changes(updated_values.keys()) - - def obj_load_attr(self, attrname): - if attrname not in self.OPTIONAL_FIELDS: - raise exception.ObjectActionError( - action='obj_load_attr', - reason=_('attribute %s not lazy-loadable') % attrname) - if not self._context: - raise exception.OrphanedObjectError(method='obj_load_attr', - objtype=self.obj_name()) - - if attrname == 'metadata': - self.metadata = db.volume_metadata_get(self._context, self.id) - elif attrname == 'admin_metadata': - self.admin_metadata = {} - if self._context.is_admin: - self.admin_metadata = db.volume_admin_metadata_get( - self._context, self.id) - elif attrname == 'glance_metadata': - try: - # NOTE(dulek): We're using alias here to have conversion from - # list to dict done there. - self.volume_glance_metadata = db.volume_glance_metadata_get( - self._context, self.id) - except exception.GlanceMetadataNotFound: - # NOTE(dulek): DB API raises when volume has no - # glance_metadata. Silencing this because at this level no - # metadata is a completely valid result. - self.glance_metadata = {} - elif attrname == 'volume_type': - # If the volume doesn't have volume_type, VolumeType.get_by_id - # would trigger a db call which raise VolumeTypeNotFound exception. 
- self.volume_type = (objects.VolumeType.get_by_id( - self._context, self.volume_type_id) if self.volume_type_id - else None) - elif attrname == 'volume_attachment': - attachments = objects.VolumeAttachmentList.get_all_by_volume_id( - self._context, self.id) - self.volume_attachment = attachments - elif attrname == 'consistencygroup': - if self.consistencygroup_id is None: - self.consistencygroup = None - else: - consistencygroup = objects.ConsistencyGroup.get_by_id( - self._context, self.consistencygroup_id) - self.consistencygroup = consistencygroup - elif attrname == 'snapshots': - self.snapshots = objects.SnapshotList.get_all_for_volume( - self._context, self.id) - elif attrname == 'cluster': - # If this volume doesn't belong to a cluster (cluster_name is - # empty), then cluster field will be None. - if self.cluster_name: - self.cluster = objects.Cluster.get_by_id( - self._context, name=self.cluster_name) - else: - self.cluster = None - elif attrname == 'group': - if self.group_id is None: - self.group = None - else: - group = objects.Group.get_by_id( - self._context, self.group_id) - self.group = group - - self.obj_reset_changes(fields=[attrname]) - - def delete_metadata_key(self, key): - db.volume_metadata_delete(self._context, self.id, key) - md_was_changed = 'metadata' in self.obj_what_changed() - - del self.metadata[key] - self._orig_metadata.pop(key, None) - - if not md_was_changed: - self.obj_reset_changes(['metadata']) - - def finish_volume_migration(self, dest_volume): - # We swap fields between source (i.e. self) and destination at the - # end of migration because we want to keep the original volume id - # in the DB but now pointing to the migrated volume. - skip = ({'id', 'provider_location', 'glance_metadata', - 'volume_type'} | set(self.obj_extra_fields)) - for key in set(dest_volume.fields.keys()) - skip: - # Only swap attributes that are already set. We do not want to - # unexpectedly trigger a lazy-load. 
- if not dest_volume.obj_attr_is_set(key): - continue - - value = getattr(dest_volume, key) - value_to_dst = getattr(self, key) - - # Destination must have a _name_id since the id no longer matches - # the volume. If it doesn't have a _name_id we set one. - if key == '_name_id': - if not dest_volume._name_id: - setattr(dest_volume, key, self.id) - continue - elif key == 'migration_status': - value = None - value_to_dst = 'deleting' - elif key == 'display_description': - value_to_dst = 'migration src for ' + self.id - elif key == 'status': - value_to_dst = 'deleting' - # Because dest_volume will be deleted soon, we can - # skip to copy volume_type_id and volume_type which - # are not keys for volume deletion. - elif key == 'volume_type_id': - # Initialize volume_type of source volume using - # new volume_type_id. - self.update({'volume_type_id': value}) - continue - - setattr(self, key, value) - setattr(dest_volume, key, value_to_dst) - - self.save() - dest_volume.save() - return dest_volume - - def get_latest_snapshot(self): - """Get volume's latest snapshot""" - snapshot_db = db.snapshot_get_latest_for_volume(self._context, self.id) - snapshot = objects.Snapshot(self._context) - return snapshot._from_db_object(self._context, - snapshot, snapshot_db) - - @staticmethod - def _is_cleanable(status, obj_version): - # Before 1.6 we didn't have workers table, so cleanup wasn't supported. - # cleaning. 
- if obj_version and obj_version < 1.6: - return False - return status in ('creating', 'deleting', 'uploading', 'downloading') - - def begin_attach(self, attach_mode): - attachment = objects.VolumeAttachment( - context=self._context, - attach_status=c_fields.VolumeAttachStatus.ATTACHING, - volume_id=self.id) - attachment.create() - with self.obj_as_admin(): - self.admin_metadata['attached_mode'] = attach_mode - self.save() - return attachment - - def finish_detach(self, attachment_id): - with self.obj_as_admin(): - volume_updates, attachment_updates = ( - db.volume_detached(self._context, self.id, attachment_id)) - db.volume_admin_metadata_delete(self._context, self.id, - 'attached_mode') - self.admin_metadata.pop('attached_mode', None) - # Remove attachment in volume only when this field is loaded. - if attachment_updates and self.obj_attr_is_set('volume_attachment'): - for i, attachment in enumerate(self.volume_attachment): - if attachment.id == attachment_id: - del self.volume_attachment.objects[i] - break - - self.update(volume_updates) - self.obj_reset_changes( - list(volume_updates.keys()) + - ['volume_attachment', 'admin_metadata']) - - def is_replicated(self): - return self.volume_type and self.volume_type.is_replicated() - - -@base.CinderObjectRegistry.register -class VolumeList(base.ObjectListBase, base.CinderObject): - VERSION = '1.1' - - fields = { - 'objects': fields.ListOfObjectsField('Volume'), - } - - @staticmethod - def include_in_cluster(context, cluster, partial_rename=True, **filters): - """Include all volumes matching the filters into a cluster. - - When partial_rename is set we will not set the cluster_name with - cluster parameter value directly, we'll replace provided cluster_name - or host filter value with cluster instead. - - This is useful when we want to replace just the cluster name but leave - the backend and pool information as it is. 
If we are using - cluster_name to filter, we'll use that same DB field to replace the - cluster value and leave the rest as it is. Likewise if we use the host - to filter. - - Returns the number of volumes that have been changed. - """ - return db.volume_include_in_cluster(context, cluster, partial_rename, - **filters) - - @classmethod - def _get_expected_attrs(cls, context, *args, **kwargs): - expected_attrs = ['metadata', 'volume_type'] - if context.is_admin: - expected_attrs.append('admin_metadata') - - return expected_attrs - - @classmethod - def get_all(cls, context, marker=None, limit=None, sort_keys=None, - sort_dirs=None, filters=None, offset=None): - volumes = db.volume_get_all(context, marker, limit, - sort_keys=sort_keys, sort_dirs=sort_dirs, - filters=filters, offset=offset) - expected_attrs = cls._get_expected_attrs(context) - return base.obj_make_list(context, cls(context), objects.Volume, - volumes, expected_attrs=expected_attrs) - - @classmethod - def get_all_by_host(cls, context, host, filters=None): - volumes = db.volume_get_all_by_host(context, host, filters) - expected_attrs = cls._get_expected_attrs(context) - return base.obj_make_list(context, cls(context), objects.Volume, - volumes, expected_attrs=expected_attrs) - - @classmethod - def get_all_by_group(cls, context, group_id, filters=None): - # Consistency group - volumes = db.volume_get_all_by_group(context, group_id, filters) - expected_attrs = cls._get_expected_attrs(context) - return base.obj_make_list(context, cls(context), objects.Volume, - volumes, expected_attrs=expected_attrs) - - @classmethod - def get_all_by_generic_group(cls, context, group_id, filters=None): - # Generic volume group - volumes = db.volume_get_all_by_generic_group(context, group_id, - filters) - expected_attrs = cls._get_expected_attrs(context) - return base.obj_make_list(context, cls(context), objects.Volume, - volumes, expected_attrs=expected_attrs) - - @classmethod - def get_all_by_project(cls, context, 
project_id, marker=None, limit=None, - sort_keys=None, sort_dirs=None, filters=None, - offset=None): - volumes = db.volume_get_all_by_project(context, project_id, marker, - limit, sort_keys=sort_keys, - sort_dirs=sort_dirs, - filters=filters, offset=offset) - expected_attrs = cls._get_expected_attrs(context) - return base.obj_make_list(context, cls(context), objects.Volume, - volumes, expected_attrs=expected_attrs) - - @classmethod - def get_volume_summary(cls, context, project_only): - volumes = db.get_volume_summary(context, project_only) - return volumes - - @classmethod - def get_all_active_by_window(cls, context, begin, end): - volumes = db.volume_get_all_active_by_window(context, begin, end) - expected_attrs = cls._get_expected_attrs(context) - return base.obj_make_list(context, cls(context), objects.Volume, - volumes, expected_attrs=expected_attrs) diff --git a/cinder/objects/volume_attachment.py b/cinder/objects/volume_attachment.py deleted file mode 100644 index 42916226e..000000000 --- a/cinder/objects/volume_attachment.py +++ /dev/null @@ -1,217 +0,0 @@ -# Copyright 2015 SimpliVity Corp. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -from oslo_serialization import jsonutils -from oslo_utils import versionutils -from oslo_versionedobjects import fields - -from cinder import db -from cinder import exception -from cinder.i18n import _ -from cinder import objects -from cinder.objects import base -from cinder.objects import fields as c_fields - - -@base.CinderObjectRegistry.register -class VolumeAttachment(base.CinderPersistentObject, base.CinderObject, - base.CinderObjectDictCompat, - base.CinderComparableObject): - # Version 1.0: Initial version - # Version 1.1: Added volume relationship - # Version 1.2: Added connection_info attribute - VERSION = '1.2' - - OPTIONAL_FIELDS = ['volume'] - obj_extra_fields = ['project_id', 'volume_host'] - - fields = { - 'id': fields.UUIDField(), - 'volume_id': fields.UUIDField(), - 'instance_uuid': fields.UUIDField(nullable=True), - 'attached_host': fields.StringField(nullable=True), - 'mountpoint': fields.StringField(nullable=True), - - 'attach_time': fields.DateTimeField(nullable=True), - 'detach_time': fields.DateTimeField(nullable=True), - - 'attach_status': c_fields.VolumeAttachStatusField(nullable=True), - 'attach_mode': fields.StringField(nullable=True), - - 'volume': fields.ObjectField('Volume', nullable=False), - 'connection_info': c_fields.DictOfNullableField(nullable=True) - } - - @property - def project_id(self): - return self.volume.project_id - - @property - def volume_host(self): - return self.volume.host - - @classmethod - def _get_expected_attrs(cls, context, *args, **kwargs): - return ['volume'] - - def obj_make_compatible(self, primitive, target_version): - """Make a object representation compatible with target version.""" - super(VolumeAttachment, self).obj_make_compatible(primitive, - target_version) - target_version = versionutils.convert_version_to_tuple(target_version) - if target_version < (1, 2): - primitive.pop('connection_info', None) - - @classmethod - def _from_db_object(cls, context, attachment, db_attachment, - 
expected_attrs=None): - if expected_attrs is None: - expected_attrs = cls._get_expected_attrs(context) - - for name, field in attachment.fields.items(): - if name in cls.OPTIONAL_FIELDS: - continue - value = db_attachment.get(name) - if isinstance(field, fields.IntegerField): - value = value or 0 - if name == 'connection_info': - attachment.connection_info = jsonutils.loads( - value) if value else None - else: - attachment[name] = value - if 'volume' in expected_attrs: - db_volume = db_attachment.get('volume') - if db_volume: - attachment.volume = objects.Volume._from_db_object( - context, objects.Volume(), db_volume) - - attachment._context = context - attachment.obj_reset_changes() - return attachment - - def obj_load_attr(self, attrname): - if attrname not in self.OPTIONAL_FIELDS: - raise exception.ObjectActionError( - action='obj_load_attr', - reason=_('attribute %s not lazy-loadable') % attrname) - if not self._context: - raise exception.OrphanedObjectError(method='obj_load_attr', - objtype=self.obj_name()) - - if attrname == 'volume': - volume = objects.Volume.get_by_id(self._context, self.id) - self.volume = volume - - self.obj_reset_changes(fields=[attrname]) - - @staticmethod - def _convert_connection_info_to_db_format(updates): - properties = updates.pop('connection_info', None) - if properties is not None: - updates['connection_info'] = jsonutils.dumps(properties) - - def save(self): - updates = self.cinder_obj_get_changes() - if updates: - if 'connection_info' in updates: - self._convert_connection_info_to_db_format(updates) - if 'volume' in updates: - raise exception.ObjectActionError(action='save', - reason=_('volume changed')) - - db.volume_attachment_update(self._context, self.id, updates) - self.obj_reset_changes() - - def finish_attach(self, instance_uuid, host_name, - mount_point, attach_mode='rw'): - with self.obj_as_admin(): - db_volume, updated_values = db.volume_attached( - self._context, self.id, - instance_uuid, host_name, - mount_point, 
attach_mode) - self.update(updated_values) - self.obj_reset_changes(updated_values.keys()) - return objects.Volume._from_db_object(self._context, - objects.Volume(), - db_volume) - - def create(self): - if self.obj_attr_is_set('id'): - raise exception.ObjectActionError(action='create', - reason=_('already created')) - updates = self.cinder_obj_get_changes() - with self.obj_as_admin(): - db_attachment = db.volume_attach(self._context, updates) - self._from_db_object(self._context, self, db_attachment) - - def destroy(self): - updated_values = db.attachment_destroy(self._context, self.id) - self.update(updated_values) - self.obj_reset_changes(updated_values.keys()) - - -@base.CinderObjectRegistry.register -class VolumeAttachmentList(base.ObjectListBase, base.CinderObject): - # Version 1.0: Initial version - # Version 1.1: Remove volume_id in get_by_host|instance - VERSION = '1.1' - - fields = { - 'objects': fields.ListOfObjectsField('VolumeAttachment'), - } - - @classmethod - def get_all_by_volume_id(cls, context, volume_id): - attachments = db.volume_attachment_get_all_by_volume_id(context, - volume_id) - return base.obj_make_list(context, - cls(context), - objects.VolumeAttachment, - attachments) - - @classmethod - def get_all_by_host(cls, context, host, search_opts=None): - attachments = db.volume_attachment_get_all_by_host(context, - host, - search_opts) - return base.obj_make_list(context, cls(context), - objects.VolumeAttachment, attachments) - - @classmethod - def get_all_by_instance_uuid(cls, context, - instance_uuid, search_opts=None): - attachments = db.volume_attachment_get_all_by_instance_uuid( - context, instance_uuid, search_opts) - return base.obj_make_list(context, cls(context), - objects.VolumeAttachment, attachments) - - @classmethod - def get_all(cls, context, search_opts=None, - marker=None, limit=None, offset=None, - sort_keys=None, sort_direction=None): - attachments = db.volume_attachment_get_all( - context, search_opts, marker, limit, offset, 
sort_keys, - sort_direction) - return base.obj_make_list(context, cls(context), - objects.VolumeAttachment, attachments) - - @classmethod - def get_all_by_project(cls, context, project_id, search_opts=None, - marker=None, limit=None, offset=None, - sort_keys=None, sort_direction=None): - attachments = db.volume_attachment_get_all_by_project( - context, project_id, search_opts, marker, limit, offset, sort_keys, - sort_direction) - return base.obj_make_list(context, cls(context), - objects.VolumeAttachment, attachments) diff --git a/cinder/objects/volume_type.py b/cinder/objects/volume_type.py deleted file mode 100644 index 73d366a08..000000000 --- a/cinder/objects/volume_type.py +++ /dev/null @@ -1,205 +0,0 @@ -# Copyright 2015 SimpliVity Corp. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -from oslo_utils import versionutils -from oslo_versionedobjects import fields -import six - -from cinder import db -from cinder import exception -from cinder.i18n import _ -from cinder import objects -from cinder.objects import base -from cinder.volume import utils -from cinder.volume import volume_types - - -@base.CinderObjectRegistry.register -class VolumeType(base.CinderPersistentObject, base.CinderObject, - base.CinderObjectDictCompat, base.CinderComparableObject): - # Version 1.0: Initial version - # Version 1.1: Changed extra_specs to DictOfNullableStringsField - # Version 1.2: Added qos_specs - # Version 1.3: Add qos_specs_id - VERSION = '1.3' - - OPTIONAL_FIELDS = ('extra_specs', 'projects', 'qos_specs') - - fields = { - 'id': fields.UUIDField(), - 'name': fields.StringField(nullable=True), - 'description': fields.StringField(nullable=True), - 'is_public': fields.BooleanField(default=True, nullable=True), - 'projects': fields.ListOfStringsField(nullable=True), - 'extra_specs': fields.DictOfNullableStringsField(nullable=True), - 'qos_specs_id': fields.UUIDField(nullable=True), - 'qos_specs': fields.ObjectField('QualityOfServiceSpecs', - nullable=True), - } - - def obj_make_compatible(self, primitive, target_version): - super(VolumeType, self).obj_make_compatible(primitive, target_version) - - target_version = versionutils.convert_version_to_tuple(target_version) - if target_version < (1, 1): - if primitive.get('extra_specs'): - # Before 1.1 extra_specs field didn't allowed None values. To - # make sure we won't explode on receiver side - change Nones to - # empty string. 
- for k, v in primitive['extra_specs'].items(): - if v is None: - primitive['extra_specs'][k] = '' - if target_version < (1, 3): - primitive.pop('qos_specs_id', None) - - @classmethod - def _get_expected_attrs(cls, context, *args, **kwargs): - return 'extra_specs', 'projects' - - @classmethod - def _from_db_object(cls, context, type, db_type, expected_attrs=None): - if expected_attrs is None: - expected_attrs = ['extra_specs', 'projects'] - for name, field in type.fields.items(): - if name in cls.OPTIONAL_FIELDS: - continue - value = db_type[name] - if isinstance(field, fields.IntegerField): - value = value or 0 - type[name] = value - - # Get data from db_type object that was queried by joined query - # from DB - if 'extra_specs' in expected_attrs: - type.extra_specs = {} - specs = db_type.get('extra_specs') - if specs and isinstance(specs, list): - type.extra_specs = {item['key']: item['value'] - for item in specs} - elif specs and isinstance(specs, dict): - type.extra_specs = specs - if 'projects' in expected_attrs: - # NOTE(geguileo): Until projects stops being a polymorphic value we - # have to do a conversion here for VolumeTypeProjects ORM instance - # lists. 
- projects = db_type.get('projects', []) - if projects and not isinstance(projects[0], six.string_types): - projects = [p.project_id for p in projects] - type.projects = projects - if 'qos_specs' in expected_attrs: - qos_specs = objects.QualityOfServiceSpecs(context) - qos_specs._from_db_object(context, qos_specs, db_type['qos_specs']) - type.qos_specs = qos_specs - type._context = context - type.obj_reset_changes() - return type - - def create(self): - if self.obj_attr_is_set('id'): - raise exception.ObjectActionError(action='create', - reason=_('already created')) - db_volume_type = volume_types.create(self._context, self.name, - self.extra_specs, - self.is_public, self.projects, - self.description) - self._from_db_object(self._context, self, db_volume_type) - - def save(self): - updates = self.cinder_obj_get_changes() - if updates: - volume_types.update(self._context, self.id, self.name, - self.description) - self.obj_reset_changes() - - def destroy(self): - with self.obj_as_admin(): - updated_values = volume_types.destroy(self._context, self.id) - self.update(updated_values) - self.obj_reset_changes(updated_values.keys()) - - def obj_load_attr(self, attrname): - if attrname not in self.OPTIONAL_FIELDS: - raise exception.ObjectActionError( - action='obj_load_attr', - reason=_('attribute %s not lazy-loadable') % attrname) - if not self._context: - raise exception.OrphanedObjectError(method='obj_load_attr', - objtype=self.obj_name()) - - if attrname == 'extra_specs': - self.extra_specs = db.volume_type_extra_specs_get(self._context, - self.id) - - elif attrname == 'qos_specs': - if self.qos_specs_id: - self.qos_specs = objects.QualityOfServiceSpecs.get_by_id( - self._context, self.qos_specs_id) - else: - self.qos_specs = None - - elif attrname == 'projects': - volume_type_projects = db.volume_type_access_get_all(self._context, - self.id) - self.projects = [x.project_id for x in volume_type_projects] - - self.obj_reset_changes(fields=[attrname]) - - @classmethod - 
def get_by_name_or_id(cls, context, identity): - orm_obj = volume_types.get_by_name_or_id(context, identity) - expected_attrs = cls._get_expected_attrs(context) - return cls._from_db_object(context, cls(context), - orm_obj, expected_attrs=expected_attrs) - - def is_replicated(self): - return utils.is_replicated_spec(self.extra_specs) - - -@base.CinderObjectRegistry.register -class VolumeTypeList(base.ObjectListBase, base.CinderObject): - # Version 1.0: Initial version - # Version 1.1: Add pagination support to volume type - VERSION = '1.1' - - fields = { - 'objects': fields.ListOfObjectsField('VolumeType'), - } - - @classmethod - def get_all(cls, context, inactive=0, filters=None, marker=None, - limit=None, sort_keys=None, sort_dirs=None, offset=None): - types = volume_types.get_all_types(context, inactive, filters, - marker=marker, limit=limit, - sort_keys=sort_keys, - sort_dirs=sort_dirs, offset=offset) - expected_attrs = VolumeType._get_expected_attrs(context) - return base.obj_make_list(context, cls(context), - objects.VolumeType, types.values(), - expected_attrs=expected_attrs) - - @classmethod - def get_all_types_for_qos(cls, context, qos_id): - types = db.qos_specs_associations_get(context, qos_id) - return base.obj_make_list(context, cls(context), objects.VolumeType, - types) - - @classmethod - def get_all_by_group(cls, context, group_id): - # Generic volume group - types = volume_types.get_all_types_by_group( - context.elevated(), group_id) - expected_attrs = VolumeType._get_expected_attrs(context) - return base.obj_make_list(context, cls(context), - objects.VolumeType, types, - expected_attrs=expected_attrs) diff --git a/cinder/opts.py b/cinder/opts.py deleted file mode 100644 index 36c59316e..000000000 --- a/cinder/opts.py +++ /dev/null @@ -1,408 +0,0 @@ - -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. - - -################################################################### -# WARNING! -# -# Do not edit this file directly. This file should be generated by -# running the command "tox -e genopts" any time a config option -# has been added, changed, or removed. -################################################################### - -import itertools - -from cinder import objects -objects.register_all() - -from cinder.api import common as cinder_api_common -from cinder.api.contrib import types_extra_specs as \ - cinder_api_contrib_typesextraspecs -from cinder.api.middleware import auth as cinder_api_middleware_auth -from cinder.api.views import versions as cinder_api_views_versions -from cinder.backup import api as cinder_backup_api -from cinder.backup import chunkeddriver as cinder_backup_chunkeddriver -from cinder.backup import driver as cinder_backup_driver -from cinder.backup.drivers import ceph as cinder_backup_drivers_ceph -from cinder.backup.drivers import glusterfs as cinder_backup_drivers_glusterfs -from cinder.backup.drivers import google as cinder_backup_drivers_google -from cinder.backup.drivers import nfs as cinder_backup_drivers_nfs -from cinder.backup.drivers import posix as cinder_backup_drivers_posix -from cinder.backup.drivers import swift as cinder_backup_drivers_swift -from cinder.backup.drivers import tsm as cinder_backup_drivers_tsm -from cinder.backup import manager as cinder_backup_manager -from cinder.cmd import volume as cinder_cmd_volume -from cinder.common import config as cinder_common_config -import cinder.compute -from 
cinder.compute import nova as cinder_compute_nova -from cinder import context as cinder_context -from cinder import coordination as cinder_coordination -from cinder.db import api as cinder_db_api -from cinder.db import base as cinder_db_base -from cinder import exception as cinder_exception -from cinder.image import glance as cinder_image_glance -from cinder.image import image_utils as cinder_image_imageutils -from cinder.keymgr import conf_key_mgr as cinder_keymgr_confkeymgr -from cinder.message import api as cinder_message_api -from cinder import quota as cinder_quota -from cinder.scheduler import driver as cinder_scheduler_driver -from cinder.scheduler import host_manager as cinder_scheduler_hostmanager -from cinder.scheduler import manager as cinder_scheduler_manager -from cinder.scheduler import scheduler_options as \ - cinder_scheduler_scheduleroptions -from cinder.scheduler.weights import capacity as \ - cinder_scheduler_weights_capacity -from cinder.scheduler.weights import volume_number as \ - cinder_scheduler_weights_volumenumber -from cinder import service as cinder_service -from cinder import ssh_utils as cinder_sshutils -from cinder.transfer import api as cinder_transfer_api -from cinder.volume import api as cinder_volume_api -from cinder.volume import driver as cinder_volume_driver -from cinder.volume.drivers import block_device as \ - cinder_volume_drivers_blockdevice -from cinder.volume.drivers import blockbridge as \ - cinder_volume_drivers_blockbridge -from cinder.volume.drivers import coho as cinder_volume_drivers_coho -from cinder.volume.drivers.coprhd import common as \ - cinder_volume_drivers_coprhd_common -from cinder.volume.drivers.coprhd import scaleio as \ - cinder_volume_drivers_coprhd_scaleio -from cinder.volume.drivers.datera import datera_iscsi as \ - cinder_volume_drivers_datera_dateraiscsi -from cinder.volume.drivers.dell_emc import ps as \ - cinder_volume_drivers_dell_emc_ps -from cinder.volume.drivers.dell_emc.sc import 
storagecenter_common as \ - cinder_volume_drivers_dell_emc_sc_storagecentercommon -from cinder.volume.drivers.dell_emc.scaleio import driver as \ - cinder_volume_drivers_dell_emc_scaleio_driver -from cinder.volume.drivers.dell_emc.unity import driver as \ - cinder_volume_drivers_dell_emc_unity_driver -from cinder.volume.drivers.dell_emc.vmax import common as \ - cinder_volume_drivers_dell_emc_vmax_common -from cinder.volume.drivers.dell_emc.vnx import common as \ - cinder_volume_drivers_dell_emc_vnx_common -from cinder.volume.drivers.dell_emc import xtremio as \ - cinder_volume_drivers_dell_emc_xtremio -from cinder.volume.drivers.disco import disco as \ - cinder_volume_drivers_disco_disco -from cinder.volume.drivers import drbdmanagedrv as \ - cinder_volume_drivers_drbdmanagedrv -from cinder.volume.drivers.falconstor import fss_common as \ - cinder_volume_drivers_falconstor_fsscommon -from cinder.volume.drivers.fujitsu import eternus_dx_common as \ - cinder_volume_drivers_fujitsu_eternusdxcommon -from cinder.volume.drivers.fusionstorage import dsware as \ - cinder_volume_drivers_fusionstorage_dsware -from cinder.volume.drivers import hgst as cinder_volume_drivers_hgst -from cinder.volume.drivers.hitachi import hbsd_common as \ - cinder_volume_drivers_hitachi_hbsdcommon -from cinder.volume.drivers.hitachi import hbsd_fc as \ - cinder_volume_drivers_hitachi_hbsdfc -from cinder.volume.drivers.hitachi import hbsd_horcm as \ - cinder_volume_drivers_hitachi_hbsdhorcm -from cinder.volume.drivers.hitachi import hbsd_iscsi as \ - cinder_volume_drivers_hitachi_hbsdiscsi -from cinder.volume.drivers.hitachi import hnas_nfs as \ - cinder_volume_drivers_hitachi_hnasnfs -from cinder.volume.drivers.hitachi import hnas_utils as \ - cinder_volume_drivers_hitachi_hnasutils -from cinder.volume.drivers.hitachi import vsp_common as \ - cinder_volume_drivers_hitachi_vspcommon -from cinder.volume.drivers.hitachi import vsp_fc as \ - cinder_volume_drivers_hitachi_vspfc -from 
cinder.volume.drivers.hitachi import vsp_horcm as \ - cinder_volume_drivers_hitachi_vsphorcm -from cinder.volume.drivers.hitachi import vsp_iscsi as \ - cinder_volume_drivers_hitachi_vspiscsi -from cinder.volume.drivers.hpe import hpe_3par_common as \ - cinder_volume_drivers_hpe_hpe3parcommon -from cinder.volume.drivers.hpe import hpe_lefthand_iscsi as \ - cinder_volume_drivers_hpe_hpelefthandiscsi -from cinder.volume.drivers.huawei import huawei_driver as \ - cinder_volume_drivers_huawei_huaweidriver -from cinder.volume.drivers.ibm import flashsystem_common as \ - cinder_volume_drivers_ibm_flashsystemcommon -from cinder.volume.drivers.ibm import flashsystem_fc as \ - cinder_volume_drivers_ibm_flashsystemfc -from cinder.volume.drivers.ibm import flashsystem_iscsi as \ - cinder_volume_drivers_ibm_flashsystemiscsi -from cinder.volume.drivers.ibm import gpfs as cinder_volume_drivers_ibm_gpfs -from cinder.volume.drivers.ibm.ibm_storage import ds8k_proxy as \ - cinder_volume_drivers_ibm_ibm_storage_ds8kproxy -from cinder.volume.drivers.ibm.ibm_storage import ibm_storage as \ - cinder_volume_drivers_ibm_ibm_storage_ibmstorage -from cinder.volume.drivers.ibm.storwize_svc import storwize_svc_common as \ - cinder_volume_drivers_ibm_storwize_svc_storwizesvccommon -from cinder.volume.drivers.ibm.storwize_svc import storwize_svc_fc as \ - cinder_volume_drivers_ibm_storwize_svc_storwizesvcfc -from cinder.volume.drivers.ibm.storwize_svc import storwize_svc_iscsi as \ - cinder_volume_drivers_ibm_storwize_svc_storwizesvciscsi -from cinder.volume.drivers import infinidat as cinder_volume_drivers_infinidat -from cinder.volume.drivers.infortrend.raidcmd_cli import common_cli as \ - cinder_volume_drivers_infortrend_raidcmd_cli_commoncli -from cinder.volume.drivers.kaminario import kaminario_common as \ - cinder_volume_drivers_kaminario_kaminariocommon -from cinder.volume.drivers.lenovo import lenovo_common as \ - cinder_volume_drivers_lenovo_lenovocommon -from cinder.volume.drivers 
import lvm as cinder_volume_drivers_lvm -from cinder.volume.drivers.netapp import options as \ - cinder_volume_drivers_netapp_options -from cinder.volume.drivers.nexenta import options as \ - cinder_volume_drivers_nexenta_options -from cinder.volume.drivers import nfs as cinder_volume_drivers_nfs -from cinder.volume.drivers import nimble as cinder_volume_drivers_nimble -from cinder.volume.drivers.prophetstor import options as \ - cinder_volume_drivers_prophetstor_options -from cinder.volume.drivers import pure as cinder_volume_drivers_pure -from cinder.volume.drivers import qnap as cinder_volume_drivers_qnap -from cinder.volume.drivers import quobyte as cinder_volume_drivers_quobyte -from cinder.volume.drivers import rbd as cinder_volume_drivers_rbd -from cinder.volume.drivers import remotefs as cinder_volume_drivers_remotefs -from cinder.volume.drivers.san.hp import hpmsa_common as \ - cinder_volume_drivers_san_hp_hpmsacommon -from cinder.volume.drivers.san import san as cinder_volume_drivers_san_san -from cinder.volume.drivers import sheepdog as cinder_volume_drivers_sheepdog -from cinder.volume.drivers import solidfire as cinder_volume_drivers_solidfire -from cinder.volume.drivers.synology import synology_common as \ - cinder_volume_drivers_synology_synologycommon -from cinder.volume.drivers import tegile as cinder_volume_drivers_tegile -from cinder.volume.drivers import tintri as cinder_volume_drivers_tintri -from cinder.volume.drivers.violin import v7000_common as \ - cinder_volume_drivers_violin_v7000common -from cinder.volume.drivers.vmware import vmdk as \ - cinder_volume_drivers_vmware_vmdk -from cinder.volume.drivers import vzstorage as cinder_volume_drivers_vzstorage -from cinder.volume.drivers.windows import smbfs as \ - cinder_volume_drivers_windows_smbfs -from cinder.volume.drivers.windows import windows as \ - cinder_volume_drivers_windows_windows -from cinder.volume.drivers import xio as cinder_volume_drivers_xio -from cinder.volume.drivers import 
zadara as cinder_volume_drivers_zadara -from cinder.volume.drivers.zfssa import zfssaiscsi as \ - cinder_volume_drivers_zfssa_zfssaiscsi -from cinder.volume.drivers.zfssa import zfssanfs as \ - cinder_volume_drivers_zfssa_zfssanfs -from cinder.volume.drivers.zte import zte_ks as cinder_volume_drivers_zte_zteks -from cinder.volume import manager as cinder_volume_manager -from cinder.wsgi import eventlet_server as cinder_wsgi_eventletserver -from cinder.zonemanager.drivers.brocade import brcd_fabric_opts as \ - cinder_zonemanager_drivers_brocade_brcdfabricopts -from cinder.zonemanager.drivers.brocade import brcd_fc_zone_driver as \ - cinder_zonemanager_drivers_brocade_brcdfczonedriver -from cinder.zonemanager.drivers.cisco import cisco_fabric_opts as \ - cinder_zonemanager_drivers_cisco_ciscofabricopts -from cinder.zonemanager.drivers.cisco import cisco_fc_zone_driver as \ - cinder_zonemanager_drivers_cisco_ciscofczonedriver -from cinder.zonemanager import fc_zone_manager as \ - cinder_zonemanager_fczonemanager - - -def list_opts(): - return [ - ('BACKEND', - itertools.chain( - [cinder_cmd_volume.host_opt], - )), - ('BRCD_FABRIC_EXAMPLE', - itertools.chain( - cinder_zonemanager_drivers_brocade_brcdfabricopts. - brcd_zone_opts, - )), - ('CISCO_FABRIC_EXAMPLE', - itertools.chain( - cinder_zonemanager_drivers_cisco_ciscofabricopts. 
- cisco_zone_opts, - )), - ('COORDINATION', - itertools.chain( - cinder_coordination.coordination_opts, - )), - ('DEFAULT', - itertools.chain( - cinder_api_common.api_common_opts, - cinder_api_contrib_typesextraspecs.extraspec_opts, - [cinder_api_middleware_auth.use_forwarded_for_opt], - cinder_api_views_versions.versions_opts, - cinder_backup_api.backup_api_opts, - cinder_backup_chunkeddriver.chunkedbackup_service_opts, - cinder_backup_driver.service_opts, - cinder_backup_drivers_ceph.service_opts, - cinder_backup_drivers_glusterfs.glusterfsbackup_service_opts, - cinder_backup_drivers_google.gcsbackup_service_opts, - cinder_backup_drivers_nfs.nfsbackup_service_opts, - cinder_backup_drivers_posix.posixbackup_service_opts, - cinder_backup_drivers_swift.swiftbackup_service_opts, - cinder_backup_drivers_tsm.tsm_opts, - cinder_backup_manager.backup_manager_opts, - [cinder_cmd_volume.cluster_opt], - cinder_common_config.core_opts, - cinder_common_config.global_opts, - cinder.compute.compute_opts, - cinder_compute_nova.old_opts, - cinder_context.context_opts, - cinder_db_api.db_opts, - [cinder_db_base.db_driver_opt], - cinder_exception.exc_log_opts, - cinder_image_glance.glance_opts, - cinder_image_glance.glance_core_properties_opts, - cinder_image_imageutils.image_helper_opts, - cinder_message_api.messages_opts, - cinder_quota.quota_opts, - cinder_scheduler_driver.scheduler_driver_opts, - cinder_scheduler_hostmanager.host_manager_opts, - [cinder_scheduler_manager.scheduler_driver_opt], - [cinder_scheduler_scheduleroptions. - scheduler_json_config_location_opt], - cinder_scheduler_weights_capacity.capacity_weight_opts, - cinder_scheduler_weights_volumenumber. 
- volume_number_weight_opts, - cinder_service.service_opts, - cinder_sshutils.ssh_opts, - cinder_transfer_api.volume_transfer_opts, - [cinder_volume_api.allow_force_upload_opt], - [cinder_volume_api.volume_host_opt], - [cinder_volume_api.volume_same_az_opt], - [cinder_volume_api.az_cache_time_opt], - cinder_volume_driver.volume_opts, - cinder_volume_driver.iser_opts, - cinder_volume_manager.volume_manager_opts, - cinder_wsgi_eventletserver.socket_opts, - )), - ('FC-ZONE-MANAGER', - itertools.chain( - cinder_zonemanager_drivers_brocade_brcdfczonedriver.brcd_opts, - cinder_zonemanager_drivers_cisco_ciscofczonedriver.cisco_opts, - cinder_zonemanager_fczonemanager.zone_manager_opts, - )), - ('KEY_MANAGER', - itertools.chain( - cinder_keymgr_confkeymgr.key_mgr_opts, - )), - ('NOVA_GROUP', - itertools.chain( - cinder_compute_nova.nova_opts, - cinder_compute_nova.nova_session_opts, - cinder_compute_nova.nova_auth_opts, - )), - ('backend_defaults', - itertools.chain( - cinder_volume_driver.volume_opts, - cinder_volume_driver.iser_opts, - cinder_volume_drivers_blockdevice.volume_opts, - cinder_volume_drivers_blockbridge.blockbridge_opts, - cinder_volume_drivers_coho.coho_opts, - cinder_volume_drivers_coprhd_common.volume_opts, - cinder_volume_drivers_coprhd_scaleio.scaleio_opts, - cinder_volume_drivers_datera_dateraiscsi.d_opts, - cinder_volume_drivers_dell_emc_ps.eqlx_opts, - cinder_volume_drivers_dell_emc_sc_storagecentercommon. - common_opts, - cinder_volume_drivers_dell_emc_scaleio_driver.scaleio_opts, - cinder_volume_drivers_dell_emc_unity_driver.UNITY_OPTS, - cinder_volume_drivers_dell_emc_vmax_common.vmax_opts, - cinder_volume_drivers_dell_emc_vnx_common.VNX_OPTS, - cinder_volume_drivers_dell_emc_xtremio.XTREMIO_OPTS, - cinder_volume_drivers_disco_disco.disco_opts, - cinder_volume_drivers_drbdmanagedrv.drbd_opts, - cinder_volume_drivers_falconstor_fsscommon.FSS_OPTS, - cinder_volume_drivers_fujitsu_eternusdxcommon. 
- FJ_ETERNUS_DX_OPT_opts, - cinder_volume_drivers_fusionstorage_dsware.volume_opts, - cinder_volume_drivers_hgst.hgst_opts, - cinder_volume_drivers_hitachi_hbsdcommon.volume_opts, - cinder_volume_drivers_hitachi_hbsdfc.volume_opts, - cinder_volume_drivers_hitachi_hbsdhorcm.volume_opts, - cinder_volume_drivers_hitachi_hbsdiscsi.volume_opts, - cinder_volume_drivers_hitachi_hnasnfs.NFS_OPTS, - cinder_volume_drivers_hitachi_hnasutils.drivers_common_opts, - cinder_volume_drivers_hitachi_vspcommon.common_opts, - cinder_volume_drivers_hitachi_vspfc.fc_opts, - cinder_volume_drivers_hitachi_vsphorcm.horcm_opts, - cinder_volume_drivers_hitachi_vspiscsi.iscsi_opts, - cinder_volume_drivers_hpe_hpe3parcommon.hpe3par_opts, - cinder_volume_drivers_hpe_hpelefthandiscsi.hpelefthand_opts, - cinder_volume_drivers_huawei_huaweidriver.huawei_opts, - cinder_volume_drivers_ibm_flashsystemcommon.flashsystem_opts, - cinder_volume_drivers_ibm_flashsystemfc.flashsystem_fc_opts, - cinder_volume_drivers_ibm_flashsystemiscsi. - flashsystem_iscsi_opts, - cinder_volume_drivers_ibm_gpfs.gpfs_opts, - cinder_volume_drivers_ibm_gpfs.gpfs_remote_ssh_opts, - cinder_volume_drivers_ibm_ibm_storage_ds8kproxy.ds8k_opts, - cinder_volume_drivers_ibm_ibm_storage_ibmstorage.driver_opts, - cinder_volume_drivers_ibm_storwize_svc_storwizesvccommon. - storwize_svc_opts, - cinder_volume_drivers_ibm_storwize_svc_storwizesvcfc. - storwize_svc_fc_opts, - cinder_volume_drivers_ibm_storwize_svc_storwizesvciscsi. - storwize_svc_iscsi_opts, - cinder_volume_drivers_infinidat.infinidat_opts, - cinder_volume_drivers_infortrend_raidcmd_cli_commoncli. - infortrend_esds_opts, - cinder_volume_drivers_infortrend_raidcmd_cli_commoncli. - infortrend_esds_extra_opts, - cinder_volume_drivers_kaminario_kaminariocommon. 
- kaminario_opts, - cinder_volume_drivers_lenovo_lenovocommon.common_opts, - cinder_volume_drivers_lenovo_lenovocommon.iscsi_opts, - cinder_volume_drivers_lvm.volume_opts, - cinder_volume_drivers_netapp_options.netapp_proxy_opts, - cinder_volume_drivers_netapp_options.netapp_connection_opts, - cinder_volume_drivers_netapp_options.netapp_transport_opts, - cinder_volume_drivers_netapp_options.netapp_basicauth_opts, - cinder_volume_drivers_netapp_options.netapp_cluster_opts, - cinder_volume_drivers_netapp_options.netapp_7mode_opts, - cinder_volume_drivers_netapp_options.netapp_provisioning_opts, - cinder_volume_drivers_netapp_options.netapp_img_cache_opts, - cinder_volume_drivers_netapp_options.netapp_eseries_opts, - cinder_volume_drivers_netapp_options.netapp_nfs_extra_opts, - cinder_volume_drivers_netapp_options.netapp_san_opts, - cinder_volume_drivers_netapp_options.netapp_replication_opts, - cinder_volume_drivers_nexenta_options.NEXENTA_CONNECTION_OPTS, - cinder_volume_drivers_nexenta_options.NEXENTA_ISCSI_OPTS, - cinder_volume_drivers_nexenta_options.NEXENTA_DATASET_OPTS, - cinder_volume_drivers_nexenta_options.NEXENTA_NFS_OPTS, - cinder_volume_drivers_nexenta_options.NEXENTA_RRMGR_OPTS, - cinder_volume_drivers_nexenta_options.NEXENTA_EDGE_OPTS, - cinder_volume_drivers_nfs.nfs_opts, - cinder_volume_drivers_nimble.nimble_opts, - cinder_volume_drivers_prophetstor_options.DPL_OPTS, - cinder_volume_drivers_pure.PURE_OPTS, - cinder_volume_drivers_qnap.qnap_opts, - cinder_volume_drivers_quobyte.volume_opts, - cinder_volume_drivers_rbd.RBD_OPTS, - cinder_volume_drivers_remotefs.nas_opts, - cinder_volume_drivers_remotefs.volume_opts, - cinder_volume_drivers_san_hp_hpmsacommon.common_opts, - cinder_volume_drivers_san_hp_hpmsacommon.iscsi_opts, - cinder_volume_drivers_san_san.san_opts, - cinder_volume_drivers_sheepdog.sheepdog_opts, - cinder_volume_drivers_solidfire.sf_opts, - cinder_volume_drivers_synology_synologycommon.cinder_opts, - 
cinder_volume_drivers_tegile.tegile_opts, - cinder_volume_drivers_tintri.tintri_opts, - cinder_volume_drivers_violin_v7000common.violin_opts, - cinder_volume_drivers_vmware_vmdk.vmdk_opts, - cinder_volume_drivers_vzstorage.vzstorage_opts, - cinder_volume_drivers_windows_smbfs.volume_opts, - cinder_volume_drivers_windows_windows.windows_opts, - cinder_volume_drivers_xio.XIO_OPTS, - cinder_volume_drivers_zadara.zadara_opts, - cinder_volume_drivers_zfssa_zfssaiscsi.ZFSSA_OPTS, - cinder_volume_drivers_zfssa_zfssanfs.ZFSSA_OPTS, - cinder_volume_drivers_zte_zteks.zte_opts, - cinder_volume_manager.volume_backend_opts, - )), - ] diff --git a/cinder/policy.py b/cinder/policy.py deleted file mode 100644 index 9a94d075c..000000000 --- a/cinder/policy.py +++ /dev/null @@ -1,90 +0,0 @@ -# Copyright (c) 2011 OpenStack Foundation -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -"""Policy Engine For Cinder""" - - -from oslo_config import cfg -from oslo_policy import opts as policy_opts -from oslo_policy import policy - -from cinder import exception - -CONF = cfg.CONF -policy_opts.set_defaults(cfg.CONF, 'policy.json') - -_ENFORCER = None - - -def init(): - global _ENFORCER - if not _ENFORCER: - _ENFORCER = policy.Enforcer(CONF) - - -def enforce_action(context, action): - """Checks that the action can be done by the given context. 
- - Applies a check to ensure the context's project_id and user_id can be - applied to the given action using the policy enforcement api. - """ - - return enforce(context, action, {'project_id': context.project_id, - 'user_id': context.user_id}) - - -def enforce(context, action, target): - """Verifies that the action is valid on the target in this context. - - :param context: cinder context - :param action: string representing the action to be checked - this should be colon separated for clarity. - i.e. ``compute:create_instance``, - ``compute:attach_volume``, - ``volume:attach_volume`` - - :param object: dictionary representing the object of the action - for object creation this should be a dictionary representing the - location of the object e.g. ``{'project_id': context.project_id}`` - - :raises PolicyNotAuthorized: if verification fails. - - """ - init() - - return _ENFORCER.enforce(action, - target, - context.to_policy_values(), - do_raise=True, - exc=exception.PolicyNotAuthorized, - action=action) - - -def check_is_admin(roles, context=None): - """Whether or not user is admin according to policy setting. - - """ - init() - - # include project_id on target to avoid KeyError if context_is_admin - # policy definition is missing, and default admin_or_owner rule - # attempts to apply. - target = {'project_id': ''} - if context is None: - credentials = {'roles': roles} - else: - credentials = context.to_dict() - - return _ENFORCER.enforce('context_is_admin', target, credentials) diff --git a/cinder/quota.py b/cinder/quota.py deleted file mode 100644 index 4ef6d86b5..000000000 --- a/cinder/quota.py +++ /dev/null @@ -1,1236 +0,0 @@ -# Copyright 2010 United States Government as represented by the -# Administrator of the National Aeronautics and Space Administration. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. 
You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -"""Quotas for volumes.""" - -from collections import deque -import datetime - -from oslo_config import cfg -from oslo_log import log as logging -from oslo_log import versionutils -from oslo_utils import importutils -from oslo_utils import timeutils -import six - -from cinder import context -from cinder import db -from cinder import exception -from cinder.i18n import _ -from cinder import quota_utils - - -LOG = logging.getLogger(__name__) - -quota_opts = [ - cfg.IntOpt('quota_volumes', - default=10, - help='Number of volumes allowed per project'), - cfg.IntOpt('quota_snapshots', - default=10, - help='Number of volume snapshots allowed per project'), - cfg.IntOpt('quota_consistencygroups', - default=10, - help='Number of consistencygroups allowed per project'), - cfg.IntOpt('quota_groups', - default=10, - help='Number of groups allowed per project'), - cfg.IntOpt('quota_gigabytes', - default=1000, - help='Total amount of storage, in gigabytes, allowed ' - 'for volumes and snapshots per project'), - cfg.IntOpt('quota_backups', - default=10, - help='Number of volume backups allowed per project'), - cfg.IntOpt('quota_backup_gigabytes', - default=1000, - help='Total amount of storage, in gigabytes, allowed ' - 'for backups per project'), - cfg.IntOpt('reservation_expire', - default=86400, - help='Number of seconds until a reservation expires'), - cfg.IntOpt('reservation_clean_interval', - default='$reservation_expire', - help='Interval between periodic task runs to clean expired ' - 'reservations in seconds.'), - cfg.IntOpt('until_refresh', - default=0, - 
help='Count of reservations until usage is refreshed'), - cfg.IntOpt('max_age', - default=0, - help='Number of seconds between subsequent usage refreshes'), - cfg.StrOpt('quota_driver', - default="cinder.quota.DbQuotaDriver", - help='Default driver to use for quota checks'), - cfg.BoolOpt('use_default_quota_class', - default=True, - help='Enables or disables use of default quota class ' - 'with default quota.'), - cfg.IntOpt('per_volume_size_limit', - default=-1, - help='Max size allowed per volume, in gigabytes'), ] - -CONF = cfg.CONF -CONF.register_opts(quota_opts) - - -class DbQuotaDriver(object): - - """Driver to perform check to enforcement of quotas. - - Also allows to obtain quota information. - The default driver utilizes the local database. - """ - - def get_by_project(self, context, project_id, resource_name): - """Get a specific quota by project.""" - - return db.quota_get(context, project_id, resource_name) - - def get_by_class(self, context, quota_class, resource_name): - """Get a specific quota by quota class.""" - - return db.quota_class_get(context, quota_class, resource_name) - - def get_default(self, context, resource, project_id): - """Get a specific default quota for a resource.""" - default_quotas = db.quota_class_get_defaults(context) - return default_quotas.get(resource.name, resource.default) - - def get_defaults(self, context, resources, project_id=None): - """Given a list of resources, retrieve the default quotas. - - Use the class quotas named `_DEFAULT_QUOTA_NAME` as default quotas, - if it exists. - - :param context: The request context, for access checks. - :param resources: A dictionary of the registered resources. 
- :param project_id: The id of the current project - """ - - quotas = {} - default_quotas = {} - if CONF.use_default_quota_class: - default_quotas = db.quota_class_get_defaults(context) - - for resource in resources.values(): - if default_quotas: - if resource.name not in default_quotas: - versionutils.report_deprecated_feature(LOG, _( - "Default quota for resource: %(res)s is set " - "by the default quota flag: quota_%(res)s, " - "it is now deprecated. Please use the " - "default quota class for default " - "quota.") % {'res': resource.name}) - quotas[resource.name] = default_quotas.get(resource.name, - resource.default) - return quotas - - def get_class_quotas(self, context, resources, quota_class, - defaults=True): - """Given list of resources, retrieve the quotas for given quota class. - - :param context: The request context, for access checks. - :param resources: A dictionary of the registered resources. - :param quota_class: The name of the quota class to return - quotas for. - :param defaults: If True, the default value will be reported - if there is no specific value for the - resource. - """ - - quotas = {} - default_quotas = {} - class_quotas = db.quota_class_get_all_by_name(context, quota_class) - if defaults: - default_quotas = db.quota_class_get_defaults(context) - for resource in resources.values(): - if resource.name in class_quotas: - quotas[resource.name] = class_quotas[resource.name] - continue - - if defaults: - quotas[resource.name] = default_quotas.get(resource.name, - resource.default) - - return quotas - - def get_project_quotas(self, context, resources, project_id, - quota_class=None, defaults=True, - usages=True): - """Retrieve quotas for a project. - - Given a list of resources, retrieve the quotas for the given - project. - - :param context: The request context, for access checks. - :param resources: A dictionary of the registered resources. - :param project_id: The ID of the project to return quotas for. 
- :param quota_class: If project_id != context.project_id, the - quota class cannot be determined. This - parameter allows it to be specified. It - will be ignored if project_id == - context.project_id. - :param defaults: If True, the quota class value (or the - default value, if there is no value from the - quota class) will be reported if there is no - specific value for the resource. - :param usages: If True, the current in_use, reserved and allocated - counts will also be returned. - """ - - quotas = {} - project_quotas = db.quota_get_all_by_project(context, project_id) - allocated_quotas = None - default_quotas = None - if usages: - project_usages = db.quota_usage_get_all_by_project(context, - project_id) - allocated_quotas = db.quota_allocated_get_all_by_project( - context, project_id) - allocated_quotas.pop('project_id') - - # Get the quotas for the appropriate class. If the project ID - # matches the one in the context, we use the quota_class from - # the context, otherwise, we use the provided quota_class (if - # any) - if project_id == context.project_id: - quota_class = context.quota_class - if quota_class: - class_quotas = db.quota_class_get_all_by_name(context, quota_class) - else: - class_quotas = {} - - for resource in resources.values(): - # Omit default/quota class values - if not defaults and resource.name not in project_quotas: - continue - - quota_val = project_quotas.get(resource.name) - if quota_val is None: - quota_val = class_quotas.get(resource.name) - if quota_val is None: - # Lazy load the default quotas - if default_quotas is None: - default_quotas = self.get_defaults( - context, resources, project_id) - quota_val = default_quotas[resource.name] - - quotas[resource.name] = {'limit': quota_val} - - # Include usages if desired. This is optional because one - # internal consumer of this interface wants to access the - # usages directly from inside a transaction. 
- if usages: - usage = project_usages.get(resource.name, {}) - quotas[resource.name].update( - in_use=usage.get('in_use', 0), - reserved=usage.get('reserved', 0), ) - if allocated_quotas: - quotas[resource.name].update( - allocated=allocated_quotas.get(resource.name, 0), ) - return quotas - - def _get_quotas(self, context, resources, keys, has_sync, project_id=None): - """A helper method which retrieves the quotas for specific resources. - - This specific resource is identified by keys, and which apply to the - current context. - - :param context: The request context, for access checks. - :param resources: A dictionary of the registered resources. - :param keys: A list of the desired quotas to retrieve. - :param has_sync: If True, indicates that the resource must - have a sync attribute; if False, indicates - that the resource must NOT have a sync - attribute. - :param project_id: Specify the project_id if current context - is admin and admin wants to impact on - common user's tenant. - """ - - # Filter resources - if has_sync: - sync_filt = lambda x: hasattr(x, 'sync') - else: - sync_filt = lambda x: not hasattr(x, 'sync') - desired = set(keys) - sub_resources = {k: v for k, v in resources.items() - if k in desired and sync_filt(v)} - - # Make sure we accounted for all of them... - if len(keys) != len(sub_resources): - unknown = desired - set(sub_resources.keys()) - raise exception.QuotaResourceUnknown(unknown=sorted(unknown)) - - # Grab and return the quotas (without usages) - quotas = self.get_project_quotas(context, sub_resources, - project_id, - context.quota_class, usages=False) - - return {k: v['limit'] for k, v in quotas.items()} - - def limit_check(self, context, resources, values, project_id=None): - """Check simple quota limits. - - For limits--those quotas for which there is no usage - synchronization function--this method checks that a set of - proposed values are permitted by the limit restriction. 
- - This method will raise a QuotaResourceUnknown exception if a - given resource is unknown or if it is not a simple limit - resource. - - If any of the proposed values is over the defined quota, an - OverQuota exception will be raised with the sorted list of the - resources which are too high. Otherwise, the method returns - nothing. - - :param context: The request context, for access checks. - :param resources: A dictionary of the registered resources. - :param values: A dictionary of the values to check against the - quota. - :param project_id: Specify the project_id if current context - is admin and admin wants to impact on - common user's tenant. - """ - - # Ensure no value is less than zero - unders = [key for key, val in values.items() if val < 0] - if unders: - raise exception.InvalidQuotaValue(unders=sorted(unders)) - - # If project_id is None, then we use the project_id in context - if project_id is None: - project_id = context.project_id - - # Get the applicable quotas - quotas = self._get_quotas(context, resources, values.keys(), - has_sync=False, project_id=project_id) - # Check the quotas and construct a list of the resources that - # would be put over limit by the desired values - overs = [key for key, val in values.items() - if quotas[key] >= 0 and quotas[key] < val] - if overs: - raise exception.OverQuota(overs=sorted(overs), quotas=quotas, - usages={}) - - def reserve(self, context, resources, deltas, expire=None, - project_id=None): - """Check quotas and reserve resources. - - For counting quotas--those quotas for which there is a usage - synchronization function--this method checks quotas against - current usage and the desired deltas. - - This method will raise a QuotaResourceUnknown exception if a - given resource is unknown or if it does not have a usage - synchronization function. - - If any of the proposed values is over the defined quota, an - OverQuota exception will be raised with the sorted list of the - resources which are too high. 
Otherwise, the method returns a - list of reservation UUIDs which were created. - - :param context: The request context, for access checks. - :param resources: A dictionary of the registered resources. - :param deltas: A dictionary of the proposed delta changes. - :param expire: An optional parameter specifying an expiration - time for the reservations. If it is a simple - number, it is interpreted as a number of - seconds and added to the current time; if it is - a datetime.timedelta object, it will also be - added to the current time. A datetime.datetime - object will be interpreted as the absolute - expiration time. If None is specified, the - default expiration time set by - --default-reservation-expire will be used (this - value will be treated as a number of seconds). - :param project_id: Specify the project_id if current context - is admin and admin wants to impact on - common user's tenant. - """ - - # Set up the reservation expiration - if expire is None: - expire = CONF.reservation_expire - if isinstance(expire, six.integer_types): - expire = datetime.timedelta(seconds=expire) - if isinstance(expire, datetime.timedelta): - expire = timeutils.utcnow() + expire - if not isinstance(expire, datetime.datetime): - raise exception.InvalidReservationExpiration(expire=expire) - - # If project_id is None, then we use the project_id in context - if project_id is None: - project_id = context.project_id - - # Get the applicable quotas. - # NOTE(Vek): We're not worried about races at this point. - # Yes, the admin may be in the process of reducing - # quotas, but that's a pretty rare thing. 
- quotas = self._get_quotas(context, resources, deltas.keys(), - has_sync=True, project_id=project_id) - return self._reserve(context, resources, quotas, deltas, expire, - project_id) - - def _reserve(self, context, resources, quotas, deltas, expire, project_id): - # NOTE(Vek): Most of the work here has to be done in the DB - # API, because we have to do it in a transaction, - # which means access to the session. Since the - # session isn't available outside the DBAPI, we - # have to do the work there. - return db.quota_reserve(context, resources, quotas, deltas, expire, - CONF.until_refresh, CONF.max_age, - project_id=project_id) - - def commit(self, context, reservations, project_id=None): - """Commit reservations. - - :param context: The request context, for access checks. - :param reservations: A list of the reservation UUIDs, as - returned by the reserve() method. - :param project_id: Specify the project_id if current context - is admin and admin wants to impact on - common user's tenant. - """ - # If project_id is None, then we use the project_id in context - if project_id is None: - project_id = context.project_id - - db.reservation_commit(context, reservations, project_id=project_id) - - def rollback(self, context, reservations, project_id=None): - """Roll back reservations. - - :param context: The request context, for access checks. - :param reservations: A list of the reservation UUIDs, as - returned by the reserve() method. - :param project_id: Specify the project_id if current context - is admin and admin wants to impact on - common user's tenant. - """ - # If project_id is None, then we use the project_id in context - if project_id is None: - project_id = context.project_id - - db.reservation_rollback(context, reservations, project_id=project_id) - - def destroy_by_project(self, context, project_id): - """Destroy all limit quotas associated with a project. - - Leave usage and reservation quotas intact. 
- - :param context: The request context, for access checks. - :param project_id: The ID of the project being deleted. - """ - db.quota_destroy_by_project(context, project_id) - - def expire(self, context): - """Expire reservations. - - Explores all currently existing reservations and rolls back - any that have expired. - - :param context: The request context, for access checks. - """ - - db.reservation_expire(context) - - -class NestedDbQuotaDriver(DbQuotaDriver): - def validate_nested_setup(self, ctxt, resources, project_tree, - fix_allocated_quotas=False): - """Ensures project_tree has quotas that make sense as nested quotas. - - Validates the following: - * No parent project has child_projects who have more combined quota - than the parent's quota limit - * No child quota has a larger in-use value than it's current limit - (could happen before because child default values weren't enforced) - * All parent projects' "allocated" quotas match the sum of the limits - of its children projects - - TODO(mc_nair): need a better way to "flip the switch" to use nested - quotas to make this less race-ee - """ - self._allocated = {} - project_queue = deque(project_tree.items()) - borked_allocated_quotas = {} - - while project_queue: - # Tuple of (current root node, subtree) - cur_proj_id, project_subtree = project_queue.popleft() - - # If we're on a leaf node, no need to do validation on it, and in - # order to avoid complication trying to get its children, skip it. 
- if not project_subtree: - continue - - cur_project_quotas = self.get_project_quotas( - ctxt, resources, cur_proj_id) - - # Validate each resource when compared to it's child quotas - for resource in cur_project_quotas.keys(): - parent_quota = cur_project_quotas[resource] - parent_limit = parent_quota['limit'] - parent_usage = (parent_quota['in_use'] + - parent_quota['reserved']) - - cur_parent_allocated = parent_quota.get('allocated', 0) - calc_parent_allocated = self._get_cur_project_allocated( - ctxt, resources[resource], {cur_proj_id: project_subtree}) - - if parent_limit > 0: - parent_free_quota = parent_limit - parent_usage - if parent_free_quota < calc_parent_allocated: - msg = _("Sum of child usage '%(sum)s' is greater " - "than free quota of '%(free)s' for project " - "'%(proj)s' for resource '%(res)s'. Please " - "lower the limit or usage for one or more of " - "the following projects: '%(child_ids)s'") % { - 'sum': calc_parent_allocated, - 'free': parent_free_quota, - 'proj': cur_proj_id, 'res': resource, - 'child_ids': ', '.join(project_subtree.keys()) - } - raise exception.InvalidNestedQuotaSetup(reason=msg) - - # If "allocated" value wasn't right either err or fix DB - if calc_parent_allocated != cur_parent_allocated: - if fix_allocated_quotas: - try: - db.quota_allocated_update(ctxt, cur_proj_id, - resource, - calc_parent_allocated) - except exception.ProjectQuotaNotFound: - # If it was default quota create DB entry for it - db.quota_create( - ctxt, cur_proj_id, resource, - parent_limit, allocated=calc_parent_allocated) - else: - if cur_proj_id not in borked_allocated_quotas: - borked_allocated_quotas[cur_proj_id] = {} - - borked_allocated_quotas[cur_proj_id][resource] = { - 'db_allocated_quota': cur_parent_allocated, - 'expected_allocated_quota': calc_parent_allocated} - - project_queue.extend(project_subtree.items()) - - if borked_allocated_quotas: - msg = _("Invalid allocated quotas defined for the following " - "project quotas: %s") % 
borked_allocated_quotas - raise exception.InvalidNestedQuotaSetup(message=msg) - - def _get_cur_project_allocated(self, ctxt, resource, project_tree): - """Recursively calculates the allocated value of a project - - :param ctxt: context used to retrieve DB values - :param resource: the resource to calculate allocated value for - :param project_tree: the project tree used to calculate allocated - e.g. {'A': {'B': {'D': None}, 'C': None}} - - A project's "allocated" value depends on: - 1) the quota limits which have been "given" to it's children, in - the case those limits are not unlimited (-1) - 2) the current quota being used by a child plus whatever the child - has given to it's children, in the case of unlimited (-1) limits - - Scenario #2 requires recursively calculating allocated, and in order - to efficiently calculate things we will save off any previously - calculated allocated values. - - NOTE: this currently leaves a race condition when a project's allocated - value has been calculated (with a -1 limit), but then a child project - gets a volume created, thus changing the in-use value and messing up - the child's allocated value. We should look into updating the allocated - values as we're going along and switching to NestedQuotaDriver with - flip of a switch. 
- """ - # Grab the current node - cur_project_id = list(project_tree)[0] - project_subtree = project_tree[cur_project_id] - res_name = resource.name - - if cur_project_id not in self._allocated: - self._allocated[cur_project_id] = {} - - if res_name not in self._allocated[cur_project_id]: - # Calculate the allocated value for this resource since haven't yet - cur_project_allocated = 0 - child_proj_ids = project_subtree.keys() if project_subtree else {} - res_dict = {res_name: resource} - child_project_quotas = {child_id: self.get_project_quotas( - ctxt, res_dict, child_id) for child_id in child_proj_ids} - - for child_id, child_quota in child_project_quotas.items(): - child_limit = child_quota[res_name]['limit'] - # Non-unlimited quota is easy, anything explicitly given to a - # child project gets added into allocated value - if child_limit != -1: - if child_quota[res_name].get('in_use', 0) > child_limit: - msg = _("Quota limit invalid for project '%(proj)s' " - "for resource '%(res)s': limit of %(limit)d " - "is less than in-use value of %(used)d") % { - 'proj': child_id, 'res': res_name, - 'limit': child_limit, - 'used': child_quota[res_name]['in_use'] - } - raise exception.InvalidNestedQuotaSetup(reason=msg) - - cur_project_allocated += child_limit - # For -1, take any quota being eaten up by child, as well as - # what the child itself has given up to its children - else: - child_in_use = child_quota[res_name].get('in_use', 0) - # Recursively calculate child's allocated - child_alloc = self._get_cur_project_allocated( - ctxt, resource, {child_id: project_subtree[child_id]}) - cur_project_allocated += child_in_use + child_alloc - - self._allocated[cur_project_id][res_name] = cur_project_allocated - - return self._allocated[cur_project_id][res_name] - - def get_default(self, context, resource, project_id): - """Get a specific default quota for a resource.""" - resource = super(NestedDbQuotaDriver, self).get_default( - context, resource, project_id) - - return 0 if 
quota_utils.get_parent_project_id( - context, project_id) else resource.default - - def get_defaults(self, context, resources, project_id=None): - defaults = super(NestedDbQuotaDriver, self).get_defaults( - context, resources, project_id) - # All defaults are 0 for child project - if quota_utils.get_parent_project_id(context, project_id): - for key in defaults.keys(): - defaults[key] = 0 - return defaults - - def _reserve(self, context, resources, quotas, deltas, expire, project_id): - reserved = [] - # As to not change the exception behavior, flag every res that would - # be over instead of failing on first OverQuota - resources_failed_to_update = [] - failed_usages = {} - for res in deltas.keys(): - try: - reserved += db.quota_reserve( - context, resources, quotas, {res: deltas[res]}, - expire, CONF.until_refresh, CONF.max_age, project_id) - if quotas[res] == -1: - reserved += quota_utils.update_alloc_to_next_hard_limit( - context, resources, deltas, res, expire, project_id) - except exception.OverQuota as e: - resources_failed_to_update.append(res) - failed_usages.update(e.kwargs['usages']) - - if resources_failed_to_update: - db.reservation_rollback(context, reserved, project_id) - # We change OverQuota to OverVolumeLimit in other places and expect - # to find all of the OverQuota kwargs - raise exception.OverQuota(overs=sorted(resources_failed_to_update), - quotas=quotas, usages=failed_usages) - - return reserved - - -class BaseResource(object): - """Describe a single resource for quota checking.""" - - def __init__(self, name, flag=None, parent_project_id=None): - """Initializes a Resource. - - :param name: The name of the resource, i.e., "volumes". - :param flag: The name of the flag or configuration option - which specifies the default value of the quota - for this resource. - :param parent_project_id: The id of the current project's parent, - if any. 
- """ - - self.name = name - self.flag = flag - self.parent_project_id = parent_project_id - - def quota(self, driver, context, **kwargs): - """Given a driver and context, obtain the quota for this resource. - - :param driver: A quota driver. - :param context: The request context. - :param project_id: The project to obtain the quota value for. - If not provided, it is taken from the - context. If it is given as None, no - project-specific quota will be searched - for. - :param quota_class: The quota class corresponding to the - project, or for which the quota is to be - looked up. If not provided, it is taken - from the context. If it is given as None, - no quota class-specific quota will be - searched for. Note that the quota class - defaults to the value in the context, - which may not correspond to the project if - project_id is not the same as the one in - the context. - """ - - # Get the project ID - project_id = kwargs.get('project_id', context.project_id) - - # Ditto for the quota class - quota_class = kwargs.get('quota_class', context.quota_class) - - # Look up the quota for the project - if project_id: - try: - return driver.get_by_project(context, project_id, self.name) - except exception.ProjectQuotaNotFound: - pass - - # Try for the quota class - if quota_class: - try: - return driver.get_by_class(context, quota_class, self.name) - except exception.QuotaClassNotFound: - pass - - # OK, return the default - return driver.get_default(context, self, - parent_project_id=self.parent_project_id) - - @property - def default(self): - """Return the default value of the quota.""" - - if self.parent_project_id: - return 0 - - return CONF[self.flag] if self.flag else -1 - - -class ReservableResource(BaseResource): - """Describe a reservable resource.""" - - def __init__(self, name, sync, flag=None): - """Initializes a ReservableResource. 
- - Reservable resources are those resources which directly - correspond to objects in the database, i.e., volumes, gigabytes, - etc. A ReservableResource must be constructed with a usage - synchronization function, which will be called to determine the - current counts of one or more resources. - - The usage synchronization function will be passed three - arguments: an admin context, the project ID, and an opaque - session object, which should in turn be passed to the - underlying database function. Synchronization functions - should return a dictionary mapping resource names to the - current in_use count for those resources; more than one - resource and resource count may be returned. Note that - synchronization functions may be associated with more than one - ReservableResource. - - :param name: The name of the resource, i.e., "volumes". - :param sync: A dbapi methods name which returns a dictionary - to resynchronize the in_use count for one or more - resources, as described above. - :param flag: The name of the flag or configuration option - which specifies the default value of the quota - for this resource. - """ - - super(ReservableResource, self).__init__(name, flag=flag) - if sync: - self.sync = sync - - -class AbsoluteResource(BaseResource): - """Describe a non-reservable resource.""" - - pass - - -class CountableResource(AbsoluteResource): - """Describe a resource where counts aren't based only on the project ID.""" - - def __init__(self, name, count, flag=None): - """Initializes a CountableResource. - - Countable resources are those resources which directly - correspond to objects in the database, i.e., volumes, gigabytes, - etc., but for which a count by project ID is inappropriate. A - CountableResource must be constructed with a counting - function, which will be called to determine the current counts - of the resource. 
- - The counting function will be passed the context, along with - the extra positional and keyword arguments that are passed to - Quota.count(). It should return an integer specifying the - count. - - Note that this counting is not performed in a transaction-safe - manner. This resource class is a temporary measure to provide - required functionality, until a better approach to solving - this problem can be evolved. - - :param name: The name of the resource, i.e., "volumes". - :param count: A callable which returns the count of the - resource. The arguments passed are as described - above. - :param flag: The name of the flag or configuration option - which specifies the default value of the quota - for this resource. - """ - - super(CountableResource, self).__init__(name, flag=flag) - self.count = count - - -class VolumeTypeResource(ReservableResource): - """ReservableResource for a specific volume type.""" - - def __init__(self, part_name, volume_type): - """Initializes a VolumeTypeResource. - - :param part_name: The kind of resource, i.e., "volumes". - :param volume_type: The volume type for this resource. 
- """ - - self.volume_type_name = volume_type['name'] - self.volume_type_id = volume_type['id'] - name = "%s_%s" % (part_name, self.volume_type_name) - super(VolumeTypeResource, self).__init__(name, "_sync_%s" % part_name) - - -class QuotaEngine(object): - """Represent the set of recognized quotas.""" - - def __init__(self, quota_driver_class=None): - """Initialize a Quota object.""" - - self._resources = {} - self._quota_driver_class = quota_driver_class - self._driver_class = None - - @property - def _driver(self): - # Lazy load the driver so we give a chance for the config file to - # be read before grabbing the config for which QuotaDriver to use - if self._driver_class: - return self._driver_class - - if not self._quota_driver_class: - # Grab the current driver class from CONF - self._quota_driver_class = CONF.quota_driver - - if isinstance(self._quota_driver_class, six.string_types): - self._quota_driver_class = importutils.import_object( - self._quota_driver_class) - - self._driver_class = self._quota_driver_class - return self._driver_class - - def using_nested_quotas(self): - """Returns true if nested quotas are being used""" - return isinstance(self._driver, NestedDbQuotaDriver) - - def __contains__(self, resource): - return resource in self.resources - - def register_resource(self, resource): - """Register a resource.""" - - self._resources[resource.name] = resource - - def register_resources(self, resources): - """Register a list of resources.""" - - for resource in resources: - self.register_resource(resource) - - def get_by_project(self, context, project_id, resource_name): - """Get a specific quota by project.""" - return self._driver.get_by_project(context, project_id, resource_name) - - def get_by_project_or_default(self, context, project_id, resource_name): - """Get specific quota by project or default quota if doesn't exists.""" - try: - val = self.get_by_project( - context, project_id, resource_name).hard_limit - except 
exception.ProjectQuotaNotFound: - val = self.get_defaults(context, project_id)[resource_name] - - return val - - def get_by_class(self, context, quota_class, resource_name): - """Get a specific quota by quota class.""" - - return self._driver.get_by_class(context, quota_class, resource_name) - - def get_default(self, context, resource, parent_project_id=None): - """Get a specific default quota for a resource. - - :param parent_project_id: The id of the current project's parent, - if any. - """ - - return self._driver.get_default(context, resource, - parent_project_id=parent_project_id) - - def get_defaults(self, context, project_id=None): - """Retrieve the default quotas. - - :param context: The request context, for access checks. - :param project_id: The id of the current project - """ - - return self._driver.get_defaults(context, self.resources, - project_id) - - def get_class_quotas(self, context, quota_class, defaults=True): - """Retrieve the quotas for the given quota class. - - :param context: The request context, for access checks. - :param quota_class: The name of the quota class to return - quotas for. - :param defaults: If True, the default value will be reported - if there is no specific value for the - resource. - """ - - return self._driver.get_class_quotas(context, self.resources, - quota_class, defaults=defaults) - - def get_project_quotas(self, context, project_id, quota_class=None, - defaults=True, usages=True): - """Retrieve the quotas for the given project. - - :param context: The request context, for access checks. - :param project_id: The ID of the project to return quotas for. - :param quota_class: If project_id != context.project_id, the - quota class cannot be determined. This - parameter allows it to be specified. - :param defaults: If True, the quota class value (or the - default value, if there is no value from the - quota class) will be reported if there is no - specific value for the resource. 
- :param usages: If True, the current in_use, reserved and - allocated counts will also be returned. - """ - return self._driver.get_project_quotas(context, self.resources, - project_id, - quota_class=quota_class, - defaults=defaults, - usages=usages) - - def count(self, context, resource, *args, **kwargs): - """Count a resource. - - For countable resources, invokes the count() function and - returns its result. Arguments following the context and - resource are passed directly to the count function declared by - the resource. - - :param context: The request context, for access checks. - :param resource: The name of the resource, as a string. - """ - - # Get the resource - res = self.resources.get(resource) - if not res or not hasattr(res, 'count'): - raise exception.QuotaResourceUnknown(unknown=[resource]) - - return res.count(context, *args, **kwargs) - - def limit_check(self, context, project_id=None, **values): - """Check simple quota limits. - - For limits--those quotas for which there is no usage - synchronization function--this method checks that a set of - proposed values are permitted by the limit restriction. The - values to check are given as keyword arguments, where the key - identifies the specific quota limit to check, and the value is - the proposed value. - - This method will raise a QuotaResourceUnknown exception if a - given resource is unknown or if it is not a simple limit - resource. - - If any of the proposed values is over the defined quota, an - OverQuota exception will be raised with the sorted list of the - resources which are too high. Otherwise, the method returns - nothing. - - :param context: The request context, for access checks. - :param project_id: Specify the project_id if current context - is admin and admin wants to impact on - common user's tenant. 
- """ - - return self._driver.limit_check(context, self.resources, values, - project_id=project_id) - - def reserve(self, context, expire=None, project_id=None, **deltas): - """Check quotas and reserve resources. - - For counting quotas--those quotas for which there is a usage - synchronization function--this method checks quotas against - current usage and the desired deltas. The deltas are given as - keyword arguments, and current usage and other reservations - are factored into the quota check. - - This method will raise a QuotaResourceUnknown exception if a - given resource is unknown or if it does not have a usage - synchronization function. - - If any of the proposed values is over the defined quota, an - OverQuota exception will be raised with the sorted list of the - resources which are too high. Otherwise, the method returns a - list of reservation UUIDs which were created. - - :param context: The request context, for access checks. - :param expire: An optional parameter specifying an expiration - time for the reservations. If it is a simple - number, it is interpreted as a number of - seconds and added to the current time; if it is - a datetime.timedelta object, it will also be - added to the current time. A datetime.datetime - object will be interpreted as the absolute - expiration time. If None is specified, the - default expiration time set by - --default-reservation-expire will be used (this - value will be treated as a number of seconds). - :param project_id: Specify the project_id if current context - is admin and admin wants to impact on - common user's tenant. - """ - - reservations = self._driver.reserve(context, self.resources, deltas, - expire=expire, - project_id=project_id) - - LOG.debug("Created reservations %s", reservations) - - return reservations - - def commit(self, context, reservations, project_id=None): - """Commit reservations. - - :param context: The request context, for access checks. 
- :param reservations: A list of the reservation UUIDs, as - returned by the reserve() method. - :param project_id: Specify the project_id if current context - is admin and admin wants to impact on - common user's tenant. - """ - - try: - self._driver.commit(context, reservations, project_id=project_id) - except Exception: - # NOTE(Vek): Ignoring exceptions here is safe, because the - # usage resynchronization and the reservation expiration - # mechanisms will resolve the issue. The exception is - # logged, however, because this is less than optimal. - LOG.exception("Failed to commit reservations %s", reservations) - - def rollback(self, context, reservations, project_id=None): - """Roll back reservations. - - :param context: The request context, for access checks. - :param reservations: A list of the reservation UUIDs, as - returned by the reserve() method. - :param project_id: Specify the project_id if current context - is admin and admin wants to impact on - common user's tenant. - """ - - try: - self._driver.rollback(context, reservations, project_id=project_id) - except Exception: - # NOTE(Vek): Ignoring exceptions here is safe, because the - # usage resynchronization and the reservation expiration - # mechanisms will resolve the issue. The exception is - # logged, however, because this is less than optimal. - LOG.exception("Failed to roll back reservations %s", reservations) - - def destroy_by_project(self, context, project_id): - """Destroy all quota limits associated with a project. - - :param context: The request context, for access checks. - :param project_id: The ID of the project being deleted. - """ - - self._driver.destroy_by_project(context, project_id) - - def expire(self, context): - """Expire reservations. - - Explores all currently existing reservations and rolls back - any that have expired. - - :param context: The request context, for access checks. 
- """ - - self._driver.expire(context) - - def add_volume_type_opts(self, context, opts, volume_type_id): - """Add volume type resource options. - - Adds elements to the opts hash for volume type quotas. - If a resource is being reserved ('gigabytes', etc) and the volume - type is set up for its own quotas, these reservations are copied - into keys for 'gigabytes_', etc. - - :param context: The request context, for access checks. - :param opts: The reservations options hash. - :param volume_type_id: The volume type id for this reservation. - """ - if not volume_type_id: - return - - # NOTE(jdg): set inactive to True in volume_type_get, as we - # may be operating on a volume that was created with a type - # that has since been deleted. - volume_type = db.volume_type_get(context, volume_type_id, True) - - for quota in ('volumes', 'gigabytes', 'snapshots'): - if quota in opts: - vtype_quota = "%s_%s" % (quota, volume_type['name']) - opts[vtype_quota] = opts[quota] - - @property - def resource_names(self): - return sorted(self.resources.keys()) - - @property - def resources(self): - return self._resources - - -class VolumeTypeQuotaEngine(QuotaEngine): - """Represent the set of all quotas.""" - - @property - def resources(self): - """Fetches all possible quota resources.""" - - result = {} - # Global quotas. - argses = [('volumes', '_sync_volumes', 'quota_volumes'), - ('per_volume_gigabytes', None, 'per_volume_size_limit'), - ('snapshots', '_sync_snapshots', 'quota_snapshots'), - ('gigabytes', '_sync_gigabytes', 'quota_gigabytes'), - ('backups', '_sync_backups', 'quota_backups'), - ('backup_gigabytes', '_sync_backup_gigabytes', - 'quota_backup_gigabytes')] - for args in argses: - resource = ReservableResource(*args) - result[resource.name] = resource - - # Volume type quotas. 
- volume_types = db.volume_type_get_all(context.get_admin_context(), - False) - for volume_type in volume_types.values(): - for part_name in ('volumes', 'gigabytes', 'snapshots'): - resource = VolumeTypeResource(part_name, volume_type) - result[resource.name] = resource - return result - - def register_resource(self, resource): - raise NotImplementedError(_("Cannot register resource")) - - def register_resources(self, resources): - raise NotImplementedError(_("Cannot register resources")) - - def update_quota_resource(self, context, old_type_name, new_type_name): - """Update resource in quota. - - This is to update resource in quotas, quota_classes, and - quota_usages once the name of a volume type is changed. - - :param context: The request context, for access checks. - :param old_type_name: old name of volume type. - :param new_type_name: new name of volume type. - """ - - for quota in ('volumes', 'gigabytes', 'snapshots'): - old_res = "%s_%s" % (quota, old_type_name) - new_res = "%s_%s" % (quota, new_type_name) - db.quota_usage_update_resource(context, - old_res, - new_res) - db.quota_class_update_resource(context, - old_res, - new_res) - db.quota_update_resource(context, - old_res, - new_res) - - -class CGQuotaEngine(QuotaEngine): - """Represent the consistencygroup quotas.""" - - @property - def resources(self): - """Fetches all possible quota resources.""" - - result = {} - # Global quotas. 
- argses = [('consistencygroups', '_sync_consistencygroups', - 'quota_consistencygroups'), ] - for args in argses: - resource = ReservableResource(*args) - result[resource.name] = resource - - return result - - def register_resource(self, resource): - raise NotImplementedError(_("Cannot register resource")) - - def register_resources(self, resources): - raise NotImplementedError(_("Cannot register resources")) - - -class GroupQuotaEngine(QuotaEngine): - """Represent the group quotas.""" - - @property - def resources(self): - """Fetches all possible quota resources.""" - - result = {} - # Global quotas. - argses = [('groups', '_sync_groups', - 'quota_groups'), ] - for args in argses: - resource = ReservableResource(*args) - result[resource.name] = resource - - return result - - def register_resource(self, resource): - raise NotImplementedError(_("Cannot register resource")) - - def register_resources(self, resources): - raise NotImplementedError(_("Cannot register resources")) - -QUOTAS = VolumeTypeQuotaEngine() -CGQUOTAS = CGQuotaEngine() -GROUP_QUOTAS = GroupQuotaEngine() diff --git a/cinder/quota_utils.py b/cinder/quota_utils.py deleted file mode 100644 index 56fe1cb80..000000000 --- a/cinder/quota_utils.py +++ /dev/null @@ -1,304 +0,0 @@ -# Copyright 2013 OpenStack Foundation -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
-from oslo_config import cfg -from oslo_log import log as logging - -from keystoneauth1 import identity -from keystoneauth1 import loading as ka_loading -from keystoneclient import client -from keystoneclient import exceptions - -from cinder import db -from cinder import exception -from cinder.i18n import _ - -CONF = cfg.CONF -CONF.import_opt('auth_uri', 'keystonemiddleware.auth_token.__init__', - 'keystone_authtoken') - -LOG = logging.getLogger(__name__) - - -class GenericProjectInfo(object): - """Abstraction layer for Keystone V2 and V3 project objects""" - def __init__(self, project_id, project_keystone_api_version, - project_parent_id=None, - project_subtree=None, - project_parent_tree=None, - is_admin_project=False): - self.id = project_id - self.keystone_api_version = project_keystone_api_version - self.parent_id = project_parent_id - self.subtree = project_subtree - self.parents = project_parent_tree - self.is_admin_project = is_admin_project - - -def get_volume_type_reservation(ctxt, volume, type_id, - reserve_vol_type_only=False): - from cinder import quota - QUOTAS = quota.QUOTAS - # Reserve quotas for the given volume type - try: - reserve_opts = {'volumes': 1, 'gigabytes': volume['size']} - QUOTAS.add_volume_type_opts(ctxt, - reserve_opts, - type_id) - # If reserve_vol_type_only is True, just reserve volume_type quota, - # not volume quota. - if reserve_vol_type_only: - reserve_opts.pop('volumes') - reserve_opts.pop('gigabytes') - # Note that usually the project_id on the volume will be the same as - # the project_id in the context. But, if they are different then the - # reservations must be recorded against the project_id that owns the - # volume. 
- project_id = volume['project_id'] - reservations = QUOTAS.reserve(ctxt, - project_id=project_id, - **reserve_opts) - except exception.OverQuota as e: - process_reserve_over_quota(ctxt, e, - resource='volumes', - size=volume.size) - return reservations - - -def _filter_domain_id_from_parents(domain_id, tree): - """Removes the domain_id from the tree if present""" - new_tree = None - if tree: - parent, children = next(iter(tree.items())) - # Don't add the domain id to the parents hierarchy - if parent != domain_id: - new_tree = {parent: _filter_domain_id_from_parents(domain_id, - children)} - - return new_tree - - -def get_project_hierarchy(context, project_id, subtree_as_ids=False, - parents_as_ids=False, is_admin_project=False): - """A Helper method to get the project hierarchy. - - Along with hierarchical multitenancy in keystone API v3, projects can be - hierarchically organized. Therefore, we need to know the project - hierarchy, if any, in order to do nested quota operations properly. - If the domain is being used as the top most parent, it is filtered out from - the parent tree and parent_id. 
- """ - keystone = _keystone_client(context) - generic_project = GenericProjectInfo(project_id, keystone.version) - if keystone.version == 'v3': - project = keystone.projects.get(project_id, - subtree_as_ids=subtree_as_ids, - parents_as_ids=parents_as_ids) - - generic_project.parent_id = None - if project.parent_id != project.domain_id: - generic_project.parent_id = project.parent_id - - generic_project.subtree = ( - project.subtree if subtree_as_ids else None) - - generic_project.parents = None - if parents_as_ids: - generic_project.parents = _filter_domain_id_from_parents( - project.domain_id, project.parents) - - generic_project.is_admin_project = is_admin_project - - return generic_project - - -def get_parent_project_id(context, project_id): - return get_project_hierarchy(context, project_id).parent_id - - -def get_all_projects(context): - # Right now this would have to be done as cloud admin with Keystone v3 - return _keystone_client(context, (3, 0)).projects.list() - - -def get_all_root_project_ids(context): - project_list = get_all_projects(context) - - # Find every project which does not have a parent, meaning it is the - # root of the tree - project_roots = [project.id for project in project_list - if not project.parent_id] - - return project_roots - - -def update_alloc_to_next_hard_limit(context, resources, deltas, res, - expire, project_id): - from cinder import quota - QUOTAS = quota.QUOTAS - GROUP_QUOTAS = quota.GROUP_QUOTAS - reservations = [] - projects = get_project_hierarchy(context, project_id, - parents_as_ids=True).parents - hard_limit_found = False - # Update allocated values up the chain til we hit a hard limit or run out - # of parents - while projects and not hard_limit_found: - cur_proj_id = list(projects)[0] - projects = projects[cur_proj_id] - if res == 'groups': - cur_quota_lim = GROUP_QUOTAS.get_by_project_or_default( - context, cur_proj_id, res) - else: - cur_quota_lim = QUOTAS.get_by_project_or_default( - context, cur_proj_id, res) - 
hard_limit_found = (cur_quota_lim != -1) - cur_quota = {res: cur_quota_lim} - cur_delta = {res: deltas[res]} - try: - reservations += db.quota_reserve( - context, resources, cur_quota, cur_delta, expire, - CONF.until_refresh, CONF.max_age, cur_proj_id, - is_allocated_reserve=True) - except exception.OverQuota: - db.reservation_rollback(context, reservations) - raise - return reservations - - -def validate_setup_for_nested_quota_use(ctxt, resources, - nested_quota_driver, - fix_allocated_quotas=False): - """Validates the setup supports using nested quotas. - - Ensures that Keystone v3 or greater is being used, that the current - user is of the cloud admin role, and that the existing quotas make sense to - nest in the current hierarchy (e.g. that no child quota would be larger - than it's parent). - - :param resources: the quota resources to validate - :param nested_quota_driver: nested quota driver used to validate each tree - :param fix_allocated_quotas: if True, parent projects "allocated" total - will be calculated based on the existing child limits and the DB will - be updated. If False, an exception is raised reporting any parent - allocated quotas are currently incorrect. 
- """ - try: - project_roots = get_all_root_project_ids(ctxt) - - # Now that we've got the roots of each tree, validate the trees - # to ensure that each is setup logically for nested quotas - for root in project_roots: - root_proj = get_project_hierarchy(ctxt, root, - subtree_as_ids=True) - nested_quota_driver.validate_nested_setup( - ctxt, - resources, - {root_proj.id: root_proj.subtree}, - fix_allocated_quotas=fix_allocated_quotas - ) - except exceptions.VersionNotAvailable: - msg = _("Keystone version 3 or greater must be used to get nested " - "quota support.") - raise exception.CinderException(message=msg) - except exceptions.Forbidden: - msg = _("Must run this command as cloud admin using " - "a Keystone policy.json which allows cloud " - "admin to list and get any project.") - raise exception.CinderException(message=msg) - - -def _keystone_client(context, version=(3, 0)): - """Creates and returns an instance of a generic keystone client. - - :param context: The request context - :param version: version of Keystone to request - :return: keystoneclient.client.Client object - """ - auth_plugin = identity.Token( - auth_url=CONF.keystone_authtoken.auth_uri, - token=context.auth_token, - project_id=context.project_id) - - client_session = ka_loading.session.Session().load_from_options( - auth=auth_plugin, - insecure=CONF.keystone_authtoken.insecure, - cacert=CONF.keystone_authtoken.cafile, - key=CONF.keystone_authtoken.keyfile, - cert=CONF.keystone_authtoken.certfile) - return client.Client(auth_url=CONF.keystone_authtoken.auth_uri, - session=client_session, version=version) - - -OVER_QUOTA_RESOURCE_EXCEPTIONS = {'snapshots': exception.SnapshotLimitExceeded, - 'backups': exception.BackupLimitExceeded, - 'volumes': exception.VolumeLimitExceeded, - 'groups': exception.GroupLimitExceeded} - - -def process_reserve_over_quota(context, over_quota_exception, - resource, size=None): - """Handle OverQuota exception. 
- - Analyze OverQuota exception, and raise new exception related to - resource type. If there are unexpected items in overs, - UnexpectedOverQuota is raised. - - :param context: security context - :param over_quota_exception: OverQuota exception - :param resource: can be backups, snapshots, and volumes - :param size: requested size in reservation - """ - def _consumed(name): - return (usages[name]['reserved'] + usages[name]['in_use']) - - overs = over_quota_exception.kwargs['overs'] - usages = over_quota_exception.kwargs['usages'] - quotas = over_quota_exception.kwargs['quotas'] - invalid_overs = [] - - for over in overs: - if 'gigabytes' in over: - msg = ("Quota exceeded for %(s_pid)s, tried to create " - "%(s_size)dG %(s_resource)s (%(d_consumed)dG of " - "%(d_quota)dG already consumed).") - LOG.warning(msg, {'s_pid': context.project_id, - 's_size': size, - 's_resource': resource[:-1], - 'd_consumed': _consumed(over), - 'd_quota': quotas[over]}) - if resource == 'backups': - exc = exception.VolumeBackupSizeExceedsAvailableQuota - else: - exc = exception.VolumeSizeExceedsAvailableQuota - raise exc( - name=over, - requested=size, - consumed=_consumed(over), - quota=quotas[over]) - if (resource in OVER_QUOTA_RESOURCE_EXCEPTIONS.keys() and - resource in over): - msg = ("Quota exceeded for %(s_pid)s, tried to create " - "%(s_resource)s (%(d_consumed)d %(s_resource)ss " - "already consumed).") - LOG.warning(msg, {'s_pid': context.project_id, - 'd_consumed': _consumed(over), - 's_resource': resource[:-1]}) - raise OVER_QUOTA_RESOURCE_EXCEPTIONS[resource]( - allowed=quotas[over], - name=over) - invalid_overs.append(over) - - if invalid_overs: - raise exception.UnexpectedOverQuota(name=', '.join(invalid_overs)) diff --git a/cinder/rpc.py b/cinder/rpc.py deleted file mode 100644 index 527ea8922..000000000 --- a/cinder/rpc.py +++ /dev/null @@ -1,269 +0,0 @@ -# Copyright 2013 Red Hat, Inc. 
-# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -__all__ = [ - 'init', - 'cleanup', - 'set_defaults', - 'add_extra_exmods', - 'clear_extra_exmods', - 'get_allowed_exmods', - 'RequestContextSerializer', - 'get_client', - 'get_server', - 'get_notifier', -] - -from oslo_config import cfg -from oslo_log import log as logging -import oslo_messaging as messaging -from oslo_messaging.rpc import dispatcher -from oslo_utils import importutils -profiler = importutils.try_import('osprofiler.profiler') -import six - -import cinder.context -import cinder.exception -from cinder.i18n import _ -from cinder import objects -from cinder.objects import base -from cinder import utils - -CONF = cfg.CONF -LOG = logging.getLogger(__name__) -TRANSPORT = None -NOTIFICATION_TRANSPORT = None -NOTIFIER = None - -ALLOWED_EXMODS = [ - cinder.exception.__name__, -] -EXTRA_EXMODS = [] - - -def init(conf): - global TRANSPORT, NOTIFICATION_TRANSPORT, NOTIFIER - exmods = get_allowed_exmods() - TRANSPORT = messaging.get_rpc_transport(conf, - allowed_remote_exmods=exmods) - NOTIFICATION_TRANSPORT = messaging.get_notification_transport( - conf, - allowed_remote_exmods=exmods) - - # get_notification_transport has loaded oslo_messaging_notifications config - # group, so we can now check if notifications are actually enabled. 
- if utils.notifications_enabled(conf): - json_serializer = messaging.JsonPayloadSerializer() - serializer = RequestContextSerializer(json_serializer) - NOTIFIER = messaging.Notifier(NOTIFICATION_TRANSPORT, - serializer=serializer) - else: - NOTIFIER = utils.DO_NOTHING - - -def initialized(): - return None not in [TRANSPORT, NOTIFIER] - - -def cleanup(): - global TRANSPORT, NOTIFICATION_TRANSPORT, NOTIFIER - if NOTIFIER is None: - LOG.exception("RPC cleanup: NOTIFIER is None") - TRANSPORT.cleanup() - NOTIFICATION_TRANSPORT.cleanup() - TRANSPORT = NOTIFICATION_TRANSPORT = NOTIFIER = None - - -def set_defaults(control_exchange): - messaging.set_transport_defaults(control_exchange) - - -def add_extra_exmods(*args): - EXTRA_EXMODS.extend(args) - - -def clear_extra_exmods(): - del EXTRA_EXMODS[:] - - -def get_allowed_exmods(): - return ALLOWED_EXMODS + EXTRA_EXMODS - - -class RequestContextSerializer(messaging.Serializer): - - def __init__(self, base): - self._base = base - - def serialize_entity(self, context, entity): - if not self._base: - return entity - return self._base.serialize_entity(context, entity) - - def deserialize_entity(self, context, entity): - if not self._base: - return entity - return self._base.deserialize_entity(context, entity) - - def serialize_context(self, context): - _context = context.to_dict() - if profiler is not None: - prof = profiler.get() - if prof: - trace_info = { - "hmac_key": prof.hmac_key, - "base_id": prof.get_base_id(), - "parent_id": prof.get_id() - } - _context.update({"trace_info": trace_info}) - return _context - - def deserialize_context(self, context): - trace_info = context.pop("trace_info", None) - if trace_info: - if profiler is not None: - profiler.init(**trace_info) - - return cinder.context.RequestContext.from_dict(context) - - -def get_client(target, version_cap=None, serializer=None): - assert TRANSPORT is not None - serializer = RequestContextSerializer(serializer) - return messaging.RPCClient(TRANSPORT, - target, 
- version_cap=version_cap, - serializer=serializer) - - -def get_server(target, endpoints, serializer=None): - assert TRANSPORT is not None - serializer = RequestContextSerializer(serializer) - access_policy = dispatcher.DefaultRPCAccessPolicy - return messaging.get_rpc_server(TRANSPORT, - target, - endpoints, - executor='eventlet', - serializer=serializer, - access_policy=access_policy) - - -@utils.if_notifications_enabled -def get_notifier(service=None, host=None, publisher_id=None): - assert NOTIFIER is not None - if not publisher_id: - publisher_id = "%s.%s" % (service, host or CONF.host) - return NOTIFIER.prepare(publisher_id=publisher_id) - - -def assert_min_rpc_version(min_ver, exc=None): - """Decorator to block RPC calls when version cap is lower than min_ver.""" - - if exc is None: - exc = cinder.exception.ServiceTooOld - - def decorator(f): - @six.wraps(f) - def _wrapper(self, *args, **kwargs): - if not self.client.can_send_version(min_ver): - msg = _('One of %(binary)s services is too old to accept ' - '%(method)s request. Required RPC API version is ' - '%(version)s. 
Are you running mixed versions of ' - '%(binary)ss?') % {'binary': self.BINARY, - 'version': min_ver, - 'method': f.__name__} - raise exc(msg) - return f(self, *args, **kwargs) - return _wrapper - return decorator - - -LAST_RPC_VERSIONS = {} -LAST_OBJ_VERSIONS = {} - - -class RPCAPI(object): - """Mixin class aggregating methods related to RPC API compatibility.""" - - RPC_API_VERSION = '1.0' - RPC_DEFAULT_VERSION = '1.0' - TOPIC = '' - BINARY = '' - - def __init__(self): - target = messaging.Target(topic=self.TOPIC, - version=self.RPC_API_VERSION) - obj_version_cap = self.determine_obj_version_cap() - serializer = base.CinderObjectSerializer(obj_version_cap) - - rpc_version_cap = self.determine_rpc_version_cap() - self.client = get_client(target, version_cap=rpc_version_cap, - serializer=serializer) - - def _compat_ver(self, current, *legacy): - versions = (current,) + legacy - for version in versions[:-1]: - if self.client.can_send_version(version): - return version - return versions[-1] - - def _get_cctxt(self, version=None, **kwargs): - """Prepare client context - - Version parameter accepts single version string or tuple of strings. - Compatible version can be obtained later using: - cctxt = _get_cctxt(...) - version = cctxt.target.version - """ - if version is None: - version = self.RPC_DEFAULT_VERSION - if isinstance(version, tuple): - version = self._compat_ver(*version) - return self.client.prepare(version=version, **kwargs) - - @classmethod - def determine_rpc_version_cap(cls): - global LAST_RPC_VERSIONS - if cls.BINARY in LAST_RPC_VERSIONS: - return LAST_RPC_VERSIONS[cls.BINARY] - - version_cap = objects.Service.get_minimum_rpc_version( - cinder.context.get_admin_context(), cls.BINARY) - if not version_cap: - # If there is no service we assume they will come up later and will - # have the same version as we do. 
- version_cap = cls.RPC_API_VERSION - LOG.info('Automatically selected %(binary)s RPC version ' - '%(version)s as minimum service version.', - {'binary': cls.BINARY, 'version': version_cap}) - LAST_RPC_VERSIONS[cls.BINARY] = version_cap - return version_cap - - @classmethod - def determine_obj_version_cap(cls): - global LAST_OBJ_VERSIONS - if cls.BINARY in LAST_OBJ_VERSIONS: - return LAST_OBJ_VERSIONS[cls.BINARY] - - version_cap = objects.Service.get_minimum_obj_version( - cinder.context.get_admin_context(), cls.BINARY) - # If there is no service we assume they will come up later and will - # have the same version as we do. - if not version_cap: - version_cap = base.OBJ_VERSIONS.get_current() - LOG.info('Automatically selected %(binary)s objects version ' - '%(version)s as minimum service version.', - {'binary': cls.BINARY, 'version': version_cap}) - LAST_OBJ_VERSIONS[cls.BINARY] = version_cap - return version_cap diff --git a/cinder/scheduler/__init__.py b/cinder/scheduler/__init__.py deleted file mode 100644 index e69de29bb..000000000 diff --git a/cinder/scheduler/base_filter.py b/cinder/scheduler/base_filter.py deleted file mode 100644 index 07180667a..000000000 --- a/cinder/scheduler/base_filter.py +++ /dev/null @@ -1,129 +0,0 @@ -# Copyright (c) 2011-2012 OpenStack Foundation. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -""" -Filter support -""" -from oslo_log import log as logging - -from cinder.scheduler import base_handler - -LOG = logging.getLogger(__name__) - - -class BaseFilter(object): - """Base class for all filter classes.""" - def _filter_one(self, obj, filter_properties): - """Return True if it passes the filter, False otherwise. - - Override this in a subclass. - """ - return True - - def filter_all(self, filter_obj_list, filter_properties): - """Yield objects that pass the filter. - - Can be overridden in a subclass, if you need to base filtering - decisions on all objects. Otherwise, one can just override - _filter_one() to filter a single object. - """ - for obj in filter_obj_list: - if self._filter_one(obj, filter_properties): - yield obj - - # Set to true in a subclass if a filter only needs to be run once - # for each request rather than for each instance - run_filter_once_per_request = False - - def run_filter_for_index(self, index): - """Return True if the filter needs to be run for n-th instances. - - Only need to override this if a filter needs anything other than - "first only" or "all" behaviour. - """ - return not (self.run_filter_once_per_request and index > 0) - - -class BaseFilterHandler(base_handler.BaseHandler): - """Base class to handle loading filter classes. - - This class should be subclassed where one needs to use filters. - """ - - def _log_filtration(self, full_filter_results, - part_filter_results, filter_properties): - # Log the filtration history - rspec = filter_properties.get("request_spec", {}) - msg_dict = {"vol_id": rspec.get("volume_id", ""), - "str_results": full_filter_results} - LOG.debug("Filtering removed all hosts for the request with " - "volume ID '%(vol_id)s'. 
Filter results: %(str_results)s", - msg_dict) - msg_dict["str_results"] = ', '.join( - "%(cls_name)s: (start: %(start)s, end: %(end)s)" % { - "cls_name": value[0], "start": value[1], "end": value[2]} - for value in part_filter_results) - LOG.info("Filtering removed all hosts for the request with " - "volume ID '%(vol_id)s'. Filter results: %(str_results)s", - msg_dict) - - def get_filtered_objects(self, filter_classes, objs, - filter_properties, index=0): - """Get objects after filter - - :param filter_classes: filters that will be used to filter the - objects - :param objs: objects that will be filtered - :param filter_properties: client filter properties - :param index: This value needs to be increased in the caller - function of get_filtered_objects when handling - each resource. - """ - list_objs = list(objs) - LOG.debug("Starting with %d host(s)", len(list_objs)) - # The 'part_filter_results' list just tracks the number of hosts - # before and after the filter, unless the filter returns zero - # hosts, in which it records the host/nodename for the last batch - # that was removed. Since the full_filter_results can be very large, - # it is only recorded if the LOG level is set to debug. 
- part_filter_results = [] - full_filter_results = [] - for filter_cls in filter_classes: - cls_name = filter_cls.__name__ - start_count = len(list_objs) - filter_class = filter_cls() - - if filter_class.run_filter_for_index(index): - objs = filter_class.filter_all(list_objs, filter_properties) - if objs is None: - LOG.info("Filter %s returned 0 hosts", cls_name) - full_filter_results.append((cls_name, None)) - list_objs = None - break - - list_objs = list(objs) - end_count = len(list_objs) - part_filter_results.append((cls_name, start_count, end_count)) - remaining = [getattr(obj, "host", obj) - for obj in list_objs] - full_filter_results.append((cls_name, remaining)) - - LOG.debug("Filter %(cls_name)s returned " - "%(obj_len)d host(s)", - {'cls_name': cls_name, 'obj_len': len(list_objs)}) - if not list_objs: - self._log_filtration(full_filter_results, - part_filter_results, filter_properties) - return list_objs diff --git a/cinder/scheduler/base_handler.py b/cinder/scheduler/base_handler.py deleted file mode 100644 index bc8e14217..000000000 --- a/cinder/scheduler/base_handler.py +++ /dev/null @@ -1,47 +0,0 @@ -# Copyright (c) 2011-2013 OpenStack Foundation. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -""" -A common base for handling extension classes. 
- -Used by BaseFilterHandler and BaseWeightHandler -""" - -import inspect - -from stevedore import extension - - -class BaseHandler(object): - """Base class to handle loading filter and weight classes.""" - def __init__(self, modifier_class_type, modifier_namespace): - self.namespace = modifier_namespace - self.modifier_class_type = modifier_class_type - self.extension_manager = extension.ExtensionManager(modifier_namespace) - - def _is_correct_class(self, cls): - """Return whether an object is a class of the correct type. - - (or is not prefixed with an underscore) - """ - return (inspect.isclass(cls) and - not cls.__name__.startswith('_') and - issubclass(cls, self.modifier_class_type)) - - def get_all_classes(self): - # We use a set, as some classes may have an entrypoint of their own, - # and also be returned by a function such as 'all_filters' for example - return [ext.plugin for ext in self.extension_manager if - self._is_correct_class(ext.plugin)] diff --git a/cinder/scheduler/base_weight.py b/cinder/scheduler/base_weight.py deleted file mode 100644 index 0ea982ca8..000000000 --- a/cinder/scheduler/base_weight.py +++ /dev/null @@ -1,145 +0,0 @@ -# Copyright (c) 2011-2012 OpenStack Foundation. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -""" -Pluggable Weighing support -""" - -import abc - -import six - -from cinder.scheduler import base_handler - - -def normalize(weight_list, minval=None, maxval=None): - """Normalize the values in a list between 0 and 1.0. - - The normalization is made regarding the lower and upper values present in - weight_list. If the minval and/or maxval parameters are set, these values - will be used instead of the minimum and maximum from the list. - - If all the values are equal, they are normalized to 0. - """ - - if not weight_list: - return () - - if maxval is None: - maxval = max(weight_list) - - if minval is None: - minval = min(weight_list) - - maxval = float(maxval) - minval = float(minval) - - if minval == maxval: - return [0] * len(weight_list) - - range_ = maxval - minval - return ((i - minval) / range_ for i in weight_list) - - -class WeighedObject(object): - """Object with weight information.""" - def __init__(self, obj, weight): - self.obj = obj - self.weight = weight - - def __repr__(self): - return "" % (self.obj, self.weight) - - -@six.add_metaclass(abc.ABCMeta) -class BaseWeigher(object): - """Base class for pluggable weighers. - - The attributes maxval and minval can be specified to set up the maximum - and minimum values for the weighed objects. These values will then be - taken into account in the normalization step, instead of taking the values - from the calculated weights. - """ - - minval = None - maxval = None - - def weight_multiplier(self): - """How weighted this weigher should be. - - Override this method in a subclass, so that the returned value is - read from a configuration option to permit operators specify a - multiplier for the weigher. - """ - return 1.0 - - @abc.abstractmethod - def _weigh_object(self, obj, weight_properties): - """Override in a subclass to specify a weight for a specific object.""" - - def weigh_objects(self, weighed_obj_list, weight_properties): - """Weigh multiple objects. 
- - Override in a subclass if you need access to all objects in order - to calculate weights. Do not modify the weight of an object here, - just return a list of weights. - """ - # Calculate the weights - weights = [] - for obj in weighed_obj_list: - weight = self._weigh_object(obj.obj, weight_properties) - - # Record the min and max values if they are None. If they anything - # but none we assume that the weigher has set them - if self.minval is None: - self.minval = weight - if self.maxval is None: - self.maxval = weight - - if weight < self.minval: - self.minval = weight - elif weight > self.maxval: - self.maxval = weight - - weights.append(weight) - - return weights - - -class BaseWeightHandler(base_handler.BaseHandler): - object_class = WeighedObject - - def get_weighed_objects(self, weigher_classes, obj_list, - weighing_properties): - """Return a sorted (descending), normalized list of WeighedObjects.""" - - if not obj_list: - return [] - - weighed_objs = [self.object_class(obj, 0.0) for obj in obj_list] - for weigher_cls in weigher_classes: - weigher = weigher_cls() - weights = weigher.weigh_objects(weighed_objs, weighing_properties) - - # Normalize the weights - weights = normalize(weights, - minval=weigher.minval, - maxval=weigher.maxval) - - for i, weight in enumerate(weights): - obj = weighed_objs[i] - obj.weight += weigher.weight_multiplier() * weight - - return sorted(weighed_objs, key=lambda x: x.weight, reverse=True) diff --git a/cinder/scheduler/driver.py b/cinder/scheduler/driver.py deleted file mode 100644 index a140d5eb1..000000000 --- a/cinder/scheduler/driver.py +++ /dev/null @@ -1,145 +0,0 @@ -# Copyright (c) 2010 OpenStack Foundation -# Copyright 2010 United States Government as represented by the -# Administrator of the National Aeronautics and Space Administration. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. 
You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -""" -Scheduler base class that all Schedulers should inherit from -""" - -from oslo_config import cfg -from oslo_utils import importutils -from oslo_utils import timeutils - -from cinder.i18n import _ -from cinder import objects -from cinder.volume import rpcapi as volume_rpcapi - - -scheduler_driver_opts = [ - cfg.StrOpt('scheduler_host_manager', - default='cinder.scheduler.host_manager.HostManager', - help='The scheduler host manager class to use'), - cfg.IntOpt('scheduler_max_attempts', - default=3, - help='Maximum number of attempts to schedule a volume'), -] - -CONF = cfg.CONF -CONF.register_opts(scheduler_driver_opts) - - -def volume_update_db(context, volume_id, host, cluster_name): - """Set the host, cluster_name, and set the scheduled_at field of a volume. - - :returns: A Volume with the updated fields set properly. - """ - volume = objects.Volume.get_by_id(context, volume_id) - volume.host = host - volume.cluster_name = cluster_name - volume.scheduled_at = timeutils.utcnow() - volume.save() - - # A volume object is expected to be returned, as it is used by - # filter_scheduler. - return volume - - -def generic_group_update_db(context, group, host, cluster_name): - """Set the host and the scheduled_at field of a group. - - :returns: A Group with the updated fields set properly. 
- """ - group.update({'host': host, 'updated_at': timeutils.utcnow(), - 'cluster_name': cluster_name}) - group.save() - return group - - -class Scheduler(object): - """The base class that all Scheduler classes should inherit from.""" - - def __init__(self): - self.host_manager = importutils.import_object( - CONF.scheduler_host_manager) - self.volume_rpcapi = volume_rpcapi.VolumeAPI() - - def reset(self): - """Reset volume RPC API object to load new version pins.""" - self.volume_rpcapi = volume_rpcapi.VolumeAPI() - - def is_ready(self): - """Returns True if Scheduler is ready to accept requests. - - This is to handle scheduler service startup when it has no volume hosts - stats and will fail all the requests. - """ - - return self.host_manager.has_all_capabilities() - - def update_service_capabilities(self, service_name, host, capabilities, - cluster_name, timestamp): - """Process a capability update from a service node.""" - self.host_manager.update_service_capabilities(service_name, - host, - capabilities, - cluster_name, - timestamp) - - def notify_service_capabilities(self, service_name, backend, - capabilities, timestamp): - """Notify capability update from a service node.""" - self.host_manager.notify_service_capabilities(service_name, - backend, - capabilities, - timestamp) - - def host_passes_filters(self, context, backend, request_spec, - filter_properties): - """Check if the specified backend passes the filters.""" - raise NotImplementedError(_("Must implement backend_passes_filters")) - - def find_retype_host(self, context, request_spec, filter_properties=None, - migration_policy='never'): - """Find a backend that can accept the volume with its new type.""" - raise NotImplementedError(_("Must implement find_retype_backend")) - - # NOTE(geguileo): For backward compatibility with out of tree Schedulers - # we don't change host_passes_filters or find_retype_host method names but - # create an "alias" for them with the right name instead. 
- backend_passes_filters = host_passes_filters - find_retype_backend = find_retype_host - - def schedule(self, context, topic, method, *_args, **_kwargs): - """Must override schedule method for scheduler to work.""" - raise NotImplementedError(_("Must implement a fallback schedule")) - - def schedule_create_volume(self, context, request_spec, filter_properties): - """Must override schedule method for scheduler to work.""" - raise NotImplementedError(_("Must implement schedule_create_volume")) - - def schedule_create_group(self, context, group, - group_spec, - request_spec_list, - group_filter_properties, - filter_properties_list): - """Must override schedule method for scheduler to work.""" - raise NotImplementedError(_( - "Must implement schedule_create_group")) - - def get_pools(self, context, filters): - """Must override schedule method for scheduler to work.""" - raise NotImplementedError(_( - "Must implement schedule_get_pools")) diff --git a/cinder/scheduler/evaluator/__init__.py b/cinder/scheduler/evaluator/__init__.py deleted file mode 100644 index e69de29bb..000000000 diff --git a/cinder/scheduler/evaluator/evaluator.py b/cinder/scheduler/evaluator/evaluator.py deleted file mode 100644 index 47887f3ed..000000000 --- a/cinder/scheduler/evaluator/evaluator.py +++ /dev/null @@ -1,297 +0,0 @@ -# Copyright (c) 2014 Hewlett-Packard Development Company, L.P. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -import operator -import re - -import pyparsing -import six - -from cinder import exception -from cinder.i18n import _ - - -def _operatorOperands(tokenList): - it = iter(tokenList) - while True: - try: - op1 = next(it) - op2 = next(it) - yield(op1, op2) - except StopIteration: - break - - -class EvalConstant(object): - def __init__(self, toks): - self.value = toks[0] - - def eval(self): - result = self.value - if (isinstance(result, six.string_types) and - re.match("^[a-zA-Z_]+\.[a-zA-Z_]+$", result)): - (which_dict, entry) = result.split('.') - try: - result = _vars[which_dict][entry] - except KeyError as e: - raise exception.EvaluatorParseException( - _("KeyError: %s") % e) - except TypeError as e: - raise exception.EvaluatorParseException( - _("TypeError: %s") % e) - - try: - result = int(result) - except ValueError: - try: - result = float(result) - except ValueError as e: - raise exception.EvaluatorParseException( - _("ValueError: %s") % e) - - return result - - -class EvalSignOp(object): - operations = { - '+': 1, - '-': -1, - } - - def __init__(self, toks): - self.sign, self.value = toks[0] - - def eval(self): - return self.operations[self.sign] * self.value.eval() - - -class EvalAddOp(object): - def __init__(self, toks): - self.value = toks[0] - - def eval(self): - sum = self.value[0].eval() - for op, val in _operatorOperands(self.value[1:]): - if op == '+': - sum += val.eval() - elif op == '-': - sum -= val.eval() - return sum - - -class EvalMultOp(object): - def __init__(self, toks): - self.value = toks[0] - - def eval(self): - prod = self.value[0].eval() - for op, val in _operatorOperands(self.value[1:]): - try: - if op == '*': - prod *= val.eval() - elif op == '/': - prod /= float(val.eval()) - except ZeroDivisionError as e: - raise exception.EvaluatorParseException( - _("ZeroDivisionError: %s") % e) - return prod - - -class EvalPowerOp(object): - def __init__(self, toks): - self.value = toks[0] - - def eval(self): - prod = self.value[0].eval() - for 
op, val in _operatorOperands(self.value[1:]): - prod = pow(prod, val.eval()) - return prod - - -class EvalNegateOp(object): - def __init__(self, toks): - self.negation, self.value = toks[0] - - def eval(self): - return not self.value.eval() - - -class EvalComparisonOp(object): - operations = { - "<": operator.lt, - "<=": operator.le, - ">": operator.gt, - ">=": operator.ge, - "!=": operator.ne, - "==": operator.eq, - "<>": operator.ne, - } - - def __init__(self, toks): - self.value = toks[0] - - def eval(self): - val1 = self.value[0].eval() - for op, val in _operatorOperands(self.value[1:]): - fn = self.operations[op] - val2 = val.eval() - if not fn(val1, val2): - break - val1 = val2 - else: - return True - return False - - -class EvalTernaryOp(object): - def __init__(self, toks): - self.value = toks[0] - - def eval(self): - condition = self.value[0].eval() - if condition: - return self.value[2].eval() - else: - return self.value[4].eval() - - -class EvalFunction(object): - functions = { - "abs": abs, - "max": max, - "min": min, - } - - def __init__(self, toks): - self.func, self.value = toks[0] - - def eval(self): - args = self.value.eval() - if type(args) is list: - return self.functions[self.func](*args) - else: - return self.functions[self.func](args) - - -class EvalCommaSeperator(object): - def __init__(self, toks): - self.value = toks[0] - - def eval(self): - val1 = self.value[0].eval() - val2 = self.value[2].eval() - if type(val2) is list: - val_list = [] - val_list.append(val1) - for val in val2: - val_list.append(val) - return val_list - - return [val1, val2] - - -class EvalBoolAndOp(object): - def __init__(self, toks): - self.value = toks[0] - - def eval(self): - left = self.value[0].eval() - right = self.value[2].eval() - return left and right - - -class EvalBoolOrOp(object): - def __init__(self, toks): - self.value = toks[0] - - def eval(self): - left = self.value[0].eval() - right = self.value[2].eval() - return left or right - -_parser = None -_vars = 
{} - - -def _def_parser(): - # Enabling packrat parsing greatly speeds up the parsing. - pyparsing.ParserElement.enablePackrat() - - alphas = pyparsing.alphas - Combine = pyparsing.Combine - Forward = pyparsing.Forward - nums = pyparsing.nums - oneOf = pyparsing.oneOf - opAssoc = pyparsing.opAssoc - operatorPrecedence = pyparsing.operatorPrecedence - Word = pyparsing.Word - - integer = Word(nums) - real = Combine(Word(nums) + '.' + Word(nums)) - variable = Word(alphas + '_' + '.') - number = real | integer - expr = Forward() - fn = Word(alphas + '_' + '.') - operand = number | variable | fn - - signop = oneOf('+ -') - addop = oneOf('+ -') - multop = oneOf('* /') - comparisonop = oneOf(' '.join(EvalComparisonOp.operations.keys())) - ternaryop = ('?', ':') - boolandop = oneOf('AND and &&') - boolorop = oneOf('OR or ||') - negateop = oneOf('NOT not !') - - operand.setParseAction(EvalConstant) - expr = operatorPrecedence(operand, [ - (fn, 1, opAssoc.RIGHT, EvalFunction), - ("^", 2, opAssoc.RIGHT, EvalPowerOp), - (signop, 1, opAssoc.RIGHT, EvalSignOp), - (multop, 2, opAssoc.LEFT, EvalMultOp), - (addop, 2, opAssoc.LEFT, EvalAddOp), - (negateop, 1, opAssoc.RIGHT, EvalNegateOp), - (comparisonop, 2, opAssoc.LEFT, EvalComparisonOp), - (ternaryop, 3, opAssoc.LEFT, EvalTernaryOp), - (boolandop, 2, opAssoc.LEFT, EvalBoolAndOp), - (boolorop, 2, opAssoc.LEFT, EvalBoolOrOp), - (',', 2, opAssoc.RIGHT, EvalCommaSeperator), ]) - - return expr - - -def evaluate(expression, **kwargs): - """Evaluates an expression. - - Provides the facility to evaluate mathematical expressions, and to - substitute variables from dictionaries into those expressions. - - Supports both integer and floating point values, and automatic - promotion where necessary. 
- """ - global _parser - if _parser is None: - _parser = _def_parser() - - global _vars - _vars = kwargs - - try: - result = _parser.parseString(expression, parseAll=True)[0] - except pyparsing.ParseException as e: - raise exception.EvaluatorParseException( - _("ParseException: %s") % e) - - return result.eval() diff --git a/cinder/scheduler/filter_scheduler.py b/cinder/scheduler/filter_scheduler.py deleted file mode 100644 index 59c8668f7..000000000 --- a/cinder/scheduler/filter_scheduler.py +++ /dev/null @@ -1,552 +0,0 @@ -# Copyright (c) 2011 Intel Corporation -# Copyright (c) 2011 OpenStack Foundation -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -"""The FilterScheduler is for creating volumes. - -You can customize this scheduler by specifying your own volume Filters and -Weighing Functions. 
-""" - -from oslo_config import cfg -from oslo_log import log as logging -from oslo_serialization import jsonutils - -from cinder import exception -from cinder.i18n import _ -from cinder.scheduler import driver -from cinder.scheduler import scheduler_options -from cinder.volume import utils - -CONF = cfg.CONF -LOG = logging.getLogger(__name__) - - -class FilterScheduler(driver.Scheduler): - """Scheduler that can be used for filtering and weighing.""" - def __init__(self, *args, **kwargs): - super(FilterScheduler, self).__init__(*args, **kwargs) - self.cost_function_cache = None - self.options = scheduler_options.SchedulerOptions() - self.max_attempts = self._max_attempts() - - def schedule(self, context, topic, method, *args, **kwargs): - """Schedule contract that returns best-suited host for this request.""" - self._schedule(context, topic, *args, **kwargs) - - def _get_configuration_options(self): - """Fetch options dictionary. Broken out for testing.""" - return self.options.get_configuration() - - def populate_filter_properties(self, request_spec, filter_properties): - """Stuff things into filter_properties. - - Can be overridden in a subclass to add more data. 
- """ - vol = request_spec['volume_properties'] - filter_properties['size'] = vol['size'] - filter_properties['availability_zone'] = vol.get('availability_zone') - filter_properties['user_id'] = vol.get('user_id') - filter_properties['metadata'] = vol.get('metadata') - filter_properties['qos_specs'] = vol.get('qos_specs') - - def schedule_create_group(self, context, group, - group_spec, - request_spec_list, - group_filter_properties, - filter_properties_list): - weighed_backend = self._schedule_generic_group( - context, - group_spec, - request_spec_list, - group_filter_properties, - filter_properties_list) - - if not weighed_backend: - raise exception.NoValidBackend(reason=_("No weighed backends " - "available")) - - backend = weighed_backend.obj - - updated_group = driver.generic_group_update_db(context, group, - backend.host, - backend.cluster_name) - - self.volume_rpcapi.create_group(context, updated_group) - - def schedule_create_volume(self, context, request_spec, filter_properties): - backend = self._schedule(context, request_spec, filter_properties) - - if not backend: - raise exception.NoValidBackend(reason=_("No weighed backends " - "available")) - - backend = backend.obj - volume_id = request_spec['volume_id'] - - updated_volume = driver.volume_update_db(context, volume_id, - backend.host, - backend.cluster_name) - self._post_select_populate_filter_properties(filter_properties, - backend) - - # context is not serializable - filter_properties.pop('context', None) - - self.volume_rpcapi.create_volume(context, updated_volume, request_spec, - filter_properties, - allow_reschedule=True) - - def backend_passes_filters(self, context, backend, request_spec, - filter_properties): - """Check if the specified backend passes the filters.""" - weighed_backends = self._get_weighted_candidates(context, request_spec, - filter_properties) - # If backend has no pool defined we will ignore it in the comparison - ignore_pool = not bool(utils.extract_host(backend, 'pool')) - 
for weighed_backend in weighed_backends: - backend_id = weighed_backend.obj.backend_id - if ignore_pool: - backend_id = utils.extract_host(backend_id) - if backend_id == backend: - return weighed_backend.obj - - volume_id = request_spec.get('volume_id', '??volume_id missing??') - raise exception.NoValidBackend(reason=_('Cannot place volume %(id)s ' - 'on %(backend)s') % - {'id': volume_id, - 'backend': backend}) - - def find_retype_backend(self, context, request_spec, - filter_properties=None, migration_policy='never'): - """Find a backend that can accept the volume with its new type.""" - filter_properties = filter_properties or {} - backend = (request_spec['volume_properties'].get('cluster_name') - or request_spec['volume_properties']['host']) - - # The volume already exists on this backend, and so we shouldn't check - # if it can accept the volume again in the CapacityFilter. - filter_properties['vol_exists_on'] = backend - - weighed_backends = self._get_weighted_candidates(context, request_spec, - filter_properties) - if not weighed_backends: - raise exception.NoValidBackend( - reason=_('No valid backends for volume %(id)s with type ' - '%(type)s') % {'id': request_spec['volume_id'], - 'type': request_spec['volume_type']}) - - for weighed_backend in weighed_backends: - backend_state = weighed_backend.obj - if backend_state.backend_id == backend: - return backend_state - - if utils.extract_host(backend, 'pool') is None: - # legacy volumes created before pool is introduced has no pool - # info in host. But host_state.host always include pool level - # info. In this case if above exact match didn't work out, we - # find host_state that are of the same host of volume being - # retyped. In other words, for legacy volumes, retyping could - # cause migration between pools on same host, which we consider - # it is different from migration between hosts thus allow that - # to happen even migration policy is 'never'. 
- for weighed_backend in weighed_backends: - backend_state = weighed_backend.obj - new_backend = utils.extract_host(backend_state.backend_id, - 'backend') - if new_backend == backend: - return backend_state - - if migration_policy == 'never': - raise exception.NoValidBackend( - reason=_('Current backend not valid for volume %(id)s with ' - 'type %(type)s, migration not allowed') % - {'id': request_spec['volume_id'], - 'type': request_spec['volume_type']}) - - top_backend = self._choose_top_backend(weighed_backends, request_spec) - return top_backend.obj - - def get_pools(self, context, filters): - return self.host_manager.get_pools(context, filters) - - def _post_select_populate_filter_properties(self, filter_properties, - backend_state): - """Populate filter properties with additional information. - - Add additional information to the filter properties after a backend has - been selected by the scheduling process. - """ - # Add a retry entry for the selected volume backend: - self._add_retry_backend(filter_properties, backend_state.backend_id) - - def _add_retry_backend(self, filter_properties, backend): - """Add a retry entry for the selected volume backend. - - In the event that the request gets re-scheduled, this entry will signal - that the given backend has already been tried. 
- """ - retry = filter_properties.get('retry', None) - if not retry: - return - # TODO(geguileo): In P - change to only use backends - for key in ('hosts', 'backends'): - backends = retry.get(key) - if backends is not None: - backends.append(backend) - - def _max_attempts(self): - max_attempts = CONF.scheduler_max_attempts - if max_attempts < 1: - raise exception.InvalidParameterValue( - err=_("Invalid value for 'scheduler_max_attempts', " - "must be >=1")) - return max_attempts - - def _log_volume_error(self, volume_id, retry): - """Log requests with exceptions from previous volume operations.""" - exc = retry.pop('exc', None) # string-ified exception from volume - if not exc: - return # no exception info from a previous attempt, skip - - # TODO(geguileo): In P - change to hosts = retry.get('backends') - backends = retry.get('backends', retry.get('hosts')) - if not backends: - return # no previously attempted hosts, skip - - last_backend = backends[-1] - LOG.error("Error scheduling %(volume_id)s from last vol-service: " - "%(last_backend)s : %(exc)s", - {'volume_id': volume_id, - 'last_backend': last_backend, - 'exc': exc}) - - def _populate_retry(self, filter_properties, properties): - """Populate filter properties with history of retries for request. - - If maximum retries is exceeded, raise NoValidBackend. - """ - max_attempts = self.max_attempts - retry = filter_properties.pop('retry', {}) - - if max_attempts == 1: - # re-scheduling is disabled. 
- return - - # retry is enabled, update attempt count: - if retry: - retry['num_attempts'] += 1 - else: - retry = { - 'num_attempts': 1, - 'backends': [], # list of volume service backends tried - 'hosts': [] # TODO(geguileo): Remove in P and leave backends - } - filter_properties['retry'] = retry - - volume_id = properties.get('volume_id') - self._log_volume_error(volume_id, retry) - - if retry['num_attempts'] > max_attempts: - raise exception.NoValidBackend( - reason=_("Exceeded max scheduling attempts %(max_attempts)d " - "for volume %(volume_id)s") % - {'max_attempts': max_attempts, - 'volume_id': volume_id}) - - def _get_weighted_candidates(self, context, request_spec, - filter_properties=None): - """Return a list of backends that meet required specs. - - Returned list is ordered by their fitness. - """ - elevated = context.elevated() - - # Since Cinder is using mixed filters from Oslo and it's own, which - # takes 'resource_XX' and 'volume_XX' as input respectively, copying - # 'volume_XX' to 'resource_XX' will make both filters happy. - volume_type = request_spec.get("volume_type") - resource_type = volume_type if volume_type is not None else {} - - config_options = self._get_configuration_options() - - if filter_properties is None: - filter_properties = {} - self._populate_retry(filter_properties, - request_spec['volume_properties']) - - request_spec_dict = jsonutils.to_primitive(request_spec) - - filter_properties.update({'context': context, - 'request_spec': request_spec_dict, - 'config_options': config_options, - 'volume_type': volume_type, - 'resource_type': resource_type}) - - self.populate_filter_properties(request_spec, - filter_properties) - - # If multiattach is enabled on a volume, we need to add - # multiattach to extra specs, so that the capability - # filtering is enabled. 
- multiattach = request_spec['volume_properties'].get('multiattach', - False) - if multiattach and 'multiattach' not in resource_type.get( - 'extra_specs', {}): - if 'extra_specs' not in resource_type: - resource_type['extra_specs'] = {} - - resource_type['extra_specs'].update( - multiattach=' True') - - # Find our local list of acceptable backends by filtering and - # weighing our options. we virtually consume resources on - # it so subsequent selections can adjust accordingly. - - # Note: remember, we are using an iterator here. So only - # traverse this list once. - backends = self.host_manager.get_all_backend_states(elevated) - - # Filter local hosts based on requirements ... - backends = self.host_manager.get_filtered_backends(backends, - filter_properties) - if not backends: - return [] - - LOG.debug("Filtered %s", backends) - # weighted_backends = WeightedHost() ... the best - # backend for the job. - weighed_backends = self.host_manager.get_weighed_backends( - backends, filter_properties) - return weighed_backends - - def _get_weighted_candidates_generic_group( - self, context, group_spec, request_spec_list, - group_filter_properties=None, - filter_properties_list=None): - """Finds backends that supports the group. - - Returns a list of backends that meet the required specs, - ordered by their fitness. - """ - elevated = context.elevated() - - backends_by_group_type = self._get_weighted_candidates_by_group_type( - context, group_spec, group_filter_properties) - - weighed_backends = [] - backends_by_vol_type = [] - index = 0 - for request_spec in request_spec_list: - volume_properties = request_spec['volume_properties'] - # Since Cinder is using mixed filters from Oslo and it's own, which - # takes 'resource_XX' and 'volume_XX' as input respectively, - # copying 'volume_XX' to 'resource_XX' will make both filters - # happy. 
- resource_properties = volume_properties.copy() - volume_type = request_spec.get("volume_type", None) - resource_type = request_spec.get("volume_type", None) - request_spec.update({'resource_properties': resource_properties}) - - config_options = self._get_configuration_options() - - filter_properties = {} - if filter_properties_list: - filter_properties = filter_properties_list[index] - if filter_properties is None: - filter_properties = {} - self._populate_retry(filter_properties, resource_properties) - - # Add group_support in extra_specs if it is not there. - # Make sure it is populated in filter_properties - # if 'group_support' not in resource_type.get( - # 'extra_specs', {}): - # resource_type['extra_specs'].update( - # group_support=' True') - - filter_properties.update({'context': context, - 'request_spec': request_spec, - 'config_options': config_options, - 'volume_type': volume_type, - 'resource_type': resource_type}) - - self.populate_filter_properties(request_spec, - filter_properties) - - # Find our local list of acceptable backends by filtering and - # weighing our options. we virtually consume resources on - # it so subsequent selections can adjust accordingly. - - # Note: remember, we are using an iterator here. So only - # traverse this list once. - all_backends = self.host_manager.get_all_backend_states(elevated) - if not all_backends: - return [] - - # Filter local backends based on requirements ... - backends = self.host_manager.get_filtered_backends( - all_backends, filter_properties) - - if not backends: - return [] - - LOG.debug("Filtered %s", backends) - - # weighted_backend = WeightedHost() ... the best - # backend for the job. 
- temp_weighed_backends = self.host_manager.get_weighed_backends( - backends, - filter_properties) - if not temp_weighed_backends: - return [] - if index == 0: - backends_by_vol_type = temp_weighed_backends - else: - backends_by_vol_type = self._find_valid_backends( - backends_by_vol_type, temp_weighed_backends) - if not backends_by_vol_type: - return [] - - index += 1 - - # Find backends selected by both the group type and volume types. - weighed_backends = self._find_valid_backends(backends_by_vol_type, - backends_by_group_type) - - return weighed_backends - - def _find_valid_backends(self, backend_list1, backend_list2): - new_backends = [] - for backend1 in backend_list1: - for backend2 in backend_list2: - # Should schedule creation of group on backend level, - # not pool level. - if (utils.extract_host(backend1.obj.backend_id) == - utils.extract_host(backend2.obj.backend_id)): - new_backends.append(backend1) - if not new_backends: - return [] - return new_backends - - def _get_weighted_candidates_by_group_type( - self, context, group_spec, - group_filter_properties=None): - """Finds backends that supports the group type. - - Returns a list of backends that meet the required specs, - ordered by their fitness. - """ - elevated = context.elevated() - - weighed_backends = [] - volume_properties = group_spec['volume_properties'] - # Since Cinder is using mixed filters from Oslo and it's own, which - # takes 'resource_XX' and 'volume_XX' as input respectively, - # copying 'volume_XX' to 'resource_XX' will make both filters - # happy. 
- resource_properties = volume_properties.copy() - group_type = group_spec.get("group_type", None) - resource_type = group_spec.get("group_type", None) - group_spec.update({'resource_properties': resource_properties}) - - config_options = self._get_configuration_options() - - if group_filter_properties is None: - group_filter_properties = {} - self._populate_retry(group_filter_properties, resource_properties) - - group_filter_properties.update({'context': context, - 'request_spec': group_spec, - 'config_options': config_options, - 'group_type': group_type, - 'resource_type': resource_type}) - - self.populate_filter_properties(group_spec, - group_filter_properties) - - # Find our local list of acceptable backends by filtering and - # weighing our options. we virtually consume resources on - # it so subsequent selections can adjust accordingly. - - # Note: remember, we are using an iterator here. So only - # traverse this list once. - all_backends = self.host_manager.get_all_backend_states(elevated) - if not all_backends: - return [] - - # Filter local backends based on requirements ... - backends = self.host_manager.get_filtered_backends( - all_backends, group_filter_properties) - - if not backends: - return [] - - LOG.debug("Filtered %s", backends) - - # weighted_backends = WeightedHost() ... the best backend for the job. - weighed_backends = self.host_manager.get_weighed_backends( - backends, - group_filter_properties) - if not weighed_backends: - return [] - - return weighed_backends - - def _schedule(self, context, request_spec, filter_properties=None): - weighed_backends = self._get_weighted_candidates(context, request_spec, - filter_properties) - # When we get the weighed_backends, we clear those backends that don't - # match the group's backend. - group_backend = request_spec.get('group_backend') - if weighed_backends and group_backend: - # Get host name including host@backend#pool info from - # weighed_backends. 
- for backend in weighed_backends[::-1]: - backend_id = utils.extract_host(backend.obj.backend_id) - if backend_id != group_backend: - weighed_backends.remove(backend) - if not weighed_backends: - LOG.warning('No weighed backend found for volume ' - 'with properties: %s', - filter_properties['request_spec'].get('volume_type')) - return None - return self._choose_top_backend(weighed_backends, request_spec) - - def _schedule_generic_group(self, context, group_spec, request_spec_list, - group_filter_properties=None, - filter_properties_list=None): - weighed_backends = self._get_weighted_candidates_generic_group( - context, - group_spec, - request_spec_list, - group_filter_properties, - filter_properties_list) - if not weighed_backends: - return None - return self._choose_top_backend_generic_group(weighed_backends) - - def _choose_top_backend(self, weighed_backends, request_spec): - top_backend = weighed_backends[0] - backend_state = top_backend.obj - LOG.debug("Choosing %s", backend_state.backend_id) - volume_properties = request_spec['volume_properties'] - backend_state.consume_from_volume(volume_properties) - return top_backend - - def _choose_top_backend_generic_group(self, weighed_backends): - top_backend = weighed_backends[0] - backend_state = top_backend.obj - LOG.debug("Choosing %s", backend_state.backend_id) - return top_backend diff --git a/cinder/scheduler/filters/__init__.py b/cinder/scheduler/filters/__init__.py deleted file mode 100644 index a4228f8b6..000000000 --- a/cinder/scheduler/filters/__init__.py +++ /dev/null @@ -1,47 +0,0 @@ -# Copyright (c) 2011 OpenStack Foundation. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. 
You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -""" -Scheduler host filters -""" - -from cinder.scheduler import base_filter - - -class BaseBackendFilter(base_filter.BaseFilter): - """Base class for host filters.""" - def _filter_one(self, obj, filter_properties): - """Return True if the object passes the filter, otherwise False.""" - # For backward compatibility with out of tree filters - passes_method = getattr(self, 'host_passes', self.backend_passes) - return passes_method(obj, filter_properties) - - def backend_passes(self, host_state, filter_properties): - """Return True if the HostState passes the filter, otherwise False. - - Override this in a subclass. - """ - raise NotImplementedError() - - -class BackendFilterHandler(base_filter.BaseFilterHandler): - def __init__(self, namespace): - super(BackendFilterHandler, self).__init__(BaseHostFilter, namespace) - - -# NOTE(geguileo): For backward compatibility with external filters that -# inherit from these classes -BaseHostFilter = BaseBackendFilter -HostFilterHandler = BackendFilterHandler diff --git a/cinder/scheduler/filters/affinity_filter.py b/cinder/scheduler/filters/affinity_filter.py deleted file mode 100644 index bcc56ed74..000000000 --- a/cinder/scheduler/filters/affinity_filter.py +++ /dev/null @@ -1,101 +0,0 @@ -# Copyright 2014, eBay Inc. -# Copyright 2014, OpenStack Foundation -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -from oslo_utils import uuidutils - -from cinder.scheduler import filters -from cinder.volume import api as volume - - -class AffinityFilter(filters.BaseBackendFilter): - def __init__(self): - self.volume_api = volume.API() - - def _get_volumes(self, context, affinity_uuids, backend_state): - filters = {'id': affinity_uuids, 'deleted': False} - if backend_state.cluster_name: - filters['cluster_name'] = backend_state.cluster_name - else: - filters['host'] = backend_state.host - return self.volume_api.get_all(context, filters=filters) - - -class DifferentBackendFilter(AffinityFilter): - """Schedule volume on a different back-end from a set of volumes.""" - - def backend_passes(self, backend_state, filter_properties): - context = filter_properties['context'] - scheduler_hints = filter_properties.get('scheduler_hints') or {} - - affinity_uuids = scheduler_hints.get('different_host', []) - - # scheduler hint verification: affinity_uuids can be a list of uuids - # or single uuid. The checks here is to make sure every single string - # in the list looks like a uuid, otherwise, this filter will fail to - # pass. Note that the filter does *NOT* ignore string doesn't look - # like a uuid, it is better to fail the request than serving it wrong. 
- if isinstance(affinity_uuids, list): - for uuid in affinity_uuids: - if uuidutils.is_uuid_like(uuid): - continue - else: - return False - elif uuidutils.is_uuid_like(affinity_uuids): - affinity_uuids = [affinity_uuids] - else: - # Not a list, not a string looks like uuid, don't pass it - # to DB for query to avoid potential risk. - return False - - if affinity_uuids: - return not self._get_volumes(context, affinity_uuids, - backend_state) - # With no different_host key - return True - - -class SameBackendFilter(AffinityFilter): - """Schedule volume on the same back-end as another volume.""" - - def backend_passes(self, backend_state, filter_properties): - context = filter_properties['context'] - scheduler_hints = filter_properties.get('scheduler_hints') or {} - - affinity_uuids = scheduler_hints.get('same_host', []) - - # scheduler hint verification: affinity_uuids can be a list of uuids - # or single uuid. The checks here is to make sure every single string - # in the list looks like a uuid, otherwise, this filter will fail to - # pass. Note that the filter does *NOT* ignore string doesn't look - # like a uuid, it is better to fail the request than serving it wrong. - if isinstance(affinity_uuids, list): - for uuid in affinity_uuids: - if uuidutils.is_uuid_like(uuid): - continue - else: - return False - elif uuidutils.is_uuid_like(affinity_uuids): - affinity_uuids = [affinity_uuids] - else: - # Not a list, not a string looks like uuid, don't pass it - # to DB for query to avoid potential risk. - return False - - if affinity_uuids: - return self._get_volumes(context, affinity_uuids, backend_state) - - # With no same_host key - return True diff --git a/cinder/scheduler/filters/availability_zone_filter.py b/cinder/scheduler/filters/availability_zone_filter.py deleted file mode 100644 index 57b0a5495..000000000 --- a/cinder/scheduler/filters/availability_zone_filter.py +++ /dev/null @@ -1,33 +0,0 @@ -# Copyright (c) 2011-2012 OpenStack Foundation. 
-# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from cinder.scheduler import filters - - -class AvailabilityZoneFilter(filters.BaseBackendFilter): - """Filters Backends by availability zone.""" - - # Availability zones do not change within a request - run_filter_once_per_request = True - - def backend_passes(self, backend_state, filter_properties): - spec = filter_properties.get('request_spec', {}) - props = spec.get('resource_properties', {}) - availability_zone = props.get('availability_zone') - - if availability_zone: - return (availability_zone == - backend_state.service['availability_zone']) - return True diff --git a/cinder/scheduler/filters/capabilities_filter.py b/cinder/scheduler/filters/capabilities_filter.py deleted file mode 100644 index d98caa498..000000000 --- a/cinder/scheduler/filters/capabilities_filter.py +++ /dev/null @@ -1,89 +0,0 @@ -# Copyright (c) 2011 OpenStack Foundation. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the -# License for the specific language governing permissions and limitations -# under the License. - -from oslo_log import log as logging - -from cinder.scheduler import filters -from cinder.scheduler.filters import extra_specs_ops - -LOG = logging.getLogger(__name__) - - -class CapabilitiesFilter(filters.BaseBackendFilter): - """BackendFilter to work with resource (instance & volume) type records.""" - - def _satisfies_extra_specs(self, capabilities, resource_type): - """Check if capabilities satisfy resource type requirements. - - Check that the capabilities provided by the services satisfy - the extra specs associated with the resource type. - """ - - if not resource_type: - return True - - extra_specs = resource_type.get('extra_specs', []) - if not extra_specs: - return True - - for key, req in extra_specs.items(): - - # Either not scoped format, or in capabilities scope - scope = key.split(':') - - # Ignore scoped (such as vendor-specific) capabilities - if len(scope) > 1 and scope[0] != "capabilities": - continue - # Strip off prefix if spec started with 'capabilities:' - elif scope[0] == "capabilities": - del scope[0] - - cap = capabilities - for index in range(len(scope)): - try: - cap = cap[scope[index]] - except (TypeError, KeyError): - LOG.debug("Backend doesn't provide capability '%(cap)s' ", - {'cap': scope[index]}) - return False - - # Make all capability values a list so we can handle lists - cap_list = [cap] if not isinstance(cap, list) else cap - - # Loop through capability values looking for any match - for cap_value in cap_list: - if extra_specs_ops.match(cap_value, req): - break - else: - # Nothing matched, so bail out - LOG.debug('Volume type extra spec requirement ' - '"%(key)s=%(req)s" does not match reported ' - 'capability "%(cap)s"', - {'key': key, 'req': req, 'cap': cap}) - return False - return True - - def backend_passes(self, backend_state, filter_properties): - """Return a list of backends that can create resource_type.""" - # 
Note(zhiteng) Currently only Cinder and Nova are using - # this filter, so the resource type is either instance or - # volume. - resource_type = filter_properties.get('resource_type') - if not self._satisfies_extra_specs(backend_state.capabilities, - resource_type): - LOG.debug("%(backend_state)s fails resource_type extra_specs " - "requirements", {'backend_state': backend_state}) - return False - return True diff --git a/cinder/scheduler/filters/capacity_filter.py b/cinder/scheduler/filters/capacity_filter.py deleted file mode 100644 index 9789ea84a..000000000 --- a/cinder/scheduler/filters/capacity_filter.py +++ /dev/null @@ -1,190 +0,0 @@ -# Copyright (c) 2012 Intel -# Copyright (c) 2012 OpenStack Foundation -# Copyright (c) 2015 EMC Corporation -# -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- - -import math - -from oslo_log import log as logging - -from cinder.scheduler import filters - - -LOG = logging.getLogger(__name__) - - -class CapacityFilter(filters.BaseBackendFilter): - """Capacity filters based on volume backend's capacity utilization.""" - - def backend_passes(self, backend_state, filter_properties): - """Return True if host has sufficient capacity.""" - - volid = None - # If the volume already exists on this host, don't fail it for - # insufficient capacity (e.g., if we are retyping) - if backend_state.backend_id == filter_properties.get('vol_exists_on'): - return True - - spec = filter_properties.get('request_spec') - if spec: - volid = spec.get('volume_id') - - grouping = 'cluster' if backend_state.cluster_name else 'host' - if filter_properties.get('new_size'): - # If new_size is passed, we are allocating space to extend a volume - requested_size = (int(filter_properties.get('new_size')) - - int(filter_properties.get('size'))) - LOG.debug('Checking if %(grouping)s %(grouping_name)s can extend ' - 'the volume %(id)s in %(size)s GB', - {'grouping': grouping, - 'grouping_name': backend_state.backend_id, 'id': volid, - 'size': requested_size}) - else: - requested_size = filter_properties.get('size') - LOG.debug('Checking if %(grouping)s %(grouping_name)s can create ' - 'a %(size)s GB volume (%(id)s)', - {'grouping': grouping, - 'grouping_name': backend_state.backend_id, 'id': volid, - 'size': requested_size}) - - # requested_size is 0 means that it's a manage request. 
- if requested_size == 0: - return True - - if backend_state.free_capacity_gb is None: - # Fail Safe - LOG.error("Free capacity not set: " - "volume node info collection broken.") - return False - - free_space = backend_state.free_capacity_gb - total_space = backend_state.total_capacity_gb - reserved = float(backend_state.reserved_percentage) / 100 - if free_space in ['infinite', 'unknown']: - # NOTE(zhiteng) for those back-ends cannot report actual - # available capacity, we assume it is able to serve the - # request. Even if it was not, the retry mechanism is - # able to handle the failure by rescheduling - return True - elif total_space in ['infinite', 'unknown']: - # If total_space is 'infinite' or 'unknown' and reserved - # is 0, we assume the back-ends can serve the request. - # If total_space is 'infinite' or 'unknown' and reserved - # is not 0, we cannot calculate the reserved space. - # float(total_space) will throw an exception. total*reserved - # also won't work. So the back-ends cannot serve the request. - if reserved == 0: - return True - LOG.debug("Cannot calculate GB of reserved space (%s%%) with " - "backend's reported total capacity '%s'", - backend_state.reserved_percentage, total_space) - return False - total = float(total_space) - if total <= 0: - LOG.warning("Insufficient free space for volume creation. " - "Total capacity is %(total).2f on %(grouping)s " - "%(grouping_name)s.", - {"total": total, - "grouping": grouping, - "grouping_name": backend_state.backend_id}) - return False - - # Calculate how much free space is left after taking into account - # the reserved space. - free = free_space - math.floor(total * reserved) - - # NOTE(xyang): If 'provisioning:type' is 'thick' in extra_specs, - # we will not use max_over_subscription_ratio and - # provisioned_capacity_gb to determine whether a volume can be - # provisioned. Instead free capacity will be used to evaluate. 
- thin = True - vol_type = filter_properties.get('volume_type', {}) or {} - provision_type = vol_type.get('extra_specs', {}).get( - 'provisioning:type') - if provision_type == 'thick': - thin = False - - # Only evaluate using max_over_subscription_ratio if - # thin_provisioning_support is True. Check if the ratio of - # provisioned capacity over total capacity has exceeded over - # subscription ratio. - if (thin and backend_state.thin_provisioning_support and - backend_state.max_over_subscription_ratio >= 1): - provisioned_ratio = ((backend_state.provisioned_capacity_gb + - requested_size) / total) - if provisioned_ratio > backend_state.max_over_subscription_ratio: - msg_args = { - "provisioned_ratio": provisioned_ratio, - "oversub_ratio": backend_state.max_over_subscription_ratio, - "grouping": grouping, - "grouping_name": backend_state.backend_id, - } - LOG.warning( - "Insufficient free space for thin provisioning. " - "The ratio of provisioned capacity over total capacity " - "%(provisioned_ratio).2f has exceeded the maximum over " - "subscription ratio %(oversub_ratio).2f on %(grouping)s " - "%(grouping_name)s.", msg_args) - return False - else: - # Thin provisioning is enabled and projected over-subscription - # ratio does not exceed max_over_subscription_ratio. The host - # passes if "adjusted" free virtual capacity is enough to - # accommodate the volume. Adjusted free virtual capacity is - # the currently available free capacity (taking into account - # of reserved space) which we can over-subscribe. 
- adjusted_free_virtual = ( - free * backend_state.max_over_subscription_ratio) - res = adjusted_free_virtual >= requested_size - if not res: - msg_args = {"available": adjusted_free_virtual, - "size": requested_size, - "grouping": grouping, - "grouping_name": backend_state.backend_id} - LOG.warning("Insufficient free virtual space " - "(%(available)sGB) to accomodate thin " - "provisioned %(size)sGB volume on %(grouping)s" - " %(grouping_name)s.", msg_args) - return res - elif thin and backend_state.thin_provisioning_support: - LOG.warning("Filtering out %(grouping)s %(grouping_name)s " - "with an invalid maximum over subscription ratio " - "of %(oversub_ratio).2f. The ratio should be a " - "minimum of 1.0.", - {"oversub_ratio": - backend_state.max_over_subscription_ratio, - "grouping": grouping, - "grouping_name": backend_state.backend_id}) - return False - - msg_args = {"grouping_name": backend_state.backend_id, - "grouping": grouping, - "requested": requested_size, - "available": free} - - if free < requested_size: - LOG.warning("Insufficient free space for volume creation " - "on %(grouping)s %(grouping_name)s (requested / " - "avail): %(requested)s/%(available)s", - msg_args) - return False - - LOG.debug("Space information for volume creation " - "on %(grouping)s %(grouping_name)s (requested / avail): " - "%(requested)s/%(available)s", msg_args) - - return True diff --git a/cinder/scheduler/filters/driver_filter.py b/cinder/scheduler/filters/driver_filter.py deleted file mode 100644 index aa0338b5c..000000000 --- a/cinder/scheduler/filters/driver_filter.py +++ /dev/null @@ -1,133 +0,0 @@ -# Copyright (c) 2014 Hewlett-Packard Development Company, L.P. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. 
You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from oslo_log import log as logging -import six - -from cinder.scheduler.evaluator import evaluator -from cinder.scheduler import filters - - -LOG = logging.getLogger(__name__) - - -class DriverFilter(filters.BaseBackendFilter): - """DriverFilter filters backend based on a 'filter function' and metrics. - - DriverFilter filters based on volume backend's provided 'filter function' - and metrics. - """ - - def backend_passes(self, backend_state, filter_properties): - """Determines if a backend has a passing filter_function or not.""" - stats = self._generate_stats(backend_state, filter_properties) - - LOG.debug("Checking backend '%s'", - stats['backend_stats']['backend_id']) - result = self._check_filter_function(stats) - LOG.debug("Result: %s", result) - LOG.debug("Done checking backend '%s'", - stats['backend_stats']['backend_id']) - - return result - - def _check_filter_function(self, stats): - """Checks if a volume passes a backend's filter function. - - Returns a tuple in the format (filter_passing, filter_invalid). - Both values are booleans. - """ - if stats['filter_function'] is None: - LOG.debug("Filter function not set :: passing backend") - return True - - try: - filter_result = self._run_evaluator(stats['filter_function'], - stats) - except Exception as ex: - # Warn the admin for now that there is an error in the - # filter function. 
- LOG.warning("Error in filtering function " - "'%(function)s' : '%(error)s' :: failing backend", - {'function': stats['filter_function'], - 'error': ex, }) - return False - - return filter_result - - def _run_evaluator(self, func, stats): - """Evaluates a given function using the provided available stats.""" - backend_stats = stats['backend_stats'] - backend_caps = stats['backend_caps'] - extra_specs = stats['extra_specs'] - qos_specs = stats['qos_specs'] - volume_stats = stats['volume_stats'] - - result = evaluator.evaluate( - func, - extra=extra_specs, - stats=backend_stats, - capabilities=backend_caps, - volume=volume_stats, - qos=qos_specs) - - return result - - def _generate_stats(self, backend_state, filter_properties): - """Generates statistics from backend and volume data.""" - - backend_stats = { - 'host': backend_state.host, - 'cluster_name': backend_state.cluster_name, - 'backend_id': backend_state.backend_id, - 'volume_backend_name': backend_state.volume_backend_name, - 'vendor_name': backend_state.vendor_name, - 'driver_version': backend_state.driver_version, - 'storage_protocol': backend_state.storage_protocol, - 'QoS_support': backend_state.QoS_support, - 'total_capacity_gb': backend_state.total_capacity_gb, - 'allocated_capacity_gb': backend_state.allocated_capacity_gb, - 'free_capacity_gb': backend_state.free_capacity_gb, - 'reserved_percentage': backend_state.reserved_percentage, - 'updated': backend_state.updated, - } - - backend_caps = backend_state.capabilities - - filter_function = None - - if ('filter_function' in backend_caps and - backend_caps['filter_function'] is not None): - filter_function = six.text_type(backend_caps['filter_function']) - - qos_specs = filter_properties.get('qos_specs', {}) - - volume_type = filter_properties.get('volume_type', {}) - extra_specs = volume_type.get('extra_specs', {}) - - request_spec = filter_properties.get('request_spec', {}) - volume_stats = request_spec.get('volume_properties', {}) - - stats = { - 
'backend_stats': backend_stats, - 'backend_caps': backend_caps, - 'extra_specs': extra_specs, - 'qos_specs': qos_specs, - 'volume_stats': volume_stats, - 'volume_type': volume_type, - 'filter_function': filter_function, - } - - return stats diff --git a/cinder/scheduler/filters/extra_specs_ops.py b/cinder/scheduler/filters/extra_specs_ops.py deleted file mode 100644 index 24b48a317..000000000 --- a/cinder/scheduler/filters/extra_specs_ops.py +++ /dev/null @@ -1,77 +0,0 @@ -# Copyright (c) 2011 OpenStack Foundation. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import operator - -from oslo_utils import strutils - -# 1. The following operations are supported: -# =, s==, s!=, s>=, s>, s<=, s<, , , , ==, !=, >=, <= -# 2. Note that is handled in a different way below. -# 3. If the first word in the extra_specs is not one of the operators, -# it is ignored. 
-_op_methods = {'=': lambda x, y: float(x) >= float(y), - '': lambda x, y: y in x, - '': lambda x, y: (strutils.bool_from_string(x) is - strutils.bool_from_string(y)), - '==': lambda x, y: float(x) == float(y), - '!=': lambda x, y: float(x) != float(y), - '>=': lambda x, y: float(x) >= float(y), - '<=': lambda x, y: float(x) <= float(y), - 's==': operator.eq, - 's!=': operator.ne, - 's<': operator.lt, - 's<=': operator.le, - 's>': operator.gt, - 's>=': operator.ge} - - -def match(value, req): - if req is None: - if value is None: - return True - else: - return False - words = req.split() - - op = method = None - if words: - op = words.pop(0) - method = _op_methods.get(op) - - if op != '' and not method: - return value == req - - if value is None: - return False - - if op == '': # Ex: v1 v2 v3 - while True: - if words.pop(0) == value: - return True - if not words: - break - op = words.pop(0) # remove a keyword - if not words: - break - return False - - try: - if words and method(value, words[0]): - return True - except ValueError: - pass - - return False diff --git a/cinder/scheduler/filters/ignore_attempted_hosts_filter.py b/cinder/scheduler/filters/ignore_attempted_hosts_filter.py deleted file mode 100644 index eea84906a..000000000 --- a/cinder/scheduler/filters/ignore_attempted_hosts_filter.py +++ /dev/null @@ -1,58 +0,0 @@ -# Copyright (c) 2011 OpenStack Foundation. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -from oslo_log import log as logging - -from cinder.scheduler import filters - -LOG = logging.getLogger(__name__) - - -class IgnoreAttemptedHostsFilter(filters.BaseBackendFilter): - """Filter out previously attempted hosts - - A host passes this filter if it has not already been attempted for - scheduling. The scheduler needs to add previously attempted hosts - to the 'retry' key of filter_properties in order for this to work - correctly. For example:: - - { - 'retry': { - 'backends': ['backend1', 'backend2'], - 'num_attempts': 3, - } - } - """ - - def backend_passes(self, backend_state, filter_properties): - """Skip nodes that have already been attempted.""" - attempted = filter_properties.get('retry') - if not attempted: - # Re-scheduling is disabled - LOG.debug("Re-scheduling is disabled.") - return True - - # TODO(geguileo): In P - Just use backends - backends = attempted.get('backends', attempted.get('hosts', [])) - backend = backend_state.backend_id - - passes = backend not in backends - pass_msg = "passes" if passes else "fails" - - LOG.debug("Backend %(backend)s %(pass_msg)s. Previously tried " - "backends: %(backends)s", {'backend': backend, - 'pass_msg': pass_msg, - 'backends': backends}) - return passes diff --git a/cinder/scheduler/filters/instance_locality_filter.py b/cinder/scheduler/filters/instance_locality_filter.py deleted file mode 100644 index 83ecafcec..000000000 --- a/cinder/scheduler/filters/instance_locality_filter.py +++ /dev/null @@ -1,120 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2014, Adrien Vergé -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -from oslo_log import log as logging -from oslo_utils import uuidutils - -from cinder.compute import nova -from cinder import exception -from cinder.i18n import _ -from cinder.scheduler import filters -from cinder.volume import utils as volume_utils - - -LOG = logging.getLogger(__name__) - -HINT_KEYWORD = 'local_to_instance' -INSTANCE_HOST_PROP = 'OS-EXT-SRV-ATTR:host' -REQUESTS_TIMEOUT = 5 - - -class InstanceLocalityFilter(filters.BaseBackendFilter): - """Schedule volume on the same host as a given instance. - - This filter enables selection of a storage back-end located on the host - where the instance's hypervisor is running. This provides data locality: - the instance and the volume are located on the same physical machine. - - In order to work: - - - The Extended Server Attributes extension needs to be active in Nova (this - is by default), so that the 'OS-EXT-SRV-ATTR:host' property is returned - when requesting instance info. - - Either an account with privileged rights for Nova must be configured in - Cinder configuration (configure a keystone authentication plugin in the - [nova] section), or the user making the call needs to have sufficient - rights (see 'extended_server_attributes' in Nova policy). - - """ - - def __init__(self): - # Cache Nova API answers directly into the Filter object. - # Since a BaseBackendFilter instance lives only during the volume's - # scheduling, the cache is re-created for every new volume creation. 
- self._cache = {} - super(InstanceLocalityFilter, self).__init__() - - def _nova_has_extended_server_attributes(self, context): - """Check Extended Server Attributes presence - - Find out whether the Extended Server Attributes extension is activated - in Nova or not. Cache the result to query Nova only once. - """ - - if not hasattr(self, '_nova_ext_srv_attr'): - self._nova_ext_srv_attr = nova.API().has_extension( - context, 'ExtendedServerAttributes', timeout=REQUESTS_TIMEOUT) - - return self._nova_ext_srv_attr - - def backend_passes(self, backend_state, filter_properties): - context = filter_properties['context'] - backend = volume_utils.extract_host(backend_state.backend_id, 'host') - - scheduler_hints = filter_properties.get('scheduler_hints') or {} - instance_uuid = scheduler_hints.get(HINT_KEYWORD, None) - - # Without 'local_to_instance' hint - if not instance_uuid: - return True - - if not uuidutils.is_uuid_like(instance_uuid): - raise exception.InvalidUUID(uuid=instance_uuid) - - # TODO(adrienverge): Currently it is not recommended to allow instance - # migrations for hypervisors where this hint will be used. In case of - # instance migration, a previously locally-created volume will not be - # automatically migrated. Also in case of instance migration during the - # volume's scheduling, the result is unpredictable. A future - # enhancement would be to subscribe to Nova migration events (e.g. via - # Ceilometer). 
- - # First, lookup for already-known information in local cache - if instance_uuid in self._cache: - return self._cache[instance_uuid] == backend - - if not self._nova_has_extended_server_attributes(context): - LOG.warning('Hint "%s" dropped because ' - 'ExtendedServerAttributes not active in Nova.', - HINT_KEYWORD) - raise exception.CinderException(_('Hint "%s" not supported.') % - HINT_KEYWORD) - - server = nova.API().get_server(context, instance_uuid, - privileged_user=True, - timeout=REQUESTS_TIMEOUT) - - if not hasattr(server, INSTANCE_HOST_PROP): - LOG.warning('Hint "%s" dropped because Nova did not return ' - 'enough information. Either Nova policy needs to ' - 'be changed or a privileged account for Nova ' - 'should be specified in conf.', HINT_KEYWORD) - raise exception.CinderException(_('Hint "%s" not supported.') % - HINT_KEYWORD) - - self._cache[instance_uuid] = getattr(server, INSTANCE_HOST_PROP) - - # Match if given instance is hosted on backend - return self._cache[instance_uuid] == backend diff --git a/cinder/scheduler/filters/json_filter.py b/cinder/scheduler/filters/json_filter.py deleted file mode 100644 index ee763a9d1..000000000 --- a/cinder/scheduler/filters/json_filter.py +++ /dev/null @@ -1,151 +0,0 @@ -# Copyright (c) 2011 OpenStack Foundation. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
import operator

from oslo_serialization import jsonutils
import six

from cinder.scheduler import filters


class JsonFilter(filters.BaseBackendFilter):
    """Backend filter for simple JSON-based grammar for selecting backends."""

    def _op_compare(self, args, op):
        """Compare first item of args with the rest using specified operator.

        Returns True only when ``op`` holds between ``args[0]`` and every
        remaining element.  A list with fewer than two items can never
        satisfy a comparison, so it yields False.
        """
        if len(args) < 2:
            return False
        head = args[0]
        rest = args[1:]
        if op is operator.contains:
            return head in rest
        return all(op(head, candidate) for candidate in rest)

    def _equals(self, args):
        """First term is == all the other terms."""
        return self._op_compare(args, operator.eq)

    def _less_than(self, args):
        """First term is < all the other terms."""
        return self._op_compare(args, operator.lt)

    def _greater_than(self, args):
        """First term is > all the other terms."""
        return self._op_compare(args, operator.gt)

    def _in(self, args):
        """First term is in set of remaining terms."""
        return self._op_compare(args, operator.contains)

    def _less_than_equal(self, args):
        """First term is <= all the other terms."""
        return self._op_compare(args, operator.le)

    def _greater_than_equal(self, args):
        """First term is >= all the other terms."""
        return self._op_compare(args, operator.ge)

    def _not(self, args):
        """Flip each of the arguments."""
        return [not arg for arg in args]

    def _or(self, args):
        """True if any arg is True."""
        return any(args)

    def _and(self, args):
        """True if all args are True."""
        return all(args)

    # Maps grammar tokens to the (unbound) handler; handlers are invoked as
    # handler(self, cooked_args) in _process_filter.
    commands = {
        '=': _equals,
        '<': _less_than,
        '>': _greater_than,
        'in': _in,
        '<=': _less_than_equal,
        '>=': _greater_than_equal,
        'not': _not,
        'or': _or,
        'and': _and,
    }

    def _parse_string(self, string, backend_state):
        """Parse capability lookup strings.

        Strings prefixed with $ are capability lookups in the
        form '$variable' where 'variable' is an attribute in the
        BackendState class.  If $variable is a dictionary, you may
        use: $variable.dictkey
        """
        if not string:
            return None
        if not string.startswith("$"):
            return string

        path = string[1:].split(".")
        value = getattr(backend_state, path[0], None)
        for key in path[1:]:
            # A missing attribute or dict key short-circuits to None.
            if value is None:
                return None
            value = value.get(key)
        return value

    def _process_filter(self, query, backend_state):
        """Recursively parse the query structure."""
        if not query:
            return True
        handler = self.commands[query[0]]
        cooked_args = []
        for raw_arg in query[1:]:
            if isinstance(raw_arg, list):
                cooked = self._process_filter(raw_arg, backend_state)
            elif isinstance(raw_arg, six.string_types):
                cooked = self._parse_string(raw_arg, backend_state)
            else:
                cooked = raw_arg
            if cooked is not None:
                cooked_args.append(cooked)
        return handler(self, cooked_args)

    def backend_passes(self, backend_state, filter_properties):
        """Return True when the backend satisfies the JSON query hint.

        When no 'query' scheduler hint is supplied every backend passes.
        """
        # TODO(zhiteng) Add description for filter_properties structure
        # and scheduler_hints.
        try:
            query = filter_properties['scheduler_hints']['query']
        except KeyError:
            query = None
        if not query:
            return True

        # NOTE(comstud): Not checking capabilities or service for
        # enabled/disabled so that a provided json filter can decide
        result = self._process_filter(jsonutils.loads(query), backend_state)
        if isinstance(result, list):
            # If any succeeded, include the backend
            result = any(result)
        return bool(result)


from oslo_log import log as logging
from oslo_utils import excutils
import taskflow.engines
from taskflow.patterns import linear_flow

from cinder import exception
from cinder import flow_utils
from cinder.message import api as message_api
from cinder.message import message_field
from cinder import rpc
from cinder import utils
from cinder.volume.flows import common

LOG = logging.getLogger(__name__)

ACTION = 'volume:create'
If a delete API call is made, the volume gets instantly - # delete and scheduling will fail when it tries to update the DB entry - # (with the host) in ScheduleCreateVolumeTask below. - volume_type_id = volume.volume_type_id - vol_type = volume.volume_type - return { - 'volume_id': volume.id, - 'snapshot_id': snapshot_id, - 'image_id': image_id, - 'volume_properties': { - 'size': utils.as_int(volume.size, quiet=False), - 'availability_zone': volume.availability_zone, - 'volume_type_id': volume_type_id, - }, - 'volume_type': list(dict(vol_type).items()), - } - - def execute(self, context, request_spec, volume, snapshot_id, - image_id): - # For RPC version < 1.2 backward compatibility - if request_spec is None: - request_spec = self._populate_request_spec(volume.id, - snapshot_id, image_id) - return { - 'request_spec': request_spec, - } - - -class ScheduleCreateVolumeTask(flow_utils.CinderTask): - """Activates a scheduler driver and handles any subsequent failures. - - Notification strategy: on failure the scheduler rpc notifier will be - activated and a notification will be emitted indicating what errored, - the reason, and the request (and misc. other data) that caused the error - to be triggered. 
class ScheduleCreateVolumeTask(flow_utils.CinderTask):
    """Activates a scheduler driver and handles any subsequent failures.

    Notification strategy: on failure the scheduler rpc notifier will be
    activated and a notification will be emitted indicating what errored,
    the reason, and the request (and misc. other data) that caused the error
    to be triggered.

    Reversion strategy: N/A
    """

    FAILURE_TOPIC = "scheduler.create_volume"

    def __init__(self, driver_api, **kwargs):
        super(ScheduleCreateVolumeTask, self).__init__(addons=[ACTION],
                                                       **kwargs)
        self.driver_api = driver_api
        self.message_api = message_api.API()

    def _handle_failure(self, context, request_spec, cause):
        # The finally block guarantees the error line is logged even when
        # emitting the notification itself fails.
        try:
            self._notify_failure(context, request_spec, cause)
        finally:
            LOG.error("Failed to run task %(name)s: %(cause)s",
                      {'cause': cause, 'name': self.name})

    @utils.if_notifications_enabled
    def _notify_failure(self, context, request_spec, cause):
        """When scheduling fails send out an event that it failed."""
        payload = {
            'request_spec': request_spec,
            'volume_properties': request_spec.get('volume_properties', {}),
            'volume_id': request_spec['volume_id'],
            'state': 'error',
            'method': 'create_volume',
            'reason': cause,
        }
        notifier = rpc.get_notifier('scheduler')
        try:
            notifier.error(context, self.FAILURE_TOPIC, payload)
        except exception.CinderException:
            LOG.exception("Failed notifying on %(topic)s "
                          "payload %(payload)s",
                          {'topic': self.FAILURE_TOPIC, 'payload': payload})

    def execute(self, context, request_spec, filter_properties, volume):
        try:
            self.driver_api.schedule_create_volume(context, request_spec,
                                                   filter_properties)
        except Exception as exc:
            self.message_api.create(
                context,
                message_field.Action.SCHEDULE_ALLOCATE_VOLUME,
                resource_uuid=request_spec['volume_id'],
                exception=exc)
            # Notify on the scheduler queue, error out the volume, and
            # re-raise -- unless the failure is NoValidBackend, which is an
            # expected scheduling outcome rather than an internal error.
            reraise = not isinstance(exc, exception.NoValidBackend)
            with excutils.save_and_reraise_exception(reraise=reraise):
                try:
                    self._handle_failure(context, request_spec, exc)
                finally:
                    common.error_out(volume, reason=exc)


def get_flow(context, driver_api, request_spec=None,
             filter_properties=None,
             volume=None, snapshot_id=None, image_id=None):
    """Constructs and returns the scheduler entrypoint flow.

    This flow will do the following:

    1. Inject keys & values for dependent tasks.
    2. Extract a scheduler specification from the provided inputs.
    3. Use provided scheduler driver to select host and pass volume creation
       request further.
    """
    store = {
        'context': context,
        'raw_request_spec': request_spec,
        'filter_properties': filter_properties,
        'volume': volume,
        'snapshot_id': snapshot_id,
        'image_id': image_id,
    }

    scheduler_flow = linear_flow.Flow(ACTION.replace(":", "_") + "_scheduler")

    # Extract and clean the spec from the starting values, then activate the
    # desired scheduler driver (which also owns failure handling).
    scheduler_flow.add(
        ExtractSchedulerSpecTask(rebind={'request_spec': 'raw_request_spec'}),
        ScheduleCreateVolumeTask(driver_api),
    )

    # Load (but do not run) the flow using the provided initial data.
    return taskflow.engines.load(scheduler_flow, store=store)
You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -""" -Manage backends in the current zone. -""" - -import collections - -from oslo_config import cfg -from oslo_log import log as logging -from oslo_utils import importutils -from oslo_utils import strutils -from oslo_utils import timeutils - -from cinder.common import constants -from cinder import context as cinder_context -from cinder import exception -from cinder import objects -from cinder.scheduler import filters -from cinder import utils -from cinder.volume import utils as vol_utils -from cinder.volume import volume_types - - -# FIXME: This file should be renamed to backend_manager, we should also rename -# HostManager class, and scheduler_host_manager option, and also the weight -# classes, and add code to maintain backward compatibility. 
- - -host_manager_opts = [ - cfg.ListOpt('scheduler_default_filters', - default=[ - 'AvailabilityZoneFilter', - 'CapacityFilter', - 'CapabilitiesFilter' - ], - help='Which filter class names to use for filtering hosts ' - 'when not specified in the request.'), - cfg.ListOpt('scheduler_default_weighers', - default=[ - 'CapacityWeigher' - ], - help='Which weigher class names to use for weighing hosts.'), - cfg.StrOpt('scheduler_weight_handler', - default='cinder.scheduler.weights.OrderedHostWeightHandler', - help='Which handler to use for selecting the host/pool ' - 'after weighing'), -] - -CONF = cfg.CONF -CONF.register_opts(host_manager_opts) -CONF.import_opt('scheduler_driver', 'cinder.scheduler.manager') -CONF.import_opt('max_over_subscription_ratio', 'cinder.volume.driver') - -LOG = logging.getLogger(__name__) - - -class ReadOnlyDict(collections.Mapping): - """A read-only dict.""" - def __init__(self, source=None): - if source is not None: - self.data = dict(source) - else: - self.data = {} - - def __getitem__(self, key): - return self.data[key] - - def __iter__(self): - return iter(self.data) - - def __len__(self): - return len(self.data) - - def __repr__(self): - return '%s(%r)' % (self.__class__.__name__, self.data) - - -class BackendState(object): - """Mutable and immutable information tracked for a volume backend.""" - - def __init__(self, host, cluster_name, capabilities=None, service=None): - self.capabilities = None - self.service = None - self.host = host - self.cluster_name = cluster_name - self.update_capabilities(capabilities, service) - - self.volume_backend_name = None - self.vendor_name = None - self.driver_version = 0 - self.storage_protocol = None - self.QoS_support = False - # Mutable available resources. - # These will change as resources are virtually "consumed". 
- self.total_capacity_gb = 0 - # capacity has been allocated in cinder POV, which should be - # sum(vol['size'] for vol in vols_on_hosts) - self.allocated_capacity_gb = 0 - self.free_capacity_gb = None - self.reserved_percentage = 0 - # The apparent allocated space indicating how much capacity - # has been provisioned. This could be the sum of sizes of - # all volumes on a backend, which could be greater than or - # equal to the allocated_capacity_gb. - self.provisioned_capacity_gb = 0 - self.max_over_subscription_ratio = 1.0 - self.thin_provisioning_support = False - self.thick_provisioning_support = False - # Does this backend support attaching a volume to more than - # once host/instance? - self.multiattach = False - - # PoolState for all pools - self.pools = {} - - self.updated = None - - @property - def backend_id(self): - return self.cluster_name or self.host - - def update_capabilities(self, capabilities=None, service=None): - # Read-only capability dicts - - if capabilities is None: - capabilities = {} - self.capabilities = ReadOnlyDict(capabilities) - if service is None: - service = {} - self.service = ReadOnlyDict(service) - - def update_from_volume_capability(self, capability, service=None): - """Update information about a host from its volume_node info. - - 'capability' is the status info reported by volume backend, a typical - capability looks like this: - - .. 
code-block:: python - - { - capability = { - 'volume_backend_name': 'Local iSCSI', # - 'vendor_name': 'OpenStack', # backend level - 'driver_version': '1.0', # mandatory/fixed - 'storage_protocol': 'iSCSI', # stats&capabilities - - 'active_volumes': 10, # - 'IOPS_provisioned': 30000, # optional custom - 'fancy_capability_1': 'eat', # stats & capabilities - 'fancy_capability_2': 'drink', # - - 'pools': [ - {'pool_name': '1st pool', # - 'total_capacity_gb': 500, # mandatory stats for - 'free_capacity_gb': 230, # pools - 'allocated_capacity_gb': 270, # - 'QoS_support': 'False', # - 'reserved_percentage': 0, # - - 'dying_disks': 100, # - 'super_hero_1': 'spider-man', # optional custom - 'super_hero_2': 'flash', # stats & capabilities - 'super_hero_3': 'neoncat' # - }, - {'pool_name': '2nd pool', - 'total_capacity_gb': 1024, - 'free_capacity_gb': 1024, - 'allocated_capacity_gb': 0, - 'QoS_support': 'False', - 'reserved_percentage': 0, - - 'dying_disks': 200, - 'super_hero_1': 'superman', - 'super_hero_2': ' ', - 'super_hero_2': 'Hulk' - } - ] - } - } - - """ - self.update_capabilities(capability, service) - - if capability: - if self.updated and self.updated > capability['timestamp']: - return - - # Update backend level info - self.update_backend(capability) - - # Update pool level info - self.update_pools(capability, service) - - def update_pools(self, capability, service): - """Update storage pools information from backend reported info.""" - if not capability: - return - - pools = capability.get('pools', None) - active_pools = set() - if pools and isinstance(pools, list): - # Update all pools stats according to information from list - # of pools in volume capacity - for pool_cap in pools: - pool_name = pool_cap['pool_name'] - self._append_backend_info(pool_cap) - cur_pool = self.pools.get(pool_name, None) - if not cur_pool: - # Add new pool - cur_pool = PoolState(self.host, self.cluster_name, - pool_cap, pool_name) - self.pools[pool_name] = cur_pool - 
cur_pool.update_from_volume_capability(pool_cap, service) - - active_pools.add(pool_name) - elif pools is None: - # To handle legacy driver that doesn't report pool - # information in the capability, we have to prepare - # a pool from backend level info, or to update the one - # we created in self.pools. - pool_name = self.volume_backend_name - if pool_name is None: - # To get DEFAULT_POOL_NAME - pool_name = vol_utils.extract_host(self.host, 'pool', True) - - if len(self.pools) == 0: - # No pool was there - single_pool = PoolState(self.host, self.cluster_name, - capability, pool_name) - self._append_backend_info(capability) - self.pools[pool_name] = single_pool - else: - # this is an update from legacy driver - try: - single_pool = self.pools[pool_name] - except KeyError: - single_pool = PoolState(self.host, self.cluster_name, - capability, pool_name) - self._append_backend_info(capability) - self.pools[pool_name] = single_pool - - single_pool.update_from_volume_capability(capability, service) - active_pools.add(pool_name) - - # remove non-active pools from self.pools - nonactive_pools = set(self.pools.keys()) - active_pools - for pool in nonactive_pools: - LOG.debug("Removing non-active pool %(pool)s @ %(host)s " - "from scheduler cache.", {'pool': pool, - 'host': self.host}) - del self.pools[pool] - - def _append_backend_info(self, pool_cap): - # Fill backend level info to pool if needed. 
- if not pool_cap.get('volume_backend_name', None): - pool_cap['volume_backend_name'] = self.volume_backend_name - - if not pool_cap.get('storage_protocol', None): - pool_cap['storage_protocol'] = self.storage_protocol - - if not pool_cap.get('vendor_name', None): - pool_cap['vendor_name'] = self.vendor_name - - if not pool_cap.get('driver_version', None): - pool_cap['driver_version'] = self.driver_version - - if not pool_cap.get('timestamp', None): - pool_cap['timestamp'] = self.updated - - def update_backend(self, capability): - self.volume_backend_name = capability.get('volume_backend_name', None) - self.vendor_name = capability.get('vendor_name', None) - self.driver_version = capability.get('driver_version', None) - self.storage_protocol = capability.get('storage_protocol', None) - self.updated = capability['timestamp'] - - def consume_from_volume(self, volume): - """Incrementally update host state from a volume.""" - volume_gb = volume['size'] - self.allocated_capacity_gb += volume_gb - self.provisioned_capacity_gb += volume_gb - if self.free_capacity_gb == 'infinite': - # There's virtually infinite space on back-end - pass - elif self.free_capacity_gb == 'unknown': - # Unable to determine the actual free space on back-end - pass - else: - self.free_capacity_gb -= volume_gb - self.updated = timeutils.utcnow() - - def __repr__(self): - # FIXME(zhiteng) backend level free_capacity_gb isn't as - # meaningful as it used to be before pool is introduced, we'd - # come up with better representation of HostState. 
- grouping = 'cluster' if self.cluster_name else 'host' - grouping_name = self.backend_id - return ("%s '%s': free_capacity_gb: %s, pools: %s" % - (grouping, grouping_name, self.free_capacity_gb, self.pools)) - - -class PoolState(BackendState): - def __init__(self, host, cluster_name, capabilities, pool_name): - new_host = vol_utils.append_host(host, pool_name) - new_cluster = vol_utils.append_host(cluster_name, pool_name) - super(PoolState, self).__init__(new_host, new_cluster, capabilities) - self.pool_name = pool_name - # No pools in pool - self.pools = None - - def update_from_volume_capability(self, capability, service=None): - """Update information about a pool from its volume_node info.""" - self.update_capabilities(capability, service) - if capability: - if self.updated and self.updated > capability['timestamp']: - return - self.update_backend(capability) - - self.total_capacity_gb = capability.get('total_capacity_gb', 0) - self.free_capacity_gb = capability.get('free_capacity_gb', 0) - self.allocated_capacity_gb = capability.get( - 'allocated_capacity_gb', 0) - self.QoS_support = capability.get('QoS_support', False) - self.reserved_percentage = capability.get('reserved_percentage', 0) - # provisioned_capacity_gb is the apparent total capacity of - # all the volumes created on a backend, which is greater than - # or equal to allocated_capacity_gb, which is the apparent - # total capacity of all the volumes created on a backend - # in Cinder. Using allocated_capacity_gb as the default of - # provisioned_capacity_gb if it is not set. 
- self.provisioned_capacity_gb = capability.get( - 'provisioned_capacity_gb', self.allocated_capacity_gb) - self.max_over_subscription_ratio = capability.get( - 'max_over_subscription_ratio', - CONF.max_over_subscription_ratio) - self.thin_provisioning_support = capability.get( - 'thin_provisioning_support', False) - self.thick_provisioning_support = capability.get( - 'thick_provisioning_support', False) - self.multiattach = capability.get('multiattach', False) - - def update_pools(self, capability): - # Do nothing, since we don't have pools within pool, yet - pass - - -class HostManager(object): - """Base HostManager class.""" - - backend_state_cls = BackendState - - REQUIRED_KEYS = frozenset([ - 'pool_name', - 'total_capacity_gb', - 'free_capacity_gb', - 'allocated_capacity_gb', - 'provisioned_capacity_gb', - 'thin_provisioning_support', - 'thick_provisioning_support', - 'max_over_subscription_ratio', - 'reserved_percentage']) - - def __init__(self): - self.service_states = {} # { : {: {cap k : v}}} - self.backend_state_map = {} - self.filter_handler = filters.BackendFilterHandler('cinder.scheduler.' - 'filters') - self.filter_classes = self.filter_handler.get_all_classes() - self.weight_handler = importutils.import_object( - CONF.scheduler_weight_handler, - 'cinder.scheduler.weights') - self.weight_classes = self.weight_handler.get_all_classes() - - self._no_capabilities_backends = set() # Services without capabilities - self._update_backend_state_map(cinder_context.get_admin_context()) - self.service_states_last_update = {} - - def _choose_backend_filters(self, filter_cls_names): - """Return a list of available filter names. - - This function checks input filter names against a predefined set - of acceptable filters (all loaded filters). If input is None, - it uses CONF.scheduler_default_filters instead. 
- """ - if filter_cls_names is None: - filter_cls_names = CONF.scheduler_default_filters - if not isinstance(filter_cls_names, (list, tuple)): - filter_cls_names = [filter_cls_names] - good_filters = [] - bad_filters = [] - for filter_name in filter_cls_names: - found_class = False - for cls in self.filter_classes: - if cls.__name__ == filter_name: - found_class = True - good_filters.append(cls) - break - if not found_class: - bad_filters.append(filter_name) - if bad_filters: - raise exception.SchedulerHostFilterNotFound( - filter_name=", ".join(bad_filters)) - return good_filters - - def _choose_backend_weighers(self, weight_cls_names): - """Return a list of available weigher names. - - This function checks input weigher names against a predefined set - of acceptable weighers (all loaded weighers). If input is None, - it uses CONF.scheduler_default_weighers instead. - """ - if weight_cls_names is None: - weight_cls_names = CONF.scheduler_default_weighers - if not isinstance(weight_cls_names, (list, tuple)): - weight_cls_names = [weight_cls_names] - - good_weighers = [] - bad_weighers = [] - for weigher_name in weight_cls_names: - found_class = False - for cls in self.weight_classes: - if cls.__name__ == weigher_name: - good_weighers.append(cls) - found_class = True - break - if not found_class: - bad_weighers.append(weigher_name) - if bad_weighers: - raise exception.SchedulerHostWeigherNotFound( - weigher_name=", ".join(bad_weighers)) - return good_weighers - - def get_filtered_backends(self, backends, filter_properties, - filter_class_names=None): - """Filter backends and return only ones passing all filters.""" - filter_classes = self._choose_backend_filters(filter_class_names) - return self.filter_handler.get_filtered_objects(filter_classes, - backends, - filter_properties) - - def get_weighed_backends(self, backends, weight_properties, - weigher_class_names=None): - """Weigh the backends.""" - weigher_classes = 
self._choose_backend_weighers(weigher_class_names) - return self.weight_handler.get_weighed_objects(weigher_classes, - backends, - weight_properties) - - def update_service_capabilities(self, service_name, host, capabilities, - cluster_name, timestamp): - """Update the per-service capabilities based on this notification.""" - if service_name != 'volume': - LOG.debug('Ignoring %(service_name)s service update ' - 'from %(host)s', - {'service_name': service_name, 'host': host}) - return - - # TODO(geguileo): In P - Remove the next line since we receive the - # timestamp - timestamp = timestamp or timeutils.utcnow() - # Copy the capabilities, so we don't modify the original dict - capab_copy = dict(capabilities) - capab_copy["timestamp"] = timestamp - - # Set the default capabilities in case None is set. - backend = cluster_name or host - capab_old = self.service_states.get(backend, {"timestamp": 0}) - capab_last_update = self.service_states_last_update.get( - backend, {"timestamp": 0}) - - # Ignore older updates - if capab_old['timestamp'] and timestamp < capab_old['timestamp']: - LOG.info('Ignoring old capability report from %s.', backend) - return - - # If the capabilities are not changed and the timestamp is older, - # record the capabilities. - - # There are cases: capab_old has the capabilities set, - # but the timestamp may be None in it. So does capab_last_update. 
- - if (not self._get_updated_pools(capab_old, capab_copy)) and ( - (not capab_old.get("timestamp")) or - (not capab_last_update.get("timestamp")) or - (capab_last_update["timestamp"] < capab_old["timestamp"])): - self.service_states_last_update[backend] = capab_old - - self.service_states[backend] = capab_copy - - cluster_msg = (('Cluster: %s - Host: ' % cluster_name) if cluster_name - else '') - LOG.debug("Received %(service_name)s service update from %(cluster)s" - "%(host)s: %(cap)s%(cluster)s", - {'service_name': service_name, 'host': host, - 'cap': capabilities, - 'cluster': cluster_msg}) - - self._no_capabilities_backends.discard(backend) - - def notify_service_capabilities(self, service_name, backend, capabilities, - timestamp): - """Notify the ceilometer with updated volume stats""" - if service_name != 'volume': - return - - updated = [] - capa_new = self.service_states.get(backend, {}) - timestamp = timestamp or timeutils.utcnow() - - # Compare the capabilities and timestamps to decide notifying - if not capa_new: - updated = self._get_updated_pools(capa_new, capabilities) - else: - if timestamp > self.service_states[backend]["timestamp"]: - updated = self._get_updated_pools( - self.service_states[backend], capabilities) - if not updated: - updated = self._get_updated_pools( - self.service_states_last_update.get(backend, {}), - self.service_states.get(backend, {})) - - if updated: - capab_copy = dict(capabilities) - capab_copy["timestamp"] = timestamp - # If capabilities changes, notify and record the capabilities. 
- self.service_states_last_update[backend] = capab_copy - self.get_usage_and_notify(capabilities, updated, backend, - timestamp) - - def has_all_capabilities(self): - return len(self._no_capabilities_backends) == 0 - - def _update_backend_state_map(self, context): - - # Get resource usage across the available volume nodes: - topic = constants.VOLUME_TOPIC - volume_services = objects.ServiceList.get_all(context, - {'topic': topic, - 'disabled': False, - 'frozen': False}) - active_backends = set() - active_hosts = set() - no_capabilities_backends = set() - for service in volume_services.objects: - host = service.host - if not service.is_up: - LOG.warning("volume service is down. (host: %s)", host) - continue - - backend_key = service.service_topic_queue - # We only pay attention to the first up service of a cluster since - # they all refer to the same capabilities entry in service_states - if backend_key in active_backends: - active_hosts.add(host) - continue - - # Capabilities may come from the cluster or the host if the service - # has just been converted to a cluster service. 
- capabilities = (self.service_states.get(service.cluster_name, None) - or self.service_states.get(service.host, None)) - if capabilities is None: - no_capabilities_backends.add(backend_key) - continue - - # Since the service could have been added or remove from a cluster - backend_state = self.backend_state_map.get(backend_key, None) - if not backend_state: - backend_state = self.backend_state_cls( - host, - service.cluster_name, - capabilities=capabilities, - service=dict(service)) - self.backend_state_map[backend_key] = backend_state - - # update capabilities and attributes in backend_state - backend_state.update_from_volume_capability(capabilities, - service=dict(service)) - active_backends.add(backend_key) - - self._no_capabilities_backends = no_capabilities_backends - - # remove non-active keys from backend_state_map - inactive_backend_keys = set(self.backend_state_map) - active_backends - for backend_key in inactive_backend_keys: - # NOTE(geguileo): We don't want to log the removal of a host from - # the map when we are removing it because it has been added to a - # cluster. - if backend_key not in active_hosts: - LOG.info("Removing non-active backend: %(backend)s from " - "scheduler cache.", {'backend': backend_key}) - del self.backend_state_map[backend_key] - - def get_all_backend_states(self, context): - """Returns a dict of all the backends the HostManager knows about. - - Each of the consumable resources in BackendState are - populated with capabilities scheduler received from RPC. 
- - For example: - {'192.168.1.100': BackendState(), ...} - """ - - self._update_backend_state_map(context) - - # build a pool_state map and return that map instead of - # backend_state_map - all_pools = {} - for backend_key, state in self.backend_state_map.items(): - for key in state.pools: - pool = state.pools[key] - # use backend_key.pool_name to make sure key is unique - pool_key = '.'.join([backend_key, pool.pool_name]) - all_pools[pool_key] = pool - - return all_pools.values() - - def _filter_pools_by_volume_type(self, context, volume_type, pools): - """Return the pools filtered by volume type specs""" - - # wrap filter properties only with volume_type - filter_properties = { - 'context': context, - 'volume_type': volume_type, - 'resource_type': volume_type, - 'qos_specs': volume_type.get('qos_specs'), - } - - filtered = self.get_filtered_backends(pools.values(), - filter_properties) - - # filter the pools by value - return {k: v for k, v in pools.items() if v in filtered} - - def get_pools(self, context, filters=None): - """Returns a dict of all pools on all hosts HostManager knows about.""" - - self._update_backend_state_map(context) - - all_pools = {} - name = volume_type = None - if filters: - name = filters.pop('name', None) - volume_type = filters.pop('volume_type', None) - - for backend_key, state in self.backend_state_map.items(): - for key in state.pools: - filtered = False - pool = state.pools[key] - # use backend_key.pool_name to make sure key is unique - pool_key = vol_utils.append_host(backend_key, pool.pool_name) - new_pool = dict(name=pool_key) - new_pool.update(dict(capabilities=pool.capabilities)) - - if name and new_pool.get('name') != name: - continue - - if filters: - # filter all other items in capabilities - for (attr, value) in filters.items(): - cap = new_pool.get('capabilities').get(attr) - if not self._equal_after_convert(cap, value): - filtered = True - break - - if not filtered: - all_pools[pool_key] = pool - - # filter pools by 
volume type - if volume_type: - volume_type = volume_types.get_by_name_or_id( - context, volume_type) - all_pools = ( - self._filter_pools_by_volume_type(context, - volume_type, - all_pools)) - - # encapsulate pools in format:{name: XXX, capabilities: XXX} - return [dict(name=key, capabilities=value.capabilities) - for key, value in all_pools.items()] - - def get_usage_and_notify(self, capa_new, updated_pools, host, timestamp): - context = cinder_context.get_admin_context() - usage = self._get_usage(capa_new, updated_pools, host, timestamp) - - self._notify_capacity_usage(context, usage) - - def _get_usage(self, capa_new, updated_pools, host, timestamp): - pools = capa_new.get('pools') - usage = [] - if pools and isinstance(pools, list): - backend_usage = dict(type='backend', - name_to_id=host, - total=0, - free=0, - allocated=0, - provisioned=0, - virtual_free=0, - reported_at=timestamp) - - # Process the usage. - for pool in pools: - pool_usage = self._get_pool_usage(pool, host, timestamp) - if pool_usage: - backend_usage["total"] += pool_usage["total"] - backend_usage["free"] += pool_usage["free"] - backend_usage["allocated"] += pool_usage["allocated"] - backend_usage["provisioned"] += pool_usage["provisioned"] - backend_usage["virtual_free"] += pool_usage["virtual_free"] - # Only the updated pool is reported. 
- if pool in updated_pools: - usage.append(pool_usage) - usage.append(backend_usage) - return usage - - def _get_pool_usage(self, pool, host, timestamp): - total = pool["total_capacity_gb"] - free = pool["free_capacity_gb"] - - unknowns = ["unknown", "infinite", None] - if (total in unknowns) or (free in unknowns): - return {} - - allocated = pool["allocated_capacity_gb"] - provisioned = pool["provisioned_capacity_gb"] - reserved = pool["reserved_percentage"] - ratio = pool["max_over_subscription_ratio"] - support = pool["thin_provisioning_support"] - - virtual_free = utils.calculate_virtual_free_capacity( - total, - free, - provisioned, - support, - ratio, - reserved, - support) - - pool_usage = dict( - type='pool', - name_to_id='#'.join([host, pool['pool_name']]), - total=float(total), - free=float(free), - allocated=float(allocated), - provisioned=float(provisioned), - virtual_free=float(virtual_free), - reported_at=timestamp) - - return pool_usage - - def _get_updated_pools(self, old_capa, new_capa): - # Judge if the capabilities should be reported. - - new_pools = new_capa.get('pools', []) - if not new_pools: - return [] - - if isinstance(new_pools, list): - # If the volume_stats is not well prepared, don't notify. 
- if not all( - self.REQUIRED_KEYS.issubset(pool) for pool in new_pools): - return [] - else: - LOG.debug("The reported capabilities are not well structured...") - return [] - - old_pools = old_capa.get('pools', []) - if not old_pools: - return new_pools - - updated_pools = [] - - newpools = {} - oldpools = {} - for new_pool in new_pools: - newpools[new_pool['pool_name']] = new_pool - - for old_pool in old_pools: - oldpools[old_pool['pool_name']] = old_pool - - for key in newpools.keys(): - if key in oldpools.keys(): - for k in self.REQUIRED_KEYS: - if newpools[key][k] != oldpools[key][k]: - updated_pools.append(newpools[key]) - break - else: - updated_pools.append(newpools[key]) - - return updated_pools - - def _notify_capacity_usage(self, context, usage): - if usage: - for u in usage: - vol_utils.notify_about_capacity_usage( - context, u, u['type'], None, None) - LOG.debug("Publish storage capacity: %s.", usage) - - def _equal_after_convert(self, capability, value): - - if isinstance(value, type(capability)) or capability is None: - return value == capability - - if isinstance(capability, bool): - return capability == strutils.bool_from_string(value) - - # We can not check or convert value parameter's type in - # anywhere else. - # If the capability and value are not in the same type, - # we just convert them into string to compare them. - return str(value) == str(capability) diff --git a/cinder/scheduler/manager.py b/cinder/scheduler/manager.py deleted file mode 100644 index ab1c3d5e7..000000000 --- a/cinder/scheduler/manager.py +++ /dev/null @@ -1,485 +0,0 @@ -# Copyright (c) 2010 OpenStack Foundation -# Copyright 2010 United States Government as represented by the -# Administrator of the National Aeronautics and Space Administration. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. 
You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -""" -Scheduler Service -""" - -import collections -from datetime import datetime - -import eventlet -from oslo_config import cfg -from oslo_log import log as logging -import oslo_messaging as messaging -from oslo_service import periodic_task -from oslo_utils import excutils -from oslo_utils import importutils -from oslo_utils import timeutils -from oslo_utils import versionutils -import six - -from cinder import context -from cinder import db -from cinder import exception -from cinder import flow_utils -from cinder.i18n import _ -from cinder import manager -from cinder.message import api as mess_api -from cinder import objects -from cinder.objects import fields -from cinder import quota -from cinder import rpc -from cinder.scheduler.flows import create_volume -from cinder.scheduler import rpcapi as scheduler_rpcapi -from cinder.volume import rpcapi as volume_rpcapi - - -scheduler_driver_opt = cfg.StrOpt('scheduler_driver', - default='cinder.scheduler.filter_scheduler.' 
- 'FilterScheduler', - help='Default scheduler driver to use') - -CONF = cfg.CONF -CONF.register_opt(scheduler_driver_opt) - -QUOTAS = quota.QUOTAS - -LOG = logging.getLogger(__name__) - - -class SchedulerManager(manager.CleanableManager, manager.Manager): - """Chooses a host to create volumes.""" - - RPC_API_VERSION = scheduler_rpcapi.SchedulerAPI.RPC_API_VERSION - - target = messaging.Target(version=RPC_API_VERSION) - - def __init__(self, scheduler_driver=None, service_name=None, - *args, **kwargs): - if not scheduler_driver: - scheduler_driver = CONF.scheduler_driver - self.driver = importutils.import_object(scheduler_driver) - super(SchedulerManager, self).__init__(*args, **kwargs) - self._startup_delay = True - self.volume_api = volume_rpcapi.VolumeAPI() - self.sch_api = scheduler_rpcapi.SchedulerAPI() - self.message_api = mess_api.API() - self.rpc_api_version = versionutils.convert_version_to_int( - self.RPC_API_VERSION) - - def init_host_with_rpc(self): - ctxt = context.get_admin_context() - self.request_service_capabilities(ctxt) - - eventlet.sleep(CONF.periodic_interval) - self._startup_delay = False - - def reset(self): - super(SchedulerManager, self).reset() - self.volume_api = volume_rpcapi.VolumeAPI() - self.sch_api = scheduler_rpcapi.SchedulerAPI() - self.driver.reset() - - @periodic_task.periodic_task(spacing=CONF.message_reap_interval, - run_immediately=True) - def _clean_expired_messages(self, context): - self.message_api.cleanup_expired_messages(context) - - @periodic_task.periodic_task(spacing=CONF.reservation_clean_interval, - run_immediately=True) - def _clean_expired_reservation(self, context): - QUOTAS.expire(context) - - def update_service_capabilities(self, context, service_name=None, - host=None, capabilities=None, - cluster_name=None, timestamp=None, - **kwargs): - """Process a capability update from a service node.""" - if capabilities is None: - capabilities = {} - # If we received the timestamp we have to deserialize it - elif 
timestamp: - timestamp = datetime.strptime(timestamp, - timeutils.PERFECT_TIME_FORMAT) - - self.driver.update_service_capabilities(service_name, - host, - capabilities, - cluster_name, - timestamp) - - def notify_service_capabilities(self, context, service_name, - capabilities, host=None, backend=None, - timestamp=None): - """Process a capability update from a service node.""" - # TODO(geguileo): On v4 remove host field. - if capabilities is None: - capabilities = {} - # If we received the timestamp we have to deserialize it - elif timestamp: - timestamp = datetime.strptime(timestamp, - timeutils.PERFECT_TIME_FORMAT) - backend = backend or host - self.driver.notify_service_capabilities(service_name, - backend, - capabilities, - timestamp) - - def _wait_for_scheduler(self): - # NOTE(dulek): We're waiting for scheduler to announce that it's ready - # or CONF.periodic_interval seconds from service startup has passed. - while self._startup_delay and not self.driver.is_ready(): - eventlet.sleep(1) - - def create_group(self, context, group, group_spec=None, - group_filter_properties=None, request_spec_list=None, - filter_properties_list=None): - self._wait_for_scheduler() - try: - self.driver.schedule_create_group( - context, group, - group_spec, - request_spec_list, - group_filter_properties, - filter_properties_list) - except exception.NoValidBackend: - LOG.error("Could not find a backend for group " - "%(group_id)s.", - {'group_id': group.id}) - group.status = fields.GroupStatus.ERROR - group.save() - except Exception: - with excutils.save_and_reraise_exception(): - LOG.exception("Failed to create generic group " - "%(group_id)s.", - {'group_id': group.id}) - group.status = fields.GroupStatus.ERROR - group.save() - - @objects.Volume.set_workers - def create_volume(self, context, volume, snapshot_id=None, image_id=None, - request_spec=None, filter_properties=None): - self._wait_for_scheduler() - - try: - flow_engine = create_volume.get_flow(context, - self.driver, - 
request_spec, - filter_properties, - volume, - snapshot_id, - image_id) - except Exception: - msg = _("Failed to create scheduler manager volume flow") - LOG.exception(msg) - raise exception.CinderException(msg) - - with flow_utils.DynamicLogListener(flow_engine, logger=LOG): - flow_engine.run() - - def _do_cleanup(self, ctxt, vo_resource): - # We can only receive cleanup requests for volumes, but we check anyway - # We need to cleanup the volume status for cases where the scheduler - # died while scheduling the volume creation. - if (isinstance(vo_resource, objects.Volume) and - vo_resource.status == 'creating'): - vo_resource.status = 'error' - vo_resource.save() - - def request_service_capabilities(self, context): - volume_rpcapi.VolumeAPI().publish_service_capabilities(context) - - def migrate_volume(self, context, volume, backend, force_copy, - request_spec, filter_properties): - """Ensure that the backend exists and can accept the volume.""" - self._wait_for_scheduler() - - def _migrate_volume_set_error(self, context, ex, request_spec): - if volume.status == 'maintenance': - previous_status = ( - volume.previous_status or 'maintenance') - volume_state = {'volume_state': {'migration_status': 'error', - 'status': previous_status}} - else: - volume_state = {'volume_state': {'migration_status': 'error'}} - self._set_volume_state_and_notify('migrate_volume_to_host', - volume_state, - context, ex, request_spec) - - try: - tgt_backend = self.driver.backend_passes_filters(context, backend, - request_spec, - filter_properties) - except exception.NoValidBackend as ex: - _migrate_volume_set_error(self, context, ex, request_spec) - except Exception as ex: - with excutils.save_and_reraise_exception(): - _migrate_volume_set_error(self, context, ex, request_spec) - else: - volume_rpcapi.VolumeAPI().migrate_volume(context, volume, - tgt_backend, - force_copy) - - # FIXME(geguileo): Remove this in v4.0 of RPC API. 
- def migrate_volume_to_host(self, context, volume, host, force_host_copy, - request_spec, filter_properties=None): - return self.migrate_volume(context, volume, host, force_host_copy, - request_spec, filter_properties) - - def retype(self, context, volume, request_spec, filter_properties=None): - """Schedule the modification of a volume's type. - - :param context: the request context - :param volume: the volume object to retype - :param request_spec: parameters for this retype request - :param filter_properties: parameters to filter by - """ - - self._wait_for_scheduler() - - def _retype_volume_set_error(self, context, ex, request_spec, - volume_ref, reservations, msg=None): - if reservations: - QUOTAS.rollback(context, reservations) - previous_status = ( - volume_ref.previous_status or volume_ref.status) - volume_state = {'volume_state': {'status': previous_status}} - self._set_volume_state_and_notify('retype', volume_state, - context, ex, request_spec, msg) - - reservations = request_spec.get('quota_reservations') - old_reservations = request_spec.get('old_reservations', None) - new_type = request_spec.get('volume_type') - if new_type is None: - msg = _('New volume type not specified in request_spec.') - ex = exception.ParameterNotFound(param='volume_type') - _retype_volume_set_error(self, context, ex, request_spec, - volume, reservations, msg) - - # Default migration policy is 'never' - migration_policy = request_spec.get('migration_policy') - if not migration_policy: - migration_policy = 'never' - - try: - tgt_backend = self.driver.find_retype_backend(context, - request_spec, - filter_properties, - migration_policy) - except Exception as ex: - # Not having a valid host is an expected exception, so we don't - # reraise on it. 
- reraise = not isinstance(ex, exception.NoValidBackend) - with excutils.save_and_reraise_exception(reraise=reraise): - _retype_volume_set_error(self, context, ex, request_spec, - volume, reservations) - else: - volume_rpcapi.VolumeAPI().retype(context, volume, - new_type['id'], tgt_backend, - migration_policy, - reservations, - old_reservations) - - def manage_existing(self, context, volume, request_spec, - filter_properties=None): - """Ensure that the host exists and can accept the volume.""" - - self._wait_for_scheduler() - - def _manage_existing_set_error(self, context, ex, request_spec): - volume_state = {'volume_state': {'status': 'error_managing'}} - self._set_volume_state_and_notify('manage_existing', volume_state, - context, ex, request_spec) - - try: - backend = self.driver.backend_passes_filters( - context, volume.service_topic_queue, request_spec, - filter_properties) - - # At the API we didn't have the pool info, so the volume DB entry - # was created without it, now we add it. - volume.host = backend.host - volume.cluster_name = backend.cluster_name - volume.save() - - except exception.NoValidBackend as ex: - _manage_existing_set_error(self, context, ex, request_spec) - except Exception as ex: - with excutils.save_and_reraise_exception(): - _manage_existing_set_error(self, context, ex, request_spec) - else: - volume_rpcapi.VolumeAPI().manage_existing(context, volume, - request_spec.get('ref')) - - def get_pools(self, context, filters=None): - """Get active pools from scheduler's cache. - - NOTE(dulek): There's no self._wait_for_scheduler() because get_pools is - an RPC call (is blocking for the c-api). Also this is admin-only API - extension so it won't hurt the user much to retry the request manually. 
- """ - return self.driver.get_pools(context, filters) - - def extend_volume(self, context, volume, new_size, reservations, - request_spec=None, filter_properties=None): - - def _extend_volume_set_error(self, context, ex, request_spec): - volume_state = {'volume_state': {'status': 'available'}} - self._set_volume_state_and_notify('extend_volume', volume_state, - context, ex, request_spec) - - if not filter_properties: - filter_properties = {} - - filter_properties['new_size'] = new_size - try: - self.driver.backend_passes_filters(context, - volume.service_topic_queue, - request_spec, filter_properties) - volume_rpcapi.VolumeAPI().extend_volume(context, volume, new_size, - reservations) - except exception.NoValidBackend as ex: - QUOTAS.rollback(context, reservations, - project_id=volume.project_id) - _extend_volume_set_error(self, context, ex, request_spec) - - def _set_volume_state_and_notify(self, method, updates, context, ex, - request_spec, msg=None): - # TODO(harlowja): move into a task that just does this later. - if not msg: - msg = ("Failed to schedule_%(method)s: %(ex)s" % - {'method': method, 'ex': six.text_type(ex)}) - LOG.error(msg) - - volume_state = updates['volume_state'] - properties = request_spec.get('volume_properties', {}) - - volume_id = request_spec.get('volume_id', None) - - if volume_id: - db.volume_update(context, volume_id, volume_state) - - if volume_state.get('status') == 'error_managing': - volume_state['status'] = 'error' - - payload = dict(request_spec=request_spec, - volume_properties=properties, - volume_id=volume_id, - state=volume_state, - method=method, - reason=ex) - - rpc.get_notifier("scheduler").error(context, - 'scheduler.' 
+ method, - payload) - - @property - def upgrading_cloud(self): - min_version_str = self.sch_api.determine_rpc_version_cap() - min_version = versionutils.convert_version_to_int(min_version_str) - return min_version < self.rpc_api_version - - def _cleanup_destination(self, clusters, service): - """Determines the RPC method, destination service and name. - - The name is only used for logging, and it is the topic queue. - """ - # For the scheduler we don't have a specific destination, as any - # scheduler will do and we know we are up, since we are running this - # code. - if service.binary == 'cinder-scheduler': - cleanup_rpc = self.sch_api.do_cleanup - dest = None - dest_name = service.host - else: - cleanup_rpc = self.volume_api.do_cleanup - - # For clustered volume services we try to get info from the cache. - if service.is_clustered: - # Get cluster info from cache - dest = clusters[service.binary].get(service.cluster_name) - # Cache miss forces us to get the cluster from the DB via OVO - if not dest: - dest = service.cluster - clusters[service.binary][service.cluster_name] = dest - dest_name = dest.name - # Non clustered volume services - else: - dest = service - dest_name = service.host - return cleanup_rpc, dest, dest_name - - def work_cleanup(self, context, cleanup_request): - """Process request from API to do cleanup on services. - - Here we retrieve from the DB which services we want to clean up based - on the request from the user. - - Then send individual cleanup requests to each of the services that are - up, and we finally return a tuple with services that we have sent a - cleanup request and those that were not up and we couldn't send it. 
- """ - if self.upgrading_cloud: - raise exception.UnavailableDuringUpgrade(action='workers cleanup') - - LOG.info('Workers cleanup request started.') - - filters = dict(service_id=cleanup_request.service_id, - cluster_name=cleanup_request.cluster_name, - host=cleanup_request.host, - binary=cleanup_request.binary, - is_up=cleanup_request.is_up, - disabled=cleanup_request.disabled) - # Get the list of all the services that match the request - services = objects.ServiceList.get_all(context, filters) - - until = cleanup_request.until or timeutils.utcnow() - requested = [] - not_requested = [] - - # To reduce DB queries we'll cache the clusters data - clusters = collections.defaultdict(dict) - - for service in services: - cleanup_request.cluster_name = service.cluster_name - cleanup_request.service_id = service.id - cleanup_request.host = service.host - cleanup_request.binary = service.binary - cleanup_request.until = until - - cleanup_rpc, dest, dest_name = self._cleanup_destination(clusters, - service) - - # If it's a scheduler or the service is up, send the request. - if not dest or dest.is_up: - LOG.info('Sending cleanup for %(binary)s %(dest_name)s.', - {'binary': service.binary, - 'dest_name': dest_name}) - cleanup_rpc(context, cleanup_request) - requested.append(service) - # We don't send cleanup requests when there are no services alive - # to do the cleanup. - else: - LOG.info('No service available to cleanup %(binary)s ' - '%(dest_name)s.', - {'binary': service.binary, - 'dest_name': dest_name}) - not_requested.append(service) - - LOG.info('Cleanup requests completed.') - return requested, not_requested diff --git a/cinder/scheduler/rpcapi.py b/cinder/scheduler/rpcapi.py deleted file mode 100644 index 00c531ef0..000000000 --- a/cinder/scheduler/rpcapi.py +++ /dev/null @@ -1,221 +0,0 @@ -# Copyright 2012, Red Hat, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. 
You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -""" -Client side of the scheduler manager RPC API. -""" - -from oslo_serialization import jsonutils -from oslo_utils import timeutils - -from cinder.common import constants -from cinder import rpc - - -class SchedulerAPI(rpc.RPCAPI): - """Client side of the scheduler RPC API. - - API version history: - - .. code-block:: none - - 1.0 - Initial version. - 1.1 - Add create_volume() method - 1.2 - Add request_spec, filter_properties arguments to - create_volume() - 1.3 - Add migrate_volume_to_host() method - 1.4 - Add retype method - 1.5 - Add manage_existing method - 1.6 - Add create_consistencygroup method - 1.7 - Add get_active_pools method - 1.8 - Add sending object over RPC in create_consistencygroup method - 1.9 - Adds support for sending objects over RPC in create_volume() - 1.10 - Adds support for sending objects over RPC in retype() - 1.11 - Adds support for sending objects over RPC in - migrate_volume_to_host() - - ... Mitaka supports messaging 1.11. Any changes to existing methods in - 1.x after this point should be done so that they can handle version cap - set to 1.11. - - 2.0 - Remove 1.x compatibility - 2.1 - Adds support for sending objects over RPC in manage_existing() - 2.2 - Sends request_spec as object in create_volume() - 2.3 - Add create_group method - - ... Newton supports messaging 2.3. Any changes to existing methods in - 2.x after this point should be done so that they can handle version cap - set to 2.3. 
- - 3.0 - Remove 2.x compatibility - 3.1 - Adds notify_service_capabilities() - 3.2 - Adds extend_volume() - 3.3 - Add cluster support to migrate_volume, and to - update_service_capabilities and send the timestamp from the - capabilities. - 3.4 - Adds work_cleanup and do_cleanup methods. - 3.5 - Make notify_service_capabilities support A/A - 3.6 - Removed create_consistencygroup method - 3.7 - Adds set_log_levels and get_log_levels - """ - - RPC_API_VERSION = '3.7' - RPC_DEFAULT_VERSION = '3.0' - TOPIC = constants.SCHEDULER_TOPIC - BINARY = 'cinder-scheduler' - - def create_group(self, ctxt, group, group_spec=None, - request_spec_list=None, group_filter_properties=None, - filter_properties_list=None): - cctxt = self._get_cctxt() - request_spec_p_list = [jsonutils.to_primitive(rs) - for rs in request_spec_list] - group_spec_p = jsonutils.to_primitive(group_spec) - msg_args = { - 'group': group, 'group_spec': group_spec_p, - 'request_spec_list': request_spec_p_list, - 'group_filter_properties': group_filter_properties, - 'filter_properties_list': filter_properties_list, - } - - cctxt.cast(ctxt, 'create_group', **msg_args) - - def create_volume(self, ctxt, volume, snapshot_id=None, image_id=None, - request_spec=None, filter_properties=None): - volume.create_worker() - cctxt = self._get_cctxt() - msg_args = {'snapshot_id': snapshot_id, 'image_id': image_id, - 'request_spec': request_spec, - 'filter_properties': filter_properties, 'volume': volume} - return cctxt.cast(ctxt, 'create_volume', **msg_args) - - def migrate_volume(self, ctxt, volume, backend, force_copy=False, - request_spec=None, filter_properties=None): - request_spec_p = jsonutils.to_primitive(request_spec) - msg_args = {'request_spec': request_spec_p, - 'filter_properties': filter_properties, 'volume': volume} - version = '3.3' - if self.client.can_send_version(version): - msg_args['backend'] = backend - msg_args['force_copy'] = force_copy - method = 'migrate_volume' - else: - version = '3.0' - 
msg_args['host'] = backend - msg_args['force_host_copy'] = force_copy - method = 'migrate_volume_to_host' - - cctxt = self._get_cctxt(version=version) - return cctxt.cast(ctxt, method, **msg_args) - - def retype(self, ctxt, volume, request_spec=None, filter_properties=None): - cctxt = self._get_cctxt() - request_spec_p = jsonutils.to_primitive(request_spec) - msg_args = {'request_spec': request_spec_p, - 'filter_properties': filter_properties, 'volume': volume} - return cctxt.cast(ctxt, 'retype', **msg_args) - - def manage_existing(self, ctxt, volume, request_spec=None, - filter_properties=None): - cctxt = self._get_cctxt() - request_spec_p = jsonutils.to_primitive(request_spec) - msg_args = { - 'request_spec': request_spec_p, - 'filter_properties': filter_properties, 'volume': volume, - } - return cctxt.cast(ctxt, 'manage_existing', **msg_args) - - @rpc.assert_min_rpc_version('3.2') - def extend_volume(self, ctxt, volume, new_size, reservations, - request_spec, filter_properties=None): - cctxt = self._get_cctxt() - - request_spec_p = jsonutils.to_primitive(request_spec) - msg_args = { - 'volume': volume, - 'new_size': new_size, - 'reservations': reservations, - 'request_spec': request_spec_p, - 'filter_properties': filter_properties, - } - - return cctxt.cast(ctxt, 'extend_volume', **msg_args) - - def get_pools(self, ctxt, filters=None): - cctxt = self._get_cctxt() - return cctxt.call(ctxt, 'get_pools', filters=filters) - - @staticmethod - def prepare_timestamp(timestamp): - timestamp = timestamp or timeutils.utcnow() - return jsonutils.to_primitive(timestamp) - - def update_service_capabilities(self, ctxt, service_name, host, - capabilities, cluster_name, - timestamp=None): - msg_args = dict(service_name=service_name, host=host, - capabilities=capabilities) - - version = '3.3' - # If server accepts timestamping the capabilities and the cluster name - if self.client.can_send_version(version): - # Serialize the timestamp - msg_args.update(cluster_name=cluster_name, 
- timestamp=self.prepare_timestamp(timestamp)) - else: - version = '3.0' - - cctxt = self._get_cctxt(fanout=True, version=version) - cctxt.cast(ctxt, 'update_service_capabilities', **msg_args) - - @rpc.assert_min_rpc_version('3.1') - def notify_service_capabilities(self, ctxt, service_name, - backend, capabilities, timestamp=None): - parameters = {'service_name': service_name, - 'capabilities': capabilities} - if self.client.can_send_version('3.5'): - version = '3.5' - parameters.update(backend=backend, - timestamp=self.prepare_timestamp(timestamp)) - else: - version = '3.1' - parameters['host'] = backend - - cctxt = self._get_cctxt(version=version) - cctxt.cast(ctxt, 'notify_service_capabilities', **parameters) - - @rpc.assert_min_rpc_version('3.4') - def work_cleanup(self, ctxt, cleanup_request): - """Generate individual service cleanup requests from user request.""" - cctxt = self.client.prepare(version='3.4') - # Response will have services that are receiving the cleanup request - # and services that couldn't receive it since they are down. 
- return cctxt.call(ctxt, 'work_cleanup', - cleanup_request=cleanup_request) - - @rpc.assert_min_rpc_version('3.4') - def do_cleanup(self, ctxt, cleanup_request): - """Perform this scheduler's resource cleanup as per cleanup_request.""" - cctxt = self.client.prepare(version='3.4') - cctxt.cast(ctxt, 'do_cleanup', cleanup_request=cleanup_request) - - @rpc.assert_min_rpc_version('3.7') - def set_log_levels(self, context, service, log_request): - cctxt = self._get_cctxt(server=service.host, version='3.7') - cctxt.cast(context, 'set_log_levels', log_request=log_request) - - @rpc.assert_min_rpc_version('3.7') - def get_log_levels(self, context, service, log_request): - cctxt = self._get_cctxt(server=service.host, version='3.7') - return cctxt.call(context, 'get_log_levels', log_request=log_request) diff --git a/cinder/scheduler/scheduler_options.py b/cinder/scheduler/scheduler_options.py deleted file mode 100644 index f5ab62f5e..000000000 --- a/cinder/scheduler/scheduler_options.py +++ /dev/null @@ -1,103 +0,0 @@ -# Copyright (c) 2011 OpenStack Foundation -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -""" -SchedulerOptions monitors a local .json file for changes and loads -it if needed. This file is converted to a data structure and passed -into the filtering and weighing functions which can use it for -dynamic configuration. 
-""" - -import datetime -import json -import os - -from oslo_config import cfg -from oslo_log import log as logging -from oslo_utils import timeutils - - -scheduler_json_config_location_opt = cfg.StrOpt( - 'scheduler_json_config_location', - default='', - help='Absolute path to scheduler configuration JSON file.') - - -CONF = cfg.CONF -CONF.register_opt(scheduler_json_config_location_opt) - -LOG = logging.getLogger(__name__) - - -class SchedulerOptions(object): - """SchedulerOptions monitors a local .json file for changes. - - The file is reloaded if needed and converted to a data structure and - passed into the filtering and weighing functions which can use it - for dynamic configuration. - """ - - def __init__(self): - super(SchedulerOptions, self).__init__() - self.data = {} - self.last_modified = None - self.last_checked = None - - def _get_file_handle(self, filename): - """Get file handle. Broken out for testing.""" - return open(filename) - - def _get_file_timestamp(self, filename): - """Get the last modified datetime. Broken out for testing.""" - try: - return os.path.getmtime(filename) - except os.error: - LOG.exception("Could not stat scheduler options file " - "%(filename)s.", - {'filename': filename}) - raise - - def _load_file(self, handle): - """Decode the JSON file. Broken out for testing.""" - try: - return json.load(handle) - except ValueError: - LOG.exception("Could not decode scheduler options.") - return {} - - def _get_time_now(self): - """Get current UTC. 
Broken out for testing.""" - return timeutils.utcnow() - - def get_configuration(self, filename=None): - """Check the json file for changes and load it if needed.""" - if not filename: - filename = CONF.scheduler_json_config_location - if not filename: - return self.data - if self.last_checked: - now = self._get_time_now() - if now - self.last_checked < datetime.timedelta(minutes=5): - return self.data - - last_modified = self._get_file_timestamp(filename) - if (not last_modified or not self.last_modified or - last_modified > self.last_modified): - self.data = self._load_file(self._get_file_handle(filename)) - self.last_modified = last_modified - if not self.data: - self.data = {} - - return self.data diff --git a/cinder/scheduler/weights/__init__.py b/cinder/scheduler/weights/__init__.py deleted file mode 100644 index 93bc224d3..000000000 --- a/cinder/scheduler/weights/__init__.py +++ /dev/null @@ -1,45 +0,0 @@ -# Copyright (c) 2011 OpenStack Foundation. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -""" -Scheduler host weights -""" - -from cinder.scheduler import base_weight - - -class WeighedHost(base_weight.WeighedObject): - def to_dict(self): - return { - 'weight': self.weight, - 'host': self.obj.host, - } - - def __repr__(self): - return ("WeighedHost [host: %s, weight: %s]" % - (self.obj.host, self.weight)) - - -class BaseHostWeigher(base_weight.BaseWeigher): - """Base class for host weights.""" - pass - - -class OrderedHostWeightHandler(base_weight.BaseWeightHandler): - object_class = WeighedHost - - def __init__(self, namespace): - super(OrderedHostWeightHandler, self).__init__(BaseHostWeigher, - namespace) diff --git a/cinder/scheduler/weights/capacity.py b/cinder/scheduler/weights/capacity.py deleted file mode 100644 index daeb7f24c..000000000 --- a/cinder/scheduler/weights/capacity.py +++ /dev/null @@ -1,146 +0,0 @@ -# Copyright (c) 2013 eBay Inc. -# Copyright (c) 2012 OpenStack Foundation -# Copyright (c) 2015 EMC Corporation -# -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import math - -from oslo_config import cfg - -from cinder.scheduler import weights -from cinder import utils - - -capacity_weight_opts = [ - cfg.FloatOpt('capacity_weight_multiplier', - default=1.0, - help='Multiplier used for weighing free capacity. ' - 'Negative numbers mean to stack vs spread.'), - cfg.FloatOpt('allocated_capacity_weight_multiplier', - default=-1.0, - help='Multiplier used for weighing allocated capacity. 
' - 'Positive numbers mean to stack vs spread.'), -] - -CONF = cfg.CONF -CONF.register_opts(capacity_weight_opts) - -OFFSET_MIN = 10000 -OFFSET_MULT = 100 - - -class CapacityWeigher(weights.BaseHostWeigher): - """Capacity Weigher weighs hosts by their virtual or actual free capacity. - - For thin provisioning, weigh hosts by their virtual free capacity - calculated by the total capacity multiplied by the max over subscription - ratio and subtracting the provisioned capacity; Otherwise, weigh hosts by - their actual free capacity, taking into account the reserved space. - - The default is to spread volumes across all hosts evenly. If you prefer - stacking, you can set the ``capacity_weight_multiplier`` option to a - negative number and the weighing has the opposite effect of the default. - - """ - def weight_multiplier(self): - """Override the weight multiplier.""" - return CONF.capacity_weight_multiplier - - def weigh_objects(self, weighed_obj_list, weight_properties): - """Override the weigh objects. - - - This override calls the parent to do the weigh objects and then - replaces any infinite weights with a value that is a multiple of the - delta between the min and max values. - - NOTE(jecarey): the infinite weight value is only used when the - smallest value is being favored (negative multiplier). When the - largest weight value is being used a weight of -1 is used instead. - See _weigh_object method. - """ - tmp_weights = super(CapacityWeigher, self).weigh_objects( - weighed_obj_list, weight_properties) - - if math.isinf(self.maxval): - # NOTE(jecarey): if all weights were infinite then parent - # method returns 0 for all of the weights. 
Thus self.minval - # cannot be infinite at this point - copy_weights = [w for w in tmp_weights if not math.isinf(w)] - self.maxval = max(copy_weights) - offset = (self.maxval - self.minval) * OFFSET_MULT - self.maxval += OFFSET_MIN if offset == 0.0 else offset - tmp_weights = [self.maxval if math.isinf(w) else w - for w in tmp_weights] - - return tmp_weights - - def _weigh_object(self, host_state, weight_properties): - """Higher weights win. We want spreading to be the default.""" - free_space = host_state.free_capacity_gb - total_space = host_state.total_capacity_gb - if (free_space == 'infinite' or free_space == 'unknown' or - total_space == 'infinite' or total_space == 'unknown'): - # (zhiteng) 'infinite' and 'unknown' are treated the same - # here, for sorting purpose. - - # As a partial fix for bug #1350638, 'infinite' and 'unknown' are - # given the lowest weight to discourage driver from report such - # capacity anymore. - free = -1 if CONF.capacity_weight_multiplier > 0 else float('inf') - else: - # NOTE(xyang): If 'provisioning:type' is 'thick' in extra_specs, - # we will not use max_over_subscription_ratio and - # provisioned_capacity_gb to determine whether a volume can be - # provisioned. Instead free capacity will be used to evaluate. - thin = True - vol_type = weight_properties.get('volume_type', {}) or {} - provision_type = vol_type.get('extra_specs', {}).get( - 'provisioning:type') - if provision_type == 'thick': - thin = False - - free = utils.calculate_virtual_free_capacity( - total_space, - free_space, - host_state.provisioned_capacity_gb, - host_state.thin_provisioning_support, - host_state.max_over_subscription_ratio, - host_state.reserved_percentage, - thin) - - return free - - -class AllocatedCapacityWeigher(weights.BaseHostWeigher): - """Allocated Capacity Weigher weighs hosts by their allocated capacity. - - The default behavior is to place new volume to the host allocated the least - space. 
This weigher is intended to simulate the behavior of - SimpleScheduler. If you prefer to place volumes to host allocated the most - space, you can set the ``allocated_capacity_weight_multiplier`` option to a - positive number and the weighing has the opposite effect of the default. - """ - - def weight_multiplier(self): - """Override the weight multiplier.""" - return CONF.allocated_capacity_weight_multiplier - - def _weigh_object(self, host_state, weight_properties): - # Higher weights win. We want spreading (choose host with lowest - # allocated_capacity first) to be the default. - allocated_space = host_state.allocated_capacity_gb - return allocated_space diff --git a/cinder/scheduler/weights/chance.py b/cinder/scheduler/weights/chance.py deleted file mode 100644 index 901c2a4e1..000000000 --- a/cinder/scheduler/weights/chance.py +++ /dev/null @@ -1,26 +0,0 @@ -# Copyright (C) 2013 Hewlett-Packard Development Company, L.P. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import random - -from cinder.scheduler import weights - - -class ChanceWeigher(weights.BaseHostWeigher): - """Chance Weigher assigns random weights to hosts. - - Used to spread volumes randomly across a list of equally suitable hosts. 
class ChanceWeigher(weights.BaseHostWeigher):
    """Chance Weigher assigns random weights to hosts.

    Used to spread volumes randomly across a list of equally suitable hosts.
    """

    def _weigh_object(self, host_state, weight_properties):
        # Every host gets an independent uniform weight in [0, 1).
        return random.random()


class GoodnessWeigher(weights.BaseHostWeigher):
    """Goodness Weigher. Assign weights based on a host's goodness function.

    Goodness rating is the following:

    .. code-block:: none

          0 -- host is a poor choice
          .
          .
         50 -- host is a good choice
          .
          .
        100 -- host is a perfect choice

    """

    def _weigh_object(self, host_state, weight_properties):
        """Determine host's goodness rating based on a goodness_function."""
        stats = self._generate_stats(host_state, weight_properties)
        host_name = stats['host_stats']['host']
        LOG.debug("Checking host '%s'", host_name)
        goodness = self._check_goodness_function(stats)
        LOG.debug("Goodness: %s", goodness)
        LOG.debug("Done checking host '%s'", host_name)

        return goodness

    def _check_goodness_function(self, stats):
        """Gets a host's goodness rating based on its goodness function."""
        goodness_rating = 0

        if stats['goodness_function'] is None:
            LOG.warning("Goodness function not set :: defaulting to "
                        "minimal goodness rating of 0")
            return goodness_rating

        try:
            goodness_result = self._run_evaluator(
                stats['goodness_function'],
                stats)
        except Exception as ex:
            LOG.warning("Error in goodness_function function "
                        "'%(function)s' : '%(error)s' :: Defaulting "
                        "to a goodness of 0",
                        {'function': stats['goodness_function'],
                         'error': ex, })
            return goodness_rating

        if type(goodness_result) is bool:
            # A boolean result maps to all-or-nothing goodness.
            if goodness_result:
                goodness_rating = 100
        elif goodness_result < 0 or goodness_result > 100:
            LOG.warning("Invalid goodness result. Result must be "
                        "between 0 and 100. Result generated: '%s' "
                        ":: Defaulting to a goodness of 0",
                        goodness_result)
        else:
            goodness_rating = goodness_result

        return goodness_rating

    def _run_evaluator(self, func, stats):
        """Evaluates a given function using the provided available stats."""
        return evaluator.evaluate(
            func,
            extra=stats['extra_specs'],
            stats=stats['host_stats'],
            capabilities=stats['host_caps'],
            volume=stats['volume_stats'],
            qos=stats['qos_specs'])

    def _generate_stats(self, host_state, weight_properties):
        """Generates statistics from host and volume data."""
        host_stats = {
            'host': host_state.host,
            'volume_backend_name': host_state.volume_backend_name,
            'vendor_name': host_state.vendor_name,
            'driver_version': host_state.driver_version,
            'storage_protocol': host_state.storage_protocol,
            'QoS_support': host_state.QoS_support,
            'total_capacity_gb': host_state.total_capacity_gb,
            'allocated_capacity_gb': host_state.allocated_capacity_gb,
            'free_capacity_gb': host_state.free_capacity_gb,
            'reserved_percentage': host_state.reserved_percentage,
            'updated': host_state.updated,
        }

        host_caps = host_state.capabilities

        goodness_function = None
        if ('goodness_function' in host_caps and
                host_caps['goodness_function'] is not None):
            goodness_function = six.text_type(host_caps['goodness_function'])

        volume_type = weight_properties.get('volume_type', {})
        request_spec = weight_properties.get('request_spec', {})

        return {
            'host_stats': host_stats,
            'host_caps': host_caps,
            'extra_specs': volume_type.get('extra_specs', {}),
            'qos_specs': weight_properties.get('qos_specs', {}),
            'volume_stats': request_spec.get('volume_properties', {}),
            'volume_type': volume_type,
            'goodness_function': goodness_function,
        }


class StochasticHostWeightHandler(base_weight.BaseWeightHandler):
    """Weight handler that picks hosts via a weight-proportional lottery."""

    def __init__(self, namespace):
        super(StochasticHostWeightHandler, self).__init__(wts.BaseHostWeigher,
                                                          namespace)

    def get_weighed_objects(self, weigher_classes, obj_list,
                            weighing_properties):
        # The normalization performed in the superclass is nonlinear, which
        # messes up the probabilities, so override it. The probabilistic
        # approach we use here is self-normalizing.
        # Also, the sorting done by the parent implementation is harmless but
        # useless for us.

        # Compute the object weights as the parent would but without sorting
        # or normalization.
        weighed_objs = [wts.WeighedHost(obj, 0.0) for obj in obj_list]
        for weigher_cls in weigher_classes:
            weigher = weigher_cls()
            measured = weigher.weigh_objects(weighed_objs,
                                             weighing_properties)
            multiplier = weigher.weight_multiplier()
            for weighed_obj, value in zip(weighed_objs, measured):
                weighed_obj.weight += multiplier * value

        # Avoid processing empty lists
        if not weighed_objs:
            return []

        # Build a cumulative-weight table: entry i is the upper bound for
        # object i to "win" the lottery.
        running_total = 0
        upper_bounds = []
        for weighed_obj in weighed_objs:
            running_total += weighed_obj.weight
            upper_bounds.append(running_total)
        total_weight = running_total

        # Draw a random value within the computed range.
        winning_value = random.random() * total_weight

        # The winner is the first object whose upper bound exceeds the
        # random draw. It's theoretically possible for no bound to exceed
        # the draw (winning_value >= total_weight due to floating point
        # rounding); winning_index then stays 0, preserving the original
        # fallback behavior.
        winning_index = 0
        for idx, bound in enumerate(upper_bounds):
            if bound > winning_value:
                winning_index = idx
                break

        # Rotate the list so the winner comes first; callers take element 0.
        return weighed_objs[winning_index:] + weighed_objs[:winning_index]
You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from oslo_config import cfg - -from cinder import db -from cinder.scheduler import weights - - -volume_number_weight_opts = [ - cfg.FloatOpt('volume_number_multiplier', - default=-1.0, - help='Multiplier used for weighing volume number. ' - 'Negative numbers mean to spread vs stack.'), -] - -CONF = cfg.CONF -CONF.register_opts(volume_number_weight_opts) - - -class VolumeNumberWeigher(weights.BaseHostWeigher): - """Weigher that weighs hosts by volume number in backends. - - The default is to spread volumes across all hosts evenly. If you prefer - stacking, you can set the ``volume_number_multiplier`` option to a positive - number and the weighing has the opposite effect of the default. - """ - - def weight_multiplier(self): - """Override the weight multiplier.""" - return CONF.volume_number_multiplier - - def _weigh_object(self, host_state, weight_properties): - """Less volume number weights win. - - We want spreading to be the default. - """ - context = weight_properties['context'] - context = context.elevated() - volume_number = db.volume_data_get_for_host(context=context, - host=host_state.host, - count_only=True) - return volume_number diff --git a/cinder/service.py b/cinder/service.py deleted file mode 100644 index 1bd9b31ad..000000000 --- a/cinder/service.py +++ /dev/null @@ -1,681 +0,0 @@ -# Copyright 2010 United States Government as represented by the -# Administrator of the National Aeronautics and Space Administration. -# Copyright 2011 Justin Santa Barbara -# All Rights Reserved. 
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

"""Generic Node base class for all workers that run on hosts."""


import inspect
import os
import random

from oslo_concurrency import processutils
from oslo_config import cfg
from oslo_db import exception as db_exc
from oslo_log import log as logging
import oslo_messaging as messaging
from oslo_service import loopingcall
from oslo_service import service
from oslo_service import wsgi
from oslo_utils import importutils
# osprofiler is an optional dependency: each piece is imported independently
# and any of these names may be None when the package is not installed.
osprofiler_initializer = importutils.try_import('osprofiler.initializer')
profiler = importutils.try_import('osprofiler.profiler')
profiler_opts = importutils.try_import('osprofiler.opts')


from cinder.backup import rpcapi as backup_rpcapi
from cinder.common import constants
from cinder import context
from cinder import coordination
from cinder import exception
from cinder.i18n import _
from cinder import objects
from cinder.objects import base as objects_base
from cinder.objects import fields
from cinder import rpc
from cinder.scheduler import rpcapi as scheduler_rpcapi
from cinder import version
from cinder.volume import rpcapi as volume_rpcapi
from cinder.volume import utils as vol_utils


LOG = logging.getLogger(__name__)

# Options controlling state reporting, periodic task cadence, and the
# Volume API listener socket.
service_opts = [
    cfg.IntOpt('report_interval',
               default=10,
               help='Interval, in seconds, between nodes reporting state '
                    'to datastore'),
    cfg.IntOpt('periodic_interval',
               default=60,
               help='Interval, in seconds, between running periodic tasks'),
    cfg.IntOpt('periodic_fuzzy_delay',
               default=60,
               help='Range, in seconds, to randomly delay when starting the'
                    ' periodic task scheduler to reduce stampeding.'
                    ' (Disable by setting to 0)'),
    cfg.StrOpt('osapi_volume_listen',
               default="0.0.0.0",
               help='IP address on which OpenStack Volume API listens'),
    cfg.PortOpt('osapi_volume_listen_port',
                default=8776,
                help='Port on which OpenStack Volume API listens'),
    cfg.IntOpt('osapi_volume_workers',
               help='Number of workers for OpenStack Volume API service. '
                    'The default is equal to the number of CPUs available.'),
    cfg.BoolOpt('osapi_volume_use_ssl',
                default=False,
                help='Wraps the socket in a SSL context if True is set. '
                     'A certificate file and key file must be specified.'), ]


CONF = cfg.CONF
CONF.register_opts(service_opts)
if profiler_opts:
    profiler_opts.set_defaults(CONF)


def setup_profiler(binary, host):
    """Initialize osprofiler for this process when enabled in config.

    No-op when osprofiler is not installed or profiling is disabled.

    :param binary: service binary name reported to the profiler
    :param host: host name reported to the profiler
    """
    if (osprofiler_initializer is None or
            profiler is None or
            profiler_opts is None):
        LOG.debug('osprofiler is not present')
        return

    if CONF.profiler.enabled:
        osprofiler_initializer.init_from_conf(
            conf=CONF,
            context=context.get_admin_context().to_dict(),
            project="cinder",
            service=binary,
            host=host
        )
        LOG.warning(
            "OSProfiler is enabled.\nIt means that person who knows "
            "any of hmac_keys that are specified in "
            "/etc/cinder/cinder.conf can trace his requests. \n"
            "In real life only operator can read this file so there "
            "is no security issue. Note that even if person can "
            "trigger profiler, only admin user can retrieve trace "
            "information.\n"
            "To disable OSProfiler set in cinder.conf:\n"
            "[profiler]\nenabled=false")


class Service(service.Service):
    """Service object for binaries running on hosts.

    A service takes a manager and enables rpc by listening to queues based
    on topic. It also periodically runs tasks on the manager and reports
    it state to the database services table.
    """
    # Make service_id a class attribute so it can be used for clean up
    service_id = None

    def __init__(self, host, binary, topic, manager, report_interval=None,
                 periodic_interval=None, periodic_fuzzy_delay=None,
                 service_name=None, coordination=False, cluster=None, *args,
                 **kwargs):
        """Set up the service and create/refresh its Service DB record.

        :param host: host name the service runs on
        :param binary: service binary name (e.g. 'cinder-volume')
        :param topic: RPC topic the service listens on
        :param manager: fully qualified manager class name to import
        :param report_interval: seconds between state reports to the DB
        :param periodic_interval: seconds between periodic task runs
        :param periodic_fuzzy_delay: max random delay before the first
            periodic run
        :param service_name: backend name forwarded to the manager
        :param coordination: whether to start the coordination backend
        :param cluster: cluster name this service belongs to (or None)
        """
        super(Service, self).__init__()

        if not rpc.initialized():
            rpc.init(CONF)

        self.cluster = cluster
        self.host = host
        self.binary = binary
        self.topic = topic
        self.manager_class_name = manager
        self.coordination = coordination
        manager_class = importutils.import_class(self.manager_class_name)
        if CONF.profiler.enabled:
            # Wrap the manager so its RPC entry points emit profiler traces.
            manager_class = profiler.trace_cls("rpc")(manager_class)

        self.service = None
        self.manager = manager_class(host=self.host,
                                     cluster=self.cluster,
                                     service_name=service_name,
                                     *args, **kwargs)
        self.availability_zone = self.manager.availability_zone

        # NOTE(geguileo): We need to create the Service DB entry before we
        # create the manager, otherwise capped versions for serializer and rpc
        # client would use existing DB entries not including us, which could
        # result in us using None (if it's the first time the service is run)
        # or an old version (if this is a normal upgrade of a single service).
        ctxt = context.get_admin_context()
        self.is_upgrading_to_n = self.is_svc_upgrading_to_n(binary)
        try:
            service_ref = objects.Service.get_by_args(ctxt, host, binary)
            service_ref.rpc_current_version = manager_class.RPC_API_VERSION
            obj_version = objects_base.OBJ_VERSIONS.get_current()
            service_ref.object_current_version = obj_version
            # TODO(geguileo): In O we can remove the service upgrading part on
            # the next equation, because by then all our services will be
            # properly setting the cluster during volume migrations since
            # they'll have the new Volume ORM model. But until then we can
            # only set the cluster in the DB and pass added_to_cluster to
            # init_host when we have completed the rolling upgrade from M to
            # N.

            # added_to_cluster attribute marks when we consider that we have
            # just added a host to a cluster so we can include resources into
            # that cluster. We consider that we have added the host when we
            # didn't have data in the cluster DB field and our current
            # configuration has a cluster value. We don't want to do anything
            # automatic if the cluster is changed, in those cases we'll want
            # to use cinder manage command and to it manually.
            self.added_to_cluster = (not service_ref.cluster_name and cluster
                                     and not self.is_upgrading_to_n)

            # TODO(geguileo): In O - Remove self.is_upgrading_to_n part
            if (service_ref.cluster_name != cluster and
                    not self.is_upgrading_to_n):
                LOG.info('This service has been moved from cluster '
                         '%(cluster_svc)s to %(cluster_cfg)s. Resources '
                         'will %(opt_no)sbe moved to the new cluster',
                         {'cluster_svc': service_ref.cluster_name,
                          'cluster_cfg': cluster,
                          'opt_no': '' if self.added_to_cluster else 'NO '})

            if self.added_to_cluster:
                # We pass copy service's disable status in the cluster if we
                # have to create it.
                self._ensure_cluster_exists(ctxt, service_ref)
                service_ref.cluster_name = cluster
            service_ref.save()
            Service.service_id = service_ref.id
        except exception.NotFound:
            # We don't want to include cluster information on the service or
            # create the cluster entry if we are upgrading.
            self._create_service_ref(ctxt, manager_class.RPC_API_VERSION)
            # TODO(geguileo): In O set added_to_cluster to True
            # We don't want to include resources in the cluster during the
            # start while we are still doing the rolling upgrade.
            self.added_to_cluster = not self.is_upgrading_to_n

        self.report_interval = report_interval
        self.periodic_interval = periodic_interval
        self.periodic_fuzzy_delay = periodic_fuzzy_delay
        self.basic_config_check()
        self.saved_args, self.saved_kwargs = args, kwargs
        self.timers = []

        setup_profiler(binary, host)
        # RPC servers are created lazily in start().
        self.rpcserver = None
        self.backend_rpcserver = None
        self.cluster_rpcserver = None

    # TODO(geguileo): Remove method in O since it will no longer be used.
    @staticmethod
    def is_svc_upgrading_to_n(binary):
        """Given an RPC API class determine if the service is upgrading."""
        rpcapis = {'cinder-scheduler': scheduler_rpcapi.SchedulerAPI,
                   'cinder-volume': volume_rpcapi.VolumeAPI,
                   'cinder-backup': backup_rpcapi.BackupAPI}
        rpc_api = rpcapis[binary]
        # If we are pinned to 1.3, then we are upgrading from M to N
        return rpc_api.determine_obj_version_cap() == '1.3'

    def start(self):
        """Start the manager, RPC servers and periodic/reporting loops."""
        version_string = version.version_string()
        LOG.info('Starting %(topic)s node (version %(version_string)s)',
                 {'topic': self.topic, 'version_string': version_string})
        self.model_disconnected = False

        if self.coordination:
            coordination.COORDINATOR.start()

        self.manager.init_host(added_to_cluster=self.added_to_cluster,
                               service_id=Service.service_id)

        LOG.debug("Creating RPC server for service %s", self.topic)

        ctxt = context.get_admin_context()
        endpoints = [self.manager]
        endpoints.extend(self.manager.additional_endpoints)
        # Serializer is capped to the minimum object version known across
        # all services so rolling upgrades keep working.
        obj_version_cap = objects.Service.get_minimum_obj_version(ctxt)
        LOG.debug("Pinning object versions for RPC server serializer to %s",
                  obj_version_cap)
        serializer = objects_base.CinderObjectSerializer(obj_version_cap)

        target = messaging.Target(topic=self.topic, server=self.host)
        self.rpcserver = rpc.get_server(target, endpoints, serializer)
        self.rpcserver.start()

        # NOTE(dulek): Kids, don't do that at home. We're relying here on
        # oslo.messaging implementation details to keep backward compatibility
        # with pre-Ocata services. This will not matter once we drop
        # compatibility with them.
        if self.topic == constants.VOLUME_TOPIC:
            target = messaging.Target(
                topic='%(topic)s.%(host)s' % {'topic': self.topic,
                                              'host': self.host},
                server=vol_utils.extract_host(self.host, 'host'))
            self.backend_rpcserver = rpc.get_server(target, endpoints,
                                                    serializer)
            self.backend_rpcserver.start()

        # TODO(geguileo): In O - Remove the is_svc_upgrading_to_n part
        if self.cluster and not self.is_svc_upgrading_to_n(self.binary):
            LOG.info('Starting %(topic)s cluster %(cluster)s (version '
                     '%(version)s)',
                     {'topic': self.topic, 'version': version_string,
                      'cluster': self.cluster})
            target = messaging.Target(
                topic='%s.%s' % (self.topic, self.cluster),
                server=vol_utils.extract_host(self.cluster, 'host'))
            serializer = objects_base.CinderObjectSerializer(obj_version_cap)
            self.cluster_rpcserver = rpc.get_server(target, endpoints,
                                                    serializer)
            self.cluster_rpcserver.start()

        self.manager.init_host_with_rpc()

        if self.report_interval:
            pulse = loopingcall.FixedIntervalLoopingCall(
                self.report_state)
            pulse.start(interval=self.report_interval,
                        initial_delay=self.report_interval)
            self.timers.append(pulse)

        if self.periodic_interval:
            # Random initial delay avoids all services running their
            # periodic tasks in lockstep.
            if self.periodic_fuzzy_delay:
                initial_delay = random.randint(0, self.periodic_fuzzy_delay)
            else:
                initial_delay = None

            periodic = loopingcall.FixedIntervalLoopingCall(
                self.periodic_tasks)
            periodic.start(interval=self.periodic_interval,
                           initial_delay=initial_delay)
            self.timers.append(periodic)
    def basic_config_check(self):
        """Perform basic config checks before starting service."""
        # Make sure report interval is less than service down time
        if self.report_interval:
            if CONF.service_down_time <= self.report_interval:
                new_down_time = int(self.report_interval * 2.5)
                LOG.warning(
                    "Report interval must be less than service down "
                    "time. Current config service_down_time: "
                    "%(service_down_time)s, report_interval for this: "
                    "service is: %(report_interval)s. Setting global "
                    "service_down_time to: %(new_down_time)s",
                    {'service_down_time': CONF.service_down_time,
                     'report_interval': self.report_interval,
                     'new_down_time': new_down_time})
                CONF.set_override('service_down_time', new_down_time)

    def _ensure_cluster_exists(self, context, service):
        """Create the cluster DB entry if missing and sync replication data.

        When the cluster already exists the service's replication fields are
        made to match the cluster's (unless the service is in an error
        state); when it does not, a new cluster record is created from the
        service's fields.

        :param context: admin request context
        :param service: objects.Service whose fields are read/updated
        """
        if self.cluster:
            try:
                cluster = objects.Cluster.get_by_id(context, None,
                                                    name=self.cluster,
                                                    binary=self.binary)
                # If the cluster already exists, then the service replication
                # fields must match those of the cluster unless the service
                # is in error status.
                error_states = (fields.ReplicationStatus.ERROR,
                                fields.ReplicationStatus.FAILOVER_ERROR)
                if service.replication_status not in error_states:
                    for attr in ('replication_status', 'active_backend_id',
                                 'frozen'):
                        if getattr(service, attr) != getattr(cluster, attr):
                            setattr(service, attr, getattr(cluster, attr))

            except exception.ClusterNotFound:
                # Since the cluster didn't exist, we copy replication fields
                # from the service.
                cluster = objects.Cluster(
                    context=context,
                    name=self.cluster,
                    binary=self.binary,
                    disabled=service.disabled,
                    replication_status=service.replication_status,
                    active_backend_id=service.active_backend_id,
                    frozen=service.frozen)
                try:
                    cluster.create()

                # Race condition occurred and another service created the
                # cluster, so we can continue as it already exists.
                except exception.ClusterExists:
                    pass

    def _create_service_ref(self, context, rpc_version=None):
        """Create this service's DB record and remember its id.

        :param context: admin request context
        :param rpc_version: RPC version to record; defaults to the
            manager's RPC_API_VERSION
        """
        kwargs = {
            'host': self.host,
            'binary': self.binary,
            'topic': self.topic,
            'report_count': 0,
            'availability_zone': self.availability_zone,
            'rpc_current_version': rpc_version or self.manager.RPC_API_VERSION,
            'object_current_version': objects_base.OBJ_VERSIONS.get_current(),
        }
        # TODO(geguileo): In O unconditionally set cluster_name like above
        # If we are upgrading we have to ignore the cluster value
        if not self.is_upgrading_to_n:
            kwargs['cluster_name'] = self.cluster
        service_ref = objects.Service(context=context, **kwargs)
        service_ref.create()
        Service.service_id = service_ref.id
        # TODO(geguileo): In O unconditionally ensure that the cluster exists
        if not self.is_upgrading_to_n:
            self._ensure_cluster_exists(context, service_ref)
            # If we have updated the service_ref with replication data from
            # the cluster it will be saved.
            service_ref.save()

    def __getattr__(self, key):
        # Fall back to the manager for attributes the Service itself does
        # not define (only reached when normal lookup fails).
        manager = self.__dict__.get('manager', None)
        return getattr(manager, key)
    @classmethod
    def create(cls, host=None, binary=None, topic=None, manager=None,
               report_interval=None, periodic_interval=None,
               periodic_fuzzy_delay=None, service_name=None,
               coordination=False, cluster=None):
        """Instantiates class and passes back application object.

        :param host: defaults to CONF.host
        :param binary: defaults to basename of executable
        :param topic: defaults to bin_name - 'cinder-' part
        :param manager: defaults to CONF.<topic>_manager
        :param report_interval: defaults to CONF.report_interval
        :param periodic_interval: defaults to CONF.periodic_interval
        :param periodic_fuzzy_delay: defaults to CONF.periodic_fuzzy_delay
        :param cluster: Defaults to None, as only some services will have it

        """
        if not host:
            host = CONF.host
        if not binary:
            # Derive the binary name from the outermost caller's script file.
            binary = os.path.basename(inspect.stack()[-1][1])
        if not topic:
            topic = binary
        if not manager:
            subtopic = topic.rpartition('cinder-')[2]
            manager = CONF.get('%s_manager' % subtopic, None)
        if report_interval is None:
            report_interval = CONF.report_interval
        if periodic_interval is None:
            periodic_interval = CONF.periodic_interval
        if periodic_fuzzy_delay is None:
            periodic_fuzzy_delay = CONF.periodic_fuzzy_delay
        service_obj = cls(host, binary, topic, manager,
                          report_interval=report_interval,
                          periodic_interval=periodic_interval,
                          periodic_fuzzy_delay=periodic_fuzzy_delay,
                          service_name=service_name,
                          coordination=coordination,
                          cluster=cluster)

        return service_obj

    def stop(self):
        """Stop RPC servers, timers and the coordination backend."""
        # Try to shut the connection down, but if we get any sort of
        # errors, go ahead and ignore them.. as we're shutting down anyway
        try:
            self.rpcserver.stop()
            if self.backend_rpcserver:
                self.backend_rpcserver.stop()
            if self.cluster_rpcserver:
                self.cluster_rpcserver.stop()
        except Exception:
            pass

        # Timers that fail to stop are remembered so wait() can skip them.
        self.timers_skip = []
        for x in self.timers:
            try:
                x.stop()
            except Exception:
                self.timers_skip.append(x)

        if self.coordination:
            try:
                coordination.COORDINATOR.stop()
            except Exception:
                pass
        super(Service, self).stop(graceful=True)

    def wait(self):
        """Block until RPC servers and timers have finished."""
        skip = getattr(self, 'timers_skip', [])
        for x in self.timers:
            if x not in skip:
                try:
                    x.wait()
                except Exception:
                    pass
        if self.rpcserver:
            self.rpcserver.wait()
        if self.backend_rpcserver:
            self.backend_rpcserver.wait()
        if self.cluster_rpcserver:
            self.cluster_rpcserver.wait()
        super(Service, self).wait()

    def periodic_tasks(self, raise_on_error=False):
        """Tasks to be run at a periodic interval."""
        ctxt = context.get_admin_context()
        self.manager.run_periodic_tasks(ctxt, raise_on_error=raise_on_error)
    def report_state(self):
        """Update the state of this service in the datastore."""
        if not self.manager.is_working():
            # NOTE(dulek): If manager reports a problem we're not sending
            # heartbeats - to indicate that service is actually down.
            LOG.error('Manager for service %(binary)s %(host)s is '
                      'reporting problems, not sending heartbeat. '
                      'Service will appear "down".',
                      {'binary': self.binary,
                       'host': self.host})
            return

        ctxt = context.get_admin_context()
        try:
            try:
                service_ref = objects.Service.get_by_id(ctxt,
                                                        Service.service_id)
            except exception.NotFound:
                LOG.debug('The service database object disappeared, '
                          'recreating it.')
                self._create_service_ref(ctxt)
                service_ref = objects.Service.get_by_id(ctxt,
                                                        Service.service_id)

            service_ref.report_count += 1
            if self.availability_zone != service_ref.availability_zone:
                service_ref.availability_zone = self.availability_zone

            service_ref.save()

            # TODO(termie): make this pattern be more elegant.
            if getattr(self, 'model_disconnected', False):
                self.model_disconnected = False
                LOG.error('Recovered model server connection!')

        except db_exc.DBConnectionError:
            if not getattr(self, 'model_disconnected', False):
                self.model_disconnected = True
                LOG.exception('model server went away')

        # NOTE(jsbryant) Other DB errors can happen in HA configurations.
        # such errors shouldn't kill this thread, so we handle them here.
        except db_exc.DBError:
            if not getattr(self, 'model_disconnected', False):
                self.model_disconnected = True
                LOG.exception('DBError encountered: ')

        except Exception:
            if not getattr(self, 'model_disconnected', False):
                self.model_disconnected = True
                LOG.exception('Exception encountered: ')

    def reset(self):
        """Reset the manager and the base service state."""
        self.manager.reset()
        super(Service, self).reset()


class WSGIService(service.ServiceBase):
    """Provides ability to launch API from a 'paste' configuration."""

    def __init__(self, name, loader=None):
        """Initialize, but do not start the WSGI server.

        :param name: The name of the WSGI server given to the loader.
        :param loader: Loads the WSGI application using the given name.
        :returns: None

        """
        self.name = name
        self.manager = self._get_manager()
        self.loader = loader or wsgi.Loader(CONF)
        self.app = self.loader.load_app(name)
        # Listener settings are looked up from per-service config options
        # named after this service (e.g. osapi_volume_listen).
        self.host = getattr(CONF, '%s_listen' % name, "0.0.0.0")
        self.port = getattr(CONF, '%s_listen_port' % name, 0)
        self.use_ssl = getattr(CONF, '%s_use_ssl' % name, False)
        self.workers = (getattr(CONF, '%s_workers' % name, None) or
                        processutils.get_worker_count())
        if self.workers and self.workers < 1:
            worker_name = '%s_workers' % name
            msg = (_("%(worker_name)s value of %(workers)d is invalid, "
                     "must be greater than 0.") %
                   {'worker_name': worker_name,
                    'workers': self.workers})
            raise exception.InvalidInput(msg)
        setup_profiler(name, self.host)

        self.server = wsgi.Server(CONF,
                                  name,
                                  self.app,
                                  host=self.host,
                                  port=self.port,
                                  use_ssl=self.use_ssl)

    def _get_manager(self):
        """Initialize a Manager object appropriate for this service.

        Use the service name to look up a Manager subclass from the
        configuration and initialize an instance. If no class name
        is configured, just return None.

        :returns: a Manager instance, or None.

        """
        fl = '%s_manager' % self.name
        if fl not in CONF:
            return None

        manager_class_name = CONF.get(fl, None)
        if not manager_class_name:
            return None

        manager_class = importutils.import_class(manager_class_name)
        return manager_class()

    def start(self):
        """Start serving this service using loaded configuration.

        Also, retrieve updated port number in case '0' was passed in, which
        indicates a random port should be used.

        :returns: None

        """
        if self.manager:
            self.manager.init_host()
        self.server.start()
        self.port = self.server.port

    def stop(self):
        """Stop serving this API.

        :returns: None

        """
        self.server.stop()

    def wait(self):
        """Wait for the service to stop serving this API.

        :returns: None

        """
        self.server.wait()
- - :returns: None - - """ - self.server.reset() - - -def process_launcher(): - return service.ProcessLauncher(CONF, restart_method='mutate') - - -# NOTE(vish): the global launcher is to maintain the existing -# functionality of calling service.serve + -# service.wait -_launcher = None - - -def serve(server, workers=None): - global _launcher - if _launcher: - raise RuntimeError(_('serve() can only be called once')) - - _launcher = service.launch(CONF, server, workers=workers) - - -def wait(): - LOG.debug('Full set of CONF:') - for flag in CONF: - flag_get = CONF.get(flag, None) - # hide flag contents from log if contains a password - # should use secret flag when switch over to openstack-common - if ("_password" in flag or "_key" in flag or - (flag == "sql_connection" and - ("mysql:" in flag_get or "postgresql:" in flag_get))): - LOG.debug('%s : FLAG SET ', flag) - else: - LOG.debug('%(flag)s : %(flag_get)s', - {'flag': flag, 'flag_get': flag_get}) - try: - _launcher.wait() - except KeyboardInterrupt: - _launcher.stop() - rpc.cleanup() - - -class Launcher(object): - def __init__(self): - self.launch_service = serve - self.wait = wait - - -def get_launcher(): - # Note(lpetrut): ProcessLauncher uses green pipes which fail on Windows - # due to missing support of non-blocking I/O pipes. For this reason, the - # service must be spawned differently on Windows, using the ServiceLauncher - # class instead. - if os.name == 'nt': - return Launcher() - else: - return process_launcher() diff --git a/cinder/ssh_utils.py b/cinder/ssh_utils.py deleted file mode 100644 index c2fad9697..000000000 --- a/cinder/ssh_utils.py +++ /dev/null @@ -1,185 +0,0 @@ -# Copyright 2010 United States Government as represented by the -# Administrator of the National Aeronautics and Space Administration. -# Copyright 2011 Justin Santa Barbara -# Copyright 2014 Red Hat, Inc. -# All Rights Reserved. 
-# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -"""Utilities related to SSH connection management.""" - -import os - -from eventlet import pools -from oslo_config import cfg -from oslo_log import log as logging -from oslo_utils import excutils -import paramiko -import six - -from cinder import exception -from cinder.i18n import _ - -LOG = logging.getLogger(__name__) - -ssh_opts = [ - cfg.BoolOpt('strict_ssh_host_key_policy', - default=False, - help='Option to enable strict host key checking. When ' - 'set to "True" Cinder will only connect to systems ' - 'with a host key present in the configured ' - '"ssh_hosts_key_file". When set to "False" the host key ' - 'will be saved upon first connection and used for ' - 'subsequent connections. Default=False'), - cfg.StrOpt('ssh_hosts_key_file', - default='$state_path/ssh_known_hosts', - help='File containing SSH host keys for the systems with which ' - 'Cinder needs to communicate. OPTIONAL: ' - 'Default=$state_path/ssh_known_hosts'), -] - -CONF = cfg.CONF -CONF.register_opts(ssh_opts) - - -class SSHPool(pools.Pool): - """A simple eventlet pool to hold ssh connections.""" - - def __init__(self, ip, port, conn_timeout, login, password=None, - privatekey=None, *args, **kwargs): - self.ip = ip - self.port = port - self.login = login - self.password = password - self.conn_timeout = conn_timeout if conn_timeout else None - self.privatekey = privatekey - self.hosts_key_file = None - - # Validate good config setting here. 
- # Paramiko handles the case where the file is inaccessible. - if not CONF.ssh_hosts_key_file: - raise exception.ParameterNotFound(param='ssh_hosts_key_file') - elif not os.path.isfile(CONF.ssh_hosts_key_file): - # If using the default path, just create the file. - if CONF.state_path in CONF.ssh_hosts_key_file: - open(CONF.ssh_hosts_key_file, 'a').close() - else: - msg = (_("Unable to find ssh_hosts_key_file: %s") % - CONF.ssh_hosts_key_file) - raise exception.InvalidInput(reason=msg) - - if 'hosts_key_file' in kwargs.keys(): - self.hosts_key_file = kwargs.pop('hosts_key_file') - LOG.info("Secondary ssh hosts key file %(kwargs)s will be " - "loaded along with %(conf)s from /etc/cinder.conf.", - {'kwargs': self.hosts_key_file, - 'conf': CONF.ssh_hosts_key_file}) - - LOG.debug("Setting strict_ssh_host_key_policy to '%(policy)s' " - "using ssh_hosts_key_file '%(key_file)s'.", - {'policy': CONF.strict_ssh_host_key_policy, - 'key_file': CONF.ssh_hosts_key_file}) - - self.strict_ssh_host_key_policy = CONF.strict_ssh_host_key_policy - - if not self.hosts_key_file: - self.hosts_key_file = CONF.ssh_hosts_key_file - else: - self.hosts_key_file += ',' + CONF.ssh_hosts_key_file - - super(SSHPool, self).__init__(*args, **kwargs) - - def create(self): - try: - ssh = paramiko.SSHClient() - if ',' in self.hosts_key_file: - files = self.hosts_key_file.split(',') - for f in files: - ssh.load_host_keys(f) - else: - ssh.load_host_keys(self.hosts_key_file) - # If strict_ssh_host_key_policy is set we want to reject, by - # default if there is not entry in the known_hosts file. - # Otherwise we use AutoAddPolicy which accepts on the first - # Connect but fails if the keys change. load_host_keys can - # handle hashed known_host entries. 
- if self.strict_ssh_host_key_policy: - ssh.set_missing_host_key_policy(paramiko.RejectPolicy()) - else: - ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy()) - - if self.password: - ssh.connect(self.ip, - port=self.port, - username=self.login, - password=self.password, - timeout=self.conn_timeout) - elif self.privatekey: - pkfile = os.path.expanduser(self.privatekey) - privatekey = paramiko.RSAKey.from_private_key_file(pkfile) - ssh.connect(self.ip, - port=self.port, - username=self.login, - pkey=privatekey, - timeout=self.conn_timeout) - else: - msg = _("Specify a password or private_key") - raise exception.CinderException(msg) - - # Paramiko by default sets the socket timeout to 0.1 seconds, - # ignoring what we set through the sshclient. This doesn't help for - # keeping long lived connections. Hence we have to bypass it, by - # overriding it after the transport is initialized. We are setting - # the sockettimeout to None and setting a keepalive packet so that, - # the server will keep the connection open. All that does is send - # a keepalive packet every ssh_conn_timeout seconds. - if self.conn_timeout: - transport = ssh.get_transport() - transport.sock.settimeout(None) - transport.set_keepalive(self.conn_timeout) - return ssh - except Exception as e: - msg = _("Error connecting via ssh: %s") % six.text_type(e) - LOG.error(msg) - raise paramiko.SSHException(msg) - - def get(self): - """Return an item from the pool, when one is available. - - This may cause the calling greenthread to block. Check if a - connection is active before returning it. - - For dead connections create and return a new connection. 
- """ - conn = super(SSHPool, self).get() - if conn: - if conn.get_transport().is_active(): - return conn - else: - conn.close() - try: - new_conn = self.create() - except Exception: - LOG.error("Create new item in SSHPool failed.") - with excutils.save_and_reraise_exception(): - if conn: - self.current_size -= 1 - return new_conn - - def remove(self, ssh): - """Close an ssh client and remove it from free_items.""" - ssh.close() - if ssh in self.free_items: - self.free_items.remove(ssh) - if self.current_size > 0: - self.current_size -= 1 diff --git a/cinder/test.py b/cinder/test.py deleted file mode 100644 index d0821be63..000000000 --- a/cinder/test.py +++ /dev/null @@ -1,482 +0,0 @@ -# Copyright 2010 United States Government as represented by the -# Administrator of the National Aeronautics and Space Administration. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -"""Base classes for our unit tests. - -Allows overriding of CONF for use of fakes, and some black magic for -inline callbacks. 
- -""" -import copy -import logging -import os -import uuid - -import fixtures -import mock -from oslo_concurrency import lockutils -from oslo_config import cfg -from oslo_config import fixture as config_fixture -from oslo_log.fixture import logging_error as log_fixture -import oslo_messaging -from oslo_messaging import conffixture as messaging_conffixture -from oslo_serialization import jsonutils -from oslo_utils import strutils -from oslo_utils import timeutils -from oslotest import moxstubout -import six -import testtools - -from cinder.common import config # noqa Need to register global_opts -from cinder import context -from cinder import coordination -from cinder.db import migration -from cinder.db.sqlalchemy import api as sqla_api -from cinder import i18n -from cinder.objects import base as objects_base -from cinder import rpc -from cinder import service -from cinder.tests import fixtures as cinder_fixtures -from cinder.tests.unit import conf_fixture -from cinder.tests.unit import fake_notifier -from cinder.volume import utils - - -CONF = cfg.CONF - -_DB_CACHE = None - - -class TestingException(Exception): - pass - - -class Database(fixtures.Fixture): - - def __init__(self, db_api, db_migrate, sql_connection): - self.sql_connection = sql_connection - - # Suppress logging for test runs - migrate_logger = logging.getLogger('migrate') - migrate_logger.setLevel(logging.WARNING) - - self.engine = db_api.get_engine() - self.engine.dispose() - conn = self.engine.connect() - db_migrate.db_sync() - self._DB = "".join(line for line in conn.connection.iterdump()) - self.engine.dispose() - - def setUp(self): - super(Database, self).setUp() - - conn = self.engine.connect() - conn.connection.executescript(self._DB) - self.addCleanup(self.engine.dispose) - - -class TestCase(testtools.TestCase): - """Test case base class for all unit tests.""" - - POLICY_PATH = 'cinder/tests/unit/policy.json' - MOCK_WORKER = True - MOCK_TOOZ = True - - def _get_joined_notifier(self, *args, 
**kwargs): - # We create a new fake notifier but we join the notifications with - # the default notifier - notifier = fake_notifier.get_fake_notifier(*args, **kwargs) - notifier.notifications = self.notifier.notifications - return notifier - - def setUp(self): - """Run before each test method to initialize test environment.""" - super(TestCase, self).setUp() - - # Create default notifier - self.notifier = fake_notifier.get_fake_notifier() - - # Mock rpc get notifier with fake notifier method that joins all - # notifications with the default notifier - self.patch('cinder.rpc.get_notifier', - side_effect=self._get_joined_notifier) - - if self.MOCK_WORKER: - # Mock worker creation for all tests that don't care about it - clean_path = 'cinder.objects.cleanable.CinderCleanableObject.%s' - for method in ('create_worker', 'set_worker', 'unset_worker'): - self.patch(clean_path % method, return_value=None) - - if self.MOCK_TOOZ: - self.patch('cinder.coordination.Coordinator.start') - self.patch('cinder.coordination.Coordinator.stop') - self.patch('cinder.coordination.Coordinator.get_lock') - - # Unit tests do not need to use lazy gettext - i18n.enable_lazy(False) - - test_timeout = os.environ.get('OS_TEST_TIMEOUT', 0) - try: - test_timeout = int(test_timeout) - except ValueError: - # If timeout value is invalid do not set a timeout. 
- test_timeout = 0 - if test_timeout > 0: - self.useFixture(fixtures.Timeout(test_timeout, gentle=True)) - self.useFixture(fixtures.NestedTempfile()) - self.useFixture(fixtures.TempHomeDir()) - - environ_enabled = (lambda var_name: - strutils.bool_from_string(os.environ.get(var_name))) - if environ_enabled('OS_STDOUT_CAPTURE'): - stdout = self.useFixture(fixtures.StringStream('stdout')).stream - self.useFixture(fixtures.MonkeyPatch('sys.stdout', stdout)) - if environ_enabled('OS_STDERR_CAPTURE'): - stderr = self.useFixture(fixtures.StringStream('stderr')).stream - self.useFixture(fixtures.MonkeyPatch('sys.stderr', stderr)) - - self.useFixture(log_fixture.get_logging_handle_error_fixture()) - self.useFixture(cinder_fixtures.StandardLogging()) - - rpc.add_extra_exmods("cinder.tests.unit") - self.addCleanup(rpc.clear_extra_exmods) - self.addCleanup(rpc.cleanup) - - self.messaging_conf = messaging_conffixture.ConfFixture(CONF) - self.messaging_conf.transport_driver = 'fake' - self.messaging_conf.response_timeout = 15 - self.useFixture(self.messaging_conf) - - # Load oslo_messaging_notifications config group so we can set an - # override to prevent notifications from being ignored due to the - # short-circuit mechanism. - oslo_messaging.get_notification_transport(CONF) - # We need to use a valid driver for the notifications, so we use test. - self.override_config('driver', ['test'], - group='oslo_messaging_notifications') - rpc.init(CONF) - - # NOTE(geguileo): This is required because _determine_obj_version_cap - # and _determine_rpc_version_cap functions in cinder.rpc.RPCAPI cache - # versions in LAST_RPC_VERSIONS and LAST_OBJ_VERSIONS so we may have - # weird interactions between tests if we don't clear them before each - # test. 
- rpc.LAST_OBJ_VERSIONS = {} - rpc.LAST_RPC_VERSIONS = {} - - conf_fixture.set_defaults(CONF) - CONF([], default_config_files=[]) - - # NOTE(vish): We need a better method for creating fixtures for tests - # now that we have some required db setup for the system - # to work properly. - self.start = timeutils.utcnow() - - CONF.set_default('connection', 'sqlite://', 'database') - CONF.set_default('sqlite_synchronous', False, 'database') - - global _DB_CACHE - if not _DB_CACHE: - _DB_CACHE = Database(sqla_api, migration, - sql_connection=CONF.database.connection) - self.useFixture(_DB_CACHE) - - # NOTE(danms): Make sure to reset us back to non-remote objects - # for each test to avoid interactions. Also, backup the object - # registry. - objects_base.CinderObject.indirection_api = None - self._base_test_obj_backup = copy.copy( - objects_base.CinderObjectRegistry._registry._obj_classes) - self.addCleanup(self._restore_obj_registry) - - # emulate some of the mox stuff, we can't use the metaclass - # because it screws with our generators - mox_fixture = self.useFixture(moxstubout.MoxStubout()) - self.mox = mox_fixture.mox - self.stubs = mox_fixture.stubs - self.addCleanup(CONF.reset) - self.addCleanup(self._common_cleanup) - self.injected = [] - self._services = [] - - fake_notifier.mock_notifier(self) - - self.override_config('fatal_exception_format_errors', True) - # This will be cleaned up by the NestedTempfile fixture - lock_path = self.useFixture(fixtures.TempDir()).path - self.fixture = self.useFixture( - config_fixture.Config(lockutils.CONF)) - self.fixture.config(lock_path=lock_path, - group='oslo_concurrency') - lockutils.set_defaults(lock_path) - self.override_config('policy_file', - os.path.join( - os.path.abspath( - os.path.join( - os.path.dirname(__file__), - '..', - ) - ), - self.POLICY_PATH), - group='oslo_policy') - - self._disable_osprofiler() - self._disallow_invalid_uuids() - - # NOTE(geguileo): This is required because common get_by_id method in - # 
cinder.db.sqlalchemy.api caches get methods and if we use a mocked - # get method in one test it would carry on to the next test. So we - # clear out the cache. - sqla_api._GET_METHODS = {} - - self.override_config('backend_url', 'file://' + lock_path, - group='coordination') - coordination.COORDINATOR.start() - self.addCleanup(coordination.COORDINATOR.stop) - - def _restore_obj_registry(self): - objects_base.CinderObjectRegistry._registry._obj_classes = \ - self._base_test_obj_backup - - def _disable_osprofiler(self): - """Disable osprofiler. - - osprofiler should not run for unit tests. - """ - - side_effect = lambda value: value - mock_decorator = mock.MagicMock(side_effect=side_effect) - p = mock.patch("osprofiler.profiler.trace_cls", - return_value=mock_decorator) - p.start() - - def _disallow_invalid_uuids(self): - def catch_uuid_warning(message, *args, **kwargs): - ovo_message = "invalid UUID. Using UUIDFields with invalid UUIDs " \ - "is no longer supported" - if ovo_message in message: - raise AssertionError(message) - - p = mock.patch("warnings.warn", - side_effect=catch_uuid_warning) - p.start() - - def _common_cleanup(self): - """Runs after each test method to tear down test environment.""" - - # Stop any timers - for x in self.injected: - try: - x.stop() - except AssertionError: - pass - - # Kill any services - for x in self._services: - try: - x.kill() - except Exception: - pass - - # Delete attributes that don't start with _ so they don't pin - # memory around unnecessarily for the duration of the test - # suite - for key in [k for k in self.__dict__.keys() if k[0] != '_']: - del self.__dict__[key] - - def override_config(self, name, override, group=None): - """Cleanly override CONF variables.""" - CONF.set_override(name, override, group) - self.addCleanup(CONF.clear_override, name, group) - - def flags(self, **kw): - """Override CONF variables for a test.""" - for k, v in kw.items(): - self.override_config(k, v) - - def start_service(self, name, 
host=None, **kwargs): - host = host if host else uuid.uuid4().hex - kwargs.setdefault('host', host) - kwargs.setdefault('binary', 'cinder-%s' % name) - svc = service.Service.create(**kwargs) - svc.start() - self._services.append(svc) - return svc - - def mock_object(self, obj, attr_name, *args, **kwargs): - """Use python mock to mock an object attribute - - Mocks the specified objects attribute with the given value. - Automatically performs 'addCleanup' for the mock. - - """ - patcher = mock.patch.object(obj, attr_name, *args, **kwargs) - result = patcher.start() - self.addCleanup(patcher.stop) - return result - - def patch(self, path, *args, **kwargs): - """Use python mock to mock a path with automatic cleanup.""" - patcher = mock.patch(path, *args, **kwargs) - result = patcher.start() - self.addCleanup(patcher.stop) - return result - - # Useful assertions - def assert_notify_called(self, mock_notify, calls): - for i in range(0, len(calls)): - mock_call = mock_notify.call_args_list[i] - call = calls[i] - - posargs = mock_call[0] - - self.assertEqual(call[0], posargs[0]) - self.assertEqual(call[1], posargs[2]) - - def assertTrue(self, x, *args, **kwargs): - if isinstance(x, six.string_types): - raise AssertionError("%s (%s) is a string. Use a more " - "specific assertion such as assertEqual." % - (x, type(x))) - super(TestCase, self).assertTrue(x, *args, **kwargs) - - -class ModelsObjectComparatorMixin(object): - def _dict_from_object(self, obj, ignored_keys): - if ignored_keys is None: - ignored_keys = [] - obj = jsonutils.to_primitive(obj) # Convert to dict first. 
- items = obj.items() - return {k: v for k, v in items - if k not in ignored_keys} - - def _assertEqualObjects(self, obj1, obj2, ignored_keys=None): - obj1 = self._dict_from_object(obj1, ignored_keys) - obj2 = self._dict_from_object(obj2, ignored_keys) - - self.assertEqual( - len(obj1), len(obj2), - "Keys mismatch: %s" % six.text_type( - set(obj1.keys()) ^ set(obj2.keys()))) - for key, value in obj1.items(): - self.assertEqual(value, obj2[key]) - - def _assertEqualListsOfObjects(self, objs1, objs2, ignored_keys=None, - msg=None): - obj_to_dict = lambda o: self._dict_from_object(o, ignored_keys) - objs1 = map(obj_to_dict, objs1) - objs2 = list(map(obj_to_dict, objs2)) - # We don't care about the order of the lists, as long as they are in - for obj1 in objs1: - self.assertIn(obj1, objs2) - objs2.remove(obj1) - self.assertEqual([], objs2) - - def _assertEqualListsOfPrimitivesAsSets(self, primitives1, primitives2): - self.assertEqual(len(primitives1), len(primitives2)) - for primitive in primitives1: - self.assertIn(primitive, primitives2) - - for primitive in primitives2: - self.assertIn(primitive, primitives1) - - -class RPCAPITestCase(TestCase, ModelsObjectComparatorMixin): - def setUp(self): - super(RPCAPITestCase, self).setUp() - self.context = context.get_admin_context() - self.rpcapi = None - self.base_version = '2.0' - - def _test_rpc_api(self, method, rpc_method, server=None, fanout=False, - version=None, expected_method=None, - expected_kwargs_diff=None, retval=None, - expected_retval=None, **kwargs): - """Runs a test against RPC API method. - - :param method: Name of RPC API method. - :param rpc_method: Expected RPC message type (cast or call). - :param server: Expected hostname. - :param fanout: True if expected call/cast should be fanout. - :param version: Expected autocalculated RPC API version. - :param expected_method: Expected RPC method name. 
- :param expected_kwargs_diff: Map of expected changes between keyword - arguments passed into the method and sent - over RPC. - :param retval: Value returned by RPC call/cast. - :param expected_retval: Expected RPC API response (if different than - retval). - :param kwargs: Parameters passed into the RPC API method. - """ - - rpcapi = self.rpcapi() - expected_kwargs_diff = expected_kwargs_diff or {} - version = version or self.base_version - topic = None - if server is not None: - backend = utils.extract_host(server) - server = utils.extract_host(server, 'host') - topic = 'cinder-volume.%s' % backend - - if expected_method is None: - expected_method = method - - if expected_retval is None: - expected_retval = retval - - target = { - "server": server, - "fanout": fanout, - "version": version, - "topic": topic, - } - - # Initially we expect that we'll pass same arguments to RPC API method - # and RPC call/cast... - expected_msg = copy.deepcopy(kwargs) - # ... but here we're taking exceptions into account. - expected_msg.update(expected_kwargs_diff) - - def _fake_prepare_method(*args, **kwds): - # This is checking if target will be properly created. - for kwd in kwds: - self.assertEqual(target[kwd], kwds[kwd]) - return rpcapi.client - - def _fake_rpc_method(*args, **kwargs): - # This checks if positional arguments passed to RPC method match. - self.assertEqual((self.context, expected_method), args) - - # This checks if keyword arguments passed to RPC method match. - for kwarg, value in kwargs.items(): - # Getting possible changes into account. - if isinstance(value, objects_base.CinderObject): - # We need to compare objects differently. - self._assertEqualObjects(expected_msg[kwarg], value) - else: - self.assertEqual(expected_msg[kwarg], value) - - # Returning fake value we're supposed to return. - if retval: - return retval - - # Enable mocks that will check everything and run RPC method. 
- with mock.patch.object(rpcapi.client, "prepare", - side_effect=_fake_prepare_method): - with mock.patch.object(rpcapi.client, rpc_method, - side_effect=_fake_rpc_method): - real_retval = getattr(rpcapi, method)(self.context, **kwargs) - self.assertEqual(expected_retval, real_retval) diff --git a/cinder/tests/README.rst b/cinder/tests/README.rst deleted file mode 100644 index afdc5d2fd..000000000 --- a/cinder/tests/README.rst +++ /dev/null @@ -1,8 +0,0 @@ -IMPORTANT DEFINITION OF TESTS IN CINDER -======================================= - -Cinder has a number of different test types, PLEASE be sure to refer -to the Cinder Testing Docs to familiarize yourself with the various -options before creating any new tests. - -Please check `Cinder-Testing `_. diff --git a/cinder/tests/__init__.py b/cinder/tests/__init__.py deleted file mode 100644 index e69de29bb..000000000 diff --git a/cinder/tests/compliance/__init__.py b/cinder/tests/compliance/__init__.py deleted file mode 100644 index e69de29bb..000000000 diff --git a/cinder/tests/compliance/test_backup_drivers.py b/cinder/tests/compliance/test_backup_drivers.py deleted file mode 100644 index bad607a07..000000000 --- a/cinder/tests/compliance/test_backup_drivers.py +++ /dev/null @@ -1,45 +0,0 @@ -# Copyright 2016 Dell Inc. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
-# - -import ddt - -from cinder.interface import backup_driver -from cinder.interface import util -from cinder import test - -BACKUP_DRIVERS = util.get_backup_drivers() - - -@ddt.ddt -class TestBackupDrivers(test.TestCase): - - def test_backup_driver_decorator(self): - """Sanity check on the decorator. - - The interface code is somewhat implicitly tested. We don't need unit - tests for all of that code, but as a minimum we should make sure it - returns at least one registered driver, else the compliance test will - never even run. - """ - self.assertGreater(len(BACKUP_DRIVERS), 0) - - @ddt.data(*BACKUP_DRIVERS) - def test_backup_driver_compliance(self, driver): - """Makes sure all backup drivers support the minimum requirements.""" - self.assertTrue( - issubclass(driver.cls, backup_driver.BackupDriver), - "Driver {} does not conform to minimum backup driver " - "requirements!".format(driver.class_fqn)) diff --git a/cinder/tests/compliance/test_fczm_drivers.py b/cinder/tests/compliance/test_fczm_drivers.py deleted file mode 100644 index 95d39b966..000000000 --- a/cinder/tests/compliance/test_fczm_drivers.py +++ /dev/null @@ -1,45 +0,0 @@ -# Copyright 2016 Dell Inc. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
-# - -import ddt - -from cinder.interface import fczm_driver -from cinder.interface import util -from cinder import test - -FCZM_DRIVERS = util.get_fczm_drivers() - - -@ddt.ddt -class TestFibreChannelZoneManagerDrivers(test.TestCase): - - def test_fczm_driver_decorator(self): - """Sanity check on the decorator. - - The interface code is somewhat implicitly tested. We don't need unit - tests for all of that code, but as a minimum we should make sure it - returns at least one registered driver, else the compliance test will - never even run. - """ - self.assertGreater(len(FCZM_DRIVERS), 0) - - @ddt.data(*FCZM_DRIVERS) - def test_fczm_driver_compliance(self, driver): - """Makes sure all fczm drivers support the minimum requirements.""" - self.assertTrue( - issubclass(driver.cls, fczm_driver.FibreChannelZoneManagerDriver), - "Driver {} does not conform to minimum fczm driver " - "requirements!".format(driver.class_fqn)) diff --git a/cinder/tests/compliance/test_volume_drivers.py b/cinder/tests/compliance/test_volume_drivers.py deleted file mode 100644 index 5bfc0b131..000000000 --- a/cinder/tests/compliance/test_volume_drivers.py +++ /dev/null @@ -1,44 +0,0 @@ -# Copyright 2016 Dell Inc. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
-# - -import ddt - -from cinder.interface import util -from cinder.interface import volume_driver -from cinder import test - -VOLUME_DRIVERS = util.get_volume_drivers() - - -@ddt.ddt -class TestVolumeDrivers(test.TestCase): - - def test_volume_driver_decorator(self): - """Sanity check on the decorator. - - The interface code is somewhat implicitly tested. We don't need unit - tests for all of that code, but as a minimum we should make sure it - returns at least one registered driver, else the compliance test will - never even run. - """ - self.assertGreater(len(VOLUME_DRIVERS), 0) - - @ddt.data(*VOLUME_DRIVERS) - def test_volume_driver_compliance(self, driver): - self.assertTrue( - issubclass(driver.cls, volume_driver.VolumeDriverCore), - "Driver {} does not conform to minimum volume driver " - "requirements!".format(driver.class_fqn)) diff --git a/cinder/tests/fake_driver.py b/cinder/tests/fake_driver.py deleted file mode 100644 index f3932d357..000000000 --- a/cinder/tests/fake_driver.py +++ /dev/null @@ -1,399 +0,0 @@ -# Copyright 2012 OpenStack Foundation -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -from oslo_utils import timeutils - -from cinder import exception -from cinder import objects -from cinder.objects import fields -from cinder.tests.unit.brick import fake_lvm -from cinder import utils -from cinder.volume import driver -from cinder.volume.drivers import lvm -from cinder.volume import utils as vol_utils -from cinder.zonemanager import utils as fczm_utils - - -# TODO(e0ne): inherit from driver.VolumeDriver and fix unit-tests -class FakeLoggingVolumeDriver(lvm.LVMVolumeDriver): - """Logs calls instead of executing.""" - def __init__(self, *args, **kwargs): - super(FakeLoggingVolumeDriver, self).__init__( - execute=self.fake_execute, *args, **kwargs) - - self.backend_name = 'fake' - self.protocol = 'fake' - self.vg = fake_lvm.FakeBrickLVM('cinder-volumes', False, - None, 'default', - self.fake_execute) - - @utils.trace_method - def check_for_setup_error(self): - """No setup necessary in fake mode.""" - pass - - @utils.trace_method - def create_volume(self, volume): - """Creates a volume.""" - super(FakeLoggingVolumeDriver, self).create_volume(volume) - model_update = {} - try: - if (volume.volume_type and volume.volume_type.extra_specs and - vol_utils.is_replicated_spec( - volume.volume_type.extra_specs)): - # Sets the new volume's replication_status to disabled - model_update['replication_status'] = ( - fields.ReplicationStatus.DISABLED) - except exception.VolumeTypeNotFound: - pass - if model_update: - return model_update - - @utils.trace_method - def delete_volume(self, volume): - pass - - @utils.trace_method - def create_snapshot(self, snapshot): - pass - - @utils.trace_method - def delete_snapshot(self, snapshot): - pass - - @utils.trace_method - def ensure_export(self, context, volume): - pass - - @utils.trace_method - def create_export(self, context, volume, connector): - pass - - @utils.trace_method - def remove_export(self, context, volume): - pass - - @utils.trace_method - def create_export_snapshot(self, context, snapshot): - pass - - 
@utils.trace_method - def remove_export_snapshot(self, context, snapshot): - pass - - @utils.trace_method - def terminate_connection_snapshot(self, snapshot, connector): - pass - - @utils.trace_method - def create_cloned_volume(self, volume, src_vol): - pass - - @utils.trace_method - def create_volume_from_snapshot(self, volume, snapshot): - pass - - @utils.trace_method - def initialize_connection(self, volume, connector): - # NOTE(thangp): There are several places in the core cinder code where - # the volume passed through is a dict and not an oslo_versionedobject. - # We need to react appropriately to what type of volume is passed in, - # until the switch over to oslo_versionedobjects is complete. - if isinstance(volume, objects.Volume): - volume_metadata = volume.admin_metadata - else: - volume_metadata = {} - for metadata in volume['volume_admin_metadata']: - volume_metadata[metadata['key']] = metadata['value'] - - access_mode = volume_metadata.get('attached_mode') - if access_mode is None: - access_mode = ('ro' - if volume_metadata.get('readonly') == 'True' - else 'rw') - - return {'driver_volume_type': 'iscsi', - 'data': {'access_mode': access_mode}} - - @utils.trace_method - def initialize_connection_snapshot(self, snapshot, connector): - return { - 'driver_volume_type': 'iscsi', - } - - @utils.trace_method - def terminate_connection(self, volume, connector, **kwargs): - pass - - # Replication Group (Tiramisu) - @utils.trace_method - def enable_replication(self, context, group, volumes): - """Enables replication for a group and volumes in the group.""" - model_update = { - 'replication_status': fields.ReplicationStatus.ENABLED} - volume_model_updates = [] - for volume_ref in volumes: - volume_model_update = {'id': volume_ref.id} - volume_model_update['replication_status'] = ( - fields.ReplicationStatus.ENABLED) - volume_model_updates.append(volume_model_update) - - return model_update, volume_model_updates - - # Replication Group (Tiramisu) - 
@utils.trace_method - def disable_replication(self, context, group, volumes): - """Disables replication for a group and volumes in the group.""" - model_update = { - 'replication_status': fields.ReplicationStatus.DISABLED} - volume_model_updates = [] - for volume_ref in volumes: - volume_model_update = {'id': volume_ref.id} - volume_model_update['replication_status'] = ( - fields.ReplicationStatus.DISABLED) - volume_model_updates.append(volume_model_update) - - return model_update, volume_model_updates - - # Replication Group (Tiramisu) - @utils.trace_method - def failover_replication(self, context, group, volumes, - secondary_backend_id=None): - """Fails over replication for a group and volumes in the group.""" - model_update = { - 'replication_status': fields.ReplicationStatus.FAILED_OVER} - volume_model_updates = [] - for volume_ref in volumes: - volume_model_update = {'id': volume_ref.id} - volume_model_update['replication_status'] = ( - fields.ReplicationStatus.FAILED_OVER) - volume_model_updates.append(volume_model_update) - - return model_update, volume_model_updates - - # Replication Group (Tiramisu) - @utils.trace_method - def create_group(self, context, group): - """Creates a group.""" - model_update = super(FakeLoggingVolumeDriver, self).create_group( - context, group) - try: - if group.is_replicated: - # Sets the new group's replication_status to disabled - model_update['replication_status'] = ( - fields.ReplicationStatus.DISABLED) - except exception.GroupTypeNotFound: - pass - - return model_update - - def _update_volume_stats(self): - data = {'volume_backend_name': self.backend_name, - 'vendor_name': 'Open Source', - 'driver_version': self.VERSION, - 'storage_protocol': self.protocol, - 'pools': []} - - fake_pool = {'pool_name': data['volume_backend_name'], - 'total_capacity_gb': 'infinite', - 'free_capacity_gb': 'infinite', - 'provisioned_capacity_gb': 0, - 'reserved_percentage': 100, - 'QoS_support': False, - 'filter_function': 
self.get_filter_function(), - 'goodness_function': self.get_goodness_function(), - 'consistencygroup_support': False, - 'replication_enabled': True, - 'group_replication_enabled': True, } - - data['pools'].append(fake_pool) - self._stats = data - - @staticmethod - def fake_execute(cmd, *_args, **_kwargs): - """Execute that simply logs the command.""" - return (None, None) - - -class FakeISERDriver(FakeLoggingVolumeDriver): - def __init__(self, *args, **kwargs): - super(FakeISERDriver, self).__init__(execute=self.fake_execute, - *args, **kwargs) - - def initialize_connection(self, volume, connector): - return { - 'driver_volume_type': 'iser', - 'data': {} - } - - -class FakeFibreChannelDriver(driver.FibreChannelDriver): - - @fczm_utils.add_fc_zone - def initialize_connection(self, volume, connector): - return { - 'driver_volume_type': 'fibre_channel', - 'data': { - 'initiator_target_map': {'fake_wwn': ['fake_wwn2']}, - }} - - @fczm_utils.add_fc_zone - def no_zone_initialize_connection(self, volume, connector): - """This shouldn't call the ZM.""" - return { - 'driver_volume_type': 'bogus', - 'data': { - 'initiator_target_map': {'fake_wwn': ['fake_wwn2']}, - }} - - @fczm_utils.remove_fc_zone - def terminate_connection(self, volume, connector, **kwargs): - return { - 'driver_volume_type': 'fibre_channel', - 'data': { - 'initiator_target_map': {'fake_wwn': ['fake_wwn2']}, - }} - - @fczm_utils.remove_fc_zone - def no_zone_terminate_connection(self, volume, connector, **kwargs): - return { - 'driver_volume_type': 'bogus', - 'data': { - 'initiator_target_map': {'fake_wwn': ['fake_wwn2']}, - }} - - -class FakeGateDriver(lvm.LVMVolumeDriver): - """Class designation for FakeGateDriver. - - FakeGateDriver is for TESTING ONLY. There are a few - driver features such as CG and replication that are not - supported by the reference driver LVM currently. Adding - those functions in this fake driver will help detect - problems when changes are introduced in those functions. 
- - Implementation of this driver is NOT meant for production. - They are implemented simply to make sure calls to the driver - functions are passing in the correct parameters, and the - results returned by the driver are handled properly by the manager. - - """ - def __init__(self, *args, **kwargs): - super(FakeGateDriver, self).__init__(*args, **kwargs) - - def _update_volume_stats(self): - super(FakeGateDriver, self)._update_volume_stats() - self._stats["pools"][0]["consistencygroup_support"] = True - - # NOTE(xyang): Consistency Group functions implemented below - # are for testing purpose only. Data consistency cannot be - # achieved by running these functions. - def create_consistencygroup(self, context, group): - """Creates a consistencygroup.""" - # A consistencygroup entry is already created in db - # This driver just returns a status - now = timeutils.utcnow() - model_update = {'status': fields.ConsistencyGroupStatus.AVAILABLE, - 'updated_at': now} - - return model_update - - def create_consistencygroup_from_src(self, context, group, volumes, - cgsnapshot=None, snapshots=None, - soure_cg=None, source_vols=None): - """Creates a consistencygroup from cgsnapshot or source cg.""" - for vol in volumes: - try: - if snapshots: - for snapshot in snapshots: - if vol['snapshot_id'] == snapshot['id']: - self.create_volume_from_snapshot(vol, snapshot) - break - except Exception: - raise - try: - if source_vols: - for source_vol in source_vols: - if vol['source_volid'] == source_vol['id']: - self.create_cloned_volume(vol, source_vol) - break - except Exception: - raise - return None, None - - def delete_consistencygroup(self, context, group, volumes): - """Deletes a consistencygroup and volumes in the group.""" - model_update = {'status': group.status} - volume_model_updates = [] - for volume_ref in volumes: - volume_model_update = {'id': volume_ref.id} - try: - self.remove_export(context, volume_ref) - self.delete_volume(volume_ref) - volume_model_update['status'] = 
'deleted' - except exception.VolumeIsBusy: - volume_model_update['status'] = 'available' - except Exception: - volume_model_update['status'] = 'error' - model_update['status'] = fields.ConsistencyGroupStatus.ERROR - volume_model_updates.append(volume_model_update) - - return model_update, volume_model_updates - - def update_consistencygroup(self, context, group, - add_volumes=None, remove_volumes=None): - """Updates a consistency group.""" - return None, None, None - - def create_cgsnapshot(self, context, cgsnapshot, snapshots): - """Creates a cgsnapshot. - - Snapshots created here are NOT consistent. This is for - testing purpose only. - """ - model_update = {'status': 'available'} - snapshot_model_updates = [] - for snapshot in snapshots: - snapshot_model_update = {'id': snapshot.id} - try: - self.create_snapshot(snapshot) - snapshot_model_update['status'] = ( - fields.SnapshotStatus.AVAILABLE) - except Exception: - snapshot_model_update['status'] = fields.SnapshotStatus.ERROR - model_update['status'] = 'error' - snapshot_model_updates.append(snapshot_model_update) - - return model_update, snapshot_model_updates - - def delete_cgsnapshot(self, context, cgsnapshot, snapshots): - """Deletes a cgsnapshot.""" - model_update = {'status': cgsnapshot.status} - snapshot_model_updates = [] - for snapshot in snapshots: - snapshot_model_update = {'id': snapshot.id} - try: - self.delete_snapshot(snapshot) - snapshot_model_update['status'] = ( - fields.SnapshotStatus.DELETED) - except exception.SnapshotIsBusy: - snapshot_model_update['status'] = ( - fields.SnapshotStatus.AVAILABLE) - except Exception: - snapshot_model_update['status'] = ( - fields.SnapshotStatus.ERROR) - model_update['status'] = 'error' - snapshot_model_updates.append(snapshot_model_update) - - return model_update, snapshot_model_updates diff --git a/cinder/tests/fixtures.py b/cinder/tests/fixtures.py deleted file mode 100644 index f85857813..000000000 --- a/cinder/tests/fixtures.py +++ /dev/null @@ -1,101 
+0,0 @@ -# Copyright 2016 IBM Corp. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -"""Fixtures for Cinder tests.""" -# NOTE(mriedem): This is needed for importing from fixtures. -from __future__ import absolute_import - -import logging as std_logging -import os - -import fixtures - -_TRUE_VALUES = ('True', 'true', '1', 'yes') - - -class NullHandler(std_logging.Handler): - """custom default NullHandler to attempt to format the record. - - Used in conjunction with - log_fixture.get_logging_handle_error_fixture to detect formatting errors in - debug level logs without saving the logs. - """ - def handle(self, record): - self.format(record) - - def emit(self, record): - pass - - def createLock(self): - self.lock = None - - -class StandardLogging(fixtures.Fixture): - """Setup Logging redirection for tests. - - There are a number of things we want to handle with logging in tests: - - * Redirect the logging to somewhere that we can test or dump it later. - - * Ensure that as many DEBUG messages as possible are actually - executed, to ensure they are actually syntactically valid (they - often have not been). - - * Ensure that we create useful output for tests that doesn't - overwhelm the testing system (which means we can't capture the - 100 MB of debug logging on every run). - - To do this we create a logger fixture at the root level, which - defaults to INFO and create a Null Logger at DEBUG which lets - us execute log messages at DEBUG but not keep the output. 
- - To support local debugging OS_DEBUG=True can be set in the - environment, which will print out the full debug logging. - - There are also a set of overrides for particularly verbose - modules to be even less than INFO. - - """ - - def setUp(self): - super(StandardLogging, self).setUp() - - # set root logger to debug - root = std_logging.getLogger() - root.setLevel(std_logging.DEBUG) - - # supports collecting debug level for local runs - if os.environ.get('OS_DEBUG') in _TRUE_VALUES: - level = std_logging.DEBUG - else: - level = std_logging.INFO - - # Collect logs - fs = '%(asctime)s %(levelname)s [%(name)s] %(message)s' - self.logger = self.useFixture( - fixtures.FakeLogger(format=fs, level=None)) - # TODO(sdague): why can't we send level through the fake - # logger? Tests prove that it breaks, but it's worth getting - # to the bottom of. - root.handlers[0].setLevel(level) - - if level > std_logging.DEBUG: - # Just attempt to format debug level logs, but don't save them - handler = NullHandler() - self.useFixture(fixtures.LogHandler(handler, nuke_handlers=False)) - handler.setLevel(std_logging.DEBUG) - - # Don't log every single DB migration step - std_logging.getLogger( - 'migrate.versioning.api').setLevel(std_logging.WARNING) diff --git a/cinder/tests/functional/__init__.py b/cinder/tests/functional/__init__.py deleted file mode 100644 index 3519f17cd..000000000 --- a/cinder/tests/functional/__init__.py +++ /dev/null @@ -1,18 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the -# License for the specific language governing permissions and limitations -# under the License. - -from cinder import objects - -# NOTE(e0ne): Make sure we have all of the objects loaded. We do this -# at module import time, because we may be using mock decorators in our -# tests that run at import time. -objects.register_all() diff --git a/cinder/tests/functional/api/__init__.py b/cinder/tests/functional/api/__init__.py deleted file mode 100644 index e69de29bb..000000000 diff --git a/cinder/tests/functional/api/client.py b/cinder/tests/functional/api/client.py deleted file mode 100644 index 3bcfab9a9..000000000 --- a/cinder/tests/functional/api/client.py +++ /dev/null @@ -1,313 +0,0 @@ -# Copyright (c) 2011 Justin Santa Barbara -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -from oslo_serialization import jsonutils -from oslo_utils import netutils -import requests -from six.moves import http_client -from six.moves import urllib - -from cinder.i18n import _ -from cinder.tests.unit import fake_constants as fake - - -class OpenStackApiException(Exception): - message = 'Unspecified error' - - def __init__(self, response=None, msg=None): - self.response = response - # Give chance to override default message - if msg: - self.message = msg - - if response: - self.message = _( - '%(message)s\nStatus Code: %(_status)s\nBody: %(_body)s') % { - '_status': response.status_code, '_body': response.text, - 'message': self.message} - - super(OpenStackApiException, self).__init__(self.message) - - -class OpenStackApiException401(OpenStackApiException): - message = _("401 Unauthorized Error") - - -class OpenStackApiException404(OpenStackApiException): - message = _("404 Not Found Error") - - -class OpenStackApiException413(OpenStackApiException): - message = _("413 Request entity too large") - - -class OpenStackApiException400(OpenStackApiException): - message = _("400 Bad Request") - - -class TestOpenStackClient(object): - """Simple OpenStack API Client. 
- - This is a really basic OpenStack API client that is under our control, - so we can make changes / insert hooks for testing - - """ - - def __init__(self, auth_user, auth_key, auth_uri, api_version=None): - super(TestOpenStackClient, self).__init__() - self.auth_result = None - self.auth_user = auth_user - self.auth_key = auth_key - self.auth_uri = auth_uri - # default project_id - self.project_id = fake.PROJECT_ID - self.api_version = api_version - - def request(self, url, method='GET', body=None, headers=None, - ssl_verify=True, stream=False): - _headers = {'Content-Type': 'application/json'} - _headers.update(headers or {}) - - parsed_url = urllib.parse.urlparse(url) - port = parsed_url.port - hostname = parsed_url.hostname - scheme = parsed_url.scheme - - if netutils.is_valid_ipv6(hostname): - hostname = "[%s]" % hostname - - relative_url = parsed_url.path - if parsed_url.query: - relative_url = relative_url + "?" + parsed_url.query - - if port: - _url = "%s://%s:%d%s" % (scheme, hostname, int(port), relative_url) - else: - _url = "%s://%s%s" % (scheme, hostname, relative_url) - - response = requests.request(method, _url, data=body, headers=_headers, - verify=ssl_verify, stream=stream) - - return response - - def _authenticate(self, reauthenticate=False): - if self.auth_result and not reauthenticate: - return self.auth_result - - auth_uri = self.auth_uri - headers = {'X-Auth-User': self.auth_user, - 'X-Auth-Key': self.auth_key, - 'X-Auth-Project-Id': self.project_id} - response = self.request(auth_uri, - headers=headers) - - http_status = response.status_code - - if http_status == http_client.UNAUTHORIZED: - raise OpenStackApiException401(response=response) - - self.auth_result = response.headers - return self.auth_result - - def update_project(self, new_project_id): - self.project_id = new_project_id - self._authenticate(True) - - def api_request(self, relative_uri, check_response_status=None, **kwargs): - auth_result = self._authenticate() - - # 
NOTE(justinsb): httplib 'helpfully' converts headers to lower case - base_uri = auth_result['x-server-management-url'] - - full_uri = '%s/%s' % (base_uri, relative_uri) - - headers = kwargs.setdefault('headers', {}) - headers['X-Auth-Token'] = auth_result['x-auth-token'] - - if self.api_version: - headers['OpenStack-API-Version'] = 'volume ' + self.api_version - - response = self.request(full_uri, **kwargs) - - http_status = response.status_code - if check_response_status: - if http_status not in check_response_status: - message = None - try: - exc = globals()["OpenStackApiException%s" % http_status] - except KeyError: - exc = OpenStackApiException - message = _("Unexpected status code") - raise exc(response, message) - - return response - - def _decode_json(self, response): - body = response.text - if body: - return jsonutils.loads(body) - else: - return "" - - def api_get(self, relative_uri, **kwargs): - kwargs.setdefault('check_response_status', [http_client.OK]) - response = self.api_request(relative_uri, **kwargs) - return self._decode_json(response) - - def api_post(self, relative_uri, body, **kwargs): - kwargs['method'] = 'POST' - if body: - headers = kwargs.setdefault('headers', {}) - headers['Content-Type'] = 'application/json' - kwargs['body'] = jsonutils.dumps(body) - - kwargs.setdefault('check_response_status', [http_client.OK, - http_client.ACCEPTED]) - response = self.api_request(relative_uri, **kwargs) - return self._decode_json(response) - - def api_put(self, relative_uri, body, **kwargs): - kwargs['method'] = 'PUT' - if body: - headers = kwargs.setdefault('headers', {}) - headers['Content-Type'] = 'application/json' - kwargs['body'] = jsonutils.dumps(body) - - kwargs.setdefault('check_response_status', [http_client.OK, - http_client.ACCEPTED, - http_client.NO_CONTENT]) - response = self.api_request(relative_uri, **kwargs) - return self._decode_json(response) - - def api_delete(self, relative_uri, **kwargs): - kwargs['method'] = 'DELETE' - 
kwargs.setdefault('check_response_status', [http_client.OK, - http_client.ACCEPTED, - http_client.NO_CONTENT]) - return self.api_request(relative_uri, **kwargs) - - def get_volume(self, volume_id): - return self.api_get('/volumes/%s' % volume_id)['volume'] - - def get_volumes(self, detail=True): - rel_url = '/volumes/detail' if detail else '/volumes' - return self.api_get(rel_url)['volumes'] - - def post_volume(self, volume): - return self.api_post('/volumes', volume)['volume'] - - def delete_volume(self, volume_id): - return self.api_delete('/volumes/%s' % volume_id) - - def put_volume(self, volume_id, volume): - return self.api_put('/volumes/%s' % volume_id, volume)['volume'] - - def quota_set(self, project_id, quota_update): - return self.api_put( - 'os-quota-sets/%s' % project_id, - {'quota_set': quota_update})['quota_set'] - - def quota_get(self, project_id, usage=True): - - return self.api_get('os-quota-sets/%s?usage=%s' - % (project_id, usage))['quota_set'] - - def create_type(self, type_name, extra_specs=None): - type = {"volume_type": {"name": type_name}} - if extra_specs: - type['extra_specs'] = extra_specs - - return self.api_post('/types', type)['volume_type'] - - def delete_type(self, type_id): - return self.api_delete('/types/%s' % type_id) - - def get_type(self, type_id): - return self.api_get('/types/%s' % type_id)['volume_type'] - - def create_volume_type_extra_specs(self, volume_type_id, extra_specs): - extra_specs = {"extra_specs": extra_specs} - url = "/types/%s/extra_specs" % volume_type_id - return self.api_post(url, extra_specs)['extra_specs'] - - def create_group_type_specs(self, grp_type_id, group_specs): - group_specs = {"group_specs": group_specs} - url = "/group_types/%s/group_specs" % grp_type_id - return self.api_post(url, group_specs)['group_specs'] - - def create_group_type(self, type_name, grp_specs=None): - grp_type = {"group_type": {"name": type_name}} - if grp_specs: - grp_type['group_specs'] = grp_specs - - return 
self.api_post('/group_types', grp_type)['group_type'] - - def delete_group_type(self, group_type_id): - return self.api_delete('/group_types/%s' % group_type_id) - - def get_group_type(self, grp_type_id): - return self.api_get('/group_types/%s' % grp_type_id)['group_type'] - - def get_group(self, group_id): - return self.api_get('/groups/%s' % group_id)['group'] - - def get_groups(self, detail=True): - rel_url = '/groups/detail' if detail else '/groups' - return self.api_get(rel_url)['groups'] - - def post_group(self, group): - return self.api_post('/groups', group)['group'] - - def post_group_from_src(self, group): - return self.api_post('/groups/action', group)['group'] - - def delete_group(self, group_id, params): - return self.api_post('/groups/%s/action' % group_id, params) - - def reset_group(self, group_id, params): - return self.api_post('/groups/%s/action' % group_id, params) - - def put_group(self, group_id, group): - return self.api_put('/groups/%s' % group_id, group)['group'] - - def get_group_snapshot(self, group_snapshot_id): - return self.api_get('/group_snapshots/%s' % group_snapshot_id)[ - 'group_snapshot'] - - def get_group_snapshots(self, detail=True): - rel_url = '/group_snapshots/detail' if detail else '/group_snapshots' - return self.api_get(rel_url)['group_snapshots'] - - def post_group_snapshot(self, group_snapshot): - return self.api_post('/group_snapshots', group_snapshot)[ - 'group_snapshot'] - - def delete_group_snapshot(self, group_snapshot_id): - return self.api_delete('/group_snapshots/%s' % group_snapshot_id) - - def reset_group_snapshot(self, group_snapshot_id, params): - return self.api_post('/group_snapshots/%s/action' % group_snapshot_id, - params) - - def enable_group_replication(self, group_id, params): - return self.api_post('/groups/%s/action' % group_id, params) - - def disable_group_replication(self, group_id, params): - return self.api_post('/groups/%s/action' % group_id, params) - - def failover_group_replication(self, 
group_id, params): - return self.api_post('/groups/%s/action' % group_id, params) - - def list_group_replication_targets(self, group_id, params): - return self.api_post('/groups/%s/action' % group_id, params) diff --git a/cinder/tests/functional/api/foxinsocks.py b/cinder/tests/functional/api/foxinsocks.py deleted file mode 100644 index 2eb570fdf..000000000 --- a/cinder/tests/functional/api/foxinsocks.py +++ /dev/null @@ -1,89 +0,0 @@ -# Copyright 2011 OpenStack Foundation -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import webob.exc - -from cinder.api import extensions -from cinder.api.openstack import wsgi - - -class FoxInSocksController(object): - - def index(self, req): - return "Try to say this Mr. Knox, sir..." - - -class FoxInSocksServerControllerExtension(wsgi.Controller): - @wsgi.action('add_tweedle') - def _add_tweedle(self, req, id, body): - - return "Tweedle Beetle Added." - - @wsgi.action('delete_tweedle') - def _delete_tweedle(self, req, id, body): - - return "Tweedle Beetle Deleted." - - @wsgi.action('fail') - def _fail(self, req, id, body): - - raise webob.exc.HTTPBadRequest(explanation='Tweedle fail') - - -class FoxInSocksFlavorGooseControllerExtension(wsgi.Controller): - @wsgi.extends - def show(self, req, resp_obj, id): - # NOTE: This only handles JSON responses. 
- resp_obj.obj['flavor']['googoose'] = req.GET.get('chewing') - - -class FoxInSocksFlavorBandsControllerExtension(wsgi.Controller): - @wsgi.extends - def show(self, req, resp_obj, id): - # NOTE: This only handles JSON responses. - resp_obj.obj['big_bands'] = 'Pig Bands!' - - -class Foxinsocks(extensions.ExtensionDescriptor): - """The Fox In Socks Extension.""" - - name = "Fox In Socks" - alias = "FOXNSOX" - namespace = "http://www.fox.in.socks/api/ext/pie/v1.0" - updated = "2011-01-22T13:25:27-06:00" - - def __init__(self, ext_mgr): - ext_mgr.register(self) - - def get_resources(self): - resources = [] - resource = extensions.ResourceExtension('foxnsocks', - FoxInSocksController()) - resources.append(resource) - return resources - - def get_controller_extensions(self): - extension_list = [] - - extension_set = [ - (FoxInSocksServerControllerExtension, 'servers'), - (FoxInSocksFlavorGooseControllerExtension, 'flavors'), - (FoxInSocksFlavorBandsControllerExtension, 'flavors'), ] - for klass, collection in extension_set: - controller = klass() - ext = extensions.ControllerExtension(self, collection, controller) - extension_list.append(ext) - - return extension_list diff --git a/cinder/tests/functional/functional_helpers.py b/cinder/tests/functional/functional_helpers.py deleted file mode 100644 index 64b890b19..000000000 --- a/cinder/tests/functional/functional_helpers.py +++ /dev/null @@ -1,210 +0,0 @@ -# Copyright 2011 Justin Santa Barbara -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the -# License for the specific language governing permissions and limitations -# under the License. - -""" -Provides common functionality for functional tests -""" -import os.path -import random -import string -import time -import uuid - -import fixtures -import mock -from oslo_config import cfg - -from cinder import service -from cinder import test # For the flags -from cinder.tests.functional.api import client -from cinder.tests.unit import fake_constants as fake - -CONF = cfg.CONF -VOLUME = 'VOLUME' -GROUP = 'GROUP' -GROUP_SNAPSHOT = 'GROUP_SNAPSHOT' - - -def generate_random_alphanumeric(length): - """Creates a random alphanumeric string of specified length.""" - return ''.join(random.choice(string.ascii_uppercase + string.digits) - for _x in range(length)) - - -def generate_random_numeric(length): - """Creates a random numeric string of specified length.""" - return ''.join(random.choice(string.digits) - for _x in range(length)) - - -def generate_new_element(items, prefix, numeric=False): - """Creates a random string with prefix, that is not in 'items' list.""" - while True: - if numeric: - candidate = prefix + generate_random_numeric(8) - else: - candidate = prefix + generate_random_alphanumeric(8) - if candidate not in items: - return candidate - - -class _FunctionalTestBase(test.TestCase): - osapi_version_major = '2' - osapi_version_minor = '0' - - def setUp(self): - super(_FunctionalTestBase, self).setUp() - - f = self._get_flags() - for k, value_dict in f.items(): - self.override_config(k, value_dict['v'], value_dict.get('g')) - - for var in ('http_proxy', 'HTTP_PROXY'): - self.useFixture(fixtures.EnvironmentVariable(var)) - - # set up services - self.volume = self.start_service('volume') - # NOTE(dulek): Mocking eventlet.sleep so test won't time out on - # scheduler service start. 
- with mock.patch('eventlet.sleep'): - self.scheduler = self.start_service('scheduler') - self._start_api_service() - self.addCleanup(self.osapi.stop) - - api_version = self.osapi_version_major + '.' + self.osapi_version_minor - self.api = client.TestOpenStackClient(fake.USER_ID, - fake.PROJECT_ID, self.auth_url, - api_version) - - def _update_project(self, new_project_id): - self.api.update_project(new_project_id) - - def _start_api_service(self): - default_conf = os.path.abspath(os.path.join( - os.path.dirname(__file__), '..', '..', '..', - 'etc/cinder/api-paste.ini')) - CONF.api_paste_config = default_conf - self.osapi = service.WSGIService("osapi_volume") - self.osapi.start() - # FIXME(ja): this is not the auth url - this is the service url - # FIXME(ja): this needs fixed in nova as well - self.auth_url = 'http://%s:%s/v' % (self.osapi.host, self.osapi.port) - self.auth_url += self.osapi_version_major - - def _get_flags(self): - """An opportunity to setup flags, before the services are started.""" - f = {} - - # Ensure tests only listen on localhost - f['osapi_volume_listen'] = {'v': '127.0.0.1'} - - # Auto-assign ports to allow concurrent tests - f['osapi_volume_listen_port'] = {'v': 0} - - # Use simple scheduler to avoid complications - we test schedulers - # separately - f['scheduler_driver'] = {'v': ('cinder.scheduler.filter_scheduler.' 
- 'FilterScheduler')} - - return f - - def get_unused_server_name(self): - servers = self.api.get_servers() - server_names = [server['name'] for server in servers] - return generate_new_element(server_names, 'server') - - def get_invalid_image(self): - return str(uuid.uuid4()) - - def _build_minimal_create_server_request(self): - server = {} - - image = self.api.get_images()[0] - - if 'imageRef' in image: - image_href = image['imageRef'] - else: - image_href = image['id'] - image_href = 'http://fake.server/%s' % image_href - - # We now have a valid imageId - server['imageRef'] = image_href - - # Set a valid flavorId - flavor = self.api.get_flavors()[0] - server['flavorRef'] = 'http://fake.server/%s' % flavor['id'] - - # Set a valid server name - server_name = self.get_unused_server_name() - server['name'] = server_name - return server - - def _poll_resource_while(self, res_id, continue_states, res_type=VOLUME, - expected_end_status=None, max_retries=5, - status_field='status'): - """Poll (briefly) while the state is in continue_states. - - Continues until the state changes from continue_states or max_retries - are hit. If expected_end_status is specified, we assert that the end - status of the resource is expected_end_status. - """ - retries = 0 - while retries <= max_retries: - try: - if res_type == VOLUME: - found_res = self.api.get_volume(res_id) - elif res_type == GROUP: - found_res = self.api.get_group(res_id) - elif res_type == GROUP_SNAPSHOT: - found_res = self.api.get_group_snapshot(res_id) - else: - return None - except client.OpenStackApiException404: - return None - except client.OpenStackApiException: - # NOTE(xyang): Got OpenStackApiException( - # u'Unexpected status code',) sometimes, but - # it works if continue. 
- continue - - self.assertEqual(res_id, found_res['id']) - res_status = found_res[status_field] - if res_status not in continue_states: - if expected_end_status: - self.assertEqual(expected_end_status, res_status) - return found_res - - time.sleep(1) - retries += 1 - - def _poll_volume_while(self, volume_id, continue_states, - expected_end_status=None, max_retries=5, - status_field='status'): - return self._poll_resource_while(volume_id, continue_states, - VOLUME, expected_end_status, - max_retries, status_field) - - def _poll_group_while(self, group_id, continue_states, - expected_end_status=None, max_retries=30, - status_field='status'): - return self._poll_resource_while(group_id, continue_states, - GROUP, expected_end_status, - max_retries, status_field) - - def _poll_group_snapshot_while(self, group_snapshot_id, continue_states, - expected_end_status=None, max_retries=30): - return self._poll_resource_while(group_snapshot_id, continue_states, - GROUP_SNAPSHOT, expected_end_status, - max_retries) diff --git a/cinder/tests/functional/test_extensions.py b/cinder/tests/functional/test_extensions.py deleted file mode 100644 index 06589d4f5..000000000 --- a/cinder/tests/functional/test_extensions.py +++ /dev/null @@ -1,160 +0,0 @@ -# Copyright 2011 Justin Santa Barbara -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- - -import iso8601 -from oslo_config import cfg -from oslo_serialization import jsonutils -from six.moves import http_client -import webob - -from cinder.api import extensions -from cinder.api.v1 import router -from cinder.tests.functional import functional_helpers - - -NS = "{http://docs.openstack.org/common/api/v1.0}" -CONF = cfg.CONF - - -class ExtensionTestCase(functional_helpers._FunctionalTestBase): - def _get_flags(self): - f = super(ExtensionTestCase, self)._get_flags() - f['osapi_volume_extension'] = {'v': CONF.osapi_volume_extension[:]} - f['osapi_volume_extension']['v'].append( - 'cinder.tests.functional.api.foxinsocks.Foxinsocks') - return f - - -class ExtensionsTest(ExtensionTestCase): - def test_get_foxnsocks(self): - """Simple check that fox-n-socks works.""" - response = self.api.api_request('/foxnsocks') - foxnsocks = response.text - self.assertEqual('Try to say this Mr. Knox, sir...', foxnsocks) - - -class ExtensionControllerTest(ExtensionTestCase): - - def setUp(self): - super(ExtensionControllerTest, self).setUp() - self.ext_list = ["TypesManage", "TypesExtraSpecs", ] - self.ext_list.sort() - - def test_list_extensions_json(self): - app = router.APIRouter() - request = webob.Request.blank("/fake/extensions") - response = request.get_response(app) - self.assertEqual(http_client.OK, response.status_int) - - # Make sure we have all the extensions, extra extensions being OK. - data = jsonutils.loads(response.body) - names = [str(x['name']) for x in data['extensions'] - if str(x['name']) in self.ext_list] - names.sort() - self.assertEqual(self.ext_list, names) - - # Ensure all the timestamps are valid according to iso8601 - for ext in data['extensions']: - iso8601.parse_date(ext['updated']) - - # Make sure that at least Fox in Sox is correct. 
- (fox_ext, ) = [ - x for x in data['extensions'] if x['alias'] == 'FOXNSOX'] - self.assertEqual( - {'name': 'Fox In Socks', - 'updated': '2011-01-22T13:25:27-06:00', - 'description': 'The Fox In Socks Extension.', - 'alias': 'FOXNSOX', - 'links': []}, - fox_ext) - - for ext in data['extensions']: - url = '/fake/extensions/%s' % ext['alias'] - request = webob.Request.blank(url) - response = request.get_response(app) - output = jsonutils.loads(response.body) - self.assertEqual(ext['alias'], output['extension']['alias']) - - def test_get_extension_json(self): - app = router.APIRouter() - request = webob.Request.blank("/fake/extensions/FOXNSOX") - response = request.get_response(app) - self.assertEqual(http_client.OK, response.status_int) - - data = jsonutils.loads(response.body) - self.assertEqual( - {"name": "Fox In Socks", - "updated": "2011-01-22T13:25:27-06:00", - "description": "The Fox In Socks Extension.", - "alias": "FOXNSOX", - "links": []}, data['extension']) - - def test_get_non_existing_extension_json(self): - app = router.APIRouter() - request = webob.Request.blank("/fake/extensions/4") - response = request.get_response(app) - self.assertEqual(http_client.NOT_FOUND, response.status_int) - - -class StubExtensionManager(object): - """Provides access to Tweedle Beetles.""" - - name = "Tweedle Beetle Extension" - alias = "TWDLBETL" - - def __init__(self, resource_ext=None, action_ext=None, request_ext=None, - controller_ext=None): - self.resource_ext = resource_ext - self.controller_ext = controller_ext - self.extra_resource_ext = None - - def get_resources(self): - resource_exts = [] - if self.resource_ext: - resource_exts.append(self.resource_ext) - if self.extra_resource_ext: - resource_exts.append(self.extra_resource_ext) - return resource_exts - - def get_controller_extensions(self): - controller_extensions = [] - if self.controller_ext: - controller_extensions.append(self.controller_ext) - return controller_extensions - - -class 
ExtensionControllerIdFormatTest(ExtensionTestCase): - - def _bounce_id(self, test_id): - - class BounceController(object): - def show(self, req, id): - return id - res_ext = extensions.ResourceExtension('bounce', - BounceController()) - manager = StubExtensionManager(res_ext) - app = router.APIRouter(manager) - request = webob.Request.blank("/fake/bounce/%s" % test_id) - response = request.get_response(app) - return response.body - - def test_id_with_json_format(self): - result = self._bounce_id('foo.json') - self.assertEqual(b'foo', result) - - def test_id_with_bad_format(self): - result = self._bounce_id('foo.bad') - self.assertEqual(b'foo.bad', result) diff --git a/cinder/tests/functional/test_group_replication.py b/cinder/tests/functional/test_group_replication.py deleted file mode 100644 index 05e01d66d..000000000 --- a/cinder/tests/functional/test_group_replication.py +++ /dev/null @@ -1,171 +0,0 @@ -# Copyright (C) 2017 Dell Inc. or its subsidiaries. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -from oslo_utils import uuidutils - -from cinder.objects import fields -from cinder.tests.functional import functional_helpers -from cinder.volume import configuration - - -class GroupReplicationTest(functional_helpers._FunctionalTestBase): - _vol_type_name = 'functional_test_type' - _grp_type_name = 'functional_grp_test_type' - osapi_version_major = '3' - osapi_version_minor = '38' - - def setUp(self): - super(GroupReplicationTest, self).setUp() - self.volume_type = self.api.create_type(self._vol_type_name) - extra_specs = {"replication_enabled": " True"} - self.api.create_volume_type_extra_specs(self.volume_type['id'], - extra_specs=extra_specs) - self.volume_type = self.api.get_type(self.volume_type['id']) - self.group_type = self.api.create_group_type(self._grp_type_name) - grp_specs = {"group_replication_enabled": " True"} - self.api.create_group_type_specs(self.group_type['id'], - group_specs=grp_specs) - self.group_type = self.api.get_group_type(self.group_type['id']) - - def _get_flags(self): - f = super(GroupReplicationTest, self)._get_flags() - f['volume_driver'] = ( - {'v': 'cinder.tests.fake_driver.FakeLoggingVolumeDriver', - 'g': configuration.SHARED_CONF_GROUP}) - f['default_volume_type'] = {'v': self._vol_type_name} - f['default_group_type'] = {'v': self._grp_type_name} - return f - - def test_group_replication(self): - """Tests group replication APIs.""" - - # Create group - created_group = self.api.post_group( - {'group': {'group_type': self.group_type['id'], - 'volume_types': [self.volume_type['id']]}}) - self.assertTrue(uuidutils.is_uuid_like(created_group['id'])) - created_group_id = created_group['id'] - - # Check it's there - found_group = self._poll_group_while(created_group_id, - ['creating']) - self.assertEqual(created_group_id, found_group['id']) - self.assertEqual(self.group_type['id'], found_group['group_type']) - self.assertEqual('available', found_group['status']) - - # Create volume - created_volume = self.api.post_volume( - 
{'volume': {'size': 1, - 'group_id': created_group_id, - 'volume_type': self.volume_type['id']}}) - self.assertTrue(uuidutils.is_uuid_like(created_volume['id'])) - created_volume_id = created_volume['id'] - - # Check it's there - found_volume = self.api.get_volume(created_volume_id) - self.assertEqual(created_volume_id, found_volume['id']) - self.assertEqual(self._vol_type_name, found_volume['volume_type']) - self.assertEqual(created_group_id, found_volume['group_id']) - - # Wait (briefly) for creation. Delay is due to the 'message queue' - found_volume = self._poll_volume_while(created_volume_id, ['creating']) - - # It should be available... - self.assertEqual('available', found_volume['status']) - - # Test enable replication group - self.api.enable_group_replication(created_group_id, - {'enable_replication': {}}) - - found_volume = self._poll_volume_while( - created_volume_id, [fields.ReplicationStatus.ENABLING], - status_field='replication_status') - found_group = self._poll_group_while( - created_group_id, [fields.ReplicationStatus.ENABLING], - status_field='replication_status') - - self.assertEqual(fields.ReplicationStatus.ENABLED, - found_group['replication_status']) - self.assertEqual(fields.ReplicationStatus.ENABLED, - found_volume['replication_status']) - - # Test list replication group targets - targets = self.api.list_group_replication_targets( - created_group_id, {'list_replication_targets': {}}) - self.assertEqual({'replication_targets': []}, targets) - - # Test failover replication group - self.api.failover_group_replication( - created_group_id, - {'failover_replication': {'secondary_backend_id': 'backend1', - 'allow_attached_volume': False}}) - - found_volume = self._poll_volume_while( - created_volume_id, [fields.ReplicationStatus.FAILING_OVER], - status_field='replication_status') - found_group = self._poll_group_while( - created_group_id, [fields.ReplicationStatus.FAILING_OVER], - status_field='replication_status') - - 
self.assertEqual(fields.ReplicationStatus.FAILED_OVER, - found_group['replication_status']) - self.assertEqual(fields.ReplicationStatus.FAILED_OVER, - found_volume['replication_status']) - - # Test failback replication group - self.api.failover_group_replication( - created_group_id, - {'failover_replication': {'secondary_backend_id': 'default', - 'allow_attached_volume': False}}) - - found_volume = self._poll_volume_while( - created_volume_id, [fields.ReplicationStatus.FAILING_OVER], - status_field='replication_status') - found_group = self._poll_group_while( - created_group_id, [fields.ReplicationStatus.FAILING_OVER], - status_field='replication_status') - - self.assertEqual(fields.ReplicationStatus.ENABLED, - found_group['replication_status']) - self.assertEqual(fields.ReplicationStatus.ENABLED, - found_volume['replication_status']) - - # Test disable replication group - self.api.disable_group_replication(created_group_id, - {'disable_replication': {}}) - - found_volume = self._poll_volume_while( - created_volume_id, [fields.ReplicationStatus.DISABLING], - status_field='replication_status') - found_group = self._poll_group_while( - created_group_id, [fields.ReplicationStatus.DISABLING], - status_field='replication_status') - - self.assertEqual(fields.ReplicationStatus.DISABLED, - found_group['replication_status']) - self.assertEqual(fields.ReplicationStatus.DISABLED, - found_volume['replication_status']) - - # Delete the original group - self.api.delete_group(created_group_id, - {'delete': {'delete-volumes': True}}) - - # Wait (briefly) for deletion. 
Delay is due to the 'message queue' - found_volume = self._poll_volume_while(created_volume_id, ['deleting']) - found_group = self._poll_group_while(created_group_id, ['deleting']) - - # Should be gone - self.assertFalse(found_volume) - self.assertFalse(found_group) diff --git a/cinder/tests/functional/test_group_snapshots.py b/cinder/tests/functional/test_group_snapshots.py deleted file mode 100644 index dd31297f3..000000000 --- a/cinder/tests/functional/test_group_snapshots.py +++ /dev/null @@ -1,357 +0,0 @@ -# Copyright 2016 EMC Corporation -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -from oslo_utils import uuidutils - -from cinder.objects import fields -from cinder.tests.functional import functional_helpers -from cinder.volume import configuration - - -class GroupSnapshotsTest(functional_helpers._FunctionalTestBase): - _vol_type_name = 'functional_test_type' - _grp_type_name = 'functional_grp_test_type' - osapi_version_major = '3' - osapi_version_minor = '19' - - def setUp(self): - super(GroupSnapshotsTest, self).setUp() - self.volume_type = self.api.create_type(self._vol_type_name) - self.group_type = self.api.create_group_type(self._grp_type_name) - - def _get_flags(self): - f = super(GroupSnapshotsTest, self)._get_flags() - f['volume_driver'] = ( - {'v': 'cinder.tests.fake_driver.FakeLoggingVolumeDriver', - 'g': configuration.SHARED_CONF_GROUP}) - f['default_volume_type'] = {'v': self._vol_type_name} - f['default_group_type'] = {'v': self._grp_type_name} - return f - - def test_get_group_snapshots_summary(self): - """Simple check that listing group snapshots works.""" - grp_snaps = self.api.get_group_snapshots(False) - self.assertIsNotNone(grp_snaps) - - def test_get_group_snapshots(self): - """Simple check that listing group snapshots works.""" - grp_snaps = self.api.get_group_snapshots() - self.assertIsNotNone(grp_snaps) - - def test_create_and_delete_group_snapshot(self): - """Creates and deletes a group snapshot.""" - - # Create group - created_group = self.api.post_group( - {'group': {'group_type': self.group_type['id'], - 'volume_types': [self.volume_type['id']]}}) - self.assertTrue(uuidutils.is_uuid_like(created_group['id'])) - created_group_id = created_group['id'] - - # Check it's there - found_group = self._poll_group_while(created_group_id, - ['creating']) - self.assertEqual(created_group_id, found_group['id']) - self.assertEqual(self.group_type['id'], found_group['group_type']) - self.assertEqual('available', found_group['status']) - - # Create volume - created_volume = self.api.post_volume( - {'volume': {'size': 1, - 
'group_id': created_group_id, - 'volume_type': self.volume_type['id']}}) - self.assertTrue(uuidutils.is_uuid_like(created_volume['id'])) - created_volume_id = created_volume['id'] - - # Check it's there - found_volume = self.api.get_volume(created_volume_id) - self.assertEqual(created_volume_id, found_volume['id']) - self.assertEqual(self._vol_type_name, found_volume['volume_type']) - self.assertEqual(created_group_id, found_volume['group_id']) - - # Wait (briefly) for creation. Delay is due to the 'message queue' - found_volume = self._poll_volume_while(created_volume_id, ['creating']) - - # It should be available... - self.assertEqual('available', found_volume['status']) - - # Create group snapshot - created_group_snapshot = self.api.post_group_snapshot( - {'group_snapshot': {'group_id': created_group_id}}) - self.assertTrue(uuidutils.is_uuid_like(created_group_snapshot['id'])) - created_group_snapshot_id = created_group_snapshot['id'] - - # Check it's there - found_group_snapshot = self._poll_group_snapshot_while( - created_group_snapshot_id, [fields.GroupSnapshotStatus.CREATING]) - self.assertEqual(created_group_snapshot_id, found_group_snapshot['id']) - self.assertEqual(created_group_id, - found_group_snapshot['group_id']) - self.assertEqual(fields.GroupSnapshotStatus.AVAILABLE, - found_group_snapshot['status']) - - # Delete the group snapshot - self.api.delete_group_snapshot(created_group_snapshot_id) - - # Wait (briefly) for deletion. Delay is due to the 'message queue' - found_group_snapshot = self._poll_group_snapshot_while( - created_group_snapshot_id, [fields.GroupSnapshotStatus.DELETING]) - - # Delete the original group - self.api.delete_group(created_group_id, - {'delete': {'delete-volumes': True}}) - - # Wait (briefly) for deletion. 
Delay is due to the 'message queue' - found_volume = self._poll_volume_while(created_volume_id, ['deleting']) - found_group = self._poll_group_while(created_group_id, ['deleting']) - - # Should be gone - self.assertFalse(found_group_snapshot) - self.assertFalse(found_volume) - self.assertFalse(found_group) - - def test_create_group_from_group_snapshot(self): - """Creates a group from a group snapshot.""" - - # Create group - created_group = self.api.post_group( - {'group': {'group_type': self.group_type['id'], - 'volume_types': [self.volume_type['id']]}}) - self.assertTrue(uuidutils.is_uuid_like(created_group['id'])) - created_group_id = created_group['id'] - - # Check it's there - found_group = self._poll_group_while(created_group_id, - ['creating']) - self.assertEqual(created_group_id, found_group['id']) - self.assertEqual(self.group_type['id'], found_group['group_type']) - self.assertEqual('available', found_group['status']) - - # Create volume - created_volume = self.api.post_volume( - {'volume': {'size': 1, - 'group_id': created_group_id, - 'volume_type': self.volume_type['id']}}) - self.assertTrue(uuidutils.is_uuid_like(created_volume['id'])) - created_volume_id = created_volume['id'] - - # Check it's there - found_volume = self.api.get_volume(created_volume_id) - self.assertEqual(created_volume_id, found_volume['id']) - self.assertEqual(self._vol_type_name, found_volume['volume_type']) - self.assertEqual(created_group_id, found_volume['group_id']) - - # Wait (briefly) for creation. Delay is due to the 'message queue' - found_volume = self._poll_volume_while(created_volume_id, ['creating']) - - # It should be available... 
- self.assertEqual('available', found_volume['status']) - - # Create group snapshot - created_group_snapshot = self.api.post_group_snapshot( - {'group_snapshot': {'group_id': created_group_id}}) - self.assertTrue(uuidutils.is_uuid_like(created_group_snapshot['id'])) - created_group_snapshot_id = created_group_snapshot['id'] - - # Check it's there - found_group_snapshot = self._poll_group_snapshot_while( - created_group_snapshot_id, ['creating']) - self.assertEqual(created_group_snapshot_id, found_group_snapshot['id']) - self.assertEqual(created_group_id, - found_group_snapshot['group_id']) - self.assertEqual('available', found_group_snapshot['status']) - - # Create group from group snapshot - created_group_from_snap = self.api.post_group_from_src( - {'create-from-src': { - 'group_snapshot_id': created_group_snapshot_id}}) - self.assertTrue(uuidutils.is_uuid_like(created_group_from_snap['id'])) - created_group_from_snap_id = created_group_from_snap['id'] - - # Check it's there - found_volumes = self.api.get_volumes() - self._poll_volume_while(found_volumes[0], ['creating']) - self._poll_volume_while(found_volumes[1], ['creating']) - found_group_from_snap = self._poll_group_while( - created_group_from_snap_id, ['creating']) - self.assertEqual(created_group_from_snap_id, - found_group_from_snap['id']) - self.assertEqual(created_group_snapshot_id, - found_group_from_snap['group_snapshot_id']) - self.assertEqual(self.group_type['id'], - found_group_from_snap['group_type']) - self.assertEqual('available', found_group_from_snap['status']) - - # Delete the group from snap - self.api.delete_group(created_group_from_snap_id, - {'delete': {'delete-volumes': True}}) - - # Wait (briefly) for deletion. Delay is due to the 'message queue' - found_group_from_snap = self._poll_group_while( - created_group_from_snap_id, ['deleting']) - - # Delete the group snapshot - self.api.delete_group_snapshot(created_group_snapshot_id) - - # Wait (briefly) for deletion. 
Delay is due to the 'message queue' - found_group_snapshot = self._poll_group_snapshot_while( - created_group_snapshot_id, [fields.GroupSnapshotStatus.DELETING]) - - # Delete the original group - self.api.delete_group(created_group_id, - {'delete': {'delete-volumes': True}}) - - # Wait (briefly) for deletion. Delay is due to the 'message queue' - found_volume = self._poll_volume_while(created_volume_id, ['deleting']) - found_group = self._poll_group_while(created_group_id, ['deleting']) - - # Should be gone - self.assertFalse(found_group_from_snap) - self.assertFalse(found_group_snapshot) - self.assertFalse(found_volume) - self.assertFalse(found_group) - - def test_create_group_from_source_group(self): - """Creates a group from a source group.""" - - # Create group - created_group = self.api.post_group( - {'group': {'group_type': self.group_type['id'], - 'volume_types': [self.volume_type['id']]}}) - self.assertTrue(uuidutils.is_uuid_like(created_group['id'])) - created_group_id = created_group['id'] - - # Check it's there - found_group = self._poll_group_while(created_group_id, - ['creating']) - self.assertEqual(created_group_id, found_group['id']) - self.assertEqual(self.group_type['id'], found_group['group_type']) - self.assertEqual('available', found_group['status']) - - # Create volume - created_volume = self.api.post_volume( - {'volume': {'size': 1, - 'group_id': created_group_id, - 'volume_type': self.volume_type['id']}}) - self.assertTrue(uuidutils.is_uuid_like(created_volume['id'])) - created_volume_id = created_volume['id'] - - # Check it's there - found_volume = self.api.get_volume(created_volume_id) - self.assertEqual(created_volume_id, found_volume['id']) - self.assertEqual(self._vol_type_name, found_volume['volume_type']) - self.assertEqual(created_group_id, found_volume['group_id']) - - # Wait (briefly) for creation. 
Delay is due to the 'message queue' - found_volume = self._poll_volume_while(created_volume_id, ['creating']) - - # It should be available... - self.assertEqual('available', found_volume['status']) - - # Test create group from source group - created_group_from_group = self.api.post_group_from_src( - {'create-from-src': { - 'source_group_id': created_group_id}}) - self.assertTrue(uuidutils.is_uuid_like(created_group_from_group['id'])) - created_group_from_group_id = created_group_from_group['id'] - - # Check it's there - found_volumes = self.api.get_volumes() - self._poll_volume_while(found_volumes[0], ['creating']) - self._poll_volume_while(found_volumes[1], ['creating']) - found_group_from_group = self._poll_group_while( - created_group_from_group_id, ['creating']) - self.assertEqual(created_group_from_group_id, - found_group_from_group['id']) - self.assertEqual(created_group_id, - found_group_from_group['source_group_id']) - self.assertEqual(self.group_type['id'], - found_group_from_group['group_type']) - self.assertEqual('available', found_group_from_group['status']) - - # Delete the group from group - self.api.delete_group(created_group_from_group_id, - {'delete': {'delete-volumes': True}}) - - # Wait (briefly) for deletion. Delay is due to the 'message queue' - found_group_from_group = self._poll_group_while( - created_group_from_group_id, ['deleting']) - - # Delete the original group - self.api.delete_group(created_group_id, - {'delete': {'delete-volumes': True}}) - - # Wait (briefly) for deletion. 
Delay is due to the 'message queue' - found_volume = self._poll_volume_while(created_volume_id, ['deleting']) - found_group = self._poll_group_while(created_group_id, ['deleting']) - - # Should be gone - self.assertFalse(found_group_from_group) - self.assertFalse(found_volume) - self.assertFalse(found_group) - - def test_reset_group_snapshot(self): - # Create group - group1 = self.api.post_group( - {'group': {'group_type': self.group_type['id'], - 'volume_types': [self.volume_type['id']]}}) - self.assertTrue(uuidutils.is_uuid_like(group1['id'])) - group_id = group1['id'] - self._poll_group_while(group_id, ['creating']) - - # Create volume - created_volume = self.api.post_volume( - {'volume': {'size': 1, - 'group_id': group_id, - 'volume_type': self.volume_type['id']}}) - self.assertTrue(uuidutils.is_uuid_like(created_volume['id'])) - created_volume_id = created_volume['id'] - self._poll_volume_while(created_volume_id, ['creating']) - - # Create group snapshot - group_snapshot1 = self.api.post_group_snapshot( - {'group_snapshot': {'group_id': group_id}}) - self.assertTrue(uuidutils.is_uuid_like(group_snapshot1['id'])) - group_snapshot_id = group_snapshot1['id'] - - self._poll_group_snapshot_while(group_snapshot_id, - fields.GroupSnapshotStatus.CREATING) - - group_snapshot1 = self.api.get_group_snapshot(group_snapshot_id) - self.assertEqual(fields.GroupSnapshotStatus.AVAILABLE, - group_snapshot1['status']) - - # reset group snapshot status - self.api.reset_group_snapshot(group_snapshot_id, {"reset_status": { - "status": fields.GroupSnapshotStatus.ERROR}}) - - group_snapshot1 = self.api.get_group_snapshot(group_snapshot_id) - self.assertEqual(fields.GroupSnapshotStatus.ERROR, - group_snapshot1['status']) - - # Delete group, volume and group snapshot - self.api.delete_group_snapshot(group_snapshot_id) - found_group_snapshot = self._poll_group_snapshot_while( - group_snapshot_id, [fields.GroupSnapshotStatus.DELETING]) - self.api.delete_group(group_id, - {'delete': 
{'delete-volumes': True}}) - - found_volume = self._poll_volume_while(created_volume_id, ['deleting']) - found_group = self._poll_group_while(group_id, ['deleting']) - - # Created resources should be gone - self.assertFalse(found_group_snapshot) - self.assertFalse(found_volume) - self.assertFalse(found_group) diff --git a/cinder/tests/functional/test_groups.py b/cinder/tests/functional/test_groups.py deleted file mode 100644 index b5650f1a4..000000000 --- a/cinder/tests/functional/test_groups.py +++ /dev/null @@ -1,113 +0,0 @@ -# Copyright 2016 EMC Corporation -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -from oslo_utils import uuidutils - -from cinder.tests.functional import functional_helpers -from cinder.volume import configuration - - -class GroupsTest(functional_helpers._FunctionalTestBase): - _vol_type_name = 'functional_test_type' - _grp_type_name = 'functional_grp_test_type' - osapi_version_major = '3' - osapi_version_minor = '20' - - def setUp(self): - super(GroupsTest, self).setUp() - self.volume_type = self.api.create_type(self._vol_type_name) - self.group_type = self.api.create_group_type(self._grp_type_name) - self.group1 = self.api.post_group( - {'group': {'group_type': self.group_type['id'], - 'volume_types': [self.volume_type['id']]}}) - - def _get_flags(self): - f = super(GroupsTest, self)._get_flags() - f['volume_driver'] = ( - {'v': 'cinder.tests.fake_driver.FakeLoggingVolumeDriver', - 'g': configuration.SHARED_CONF_GROUP}) - f['default_volume_type'] = {'v': self._vol_type_name} - f['default_group_type'] = {'v': self._grp_type_name} - return f - - def test_get_groups_summary(self): - """Simple check that listing groups works.""" - grps = self.api.get_groups(False) - self.assertIsNotNone(grps) - - def test_get_groups(self): - """Simple check that listing groups works.""" - grps = self.api.get_groups() - self.assertIsNotNone(grps) - - def test_reset_group_status(self): - """Reset group status""" - found_group = self._poll_group_while(self.group1['id'], - ['creating']) - self.assertEqual('available', found_group['status']) - self.api.reset_group(self.group1['id'], - {"reset_status": {"status": "error"}}) - - group = self.api.get_group(self.group1['id']) - self.assertEqual("error", group['status']) - - def test_create_and_delete_group(self): - """Creates and deletes a group.""" - - # Create group - created_group = self.api.post_group( - {'group': {'group_type': self.group_type['id'], - 'volume_types': [self.volume_type['id']]}}) - self.assertTrue(uuidutils.is_uuid_like(created_group['id'])) - created_group_id = created_group['id'] - - # Check it's 
there - found_group = self._poll_group_while(created_group_id, - ['creating']) - self.assertEqual(created_group_id, found_group['id']) - self.assertEqual(self.group_type['id'], found_group['group_type']) - self.assertEqual('available', found_group['status']) - - # Create volume - created_volume = self.api.post_volume( - {'volume': {'size': 1, - 'group_id': created_group_id, - 'volume_type': self.volume_type['id']}}) - self.assertTrue(uuidutils.is_uuid_like(created_volume['id'])) - created_volume_id = created_volume['id'] - - # Check it's there - found_volume = self.api.get_volume(created_volume_id) - self.assertEqual(created_volume_id, found_volume['id']) - self.assertEqual(self._vol_type_name, found_volume['volume_type']) - self.assertEqual(created_group_id, found_volume['group_id']) - - # Wait (briefly) for creation. Delay is due to the 'message queue' - found_volume = self._poll_volume_while(created_volume_id, ['creating']) - - # It should be available... - self.assertEqual('available', found_volume['status']) - - # Delete the original group - self.api.delete_group(created_group_id, - {'delete': {'delete-volumes': True}}) - - # Wait (briefly) for deletion. Delay is due to the 'message queue' - found_volume = self._poll_volume_while(created_volume_id, ['deleting']) - found_group = self._poll_group_while(created_group_id, ['deleting']) - - # Should be gone - self.assertFalse(found_volume) - self.assertFalse(found_group) diff --git a/cinder/tests/functional/test_login.py b/cinder/tests/functional/test_login.py deleted file mode 100644 index 1b2ab678b..000000000 --- a/cinder/tests/functional/test_login.py +++ /dev/null @@ -1,23 +0,0 @@ -# Copyright 2011 Justin Santa Barbara -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. 
You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from cinder.tests.functional import functional_helpers - - -class LoginTest(functional_helpers._FunctionalTestBase): - def test_login(self): - """Simple check - we list volumes - so we know we're logged in.""" - volumes = self.api.get_volumes() - self.assertIsNotNone(volumes) diff --git a/cinder/tests/functional/test_quotas.py b/cinder/tests/functional/test_quotas.py deleted file mode 100644 index 172de214a..000000000 --- a/cinder/tests/functional/test_quotas.py +++ /dev/null @@ -1,170 +0,0 @@ -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
-import mock -import uuid - -from cinder import quota -from cinder.tests.functional.api import client -from cinder.tests.functional import functional_helpers -from cinder.volume import configuration - - -class NestedQuotasTest(functional_helpers._FunctionalTestBase): - _vol_type_name = 'functional_test_type' - - def setUp(self): - super(NestedQuotasTest, self).setUp() - self.api.create_type(self._vol_type_name) - self._create_project_hierarchy() - # Need to mock out Keystone so the functional tests don't require other - # services - _keystone_client = mock.MagicMock() - _keystone_client.version = 'v3' - _keystone_client.projects.get.side_effect = self._get_project - _keystone_client_get = mock.patch( - 'cinder.quota_utils._keystone_client', - lambda *args, **kwargs: _keystone_client) - _keystone_client_get.start() - self.addCleanup(_keystone_client_get.stop) - # The QUOTA engine in Cinder is a global variable that lazy loads the - # quota driver, so even if we change the config for the quota driver, - # we won't reliably change the driver being used (or change it back) - # unless the global variables get cleaned up, so using mock instead to - # simulate this change - nested_driver = quota.NestedDbQuotaDriver() - _driver_patcher = mock.patch( - 'cinder.quota.QuotaEngine._driver', new=nested_driver) - _driver_patcher.start() - self.addCleanup(_driver_patcher.stop) - # Default to using the top parent in the hierarchy - self._update_project(self.A.id) - - def _get_flags(self): - f = super(NestedQuotasTest, self)._get_flags() - f['volume_driver'] = ( - {'v': 'cinder.tests.fake_driver.FakeLoggingVolumeDriver', - 'g': configuration.SHARED_CONF_GROUP}) - f['default_volume_type'] = {'v': self._vol_type_name} - return f - - # Currently we use 413 error for over quota - over_quota_exception = client.OpenStackApiException413 - - def _create_project_hierarchy(self): - """Sets up the nested hierarchy show below. 
- - +-----------+ - | A | - | / \ | - | B C | - | / | - | D | - +-----------+ - """ - self.A = self.FakeProject() - self.B = self.FakeProject(parent_id=self.A.id) - self.C = self.FakeProject(parent_id=self.A.id) - self.D = self.FakeProject(parent_id=self.B.id) - - self.B.subtree = {self.D.id: self.D.subtree} - self.A.subtree = {self.B.id: self.B.subtree, self.C.id: self.C.subtree} - - self.A.parents = None - self.B.parents = {self.A.id: None} - self.C.parents = {self.A.id: None} - self.D.parents = {self.B.id: self.B.parents} - - # project_by_id attribute is used to recover a project based on its id. - self.project_by_id = {self.A.id: self.A, self.B.id: self.B, - self.C.id: self.C, self.D.id: self.D} - - class FakeProject(object): - _dom_id = uuid.uuid4().hex - - def __init__(self, parent_id=None): - self.id = uuid.uuid4().hex - self.parent_id = parent_id - self.domain_id = self._dom_id - self.subtree = None - self.parents = None - - def _get_project(self, project_id, *args, **kwargs): - return self.project_by_id[project_id] - - def _create_volume(self): - return self.api.post_volume({'volume': {'size': 1}}) - - def test_default_quotas_enforced(self): - # Should be able to create volume on parent project by default - created_vol = self._create_volume() - self._poll_volume_while(created_vol['id'], ['creating'], 'available') - self._update_project(self.B.id) - # Shouldn't be able to create volume on child project by default - self.assertRaises(self.over_quota_exception, self._create_volume) - - def test_update_child_with_parent_default_quota(self): - # Make sure we can update to a reasonable value - self.api.quota_set(self.B.id, {'volumes': 5}) - # Ensure that the update took and we can create a volume - self._poll_volume_while( - self._create_volume()['id'], ['creating'], 'available') - - def test_quota_update_child_greater_than_parent(self): - self.assertRaises( - client.OpenStackApiException400, - self.api.quota_set, self.B.id, {'volumes': 11}) - - def 
test_child_soft_limit_propagates_to_parent(self): - self.api.quota_set(self.B.id, {'volumes': 0}) - self.api.quota_set(self.D.id, {'volumes': -1}) - self._update_project(self.D.id) - self.assertRaises(self.over_quota_exception, self._create_volume) - - def test_child_quota_hard_limits_affects_parents_allocated(self): - self.api.quota_set(self.B.id, {'volumes': 5}) - self.api.quota_set(self.C.id, {'volumes': 3}) - alloc = self.api.quota_get(self.A.id)['volumes']['allocated'] - self.assertEqual(8, alloc) - self.assertRaises(client.OpenStackApiException400, - self.api.quota_set, self.C.id, {'volumes': 6}) - - def _update_quota_and_def_type(self, project_id, quota): - self.api.quota_set(project_id, quota) - type_updates = {'%s_%s' % (key, self._vol_type_name): val for key, val - in quota.items() if key != 'per_volume_gigabytes'} - return self.api.quota_set(project_id, type_updates) - - def test_grandchild_soft_limit_propagates_up(self): - quota = {'volumes': -1, 'gigabytes': -1, 'per_volume_gigabytes': -1} - self._update_quota_and_def_type(self.B.id, quota) - self._update_quota_and_def_type(self.D.id, quota) - self._update_project(self.D.id) - # Create two volumes in the grandchild project and ensure grandparent's - # allocated is updated accordingly - vol = self._create_volume() - self._create_volume() - self._update_project(self.A.id) - alloc = self.api.quota_get(self.A.id)['volumes']['allocated'] - self.assertEqual(2, alloc) - alloc = self.api.quota_get(self.B.id)['volumes']['allocated'] - self.assertEqual(2, alloc) - # Ensure delete reduces the quota - self._update_project(self.D.id) - self.api.delete_volume(vol['id']) - self._poll_volume_while(vol['id'], ['deleting']) - self._update_project(self.A.id) - alloc = self.api.quota_get(self.A.id)['volumes']['allocated'] - self.assertEqual(1, alloc) - alloc = self.api.quota_get(self.B.id)['volumes']['allocated'] - self.assertEqual(1, alloc) diff --git a/cinder/tests/functional/test_volumes.py 
b/cinder/tests/functional/test_volumes.py deleted file mode 100644 index 4289ea278..000000000 --- a/cinder/tests/functional/test_volumes.py +++ /dev/null @@ -1,128 +0,0 @@ -# Copyright 2011 Justin Santa Barbara -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from oslo_utils import uuidutils - -from cinder.tests.functional import functional_helpers -from cinder.volume import configuration - - -class VolumesTest(functional_helpers._FunctionalTestBase): - _vol_type_name = 'functional_test_type' - - def setUp(self): - super(VolumesTest, self).setUp() - self.api.create_type(self._vol_type_name) - - def _get_flags(self): - f = super(VolumesTest, self)._get_flags() - f['volume_driver'] = ( - {'v': 'cinder.tests.fake_driver.FakeLoggingVolumeDriver', - 'g': configuration.SHARED_CONF_GROUP}) - f['default_volume_type'] = {'v': self._vol_type_name} - return f - - def test_get_volumes_summary(self): - """Simple check that listing volumes works.""" - volumes = self.api.get_volumes(False) - self.assertIsNotNone(volumes) - - def test_get_volumes(self): - """Simple check that listing volumes works.""" - volumes = self.api.get_volumes() - self.assertIsNotNone(volumes) - - def test_create_and_delete_volume(self): - """Creates and deletes a volume.""" - - # Create volume - created_volume = self.api.post_volume({'volume': {'size': 1}}) - self.assertTrue(uuidutils.is_uuid_like(created_volume['id'])) - created_volume_id = created_volume['id'] - - # Check it's there 
- found_volume = self.api.get_volume(created_volume_id) - self.assertEqual(created_volume_id, found_volume['id']) - self.assertEqual(self._vol_type_name, found_volume['volume_type']) - - # It should also be in the all-volume list - volumes = self.api.get_volumes() - volume_names = [volume['id'] for volume in volumes] - self.assertIn(created_volume_id, volume_names) - - # Wait (briefly) for creation. Delay is due to the 'message queue' - found_volume = self._poll_volume_while(created_volume_id, ['creating']) - - # It should be available... - self.assertEqual('available', found_volume['status']) - - # Delete the volume - self.api.delete_volume(created_volume_id) - - # Wait (briefly) for deletion. Delay is due to the 'message queue' - found_volume = self._poll_volume_while(created_volume_id, ['deleting']) - - # Should be gone - self.assertFalse(found_volume) - - def test_create_volume_with_metadata(self): - """Creates a volume with metadata.""" - - # Create volume - metadata = {'key1': 'value1', - 'key2': 'value2'} - created_volume = self.api.post_volume( - {'volume': {'size': 1, - 'metadata': metadata}}) - self.assertTrue(uuidutils.is_uuid_like(created_volume['id'])) - created_volume_id = created_volume['id'] - - # Check it's there and metadata present - found_volume = self.api.get_volume(created_volume_id) - self.assertEqual(created_volume_id, found_volume['id']) - self.assertEqual(metadata, found_volume['metadata']) - - def test_create_volume_in_availability_zone(self): - """Creates a volume in availability_zone.""" - - # Create volume - availability_zone = 'nova' - created_volume = self.api.post_volume( - {'volume': {'size': 1, - 'availability_zone': availability_zone}}) - self.assertTrue(uuidutils.is_uuid_like(created_volume['id'])) - created_volume_id = created_volume['id'] - - # Check it's there and availability zone present - found_volume = self.api.get_volume(created_volume_id) - self.assertEqual(created_volume_id, found_volume['id']) - 
self.assertEqual(availability_zone, found_volume['availability_zone']) - - def test_create_and_update_volume(self): - # Create vol1 - created_volume = self.api.post_volume({'volume': { - 'size': 1, 'name': 'vol1'}}) - self.assertEqual('vol1', created_volume['name']) - created_volume_id = created_volume['id'] - - # update volume - body = {'volume': {'name': 'vol-one'}} - updated_volume = self.api.put_volume(created_volume_id, body) - self.assertEqual('vol-one', updated_volume['name']) - - # check for update - found_volume = self.api.get_volume(created_volume_id) - self.assertEqual(created_volume_id, found_volume['id']) - self.assertEqual('vol-one', found_volume['name']) diff --git a/cinder/tests/tempest/README.rst b/cinder/tests/tempest/README.rst deleted file mode 100644 index 47ed98ef8..000000000 --- a/cinder/tests/tempest/README.rst +++ /dev/null @@ -1,62 +0,0 @@ -=============================================== -Tempest Integration for Cinder -=============================================== - -This directory contains additional Cinder tempest tests. - -See the tempest plugin docs for information on using it: -http://docs.openstack.org/developer/tempest/plugin.html#using-plugins - -To run all tests from this plugin, install cinder into your environment. Then -from the tempest directory run:: - - $ tox -e all-plugin -- volume - - -It is expected that Cinder third party CI's use the all-plugin tox environment -above for all test runs. Developers can also use this locally to perform more -extensive testing. - -Any typical devstack instance should be able to run all Cinder plugin tests. -For completeness, here is an example of a devstack local.conf that should -work. Update backend information to fit your environment. 
- -:: - - [[local|localrc]] - VIRT_DRIVER=libvirt - ADMIN_PASSWORD=secret - SERVICE_TOKEN=$ADMIN_PASSWORD - MYSQL_PASSWORD=$ADMIN_PASSWORD - RABBIT_PASSWORD=$ADMIN_PASSWORD - SERVICE_PASSWORD=$ADMIN_PASSWORD - SCREEN_LOGDIR=/opt/stack/screen-logs - LOGFILE=$DEST/logs/stack.sh.log - LOGDAYS=2 - SYSLOG=False - LOG_COLOR=False - RECLONE=yes - ENABLED_SERVICES=c-api,c-sch,c-vol,cinder,dstat,g-api,g-reg,key,mysql, - n-api,n-cond,n-cpu,n-crt,n-net,n-sch,rabbit,tempest - CINDER_ENABLED_BACKENDS=lvmdriver-1 - CINDER_DEFAULT_VOLUME_TYPE=lvmdriver-1 - CINDER_VOLUME_CLEAR=none - TEMPEST_ENABLED_BACKENDS=lvmdriver-1 - TEMPEST_VOLUME_DRIVER=lvmdriver-1 - TEMPEST_VOLUME_VENDOR="Open Source" - TEMPEST_STORAGE_PROTOCOL=iSCSI - LIBVIRT_FIREWALL_DRIVER=nova.virt.firewall.NoopFirewallDriver - VIRT_DRIVER=libvirt - ACTIVE_TIMEOUT=120 - BOOT_TIMEOUT=120 - ASSOCIATE_TIMEOUT=120 - TERMINATE_TIMEOUT=120 - - - [[post-config|$CINDER_CONF]] - [DEFAULT] - [lvmdriver-1] - volume_driver=cinder.volume.drivers.lvm.LVMISCSIDriver - volume_group=stack-volumes-1 - volume_backend_name=lvmdriver-1`` - diff --git a/cinder/tests/tempest/__init__.py b/cinder/tests/tempest/__init__.py deleted file mode 100644 index e69de29bb..000000000 diff --git a/cinder/tests/tempest/api/__init__.py b/cinder/tests/tempest/api/__init__.py deleted file mode 100644 index e69de29bb..000000000 diff --git a/cinder/tests/tempest/api/volume/__init__.py b/cinder/tests/tempest/api/volume/__init__.py deleted file mode 100644 index e69de29bb..000000000 diff --git a/cinder/tests/tempest/api/volume/test_consistencygroups.py b/cinder/tests/tempest/api/volume/test_consistencygroups.py deleted file mode 100644 index 817228d49..000000000 --- a/cinder/tests/tempest/api/volume/test_consistencygroups.py +++ /dev/null @@ -1,281 +0,0 @@ -# Copyright (C) 2015 EMC Corporation. -# Copyright (C) 2016 Pure Storage, Inc. -# All Rights Reserved. 
-# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from tempest.api.volume import base -from tempest.common import waiters -from tempest import config -from tempest.lib.common.utils import data_utils -from tempest.lib import decorators - -from cinder.tests.tempest import cinder_clients - -CONF = config.CONF - - -class ConsistencyGroupsV2Test(base.BaseVolumeAdminTest): - - @classmethod - def setup_clients(cls): - cls._api_version = 2 - super(ConsistencyGroupsV2Test, cls).setup_clients() - - manager = cinder_clients.Manager(cls.os_adm) - cls.consistencygroups_adm_client = manager.consistencygroups_adm_client - - @classmethod - def skip_checks(cls): - super(ConsistencyGroupsV2Test, cls).skip_checks() - if not CONF.volume_feature_enabled.consistency_group: - raise cls.skipException("Cinder consistency group " - "feature disabled") - - def _delete_consistencygroup(self, cg_id): - self.consistencygroups_adm_client.delete_consistencygroup(cg_id) - vols = self.admin_volume_client.list_volumes(detail=True)['volumes'] - for vol in vols: - if vol['consistencygroup_id'] == cg_id: - self.admin_volume_client.wait_for_resource_deletion(vol['id']) - self.consistencygroups_adm_client.wait_for_consistencygroup_deletion( - cg_id) - - def _delete_cgsnapshot(self, cgsnapshot_id, cg_id): - self.consistencygroups_adm_client.delete_cgsnapshot(cgsnapshot_id) - vols = self.admin_volume_client.list_volumes(detail=True)['volumes'] - snapshots = self.admin_snapshots_client.list_snapshots( - 
detail=True)['snapshots'] - for vol in vols: - for snap in snapshots: - if (vol['consistencygroup_id'] == cg_id and - vol['id'] == snap['volume_id']): - self.snapshots_client.wait_for_resource_deletion( - snap['id']) - self.consistencygroups_adm_client.wait_for_cgsnapshot_deletion( - cgsnapshot_id) - - @decorators.idempotent_id('3fe776ba-ec1f-4e6c-8d78-4b14c3a7fc44') - def test_consistencygroup_create_delete(self): - # Create volume type - name = data_utils.rand_name("volume-type") - volume_type = self.admin_volume_types_client.create_volume_type( - name=name)['volume_type'] - - # Create CG - cg_name = data_utils.rand_name('CG') - create_consistencygroup = ( - self.consistencygroups_adm_client.create_consistencygroup) - cg = create_consistencygroup(volume_type['id'], - name=cg_name)['consistencygroup'] - vol_name = data_utils.rand_name("volume") - params = {'name': vol_name, - 'volume_type': volume_type['id'], - 'consistencygroup_id': cg['id'], - 'size': CONF.volume.volume_size} - - # Create volume - volume = self.admin_volume_client.create_volume(**params)['volume'] - - waiters.wait_for_volume_resource_status(self.admin_volume_client, - volume['id'], 'available') - self.consistencygroups_adm_client.wait_for_consistencygroup_status( - cg['id'], 'available') - self.assertEqual(cg_name, cg['name']) - - # Get a given CG - cg = self.consistencygroups_adm_client.show_consistencygroup( - cg['id'])['consistencygroup'] - self.assertEqual(cg_name, cg['name']) - - # Get all CGs with detail - cgs = self.consistencygroups_adm_client.list_consistencygroups( - detail=True)['consistencygroups'] - self.assertIn((cg['name'], cg['id']), - [(m['name'], m['id']) for m in cgs]) - - # Clean up - self._delete_consistencygroup(cg['id']) - self.admin_volume_types_client.delete_volume_type(volume_type['id']) - - @decorators.idempotent_id('2134dd52-f333-4456-bb05-6cb0f009a44f') - def test_consistencygroup_cgsnapshot_create_delete(self): - # Create volume type - name = 
data_utils.rand_name("volume-type") - volume_type = self.admin_volume_types_client.create_volume_type( - name=name)['volume_type'] - - # Create CG - cg_name = data_utils.rand_name('CG') - create_consistencygroup = ( - self.consistencygroups_adm_client.create_consistencygroup) - cg = create_consistencygroup(volume_type['id'], - name=cg_name)['consistencygroup'] - vol_name = data_utils.rand_name("volume") - params = {'name': vol_name, - 'volume_type': volume_type['id'], - 'consistencygroup_id': cg['id'], - 'size': CONF.volume.volume_size} - - # Create volume - volume = self.admin_volume_client.create_volume(**params)['volume'] - waiters.wait_for_volume_resource_status(self.admin_volume_client, - volume['id'], 'available') - self.consistencygroups_adm_client.wait_for_consistencygroup_status( - cg['id'], 'available') - self.assertEqual(cg_name, cg['name']) - - # Create cgsnapshot - cgsnapshot_name = data_utils.rand_name('cgsnapshot') - create_cgsnapshot = ( - self.consistencygroups_adm_client.create_cgsnapshot) - cgsnapshot = create_cgsnapshot(cg['id'], - name=cgsnapshot_name)['cgsnapshot'] - snapshots = self.admin_snapshots_client.list_snapshots( - detail=True)['snapshots'] - for snap in snapshots: - if volume['id'] == snap['volume_id']: - waiters.wait_for_volume_resource_status( - self.admin_snapshots_client, snap['id'], 'available') - self.consistencygroups_adm_client.wait_for_cgsnapshot_status( - cgsnapshot['id'], 'available') - self.assertEqual(cgsnapshot_name, cgsnapshot['name']) - - # Get a given CG snapshot - cgsnapshot = self.consistencygroups_adm_client.show_cgsnapshot( - cgsnapshot['id'])['cgsnapshot'] - self.assertEqual(cgsnapshot_name, cgsnapshot['name']) - - # Get all CG snapshots with detail - cgsnapshots = self.consistencygroups_adm_client.list_cgsnapshots( - detail=True)['cgsnapshots'] - self.assertIn((cgsnapshot['name'], cgsnapshot['id']), - [(m['name'], m['id']) for m in cgsnapshots]) - - # Clean up - self._delete_cgsnapshot(cgsnapshot['id'], 
cg['id']) - self._delete_consistencygroup(cg['id']) - self.admin_volume_types_client.delete_volume_type(volume_type['id']) - - @decorators.idempotent_id('3a6a5525-25ca-4a6c-aac4-cac6fa8f5b43') - def test_create_consistencygroup_from_cgsnapshot(self): - # Create volume type - name = data_utils.rand_name("volume-type") - volume_type = self.admin_volume_types_client.create_volume_type( - name=name)['volume_type'] - - # Create CG - cg_name = data_utils.rand_name('CG') - create_consistencygroup = ( - self.consistencygroups_adm_client.create_consistencygroup) - cg = create_consistencygroup(volume_type['id'], - name=cg_name)['consistencygroup'] - vol_name = data_utils.rand_name("volume") - params = {'name': vol_name, - 'volume_type': volume_type['id'], - 'consistencygroup_id': cg['id'], - 'size': CONF.volume.volume_size} - - # Create volume - volume = self.admin_volume_client.create_volume(**params)['volume'] - waiters.wait_for_volume_resource_status(self.admin_volume_client, - volume['id'], 'available') - self.consistencygroups_adm_client.wait_for_consistencygroup_status( - cg['id'], 'available') - self.assertEqual(cg_name, cg['name']) - - # Create cgsnapshot - cgsnapshot_name = data_utils.rand_name('cgsnapshot') - create_cgsnapshot = ( - self.consistencygroups_adm_client.create_cgsnapshot) - cgsnapshot = create_cgsnapshot(cg['id'], - name=cgsnapshot_name)['cgsnapshot'] - snapshots = self.snapshots_client.list_snapshots( - detail=True)['snapshots'] - for snap in snapshots: - if volume['id'] == snap['volume_id']: - waiters.wait_for_volume_resource_status( - self.admin_snapshots_client, snap['id'], 'available') - self.consistencygroups_adm_client.wait_for_cgsnapshot_status( - cgsnapshot['id'], 'available') - self.assertEqual(cgsnapshot_name, cgsnapshot['name']) - - # Create CG from CG snapshot - cg_name2 = data_utils.rand_name('CG_from_snap') - create_consistencygroup2 = ( - self.consistencygroups_adm_client.create_consistencygroup_from_src) - cg2 = 
create_consistencygroup2(cgsnapshot_id=cgsnapshot['id'], - name=cg_name2)['consistencygroup'] - vols = self.admin_volume_client.list_volumes( - detail=True)['volumes'] - for vol in vols: - if vol['consistencygroup_id'] == cg2['id']: - waiters.wait_for_volume_resource_status( - self.admin_volume_client, vol['id'], 'available') - self.consistencygroups_adm_client.wait_for_consistencygroup_status( - cg2['id'], 'available') - self.assertEqual(cg_name2, cg2['name']) - - # Clean up - self._delete_consistencygroup(cg2['id']) - self._delete_cgsnapshot(cgsnapshot['id'], cg['id']) - self._delete_consistencygroup(cg['id']) - self.admin_volume_types_client.delete_volume_type(volume_type['id']) - - @decorators.idempotent_id('556121ae-de9c-4342-9897-e54260447a19') - def test_create_consistencygroup_from_consistencygroup(self): - # Create volume type - name = data_utils.rand_name("volume-type") - volume_type = self.admin_volume_types_client.create_volume_type( - name=name)['volume_type'] - - # Create CG - cg_name = data_utils.rand_name('CG') - create_consistencygroup = ( - self.consistencygroups_adm_client.create_consistencygroup) - cg = create_consistencygroup(volume_type['id'], - name=cg_name)['consistencygroup'] - vol_name = data_utils.rand_name("volume") - params = {'name': vol_name, - 'volume_type': volume_type['id'], - 'consistencygroup_id': cg['id'], - 'size': CONF.volume.volume_size} - - # Create volume - volume = self.admin_volume_client.create_volume(**params)['volume'] - waiters.wait_for_volume_resource_status(self.admin_volume_client, - volume['id'], 'available') - self.consistencygroups_adm_client.wait_for_consistencygroup_status( - cg['id'], 'available') - self.assertEqual(cg_name, cg['name']) - - # Create CG from CG - cg_name2 = data_utils.rand_name('CG_from_cg') - create_consistencygroup2 = ( - self.consistencygroups_adm_client.create_consistencygroup_from_src) - cg2 = create_consistencygroup2(source_cgid=cg['id'], - name=cg_name2)['consistencygroup'] - vols = 
self.admin_volume_client.list_volumes( - detail=True)['volumes'] - for vol in vols: - if vol['consistencygroup_id'] == cg2['id']: - waiters.wait_for_volume_resource_status( - self.admin_volume_client, vol['id'], 'available') - self.consistencygroups_adm_client.wait_for_consistencygroup_status( - cg2['id'], 'available') - self.assertEqual(cg_name2, cg2['name']) - - # Clean up - self._delete_consistencygroup(cg2['id']) - self._delete_consistencygroup(cg['id']) - self.admin_volume_types_client.delete_volume_type(volume_type['id']) diff --git a/cinder/tests/tempest/api/volume/test_volume_backup.py b/cinder/tests/tempest/api/volume/test_volume_backup.py deleted file mode 100644 index ec8efd89b..000000000 --- a/cinder/tests/tempest/api/volume/test_volume_backup.py +++ /dev/null @@ -1,129 +0,0 @@ -# Copyright (c) 2016 Mirantis Inc. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -from tempest.api.volume import base as volume_base -from tempest.common import waiters -from tempest import config -from tempest.lib.common.utils import data_utils -from tempest.lib import decorators - -CONF = config.CONF - - -class VolumesBackupsTest(volume_base.BaseVolumeTest): - - @classmethod - def skip_checks(cls): - super(VolumesBackupsTest, cls).skip_checks() - if not CONF.volume_feature_enabled.backup: - raise cls.skipException("Cinder backup feature disabled") - - @decorators.idempotent_id('885410c6-cd1d-452c-a409-7c32b7e0be15') - def test_volume_snapshot_backup(self): - """Create backup from snapshot.""" - volume = self.create_volume() - # Create snapshot - snapshot = self.create_snapshot(volume['id']) - # Create backup - backup = self.create_backup( - volume_id=volume['id'], - snapshot_id=snapshot['id']) - # Get a given backup - backup = self.backups_client.show_backup( - backup['id'])['backup'] - waiters.wait_for_volume_resource_status( - self.backups_client, - backup['id'], 'available') - self.assertEqual(volume['id'], backup['volume_id']) - self.assertEqual(snapshot['id'], backup['snapshot_id']) - - self.snapshots_client.delete_snapshot(snapshot['id']) - self.snapshots_client.wait_for_resource_deletion(snapshot['id']) - - self.volumes_client.delete_volume(volume['id']) - self.volumes_client.wait_for_resource_deletion(volume['id']) - - @decorators.idempotent_id('b5d837b0-7066-455d-88fc-4a721a899306') - def test_backup_create_and_restore_to_an_existing_volume(self): - """Test backup create and restore to an existing volume.""" - # Create volume - src_vol = self.create_volume() - self.addCleanup(self.volumes_client.delete_volume, - src_vol['id']) - # Create backup - backup = self.backups_client.create_backup( - volume_id=src_vol['id'])['backup'] - self.addCleanup(self.backups_client.delete_backup, backup['id']) - waiters.wait_for_volume_resource_status( - self.backups_client, - backup['id'], 'available') - # Restore to existing volume - restore = 
self.backups_client.restore_backup( - backup_id=backup['id'], - volume_id=src_vol['id'])['restore'] - waiters.wait_for_volume_resource_status( - self.backups_client, - backup['id'], 'available') - waiters.wait_for_volume_resource_status( - self.volumes_client, - src_vol['id'], 'available') - self.assertEqual(src_vol['id'], restore['volume_id']) - self.assertEqual(backup['id'], restore['backup_id']) - - @decorators.idempotent_id('c810fe2c-cb40-43ab-96aa-471b74516a98') - def test_incremental_backup(self): - """Test create incremental backup.""" - # Create volume from image - volume = self.create_volume(size=CONF.volume.volume_size, - imageRef=CONF.compute.image_ref) - self.addCleanup(self.volumes_client.delete_volume, - volume['id']) - - # Create backup - backup = self.backups_client.create_backup( - volume_id=volume['id'])['backup'] - waiters.wait_for_volume_resource_status(self.backups_client, - backup['id'], 'available') - # Create a server - bd_map = [{'volume_id': volume['id'], - 'delete_on_termination': '0'}] - - server_name = data_utils.rand_name('instance') - server = self.create_server( - name=server_name, - block_device_mapping=bd_map, - wait_until='ACTIVE') - - # Delete VM - self.servers_client.delete_server(server['id']) - # Create incremental backup - waiters.wait_for_volume_resource_status(self.volumes_client, - volume['id'], 'available') - backup_incr = self.backups_client.create_backup( - volume_id=volume['id'], - incremental=True)['backup'] - - waiters.wait_for_volume_resource_status(self.backups_client, - backup_incr['id'], - 'available') - - is_incremental = self.backups_client.show_backup( - backup_incr['id'])['backup']['is_incremental'] - self.assertTrue(is_incremental) - - self.backups_client.delete_backup(backup_incr['id']) - self.backups_client.wait_for_resource_deletion(backup_incr['id']) - self.backups_client.delete_backup(backup['id']) - self.backups_client.wait_for_resource_deletion(backup['id']) diff --git 
a/cinder/tests/tempest/api/volume/test_volume_revert.py b/cinder/tests/tempest/api/volume/test_volume_revert.py deleted file mode 100644 index b5151c59d..000000000 --- a/cinder/tests/tempest/api/volume/test_volume_revert.py +++ /dev/null @@ -1,77 +0,0 @@ -# Copyright (c) 2017 Huawei. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from tempest.api.volume import base as volume_base -from tempest.common import waiters -from tempest import config -from tempest.lib import decorators - -from cinder.tests.tempest import cinder_clients - -CONF = config.CONF - - -class VolumeRevertTests(volume_base.BaseVolumeTest): - min_microversion = '3.40' - - @classmethod - def setup_clients(cls): - cls._api_version = 3 - super(VolumeRevertTests, cls).setup_clients() - - manager = cinder_clients.Manager(cls.os_primary) - cls.volume_revert_client = manager.volume_revet_client - - def setUp(self): - super(VolumeRevertTests, self).setUp() - # Create volume - self.volume = self.create_volume(size=1) - # Create snapshot - self.snapshot = self.create_snapshot(self.volume['id']) - - @decorators.idempotent_id('87b7dcb7-4950-4a3a-802c-ece55491846d') - def test_volume_revert_to_snapshot(self): - """Test revert to snapshot""" - # Revert to snapshot - self.volume_revert_client.revert_to_snapshot(self.volume, - self.snapshot['id']) - waiters.wait_for_volume_resource_status( - self.volumes_client, - self.volume['id'], 'available') - waiters.wait_for_volume_resource_status( - 
self.snapshots_client, - self.snapshot['id'], 'available') - volume = self.volumes_client.show_volume(self.volume['id'])['volume'] - - self.assertEqual(1, volume['size']) - - @decorators.idempotent_id('4e8b0788-87fe-430d-be7a-444d7f8e0347') - def test_volume_revert_to_snapshot_after_extended(self): - """Test revert to snapshot after extended""" - # Extend the volume - self.volumes_client.extend_volume(self.volume['id'], new_size=2) - waiters.wait_for_volume_resource_status(self.volumes_client, - self.volume['id'], 'available') - # Revert to snapshot - self.volume_revert_client.revert_to_snapshot(self.volume, - self.snapshot['id']) - waiters.wait_for_volume_resource_status( - self.volumes_client, - self.volume['id'], 'available') - waiters.wait_for_volume_resource_status( - self.snapshots_client, - self.snapshot['id'], 'available') - volume = self.volumes_client.show_volume(self.volume['id'])['volume'] - self.assertEqual(2, volume['size']) diff --git a/cinder/tests/tempest/api/volume/test_volume_unicode.py b/cinder/tests/tempest/api/volume/test_volume_unicode.py deleted file mode 100644 index eab9f3cb1..000000000 --- a/cinder/tests/tempest/api/volume/test_volume_unicode.py +++ /dev/null @@ -1,59 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2016 Red Hat, Inc. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -from tempest.api.volume import base as volume_base -from tempest.common import waiters -from tempest import config -from tempest.lib.common.utils import data_utils - -CONF = config.CONF - - -class CinderUnicodeTest(volume_base.BaseVolumeTest): - - @classmethod - def resource_setup(cls): - super(CinderUnicodeTest, cls).resource_setup() - - # Stick to three-byte unicode here, since four+ byte - # chars require utf8mb4 database support which may not - # be configured. - cls.volume_name = u"CinderUnicodeTest塵㼗‽" - cls.volume = cls.create_volume_with_args(name=cls.volume_name) - - @classmethod - def create_volume_with_args(cls, **kwargs): - if 'name' not in kwargs: - kwargs['name'] = data_utils.rand_name('Volume') - - kwargs['size'] = CONF.volume.volume_size - - volume = cls.volumes_client.create_volume(**kwargs)['volume'] - cls.volumes.append(volume) - - waiters.wait_for_volume_resource_status(cls.volumes_client, - volume['id'], - 'available') - - return volume - - def test_create_delete_unicode_volume_name(self): - """Create a volume with a unicode name and view it.""" - - result = self.volumes_client.show_volume(self.volumes[0]['id']) - fetched_volume = result['volume'] - self.assertEqual(fetched_volume['name'], - self.volume_name) diff --git a/cinder/tests/tempest/cinder_clients.py b/cinder/tests/tempest/cinder_clients.py deleted file mode 100644 index aad32d4ab..000000000 --- a/cinder/tests/tempest/cinder_clients.py +++ /dev/null @@ -1,40 +0,0 @@ -# Copyright (c) 2016 Pure Storage, Inc. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the -# License for the specific language governing permissions and limitations -# under the License. - -from tempest import config - -from cinder.tests.tempest.services import consistencygroups_client -from cinder.tests.tempest.services import volume_revert_client - -CONF = config.CONF - - -class Manager(object): - def __init__(self, base_manager): - params = { - 'service': CONF.volume.catalog_type, - 'region': CONF.volume.region or CONF.identity.region, - 'endpoint_type': CONF.volume.endpoint_type, - 'build_interval': CONF.volume.build_interval, - 'build_timeout': CONF.volume.build_timeout - } - params.update(base_manager.default_params) - auth_provider = base_manager.auth_provider - - self.consistencygroups_adm_client = ( - consistencygroups_client.ConsistencyGroupsClient(auth_provider, - **params)) - self.volume_revet_client = ( - volume_revert_client.VolumeRevertClient(auth_provider, **params)) diff --git a/cinder/tests/tempest/config.py b/cinder/tests/tempest/config.py deleted file mode 100644 index f4eb0987d..000000000 --- a/cinder/tests/tempest/config.py +++ /dev/null @@ -1,22 +0,0 @@ -# Copyright 2016 -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -from oslo_config import cfg - -cinder_option = [ - cfg.BoolOpt('consistency_group', - default=False, - help='Enable to run Cinder volume consistency group tests'), -] diff --git a/cinder/tests/tempest/plugin.py b/cinder/tests/tempest/plugin.py deleted file mode 100644 index b816963ea..000000000 --- a/cinder/tests/tempest/plugin.py +++ /dev/null @@ -1,43 +0,0 @@ -# Copyright 2015 -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import cinder -import os - -from cinder.tests.tempest import config as project_config - -from tempest import config -from tempest.test_discover import plugins - - -class CinderTempestPlugin(plugins.TempestPlugin): - def load_tests(self): - base_path = os.path.split(os.path.dirname( - os.path.abspath(cinder.__file__)))[0] - test_dir = "cinder/tests/tempest" - full_test_dir = os.path.join(base_path, test_dir) - return full_test_dir, base_path - - def register_opts(self, conf): - config.register_opt_group( - conf, config.volume_feature_group, - project_config.cinder_option - ) - - def get_opt_lists(self): - return [ - (config.volume_feature_group.name, - project_config.cinder_option), - ] diff --git a/cinder/tests/tempest/scenario/__init__.py b/cinder/tests/tempest/scenario/__init__.py deleted file mode 100644 index e69de29bb..000000000 diff --git a/cinder/tests/tempest/services/__init__.py b/cinder/tests/tempest/services/__init__.py deleted file mode 100644 index e69de29bb..000000000 diff --git 
a/cinder/tests/tempest/services/consistencygroups_client.py b/cinder/tests/tempest/services/consistencygroups_client.py deleted file mode 100644 index 10415d41e..000000000 --- a/cinder/tests/tempest/services/consistencygroups_client.py +++ /dev/null @@ -1,193 +0,0 @@ -# Copyright (C) 2015 EMC Corporation. -# Copyright (C) 2016 Pure Storage, Inc. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import time - -from oslo_serialization import jsonutils as json -from six.moves import http_client -from tempest import exceptions -from tempest.lib.common import rest_client -from tempest.lib import exceptions as lib_exc - - -class ConsistencyGroupsClient(rest_client.RestClient): - """Client class to send CRUD Volume ConsistencyGroup API requests""" - - def __init__(self, auth_provider, service, region, **kwargs): - super(ConsistencyGroupsClient, self).__init__( - auth_provider, service, region, **kwargs) - - def create_consistencygroup(self, volume_types, **kwargs): - """Creates a consistency group.""" - post_body = {'volume_types': volume_types} - if kwargs.get('availability_zone'): - post_body['availability_zone'] = kwargs.get('availability_zone') - if kwargs.get('name'): - post_body['name'] = kwargs.get('name') - if kwargs.get('description'): - post_body['description'] = kwargs.get('description') - post_body = json.dumps({'consistencygroup': post_body}) - resp, body = self.post('consistencygroups', post_body) - body = json.loads(body) - 
self.expected_success(http_client.ACCEPTED, resp.status) - return rest_client.ResponseBody(resp, body) - - def create_consistencygroup_from_src(self, **kwargs): - """Creates a consistency group from source.""" - post_body = {} - if kwargs.get('cgsnapshot_id'): - post_body['cgsnapshot_id'] = kwargs.get('cgsnapshot_id') - if kwargs.get('source_cgid'): - post_body['source_cgid'] = kwargs.get('source_cgid') - if kwargs.get('name'): - post_body['name'] = kwargs.get('name') - if kwargs.get('description'): - post_body['description'] = kwargs.get('description') - post_body = json.dumps({'consistencygroup-from-src': post_body}) - resp, body = self.post('consistencygroups/create_from_src', post_body) - body = json.loads(body) - self.expected_success(http_client.ACCEPTED, resp.status) - return rest_client.ResponseBody(resp, body) - - def delete_consistencygroup(self, cg_id): - """Delete a consistency group.""" - post_body = {'force': True} - post_body = json.dumps({'consistencygroup': post_body}) - resp, body = self.post('consistencygroups/%s/delete' % cg_id, - post_body) - self.expected_success(http_client.ACCEPTED, resp.status) - return rest_client.ResponseBody(resp, body) - - def show_consistencygroup(self, cg_id): - """Returns the details of a single consistency group.""" - url = "consistencygroups/%s" % str(cg_id) - resp, body = self.get(url) - body = json.loads(body) - self.expected_success(http_client.OK, resp.status) - return rest_client.ResponseBody(resp, body) - - def list_consistencygroups(self, detail=False): - """Information for all the tenant's consistency groups.""" - url = "consistencygroups" - if detail: - url += "/detail" - resp, body = self.get(url) - body = json.loads(body) - self.expected_success(http_client.OK, resp.status) - return rest_client.ResponseBody(resp, body) - - def create_cgsnapshot(self, consistencygroup_id, **kwargs): - """Creates a consistency group snapshot.""" - post_body = {'consistencygroup_id': consistencygroup_id} - if 
kwargs.get('name'): - post_body['name'] = kwargs.get('name') - if kwargs.get('description'): - post_body['description'] = kwargs.get('description') - post_body = json.dumps({'cgsnapshot': post_body}) - resp, body = self.post('cgsnapshots', post_body) - body = json.loads(body) - self.expected_success(http_client.ACCEPTED, resp.status) - return rest_client.ResponseBody(resp, body) - - def delete_cgsnapshot(self, cgsnapshot_id): - """Delete a consistency group snapshot.""" - resp, body = self.delete('cgsnapshots/%s' % (str(cgsnapshot_id))) - self.expected_success(http_client.ACCEPTED, resp.status) - return rest_client.ResponseBody(resp, body) - - def show_cgsnapshot(self, cgsnapshot_id): - """Returns the details of a single consistency group snapshot.""" - url = "cgsnapshots/%s" % str(cgsnapshot_id) - resp, body = self.get(url) - body = json.loads(body) - self.expected_success(http_client.OK, resp.status) - return rest_client.ResponseBody(resp, body) - - def list_cgsnapshots(self, detail=False): - """Information for all the tenant's consistency group snapshotss.""" - url = "cgsnapshots" - if detail: - url += "/detail" - resp, body = self.get(url) - body = json.loads(body) - self.expected_success(http_client.OK, resp.status) - return rest_client.ResponseBody(resp, body) - - def wait_for_consistencygroup_status(self, cg_id, status): - """Waits for a consistency group to reach a given status.""" - body = self.show_consistencygroup(cg_id)['consistencygroup'] - cg_status = body['status'] - start = int(time.time()) - - while cg_status != status: - time.sleep(self.build_interval) - body = self.show_consistencygroup(cg_id)['consistencygroup'] - cg_status = body['status'] - if cg_status == 'error': - raise exceptions.ConsistencyGroupException(cg_id=cg_id) - - if int(time.time()) - start >= self.build_timeout: - message = ('Consistency group %s failed to reach %s status ' - '(current %s) within the required time (%s s).' 
% - (cg_id, status, cg_status, - self.build_timeout)) - raise exceptions.TimeoutException(message) - - def wait_for_consistencygroup_deletion(self, cg_id): - """Waits for consistency group deletion""" - start_time = int(time.time()) - while True: - try: - self.show_consistencygroup(cg_id) - except lib_exc.NotFound: - return - if int(time.time()) - start_time >= self.build_timeout: - raise exceptions.TimeoutException - time.sleep(self.build_interval) - - def wait_for_cgsnapshot_status(self, cgsnapshot_id, status): - """Waits for a consistency group snapshot to reach a given status.""" - body = self.show_cgsnapshot(cgsnapshot_id)['cgsnapshot'] - cgsnapshot_status = body['status'] - start = int(time.time()) - - while cgsnapshot_status != status: - time.sleep(self.build_interval) - body = self.show_cgsnapshot(cgsnapshot_id)['cgsnapshot'] - cgsnapshot_status = body['status'] - if cgsnapshot_status == 'error': - raise exceptions.ConsistencyGroupSnapshotException( - cgsnapshot_id=cgsnapshot_id) - - if int(time.time()) - start >= self.build_timeout: - message = ('Consistency group snapshot %s failed to reach ' - '%s status (current %s) within the required time ' - '(%s s).' % - (cgsnapshot_id, status, cgsnapshot_status, - self.build_timeout)) - raise exceptions.TimeoutException(message) - - def wait_for_cgsnapshot_deletion(self, cgsnapshot_id): - """Waits for consistency group snapshot deletion""" - start_time = int(time.time()) - while True: - try: - self.show_cgsnapshot(cgsnapshot_id) - except lib_exc.NotFound: - return - if int(time.time()) - start_time >= self.build_timeout: - raise exceptions.TimeoutException - time.sleep(self.build_interval) diff --git a/cinder/tests/tempest/services/volume_revert_client.py b/cinder/tests/tempest/services/volume_revert_client.py deleted file mode 100644 index 285494a3b..000000000 --- a/cinder/tests/tempest/services/volume_revert_client.py +++ /dev/null @@ -1,34 +0,0 @@ -# Copyright (C) 2017 Huawei. -# All Rights Reserved. 
-# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from oslo_serialization import jsonutils as json -from tempest.lib.common import rest_client -from tempest.lib.services.volume.v3 import base_client - - -class VolumeRevertClient(base_client.BaseClient): - """Client class to send revert to snapshot action API request""" - - def __init__(self, auth_provider, service, region, **kwargs): - super(VolumeRevertClient, self).__init__( - auth_provider, service, region, **kwargs) - - def revert_to_snapshot(self, volume, snapshot_id): - """Revert a volume to snapshot.""" - post_body = {'snapshot_id': snapshot_id} - post_body = json.dumps({'revert': post_body}) - resp, body = self.post('volumes/%s/action' % volume['id'], - post_body) - return rest_client.ResponseBody(resp, body) diff --git a/cinder/tests/unit/__init__.py b/cinder/tests/unit/__init__.py deleted file mode 100644 index cff971fd9..000000000 --- a/cinder/tests/unit/__init__.py +++ /dev/null @@ -1,38 +0,0 @@ -# Copyright 2010 United States Government as represented by the -# Administrator of the National Aeronautics and Space Administration. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. 
You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -""" -:mod:`cinder.tests.unit` -- Cinder Unittests -===================================================== - -.. automodule:: cinder.tests.unit - :platform: Unix -""" - -import eventlet - -from cinder import objects - -eventlet.monkey_patch() - -# NOTE(alaski): Make sure this is done after eventlet monkey patching otherwise -# the threading.local() store used in oslo_messaging will be initialized to -# threadlocal storage rather than greenthread local. This will cause context -# sets and deletes in that storage to clobber each other. -# NOTE(comstud): Make sure we have all of the objects loaded. We do this -# at module import time, because we may be using mock decorators in our -# tests that run at import time. -objects.register_all() diff --git a/cinder/tests/unit/api/__init__.py b/cinder/tests/unit/api/__init__.py deleted file mode 100644 index e69de29bb..000000000 diff --git a/cinder/tests/unit/api/contrib/__init__.py b/cinder/tests/unit/api/contrib/__init__.py deleted file mode 100644 index e69de29bb..000000000 diff --git a/cinder/tests/unit/api/contrib/test_admin_actions.py b/cinder/tests/unit/api/contrib/test_admin_actions.py deleted file mode 100644 index 5afdd2948..000000000 --- a/cinder/tests/unit/api/contrib/test_admin_actions.py +++ /dev/null @@ -1,1189 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. 
You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import ddt -import fixtures -import mock -from oslo_concurrency import lockutils -from oslo_config import fixture as config_fixture -import oslo_messaging as messaging -from oslo_serialization import jsonutils -from oslo_utils import timeutils -from six.moves import http_client -import webob -from webob import exc - -from cinder.api.contrib import admin_actions -from cinder.api.openstack import api_version_request as api_version -from cinder.backup import api as backup_api -from cinder.backup import rpcapi as backup_rpcapi -from cinder.common import constants -from cinder import context -from cinder import db -from cinder import exception -from cinder import objects -from cinder.objects import base as obj_base -from cinder.objects import fields -from cinder.scheduler import rpcapi as scheduler_rpcapi -from cinder import test -from cinder.tests.unit.api.contrib import test_backups -from cinder.tests.unit.api import fakes -from cinder.tests.unit.api.v2 import fakes as v2_fakes -from cinder.tests.unit import cast_as_call -from cinder.tests.unit import fake_constants as fake -from cinder.tests.unit import fake_snapshot -from cinder.volume import api as volume_api -from cinder.volume import rpcapi - - -def app(): - # no auth, just let environ['cinder.context'] pass through - api = fakes.router.APIRouter() - mapper = fakes.urlmap.URLMap() - mapper['/v2'] = api - return mapper - - -class BaseAdminTest(test.TestCase): - def setUp(self): - super(BaseAdminTest, self).setUp() - self.volume_api = volume_api.API() - # admin context - self.ctx = 
context.RequestContext(fake.USER_ID, fake.PROJECT_ID, True) - - def _create_volume(self, context, updates=None): - db_volume = {'status': 'available', - 'host': 'test', - 'binary': 'cinder-volume', - 'availability_zone': 'fake_zone', - 'attach_status': fields.VolumeAttachStatus.DETACHED} - if updates: - db_volume.update(updates) - - volume = objects.Volume(context=context, **db_volume) - volume.create() - return volume - - -@ddt.ddt -class AdminActionsTest(BaseAdminTest): - def setUp(self): - super(AdminActionsTest, self).setUp() - - self.tempdir = self.useFixture(fixtures.TempDir()).path - self.fixture = self.useFixture(config_fixture.Config(lockutils.CONF)) - self.fixture.config(lock_path=self.tempdir, - group='oslo_concurrency') - self.fixture.config(disable_process_locking=True, - group='oslo_concurrency') - self.flags(rpc_backend='cinder.openstack.common.rpc.impl_fake') - - cast_as_call.mock_cast_as_call(self.volume_api.volume_rpcapi.client) - cast_as_call.mock_cast_as_call(self.volume_api.scheduler_rpcapi.client) - - # start service to handle rpc messages for attach requests - self.svc = self.start_service('volume', host='test') - self.patch( - 'cinder.objects.Service.get_minimum_obj_version', - return_value=obj_base.OBJ_VERSIONS.get_current()) - - def _get_minimum_rpc_version_mock(ctxt, binary): - binary_map = { - 'cinder-volume': rpcapi.VolumeAPI, - 'cinder-backup': backup_rpcapi.BackupAPI, - 'cinder-scheduler': scheduler_rpcapi.SchedulerAPI, - } - return binary_map[binary].RPC_API_VERSION - - self.patch('cinder.objects.Service.get_minimum_rpc_version', - side_effect=_get_minimum_rpc_version_mock) - self.controller = admin_actions.VolumeAdminController() - - def tearDown(self): - self.svc.stop() - super(AdminActionsTest, self).tearDown() - - def _issue_resource_reset(self, ctx, name, id, status): - req = webob.Request.blank('/v2/%s/%s/%s/action' % ( - fake.PROJECT_ID, name, id)) - req.method = 'POST' - req.headers['content-type'] = 'application/json' - 
req.body = jsonutils.dump_as_bytes({'os-reset_status': status}) - req.environ['cinder.context'] = ctx - resp = req.get_response(app()) - return resp - - def _issue_volume_reset(self, ctx, volume, updated_status): - return self._issue_resource_reset(ctx, - 'volumes', - volume['id'], - updated_status) - - def _issue_snapshot_reset(self, ctx, snapshot, updated_status): - return self._issue_resource_reset(ctx, - 'snapshots', - snapshot.id, - updated_status) - - def _issue_backup_reset(self, ctx, backup, updated_status): - self.mock_object(backup_api.API, - '_get_available_backup_service_host', - return_value='testhost') - return self._issue_resource_reset(ctx, - 'backups', - backup['id'], - updated_status) - - def test_valid_updates(self): - vac = self.controller - - vac.validate_update({'status': 'creating'}) - vac.validate_update({'status': 'available'}) - vac.validate_update({'status': 'deleting'}) - vac.validate_update({'status': 'error'}) - vac.validate_update({'status': 'error_deleting'}) - - vac.validate_update({'attach_status': - fields.VolumeAttachStatus.DETACHED}) - vac.validate_update({'attach_status': - fields.VolumeAttachStatus.ATTACHED}) - - vac.validate_update({'migration_status': 'migrating'}) - vac.validate_update({'migration_status': 'error'}) - vac.validate_update({'migration_status': 'completing'}) - vac.validate_update({'migration_status': 'none'}) - vac.validate_update({'migration_status': 'starting'}) - - def test_reset_attach_status(self): - volume = db.volume_create(self.ctx, - {'attach_status': - fields.VolumeAttachStatus.DETACHED}) - - resp = self._issue_volume_reset(self.ctx, - volume, - {'attach_status': - fields.VolumeAttachStatus.ATTACHED}) - - self.assertEqual(http_client.ACCEPTED, resp.status_int) - volume = db.volume_get(self.ctx, volume['id']) - self.assertEqual(fields.VolumeAttachStatus.ATTACHED, - volume['attach_status']) - - def test_reset_attach_invalid_status(self): - volume = db.volume_create(self.ctx, - {'attach_status': - 
fields.VolumeAttachStatus.DETACHED}) - - resp = self._issue_volume_reset(self.ctx, - volume, - {'attach_status': 'bogus-status'}) - - self.assertEqual(http_client.BAD_REQUEST, resp.status_int) - volume = db.volume_get(self.ctx, volume['id']) - self.assertEqual(fields.VolumeAttachStatus.DETACHED, - volume['attach_status']) - - def test_reset_migration_invalid_status(self): - volume = db.volume_create(self.ctx, {'migration_status': None}) - - resp = self._issue_volume_reset(self.ctx, - volume, - {'migration_status': 'bogus-status'}) - - self.assertEqual(http_client.BAD_REQUEST, resp.status_int) - volume = db.volume_get(self.ctx, volume['id']) - self.assertIsNone(volume['migration_status']) - - def test_reset_migration_status(self): - volume = db.volume_create(self.ctx, {'migration_status': None}) - - resp = self._issue_volume_reset(self.ctx, - volume, - {'migration_status': 'migrating'}) - - self.assertEqual(http_client.ACCEPTED, resp.status_int) - volume = db.volume_get(self.ctx, volume['id']) - self.assertEqual('migrating', volume['migration_status']) - - def test_reset_status_as_admin(self): - volume = db.volume_create(self.ctx, {'status': 'available'}) - - resp = self._issue_volume_reset(self.ctx, - volume, - {'status': 'error'}) - - self.assertEqual(http_client.ACCEPTED, resp.status_int) - volume = db.volume_get(self.ctx, volume['id']) - self.assertEqual('error', volume['status']) - - def test_reset_status_as_non_admin(self): - ctx = context.RequestContext(fake.USER_ID, fake.PROJECT_ID) - volume = db.volume_create(self.ctx, - {'status': 'error', 'size': 1}) - - resp = self._issue_volume_reset(ctx, - volume, - {'status': 'error'}) - - # request is not authorized - self.assertEqual(http_client.FORBIDDEN, resp.status_int) - volume = db.volume_get(self.ctx, volume['id']) - # status is still 'error' - self.assertEqual('error', volume['status']) - - def test_backup_reset_status_as_admin(self): - volume = db.volume_create(self.ctx, {'status': 'available', - 'user_id': 
fake.USER_ID, - 'project_id': fake.PROJECT_ID}) - backup = db.backup_create(self.ctx, - {'status': fields.BackupStatus.AVAILABLE, - 'size': 1, - 'volume_id': volume['id'], - 'user_id': fake.USER_ID, - 'project_id': fake.PROJECT_ID, - 'host': 'test'}) - - resp = self._issue_backup_reset(self.ctx, - backup, - {'status': fields.BackupStatus.ERROR}) - - self.assertEqual(http_client.ACCEPTED, resp.status_int) - - def test_backup_reset_status_as_non_admin(self): - ctx = context.RequestContext(fake.USER_ID, fake.PROJECT_ID) - backup = db.backup_create(ctx, {'status': 'available', - 'size': 1, - 'volume_id': "fakeid", - 'host': 'test'}) - resp = self._issue_backup_reset(ctx, - backup, - {'status': fields.BackupStatus.ERROR}) - # request is not authorized - self.assertEqual(http_client.FORBIDDEN, resp.status_int) - - def test_backup_reset_status(self): - volume = db.volume_create(self.ctx, - {'status': 'available', 'host': 'test', - 'provider_location': '', 'size': 1}) - backup = db.backup_create(self.ctx, - {'status': fields.BackupStatus.AVAILABLE, - 'volume_id': volume['id'], - 'user_id': fake.USER_ID, - 'project_id': fake.PROJECT_ID, - 'host': 'test'}) - - resp = self._issue_backup_reset(self.ctx, - backup, - {'status': fields.BackupStatus.ERROR}) - - self.assertEqual(http_client.ACCEPTED, resp.status_int) - - def test_invalid_status_for_backup(self): - volume = db.volume_create(self.ctx, - {'status': 'available', 'host': 'test', - 'provider_location': '', 'size': 1}) - backup = db.backup_create(self.ctx, {'status': 'available', - 'volume_id': volume['id']}) - resp = self._issue_backup_reset(self.ctx, - backup, - {'status': 'restoring'}) - self.assertEqual(http_client.BAD_REQUEST, resp.status_int) - - def test_backup_reset_status_with_invalid_backup(self): - volume = db.volume_create(self.ctx, - {'status': 'available', 'host': 'test', - 'provider_location': '', 'size': 1}) - backup = db.backup_create(self.ctx, - {'status': fields.BackupStatus.AVAILABLE, - 'volume_id': 
volume['id'], - 'user_id': fake.USER_ID, - 'project_id': fake.PROJECT_ID}) - - backup['id'] = fake.BACKUP_ID - resp = self._issue_backup_reset(self.ctx, - backup, - {'status': fields.BackupStatus.ERROR}) - - # Should raise 404 if backup doesn't exist. - self.assertEqual(http_client.NOT_FOUND, resp.status_int) - - def test_malformed_reset_status_body(self): - volume = db.volume_create(self.ctx, {'status': 'available', 'size': 1}) - - resp = self._issue_volume_reset(self.ctx, - volume, - {'x-status': 'bad'}) - - self.assertEqual(http_client.BAD_REQUEST, resp.status_int) - volume = db.volume_get(self.ctx, volume['id']) - self.assertEqual('available', volume['status']) - - def test_invalid_status_for_volume(self): - volume = db.volume_create(self.ctx, {'status': 'available', 'size': 1}) - resp = self._issue_volume_reset(self.ctx, - volume, - {'status': 'invalid'}) - - self.assertEqual(http_client.BAD_REQUEST, resp.status_int) - volume = db.volume_get(self.ctx, volume['id']) - self.assertEqual('available', volume['status']) - - def test_reset_status_for_missing_volume(self): - req = webob.Request.blank('/v2/%s/volumes/%s/action' % ( - fake.PROJECT_ID, fake.WILL_NOT_BE_FOUND_ID)) - req.method = 'POST' - req.headers['content-type'] = 'application/json' - body = {'os-reset_status': {'status': 'available'}} - req.body = jsonutils.dump_as_bytes(body) - req.environ['cinder.context'] = self.ctx - resp = req.get_response(app()) - self.assertEqual(http_client.NOT_FOUND, resp.status_int) - self.assertRaises(exception.NotFound, db.volume_get, self.ctx, - fake.WILL_NOT_BE_FOUND_ID) - - def test_reset_attached_status(self): - # current status is available - volume = self._create_volume(self.ctx, {'provider_location': '', - 'size': 1}) - self.volume_api.reserve_volume(self.ctx, volume) - mountpoint = '/dev/vdb' - attachment = self.volume_api.attach(self.ctx, volume, fake.INSTANCE_ID, - None, mountpoint, 'rw') - # volume is attached - volume = db.volume_get(self.ctx.elevated(), 
volume['id']) - attachment = db.volume_attachment_get(self.ctx, attachment['id']) - - self.assertEqual('in-use', volume['status']) - self.assertEqual(fields.VolumeAttachStatus.ATTACHED, - volume['attach_status']) - self.assertEqual(fake.INSTANCE_ID, attachment['instance_uuid']) - self.assertEqual(mountpoint, attachment['mountpoint']) - self.assertEqual('attached', attachment['attach_status']) - admin_metadata = volume['volume_admin_metadata'] - self.assertEqual(2, len(admin_metadata)) - self.assertEqual('readonly', admin_metadata[0]['key']) - self.assertEqual('False', admin_metadata[0]['value']) - self.assertEqual('attached_mode', admin_metadata[1]['key']) - self.assertEqual('rw', admin_metadata[1]['value']) - - # Reset attach_status - resp = self._issue_volume_reset( - self.ctx, - volume, - {'status': 'available', - 'attach_status': fields.VolumeAttachStatus.DETACHED}) - # request is accepted - self.assertEqual(http_client.ACCEPTED, resp.status_int) - - # volume is detached - volume = db.volume_get(self.ctx, volume['id']) - self.assertEqual('detached', volume['attach_status']) - self.assertEqual('available', volume['status']) - admin_metadata = volume['volume_admin_metadata'] - self.assertEqual(1, len(admin_metadata)) - self.assertEqual('readonly', admin_metadata[0]['key']) - self.assertEqual('False', admin_metadata[0]['value']) - self.assertRaises(exception.VolumeAttachmentNotFound, - db.volume_attachment_get, - self.ctx, attachment['id']) - - def test_invalid_reset_attached_status(self): - volume = db.volume_create(self.ctx, - {'status': 'available', 'host': 'test', - 'provider_location': '', 'size': 1, - 'attach_status': - fields.VolumeAttachStatus.DETACHED}) - resp = self._issue_volume_reset( - self.ctx, - volume, - {'status': 'available', - 'attach_status': fields.VolumeAttachStatus.ERROR_DETACHING}) - self.assertEqual(http_client.BAD_REQUEST, resp.status_int) - volume = db.volume_get(self.ctx, volume['id']) - self.assertEqual('available', volume['status']) - 
self.assertEqual(fields.VolumeAttachStatus.DETACHED, - volume['attach_status']) - - def test_snapshot_reset_status(self): - volume = db.volume_create(self.ctx, - {'status': 'available', 'host': 'test', - 'provider_location': '', 'size': 1, - 'availability_zone': 'test', - 'attach_status': - fields.VolumeAttachStatus.DETACHED}) - kwargs = { - 'volume_id': volume['id'], - 'cgsnapshot_id': None, - 'user_id': self.ctx.user_id, - 'project_id': self.ctx.project_id, - 'status': fields.SnapshotStatus.ERROR_DELETING, - 'progress': '0%', - 'volume_size': volume['size'], - 'metadata': {} - } - snapshot = objects.Snapshot(context=self.ctx, **kwargs) - snapshot.create() - self.addCleanup(snapshot.destroy) - - resp = self._issue_snapshot_reset(self.ctx, snapshot, - {'status': - fields.SnapshotStatus.ERROR}) - - self.assertEqual(http_client.ACCEPTED, resp.status_int) - snapshot = objects.Snapshot.get_by_id(self.ctx, snapshot['id']) - self.assertEqual(fields.SnapshotStatus.ERROR, snapshot.status) - - def test_invalid_status_for_snapshot(self): - volume = db.volume_create(self.ctx, - {'status': 'available', 'host': 'test', - 'provider_location': '', 'size': 1}) - snapshot = objects.Snapshot(self.ctx, - status=fields.SnapshotStatus.AVAILABLE, - volume_id=volume['id']) - snapshot.create() - self.addCleanup(snapshot.destroy) - - resp = self._issue_snapshot_reset(self.ctx, snapshot, - {'status': 'attaching'}) - - self.assertEqual(http_client.BAD_REQUEST, resp.status_int) - self.assertEqual(fields.SnapshotStatus.AVAILABLE, snapshot.status) - - def test_force_delete(self): - # current status is creating - volume = self._create_volume(self.ctx, {'size': 1, 'host': None}) - req = webob.Request.blank('/v2/%s/volumes/%s/action' % ( - fake.PROJECT_ID, volume['id'])) - req.method = 'POST' - req.headers['content-type'] = 'application/json' - req.body = jsonutils.dump_as_bytes({'os-force_delete': {}}) - # attach admin context to request - req.environ['cinder.context'] = self.ctx - resp = 
req.get_response(app()) - # request is accepted - self.assertEqual(http_client.ACCEPTED, resp.status_int) - # volume is deleted - self.assertRaises(exception.NotFound, objects.Volume.get_by_id, - self.ctx, volume.id) - - @mock.patch.object(volume_api.API, 'delete_snapshot', return_value=True) - @mock.patch('cinder.objects.Snapshot.get_by_id') - @mock.patch.object(db, 'snapshot_get') - @mock.patch.object(db, 'volume_get') - def test_force_delete_snapshot(self, volume_get, snapshot_get, get_by_id, - delete_snapshot): - volume = v2_fakes.create_fake_volume(fake.VOLUME_ID) - snapshot = v2_fakes.fake_snapshot(fake.SNAPSHOT_ID) - snapshot_obj = fake_snapshot.fake_snapshot_obj(self.ctx, **snapshot) - volume_get.return_value = volume - snapshot_get.return_value = snapshot - get_by_id.return_value = snapshot_obj - - path = '/v2/%s/snapshots/%s/action' % ( - fake.PROJECT_ID, snapshot['id']) - req = webob.Request.blank(path) - req.method = 'POST' - req.headers['content-type'] = 'application/json' - req.body = jsonutils.dump_as_bytes({'os-force_delete': {}}) - # attach admin context to request - req.environ['cinder.context'] = self.ctx - resp = req.get_response(app()) - self.assertEqual(http_client.ACCEPTED, resp.status_int) - - def _migrate_volume_prep(self): - # create volume's current host and the destination host - db.service_create(self.ctx, - {'host': 'test', - 'topic': constants.VOLUME_TOPIC, - 'binary': 'cinder-volume', - 'created_at': timeutils.utcnow()}) - db.service_create(self.ctx, - {'host': 'test2', - 'topic': constants.VOLUME_TOPIC, - 'binary': 'cinder-volume', - 'created_at': timeutils.utcnow()}) - db.service_create(self.ctx, - {'host': 'clustered_host', - 'topic': constants.VOLUME_TOPIC, - 'binary': constants.VOLUME_BINARY, - 'cluster_name': 'cluster', - 'created_at': timeutils.utcnow()}) - db.cluster_create(self.ctx, - {'name': 'cluster', - 'binary': constants.VOLUME_BINARY}) - # current status is available - volume = self._create_volume(self.ctx) - return 
volume - - def _migrate_volume_3_exec(self, ctx, volume, host, expected_status, - force_host_copy=False, version=None, - cluster=None): - # build request to migrate to host - # req = fakes.HTTPRequest.blank('/v3/%s/volumes/%s/action' % ( - # fake.PROJECT_ID, volume['id'])) - req = webob.Request.blank('/v3/%s/volumes/%s/action' % ( - fake.PROJECT_ID, volume['id'])) - req.method = 'POST' - req.headers['content-type'] = 'application/json' - body = {'os-migrate_volume': {'host': host, - 'force_host_copy': force_host_copy}} - version = version or '3.0' - req.headers = {'OpenStack-API-Version': 'volume %s' % version} - req.api_version_request = api_version.APIVersionRequest(version) - if version == '3.16': - body['os-migrate_volume']['cluster'] = cluster - req.body = jsonutils.dump_as_bytes(body) - req.environ['cinder.context'] = ctx - resp = self.controller._migrate_volume(req, volume.id, body) - - # verify status - self.assertEqual(expected_status, resp.status_int) - volume = db.volume_get(self.ctx, volume['id']) - return volume - - @ddt.data('3.0', '3.15', '3.16') - def test_migrate_volume_success_3(self, version): - expected_status = http_client.ACCEPTED - host = 'test2' - volume = self._migrate_volume_prep() - volume = self._migrate_volume_3_exec(self.ctx, volume, host, - expected_status, version=version) - self.assertEqual('starting', volume['migration_status']) - - def test_migrate_volume_success_cluster(self): - expected_status = http_client.ACCEPTED - # We cannot provide host and cluster, so send host to None - host = None - cluster = 'cluster' - volume = self._migrate_volume_prep() - volume = self._migrate_volume_3_exec(self.ctx, volume, host, - expected_status, version='3.16', - cluster=cluster) - self.assertEqual('starting', volume['migration_status']) - - def test_migrate_volume_fail_host_and_cluster(self): - # We cannot send host and cluster in the request - host = 'test2' - cluster = 'cluster' - volume = self._migrate_volume_prep() - 
self.assertRaises(exception.InvalidInput, - self._migrate_volume_3_exec, self.ctx, volume, host, - None, version='3.16', cluster=cluster) - - def _migrate_volume_exec(self, ctx, volume, host, expected_status, - force_host_copy=False): - # build request to migrate to host - req = webob.Request.blank('/v2/%s/volumes/%s/action' % ( - fake.PROJECT_ID, volume['id'])) - req.method = 'POST' - req.headers['content-type'] = 'application/json' - body = {'os-migrate_volume': {'host': host, - 'force_host_copy': force_host_copy}} - req.body = jsonutils.dump_as_bytes(body) - req.environ['cinder.context'] = ctx - resp = req.get_response(app()) - # verify status - self.assertEqual(expected_status, resp.status_int) - volume = db.volume_get(self.ctx, volume['id']) - return volume - - def test_migrate_volume_success(self): - expected_status = http_client.ACCEPTED - host = 'test2' - volume = self._migrate_volume_prep() - volume = self._migrate_volume_exec(self.ctx, volume, host, - expected_status) - self.assertEqual('starting', volume['migration_status']) - - def test_migrate_volume_fail_replication(self): - expected_status = http_client.BAD_REQUEST - host = 'test2' - volume = self._migrate_volume_prep() - # current status is available - volume = self._create_volume(self.ctx, - {'provider_location': '', - 'attach_status': None, - 'replication_status': 'active'}) - volume = self._migrate_volume_exec(self.ctx, volume, host, - expected_status) - - def test_migrate_volume_as_non_admin(self): - expected_status = http_client.FORBIDDEN - host = 'test2' - ctx = context.RequestContext(fake.USER_ID, fake.PROJECT_ID) - volume = self._migrate_volume_prep() - self._migrate_volume_exec(ctx, volume, host, expected_status) - - def test_migrate_volume_without_host_parameter(self): - expected_status = http_client.BAD_REQUEST - host = 'test3' - volume = self._migrate_volume_prep() - # build request to migrate without host - req = webob.Request.blank('/v2/%s/volumes/%s/action' % ( - fake.PROJECT_ID, 
volume['id'])) - req.method = 'POST' - req.headers['content-type'] = 'application/json' - body = {'os-migrate_volume': {'host': host, - 'force_host_copy': False}} - req.body = jsonutils.dump_as_bytes(body) - req.environ['cinder.context'] = self.ctx - resp = req.get_response(app()) - # verify status - self.assertEqual(expected_status, resp.status_int) - - def test_migrate_volume_host_no_exist(self): - expected_status = http_client.BAD_REQUEST - host = 'test3' - volume = self._migrate_volume_prep() - self._migrate_volume_exec(self.ctx, volume, host, expected_status) - - def test_migrate_volume_same_host(self): - expected_status = http_client.BAD_REQUEST - host = 'test' - volume = self._migrate_volume_prep() - self._migrate_volume_exec(self.ctx, volume, host, expected_status) - - def test_migrate_volume_migrating(self): - expected_status = http_client.BAD_REQUEST - host = 'test2' - volume = self._migrate_volume_prep() - volume.migration_status = 'migrating' - volume.save() - self._migrate_volume_exec(self.ctx, volume, host, expected_status) - - def test_migrate_volume_with_snap(self): - expected_status = http_client.BAD_REQUEST - host = 'test2' - volume = self._migrate_volume_prep() - snap = objects.Snapshot(self.ctx, volume_id=volume['id']) - snap.create() - self.addCleanup(snap.destroy) - self._migrate_volume_exec(self.ctx, volume, host, expected_status) - - def test_migrate_volume_bad_force_host_copy(self): - expected_status = http_client.BAD_REQUEST - host = 'test2' - volume = self._migrate_volume_prep() - self._migrate_volume_exec(self.ctx, volume, host, expected_status, - force_host_copy='foo') - - def _migrate_volume_comp_exec(self, ctx, volume, new_volume, error, - expected_status, expected_id, no_body=False): - req = webob.Request.blank('/v2/%s/volumes/%s/action' % ( - fake.PROJECT_ID, volume['id'])) - req.method = 'POST' - req.headers['content-type'] = 'application/json' - body = {'new_volume': new_volume['id'], 'error': error} - if no_body: - body = {'': 
body} - else: - body = {'os-migrate_volume_completion': body} - req.body = jsonutils.dump_as_bytes(body) - req.environ['cinder.context'] = ctx - resp = req.get_response(app()) - resp_dict = resp.json - # verify status - self.assertEqual(expected_status, resp.status_int) - if expected_id: - self.assertEqual(expected_id, resp_dict['save_volume_id']) - else: - self.assertNotIn('save_volume_id', resp_dict) - - def test_migrate_volume_comp_as_non_admin(self): - volume = db.volume_create(self.ctx, {'id': fake.VOLUME_ID}) - new_volume = db.volume_create(self.ctx, {'id': fake.VOLUME2_ID}) - expected_status = http_client.FORBIDDEN - expected_id = None - ctx = context.RequestContext(fake.USER_ID, fake.PROJECT_ID) - self._migrate_volume_comp_exec(ctx, volume, new_volume, False, - expected_status, expected_id) - - def test_migrate_volume_comp_no_mig_status(self): - volume1 = self._create_volume(self.ctx, {'migration_status': 'foo'}) - volume2 = self._create_volume(self.ctx, {'migration_status': None}) - - expected_status = http_client.BAD_REQUEST - expected_id = None - self._migrate_volume_comp_exec(self.ctx, volume1, volume2, False, - expected_status, expected_id) - self._migrate_volume_comp_exec(self.ctx, volume2, volume1, False, - expected_status, expected_id) - - def test_migrate_volume_comp_bad_mig_status(self): - volume1 = self._create_volume(self.ctx, - {'migration_status': 'migrating'}) - volume2 = self._create_volume(self.ctx, - {'migration_status': 'target:foo'}) - expected_status = http_client.BAD_REQUEST - expected_id = None - self._migrate_volume_comp_exec(self.ctx, volume1, volume2, False, - expected_status, expected_id) - - def test_migrate_volume_comp_no_action(self): - volume = db.volume_create(self.ctx, {'id': fake.VOLUME_ID}) - new_volume = db.volume_create(self.ctx, {'id': fake.VOLUME2_ID}) - expected_status = http_client.BAD_REQUEST - expected_id = None - ctx = context.RequestContext(fake.USER_ID, fake.PROJECT_ID) - self._migrate_volume_comp_exec(ctx, 
volume, new_volume, False, - expected_status, expected_id, True) - - def test_migrate_volume_comp_from_nova(self): - volume = self._create_volume(self.ctx, {'status': 'in-use', - 'migration_status': None, - 'attach_status': - fields.VolumeAttachStatus. - ATTACHED}) - new_volume = self._create_volume(self.ctx, - {'migration_status': None, - 'attach_status': - fields.VolumeAttachStatus. - DETACHED}) - expected_status = http_client.OK - expected_id = new_volume.id - self._migrate_volume_comp_exec(self.ctx, volume, new_volume, False, - expected_status, expected_id) - - def test_backup_reset_valid_updates(self): - vac = admin_actions.BackupAdminController() - vac.validate_update({'status': 'available'}) - vac.validate_update({'status': 'error'}) - self.assertRaises(exc.HTTPBadRequest, - vac.validate_update, - {'status': 'restoring'}) - self.assertRaises(exc.HTTPBadRequest, - vac.validate_update, - {'status': 'creating'}) - - @mock.patch('cinder.backup.rpcapi.BackupAPI.delete_backup', mock.Mock()) - @mock.patch('cinder.db.service_get_all') - @mock.patch('cinder.backup.api.API._check_support_to_force_delete') - def _force_delete_backup_util(self, test_status, mock_check_support, - mock_service_get_all): - mock_service_get_all.return_value = [ - {'availability_zone': "az1", 'host': 'testhost', - 'disabled': 0, 'updated_at': timeutils.utcnow()}] - # admin context - mock_check_support.return_value = True - # current status is dependent on argument: test_status. 
- id = test_backups.BackupsAPITestCase._create_backup(status=test_status) - req = webob.Request.blank('/v2/%s/backups/%s/action' % ( - fake.PROJECT_ID, id)) - req.method = 'POST' - req.headers['Content-Type'] = 'application/json' - req.body = jsonutils.dump_as_bytes({'os-force_delete': {}}) - req.environ['cinder.context'] = self.ctx - res = req.get_response(app()) - - self.assertEqual(http_client.ACCEPTED, res.status_int) - self.assertEqual( - 'deleting', - test_backups.BackupsAPITestCase._get_backup_attrib(id, 'status')) - db.backup_destroy(self.ctx, id) - - def test_delete_backup_force_when_creating(self): - self._force_delete_backup_util('creating') - - def test_delete_backup_force_when_deleting(self): - self._force_delete_backup_util('deleting') - - def test_delete_backup_force_when_restoring(self): - self._force_delete_backup_util('restoring') - - def test_delete_backup_force_when_available(self): - self._force_delete_backup_util('available') - - def test_delete_backup_force_when_error(self): - self._force_delete_backup_util('error') - - def test_delete_backup_force_when_error_deleting(self): - self._force_delete_backup_util('error_deleting') - - @mock.patch('cinder.backup.rpcapi.BackupAPI.check_support_to_force_delete', - return_value=False) - def test_delete_backup_force_when_not_supported(self, mock_check_support): - # admin context - self.override_config('backup_driver', 'cinder.backup.drivers.ceph') - id = test_backups.BackupsAPITestCase._create_backup() - req = webob.Request.blank('/v2/%s/backups/%s/action' % ( - fake.PROJECT_ID, id)) - req.method = 'POST' - req.headers['Content-Type'] = 'application/json' - req.body = jsonutils.dump_as_bytes({'os-force_delete': {}}) - req.environ['cinder.context'] = self.ctx - res = req.get_response(app()) - self.assertEqual(http_client.METHOD_NOT_ALLOWED, res.status_int) - - -class AdminActionsAttachDetachTest(BaseAdminTest): - def setUp(self): - super(AdminActionsAttachDetachTest, self).setUp() - # start service to 
handle rpc messages for attach requests - self.svc = self.start_service('volume', host='test') - - def tearDown(self): - self.svc.stop() - super(AdminActionsAttachDetachTest, self).tearDown() - - def test_force_detach_instance_attached_volume(self): - # current status is available - volume = self._create_volume(self.ctx, {'provider_location': '', - 'size': 1}) - connector = {'initiator': 'iqn.2012-07.org.fake:01'} - - self.volume_api.reserve_volume(self.ctx, volume) - mountpoint = '/dev/vbd' - attachment = self.volume_api.attach(self.ctx, volume, fake.INSTANCE_ID, - None, mountpoint, 'rw') - # volume is attached - volume.refresh() - self.assertEqual('in-use', volume.status) - self.assertEqual(fake.INSTANCE_ID, attachment['instance_uuid']) - self.assertEqual(mountpoint, attachment['mountpoint']) - self.assertEqual(fields.VolumeAttachStatus.ATTACHED, - attachment['attach_status']) - admin_metadata = volume.admin_metadata - self.assertEqual(2, len(admin_metadata)) - self.assertEqual('False', admin_metadata['readonly']) - self.assertEqual('rw', admin_metadata['attached_mode']) - conn_info = self.volume_api.initialize_connection(self.ctx, - volume, - connector) - self.assertEqual('rw', conn_info['data']['access_mode']) - # build request to force detach - req = webob.Request.blank('/v2/%s/volumes/%s/action' % ( - fake.PROJECT_ID, volume.id)) - req.method = 'POST' - req.headers['content-type'] = 'application/json' - # request status of 'error' - body = {'os-force_detach': {'attachment_id': attachment['id'], - 'connector': connector}} - req.body = jsonutils.dump_as_bytes(body) - # attach admin context to request - req.environ['cinder.context'] = self.ctx - # make request - resp = req.get_response(app()) - # request is accepted - self.assertEqual(http_client.ACCEPTED, resp.status_int) - volume.refresh() - self.assertRaises(exception.VolumeAttachmentNotFound, - db.volume_attachment_get, - self.ctx, attachment['id']) - - # status changed to 'available' - 
self.assertEqual('available', volume.status) - admin_metadata = volume.admin_metadata - self.assertEqual(1, len(admin_metadata)) - self.assertEqual('False', admin_metadata['readonly']) - - def test_force_detach_host_attached_volume(self): - # current status is available - volume = self._create_volume(self.ctx, {'provider_location': '', - 'size': 1}) - connector = {'initiator': 'iqn.2012-07.org.fake:01'} - - self.volume_api.initialize_connection(self.ctx, volume, connector) - mountpoint = '/dev/vbd' - host_name = 'fake-host' - attachment = self.volume_api.attach(self.ctx, volume, None, host_name, - mountpoint, 'ro') - # volume is attached - volume.refresh() - self.assertEqual('in-use', volume.status) - self.assertIsNone(attachment['instance_uuid']) - self.assertEqual(host_name, attachment['attached_host']) - self.assertEqual(mountpoint, attachment['mountpoint']) - self.assertEqual(fields.VolumeAttachStatus.ATTACHED, - attachment['attach_status']) - admin_metadata = volume.admin_metadata - self.assertEqual(2, len(admin_metadata)) - self.assertEqual('False', admin_metadata['readonly']) - self.assertEqual('ro', admin_metadata['attached_mode']) - conn_info = self.volume_api.initialize_connection(self.ctx, - volume, connector) - self.assertEqual('ro', conn_info['data']['access_mode']) - # build request to force detach - req = webob.Request.blank('/v2/%s/volumes/%s/action' % ( - fake.PROJECT_ID, volume.id)) - req.method = 'POST' - req.headers['content-type'] = 'application/json' - # request status of 'error' - body = {'os-force_detach': {'attachment_id': attachment['id'], - 'connector': connector}} - req.body = jsonutils.dump_as_bytes(body) - # attach admin context to request - req.environ['cinder.context'] = self.ctx - # make request - resp = req.get_response(app()) - # request is accepted - self.assertEqual(http_client.ACCEPTED, resp.status_int) - volume.refresh() - self.assertRaises(exception.VolumeAttachmentNotFound, - db.volume_attachment_get, - self.ctx, 
attachment['id']) - # status changed to 'available' - self.assertEqual('available', volume['status']) - admin_metadata = volume['admin_metadata'] - self.assertEqual(1, len(admin_metadata)) - self.assertEqual('False', admin_metadata['readonly']) - - def test_volume_force_detach_raises_remote_error(self): - # current status is available - volume = self._create_volume(self.ctx, {'provider_location': '', - 'size': 1}) - connector = {'initiator': 'iqn.2012-07.org.fake:01'} - - self.volume_api.reserve_volume(self.ctx, volume) - mountpoint = '/dev/vbd' - attachment = self.volume_api.attach(self.ctx, volume, fake.INSTANCE_ID, - None, mountpoint, 'rw') - # volume is attached - volume.refresh() - self.assertEqual('in-use', volume.status) - self.assertEqual(fake.INSTANCE_ID, attachment['instance_uuid']) - self.assertEqual(mountpoint, attachment['mountpoint']) - self.assertEqual(fields.VolumeAttachStatus.ATTACHED, - attachment['attach_status']) - admin_metadata = volume.admin_metadata - self.assertEqual(2, len(admin_metadata)) - self.assertEqual('False', admin_metadata['readonly']) - self.assertEqual('rw', admin_metadata['attached_mode']) - conn_info = self.volume_api.initialize_connection(self.ctx, - volume, - connector) - self.assertEqual('rw', conn_info['data']['access_mode']) - # build request to force detach - volume_remote_error = \ - messaging.RemoteError(exc_type='VolumeAttachmentNotFound') - with mock.patch.object(volume_api.API, 'detach', - side_effect=volume_remote_error): - req = webob.Request.blank('/v2/%s/volumes/%s/action' % ( - fake.PROJECT_ID, volume.id)) - req.method = 'POST' - req.headers['content-type'] = 'application/json' - body = {'os-force_detach': {'attachment_id': fake.ATTACHMENT_ID}} - req.body = jsonutils.dump_as_bytes(body) - # attach admin context to request - req.environ['cinder.context'] = self.ctx - # make request - resp = req.get_response(app()) - self.assertEqual(http_client.BAD_REQUEST, resp.status_int) - - # test for 
VolumeBackendAPIException - volume_remote_error = ( - messaging.RemoteError(exc_type='VolumeBackendAPIException')) - with mock.patch.object(volume_api.API, 'detach', - side_effect=volume_remote_error): - req = webob.Request.blank('/v2/%s/volumes/%s/action' % ( - fake.PROJECT_ID, volume.id)) - req.method = 'POST' - req.headers['content-type'] = 'application/json' - body = {'os-force_detach': {'attachment_id': fake.ATTACHMENT_ID, - 'connector': connector}} - req.body = jsonutils.dump_as_bytes(body) - - # attach admin context to request - req.environ['cinder.context'] = self.ctx - # make request - self.assertRaises(messaging.RemoteError, - req.get_response, - app()) - - def test_volume_force_detach_raises_db_error(self): - # In case of DB error 500 error code is returned to user - # current status is available - volume = self._create_volume(self.ctx, {'provider_location': '', - 'size': 1}) - connector = {'initiator': 'iqn.2012-07.org.fake:01'} - - self.volume_api.reserve_volume(self.ctx, volume) - mountpoint = '/dev/vbd' - attachment = self.volume_api.attach(self.ctx, volume, fake.INSTANCE_ID, - None, mountpoint, 'rw') - # volume is attached - volume.refresh() - self.assertEqual('in-use', volume.status) - self.assertEqual(fake.INSTANCE_ID, attachment['instance_uuid']) - self.assertEqual(mountpoint, attachment['mountpoint']) - self.assertEqual(fields.VolumeAttachStatus.ATTACHED, - attachment['attach_status']) - admin_metadata = volume.admin_metadata - - self.assertEqual(2, len(admin_metadata)) - self.assertEqual('False', admin_metadata['readonly']) - self.assertEqual('rw', admin_metadata['attached_mode']) - conn_info = self.volume_api.initialize_connection(self.ctx, - volume, - connector) - self.assertEqual('rw', conn_info['data']['access_mode']) - # build request to force detach - volume_remote_error = messaging.RemoteError(exc_type='DBError') - with mock.patch.object(volume_api.API, 'detach', - side_effect=volume_remote_error): - req = 
webob.Request.blank('/v2/%s/volumes/%s/action' % - (fake.PROJECT_ID, volume.id)) - req.method = 'POST' - req.headers['content-type'] = 'application/json' - body = {'os-force_detach': {'attachment_id': fake.ATTACHMENT_ID, - 'connector': connector}} - req.body = jsonutils.dump_as_bytes(body) - # attach admin context to request - req.environ['cinder.context'] = self.ctx - # make request - self.assertRaises(messaging.RemoteError, - req.get_response, - app()) - - def test_volume_force_detach_missing_connector(self): - # current status is available - volume = self._create_volume(self.ctx, {'provider_location': '', - 'size': 1}) - connector = {'initiator': 'iqn.2012-07.org.fake:01'} - - self.volume_api.reserve_volume(self.ctx, volume) - mountpoint = '/dev/vbd' - attachment = self.volume_api.attach(self.ctx, volume, fake.INSTANCE_ID, - None, mountpoint, 'rw') - # volume is attached - volume.refresh() - self.assertEqual('in-use', volume.status) - self.assertEqual(fake.INSTANCE_ID, attachment['instance_uuid']) - self.assertEqual(mountpoint, attachment['mountpoint']) - self.assertEqual(fields.VolumeAttachStatus.ATTACHED, - attachment['attach_status']) - admin_metadata = volume.admin_metadata - self.assertEqual(2, len(admin_metadata)) - self.assertEqual('False', admin_metadata['readonly']) - self.assertEqual('rw', admin_metadata['attached_mode']) - conn_info = self.volume_api.initialize_connection(self.ctx, - volume, - connector) - self.assertEqual('rw', conn_info['data']['access_mode']) - - # test when missing connector - with mock.patch.object(volume_api.API, 'detach'): - req = webob.Request.blank('/v2/%s/volumes/%s/action' % ( - fake.PROJECT_ID, volume.id)) - req.method = 'POST' - req.headers['content-type'] = 'application/json' - body = {'os-force_detach': {'attachment_id': fake.ATTACHMENT_ID}} - req.body = jsonutils.dump_as_bytes(body) - # attach admin context to request - req.environ['cinder.context'] = self.ctx - # make request - resp = req.get_response(app()) - 
self.assertEqual(http_client.ACCEPTED, resp.status_int) - - def test_attach_in_used_volume_by_instance(self): - """Test that attaching to an in-use volume fails.""" - # current status is available - volume = self._create_volume(self.ctx, {'provider_location': '', - 'size': 1}) - connector = {'initiator': 'iqn.2012-07.org.fake:01'} - self.volume_api.reserve_volume(self.ctx, volume) - conn_info = self.volume_api.initialize_connection(self.ctx, - volume, connector) - self.volume_api.attach(self.ctx, volume, fake.INSTANCE_ID, None, - '/dev/vbd0', 'rw') - self.assertEqual('rw', conn_info['data']['access_mode']) - self.assertRaises(exception.InvalidVolume, - self.volume_api.attach, - self.ctx, - volume, - fake.INSTANCE_ID, - None, - '/dev/vdb1', - 'ro') - - def test_attach_in_used_volume_by_host(self): - """Test that attaching to an in-use volume fails.""" - # current status is available - volume = self._create_volume(self.ctx, {'provider_location': '', - 'size': 1}) - connector = {'initiator': 'iqn.2012-07.org.fake:01'} - - self.volume_api.reserve_volume(self.ctx, volume) - self.volume_api.initialize_connection(self.ctx, volume, connector) - self.volume_api.attach(self.ctx, volume, None, 'fake_host1', - '/dev/vbd0', 'rw') - conn_info = self.volume_api.initialize_connection(self.ctx, - volume, connector) - conn_info['data']['access_mode'] = 'rw' - self.assertRaises(exception.InvalidVolume, - self.volume_api.attach, - self.ctx, - volume, - None, - 'fake_host2', - '/dev/vbd1', - 'ro') - - def test_invalid_iscsi_connector(self): - """Test connector without the initiator (required by iscsi driver).""" - # current status is available - volume = self._create_volume(self.ctx, {'provider_location': '', - 'size': 1}) - connector = {} - - self.assertRaises(exception.InvalidInput, - self.volume_api.initialize_connection, - self.ctx, volume, connector) - - def test_attach_attaching_volume_with_different_instance(self): - """Test that attaching volume reserved for another instance 
fails.""" - # current status is available - volume = self._create_volume(self.ctx, {'provider_location': '', - 'size': 1}) - - self.volume_api.reserve_volume(self.ctx, volume) - values = {'volume_id': volume['id'], - 'attach_status': fields.VolumeAttachStatus.ATTACHING, - 'attach_time': timeutils.utcnow(), - 'instance_uuid': 'abc123', - } - db.volume_attach(self.ctx, values) - db.volume_admin_metadata_update(self.ctx, volume['id'], - {"attached_mode": 'rw'}, False) - mountpoint = '/dev/vbd' - attachment = self.volume_api.attach(self.ctx, volume, - fake.INSTANCE_ID, None, - mountpoint, 'rw') - - self.assertEqual(fake.INSTANCE_ID, attachment['instance_uuid']) - self.assertEqual(volume['id'], attachment['volume_id'], volume['id']) - self.assertEqual(fields.VolumeAttachStatus.ATTACHED, - attachment['attach_status']) - - def test_attach_attaching_volume_with_different_mode(self): - """Test that attaching volume reserved for another mode fails.""" - # current status is available - volume = self._create_volume( - self.ctx, - {'provider_location': '', - 'size': 1, - 'status': 'attaching', - 'instance_uuid': fake.INSTANCE_ID, - 'admin_metadata': {"attached_mode": 'rw'}}) - - values = {'status': 'attaching'} - db.volume_update(self.ctx, volume['id'], values) - db.volume_admin_metadata_update(self.ctx, volume['id'], - {"attached_mode": 'rw'}, False) - mountpoint = '/dev/vbd' - self.assertRaises(exception.InvalidVolume, - self.volume_api.attach, - self.ctx, - volume, - fake.INSTANCE_ID, - None, - mountpoint, - 'ro') diff --git a/cinder/tests/unit/api/contrib/test_availability_zones.py b/cinder/tests/unit/api/contrib/test_availability_zones.py deleted file mode 100644 index a85b6428b..000000000 --- a/cinder/tests/unit/api/contrib/test_availability_zones.py +++ /dev/null @@ -1,61 +0,0 @@ -# Copyright (c) 2013 OpenStack Foundation -# All Rights Reserved. 
-# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import datetime - -from oslo_utils import timeutils - -import cinder.api.contrib.availability_zones -import cinder.context -import cinder.test -import cinder.volume.api - - -created_time = datetime.datetime(2012, 11, 14, 1, 20, 41, 95099) -current_time = timeutils.utcnow() - - -def list_availability_zones(self): - return ( - {'name': 'ping', 'available': True}, - {'name': 'pong', 'available': False}, - ) - - -class FakeRequest(object): - environ = {'cinder.context': cinder.context.get_admin_context()} - GET = {} - - -class ControllerTestCase(cinder.test.TestCase): - - def setUp(self): - super(ControllerTestCase, self).setUp() - self.controller = cinder.api.contrib.availability_zones.Controller() - self.req = FakeRequest() - self.mock_object(cinder.volume.api.API, - 'list_availability_zones', - list_availability_zones) - - def test_list_hosts(self): - """Verify that the volume hosts are returned.""" - actual = self.controller.index(self.req) - expected = { - 'availabilityZoneInfo': [ - {'zoneName': 'ping', 'zoneState': {'available': True}}, - {'zoneName': 'pong', 'zoneState': {'available': False}}, - ], - } - self.assertEqual(expected, actual) diff --git a/cinder/tests/unit/api/contrib/test_backup_project_attribute.py b/cinder/tests/unit/api/contrib/test_backup_project_attribute.py deleted file mode 100644 index d467ca0cb..000000000 --- a/cinder/tests/unit/api/contrib/test_backup_project_attribute.py +++ /dev/null @@ 
-1,101 +0,0 @@ -# Copyright (c) 2016 Huawei Technologies Co., Ltd. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import ddt - -from oslo_serialization import jsonutils -import webob - -from cinder.api.openstack import api_version_request as api_version -from cinder.api.v3 import router as router_v3 -from cinder.backup import api as backup_api -from cinder import context -from cinder import objects -from cinder import test -from cinder.tests.unit.api import fakes -from cinder.tests.unit.backup import fake_backup -from cinder.tests.unit import fake_constants as fake - - -def fake_backup_get(*args, **kwargs): - ctx = context.RequestContext(fake.USER_ID, fake.PROJECT_ID, False) - bak = { - 'id': fake.BACKUP_ID, - 'project_id': fake.PROJECT_ID, - } - return fake_backup.fake_backup_obj(ctx, **bak) - - -def fake_backup_get_all(*args, **kwargs): - return objects.BackupList(objects=[fake_backup_get()]) - - -def app(): - # no auth, just let environ['cinder.context'] pass through - api = router_v3.APIRouter() - mapper = fakes.urlmap.URLMap() - mapper['/v3'] = api - return mapper - - -@ddt.ddt -class BackupProjectAttributeTest(test.TestCase): - - def setUp(self): - super(BackupProjectAttributeTest, self).setUp() - self.stubs.Set(backup_api.API, 'get', fake_backup_get) - self.stubs.Set(backup_api.API, 'get_all', fake_backup_get_all) - - def _send_backup_request(self, ctx, detail=False, version='3.18'): - req = None - if detail: - req = 
webob.Request.blank(('/v3/%s/backups/detail' - % fake.PROJECT_ID)) - else: - req = webob.Request.blank('/v3/%s/backups/%s' % (fake.PROJECT_ID, - fake.BACKUP_ID)) - req.method = 'GET' - req.environ['cinder.context'] = ctx - req.headers['OpenStack-API-Version'] = 'volume ' + version - req.api_version_request = api_version.APIVersionRequest(version) - res = req.get_response(app()) - - if detail: - return jsonutils.loads(res.body)['backups'] - return jsonutils.loads(res.body)['backup'] - - @ddt.data(True, False) - def test_get_backup_with_project(self, is_admin): - ctx = context.RequestContext(fake.USER2_ID, fake.PROJECT_ID, is_admin) - bak = self._send_backup_request(ctx) - if is_admin: - self.assertEqual(fake.PROJECT_ID, - bak['os-backup-project-attr:project_id']) - else: - self.assertNotIn('os-backup-project-attr:project_id', bak) - - @ddt.data(True, False) - def test_list_detail_backups_with_project(self, is_admin): - ctx = context.RequestContext(fake.USER2_ID, fake.PROJECT_ID, is_admin) - baks = self._send_backup_request(ctx, detail=True) - if is_admin: - self.assertEqual(fake.PROJECT_ID, - baks[0]['os-backup-project-attr:project_id']) - else: - self.assertNotIn('os-backup-project-attr:project_id', baks[0]) - - def test_get_backup_under_allowed_api_version(self): - ctx = context.RequestContext(fake.USER2_ID, fake.PROJECT_ID, True) - bak = self._send_backup_request(ctx, version='3.17') - self.assertNotIn('os-backup-project-attr:project_id', bak) diff --git a/cinder/tests/unit/api/contrib/test_backups.py b/cinder/tests/unit/api/contrib/test_backups.py deleted file mode 100644 index 521ce3526..000000000 --- a/cinder/tests/unit/api/contrib/test_backups.py +++ /dev/null @@ -1,2126 +0,0 @@ -# Copyright (C) 2012 Hewlett-Packard Development Company, L.P. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. 
You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -""" -Tests for Backup code. -""" - -import ddt -import mock -from oslo_serialization import jsonutils -from oslo_utils import timeutils -from six.moves import http_client -import webob - -from cinder.api.contrib import backups -# needed for stubs to work -import cinder.backup -from cinder.backup import api as backup_api -from cinder import context -from cinder import db -from cinder import exception -from cinder.i18n import _ -from cinder import objects -from cinder.objects import fields -from cinder import test -from cinder.tests.unit.api import fakes -from cinder.tests.unit import fake_constants as fake -from cinder.tests.unit import utils -# needed for stubs to work -import cinder.volume - -NUM_ELEMENTS_IN_BACKUP = 17 - - -@ddt.ddt -class BackupsAPITestCase(test.TestCase): - """Test Case for backups API.""" - - def setUp(self): - super(BackupsAPITestCase, self).setUp() - self.volume_api = cinder.volume.API() - self.backup_api = cinder.backup.API() - self.context = context.get_admin_context() - self.context.project_id = fake.PROJECT_ID - self.context.user_id = fake.USER_ID - self.user_context = context.RequestContext( - fake.USER_ID, fake.PROJECT_ID, auth_token=True) - self.controller = backups.BackupsController() - self.patch('cinder.objects.service.Service._get_minimum_version', - return_value=None) - - @staticmethod - def _create_backup(volume_id=fake.VOLUME_ID, - display_name='test_backup', - display_description='this is a test backup', - container='volumebackups', - status=fields.BackupStatus.CREATING, - incremental=False, - parent_id=None, - 
size=0, object_count=0, host='testhost', - num_dependent_backups=0, - snapshot_id=None, - data_timestamp=None): - """Create a backup object.""" - backup = {} - backup['volume_id'] = volume_id - backup['user_id'] = fake.USER_ID - backup['project_id'] = fake.PROJECT_ID - backup['host'] = host - backup['availability_zone'] = 'az1' - backup['display_name'] = display_name - backup['display_description'] = display_description - backup['container'] = container - backup['status'] = status - backup['fail_reason'] = '' - backup['size'] = size - backup['object_count'] = object_count - backup['incremental'] = incremental - backup['parent_id'] = parent_id - backup['num_dependent_backups'] = num_dependent_backups - backup['snapshot_id'] = snapshot_id - backup['data_timestamp'] = data_timestamp - backup = db.backup_create(context.get_admin_context(), backup) - if not snapshot_id: - db.backup_update(context.get_admin_context(), - backup['id'], - {'data_timestamp': backup['created_at']}) - return backup['id'] - - @staticmethod - def _get_backup_attrib(backup_id, attrib_name): - return db.backup_get(context.get_admin_context(), - backup_id)[attrib_name] - - @ddt.data(False, True) - def test_show_backup(self, backup_from_snapshot): - volume_id = utils.create_volume(self.context, size=5, - status='creating').id - snapshot = None - snapshot_id = None - if backup_from_snapshot: - snapshot = utils.create_snapshot(self.context, - volume_id) - snapshot_id = snapshot.id - backup_id = self._create_backup(volume_id, - snapshot_id=snapshot_id) - req = webob.Request.blank('/v2/%s/backups/%s' % ( - fake.PROJECT_ID, backup_id)) - req.method = 'GET' - req.headers['Content-Type'] = 'application/json' - res = req.get_response(fakes.wsgi_app( - fake_auth_context=self.user_context)) - res_dict = jsonutils.loads(res.body) - self.assertEqual(http_client.OK, res.status_int) - self.assertEqual('az1', res_dict['backup']['availability_zone']) - self.assertEqual('volumebackups', 
res_dict['backup']['container']) - self.assertEqual('this is a test backup', - res_dict['backup']['description']) - self.assertEqual('test_backup', res_dict['backup']['name']) - self.assertEqual(backup_id, res_dict['backup']['id']) - self.assertEqual(0, res_dict['backup']['object_count']) - self.assertEqual(0, res_dict['backup']['size']) - self.assertEqual(fields.BackupStatus.CREATING, - res_dict['backup']['status']) - self.assertEqual(volume_id, res_dict['backup']['volume_id']) - self.assertFalse(res_dict['backup']['is_incremental']) - self.assertFalse(res_dict['backup']['has_dependent_backups']) - self.assertEqual(snapshot_id, res_dict['backup']['snapshot_id']) - self.assertIn('updated_at', res_dict['backup']) - - if snapshot: - snapshot.destroy() - db.backup_destroy(context.get_admin_context(), backup_id) - db.volume_destroy(context.get_admin_context(), volume_id) - - def test_show_backup_with_backup_NotFound(self): - req = webob.Request.blank('/v2/%s/backups/%s' % ( - fake.PROJECT_ID, fake.WILL_NOT_BE_FOUND_ID)) - req.method = 'GET' - req.headers['Content-Type'] = 'application/json' - res = req.get_response(fakes.wsgi_app( - fake_auth_context=self.user_context)) - res_dict = jsonutils.loads(res.body) - - self.assertEqual(http_client.NOT_FOUND, res.status_int) - self.assertEqual(http_client.NOT_FOUND, - res_dict['itemNotFound']['code']) - self.assertEqual('Backup %s could not be found.' 
% - fake.WILL_NOT_BE_FOUND_ID, - res_dict['itemNotFound']['message']) - - def test_list_backups_json(self): - backup_id1 = self._create_backup() - backup_id2 = self._create_backup() - backup_id3 = self._create_backup() - - req = webob.Request.blank('/v2/%s/backups' % fake.PROJECT_ID) - req.method = 'GET' - req.headers['Content-Type'] = 'application/json' - res = req.get_response(fakes.wsgi_app( - fake_auth_context=self.user_context)) - res_dict = jsonutils.loads(res.body) - - self.assertEqual(http_client.OK, res.status_int) - self.assertEqual(3, len(res_dict['backups'][0])) - self.assertEqual(backup_id3, res_dict['backups'][0]['id']) - self.assertEqual('test_backup', res_dict['backups'][0]['name']) - self.assertEqual(3, len(res_dict['backups'][1])) - self.assertEqual(backup_id2, res_dict['backups'][1]['id']) - self.assertEqual('test_backup', res_dict['backups'][1]['name']) - self.assertEqual(3, len(res_dict['backups'][2])) - self.assertEqual(backup_id1, res_dict['backups'][2]['id']) - self.assertEqual('test_backup', res_dict['backups'][2]['name']) - - db.backup_destroy(context.get_admin_context(), backup_id3) - db.backup_destroy(context.get_admin_context(), backup_id2) - db.backup_destroy(context.get_admin_context(), backup_id1) - - def test_list_backups_with_limit(self): - backup_id1 = self._create_backup() - backup_id2 = self._create_backup() - backup_id3 = self._create_backup() - - req = webob.Request.blank('/v2/%s/backups?limit=2' % fake.PROJECT_ID) - req.method = 'GET' - req.headers['Content-Type'] = 'application/json' - res = req.get_response(fakes.wsgi_app( - fake_auth_context=self.user_context)) - res_dict = jsonutils.loads(res.body) - - self.assertEqual(http_client.OK, res.status_int) - self.assertEqual(2, len(res_dict['backups'])) - self.assertEqual(3, len(res_dict['backups'][0])) - self.assertEqual(backup_id3, res_dict['backups'][0]['id']) - self.assertEqual('test_backup', res_dict['backups'][0]['name']) - self.assertEqual(3, len(res_dict['backups'][1])) 
- self.assertEqual(backup_id2, res_dict['backups'][1]['id']) - self.assertEqual('test_backup', res_dict['backups'][1]['name']) - - db.backup_destroy(context.get_admin_context(), backup_id3) - db.backup_destroy(context.get_admin_context(), backup_id2) - db.backup_destroy(context.get_admin_context(), backup_id1) - - def test_list_backups_with_offset_out_of_range(self): - url = '/v2/%s/backups?offset=252452434242342434' % fake.PROJECT_ID - req = webob.Request.blank(url) - req.method = 'GET' - req.headers['Content-Type'] = 'application/json' - res = req.get_response(fakes.wsgi_app( - fake_auth_context=self.user_context)) - self.assertEqual(http_client.BAD_REQUEST, res.status_int) - - def test_list_backups_with_marker(self): - backup_id1 = self._create_backup() - backup_id2 = self._create_backup() - backup_id3 = self._create_backup() - url = '/v2/%s/backups?marker=%s' % (fake.PROJECT_ID, backup_id3) - req = webob.Request.blank(url) - req.method = 'GET' - req.headers['Content-Type'] = 'application/json' - res = req.get_response(fakes.wsgi_app( - fake_auth_context=self.user_context)) - res_dict = jsonutils.loads(res.body) - - self.assertEqual(http_client.OK, res.status_int) - self.assertEqual(2, len(res_dict['backups'])) - self.assertEqual(3, len(res_dict['backups'][0])) - self.assertEqual(backup_id2, res_dict['backups'][0]['id']) - self.assertEqual('test_backup', res_dict['backups'][0]['name']) - self.assertEqual(3, len(res_dict['backups'][1])) - self.assertEqual(backup_id1, res_dict['backups'][1]['id']) - self.assertEqual('test_backup', res_dict['backups'][1]['name']) - - db.backup_destroy(context.get_admin_context(), backup_id3) - db.backup_destroy(context.get_admin_context(), backup_id2) - db.backup_destroy(context.get_admin_context(), backup_id1) - - def test_list_backups_with_limit_and_marker(self): - backup_id1 = self._create_backup() - backup_id2 = self._create_backup() - backup_id3 = self._create_backup() - - url = ('/v2/%s/backups?limit=1&marker=%s' % 
(fake.PROJECT_ID, - backup_id3)) - req = webob.Request.blank(url) - req.method = 'GET' - req.headers['Content-Type'] = 'application/json' - res = req.get_response(fakes.wsgi_app( - fake_auth_context=self.user_context)) - res_dict = jsonutils.loads(res.body) - - self.assertEqual(http_client.OK, res.status_int) - self.assertEqual(1, len(res_dict['backups'])) - self.assertEqual(3, len(res_dict['backups'][0])) - self.assertEqual(backup_id2, res_dict['backups'][0]['id']) - self.assertEqual('test_backup', res_dict['backups'][0]['name']) - - db.backup_destroy(context.get_admin_context(), backup_id3) - db.backup_destroy(context.get_admin_context(), backup_id2) - db.backup_destroy(context.get_admin_context(), backup_id1) - - def test_list_backups_detail_json(self): - backup_id1 = self._create_backup() - backup_id2 = self._create_backup() - backup_id3 = self._create_backup() - - req = webob.Request.blank('/v2/%s/backups/detail' % fake.PROJECT_ID) - req.method = 'GET' - req.headers['Content-Type'] = 'application/json' - req.headers['Accept'] = 'application/json' - res = req.get_response(fakes.wsgi_app( - fake_auth_context=self.user_context)) - res_dict = jsonutils.loads(res.body) - - self.assertEqual(http_client.OK, res.status_int) - self.assertEqual(NUM_ELEMENTS_IN_BACKUP, len(res_dict['backups'][0])) - self.assertEqual('az1', res_dict['backups'][0]['availability_zone']) - self.assertEqual('volumebackups', - res_dict['backups'][0]['container']) - self.assertEqual('this is a test backup', - res_dict['backups'][0]['description']) - self.assertEqual('test_backup', - res_dict['backups'][0]['name']) - self.assertEqual(backup_id3, res_dict['backups'][0]['id']) - self.assertEqual(0, res_dict['backups'][0]['object_count']) - self.assertEqual(0, res_dict['backups'][0]['size']) - self.assertEqual(fields.BackupStatus.CREATING, - res_dict['backups'][0]['status']) - self.assertEqual(fake.VOLUME_ID, res_dict['backups'][0]['volume_id']) - self.assertIn('updated_at', res_dict['backups'][0]) 
- - self.assertEqual(NUM_ELEMENTS_IN_BACKUP, len(res_dict['backups'][1])) - self.assertEqual('az1', res_dict['backups'][1]['availability_zone']) - self.assertEqual('volumebackups', - res_dict['backups'][1]['container']) - self.assertEqual('this is a test backup', - res_dict['backups'][1]['description']) - self.assertEqual('test_backup', - res_dict['backups'][1]['name']) - self.assertEqual(backup_id2, res_dict['backups'][1]['id']) - self.assertEqual(0, res_dict['backups'][1]['object_count']) - self.assertEqual(0, res_dict['backups'][1]['size']) - self.assertEqual(fields.BackupStatus.CREATING, - res_dict['backups'][1]['status']) - self.assertEqual(fake.VOLUME_ID, res_dict['backups'][1]['volume_id']) - self.assertIn('updated_at', res_dict['backups'][1]) - - self.assertEqual(NUM_ELEMENTS_IN_BACKUP, len(res_dict['backups'][2])) - self.assertEqual('az1', res_dict['backups'][2]['availability_zone']) - self.assertEqual('volumebackups', res_dict['backups'][2]['container']) - self.assertEqual('this is a test backup', - res_dict['backups'][2]['description']) - self.assertEqual('test_backup', - res_dict['backups'][2]['name']) - self.assertEqual(backup_id1, res_dict['backups'][2]['id']) - self.assertEqual(0, res_dict['backups'][2]['object_count']) - self.assertEqual(0, res_dict['backups'][2]['size']) - self.assertEqual(fields.BackupStatus.CREATING, - res_dict['backups'][2]['status']) - self.assertEqual(fake.VOLUME_ID, res_dict['backups'][2]['volume_id']) - self.assertIn('updated_at', res_dict['backups'][2]) - - db.backup_destroy(context.get_admin_context(), backup_id3) - db.backup_destroy(context.get_admin_context(), backup_id2) - db.backup_destroy(context.get_admin_context(), backup_id1) - - def test_list_backups_detail_using_filters(self): - backup_id1 = self._create_backup(display_name='test2') - backup_id2 = self._create_backup(status=fields.BackupStatus.AVAILABLE) - backup_id3 = self._create_backup(volume_id=fake.VOLUME3_ID) - - req = 
webob.Request.blank('/v2/%s/backups/detail?name=test2' % - fake.PROJECT_ID) - req.method = 'GET' - req.headers['Content-Type'] = 'application/json' - req.headers['Accept'] = 'application/json' - res = req.get_response(fakes.wsgi_app( - fake_auth_context=self.user_context)) - res_dict = jsonutils.loads(res.body) - - self.assertEqual(1, len(res_dict['backups'])) - self.assertEqual(http_client.OK, res.status_int) - self.assertEqual(backup_id1, res_dict['backups'][0]['id']) - - req = webob.Request.blank('/v2/%s/backups/detail?status=available' % - fake.PROJECT_ID) - req.method = 'GET' - req.headers['Content-Type'] = 'application/json' - req.headers['Accept'] = 'application/json' - res = req.get_response(fakes.wsgi_app( - fake_auth_context=self.user_context)) - res_dict = jsonutils.loads(res.body) - - self.assertEqual(1, len(res_dict['backups'])) - self.assertEqual(http_client.OK, res.status_int) - self.assertEqual(backup_id2, res_dict['backups'][0]['id']) - - req = webob.Request.blank('/v2/%s/backups/detail?volume_id=%s' % ( - fake.PROJECT_ID, fake.VOLUME3_ID)) - req.method = 'GET' - req.headers['Content-Type'] = 'application/json' - req.headers['Accept'] = 'application/json' - res = req.get_response(fakes.wsgi_app( - fake_auth_context=self.user_context)) - res_dict = jsonutils.loads(res.body) - - self.assertEqual(1, len(res_dict['backups'])) - self.assertEqual(http_client.OK, res.status_int) - self.assertEqual(backup_id3, res_dict['backups'][0]['id']) - - db.backup_destroy(context.get_admin_context(), backup_id3) - db.backup_destroy(context.get_admin_context(), backup_id2) - db.backup_destroy(context.get_admin_context(), backup_id1) - - def test_list_backups_detail_with_limit_and_sort_args(self): - backup_id1 = self._create_backup() - backup_id2 = self._create_backup() - backup_id3 = self._create_backup() - url = ('/v2/%s/backups/detail?limit=2&sort_key=created_at' - '&sort_dir=desc' % fake.PROJECT_ID) - req = webob.Request.blank(url) - req.method = 'GET' - 
req.headers['Content-Type'] = 'application/json' - res = req.get_response(fakes.wsgi_app( - fake_auth_context=self.user_context)) - res_dict = jsonutils.loads(res.body) - - self.assertEqual(http_client.OK, res.status_int) - self.assertEqual(2, len(res_dict['backups'])) - self.assertEqual(NUM_ELEMENTS_IN_BACKUP, len(res_dict['backups'][0])) - self.assertEqual(backup_id3, res_dict['backups'][0]['id']) - self.assertEqual(NUM_ELEMENTS_IN_BACKUP, len(res_dict['backups'][1])) - self.assertEqual(backup_id2, res_dict['backups'][1]['id']) - - db.backup_destroy(context.get_admin_context(), backup_id3) - db.backup_destroy(context.get_admin_context(), backup_id2) - db.backup_destroy(context.get_admin_context(), backup_id1) - - def test_list_backups_detail_with_marker(self): - backup_id1 = self._create_backup() - backup_id2 = self._create_backup() - backup_id3 = self._create_backup() - - url = ('/v2/%s/backups/detail?marker=%s' % ( - fake.PROJECT_ID, backup_id3)) - req = webob.Request.blank(url) - req.method = 'GET' - req.headers['Content-Type'] = 'application/json' - res = req.get_response(fakes.wsgi_app( - fake_auth_context=self.user_context)) - res_dict = jsonutils.loads(res.body) - - self.assertEqual(http_client.OK, res.status_int) - self.assertEqual(2, len(res_dict['backups'])) - self.assertEqual(NUM_ELEMENTS_IN_BACKUP, len(res_dict['backups'][0])) - self.assertEqual(backup_id2, res_dict['backups'][0]['id']) - self.assertEqual(NUM_ELEMENTS_IN_BACKUP, len(res_dict['backups'][1])) - self.assertEqual(backup_id1, res_dict['backups'][1]['id']) - - db.backup_destroy(context.get_admin_context(), backup_id3) - db.backup_destroy(context.get_admin_context(), backup_id2) - db.backup_destroy(context.get_admin_context(), backup_id1) - - def test_list_backups_detail_with_limit_and_marker(self): - backup_id1 = self._create_backup() - backup_id2 = self._create_backup() - backup_id3 = self._create_backup() - - url = ('/v2/%s/backups/detail?limit=1&marker=%s' % ( - fake.PROJECT_ID, 
backup_id3)) - req = webob.Request.blank(url) - req.method = 'GET' - req.headers['Content-Type'] = 'application/json' - res = req.get_response(fakes.wsgi_app( - fake_auth_context=self.user_context)) - res_dict = jsonutils.loads(res.body) - - self.assertEqual(http_client.OK, res.status_int) - self.assertEqual(1, len(res_dict['backups'])) - self.assertEqual(NUM_ELEMENTS_IN_BACKUP, len(res_dict['backups'][0])) - self.assertEqual(backup_id2, res_dict['backups'][0]['id']) - - db.backup_destroy(context.get_admin_context(), backup_id3) - db.backup_destroy(context.get_admin_context(), backup_id2) - db.backup_destroy(context.get_admin_context(), backup_id1) - - def test_list_backups_detail_with_offset_out_of_range(self): - url = ('/v2/%s/backups/detail?offset=234534543657634523' % - fake.PROJECT_ID) - req = webob.Request.blank(url) - req.method = 'GET' - req.headers['Content-Type'] = 'application/json' - res = req.get_response(fakes.wsgi_app( - fake_auth_context=self.user_context)) - self.assertEqual(http_client.BAD_REQUEST, res.status_int) - - @mock.patch('cinder.db.service_get_all') - @mock.patch( - 'cinder.api.openstack.wsgi.Controller.validate_name_and_description') - def test_create_backup_json(self, mock_validate, - _mock_service_get_all): - _mock_service_get_all.return_value = [ - {'availability_zone': 'fake_az', 'host': 'testhost', - 'disabled': 0, 'updated_at': timeutils.utcnow()}] - - volume_id = utils.create_volume(self.context, size=5).id - - body = {"backup": {"display_name": "nightly001", - "display_description": - "Nightly Backup 03-Sep-2012", - "volume_id": volume_id, - "container": "nightlybackups", - } - } - req = webob.Request.blank('/v2/%s/backups' % fake.PROJECT_ID) - req.method = 'POST' - req.headers['Content-Type'] = 'application/json' - req.body = jsonutils.dump_as_bytes(body) - res = req.get_response(fakes.wsgi_app( - fake_auth_context=self.user_context)) - - res_dict = jsonutils.loads(res.body) - - self.assertEqual(http_client.ACCEPTED, 
res.status_int) - self.assertIn('id', res_dict['backup']) - _mock_service_get_all.assert_called_once_with(mock.ANY, - disabled=False, - topic='cinder-backup') - self.assertTrue(mock_validate.called) - - db.volume_destroy(context.get_admin_context(), volume_id) - - @mock.patch('cinder.db.service_get_all') - def test_create_backup_inuse_no_force(self, - _mock_service_get_all): - _mock_service_get_all.return_value = [ - {'availability_zone': 'fake_az', 'host': 'testhost', - 'disabled': 0, 'updated_at': timeutils.utcnow()}] - - volume_id = utils.create_volume(self.context, size=5, - status='in-use').id - - body = {"backup": {"display_name": "nightly001", - "display_description": - "Nightly Backup 03-Sep-2012", - "volume_id": volume_id, - "container": "nightlybackups", - } - } - req = webob.Request.blank('/v2/%s/backups' % fake.PROJECT_ID) - req.method = 'POST' - req.headers['Content-Type'] = 'application/json' - req.body = jsonutils.dump_as_bytes(body) - res = req.get_response(fakes.wsgi_app( - fake_auth_context=self.user_context)) - - res_dict = jsonutils.loads(res.body) - - self.assertEqual(http_client.BAD_REQUEST, res.status_int) - self.assertEqual(http_client.BAD_REQUEST, - res_dict['badRequest']['code']) - self.assertIsNotNone(res_dict['badRequest']['message']) - - db.volume_destroy(context.get_admin_context(), volume_id) - - @mock.patch('cinder.db.service_get_all') - def test_create_backup_inuse_force(self, _mock_service_get_all): - _mock_service_get_all.return_value = [ - {'availability_zone': 'fake_az', 'host': 'testhost', - 'disabled': 0, 'updated_at': timeutils.utcnow()}] - - volume_id = utils.create_volume(self.context, size=5, - status='in-use').id - backup_id = self._create_backup(volume_id, - status=fields.BackupStatus.AVAILABLE) - body = {"backup": {"display_name": "nightly001", - "display_description": - "Nightly Backup 03-Sep-2012", - "volume_id": volume_id, - "container": "nightlybackups", - "force": True, - } - } - req = 
webob.Request.blank('/v2/%s/backups' % fake.PROJECT_ID) - req.method = 'POST' - req.headers['Content-Type'] = 'application/json' - req.body = jsonutils.dump_as_bytes(body) - res = req.get_response(fakes.wsgi_app( - fake_auth_context=self.user_context)) - - res_dict = jsonutils.loads(res.body) - - self.assertEqual(http_client.ACCEPTED, res.status_int) - self.assertIn('id', res_dict['backup']) - _mock_service_get_all.assert_called_once_with(mock.ANY, - disabled=False, - topic='cinder-backup') - - db.backup_destroy(context.get_admin_context(), backup_id) - db.volume_destroy(context.get_admin_context(), volume_id) - - @mock.patch('cinder.db.service_get_all') - @mock.patch( - 'cinder.api.openstack.wsgi.Controller.validate_name_and_description') - def test_create_backup_snapshot_json(self, mock_validate, - _mock_service_get_all): - _mock_service_get_all.return_value = [ - {'availability_zone': 'fake_az', 'host': 'testhost', - 'disabled': 0, 'updated_at': timeutils.utcnow()}] - - volume_id = utils.create_volume(self.context, size=5, - status='available').id - - body = {"backup": {"display_name": "nightly001", - "display_description": - "Nightly Backup 03-Sep-2012", - "volume_id": volume_id, - "container": "nightlybackups", - } - } - req = webob.Request.blank('/v2/%s/backups' % fake.PROJECT_ID) - req.method = 'POST' - req.headers['Content-Type'] = 'application/json' - req.body = jsonutils.dump_as_bytes(body) - res = req.get_response(fakes.wsgi_app( - fake_auth_context=self.user_context)) - - res_dict = jsonutils.loads(res.body) - self.assertEqual(http_client.ACCEPTED, res.status_int) - self.assertIn('id', res_dict['backup']) - _mock_service_get_all.assert_called_once_with(mock.ANY, - disabled=False, - topic='cinder-backup') - self.assertTrue(mock_validate.called) - - db.volume_destroy(context.get_admin_context(), volume_id) - - def test_create_backup_snapshot_with_inconsistent_volume(self): - volume_id = utils.create_volume(self.context, size=5, - status='available').id - 
volume_id2 = utils.create_volume(self.context, size=5, - status='available').id - snapshot_id = utils.create_snapshot(self.context, - volume_id, - status='available')['id'] - - self.addCleanup(db.volume_destroy, - self.context.elevated(), - volume_id) - self.addCleanup(db.volume_destroy, - self.context.elevated(), - volume_id2) - self.addCleanup(db.snapshot_destroy, - self.context.elevated(), - snapshot_id) - body = {"backup": {"display_name": "nightly001", - "display_description": - "Nightly Backup 03-Sep-2012", - "volume_id": volume_id2, - "snapshot_id": snapshot_id, - "container": "nightlybackups", - } - } - req = webob.Request.blank('/v2/%s/backups' % fake.PROJECT_ID) - req.method = 'POST' - req.headers['Content-Type'] = 'application/json' - req.body = jsonutils.dump_as_bytes(body) - res = req.get_response(fakes.wsgi_app( - fake_auth_context=self.user_context)) - - res_dict = jsonutils.loads(res.body) - self.assertEqual(http_client.BAD_REQUEST, res.status_int) - self.assertIsNotNone(res_dict['badRequest']['message']) - - def test_create_backup_with_invalid_snapshot(self): - volume_id = utils.create_volume(self.context, size=5, - status='available').id - snapshot_id = utils.create_snapshot(self.context, volume_id, - status='error')['id'] - body = {"backup": {"display_name": "nightly001", - "display_description": - "Nightly Backup 03-Sep-2012", - "snapshot_id": snapshot_id, - "volume_id": volume_id, - } - } - self.addCleanup(db.volume_destroy, - self.context.elevated(), - volume_id) - self.addCleanup(db.snapshot_destroy, - self.context.elevated(), - snapshot_id) - req = webob.Request.blank('/v2/%s/backups' % fake.PROJECT_ID) - req.method = 'POST' - req.headers['Content-Type'] = 'application/json' - req.body = jsonutils.dump_as_bytes(body) - res = req.get_response(fakes.wsgi_app( - fake_auth_context=self.user_context)) - - res_dict = jsonutils.loads(res.body) - self.assertEqual(http_client.BAD_REQUEST, res.status_int) - self.assertEqual(http_client.BAD_REQUEST, - 
res_dict['badRequest']['code']) - self.assertIsNotNone(res_dict['badRequest']['message']) - - def test_create_backup_with_non_existent_snapshot(self): - volume_id = utils.create_volume(self.context, size=5, - status='restoring').id - body = {"backup": {"display_name": "nightly001", - "display_description": - "Nightly Backup 03-Sep-2012", - "snapshot_id": fake.SNAPSHOT_ID, - "volume_id": volume_id, - } - } - self.addCleanup(db.volume_destroy, - self.context.elevated(), - volume_id) - req = webob.Request.blank('/v2/%s/backups' % fake.PROJECT_ID) - req.method = 'POST' - req.headers['Content-Type'] = 'application/json' - req.body = jsonutils.dump_as_bytes(body) - res = req.get_response(fakes.wsgi_app( - fake_auth_context=self.user_context)) - - res_dict = jsonutils.loads(res.body) - self.assertEqual(http_client.NOT_FOUND, res.status_int) - self.assertEqual(http_client.NOT_FOUND, - res_dict['itemNotFound']['code']) - self.assertIsNotNone(res_dict['itemNotFound']['message']) - - def test_create_backup_with_invalid_container(self): - volume_id = utils.create_volume(self.context, size=5, - status='available').id - body = {"backup": {"display_name": "nightly001", - "display_description": "Nightly Backup 03-Sep-2012", - "volume_id": volume_id, - "container": "a" * 256 - } - } - req = webob.Request.blank('/v2/%s/backups' % fake.PROJECT_ID) - req.method = 'POST' - req.environ['cinder.context'] = self.context - self.assertRaises(exception.InvalidInput, - self.controller.create, - req, - body) - - @mock.patch('cinder.db.service_get_all') - @mock.patch( - 'cinder.api.openstack.wsgi.Controller.validate_name_and_description') - @ddt.data(False, True) - def test_create_backup_delta(self, backup_from_snapshot, - mock_validate, - _mock_service_get_all): - _mock_service_get_all.return_value = [ - {'availability_zone': 'fake_az', 'host': 'testhost', - 'disabled': 0, 'updated_at': timeutils.utcnow()}] - - volume_id = utils.create_volume(self.context, size=5).id - snapshot = None - 
snapshot_id = None - if backup_from_snapshot: - snapshot = utils.create_snapshot(self.context, - volume_id, - status= - fields.SnapshotStatus.AVAILABLE) - snapshot_id = snapshot.id - backup_id = self._create_backup(volume_id, - status=fields.BackupStatus.AVAILABLE) - body = {"backup": {"display_name": "nightly001", - "display_description": - "Nightly Backup 03-Sep-2012", - "volume_id": volume_id, - "container": "nightlybackups", - "incremental": True, - "snapshot_id": snapshot_id, - } - } - req = webob.Request.blank('/v2/%s/backups' % fake.PROJECT_ID) - req.method = 'POST' - req.headers['Content-Type'] = 'application/json' - req.body = jsonutils.dump_as_bytes(body) - res = req.get_response(fakes.wsgi_app( - fake_auth_context=self.user_context)) - res_dict = jsonutils.loads(res.body) - - self.assertEqual(http_client.ACCEPTED, res.status_int) - self.assertIn('id', res_dict['backup']) - _mock_service_get_all.assert_called_once_with(mock.ANY, - disabled=False, - topic='cinder-backup') - self.assertTrue(mock_validate.called) - - db.backup_destroy(context.get_admin_context(), backup_id) - if snapshot: - snapshot.destroy() - db.volume_destroy(context.get_admin_context(), volume_id) - - @mock.patch('cinder.db.service_get_all') - def test_create_incremental_backup_invalid_status( - self, _mock_service_get_all): - _mock_service_get_all.return_value = [ - {'availability_zone': 'fake_az', 'host': 'testhost', - 'disabled': 0, 'updated_at': timeutils.utcnow()}] - - volume_id = utils.create_volume(self.context, size=5).id - - backup_id = self._create_backup(volume_id) - body = {"backup": {"display_name": "nightly001", - "display_description": - "Nightly Backup 03-Sep-2012", - "volume_id": volume_id, - "container": "nightlybackups", - "incremental": True, - } - } - req = webob.Request.blank('/v2/%s/backups' % fake.PROJECT_ID) - req.method = 'POST' - req.headers['Content-Type'] = 'application/json' - req.body = jsonutils.dump_as_bytes(body) - res = req.get_response(fakes.wsgi_app( 
- fake_auth_context=self.user_context)) - res_dict = jsonutils.loads(res.body) - - self.assertEqual(http_client.BAD_REQUEST, - res_dict['badRequest']['code']) - self.assertEqual('Invalid backup: The parent backup must be ' - 'available for incremental backup.', - res_dict['badRequest']['message']) - - db.backup_destroy(context.get_admin_context(), backup_id) - db.volume_destroy(context.get_admin_context(), volume_id) - - def test_create_backup_with_no_body(self): - # omit body from the request - req = webob.Request.blank('/v2/%s/backups' % fake.PROJECT_ID) - req.body = jsonutils.dump_as_bytes(None) - req.method = 'POST' - req.headers['Content-Type'] = 'application/json' - req.headers['Accept'] = 'application/json' - res = req.get_response(fakes.wsgi_app( - fake_auth_context=self.user_context)) - res_dict = jsonutils.loads(res.body) - - self.assertEqual(http_client.BAD_REQUEST, res.status_int) - self.assertEqual(http_client.BAD_REQUEST, - res_dict['badRequest']['code']) - self.assertEqual("Missing required element 'backup' in request body.", - res_dict['badRequest']['message']) - - def test_create_backup_with_body_KeyError(self): - # omit volume_id from body - body = {"backup": {"display_name": "nightly001", - "display_description": - "Nightly Backup 03-Sep-2012", - "container": "nightlybackups", - } - } - req = webob.Request.blank('/v2/%s/backups' % fake.PROJECT_ID) - req.method = 'POST' - req.headers['Content-Type'] = 'application/json' - req.body = jsonutils.dump_as_bytes(body) - res = req.get_response(fakes.wsgi_app( - fake_auth_context=self.user_context)) - res_dict = jsonutils.loads(res.body) - - self.assertEqual(http_client.BAD_REQUEST, res.status_int) - self.assertEqual(http_client.BAD_REQUEST, - res_dict['badRequest']['code']) - self.assertEqual('Incorrect request body format', - res_dict['badRequest']['message']) - - def test_create_backup_with_VolumeNotFound(self): - body = {"backup": {"display_name": "nightly001", - "display_description": - "Nightly 
Backup 03-Sep-2012", - "volume_id": fake.WILL_NOT_BE_FOUND_ID, - "container": "nightlybackups", - } - } - req = webob.Request.blank('/v2/%s/backups' % fake.PROJECT_ID) - req.method = 'POST' - req.headers['Content-Type'] = 'application/json' - req.body = jsonutils.dump_as_bytes(body) - res = req.get_response(fakes.wsgi_app( - fake_auth_context=self.user_context)) - res_dict = jsonutils.loads(res.body) - - self.assertEqual(http_client.NOT_FOUND, res.status_int) - self.assertEqual(http_client.NOT_FOUND, - res_dict['itemNotFound']['code']) - self.assertEqual('Volume %s could not be found.' % - fake.WILL_NOT_BE_FOUND_ID, - res_dict['itemNotFound']['message']) - - def test_create_backup_with_InvalidVolume(self): - # need to create the volume referenced below first - volume_id = utils.create_volume(self.context, size=5, - status='restoring').id - body = {"backup": {"display_name": "nightly001", - "display_description": - "Nightly Backup 03-Sep-2012", - "volume_id": volume_id, - "container": "nightlybackups", - } - } - req = webob.Request.blank('/v2/%s/backups' % fake.PROJECT_ID) - req.method = 'POST' - req.headers['Content-Type'] = 'application/json' - req.body = jsonutils.dump_as_bytes(body) - res = req.get_response(fakes.wsgi_app( - fake_auth_context=self.user_context)) - res_dict = jsonutils.loads(res.body) - - self.assertEqual(http_client.BAD_REQUEST, res.status_int) - self.assertEqual(http_client.BAD_REQUEST, - res_dict['badRequest']['code']) - - @mock.patch('cinder.db.service_get_all') - def test_create_backup_WithOUT_enabled_backup_service( - self, - _mock_service_get_all): - # need an enabled backup service available - _mock_service_get_all.return_value = [] - - volume_id = utils.create_volume(self.context, size=2).id - req = webob.Request.blank('/v2/%s/backups' % fake.PROJECT_ID) - body = {"backup": {"display_name": "nightly001", - "display_description": - "Nightly Backup 03-Sep-2012", - "volume_id": volume_id, - "container": "nightlybackups", - } - } - 
req.method = 'POST' - req.headers['Content-Type'] = 'application/json' - req.headers['Accept'] = 'application/json' - req.body = jsonutils.dump_as_bytes(body) - - res = req.get_response(fakes.wsgi_app( - fake_auth_context=self.user_context)) - res_dict = jsonutils.loads(res.body) - self.assertEqual(http_client.INTERNAL_SERVER_ERROR, res.status_int) - self.assertEqual(http_client.INTERNAL_SERVER_ERROR, - res_dict['computeFault']['code']) - self.assertEqual('Service cinder-backup could not be found.', - res_dict['computeFault']['message']) - - volume = self.volume_api.get(context.get_admin_context(), volume_id) - self.assertEqual('available', volume['status']) - - @mock.patch('cinder.db.service_get_all') - def test_create_incremental_backup_invalid_no_full( - self, _mock_service_get_all): - _mock_service_get_all.return_value = [ - {'availability_zone': 'fake_az', 'host': 'testhost', - 'disabled': 0, 'updated_at': timeutils.utcnow()}] - - volume_id = utils.create_volume(self.context, size=5, - status='available').id - - body = {"backup": {"display_name": "nightly001", - "display_description": - "Nightly Backup 03-Sep-2012", - "volume_id": volume_id, - "container": "nightlybackups", - "incremental": True, - } - } - req = webob.Request.blank('/v2/%s/backups' % fake.PROJECT_ID) - req.method = 'POST' - req.headers['Content-Type'] = 'application/json' - req.body = jsonutils.dump_as_bytes(body) - res = req.get_response(fakes.wsgi_app( - fake_auth_context=self.user_context)) - res_dict = jsonutils.loads(res.body) - - self.assertEqual(http_client.BAD_REQUEST, - res_dict['badRequest']['code']) - self.assertEqual('Invalid backup: No backups available to do ' - 'an incremental backup.', - res_dict['badRequest']['message']) - - db.volume_destroy(context.get_admin_context(), volume_id) - - @mock.patch('cinder.db.service_get_all') - def test_is_backup_service_enabled(self, _mock_service_get_all): - - testhost = 'test_host' - alt_host = 'strange_host' - empty_service = [] - # 
service host not match with volume's host - host_not_match = [{'availability_zone': 'fake_az', 'host': alt_host, - 'disabled': 0, 'updated_at': timeutils.utcnow()}] - # service az not match with volume's az - az_not_match = [{'availability_zone': 'strange_az', 'host': testhost, - 'disabled': 0, 'updated_at': timeutils.utcnow()}] - # service disabled - disabled_service = [] - - # dead service that last reported at 20th century - dead_service = [{'availability_zone': 'fake_az', 'host': alt_host, - 'disabled': 0, 'updated_at': '1989-04-16 02:55:44'}] - - # first service's host not match but second one works. - multi_services = [{'availability_zone': 'fake_az', 'host': alt_host, - 'disabled': 0, 'updated_at': timeutils.utcnow()}, - {'availability_zone': 'fake_az', 'host': testhost, - 'disabled': 0, 'updated_at': timeutils.utcnow()}] - - # Setup mock to run through the following service cases - _mock_service_get_all.side_effect = [empty_service, - host_not_match, - az_not_match, - disabled_service, - dead_service, - multi_services] - - volume_id = utils.create_volume(self.context, size=2, - host=testhost).id - volume = self.volume_api.get(context.get_admin_context(), volume_id) - - # test empty service - self.assertEqual(False, - self.backup_api._is_backup_service_enabled( - volume['availability_zone'], - testhost)) - - # test host not match service - self.assertEqual(False, - self.backup_api._is_backup_service_enabled( - volume['availability_zone'], - testhost)) - - # test az not match service - self.assertEqual(False, - self.backup_api._is_backup_service_enabled( - volume['availability_zone'], - testhost)) - - # test disabled service - self.assertEqual(False, - self.backup_api._is_backup_service_enabled( - volume['availability_zone'], - testhost)) - - # test dead service - self.assertEqual(False, - self.backup_api._is_backup_service_enabled( - volume['availability_zone'], - testhost)) - - # test multi services and the last service matches - 
self.assertTrue(self.backup_api._is_backup_service_enabled( - volume['availability_zone'], - testhost)) - - @mock.patch('cinder.db.service_get_all') - def test_get_available_backup_service(self, _mock_service_get_all): - _mock_service_get_all.return_value = [ - {'availability_zone': 'az1', 'host': 'testhost1', - 'disabled': 0, 'updated_at': timeutils.utcnow()}, - {'availability_zone': 'az2', 'host': 'testhost2', - 'disabled': 0, 'updated_at': timeutils.utcnow()}, - {'availability_zone': 'az2', 'host': 'testhost3', - 'disabled': 0, 'updated_at': timeutils.utcnow()}, ] - actual_host = self.backup_api._get_available_backup_service_host( - None, 'az1') - self.assertEqual('testhost1', actual_host) - actual_host = self.backup_api._get_available_backup_service_host( - 'testhost2', 'az2') - self.assertIn(actual_host, ['testhost2', 'testhost3']) - actual_host = self.backup_api._get_available_backup_service_host( - 'testhost4', 'az1') - self.assertEqual('testhost1', actual_host) - - @mock.patch('cinder.db.service_get_all') - def test_get_available_backup_service_with_same_host( - self, _mock_service_get_all): - _mock_service_get_all.return_value = [ - {'availability_zone': 'az1', 'host': 'testhost1', - 'disabled': 0, 'updated_at': timeutils.utcnow()}, - {'availability_zone': 'az2', 'host': 'testhost2', - 'disabled': 0, 'updated_at': timeutils.utcnow()}, ] - self.override_config('backup_use_same_host', True) - actual_host = self.backup_api._get_available_backup_service_host( - None, 'az1') - self.assertEqual('testhost1', actual_host) - actual_host = self.backup_api._get_available_backup_service_host( - 'testhost2', 'az2') - self.assertEqual('testhost2', actual_host) - self.assertRaises(exception.ServiceNotFound, - self.backup_api._get_available_backup_service_host, - 'testhost4', 'az1') - - @mock.patch('cinder.db.service_get_all') - def test_delete_backup_available(self, _mock_service_get_all): - _mock_service_get_all.return_value = [ - {'availability_zone': 'az1', 'host': 
'testhost', - 'disabled': 0, 'updated_at': timeutils.utcnow()}] - backup_id = self._create_backup(status=fields.BackupStatus.AVAILABLE) - req = webob.Request.blank('/v2/%s/backups/%s' % ( - fake.PROJECT_ID, backup_id)) - req.method = 'DELETE' - req.headers['Content-Type'] = 'application/json' - res = req.get_response(fakes.wsgi_app( - fake_auth_context=self.user_context)) - - self.assertEqual(http_client.ACCEPTED, res.status_int) - self.assertEqual(fields.BackupStatus.DELETING, - self._get_backup_attrib(backup_id, 'status')) - - db.backup_destroy(context.get_admin_context(), backup_id) - - @mock.patch('cinder.db.service_get_all') - def test_delete_delta_backup(self, - _mock_service_get_all): - _mock_service_get_all.return_value = [ - {'availability_zone': 'az1', 'host': 'testhost', - 'disabled': 0, 'updated_at': timeutils.utcnow()}] - backup_id = self._create_backup(status=fields.BackupStatus.AVAILABLE) - delta_id = self._create_backup(status=fields.BackupStatus.AVAILABLE, - incremental=True) - req = webob.Request.blank('/v2/%s/backups/%s' % ( - fake.PROJECT_ID, delta_id)) - req.method = 'DELETE' - req.headers['Content-Type'] = 'application/json' - res = req.get_response(fakes.wsgi_app( - fake_auth_context=self.user_context)) - - self.assertEqual(http_client.ACCEPTED, res.status_int) - self.assertEqual(fields.BackupStatus.DELETING, - self._get_backup_attrib(delta_id, 'status')) - - db.backup_destroy(context.get_admin_context(), delta_id) - db.backup_destroy(context.get_admin_context(), backup_id) - - @mock.patch('cinder.db.service_get_all') - def test_delete_backup_error(self, - _mock_service_get_all): - _mock_service_get_all.return_value = [ - {'availability_zone': 'az1', 'host': 'testhost', - 'disabled': 0, 'updated_at': timeutils.utcnow()}] - backup_id = self._create_backup(status=fields.BackupStatus.ERROR) - req = webob.Request.blank('/v2/%s/backups/%s' % ( - fake.PROJECT_ID, backup_id)) - req.method = 'DELETE' - req.headers['Content-Type'] = 'application/json' 
- res = req.get_response(fakes.wsgi_app( - fake_auth_context=self.user_context)) - - self.assertEqual(http_client.ACCEPTED, res.status_int) - self.assertEqual(fields.BackupStatus.DELETING, - self._get_backup_attrib(backup_id, 'status')) - - db.backup_destroy(context.get_admin_context(), backup_id) - - def test_delete_backup_with_backup_NotFound(self): - req = webob.Request.blank('/v2/%s/backups/%s' % ( - fake.PROJECT_ID, fake.WILL_NOT_BE_FOUND_ID)) - req.method = 'DELETE' - req.headers['Content-Type'] = 'application/json' - res = req.get_response(fakes.wsgi_app( - fake_auth_context=self.user_context)) - res_dict = jsonutils.loads(res.body) - - self.assertEqual(http_client.NOT_FOUND, res.status_int) - self.assertEqual(http_client.NOT_FOUND, - res_dict['itemNotFound']['code']) - self.assertEqual('Backup %s could not be found.' % - fake.WILL_NOT_BE_FOUND_ID, - res_dict['itemNotFound']['message']) - - def test_delete_backup_with_InvalidBackup(self): - backup_id = self._create_backup() - req = webob.Request.blank('/v2/%s/backups/%s' % ( - fake.PROJECT_ID, backup_id)) - req.method = 'DELETE' - req.headers['Content-Type'] = 'application/json' - res = req.get_response(fakes.wsgi_app( - fake_auth_context=self.user_context)) - res_dict = jsonutils.loads(res.body) - - self.assertEqual(http_client.BAD_REQUEST, res.status_int) - self.assertEqual(http_client.BAD_REQUEST, - res_dict['badRequest']['code']) - self.assertEqual('Invalid backup: Backup status must be ' - 'available or error', - res_dict['badRequest']['message']) - - db.backup_destroy(context.get_admin_context(), backup_id) - - @mock.patch('cinder.db.service_get_all') - def test_delete_backup_with_InvalidBackup2(self, - _mock_service_get_all): - _mock_service_get_all.return_value = [ - {'availability_zone': 'az1', 'host': 'testhost', - 'disabled': 0, 'updated_at': timeutils.utcnow()}] - volume_id = utils.create_volume(self.context, size=5).id - backup_id = self._create_backup(volume_id, - 
status=fields.BackupStatus.AVAILABLE) - delta_backup_id = self._create_backup( - status=fields.BackupStatus.AVAILABLE, incremental=True, - parent_id=backup_id) - - req = webob.Request.blank('/v2/%s/backups/%s' % ( - fake.PROJECT_ID, backup_id)) - req.method = 'DELETE' - req.headers['Content-Type'] = 'application/json' - res = req.get_response(fakes.wsgi_app( - fake_auth_context=self.user_context)) - res_dict = jsonutils.loads(res.body) - self.assertEqual(http_client.BAD_REQUEST, res.status_int) - self.assertEqual(http_client.BAD_REQUEST, - res_dict['badRequest']['code']) - self.assertEqual('Invalid backup: Incremental backups ' - 'exist for this backup.', - res_dict['badRequest']['message']) - - db.backup_destroy(context.get_admin_context(), delta_backup_id) - db.backup_destroy(context.get_admin_context(), backup_id) - - @mock.patch('cinder.db.service_get_all') - def test_delete_backup_service_down(self, - _mock_service_get_all): - _mock_service_get_all.return_value = [ - {'availability_zone': 'az1', 'host': 'testhost', - 'disabled': 0, 'updated_at': '1775-04-19 05:00:00'}] - backup_id = self._create_backup(status='available') - req = webob.Request.blank('/v2/%s/backups/%s' % ( - fake.PROJECT_ID, backup_id)) - req.method = 'DELETE' - req.headers['Content-Type'] = 'application/json' - res = req.get_response(fakes.wsgi_app( - fake_auth_context=self.user_context)) - - self.assertEqual(http_client.NOT_FOUND, res.status_int) - - db.backup_destroy(context.get_admin_context(), backup_id) - - @mock.patch('cinder.backup.api.API._get_available_backup_service_host') - def test_restore_backup_volume_id_specified_json( - self, _mock_get_backup_host): - _mock_get_backup_host.return_value = 'testhost' - backup_id = self._create_backup(status=fields.BackupStatus.AVAILABLE) - # need to create the volume referenced below first - volume_name = 'test1' - volume_id = utils.create_volume(self.context, - size=5, - display_name=volume_name).id - - body = {"restore": {"volume_id": 
volume_id, }} - req = webob.Request.blank('/v2/%s/backups/%s/restore' % ( - fake.PROJECT_ID, backup_id)) - req.method = 'POST' - req.headers['Content-Type'] = 'application/json' - req.body = jsonutils.dump_as_bytes(body) - res = req.get_response(fakes.wsgi_app( - fake_auth_context=self.user_context)) - res_dict = jsonutils.loads(res.body) - - self.assertEqual(http_client.ACCEPTED, res.status_int) - self.assertEqual(backup_id, res_dict['restore']['backup_id']) - self.assertEqual(volume_id, res_dict['restore']['volume_id']) - self.assertEqual(volume_name, res_dict['restore']['volume_name']) - - def test_restore_backup_with_no_body(self): - # omit body from the request - backup_id = self._create_backup(status=fields.BackupStatus.AVAILABLE) - - req = webob.Request.blank('/v2/%s/backups/%s/restore' % ( - fake.PROJECT_ID, backup_id)) - req.body = jsonutils.dump_as_bytes(None) - req.method = 'POST' - req.headers['Content-Type'] = 'application/json' - req.headers['Accept'] = 'application/json' - res = req.get_response(fakes.wsgi_app( - fake_auth_context=self.user_context)) - res_dict = jsonutils.loads(res.body) - - self.assertEqual(http_client.BAD_REQUEST, res.status_int) - self.assertEqual(http_client.BAD_REQUEST, - res_dict['badRequest']['code']) - self.assertEqual("Missing required element 'restore' in request body.", - res_dict['badRequest']['message']) - - db.backup_destroy(context.get_admin_context(), backup_id) - - def test_restore_backup_with_body_KeyError(self): - # omit restore from body - backup_id = self._create_backup(status=fields.BackupStatus.AVAILABLE) - - req = webob.Request.blank('/v2/%s/backups/%s/restore' % ( - fake.PROJECT_ID, backup_id)) - body = {"": {}} - req.method = 'POST' - req.headers['Content-Type'] = 'application/json' - req.headers['Accept'] = 'application/json' - req.body = jsonutils.dump_as_bytes(body) - res = req.get_response(fakes.wsgi_app( - fake_auth_context=self.user_context)) - - res_dict = jsonutils.loads(res.body) - - 
self.assertEqual(http_client.BAD_REQUEST, res.status_int) - self.assertEqual(http_client.BAD_REQUEST, - res_dict['badRequest']['code']) - self.assertEqual("Missing required element 'restore' in request body.", - res_dict['badRequest']['message']) - - @mock.patch('cinder.db.service_get_all') - @mock.patch('cinder.volume.api.API.create') - def test_restore_backup_volume_id_unspecified( - self, _mock_volume_api_create, _mock_service_get_all): - # intercept volume creation to ensure created volume - # has status of available - def fake_volume_api_create(context, size, name, description): - volume_id = utils.create_volume(self.context, size=size).id - return db.volume_get(context, volume_id) - - _mock_service_get_all.return_value = [ - {'availability_zone': 'az1', 'host': 'testhost', - 'disabled': 0, 'updated_at': timeutils.utcnow()}] - _mock_volume_api_create.side_effect = fake_volume_api_create - - backup_id = self._create_backup(size=5, - status=fields.BackupStatus.AVAILABLE) - - body = {"restore": {}} - req = webob.Request.blank('/v2/%s/backups/%s/restore' % ( - fake.PROJECT_ID, backup_id)) - req.method = 'POST' - req.headers['Content-Type'] = 'application/json' - req.body = jsonutils.dump_as_bytes(body) - res = req.get_response(fakes.wsgi_app( - fake_auth_context=self.user_context)) - res_dict = jsonutils.loads(res.body) - - self.assertEqual(http_client.ACCEPTED, res.status_int) - self.assertEqual(backup_id, res_dict['restore']['backup_id']) - - @mock.patch('cinder.db.service_get_all') - @mock.patch('cinder.volume.api.API.create') - def test_restore_backup_name_specified(self, - _mock_volume_api_create, - _mock_service_get_all): - # Intercept volume creation to ensure created volume - # has status of available - def fake_volume_api_create(context, size, name, description): - volume_id = utils.create_volume(self.context, size=size, - display_name=name).id - return db.volume_get(context, volume_id) - - _mock_volume_api_create.side_effect = fake_volume_api_create - 
_mock_service_get_all.return_value = [ - {'availability_zone': 'az1', 'host': 'testhost', - 'disabled': 0, 'updated_at': timeutils.utcnow()}] - - backup_id = self._create_backup(size=5, - status=fields.BackupStatus.AVAILABLE) - - body = {"restore": {'name': 'vol-01'}} - req = webob.Request.blank('/v2/%s/backups/%s/restore' % - (fake.PROJECT_ID, backup_id)) - req.method = 'POST' - req.headers['Content-Type'] = 'application/json' - req.body = jsonutils.dump_as_bytes(body) - res = req.get_response(fakes.wsgi_app( - fake_auth_context=self.user_context)) - res_dict = jsonutils.loads(res.body) - - description = 'auto-created_from_restore_from_backup' - # Assert that we have indeed passed on the name parameter - _mock_volume_api_create.assert_called_once_with( - mock.ANY, - 5, - body['restore']['name'], - description) - - self.assertEqual(http_client.ACCEPTED, res.status_int) - self.assertEqual(backup_id, res_dict['restore']['backup_id']) - - @mock.patch('cinder.backup.api.API._get_available_backup_service_host') - def test_restore_backup_name_volume_id_specified( - self, _mock_get_backup_host): - _mock_get_backup_host.return_value = 'testhost' - backup_id = self._create_backup(size=5, - status=fields.BackupStatus.AVAILABLE) - orig_vol_name = "vol-00" - volume_id = utils.create_volume(self.context, size=5, - display_name=orig_vol_name).id - body = {"restore": {'name': 'vol-01', 'volume_id': volume_id}} - req = webob.Request.blank('/v2/%s/backups/%s/restore' % ( - fake.PROJECT_ID, backup_id)) - req.method = 'POST' - req.headers['Content-Type'] = 'application/json' - req.body = jsonutils.dump_as_bytes(body) - res = req.get_response(fakes.wsgi_app( - fake_auth_context=self.user_context)) - res_dict = jsonutils.loads(res.body) - - self.assertEqual(http_client.ACCEPTED, res.status_int) - self.assertEqual(backup_id, res_dict['restore']['backup_id']) - self.assertEqual(volume_id, res_dict['restore']['volume_id']) - restored_vol = db.volume_get(self.context, - 
res_dict['restore']['volume_id']) - # Ensure that the original volume name wasn't overridden - self.assertEqual(orig_vol_name, restored_vol['display_name']) - - @mock.patch('cinder.backup.api.API.restore') - def test_restore_backup_with_InvalidInput(self, - _mock_volume_api_restore): - - msg = _("Invalid input") - _mock_volume_api_restore.side_effect = \ - exception.InvalidInput(reason=msg) - - backup_id = self._create_backup(status=fields.BackupStatus.AVAILABLE) - # need to create the volume referenced below first - volume_id = utils.create_volume(self.context, size=0).id - body = {"restore": {"volume_id": volume_id, }} - req = webob.Request.blank('/v2/%s/backups/%s/restore' % ( - fake.PROJECT_ID, backup_id)) - - req.method = 'POST' - req.headers['Content-Type'] = 'application/json' - req.body = jsonutils.dump_as_bytes(body) - res = req.get_response(fakes.wsgi_app( - fake_auth_context=self.user_context)) - res_dict = jsonutils.loads(res.body) - - self.assertEqual(http_client.BAD_REQUEST, res.status_int) - self.assertEqual(http_client.BAD_REQUEST, - res_dict['badRequest']['code']) - self.assertEqual('Invalid input received: Invalid input', - res_dict['badRequest']['message']) - - def test_restore_backup_with_InvalidVolume(self): - backup_id = self._create_backup(status=fields.BackupStatus.AVAILABLE) - # need to create the volume referenced below first - volume_id = utils.create_volume(self.context, size=5, - status='attaching').id - - body = {"restore": {"volume_id": volume_id, }} - req = webob.Request.blank('/v2/%s/backups/%s/restore' % ( - fake.PROJECT_ID, backup_id)) - req.method = 'POST' - req.headers['Content-Type'] = 'application/json' - req.body = jsonutils.dump_as_bytes(body) - res = req.get_response(fakes.wsgi_app( - fake_auth_context=self.user_context)) - res_dict = jsonutils.loads(res.body) - - self.assertEqual(http_client.BAD_REQUEST, res.status_int) - self.assertEqual(http_client.BAD_REQUEST, - res_dict['badRequest']['code']) - 
self.assertEqual('Invalid volume: Volume to be restored to must ' - 'be available', - res_dict['badRequest']['message']) - - db.volume_destroy(context.get_admin_context(), volume_id) - db.backup_destroy(context.get_admin_context(), backup_id) - - def test_restore_backup_with_InvalidBackup(self): - backup_id = self._create_backup(status=fields.BackupStatus.RESTORING) - # need to create the volume referenced below first - volume_id = utils.create_volume(self.context, size=5).id - - body = {"restore": {"volume_id": volume_id, }} - req = webob.Request.blank('/v2/%s/backups/%s/restore' % ( - fake.PROJECT_ID, backup_id)) - req.method = 'POST' - req.headers['Content-Type'] = 'application/json' - req.body = jsonutils.dump_as_bytes(body) - res = req.get_response(fakes.wsgi_app( - fake_auth_context=self.user_context)) - res_dict = jsonutils.loads(res.body) - - self.assertEqual(http_client.BAD_REQUEST, res.status_int) - self.assertEqual(http_client.BAD_REQUEST, - res_dict['badRequest']['code']) - self.assertEqual('Invalid backup: Backup status must be available', - res_dict['badRequest']['message']) - - db.volume_destroy(context.get_admin_context(), volume_id) - db.backup_destroy(context.get_admin_context(), backup_id) - - def test_restore_backup_with_BackupNotFound(self): - # need to create the volume referenced below first - volume_id = utils.create_volume(self.context, size=5).id - - body = {"restore": {"volume_id": volume_id, }} - req = webob.Request.blank('/v2/%s/backups/%s/restore' % - (fake.PROJECT_ID, fake.WILL_NOT_BE_FOUND_ID)) - req.method = 'POST' - req.headers['Content-Type'] = 'application/json' - req.body = jsonutils.dump_as_bytes(body) - res = req.get_response(fakes.wsgi_app( - fake_auth_context=self.user_context)) - res_dict = jsonutils.loads(res.body) - - self.assertEqual(http_client.NOT_FOUND, res.status_int) - self.assertEqual(http_client.NOT_FOUND, - res_dict['itemNotFound']['code']) - self.assertEqual('Backup %s could not be found.' 
% - fake.WILL_NOT_BE_FOUND_ID, - res_dict['itemNotFound']['message']) - - db.volume_destroy(context.get_admin_context(), volume_id) - - def test_restore_backup_with_VolumeNotFound(self): - backup_id = self._create_backup(status=fields.BackupStatus.AVAILABLE) - - body = {"restore": {"volume_id": fake.WILL_NOT_BE_FOUND_ID, }} - req = webob.Request.blank('/v2/%s/backups/%s/restore' % ( - fake.PROJECT_ID, backup_id)) - req.method = 'POST' - req.headers['Content-Type'] = 'application/json' - req.body = jsonutils.dump_as_bytes(body) - res = req.get_response(fakes.wsgi_app( - fake_auth_context=self.user_context)) - res_dict = jsonutils.loads(res.body) - - self.assertEqual(http_client.NOT_FOUND, res.status_int) - self.assertEqual(http_client.NOT_FOUND, - res_dict['itemNotFound']['code']) - self.assertEqual('Volume %s could not be found.' % - fake.WILL_NOT_BE_FOUND_ID, - res_dict['itemNotFound']['message']) - - db.backup_destroy(context.get_admin_context(), backup_id) - - @mock.patch('cinder.backup.api.API.restore') - def test_restore_backup_with_VolumeSizeExceedsAvailableQuota( - self, - _mock_backup_restore): - - _mock_backup_restore.side_effect = \ - exception.VolumeSizeExceedsAvailableQuota(requested='2', - consumed='2', - quota='3') - - backup_id = self._create_backup(status=fields.BackupStatus.AVAILABLE) - # need to create the volume referenced below first - volume_id = utils.create_volume(self.context, size=5).id - - body = {"restore": {"volume_id": volume_id, }} - req = webob.Request.blank('/v2/%s/backups/%s/restore' % ( - fake.PROJECT_ID, backup_id)) - - req.method = 'POST' - req.headers['Content-Type'] = 'application/json' - req.body = jsonutils.dump_as_bytes(body) - res = req.get_response(fakes.wsgi_app( - fake_auth_context=self.user_context)) - res_dict = jsonutils.loads(res.body) - - self.assertEqual(http_client.REQUEST_ENTITY_TOO_LARGE, res.status_int) - self.assertEqual(http_client.REQUEST_ENTITY_TOO_LARGE, - res_dict['overLimit']['code']) - 
self.assertEqual('Requested volume or snapshot exceeds allowed ' - 'gigabytes quota. Requested 2G, quota is 3G and ' - '2G has been consumed.', - res_dict['overLimit']['message']) - - @mock.patch('cinder.backup.api.API.restore') - def test_restore_backup_with_VolumeLimitExceeded(self, - _mock_backup_restore): - - _mock_backup_restore.side_effect = \ - exception.VolumeLimitExceeded(allowed=1) - - backup_id = self._create_backup(status=fields.BackupStatus.AVAILABLE) - # need to create the volume referenced below first - volume_id = utils.create_volume(self.context, size=5).id - - body = {"restore": {"volume_id": volume_id, }} - req = webob.Request.blank('/v2/%s/backups/%s/restore' % ( - fake.PROJECT_ID, backup_id)) - - req.method = 'POST' - req.headers['Content-Type'] = 'application/json' - req.body = jsonutils.dump_as_bytes(body) - res = req.get_response(fakes.wsgi_app( - fake_auth_context=self.user_context)) - res_dict = jsonutils.loads(res.body) - - self.assertEqual(http_client.REQUEST_ENTITY_TOO_LARGE, res.status_int) - self.assertEqual(http_client.REQUEST_ENTITY_TOO_LARGE, - res_dict['overLimit']['code']) - self.assertEqual("Maximum number of volumes allowed (1) exceeded for" - " quota 'volumes'.", res_dict['overLimit']['message']) - - def test_restore_backup_to_undersized_volume(self): - backup_size = 10 - backup_id = self._create_backup(status=fields.BackupStatus.AVAILABLE, - size=backup_size) - # need to create the volume referenced below first - volume_size = 5 - volume_id = utils.create_volume(self.context, size=volume_size).id - - body = {"restore": {"volume_id": volume_id, }} - req = webob.Request.blank('/v2/%s/backups/%s/restore' % ( - fake.PROJECT_ID, backup_id)) - req.method = 'POST' - req.headers['Content-Type'] = 'application/json' - req.body = jsonutils.dump_as_bytes(body) - res = req.get_response(fakes.wsgi_app( - fake_auth_context=self.user_context)) - res_dict = jsonutils.loads(res.body) - - self.assertEqual(http_client.BAD_REQUEST, 
res.status_int) - self.assertEqual(http_client.BAD_REQUEST, - res_dict['badRequest']['code']) - self.assertEqual('Invalid volume: volume size %d is too ' - 'small to restore backup of size %d.' - % (volume_size, backup_size), - res_dict['badRequest']['message']) - - db.volume_destroy(context.get_admin_context(), volume_id) - db.backup_destroy(context.get_admin_context(), backup_id) - - @mock.patch('cinder.backup.api.API._get_available_backup_service_host') - def test_restore_backup_to_oversized_volume(self, _mock_get_backup_host): - backup_id = self._create_backup(status=fields.BackupStatus.AVAILABLE, - size=10) - _mock_get_backup_host.return_value = 'testhost' - # need to create the volume referenced below first - volume_name = 'test1' - volume_id = utils.create_volume(self.context, - size=15, - display_name=volume_name).id - - body = {"restore": {"volume_id": volume_id, }} - req = webob.Request.blank('/v2/%s/backups/%s/restore' % ( - fake.PROJECT_ID, backup_id)) - req.method = 'POST' - req.headers['Content-Type'] = 'application/json' - req.body = jsonutils.dump_as_bytes(body) - res = req.get_response(fakes.wsgi_app( - fake_auth_context=self.user_context)) - res_dict = jsonutils.loads(res.body) - - self.assertEqual(http_client.ACCEPTED, res.status_int) - self.assertEqual(backup_id, res_dict['restore']['backup_id']) - self.assertEqual(volume_id, res_dict['restore']['volume_id']) - self.assertEqual(volume_name, res_dict['restore']['volume_name']) - - db.volume_destroy(context.get_admin_context(), volume_id) - db.backup_destroy(context.get_admin_context(), backup_id) - - @mock.patch('cinder.backup.rpcapi.BackupAPI.restore_backup') - @mock.patch('cinder.backup.api.API._get_available_backup_service_host') - def test_restore_backup_with_different_host(self, _mock_get_backup_host, - mock_restore_backup): - volume_name = 'test1' - backup_id = self._create_backup(status=fields.BackupStatus.AVAILABLE, - size=10, host='HostA') - volume_id = utils.create_volume(self.context, 
size=10, - host='HostB@BackendB#PoolB', - display_name=volume_name).id - - _mock_get_backup_host.return_value = 'testhost' - body = {"restore": {"volume_id": volume_id, }} - req = webob.Request.blank('/v2/%s/backups/%s/restore' % ( - fake.PROJECT_ID, backup_id)) - req.method = 'POST' - req.headers['Content-Type'] = 'application/json' - req.body = jsonutils.dump_as_bytes(body) - res = req.get_response(fakes.wsgi_app( - fake_auth_context=self.user_context)) - res_dict = jsonutils.loads(res.body) - - self.assertEqual(http_client.ACCEPTED, res.status_int) - self.assertEqual(backup_id, res_dict['restore']['backup_id']) - self.assertEqual(volume_id, res_dict['restore']['volume_id']) - self.assertEqual(volume_name, res_dict['restore']['volume_name']) - mock_restore_backup.assert_called_once_with(mock.ANY, u'testhost', - mock.ANY, volume_id) - # Manually check if restore_backup was called with appropriate backup. - self.assertEqual(backup_id, mock_restore_backup.call_args[0][2].id) - - db.volume_destroy(context.get_admin_context(), volume_id) - db.backup_destroy(context.get_admin_context(), backup_id) - - def test_export_record_as_non_admin(self): - backup_id = self._create_backup(status=fields.BackupStatus.AVAILABLE, - size=10) - req = webob.Request.blank('/v2/%s/backups/%s/export_record' % ( - fake.PROJECT_ID, backup_id)) - req.method = 'GET' - req.headers['content-type'] = 'application/json' - - res = req.get_response(fakes.wsgi_app( - fake_auth_context=self.user_context)) - # request is not authorized - self.assertEqual(http_client.FORBIDDEN, res.status_int) - - @mock.patch('cinder.backup.api.API._get_available_backup_service_host') - @mock.patch('cinder.backup.rpcapi.BackupAPI.export_record') - def test_export_backup_record_id_specified_json(self, - _mock_export_record_rpc, - _mock_get_backup_host): - backup_id = self._create_backup(status=fields.BackupStatus.AVAILABLE, - size=10) - ctx = context.RequestContext(fake.USER_ID, fake.PROJECT_ID, - is_admin=True) - 
backup_service = 'fake' - backup_url = 'fake' - _mock_export_record_rpc.return_value = \ - {'backup_service': backup_service, - 'backup_url': backup_url} - _mock_get_backup_host.return_value = 'testhost' - req = webob.Request.blank('/v2/%s/backups/%s/export_record' % ( - fake.PROJECT_ID, backup_id)) - req.method = 'GET' - req.headers['content-type'] = 'application/json' - - res = req.get_response(fakes.wsgi_app(fake_auth_context=ctx)) - res_dict = jsonutils.loads(res.body) - # verify that request is successful - self.assertEqual(http_client.OK, res.status_int) - self.assertEqual(backup_service, - res_dict['backup-record']['backup_service']) - self.assertEqual(backup_url, - res_dict['backup-record']['backup_url']) - db.backup_destroy(context.get_admin_context(), backup_id) - - def test_export_record_with_bad_backup_id(self): - - ctx = context.RequestContext(fake.USER_ID, fake.PROJECT_ID, - is_admin=True) - backup_id = fake.WILL_NOT_BE_FOUND_ID - req = webob.Request.blank('/v2/%s/backups/%s/export_record' % - (fake.PROJECT_ID, backup_id)) - req.method = 'GET' - req.headers['content-type'] = 'application/json' - - res = req.get_response(fakes.wsgi_app(fake_auth_context=ctx)) - res_dict = jsonutils.loads(res.body) - self.assertEqual(http_client.NOT_FOUND, res.status_int) - self.assertEqual(http_client.NOT_FOUND, - res_dict['itemNotFound']['code']) - self.assertEqual('Backup %s could not be found.' 
% backup_id, - res_dict['itemNotFound']['message']) - - def test_export_record_for_unavailable_backup(self): - - backup_id = self._create_backup(status=fields.BackupStatus.RESTORING) - ctx = context.RequestContext(fake.USER_ID, fake.PROJECT_ID, - is_admin=True) - req = webob.Request.blank('/v2/%s/backups/%s/export_record' % - (fake.PROJECT_ID, backup_id)) - req.method = 'GET' - req.headers['content-type'] = 'application/json' - - res = req.get_response(fakes.wsgi_app(fake_auth_context=ctx)) - res_dict = jsonutils.loads(res.body) - self.assertEqual(http_client.BAD_REQUEST, res.status_int) - self.assertEqual(http_client.BAD_REQUEST, - res_dict['badRequest']['code']) - self.assertEqual('Invalid backup: Backup status must be available ' - 'and not restoring.', - res_dict['badRequest']['message']) - db.backup_destroy(context.get_admin_context(), backup_id) - - @mock.patch('cinder.backup.api.API._get_available_backup_service_host') - @mock.patch('cinder.backup.rpcapi.BackupAPI.export_record') - def test_export_record_with_unavailable_service(self, - _mock_export_record_rpc, - _mock_get_backup_host): - msg = 'fake unavailable service' - _mock_export_record_rpc.side_effect = \ - exception.InvalidBackup(reason=msg) - _mock_get_backup_host.return_value = 'testhost' - backup_id = self._create_backup(status=fields.BackupStatus.AVAILABLE) - ctx = context.RequestContext(fake.USER_ID, fake.PROJECT_ID, - is_admin=True) - req = webob.Request.blank('/v2/%s/backups/%s/export_record' % - (fake.PROJECT_ID, backup_id)) - req.method = 'GET' - req.headers['content-type'] = 'application/json' - - res = req.get_response(fakes.wsgi_app(fake_auth_context=ctx)) - res_dict = jsonutils.loads(res.body) - - self.assertEqual(http_client.BAD_REQUEST, res.status_int) - self.assertEqual(http_client.BAD_REQUEST, - res_dict['badRequest']['code']) - self.assertEqual('Invalid backup: %s' % msg, - res_dict['badRequest']['message']) - db.backup_destroy(context.get_admin_context(), backup_id) - - def 
test_import_record_as_non_admin(self): - backup_service = 'fake' - backup_url = 'fake' - req = webob.Request.blank('/v2/%s/backups/import_record' % - fake.PROJECT_ID) - body = {'backup-record': {'backup_service': backup_service, - 'backup_url': backup_url}} - req.body = jsonutils.dump_as_bytes(body) - req.method = 'POST' - req.headers['content-type'] = 'application/json' - - res = req.get_response(fakes.wsgi_app( - fake_auth_context=self.user_context)) - # request is not authorized - self.assertEqual(http_client.FORBIDDEN, res.status_int) - - @mock.patch('cinder.backup.api.API._list_backup_hosts') - @mock.patch('cinder.backup.rpcapi.BackupAPI.import_record') - def test_import_record_volume_id_specified_json(self, - _mock_import_record_rpc, - _mock_list_services): - utils.replace_obj_loader(self, objects.Backup) - project_id = fake.PROJECT_ID - backup_service = 'fake' - ctx = context.RequestContext(fake.USER_ID, project_id, is_admin=True) - backup = objects.Backup(ctx, id=fake.BACKUP_ID, user_id=fake.USER_ID, - project_id=project_id, - status=fields.BackupStatus.AVAILABLE) - backup_url = backup.encode_record() - _mock_import_record_rpc.return_value = None - _mock_list_services.return_value = [backup_service] - - req = webob.Request.blank('/v2/%s/backups/import_record' % - fake.PROJECT_ID) - body = {'backup-record': {'backup_service': backup_service, - 'backup_url': backup_url}} - req.body = jsonutils.dump_as_bytes(body) - req.method = 'POST' - req.headers['content-type'] = 'application/json' - - res = req.get_response(fakes.wsgi_app(fake_auth_context=ctx)) - res_dict = jsonutils.loads(res.body) - - # verify that request is successful - self.assertEqual(http_client.CREATED, res.status_int) - self.assertIn('id', res_dict['backup']) - self.assertEqual(fake.BACKUP_ID, res_dict['backup']['id']) - - # Verify that entry in DB is as expected - db_backup = objects.Backup.get_by_id(ctx, fake.BACKUP_ID) - self.assertEqual(ctx.project_id, db_backup.project_id) - 
self.assertEqual(ctx.user_id, db_backup.user_id) - self.assertEqual(backup_api.IMPORT_VOLUME_ID, db_backup.volume_id) - self.assertEqual(fields.BackupStatus.CREATING, db_backup.status) - - @mock.patch('cinder.backup.api.API._list_backup_hosts') - @mock.patch('cinder.backup.rpcapi.BackupAPI.import_record') - def test_import_record_volume_id_exists_deleted(self, - _mock_import_record_rpc, - _mock_list_services): - ctx = context.RequestContext(fake.USER_ID, fake.PROJECT_ID, - is_admin=True) - utils.replace_obj_loader(self, objects.Backup) - - # Original backup belonged to a different user_id and project_id - backup = objects.Backup(ctx, id=fake.BACKUP_ID, user_id=fake.USER2_ID, - project_id=fake.PROJECT2_ID, - status=fields.BackupStatus.AVAILABLE) - backup_url = backup.encode_record() - - # Deleted DB entry has project_id and user_id set to fake - backup_id = self._create_backup(fake.VOLUME_ID, - status=fields.BackupStatus.DELETED) - backup_service = 'fake' - _mock_import_record_rpc.return_value = None - _mock_list_services.return_value = [backup_service] - - req = webob.Request.blank('/v2/%s/backups/import_record' % - fake.PROJECT_ID) - body = {'backup-record': {'backup_service': backup_service, - 'backup_url': backup_url}} - req.body = jsonutils.dump_as_bytes(body) - req.method = 'POST' - req.headers['content-type'] = 'application/json' - - res = req.get_response(fakes.wsgi_app(fake_auth_context=ctx)) - res_dict = jsonutils.loads(res.body) - - # verify that request is successful - self.assertEqual(http_client.CREATED, res.status_int) - self.assertIn('id', res_dict['backup']) - self.assertEqual(fake.BACKUP_ID, res_dict['backup']['id']) - - # Verify that entry in DB is as expected, with new project and user_id - db_backup = objects.Backup.get_by_id(ctx, fake.BACKUP_ID) - self.assertEqual(ctx.project_id, db_backup.project_id) - self.assertEqual(ctx.user_id, db_backup.user_id) - self.assertEqual(backup_api.IMPORT_VOLUME_ID, db_backup.volume_id) - 
self.assertEqual(fields.BackupStatus.CREATING, db_backup.status) - - db.backup_destroy(context.get_admin_context(), backup_id) - - @mock.patch('cinder.backup.api.API._list_backup_hosts') - def test_import_record_with_no_backup_services(self, - _mock_list_services): - ctx = context.RequestContext(fake.USER_ID, fake.PROJECT_ID, - is_admin=True) - backup_service = 'fake' - backup_url = 'fake' - _mock_list_services.return_value = [] - - req = webob.Request.blank('/v2/%s/backups/import_record' % - fake.PROJECT_ID) - body = {'backup-record': {'backup_service': backup_service, - 'backup_url': backup_url}} - req.body = jsonutils.dump_as_bytes(body) - req.method = 'POST' - req.headers['content-type'] = 'application/json' - - res = req.get_response(fakes.wsgi_app(fake_auth_context=ctx)) - res_dict = jsonutils.loads(res.body) - self.assertEqual(http_client.INTERNAL_SERVER_ERROR, res.status_int) - self.assertEqual(http_client.INTERNAL_SERVER_ERROR, - res_dict['computeFault']['code']) - self.assertEqual('Service %s could not be found.' 
- % backup_service, - res_dict['computeFault']['message']) - - @mock.patch('cinder.backup.api.API._list_backup_hosts') - def test_import_backup_with_wrong_backup_url(self, _mock_list_services): - ctx = context.RequestContext(fake.USER_ID, fake.PROJECT_ID, - is_admin=True) - backup_service = 'fake' - backup_url = 'fake' - _mock_list_services.return_value = ['no-match1', 'no-match2'] - req = webob.Request.blank('/v2/%s/backups/import_record' % - fake.PROJECT_ID) - body = {'backup-record': {'backup_service': backup_service, - 'backup_url': backup_url}} - req.body = jsonutils.dump_as_bytes(body) - req.method = 'POST' - req.headers['content-type'] = 'application/json' - - res = req.get_response(fakes.wsgi_app(fake_auth_context=ctx)) - res_dict = jsonutils.loads(res.body) - self.assertEqual(http_client.BAD_REQUEST, res.status_int) - self.assertEqual(http_client.BAD_REQUEST, - res_dict['badRequest']['code']) - self.assertEqual("Invalid input received: Can't parse backup record.", - res_dict['badRequest']['message']) - - @mock.patch('cinder.backup.api.API._list_backup_hosts') - def test_import_backup_with_existing_backup_record(self, - _mock_list_services): - ctx = context.RequestContext(fake.USER_ID, fake.PROJECT_ID, - is_admin=True) - backup_id = self._create_backup(fake.VOLUME_ID) - backup_service = 'fake' - backup = objects.Backup.get_by_id(ctx, backup_id) - backup_url = backup.encode_record() - _mock_list_services.return_value = ['no-match1', 'no-match2'] - req = webob.Request.blank('/v2/%s/backups/import_record' % - fake.PROJECT_ID) - body = {'backup-record': {'backup_service': backup_service, - 'backup_url': backup_url}} - req.body = jsonutils.dump_as_bytes(body) - req.method = 'POST' - req.headers['content-type'] = 'application/json' - - res = req.get_response(fakes.wsgi_app(fake_auth_context=ctx)) - res_dict = jsonutils.loads(res.body) - self.assertEqual(http_client.BAD_REQUEST, res.status_int) - self.assertEqual(http_client.BAD_REQUEST, - 
res_dict['badRequest']['code']) - self.assertEqual('Invalid backup: Backup already exists in database.', - res_dict['badRequest']['message']) - - db.backup_destroy(context.get_admin_context(), backup_id) - - @mock.patch('cinder.backup.api.API._list_backup_hosts') - @mock.patch('cinder.backup.rpcapi.BackupAPI.import_record') - def test_import_backup_with_missing_backup_services(self, - _mock_import_record, - _mock_list_services): - ctx = context.RequestContext(fake.USER_ID, fake.PROJECT_ID, - is_admin=True) - backup_id = self._create_backup(fake.VOLUME_ID, - status=fields.BackupStatus.DELETED) - backup_service = 'fake' - backup = objects.Backup.get_by_id(ctx, backup_id) - backup_url = backup.encode_record() - _mock_list_services.return_value = ['no-match1', 'no-match2'] - _mock_import_record.side_effect = \ - exception.ServiceNotFound(service_id='fake') - req = webob.Request.blank('/v2/%s/backups/import_record' % - fake.PROJECT_ID) - body = {'backup-record': {'backup_service': backup_service, - 'backup_url': backup_url}} - req.body = jsonutils.dump_as_bytes(body) - req.method = 'POST' - req.headers['content-type'] = 'application/json' - - res = req.get_response(fakes.wsgi_app(fake_auth_context=ctx)) - res_dict = jsonutils.loads(res.body) - self.assertEqual(http_client.INTERNAL_SERVER_ERROR, res.status_int) - self.assertEqual(http_client.INTERNAL_SERVER_ERROR, - res_dict['computeFault']['code']) - self.assertEqual('Service %s could not be found.' 
% backup_service, - res_dict['computeFault']['message']) - - db.backup_destroy(context.get_admin_context(), backup_id) - - def test_import_record_with_missing_body_elements(self): - ctx = context.RequestContext(fake.USER_ID, fake.PROJECT_ID, - is_admin=True) - backup_service = 'fake' - backup_url = 'fake' - - # test with no backup_service - req = webob.Request.blank('/v2/%s/backups/import_record' % - fake.PROJECT_ID) - body = {'backup-record': {'backup_url': backup_url}} - req.body = jsonutils.dump_as_bytes(body) - req.method = 'POST' - req.headers['content-type'] = 'application/json' - res = req.get_response(fakes.wsgi_app(fake_auth_context=ctx)) - res_dict = jsonutils.loads(res.body) - self.assertEqual(http_client.BAD_REQUEST, res.status_int) - self.assertEqual(http_client.BAD_REQUEST, - res_dict['badRequest']['code']) - self.assertEqual('Incorrect request body format.', - res_dict['badRequest']['message']) - - # test with no backup_url - req = webob.Request.blank('/v2/%s/backups/import_record' % - fake.PROJECT_ID) - body = {'backup-record': {'backup_service': backup_service}} - req.body = jsonutils.dump_as_bytes(body) - req.method = 'POST' - req.headers['content-type'] = 'application/json' - - res = req.get_response(fakes.wsgi_app(fake_auth_context=ctx)) - res_dict = jsonutils.loads(res.body) - self.assertEqual(http_client.BAD_REQUEST, res.status_int) - self.assertEqual(http_client.BAD_REQUEST, - res_dict['badRequest']['code']) - self.assertEqual('Incorrect request body format.', - res_dict['badRequest']['message']) - - # test with no backup_url and backup_url - req = webob.Request.blank('/v2/%s/backups/import_record' % - fake.PROJECT_ID) - body = {'backup-record': {}} - req.body = jsonutils.dump_as_bytes(body) - req.method = 'POST' - req.headers['content-type'] = 'application/json' - - res = req.get_response(fakes.wsgi_app(fake_auth_context=ctx)) - res_dict = jsonutils.loads(res.body) - self.assertEqual(http_client.BAD_REQUEST, res.status_int) - 
self.assertEqual(http_client.BAD_REQUEST, - res_dict['badRequest']['code']) - self.assertEqual('Incorrect request body format.', - res_dict['badRequest']['message']) - - def test_import_record_with_no_body(self): - ctx = context.RequestContext(fake.USER_ID, fake.PROJECT_ID, - is_admin=True) - - req = webob.Request.blank('/v2/%s/backups/import_record' % - fake.PROJECT_ID) - req.body = jsonutils.dump_as_bytes(None) - req.method = 'POST' - req.headers['content-type'] = 'application/json' - - res = req.get_response(fakes.wsgi_app(fake_auth_context=ctx)) - res_dict = jsonutils.loads(res.body) - # verify that request is successful - self.assertEqual(http_client.BAD_REQUEST, res.status_int) - self.assertEqual(http_client.BAD_REQUEST, - res_dict['badRequest']['code']) - self.assertEqual("Missing required element 'backup-record' in " - "request body.", - res_dict['badRequest']['message']) - - @mock.patch('cinder.backup.rpcapi.BackupAPI.check_support_to_force_delete', - return_value=False) - def test_force_delete_with_not_supported_operation(self, - mock_check_support): - backup_id = self._create_backup(status=fields.BackupStatus.AVAILABLE) - backup = self.backup_api.get(self.context, backup_id) - self.assertRaises(exception.NotSupportedOperation, - self.backup_api.delete, self.context, backup, True) - - @ddt.data(False, True) - def test_show_incremental_backup(self, backup_from_snapshot): - volume_id = utils.create_volume(self.context, size=5).id - parent_backup_id = self._create_backup( - volume_id, status=fields.BackupStatus.AVAILABLE, - num_dependent_backups=1) - backup_id = self._create_backup(volume_id, - status=fields.BackupStatus.AVAILABLE, - incremental=True, - parent_id=parent_backup_id, - num_dependent_backups=1) - snapshot = None - snapshot_id = None - if backup_from_snapshot: - snapshot = utils.create_snapshot(self.context, - volume_id) - snapshot_id = snapshot.id - child_backup_id = self._create_backup( - volume_id, status=fields.BackupStatus.AVAILABLE, 
incremental=True, - parent_id=backup_id, snapshot_id=snapshot_id) - - req = webob.Request.blank('/v2/%s/backups/%s' % ( - fake.PROJECT_ID, backup_id)) - req.method = 'GET' - req.headers['Content-Type'] = 'application/json' - res = req.get_response(fakes.wsgi_app( - fake_auth_context=self.user_context)) - res_dict = jsonutils.loads(res.body) - - self.assertEqual(http_client.OK, res.status_int) - self.assertTrue(res_dict['backup']['is_incremental']) - self.assertTrue(res_dict['backup']['has_dependent_backups']) - self.assertIsNone(res_dict['backup']['snapshot_id']) - - req = webob.Request.blank('/v2/%s/backups/%s' % ( - fake.PROJECT_ID, parent_backup_id)) - req.method = 'GET' - req.headers['Content-Type'] = 'application/json' - res = req.get_response(fakes.wsgi_app( - fake_auth_context=self.user_context)) - res_dict = jsonutils.loads(res.body) - - self.assertEqual(http_client.OK, res.status_int) - self.assertFalse(res_dict['backup']['is_incremental']) - self.assertTrue(res_dict['backup']['has_dependent_backups']) - self.assertIsNone(res_dict['backup']['snapshot_id']) - - req = webob.Request.blank('/v2/%s/backups/%s' % ( - fake.PROJECT_ID, child_backup_id)) - req.method = 'GET' - req.headers['Content-Type'] = 'application/json' - res = req.get_response(fakes.wsgi_app( - fake_auth_context=self.user_context)) - res_dict = jsonutils.loads(res.body) - - self.assertEqual(http_client.OK, res.status_int) - self.assertTrue(res_dict['backup']['is_incremental']) - self.assertFalse(res_dict['backup']['has_dependent_backups']) - self.assertEqual(snapshot_id, res_dict['backup']['snapshot_id']) - - db.backup_destroy(context.get_admin_context(), child_backup_id) - db.backup_destroy(context.get_admin_context(), backup_id) - db.backup_destroy(context.get_admin_context(), parent_backup_id) - if snapshot: - snapshot.destroy() - db.volume_destroy(context.get_admin_context(), volume_id) diff --git a/cinder/tests/unit/api/contrib/test_capabilities.py 
b/cinder/tests/unit/api/contrib/test_capabilities.py deleted file mode 100644 index 3207c6a5f..000000000 --- a/cinder/tests/unit/api/contrib/test_capabilities.py +++ /dev/null @@ -1,130 +0,0 @@ -# Copyright (c) 2015 Hitachi Data Systems, Inc. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import mock - -import oslo_messaging - -from cinder.api.contrib import capabilities -from cinder import context -from cinder import exception -from cinder import test -from cinder.tests.unit.api import fakes -from cinder.tests.unit import fake_constants as fake - - -def rpcapi_get_capabilities(self, context, host, discover): - capabilities = dict( - vendor_name='OpenStack', - volume_backend_name='lvm', - pool_name='pool', - driver_version='2.0.0', - storage_protocol='iSCSI', - display_name='Capabilities of Cinder LVM driver', - description='These are volume type options provided by ' - 'Cinder LVM driver, blah, blah.', - replication_targets=[], - visibility='public', - properties=dict( - compression=dict( - title='Compression', - description='Enables compression.', - type='boolean'), - qos=dict( - title='QoS', - description='Enables QoS.', - type='boolean'), - replication=dict( - title='Replication', - description='Enables replication.', - type='boolean'), - thin_provisioning=dict( - title='Thin Provisioning', - description='Sets thin provisioning.', - type='boolean'), - ) - ) - return capabilities - - -class CapabilitiesAPITest(test.TestCase): - def 
setUp(self): - super(CapabilitiesAPITest, self).setUp() - self.flags(host='fake') - self.controller = capabilities.CapabilitiesController() - self.ctxt = context.RequestContext(fake.USER_ID, fake.PROJECT_ID, True) - - @mock.patch('cinder.db.service_get_all') - @mock.patch('cinder.volume.rpcapi.VolumeAPI.get_capabilities', - rpcapi_get_capabilities) - def test_capabilities_summary(self, mock_services): - mock_services.return_value = [{'name': 'fake', 'host': 'fake_host'}] - req = fakes.HTTPRequest.blank('/fake/capabilities/fake') - req.environ['cinder.context'] = self.ctxt - res = self.controller.show(req, 'fake') - - expected = { - 'namespace': 'OS::Storage::Capabilities::fake_host', - 'vendor_name': 'OpenStack', - 'volume_backend_name': 'lvm', - 'pool_name': 'pool', - 'driver_version': '2.0.0', - 'storage_protocol': 'iSCSI', - 'display_name': 'Capabilities of Cinder LVM driver', - 'description': 'These are volume type options provided by ' - 'Cinder LVM driver, blah, blah.', - 'visibility': 'public', - 'replication_targets': [], - 'properties': { - 'compression': { - 'title': 'Compression', - 'description': 'Enables compression.', - 'type': 'boolean'}, - 'qos': { - 'title': 'QoS', - 'description': 'Enables QoS.', - 'type': 'boolean'}, - 'replication': { - 'title': 'Replication', - 'description': 'Enables replication.', - 'type': 'boolean'}, - 'thin_provisioning': { - 'title': 'Thin Provisioning', - 'description': 'Sets thin provisioning.', - 'type': 'boolean'}, - } - } - - self.assertDictEqual(expected, res) - - @mock.patch('cinder.db.service_get_all') - @mock.patch('cinder.volume.rpcapi.VolumeAPI.get_capabilities') - def test_get_capabilities_rpc_timeout(self, mock_rpc, mock_services): - mock_rpc.side_effect = oslo_messaging.MessagingTimeout - mock_services.return_value = [{'name': 'fake'}] - - req = fakes.HTTPRequest.blank('/fake/capabilities/fake') - req.environ['cinder.context'] = self.ctxt - self.assertRaises(exception.RPCTimeout, - self.controller.show, req, 
'fake') - - @mock.patch('cinder.db.service_get_all') - def test_get_capabilities_service_not_found(self, mock_services): - mock_services.return_value = [] - - req = fakes.HTTPRequest.blank('/fake/capabilities/fake') - req.environ['cinder.context'] = self.ctxt - self.assertRaises(exception.NotFound, - self.controller.show, req, 'fake') diff --git a/cinder/tests/unit/api/contrib/test_cgsnapshots.py b/cinder/tests/unit/api/contrib/test_cgsnapshots.py deleted file mode 100644 index e7025b511..000000000 --- a/cinder/tests/unit/api/contrib/test_cgsnapshots.py +++ /dev/null @@ -1,531 +0,0 @@ -# Copyright (C) 2012 - 2014 EMC Corporation. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -""" -Tests for cgsnapshot code. 
-""" - -import mock -from oslo_serialization import jsonutils -from six.moves import http_client -import webob - -from cinder import context -from cinder import db -from cinder import exception -from cinder.group import api as groupAPI -from cinder import objects -from cinder import test -from cinder.tests.unit.api import fakes -from cinder.tests.unit import fake_constants as fake - -from cinder.tests.unit import utils -import cinder.volume - - -class CgsnapshotsAPITestCase(test.TestCase): - """Test Case for cgsnapshots API.""" - - def setUp(self): - super(CgsnapshotsAPITestCase, self).setUp() - self.volume_api = cinder.volume.API() - self.context = context.get_admin_context() - self.context.project_id = fake.PROJECT_ID - self.context.user_id = fake.USER_ID - self.user_ctxt = context.RequestContext( - fake.USER_ID, fake.PROJECT_ID, auth_token=True) - - def test_show_cgsnapshot(self): - vol_type = utils.create_volume_type(context.get_admin_context(), - self, name='my_vol_type') - consistencygroup = utils.create_group( - self.context, - group_type_id=fake.GROUP_TYPE_ID, - volume_type_ids=[vol_type['id']]) - volume_id = utils.create_volume(self.context, - volume_type_id=vol_type['id'], - group_id= - consistencygroup.id)['id'] - cgsnapshot = utils.create_group_snapshot( - self.context, group_id=consistencygroup.id, - group_type_id=fake.GROUP_TYPE_ID,) - snapshot_id = utils.create_snapshot( - self.context, - volume_type_id=vol_type['id'], - volume_id=volume_id, - group_snapshot_id=cgsnapshot.id)['id'] - - req = webob.Request.blank('/v2/%s/cgsnapshots/%s' % ( - fake.PROJECT_ID, cgsnapshot.id)) - req.method = 'GET' - req.headers['Content-Type'] = 'application/json' - res = req.get_response(fakes.wsgi_app( - fake_auth_context=self.user_ctxt)) - res_dict = jsonutils.loads(res.body) - - self.assertEqual(http_client.OK, res.status_int) - self.assertEqual('this is a test group snapshot', - res_dict['cgsnapshot']['description']) - - self.assertEqual('test_group_snapshot', - 
res_dict['cgsnapshot']['name']) - self.assertEqual('creating', res_dict['cgsnapshot']['status']) - - db.snapshot_destroy(context.get_admin_context(), snapshot_id) - cgsnapshot.destroy() - db.volume_destroy(context.get_admin_context(), volume_id) - consistencygroup.destroy() - - def test_show_cgsnapshot_with_cgsnapshot_NotFound(self): - req = webob.Request.blank('/v2/%s/cgsnapshots/%s' % ( - fake.PROJECT_ID, fake.WILL_NOT_BE_FOUND_ID)) - req.method = 'GET' - req.headers['Content-Type'] = 'application/json' - res = req.get_response(fakes.wsgi_app( - fake_auth_context=self.user_ctxt)) - res_dict = jsonutils.loads(res.body) - - self.assertEqual(http_client.NOT_FOUND, res.status_int) - self.assertEqual(http_client.NOT_FOUND, - res_dict['itemNotFound']['code']) - self.assertEqual('GroupSnapshot %s could not be found.' % - fake.WILL_NOT_BE_FOUND_ID, - res_dict['itemNotFound']['message']) - - def test_list_cgsnapshots_json(self): - vol_type = utils.create_volume_type(context.get_admin_context(), - self, name='my_vol_type') - consistencygroup = utils.create_group( - self.context, - group_type_id=fake.GROUP_TYPE_ID, - volume_type_ids=[vol_type['id']]) - volume_id = utils.create_volume(self.context, - volume_type_id=vol_type['id'], - group_id= - consistencygroup.id)['id'] - cgsnapshot1 = utils.create_group_snapshot( - self.context, group_id=consistencygroup.id, - group_type_id=fake.GROUP_TYPE_ID,) - cgsnapshot2 = utils.create_group_snapshot( - self.context, group_id=consistencygroup.id, - group_type_id=fake.GROUP_TYPE_ID,) - cgsnapshot3 = utils.create_group_snapshot( - self.context, group_id=consistencygroup.id, - group_type_id=fake.GROUP_TYPE_ID,) - - req = webob.Request.blank('/v2/%s/cgsnapshots' % fake.PROJECT_ID) - req.method = 'GET' - req.headers['Content-Type'] = 'application/json' - res = req.get_response(fakes.wsgi_app( - fake_auth_context=self.user_ctxt)) - res_dict = jsonutils.loads(res.body) - - self.assertEqual(http_client.OK, res.status_int) - 
self.assertEqual(cgsnapshot3.id, - res_dict['cgsnapshots'][0]['id']) - self.assertEqual('test_group_snapshot', - res_dict['cgsnapshots'][0]['name']) - self.assertEqual(cgsnapshot2.id, - res_dict['cgsnapshots'][1]['id']) - self.assertEqual('test_group_snapshot', - res_dict['cgsnapshots'][1]['name']) - self.assertEqual(cgsnapshot1.id, - res_dict['cgsnapshots'][2]['id']) - self.assertEqual('test_group_snapshot', - res_dict['cgsnapshots'][2]['name']) - - cgsnapshot3.destroy() - cgsnapshot2.destroy() - cgsnapshot1.destroy() - db.volume_destroy(context.get_admin_context(), volume_id) - consistencygroup.destroy() - - def test_list_cgsnapshots_detail_json(self): - vol_type = utils.create_volume_type(context.get_admin_context(), - self, name='my_vol_type') - consistencygroup = utils.create_group( - self.context, - group_type_id=fake.GROUP_TYPE_ID, - volume_type_ids=[vol_type['id']]) - volume_id = utils.create_volume(self.context, - volume_type_id=vol_type['id'], - group_id= - consistencygroup.id)['id'] - cgsnapshot1 = utils.create_group_snapshot( - self.context, group_id=consistencygroup.id, - group_type_id=fake.GROUP_TYPE_ID,) - cgsnapshot2 = utils.create_group_snapshot( - self.context, group_id=consistencygroup.id, - group_type_id=fake.GROUP_TYPE_ID,) - cgsnapshot3 = utils.create_group_snapshot( - self.context, group_id=consistencygroup.id, - group_type_id=fake.GROUP_TYPE_ID,) - - req = webob.Request.blank('/v2/%s/cgsnapshots/detail' % - fake.PROJECT_ID) - req.method = 'GET' - req.headers['Content-Type'] = 'application/json' - req.headers['Accept'] = 'application/json' - res = req.get_response(fakes.wsgi_app( - fake_auth_context=self.user_ctxt)) - res_dict = jsonutils.loads(res.body) - - self.assertEqual(http_client.OK, res.status_int) - self.assertEqual('this is a test group snapshot', - res_dict['cgsnapshots'][0]['description']) - self.assertEqual('test_group_snapshot', - res_dict['cgsnapshots'][0]['name']) - self.assertEqual(cgsnapshot3.id, - 
res_dict['cgsnapshots'][0]['id']) - self.assertEqual('creating', - res_dict['cgsnapshots'][0]['status']) - - self.assertEqual('this is a test group snapshot', - res_dict['cgsnapshots'][1]['description']) - self.assertEqual('test_group_snapshot', - res_dict['cgsnapshots'][1]['name']) - self.assertEqual(cgsnapshot2.id, - res_dict['cgsnapshots'][1]['id']) - self.assertEqual('creating', - res_dict['cgsnapshots'][1]['status']) - - self.assertEqual('this is a test group snapshot', - res_dict['cgsnapshots'][2]['description']) - self.assertEqual('test_group_snapshot', - res_dict['cgsnapshots'][2]['name']) - self.assertEqual(cgsnapshot1.id, - res_dict['cgsnapshots'][2]['id']) - self.assertEqual('creating', - res_dict['cgsnapshots'][2]['status']) - - cgsnapshot3.destroy() - cgsnapshot2.destroy() - cgsnapshot1.destroy() - db.volume_destroy(context.get_admin_context(), volume_id) - consistencygroup.destroy() - - @mock.patch( - 'cinder.api.openstack.wsgi.Controller.validate_name_and_description') - def test_create_cgsnapshot_json(self, mock_validate): - vol_type = utils.create_volume_type(context.get_admin_context(), - self, name='my_vol_type') - consistencygroup = utils.create_group( - self.context, - group_type_id=fake.GROUP_TYPE_ID, - volume_type_ids=[vol_type['id']]) - volume_id = utils.create_volume(self.context, - volume_type_id=vol_type['id'], - group_id= - consistencygroup.id)['id'] - - body = {"cgsnapshot": {"name": "cg1", - "description": - "CG Snapshot 1", - "consistencygroup_id": consistencygroup.id}} - req = webob.Request.blank('/v2/%s/cgsnapshots' % fake.PROJECT_ID) - req.method = 'POST' - req.headers['Content-Type'] = 'application/json' - req.body = jsonutils.dump_as_bytes(body) - res = req.get_response(fakes.wsgi_app( - fake_auth_context=self.user_ctxt)) - - res_dict = jsonutils.loads(res.body) - - self.assertEqual(http_client.ACCEPTED, res.status_int) - self.assertIn('id', res_dict['cgsnapshot']) - self.assertTrue(mock_validate.called) - - cgsnapshot = 
objects.GroupSnapshot.get_by_id( - context.get_admin_context(), res_dict['cgsnapshot']['id']) - cgsnapshot.destroy() - db.volume_destroy(context.get_admin_context(), volume_id) - consistencygroup.destroy() - - @mock.patch( - 'cinder.api.openstack.wsgi.Controller.validate_name_and_description') - def test_create_cgsnapshot_when_volume_in_error_status(self, - mock_validate): - vol_type = utils.create_volume_type(context.get_admin_context(), - self, name='my_vol_type') - consistencygroup = utils.create_group( - self.context, - group_type_id=fake.GROUP_TYPE_ID, - volume_type_ids=[vol_type['id']]) - volume_id = utils.create_volume(self.context, - volume_type_id=vol_type['id'], - group_id=consistencygroup.id, - status='error')['id'] - - body = {"cgsnapshot": {"name": "cg1", - "description": - "CG Snapshot 1", - "consistencygroup_id": consistencygroup.id}} - req = webob.Request.blank('/v2/%s/cgsnapshots' % fake.PROJECT_ID) - req.method = 'POST' - req.headers['Content-Type'] = 'application/json' - req.body = jsonutils.dump_as_bytes(body) - res = req.get_response(fakes.wsgi_app( - fake_auth_context=self.user_ctxt)) - res_dict = jsonutils.loads(res.body) - - self.assertEqual(http_client.BAD_REQUEST, res.status_int) - self.assertEqual(http_client.BAD_REQUEST, - res_dict['badRequest']['code']) - self.assertEqual( - "Invalid volume: The snapshot cannot be created when the volume " - "is in error status.", - res_dict['badRequest']['message'] - ) - self.assertTrue(mock_validate.called) - - db.volume_destroy(context.get_admin_context(), volume_id) - consistencygroup.destroy() - - def test_create_cgsnapshot_with_no_body(self): - # omit body from the request - req = webob.Request.blank('/v2/%s/cgsnapshots' % fake.PROJECT_ID) - req.body = jsonutils.dump_as_bytes(None) - req.method = 'POST' - req.headers['Content-Type'] = 'application/json' - req.headers['Accept'] = 'application/json' - res = req.get_response(fakes.wsgi_app( - fake_auth_context=self.user_ctxt)) - res_dict = 
jsonutils.loads(res.body) - - self.assertEqual(http_client.BAD_REQUEST, res.status_int) - self.assertEqual(http_client.BAD_REQUEST, - res_dict['badRequest']['code']) - self.assertEqual("Missing required element 'cgsnapshot' in " - "request body.", - res_dict['badRequest']['message']) - - @mock.patch.object(groupAPI.API, 'create_group_snapshot', - side_effect=exception.InvalidGroupSnapshot( - reason='invalid group_snapshot')) - def test_create_with_invalid_cgsnapshot(self, mock_create_cgsnapshot): - vol_type = utils.create_volume_type(context.get_admin_context(), - self, name='my_vol_type') - consistencygroup = utils.create_group( - self.context, - group_type_id=fake.GROUP_TYPE_ID, - volume_type_ids=[vol_type['id']]) - volume_id = utils.create_volume(self.context, - volume_type_id=vol_type['id'], - group_id=consistencygroup.id)['id'] - - body = {"cgsnapshot": {"name": "cg1", - "description": - "CG Snapshot 1", - "consistencygroup_id": consistencygroup.id}} - req = webob.Request.blank('/v2/%s/cgsnapshots' % fake.PROJECT_ID) - req.body = jsonutils.dump_as_bytes(body) - req.method = 'POST' - req.headers['Content-Type'] = 'application/json' - res = req.get_response(fakes.wsgi_app( - fake_auth_context=self.user_ctxt)) - res_dict = jsonutils.loads(res.body) - - self.assertEqual(http_client.BAD_REQUEST, res.status_int) - self.assertEqual(http_client.BAD_REQUEST, - res_dict['badRequest']['code']) - self.assertEqual('Invalid GroupSnapshot: invalid group_snapshot', - res_dict['badRequest']['message']) - - db.volume_destroy(context.get_admin_context(), volume_id) - consistencygroup.destroy() - - @mock.patch.object(groupAPI.API, 'create_group_snapshot', - side_effect=exception.GroupSnapshotNotFound( - group_snapshot_id='invalid_id')) - def test_create_with_cgsnapshot_not_found(self, mock_create_cgsnapshot): - vol_type = utils.create_volume_type(context.get_admin_context(), - self, name='my_vol_type') - consistencygroup = utils.create_group( - self.context, - 
group_type_id=fake.GROUP_TYPE_ID, - volume_type_ids=[vol_type['id']]) - volume_id = utils.create_volume(self.context, - volume_type_id=vol_type['id'], - group_id=consistencygroup.id)['id'] - - body = {"cgsnapshot": {"name": "cg1", - "description": - "CG Snapshot 1", - "consistencygroup_id": consistencygroup.id}} - - req = webob.Request.blank('/v2/%s/cgsnapshots' % fake.PROJECT_ID) - req.method = 'POST' - req.headers['Content-Type'] = 'application/json' - req.body = jsonutils.dump_as_bytes(body) - res = req.get_response(fakes.wsgi_app( - fake_auth_context=self.user_ctxt)) - res_dict = jsonutils.loads(res.body) - - self.assertEqual(http_client.NOT_FOUND, res.status_int) - self.assertEqual(http_client.NOT_FOUND, - res_dict['itemNotFound']['code']) - self.assertEqual('GroupSnapshot invalid_id could not be found.', - res_dict['itemNotFound']['message']) - - db.volume_destroy(context.get_admin_context(), volume_id) - consistencygroup.destroy() - - def test_create_cgsnapshot_from_empty_consistencygroup(self): - vol_type = utils.create_volume_type(context.get_admin_context(), - self, name='my_vol_type') - consistencygroup = utils.create_group( - self.context, - group_type_id=fake.GROUP_TYPE_ID, - volume_type_ids=[vol_type['id']]) - - body = {"cgsnapshot": {"name": "cg1", - "description": - "CG Snapshot 1", - "consistencygroup_id": consistencygroup.id}} - - req = webob.Request.blank('/v2/%s/cgsnapshots' % fake.PROJECT_ID) - req.method = 'POST' - req.headers['Content-Type'] = 'application/json' - req.body = jsonutils.dump_as_bytes(body) - res = req.get_response(fakes.wsgi_app( - fake_auth_context=self.user_ctxt)) - res_dict = jsonutils.loads(res.body) - - self.assertEqual(http_client.BAD_REQUEST, res.status_int) - self.assertEqual(http_client.BAD_REQUEST, - res_dict['badRequest']['code']) - self.assertIsNotNone(res_dict['badRequest']['message']) - - # If failed to create cgsnapshot, its DB object should not be created - self.assertListEqual( - [], - 
list(objects.GroupSnapshotList.get_all(self.context))) - consistencygroup.destroy() - - def test_delete_cgsnapshot_available(self): - vol_type = utils.create_volume_type(context.get_admin_context(), - self, name='my_vol_type') - consistencygroup = utils.create_group( - self.context, - group_type_id=fake.GROUP_TYPE_ID, - volume_type_ids=[vol_type['id']]) - volume_id = utils.create_volume(self.context, - volume_type_id=vol_type['id'], - group_id= - consistencygroup.id)['id'] - cgsnapshot = utils.create_group_snapshot( - self.context, group_id=consistencygroup.id, - group_type_id=fake.GROUP_TYPE_ID, - status='available') - req = webob.Request.blank('/v2/%s/cgsnapshots/%s' % - (fake.PROJECT_ID, cgsnapshot.id)) - req.method = 'DELETE' - req.headers['Content-Type'] = 'application/json' - res = req.get_response(fakes.wsgi_app( - fake_auth_context=self.user_ctxt)) - - cgsnapshot = objects.GroupSnapshot.get_by_id(self.context, - cgsnapshot.id) - self.assertEqual(http_client.ACCEPTED, res.status_int) - self.assertEqual('deleting', cgsnapshot.status) - - cgsnapshot.destroy() - db.volume_destroy(context.get_admin_context(), volume_id) - consistencygroup.destroy() - - def test_delete_cgsnapshot_available_used_as_source(self): - vol_type = utils.create_volume_type(context.get_admin_context(), - self, name='my_vol_type') - consistencygroup = utils.create_group( - self.context, - group_type_id=fake.GROUP_TYPE_ID, - volume_type_ids=[vol_type['id']]) - volume_id = utils.create_volume(self.context, - volume_type_id=vol_type['id'], - group_id= - consistencygroup.id)['id'] - cgsnapshot = utils.create_group_snapshot( - self.context, group_id=consistencygroup.id, - group_type_id=fake.GROUP_TYPE_ID, - status='available') - - cg2 = utils.create_consistencygroup( - self.context, status='creating', - group_snapshot_id=cgsnapshot.id, - group_type_id=fake.GROUP_TYPE_ID) - req = webob.Request.blank('/v2/fake/cgsnapshots/%s' % - cgsnapshot.id) - req.method = 'DELETE' - 
req.headers['Content-Type'] = 'application/json' - res = req.get_response(fakes.wsgi_app()) - - cgsnapshot = objects.GroupSnapshot.get_by_id(self.context, - cgsnapshot.id) - self.assertEqual(http_client.BAD_REQUEST, res.status_int) - self.assertEqual('available', cgsnapshot.status) - - cgsnapshot.destroy() - db.volume_destroy(context.get_admin_context(), volume_id) - consistencygroup.destroy() - cg2.destroy() - - def test_delete_cgsnapshot_with_cgsnapshot_NotFound(self): - req = webob.Request.blank('/v2/%s/cgsnapshots/%s' % - (fake.PROJECT_ID, fake.WILL_NOT_BE_FOUND_ID)) - req.method = 'DELETE' - req.headers['Content-Type'] = 'application/json' - res = req.get_response(fakes.wsgi_app( - fake_auth_context=self.user_ctxt)) - res_dict = jsonutils.loads(res.body) - - self.assertEqual(http_client.NOT_FOUND, res.status_int) - self.assertEqual(http_client.NOT_FOUND, - res_dict['itemNotFound']['code']) - self.assertEqual('GroupSnapshot %s could not be found.' % - fake.WILL_NOT_BE_FOUND_ID, - res_dict['itemNotFound']['message']) - - def test_delete_cgsnapshot_with_invalid_cgsnapshot(self): - vol_type = utils.create_volume_type(context.get_admin_context(), - self, name='my_vol_type') - consistencygroup = utils.create_group( - self.context, - group_type_id=fake.GROUP_TYPE_ID, - volume_type_ids=[vol_type['id']]) - volume_id = utils.create_volume(self.context, - volume_type_id=vol_type['id'], - group_id= - consistencygroup.id)['id'] - cgsnapshot = utils.create_group_snapshot( - self.context, group_id=consistencygroup.id, - group_type_id=fake.GROUP_TYPE_ID, - status='invalid') - - req = webob.Request.blank('/v2/%s/cgsnapshots/%s' % ( - fake.PROJECT_ID, cgsnapshot.id)) - req.method = 'DELETE' - req.headers['Content-Type'] = 'application/json' - res = req.get_response(fakes.wsgi_app( - fake_auth_context=self.user_ctxt)) - res_dict = jsonutils.loads(res.body) - - self.assertEqual(http_client.BAD_REQUEST, res.status_int) - self.assertEqual(http_client.BAD_REQUEST, - 
res_dict['badRequest']['code']) - self.assertIsNotNone(res_dict['badRequest']['message']) - - cgsnapshot.destroy() - db.volume_destroy(context.get_admin_context(), volume_id) - consistencygroup.destroy() diff --git a/cinder/tests/unit/api/contrib/test_consistencygroups.py b/cinder/tests/unit/api/contrib/test_consistencygroups.py deleted file mode 100644 index 82e75ac36..000000000 --- a/cinder/tests/unit/api/contrib/test_consistencygroups.py +++ /dev/null @@ -1,1536 +0,0 @@ -# Copyright (C) 2012 - 2014 EMC Corporation. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -""" -Tests for consistency group code. 
-""" - -import ddt -import mock -from oslo_serialization import jsonutils -from six.moves import http_client -import webob - -from cinder import context -from cinder import db -from cinder import exception -import cinder.group -from cinder.i18n import _ -from cinder import objects -from cinder.objects import fields -from cinder import test -from cinder.tests.unit.api import fakes -from cinder.tests.unit.api.v2 import fakes as v2_fakes -from cinder.tests.unit import fake_constants as fake -from cinder.tests.unit import utils -from cinder.volume import api as volume_api - - -@ddt.ddt -class ConsistencyGroupsAPITestCase(test.TestCase): - """Test Case for consistency groups API.""" - - def setUp(self): - super(ConsistencyGroupsAPITestCase, self).setUp() - self.cg_api = cinder.group.API() - self.ctxt = context.RequestContext(fake.USER_ID, fake.PROJECT_ID, - auth_token=True, - is_admin=True) - self.user_ctxt = context.RequestContext( - fake.USER_ID, fake.PROJECT_ID, auth_token=True) - - def _create_consistencygroup( - self, - ctxt=None, - name='test_consistencygroup', - user_id=fake.USER_ID, - project_id=fake.PROJECT_ID, - description='this is a test consistency group', - group_type_id=fake.GROUP_TYPE_ID, - volume_type_ids=[fake.VOLUME_TYPE_ID], - availability_zone='az1', - host='fakehost', - status=fields.ConsistencyGroupStatus.CREATING, - **kwargs): - """Create a consistency group object.""" - ctxt = ctxt or self.ctxt - consistencygroup = objects.Group(ctxt) - consistencygroup.user_id = user_id - consistencygroup.project_id = project_id - consistencygroup.availability_zone = availability_zone - consistencygroup.name = name - consistencygroup.description = description - consistencygroup.group_type_id = group_type_id - consistencygroup.volume_type_ids = volume_type_ids - consistencygroup.host = host - consistencygroup.status = status - consistencygroup.update(kwargs) - consistencygroup.create() - return consistencygroup - - def test_show_consistencygroup(self): - 
vol_type = utils.create_volume_type(context.get_admin_context(), - self, name='my_vol_type') - consistencygroup = self._create_consistencygroup( - volume_type_ids=[vol_type['id']]) - req = webob.Request.blank('/v2/%s/consistencygroups/%s' % - (fake.PROJECT_ID, consistencygroup.id)) - req.method = 'GET' - req.headers['Content-Type'] = 'application/json' - res = req.get_response(fakes.wsgi_app( - fake_auth_context=self.user_ctxt)) - res_dict = jsonutils.loads(res.body) - - consistencygroup.destroy() - - self.assertEqual(http_client.OK, res.status_int) - self.assertEqual('az1', - res_dict['consistencygroup']['availability_zone']) - self.assertEqual('this is a test consistency group', - res_dict['consistencygroup']['description']) - self.assertEqual('test_consistencygroup', - res_dict['consistencygroup']['name']) - self.assertEqual('creating', - res_dict['consistencygroup']['status']) - self.assertEqual([vol_type['id']], - res_dict['consistencygroup']['volume_types']) - - def test_show_consistencygroup_with_consistencygroup_NotFound(self): - req = webob.Request.blank('/v2/%s/consistencygroups/%s' % - (fake.PROJECT_ID, fake.WILL_NOT_BE_FOUND_ID)) - req.method = 'GET' - req.headers['Content-Type'] = 'application/json' - res = req.get_response(fakes.wsgi_app( - fake_auth_context=self.user_ctxt)) - res_dict = jsonutils.loads(res.body) - - self.assertEqual(http_client.NOT_FOUND, res.status_int) - self.assertEqual(http_client.NOT_FOUND, - res_dict['itemNotFound']['code']) - self.assertEqual('Group %s could not be found.' 
% - fake.WILL_NOT_BE_FOUND_ID, - res_dict['itemNotFound']['message']) - - def test_show_consistencygroup_with_null_volume_type(self): - consistencygroup = self._create_consistencygroup(volume_type_id=None) - req = webob.Request.blank('/v2/%s/consistencygroups/%s' % - (fake.PROJECT_ID, consistencygroup.id)) - req.method = 'GET' - req.headers['Content-Type'] = 'application/json' - res = req.get_response(fakes.wsgi_app( - fake_auth_context=self.user_ctxt)) - res_dict = jsonutils.loads(res.body) - - self.assertEqual(http_client.OK, res.status_int) - self.assertEqual('az1', - res_dict['consistencygroup']['availability_zone']) - self.assertEqual('this is a test consistency group', - res_dict['consistencygroup']['description']) - self.assertEqual('test_consistencygroup', - res_dict['consistencygroup']['name']) - self.assertEqual('creating', - res_dict['consistencygroup']['status']) - self.assertEqual([], res_dict['consistencygroup']['volume_types']) - - consistencygroup.destroy() - - @ddt.data(2, 3) - def test_list_consistencygroups_json(self, version): - consistencygroup1 = self._create_consistencygroup() - consistencygroup2 = self._create_consistencygroup() - consistencygroup3 = self._create_consistencygroup() - - req = webob.Request.blank('/v%(version)s/%(project_id)s/' - 'consistencygroups' - % {'version': version, - 'project_id': fake.PROJECT_ID}) - req.method = 'GET' - req.headers['Content-Type'] = 'application/json' - res = req.get_response(fakes.wsgi_app( - fake_auth_context=self.user_ctxt)) - res_dict = jsonutils.loads(res.body) - - self.assertEqual(http_client.OK, res.status_int) - self.assertEqual(consistencygroup3.id, - res_dict['consistencygroups'][0]['id']) - self.assertEqual('test_consistencygroup', - res_dict['consistencygroups'][0]['name']) - self.assertEqual(consistencygroup2.id, - res_dict['consistencygroups'][1]['id']) - self.assertEqual('test_consistencygroup', - res_dict['consistencygroups'][1]['name']) - self.assertEqual(consistencygroup1.id, - 
res_dict['consistencygroups'][2]['id']) - self.assertEqual('test_consistencygroup', - res_dict['consistencygroups'][2]['name']) - - consistencygroup1.destroy() - consistencygroup2.destroy() - consistencygroup3.destroy() - - @ddt.data(False, True) - def test_list_consistencygroups_with_limit(self, is_detail): - consistencygroup1 = self._create_consistencygroup() - consistencygroup2 = self._create_consistencygroup() - consistencygroup3 = self._create_consistencygroup() - url = '/v2/%s/consistencygroups?limit=1' % fake.PROJECT_ID - if is_detail: - url = '/v2/%s/consistencygroups/detail?limit=1' % fake.PROJECT_ID - req = webob.Request.blank(url) - req.method = 'GET' - req.headers['Content-Type'] = 'application/json' - res = req.get_response(fakes.wsgi_app( - fake_auth_context=self.user_ctxt)) - res_dict = jsonutils.loads(res.body) - - self.assertEqual(http_client.OK, res.status_int) - self.assertEqual(1, len(res_dict['consistencygroups'])) - self.assertEqual(consistencygroup3.id, - res_dict['consistencygroups'][0]['id']) - next_link = ( - 'http://localhost/v2/%s/consistencygroups?limit=' - '1&marker=%s' % - (fake.PROJECT_ID, res_dict['consistencygroups'][0]['id'])) - self.assertEqual(next_link, - res_dict['consistencygroup_links'][0]['href']) - consistencygroup1.destroy() - consistencygroup2.destroy() - consistencygroup3.destroy() - - @ddt.data(False, True) - def test_list_consistencygroups_with_offset(self, is_detail): - consistencygroup1 = self._create_consistencygroup() - consistencygroup2 = self._create_consistencygroup() - consistencygroup3 = self._create_consistencygroup() - url = '/v2/%s/consistencygroups?offset=1' % fake.PROJECT_ID - if is_detail: - url = '/v2/%s/consistencygroups/detail?offset=1' % fake.PROJECT_ID - req = webob.Request.blank(url) - req.method = 'GET' - req.headers['Content-Type'] = 'application/json' - res = req.get_response(fakes.wsgi_app( - fake_auth_context=self.user_ctxt)) - res_dict = jsonutils.loads(res.body) - - 
self.assertEqual(http_client.OK, res.status_int) - self.assertEqual(2, len(res_dict['consistencygroups'])) - self.assertEqual(consistencygroup2.id, - res_dict['consistencygroups'][0]['id']) - self.assertEqual(consistencygroup1.id, - res_dict['consistencygroups'][1]['id']) - consistencygroup1.destroy() - consistencygroup2.destroy() - consistencygroup3.destroy() - - @ddt.data(False, True) - def test_list_consistencygroups_with_offset_out_of_range(self, is_detail): - url = ('/v2/%s/consistencygroups?offset=234523423455454' % - fake.PROJECT_ID) - if is_detail: - url = ('/v2/%s/consistencygroups/detail?offset=234523423455454' % - fake.PROJECT_ID) - req = webob.Request.blank(url) - req.method = 'GET' - req.headers['Content-Type'] = 'application/json' - res = req.get_response(fakes.wsgi_app( - fake_auth_context=self.user_ctxt)) - self.assertEqual(http_client.BAD_REQUEST, res.status_int) - - @ddt.data(False, True) - def test_list_consistencygroups_with_limit_and_offset(self, is_detail): - consistencygroup1 = self._create_consistencygroup() - consistencygroup2 = self._create_consistencygroup() - consistencygroup3 = self._create_consistencygroup() - url = '/v2/%s/consistencygroups?limit=2&offset=1' % fake.PROJECT_ID - if is_detail: - url = ('/v2/%s/consistencygroups/detail?limit=2&offset=1' % - fake.PROJECT_ID) - req = webob.Request.blank(url) - req.method = 'GET' - req.headers['Content-Type'] = 'application/json' - res = req.get_response(fakes.wsgi_app( - fake_auth_context=self.user_ctxt)) - res_dict = jsonutils.loads(res.body) - - self.assertEqual(http_client.OK, res.status_int) - self.assertEqual(2, len(res_dict['consistencygroups'])) - self.assertEqual(consistencygroup2.id, - res_dict['consistencygroups'][0]['id']) - self.assertEqual(consistencygroup1.id, - res_dict['consistencygroups'][1]['id']) - consistencygroup1.destroy() - consistencygroup2.destroy() - consistencygroup3.destroy() - - @ddt.data(False, True) - def test_list_consistencygroups_with_filter(self, 
is_detail): - consistencygroup1 = self._create_consistencygroup() - consistencygroup2 = self._create_consistencygroup() - common_ctxt = context.RequestContext(fake.USER_ID, fake.PROJECT_ID, - auth_token=True, - is_admin=False) - consistencygroup3 = self._create_consistencygroup(ctxt=common_ctxt) - url = ('/v2/%s/consistencygroups?' - 'all_tenants=True&id=%s') % (fake.PROJECT_ID, - consistencygroup3.id) - if is_detail: - url = ('/v2/%s/consistencygroups/detail?' - 'all_tenants=True&id=%s') % (fake.PROJECT_ID, - consistencygroup3.id) - req = webob.Request.blank(url) - req.method = 'GET' - req.headers['Content-Type'] = 'application/json' - res = req.get_response(fakes.wsgi_app(fake_auth_context=self.ctxt)) - res_dict = jsonutils.loads(res.body) - - self.assertEqual(http_client.OK, res.status_int) - self.assertEqual(1, len(res_dict['consistencygroups'])) - self.assertEqual(consistencygroup3.id, - res_dict['consistencygroups'][0]['id']) - consistencygroup1.destroy() - consistencygroup2.destroy() - consistencygroup3.destroy() - - @ddt.data(False, True) - def test_list_consistencygroups_with_project_id(self, is_detail): - consistencygroup1 = self._create_consistencygroup() - consistencygroup2 = self._create_consistencygroup( - name="group", project_id=fake.PROJECT2_ID) - - url = ('/v2/%s/consistencygroups?' - 'all_tenants=True&project_id=%s') % (fake.PROJECT_ID, - fake.PROJECT2_ID) - if is_detail: - url = ('/v2/%s/consistencygroups/detail?' 
- 'all_tenants=True&project_id=%s') % (fake.PROJECT_ID, - fake.PROJECT2_ID) - req = webob.Request.blank(url) - req.method = 'GET' - req.headers['Content-Type'] = 'application/json' - res = req.get_response(fakes.wsgi_app(fake_auth_context=self.ctxt)) - res_dict = jsonutils.loads(res.body) - self.assertEqual(200, res.status_int) - self.assertEqual(1, len(res_dict['consistencygroups'])) - self.assertEqual("group", - res_dict['consistencygroups'][0]['name']) - consistencygroup1.destroy() - consistencygroup2.destroy() - - @ddt.data(False, True) - def test_list_consistencygroups_with_sort(self, is_detail): - consistencygroup1 = self._create_consistencygroup() - consistencygroup2 = self._create_consistencygroup() - consistencygroup3 = self._create_consistencygroup() - url = '/v2/%s/consistencygroups?sort=id:asc' % fake.PROJECT_ID - if is_detail: - url = ('/v2/%s/consistencygroups/detail?sort=id:asc' % - fake.PROJECT_ID) - req = webob.Request.blank(url) - req.method = 'GET' - req.headers['Content-Type'] = 'application/json' - res = req.get_response(fakes.wsgi_app( - fake_auth_context=self.user_ctxt)) - res_dict = jsonutils.loads(res.body) - expect_result = [consistencygroup1.id, consistencygroup2.id, - consistencygroup3.id] - expect_result.sort() - - self.assertEqual(http_client.OK, res.status_int) - self.assertEqual(3, len(res_dict['consistencygroups'])) - self.assertEqual(expect_result[0], - res_dict['consistencygroups'][0]['id']) - self.assertEqual(expect_result[1], - res_dict['consistencygroups'][1]['id']) - self.assertEqual(expect_result[2], - res_dict['consistencygroups'][2]['id']) - consistencygroup1.destroy() - consistencygroup2.destroy() - consistencygroup3.destroy() - - def test_list_consistencygroups_detail_json(self): - vol_type1 = utils.create_volume_type(context.get_admin_context(), - self, name='my_vol_type1') - vol_type2 = utils.create_volume_type(context.get_admin_context(), - self, name='my_vol_type2') - consistencygroup1 = self._create_consistencygroup( 
- volume_type_ids=[vol_type1['id']]) - consistencygroup2 = self._create_consistencygroup( - volume_type_ids=[vol_type1['id']]) - consistencygroup3 = self._create_consistencygroup( - volume_type_ids=[vol_type1['id'], vol_type2['id']]) - req = webob.Request.blank('/v2/%s/consistencygroups/detail' % - fake.PROJECT_ID) - req.method = 'GET' - req.headers['Content-Type'] = 'application/json' - req.headers['Accept'] = 'application/json' - res = req.get_response(fakes.wsgi_app( - fake_auth_context=self.user_ctxt)) - res_dict = jsonutils.loads(res.body) - - cg_ids = [consistencygroup1.id, consistencygroup2.id, - consistencygroup3.id] - vol_type_ids = [vol_type1['id'], vol_type2['id']] - - consistencygroup1.destroy() - consistencygroup2.destroy() - consistencygroup3.destroy() - - self.assertEqual(http_client.OK, res.status_int) - self.assertEqual('az1', - res_dict['consistencygroups'][0]['availability_zone']) - self.assertEqual('this is a test consistency group', - res_dict['consistencygroups'][0]['description']) - self.assertEqual('test_consistencygroup', - res_dict['consistencygroups'][0]['name']) - self.assertIn(res_dict['consistencygroups'][0]['id'], cg_ids) - self.assertEqual('creating', - res_dict['consistencygroups'][0]['status']) - for vol_type_id in res_dict['consistencygroups'][0]['volume_types']: - self.assertIn(vol_type_id, vol_type_ids) - - self.assertEqual('az1', - res_dict['consistencygroups'][1]['availability_zone']) - self.assertEqual('this is a test consistency group', - res_dict['consistencygroups'][1]['description']) - self.assertEqual('test_consistencygroup', - res_dict['consistencygroups'][1]['name']) - self.assertIn(res_dict['consistencygroups'][0]['id'], cg_ids) - self.assertEqual('creating', - res_dict['consistencygroups'][1]['status']) - for vol_type_id in res_dict['consistencygroups'][1]['volume_types']: - self.assertIn(vol_type_id, vol_type_ids) - - self.assertEqual('az1', - res_dict['consistencygroups'][2]['availability_zone']) - 
self.assertEqual('this is a test consistency group', - res_dict['consistencygroups'][2]['description']) - self.assertEqual('test_consistencygroup', - res_dict['consistencygroups'][2]['name']) - self.assertIn(res_dict['consistencygroups'][0]['id'], cg_ids) - self.assertEqual('creating', - res_dict['consistencygroups'][2]['status']) - for vol_type_id in res_dict['consistencygroups'][2]['volume_types']: - self.assertIn(vol_type_id, vol_type_ids) - - @mock.patch( - 'cinder.api.openstack.wsgi.Controller.validate_name_and_description') - def test_create_consistencygroup_json(self, mock_validate): - group_id = fake.CONSISTENCY_GROUP_ID - - # Create volume type - vol_type = 'test' - vol_type_id = db.volume_type_create( - self.ctxt, {'name': vol_type, 'extra_specs': {}})['id'] - - body = {"consistencygroup": {"name": "cg1", - "volume_types": vol_type_id, - "description": - "Consistency Group 1", }} - req = webob.Request.blank('/v2/%s/consistencygroups' % fake.PROJECT_ID) - req.method = 'POST' - req.headers['Content-Type'] = 'application/json' - req.body = jsonutils.dump_as_bytes(body) - res = req.get_response(fakes.wsgi_app( - fake_auth_context=self.user_ctxt)) - res_dict = jsonutils.loads(res.body) - - self.assertEqual(http_client.ACCEPTED, res.status_int) - self.assertIn('id', res_dict['consistencygroup']) - self.assertTrue(mock_validate.called) - - group_id = res_dict['consistencygroup']['id'] - cg = objects.Group.get_by_id(self.ctxt, group_id) - - cg.destroy() - - def test_create_consistencygroup_with_no_body(self): - # omit body from the request - req = webob.Request.blank('/v2/%s/consistencygroups' % fake.PROJECT_ID) - req.body = jsonutils.dump_as_bytes(None) - req.method = 'POST' - req.headers['Content-Type'] = 'application/json' - req.headers['Accept'] = 'application/json' - res = req.get_response(fakes.wsgi_app( - fake_auth_context=self.user_ctxt)) - res_dict = jsonutils.loads(res.body) - - self.assertEqual(http_client.BAD_REQUEST, res.status_int) - 
self.assertEqual(http_client.BAD_REQUEST, - res_dict['badRequest']['code']) - self.assertEqual("Missing required element 'consistencygroup' in " - "request body.", - res_dict['badRequest']['message']) - - def test_delete_consistencygroup_available(self): - consistencygroup = self._create_consistencygroup( - status=fields.ConsistencyGroupStatus.AVAILABLE) - req = webob.Request.blank('/v2/%s/consistencygroups/%s/delete' % - (fake.PROJECT_ID, consistencygroup.id)) - req.method = 'POST' - req.headers['Content-Type'] = 'application/json' - req.body = jsonutils.dump_as_bytes({}) - res = req.get_response(fakes.wsgi_app()) - - consistencygroup = objects.Group.get_by_id( - self.ctxt, consistencygroup.id) - self.assertEqual(http_client.ACCEPTED, res.status_int) - self.assertEqual('deleting', consistencygroup.status) - - consistencygroup.destroy() - - def test_delete_consistencygroup_available_used_as_source_success(self): - consistencygroup = self._create_consistencygroup( - status=fields.ConsistencyGroupStatus.AVAILABLE) - req = webob.Request.blank('/v2/%s/consistencygroups/%s/delete' % - (fake.PROJECT_ID, consistencygroup.id)) - # The other CG used the first CG as source, but it's no longer in - # creating status, so we should be able to delete it. 
- cg2 = self._create_consistencygroup( - status=fields.ConsistencyGroupStatus.AVAILABLE, - source_cgid=consistencygroup.id) - req.method = 'POST' - req.headers['Content-Type'] = 'application/json' - req.body = jsonutils.dump_as_bytes({}) - res = req.get_response(fakes.wsgi_app()) - - consistencygroup = objects.Group.get_by_id( - self.ctxt, consistencygroup.id) - self.assertEqual(http_client.ACCEPTED, res.status_int) - self.assertEqual('deleting', consistencygroup.status) - - consistencygroup.destroy() - cg2.destroy() - - def test_delete_consistencygroup_available_no_force(self): - consistencygroup = self._create_consistencygroup(status='available') - req = webob.Request.blank('/v2/%s/consistencygroups/%s/delete' % - (fake.PROJECT_ID, consistencygroup.id)) - req.method = 'POST' - req.headers['Content-Type'] = 'application/json' - body = {"consistencygroup": {"force": False}} - req.body = jsonutils.dump_as_bytes(body) - res = req.get_response(fakes.wsgi_app( - fake_auth_context=self.user_ctxt)) - - consistencygroup = objects.Group.get_by_id( - self.ctxt, consistencygroup.id) - self.assertEqual(http_client.ACCEPTED, res.status_int) - self.assertEqual(fields.ConsistencyGroupStatus.DELETING, - consistencygroup.status) - - consistencygroup.destroy() - - def test_delete_consistencygroup_with_consistencygroup_NotFound(self): - req = webob.Request.blank('/v2/%s/consistencygroups/%s/delete' % - (fake.PROJECT_ID, fake.WILL_NOT_BE_FOUND_ID)) - req.method = 'POST' - req.headers['Content-Type'] = 'application/json' - req.body = jsonutils.dump_as_bytes(None) - res = req.get_response(fakes.wsgi_app( - fake_auth_context=self.user_ctxt)) - res_dict = jsonutils.loads(res.body) - - self.assertEqual(http_client.NOT_FOUND, res.status_int) - self.assertEqual(http_client.NOT_FOUND, - res_dict['itemNotFound']['code']) - self.assertEqual('Group %s could not be found.' 
% - fake.WILL_NOT_BE_FOUND_ID, - res_dict['itemNotFound']['message']) - - def test_delete_consistencygroup_with_invalid_consistencygroup(self): - consistencygroup = self._create_consistencygroup( - status=fields.ConsistencyGroupStatus.CREATING) - self._assert_deleting_result_400(consistencygroup.id) - consistencygroup.destroy() - - def test_delete_consistencygroup_invalid_force(self): - consistencygroup = self._create_consistencygroup( - status=fields.ConsistencyGroupStatus.CREATING) - req = webob.Request.blank('/v2/%s/consistencygroups/%s/delete' % - (fake.PROJECT_ID, consistencygroup.id)) - req.method = 'POST' - req.headers['Content-Type'] = 'application/json' - body = {"consistencygroup": {"force": True}} - req.body = jsonutils.dump_as_bytes(body) - res = req.get_response(fakes.wsgi_app()) - - consistencygroup = objects.Group.get_by_id( - self.ctxt, consistencygroup.id) - self.assertEqual(http_client.ACCEPTED, res.status_int) - self.assertEqual('deleting', consistencygroup.status) - - def test_delete_consistencygroup_no_host(self): - consistencygroup = self._create_consistencygroup( - host=None, - status=fields.ConsistencyGroupStatus.ERROR) - req = webob.Request.blank('/v2/%s/consistencygroups/%s/delete' % - (fake.PROJECT_ID, consistencygroup.id)) - req.method = 'POST' - req.headers['Content-Type'] = 'application/json' - body = {"consistencygroup": {"force": True}} - req.body = jsonutils.dump_as_bytes(body) - res = req.get_response(fakes.wsgi_app( - fake_auth_context=self.user_ctxt)) - self.assertEqual(http_client.ACCEPTED, res.status_int) - - cg = objects.Group.get_by_id( - context.get_admin_context(read_deleted='yes'), - consistencygroup.id) - self.assertEqual(fields.ConsistencyGroupStatus.DELETED, cg.status) - self.assertIsNone(cg.host) - - def test_create_delete_consistencygroup_update_quota(self): - name = 'mycg' - description = 'consistency group 1' - fake_grp_type = {'id': fake.GROUP_TYPE_ID, 'name': 'fake_grp_type'} - fake_vol_type = {'id': 
fake.VOLUME_TYPE_ID, 'name': 'fake_vol_type'} - self.mock_object(db, 'group_type_get', - return_value=fake_grp_type) - self.mock_object(db, 'volume_types_get_by_name_or_id', - return_value=[fake_vol_type]) - self.mock_object(self.cg_api, '_cast_create_group') - self.mock_object(self.cg_api, 'update_quota') - cg = self.cg_api.create(self.ctxt, name, description, - fake.GROUP_TYPE_ID, fake_vol_type['name']) - self.cg_api.update_quota.assert_called_once_with( - self.ctxt, cg, 1) - - self.assertEqual(fields.ConsistencyGroupStatus.CREATING, cg.status) - self.assertIsNone(cg.host) - self.cg_api.update_quota.reset_mock() - cg.status = fields.ConsistencyGroupStatus.ERROR - self.cg_api.delete(self.ctxt, cg) - - self.cg_api.update_quota.assert_called_once_with( - self.ctxt, cg, -1, self.ctxt.project_id) - cg = objects.Group.get_by_id( - context.get_admin_context(read_deleted='yes'), - cg.id) - self.assertEqual(fields.ConsistencyGroupStatus.DELETED, cg.status) - - def test_delete_consistencygroup_with_invalid_body(self): - consistencygroup = self._create_consistencygroup( - status=fields.ConsistencyGroupStatus.AVAILABLE) - req = webob.Request.blank('/v2/%s/consistencygroups/%s/delete' % - (fake.PROJECT_ID, consistencygroup.id)) - req.method = 'POST' - req.headers['Content-Type'] = 'application/json' - body = {"invalid_request_element": {"force": False}} - req.body = jsonutils.dump_as_bytes(body) - res = req.get_response(fakes.wsgi_app( - fake_auth_context=self.user_ctxt)) - - self.assertEqual(http_client.BAD_REQUEST, res.status_int) - - def test_delete_consistencygroup_with_invalid_force_value_in_body(self): - consistencygroup = self._create_consistencygroup( - status=fields.ConsistencyGroupStatus.AVAILABLE) - req = webob.Request.blank('/v2/%s/consistencygroups/%s/delete' % - (fake.PROJECT_ID, consistencygroup.id)) - req.method = 'POST' - req.headers['Content-Type'] = 'application/json' - body = {"consistencygroup": {"force": "abcd"}} - req.body = 
jsonutils.dump_as_bytes(body) - res = req.get_response(fakes.wsgi_app( - fake_auth_context=self.user_ctxt)) - - self.assertEqual(http_client.BAD_REQUEST, res.status_int) - - def test_delete_consistencygroup_with_empty_force_value_in_body(self): - consistencygroup = self._create_consistencygroup( - status=fields.ConsistencyGroupStatus.AVAILABLE) - req = webob.Request.blank('/v2/%s/consistencygroups/%s/delete' % - (fake.PROJECT_ID, consistencygroup.id)) - req.method = 'POST' - req.headers['Content-Type'] = 'application/json' - body = {"consistencygroup": {"force": ""}} - req.body = jsonutils.dump_as_bytes(body) - res = req.get_response(fakes.wsgi_app( - fake_auth_context=self.user_ctxt)) - - self.assertEqual(http_client.BAD_REQUEST, res.status_int) - - def _assert_deleting_result_400(self, cg_id, force=False): - req = webob.Request.blank('/v2/%s/consistencygroups/%s/delete' % - (fake.PROJECT_ID, cg_id)) - req.method = 'POST' - req.headers['Content-Type'] = 'application/json' - body = {"consistencygroup": {"force": force}} - req.body = jsonutils.dump_as_bytes(body) - res = req.get_response(fakes.wsgi_app()) - self.assertEqual(http_client.BAD_REQUEST, res.status_int) - - res_dict = jsonutils.loads(res.body) - self.assertEqual(http_client.BAD_REQUEST, - res_dict['badRequest']['code']) - self.assertIsNotNone(res_dict['badRequest']['message']) - - def test_delete_consistencygroup_with_volumes(self): - consistencygroup = self._create_consistencygroup(status='available') - utils.create_volume(self.ctxt, group_id=consistencygroup.id, - testcase_instance=self) - self._assert_deleting_result_400(consistencygroup.id) - consistencygroup.destroy() - - def test_delete_consistencygroup_with_cgsnapshot(self): - consistencygroup = self._create_consistencygroup(status='available') - # If we don't add a volume to the CG the cgsnapshot creation will fail - vol = utils.create_volume(self.ctxt, - group_id=consistencygroup.id, - testcase_instance=self) - cg_snap = 
utils.create_group_snapshot(self.ctxt, consistencygroup.id, - group_type_id=fake.GROUP_TYPE_ID) - utils.create_snapshot(self.ctxt, volume_id=vol.id, - group_snapshot_id=cg_snap.id, - testcase_instance=self) - self._assert_deleting_result_400(consistencygroup.id) - cg_snap.destroy() - consistencygroup.destroy() - - def test_delete_consistencygroup_with_cgsnapshot_force(self): - consistencygroup = self._create_consistencygroup(status='available') - # If we don't add a volume to the CG the cgsnapshot creation will fail - vol = utils.create_volume(self.ctxt, - group_id=consistencygroup.id, - testcase_instance=self) - cg_snap = utils.create_group_snapshot(self.ctxt, consistencygroup.id, - group_type_id=fake.GROUP_TYPE_ID) - utils.create_snapshot(self.ctxt, volume_id=vol.id, - group_snapshot_id=cg_snap.id, - testcase_instance=self) - self._assert_deleting_result_400(consistencygroup.id, force=True) - cg_snap.destroy() - consistencygroup.destroy() - - def test_delete_consistencygroup_force_with_volumes(self): - consistencygroup = self._create_consistencygroup(status='available') - utils.create_volume(self.ctxt, consistencygroup_id=consistencygroup.id, - testcase_instance=self) - - req = webob.Request.blank('/v2/%s/consistencygroups/%s/delete' % - (fake.PROJECT_ID, consistencygroup.id)) - req.method = 'POST' - req.headers['Content-Type'] = 'application/json' - body = {"consistencygroup": {"force": True}} - req.body = jsonutils.dump_as_bytes(body) - res = req.get_response(fakes.wsgi_app()) - - consistencygroup = objects.Group.get_by_id( - self.ctxt, consistencygroup.id) - self.assertEqual(http_client.ACCEPTED, res.status_int) - self.assertEqual('deleting', consistencygroup.status) - consistencygroup.destroy() - - def test_delete_cg_force_with_volumes_with_deleted_snapshots(self): - consistencygroup = self._create_consistencygroup(status='available') - vol = utils.create_volume(self.ctxt, testcase_instance=self, - consistencygroup_id=consistencygroup.id) - 
utils.create_snapshot(self.ctxt, vol.id, status='deleted', - deleted=True, testcase_instance=self) - - req = webob.Request.blank('/v2/%s/consistencygroups/%s/delete' % - (fake.PROJECT_ID, consistencygroup.id)) - req.method = 'POST' - req.headers['Content-Type'] = 'application/json' - body = {"consistencygroup": {"force": True}} - req.body = jsonutils.dump_as_bytes(body) - res = req.get_response(fakes.wsgi_app()) - - consistencygroup = objects.Group.get_by_id( - self.ctxt, consistencygroup.id) - self.assertEqual(http_client.ACCEPTED, res.status_int) - self.assertEqual('deleting', consistencygroup.status) - consistencygroup.destroy() - - def test_create_consistencygroup_failed_no_volume_type(self): - name = 'cg1' - body = {"consistencygroup": {"name": name, - "description": - "Consistency Group 1", }} - req = webob.Request.blank('/v2/%s/consistencygroups' % fake.PROJECT_ID) - req.method = 'POST' - req.headers['Content-Type'] = 'application/json' - req.body = jsonutils.dump_as_bytes(body) - res = req.get_response(fakes.wsgi_app( - fake_auth_context=self.user_ctxt)) - res_dict = jsonutils.loads(res.body) - - self.assertEqual(http_client.BAD_REQUEST, res.status_int) - self.assertEqual(http_client.BAD_REQUEST, - res_dict['badRequest']['code']) - self.assertIsNotNone(res_dict['badRequest']['message']) - - @mock.patch( - 'cinder.api.openstack.wsgi.Controller.validate_name_and_description') - def test_update_consistencygroup_success(self, mock_validate): - volume_type_id = utils.create_volume_type( - context.get_admin_context(), self, name='my_vol_type')['id'] - fake_grp_type = {'id': fake.GROUP_TYPE_ID, 'name': 'fake_grp_type'} - self.mock_object(db, 'group_type_get', - return_value=fake_grp_type) - consistencygroup = self._create_consistencygroup( - status=fields.ConsistencyGroupStatus.AVAILABLE, - volume_type_ids=[volume_type_id], - group_type_id=fake.GROUP_TYPE_ID, - host='test_host') - - # We create another CG from the one we are updating to confirm that - # it will 
not affect the update if it is not CREATING - cg2 = self._create_consistencygroup( - status=fields.ConsistencyGroupStatus.AVAILABLE, - host='test_host', - volume_type_ids=[volume_type_id], - source_group_id=consistencygroup.id,) - - remove_volume_id = utils.create_volume( - self.ctxt, - testcase_instance=self, - volume_type_id=volume_type_id, - group_id=consistencygroup.id)['id'] - remove_volume_id2 = utils.create_volume( - self.ctxt, - testcase_instance=self, - volume_type_id=volume_type_id, - group_id=consistencygroup.id, - status='error')['id'] - remove_volume_id3 = utils.create_volume( - self.ctxt, - testcase_instance=self, - volume_type_id=volume_type_id, - group_id=consistencygroup.id, - status='error_deleting')['id'] - - self.assertEqual(fields.ConsistencyGroupStatus.AVAILABLE, - consistencygroup.status) - - cg_volumes = db.volume_get_all_by_generic_group(self.ctxt.elevated(), - consistencygroup.id) - cg_vol_ids = [cg_vol['id'] for cg_vol in cg_volumes] - self.assertIn(remove_volume_id, cg_vol_ids) - self.assertIn(remove_volume_id2, cg_vol_ids) - self.assertIn(remove_volume_id3, cg_vol_ids) - - add_volume_id = utils.create_volume( - self.ctxt, - testcase_instance=self, - volume_type_id=volume_type_id)['id'] - add_volume_id2 = utils.create_volume( - self.ctxt, - testcase_instance=self, - volume_type_id=volume_type_id)['id'] - req = webob.Request.blank('/v2/%s/consistencygroups/%s/update' % - (fake.PROJECT_ID, consistencygroup.id)) - req.method = 'PUT' - req.headers['Content-Type'] = 'application/json' - name = 'newcg' - description = 'New Consistency Group Description' - add_volumes = add_volume_id + "," + add_volume_id2 - remove_volumes = ','.join( - [remove_volume_id, remove_volume_id2, remove_volume_id3]) - body = {"consistencygroup": {"name": name, - "description": description, - "add_volumes": add_volumes, - "remove_volumes": remove_volumes, }} - req.body = jsonutils.dump_as_bytes(body) - res = req.get_response(fakes.wsgi_app( - 
fake_auth_context=self.user_ctxt)) - - consistencygroup = objects.Group.get_by_id( - self.ctxt, consistencygroup.id) - self.assertEqual(http_client.ACCEPTED, res.status_int) - self.assertTrue(mock_validate.called) - self.assertEqual(fields.ConsistencyGroupStatus.UPDATING, - consistencygroup.status) - - consistencygroup.destroy() - cg2.destroy() - - @mock.patch( - 'cinder.api.openstack.wsgi.Controller.validate_name_and_description') - def test_update_consistencygroup_sourcing_cg(self, mock_validate): - volume_type_id = fake.VOLUME_TYPE_ID - consistencygroup = self._create_consistencygroup( - status=fields.ConsistencyGroupStatus.AVAILABLE, - host='test_host') - - cg2 = self._create_consistencygroup( - status=fields.ConsistencyGroupStatus.CREATING, - host='test_host', - source_cgid=consistencygroup.id) - - remove_volume_id = utils.create_volume( - self.ctxt, - volume_type_id=volume_type_id, - consistencygroup_id=consistencygroup.id)['id'] - remove_volume_id2 = utils.create_volume( - self.ctxt, - volume_type_id=volume_type_id, - consistencygroup_id=consistencygroup.id)['id'] - - req = webob.Request.blank('/v2/%s/consistencygroups/%s/update' % - (fake.PROJECT_ID, consistencygroup.id)) - req.method = 'PUT' - req.headers['Content-Type'] = 'application/json' - name = 'newcg' - description = 'New Consistency Group Description' - remove_volumes = remove_volume_id + "," + remove_volume_id2 - body = {"consistencygroup": {"name": name, - "description": description, - "remove_volumes": remove_volumes, }} - req.body = jsonutils.dump_as_bytes(body) - res = req.get_response(fakes.wsgi_app()) - - consistencygroup = objects.Group.get_by_id( - self.ctxt, consistencygroup.id) - self.assertEqual(http_client.BAD_REQUEST, res.status_int) - self.assertEqual(fields.ConsistencyGroupStatus.AVAILABLE, - consistencygroup.status) - - consistencygroup.destroy() - cg2.destroy() - - @mock.patch( - 'cinder.api.openstack.wsgi.Controller.validate_name_and_description') - def 
test_update_consistencygroup_creating_cgsnapshot(self, mock_validate): - volume_type_id = fake.VOLUME_TYPE_ID - consistencygroup = self._create_consistencygroup( - status=fields.ConsistencyGroupStatus.AVAILABLE, - host='test_host') - - # If we don't add a volume to the CG the cgsnapshot creation will fail - utils.create_volume(self.ctxt, - consistencygroup_id=consistencygroup.id, - testcase_instance=self) - - cgsnapshot = utils.create_cgsnapshot( - self.ctxt, consistencygroup_id=consistencygroup.id) - - add_volume_id = utils.create_volume( - self.ctxt, - volume_type_id=volume_type_id)['id'] - add_volume_id2 = utils.create_volume( - self.ctxt, - volume_type_id=volume_type_id)['id'] - - req = webob.Request.blank('/v2/%s/consistencygroups/%s/update' % - (fake.PROJECT_ID, consistencygroup.id)) - req.method = 'PUT' - req.headers['Content-Type'] = 'application/json' - name = 'newcg' - description = 'New Consistency Group Description' - add_volumes = add_volume_id + "," + add_volume_id2 - body = {"consistencygroup": {"name": name, - "description": description, - "add_volumes": add_volumes}} - req.body = jsonutils.dump_as_bytes(body) - res = req.get_response(fakes.wsgi_app()) - - consistencygroup = objects.Group.get_by_id( - self.ctxt, consistencygroup.id) - self.assertEqual(http_client.BAD_REQUEST, res.status_int) - self.assertEqual(fields.ConsistencyGroupStatus.AVAILABLE, - consistencygroup.status) - - consistencygroup.destroy() - cgsnapshot.destroy() - - def test_update_consistencygroup_add_volume_not_found(self): - consistencygroup = self._create_consistencygroup( - ctxt=self.ctxt, - status=fields.ConsistencyGroupStatus.AVAILABLE) - req = webob.Request.blank('/v2/%s/consistencygroups/%s/update' % - (fake.PROJECT_ID, consistencygroup.id)) - req.method = 'PUT' - req.headers['Content-Type'] = 'application/json' - body = {"consistencygroup": {"name": None, - "description": None, - "add_volumes": "fake-volume-uuid", - "remove_volumes": None, }} - req.body = 
jsonutils.dump_as_bytes(body) - res = req.get_response(fakes.wsgi_app( - fake_auth_context=self.user_ctxt)) - res_dict = jsonutils.loads(res.body) - - self.assertEqual(http_client.BAD_REQUEST, res.status_int) - self.assertEqual(http_client.BAD_REQUEST, - res_dict['badRequest']['code']) - self.assertIsNotNone(res_dict['badRequest']['message']) - - consistencygroup.destroy() - - def test_update_consistencygroup_remove_volume_not_found(self): - consistencygroup = self._create_consistencygroup( - ctxt=self.ctxt, - status=fields.ConsistencyGroupStatus.AVAILABLE) - req = webob.Request.blank('/v2/%s/consistencygroups/%s/update' % - (fake.PROJECT_ID, consistencygroup.id)) - req.method = 'PUT' - req.headers['Content-Type'] = 'application/json' - body = {"consistencygroup": {"name": None, - "description": "new description", - "add_volumes": None, - "remove_volumes": "fake-volume-uuid", }} - req.body = jsonutils.dump_as_bytes(body) - res = req.get_response(fakes.wsgi_app( - fake_auth_context=self.user_ctxt)) - res_dict = jsonutils.loads(res.body) - - self.assertEqual(http_client.BAD_REQUEST, res.status_int) - self.assertEqual(http_client.BAD_REQUEST, - res_dict['badRequest']['code']) - self.assertIsNotNone(res_dict['badRequest']['message']) - - consistencygroup.destroy() - - def test_update_consistencygroup_empty_parameters(self): - consistencygroup = self._create_consistencygroup( - ctxt=self.ctxt, - status=fields.ConsistencyGroupStatus.AVAILABLE) - req = webob.Request.blank('/v2/%s/consistencygroups/%s/update' % - (fake.PROJECT_ID, consistencygroup.id)) - req.method = 'PUT' - req.headers['Content-Type'] = 'application/json' - body = {"consistencygroup": {"name": "", - "description": "", - "add_volumes": None, - "remove_volumes": None, }} - req.body = jsonutils.dump_as_bytes(body) - res = req.get_response(fakes.wsgi_app( - fake_auth_context=self.user_ctxt)) - res_dict = jsonutils.loads(res.body) - - self.assertEqual(http_client.BAD_REQUEST, res.status_int) - 
self.assertEqual(http_client.BAD_REQUEST, - res_dict['badRequest']['code']) - - consistencygroup.destroy() - - def test_update_consistencygroup_add_volume_invalid_state(self): - volume_type_id = fake.VOLUME_TYPE_ID - consistencygroup = self._create_consistencygroup( - ctxt=self.ctxt, - status=fields.ConsistencyGroupStatus.AVAILABLE) - add_volume_id = utils.create_volume( - self.ctxt, - volume_type_id=volume_type_id, - status='wrong_status')['id'] - req = webob.Request.blank('/v2/%s/consistencygroups/%s/update' % - (fake.PROJECT_ID, consistencygroup.id)) - req.method = 'PUT' - req.headers['Content-Type'] = 'application/json' - add_volumes = add_volume_id - body = {"consistencygroup": {"name": "cg1", - "description": "", - "add_volumes": add_volumes, - "remove_volumes": None, }} - req.body = jsonutils.dump_as_bytes(body) - res = req.get_response(fakes.wsgi_app( - fake_auth_context=self.user_ctxt)) - res_dict = jsonutils.loads(res.body) - - self.assertEqual(http_client.BAD_REQUEST, res.status_int) - self.assertEqual(http_client.BAD_REQUEST, - res_dict['badRequest']['code']) - self.assertIsNotNone(res_dict['badRequest']['message']) - - consistencygroup.destroy() - - def test_update_consistencygroup_add_volume_invalid_volume_type(self): - consistencygroup = self._create_consistencygroup( - ctxt=self.ctxt, - status=fields.ConsistencyGroupStatus.AVAILABLE) - wrong_type = fake.VOLUME_TYPE2_ID - add_volume_id = utils.create_volume( - self.ctxt, - volume_type_id=wrong_type)['id'] - req = webob.Request.blank('/v2/%s/consistencygroups/%s/update' % - (fake.PROJECT_ID, consistencygroup.id)) - req.method = 'PUT' - req.headers['Content-Type'] = 'application/json' - add_volumes = add_volume_id - body = {"consistencygroup": {"name": "cg1", - "description": "", - "add_volumes": add_volumes, - "remove_volumes": None, }} - req.body = jsonutils.dump_as_bytes(body) - res = req.get_response(fakes.wsgi_app( - fake_auth_context=self.user_ctxt)) - res_dict = jsonutils.loads(res.body) - - 
self.assertEqual(http_client.BAD_REQUEST, res.status_int) - self.assertEqual(http_client.BAD_REQUEST, - res_dict['badRequest']['code']) - self.assertIsNotNone(res_dict['badRequest']['message']) - - consistencygroup.destroy() - - def test_update_consistencygroup_add_volume_already_in_cg(self): - consistencygroup = self._create_consistencygroup( - ctxt=self.ctxt, - status=fields.ConsistencyGroupStatus.AVAILABLE) - add_volume_id = utils.create_volume( - self.ctxt, - consistencygroup_id=fake.CONSISTENCY_GROUP2_ID)['id'] - req = webob.Request.blank('/v2/%s/consistencygroups/%s/update' % - (fake.PROJECT_ID, consistencygroup.id)) - req.method = 'PUT' - req.headers['Content-Type'] = 'application/json' - add_volumes = add_volume_id - body = {"consistencygroup": {"name": "cg1", - "description": "", - "add_volumes": add_volumes, - "remove_volumes": None, }} - req.body = jsonutils.dump_as_bytes(body) - res = req.get_response(fakes.wsgi_app( - fake_auth_context=self.user_ctxt)) - res_dict = jsonutils.loads(res.body) - - self.assertEqual(http_client.BAD_REQUEST, res.status_int) - self.assertEqual(http_client.BAD_REQUEST, - res_dict['badRequest']['code']) - self.assertIsNotNone(res_dict['badRequest']['message']) - - consistencygroup.destroy() - - def test_update_consistencygroup_invalid_state(self): - consistencygroup = self._create_consistencygroup( - status=fields.ConsistencyGroupStatus.CREATING, - ctxt=self.ctxt) - req = webob.Request.blank('/v2/%s/consistencygroups/%s/update' % - (fake.PROJECT_ID, consistencygroup.id)) - req.method = 'PUT' - req.headers['Content-Type'] = 'application/json' - body = {"consistencygroup": {"name": "new name", - "description": None, - "add_volumes": None, - "remove_volumes": None, }} - req.body = jsonutils.dump_as_bytes(body) - res = req.get_response(fakes.wsgi_app( - fake_auth_context=self.user_ctxt)) - res_dict = jsonutils.loads(res.body) - - self.assertEqual(http_client.BAD_REQUEST, res.status_int) - self.assertEqual(http_client.BAD_REQUEST, - 
res_dict['badRequest']['code']) - self.assertIsNotNone(res_dict['badRequest']['message']) - - consistencygroup.destroy() - - @mock.patch( - 'cinder.api.openstack.wsgi.Controller.validate_name_and_description') - def test_create_consistencygroup_from_src_snap(self, mock_validate): - self.mock_object(volume_api.API, "create", v2_fakes.fake_volume_create) - - consistencygroup = utils.create_group( - self.ctxt, group_type_id=fake.GROUP_TYPE_ID, - volume_type_ids=[fake.VOLUME_TYPE_ID],) - volume_id = utils.create_volume( - self.ctxt, - volume_type_id=fake.VOLUME_TYPE_ID, - group_id=consistencygroup.id)['id'] - cgsnapshot = utils.create_group_snapshot( - self.ctxt, group_id=consistencygroup.id, - group_type_id=fake.GROUP_TYPE_ID) - snapshot = utils.create_snapshot( - self.ctxt, - volume_id, - group_snapshot_id=cgsnapshot.id, - status=fields.SnapshotStatus.AVAILABLE) - - test_cg_name = 'test cg' - body = {"consistencygroup-from-src": {"name": test_cg_name, - "description": - "Consistency Group 1", - "cgsnapshot_id": cgsnapshot.id}} - req = webob.Request.blank('/v2/%s/consistencygroups/create_from_src' % - fake.PROJECT_ID) - req.method = 'POST' - req.headers['Content-Type'] = 'application/json' - req.body = jsonutils.dump_as_bytes(body) - res = req.get_response(fakes.wsgi_app( - fake_auth_context=self.user_ctxt)) - res_dict = jsonutils.loads(res.body) - - self.assertEqual(http_client.ACCEPTED, res.status_int) - self.assertIn('id', res_dict['consistencygroup']) - self.assertEqual(test_cg_name, res_dict['consistencygroup']['name']) - self.assertTrue(mock_validate.called) - - cg_ref = objects.Group.get_by_id( - self.ctxt.elevated(), res_dict['consistencygroup']['id']) - - cg_ref.destroy() - snapshot.destroy() - db.volume_destroy(self.ctxt.elevated(), volume_id) - consistencygroup.destroy() - cgsnapshot.destroy() - - def test_create_consistencygroup_from_src_cg(self): - self.mock_object(volume_api.API, "create", v2_fakes.fake_volume_create) - - source_cg = utils.create_group( 
- self.ctxt, group_type_id=fake.GROUP_TYPE_ID, - volume_type_ids=[fake.VOLUME_TYPE_ID],) - - volume_id = utils.create_volume( - self.ctxt, - group_id=source_cg.id)['id'] - - test_cg_name = 'test cg' - body = {"consistencygroup-from-src": {"name": test_cg_name, - "description": - "Consistency Group 1", - "source_cgid": source_cg.id}} - req = webob.Request.blank('/v2/%s/consistencygroups/create_from_src' % - fake.PROJECT_ID) - req.method = 'POST' - req.headers['Content-Type'] = 'application/json' - req.body = jsonutils.dump_as_bytes(body) - res = req.get_response(fakes.wsgi_app( - fake_auth_context=self.user_ctxt)) - res_dict = jsonutils.loads(res.body) - - self.assertEqual(http_client.ACCEPTED, res.status_int) - self.assertIn('id', res_dict['consistencygroup']) - self.assertEqual(test_cg_name, res_dict['consistencygroup']['name']) - - cg = objects.Group.get_by_id( - self.ctxt, res_dict['consistencygroup']['id']) - cg.destroy() - db.volume_destroy(self.ctxt.elevated(), volume_id) - source_cg.destroy() - - def test_create_consistencygroup_from_src_both_snap_cg(self): - self.mock_object(volume_api.API, "create", v2_fakes.fake_volume_create) - - consistencygroup = utils.create_group( - self.ctxt, group_type_id=fake.GROUP_TYPE_ID, - volume_type_ids=[fake.VOLUME_TYPE_ID],) - - volume_id = utils.create_volume( - self.ctxt, - group_id=consistencygroup.id)['id'] - cgsnapshot_id = utils.create_group_snapshot( - self.ctxt, - group_type_id=fake.GROUP_TYPE_ID, - group_id=consistencygroup.id)['id'] - snapshot = utils.create_snapshot( - self.ctxt, - volume_id, - group_snapshot_id=cgsnapshot_id, - status=fields.SnapshotStatus.AVAILABLE) - - test_cg_name = 'test cg' - body = {"consistencygroup-from-src": {"name": test_cg_name, - "description": - "Consistency Group 1", - "cgsnapshot_id": cgsnapshot_id, - "source_cgid": - consistencygroup.id}} - req = webob.Request.blank('/v2/%s/consistencygroups/create_from_src' % - fake.PROJECT_ID) - req.method = 'POST' - req.headers['Content-Type'] 
= 'application/json' - req.body = jsonutils.dump_as_bytes(body) - res = req.get_response(fakes.wsgi_app( - fake_auth_context=self.user_ctxt)) - res_dict = jsonutils.loads(res.body) - - self.assertEqual(http_client.BAD_REQUEST, res.status_int) - self.assertEqual(http_client.BAD_REQUEST, - res_dict['badRequest']['code']) - self.assertIsNotNone(res_dict['badRequest']['message']) - - snapshot.destroy() - db.cgsnapshot_destroy(self.ctxt.elevated(), cgsnapshot_id) - db.volume_destroy(self.ctxt.elevated(), volume_id) - consistencygroup.destroy() - - def test_create_consistencygroup_from_src_invalid_body(self): - name = 'cg1' - body = {"invalid": {"name": name, - "description": - "Consistency Group 1", }} - req = webob.Request.blank('/v2/%s/consistencygroups/create_from_src' % - fake.PROJECT_ID) - req.method = 'POST' - req.headers['Content-Type'] = 'application/json' - req.body = jsonutils.dump_as_bytes(body) - res = req.get_response(fakes.wsgi_app( - fake_auth_context=self.user_ctxt)) - res_dict = jsonutils.loads(res.body) - - self.assertEqual(http_client.BAD_REQUEST, res.status_int) - self.assertEqual(http_client.BAD_REQUEST, - res_dict['badRequest']['code']) - # Missing 'consistencygroup-from-src' in the body. 
- self.assertIsNotNone(res_dict['badRequest']['message']) - - def test_create_consistencygroup_from_src_no_source_id(self): - name = 'cg1' - body = {"consistencygroup-from-src": {"name": name, - "description": - "Consistency Group 1", }} - req = webob.Request.blank('/v2/%s/consistencygroups/create_from_src' % - fake.PROJECT_ID) - req.method = 'POST' - req.headers['Content-Type'] = 'application/json' - req.body = jsonutils.dump_as_bytes(body) - res = req.get_response(fakes.wsgi_app( - fake_auth_context=self.user_ctxt)) - res_dict = jsonutils.loads(res.body) - - self.assertEqual(http_client.BAD_REQUEST, res.status_int) - self.assertEqual(http_client.BAD_REQUEST, - res_dict['badRequest']['code']) - self.assertIsNotNone(res_dict['badRequest']['message']) - - def test_create_consistencygroup_from_src_no_host(self): - consistencygroup = utils.create_group( - self.ctxt, group_type_id=fake.GROUP_TYPE_ID, - volume_type_ids=[fake.VOLUME_TYPE_ID], host=None) - volume_id = utils.create_volume( - self.ctxt, - group_id=consistencygroup.id)['id'] - cgsnapshot = utils.create_group_snapshot( - self.ctxt, group_id=consistencygroup.id, - group_type_id=fake.GROUP_TYPE_ID,) - snapshot = utils.create_snapshot( - self.ctxt, - volume_id, - group_snapshot_id=cgsnapshot.id, - status=fields.SnapshotStatus.AVAILABLE) - - test_cg_name = 'test cg' - body = {"consistencygroup-from-src": {"name": test_cg_name, - "description": - "Consistency Group 1", - "cgsnapshot_id": cgsnapshot.id}} - req = webob.Request.blank('/v2/%s/consistencygroups/create_from_src' % - fake.PROJECT_ID) - req.method = 'POST' - req.headers['Content-Type'] = 'application/json' - req.body = jsonutils.dump_as_bytes(body) - res = req.get_response(fakes.wsgi_app( - fake_auth_context=self.user_ctxt)) - res_dict = jsonutils.loads(res.body) - - self.assertEqual(http_client.BAD_REQUEST, res.status_int) - self.assertEqual(http_client.BAD_REQUEST, - res_dict['badRequest']['code']) - msg = _('Invalid Group: No host to create group') - 
self.assertIn(msg, res_dict['badRequest']['message']) - - snapshot.destroy() - db.volume_destroy(self.ctxt.elevated(), volume_id) - consistencygroup.destroy() - cgsnapshot.destroy() - - def test_create_consistencygroup_from_src_cgsnapshot_empty(self): - consistencygroup = utils.create_group( - self.ctxt, group_type_id=fake.GROUP_TYPE_ID, - volume_type_ids=[fake.VOLUME_TYPE_ID],) - volume_id = utils.create_volume( - self.ctxt, - group_id=consistencygroup.id)['id'] - cgsnapshot = utils.create_group_snapshot( - self.ctxt, group_id=consistencygroup.id, - group_type_id=fake.GROUP_TYPE_ID,) - - test_cg_name = 'test cg' - body = {"consistencygroup-from-src": {"name": test_cg_name, - "description": - "Consistency Group 1", - "cgsnapshot_id": cgsnapshot.id}} - req = webob.Request.blank('/v2/%s/consistencygroups/create_from_src' % - fake.PROJECT_ID) - req.method = 'POST' - req.headers['Content-Type'] = 'application/json' - req.body = jsonutils.dump_as_bytes(body) - res = req.get_response(fakes.wsgi_app( - fake_auth_context=self.user_ctxt)) - res_dict = jsonutils.loads(res.body) - - self.assertEqual(http_client.BAD_REQUEST, res.status_int) - self.assertEqual(http_client.BAD_REQUEST, - res_dict['badRequest']['code']) - self.assertIsNotNone(res_dict['badRequest']['message']) - - db.volume_destroy(self.ctxt.elevated(), volume_id) - consistencygroup.destroy() - cgsnapshot.destroy() - - def test_create_consistencygroup_from_src_source_cg_empty(self): - source_cg = utils.create_group( - self.ctxt, group_type_id=fake.GROUP_TYPE_ID, - volume_type_ids=[fake.VOLUME_TYPE_ID],) - - test_cg_name = 'test cg' - body = {"consistencygroup-from-src": {"name": test_cg_name, - "description": - "Consistency Group 1", - "source_cgid": source_cg.id}} - req = webob.Request.blank('/v2/%s/consistencygroups/create_from_src' % - fake.PROJECT_ID) - req.method = 'POST' - req.headers['Content-Type'] = 'application/json' - req.body = jsonutils.dump_as_bytes(body) - res = req.get_response(fakes.wsgi_app( - 
fake_auth_context=self.user_ctxt)) - res_dict = jsonutils.loads(res.body) - - self.assertEqual(http_client.BAD_REQUEST, res.status_int) - self.assertEqual(http_client.BAD_REQUEST, - res_dict['badRequest']['code']) - self.assertIsNotNone(res_dict['badRequest']['message']) - - source_cg.destroy() - - def test_create_consistencygroup_from_src_cgsnapshot_notfound(self): - consistencygroup = utils.create_group( - self.ctxt, group_type_id=fake.GROUP_TYPE_ID, - volume_type_ids=[fake.VOLUME_TYPE_ID],) - volume_id = utils.create_volume( - self.ctxt, - group_id=consistencygroup.id)['id'] - - test_cg_name = 'test cg' - body = { - "consistencygroup-from-src": - { - "name": test_cg_name, - "description": "Consistency Group 1", - "source_cgid": fake.CGSNAPSHOT_ID - } - } - req = webob.Request.blank('/v2/%s/consistencygroups/create_from_src' % - fake.PROJECT_ID) - req.method = 'POST' - req.headers['Content-Type'] = 'application/json' - req.body = jsonutils.dump_as_bytes(body) - res = req.get_response(fakes.wsgi_app( - fake_auth_context=self.user_ctxt)) - res_dict = jsonutils.loads(res.body) - - self.assertEqual(http_client.NOT_FOUND, res.status_int) - self.assertEqual(http_client.NOT_FOUND, - res_dict['itemNotFound']['code']) - self.assertIsNotNone(res_dict['itemNotFound']['message']) - - db.volume_destroy(self.ctxt.elevated(), volume_id) - consistencygroup.destroy() - - def test_create_consistencygroup_from_src_source_cg_notfound(self): - test_cg_name = 'test cg' - body = { - "consistencygroup-from-src": - { - "name": test_cg_name, - "description": "Consistency Group 1", - "source_cgid": fake.CONSISTENCY_GROUP_ID - } - } - req = webob.Request.blank('/v2/%s/consistencygroups/create_from_src' % - fake.PROJECT_ID) - req.method = 'POST' - req.headers['Content-Type'] = 'application/json' - req.body = jsonutils.dump_as_bytes(body) - res = req.get_response(fakes.wsgi_app( - fake_auth_context=self.user_ctxt)) - res_dict = jsonutils.loads(res.body) - - 
self.assertEqual(http_client.NOT_FOUND, res.status_int) - self.assertEqual(http_client.NOT_FOUND, - res_dict['itemNotFound']['code']) - self.assertIsNotNone(res_dict['itemNotFound']['message']) - - @mock.patch.object(volume_api.API, 'create', - side_effect=exception.CinderException( - 'Create volume failed.')) - def test_create_consistencygroup_from_src_cgsnapshot_create_volume_failed( - self, mock_create): - consistencygroup = utils.create_group( - self.ctxt, group_type_id=fake.GROUP_TYPE_ID, - volume_type_ids=[fake.VOLUME_TYPE_ID],) - volume_id = utils.create_volume( - self.ctxt, - group_id=consistencygroup.id)['id'] - cgsnapshot = utils.create_group_snapshot( - self.ctxt, group_id=consistencygroup.id, - group_type_id=fake.GROUP_TYPE_ID,) - snapshot = utils.create_snapshot( - self.ctxt, - volume_id, - group_snapshot_id=cgsnapshot.id, - status=fields.SnapshotStatus.AVAILABLE) - - test_cg_name = 'test cg' - body = {"consistencygroup-from-src": {"name": test_cg_name, - "description": - "Consistency Group 1", - "cgsnapshot_id": cgsnapshot.id}} - req = webob.Request.blank('/v2/%s/consistencygroups/create_from_src' % - fake.PROJECT_ID) - req.method = 'POST' - req.headers['Content-Type'] = 'application/json' - req.body = jsonutils.dump_as_bytes(body) - res = req.get_response(fakes.wsgi_app( - fake_auth_context=self.user_ctxt)) - res_dict = jsonutils.loads(res.body) - - self.assertEqual(http_client.BAD_REQUEST, res.status_int) - self.assertEqual(http_client.BAD_REQUEST, - res_dict['badRequest']['code']) - msg = _("Create volume failed.") - self.assertEqual(msg, res_dict['badRequest']['message']) - - snapshot.destroy() - db.volume_destroy(self.ctxt.elevated(), volume_id) - consistencygroup.destroy() - cgsnapshot.destroy() - - @mock.patch.object(volume_api.API, 'create', - side_effect=exception.CinderException( - 'Create volume failed.')) - def test_create_consistencygroup_from_src_cg_create_volume_failed( - self, mock_create): - source_cg = utils.create_group( - 
self.ctxt, group_type_id=fake.GROUP_TYPE_ID, - volume_type_ids=[fake.VOLUME_TYPE_ID],) - volume_id = utils.create_volume( - self.ctxt, - group_id=source_cg.id)['id'] - - test_cg_name = 'test cg' - body = {"consistencygroup-from-src": {"name": test_cg_name, - "description": - "Consistency Group 1", - "source_cgid": source_cg.id}} - req = webob.Request.blank('/v2/%s/consistencygroups/create_from_src' % - fake.PROJECT_ID) - req.method = 'POST' - req.headers['Content-Type'] = 'application/json' - req.body = jsonutils.dump_as_bytes(body) - res = req.get_response(fakes.wsgi_app( - fake_auth_context=self.user_ctxt)) - res_dict = jsonutils.loads(res.body) - - self.assertEqual(http_client.BAD_REQUEST, res.status_int) - self.assertEqual(http_client.BAD_REQUEST, - res_dict['badRequest']['code']) - self.assertIsNotNone(res_dict['badRequest']['message']) - - db.volume_destroy(self.ctxt.elevated(), volume_id) - source_cg.destroy() diff --git a/cinder/tests/unit/api/contrib/test_extended_snapshot_attributes.py b/cinder/tests/unit/api/contrib/test_extended_snapshot_attributes.py deleted file mode 100644 index 33b52776e..000000000 --- a/cinder/tests/unit/api/contrib/test_extended_snapshot_attributes.py +++ /dev/null @@ -1,114 +0,0 @@ -# Copyright 2012 OpenStack Foundation -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- - -import mock -from oslo_serialization import jsonutils -from six.moves import http_client -import webob - -from cinder import context -from cinder.objects import fields -from cinder import test -from cinder.tests.unit.api import fakes -from cinder.tests.unit import fake_constants as fake -from cinder.tests.unit import fake_snapshot -from cinder.tests.unit import fake_volume - - -UUID1 = fake.SNAPSHOT_ID -UUID2 = fake.SNAPSHOT2_ID - - -def _get_default_snapshot_param(): - return {'id': UUID1, - 'volume_id': fake.VOLUME_ID, - 'status': fields.SnapshotStatus.AVAILABLE, - 'volume_size': 100, - 'created_at': None, - 'display_name': 'Default name', - 'display_description': 'Default description', - 'project_id': fake.PROJECT_ID, - 'progress': '0%', - 'expected_attrs': ['metadata']} - - -def fake_snapshot_get(self, context, snapshot_id): - param = _get_default_snapshot_param() - return param - - -def fake_snapshot_get_all(self, context, search_opts=None): - param = _get_default_snapshot_param() - return [param] - - -class ExtendedSnapshotAttributesTest(test.TestCase): - content_type = 'application/json' - prefix = 'os-extended-snapshot-attributes:' - - def setUp(self): - super(ExtendedSnapshotAttributesTest, self).setUp() - self.user_ctxt = context.RequestContext( - fake.USER_ID, fake.PROJECT_ID, auth_token=True) - - def _make_request(self, url): - req = webob.Request.blank(url) - req.headers['Accept'] = self.content_type - res = req.get_response(fakes.wsgi_app( - fake_auth_context=self.user_ctxt)) - return res - - def _get_snapshot(self, body): - return jsonutils.loads(body).get('snapshot') - - def _get_snapshots(self, body): - return jsonutils.loads(body).get('snapshots') - - def assertSnapshotAttributes(self, snapshot, project_id, progress): - self.assertEqual(project_id, - snapshot.get('%sproject_id' % self.prefix)) - self.assertEqual(progress, snapshot.get('%sprogress' % self.prefix)) - - @mock.patch('cinder.db.snapshot_metadata_get', return_value=dict()) - 
@mock.patch('cinder.objects.Volume.get_by_id') - @mock.patch('cinder.objects.Snapshot.get_by_id') - def test_show(self, snapshot_get_by_id, volume_get_by_id, - snapshot_metadata_get): - ctx = context.RequestContext(fake.USER_ID, fake.PROJECT_ID, - auth_token=True) - snapshot = _get_default_snapshot_param() - snapshot_obj = fake_snapshot.fake_snapshot_obj(ctx, **snapshot) - fake_volume_obj = fake_volume.fake_volume_obj(ctx) - snapshot_get_by_id.return_value = snapshot_obj - volume_get_by_id.return_value = fake_volume_obj - - url = '/v2/%s/snapshots/%s' % (fake.PROJECT_ID, UUID1) - res = self._make_request(url) - - self.assertEqual(http_client.OK, res.status_int) - self.assertSnapshotAttributes(self._get_snapshot(res.body), - project_id=fake.PROJECT_ID, - progress='0%') - - def test_detail(self): - url = '/v2/%s/snapshots/detail' % fake.PROJECT_ID - res = self._make_request(url) - - self.assertEqual(http_client.OK, res.status_int) - for snapshot in self._get_snapshots(res.body): - self.assertSnapshotAttributes(snapshot, - project_id=fake.PROJECT_ID, - progress='0%') diff --git a/cinder/tests/unit/api/contrib/test_hosts.py b/cinder/tests/unit/api/contrib/test_hosts.py deleted file mode 100644 index 042dc43d5..000000000 --- a/cinder/tests/unit/api/contrib/test_hosts.py +++ /dev/null @@ -1,158 +0,0 @@ -# Copyright (c) 2011 OpenStack Foundation -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -import datetime - -from iso8601 import iso8601 -from oslo_utils import timeutils -import webob.exc - -from cinder.api.contrib import hosts as os_hosts -from cinder import context -from cinder import exception -from cinder import test - - -created_time = datetime.datetime(2012, 11, 14, 1, 20, 41, 95099) -curr_time = datetime.datetime(2013, 7, 3, 0, 0, 1) - -SERVICE_LIST = [ - {'created_at': created_time, 'updated_at': curr_time, - 'host': 'test.host.1', 'topic': 'cinder-volume', 'disabled': 0, - 'availability_zone': 'cinder'}, - {'created_at': created_time, 'updated_at': curr_time, - 'host': 'test.host.1', 'topic': 'cinder-volume', 'disabled': 0, - 'availability_zone': 'cinder'}, - {'created_at': created_time, 'updated_at': curr_time, - 'host': 'test.host.1', 'topic': 'cinder-volume', 'disabled': 0, - 'availability_zone': 'cinder'}, - {'created_at': created_time, 'updated_at': curr_time, - 'host': 'test.host.1', 'topic': 'cinder-volume', 'disabled': 0, - 'availability_zone': 'cinder'}, - {'created_at': created_time, 'updated_at': None, - 'host': 'test.host.1', 'topic': 'cinder-volume', 'disabled': 0, - 'availability_zone': 'cinder'}, -] - -LIST_RESPONSE = [{'service-status': 'available', 'service': 'cinder-volume', - 'zone': 'cinder', 'service-state': 'enabled', - 'host_name': 'test.host.1', 'last-update': curr_time}, - {'service-status': 'available', 'service': 'cinder-volume', - 'zone': 'cinder', 'service-state': 'enabled', - 'host_name': 'test.host.1', 'last-update': curr_time}, - {'service-status': 'available', 'service': 'cinder-volume', - 'zone': 'cinder', 'service-state': 'enabled', - 'host_name': 'test.host.1', 'last-update': curr_time}, - {'service-status': 'available', 'service': 'cinder-volume', - 'zone': 'cinder', 'service-state': 'enabled', - 'host_name': 'test.host.1', 'last-update': curr_time}, - {'service-status': 'unavailable', 'service': 'cinder-volume', - 'zone': 'cinder', 'service-state': 'enabled', - 'host_name': 'test.host.1', 'last-update': 
None}, - ] - - -def stub_utcnow(with_timezone=False): - tzinfo = iso8601.Utc() if with_timezone else None - return datetime.datetime(2013, 7, 3, 0, 0, 2, tzinfo=tzinfo) - - -class FakeRequest(object): - environ = {'cinder.context': context.get_admin_context()} - GET = {} - - -class FakeRequestWithcinderZone(object): - environ = {'cinder.context': context.get_admin_context()} - GET = {'zone': 'cinder'} - - -class HostTestCase(test.TestCase): - """Test Case for hosts.""" - - def setUp(self): - super(HostTestCase, self).setUp() - self.controller = os_hosts.HostController() - self.req = FakeRequest() - self.patch('cinder.db.service_get_all', autospec=True, - return_value=SERVICE_LIST) - self.mock_object(timeutils, 'utcnow', stub_utcnow) - - def _test_host_update(self, host, key, val, expected_value): - body = {key: val} - result = self.controller.update(self.req, host, body=body) - self.assertEqual(expected_value, result[key]) - - def test_list_hosts(self): - """Verify that the volume hosts are returned.""" - hosts = os_hosts._list_hosts(self.req) - self.assertEqual(LIST_RESPONSE, hosts) - - cinder_hosts = os_hosts._list_hosts(self.req, 'cinder-volume') - expected = [host for host in LIST_RESPONSE - if host['service'] == 'cinder-volume'] - self.assertEqual(expected, cinder_hosts) - - def test_list_hosts_with_zone(self): - req = FakeRequestWithcinderZone() - hosts = os_hosts._list_hosts(req) - self.assertEqual(LIST_RESPONSE, hosts) - - def test_bad_status_value(self): - self.assertRaises(webob.exc.HTTPBadRequest, self.controller.update, - self.req, 'test.host.1', body={'status': 'bad'}) - self.assertRaises(webob.exc.HTTPBadRequest, - self.controller.update, - self.req, - 'test.host.1', - body={'status': 'disablabc'}) - - def test_bad_update_key(self): - bad_body = {'crazy': 'bad'} - self.assertRaises(webob.exc.HTTPBadRequest, self.controller.update, - self.req, 'test.host.1', body=bad_body) - - def test_bad_update_key_and_correct_udpate_key(self): - bad_body = 
{'status': 'disable', 'crazy': 'bad'} - self.assertRaises(webob.exc.HTTPBadRequest, self.controller.update, - self.req, 'test.host.1', body=bad_body) - - def test_good_udpate_keys(self): - body = {'status': 'disable'} - self.assertRaises(NotImplementedError, self.controller.update, - self.req, 'test.host.1', body=body) - - def test_bad_host(self): - self.assertRaises(exception.HostNotFound, - self.controller.update, - self.req, - 'bogus_host_name', - body={'disabled': 0}) - - def test_show_forbidden(self): - self.req.environ['cinder.context'].is_admin = False - dest = 'dummydest' - self.assertRaises(webob.exc.HTTPForbidden, - self.controller.show, - self.req, dest) - self.req.environ['cinder.context'].is_admin = True - - def test_show_host_not_exist(self): - """A host given as an argument does not exists.""" - self.req.environ['cinder.context'].is_admin = True - dest = 'dummydest' - self.assertRaises(exception.ServiceNotFound, - self.controller.show, - self.req, dest) diff --git a/cinder/tests/unit/api/contrib/test_qos_specs_manage.py b/cinder/tests/unit/api/contrib/test_qos_specs_manage.py deleted file mode 100644 index b4b63e35f..000000000 --- a/cinder/tests/unit/api/contrib/test_qos_specs_manage.py +++ /dev/null @@ -1,782 +0,0 @@ -# Copyright 2013 eBay Inc. -# Copyright 2013 OpenStack Foundation -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -import ddt -import mock -from six.moves import http_client -import webob - -from cinder.api.contrib import qos_specs_manage -from cinder import context -from cinder import db -from cinder import exception -from cinder import objects -from cinder import test -from cinder.tests.unit.api import fakes -from cinder.tests.unit import fake_constants as fake -from cinder.tests.unit import fake_notifier - - -def stub_qos_specs(id): - res = dict(name='qos_specs_' + str(id)) - res.update(dict(consumer='back-end')) - res.update(dict(id=str(id))) - specs = {"key1": "value1", - "key2": "value2", - "key3": "value3", - "key4": "value4", - "key5": "value5"} - res.update(dict(specs=specs)) - return objects.QualityOfServiceSpecs(**res) - - -def stub_qos_associates(id): - return [{ - 'association_type': 'volume_type', - 'name': 'FakeVolTypeName', - 'id': fake.VOLUME_TYPE_ID}] - - -def return_qos_specs_get_all(context, filters=None, marker=None, limit=None, - offset=None, sort_keys=None, sort_dirs=None): - return [ - stub_qos_specs(fake.QOS_SPEC_ID), - stub_qos_specs(fake.QOS_SPEC2_ID), - stub_qos_specs(fake.QOS_SPEC3_ID), - ] - - -def return_qos_specs_get_qos_specs(context, id): - if id == fake.WILL_NOT_BE_FOUND_ID: - raise exception.QoSSpecsNotFound(specs_id=id) - return stub_qos_specs(id) - - -def return_qos_specs_delete(context, id, force): - if id == fake.WILL_NOT_BE_FOUND_ID: - raise exception.QoSSpecsNotFound(specs_id=id) - elif id == fake.IN_USE_ID: - raise exception.QoSSpecsInUse(specs_id=id) - pass - - -def return_qos_specs_delete_keys(context, id, keys): - if id == fake.WILL_NOT_BE_FOUND_ID: - raise exception.QoSSpecsNotFound(specs_id=id) - - if 'foo' in keys: - raise exception.QoSSpecsKeyNotFound(specs_id=id, - specs_key='foo') - - -def return_qos_specs_update(context, id, specs): - if id == fake.WILL_NOT_BE_FOUND_ID: - raise exception.QoSSpecsNotFound(specs_id=id) - elif id == fake.INVALID_ID: - raise exception.InvalidQoSSpecs(reason=id) - elif id == 
fake.UPDATE_FAILED_ID: - raise exception.QoSSpecsUpdateFailed(specs_id=id, - qos_specs=specs) - pass - - -def return_qos_specs_create(context, name, specs): - if name == 'qos_spec_%s' % fake.ALREADY_EXISTS_ID: - raise exception.QoSSpecsExists(specs_id=name) - elif name == 'qos_spec_%s' % fake.ACTION_FAILED_ID: - raise exception.QoSSpecsCreateFailed(name=id, qos_specs=specs) - elif name == 'qos_spec_%s' % fake.INVALID_ID: - raise exception.InvalidQoSSpecs(reason=name) - - return objects.QualityOfServiceSpecs(name=name, - specs=specs, - consumer='back-end', - id=fake.QOS_SPEC_ID) - - -def return_get_qos_associations(context, id): - if id == fake.WILL_NOT_BE_FOUND_ID: - raise exception.QoSSpecsNotFound(specs_id=id) - elif id == fake.RAISE_ID: - raise exception.CinderException() - - return stub_qos_associates(id) - - -def return_associate_qos_specs(context, id, type_id): - if id == fake.WILL_NOT_BE_FOUND_ID: - raise exception.QoSSpecsNotFound(specs_id=id) - elif id == fake.ACTION_FAILED_ID: - raise exception.QoSSpecsAssociateFailed(specs_id=id, - type_id=type_id) - elif id == fake.ACTION2_FAILED_ID: - raise exception.QoSSpecsDisassociateFailed(specs_id=id, - type_id=type_id) - - if type_id == fake.WILL_NOT_BE_FOUND_ID: - raise exception.VolumeTypeNotFound( - volume_type_id=type_id) - - pass - - -def return_disassociate_all(context, id): - if id == fake.WILL_NOT_BE_FOUND_ID: - raise exception.QoSSpecsNotFound(specs_id=id) - elif id == fake.ACTION2_FAILED_ID: - raise exception.QoSSpecsDisassociateFailed(specs_id=id, - type_id=None) - - -@ddt.ddt -class QoSSpecManageApiTest(test.TestCase): - - def _create_qos_specs(self, name, values=None): - """Create a transfer object.""" - if values: - specs = dict(name=name, qos_specs=values) - else: - specs = {'name': name, - 'consumer': 'back-end', - 'specs': { - 'key1': 'value1', - 'key2': 'value2'}} - return db.qos_specs_create(self.ctxt, specs)['id'] - - def setUp(self): - super(QoSSpecManageApiTest, self).setUp() - 
self.flags(host='fake') - self.controller = qos_specs_manage.QoSSpecsController() - self.ctxt = context.RequestContext(user_id=fake.USER_ID, - project_id=fake.PROJECT_ID, - is_admin=True) - self.user_ctxt = context.RequestContext( - fake.USER_ID, fake.PROJECT_ID, auth_token=True) - self.qos_id1 = self._create_qos_specs("Qos_test_1") - self.qos_id2 = self._create_qos_specs("Qos_test_2") - self.qos_id3 = self._create_qos_specs("Qos_test_3") - self.qos_id4 = self._create_qos_specs("Qos_test_4") - - @mock.patch('cinder.volume.qos_specs.get_all_specs', - side_effect=return_qos_specs_get_all) - def test_index(self, mock_get_all_specs): - req = fakes.HTTPRequest.blank('/v2/%s/qos-specs' % fake.PROJECT_ID, - use_admin_context=True) - res = self.controller.index(req) - - self.assertEqual(3, len(res['qos_specs'])) - - names = set() - for item in res['qos_specs']: - self.assertEqual('value1', item['specs']['key1']) - names.add(item['name']) - expected_names = ['qos_specs_%s' % fake.QOS_SPEC_ID, - 'qos_specs_%s' % fake.QOS_SPEC2_ID, - 'qos_specs_%s' % fake.QOS_SPEC3_ID] - self.assertEqual(set(expected_names), names) - - def test_index_with_limit(self): - url = '/v2/%s/qos-specs?limit=2' % fake.PROJECT_ID - req = fakes.HTTPRequest.blank(url, use_admin_context=True) - res = self.controller.index(req) - - self.assertEqual(2, len(res['qos_specs'])) - self.assertEqual(self.qos_id4, res['qos_specs'][0]['id']) - self.assertEqual(self.qos_id3, res['qos_specs'][1]['id']) - - expect_next_link = ('http://localhost/v2/%s/qos-specs?limit' - '=2&marker=%s') % ( - fake.PROJECT_ID, res['qos_specs'][1]['id']) - self.assertEqual(expect_next_link, res['qos_specs_links'][0]['href']) - - def test_index_with_offset(self): - url = '/v2/%s/qos-specs?offset=1' % fake.PROJECT_ID - req = fakes.HTTPRequest.blank(url, use_admin_context=True) - res = self.controller.index(req) - - self.assertEqual(3, len(res['qos_specs'])) - - def test_index_with_offset_out_of_range(self): - url = 
'/v2/%s/qos-specs?offset=356576877698707' % fake.PROJECT_ID - req = fakes.HTTPRequest.blank(url, use_admin_context=True) - self.assertRaises(webob.exc.HTTPBadRequest, self.controller.index, - req) - - def test_index_with_limit_and_offset(self): - url = '/v2/%s/qos-specs?limit=2&offset=1' % fake.PROJECT_ID - req = fakes.HTTPRequest.blank(url, use_admin_context=True) - res = self.controller.index(req) - - self.assertEqual(2, len(res['qos_specs'])) - self.assertEqual(self.qos_id3, res['qos_specs'][0]['id']) - self.assertEqual(self.qos_id2, res['qos_specs'][1]['id']) - - def test_index_with_marker(self): - url = '/v2/%s/qos-specs?marker=%s' % (fake.PROJECT_ID, self.qos_id4) - req = fakes.HTTPRequest.blank(url, use_admin_context=True) - res = self.controller.index(req) - - self.assertEqual(3, len(res['qos_specs'])) - - def test_index_with_filter(self): - url = '/v2/%s/qos-specs?id=%s' % (fake.PROJECT_ID, self.qos_id4) - req = fakes.HTTPRequest.blank(url, use_admin_context=True) - res = self.controller.index(req) - - self.assertEqual(1, len(res['qos_specs'])) - self.assertEqual(self.qos_id4, res['qos_specs'][0]['id']) - - def test_index_with_sort_keys(self): - url = '/v2/%s/qos-specs?sort=id' % fake.PROJECT_ID - req = fakes.HTTPRequest.blank(url, use_admin_context=True) - res = self.controller.index(req) - self.assertEqual(4, len(res['qos_specs'])) - expect_result = [self.qos_id1, self.qos_id2, - self.qos_id3, self.qos_id4] - expect_result.sort(reverse=True) - - self.assertEqual(expect_result[0], res['qos_specs'][0]['id']) - self.assertEqual(expect_result[1], res['qos_specs'][1]['id']) - self.assertEqual(expect_result[2], res['qos_specs'][2]['id']) - self.assertEqual(expect_result[3], res['qos_specs'][3]['id']) - - def test_index_with_sort_keys_and_sort_dirs(self): - url = '/v2/%s/qos-specs?sort=id:asc' % fake.PROJECT_ID - req = fakes.HTTPRequest.blank(url, use_admin_context=True) - res = self.controller.index(req) - self.assertEqual(4, len(res['qos_specs'])) - 
expect_result = [self.qos_id1, self.qos_id2, - self.qos_id3, self.qos_id4] - expect_result.sort() - - self.assertEqual(expect_result[0], res['qos_specs'][0]['id']) - self.assertEqual(expect_result[1], res['qos_specs'][1]['id']) - self.assertEqual(expect_result[2], res['qos_specs'][2]['id']) - self.assertEqual(expect_result[3], res['qos_specs'][3]['id']) - - @mock.patch('cinder.volume.qos_specs.get_qos_specs', - side_effect=return_qos_specs_get_qos_specs) - @mock.patch('cinder.volume.qos_specs.delete', - side_effect=return_qos_specs_delete) - def test_qos_specs_delete(self, mock_qos_delete, mock_qos_get_specs): - req = fakes.HTTPRequest.blank('/v2/%s/qos-specs/%s' % ( - fake.PROJECT_ID, fake.QOS_SPEC_ID), use_admin_context=True) - notifier = fake_notifier.get_fake_notifier() - with mock.patch('cinder.rpc.get_notifier', return_value=notifier): - self.controller.delete(req, fake.QOS_SPEC_ID) - self.assertEqual(1, notifier.get_notification_count()) - - @mock.patch('cinder.volume.qos_specs.get_qos_specs', - side_effect=return_qos_specs_get_qos_specs) - @mock.patch('cinder.volume.qos_specs.delete', - side_effect=return_qos_specs_delete) - def test_qos_specs_delete_not_found(self, mock_qos_delete, - mock_qos_get_specs): - notifier = fake_notifier.get_fake_notifier() - with mock.patch('cinder.rpc.get_notifier', return_value=notifier): - req = fakes.HTTPRequest.blank('/v2/%s/qos-specs/%s' % - (fake.PROJECT_ID, - fake.WILL_NOT_BE_FOUND_ID), - use_admin_context=True) - self.assertRaises(exception.QoSSpecsNotFound, - self.controller.delete, req, - fake.WILL_NOT_BE_FOUND_ID) - self.assertEqual(1, notifier.get_notification_count()) - - @mock.patch('cinder.volume.qos_specs.get_qos_specs', - side_effect=return_qos_specs_get_qos_specs) - @mock.patch('cinder.volume.qos_specs.delete', - side_effect=return_qos_specs_delete) - def test_qos_specs_delete_inuse(self, mock_qos_delete, - mock_qos_get_specs): - req = fakes.HTTPRequest.blank('/v2/%s/qos-specs/%s' % ( - fake.PROJECT_ID, 
fake.IN_USE_ID), use_admin_context=True) - - notifier = fake_notifier.get_fake_notifier() - with mock.patch('cinder.rpc.get_notifier', return_value=notifier): - self.assertRaises(webob.exc.HTTPBadRequest, self.controller.delete, - req, fake.IN_USE_ID) - self.assertEqual(1, notifier.get_notification_count()) - - @mock.patch('cinder.volume.qos_specs.get_qos_specs', - side_effect=return_qos_specs_get_qos_specs) - @mock.patch('cinder.volume.qos_specs.delete', - side_effect=return_qos_specs_delete) - def test_qos_specs_delete_inuse_force(self, mock_qos_delete, - mock_qos_get_specs): - req = fakes.HTTPRequest.blank('/v2/%s/qos-specs/%s?force=True' % - (fake.PROJECT_ID, fake.IN_USE_ID), - use_admin_context=True) - - notifier = fake_notifier.get_fake_notifier() - with mock.patch('cinder.rpc.get_notifier', return_value=notifier): - self.assertRaises(webob.exc.HTTPInternalServerError, - self.controller.delete, - req, fake.IN_USE_ID) - self.assertEqual(1, notifier.get_notification_count()) - - def test_qos_specs_delete_with_invalid_force(self): - invalid_force = "invalid_bool" - req = fakes.HTTPRequest.blank( - '/v2/%s/qos-specs/%s/delete_keys?force=%s' % - (fake.PROJECT_ID, fake.QOS_SPEC_ID, invalid_force), - use_admin_context=True) - - self.assertRaises(exception.InvalidParameterValue, - self.controller.delete, - req, fake.QOS_SPEC_ID) - - @mock.patch('cinder.volume.qos_specs.delete_keys', - side_effect=return_qos_specs_delete_keys) - def test_qos_specs_delete_keys(self, mock_qos_delete_keys): - body = {"keys": ['bar', 'zoo']} - req = fakes.HTTPRequest.blank('/v2/%s/qos-specs/%s/delete_keys' % - (fake.PROJECT_ID, fake.IN_USE_ID), - use_admin_context=True) - - notifier = fake_notifier.get_fake_notifier() - with mock.patch('cinder.rpc.get_notifier', return_value=notifier): - self.controller.delete_keys(req, fake.IN_USE_ID, body) - self.assertEqual(1, notifier.get_notification_count()) - - @mock.patch('cinder.volume.qos_specs.delete_keys', - 
side_effect=return_qos_specs_delete_keys) - def test_qos_specs_delete_keys_qos_notfound(self, mock_qos_specs_delete): - body = {"keys": ['bar', 'zoo']} - req = fakes.HTTPRequest.blank('/v2/%s/qos-specs/%s/delete_keys' % - (fake.PROJECT_ID, - fake.WILL_NOT_BE_FOUND_ID), - use_admin_context=True) - - notifier = fake_notifier.get_fake_notifier() - with mock.patch('cinder.rpc.get_notifier', return_value=notifier): - self.assertRaises(exception.QoSSpecsNotFound, - self.controller.delete_keys, - req, fake.WILL_NOT_BE_FOUND_ID, body) - self.assertEqual(1, notifier.get_notification_count()) - - @mock.patch('cinder.volume.qos_specs.delete_keys', - side_effect=return_qos_specs_delete_keys) - def test_qos_specs_delete_keys_badkey(self, mock_qos_specs_delete): - req = fakes.HTTPRequest.blank('/v2/%s/qos-specs/%s/delete_keys' % - (fake.PROJECT_ID, fake.IN_USE_ID), - use_admin_context=True) - body = {"keys": ['foo', 'zoo']} - - notifier = fake_notifier.get_fake_notifier() - with mock.patch('cinder.rpc.get_notifier', return_value=notifier): - self.assertRaises(exception.QoSSpecsKeyNotFound, - self.controller.delete_keys, - req, fake.IN_USE_ID, body) - self.assertEqual(1, notifier.get_notification_count()) - - @mock.patch('cinder.volume.qos_specs.delete_keys', - side_effect=return_qos_specs_delete_keys) - def test_qos_specs_delete_keys_get_notifier(self, mock_qos_delete_keys): - body = {"keys": ['bar', 'zoo']} - req = fakes.HTTPRequest.blank('/v2/%s/qos-specs/%s/delete_keys' % - (fake.PROJECT_ID, fake.IN_USE_ID), - use_admin_context=True) - - notifier = fake_notifier.get_fake_notifier() - with mock.patch('cinder.rpc.get_notifier', return_value=notifier, - autospec=True) as mock_get_notifier: - self.controller.delete_keys(req, fake.IN_USE_ID, body) - mock_get_notifier.assert_called_once_with('QoSSpecs') - - @mock.patch('cinder.volume.qos_specs.create', - side_effect=return_qos_specs_create) - @mock.patch('cinder.utils.validate_dictionary_string_length') - def test_create(self, 
mock_validate, mock_qos_spec_create): - - body = {"qos_specs": {"name": "qos_specs_%s" % fake.QOS_SPEC_ID, - "key1": "value1"}} - req = fakes.HTTPRequest.blank('/v2/%s/qos-specs' % - fake.PROJECT_ID, - use_admin_context=True) - - notifier = fake_notifier.get_fake_notifier() - with mock.patch('cinder.rpc.get_notifier', return_value=notifier): - res_dict = self.controller.create(req, body) - - self.assertEqual(1, notifier.get_notification_count()) - self.assertEqual('qos_specs_%s' % fake.QOS_SPEC_ID, - res_dict['qos_specs']['name']) - self.assertTrue(mock_validate.called) - - @mock.patch('cinder.volume.qos_specs.create', - side_effect=return_qos_specs_create) - def test_create_invalid_input(self, mock_qos_get_specs): - body = {"qos_specs": {"name": 'qos_spec_%s' % fake.INVALID_ID, - "consumer": "invalid_consumer"}} - req = fakes.HTTPRequest.blank('/v2/%s/qos-specs' % fake.PROJECT_ID, - use_admin_context=True) - - notifier = fake_notifier.get_fake_notifier() - with mock.patch('cinder.rpc.get_notifier', return_value=notifier): - self.assertRaises(webob.exc.HTTPBadRequest, - self.controller.create, req, body) - self.assertEqual(1, notifier.get_notification_count()) - - @mock.patch('cinder.volume.qos_specs.create', - side_effect=return_qos_specs_create) - def test_create_conflict(self, mock_qos_spec_create): - body = {"qos_specs": {"name": 'qos_spec_%s' % fake.ALREADY_EXISTS_ID, - "key1": "value1"}} - req = fakes.HTTPRequest.blank('/v2/%s/qos-specs' % fake.PROJECT_ID, - use_admin_context=True) - - notifier = fake_notifier.get_fake_notifier() - with mock.patch('cinder.rpc.get_notifier', return_value=notifier): - self.assertRaises(webob.exc.HTTPConflict, - self.controller.create, req, body) - self.assertEqual(1, notifier.get_notification_count()) - - @mock.patch('cinder.volume.qos_specs.create', - side_effect=return_qos_specs_create) - def test_create_failed(self, mock_qos_spec_create): - body = {"qos_specs": {"name": 'qos_spec_%s' % fake.ACTION_FAILED_ID, - "key1": 
"value1"}} - req = fakes.HTTPRequest.blank('/v2/%s/qos-specs' % fake.PROJECT_ID, - use_admin_context=True) - - notifier = fake_notifier.get_fake_notifier() - with mock.patch('cinder.rpc.get_notifier', return_value=notifier): - self.assertRaises(webob.exc.HTTPInternalServerError, - self.controller.create, req, body) - self.assertEqual(1, notifier.get_notification_count()) - - @ddt.data({'foo': {'a': 'b'}}, - {'qos_specs': {'a': 'b'}}, - {'qos_specs': 'string'}, - None) - def test_create_invalid_body_bad_request(self, body): - req = fakes.HTTPRequest.blank('/v2/%s/qos-specs' % fake.PROJECT_ID, - use_admin_context=True) - req.method = 'POST' - self.assertRaises(webob.exc.HTTPBadRequest, - self.controller.create, req, body) - - @ddt.data({'name': 'fake_name', 'a' * 256: 'a'}, - {'name': 'fake_name', 'a': 'a' * 256}, - {'name': 'fake_name', '': 'a'}) - def test_create_qos_with_invalid_specs(self, value): - body = {'qos_specs': value} - req = fakes.HTTPRequest.blank('/v2/%s/qos-specs' % fake.PROJECT_ID, - use_admin_context=True) - req.method = 'POST' - self.assertRaises(exception.InvalidInput, - self.controller.create, req, body) - - @ddt.data({'name': None}, - {'name': 'n' * 256}, - {'name': ''}, - {'name': ' '}) - def test_create_qos_with_invalid_spec_name(self, value): - body = {'qos_specs': value} - req = fakes.HTTPRequest.blank('/v2/%s/qos-specs' % fake.PROJECT_ID, - use_admin_context=True) - req.method = 'POST' - self.assertRaises(webob.exc.HTTPBadRequest, - self.controller.create, req, body) - - @mock.patch('cinder.volume.qos_specs.update', - side_effect=return_qos_specs_update) - def test_update(self, mock_qos_update): - notifier = fake_notifier.get_fake_notifier() - with mock.patch('cinder.rpc.get_notifier', return_value=notifier): - req = fakes.HTTPRequest.blank('/v2/%s/qos-specs/%s' % - (fake.PROJECT_ID, fake.QOS_SPEC_ID), - use_admin_context=True) - body = {'qos_specs': {'key1': 'value1', - 'key2': 'value2'}} - res = self.controller.update(req, 
fake.QOS_SPEC_ID, body) - self.assertDictEqual(body, res) - self.assertEqual(1, notifier.get_notification_count()) - - @mock.patch('cinder.volume.qos_specs.update', - side_effect=return_qos_specs_update) - def test_update_not_found(self, mock_qos_update): - notifier = fake_notifier.get_fake_notifier() - with mock.patch('cinder.rpc.get_notifier', return_value=notifier): - req = fakes.HTTPRequest.blank('/v2/%s/qos-specs/%s' % - (fake.PROJECT_ID, - fake.WILL_NOT_BE_FOUND_ID), - use_admin_context=True) - body = {'qos_specs': {'key1': 'value1', - 'key2': 'value2'}} - self.assertRaises(exception.QoSSpecsNotFound, - self.controller.update, - req, fake.WILL_NOT_BE_FOUND_ID, body) - self.assertEqual(1, notifier.get_notification_count()) - - @mock.patch('cinder.volume.qos_specs.update', - side_effect=return_qos_specs_update) - def test_update_invalid_input(self, mock_qos_update): - notifier = fake_notifier.get_fake_notifier() - with mock.patch('cinder.rpc.get_notifier', return_value=notifier): - req = fakes.HTTPRequest.blank('/v2/%s/qos-specs/%s' % - (fake.PROJECT_ID, fake.INVALID_ID), - use_admin_context=True) - body = {'qos_specs': {'key1': 'value1', - 'key2': 'value2'}} - self.assertRaises(exception.InvalidQoSSpecs, - self.controller.update, - req, fake.INVALID_ID, body) - self.assertEqual(1, notifier.get_notification_count()) - - @mock.patch('cinder.volume.qos_specs.update', - side_effect=return_qos_specs_update) - def test_update_failed(self, mock_qos_update): - notifier = fake_notifier.get_fake_notifier() - with mock.patch('cinder.rpc.get_notifier', return_value=notifier): - req = fakes.HTTPRequest.blank('/v2/%s/qos-specs/%s' % - (fake.PROJECT_ID, - fake.UPDATE_FAILED_ID), - use_admin_context=True) - body = {'qos_specs': {'key1': 'value1', - 'key2': 'value2'}} - self.assertRaises(webob.exc.HTTPInternalServerError, - self.controller.update, - req, fake.UPDATE_FAILED_ID, body) - self.assertEqual(1, notifier.get_notification_count()) - - 
@mock.patch('cinder.volume.qos_specs.get_qos_specs', - side_effect=return_qos_specs_get_qos_specs) - def test_show(self, mock_get_qos_specs): - req = fakes.HTTPRequest.blank('/v2/%s/qos-specs/%s' % ( - fake.PROJECT_ID, fake.QOS_SPEC_ID), use_admin_context=True) - res_dict = self.controller.show(req, fake.QOS_SPEC_ID) - - self.assertEqual(fake.QOS_SPEC_ID, res_dict['qos_specs']['id']) - self.assertEqual('qos_specs_%s' % fake.QOS_SPEC_ID, - res_dict['qos_specs']['name']) - - @mock.patch('cinder.volume.qos_specs.get_associations', - side_effect=return_get_qos_associations) - def test_get_associations(self, mock_get_assciations): - req = fakes.HTTPRequest.blank( - '/v2/%s/qos-specs/%s/associations' % ( - fake.PROJECT_ID, fake.QOS_SPEC_ID), use_admin_context=True) - res = self.controller.associations(req, fake.QOS_SPEC_ID) - - self.assertEqual('FakeVolTypeName', - res['qos_associations'][0]['name']) - self.assertEqual(fake.VOLUME_TYPE_ID, - res['qos_associations'][0]['id']) - - @mock.patch('cinder.volume.qos_specs.get_associations', - side_effect=return_get_qos_associations) - def test_get_associations_not_found(self, mock_get_assciations): - req = fakes.HTTPRequest.blank( - '/v2/%s/qos-specs/%s/associations' % - (fake.PROJECT_ID, fake.WILL_NOT_BE_FOUND_ID), - use_admin_context=True) - self.assertRaises(exception.QoSSpecsNotFound, - self.controller.associations, - req, fake.WILL_NOT_BE_FOUND_ID) - - @mock.patch('cinder.volume.qos_specs.get_associations', - side_effect=return_get_qos_associations) - def test_get_associations_failed(self, mock_get_associations): - req = fakes.HTTPRequest.blank( - '/v2/%s/qos-specs/%s/associations' % ( - fake.PROJECT_ID, fake.RAISE_ID), use_admin_context=True) - self.assertRaises(webob.exc.HTTPInternalServerError, - self.controller.associations, - req, fake.RAISE_ID) - - @mock.patch('cinder.volume.qos_specs.get_qos_specs', - side_effect=return_qos_specs_get_qos_specs) - @mock.patch('cinder.volume.qos_specs.associate_qos_with_type', - 
side_effect=return_associate_qos_specs) - def test_associate(self, mock_associate, mock_get_qos): - req = fakes.HTTPRequest.blank( - '/v2/%s/qos-specs/%s/associate?vol_type_id=%s' % - (fake.PROJECT_ID, fake.QOS_SPEC_ID, fake.VOLUME_TYPE_ID), - use_admin_context=True) - res = self.controller.associate(req, fake.QOS_SPEC_ID) - - self.assertEqual(http_client.ACCEPTED, res.status_int) - - @mock.patch('cinder.volume.qos_specs.get_qos_specs', - side_effect=return_qos_specs_get_qos_specs) - @mock.patch('cinder.volume.qos_specs.associate_qos_with_type', - side_effect=return_associate_qos_specs) - def test_associate_no_type(self, mock_associate, mock_get_qos): - req = fakes.HTTPRequest.blank('/v2/%s/qos-specs/%s/associate' % - (fake.PROJECT_ID, fake.QOS_SPEC_ID), - use_admin_context=True) - self.assertRaises(webob.exc.HTTPBadRequest, - self.controller.associate, req, fake.QOS_SPEC_ID) - - @mock.patch('cinder.volume.qos_specs.get_qos_specs', - side_effect=return_qos_specs_get_qos_specs) - @mock.patch('cinder.volume.qos_specs.associate_qos_with_type', - side_effect=return_associate_qos_specs) - def test_associate_not_found(self, mock_associate, mock_get_qos): - req = fakes.HTTPRequest.blank( - '/v2/%s/qos-specs/%s/associate?vol_type_id=%s' % ( - fake.PROJECT_ID, fake.WILL_NOT_BE_FOUND_ID, - fake.VOLUME_TYPE_ID), use_admin_context=True) - self.assertRaises(exception.QoSSpecsNotFound, - self.controller.associate, req, - fake.WILL_NOT_BE_FOUND_ID) - - req = fakes.HTTPRequest.blank( - '/v2/%s/qos-specs/%s/associate?vol_type_id=%s' % - (fake.PROJECT_ID, fake.QOS_SPEC_ID, fake.WILL_NOT_BE_FOUND_ID), - use_admin_context=True) - - self.assertRaises(exception.VolumeTypeNotFound, - self.controller.associate, req, fake.QOS_SPEC_ID) - - @mock.patch('cinder.volume.qos_specs.get_qos_specs', - side_effect=return_qos_specs_get_qos_specs) - @mock.patch('cinder.volume.qos_specs.associate_qos_with_type', - side_effect=return_associate_qos_specs) - def test_associate_fail(self, mock_associate, 
mock_get_qos): - req = fakes.HTTPRequest.blank( - '/v2/%s/qos-specs/%s/associate?vol_type_id=%s' % - (fake.PROJECT_ID, fake.ACTION_FAILED_ID, fake.VOLUME_TYPE_ID), - use_admin_context=True) - self.assertRaises(webob.exc.HTTPInternalServerError, - self.controller.associate, req, - fake.ACTION_FAILED_ID) - - @mock.patch('cinder.volume.qos_specs.get_qos_specs', - side_effect=return_qos_specs_get_qos_specs) - @mock.patch('cinder.volume.qos_specs.disassociate_qos_specs', - side_effect=return_associate_qos_specs) - def test_disassociate(self, mock_disassociate, mock_get_qos): - req = fakes.HTTPRequest.blank( - '/v2/%s/qos-specs/%s/disassociate?vol_type_id=%s' % ( - fake.PROJECT_ID, fake.QOS_SPEC_ID, fake.VOLUME_TYPE_ID), - use_admin_context=True) - res = self.controller.disassociate(req, fake.QOS_SPEC_ID) - self.assertEqual(http_client.ACCEPTED, res.status_int) - - @mock.patch('cinder.volume.qos_specs.get_qos_specs', - side_effect=return_qos_specs_get_qos_specs) - @mock.patch('cinder.volume.qos_specs.disassociate_qos_specs', - side_effect=return_associate_qos_specs) - def test_disassociate_no_type(self, mock_disassociate, mock_get_qos): - req = fakes.HTTPRequest.blank( - '/v2/%s/qos-specs/%s/disassociate' % ( - fake.PROJECT_ID, fake.QOS_SPEC_ID), use_admin_context=True) - - self.assertRaises(webob.exc.HTTPBadRequest, - self.controller.disassociate, req, fake.QOS_SPEC_ID) - - @mock.patch('cinder.volume.qos_specs.get_qos_specs', - side_effect=return_qos_specs_get_qos_specs) - @mock.patch('cinder.volume.qos_specs.disassociate_qos_specs', - side_effect=return_associate_qos_specs) - def test_disassociate_not_found(self, mock_disassociate, mock_get_qos): - req = fakes.HTTPRequest.blank( - '/v2/%s/qos-specs/%s/disassociate?vol_type_id=%s' % ( - fake.PROJECT_ID, fake.WILL_NOT_BE_FOUND_ID, - fake.VOLUME_TYPE_ID), use_admin_context=True) - self.assertRaises(exception.QoSSpecsNotFound, - self.controller.disassociate, req, - fake.WILL_NOT_BE_FOUND_ID) - - req = 
fakes.HTTPRequest.blank( - '/v2/%s/qos-specs/%s/disassociate?vol_type_id=%s' % - (fake.PROJECT_ID, fake.VOLUME_TYPE_ID, fake.WILL_NOT_BE_FOUND_ID), - use_admin_context=True) - self.assertRaises(exception.VolumeTypeNotFound, - self.controller.disassociate, req, - fake.VOLUME_TYPE_ID) - - @mock.patch('cinder.volume.qos_specs.get_qos_specs', - side_effect=return_qos_specs_get_qos_specs) - @mock.patch('cinder.volume.qos_specs.disassociate_qos_specs', - side_effect=return_associate_qos_specs) - def test_disassociate_failed(self, mock_disassociate, mock_get_qos): - req = fakes.HTTPRequest.blank( - '/v2/%s/qos-specs/%s/disassociate?vol_type_id=%s' % ( - fake.PROJECT_ID, fake.ACTION2_FAILED_ID, fake.VOLUME_TYPE_ID), - use_admin_context=True) - self.assertRaises(webob.exc.HTTPInternalServerError, - self.controller.disassociate, req, - fake.ACTION2_FAILED_ID) - - @mock.patch('cinder.volume.qos_specs.get_qos_specs', - side_effect=return_qos_specs_get_qos_specs) - @mock.patch('cinder.volume.qos_specs.disassociate_all', - side_effect=return_disassociate_all) - def test_disassociate_all(self, mock_disassociate, mock_get_qos): - req = fakes.HTTPRequest.blank( - '/v2/%s/qos-specs/%s/disassociate_all' % ( - fake.PROJECT_ID, fake.QOS_SPEC_ID), use_admin_context=True) - res = self.controller.disassociate_all(req, fake.QOS_SPEC_ID) - self.assertEqual(http_client.ACCEPTED, res.status_int) - - @mock.patch('cinder.volume.qos_specs.get_qos_specs', - side_effect=return_qos_specs_get_qos_specs) - @mock.patch('cinder.volume.qos_specs.disassociate_all', - side_effect=return_disassociate_all) - def test_disassociate_all_not_found(self, mock_disassociate, mock_get): - req = fakes.HTTPRequest.blank( - '/v2/%s/qos-specs/%s/disassociate_all' % ( - fake.PROJECT_ID, fake.WILL_NOT_BE_FOUND_ID), - use_admin_context=True) - self.assertRaises(exception.QoSSpecsNotFound, - self.controller.disassociate_all, req, - fake.WILL_NOT_BE_FOUND_ID) - - @mock.patch('cinder.volume.qos_specs.get_qos_specs', - 
side_effect=return_qos_specs_get_qos_specs) - @mock.patch('cinder.volume.qos_specs.disassociate_all', - side_effect=return_disassociate_all) - def test_disassociate_all_failed(self, mock_disassociate, mock_get): - req = fakes.HTTPRequest.blank( - '/v2/%s/qos-specs/%s/disassociate_all' % ( - fake.PROJECT_ID, fake.ACTION2_FAILED_ID), - use_admin_context=True) - self.assertRaises(webob.exc.HTTPInternalServerError, - self.controller.disassociate_all, req, - fake.ACTION2_FAILED_ID) - - def test_index_no_admin_user(self): - req = fakes.HTTPRequest.blank('/v2/%s/qos-specs' % - fake.PROJECT_ID, use_admin_context=False) - self.assertRaises(exception.PolicyNotAuthorized, - self.controller.index, req) - - def test_create_no_admin_user(self): - body = {"qos_specs": {"name": "qos_specs_%s" % fake.QOS_SPEC_ID, - "key1": "value1"}} - req = fakes.HTTPRequest.blank('/v2/%s/qos-specs' % - fake.PROJECT_ID, use_admin_context=False) - self.assertRaises(exception.PolicyNotAuthorized, - self.controller.create, req, body) - - def test_update_no_admin_user(self): - req = fakes.HTTPRequest.blank('/v2/%s/qos-specs/%s' % - (fake.PROJECT_ID, fake.QOS_SPEC_ID), - use_admin_context=False) - body = {'qos_specs': {'key1': 'value1', - 'key2': 'value2'}} - self.assertRaises(exception.PolicyNotAuthorized, - self.controller.update, req, fake.QOS_SPEC_ID, body) - - def test_qos_specs_delete_no_admin_user(self): - req = fakes.HTTPRequest.blank('/v2/%s/qos-specs/%s' % ( - fake.PROJECT_ID, fake.QOS_SPEC_ID), use_admin_context=False) - self.assertRaises(exception.PolicyNotAuthorized, - self.controller.delete, req, fake.QOS_SPEC_ID) diff --git a/cinder/tests/unit/api/contrib/test_quotas.py b/cinder/tests/unit/api/contrib/test_quotas.py deleted file mode 100644 index fcb96c753..000000000 --- a/cinder/tests/unit/api/contrib/test_quotas.py +++ /dev/null @@ -1,1083 +0,0 @@ -# -# Copyright 2013 OpenStack Foundation -# All Rights Reserved. 
-# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -""" -Tests for cinder.api.contrib.quotas.py -""" - - -import mock - -import uuid -import webob.exc - -from cinder.api.contrib import quotas -from cinder import context -from cinder import db -from cinder import exception -from cinder import quota -from cinder import test -from cinder.tests.unit import fake_constants as fake -from cinder.tests.unit import test_db_api - - -from oslo_config import cfg -from oslo_config import fixture as config_fixture - - -CONF = cfg.CONF - - -def make_body(root=True, gigabytes=1000, snapshots=10, - volumes=10, backups=10, backup_gigabytes=1000, - tenant_id=fake.PROJECT_ID, per_volume_gigabytes=-1, groups=10): - resources = {'gigabytes': gigabytes, - 'snapshots': snapshots, - 'volumes': volumes, - 'backups': backups, - 'backup_gigabytes': backup_gigabytes, - 'per_volume_gigabytes': per_volume_gigabytes, - 'groups': groups} - # need to consider preexisting volume types as well - volume_types = db.volume_type_get_all(context.get_admin_context()) - - for volume_type in volume_types: - resources['gigabytes_' + volume_type] = -1 - resources['snapshots_' + volume_type] = -1 - resources['volumes_' + volume_type] = -1 - - if tenant_id: - resources['id'] = tenant_id - if root: - result = {'quota_set': resources} - else: - result = resources - return result - - -def make_subproject_body(root=True, gigabytes=0, snapshots=0, - volumes=0, backups=0, backup_gigabytes=0, - tenant_id=fake.PROJECT_ID, 
per_volume_gigabytes=0): - return make_body(root=root, gigabytes=gigabytes, snapshots=snapshots, - volumes=volumes, backups=backups, - backup_gigabytes=backup_gigabytes, tenant_id=tenant_id, - per_volume_gigabytes=per_volume_gigabytes) - - -class QuotaSetsControllerTestBase(test.TestCase): - - class FakeProject(object): - - def __init__(self, id=fake.PROJECT_ID, parent_id=None, - is_admin_project=False): - self.id = id - self.parent_id = parent_id - self.subtree = None - self.parents = None - self.is_admin_project = is_admin_project - - def setUp(self): - super(QuotaSetsControllerTestBase, self).setUp() - - self.controller = quotas.QuotaSetsController() - - self.req = mock.Mock() - self.req.environ = {'cinder.context': context.get_admin_context()} - self.req.environ['cinder.context'].is_admin = True - self.req.params = {} - - self._create_project_hierarchy() - self.req.environ['cinder.context'].project_id = self.A.id - - get_patcher = mock.patch('cinder.quota_utils.get_project_hierarchy', - self._get_project) - get_patcher.start() - self.addCleanup(get_patcher.stop) - - def _list_projects(context): - return self.project_by_id.values() - - list_patcher = mock.patch('cinder.quota_utils.get_all_projects', - _list_projects) - list_patcher.start() - self.addCleanup(list_patcher.stop) - - self.auth_url = 'http://localhost:5000' - self.fixture = self.useFixture(config_fixture.Config(CONF)) - self.fixture.config(auth_uri=self.auth_url, group='keystone_authtoken') - - def _create_project_hierarchy(self): - r"""Sets an environment used for nested quotas tests. 
- - Create a project hierarchy such as follows: - +-----------+ - | | - | A | - | / \ | - | B C | - | / | - | D | - +-----------+ - """ - self.A = self.FakeProject(id=uuid.uuid4().hex, parent_id=None) - self.B = self.FakeProject(id=uuid.uuid4().hex, parent_id=self.A.id) - self.C = self.FakeProject(id=uuid.uuid4().hex, parent_id=self.A.id) - self.D = self.FakeProject(id=uuid.uuid4().hex, parent_id=self.B.id) - - # update projects subtrees - self.B.subtree = {self.D.id: self.D.subtree} - self.A.subtree = {self.B.id: self.B.subtree, self.C.id: self.C.subtree} - - self.A.parents = None - self.B.parents = {self.A.id: None} - self.C.parents = {self.A.id: None} - self.D.parents = {self.B.id: self.B.parents} - - # project_by_id attribute is used to recover a project based on its id. - self.project_by_id = {self.A.id: self.A, self.B.id: self.B, - self.C.id: self.C, self.D.id: self.D} - - def _get_project(self, context, id, subtree_as_ids=False, - parents_as_ids=False, is_admin_project=False): - return self.project_by_id.get(id, self.FakeProject()) - - def _create_fake_quota_usages(self, usage_map): - self._fake_quota_usages = {} - for key, val in usage_map.items(): - self._fake_quota_usages[key] = {'in_use': val} - - def _fake_quota_usage_get_all_by_project(self, context, project_id): - return {'volumes': self._fake_quota_usages[project_id]} - - -class QuotaSetsControllerTest(QuotaSetsControllerTestBase): - def test_defaults(self): - result = self.controller.defaults(self.req, fake.PROJECT_ID) - self.assertDictEqual(make_body(), result) - - def test_show(self): - result = self.controller.show(self.req, fake.PROJECT_ID) - self.assertDictEqual(make_body(), result) - - def test_show_not_authorized(self): - self.req.environ['cinder.context'].is_admin = False - self.req.environ['cinder.context'].user_id = fake.USER_ID - self.req.environ['cinder.context'].project_id = fake.PROJECT_ID - self.assertRaises(webob.exc.HTTPForbidden, self.controller.show, - self.req, fake.PROJECT2_ID) 
- - def test_show_non_admin_user(self): - self.controller._get_quotas = mock.Mock(side_effect= - self.controller._get_quotas) - result = self.controller.show(self.req, fake.PROJECT_ID) - self.assertDictEqual(make_body(), result) - self.controller._get_quotas.assert_called_with( - self.req.environ['cinder.context'], fake.PROJECT_ID, False) - - def test_show_with_invalid_usage_param(self): - self.req.params = {'usage': 'InvalidBool'} - self.assertRaises(exception.InvalidParameterValue, - self.controller.show, - self.req, fake.PROJECT2_ID) - - def test_show_with_valid_usage_param(self): - self.req.params = {'usage': 'false'} - result = self.controller.show(self.req, fake.PROJECT_ID) - self.assertDictEqual(make_body(), result) - - def test_update(self): - body = make_body(gigabytes=2000, snapshots=15, - volumes=5, backups=5, tenant_id=None) - result = self.controller.update(self.req, fake.PROJECT_ID, body) - self.assertDictEqual(body, result) - - body = make_body(gigabytes=db.MAX_INT, tenant_id=None) - result = self.controller.update(self.req, fake.PROJECT_ID, body) - self.assertDictEqual(body, result) - - def test_update_subproject_not_in_hierarchy_non_nested(self): - # When not using nested quotas, the hierarchy should not be considered - # for an update - E = self.FakeProject(id=uuid.uuid4().hex, parent_id=None) - F = self.FakeProject(id=uuid.uuid4().hex, parent_id=E.id) - E.subtree = {F.id: F.subtree} - self.project_by_id[E.id] = E - self.project_by_id[F.id] = F - - # Update the project A quota. 
- self.req.environ['cinder.context'].project_id = self.A.id - body = make_body(gigabytes=2000, snapshots=15, - volumes=5, backups=5, tenant_id=None) - result = self.controller.update(self.req, self.A.id, body) - self.assertDictEqual(body, result) - # Try to update the quota of F, it will be allowed even though - # project E doesn't belong to the project hierarchy of A, because - # we are NOT using the nested quota driver - self.req.environ['cinder.context'].project_id = self.A.id - body = make_body(gigabytes=2000, snapshots=15, - volumes=5, backups=5, tenant_id=None) - self.controller.update(self.req, F.id, body) - - @mock.patch( - 'cinder.api.openstack.wsgi.Controller.validate_string_length') - @mock.patch( - 'cinder.utils.validate_integer') - def test_update_limit(self, mock_validate_integer, mock_validate): - mock_validate_integer.return_value = 10 - - body = {'quota_set': {'volumes': 10}} - result = self.controller.update(self.req, fake.PROJECT_ID, body) - - self.assertEqual(10, result['quota_set']['volumes']) - self.assertTrue(mock_validate.called) - self.assertTrue(mock_validate_integer.called) - - def test_update_wrong_key(self): - body = {'quota_set': {'bad': 'bad'}} - self.assertRaises(webob.exc.HTTPBadRequest, self.controller.update, - self.req, fake.PROJECT_ID, body) - - def test_update_invalid_value_key_value(self): - body = {'quota_set': {'gigabytes': "should_be_int"}} - self.assertRaises(webob.exc.HTTPBadRequest, self.controller.update, - self.req, fake.PROJECT_ID, body) - - def test_update_invalid_type_key_value(self): - body = {'quota_set': {'gigabytes': None}} - self.assertRaises(webob.exc.HTTPBadRequest, self.controller.update, - self.req, fake.PROJECT_ID, body) - - def test_update_multi_value_with_bad_data(self): - orig_quota = self.controller.show(self.req, fake.PROJECT_ID) - body = make_body(gigabytes=2000, snapshots=15, volumes="should_be_int", - backups=5, tenant_id=None) - self.assertRaises(webob.exc.HTTPBadRequest, self.controller.update, - 
self.req, fake.PROJECT_ID, body) - # Verify that quota values are not updated in db - new_quota = self.controller.show(self.req, fake.PROJECT_ID) - self.assertDictEqual(orig_quota, new_quota) - - def test_update_bad_quota_limit(self): - body = {'quota_set': {'gigabytes': -1000}} - self.assertRaises(webob.exc.HTTPBadRequest, self.controller.update, - self.req, fake.PROJECT_ID, body) - body = {'quota_set': {'gigabytes': db.MAX_INT + 1}} - self.assertRaises(webob.exc.HTTPBadRequest, self.controller.update, - self.req, fake.PROJECT_ID, body) - - def test_update_no_admin(self): - self.req.environ['cinder.context'].is_admin = False - self.req.environ['cinder.context'].project_id = fake.PROJECT_ID - self.req.environ['cinder.context'].user_id = 'foo_user' - self.assertRaises(exception.PolicyNotAuthorized, - self.controller.update, self.req, fake.PROJECT_ID, - make_body(tenant_id=None)) - - def test_update_without_quota_set_field(self): - body = {'fake_quota_set': {'gigabytes': 100}} - self.assertRaises(webob.exc.HTTPBadRequest, self.controller.update, - self.req, fake.PROJECT_ID, body) - - def test_update_empty_body(self): - body = {} - self.assertRaises(webob.exc.HTTPBadRequest, self.controller.update, - self.req, fake.PROJECT_ID, body) - - def _commit_quota_reservation(self): - # Create simple quota and quota usage. 
- ctxt = context.get_admin_context() - res = test_db_api._quota_reserve(ctxt, fake.PROJECT_ID) - db.reservation_commit(ctxt, res, fake.PROJECT_ID) - expected = {'project_id': fake.PROJECT_ID, - 'volumes': {'reserved': 0, 'in_use': 1}, - 'gigabytes': {'reserved': 0, 'in_use': 2}, - } - self.assertEqual(expected, - db.quota_usage_get_all_by_project(ctxt, - fake.PROJECT_ID)) - - def test_update_lower_than_existing_resources_when_skip_false(self): - self._commit_quota_reservation() - body = {'quota_set': {'volumes': 0}, - 'skip_validation': 'false'} - self.assertRaises(webob.exc.HTTPBadRequest, self.controller.update, - self.req, fake.PROJECT_ID, body) - # Ensure that validation works even if some resources are valid - body = {'quota_set': {'gigabytes': 1, 'volumes': 10}, - 'skip_validation': 'false'} - self.assertRaises(webob.exc.HTTPBadRequest, self.controller.update, - self.req, fake.PROJECT_ID, body) - - def test_update_lower_than_existing_resources_when_skip_true(self): - self._commit_quota_reservation() - body = {'quota_set': {'volumes': 0}, - 'skip_validation': 'true'} - result = self.controller.update(self.req, fake.PROJECT_ID, body) - self.assertEqual(body['quota_set']['volumes'], - result['quota_set']['volumes']) - - def test_update_lower_than_existing_resources_without_skip_argument(self): - self._commit_quota_reservation() - body = {'quota_set': {'volumes': 0}} - result = self.controller.update(self.req, fake.PROJECT_ID, body) - self.assertEqual(body['quota_set']['volumes'], - result['quota_set']['volumes']) - - def test_delete(self): - result_show = self.controller.show(self.req, fake.PROJECT_ID) - self.assertDictEqual(make_body(), result_show) - - body = make_body(gigabytes=2000, snapshots=15, - volumes=5, backups=5, - backup_gigabytes=1000, tenant_id=None) - result_update = self.controller.update(self.req, fake.PROJECT_ID, body) - self.assertDictEqual(body, result_update) - - self.controller.delete(self.req, fake.PROJECT_ID) - - result_show_after = 
self.controller.show(self.req, fake.PROJECT_ID) - self.assertDictEqual(result_show, result_show_after) - - def test_delete_with_allocated_quota_different_from_zero(self): - self.req.environ['cinder.context'].project_id = self.A.id - - body = make_body(gigabytes=2000, snapshots=15, - volumes=5, backups=5, - backup_gigabytes=1000, tenant_id=None) - result_update = self.controller.update(self.req, self.A.id, body) - self.assertDictEqual(body, result_update) - - # Set usage param to True in order to see get allocated values. - self.req.params = {'usage': 'True'} - result_show = self.controller.show(self.req, self.A.id) - - result_update = self.controller.update(self.req, self.B.id, body) - self.assertDictEqual(body, result_update) - - self.controller.delete(self.req, self.B.id) - - result_show_after = self.controller.show(self.req, self.A.id) - self.assertDictEqual(result_show, result_show_after) - - def test_delete_no_admin(self): - self.req.environ['cinder.context'].is_admin = False - self.assertRaises(exception.PolicyNotAuthorized, - self.controller.delete, self.req, fake.PROJECT_ID) - - def test_subproject_show_not_using_nested_quotas(self): - # Current roles say for non-nested quotas, an admin should be able to - # see anyones quota - self.req.environ['cinder.context'].project_id = self.B.id - self.controller.show(self.req, self.C.id) - self.controller.show(self.req, self.A.id) - - -class QuotaSetControllerValidateNestedQuotaSetup(QuotaSetsControllerTestBase): - """Validates the setup before using NestedQuota driver. - - Test case validates flipping on NestedQuota driver after using the - non-nested quota driver for some time. - """ - - def _create_project_hierarchy(self): - r"""Sets an environment used for nested quotas tests. 
- - Create a project hierarchy such as follows: - +-----------------+ - | | - | A G E | - | / \ \ | - | B C F | - | / | - | D | - +-----------------+ - """ - super(QuotaSetControllerValidateNestedQuotaSetup, - self)._create_project_hierarchy() - # Project A, B, C, D are already defined by parent test class - self.E = self.FakeProject(id=uuid.uuid4().hex, parent_id=None) - self.F = self.FakeProject(id=uuid.uuid4().hex, parent_id=self.E.id) - self.G = self.FakeProject(id=uuid.uuid4().hex, parent_id=None) - - self.E.subtree = {self.F.id: self.F.subtree} - - self.project_by_id.update({self.E.id: self.E, self.F.id: self.F, - self.G.id: self.G}) - - def test_validate_nested_quotas_no_in_use_vols(self): - # Update the project A quota. - self.req.environ['cinder.context'].project_id = self.A.id - quota = {'volumes': 5} - body = {'quota_set': quota} - self.controller.update(self.req, self.A.id, body) - - quota['volumes'] = 3 - self.controller.update(self.req, self.B.id, body) - # Allocated value for quota A is borked, because update was done - # without nested quota driver - self.assertRaises(webob.exc.HTTPBadRequest, - self.controller.validate_setup_for_nested_quota_use, - self.req) - - # Fix the allocated values in DB - self.req.params['fix_allocated_quotas'] = True - self.controller.validate_setup_for_nested_quota_use( - self.req) - - self.req.params['fix_allocated_quotas'] = False - # Ensure that we've properly fixed the allocated quotas - self.controller.validate_setup_for_nested_quota_use(self.req) - - # Over-allocate the quotas between children - self.controller.update(self.req, self.C.id, body) - - # This is we should fail because the child limits are too big - self.assertRaises(webob.exc.HTTPBadRequest, - self.controller.validate_setup_for_nested_quota_use, - self.req) - - quota['volumes'] = 1 - self.controller.update(self.req, self.C.id, body) - - # Make sure we're validating all hierarchy trees - self.req.environ['cinder.context'].project_id = self.E.id - 
quota['volumes'] = 1 - self.controller.update(self.req, self.E.id, body) - quota['volumes'] = 3 - self.controller.update(self.req, self.F.id, body) - - self.assertRaises( - webob.exc.HTTPBadRequest, - self.controller.validate_setup_for_nested_quota_use, - self.req) - - # Put quotas in a good state - quota['volumes'] = 1 - self.controller.update(self.req, self.F.id, body) - self.req.params['fix_allocated_quotas'] = True - self.controller.validate_setup_for_nested_quota_use(self.req) - - @mock.patch('cinder.db.quota_usage_get_all_by_project') - def test_validate_nested_quotas_in_use_vols(self, mock_usage): - self._create_fake_quota_usages( - {self.A.id: 1, self.B.id: 1, self.D.id: 0, self.C.id: 3, - self.E.id: 0, self.F.id: 0, self.G.id: 0}) - mock_usage.side_effect = self._fake_quota_usage_get_all_by_project - - # Update the project A quota. - self.req.environ['cinder.context'].project_id = self.A.id - quota_limit = {'volumes': 7} - body = {'quota_set': quota_limit} - self.controller.update(self.req, self.A.id, body) - - quota_limit['volumes'] = 3 - self.controller.update(self.req, self.B.id, body) - - quota_limit['volumes'] = 3 - self.controller.update(self.req, self.C.id, body) - - self.req.params['fix_allocated_quotas'] = True - self.controller.validate_setup_for_nested_quota_use(self.req) - - quota_limit['volumes'] = 6 - self.controller.update(self.req, self.A.id, body) - - # Should fail because the one in_use volume of 'A' - self.assertRaises( - webob.exc.HTTPBadRequest, - self.controller.validate_setup_for_nested_quota_use, - self.req) - - @mock.patch('cinder.db.quota_usage_get_all_by_project') - def test_validate_nested_quotas_quota_borked(self, mock_usage): - self._create_fake_quota_usages( - {self.A.id: 1, self.B.id: 1, self.D.id: 0, self.C.id: 3, - self.E.id: 0, self.F.id: 0, self.G.id: 0}) - mock_usage.side_effect = self._fake_quota_usage_get_all_by_project - - # Update the project A quota. 
- self.req.environ['cinder.context'].project_id = self.A.id - quota_limit = {'volumes': 7} - body = {'quota_set': quota_limit} - self.controller.update(self.req, self.A.id, body) - - # Other quotas would default to 0 but already have some limit being - # used - self.assertRaises( - webob.exc.HTTPBadRequest, - self.controller.validate_setup_for_nested_quota_use, - self.req) - - @mock.patch('cinder.db.quota_usage_get_all_by_project') - def test_validate_nested_quota_negative_limits(self, mock_usage): - # TODO(mc_nair): this test case can be moved to Tempest once nested - # quota coverage added - self._create_fake_quota_usages( - {self.A.id: 1, self.B.id: 3, self.C.id: 0, self.D.id: 2, - self.E.id: 2, self.F.id: 0, self.G.id: 0}) - mock_usage.side_effect = self._fake_quota_usage_get_all_by_project - - # Setting E-F as children of D for this test case to flex the muscles - # of more complex nesting - self.D.subtree = {self.E.id: self.E.subtree} - self.E.parent_id = self.D.id - # Get B's subtree up to date with this change - self.B.subtree[self.D.id] = self.D.subtree - - # Quota hierarchy now is - # / B - D - E - F - # A - # \ C - # - # G - - self.req.environ['cinder.context'].project_id = self.A.id - quota_limit = {'volumes': 10} - body = {'quota_set': quota_limit} - self.controller.update(self.req, self.A.id, body) - - quota_limit['volumes'] = 1 - self.controller.update(self.req, self.C.id, body) - - quota_limit['volumes'] = -1 - self.controller.update(self.req, self.B.id, body) - self.controller.update(self.req, self.D.id, body) - self.controller.update(self.req, self.F.id, body) - quota_limit['volumes'] = 5 - self.controller.update(self.req, self.E.id, body) - - # Should fail because too much is allocated to children for A - self.assertRaises(webob.exc.HTTPBadRequest, - self.controller.validate_setup_for_nested_quota_use, - self.req) - - # When root has -1 limit, children can allocate as much as they want - quota_limit['volumes'] = -1 - 
self.controller.update(self.req, self.A.id, body) - self.req.params['fix_allocated_quotas'] = True - self.controller.validate_setup_for_nested_quota_use(self.req) - - # Not unlimited, but make children's allocated within bounds - quota_limit['volumes'] = 10 - self.controller.update(self.req, self.A.id, body) - quota_limit['volumes'] = 3 - self.controller.update(self.req, self.E.id, body) - self.req.params['fix_allocated_quotas'] = True - self.controller.validate_setup_for_nested_quota_use(self.req) - self.req.params['fix_allocated_quotas'] = False - self.controller.validate_setup_for_nested_quota_use(self.req) - - -class QuotaSetsControllerNestedQuotasTest(QuotaSetsControllerTestBase): - def setUp(self): - super(QuotaSetsControllerNestedQuotasTest, self).setUp() - driver = quota.NestedDbQuotaDriver() - patcher = mock.patch('cinder.quota.VolumeTypeQuotaEngine._driver', - driver) - patcher.start() - self.addCleanup(patcher.stop) - - def test_subproject_defaults(self): - context = self.req.environ['cinder.context'] - context.project_id = self.B.id - result = self.controller.defaults(self.req, self.B.id) - expected = make_subproject_body(tenant_id=self.B.id) - self.assertDictEqual(expected, result) - - def test_subproject_show(self): - self.req.environ['cinder.context'].project_id = self.A.id - result = self.controller.show(self.req, self.B.id) - expected = make_subproject_body(tenant_id=self.B.id) - self.assertDictEqual(expected, result) - - def test_subproject_show_in_hierarchy(self): - # A user scoped to a root project in a hierarchy can see its children - # quotas. - self.req.environ['cinder.context'].project_id = self.A.id - result = self.controller.show(self.req, self.D.id) - expected = make_subproject_body(tenant_id=self.D.id) - self.assertDictEqual(expected, result) - # A user scoped to a parent project can see its immediate children - # quotas. 
- self.req.environ['cinder.context'].project_id = self.B.id - result = self.controller.show(self.req, self.D.id) - expected = make_subproject_body(tenant_id=self.D.id) - self.assertDictEqual(expected, result) - - def test_subproject_show_not_in_hierarchy_admin_context(self): - E = self.FakeProject(id=uuid.uuid4().hex, parent_id=None, - is_admin_project=True) - self.project_by_id[E.id] = E - self.req.environ['cinder.context'].project_id = E.id - result = self.controller.show(self.req, self.B.id) - expected = make_subproject_body(tenant_id=self.B.id) - self.assertDictEqual(expected, result) - - def test_subproject_show_target_project_equals_to_context_project( - self): - self.req.environ['cinder.context'].project_id = self.B.id - result = self.controller.show(self.req, self.B.id) - expected = make_subproject_body(tenant_id=self.B.id) - self.assertDictEqual(expected, result) - - def test_subproject_show_not_authorized(self): - self.req.environ['cinder.context'].project_id = self.B.id - self.assertRaises(webob.exc.HTTPForbidden, self.controller.show, - self.req, self.C.id) - self.req.environ['cinder.context'].project_id = self.B.id - self.assertRaises(webob.exc.HTTPForbidden, self.controller.show, - self.req, self.A.id) - - def test_update_subproject_not_in_hierarchy(self): - # Create another project hierarchy - E = self.FakeProject(id=uuid.uuid4().hex, parent_id=None) - F = self.FakeProject(id=uuid.uuid4().hex, parent_id=E.id) - E.subtree = {F.id: F.subtree} - self.project_by_id[E.id] = E - self.project_by_id[F.id] = F - - # Update the project A quota. - self.req.environ['cinder.context'].project_id = self.A.id - body = make_body(gigabytes=2000, snapshots=15, - volumes=5, backups=5, tenant_id=None) - result = self.controller.update(self.req, self.A.id, body) - self.assertDictEqual(body, result) - # Try to update the quota of F, it will not be allowed, since the - # project E doesn't belongs to the project hierarchy of A. 
- self.req.environ['cinder.context'].project_id = self.A.id - body = make_body(gigabytes=2000, snapshots=15, - volumes=5, backups=5, tenant_id=None) - self.assertRaises(webob.exc.HTTPForbidden, - self.controller.update, self.req, F.id, body) - - def test_update_subproject_not_in_hierarchy_admin_context(self): - E = self.FakeProject(id=uuid.uuid4().hex, parent_id=None, - is_admin_project=True) - self.project_by_id[E.id] = E - self.req.environ['cinder.context'].project_id = E.id - body = make_body(gigabytes=2000, snapshots=15, - volumes=5, backups=5, tenant_id=None) - # Update the project A quota, not in the project hierarchy - # of E but it will be allowed because E is the cloud admin. - result = self.controller.update(self.req, self.A.id, body) - self.assertDictEqual(body, result) - # Update the quota of B to be equal to its parent A. - result = self.controller.update(self.req, self.B.id, body) - self.assertDictEqual(body, result) - # Remove the admin role from project E - E.is_admin_project = False - # Now updating the quota of B will fail, because it is not - # a member of E's hierarchy and E is no longer a cloud admin. - self.assertRaises(webob.exc.HTTPForbidden, - self.controller.update, self.req, self.B.id, body) - - def test_update_subproject(self): - # Update the project A quota. - self.req.environ['cinder.context'].project_id = self.A.id - body = make_body(gigabytes=2000, snapshots=15, - volumes=5, backups=5, tenant_id=None) - result = self.controller.update(self.req, self.A.id, body) - self.assertDictEqual(body, result) - # Update the quota of B to be equal to its parent quota - self.req.environ['cinder.context'].project_id = self.A.id - body = make_body(gigabytes=2000, snapshots=15, - volumes=5, backups=5, tenant_id=None) - result = self.controller.update(self.req, self.B.id, body) - self.assertDictEqual(body, result) - # Try to update the quota of C, it will not be allowed, since the - # project A doesn't have free quota available. 
- self.req.environ['cinder.context'].project_id = self.A.id - body = make_body(gigabytes=2000, snapshots=15, - volumes=5, backups=5, tenant_id=None) - self.assertRaises(webob.exc.HTTPBadRequest, self.controller.update, - self.req, self.C.id, body) - # Successfully update the quota of D. - self.req.environ['cinder.context'].project_id = self.A.id - body = make_body(gigabytes=1000, snapshots=7, - volumes=3, backups=3, tenant_id=None) - result = self.controller.update(self.req, self.D.id, body) - self.assertDictEqual(body, result) - # An admin of B can also update the quota of D, since D is its - # immediate child. - self.req.environ['cinder.context'].project_id = self.B.id - body = make_body(gigabytes=1500, snapshots=10, - volumes=4, backups=4, tenant_id=None) - self.controller.update(self.req, self.D.id, body) - - def test_update_subproject_repetitive(self): - # Update the project A volumes quota. - self.req.environ['cinder.context'].project_id = self.A.id - body = make_body(gigabytes=2000, snapshots=15, - volumes=10, backups=5, tenant_id=None) - result = self.controller.update(self.req, self.A.id, body) - self.assertDictEqual(body, result) - # Update the quota of B to be equal to its parent quota - # three times should be successful, the quota will not be - # allocated to 'allocated' value of parent project - for i in range(0, 3): - self.req.environ['cinder.context'].project_id = self.A.id - body = make_body(gigabytes=2000, snapshots=15, - volumes=10, backups=5, tenant_id=None) - result = self.controller.update(self.req, self.B.id, body) - self.assertDictEqual(body, result) - - def test_update_subproject_with_not_root_context_project(self): - # Update the project A quota. 
- self.req.environ['cinder.context'].project_id = self.A.id - body = make_body(gigabytes=2000, snapshots=15, - volumes=5, backups=5, tenant_id=None) - result = self.controller.update(self.req, self.A.id, body) - self.assertDictEqual(body, result) - # Try to update the quota of B, it will not be allowed, since the - # project in the context (B) is not a root project. - self.req.environ['cinder.context'].project_id = self.B.id - body = make_body(gigabytes=2000, snapshots=15, - volumes=5, backups=5, tenant_id=None) - self.assertRaises(webob.exc.HTTPForbidden, self.controller.update, - self.req, self.B.id, body) - - def test_update_subproject_quota_when_parent_has_default_quotas(self): - # Since the quotas of the project A were not updated, it will have - # default quotas. - self.req.environ['cinder.context'].project_id = self.A.id - # Update the project B quota. - expected = make_body(gigabytes=1000, snapshots=10, - volumes=5, backups=5, tenant_id=None) - result = self.controller.update(self.req, self.B.id, expected) - self.assertDictEqual(expected, result) - - def _assert_quota_show(self, proj_id, resource, in_use=0, reserved=0, - allocated=0, limit=0): - self.req.params = {'usage': 'True'} - show_res = self.controller.show(self.req, proj_id) - expected = {'in_use': in_use, 'reserved': reserved, - 'allocated': allocated, 'limit': limit} - self.assertEqual(expected, show_res['quota_set'][resource]) - - def test_project_allocated_considered_on_reserve(self): - def _reserve(project_id): - quotas.QUOTAS._driver.reserve( - self.req.environ['cinder.context'], quotas.QUOTAS.resources, - {'volumes': 1}, project_id=project_id) - - # A's quota will default to 10 for volumes - quota = {'volumes': 5} - body = {'quota_set': quota} - self.controller.update(self.req, self.B.id, body) - self._assert_quota_show(self.A.id, 'volumes', allocated=5, limit=10) - quota['volumes'] = 3 - self.controller.update(self.req, self.C.id, body) - self._assert_quota_show(self.A.id, 'volumes', 
allocated=8, limit=10) - _reserve(self.A.id) - _reserve(self.A.id) - self.assertRaises(exception.OverQuota, _reserve, self.A.id) - - def test_update_parent_project_lower_than_child(self): - # A's quota will be default of 10 - quota = {'volumes': 10} - body = {'quota_set': quota} - self.controller.update(self.req, self.B.id, body) - quota['volumes'] = 9 - self.assertRaises(webob.exc.HTTPBadRequest, - self.controller.update, self.req, self.A.id, body) - - def test_project_delete_with_default_quota_less_than_in_use(self): - quota = {'volumes': 11} - body = {'quota_set': quota} - self.controller.update(self.req, self.A.id, body) - quotas.QUOTAS._driver.reserve( - self.req.environ['cinder.context'], quotas.QUOTAS.resources, - quota, project_id=self.A.id) - # Should not be able to delete if it will cause the used values to go - # over quota when nested quotas are used - self.assertRaises(webob.exc.HTTPBadRequest, - self.controller.delete, - self.req, - self.A.id) - - def test_subproject_delete_with_default_quota_less_than_in_use(self): - quota = {'volumes': 1} - body = {'quota_set': quota} - self.controller.update(self.req, self.B.id, body) - quotas.QUOTAS._driver.reserve( - self.req.environ['cinder.context'], quotas.QUOTAS.resources, - quota, project_id=self.B.id) - - # Should not be able to delete if it will cause the used values to go - # over quota when nested quotas are used - self.assertRaises(webob.exc.HTTPBadRequest, - self.controller.delete, - self.req, - self.B.id) - - def test_subproject_delete(self): - self.req.environ['cinder.context'].project_id = self.A.id - - body = make_body(gigabytes=2000, snapshots=15, volumes=5, backups=5, - backup_gigabytes=1000, tenant_id=None) - result_update = self.controller.update(self.req, self.A.id, body) - self.assertDictEqual(body, result_update) - - # Set usage param to True in order to see get allocated values. 
- self.req.params = {'usage': 'True'} - result_show = self.controller.show(self.req, self.A.id) - - result_update = self.controller.update(self.req, self.B.id, body) - self.assertDictEqual(body, result_update) - - self.controller.delete(self.req, self.B.id) - - result_show_after = self.controller.show(self.req, self.A.id) - self.assertDictEqual(result_show, result_show_after) - - def test_subproject_delete_not_considering_default_quotas(self): - """Test delete subprojects' quotas won't consider default quotas. - - Test plan: - - Update the volume quotas of project A - - Update the volume quotas of project B - - Delete the quotas of project B - - Resources with default quotas aren't expected to be considered when - updating the allocated values of the parent project. Thus, the delete - operation should succeed. - """ - self.req.environ['cinder.context'].project_id = self.A.id - - body = {'quota_set': {'volumes': 5}} - result = self.controller.update(self.req, self.A.id, body) - self.assertEqual(body['quota_set']['volumes'], - result['quota_set']['volumes']) - - body = {'quota_set': {'volumes': 2}} - result = self.controller.update(self.req, self.B.id, body) - self.assertEqual(body['quota_set']['volumes'], - result['quota_set']['volumes']) - - self.controller.delete(self.req, self.B.id) - - def test_subproject_delete_with_child_present(self): - # Update the project A quota. 
- self.req.environ['cinder.context'].project_id = self.A.id - body = make_body(volumes=5) - self.controller.update(self.req, self.A.id, body) - - # Allocate some of that quota to a child project - body = make_body(volumes=3) - self.controller.update(self.req, self.B.id, body) - - # Deleting 'A' should be disallowed since 'B' is using some of that - # quota - self.assertRaises(webob.exc.HTTPBadRequest, self.controller.delete, - self.req, self.A.id) - - def test_subproject_delete_with_child_updates_parent_allocated(self): - quota = {'volumes': 5} - body = {'quota_set': quota} - self.controller.update(self.req, self.A.id, body) - - # Allocate some of that quota to a child project using hard limit - quota['volumes'] = -1 - self.controller.update(self.req, self.B.id, body) - quota['volumes'] = 2 - self.controller.update(self.req, self.D.id, body) - - res = 'volumes' - self._assert_quota_show(self.A.id, res, allocated=2, limit=5) - self._assert_quota_show(self.B.id, res, allocated=2, limit=-1) - self.controller.delete(self.req, self.D.id) - self._assert_quota_show(self.A.id, res, allocated=0, limit=5) - self._assert_quota_show(self.B.id, res, allocated=0, limit=-1) - - def test_negative_child_limit_not_affecting_parents_free_quota(self): - quota = {'volumes': -1} - body = {'quota_set': quota} - self.controller.update(self.req, self.C.id, body) - self.controller.update(self.req, self.B.id, body) - - # Shouldn't be able to set greater than parent - quota['volumes'] = 11 - self.assertRaises(webob.exc.HTTPBadRequest, self.controller.update, - self.req, self.B.id, body) - - def test_child_neg_limit_set_grandkid_zero_limit(self): - cur_quota_a = self.controller.show(self.req, self.A.id) - self.assertEqual(10, cur_quota_a['quota_set']['volumes']) - - quota = {'volumes': -1} - body = {'quota_set': quota} - self.controller.update(self.req, self.B.id, body) - - cur_quota_d = self.controller.show(self.req, self.D.id) - # Default child value is 0 - self.assertEqual(0, 
cur_quota_d['quota_set']['volumes']) - # Should be able to set D explicitly to 0 since that's already the val - quota['volumes'] = 0 - self.controller.update(self.req, self.D.id, body) - - def test_grandkid_negative_one_limit_enforced(self): - quota = {'volumes': 2, 'gigabytes': 2} - body = {'quota_set': quota} - self.controller.update(self.req, self.A.id, body) - - quota['volumes'] = -1 - quota['gigabytes'] = -1 - self.controller.update(self.req, self.B.id, body) - self.controller.update(self.req, self.C.id, body) - self.controller.update(self.req, self.D.id, body) - - def _reserve(project_id): - quotas.QUOTAS._driver.reserve( - self.req.environ['cinder.context'], quotas.QUOTAS.resources, - {'volumes': 1, 'gigabytes': 1}, project_id=project_id) - - _reserve(self.C.id) - _reserve(self.D.id) - self.assertRaises(exception.OverQuota, _reserve, self.B.id) - self.assertRaises(exception.OverQuota, _reserve, self.C.id) - self.assertRaises(exception.OverQuota, _reserve, self.D.id) - - # Make sure the rollbacks went successfully for allocated for all res - for res in quota.keys(): - self._assert_quota_show(self.A.id, res, allocated=2, limit=2) - self._assert_quota_show(self.B.id, res, allocated=1, limit=-1) - self._assert_quota_show(self.C.id, res, reserved=1, limit=-1) - self._assert_quota_show(self.D.id, res, reserved=1, limit=-1) - - def test_child_update_affects_allocated_and_rolls_back(self): - quota = {'gigabytes': -1, 'volumes': 3} - body = {'quota_set': quota} - self.controller.update(self.req, self.A.id, body) - quota['volumes'] = -1 - self.controller.update(self.req, self.B.id, body) - quota['volumes'] = 1 - self.controller.update(self.req, self.C.id, body) - - # Shouldn't be able to update to greater than the grandparent - quota['volumes'] = 3 - quota['gigabytes'] = 1 - self.assertRaises(webob.exc.HTTPBadRequest, - self.controller.update, self.req, self.D.id, body) - # Validate we haven't updated either parents' allocated value for - # any of the keys (even if 
some keys were valid) - self._assert_quota_show(self.A.id, 'volumes', allocated=1, limit=3) - self._assert_quota_show(self.A.id, 'gigabytes', limit=-1) - self._assert_quota_show(self.B.id, 'volumes', limit=-1) - self._assert_quota_show(self.B.id, 'gigabytes', limit=-1) - - quota['volumes'] = 2 - self.controller.update(self.req, self.D.id, body) - # Validate we have now updated the parent and grandparents' - self.req.params = {'usage': 'True'} - self._assert_quota_show(self.A.id, 'volumes', allocated=3, limit=3) - self._assert_quota_show(self.A.id, 'gigabytes', allocated=1, limit=-1) - self._assert_quota_show(self.B.id, 'volumes', allocated=2, limit=-1) - self._assert_quota_show(self.B.id, 'gigabytes', allocated=1, limit=-1) - - def test_negative_child_limit_reserve_and_rollback(self): - quota = {'volumes': 2, 'gigabytes': 2} - body = {'quota_set': quota} - self.controller.update(self.req, self.A.id, body) - - quota['volumes'] = -1 - quota['gigabytes'] = -1 - self.controller.update(self.req, self.B.id, body) - self.controller.update(self.req, self.C.id, body) - self.controller.update(self.req, self.D.id, body) - - res = quotas.QUOTAS._driver.reserve( - self.req.environ['cinder.context'], quotas.QUOTAS.resources, - {'volumes': 2, 'gigabytes': 2}, project_id=self.D.id) - - self.req.params = {'usage': 'True'} - quota_b = self.controller.show(self.req, self.B.id) - self.assertEqual(2, quota_b['quota_set']['volumes']['allocated']) - # A will be the next hard limit to set - quota_a = self.controller.show(self.req, self.A.id) - self.assertEqual(2, quota_a['quota_set']['volumes']['allocated']) - quota_d = self.controller.show(self.req, self.D.id) - self.assertEqual(2, quota_d['quota_set']['volumes']['reserved']) - - quotas.QUOTAS.rollback(self.req.environ['cinder.context'], res, - self.D.id) - # After the rollback, A's limit should be properly set again - quota_a = self.controller.show(self.req, self.A.id) - self.assertEqual(0, quota_a['quota_set']['volumes']['allocated']) 
- quota_d = self.controller.show(self.req, self.D.id) - self.assertEqual(0, quota_d['quota_set']['volumes']['in_use']) - - @mock.patch('cinder.db.sqlalchemy.api._get_quota_usages') - @mock.patch('cinder.db.quota_usage_get_all_by_project') - def test_nested_quota_set_negative_limit(self, mock_usage, mock_get_usage): - # TODO(mc_nair): this test should be moved to Tempest once nested quota - # coverage is added - fake_usages = {self.A.id: 1, self.B.id: 1, self.D.id: 2, self.C.id: 0} - self._create_fake_quota_usages(fake_usages) - mock_usage.side_effect = self._fake_quota_usage_get_all_by_project - - class FakeUsage(object): - def __init__(self, in_use, reserved): - self.in_use = in_use - self.reserved = reserved - self.until_refresh = None - self.total = self.reserved + self.in_use - - def _fake__get_quota_usages(context, session, project_id): - if not project_id: - return {} - return {'volumes': FakeUsage(fake_usages[project_id], 0)} - mock_get_usage.side_effect = _fake__get_quota_usages - - # Update the project A quota. 
- quota_limit = {'volumes': 7} - body = {'quota_set': quota_limit} - self.controller.update(self.req, self.A.id, body) - - quota_limit['volumes'] = 4 - self.controller.update(self.req, self.B.id, body) - quota_limit['volumes'] = -1 - self.controller.update(self.req, self.D.id, body) - - quota_limit['volumes'] = 1 - self.controller.update(self.req, self.C.id, body) - - self.req.params['fix_allocated_quotas'] = True - self.controller.validate_setup_for_nested_quota_use(self.req) - - # Validate that the allocated values look right for each project - self.req.params = {'usage': 'True'} - - res = 'volumes' - # A has given 4 vols to B and 1 vol to C (from limits) - self._assert_quota_show(self.A.id, res, allocated=5, in_use=1, limit=7) - self._assert_quota_show(self.B.id, res, allocated=2, in_use=1, limit=4) - self._assert_quota_show(self.D.id, res, in_use=2, limit=-1) - self._assert_quota_show(self.C.id, res, limit=1) - - # Update B to -1 limit, and make sure that A's allocated gets updated - # with B + D's in_use values (one less than current limit - quota_limit['volumes'] = -1 - self.controller.update(self.req, self.B.id, body) - self._assert_quota_show(self.A.id, res, allocated=4, in_use=1, limit=7) - - quota_limit['volumes'] = 6 - self.assertRaises( - webob.exc.HTTPBadRequest, - self.controller.update, self.req, self.B.id, body) - - quota_limit['volumes'] = 5 - self.controller.update(self.req, self.B.id, body) - self._assert_quota_show(self.A.id, res, allocated=6, in_use=1, limit=7) diff --git a/cinder/tests/unit/api/contrib/test_quotas_classes.py b/cinder/tests/unit/api/contrib/test_quotas_classes.py deleted file mode 100644 index d0b158702..000000000 --- a/cinder/tests/unit/api/contrib/test_quotas_classes.py +++ /dev/null @@ -1,159 +0,0 @@ -# Copyright 2013 Huawei Technologies Co., Ltd -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. 
You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -""" -Tests for cinder.api.contrib.quota_classes.py -""" - - -import mock - -import webob.exc - - -from cinder.api.contrib import quota_classes -from cinder import context -from cinder import exception -from cinder import quota -from cinder import test -from cinder.tests.unit import fake_constants as fake -from cinder.volume import volume_types - - -QUOTAS = quota.QUOTAS -GROUP_QUOTAS = quota.GROUP_QUOTAS - - -def make_body(root=True, gigabytes=1000, snapshots=10, - volumes=10, backups=10, - backup_gigabytes=1000, per_volume_gigabytes=-1, - volume_types_faked=None, - tenant_id=fake.PROJECT_ID, groups=10): - resources = {'gigabytes': gigabytes, - 'snapshots': snapshots, - 'volumes': volumes, - 'backups': backups, - 'per_volume_gigabytes': per_volume_gigabytes, - 'backup_gigabytes': backup_gigabytes, - 'groups': groups} - if not volume_types_faked: - volume_types_faked = {'fake_type': None} - for volume_type in volume_types_faked: - resources['gigabytes_' + volume_type] = -1 - resources['snapshots_' + volume_type] = -1 - resources['volumes_' + volume_type] = -1 - - if tenant_id: - resources['id'] = tenant_id - if root: - result = {'quota_class_set': resources} - else: - result = resources - return result - - -def make_response_body(root=True, ctxt=None, quota_class='foo', - request_body=None, tenant_id=fake.PROJECT_ID): - resources = {} - if not ctxt: - ctxt = context.get_admin_context() - resources.update(QUOTAS.get_class_quotas(ctxt, quota_class)) - resources.update(GROUP_QUOTAS.get_class_quotas(ctxt, quota_class)) - if not request_body and not 
request_body['quota_class_set']: - resources.update(request_body['quota_class_set']) - - if tenant_id: - resources['id'] = tenant_id - if root: - result = {'quota_class_set': resources} - else: - result = resources - return result - - -class QuotaClassSetsControllerTest(test.TestCase): - - def setUp(self): - super(QuotaClassSetsControllerTest, self).setUp() - self.controller = quota_classes.QuotaClassSetsController() - - self.ctxt = context.get_admin_context() - self.req = mock.Mock() - self.req.environ = {'cinder.context': self.ctxt} - self.req.environ['cinder.context'].is_admin = True - - def test_show(self): - volume_types.create(self.ctxt, 'fake_type') - result = self.controller.show(self.req, fake.PROJECT_ID) - self.assertDictEqual(make_body(), result) - - def test_show_not_authorized(self): - self.req.environ['cinder.context'].is_admin = False - self.req.environ['cinder.context'].user_id = fake.USER_ID - self.req.environ['cinder.context'].project_id = fake.PROJECT_ID - self.assertRaises(exception.PolicyNotAuthorized, self.controller.show, - self.req, fake.PROJECT_ID) - - def test_update(self): - volume_types.create(self.ctxt, 'fake_type') - body = make_body(gigabytes=2000, snapshots=15, - volumes=5, tenant_id=None) - result = self.controller.update(self.req, fake.PROJECT_ID, body) - self.assertDictEqual(body, result) - - @mock.patch('cinder.api.openstack.wsgi.Controller.validate_string_length') - @mock.patch('cinder.utils.validate_integer') - def test_update_limit(self, mock_validate_integer, mock_validate): - mock_validate_integer.return_value = 5 - volume_types.create(self.ctxt, 'fake_type') - body = make_body(volumes=5) - result = self.controller.update(self.req, fake.PROJECT_ID, body) - self.assertEqual(5, result['quota_class_set']['volumes']) - self.assertTrue(mock_validate.called) - self.assertTrue(mock_validate_integer.called) - - def test_update_wrong_key(self): - volume_types.create(self.ctxt, 'fake_type') - body = {'quota_class_set': {'bad': 'bad'}} 
- result = self.controller.update(self.req, fake.PROJECT_ID, body) - self.assertDictEqual(make_body(tenant_id=None), result) - - def test_update_invalid_key_value(self): - body = {'quota_class_set': {'gigabytes': "should_be_int"}} - self.assertRaises(webob.exc.HTTPBadRequest, self.controller.update, - self.req, fake.PROJECT_ID, body) - - def test_update_bad_quota_limit(self): - body = {'quota_class_set': {'gigabytes': -1000}} - self.assertRaises(webob.exc.HTTPBadRequest, self.controller.update, - self.req, fake.PROJECT_ID, body) - - def test_update_no_admin(self): - self.req.environ['cinder.context'].is_admin = False - self.assertRaises(exception.PolicyNotAuthorized, - self.controller.update, self.req, fake.PROJECT_ID, - make_body(tenant_id=None)) - - def test_update_with_more_volume_types(self): - volume_types.create(self.ctxt, 'fake_type_1') - volume_types.create(self.ctxt, 'fake_type_2') - body = {'quota_class_set': {'gigabytes_fake_type_1': 1111, - 'volumes_fake_type_2': 2222}} - result = self.controller.update(self.req, fake.PROJECT_ID, body) - self.assertDictEqual(make_response_body(ctxt=self.ctxt, - quota_class=fake.PROJECT_ID, - request_body=body, - tenant_id=None), - result) diff --git a/cinder/tests/unit/api/contrib/test_scheduler_hints.py b/cinder/tests/unit/api/contrib/test_scheduler_hints.py deleted file mode 100644 index e74929046..000000000 --- a/cinder/tests/unit/api/contrib/test_scheduler_hints.py +++ /dev/null @@ -1,106 +0,0 @@ -# Copyright 2013 OpenStack Foundation -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the -# License for the specific language governing permissions and limitations -# under the License. - -import datetime - -from oslo_serialization import jsonutils -from six.moves import http_client - -import cinder -from cinder.api.openstack import wsgi -from cinder import context -from cinder import test -from cinder.tests.unit.api import fakes -from cinder.tests.unit.api.v2 import fakes as v2_fakes -from cinder.tests.unit import fake_constants as fake - - -UUID = fakes.FAKE_UUID - - -class SchedulerHintsTestCase(test.TestCase): - - def setUp(self): - super(SchedulerHintsTestCase, self).setUp() - self.fake_instance = v2_fakes.create_fake_volume(fake.VOLUME_ID, - uuid=UUID) - self.fake_instance['created_at'] =\ - datetime.datetime(2013, 1, 1, 1, 1, 1) - self.fake_instance['launched_at'] =\ - datetime.datetime(2013, 1, 1, 1, 1, 1) - self.flags( - osapi_volume_extension=[ - 'cinder.api.contrib.select_extensions'], - osapi_volume_ext_list=['Scheduler_hints']) - self.user_ctxt = context.RequestContext( - fake.USER_ID, fake.PROJECT_ID, auth_token=True) - self.app = fakes.wsgi_app(fake_auth_context=self.user_ctxt) - - def test_create_server_without_hints(self): - - @wsgi.response(http_client.ACCEPTED) - def fake_create(*args, **kwargs): - self.assertNotIn('scheduler_hints', kwargs['body']) - return self.fake_instance - - self.mock_object(cinder.api.v2.volumes.VolumeController, 'create', - fake_create) - - req = fakes.HTTPRequest.blank('/v2/%s/volumes' % fake.PROJECT_ID) - req.method = 'POST' - req.content_type = 'application/json' - body = {'id': UUID, - 'volume_type_id': fake.VOLUME_TYPE_ID, - 'volume_id': fake.VOLUME_ID, } - req.body = jsonutils.dump_as_bytes(body) - res = req.get_response(self.app) - self.assertEqual(http_client.ACCEPTED, res.status_int) - - def test_create_server_with_hints(self): - - @wsgi.response(http_client.ACCEPTED) - def fake_create(*args, **kwargs): - self.assertIn('scheduler_hints', kwargs['body']) - self.assertEqual({"a": "b"}, 
kwargs['body']['scheduler_hints']) - return self.fake_instance - - self.mock_object(cinder.api.v2.volumes.VolumeController, 'create', - fake_create) - - req = fakes.HTTPRequest.blank('/v2/%s/volumes' % fake.PROJECT_ID) - req.method = 'POST' - req.content_type = 'application/json' - body = {'id': UUID, - 'volume_type_id': fake.VOLUME_TYPE_ID, - 'volume_id': fake.VOLUME_ID, - 'scheduler_hints': {'a': 'b'}, } - - req.body = jsonutils.dump_as_bytes(body) - res = req.get_response(self.app) - self.assertEqual(http_client.ACCEPTED, res.status_int) - - def test_create_server_bad_hints(self): - req = fakes.HTTPRequest.blank('/v2/%s/volumes' % fake.PROJECT_ID) - req.method = 'POST' - req.content_type = 'application/json' - body = {'volume': { - 'id': UUID, - 'volume_type_id': fake.VOLUME_TYPE_ID, - 'volume_id': fake.VOLUME_ID, - 'scheduler_hints': 'a', }} - - req.body = jsonutils.dump_as_bytes(body) - res = req.get_response(self.app) - self.assertEqual(http_client.BAD_REQUEST, res.status_int) diff --git a/cinder/tests/unit/api/contrib/test_scheduler_stats.py b/cinder/tests/unit/api/contrib/test_scheduler_stats.py deleted file mode 100644 index 63ac4a673..000000000 --- a/cinder/tests/unit/api/contrib/test_scheduler_stats.py +++ /dev/null @@ -1,208 +0,0 @@ -# Copyright 2013 eBay Inc. -# Copyright 2013 OpenStack Foundation -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -import ddt -import mock -import webob - -from cinder.api.contrib import scheduler_stats -from cinder.api.openstack import api_version_request as api_version -from cinder import context -from cinder import exception -from cinder import test -from cinder.tests.unit.api import fakes -from cinder.tests.unit import fake_constants as fake - - -def schedule_rpcapi_get_pools(self, context, filters=None): - all_pools = [] - pool1 = dict(name='pool1', - capabilities=dict( - total_capacity=1024, free_capacity=100, - volume_backend_name='pool1', reserved_percentage=0, - driver_version='1.0.0', storage_protocol='iSCSI', - QoS_support='False', updated=None)) - all_pools.append(pool1) - pool2 = dict(name='pool2', - capabilities=dict( - total_capacity=512, free_capacity=200, - volume_backend_name='pool2', reserved_percentage=0, - driver_version='1.0.1', storage_protocol='iSER', - QoS_support='True', updated=None)) - all_pools.append(pool2) - - return all_pools - - -@ddt.ddt -class SchedulerStatsAPITest(test.TestCase): - def setUp(self): - super(SchedulerStatsAPITest, self).setUp() - self.flags(host='fake') - self.controller = scheduler_stats.SchedulerStatsController() - self.ctxt = context.RequestContext(fake.USER_ID, fake.PROJECT_ID, True) - - @mock.patch('cinder.scheduler.rpcapi.SchedulerAPI.get_pools', - schedule_rpcapi_get_pools) - def test_get_pools_summary(self): - req = fakes.HTTPRequest.blank('/v2/%s/scheduler_stats' % - fake.PROJECT_ID) - req.environ['cinder.context'] = self.ctxt - res = self.controller.get_pools(req) - - self.assertEqual(2, len(res['pools'])) - - expected = { - 'pools': [ - { - 'name': 'pool1', - }, - { - 'name': 'pool2', - } - ] - } - - self.assertDictEqual(expected, res) - - @mock.patch('cinder.scheduler.rpcapi.SchedulerAPI.get_pools') - def test_get_pools_summary_filter_name(self, mock_rpcapi): - req = fakes.HTTPRequest.blank('/v3/%s/scheduler_stats?name=pool1' % - fake.PROJECT_ID) - mock_rpcapi.return_value = [dict(name='pool1', - 
capabilities=dict(foo='bar'))] - req.api_version_request = api_version.APIVersionRequest('3.28') - req.environ['cinder.context'] = self.ctxt - res = self.controller.get_pools(req) - - expected = { - 'pools': [ - { - 'name': 'pool1', - } - ] - } - - self.assertDictEqual(expected, res) - filters = {'name': 'pool1'} - mock_rpcapi.assert_called_with(mock.ANY, filters=filters) - - @mock.patch('cinder.scheduler.rpcapi.SchedulerAPI.get_pools') - def test_get_pools_summary_filter_capabilities(self, mock_rpcapi): - req = fakes.HTTPRequest.blank('/v3/%s/scheduler_stats?detail=True' - '&foo=bar' % fake.PROJECT_ID) - mock_rpcapi.return_value = [dict(name='pool1', - capabilities=dict(foo='bar'))] - req.api_version_request = api_version.APIVersionRequest('3.28') - req.environ['cinder.context'] = self.ctxt - res = self.controller.get_pools(req) - - expected = { - 'pools': [ - { - 'name': 'pool1', - 'capabilities': { - 'foo': 'bar' - } - } - ] - } - - self.assertDictEqual(expected, res) - filters = {'foo': 'bar'} - mock_rpcapi.assert_called_with(mock.ANY, filters=filters) - - @mock.patch('cinder.scheduler.rpcapi.SchedulerAPI.get_pools', - schedule_rpcapi_get_pools) - def test_get_pools_detail(self): - req = fakes.HTTPRequest.blank('/v2/%s/scheduler_stats?detail=True' % - fake.PROJECT_ID) - req.environ['cinder.context'] = self.ctxt - res = self.controller.get_pools(req) - - self.assertEqual(2, len(res['pools'])) - - expected = { - 'pools': [ - { - 'name': 'pool1', - 'capabilities': { - 'updated': None, - 'total_capacity': 1024, - 'free_capacity': 100, - 'volume_backend_name': 'pool1', - 'reserved_percentage': 0, - 'driver_version': '1.0.0', - 'storage_protocol': 'iSCSI', - 'QoS_support': 'False', } - }, - { - 'name': 'pool2', - 'capabilities': { - 'updated': None, - 'total_capacity': 512, - 'free_capacity': 200, - 'volume_backend_name': 'pool2', - 'reserved_percentage': 0, - 'driver_version': '1.0.1', - 'storage_protocol': 'iSER', - 'QoS_support': 'True', } - } - ] - } - - 
self.assertDictEqual(expected, res) - - def test_get_pools_detail_invalid_bool(self): - req = fakes.HTTPRequest.blank( - '/v2/%s/scheduler_stats?detail=InvalidBool' % - fake.PROJECT_ID) - req.environ['cinder.context'] = self.ctxt - self.assertRaises(exception.InvalidParameterValue, - self.controller.get_pools, - req) - - @ddt.data(('3.34', False), - ('3.35', True)) - @ddt.unpack - @mock.patch('cinder.scheduler.rpcapi.SchedulerAPI.get_pools') - @mock.patch('cinder.api.common.reject_invalid_filters') - def test_get_pools_by_volume_type(self, - version, - support_volume_type, - mock_reject_invalid_filters, - mock_get_pools - ): - req = fakes.HTTPRequest.blank('/v3/%s/scheduler-stats/get_pools?' - 'volume_type=lvm' % fake.PROJECT_ID) - mock_get_pools.return_value = [{'name': 'pool1', - 'capabilities': {'foo': 'bar'}}] - req.api_version_request = api_version.APIVersionRequest(version) - req.environ['cinder.context'] = self.ctxt - res = self.controller.get_pools(req) - - expected = { - 'pools': [{'name': 'pool1'}] - } - - filters = dict() - if support_volume_type: - filters = {'volume_type': 'lvm'} - filters = webob.multidict.MultiDict(filters) - mock_reject_invalid_filters.assert_called_once_with(self.ctxt, filters, - 'pool', True) - self.assertDictEqual(expected, res) - mock_get_pools.assert_called_with(mock.ANY, filters=filters) diff --git a/cinder/tests/unit/api/contrib/test_services.py b/cinder/tests/unit/api/contrib/test_services.py deleted file mode 100644 index ea193fc76..000000000 --- a/cinder/tests/unit/api/contrib/test_services.py +++ /dev/null @@ -1,884 +0,0 @@ -# Copyright 2012 IBM Corp. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. 
You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - - -import datetime - -import ddt -from iso8601 import iso8601 -import mock -from oslo_config import cfg -from six.moves import http_client -import webob.exc - -from cinder.api.contrib import services -from cinder.api import extensions -from cinder.api.openstack import api_version_request as api_version -from cinder import context -from cinder import exception -from cinder import objects -from cinder import test -from cinder.tests.unit.api import fakes -from cinder.tests.unit import fake_constants as fake - - -CONF = cfg.CONF - - -fake_services_list = [ - {'binary': 'cinder-scheduler', - 'host': 'host1', - 'cluster_name': None, - 'availability_zone': 'cinder', - 'id': 1, - 'disabled': True, - 'updated_at': datetime.datetime(2012, 10, 29, 13, 42, 2), - 'created_at': datetime.datetime(2012, 9, 18, 2, 46, 27), - 'disabled_reason': 'test1', - 'modified_at': ''}, - {'binary': 'cinder-volume', - 'host': 'host1', - 'cluster_name': None, - 'availability_zone': 'cinder', - 'id': 2, - 'disabled': True, - 'updated_at': datetime.datetime(2012, 10, 29, 13, 42, 5), - 'created_at': datetime.datetime(2012, 9, 18, 2, 46, 27), - 'disabled_reason': 'test2', - 'modified_at': ''}, - {'binary': 'cinder-scheduler', - 'host': 'host2', - 'cluster_name': 'cluster1', - 'availability_zone': 'cinder', - 'id': 3, - 'disabled': False, - 'updated_at': datetime.datetime(2012, 9, 19, 6, 55, 34), - 'created_at': datetime.datetime(2012, 9, 18, 2, 46, 28), - 'disabled_reason': '', - 'modified_at': ''}, - {'binary': 'cinder-volume', - 'host': 'host2', - 'cluster_name': 'cluster1', - 
'availability_zone': 'cinder', - 'id': 4, - 'disabled': True, - 'updated_at': datetime.datetime(2012, 9, 18, 8, 3, 38), - 'created_at': datetime.datetime(2012, 9, 18, 2, 46, 28), - 'disabled_reason': 'test4', - 'modified_at': ''}, - {'binary': 'cinder-volume', - 'host': 'host2', - 'cluster_name': 'cluster2', - 'availability_zone': 'cinder', - 'id': 5, - 'disabled': True, - 'updated_at': datetime.datetime(2012, 9, 18, 8, 3, 38), - 'created_at': datetime.datetime(2012, 9, 18, 2, 46, 28), - 'disabled_reason': 'test5', - 'modified_at': datetime.datetime(2012, 10, 29, 13, 42, 5)}, - {'binary': 'cinder-volume', - 'host': 'host2', - 'cluster_name': 'cluster2', - 'availability_zone': 'cinder', - 'id': 6, - 'disabled': False, - 'updated_at': datetime.datetime(2012, 9, 18, 8, 3, 38), - 'created_at': datetime.datetime(2012, 9, 18, 2, 46, 28), - 'disabled_reason': '', - 'modified_at': datetime.datetime(2012, 9, 18, 8, 1, 38)}, - {'binary': 'cinder-scheduler', - 'host': 'host2', - 'cluster_name': None, - 'availability_zone': 'cinder', - 'id': 7, - 'disabled': False, - 'updated_at': None, - 'created_at': datetime.datetime(2012, 9, 18, 2, 46, 28), - 'disabled_reason': '', - 'modified_at': None}, -] - - -class FakeRequest(object): - environ = {"cinder.context": context.get_admin_context()} - - def __init__(self, version='3.0', **kwargs): - self.GET = kwargs - self.headers = {'OpenStack-API-Version': 'volume ' + version} - self.api_version_request = api_version.APIVersionRequest(version) - - -class FakeRequestWithBinary(FakeRequest): - def __init__(self, **kwargs): - kwargs.setdefault('binary', 'cinder-volume') - super(FakeRequestWithBinary, self).__init__(**kwargs) - - -class FakeRequestWithHost(FakeRequest): - def __init__(self, **kwargs): - kwargs.setdefault('host', 'host1') - super(FakeRequestWithHost, self).__init__(**kwargs) - - -class FakeRequestWithHostBinary(FakeRequestWithBinary): - def __init__(self, **kwargs): - kwargs.setdefault('host', 'host1') - 
super(FakeRequestWithHostBinary, self).__init__(**kwargs) - - -def fake_service_get_all(context, **filters): - result = [] - host = filters.pop('host', None) - for service in fake_services_list: - if (host and service['host'] != host and - not service['host'].startswith(host + '@')): - continue - - if all(v is None or service.get(k) == v for k, v in filters.items()): - result.append(service) - return result - - -def fake_service_get(context, service_id=None, **filters): - result = fake_service_get_all(context, id=service_id, **filters) - if not result: - raise exception.ServiceNotFound(service_id=service_id) - return result[0] - - -def fake_service_get_by_id(value): - for service in fake_services_list: - if service['id'] == value: - return service - return None - - -def fake_service_update(context, service_id, values): - service = fake_service_get_by_id(service_id) - if service is None: - raise exception.ServiceNotFound(service_id=service_id) - else: - {'host': 'host1', 'service': 'cinder-volume', - 'disabled': values['disabled']} - - -def fake_policy_enforce(context, action, target): - pass - - -def fake_utcnow(with_timezone=False): - tzinfo = iso8601.Utc() if with_timezone else None - return datetime.datetime(2012, 10, 29, 13, 42, 11, tzinfo=tzinfo) - - -@ddt.ddt -@mock.patch('cinder.db.service_get_all', fake_service_get_all) -@mock.patch('cinder.db.service_get', fake_service_get) -@mock.patch('oslo_utils.timeutils.utcnow', fake_utcnow) -@mock.patch('cinder.db.sqlalchemy.api.service_update', fake_service_update) -@mock.patch('cinder.policy.enforce', fake_policy_enforce) -class ServicesTest(test.TestCase): - - def setUp(self): - super(ServicesTest, self).setUp() - - self.context = context.get_admin_context() - self.ext_mgr = extensions.ExtensionManager() - self.ext_mgr.extensions = {} - self.controller = services.ServiceController(self.ext_mgr) - - def test_services_list(self): - req = FakeRequest() - res_dict = self.controller.index(req) - - response = 
{'services': [{'binary': 'cinder-scheduler', - 'host': 'host1', 'zone': 'cinder', - 'status': 'disabled', 'state': 'up', - 'updated_at': datetime.datetime( - 2012, 10, 29, 13, 42, 2)}, - {'binary': 'cinder-volume', - 'host': 'host1', 'zone': 'cinder', - 'status': 'disabled', 'state': 'up', - 'updated_at': datetime.datetime( - 2012, 10, 29, 13, 42, 5)}, - {'binary': 'cinder-scheduler', - 'host': 'host2', - 'zone': 'cinder', - 'status': 'enabled', 'state': 'down', - 'updated_at': datetime.datetime( - 2012, 9, 19, 6, 55, 34)}, - {'binary': 'cinder-volume', - 'host': 'host2', - 'zone': 'cinder', - 'status': 'disabled', 'state': 'down', - 'updated_at': datetime.datetime( - 2012, 9, 18, 8, 3, 38)}, - {'binary': 'cinder-volume', - 'host': 'host2', - 'zone': 'cinder', - 'status': 'disabled', 'state': 'down', - 'updated_at': datetime.datetime( - 2012, 10, 29, 13, 42, 5)}, - {'binary': 'cinder-volume', - 'host': 'host2', - 'zone': 'cinder', - 'status': 'enabled', 'state': 'down', - 'updated_at': datetime.datetime( - 2012, 9, 18, 8, 3, 38)}, - {'binary': 'cinder-scheduler', - 'host': 'host2', - 'zone': 'cinder', - 'status': 'enabled', 'state': 'down', - 'updated_at': None}, - ]} - self.assertEqual(response, res_dict) - - def test_failover_old_version(self): - req = FakeRequest(version='3.18') - self.assertRaises(exception.InvalidInput, self.controller.update, req, - 'failover', {'cluster': 'cluster1'}) - - def test_failover_no_values(self): - req = FakeRequest(version='3.26') - self.assertRaises(exception.InvalidInput, self.controller.update, req, - 'failover', {'backend_id': 'replica1'}) - - @ddt.data({'host': 'hostname'}, {'cluster': 'mycluster'}) - @mock.patch('cinder.volume.api.API.failover') - def test_failover(self, body, failover_mock): - req = FakeRequest(version='3.26') - body['backend_id'] = 'replica1' - res = self.controller.update(req, 'failover', body) - self.assertEqual(202, res.status_code) - failover_mock.assert_called_once_with(req.environ['cinder.context'], 
- body.get('host'), - body.get('cluster'), 'replica1') - - @ddt.data({}, {'host': 'hostname', 'cluster': 'mycluster'}) - @mock.patch('cinder.volume.api.API.failover') - def test_failover_invalid_input(self, body, failover_mock): - req = FakeRequest(version='3.26') - body['backend_id'] = 'replica1' - self.assertRaises(exception.InvalidInput, - self.controller.update, req, 'failover', body) - failover_mock.assert_not_called() - - def test_services_list_with_cluster_name(self): - req = FakeRequest(version='3.7') - res_dict = self.controller.index(req) - - response = {'services': [{'binary': 'cinder-scheduler', - 'cluster': None, - 'host': 'host1', 'zone': 'cinder', - 'status': 'disabled', 'state': 'up', - 'updated_at': datetime.datetime( - 2012, 10, 29, 13, 42, 2)}, - {'binary': 'cinder-volume', - 'cluster': None, - 'host': 'host1', 'zone': 'cinder', - 'status': 'disabled', 'state': 'up', - 'updated_at': datetime.datetime( - 2012, 10, 29, 13, 42, 5)}, - {'binary': 'cinder-scheduler', - 'cluster': 'cluster1', - 'host': 'host2', - 'zone': 'cinder', - 'status': 'enabled', 'state': 'down', - 'updated_at': datetime.datetime( - 2012, 9, 19, 6, 55, 34)}, - {'binary': 'cinder-volume', - 'cluster': 'cluster1', - 'host': 'host2', - 'zone': 'cinder', - 'status': 'disabled', 'state': 'down', - 'updated_at': datetime.datetime( - 2012, 9, 18, 8, 3, 38)}, - {'binary': 'cinder-volume', - 'cluster': 'cluster2', - 'host': 'host2', - 'zone': 'cinder', - 'status': 'disabled', 'state': 'down', - 'updated_at': datetime.datetime( - 2012, 10, 29, 13, 42, 5)}, - {'binary': 'cinder-volume', - 'cluster': 'cluster2', - 'host': 'host2', - 'zone': 'cinder', - 'status': 'enabled', 'state': 'down', - 'updated_at': datetime.datetime( - 2012, 9, 18, 8, 3, 38)}, - {'binary': 'cinder-scheduler', - 'cluster': None, - 'host': 'host2', - 'zone': 'cinder', - 'status': 'enabled', 'state': 'down', - 'updated_at': None}, - ]} - self.assertEqual(response, res_dict) - - def test_services_detail(self): - 
self.ext_mgr.extensions['os-extended-services'] = True - self.controller = services.ServiceController(self.ext_mgr) - req = FakeRequest() - res_dict = self.controller.index(req) - - response = {'services': [{'binary': 'cinder-scheduler', - 'host': 'host1', 'zone': 'cinder', - 'status': 'disabled', 'state': 'up', - 'updated_at': datetime.datetime( - 2012, 10, 29, 13, 42, 2), - 'disabled_reason': 'test1'}, - {'binary': 'cinder-volume', - 'replication_status': None, - 'active_backend_id': None, - 'frozen': False, - 'host': 'host1', 'zone': 'cinder', - 'status': 'disabled', 'state': 'up', - 'updated_at': datetime.datetime( - 2012, 10, 29, 13, 42, 5), - 'disabled_reason': 'test2'}, - {'binary': 'cinder-scheduler', - 'host': 'host2', - 'zone': 'cinder', - 'status': 'enabled', 'state': 'down', - 'updated_at': datetime.datetime( - 2012, 9, 19, 6, 55, 34), - 'disabled_reason': ''}, - {'binary': 'cinder-volume', - 'replication_status': None, - 'active_backend_id': None, - 'frozen': False, - 'host': 'host2', - 'zone': 'cinder', - 'status': 'disabled', 'state': 'down', - 'updated_at': datetime.datetime( - 2012, 9, 18, 8, 3, 38), - 'disabled_reason': 'test4'}, - {'binary': 'cinder-volume', - 'replication_status': None, - 'active_backend_id': None, - 'frozen': False, - 'host': 'host2', - 'zone': 'cinder', - 'status': 'disabled', 'state': 'down', - 'updated_at': datetime.datetime( - 2012, 10, 29, 13, 42, 5), - 'disabled_reason': 'test5'}, - {'binary': 'cinder-volume', - 'replication_status': None, - 'active_backend_id': None, - 'frozen': False, - 'host': 'host2', - 'zone': 'cinder', - 'status': 'enabled', 'state': 'down', - 'updated_at': datetime.datetime( - 2012, 9, 18, 8, 3, 38), - 'disabled_reason': ''}, - {'binary': 'cinder-scheduler', - 'host': 'host2', - 'zone': 'cinder', - 'status': 'enabled', 'state': 'down', - 'updated_at': None, - 'disabled_reason': ''}, - ]} - self.assertEqual(response, res_dict) - - def test_services_list_with_host(self): - req = FakeRequestWithHost() 
- res_dict = self.controller.index(req) - - response = {'services': [ - {'binary': 'cinder-scheduler', - 'host': 'host1', - 'zone': 'cinder', - 'status': 'disabled', 'state': 'up', - 'updated_at': datetime.datetime(2012, 10, - 29, 13, 42, 2)}, - {'binary': 'cinder-volume', - 'host': 'host1', - 'zone': 'cinder', - 'status': 'disabled', 'state': 'up', - 'updated_at': datetime.datetime(2012, 10, 29, - 13, 42, 5)}]} - self.assertEqual(response, res_dict) - - def test_services_detail_with_host(self): - self.ext_mgr.extensions['os-extended-services'] = True - self.controller = services.ServiceController(self.ext_mgr) - req = FakeRequestWithHost() - res_dict = self.controller.index(req) - - response = {'services': [ - {'binary': 'cinder-scheduler', - 'host': 'host1', - 'zone': 'cinder', - 'status': 'disabled', 'state': 'up', - 'updated_at': datetime.datetime(2012, 10, - 29, 13, 42, 2), - 'disabled_reason': 'test1'}, - {'binary': 'cinder-volume', - 'frozen': False, - 'replication_status': None, - 'active_backend_id': None, - 'host': 'host1', - 'zone': 'cinder', - 'status': 'disabled', 'state': 'up', - 'updated_at': datetime.datetime(2012, 10, 29, - 13, 42, 5), - 'disabled_reason': 'test2'}]} - self.assertEqual(response, res_dict) - - def test_services_list_with_binary(self): - req = FakeRequestWithBinary() - res_dict = self.controller.index(req) - - response = {'services': [ - {'binary': 'cinder-volume', - 'host': 'host1', - 'zone': 'cinder', - 'status': 'disabled', - 'state': 'up', - 'updated_at': datetime.datetime(2012, 10, 29, - 13, 42, 5)}, - {'binary': 'cinder-volume', - 'host': 'host2', - 'zone': 'cinder', - 'status': 'disabled', - 'state': 'down', - 'updated_at': datetime.datetime(2012, 9, 18, - 8, 3, 38)}, - {'binary': 'cinder-volume', - 'host': 'host2', - 'zone': 'cinder', - 'status': 'disabled', - 'state': 'down', - 'updated_at': datetime.datetime(2012, 10, 29, - 13, 42, 5)}, - {'binary': 'cinder-volume', - 'host': 'host2', - 'zone': 'cinder', - 'status': 
'enabled', - 'state': 'down', - 'updated_at': datetime.datetime(2012, 9, 18, - 8, 3, 38)}]} - self.assertEqual(response, res_dict) - - def test_services_detail_with_binary(self): - self.ext_mgr.extensions['os-extended-services'] = True - self.controller = services.ServiceController(self.ext_mgr) - req = FakeRequestWithBinary() - res_dict = self.controller.index(req) - - response = {'services': [ - {'binary': 'cinder-volume', - 'replication_status': None, - 'active_backend_id': None, - 'host': 'host1', - 'zone': 'cinder', - 'status': 'disabled', - 'state': 'up', - 'frozen': False, - 'updated_at': datetime.datetime(2012, 10, 29, - 13, 42, 5), - 'disabled_reason': 'test2'}, - {'binary': 'cinder-volume', - 'replication_status': None, - 'active_backend_id': None, - 'host': 'host2', - 'zone': 'cinder', - 'status': 'disabled', - 'state': 'down', - 'frozen': False, - 'updated_at': datetime.datetime(2012, 9, 18, - 8, 3, 38), - 'disabled_reason': 'test4'}, - {'binary': 'cinder-volume', - 'replication_status': None, - 'active_backend_id': None, - 'host': 'host2', - 'zone': 'cinder', - 'status': 'disabled', - 'state': 'down', - 'frozen': False, - 'updated_at': datetime.datetime(2012, 10, 29, - 13, 42, 5), - 'disabled_reason': 'test5'}, - {'binary': 'cinder-volume', - 'replication_status': None, - 'active_backend_id': None, - 'host': 'host2', - 'zone': 'cinder', - 'status': 'enabled', - 'state': 'down', - 'frozen': False, - 'updated_at': datetime.datetime(2012, 9, 18, - 8, 3, 38), - 'disabled_reason': ''}]} - self.assertEqual(response, res_dict) - - def test_services_list_with_host_binary(self): - req = FakeRequestWithHostBinary() - res_dict = self.controller.index(req) - - response = {'services': [ - {'binary': 'cinder-volume', - 'host': 'host1', - 'zone': 'cinder', - 'status': 'disabled', - 'state': 'up', - 'updated_at': datetime.datetime(2012, 10, 29, - 13, 42, 5)}]} - self.assertEqual(response, res_dict) - - def test_services_detail_with_host_binary(self): - 
self.ext_mgr.extensions['os-extended-services'] = True - self.controller = services.ServiceController(self.ext_mgr) - req = FakeRequestWithHostBinary() - res_dict = self.controller.index(req) - - response = {'services': [ - {'binary': 'cinder-volume', - 'replication_status': None, - 'active_backend_id': None, - 'frozen': False, - 'host': 'host1', - 'zone': 'cinder', - 'status': 'disabled', - 'state': 'up', - 'updated_at': datetime.datetime(2012, 10, 29, - 13, 42, 5), - 'disabled_reason': 'test2'}]} - self.assertEqual(response, res_dict) - - def test_services_enable_with_service_key(self): - body = {'host': 'host1', 'service': 'cinder-volume'} - req = fakes.HTTPRequest.blank( - '/v2/%s/os-services/enable' % fake.PROJECT_ID) - res_dict = self.controller.update(req, "enable", body) - - self.assertEqual('enabled', res_dict['status']) - - def test_services_enable_with_binary_key(self): - body = {'host': 'host1', 'binary': 'cinder-volume'} - req = fakes.HTTPRequest.blank( - '/v2/%s/os-services/enable' % fake.PROJECT_ID) - res_dict = self.controller.update(req, "enable", body) - - self.assertEqual('enabled', res_dict['status']) - - def test_services_disable_with_service_key(self): - req = fakes.HTTPRequest.blank( - '/v2/%s/os-services/disable' % fake.PROJECT_ID) - body = {'host': 'host1', 'service': 'cinder-volume'} - res_dict = self.controller.update(req, "disable", body) - - self.assertEqual('disabled', res_dict['status']) - - def test_services_disable_with_binary_key(self): - req = fakes.HTTPRequest.blank( - '/v2/%s/os-services/disable' % fake.PROJECT_ID) - body = {'host': 'host1', 'binary': 'cinder-volume'} - res_dict = self.controller.update(req, "disable", body) - - self.assertEqual('disabled', res_dict['status']) - - def test_services_disable_log_reason(self): - self.ext_mgr.extensions['os-extended-services'] = True - self.controller = services.ServiceController(self.ext_mgr) - req = ( - fakes.HTTPRequest.blank('v1/fake/os-services/disable-log-reason')) - body = 
{'host': 'host1', - 'binary': 'cinder-scheduler', - 'disabled_reason': 'test-reason', - } - res_dict = self.controller.update(req, "disable-log-reason", body) - - self.assertEqual('disabled', res_dict['status']) - self.assertEqual('test-reason', res_dict['disabled_reason']) - - def test_services_disable_log_reason_unicode(self): - self.ext_mgr.extensions['os-extended-services'] = True - self.controller = services.ServiceController(self.ext_mgr) - req = ( - fakes.HTTPRequest.blank('v1/fake/os-services/disable-log-reason')) - body = {'host': 'host1', - 'binary': 'cinder-scheduler', - 'disabled_reason': u'test-reason', - } - res_dict = self.controller.update(req, "disable-log-reason", body) - - self.assertEqual('disabled', res_dict['status']) - self.assertEqual('test-reason', res_dict['disabled_reason']) - - def test_services_disable_log_reason_none(self): - self.ext_mgr.extensions['os-extended-services'] = True - self.controller = services.ServiceController(self.ext_mgr) - req = ( - fakes.HTTPRequest.blank('v1/fake/os-services/disable-log-reason')) - body = {'host': 'host1', - 'binary': 'cinder-scheduler', - 'disabled_reason': None, - } - self.assertRaises(webob.exc.HTTPBadRequest, - self.controller.update, - req, "disable-log-reason", body) - - def test_invalid_reason_field(self): - # Check that empty strings are not allowed - reason = ' ' * 10 - self.assertFalse(self.controller._is_valid_as_reason(reason)) - reason = 'a' * 256 - self.assertFalse(self.controller._is_valid_as_reason(reason)) - # Check that spaces at the end are also counted - reason = 'a' * 255 + ' ' - self.assertFalse(self.controller._is_valid_as_reason(reason)) - reason = 'it\'s a valid reason.' 
- self.assertTrue(self.controller._is_valid_as_reason(reason)) - reason = None - self.assertFalse(self.controller._is_valid_as_reason(reason)) - - def test_services_failover_host(self): - url = '/v2/%s/os-services/failover_host' % fake.PROJECT_ID - req = fakes.HTTPRequest.blank(url) - body = {'host': mock.sentinel.host, - 'backend_id': mock.sentinel.backend_id} - with mock.patch.object(self.controller.volume_api, 'failover') \ - as failover_mock: - res = self.controller.update(req, 'failover_host', body) - failover_mock.assert_called_once_with(req.environ['cinder.context'], - mock.sentinel.host, - None, - mock.sentinel.backend_id) - self.assertEqual(http_client.ACCEPTED, res.status_code) - - @ddt.data(('failover_host', {'host': mock.sentinel.host, - 'backend_id': mock.sentinel.backend_id}), - ('freeze', {'host': mock.sentinel.host}), - ('thaw', {'host': mock.sentinel.host})) - @ddt.unpack - @mock.patch('cinder.objects.ServiceList.get_all') - def test_services_action_host_not_found(self, method, body, - mock_get_all_services): - url = '/v2/%s/os-services/%s' % (fake.PROJECT_ID, method) - req = fakes.HTTPRequest.blank(url) - mock_get_all_services.return_value = [] - msg = 'No service found with host=%s' % mock.sentinel.host - result = self.assertRaises(exception.InvalidInput, - self.controller.update, - req, method, body) - self.assertEqual(msg, result.msg) - - @ddt.data(('failover', {'cluster': mock.sentinel.cluster, - 'backend_id': mock.sentinel.backend_id}), - ('freeze', {'cluster': mock.sentinel.cluster}), - ('thaw', {'cluster': mock.sentinel.cluster})) - @ddt.unpack - @mock.patch('cinder.objects.ServiceList.get_all') - def test_services_action_cluster_not_found(self, method, body, - mock_get_all_services): - url = '/v3/%s/os-services/%s' % (fake.PROJECT_ID, method) - req = fakes.HTTPRequest.blank(url, version='3.26') - mock_get_all_services.return_value = [] - msg = 'No service found with cluster=%s' % mock.sentinel.cluster - result = 
self.assertRaises(exception.InvalidInput, - self.controller.update, req, - method, body) - self.assertEqual(msg, result.msg) - - def test_services_freeze(self): - url = '/v2/%s/os-services/freeze' % fake.PROJECT_ID - req = fakes.HTTPRequest.blank(url) - body = {'host': mock.sentinel.host} - with mock.patch.object(self.controller.volume_api, 'freeze_host') \ - as freeze_mock: - res = self.controller.update(req, 'freeze', body) - freeze_mock.assert_called_once_with(req.environ['cinder.context'], - mock.sentinel.host, None) - self.assertEqual(freeze_mock.return_value, res) - - def test_services_thaw(self): - url = '/v2/%s/os-services/thaw' % fake.PROJECT_ID - req = fakes.HTTPRequest.blank(url) - body = {'host': mock.sentinel.host} - with mock.patch.object(self.controller.volume_api, 'thaw_host') \ - as thaw_mock: - res = self.controller.update(req, 'thaw', body) - thaw_mock.assert_called_once_with(req.environ['cinder.context'], - mock.sentinel.host, None) - self.assertEqual(thaw_mock.return_value, res) - - @ddt.data('freeze', 'thaw', 'failover_host') - def test_services_replication_calls_no_host(self, method): - url = '/v2/%s/os-services/%s' % (fake.PROJECT_ID, method) - req = fakes.HTTPRequest.blank(url) - self.assertRaises(exception.InvalidInput, - self.controller.update, req, method, {}) - - @mock.patch('cinder.api.contrib.services.ServiceController._set_log') - def test_set_log(self, set_log_mock): - set_log_mock.return_value = None - req = FakeRequest(version='3.32') - body = mock.sentinel.body - res = self.controller.update(req, 'set-log', body) - self.assertEqual(set_log_mock.return_value, res) - set_log_mock.assert_called_once_with(mock.ANY, body) - - @mock.patch('cinder.api.contrib.services.ServiceController._get_log') - def test_get_log(self, get_log_mock): - get_log_mock.return_value = None - req = FakeRequest(version='3.32') - body = mock.sentinel.body - res = self.controller.update(req, 'get-log', body) - self.assertEqual(get_log_mock.return_value, res) - 
get_log_mock.assert_called_once_with(mock.ANY, body) - - def test__log_params_binaries_services_wrong_binary(self): - body = {'binary': 'wrong-binary'} - self.assertRaises(exception.InvalidInput, - self.controller._log_params_binaries_services, - 'get-log', body) - - @ddt.data(None, '', '*') - @mock.patch('cinder.objects.ServiceList.get_all') - def test__log_params_binaries_service_all(self, binary, service_list_mock): - body = {'binary': binary, 'server': 'host1'} - binaries, services = self.controller._log_params_binaries_services( - mock.sentinel.context, body) - self.assertEqual(self.controller.LOG_BINARIES, binaries) - self.assertEqual(service_list_mock.return_value, services) - service_list_mock.assert_called_once_with( - mock.sentinel.context, filters={'host_or_cluster': body['server'], - 'is_up': True}) - - @ddt.data('cinder-api', 'cinder-volume', 'cinder-scheduler', - 'cinder-backup') - @mock.patch('cinder.objects.ServiceList.get_all') - def test__log_params_binaries_service_one(self, binary, service_list_mock): - body = {'binary': binary, 'server': 'host1'} - binaries, services = self.controller._log_params_binaries_services( - mock.sentinel.context, body) - self.assertEqual([binary], binaries) - - if binary == 'cinder-api': - self.assertEqual([], services) - service_list_mock.assert_not_called() - else: - self.assertEqual(service_list_mock.return_value, services) - service_list_mock.assert_called_once_with( - mock.sentinel.context, - filters={'host_or_cluster': body['server'], 'binary': binary, - 'is_up': True}) - - @ddt.data(None, '', 'wronglevel') - def test__set_log_invalid_level(self, level): - body = {'level': level} - self.assertRaises(exception.InvalidInput, - self.controller._set_log, self.context, body) - - @mock.patch('cinder.utils.get_log_method') - @mock.patch('cinder.objects.ServiceList.get_all') - @mock.patch('cinder.utils.set_log_levels') - @mock.patch('cinder.scheduler.rpcapi.SchedulerAPI.set_log_levels') - 
@mock.patch('cinder.volume.rpcapi.VolumeAPI.set_log_levels') - @mock.patch('cinder.backup.rpcapi.BackupAPI.set_log_levels') - def test__set_log(self, backup_rpc_mock, vol_rpc_mock, sch_rpc_mock, - set_log_mock, get_all_mock, get_log_mock): - services = [ - objects.Service(self.context, binary='cinder-scheduler'), - objects.Service(self.context, binary='cinder-volume'), - objects.Service(self.context, binary='cinder-backup'), - ] - get_all_mock.return_value = services - body = {'binary': '*', 'prefix': 'eventlet.', 'level': 'debug'} - log_level = objects.LogLevel(prefix=body['prefix'], - level=body['level']) - with mock.patch('cinder.objects.LogLevel') as log_level_mock: - log_level_mock.return_value = log_level - res = self.controller._set_log(mock.sentinel.context, body) - log_level_mock.assert_called_once_with(mock.sentinel.context, - prefix=body['prefix'], - level=body['level']) - - self.assertEqual(202, res.status_code) - - set_log_mock.assert_called_once_with(body['prefix'], body['level']) - sch_rpc_mock.assert_called_once_with(mock.sentinel.context, - services[0], log_level) - vol_rpc_mock.assert_called_once_with(mock.sentinel.context, - services[1], log_level) - backup_rpc_mock.assert_called_once_with(mock.sentinel.context, - services[2], log_level) - get_log_mock.assert_called_once_with(body['level']) - - @mock.patch('cinder.objects.ServiceList.get_all') - @mock.patch('cinder.utils.get_log_levels') - @mock.patch('cinder.scheduler.rpcapi.SchedulerAPI.get_log_levels') - @mock.patch('cinder.volume.rpcapi.VolumeAPI.get_log_levels') - @mock.patch('cinder.backup.rpcapi.BackupAPI.get_log_levels') - def test__get_log(self, backup_rpc_mock, vol_rpc_mock, sch_rpc_mock, - get_log_mock, get_all_mock): - get_log_mock.return_value = mock.sentinel.api_levels - backup_rpc_mock.return_value = [ - objects.LogLevel(prefix='p1', level='l1'), - objects.LogLevel(prefix='p2', level='l2') - ] - vol_rpc_mock.return_value = [ - objects.LogLevel(prefix='p3', level='l3'), - 
objects.LogLevel(prefix='p4', level='l4') - ] - sch_rpc_mock.return_value = [ - objects.LogLevel(prefix='p5', level='l5'), - objects.LogLevel(prefix='p6', level='l6') - ] - - services = [ - objects.Service(self.context, binary='cinder-scheduler', - host='host'), - objects.Service(self.context, binary='cinder-volume', - host='host@backend#pool'), - objects.Service(self.context, binary='cinder-backup', host='host'), - ] - get_all_mock.return_value = services - body = {'binary': '*', 'prefix': 'eventlet.'} - - log_level = objects.LogLevel(prefix=body['prefix']) - with mock.patch('cinder.objects.LogLevel') as log_level_mock: - log_level_mock.return_value = log_level - res = self.controller._get_log(mock.sentinel.context, body) - log_level_mock.assert_called_once_with(mock.sentinel.context, - prefix=body['prefix']) - - expected = {'log_levels': [ - {'binary': 'cinder-api', - 'host': CONF.host, - 'levels': mock.sentinel.api_levels}, - {'binary': 'cinder-scheduler', 'host': 'host', - 'levels': {'p5': 'l5', 'p6': 'l6'}}, - {'binary': 'cinder-volume', - 'host': 'host@backend#pool', - 'levels': {'p3': 'l3', 'p4': 'l4'}}, - {'binary': 'cinder-backup', 'host': 'host', - 'levels': {'p1': 'l1', 'p2': 'l2'}}, - ]} - - self.assertDictEqual(expected, res) - - get_log_mock.assert_called_once_with(body['prefix']) - sch_rpc_mock.assert_called_once_with(mock.sentinel.context, - services[0], log_level) - vol_rpc_mock.assert_called_once_with(mock.sentinel.context, - services[1], log_level) - backup_rpc_mock.assert_called_once_with(mock.sentinel.context, - services[2], log_level) diff --git a/cinder/tests/unit/api/contrib/test_snapshot_actions.py b/cinder/tests/unit/api/contrib/test_snapshot_actions.py deleted file mode 100644 index 800c4a753..000000000 --- a/cinder/tests/unit/api/contrib/test_snapshot_actions.py +++ /dev/null @@ -1,90 +0,0 @@ -# Copyright 2013, Red Hat, Inc. 
-# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import mock -from oslo_serialization import jsonutils -from six.moves import http_client -import webob - -from cinder import context -from cinder import db -from cinder.objects import fields -from cinder import test -from cinder.tests.unit.api import fakes -from cinder.tests.unit.api.v2 import fakes as v2_fakes -from cinder.tests.unit import fake_constants as fake - - -def fake_snapshot_get(context, snapshot_id): - snapshot = v2_fakes.fake_snapshot(snapshot_id) - - if snapshot_id == fake.SNAPSHOT_ID: - snapshot['status'] = fields.SnapshotStatus.CREATING - else: - snapshot['status'] = fields.SnapshotStatus.ERROR - return snapshot - - -class SnapshotActionsTest(test.TestCase): - - def setUp(self): - super(SnapshotActionsTest, self).setUp() - self.user_ctxt = context.RequestContext( - fake.USER_ID, fake.PROJECT_ID, auth_token=True) - - @mock.patch('cinder.db.snapshot_update', autospec=True) - @mock.patch('cinder.db.sqlalchemy.api._snapshot_get', - side_effect=fake_snapshot_get) - @mock.patch('cinder.db.snapshot_metadata_get', return_value=dict()) - def test_update_snapshot_status(self, metadata_get, *args): - - body = {'os-update_snapshot_status': - {'status': fields.SnapshotStatus.AVAILABLE}} - req = webob.Request.blank('/v2/%s/snapshots/%s/action' % ( - fake.PROJECT_ID, fake.SNAPSHOT_ID)) - req.method = "POST" - req.body = jsonutils.dump_as_bytes(body) - req.headers["content-type"] = "application/json" - - res = 
req.get_response(fakes.wsgi_app( - fake_auth_context=self.user_ctxt)) - self.assertEqual(http_client.ACCEPTED, res.status_int) - - @mock.patch('cinder.db.sqlalchemy.api._snapshot_get', - side_effect=fake_snapshot_get) - @mock.patch('cinder.db.snapshot_metadata_get', return_value=dict()) - def test_update_snapshot_status_invalid_status(self, metadata_get, *args): - body = {'os-update_snapshot_status': {'status': 'in-use'}} - req = webob.Request.blank('/v2/%s/snapshots/%s/action' % ( - fake.PROJECT_ID, fake.SNAPSHOT_ID)) - req.method = "POST" - req.body = jsonutils.dump_as_bytes(body) - req.headers["content-type"] = "application/json" - - res = req.get_response(fakes.wsgi_app( - fake_auth_context=self.user_ctxt)) - self.assertEqual(http_client.BAD_REQUEST, res.status_int) - - def test_update_snapshot_status_without_status(self): - self.mock_object(db, 'snapshot_get', fake_snapshot_get) - body = {'os-update_snapshot_status': {}} - req = webob.Request.blank('/v2/%s/snapshots/%s/action' % ( - fake.PROJECT_ID, fake.SNAPSHOT_ID)) - req.method = "POST" - req.body = jsonutils.dump_as_bytes(body) - req.headers["content-type"] = "application/json" - - res = req.get_response(fakes.wsgi_app( - fake_auth_context=self.user_ctxt)) - self.assertEqual(http_client.BAD_REQUEST, res.status_int) diff --git a/cinder/tests/unit/api/contrib/test_snapshot_manage.py b/cinder/tests/unit/api/contrib/test_snapshot_manage.py deleted file mode 100644 index 152db5926..000000000 --- a/cinder/tests/unit/api/contrib/test_snapshot_manage.py +++ /dev/null @@ -1,320 +0,0 @@ -# Copyright (c) 2015 Huawei Technologies Co., Ltd. -# Copyright (c) 2016 Stratoscale, Ltd. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. 
You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import mock -from oslo_config import cfg -import oslo_messaging as messaging -from oslo_serialization import jsonutils -from six.moves import http_client -from six.moves.urllib.parse import urlencode -import webob - -from cinder import context -from cinder import exception -from cinder import objects -from cinder import test -from cinder.tests.unit.api import fakes -from cinder.tests.unit import fake_constants as fake -from cinder.tests.unit import fake_service - -CONF = cfg.CONF - - -def app(): - # no auth, just let environ['cinder.context'] pass through - api = fakes.router.APIRouter() - mapper = fakes.urlmap.URLMap() - mapper['/v2'] = api - return mapper - - -def volume_get(self, context, volume_id, viewable_admin_meta=False): - if volume_id == fake.VOLUME_ID: - return objects.Volume(context, id=fake.VOLUME_ID, - _name_id=fake.VOLUME2_ID, - host='fake_host', cluster_name=None) - raise exception.VolumeNotFound(volume_id=volume_id) - - -def api_get_manageable_snapshots(*args, **kwargs): - """Replacement for cinder.volume.api.API.get_manageable_snapshots.""" - snap_id = 'ffffffff-0000-ffff-0000-ffffffffffff' - snaps = [ - {'reference': {'source-name': 'snapshot-%s' % snap_id}, - 'size': 4, - 'extra_info': 'qos_setting:high', - 'safe_to_manage': False, - 'reason_not_safe': 'snapshot in use', - 'cinder_id': snap_id, - 'source_reference': {'source-name': - 'volume-00000000-ffff-0000-ffff-000000'}}, - {'reference': {'source-name': 'mysnap'}, - 'size': 5, - 'extra_info': 'qos_setting:low', - 'safe_to_manage': True, - 'reason_not_safe': None, - 'cinder_id': 
None, - 'source_reference': {'source-name': 'myvol'}}] - return snaps - - -@mock.patch('cinder.volume.api.API.get', volume_get) -class SnapshotManageTest(test.TestCase): - """Test cases for cinder/api/contrib/snapshot_manage.py - - The API extension adds a POST /os-snapshot-manage API that is passed a - cinder volume id, and a driver-specific reference parameter. - If everything is passed correctly, - then the cinder.volume.api.API.manage_existing_snapshot method - is invoked to manage an existing storage object on the host. - - In this set of test cases, we are ensuring that the code correctly parses - the request structure and raises the correct exceptions when things are not - right, and calls down into cinder.volume.api.API.manage_existing_snapshot - with the correct arguments. - """ - - def setUp(self): - super(SnapshotManageTest, self).setUp() - self._admin_ctxt = context.RequestContext(fake.USER_ID, - fake.PROJECT_ID, - is_admin=True) - self._non_admin_ctxt = context.RequestContext(fake.USER_ID, - fake.PROJECT_ID, - is_admin=False) - - def _get_resp_post(self, body): - """Helper to execute an os-snapshot-manage API call.""" - req = webob.Request.blank('/v2/%s/os-snapshot-manage' % - fake.PROJECT_ID) - req.method = 'POST' - req.headers['Content-Type'] = 'application/json' - req.environ['cinder.context'] = self._admin_ctxt - req.body = jsonutils.dump_as_bytes(body) - res = req.get_response(app()) - return res - - @mock.patch('cinder.volume.rpcapi.VolumeAPI.manage_existing_snapshot') - @mock.patch('cinder.volume.api.API.create_snapshot_in_db') - @mock.patch('cinder.db.sqlalchemy.api.service_get') - def test_manage_snapshot_ok(self, mock_db, - mock_create_snapshot, mock_rpcapi): - """Test successful manage snapshot execution. - - Tests for correct operation when valid arguments are passed in the - request body. We ensure that cinder.volume.api.API.manage_existing got - called with the correct arguments, and that we return the correct HTTP - code to the caller. 
- """ - mock_db.return_value = fake_service.fake_service_obj( - self._admin_ctxt, - binary='cinder-volume') - body = {'snapshot': {'volume_id': fake.VOLUME_ID, 'ref': 'fake_ref'}} - res = self._get_resp_post(body) - self.assertEqual(http_client.ACCEPTED, res.status_int, res) - - # Check the db.service_get was called with correct arguments. - mock_db.assert_called_once_with( - mock.ANY, None, host='fake_host', binary='cinder-volume', - cluster_name=None) - - # Check the create_snapshot_in_db was called with correct arguments. - self.assertEqual(1, mock_create_snapshot.call_count) - args = mock_create_snapshot.call_args[0] - named_args = mock_create_snapshot.call_args[1] - self.assertEqual(fake.VOLUME_ID, args[1].get('id')) - # We should commit quota in cinder-volume layer for this operation. - self.assertFalse(named_args['commit_quota']) - - # Check the volume_rpcapi.manage_existing_snapshot was called with - # correct arguments. - self.assertEqual(1, mock_rpcapi.call_count) - args = mock_rpcapi.call_args[0] - self.assertEqual('fake_ref', args[2]) - - @mock.patch('cinder.objects.service.Service.is_up', - return_value=True, - new_callable=mock.PropertyMock) - @mock.patch('cinder.volume.rpcapi.VolumeAPI.manage_existing_snapshot') - @mock.patch('cinder.volume.api.API.create_snapshot_in_db') - @mock.patch('cinder.db.sqlalchemy.api.service_get') - def test_manage_snapshot_disabled(self, mock_db, mock_create_snapshot, - mock_rpcapi, mock_is_up): - """Test manage snapshot failure due to disabled service.""" - mock_db.return_value = fake_service.fake_service_obj(self._admin_ctxt, - disabled=True) - body = {'snapshot': {'volume_id': fake.VOLUME_ID, 'ref': 'fake_ref'}} - res = self._get_resp_post(body) - self.assertEqual(http_client.BAD_REQUEST, res.status_int, res) - self.assertEqual(exception.ServiceUnavailable.message, - res.json['badRequest']['message']) - mock_create_snapshot.assert_not_called() - mock_rpcapi.assert_not_called() - mock_is_up.assert_not_called() - - 
@mock.patch('cinder.objects.service.Service.is_up', return_value=False, - new_callable=mock.PropertyMock) - @mock.patch('cinder.volume.rpcapi.VolumeAPI.manage_existing_snapshot') - @mock.patch('cinder.volume.api.API.create_snapshot_in_db') - @mock.patch('cinder.db.sqlalchemy.api.service_get') - def test_manage_snapshot_is_down(self, mock_db, mock_create_snapshot, - mock_rpcapi, mock_is_up): - """Test manage snapshot failure due to down service.""" - mock_db.return_value = fake_service.fake_service_obj(self._admin_ctxt) - body = {'snapshot': {'volume_id': fake.VOLUME_ID, 'ref': 'fake_ref'}} - res = self._get_resp_post(body) - self.assertEqual(http_client.BAD_REQUEST, res.status_int, res) - self.assertEqual(exception.ServiceUnavailable.message, - res.json['badRequest']['message']) - mock_create_snapshot.assert_not_called() - mock_rpcapi.assert_not_called() - self.assertTrue(mock_is_up.called) - - def test_manage_snapshot_missing_volume_id(self): - """Test correct failure when volume_id is not specified.""" - body = {'snapshot': {'ref': 'fake_ref'}} - res = self._get_resp_post(body) - self.assertEqual(http_client.BAD_REQUEST, res.status_int) - - def test_manage_snapshot_missing_ref(self): - """Test correct failure when the ref is not specified.""" - body = {'snapshot': {'volume_id': fake.VOLUME_ID}} - res = self._get_resp_post(body) - self.assertEqual(http_client.BAD_REQUEST, res.status_int) - - def test_manage_snapshot_error_body(self): - """Test correct failure when body is invaild.""" - body = {'error_snapshot': {'volume_id': fake.VOLUME_ID}} - res = self._get_resp_post(body) - self.assertEqual(http_client.BAD_REQUEST, res.status_int) - - def test_manage_snapshot_error_volume_id(self): - """Test correct failure when volume can't be found.""" - body = {'snapshot': {'volume_id': 'error_volume_id', - 'ref': 'fake_ref'}} - res = self._get_resp_post(body) - self.assertEqual(http_client.NOT_FOUND, res.status_int) - - def _get_resp_get(self, host, detailed, paging, 
admin=True): - """Helper to execute a GET os-snapshot-manage API call.""" - params = {'host': host} - if paging: - params.update({'marker': '1234', 'limit': 10, - 'offset': 4, 'sort': 'reference:asc'}) - query_string = "?%s" % urlencode(params) - detail = "" - if detailed: - detail = "/detail" - url = "/v2/%s/os-snapshot-manage%s%s" % (fake.PROJECT_ID, detail, - query_string) - req = webob.Request.blank(url) - req.method = 'GET' - req.headers['Content-Type'] = 'application/json' - req.environ['cinder.context'] = (self._admin_ctxt if admin - else self._non_admin_ctxt) - res = req.get_response(app()) - return res - - @mock.patch('cinder.volume.api.API.get_manageable_snapshots', - wraps=api_get_manageable_snapshots) - def test_get_manageable_snapshots_non_admin(self, mock_api_manageable): - res = self._get_resp_get('fakehost', False, False, admin=False) - self.assertEqual(http_client.FORBIDDEN, res.status_int) - self.assertEqual(False, mock_api_manageable.called) - res = self._get_resp_get('fakehost', True, False, admin=False) - self.assertEqual(http_client.FORBIDDEN, res.status_int) - self.assertEqual(False, mock_api_manageable.called) - - @mock.patch('cinder.volume.api.API.get_manageable_snapshots', - wraps=api_get_manageable_snapshots) - def test_get_manageable_snapshots_ok(self, mock_api_manageable): - res = self._get_resp_get('fakehost', False, False) - snap_name = 'snapshot-ffffffff-0000-ffff-0000-ffffffffffff' - exp = {'manageable-snapshots': - [{'reference': {'source-name': snap_name}, 'size': 4, - 'safe_to_manage': False, - 'source_reference': - {'source-name': 'volume-00000000-ffff-0000-ffff-000000'}}, - {'reference': {'source-name': 'mysnap'}, 'size': 5, - 'safe_to_manage': True, - 'source_reference': {'source-name': 'myvol'}}]} - self.assertEqual(http_client.OK, res.status_int) - self.assertEqual(jsonutils.loads(res.body), exp) - mock_api_manageable.assert_called_once_with( - self._admin_ctxt, 'fakehost', None, limit=CONF.osapi_max_limit, - marker=None, 
offset=0, sort_dirs=['desc'], - sort_keys=['reference']) - - @mock.patch('cinder.volume.api.API.get_manageable_snapshots', - side_effect=messaging.RemoteError( - exc_type='InvalidInput', value='marker not found: 1234')) - def test_get_manageable_snapshots_non_existent_marker( - self, mock_api_manageable): - res = self._get_resp_get('fakehost', detailed=False, paging=True) - self.assertEqual(http_client.BAD_REQUEST, res.status_int) - self.assertTrue(mock_api_manageable.called) - - @mock.patch('cinder.volume.api.API.get_manageable_snapshots', - wraps=api_get_manageable_snapshots) - def test_get_manageable_snapshots_detailed_ok(self, mock_api_manageable): - res = self._get_resp_get('fakehost', True, True) - snap_id = 'ffffffff-0000-ffff-0000-ffffffffffff' - exp = {'manageable-snapshots': - [{'reference': {'source-name': 'snapshot-%s' % snap_id}, - 'size': 4, 'safe_to_manage': False, 'cinder_id': snap_id, - 'reason_not_safe': 'snapshot in use', - 'extra_info': 'qos_setting:high', - 'source_reference': - {'source-name': 'volume-00000000-ffff-0000-ffff-000000'}}, - {'reference': {'source-name': 'mysnap'}, 'size': 5, - 'cinder_id': None, 'safe_to_manage': True, - 'reason_not_safe': None, 'extra_info': 'qos_setting:low', - 'source_reference': {'source-name': 'myvol'}}]} - self.assertEqual(http_client.OK, res.status_int) - self.assertEqual(jsonutils.loads(res.body), exp) - mock_api_manageable.assert_called_once_with( - self._admin_ctxt, 'fakehost', None, limit=10, marker='1234', - offset=4, sort_dirs=['asc'], sort_keys=['reference']) - - @mock.patch('cinder.volume.api.API.get_manageable_snapshots', - side_effect=messaging.RemoteError( - exc_type='InvalidInput', value='marker not found: 1234')) - def test_get_manageable_snapshots_non_existent_marker_detailed( - self, mock_api_manageable): - res = self._get_resp_get('fakehost', detailed=True, paging=True) - self.assertEqual(http_client.BAD_REQUEST, res.status_int) - self.assertTrue(mock_api_manageable.called) - - 
@mock.patch('cinder.objects.service.Service.is_up', return_value=True) - @mock.patch('cinder.db.sqlalchemy.api.service_get') - def test_get_manageable_snapshots_disabled(self, mock_db, mock_is_up): - mock_db.return_value = fake_service.fake_service_obj(self._admin_ctxt, - disabled=True) - res = self._get_resp_get('host_ok', False, True) - self.assertEqual(http_client.BAD_REQUEST, res.status_int, res) - self.assertEqual(exception.ServiceUnavailable.message, - res.json['badRequest']['message']) - mock_is_up.assert_not_called() - - @mock.patch('cinder.objects.service.Service.is_up', return_value=False, - new_callable=mock.PropertyMock) - @mock.patch('cinder.db.sqlalchemy.api.service_get') - def test_get_manageable_snapshots_is_down(self, mock_db, mock_is_up): - mock_db.return_value = fake_service.fake_service_obj(self._admin_ctxt) - res = self._get_resp_get('host_ok', False, True) - self.assertEqual(http_client.BAD_REQUEST, res.status_int, res) - self.assertEqual(exception.ServiceUnavailable.message, - res.json['badRequest']['message']) - self.assertTrue(mock_is_up.called) diff --git a/cinder/tests/unit/api/contrib/test_snapshot_unmanage.py b/cinder/tests/unit/api/contrib/test_snapshot_unmanage.py deleted file mode 100644 index 24e79b2a0..000000000 --- a/cinder/tests/unit/api/contrib/test_snapshot_unmanage.py +++ /dev/null @@ -1,107 +0,0 @@ -# Copyright (c) 2015 Huawei Technologies Co., Ltd. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -import mock -from oslo_serialization import jsonutils -from six.moves import http_client -import webob - -from cinder import context -from cinder import exception -from cinder.objects import fields -from cinder import test -from cinder.tests.unit.api import fakes -from cinder.tests.unit import fake_constants as fake -from cinder.tests.unit import fake_snapshot - - -# This list of fake snapshot is used by our tests. -snapshot_id = fake.SNAPSHOT_ID -bad_snp_id = fake.WILL_NOT_BE_FOUND_ID - - -def app(): - # no auth, just let environ['cinder.context'] pass through - api = fakes.router.APIRouter() - mapper = fakes.urlmap.URLMap() - mapper['/v2'] = api - return mapper - - -def api_snapshot_get(self, context, snp_id): - """Replacement for cinder.volume.api.API.get_snapshot. - - We stub the cinder.volume.api.API.get_snapshot method to check for the - existence of snapshot_id in our list of fake snapshots and raise an - exception if the specified snapshot ID is not in our list. - """ - snapshot = {'id': fake.SNAPSHOT_ID, - 'progress': '100%', - 'volume_id': fake.VOLUME_ID, - 'project_id': fake.PROJECT_ID, - 'status': fields.SnapshotStatus.AVAILABLE} - if snp_id == snapshot_id: - snapshot_objct = fake_snapshot.fake_snapshot_obj(context, **snapshot) - return snapshot_objct - else: - raise exception.SnapshotNotFound(snapshot_id=snp_id) - - -@mock.patch('cinder.volume.api.API.get_snapshot', api_snapshot_get) -class SnapshotUnmanageTest(test.TestCase): - """Test cases for cinder/api/contrib/snapshot_unmanage.py - - The API extension adds an action to snapshots, "os-unmanage", which will - effectively issue a delete operation on the snapshot, but with a flag set - that means that a different method will be invoked on the driver, so that - the snapshot is not actually deleted in the storage backend. 
- - In this set of test cases, we are ensuring that the code correctly parses - the request structure and raises the correct exceptions when things are not - right, and calls down into cinder.volume.api.API.delete_snapshot with the - correct arguments. - """ - - def _get_resp(self, snapshot_id): - """Helper to build an os-unmanage req for the specified snapshot_id.""" - req = webob.Request.blank('/v2/%s/snapshots/%s/action' % ( - fake.PROJECT_ID, snapshot_id)) - req.method = 'POST' - req.headers['Content-Type'] = 'application/json' - req.environ['cinder.context'] = context.RequestContext(fake.USER_ID, - fake.PROJECT_ID, - True) - body = {'os-unmanage': ''} - req.body = jsonutils.dump_as_bytes(body) - res = req.get_response(app()) - return res - - @mock.patch('cinder.db.conditional_update', return_value=1) - @mock.patch('cinder.db.snapshot_update') - @mock.patch('cinder.volume.rpcapi.VolumeAPI.delete_snapshot') - def test_unmanage_snapshot_ok(self, mock_rpcapi, mock_db_update, - mock_conditional_update): - """Return success for valid and unattached volume.""" - res = self._get_resp(snapshot_id) - - self.assertEqual(1, mock_rpcapi.call_count) - self.assertEqual(3, len(mock_rpcapi.call_args[0])) - self.assertEqual(0, len(mock_rpcapi.call_args[1])) - - self.assertEqual(http_client.ACCEPTED, res.status_int, res) - - def test_unmanage_snapshot_bad_snapshot_id(self): - """Return 404 if the volume does not exist.""" - res = self._get_resp(bad_snp_id) - self.assertEqual(http_client.NOT_FOUND, res.status_int, res) diff --git a/cinder/tests/unit/api/contrib/test_types_extra_specs.py b/cinder/tests/unit/api/contrib/test_types_extra_specs.py deleted file mode 100644 index b0b35fb95..000000000 --- a/cinder/tests/unit/api/contrib/test_types_extra_specs.py +++ /dev/null @@ -1,304 +0,0 @@ -# Copyright (c) 2011 Zadara Storage Inc. -# Copyright (c) 2011 OpenStack Foundation -# Copyright 2011 University of Southern California -# All Rights Reserved. 
-# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import mock -from oslo_config import cfg -from oslo_utils import timeutils -import webob - -from cinder.api.contrib import types_extra_specs -from cinder import exception -from cinder import test -from cinder.tests.unit.api import fakes -from cinder.tests.unit import fake_constants as fake -import cinder.wsgi - -CONF = cfg.CONF - - -def return_create_volume_type_extra_specs(context, volume_type_id, - extra_specs): - return fake_volume_type_extra_specs() - - -def return_volume_type_extra_specs(context, volume_type_id): - return fake_volume_type_extra_specs() - - -def return_volume_type(context, volume_type_id, expected_fields=None): - specs = {"key1": "value1", - "key2": "value2", - "key3": "value3", - "key4": "value4", - "key5": "value5"} - return dict(id=id, - name='vol_type_%s' % id, - description='vol_type_desc_%s' % id, - extra_specs=specs, - created_at=timeutils.utcnow(), - updated_at=timeutils.utcnow(), - deleted_at=timeutils.utcnow()) - - -def fake_volume_type_extra_specs(): - specs = {"key1": "value1", - "key2": "value2", - "key3": "value3", - "key4": "value4", - "key5": "value5"} - return specs - - -class VolumeTypesExtraSpecsTest(test.TestCase): - - def setUp(self): - super(VolumeTypesExtraSpecsTest, self).setUp() - self.flags(host='fake') - self.mock_object(cinder.db, 'volume_type_get', return_volume_type) - self.api_path = '/v2/%s/os-volume-types/%s/extra_specs' % ( - fake.PROJECT_ID, fake.VOLUME_TYPE_ID) - 
self.controller = types_extra_specs.VolumeTypeExtraSpecsController() - - """to reset notifier drivers left over from other api/contrib tests""" - - def test_index(self): - self.mock_object(cinder.db, 'volume_type_extra_specs_get', - return_volume_type_extra_specs) - - req = fakes.HTTPRequest.blank(self.api_path) - res_dict = self.controller.index(req, fake.VOLUME_TYPE_ID) - - self.assertEqual('value1', res_dict['extra_specs']['key1']) - - def test_index_no_data(self): - self.mock_object(cinder.db, 'volume_type_extra_specs_get', - return_value={}) - - req = fakes.HTTPRequest.blank(self.api_path) - res_dict = self.controller.index(req, fake.VOLUME_TYPE_ID) - - self.assertEqual(0, len(res_dict['extra_specs'])) - - def test_show(self): - self.mock_object(cinder.db, 'volume_type_extra_specs_get', - return_volume_type_extra_specs) - - req = fakes.HTTPRequest.blank(self.api_path + '/key5') - res_dict = self.controller.show(req, fake.VOLUME_TYPE_ID, 'key5') - - self.assertEqual('value5', res_dict['key5']) - - def test_show_spec_not_found(self): - self.mock_object(cinder.db, 'volume_type_extra_specs_get', - return_value={}) - - req = fakes.HTTPRequest.blank(self.api_path + '/key6') - self.assertRaises(exception.VolumeTypeExtraSpecsNotFound, - self.controller.show, req, fake.VOLUME_ID, 'key6') - - def test_delete(self): - self.mock_object(cinder.db, 'volume_type_extra_specs_delete') - - self.assertEqual(0, len(self.notifier.notifications)) - req = fakes.HTTPRequest.blank(self.api_path + '/key5') - self.controller.delete(req, fake.VOLUME_ID, 'key5') - self.assertEqual(1, len(self.notifier.notifications)) - self.assertIn('created_at', self.notifier.notifications[0]['payload']) - self.assertIn('updated_at', self.notifier.notifications[0]['payload']) - self.assertIn('deleted_at', self.notifier.notifications[0]['payload']) - - def test_delete_not_found(self): - self.mock_object(cinder.db, 'volume_type_extra_specs_delete', - side_effect=exception.VolumeTypeExtraSpecsNotFound( - 
"Not Found")) - - req = fakes.HTTPRequest.blank(self.api_path + '/key6') - self.assertRaises(exception.VolumeTypeExtraSpecsNotFound, - self.controller.delete, req, fake.VOLUME_ID, 'key6') - - @mock.patch('cinder.utils.check_string_length') - def test_create(self, mock_check): - self.mock_object(cinder.db, - 'volume_type_extra_specs_update_or_create', - return_create_volume_type_extra_specs) - body = {"extra_specs": {"key1": "value1"}} - - self.assertEqual(0, len(self.notifier.notifications)) - req = fakes.HTTPRequest.blank(self.api_path) - res_dict = self.controller.create(req, fake.VOLUME_ID, body) - self.assertEqual(1, len(self.notifier.notifications)) - self.assertIn('created_at', self.notifier.notifications[0]['payload']) - self.assertIn('updated_at', self.notifier.notifications[0]['payload']) - self.assertTrue(mock_check.called) - self.assertEqual('value1', res_dict['extra_specs']['key1']) - - @mock.patch.object(cinder.db, 'volume_type_extra_specs_update_or_create') - @mock.patch('cinder.utils.check_string_length') - def test_create_key_allowed_chars( - self, mock_check, volume_type_extra_specs_update_or_create): - mock_return_value = {"key1": "value1", - "key2": "value2", - "key3": "value3", - "key4": "value4", - "key5": "value5"} - volume_type_extra_specs_update_or_create.\ - return_value = mock_return_value - - body = {"extra_specs": {"other_alphanum.-_:": "value1"}} - - self.assertEqual(0, len(self.notifier.notifications)) - - req = fakes.HTTPRequest.blank(self.api_path) - res_dict = self.controller.create(req, fake.VOLUME_ID, body) - self.assertEqual(1, len(self.notifier.notifications)) - self.assertTrue(mock_check.called) - self.assertEqual('value1', - res_dict['extra_specs']['other_alphanum.-_:']) - - @mock.patch.object(cinder.db, 'volume_type_extra_specs_update_or_create') - @mock.patch('cinder.utils.check_string_length') - def test_create_too_many_keys_allowed_chars( - self, mock_check, volume_type_extra_specs_update_or_create): - mock_return_value = 
{"key1": "value1", - "key2": "value2", - "key3": "value3", - "key4": "value4", - "key5": "value5"} - volume_type_extra_specs_update_or_create.\ - return_value = mock_return_value - - body = {"extra_specs": {"other_alphanum.-_:": "value1", - "other2_alphanum.-_:": "value2", - "other3_alphanum.-_:": "value3"}} - - self.assertEqual(0, len(self.notifier.notifications)) - - req = fakes.HTTPRequest.blank(self.api_path) - res_dict = self.controller.create(req, fake.VOLUME_ID, body) - self.assertEqual(1, len(self.notifier.notifications)) - self.assertTrue(mock_check.called) - self.assertEqual('value1', - res_dict['extra_specs']['other_alphanum.-_:']) - self.assertEqual('value2', - res_dict['extra_specs']['other2_alphanum.-_:']) - self.assertEqual('value3', - res_dict['extra_specs']['other3_alphanum.-_:']) - - @mock.patch('cinder.utils.check_string_length') - def test_update_item(self, mock_check): - self.mock_object(cinder.db, - 'volume_type_extra_specs_update_or_create', - return_create_volume_type_extra_specs) - body = {"key1": "value1"} - - self.assertEqual(0, len(self.notifier.notifications)) - req = fakes.HTTPRequest.blank(self.api_path + '/key1') - res_dict = self.controller.update(req, fake.VOLUME_ID, 'key1', body) - self.assertEqual(1, len(self.notifier.notifications)) - self.assertIn('created_at', self.notifier.notifications[0]['payload']) - self.assertIn('updated_at', self.notifier.notifications[0]['payload']) - self.assertTrue(mock_check.called) - - self.assertEqual('value1', res_dict['key1']) - - def test_update_item_too_many_keys(self): - self.mock_object(cinder.db, - 'volume_type_extra_specs_update_or_create', - return_create_volume_type_extra_specs) - body = {"key1": "value1", "key2": "value2"} - - req = fakes.HTTPRequest.blank(self.api_path + '/key1') - self.assertRaises(webob.exc.HTTPBadRequest, self.controller.update, - req, fake.VOLUME_ID, 'key1', body) - - def test_update_item_body_uri_mismatch(self): - self.mock_object(cinder.db, - 
'volume_type_extra_specs_update_or_create', - return_create_volume_type_extra_specs) - body = {"key1": "value1"} - - req = fakes.HTTPRequest.blank(self.api_path + '/bad') - self.assertRaises(webob.exc.HTTPBadRequest, self.controller.update, - req, fake.VOLUME_ID, 'bad', body) - - def _extra_specs_empty_update(self, body): - req = fakes.HTTPRequest.blank('/v2/%s/types/%s/extra_specs' % ( - fake.PROJECT_ID, fake.VOLUME_TYPE_ID)) - req.method = 'POST' - - self.assertRaises(webob.exc.HTTPBadRequest, - self.controller.update, req, fake.VOLUME_ID, body) - - def test_update_no_body(self): - self._extra_specs_empty_update(body=None) - - def test_update_empty_body(self): - self._extra_specs_empty_update(body={}) - - def _extra_specs_create_bad_body(self, body): - req = fakes.HTTPRequest.blank('/v2/%s/types/%s/extra_specs' % ( - fake.PROJECT_ID, fake.VOLUME_TYPE_ID)) - req.method = 'POST' - - self.assertRaises(webob.exc.HTTPBadRequest, - self.controller.create, req, fake.VOLUME_ID, body) - - def test_create_no_body(self): - self._extra_specs_create_bad_body(body=None) - - def test_create_missing_volume(self): - body = {'foo': {'a': 'b'}} - self._extra_specs_create_bad_body(body=body) - - def test_create_malformed_entity(self): - body = {'extra_specs': 'string'} - self._extra_specs_create_bad_body(body=body) - - def test_create_invalid_key(self): - body = {"extra_specs": {"ke/y1": "value1"}} - self._extra_specs_create_bad_body(body=body) - - def test_create_invalid_too_many_key(self): - body = {"key1": "value1", "ke/y2": "value2", "key3": "value3"} - self._extra_specs_create_bad_body(body=body) - - def test_create_volumes_exist(self): - self.mock_object(cinder.db, - 'volume_type_extra_specs_update_or_create', - return_create_volume_type_extra_specs) - body = {"extra_specs": {"key1": "value1"}} - req = fakes.HTTPRequest.blank(self.api_path) - with mock.patch.object( - cinder.db, - 'volume_get_all', - return_value=['a']): - req = 
fakes.HTTPRequest.blank('/v2/%s/types/%s/extra_specs' % ( - fake.PROJECT_ID, fake.VOLUME_TYPE_ID)) - req.method = 'POST' - - body = {"extra_specs": {"key1": "value1"}} - req = fakes.HTTPRequest.blank(self.api_path) - self.assertRaises(webob.exc.HTTPBadRequest, - self.controller.create, - req, - fake.VOLUME_ID, body) - - # Again but with conf set to allow modification - CONF.set_default('allow_inuse_volume_type_modification', True) - res_dict = self.controller.create(req, fake.VOLUME_ID, body) - self.assertEqual({'extra_specs': {'key1': 'value1'}}, - res_dict) diff --git a/cinder/tests/unit/api/contrib/test_types_manage.py b/cinder/tests/unit/api/contrib/test_types_manage.py deleted file mode 100644 index 5f7b77080..000000000 --- a/cinder/tests/unit/api/contrib/test_types_manage.py +++ /dev/null @@ -1,699 +0,0 @@ -# Copyright 2011 OpenStack Foundation -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -import mock -import six -import webob - -import ddt -from oslo_utils import strutils - -from cinder.api.contrib import types_manage -from cinder import context -from cinder import exception -from cinder import test -from cinder.tests.unit.api import fakes -from cinder.tests.unit import fake_constants as fake -from cinder.volume import volume_types - -DEFAULT_VOLUME_TYPE = fake.VOLUME_TYPE_ID -IN_USE_VOLUME_TYPE = fake.VOLUME_TYPE2_ID -UPDATE_DESC_ONLY_TYPE = fake.VOLUME_TYPE3_ID -UPDATE_NAME_ONLY_TYPE = fake.VOLUME_TYPE4_ID -UPDATE_NAME_AFTER_DELETE_TYPE = fake.VOLUME_TYPE5_ID -NOT_FOUND_VOLUME_TYPE = fake.WILL_NOT_BE_FOUND_ID - - -def stub_volume_type(id): - specs = {"key1": "value1", - "key2": "value2", - "key3": "value3", - "key4": "value4", - "key5": "value5"} - return dict(id=id, - name='vol_type_%s' % six.text_type(id), - description='vol_type_desc_%s' % six.text_type(id), - extra_specs=specs) - - -def stub_volume_type_updated(id, is_public=True): - return dict(id=id, - name='vol_type_%s_%s' % (six.text_type(id), six.text_type(id)), - is_public=is_public, - description='vol_type_desc_%s_%s' % ( - six.text_type(id), six.text_type(id))) - - -def stub_volume_type_updated_desc_only(id): - return dict(id=id, - name='vol_type_%s' % six.text_type(id), - description='vol_type_desc_%s_%s' % ( - six.text_type(id), six.text_type(id))) - - -def return_volume_types_get_volume_type(context, id): - if id == fake.WILL_NOT_BE_FOUND_ID: - raise exception.VolumeTypeNotFound(volume_type_id=id) - return stub_volume_type(id) - - -def return_volume_types_destroy(context, name): - if name == fake.WILL_NOT_BE_FOUND_ID: - raise exception.VolumeTypeNotFoundByName(volume_type_name=name) - pass - - -def return_volume_types_with_volumes_destroy(context, id): - if id == IN_USE_VOLUME_TYPE: - raise exception.VolumeTypeInUse(volume_type_id=id) - pass - - -def return_volume_types_create(context, - name, - specs, - is_public, - description): - pass - - -def 
return_volume_types_create_duplicate_type(context, - name, - specs, - is_public, - description): - raise exception.VolumeTypeExists(id=name) - - -def stub_volume_type_updated_name_only(id): - return dict(id=id, - name='vol_type_%s_%s' % (six.text_type(id), six.text_type(id)), - description='vol_type_desc_%s' % six.text_type(id)) - - -def stub_volume_type_updated_name_after_delete(id): - return dict(id=id, - name='vol_type_%s' % six.text_type(id), - description='vol_type_desc_%s' % six.text_type(id)) - - -def return_volume_types_get_volume_type_updated(id, is_public=True): - if id == NOT_FOUND_VOLUME_TYPE: - raise exception.VolumeTypeNotFound(volume_type_id=id) - if id == UPDATE_DESC_ONLY_TYPE: - return stub_volume_type_updated_desc_only(id) - if id == UPDATE_NAME_ONLY_TYPE: - return stub_volume_type_updated_name_only(id) - if id == UPDATE_NAME_AFTER_DELETE_TYPE: - return stub_volume_type_updated_name_after_delete(id) - - # anything else - return stub_volume_type_updated(id, is_public=is_public) - - -def return_volume_types_get_by_name(context, name): - if name == NOT_FOUND_VOLUME_TYPE: - raise exception.VolumeTypeNotFoundByName(volume_type_name=name) - return stub_volume_type(name.split("_")[2]) - - -def return_volume_types_get_default(): - return stub_volume_type(DEFAULT_VOLUME_TYPE) - - -def return_volume_types_get_default_not_found(): - return {} - - -@ddt.ddt -class VolumeTypesManageApiTest(test.TestCase): - def setUp(self): - super(VolumeTypesManageApiTest, self).setUp() - self.flags(host='fake') - self.controller = types_manage.VolumeTypesManageController() - """to reset notifier drivers left over from other api/contrib tests""" - - def test_volume_types_delete(self): - self.stubs.Set(volume_types, 'get_volume_type', - return_volume_types_get_volume_type) - self.stubs.Set(volume_types, 'destroy', - return_volume_types_destroy) - - req = fakes.HTTPRequest.blank('/v2/%s/types/%s' % ( - fake.PROJECT_ID, DEFAULT_VOLUME_TYPE)) - self.assertEqual(0, 
len(self.notifier.notifications)) - self.controller._delete(req, DEFAULT_VOLUME_TYPE) - self.assertEqual(1, len(self.notifier.notifications)) - - def test_volume_types_delete_not_found(self): - self.stubs.Set(volume_types, 'get_volume_type', - return_volume_types_get_volume_type) - self.stubs.Set(volume_types, 'destroy', - return_volume_types_destroy) - - self.assertEqual(0, len(self.notifier.notifications)) - req = fakes.HTTPRequest.blank('/v2/%s/types/%s' % ( - fake.PROJECT_ID, NOT_FOUND_VOLUME_TYPE)) - self.assertRaises(exception.VolumeTypeNotFound, - self.controller._delete, req, NOT_FOUND_VOLUME_TYPE) - self.assertEqual(1, len(self.notifier.notifications)) - - def test_volume_types_with_volumes_destroy(self): - self.stubs.Set(volume_types, 'get_volume_type', - return_volume_types_get_volume_type) - self.stubs.Set(volume_types, 'destroy', - return_volume_types_with_volumes_destroy) - req = fakes.HTTPRequest.blank('/v2/%s/types/%s' % ( - fake.PROJECT_ID, DEFAULT_VOLUME_TYPE)) - self.assertEqual(0, len(self.notifier.notifications)) - self.controller._delete(req, DEFAULT_VOLUME_TYPE) - self.assertEqual(1, len(self.notifier.notifications)) - - @mock.patch('cinder.volume.volume_types.destroy') - @mock.patch('cinder.volume.volume_types.get_volume_type') - @mock.patch('cinder.policy.enforce') - def test_volume_types_delete_with_non_admin(self, mock_policy_enforce, - mock_get, mock_destroy): - - # allow policy authorized user to delete type - mock_policy_enforce.return_value = None - mock_get.return_value = \ - {'extra_specs': {"key1": "value1"}, - 'id': DEFAULT_VOLUME_TYPE, - 'name': u'vol_type_1', - 'description': u'vol_type_desc_%s' % DEFAULT_VOLUME_TYPE} - mock_destroy.side_effect = return_volume_types_destroy - - req = fakes.HTTPRequest.blank('/v2/%s/types/%s' % - (fake.PROJECT_ID, DEFAULT_VOLUME_TYPE), - use_admin_context=False) - self.assertEqual(0, len(self.notifier.notifications)) - self.controller._delete(req, DEFAULT_VOLUME_TYPE) - self.assertEqual(1, 
len(self.notifier.notifications)) - # non policy authorized user fails to delete type - mock_policy_enforce.side_effect = ( - exception.PolicyNotAuthorized(action='type_delete')) - self.assertRaises(exception.PolicyNotAuthorized, - self.controller._delete, - req, DEFAULT_VOLUME_TYPE) - - def test_create(self): - self.stubs.Set(volume_types, 'create', - return_volume_types_create) - self.stubs.Set(volume_types, 'get_volume_type_by_name', - return_volume_types_get_by_name) - - body = {"volume_type": {"name": "vol_type_1", - "os-volume-type-access:is_public": True, - "extra_specs": {"key1": "value1"}}} - req = fakes.HTTPRequest.blank('/v2/%s/types' % fake.PROJECT_ID) - - self.assertEqual(0, len(self.notifier.notifications)) - res_dict = self.controller._create(req, body) - - self.assertEqual(1, len(self.notifier.notifications)) - id = res_dict['volume_type']['id'] - self._check_test_results(res_dict, { - 'expected_name': 'vol_type_1', - 'expected_desc': 'vol_type_desc_%s' % id}) - - @mock.patch('cinder.volume.volume_types.create') - @mock.patch('cinder.volume.volume_types.get_volume_type_by_name') - def test_create_with_description_of_zero_length( - self, mock_get_volume_type_by_name, mock_create_type): - mock_get_volume_type_by_name.return_value = \ - {'extra_specs': {"key1": "value1"}, - 'id': DEFAULT_VOLUME_TYPE, - 'name': u'vol_type_1', - 'description': u''} - - type_description = "" - body = {"volume_type": {"name": "vol_type_1", - "description": type_description, - "extra_specs": {"key1": "value1"}}} - req = fakes.HTTPRequest.blank('/v2/%s/types' % fake.PROJECT_ID) - - res_dict = self.controller._create(req, body) - - self._check_test_results(res_dict, { - 'expected_name': 'vol_type_1', 'expected_desc': ''}) - - def test_create_type_with_name_too_long(self): - type_name = 'a' * 256 - body = {"volume_type": {"name": type_name, - "extra_specs": {"key1": "value1"}}} - req = fakes.HTTPRequest.blank('/v2/%s/types' % fake.PROJECT_ID) - 
self.assertRaises(exception.InvalidInput, - self.controller._create, req, body) - - def test_create_type_with_description_too_long(self): - type_description = 'a' * 256 - body = {"volume_type": {"name": "vol_type_1", - "description": type_description, - "extra_specs": {"key1": "value1"}}} - req = fakes.HTTPRequest.blank('/v2/%s/types' % fake.PROJECT_ID) - self.assertRaises(exception.InvalidInput, - self.controller._create, req, body) - - def test_create_duplicate_type_fail(self): - self.stubs.Set(volume_types, 'create', - return_volume_types_create_duplicate_type) - self.stubs.Set(volume_types, 'get_volume_type_by_name', - return_volume_types_get_by_name) - - body = {"volume_type": {"name": "vol_type_1", - "extra_specs": {"key1": "value1"}}} - req = fakes.HTTPRequest.blank('/v2/%s/types' % fake.PROJECT_ID) - self.assertRaises(webob.exc.HTTPConflict, - self.controller._create, req, body) - - def test_create_type_with_invalid_is_public(self): - body = {"volume_type": {"name": "vol_type_1", - "os-volume-type-access:is_public": "fake", - "description": "test description", - "extra_specs": {"key1": "value1"}}} - req = fakes.HTTPRequest.blank('/v2/%s/types' % fake.PROJECT_ID) - self.assertRaises(exception.InvalidParameterValue, - self.controller._create, req, body) - - @ddt.data('0', 'f', 'false', 'off', 'n', 'no', '1', 't', 'true', 'on', - 'y', 'yes') - @mock.patch.object(volume_types, "get_volume_type_by_name") - @mock.patch.object(volume_types, "create") - @mock.patch("cinder.api.openstack.wsgi.Request.cache_resource") - @mock.patch("cinder.api.views.types.ViewBuilder.show") - def test_create_type_with_valid_is_public_in_string( - self, is_public, mock_show, mock_cache_resource, - mock_create, mock_get): - boolean_is_public = strutils.bool_from_string(is_public) - ctxt = context.RequestContext(fake.USER_ID, fake.PROJECT_ID, True) - body = {"volume_type": {"name": "vol_type_1", - "os-volume-type-access:is_public": - is_public, - "extra_specs": {"key1": "value1"}}} - 
req = fakes.HTTPRequest.blank('/v2/%s/types' % fake.PROJECT_ID) - req.environ['cinder.context'] = ctxt - self.controller._create(req, body) - mock_create.assert_called_once_with( - ctxt, 'vol_type_1', {'key1': 'value1'}, - boolean_is_public, description=None) - - def _create_volume_type_bad_body(self, body): - req = fakes.HTTPRequest.blank('/v2/%s/types' % fake.PROJECT_ID) - req.method = 'POST' - self.assertRaises(webob.exc.HTTPBadRequest, - self.controller._create, req, body) - - def test_create_no_body(self): - self._create_volume_type_bad_body(body=None) - - def test_create_missing_volume(self): - body = {'foo': {'a': 'b'}} - self._create_volume_type_bad_body(body=body) - - def test_create_malformed_entity(self): - body = {'volume_type': 'string'} - self._create_volume_type_bad_body(body=body) - - @mock.patch('cinder.volume.volume_types.create') - @mock.patch('cinder.volume.volume_types.get_volume_type_by_name') - @mock.patch('cinder.policy.enforce') - def test_create_with_none_admin(self, mock_policy_enforce, - mock_get_volume_type_by_name, - mock_create_type): - - # allow policy authorized user to create type - mock_policy_enforce.return_value = None - mock_get_volume_type_by_name.return_value = \ - {'extra_specs': {"key1": "value1"}, - 'id': DEFAULT_VOLUME_TYPE, - 'name': u'vol_type_1', - 'description': u'vol_type_desc_1'} - - body = {"volume_type": {"name": "vol_type_1", - "os-volume-type-access:is_public": True, - "extra_specs": {"key1": "value1"}}} - req = fakes.HTTPRequest.blank('/v2/%s/types' % fake.PROJECT_ID, - use_admin_context=False) - - self.assertEqual(0, len(self.notifier.notifications)) - res_dict = self.controller._create(req, body) - - self.assertEqual(1, len(self.notifier.notifications)) - self._check_test_results(res_dict, { - 'expected_name': 'vol_type_1', 'expected_desc': 'vol_type_desc_1'}) - - # non policy authorized user fails to create type - mock_policy_enforce.side_effect = ( - exception.PolicyNotAuthorized(action='type_create')) - 
self.assertRaises(exception.PolicyNotAuthorized, - self.controller._create, - req, body) - - @ddt.data({'a' * 256: 'a'}, - {'a': 'a' * 256}, - {'': 'a'}, - 'foo', - None) - def test_create_type_with_invalid_extra_specs(self, value): - body = {"volume_type": {"name": "vol_type_1", - "os-volume-type-access:is_public": False, - "description": "test description"}} - body['volume_type']['extra_specs'] = value - req = fakes.HTTPRequest.blank('/v2/%s/types' % fake.PROJECT_ID) - self.assertRaises(exception.InvalidInput, - self.controller._create, req, body) - - @mock.patch('cinder.volume.volume_types.update') - @mock.patch('cinder.volume.volume_types.get_volume_type') - def test_update(self, mock_get, mock_update): - mock_get.return_value = return_volume_types_get_volume_type_updated( - DEFAULT_VOLUME_TYPE, is_public=False) - body = {"volume_type": {"is_public": False}} - req = fakes.HTTPRequest.blank('/v2/%s/types/%s' % ( - fake.PROJECT_ID, DEFAULT_VOLUME_TYPE)) - req.method = 'PUT' - - self.assertEqual(0, len(self.notifier.notifications)) - res_dict = self.controller._update(req, DEFAULT_VOLUME_TYPE, body) - self.assertEqual(1, len(self.notifier.notifications)) - self._check_test_results( - res_dict, - {'expected_desc': 'vol_type_desc_%s_%s' % - (DEFAULT_VOLUME_TYPE, DEFAULT_VOLUME_TYPE), - 'expected_name': 'vol_type_%s_%s' % - (DEFAULT_VOLUME_TYPE, DEFAULT_VOLUME_TYPE), - 'is_public': False}) - - @ddt.data('0', 'f', 'false', 'off', 'n', 'no', '1', 't', 'true', 'on', - 'y', 'yes') - @mock.patch('cinder.volume.volume_types.update') - @mock.patch('cinder.volume.volume_types.get_volume_type') - @mock.patch("cinder.api.openstack.wsgi.Request.cache_resource") - @mock.patch("cinder.api.views.types.ViewBuilder.show") - def test_update_with_valid_is_public_in_string( - self, is_public, mock_show, mock_cache_resource, - mock_get, mock_update): - body = {"volume_type": {"is_public": is_public}} - req = fakes.HTTPRequest.blank('/v2/%s/types/%s' % ( - fake.PROJECT_ID, 
DEFAULT_VOLUME_TYPE)) - req.method = 'PUT' - ctxt = context.RequestContext(fake.USER_ID, fake.PROJECT_ID, True) - req.environ['cinder.context'] = ctxt - boolean_is_public = strutils.bool_from_string(is_public) - - self.controller._update(req, DEFAULT_VOLUME_TYPE, body) - mock_update.assert_called_once_with( - ctxt, DEFAULT_VOLUME_TYPE, None, None, - is_public=boolean_is_public) - - @mock.patch('cinder.volume.volume_types.update') - @mock.patch('cinder.volume.volume_types.get_volume_type') - def test_update_type_with_description_having_length_zero( - self, mock_get_volume_type, mock_type_update): - - mock_get_volume_type.return_value = \ - {'id': DEFAULT_VOLUME_TYPE, 'name': u'vol_type_1', - 'description': u''} - - type_description = "" - body = {"volume_type": {"description": type_description}} - req = fakes.HTTPRequest.blank('/v2/%s/types/%s' % ( - fake.PROJECT_ID, DEFAULT_VOLUME_TYPE)) - req.method = 'PUT' - resp = self.controller._update(req, DEFAULT_VOLUME_TYPE, body) - self._check_test_results(resp, - {'expected_desc': '', - 'expected_name': 'vol_type_1'}) - - def test_update_type_with_name_too_long(self): - type_name = 'a' * 256 - body = {"volume_type": {"name": type_name, - "description": ""}} - req = fakes.HTTPRequest.blank('/v2/%s/types/%s' % ( - fake.PROJECT_ID, DEFAULT_VOLUME_TYPE)) - req.method = 'PUT' - self.assertRaises(exception.InvalidInput, - self.controller._update, req, - DEFAULT_VOLUME_TYPE, body) - - def test_update_type_with_description_too_long(self): - type_description = 'a' * 256 - body = {"volume_type": {"description": type_description}} - req = fakes.HTTPRequest.blank('/v2/%s/types/%s' % ( - fake.PROJECT_ID, DEFAULT_VOLUME_TYPE)) - req.method = 'PUT' - self.assertRaises(exception.InvalidInput, - self.controller._update, req, - DEFAULT_VOLUME_TYPE, body) - - @mock.patch('cinder.volume.volume_types.get_volume_type') - @mock.patch('cinder.volume.volume_types.update') - def test_update_non_exist(self, mock_update, mock_get_volume_type): - 
mock_get_volume_type.side_effect = exception.VolumeTypeNotFound( - volume_type_id=NOT_FOUND_VOLUME_TYPE) - body = {"volume_type": {"name": "vol_type_1_1", - "description": "vol_type_desc_1_1"}} - req = fakes.HTTPRequest.blank('/v2/%s/types/%s' % ( - fake.PROJECT_ID, NOT_FOUND_VOLUME_TYPE)) - req.method = 'PUT' - - self.assertEqual(0, len(self.notifier.notifications)) - self.assertRaises(exception.VolumeTypeNotFound, - self.controller._update, req, - NOT_FOUND_VOLUME_TYPE, body) - self.assertEqual(1, len(self.notifier.notifications)) - - @mock.patch('cinder.volume.volume_types.get_volume_type') - @mock.patch('cinder.volume.volume_types.update') - def test_update_db_fail(self, mock_update, mock_get_volume_type): - mock_update.side_effect = exception.VolumeTypeUpdateFailed( - id=DEFAULT_VOLUME_TYPE) - mock_get_volume_type.return_value = stub_volume_type( - DEFAULT_VOLUME_TYPE) - - body = {"volume_type": {"name": "vol_type_1_1", - "description": "vol_type_desc_1_1"}} - req = fakes.HTTPRequest.blank('/v2/%s/types/%s' % ( - fake.PROJECT_ID, DEFAULT_VOLUME_TYPE)) - req.method = 'PUT' - - self.assertEqual(0, len(self.notifier.notifications)) - self.assertRaises(webob.exc.HTTPInternalServerError, - self.controller._update, req, - DEFAULT_VOLUME_TYPE, body) - self.assertEqual(1, len(self.notifier.notifications)) - - def test_update_no_name_no_description(self): - body = {"volume_type": {}} - req = fakes.HTTPRequest.blank('/v2/%s/types/%s' % ( - fake.PROJECT_ID, DEFAULT_VOLUME_TYPE)) - req.method = 'PUT' - - self.assertRaises(webob.exc.HTTPBadRequest, - self.controller._update, req, - DEFAULT_VOLUME_TYPE, body) - - def test_update_empty_name(self): - body = {"volume_type": {"name": " ", - "description": "something"}} - req = fakes.HTTPRequest.blank('/v2/%s/types/%s' % ( - fake.PROJECT_ID, DEFAULT_VOLUME_TYPE)) - req.method = 'PUT' - - self.assertRaises(webob.exc.HTTPBadRequest, - self.controller._update, req, - DEFAULT_VOLUME_TYPE, body) - - 
@mock.patch('cinder.volume.volume_types.get_volume_type') - @mock.patch('cinder.db.volume_type_update') - @mock.patch('cinder.quota.VolumeTypeQuotaEngine.' - 'update_quota_resource') - def test_update_only_name(self, mock_update_quota, - mock_update, mock_get): - mock_get.return_value = return_volume_types_get_volume_type_updated( - UPDATE_NAME_ONLY_TYPE) - - ctxt = context.RequestContext(fake.USER_ID, fake.PROJECT_ID, True) - name = "vol_type_%s" % UPDATE_NAME_ONLY_TYPE - updated_name = "%s_%s" % (name, UPDATE_NAME_ONLY_TYPE) - desc = "vol_type_desc_%s" % UPDATE_NAME_ONLY_TYPE - body = {"volume_type": {"name": name}} - req = fakes.HTTPRequest.blank('/v2/%s/types/%s' % - (fake.PROJECT_ID, UPDATE_NAME_ONLY_TYPE)) - req.method = 'PUT' - req.environ['cinder.context'] = ctxt - - self.assertEqual(0, len(self.notifier.notifications)) - res_dict = self.controller._update(req, UPDATE_NAME_ONLY_TYPE, body) - self.assertEqual(1, len(self.notifier.notifications)) - mock_update_quota.assert_called_once_with(ctxt, updated_name, name) - self._check_test_results(res_dict, - {'expected_name': updated_name, - 'expected_desc': desc}) - - @mock.patch('cinder.volume.volume_types.update') - @mock.patch('cinder.volume.volume_types.get_volume_type') - def test_update_only_description(self, mock_get, mock_update): - mock_get.return_value = return_volume_types_get_volume_type_updated( - UPDATE_DESC_ONLY_TYPE) - name = "vol_type_%s" % UPDATE_DESC_ONLY_TYPE - desc = "vol_type_desc_%s" % UPDATE_DESC_ONLY_TYPE - updated_desc = "%s_%s" % (desc, UPDATE_DESC_ONLY_TYPE) - body = {"volume_type": {"description": updated_desc}} - req = fakes.HTTPRequest.blank('/v2/%s/types/%s' % ( - fake.PROJECT_ID, UPDATE_DESC_ONLY_TYPE)) - req.method = 'PUT' - - self.assertEqual(0, len(self.notifier.notifications)) - res_dict = self.controller._update(req, UPDATE_DESC_ONLY_TYPE, body) - self.assertEqual(1, len(self.notifier.notifications)) - self._check_test_results(res_dict, - {'expected_name': name, - 
'expected_desc': updated_desc}) - - @mock.patch('cinder.volume.volume_types.update') - @mock.patch('cinder.volume.volume_types.get_volume_type') - def test_update_only_is_public(self, mock_get, mock_update): - is_public = False - mock_get.return_value = return_volume_types_get_volume_type_updated( - DEFAULT_VOLUME_TYPE, is_public=is_public) - name = "vol_type_%s" % DEFAULT_VOLUME_TYPE - updated_name = '%s_%s' % (name, DEFAULT_VOLUME_TYPE) - desc = "vol_type_desc_%s" % DEFAULT_VOLUME_TYPE - updated_desc = "%s_%s" % (desc, DEFAULT_VOLUME_TYPE) - body = {"volume_type": {"is_public": is_public}} - req = fakes.HTTPRequest.blank('/v2/%s/types/%s' % ( - fake.PROJECT_ID, DEFAULT_VOLUME_TYPE)) - req.method = 'PUT' - - self.assertEqual(0, len(self.notifier.notifications)) - res_dict = self.controller._update(req, DEFAULT_VOLUME_TYPE, body) - self.assertEqual(1, len(self.notifier.notifications)) - self._check_test_results(res_dict, - {'expected_name': updated_name, - 'expected_desc': updated_desc, - 'is_public': False}) - - def test_update_invalid_is_public(self): - body = {"volume_type": {"name": "test", - "description": "something", - "is_public": "fake"}} - req = fakes.HTTPRequest.blank('/v2/%s/types/%s' % ( - fake.PROJECT_ID, DEFAULT_VOLUME_TYPE)) - req.method = 'PUT' - - self.assertRaises(exception.InvalidParameterValue, - self.controller._update, req, - DEFAULT_VOLUME_TYPE, body) - - @mock.patch('cinder.volume.volume_types.update') - @mock.patch('cinder.volume.volume_types.get_volume_type') - def test_rename_existing_name(self, mock_get, mock_update): - id = UPDATE_NAME_AFTER_DELETE_TYPE - name = "vol_type_%s" % id - updated_name = "%s_%s" % (name, id) - desc = "vol_type_desc_%s" % id - mock_update.side_effect = exception.VolumeTypeExists( - id=id, name=name) - mock_get.return_value = return_volume_types_get_volume_type_updated( - UPDATE_NAME_AFTER_DELETE_TYPE) - # first attempt fail - body = {"volume_type": {"name": name}} - req = 
fakes.HTTPRequest.blank('/v2/%s/types/%s' % ( - fake.PROJECT_ID, UPDATE_NAME_AFTER_DELETE_TYPE)) - req.method = 'PUT' - - self.assertEqual(0, len(self.notifier.notifications)) - self.assertRaises(webob.exc.HTTPConflict, - self.controller._update, req, - UPDATE_NAME_AFTER_DELETE_TYPE, body) - - self.assertEqual(1, len(self.notifier.notifications)) - - # delete - self.notifier.reset() - self.stubs.Set(volume_types, 'destroy', - return_volume_types_destroy) - req = fakes.HTTPRequest.blank('/v2/%s/types/%s' % ( - fake.PROJECT_ID, UPDATE_NAME_AFTER_DELETE_TYPE)) - self.assertEqual(0, len(self.notifier.notifications)) - self.controller._delete(req, UPDATE_NAME_AFTER_DELETE_TYPE) - self.assertEqual(1, len(self.notifier.notifications)) - - # update again - mock_update.side_effect = mock.MagicMock() - body = {"volume_type": {"name": updated_name}} - req = fakes.HTTPRequest.blank('/v2/%s/types/%s' % ( - fake.PROJECT_ID, UPDATE_NAME_AFTER_DELETE_TYPE)) - req.method = 'PUT' - - self.notifier.reset() - self.assertEqual(0, len(self.notifier.notifications)) - res_dict = self.controller._update(req, UPDATE_NAME_AFTER_DELETE_TYPE, - body) - self._check_test_results(res_dict, - {'expected_name': name, - 'expected_desc': desc}) - self.assertEqual(1, len(self.notifier.notifications)) - - @mock.patch('cinder.volume.volume_types.update') - @mock.patch('cinder.volume.volume_types.get_volume_type') - @mock.patch('cinder.policy.enforce') - def test_update_with_non_admin(self, mock_policy_enforce, mock_get, - mock_update): - - # allow policy authorized user to update type - mock_policy_enforce.return_value = None - mock_get.return_value = return_volume_types_get_volume_type_updated( - DEFAULT_VOLUME_TYPE, is_public=False) - name = "vol_type_%s" % DEFAULT_VOLUME_TYPE - updated_name = "%s_%s" % (name, DEFAULT_VOLUME_TYPE) - desc = "vol_type_desc_%s" % DEFAULT_VOLUME_TYPE - updated_desc = "%s_%s" % (desc, DEFAULT_VOLUME_TYPE) - body = {"volume_type": {"name": updated_name, - "description": 
updated_desc, - "is_public": False}} - req = fakes.HTTPRequest.blank('/v2/%s/types/%s' % ( - fake.PROJECT_ID, DEFAULT_VOLUME_TYPE), - use_admin_context=False) - - req.method = 'PUT' - - self.assertEqual(0, len(self.notifier.notifications)) - res_dict = self.controller._update(req, DEFAULT_VOLUME_TYPE, body) - self.assertEqual(1, len(self.notifier.notifications)) - self._check_test_results(res_dict, - {'expected_desc': updated_desc, - 'expected_name': updated_name, - 'is_public': False}) - - # non policy authorized user fails to update type - mock_policy_enforce.side_effect = ( - exception.PolicyNotAuthorized(action='type_update')) - self.assertRaises(exception.PolicyNotAuthorized, - self.controller._update, - req, DEFAULT_VOLUME_TYPE, body) - - def _check_test_results(self, results, expected_results): - self.assertEqual(1, len(results)) - self.assertEqual(expected_results['expected_desc'], - results['volume_type']['description']) - if expected_results.get('expected_name'): - self.assertEqual(expected_results['expected_name'], - results['volume_type']['name']) - if expected_results.get('is_public') is not None: - self.assertEqual(expected_results['is_public'], - results['volume_type']['is_public']) diff --git a/cinder/tests/unit/api/contrib/test_used_limits.py b/cinder/tests/unit/api/contrib/test_used_limits.py deleted file mode 100644 index cca43233d..000000000 --- a/cinder/tests/unit/api/contrib/test_used_limits.py +++ /dev/null @@ -1,116 +0,0 @@ -# Copyright 2012 OpenStack Foundation -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the -# License for the specific language governing permissions and limitations -# under the License. - -import ddt -import mock - -from cinder.api.contrib import used_limits -from cinder.api.openstack import api_version_request -from cinder.api.openstack import wsgi -from cinder import exception -from cinder import test -from cinder.tests.unit.api import fakes -from cinder.tests.unit import fake_constants as fake - - -class FakeRequest(object): - def __init__(self, context, filter=None, api_version='2.0'): - self.environ = {'cinder.context': context} - self.params = filter or {} - self.api_version_request = api_version_request.APIVersionRequest( - api_version) - - -@ddt.ddt -class UsedLimitsTestCase(test.TestCase): - def setUp(self): - """Run before each test.""" - super(UsedLimitsTestCase, self).setUp() - self.controller = used_limits.UsedLimitsController() - - @ddt.data(('2.0', False), ('3.38', True), ('3.38', False), ('3.39', True), - ('3.39', False)) - @mock.patch('cinder.quota.QUOTAS.get_project_quotas') - @mock.patch('cinder.policy.enforce') - def test_used_limits(self, ver_project, _mock_policy_enforce, - _mock_get_project_quotas): - version, has_project = ver_project - fake_req = FakeRequest(fakes.FakeRequestContext(fake.USER_ID, - fake.PROJECT_ID, - is_admin=True), - api_version=version) - if has_project: - fake_req = FakeRequest(fakes.FakeRequestContext(fake.USER_ID, - fake.PROJECT_ID, - is_admin=True), - filter={'project_id': fake.UUID1}, - api_version=version) - obj = { - "limits": { - "rate": [], - "absolute": {}, - }, - } - res = wsgi.ResponseObject(obj) - - def get_project_quotas(context, project_id, quota_class=None, - defaults=True, usages=True): - if project_id == fake.UUID1: - return {"gigabytes": {'limit': 5, 'in_use': 1}} - return {"gigabytes": {'limit': 10, 'in_use': 2}} - - _mock_get_project_quotas.side_effect = get_project_quotas - # allow user to access used limits - _mock_policy_enforce.return_value = None - - 
self.controller.index(fake_req, res) - abs_limits = res.obj['limits']['absolute'] - - # if admin, only 3.39 and req contains project_id filter, cinder - # returns the specified project's quota. - if version == '3.39' and has_project: - self.assertEqual(1, abs_limits['totalGigabytesUsed']) - else: - self.assertEqual(2, abs_limits['totalGigabytesUsed']) - - fake_req = FakeRequest(fakes.FakeRequestContext(fake.USER_ID, - fake.PROJECT_ID), - api_version=version) - if has_project: - fake_req = FakeRequest(fakes.FakeRequestContext(fake.USER_ID, - fake.PROJECT_ID), - filter={'project_id': fake.UUID1}, - api_version=version) - # if non-admin, cinder always returns self quota. - self.controller.index(fake_req, res) - abs_limits = res.obj['limits']['absolute'] - self.assertEqual(2, abs_limits['totalGigabytesUsed']) - - obj = { - "limits": { - "rate": [], - "absolute": {}, - }, - } - res = wsgi.ResponseObject(obj) - - # unallow user to access used limits - _mock_policy_enforce.side_effect = exception.NotAuthorized - - self.controller.index(fake_req, res) - abs_limits = res.obj['limits']['absolute'] - self.assertNotIn('totalVolumesUsed', abs_limits) - self.assertNotIn('totalGigabytesUsed', abs_limits) - self.assertNotIn('totalSnapshotsUsed', abs_limits) diff --git a/cinder/tests/unit/api/contrib/test_volume_actions.py b/cinder/tests/unit/api/contrib/test_volume_actions.py deleted file mode 100644 index 55eea53ab..000000000 --- a/cinder/tests/unit/api/contrib/test_volume_actions.py +++ /dev/null @@ -1,1373 +0,0 @@ -# Copyright 2012 OpenStack Foundation -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. 
You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import datetime -import uuid - -import ddt -import mock -from oslo_config import cfg -import oslo_messaging as messaging -from oslo_serialization import jsonutils -from six.moves import http_client -import webob - -from cinder.api.contrib import volume_actions -from cinder.api.openstack import api_version_request as api_version -from cinder import context -from cinder import db -from cinder import exception -from cinder.image import glance -from cinder import objects -from cinder import test -from cinder.tests.unit.api import fakes -from cinder.tests.unit.api.v2 import fakes as v2_fakes -from cinder.tests.unit import fake_constants as fake -from cinder.tests.unit import fake_volume -from cinder.tests.unit import utils -from cinder import volume -from cinder.volume import api as volume_api -from cinder.volume import rpcapi as volume_rpcapi - - -CONF = cfg.CONF - - -@ddt.ddt -class VolumeActionsTest(test.TestCase): - - _actions = ('os-reserve', 'os-unreserve') - - _methods = ('attach', 'detach', 'reserve_volume', 'unreserve_volume') - - def setUp(self): - super(VolumeActionsTest, self).setUp() - self.context = context.RequestContext(fake.USER_ID, fake.PROJECT_ID, - is_admin=False) - self.UUID = uuid.uuid4() - self.controller = volume_actions.VolumeActionsController() - self.api_patchers = {} - for _meth in self._methods: - self.api_patchers[_meth] = mock.patch('cinder.volume.api.API.' 
+ - _meth) - self.api_patchers[_meth].start() - self.addCleanup(self.api_patchers[_meth].stop) - self.api_patchers[_meth].return_value = True - - db_vol = {'id': fake.VOLUME_ID, 'host': 'fake', 'status': 'available', - 'size': 1, 'migration_status': None, - 'volume_type_id': fake.VOLUME_TYPE_ID, - 'project_id': fake.PROJECT_ID} - vol = fake_volume.fake_volume_obj(self.context, **db_vol) - self.get_patcher = mock.patch('cinder.volume.api.API.get') - self.mock_volume_get = self.get_patcher.start() - self.addCleanup(self.get_patcher.stop) - self.mock_volume_get.return_value = vol - self.update_patcher = mock.patch('cinder.volume.api.API.update') - self.mock_volume_update = self.update_patcher.start() - self.addCleanup(self.update_patcher.stop) - self.mock_volume_update.return_value = vol - self.db_get_patcher = mock.patch( - 'cinder.db.sqlalchemy.api._volume_get') - self.mock_volume_db_get = self.db_get_patcher.start() - self.addCleanup(self.db_get_patcher.stop) - self.mock_volume_db_get.return_value = vol - - self.flags(rpc_backend='cinder.openstack.common.rpc.impl_fake') - - def test_simple_api_actions(self): - app = fakes.wsgi_app(fake_auth_context=self.context) - for _action in self._actions: - req = webob.Request.blank('/v2/%s/volumes/%s/action' % - (fake.PROJECT_ID, self.UUID)) - req.method = 'POST' - req.body = jsonutils.dump_as_bytes({_action: None}) - req.content_type = 'application/json' - res = req.get_response(app) - self.assertEqual(http_client.ACCEPTED, res.status_int) - - def test_initialize_connection(self): - with mock.patch.object(volume_api.API, - 'initialize_connection') as init_conn: - init_conn.return_value = {} - body = {'os-initialize_connection': {'connector': 'fake'}} - req = webob.Request.blank('/v2/%s/volumes/%s/action' % - (fake.PROJECT_ID, fake.VOLUME_ID)) - req.method = "POST" - req.body = jsonutils.dump_as_bytes(body) - req.headers["content-type"] = "application/json" - - res = req.get_response(fakes.wsgi_app( - 
fake_auth_context=self.context)) - self.assertEqual(http_client.OK, res.status_int) - - def test_initialize_connection_without_connector(self): - with mock.patch.object(volume_api.API, - 'initialize_connection') as init_conn: - init_conn.return_value = {} - body = {'os-initialize_connection': {}} - req = webob.Request.blank('/v2/%s/volumes/%s/action' % - (fake.PROJECT_ID, fake.VOLUME_ID)) - req.method = "POST" - req.body = jsonutils.dump_as_bytes(body) - req.headers["content-type"] = "application/json" - - res = req.get_response(fakes.wsgi_app( - fake_auth_context=self.context)) - self.assertEqual(http_client.BAD_REQUEST, res.status_int) - - @mock.patch('cinder.volume.rpcapi.VolumeAPI.initialize_connection') - def test_initialize_connection_without_initiator(self, - _init_connection): - _init_connection.side_effect = messaging.RemoteError('InvalidInput') - body = {'os-initialize_connection': {'connector': 'w/o_initiator'}} - req = webob.Request.blank('/v2/%s/volumes/%s/action' % - (fake.PROJECT_ID, fake.VOLUME_ID)) - req.method = "POST" - req.body = jsonutils.dump_as_bytes(body) - req.headers["content-type"] = "application/json" - - res = req.get_response(fakes.wsgi_app( - fake_auth_context=self.context)) - self.assertEqual(http_client.BAD_REQUEST, res.status_int) - - def test_initialize_connection_exception(self): - with mock.patch.object(volume_api.API, - 'initialize_connection') as init_conn: - init_conn.side_effect = \ - exception.VolumeBackendAPIException(data=None) - body = {'os-initialize_connection': {'connector': 'fake'}} - req = webob.Request.blank('/v2/%s/volumes/%s/action' % - (fake.PROJECT_ID, fake.VOLUME_ID)) - req.method = "POST" - req.body = jsonutils.dump_as_bytes(body) - req.headers["content-type"] = "application/json" - - res = req.get_response(fakes.wsgi_app( - fake_auth_context=self.context)) - self.assertEqual(http_client.INTERNAL_SERVER_ERROR, - res.status_int) - - def test_terminate_connection(self): - with mock.patch.object(volume_api.API, 
- 'terminate_connection') as terminate_conn: - terminate_conn.return_value = {} - body = {'os-terminate_connection': {'connector': 'fake'}} - req = webob.Request.blank('/v2/%s/volumes/%s/action' % - (fake.PROJECT_ID, fake.VOLUME_ID)) - req.method = "POST" - req.body = jsonutils.dump_as_bytes(body) - req.headers["content-type"] = "application/json" - - res = req.get_response(fakes.wsgi_app( - fake_auth_context=self.context)) - self.assertEqual(http_client.ACCEPTED, res.status_int) - - def test_terminate_connection_without_connector(self): - with mock.patch.object(volume_api.API, - 'terminate_connection') as terminate_conn: - terminate_conn.return_value = {} - body = {'os-terminate_connection': {}} - req = webob.Request.blank('/v2/%s/volumes/%s/action' % - (fake.PROJECT_ID, fake.VOLUME_ID)) - req.method = "POST" - req.body = jsonutils.dump_as_bytes(body) - req.headers["content-type"] = "application/json" - - res = req.get_response(fakes.wsgi_app( - fake_auth_context=self.context)) - self.assertEqual(http_client.BAD_REQUEST, res.status_int) - - def test_terminate_connection_with_exception(self): - with mock.patch.object(volume_api.API, - 'terminate_connection') as terminate_conn: - terminate_conn.side_effect = \ - exception.VolumeBackendAPIException(data=None) - body = {'os-terminate_connection': {'connector': 'fake'}} - req = webob.Request.blank('/v2/%s/volumes/%s/action' % - (fake.PROJECT_ID, fake.VOLUME_ID)) - req.method = "POST" - req.body = jsonutils.dump_as_bytes(body) - req.headers["content-type"] = "application/json" - - res = req.get_response(fakes.wsgi_app( - fake_auth_context=self.context)) - self.assertEqual(http_client.INTERNAL_SERVER_ERROR, - res.status_int) - - def test_attach_to_instance(self): - body = {'os-attach': {'instance_uuid': fake.INSTANCE_ID, - 'mountpoint': '/dev/vdc', - 'mode': 'rw'}} - req = webob.Request.blank('/v2/%s/volumes/%s/action' % - (fake.PROJECT_ID, fake.VOLUME_ID)) - req.method = "POST" - req.body = jsonutils.dump_as_bytes(body) 
- req.headers["content-type"] = "application/json" - - res = req.get_response(fakes.wsgi_app( - fake_auth_context=self.context)) - self.assertEqual(http_client.ACCEPTED, res.status_int) - - body = {'os-attach': {'instance_uuid': fake.INSTANCE_ID, - 'host_name': 'fake_host', - 'mountpoint': '/dev/vdc'}} - req = webob.Request.blank('/v2/%s/volumes/%s/action' % - (fake.PROJECT_ID, fake.VOLUME_ID)) - req.method = "POST" - req.headers["content-type"] = "application/json" - req.body = jsonutils.dump_as_bytes(body) - res = req.get_response(fakes.wsgi_app( - fake_auth_context=self.context)) - self.assertEqual(http_client.ACCEPTED, res.status_int) - - def test_attach_to_host(self): - # using 'read-write' mode attach volume by default - body = {'os-attach': {'host_name': 'fake_host', - 'mountpoint': '/dev/vdc'}} - req = webob.Request.blank('/v2/%s/volumes/%s/action' % - (fake.PROJECT_ID, fake.VOLUME_ID)) - req.method = "POST" - req.body = jsonutils.dump_as_bytes(body) - req.headers["content-type"] = "application/json" - - res = req.get_response(fakes.wsgi_app( - fake_auth_context=self.context)) - self.assertEqual(http_client.ACCEPTED, res.status_int) - - def test_volume_attach_to_instance_raises_remote_error(self): - volume_remote_error = \ - messaging.RemoteError(exc_type='InvalidUUID') - with mock.patch.object(volume_api.API, 'attach', - side_effect=volume_remote_error): - id = fake.VOLUME_ID - vol = {"instance_uuid": self.UUID, - "mountpoint": "/dev/vdc", - "mode": "rw"} - body = {"os-attach": vol} - req = fakes.HTTPRequest.blank('/v2/%s/volumes/%s/action' % - (fake.PROJECT_ID, id)) - self.assertRaises(webob.exc.HTTPBadRequest, - self.controller._attach, - req, - id, - body) - - def test_volume_attach_to_instance_raises_db_error(self): - # In case of DB error 500 error code is returned to user - volume_remote_error = \ - messaging.RemoteError(exc_type='DBError') - with mock.patch.object(volume_api.API, 'attach', - side_effect=volume_remote_error): - id = fake.VOLUME_ID - 
vol = {"instance_uuid": self.UUID, - "mountpoint": "/dev/vdc", - "mode": "rw"} - body = {"os-attach": vol} - req = fakes.HTTPRequest.blank('/v2/%s/volumes/%s/action' % - (fake.PROJECT_ID, id)) - self.assertRaises(messaging.RemoteError, - self.controller._attach, - req, - id, - body) - - def test_detach(self): - body = {'os-detach': {'attachment_id': fake.ATTACHMENT_ID}} - req = webob.Request.blank('/v2/%s/volumes/%s/action' % - (fake.PROJECT_ID, fake.VOLUME_ID)) - req.method = "POST" - req.body = jsonutils.dump_as_bytes(body) - req.headers["content-type"] = "application/json" - - res = req.get_response(fakes.wsgi_app( - fake_auth_context=self.context)) - self.assertEqual(http_client.ACCEPTED, res.status_int) - - def test_volume_detach_raises_remote_error(self): - volume_remote_error = \ - messaging.RemoteError(exc_type='VolumeAttachmentNotFound') - with mock.patch.object(volume_api.API, 'detach', - side_effect=volume_remote_error): - id = fake.VOLUME_ID - vol = {"attachment_id": self.UUID} - body = {"os-detach": vol} - req = fakes.HTTPRequest.blank('/v2/%s/volumes/%s/action' % - (fake.PROJECT_ID, id)) - self.assertRaises(webob.exc.HTTPBadRequest, - self.controller._detach, - req, - id, - body) - - def test_volume_detach_raises_db_error(self): - # In case of DB error 500 error code is returned to user - volume_remote_error = \ - messaging.RemoteError(exc_type='DBError') - with mock.patch.object(volume_api.API, 'detach', - side_effect=volume_remote_error): - id = fake.VOLUME_ID - vol = {"attachment_id": self.UUID} - body = {"os-detach": vol} - req = fakes.HTTPRequest.blank('/v2/%s/volumes/%s/action' % - (fake.PROJECT_ID, id)) - self.assertRaises(messaging.RemoteError, - self.controller._detach, - req, - id, - body) - - def test_attach_with_invalid_arguments(self): - # Invalid request to attach volume an invalid target - body = {'os-attach': {'mountpoint': '/dev/vdc'}} - req = webob.Request.blank('/v2/%s/volumes/%s/action' % - (fake.PROJECT_ID, fake.VOLUME_ID)) - 
req.method = "POST" - req.headers["content-type"] = "application/json" - req.body = jsonutils.dump_as_bytes(body) - res = req.get_response(fakes.wsgi_app( - fake_auth_context=self.context)) - self.assertEqual(http_client.BAD_REQUEST, res.status_int) - - # Invalid request to attach volume with an invalid mode - body = {'os-attach': {'instance_uuid': 'fake', - 'mountpoint': '/dev/vdc', - 'mode': 'rr'}} - req = webob.Request.blank('/v2/%s/volumes/%s/action' % - (fake.PROJECT_ID, fake.VOLUME_ID)) - req.method = "POST" - req.headers["content-type"] = "application/json" - req.body = jsonutils.dump_as_bytes(body) - res = req.get_response(fakes.wsgi_app( - fake_auth_context=self.context)) - self.assertEqual(http_client.BAD_REQUEST, res.status_int) - body = {'os-attach': {'host_name': 'fake_host', - 'mountpoint': '/dev/vdc', - 'mode': 'ww'}} - req = webob.Request.blank('/v2/%s/volumes/%s/action' % - (fake.PROJECT_ID, fake.VOLUME_ID)) - req.method = "POST" - req.headers["content-type"] = "application/json" - req.body = jsonutils.dump_as_bytes(body) - res = req.get_response(fakes.wsgi_app( - fake_auth_context=self.context)) - self.assertEqual(http_client.BAD_REQUEST, res.status_int) - - def test_attach_to_instance_no_mountpoint(self): - # The mountpoint parameter is required. If not provided the - # API should fail with a 400 error. 
- body = {'os-attach': {'instance_uuid': fake.INSTANCE_ID, - 'mode': 'rw'}} - req = webob.Request.blank('/v2/%s/volumes/%s/action' % - (fake.PROJECT_ID, fake.VOLUME_ID)) - req.method = "POST" - req.body = jsonutils.dump_as_bytes(body) - req.headers["content-type"] = "application/json" - - res = req.get_response(fakes.wsgi_app( - fake_auth_context=self.context)) - self.assertEqual(400, res.status_int) - - def test_begin_detaching(self): - def fake_begin_detaching(*args, **kwargs): - return {} - self.mock_object(volume.api.API, 'begin_detaching', - fake_begin_detaching) - - body = {'os-begin_detaching': {'fake': 'fake'}} - req = webob.Request.blank('/v2/%s/volumes/%s/action' % - (fake.PROJECT_ID, fake.VOLUME_ID)) - req.method = "POST" - req.body = jsonutils.dump_as_bytes(body) - req.headers["content-type"] = "application/json" - - res = req.get_response(fakes.wsgi_app( - fake_auth_context=self.context)) - self.assertEqual(http_client.ACCEPTED, res.status_int) - - def test_roll_detaching(self): - def fake_roll_detaching(*args, **kwargs): - return {} - self.mock_object(volume.api.API, 'roll_detaching', - fake_roll_detaching) - - body = {'os-roll_detaching': {'fake': 'fake'}} - req = webob.Request.blank('/v2/%s/volumes/%s/action' % - (fake.PROJECT_ID, fake.VOLUME_ID)) - req.method = "POST" - req.body = jsonutils.dump_as_bytes(body) - req.headers["content-type"] = "application/json" - - res = req.get_response(fakes.wsgi_app( - fake_auth_context=self.context)) - self.assertEqual(http_client.ACCEPTED, res.status_int) - - def test_extend_volume(self): - def fake_extend_volume(*args, **kwargs): - return {} - self.mock_object(volume.api.API, 'extend', - fake_extend_volume) - - body = {'os-extend': {'new_size': 5}} - req = webob.Request.blank('/v2/%s/volumes/%s/action' % - (fake.PROJECT_ID, fake.VOLUME_ID)) - req.method = "POST" - req.body = jsonutils.dump_as_bytes(body) - req.headers["content-type"] = "application/json" - - res = req.get_response(fakes.wsgi_app( - 
fake_auth_context=self.context)) - self.assertEqual(http_client.ACCEPTED, res.status_int) - - def test_extend_volume_invalid_status(self): - def fake_extend_volume(*args, **kwargs): - msg = "Volume status must be available" - raise exception.InvalidVolume(reason=msg) - self.mock_object(volume.api.API, 'extend', - fake_extend_volume) - - body = {'os-extend': {'new_size': 5}} - req = webob.Request.blank('/v2/%s/volumes/%s/action' % - (fake.PROJECT_ID, fake.VOLUME_ID)) - req.method = "POST" - req.body = jsonutils.dump_as_bytes(body) - req.headers["content-type"] = "application/json" - - res = req.get_response(fakes.wsgi_app(fake_auth_context=self.context)) - self.assertEqual(http_client.BAD_REQUEST, res.status_int) - - @ddt.data((True, http_client.ACCEPTED), (False, http_client.ACCEPTED), - ('1', http_client.ACCEPTED), ('0', http_client.ACCEPTED), - ('true', http_client.ACCEPTED), ('false', http_client.ACCEPTED), - ('tt', http_client.BAD_REQUEST), (11, http_client.BAD_REQUEST), - (None, http_client.BAD_REQUEST)) - @ddt.unpack - def test_update_readonly_flag(self, readonly, return_code): - def fake_update_readonly_flag(*args, **kwargs): - return {} - self.mock_object(volume.api.API, 'update_readonly_flag', - fake_update_readonly_flag) - - body = {"os-update_readonly_flag": {"readonly": readonly}} - if readonly is None: - body = {"os-update_readonly_flag": {}} - req = webob.Request.blank('/v2/%s/volumes/%s/action' % - (fake.PROJECT_ID, fake.VOLUME_ID)) - req.method = "POST" - req.body = jsonutils.dump_as_bytes(body) - req.headers["content-type"] = "application/json" - res = req.get_response(fakes.wsgi_app( - fake_auth_context=self.context)) - self.assertEqual(return_code, res.status_int) - - @ddt.data((True, http_client.OK), (False, http_client.OK), - ('1', http_client.OK), ('0', http_client.OK), - ('true', http_client.OK), ('false', http_client.OK), - ('tt', http_client.BAD_REQUEST), (11, http_client.BAD_REQUEST), - (None, http_client.BAD_REQUEST)) - @ddt.unpack - def 
test_set_bootable(self, bootable, return_code): - body = {"os-set_bootable": {"bootable": bootable}} - if bootable is None: - body = {"os-set_bootable": {}} - req = webob.Request.blank('/v2/%s/volumes/%s/action' % - (fake.PROJECT_ID, fake.VOLUME_ID)) - req.method = "POST" - req.body = jsonutils.dump_as_bytes(body) - req.headers["content-type"] = "application/json" - res = req.get_response(fakes.wsgi_app( - fake_auth_context=self.context)) - self.assertEqual(return_code, res.status_int) - - -@ddt.ddt -class VolumeRetypeActionsTest(test.TestCase): - def setUp(self): - super(VolumeRetypeActionsTest, self).setUp() - - self.context = context.RequestContext(fake.USER_ID, fake.PROJECT_ID, - is_admin=False) - self.flags(rpc_backend='cinder.openstack.common.rpc.impl_fake') - - self.retype_mocks = {} - paths = ('cinder.quota.QUOTAS.add_volume_type_opts', - 'cinder.quota.QUOTAS.reserve') - for path in paths: - name = path.split('.')[-1] - patcher = mock.patch(path, return_value=None) - self.retype_mocks[name] = patcher.start() - self.addCleanup(patcher.stop) - - @mock.patch('cinder.db.sqlalchemy.api.resource_exists', return_value=True) - def _retype_volume_exec(self, expected_status, - new_type=fake.VOLUME_TYPE2_ID, vol_id=None, - exists_mock=None): - vol_id = vol_id or fake.VOLUME_ID - req = webob.Request.blank('/v2/%s/volumes/%s/action' % - (fake.PROJECT_ID, vol_id)) - req.method = 'POST' - req.headers['content-type'] = 'application/json' - retype_body = {'new_type': new_type, 'migration_policy': 'never'} - req.body = jsonutils.dump_as_bytes({'os-retype': retype_body}) - res = req.get_response(fakes.wsgi_app(fake_auth_context=self.context)) - self.assertEqual(expected_status, res.status_int) - - def test_retype_volume_no_body(self): - # Request with no body should fail - vol = utils.create_volume(self.context, - status='available', - testcase_instance=self) - req = webob.Request.blank('/v2/%s/volumes/%s/action' % - (fake.PROJECT_ID, vol.id)) - req.method = 'POST' - 
req.headers['content-type'] = 'application/json' - req.body = jsonutils.dump_as_bytes({'os-retype': None}) - res = req.get_response(fakes.wsgi_app(fake_auth_context=self.context)) - self.assertEqual(http_client.BAD_REQUEST, res.status_int) - - def test_retype_volume_bad_policy(self): - # Request with invalid migration policy should fail - vol = utils.create_volume(self.context, - status='available', - testcase_instance=self) - req = webob.Request.blank('/v2/%s/volumes/%s/action' % - (fake.PROJECT_ID, vol.id)) - req.method = 'POST' - req.headers['content-type'] = 'application/json' - retype_body = {'new_type': 'foo', 'migration_policy': 'invalid'} - req.body = jsonutils.dump_as_bytes({'os-retype': retype_body}) - res = req.get_response(fakes.wsgi_app(fake_auth_context=self.context)) - self.assertEqual(http_client.BAD_REQUEST, res.status_int) - - def test_retype_volume_bad_status(self): - # Should fail if volume does not have proper status - vol_type_old = utils.create_volume_type(context.get_admin_context(), - self, name='old') - vol_type_new = utils.create_volume_type(context.get_admin_context(), - self, name='new') - vol = utils.create_volume(self.context, - status='error', - volume_type_id=vol_type_old.id, - testcase_instance=self) - - self._retype_volume_exec(http_client.BAD_REQUEST, vol_type_new.id, - vol.id) - - def test_retype_type_no_exist(self): - # Should fail if new type does not exist - vol_type_old = utils.create_volume_type(context.get_admin_context(), - self, name='old') - vol = utils.create_volume(self.context, - status='available', - volume_type_id=vol_type_old.id, - testcase_instance=self) - self._retype_volume_exec(http_client.NOT_FOUND, 'fake_vol_type', - vol.id) - - def test_retype_same_type(self): - # Should fail if new type and old type are the same - vol_type_old = utils.create_volume_type(context.get_admin_context(), - self, name='old') - vol = utils.create_volume(self.context, - status='available', - volume_type_id=vol_type_old.id, - 
testcase_instance=self) - self._retype_volume_exec(http_client.BAD_REQUEST, vol_type_old.id, - vol.id) - - def test_retype_over_quota(self): - # Should fail if going over quota for new type - vol_type_new = utils.create_volume_type(context.get_admin_context(), - self, name='old') - vol = utils.create_volume(self.context, - status='available', - testcase_instance=self) - - exc = exception.OverQuota(overs=['gigabytes'], - quotas={'gigabytes': 20}, - usages={'gigabytes': {'reserved': 5, - 'in_use': 15}}) - self.retype_mocks['reserve'].side_effect = exc - self._retype_volume_exec(http_client.REQUEST_ENTITY_TOO_LARGE, - vol_type_new.id, vol.id) - - @ddt.data(('in-use', 'front-end', http_client.BAD_REQUEST), - ('in-use', 'back-end', http_client.ACCEPTED), - ('available', 'front-end', http_client.ACCEPTED), - ('available', 'back-end', http_client.ACCEPTED), - ('in-use', 'front-end', http_client.ACCEPTED, True), - ('in-use', 'back-end', http_client.ACCEPTED, True), - ('available', 'front-end', http_client.ACCEPTED, True), - ('available', 'back-end', http_client.ACCEPTED, True), - ('in-use', 'front-end', http_client.BAD_REQUEST, False, False), - ('in-use', 'back-end', http_client.ACCEPTED, False, False), - ('in-use', '', http_client.ACCEPTED, True, False), - ('available', 'front-end', http_client.ACCEPTED, False, False), - ('available', 'back-end', http_client.ACCEPTED, False, False), - ('available', '', http_client.ACCEPTED, True, False), - ('in-use', 'front-end', http_client.BAD_REQUEST, False, - False, False), - ('in-use', '', http_client.ACCEPTED, True, False, False), - ('in-use', 'back-end', http_client.ACCEPTED, False, - False, False), - ('available', 'front-end', http_client.ACCEPTED, False, - False, False), - ('in-use', '', http_client.ACCEPTED, True, False, False), - ('in-use', 'back-end', http_client.ACCEPTED, False, - False, False)) - @ddt.unpack - def test_retype_volume_qos(self, vol_status, consumer_pass, - expected_status, same_qos=False, has_qos=True, - 
has_type=True): - """Test volume retype with QoS - - This test conatins following test-cases: - 1) should fail if changing qos enforced by front-end for in-use volume - 2) should NOT fail for in-use if changing qos enforced by back-end - 3) should NOT fail if changing qos enforced by FE for available - volumes - 4) should NOT fail if changing qos enforced by back-end for available - volumes - 5) should NOT fail if changing qos enforced by front-end for in-use - volumes if the qos is the same - 6) should NOT fail if changing qos enforced by back-end for in-use - volumes if the qos is the same - 7) should NOT fail if changing qos enforced by front-end for available - volumes if the qos is the same - 8) should NOT fail if changing qos enforced by back-end for available - volumes if the qos is the same - 9) should fail if changing qos enforced by front-end on the new type - and volume originally had no qos and was in-use - 10) should NOT fail if changing qos enforced by back-end on the - new type and volume originally had no qos and was in-use - 11) should NOT fail if original and destinal types had no qos for - in-use volumes - 12) should NOT fail if changing qos enforced by front-end on the - new type and volume originally had no qos and was available - 13) should NOT fail if changing qos enforced by back-end on the - new type and volume originally had no qos and was available - 14) should NOT fail if original and destinal types had no qos for - available volumes - 15) should fail if changing volume had no type, was in-use and - destination type qos was enforced by front-end - 16) should NOT fail if changing volume had no type, was in-use and - destination type had no qos - and volume originally had no type and was in-use - 17) should NOT fail if changing volume had no type, was in-use and - destination type qos was enforced by back-end - 18) should NOT fail if changing volume had no type, was in-use and - destination type qos was enforced by front-end - 19) should 
NOT fail if changing volume had no type, was available and - destination type had no qos - and volume originally had no type and was in-use - 20) should NOT fail if changing volume had no type, was available and - destination type qos was enforced by back-end - """ - - admin_ctxt = context.get_admin_context() - if has_qos: - qos_old = utils.create_qos(admin_ctxt, self, - name='old', - consumer=consumer_pass)['id'] - else: - qos_old = None - - if same_qos: - qos_new = qos_old - else: - qos_new = utils.create_qos(admin_ctxt, self, - name='new', - consumer=consumer_pass)['id'] - - if has_type: - vol_type_old = utils.create_volume_type(admin_ctxt, self, - name='old', - qos_specs_id=qos_old).id - else: - vol_type_old = None - - vol_type_new = utils.create_volume_type(admin_ctxt, self, - name='new', - qos_specs_id=qos_new).id - - vol = utils.create_volume(self.context, - status=vol_status, - volume_type_id=vol_type_old, - testcase_instance=self) - - self._retype_volume_exec(expected_status, vol_type_new, vol.id) - - @ddt.data(('available', http_client.ACCEPTED, False, False, False), - ('available', http_client.ACCEPTED, False, False), - ('available', http_client.ACCEPTED, True, False, False), - ('available', http_client.ACCEPTED, True, False), - ('available', http_client.ACCEPTED)) - @ddt.unpack - def test_retype_volume_encryption(self, vol_status, expected_status, - has_type=True, - enc_orig=True, enc_dest=True): - enc_orig = None - admin_ctxt = context.get_admin_context() - if has_type: - vol_type_old = utils.create_volume_type(admin_ctxt, self, - name='old').id - if enc_orig: - utils.create_encryption(admin_ctxt, vol_type_old, self) - else: - vol_type_old = None - - vol_type_new = utils.create_volume_type(admin_ctxt, self, - name='new').id - if enc_dest: - utils.create_encryption(admin_ctxt, vol_type_new, self) - - vol = utils.create_volume(self.context, - status=vol_status, - volume_type_id=vol_type_old, - testcase_instance=self) - - 
self._retype_volume_exec(expected_status, vol_type_new, vol.id) - - -def fake_volume_get(self, context, volume_id): - volume = v2_fakes.create_fake_volume(volume_id) - if volume_id == fake.VOLUME3_ID: - volume['status'] = 'in-use' - else: - volume['status'] = 'available' - return volume - - -def fake_volume_get_obj(self, context, volume_id, **kwargs): - volume = fake_volume.fake_volume_obj(context, - id=volume_id, - display_description='displaydesc', - **kwargs) - if volume_id == fake.VOLUME3_ID: - volume.status = 'in-use' - else: - volume.status = 'available' - - volume.volume_type = fake_volume.fake_volume_type_obj( - context, - name=v2_fakes.DEFAULT_VOL_TYPE) - return volume - - -def fake_upload_volume_to_image_service(self, context, volume, metadata, - force): - ret = {"id": volume['id'], - "updated_at": datetime.datetime(1, 1, 1, 1, 1, 1), - "status": 'uploading', - "display_description": volume['display_description'], - "size": volume['size'], - "volume_type": volume['volume_type'], - "image_id": fake.IMAGE_ID, - "container_format": 'bare', - "disk_format": 'raw', - "image_name": 'image_name'} - return ret - - -@ddt.ddt -class VolumeImageActionsTest(test.TestCase): - def setUp(self): - super(VolumeImageActionsTest, self).setUp() - self.controller = volume_actions.VolumeActionsController() - self.context = context.RequestContext(fake.USER_ID, fake.PROJECT_ID, - is_admin=False) - self.maxDiff = 2000 - - def _get_os_volume_upload_image(self): - vol = { - "container_format": 'bare', - "disk_format": 'raw', - "updated_at": datetime.datetime(1, 1, 1, 1, 1, 1), - "image_name": 'image_name', - "force": True} - body = {"os-volume_upload_image": vol} - - return body - - def fake_image_service_create(self, *args): - ret = { - 'status': u'queued', - 'name': u'image_name', - 'deleted': False, - 'container_format': u'bare', - 'created_at': datetime.datetime(1, 1, 1, 1, 1, 1), - 'disk_format': u'raw', - 'updated_at': datetime.datetime(1, 1, 1, 1, 1, 1), - 'id': 
fake.IMAGE_ID, - 'min_ram': 0, - 'checksum': None, - 'min_disk': 0, - 'deleted_at': None, - 'properties': {u'x_billing_code_license': u'246254365'}, - 'size': 0} - return ret - - def fake_image_service_create_3_1(self, *args): - ret = { - 'status': u'queued', - 'name': u'image_name', - 'deleted': False, - 'container_format': u'bare', - 'created_at': datetime.datetime(1, 1, 1, 1, 1, 1), - 'disk_format': u'raw', - 'updated_at': datetime.datetime(1, 1, 1, 1, 1, 1), - 'id': fake.IMAGE_ID, - 'min_ram': 0, - 'checksum': None, - 'min_disk': 0, - 'visibility': 'public', - 'protected': True, - 'deleted_at': None, - 'properties': {u'x_billing_code_license': u'246254365'}, - 'size': 0} - return ret - - def fake_rpc_copy_volume_to_image(self, *args): - pass - - @mock.patch.object(volume_api.API, 'get', fake_volume_get_obj) - @mock.patch.object(volume_api.API, "copy_volume_to_image", - fake_upload_volume_to_image_service) - def test_copy_volume_to_image(self): - id = fake.VOLUME_ID - img = {"container_format": 'bare', - "disk_format": 'raw', - "image_name": 'image_name', - "force": True} - body = {"os-volume_upload_image": img} - req = fakes.HTTPRequest.blank('/v2/%s/volumes/%s/action' % - (fake.PROJECT_ID, id)) - res_dict = self.controller._volume_upload_image(req, id, body) - expected = {'os-volume_upload_image': - {'id': id, - 'updated_at': datetime.datetime(1, 1, 1, 1, 1, 1), - 'status': 'uploading', - 'display_description': 'displaydesc', - 'size': 1, - 'volume_type': fake_volume.fake_volume_type_obj( - context, - name='vol_type_name'), - 'image_id': fake.IMAGE_ID, - 'container_format': 'bare', - 'disk_format': 'raw', - 'image_name': 'image_name'}} - self.assertDictEqual(expected, res_dict) - - def test_copy_volume_to_image_volumenotfound(self): - def fake_volume_get_raise_exc(self, context, volume_id): - raise exception.VolumeNotFound(volume_id=volume_id) - - self.mock_object(volume_api.API, 'get', fake_volume_get_raise_exc) - - id = fake.WILL_NOT_BE_FOUND_ID - vol = 
{"container_format": 'bare', - "disk_format": 'raw', - "image_name": 'image_name', - "force": True} - body = {"os-volume_upload_image": vol} - req = fakes.HTTPRequest.blank('/v2/%s/volumes/%s/action' % - (fake.PROJECT_ID, id)) - self.assertRaises(exception.VolumeNotFound, - self.controller._volume_upload_image, - req, - id, - body) - - @mock.patch.object(volume_api.API, 'get', fake_volume_get_obj) - @mock.patch.object(volume_api.API, 'copy_volume_to_image', - side_effect=exception.InvalidVolume(reason='blah')) - def test_copy_volume_to_image_invalidvolume(self, mock_copy): - id = fake.VOLUME2_ID - vol = {"container_format": 'bare', - "disk_format": 'raw', - "image_name": 'image_name', - "force": True} - body = {"os-volume_upload_image": vol} - req = fakes.HTTPRequest.blank('/v2/%s/volumes/%s/action' % - (fake.PROJECT_ID, fake.VOLUME_ID)) - self.assertRaises(webob.exc.HTTPBadRequest, - self.controller._volume_upload_image, - req, - id, - body) - - @mock.patch.object(volume_api.API, 'get', fake_volume_get) - def test_copy_volume_to_image_invalid_disk_format(self): - id = fake.IMAGE_ID - vol = {"container_format": 'bare', - "disk_format": 'iso', - "image_name": 'image_name', - "force": True} - body = {"os-volume_upload_image": vol} - req = fakes.HTTPRequest.blank('/v2/%s/volumes/%s/action' - % (fake.PROJECT_ID, id)) - self.assertRaises(webob.exc.HTTPBadRequest, - self.controller._volume_upload_image, - req, - id, - body) - - @mock.patch.object(volume_api.API, 'get', fake_volume_get_obj) - @mock.patch.object(volume_api.API, 'copy_volume_to_image', - side_effect=ValueError) - def test_copy_volume_to_image_valueerror(self, mock_copy): - id = fake.VOLUME2_ID - vol = {"container_format": 'bare', - "disk_format": 'raw', - "image_name": 'image_name', - "force": True} - body = {"os-volume_upload_image": vol} - req = fakes.HTTPRequest.blank('/v2/%s/volumes/%s/action' % - (fake.PROJECT_ID, fake.VOLUME_ID)) - self.assertRaises(webob.exc.HTTPBadRequest, - 
self.controller._volume_upload_image, - req, - id, - body) - - @mock.patch.object(volume_api.API, 'get', fake_volume_get_obj) - @mock.patch.object(volume_api.API, 'copy_volume_to_image', - side_effect=messaging.RemoteError) - def test_copy_volume_to_image_remoteerror(self, mock_copy): - id = fake.VOLUME2_ID - vol = {"container_format": 'bare', - "disk_format": 'raw', - "image_name": 'image_name', - "force": True} - body = {"os-volume_upload_image": vol} - req = fakes.HTTPRequest.blank('/v2/%s/volumes/%s/action' % - (fake.PROJECT_ID, id)) - self.assertRaises(webob.exc.HTTPBadRequest, - self.controller._volume_upload_image, - req, - id, - body) - - def test_volume_upload_image_typeerror(self): - id = fake.VOLUME2_ID - body = {"os-volume_upload_image_fake": "fake"} - req = webob.Request.blank('/v2/%s/volumes/%s/action' % - (fake.PROJECT_ID, id)) - req.method = 'POST' - req.headers['Content-Type'] = 'application/json' - req.body = jsonutils.dump_as_bytes(body) - res = req.get_response(fakes.wsgi_app(fake_auth_context=self.context)) - self.assertEqual(http_client.BAD_REQUEST, res.status_int) - - def test_volume_upload_image_without_type(self): - id = fake.VOLUME2_ID - vol = {"container_format": 'bare', - "disk_format": 'raw', - "image_name": None, - "force": True} - body = {"": vol} - req = webob.Request.blank('/v2/%s/volumes/%s/action' % - (fake.PROJECT_ID, id)) - req.method = 'POST' - req.headers['Content-Type'] = 'application/json' - req.body = jsonutils.dump_as_bytes(body) - res = req.get_response(fakes.wsgi_app(fake_auth_context=self.context)) - self.assertEqual(http_client.BAD_REQUEST, res.status_int) - - @mock.patch.object(volume_api.API, 'get', fake_volume_get) - def test_extend_volume_valueerror(self): - id = fake.VOLUME2_ID - body = {'os-extend': {'new_size': 'fake'}} - req = fakes.HTTPRequest.blank('/v2/%s/volumes/%s/action' % - (fake.PROJECT_ID, id)) - self.assertRaises(webob.exc.HTTPBadRequest, - self.controller._extend, - req, - id, - body) - - 
@ddt.data({'version': '3.41', - 'status': 'available'}, - {'version': '3.41', - 'status': 'in-use'}, - {'version': '3.42', - 'status': 'available'}, - {'version': '3.42', - 'status': 'in-use'}) - @ddt.unpack - def test_extend_attached_volume(self, version, status): - vol = db.volume_create(self.context, - {'size': 1, 'project_id': fake.PROJECT_ID, - 'status': status}) - self.mock_object(volume_api.API, 'get', return_value=vol) - mock_extend = self.mock_object(volume_api.API, '_extend') - body = {"os-extend": {"new_size": 2}} - req = fakes.HTTPRequest.blank('/v3/%s/volumes/%s/action' % - (fake.PROJECT_ID, vol['id'])) - req.api_version_request = api_version.APIVersionRequest(version) - self.controller._extend(req, vol['id'], body) - if version == '3.42' and status == 'in-use': - mock_extend.assert_called_with(req.environ['cinder.context'], - vol, 2, attached=True) - else: - mock_extend.assert_called_with(req.environ['cinder.context'], - vol, 2, attached=False) - - def test_copy_volume_to_image_notimagename(self): - id = fake.VOLUME2_ID - vol = {"container_format": 'bare', - "disk_format": 'raw', - "image_name": None, - "force": True} - body = {"os-volume_upload_image": vol} - req = fakes.HTTPRequest.blank('/v2/%s/volumes/%s/action' % - (fake.PROJECT_ID, id)) - self.assertRaises(webob.exc.HTTPBadRequest, - self.controller._volume_upload_image, - req, - id, - body) - - def _create_volume_with_type(self, status='available', - display_description='displaydesc', **kwargs): - admin_ctxt = context.get_admin_context() - vol_type = db.volume_type_create(admin_ctxt, {'name': 'vol_name'}) - self.addCleanup(db.volume_type_destroy, admin_ctxt, vol_type.id) - - volume = utils.create_volume(self.context, volume_type_id=vol_type.id, - status=status, - display_description=display_description, - **kwargs) - self.addCleanup(db.volume_destroy, admin_ctxt, volume.id) - - expected = { - 'os-volume_upload_image': { - 'id': volume.id, - 'updated_at': mock.ANY, - 'status': 'uploading', - 
'display_description': 'displaydesc', - 'size': 1, - 'volume_type': mock.ANY, - 'image_id': fake.IMAGE_ID, - 'container_format': 'bare', - 'disk_format': 'raw', - 'image_name': 'image_name' - } - } - return volume, expected - - @mock.patch.object(volume_api.API, "get_volume_image_metadata") - @mock.patch.object(glance.GlanceImageService, "create") - @mock.patch.object(volume_rpcapi.VolumeAPI, "copy_volume_to_image") - def test_copy_volume_to_image_with_protected_prop( - self, mock_copy_to_image, mock_create, mock_get_image_metadata): - """Test create image from volume with protected properties.""" - volume, expected = self._create_volume_with_type() - mock_get_image_metadata.return_value = {"volume_id": volume.id, - "key": "x_billing_license", - "value": "246254365"} - mock_create.side_effect = self.fake_image_service_create - - req = fakes.HTTPRequest.blank( - '/v2/%s/volumes/%s/action' % (fake.PROJECT_ID, volume.id), - use_admin_context=self.context.is_admin) - body = self._get_os_volume_upload_image() - - res_dict = self.controller._volume_upload_image(req, volume.id, body) - - self.assertDictEqual(expected, res_dict) - vol_db = objects.Volume.get_by_id(self.context, volume.id) - self.assertEqual('uploading', vol_db.status) - self.assertEqual('available', vol_db.previous_status) - - @mock.patch.object(volume_api.API, 'get', fake_volume_get_obj) - def test_copy_volume_to_image_public_not_authorized(self): - """Test unauthorized create public image from volume.""" - id = 'aaaaaaaa-bbbb-cccc-dddd-eeeeeeeeeeee' - req = fakes.HTTPRequest.blank('/v3/tenant1/volumes/%s/action' % id) - req.environ['cinder.context'].is_admin = False - req.headers = {'OpenStack-API-Version': 'volume 3.1'} - req.api_version_request = api_version.APIVersionRequest('3.1') - body = self._get_os_volume_upload_image() - body['os-volume_upload_image']['visibility'] = 'public' - self.assertRaises(exception.PolicyNotAuthorized, - self.controller._volume_upload_image, - req, id, body) - - 
@mock.patch.object(volume_api.API, "get_volume_image_metadata") - @mock.patch.object(glance.GlanceImageService, "create") - @mock.patch.object(volume_rpcapi.VolumeAPI, "copy_volume_to_image") - def test_copy_volume_to_image_without_glance_metadata( - self, mock_copy_to_image, mock_create, mock_get_image_metadata): - """Test create image from volume if volume is created without image. - - In this case volume glance metadata will not be available for this - volume. - """ - volume, expected = self._create_volume_with_type() - - mock_get_image_metadata.side_effect = \ - exception.GlanceMetadataNotFound(id=volume.id) - mock_create.side_effect = self.fake_image_service_create - - req = fakes.HTTPRequest.blank( - '/v2/%s/volumes/%s/action' % (fake.PROJECT_ID, volume.id), - use_admin_context=self.context.is_admin) - body = self._get_os_volume_upload_image() - res_dict = self.controller._volume_upload_image(req, volume.id, body) - - self.assertDictEqual(expected, res_dict) - vol_db = objects.Volume.get_by_id(self.context, volume.id) - self.assertEqual('uploading', vol_db.status) - self.assertEqual('available', vol_db.previous_status) - - @mock.patch.object(volume_api.API, "get_volume_image_metadata") - @mock.patch.object(glance.GlanceImageService, "create") - @mock.patch.object(volume_rpcapi.VolumeAPI, "copy_volume_to_image") - def test_copy_volume_to_image_fail_image_create( - self, mock_copy_to_image, mock_create, mock_get_image_metadata): - """Test create image from volume if create image fails. - - In this case API will rollback to previous status. 
- """ - volume = utils.create_volume(self.context) - - mock_get_image_metadata.return_value = {} - mock_create.side_effect = Exception() - - req = fakes.HTTPRequest.blank( - '/v2/fakeproject/volumes/%s/action' % volume.id) - body = self._get_os_volume_upload_image() - self.assertRaises(webob.exc.HTTPBadRequest, - self.controller._volume_upload_image, req, volume.id, - body) - - self.assertFalse(mock_copy_to_image.called) - vol_db = objects.Volume.get_by_id(self.context, volume.id) - self.assertEqual('available', vol_db.status) - self.assertIsNone(vol_db.previous_status) - db.volume_destroy(context.get_admin_context(), volume.id) - - @mock.patch.object(volume_api.API, "get_volume_image_metadata") - @mock.patch.object(glance.GlanceImageService, "create") - @mock.patch.object(volume_rpcapi.VolumeAPI, "copy_volume_to_image") - def test_copy_volume_to_image_in_use_no_force( - self, mock_copy_to_image, mock_create, mock_get_image_metadata): - """Test create image from in-use volume. - - In this case API will fail because we are not passing force. 
- """ - volume = utils.create_volume(self.context, status='in-use') - - mock_get_image_metadata.return_value = {} - mock_create.side_effect = self.fake_image_service_create - - req = fakes.HTTPRequest.blank( - '/v2/fakeproject/volumes/%s/action' % volume.id) - body = self._get_os_volume_upload_image() - body['os-volume_upload_image']['force'] = False - self.assertRaises(webob.exc.HTTPBadRequest, - self.controller._volume_upload_image, req, volume.id, - body) - - self.assertFalse(mock_copy_to_image.called) - vol_db = objects.Volume.get_by_id(self.context, volume.id) - self.assertEqual('in-use', vol_db.status) - self.assertIsNone(vol_db.previous_status) - db.volume_destroy(context.get_admin_context(), volume.id) - - @mock.patch.object(volume_api.API, "get_volume_image_metadata") - @mock.patch.object(glance.GlanceImageService, "create") - @mock.patch.object(volume_rpcapi.VolumeAPI, "copy_volume_to_image") - def test_copy_volume_to_image_in_use_with_force( - self, mock_copy_to_image, mock_create, mock_get_image_metadata): - """Test create image from in-use volume. - - In this case API will succeed only when CON.enable_force_upload is - enabled. 
- """ - volume, expected = self._create_volume_with_type(status='in-use') - mock_get_image_metadata.return_value = {} - mock_create.side_effect = self.fake_image_service_create - - req = fakes.HTTPRequest.blank( - '/v2/fakeproject/volumes/%s/action' % volume.id, - use_admin_context=self.context.is_admin) - body = self._get_os_volume_upload_image() - self.assertRaises(webob.exc.HTTPBadRequest, - self.controller._volume_upload_image, req, volume.id, - body) - - self.assertFalse(mock_copy_to_image.called) - vol_db = objects.Volume.get_by_id(self.context, volume.id) - self.assertEqual('in-use', vol_db.status) - self.assertIsNone(vol_db.previous_status) - - CONF.set_default('enable_force_upload', True) - res_dict = self.controller._volume_upload_image(req, volume.id, body) - - self.assertDictEqual(expected, res_dict) - - vol_db = objects.Volume.get_by_id(self.context, volume.id) - self.assertEqual('uploading', vol_db.status) - self.assertEqual('in-use', vol_db.previous_status) - - @mock.patch.object(volume_api.API, "get_volume_image_metadata") - @mock.patch.object(glance.GlanceImageService, "create") - @mock.patch.object(volume_rpcapi.VolumeAPI, "copy_volume_to_image") - def test_copy_volume_to_image_without_protected_prop( - self, mock_volume_to_image, mock_create, mock_get_image_metadata): - """Test protected property is not defined with the root image.""" - volume, expected = self._create_volume_with_type() - - mock_get_image_metadata.return_value = {} - mock_create.side_effect = self.fake_image_service_create - - req = fakes.HTTPRequest.blank( - '/v2/fakeproject/volumes/%s/action' % volume.id, - use_admin_context=self.context.is_admin) - - body = self._get_os_volume_upload_image() - res_dict = self.controller._volume_upload_image(req, volume.id, body) - - self.assertDictEqual(expected, res_dict) - vol_db = objects.Volume.get_by_id(self.context, volume.id) - self.assertEqual('uploading', vol_db.status) - self.assertEqual('available', vol_db.previous_status) - - 
@mock.patch.object(glance.GlanceImageService, "create") - @mock.patch.object(volume_rpcapi.VolumeAPI, "copy_volume_to_image") - def test_copy_volume_to_image_without_core_prop( - self, mock_copy_to_image, mock_create): - """Test glance_core_properties defined in cinder.conf is empty.""" - volume, expected = self._create_volume_with_type() - mock_create.side_effect = self.fake_image_service_create - - self.override_config('glance_core_properties', []) - - req = fakes.HTTPRequest.blank( - '/v2/fakeproject/volumes/%s/action' % volume.id, - use_admin_context=self.context.is_admin) - - body = self._get_os_volume_upload_image() - res_dict = self.controller._volume_upload_image(req, volume.id, body) - - self.assertDictEqual(expected, res_dict) - vol_db = objects.Volume.get_by_id(self.context, volume.id) - self.assertEqual('uploading', vol_db.status) - self.assertEqual('available', vol_db.previous_status) - - @mock.patch.object(volume_api.API, "get_volume_image_metadata") - @mock.patch.object(glance.GlanceImageService, "create") - @mock.patch.object(volume_rpcapi.VolumeAPI, "copy_volume_to_image") - def test_copy_volume_to_image_volume_type_none( - self, - mock_copy_volume_to_image, - mock_create, - mock_get_volume_image_metadata): - """Test create image from volume with none type volume.""" - volume, expected = self._create_volume_with_type() - - mock_create.side_effect = self.fake_image_service_create - - req = fakes.HTTPRequest.blank( - '/v2/%s/volumes/%s/action' % (fake.PROJECT_ID, volume.id), - use_admin_context=self.context.is_admin) - body = self._get_os_volume_upload_image() - res_dict = self.controller._volume_upload_image(req, volume.id, body) - self.assertDictEqual(expected, res_dict) - - @mock.patch.object(volume_api.API, "get_volume_image_metadata") - @mock.patch.object(glance.GlanceImageService, "create") - @mock.patch.object(volume_rpcapi.VolumeAPI, "copy_volume_to_image") - def test_copy_volume_to_image_version_3_1( - self, - mock_copy_volume_to_image, - 
mock_create, - mock_get_volume_image_metadata): - """Test create image from volume with protected properties.""" - volume, expected = self._create_volume_with_type() - - mock_get_volume_image_metadata.return_value = { - "volume_id": volume.id, - "key": "x_billing_code_license", - "value": "246254365"} - mock_create.side_effect = self.fake_image_service_create_3_1 - mock_copy_volume_to_image.side_effect = \ - self.fake_rpc_copy_volume_to_image - - self.override_config('glance_api_version', 2) - - req = fakes.HTTPRequest.blank( - '/v3/%s/volumes/%s/action' % (fake.PROJECT_ID, volume.id), - use_admin_context=self.context.is_admin) - req.environ['cinder.context'].is_admin = True - req.headers = {'OpenStack-API-Version': 'volume 3.1'} - req.api_version_request = api_version.APIVersionRequest('3.1') - body = self._get_os_volume_upload_image() - body['os-volume_upload_image']['visibility'] = 'public' - body['os-volume_upload_image']['protected'] = True - res_dict = self.controller._volume_upload_image(req, - volume.id, - body) - - expected['os-volume_upload_image'].update(visibility='public', - protected=True) - self.assertDictEqual(expected, res_dict) - - @mock.patch.object(volume_api.API, "get_volume_image_metadata") - @mock.patch.object(glance.GlanceImageService, "create") - @mock.patch.object(volume_rpcapi.VolumeAPI, "copy_volume_to_image") - def test_copy_volume_to_image_vhd( - self, mock_copy_to_image, mock_create, mock_get_image_metadata): - """Test create image from volume with vhd disk format""" - volume, expected = self._create_volume_with_type() - mock_get_image_metadata.return_value = {} - mock_create.side_effect = self.fake_image_service_create - req = fakes.HTTPRequest.blank( - '/v2/fakeproject/volumes/%s/action' % volume.id) - body = self._get_os_volume_upload_image() - body['os-volume_upload_image']['force'] = True - body['os-volume_upload_image']['container_format'] = 'bare' - body['os-volume_upload_image']['disk_format'] = 'vhd' - - res_dict = 
self.controller._volume_upload_image(req, volume.id, body) - - self.assertDictEqual(expected, res_dict) - vol_db = objects.Volume.get_by_id(self.context, volume.id) - self.assertEqual('uploading', vol_db.status) - self.assertEqual('available', vol_db.previous_status) - - @mock.patch.object(volume_api.API, "get_volume_image_metadata") - @mock.patch.object(glance.GlanceImageService, "create") - @mock.patch.object(volume_rpcapi.VolumeAPI, "copy_volume_to_image") - def test_copy_volume_to_image_vhdx( - self, mock_copy_to_image, mock_create, mock_get_image_metadata): - """Test create image from volume with vhdx disk format""" - volume, expected = self._create_volume_with_type() - mock_get_image_metadata.return_value = {} - mock_create.side_effect = self.fake_image_service_create - req = fakes.HTTPRequest.blank( - '/v2/fakeproject/volumes/%s/action' % volume.id) - body = self._get_os_volume_upload_image() - body['os-volume_upload_image']['force'] = True - body['os-volume_upload_image']['container_format'] = 'bare' - body['os-volume_upload_image']['disk_format'] = 'vhdx' - - res_dict = self.controller._volume_upload_image(req, volume.id, body) - - self.assertDictEqual(expected, res_dict) - vol_db = objects.Volume.get_by_id(self.context, volume.id) - self.assertEqual('uploading', vol_db.status) - self.assertEqual('available', vol_db.previous_status) diff --git a/cinder/tests/unit/api/contrib/test_volume_encryption_metadata.py b/cinder/tests/unit/api/contrib/test_volume_encryption_metadata.py deleted file mode 100644 index c18769bf1..000000000 --- a/cinder/tests/unit/api/contrib/test_volume_encryption_metadata.py +++ /dev/null @@ -1,219 +0,0 @@ -# Copyright (c) 2013 The Johns Hopkins University/Applied Physics Laboratory -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. 
You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from oslo_serialization import jsonutils -from six.moves import http_client -import webob - -from cinder.api.contrib import volume_encryption_metadata -from cinder import context -from cinder import db -from cinder.objects import fields -from cinder import test -from cinder.tests.unit.api import fakes -from cinder.tests.unit import fake_constants as fake - - -def return_volume_type_encryption_metadata(context, volume_type_id): - return fake_volume_type_encryption() - - -def fake_volume_type_encryption(): - values = { - 'cipher': 'cipher', - 'key_size': 256, - 'provider': 'nova.volume.encryptors.base.VolumeEncryptor', - 'volume_type_id': fake.VOLUME_TYPE_ID, - 'control_location': 'front-end', - } - return values - - -class VolumeEncryptionMetadataTest(test.TestCase): - @staticmethod - def _create_volume(context, - display_name='test_volume', - display_description='this is a test volume', - status='creating', - availability_zone='fake_az', - host='fake_host', - size=1, - encryption_key_id=fake.ENCRYPTION_KEY_ID): - """Create a volume object.""" - volume = { - 'size': size, - 'user_id': fake.USER_ID, - 'project_id': fake.PROJECT_ID, - 'status': status, - 'display_name': display_name, - 'display_description': display_description, - 'attach_status': fields.VolumeAttachStatus.DETACHED, - 'availability_zone': availability_zone, - 'host': host, - 'encryption_key_id': encryption_key_id, - } - return db.volume_create(context, volume)['id'] - - def setUp(self): - super(VolumeEncryptionMetadataTest, self).setUp() - self.controller = (volume_encryption_metadata. 
- VolumeEncryptionMetadataController()) - self.mock_object(db.sqlalchemy.api, 'volume_type_encryption_get', - return_volume_type_encryption_metadata) - - self.ctxt = context.RequestContext(fake.USER_ID, fake.PROJECT_ID) - self.volume_id = self._create_volume(self.ctxt) - self.addCleanup(db.volume_destroy, self.ctxt.elevated(), - self.volume_id) - - def test_index(self): - req = webob.Request.blank('/v2/%s/volumes/%s/encryption' % ( - fake.PROJECT_ID, self.volume_id)) - res = req.get_response(fakes.wsgi_app(fake_auth_context=self.ctxt)) - self.assertEqual(http_client.OK, res.status_code) - res_dict = jsonutils.loads(res.body) - - expected = { - "encryption_key_id": fake.ENCRYPTION_KEY_ID, - "control_location": "front-end", - "cipher": "cipher", - "provider": "nova.volume.encryptors.base.VolumeEncryptor", - "key_size": 256, - } - self.assertEqual(expected, res_dict) - - def test_index_bad_tenant_id(self): - req = webob.Request.blank('/v2/%s/volumes/%s/encryption' % ( - fake.WILL_NOT_BE_FOUND_ID, self.volume_id)) - res = req.get_response(fakes.wsgi_app(fake_auth_context=self.ctxt)) - self.assertEqual(http_client.BAD_REQUEST, res.status_code) - - res_dict = jsonutils.loads(res.body) - expected = {'badRequest': {'code': http_client.BAD_REQUEST, - 'message': 'Malformed request url'}} - self.assertEqual(expected, res_dict) - - def test_index_bad_volume_id(self): - bad_volume_id = fake.WILL_NOT_BE_FOUND_ID - req = webob.Request.blank('/v2/%s/volumes/%s/encryption' % ( - fake.PROJECT_ID, bad_volume_id)) - res = req.get_response(fakes.wsgi_app(fake_auth_context=self.ctxt)) - self.assertEqual(http_client.NOT_FOUND, res.status_code) - - res_dict = jsonutils.loads(res.body) - expected = {'itemNotFound': {'code': http_client.NOT_FOUND, - 'message': 'Volume %s could not be found.' 
- % bad_volume_id}} - self.assertEqual(expected, res_dict) - - def test_show_key(self): - req = webob.Request.blank('/v2/%s/volumes/%s/encryption/' - 'encryption_key_id' % ( - fake.PROJECT_ID, self.volume_id)) - res = req.get_response(fakes.wsgi_app(fake_auth_context=self.ctxt)) - self.assertEqual(http_client.OK, res.status_code) - - self.assertEqual(fake.ENCRYPTION_KEY_ID, res.body.decode()) - - def test_show_control(self): - req = webob.Request.blank('/v2/%s/volumes/%s/encryption/' - 'control_location' % ( - fake.PROJECT_ID, self.volume_id)) - res = req.get_response(fakes.wsgi_app(fake_auth_context=self.ctxt)) - self.assertEqual(http_client.OK, res.status_code) - - self.assertEqual(b'front-end', res.body) - - def test_show_provider(self): - req = webob.Request.blank('/v2/%s/volumes/%s/encryption/' - 'provider' % ( - fake.PROJECT_ID, self.volume_id)) - res = req.get_response(fakes.wsgi_app(fake_auth_context=self.ctxt)) - self.assertEqual(http_client.OK, res.status_code) - - self.assertEqual(b'nova.volume.encryptors.base.VolumeEncryptor', - res.body) - - def test_show_bad_tenant_id(self): - req = webob.Request.blank('/v2/%s/volumes/%s/encryption/' - 'encryption_key_id' % - (fake.WILL_NOT_BE_FOUND_ID, - self.volume_id)) - res = req.get_response(fakes.wsgi_app(fake_auth_context=self.ctxt)) - self.assertEqual(http_client.BAD_REQUEST, res.status_code) - - res_dict = jsonutils.loads(res.body) - expected = {'badRequest': {'code': http_client.BAD_REQUEST, - 'message': 'Malformed request url'}} - self.assertEqual(expected, res_dict) - - def test_show_bad_volume_id(self): - bad_volume_id = fake.WILL_NOT_BE_FOUND_ID - req = webob.Request.blank('/v2/%s/volumes/%s/encryption/' - 'encryption_key_id' % ( - fake.PROJECT_ID, bad_volume_id)) - res = req.get_response(fakes.wsgi_app(fake_auth_context=self.ctxt)) - self.assertEqual(http_client.NOT_FOUND, res.status_code) - - res_dict = jsonutils.loads(res.body) - expected = {'itemNotFound': {'code': http_client.NOT_FOUND, - 'message': 
'Volume %s could not be found.' - % bad_volume_id}} - self.assertEqual(expected, res_dict) - - def test_retrieve_key_admin(self): - ctxt = context.RequestContext(fake.USER_ID, fake.PROJECT_ID, - is_admin=True) - - req = webob.Request.blank('/v2/%s/volumes/%s/encryption/' - 'encryption_key_id' % ( - fake.PROJECT_ID, self.volume_id)) - res = req.get_response(fakes.wsgi_app(fake_auth_context=ctxt)) - self.assertEqual(http_client.OK, res.status_code) - - self.assertEqual(fake.ENCRYPTION_KEY_ID, res.body.decode()) - - def test_show_volume_not_encrypted_type(self): - self.mock_object(db.sqlalchemy.api, 'volume_type_encryption_get', - return_value=None) - - volume_id = self._create_volume(self.ctxt, encryption_key_id=None) - self.addCleanup(db.volume_destroy, self.ctxt.elevated(), volume_id) - - req = webob.Request.blank('/v2/%s/volumes/%s/encryption/' - 'encryption_key_id' % ( - fake.PROJECT_ID, volume_id)) - res = req.get_response(fakes.wsgi_app(fake_auth_context=self.ctxt)) - self.assertEqual(http_client.OK, res.status_code) - self.assertEqual(0, len(res.body)) - - def test_index_volume_not_encrypted_type(self): - self.mock_object(db.sqlalchemy.api, 'volume_type_encryption_get', - return_value=None) - - volume_id = self._create_volume(self.ctxt, encryption_key_id=None) - self.addCleanup(db.volume_destroy, self.ctxt.elevated(), volume_id) - - req = webob.Request.blank('/v2/%s/volumes/%s/encryption' % ( - fake.PROJECT_ID, volume_id)) - res = req.get_response(fakes.wsgi_app(fake_auth_context=self.ctxt)) - - self.assertEqual(http_client.OK, res.status_code) - res_dict = jsonutils.loads(res.body) - - expected = { - 'encryption_key_id': None - } - self.assertEqual(expected, res_dict) diff --git a/cinder/tests/unit/api/contrib/test_volume_host_attribute.py b/cinder/tests/unit/api/contrib/test_volume_host_attribute.py deleted file mode 100644 index 6268790ee..000000000 --- a/cinder/tests/unit/api/contrib/test_volume_host_attribute.py +++ /dev/null @@ -1,124 +0,0 @@ -# 
Copyright 2012 OpenStack Foundation -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import uuid - -from oslo_serialization import jsonutils -from oslo_utils import timeutils -import webob - -from cinder import context -from cinder import db -from cinder import objects -from cinder.objects import fields -from cinder import test -from cinder.tests.unit.api import fakes -from cinder.tests.unit import fake_constants as fake -from cinder.tests.unit import fake_volume -from cinder import volume - - -def fake_db_volume_get(*args, **kwargs): - return { - 'id': fake.VOLUME_ID, - 'host': 'host001', - 'status': 'available', - 'size': 5, - 'availability_zone': 'somewhere', - 'created_at': timeutils.utcnow(), - 'display_name': 'anothervolume', - 'display_description': 'Just another volume!', - 'volume_type_id': None, - 'snapshot_id': None, - 'project_id': fake.PROJECT_ID, - 'migration_status': None, - '_name_id': fake.VOLUME2_ID, - 'attach_status': fields.VolumeAttachStatus.DETACHED, - } - - -def fake_volume_api_get(*args, **kwargs): - ctx = context.RequestContext(fake.USER_ID, fake.PROJECT_ID, True) - db_volume = fake_db_volume_get() - return fake_volume.fake_volume_obj(ctx, **db_volume) - - -def fake_volume_get_all(*args, **kwargs): - return objects.VolumeList(objects=[fake_volume_api_get()]) - - -def app(): - # no auth, just let environ['cinder.context'] pass through - api = fakes.router.APIRouter() - mapper = fakes.urlmap.URLMap() - mapper['/v2'] = api - return mapper - - 
-class VolumeHostAttributeTest(test.TestCase): - - def setUp(self): - super(VolumeHostAttributeTest, self).setUp() - self.mock_object(volume.api.API, 'get', fake_volume_api_get) - self.mock_object(volume.api.API, 'get_all', fake_volume_get_all) - self.mock_object(db, 'volume_get', fake_db_volume_get) - - self.UUID = uuid.uuid4() - - def test_get_volume_allowed(self): - ctx = context.RequestContext(fake.USER_ID, fake.PROJECT_ID, True) - req = webob.Request.blank('/v2/%s/volumes/%s' % ( - fake.PROJECT_ID, self.UUID)) - req.method = 'GET' - req.environ['cinder.context'] = ctx - res = req.get_response(app()) - vol = jsonutils.loads(res.body)['volume'] - self.assertEqual('host001', vol['os-vol-host-attr:host']) - - def test_get_volume_unallowed(self): - ctx = context.RequestContext(fake.USER_ID, fake.PROJECT_ID, False) - req = webob.Request.blank('/v2/%s/volumes/%s' % ( - fake.PROJECT_ID, self.UUID)) - req.method = 'GET' - req.environ['cinder.context'] = ctx - res = req.get_response(app()) - vol = jsonutils.loads(res.body)['volume'] - self.assertNotIn('os-vol-host-attr:host', vol) - - def test_list_detail_volumes_allowed(self): - ctx = context.RequestContext(fake.USER_ID, fake.PROJECT_ID, True) - req = webob.Request.blank('/v2/%s/volumes/detail' % fake.PROJECT_ID) - req.method = 'GET' - req.environ['cinder.context'] = ctx - res = req.get_response(app()) - vol = jsonutils.loads(res.body)['volumes'] - self.assertEqual('host001', vol[0]['os-vol-host-attr:host']) - - def test_list_detail_volumes_unallowed(self): - ctx = context.RequestContext(fake.USER_ID, fake.PROJECT_ID, False) - req = webob.Request.blank('/v2/%s/volumes/detail' % fake.PROJECT_ID) - req.method = 'GET' - req.environ['cinder.context'] = ctx - res = req.get_response(app()) - vol = jsonutils.loads(res.body)['volumes'] - self.assertNotIn('os-vol-host-attr:host', vol[0]) - - def test_list_simple_volumes_no_host(self): - ctx = context.RequestContext(fake.USER_ID, fake.PROJECT_ID, True) - req = 
webob.Request.blank('/v2/%s/volumes' % fake.PROJECT_ID) - req.method = 'GET' - req.environ['cinder.context'] = ctx - res = req.get_response(app()) - vol = jsonutils.loads(res.body)['volumes'] - self.assertNotIn('os-vol-host-attr:host', vol[0]) diff --git a/cinder/tests/unit/api/contrib/test_volume_image_metadata.py b/cinder/tests/unit/api/contrib/test_volume_image_metadata.py deleted file mode 100644 index 372e99426..000000000 --- a/cinder/tests/unit/api/contrib/test_volume_image_metadata.py +++ /dev/null @@ -1,364 +0,0 @@ -# Copyright 2012 OpenStack Foundation -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -import uuid - -from oslo_serialization import jsonutils -from oslo_utils import timeutils -from six.moves import http_client -import webob - -from cinder.api.contrib import volume_image_metadata -from cinder import context -from cinder import db -from cinder import exception -from cinder import objects -from cinder.objects import fields -from cinder import test -from cinder.tests.unit.api import fakes -from cinder.tests.unit import fake_constants as fake -from cinder.tests.unit import fake_volume -from cinder import volume - - -def fake_db_volume_get(*args, **kwargs): - return { - 'id': kwargs.get('volume_id') or fake.VOLUME_ID, - 'host': 'host001', - 'status': 'available', - 'size': 5, - 'availability_zone': 'somewhere', - 'created_at': timeutils.utcnow(), - 'display_name': 'anothervolume', - 'display_description': 'Just another volume!', - 'volume_type_id': None, - 'snapshot_id': None, - 'project_id': fake.PROJECT_ID, - 'migration_status': None, - '_name_id': fake.VOLUME2_ID, - 'attach_status': fields.VolumeAttachStatus.DETACHED, - } - - -def fake_volume_api_get(*args, **kwargs): - ctx = context.RequestContext(fake.USER_ID, fake.PROJECT_ID, True) - db_volume = fake_db_volume_get(volume_id=kwargs.get('volume_id')) - return fake_volume.fake_volume_obj(ctx, **db_volume) - - -def fake_volume_get_all(*args, **kwargs): - return objects.VolumeList(objects=[fake_volume_api_get(), - fake_volume_api_get( - volume_id=fake.VOLUME2_ID)]) - - -def fake_volume_get_all_empty(*args, **kwargs): - return objects.VolumeList(objects=[]) - - -fake_image_metadata = { - 'image_id': fake.IMAGE_ID, - 'image_name': 'fake', - 'kernel_id': 'somekernel', - 'ramdisk_id': 'someramdisk', -} - - -def fake_get_volume_image_metadata(*args, **kwargs): - return fake_image_metadata - - -def fake_get_volumes_image_metadata(*args, **kwargs): - return {'fake': fake_image_metadata} - - -def return_empty_image_metadata(*args, **kwargs): - return {} - - -def volume_metadata_delete(context, volume_id, 
key, meta_type): - pass - - -def fake_create_volume_metadata(context, volume_id, metadata, - delete, meta_type): - return fake_get_volume_image_metadata() - - -def return_volume_nonexistent(*args, **kwargs): - raise exception.VolumeNotFound('bogus test message') - - -class VolumeImageMetadataTest(test.TestCase): - content_type = 'application/json' - - def setUp(self): - super(VolumeImageMetadataTest, self).setUp() - self.mock_object(volume.api.API, 'get', fake_volume_api_get) - self.mock_object(volume.api.API, 'get_all', fake_volume_get_all) - self.mock_object(volume.api.API, 'get_volume_image_metadata', - fake_get_volume_image_metadata) - self.mock_object(volume.api.API, 'get_volumes_image_metadata', - fake_get_volumes_image_metadata) - self.UUID = uuid.uuid4() - self.controller = (volume_image_metadata. - VolumeImageMetadataController()) - self.user_ctxt = context.RequestContext( - fake.USER_ID, fake.PROJECT_ID, auth_token=True) - - def _make_request(self, url): - req = webob.Request.blank(url) - req.accept = self.content_type - res = req.get_response(fakes.wsgi_app( - fake_auth_context=self.user_ctxt)) - return res - - def _get_image_metadata(self, body): - return jsonutils.loads(body)['volume']['volume_image_metadata'] - - def _get_image_metadata_list(self, body): - return [ - volume['volume_image_metadata'] - for volume in jsonutils.loads(body)['volumes'] - if volume.get('volume_image_metadata') - ] - - def _create_volume_and_glance_metadata(self): - ctxt = context.get_admin_context() - # create a bootable volume - db.volume_create(ctxt, {'id': fake.VOLUME_ID, 'status': 'available', - 'host': 'test', 'provider_location': '', - 'size': 1}) - db.volume_glance_metadata_create(ctxt, fake.VOLUME_ID, - 'image_id', fake.IMAGE_ID) - db.volume_glance_metadata_create(ctxt, fake.VOLUME_ID, - 'image_name', 'fake') - db.volume_glance_metadata_create(ctxt, fake.VOLUME_ID, 'kernel_id', - 'somekernel') - db.volume_glance_metadata_create(ctxt, fake.VOLUME_ID, 'ramdisk_id', - 
'someramdisk') - - # create a unbootable volume - db.volume_create(ctxt, {'id': fake.VOLUME2_ID, 'status': 'available', - 'host': 'test', 'provider_location': '', - 'size': 1}) - - def test_get_volume(self): - self._create_volume_and_glance_metadata() - res = self._make_request('/v2/%s/volumes/%s' % ( - fake.PROJECT_ID, self.UUID)) - self.assertEqual(http_client.OK, res.status_int) - self.assertEqual(fake_image_metadata, - self._get_image_metadata(res.body)) - - def test_list_detail_volumes(self): - self._create_volume_and_glance_metadata() - res = self._make_request('/v2/%s/volumes/detail' % fake.PROJECT_ID) - self.assertEqual(http_client.OK, res.status_int) - self.assertEqual(fake_image_metadata, - self._get_image_metadata_list(res.body)[0]) - - def test_list_detail_empty_volumes(self): - def fake_dont_call_this(*args, **kwargs): - fake_dont_call_this.called = True - fake_dont_call_this.called = False - self.mock_object(volume.api.API, 'get_list_volumes_image_metadata', - fake_dont_call_this) - self.mock_object(volume.api.API, 'get_all', - fake_volume_get_all_empty) - - res = self._make_request('/v2/%s/volumes/detail' % fake.PROJECT_ID) - self.assertEqual(http_client.OK, res.status_int) - self.assertFalse(fake_dont_call_this.called) - - def test_list_detail_volumes_with_limit(self): - ctxt = context.get_admin_context() - db.volume_create(ctxt, {'id': fake.VOLUME_ID, 'status': 'available', - 'host': 'test', 'provider_location': '', - 'size': 1}) - db.volume_glance_metadata_create(ctxt, fake.VOLUME_ID, - 'key1', 'value1') - db.volume_glance_metadata_create(ctxt, fake.VOLUME_ID, - 'key2', 'value2') - res = self._make_request('/v2/%s/volumes/detail?limit=1' - % fake.PROJECT_ID) - self.assertEqual(http_client.OK, res.status_int) - self.assertEqual({'key1': 'value1', 'key2': 'value2'}, - self._get_image_metadata_list(res.body)[0]) - - def test_create_image_metadata(self): - self.mock_object(volume.api.API, 'get_volume_image_metadata', - return_empty_image_metadata) - 
self.mock_object(db, 'volume_metadata_update', - fake_create_volume_metadata) - - body = {"os-set_image_metadata": {"metadata": fake_image_metadata}} - req = webob.Request.blank('/v2/%s/volumes/%s/action' % ( - fake.PROJECT_ID, fake.VOLUME_ID)) - req.method = "POST" - req.body = jsonutils.dump_as_bytes(body) - req.headers["content-type"] = "application/json" - - res = req.get_response(fakes.wsgi_app( - fake_auth_context=self.user_ctxt)) - self.assertEqual(http_client.OK, res.status_int) - self.assertEqual(fake_image_metadata, - jsonutils.loads(res.body)["metadata"]) - - def test_create_with_keys_case_insensitive(self): - # If the keys in uppercase_and_lowercase, should return the one - # which server added - self.mock_object(volume.api.API, 'get_volume_image_metadata', - return_empty_image_metadata) - self.mock_object(db, 'volume_metadata_update', - fake_create_volume_metadata) - - body = { - "os-set_image_metadata": { - "metadata": { - "Image_Id": "someid", - "image_name": "fake", - "Kernel_id": "somekernel", - "ramdisk_id": "someramdisk" - }, - }, - } - - req = webob.Request.blank('/v2/%s/volumes/%s/action' % ( - fake.PROJECT_ID, fake.VOLUME_ID)) - req.method = 'POST' - req.body = jsonutils.dump_as_bytes(body) - req.headers["content-type"] = "application/json" - - res = req.get_response(fakes.wsgi_app( - fake_auth_context=self.user_ctxt)) - self.assertEqual(http_client.OK, res.status_int) - self.assertEqual(fake_image_metadata, - jsonutils.loads(res.body)["metadata"]) - - def test_create_empty_body(self): - req = fakes.HTTPRequest.blank('/v2/%s/volumes/%s/action' % ( - fake.PROJECT_ID, fake.VOLUME_ID)) - req.method = 'POST' - req.headers["content-type"] = "application/json" - - self.assertRaises(webob.exc.HTTPBadRequest, - self.controller.create, req, fake.VOLUME_ID, None) - - def test_create_nonexistent_volume(self): - self.mock_object(volume.api.API, 'get', return_volume_nonexistent) - - req = fakes.HTTPRequest.blank('/v2/%s/volumes/%s/action' % ( - 
fake.PROJECT_ID, fake.VOLUME_ID)) - req.method = 'POST' - req.content_type = "application/json" - body = {"os-set_image_metadata": { - "metadata": {"image_name": "fake"}} - } - req.body = jsonutils.dump_as_bytes(body) - self.assertRaises(exception.VolumeNotFound, - self.controller.create, req, fake.VOLUME_ID, body) - - def test_invalid_metadata_items_on_create(self): - self.mock_object(db, 'volume_metadata_update', - fake_create_volume_metadata) - req = fakes.HTTPRequest.blank('/v2/%s/volumes/%s/action' % ( - fake.PROJECT_ID, fake.VOLUME_ID)) - req.method = 'POST' - req.headers["content-type"] = "application/json" - - data = {"os-set_image_metadata": { - "metadata": {"a" * 260: "value1"}} - } - - # Test for long key - req.body = jsonutils.dump_as_bytes(data) - self.assertRaises(webob.exc.HTTPRequestEntityTooLarge, - self.controller.create, req, fake.VOLUME_ID, data) - - # Test for long value - data = {"os-set_image_metadata": { - "metadata": {"key": "v" * 260}} - } - req.body = jsonutils.dump_as_bytes(data) - self.assertRaises(webob.exc.HTTPRequestEntityTooLarge, - self.controller.create, req, fake.VOLUME_ID, data) - - # Test for empty key. 
- data = {"os-set_image_metadata": { - "metadata": {"": "value1"}} - } - req.body = jsonutils.dump_as_bytes(data) - self.assertRaises(webob.exc.HTTPBadRequest, - self.controller.create, req, fake.VOLUME_ID, data) - - def test_delete(self): - self.mock_object(db, 'volume_metadata_delete', - volume_metadata_delete) - - body = {"os-unset_image_metadata": { - "key": "ramdisk_id"} - } - req = webob.Request.blank('/v2/%s/volumes/%s/action' % ( - fake.PROJECT_ID, fake.VOLUME_ID)) - req.method = 'POST' - req.body = jsonutils.dump_as_bytes(body) - req.headers["content-type"] = "application/json" - - res = req.get_response(fakes.wsgi_app( - fake_auth_context=self.user_ctxt)) - self.assertEqual(http_client.OK, res.status_int) - - def test_delete_meta_not_found(self): - data = {"os-unset_image_metadata": { - "key": "invalid_id"} - } - req = fakes.HTTPRequest.blank('/v2/%s/volumes/%s/action' % ( - fake.PROJECT_ID, fake.VOLUME_ID)) - req.method = 'POST' - req.body = jsonutils.dump_as_bytes(data) - req.headers["content-type"] = "application/json" - - self.assertRaises(exception.GlanceMetadataNotFound, - self.controller.delete, req, fake.VOLUME_ID, data) - - def test_delete_nonexistent_volume(self): - self.mock_object(db, 'volume_metadata_delete', - return_volume_nonexistent) - - body = {"os-unset_image_metadata": { - "key": "fake"} - } - req = fakes.HTTPRequest.blank('/v2/%s/volumes/%s/action' % ( - fake.PROJECT_ID, fake.VOLUME_ID)) - req.method = 'POST' - req.body = jsonutils.dump_as_bytes(body) - req.headers["content-type"] = "application/json" - - self.assertRaises(exception.GlanceMetadataNotFound, - self.controller.delete, req, fake.VOLUME_ID, body) - - def test_show_image_metadata(self): - body = {"os-show_image_metadata": None} - req = webob.Request.blank('/v2/%s/volumes/%s/action' % ( - fake.PROJECT_ID, fake.VOLUME_ID)) - req.method = 'POST' - req.body = jsonutils.dump_as_bytes(body) - req.headers["content-type"] = "application/json" - - res = 
req.get_response(fakes.wsgi_app( - fake_auth_context=self.user_ctxt)) - self.assertEqual(http_client.OK, res.status_int) - self.assertEqual(fake_image_metadata, - jsonutils.loads(res.body)["metadata"]) diff --git a/cinder/tests/unit/api/contrib/test_volume_manage.py b/cinder/tests/unit/api/contrib/test_volume_manage.py deleted file mode 100644 index 5f6287f94..000000000 --- a/cinder/tests/unit/api/contrib/test_volume_manage.py +++ /dev/null @@ -1,504 +0,0 @@ -# Copyright 2014 IBM Corp. -# Copyright (c) 2016 Stratoscale, Ltd. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -import ddt -import mock -from oslo_config import cfg -import oslo_messaging as messaging -from oslo_serialization import jsonutils -from six.moves import http_client -from six.moves.urllib.parse import urlencode -import webob - -from cinder.api.contrib import volume_manage -from cinder.api.openstack import api_version_request as api_version -from cinder import context -from cinder import exception -from cinder.objects import fields -from cinder import test -from cinder.tests.unit.api import fakes -from cinder.tests.unit import fake_constants as fake -from cinder.tests.unit import fake_volume - -CONF = cfg.CONF - - -def app(): - # no auth, just let environ['cinder.context'] pass through - api = fakes.router.APIRouter() - mapper = fakes.urlmap.URLMap() - mapper['/v2'] = api - return mapper - - -def app_v3(): - # no auth, just let environ['cinder.context'] pass through - api = fakes.router.APIRouter() - mapper = fakes.urlmap.URLMap() - mapper['/v3'] = api - return mapper - - -def service_get(context, service_id, backend_match_level=None, host=None, - **filters): - """Replacement for db.sqlalchemy.api.service_get. - - We mock the db.sqlalchemy.api.service_get method to return something for a - specific host, and raise an exception for anything else. - We don't use the returned data (the code under test just use the call to - check for existence of a host, so the content returned doesn't matter. - """ - if host == 'host_ok': - return {'disabled': False} - if host == 'host_disabled': - return {'disabled': True} - raise exception.ServiceNotFound(service_id=host) - -# Some of the tests check that volume types are correctly validated during a -# volume manage operation. This data structure represents an existing volume -# type. -fake_vt = {'id': fake.VOLUME_TYPE_ID, - 'name': 'good_fakevt'} - - -def vt_get_volume_type_by_name(context, name): - """Replacement for cinder.volume.volume_types.get_volume_type_by_name. 
- - Overrides cinder.volume.volume_types.get_volume_type_by_name to return - the volume type based on inspection of our fake structure, rather than - going to the Cinder DB. - """ - if name == fake_vt['name']: - return fake_vt - raise exception.VolumeTypeNotFoundByName(volume_type_name=name) - - -def vt_get_volume_type(context, vt_id): - """Replacement for cinder.volume.volume_types.get_volume_type. - - Overrides cinder.volume.volume_types.get_volume_type to return the - volume type based on inspection of our fake structure, rather than going - to the Cinder DB. - """ - if vt_id == fake_vt['id']: - return fake_vt - raise exception.VolumeTypeNotFound(volume_type_id=vt_id) - - -def api_manage(*args, **kwargs): - """Replacement for cinder.volume.api.API.manage_existing. - - Overrides cinder.volume.api.API.manage_existing to return some fake volume - data structure, rather than initiating a real volume managing. - - Note that we don't try to replicate any passed-in information (e.g. name, - volume type) in the returned structure. 
- """ - ctx = context.RequestContext(fake.USER_ID, fake.PROJECT_ID, True) - vol = { - 'status': 'creating', - 'display_name': 'fake_name', - 'availability_zone': 'nova', - 'tenant_id': fake.PROJECT_ID, - 'id': fake.VOLUME_ID, - 'volume_type': None, - 'snapshot_id': None, - 'user_id': fake.USER_ID, - 'size': 0, - 'attach_status': fields.VolumeAttachStatus.DETACHED, - 'volume_type_id': None} - return fake_volume.fake_volume_obj(ctx, **vol) - - -def api_manage_new(*args, **kwargs): - volume = api_manage() - volume.status = 'managing' - return volume - - -def api_get_manageable_volumes(*args, **kwargs): - """Replacement for cinder.volume.api.API.get_manageable_volumes.""" - vols = [ - {'reference': {'source-name': 'volume-%s' % fake.VOLUME_ID}, - 'size': 4, - 'extra_info': 'qos_setting:high', - 'safe_to_manage': False, - 'cinder_id': fake.VOLUME_ID, - 'reason_not_safe': 'volume in use'}, - {'reference': {'source-name': 'myvol'}, - 'size': 5, - 'extra_info': 'qos_setting:low', - 'safe_to_manage': True, - 'cinder_id': None, - 'reason_not_safe': None}] - return vols - - -@ddt.ddt -@mock.patch('cinder.db.sqlalchemy.api.service_get', service_get) -@mock.patch('cinder.volume.volume_types.get_volume_type_by_name', - vt_get_volume_type_by_name) -@mock.patch('cinder.volume.volume_types.get_volume_type', - vt_get_volume_type) -class VolumeManageTest(test.TestCase): - """Test cases for cinder/api/contrib/volume_manage.py - - The API extension adds a POST /os-volume-manage API that is passed a cinder - host name, and a driver-specific reference parameter. If everything - is passed correctly, then the cinder.volume.api.API.manage_existing method - is invoked to manage an existing storage object on the host. - - In this set of test cases, we are ensuring that the code correctly parses - the request structure and raises the correct exceptions when things are not - right, and calls down into cinder.volume.api.API.manage_existing with the - correct arguments. 
- """ - - def setUp(self): - super(VolumeManageTest, self).setUp() - self._admin_ctxt = context.RequestContext(fake.USER_ID, - fake.PROJECT_ID, - is_admin=True) - self._non_admin_ctxt = context.RequestContext(fake.USER_ID, - fake.PROJECT_ID, - is_admin=False) - self.controller = volume_manage.VolumeManageController() - - def _get_resp_post(self, body): - """Helper to execute a POST os-volume-manage API call.""" - req = webob.Request.blank('/v2/%s/os-volume-manage' % fake.PROJECT_ID) - req.method = 'POST' - req.headers['Content-Type'] = 'application/json' - req.environ['cinder.context'] = self._admin_ctxt - req.body = jsonutils.dump_as_bytes(body) - res = req.get_response(app()) - return res - - def _get_resp_post_v3(self, body, version): - """Helper to execute a POST os-volume-manage API call.""" - req = webob.Request.blank('/v3/%s/os-volume-manage' % fake.PROJECT_ID) - req.method = 'POST' - req.headers['Content-Type'] = 'application/json' - req.environ['cinder.context'] = self._admin_ctxt - req.headers["OpenStack-API-Version"] = "volume " + version - req.api_version_request = api_version.APIVersionRequest(version) - req.body = jsonutils.dump_as_bytes(body) - res = req.get_response(app_v3()) - return res - - @ddt.data(False, True) - @mock.patch('cinder.volume.api.API.manage_existing', wraps=api_manage) - @mock.patch( - 'cinder.api.openstack.wsgi.Controller.validate_name_and_description') - def test_manage_volume_ok(self, cluster, mock_validate, mock_api_manage): - """Test successful manage volume execution. - - Tests for correct operation when valid arguments are passed in the - request body. We ensure that cinder.volume.api.API.manage_existing got - called with the correct arguments, and that we return the correct HTTP - code to the caller. 
- """ - body = {'volume': {'host': 'host_ok', - 'ref': 'fake_ref'}} - # This will be ignored - if cluster: - body['volume']['cluster'] = 'cluster' - res = self._get_resp_post(body) - self.assertEqual(http_client.ACCEPTED, res.status_int) - - # Check that the manage API was called with the correct arguments. - self.assertEqual(1, mock_api_manage.call_count) - args = mock_api_manage.call_args[0] - self.assertEqual(body['volume']['host'], args[1]) - self.assertIsNone(args[2]) # Cluster argument - self.assertEqual(body['volume']['ref'], args[3]) - self.assertTrue(mock_validate.called) - - def _get_resp_create(self, body, version='3.0'): - url = '/v3/%s/os-volume-manage' % fake.PROJECT_ID - req = webob.Request.blank(url, base_url='http://localhost.com' + url) - req.method = 'POST' - req.headers['Content-Type'] = 'application/json' - req.environ['cinder.context'] = self._admin_ctxt - req.body = jsonutils.dump_as_bytes(body) - req.headers = {'OpenStack-API-Version': 'volume %s' % version} - req.api_version_request = api_version.APIVersionRequest(version) - res = self.controller.create(req, body) - return res - - @mock.patch('cinder.volume.api.API.manage_existing', wraps=api_manage) - @mock.patch( - 'cinder.api.openstack.wsgi.Controller.validate_name_and_description') - def test_manage_volume_ok_cluster(self, mock_validate, mock_api_manage): - body = {'volume': {'cluster': 'cluster', - 'ref': 'fake_ref'}} - res = self._get_resp_create(body, '3.16') - self.assertEqual(['volume'], list(res.keys())) - - # Check that the manage API was called with the correct arguments. 
- self.assertEqual(1, mock_api_manage.call_count) - args = mock_api_manage.call_args[0] - self.assertIsNone(args[1]) - self.assertEqual(body['volume']['cluster'], args[2]) - self.assertEqual(body['volume']['ref'], args[3]) - self.assertTrue(mock_validate.called) - - @mock.patch( - 'cinder.api.openstack.wsgi.Controller.validate_name_and_description') - def test_manage_volume_fail_host_cluster(self, mock_validate): - body = {'volume': {'host': 'host_ok', - 'cluster': 'cluster', - 'ref': 'fake_ref'}} - self.assertRaises(exception.InvalidInput, - self._get_resp_create, body, '3.16') - - def test_manage_volume_missing_host(self): - """Test correct failure when host is not specified.""" - body = {'volume': {'ref': 'fake_ref'}} - res = self._get_resp_post(body) - self.assertEqual(http_client.BAD_REQUEST, res.status_int) - - @mock.patch('cinder.objects.Service.get_by_args') - def test_manage_volume_service_not_found_on_host(self, mock_service): - """Test correct failure when host having no volume service on it.""" - body = {'volume': {'host': 'host_ok', - 'ref': 'fake_ref'}} - mock_service.side_effect = exception.ServiceNotFound( - service_id='cinder-volume', - host='host_ok') - res = self._get_resp_post(body) - self.assertEqual(http_client.BAD_REQUEST, res.status_int) - - def test_manage_volume_missing_ref(self): - """Test correct failure when the ref is not specified.""" - body = {'volume': {'host': 'host_ok'}} - res = self._get_resp_post(body) - self.assertEqual(http_client.BAD_REQUEST, res.status_int) - - def test_manage_volume_with_invalid_bootable(self): - """Test correct failure when invalid bool value is specified.""" - body = {'volume': {'host': 'host_ok', - 'ref': 'fake_ref', - 'bootable': 'InvalidBool'}} - res = self._get_resp_post(body) - self.assertEqual(http_client.BAD_REQUEST, res.status_int) - - @mock.patch('cinder.objects.service.Service.is_up', return_value=True, - new_callable=mock.PropertyMock) - def test_manage_volume_disabled(self, mock_is_up): - 
"""Test manage volume failure due to disabled service.""" - body = {'volume': {'host': 'host_disabled', 'ref': 'fake_ref'}} - res = self._get_resp_post(body) - self.assertEqual(http_client.BAD_REQUEST, res.status_int, res) - self.assertEqual(exception.ServiceUnavailable.message, - res.json['badRequest']['message']) - mock_is_up.assert_not_called() - - @mock.patch('cinder.objects.service.Service.is_up', return_value=False, - new_callable=mock.PropertyMock) - def test_manage_volume_is_down(self, mock_is_up): - """Test manage volume failure due to down service.""" - body = {'volume': {'host': 'host_ok', 'ref': 'fake_ref'}} - res = self._get_resp_post(body) - self.assertEqual(http_client.BAD_REQUEST, res.status_int, res) - self.assertEqual(exception.ServiceUnavailable.message, - res.json['badRequest']['message']) - self.assertTrue(mock_is_up.called) - - @mock.patch('cinder.volume.api.API.manage_existing', api_manage) - @mock.patch( - 'cinder.api.openstack.wsgi.Controller.validate_name_and_description') - def test_manage_volume_volume_type_by_uuid(self, mock_validate): - """Tests for correct operation when a volume type is specified by ID. - - We wrap cinder.volume.api.API.manage_existing so that managing is not - actually attempted. - """ - body = {'volume': {'host': 'host_ok', - 'ref': 'fake_ref', - 'volume_type': fake.VOLUME_TYPE_ID, - 'bootable': True}} - res = self._get_resp_post(body) - self.assertEqual(http_client.ACCEPTED, res.status_int) - self.assertTrue(mock_validate.called) - - @mock.patch('cinder.volume.api.API.manage_existing', api_manage) - @mock.patch( - 'cinder.api.openstack.wsgi.Controller.validate_name_and_description') - def test_manage_volume_volume_type_by_name(self, mock_validate): - """Tests for correct operation when a volume type is specified by name. - - We wrap cinder.volume.api.API.manage_existing so that managing is not - actually attempted. 
- """ - body = {'volume': {'host': 'host_ok', - 'ref': 'fake_ref', - 'volume_type': 'good_fakevt'}} - res = self._get_resp_post(body) - self.assertEqual(http_client.ACCEPTED, res.status_int) - self.assertTrue(mock_validate.called) - - def test_manage_volume_bad_volume_type_by_uuid(self): - """Test failure on nonexistent volume type specified by ID.""" - body = {'volume': {'host': 'host_ok', - 'ref': 'fake_ref', - 'volume_type': fake.WILL_NOT_BE_FOUND_ID}} - res = self._get_resp_post(body) - self.assertEqual(http_client.BAD_REQUEST, res.status_int) - - def test_manage_volume_bad_volume_type_by_name(self): - """Test failure on nonexistent volume type specified by name.""" - body = {'volume': {'host': 'host_ok', - 'ref': 'fake_ref', - 'volume_type': 'bad_fakevt'}} - res = self._get_resp_post(body) - self.assertEqual(http_client.BAD_REQUEST, res.status_int) - - def _get_resp_get(self, host, detailed, paging, admin=True): - """Helper to execute a GET os-volume-manage API call.""" - params = {'host': host} - if paging: - params.update({'marker': '1234', 'limit': 10, - 'offset': 4, 'sort': 'reference:asc'}) - query_string = "?%s" % urlencode(params) - detail = "" - if detailed: - detail = "/detail" - url = "/v2/%s/os-volume-manage%s%s" % (fake.PROJECT_ID, detail, - query_string) - req = webob.Request.blank(url) - req.method = 'GET' - req.headers['Content-Type'] = 'application/json' - req.environ['cinder.context'] = (self._admin_ctxt if admin - else self._non_admin_ctxt) - res = req.get_response(app()) - return res - - @mock.patch('cinder.volume.api.API.get_manageable_volumes', - wraps=api_get_manageable_volumes) - def test_get_manageable_volumes_non_admin(self, mock_api_manageable): - res = self._get_resp_get('fakehost', False, False, admin=False) - self.assertEqual(http_client.FORBIDDEN, res.status_int) - mock_api_manageable.assert_not_called() - res = self._get_resp_get('fakehost', True, False, admin=False) - self.assertEqual(http_client.FORBIDDEN, res.status_int) - 
mock_api_manageable.assert_not_called() - - @mock.patch('cinder.volume.api.API.get_manageable_volumes', - wraps=api_get_manageable_volumes) - def test_get_manageable_volumes_ok(self, mock_api_manageable): - res = self._get_resp_get('fakehost', False, True) - exp = {'manageable-volumes': - [{'reference': - {'source-name': - 'volume-%s' % fake.VOLUME_ID}, - 'size': 4, 'safe_to_manage': False}, - {'reference': {'source-name': 'myvol'}, - 'size': 5, 'safe_to_manage': True}]} - self.assertEqual(http_client.OK, res.status_int) - self.assertEqual(exp, jsonutils.loads(res.body)) - mock_api_manageable.assert_called_once_with( - self._admin_ctxt, 'fakehost', None, limit=10, marker='1234', - offset=4, sort_dirs=['asc'], sort_keys=['reference']) - - @mock.patch('cinder.volume.api.API.get_manageable_volumes', - side_effect=messaging.RemoteError( - exc_type='InvalidInput', value='marker not found: 1234')) - def test_get_manageable_volumes_non_existent_marker(self, - mock_api_manageable): - res = self._get_resp_get('fakehost', detailed=False, paging=True) - self.assertEqual(400, res.status_int) - self.assertTrue(mock_api_manageable.called) - - @mock.patch('cinder.volume.api.API.get_manageable_volumes', - wraps=api_get_manageable_volumes) - def test_get_manageable_volumes_detailed_ok(self, mock_api_manageable): - res = self._get_resp_get('fakehost', True, False) - exp = {'manageable-volumes': - [{'reference': {'source-name': 'volume-%s' % fake.VOLUME_ID}, - 'size': 4, 'reason_not_safe': 'volume in use', - 'cinder_id': fake.VOLUME_ID, 'safe_to_manage': False, - 'extra_info': 'qos_setting:high'}, - {'reference': {'source-name': 'myvol'}, 'cinder_id': None, - 'size': 5, 'reason_not_safe': None, 'safe_to_manage': True, - 'extra_info': 'qos_setting:low'}]} - self.assertEqual(http_client.OK, res.status_int) - self.assertEqual(exp, jsonutils.loads(res.body)) - mock_api_manageable.assert_called_once_with( - self._admin_ctxt, 'fakehost', None, limit=CONF.osapi_max_limit, - marker=None, 
offset=0, sort_dirs=['desc'], - sort_keys=['reference']) - - @mock.patch('cinder.volume.api.API.get_manageable_volumes', - side_effect=messaging.RemoteError( - exc_type='InvalidInput', value='marker not found: 1234')) - def test_get_manageable_volumes_non_existent_marker_detailed( - self, mock_api_manageable): - res = self._get_resp_get('fakehost', detailed=True, paging=True) - self.assertEqual(400, res.status_int) - self.assertTrue(mock_api_manageable.called) - - @ddt.data({'a' * 256: 'a'}, - {'a': 'a' * 256}, - {'': 'a'}, - {'a': None}, - ) - def test_manage_volume_with_invalid_metadata(self, value): - body = {'volume': {'host': 'host_ok', - 'ref': 'fake_ref', - "metadata": value}} - res = self._get_resp_post(body) - self.assertEqual(http_client.BAD_REQUEST, res.status_int) - - @mock.patch('cinder.objects.service.Service.is_up', return_value=True, - new_callable=mock.PropertyMock) - def test_get_manageable_volumes_disabled(self, mock_is_up): - res = self._get_resp_get('host_disabled', False, True) - self.assertEqual(http_client.BAD_REQUEST, res.status_int, res) - self.assertEqual(exception.ServiceUnavailable.message, - res.json['badRequest']['message']) - mock_is_up.assert_not_called() - - @mock.patch('cinder.objects.service.Service.is_up', return_value=False, - new_callable=mock.PropertyMock) - def test_get_manageable_volumes_is_down(self, mock_is_up): - res = self._get_resp_get('host_ok', False, True) - self.assertEqual(http_client.BAD_REQUEST, res.status_int, res) - self.assertEqual(exception.ServiceUnavailable.message, - res.json['badRequest']['message']) - self.assertTrue(mock_is_up.called) - - @mock.patch('cinder.volume.api.API.manage_existing', wraps=api_manage_new) - def test_manage_volume_with_creating_status_in_v3(self, mock_api_manage): - """Test managing volume to return 'creating' status in V3 API.""" - body = {'volume': {'host': 'host_ok', - 'ref': 'fake_ref'}} - res = self._get_resp_post_v3(body, '3.15') - self.assertEqual(http_client.ACCEPTED, 
res.status_int) - self.assertEqual(1, mock_api_manage.call_count) - self.assertEqual('creating', - jsonutils.loads(res.body)['volume']['status']) - - @mock.patch('cinder.volume.api.API.manage_existing', wraps=api_manage_new) - def test_manage_volume_with_creating_status_in_v2(self, mock_api_manage): - """Test managing volume to return 'creating' status in V2 API.""" - - body = {'volume': {'host': 'host_ok', - 'ref': 'fake_ref'}} - res = self._get_resp_post(body) - self.assertEqual(http_client.ACCEPTED, res.status_int) - self.assertEqual(1, mock_api_manage.call_count) - self.assertEqual('creating', - jsonutils.loads(res.body)['volume']['status']) diff --git a/cinder/tests/unit/api/contrib/test_volume_migration_status_attribute.py b/cinder/tests/unit/api/contrib/test_volume_migration_status_attribute.py deleted file mode 100644 index b52f7c475..000000000 --- a/cinder/tests/unit/api/contrib/test_volume_migration_status_attribute.py +++ /dev/null @@ -1,130 +0,0 @@ -# Copyright 2013 IBM Corp. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -import uuid - -from oslo_serialization import jsonutils -from oslo_utils import timeutils -import webob - -from cinder import context -from cinder import objects -from cinder.objects import fields -from cinder import test -from cinder.tests.unit.api import fakes -from cinder.tests.unit import fake_constants as fake -from cinder.tests.unit import fake_volume -from cinder import volume - - -def fake_db_volume_get(*args, **kwargs): - return { - 'id': fake.VOLUME_ID, - 'host': 'host001', - 'status': 'available', - 'size': 5, - 'availability_zone': 'somewhere', - 'created_at': timeutils.utcnow(), - 'attach_status': fields.VolumeAttachStatus.DETACHED, - 'display_name': 'anothervolume', - 'display_description': 'Just another volume!', - 'volume_type_id': None, - 'snapshot_id': None, - 'project_id': fake.PROJECT_ID, - 'migration_status': 'migrating', - '_name_id': fake.VOLUME2_ID, - } - - -def fake_volume_api_get(*args, **kwargs): - ctx = context.RequestContext(fake.USER_ID, fake.PROJECT_ID, True) - db_volume = fake_db_volume_get() - return fake_volume.fake_volume_obj(ctx, **db_volume) - - -def fake_volume_get_all(*args, **kwargs): - return objects.VolumeList(objects=[fake_volume_api_get()]) - - -def app(): - # no auth, just let environ['cinder.context'] pass through - api = fakes.router.APIRouter() - mapper = fakes.urlmap.URLMap() - mapper['/v2'] = api - return mapper - - -class VolumeMigStatusAttributeTest(test.TestCase): - - def setUp(self): - super(VolumeMigStatusAttributeTest, self).setUp() - self.mock_object(volume.api.API, 'get', fake_volume_api_get) - self.mock_object(volume.api.API, 'get_all', fake_volume_get_all) - self.UUID = uuid.uuid4() - - def test_get_volume_allowed(self): - ctx = context.RequestContext(fake.USER_ID, fake.PROJECT_ID, True) - req = webob.Request.blank('/v2/%s/volumes/%s' % ( - fake.PROJECT_ID, self.UUID)) - req.method = 'GET' - req.environ['cinder.context'] = ctx - res = req.get_response(app()) - vol = jsonutils.loads(res.body)['volume'] - 
self.assertEqual('migrating', - vol['os-vol-mig-status-attr:migstat']) - self.assertEqual(fake.VOLUME2_ID, - vol['os-vol-mig-status-attr:name_id']) - - def test_get_volume_unallowed(self): - ctx = context.RequestContext(fake.USER_ID, fake.PROJECT_ID, False) - req = webob.Request.blank('/v2/%s/volumes/%s' % ( - fake.PROJECT_ID, self.UUID)) - req.method = 'GET' - req.environ['cinder.context'] = ctx - res = req.get_response(app()) - vol = jsonutils.loads(res.body)['volume'] - self.assertNotIn('os-vol-mig-status-attr:migstat', vol) - self.assertNotIn('os-vol-mig-status-attr:name_id', vol) - - def test_list_detail_volumes_allowed(self): - ctx = context.RequestContext(fake.USER_ID, fake.PROJECT_ID, True) - req = webob.Request.blank('/v2/%s/volumes/detail' % fake.PROJECT_ID) - req.method = 'GET' - req.environ['cinder.context'] = ctx - res = req.get_response(app()) - vol = jsonutils.loads(res.body)['volumes'] - self.assertEqual('migrating', - vol[0]['os-vol-mig-status-attr:migstat']) - self.assertEqual(fake.VOLUME2_ID, - vol[0]['os-vol-mig-status-attr:name_id']) - - def test_list_detail_volumes_unallowed(self): - ctx = context.RequestContext(fake.USER_ID, fake.PROJECT_ID, False) - req = webob.Request.blank('/v2/%s/volumes/detail' % fake.PROJECT_ID) - req.method = 'GET' - req.environ['cinder.context'] = ctx - res = req.get_response(app()) - vol = jsonutils.loads(res.body)['volumes'] - self.assertNotIn('os-vol-mig-status-attr:migstat', vol[0]) - self.assertNotIn('os-vol-mig-status-attr:name_id', vol[0]) - - def test_list_simple_volumes_no_migration_status(self): - ctx = context.RequestContext(fake.USER_ID, fake.PROJECT_ID, True) - req = webob.Request.blank('/v2/%s/volumes' % fake.PROJECT_ID) - req.method = 'GET' - req.environ['cinder.context'] = ctx - res = req.get_response(app()) - vol = jsonutils.loads(res.body)['volumes'] - self.assertNotIn('os-vol-mig-status-attr:migstat', vol[0]) - self.assertNotIn('os-vol-mig-status-attr:name_id', vol[0]) diff --git 
a/cinder/tests/unit/api/contrib/test_volume_tenant_attribute.py b/cinder/tests/unit/api/contrib/test_volume_tenant_attribute.py deleted file mode 100644 index 47ca4248f..000000000 --- a/cinder/tests/unit/api/contrib/test_volume_tenant_attribute.py +++ /dev/null @@ -1,106 +0,0 @@ -# Copyright 2012 OpenStack Foundation -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import uuid - -from oslo_serialization import jsonutils -import webob - -from cinder import context -from cinder import objects -from cinder import test -from cinder.tests.unit.api import fakes -from cinder.tests.unit import fake_constants as fake -from cinder.tests.unit import fake_volume -from cinder import volume - - -PROJECT_ID = '88fd1da4-f464-4a87-9ce5-26f2f40743b9' - - -def fake_volume_get(*args, **kwargs): - ctx = context.RequestContext(fake.USER_ID, fake.PROJECT_ID, False) - vol = { - 'id': fake.VOLUME_ID, - 'project_id': PROJECT_ID, - } - return fake_volume.fake_volume_obj(ctx, **vol) - - -def fake_volume_get_all(*args, **kwargs): - return objects.VolumeList(objects=[fake_volume_get()]) - - -def app(): - # no auth, just let environ['cinder.context'] pass through - api = fakes.router.APIRouter() - mapper = fakes.urlmap.URLMap() - mapper['/v2'] = api - return mapper - - -class VolumeTenantAttributeTest(test.TestCase): - - def setUp(self): - super(VolumeTenantAttributeTest, self).setUp() - self.mock_object(volume.api.API, 'get', fake_volume_get) - self.mock_object(volume.api.API, 'get_all', 
fake_volume_get_all) - self.UUID = uuid.uuid4() - - def test_get_volume_allowed(self): - ctx = context.RequestContext(fake.USER_ID, fake.PROJECT_ID, True) - req = webob.Request.blank('/v2/%s/volumes/%s' % ( - fake.PROJECT_ID, self.UUID)) - req.method = 'GET' - req.environ['cinder.context'] = ctx - res = req.get_response(app()) - vol = jsonutils.loads(res.body)['volume'] - self.assertEqual(PROJECT_ID, vol['os-vol-tenant-attr:tenant_id']) - - def test_get_volume_unallowed(self): - ctx = context.RequestContext(fake.USER_ID, fake.PROJECT_ID, False) - req = webob.Request.blank('/v2/%s/volumes/%s' % ( - fake.PROJECT_ID, self.UUID)) - req.method = 'GET' - req.environ['cinder.context'] = ctx - res = req.get_response(app()) - vol = jsonutils.loads(res.body)['volume'] - self.assertNotIn('os-vol-tenant-attr:tenant_id', vol) - - def test_list_detail_volumes_allowed(self): - ctx = context.RequestContext(fake.USER_ID, fake.PROJECT_ID, True) - req = webob.Request.blank('/v2/%s/volumes/detail' % fake.PROJECT_ID) - req.method = 'GET' - req.environ['cinder.context'] = ctx - res = req.get_response(app()) - vol = jsonutils.loads(res.body)['volumes'] - self.assertEqual(PROJECT_ID, vol[0]['os-vol-tenant-attr:tenant_id']) - - def test_list_detail_volumes_unallowed(self): - ctx = context.RequestContext(fake.USER_ID, fake.PROJECT_ID, False) - req = webob.Request.blank('/v2/%s/volumes/detail' % fake.PROJECT_ID) - req.method = 'GET' - req.environ['cinder.context'] = ctx - res = req.get_response(app()) - vol = jsonutils.loads(res.body)['volumes'] - self.assertNotIn('os-vol-tenant-attr:tenant_id', vol[0]) - - def test_list_simple_volumes_no_tenant_id(self): - ctx = context.RequestContext(fake.USER_ID, fake.PROJECT_ID, True) - req = webob.Request.blank('/v2/%s/volumes' % fake.PROJECT_ID) - req.method = 'GET' - req.environ['cinder.context'] = ctx - res = req.get_response(app()) - vol = jsonutils.loads(res.body)['volumes'] - self.assertNotIn('os-vol-tenant-attr:tenant_id', vol[0]) diff --git 
a/cinder/tests/unit/api/contrib/test_volume_transfer.py b/cinder/tests/unit/api/contrib/test_volume_transfer.py deleted file mode 100644 index bc8229297..000000000 --- a/cinder/tests/unit/api/contrib/test_volume_transfer.py +++ /dev/null @@ -1,519 +0,0 @@ -# Copyright (C) 2012 Hewlett-Packard Development Company, L.P. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -""" -Tests for volume transfer code. -""" - -import mock - -from oslo_serialization import jsonutils -from six.moves import http_client -import webob - -from cinder.api.contrib import volume_transfer -from cinder import context -from cinder import db -from cinder import exception -from cinder.objects import fields -from cinder import test -from cinder.tests.unit.api import fakes -from cinder.tests.unit import fake_constants as fake -import cinder.transfer - - -class VolumeTransferAPITestCase(test.TestCase): - """Test Case for transfers API.""" - - def setUp(self): - super(VolumeTransferAPITestCase, self).setUp() - self.volume_transfer_api = cinder.transfer.API() - self.controller = volume_transfer.VolumeTransferController() - self.user_ctxt = context.RequestContext( - fake.USER_ID, fake.PROJECT_ID, auth_token=True, is_admin=True) - - def _create_transfer(self, volume_id=fake.VOLUME_ID, - display_name='test_transfer'): - """Create a transfer object.""" - return self.volume_transfer_api.create(context.get_admin_context(), - volume_id, - display_name) - - @staticmethod - def 
_create_volume(display_name='test_volume', - display_description='this is a test volume', - status='available', - size=1, - project_id=fake.PROJECT_ID, - attach_status=fields.VolumeAttachStatus.DETACHED): - """Create a volume object.""" - vol = {} - vol['host'] = 'fake_host' - vol['size'] = size - vol['user_id'] = fake.USER_ID - vol['project_id'] = project_id - vol['status'] = status - vol['display_name'] = display_name - vol['display_description'] = display_description - vol['attach_status'] = attach_status - vol['availability_zone'] = 'fake_zone' - return db.volume_create(context.get_admin_context(), vol)['id'] - - def test_show_transfer(self): - volume_id = self._create_volume(size=5) - transfer = self._create_transfer(volume_id) - req = webob.Request.blank('/v2/%s/os-volume-transfer/%s' % ( - fake.PROJECT_ID, transfer['id'])) - req.method = 'GET' - req.headers['Content-Type'] = 'application/json' - res = req.get_response(fakes.wsgi_app( - fake_auth_context=self.user_ctxt)) - res_dict = jsonutils.loads(res.body) - self.assertEqual(http_client.OK, res.status_int) - self.assertEqual('test_transfer', res_dict['transfer']['name']) - self.assertEqual(transfer['id'], res_dict['transfer']['id']) - self.assertEqual(volume_id, res_dict['transfer']['volume_id']) - - db.transfer_destroy(context.get_admin_context(), transfer['id']) - db.volume_destroy(context.get_admin_context(), volume_id) - - def test_show_transfer_with_transfer_NotFound(self): - req = webob.Request.blank('/v2/%s/os-volume-transfer/%s' % ( - fake.PROJECT_ID, fake.WILL_NOT_BE_FOUND_ID)) - req.method = 'GET' - req.headers['Content-Type'] = 'application/json' - res = req.get_response(fakes.wsgi_app( - fake_auth_context=self.user_ctxt)) - res_dict = jsonutils.loads(res.body) - - self.assertEqual(http_client.NOT_FOUND, res.status_int) - self.assertEqual(http_client.NOT_FOUND, - res_dict['itemNotFound']['code']) - self.assertEqual('Transfer %s could not be found.' 
% - fake.WILL_NOT_BE_FOUND_ID, - res_dict['itemNotFound']['message']) - - def test_list_transfers_json(self): - volume_id_1 = self._create_volume(size=5) - volume_id_2 = self._create_volume(size=5) - transfer1 = self._create_transfer(volume_id_1) - transfer2 = self._create_transfer(volume_id_2) - - req = webob.Request.blank('/v2/%s/os-volume-transfer' % - fake.PROJECT_ID) - req.method = 'GET' - req.headers['Content-Type'] = 'application/json' - res = req.get_response(fakes.wsgi_app( - fake_auth_context=self.user_ctxt)) - res_dict = jsonutils.loads(res.body) - - self.assertEqual(http_client.OK, res.status_int) - self.assertEqual(4, len(res_dict['transfers'][0])) - self.assertEqual(transfer1['id'], res_dict['transfers'][0]['id']) - self.assertEqual('test_transfer', res_dict['transfers'][0]['name']) - self.assertEqual(4, len(res_dict['transfers'][1])) - self.assertEqual('test_transfer', res_dict['transfers'][1]['name']) - - db.transfer_destroy(context.get_admin_context(), transfer2['id']) - db.transfer_destroy(context.get_admin_context(), transfer1['id']) - db.volume_destroy(context.get_admin_context(), volume_id_1) - db.volume_destroy(context.get_admin_context(), volume_id_2) - - def test_list_transfers_detail_json(self): - volume_id_1 = self._create_volume(size=5) - volume_id_2 = self._create_volume(size=5) - transfer1 = self._create_transfer(volume_id_1) - transfer2 = self._create_transfer(volume_id_2) - - req = webob.Request.blank('/v2/%s/os-volume-transfer/detail' % - fake.PROJECT_ID) - req.method = 'GET' - req.headers['Content-Type'] = 'application/json' - req.headers['Accept'] = 'application/json' - res = req.get_response(fakes.wsgi_app( - fake_auth_context=self.user_ctxt)) - res_dict = jsonutils.loads(res.body) - - self.assertEqual(http_client.OK, res.status_int) - self.assertEqual(5, len(res_dict['transfers'][0])) - self.assertEqual('test_transfer', - res_dict['transfers'][0]['name']) - self.assertEqual(transfer1['id'], res_dict['transfers'][0]['id']) - 
self.assertEqual(volume_id_1, res_dict['transfers'][0]['volume_id']) - - self.assertEqual(5, len(res_dict['transfers'][1])) - self.assertEqual('test_transfer', - res_dict['transfers'][1]['name']) - self.assertEqual(transfer2['id'], res_dict['transfers'][1]['id']) - self.assertEqual(volume_id_2, res_dict['transfers'][1]['volume_id']) - - db.transfer_destroy(context.get_admin_context(), transfer2['id']) - db.transfer_destroy(context.get_admin_context(), transfer1['id']) - db.volume_destroy(context.get_admin_context(), volume_id_2) - db.volume_destroy(context.get_admin_context(), volume_id_1) - - def test_list_transfers_with_all_tenants(self): - volume_id_1 = self._create_volume(size=5) - volume_id_2 = self._create_volume(size=5, project_id=fake.PROJECT_ID) - transfer1 = self._create_transfer(volume_id_1) - transfer2 = self._create_transfer(volume_id_2) - - req = fakes.HTTPRequest.blank('/v2/%s/os-volume-transfer?' - 'all_tenants=1' % fake.PROJECT_ID, - use_admin_context=True) - res_dict = self.controller.index(req) - - expected = [(transfer1['id'], 'test_transfer'), - (transfer2['id'], 'test_transfer')] - ret = [] - for item in res_dict['transfers']: - ret.append((item['id'], item['name'])) - self.assertEqual(set(expected), set(ret)) - - db.transfer_destroy(context.get_admin_context(), transfer2['id']) - db.transfer_destroy(context.get_admin_context(), transfer1['id']) - db.volume_destroy(context.get_admin_context(), volume_id_1) - - @mock.patch( - 'cinder.api.openstack.wsgi.Controller.validate_string_length') - def test_create_transfer_json(self, mock_validate): - volume_id = self._create_volume(status='available', size=5) - body = {"transfer": {"name": "transfer1", - "volume_id": volume_id}} - - req = webob.Request.blank('/v2/%s/os-volume-transfer' % - fake.PROJECT_ID) - req.method = 'POST' - req.headers['Content-Type'] = 'application/json' - req.body = jsonutils.dump_as_bytes(body) - res = req.get_response(fakes.wsgi_app( - fake_auth_context=self.user_ctxt)) - - 
res_dict = jsonutils.loads(res.body) - - self.assertEqual(http_client.ACCEPTED, res.status_int) - self.assertIn('id', res_dict['transfer']) - self.assertIn('auth_key', res_dict['transfer']) - self.assertIn('created_at', res_dict['transfer']) - self.assertIn('name', res_dict['transfer']) - self.assertIn('volume_id', res_dict['transfer']) - self.assertTrue(mock_validate.called) - - db.volume_destroy(context.get_admin_context(), volume_id) - - def test_create_transfer_with_no_body(self): - req = webob.Request.blank('/v2/%s/os-volume-transfer' % - fake.PROJECT_ID) - req.body = jsonutils.dump_as_bytes(None) - req.method = 'POST' - req.headers['Content-Type'] = 'application/json' - req.headers['Accept'] = 'application/json' - res = req.get_response(fakes.wsgi_app( - fake_auth_context=self.user_ctxt)) - res_dict = jsonutils.loads(res.body) - - self.assertEqual(http_client.BAD_REQUEST, res.status_int) - self.assertEqual(http_client.BAD_REQUEST, - res_dict['badRequest']['code']) - self.assertEqual("Missing required element 'transfer' in " - "request body.", - res_dict['badRequest']['message']) - - def test_create_transfer_with_body_KeyError(self): - body = {"transfer": {"name": "transfer1"}} - req = webob.Request.blank('/v2/%s/os-volume-transfer' % - fake.PROJECT_ID) - req.method = 'POST' - req.headers['Content-Type'] = 'application/json' - req.body = jsonutils.dump_as_bytes(body) - res = req.get_response(fakes.wsgi_app( - fake_auth_context=self.user_ctxt)) - res_dict = jsonutils.loads(res.body) - - self.assertEqual(http_client.BAD_REQUEST, res.status_int) - self.assertEqual(http_client.BAD_REQUEST, - res_dict['badRequest']['code']) - self.assertEqual('Incorrect request body format', - res_dict['badRequest']['message']) - - def test_create_transfer_with_VolumeNotFound(self): - body = {"transfer": {"name": "transfer1", - "volume_id": 1234}} - - req = webob.Request.blank('/v2/%s/os-volume-transfer' % - fake.PROJECT_ID) - req.method = 'POST' - req.headers['Content-Type'] = 
'application/json' - req.body = jsonutils.dump_as_bytes(body) - res = req.get_response(fakes.wsgi_app( - fake_auth_context=self.user_ctxt)) - res_dict = jsonutils.loads(res.body) - - self.assertEqual(http_client.NOT_FOUND, res.status_int) - self.assertEqual(http_client.NOT_FOUND, - res_dict['itemNotFound']['code']) - self.assertEqual('Volume 1234 could not be found.', - res_dict['itemNotFound']['message']) - - def test_create_transfer_with_InvalidVolume(self): - volume_id = self._create_volume(status='attached') - body = {"transfer": {"name": "transfer1", - "volume_id": volume_id}} - req = webob.Request.blank('/v2/%s/os-volume-transfer' % - fake.PROJECT_ID) - req.method = 'POST' - req.headers['Content-Type'] = 'application/json' - req.body = jsonutils.dump_as_bytes(body) - res = req.get_response(fakes.wsgi_app( - fake_auth_context=self.user_ctxt)) - res_dict = jsonutils.loads(res.body) - - self.assertEqual(http_client.BAD_REQUEST, res.status_int) - self.assertEqual(http_client.BAD_REQUEST, - res_dict['badRequest']['code']) - self.assertEqual('Invalid volume: status must be available', - res_dict['badRequest']['message']) - - db.volume_destroy(context.get_admin_context(), volume_id) - - def test_delete_transfer_awaiting_transfer(self): - volume_id = self._create_volume() - transfer = self._create_transfer(volume_id) - req = webob.Request.blank('/v2/%s/os-volume-transfer/%s' % ( - fake.PROJECT_ID, transfer['id'])) - req.method = 'DELETE' - req.headers['Content-Type'] = 'application/json' - res = req.get_response(fakes.wsgi_app( - fake_auth_context=self.user_ctxt)) - - self.assertEqual(http_client.ACCEPTED, res.status_int) - - # verify transfer has been deleted - req = webob.Request.blank('/v2/%s/os-volume-transfer/%s' % ( - fake.PROJECT_ID, transfer['id'])) - req.method = 'GET' - req.headers['Content-Type'] = 'application/json' - res = req.get_response(fakes.wsgi_app( - fake_auth_context=self.user_ctxt)) - res_dict = jsonutils.loads(res.body) - - 
self.assertEqual(http_client.NOT_FOUND, res.status_int) - self.assertEqual(http_client.NOT_FOUND, - res_dict['itemNotFound']['code']) - self.assertEqual('Transfer %s could not be found.' % transfer['id'], - res_dict['itemNotFound']['message']) - self.assertEqual(db.volume_get(context.get_admin_context(), - volume_id)['status'], 'available') - - db.volume_destroy(context.get_admin_context(), volume_id) - - def test_delete_transfer_with_transfer_NotFound(self): - req = webob.Request.blank('/v2/%s/os-volume-transfer/%s' % ( - fake.PROJECT_ID, fake.WILL_NOT_BE_FOUND_ID)) - req.method = 'DELETE' - req.headers['Content-Type'] = 'application/json' - res = req.get_response(fakes.wsgi_app( - fake_auth_context=self.user_ctxt)) - res_dict = jsonutils.loads(res.body) - - self.assertEqual(http_client.NOT_FOUND, res.status_int) - self.assertEqual(http_client.NOT_FOUND, - res_dict['itemNotFound']['code']) - self.assertEqual('Transfer %s could not be found.' % - fake.WILL_NOT_BE_FOUND_ID, - res_dict['itemNotFound']['message']) - - def test_accept_transfer_volume_id_specified_json(self): - volume_id = self._create_volume() - transfer = self._create_transfer(volume_id) - - svc = self.start_service('volume', host='fake_host') - body = {"accept": {"id": transfer['id'], - "auth_key": transfer['auth_key']}} - req = webob.Request.blank('/v2/%s/os-volume-transfer/%s/accept' % ( - fake.PROJECT_ID, transfer['id'])) - req.method = 'POST' - req.headers['Content-Type'] = 'application/json' - req.body = jsonutils.dump_as_bytes(body) - res = req.get_response(fakes.wsgi_app( - fake_auth_context=self.user_ctxt)) - res_dict = jsonutils.loads(res.body) - - self.assertEqual(http_client.ACCEPTED, res.status_int) - self.assertEqual(transfer['id'], res_dict['transfer']['id']) - self.assertEqual(volume_id, res_dict['transfer']['volume_id']) - # cleanup - svc.stop() - - def test_accept_transfer_with_no_body(self): - volume_id = self._create_volume(size=5) - transfer = self._create_transfer(volume_id) - - 
req = webob.Request.blank('/v2/%s/os-volume-transfer/%s/accept' % ( - fake.PROJECT_ID, transfer['id'])) - req.body = jsonutils.dump_as_bytes(None) - req.method = 'POST' - req.headers['Content-Type'] = 'application/json' - req.headers['Accept'] = 'application/json' - res = req.get_response(fakes.wsgi_app( - fake_auth_context=self.user_ctxt)) - res_dict = jsonutils.loads(res.body) - - self.assertEqual(http_client.BAD_REQUEST, res.status_int) - self.assertEqual(http_client.BAD_REQUEST, - res_dict['badRequest']['code']) - self.assertEqual("Missing required element 'accept' in request body.", - res_dict['badRequest']['message']) - - db.volume_destroy(context.get_admin_context(), volume_id) - - def test_accept_transfer_with_body_KeyError(self): - volume_id = self._create_volume(size=5) - transfer = self._create_transfer(volume_id) - - req = webob.Request.blank('/v2/%s/os-volume-transfer/%s/accept' % ( - fake.PROJECT_ID, transfer['id'])) - body = {"": {}} - req.method = 'POST' - req.headers['Content-Type'] = 'application/json' - req.headers['Accept'] = 'application/json' - req.body = jsonutils.dump_as_bytes(body) - res = req.get_response(fakes.wsgi_app( - fake_auth_context=self.user_ctxt)) - - res_dict = jsonutils.loads(res.body) - - self.assertEqual(http_client.BAD_REQUEST, res.status_int) - self.assertEqual(http_client.BAD_REQUEST, - res_dict['badRequest']['code']) - self.assertEqual("Missing required element 'accept' in request body.", - res_dict['badRequest']['message']) - - def test_accept_transfer_invalid_id_auth_key(self): - volume_id = self._create_volume() - transfer = self._create_transfer(volume_id) - - body = {"accept": {"id": transfer['id'], - "auth_key": 1}} - req = webob.Request.blank('/v2/%s/os-volume-transfer/%s/accept' % ( - fake.PROJECT_ID, transfer['id'])) - req.method = 'POST' - req.headers['Content-Type'] = 'application/json' - req.body = jsonutils.dump_as_bytes(body) - res = req.get_response(fakes.wsgi_app( - fake_auth_context=self.user_ctxt)) - 
res_dict = jsonutils.loads(res.body) - - self.assertEqual(http_client.BAD_REQUEST, res.status_int) - self.assertEqual(http_client.BAD_REQUEST, - res_dict['badRequest']['code']) - self.assertEqual(res_dict['badRequest']['message'], - 'Invalid auth key: Attempt to transfer %s with ' - 'invalid auth key.' % transfer['id']) - - db.transfer_destroy(context.get_admin_context(), transfer['id']) - db.volume_destroy(context.get_admin_context(), volume_id) - - def test_accept_transfer_with_invalid_transfer(self): - volume_id = self._create_volume() - transfer = self._create_transfer(volume_id) - - body = {"accept": {"id": transfer['id'], - "auth_key": 1}} - req = webob.Request.blank('/v2/%s/os-volume-transfer/%s/accept' % ( - fake.PROJECT_ID, fake.WILL_NOT_BE_FOUND_ID)) - req.method = 'POST' - req.headers['Content-Type'] = 'application/json' - req.body = jsonutils.dump_as_bytes(body) - res = req.get_response(fakes.wsgi_app( - fake_auth_context=self.user_ctxt)) - res_dict = jsonutils.loads(res.body) - - self.assertEqual(http_client.NOT_FOUND, res.status_int) - self.assertEqual(http_client.NOT_FOUND, - res_dict['itemNotFound']['code']) - self.assertEqual('Transfer %s could not be found.' 
% - fake.WILL_NOT_BE_FOUND_ID, - res_dict['itemNotFound']['message']) - - db.transfer_destroy(context.get_admin_context(), transfer['id']) - db.volume_destroy(context.get_admin_context(), volume_id) - - def test_accept_transfer_with_VolumeSizeExceedsAvailableQuota(self): - - def fake_transfer_api_accept_throwing_VolumeSizeExceedsAvailableQuota( - cls, context, transfer, volume_id): - raise exception.VolumeSizeExceedsAvailableQuota(requested='2', - consumed='2', - quota='3') - - self.mock_object( - cinder.transfer.API, - 'accept', - fake_transfer_api_accept_throwing_VolumeSizeExceedsAvailableQuota) - - volume_id = self._create_volume() - transfer = self._create_transfer(volume_id) - - body = {"accept": {"id": transfer['id'], - "auth_key": transfer['auth_key']}} - req = webob.Request.blank('/v2/%s/os-volume-transfer/%s/accept' % ( - fake.PROJECT_ID, transfer['id'])) - - req.method = 'POST' - req.headers['Content-Type'] = 'application/json' - req.body = jsonutils.dump_as_bytes(body) - res = req.get_response(fakes.wsgi_app( - fake_auth_context=self.user_ctxt)) - res_dict = jsonutils.loads(res.body) - - self.assertEqual(413, res.status_int) - self.assertEqual(413, res_dict['overLimit']['code']) - self.assertEqual('Requested volume or snapshot exceeds allowed ' - 'gigabytes quota. 
Requested 2G, quota is 3G and ' - '2G has been consumed.', - res_dict['overLimit']['message']) - - def test_accept_transfer_with_VolumeLimitExceeded(self): - - def fake_transfer_api_accept_throwing_VolumeLimitExceeded(cls, - context, - transfer, - volume_id): - raise exception.VolumeLimitExceeded(allowed=1) - - self.mock_object(cinder.transfer.API, 'accept', - fake_transfer_api_accept_throwing_VolumeLimitExceeded) - - volume_id = self._create_volume() - transfer = self._create_transfer(volume_id) - - body = {"accept": {"id": transfer['id'], - "auth_key": transfer['auth_key']}} - req = webob.Request.blank('/v2/%s/os-volume-transfer/%s/accept' % ( - fake.PROJECT_ID, transfer['id'])) - - req.method = 'POST' - req.headers['Content-Type'] = 'application/json' - req.body = jsonutils.dump_as_bytes(body) - res = req.get_response(fakes.wsgi_app( - fake_auth_context=self.user_ctxt)) - res_dict = jsonutils.loads(res.body) - - self.assertEqual(413, res.status_int) - self.assertEqual(413, res_dict['overLimit']['code']) - self.assertEqual("VolumeLimitExceeded: Maximum number of volumes " - "allowed (1) exceeded for quota 'volumes'.", - res_dict['overLimit']['message']) diff --git a/cinder/tests/unit/api/contrib/test_volume_type_access.py b/cinder/tests/unit/api/contrib/test_volume_type_access.py deleted file mode 100644 index 8f56f0701..000000000 --- a/cinder/tests/unit/api/contrib/test_volume_type_access.py +++ /dev/null @@ -1,344 +0,0 @@ -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the -# License for the specific language governing permissions and limitations -# under the License. - -import datetime - -from six.moves import http_client -import webob - -from cinder.api.contrib import volume_type_access as type_access -from cinder.api.v2 import types as types_api_v2 -from cinder import context -from cinder import db -from cinder import exception -from cinder import test -from cinder.tests.unit.api import fakes -from cinder.tests.unit import fake_constants as fake - - -def generate_type(type_id, is_public): - return { - 'id': type_id, - 'name': u'test', - 'deleted': False, - 'created_at': datetime.datetime(2012, 1, 1, 1, 1, 1, 1), - 'updated_at': None, - 'deleted_at': None, - 'is_public': bool(is_public) - } - -VOLUME_TYPES = { - fake.VOLUME_TYPE_ID: generate_type(fake.VOLUME_TYPE_ID, True), - fake.VOLUME_TYPE2_ID: generate_type(fake.VOLUME_TYPE2_ID, True), - fake.VOLUME_TYPE3_ID: generate_type(fake.VOLUME_TYPE3_ID, False), - fake.VOLUME_TYPE4_ID: generate_type(fake.VOLUME_TYPE4_ID, False)} - -PROJ1_UUID = fake.PROJECT_ID -PROJ2_UUID = fake.PROJECT2_ID -PROJ3_UUID = fake.PROJECT3_ID - -ACCESS_LIST = [{'volume_type_id': fake.VOLUME_TYPE3_ID, - 'project_id': PROJ2_UUID}, - {'volume_type_id': fake.VOLUME_TYPE3_ID, - 'project_id': PROJ3_UUID}, - {'volume_type_id': fake.VOLUME_TYPE4_ID, - 'project_id': PROJ3_UUID}] - - -def fake_volume_type_get(context, id, inactive=False, expected_fields=None): - vol = VOLUME_TYPES[id] - if expected_fields and 'projects' in expected_fields: - vol['projects'] = [a['project_id'] - for a in ACCESS_LIST if a['volume_type_id'] == id] - return vol - - -def _has_type_access(type_id, project_id): - for access in ACCESS_LIST: - if access['volume_type_id'] == type_id and \ - access['project_id'] == project_id: - return True - return False - - -def fake_volume_type_get_all(context, inactive=False, filters=None, - marker=None, limit=None, sort_keys=None, - sort_dirs=None, offset=None, list_result=False): - if filters is 
None or filters['is_public'] is None: - if list_result: - return list(VOLUME_TYPES.values()) - return VOLUME_TYPES - res = {} - for k, v in VOLUME_TYPES.items(): - if filters['is_public'] and _has_type_access(k, context.project_id): - res.update({k: v}) - continue - if v['is_public'] == filters['is_public']: - res.update({k: v}) - if list_result: - return list(res.values()) - return res - - -class FakeResponse(object): - obj = {'volume_type': {'id': fake.VOLUME_TYPE_ID}, - 'volume_types': [ - {'id': fake.VOLUME_TYPE_ID}, - {'id': fake.VOLUME_TYPE3_ID}]} - - def attach(self, **kwargs): - pass - - -class FakeRequest(object): - environ = {"cinder.context": context.get_admin_context()} - - def cached_resource_by_id(self, resource_id, name=None): - return VOLUME_TYPES[resource_id] - - -class VolumeTypeAccessTest(test.TestCase): - - def setUp(self): - super(VolumeTypeAccessTest, self).setUp() - self.type_controller_v2 = types_api_v2.VolumeTypesController() - self.type_access_controller = type_access.VolumeTypeAccessController() - self.type_action_controller = type_access.VolumeTypeActionController() - self.req = FakeRequest() - self.context = self.req.environ['cinder.context'] - self.mock_object(db, 'volume_type_get', - fake_volume_type_get) - self.mock_object(db, 'volume_type_get_all', - fake_volume_type_get_all) - - def assertVolumeTypeListEqual(self, expected, observed): - self.assertEqual(len(expected), len(observed)) - expected = sorted(expected, key=lambda item: item['id']) - observed = sorted(observed, key=lambda item: item['id']) - for d1, d2 in zip(expected, observed): - self.assertEqual(d1['id'], d2['id']) - - def test_list_type_access_public(self): - """Querying os-volume-type-access on public type should return 404.""" - req = fakes.HTTPRequest.blank('/v2/%s/types/os-volume-type-access' % - fake.PROJECT_ID, - use_admin_context=True) - self.assertRaises(exception.VolumeTypeAccessNotFound, - self.type_access_controller.index, - req, fake.VOLUME_TYPE2_ID) - - 
def test_list_type_access_private(self): - expected = {'volume_type_access': [ - {'volume_type_id': fake.VOLUME_TYPE3_ID, - 'project_id': PROJ2_UUID}, - {'volume_type_id': fake.VOLUME_TYPE3_ID, - 'project_id': PROJ3_UUID}]} - result = self.type_access_controller.index(self.req, - fake.VOLUME_TYPE3_ID) - self.assertEqual(expected, result) - - def test_list_with_no_context(self): - req = fakes.HTTPRequest.blank('/v2/flavors/%s/flavors' % - fake.PROJECT_ID) - - def fake_authorize(context, target=None, action=None): - raise exception.PolicyNotAuthorized(action='index') - self.mock_object(type_access, 'authorize', fake_authorize) - - self.assertRaises(exception.PolicyNotAuthorized, - self.type_access_controller.index, - req, fake.PROJECT_ID) - - def test_list_type_with_admin_default_proj1(self): - expected = {'volume_types': [{'id': fake.VOLUME_TYPE_ID}, - {'id': fake.VOLUME_TYPE2_ID}]} - req = fakes.HTTPRequest.blank('/v2/%s/types' % fake.PROJECT_ID, - use_admin_context=True) - req.environ['cinder.context'].project_id = PROJ1_UUID - result = self.type_controller_v2.index(req) - self.assertVolumeTypeListEqual(expected['volume_types'], - result['volume_types']) - - def test_list_type_with_admin_default_proj2(self): - expected = {'volume_types': [{'id': fake.VOLUME_TYPE_ID}, - {'id': fake.VOLUME_TYPE2_ID}, - {'id': fake.VOLUME_TYPE3_ID}]} - req = fakes.HTTPRequest.blank('/v2/%s/types' % PROJ2_UUID, - use_admin_context=True) - req.environ['cinder.context'].project_id = PROJ2_UUID - result = self.type_controller_v2.index(req) - self.assertVolumeTypeListEqual(expected['volume_types'], - result['volume_types']) - - def test_list_type_with_admin_ispublic_true(self): - expected = {'volume_types': [{'id': fake.VOLUME_TYPE_ID}, - {'id': fake.VOLUME_TYPE2_ID}]} - req = fakes.HTTPRequest.blank('/v2/%s/types?is_public=true' % - fake.PROJECT_ID, - use_admin_context=True) - result = self.type_controller_v2.index(req) - self.assertVolumeTypeListEqual(expected['volume_types'], - 
result['volume_types']) - - def test_list_type_with_admin_ispublic_false(self): - expected = {'volume_types': [{'id': fake.VOLUME_TYPE3_ID}, - {'id': fake.VOLUME_TYPE4_ID}]} - req = fakes.HTTPRequest.blank('/v2/%s/types?is_public=false' % - fake.PROJECT_ID, - use_admin_context=True) - result = self.type_controller_v2.index(req) - self.assertVolumeTypeListEqual(expected['volume_types'], - result['volume_types']) - - def test_list_type_with_admin_ispublic_false_proj2(self): - expected = {'volume_types': [{'id': fake.VOLUME_TYPE3_ID}, - {'id': fake.VOLUME_TYPE4_ID}]} - req = fakes.HTTPRequest.blank('/v2/%s/types?is_public=false' % - fake.PROJECT_ID, - use_admin_context=True) - req.environ['cinder.context'].project_id = PROJ2_UUID - result = self.type_controller_v2.index(req) - self.assertVolumeTypeListEqual(expected['volume_types'], - result['volume_types']) - - def test_list_type_with_admin_ispublic_none(self): - expected = {'volume_types': [{'id': fake.VOLUME_TYPE_ID}, - {'id': fake.VOLUME_TYPE2_ID}, - {'id': fake.VOLUME_TYPE3_ID}, - {'id': fake.VOLUME_TYPE4_ID}]} - req = fakes.HTTPRequest.blank('/v2/%s/types?is_public=none' % - fake.PROJECT_ID, - use_admin_context=True) - result = self.type_controller_v2.index(req) - self.assertVolumeTypeListEqual(expected['volume_types'], - result['volume_types']) - - def test_list_type_with_no_admin_default(self): - expected = {'volume_types': [{'id': fake.VOLUME_TYPE_ID}, - {'id': fake.VOLUME_TYPE2_ID}]} - req = fakes.HTTPRequest.blank('/v2/%s/types' % fake.PROJECT_ID, - use_admin_context=False) - result = self.type_controller_v2.index(req) - self.assertVolumeTypeListEqual(expected['volume_types'], - result['volume_types']) - - def test_list_type_with_no_admin_ispublic_true(self): - expected = {'volume_types': [{'id': fake.VOLUME_TYPE_ID}, - {'id': fake.VOLUME_TYPE2_ID}]} - req = fakes.HTTPRequest.blank('/v2/%s/types?is_public=true' % - fake.PROJECT_ID, - use_admin_context=False) - result = self.type_controller_v2.index(req) - 
self.assertVolumeTypeListEqual(expected['volume_types'], - result['volume_types']) - - def test_list_type_with_no_admin_ispublic_false(self): - expected = {'volume_types': [{'id': fake.VOLUME_TYPE_ID}, - {'id': fake.VOLUME_TYPE2_ID}]} - req = fakes.HTTPRequest.blank('/v2/%s/types?is_public=false' % - fake.PROJECT_ID, - use_admin_context=False) - result = self.type_controller_v2.index(req) - self.assertVolumeTypeListEqual(expected['volume_types'], - result['volume_types']) - - def test_list_type_with_no_admin_ispublic_none(self): - expected = {'volume_types': [{'id': fake.VOLUME_TYPE_ID}, - {'id': fake.VOLUME_TYPE2_ID}]} - req = fakes.HTTPRequest.blank('/v2/%s/types?is_public=none' % - fake.PROJECT_ID, - use_admin_context=False) - result = self.type_controller_v2.index(req) - self.assertVolumeTypeListEqual(expected['volume_types'], - result['volume_types']) - - def test_show(self): - resp = FakeResponse() - self.type_action_controller.show(self.req, resp, fake.VOLUME_TYPE_ID) - self.assertEqual({'id': fake.VOLUME_TYPE_ID, - 'os-volume-type-access:is_public': True}, - resp.obj['volume_type']) - - def test_detail(self): - resp = FakeResponse() - self.type_action_controller.detail(self.req, resp) - self.assertEqual( - [{'id': fake.VOLUME_TYPE_ID, - 'os-volume-type-access:is_public': True}, - {'id': fake.VOLUME_TYPE3_ID, - 'os-volume-type-access:is_public': False}], - resp.obj['volume_types']) - - def test_create(self): - resp = FakeResponse() - self.type_action_controller.create(self.req, {}, resp) - self.assertEqual({'id': fake.VOLUME_TYPE_ID, - 'os-volume-type-access:is_public': True}, - resp.obj['volume_type']) - - def test_add_project_access(self): - def fake_add_volume_type_access(context, type_id, project_id): - self.assertEqual(fake.VOLUME_TYPE4_ID, type_id, "type_id") - self.assertEqual(PROJ2_UUID, project_id, "project_id") - self.mock_object(db, 'volume_type_access_add', - fake_add_volume_type_access) - body = {'addProjectAccess': {'project': PROJ2_UUID}} - 
req = fakes.HTTPRequest.blank('/v2/%s/types/%s/action' % ( - fake.PROJECT_ID, fake.VOLUME_TYPE3_ID), - use_admin_context=True) - result = self.type_action_controller._addProjectAccess( - req, fake.VOLUME_TYPE4_ID, body) - self.assertEqual(http_client.ACCEPTED, result.status_code) - - def test_add_project_access_with_no_admin_user(self): - req = fakes.HTTPRequest.blank('/v2/%s/types/%s/action' % ( - fake.PROJECT_ID, fake.VOLUME_TYPE3_ID), - use_admin_context=False) - body = {'addProjectAccess': {'project': PROJ2_UUID}} - self.assertRaises(exception.PolicyNotAuthorized, - self.type_action_controller._addProjectAccess, - req, fake.VOLUME_TYPE3_ID, body) - - def test_add_project_access_with_already_added_access(self): - def fake_add_volume_type_access(context, type_id, project_id): - raise exception.VolumeTypeAccessExists(volume_type_id=type_id, - project_id=project_id) - self.mock_object(db, 'volume_type_access_add', - fake_add_volume_type_access) - body = {'addProjectAccess': {'project': PROJ2_UUID}} - req = fakes.HTTPRequest.blank('/v2/%s/types/%s/action' % ( - fake.PROJECT_ID, fake.VOLUME_TYPE3_ID), use_admin_context=True) - self.assertRaises(webob.exc.HTTPConflict, - self.type_action_controller._addProjectAccess, - req, fake.VOLUME_TYPE3_ID, body) - - def test_remove_project_access_with_bad_access(self): - def fake_remove_volume_type_access(context, type_id, project_id): - raise exception.VolumeTypeAccessNotFound(volume_type_id=type_id, - project_id=project_id) - self.mock_object(db, 'volume_type_access_remove', - fake_remove_volume_type_access) - body = {'removeProjectAccess': {'project': PROJ2_UUID}} - req = fakes.HTTPRequest.blank('/v2/%s/types/%s/action' % ( - fake.PROJECT_ID, fake.VOLUME_TYPE3_ID), use_admin_context=True) - self.assertRaises(exception.VolumeTypeAccessNotFound, - self.type_action_controller._removeProjectAccess, - req, fake.VOLUME_TYPE4_ID, body) - - def test_remove_project_access_with_no_admin_user(self): - req = 
fakes.HTTPRequest.blank('/v2/%s/types/%s/action' % ( - fake.PROJECT_ID, fake.VOLUME_TYPE3_ID), use_admin_context=False) - body = {'removeProjectAccess': {'project': PROJ2_UUID}} - self.assertRaises(exception.PolicyNotAuthorized, - self.type_action_controller._removeProjectAccess, - req, fake.VOLUME_TYPE3_ID, body) diff --git a/cinder/tests/unit/api/contrib/test_volume_type_encryption.py b/cinder/tests/unit/api/contrib/test_volume_type_encryption.py deleted file mode 100644 index 819949cef..000000000 --- a/cinder/tests/unit/api/contrib/test_volume_type_encryption.py +++ /dev/null @@ -1,618 +0,0 @@ -# Copyright (c) 2013 The Johns Hopkins University/Applied Physics Laboratory -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -import mock - -from oslo_serialization import jsonutils -from six.moves import http_client -import webob - -from cinder import context -from cinder import db -from cinder import test -from cinder.tests.unit.api import fakes -from cinder.tests.unit import fake_constants as fake -from cinder import utils - - -def return_volume_type_encryption(context, volume_type_id): - return fake_volume_type_encryption() - - -def fake_volume_type_encryption(): - values = { - 'cipher': 'fake_cipher', - 'control_location': 'front-end', - 'key_size': 256, - 'provider': 'fake_provider', - 'volume_type_id': fake.VOLUME_TYPE_ID - } - return values - - -class VolumeTypeEncryptionTest(test.TestCase): - - _default_volume_type = { - 'id': fake.VOLUME_TYPE_ID, - 'name': 'fake_type', - } - - def setUp(self): - super(VolumeTypeEncryptionTest, self).setUp() - self.flags(host='fake') - self.api_path = '/v2/%s/os-volume-types/%s/encryption' % ( - fake.PROJECT_ID, fake.VOLUME_TYPE_ID) - """to reset notifier drivers left over from other api/contrib tests""" - - def _get_response(self, volume_type, admin=True, - url='/v2/%s/types/%s/encryption', - req_method='GET', req_body=None, - req_headers=None): - ctxt = context.RequestContext(fake.USER_ID, fake.PROJECT_ID, - is_admin=admin) - - req = webob.Request.blank(url % (fake.PROJECT_ID, volume_type['id'])) - req.method = req_method - req.body = req_body - if req_headers: - req.headers['Content-Type'] = req_headers - - return req.get_response(fakes.wsgi_app(fake_auth_context=ctxt)) - - def _create_type_and_encryption(self, volume_type, body=None): - if body is None: - body = {"encryption": fake_volume_type_encryption()} - - db.volume_type_create(context.get_admin_context(), volume_type) - - return self._get_response(volume_type, req_method='POST', - req_body=jsonutils.dump_as_bytes(body), - req_headers='application/json') - - def test_index(self): - self.mock_object(db, 'volume_type_encryption_get', - return_volume_type_encryption) - - volume_type = 
self._default_volume_type - self._create_type_and_encryption(volume_type) - - res = self._get_response(volume_type) - self.assertEqual(http_client.OK, res.status_code) - res_dict = jsonutils.loads(res.body) - - expected = fake_volume_type_encryption() - self.assertEqual(expected, res_dict) - - db.volume_type_destroy(context.get_admin_context(), volume_type['id']) - - def test_index_invalid_type(self): - volume_type = self._default_volume_type - res = self._get_response(volume_type) - self.assertEqual(http_client.NOT_FOUND, res.status_code) - res_dict = jsonutils.loads(res.body) - - expected = { - 'itemNotFound': { - 'code': http_client.NOT_FOUND, - 'message': ('Volume type %s could not be found.' - % volume_type['id']) - } - } - self.assertEqual(expected, res_dict) - - def test_show_key_size(self): - volume_type = self._default_volume_type - self._create_type_and_encryption(volume_type) - res = self._get_response(volume_type, - url='/v2/%s/types/%s/encryption/key_size') - res_dict = jsonutils.loads(res.body) - - self.assertEqual(http_client.OK, res.status_code) - self.assertEqual(256, res_dict['key_size']) - - db.volume_type_destroy(context.get_admin_context(), volume_type['id']) - - def test_show_provider(self): - volume_type = self._default_volume_type - self._create_type_and_encryption(volume_type) - - res = self._get_response(volume_type, - url='/v2/%s/types/%s/encryption/provider') - res_dict = jsonutils.loads(res.body) - - self.assertEqual(http_client.OK, res.status_code) - self.assertEqual('fake_provider', res_dict['provider']) - db.volume_type_destroy(context.get_admin_context(), volume_type['id']) - - def test_show_item_not_found(self): - volume_type = self._default_volume_type - self._create_type_and_encryption(volume_type) - - res = self._get_response(volume_type, - url='/v2/%s/types/%s/encryption/fake') - res_dict = jsonutils.loads(res.body) - - self.assertEqual(http_client.NOT_FOUND, res.status_code) - expected = { - 'itemNotFound': { - 'code': 
http_client.NOT_FOUND, - 'message': ('Volume type encryption for type %s does not ' - 'exist.' % volume_type['id']) - } - } - self.assertEqual(expected, res_dict) - db.volume_type_destroy(context.get_admin_context(), volume_type['id']) - - def _create(self, cipher, control_location, key_size, provider): - volume_type = self._default_volume_type - db.volume_type_create(context.get_admin_context(), volume_type) - - body = {"encryption": {'cipher': cipher, - 'control_location': control_location, - 'key_size': key_size, - 'provider': provider, - 'volume_type_id': volume_type['id']}} - - self.assertEqual(0, len(self.notifier.notifications)) - res = self._get_response(volume_type) - res_dict = jsonutils.loads(res.body) - self.assertEqual(http_client.OK, res.status_code) - # Confirm that volume type has no encryption information - # before create. - self.assertEqual(b'{}', res.body) - - # Create encryption specs for the volume type - # with the defined body. - res = self._get_response(volume_type, req_method='POST', - req_body=jsonutils.dump_as_bytes(body), - req_headers='application/json') - res_dict = jsonutils.loads(res.body) - - self.assertEqual(1, len(self.notifier.notifications)) - - # check response - self.assertIn('encryption', res_dict) - self.assertEqual(cipher, res_dict['encryption']['cipher']) - self.assertEqual(control_location, - res_dict['encryption']['control_location']) - self.assertEqual(key_size, res_dict['encryption']['key_size']) - self.assertEqual(provider, res_dict['encryption']['provider']) - self.assertEqual(volume_type['id'], - res_dict['encryption']['volume_type_id']) - - # check database - encryption = db.volume_type_encryption_get(context.get_admin_context(), - volume_type['id']) - self.assertIsNotNone(encryption) - self.assertEqual(cipher, encryption['cipher']) - self.assertEqual(key_size, encryption['key_size']) - self.assertEqual(provider, encryption['provider']) - self.assertEqual(volume_type['id'], encryption['volume_type_id']) - - 
db.volume_type_destroy(context.get_admin_context(), volume_type['id']) - - def test_create_json(self): - with mock.patch.object(utils, - 'validate_integer') as mock_validate_integer: - mock_validate_integer.return_value = 128 - self._create('fake_cipher', 'front-end', 128, 'fake_encryptor') - self.assertTrue(mock_validate_integer.called) - - def test_create_invalid_volume_type(self): - volume_type = self._default_volume_type - body = {"encryption": fake_volume_type_encryption()} - - # Attempt to create encryption without first creating type - res = self._get_response(volume_type, req_method='POST', - req_body=jsonutils.dump_as_bytes(body), - req_headers='application/json') - res_dict = jsonutils.loads(res.body) - - self.assertEqual(0, len(self.notifier.notifications)) - self.assertEqual(http_client.NOT_FOUND, res.status_code) - - expected = { - 'itemNotFound': { - 'code': http_client.NOT_FOUND, - 'message': ('Volume type %s could not be found.' - % volume_type['id']) - } - } - self.assertEqual(expected, res_dict) - - def test_create_encryption_type_exists(self): - volume_type = self._default_volume_type - body = {"encryption": fake_volume_type_encryption()} - self._create_type_and_encryption(volume_type, body) - - # Try to create encryption specs for a volume type - # that already has them. - res = self._get_response(volume_type, req_method='POST', - req_body=jsonutils.dump_as_bytes(body), - req_headers='application/json') - res_dict = jsonutils.loads(res.body) - - expected = { - 'badRequest': { - 'code': http_client.BAD_REQUEST, - 'message': ('Volume type encryption for type ' - '%s already exists.' % fake.VOLUME_TYPE_ID) - } - } - self.assertEqual(expected, res_dict) - db.volume_type_destroy(context.get_admin_context(), volume_type['id']) - - def test_create_volume_exists(self): - # Create the volume type and a volume with the volume type. 
- volume_type = self._default_volume_type - db.volume_type_create(context.get_admin_context(), volume_type) - db.volume_create(context.get_admin_context(), - {'id': fake.VOLUME_ID, - 'display_description': 'Test Desc', - 'size': 20, - 'status': 'creating', - 'instance_uuid': None, - 'host': 'dummy', - 'volume_type_id': volume_type['id']}) - - body = {"encryption": {'cipher': 'cipher', - 'key_size': 128, - 'control_location': 'front-end', - 'provider': 'fake_provider', - 'volume_type_id': volume_type['id']}} - - # Try to create encryption specs for a volume type - # with a volume. - res = self._get_response(volume_type, req_method='POST', - req_body=jsonutils.dump_as_bytes(body), - req_headers='application/json') - res_dict = jsonutils.loads(res.body) - - expected = { - 'badRequest': { - 'code': http_client.BAD_REQUEST, - 'message': ('Cannot create encryption specs. ' - 'Volume type in use.') - } - } - self.assertEqual(expected, res_dict) - db.volume_destroy(context.get_admin_context(), fake.VOLUME_ID) - db.volume_type_destroy(context.get_admin_context(), volume_type['id']) - - def _encryption_create_bad_body(self, body, - msg='Create body is not valid.'): - - volume_type = self._default_volume_type - db.volume_type_create(context.get_admin_context(), volume_type) - res = self._get_response(volume_type, req_method='POST', - req_body=jsonutils.dump_as_bytes(body), - req_headers='application/json') - - res_dict = jsonutils.loads(res.body) - - expected = { - 'badRequest': { - 'code': http_client.BAD_REQUEST, - 'message': (msg) - } - } - self.assertEqual(expected, res_dict) - db.volume_type_destroy(context.get_admin_context(), volume_type['id']) - - def test_create_no_body(self): - msg = "Missing required element 'encryption' in request body." - self._encryption_create_bad_body(body=None, msg=msg) - - def test_create_malformed_entity(self): - body = {'encryption': 'string'} - msg = "Missing required element 'encryption' in request body." 
- self._encryption_create_bad_body(body=body, msg=msg) - - def test_create_negative_key_size(self): - body = {"encryption": {'cipher': 'cipher', - 'key_size': -128, - 'provider': 'fake_provider', - 'volume_type_id': fake.VOLUME_TYPE_ID}} - msg = 'key_size must be >= 0' - self._encryption_create_bad_body(body=body, msg=msg) - - def test_create_none_key_size(self): - self._create('fake_cipher', 'front-end', None, 'fake_encryptor') - - def test_create_invalid_control_location(self): - body = {"encryption": {'cipher': 'cipher', - 'control_location': 'fake_control', - 'provider': 'fake_provider', - 'volume_type_id': fake.VOLUME_TYPE_ID}} - msg = ("Invalid input received: Valid control location are: " - "['front-end', 'back-end']") - self._encryption_create_bad_body(body=body, msg=msg) - - def test_create_no_provider(self): - body = {"encryption": {'cipher': 'cipher', - 'volume_type_id': fake.VOLUME_TYPE_ID}} - msg = ("Invalid input received: provider must be defined") - self._encryption_create_bad_body(body=body, msg=msg) - - def test_delete(self): - volume_type = self._default_volume_type - db.volume_type_create(context.get_admin_context(), volume_type) - - # Test that before create, there's nothing with a get - res = self._get_response(volume_type) - self.assertEqual(http_client.OK, res.status_code) - res_dict = jsonutils.loads(res.body) - self.assertEqual({}, res_dict) - - body = {"encryption": {'cipher': 'cipher', - 'key_size': 128, - 'control_location': 'front-end', - 'provider': 'fake_provider', - 'volume_type_id': volume_type['id']}} - - # Create, and test that get returns something - res = self._get_response(volume_type, req_method='POST', - req_body=jsonutils.dump_as_bytes(body), - req_headers='application/json') - res_dict = jsonutils.loads(res.body) - - res = self._get_response(volume_type, req_method='GET', - req_headers='application/json', - url='/v2/%s/types/%s/encryption') - self.assertEqual(http_client.OK, res.status_code) - res_dict = 
jsonutils.loads(res.body) - self.assertEqual(volume_type['id'], res_dict['volume_type_id']) - - # Delete, and test that get returns nothing - res = self._get_response(volume_type, req_method='DELETE', - req_headers='application/json', - url='/v2/%s/types/%s/encryption/provider') - self.assertEqual(http_client.ACCEPTED, res.status_code) - self.assertEqual(0, len(res.body)) - res = self._get_response(volume_type, req_method='GET', - req_headers='application/json', - url='/v2/%s/types/%s/encryption') - self.assertEqual(http_client.OK, res.status_code) - res_dict = jsonutils.loads(res.body) - self.assertEqual({}, res_dict) - - db.volume_type_destroy(context.get_admin_context(), volume_type['id']) - - def test_delete_with_volume_in_use(self): - # Create the volume type - volume_type = self._default_volume_type - db.volume_type_create(context.get_admin_context(), volume_type) - - body = {"encryption": {'cipher': 'cipher', - 'key_size': 128, - 'control_location': 'front-end', - 'provider': 'fake_provider', - 'volume_type_id': volume_type['id']}} - - # Create encryption with volume type, and test with GET - res = self._get_response(volume_type, req_method='POST', - req_body=jsonutils.dump_as_bytes(body), - req_headers='application/json') - res = self._get_response(volume_type, req_method='GET', - req_headers='application/json', - url='/v2/%s/types/%s/encryption') - self.assertEqual(http_client.OK, res.status_code) - res_dict = jsonutils.loads(res.body) - self.assertEqual(volume_type['id'], res_dict['volume_type_id']) - - # Create volumes with the volume type - db.volume_create(context.get_admin_context(), - {'id': fake.VOLUME_ID, - 'display_description': 'Test Desc', - 'size': 20, - 'status': 'creating', - 'instance_uuid': None, - 'host': 'dummy', - 'volume_type_id': volume_type['id']}) - - db.volume_create(context.get_admin_context(), - {'id': fake.VOLUME2_ID, - 'display_description': 'Test Desc2', - 'size': 2, - 'status': 'creating', - 'instance_uuid': None, - 'host': 
'dummy', - 'volume_type_id': volume_type['id']}) - - # Delete, and test that there is an error since volumes exist - res = self._get_response(volume_type, req_method='DELETE', - req_headers='application/json', - url='/v2/%s/types/%s/encryption/provider') - self.assertEqual(http_client.BAD_REQUEST, res.status_code) - res_dict = jsonutils.loads(res.body) - expected = { - 'badRequest': { - 'code': http_client.BAD_REQUEST, - 'message': 'Cannot delete encryption specs. ' - 'Volume type in use.' - } - } - self.assertEqual(expected, res_dict) - - # Delete the volumes - db.volume_destroy(context.get_admin_context(), fake.VOLUME_ID) - db.volume_destroy(context.get_admin_context(), fake.VOLUME2_ID) - - # Delete, and test that get returns nothing - res = self._get_response(volume_type, req_method='DELETE', - req_headers='application/json', - url='/v2/%s/types/%s/encryption/provider') - self.assertEqual(http_client.ACCEPTED, res.status_code) - self.assertEqual(0, len(res.body)) - res = self._get_response(volume_type, req_method='GET', - req_headers='application/json', - url='/v2/%s/types/%s/encryption') - self.assertEqual(http_client.OK, res.status_code) - res_dict = jsonutils.loads(res.body) - self.assertEqual({}, res_dict) - - db.volume_type_destroy(context.get_admin_context(), volume_type['id']) - - def test_delete_with_no_encryption(self): - volume_type = self._default_volume_type - # create a volume type - db.volume_type_create(context.get_admin_context(), volume_type) - - # without creating encryption type, try to delete - # and check if 404 is raised. - res = self._get_response(volume_type, req_method='DELETE', - req_headers='application/json', - url='/v2/%s/types/%s/encryption/provider') - self.assertEqual(http_client.NOT_FOUND, res.status_code) - expected = { - "itemNotFound": { - "message": "Volume type encryption for type " - "%s does not exist." 
% fake.VOLUME_TYPE_ID, - "code": http_client.NOT_FOUND - } - } - self.assertEqual(expected, jsonutils.loads(res.body)) - db.volume_type_destroy(context.get_admin_context(), volume_type['id']) - - @mock.patch('cinder.utils.validate_integer') - def test_update_item(self, mock_validate_integer): - mock_validate_integer.return_value = 512 - volume_type = self._default_volume_type - - # Create Encryption Specs - create_body = {"encryption": {'cipher': 'cipher', - 'control_location': 'front-end', - 'key_size': 128, - 'provider': 'fake_provider', - 'volume_type_id': volume_type['id']}} - self._create_type_and_encryption(volume_type, create_body) - - # Update Encryption Specs - update_body = {"encryption": {'key_size': 512, - 'provider': 'fake_provider2'}} - - res = self.\ - _get_response(volume_type, req_method='PUT', - req_body=jsonutils.dump_as_bytes(update_body), - req_headers='application/json', - url='/v2/%s/types/%s/encryption/' + - fake.ENCRYPTION_KEY_ID) - - res_dict = jsonutils.loads(res.body) - self.assertEqual(512, res_dict['encryption']['key_size']) - self.assertEqual('fake_provider2', res_dict['encryption']['provider']) - - # Get Encryption Specs - res = self._get_response(volume_type) - res_dict = jsonutils.loads(res.body) - - # Confirm Encryption Specs - self.assertEqual(512, res_dict['key_size']) - self.assertEqual('fake_provider2', res_dict['provider']) - self.assertTrue(mock_validate_integer.called) - - db.volume_type_destroy(context.get_admin_context(), volume_type['id']) - - def _encryption_update_bad_body(self, update_body, msg): - - # Create Volume Type and Encryption - volume_type = self._default_volume_type - res = self._create_type_and_encryption(volume_type) - # Update Encryption - res = self.\ - _get_response(volume_type, req_method='PUT', - req_body=jsonutils.dump_as_bytes(update_body), - req_headers='application/json', - url='/v2/%s/types/%s/encryption/' + - fake.ENCRYPTION_KEY_ID) - res_dict = jsonutils.loads(res.body) - - expected = { - 
'badRequest': { - 'code': http_client.BAD_REQUEST, - 'message': (msg) - } - } - - # Confirm Failure - self.assertEqual(expected, res_dict) - db.volume_type_destroy(context.get_admin_context(), volume_type['id']) - - def test_update_too_many_items(self): - update_body = {"encryption": {'key_size': 512}, - "encryption2": {'key_size': 256}} - msg = 'Request body contains too many items.' - self._encryption_update_bad_body(update_body, msg) - - def test_update_key_size_non_integer(self): - update_body = {"encryption": {'key_size': 'abc'}} - msg = 'key_size must be an integer.' - self._encryption_update_bad_body(update_body, msg) - - def test_update_item_invalid_body(self): - update_body = {"key_size": "value1"} - msg = "Missing required element 'encryption' in request body." - self._encryption_update_bad_body(update_body, msg) - - def _encryption_empty_update(self, update_body): - msg = "Missing required element 'encryption' in request body." - self._encryption_update_bad_body(update_body, msg) - - def test_update_no_body(self): - self._encryption_empty_update(update_body=None) - - def test_update_empty_body(self): - self._encryption_empty_update(update_body={}) - - def test_update_with_volume_in_use(self): - # Create the volume type and encryption - volume_type = self._default_volume_type - self._create_type_and_encryption(volume_type) - - # Create a volume with the volume type - db.volume_create(context.get_admin_context(), - {'id': fake.VOLUME_ID, - 'display_description': 'Test Desc', - 'size': 20, - 'status': 'creating', - 'instance_uuid': None, - 'host': 'dummy', - 'volume_type_id': volume_type['id']}) - - # Get the Encryption - res = self._get_response(volume_type) - self.assertEqual(http_client.OK, res.status_code) - res_dict = jsonutils.loads(res.body) - self.assertEqual(volume_type['id'], res_dict['volume_type_id']) - - # Update, and test that there is an error since volumes exist - update_body = {"encryption": {'key_size': 512}} - - res = self.\ - 
_get_response(volume_type, req_method='PUT', - req_body=jsonutils.dump_as_bytes(update_body), - req_headers='application/json', - url='/v2/%s/types/%s/encryption/' + - fake.ENCRYPTION_KEY_ID) - self.assertEqual(http_client.BAD_REQUEST, res.status_code) - res_dict = jsonutils.loads(res.body) - expected = { - 'badRequest': { - 'code': http_client.BAD_REQUEST, - 'message': 'Cannot update encryption specs. ' - 'Volume type in use.' - } - } - self.assertEqual(expected, res_dict) diff --git a/cinder/tests/unit/api/contrib/test_volume_unmanage.py b/cinder/tests/unit/api/contrib/test_volume_unmanage.py deleted file mode 100644 index 6e35707d5..000000000 --- a/cinder/tests/unit/api/contrib/test_volume_unmanage.py +++ /dev/null @@ -1,97 +0,0 @@ -# Copyright 2014 IBM Corp. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -import mock -from oslo_serialization import jsonutils -from six.moves import http_client -import webob - -from cinder import context -from cinder import db -from cinder import objects -from cinder.objects import fields -from cinder import test -from cinder.tests.unit.api import fakes -from cinder.tests.unit import fake_constants as fake -from cinder.tests.unit import utils - - -class VolumeUnmanageTest(test.TestCase): - """Test cases for cinder/api/contrib/volume_unmanage.py - - The API extension adds an action to volumes, "os-unmanage", which will - effectively issue a delete operation on the volume, but with a flag set - that means that a different method will be invoked on the driver, so that - the volume is not actually deleted in the storage backend. - - In this set of test cases, we are ensuring that the code correctly parses - the request structure and raises the correct exceptions when things are not - right, and calls down into cinder.volume.api.API.delete with the correct - arguments. 
- """ - - def setUp(self): - super(VolumeUnmanageTest, self).setUp() - self.ctxt = context.RequestContext(fake.USER_ID, fake.PROJECT_ID, True) - - api = fakes.router.APIRouter() - self.app = fakes.urlmap.URLMap() - self.app['/v2'] = api - - def _get_resp(self, volume_id): - """Helper to build an os-unmanage req for the specified volume_id.""" - req = webob.Request.blank('/v2/%s/volumes/%s/action' % - (self.ctxt.project_id, volume_id)) - req.method = 'POST' - req.headers['Content-Type'] = 'application/json' - req.environ['cinder.context'] = self.ctxt - body = {'os-unmanage': ''} - req.body = jsonutils.dump_as_bytes(body) - res = req.get_response(self.app) - return res - - @mock.patch('cinder.volume.rpcapi.VolumeAPI.delete_volume') - def test_unmanage_volume_ok(self, mock_rpcapi): - """Return success for valid and unattached volume.""" - vol = utils.create_volume(self.ctxt) - res = self._get_resp(vol.id) - self.assertEqual(http_client.ACCEPTED, res.status_int, res) - - mock_rpcapi.assert_called_once_with(self.ctxt, mock.ANY, True, False) - vol = objects.volume.Volume.get_by_id(self.ctxt, vol.id) - self.assertEqual('unmanaging', vol.status) - db.volume_destroy(self.ctxt, vol.id) - - def test_unmanage_volume_bad_volume_id(self): - """Return 404 if the volume does not exist.""" - res = self._get_resp(fake.WILL_NOT_BE_FOUND_ID) - self.assertEqual(http_client.NOT_FOUND, res.status_int, res) - - def test_unmanage_volume_attached(self): - """Return 400 if the volume exists but is attached.""" - vol = utils.create_volume( - self.ctxt, status='in-use', - attach_status=fields.VolumeAttachStatus.ATTACHED) - res = self._get_resp(vol.id) - self.assertEqual(http_client.BAD_REQUEST, res.status_int, res) - db.volume_destroy(self.ctxt, vol.id) - - def test_unmanage_volume_with_snapshots(self): - """Return 400 if the volume exists but has snapshots.""" - vol = utils.create_volume(self.ctxt) - snap = utils.create_snapshot(self.ctxt, vol.id) - res = self._get_resp(vol.id) - 
self.assertEqual(http_client.BAD_REQUEST, res.status_int, res) - db.volume_destroy(self.ctxt, vol.id) - db.snapshot_destroy(self.ctxt, snap.id) diff --git a/cinder/tests/unit/api/fakes.py b/cinder/tests/unit/api/fakes.py deleted file mode 100644 index d30ed3d8b..000000000 --- a/cinder/tests/unit/api/fakes.py +++ /dev/null @@ -1,183 +0,0 @@ -# Copyright 2010 OpenStack Foundation -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import uuid - -from oslo_service import wsgi -from oslo_utils import timeutils -import routes -import webob -import webob.dec -import webob.request - -from cinder.api.middleware import auth -from cinder.api.middleware import fault -from cinder.api.openstack import api_version_request as api_version -from cinder.api.openstack import wsgi as os_wsgi -from cinder.api import urlmap -from cinder.api.v2 import limits -from cinder.api.v2 import router -from cinder.api.v3 import router as router_v3 -from cinder.api import versions -from cinder import context -from cinder.tests.unit import fake_constants as fake - - -FAKE_UUID = 'aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa' -FAKE_UUIDS = {} - - -class Context(object): - pass - - -class FakeRouter(wsgi.Router): - def __init__(self, ext_mgr=None): - pass - - @webob.dec.wsgify - def __call__(self, req): - res = webob.Response() - res.status = '200' - res.headers['X-Test-Success'] = 'True' - return res - - -@webob.dec.wsgify -def fake_wsgi(self, req): - return self.application - - -def 
wsgi_app(inner_app_v2=None, fake_auth=True, fake_auth_context=None, - use_no_auth=False, ext_mgr=None, - inner_app_v3=None): - if not inner_app_v2: - inner_app_v2 = router.APIRouter(ext_mgr) - - if not inner_app_v3: - inner_app_v3 = router_v3.APIRouter(ext_mgr) - - if fake_auth: - if fake_auth_context is not None: - ctxt = fake_auth_context - else: - ctxt = context.RequestContext(fake.USER_ID, fake.PROJECT_ID, - auth_token=True) - api_v2 = fault.FaultWrapper(auth.InjectContext(ctxt, - inner_app_v2)) - api_v3 = fault.FaultWrapper(auth.InjectContext(ctxt, - inner_app_v3)) - elif use_no_auth: - api_v2 = fault.FaultWrapper(auth.NoAuthMiddleware( - limits.RateLimitingMiddleware(inner_app_v2))) - api_v3 = fault.FaultWrapper(auth.NoAuthMiddleware( - limits.RateLimitingMiddleware(inner_app_v3))) - else: - api_v2 = fault.FaultWrapper(auth.AuthMiddleware( - limits.RateLimitingMiddleware(inner_app_v2))) - api_v3 = fault.FaultWrapper(auth.AuthMiddleware( - limits.RateLimitingMiddleware(inner_app_v3))) - - mapper = urlmap.URLMap() - mapper['/v2'] = api_v2 - mapper['/v3'] = api_v3 - mapper['/'] = fault.FaultWrapper(versions.VersionsController()) - return mapper - - -class FakeToken(object): - id_count = 0 - - def __getitem__(self, key): - return getattr(self, key) - - def __init__(self, **kwargs): - FakeToken.id_count += 1 - self.id = FakeToken.id_count - for k, v in kwargs.items(): - setattr(self, k, v) - - -class FakeRequestContext(context.RequestContext): - def __init__(self, *args, **kwargs): - kwargs['auth_token'] = kwargs.get(fake.USER_ID, fake.PROJECT_ID) - super(FakeRequestContext, self).__init__(*args, **kwargs) - - -class HTTPRequest(webob.Request): - - @classmethod - def blank(cls, *args, **kwargs): - if args is not None: - if 'v1' in args[0]: - kwargs['base_url'] = 'http://localhost/v1' - if 'v2' in args[0]: - kwargs['base_url'] = 'http://localhost/v2' - if 'v3' in args[0]: - kwargs['base_url'] = 'http://localhost/v3' - use_admin_context = 
kwargs.pop('use_admin_context', False) - version = kwargs.pop('version', api_version._MIN_API_VERSION) - out = os_wsgi.Request.blank(*args, **kwargs) - out.environ['cinder.context'] = FakeRequestContext( - fake.USER_ID, - fake.PROJECT_ID, - is_admin=use_admin_context) - out.api_version_request = api_version.APIVersionRequest(version) - return out - - -class TestRouter(wsgi.Router): - def __init__(self, controller): - mapper = routes.Mapper() - mapper.resource("test", "tests", - controller=os_wsgi.Resource(controller)) - super(TestRouter, self).__init__(mapper) - - -class FakeAuthDatabase(object): - data = {} - - @staticmethod - def auth_token_get(context, token_hash): - return FakeAuthDatabase.data.get(token_hash, None) - - @staticmethod - def auth_token_create(context, token): - fake_token = FakeToken(created_at=timeutils.utcnow(), **token) - FakeAuthDatabase.data[fake_token.token_hash] = fake_token - FakeAuthDatabase.data['id_%i' % fake_token.id] = fake_token - return fake_token - - @staticmethod - def auth_token_destroy(context, token_id): - token = FakeAuthDatabase.data.get('id_%i' % token_id) - if token and token.token_hash in FakeAuthDatabase.data: - del FakeAuthDatabase.data[token.token_hash] - del FakeAuthDatabase.data['id_%i' % token_id] - - -class FakeRateLimiter(object): - def __init__(self, application): - self.application = application - - @webob.dec.wsgify - def __call__(self, req): - return self.application - - -def get_fake_uuid(token=0): - if token not in FAKE_UUIDS: - FAKE_UUIDS[token] = str(uuid.uuid4()) - return FAKE_UUIDS[token] diff --git a/cinder/tests/unit/api/middleware/__init__.py b/cinder/tests/unit/api/middleware/__init__.py deleted file mode 100644 index e69de29bb..000000000 diff --git a/cinder/tests/unit/api/middleware/test_auth.py b/cinder/tests/unit/api/middleware/test_auth.py deleted file mode 100644 index cb17b5da4..000000000 --- a/cinder/tests/unit/api/middleware/test_auth.py +++ /dev/null @@ -1,77 +0,0 @@ -# Copyright (c) 2012 
OpenStack Foundation -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from oslo_middleware import request_id -from six.moves import http_client -import webob - -import cinder.api.middleware.auth -from cinder import test - - -class TestCinderKeystoneContextMiddleware(test.TestCase): - - def setUp(self): - super(TestCinderKeystoneContextMiddleware, self).setUp() - - @webob.dec.wsgify() - def fake_app(req): - self.context = req.environ['cinder.context'] - return webob.Response() - - self.context = None - self.middleware = (cinder.api.middleware.auth - .CinderKeystoneContext(fake_app)) - self.request = webob.Request.blank('/') - self.request.headers['X_TENANT_ID'] = 'testtenantid' - self.request.headers['X_AUTH_TOKEN'] = 'testauthtoken' - - def test_no_user_or_user_id(self): - response = self.request.get_response(self.middleware) - self.assertEqual(http_client.UNAUTHORIZED, response.status_int) - - def test_user_only(self): - self.request.headers['X_USER'] = 'testuser' - response = self.request.get_response(self.middleware) - self.assertEqual(http_client.OK, response.status_int) - self.assertEqual('testuser', self.context.user_id) - - def test_user_id_only(self): - self.request.headers['X_USER_ID'] = 'testuserid' - response = self.request.get_response(self.middleware) - self.assertEqual(http_client.OK, response.status_int) - self.assertEqual('testuserid', self.context.user_id) - - def test_user_id_trumps_user(self): - self.request.headers['X_USER_ID'] = 'testuserid' - 
self.request.headers['X_USER'] = 'testuser' - response = self.request.get_response(self.middleware) - self.assertEqual(http_client.OK, response.status_int) - self.assertEqual('testuserid', self.context.user_id) - - def test_tenant_id_name(self): - self.request.headers['X_USER_ID'] = 'testuserid' - self.request.headers['X_TENANT_NAME'] = 'testtenantname' - response = self.request.get_response(self.middleware) - self.assertEqual(http_client.OK, response.status_int) - self.assertEqual('testtenantid', self.context.project_id) - self.assertEqual('testtenantname', self.context.project_name) - - def test_request_id_extracted_from_env(self): - req_id = 'dummy-request-id' - self.request.headers['X_PROJECT_ID'] = 'testtenantid' - self.request.headers['X_USER_ID'] = 'testuserid' - self.request.environ[request_id.ENV_REQUEST_ID] = req_id - self.request.get_response(self.middleware) - self.assertEqual(req_id, self.context.request_id) diff --git a/cinder/tests/unit/api/middleware/test_faults.py b/cinder/tests/unit/api/middleware/test_faults.py deleted file mode 100644 index a284b7854..000000000 --- a/cinder/tests/unit/api/middleware/test_faults.py +++ /dev/null @@ -1,81 +0,0 @@ -# Copyright 2010 OpenStack Foundation -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
-from oslo_i18n import fixture as i18n_fixture -from oslo_serialization import jsonutils -from six.moves import http_client -import webob.dec - -from cinder.api.openstack import wsgi -from cinder import test - - -class TestFaults(test.TestCase): - """Tests covering `cinder.api.openstack.faults:Fault` class.""" - - def setUp(self): - super(TestFaults, self).setUp() - self.useFixture(i18n_fixture.ToggleLazy(True)) - - def test_400_fault_json(self): - """Test fault serialized to JSON via file-extension and/or header.""" - requests = [ - webob.Request.blank('/.json'), - webob.Request.blank('/', headers={"Accept": "application/json"}), - ] - - for request in requests: - fault = wsgi.Fault(webob.exc.HTTPBadRequest(explanation='scram')) - response = request.get_response(fault) - - expected = { - "badRequest": { - "message": "scram", - "code": http_client.BAD_REQUEST, - }, - } - actual = jsonutils.loads(response.body) - - self.assertEqual("application/json", response.content_type) - self.assertEqual(expected, actual) - - def test_413_fault_json(self): - """Test fault serialized to JSON via file-extension and/or header.""" - requests = [ - webob.Request.blank('/.json'), - webob.Request.blank('/', headers={"Accept": "application/json"}), - ] - - for request in requests: - exc = webob.exc.HTTPRequestEntityTooLarge - fault = wsgi.Fault(exc(explanation='sorry', - headers={'Retry-After': '4'})) - response = request.get_response(fault) - - expected = { - "overLimit": { - "message": "sorry", - "code": http_client.REQUEST_ENTITY_TOO_LARGE, - "retryAfter": "4", - }, - } - actual = jsonutils.loads(response.body) - - self.assertEqual("application/json", response.content_type) - self.assertEqual(expected, actual) - - def test_fault_has_status_int(self): - """Ensure the status_int is set correctly on faults.""" - fault = wsgi.Fault(webob.exc.HTTPBadRequest(explanation='what?')) - self.assertEqual(http_client.BAD_REQUEST, fault.status_int) diff --git 
a/cinder/tests/unit/api/openstack/__init__.py b/cinder/tests/unit/api/openstack/__init__.py deleted file mode 100644 index e69de29bb..000000000 diff --git a/cinder/tests/unit/api/openstack/test_api_version_request.py b/cinder/tests/unit/api/openstack/test_api_version_request.py deleted file mode 100644 index 214842c92..000000000 --- a/cinder/tests/unit/api/openstack/test_api_version_request.py +++ /dev/null @@ -1,151 +0,0 @@ -# Copyright 2014 IBM Corp. -# Copyright 2015 Clinton Knight -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -import ddt -import six - -from cinder.api.openstack import api_version_request -from cinder import exception -from cinder import test - - -@ddt.ddt -class APIVersionRequestTests(test.TestCase): - - def test_init(self): - - result = api_version_request.APIVersionRequest() - - self.assertIsNone(result._ver_major) - self.assertIsNone(result._ver_minor) - - def test_min_version(self): - - self.assertEqual( - api_version_request.APIVersionRequest( - api_version_request._MIN_API_VERSION), - api_version_request.min_api_version()) - - def test_max_api_version(self): - - self.assertEqual( - api_version_request.APIVersionRequest( - api_version_request._MAX_API_VERSION), - api_version_request.max_api_version()) - - @ddt.data( - ('1.1', 1, 1), - ('2.10', 2, 10), - ('5.234', 5, 234), - ('12.5', 12, 5), - ('2.0', 2, 0), - ('2.200', 2, 200) - ) - @ddt.unpack - def test_valid_version_strings(self, version_string, major, minor): - - request = api_version_request.APIVersionRequest(version_string) - - self.assertEqual(major, request._ver_major) - self.assertEqual(minor, request._ver_minor) - - def test_null_version(self): - v = api_version_request.APIVersionRequest() - self.assertFalse(v) - - def test_not_null_version(self): - v = api_version_request.APIVersionRequest('1.1') - self.assertTrue(v) - - @ddt.data('2', '200', '2.1.4', '200.23.66.3', '5 .3', '5. 
3', - '5.03', '02.1', '2.001', '', ' 2.1', '2.1 ') - def test_invalid_version_strings(self, version_string): - - self.assertRaises(exception.InvalidAPIVersionString, - api_version_request.APIVersionRequest, - version_string) - - def test_cmpkey(self): - request = api_version_request.APIVersionRequest('1.2') - self.assertEqual((1, 2), request._cmpkey()) - - def test_version_comparisons(self): - v1 = api_version_request.APIVersionRequest('2.0') - v2 = api_version_request.APIVersionRequest('2.5') - v3 = api_version_request.APIVersionRequest('5.23') - v4 = api_version_request.APIVersionRequest('2.0') - v_null = api_version_request.APIVersionRequest() - - self.assertLess(v1, v2) - self.assertLessEqual(v1, v2) - self.assertGreater(v3, v2) - self.assertGreaterEqual(v3, v2) - self.assertNotEqual(v1, v2) - self.assertEqual(v1, v4) - self.assertNotEqual(v1, v_null) - self.assertEqual(v_null, v_null) - self.assertNotEqual('2.0', v1) - - def test_version_matches(self): - v1 = api_version_request.APIVersionRequest('2.0') - v2 = api_version_request.APIVersionRequest('2.5') - v3 = api_version_request.APIVersionRequest('2.45') - v4 = api_version_request.APIVersionRequest('3.3') - v5 = api_version_request.APIVersionRequest('3.23') - v6 = api_version_request.APIVersionRequest('2.0') - v7 = api_version_request.APIVersionRequest('3.3') - v8 = api_version_request.APIVersionRequest('4.0') - v_null = api_version_request.APIVersionRequest() - - self.assertTrue(v2.matches(v1, v3)) - self.assertTrue(v2.matches(v1, v_null)) - self.assertTrue(v1.matches(v6, v2)) - self.assertTrue(v4.matches(v2, v7)) - self.assertTrue(v4.matches(v_null, v7)) - self.assertTrue(v4.matches(v_null, v8)) - self.assertFalse(v1.matches(v2, v3)) - self.assertFalse(v5.matches(v2, v4)) - self.assertFalse(v2.matches(v3, v1)) - self.assertTrue(v1.matches(v_null, v_null)) - - self.assertRaises(ValueError, v_null.matches, v1, v3) - - def test_matches_versioned_method(self): - - request = 
api_version_request.APIVersionRequest('2.0') - - self.assertRaises(exception.InvalidParameterValue, - request.matches_versioned_method, - 'fake_method') - - def test_get_string(self): - v1_string = '3.23' - v1 = api_version_request.APIVersionRequest(v1_string) - self.assertEqual(v1_string, v1.get_string()) - - self.assertRaises(ValueError, - api_version_request.APIVersionRequest().get_string) - - @ddt.data(('1', '0'), ('1', '1')) - @ddt.unpack - def test_str(self, major, minor): - request_input = '%s.%s' % (major, minor) - request = api_version_request.APIVersionRequest(request_input) - request_string = six.text_type(request) - - self.assertEqual('API Version Request ' - 'Major: %s, Minor: %s' % (major, minor), - request_string) diff --git a/cinder/tests/unit/api/openstack/test_versioned_method.py b/cinder/tests/unit/api/openstack/test_versioned_method.py deleted file mode 100644 index c5dbb510a..000000000 --- a/cinder/tests/unit/api/openstack/test_versioned_method.py +++ /dev/null @@ -1,36 +0,0 @@ -# Copyright 2015 Clinton Knight -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -import six - -from cinder.api.openstack import versioned_method -from cinder import test - - -class VersionedMethodTestCase(test.TestCase): - - def test_str(self): - args = ('fake_name', 'fake_min', 'fake_max') - method = versioned_method.VersionedMethod(*(args + (False, None))) - method_string = six.text_type(method) - - self.assertEqual('Version Method %s: min: %s, max: %s' % args, - method_string) - - def test_cmpkey(self): - method = versioned_method.VersionedMethod( - 'fake_name', 'fake_start_version', 'fake_end_version', False, - 'fake_func') - self.assertEqual('fake_start_version', method._cmpkey()) diff --git a/cinder/tests/unit/api/openstack/test_wsgi.py b/cinder/tests/unit/api/openstack/test_wsgi.py deleted file mode 100644 index 19d22a698..000000000 --- a/cinder/tests/unit/api/openstack/test_wsgi.py +++ /dev/null @@ -1,889 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -import ddt -import inspect - -import mock -from oslo_utils import encodeutils -from six.moves import http_client -import webob - -from cinder.api.openstack import wsgi -from cinder import exception -from cinder import test -from cinder.tests.unit.api import fakes - - -class RequestTest(test.TestCase): - def test_content_type_missing(self): - request = wsgi.Request.blank('/tests/123', method='POST') - request.body = b"" - self.assertIsNone(request.get_content_type()) - - def test_content_type_unsupported(self): - request = wsgi.Request.blank('/tests/123', method='POST') - request.headers["Content-Type"] = "text/html" - request.body = b"asdf
" - self.assertRaises(exception.InvalidContentType, - request.get_content_type) - - def test_content_type_with_charset(self): - request = wsgi.Request.blank('/tests/123') - request.headers["Content-Type"] = "application/json; charset=UTF-8" - result = request.get_content_type() - self.assertEqual("application/json", result) - - def test_content_type_from_accept(self): - for content_type in ('application/json', - 'application/vnd.openstack.volume+json'): - request = wsgi.Request.blank('/tests/123') - request.headers["Accept"] = content_type - result = request.best_match_content_type() - self.assertEqual(content_type, result) - - def test_content_type_from_accept_best(self): - request = wsgi.Request.blank('/tests/123') - request.headers["Accept"] = "application/json" - result = request.best_match_content_type() - self.assertEqual("application/json", result) - - def test_content_type_from_query_extension(self): - request = wsgi.Request.blank('/tests/123.json') - result = request.best_match_content_type() - self.assertEqual("application/json", result) - - request = wsgi.Request.blank('/tests/123.invalid') - result = request.best_match_content_type() - self.assertEqual("application/json", result) - - def test_content_type_accept_default(self): - request = wsgi.Request.blank('/tests/123.unsupported') - request.headers["Accept"] = "application/unsupported1" - result = request.best_match_content_type() - self.assertEqual("application/json", result) - - def test_best_match_language(self): - # Test that we are actually invoking language negotiation by webob - request = wsgi.Request.blank('/') - accepted = 'unknown-lang' - request.headers = {'Accept-Language': accepted} - - self.mock_object(request.accept_language, - 'best_match', return_value=None) - - self.assertIsNone(request.best_match_language()) - # If accept-language is not included or empty, match should be None - request.headers = {'Accept-Language': ''} - self.assertIsNone(request.best_match_language()) - 
request.headers.pop('Accept-Language') - self.assertIsNone(request.best_match_language()) - - def test_cache_and_retrieve_resources(self): - request = wsgi.Request.blank('/foo') - # Test that trying to retrieve a cached object on - # an empty cache fails gracefully - self.assertIsNone(request.cached_resource()) - self.assertIsNone(request.cached_resource_by_id('r-0')) - - resources = [] - for x in range(3): - resources.append({'id': 'r-%s' % x}) - - # Cache an empty list of resources using the default name - request.cache_resource([]) - self.assertEqual({}, request.cached_resource()) - self.assertIsNone(request.cached_resource('r-0')) - # Cache some resources - request.cache_resource(resources[:2]) - # Cache one resource - request.cache_resource(resources[2]) - # Cache a different resource name - other_resource = {'id': 'o-0'} - request.cache_resource(other_resource, name='other-resource') - - self.assertEqual(resources[0], request.cached_resource_by_id('r-0')) - self.assertEqual(resources[1], request.cached_resource_by_id('r-1')) - self.assertEqual(resources[2], request.cached_resource_by_id('r-2')) - self.assertIsNone(request.cached_resource_by_id('r-3')) - self.assertEqual({'r-0': resources[0], - 'r-1': resources[1], - 'r-2': resources[2]}, request.cached_resource()) - self.assertEqual(other_resource, - request.cached_resource_by_id('o-0', - name='other-resource')) - - def test_cache_and_retrieve_volumes(self): - self._test_cache_and_retrieve_resources('volume') - - def test_cache_and_retrieve_volume_types(self): - self._test_cache_and_retrieve_resources('volume_type') - - def test_cache_and_retrieve_snapshots(self): - self._test_cache_and_retrieve_resources('snapshot') - - def test_cache_and_retrieve_backups(self): - self._test_cache_and_retrieve_resources('backup') - - def _test_cache_and_retrieve_resources(self, resource_name): - """Generic helper for cache tests.""" - cache_all_func = 'cache_db_%ss' % resource_name - cache_one_func = 'cache_db_%s' % 
resource_name - get_db_all_func = 'get_db_%ss' % resource_name - get_db_one_func = 'get_db_%s' % resource_name - - r = wsgi.Request.blank('/foo') - resources = [] - for x in range(3): - resources.append({'id': 'id%s' % x}) - - # Store 2 - getattr(r, cache_all_func)(resources[:2]) - # Store 1 - getattr(r, cache_one_func)(resources[2]) - - self.assertEqual(resources[0], getattr(r, get_db_one_func)('id0')) - self.assertEqual(resources[1], getattr(r, get_db_one_func)('id1')) - self.assertEqual(resources[2], getattr(r, get_db_one_func)('id2')) - self.assertIsNone(getattr(r, get_db_one_func)('id3')) - self.assertEqual({'id0': resources[0], - 'id1': resources[1], - 'id2': resources[2]}, getattr(r, get_db_all_func)()) - - -class ActionDispatcherTest(test.TestCase): - def test_dispatch(self): - serializer = wsgi.ActionDispatcher() - serializer.create = lambda x: 'pants' - self.assertEqual('pants', serializer.dispatch({}, action='create')) - - def test_dispatch_action_None(self): - serializer = wsgi.ActionDispatcher() - serializer.create = lambda x: 'pants' - serializer.default = lambda x: 'trousers' - self.assertEqual('trousers', serializer.dispatch({}, action=None)) - - def test_dispatch_default(self): - serializer = wsgi.ActionDispatcher() - serializer.create = lambda x: 'pants' - serializer.default = lambda x: 'trousers' - self.assertEqual('trousers', serializer.dispatch({}, action='update')) - - -class DictSerializerTest(test.TestCase): - def test_dispatch_default(self): - serializer = wsgi.DictSerializer() - self.assertEqual('', serializer.serialize({}, 'update')) - - -class JSONDictSerializerTest(test.TestCase): - def test_json(self): - input_dict = dict(servers=dict(a=(2, 3))) - expected_json = b'{"servers":{"a":[2,3]}}' - serializer = wsgi.JSONDictSerializer() - result = serializer.serialize(input_dict) - result = result.replace(b'\n', b'').replace(b' ', b'') - self.assertEqual(expected_json, result) - - -class TextDeserializerTest(test.TestCase): - def 
test_dispatch_default(self): - deserializer = wsgi.TextDeserializer() - self.assertEqual({}, deserializer.deserialize({}, 'update')) - - -class JSONDeserializerTest(test.TestCase): - def test_json(self): - data = """{"a": { - "a1": "1", - "a2": "2", - "bs": ["1", "2", "3", {"c": {"c1": "1"}}], - "d": {"e": "1"}, - "f": "1"}}""" - as_dict = { - 'body': { - 'a': { - 'a1': '1', - 'a2': '2', - 'bs': ['1', '2', '3', {'c': {'c1': '1'}}], - 'd': {'e': '1'}, - 'f': '1', - }, - }, - } - deserializer = wsgi.JSONDeserializer() - self.assertEqual(as_dict, deserializer.deserialize(data)) - - -class ResourceTest(test.TestCase): - def test_resource_call(self): - class Controller(object): - def index(self, req): - return 'off' - - req = webob.Request.blank('/tests') - app = fakes.TestRouter(Controller()) - response = req.get_response(app) - self.assertEqual(b'off', response.body) - self.assertEqual(http_client.OK, response.status_int) - - def test_resource_not_authorized(self): - class Controller(object): - def index(self, req): - raise exception.NotAuthorized() - - req = webob.Request.blank('/tests') - app = fakes.TestRouter(Controller()) - response = req.get_response(app) - self.assertEqual(http_client.FORBIDDEN, response.status_int) - - def test_dispatch(self): - class Controller(object): - def index(self, req, pants=None): - return pants - - controller = Controller() - resource = wsgi.Resource(controller) - method, _extensions = resource.get_method(None, 'index', None, '') - actual = resource.dispatch(method, None, {'pants': 'off'}) - expected = 'off' - self.assertEqual(expected, actual) - - @mock.patch('oslo_utils.strutils.mask_password') - def test_process_stack_non_ascii(self, masker): - class Controller(wsgi.Controller): - @wsgi.action('fooAction') - def fooAction(self, req, id, body): - return 'done' - - controller = Controller() - resource = wsgi.Resource(controller) - # The following body has a non-ascii chars - serialized_body = '{"foo": {"nonascii": 
"\xe2\x80\x96\xe2\x88\xa5"}}' - request = webob.Request.blank('/tests/fooAction') - action_args = {'id': 12} - # Now test _process_stack() mainline flow. - # Without the fix to safe_decode the body in _process_stack(), - # this test fails with: - # UnicodeDecodeError: 'ascii' codec can't decode byte 0xe2 in - # position 22: ordinal not in range(128) - response = resource._process_stack(request, 'fooAction', action_args, - 'application/json', serialized_body, - 'application/json') - self.assertEqual('done', response) - # The following check verifies that mask_password was called with - # the decoded body. - self.assertEqual(1, masker.call_count) - decoded_body = encodeutils.safe_decode( - serialized_body, errors='ignore') - self.assertIn(decoded_body, masker.call_args[0][0]) - - def test_get_method_undefined_controller_action(self): - class Controller(object): - def index(self, req, pants=None): - return pants - - controller = Controller() - resource = wsgi.Resource(controller) - self.assertRaises(AttributeError, resource.get_method, - None, 'create', None, '') - - def test_get_method_action_json(self): - class Controller(wsgi.Controller): - @wsgi.action('fooAction') - def _action_foo(self, req, id, body): - return body - - controller = Controller() - resource = wsgi.Resource(controller) - method, _extensions = resource.get_method(None, 'action', - 'application/json', - '{"fooAction": true}') - self.assertEqual(controller._action_foo, method) - - def test_get_method_action_bad_body(self): - class Controller(wsgi.Controller): - @wsgi.action('fooAction') - def _action_foo(self, req, id, body): - return body - - controller = Controller() - resource = wsgi.Resource(controller) - self.assertRaises(exception.MalformedRequestBody, resource.get_method, - None, 'action', 'application/json', '{}') - - def test_get_method_unknown_controller_action(self): - class Controller(wsgi.Controller): - @wsgi.action('fooAction') - def _action_foo(self, req, id, body): - return body - - 
controller = Controller() - resource = wsgi.Resource(controller) - self.assertRaises(KeyError, resource.get_method, - None, 'action', 'application/json', - '{"barAction": true}') - - def test_get_action_args(self): - class Controller(object): - def index(self, req, pants=None): - return pants - - controller = Controller() - resource = wsgi.Resource(controller) - - env = { - 'wsgiorg.routing_args': [None, { - 'controller': None, - 'format': None, - 'action': 'update', - 'id': 12, - }], - } - - expected = {'action': 'update', 'id': 12} - - self.assertEqual(expected, resource.get_action_args(env)) - - def test_get_body_bad_content(self): - class Controller(object): - def index(self, req, pants=None): - return pants - - controller = Controller() - resource = wsgi.Resource(controller) - - request = wsgi.Request.blank('/', method='POST') - request.headers['Content-Type'] = 'application/none' - request.body = b'foo' - - content_type, body = resource.get_body(request) - self.assertIsNone(content_type) - self.assertEqual('', body) - - def test_get_body_no_content_type(self): - class Controller(object): - def index(self, req, pants=None): - return pants - - controller = Controller() - resource = wsgi.Resource(controller) - - request = wsgi.Request.blank('/', method='POST') - request.body = b'foo' - - content_type, body = resource.get_body(request) - self.assertIsNone(content_type) - self.assertEqual('', body) - - def test_get_body_no_content_body(self): - class Controller(object): - def index(self, req, pants=None): - return pants - - controller = Controller() - resource = wsgi.Resource(controller) - - request = wsgi.Request.blank('/', method='POST') - request.headers['Content-Type'] = 'application/json' - request.body = b'' - - content_type, body = resource.get_body(request) - self.assertIsNone(content_type) - self.assertEqual('', body) - - def test_get_body(self): - class Controller(object): - def index(self, req, pants=None): - return pants - - controller = Controller() - 
resource = wsgi.Resource(controller) - - request = wsgi.Request.blank('/', method='POST') - request.headers['Content-Type'] = 'application/json' - request.body = b'foo' - - content_type, body = resource.get_body(request) - self.assertEqual('application/json', content_type) - self.assertEqual(b'foo', body) - - def test_deserialize_badtype(self): - class Controller(object): - def index(self, req, pants=None): - return pants - - controller = Controller() - resource = wsgi.Resource(controller) - self.assertRaises(exception.InvalidContentType, - resource.deserialize, - controller.index, 'application/none', 'foo') - - def test_deserialize_default(self): - class JSONDeserializer(object): - def deserialize(self, body): - return 'json' - - class Controller(object): - def index(self, req, pants=None): - return pants - - controller = Controller() - resource = wsgi.Resource(controller, json=JSONDeserializer) - - obj = resource.deserialize(controller.index, 'application/json', 'foo') - self.assertEqual('json', obj) - - def test_register_actions(self): - class Controller(object): - def index(self, req, pants=None): - return pants - - class ControllerExtended(wsgi.Controller): - @wsgi.action('fooAction') - def _action_foo(self, req, id, body): - return body - - @wsgi.action('barAction') - def _action_bar(self, req, id, body): - return body - - controller = Controller() - resource = wsgi.Resource(controller) - self.assertEqual({}, resource.wsgi_actions) - - extended = ControllerExtended() - resource.register_actions(extended) - self.assertEqual({'fooAction': extended._action_foo, - 'barAction': extended._action_bar, }, - resource.wsgi_actions) - - def test_register_extensions(self): - class Controller(object): - def index(self, req, pants=None): - return pants - - class ControllerExtended(wsgi.Controller): - @wsgi.extends - def index(self, req, resp_obj, pants=None): - return None - - @wsgi.extends(action='fooAction') - def _action_foo(self, req, resp, id, body): - return None - - 
controller = Controller() - resource = wsgi.Resource(controller) - self.assertEqual({}, resource.wsgi_extensions) - self.assertEqual({}, resource.wsgi_action_extensions) - - extended = ControllerExtended() - resource.register_extensions(extended) - self.assertEqual({'index': [extended.index]}, resource.wsgi_extensions) - self.assertEqual({'fooAction': [extended._action_foo]}, - resource.wsgi_action_extensions) - - def test_get_method_extensions(self): - class Controller(object): - def index(self, req, pants=None): - return pants - - class ControllerExtended(wsgi.Controller): - @wsgi.extends - def index(self, req, resp_obj, pants=None): - return None - - controller = Controller() - extended = ControllerExtended() - resource = wsgi.Resource(controller) - resource.register_extensions(extended) - method, extensions = resource.get_method(None, 'index', None, '') - self.assertEqual(controller.index, method) - self.assertEqual([extended.index], extensions) - - def test_get_method_action_extensions(self): - class Controller(wsgi.Controller): - def index(self, req, pants=None): - return pants - - @wsgi.action('fooAction') - def _action_foo(self, req, id, body): - return body - - class ControllerExtended(wsgi.Controller): - @wsgi.extends(action='fooAction') - def _action_foo(self, req, resp_obj, id, body): - return None - - controller = Controller() - extended = ControllerExtended() - resource = wsgi.Resource(controller) - resource.register_extensions(extended) - method, extensions = resource.get_method(None, 'action', - 'application/json', - '{"fooAction": true}') - self.assertEqual(controller._action_foo, method) - self.assertEqual([extended._action_foo], extensions) - - def test_get_method_action_whitelist_extensions(self): - class Controller(wsgi.Controller): - def index(self, req, pants=None): - return pants - - class ControllerExtended(wsgi.Controller): - @wsgi.action('create') - def _create(self, req, body): - pass - - @wsgi.action('delete') - def _delete(self, req, 
id): - pass - - controller = Controller() - extended = ControllerExtended() - resource = wsgi.Resource(controller) - resource.register_actions(extended) - - method, extensions = resource.get_method(None, 'create', - 'application/json', - '{"create": true}') - self.assertEqual(extended._create, method) - self.assertEqual([], extensions) - - method, extensions = resource.get_method(None, 'delete', None, None) - self.assertEqual(extended._delete, method) - self.assertEqual([], extensions) - - def test_pre_process_extensions_regular(self): - class Controller(object): - def index(self, req, pants=None): - return pants - - controller = Controller() - resource = wsgi.Resource(controller) - - called = [] - - def extension1(req, resp_obj): - called.append(1) - return None - - def extension2(req, resp_obj): - called.append(2) - return None - - extensions = [extension1, extension2] - response, post = resource.pre_process_extensions(extensions, None, {}) - self.assertEqual([], called) - self.assertIsNone(response) - self.assertEqual([extension2, extension1], list(post)) - - def test_pre_process_extensions_generator(self): - class Controller(object): - def index(self, req, pants=None): - return pants - - controller = Controller() - resource = wsgi.Resource(controller) - - called = [] - - def extension1(req): - called.append('pre1') - yield - called.append('post1') - - def extension2(req): - called.append('pre2') - yield - called.append('post2') - - extensions = [extension1, extension2] - response, post = resource.pre_process_extensions(extensions, None, {}) - post = list(post) - self.assertEqual(['pre1', 'pre2'], called) - self.assertIsNone(response) - self.assertEqual(2, len(post)) - self.assertTrue(inspect.isgenerator(post[0])) - self.assertTrue(inspect.isgenerator(post[1])) - - for gen in post: - try: - gen.send(None) - except StopIteration: - continue - - self.assertEqual(['pre1', 'pre2', 'post2', 'post1'], called) - - def 
test_pre_process_extensions_generator_response(self): - class Controller(object): - def index(self, req, pants=None): - return pants - - controller = Controller() - resource = wsgi.Resource(controller) - - called = [] - - def extension1(req): - called.append('pre1') - yield 'foo' - - def extension2(req): - called.append('pre2') - - extensions = [extension1, extension2] - response, post = resource.pre_process_extensions(extensions, None, {}) - self.assertEqual(['pre1'], called) - self.assertEqual('foo', response) - self.assertEqual([], post) - - def test_post_process_extensions_regular(self): - class Controller(object): - def index(self, req, pants=None): - return pants - - controller = Controller() - resource = wsgi.Resource(controller) - - called = [] - - def extension1(req, resp_obj): - called.append(1) - return None - - def extension2(req, resp_obj): - called.append(2) - return None - - response = resource.post_process_extensions([extension2, extension1], - None, None, {}) - self.assertEqual([2, 1], called) - self.assertIsNone(response) - - def test_post_process_extensions_regular_response(self): - class Controller(object): - def index(self, req, pants=None): - return pants - - controller = Controller() - resource = wsgi.Resource(controller) - - called = [] - - def extension1(req, resp_obj): - called.append(1) - return None - - def extension2(req, resp_obj): - called.append(2) - return 'foo' - - response = resource.post_process_extensions([extension2, extension1], - None, None, {}) - self.assertEqual([2], called) - self.assertEqual('foo', response) - - def test_post_process_extensions_version_not_found(self): - class Controller(object): - def index(self, req, pants=None): - return pants - - controller = Controller() - resource = wsgi.Resource(controller) - - called = [] - - def extension1(req, resp_obj): - called.append(1) - return 'bar' - - def extension2(req, resp_obj): - raise exception.VersionNotFoundForAPIMethod(version='fake_version') - - response = 
resource.post_process_extensions([extension2, extension1], - None, None, {}) - self.assertEqual([1], called) - self.assertEqual('bar', response) - - def test_post_process_extensions_generator(self): - class Controller(object): - def index(self, req, pants=None): - return pants - - controller = Controller() - resource = wsgi.Resource(controller) - - called = [] - - def extension1(req): - yield - called.append(1) - - def extension2(req): - yield - called.append(2) - - ext1 = extension1(None) - next(ext1) - ext2 = extension2(None) - next(ext2) - - response = resource.post_process_extensions([ext2, ext1], - None, None, {}) - - self.assertEqual([2, 1], called) - self.assertIsNone(response) - - def test_post_process_extensions_generator_response(self): - class Controller(object): - def index(self, req, pants=None): - return pants - - controller = Controller() - resource = wsgi.Resource(controller) - - called = [] - - def extension1(req): - yield - called.append(1) - - def extension2(req): - yield - called.append(2) - yield 'foo' - - ext1 = extension1(None) - next(ext1) - ext2 = extension2(None) - next(ext2) - - response = resource.post_process_extensions([ext2, ext1], - None, None, {}) - - self.assertEqual([2], called) - self.assertEqual('foo', response) - - -class ResponseObjectTest(test.TestCase): - def test_default_code(self): - robj = wsgi.ResponseObject({}) - self.assertEqual(http_client.OK, robj.code) - - def test_modified_code(self): - robj = wsgi.ResponseObject({}) - robj._default_code = http_client.ACCEPTED - self.assertEqual(http_client.ACCEPTED, robj.code) - - def test_override_default_code(self): - robj = wsgi.ResponseObject({}, code=http_client.NOT_FOUND) - self.assertEqual(http_client.NOT_FOUND, robj.code) - - def test_override_modified_code(self): - robj = wsgi.ResponseObject({}, code=http_client.NOT_FOUND) - robj._default_code = http_client.ACCEPTED - self.assertEqual(http_client.NOT_FOUND, robj.code) - - def test_set_header(self): - robj = 
wsgi.ResponseObject({}) - robj['Header'] = 'foo' - self.assertEqual({'header': 'foo'}, robj.headers) - - def test_get_header(self): - robj = wsgi.ResponseObject({}) - robj['Header'] = 'foo' - self.assertEqual('foo', robj['hEADER']) - - def test_del_header(self): - robj = wsgi.ResponseObject({}) - robj['Header'] = 'foo' - del robj['hEADER'] - self.assertNotIn('header', robj.headers) - - def test_header_isolation(self): - robj = wsgi.ResponseObject({}) - robj['Header'] = 'foo' - hdrs = robj.headers - hdrs['hEADER'] = 'bar' - self.assertEqual('foo', robj['hEADER']) - - def test_default_serializers(self): - robj = wsgi.ResponseObject({}) - self.assertEqual({}, robj.serializers) - - -@ddt.data -class ValidBodyTest(test.TestCase): - - def setUp(self): - super(ValidBodyTest, self).setUp() - self.controller = wsgi.Controller() - - def test_is_valid_body(self): - body = {'foo': {}} - self.assertTrue(self.controller.is_valid_body(body, 'foo')) - - def test_is_valid_body_none(self): - wsgi.Resource(controller=None) - self.assertFalse(self.controller.is_valid_body(None, 'foo')) - - def test_is_valid_body_empty(self): - wsgi.Resource(controller=None) - self.assertFalse(self.controller.is_valid_body({}, 'foo')) - - def test_is_valid_body_no_entity(self): - wsgi.Resource(controller=None) - body = {'bar': {}} - self.assertFalse(self.controller.is_valid_body(body, 'foo')) - - def test_is_valid_body_malformed_entity(self): - wsgi.Resource(controller=None) - body = {'foo': 'bar'} - self.assertFalse(self.controller.is_valid_body(body, 'foo')) - - def test_validate_string_length_with_name_too_long(self): - name = 'a' * 256 - self.assertRaises(webob.exc.HTTPBadRequest, - self.controller.validate_string_length, - name, 'Name', min_length=1, max_length=255, - remove_whitespaces=False) - - @ddt.data('name', 'display_name', 'description', 'display_description') - def test_validate_name_and_description_with_name_too_long(self, attribute): - body = {attribute: 'a' * 256} - 
self.assertRaises(webob.exc.HTTPBadRequest, - self.controller.validate_name_and_description, - body) - - @ddt.data('name', 'display_name', 'description', 'display_description') - def test_validate_name_and_description_with_name_as_int(self, attribute): - body = {attribute: 1234} - self.assertRaises(webob.exc.HTTPBadRequest, - self.controller.validate_name_and_description, - body) - - @ddt.data('name', 'display_name', 'description', 'display_description') - def test_validate_name_and_description_with_name_zero_length(self, - attribute): - # NOTE(jdg): We allow zero length names currently, particularly - # from Nova, changes to this require an API version bump - body = {attribute: ""} - self.controller.validate_name_and_description(body) - self.assertEqual('', body[attribute]) - - @ddt.data('name', 'display_name', 'description', 'display_description') - def test_validate_name_and_description_with_name_contains_white_spaces( - self, attribute): - body = {attribute: 'a' * 255 + " "} - self.controller.validate_name_and_description(body) - self.assertEqual('a' * 255, body[attribute]) diff --git a/cinder/tests/unit/api/test_common.py b/cinder/tests/unit/api/test_common.py deleted file mode 100644 index 0a8f62448..000000000 --- a/cinder/tests/unit/api/test_common.py +++ /dev/null @@ -1,519 +0,0 @@ -# Copyright 2010 OpenStack Foundation -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -""" -Test suites for 'common' code used throughout the OpenStack HTTP API. -""" - -import ddt -import mock -from testtools import matchers -import webob -import webob.exc - -from oslo_config import cfg - -from cinder.api import common -from cinder import test - - -NS = "{http://docs.openstack.org/compute/api/v1.1}" -ATOMNS = "{http://www.w3.org/2005/Atom}" -CONF = cfg.CONF - -TINY = list(range(1)) -SMALL = list(range(10)) -MEDIUM = list(range(1000)) -LARGE = list(range(10000)) -ITEMS = list(range(2000)) - - -@ddt.ddt -class LimiterTest(test.TestCase): - """Unit tests for the `cinder.api.common.limited` method. - - This method takes in a list of items and, depending on the 'offset' - and 'limit' GET params, returns a subset or complete set of the given - items. - """ - @ddt.data('/?offset=', '/?offset=123456789012346456', - u'/?offset=\u0020aa', '/?offset=-30', - u'/?limit=hello', '/?limit=-3000', - '/?offset=30034522235674530&limit=10') - def test_limiter_bad_offset_or_limit_values(self, value): - """Test limiter with bad offset or limit values - - This test includes next test cases: - 1) Offset key works with a blank offset; - 2) Offset key works with a offset out of range; - 3) Offset key works with a BAD offset; - 4) Offset value is negative; - 5) Limit value is bad; - 6) Limit value is negative value. 
- 7) With both offset and limit; - """ - req = webob.Request.blank(value) - self.assertRaises( - webob.exc.HTTPBadRequest, common.limited, SMALL, req) - - @ddt.data( - ({'req': '/?offset=0', 'values': ((TINY, TINY), - (SMALL, SMALL), - (MEDIUM, MEDIUM), - (LARGE[:1000], LARGE))}), - ({'req': '/?offset=10', 'values': (([], TINY), - (SMALL[10:], SMALL), - (MEDIUM[10:], MEDIUM), - (LARGE[10:1010], LARGE))}), - ({'req': '/?offset=1001', 'values': (([], TINY), - ([], SMALL), - ([], MEDIUM), - (LARGE[1001:2001], LARGE))}), - ({'req': '/', 'values': ((TINY, TINY), - (SMALL, SMALL), - (MEDIUM, MEDIUM), - (LARGE[:1000], LARGE))}), - ({'req': '/?limit=0', 'values': ((TINY, TINY), - (SMALL, SMALL), - (MEDIUM, MEDIUM), - (LARGE[:1000], LARGE))}), - ({'req': '/?limit=10', 'values': ((TINY, TINY), - (SMALL, SMALL), - (MEDIUM[:10], MEDIUM), - (LARGE[:10], LARGE))}), - ({'req': '/?limit=3000', 'values': ((TINY, TINY), - (SMALL, SMALL), - (MEDIUM, MEDIUM), - (LARGE[:1000], LARGE))})) - @ddt.unpack - def test_limiter(self, req, values): - """Test limited method with different input parameters. 
- - This test includes next test cases: - 1) Test offset key works with 0; - 2) Test offset key works with a medium sized number; - 3) Test offset key works with a number over 1000 (max_limit); - 4) Test request with no offset or limit; - 5) Test limit of zero; - 6) Test limit of 10; - 7) Test limit of 3000; - """ - req = webob.Request.blank(req) - for expected, value, in values: - self.assertEqual(expected, common.limited(value, req)) - - @ddt.data(('/?offset=1&limit=3', 1, 4), - ('/?offset=3&limit=0', 3, 1003), - ('/?offset=3&limit=1500', 3, 1003), - ('/?offset=3000&limit=10', 0, 0), - ('/?offset=1&limit=3', 1, 4, 2000), - ('/?offset=3&limit=0', 3, None, 2000), - ('/?offset=3&limit=2500', 3, None, 2000), - ('/?offset=3000&limit=10', 0, 0, 2000)) - @ddt.unpack - def test_limiter_with_offset_limit_max_limit(self, req, - slice_start, - slice_end, - max_limit=None): - """Test with both parameters offset and limit and custom max_limit.""" - # NOTE(mdovgal): using 0 as slice_start and slice_end we will - # get empty list as a result - # [3:None] equal to [3:] - req = webob.Request.blank(req) - self.assertEqual(ITEMS[slice_start:slice_end], common.limited(ITEMS, - req, max_limit=max_limit)) - - -class PaginationParamsTest(test.TestCase): - """Unit tests for `cinder.api.common.get_pagination_params` method. - - This method takes in a request object and returns 'marker' and 'limit' - GET params. 
- """ - - def test_nonnumerical_limit(self): - """Test nonnumerical limit param.""" - req = webob.Request.blank('/?limit=hello') - self.assertRaises( - webob.exc.HTTPBadRequest, common.get_pagination_params, - req.GET.copy()) - - @mock.patch.object(common, 'CONF') - def test_no_params(self, mock_cfg): - """Test no params.""" - mock_cfg.osapi_max_limit = 100 - req = webob.Request.blank('/') - expected = (None, 100, 0) - self.assertEqual(expected, - common.get_pagination_params(req.GET.copy())) - - def test_valid_marker(self): - """Test valid marker param.""" - marker = '263abb28-1de6-412f-b00b-f0ee0c4333c2' - req = webob.Request.blank('/?marker=' + marker) - expected = (marker, CONF.osapi_max_limit, 0) - self.assertEqual(expected, - common.get_pagination_params(req.GET.copy())) - - def test_valid_limit(self): - """Test valid limit param.""" - req = webob.Request.blank('/?limit=10') - expected = (None, 10, 0) - self.assertEqual(expected, - common.get_pagination_params(req.GET.copy())) - - def test_invalid_limit(self): - """Test invalid limit param.""" - req = webob.Request.blank('/?limit=-2') - self.assertRaises( - webob.exc.HTTPBadRequest, common.get_pagination_params, - req.GET.copy()) - - def test_valid_limit_and_marker(self): - """Test valid limit and marker parameters.""" - marker = '263abb28-1de6-412f-b00b-f0ee0c4333c2' - req = webob.Request.blank('/?limit=20&marker=%s' % marker) - expected = (marker, 20, 0) - self.assertEqual(expected, - common.get_pagination_params(req.GET.copy())) - - -@ddt.ddt -class SortParamUtilsTest(test.TestCase): - - @ddt.data(({'params': {}}, ['created_at'], ['desc']), - ({'params': {}, 'default_key': 'key1', 'default_dir': 'dir1'}, - ['key1'], ['dir1']), - ({'params': {'sort': 'key1:dir1'}}, ['key1'], ['dir1']), - ({'params': {'sort_key': 'key1', 'sort_dir': 'dir1'}}, - ['key1'], ['dir1']), - ({'params': {'sort': 'key1'}}, ['key1'], ['desc']), - ({'params': {'sort': 'key1:dir1,key2:dir2,key3:dir3'}}, - ['key1', 'key2', 'key3'], 
['dir1', 'dir2', 'dir3']), - ({'params': {'sort': 'key1:dir1,key2,key3:dir3'}}, - ['key1', 'key2', 'key3'], ['dir1', 'desc', 'dir3']), - ({'params': {'sort': 'key1:dir1,key2,key3'}, - 'default_dir': 'foo'}, - ['key1', 'key2', 'key3'], ['dir1', 'foo', 'foo']), - ({'params': {'sort': ' key1 : dir1,key2: dir2 , key3 '}}, - ['key1', 'key2', 'key3'], ['dir1', 'dir2', 'desc'])) - @ddt.unpack - def test_get_sort_params(self, parameters, expected_keys, expected_dirs): - """Test for get sort parameters method - - This test includes next test cases: - 1) Verifies the default sort key and direction. - 2) Verifies that the defaults can be overridden. - 3) Verifies a single sort key and direction. - 4) Verifies a single sort key and direction. - 5) Verifies a single sort value with a default direction. - 6) Verifies multiple sort parameter values. - 7) Verifies multiple sort keys without all directions. - 8) Verifies multiple sort keys and overriding default direction. - 9) Verifies that leading and trailing spaces are removed. 
- """ - sort_keys, sort_dirs = common.get_sort_params(**parameters) - self.assertEqual(expected_keys, sort_keys) - self.assertEqual(expected_dirs, sort_dirs) - - def test_get_sort_params_params_modified(self): - """Verifies that the input sort parameter are modified.""" - params = {'sort': 'key1:dir1,key2:dir2,key3:dir3'} - common.get_sort_params(params) - self.assertEqual({}, params) - - params = {'sort_key': 'key1', 'sort_dir': 'dir1'} - common.get_sort_params(params) - self.assertEqual({}, params) - - def test_get_params_mix_sort_and_old_params(self): - """An exception is raised if both types of sorting params are given.""" - for params in ({'sort': 'k1', 'sort_key': 'k1'}, - {'sort': 'k1', 'sort_dir': 'd1'}, - {'sort': 'k1', 'sort_key': 'k1', 'sort_dir': 'd2'}): - self.assertRaises(webob.exc.HTTPBadRequest, - common.get_sort_params, - params) - - -@ddt.ddt -class MiscFunctionsTest(test.TestCase): - - @ddt.data(('http://cinder.example.com/v1/images', - 'http://cinder.example.com/images'), - ('http://cinder.example.com/v1.1/images', - 'http://cinder.example.com/images'), - ('http://cinder.example.com/v1.1/', - 'http://cinder.example.com/'), - ('http://cinder.example.com/v10.10', - 'http://cinder.example.com'), - ('http://cinder.example.com/v1.1/images/v10.5', - 'http://cinder.example.com/images/v10.5'), - ('http://cinder.example.com/cinder/v2', - 'http://cinder.example.com/cinder')) - @ddt.unpack - def test_remove_version_from_href(self, fixture, expected): - """Test for removing version from href - - This test conatins following test-cases: - 1) remove major version from href - 2-5) remove version from href - 6) remove version from href version not trailing domain - """ - actual = common.remove_version_from_href(fixture) - self.assertEqual(expected, actual) - - @ddt.data('http://cinder.example.com/1.1/images', - 'http://cinder.example.com/v/images', - 'http://cinder.example.com/v1.1images') - def test_remove_version_from_href_bad_request(self, fixture): - 
self.assertRaises(ValueError, - common.remove_version_from_href, - fixture) - - -@ddt.ddt -class TestCollectionLinks(test.TestCase): - """Tests the _get_collection_links method.""" - - def _validate_next_link(self, item_count, osapi_max_limit, limit, - should_link_exist): - req = webob.Request.blank('/?limit=%s' % limit if limit else '/') - link_return = [{"rel": "next", "href": "fake_link"}] - self.flags(osapi_max_limit=osapi_max_limit) - if limit is None: - limited_list_size = min(item_count, osapi_max_limit) - else: - limited_list_size = min(item_count, osapi_max_limit, limit) - limited_list = [{"uuid": str(i)} for i in range(limited_list_size)] - builder = common.ViewBuilder() - - def get_pagination_params(params, max_limit=CONF.osapi_max_limit, - original_call=common.get_pagination_params): - return original_call(params, max_limit) - - def _get_limit_param(params, max_limit=CONF.osapi_max_limit, - original_call=common._get_limit_param): - return original_call(params, max_limit) - - with mock.patch.object(common, 'get_pagination_params', - get_pagination_params), \ - mock.patch.object(common, '_get_limit_param', - _get_limit_param), \ - mock.patch.object(common.ViewBuilder, '_generate_next_link', - return_value=link_return) as href_link_mock: - results = builder._get_collection_links(req, limited_list, - mock.sentinel.coll_key, - item_count, "uuid") - if should_link_exist: - href_link_mock.assert_called_once_with(limited_list, "uuid", - req, - mock.sentinel.coll_key) - self.assertThat(results, matchers.HasLength(1)) - else: - self.assertFalse(href_link_mock.called) - self.assertThat(results, matchers.HasLength(0)) - - @ddt.data((5, 5, True), (5, 5, True, 4), (5, 5, True, 5), - (5, 5, True, 6), (5, 7, False), (5, 7, True, 4), - (5, 7, True, 5), (5, 7, False, 6), (5, 7, False, 7), - (5, 7, False, 8), (5, 3, True), (5, 3, True, 2), - (5, 3, True, 3), (5, 3, True, 4), (5, 3, True, 5), - (5, 3, True, 6)) - @ddt.unpack - def test_items(self, item_count, 
osapi_max_limit, - should_link_exist, limit=None): - """Test - - 1) Items count equals osapi_max_limit without limit; - 2) Items count equals osapi_max_limit and greater than limit; - 3) Items count equals osapi_max_limit and equals limit; - 4) Items count equals osapi_max_limit and less than limit; - 5) Items count less than osapi_max_limit without limit; - 6) Limit less than items count and less than osapi_max_limit; - 7) Limit equals items count and less than osapi_max_limit; - 8) Items count less than limit and less than osapi_max_limit; - 9) Items count less than osapi_max_limit and equals limit; - 10) Items count less than osapi_max_limit and less than limit; - 11) Items count greater than osapi_max_limit without limit; - 12) Limit less than items count and greater than osapi_max_limit; - 13) Items count greater than osapi_max_limit and equals limit; - 14) Items count greater than limit and greater than osapi_max_limit; - 15) Items count equals limit and greater than osapi_max_limit; - 16) Limit greater than items count and greater than osapi_max_limit; - """ - self._validate_next_link(item_count, osapi_max_limit, limit, - should_link_exist) - - -@ddt.ddt -class GeneralFiltersTest(test.TestCase): - - @ddt.data({'filters': {'volume': ['key1', 'key2']}, - 'resource': 'volume', - 'expected': {'volume': ['key1', 'key2']}}, - {'filters': {'volume': ['key1', 'key2']}, - 'resource': 'snapshot', - 'expected': {}}, - {'filters': {'volume': ['key1', 'key2']}, - 'resource': None, - 'expected': {'volume': ['key1', 'key2']}}) - @ddt.unpack - def test_get_enabled_resource_filters(self, filters, resource, expected): - common._FILTERS_COLLECTION = filters - result = common.get_enabled_resource_filters(resource) - self.assertEqual(expected, result) - - @ddt.data({'filters': {'key1': 'value1'}, - 'is_admin': False, - 'result': {'fake_resource': ['key1']}, - 'expected': {'key1': 'value1'}, - 'resource': 'fake_resource'}, - {'filters': {'key1': 'value1', 'key2': 'value2'}, - 
'is_admin': False, - 'result': {'fake_resource': ['key1']}, - 'expected': None, - 'resource': 'fake_resource'}, - {'filters': {'key1': 'value1', - 'all_tenants': 'value2', - 'key3': 'value3'}, - 'is_admin': True, - 'result': {'fake_resource': []}, - 'expected': {'key1': 'value1', - 'all_tenants': 'value2', - 'key3': 'value3'}, - 'resource': 'fake_resource'}, - {'filters': {'key1': 'value1', - 'all_tenants': 'value2', - 'key3': 'value3'}, - 'is_admin': True, - 'result': {'pool': []}, - 'expected': None, - 'resource': 'pool'}) - @ddt.unpack - @mock.patch('cinder.api.common.get_enabled_resource_filters') - def test_reject_invalid_filters(self, mock_get, filters, - is_admin, result, expected, resource): - class FakeContext(object): - def __init__(self, admin): - self.is_admin = admin - - fake_context = FakeContext(is_admin) - mock_get.return_value = result - if expected: - common.reject_invalid_filters(fake_context, - filters, resource) - self.assertEqual(expected, filters) - else: - self.assertRaises( - webob.exc.HTTPBadRequest, - common.reject_invalid_filters, fake_context, - filters, resource) - - @ddt.data({'filters': {'name': 'value1'}, - 'is_admin': False, - 'result': {'fake_resource': ['name']}, - 'expected': {'name': 'value1'}}, - {'filters': {'name~': 'value1'}, - 'is_admin': False, - 'result': {'fake_resource': ['name']}, - 'expected': None}, - {'filters': {'name': 'value1'}, - 'is_admin': False, - 'result': {'fake_resource': ['name~']}, - 'expected': {'name': 'value1'}}, - {'filters': {'name~': 'value1'}, - 'is_admin': False, - 'result': {'fake_resource': ['name~']}, - 'expected': {'name~': 'value1'}} - ) - @ddt.unpack - @mock.patch('cinder.api.common.get_enabled_resource_filters') - def test_reject_invalid_filters_like_operator_enabled( - self, mock_get, filters, is_admin, result, expected): - class FakeContext(object): - def __init__(self, admin): - self.is_admin = admin - - fake_context = FakeContext(is_admin) - mock_get.return_value = result - if 
expected: - common.reject_invalid_filters(fake_context, - filters, 'fake_resource', True) - self.assertEqual(expected, filters) - else: - self.assertRaises( - webob.exc.HTTPBadRequest, - common.reject_invalid_filters, fake_context, - filters, 'fake_resource') - - @ddt.data({'resource': 'group', - 'filters': {'name~': 'value'}, - 'expected': {'name~': 'value'}}, - {'resource': 'snapshot', - 'filters': {'status~': 'value'}, - 'expected': {'status~': 'value'}}, - {'resource': 'volume', - 'filters': {'name~': 'value', - 'description~': 'value'}, - 'expected': {'display_name~': 'value', - 'display_description~': 'value'}}, - {'resource': 'backup', - 'filters': {'name~': 'value', - 'description~': 'value'}, - 'expected': {'display_name~': 'value', - 'display_description~': 'value'}}, - ) - @ddt.unpack - def test_convert_filter_attributes(self, resource, filters, expected): - common.convert_filter_attributes(filters, resource) - self.assertEqual(expected, filters) - - -@ddt.ddt -class LinkPrefixTest(test.TestCase): - - @ddt.data((["http://192.168.0.243:24/", "http://127.0.0.1/volume"], - "http://127.0.0.1/volume"), - (["http://foo.x.com/v1", "http://new.prefix.com"], - "http://new.prefix.com/v1"), - (["http://foo.x.com/v1", - "http://new.prefix.com:20455/new_extra_prefix"], - "http://new.prefix.com:20455/new_extra_prefix/v1")) - @ddt.unpack - def test_update_link_prefix(self, update_args, expected): - vb = common.ViewBuilder() - result = vb._update_link_prefix(*update_args) - self.assertEqual(expected, result) - - -class RequestUrlTest(test.TestCase): - def test_get_request_url_no_forward(self): - app_url = 'http://127.0.0.1/v2;param?key=value#frag' - request = type('', (), { - 'application_url': app_url, - 'headers': {} - }) - result = common.get_request_url(request) - self.assertEqual(app_url, result) - - def test_get_request_url_forward(self): - request = type('', (), { - 'application_url': 'http://127.0.0.1/v2;param?key=value#frag', - 'headers': {'X-Forwarded-Host': 
'192.168.0.243:24'} - }) - result = common.get_request_url(request) - self.assertEqual('http://192.168.0.243:24/v2;param?key=value#frag', - result) diff --git a/cinder/tests/unit/api/test_versions.py b/cinder/tests/unit/api/test_versions.py deleted file mode 100644 index acbe698b6..000000000 --- a/cinder/tests/unit/api/test_versions.py +++ /dev/null @@ -1,471 +0,0 @@ -# Copyright 2015 Clinton Knight -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import ddt -from oslo_serialization import jsonutils -from oslo_utils import encodeutils -import six -from six.moves import http_client -import webob - -from cinder.api.openstack import api_version_request -from cinder.api.openstack import wsgi -from cinder.api.v1 import router -from cinder.api import versions -from cinder import exception -from cinder import test -from cinder.tests.unit.api import fakes - - -VERSION_HEADER_NAME = 'OpenStack-API-Version' -VOLUME_SERVICE = 'volume ' - - -@ddt.ddt -class VersionsControllerTestCase(test.TestCase): - - def setUp(self): - super(VersionsControllerTestCase, self).setUp() - self.wsgi_apps = (versions.Versions(), router.APIRouter()) - - def build_request(self, base_dir=None, base_url='http://localhost/v3', - header_version=None): - if base_dir: - req = fakes.HTTPRequest.blank(base_dir, base_url=base_url) - else: - req = fakes.HTTPRequest.blank('/', base_url=base_url) - req.method = 'GET' - req.content_type = 'application/json' - if header_version: - 
req.headers = {VERSION_HEADER_NAME: VOLUME_SERVICE + - header_version} - - return req - - def check_response(self, response, version): - self.assertEqual(VOLUME_SERVICE + version, - response.headers[VERSION_HEADER_NAME]) - self.assertEqual(VERSION_HEADER_NAME, response.headers['Vary']) - - @ddt.data('1.0', '2.0', '3.0') - def test_versions_root(self, version): - req = self.build_request(base_url='http://localhost') - - response = req.get_response(versions.Versions()) - self.assertEqual(http_client.MULTIPLE_CHOICES, response.status_int) - body = jsonutils.loads(response.body) - version_list = body['versions'] - - ids = [v['id'] for v in version_list] - self.assertEqual({'v1.0', 'v2.0', 'v3.0'}, set(ids)) - - v1 = [v for v in version_list if v['id'] == 'v1.0'][0] - self.assertEqual('', v1.get('min_version')) - self.assertEqual('', v1.get('version')) - - v2 = [v for v in version_list if v['id'] == 'v2.0'][0] - self.assertEqual('', v2.get('min_version')) - self.assertEqual('', v2.get('version')) - - v3 = [v for v in version_list if v['id'] == 'v3.0'][0] - self.assertEqual(api_version_request._MAX_API_VERSION, - v3.get('version')) - self.assertEqual(api_version_request._MIN_API_VERSION, - v3.get('min_version')) - - def test_versions_v1_no_header(self): - req = self.build_request(base_url='http://localhost/v1') - - response = req.get_response(router.APIRouter()) - self.assertEqual(http_client.OK, response.status_int) - - def test_versions_v2_no_header(self): - req = self.build_request(base_url='http://localhost/v2') - - response = req.get_response(router.APIRouter()) - self.assertEqual(http_client.OK, response.status_int) - - @ddt.data('1.0', '2.0', '3.0') - def test_versions(self, version): - req = self.build_request( - base_url='http://localhost/v{}'.format(version[0]), - header_version=version) - - if version is not None: - req.headers = {VERSION_HEADER_NAME: VOLUME_SERVICE + version} - - response = req.get_response(router.APIRouter()) - 
self.assertEqual(http_client.OK, response.status_int) - body = jsonutils.loads(response.body) - version_list = body['versions'] - - ids = [v['id'] for v in version_list] - self.assertEqual({'v{}'.format(version)}, set(ids)) - - if version == '3.0': - self.check_response(response, version) - self.assertEqual(api_version_request._MAX_API_VERSION, - version_list[0].get('version')) - self.assertEqual(api_version_request._MIN_API_VERSION, - version_list[0].get('min_version')) - else: - self.assertEqual('', version_list[0].get('min_version')) - self.assertEqual('', version_list[0].get('version')) - - def test_versions_version_latest(self): - req = self.build_request(header_version='latest') - - response = req.get_response(router.APIRouter()) - - self.assertEqual(http_client.OK, response.status_int) - self.check_response(response, api_version_request._MAX_API_VERSION) - - def test_versions_version_invalid(self): - req = self.build_request(header_version='2.0.1') - - for app in self.wsgi_apps: - response = req.get_response(app) - - self.assertEqual(http_client.BAD_REQUEST, response.status_int) - - @ddt.data('1.0', '2.0', '3.0') - def test_versions_response_fault(self, version): - req = self.build_request(header_version=version) - req.api_version_request = ( - api_version_request.APIVersionRequest(version)) - - app = wsgi.Fault(webob.exc.HTTPBadRequest(explanation='what?')) - response = req.get_response(app) - - self.assertEqual(http_client.BAD_REQUEST, response.status_int) - if version == '3.0': - self.check_response(response, '3.0') - else: - self.assertNotIn(VERSION_HEADER_NAME, response.headers) - - def test_versions_inheritance_internals_of_non_base_controller(self): - """Test ControllerMetaclass works inheriting from non base class.""" - def _get_str_version(version): - return "%s.%s" % (version._ver_major, version._ver_minor) - - def assert_method_equal(expected, observed): - if six.PY2: - expected = expected.im_func - self.assertEqual(expected, observed) - - class 
ControllerParent(wsgi.Controller): - @wsgi.Controller.api_version('3.0') - def index(self, req): - pass - - # We create this class in between to confirm that we don't leave - # undesired versioned methods in the wsgi.Controller class. - class Controller(wsgi.Controller): - @wsgi.Controller.api_version('2.0') - def index(self, req): - pass - - class ControllerChild(ControllerParent): - @wsgi.Controller.api_version('3.1') - def index(self, req): - pass - - @wsgi.Controller.api_version('3.2') - def new_method(self, req): - pass - - # ControllerParent will only have its own index method - self.assertSetEqual({'index'}, set(ControllerParent.versioned_methods)) - self.assertEqual(1, len(ControllerParent.versioned_methods['index'])) - index = ControllerParent.versioned_methods['index'][0] - assert_method_equal(ControllerParent.index, index.func) - self.assertEqual('index', index.name) - self.assertEqual('3.0', _get_str_version(index.start_version)) - self.assertEqual('None.None', _get_str_version(index.end_version)) - - # Same thing will happen with the Controller class, thus confirming - # that we don't cross pollinate our classes with undesired methods. 
- self.assertSetEqual({'index'}, set(Controller.versioned_methods)) - self.assertEqual(1, len(Controller.versioned_methods['index'])) - index = Controller.versioned_methods['index'][0] - assert_method_equal(Controller.index, index.func) - self.assertEqual('index', index.name) - self.assertEqual('2.0', _get_str_version(index.start_version)) - self.assertEqual('None.None', _get_str_version(index.end_version)) - - # ControllerChild will inherit index method from ControllerParent and - # add its own version as well as add a new method - self.assertSetEqual({'index', 'new_method'}, - set(ControllerChild.versioned_methods)) - self.assertEqual(2, len(ControllerChild.versioned_methods['index'])) - - # The methods are ordered from newest version to oldest version - index = ControllerChild.versioned_methods['index'][0] - assert_method_equal(ControllerChild.index, index.func) - self.assertEqual('index', index.name) - self.assertEqual('3.1', _get_str_version(index.start_version)) - self.assertEqual('None.None', _get_str_version(index.end_version)) - - index = ControllerChild.versioned_methods['index'][1] - assert_method_equal(ControllerParent.index, index.func) - self.assertEqual('index', index.name) - self.assertEqual('3.0', _get_str_version(index.start_version)) - self.assertEqual('None.None', _get_str_version(index.end_version)) - - # New method also gets added even if it didn't exist in any of the base - # classes. 
- self.assertEqual(1, - len(ControllerChild.versioned_methods['new_method'])) - new_method = ControllerChild.versioned_methods['new_method'][0] - assert_method_equal(ControllerChild.new_method, new_method.func) - self.assertEqual('new_method', new_method.name) - self.assertEqual('3.2', _get_str_version(new_method.start_version)) - self.assertEqual('None.None', _get_str_version(new_method.end_version)) - - @ddt.data( - ('2.0', 'index', http_client.NOT_ACCEPTABLE, 'ControllerParent'), - ('2.0', 'show', http_client.NOT_ACCEPTABLE, 'ControllerParent'), - ('3.0', 'index', http_client.NOT_FOUND, 'ControllerParent'), - ('3.0', 'show', http_client.NOT_FOUND, 'ControllerParent'), - ('3.1', 'index', 'parent', 'ControllerParent'), - ('3.1', 'show', http_client.NOT_FOUND, 'ControllerParent'), - ('3.2', 'index', 'parent', 'ControllerParent'), - ('3.2', 'show', http_client.NOT_FOUND, 'ControllerParent'), - - ('2.0', 'index', http_client.NOT_ACCEPTABLE, 'Controller'), - ('2.0', 'show', http_client.NOT_ACCEPTABLE, 'Controller'), - ('3.0', 'index', http_client.NOT_FOUND, 'Controller'), - ('3.0', 'show', http_client.NOT_FOUND, 'Controller'), - ('3.1', 'index', 'single', 'Controller'), - ('3.1', 'show', http_client.NOT_FOUND, 'Controller'), - ('3.2', 'index', 'single', 'Controller'), - ('3.2', 'show', http_client.NOT_FOUND, 'Controller'), - - ('2.0', 'index', http_client.NOT_ACCEPTABLE, 'ControllerChild'), - ('2.0', 'show', http_client.NOT_ACCEPTABLE, 'ControllerChild'), - ('3.0', 'index', http_client.NOT_FOUND, 'ControllerChild'), - ('3.0', 'show', http_client.NOT_FOUND, 'ControllerChild'), - ('3.1', 'index', 'parent', 'ControllerChild'), - ('3.1', 'show', http_client.NOT_FOUND, 'ControllerChild'), - ('3.2', 'index', 'child 3.2', 'ControllerChild'), - ('3.2', 'show', http_client.NOT_FOUND, 'ControllerChild'), - ('3.3', 'index', 'child 3.3', 'ControllerChild'), - ('3.3', 'show', 'show', 'ControllerChild'), - ('3.4', 'index', 'child 3.4', 'ControllerChild')) - @ddt.unpack - def 
test_versions_inheritance_of_non_base_controller(self, version, call, - expected, controller): - """Test ControllerMetaclass works inheriting from non base class.""" - class ControllerParent(wsgi.Controller): - @wsgi.Controller.api_version('3.1') - def index(self, req): - return 'parent' - - # We create this class in between to confirm that we don't leave - # undesired versioned methods in the wsgi.Controller class. - class Controller(wsgi.Controller): - @wsgi.Controller.api_version('3.1') - def index(self, req): - return 'single' - - class ControllerChild(ControllerParent): - # We don't add max version to confirm that once we set a newer - # version it doesn't really matter because the newest one will be - # called. - @wsgi.Controller.api_version('3.2') - def index(self, req): - return 'child 3.2' - - @index.api_version('3.3') - def index(self, req): - return 'child 3.3' - - @index.api_version('3.4') - def index(self, req): - return 'child 3.4' - - @wsgi.Controller.api_version('3.3') - def show(self, req, *args, **kwargs): - return 'show' - - base_dir = '/tests' if call == 'index' else '/tests/123' - req = self.build_request(base_dir=base_dir, header_version=version) - app = fakes.TestRouter(locals()[controller]()) - - response = req.get_response(app) - resp = encodeutils.safe_decode(response.body, incoming='utf-8') - - if isinstance(expected, six.string_types): - self.assertEqual(http_client.OK, response.status_int) - self.assertEqual(expected, resp) - else: - self.assertEqual(expected, response.status_int) - - def test_versions_version_not_found(self): - api_version_request_4_0 = api_version_request.APIVersionRequest('4.0') - self.mock_object(api_version_request, - 'max_api_version', - return_value=api_version_request_4_0) - - class Controller(wsgi.Controller): - - @wsgi.Controller.api_version('3.0', '3.0') - def index(self, req): - return 'off' - - req = self.build_request(header_version='3.5') - app = fakes.TestRouter(Controller()) - - response = 
req.get_response(app) - - self.assertEqual(http_client.NOT_FOUND, response.status_int) - - def test_versions_version_not_acceptable(self): - req = self.build_request(header_version='4.0') - - response = req.get_response(router.APIRouter()) - - self.assertEqual(http_client.NOT_ACCEPTABLE, response.status_int) - - @ddt.data(['volume 3.0, compute 2.22', True], - ['volume 3.0, compute 2.22, identity 2.3', True], - ['compute 2.22, identity 2.3', False]) - @ddt.unpack - def test_versions_multiple_services_header( - self, service_list, should_pass): - req = self.build_request() - req.headers = {VERSION_HEADER_NAME: service_list} - - try: - response = req.get_response(router.APIRouter()) - except exception.VersionNotFoundForAPIMethod: - if should_pass: - raise - elif not should_pass: - return - - self.assertEqual(http_client.OK, response.status_int) - body = jsonutils.loads(response.body) - version_list = body['versions'] - - ids = [v['id'] for v in version_list] - self.assertEqual({'v3.0'}, set(ids)) - self.check_response(response, '3.0') - - self.assertEqual(api_version_request._MAX_API_VERSION, - version_list[0].get('version')) - self.assertEqual(api_version_request._MIN_API_VERSION, - version_list[0].get('min_version')) - - @ddt.data(['3.5', http_client.OK], ['3.55', http_client.NOT_FOUND]) - @ddt.unpack - def test_req_version_matches(self, version, HTTP_ret): - version_request = api_version_request.APIVersionRequest(version) - self.mock_object(api_version_request, - 'max_api_version', - return_value=version_request) - - class Controller(wsgi.Controller): - - @wsgi.Controller.api_version('3.0', '3.6') - def index(self, req): - return 'off' - - req = self.build_request(base_dir='/tests', header_version=version) - app = fakes.TestRouter(Controller()) - - response = req.get_response(app) - resp = encodeutils.safe_decode(response.body, incoming='utf-8') - - if HTTP_ret == http_client.OK: - self.assertEqual('off', resp) - elif HTTP_ret == http_client.NOT_FOUND: - 
self.assertNotEqual('off', resp) - self.assertEqual(HTTP_ret, response.status_int) - - @ddt.data(['3.5', 'older'], ['3.37', 'newer']) - @ddt.unpack - def test_req_version_matches_with_if(self, version, ret_val): - version_request = api_version_request.APIVersionRequest(version) - self.mock_object(api_version_request, - 'max_api_version', - return_value=version_request) - - class Controller(wsgi.Controller): - - def index(self, req): - req_version = req.api_version_request - if req_version.matches('3.1', '3.8'): - return 'older' - if req_version.matches('3.9', '8.8'): - return 'newer' - - req = self.build_request(base_dir='/tests', header_version=version) - app = fakes.TestRouter(Controller()) - - response = req.get_response(app) - - resp = encodeutils.safe_decode(response.body, incoming='utf-8') - self.assertEqual(ret_val, resp) - self.assertEqual(http_client.OK, response.status_int) - - @ddt.data(['3.5', 'older'], ['3.37', 'newer']) - @ddt.unpack - def test_req_version_matches_with_None(self, version, ret_val): - version_request = api_version_request.APIVersionRequest(version) - self.mock_object(api_version_request, - 'max_api_version', - return_value=version_request) - - class Controller(wsgi.Controller): - - def index(self, req): - req_version = req.api_version_request - if req_version.matches(None, '3.8'): - return 'older' - if req_version.matches('3.9', None): - return 'newer' - - req = self.build_request(base_dir='/tests', header_version=version) - app = fakes.TestRouter(Controller()) - - response = req.get_response(app) - - resp = encodeutils.safe_decode(response.body, incoming='utf-8') - self.assertEqual(ret_val, resp) - self.assertEqual(http_client.OK, response.status_int) - - def test_req_version_matches_with_None_None(self): - version_request = api_version_request.APIVersionRequest('3.39') - self.mock_object(api_version_request, - 'max_api_version', - return_value=version_request) - - class Controller(wsgi.Controller): - - def index(self, req): - 
req_version = req.api_version_request - # This case is artificial, and will return True - if req_version.matches(None, None): - return "Pass" - - req = self.build_request(base_dir='/tests', header_version='3.39') - app = fakes.TestRouter(Controller()) - - response = req.get_response(app) - - resp = encodeutils.safe_decode(response.body, incoming='utf-8') - self.assertEqual("Pass", resp) - self.assertEqual(http_client.OK, response.status_int) diff --git a/cinder/tests/unit/api/v1/__init__.py b/cinder/tests/unit/api/v1/__init__.py deleted file mode 100644 index e69de29bb..000000000 diff --git a/cinder/tests/unit/api/v1/test_snapshots.py b/cinder/tests/unit/api/v1/test_snapshots.py deleted file mode 100644 index 549cf7f9f..000000000 --- a/cinder/tests/unit/api/v1/test_snapshots.py +++ /dev/null @@ -1,618 +0,0 @@ -# Copyright 2011 Denali Systems, Inc. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -import ddt -import mock -from oslo_config import cfg -from six.moves import http_client -from six.moves.urllib import parse as urllib -import webob - -from cinder.api import common -from cinder.api.v1 import snapshots -from cinder import context -from cinder import db -from cinder import exception -from cinder import objects -from cinder.objects import fields -from cinder import test -from cinder.tests.unit.api import fakes -from cinder.tests.unit.api.v2 import fakes as v2_fakes -from cinder.tests.unit import fake_constants as fake -from cinder.tests.unit import fake_snapshot -from cinder.tests.unit import fake_volume -from cinder.tests.unit import utils -from cinder import volume - - -CONF = cfg.CONF - -UUID = '00000000-0000-0000-0000-000000000003' -INVALID_UUID = '00000000-0000-0000-0000-000000000004' - - -def _get_default_snapshot_param(): - return { - 'id': UUID, - 'volume_id': fake.VOLUME_ID, - 'status': fields.SnapshotStatus.AVAILABLE, - 'volume_size': 100, - 'created_at': None, - 'updated_at': None, - 'user_id': 'bcb7746c7a41472d88a1ffac89ba6a9b', - 'project_id': '7ffe17a15c724e2aa79fc839540aec15', - 'display_name': 'Default name', - 'display_description': 'Default description', - 'deleted': None, - 'volume': {'availability_zone': 'test_zone'} - } - - -def fake_snapshot_delete(self, context, snapshot): - if snapshot['id'] != UUID: - raise exception.SnapshotNotFound(snapshot['id']) - - -def fake_snapshot_get(self, context, snapshot_id): - if snapshot_id != UUID: - raise exception.SnapshotNotFound(snapshot_id) - - param = _get_default_snapshot_param() - return param - - -def fake_snapshot_get_all(self, context, search_opts=None): - param = _get_default_snapshot_param() - return [param] - - -@ddt.ddt -class SnapshotApiTest(test.TestCase): - def setUp(self): - super(SnapshotApiTest, self).setUp() - self.controller = snapshots.SnapshotsController() - self.ctx = context.RequestContext(fake.USER_ID, fake.PROJECT_ID, True) - - @mock.patch( - 
'cinder.api.openstack.wsgi.Controller.validate_name_and_description') - def test_snapshot_create(self, mock_validate): - volume = utils.create_volume(self.ctx) - snapshot_name = 'Snapshot Test Name' - snapshot_description = 'Snapshot Test Desc' - snapshot = { - "volume_id": volume.id, - "force": False, - "name": snapshot_name, - "description": snapshot_description - } - - body = dict(snapshot=snapshot) - req = fakes.HTTPRequest.blank('/v1/snapshots') - resp_dict = self.controller.create(req, body) - - self.assertIn('snapshot', resp_dict) - self.assertEqual(snapshot_name, resp_dict['snapshot']['display_name']) - self.assertEqual(snapshot_description, - resp_dict['snapshot']['display_description']) - self.assertTrue(mock_validate.called) - self.assertNotIn('updated_at', resp_dict['snapshot']) - db.volume_destroy(self.ctx, volume.id) - - @ddt.data(True, 'y', 'true', 'trUE', 'yes', '1', 'on', 1, "1 ") - def test_snapshot_create_force(self, force_param): - volume = utils.create_volume(self.ctx, status='in-use') - snapshot_name = 'Snapshot Test Name' - snapshot_description = 'Snapshot Test Desc' - snapshot = { - "volume_id": volume.id, - "force": force_param, - "name": snapshot_name, - "description": snapshot_description - } - body = dict(snapshot=snapshot) - req = fakes.HTTPRequest.blank('/v1/snapshots') - resp_dict = self.controller.create(req, body) - - self.assertIn('snapshot', resp_dict) - self.assertEqual(snapshot_name, - resp_dict['snapshot']['display_name']) - self.assertEqual(snapshot_description, - resp_dict['snapshot']['display_description']) - self.assertNotIn('updated_at', resp_dict['snapshot']) - - db.volume_destroy(self.ctx, volume.id) - - @ddt.data(False, 'n', 'false', 'falSE', 'No', '0', 'off', 0) - def test_snapshot_create_force_failure(self, force_param): - volume = utils.create_volume(self.ctx, status='in-use') - snapshot_name = 'Snapshot Test Name' - snapshot_description = 'Snapshot Test Desc' - snapshot = { - "volume_id": volume.id, - "force": 
force_param, - "name": snapshot_name, - "description": snapshot_description - } - body = dict(snapshot=snapshot) - req = fakes.HTTPRequest.blank('/v1/snapshots') - self.assertRaises(exception.InvalidVolume, - self.controller.create, - req, - body) - - db.volume_destroy(self.ctx, volume.id) - - @ddt.data("**&&^^%%$$##@@", '-1', 2, '01') - def test_snapshot_create_invalid_force_param(self, force_param): - volume = utils.create_volume(self.ctx, status='in-use') - snapshot_name = 'Snapshot Test Name' - snapshot_description = 'Snapshot Test Desc' - - snapshot = { - "volume_id": volume.id, - "force": force_param, - "name": snapshot_name, - "description": snapshot_description - } - body = dict(snapshot=snapshot) - req = fakes.HTTPRequest.blank('/v1/snapshots') - self.assertRaises(exception.InvalidParameterValue, - self.controller.create, - req, - body) - - db.volume_destroy(self.ctx, volume.id) - - def test_snapshot_create_without_volume_id(self): - snapshot_name = 'Snapshot Test Name' - snapshot_description = 'Snapshot Test Desc' - body = { - "snapshot": { - "force": True, - "name": snapshot_name, - "description": snapshot_description - } - } - req = fakes.HTTPRequest.blank('/v1/snapshots') - self.assertRaises(webob.exc.HTTPBadRequest, - self.controller.create, req, body) - - @mock.patch.object(volume.api.API, "update_snapshot", - side_effect=v2_fakes.fake_snapshot_update) - @mock.patch('cinder.db.snapshot_metadata_get', return_value=dict()) - @mock.patch('cinder.db.volume_get') - @mock.patch('cinder.objects.Snapshot.get_by_id') - @mock.patch( - 'cinder.api.openstack.wsgi.Controller.validate_name_and_description') - def test_snapshot_update( - self, mock_validate, snapshot_get_by_id, volume_get_by_id, - snapshot_metadata_get, update_snapshot): - snapshot = { - 'id': UUID, - 'volume_id': fake.VOLUME_ID, - 'status': fields.SnapshotStatus.AVAILABLE, - 'volume_size': 100, - 'display_name': 'Default name', - 'display_description': 'Default description', - 'expected_attrs': 
['metadata'], - } - ctx = context.RequestContext(fake.PROJECT_ID, fake.USER_ID, True) - snapshot_obj = fake_snapshot.fake_snapshot_obj(ctx, **snapshot) - fake_volume_obj = fake_volume.fake_volume_obj(ctx) - snapshot_get_by_id.return_value = snapshot_obj - volume_get_by_id.return_value = fake_volume_obj - - updates = { - "display_name": "Updated Test Name", - } - body = {"snapshot": updates} - req = fakes.HTTPRequest.blank('/v1/snapshots/%s' % UUID) - res_dict = self.controller.update(req, UUID, body) - expected = { - 'snapshot': { - 'id': UUID, - 'volume_id': fake.VOLUME_ID, - 'status': fields.SnapshotStatus.AVAILABLE, - 'size': 100, - 'created_at': None, - 'display_name': u'Updated Test Name', - 'display_description': u'Default description', - 'metadata': {}, - } - } - self.assertEqual(expected, res_dict) - - def test_snapshot_update_missing_body(self): - body = {} - req = fakes.HTTPRequest.blank('/v1/snapshots/%s' % UUID) - self.assertRaises(webob.exc.HTTPUnprocessableEntity, - self.controller.update, req, UUID, body) - - def test_snapshot_update_invalid_body(self): - body = {'name': 'missing top level snapshot key'} - req = fakes.HTTPRequest.blank('/v1/snapshots/%s' % UUID) - self.assertRaises(webob.exc.HTTPUnprocessableEntity, - self.controller.update, req, UUID, body) - - def test_snapshot_update_not_found(self): - self.mock_object(volume.api.API, "get_snapshot", fake_snapshot_get) - updates = { - "display_name": "Updated Test Name", - } - body = {"snapshot": updates} - req = fakes.HTTPRequest.blank('/v1/snapshots/not-the-uuid') - self.assertRaises(exception.SnapshotNotFound, self.controller.update, - req, 'not-the-uuid', body) - - @mock.patch.object(volume.api.API, "delete_snapshot", - side_effect=v2_fakes.fake_snapshot_update) - @mock.patch('cinder.db.snapshot_metadata_get', return_value=dict()) - @mock.patch('cinder.objects.Volume.get_by_id') - @mock.patch('cinder.objects.Snapshot.get_by_id') - def test_snapshot_delete(self, snapshot_get_by_id, 
volume_get_by_id, - snapshot_metadata_get, delete_snapshot): - snapshot = { - 'id': UUID, - 'volume_id': fake.VOLUME_ID, - 'status': fields.SnapshotStatus.AVAILABLE, - 'volume_size': 100, - 'display_name': 'Default name', - 'display_description': 'Default description', - 'expected_attrs': ['metadata'], - } - ctx = context.RequestContext(fake.PROJECT_ID, fake.USER_ID, True) - snapshot_obj = fake_snapshot.fake_snapshot_obj(ctx, **snapshot) - fake_volume_obj = fake_volume.fake_volume_obj(ctx) - snapshot_get_by_id.return_value = snapshot_obj - volume_get_by_id.return_value = fake_volume_obj - - snapshot_id = UUID - req = fakes.HTTPRequest.blank('/v1/snapshots/%s' % snapshot_id) - resp = self.controller.delete(req, snapshot_id) - self.assertEqual(http_client.ACCEPTED, resp.status_int) - - def test_snapshot_delete_invalid_id(self): - self.mock_object(volume.api.API, "delete_snapshot", - fake_snapshot_delete) - snapshot_id = INVALID_UUID - req = fakes.HTTPRequest.blank('/v1/snapshots/%s' % snapshot_id) - self.assertRaises(exception.SnapshotNotFound, self.controller.delete, - req, snapshot_id) - - @mock.patch('cinder.db.snapshot_metadata_get', return_value=dict()) - @mock.patch('cinder.objects.Volume.get_by_id') - @mock.patch('cinder.objects.Snapshot.get_by_id') - def test_snapshot_show(self, snapshot_get_by_id, volume_get_by_id, - snapshot_metadata_get): - snapshot = { - 'id': UUID, - 'volume_id': fake.VOLUME_ID, - 'status': fields.SnapshotStatus.AVAILABLE, - 'volume_size': 100, - 'display_name': 'Default name', - 'display_description': 'Default description', - 'expected_attrs': ['metadata'], - } - ctx = context.RequestContext(fake.PROJECT_ID, fake.USER_ID, True) - snapshot_obj = fake_snapshot.fake_snapshot_obj(ctx, **snapshot) - fake_volume_obj = fake_volume.fake_volume_obj(ctx) - snapshot_get_by_id.return_value = snapshot_obj - volume_get_by_id.return_value = fake_volume_obj - req = fakes.HTTPRequest.blank('/v1/snapshots/%s' % UUID) - resp_dict = 
self.controller.show(req, UUID) - - self.assertIn('snapshot', resp_dict) - self.assertEqual(UUID, resp_dict['snapshot']['id']) - self.assertNotIn('updated_at', resp_dict['snapshot']) - - def test_snapshot_show_invalid_id(self): - snapshot_id = INVALID_UUID - req = fakes.HTTPRequest.blank('/v1/snapshots/%s' % snapshot_id) - self.assertRaises(exception.SnapshotNotFound, - self.controller.show, req, snapshot_id) - - @mock.patch('cinder.db.snapshot_metadata_get', return_value=dict()) - @mock.patch('cinder.objects.Volume.get_by_id') - @mock.patch('cinder.objects.Snapshot.get_by_id') - @mock.patch('cinder.volume.api.API.get_all_snapshots') - def test_snapshot_detail(self, get_all_snapshots, snapshot_get_by_id, - volume_get_by_id, snapshot_metadata_get): - snapshot = { - 'id': UUID, - 'volume_id': fake.VOLUME_ID, - 'status': fields.SnapshotStatus.AVAILABLE, - 'volume_size': 100, - 'display_name': 'Default name', - 'display_description': 'Default description', - 'expected_attrs': ['metadata'] - } - ctx = context.RequestContext(fake.PROJECT_ID, fake.USER_ID, True) - snapshot_obj = fake_snapshot.fake_snapshot_obj(ctx, **snapshot) - fake_volume_obj = fake_volume.fake_volume_obj(ctx) - snapshot_get_by_id.return_value = snapshot_obj - volume_get_by_id.return_value = fake_volume_obj - snapshots = objects.SnapshotList(objects=[snapshot_obj]) - get_all_snapshots.return_value = snapshots - - req = fakes.HTTPRequest.blank('/v1/snapshots/detail') - resp_dict = self.controller.detail(req) - - self.assertIn('snapshots', resp_dict) - resp_snapshots = resp_dict['snapshots'] - self.assertEqual(1, len(resp_snapshots)) - self.assertNotIn('updated_at', resp_snapshots[0]) - - resp_snapshot = resp_snapshots.pop() - self.assertEqual(UUID, resp_snapshot['id']) - - @mock.patch.object(db, 'snapshot_get_all_by_project', - v2_fakes.fake_snapshot_get_all_by_project) - @mock.patch.object(db, 'snapshot_get_all', - v2_fakes.fake_snapshot_get_all) - @mock.patch('cinder.db.snapshot_metadata_get', 
return_value=dict()) - def test_admin_list_snapshots_limited_to_project(self, - snapshot_metadata_get): - req = fakes.HTTPRequest.blank('/v1/%s/snapshots' % fake.PROJECT_ID, - use_admin_context=True) - res = self.controller.index(req) - - self.assertIn('snapshots', res) - self.assertEqual(1, len(res['snapshots'])) - - @mock.patch('cinder.db.snapshot_metadata_get', return_value=dict()) - def test_list_snapshots_with_limit_and_offset(self, - snapshot_metadata_get): - def list_snapshots_with_limit_and_offset(snaps, is_admin): - req = fakes.HTTPRequest.blank('/v1/%s/snapshots?limit=1' - '&offset=1' % fake.PROJECT_ID, - use_admin_context=is_admin) - res = self.controller.index(req) - - self.assertIn('snapshots', res) - self.assertEqual(1, len(res['snapshots'])) - self.assertEqual(snaps[1].id, res['snapshots'][0]['id']) - self.assertNotIn('updated_at', res['snapshots'][0]) - - # Test that we get an empty list with an offset greater than the - # number of items - req = fakes.HTTPRequest.blank('/v1/snapshots?limit=1&offset=3') - self.assertEqual({'snapshots': []}, self.controller.index(req)) - - volume, snaps = self._create_db_snapshots(3) - # admin case - list_snapshots_with_limit_and_offset(snaps, is_admin=True) - # non-admin case - list_snapshots_with_limit_and_offset(snaps, is_admin=False) - - @mock.patch.object(db, 'snapshot_get_all_by_project') - @mock.patch('cinder.db.snapshot_metadata_get', return_value=dict()) - def test_list_snpashots_with_wrong_limit_and_offset(self, - mock_metadata_get, - mock_snapshot_get_all): - """Test list with negative and non numeric limit and offset.""" - mock_snapshot_get_all.return_value = [] - - # Negative limit - req = fakes.HTTPRequest.blank('/v1/snapshots?limit=-1&offset=1') - self.assertRaises(webob.exc.HTTPBadRequest, - self.controller.index, - req) - - # Non numeric limit - req = fakes.HTTPRequest.blank('/v1/snapshots?limit=a&offset=1') - self.assertRaises(webob.exc.HTTPBadRequest, - self.controller.index, - req) - - # Negative 
offset - req = fakes.HTTPRequest.blank('/v1/snapshots?limit=1&offset=-1') - self.assertRaises(webob.exc.HTTPBadRequest, - self.controller.index, - req) - - # Non numeric offset - req = fakes.HTTPRequest.blank('/v1/snapshots?limit=1&offset=a') - self.assertRaises(webob.exc.HTTPBadRequest, - self.controller.index, - req) - - # Test that we get an exception HTTPBadRequest(400) with an offset - # greater than the maximum offset value. - url = '/v1/snapshots?limit=1&offset=323245324356534235' - req = fakes.HTTPRequest.blank(url) - self.assertRaises(webob.exc.HTTPBadRequest, - self.controller.index, req) - - def _assert_list_next(self, expected_query=None, project=fake.PROJECT_ID, - **kwargs): - """Check a page of snapshots list.""" - # Since we are accessing v2 api directly we don't need to specify - # v2 in the request path, if we did, we'd get /v2/v2 links back - request_path = '/v2/%s/snapshots' % project - expected_path = request_path - - # Construct the query if there are kwargs - if kwargs: - request_str = request_path + '?' + urllib.urlencode(kwargs) - else: - request_str = request_path - - # Make the request - req = fakes.HTTPRequest.blank(request_str) - res = self.controller.index(req) - - # We only expect to have a next link if there is an actual expected - # query. 
- if expected_query: - # We must have the links - self.assertIn('snapshots_links', res) - links = res['snapshots_links'] - - # Must be a list of links, even if we only get 1 back - self.assertTrue(list, type(links)) - next_link = links[0] - - # rel entry must be next - self.assertIn('rel', next_link) - self.assertIn('next', next_link['rel']) - - # href entry must have the right path - self.assertIn('href', next_link) - href_parts = urllib.urlparse(next_link['href']) - self.assertEqual(expected_path, href_parts.path) - - # And the query from the next link must match what we were - # expecting - params = urllib.parse_qs(href_parts.query) - self.assertDictEqual(expected_query, params) - - # Make sure we don't have links if we were not expecting them - else: - self.assertNotIn('snapshots_links', res) - - def _create_db_snapshots(self, num_snaps): - volume = utils.create_volume(self.ctx) - snaps = [utils.create_snapshot(self.ctx, - volume.id, - display_name='snap' + str(i)) - for i in range(num_snaps)] - - self.addCleanup(db.volume_destroy, self.ctx, volume.id) - for snap in snaps: - self.addCleanup(db.snapshot_destroy, self.ctx, snap.id) - - snaps.reverse() - return volume, snaps - - def test_list_snapshots_next_link_default_limit(self): - """Test that snapshot list pagination is limited by osapi_max_limit.""" - volume, snaps = self._create_db_snapshots(3) - - # NOTE(geguileo): Since cinder.api.common.limited has already been - # imported his argument max_limit already has a default value of 1000 - # so it doesn't matter that we change it to 2. That's why we need to - # mock it and send it current value. 
We still need to set the default - # value because other sections of the code use it, for example - # _get_collection_links - CONF.set_default('osapi_max_limit', 2) - - def get_pagination_params(params, max_limit=CONF.osapi_max_limit, - original_call=common.get_pagination_params): - return original_call(params, max_limit) - - def _get_limit_param(params, max_limit=CONF.osapi_max_limit, - original_call=common._get_limit_param): - return original_call(params, max_limit) - - with mock.patch.object(common, 'get_pagination_params', - get_pagination_params), \ - mock.patch.object(common, '_get_limit_param', - _get_limit_param): - # The link from the first page should link to the second - self._assert_list_next({'marker': [snaps[1].id]}) - - # Second page should have no next link - self._assert_list_next(marker=snaps[1].id) - - def test_list_snapshots_next_link_with_limit(self): - """Test snapshot list pagination with specific limit.""" - volume, snaps = self._create_db_snapshots(2) - - # The link from the first page should link to the second - self._assert_list_next({'limit': ['1'], 'marker': [snaps[0].id]}, - limit=1) - - # Even though there are no more elements, we should get a next element - # per specification. 
- expected = {'limit': ['1'], 'marker': [snaps[1].id]} - self._assert_list_next(expected, limit=1, marker=snaps[0].id) - - # When we go beyond the number of elements there should be no more - # next links - self._assert_list_next(limit=1, marker=snaps[1].id) - - @mock.patch.object(db, 'snapshot_get_all_by_project', - v2_fakes.fake_snapshot_get_all_by_project) - @mock.patch.object(db, 'snapshot_get_all', - v2_fakes.fake_snapshot_get_all) - @mock.patch('cinder.db.snapshot_metadata_get', return_value=dict()) - def test_admin_list_snapshots_all_tenants(self, snapshot_metadata_get): - req = fakes.HTTPRequest.blank('/v1/%s/snapshots?all_tenants=1' % - fake.PROJECT_ID, - use_admin_context=True) - res = self.controller.index(req) - self.assertIn('snapshots', res) - self.assertEqual(3, len(res['snapshots'])) - - @mock.patch.object(db, 'snapshot_get_all') - @mock.patch('cinder.db.snapshot_metadata_get', return_value=dict()) - def test_admin_list_snapshots_by_tenant_id(self, snapshot_metadata_get, - snapshot_get_all): - def get_all(context, filters=None, marker=None, limit=None, - sort_keys=None, sort_dirs=None, offset=None): - if 'project_id' in filters and 'tenant1' in filters['project_id']: - return [v2_fakes.fake_snapshot(fake.VOLUME_ID, - tenant_id='tenant1')] - else: - return [] - - snapshot_get_all.side_effect = get_all - - req = fakes.HTTPRequest.blank('/v1/%s/snapshots?all_tenants=1' - '&project_id=tenant1' % fake.PROJECT_ID, - use_admin_context=True) - res = self.controller.index(req) - self.assertIn('snapshots', res) - self.assertEqual(1, len(res['snapshots'])) - - @mock.patch.object(db, 'snapshot_get_all_by_project', - v2_fakes.fake_snapshot_get_all_by_project) - @mock.patch('cinder.db.snapshot_metadata_get', return_value=dict()) - def test_all_tenants_non_admin_gets_all_tenants(self, - snapshot_metadata_get): - req = fakes.HTTPRequest.blank('/v1/%s/snapshots?all_tenants=1' % - fake.PROJECT_ID) - res = self.controller.index(req) - self.assertIn('snapshots', res) - 
self.assertEqual(1, len(res['snapshots'])) - - @mock.patch.object(db, 'snapshot_get_all_by_project', - v2_fakes.fake_snapshot_get_all_by_project) - @mock.patch.object(db, 'snapshot_get_all', - v2_fakes.fake_snapshot_get_all) - @mock.patch('cinder.db.snapshot_metadata_get', return_value=dict()) - def test_non_admin_get_by_project(self, snapshot_metadata_get): - req = fakes.HTTPRequest.blank('/v1/%s/snapshots' % fake.PROJECT_ID) - res = self.controller.index(req) - self.assertIn('snapshots', res) - self.assertEqual(1, len(res['snapshots'])) - - def _create_snapshot_bad_body(self, body): - req = fakes.HTTPRequest.blank('/v1/%s/snapshots' % fake.PROJECT_ID) - req.method = 'POST' - - self.assertRaises(webob.exc.HTTPUnprocessableEntity, - self.controller.create, req, body) - - def test_create_no_body(self): - self._create_snapshot_bad_body(body=None) - - def test_create_missing_snapshot(self): - body = {'foo': {'a': 'b'}} - self._create_snapshot_bad_body(body=body) - - def test_create_malformed_entity(self): - body = {'snapshot': 'string'} - self._create_snapshot_bad_body(body=body) diff --git a/cinder/tests/unit/api/v1/test_volumes.py b/cinder/tests/unit/api/v1/test_volumes.py deleted file mode 100644 index 2e83e0af6..000000000 --- a/cinder/tests/unit/api/v1/test_volumes.py +++ /dev/null @@ -1,805 +0,0 @@ -# Copyright 2013 Josh Durgin -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- - -import datetime -import iso8601 - -import ddt -import mock -from oslo_config import cfg -from six.moves import http_client -from six.moves import range -import webob - -from cinder.api import extensions -from cinder.api.v1 import volumes -from cinder import context -from cinder import db -from cinder import exception as exc -from cinder.objects import fields -from cinder import test -from cinder.tests.unit.api import fakes -from cinder.tests.unit.api.v2 import fakes as v2_fakes -from cinder.tests.unit import fake_constants as fake -from cinder.tests.unit import fake_volume -from cinder.tests.unit.image import fake as fake_image -from cinder.tests.unit import utils -from cinder.volume import api as volume_api - -CONF = cfg.CONF - - -@ddt.ddt -class VolumeApiTest(test.TestCase): - def setUp(self): - super(VolumeApiTest, self).setUp() - self.ext_mgr = extensions.ExtensionManager() - self.ext_mgr.extensions = {} - fake_image.mock_image_service(self) - self.controller = volumes.VolumeController(self.ext_mgr) - self.maxDiff = None - self.ctxt = context.RequestContext(fake.USER_ID, fake.PROJECT_ID, True) - - def test_volume_create(self): - self.mock_object(volume_api.API, 'get', v2_fakes.fake_volume_get) - self.mock_object(volume_api.API, "create", - v2_fakes.fake_volume_api_create) - self.mock_object(db.sqlalchemy.api, '_volume_type_get_full', - v2_fakes.fake_volume_type_get) - - vol = {"size": 100, - "display_name": "Volume Test Name", - "display_description": "Volume Test Desc", - "availability_zone": "zone1:host1"} - body = {"volume": vol} - req = fakes.HTTPRequest.blank('/v1/volumes') - res_dict = self.controller.create(req, body) - expected = {'volume': {'status': 'fakestatus', - 'display_description': 'Volume Test Desc', - 'availability_zone': 'zone1:host1', - 'display_name': 'Volume Test Name', - 'attachments': [], - 'multiattach': 'false', - 'bootable': 'false', - 'volume_type': 'vol_type_name', - 'snapshot_id': None, - 'source_volid': None, - 'metadata': 
{}, - 'id': fake.VOLUME_ID, - 'created_at': datetime.datetime( - 1900, 1, 1, 1, 1, 1, - tzinfo=iso8601.iso8601.Utc()), - 'size': 100, - 'encrypted': False}} - self.assertEqual(expected, res_dict) - - @mock.patch.object(db, 'service_get_all', - return_value=v2_fakes.fake_service_get_all_by_topic( - None, None), - autospec=True) - def test_volume_create_with_type(self, mock_service_get): - vol_type = db.volume_type_create( - context.get_admin_context(), - dict(name=CONF.default_volume_type, extra_specs={}) - ) - db_vol_type = db.volume_type_get(context.get_admin_context(), - vol_type.id) - - vol = {"size": 100, - "display_name": "Volume Test Name", - "display_description": "Volume Test Desc", - "availability_zone": "zone1:host1", - "volume_type": "FakeTypeName"} - body = {"volume": vol} - req = fakes.HTTPRequest.blank('/v1/volumes') - # Raise 404 when type name isn't valid - self.assertRaises(exc.VolumeTypeNotFoundByName, - self.controller.create, req, body) - - # Use correct volume type name - vol.update(dict(volume_type=CONF.default_volume_type)) - body.update(dict(volume=vol)) - res_dict = self.controller.create(req, body) - self.assertIn('id', res_dict['volume']) - self.assertEqual(1, len(res_dict)) - self.assertEqual(db_vol_type['name'], - res_dict['volume']['volume_type']) - - # Use correct volume type id - vol.update(dict(volume_type=db_vol_type['id'])) - body.update(dict(volume=vol)) - res_dict = self.controller.create(req, body) - self.assertIn('id', res_dict['volume']) - self.assertEqual(1, len(res_dict)) - self.assertEqual(db_vol_type['name'], - res_dict['volume']['volume_type']) - - def test_volume_creation_fails_with_bad_size(self): - vol = {"size": '', - "display_name": "Volume Test Name", - "display_description": "Volume Test Desc", - "availability_zone": "zone1:host1"} - body = {"volume": vol} - req = fakes.HTTPRequest.blank('/v1/volumes') - self.assertRaises(exc.InvalidInput, - self.controller.create, - req, - body) - - def 
test_volume_creation_fails_with_bad_availability_zone(self): - vol = {"size": '1', - "name": "Volume Test Name", - "description": "Volume Test Desc", - "availability_zone": "zonen:hostn"} - body = {"volume": vol} - req = fakes.HTTPRequest.blank('/v1/volumes') - self.assertRaises(exc.InvalidInput, - self.controller.create, - req, body) - - def test_volume_create_with_image_id(self): - self.mock_object(volume_api.API, "create", - v2_fakes.fake_volume_api_create) - self.mock_object(db.sqlalchemy.api, '_volume_type_get_full', - v2_fakes.fake_volume_type_get) - - self.ext_mgr.extensions = {'os-image-create': 'fake'} - test_id = "c905cedb-7281-47e4-8a62-f26bc5fc4c77" - vol = {"size": '1', - "display_name": "Volume Test Name", - "display_description": "Volume Test Desc", - "availability_zone": "nova", - "imageRef": test_id} - expected = {'volume': {'status': 'fakestatus', - 'display_description': 'Volume Test Desc', - 'availability_zone': 'nova', - 'display_name': 'Volume Test Name', - 'encrypted': False, - 'attachments': [], - 'multiattach': 'false', - 'bootable': 'false', - 'volume_type': 'vol_type_name', - 'image_id': test_id, - 'snapshot_id': None, - 'source_volid': None, - 'metadata': {}, - 'id': fake.VOLUME_ID, - 'created_at': datetime.datetime( - 1900, 1, 1, 1, 1, 1, - tzinfo=iso8601.iso8601.Utc()), - 'size': 1}} - body = {"volume": vol} - req = fakes.HTTPRequest.blank('/v1/volumes') - res_dict = self.controller.create(req, body) - self.assertEqual(expected, res_dict) - - def test_volume_create_with_image_id_is_integer(self): - self.mock_object(volume_api.API, "create", v2_fakes.fake_volume_create) - self.ext_mgr.extensions = {'os-image-create': 'fake'} - - vol = {"size": '1', - "display_name": "Volume Test Name", - "display_description": "Volume Test Desc", - "availability_zone": "cinder", - "imageRef": 1234} - body = {"volume": vol} - req = fakes.HTTPRequest.blank('/v1/volumes') - self.assertRaises(webob.exc.HTTPBadRequest, - self.controller.create, - req, - 
body) - - def test_volume_create_with_image_id_not_uuid_format(self): - self.mock_object(volume_api.API, "create", v2_fakes.fake_volume_create) - self.mock_object(fake_image._FakeImageService, - "detail", - v2_fakes.fake_image_service_detail) - self.ext_mgr.extensions = {'os-image-create': 'fake'} - vol = {"size": '1', - "display_name": "Volume Test Name", - "display_description": "Volume Test Desc", - "availability_zone": "cinder", - "imageRef": '12345'} - body = {"volume": vol} - req = fakes.HTTPRequest.blank('/v1/volumes') - self.assertRaises(webob.exc.HTTPBadRequest, - self.controller.create, - req, - body) - - def test_volume_create_with_image_id_with_empty_string(self): - self.mock_object(volume_api.API, "create", v2_fakes.fake_volume_create) - self.mock_object(fake_image._FakeImageService, - "detail", - v2_fakes.fake_image_service_detail) - self.ext_mgr.extensions = {'os-image-create': 'fake'} - vol = {"size": 1, - "display_name": "Volume Test Name", - "display_description": "Volume Test Desc", - "availability_zone": "cinder", - "imageRef": ''} - body = {"volume": vol} - req = fakes.HTTPRequest.blank('/v1/volumes') - self.assertRaises(webob.exc.HTTPBadRequest, - self.controller.create, - req, - body) - - def test_volume_update(self): - self.mock_object(volume_api.API, 'get', v2_fakes.fake_volume_api_get) - self.mock_object(volume_api.API, "update", v2_fakes.fake_volume_update) - self.mock_object(db.sqlalchemy.api, '_volume_type_get_full', - v2_fakes.fake_volume_type_get) - self.mock_object(db, 'volume_admin_metadata_get', - return_value={'attached_mode': 'rw', - 'readonly': 'False'}) - - updates = { - "display_name": "Updated Test Name", - } - body = {"volume": updates} - req = fakes.HTTPRequest.blank('/v1/volumes/%s' % fake.VOLUME_ID) - self.assertEqual(0, len(self.notifier.notifications)) - res_dict = self.controller.update(req, fake.VOLUME_ID, body) - expected = {'volume': { - 'status': 'fakestatus', - 'display_description': 'displaydesc', - 
'availability_zone': 'fakeaz', - 'display_name': 'Updated Test Name', - 'encrypted': False, - 'attachments': [], - 'multiattach': 'false', - 'bootable': 'false', - 'volume_type': 'vol_type_name', - 'snapshot_id': None, - 'source_volid': None, - 'metadata': {'attached_mode': 'rw', - 'readonly': 'False'}, - 'id': fake.VOLUME_ID, - 'created_at': datetime.datetime(1900, 1, 1, 1, 1, 1, - tzinfo=iso8601.iso8601.Utc()), - 'size': 1}} - self.assertEqual(expected, res_dict) - self.assertEqual(2, len(self.notifier.notifications)) - - def test_volume_update_metadata(self): - self.mock_object(volume_api.API, 'get', v2_fakes.fake_volume_api_get) - self.mock_object(volume_api.API, "update", v2_fakes.fake_volume_update) - self.mock_object(db.sqlalchemy.api, '_volume_type_get_full', - v2_fakes.fake_volume_type_get) - - updates = { - "metadata": {"qos_max_iops": '2000'} - } - body = {"volume": updates} - req = fakes.HTTPRequest.blank('/v1/volumes/%s' % fake.VOLUME_ID) - self.assertEqual(0, len(self.notifier.notifications)) - res_dict = self.controller.update(req, fake.VOLUME_ID, body) - expected = {'volume': { - 'status': 'fakestatus', - 'display_description': 'displaydesc', - 'availability_zone': 'fakeaz', - 'display_name': 'displayname', - 'encrypted': False, - 'attachments': [], - 'multiattach': 'false', - 'bootable': 'false', - 'volume_type': 'vol_type_name', - 'snapshot_id': None, - 'source_volid': None, - 'metadata': {"qos_max_iops": '2000', - "readonly": "False", - "attached_mode": "rw"}, - 'id': fake.VOLUME_ID, - 'created_at': datetime.datetime(1900, 1, 1, 1, 1, 1, - tzinfo=iso8601.iso8601.Utc()), - 'size': 1 - }} - self.assertEqual(expected, res_dict) - self.assertEqual(2, len(self.notifier.notifications)) - - def test_volume_update_with_admin_metadata(self): - self.mock_object(volume_api.API, "update", v2_fakes.fake_volume_update) - - volume = v2_fakes.create_fake_volume(fake.VOLUME_ID) - del volume['name'] - del volume['volume_type'] - del volume['volume_type_id'] - 
volume['metadata'] = {'key': 'value'} - db.volume_create(context.get_admin_context(), volume) - db.volume_admin_metadata_update(context.get_admin_context(), - fake.VOLUME_ID, - {"readonly": "True", - "invisible_key": "invisible_value"}, - False) - values = {'volume_id': fake.VOLUME_ID, } - attachment = db.volume_attach(context.get_admin_context(), values) - db.volume_attached(context.get_admin_context(), - attachment['id'], fake.INSTANCE_ID, - None, '/') - - updates = { - "display_name": "Updated Test Name", - } - body = {"volume": updates} - req = fakes.HTTPRequest.blank('/v1/volumes/%s' % fake.VOLUME_ID) - self.assertEqual(0, len(self.notifier.notifications)) - admin_ctx = context.RequestContext(fake.USER_ID, fake.PROJECT_ID, True) - req.environ['cinder.context'] = admin_ctx - res_dict = self.controller.update(req, fake.VOLUME_ID, body) - expected = {'volume': { - 'status': 'in-use', - 'display_description': 'displaydesc', - 'availability_zone': 'fakeaz', - 'display_name': 'Updated Test Name', - 'encrypted': False, - 'attachments': [{ - 'attachment_id': attachment['id'], - 'id': fake.VOLUME_ID, - 'volume_id': fake.VOLUME_ID, - 'server_id': fake.INSTANCE_ID, - 'host_name': None, - 'device': '/' - }], - 'multiattach': 'false', - 'bootable': 'false', - 'volume_type': None, - 'snapshot_id': None, - 'source_volid': None, - 'metadata': {'key': 'value', - 'readonly': 'True'}, - 'id': fake.VOLUME_ID, - 'created_at': datetime.datetime(1900, 1, 1, 1, 1, 1, - tzinfo=iso8601.iso8601.Utc()), - 'size': 1}} - self.assertEqual(expected, res_dict) - self.assertEqual(2, len(self.notifier.notifications)) - - def test_update_empty_body(self): - body = {} - req = fakes.HTTPRequest.blank('/v1/volumes/1') - self.assertRaises(webob.exc.HTTPUnprocessableEntity, - self.controller.update, - req, fake.VOLUME_ID, body) - - def test_update_invalid_body(self): - body = {'display_name': 'missing top level volume key'} - req = fakes.HTTPRequest.blank('/v1/volumes/1') - 
self.assertRaises(webob.exc.HTTPUnprocessableEntity, - self.controller.update, - req, fake.VOLUME_ID, body) - - def test_update_not_found(self): - self.mock_object(volume_api.API, "get", - v2_fakes.fake_volume_get_notfound) - updates = { - "name": "Updated Test Name", - } - - body = {"volume": updates} - req = fakes.HTTPRequest.blank( - '/v1/volumes/%s' % fake.WILL_NOT_BE_FOUND_ID) - self.assertRaises(exc.VolumeNotFound, - self.controller.update, - req, fake.WILL_NOT_BE_FOUND_ID, body) - - def test_volume_list(self): - self.mock_object(volume_api.API, 'get_all', - v2_fakes.fake_volume_api_get_all_by_project) - self.mock_object(db.sqlalchemy.api, '_volume_type_get_full', - v2_fakes.fake_volume_type_get) - - req = fakes.HTTPRequest.blank('/v1/volumes') - res_dict = self.controller.index(req) - expected = {'volumes': [{'status': 'fakestatus', - 'display_description': 'displaydesc', - 'availability_zone': 'fakeaz', - 'display_name': 'displayname', - 'encrypted': False, - 'attachments': [], - 'multiattach': 'false', - 'bootable': 'false', - 'volume_type': 'vol_type_name', - 'snapshot_id': None, - 'source_volid': None, - 'metadata': {'attached_mode': 'rw', - 'readonly': 'False'}, - 'id': fake.VOLUME_ID, - 'created_at': datetime.datetime( - 1900, 1, 1, 1, 1, 1, - tzinfo=iso8601.iso8601.Utc()), - 'size': 1}]} - self.assertEqual(expected, res_dict) - # Finally test that we cached the returned volumes - self.assertEqual(1, len(req.cached_resource())) - - def test_volume_list_with_admin_metadata(self): - volume = v2_fakes.create_fake_volume(fake.VOLUME_ID) - del volume['name'] - del volume['volume_type'] - del volume['volume_type_id'] - volume['metadata'] = {'key': 'value'} - db.volume_create(context.get_admin_context(), volume) - db.volume_admin_metadata_update(context.get_admin_context(), - fake.VOLUME_ID, - {"readonly": "True", - "invisible_key": "invisible_value"}, - False) - values = {'volume_id': fake.VOLUME_ID, } - attachment = 
db.volume_attach(context.get_admin_context(), values) - db.volume_attached(context.get_admin_context(), - attachment['id'], fake.INSTANCE_ID, None, '/') - - req = fakes.HTTPRequest.blank('/v1/volumes') - admin_ctx = context.RequestContext(fake.USER_ID, fake.PROJECT_ID, True) - req.environ['cinder.context'] = admin_ctx - res_dict = self.controller.index(req) - expected = {'volumes': [{'status': 'in-use', - 'display_description': 'displaydesc', - 'availability_zone': 'fakeaz', - 'display_name': 'displayname', - 'encrypted': False, - 'attachments': [ - {'attachment_id': attachment['id'], - 'device': '/', - 'server_id': fake.INSTANCE_ID, - 'host_name': None, - 'id': fake.VOLUME_ID, - 'volume_id': fake.VOLUME_ID}], - 'multiattach': 'false', - 'bootable': 'false', - 'volume_type': None, - 'snapshot_id': None, - 'source_volid': None, - 'metadata': {'key': 'value', - 'readonly': 'True'}, - 'id': fake.VOLUME_ID, - 'created_at': datetime.datetime( - 1900, 1, 1, 1, 1, 1, - tzinfo=iso8601.iso8601.Utc()), - 'size': 1}]} - self.assertEqual(expected, res_dict) - - def test_volume_list_detail(self): - self.mock_object(volume_api.API, 'get_all', - v2_fakes.fake_volume_api_get_all_by_project) - self.mock_object(db.sqlalchemy.api, '_volume_type_get_full', - v2_fakes.fake_volume_type_get) - - req = fakes.HTTPRequest.blank('/v1/volumes/detail') - res_dict = self.controller.detail(req) - expected = {'volumes': [{'status': 'fakestatus', - 'display_description': 'displaydesc', - 'availability_zone': 'fakeaz', - 'display_name': 'displayname', - 'encrypted': False, - 'attachments': [], - 'multiattach': 'false', - 'bootable': 'false', - 'volume_type': 'vol_type_name', - 'snapshot_id': None, - 'source_volid': None, - 'metadata': {'attached_mode': 'rw', - 'readonly': 'False'}, - 'id': fake.VOLUME_ID, - 'created_at': datetime.datetime( - 1900, 1, 1, 1, 1, 1, - tzinfo=iso8601.iso8601.Utc()), - 'size': 1}]} - self.assertEqual(expected, res_dict) - # Finally test that we cached the returned 
volumes - self.assertEqual(1, len(req.cached_resource())) - - def test_volume_list_detail_with_admin_metadata(self): - volume = v2_fakes.create_fake_volume(fake.VOLUME_ID) - del volume['name'] - del volume['volume_type'] - del volume['volume_type_id'] - volume['metadata'] = {'key': 'value'} - db.volume_create(context.get_admin_context(), volume) - db.volume_admin_metadata_update(context.get_admin_context(), - fake.VOLUME_ID, - {"readonly": "True", - "invisible_key": "invisible_value"}, - False) - values = {'volume_id': fake.VOLUME_ID, } - attachment = db.volume_attach(context.get_admin_context(), values) - db.volume_attached(context.get_admin_context(), - attachment['id'], fake.INSTANCE_ID, None, '/') - - req = fakes.HTTPRequest.blank('/v1/volumes/detail') - admin_ctx = context.RequestContext(fake.USER_ID, fake.PROJECT_ID, True) - req.environ['cinder.context'] = admin_ctx - res_dict = self.controller.index(req) - expected = {'volumes': [{'status': 'in-use', - 'display_description': 'displaydesc', - 'availability_zone': 'fakeaz', - 'display_name': 'displayname', - 'encrypted': False, - 'attachments': [ - {'attachment_id': attachment['id'], - 'device': '/', - 'server_id': fake.INSTANCE_ID, - 'host_name': None, - 'id': fake.VOLUME_ID, - 'volume_id': fake.VOLUME_ID}], - 'multiattach': 'false', - 'bootable': 'false', - 'volume_type': None, - 'snapshot_id': None, - 'source_volid': None, - 'metadata': {'key': 'value', - 'readonly': 'True'}, - 'id': fake.VOLUME_ID, - 'created_at': datetime.datetime( - 1900, 1, 1, 1, 1, 1, - tzinfo=iso8601.iso8601.Utc()), - 'size': 1}]} - self.assertEqual(expected, res_dict) - - def test_volume_show(self): - self.mock_object(volume_api.API, 'get', v2_fakes.fake_volume_api_get) - self.mock_object(db.sqlalchemy.api, '_volume_type_get_full', - v2_fakes.fake_volume_type_get) - - req = fakes.HTTPRequest.blank('/v1/volumes/%s' % fake.VOLUME_ID) - res_dict = self.controller.show(req, fake.VOLUME_ID) - expected = {'volume': {'status': 'fakestatus', 
- 'display_description': 'displaydesc', - 'availability_zone': 'fakeaz', - 'display_name': 'displayname', - 'encrypted': False, - 'attachments': [], - 'multiattach': 'false', - 'bootable': 'false', - 'volume_type': 'vol_type_name', - 'snapshot_id': None, - 'source_volid': None, - 'metadata': {'attached_mode': 'rw', - 'readonly': 'False'}, - 'id': fake.VOLUME_ID, - 'created_at': datetime.datetime( - 1900, 1, 1, 1, 1, 1, - tzinfo=iso8601.iso8601.Utc()), - 'size': 1}} - self.assertEqual(expected, res_dict) - # Finally test that we cached the returned volume - self.assertIsNotNone(req.cached_resource_by_id(fake.VOLUME_ID)) - - def test_volume_show_no_attachments(self): - def fake_volume_get(self, context, volume_id, **kwargs): - vol = v2_fakes.create_fake_volume( - volume_id, - attach_status=fields.VolumeAttachStatus.DETACHED) - return fake_volume.fake_volume_obj(context, **vol) - - def fake_volume_admin_metadata_get(context, volume_id, **kwargs): - return v2_fakes.fake_volume_admin_metadata_get( - context, volume_id, - attach_status=fields.VolumeAttachStatus.DETACHED) - - self.mock_object(volume_api.API, 'get', fake_volume_get) - self.mock_object(db, 'volume_admin_metadata_get', - fake_volume_admin_metadata_get) - self.mock_object(db.sqlalchemy.api, '_volume_type_get_full', - v2_fakes.fake_volume_type_get) - - req = fakes.HTTPRequest.blank('/v1/volumes/%s' % fake.VOLUME_ID) - res_dict = self.controller.show(req, fake.VOLUME_ID) - expected = {'volume': {'status': 'fakestatus', - 'display_description': 'displaydesc', - 'availability_zone': 'fakeaz', - 'display_name': 'displayname', - 'encrypted': False, - 'attachments': [], - 'multiattach': 'false', - 'bootable': 'false', - 'volume_type': 'vol_type_name', - 'snapshot_id': None, - 'source_volid': None, - 'metadata': {'readonly': 'False'}, - 'id': fake.VOLUME_ID, - 'created_at': datetime.datetime( - 1900, 1, 1, 1, 1, 1, - tzinfo=iso8601.iso8601.Utc()), - 'size': 1}} - - self.assertEqual(expected, res_dict) - - def 
test_volume_show_no_volume(self): - self.mock_object(volume_api.API, "get", - v2_fakes.fake_volume_get_notfound) - - req = fakes.HTTPRequest.blank( - '/v1/volumes/%s' % fake.WILL_NOT_BE_FOUND_ID) - self.assertRaises(exc.VolumeNotFound, - self.controller.show, - req, - fake.WILL_NOT_BE_FOUND_ID) - # Finally test that nothing was cached - self.assertIsNone(req.cached_resource_by_id(fake.WILL_NOT_BE_FOUND_ID)) - - def _create_db_volumes(self, num_volumes): - volumes = [utils.create_volume(self.ctxt, display_name='vol%s' % i) - for i in range(num_volumes)] - for vol in volumes: - self.addCleanup(db.volume_destroy, self.ctxt, vol.id) - volumes.reverse() - return volumes - - def test_volume_detail_limit_offset(self): - created_volumes = self._create_db_volumes(2) - - def volume_detail_limit_offset(is_admin): - req = fakes.HTTPRequest.blank('/v1/volumes/detail?limit=2' - '&offset=1', - use_admin_context=is_admin) - res_dict = self.controller.index(req) - volumes = res_dict['volumes'] - self.assertEqual(1, len(volumes)) - self.assertEqual(created_volumes[1].id, volumes[0]['id']) - - # admin case - volume_detail_limit_offset(is_admin=True) - # non_admin case - volume_detail_limit_offset(is_admin=False) - - def test_volume_show_with_admin_metadata(self): - volume = v2_fakes.create_fake_volume(fake.VOLUME_ID) - del volume['name'] - del volume['volume_type'] - del volume['volume_type_id'] - volume['metadata'] = {'key': 'value'} - db.volume_create(context.get_admin_context(), volume) - db.volume_admin_metadata_update(context.get_admin_context(), - fake.VOLUME_ID, - {"readonly": "True", - "invisible_key": "invisible_value"}, - False) - values = {'volume_id': fake.VOLUME_ID, } - attachment = db.volume_attach(context.get_admin_context(), values) - db.volume_attached(context.get_admin_context(), - attachment['id'], fake.INSTANCE_ID, None, '/') - - req = fakes.HTTPRequest.blank('/v1/volumes/%s' % fake.VOLUME_ID) - admin_ctx = context.RequestContext(fake.USER_ID, fake.PROJECT_ID, 
True) - req.environ['cinder.context'] = admin_ctx - res_dict = self.controller.show(req, fake.VOLUME_ID) - expected = {'volume': {'status': 'in-use', - 'display_description': 'displaydesc', - 'availability_zone': 'fakeaz', - 'display_name': 'displayname', - 'encrypted': False, - 'attachments': [ - {'attachment_id': attachment['id'], - 'device': '/', - 'server_id': fake.INSTANCE_ID, - 'host_name': None, - 'id': fake.VOLUME_ID, - 'volume_id': fake.VOLUME_ID}], - 'multiattach': 'false', - 'bootable': 'false', - 'volume_type': None, - 'snapshot_id': None, - 'source_volid': None, - 'metadata': {'key': 'value', - 'readonly': 'True'}, - 'id': fake.VOLUME_ID, - 'created_at': datetime.datetime( - 1900, 1, 1, 1, 1, 1, - tzinfo=iso8601.iso8601.Utc()), - 'size': 1}} - self.assertEqual(expected, res_dict) - - def test_volume_show_with_encrypted_volume(self): - def fake_volume_get(self, context, volume_id, **kwargs): - vol = v2_fakes.create_fake_volume(volume_id, - encryption_key_id=fake.KEY_ID) - return fake_volume.fake_volume_obj(context, **vol) - - self.mock_object(volume_api.API, 'get', fake_volume_get) - self.mock_object(db.sqlalchemy.api, '_volume_type_get_full', - v2_fakes.fake_volume_type_get) - - req = fakes.HTTPRequest.blank('/v1/volumes/%s' % fake.VOLUME_ID) - res_dict = self.controller.show(req, fake.VOLUME_ID) - self.assertTrue(res_dict['volume']['encrypted']) - - def test_volume_show_with_unencrypted_volume(self): - self.mock_object(volume_api.API, 'get', v2_fakes.fake_volume_api_get) - self.mock_object(db.sqlalchemy.api, '_volume_type_get_full', - v2_fakes.fake_volume_type_get) - - req = fakes.HTTPRequest.blank('/v1/volumes/%s' % fake.VOLUME_ID) - res_dict = self.controller.show(req, fake.VOLUME_ID) - self.assertEqual(False, res_dict['volume']['encrypted']) - - @mock.patch.object(volume_api.API, 'delete', v2_fakes.fake_volume_delete) - @mock.patch.object(volume_api.API, 'get', v2_fakes.fake_volume_get) - def test_volume_delete(self): - req = 
fakes.HTTPRequest.blank('/v1/volumes/%s' % fake.VOLUME_ID) - resp = self.controller.delete(req, fake.VOLUME_ID) - self.assertEqual(http_client.ACCEPTED, resp.status_int) - - def test_volume_delete_no_volume(self): - self.mock_object(volume_api.API, "get", - v2_fakes.fake_volume_get_notfound) - - req = fakes.HTTPRequest.blank( - '/v1/volumes/%s' % fake.WILL_NOT_BE_FOUND_ID) - self.assertRaises(exc.VolumeNotFound, - self.controller.delete, - req, fake.WILL_NOT_BE_FOUND_ID) - - def test_admin_list_volumes_limited_to_project(self): - self.mock_object(db, 'volume_get_all_by_project', - v2_fakes.fake_volume_get_all_by_project) - - req = fakes.HTTPRequest.blank('/v1/%s/volumes' % fake.PROJECT_ID, - use_admin_context=True) - res = self.controller.index(req) - - self.assertIn('volumes', res) - self.assertEqual(1, len(res['volumes'])) - - @mock.patch.object(db, 'volume_get_all', v2_fakes.fake_volume_get_all) - @mock.patch.object(db, 'volume_get_all_by_project', - v2_fakes.fake_volume_get_all_by_project) - def test_admin_list_volumes_all_tenants(self): - req = fakes.HTTPRequest.blank( - '/v1/%s/volumes?all_tenants=1' % fake.PROJECT_ID, - use_admin_context=True) - res = self.controller.index(req) - self.assertIn('volumes', res) - self.assertEqual(3, len(res['volumes'])) - - @mock.patch.object(db, 'volume_get_all', v2_fakes.fake_volume_get_all) - @mock.patch.object(db, 'volume_get_all_by_project', - v2_fakes.fake_volume_get_all_by_project) - @mock.patch.object(volume_api.API, 'get', v2_fakes.fake_volume_get) - def test_all_tenants_non_admin_gets_all_tenants(self): - req = fakes.HTTPRequest.blank( - '/v1/%s/volumes?all_tenants=1' % fake.PROJECT_ID) - res = self.controller.index(req) - self.assertIn('volumes', res) - self.assertEqual(1, len(res['volumes'])) - - @mock.patch.object(db, 'volume_get_all_by_project', - v2_fakes.fake_volume_get_all_by_project) - @mock.patch.object(volume_api.API, 'get', v2_fakes.fake_volume_get) - def test_non_admin_get_by_project(self): - req = 
fakes.HTTPRequest.blank('/v1/%s/volumes' % fake.PROJECT_ID) - res = self.controller.index(req) - self.assertIn('volumes', res) - self.assertEqual(1, len(res['volumes'])) - - def _unprocessable_volume_create(self, body): - req = fakes.HTTPRequest.blank('/v1/%s/volumes' % fake.PROJECT_ID) - req.method = 'POST' - - self.assertRaises(webob.exc.HTTPUnprocessableEntity, - self.controller.create, req, body) - - def test_create_no_body(self): - self._unprocessable_volume_create(body=None) - - def test_create_missing_volume(self): - body = {'foo': {'a': 'b'}} - self._unprocessable_volume_create(body=body) - - def test_create_malformed_entity(self): - body = {'volume': 'string'} - self._unprocessable_volume_create(body=body) diff --git a/cinder/tests/unit/api/v2/__init__.py b/cinder/tests/unit/api/v2/__init__.py deleted file mode 100644 index e69de29bb..000000000 diff --git a/cinder/tests/unit/api/v2/fakes.py b/cinder/tests/unit/api/v2/fakes.py deleted file mode 100644 index 0052373d1..000000000 --- a/cinder/tests/unit/api/v2/fakes.py +++ /dev/null @@ -1,263 +0,0 @@ -# Copyright 2010 OpenStack Foundation -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -import datetime -import iso8601 - -from cinder import exception as exc -from cinder import objects -from cinder.objects import fields -from cinder.tests.unit import fake_constants as fake -from cinder.tests.unit import fake_volume -from cinder import utils - - -DEFAULT_VOL_NAME = "displayname" -DEFAULT_VOL_DESCRIPTION = "displaydesc" -DEFAULT_VOL_SIZE = 1 -DEFAULT_VOL_TYPE = "vol_type_name" -DEFAULT_VOL_STATUS = "fakestatus" -DEFAULT_VOL_ID = fake.VOLUME_ID - -# TODO(vbala): api.v1 tests use hard-coded "fakeaz" for verifying -# post-conditions. Update value to "zone1:host1" once we remove -# api.v1 tests and use it in api.v2 tests. -DEFAULT_AZ = "fakeaz" - - -def create_fake_volume(id, **kwargs): - volume = { - 'id': id, - 'user_id': fake.USER_ID, - 'project_id': fake.PROJECT_ID, - 'host': 'fakehost', - 'size': DEFAULT_VOL_SIZE, - 'availability_zone': DEFAULT_AZ, - 'status': DEFAULT_VOL_STATUS, - 'migration_status': None, - 'attach_status': fields.VolumeAttachStatus.ATTACHED, - 'name': 'vol name', - 'display_name': DEFAULT_VOL_NAME, - 'display_description': DEFAULT_VOL_DESCRIPTION, - 'updated_at': datetime.datetime(1900, 1, 1, 1, 1, 1, - tzinfo=iso8601.iso8601.Utc()), - 'created_at': datetime.datetime(1900, 1, 1, 1, 1, 1, - tzinfo=iso8601.iso8601.Utc()), - 'snapshot_id': None, - 'source_volid': None, - 'volume_type_id': '3e196c20-3c06-11e2-81c1-0800200c9a66', - 'encryption_key_id': None, - 'volume_admin_metadata': [{'key': 'attached_mode', 'value': 'rw'}, - {'key': 'readonly', 'value': 'False'}], - 'bootable': False, - 'launched_at': datetime.datetime(1900, 1, 1, 1, 1, 1, - tzinfo=iso8601.iso8601.Utc()), - 'volume_type': fake_volume.fake_db_volume_type(name=DEFAULT_VOL_TYPE), - 'replication_status': 'disabled', - 'replication_extended_status': None, - 'replication_driver_data': None, - 'volume_attachment': [], - 'multiattach': False, - } - - volume.update(kwargs) - if kwargs.get('volume_glance_metadata', None): - volume['bootable'] = True - if 
kwargs.get('attach_status') == fields.VolumeAttachStatus.DETACHED: - del volume['volume_admin_metadata'][0] - return volume - - -def fake_volume_create(self, context, size, name, description, snapshot=None, - **param): - vol = create_fake_volume(DEFAULT_VOL_ID) - vol['size'] = size - vol['display_name'] = name - vol['display_description'] = description - source_volume = param.get('source_volume') or {} - vol['source_volid'] = source_volume.get('id') - vol['bootable'] = False - vol['volume_attachment'] = [] - vol['multiattach'] = utils.get_bool_param('multiattach', param) - try: - vol['snapshot_id'] = snapshot['id'] - except (KeyError, TypeError): - vol['snapshot_id'] = None - vol['availability_zone'] = param.get('availability_zone', 'fakeaz') - return vol - - -def fake_volume_api_create(self, context, *args, **kwargs): - vol = fake_volume_create(self, context, *args, **kwargs) - return fake_volume.fake_volume_obj(context, **vol) - - -def fake_image_service_detail(self, context, **kwargs): - filters = kwargs.get('filters', {'name': ''}) - if filters['name'] == "Fedora-x86_64-20-20140618-sda": - return [{'id': "c905cedb-7281-47e4-8a62-f26bc5fc4c77"}] - elif filters['name'] == "multi": - return [{'id': "c905cedb-7281-47e4-8a62-f26bc5fc4c77"}, - {'id': "c905cedb-abcd-47e4-8a62-f26bc5fc4c77"}] - return [] - - -def fake_volume_create_from_image(self, context, size, name, description, - snapshot, volume_type, metadata, - availability_zone): - vol = create_fake_volume(fake.VOLUME_ID) - vol['status'] = 'creating' - vol['size'] = size - vol['display_name'] = name - vol['display_description'] = description - vol['availability_zone'] = 'cinder' - vol['bootable'] = False - return vol - - -def fake_volume_update(self, context, *args, **param): - pass - - -def fake_volume_delete(self, context, *args, **param): - pass - - -def fake_volume_get(self, context, volume_id, viewable_admin_meta=False): - if viewable_admin_meta: - return create_fake_volume(volume_id) - else: - volume = 
create_fake_volume(volume_id) - del volume['volume_admin_metadata'] - return volume - - -def fake_volume_get_notfound(self, context, - volume_id, viewable_admin_meta=False): - raise exc.VolumeNotFound(volume_id) - - -def fake_volume_get_db(context, volume_id): - if context.is_admin: - return create_fake_volume(volume_id) - else: - volume = create_fake_volume(volume_id) - del volume['volume_admin_metadata'] - return volume - - -def fake_volume_api_get(self, context, volume_id, viewable_admin_meta=False): - vol = create_fake_volume(volume_id) - return fake_volume.fake_volume_obj(context, **vol) - - -def fake_volume_get_all(context, search_opts=None, marker=None, limit=None, - sort_keys=None, sort_dirs=None, filters=None, - viewable_admin_meta=False, offset=None): - return [create_fake_volume(fake.VOLUME_ID, project_id=fake.PROJECT_ID), - create_fake_volume(fake.VOLUME2_ID, project_id=fake.PROJECT2_ID), - create_fake_volume(fake.VOLUME3_ID, project_id=fake.PROJECT3_ID)] - - -def fake_volume_get_all_by_project(self, context, marker, limit, - sort_keys=None, sort_dirs=None, - filters=None, - viewable_admin_meta=False, offset=None): - return [fake_volume_get(self, context, fake.VOLUME_ID, - viewable_admin_meta=True)] - - -def fake_volume_api_get_all_by_project(self, context, marker, limit, - sort_keys=None, sort_dirs=None, - filters=None, - viewable_admin_meta=False, - offset=None): - vol = fake_volume_get(self, context, fake.VOLUME_ID, - viewable_admin_meta=viewable_admin_meta) - vol_obj = fake_volume.fake_volume_obj(context, **vol) - return objects.VolumeList(objects=[vol_obj]) - - -def fake_snapshot(id, **kwargs): - snapshot = {'id': id, - 'volume_id': fake.VOLUME_ID, - 'status': fields.SnapshotStatus.AVAILABLE, - 'volume_size': 100, - 'created_at': None, - 'display_name': 'Default name', - 'display_description': 'Default description', - 'project_id': fake.PROJECT_ID, - 'snapshot_metadata': []} - - snapshot.update(kwargs) - return snapshot - - -def 
fake_snapshot_get_all(context, filters=None, marker=None, limit=None, - sort_keys=None, sort_dirs=None, offset=None): - return [fake_snapshot(fake.VOLUME_ID, project_id=fake.PROJECT_ID), - fake_snapshot(fake.VOLUME2_ID, project_id=fake.PROJECT2_ID), - fake_snapshot(fake.VOLUME3_ID, project_id=fake.PROJECT3_ID)] - - -def fake_snapshot_get_all_by_project(context, project_id, filters=None, - marker=None, limit=None, sort_keys=None, - sort_dirs=None, offset=None): - return [fake_snapshot(fake.SNAPSHOT_ID)] - - -def fake_snapshot_update(self, context, *args, **param): - pass - - -def fake_service_get_all(*args, **kwargs): - return [{'availability_zone': "zone1:host1", "disabled": 0}] - - -def fake_service_get_all_by_topic(context, topic, disabled=None): - return [{'availability_zone': "zone1:host1", "disabled": 0}] - - -def fake_snapshot_get(self, context, snapshot_id): - if snapshot_id == fake.WILL_NOT_BE_FOUND_ID: - raise exc.SnapshotNotFound(snapshot_id=snapshot_id) - - return fake_snapshot(snapshot_id) - - -def fake_consistencygroup_get_notfound(self, context, cg_id): - raise exc.GroupNotFound(group_id=cg_id) - - -def fake_volume_type_get(context, id, *args, **kwargs): - return {'id': id, - 'name': 'vol_type_name', - 'description': 'A fake volume type', - 'is_public': True, - 'projects': [], - 'extra_specs': {}, - 'created_at': None, - 'deleted_at': None, - 'updated_at': None, - 'qos_specs_id': fake.QOS_SPEC_ID, - 'deleted': False} - - -def fake_volume_admin_metadata_get(context, volume_id, **kwargs): - admin_meta = {'attached_mode': 'rw', 'readonly': 'False'} - if kwargs.get('attach_status') == fields.VolumeAttachStatus.DETACHED: - del admin_meta['attached_mode'] - - return admin_meta diff --git a/cinder/tests/unit/api/v2/test_limits.py b/cinder/tests/unit/api/v2/test_limits.py deleted file mode 100644 index 6b93effd5..000000000 --- a/cinder/tests/unit/api/v2/test_limits.py +++ /dev/null @@ -1,813 +0,0 @@ -# Copyright 2011 OpenStack Foundation -# All Rights 
Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -""" -Tests dealing with HTTP rate-limiting. -""" - -from oslo_serialization import jsonutils -import six -from six.moves import http_client -from six.moves import range -import webob - -from cinder.api.v2 import limits -from cinder.api import views -import cinder.context -from cinder import test - - -TEST_LIMITS = [ - limits.Limit("GET", "/delayed", "^/delayed", 1, limits.PER_MINUTE), - limits.Limit("POST", "*", ".*", 7, limits.PER_MINUTE), - limits.Limit("POST", "/volumes", "^/volumes", 3, limits.PER_MINUTE), - limits.Limit("PUT", "*", "", 10, limits.PER_MINUTE), - limits.Limit("PUT", "/volumes", "^/volumes", 5, limits.PER_MINUTE), -] -NS = { - 'atom': 'http://www.w3.org/2005/Atom', - 'ns': 'http://docs.openstack.org/common/api/v1.0', -} - - -class BaseLimitTestSuite(test.TestCase): - """Base test suite which provides relevant stubs and time abstraction.""" - - def setUp(self): - super(BaseLimitTestSuite, self).setUp() - self.time = 0.0 - self.mock_object(limits.Limit, "_get_time", self._get_time) - self.absolute_limits = {} - - def fake_get_project_quotas(context, project_id, usages=True): - return {k: dict(limit=v) for k, v in self.absolute_limits.items()} - - self.mock_object(cinder.quota.QUOTAS, "get_project_quotas", - fake_get_project_quotas) - - def _get_time(self): - """Return the "time" according to this test suite.""" - return self.time - - -class LimitsControllerTest(BaseLimitTestSuite): - - 
"""Tests for `limits.LimitsController` class.""" - - def setUp(self): - """Run before each test.""" - super(LimitsControllerTest, self).setUp() - self.controller = limits.create_resource() - - def _get_index_request(self, accept_header="application/json"): - """Helper to set routing arguments.""" - request = webob.Request.blank("/") - request.accept = accept_header - request.environ["wsgiorg.routing_args"] = (None, { - "action": "index", - "controller": "", - }) - context = cinder.context.RequestContext('testuser', 'testproject') - request.environ["cinder.context"] = context - return request - - def _populate_limits(self, request): - """Put limit info into a request.""" - _limits = [ - limits.Limit("GET", "*", ".*", 10, 60).display(), - limits.Limit("POST", "*", ".*", 5, 60 * 60).display(), - limits.Limit("GET", "changes-since*", "changes-since", - 5, 60).display(), - ] - request.environ["cinder.limits"] = _limits - return request - - def test_empty_index_json(self): - """Test getting empty limit details in JSON.""" - request = self._get_index_request() - response = request.get_response(self.controller) - expected = { - "limits": { - "rate": [], - "absolute": {}, - }, - } - body = jsonutils.loads(response.body) - self.assertEqual(expected, body) - - def test_index_json(self): - """Test getting limit details in JSON.""" - request = self._get_index_request() - request = self._populate_limits(request) - self.absolute_limits = { - 'gigabytes': 512, - 'volumes': 5, - } - response = request.get_response(self.controller) - expected = { - "limits": { - "rate": [ - { - "regex": ".*", - "uri": "*", - "limit": [ - { - "verb": "GET", - "next-available": "1970-01-01T00:00:00", - "unit": "MINUTE", - "value": 10, - "remaining": 10, - }, - { - "verb": "POST", - "next-available": "1970-01-01T00:00:00", - "unit": "HOUR", - "value": 5, - "remaining": 5, - }, - ], - }, - { - "regex": "changes-since", - "uri": "changes-since*", - "limit": [ - { - "verb": "GET", - "next-available": 
"1970-01-01T00:00:00", - "unit": "MINUTE", - "value": 5, - "remaining": 5, - }, - ], - }, - - ], - "absolute": {"maxTotalVolumeGigabytes": 512, - "maxTotalVolumes": 5, }, - }, - } - body = jsonutils.loads(response.body) - self.assertEqual(expected, body) - - def _populate_limits_diff_regex(self, request): - """Put limit info into a request.""" - _limits = [ - limits.Limit("GET", "*", ".*", 10, 60).display(), - limits.Limit("GET", "*", "*.*", 10, 60).display(), - ] - request.environ["cinder.limits"] = _limits - return request - - def test_index_diff_regex(self): - """Test getting limit details in JSON.""" - request = self._get_index_request() - request = self._populate_limits_diff_regex(request) - response = request.get_response(self.controller) - expected = { - "limits": { - "rate": [ - { - "regex": ".*", - "uri": "*", - "limit": [ - { - "verb": "GET", - "next-available": "1970-01-01T00:00:00", - "unit": "MINUTE", - "value": 10, - "remaining": 10, - }, - ], - }, - { - "regex": "*.*", - "uri": "*", - "limit": [ - { - "verb": "GET", - "next-available": "1970-01-01T00:00:00", - "unit": "MINUTE", - "value": 10, - "remaining": 10, - }, - ], - }, - - ], - "absolute": {}, - }, - } - body = jsonutils.loads(response.body) - self.assertEqual(expected, body) - - def _test_index_absolute_limits_json(self, expected): - request = self._get_index_request() - response = request.get_response(self.controller) - body = jsonutils.loads(response.body) - self.assertEqual(expected, body['limits']['absolute']) - - def test_index_ignores_extra_absolute_limits_json(self): - self.absolute_limits = {'unknown_limit': 9001} - self._test_index_absolute_limits_json({}) - - -class TestLimiter(limits.Limiter): - pass - - -class LimitMiddlewareTest(BaseLimitTestSuite): - - """Tests for the `limits.RateLimitingMiddleware` class.""" - - @webob.dec.wsgify - def _empty_app(self, request): - """Do-nothing WSGI app.""" - pass - - def setUp(self): - """Prepare middleware for use through fake WSGI app.""" - 
super(LimitMiddlewareTest, self).setUp() - _limits = '(GET, *, .*, 1, MINUTE)' - self.app = limits.RateLimitingMiddleware(self._empty_app, _limits, - "%s.TestLimiter" % - self.__class__.__module__) - - def test_limit_class(self): - """Test that middleware selected correct limiter class.""" - self.assertIsInstance(self.app._limiter, TestLimiter) - - def test_good_request(self): - """Test successful GET request through middleware.""" - request = webob.Request.blank("/") - response = request.get_response(self.app) - self.assertEqual(http_client.OK, response.status_int) - - def test_limited_request_json(self): - """Test a rate-limited (413) GET request through middleware.""" - request = webob.Request.blank("/") - response = request.get_response(self.app) - self.assertEqual(http_client.OK, response.status_int) - - request = webob.Request.blank("/") - response = request.get_response(self.app) - self.assertEqual(http_client.REQUEST_ENTITY_TOO_LARGE, - response.status_int) - - self.assertIn('Retry-After', response.headers) - retry_after = int(response.headers['Retry-After']) - self.assertAlmostEqual(retry_after, 60, 1) - - body = jsonutils.loads(response.body) - expected = "Only 1 GET request(s) can be made to * every minute." 
- value = body["overLimitFault"]["details"].strip() - self.assertEqual(expected, value) - - -class LimitTest(BaseLimitTestSuite): - - """Tests for the `limits.Limit` class.""" - - def test_GET_no_delay(self): - """Test a limit handles 1 GET per second.""" - limit = limits.Limit("GET", "*", ".*", 1, 1) - delay = limit("GET", "/anything") - self.assertIsNone(delay) - self.assertEqual(0, limit.next_request) - self.assertEqual(0, limit.last_request) - - def test_GET_delay(self): - """Test two calls to 1 GET per second limit.""" - limit = limits.Limit("GET", "*", ".*", 1, 1) - delay = limit("GET", "/anything") - self.assertIsNone(delay) - - delay = limit("GET", "/anything") - self.assertEqual(1, delay) - self.assertEqual(1, limit.next_request) - self.assertEqual(0, limit.last_request) - - self.time += 4 - - delay = limit("GET", "/anything") - self.assertIsNone(delay) - self.assertEqual(4, limit.next_request) - self.assertEqual(4, limit.last_request) - - def test_invalid_limit(self): - """Test that invalid limits are properly checked on construction.""" - self.assertRaises(ValueError, limits.Limit, "GET", "*", ".*", 0, 1) - - -class ParseLimitsTest(BaseLimitTestSuite): - - """Tests for the default limits parser in the `limits.Limiter` class.""" - - def test_invalid(self): - """Test that parse_limits() handles invalid input correctly.""" - self.assertRaises(ValueError, limits.Limiter.parse_limits, - ';;;;;') - - def test_bad_rule(self): - """Test that parse_limits() handles bad rules correctly.""" - self.assertRaises(ValueError, limits.Limiter.parse_limits, - 'GET, *, .*, 20, minute') - - def test_missing_arg(self): - """Test that parse_limits() handles missing args correctly.""" - self.assertRaises(ValueError, limits.Limiter.parse_limits, - '(GET, *, .*, 20)') - - def test_bad_value(self): - """Test that parse_limits() handles bad values correctly.""" - self.assertRaises(ValueError, limits.Limiter.parse_limits, - '(GET, *, .*, foo, minute)') - - def test_bad_unit(self): 
- """Test that parse_limits() handles bad units correctly.""" - self.assertRaises(ValueError, limits.Limiter.parse_limits, - '(GET, *, .*, 20, lightyears)') - - def test_multiple_rules(self): - """Test that parse_limits() handles multiple rules correctly.""" - try: - l = limits.Limiter.parse_limits('(get, *, .*, 20, minute);' - '(PUT, /foo*, /foo.*, 10, hour);' - '(POST, /bar*, /bar.*, 5, second);' - '(Say, /derp*, /derp.*, 1, day)') - except ValueError as e: - self.assertFalse(six.text_type(e)) - - # Make sure the number of returned limits are correct - self.assertEqual(4, len(l)) - - # Check all the verbs... - expected = ['GET', 'PUT', 'POST', 'SAY'] - self.assertEqual(expected, [t.verb for t in l]) - - # ...the URIs... - expected = ['*', '/foo*', '/bar*', '/derp*'] - self.assertEqual(expected, [t.uri for t in l]) - - # ...the regexes... - expected = ['.*', '/foo.*', '/bar.*', '/derp.*'] - self.assertEqual(expected, [t.regex for t in l]) - - # ...the values... - expected = [20, 10, 5, 1] - self.assertEqual(expected, [t.value for t in l]) - - # ...and the units... 
- expected = [limits.PER_MINUTE, limits.PER_HOUR, - limits.PER_SECOND, limits.PER_DAY] - self.assertEqual(expected, [t.unit for t in l]) - - -class LimiterTest(BaseLimitTestSuite): - - """Tests for the in-memory `limits.Limiter` class.""" - - def setUp(self): - """Run before each test.""" - super(LimiterTest, self).setUp() - userlimits = {'limits.user3': '', - 'limits.user0': '(get, *, .*, 4, minute);' - '(put, *, .*, 2, minute)'} - self.limiter = limits.Limiter(TEST_LIMITS, **userlimits) - - def _check(self, num, verb, url, username=None): - """Check and yield results from checks.""" - for x in range(num): - yield self.limiter.check_for_delay(verb, url, username)[0] - - def _check_sum(self, num, verb, url, username=None): - """Check and sum results from checks.""" - results = self._check(num, verb, url, username) - return sum(item for item in results if item) - - def test_no_delay_GET(self): - """Ensure no delay on a single call for a limit verb we didn't set.""" - delay = self.limiter.check_for_delay("GET", "/anything") - self.assertEqual((None, None), delay) - - def test_no_delay_PUT(self): - """Ensure no delay on a single call for a known limit.""" - delay = self.limiter.check_for_delay("PUT", "/anything") - self.assertEqual((None, None), delay) - - def test_delay_PUT(self): - """Test delay on 11th PUT request. - - Ensure the 11th PUT will result in a delay of 6.0 seconds until - the next request will be granced. - """ - expected = [None] * 10 + [6.0] - results = list(self._check(11, "PUT", "/anything")) - - self.assertEqual(expected, results) - - def test_delay_POST(self): - """Test delay on 8th POST request. - - Ensure the 8th POST will result in a delay of 6.0 seconds until - the next request will be granced. 
- """ - expected = [None] * 7 - results = list(self._check(7, "POST", "/anything")) - self.assertEqual(expected, results) - - expected = 60.0 / 7.0 - results = self._check_sum(1, "POST", "/anything") - self.assertAlmostEqual(expected, results, 8) - - def test_delay_GET(self): - """Ensure the 11th GET will result in NO delay.""" - expected = [None] * 11 - results = list(self._check(11, "GET", "/anything")) - self.assertEqual(expected, results) - - expected = [None] * 4 + [15.0] - results = list(self._check(5, "GET", "/foo", "user0")) - self.assertEqual(expected, results) - - def test_delay_PUT_volumes(self): - """Test delay on /volumes. - - Ensure PUT on /volumes limits at 5 requests, and PUT elsewhere - is still OK after 5 requests...but then after 11 total requests, - PUT limiting kicks in. - """ - # First 6 requests on PUT /volumes - expected = [None] * 5 + [12.0] - results = list(self._check(6, "PUT", "/volumes")) - self.assertEqual(expected, results) - - # Next 5 request on PUT /anything - expected = [None] * 4 + [6.0] - results = list(self._check(5, "PUT", "/anything")) - self.assertEqual(expected, results) - - def test_delay_PUT_wait(self): - """Test limit is lifted again. - - Ensure after hitting the limit and then waiting for - the correct amount of time, the limit will be lifted. 
- """ - expected = [None] * 10 + [6.0] - results = list(self._check(11, "PUT", "/anything")) - self.assertEqual(expected, results) - - # Advance time - self.time += 6.0 - - expected = [None, 6.0] - results = list(self._check(2, "PUT", "/anything")) - self.assertEqual(expected, results) - - def test_multiple_delays(self): - """Ensure multiple requests still get a delay.""" - expected = [None] * 10 + [6.0] * 10 - results = list(self._check(20, "PUT", "/anything")) - self.assertEqual(expected, results) - - self.time += 1.0 - - expected = [5.0] * 10 - results = list(self._check(10, "PUT", "/anything")) - self.assertEqual(expected, results) - - expected = [None] * 2 + [30.0] * 8 - results = list(self._check(10, "PUT", "/anything", "user0")) - self.assertEqual(expected, results) - - def test_user_limit(self): - """Test user-specific limits.""" - self.assertEqual([], self.limiter.levels['user3']) - self.assertEqual(2, len(self.limiter.levels['user0'])) - - def test_multiple_users(self): - """Tests involving multiple users.""" - - # User0 - expected = [None] * 2 + [30.0] * 8 - results = list(self._check(10, "PUT", "/anything", "user0")) - self.assertEqual(expected, results) - - # User1 - expected = [None] * 10 + [6.0] * 10 - results = list(self._check(20, "PUT", "/anything", "user1")) - self.assertEqual(expected, results) - - # User2 - expected = [None] * 10 + [6.0] * 5 - results = list(self._check(15, "PUT", "/anything", "user2")) - self.assertEqual(expected, results) - - # User3 - expected = [None] * 20 - results = list(self._check(20, "PUT", "/anything", "user3")) - self.assertEqual(expected, results) - - self.time += 1.0 - - # User1 again - expected = [5.0] * 10 - results = list(self._check(10, "PUT", "/anything", "user1")) - self.assertEqual(expected, results) - - self.time += 1.0 - - # User1 again - expected = [4.0] * 5 - results = list(self._check(5, "PUT", "/anything", "user2")) - self.assertEqual(expected, results) - - # User0 again - expected = [28.0] - results = 
list(self._check(1, "PUT", "/anything", "user0")) - self.assertEqual(expected, results) - - self.time += 28.0 - - expected = [None, 30.0] - results = list(self._check(2, "PUT", "/anything", "user0")) - self.assertEqual(expected, results) - - -class WsgiLimiterTest(BaseLimitTestSuite): - - """Tests for `limits.WsgiLimiter` class.""" - - def setUp(self): - """Run before each test.""" - super(WsgiLimiterTest, self).setUp() - self.app = limits.WsgiLimiter(TEST_LIMITS) - - def _request_data(self, verb, path): - """Get data describing a limit request verb/path.""" - return jsonutils.dump_as_bytes({"verb": verb, "path": path}) - - def _request(self, verb, url, username=None): - """POST request to given url by given username. - - Make sure that POSTing to the given url causes the given username - to perform the given action. Make the internal rate limiter return - delay and make sure that the WSGI app returns the correct response. - """ - if username: - request = webob.Request.blank("/%s" % username) - else: - request = webob.Request.blank("/") - - request.method = "POST" - request.body = self._request_data(verb, url) - response = request.get_response(self.app) - - if "X-Wait-Seconds" in response.headers: - self.assertEqual(http_client.FORBIDDEN, response.status_int) - return response.headers["X-Wait-Seconds"] - - self.assertEqual(http_client.NO_CONTENT, response.status_int) - - def test_invalid_methods(self): - """Only POSTs should work.""" - for method in ["GET", "PUT", "DELETE", "HEAD", "OPTIONS"]: - request = webob.Request.blank("/", method=method) - response = request.get_response(self.app) - self.assertEqual(http_client.METHOD_NOT_ALLOWED, - response.status_int) - - def test_good_url(self): - delay = self._request("GET", "/something") - self.assertIsNone(delay) - - def test_escaping(self): - delay = self._request("GET", "/something/jump%20up") - self.assertIsNone(delay) - - def test_response_to_delays(self): - delay = self._request("GET", "/delayed") - 
self.assertIsNone(delay) - - delay = self._request("GET", "/delayed") - self.assertEqual('60.00', delay) - - def test_response_to_delays_usernames(self): - delay = self._request("GET", "/delayed", "user1") - self.assertIsNone(delay) - - delay = self._request("GET", "/delayed", "user2") - self.assertIsNone(delay) - - delay = self._request("GET", "/delayed", "user1") - self.assertEqual('60.00', delay) - - delay = self._request("GET", "/delayed", "user2") - self.assertEqual('60.00', delay) - - -class FakeHttplibSocket(object): - - """Fake `http_client.HTTPResponse` replacement.""" - - def __init__(self, response_string): - """Initialize new `FakeHttplibSocket`.""" - if isinstance(response_string, six.text_type): - response_string = response_string.encode('utf-8') - self._buffer = six.BytesIO(response_string) - - def makefile(self, mode, *args): - """Returns the socket's internal buffer.""" - return self._buffer - - -class FakeHttplibConnection(object): - - """Fake `http_client.HTTPConnection`.""" - - def __init__(self, app, host): - """Initialize `FakeHttplibConnection`.""" - self.app = app - self.host = host - - def request(self, method, path, body="", headers=None): - """Fake request handler. - - Requests made via this connection actually get translated and - routed into our WSGI app, we then wait for the response and turn - it back into an `http_client.HTTPResponse`. - """ - if not headers: - headers = {} - - req = webob.Request.blank(path) - req.method = method - req.headers = headers - req.host = self.host - req.body = body - - resp = str(req.get_response(self.app)) - resp = "HTTP/1.0 %s" % resp - sock = FakeHttplibSocket(resp) - self.http_response = http_client.HTTPResponse(sock) - self.http_response.begin() - - def getresponse(self): - """Return our generated response from the request.""" - return self.http_response - - -def wire_HTTPConnection_to_WSGI(host, app): - """Monkeypatches HTTPConnection. 
- - Monkeypatches HTTPConnection so that if you try to connect to host, you - are instead routed straight to the given WSGI app. - - After calling this method, when any code calls - - http_client.HTTPConnection(host) - - the connection object will be a fake. Its requests will be sent directly - to the given WSGI app rather than through a socket. - - Code connecting to hosts other than host will not be affected. - - This method may be called multiple times to map different hosts to - different apps. - - This method returns the original HTTPConnection object, so that the caller - can restore the default HTTPConnection interface (for all hosts). - """ - class HTTPConnectionDecorator(object): - """Decorator to mock the HTTPConecction class. - - Wraps the real HTTPConnection class so that when you instantiate - the class you might instead get a fake instance. - """ - - def __init__(self, wrapped): - self.wrapped = wrapped - - def __call__(self, connection_host, *args, **kwargs): - if connection_host == host: - return FakeHttplibConnection(app, host) - else: - return self.wrapped(connection_host, *args, **kwargs) - - oldHTTPConnection = http_client.HTTPConnection - new_http_connection = HTTPConnectionDecorator(http_client.HTTPConnection) - http_client.HTTPConnection = new_http_connection - return oldHTTPConnection - - -class WsgiLimiterProxyTest(BaseLimitTestSuite): - - """Tests for the `limits.WsgiLimiterProxy` class.""" - - def setUp(self): - """setUp() for WsgiLimiterProxyTest. - - Do some nifty HTTP/WSGI magic which allows for WSGI to be called - directly by something like the `http_client` library. 
- """ - super(WsgiLimiterProxyTest, self).setUp() - self.app = limits.WsgiLimiter(TEST_LIMITS) - oldHTTPConnection = ( - wire_HTTPConnection_to_WSGI("169.254.0.1:80", self.app)) - self.proxy = limits.WsgiLimiterProxy("169.254.0.1:80") - self.addCleanup(self._restore, oldHTTPConnection) - - def _restore(self, oldHTTPConnection): - # restore original HTTPConnection object - http_client.HTTPConnection = oldHTTPConnection - - def test_200(self): - """Successful request test.""" - delay = self.proxy.check_for_delay("GET", "/anything") - self.assertEqual((None, None), delay) - - def test_403(self): - """Forbidden request test.""" - delay = self.proxy.check_for_delay("GET", "/delayed") - self.assertEqual((None, None), delay) - - delay, error = self.proxy.check_for_delay("GET", "/delayed") - error = error.strip() - - expected = ("60.00", - b"403 Forbidden\n\nOnly 1 GET request(s) can be " - b"made to /delayed every minute.") - - self.assertEqual(expected, (delay, error)) - - -class LimitsViewBuilderTest(test.TestCase): - def setUp(self): - super(LimitsViewBuilderTest, self).setUp() - self.view_builder = views.limits.ViewBuilder() - self.rate_limits = [{"URI": "*", - "regex": ".*", - "value": 10, - "verb": "POST", - "remaining": 2, - "unit": "MINUTE", - "resetTime": 1311272226}, - {"URI": "*/volumes", - "regex": "^/volumes", - "value": 50, - "verb": "POST", - "remaining": 10, - "unit": "DAY", - "resetTime": 1311272226}] - self.absolute_limits = {"gigabytes": 1, - "backup_gigabytes": 2, - "volumes": 3, - "snapshots": 4, - "backups": 5} - - def test_build_limits(self): - tdate = "2011-07-21T18:17:06" - expected_limits = { - "limits": {"rate": [{"uri": "*", - "regex": ".*", - "limit": [{"value": 10, - "verb": "POST", - "remaining": 2, - "unit": "MINUTE", - "next-available": tdate}]}, - {"uri": "*/volumes", - "regex": "^/volumes", - "limit": [{"value": 50, - "verb": "POST", - "remaining": 10, - "unit": "DAY", - "next-available": tdate}]}], - "absolute": 
{"maxTotalVolumeGigabytes": 1, - "maxTotalBackupGigabytes": 2, - "maxTotalVolumes": 3, - "maxTotalSnapshots": 4, - "maxTotalBackups": 5}}} - - output = self.view_builder.build(self.rate_limits, - self.absolute_limits) - self.assertDictEqual(expected_limits, output) - - def test_build_limits_empty_limits(self): - expected_limits = {"limits": {"rate": [], - "absolute": {}}} - - abs_limits = {} - rate_limits = [] - output = self.view_builder.build(rate_limits, abs_limits) - self.assertDictEqual(expected_limits, output) diff --git a/cinder/tests/unit/api/v2/test_snapshot_metadata.py b/cinder/tests/unit/api/v2/test_snapshot_metadata.py deleted file mode 100644 index 165ac0bba..000000000 --- a/cinder/tests/unit/api/v2/test_snapshot_metadata.py +++ /dev/null @@ -1,673 +0,0 @@ -# Copyright 2011 OpenStack Foundation -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -import uuid - -import ddt -import mock -from oslo_serialization import jsonutils -from six.moves import http_client -import webob - -from cinder.api import extensions -from cinder.api.v2 import snapshot_metadata -from cinder.api.v2 import snapshots -from cinder import context -import cinder.db -from cinder import exception -from cinder.objects import fields -from cinder import test -from cinder.tests.unit.api import fakes -from cinder.tests.unit import fake_constants as fake -from cinder.tests.unit import fake_snapshot -from cinder.tests.unit import fake_volume -from cinder import volume - - -def return_create_snapshot_metadata(context, snapshot_id, metadata, delete): - return fake_snapshot_metadata() - - -def return_create_snapshot_metadata_insensitive(context, snapshot_id, - metadata, delete): - return fake_snapshot_metadata_insensitive() - - -def return_new_snapshot_metadata(context, snapshot_id, metadata, delete): - return fake_new_snapshot_metadata() - - -def fake_snapshot_metadata(): - metadata = { - "key1": "value1", - "key2": "value2", - "key3": "value3", - } - return metadata - - -def fake_snapshot_metadata_insensitive(): - metadata = { - "key1": "value1", - "key2": "value2", - "key3": "value3", - "KEY4": "value4", - } - return metadata - - -def fake_new_snapshot_metadata(): - metadata = { - 'key10': 'value10', - 'key99': 'value99', - 'KEY20': 'value20', - } - return metadata - - -def return_snapshot(context, snapshot_id): - return {'id': '0cc3346e-9fef-4445-abe6-5d2b2690ec64', - 'name': 'fake', - 'status': 'available', - 'metadata': {}} - - -# First argument needs to be self to receive the context argument in the right -# variable, as this'll be used to replace the original API.get method which -# receives self as the first argument. 
-def fake_get(self, context, *args, **kwargs): - vol = {'id': fake.VOLUME_ID, - 'size': 100, - 'name': 'fake', - 'host': 'fake-host', - 'status': 'available', - 'encryption_key_id': None, - 'volume_type_id': None, - 'migration_status': None, - 'availability_zone': 'fake-zone', - 'attach_status': fields.VolumeAttachStatus.DETACHED, - 'metadata': {}} - return fake_volume.fake_volume_obj(context, **vol) - - -def return_snapshot_nonexistent(context, snapshot_id): - raise exception.SnapshotNotFound(snapshot_id=snapshot_id) - - -@ddt.ddt -class SnapshotMetaDataTest(test.TestCase): - - def setUp(self): - super(SnapshotMetaDataTest, self).setUp() - self.volume_api = cinder.volume.api.API() - self.mock_object(volume.api.API, 'get', fake_get) - self.mock_object(cinder.db, 'snapshot_get', return_snapshot) - self.mock_object(self.volume_api, 'update_snapshot_metadata') - - self.ext_mgr = extensions.ExtensionManager() - self.ext_mgr.extensions = {} - self.snapshot_controller = snapshots.SnapshotsController(self.ext_mgr) - self.controller = snapshot_metadata.Controller() - self.req_id = str(uuid.uuid4()) - self.url = '/v2/%s/snapshots/%s/metadata' % ( - fake.PROJECT_ID, self.req_id) - - snap = {"volume_size": 100, - "volume_id": fake.VOLUME_ID, - "display_name": "Volume Test Name", - "display_description": "Volume Test Desc", - "availability_zone": "zone1:host1", - "host": "fake-host", - "metadata": {}} - body = {"snapshot": snap} - req = fakes.HTTPRequest.blank('/v2/snapshots') - self.snapshot_controller.create(req, body) - - @mock.patch('cinder.objects.Snapshot.get_by_id') - def test_index(self, snapshot_get_by_id): - snapshot = { - 'id': self.req_id, - 'expected_attrs': ['metadata'] - } - ctx = context.RequestContext(fake.USER_ID, fake.PROJECT_ID, True) - snapshot_obj = fake_snapshot.fake_snapshot_obj(ctx, **snapshot) - snapshot_obj['metadata'] = {'key1': 'value1', - 'key2': 'value2', - 'key3': 'value3'} - snapshot_get_by_id.return_value = snapshot_obj - - req = 
fakes.HTTPRequest.blank(self.url) - res_dict = self.controller.index(req, self.req_id) - - expected = { - 'metadata': { - 'key1': 'value1', - 'key2': 'value2', - 'key3': 'value3', - }, - } - self.assertEqual(expected, res_dict) - - @mock.patch('cinder.objects.Snapshot.get_by_id') - def test_index_nonexistent_snapshot(self, snapshot_get_by_id): - snapshot_get_by_id.side_effect = \ - exception.SnapshotNotFound(snapshot_id=self.req_id) - - req = fakes.HTTPRequest.blank(self.url) - self.assertRaises(exception.SnapshotNotFound, - self.controller.index, req, self.url) - - @mock.patch('cinder.objects.Snapshot.get_by_id') - def test_index_no_data(self, snapshot_get_by_id): - snapshot = { - 'id': self.req_id, - 'expected_attrs': ['metadata'] - } - ctx = context.RequestContext(fake.USER_ID, fake.PROJECT_ID, True) - snapshot_obj = fake_snapshot.fake_snapshot_obj(ctx, **snapshot) - snapshot_get_by_id.return_value = snapshot_obj - - req = fakes.HTTPRequest.blank(self.url) - res_dict = self.controller.index(req, self.req_id) - expected = {'metadata': {}} - self.assertEqual(expected, res_dict) - - @mock.patch('cinder.objects.Snapshot.get_by_id') - def test_show(self, snapshot_get_by_id): - snapshot = { - 'id': self.req_id, - 'expected_attrs': ['metadata'] - } - ctx = context.RequestContext(fake.USER_ID, fake.PROJECT_ID, True) - snapshot_obj = fake_snapshot.fake_snapshot_obj(ctx, **snapshot) - snapshot_obj['metadata'] = {'key2': 'value2'} - snapshot_get_by_id.return_value = snapshot_obj - - req = fakes.HTTPRequest.blank(self.url + '/key2') - res_dict = self.controller.show(req, self.req_id, 'key2') - expected = {'meta': {'key2': 'value2'}} - self.assertEqual(expected, res_dict) - - @mock.patch('cinder.objects.Snapshot.get_by_id') - def test_show_nonexistent_snapshot(self, snapshot_get_by_id): - snapshot_get_by_id.side_effect = \ - exception.SnapshotNotFound(snapshot_id=self.req_id) - - req = fakes.HTTPRequest.blank(self.url + '/key2') - 
self.assertRaises(exception.SnapshotNotFound, - self.controller.show, req, self.req_id, 'key2') - - @mock.patch('cinder.objects.Snapshot.get_by_id') - def test_show_meta_not_found(self, snapshot_get_by_id): - snapshot = { - 'id': self.req_id, - 'expected_attrs': ['metadata'] - } - ctx = context.RequestContext(fake.USER_ID, fake.PROJECT_ID, True) - snapshot_obj = fake_snapshot.fake_snapshot_obj(ctx, **snapshot) - snapshot_get_by_id.return_value = snapshot_obj - - req = fakes.HTTPRequest.blank(self.url + '/key6') - self.assertRaises(exception.SnapshotMetadataNotFound, - self.controller.show, req, self.req_id, 'key6') - - @mock.patch('cinder.db.snapshot_metadata_delete') - @mock.patch('cinder.objects.Snapshot.get_by_id') - def test_delete(self, snapshot_get_by_id, snapshot_metadata_delete): - snapshot = { - 'id': self.req_id, - 'expected_attrs': ['metadata'] - } - ctx = context.RequestContext(fake.USER_ID, fake.PROJECT_ID, True) - snapshot_obj = fake_snapshot.fake_snapshot_obj(ctx, **snapshot) - snapshot_obj['metadata'] = {'key2': 'value2'} - snapshot_get_by_id.return_value = snapshot_obj - - req = fakes.HTTPRequest.blank(self.url + '/key2') - req.method = 'DELETE' - res = self.controller.delete(req, self.req_id, 'key2') - - self.assertEqual(http_client.OK, res.status_int) - - def test_delete_nonexistent_snapshot(self): - self.mock_object(cinder.db, 'snapshot_get', - return_snapshot_nonexistent) - req = fakes.HTTPRequest.blank(self.url + '/key1') - req.method = 'DELETE' - self.assertRaises(exception.SnapshotNotFound, - self.controller.delete, req, self.req_id, 'key1') - - @mock.patch('cinder.objects.Snapshot.get_by_id') - def test_delete_meta_not_found(self, snapshot_get_by_id): - snapshot = { - 'id': self.req_id, - 'expected_attrs': ['metadata'] - } - ctx = context.RequestContext(fake.USER_ID, fake.PROJECT_ID, True) - snapshot_obj = fake_snapshot.fake_snapshot_obj(ctx, **snapshot) - snapshot_get_by_id.return_value = snapshot_obj - - req = 
fakes.HTTPRequest.blank(self.url + '/key6') - req.method = 'DELETE' - self.assertRaises(exception.SnapshotMetadataNotFound, - self.controller.delete, req, self.req_id, 'key6') - - @mock.patch('cinder.db.snapshot_update') - @mock.patch('cinder.objects.Volume.get_by_id') - @mock.patch('cinder.objects.Snapshot.get_by_id') - def test_create(self, snapshot_get_by_id, volume_get_by_id, - snapshot_update): - snapshot = { - 'id': self.req_id, - 'expected_attrs': ['metadata'] - } - ctx = context.RequestContext(fake.USER_ID, fake.PROJECT_ID, True) - snapshot_obj = fake_snapshot.fake_snapshot_obj(ctx, **snapshot) - fake_volume_obj = fake_volume.fake_volume_obj(ctx) - snapshot_get_by_id.return_value = snapshot_obj - volume_get_by_id.return_value = fake_volume_obj - - self.mock_object(cinder.db, 'snapshot_metadata_update', - return_create_snapshot_metadata) - - req = fakes.HTTPRequest.blank('/v2/snapshot_metadata') - req.method = 'POST' - req.content_type = "application/json" - body = {"metadata": {"key1": "value1", - "key2": "value2", - "key3": "value3"}} - req.body = jsonutils.dump_as_bytes(body) - res_dict = self.controller.create(req, self.req_id, body) - self.assertEqual(body, res_dict) - - @mock.patch('cinder.db.snapshot_update') - @mock.patch('cinder.objects.Snapshot.get_by_id') - def test_create_with_keys_in_uppercase_and_lowercase( - self, snapshot_get_by_id, snapshot_update): - snapshot = { - 'id': self.req_id, - 'expected_attrs': ['metadata'] - } - ctx = context.RequestContext(fake.USER_ID, fake.PROJECT_ID, True) - snapshot_obj = fake_snapshot.fake_snapshot_obj(ctx, **snapshot) - snapshot_get_by_id.return_value = snapshot_obj - - # if the keys in uppercase_and_lowercase, should return the one - # which server added - self.mock_object(cinder.db, 'snapshot_metadata_update', - return_create_snapshot_metadata_insensitive) - - req = fakes.HTTPRequest.blank('/v2/snapshot_metadata') - req.method = 'POST' - req.content_type = "application/json" - body = {"metadata": {"key1": 
"value1", - "KEY1": "value1", - "key2": "value2", - "KEY2": "value2", - "key3": "value3", - "KEY4": "value4"}} - expected = {"metadata": {"key1": "value1", - "key2": "value2", - "key3": "value3", - "KEY4": "value4"}} - req.body = jsonutils.dump_as_bytes(body) - res_dict = self.controller.create(req, self.req_id, body) - self.assertEqual(expected, res_dict) - - def test_create_empty_body(self): - self.mock_object(cinder.db, 'snapshot_metadata_update', - return_create_snapshot_metadata) - req = fakes.HTTPRequest.blank(self.url) - req.method = 'POST' - req.headers["content-type"] = "application/json" - - self.assertRaises(webob.exc.HTTPBadRequest, - self.controller.create, req, self.req_id, None) - - def test_create_item_empty_key(self): - self.mock_object(cinder.db, 'snapshot_metadata_update', - return_create_snapshot_metadata) - req = fakes.HTTPRequest.blank(self.url + '/key1') - req.method = 'PUT' - body = {"meta": {"": "value1"}} - req.body = jsonutils.dump_as_bytes(body) - req.headers["content-type"] = "application/json" - - self.assertRaises(webob.exc.HTTPBadRequest, - self.controller.create, req, self.req_id, body) - - def test_create_item_key_too_long(self): - self.mock_object(cinder.db, 'snapshot_metadata_update', - return_create_snapshot_metadata) - req = fakes.HTTPRequest.blank(self.url + '/key1') - req.method = 'PUT' - body = {"meta": {("a" * 260): "value1"}} - req.body = jsonutils.dump_as_bytes(body) - req.headers["content-type"] = "application/json" - - self.assertRaises(webob.exc.HTTPBadRequest, - self.controller.create, - req, self.req_id, body) - - def test_create_nonexistent_snapshot(self): - self.mock_object(cinder.db, 'snapshot_get', - return_snapshot_nonexistent) - self.mock_object(cinder.db, 'snapshot_metadata_update', - return_create_snapshot_metadata) - - req = fakes.HTTPRequest.blank('/v2/snapshot_metadata') - req.method = 'POST' - req.content_type = "application/json" - body = {"metadata": {"key9": "value9"}} - req.body = 
jsonutils.dump_as_bytes(body) - self.assertRaises(exception.SnapshotNotFound, - self.controller.create, req, self.req_id, body) - - @mock.patch('cinder.db.snapshot_update') - @mock.patch('cinder.objects.Snapshot.get_by_id') - def test_update_all(self, snapshot_get_by_id, snapshot_update): - snapshot = { - 'id': self.req_id, - 'expected_attrs': [] - } - ctx = context.RequestContext(fake.USER_ID, fake.PROJECT_ID, True) - snapshot_obj = fake_snapshot.fake_snapshot_obj(ctx, **snapshot) - snapshot_get_by_id.return_value = snapshot_obj - - self.mock_object(cinder.db, 'snapshot_metadata_update', - return_new_snapshot_metadata) - req = fakes.HTTPRequest.blank(self.url) - req.method = 'PUT' - req.content_type = "application/json" - expected = { - 'metadata': { - 'key10': 'value10', - 'key99': 'value99', - 'KEY20': 'value20', - }, - } - req.body = jsonutils.dump_as_bytes(expected) - res_dict = self.controller.update_all(req, self.req_id, expected) - - self.assertEqual(expected, res_dict) - - @mock.patch('cinder.db.snapshot_update', - return_value={'key10': 'value10', - 'key99': 'value99', - 'KEY20': 'value20'}) - @mock.patch('cinder.objects.Snapshot.get_by_id') - def test_update_all_with_keys_in_uppercase_and_lowercase( - self, snapshot_get_by_id, snapshot_update): - snapshot = { - 'id': self.req_id, - 'expected_attrs': ['metadata'] - } - ctx = context.RequestContext(fake.USER_ID, fake.PROJECT_ID, True) - snapshot_obj = fake_snapshot.fake_snapshot_obj(ctx, **snapshot) - snapshot_get_by_id.return_value = snapshot_obj - - self.mock_object(cinder.db, 'snapshot_metadata_update', - return_new_snapshot_metadata) - req = fakes.HTTPRequest.blank(self.url) - req.method = 'PUT' - req.content_type = "application/json" - body = { - 'metadata': { - 'key10': 'value10', - 'KEY10': 'value10', - 'key99': 'value99', - 'KEY20': 'value20', - }, - } - expected = { - 'metadata': { - 'key10': 'value10', - 'key99': 'value99', - 'KEY20': 'value20', - }, - } - req.body = 
jsonutils.dump_as_bytes(expected) - res_dict = self.controller.update_all(req, self.req_id, body) - - self.assertEqual(expected, res_dict) - - @mock.patch('cinder.db.snapshot_update') - @mock.patch('cinder.objects.Snapshot.get_by_id') - def test_update_all_empty_container(self, snapshot_get_by_id, - snapshot_update): - snapshot = { - 'id': self.req_id, - 'expected_attrs': [] - } - ctx = context.RequestContext(fake.USER_ID, fake.PROJECT_ID, True) - snapshot_obj = fake_snapshot.fake_snapshot_obj(ctx, **snapshot) - snapshot_get_by_id.return_value = snapshot_obj - - self.mock_object(cinder.db, 'snapshot_metadata_update', - return_value={}) - req = fakes.HTTPRequest.blank(self.url) - req.method = 'PUT' - req.content_type = "application/json" - expected = {'metadata': {}} - req.body = jsonutils.dump_as_bytes(expected) - res_dict = self.controller.update_all(req, self.req_id, expected) - - self.assertEqual(expected, res_dict) - - def test_update_all_malformed_container(self): - self.mock_object(cinder.db, 'snapshot_metadata_update', - return_create_snapshot_metadata) - req = fakes.HTTPRequest.blank(self.url) - req.method = 'PUT' - req.content_type = "application/json" - expected = {'meta': {}} - req.body = jsonutils.dump_as_bytes(expected) - - self.assertRaises(webob.exc.HTTPBadRequest, - self.controller.update_all, req, self.req_id, - expected) - - def test_update_all_malformed_data(self): - self.mock_object(cinder.db, 'snapshot_metadata_update', - return_create_snapshot_metadata) - req = fakes.HTTPRequest.blank(self.url) - req.method = 'PUT' - req.content_type = "application/json" - expected = {'metadata': ['asdf']} - req.body = jsonutils.dump_as_bytes(expected) - - self.assertRaises(webob.exc.HTTPBadRequest, - self.controller.update_all, req, self.req_id, - expected) - - def test_update_all_nonexistent_snapshot(self): - self.mock_object(cinder.db, 'snapshot_get', - return_snapshot_nonexistent) - req = fakes.HTTPRequest.blank(self.url) - req.method = 'PUT' - 
req.content_type = "application/json" - body = {'metadata': {'key10': 'value10'}} - req.body = jsonutils.dump_as_bytes(body) - - self.assertRaises(exception.SnapshotNotFound, - self.controller.update_all, req, '100', body) - - @mock.patch('cinder.db.snapshot_metadata_update', return_value=dict()) - @mock.patch('cinder.db.snapshot_update') - @mock.patch('cinder.objects.Snapshot.get_by_id') - def test_update_item(self, snapshot_get_by_id, - snapshot_update, snapshot_metadata_update): - snapshot = { - 'id': self.req_id, - 'expected_attrs': ['metadata'] - } - ctx = context.RequestContext(fake.USER_ID, fake.PROJECT_ID, True) - snapshot_obj = fake_snapshot.fake_snapshot_obj(ctx, **snapshot) - snapshot_get_by_id.return_value = snapshot_obj - - req = fakes.HTTPRequest.blank(self.url + '/key1') - req.method = 'PUT' - body = {"meta": {"key1": "value1"}} - req.body = jsonutils.dump_as_bytes(body) - req.headers["content-type"] = "application/json" - res_dict = self.controller.update(req, self.req_id, 'key1', body) - expected = {'meta': {'key1': 'value1'}} - self.assertEqual(expected, res_dict) - - def test_update_item_nonexistent_snapshot(self): - self.mock_object(cinder.db, 'snapshot_get', - return_snapshot_nonexistent) - req = fakes.HTTPRequest.blank( - '/v2/%s/snapshots/asdf/metadata/key1' % fake.PROJECT_ID) - req.method = 'PUT' - body = {"meta": {"key1": "value1"}} - req.body = jsonutils.dump_as_bytes(body) - req.headers["content-type"] = "application/json" - - self.assertRaises(exception.SnapshotNotFound, - self.controller.update, req, self.req_id, 'key1', - body) - - def test_update_item_empty_body(self): - self.mock_object(cinder.db, 'snapshot_metadata_update', - return_create_snapshot_metadata) - req = fakes.HTTPRequest.blank(self.url + '/key1') - req.method = 'PUT' - req.headers["content-type"] = "application/json" - - self.assertRaises(webob.exc.HTTPBadRequest, - self.controller.update, req, self.req_id, 'key1', - None) - - 
@mock.patch('cinder.db.sqlalchemy.api._snapshot_get') - @mock.patch('cinder.db.snapshot_metadata_update', autospec=True) - def test_update_item_empty_key(self, metadata_update, snapshot_get): - snapshot_get.return_value = fake_get - req = fakes.HTTPRequest.blank(self.url + '/key1') - req.method = 'PUT' - body = {"meta": {"": "value1"}} - req.body = jsonutils.dump_as_bytes(body) - req.headers["content-type"] = "application/json" - - self.assertRaises(webob.exc.HTTPBadRequest, - self.controller.update, req, self.req_id, '', body) - - @mock.patch('cinder.objects.Snapshot.get_by_id') - def test_update_item_key_too_long(self, snapshot_get_by_id): - snapshot = { - 'id': self.req_id, - 'expected_attrs': ['metadata'] - } - ctx = context.RequestContext(fake.USER_ID, fake.PROJECT_ID, True) - snapshot_obj = fake_snapshot.fake_snapshot_obj(ctx, **snapshot) - snapshot_get_by_id.return_value = snapshot_obj - - self.mock_object(cinder.db, 'snapshot_metadata_update', - return_create_snapshot_metadata) - req = fakes.HTTPRequest.blank(self.url + '/key1') - req.method = 'PUT' - body = {"meta": {("a" * 260): "value1"}} - req.body = jsonutils.dump_as_bytes(body) - req.headers["content-type"] = "application/json" - - self.assertRaises(webob.exc.HTTPRequestEntityTooLarge, - self.controller.update, - req, self.req_id, ("a" * 260), body) - - @mock.patch('cinder.objects.Snapshot.get_by_id') - def test_update_item_value_too_long(self, snapshot_get_by_id): - snapshot = { - 'id': self.req_id, - 'expected_attrs': ['metadata'] - } - ctx = context.RequestContext(fake.USER_ID, fake.PROJECT_ID, True) - snapshot_obj = fake_snapshot.fake_snapshot_obj(ctx, **snapshot) - snapshot_get_by_id.return_value = snapshot_obj - - self.mock_object(cinder.db, 'snapshot_metadata_update', - return_create_snapshot_metadata) - req = fakes.HTTPRequest.blank(self.url + '/key1') - req.method = 'PUT' - body = {"meta": {"key1": ("a" * 260)}} - req.body = jsonutils.dump_as_bytes(body) - req.headers["content-type"] = 
"application/json" - - self.assertRaises(webob.exc.HTTPRequestEntityTooLarge, - self.controller.update, - req, self.req_id, "key1", body) - - @ddt.data({"meta": {"key1": "value1", "key2": "value2"}}, - {"meta": {"key1": None}}) - @mock.patch('cinder.objects.Snapshot.get_by_id') - def test_update_invalid_metadata(self, body, snapshot_get_by_id): - snapshot = { - 'id': self.req_id, - 'expected_attrs': ['metadata'] - } - self.mock_object(cinder.db, 'snapshot_metadata_update', - return_create_snapshot_metadata) - ctx = context.RequestContext(fake.USER_ID, fake.PROJECT_ID, True) - snapshot_obj = fake_snapshot.fake_snapshot_obj(ctx, **snapshot) - snapshot_get_by_id.return_value = snapshot_obj - req = fakes.HTTPRequest.blank(self.url + '/key1') - req.method = 'PUT' - req.body = jsonutils.dump_as_bytes(body) - req.headers["content-type"] = "application/json" - - self.assertRaises(webob.exc.HTTPBadRequest, - self.controller.update, req, self.req_id, 'key1', - body) - - def test_update_item_body_uri_mismatch(self): - self.mock_object(cinder.db, 'snapshot_metadata_update', - return_create_snapshot_metadata) - req = fakes.HTTPRequest.blank(self.url + '/bad') - req.method = 'PUT' - body = {"meta": {"key1": "value1"}} - req.body = jsonutils.dump_as_bytes(body) - req.headers["content-type"] = "application/json" - - self.assertRaises(webob.exc.HTTPBadRequest, - self.controller.update, req, self.req_id, 'bad', - body) - - @ddt.data({"metadata": {"a" * 260: "value1"}}, - {"metadata": {"key": "v" * 260}}, - {"metadata": {"": "value1"}}, - {"metadata": {"key": None}}) - @mock.patch('cinder.objects.Snapshot.get_by_id') - def test_invalid_metadata_items_on_create(self, data, snapshot_get_by_id): - snapshot = { - 'id': self.req_id, - 'expected_attrs': ['metadata'] - } - ctx = context.RequestContext(fake.USER_ID, fake.PROJECT_ID, True) - snapshot_obj = fake_snapshot.fake_snapshot_obj(ctx, **snapshot) - snapshot_get_by_id.return_value = snapshot_obj - - self.mock_object(cinder.db, 
'snapshot_metadata_update', - return_create_snapshot_metadata) - req = fakes.HTTPRequest.blank(self.url) - req.method = 'POST' - req.headers["content-type"] = "application/json" - - exc = webob.exc.HTTPBadRequest - if (len(list(data['metadata'].keys())[0]) > 255 or - (list(data['metadata'].values())[0] is not None and - len(list(data['metadata'].values())[0]) > 255)): - exc = webob.exc.HTTPRequestEntityTooLarge - - req.body = jsonutils.dump_as_bytes(data) - self.assertRaises(exc, self.controller.create, req, self.req_id, data) diff --git a/cinder/tests/unit/api/v2/test_snapshots.py b/cinder/tests/unit/api/v2/test_snapshots.py deleted file mode 100644 index be35d3964..000000000 --- a/cinder/tests/unit/api/v2/test_snapshots.py +++ /dev/null @@ -1,621 +0,0 @@ -# Copyright 2011 Denali Systems, Inc. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -import ddt -import mock -from oslo_config import cfg -from six.moves import http_client -from six.moves.urllib import parse as urllib -import webob - -from cinder.api import common -from cinder.api.v2 import snapshots -from cinder import context -from cinder import db -from cinder import exception -from cinder import objects -from cinder.objects import fields -from cinder import test -from cinder.tests.unit.api import fakes -from cinder.tests.unit.api.v2 import fakes as v2_fakes -from cinder.tests.unit import fake_constants as fake -from cinder.tests.unit import fake_snapshot -from cinder.tests.unit import fake_volume -from cinder.tests.unit import utils -from cinder import volume - - -CONF = cfg.CONF - -UUID = '00000000-0000-0000-0000-000000000001' -INVALID_UUID = '00000000-0000-0000-0000-000000000002' - - -def _get_default_snapshot_param(): - return { - 'id': UUID, - 'volume_id': fake.VOLUME_ID, - 'status': fields.SnapshotStatus.AVAILABLE, - 'volume_size': 100, - 'created_at': None, - 'updated_at': None, - 'user_id': 'bcb7746c7a41472d88a1ffac89ba6a9b', - 'project_id': '7ffe17a15c724e2aa79fc839540aec15', - 'display_name': 'Default name', - 'display_description': 'Default description', - 'deleted': None, - 'volume': {'availability_zone': 'test_zone'} - } - - -def fake_snapshot_delete(self, context, snapshot): - if snapshot['id'] != UUID: - raise exception.SnapshotNotFound(snapshot['id']) - - -def fake_snapshot_get(self, context, snapshot_id): - if snapshot_id != UUID: - raise exception.SnapshotNotFound(snapshot_id) - - param = _get_default_snapshot_param() - return param - - -def fake_snapshot_get_all(self, context, search_opts=None): - param = _get_default_snapshot_param() - return [param] - - -@ddt.ddt -class SnapshotApiTest(test.TestCase): - def setUp(self): - super(SnapshotApiTest, self).setUp() - self.controller = snapshots.SnapshotsController() - self.ctx = context.RequestContext(fake.USER_ID, fake.PROJECT_ID, True) - - @mock.patch( - 
'cinder.api.openstack.wsgi.Controller.validate_name_and_description') - def test_snapshot_create(self, mock_validate): - volume = utils.create_volume(self.ctx) - snapshot_name = 'Snapshot Test Name' - snapshot_description = 'Snapshot Test Desc' - snapshot = { - "volume_id": volume.id, - "force": False, - "name": snapshot_name, - "description": snapshot_description - } - - body = dict(snapshot=snapshot) - req = fakes.HTTPRequest.blank('/v2/snapshots') - resp_dict = self.controller.create(req, body) - - self.assertIn('snapshot', resp_dict) - self.assertEqual(snapshot_name, resp_dict['snapshot']['name']) - self.assertEqual(snapshot_description, - resp_dict['snapshot']['description']) - self.assertTrue(mock_validate.called) - self.assertIn('updated_at', resp_dict['snapshot']) - db.volume_destroy(self.ctx, volume.id) - - @ddt.data(True, 'y', 'true', 'trUE', 'yes', '1', 'on', 1, "1 ") - def test_snapshot_create_force(self, force_param): - volume = utils.create_volume(self.ctx, status='in-use') - snapshot_name = 'Snapshot Test Name' - snapshot_description = 'Snapshot Test Desc' - snapshot = { - "volume_id": volume.id, - "force": force_param, - "name": snapshot_name, - "description": snapshot_description - } - body = dict(snapshot=snapshot) - req = fakes.HTTPRequest.blank('/v2/snapshots') - resp_dict = self.controller.create(req, body) - - self.assertIn('snapshot', resp_dict) - self.assertEqual(snapshot_name, - resp_dict['snapshot']['name']) - self.assertEqual(snapshot_description, - resp_dict['snapshot']['description']) - self.assertIn('updated_at', resp_dict['snapshot']) - - db.volume_destroy(self.ctx, volume.id) - - @ddt.data(False, 'n', 'false', 'falSE', 'No', '0', 'off', 0) - def test_snapshot_create_force_failure(self, force_param): - volume = utils.create_volume(self.ctx, status='in-use') - snapshot_name = 'Snapshot Test Name' - snapshot_description = 'Snapshot Test Desc' - snapshot = { - "volume_id": volume.id, - "force": force_param, - "name": snapshot_name, - 
"description": snapshot_description - } - body = dict(snapshot=snapshot) - req = fakes.HTTPRequest.blank('/v2/snapshots') - self.assertRaises(exception.InvalidVolume, - self.controller.create, - req, - body) - - db.volume_destroy(self.ctx, volume.id) - - @ddt.data("**&&^^%%$$##@@", '-1', 2, '01') - def test_snapshot_create_invalid_force_param(self, force_param): - volume = utils.create_volume(self.ctx, status='in-use') - snapshot_name = 'Snapshot Test Name' - snapshot_description = 'Snapshot Test Desc' - - snapshot = { - "volume_id": volume.id, - "force": force_param, - "name": snapshot_name, - "description": snapshot_description - } - body = dict(snapshot=snapshot) - req = fakes.HTTPRequest.blank('/v2/snapshots') - self.assertRaises(exception.InvalidParameterValue, - self.controller.create, - req, - body) - - db.volume_destroy(self.ctx, volume.id) - - def test_snapshot_create_without_volume_id(self): - snapshot_name = 'Snapshot Test Name' - snapshot_description = 'Snapshot Test Desc' - body = { - "snapshot": { - "force": True, - "name": snapshot_name, - "description": snapshot_description - } - } - req = fakes.HTTPRequest.blank('/v2/snapshots') - self.assertRaises(webob.exc.HTTPBadRequest, - self.controller.create, req, body) - - @mock.patch.object(volume.api.API, "update_snapshot", - side_effect=v2_fakes.fake_snapshot_update) - @mock.patch('cinder.db.snapshot_metadata_get', return_value=dict()) - @mock.patch('cinder.db.volume_get') - @mock.patch('cinder.objects.Snapshot.get_by_id') - @mock.patch( - 'cinder.api.openstack.wsgi.Controller.validate_name_and_description') - def test_snapshot_update( - self, mock_validate, snapshot_get_by_id, volume_get, - snapshot_metadata_get, update_snapshot): - snapshot = { - 'id': UUID, - 'volume_id': fake.VOLUME_ID, - 'status': fields.SnapshotStatus.AVAILABLE, - 'volume_size': 100, - 'display_name': 'Default name', - 'display_description': 'Default description', - 'expected_attrs': ['metadata'], - } - ctx = 
context.RequestContext(fake.PROJECT_ID, fake.USER_ID, True) - snapshot_obj = fake_snapshot.fake_snapshot_obj(ctx, **snapshot) - fake_volume_obj = fake_volume.fake_volume_obj(ctx) - snapshot_get_by_id.return_value = snapshot_obj - volume_get.return_value = fake_volume_obj - - updates = { - "name": "Updated Test Name", - } - body = {"snapshot": updates} - req = fakes.HTTPRequest.blank('/v2/snapshots/%s' % UUID) - res_dict = self.controller.update(req, UUID, body) - expected = { - 'snapshot': { - 'id': UUID, - 'volume_id': fake.VOLUME_ID, - 'status': fields.SnapshotStatus.AVAILABLE, - 'size': 100, - 'created_at': None, - 'updated_at': None, - 'name': u'Updated Test Name', - 'description': u'Default description', - 'metadata': {}, - } - } - self.assertEqual(expected, res_dict) - self.assertTrue(mock_validate.called) - self.assertEqual(2, len(self.notifier.notifications)) - - def test_snapshot_update_missing_body(self): - body = {} - req = fakes.HTTPRequest.blank('/v2/snapshots/%s' % UUID) - self.assertRaises(webob.exc.HTTPBadRequest, - self.controller.update, req, UUID, body) - - def test_snapshot_update_invalid_body(self): - body = {'name': 'missing top level snapshot key'} - req = fakes.HTTPRequest.blank('/v2/snapshots/%s' % UUID) - self.assertRaises(webob.exc.HTTPBadRequest, - self.controller.update, req, UUID, body) - - def test_snapshot_update_not_found(self): - self.mock_object(volume.api.API, "get_snapshot", fake_snapshot_get) - updates = { - "name": "Updated Test Name", - } - body = {"snapshot": updates} - req = fakes.HTTPRequest.blank('/v2/snapshots/not-the-uuid') - self.assertRaises(exception.SnapshotNotFound, self.controller.update, - req, 'not-the-uuid', body) - - @mock.patch.object(volume.api.API, "delete_snapshot", - side_effect=v2_fakes.fake_snapshot_update) - @mock.patch('cinder.db.snapshot_metadata_get', return_value=dict()) - @mock.patch('cinder.objects.Volume.get_by_id') - @mock.patch('cinder.objects.Snapshot.get_by_id') - def 
test_snapshot_delete(self, snapshot_get_by_id, volume_get_by_id, - snapshot_metadata_get, delete_snapshot): - snapshot = { - 'id': UUID, - 'volume_id': fake.VOLUME_ID, - 'status': fields.SnapshotStatus.AVAILABLE, - 'volume_size': 100, - 'display_name': 'Default name', - 'display_description': 'Default description', - 'expected_attrs': ['metadata'], - } - ctx = context.RequestContext(fake.PROJECT_ID, fake.USER_ID, True) - snapshot_obj = fake_snapshot.fake_snapshot_obj(ctx, **snapshot) - fake_volume_obj = fake_volume.fake_volume_obj(ctx) - snapshot_get_by_id.return_value = snapshot_obj - volume_get_by_id.return_value = fake_volume_obj - - snapshot_id = UUID - req = fakes.HTTPRequest.blank('/v2/snapshots/%s' % snapshot_id) - resp = self.controller.delete(req, snapshot_id) - self.assertEqual(http_client.ACCEPTED, resp.status_int) - - def test_snapshot_delete_invalid_id(self): - self.mock_object(volume.api.API, "delete_snapshot", - fake_snapshot_delete) - snapshot_id = INVALID_UUID - req = fakes.HTTPRequest.blank('/v2/snapshots/%s' % snapshot_id) - self.assertRaises(exception.SnapshotNotFound, self.controller.delete, - req, snapshot_id) - - @mock.patch('cinder.db.snapshot_metadata_get', return_value=dict()) - @mock.patch('cinder.objects.Volume.get_by_id') - @mock.patch('cinder.objects.Snapshot.get_by_id') - def test_snapshot_show(self, snapshot_get_by_id, volume_get_by_id, - snapshot_metadata_get): - snapshot = { - 'id': UUID, - 'volume_id': fake.VOLUME_ID, - 'status': fields.SnapshotStatus.AVAILABLE, - 'volume_size': 100, - 'display_name': 'Default name', - 'display_description': 'Default description', - 'expected_attrs': ['metadata'], - } - ctx = context.RequestContext(fake.PROJECT_ID, fake.USER_ID, True) - snapshot_obj = fake_snapshot.fake_snapshot_obj(ctx, **snapshot) - fake_volume_obj = fake_volume.fake_volume_obj(ctx) - snapshot_get_by_id.return_value = snapshot_obj - volume_get_by_id.return_value = fake_volume_obj - req = 
fakes.HTTPRequest.blank('/v2/snapshots/%s' % UUID) - resp_dict = self.controller.show(req, UUID) - - self.assertIn('snapshot', resp_dict) - self.assertEqual(UUID, resp_dict['snapshot']['id']) - self.assertIn('updated_at', resp_dict['snapshot']) - - def test_snapshot_show_invalid_id(self): - snapshot_id = INVALID_UUID - req = fakes.HTTPRequest.blank('/v2/snapshots/%s' % snapshot_id) - self.assertRaises(exception.SnapshotNotFound, - self.controller.show, req, snapshot_id) - - @mock.patch('cinder.db.snapshot_metadata_get', return_value=dict()) - @mock.patch('cinder.objects.Volume.get_by_id') - @mock.patch('cinder.objects.Snapshot.get_by_id') - @mock.patch('cinder.volume.api.API.get_all_snapshots') - def test_snapshot_detail(self, get_all_snapshots, snapshot_get_by_id, - volume_get_by_id, snapshot_metadata_get): - snapshot = { - 'id': UUID, - 'volume_id': fake.VOLUME_ID, - 'status': fields.SnapshotStatus.AVAILABLE, - 'volume_size': 100, - 'display_name': 'Default name', - 'display_description': 'Default description', - 'expected_attrs': ['metadata'] - } - ctx = context.RequestContext(fake.PROJECT_ID, fake.USER_ID, True) - snapshot_obj = fake_snapshot.fake_snapshot_obj(ctx, **snapshot) - fake_volume_obj = fake_volume.fake_volume_obj(ctx) - snapshot_get_by_id.return_value = snapshot_obj - volume_get_by_id.return_value = fake_volume_obj - snapshots = objects.SnapshotList(objects=[snapshot_obj]) - get_all_snapshots.return_value = snapshots - - req = fakes.HTTPRequest.blank('/v2/snapshots/detail') - resp_dict = self.controller.detail(req) - - self.assertIn('snapshots', resp_dict) - resp_snapshots = resp_dict['snapshots'] - self.assertEqual(1, len(resp_snapshots)) - self.assertIn('updated_at', resp_snapshots[0]) - - resp_snapshot = resp_snapshots.pop() - self.assertEqual(UUID, resp_snapshot['id']) - - @mock.patch.object(db, 'snapshot_get_all_by_project', - v2_fakes.fake_snapshot_get_all_by_project) - @mock.patch.object(db, 'snapshot_get_all', - 
v2_fakes.fake_snapshot_get_all) - @mock.patch('cinder.db.snapshot_metadata_get', return_value=dict()) - def test_admin_list_snapshots_limited_to_project(self, - snapshot_metadata_get): - req = fakes.HTTPRequest.blank('/v2/%s/snapshots' % fake.PROJECT_ID, - use_admin_context=True) - res = self.controller.index(req) - - self.assertIn('snapshots', res) - self.assertEqual(1, len(res['snapshots'])) - - @mock.patch('cinder.db.snapshot_metadata_get', return_value=dict()) - def test_list_snapshots_with_limit_and_offset(self, - snapshot_metadata_get): - def list_snapshots_with_limit_and_offset(snaps, is_admin): - req = fakes.HTTPRequest.blank('/v2/%s/snapshots?limit=1' - '&offset=1' % fake.PROJECT_ID, - use_admin_context=is_admin) - res = self.controller.index(req) - - self.assertIn('snapshots', res) - self.assertEqual(1, len(res['snapshots'])) - self.assertEqual(snaps[1].id, res['snapshots'][0]['id']) - self.assertIn('updated_at', res['snapshots'][0]) - - # Test that we get an empty list with an offset greater than the - # number of items - req = fakes.HTTPRequest.blank('/v2/snapshots?limit=1&offset=3') - self.assertEqual({'snapshots': []}, self.controller.index(req)) - - volume, snaps = self._create_db_snapshots(3) - # admin case - list_snapshots_with_limit_and_offset(snaps, is_admin=True) - # non-admin case - list_snapshots_with_limit_and_offset(snaps, is_admin=False) - - @mock.patch.object(db, 'snapshot_get_all_by_project') - @mock.patch('cinder.db.snapshot_metadata_get', return_value=dict()) - def test_list_snpashots_with_wrong_limit_and_offset(self, - mock_metadata_get, - mock_snapshot_get_all): - """Test list with negative and non numeric limit and offset.""" - mock_snapshot_get_all.return_value = [] - - # Negative limit - req = fakes.HTTPRequest.blank('/v2/snapshots?limit=-1&offset=1') - self.assertRaises(webob.exc.HTTPBadRequest, - self.controller.index, - req) - - # Non numeric limit - req = fakes.HTTPRequest.blank('/v2/snapshots?limit=a&offset=1') - 
self.assertRaises(webob.exc.HTTPBadRequest, - self.controller.index, - req) - - # Negative offset - req = fakes.HTTPRequest.blank('/v2/snapshots?limit=1&offset=-1') - self.assertRaises(webob.exc.HTTPBadRequest, - self.controller.index, - req) - - # Non numeric offset - req = fakes.HTTPRequest.blank('/v2/snapshots?limit=1&offset=a') - self.assertRaises(webob.exc.HTTPBadRequest, - self.controller.index, - req) - - # Test that we get an exception HTTPBadRequest(400) with an offset - # greater than the maximum offset value. - url = '/v2/snapshots?limit=1&offset=323245324356534235' - req = fakes.HTTPRequest.blank(url) - self.assertRaises(webob.exc.HTTPBadRequest, - self.controller.index, req) - - def _assert_list_next(self, expected_query=None, project=fake.PROJECT_ID, - **kwargs): - """Check a page of snapshots list.""" - # Since we are accessing v2 api directly we don't need to specify - # v2 in the request path, if we did, we'd get /v2/v2 links back - request_path = '/v2/%s/snapshots' % project - expected_path = request_path - - # Construct the query if there are kwargs - if kwargs: - request_str = request_path + '?' + urllib.urlencode(kwargs) - else: - request_str = request_path - - # Make the request - req = fakes.HTTPRequest.blank(request_str) - res = self.controller.index(req) - - # We only expect to have a next link if there is an actual expected - # query. 
- if expected_query: - # We must have the links - self.assertIn('snapshots_links', res) - links = res['snapshots_links'] - - # Must be a list of links, even if we only get 1 back - self.assertTrue(list, type(links)) - next_link = links[0] - - # rel entry must be next - self.assertIn('rel', next_link) - self.assertIn('next', next_link['rel']) - - # href entry must have the right path - self.assertIn('href', next_link) - href_parts = urllib.urlparse(next_link['href']) - self.assertEqual(expected_path, href_parts.path) - - # And the query from the next link must match what we were - # expecting - params = urllib.parse_qs(href_parts.query) - self.assertDictEqual(expected_query, params) - - # Make sure we don't have links if we were not expecting them - else: - self.assertNotIn('snapshots_links', res) - - def _create_db_snapshots(self, num_snaps): - volume = utils.create_volume(self.ctx) - snaps = [utils.create_snapshot(self.ctx, - volume.id, - display_name='snap' + str(i)) - for i in range(num_snaps)] - - self.addCleanup(db.volume_destroy, self.ctx, volume.id) - for snap in snaps: - self.addCleanup(db.snapshot_destroy, self.ctx, snap.id) - - snaps.reverse() - return volume, snaps - - def test_list_snapshots_next_link_default_limit(self): - """Test that snapshot list pagination is limited by osapi_max_limit.""" - volume, snaps = self._create_db_snapshots(3) - - # NOTE(geguileo): Since cinder.api.common.limited has already been - # imported his argument max_limit already has a default value of 1000 - # so it doesn't matter that we change it to 2. That's why we need to - # mock it and send it current value. 
We still need to set the default - # value because other sections of the code use it, for example - # _get_collection_links - CONF.set_default('osapi_max_limit', 2) - - def get_pagination_params(params, max_limit=CONF.osapi_max_limit, - original_call=common.get_pagination_params): - return original_call(params, max_limit) - - def _get_limit_param(params, max_limit=CONF.osapi_max_limit, - original_call=common._get_limit_param): - return original_call(params, max_limit) - - with mock.patch.object(common, 'get_pagination_params', - get_pagination_params), \ - mock.patch.object(common, '_get_limit_param', - _get_limit_param): - # The link from the first page should link to the second - self._assert_list_next({'marker': [snaps[1].id]}) - - # Second page should have no next link - self._assert_list_next(marker=snaps[1].id) - - def test_list_snapshots_next_link_with_limit(self): - """Test snapshot list pagination with specific limit.""" - volume, snaps = self._create_db_snapshots(2) - - # The link from the first page should link to the second - self._assert_list_next({'limit': ['1'], 'marker': [snaps[0].id]}, - limit=1) - - # Even though there are no more elements, we should get a next element - # per specification. 
- expected = {'limit': ['1'], 'marker': [snaps[1].id]} - self._assert_list_next(expected, limit=1, marker=snaps[0].id) - - # When we go beyond the number of elements there should be no more - # next links - self._assert_list_next(limit=1, marker=snaps[1].id) - - @mock.patch.object(db, 'snapshot_get_all_by_project', - v2_fakes.fake_snapshot_get_all_by_project) - @mock.patch.object(db, 'snapshot_get_all', - v2_fakes.fake_snapshot_get_all) - @mock.patch('cinder.db.snapshot_metadata_get', return_value=dict()) - def test_admin_list_snapshots_all_tenants(self, snapshot_metadata_get): - req = fakes.HTTPRequest.blank('/v2/%s/snapshots?all_tenants=1' % - fake.PROJECT_ID, - use_admin_context=True) - res = self.controller.index(req) - self.assertIn('snapshots', res) - self.assertEqual(3, len(res['snapshots'])) - - @mock.patch.object(db, 'snapshot_get_all') - @mock.patch('cinder.db.snapshot_metadata_get', return_value=dict()) - def test_admin_list_snapshots_by_tenant_id(self, snapshot_metadata_get, - snapshot_get_all): - def get_all(context, filters=None, marker=None, limit=None, - sort_keys=None, sort_dirs=None, offset=None): - if 'project_id' in filters and 'tenant1' in filters['project_id']: - return [v2_fakes.fake_snapshot(fake.VOLUME_ID, - tenant_id='tenant1')] - else: - return [] - - snapshot_get_all.side_effect = get_all - - req = fakes.HTTPRequest.blank('/v2/%s/snapshots?all_tenants=1' - '&project_id=tenant1' % fake.PROJECT_ID, - use_admin_context=True) - res = self.controller.index(req) - self.assertIn('snapshots', res) - self.assertEqual(1, len(res['snapshots'])) - - @mock.patch.object(db, 'snapshot_get_all_by_project', - v2_fakes.fake_snapshot_get_all_by_project) - @mock.patch('cinder.db.snapshot_metadata_get', return_value=dict()) - def test_all_tenants_non_admin_gets_all_tenants(self, - snapshot_metadata_get): - req = fakes.HTTPRequest.blank('/v2/%s/snapshots?all_tenants=1' % - fake.PROJECT_ID) - res = self.controller.index(req) - self.assertIn('snapshots', res) - 
self.assertEqual(1, len(res['snapshots'])) - - @mock.patch.object(db, 'snapshot_get_all_by_project', - v2_fakes.fake_snapshot_get_all_by_project) - @mock.patch.object(db, 'snapshot_get_all', - v2_fakes.fake_snapshot_get_all) - @mock.patch('cinder.db.snapshot_metadata_get', return_value=dict()) - def test_non_admin_get_by_project(self, snapshot_metadata_get): - req = fakes.HTTPRequest.blank('/v2/%s/snapshots' % fake.PROJECT_ID) - res = self.controller.index(req) - self.assertIn('snapshots', res) - self.assertEqual(1, len(res['snapshots'])) - - def _create_snapshot_bad_body(self, body): - req = fakes.HTTPRequest.blank('/v2/%s/snapshots' % fake.PROJECT_ID) - req.method = 'POST' - - self.assertRaises(webob.exc.HTTPBadRequest, - self.controller.create, req, body) - - def test_create_no_body(self): - self._create_snapshot_bad_body(body=None) - - def test_create_missing_snapshot(self): - body = {'foo': {'a': 'b'}} - self._create_snapshot_bad_body(body=body) - - def test_create_malformed_entity(self): - body = {'snapshot': 'string'} - self._create_snapshot_bad_body(body=body) diff --git a/cinder/tests/unit/api/v2/test_types.py b/cinder/tests/unit/api/v2/test_types.py deleted file mode 100644 index df50b835f..000000000 --- a/cinder/tests/unit/api/v2/test_types.py +++ /dev/null @@ -1,533 +0,0 @@ -# Copyright 2011 OpenStack Foundation -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -import uuid - -import mock -from oslo_utils import timeutils -import six -import webob - -import cinder.api.common as common -from cinder.api.v2 import types -from cinder.api.v2.views import types as views_types -from cinder import context -from cinder import exception -from cinder import test -from cinder.tests.unit.api import fakes -from cinder.tests.unit import fake_constants as fake -from cinder.volume import volume_types - - -def fake_volume_type(id): - specs = { - "key1": "value1", - "key2": "value2", - "key3": "value3", - "key4": "value4", - "key5": "value5" - } - return dict( - id=id, - name='vol_type_%s' % six.text_type(id), - description='vol_type_desc_%s' % six.text_type(id), - extra_specs=specs, - ) - - -def return_volume_types_get_all_types(context, filters=None, marker=None, - limit=None, sort_keys=None, - sort_dirs=None, offset=None, - list_result=False): - result = dict(vol_type_1=fake_volume_type(1), - vol_type_2=fake_volume_type(2), - vol_type_3=fake_volume_type(3) - ) - if list_result: - return list(result.values()) - return result - - -def return_empty_volume_types_get_all_types(context, filters=None, marker=None, - limit=None, sort_keys=None, - sort_dirs=None, offset=None, - list_result=False): - if list_result: - return [] - return {} - - -def return_volume_types_get_volume_type(context, id): - if id == fake.WILL_NOT_BE_FOUND_ID: - raise exception.VolumeTypeNotFound(volume_type_id=id) - return fake_volume_type(id) - - -def return_volume_types_get_default(): - return fake_volume_type(1) - - -class VolumeTypesApiTest(test.TestCase): - - def _create_volume_type(self, volume_type_name, extra_specs=None, - is_public=True, projects=None): - return volume_types.create(self.ctxt, volume_type_name, extra_specs, - is_public, projects).get('id') - - def setUp(self): - super(VolumeTypesApiTest, self).setUp() - self.controller = types.VolumeTypesController() - self.ctxt = context.RequestContext(user_id=fake.USER_ID, - project_id=fake.PROJECT_ID, - 
is_admin=True) - self.type_id1 = self._create_volume_type('volume_type1', - {'key1': 'value1'}) - self.type_id2 = self._create_volume_type('volume_type2', - {'key2': 'value2'}) - self.type_id3 = self._create_volume_type('volume_type3', - {'key3': 'value3'}, False, - [fake.PROJECT_ID]) - - def test_volume_types_index(self): - self.mock_object(volume_types, 'get_all_types', - return_volume_types_get_all_types) - - req = fakes.HTTPRequest.blank('/v2/%s/types' % fake.PROJECT_ID, - use_admin_context=True) - res_dict = self.controller.index(req) - - self.assertEqual(3, len(res_dict['volume_types'])) - - expected_names = ['vol_type_1', 'vol_type_2', 'vol_type_3'] - actual_names = map(lambda e: e['name'], res_dict['volume_types']) - self.assertEqual(set(expected_names), set(actual_names)) - for entry in res_dict['volume_types']: - self.assertEqual('value1', entry['extra_specs']['key1']) - - def test_volume_types_index_no_data(self): - self.mock_object(volume_types, 'get_all_types', - return_empty_volume_types_get_all_types) - - req = fakes.HTTPRequest.blank('/v2/%s/types' % fake.PROJECT_ID) - res_dict = self.controller.index(req) - - self.assertEqual(0, len(res_dict['volume_types'])) - - def test_volume_types_index_with_limit(self): - req = fakes.HTTPRequest.blank('/v2/%s/types?limit=1' % fake.PROJECT_ID) - req.environ['cinder.context'] = self.ctxt - res = self.controller.index(req) - - self.assertEqual(1, len(res['volume_types'])) - self.assertEqual(self.type_id3, res['volume_types'][0]['id']) - - expect_next_link = ('http://localhost/v2/%s/types?limit=1' - '&marker=%s' % - (fake.PROJECT_ID, res['volume_types'][0]['id'])) - self.assertEqual(expect_next_link, res['volume_type_links'][0]['href']) - - def test_volume_types_index_with_offset(self): - req = fakes.HTTPRequest.blank( - '/v2/%s/types?offset=1' % fake.PROJECT_ID) - req.environ['cinder.context'] = self.ctxt - res = self.controller.index(req) - - self.assertEqual(2, len(res['volume_types'])) - - def 
test_volume_types_index_with_offset_out_of_range(self): - url = '/v2/%s/types?offset=424366766556787' % fake.PROJECT_ID - req = fakes.HTTPRequest.blank(url) - self.assertRaises(webob.exc.HTTPBadRequest, - self.controller.index, req) - - def test_volume_types_index_with_limit_and_offset(self): - req = fakes.HTTPRequest.blank( - '/v2/%s/types?limit=2&offset=1' % fake.PROJECT_ID) - req.environ['cinder.context'] = self.ctxt - res = self.controller.index(req) - - self.assertEqual(2, len(res['volume_types'])) - self.assertEqual(self.type_id2, res['volume_types'][0]['id']) - self.assertEqual(self.type_id1, res['volume_types'][1]['id']) - - def test_volume_types_index_with_limit_and_marker(self): - req = fakes.HTTPRequest.blank('/v2/%s/types?limit=1' - '&marker=%s' % - (fake.PROJECT_ID, - self.type_id2)) - req.environ['cinder.context'] = self.ctxt - res = self.controller.index(req) - - self.assertEqual(1, len(res['volume_types'])) - self.assertEqual(self.type_id1, res['volume_types'][0]['id']) - - def test_volume_types_index_with_valid_filter(self): - req = fakes.HTTPRequest.blank( - '/v2/%s/types?is_public=True' % fake.PROJECT_ID) - req.environ['cinder.context'] = self.ctxt - res = self.controller.index(req) - - self.assertEqual(3, len(res['volume_types'])) - self.assertEqual(self.type_id3, res['volume_types'][0]['id']) - self.assertEqual(self.type_id2, res['volume_types'][1]['id']) - self.assertEqual(self.type_id1, res['volume_types'][2]['id']) - - def test_volume_types_index_with_invalid_filter(self): - req = fakes.HTTPRequest.blank( - '/v2/%s/types?id=%s' % (fake.PROJECT_ID, self.type_id1)) - req.environ['cinder.context'] = self.ctxt - res = self.controller.index(req) - - self.assertEqual(3, len(res['volume_types'])) - - def test_volume_types_index_with_sort_keys(self): - req = fakes.HTTPRequest.blank('/v2/%s/types?sort=id' % fake.PROJECT_ID) - req.environ['cinder.context'] = self.ctxt - res = self.controller.index(req) - expect_result = [self.type_id1, self.type_id2, 
self.type_id3] - expect_result.sort(reverse=True) - - self.assertEqual(3, len(res['volume_types'])) - self.assertEqual(expect_result[0], res['volume_types'][0]['id']) - self.assertEqual(expect_result[1], res['volume_types'][1]['id']) - self.assertEqual(expect_result[2], res['volume_types'][2]['id']) - - def test_volume_types_index_with_sort_and_limit(self): - req = fakes.HTTPRequest.blank( - '/v2/%s/types?sort=id&limit=2' % fake.PROJECT_ID) - req.environ['cinder.context'] = self.ctxt - res = self.controller.index(req) - expect_result = [self.type_id1, self.type_id2, self.type_id3] - expect_result.sort(reverse=True) - - self.assertEqual(2, len(res['volume_types'])) - self.assertEqual(expect_result[0], res['volume_types'][0]['id']) - self.assertEqual(expect_result[1], res['volume_types'][1]['id']) - - def test_volume_types_index_with_sort_keys_and_sort_dirs(self): - req = fakes.HTTPRequest.blank( - '/v2/%s/types?sort=id:asc' % fake.PROJECT_ID) - req.environ['cinder.context'] = self.ctxt - res = self.controller.index(req) - expect_result = [self.type_id1, self.type_id2, self.type_id3] - expect_result.sort() - - self.assertEqual(3, len(res['volume_types'])) - self.assertEqual(expect_result[0], res['volume_types'][0]['id']) - self.assertEqual(expect_result[1], res['volume_types'][1]['id']) - self.assertEqual(expect_result[2], res['volume_types'][2]['id']) - - def test_volume_types_show(self): - self.mock_object(volume_types, 'get_volume_type', - return_volume_types_get_volume_type) - - type_id = str(uuid.uuid4()) - req = fakes.HTTPRequest.blank('/v2/%s/types/' % fake.PROJECT_ID - + type_id) - res_dict = self.controller.show(req, type_id) - - self.assertEqual(1, len(res_dict)) - self.assertEqual(type_id, res_dict['volume_type']['id']) - type_name = 'vol_type_' + type_id - self.assertEqual(type_name, res_dict['volume_type']['name']) - - def test_volume_types_show_not_found(self): - self.mock_object(volume_types, 'get_volume_type', - return_volume_types_get_volume_type) - 
- req = fakes.HTTPRequest.blank('/v2/%s/types/%s' % - (fake.PROJECT_ID, - fake.WILL_NOT_BE_FOUND_ID)) - self.assertRaises(exception.VolumeTypeNotFound, self.controller.show, - req, fake.WILL_NOT_BE_FOUND_ID) - - def test_get_default(self): - self.mock_object(volume_types, 'get_default_volume_type', - return_volume_types_get_default) - req = fakes.HTTPRequest.blank('/v2/%s/types/default' % fake.PROJECT_ID) - req.method = 'GET' - res_dict = self.controller.show(req, 'default') - self.assertEqual(1, len(res_dict)) - self.assertEqual('vol_type_1', res_dict['volume_type']['name']) - self.assertEqual('vol_type_desc_1', - res_dict['volume_type']['description']) - - def test_get_default_not_found(self): - self.mock_object(volume_types, 'get_default_volume_type', - return_value={}) - req = fakes.HTTPRequest.blank('/v2/%s/types/default' % fake.PROJECT_ID) - req.method = 'GET' - - self.assertRaises(exception.VolumeTypeNotFound, - self.controller.show, req, 'default') - - def test_view_builder_show(self): - view_builder = views_types.ViewBuilder() - - now = timeutils.utcnow().isoformat() - raw_volume_type = dict( - name='new_type', - description='new_type_desc', - qos_specs_id='new_id', - is_public=True, - deleted=False, - created_at=now, - updated_at=now, - extra_specs={}, - deleted_at=None, - id=42, - ) - - request = fakes.HTTPRequest.blank("/v2") - output = view_builder.show(request, raw_volume_type) - - self.assertIn('volume_type', output) - expected_volume_type = dict( - name='new_type', - description='new_type_desc', - is_public=True, - id=42, - ) - self.assertDictEqual(expected_volume_type, output['volume_type']) - - def test_view_builder_show_admin(self): - view_builder = views_types.ViewBuilder() - - now = timeutils.utcnow().isoformat() - raw_volume_type = dict( - name='new_type', - description='new_type_desc', - qos_specs_id='new_id', - is_public=True, - deleted=False, - created_at=now, - updated_at=now, - extra_specs={}, - deleted_at=None, - id=42, - ) - - request = 
fakes.HTTPRequest.blank("/v2", use_admin_context=True) - output = view_builder.show(request, raw_volume_type) - - self.assertIn('volume_type', output) - expected_volume_type = dict( - name='new_type', - description='new_type_desc', - qos_specs_id='new_id', - is_public=True, - extra_specs={}, - id=42, - ) - self.assertDictEqual(expected_volume_type, output['volume_type']) - - def test_view_builder_show_qos_specs_id_policy(self): - with mock.patch.object(common, - 'validate_policy', - side_effect=[False, True]): - view_builder = views_types.ViewBuilder() - now = timeutils.utcnow().isoformat() - raw_volume_type = dict( - name='new_type', - description='new_type_desc', - qos_specs_id='new_id', - is_public=True, - deleted=False, - created_at=now, - updated_at=now, - extra_specs={}, - deleted_at=None, - id=42, - ) - - request = fakes.HTTPRequest.blank("/v2") - output = view_builder.show(request, raw_volume_type) - - self.assertIn('volume_type', output) - expected_volume_type = dict( - name='new_type', - description='new_type_desc', - qos_specs_id='new_id', - is_public=True, - id=42, - ) - self.assertDictEqual(expected_volume_type, output['volume_type']) - - def test_view_builder_show_extra_specs_policy(self): - with mock.patch.object(common, - 'validate_policy', - side_effect=[True, False]): - view_builder = views_types.ViewBuilder() - now = timeutils.utcnow().isoformat() - raw_volume_type = dict( - name='new_type', - description='new_type_desc', - qos_specs_id='new_id', - is_public=True, - deleted=False, - created_at=now, - updated_at=now, - extra_specs={}, - deleted_at=None, - id=42, - ) - - request = fakes.HTTPRequest.blank("/v2") - output = view_builder.show(request, raw_volume_type) - - self.assertIn('volume_type', output) - expected_volume_type = dict( - name='new_type', - description='new_type_desc', - extra_specs={}, - is_public=True, - id=42, - ) - self.assertDictEqual(expected_volume_type, output['volume_type']) - - with mock.patch.object(common, - 
'validate_policy', - side_effect=[False, False]): - view_builder = views_types.ViewBuilder() - now = timeutils.utcnow().isoformat() - raw_volume_type = dict( - name='new_type', - description='new_type_desc', - qos_specs_id='new_id', - is_public=True, - deleted=False, - created_at=now, - updated_at=now, - extra_specs={}, - deleted_at=None, - id=42, - ) - - request = fakes.HTTPRequest.blank("/v2") - output = view_builder.show(request, raw_volume_type) - - self.assertIn('volume_type', output) - expected_volume_type = dict( - name='new_type', - description='new_type_desc', - is_public=True, - id=42, - ) - self.assertDictEqual(expected_volume_type, output['volume_type']) - - def test_view_builder_show_pass_all_policy(self): - with mock.patch.object(common, - 'validate_policy', - side_effect=[True, True]): - view_builder = views_types.ViewBuilder() - now = timeutils.utcnow().isoformat() - raw_volume_type = dict( - name='new_type', - description='new_type_desc', - qos_specs_id='new_id', - is_public=True, - deleted=False, - created_at=now, - updated_at=now, - extra_specs={}, - deleted_at=None, - id=42, - ) - - request = fakes.HTTPRequest.blank("/v2") - output = view_builder.show(request, raw_volume_type) - - self.assertIn('volume_type', output) - expected_volume_type = dict( - name='new_type', - description='new_type_desc', - qos_specs_id='new_id', - extra_specs={}, - is_public=True, - id=42, - ) - self.assertDictEqual(expected_volume_type, output['volume_type']) - - def test_view_builder_list(self): - view_builder = views_types.ViewBuilder() - - now = timeutils.utcnow().isoformat() - raw_volume_types = [] - for i in range(0, 10): - raw_volume_types.append( - dict( - name='new_type', - description='new_type_desc', - qos_specs_id='new_id', - is_public=True, - deleted=False, - created_at=now, - updated_at=now, - extra_specs={}, - deleted_at=None, - id=42 + i - ) - ) - - request = fakes.HTTPRequest.blank("/v2") - output = view_builder.index(request, raw_volume_types) - - 
self.assertIn('volume_types', output) - for i in range(0, 10): - expected_volume_type = dict( - name='new_type', - description='new_type_desc', - is_public=True, - id=42 + i - ) - self.assertDictEqual(expected_volume_type, - output['volume_types'][i]) - - def test_view_builder_list_admin(self): - view_builder = views_types.ViewBuilder() - - now = timeutils.utcnow().isoformat() - raw_volume_types = [] - for i in range(0, 10): - raw_volume_types.append( - dict( - name='new_type', - description='new_type_desc', - qos_specs_id='new_id', - is_public=True, - deleted=False, - created_at=now, - updated_at=now, - extra_specs={}, - deleted_at=None, - id=42 + i - ) - ) - - request = fakes.HTTPRequest.blank("/v2", use_admin_context=True) - output = view_builder.index(request, raw_volume_types) - - self.assertIn('volume_types', output) - for i in range(0, 10): - expected_volume_type = dict( - name='new_type', - description='new_type_desc', - qos_specs_id='new_id', - is_public=True, - extra_specs={}, - id=42 + i - ) - self.assertDictEqual(expected_volume_type, - output['volume_types'][i]) diff --git a/cinder/tests/unit/api/v2/test_volume_metadata.py b/cinder/tests/unit/api/v2/test_volume_metadata.py deleted file mode 100644 index b0730001e..000000000 --- a/cinder/tests/unit/api/v2/test_volume_metadata.py +++ /dev/null @@ -1,744 +0,0 @@ -# Copyright 2013 OpenStack Foundation. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -import uuid - -import mock -from oslo_config import cfg -from oslo_serialization import jsonutils -from six.moves import http_client -import webob - -from cinder.api import extensions -from cinder.api.v2 import volume_metadata -from cinder.api.v2 import volumes -from cinder import db -from cinder import exception -from cinder import objects -from cinder import test -from cinder.tests.unit.api import fakes -from cinder.tests.unit.api.v2 import fakes as v2_fakes -from cinder.tests.unit import fake_constants as fake -from cinder.tests.unit import fake_volume -from cinder import volume -from cinder.volume import api as volume_api - - -CONF = cfg.CONF - - -def return_create_volume_metadata_max(context, volume_id, metadata, delete): - return fake_max_volume_metadata() - - -def return_create_volume_metadata(context, volume_id, metadata, - delete, meta_type): - return fake_volume_metadata() - - -def return_new_volume_metadata(context, volume_id, metadata, - delete, meta_type): - return fake_new_volume_metadata() - - -def return_create_volume_metadata_insensitive(context, snapshot_id, - metadata, delete, - meta_type): - return fake_volume_metadata_insensitive() - - -def return_volume_metadata(context, volume_id): - return fake_volume_metadata() - - -def fake_volume_metadata(): - metadata = { - "key1": "value1", - "key2": "value2", - "key3": "value3", - } - return metadata - - -def fake_new_volume_metadata(): - metadata = { - 'key10': 'value10', - 'key99': 'value99', - 'KEY20': 'value20', - } - return metadata - - -def fake_volume_metadata_insensitive(): - metadata = { - "key1": "value1", - "key2": "value2", - "key3": "value3", - "KEY4": "value4", - } - return metadata - - -def fake_max_volume_metadata(): - metadata = {"metadata": {}} - for num in range(CONF.quota_metadata_items): - metadata['metadata']['key%i' % num] = "blah" - return metadata - - -def get_volume(*args, **kwargs): - vol = {'name': 'fake', - 'metadata': {}} - return fake_volume.fake_volume_obj(args[0], 
**vol) - - -def return_volume_nonexistent(*args, **kwargs): - raise exception.VolumeNotFound('bogus test message') - - -class volumeMetaDataTest(test.TestCase): - - def setUp(self): - super(volumeMetaDataTest, self).setUp() - self.volume_api = volume_api.API() - self.mock_object(volume.api.API, 'get', get_volume) - self.mock_object(db, 'volume_metadata_get', - return_volume_metadata) - self.mock_object(db, 'service_get_all', - return_value=v2_fakes.fake_service_get_all_by_topic( - None, None), - autospec=True) - self.mock_object(self.volume_api, 'update_volume_metadata') - - self.ext_mgr = extensions.ExtensionManager() - self.ext_mgr.extensions = {} - self.volume_controller = volumes.VolumeController(self.ext_mgr) - self.controller = volume_metadata.Controller() - self.req_id = str(uuid.uuid4()) - self.url = '/v2/%s/volumes/%s/metadata' % ( - fake.PROJECT_ID, self.req_id) - - vol = {"size": 100, - "display_name": "Volume Test Name", - "display_description": "Volume Test Desc", - "availability_zone": "zone1:host1", - "metadata": {}} - body = {"volume": vol} - req = fakes.HTTPRequest.blank('/v2/volumes') - self.volume_controller.create(req, body) - - def test_index(self): - req = fakes.HTTPRequest.blank(self.url) - res_dict = self.controller.index(req, self.req_id) - - expected = { - 'metadata': { - 'key1': 'value1', - 'key2': 'value2', - 'key3': 'value3', - }, - } - self.assertEqual(expected, res_dict) - - def test_index_nonexistent_volume(self): - self.mock_object(db, 'volume_metadata_get', - return_volume_nonexistent) - req = fakes.HTTPRequest.blank(self.url) - self.assertRaises(exception.VolumeNotFound, - self.controller.index, req, self.url) - - def test_index_no_data(self): - self.mock_object(db, 'volume_metadata_get', - return_value={}) - req = fakes.HTTPRequest.blank(self.url) - res_dict = self.controller.index(req, self.req_id) - expected = {'metadata': {}} - self.assertEqual(expected, res_dict) - - def test_show(self): - req = 
fakes.HTTPRequest.blank(self.url + '/key2') - res_dict = self.controller.show(req, self.req_id, 'key2') - expected = {'meta': {'key2': 'value2'}} - self.assertEqual(expected, res_dict) - - def test_show_nonexistent_volume(self): - self.mock_object(db, 'volume_metadata_get', - return_volume_nonexistent) - req = fakes.HTTPRequest.blank(self.url + '/key2') - self.assertRaises(exception.VolumeNotFound, - self.controller.show, req, self.req_id, 'key2') - - def test_show_meta_not_found(self): - self.mock_object(db, 'volume_metadata_get', - return_value={}) - req = fakes.HTTPRequest.blank(self.url + '/key6') - self.assertRaises(exception.VolumeMetadataNotFound, - self.controller.show, req, self.req_id, 'key6') - - @mock.patch.object(db, 'volume_metadata_delete') - @mock.patch.object(db, 'volume_metadata_get') - def test_delete(self, metadata_get, metadata_delete): - fake_volume = objects.Volume(id=self.req_id, status='available') - fake_context = mock.Mock() - metadata_get.side_effect = return_volume_metadata - req = fakes.HTTPRequest.blank(self.url + '/key2') - req.method = 'DELETE' - req.environ['cinder.context'] = fake_context - - with mock.patch.object(self.controller.volume_api, - 'get') as get_volume: - get_volume.return_value = fake_volume - res = self.controller.delete(req, self.req_id, 'key2') - self.assertEqual(http_client.OK, res.status_int) - get_volume.assert_called_once_with(fake_context, self.req_id) - - @mock.patch.object(db, 'volume_metadata_delete') - @mock.patch.object(db, 'volume_metadata_get') - def test_delete_volume_maintenance(self, metadata_get, metadata_delete): - fake_volume = objects.Volume(id=self.req_id, status='maintenance') - fake_context = mock.Mock() - metadata_get.side_effect = return_volume_metadata - req = fakes.HTTPRequest.blank(self.url + '/key2') - req.method = 'DELETE' - req.environ['cinder.context'] = fake_context - - with mock.patch.object(self.controller.volume_api, - 'get') as get_volume: - get_volume.return_value = fake_volume 
- self.assertRaises(exception.InvalidVolume, - self.controller.delete, req, - self.req_id, 'key2') - get_volume.assert_called_once_with(fake_context, self.req_id) - - @mock.patch.object(db, 'volume_metadata_delete') - @mock.patch.object(db, 'volume_metadata_get') - def test_delete_nonexistent_volume(self, metadata_get, metadata_delete): - fake_volume = objects.Volume(id=self.req_id, status='available') - fake_context = mock.Mock() - metadata_get.side_effect = return_volume_metadata - metadata_delete.side_effect = return_volume_nonexistent - req = fakes.HTTPRequest.blank(self.url + '/key1') - req.method = 'DELETE' - req.environ['cinder.context'] = fake_context - - with mock.patch.object(self.controller.volume_api, - 'get') as get_volume: - get_volume.return_value = fake_volume - self.assertRaises(exception.VolumeNotFound, - self.controller.delete, req, - self.req_id, 'key1') - get_volume.assert_called_once_with(fake_context, self.req_id) - - def test_delete_meta_not_found(self): - self.mock_object(db, 'volume_metadata_get', - return_value={}) - req = fakes.HTTPRequest.blank(self.url + '/key6') - req.method = 'DELETE' - self.assertRaises(exception.VolumeMetadataNotFound, - self.controller.delete, req, self.req_id, 'key6') - - @mock.patch.object(db, 'volume_metadata_update') - @mock.patch.object(db, 'volume_metadata_get') - def test_create(self, metadata_get, metadata_update): - fake_volume = {'id': self.req_id, 'status': 'available'} - fake_context = mock.Mock() - metadata_get.return_value = {} - metadata_update.side_effect = return_create_volume_metadata - req = fakes.HTTPRequest.blank('/v2/volume_metadata') - req.method = 'POST' - req.content_type = "application/json" - body = {"metadata": {"key1": "value1", - "key2": "value2", - "key3": "value3", }} - req.body = jsonutils.dump_as_bytes(body) - req.environ['cinder.context'] = fake_context - - with mock.patch.object(self.controller.volume_api, - 'get') as get_volume: - get_volume.return_value = fake_volume - 
res_dict = self.controller.create(req, self.req_id, body) - self.assertEqual(body, res_dict) - - @mock.patch.object(db, 'volume_metadata_update') - @mock.patch.object(db, 'volume_metadata_get') - def test_create_volume_maintenance(self, metadata_get, metadata_update): - fake_volume = {'id': self.req_id, 'status': 'maintenance'} - fake_context = mock.Mock() - metadata_get.return_value = {} - metadata_update.side_effect = return_create_volume_metadata - req = fakes.HTTPRequest.blank('/v2/volume_metadata') - req.method = 'POST' - req.content_type = "application/json" - body = {"metadata": {"key1": "value1", - "key2": "value2", - "key3": "value3", }} - req.body = jsonutils.dump_as_bytes(body) - req.environ['cinder.context'] = fake_context - - with mock.patch.object(self.controller.volume_api, - 'get') as get_volume: - get_volume.return_value = fake_volume - self.assertRaises(exception.InvalidVolume, - self.controller.create, - req, self.req_id, body) - - @mock.patch.object(db, 'volume_metadata_update') - @mock.patch.object(db, 'volume_metadata_get') - def test_create_with_keys_in_uppercase_and_lowercase(self, metadata_get, - metadata_update): - # if the keys in uppercase_and_lowercase, should return the one - # which server added - fake_volume = {'id': self.req_id, 'status': 'available'} - fake_context = mock.Mock() - metadata_get.return_value = {} - metadata_update.side_effect = return_create_volume_metadata_insensitive - - req = fakes.HTTPRequest.blank('/v2/volume_metadata') - req.method = 'POST' - req.content_type = "application/json" - body = {"metadata": {"key1": "value1", - "KEY1": "value1", - "key2": "value2", - "KEY2": "value2", - "key3": "value3", - "KEY4": "value4"}} - expected = {"metadata": {"key1": "value1", - "key2": "value2", - "key3": "value3", - "KEY4": "value4"}} - req.body = jsonutils.dump_as_bytes(body) - req.environ['cinder.context'] = fake_context - - with mock.patch.object(self.controller.volume_api, - 'get') as get_volume: - 
get_volume.return_value = fake_volume - res_dict = self.controller.create(req, self.req_id, body) - self.assertEqual(expected, res_dict) - - def test_create_empty_body(self): - self.mock_object(db, 'volume_metadata_update', - return_create_volume_metadata) - req = fakes.HTTPRequest.blank(self.url) - req.method = 'POST' - req.headers["content-type"] = "application/json" - - self.assertRaises(webob.exc.HTTPBadRequest, - self.controller.create, req, self.req_id, None) - - def test_create_metadata_keys_value_none(self): - self.mock_object(db, 'volume_metadata_update', - return_create_volume_metadata) - req = fakes.HTTPRequest.blank(self.url) - req.method = 'POST' - req.headers["content-type"] = "application/json" - body = {"meta": {"key": None}} - self.assertRaises(webob.exc.HTTPBadRequest, - self.controller.create, req, self.req_id, body) - - def test_create_item_empty_key(self): - self.mock_object(db, 'volume_metadata_update', - return_create_volume_metadata) - req = fakes.HTTPRequest.blank(self.url + '/key1') - req.method = 'PUT' - body = {"meta": {"": "value1"}} - req.body = jsonutils.dump_as_bytes(body) - req.headers["content-type"] = "application/json" - - self.assertRaises(webob.exc.HTTPBadRequest, - self.controller.create, req, self.req_id, body) - - def test_create_item_key_too_long(self): - self.mock_object(db, 'volume_metadata_update', - return_create_volume_metadata) - req = fakes.HTTPRequest.blank(self.url + '/key1') - req.method = 'PUT' - body = {"meta": {("a" * 260): "value1"}} - req.body = jsonutils.dump_as_bytes(body) - req.headers["content-type"] = "application/json" - - self.assertRaises(webob.exc.HTTPBadRequest, - self.controller.create, - req, self.req_id, body) - - def test_create_nonexistent_volume(self): - self.mock_object(volume.api.API, 'get', - return_volume_nonexistent) - self.mock_object(db, 'volume_metadata_get', - return_volume_metadata) - self.mock_object(db, 'volume_metadata_update', - return_create_volume_metadata) - - req = 
fakes.HTTPRequest.blank('/v2/volume_metadata') - req.method = 'POST' - req.content_type = "application/json" - body = {"metadata": {"key9": "value9"}} - req.body = jsonutils.dump_as_bytes(body) - self.assertRaises(exception.VolumeNotFound, - self.controller.create, req, self.req_id, body) - - @mock.patch.object(db, 'volume_metadata_update') - def test_update_all(self, metadata_update): - fake_volume = {'id': self.req_id, 'status': 'available'} - fake_context = mock.Mock() - metadata_update.side_effect = return_new_volume_metadata - req = fakes.HTTPRequest.blank(self.url) - req.method = 'PUT' - req.content_type = "application/json" - expected = { - 'metadata': { - 'key10': 'value10', - 'key99': 'value99', - 'KEY20': 'value20', - }, - } - req.body = jsonutils.dump_as_bytes(expected) - req.environ['cinder.context'] = fake_context - - with mock.patch.object(self.controller.volume_api, - 'get') as get_volume: - get_volume.return_value = fake_volume - res_dict = self.controller.update_all(req, self.req_id, expected) - self.assertEqual(expected, res_dict) - get_volume.assert_called_once_with(fake_context, self.req_id) - - @mock.patch.object(db, 'volume_metadata_update') - def test_update_all_volume_maintenance(self, metadata_update): - fake_volume = {'id': self.req_id, 'status': 'maintenance'} - fake_context = mock.Mock() - metadata_update.side_effect = return_new_volume_metadata - req = fakes.HTTPRequest.blank(self.url) - req.method = 'PUT' - req.content_type = "application/json" - expected = { - 'metadata': { - 'key10': 'value10', - 'key99': 'value99', - 'KEY20': 'value20', - }, - } - req.body = jsonutils.dump_as_bytes(expected) - req.environ['cinder.context'] = fake_context - - with mock.patch.object(self.controller.volume_api, - 'get') as get_volume: - get_volume.return_value = fake_volume - self.assertRaises(exception.InvalidVolume, - self.controller.update_all, req, - self.req_id, expected) - self.assertFalse(metadata_update.called) - 
get_volume.assert_called_once_with(fake_context, self.req_id) - - @mock.patch.object(db, 'volume_metadata_update') - @mock.patch.object(db, 'volume_metadata_get') - def test_update_all_with_keys_in_uppercase_and_lowercase(self, - metadata_get, - metadata_update): - fake_volume = {'id': self.req_id, 'status': 'available'} - fake_context = mock.Mock() - metadata_get.side_effect = return_create_volume_metadata - metadata_update.side_effect = return_new_volume_metadata - req = fakes.HTTPRequest.blank(self.url) - req.method = 'PUT' - req.content_type = "application/json" - body = { - 'metadata': { - 'key10': 'value10', - 'KEY10': 'value10', - 'key99': 'value99', - 'KEY20': 'value20', - }, - } - expected = { - 'metadata': { - 'key10': 'value10', - 'key99': 'value99', - 'KEY20': 'value20', - }, - } - req.body = jsonutils.dump_as_bytes(expected) - req.environ['cinder.context'] = fake_context - - with mock.patch.object(self.controller.volume_api, - 'get') as get_volume: - get_volume.return_value = fake_volume - res_dict = self.controller.update_all(req, self.req_id, body) - self.assertEqual(expected, res_dict) - get_volume.assert_called_once_with(fake_context, self.req_id) - - @mock.patch.object(db, 'volume_metadata_update') - def test_update_all_empty_container(self, metadata_update): - fake_volume = {'id': self.req_id, 'status': 'available'} - fake_context = mock.Mock() - metadata_update.return_value = {} - req = fakes.HTTPRequest.blank(self.url) - req.method = 'PUT' - req.content_type = "application/json" - expected = {'metadata': {}} - req.body = jsonutils.dump_as_bytes(expected) - req.environ['cinder.context'] = fake_context - - with mock.patch.object(self.controller.volume_api, - 'get') as get_volume: - get_volume.return_value = fake_volume - res_dict = self.controller.update_all(req, self.req_id, expected) - self.assertEqual(expected, res_dict) - get_volume.assert_called_once_with(fake_context, self.req_id) - - def test_update_all_malformed_container(self): - 
self.mock_object(db, 'volume_metadata_update', - return_create_volume_metadata) - req = fakes.HTTPRequest.blank(self.url) - req.method = 'PUT' - req.content_type = "application/json" - expected = {'meta': {}} - req.body = jsonutils.dump_as_bytes(expected) - - self.assertRaises(webob.exc.HTTPBadRequest, - self.controller.update_all, req, self.req_id, - expected) - - def test_update_all_malformed_data(self): - self.mock_object(db, 'volume_metadata_update', - return_create_volume_metadata) - req = fakes.HTTPRequest.blank(self.url) - req.method = 'PUT' - req.content_type = "application/json" - expected = {'metadata': ['asdf']} - req.body = jsonutils.dump_as_bytes(expected) - - self.assertRaises(webob.exc.HTTPBadRequest, - self.controller.update_all, req, self.req_id, - expected) - - def test_update_all_nonexistent_volume(self): - self.mock_object(db, 'volume_get', return_volume_nonexistent) - req = fakes.HTTPRequest.blank(self.url) - req.method = 'PUT' - req.content_type = "application/json" - body = {'metadata': {'key10': 'value10'}} - req.body = jsonutils.dump_as_bytes(body) - - self.assertRaises(exception.VolumeNotFound, - self.controller.update_all, req, '100', body) - - @mock.patch.object(db, 'volume_metadata_update') - def test_update_item(self, metadata_update): - fake_volume = {'id': self.req_id, 'status': 'available'} - fake_context = mock.Mock() - metadata_update.side_effect = return_create_volume_metadata - req = fakes.HTTPRequest.blank(self.url + '/key1') - req.method = 'PUT' - body = {"meta": {"key1": "value1"}} - req.body = jsonutils.dump_as_bytes(body) - req.headers["content-type"] = "application/json" - req.environ['cinder.context'] = fake_context - - with mock.patch.object(self.controller.volume_api, - 'get') as get_volume: - get_volume.return_value = fake_volume - res_dict = self.controller.update(req, self.req_id, 'key1', body) - expected = {'meta': {'key1': 'value1'}} - self.assertEqual(expected, res_dict) - 
get_volume.assert_called_once_with(fake_context, self.req_id) - - def test_update_metadata_item_keys_value_none(self): - self.mock_object(db, 'volume_metadata_update', - return_create_volume_metadata) - req = fakes.HTTPRequest.blank(self.url + '/key1') - req.method = 'PUT' - body = {"meta": {"a": None}} - req.body = jsonutils.dump_as_bytes(body) - req.headers["content-type"] = "application/json" - - self.assertRaises(webob.exc.HTTPBadRequest, - self.controller.update, - req, self.req_id, 'key1', body) - - @mock.patch.object(db, 'volume_metadata_update') - def test_update_item_volume_maintenance(self, metadata_update): - fake_volume = {'id': self.req_id, 'status': 'maintenance'} - fake_context = mock.Mock() - metadata_update.side_effect = return_create_volume_metadata - req = fakes.HTTPRequest.blank(self.url + '/key1') - req.method = 'PUT' - body = {"meta": {"key1": "value1"}} - req.body = jsonutils.dump_as_bytes(body) - req.headers["content-type"] = "application/json" - req.environ['cinder.context'] = fake_context - - with mock.patch.object(self.controller.volume_api, - 'get') as get_volume: - get_volume.return_value = fake_volume - self.assertRaises(exception.InvalidVolume, - self.controller.update, req, - self.req_id, 'key1', body) - self.assertFalse(metadata_update.called) - get_volume.assert_called_once_with(fake_context, self.req_id) - - def test_update_item_nonexistent_volume(self): - self.mock_object(db, 'volume_get', - return_volume_nonexistent) - req = fakes.HTTPRequest.blank( - '/v2/%s/volumes/asdf/metadata/key1' % fake.PROJECT_ID) - req.method = 'PUT' - body = {"meta": {"key1": "value1"}} - req.body = jsonutils.dump_as_bytes(body) - req.headers["content-type"] = "application/json" - - self.assertRaises(exception.VolumeNotFound, - self.controller.update, req, self.req_id, 'key1', - body) - - def test_update_item_empty_body(self): - self.mock_object(db, 'volume_metadata_update', - return_create_volume_metadata) - req = fakes.HTTPRequest.blank(self.url + 
'/key1') - req.method = 'PUT' - req.headers["content-type"] = "application/json" - - self.assertRaises(webob.exc.HTTPBadRequest, - self.controller.update, req, self.req_id, 'key1', - None) - - @mock.patch.object(db, 'volume_metadata_update') - def test_update_item_empty_key(self, metadata_update): - fake_volume = {'id': self.req_id, 'status': 'available'} - fake_context = mock.Mock() - metadata_update.side_effect = return_create_volume_metadata - req = fakes.HTTPRequest.blank(self.url + '/key1') - req.method = 'PUT' - body = {"meta": {"": "value1"}} - req.body = jsonutils.dump_as_bytes(body) - req.headers["content-type"] = "application/json" - req.environ['cinder.context'] = fake_context - - with mock.patch.object(self.controller.volume_api, - 'get') as get_volume: - get_volume.return_value = fake_volume - self.assertRaises(webob.exc.HTTPBadRequest, - self.controller.update, req, self.req_id, - '', body) - self.assertFalse(metadata_update.called) - get_volume.assert_called_once_with(fake_context, self.req_id) - - @mock.patch.object(db, 'volume_metadata_update') - def test_update_item_key_too_long(self, metadata_update): - fake_volume = {'id': self.req_id, 'status': 'available'} - fake_context = mock.Mock() - metadata_update.side_effect = return_create_volume_metadata - req = fakes.HTTPRequest.blank(self.url + '/key1') - req.method = 'PUT' - body = {"meta": {("a" * 260): "value1"}} - req.body = jsonutils.dump_as_bytes(body) - req.headers["content-type"] = "application/json" - req.environ['cinder.context'] = fake_context - - with mock.patch.object(self.controller.volume_api, - 'get') as get_volume: - get_volume.return_value = fake_volume - self.assertRaises(webob.exc.HTTPRequestEntityTooLarge, - self.controller.update, - req, self.req_id, ("a" * 260), body) - self.assertFalse(metadata_update.called) - get_volume.assert_called_once_with(fake_context, self.req_id) - - @mock.patch.object(db, 'volume_metadata_update') - def test_update_item_value_too_long(self, 
metadata_update): - fake_volume = {'id': self.req_id, 'status': 'available'} - fake_context = mock.Mock() - metadata_update.side_effect = return_create_volume_metadata - req = fakes.HTTPRequest.blank(self.url + '/key1') - req.method = 'PUT' - body = {"meta": {"key1": ("a" * 260)}} - req.body = jsonutils.dump_as_bytes(body) - req.headers["content-type"] = "application/json" - req.environ['cinder.context'] = fake_context - - with mock.patch.object(self.controller.volume_api, - 'get') as get_volume: - get_volume.return_value = fake_volume - self.assertRaises(webob.exc.HTTPRequestEntityTooLarge, - self.controller.update, - req, self.req_id, "key1", body) - self.assertFalse(metadata_update.called) - get_volume.assert_called_once_with(fake_context, self.req_id) - - def test_update_item_too_many_keys(self): - self.mock_object(db, 'volume_metadata_update', - return_create_volume_metadata) - req = fakes.HTTPRequest.blank(self.url + '/key1') - req.method = 'PUT' - body = {"meta": {"key1": "value1", "key2": "value2"}} - req.body = jsonutils.dump_as_bytes(body) - req.headers["content-type"] = "application/json" - - self.assertRaises(webob.exc.HTTPBadRequest, - self.controller.update, req, self.req_id, 'key1', - body) - - def test_update_item_body_uri_mismatch(self): - self.mock_object(db, 'volume_metadata_update', - return_create_volume_metadata) - req = fakes.HTTPRequest.blank(self.url + '/bad') - req.method = 'PUT' - body = {"meta": {"key1": "value1"}} - req.body = jsonutils.dump_as_bytes(body) - req.headers["content-type"] = "application/json" - - self.assertRaises(webob.exc.HTTPBadRequest, - self.controller.update, req, self.req_id, 'bad', - body) - - @mock.patch.object(db, 'volume_metadata_update') - def test_invalid_metadata_items_on_create(self, metadata_update): - fake_volume = {'id': self.req_id, 'status': 'available'} - fake_context = mock.Mock() - metadata_update.side_effect = return_create_volume_metadata - req = fakes.HTTPRequest.blank(self.url) - req.method = 
'POST' - req.headers["content-type"] = "application/json" - - # test for long key - data = {"metadata": {"a" * 260: "value1"}} - req.body = jsonutils.dump_as_bytes(data) - req.environ['cinder.context'] = fake_context - - with mock.patch.object(self.controller.volume_api, - 'get') as get_volume: - get_volume.return_value = fake_volume - self.assertRaises(webob.exc.HTTPRequestEntityTooLarge, - self.controller.create, req, self.req_id, data) - - # test for long value - data = {"metadata": {"key": "v" * 260}} - req.body = jsonutils.dump_as_bytes(data) - req.environ['cinder.context'] = fake_context - - with mock.patch.object(self.controller.volume_api, - 'get') as get_volume: - get_volume.return_value = fake_volume - self.assertRaises(webob.exc.HTTPRequestEntityTooLarge, - self.controller.create, req, self.req_id, data) - - # test for empty key. - data = {"metadata": {"": "value1"}} - req.body = jsonutils.dump_as_bytes(data) - req.environ['cinder.context'] = fake_context - - with mock.patch.object(self.controller.volume_api, - 'get') as get_volume: - get_volume.return_value = fake_volume - self.assertRaises(webob.exc.HTTPBadRequest, - self.controller.create, req, self.req_id, data) diff --git a/cinder/tests/unit/api/v2/test_volumes.py b/cinder/tests/unit/api/v2/test_volumes.py deleted file mode 100644 index 6a3888975..000000000 --- a/cinder/tests/unit/api/v2/test_volumes.py +++ /dev/null @@ -1,1728 +0,0 @@ -# Copyright 2013 Josh Durgin -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the -# License for the specific language governing permissions and limitations -# under the License. - - -import datetime -import iso8601 - -import ddt -import mock -from oslo_config import cfg -import six -from six.moves import http_client -from six.moves import range -from six.moves import urllib -import webob - -from cinder.api import common -from cinder.api import extensions -from cinder.api.v2 import volumes -from cinder import context -from cinder import db -from cinder import exception -from cinder import group as groupAPI -from cinder import objects -from cinder.objects import fields -from cinder import test -from cinder.tests.unit.api import fakes -from cinder.tests.unit.api.v2 import fakes as v2_fakes -from cinder.tests.unit import fake_constants as fake -from cinder.tests.unit import fake_volume -from cinder.tests.unit.image import fake as fake_image -from cinder.tests.unit import utils -from cinder.volume import api as volume_api - -CONF = cfg.CONF - -NS = '{http://docs.openstack.org/api/openstack-block-storage/2.0/content}' - -DEFAULT_AZ = "zone1:host1" - - -@ddt.ddt -class VolumeApiTest(test.TestCase): - def setUp(self): - super(VolumeApiTest, self).setUp() - self.ext_mgr = extensions.ExtensionManager() - self.ext_mgr.extensions = {} - fake_image.mock_image_service(self) - self.controller = volumes.VolumeController(self.ext_mgr) - self.maxDiff = None - self.ctxt = context.RequestContext(fake.USER_ID, fake.PROJECT_ID, True) - - @mock.patch( - 'cinder.api.openstack.wsgi.Controller.validate_name_and_description') - def test_volume_create(self, mock_validate): - self.mock_object(volume_api.API, 'get', v2_fakes.fake_volume_get) - self.mock_object(volume_api.API, "create", - v2_fakes.fake_volume_api_create) - self.mock_object(db.sqlalchemy.api, '_volume_type_get_full', - v2_fakes.fake_volume_type_get) - - vol = self._vol_in_request_body() - body = {"volume": vol} - req = fakes.HTTPRequest.blank('/v2/volumes') - res_dict = self.controller.create(req, 
body) - ex = self._expected_vol_from_controller() - self.assertEqual(ex, res_dict) - self.assertTrue(mock_validate.called) - - @mock.patch.object(db, 'volume_get_all', v2_fakes.fake_volume_get_all) - @mock.patch.object(db, 'service_get_all', - return_value=v2_fakes.fake_service_get_all_by_topic( - None, None), - autospec=True) - @mock.patch( - 'cinder.api.openstack.wsgi.Controller.validate_name_and_description') - def test_volume_create_with_type(self, mock_validate, mock_service_get): - vol_type = db.volume_type_create( - context.get_admin_context(), - dict(name=CONF.default_volume_type, extra_specs={}) - ) - - db_vol_type = db.volume_type_get(context.get_admin_context(), - vol_type.id) - - vol = self._vol_in_request_body(volume_type="FakeTypeName") - body = {"volume": vol} - req = fakes.HTTPRequest.blank('/v2/volumes') - # Raise 404 when type name isn't valid - self.assertRaises(exception.VolumeTypeNotFoundByName, - self.controller.create, req, body) - - # Use correct volume type name - vol.update(dict(volume_type=CONF.default_volume_type)) - body.update(dict(volume=vol)) - res_dict = self.controller.create(req, body) - volume_id = res_dict['volume']['id'] - self.assertEqual(1, len(res_dict)) - - # Use correct volume type id - vol.update(dict(volume_type=db_vol_type['id'])) - body.update(dict(volume=vol)) - res_dict = self.controller.create(req, body) - volume_id = res_dict['volume']['id'] - self.assertEqual(1, len(res_dict)) - - vol_db = v2_fakes.create_fake_volume(volume_id, - volume_type={'name': vol_type}) - vol_obj = fake_volume.fake_volume_obj(context.get_admin_context(), - **vol_db) - self.mock_object(volume_api.API, 'get_all', - return_value=objects.VolumeList(objects=[vol_obj])) - # NOTE(geguileo): This is required because common get_by_id method in - # cinder.db.sqlalchemy.api caches the real get method. 
- db.sqlalchemy.api._GET_METHODS = {} - self.mock_object(db.sqlalchemy.api, '_volume_type_get_full', - v2_fakes.fake_volume_type_get) - req = fakes.HTTPRequest.blank('/v2/volumes/detail') - res_dict = self.controller.detail(req) - self.assertTrue(mock_validate.called) - - @classmethod - def _vol_in_request_body(cls, - size=v2_fakes.DEFAULT_VOL_SIZE, - name=v2_fakes.DEFAULT_VOL_NAME, - description=v2_fakes.DEFAULT_VOL_DESCRIPTION, - availability_zone=DEFAULT_AZ, - snapshot_id=None, - source_volid=None, - source_replica=None, - consistencygroup_id=None, - volume_type=None, - image_ref=None, - image_id=None, - multiattach=False): - vol = {"size": size, - "name": name, - "description": description, - "availability_zone": availability_zone, - "snapshot_id": snapshot_id, - "source_volid": source_volid, - "source_replica": source_replica, - "consistencygroup_id": consistencygroup_id, - "volume_type": volume_type, - "multiattach": multiattach, - } - - if image_id is not None: - vol['image_id'] = image_id - elif image_ref is not None: - vol['imageRef'] = image_ref - - return vol - - def _expected_vol_from_controller( - self, - size=v2_fakes.DEFAULT_VOL_SIZE, - availability_zone=DEFAULT_AZ, - description=v2_fakes.DEFAULT_VOL_DESCRIPTION, - name=v2_fakes.DEFAULT_VOL_NAME, - consistencygroup_id=None, - source_volid=None, - snapshot_id=None, - metadata=None, - attachments=None, - volume_type=v2_fakes.DEFAULT_VOL_TYPE, - status=v2_fakes.DEFAULT_VOL_STATUS, - with_migration_status=False, - multiattach=False): - metadata = metadata or {} - attachments = attachments or [] - volume = {'volume': - {'attachments': attachments, - 'availability_zone': availability_zone, - 'bootable': 'false', - 'consistencygroup_id': consistencygroup_id, - 'created_at': datetime.datetime( - 1900, 1, 1, 1, 1, 1, tzinfo=iso8601.iso8601.Utc()), - 'updated_at': datetime.datetime( - 1900, 1, 1, 1, 1, 1, tzinfo=iso8601.iso8601.Utc()), - 'description': description, - 'id': v2_fakes.DEFAULT_VOL_ID, - 'links': - 
[{'href': 'http://localhost/v2/%s/volumes/%s' % ( - fake.PROJECT_ID, fake.VOLUME_ID), - 'rel': 'self'}, - {'href': 'http://localhost/%s/volumes/%s' % ( - fake.PROJECT_ID, fake.VOLUME_ID), - 'rel': 'bookmark'}], - 'metadata': metadata, - 'name': name, - 'replication_status': 'disabled', - 'multiattach': multiattach, - 'size': size, - 'snapshot_id': snapshot_id, - 'source_volid': source_volid, - 'status': status, - 'user_id': fake.USER_ID, - 'volume_type': volume_type, - 'encrypted': False}} - - if with_migration_status: - volume['volume']['migration_status'] = None - - return volume - - def _expected_volume_api_create_kwargs(self, snapshot=None, - availability_zone=DEFAULT_AZ, - source_volume=None): - return {'metadata': None, - 'snapshot': snapshot, - 'source_volume': source_volume, - 'source_replica': None, - 'group': None, - 'consistencygroup': None, - 'availability_zone': availability_zone, - 'scheduler_hints': None, - 'multiattach': False, - } - - @mock.patch.object(db.sqlalchemy.api, '_volume_type_get_full', - autospec=True) - @mock.patch.object(volume_api.API, 'get_snapshot', autospec=True) - @mock.patch.object(volume_api.API, 'create', autospec=True) - def test_volume_creation_from_snapshot(self, create, get_snapshot, - volume_type_get): - create.side_effect = v2_fakes.fake_volume_api_create - get_snapshot.side_effect = v2_fakes.fake_snapshot_get - volume_type_get.side_effect = v2_fakes.fake_volume_type_get - - snapshot_id = fake.SNAPSHOT_ID - vol = self._vol_in_request_body(snapshot_id=snapshot_id) - body = {"volume": vol} - req = fakes.HTTPRequest.blank('/v2/volumes') - res_dict = self.controller.create(req, body) - - ex = self._expected_vol_from_controller(snapshot_id=snapshot_id) - self.assertEqual(ex, res_dict) - - context = req.environ['cinder.context'] - get_snapshot.assert_called_once_with(self.controller.volume_api, - context, snapshot_id) - - kwargs = self._expected_volume_api_create_kwargs( - v2_fakes.fake_snapshot(snapshot_id)) - 
create.assert_called_once_with(self.controller.volume_api, context, - vol['size'], v2_fakes.DEFAULT_VOL_NAME, - v2_fakes.DEFAULT_VOL_DESCRIPTION, - **kwargs) - - @mock.patch.object(volume_api.API, 'get_snapshot', autospec=True) - def test_volume_creation_fails_with_invalid_snapshot(self, get_snapshot): - - get_snapshot.side_effect = v2_fakes.fake_snapshot_get - - snapshot_id = fake.WILL_NOT_BE_FOUND_ID - vol = self._vol_in_request_body(snapshot_id=snapshot_id) - body = {"volume": vol} - req = fakes.HTTPRequest.blank('/v2/volumes') - # Raise 404 when snapshot cannot be found. - self.assertRaises(exception.SnapshotNotFound, self.controller.create, - req, body) - context = req.environ['cinder.context'] - get_snapshot.assert_called_once_with(self.controller.volume_api, - context, snapshot_id) - - @ddt.data({'s': 'ea895e29-8485-4930-bbb8-c5616a309c0e'}, - ['ea895e29-8485-4930-bbb8-c5616a309c0e'], - 42) - def test_volume_creation_fails_with_invalid_snapshot_type(self, value): - snapshot_id = value - vol = self._vol_in_request_body(snapshot_id=snapshot_id) - body = {"volume": vol} - req = fakes.HTTPRequest.blank('/v2/volumes') - # Raise 400 when snapshot has not uuid type. 
- self.assertRaises(webob.exc.HTTPBadRequest, self.controller.create, - req, body) - - @mock.patch.object(db.sqlalchemy.api, '_volume_type_get_full', - autospec=True) - @mock.patch.object(volume_api.API, 'get_volume', autospec=True) - @mock.patch.object(volume_api.API, 'create', autospec=True) - def test_volume_creation_from_source_volume(self, create, get_volume, - volume_type_get): - get_volume.side_effect = v2_fakes.fake_volume_api_get - create.side_effect = v2_fakes.fake_volume_api_create - volume_type_get.side_effect = v2_fakes.fake_volume_type_get - - source_volid = '2f49aa3a-6aae-488d-8b99-a43271605af6' - vol = self._vol_in_request_body(source_volid=source_volid) - body = {"volume": vol} - req = fakes.HTTPRequest.blank('/v2/volumes') - res_dict = self.controller.create(req, body) - - ex = self._expected_vol_from_controller(source_volid=source_volid) - self.assertEqual(ex, res_dict) - - context = req.environ['cinder.context'] - get_volume.assert_called_once_with(self.controller.volume_api, - context, source_volid) - - db_vol = v2_fakes.create_fake_volume(source_volid) - vol_obj = fake_volume.fake_volume_obj(context, **db_vol) - kwargs = self._expected_volume_api_create_kwargs( - source_volume=vol_obj) - create.assert_called_once_with(self.controller.volume_api, context, - vol['size'], v2_fakes.DEFAULT_VOL_NAME, - v2_fakes.DEFAULT_VOL_DESCRIPTION, - **kwargs) - - @mock.patch.object(volume_api.API, 'get_volume', autospec=True) - def test_volume_creation_fails_with_invalid_source_volume(self, - get_volume): - - get_volume.side_effect = v2_fakes.fake_volume_get_notfound - - source_volid = fake.VOLUME_ID - vol = self._vol_in_request_body(source_volid=source_volid) - body = {"volume": vol} - req = fakes.HTTPRequest.blank('/v2/volumes') - # Raise 404 when source volume cannot be found. 
- self.assertRaises(exception.VolumeNotFound, self.controller.create, - req, body) - - context = req.environ['cinder.context'] - get_volume.assert_called_once_with(self.controller.volume_api, - context, source_volid) - - @ddt.data({'source_volid': 1}, - {'source_volid': []}, - {'source_replica': 1}, - {'source_replica': []}, - {'consistencygroup_id': 1}, - {'consistencygroup_id': []}) - def test_volume_creation_fails_with_invalid_uuids(self, updated_uuids): - vol = self._vol_in_request_body() - vol.update(updated_uuids) - body = {"volume": vol} - req = fakes.HTTPRequest.blank('/v2/volumes') - # Raise 400 for resource requested with invalid uuids. - self.assertRaises(webob.exc.HTTPBadRequest, self.controller.create, - req, body) - - @mock.patch.object(volume_api.API, 'get_volume', autospec=True) - def test_volume_creation_fails_with_invalid_source_replica(self, - get_volume): - - get_volume.side_effect = v2_fakes.fake_volume_get_notfound - - source_replica = fake.VOLUME_ID - vol = self._vol_in_request_body(source_replica=source_replica) - body = {"volume": vol} - req = fakes.HTTPRequest.blank('/v2/volumes') - # Raise 404 when source replica cannot be found. - self.assertRaises(exception.VolumeNotFound, self.controller.create, - req, body) - - context = req.environ['cinder.context'] - get_volume.assert_called_once_with(self.controller.volume_api, - context, source_replica) - - @mock.patch.object(volume_api.API, 'get_volume', autospec=True) - def test_volume_creation_fails_with_invalid_source_replication_status( - self, get_volume): - - get_volume.side_effect = v2_fakes.fake_volume_get - - source_replica = '2f49aa3a-6aae-488d-8b99-a43271605af6' - vol = self._vol_in_request_body(source_replica=source_replica) - body = {"volume": vol} - req = fakes.HTTPRequest.blank('/v2/volumes') - # Raise 400 when replication status is disabled. 
- self.assertRaises(webob.exc.HTTPBadRequest, self.controller.create, - req, body) - - context = req.environ['cinder.context'] - get_volume.assert_called_once_with(self.controller.volume_api, - context, source_replica) - - @mock.patch.object(groupAPI.API, 'get', autospec=True) - def test_volume_creation_fails_with_invalid_consistency_group(self, - get_cg): - - get_cg.side_effect = v2_fakes.fake_consistencygroup_get_notfound - - consistencygroup_id = '4f49aa3a-6aae-488d-8b99-a43271605af6' - vol = self._vol_in_request_body( - consistencygroup_id=consistencygroup_id) - body = {"volume": vol} - req = fakes.HTTPRequest.blank('/v2/volumes') - # Raise 404 when consistency group is not found. - self.assertRaises(exception.GroupNotFound, - self.controller.create, req, body) - - context = req.environ['cinder.context'] - get_cg.assert_called_once_with(self.controller.group_api, - context, consistencygroup_id) - - def test_volume_creation_fails_with_bad_size(self): - vol = self._vol_in_request_body(size="") - body = {"volume": vol} - req = fakes.HTTPRequest.blank('/v2/volumes') - self.assertRaises(exception.InvalidInput, - self.controller.create, - req, - body) - - def test_volume_creation_fails_with_bad_availability_zone(self): - vol = self._vol_in_request_body(availability_zone="zonen:hostn") - body = {"volume": vol} - req = fakes.HTTPRequest.blank('/v2/volumes') - self.assertRaises(exception.InvalidInput, - self.controller.create, - req, body) - - @mock.patch( - 'cinder.api.openstack.wsgi.Controller.validate_name_and_description') - def test_volume_create_with_image_ref(self, mock_validate): - self.mock_object(volume_api.API, "create", - v2_fakes.fake_volume_api_create) - self.mock_object(db.sqlalchemy.api, '_volume_type_get_full', - v2_fakes.fake_volume_type_get) - - self.ext_mgr.extensions = {'os-image-create': 'fake'} - vol = self._vol_in_request_body( - availability_zone="nova", - image_ref="c905cedb-7281-47e4-8a62-f26bc5fc4c77") - ex = 
self._expected_vol_from_controller(availability_zone="nova") - body = {"volume": vol} - req = fakes.HTTPRequest.blank('/v2/volumes') - res_dict = self.controller.create(req, body) - self.assertEqual(ex, res_dict) - self.assertTrue(mock_validate.called) - - def test_volume_create_with_image_ref_is_integer(self): - self.mock_object(volume_api.API, "create", v2_fakes.fake_volume_create) - self.ext_mgr.extensions = {'os-image-create': 'fake'} - vol = self._vol_in_request_body(availability_zone="cinder", - image_ref=1234) - body = {"volume": vol} - req = fakes.HTTPRequest.blank('/v2/volumes') - self.assertRaises(webob.exc.HTTPBadRequest, - self.controller.create, - req, - body) - - def test_volume_create_with_image_ref_not_uuid_format(self): - self.mock_object(volume_api.API, "create", v2_fakes.fake_volume_create) - self.mock_object(fake_image._FakeImageService, - "detail", - v2_fakes.fake_image_service_detail) - self.ext_mgr.extensions = {'os-image-create': 'fake'} - vol = self._vol_in_request_body(availability_zone="cinder", - image_ref="12345") - body = {"volume": vol} - req = fakes.HTTPRequest.blank('/v2/volumes') - self.assertRaises(webob.exc.HTTPBadRequest, - self.controller.create, - req, - body) - - def test_volume_create_with_image_ref_with_empty_string(self): - self.mock_object(volume_api.API, "create", v2_fakes.fake_volume_create) - self.mock_object(fake_image._FakeImageService, - "detail", - v2_fakes.fake_image_service_detail) - self.ext_mgr.extensions = {'os-image-create': 'fake'} - vol = self._vol_in_request_body(availability_zone="cinder", - image_ref="") - body = {"volume": vol} - req = fakes.HTTPRequest.blank('/v2/volumes') - self.assertRaises(webob.exc.HTTPBadRequest, - self.controller.create, - req, - body) - - @mock.patch( - 'cinder.api.openstack.wsgi.Controller.validate_name_and_description') - def test_volume_create_with_image_id(self, mock_validate): - self.mock_object(volume_api.API, "create", - v2_fakes.fake_volume_api_create) - 
self.mock_object(db.sqlalchemy.api, '_volume_type_get_full', - v2_fakes.fake_volume_type_get) - - self.ext_mgr.extensions = {'os-image-create': 'fake'} - vol = self._vol_in_request_body( - availability_zone="nova", - image_id="c905cedb-7281-47e4-8a62-f26bc5fc4c77") - ex = self._expected_vol_from_controller(availability_zone="nova") - body = {"volume": vol} - req = fakes.HTTPRequest.blank('/v2/volumes') - res_dict = self.controller.create(req, body) - self.assertEqual(ex, res_dict) - self.assertTrue(mock_validate.called) - - def test_volume_create_with_image_id_is_integer(self): - self.mock_object(volume_api.API, "create", v2_fakes.fake_volume_create) - self.ext_mgr.extensions = {'os-image-create': 'fake'} - vol = self._vol_in_request_body(availability_zone="cinder", - image_id=1234) - body = {"volume": vol} - req = fakes.HTTPRequest.blank('/v2/volumes') - self.assertRaises(webob.exc.HTTPBadRequest, - self.controller.create, - req, - body) - - def test_volume_create_with_image_id_not_uuid_format(self): - self.mock_object(volume_api.API, "create", v2_fakes.fake_volume_create) - self.mock_object(fake_image._FakeImageService, - "detail", - v2_fakes.fake_image_service_detail) - self.ext_mgr.extensions = {'os-image-create': 'fake'} - vol = self._vol_in_request_body(availability_zone="cinder", - image_id="12345") - body = {"volume": vol} - req = fakes.HTTPRequest.blank('/v2/volumes') - self.assertRaises(webob.exc.HTTPBadRequest, - self.controller.create, - req, - body) - - def test_volume_create_with_image_id_with_empty_string(self): - self.mock_object(volume_api.API, "create", v2_fakes.fake_volume_create) - self.mock_object(fake_image._FakeImageService, - "detail", - v2_fakes.fake_image_service_detail) - self.ext_mgr.extensions = {'os-image-create': 'fake'} - vol = self._vol_in_request_body(availability_zone="cinder", - image_id="") - body = {"volume": vol} - req = fakes.HTTPRequest.blank('/v2/volumes') - self.assertRaises(webob.exc.HTTPBadRequest, - 
self.controller.create, - req, - body) - - @mock.patch( - 'cinder.api.openstack.wsgi.Controller.validate_name_and_description') - def test_volume_create_with_image_name(self, mock_validate): - self.mock_object(volume_api.API, "create", - v2_fakes.fake_volume_api_create) - self.mock_object(db.sqlalchemy.api, '_volume_type_get_full', - v2_fakes.fake_volume_type_get) - self.mock_object(fake_image._FakeImageService, - "detail", - v2_fakes.fake_image_service_detail) - - test_id = "Fedora-x86_64-20-20140618-sda" - self.ext_mgr.extensions = {'os-image-create': 'fake'} - vol = self._vol_in_request_body(availability_zone="nova", - image_ref=test_id) - ex = self._expected_vol_from_controller(availability_zone="nova") - body = {"volume": vol} - req = fakes.HTTPRequest.blank('/v2/volumes') - res_dict = self.controller.create(req, body) - self.assertEqual(ex, res_dict) - self.assertTrue(mock_validate.called) - - def test_volume_create_with_image_name_has_multiple(self): - self.mock_object(db, 'volume_get', v2_fakes.fake_volume_get_db) - self.mock_object(volume_api.API, "create", v2_fakes.fake_volume_create) - self.mock_object(fake_image._FakeImageService, - "detail", - v2_fakes.fake_image_service_detail) - - test_id = "multi" - self.ext_mgr.extensions = {'os-image-create': 'fake'} - vol = self._vol_in_request_body(availability_zone="nova", - image_ref=test_id) - body = {"volume": vol} - req = fakes.HTTPRequest.blank('/v2/volumes') - self.assertRaises(webob.exc.HTTPConflict, - self.controller.create, - req, - body) - - def test_volume_create_with_image_name_no_match(self): - self.mock_object(db, 'volume_get', v2_fakes.fake_volume_get_db) - self.mock_object(volume_api.API, "create", v2_fakes.fake_volume_create) - self.mock_object(fake_image._FakeImageService, - "detail", - v2_fakes.fake_image_service_detail) - - test_id = "MissingName" - self.ext_mgr.extensions = {'os-image-create': 'fake'} - vol = self._vol_in_request_body(availability_zone="nova", - image_ref=test_id) - body = 
{"volume": vol} - req = fakes.HTTPRequest.blank('/v2/volumes') - self.assertRaises(webob.exc.HTTPBadRequest, - self.controller.create, - req, - body) - - def test_volume_create_with_invalid_multiattach(self): - vol = self._vol_in_request_body(multiattach="InvalidBool") - body = {"volume": vol} - req = fakes.HTTPRequest.blank('/v2/volumes') - - self.assertRaises(exception.InvalidParameterValue, - self.controller.create, - req, - body) - - @mock.patch.object(volume_api.API, 'create', autospec=True) - @mock.patch.object(volume_api.API, 'get', autospec=True) - @mock.patch.object(db.sqlalchemy.api, '_volume_type_get_full', - autospec=True) - def test_volume_create_with_valid_multiattach(self, - volume_type_get, - get, create): - create.side_effect = v2_fakes.fake_volume_api_create - get.side_effect = v2_fakes.fake_volume_get - volume_type_get.side_effect = v2_fakes.fake_volume_type_get - - vol = self._vol_in_request_body(multiattach=True) - body = {"volume": vol} - - ex = self._expected_vol_from_controller(multiattach=True) - - req = fakes.HTTPRequest.blank('/v2/volumes') - res_dict = self.controller.create(req, body) - - self.assertEqual(ex, res_dict) - - @ddt.data({'a' * 256: 'a'}, - {'a': 'a' * 256}, - {'': 'a'}, - {'a': None}) - def test_volume_create_with_invalid_metadata(self, value): - vol = self._vol_in_request_body() - vol['metadata'] = value - body = {"volume": vol} - req = fakes.HTTPRequest.blank('/v2/volumes') - - if len(list(value.keys())[0]) == 0 or list(value.values())[0] is None: - exc = exception.InvalidVolumeMetadata - else: - exc = exception.InvalidVolumeMetadataSize - self.assertRaises(exc, - self.controller.create, - req, - body) - - @mock.patch( - 'cinder.api.openstack.wsgi.Controller.validate_name_and_description') - def test_volume_update(self, mock_validate): - self.mock_object(volume_api.API, 'get', v2_fakes.fake_volume_api_get) - self.mock_object(volume_api.API, "update", v2_fakes.fake_volume_update) - self.mock_object(db.sqlalchemy.api, 
'_volume_type_get_full', - v2_fakes.fake_volume_type_get) - - updates = { - "name": "Updated Test Name", - } - body = {"volume": updates} - req = fakes.HTTPRequest.blank('/v2/volumes/%s' % fake.VOLUME_ID) - self.assertEqual(0, len(self.notifier.notifications)) - res_dict = self.controller.update(req, fake.VOLUME_ID, body) - expected = self._expected_vol_from_controller( - availability_zone=v2_fakes.DEFAULT_AZ, name="Updated Test Name", - metadata={'attached_mode': 'rw', 'readonly': 'False'}) - self.assertEqual(expected, res_dict) - self.assertEqual(2, len(self.notifier.notifications)) - self.assertTrue(mock_validate.called) - - @mock.patch( - 'cinder.api.openstack.wsgi.Controller.validate_name_and_description') - def test_volume_update_deprecation(self, mock_validate): - self.mock_object(volume_api.API, 'get', v2_fakes.fake_volume_api_get) - self.mock_object(volume_api.API, "update", v2_fakes.fake_volume_update) - self.mock_object(db.sqlalchemy.api, '_volume_type_get_full', - v2_fakes.fake_volume_type_get) - - updates = { - "display_name": "Updated Test Name", - "display_description": "Updated Test Description", - } - body = {"volume": updates} - req = fakes.HTTPRequest.blank('/v2/volumes/%s' % fake.VOLUME_ID) - self.assertEqual(0, len(self.notifier.notifications)) - res_dict = self.controller.update(req, fake.VOLUME_ID, body) - expected = self._expected_vol_from_controller( - availability_zone=v2_fakes.DEFAULT_AZ, name="Updated Test Name", - description="Updated Test Description", - metadata={'attached_mode': 'rw', 'readonly': 'False'}) - self.assertEqual(expected, res_dict) - self.assertEqual(2, len(self.notifier.notifications)) - self.assertTrue(mock_validate.called) - - @mock.patch( - 'cinder.api.openstack.wsgi.Controller.validate_name_and_description') - def test_volume_update_deprecation_key_priority(self, mock_validate): - """Test current update keys have priority over deprecated keys.""" - self.mock_object(volume_api.API, 'get', 
v2_fakes.fake_volume_api_get) - self.mock_object(volume_api.API, "update", v2_fakes.fake_volume_update) - self.mock_object(db.sqlalchemy.api, '_volume_type_get_full', - v2_fakes.fake_volume_type_get) - - updates = { - "name": "New Name", - "description": "New Description", - "display_name": "Not Shown Name", - "display_description": "Not Shown Description", - } - body = {"volume": updates} - req = fakes.HTTPRequest.blank('/v2/volumes/%s' % fake.VOLUME_ID) - self.assertEqual(0, len(self.notifier.notifications)) - res_dict = self.controller.update(req, fake.VOLUME_ID, body) - expected = self._expected_vol_from_controller( - availability_zone=v2_fakes.DEFAULT_AZ, - name="New Name", description="New Description", - metadata={'attached_mode': 'rw', 'readonly': 'False'}) - self.assertEqual(expected, res_dict) - self.assertEqual(2, len(self.notifier.notifications)) - self.assertTrue(mock_validate.called) - - @mock.patch( - 'cinder.api.openstack.wsgi.Controller.validate_name_and_description') - def test_volume_update_metadata(self, mock_validate): - self.mock_object(volume_api.API, 'get', v2_fakes.fake_volume_api_get) - self.mock_object(volume_api.API, "update", v2_fakes.fake_volume_update) - self.mock_object(db.sqlalchemy.api, '_volume_type_get_full', - v2_fakes.fake_volume_type_get) - - updates = { - "metadata": {"qos_max_iops": '2000'} - } - body = {"volume": updates} - req = fakes.HTTPRequest.blank('/v2/volumes/%s' % fake.VOLUME_ID) - self.assertEqual(0, len(self.notifier.notifications)) - res_dict = self.controller.update(req, fake.VOLUME_ID, body) - expected = self._expected_vol_from_controller( - availability_zone=v2_fakes.DEFAULT_AZ, - metadata={'attached_mode': 'rw', 'readonly': 'False', - 'qos_max_iops': '2000'}) - self.assertEqual(expected, res_dict) - self.assertEqual(2, len(self.notifier.notifications)) - self.assertTrue(mock_validate.called) - - @mock.patch( - 'cinder.api.openstack.wsgi.Controller.validate_name_and_description') - def 
test_volume_update_with_admin_metadata(self, mock_validate): - self.mock_object(volume_api.API, "update", v2_fakes.fake_volume_update) - - volume = v2_fakes.create_fake_volume(fake.VOLUME_ID) - del volume['name'] - del volume['volume_type'] - del volume['volume_type_id'] - volume['metadata'] = {'key': 'value'} - db.volume_create(context.get_admin_context(), volume) - - db.volume_admin_metadata_update(context.get_admin_context(), - fake.VOLUME_ID, - {"readonly": "True", - "invisible_key": "invisible_value"}, - False) - values = {'volume_id': fake.VOLUME_ID, } - attachment = db.volume_attach(context.get_admin_context(), values) - db.volume_attached(context.get_admin_context(), - attachment['id'], fake.INSTANCE_ID, None, '/') - attach_tmp = db.volume_attachment_get(context.get_admin_context(), - attachment['id']) - volume_tmp = db.volume_get(context.get_admin_context(), fake.VOLUME_ID) - updates = { - "name": "Updated Test Name", - } - body = {"volume": updates} - req = fakes.HTTPRequest.blank('/v2/volumes/%s' % fake.VOLUME_ID) - self.assertEqual(0, len(self.notifier.notifications)) - admin_ctx = context.RequestContext(fake.USER_ID, fake.PROJECT_ID, True) - req.environ['cinder.context'] = admin_ctx - res_dict = self.controller.update(req, fake.VOLUME_ID, body) - expected = self._expected_vol_from_controller( - availability_zone=v2_fakes.DEFAULT_AZ, volume_type=None, - status='in-use', name='Updated Test Name', - attachments=[{'id': fake.VOLUME_ID, - 'attachment_id': attachment['id'], - 'volume_id': v2_fakes.DEFAULT_VOL_ID, - 'server_id': fake.INSTANCE_ID, - 'host_name': None, - 'device': '/', - 'attached_at': attach_tmp['attach_time'].replace( - tzinfo=iso8601.iso8601.Utc()), - }], - metadata={'key': 'value', 'readonly': 'True'}, - with_migration_status=True) - expected['volume']['updated_at'] = volume_tmp['updated_at'].replace( - tzinfo=iso8601.iso8601.Utc()) - self.assertEqual(expected, res_dict) - self.assertEqual(2, len(self.notifier.notifications)) - 
self.assertTrue(mock_validate.called) - - @ddt.data({'a' * 256: 'a'}, - {'a': 'a' * 256}, - {'': 'a'}, - {'a': None}) - @mock.patch.object(volume_api.API, 'get', - side_effect=v2_fakes.fake_volume_api_get, autospec=True) - def test_volume_update_with_invalid_metadata(self, value, get): - updates = { - "metadata": value - } - body = {"volume": updates} - req = fakes.HTTPRequest.blank('/v2/volumes/%s' % fake.VOLUME_ID) - - if len(list(value.keys())[0]) == 0 or list(value.values())[0] is None: - exc = exception.InvalidVolumeMetadata - else: - exc = webob.exc.HTTPRequestEntityTooLarge - self.assertRaises(exc, - self.controller.update, - req, fake.VOLUME_ID, body) - - def test_update_empty_body(self): - body = {} - req = fakes.HTTPRequest.blank('/v2/volumes/%s' % fake.VOLUME_ID) - self.assertRaises(webob.exc.HTTPBadRequest, - self.controller.update, - req, fake.VOLUME_ID, body) - - def test_update_invalid_body(self): - body = { - 'name': 'missing top level volume key' - } - req = fakes.HTTPRequest.blank('/v2/volumes/%s' % fake.VOLUME_ID) - self.assertRaises(webob.exc.HTTPBadRequest, - self.controller.update, - req, fake.VOLUME_ID, body) - - def test_update_not_found(self): - self.mock_object(volume_api.API, "get", - v2_fakes.fake_volume_get_notfound) - updates = { - "name": "Updated Test Name", - } - body = {"volume": updates} - req = fakes.HTTPRequest.blank('/v2/volumes/%s' % fake.VOLUME_ID) - self.assertRaises(exception.VolumeNotFound, - self.controller.update, - req, fake.VOLUME_ID, body) - - def test_volume_list_summary(self): - self.mock_object(volume_api.API, 'get_all', - v2_fakes.fake_volume_api_get_all_by_project) - self.mock_object(db.sqlalchemy.api, '_volume_type_get_full', - v2_fakes.fake_volume_type_get) - - req = fakes.HTTPRequest.blank('/v2/volumes') - res_dict = self.controller.index(req) - expected = { - 'volumes': [ - { - 'name': v2_fakes.DEFAULT_VOL_NAME, - 'id': fake.VOLUME_ID, - 'links': [ - { - 'href': 'http://localhost/v2/%s/volumes/%s' % ( - 
fake.PROJECT_ID, fake.VOLUME_ID), - 'rel': 'self' - }, - { - 'href': 'http://localhost/%s/volumes/%s' % ( - fake.PROJECT_ID, fake.VOLUME_ID), - 'rel': 'bookmark' - } - ], - } - ] - } - self.assertEqual(expected, res_dict) - # Finally test that we cached the returned volumes - self.assertEqual(1, len(req.cached_resource())) - - def test_volume_list_detail(self): - self.mock_object(volume_api.API, 'get_all', - v2_fakes.fake_volume_api_get_all_by_project) - self.mock_object(db.sqlalchemy.api, '_volume_type_get_full', - v2_fakes.fake_volume_type_get) - - req = fakes.HTTPRequest.blank('/v2/volumes/detail') - res_dict = self.controller.detail(req) - exp_vol = self._expected_vol_from_controller( - availability_zone=v2_fakes.DEFAULT_AZ, - metadata={'attached_mode': 'rw', 'readonly': 'False'}) - expected = {'volumes': [exp_vol['volume']]} - self.assertEqual(expected, res_dict) - # Finally test that we cached the returned volumes - self.assertEqual(1, len(req.cached_resource())) - - def test_volume_list_detail_with_admin_metadata(self): - volume = v2_fakes.create_fake_volume(fake.VOLUME_ID) - del volume['name'] - del volume['volume_type'] - del volume['volume_type_id'] - volume['metadata'] = {'key': 'value'} - db.volume_create(context.get_admin_context(), volume) - db.volume_admin_metadata_update(context.get_admin_context(), - fake.VOLUME_ID, - {"readonly": "True", - "invisible_key": "invisible_value"}, - False) - values = {'volume_id': fake.VOLUME_ID, } - attachment = db.volume_attach(context.get_admin_context(), values) - db.volume_attached(context.get_admin_context(), - attachment['id'], fake.INSTANCE_ID, None, '/') - attach_tmp = db.volume_attachment_get(context.get_admin_context(), - attachment['id']) - volume_tmp = db.volume_get(context.get_admin_context(), fake.VOLUME_ID) - - req = fakes.HTTPRequest.blank('/v2/volumes/detail') - admin_ctx = context.RequestContext(fake.USER_ID, fake.PROJECT_ID, True) - req.environ['cinder.context'] = admin_ctx - res_dict = 
self.controller.detail(req) - exp_vol = self._expected_vol_from_controller( - availability_zone=v2_fakes.DEFAULT_AZ, - status="in-use", volume_type=None, - attachments=[{'attachment_id': attachment['id'], - 'device': '/', - 'server_id': fake.INSTANCE_ID, - 'host_name': None, - 'id': fake.VOLUME_ID, - 'volume_id': v2_fakes.DEFAULT_VOL_ID, - 'attached_at': attach_tmp['attach_time'].replace( - tzinfo=iso8601.iso8601.Utc()), - }], - metadata={'key': 'value', 'readonly': 'True'}, - with_migration_status=True) - exp_vol['volume']['updated_at'] = volume_tmp['updated_at'].replace( - tzinfo=iso8601.iso8601.Utc()) - expected = {'volumes': [exp_vol['volume']]} - self.assertEqual(expected, res_dict) - - def test_volume_index_with_marker(self): - def fake_volume_get_all_by_project(context, project_id, marker, limit, - sort_keys=None, sort_dirs=None, - filters=None, - viewable_admin_meta=False, - offset=0): - return [ - v2_fakes.create_fake_volume(fake.VOLUME_ID, - display_name='vol1'), - v2_fakes.create_fake_volume(fake.VOLUME2_ID, - display_name='vol2'), - ] - self.mock_object(db, 'volume_get_all_by_project', - fake_volume_get_all_by_project) - self.mock_object(volume_api.API, 'get', v2_fakes.fake_volume_get) - - req = fakes.HTTPRequest.blank('/v2/volumes?marker=1') - res_dict = self.controller.index(req) - volumes = res_dict['volumes'] - self.assertEqual(2, len(volumes)) - self.assertEqual(fake.VOLUME_ID, volumes[0]['id']) - self.assertEqual(fake.VOLUME2_ID, volumes[1]['id']) - - def test_volume_index_limit(self): - self.mock_object(db, 'volume_get_all_by_project', - v2_fakes.fake_volume_get_all_by_project) - self.mock_object(volume_api.API, 'get', v2_fakes.fake_volume_get) - - req = fakes.HTTPRequest.blank('/v2/volumes' - '?limit=1&name=foo' - '&sort=id1:asc') - res_dict = self.controller.index(req) - volumes = res_dict['volumes'] - self.assertEqual(1, len(volumes)) - - # Ensure that the next link is correctly formatted, it should - # contain the same limit, filter, and sort 
information as the - # original request as well as a marker; this ensures that the - # caller can simply use the "next" link and that they do not - # need to manually insert the limit and sort information. - links = res_dict['volumes_links'] - self.assertEqual('next', links[0]['rel']) - href_parts = urllib.parse.urlparse(links[0]['href']) - self.assertEqual('/v2/%s/volumes' % fake.PROJECT_ID, href_parts.path) - params = urllib.parse.parse_qs(href_parts.query) - self.assertEqual(str(volumes[0]['id']), params['marker'][0]) - self.assertEqual('1', params['limit'][0]) - self.assertEqual('foo', params['name'][0]) - self.assertEqual('id1:asc', params['sort'][0]) - - def test_volume_index_limit_negative(self): - req = fakes.HTTPRequest.blank('/v2/volumes?limit=-1') - self.assertRaises(webob.exc.HTTPBadRequest, - self.controller.index, - req) - - def test_volume_index_limit_non_int(self): - req = fakes.HTTPRequest.blank('/v2/volumes?limit=a') - self.assertRaises(webob.exc.HTTPBadRequest, - self.controller.index, - req) - - def test_volume_index_limit_marker(self): - self.mock_object(db, 'volume_get_all_by_project', - v2_fakes.fake_volume_get_all_by_project) - self.mock_object(volume_api.API, 'get', v2_fakes.fake_volume_get) - - req = fakes.HTTPRequest.blank('/v2/volumes?marker=1&limit=1') - res_dict = self.controller.index(req) - volumes = res_dict['volumes'] - self.assertEqual(1, len(volumes)) - self.assertEqual(fake.VOLUME_ID, volumes[0]['id']) - - def _create_db_volumes(self, num_volumes): - volumes = [utils.create_volume(self.ctxt, display_name='vol%s' % i) - for i in range(num_volumes)] - for vol in volumes: - self.addCleanup(db.volume_destroy, self.ctxt, vol.id) - volumes.reverse() - return volumes - - def test_volume_index_limit_offset(self): - created_volumes = self._create_db_volumes(2) - req = fakes.HTTPRequest.blank('/v2/volumes?limit=2&offset=1') - res_dict = self.controller.index(req) - volumes = res_dict['volumes'] - self.assertEqual(1, len(volumes)) - 
self.assertEqual(created_volumes[1].id, volumes[0]['id']) - - req = fakes.HTTPRequest.blank('/v2/volumes?limit=-1&offset=1') - self.assertRaises(webob.exc.HTTPBadRequest, - self.controller.index, - req) - - req = fakes.HTTPRequest.blank('/v2/volumes?limit=a&offset=1') - self.assertRaises(webob.exc.HTTPBadRequest, - self.controller.index, - req) - - # Test that we get an exception HTTPBadRequest(400) with an offset - # greater than the maximum offset value. - url = '/v2/volumes?limit=2&offset=43543564546567575' - req = fakes.HTTPRequest.blank(url) - self.assertRaises(webob.exc.HTTPBadRequest, - self.controller.index, - req) - - def test_volume_detail_with_marker(self): - def fake_volume_get_all_by_project(context, project_id, marker, limit, - sort_keys=None, sort_dirs=None, - filters=None, - viewable_admin_meta=False, - offset=0): - return [ - v2_fakes.create_fake_volume(fake.VOLUME_ID, - display_name='vol1'), - v2_fakes.create_fake_volume(fake.VOLUME2_ID, - display_name='vol2'), - ] - self.mock_object(db, 'volume_get_all_by_project', - fake_volume_get_all_by_project) - self.mock_object(db.sqlalchemy.api, '_volume_type_get_full', - v2_fakes.fake_volume_type_get) - - req = fakes.HTTPRequest.blank('/v2/volumes/detail?marker=1') - res_dict = self.controller.detail(req) - volumes = res_dict['volumes'] - self.assertEqual(2, len(volumes)) - self.assertEqual(fake.VOLUME_ID, volumes[0]['id']) - self.assertEqual(fake.VOLUME2_ID, volumes[1]['id']) - - def test_volume_detail_limit(self): - self.mock_object(db, 'volume_get_all_by_project', - v2_fakes.fake_volume_get_all_by_project) - self.mock_object(db.sqlalchemy.api, '_volume_type_get_full', - v2_fakes.fake_volume_type_get) - req = fakes.HTTPRequest.blank('/v2/volumes/detail?limit=1') - res_dict = self.controller.detail(req) - volumes = res_dict['volumes'] - self.assertEqual(1, len(volumes)) - - # Ensure that the next link is correctly formatted - links = res_dict['volumes_links'] - self.assertEqual('next', links[0]['rel']) - 
href_parts = urllib.parse.urlparse(links[0]['href']) - self.assertEqual('/v2/%s/volumes/detail' % fake.PROJECT_ID, - href_parts.path) - params = urllib.parse.parse_qs(href_parts.query) - self.assertIn('marker', params) - self.assertEqual('1', params['limit'][0]) - - def test_volume_detail_limit_negative(self): - req = fakes.HTTPRequest.blank('/v2/volumes/detail?limit=-1') - self.assertRaises(webob.exc.HTTPBadRequest, - self.controller.detail, - req) - - def test_volume_detail_limit_non_int(self): - req = fakes.HTTPRequest.blank('/v2/volumes/detail?limit=a') - self.assertRaises(webob.exc.HTTPBadRequest, - self.controller.detail, - req) - - def test_volume_detail_limit_marker(self): - self.mock_object(db, 'volume_get_all_by_project', - v2_fakes.fake_volume_get_all_by_project) - self.mock_object(db.sqlalchemy.api, '_volume_type_get_full', - v2_fakes.fake_volume_type_get) - - req = fakes.HTTPRequest.blank('/v2/volumes/detail?marker=1&limit=1') - res_dict = self.controller.detail(req) - volumes = res_dict['volumes'] - self.assertEqual(1, len(volumes)) - self.assertEqual(fake.VOLUME_ID, volumes[0]['id']) - - def test_volume_detail_limit_offset(self): - created_volumes = self._create_db_volumes(2) - req = fakes.HTTPRequest.blank('/v2/volumes/detail?limit=2&offset=1') - res_dict = self.controller.detail(req) - volumes = res_dict['volumes'] - self.assertEqual(1, len(volumes)) - self.assertEqual(created_volumes[1].id, volumes[0]['id']) - - req = fakes.HTTPRequest.blank('/v2/volumes/detail?limit=2&offset=1', - use_admin_context=True) - res_dict = self.controller.detail(req) - volumes = res_dict['volumes'] - self.assertEqual(1, len(volumes)) - self.assertEqual(created_volumes[1].id, volumes[0]['id']) - - req = fakes.HTTPRequest.blank('/v2/volumes/detail?limit=-1&offset=1') - self.assertRaises(webob.exc.HTTPBadRequest, - self.controller.detail, - req) - - req = fakes.HTTPRequest.blank('/v2/volumes/detail?limit=a&offset=1') - self.assertRaises(webob.exc.HTTPBadRequest, - 
self.controller.detail, - req) - - url = '/v2/volumes/detail?limit=2&offset=4536546546546467' - req = fakes.HTTPRequest.blank(url) - self.assertRaises(webob.exc.HTTPBadRequest, - self.controller.detail, - req) - - def test_volume_with_limit_zero(self): - def fake_volume_get_all(context, marker, limit, **kwargs): - return [] - self.mock_object(db, 'volume_get_all', fake_volume_get_all) - req = fakes.HTTPRequest.blank('/v2/volumes?limit=0') - res_dict = self.controller.index(req) - expected = {'volumes': []} - self.assertEqual(expected, res_dict) - - def _validate_next_link(self, detailed, item_count, osapi_max_limit, limit, - should_link_exist): - keys_fns = (('volumes', self.controller.index), - ('volumes/detail', self.controller.detail)) - key, fn = keys_fns[detailed] - - req_string = '/v2/%s?all_tenants=1' % key - if limit: - req_string += '&limit=%s' % limit - req = fakes.HTTPRequest.blank(req_string, use_admin_context=True) - - link_return = [{"rel": "next", "href": "fake_link"}] - self.flags(osapi_max_limit=osapi_max_limit) - - def get_pagination_params(params, max_limit=CONF.osapi_max_limit, - original_call=common.get_pagination_params): - return original_call(params, max_limit) - - def _get_limit_param(params, max_limit=CONF.osapi_max_limit, - original_call=common._get_limit_param): - return original_call(params, max_limit) - - with mock.patch.object(common, 'get_pagination_params', - get_pagination_params), \ - mock.patch.object(common, '_get_limit_param', - _get_limit_param), \ - mock.patch.object(common.ViewBuilder, '_generate_next_link', - return_value=link_return): - res_dict = fn(req) - self.assertEqual(item_count, len(res_dict['volumes'])) - self.assertEqual(should_link_exist, 'volumes_links' in res_dict) - - def test_volume_default_limit(self): - self._create_db_volumes(3) - - # Verify both the index and detail queries - for detailed in (True, False): - # Number of volumes less than max, do not include - self._validate_next_link(detailed, 
item_count=3, osapi_max_limit=4, - limit=None, should_link_exist=False) - - # Number of volumes equals the max, next link will be included - self._validate_next_link(detailed, item_count=3, osapi_max_limit=3, - limit=None, should_link_exist=True) - - # Number of volumes more than the max, include next link - self._validate_next_link(detailed, item_count=2, osapi_max_limit=2, - limit=None, should_link_exist=True) - - # Limit lower than max but doesn't limit, no next link - self._validate_next_link(detailed, item_count=3, osapi_max_limit=5, - limit=4, should_link_exist=False) - - # Limit lower than max and limits, we have next link - self._validate_next_link(detailed, item_count=2, osapi_max_limit=4, - limit=2, should_link_exist=True) - - # Limit higher than max and max limits, we have next link - self._validate_next_link(detailed, item_count=2, osapi_max_limit=2, - limit=4, should_link_exist=True) - - # Limit higher than max but none of them limiting, no next link - self._validate_next_link(detailed, item_count=3, osapi_max_limit=4, - limit=5, should_link_exist=False) - - def test_volume_list_default_filters(self): - """Tests that the default filters from volume.api.API.get_all are set. - - 1. 'no_migration_status'=True for non-admins and get_all_by_project is - invoked. - 2. 'no_migration_status' is not included for admins. - 3. When 'all_tenants' is not specified, then it is removed and - get_all_by_project is invoked for admins. - 3. When 'all_tenants' is specified, then it is removed and get_all - is invoked for admins. 
- """ - # Non-admin, project function should be called with no_migration_status - def fake_volume_get_all_by_project(context, project_id, marker, limit, - sort_keys=None, sort_dirs=None, - filters=None, - viewable_admin_meta=False, - offset=0): - self.assertTrue(filters['no_migration_targets']) - self.assertNotIn('all_tenants', filters) - return [v2_fakes.create_fake_volume(fake.VOLUME_ID, - display_name='vol1')] - - def fake_volume_get_all(context, marker, limit, - sort_keys=None, sort_dirs=None, - filters=None, - viewable_admin_meta=False, offset=0): - return [] - self.mock_object(db, 'volume_get_all_by_project', - fake_volume_get_all_by_project) - self.mock_object(db, 'volume_get_all', fake_volume_get_all) - - # all_tenants does not matter for non-admin - for params in ['', '?all_tenants=1']: - req = fakes.HTTPRequest.blank('/v2/volumes%s' % params) - resp = self.controller.index(req) - self.assertEqual(1, len(resp['volumes'])) - self.assertEqual('vol1', resp['volumes'][0]['name']) - - # Admin, all_tenants is not set, project function should be called - # without no_migration_status - def fake_volume_get_all_by_project2(context, project_id, marker, limit, - sort_keys=None, sort_dirs=None, - filters=None, - viewable_admin_meta=False, - offset=0): - self.assertNotIn('no_migration_targets', filters) - return [v2_fakes.create_fake_volume(fake.VOLUME_ID, - display_name='vol2')] - - def fake_volume_get_all2(context, marker, limit, - sort_keys=None, sort_dirs=None, - filters=None, - viewable_admin_meta=False, offset=0): - return [] - self.mock_object(db, 'volume_get_all_by_project', - fake_volume_get_all_by_project2) - self.mock_object(db, 'volume_get_all', fake_volume_get_all2) - - req = fakes.HTTPRequest.blank('/v2/volumes', use_admin_context=True) - resp = self.controller.index(req) - self.assertEqual(1, len(resp['volumes'])) - self.assertEqual('vol2', resp['volumes'][0]['name']) - - # Admin, all_tenants is set, get_all function should be called - # without 
no_migration_status - def fake_volume_get_all_by_project3(context, project_id, marker, limit, - sort_keys=None, sort_dirs=None, - filters=None, - viewable_admin_meta=False, - offset=0): - return [] - - def fake_volume_get_all3(context, marker, limit, - sort_keys=None, sort_dirs=None, - filters=None, - viewable_admin_meta=False, offset=0): - self.assertNotIn('no_migration_targets', filters) - self.assertNotIn('all_tenants', filters) - return [v2_fakes.create_fake_volume(fake.VOLUME3_ID, - display_name='vol3')] - self.mock_object(db, 'volume_get_all_by_project', - fake_volume_get_all_by_project3) - self.mock_object(db, 'volume_get_all', fake_volume_get_all3) - - req = fakes.HTTPRequest.blank('/v2/volumes?all_tenants=1', - use_admin_context=True) - resp = self.controller.index(req) - self.assertEqual(1, len(resp['volumes'])) - self.assertEqual('vol3', resp['volumes'][0]['name']) - - def test_volume_show(self): - self.mock_object(volume_api.API, 'get', v2_fakes.fake_volume_api_get) - self.mock_object(db.sqlalchemy.api, '_volume_type_get_full', - v2_fakes.fake_volume_type_get) - - req = fakes.HTTPRequest.blank('/v2/volumes/%s' % fake.VOLUME_ID) - res_dict = self.controller.show(req, fake.VOLUME_ID) - expected = self._expected_vol_from_controller( - availability_zone=v2_fakes.DEFAULT_AZ, - metadata={'attached_mode': 'rw', 'readonly': 'False'}) - self.assertEqual(expected, res_dict) - # Finally test that we cached the returned volume - self.assertIsNotNone(req.cached_resource_by_id(fake.VOLUME_ID)) - - def test_volume_show_no_attachments(self): - def fake_volume_get(self, context, volume_id, **kwargs): - vol = v2_fakes.create_fake_volume( - volume_id, attach_status= - fields.VolumeAttachStatus.DETACHED) - return fake_volume.fake_volume_obj(context, **vol) - - def fake_volume_admin_metadata_get(context, volume_id, **kwargs): - return v2_fakes.fake_volume_admin_metadata_get( - context, volume_id, attach_status= - fields.VolumeAttachStatus.DETACHED) - - 
self.mock_object(volume_api.API, 'get', fake_volume_get) - self.mock_object(db, 'volume_admin_metadata_get', - fake_volume_admin_metadata_get) - self.mock_object(db.sqlalchemy.api, '_volume_type_get_full', - v2_fakes.fake_volume_type_get) - - req = fakes.HTTPRequest.blank('/v2/volumes/%s' % fake.VOLUME_ID) - res_dict = self.controller.show(req, fake.VOLUME_ID) - expected = self._expected_vol_from_controller( - availability_zone=v2_fakes.DEFAULT_AZ, - metadata={'readonly': 'False'}) - - self.assertEqual(expected, res_dict) - - def test_volume_show_no_volume(self): - self.mock_object(volume_api.API, "get", - v2_fakes.fake_volume_get_notfound) - - req = fakes.HTTPRequest.blank('/v2/volumes/%s' % fake.VOLUME_ID) - self.assertRaises(exception.VolumeNotFound, self.controller.show, - req, 1) - # Finally test that nothing was cached - self.assertIsNone(req.cached_resource_by_id(fake.VOLUME_ID)) - - def test_volume_show_with_admin_metadata(self): - volume = v2_fakes.create_fake_volume(fake.VOLUME_ID) - del volume['name'] - del volume['volume_type'] - del volume['volume_type_id'] - volume['metadata'] = {'key': 'value'} - db.volume_create(context.get_admin_context(), volume) - db.volume_admin_metadata_update(context.get_admin_context(), - fake.VOLUME_ID, - {"readonly": "True", - "invisible_key": "invisible_value"}, - False) - values = {'volume_id': fake.VOLUME_ID, } - attachment = db.volume_attach(context.get_admin_context(), values) - db.volume_attached(context.get_admin_context(), - attachment['id'], fake.INSTANCE_ID, None, '/') - attach_tmp = db.volume_attachment_get(context.get_admin_context(), - attachment['id']) - volume_tmp = db.volume_get(context.get_admin_context(), fake.VOLUME_ID) - req = fakes.HTTPRequest.blank('/v2/volumes/%s' % fake.VOLUME_ID) - admin_ctx = context.RequestContext(fake.USER_ID, fake.PROJECT_ID, True) - req.environ['cinder.context'] = admin_ctx - res_dict = self.controller.show(req, fake.VOLUME_ID) - expected = self._expected_vol_from_controller( - 
availability_zone=v2_fakes.DEFAULT_AZ, - volume_type=None, status='in-use', - attachments=[{'id': fake.VOLUME_ID, - 'attachment_id': attachment['id'], - 'volume_id': v2_fakes.DEFAULT_VOL_ID, - 'server_id': fake.INSTANCE_ID, - 'host_name': None, - 'device': '/', - 'attached_at': attach_tmp['attach_time'].replace( - tzinfo=iso8601.iso8601.Utc()), - }], - metadata={'key': 'value', 'readonly': 'True'}, - with_migration_status=True) - expected['volume']['updated_at'] = volume_tmp['updated_at'].replace( - tzinfo=iso8601.iso8601.Utc()) - self.assertEqual(expected, res_dict) - - def test_volume_show_with_encrypted_volume(self): - def fake_volume_get(self, context, volume_id, **kwargs): - vol = v2_fakes.create_fake_volume(volume_id, - encryption_key_id=fake.KEY_ID) - return fake_volume.fake_volume_obj(context, **vol) - - self.mock_object(volume_api.API, 'get', fake_volume_get) - self.mock_object(db.sqlalchemy.api, '_volume_type_get_full', - v2_fakes.fake_volume_type_get) - - req = fakes.HTTPRequest.blank('/v2/volumes/%s' % fake.VOLUME_ID) - res_dict = self.controller.show(req, fake.VOLUME_ID) - self.assertTrue(res_dict['volume']['encrypted']) - - def test_volume_show_with_unencrypted_volume(self): - self.mock_object(volume_api.API, 'get', v2_fakes.fake_volume_api_get) - self.mock_object(db.sqlalchemy.api, '_volume_type_get_full', - v2_fakes.fake_volume_type_get) - - req = fakes.HTTPRequest.blank('/v2/volumes/%s' % fake.VOLUME_ID) - res_dict = self.controller.show(req, fake.VOLUME_ID) - self.assertEqual(False, res_dict['volume']['encrypted']) - - def test_volume_show_with_error_managing_deleting(self): - def fake_volume_get(self, context, volume_id, **kwargs): - vol = v2_fakes.create_fake_volume(volume_id, - status='error_managing_deleting') - return fake_volume.fake_volume_obj(context, **vol) - - self.mock_object(volume_api.API, 'get', fake_volume_get) - self.mock_object(db.sqlalchemy.api, '_volume_type_get_full', - v2_fakes.fake_volume_type_get) - - req = 
fakes.HTTPRequest.blank('/v2/volumes/%s' % fake.VOLUME_ID) - res_dict = self.controller.show(req, fake.VOLUME_ID) - self.assertEqual('deleting', res_dict['volume']['status']) - - @mock.patch.object(volume_api.API, 'delete', v2_fakes.fake_volume_delete) - @mock.patch.object(volume_api.API, 'get', v2_fakes.fake_volume_get) - def test_volume_delete(self): - req = fakes.HTTPRequest.blank('/v2/volumes/%s' % fake.VOLUME_ID) - resp = self.controller.delete(req, fake.VOLUME_ID) - self.assertEqual(http_client.ACCEPTED, resp.status_int) - - def test_volume_delete_attached(self): - def fake_volume_attached(self, context, volume, - force=False, cascade=False): - raise exception.VolumeAttached(volume_id=volume['id']) - self.mock_object(volume_api.API, "delete", fake_volume_attached) - self.mock_object(volume_api.API, 'get', v2_fakes.fake_volume_get) - - req = fakes.HTTPRequest.blank('/v2/volumes/%s' % fake.VOLUME_ID) - exp = self.assertRaises(exception.VolumeAttached, - self.controller.delete, - req, 1) - expect_msg = "Volume 1 is still attached, detach volume first." 
- self.assertEqual(expect_msg, six.text_type(exp)) - - def test_volume_delete_no_volume(self): - self.mock_object(volume_api.API, "get", - v2_fakes.fake_volume_get_notfound) - - req = fakes.HTTPRequest.blank('/v2/volumes/%s' % fake.VOLUME_ID) - self.assertRaises(exception.VolumeNotFound, self.controller.delete, - req, 1) - - def test_admin_list_volumes_limited_to_project(self): - self.mock_object(db, 'volume_get_all_by_project', - v2_fakes.fake_volume_get_all_by_project) - - req = fakes.HTTPRequest.blank('/v2/%s/volumes' % fake.PROJECT_ID, - use_admin_context=True) - res = self.controller.index(req) - - self.assertIn('volumes', res) - self.assertEqual(1, len(res['volumes'])) - - @mock.patch.object(db, 'volume_get_all', v2_fakes.fake_volume_get_all) - @mock.patch.object(db, 'volume_get_all_by_project', - v2_fakes.fake_volume_get_all_by_project) - def test_admin_list_volumes_all_tenants(self): - req = fakes.HTTPRequest.blank( - '/v2/%s/volumes?all_tenants=1' % fake.PROJECT_ID, - use_admin_context=True) - res = self.controller.index(req) - self.assertIn('volumes', res) - self.assertEqual(3, len(res['volumes'])) - - @mock.patch.object(db, 'volume_get_all', v2_fakes.fake_volume_get_all) - @mock.patch.object(db, 'volume_get_all_by_project', - v2_fakes.fake_volume_get_all_by_project) - @mock.patch.object(volume_api.API, 'get', v2_fakes.fake_volume_get) - def test_all_tenants_non_admin_gets_all_tenants(self): - req = fakes.HTTPRequest.blank( - '/v2/%s/volumes?all_tenants=1' % fake.PROJECT_ID) - res = self.controller.index(req) - self.assertIn('volumes', res) - self.assertEqual(1, len(res['volumes'])) - - @mock.patch.object(db, 'volume_get_all_by_project', - v2_fakes.fake_volume_get_all_by_project) - @mock.patch.object(volume_api.API, 'get', v2_fakes.fake_volume_get) - def test_non_admin_get_by_project(self): - req = fakes.HTTPRequest.blank('/v2/%s/volumes' % fake.PROJECT_ID) - res = self.controller.index(req) - self.assertIn('volumes', res) - self.assertEqual(1, 
len(res['volumes'])) - - def _create_volume_bad_request(self, body): - req = fakes.HTTPRequest.blank('/v2/%s/volumes' % fake.PROJECT_ID) - req.method = 'POST' - - self.assertRaises(webob.exc.HTTPBadRequest, - self.controller.create, req, body) - - def test_create_no_body(self): - self._create_volume_bad_request(body=None) - - def test_create_missing_volume(self): - body = {'foo': {'a': 'b'}} - self._create_volume_bad_request(body=body) - - def test_create_malformed_entity(self): - body = {'volume': 'string'} - self._create_volume_bad_request(body=body) - - def _test_get_volumes_by_name(self, get_all, display_name): - req = mock.MagicMock() - context = mock.Mock() - req.environ = {'cinder.context': context} - req.params = {'display_name': display_name} - self.controller._view_builder.detail_list = mock.Mock() - self.controller._get_volumes(req, True) - get_all.assert_called_once_with( - context, None, CONF.osapi_max_limit, - sort_keys=['created_at'], sort_dirs=['desc'], - filters={'display_name': display_name}, - viewable_admin_meta=True, offset=0) - - @mock.patch('cinder.volume.api.API.get_all') - def test_get_volumes_filter_with_string(self, get_all): - """Test to get a volume with an alpha-numeric display name.""" - self._test_get_volumes_by_name(get_all, 'Volume-573108026') - - @mock.patch('cinder.volume.api.API.get_all') - def test_get_volumes_filter_with_double_quoted_string(self, get_all): - """Test to get a volume with a double-quoted display name.""" - self._test_get_volumes_by_name(get_all, '"Volume-573108026"') - - @mock.patch('cinder.volume.api.API.get_all') - def test_get_volumes_filter_with_single_quoted_string(self, get_all): - """Test to get a volume with a single-quoted display name.""" - self._test_get_volumes_by_name(get_all, "'Volume-573108026'") - - @mock.patch('cinder.volume.api.API.get_all') - def test_get_volumes_filter_with_quote_in_between_string(self, get_all): - """Test to get a volume with a quote in between the display name.""" - 
self._test_get_volumes_by_name(get_all, 'Volu"me-573108026') - - @mock.patch('cinder.volume.api.API.get_all') - def test_get_volumes_filter_with_mixed_quoted_string(self, get_all): - """Test to get a volume with a mix of single and double quotes. """ - # The display name starts with a single quote and ends with a - # double quote - self._test_get_volumes_by_name(get_all, '\'Volume-573108026"') - - @mock.patch('cinder.volume.api.API.get_all') - def test_get_volumes_filter_with_true(self, get_all): - req = mock.MagicMock() - context = mock.Mock() - req.environ = {'cinder.context': context} - req.params = {'display_name': 'Volume-573108026', 'bootable': 1} - self.controller._view_builder.detail_list = mock.Mock() - self.controller._get_volumes(req, True) - get_all.assert_called_once_with( - context, None, CONF.osapi_max_limit, - sort_keys=['created_at'], sort_dirs=['desc'], - filters={'display_name': 'Volume-573108026', 'bootable': True}, - viewable_admin_meta=True, offset=0) - - @mock.patch('cinder.volume.api.API.get_all') - def test_get_volumes_filter_with_false(self, get_all): - req = mock.MagicMock() - context = mock.Mock() - req.environ = {'cinder.context': context} - req.params = {'display_name': 'Volume-573108026', 'bootable': 0} - self.controller._view_builder.detail_list = mock.Mock() - self.controller._get_volumes(req, True) - get_all.assert_called_once_with( - context, None, CONF.osapi_max_limit, - sort_keys=['created_at'], sort_dirs=['desc'], - filters={'display_name': 'Volume-573108026', 'bootable': False}, - viewable_admin_meta=True, offset=0) - - @mock.patch('cinder.volume.api.API.get_all') - def test_get_volumes_filter_with_list(self, get_all): - req = mock.MagicMock() - context = mock.Mock() - req.environ = {'cinder.context': context} - req.params = {'id': "['%s', '%s', '%s']" % ( - fake.VOLUME_ID, fake.VOLUME2_ID, fake.VOLUME3_ID)} - self.controller._view_builder.detail_list = mock.Mock() - self.controller._get_volumes(req, True) - 
get_all.assert_called_once_with( - context, None, CONF.osapi_max_limit, - sort_keys=['created_at'], sort_dirs=['desc'], - filters={'id': [fake.VOLUME_ID, fake.VOLUME2_ID, fake.VOLUME3_ID]}, - viewable_admin_meta=True, - offset=0) - - @mock.patch('cinder.volume.api.API.get_all') - def test_get_volumes_filter_with_expression(self, get_all): - req = mock.MagicMock() - context = mock.Mock() - req.environ = {'cinder.context': context} - req.params = {'name': "d-"} - self.controller._view_builder.detail_list = mock.Mock() - self.controller._get_volumes(req, True) - get_all.assert_called_once_with( - context, None, CONF.osapi_max_limit, - sort_keys=['created_at'], sort_dirs=['desc'], - filters={'display_name': 'd-'}, viewable_admin_meta=True, offset=0) - - @mock.patch('cinder.volume.api.API.get_all') - def test_get_volumes_filter_with_status(self, get_all): - req = mock.MagicMock() - ctxt = context.RequestContext( - fake.USER_ID, fake.PROJECT_ID, auth_token=True) - req.environ = {'cinder.context': ctxt} - req.params = {'status': 'available'} - self.controller._view_builder.detail_list = mock.Mock() - self.controller._get_volumes(req, True) - get_all.assert_called_once_with( - ctxt, None, CONF.osapi_max_limit, - sort_keys=['created_at'], sort_dirs=['desc'], - filters={'status': 'available'}, viewable_admin_meta=True, - offset=0) - - @mock.patch('cinder.volume.api.API.get_all') - def test_get_volumes_filter_with_metadata(self, get_all): - req = mock.MagicMock() - ctxt = context.RequestContext( - fake.USER_ID, fake.PROJECT_ID, auth_token=True) - req.environ = {'cinder.context': ctxt} - req.params = {'metadata': "{'fake_key': 'fake_value'}"} - self.controller._view_builder.detail_list = mock.Mock() - self.controller._get_volumes(req, True) - get_all.assert_called_once_with( - ctxt, None, CONF.osapi_max_limit, - sort_keys=['created_at'], sort_dirs=['desc'], - filters={'metadata': {'fake_key': 'fake_value'}}, - viewable_admin_meta=True, offset=0) - - 
@mock.patch('cinder.volume.api.API.get_all') - def test_get_volumes_filter_with_availability_zone(self, get_all): - req = mock.MagicMock() - ctxt = context.RequestContext( - fake.USER_ID, fake.PROJECT_ID, auth_token=True) - req.environ = {'cinder.context': ctxt} - req.params = {'availability_zone': 'nova'} - self.controller._view_builder.detail_list = mock.Mock() - self.controller._get_volumes(req, True) - get_all.assert_called_once_with( - ctxt, None, CONF.osapi_max_limit, - sort_keys=['created_at'], sort_dirs=['desc'], - filters={'availability_zone': 'nova'}, viewable_admin_meta=True, - offset=0) - - @mock.patch('cinder.volume.api.API.get_all') - def test_get_volumes_filter_with_bootable(self, get_all): - req = mock.MagicMock() - ctxt = context.RequestContext( - fake.USER_ID, fake.PROJECT_ID, auth_token=True) - req.environ = {'cinder.context': ctxt} - req.params = {'bootable': 1} - self.controller._view_builder.detail_list = mock.Mock() - self.controller._get_volumes(req, True) - get_all.assert_called_once_with( - ctxt, None, CONF.osapi_max_limit, - sort_keys=['created_at'], sort_dirs=['desc'], - filters={'bootable': True}, viewable_admin_meta=True, - offset=0) - - @mock.patch('cinder.volume.api.API.get_all') - def test_get_volumes_filter_with_invalid_filter(self, get_all): - req = mock.MagicMock() - ctxt = context.RequestContext( - fake.USER_ID, fake.PROJECT_ID, auth_token=True) - req.environ = {'cinder.context': ctxt} - req.params = {'invalid_filter': 'invalid', - 'availability_zone': 'nova'} - self.controller._view_builder.detail_list = mock.Mock() - self.controller._get_volumes(req, True) - get_all.assert_called_once_with( - ctxt, None, CONF.osapi_max_limit, - sort_keys=['created_at'], sort_dirs=['desc'], - filters={'availability_zone': 'nova'}, viewable_admin_meta=True, - offset=0) - - @mock.patch('cinder.volume.api.API.get_all') - def test_get_volumes_sort_by_name(self, get_all): - """Name in client means display_name in database.""" - - req = 
mock.MagicMock() - ctxt = context.RequestContext( - fake.USER_ID, fake.PROJECT_ID, auth_token=True) - req.environ = {'cinder.context': ctxt} - req.params = {'sort': 'name'} - self.controller._view_builder.detail_list = mock.Mock() - self.controller._get_volumes(req, True) - get_all.assert_called_once_with( - ctxt, None, CONF.osapi_max_limit, - sort_dirs=['desc'], viewable_admin_meta=True, - sort_keys=['display_name'], filters={}, offset=0) - - def test_get_volume_filter_options_using_config(self): - filter_list = ['name', 'status', 'metadata', 'bootable', - 'availability_zone'] - self.override_config('query_volume_filters', filter_list) - self.assertEqual(filter_list, - self.controller._get_volume_filter_options()) diff --git a/cinder/tests/unit/api/v3/__init__.py b/cinder/tests/unit/api/v3/__init__.py deleted file mode 100644 index e69de29bb..000000000 diff --git a/cinder/tests/unit/api/v3/fakes.py b/cinder/tests/unit/api/v3/fakes.py deleted file mode 100644 index c53fb3468..000000000 --- a/cinder/tests/unit/api/v3/fakes.py +++ /dev/null @@ -1,116 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -import datetime -import iso8601 - -from cinder.tests.unit import fake_constants as fake -from cinder.tests.unit import fake_volume -from cinder import utils - -FAKE_UUID = fake.OBJECT_ID -DEFAULT_VOL_NAME = "displayname" -DEFAULT_VOL_DESCRIPTION = "displaydesc" -DEFAULT_VOL_SIZE = 1 -DEFAULT_VOL_TYPE = "vol_type_name" -DEFAULT_VOL_STATUS = "fakestatus" -DEFAULT_VOL_ID = fake.VOLUME_ID -DEFAULT_AZ = "fakeaz" - - -def fake_message(id, **kwargs): - message = { - 'id': id, - 'action_id': "002", - 'detail_id': "001", - 'event_id': "VOLUME_VOLUME_002_001", - 'message_level': "ERROR", - 'request_id': FAKE_UUID, - 'updated_at': datetime.datetime(1900, 1, 1, 1, 1, 1, - tzinfo=iso8601.iso8601.Utc()), - 'created_at': datetime.datetime(1900, 1, 1, 1, 1, 1, - tzinfo=iso8601.iso8601.Utc()), - 'expires_at': datetime.datetime(1900, 1, 1, 1, 1, 1, - tzinfo=iso8601.iso8601.Utc()), - } - - message.update(kwargs) - return message - - -def fake_message_get(self, context, message_id): - return fake_message(message_id) - - -def create_volume(id, **kwargs): - volume = { - 'id': id, - 'user_id': fake.USER_ID, - 'project_id': fake.PROJECT_ID, - 'host': 'fakehost', - 'size': DEFAULT_VOL_SIZE, - 'availability_zone': DEFAULT_AZ, - 'status': DEFAULT_VOL_STATUS, - 'migration_status': None, - 'attach_status': 'attached', - 'name': 'vol name', - 'display_name': DEFAULT_VOL_NAME, - 'display_description': DEFAULT_VOL_DESCRIPTION, - 'updated_at': datetime.datetime(1900, 1, 1, 1, 1, 1, - tzinfo=iso8601.iso8601.Utc()), - 'created_at': datetime.datetime(1900, 1, 1, 1, 1, 1, - tzinfo=iso8601.iso8601.Utc()), - 'snapshot_id': None, - 'source_volid': None, - 'volume_type_id': '3e196c20-3c06-11e2-81c1-0800200c9a66', - 'encryption_key_id': None, - 'volume_admin_metadata': [{'key': 'attached_mode', 'value': 'rw'}, - {'key': 'readonly', 'value': 'False'}], - 'bootable': False, - 'launched_at': datetime.datetime(1900, 1, 1, 1, 1, 1, - tzinfo=iso8601.iso8601.Utc()), - 'volume_type': 
fake_volume.fake_db_volume_type(name=DEFAULT_VOL_TYPE), - 'replication_status': 'disabled', - 'replication_extended_status': None, - 'replication_driver_data': None, - 'volume_attachment': [], - 'multiattach': False, - 'group_id': fake.GROUP_ID, - } - - volume.update(kwargs) - if kwargs.get('volume_glance_metadata', None): - volume['bootable'] = True - if kwargs.get('attach_status') == 'detached': - del volume['volume_admin_metadata'][0] - return volume - - -def fake_volume_create(self, context, size, name, description, snapshot=None, - group_id=None, **param): - vol = create_volume(DEFAULT_VOL_ID) - vol['size'] = size - vol['display_name'] = name - vol['display_description'] = description - source_volume = param.get('source_volume') or {} - vol['source_volid'] = source_volume.get('id') - vol['bootable'] = False - vol['volume_attachment'] = [] - vol['multiattach'] = utils.get_bool_param('multiattach', param) - try: - vol['snapshot_id'] = snapshot['id'] - except (KeyError, TypeError): - vol['snapshot_id'] = None - vol['availability_zone'] = param.get('availability_zone', 'fakeaz') - if group_id: - vol['group_id'] = group_id - return vol diff --git a/cinder/tests/unit/api/v3/stubs.py b/cinder/tests/unit/api/v3/stubs.py deleted file mode 100644 index 830891f3f..000000000 --- a/cinder/tests/unit/api/v3/stubs.py +++ /dev/null @@ -1,43 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -import datetime -import iso8601 - -from cinder.tests.unit import fake_constants as fake - - -FAKE_UUID = fake.OBJECT_ID - - -def stub_message(id, **kwargs): - message = { - 'id': id, - 'action_id': "002", - 'detail_id': "001", - 'event_id': "VOLUME_VOLUME_002_001", - 'message_level': "ERROR", - 'request_id': FAKE_UUID, - 'updated_at': datetime.datetime(1900, 1, 1, 1, 1, 1, - tzinfo=iso8601.iso8601.Utc()), - 'created_at': datetime.datetime(1900, 1, 1, 1, 1, 1, - tzinfo=iso8601.iso8601.Utc()), - 'expires_at': datetime.datetime(1900, 1, 1, 1, 1, 1, - tzinfo=iso8601.iso8601.Utc()), - } - - message.update(kwargs) - return message - - -def stub_message_get(self, context, message_id): - return stub_message(message_id) diff --git a/cinder/tests/unit/api/v3/test_attachments.py b/cinder/tests/unit/api/v3/test_attachments.py deleted file mode 100644 index 950106672..000000000 --- a/cinder/tests/unit/api/v3/test_attachments.py +++ /dev/null @@ -1,288 +0,0 @@ -# Copyright (C) 2017 HuaWei Corporation. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -""" -Tests for attachments Api. 
-""" - -import ddt -import mock -import webob - -from cinder.api.v3 import attachments as v3_attachments -from cinder import context -from cinder import exception -from cinder import objects -from cinder import test -from cinder.tests.unit.api import fakes -from cinder.tests.unit import fake_constants as fake -from cinder.volume import api as volume_api -from cinder.volume import rpcapi as volume_rpcapi - -ATTACHMENTS_MICRO_VERSION = '3.27' - - -@ddt.ddt -class AttachmentsAPITestCase(test.TestCase): - """Test Case for attachment API.""" - - def setUp(self): - super(AttachmentsAPITestCase, self).setUp() - self.controller = v3_attachments.AttachmentsController() - self.volume_api = volume_api.API() - self.ctxt = context.RequestContext(fake.USER_ID, fake.PROJECT_ID, - auth_token=True, - is_admin=True) - self.volume1 = self._create_volume(display_name='fake_volume_1', - project_id=fake.PROJECT_ID) - self.volume2 = self._create_volume(display_name='fake_volume_2', - project_id=fake.PROJECT2_ID) - self.attachment1 = self._create_attachment( - volume_uuid=self.volume1.id, instance_uuid=fake.UUID1) - self.attachment2 = self._create_attachment( - volume_uuid=self.volume1.id, instance_uuid=fake.UUID1) - self.attachment3 = self._create_attachment( - volume_uuid=self.volume1.id, instance_uuid=fake.UUID2) - self.attachment4 = self._create_attachment( - volume_uuid=self.volume2.id, instance_uuid=fake.UUID2) - self.addCleanup(self._cleanup) - - def _cleanup(self): - self.attachment1.destroy() - self.attachment2.destroy() - self.attachment3.destroy() - self.attachment4.destroy() - self.volume1.destroy() - self.volume2.destroy() - - def _create_volume(self, ctxt=None, display_name=None, project_id=None): - """Create a volume object.""" - ctxt = ctxt or self.ctxt - volume = objects.Volume(ctxt) - volume.display_name = display_name - volume.project_id = project_id - volume.status = 'available' - volume.attach_status = 'attached' - volume.create() - return volume - - def 
test_create_attachment(self): - req = fakes.HTTPRequest.blank('/v3/%s/attachments' % - fake.PROJECT_ID, - version=ATTACHMENTS_MICRO_VERSION) - body = { - "attachment": - { - "connector": None, - "instance_uuid": fake.UUID1, - "volume_uuid": self.volume1.id - }, - } - - attachment = self.controller.create(req, body) - - self.assertEqual(self.volume1.id, - attachment['attachment']['volume_id']) - self.assertEqual(fake.UUID1, - attachment['attachment']['instance']) - - @mock.patch.object(volume_rpcapi.VolumeAPI, 'attachment_update') - def test_update_attachment(self, mock_update): - fake_connector = {'fake_key': 'fake_value'} - mock_update.return_value = fake_connector - req = fakes.HTTPRequest.blank('/v3/%s/attachments/%s' % - (fake.PROJECT_ID, self.attachment1.id), - version=ATTACHMENTS_MICRO_VERSION, - use_admin_context=True) - body = { - "attachment": - { - "connector": {'fake_key': 'fake_value'}, - }, - } - - attachment = self.controller.update(req, self.attachment1.id, body) - - self.assertEqual(fake_connector, - attachment['attachment']['connection_info']) - self.assertEqual(fake.UUID1, attachment['attachment']['instance']) - - @mock.patch.object(objects.VolumeAttachment, 'get_by_id') - def test_attachment_operations_not_authorized(self, mock_get): - mock_get.return_value = {'project_id': fake.PROJECT2_ID} - req = fakes.HTTPRequest.blank('/v3/%s/attachments/%s' % - (fake.PROJECT_ID, self.attachment1.id), - version=ATTACHMENTS_MICRO_VERSION, - use_admin_context=False) - body = { - "attachment": - { - "connector": {'fake_key': 'fake_value'}, - }, - } - self.assertRaises(exception.NotAuthorized, - self.controller.update, req, - self.attachment1.id, body) - self.assertRaises(exception.NotAuthorized, - self.controller.delete, req, - self.attachment1.id) - - @ddt.data('3.30', '3.31', '3.34') - @mock.patch('cinder.api.common.reject_invalid_filters') - def test_attachment_list_with_general_filter(self, version, mock_update): - url = '/v3/%s/attachments' % 
fake.PROJECT_ID - req = fakes.HTTPRequest.blank(url, - version=version, - use_admin_context=False) - self.controller.index(req) - - if version != '3.30': - support_like = True if version == '3.34' else False - mock_update.assert_called_once_with(req.environ['cinder.context'], - mock.ANY, 'attachment', - support_like) - - @ddt.data('reserved', 'attached') - @mock.patch.object(volume_rpcapi.VolumeAPI, 'attachment_delete') - def test_delete_attachment(self, status, mock_delete): - volume1 = self._create_volume(display_name='fake_volume_1', - project_id=fake.PROJECT_ID) - attachment = self._create_attachment( - volume_uuid=volume1.id, instance_uuid=fake.UUID1, - attach_status=status) - req = fakes.HTTPRequest.blank('/v3/%s/attachments/%s' % - (fake.PROJECT_ID, attachment.id), - version=ATTACHMENTS_MICRO_VERSION, - use_admin_context=True) - - self.controller.delete(req, attachment.id) - - volume2 = objects.Volume.get_by_id(self.ctxt, volume1.id) - if status == 'reserved': - self.assertEqual('detached', volume2.attach_status) - self.assertRaises( - exception.VolumeAttachmentNotFound, - objects.VolumeAttachment.get_by_id, self.ctxt, attachment.id) - else: - self.assertEqual('attached', volume2.attach_status) - mock_delete.assert_called_once_with(req.environ['cinder.context'], - attachment.id, mock.ANY) - - def _create_attachment(self, ctxt=None, volume_uuid=None, - instance_uuid=None, mountpoint=None, - attach_time=None, detach_time=None, - attach_status=None, attach_mode=None): - """Create an attachment object.""" - ctxt = ctxt or self.ctxt - attachment = objects.VolumeAttachment(ctxt) - attachment.volume_id = volume_uuid - attachment.instance_uuid = instance_uuid - attachment.mountpoint = mountpoint - attachment.attach_time = attach_time - attachment.detach_time = detach_time - attachment.attach_status = attach_status or 'reserved' - attachment.attach_mode = attach_mode - attachment.create() - return attachment - - @ddt.data("instance_uuid", "volume_uuid") - def 
test_create_attachment_without_resource_uuid(self, resource_uuid): - req = fakes.HTTPRequest.blank('/v3/%s/attachments' % - fake.PROJECT_ID, - version=ATTACHMENTS_MICRO_VERSION) - body = { - "attachment": - { - "connector": None - } - } - body["attachment"][resource_uuid] = "test_id" - - self.assertRaises(webob.exc.HTTPBadRequest, - self.controller.create, req, body) - - @ddt.data(False, True) - def test_list_attachments(self, is_detail): - url = '/v3/%s/attachments' % fake.PROJECT_ID - list_func = self.controller.index - if is_detail: - url = '/v3/%s/groups/detail' % fake.PROJECT_ID - list_func = self.controller.detail - req = fakes.HTTPRequest.blank(url, version=ATTACHMENTS_MICRO_VERSION, - use_admin_context=True) - res_dict = list_func(req) - - self.assertEqual(1, len(res_dict)) - self.assertEqual(3, len(res_dict['attachments'])) - self.assertEqual(self.attachment3.id, - res_dict['attachments'][0]['id']) - - def test_list_attachments_with_limit(self): - url = '/v3/%s/attachments?limit=1' % fake.PROJECT_ID - req = fakes.HTTPRequest.blank(url, version=ATTACHMENTS_MICRO_VERSION, - use_admin_context=True) - res_dict = self.controller.index(req) - - self.assertEqual(1, len(res_dict)) - self.assertEqual(1, len(res_dict['attachments'])) - - def test_list_attachments_with_marker(self): - url = '/v3/%s/attachments?marker=%s' % (fake.PROJECT_ID, - self.attachment3.id) - req = fakes.HTTPRequest.blank(url, version=ATTACHMENTS_MICRO_VERSION, - use_admin_context=True) - res_dict = self.controller.index(req) - - self.assertEqual(1, len(res_dict)) - self.assertEqual(2, len(res_dict['attachments'])) - self.assertEqual(self.attachment2.id, - res_dict['attachments'][0]['id']) - - @ddt.data("desc", "asc") - def test_list_attachments_with_sort(self, sort_dir): - url = '/v3/%s/attachments?sort_key=id&sort_dir=%s' % (fake.PROJECT_ID, - sort_dir) - req = fakes.HTTPRequest.blank(url, version=ATTACHMENTS_MICRO_VERSION, - use_admin_context=True) - res_dict = self.controller.index(req) - - 
self.assertEqual(1, len(res_dict)) - self.assertEqual(3, len(res_dict['attachments'])) - order_ids = sorted([self.attachment1.id, - self.attachment2.id, - self.attachment3.id]) - expect_result = order_ids[2] if sort_dir == "desc" else order_ids[0] - self.assertEqual(expect_result, - res_dict['attachments'][0]['id']) - - @ddt.data({'admin': True, 'request_url': '?all_tenants=1', 'count': 4}, - {'admin': False, 'request_url': '?all_tenants=1', 'count': 3}, - {'admin': True, 'request_url': - '?all_tenants=1&project_id=%s' % fake.PROJECT2_ID, - 'count': 1}, - {'admin': False, 'request_url': '', 'count': 3}, - {'admin': False, 'request_url': '?instance_id=%s' % fake.UUID1, - 'count': 2}, - {'admin': False, 'request_url': '?instance_id=%s' % fake.UUID2, - 'count': 1}) - @ddt.unpack - def test_list_attachment_with_tenants(self, admin, request_url, count): - url = '/v3/%s/attachments%s' % (fake.PROJECT_ID, request_url) - req = fakes.HTTPRequest.blank(url, version=ATTACHMENTS_MICRO_VERSION, - use_admin_context=admin) - res_dict = self.controller.index(req) - - self.assertEqual(1, len(res_dict)) - self.assertEqual(count, len(res_dict['attachments'])) diff --git a/cinder/tests/unit/api/v3/test_backups.py b/cinder/tests/unit/api/v3/test_backups.py deleted file mode 100644 index 33fdabae8..000000000 --- a/cinder/tests/unit/api/v3/test_backups.py +++ /dev/null @@ -1,143 +0,0 @@ -# Copyright (c) 2016 Intel, Inc. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -"""The backups V3 api.""" - -import ddt -import mock -import webob - -from cinder.api.openstack import api_version_request as api_version -from cinder.api.v3 import backups -from cinder.api.views import backups as backup_view -import cinder.backup -from cinder import context -from cinder import exception -from cinder.objects import fields -from cinder import test -from cinder.tests.unit.api import fakes -from cinder.tests.unit import fake_constants as fake -from cinder.tests.unit import utils as test_utils - - -@ddt.ddt -class BackupsControllerAPITestCase(test.TestCase): - """Test cases for backups API.""" - - def setUp(self): - super(BackupsControllerAPITestCase, self).setUp() - self.backup_api = cinder.backup.API() - self.ctxt = context.RequestContext(fake.USER_ID, fake.PROJECT_ID, - auth_token=True, - is_admin=True) - self.controller = backups.BackupsController() - - def _fake_update_request(self, backup_id, version='3.9'): - req = fakes.HTTPRequest.blank('/v3/%s/backups/%s/update' % - (fake.PROJECT_ID, backup_id)) - req.environ['cinder.context'].is_admin = True - req.headers['Content-Type'] = 'application/json' - req.headers['OpenStack-API-Version'] = 'volume ' + version - req.api_version_request = api_version.APIVersionRequest(version) - return req - - def test_update_wrong_version(self): - req = self._fake_update_request(fake.BACKUP_ID, version='3.6') - body = {"backup": {"name": "Updated Test Name", }} - self.assertRaises(exception.VersionNotFoundForAPIMethod, - self.controller.update, req, fake.BACKUP_ID, - body) - - def test_backup_update_with_no_body(self): - # omit body from the request - req = self._fake_update_request(fake.BACKUP_ID) - self.assertRaises(webob.exc.HTTPBadRequest, - self.controller.update, - req, fake.BACKUP_ID, None) - - def test_backup_update_with_unsupported_field(self): - req = self._fake_update_request(fake.BACKUP_ID) - body = {"backup": {"id": fake.BACKUP2_ID, - "description": "", }} - self.assertRaises(webob.exc.HTTPBadRequest, 
- self.controller.update, - req, fake.BACKUP_ID, body) - - def test_backup_update_with_backup_not_found(self): - req = self._fake_update_request(fake.BACKUP_ID) - updates = { - "name": "Updated Test Name", - "description": "Updated Test description.", - } - body = {"backup": updates} - self.assertRaises(exception.NotFound, - self.controller.update, - req, fake.BACKUP_ID, body) - - @ddt.data('3.30', '3.31', '3.34') - @mock.patch('cinder.api.common.reject_invalid_filters') - def test_backup_list_with_general_filter(self, version, mock_update): - url = '/v3/%s/backups' % fake.PROJECT_ID - req = fakes.HTTPRequest.blank(url, - version=version, - use_admin_context=False) - self.controller.index(req) - - if version != '3.30': - support_like = True if version == '3.34' else False - mock_update.assert_called_once_with(req.environ['cinder.context'], - mock.ANY, 'backup', - support_like) - - @ddt.data('3.36', '3.37') - def test_backup_list_with_name(self, version): - backup1 = test_utils.create_backup( - self.ctxt, display_name='b_test_name', - status=fields.BackupStatus.AVAILABLE) - backup2 = test_utils.create_backup( - self.ctxt, display_name='a_test_name', - status=fields.BackupStatus.AVAILABLE) - url = '/v3/%s/backups?sort_key=name' % fake.PROJECT_ID - req = fakes.HTTPRequest.blank(url, version=version) - if version == '3.36': - self.assertRaises(exception.InvalidInput, - self.controller.index, - req) - else: - expect = backup_view.ViewBuilder().summary_list(req, - [backup1, backup2]) - result = self.controller.index(req) - self.assertEqual(expect, result) - - def test_backup_update(self): - backup = test_utils.create_backup( - self.ctxt, - status=fields.BackupStatus.AVAILABLE) - req = self._fake_update_request(fake.BACKUP_ID) - new_name = "updated_test_name" - new_description = "Updated Test description." 
- updates = { - "name": new_name, - "description": new_description, - } - body = {"backup": updates} - self.controller.update(req, - backup.id, - body) - - backup.refresh() - self.assertEqual(new_name, backup.display_name) - self.assertEqual(new_description, - backup.display_description) diff --git a/cinder/tests/unit/api/v3/test_cluster.py b/cinder/tests/unit/api/v3/test_cluster.py deleted file mode 100644 index e6f280d79..000000000 --- a/cinder/tests/unit/api/v3/test_cluster.py +++ /dev/null @@ -1,312 +0,0 @@ -# Copyright (c) 2016 Red Hat, Inc. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -import datetime - -import ddt -from iso8601 import iso8601 -import mock -from oslo_utils import versionutils - -from cinder.api import extensions -from cinder.api.openstack import api_version_request as api_version -from cinder.api.v3 import clusters -from cinder import context -from cinder import exception -from cinder import test -from cinder.tests.unit import fake_cluster - - -CLUSTERS = [ - fake_cluster.fake_db_cluster( - id=1, - replication_status='error', - frozen=False, - active_backend_id='replication1', - last_heartbeat=datetime.datetime(2016, 6, 1, 2, 46, 28), - updated_at=datetime.datetime(2016, 6, 1, 2, 46, 28), - created_at=datetime.datetime(2016, 6, 1, 2, 46, 28)), - fake_cluster.fake_db_cluster( - id=2, name='cluster2', num_hosts=2, num_down_hosts=1, disabled=True, - replication_status='error', - frozen=True, - active_backend_id='replication2', - updated_at=datetime.datetime(2016, 6, 1, 1, 46, 28), - created_at=datetime.datetime(2016, 6, 1, 1, 46, 28)) -] - -CLUSTERS_ORM = [fake_cluster.fake_cluster_orm(**kwargs) for kwargs in CLUSTERS] - -EXPECTED = [{'created_at': datetime.datetime(2016, 6, 1, 2, 46, 28), - 'disabled_reason': None, - 'last_heartbeat': datetime.datetime(2016, 6, 1, 2, 46, 28), - 'name': 'cluster_name', - 'binary': 'cinder-volume', - 'num_down_hosts': 0, - 'num_hosts': 0, - 'state': 'up', - 'status': 'enabled', - 'replication_status': 'error', - 'frozen': False, - 'active_backend_id': 'replication1', - 'updated_at': datetime.datetime(2016, 6, 1, 2, 46, 28)}, - {'created_at': datetime.datetime(2016, 6, 1, 1, 46, 28), - 'disabled_reason': None, - 'last_heartbeat': '', - 'name': 'cluster2', - 'binary': 'cinder-volume', - 'num_down_hosts': 1, - 'num_hosts': 2, - 'state': 'down', - 'status': 'disabled', - 'replication_status': 'error', - 'frozen': True, - 'active_backend_id': 'replication2', - 'updated_at': datetime.datetime(2016, 6, 1, 1, 46, 28)}] - - -class FakeRequest(object): - def __init__(self, is_admin=True, version='3.7', 
**kwargs): - self.GET = kwargs - self.headers = {'OpenStack-API-Version': 'volume ' + version} - self.api_version_request = api_version.APIVersionRequest(version) - self.environ = { - 'cinder.context': context.RequestContext(user_id=None, - project_id=None, - is_admin=is_admin, - read_deleted='no', - overwrite=False) - } - - -def fake_utcnow(with_timezone=False): - tzinfo = iso8601.Utc() if with_timezone else None - return datetime.datetime(2016, 6, 1, 2, 46, 30, tzinfo=tzinfo) - - -@ddt.ddt -@mock.patch('oslo_utils.timeutils.utcnow', fake_utcnow) -class ClustersTestCase(test.TestCase): - """Test Case for Clusters.""" - LIST_FILTERS = ({}, {'is_up': True}, {'disabled': False}, {'num_hosts': 2}, - {'num_down_hosts': 1}, {'binary': 'cinder-volume'}, - {'is_up': True, 'disabled': False, 'num_hosts': 2, - 'num_down_hosts': 1, 'binary': 'cinder-volume'}) - - REPLICATION_FILTERS = ({'replication_status': 'error'}, {'frozen': True}, - {'active_backend_id': 'replication'}) - - def _get_expected(self, version='3.8'): - if versionutils.convert_version_to_tuple(version) >= (3, 19): - return EXPECTED - - expect = [] - for cluster in EXPECTED: - cluster = cluster.copy() - for key in ('replication_status', 'frozen', 'active_backend_id'): - cluster.pop(key) - expect.append(cluster) - return expect - - def setUp(self): - super(ClustersTestCase, self).setUp() - - self.context = context.get_admin_context() - self.ext_mgr = extensions.ExtensionManager() - self.ext_mgr.extensions = {} - self.controller = clusters.ClusterController(self.ext_mgr) - - @mock.patch('cinder.db.cluster_get_all', return_value=CLUSTERS_ORM) - def _test_list(self, get_all_mock, detailed, filters=None, expected=None, - version='3.8'): - filters = filters or {} - req = FakeRequest(version=version, **filters) - method = getattr(self.controller, 'detail' if detailed else 'index') - clusters = method(req) - - filters = filters.copy() - filters.setdefault('is_up', None) - filters.setdefault('read_deleted', 'no') - 
self.assertEqual(expected, clusters) - get_all_mock.assert_called_once_with( - req.environ['cinder.context'], - get_services=False, - services_summary=detailed, - **filters) - - @ddt.data(*LIST_FILTERS) - def test_index_detail(self, filters): - """Verify that we get all clusters with detailed data.""" - expected = {'clusters': self._get_expected()} - self._test_list(detailed=True, filters=filters, expected=expected) - - @ddt.data(*LIST_FILTERS) - def test_index_summary(self, filters): - """Verify that we get all clusters with summary data.""" - expected = {'clusters': [{'name': 'cluster_name', - 'binary': 'cinder-volume', - 'state': 'up', - 'status': 'enabled'}, - {'name': 'cluster2', - 'binary': 'cinder-volume', - 'state': 'down', - 'status': 'disabled'}]} - self._test_list(detailed=False, filters=filters, expected=expected) - - @ddt.data(*REPLICATION_FILTERS) - def test_index_detail_fail_old(self, filters): - self.assertRaises(exception.InvalidInput, self._test_list, - detailed=True, filters=filters) - - @ddt.data(*REPLICATION_FILTERS) - def test_index_summary_fail_old(self, filters): - self.assertRaises(exception.InvalidInput, self._test_list, - detailed=False, filters=filters) - - @ddt.data(True, False) - def test_index_unauthorized(self, detailed): - """Verify that unauthorized user can't list clusters.""" - self.assertRaises(exception.PolicyNotAuthorized, - self._test_list, detailed=detailed, - filters={'is_admin': False}) - - @ddt.data(True, False) - def test_index_wrong_version(self, detailed): - """Verify the wrong version so that user can't list clusters.""" - self.assertRaises(exception.VersionNotFoundForAPIMethod, - self._test_list, detailed=detailed, - version='3.6') - - @ddt.data(*REPLICATION_FILTERS) - def test_index_detail_replication_new_fields(self, filters): - version = '3.26' - expected = {'clusters': self._get_expected(version)} - self._test_list(detailed=True, filters=filters, expected=expected, - version=version) - - 
@ddt.data(*REPLICATION_FILTERS) - def test_index_summary_replication_new_fields(self, filters): - expected = {'clusters': [{'name': 'cluster_name', - 'binary': 'cinder-volume', - 'state': 'up', - 'replication_status': 'error', - 'status': 'enabled'}, - {'name': 'cluster2', - 'binary': 'cinder-volume', - 'state': 'down', - 'replication_status': 'error', - 'status': 'disabled'}]} - self._test_list(detailed=False, filters=filters, expected=expected, - version='3.26') - - @mock.patch('cinder.db.sqlalchemy.api.cluster_get', - return_value=CLUSTERS_ORM[0]) - def test_show(self, get_mock): - req = FakeRequest() - expected = {'cluster': self._get_expected()[0]} - cluster = self.controller.show(req, mock.sentinel.name, - mock.sentinel.binary) - self.assertEqual(expected, cluster) - get_mock.assert_called_once_with( - req.environ['cinder.context'], - None, - services_summary=True, - name=mock.sentinel.name, - binary=mock.sentinel.binary) - - def test_show_unauthorized(self): - req = FakeRequest(is_admin=False) - self.assertRaises(exception.PolicyNotAuthorized, - self.controller.show, req, 'name') - - def test_show_wrong_version(self): - req = FakeRequest(version='3.5') - self.assertRaises(exception.VersionNotFoundForAPIMethod, - self.controller.show, req, 'name') - - @mock.patch('cinder.db.sqlalchemy.api.cluster_update') - @mock.patch('cinder.db.sqlalchemy.api.cluster_get', - return_value=CLUSTERS_ORM[1]) - def test_update_enable(self, get_mock, update_mock): - req = FakeRequest() - expected = {'cluster': {'name': u'cluster2', - 'binary': 'cinder-volume', - 'state': 'down', - 'status': 'enabled', - 'disabled_reason': None}} - res = self.controller.update(req, 'enable', - {'name': mock.sentinel.name, - 'binary': mock.sentinel.binary}) - self.assertEqual(expected, res) - ctxt = req.environ['cinder.context'] - get_mock.assert_called_once_with(ctxt, - None, binary=mock.sentinel.binary, - name=mock.sentinel.name) - update_mock.assert_called_once_with(ctxt, 
get_mock.return_value.id, - {'disabled': False, - 'disabled_reason': None}) - - @mock.patch('cinder.db.sqlalchemy.api.cluster_update') - @mock.patch('cinder.db.sqlalchemy.api.cluster_get', - return_value=CLUSTERS_ORM[0]) - def test_update_disable(self, get_mock, update_mock): - req = FakeRequest() - disabled_reason = 'For testing' - expected = {'cluster': {'name': u'cluster_name', - 'state': 'up', - 'binary': 'cinder-volume', - 'status': 'disabled', - 'disabled_reason': disabled_reason}} - res = self.controller.update(req, 'disable', - {'name': mock.sentinel.name, - 'binary': mock.sentinel.binary, - 'disabled_reason': disabled_reason}) - self.assertEqual(expected, res) - ctxt = req.environ['cinder.context'] - get_mock.assert_called_once_with(ctxt, - None, binary=mock.sentinel.binary, - name=mock.sentinel.name) - update_mock.assert_called_once_with( - ctxt, get_mock.return_value.id, - {'disabled': True, 'disabled_reason': disabled_reason}) - - def test_update_wrong_action(self): - req = FakeRequest() - self.assertRaises(exception.NotFound, self.controller.update, req, - 'action', {}) - - @ddt.data('enable', 'disable') - def test_update_missing_name(self, action): - req = FakeRequest() - self.assertRaises(exception.MissingRequired, self.controller.update, - req, action, {'binary': mock.sentinel.binary}) - - def test_update_wrong_disabled_reason(self): - req = FakeRequest() - self.assertRaises(exception.InvalidInput, self.controller.update, req, - 'disable', {'name': mock.sentinel.name, - 'disabled_reason': ' '}) - - @ddt.data('enable', 'disable') - def test_update_unauthorized(self, action): - req = FakeRequest(is_admin=False) - self.assertRaises(exception.PolicyNotAuthorized, - self.controller.update, req, action, {}) - - @ddt.data('enable', 'disable') - def test_update_wrong_version(self, action): - req = FakeRequest(version='3.5') - self.assertRaises(exception.VersionNotFoundForAPIMethod, - self.controller.update, req, action, {}) diff --git 
a/cinder/tests/unit/api/v3/test_consistencygroups.py b/cinder/tests/unit/api/v3/test_consistencygroups.py deleted file mode 100644 index ece7524ab..000000000 --- a/cinder/tests/unit/api/v3/test_consistencygroups.py +++ /dev/null @@ -1,194 +0,0 @@ -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import ddt -from six.moves import http_client -import webob - -from cinder.api.openstack import api_version_request as api_version -from cinder.api.v3 import consistencygroups -from cinder import context -from cinder import objects -from cinder.objects import fields -from cinder import test -from cinder.tests.unit.api import fakes -from cinder.tests.unit import fake_constants as fake - - -@ddt.ddt -class ConsistencyGroupsAPITestCase(test.TestCase): - """Test Case for consistency groups API.""" - - def setUp(self): - super(ConsistencyGroupsAPITestCase, self).setUp() - self.ctxt = context.RequestContext(fake.USER_ID, fake.PROJECT_ID, - auth_token=True, - is_admin=True) - self.user_ctxt = context.RequestContext( - fake.USER_ID, fake.PROJECT_ID, auth_token=True) - self.controller = consistencygroups.ConsistencyGroupsController() - - def _create_consistencygroup( - self, - ctxt=None, - name='test_consistencygroup', - description='this is a test consistency group', - group_type_id=fake.GROUP_TYPE_ID, - volume_type_ids=[fake.VOLUME_TYPE_ID], - availability_zone='az1', - host='fakehost', - status=fields.ConsistencyGroupStatus.CREATING, - **kwargs): - """Create a consistency group 
object.""" - ctxt = ctxt or self.ctxt - consistencygroup = objects.Group(ctxt) - consistencygroup.user_id = fake.USER_ID - consistencygroup.project_id = fake.PROJECT_ID - consistencygroup.availability_zone = availability_zone - consistencygroup.name = name - consistencygroup.description = description - consistencygroup.group_type_id = group_type_id - consistencygroup.volume_type_ids = volume_type_ids - consistencygroup.host = host - consistencygroup.status = status - consistencygroup.update(kwargs) - consistencygroup.create() - return consistencygroup - - def test_update_consistencygroup_empty_parameters(self): - consistencygroup = self._create_consistencygroup( - ctxt=self.ctxt, - status=fields.ConsistencyGroupStatus.AVAILABLE) - req = fakes.HTTPRequest.blank('/v3/%s/consistencygroups/%s/update' % - (fake.PROJECT_ID, consistencygroup.id)) - req.environ['cinder.context'].is_admin = True - req.headers['Content-Type'] = 'application/json' - req.headers['OpenStack-API-Version'] = 'volume 3.6' - req.api_version_request = api_version.APIVersionRequest('3.6') - body = {"consistencygroup": {"name": "", - "description": "", - "add_volumes": None, - "remove_volumes": None, }} - res_dict = self.controller.update(req, - consistencygroup.id, - body) - consistencygroup = objects.Group.get_by_id( - self.ctxt, consistencygroup.id) - self.assertEqual(http_client.ACCEPTED, res_dict.status_int) - self.assertEqual("", consistencygroup.name) - self.assertEqual("", consistencygroup.description) - consistencygroup.destroy() - - def test_update_consistencygroup_empty_parameters_unsupport_version(self): - consistencygroup = self._create_consistencygroup( - ctxt=self.ctxt, - status=fields.ConsistencyGroupStatus.AVAILABLE) - req = fakes.HTTPRequest.blank('/v3/%s/consistencygroups/%s/update' % - (fake.PROJECT_ID, consistencygroup.id)) - req.environ['cinder.context'].is_admin = True - req.headers['Content-Type'] = 'application/json' - req.headers['OpenStack-API-Version'] = 'volume 3.5' - 
req.api_version_request = api_version.APIVersionRequest('3.5') - body = {"consistencygroup": {"name": "", - "description": "", - "add_volumes": None, - "remove_volumes": None, }} - self.assertRaisesRegexp(webob.exc.HTTPBadRequest, - "Name, description, add_volumes, " - "and remove_volumes can not be all " - "empty in the request body.", - self.controller.update, - req, consistencygroup.id, body) - consistencygroup.destroy() - - def test_update_consistencygroup_all_empty_parameters_version_36(self): - consistencygroup = self._create_consistencygroup( - ctxt=self.ctxt, - status=fields.ConsistencyGroupStatus.AVAILABLE) - req = fakes.HTTPRequest.blank('/v3/%s/consistencygroups/%s/update' % - (fake.PROJECT_ID, consistencygroup.id)) - req.environ['cinder.context'].is_admin = True - req.headers['Content-Type'] = 'application/json' - req.headers['OpenStack-API-Version'] = 'volume 3.6' - req.api_version_request = api_version.APIVersionRequest('3.6') - body = {"consistencygroup": {"name": None, - "description": None, - "add_volumes": None, - "remove_volumes": None, }} - self.assertRaisesRegexp(webob.exc.HTTPBadRequest, "Must specify " - "one or more of the following keys to " - "update: name, description, add_volumes, " - "remove_volumes.", self.controller.update, - req, consistencygroup.id, body) - consistencygroup.destroy() - - def test_update_consistencygroup_all_empty_parameters_not_version_36(self): - consistencygroup = self._create_consistencygroup( - ctxt=self.ctxt, - status=fields.ConsistencyGroupStatus.AVAILABLE) - req = fakes.HTTPRequest.blank('/v3/%s/consistencygroups/%s/update' % - (fake.PROJECT_ID, consistencygroup.id)) - req.environ['cinder.context'].is_admin = True - req.headers['Content-Type'] = 'application/json' - req.headers['OpenStack-API-Version'] = 'volume 3.5' - req.api_version_request = api_version.APIVersionRequest('3.5') - body = {"consistencygroup": {"name": None, - "description": None, - "add_volumes": None, - "remove_volumes": None, }} - 
self.assertRaisesRegexp(webob.exc.HTTPBadRequest, "Name, description, " - "add_volumes, and remove_volumes can not be " - "all empty in the request body.", - self.controller.update, - req, consistencygroup.id, body) - consistencygroup.destroy() - - def test_update_consistencygroup_no_body(self): - consistencygroup = self._create_consistencygroup( - ctxt=self.ctxt, - status=fields.ConsistencyGroupStatus.AVAILABLE) - req = fakes.HTTPRequest.blank('/v3/%s/consistencygroups/%s/update' % - (fake.PROJECT_ID, consistencygroup.id)) - req.environ['cinder.context'].is_admin = True - req.headers['Content-Type'] = 'application/json' - req.headers['OpenStack-API-Version'] = 'volume 3.5' - req.api_version_request = api_version.APIVersionRequest('3.5') - body = None - self.assertRaisesRegexp(webob.exc.HTTPBadRequest, - "Missing request body", - self.controller.update, - req, consistencygroup.id, body) - consistencygroup.destroy() - - def test_update_consistencygroups_no_empty_parameters(self): - consistencygroup = self._create_consistencygroup( - ctxt=self.ctxt, - status=fields.ConsistencyGroupStatus.AVAILABLE) - req = fakes.HTTPRequest.blank('/v3/%s/consistencygroups/%s/update' % - (fake.PROJECT_ID, consistencygroup.id)) - req.environ['cinder.context'].is_admin = True - req.headers['Content-Type'] = 'application/json' - req.headers['OpenStack-API-Version'] = 'volume 3.5' - req.api_version_request = api_version.APIVersionRequest('3.5') - body = {"consistencygroup": {"name": "my_fake_cg", - "description": "fake consistency group", - "add_volumes": "volume-uuid-1", - "remove_volumes": - "volume-uuid-2, volume uuid-3", }} - allow_empty = self.controller._check_update_parameters_v3( - req, body['consistencygroup']['name'], - body['consistencygroup']['description'], - body['consistencygroup']['add_volumes'], - body['consistencygroup']['remove_volumes']) - self.assertEqual(False, allow_empty) - consistencygroup.destroy() diff --git a/cinder/tests/unit/api/v3/test_group_snapshots.py 
b/cinder/tests/unit/api/v3/test_group_snapshots.py deleted file mode 100644 index d464c4e5f..000000000 --- a/cinder/tests/unit/api/v3/test_group_snapshots.py +++ /dev/null @@ -1,540 +0,0 @@ -# Copyright (C) 2016 EMC Corporation. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -""" -Tests for group_snapshot code. -""" - -import ddt -import mock -from six.moves import http_client -import webob - -from cinder.api.v3 import group_snapshots as v3_group_snapshots -from cinder import context -from cinder import db -from cinder import exception -from cinder.group import api as group_api -from cinder import objects -from cinder.objects import fields -from cinder import test -from cinder.tests.unit.api import fakes -from cinder.tests.unit import fake_constants as fake -from cinder.tests.unit import utils -import cinder.volume - -GROUP_MICRO_VERSION = '3.14' -SUPPORT_FILTER_VERSION = '3.29' - - -@ddt.ddt -class GroupSnapshotsAPITestCase(test.TestCase): - """Test Case for group_snapshots API.""" - - def setUp(self): - super(GroupSnapshotsAPITestCase, self).setUp() - self.controller = v3_group_snapshots.GroupSnapshotsController() - self.volume_api = cinder.volume.API() - self.context = context.get_admin_context() - self.context.project_id = fake.PROJECT_ID - self.context.user_id = fake.USER_ID - self.user_ctxt = context.RequestContext( - fake.USER_ID, fake.PROJECT_ID, auth_token=True) - self.group = utils.create_group(self.context, - 
group_type_id=fake.GROUP_TYPE_ID, - volume_type_ids=[fake.VOLUME_TYPE_ID]) - self.volume = utils.create_volume(self.context, - group_id=self.group.id, - volume_type_id=fake.VOLUME_TYPE_ID) - self.g_snapshots_array = [ - utils.create_group_snapshot( - self.context, - group_id=self.group.id, - group_type_id=self.group.group_type_id) for _ in range(3)] - self.addCleanup(self._cleanup) - - def _cleanup(self): - for snapshot in self.g_snapshots_array: - snapshot.destroy() - self.volume.destroy() - self.group.destroy() - - def test_show_group_snapshot(self): - group_snapshot = utils.create_group_snapshot( - self.context, group_id=self.group.id) - req = fakes.HTTPRequest.blank('/v3/%s/group_snapshots/%s' % - (fake.PROJECT_ID, group_snapshot.id), - version=GROUP_MICRO_VERSION) - res_dict = self.controller.show(req, group_snapshot.id) - - self.assertEqual(1, len(res_dict)) - self.assertEqual('this is a test group snapshot', - res_dict['group_snapshot']['description']) - self.assertEqual('test_group_snapshot', - res_dict['group_snapshot']['name']) - self.assertEqual(fields.GroupSnapshotStatus.CREATING, - res_dict['group_snapshot']['status']) - - group_snapshot.destroy() - - @ddt.data(True, False) - def test_list_group_snapshots_with_limit(self, is_detail): - - url = '/v3/%s/group_snapshots?limit=1' % fake.PROJECT_ID - if is_detail: - url = '/v3/%s/group_snapshots/detail?limit=1' % fake.PROJECT_ID - req = fakes.HTTPRequest.blank(url, version=SUPPORT_FILTER_VERSION) - if is_detail: - res_dict = self.controller.detail(req) - else: - res_dict = self.controller.index(req) - - self.assertEqual(2, len(res_dict)) - self.assertEqual(1, len(res_dict['group_snapshots'])) - self.assertEqual(self.g_snapshots_array[2].id, - res_dict['group_snapshots'][0]['id']) - next_link = ( - 'http://localhost/v3/%s/group_snapshots?limit=' - '1&marker=%s' % - (fake.PROJECT_ID, res_dict['group_snapshots'][0]['id'])) - self.assertEqual(next_link, - res_dict['group_snapshot_links'][0]['href']) - if 
is_detail: - self.assertIn('description', res_dict['group_snapshots'][0].keys()) - else: - self.assertNotIn('description', - res_dict['group_snapshots'][0].keys()) - - @ddt.data(True, False) - def test_list_group_snapshot_with_offset(self, is_detail): - url = '/v3/%s/group_snapshots?offset=1' % fake.PROJECT_ID - if is_detail: - url = '/v3/%s/group_snapshots/detail?offset=1' % fake.PROJECT_ID - req = fakes.HTTPRequest.blank(url, version=SUPPORT_FILTER_VERSION) - if is_detail: - res_dict = self.controller.detail(req) - else: - res_dict = self.controller.index(req) - self.assertEqual(1, len(res_dict)) - self.assertEqual(2, len(res_dict['group_snapshots'])) - self.assertEqual(self.g_snapshots_array[1].id, - res_dict['group_snapshots'][0]['id']) - self.assertEqual(self.g_snapshots_array[0].id, - res_dict['group_snapshots'][1]['id']) - if is_detail: - self.assertIn('description', res_dict['group_snapshots'][0].keys()) - else: - self.assertNotIn('description', - res_dict['group_snapshots'][0].keys()) - - @ddt.data(True, False) - def test_list_group_snapshot_with_offset_out_of_range(self, is_detail): - url = ('/v3/%s/group_snapshots?offset=234523423455454' % - fake.PROJECT_ID) - if is_detail: - url = ('/v3/%s/group_snapshots/detail?offset=234523423455454' % - fake.PROJECT_ID) - req = fakes.HTTPRequest.blank(url, version=SUPPORT_FILTER_VERSION) - if is_detail: - self.assertRaises(webob.exc.HTTPBadRequest, self.controller.detail, - req) - else: - self.assertRaises(webob.exc.HTTPBadRequest, self.controller.index, - req) - - @ddt.data(False, True) - def test_list_group_snapshot_with_limit_and_offset(self, is_detail): - group_snapshot = utils.create_group_snapshot( - self.context, - group_id=self.group.id, - group_type_id=self.group.group_type_id) - url = '/v3/%s/group_snapshots?limit=2&offset=1' % fake.PROJECT_ID - if is_detail: - url = ('/v3/%s/group_snapshots/detail?limit=2&offset=1' % - fake.PROJECT_ID) - req = fakes.HTTPRequest.blank(url, version=SUPPORT_FILTER_VERSION) - 
if is_detail: - res_dict = self.controller.detail(req) - else: - res_dict = self.controller.index(req) - - self.assertEqual(2, len(res_dict)) - self.assertEqual(2, len(res_dict['group_snapshots'])) - self.assertEqual(self.g_snapshots_array[2].id, - res_dict['group_snapshots'][0]['id']) - self.assertEqual(self.g_snapshots_array[1].id, - res_dict['group_snapshots'][1]['id']) - self.assertIsNotNone(res_dict['group_snapshot_links'][0]['href']) - if is_detail: - self.assertIn('description', res_dict['group_snapshots'][0].keys()) - else: - self.assertNotIn('description', - res_dict['group_snapshots'][0].keys()) - group_snapshot.destroy() - - @ddt.data('3.30', '3.31', '3.34') - @mock.patch('cinder.api.common.reject_invalid_filters') - def test_group_snapshot_list_with_general_filter(self, - version, mock_update): - url = '/v3/%s/group_snapshots' % fake.PROJECT_ID - req = fakes.HTTPRequest.blank(url, - version=version, - use_admin_context=False) - self.controller.index(req) - - if version != '3.30': - support_like = True if version == '3.34' else False - mock_update.assert_called_once_with(req.environ['cinder.context'], - mock.ANY, 'group_snapshot', - support_like) - - @ddt.data(False, True) - def test_list_group_snapshot_with_filter(self, is_detail): - url = ('/v3/%s/group_snapshots?' - 'all_tenants=True&id=%s') % (fake.PROJECT_ID, - self.g_snapshots_array[0].id) - if is_detail: - url = ('/v3/%s/group_snapshots/detail?' 
- 'all_tenants=True&id=%s') % (fake.PROJECT_ID, - self.g_snapshots_array[0].id) - req = fakes.HTTPRequest.blank(url, version=SUPPORT_FILTER_VERSION, - use_admin_context=True) - if is_detail: - res_dict = self.controller.detail(req) - else: - res_dict = self.controller.index(req) - - self.assertEqual(1, len(res_dict)) - self.assertEqual(1, len(res_dict['group_snapshots'])) - self.assertEqual(self.g_snapshots_array[0].id, - res_dict['group_snapshots'][0]['id']) - if is_detail: - self.assertIn('description', res_dict['group_snapshots'][0].keys()) - else: - self.assertNotIn('description', - res_dict['group_snapshots'][0].keys()) - - @ddt.data({'is_detail': True, 'version': GROUP_MICRO_VERSION}, - {'is_detail': False, 'version': GROUP_MICRO_VERSION}, - {'is_detail': True, 'version': '3.28'}, - {'is_detail': False, 'version': '3.28'},) - @ddt.unpack - def test_list_group_snapshot_with_filter_previous_version(self, is_detail, - version): - url = ('/v3/%s/group_snapshots?' - 'all_tenants=True&id=%s') % (fake.PROJECT_ID, - self.g_snapshots_array[0].id) - if is_detail: - url = ('/v3/%s/group_snapshots/detail?' 
- 'all_tenants=True&id=%s') % (fake.PROJECT_ID, - self.g_snapshots_array[0].id) - req = fakes.HTTPRequest.blank(url, version=version, - use_admin_context=True) - - if is_detail: - res_dict = self.controller.detail(req) - else: - res_dict = self.controller.index(req) - - self.assertEqual(1, len(res_dict)) - self.assertEqual(3, len(res_dict['group_snapshots'])) - - @ddt.data(False, True) - def test_list_group_snapshot_with_sort(self, is_detail): - url = '/v3/%s/group_snapshots?sort=id:asc' % fake.PROJECT_ID - if is_detail: - url = ('/v3/%s/group_snapshots/detail?sort=id:asc' % - fake.PROJECT_ID) - req = fakes.HTTPRequest.blank(url, version=SUPPORT_FILTER_VERSION) - expect_result = [snapshot.id for snapshot in self.g_snapshots_array] - expect_result.sort() - if is_detail: - res_dict = self.controller.detail(req) - else: - res_dict = self.controller.index(req) - self.assertEqual(1, len(res_dict)) - self.assertEqual(3, len(res_dict['group_snapshots'])) - self.assertEqual(expect_result[0], - res_dict['group_snapshots'][0]['id']) - self.assertEqual(expect_result[1], - res_dict['group_snapshots'][1]['id']) - self.assertEqual(expect_result[2], - res_dict['group_snapshots'][2]['id']) - if is_detail: - self.assertIn('description', res_dict['group_snapshots'][0].keys()) - else: - self.assertNotIn('description', - res_dict['group_snapshots'][0].keys()) - - def test_show_group_snapshot_with_group_snapshot_not_found(self): - req = fakes.HTTPRequest.blank('/v3/%s/group_snapshots/%s' % - (fake.PROJECT_ID, - fake.WILL_NOT_BE_FOUND_ID), - version=GROUP_MICRO_VERSION) - self.assertRaises(exception.GroupSnapshotNotFound, - self.controller.show, - req, fake.WILL_NOT_BE_FOUND_ID) - - @ddt.data(True, False) - def test_list_group_snapshots_json(self, is_detail): - if is_detail: - request_url = '/v3/%s/group_snapshots/detail' - else: - request_url = '/v3/%s/group_snapshots' - req = fakes.HTTPRequest.blank(request_url % fake.PROJECT_ID, - version=GROUP_MICRO_VERSION) - if is_detail: - 
res_dict = self.controller.detail(req) - else: - res_dict = self.controller.index(req) - - self.assertEqual(1, len(res_dict)) - self.assertEqual(3, len(res_dict['group_snapshots'])) - for index, snapshot in enumerate(self.g_snapshots_array): - self.assertEqual(snapshot.id, - res_dict['group_snapshots'][2 - index]['id']) - self.assertIsNotNone( - res_dict['group_snapshots'][2 - index]['name']) - if is_detail: - self.assertIn('description', - res_dict['group_snapshots'][2 - index].keys()) - else: - self.assertNotIn('description', - res_dict['group_snapshots'][2 - index].keys()) - - @mock.patch( - 'cinder.api.openstack.wsgi.Controller.validate_name_and_description') - @mock.patch('cinder.db.volume_type_get') - @mock.patch('cinder.quota.VolumeTypeQuotaEngine.reserve') - def test_create_group_snapshot_json(self, mock_quota, mock_vol_type, - mock_validate): - body = {"group_snapshot": {"name": "group_snapshot1", - "description": - "Group Snapshot 1", - "group_id": self.group.id}} - req = fakes.HTTPRequest.blank('/v3/%s/group_snapshots' % - fake.PROJECT_ID, - version=GROUP_MICRO_VERSION) - res_dict = self.controller.create(req, body) - - self.assertEqual(1, len(res_dict)) - self.assertIn('id', res_dict['group_snapshot']) - self.assertTrue(mock_validate.called) - group_snapshot = objects.GroupSnapshot.get_by_id( - context.get_admin_context(), res_dict['group_snapshot']['id']) - group_snapshot.destroy() - - @mock.patch( - 'cinder.api.openstack.wsgi.Controller.validate_name_and_description') - @mock.patch('cinder.db.volume_type_get') - def test_create_group_snapshot_when_volume_in_error_status( - self, mock_vol_type, mock_validate): - group = utils.create_group( - self.context, - group_type_id=fake.GROUP_TYPE_ID, - volume_type_ids=[fake.VOLUME_TYPE_ID],) - volume_id = utils.create_volume( - self.context, - status='error', - group_id=group.id, - volume_type_id=fake.VOLUME_TYPE_ID)['id'] - body = {"group_snapshot": {"name": "group_snapshot1", - "description": - "Group Snapshot 
1", - "group_id": group.id}} - req = fakes.HTTPRequest.blank('/v3/%s/group_snapshots' % - fake.PROJECT_ID, - version=GROUP_MICRO_VERSION) - self.assertRaises(webob.exc.HTTPBadRequest, self.controller.create, - req, body) - self.assertTrue(mock_validate.called) - - group.destroy() - db.volume_destroy(context.get_admin_context(), - volume_id) - - def test_create_group_snapshot_with_no_body(self): - # omit body from the request - req = fakes.HTTPRequest.blank('/v3/%s/group_snapshots' % - fake.PROJECT_ID, - version=GROUP_MICRO_VERSION) - self.assertRaises(webob.exc.HTTPBadRequest, self.controller.create, - req, None) - - @mock.patch.object(group_api.API, 'create_group_snapshot', - side_effect=exception.InvalidGroupSnapshot( - reason='Invalid group snapshot')) - def test_create_with_invalid_group_snapshot(self, mock_create_group_snap): - body = {"group_snapshot": {"name": "group_snapshot1", - "description": - "Group Snapshot 1", - "group_id": self.group.id}} - req = fakes.HTTPRequest.blank('/v3/%s/group_snapshots' % - fake.PROJECT_ID, - version=GROUP_MICRO_VERSION) - self.assertRaises(webob.exc.HTTPBadRequest, self.controller.create, - req, body) - - @mock.patch.object(group_api.API, 'create_group_snapshot', - side_effect=exception.GroupSnapshotNotFound( - group_snapshot_id='invalid_id')) - def test_create_with_group_snapshot_not_found(self, mock_create_grp_snap): - body = {"group_snapshot": {"name": "group_snapshot1", - "description": - "Group Snapshot 1", - "group_id": self.group.id}} - req = fakes.HTTPRequest.blank('/v3/%s/group_snapshots' % - fake.PROJECT_ID, - version=GROUP_MICRO_VERSION) - self.assertRaises(exception.GroupSnapshotNotFound, - self.controller.create, - req, body) - - def test_create_group_snapshot_from_empty_group(self): - empty_group = utils.create_group( - self.context, - group_type_id=fake.GROUP_TYPE_ID, - volume_type_ids=[fake.VOLUME_TYPE_ID]) - body = {"group_snapshot": {"name": "group_snapshot1", - "description": - "Group Snapshot 1", - 
"group_id": empty_group.id}} - req = fakes.HTTPRequest.blank('/v3/%s/group_snapshots' % - fake.PROJECT_ID, - version=GROUP_MICRO_VERSION) - - self.assertRaises(webob.exc.HTTPBadRequest, self.controller.create, - req, body) - empty_group.destroy() - - def test_delete_group_snapshot_available(self): - group_snapshot = utils.create_group_snapshot( - self.context, - group_id=self.group.id, - status=fields.GroupSnapshotStatus.AVAILABLE) - req = fakes.HTTPRequest.blank('/v3/%s/group_snapshots/%s' % - (fake.PROJECT_ID, group_snapshot.id), - version=GROUP_MICRO_VERSION) - res_dict = self.controller.delete(req, group_snapshot.id) - - group_snapshot = objects.GroupSnapshot.get_by_id(self.context, - group_snapshot.id) - self.assertEqual(http_client.ACCEPTED, res_dict.status_int) - self.assertEqual(fields.GroupSnapshotStatus.DELETING, - group_snapshot.status) - - group_snapshot.destroy() - - def test_delete_group_snapshot_available_used_as_source(self): - group_snapshot = utils.create_group_snapshot( - self.context, - group_id=self.group.id, - status=fields.GroupSnapshotStatus.AVAILABLE) - - group2 = utils.create_group( - self.context, status='creating', - group_snapshot_id=group_snapshot.id, - group_type_id=fake.GROUP_TYPE_ID, - volume_type_ids=[fake.VOLUME_TYPE_ID],) - req = fakes.HTTPRequest.blank('/v3/%s/group_snapshots/%s' % - (fake.PROJECT_ID, group_snapshot.id), - version=GROUP_MICRO_VERSION) - self.assertRaises(webob.exc.HTTPBadRequest, self.controller.delete, - req, group_snapshot.id) - - group_snapshot.destroy() - group2.destroy() - - def test_delete_group_snapshot_with_group_snapshot_NotFound(self): - req = fakes.HTTPRequest.blank('/v3/%s/group_snapshots/%s' % - (fake.PROJECT_ID, - fake.WILL_NOT_BE_FOUND_ID), - version=GROUP_MICRO_VERSION) - self.assertRaises(exception.GroupSnapshotNotFound, - self.controller.delete, - req, fake.WILL_NOT_BE_FOUND_ID) - - def test_delete_group_snapshot_with_invalid_group_snapshot(self): - group_snapshot = utils.create_group_snapshot( 
- self.context, - group_id=self.group.id, - status='invalid') - req = fakes.HTTPRequest.blank('/v3/%s/group_snapshots/%s' % - (fake.PROJECT_ID, group_snapshot.id), - version=GROUP_MICRO_VERSION) - self.assertRaises(webob.exc.HTTPBadRequest, self.controller.delete, - req, group_snapshot.id) - - group_snapshot.destroy() - - @ddt.data(('3.11', 'fake_snapshot_001', - fields.GroupSnapshotStatus.AVAILABLE, - exception.VersionNotFoundForAPIMethod), - ('3.18', 'fake_snapshot_001', - fields.GroupSnapshotStatus.AVAILABLE, - exception.VersionNotFoundForAPIMethod), - ('3.19', 'fake_snapshot_001', - fields.GroupSnapshotStatus.AVAILABLE, - exception.GroupSnapshotNotFound)) - @ddt.unpack - def test_reset_group_snapshot_status_illegal(self, version, - group_snapshot_id, - status, exceptions): - req = fakes.HTTPRequest.blank('/v3/%s/group_snapshots/%s/action' % - (fake.PROJECT_ID, group_snapshot_id), - version=version) - body = {"reset_status": { - "status": status - }} - self.assertRaises(exceptions, - self.controller.reset_status, - req, group_snapshot_id, body) - - def test_reset_group_snapshot_status_invalid_status(self): - group_snapshot = utils.create_group_snapshot( - self.context, - group_id=self.group.id, - status=fields.GroupSnapshotStatus.CREATING) - req = fakes.HTTPRequest.blank('/v3/%s/group_snapshots/%s/action' % - (fake.PROJECT_ID, group_snapshot.id), - version='3.19') - body = {"reset_status": { - "status": "invalid_test_status" - }} - self.assertRaises(webob.exc.HTTPBadRequest, - self.controller.reset_status, - req, group_snapshot.id, body) - group_snapshot.destroy() - - def test_reset_group_snapshot_status(self): - group_snapshot = utils.create_group_snapshot( - self.context, - group_id=self.group.id, - status=fields.GroupSnapshotStatus.CREATING) - req = fakes.HTTPRequest.blank('/v3/%s/group_snapshots/%s/action' % - (fake.PROJECT_ID, group_snapshot.id), - version='3.19') - body = {"reset_status": { - "status": fields.GroupSnapshotStatus.AVAILABLE - }} - response = 
self.controller.reset_status(req, group_snapshot.id, - body) - - g_snapshot = objects.GroupSnapshot.get_by_id(self.context, - group_snapshot.id) - self.assertEqual(http_client.ACCEPTED, response.status_int) - self.assertEqual(fields.GroupSnapshotStatus.AVAILABLE, - g_snapshot.status) - group_snapshot.destroy() diff --git a/cinder/tests/unit/api/v3/test_group_specs.py b/cinder/tests/unit/api/v3/test_group_specs.py deleted file mode 100644 index 4c15d75b6..000000000 --- a/cinder/tests/unit/api/v3/test_group_specs.py +++ /dev/null @@ -1,221 +0,0 @@ -# Copyright 2017 Intel Corporation -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -import mock -import webob - -from cinder import context -from cinder import db -from cinder import rpc -from cinder import test - -from cinder.api.v3 import group_specs as v3_group_specs -from cinder.tests.unit.api import fakes -from cinder.tests.unit import fake_constants as fake - -GROUP_TYPE_MICRO_VERSION = '3.11' - -fake_group_specs = { - 'key1': 'value1', - 'key2': 'value2' -} - -create_fake_group_specs = { - 'group_specs': { - 'key1': 'value1', - 'key2': 'value2' - } -} - -update_fake_group_specs = { - 'id': 'any_string' -} - -incorrect_fake_group_specs = { - 'group_specs': { - 'key#': 'value1', - 'key2': 'value2' - } -} - - -class GroupSpecsTestCase(test.TestCase): - """test cases for the group specs API""" - - def setUp(self): - super(GroupSpecsTestCase, self).setUp() - self.controller = v3_group_specs.GroupTypeSpecsController() - self.ctxt = context.RequestContext( - user_id=fake.USER_ID, - project_id=fake.PROJECT_ID, - is_admin=True) - - @mock.patch.object(db, 'group_type_get', return_value={}) - @mock.patch.object(db, 'group_type_specs_get', - return_value=fake_group_specs) - def test_group_types_index(self, - mock_group_type_specs_get, - mock_group_type_get): - req = fakes.HTTPRequest.blank('v3/%s/group_specs' % - fake.PROJECT_ID, - use_admin_context=True, - version=GROUP_TYPE_MICRO_VERSION) - req.environ['cinder.context'] = self.ctxt - res_dict = self.controller.index(req, fake.GROUP_ID) - group_specs_dict = res_dict['group_specs'] - mock_group_type_specs_get.assert_called() - self.assertEqual('value1', group_specs_dict['key1']) - self.assertEqual('value2', group_specs_dict['key2']) - - @mock.patch.object(rpc, 'get_notifier') - @mock.patch.object(db, 'group_type_get', return_value={}) - @mock.patch.object(db, 'group_type_specs_update_or_create', - return_value={}) - def test_group_types_create(self, - mock_update_or_create, - mock_group_type_get, - mock_rpc_notifier): - req = fakes.HTTPRequest.blank('v3/%s/group_specs' % - fake.PROJECT_ID, - 
use_admin_context=True, - version=GROUP_TYPE_MICRO_VERSION) - self.controller.create(req, fake.GROUP_ID, create_fake_group_specs) - self.assertTrue(mock_rpc_notifier.called) - - @mock.patch.object(rpc, 'get_notifier') - @mock.patch.object(db, 'group_type_get', return_value={}) - @mock.patch.object(db, 'group_type_specs_get', - return_value=fake_group_specs) - @mock.patch.object(db, 'group_type_specs_update_or_create', - return_value={}) - def test_group_types_update(self, - mock_update_or_create, - mock_typ_specs_get, - mock_group_type_get, - mock_rpc_notifier): - req = fakes.HTTPRequest.blank('v3/%s/group_specs' % - fake.PROJECT_ID, - use_admin_context=True, - version=GROUP_TYPE_MICRO_VERSION) - self.controller.update(req, - fake.GROUP_TYPE_ID, - 'id', - update_fake_group_specs) - self.assertTrue(mock_rpc_notifier.called) - - @mock.patch.object(db, 'group_type_specs_get', - return_value=fake_group_specs) - @mock.patch.object(db, 'group_type_get', return_value={}) - def test_group_types_show(self, - mock_group_type_get, - mock_fake_group_specs): - req = fakes.HTTPRequest.blank('v3/%s/group_specs' % - fake.PROJECT_ID, - use_admin_context=True, - version=GROUP_TYPE_MICRO_VERSION) - res_dict = self.controller.show(req, fake.GROUP_TYPE_ID, 'key1') - self.assertEqual('value1', res_dict['key1']) - - @mock.patch.object(rpc, 'get_notifier') - @mock.patch.object(db, 'group_type_specs_delete', return_value={}) - @mock.patch.object(db, 'group_type_get', return_value={}) - def test_group_types_delete(self, - mock_group_type_get, - mock_group_spec_delete, - rpc_notifier_mock): - req = fakes.HTTPRequest.blank('v3/%s/group_specs' % - fake.PROJECT_ID, - use_admin_context=True, - version=GROUP_TYPE_MICRO_VERSION) - self.controller.delete(req, fake.GROUP_TYPE_ID, 'key1') - self.assertTrue(rpc_notifier_mock.called) - - @mock.patch.object(rpc, 'get_notifier') - @mock.patch.object(db, 'group_type_specs_update_or_create', - return_value={}) - def 
test_check_type_should_raise_exception(self, - mock_db_update_or_create, - mock_rpc_notifier): - req = fakes.HTTPRequest.blank('v3/%s/group_specs' % - fake.PROJECT_ID, - use_admin_context=True, - version=GROUP_TYPE_MICRO_VERSION) - self.assertRaises(webob.exc.HTTPNotFound, - self.controller.create, - req, - fake.GROUP_ID, - create_fake_group_specs) - - @mock.patch.object(rpc, 'get_notifier') - @mock.patch.object(db, 'group_type_get', return_value={}) - def test_delete_should_raise_exception(self, - mock_group_type_get, - mock_get_notifier): - req = fakes.HTTPRequest.blank('v3/%s/group_specs' % - fake.PROJECT_ID, - use_admin_context=True, - version=GROUP_TYPE_MICRO_VERSION) - self.assertRaises(webob.exc.HTTPNotFound, - self.controller.delete, - req, - fake.GROUP_TYPE_ID, - 'key1') - - @mock.patch.object(db, 'group_type_get', return_value={}) - def test_update_should_raise_exceptions(self, mock_group_type_get): - req = fakes.HTTPRequest.blank('v3/%s/group_specs' % - fake.PROJECT_ID, - use_admin_context=True, - version=GROUP_TYPE_MICRO_VERSION) - self.assertRaises(webob.exc.HTTPBadRequest, - self.controller.update, - req, - fake.GROUP_TYPE_ID, - 'id') - self.assertRaises(webob.exc.HTTPBadRequest, self.controller.update, - req, fake.GROUP_TYPE_ID, 'id', fake_group_specs) - self.assertRaises(webob.exc.HTTPBadRequest, self.controller.update, - req, fake.GROUP_TYPE_ID, 'key1', fake_group_specs) - - @mock.patch.object(db, 'group_type_specs_get', - return_value=fake_group_specs) - @mock.patch.object(db, 'group_type_get', return_value={}) - def test_show_should_raise_exception(self, - mock_group_type_get, - mock_group_type_specs_get): - req = fakes.HTTPRequest.blank('v3/%s/group_specs' % - fake.PROJECT_ID, - use_admin_context=True, - version=GROUP_TYPE_MICRO_VERSION) - self.assertRaises(webob.exc.HTTPNotFound, - self.controller.show, - req, - fake.GROUP_TYPE_ID, - 'key') - - @mock.patch.object(rpc, 'get_notifier') - @mock.patch.object(db, 'group_type_get', return_value={}) - 
@mock.patch.object(db, 'group_type_specs_update_or_create', - return_value={}) - def test_check_key_name_should_raise_exception(self, - mock_update_or_create, - mock_group_type_get, - mock_rpc_notifier): - req = fakes.HTTPRequest.blank('v3/%s/group_specs' % - fake.PROJECT_ID, - use_admin_context=True, - version=GROUP_TYPE_MICRO_VERSION) - self.assertRaises(webob.exc.HTTPBadRequest, self.controller.create, - req, fake.GROUP_ID, incorrect_fake_group_specs) diff --git a/cinder/tests/unit/api/v3/test_group_types.py b/cinder/tests/unit/api/v3/test_group_types.py deleted file mode 100644 index 48ea6a6f4..000000000 --- a/cinder/tests/unit/api/v3/test_group_types.py +++ /dev/null @@ -1,640 +0,0 @@ -# Copyright 2016 EMC Corporation -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -import uuid - -import ddt -import mock -from oslo_utils import strutils -from oslo_utils import timeutils -import six -import webob - -import cinder.api.common as common -from cinder.api.v3 import group_specs as v3_group_specs -from cinder.api.v3 import group_types as v3_group_types -from cinder.api.v3.views import group_types as views_types -from cinder import context -from cinder import exception -from cinder import test -from cinder.tests.unit.api import fakes -from cinder.tests.unit import fake_constants as fake -from cinder.volume import group_types - -GROUP_TYPE_MICRO_VERSION = '3.11' -IN_USE_GROUP_TYPE = fake.GROUP_TYPE3_ID - - -def stub_group_type(id): - specs = { - "key1": "value1", - "key2": "value2", - "key3": "value3", - "key4": "value4", - "key5": "value5" - } - return dict( - id=id, - name='group_type_%s' % six.text_type(id), - description='group_type_desc_%s' % six.text_type(id), - group_specs=specs, - ) - - -def return_group_types_get_all_types(context, filters=None, marker=None, - limit=None, sort_keys=None, - sort_dirs=None, offset=None, - list_result=False): - result = dict(group_type_1=stub_group_type(1), - group_type_2=stub_group_type(2), - group_type_3=stub_group_type(3) - ) - if list_result: - return list(result.values()) - return result - - -def return_empty_group_types_get_all_types(context, filters=None, marker=None, - limit=None, sort_keys=None, - sort_dirs=None, offset=None, - list_result=False): - if list_result: - return [] - return {} - - -def return_group_types_get_group_type(context, id): - if id == fake.WILL_NOT_BE_FOUND_ID: - raise exception.GroupTypeNotFound(group_type_id=id) - return stub_group_type(id) - - -def return_group_types_get_default(): - return stub_group_type(1) - - -def return_group_types_get_default_not_found(): - return {} - - -def return_group_types_with_groups_destroy(context, id): - if id == IN_USE_GROUP_TYPE: - raise exception.GroupTypeInUse(group_type_id=id) - - -@ddt.ddt -class 
GroupTypesApiTest(test.TestCase): - - def _create_group_type(self, group_type_name, group_specs=None, - is_public=True, projects=None): - return group_types.create(self.ctxt, group_type_name, group_specs, - is_public, projects).get('id') - - def setUp(self): - super(GroupTypesApiTest, self).setUp() - self.controller = v3_group_types.GroupTypesController() - self.specs_controller = v3_group_specs.GroupTypeSpecsController() - self.ctxt = context.RequestContext(user_id=fake.USER_ID, - project_id=fake.PROJECT_ID, - is_admin=True) - self.user_ctxt = context.RequestContext(user_id=fake.USER2_ID, - project_id=fake.PROJECT2_ID, - is_admin=False) - self.type_id1 = self._create_group_type('group_type1', - {'key1': 'value1'}) - self.type_id2 = self._create_group_type('group_type2', - {'key2': 'value2'}) - self.type_id3 = self._create_group_type('group_type3', - {'key3': 'value3'}, False, - [fake.PROJECT_ID]) - self.type_id0 = group_types.get_default_cgsnapshot_type()['id'] - - @ddt.data('0', 'f', 'false', 'off', 'n', 'no', '1', 't', 'true', 'on', - 'y', 'yes') - @mock.patch.object(group_types, "get_group_type_by_name") - @mock.patch.object(group_types, "create") - @mock.patch("cinder.api.openstack.wsgi.Request.cache_resource") - @mock.patch("cinder.api.views.types.ViewBuilder.show") - def test_create_group_type_with_valid_is_public_in_string( - self, is_public, mock_show, mock_cache_resource, - mock_create, mock_get): - boolean_is_public = strutils.bool_from_string(is_public) - req = fakes.HTTPRequest.blank('/v3/%s/types' % fake.PROJECT_ID, - version=GROUP_TYPE_MICRO_VERSION) - req.environ['cinder.context'] = self.ctxt - - body = {"group_type": {"is_public": is_public, "name": "group_type1", - "description": None}} - self.controller.create(req, body) - mock_create.assert_called_once_with( - self.ctxt, 'group_type1', {}, - boolean_is_public, description=None) - - @ddt.data(fake.GROUP_TYPE_ID, IN_USE_GROUP_TYPE) - def test_group_type_destroy(self, grp_type_id): - grp_type = 
{'id': grp_type_id, 'name': 'grp' + grp_type_id} - self.mock_object(group_types, 'get_group_type', - return_value=grp_type) - self.mock_object(group_types, 'destroy', - return_group_types_with_groups_destroy) - mock_notify_info = self.mock_object( - v3_group_types.GroupTypesController, - '_notify_group_type_info') - mock_notify_error = self.mock_object( - v3_group_types.GroupTypesController, - '_notify_group_type_error') - req = fakes.HTTPRequest.blank('/v3/%s/group_types/%s' % ( - fake.PROJECT_ID, grp_type_id), - version=GROUP_TYPE_MICRO_VERSION) - req.environ['cinder.context'] = self.ctxt - if grp_type_id == IN_USE_GROUP_TYPE: - self.assertRaises(webob.exc.HTTPBadRequest, - self.controller.delete, - req, grp_type_id) - mock_notify_error.assert_called_once_with( - self.ctxt, 'group_type.delete', mock.ANY, - group_type=grp_type) - else: - self.controller.delete(req, grp_type_id) - mock_notify_info.assert_called_once_with( - self.ctxt, 'group_type.delete', grp_type) - - def test_group_types_index(self): - self.mock_object(group_types, 'get_all_group_types', - return_group_types_get_all_types) - - req = fakes.HTTPRequest.blank('/v3/%s/group_types' % fake.PROJECT_ID, - use_admin_context=True, - version=GROUP_TYPE_MICRO_VERSION) - res_dict = self.controller.index(req) - - self.assertEqual(3, len(res_dict['group_types'])) - - expected_names = ['group_type_1', 'group_type_2', 'group_type_3'] - actual_names = map(lambda e: e['name'], res_dict['group_types']) - self.assertEqual(set(expected_names), set(actual_names)) - for entry in res_dict['group_types']: - self.assertEqual('value1', entry['group_specs']['key1']) - - def test_group_types_index_no_data(self): - self.mock_object(group_types, 'get_all_group_types', - return_empty_group_types_get_all_types) - - req = fakes.HTTPRequest.blank('/v3/%s/group_types' % fake.PROJECT_ID, - version=GROUP_TYPE_MICRO_VERSION) - res_dict = self.controller.index(req) - - self.assertEqual(0, len(res_dict['group_types'])) - - def 
test_group_types_index_with_limit(self): - req = fakes.HTTPRequest.blank('/v3/%s/group_types?limit=1' % - fake.PROJECT_ID, - version=GROUP_TYPE_MICRO_VERSION) - req.environ['cinder.context'] = self.ctxt - res = self.controller.index(req) - - self.assertEqual(1, len(res['group_types'])) - self.assertEqual(self.type_id3, res['group_types'][0]['id']) - - expect_next_link = ('http://localhost/v3/%s/group_types?limit=1' - '&marker=%s' % - (fake.PROJECT_ID, res['group_types'][0]['id'])) - self.assertEqual(expect_next_link, res['group_type_links'][0]['href']) - - def test_group_types_index_with_offset(self): - req = fakes.HTTPRequest.blank( - '/v3/%s/group_types?offset=1' % fake.PROJECT_ID, - version=GROUP_TYPE_MICRO_VERSION) - req.environ['cinder.context'] = self.ctxt - res = self.controller.index(req) - - self.assertEqual(3, len(res['group_types'])) - - def test_group_types_index_with_offset_out_of_range(self): - url = '/v3/%s/group_types?offset=424366766556787' % fake.PROJECT_ID - req = fakes.HTTPRequest.blank(url, version=GROUP_TYPE_MICRO_VERSION) - self.assertRaises(webob.exc.HTTPBadRequest, - self.controller.index, req) - - def test_group_types_index_with_limit_and_offset(self): - req = fakes.HTTPRequest.blank( - '/v3/%s/group_types?limit=2&offset=1' % fake.PROJECT_ID, - version=GROUP_TYPE_MICRO_VERSION) - req.environ['cinder.context'] = self.ctxt - res = self.controller.index(req) - - self.assertEqual(2, len(res['group_types'])) - self.assertEqual(self.type_id2, res['group_types'][0]['id']) - self.assertEqual(self.type_id1, res['group_types'][1]['id']) - - def test_group_types_index_with_limit_and_marker(self): - req = fakes.HTTPRequest.blank('/v3/%s/group_types?limit=1' - '&marker=%s' % - (fake.PROJECT_ID, - self.type_id2), - version=GROUP_TYPE_MICRO_VERSION) - req.environ['cinder.context'] = self.ctxt - res = self.controller.index(req) - - self.assertEqual(1, len(res['group_types'])) - self.assertEqual(self.type_id1, res['group_types'][0]['id']) - - def 
test_group_types_index_with_valid_filter(self): - req = fakes.HTTPRequest.blank( - '/v3/%s/group_types?is_public=True' % fake.PROJECT_ID, - version=GROUP_TYPE_MICRO_VERSION) - req.environ['cinder.context'] = self.ctxt - res = self.controller.index(req) - - self.assertEqual(4, len(res['group_types'])) - self.assertEqual(self.type_id3, res['group_types'][0]['id']) - self.assertEqual(self.type_id2, res['group_types'][1]['id']) - self.assertEqual(self.type_id1, res['group_types'][2]['id']) - self.assertEqual(self.type_id0, res['group_types'][3]['id']) - - def test_group_types_index_with_invalid_filter(self): - req = fakes.HTTPRequest.blank( - '/v3/%s/group_types?id=%s' % (fake.PROJECT_ID, self.type_id1), - version=GROUP_TYPE_MICRO_VERSION) - req.environ['cinder.context'] = self.ctxt - res = self.controller.index(req) - - self.assertEqual(4, len(res['group_types'])) - - def test_group_types_index_with_sort_keys(self): - req = fakes.HTTPRequest.blank('/v3/%s/group_types?sort=id' % - fake.PROJECT_ID, - version=GROUP_TYPE_MICRO_VERSION) - req.environ['cinder.context'] = self.ctxt - res = self.controller.index(req) - expect_result = [self.type_id0, self.type_id1, self.type_id2, - self.type_id3] - expect_result.sort(reverse=True) - - self.assertEqual(4, len(res['group_types'])) - self.assertEqual(expect_result[0], res['group_types'][0]['id']) - self.assertEqual(expect_result[1], res['group_types'][1]['id']) - self.assertEqual(expect_result[2], res['group_types'][2]['id']) - self.assertEqual(expect_result[3], res['group_types'][3]['id']) - - def test_group_types_index_with_sort_and_limit(self): - req = fakes.HTTPRequest.blank( - '/v3/%s/group_types?sort=id&limit=2' % fake.PROJECT_ID, - version=GROUP_TYPE_MICRO_VERSION) - req.environ['cinder.context'] = self.ctxt - res = self.controller.index(req) - expect_result = [self.type_id0, self.type_id1, self.type_id2, - self.type_id3] - expect_result.sort(reverse=True) - - self.assertEqual(2, len(res['group_types'])) - 
self.assertEqual(expect_result[0], res['group_types'][0]['id']) - self.assertEqual(expect_result[1], res['group_types'][1]['id']) - - def test_group_types_index_with_sort_keys_and_sort_dirs(self): - req = fakes.HTTPRequest.blank( - '/v3/%s/group_types?sort=id:asc' % fake.PROJECT_ID, - version=GROUP_TYPE_MICRO_VERSION) - req.environ['cinder.context'] = self.ctxt - res = self.controller.index(req) - expect_result = [self.type_id0, self.type_id1, self.type_id2, - self.type_id3] - expect_result.sort() - - self.assertEqual(4, len(res['group_types'])) - self.assertEqual(expect_result[0], res['group_types'][0]['id']) - self.assertEqual(expect_result[1], res['group_types'][1]['id']) - self.assertEqual(expect_result[2], res['group_types'][2]['id']) - self.assertEqual(expect_result[3], res['group_types'][3]['id']) - - @ddt.data('0', 'f', 'false', 'off', 'n', 'no', '1', 't', 'true', 'on', - 'y', 'yes') - @mock.patch.object(group_types, "get_group_type") - @mock.patch.object(group_types, "update") - @mock.patch("cinder.api.openstack.wsgi.Request.cache_resource") - @mock.patch("cinder.api.views.types.ViewBuilder.show") - def test_update_group_type_with_valid_is_public_in_string( - self, is_public, mock_show, mock_cache_resource, - mock_update, mock_get): - boolean_is_public = strutils.bool_from_string(is_public) - type_id = six.text_type(uuid.uuid4()) - req = fakes.HTTPRequest.blank( - '/v3/%s/types/%s' % (fake.PROJECT_ID, type_id), - version=GROUP_TYPE_MICRO_VERSION) - req.environ['cinder.context'] = self.ctxt - body = {"group_type": {"is_public": is_public, "name": "group_type1"}} - self.controller.update(req, type_id, body) - mock_update.assert_called_once_with( - self.ctxt, type_id, 'group_type1', None, - is_public=boolean_is_public) - - def test_group_types_show(self): - self.mock_object(group_types, 'get_group_type', - return_group_types_get_group_type) - - type_id = six.text_type(uuid.uuid4()) - req = fakes.HTTPRequest.blank('/v3/%s/group_types/' % fake.PROJECT_ID - + 
type_id, - version=GROUP_TYPE_MICRO_VERSION) - res_dict = self.controller.show(req, type_id) - - self.assertEqual(1, len(res_dict)) - self.assertEqual(type_id, res_dict['group_type']['id']) - type_name = 'group_type_' + type_id - self.assertEqual(type_name, res_dict['group_type']['name']) - - def test_group_types_show_pre_microversion(self): - self.mock_object(group_types, 'get_group_type', - return_group_types_get_group_type) - - type_id = six.text_type(uuid.uuid4()) - req = fakes.HTTPRequest.blank('/v3/%s/group_types/' % fake.PROJECT_ID - + type_id, - version='3.5') - - self.assertRaises(exception.VersionNotFoundForAPIMethod, - self.controller.show, req, type_id) - - def test_group_types_show_not_found(self): - self.mock_object(group_types, 'get_group_type', - return_group_types_get_group_type) - - req = fakes.HTTPRequest.blank('/v3/%s/group_types/%s' % - (fake.PROJECT_ID, - fake.WILL_NOT_BE_FOUND_ID), - version=GROUP_TYPE_MICRO_VERSION) - self.assertRaises(webob.exc.HTTPNotFound, self.controller.show, - req, fake.WILL_NOT_BE_FOUND_ID) - - def test_get_default(self): - self.mock_object(group_types, 'get_default_group_type', - return_group_types_get_default) - req = fakes.HTTPRequest.blank('/v3/%s/group_types/default' % - fake.PROJECT_ID, - version=GROUP_TYPE_MICRO_VERSION) - req.method = 'GET' - res_dict = self.controller.show(req, 'default') - self.assertEqual(1, len(res_dict)) - self.assertEqual('group_type_1', res_dict['group_type']['name']) - self.assertEqual('group_type_desc_1', - res_dict['group_type']['description']) - - def test_get_default_not_found(self): - self.mock_object(group_types, 'get_default_group_type', - return_group_types_get_default_not_found) - req = fakes.HTTPRequest.blank('/v3/%s/group_types/default' % - fake.PROJECT_ID, - version=GROUP_TYPE_MICRO_VERSION) - req.method = 'GET' - - self.assertRaises(webob.exc.HTTPNotFound, - self.controller.show, req, 'default') - - def test_view_builder_show(self): - view_builder = 
views_types.ViewBuilder() - - now = timeutils.utcnow().isoformat() - raw_group_type = dict( - name='new_type', - description='new_type_desc', - is_public=True, - deleted=False, - created_at=now, - updated_at=now, - group_specs={}, - deleted_at=None, - id=42, - ) - - request = fakes.HTTPRequest.blank("/v3", - version=GROUP_TYPE_MICRO_VERSION) - output = view_builder.show(request, raw_group_type) - - self.assertIn('group_type', output) - expected_group_type = dict( - name='new_type', - description='new_type_desc', - is_public=True, - id=42, - ) - self.assertDictEqual(expected_group_type, output['group_type']) - - def test_view_builder_show_admin(self): - view_builder = views_types.ViewBuilder() - - now = timeutils.utcnow().isoformat() - raw_group_type = dict( - name='new_type', - description='new_type_desc', - is_public=True, - deleted=False, - created_at=now, - updated_at=now, - group_specs={}, - deleted_at=None, - id=42, - ) - - request = fakes.HTTPRequest.blank("/v3", use_admin_context=True, - version=GROUP_TYPE_MICRO_VERSION) - output = view_builder.show(request, raw_group_type) - - self.assertIn('group_type', output) - expected_group_type = dict( - name='new_type', - description='new_type_desc', - is_public=True, - group_specs={}, - id=42, - ) - self.assertDictEqual(expected_group_type, output['group_type']) - - def __test_view_builder_show_qos_specs_id_policy(self): - with mock.patch.object(common, - 'validate_policy', - side_effect=[False, True]): - view_builder = views_types.ViewBuilder() - now = timeutils.utcnow().isoformat() - raw_group_type = dict( - name='new_type', - description='new_type_desc', - is_public=True, - deleted=False, - created_at=now, - updated_at=now, - deleted_at=None, - id=42, - ) - - request = fakes.HTTPRequest.blank("/v3", - version=GROUP_TYPE_MICRO_VERSION) - output = view_builder.show(request, raw_group_type) - - self.assertIn('group_type', output) - expected_group_type = dict( - name='new_type', - description='new_type_desc', - 
is_public=True, - id=42, - ) - self.assertDictEqual(expected_group_type, output['group_type']) - - def test_view_builder_show_group_specs_policy(self): - with mock.patch.object(common, - 'validate_policy', - side_effect=[True, False]): - view_builder = views_types.ViewBuilder() - now = timeutils.utcnow().isoformat() - raw_group_type = dict( - name='new_type', - description='new_type_desc', - is_public=True, - deleted=False, - created_at=now, - updated_at=now, - group_specs={}, - deleted_at=None, - id=42, - ) - - request = fakes.HTTPRequest.blank("/v3", - version=GROUP_TYPE_MICRO_VERSION) - output = view_builder.show(request, raw_group_type) - - self.assertIn('group_type', output) - expected_group_type = dict( - name='new_type', - description='new_type_desc', - group_specs={}, - is_public=True, - id=42, - ) - self.assertDictEqual(expected_group_type, output['group_type']) - - def test_view_builder_show_pass_all_policy(self): - with mock.patch.object(common, - 'validate_policy', - side_effect=[True, True]): - view_builder = views_types.ViewBuilder() - now = timeutils.utcnow().isoformat() - raw_group_type = dict( - name='new_type', - description='new_type_desc', - is_public=True, - deleted=False, - created_at=now, - updated_at=now, - group_specs={}, - deleted_at=None, - id=42, - ) - - request = fakes.HTTPRequest.blank("/v3", - version=GROUP_TYPE_MICRO_VERSION) - output = view_builder.show(request, raw_group_type) - - self.assertIn('group_type', output) - expected_group_type = dict( - name='new_type', - description='new_type_desc', - group_specs={}, - is_public=True, - id=42, - ) - self.assertDictEqual(expected_group_type, output['group_type']) - - def test_view_builder_list(self): - view_builder = views_types.ViewBuilder() - - now = timeutils.utcnow().isoformat() - raw_group_types = [] - for i in range(0, 10): - raw_group_types.append( - dict( - name='new_type', - description='new_type_desc', - is_public=True, - deleted=False, - created_at=now, - updated_at=now, - 
group_specs={}, - deleted_at=None, - id=42 + i - ) - ) - - request = fakes.HTTPRequest.blank("/v3", - version=GROUP_TYPE_MICRO_VERSION) - output = view_builder.index(request, raw_group_types) - - self.assertIn('group_types', output) - for i in range(0, 10): - expected_group_type = dict( - name='new_type', - description='new_type_desc', - is_public=True, - id=42 + i - ) - self.assertDictEqual(expected_group_type, - output['group_types'][i]) - - def test_view_builder_list_admin(self): - view_builder = views_types.ViewBuilder() - - now = timeutils.utcnow().isoformat() - raw_group_types = [] - for i in range(0, 10): - raw_group_types.append( - dict( - name='new_type', - description='new_type_desc', - is_public=True, - deleted=False, - created_at=now, - updated_at=now, - group_specs={}, - deleted_at=None, - id=42 + i - ) - ) - - request = fakes.HTTPRequest.blank("/v3", use_admin_context=True, - version=GROUP_TYPE_MICRO_VERSION) - output = view_builder.index(request, raw_group_types) - - self.assertIn('group_types', output) - for i in range(0, 10): - expected_group_type = dict( - name='new_type', - description='new_type_desc', - is_public=True, - group_specs={}, - id=42 + i - ) - self.assertDictEqual(expected_group_type, - output['group_types'][i]) - - def test_check_policy(self): - self.controller._check_policy(self.ctxt) - - self.assertRaises(exception.PolicyNotAuthorized, - self.controller._check_policy, - self.user_ctxt) - - self.specs_controller._check_policy(self.ctxt) - - self.assertRaises(exception.PolicyNotAuthorized, - self.specs_controller._check_policy, - self.user_ctxt) diff --git a/cinder/tests/unit/api/v3/test_groups.py b/cinder/tests/unit/api/v3/test_groups.py deleted file mode 100644 index d89fddb46..000000000 --- a/cinder/tests/unit/api/v3/test_groups.py +++ /dev/null @@ -1,1322 +0,0 @@ -# Copyright (C) 2016 EMC Corporation. -# All Rights Reserved. 
-# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -""" -Tests for group code. -""" - -import ddt -import mock -from six.moves import http_client -import webob - -from cinder.api.v3 import groups as v3_groups -from cinder import context -from cinder import db -from cinder import exception -import cinder.group -from cinder import objects -from cinder.objects import fields -from cinder import test -from cinder.tests.unit.api import fakes -from cinder.tests.unit.api.v3 import fakes as v3_fakes -from cinder.tests.unit import fake_constants as fake -from cinder.tests.unit import utils -from cinder.volume import api as volume_api - -GROUP_MICRO_VERSION = '3.13' -GROUP_FROM_SRC_MICRO_VERSION = '3.14' -GROUP_REPLICATION_MICRO_VERSION = '3.38' -INVALID_GROUP_REPLICATION_MICRO_VERSION = '3.37' - - -@ddt.ddt -class GroupsAPITestCase(test.TestCase): - """Test Case for groups API.""" - - def setUp(self): - super(GroupsAPITestCase, self).setUp() - self.controller = v3_groups.GroupsController() - self.group_api = cinder.group.API() - self.ctxt = context.RequestContext(fake.USER_ID, fake.PROJECT_ID, - auth_token=True, - is_admin=True) - self.user_ctxt = context.RequestContext( - fake.USER_ID, fake.PROJECT_ID, auth_token=True) - self.volume_type1 = self._create_volume_type(id=fake.VOLUME_TYPE_ID) - self.group1 = self._create_group() - self.group2 = self._create_group() - self.group3 = self._create_group(ctxt=self.user_ctxt) - self.addCleanup(self._cleanup) - - def _cleanup(self): - 
self.group1.destroy() - self.group2.destroy() - self.group3.destroy() - db.volume_type_destroy(self.ctxt, self.volume_type1.id) - - def _create_group( - self, - ctxt=None, - name='test_group', - description='this is a test group', - group_type_id=fake.GROUP_TYPE_ID, - volume_type_ids=[fake.VOLUME_TYPE_ID], - availability_zone='az1', - host='fakehost', - status=fields.GroupStatus.CREATING, - replication_status=fields.ReplicationStatus.DISABLED, - **kwargs): - """Create a group object.""" - ctxt = ctxt or self.ctxt - group = objects.Group(ctxt) - group.user_id = fake.USER_ID - group.project_id = fake.PROJECT_ID - group.availability_zone = availability_zone - group.name = name - group.description = description - group.group_type_id = group_type_id - group.volume_type_ids = volume_type_ids - group.host = host - group.status = status - group.replication_status = replication_status - group.update(kwargs) - group.create() - return group - - def _create_volume_type( - self, - ctxt=None, - id=fake.VOLUME_TYPE_ID, - name='test_volume_type', - description='this is a test volume type', - extra_specs={"test_key": "test_val"}, - testcase_instance=None, - **kwargs): - """Create a volume type.""" - ctxt = ctxt or self.ctxt - vol_type = utils.create_volume_type( - ctxt, - testcase_instance=testcase_instance, - id=id, - name=name, - description=description, - extra_specs=extra_specs, - **kwargs) - return vol_type - - @mock.patch('cinder.objects.volume_type.VolumeTypeList.get_all_by_group') - @mock.patch('cinder.objects.volume.VolumeList.get_all_by_generic_group') - def test_show_group(self, mock_vol_get_all_by_group, - mock_vol_type_get_all_by_group): - volume_objs = [objects.Volume(context=self.ctxt, id=i) - for i in [fake.VOLUME_ID]] - volumes = objects.VolumeList(context=self.ctxt, objects=volume_objs) - mock_vol_get_all_by_group.return_value = volumes - - vol_type_objs = [objects.VolumeType(context=self.ctxt, id=i) - for i in [fake.VOLUME_TYPE_ID]] - vol_types = 
objects.VolumeTypeList(context=self.ctxt, - objects=vol_type_objs) - mock_vol_type_get_all_by_group.return_value = vol_types - - req = fakes.HTTPRequest.blank('/v3/%s/groups/%s' % - (fake.PROJECT_ID, self.group1.id), - version=GROUP_MICRO_VERSION) - res_dict = self.controller.show(req, self.group1.id) - - self.assertEqual(1, len(res_dict)) - self.assertEqual('az1', - res_dict['group']['availability_zone']) - self.assertEqual('this is a test group', - res_dict['group']['description']) - self.assertEqual('test_group', - res_dict['group']['name']) - self.assertEqual('creating', - res_dict['group']['status']) - self.assertEqual([fake.VOLUME_TYPE_ID], - res_dict['group']['volume_types']) - - @ddt.data(('3.24', False), ('3.24', True), ('3.25', False), ('3.25', True)) - @ddt.unpack - @mock.patch('cinder.objects.volume_type.VolumeTypeList.get_all_by_group') - @mock.patch('cinder.objects.volume.VolumeList.get_all_by_generic_group') - def test_list_group_with_list_volume(self, version, has_list_volume, - mock_vol_get_all_by_group, - mock_vol_type_get_all_by_group): - volume_objs = [objects.Volume(context=self.ctxt, id=i) - for i in [fake.VOLUME_ID]] - volumes = objects.VolumeList(context=self.ctxt, objects=volume_objs) - mock_vol_get_all_by_group.return_value = volumes - - vol_type_objs = [objects.VolumeType(context=self.ctxt, id=i) - for i in [fake.VOLUME_TYPE_ID]] - vol_types = objects.VolumeTypeList(context=self.ctxt, - objects=vol_type_objs) - mock_vol_type_get_all_by_group.return_value = vol_types - - if has_list_volume: - req = fakes.HTTPRequest.blank( - '/v3/%s/groups/detail?list_volume=True' % fake.PROJECT_ID, - version=version) - else: - req = fakes.HTTPRequest.blank('/v3/%s/groups/detail' % - fake.PROJECT_ID, - version=version) - res_dict = self.controller.detail(req) - - # If the microversion >= 3.25 and "list_volume=True", "volumes" should - # be contained in the response body. Else,"volumes" should not be - # contained in the response body. 
- self.assertEqual(3, len(res_dict['groups'])) - if (version, has_list_volume) == ('3.25', True): - self.assertEqual([fake.VOLUME_ID], - res_dict['groups'][0]['volumes']) - else: - self.assertIsNone(res_dict['groups'][0].get('volumes', None)) - - # "volumes" should not be contained in the response body when list - # groups without detail. - res_dict = self.controller.index(req) - self.assertIsNone(res_dict['groups'][0].get('volumes', None)) - - @mock.patch('cinder.objects.volume_type.VolumeTypeList.get_all_by_group') - @mock.patch('cinder.objects.volume.VolumeList.get_all_by_generic_group') - def test_show_group_with_list_volume(self, mock_vol_get_all_by_group, - mock_vol_type_get_all_by_group): - volume_objs = [objects.Volume(context=self.ctxt, id=i) - for i in [fake.VOLUME_ID]] - volumes = objects.VolumeList(context=self.ctxt, objects=volume_objs) - mock_vol_get_all_by_group.return_value = volumes - - vol_type_objs = [objects.VolumeType(context=self.ctxt, id=i) - for i in [fake.VOLUME_TYPE_ID]] - vol_types = objects.VolumeTypeList(context=self.ctxt, - objects=vol_type_objs) - mock_vol_type_get_all_by_group.return_value = vol_types - - # If the microversion >= 3.25 and "list_volume=True", "volumes" should - # be contained in the response body. - req = fakes.HTTPRequest.blank('/v3/%s/groups/%s?list_volume=True' % - (fake.PROJECT_ID, self.group1.id), - version='3.25') - res_dict = self.controller.show(req, self.group1.id) - self.assertEqual(1, len(res_dict)) - self.assertEqual([fake.VOLUME_ID], - res_dict['group']['volumes']) - - # If the microversion >= 3.25 but "list_volume" is missing, "volumes" - # should not be contained in the response body. 
- req = fakes.HTTPRequest.blank('/v3/%s/groups/%s' % - (fake.PROJECT_ID, self.group1.id), - version='3.25') - res_dict = self.controller.show(req, self.group1.id) - self.assertEqual(1, len(res_dict)) - self.assertIsNone(res_dict['group'].get('volumes', None)) - - # If the microversion < 3.25, "volumes" should not be contained in the - # response body. - req = fakes.HTTPRequest.blank('/v3/%s/groups/%s?list_volume=True' % - (fake.PROJECT_ID, self.group1.id), - version='3.24') - res_dict = self.controller.show(req, self.group1.id) - self.assertEqual(1, len(res_dict)) - self.assertIsNone(res_dict['group'].get('volumes', None)) - - def test_show_group_with_group_NotFound(self): - req = fakes.HTTPRequest.blank('/v3/%s/groups/%s' % - (fake.PROJECT_ID, - fake.WILL_NOT_BE_FOUND_ID), - version=GROUP_MICRO_VERSION) - self.assertRaises(exception.GroupNotFound, self.controller.show, - req, fake.WILL_NOT_BE_FOUND_ID) - - @ddt.data('3.30', '3.31', '3.34') - @mock.patch('cinder.api.common.reject_invalid_filters') - def test_group_list_with_general_filter(self, version, mock_update): - url = '/v3/%s/groups' % fake.PROJECT_ID - req = fakes.HTTPRequest.blank(url, - version=version, - use_admin_context=False) - self.controller.index(req) - - if version != '3.30': - support_like = True if version == '3.34' else False - mock_update.assert_called_once_with(req.environ['cinder.context'], - mock.ANY, 'group', - support_like) - - def test_list_groups_json(self): - self.group2.group_type_id = fake.GROUP_TYPE2_ID - # TODO(geguileo): One `volume_type_ids` gets sorted out make proper - # changes here - # self.group2.volume_type_ids = [fake.VOLUME_TYPE2_ID] - - self.group2.save() - - self.group3.group_type_id = fake.GROUP_TYPE3_ID - # TODO(geguileo): One `volume_type_ids` gets sorted out make proper - # changes here - # self.group3.volume_type_ids = [fake.VOLUME_TYPE3_ID] - self.group3.save() - - req = fakes.HTTPRequest.blank('/v3/%s/groups' % fake.PROJECT_ID, - version=GROUP_MICRO_VERSION) - 
res_dict = self.controller.index(req) - - self.assertEqual(1, len(res_dict)) - self.assertEqual(self.group3.id, - res_dict['groups'][0]['id']) - self.assertEqual('test_group', - res_dict['groups'][0]['name']) - self.assertEqual(self.group2.id, - res_dict['groups'][1]['id']) - self.assertEqual('test_group', - res_dict['groups'][1]['name']) - self.assertEqual(self.group1.id, - res_dict['groups'][2]['id']) - self.assertEqual('test_group', - res_dict['groups'][2]['name']) - - @ddt.data(False, True) - def test_list_groups_with_limit(self, is_detail): - url = '/v3/%s/groups?limit=1' % fake.PROJECT_ID - if is_detail: - url = '/v3/%s/groups/detail?limit=1' % fake.PROJECT_ID - req = fakes.HTTPRequest.blank(url, version=GROUP_MICRO_VERSION) - - if is_detail: - res_dict = self.controller.detail(req) - else: - res_dict = self.controller.index(req) - - self.assertEqual(2, len(res_dict)) - self.assertEqual(1, len(res_dict['groups'])) - self.assertEqual(self.group3.id, - res_dict['groups'][0]['id']) - next_link = ( - 'http://localhost/v3/%s/groups?limit=' - '1&marker=%s' % - (fake.PROJECT_ID, res_dict['groups'][0]['id'])) - self.assertEqual(next_link, - res_dict['group_links'][0]['href']) - if is_detail: - self.assertIn('description', res_dict['groups'][0].keys()) - - @ddt.data(False, True) - def test_list_groups_with_offset(self, is_detail): - url = '/v3/%s/groups?offset=1' % fake.PROJECT_ID - if is_detail: - url = '/v3/%s/groups/detail?offset=1' % fake.PROJECT_ID - req = fakes.HTTPRequest.blank(url, version=GROUP_MICRO_VERSION) - res_dict = self.controller.index(req) - - self.assertEqual(1, len(res_dict)) - self.assertEqual(2, len(res_dict['groups'])) - self.assertEqual(self.group2.id, - res_dict['groups'][0]['id']) - self.assertEqual(self.group1.id, - res_dict['groups'][1]['id']) - - @ddt.data(False, True) - def test_list_groups_with_offset_out_of_range(self, is_detail): - url = ('/v3/%s/groups?offset=234523423455454' % - fake.PROJECT_ID) - if is_detail: - url = 
('/v3/%s/groups/detail?offset=234523423455454' % - fake.PROJECT_ID) - req = fakes.HTTPRequest.blank(url, version=GROUP_MICRO_VERSION) - if is_detail: - self.assertRaises(webob.exc.HTTPBadRequest, self.controller.detail, - req) - else: - self.assertRaises(webob.exc.HTTPBadRequest, self.controller.index, - req) - - @ddt.data(False, True) - def test_list_groups_with_limit_and_offset(self, is_detail): - url = '/v3/%s/groups?limit=2&offset=1' % fake.PROJECT_ID - if is_detail: - url = ('/v3/%s/groups/detail?limit=2&offset=1' % - fake.PROJECT_ID) - req = fakes.HTTPRequest.blank(url, version=GROUP_MICRO_VERSION) - - if is_detail: - res_dict = self.controller.detail(req) - else: - res_dict = self.controller.index(req) - - self.assertEqual(2, len(res_dict)) - self.assertEqual(2, len(res_dict['groups'])) - self.assertEqual(self.group2.id, - res_dict['groups'][0]['id']) - self.assertEqual(self.group1.id, - res_dict['groups'][1]['id']) - if is_detail: - self.assertIn('description', res_dict['groups'][0].keys()) - - @ddt.data(False, True) - def test_list_groups_with_filter(self, is_detail): - # Create a group with user context - url = ('/v3/%s/groups?' - 'all_tenants=True&id=%s') % (fake.PROJECT_ID, - self.group3.id) - if is_detail: - url = ('/v3/%s/groups/detail?' 
- 'all_tenants=True&id=%s') % (fake.PROJECT_ID, - self.group3.id) - req = fakes.HTTPRequest.blank(url, version=GROUP_MICRO_VERSION, - use_admin_context=True) - - if is_detail: - res_dict = self.controller.detail(req) - else: - res_dict = self.controller.index(req) - - self.assertEqual(1, len(res_dict)) - self.assertEqual(1, len(res_dict['groups'])) - self.assertEqual(self.group3.id, - res_dict['groups'][0]['id']) - if is_detail: - self.assertIn('description', res_dict['groups'][0].keys()) - - @ddt.data(False, True) - def test_list_groups_with_sort(self, is_detail): - url = '/v3/%s/groups?sort=id:asc' % fake.PROJECT_ID - if is_detail: - url = ('/v3/%s/groups/detail?sort=id:asc' % - fake.PROJECT_ID) - req = fakes.HTTPRequest.blank(url, version=GROUP_MICRO_VERSION) - expect_result = [self.group1.id, self.group2.id, - self.group3.id] - expect_result.sort() - - if is_detail: - res_dict = self.controller.detail(req) - else: - res_dict = self.controller.index(req) - - self.assertEqual(1, len(res_dict)) - self.assertEqual(3, len(res_dict['groups'])) - self.assertEqual(expect_result[0], - res_dict['groups'][0]['id']) - self.assertEqual(expect_result[1], - res_dict['groups'][1]['id']) - self.assertEqual(expect_result[2], - res_dict['groups'][2]['id']) - if is_detail: - self.assertIn('description', res_dict['groups'][0].keys()) - - @mock.patch('cinder.objects.volume_type.VolumeTypeList.get_all_by_group') - def test_list_groups_detail_json(self, mock_vol_type_get_all_by_group): - volume_type_ids = [fake.VOLUME_TYPE_ID, fake.VOLUME_TYPE2_ID] - vol_type_objs = [objects.VolumeType(context=self.ctxt, id=i) - for i in volume_type_ids] - vol_types = objects.VolumeTypeList(context=self.ctxt, - objects=vol_type_objs) - mock_vol_type_get_all_by_group.return_value = vol_types - - # TODO(geguileo): One `volume_type_ids` gets sorted out make proper - # changes here - # self.group1.volume_type_ids = volume_type_ids - # self.group1.save() - # self.group2.volume_type_ids = volume_type_ids - 
# self.group2.save() - # self.group3.volume_type_ids = volume_type_ids - # self.group3.save() - req = fakes.HTTPRequest.blank('/v3/%s/groups/detail' % - fake.PROJECT_ID, - version=GROUP_MICRO_VERSION) - res_dict = self.controller.detail(req) - - self.assertEqual(1, len(res_dict)) - index = 0 - for group in [self.group3, self.group2, self.group1]: - self.assertEqual(group.id, - res_dict['groups'][index]['id']) - self.assertEqual([fake.VOLUME_TYPE_ID, fake.VOLUME_TYPE2_ID], - res_dict['groups'][index]['volume_types']) - self.assertEqual('test_group', - res_dict['groups'][index]['name']) - self.assertTrue({'availability_zone', 'description', - 'status'}.issubset( - set(res_dict['groups'][index].keys()))) - index += 1 - - @ddt.data(False, True) - @mock.patch( - 'cinder.api.openstack.wsgi.Controller.validate_name_and_description') - def test_create_group_json(self, use_group_type_name, mock_validate): - # Create volume types and group type - vol_type = 'test' - vol_type_id = db.volume_type_create( - self.ctxt, - {'name': vol_type, 'extra_specs': {}}).get('id') - grp_type_name = 'test_grp_type' - grp_type = db.group_type_create( - self.ctxt, - {'name': grp_type_name, 'group_specs': {}}).get('id') - if use_group_type_name: - grp_type = grp_type_name - body = {"group": {"name": "group1", - "volume_types": [vol_type_id], - "group_type": grp_type, - "description": - "Group 1", }} - req = fakes.HTTPRequest.blank('/v3/%s/groups' % fake.PROJECT_ID, - version=GROUP_MICRO_VERSION) - res_dict = self.controller.create(req, body) - - self.assertEqual(1, len(res_dict)) - self.assertIn('id', res_dict['group']) - self.assertTrue(mock_validate.called) - - group_id = res_dict['group']['id'] - objects.Group.get_by_id(self.ctxt, group_id) - - def test_create_group_with_no_body(self): - # omit body from the request - req = fakes.HTTPRequest.blank('/v3/%s/groups' % fake.PROJECT_ID, - version=GROUP_MICRO_VERSION) - self.assertRaises(webob.exc.HTTPBadRequest, self.controller.create, - req, 
None) - - def test_delete_group_available(self): - self.group1.status = fields.GroupStatus.AVAILABLE - self.group1.save() - req = fakes.HTTPRequest.blank('/v3/%s/groups/%s/action' % - (fake.PROJECT_ID, self.group1.id), - version=GROUP_MICRO_VERSION) - body = {"delete": {"delete-volumes": False}} - res_dict = self.controller.delete_group( - req, self.group1.id, body) - - group = objects.Group.get_by_id( - self.ctxt, self.group1.id) - self.assertEqual(http_client.ACCEPTED, res_dict.status_int) - self.assertEqual(fields.GroupStatus.DELETING, group.status) - - def test_delete_group_available_no_delete_volumes(self): - self.group1.status = fields.GroupStatus.AVAILABLE - self.group1.save() - req = fakes.HTTPRequest.blank('/v3/%s/groups/%s/action' % - (fake.PROJECT_ID, self.group1.id), - version=GROUP_MICRO_VERSION) - body = {"delete": {"delete-volumes": False}} - res_dict = self.controller.delete_group( - req, self.group1.id, body) - - group = objects.Group.get_by_id( - self.ctxt, self.group1.id) - self.assertEqual(http_client.ACCEPTED, res_dict.status_int) - self.assertEqual(fields.GroupStatus.DELETING, - group.status) - - def test_delete_group_with_group_NotFound(self): - req = fakes.HTTPRequest.blank('/v3/%s/groups/%s/action' % - (fake.PROJECT_ID, - fake.WILL_NOT_BE_FOUND_ID), - version=GROUP_MICRO_VERSION) - body = {"delete": {"delete-volumes": False}} - self.assertRaises(exception.GroupNotFound, - self.controller.delete_group, - req, fake.WILL_NOT_BE_FOUND_ID, body) - - def test_delete_group_with_invalid_group(self): - req = fakes.HTTPRequest.blank('/v3/%s/groups/%s/action' % - (fake.PROJECT_ID, - self.group1.id), - version=GROUP_MICRO_VERSION) - body = {"delete": {"delete-volumes": False}} - self.assertRaises(webob.exc.HTTPBadRequest, - self.controller.delete_group, - req, self.group1.id, body) - - def test_delete_group_invalid_delete_volumes(self): - req = fakes.HTTPRequest.blank('/v3/%s/groups/%s/action' % - (fake.PROJECT_ID, - self.group1.id), - 
version=GROUP_MICRO_VERSION) - body = {"delete": {"delete-volumes": True}} - res_dict = self.controller.delete_group( - req, self.group1.id, body) - - group = objects.Group.get_by_id( - self.ctxt, self.group1.id) - self.assertEqual(http_client.ACCEPTED, res_dict.status_int) - self.assertEqual(fields.GroupStatus.DELETING, group.status) - - def test_delete_group_no_host(self): - self.group1.host = None - self.group1.status = fields.GroupStatus.ERROR - self.group1.save() - req = fakes.HTTPRequest.blank('/v3/%s/groups/%s/action' % - (fake.PROJECT_ID, - self.group1.id), - version=GROUP_MICRO_VERSION) - body = {"delete": {"delete-volumes": True}} - res_dict = self.controller.delete_group( - req, self.group1.id, body) - - self.assertEqual(http_client.ACCEPTED, res_dict.status_int) - group = objects.Group.get_by_id( - context.get_admin_context(read_deleted='yes'), - self.group1.id) - self.assertEqual(fields.GroupStatus.DELETED, group.status) - self.assertIsNone(group.host) - - def test_create_delete_group_update_quota(self): - name = 'mygroup' - description = 'group 1' - grp_type = {'id': fake.GROUP_TYPE_ID, 'name': 'group_type'} - fake_type = {'id': fake.VOLUME_TYPE_ID, 'name': 'fake_type'} - self.mock_object(db, 'volume_types_get_by_name_or_id', - return_value=[fake_type]) - self.mock_object(db, 'group_type_get', return_value=grp_type) - self.mock_object(self.group_api, '_cast_create_group') - self.mock_object(self.group_api, 'update_quota') - group = self.group_api.create(self.ctxt, name, description, - grp_type['id'], [fake_type['id']]) - self.group_api.update_quota.assert_called_once_with( - self.ctxt, group, 1) - - self.assertEqual(fields.GroupStatus.CREATING, group.status) - self.assertIsNone(group.host) - self.group_api.update_quota.reset_mock() - group.status = fields.GroupStatus.ERROR - self.group_api.delete(self.ctxt, group) - - self.group_api.update_quota.assert_called_once_with( - self.ctxt, group, -1, self.ctxt.project_id) - group = objects.Group.get_by_id( - 
context.get_admin_context(read_deleted='yes'), - group.id) - self.assertEqual(fields.GroupStatus.DELETED, group.status) - - @mock.patch('cinder.group.api.API.create') - def test_create_group_failed_exceeded_quota(self, mock_group_create): - mock_group_create.side_effect = exception.GroupLimitExceeded(allowed=1) - name = 'group1' - body = {"group": {"group_type": fake.GROUP_TYPE_ID, - "volume_types": [fake.VOLUME_TYPE_ID], - "name": name, - "description": - "Group 1", }} - req = fakes.HTTPRequest.blank('/v3/%s/groups' % fake.PROJECT_ID, - version=GROUP_MICRO_VERSION) - ex = self.assertRaises(exception.GroupLimitExceeded, - self.controller.create, - req, body) - self.assertEqual(http_client.REQUEST_ENTITY_TOO_LARGE, ex.code) - - def test_delete_group_with_invalid_body(self): - self.group1.status = fields.GroupStatus.AVAILABLE - self.group1.save() - req = fakes.HTTPRequest.blank('/v3/%s/groups/%s/action' % - (fake.PROJECT_ID, self.group1.id), - version=GROUP_MICRO_VERSION) - body = {"invalid_request_element": {"delete-volumes": False}} - self.assertRaises(webob.exc.HTTPBadRequest, - self.controller.delete_group, - req, self.group1.id, body) - - def test_delete_group_with_invalid_delete_volumes_value_in_body(self): - self.group1.status = fields.GroupStatus.AVAILABLE - self.group1.save() - req = fakes.HTTPRequest.blank('/v3/%s/groups/%s/action' % - (fake.PROJECT_ID, self.group1.id), - version=GROUP_MICRO_VERSION) - body = {"delete": {"delete-volumes": "abcd"}} - self.assertRaises(webob.exc.HTTPBadRequest, - self.controller.delete_group, - req, self.group1.id, body) - - def test_delete_group_with_empty_delete_volumes_value_in_body(self): - self.group1.status = fields.GroupStatus.AVAILABLE - self.group1.save() - req = fakes.HTTPRequest.blank('/v3/%s/groups/%s/action' % - (fake.PROJECT_ID, self.group1.id), - version=GROUP_MICRO_VERSION) - body = {"delete": {"delete-volumes": ""}} - self.assertRaises(webob.exc.HTTPBadRequest, - self.controller.delete_group, - req, 
self.group1.id, body) - - def test_delete_group_with_group_snapshot(self): - self.group1.status = fields.GroupStatus.AVAILABLE - self.group1.save() - g_snapshot = utils.create_group_snapshot(self.ctxt, self.group1.id) - - req = fakes.HTTPRequest.blank('/v3/%s/groups/%s/action' % - (fake.PROJECT_ID, self.group1.id), - version=GROUP_MICRO_VERSION) - body = {"delete": {"delete-volumes": True}} - self.assertRaises(webob.exc.HTTPBadRequest, - self.controller.delete_group, - req, self.group1.id, body) - - g_snapshot.destroy() - - res_dict = self.controller.delete_group( - req, self.group1.id, body) - - group = objects.Group.get_by_id( - self.ctxt, self.group1.id) - self.assertEqual(http_client.ACCEPTED, res_dict.status_int) - self.assertEqual(fields.GroupStatus.DELETING, group.status) - - def test_delete_group_delete_volumes(self): - self.group1.status = fields.GroupStatus.AVAILABLE - self.group1.save() - vol = utils.create_volume(self.ctxt, group_id=self.group1.id) - req = fakes.HTTPRequest.blank('/v3/%s/groups/%s/action' % - (fake.PROJECT_ID, self.group1.id), - version=GROUP_MICRO_VERSION) - body = {"delete": {"delete-volumes": True}} - res_dict = self.controller.delete_group( - req, self.group1.id, body) - - group = objects.Group.get_by_id( - self.ctxt, self.group1.id) - self.assertEqual(http_client.ACCEPTED, res_dict.status_int) - self.assertEqual(fields.GroupStatus.DELETING, group.status) - - vol.destroy() - - def test_delete_group_delete_volumes_with_attached_volumes(self): - self.group1.status = fields.GroupStatus.AVAILABLE - self.group1.save() - vol = utils.create_volume(self.ctxt, group_id=self.group1.id, - attach_status='attached') - req = fakes.HTTPRequest.blank('/v3/%s/groups/%s/action' % - (fake.PROJECT_ID, self.group1.id), - version=GROUP_MICRO_VERSION) - body = {"delete": {"delete-volumes": True}} - self.assertRaises(webob.exc.HTTPBadRequest, - self.controller.delete_group, - req, self.group1.id, body) - - vol.destroy() - - def 
test_delete_group_delete_volumes_with_snapshots(self): - self.group1.status = fields.GroupStatus.AVAILABLE - self.group1.save() - vol = utils.create_volume(self.ctxt, group_id=self.group1.id) - utils.create_snapshot(self.ctxt, vol.id) - req = fakes.HTTPRequest.blank('/v3/%s/groups/%s/action' % - (fake.PROJECT_ID, self.group1.id), - version=GROUP_MICRO_VERSION) - body = {"delete": {"delete-volumes": True}} - self.assertRaises(webob.exc.HTTPBadRequest, - self.controller.delete_group, - req, self.group1.id, body) - - vol.destroy() - - def test_delete_group_delete_volumes_with_deleted_snapshots(self): - self.group1.status = fields.GroupStatus.AVAILABLE - self.group1.save() - vol = utils.create_volume(self.ctxt, group_id=self.group1.id) - utils.create_snapshot(self.ctxt, vol.id, - status=fields.SnapshotStatus.DELETED, - deleted=True) - req = fakes.HTTPRequest.blank('/v3/%s/groups/%s/action' % - (fake.PROJECT_ID, self.group1.id), - version=GROUP_MICRO_VERSION) - body = {"delete": {"delete-volumes": True}} - res_dict = self.controller.delete_group( - req, self.group1.id, body) - - group = objects.Group.get_by_id( - self.ctxt, self.group1.id) - self.assertEqual(http_client.ACCEPTED, res_dict.status_int) - self.assertEqual(fields.GroupStatus.DELETING, group.status) - - vol.destroy() - - def test_create_group_failed_no_group_type(self): - name = 'group1' - body = {"group": {"volume_types": [fake.VOLUME_TYPE_ID], - "name": name, - "description": - "Group 1", }} - req = fakes.HTTPRequest.blank('/v3/%s/groups' % fake.PROJECT_ID, - version=GROUP_MICRO_VERSION) - self.assertRaises(webob.exc.HTTPBadRequest, - self.controller.create, - req, body) - - def test_create_group_failed_no_volume_types(self): - name = 'group1' - body = {"group": {"group_type": fake.GROUP_TYPE_ID, - "name": name, - "description": - "Group 1", }} - req = fakes.HTTPRequest.blank('/v3/%s/groups' % fake.PROJECT_ID, - version=GROUP_MICRO_VERSION) - self.assertRaises(webob.exc.HTTPBadRequest, - 
self.controller.create, - req, body) - - @mock.patch( - 'cinder.api.openstack.wsgi.Controller.validate_name_and_description') - def test_update_group_success(self, mock_validate): - volume_type_id = fake.VOLUME_TYPE_ID - self.group1.status = fields.GroupStatus.AVAILABLE - self.group1.host = 'test_host' - # TODO(geguileo): One `volume_type_ids` gets sorted out make proper - # changes here - # self.group1.volume_type_ids = [volume_type_id] - self.group1.save() - - remove_volume = utils.create_volume( - self.ctxt, - volume_type_id=volume_type_id, - group_id=self.group1.id) - remove_volume2 = utils.create_volume( - self.ctxt, - volume_type_id=volume_type_id, - group_id=self.group1.id, - status='error') - remove_volume3 = utils.create_volume( - self.ctxt, - volume_type_id=volume_type_id, - group_id=self.group1.id, - status='error_deleting') - - self.assertEqual(fields.GroupStatus.AVAILABLE, - self.group1.status) - - group_volumes = db.volume_get_all_by_generic_group( - self.ctxt.elevated(), - self.group1.id) - group_vol_ids = [group_vol['id'] for group_vol in group_volumes] - self.assertIn(remove_volume.id, group_vol_ids) - self.assertIn(remove_volume2.id, group_vol_ids) - self.assertIn(remove_volume3.id, group_vol_ids) - - add_volume = utils.create_volume( - self.ctxt, - volume_type_id=volume_type_id) - add_volume2 = utils.create_volume( - self.ctxt, - volume_type_id=volume_type_id) - req = fakes.HTTPRequest.blank('/v3/%s/groups/%s/update' % - (fake.PROJECT_ID, self.group1.id), - version=GROUP_MICRO_VERSION) - name = 'newgroup' - description = 'New Group Description' - add_volumes = add_volume.id + "," + add_volume2.id - remove_volumes = ','.join( - [remove_volume.id, remove_volume2.id, remove_volume3.id]) - body = {"group": {"name": name, - "description": description, - "add_volumes": add_volumes, - "remove_volumes": remove_volumes, }} - res_dict = self.controller.update( - req, self.group1.id, body) - - group = objects.Group.get_by_id( - self.ctxt, self.group1.id) - 
self.assertEqual(http_client.ACCEPTED, res_dict.status_int) - self.assertTrue(mock_validate.called) - self.assertEqual(fields.GroupStatus.UPDATING, - group.status) - - remove_volume.destroy() - remove_volume2.destroy() - remove_volume3.destroy() - add_volume.destroy() - add_volume2.destroy() - - def test_update_group_add_volume_not_found(self): - self.group1.status = fields.GroupStatus.AVAILABLE - self.group1.save() - req = fakes.HTTPRequest.blank('/v3/%s/groups/%s/update' % - (fake.PROJECT_ID, self.group1.id), - version=GROUP_MICRO_VERSION) - body = {"group": {"name": None, - "description": None, - "add_volumes": "fake-volume-uuid", - "remove_volumes": None, }} - - self.assertRaises(exception.InvalidVolume, - self.controller.update, - req, self.group1.id, body) - - def test_update_group_remove_volume_not_found(self): - self.group1.status = fields.GroupStatus.AVAILABLE - self.group1.save() - req = fakes.HTTPRequest.blank('/v3/%s/groups/%s/update' % - (fake.PROJECT_ID, self.group1.id), - version=GROUP_MICRO_VERSION) - body = {"group": {"name": None, - "description": "new description", - "add_volumes": None, - "remove_volumes": "fake-volume-uuid", }} - - self.assertRaises(exception.InvalidVolume, - self.controller.update, - req, self.group1.id, body) - - def test_update_group_empty_parameters(self): - self.group1.status = fields.GroupStatus.AVAILABLE - self.group1.save() - req = fakes.HTTPRequest.blank('/v3/%s/groups/%s/update' % - (fake.PROJECT_ID, self.group1.id), - version=GROUP_MICRO_VERSION) - body = {"group": {"name": None, - "description": None, - "add_volumes": None, - "remove_volumes": None, }} - - self.assertRaises(webob.exc.HTTPBadRequest, - self.controller.update, - req, self.group1.id, body) - - def test_update_group_add_volume_invalid_state(self): - self.group1.status = fields.GroupStatus.AVAILABLE - self.group1.save() - add_volume = utils.create_volume( - self.ctxt, - volume_type_id=fake.VOLUME_TYPE_ID, - status='wrong_status') - req = 
fakes.HTTPRequest.blank('/v3/%s/groups/%s/update' % - (fake.PROJECT_ID, self.group1.id), - version=GROUP_MICRO_VERSION) - add_volumes = add_volume.id - body = {"group": {"name": "group1", - "description": "", - "add_volumes": add_volumes, - "remove_volumes": None, }} - - self.assertRaises(exception.InvalidVolume, - self.controller.update, - req, self.group1.id, body) - - add_volume.destroy() - - def test_update_group_add_volume_invalid_volume_type(self): - self.group1.status = fields.GroupStatus.AVAILABLE - self.group1.save() - wrong_type = fake.VOLUME_TYPE2_ID - add_volume = utils.create_volume( - self.ctxt, - volume_type_id=wrong_type) - req = fakes.HTTPRequest.blank('/v3/%s/groups/%s/update' % - (fake.PROJECT_ID, self.group1.id), - version=GROUP_MICRO_VERSION) - add_volumes = add_volume.id - body = {"group": {"name": "group1", - "description": "", - "add_volumes": add_volumes, - "remove_volumes": None, }} - - self.assertRaises(exception.InvalidVolume, - self.controller.update, - req, self.group1.id, body) - - add_volume.destroy() - - def test_update_group_add_volume_already_in_group(self): - self.group1.status = fields.GroupStatus.AVAILABLE - self.group1.save() - add_volume = utils.create_volume( - self.ctxt, - group_id=fake.GROUP2_ID) - req = fakes.HTTPRequest.blank('/v3/%s/groups/%s/update' % - (fake.PROJECT_ID, self.group1.id), - version=GROUP_MICRO_VERSION) - add_volumes = add_volume.id - body = {"group": {"name": "group1", - "description": "", - "add_volumes": add_volumes, - "remove_volumes": None, }} - - self.assertRaises(exception.InvalidVolume, - self.controller.update, - req, self.group1.id, body) - - add_volume.destroy() - - @ddt.data(fields.GroupStatus.CREATING, fields.GroupStatus.UPDATING) - def test_update_group_invalid_state(self, status): - self.group1.status = status - req = fakes.HTTPRequest.blank('/v3/%s/groups/%s/update' % - (fake.PROJECT_ID, self.group1.id), - version=GROUP_MICRO_VERSION) - body = {"group": {"name": "new name", - 
"description": None, - "add_volumes": None, - "remove_volumes": None, }} - - self.assertRaises(webob.exc.HTTPBadRequest, - self.controller.update, - req, self.group1.id, body) - - @ddt.data(('3.11', 'fake_group_001', - fields.GroupStatus.AVAILABLE, - exception.VersionNotFoundForAPIMethod), - ('3.19', 'fake_group_001', - fields.GroupStatus.AVAILABLE, - exception.VersionNotFoundForAPIMethod), - ('3.20', 'fake_group_001', - fields.GroupStatus.AVAILABLE, - exception.GroupNotFound), - ('3.20', None, - 'invalid_test_status', - webob.exc.HTTPBadRequest), - ) - @ddt.unpack - def test_reset_group_status_illegal(self, version, group_id, - status, exceptions): - g_id = group_id or self.group2.id - req = fakes.HTTPRequest.blank('/v3/%s/groups/%s/action' % - (fake.PROJECT_ID, g_id), - version=version) - body = {"reset_status": { - "status": status - }} - self.assertRaises(exceptions, - self.controller.reset_status, - req, g_id, body) - - def test_reset_group_status(self): - req = fakes.HTTPRequest.blank('/v3/%s/groups/%s/action' % - (fake.PROJECT_ID, self.group2.id), - version='3.20') - body = {"reset_status": { - "status": fields.GroupStatus.AVAILABLE - }} - response = self.controller.reset_status(req, - self.group2.id, body) - - group = objects.Group.get_by_id(self.ctxt, self.group2.id) - self.assertEqual(http_client.ACCEPTED, response.status_int) - self.assertEqual(fields.GroupStatus.AVAILABLE, group.status) - - @mock.patch( - 'cinder.api.openstack.wsgi.Controller.validate_name_and_description') - def test_create_group_from_src_snap(self, mock_validate): - self.mock_object(volume_api.API, "create", v3_fakes.fake_volume_create) - - group = utils.create_group(self.ctxt, - group_type_id=fake.GROUP_TYPE_ID, - volume_type_ids=[fake.VOLUME_TYPE_ID]) - volume = utils.create_volume( - self.ctxt, - group_id=group.id, - volume_type_id=fake.VOLUME_TYPE_ID) - group_snapshot = utils.create_group_snapshot( - self.ctxt, group_id=group.id, - group_type_id=group.group_type_id) - snapshot = 
utils.create_snapshot( - self.ctxt, - volume.id, - group_snapshot_id=group_snapshot.id, - status=fields.SnapshotStatus.AVAILABLE, - volume_type_id=volume.volume_type_id) - - test_grp_name = 'test grp' - body = {"create-from-src": {"name": test_grp_name, - "description": "Group 1", - "group_snapshot_id": group_snapshot.id}} - req = fakes.HTTPRequest.blank('/v3/%s/groups/action' % - fake.PROJECT_ID, - version=GROUP_FROM_SRC_MICRO_VERSION) - res_dict = self.controller.create_from_src(req, body) - - self.assertIn('id', res_dict['group']) - self.assertEqual(test_grp_name, res_dict['group']['name']) - self.assertTrue(mock_validate.called) - - grp_ref = objects.Group.get_by_id( - self.ctxt.elevated(), res_dict['group']['id']) - - grp_ref.destroy() - snapshot.destroy() - volume.destroy() - group.destroy() - group_snapshot.destroy() - - def test_create_group_from_src_grp(self): - self.mock_object(volume_api.API, "create", v3_fakes.fake_volume_create) - - source_grp = utils.create_group(self.ctxt, - group_type_id=fake.GROUP_TYPE_ID, - volume_type_ids=[fake.VOLUME_TYPE_ID]) - volume = utils.create_volume( - self.ctxt, - group_id=source_grp.id, - volume_type_id=fake.VOLUME_TYPE_ID) - - test_grp_name = 'test cg' - body = {"create-from-src": {"name": test_grp_name, - "description": "Consistency Group 1", - "source_group_id": source_grp.id}} - req = fakes.HTTPRequest.blank('/v3/%s/groups/action' % - fake.PROJECT_ID, - version=GROUP_FROM_SRC_MICRO_VERSION) - res_dict = self.controller.create_from_src(req, body) - - self.assertIn('id', res_dict['group']) - self.assertEqual(test_grp_name, res_dict['group']['name']) - - grp = objects.Group.get_by_id( - self.ctxt, res_dict['group']['id']) - grp.destroy() - volume.destroy() - source_grp.destroy() - - @mock.patch('cinder.volume.utils.is_replicated_spec', - return_value=True) - @mock.patch('cinder.volume.utils.is_group_a_type', - return_value=True) - def test_enable_replication(self, mock_rep_grp_type, mock_rep_vol_type): - req = 
fakes.HTTPRequest.blank('/v3/%s/groups/%s/action' % - (fake.PROJECT_ID, self.group3.id), - version=GROUP_REPLICATION_MICRO_VERSION) - self.group3.status = fields.GroupStatus.AVAILABLE - self.group3.save() - body = {"enable_replication": {}} - response = self.controller.enable_replication(req, - self.group3.id, body) - - group = objects.Group.get_by_id(self.ctxt, self.group3.id) - self.assertEqual(202, response.status_int) - self.assertEqual(fields.GroupStatus.AVAILABLE, group.status) - self.assertEqual(fields.ReplicationStatus.ENABLING, - group.replication_status) - - @ddt.data((True, False), (False, True), (False, False)) - @ddt.unpack - @mock.patch('cinder.volume.utils.is_replicated_spec') - @mock.patch('cinder.volume.utils.is_group_a_type') - def test_enable_replication_wrong_type(self, is_grp_rep_type, - is_vol_rep_type, - mock_rep_grp_type, - mock_rep_vol_type): - mock_rep_grp_type.return_value = is_grp_rep_type - mock_rep_vol_type.return_value = is_vol_rep_type - req = fakes.HTTPRequest.blank('/v3/%s/groups/%s/action' % - (fake.PROJECT_ID, self.group3.id), - version=GROUP_REPLICATION_MICRO_VERSION) - self.group3.status = fields.GroupStatus.AVAILABLE - self.group3.save() - body = {"enable_replication": {}} - self.assertRaises(webob.exc.HTTPBadRequest, - self.controller.enable_replication, - req, self.group3.id, body) - - @mock.patch('cinder.volume.utils.is_replicated_spec', - return_value=False) - @mock.patch('cinder.volume.utils.is_group_a_type', - return_value=True) - def test_enable_replication_wrong_group_type(self, mock_rep_grp_type, - mock_rep_vol_type): - req = fakes.HTTPRequest.blank('/v3/%s/groups/%s/action' % - (fake.PROJECT_ID, self.group3.id), - version=GROUP_REPLICATION_MICRO_VERSION) - self.group3.status = fields.GroupStatus.AVAILABLE - self.group3.save() - body = {"enable_replication": {}} - self.assertRaises(webob.exc.HTTPBadRequest, - self.controller.enable_replication, - req, self.group3.id, body) - - 
@mock.patch('cinder.volume.utils.is_replicated_spec', - return_value=True) - @mock.patch('cinder.volume.utils.is_group_a_type', - return_value=True) - @ddt.data((GROUP_REPLICATION_MICRO_VERSION, True, - fields.GroupStatus.CREATING, - webob.exc.HTTPBadRequest), - (GROUP_REPLICATION_MICRO_VERSION, False, - fields.GroupStatus.AVAILABLE, - exception.GroupNotFound), - (INVALID_GROUP_REPLICATION_MICRO_VERSION, True, - fields.GroupStatus.AVAILABLE, - exception.VersionNotFoundForAPIMethod), - ) - @ddt.unpack - def test_enable_replication_negative(self, version, not_fake, - status, exceptions, - mock_rep_grp_type, mock_rep_vol_type): - if not_fake: - group_id = self.group3.id - else: - group_id = fake.GROUP_ID - req = fakes.HTTPRequest.blank('/v3/%s/groups/%s/action' % - (fake.PROJECT_ID, group_id), - version=version) - if not_fake: - self.group3.status = status - self.group3.save() - body = {"enable_replication": {}} - self.assertRaises(exceptions, - self.controller.enable_replication, - req, group_id, body) - - @mock.patch('cinder.volume.utils.is_replicated_spec', - return_value=True) - @mock.patch('cinder.volume.utils.is_group_a_type', - return_value=True) - def test_disable_replication(self, mock_rep_grp_type, mock_rep_vol_type): - req = fakes.HTTPRequest.blank('/v3/%s/groups/%s/action' % - (fake.PROJECT_ID, self.group3.id), - version=GROUP_REPLICATION_MICRO_VERSION) - self.group3.status = fields.GroupStatus.AVAILABLE - self.group3.replication_status = fields.ReplicationStatus.ENABLED - self.group3.save() - body = {"disable_replication": {}} - response = self.controller.disable_replication(req, - self.group3.id, body) - - group = objects.Group.get_by_id(self.ctxt, self.group3.id) - self.assertEqual(202, response.status_int) - self.assertEqual(fields.GroupStatus.AVAILABLE, group.status) - self.assertEqual(fields.ReplicationStatus.DISABLING, - group.replication_status) - - @mock.patch('cinder.volume.utils.is_replicated_spec', - return_value=True) - 
@mock.patch('cinder.volume.utils.is_group_a_type', - return_value=True) - @ddt.data((GROUP_REPLICATION_MICRO_VERSION, True, - fields.GroupStatus.CREATING, - fields.ReplicationStatus.ENABLED, - webob.exc.HTTPBadRequest), - (GROUP_REPLICATION_MICRO_VERSION, True, - fields.GroupStatus.AVAILABLE, - fields.ReplicationStatus.DISABLED, - webob.exc.HTTPBadRequest), - (GROUP_REPLICATION_MICRO_VERSION, False, - fields.GroupStatus.AVAILABLE, - fields.ReplicationStatus.DISABLED, - exception.GroupNotFound), - (INVALID_GROUP_REPLICATION_MICRO_VERSION, True, - fields.GroupStatus.AVAILABLE, - fields.ReplicationStatus.ENABLED, - exception.VersionNotFoundForAPIMethod), - ) - @ddt.unpack - def test_disable_replication_negative(self, version, not_fake, - status, rep_status, exceptions, - mock_rep_grp_type, - mock_rep_vol_type): - if not_fake: - group_id = self.group3.id - else: - group_id = fake.GROUP_ID - req = fakes.HTTPRequest.blank('/v3/%s/groups/%s/action' % - (fake.PROJECT_ID, group_id), - version=version) - if not_fake: - self.group3.status = status - self.group3.replication_status = rep_status - self.group3.save() - body = {"disable_replication": {}} - self.assertRaises(exceptions, - self.controller.disable_replication, - req, group_id, body) - - @mock.patch('cinder.volume.utils.is_replicated_spec', - return_value=True) - @mock.patch('cinder.volume.utils.is_group_a_type', - return_value=True) - def test_failover_replication(self, mock_rep_grp_type, mock_rep_vol_type): - req = fakes.HTTPRequest.blank('/v3/%s/groups/%s/action' % - (fake.PROJECT_ID, self.group3.id), - version=GROUP_REPLICATION_MICRO_VERSION) - self.group3.status = fields.GroupStatus.AVAILABLE - self.group3.replication_status = fields.ReplicationStatus.ENABLED - self.group3.save() - body = {"failover_replication": {}} - response = self.controller.failover_replication(req, - self.group3.id, body) - - group = objects.Group.get_by_id(self.ctxt, self.group3.id) - self.assertEqual(202, response.status_int) - 
self.assertEqual(fields.GroupStatus.AVAILABLE, group.status) - self.assertEqual(fields.ReplicationStatus.FAILING_OVER, - group.replication_status) - - @mock.patch('cinder.volume.utils.is_replicated_spec', - return_value=True) - @mock.patch('cinder.volume.utils.is_group_a_type', - return_value=True) - @ddt.data((GROUP_REPLICATION_MICRO_VERSION, True, - fields.GroupStatus.CREATING, - fields.ReplicationStatus.ENABLED, - webob.exc.HTTPBadRequest), - (GROUP_REPLICATION_MICRO_VERSION, True, - fields.GroupStatus.AVAILABLE, - fields.ReplicationStatus.DISABLED, - webob.exc.HTTPBadRequest), - (GROUP_REPLICATION_MICRO_VERSION, False, - fields.GroupStatus.AVAILABLE, - fields.ReplicationStatus.DISABLED, - exception.GroupNotFound), - (INVALID_GROUP_REPLICATION_MICRO_VERSION, True, - fields.GroupStatus.AVAILABLE, - fields.ReplicationStatus.ENABLED, - exception.VersionNotFoundForAPIMethod), - ) - @ddt.unpack - def test_failover_replication_negative(self, version, not_fake, - status, rep_status, exceptions, - mock_rep_grp_type, - mock_rep_vol_type): - if not_fake: - group_id = self.group3.id - else: - group_id = fake.GROUP_ID - req = fakes.HTTPRequest.blank('/v3/%s/groups/%s/action' % - (fake.PROJECT_ID, group_id), - version=version) - if not_fake: - self.group3.status = status - self.group3.replication_status = rep_status - self.group3.save() - body = {"failover_replication": {}} - self.assertRaises(exceptions, - self.controller.failover_replication, - req, group_id, body) - - @mock.patch('cinder.volume.utils.is_replicated_spec', - return_value=True) - @mock.patch('cinder.volume.utils.is_group_a_type', - return_value=True) - @mock.patch('cinder.volume.rpcapi.VolumeAPI.list_replication_targets') - def test_list_replication_targets(self, mock_list_rep_targets, - mock_rep_grp_type, mock_rep_vol_type): - req = fakes.HTTPRequest.blank('/v3/%s/groups/%s/action' % - (fake.PROJECT_ID, self.group3.id), - version=GROUP_REPLICATION_MICRO_VERSION) - targets = { - 'replication_targets': [ - 
{'backend_id': 'lvm_backend_1'} - ] - } - mock_list_rep_targets.return_value = targets - self.group3.status = fields.GroupStatus.AVAILABLE - self.group3.save() - body = {"list_replication_targets": {}} - response = self.controller.list_replication_targets( - req, self.group3.id, body) - - self.assertIn('replication_targets', response) - self.assertEqual('lvm_backend_1', - response['replication_targets'][0]['backend_id']) diff --git a/cinder/tests/unit/api/v3/test_limits.py b/cinder/tests/unit/api/v3/test_limits.py deleted file mode 100644 index 14aa726b5..000000000 --- a/cinder/tests/unit/api/v3/test_limits.py +++ /dev/null @@ -1,70 +0,0 @@ -# Copyright 2017 Huawei Corporation -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -import ddt -import mock - -from cinder.api.openstack import api_version_request as api_version -from cinder.api.v3 import limits -from cinder import test -from cinder.tests.unit.api import fakes -from cinder.tests.unit import fake_constants as fake - - -@ddt.ddt -class LimitsControllerTest(test.TestCase): - def setUp(self): - super(LimitsControllerTest, self).setUp() - self.controller = limits.LimitsController() - - @ddt.data(('3.38', True), ('3.38', False), ('3.39', True), ('3.39', False)) - @mock.patch('cinder.quota.VolumeTypeQuotaEngine.get_project_quotas') - def test_get_limit_with_project_id(self, ver_project, mock_get_quotas): - max_ver, has_project = ver_project - req = fakes.HTTPRequest.blank('/v3/limits', use_admin_context=True) - if has_project: - req = fakes.HTTPRequest.blank( - '/v3/limits?project_id=%s' % fake.UUID1, - use_admin_context=True) - req.api_version_request = api_version.APIVersionRequest(max_ver) - - def get_project_quotas(context, project_id, quota_class=None, - defaults=True, usages=True): - if project_id == fake.UUID1: - return {"gigabytes": {'limit': 5}} - return {"gigabytes": {'limit': 10}} - mock_get_quotas.side_effect = get_project_quotas - - resp_dict = self.controller.index(req) - # if admin, only 3.39 and req contains project_id filter, cinder - # returns the specified project's quota. - if max_ver == '3.39' and has_project: - self.assertEqual( - 5, resp_dict['limits']['absolute']['maxTotalVolumeGigabytes']) - else: - self.assertEqual( - 10, resp_dict['limits']['absolute']['maxTotalVolumeGigabytes']) - - # if non-admin, cinder always returns self quota. 
- req = fakes.HTTPRequest.blank('/v3/limits', use_admin_context=False) - if has_project: - req = fakes.HTTPRequest.blank( - '/v3/limits?project_id=%s' % fake.UUID1, - use_admin_context=False) - req.api_version_request = api_version.APIVersionRequest(max_ver) - resp_dict = self.controller.index(req) - - self.assertEqual( - 10, resp_dict['limits']['absolute']['maxTotalVolumeGigabytes']) diff --git a/cinder/tests/unit/api/v3/test_messages.py b/cinder/tests/unit/api/v3/test_messages.py deleted file mode 100644 index 06ad44b9b..000000000 --- a/cinder/tests/unit/api/v3/test_messages.py +++ /dev/null @@ -1,156 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -import ddt -import mock -from six.moves import http_client - -from cinder.api import extensions -from cinder.api.v3 import messages -from cinder import context -from cinder import exception -from cinder.message import api as message_api -from cinder.message import message_field -from cinder import test -from cinder.tests.unit.api import fakes -from cinder.tests.unit.api.v3 import fakes as v3_fakes - - -NS = '{http://docs.openstack.org/api/openstack-block-storage/3.0/content}' - - -@ddt.ddt -class MessageApiTest(test.TestCase): - def setUp(self): - super(MessageApiTest, self).setUp() - self.ext_mgr = extensions.ExtensionManager() - self.ext_mgr.extensions = {} - self.controller = messages.MessagesController(self.ext_mgr) - - self.maxDiff = None - self.ctxt = context.RequestContext('admin', 'fakeproject', True) - - def _expected_message_from_controller(self, id): - message = v3_fakes.fake_message(id) - links = [ - {'href': 'http://localhost/v3/fakeproject/messages/%s' % id, - 'rel': 'self'}, - {'href': 'http://localhost/fakeproject/messages/%s' % id, - 'rel': 'bookmark'}, - ] - return { - 'message': { - 'id': message.get('id'), - 'user_message': "%s:%s" % ( - message_field.translate_action(message.get('action_id')), - message_field.translate_detail(message.get('detail_id'))), - 'request_id': message.get('request_id'), - 'event_id': message.get('event_id'), - 'created_at': message.get('created_at'), - 'message_level': message.get('message_level'), - 'guaranteed_until': message.get('expires_at'), - 'links': links, - } - } - - def test_show(self): - self.mock_object(message_api.API, 'get', v3_fakes.fake_message_get) - - req = fakes.HTTPRequest.blank( - '/v3/messages/%s' % fakes.FAKE_UUID, - version=messages.MESSAGES_BASE_MICRO_VERSION) - req.environ['cinder.context'] = self.ctxt - - res_dict = self.controller.show(req, fakes.FAKE_UUID) - - ex = self._expected_message_from_controller(fakes.FAKE_UUID) - self.assertEqual(ex, res_dict) - - def test_show_not_found(self): 
- self.mock_object(message_api.API, 'get', - side_effect=exception.MessageNotFound( - message_id=fakes.FAKE_UUID)) - - req = fakes.HTTPRequest.blank( - '/v3/messages/%s' % fakes.FAKE_UUID, - version=messages.MESSAGES_BASE_MICRO_VERSION) - req.environ['cinder.context'] = self.ctxt - - self.assertRaises(exception.MessageNotFound, self.controller.show, - req, fakes.FAKE_UUID) - - def test_show_pre_microversion(self): - self.mock_object(message_api.API, 'get', v3_fakes.fake_message_get) - - req = fakes.HTTPRequest.blank('/v3/messages/%s' % fakes.FAKE_UUID, - version='3.0') - req.environ['cinder.context'] = self.ctxt - - self.assertRaises(exception.VersionNotFoundForAPIMethod, - self.controller.show, req, fakes.FAKE_UUID) - - def test_delete(self): - self.mock_object(message_api.API, 'get', v3_fakes.fake_message_get) - self.mock_object(message_api.API, 'delete') - - req = fakes.HTTPRequest.blank( - '/v3/messages/%s' % fakes.FAKE_UUID, - version=messages.MESSAGES_BASE_MICRO_VERSION) - req.environ['cinder.context'] = self.ctxt - - resp = self.controller.delete(req, fakes.FAKE_UUID) - - self.assertEqual(http_client.NO_CONTENT, resp.status_int) - self.assertTrue(message_api.API.delete.called) - - def test_delete_not_found(self): - self.mock_object(message_api.API, 'get', - side_effect=exception.MessageNotFound( - message_id=fakes.FAKE_UUID)) - - req = fakes.HTTPRequest.blank( - '/v3/messages/%s' % fakes.FAKE_UUID, - version=messages.MESSAGES_BASE_MICRO_VERSION) - - self.assertRaises(exception.MessageNotFound, self.controller.delete, - req, fakes.FAKE_UUID) - - @ddt.data('3.30', '3.31', '3.34') - @mock.patch('cinder.api.common.reject_invalid_filters') - def test_message_list_with_general_filter(self, version, mock_update): - url = '/v3/%s/messages' % fakes.FAKE_UUID - req = fakes.HTTPRequest.blank(url, - version=version, - use_admin_context=False) - self.controller.index(req) - - if version != '3.30': - support_like = True if version == '3.34' else False - 
mock_update.assert_called_once_with(req.environ['cinder.context'], - mock.ANY, 'message', - support_like) - - def test_index(self): - self.mock_object(message_api.API, 'get_all', - return_value=[v3_fakes.fake_message(fakes.FAKE_UUID)]) - req = fakes.HTTPRequest.blank( - '/v3/messages/%s' % fakes.FAKE_UUID, - version=messages.MESSAGES_BASE_MICRO_VERSION) - req.environ['cinder.context'] = self.ctxt - - res_dict = self.controller.index(req) - - ex = self._expected_message_from_controller(fakes.FAKE_UUID) - expected = { - 'messages': [ex['message']] - } - self.assertDictEqual(expected, res_dict) diff --git a/cinder/tests/unit/api/v3/test_resource_filters.py b/cinder/tests/unit/api/v3/test_resource_filters.py deleted file mode 100644 index e514f949a..000000000 --- a/cinder/tests/unit/api/v3/test_resource_filters.py +++ /dev/null @@ -1,66 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -""" -Tests for resource filters API. 
-""" - -import ddt -import six - -from cinder.api import common -from cinder.api.v3 import resource_filters as v3_filters -from cinder import test -from cinder.tests.unit.api import fakes -from cinder.tests.unit import fake_constants as fake - -FILTERS_MICRO_VERSION = '3.33' - - -@ddt.ddt -class ResourceFiltersAPITestCase(test.TestCase): - """Test Case for filter API.""" - - def setUp(self): - super(ResourceFiltersAPITestCase, self).setUp() - self.controller = v3_filters.ResourceFiltersController() - - @ddt.data({'filters': {'volume': ['key1']}, - 'resource': 'volume', - 'expected_filters': [{'resource': 'volume', - 'filters': ['key1']}]}, - {'filters': {'volume': ['key1'], 'snapshot': ['key2']}, - 'resource': None, - 'expected_filters': [{'resource': 'volume', - 'filters': ['key1']}, - {'resource': 'snapshot', - 'filters': ['key2']}]}, - {'filters': {'volume': ['key1', 'key2']}, - 'resource': 'snapshot', - 'expected_filters': []}) - @ddt.unpack - def test_get_allowed_filters(self, filters, resource, expected_filters): - common._FILTERS_COLLECTION = filters - request_url = '/v3/%s/resource_filters' % fake.PROJECT_ID - if resource is not None: - request_url += '?resource=%s' % resource - req = fakes.HTTPRequest.blank(request_url, - version=FILTERS_MICRO_VERSION) - - result = self.controller.index(req) - - six.assertCountEqual(self, - list(six.viewkeys(result)), - ['resource_filters']) - six.assertCountEqual(self, - expected_filters, - result['resource_filters']) diff --git a/cinder/tests/unit/api/v3/test_snapshot_manage.py b/cinder/tests/unit/api/v3/test_snapshot_manage.py deleted file mode 100644 index 21f3fc156..000000000 --- a/cinder/tests/unit/api/v3/test_snapshot_manage.py +++ /dev/null @@ -1,194 +0,0 @@ -# Copyright (c) 2016 Stratoscale, Ltd. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. 
You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import ddt -import mock -from oslo_config import cfg -from oslo_serialization import jsonutils -from six.moves import http_client -from six.moves.urllib.parse import urlencode -import webob - -from cinder.api.v3 import router as router_v3 -from cinder import context -from cinder import objects -from cinder import test -from cinder.tests.unit.api.contrib import test_snapshot_manage as test_contrib -from cinder.tests.unit.api import fakes -from cinder.tests.unit import fake_constants as fake -from cinder.tests.unit import fake_service - - -CONF = cfg.CONF - - -def app(): - # no auth, just let environ['cinder.context'] pass through - api = router_v3.APIRouter() - mapper = fakes.urlmap.URLMap() - mapper['/v3'] = api - return mapper - - -@ddt.ddt -@mock.patch('cinder.volume.api.API.get', test_contrib.volume_get) -class SnapshotManageTest(test.TestCase): - """Test cases for cinder/api/v3/snapshot_manage.py""" - def setUp(self): - super(SnapshotManageTest, self).setUp() - self._admin_ctxt = context.RequestContext(fake.USER_ID, - fake.PROJECT_ID, - True) - - def _get_resp_post(self, body, version="3.8"): - """Helper to execute a POST manageable_snapshots API call.""" - req = webob.Request.blank('/v3/%s/manageable_snapshots' % - fake.PROJECT_ID) - req.method = 'POST' - req.headers['Content-Type'] = 'application/json' - req.headers['OpenStack-API-Version'] = 'volume ' + version - req.environ['cinder.context'] = self._admin_ctxt - req.body = jsonutils.dump_as_bytes(body) - res = req.get_response(app()) - return res - - 
@mock.patch('cinder.volume.rpcapi.VolumeAPI.manage_existing_snapshot') - @mock.patch('cinder.volume.api.API.create_snapshot_in_db') - @mock.patch('cinder.objects.service.Service.get_by_id') - def test_manage_snapshot_route(self, mock_service_get, - mock_create_snapshot, mock_rpcapi): - """Test call to manage snapshot. - - There is currently no change between the API in contrib and the API in - v3, so here we simply check that the call is routed properly, rather - than copying all the tests. - """ - mock_service_get.return_value = fake_service.fake_service_obj( - self._admin_ctxt, - binary='cinder-volume') - - body = {'snapshot': {'volume_id': fake.VOLUME_ID, 'ref': 'fake_ref'}} - res = self._get_resp_post(body) - self.assertEqual(http_client.ACCEPTED, res.status_int, res) - - def test_manage_snapshot_previous_version(self): - body = {'snapshot': {'volume_id': fake.VOLUME_ID, 'ref': 'fake_ref'}} - res = self._get_resp_post(body, version="3.7") - self.assertEqual(http_client.NOT_FOUND, res.status_int, res) - - def _get_resp_get(self, host, detailed, paging, version="3.8", **kwargs): - """Helper to execute a GET os-snapshot-manage API call.""" - params = {'host': host} if host else {} - params.update(kwargs) - if paging: - params.update({'marker': '1234', 'limit': 10, - 'offset': 4, 'sort': 'reference:asc'}) - query_string = "?%s" % urlencode(params) - detail = "" - if detailed: - detail = "/detail" - req = webob.Request.blank('/v3/%s/manageable_snapshots%s%s' % - (fake.PROJECT_ID, detail, query_string)) - req.method = 'GET' - req.headers['Content-Type'] = 'application/json' - req.headers['OpenStack-API-Version'] = 'volume ' + version - req.environ['cinder.context'] = self._admin_ctxt - res = req.get_response(app()) - return res - - @mock.patch('cinder.volume.api.API.get_manageable_snapshots', - wraps=test_contrib.api_get_manageable_snapshots) - def test_get_manageable_snapshots_route(self, mock_api_manageable): - """Test call to get manageable volumes. 
- - There is currently no change between the API in contrib and the API in - v3, so here we simply check that the call is routed properly, rather - than copying all the tests. - """ - res = self._get_resp_get('fakehost', False, False) - self.assertEqual(http_client.OK, res.status_int) - - def test_get_manageable_snapshots_previous_version(self): - res = self._get_resp_get('fakehost', False, False, version="3.7") - self.assertEqual(http_client.NOT_FOUND, res.status_int) - - @mock.patch('cinder.volume.api.API.get_manageable_snapshots', - wraps=test_contrib.api_get_manageable_snapshots) - def test_get_manageable_snapshots_detail_route(self, mock_api_manageable): - """Test call to get manageable volumes (detailed). - - There is currently no change between the API in contrib and the API in - v3, so here we simply check that the call is routed properly, rather - than copying all the tests. - """ - res = self._get_resp_get('fakehost', True, True) - self.assertEqual(http_client.OK, res.status_int) - - def test_get_manageable_snapshots_detail_previous_version(self): - res = self._get_resp_get('fakehost', True, True, version="3.7") - self.assertEqual(http_client.NOT_FOUND, res.status_int) - - @ddt.data((True, True, 'detail_list'), (True, False, 'summary_list'), - (False, True, 'detail_list'), (False, False, 'summary_list')) - @ddt.unpack - @mock.patch('cinder.objects.Service.is_up', True) - @mock.patch('cinder.volume.rpcapi.VolumeAPI._get_cctxt') - @mock.patch('cinder.objects.Service.get_by_id') - def test_get_manageable_detail(self, clustered, is_detail, view_method, - get_service_mock, get_cctxt_mock): - if clustered: - host = None - cluster_name = 'mycluster' - version = '3.17' - kwargs = {'cluster': cluster_name} - else: - host = 'fakehost' - cluster_name = None - version = '3.8' - kwargs = {} - service = objects.Service(disabled=False, host='fakehost', - cluster_name=cluster_name) - get_service_mock.return_value = service - snaps = [mock.sentinel.snap1, 
mock.sentinel.snap2] - get_cctxt_mock.return_value.call.return_value = snaps - - view_data = {'manageable-snapshots': [{'vol': 'mock.sentinel.snap1'}, - {'vol': 'mock.sentinel.snap2'}]} - view_path = ('cinder.api.views.manageable_snapshots.ViewBuilder.' + - view_method) - with mock.patch(view_path, return_value=view_data) as detail_view_mock: - res = self._get_resp_get(host, is_detail, False, version=version, - **kwargs) - - self.assertEqual(http_client.OK, res.status_int) - get_cctxt_mock.assert_called_once_with(service.service_topic_queue, - version=('3.10', '3.0')) - get_cctxt_mock.return_value.call.assert_called_once_with( - mock.ANY, 'get_manageable_snapshots', marker=None, - limit=CONF.osapi_max_limit, offset=0, sort_keys=['reference'], - sort_dirs=['desc'], want_objects=True) - detail_view_mock.assert_called_once_with(mock.ANY, snaps, len(snaps)) - get_service_mock.assert_called_once_with( - mock.ANY, None, host=host, binary='cinder-volume', - cluster_name=cluster_name) - - @ddt.data('3.8', '3.17') - def test_get_manageable_missing_host(self, version): - res = self._get_resp_get(None, True, False, version=version) - self.assertEqual(http_client.BAD_REQUEST, res.status_int) - - def test_get_manageable_both_host_cluster(self): - res = self._get_resp_get('host', True, False, version='3.17', - cluster='cluster') - self.assertEqual(http_client.BAD_REQUEST, res.status_int) diff --git a/cinder/tests/unit/api/v3/test_snapshots.py b/cinder/tests/unit/api/v3/test_snapshots.py deleted file mode 100644 index 5cb140882..000000000 --- a/cinder/tests/unit/api/v3/test_snapshots.py +++ /dev/null @@ -1,229 +0,0 @@ -# Copyright 2016 EMC Corporation -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. 
You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import ddt - -import mock - -from cinder.api.openstack import api_version_request as api_version -from cinder.api.v3 import snapshots -from cinder import context -from cinder import exception -from cinder.objects import fields -from cinder import test -from cinder.tests.unit.api import fakes -from cinder.tests.unit import fake_constants as fake -from cinder.tests.unit import fake_snapshot -from cinder.tests.unit import fake_volume -from cinder import volume - -UUID = '00000000-0000-0000-0000-000000000001' -INVALID_UUID = '00000000-0000-0000-0000-000000000002' - - -def stub_get(self, context, *args, **kwargs): - vol = {'id': fake.VOLUME_ID, - 'size': 100, - 'name': 'fake', - 'host': 'fake-host', - 'status': 'available', - 'encryption_key_id': None, - 'volume_type_id': None, - 'migration_status': None, - 'availability_zone': 'fake-zone', - 'attach_status': 'detached', - 'metadata': {}} - return fake_volume.fake_volume_obj(context, **vol) - - -def create_snapshot_query_with_metadata(metadata_query_string, - api_microversion): - """Helper to create metadata querystring with microversion""" - req = fakes.HTTPRequest.blank('/v3/snapshots?metadata=' + - metadata_query_string) - req.headers["OpenStack-API-Version"] = "volume " + api_microversion - req.api_version_request = api_version.APIVersionRequest( - api_microversion) - - return req - - -@ddt.ddt -class SnapshotApiTest(test.TestCase): - def setUp(self): - super(SnapshotApiTest, self).setUp() - self.stubs.Set(volume.api.API, 'get', stub_get) - self.controller = snapshots.SnapshotsController() - self.ctx = 
context.RequestContext(fake.USER_ID, fake.PROJECT_ID, True) - - @ddt.data('3.14', '3.13', '3.41') - @mock.patch('cinder.db.snapshot_metadata_get', return_value=dict()) - @mock.patch('cinder.objects.Volume.get_by_id') - @mock.patch('cinder.objects.Snapshot.get_by_id') - def test_snapshot_show(self, max_ver, snapshot_get_by_id, volume_get_by_id, - snapshot_metadata_get): - snapshot = { - 'id': UUID, - 'volume_id': fake.VOLUME_ID, - 'status': fields.SnapshotStatus.AVAILABLE, - 'volume_size': 100, - 'display_name': 'Default name', - 'display_description': 'Default description', - 'expected_attrs': ['metadata'], - 'group_snapshot_id': None, - } - ctx = context.RequestContext(fake.PROJECT_ID, fake.USER_ID, True) - snapshot_obj = fake_snapshot.fake_snapshot_obj(ctx, **snapshot) - fake_volume_obj = fake_volume.fake_volume_obj(ctx) - snapshot_get_by_id.return_value = snapshot_obj - volume_get_by_id.return_value = fake_volume_obj - req = fakes.HTTPRequest.blank('/v3/snapshots/%s' % UUID) - req.api_version_request = api_version.APIVersionRequest(max_ver) - resp_dict = self.controller.show(req, UUID) - - self.assertIn('snapshot', resp_dict) - self.assertEqual(UUID, resp_dict['snapshot']['id']) - self.assertIn('updated_at', resp_dict['snapshot']) - if max_ver == '3.14': - self.assertIn('group_snapshot_id', resp_dict['snapshot']) - self.assertNotIn('user_id', resp_dict['snapshot']) - elif max_ver == '3.13': - self.assertNotIn('group_snapshot_id', resp_dict['snapshot']) - self.assertNotIn('user_id', resp_dict['snapshot']) - elif max_ver == '3.41': - self.assertIn('user_id', resp_dict['snapshot']) - - def test_snapshot_show_invalid_id(self): - snapshot_id = INVALID_UUID - req = fakes.HTTPRequest.blank('/v3/snapshots/%s' % snapshot_id) - self.assertRaises(exception.SnapshotNotFound, - self.controller.show, req, snapshot_id) - - def _create_snapshot(self, name=None, metadata=None): - """Creates test snapshopt with provided metadata""" - req = fakes.HTTPRequest.blank('/v3/snapshots') 
- snap = {"volume_size": 200, - "volume_id": fake.VOLUME_ID, - "display_name": name or "Volume Test Name", - "display_description": "Volume Test Desc", - "availability_zone": "zone1:host1", - "host": "fake-host"} - if metadata: - snap["metadata"] = metadata - body = {"snapshot": snap} - self.controller.create(req, body) - - def test_snapshot_list_with_sort_name(self): - self._create_snapshot(name='test1') - self._create_snapshot(name='test2') - - req = fakes.HTTPRequest.blank('/v3/snapshots?sort_key=name', - version='3.29') - self.assertRaises(exception.InvalidInput, self.controller.detail, req) - - req = fakes.HTTPRequest.blank('/v3/snapshots?sort_key=name', - version='3.30') - res_dict = self.controller.detail(req) - self.assertEqual(2, len(res_dict['snapshots'])) - self.assertEqual('test2', res_dict['snapshots'][0]['name']) - self.assertEqual('test1', res_dict['snapshots'][1]['name']) - - def test_snapshot_list_with_one_metadata_in_filter(self): - # Create snapshot with metadata key1: value1 - metadata = {"key1": "val1"} - self._create_snapshot(metadata=metadata) - - # Create request with metadata filter key1: value1 - req = create_snapshot_query_with_metadata('{"key1":"val1"}', '3.22') - - # query controller with above request - res_dict = self.controller.detail(req) - - # verify 1 snapshot is returned - self.assertEqual(1, len(res_dict['snapshots'])) - - # verify if the medadata of the returned snapshot is key1: value1 - self.assertDictEqual({"key1": "val1"}, res_dict['snapshots'][0][ - 'metadata']) - - # Create request with metadata filter key2: value2 - req = create_snapshot_query_with_metadata('{"key2":"val2"}', '3.22') - - # query controller with above request - res_dict = self.controller.detail(req) - - # verify no snapshot is returned - self.assertEqual(0, len(res_dict['snapshots'])) - - def test_snapshot_list_with_multiple_metadata_in_filter(self): - # Create snapshot with metadata key1: value1, key11: value11 - metadata = {"key1": "val1", "key11": 
"val11"} - self._create_snapshot(metadata=metadata) - - # Create request with metadata filter key1: value1, key11: value11 - req = create_snapshot_query_with_metadata( - '{"key1":"val1", "key11":"val11"}', '3.22') - - # query controller with above request - res_dict = self.controller.detail(req) - - # verify 1 snapshot is returned - self.assertEqual(1, len(res_dict['snapshots'])) - - # verify if the medadata of the returned snapshot is key1: value1 - self.assertDictEqual({"key1": "val1", "key11": "val11"}, res_dict[ - 'snapshots'][0]['metadata']) - - # Create request with metadata filter key1: value1 - req = create_snapshot_query_with_metadata('{"key1":"val1"}', '3.22') - - # query controller with above request - res_dict = self.controller.detail(req) - - # verify 1 snapshot is returned - self.assertEqual(1, len(res_dict['snapshots'])) - - # verify if the medadata of the returned snapshot is key1: value1 - self.assertDictEqual({"key1": "val1", "key11": "val11"}, res_dict[ - 'snapshots'][0]['metadata']) - - @ddt.data('3.30', '3.31', '3.34') - @mock.patch('cinder.api.common.reject_invalid_filters') - def test_snapshot_list_with_general_filter(self, version, mock_update): - url = '/v3/%s/snapshots' % fake.PROJECT_ID - req = fakes.HTTPRequest.blank(url, - version=version, - use_admin_context=False) - self.controller.index(req) - - if version != '3.30': - support_like = True if version == '3.34' else False - mock_update.assert_called_once_with(req.environ['cinder.context'], - mock.ANY, 'snapshot', - support_like) - - def test_snapshot_list_with_metadata_unsupported_microversion(self): - # Create snapshot with metadata key1: value1 - metadata = {"key1": "val1"} - self._create_snapshot(metadata=metadata) - - # Create request with metadata filter key2: value2 - req = create_snapshot_query_with_metadata('{"key2":"val2"}', '3.21') - - # query controller with above request - res_dict = self.controller.detail(req) - - # verify some snapshot is returned - self.assertNotEqual(0, 
len(res_dict['snapshots'])) diff --git a/cinder/tests/unit/api/v3/test_volume_manage.py b/cinder/tests/unit/api/v3/test_volume_manage.py deleted file mode 100644 index 22c82342b..000000000 --- a/cinder/tests/unit/api/v3/test_volume_manage.py +++ /dev/null @@ -1,196 +0,0 @@ -# Copyright (c) 2016 Stratoscale, Ltd. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import ddt -import mock -from oslo_config import cfg -from oslo_serialization import jsonutils -from six.moves import http_client -from six.moves.urllib.parse import urlencode -import webob - -from cinder.api.v3 import router as router_v3 -from cinder import context -from cinder import objects -from cinder import test -from cinder.tests.unit.api.contrib import test_volume_manage as test_contrib -from cinder.tests.unit.api import fakes -from cinder.tests.unit import fake_constants as fake - - -CONF = cfg.CONF - - -def app(): - # no auth, just let environ['cinder.context'] pass through - api = router_v3.APIRouter() - mapper = fakes.urlmap.URLMap() - mapper['/v3'] = api - return mapper - - -@ddt.ddt -@mock.patch('cinder.objects.service.Service.get_by_host_and_topic', - test_contrib.service_get) -@mock.patch('cinder.volume.volume_types.get_volume_type_by_name', - test_contrib.vt_get_volume_type_by_name) -@mock.patch('cinder.volume.volume_types.get_volume_type', - test_contrib.vt_get_volume_type) -class VolumeManageTest(test.TestCase): - """Test cases for cinder/api/v3/volume_manage.py""" - - def setUp(self): - 
super(VolumeManageTest, self).setUp() - self._admin_ctxt = context.RequestContext(fake.USER_ID, - fake.PROJECT_ID, - True) - - def _get_resp_post(self, body, version="3.8"): - """Helper to execute a POST manageable_volumes API call.""" - req = webob.Request.blank('/v3/%s/manageable_volumes' % - fake.PROJECT_ID) - req.method = 'POST' - req.headers['Content-Type'] = 'application/json' - req.headers['OpenStack-API-Version'] = 'volume ' + version - req.environ['cinder.context'] = self._admin_ctxt - req.body = jsonutils.dump_as_bytes(body) - res = req.get_response(app()) - return res - - @mock.patch('cinder.volume.api.API.manage_existing', - wraps=test_contrib.api_manage) - @mock.patch( - 'cinder.api.openstack.wsgi.Controller.validate_name_and_description') - def test_manage_volume_route(self, mock_validate, mock_api_manage): - """Test call to manage volume. - - There is currently no change between the API in contrib and the API in - v3, so here we simply check that the call is routed properly, rather - than copying all the tests. 
- """ - body = {'volume': {'host': 'host_ok', 'ref': 'fake_ref'}} - res = self._get_resp_post(body) - self.assertEqual(http_client.ACCEPTED, res.status_int, res) - - def test_manage_volume_previous_version(self): - body = {'volume': {'host': 'host_ok', 'ref': 'fake_ref'}} - res = self._get_resp_post(body) - self.assertEqual(http_client.BAD_REQUEST, res.status_int, res) - - def _get_resp_get(self, host, detailed, paging, version="3.8", **kwargs): - """Helper to execute a GET os-volume-manage API call.""" - params = {'host': host} if host else {} - params.update(kwargs) - if paging: - params.update({'marker': '1234', 'limit': 10, - 'offset': 4, 'sort': 'reference:asc'}) - query_string = "?%s" % urlencode(params) - detail = "" - if detailed: - detail = "/detail" - - req = webob.Request.blank('/v3/%s/manageable_volumes%s%s' % - (fake.PROJECT_ID, detail, query_string)) - req.method = 'GET' - req.headers['Content-Type'] = 'application/json' - req.headers['OpenStack-API-Version'] = 'volume ' + version - req.environ['cinder.context'] = self._admin_ctxt - res = req.get_response(app()) - return res - - @mock.patch('cinder.volume.api.API.get_manageable_volumes', - wraps=test_contrib.api_get_manageable_volumes) - def test_get_manageable_volumes_route(self, mock_api_manageable): - """Test call to get manageable volumes. - - There is currently no change between the API in contrib and the API in - v3, so here we simply check that the call is routed properly, rather - than copying all the tests. 
- """ - res = self._get_resp_get('fakehost', False, True) - self.assertEqual(http_client.OK, res.status_int) - - def test_get_manageable_volumes_previous_version(self): - res = self._get_resp_get('fakehost', False, True, version="3.7") - self.assertEqual(http_client.NOT_FOUND, res.status_int) - - @mock.patch('cinder.volume.api.API.get_manageable_volumes', - wraps=test_contrib.api_get_manageable_volumes) - def test_get_manageable_volumes_detail_route(self, mock_api_manageable): - """Test call to get manageable volumes (detailed). - - There is currently no change between the API in contrib and the API in - v3, so here we simply check that the call is routed properly, rather - than copying all the tests. - """ - res = self._get_resp_get('fakehost', True, False) - self.assertEqual(http_client.OK, res.status_int) - - def test_get_manageable_volumes_detail_previous_version(self): - res = self._get_resp_get('fakehost', True, False, version="3.7") - self.assertEqual(http_client.NOT_FOUND, res.status_int) - - @ddt.data((True, True, 'detail_list'), (True, False, 'summary_list'), - (False, True, 'detail_list'), (False, False, 'summary_list')) - @ddt.unpack - @mock.patch('cinder.objects.Service.is_up', True) - @mock.patch('cinder.volume.rpcapi.VolumeAPI._get_cctxt') - @mock.patch('cinder.objects.Service.get_by_id') - def test_get_manageable_detail(self, clustered, is_detail, view_method, - get_service_mock, get_cctxt_mock): - if clustered: - host = None - cluster_name = 'mycluster' - version = '3.17' - kwargs = {'cluster': cluster_name} - else: - host = 'fakehost' - cluster_name = None - version = '3.8' - kwargs = {} - service = objects.Service(disabled=False, host='fakehost', - cluster_name=cluster_name) - get_service_mock.return_value = service - volumes = [mock.sentinel.volume1, mock.sentinel.volume2] - get_cctxt_mock.return_value.call.return_value = volumes - - view_data = {'manageable-volumes': [{'vol': str(v)} for v in volumes]} - view_path = 
('cinder.api.views.manageable_volumes.ViewBuilder.' + - view_method) - with mock.patch(view_path, return_value=view_data) as detail_view_mock: - res = self._get_resp_get(host, is_detail, False, version=version, - **kwargs) - - self.assertEqual(http_client.OK, res.status_int) - get_cctxt_mock.assert_called_once_with(service.service_topic_queue, - version=('3.10', '3.0')) - get_cctxt_mock.return_value.call.assert_called_once_with( - mock.ANY, 'get_manageable_volumes', marker=None, - limit=CONF.osapi_max_limit, offset=0, sort_keys=['reference'], - sort_dirs=['desc'], want_objects=True) - detail_view_mock.assert_called_once_with(mock.ANY, volumes, - len(volumes)) - get_service_mock.assert_called_once_with( - mock.ANY, None, host=host, binary='cinder-volume', - cluster_name=cluster_name) - - @ddt.data('3.8', '3.17') - def test_get_manageable_missing_host(self, version): - res = self._get_resp_get(None, True, False, version=version) - self.assertEqual(http_client.BAD_REQUEST, res.status_int) - - def test_get_manageable_both_host_cluster(self): - res = self._get_resp_get('host', True, False, version='3.17', - cluster='cluster') - self.assertEqual(http_client.BAD_REQUEST, res.status_int) diff --git a/cinder/tests/unit/api/v3/test_volume_metadata.py b/cinder/tests/unit/api/v3/test_volume_metadata.py deleted file mode 100644 index 808647553..000000000 --- a/cinder/tests/unit/api/v3/test_volume_metadata.py +++ /dev/null @@ -1,257 +0,0 @@ -# Copyright 2016 OpenStack Foundation. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the -# License for the specific language governing permissions and limitations -# under the License. - -import uuid - -import mock -from oslo_config import cfg -from oslo_serialization import jsonutils -import webob - -from cinder.api import extensions -from cinder.api.v3 import volume_metadata -from cinder.api.v3 import volumes -from cinder import db -from cinder import exception -from cinder import test -from cinder.tests.unit.api import fakes -from cinder.tests.unit.api.v2 import fakes as v2_fakes -from cinder.tests.unit import fake_constants as fake -from cinder.tests.unit import fake_volume -from cinder import volume -from cinder.volume import api as volume_api - - -CONF = cfg.CONF - - -def return_create_volume_metadata_max(context, volume_id, metadata, delete): - return stub_max_volume_metadata() - - -def return_create_volume_metadata(context, volume_id, metadata, - delete, meta_type): - return stub_volume_metadata() - - -def return_new_volume_metadata(context, volume_id, metadata, - delete, meta_type): - return stub_new_volume_metadata() - - -def return_create_volume_metadata_insensitive(context, snapshot_id, - metadata, delete, - meta_type): - return stub_volume_metadata_insensitive() - - -def return_volume_metadata(context, volume_id): - return stub_volume_metadata() - - -def return_empty_volume_metadata(context, volume_id): - return {} - - -def return_empty_container_metadata(context, volume_id, metadata, - delete, meta_type): - return {} - - -def stub_volume_metadata(): - metadata = { - "key1": "value1", - "key2": "value2", - "key3": "value3", - } - return metadata - - -def stub_new_volume_metadata(): - metadata = { - 'key10': 'value10', - 'key99': 'value99', - 'KEY20': 'value20', - } - return metadata - - -def stub_volume_metadata_insensitive(): - metadata = { - "key1": "value1", - "key2": "value2", - "key3": "value3", - "KEY4": "value4", - } - return metadata - - -def stub_max_volume_metadata(): - metadata = {"metadata": {}} - for num in 
range(CONF.quota_metadata_items): - metadata['metadata']['key%i' % num] = "blah" - return metadata - - -def get_volume(*args, **kwargs): - vol = {'name': 'fake', - 'metadata': {}} - return fake_volume.fake_volume_obj(args[0], **vol) - - -def return_volume_nonexistent(*args, **kwargs): - raise exception.VolumeNotFound('bogus test message') - - -def fake_update_volume_metadata(self, context, volume, diff): - pass - - -class volumeMetaDataTest(test.TestCase): - - def setUp(self): - super(volumeMetaDataTest, self).setUp() - self.volume_api = volume_api.API() - self.mock_object(volume.api.API, 'get', get_volume) - self.mock_object(db, 'volume_metadata_get', - return_volume_metadata) - self.patch( - 'cinder.db.service_get_all', autospec=True, - return_value=v2_fakes.fake_service_get_all_by_topic(None, None)) - - self.mock_object(self.volume_api, 'update_volume_metadata', - fake_update_volume_metadata) - - self.ext_mgr = extensions.ExtensionManager() - self.ext_mgr.extensions = {} - self.volume_controller = volumes.VolumeController(self.ext_mgr) - self.controller = volume_metadata.Controller() - self.req_id = str(uuid.uuid4()) - self.url = '/v2/%s/volumes/%s/metadata' % ( - fake.PROJECT_ID, self.req_id) - - vol = {"size": 100, - "display_name": "Volume Test Name", - "display_description": "Volume Test Desc", - "availability_zone": "zone1:host1", - "metadata": {}} - body = {"volume": vol} - req = fakes.HTTPRequest.blank('/v2/volumes') - self.volume_controller.create(req, body) - - def test_index(self): - req = fakes.HTTPRequest.blank(self.url, version="3.15") - data = self.controller.index(req, self.req_id) - - expected = { - 'metadata': { - 'key1': 'value1', - 'key2': 'value2', - 'key3': 'value3', - }, - } - result = jsonutils.loads(data.body) - self.assertDictEqual(expected, result) - - def test_index_nonexistent_volume(self): - self.mock_object(db, 'volume_metadata_get', - return_volume_nonexistent) - req = fakes.HTTPRequest.blank(self.url, version="3.15") - 
self.assertRaises(exception.VolumeNotFound, - self.controller.index, req, self.url) - - def test_index_no_data(self): - self.mock_object(db, 'volume_metadata_get', - return_empty_volume_metadata) - req = fakes.HTTPRequest.blank(self.url, version="3.15") - data = self.controller.index(req, self.req_id) - expected = {'metadata': {}} - result = jsonutils.loads(data.body) - self.assertDictEqual(expected, result) - - def test_validate_etag_true(self): - self.mock_object(db, 'volume_metadata_get', - return_value={'key1': 'vanue1', 'key2': 'value2'}) - req = fakes.HTTPRequest.blank(self.url, version="3.15") - req.environ['cinder.context'] = mock.Mock() - req.if_match.etags = ['d5103bf7b26ff0310200d110da3ed186'] - self.assertTrue(self.controller._validate_etag(req, self.req_id)) - - @mock.patch.object(db, 'volume_metadata_update') - def test_update_all(self, metadata_update): - fake_volume = {'id': self.req_id, 'status': 'available'} - fake_context = mock.Mock() - metadata_update.side_effect = return_new_volume_metadata - req = fakes.HTTPRequest.blank(self.url, version="3.15") - req.method = 'PUT' - req.content_type = "application/json" - expected = { - 'metadata': { - 'key10': 'value10', - 'key99': 'value99', - 'KEY20': 'value20', - }, - } - req.body = jsonutils.dump_as_bytes(expected) - req.environ['cinder.context'] = fake_context - - with mock.patch.object(self.controller.volume_api, - 'get') as get_volume: - get_volume.return_value = fake_volume - res_dict = self.controller.update_all(req, self.req_id, expected) - self.assertEqual(expected, res_dict) - get_volume.assert_called_once_with(fake_context, self.req_id) - - @mock.patch.object(db, 'volume_metadata_update') - def test_update_item(self, metadata_update): - fake_volume = {'id': self.req_id, 'status': 'available'} - fake_context = mock.Mock() - metadata_update.side_effect = return_create_volume_metadata - req = fakes.HTTPRequest.blank(self.url + '/key1', version="3.15") - req.method = 'PUT' - body = {"meta": 
{"key1": "value1"}} - req.body = jsonutils.dump_as_bytes(body) - req.headers["content-type"] = "application/json" - req.environ['cinder.context'] = fake_context - - with mock.patch.object(self.controller.volume_api, - 'get') as get_volume: - get_volume.return_value = fake_volume - res_dict = self.controller.update(req, self.req_id, 'key1', body) - expected = {'meta': {'key1': 'value1'}} - self.assertEqual(expected, res_dict) - get_volume.assert_called_once_with(fake_context, self.req_id) - - def test_create_metadata_keys_value_none(self): - self.mock_object(db, 'volume_metadata_update', - return_create_volume_metadata) - req = fakes.HTTPRequest.blank(self.url, version="3.15") - req.method = 'POST' - req.headers["content-type"] = "application/json" - body = {"meta": {"key": None}} - self.assertRaises(webob.exc.HTTPBadRequest, - self.controller.create, req, self.req_id, body) - - def test_update_items_value_none(self): - self.mock_object(db, 'volume_metadata_update', - return_create_volume_metadata) - req = fakes.HTTPRequest.blank(self.url + '/key1', version="3.15") - req.method = 'PUT' - body = {"metadata": {"key": None}} - req.body = jsonutils.dump_as_bytes(body) - req.headers["content-type"] = "application/json" - - self.assertRaises(webob.exc.HTTPBadRequest, - self.controller.create, req, self.req_id, body) diff --git a/cinder/tests/unit/api/v3/test_volumes.py b/cinder/tests/unit/api/v3/test_volumes.py deleted file mode 100644 index d039a31d3..000000000 --- a/cinder/tests/unit/api/v3/test_volumes.py +++ /dev/null @@ -1,579 +0,0 @@ -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. 
You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import datetime -import ddt -import iso8601 - -import mock -import webob - -from cinder.api import extensions -from cinder.api.openstack import api_version_request as api_version -from cinder.api.v3 import volumes -from cinder import context -from cinder import db -from cinder import exception -from cinder.group import api as group_api -from cinder import objects -from cinder import test -from cinder.tests.unit.api import fakes -from cinder.tests.unit.api.v2 import fakes as v2_fakes -from cinder.tests.unit.api.v2 import test_volumes as v2_test_volumes -from cinder.tests.unit import fake_constants as fake -from cinder.tests.unit import utils as test_utils -from cinder import utils -from cinder.volume import api as volume_api -from cinder.volume import api as vol_get - -version_header_name = 'OpenStack-API-Version' - -DEFAULT_AZ = "zone1:host1" -REVERT_TO_SNAPSHOT_VERSION = '3.40' - - -@ddt.ddt -class VolumeApiTest(test.TestCase): - def setUp(self): - super(VolumeApiTest, self).setUp() - self.ext_mgr = extensions.ExtensionManager() - self.ext_mgr.extensions = {} - self.controller = volumes.VolumeController(self.ext_mgr) - - self.flags(host='fake') - self.ctxt = context.RequestContext(fake.USER_ID, fake.PROJECT_ID, True) - - def test_check_volume_filters_called(self): - with mock.patch.object(vol_get.API, - 'check_volume_filters') as volume_get: - req = fakes.HTTPRequest.blank('/v3/volumes?bootable=True') - req.method = 'GET' - req.content_type = 'application/json' - req.headers = {version_header_name: 'volume 3.0'} - req.environ['cinder.context'].is_admin 
= True - - self.override_config('query_volume_filters', 'bootable') - self.controller.index(req) - filters = req.params.copy() - - volume_get.assert_called_with(filters, False) - - def test_check_volume_filters_strict_called(self): - - with mock.patch.object(vol_get.API, - 'check_volume_filters') as volume_get: - req = fakes.HTTPRequest.blank('/v3/volumes?bootable=True') - req.method = 'GET' - req.content_type = 'application/json' - req.headers = {version_header_name: 'volume 3.2'} - req.environ['cinder.context'].is_admin = True - req.api_version_request = api_version.APIVersionRequest('3.29') - - self.override_config('query_volume_filters', 'bootable') - self.controller.index(req) - filters = req.params.copy() - - volume_get.assert_called_with(filters, True) - - def _create_volume_with_glance_metadata(self): - vol1 = db.volume_create(self.ctxt, {'display_name': 'test1', - 'project_id': - self.ctxt.project_id}) - db.volume_glance_metadata_create(self.ctxt, vol1.id, 'image_name', - 'imageTestOne') - vol2 = db.volume_create(self.ctxt, {'display_name': 'test2', - 'project_id': - self.ctxt.project_id}) - db.volume_glance_metadata_create(self.ctxt, vol2.id, 'image_name', - 'imageTestTwo') - db.volume_glance_metadata_create(self.ctxt, vol2.id, 'disk_format', - 'qcow2') - return [vol1, vol2] - - def _create_volume_with_group(self): - vol1 = db.volume_create(self.ctxt, {'display_name': 'test1', - 'project_id': - self.ctxt.project_id, - 'group_id': - fake.GROUP_ID}) - vol2 = db.volume_create(self.ctxt, {'display_name': 'test2', - 'project_id': - self.ctxt.project_id, - 'group_id': - fake.GROUP2_ID}) - return [vol1, vol2] - - def test_volume_index_filter_by_glance_metadata(self): - vols = self._create_volume_with_glance_metadata() - req = fakes.HTTPRequest.blank("/v3/volumes?glance_metadata=" - "{'image_name': 'imageTestOne'}") - req.headers["OpenStack-API-Version"] = "volume 3.4" - req.api_version_request = api_version.APIVersionRequest('3.4') - 
req.environ['cinder.context'] = self.ctxt - res_dict = self.controller.index(req) - volumes = res_dict['volumes'] - self.assertEqual(1, len(volumes)) - self.assertEqual(vols[0].id, volumes[0]['id']) - - def test_volume_index_filter_by_glance_metadata_in_unsupport_version(self): - self._create_volume_with_glance_metadata() - req = fakes.HTTPRequest.blank("/v3/volumes?glance_metadata=" - "{'image_name': 'imageTestOne'}") - req.headers["OpenStack-API-Version"] = "volume 3.0" - req.api_version_request = api_version.APIVersionRequest('3.0') - req.environ['cinder.context'] = self.ctxt - res_dict = self.controller.index(req) - volumes = res_dict['volumes'] - self.assertEqual(2, len(volumes)) - - def test_volume_index_filter_by_group_id(self): - vols = self._create_volume_with_group() - req = fakes.HTTPRequest.blank(("/v3/volumes?group_id=%s") % - fake.GROUP_ID) - req.headers["OpenStack-API-Version"] = "volume 3.10" - req.api_version_request = api_version.APIVersionRequest('3.10') - req.environ['cinder.context'] = self.ctxt - res_dict = self.controller.index(req) - volumes = res_dict['volumes'] - self.assertEqual(1, len(volumes)) - self.assertEqual(vols[0].id, volumes[0]['id']) - - def test_volume_index_filter_by_group_id_in_unsupport_version(self): - self._create_volume_with_group() - req = fakes.HTTPRequest.blank(("/v3/volumes?group_id=%s") % - fake.GROUP_ID) - req.headers["OpenStack-API-Version"] = "volume 3.9" - req.api_version_request = api_version.APIVersionRequest('3.9') - req.environ['cinder.context'] = self.ctxt - res_dict = self.controller.index(req) - volumes = res_dict['volumes'] - self.assertEqual(2, len(volumes)) - - def _fake_volumes_summary_request(self, version='3.12', all_tenant=False, - is_admin=False): - req_url = '/v3/volumes/summary' - if all_tenant: - req_url += '?all_tenants=True' - req = fakes.HTTPRequest.blank(req_url, use_admin_context=is_admin) - req.headers = {'OpenStack-API-Version': 'volume ' + version} - req.api_version_request = 
api_version.APIVersionRequest(version) - return req - - def test_volumes_summary_in_unsupport_version(self): - """Function call to test summary volumes API in unsupported version""" - req = self._fake_volumes_summary_request(version='3.7') - self.assertRaises(exception.VersionNotFoundForAPIMethod, - self.controller.summary, req) - - def test_volumes_summary_in_supported_version(self): - """Function call to test the summary volumes API for version v3.""" - req = self._fake_volumes_summary_request() - res_dict = self.controller.summary(req) - expected = {'volume-summary': {'total_size': 0.0, 'total_count': 0}} - self.assertEqual(expected, res_dict) - - vol = v2_test_volumes.VolumeApiTest._vol_in_request_body( - availability_zone="nova") - body = {"volume": vol} - req = fakes.HTTPRequest.blank('/v3/volumes') - res_dict = self.controller.create(req, body) - - req = self._fake_volumes_summary_request() - res_dict = self.controller.summary(req) - expected = {'volume-summary': {'total_size': 1.0, 'total_count': 1}} - self.assertEqual(expected, res_dict) - - @ddt.data( - ('3.35', {'volume-summary': {'total_size': 0.0, - 'total_count': 0}}), - ('3.36', {'volume-summary': {'total_size': 0.0, - 'total_count': 0, - 'metadata': {}}})) - @ddt.unpack - def test_volume_summary_empty(self, summary_api_version, expect_result): - req = self._fake_volumes_summary_request(version=summary_api_version) - res_dict = self.controller.summary(req) - self.assertEqual(expect_result, res_dict) - - @ddt.data( - ('3.35', {'volume-summary': {'total_size': 2, - 'total_count': 2}}), - ('3.36', {'volume-summary': {'total_size': 2, - 'total_count': 2, - 'metadata': { - 'name': ['test_name1', 'test_name2'], - 'age': ['test_age']}}})) - @ddt.unpack - def test_volume_summary_return_metadata(self, summary_api_version, - expect_result): - test_utils.create_volume(self.ctxt, metadata={'name': 'test_name1', - 'age': 'test_age'}) - test_utils.create_volume(self.ctxt, metadata={'name': 'test_name2', - 'age': 
'test_age'}) - ctxt2 = context.RequestContext(fake.USER_ID, fake.PROJECT2_ID, True) - test_utils.create_volume(ctxt2, metadata={'name': 'test_name3'}) - - req = self._fake_volumes_summary_request(version=summary_api_version) - res_dict = self.controller.summary(req) - self.assertEqual(expect_result, res_dict) - - @ddt.data( - ('3.35', {'volume-summary': {'total_size': 2, - 'total_count': 2}}), - ('3.36', {'volume-summary': {'total_size': 2, - 'total_count': 2, - 'metadata': { - 'name': ['test_name1', 'test_name2'], - 'age': ['test_age']}}})) - @ddt.unpack - def test_volume_summary_return_metadata_all_tenant( - self, summary_api_version, expect_result): - test_utils.create_volume(self.ctxt, metadata={'name': 'test_name1', - 'age': 'test_age'}) - ctxt2 = context.RequestContext(fake.USER_ID, fake.PROJECT2_ID, True) - test_utils.create_volume(ctxt2, metadata={'name': 'test_name2', - 'age': 'test_age'}) - - req = self._fake_volumes_summary_request(version=summary_api_version, - all_tenant=True, - is_admin=True) - res_dict = self.controller.summary(req) - self.assertEqual(expect_result, res_dict) - - def _vol_in_request_body(self, - size=v2_fakes.DEFAULT_VOL_SIZE, - name=v2_fakes.DEFAULT_VOL_NAME, - description=v2_fakes.DEFAULT_VOL_DESCRIPTION, - availability_zone=DEFAULT_AZ, - snapshot_id=None, - source_volid=None, - source_replica=None, - consistencygroup_id=None, - volume_type=None, - image_ref=None, - image_id=None, - group_id=None): - vol = {"size": size, - "name": name, - "description": description, - "availability_zone": availability_zone, - "snapshot_id": snapshot_id, - "source_volid": source_volid, - "source_replica": source_replica, - "consistencygroup_id": consistencygroup_id, - "volume_type": volume_type, - "group_id": group_id, - } - - if image_id is not None: - vol['image_id'] = image_id - elif image_ref is not None: - vol['imageRef'] = image_ref - - return vol - - def _expected_vol_from_controller( - self, - size=v2_fakes.DEFAULT_VOL_SIZE, - 
availability_zone=DEFAULT_AZ, - description=v2_fakes.DEFAULT_VOL_DESCRIPTION, - name=v2_fakes.DEFAULT_VOL_NAME, - consistencygroup_id=None, - source_volid=None, - snapshot_id=None, - metadata=None, - attachments=None, - volume_type=v2_fakes.DEFAULT_VOL_TYPE, - status=v2_fakes.DEFAULT_VOL_STATUS, - with_migration_status=False, - group_id=None, - req_version=None): - metadata = metadata or {} - attachments = attachments or [] - volume = {'volume': - {'attachments': attachments, - 'availability_zone': availability_zone, - 'bootable': 'false', - 'consistencygroup_id': consistencygroup_id, - 'group_id': group_id, - 'created_at': datetime.datetime( - 1900, 1, 1, 1, 1, 1, tzinfo=iso8601.iso8601.Utc()), - 'updated_at': datetime.datetime( - 1900, 1, 1, 1, 1, 1, tzinfo=iso8601.iso8601.Utc()), - 'description': description, - 'id': v2_fakes.DEFAULT_VOL_ID, - 'links': - [{'href': 'http://localhost/v3/%s/volumes/%s' % ( - fake.PROJECT_ID, fake.VOLUME_ID), - 'rel': 'self'}, - {'href': 'http://localhost/%s/volumes/%s' % ( - fake.PROJECT_ID, fake.VOLUME_ID), - 'rel': 'bookmark'}], - 'metadata': metadata, - 'name': name, - 'replication_status': 'disabled', - 'multiattach': False, - 'size': size, - 'snapshot_id': snapshot_id, - 'source_volid': source_volid, - 'status': status, - 'user_id': fake.USER_ID, - 'volume_type': volume_type, - 'encrypted': False}} - - if with_migration_status: - volume['volume']['migration_status'] = None - - # Remove group_id if max version is less than 3.13. 
- if req_version and req_version.matches(None, "3.12"): - volume['volume'].pop('group_id') - - return volume - - def _expected_volume_api_create_kwargs(self, snapshot=None, - availability_zone=DEFAULT_AZ, - source_volume=None, - test_group=None, - req_version=None): - volume = { - 'metadata': None, - 'snapshot': snapshot, - 'source_volume': source_volume, - 'source_replica': None, - 'consistencygroup': None, - 'availability_zone': availability_zone, - 'scheduler_hints': None, - 'multiattach': False, - 'group': test_group, - } - - # Remove group_id if max version is less than 3.13. - if req_version and req_version.matches(None, "3.12"): - volume.pop('group') - - return volume - - @ddt.data('3.13', '3.12') - @mock.patch( - 'cinder.api.openstack.wsgi.Controller.validate_name_and_description') - def test_volume_create(self, max_ver, mock_validate): - self.mock_object(volume_api.API, 'get', v2_fakes.fake_volume_get) - self.mock_object(volume_api.API, "create", - v2_fakes.fake_volume_api_create) - self.mock_object(db.sqlalchemy.api, '_volume_type_get_full', - v2_fakes.fake_volume_type_get) - - vol = self._vol_in_request_body() - body = {"volume": vol} - req = fakes.HTTPRequest.blank('/v3/volumes') - req.api_version_request = api_version.APIVersionRequest(max_ver) - res_dict = self.controller.create(req, body) - ex = self._expected_vol_from_controller( - req_version=req.api_version_request) - self.assertEqual(ex, res_dict) - self.assertTrue(mock_validate.called) - - @ddt.data('3.14', '3.13') - @mock.patch.object(group_api.API, 'get') - @mock.patch.object(db.sqlalchemy.api, '_volume_type_get_full', - autospec=True) - @mock.patch.object(volume_api.API, 'get_snapshot', autospec=True) - @mock.patch.object(volume_api.API, 'create', autospec=True) - def test_volume_creation_from_snapshot(self, max_ver, create, get_snapshot, - volume_type_get, group_get): - create.side_effect = v2_fakes.fake_volume_api_create - get_snapshot.side_effect = v2_fakes.fake_snapshot_get - 
volume_type_get.side_effect = v2_fakes.fake_volume_type_get - fake_group = { - 'id': fake.GROUP_ID, - 'group_type_id': fake.GROUP_TYPE_ID, - 'name': 'fake_group' - } - group_get.return_value = fake_group - - snapshot_id = fake.SNAPSHOT_ID - vol = self._vol_in_request_body(snapshot_id=snapshot_id, - group_id=fake.GROUP_ID) - body = {"volume": vol} - req = fakes.HTTPRequest.blank('/v3/volumes') - req.api_version_request = api_version.APIVersionRequest(max_ver) - res_dict = self.controller.create(req, body) - ex = self._expected_vol_from_controller( - snapshot_id=snapshot_id, - req_version=req.api_version_request) - self.assertEqual(ex, res_dict) - - context = req.environ['cinder.context'] - get_snapshot.assert_called_once_with(self.controller.volume_api, - context, snapshot_id) - - kwargs = self._expected_volume_api_create_kwargs( - v2_fakes.fake_snapshot(snapshot_id), - test_group=fake_group, - req_version=req.api_version_request) - create.assert_called_once_with(self.controller.volume_api, context, - vol['size'], v2_fakes.DEFAULT_VOL_NAME, - v2_fakes.DEFAULT_VOL_DESCRIPTION, - **kwargs) - - @ddt.data({'s': 'ea895e29-8485-4930-bbb8-c5616a309c0e'}, - ['ea895e29-8485-4930-bbb8-c5616a309c0e'], - 42) - def test_volume_creation_fails_with_invalid_snapshot_type(self, value): - snapshot_id = value - vol = self._vol_in_request_body(snapshot_id=snapshot_id) - body = {"volume": vol} - req = fakes.HTTPRequest.blank('/v3/volumes') - # Raise 400 when snapshot has not uuid type. 
- self.assertRaises(webob.exc.HTTPBadRequest, self.controller.create, - req, body) - - @ddt.data({'source_volid': 1}, - {'source_volid': []}, - {'source_replica': 1}, - {'source_replica': []}, - {'consistencygroup_id': 1}, - {'consistencygroup_id': []}) - def test_volume_creation_fails_with_invalid_uuids(self, updated_uuids): - vol = self._vol_in_request_body() - vol.update(updated_uuids) - body = {"volume": vol} - req = fakes.HTTPRequest.blank('/v2/volumes') - # Raise 400 for resource requested with invalid uuids. - self.assertRaises(webob.exc.HTTPBadRequest, self.controller.create, - req, body) - - @ddt.data('3.30', '3.31', '3.34') - @mock.patch.object(volume_api.API, 'check_volume_filters', mock.Mock()) - @mock.patch.object(utils, 'add_visible_admin_metadata', mock.Mock()) - @mock.patch('cinder.api.common.reject_invalid_filters') - def test_list_volume_with_general_filter(self, version, mock_update): - req = fakes.HTTPRequest.blank('/v3/volumes', version=version) - self.controller.index(req) - if version != '3.30': - support_like = True if version == '3.34' else False - mock_update.assert_called_once_with(req.environ['cinder.context'], - mock.ANY, 'volume', - support_like) - - @ddt.data({'admin': True, 'version': '3.21'}, - {'admin': False, 'version': '3.21'}, - {'admin': True, 'version': '3.20'}, - {'admin': False, 'version': '3.20'}) - @ddt.unpack - def test_volume_show_provider_id(self, admin, version): - self.mock_object(volume_api.API, 'get', v2_fakes.fake_volume_api_get) - self.mock_object(db.sqlalchemy.api, '_volume_type_get_full', - v2_fakes.fake_volume_type_get) - - req = fakes.HTTPRequest.blank('/v3/volumes/%s' % fake.VOLUME_ID, - version=version) - if admin: - admin_ctx = context.RequestContext(fake.USER_ID, fake.PROJECT_ID, - True) - req.environ['cinder.context'] = admin_ctx - res_dict = self.controller.show(req, fake.VOLUME_ID) - req_version = req.api_version_request - # provider_id is in view if min version is greater than or equal to - # 3.21 for 
admin. - if req_version.matches("3.21", None) and admin: - self.assertIn('provider_id', res_dict['volume']) - else: - self.assertNotIn('provider_id', res_dict['volume']) - - def _fake_create_volume(self): - vol = { - 'display_name': 'fake_volume1', - 'status': 'available' - } - volume = objects.Volume(context=self.ctxt, **vol) - volume.create() - return volume - - def _fake_create_snapshot(self, volume_id): - snap = { - 'display_name': 'fake_snapshot1', - 'status': 'available', - 'volume_id': volume_id - } - snapshot = objects.Snapshot(context=self.ctxt, **snap) - snapshot.create() - return snapshot - - @mock.patch.object(objects.Volume, 'get_latest_snapshot') - @mock.patch.object(volume_api.API, 'get_volume') - def test_volume_revert_with_snapshot_not_found(self, mock_volume, - mock_latest): - fake_volume = self._fake_create_volume() - mock_volume.return_value = fake_volume - mock_latest.side_effect = exception.VolumeSnapshotNotFound(volume_id= - 'fake_id') - req = fakes.HTTPRequest.blank('/v3/volumes/fake_id/revert') - req.headers = {'OpenStack-API-Version': - 'volume %s' % REVERT_TO_SNAPSHOT_VERSION} - req.api_version_request = api_version.APIVersionRequest( - REVERT_TO_SNAPSHOT_VERSION) - - self.assertRaises(webob.exc.HTTPBadRequest, self.controller.revert, - req, 'fake_id', {'revert': {'snapshot_id': - 'fake_snapshot_id'}}) - - @mock.patch.object(objects.Volume, 'get_latest_snapshot') - @mock.patch.object(volume_api.API, 'get_volume') - def test_volume_revert_with_snapshot_not_match(self, mock_volume, - mock_latest): - fake_volume = self._fake_create_volume() - mock_volume.return_value = fake_volume - fake_snapshot = self._fake_create_snapshot(fake.UUID1) - mock_latest.return_value = fake_snapshot - req = fakes.HTTPRequest.blank('/v3/volumes/fake_id/revert') - req.headers = {'OpenStack-API-Version': - 'volume %s' % REVERT_TO_SNAPSHOT_VERSION} - req.api_version_request = api_version.APIVersionRequest( - REVERT_TO_SNAPSHOT_VERSION) - - 
self.assertRaises(webob.exc.HTTPBadRequest, self.controller.revert, - req, 'fake_id', {'revert': {'snapshot_id': - 'fake_snapshot_id'}}) - - @mock.patch.object(objects.Volume, 'get_latest_snapshot') - @mock.patch('cinder.objects.base.' - 'CinderPersistentObject.update_single_status_where') - @mock.patch.object(volume_api.API, 'get_volume') - def test_volume_revert_update_status_failed(self, - mock_volume, - mock_update, - mock_latest): - fake_volume = self._fake_create_volume() - fake_snapshot = self._fake_create_snapshot(fake_volume['id']) - mock_volume.return_value = fake_volume - mock_latest.return_value = fake_snapshot - req = fakes.HTTPRequest.blank('/v3/volumes/%s/revert' - % fake_volume['id']) - req.headers = {'OpenStack-API-Version': - 'volume %s' % REVERT_TO_SNAPSHOT_VERSION} - req.api_version_request = api_version.APIVersionRequest( - REVERT_TO_SNAPSHOT_VERSION) - # update volume's status failed - mock_update.side_effect = [False, True] - - self.assertRaises(webob.exc.HTTPConflict, self.controller.revert, - req, fake_volume['id'], {'revert': {'snapshot_id': - fake_snapshot['id']}}) - - # update snapshot's status failed - mock_update.side_effect = [True, False] - - self.assertRaises(webob.exc.HTTPConflict, self.controller.revert, - req, fake_volume['id'], {'revert': {'snapshot_id': - fake_snapshot['id']}}) diff --git a/cinder/tests/unit/api/v3/test_workers.py b/cinder/tests/unit/api/v3/test_workers.py deleted file mode 100644 index 9fe9e34aa..000000000 --- a/cinder/tests/unit/api/v3/test_workers.py +++ /dev/null @@ -1,159 +0,0 @@ -# Copyright (c) 2016 Red Hat, Inc. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. 
You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import ddt -import mock -from oslo_serialization import jsonutils -from six.moves import http_client -import webob - -from cinder.api.v3 import router as router_v3 -from cinder.api.v3 import workers -from cinder import context -from cinder import objects -from cinder import test -from cinder.tests.unit.api import fakes -from cinder.tests.unit import fake_constants as fake - - -SERVICES = ( - [objects.Service(id=1, host='host1', binary='cinder-volume', - cluster_name='mycluster'), - objects.Service(id=2, host='host2', binary='cinder-volume', - cluster_name='mycluster')], - [objects.Service(id=3, host='host3', binary='cinder-volume', - cluster_name='mycluster'), - objects.Service(id=4, host='host4', binary='cinder-volume', - cluster_name='mycluster')], -) - - -def app(): - # no auth, just let environ['cinder.context'] pass through - api = router_v3.APIRouter() - mapper = fakes.urlmap.URLMap() - mapper['/v3'] = api - return mapper - - -@ddt.ddt -class WorkersTestCase(test.TestCase): - """Tes Case for the cleanup of Workers entries.""" - def setUp(self): - super(WorkersTestCase, self).setUp() - - self.context = context.RequestContext(user_id=None, - project_id=fake.PROJECT_ID, - is_admin=True, - read_deleted='no', - overwrite=False) - self.controller = workers.create_resource() - - def _get_resp_post(self, body, version='3.24', ctxt=None): - """Helper to execute a POST workers API call.""" - req = webob.Request.blank('/v3/%s/workers/cleanup' % fake.PROJECT_ID) - req.method = 'POST' - req.headers['Content-Type'] = 'application/json' - 
req.headers['OpenStack-API-Version'] = 'volume ' + version - req.environ['cinder.context'] = ctxt or self.context - req.body = jsonutils.dump_as_bytes(body) - res = req.get_response(app()) - return res - - @mock.patch('cinder.scheduler.rpcapi.SchedulerAPI.work_cleanup') - def test_cleanup_old_api_version(self, rpc_mock): - res = self._get_resp_post({}, '3.19') - self.assertEqual(http_client.NOT_FOUND, res.status_code) - rpc_mock.assert_not_called() - - @mock.patch('cinder.scheduler.rpcapi.SchedulerAPI.work_cleanup') - def test_cleanup_not_authorized(self, rpc_mock): - ctxt = context.RequestContext(user_id=None, - project_id=fake.PROJECT_ID, - is_admin=False, - read_deleted='no', - overwrite=False) - res = self._get_resp_post({}, ctxt=ctxt) - self.assertEqual(http_client.FORBIDDEN, res.status_code) - rpc_mock.assert_not_called() - - @ddt.data({'fake_key': 'value'}, {'binary': 'nova-scheduler'}, - {'disabled': 'sure'}, {'is_up': 'nop'}, - {'resource_type': 'service'}, {'resource_id': 'non UUID'}) - @mock.patch('cinder.scheduler.rpcapi.SchedulerAPI.work_cleanup') - def test_cleanup_wrong_param(self, body, rpc_mock): - res = self._get_resp_post(body) - self.assertEqual(http_client.BAD_REQUEST, res.status_code) - if 'disabled' in body or 'is_up' in body: - expected = 'is not a boolean' - else: - expected = 'Invalid input' - self.assertIn(expected, res.json['badRequest']['message']) - rpc_mock.assert_not_called() - - def _expected_services(self, cleaning, unavailable): - def service_view(service): - return {'id': service.id, 'host': service.host, - 'binary': service.binary, - 'cluster_name': service.cluster_name} - return {'cleaning': [service_view(s) for s in cleaning], - 'unavailable': [service_view(s) for s in unavailable]} - - @ddt.data({'service_id': 10}, {'cluster_name': 'cluster_name'}, - {'host': 'hostname'}, {'binary': 'cinder-volume'}, - {'binary': 'cinder-scheduler'}, {'disabled': 'true'}, - {'is_up': 'no'}, {'resource_type': 'Volume'}, - {'resource_id': 
fake.VOLUME_ID, 'host': 'hostname'}) - @mock.patch('cinder.scheduler.rpcapi.SchedulerAPI.work_cleanup', - return_value=SERVICES) - def test_cleanup_params(self, body, rpc_mock): - res = self._get_resp_post(body) - self.assertEqual(http_client.ACCEPTED, res.status_code) - rpc_mock.assert_called_once_with(self.context, mock.ANY) - cleanup_request = rpc_mock.call_args[0][1] - for key, value in body.items(): - if key in ('disabled', 'is_up'): - value = value == 'true' - self.assertEqual(value, getattr(cleanup_request, key)) - self.assertEqual(self._expected_services(*SERVICES), res.json) - - @mock.patch('cinder.db.worker_get_all', - return_value=[mock.Mock(service_id=1, resource_type='Volume')]) - @mock.patch('cinder.scheduler.rpcapi.SchedulerAPI.work_cleanup', - return_value=SERVICES) - def test_cleanup_missing_location_ok(self, rpc_mock, worker_mock): - res = self._get_resp_post({'resource_id': fake.VOLUME_ID}) - self.assertEqual(http_client.ACCEPTED, res.status_code) - rpc_mock.assert_called_once_with(self.context, mock.ANY) - cleanup_request = rpc_mock.call_args[0][1] - self.assertEqual(fake.VOLUME_ID, cleanup_request.resource_id) - self.assertEqual(1, cleanup_request.service_id) - self.assertEqual('Volume', cleanup_request.resource_type) - self.assertEqual(self._expected_services(*SERVICES), res.json) - - @mock.patch('cinder.scheduler.rpcapi.SchedulerAPI.work_cleanup') - def test_cleanup_missing_location_fail_none(self, rpc_mock): - res = self._get_resp_post({'resource_id': fake.VOLUME_ID}) - self.assertEqual(http_client.BAD_REQUEST, res.status_code) - self.assertIn('Invalid input', res.json['badRequest']['message']) - rpc_mock.assert_not_called() - - @mock.patch('cinder.scheduler.rpcapi.SchedulerAPI.work_cleanup', - return_value=[1, 2]) - def test_cleanup_missing_location_fail_multiple(self, rpc_mock): - res = self._get_resp_post({'resource_id': fake.VOLUME_ID}) - self.assertEqual(http_client.BAD_REQUEST, res.status_code) - self.assertIn('Invalid input', 
res.json['badRequest']['message']) - rpc_mock.assert_not_called() diff --git a/cinder/tests/unit/api/views/__init__.py b/cinder/tests/unit/api/views/__init__.py deleted file mode 100644 index e69de29bb..000000000 diff --git a/cinder/tests/unit/api/views/test_versions.py b/cinder/tests/unit/api/views/test_versions.py deleted file mode 100644 index ae333cb88..000000000 --- a/cinder/tests/unit/api/views/test_versions.py +++ /dev/null @@ -1,154 +0,0 @@ -# Copyright 2015 Clinton Knight -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -import copy - -import ddt - -from cinder.api.views import versions -from cinder import test - - -class FakeRequest(object): - def __init__(self, application_url): - self.application_url = application_url - - -URL_BASE = 'http://localhost/' -FAKE_HREF = URL_BASE + 'v1/' - -FAKE_VERSIONS = { - "v1.0": { - "id": "v1.0", - "status": "CURRENT", - "version": "1.1", - "min_version": "1.0", - "updated": "2015-07-30T11:33:21Z", - "links": [ - { - "rel": "describedby", - "type": "text/html", - "href": 'http://docs.openstack.org/', - }, - ], - "media-types": [ - { - "base": "application/json", - "type": "application/vnd.openstack.share+json;version=1", - } - ], - }, -} - -FAKE_LINKS = [ - { - "rel": "describedby", - "type": "text/html", - "href": 'http://docs.openstack.org/', - }, - { - 'rel': 'self', - 'href': FAKE_HREF - }, -] - - -@ddt.ddt -class ViewBuilderTestCase(test.TestCase): - - def _get_builder(self): - request = FakeRequest('fake') - return versions.get_view_builder(request) - - def test_build_versions(self): - - self.mock_object(versions.ViewBuilder, - '_build_links', - return_value=FAKE_LINKS) - - result = self._get_builder().build_versions(FAKE_VERSIONS) - - expected = {'versions': list(FAKE_VERSIONS.values())} - expected['versions'][0]['links'] = FAKE_LINKS - - self.assertEqual(expected, result) - - def test_build_version(self): - - self.mock_object(versions.ViewBuilder, - '_build_links', - return_value=FAKE_LINKS) - - result = self._get_builder()._build_version(FAKE_VERSIONS['v1.0']) - - expected = copy.deepcopy(FAKE_VERSIONS['v1.0']) - expected['links'] = FAKE_LINKS - - self.assertEqual(expected, result) - - def test_build_links(self): - - self.mock_object(versions.ViewBuilder, - '_generate_href', - return_value=FAKE_HREF) - - result = self._get_builder()._build_links(FAKE_VERSIONS['v1.0']) - - self.assertEqual(FAKE_LINKS, result) - - def test_generate_href_defaults(self): - - self.mock_object(versions.ViewBuilder, - '_get_base_url_without_version', - 
return_value=URL_BASE) - - result = self._get_builder()._generate_href() - - self.assertEqual('http://localhost/v3/', result) - - @ddt.data( - ('v2', None, URL_BASE + 'v2/'), - ('/v2/', None, URL_BASE + 'v2/'), - ('/v2/', 'fake_path', URL_BASE + 'v2/fake_path'), - ('/v2/', '/fake_path/', URL_BASE + 'v2/fake_path/'), - ) - @ddt.unpack - def test_generate_href_no_path(self, version, path, expected): - - self.mock_object(versions.ViewBuilder, - '_get_base_url_without_version', - return_value=URL_BASE) - - result = self._get_builder()._generate_href(version=version, - path=path) - - self.assertEqual(expected, result) - - @ddt.data( - ('http://1.1.1.1/', 'http://1.1.1.1/'), - ('http://localhost/', 'http://localhost/'), - ('http://1.1.1.1/v1/', 'http://1.1.1.1/'), - ('http://1.1.1.1/v1', 'http://1.1.1.1/'), - ('http://1.1.1.1/v11', 'http://1.1.1.1/'), - ) - @ddt.unpack - def test_get_base_url_without_version(self, base_url, base_url_no_version): - - request = FakeRequest(base_url) - builder = versions.get_view_builder(request) - - result = builder._get_base_url_without_version() - - self.assertEqual(base_url_no_version, result) diff --git a/cinder/tests/unit/attachments/__init__.py b/cinder/tests/unit/attachments/__init__.py deleted file mode 100644 index e69de29bb..000000000 diff --git a/cinder/tests/unit/attachments/test_attachments_api.py b/cinder/tests/unit/attachments/test_attachments_api.py deleted file mode 100644 index a1fcaaf45..000000000 --- a/cinder/tests/unit/attachments/test_attachments_api.py +++ /dev/null @@ -1,180 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. 
You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import mock -from oslo_config import cfg - -from cinder import context -from cinder import db -from cinder import exception -from cinder import objects -from cinder import test -from cinder.tests.unit import fake_constants as fake -from cinder.tests.unit import utils as tests_utils -from cinder.volume import api as volume_api -from cinder.volume import configuration as conf - -CONF = cfg.CONF - - -class AttachmentManagerTestCase(test.TestCase): - """Attachment related test for volume/api.py.""" - - def setUp(self): - """Setup test class.""" - super(AttachmentManagerTestCase, self).setUp() - self.configuration = mock.Mock(conf.Configuration) - self.context = context.get_admin_context() - self.context.user_id = fake.USER_ID - self.project_id = fake.PROJECT3_ID - self.context.project_id = self.project_id - self.volume_api = volume_api.API() - - @mock.patch('cinder.volume.api.check_policy') - def test_attachment_create_no_connector(self, mock_policy): - """Test attachment_create no connector.""" - volume_params = {'status': 'available'} - - vref = tests_utils.create_volume(self.context, **volume_params) - aref = self.volume_api.attachment_create(self.context, - vref, - fake.UUID2) - self.assertEqual(fake.UUID2, aref.instance_uuid) - self.assertIsNone(aref.attach_time) - self.assertEqual('reserved', aref.attach_status) - self.assertIsNone(aref.attach_mode) - self.assertEqual(vref.id, aref.volume_id) - self.assertEqual({}, aref.connection_info) - - @mock.patch('cinder.volume.api.check_policy') - @mock.patch('cinder.volume.rpcapi.VolumeAPI.attachment_update') - 
def test_attachment_create_with_connector(self, - mock_rpc_attachment_update, - mock_policy): - """Test attachment_create with connector.""" - volume_params = {'status': 'available'} - connection_info = {'fake_key': 'fake_value', - 'fake_key2': ['fake_value1', 'fake_value2']} - mock_rpc_attachment_update.return_value = connection_info - - vref = tests_utils.create_volume(self.context, **volume_params) - connector = {'fake': 'connector'} - attachment = self.volume_api.attachment_create(self.context, - vref, - fake.UUID2, - connector) - mock_rpc_attachment_update.assert_called_once_with(self.context, - mock.ANY, - connector, - mock.ANY) - new_attachment = objects.VolumeAttachment.get_by_id(self.context, - attachment.id) - self.assertEqual(connection_info, new_attachment.connection_info) - - @mock.patch('cinder.volume.api.check_policy') - @mock.patch('cinder.volume.rpcapi.VolumeAPI.attachment_delete') - def test_attachment_delete_reserved(self, - mock_rpc_attachment_delete, - mock_policy): - """Test attachment_delete with reserved.""" - volume_params = {'status': 'available'} - - vref = tests_utils.create_volume(self.context, **volume_params) - aref = self.volume_api.attachment_create(self.context, - vref, - fake.UUID2) - aobj = objects.VolumeAttachment.get_by_id(self.context, - aref.id) - self.assertEqual('reserved', aref.attach_status) - self.assertEqual(vref.id, aref.volume_id) - self.volume_api.attachment_delete(self.context, - aobj) - - # Since it's just reserved and never finalized, we should never make an - # rpc call - mock_rpc_attachment_delete.assert_not_called() - - @mock.patch('cinder.volume.api.check_policy') - @mock.patch('cinder.volume.rpcapi.VolumeAPI.attachment_delete') - @mock.patch('cinder.volume.rpcapi.VolumeAPI.attachment_update') - def test_attachment_create_update_and_delete( - self, - mock_rpc_attachment_update, - mock_rpc_attachment_delete, - mock_policy): - """Test attachment_delete.""" - volume_params = {'status': 'available'} - 
connection_info = {'fake_key': 'fake_value', - 'fake_key2': ['fake_value1', 'fake_value2']} - mock_rpc_attachment_update.return_value = connection_info - - vref = tests_utils.create_volume(self.context, **volume_params) - aref = self.volume_api.attachment_create(self.context, - vref, - fake.UUID2) - aref = objects.VolumeAttachment.get_by_id(self.context, - aref.id) - vref = objects.Volume.get_by_id(self.context, - vref.id) - - connector = {'fake': 'connector'} - self.volume_api.attachment_update(self.context, - aref, - connector) - aref = objects.VolumeAttachment.get_by_id(self.context, - aref.id) - self.assertEqual(connection_info, aref.connection_info) - # We mock the actual call that updates the status - # so force it here - values = {'volume_id': vref.id, - 'volume_host': vref.host, - 'attach_status': 'attached', - 'instance_uuid': fake.UUID2} - aref = db.volume_attach(self.context, values) - - aref = objects.VolumeAttachment.get_by_id(self.context, - aref.id) - self.assertEqual(vref.id, aref.volume_id) - self.volume_api.attachment_delete(self.context, - aref) - - mock_rpc_attachment_delete.assert_called_once_with(self.context, - aref.id, - mock.ANY) - - @mock.patch('cinder.volume.api.check_policy') - def test_additional_attachment_create_no_connector(self, mock_policy): - """Test attachment_create no connector.""" - volume_params = {'status': 'available'} - - vref = tests_utils.create_volume(self.context, **volume_params) - aref = self.volume_api.attachment_create(self.context, - vref, - fake.UUID2) - self.assertEqual(fake.UUID2, aref.instance_uuid) - self.assertIsNone(aref.attach_time) - self.assertEqual('reserved', aref.attach_status) - self.assertIsNone(aref.attach_mode) - self.assertEqual(vref.id, aref.volume_id) - self.assertEqual({}, aref.connection_info) - - self.assertRaises(exception.InvalidVolume, - self.volume_api.attachment_create, - self.context, - vref, - fake.UUID1) - self.volume_api.attachment_create(self.context, - vref, - fake.UUID2) - vref = 
objects.Volume.get_by_id(self.context, - vref.id) - self.assertEqual(2, len(vref.volume_attachment)) diff --git a/cinder/tests/unit/attachments/test_attachments_manager.py b/cinder/tests/unit/attachments/test_attachments_manager.py deleted file mode 100644 index 4f79b0bf0..000000000 --- a/cinder/tests/unit/attachments/test_attachments_manager.py +++ /dev/null @@ -1,199 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import mock -from oslo_config import cfg -from oslo_utils import importutils - -from cinder import context -from cinder import db -from cinder import exception -from cinder.objects import fields -from cinder.objects import volume_attachment -from cinder import test -from cinder.tests.unit import fake_constants as fake -from cinder.tests.unit import utils as tests_utils -from cinder.volume import configuration as conf - -CONF = cfg.CONF - - -class AttachmentManagerTestCase(test.TestCase): - """Attachment related test for volume.manager.py.""" - - def setUp(self): - """Setup test class.""" - super(AttachmentManagerTestCase, self).setUp() - self.manager = importutils.import_object(CONF.volume_manager) - self.configuration = mock.Mock(conf.Configuration) - self.context = context.get_admin_context() - self.context.user_id = fake.USER_ID - self.project_id = fake.PROJECT3_ID - self.context.project_id = self.project_id - self.manager.driver.set_initialized() - self.manager.stats = {'allocated_capacity_gb': 100, - 'pools': {}} - - @mock.patch.object(db, 
'volume_admin_metadata_update') - @mock.patch('cinder.message.api.API.create', mock.Mock()) - def test_attachment_update_with_readonly_volume(self, mock_update): - mock_update.return_value = {'readonly': 'True'} - vref = tests_utils.create_volume(self.context, **{'status': - 'available'}) - self.manager.create_volume(self.context, vref) - attachment_ref = db.volume_attach(self.context, - {'volume_id': vref.id, - 'volume_host': vref.host, - 'attach_status': 'reserved', - 'instance_uuid': fake.UUID1}) - - with mock.patch.object(self.manager, - '_notify_about_volume_usage', - return_value=None), mock.patch.object( - self.manager, '_connection_create'): - self.assertRaises(exception.InvalidVolumeAttachMode, - self.manager.attachment_update, - self.context, vref, {}, attachment_ref.id) - attachment = db.volume_attachment_get(self.context, - attachment_ref.id) - self.assertEqual(fields.VolumeAttachStatus.ERROR_ATTACHING, - attachment['attach_status']) - - def test_attachment_update(self): - """Test attachment_update.""" - volume_params = {'status': 'available'} - connector = { - "initiator": "iqn.1993-08.org.debian:01:cad181614cec", - "ip": "192.168.1.20", - "platform": "x86_64", - "host": "tempest-1", - "os_type": "linux2", - "multipath": False} - - vref = tests_utils.create_volume(self.context, **volume_params) - self.manager.create_volume(self.context, vref) - values = {'volume_id': vref.id, - 'attached_host': vref.host, - 'attach_status': 'reserved', - 'instance_uuid': fake.UUID1} - attachment_ref = db.volume_attach(self.context, values) - with mock.patch.object( - self.manager, '_notify_about_volume_usage'),\ - mock.patch.object( - self.manager.driver, 'attach_volume') as mock_attach: - expected = { - 'encrypted': False, - 'qos_specs': None, - 'access_mode': 'rw', - 'driver_volume_type': 'iscsi', - 'attachment_id': attachment_ref.id} - - self.assertEqual(expected, - self.manager.attachment_update( - self.context, - vref, - connector, - attachment_ref.id)) - 
mock_attach.assert_called_once_with(self.context, - vref, - attachment_ref.instance_uuid, - connector['host'], - "na") - - new_attachment_ref = db.volume_attachment_get(self.context, - attachment_ref.id) - self.assertEqual(attachment_ref.instance_uuid, - new_attachment_ref['instance_uuid']) - self.assertEqual(connector['host'], - new_attachment_ref['attached_host']) - self.assertEqual('na', new_attachment_ref['mountpoint']) - self.assertEqual('rw', new_attachment_ref['attach_mode']) - - new_volume_ref = db.volume_get(self.context, vref.id) - self.assertEqual('in-use', new_volume_ref.status) - self.assertEqual(fields.VolumeAttachStatus.ATTACHED, - new_volume_ref.attach_status) - - def test_attachment_delete(self): - """Test attachment_delete.""" - volume_params = {'status': 'available'} - - vref = tests_utils.create_volume(self.context, **volume_params) - self.manager.create_volume(self.context, vref) - values = {'volume_id': vref.id, - 'volume_host': vref.host, - 'attach_status': 'reserved', - 'instance_uuid': fake.UUID1} - attachment_ref = db.volume_attach(self.context, values) - attachment_ref = db.volume_attachment_get( - self.context, - attachment_ref['id']) - self.manager.attachment_delete(self.context, - attachment_ref['id'], - vref) - self.assertRaises(exception.VolumeAttachmentNotFound, - db.volume_attachment_get, - self.context, - attachment_ref.id) - - def test_attachment_delete_multiple_attachments(self): - volume_params = {'status': 'available'} - vref = tests_utils.create_volume(self.context, **volume_params) - attachment1 = volume_attachment.VolumeAttachment() - attachment2 = volume_attachment.VolumeAttachment() - - attachment1.id = fake.UUID1 - attachment2.id = fake.UUID2 - - @mock.patch.object(self.manager.db, 'volume_admin_metadata_delete') - @mock.patch.object(self.manager.db, 'volume_detached') - @mock.patch.object(self.context, 'elevated') - @mock.patch.object(self.manager, '_connection_terminate') - @mock.patch.object(self.manager.driver, 
'remove_export') - @mock.patch.object(self.manager.driver, 'detach_volume') - def _test(mock_detach, mock_rm_export, mock_con_term, - mock_elevated, mock_db_detached, mock_db_meta_delete): - mock_elevated.return_value = self.context - mock_con_term.return_value = False - - # test single attachment. This should call - # detach and remove_export - vref.volume_attachment.objects.append(attachment1) - - self.manager._do_attachment_delete(self.context, vref, attachment1) - - mock_detach.assert_called_once_with(self.context, vref, - attachment1) - mock_db_detached.called_once_with(self.context, vref, - attachment1.id) - mock_db_meta_delete.called_once_with(self.context, vref.id, - 'attached_mode') - mock_rm_export.assert_called_once_with(self.context, vref) - - # test more than 1 attachment. This should skip - # detach and remove_export - mock_con_term.return_value = True - vref.volume_attachment.objects.append(attachment2) - - mock_detach.reset_mock() - mock_rm_export.reset_mock() - mock_db_detached.reset_mock() - mock_db_meta_delete.reset_mock() - - self.manager._do_attachment_delete(self.context, vref, attachment2) - - mock_rm_export.assert_not_called() - mock_db_detached.called_once_with(self.context, vref, - attachment2.id) - mock_db_meta_delete.called_once_with(self.context, vref.id, - 'attached_mode') - _test() diff --git a/cinder/tests/unit/backup/__init__.py b/cinder/tests/unit/backup/__init__.py deleted file mode 100644 index e69de29bb..000000000 diff --git a/cinder/tests/unit/backup/drivers/__init__.py b/cinder/tests/unit/backup/drivers/__init__.py deleted file mode 100644 index e69de29bb..000000000 diff --git a/cinder/tests/unit/backup/drivers/test_backup_ceph.py b/cinder/tests/unit/backup/drivers/test_backup_ceph.py deleted file mode 100644 index 74371e574..000000000 --- a/cinder/tests/unit/backup/drivers/test_backup_ceph.py +++ /dev/null @@ -1,1271 +0,0 @@ -# Copyright 2013 Canonical Ltd. -# All Rights Reserved. 
-# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -""" Tests for Ceph backup service.""" - -import hashlib -import os -import tempfile -import uuid - -import ddt -import mock -from os_brick.initiator import linuxrbd -from oslo_concurrency import processutils -from oslo_config import cfg -from oslo_serialization import jsonutils -from oslo_utils import units -import six -from six.moves import range - -from cinder.backup import driver -from cinder.backup.drivers import ceph -from cinder import context -from cinder import db -from cinder import exception -from cinder.i18n import _ -from cinder import objects -from cinder import test -from cinder.tests.unit import fake_constants as fake - -# This is used to collect raised exceptions so that tests may check what was -# raised. -# NOTE: this must be initialised in test setUp(). -RAISED_EXCEPTIONS = [] - -CONF = cfg.CONF - - -class MockException(Exception): - - def __init__(self, *args, **kwargs): - RAISED_EXCEPTIONS.append(self.__class__) - - -class MockImageNotFoundException(MockException): - """Used as mock for rbd.ImageNotFound.""" - - -class MockImageBusyException(MockException): - """Used as mock for rbd.ImageBusy.""" - - -class MockObjectNotFoundException(MockException): - """Used as mock for rados.MockObjectNotFoundException.""" - - -def common_mocks(f): - """Decorator to set mocks common to all tests. - - The point of doing these mocks here is so that we don't accidentally set - mocks that can't/don't get unset. 
- """ - def _common_inner_inner1(inst, *args, **kwargs): - # NOTE(dosaboy): mock Popen to, by default, raise Exception in order to - # ensure that any test ending up in a subprocess fails - # if not properly mocked. - @mock.patch('subprocess.Popen', spec=True) - # NOTE(dosaboy): mock out eventlet.sleep() so that it does nothing. - @mock.patch('eventlet.sleep', spec=True) - @mock.patch('time.time', spec=True) - # NOTE(dosaboy): set spec to empty object so that hasattr calls return - # False by default. - @mock.patch('cinder.backup.drivers.ceph.rbd') - @mock.patch('cinder.backup.drivers.ceph.rados') - def _common_inner_inner2(mock_rados, mock_rbd, mock_time, mock_sleep, - mock_popen): - mock_time.side_effect = inst.time_inc - mock_popen.side_effect = Exception - - inst.mock_rados = mock_rados - inst.mock_rbd = mock_rbd - inst.mock_rbd.ImageBusy = MockImageBusyException - inst.mock_rbd.ImageNotFound = MockImageNotFoundException - inst.mock_rados.ObjectNotFound = MockObjectNotFoundException - - inst.service.rbd = inst.mock_rbd - inst.service.rados = inst.mock_rados - return f(inst, *args, **kwargs) - - return _common_inner_inner2() - - return _common_inner_inner1 - - -@ddt.ddt -class BackupCephTestCase(test.TestCase): - """Test case for ceph backup driver.""" - - def _create_volume_db_entry(self, id, size): - vol = {'id': id, 'size': size, 'status': 'available'} - return db.volume_create(self.ctxt, vol)['id'] - - def _create_backup_db_entry(self, backupid, volid, size, - userid=str(uuid.uuid4()), - projectid=str(uuid.uuid4())): - backup = {'id': backupid, 'size': size, 'volume_id': volid, - 'user_id': userid, 'project_id': projectid} - return db.backup_create(self.ctxt, backup)['id'] - - def time_inc(self): - self.counter += 1 - return self.counter - - def _get_wrapped_rbd_io(self, rbd_image): - rbd_meta = linuxrbd.RBDImageMetadata(rbd_image, 'pool_foo', - 'user_foo', 'conf_foo') - return linuxrbd.RBDVolumeIOWrapper(rbd_meta) - - def _setup_mock_popen(self, mock_popen, 
retval=None, p1hook=None, - p2hook=None): - - class MockPopen(object): - hooks = [p2hook, p1hook] - - def __init__(mock_inst, cmd, *args, **kwargs): - self.callstack.append('popen_init') - mock_inst.stdout = mock.Mock() - mock_inst.stdout.close = mock.Mock() - mock_inst.stdout.close.side_effect = \ - lambda *args: self.callstack.append('stdout_close') - mock_inst.returncode = 0 - hook = mock_inst.__class__.hooks.pop() - if hook is not None: - hook() - - def communicate(mock_inst): - self.callstack.append('communicate') - return retval - - mock_popen.side_effect = MockPopen - - def setUp(self): - global RAISED_EXCEPTIONS - RAISED_EXCEPTIONS = [] - super(BackupCephTestCase, self).setUp() - self.ctxt = context.get_admin_context() - - # Create volume. - self.volume_size = 1 - self.volume_id = str(uuid.uuid4()) - self._create_volume_db_entry(self.volume_id, self.volume_size) - self.volume = db.volume_get(self.ctxt, self.volume_id) - - # Create backup of volume. - self.backup_id = str(uuid.uuid4()) - self._create_backup_db_entry(self.backup_id, self.volume_id, - self.volume_size) - self.backup = objects.Backup.get_by_id(self.ctxt, self.backup_id) - self.backup.container = "backups" - - # Create alternate volume. - self.alt_volume_id = str(uuid.uuid4()) - self._create_volume_db_entry(self.alt_volume_id, self.volume_size) - self.alt_volume = db.volume_get(self.ctxt, self.alt_volume_id) - - self.chunk_size = 1024 - self.num_chunks = 128 - self.data_length = self.num_chunks * self.chunk_size - self.checksum = hashlib.sha256() - - # Create a file with some data in it. - self.volume_file = tempfile.NamedTemporaryFile() - self.addCleanup(self.volume_file.close) - for _i in range(0, self.num_chunks): - data = os.urandom(self.chunk_size) - self.checksum.update(data) - self.volume_file.write(data) - - self.volume_file.seek(0) - - # Always trigger an exception if a command is executed since it should - # always be dealt with gracefully. 
At time of writing on rbd - # export/import-diff is executed and if they fail we expect to find - # alternative means of backing up. - mock_exec = mock.Mock() - mock_exec.side_effect = processutils.ProcessExecutionError - - self.service = ceph.CephBackupDriver(self.ctxt, execute=mock_exec) - - # Ensure that time.time() always returns more than the last time it was - # called to avoid div by zero errors. - self.counter = float(0) - - self.callstack = [] - - @common_mocks - def test_get_rbd_support(self): - del self.service.rbd.RBD_FEATURE_LAYERING - del self.service.rbd.RBD_FEATURE_STRIPINGV2 - del self.service.rbd.RBD_FEATURE_EXCLUSIVE_LOCK - del self.service.rbd.RBD_FEATURE_JOURNALING - self.assertFalse(hasattr(self.service.rbd, 'RBD_FEATURE_LAYERING')) - self.assertFalse(hasattr(self.service.rbd, 'RBD_FEATURE_STRIPINGV2')) - self.assertFalse(hasattr(self.service.rbd, - 'RBD_FEATURE_EXCLUSIVE_LOCK')) - self.assertFalse(hasattr(self.service.rbd, 'RBD_FEATURE_JOURNALING')) - - oldformat, features = self.service._get_rbd_support() - self.assertTrue(oldformat) - self.assertEqual(0, features) - - self.service.rbd.RBD_FEATURE_LAYERING = 1 - - oldformat, features = self.service._get_rbd_support() - self.assertFalse(oldformat) - self.assertEqual(1, features) - - self.service.rbd.RBD_FEATURE_STRIPINGV2 = 2 - - oldformat, features = self.service._get_rbd_support() - self.assertFalse(oldformat) - self.assertEqual(1 | 2, features) - - # initially, backup_ceph_image_journals = False. test that - # the flags are defined, but that they are not returned. - self.service.rbd.RBD_FEATURE_EXCLUSIVE_LOCK = 4 - - oldformat, features = self.service._get_rbd_support() - self.assertFalse(oldformat) - self.assertEqual(1 | 2, features) - - self.service.rbd.RBD_FEATURE_JOURNALING = 64 - - oldformat, features = self.service._get_rbd_support() - self.assertFalse(oldformat) - self.assertEqual(1 | 2, features) - - # test that the config setting properly sets the FEATURE bits. 
- # because journaling requires exclusive-lock, these are set - # at the same time. - CONF.set_override("backup_ceph_image_journals", True) - oldformat, features = self.service._get_rbd_support() - self.assertFalse(oldformat) - self.assertEqual(1 | 2 | 4 | 64, features) - - @common_mocks - def test_get_most_recent_snap(self): - last = 'backup.%s.snap.9824923.1212' % (uuid.uuid4()) - - image = self.mock_rbd.Image.return_value - image.list_snaps.return_value = \ - [{'name': 'backup.%s.snap.6423868.2342' % (uuid.uuid4())}, - {'name': 'backup.%s.snap.1321319.3235' % (uuid.uuid4())}, - {'name': last}, - {'name': 'backup.%s.snap.3824923.1412' % (uuid.uuid4())}] - - snap = self.service._get_most_recent_snap(image) - self.assertEqual(last, snap) - - @common_mocks - def test_get_backup_snap_name(self): - snap_name = 'backup.%s.snap.3824923.1412' % (uuid.uuid4()) - - def get_backup_snaps(inst, *args): - return [{'name': 'backup.%s.snap.6423868.2342' % (uuid.uuid4()), - 'backup_id': str(uuid.uuid4())}, - {'name': snap_name, - 'backup_id': self.backup_id}] - - with mock.patch.object(self.service, 'get_backup_snaps'): - name = self.service._get_backup_snap_name(self.service.rbd.Image(), - 'base_foo', - self.backup_id) - self.assertIsNone(name) - - with mock.patch.object(self.service, 'get_backup_snaps') as \ - mock_get_backup_snaps: - mock_get_backup_snaps.side_effect = get_backup_snaps - name = self.service._get_backup_snap_name(self.service.rbd.Image(), - 'base_foo', - self.backup_id) - self.assertEqual(snap_name, name) - self.assertTrue(mock_get_backup_snaps.called) - - @common_mocks - def test_get_backup_snaps(self): - image = self.mock_rbd.Image.return_value - image.list_snaps.return_value = [ - {'name': 'backup.%s.snap.6423868.2342' % (uuid.uuid4())}, - {'name': 'backup.%s.wambam.6423868.2342' % (uuid.uuid4())}, - {'name': 'backup.%s.snap.1321319.3235' % (uuid.uuid4())}, - {'name': 'bbbackup.%s.snap.1321319.3235' % (uuid.uuid4())}, - {'name': 'backup.%s.snap.3824923.1412' 
% (uuid.uuid4())}] - snaps = self.service.get_backup_snaps(image) - self.assertEqual(3, len(snaps)) - - @common_mocks - def test_transfer_data_from_rbd_to_file(self): - def fake_read(offset, length): - self.volume_file.seek(offset) - return self.volume_file.read(length) - - self.mock_rbd.Image.return_value.read.side_effect = fake_read - self.mock_rbd.Image.return_value.size.return_value = self.data_length - - with tempfile.NamedTemporaryFile() as test_file: - self.volume_file.seek(0) - - rbd_io = self._get_wrapped_rbd_io(self.service.rbd.Image()) - self.service._transfer_data(rbd_io, 'src_foo', test_file, - 'dest_foo', self.data_length) - - checksum = hashlib.sha256() - test_file.seek(0) - for _c in range(0, self.num_chunks): - checksum.update(test_file.read(self.chunk_size)) - - # Ensure the files are equal - self.assertEqual(checksum.digest(), self.checksum.digest()) - - @common_mocks - def test_transfer_data_from_rbd_to_rbd(self): - def fake_read(offset, length): - self.volume_file.seek(offset) - return self.volume_file.read(length) - - def mock_write_data(data, offset): - checksum.update(data) - test_file.write(data) - - rbd1 = mock.Mock() - rbd1.read.side_effect = fake_read - rbd1.size.return_value = os.fstat(self.volume_file.fileno()).st_size - - rbd2 = mock.Mock() - rbd2.write.side_effect = mock_write_data - - with tempfile.NamedTemporaryFile() as test_file: - self.volume_file.seek(0) - checksum = hashlib.sha256() - - src_rbd_io = self._get_wrapped_rbd_io(rbd1) - dest_rbd_io = self._get_wrapped_rbd_io(rbd2) - self.service._transfer_data(src_rbd_io, 'src_foo', dest_rbd_io, - 'dest_foo', self.data_length) - - # Ensure the files are equal - self.assertEqual(checksum.digest(), self.checksum.digest()) - - @common_mocks - def test_transfer_data_from_file_to_rbd(self): - - def mock_write_data(data, offset): - checksum.update(data) - test_file.write(data) - - self.mock_rbd.Image.return_value.write.side_effect = mock_write_data - - with tempfile.NamedTemporaryFile() 
as test_file: - self.volume_file.seek(0) - checksum = hashlib.sha256() - - rbd_io = self._get_wrapped_rbd_io(self.service.rbd.Image()) - self.service._transfer_data(self.volume_file, 'src_foo', - rbd_io, 'dest_foo', self.data_length) - - # Ensure the files are equal - self.assertEqual(checksum.digest(), self.checksum.digest()) - - @common_mocks - def test_transfer_data_from_file_to_file(self): - with tempfile.NamedTemporaryFile() as test_file: - self.volume_file.seek(0) - checksum = hashlib.sha256() - - self.service._transfer_data(self.volume_file, 'src_foo', test_file, - 'dest_foo', self.data_length) - - checksum = hashlib.sha256() - test_file.seek(0) - for _c in range(0, self.num_chunks): - checksum.update(test_file.read(self.chunk_size)) - - # Ensure the files are equal - self.assertEqual(checksum.digest(), self.checksum.digest()) - - @common_mocks - def test_backup_volume_from_file(self): - checksum = hashlib.sha256() - - def mock_write_data(data, offset): - checksum.update(data) - test_file.write(data) - - self.service.rbd.Image.return_value.write.side_effect = mock_write_data - - with mock.patch.object(self.service, '_backup_metadata'): - with mock.patch.object(self.service, '_discard_bytes'): - with tempfile.NamedTemporaryFile() as test_file: - self.service.backup(self.backup, self.volume_file) - - # Ensure the files are equal - self.assertEqual(checksum.digest(), self.checksum.digest()) - - self.assertTrue(self.service.rbd.Image.return_value.write.called) - - @common_mocks - def test_get_backup_base_name(self): - name = self.service._get_backup_base_name(self.volume_id, - diff_format=True) - self.assertEqual("volume-%s.backup.base" % (self.volume_id), name) - - self.assertRaises(exception.InvalidParameterValue, - self.service._get_backup_base_name, - self.volume_id) - - name = self.service._get_backup_base_name(self.volume_id, '1234') - self.assertEqual("volume-%s.backup.%s" % (self.volume_id, '1234'), - name) - - @common_mocks - @mock.patch('fcntl.fcntl', 
spec=True) - @mock.patch('subprocess.Popen', spec=True) - def test_backup_volume_from_rbd(self, mock_popen, mock_fnctl): - backup_name = self.service._get_backup_base_name(self.backup_id, - diff_format=True) - - def mock_write_data(): - self.volume_file.seek(0) - data = self.volume_file.read(self.data_length) - self.callstack.append('write') - checksum.update(data) - test_file.write(data) - - def mock_read_data(): - self.callstack.append('read') - return self.volume_file.read(self.data_length) - - self._setup_mock_popen(mock_popen, - ['out', 'err'], - p1hook=mock_read_data, - p2hook=mock_write_data) - - self.mock_rbd.RBD.list = mock.Mock() - self.mock_rbd.RBD.list.return_value = [backup_name] - - with mock.patch.object(self.service, '_backup_metadata'): - with mock.patch.object(self.service, 'get_backup_snaps') as \ - mock_get_backup_snaps: - with mock.patch.object(self.service, '_full_backup') as \ - mock_full_backup: - with mock.patch.object(self.service, - '_try_delete_base_image'): - with tempfile.NamedTemporaryFile() as test_file: - checksum = hashlib.sha256() - image = self.service.rbd.Image() - meta = linuxrbd.RBDImageMetadata(image, - 'pool_foo', - 'user_foo', - 'conf_foo') - rbdio = linuxrbd.RBDVolumeIOWrapper(meta) - self.service.backup(self.backup, rbdio) - - self.assertEqual(['popen_init', - 'read', - 'popen_init', - 'write', - 'stdout_close', - 'communicate'], self.callstack) - - self.assertFalse(mock_full_backup.called) - self.assertTrue(mock_get_backup_snaps.called) - - # Ensure the files are equal - self.assertEqual(checksum.digest(), - self.checksum.digest()) - - @common_mocks - @mock.patch('fcntl.fcntl', spec=True) - @mock.patch('subprocess.Popen', spec=True) - def test_backup_volume_from_rbd_fail(self, mock_popen, mock_fnctl): - """Test of when an exception occurs in an exception handler. 
- - In _backup_rbd(), after an exception.BackupRBDOperationFailed - occurs in self._rbd_diff_transfer(), we want to check the - process when the second exception occurs in - self._try_delete_base_image(). - """ - backup_name = self.service._get_backup_base_name(self.backup_id, - diff_format=True) - - def mock_write_data(): - self.volume_file.seek(0) - data = self.volume_file.read(self.data_length) - self.callstack.append('write') - checksum.update(data) - test_file.write(data) - - def mock_read_data(): - self.callstack.append('read') - return self.volume_file.read(self.data_length) - - self._setup_mock_popen(mock_popen, - ['out', 'err'], - p1hook=mock_read_data, - p2hook=mock_write_data) - - self.mock_rbd.RBD.list = mock.Mock() - self.mock_rbd.RBD.list.return_value = [backup_name] - - with mock.patch.object(self.service, 'get_backup_snaps'), \ - mock.patch.object(self.service, '_rbd_diff_transfer') as \ - mock_rbd_diff_transfer: - def mock_rbd_diff_transfer_side_effect(src_name, src_pool, - dest_name, dest_pool, - src_user, src_conf, - dest_user, dest_conf, - src_snap, from_snap): - raise exception.BackupRBDOperationFailed(_('mock')) - - # Raise a pseudo exception.BackupRBDOperationFailed. - mock_rbd_diff_transfer.side_effect \ - = mock_rbd_diff_transfer_side_effect - - with mock.patch.object(self.service, '_full_backup'), \ - mock.patch.object(self.service, - '_try_delete_base_image') as \ - mock_try_delete_base_image: - def mock_try_delete_base_image_side_effect(backup_id, - base_name): - raise self.service.rbd.ImageNotFound(_('mock')) - - # Raise a pesudo exception rbd.ImageNotFound. 
- mock_try_delete_base_image.side_effect \ - = mock_try_delete_base_image_side_effect - with mock.patch.object(self.service, '_backup_metadata'): - with tempfile.NamedTemporaryFile() as test_file: - checksum = hashlib.sha256() - image = self.service.rbd.Image() - meta = linuxrbd.RBDImageMetadata(image, - 'pool_foo', - 'user_foo', - 'conf_foo') - rbdio = linuxrbd.RBDVolumeIOWrapper(meta) - - # We expect that the second exception is - # notified. - self.assertRaises( - self.service.rbd.ImageNotFound, - self.service.backup, - self.backup, rbdio) - - @common_mocks - @mock.patch('fcntl.fcntl', spec=True) - @mock.patch('subprocess.Popen', spec=True) - def test_backup_volume_from_rbd_fail2(self, mock_popen, mock_fnctl): - """Test of when an exception occurs in an exception handler. - - In backup(), after an exception.BackupOperationError occurs in - self._backup_metadata(), we want to check the process when the - second exception occurs in self.delete_backup(). - """ - backup_name = self.service._get_backup_base_name(self.backup_id, - diff_format=True) - - def mock_write_data(): - self.volume_file.seek(0) - data = self.volume_file.read(self.data_length) - self.callstack.append('write') - checksum.update(data) - test_file.write(data) - - def mock_read_data(): - self.callstack.append('read') - return self.volume_file.read(self.data_length) - - self._setup_mock_popen(mock_popen, - ['out', 'err'], - p1hook=mock_read_data, - p2hook=mock_write_data) - - self.mock_rbd.RBD.list = mock.Mock() - self.mock_rbd.RBD.list.return_value = [backup_name] - - with mock.patch.object(self.service, 'get_backup_snaps'), \ - mock.patch.object(self.service, '_rbd_diff_transfer'), \ - mock.patch.object(self.service, '_full_backup'), \ - mock.patch.object(self.service, '_backup_metadata') as \ - mock_backup_metadata: - - def mock_backup_metadata_side_effect(backup): - raise exception.BackupOperationError(_('mock')) - - # Raise a pseudo exception.BackupOperationError. 
- mock_backup_metadata.side_effect = mock_backup_metadata_side_effect - with mock.patch.object(self.service, 'delete_backup') as \ - mock_delete: - def mock_delete_side_effect(backup): - raise self.service.rbd.ImageBusy() - - # Raise a pseudo exception rbd.ImageBusy. - mock_delete.side_effect = mock_delete_side_effect - with tempfile.NamedTemporaryFile() as test_file: - checksum = hashlib.sha256() - image = self.service.rbd.Image() - meta = linuxrbd.RBDImageMetadata(image, - 'pool_foo', - 'user_foo', - 'conf_foo') - rbdio = linuxrbd.RBDVolumeIOWrapper(meta) - - # We expect that the second exception is - # notified. - self.assertRaises( - self.service.rbd.ImageBusy, - self.service.backup, - self.backup, rbdio) - - @common_mocks - def test_backup_vol_length_0(self): - volume_id = fake.VOLUME_ID - self._create_volume_db_entry(volume_id, 0) - backup_id = fake.BACKUP_ID - self._create_backup_db_entry(backup_id, volume_id, 1) - backup = objects.Backup.get_by_id(self.ctxt, backup_id) - - self.assertRaises(exception.InvalidParameterValue, self.service.backup, - backup, self.volume_file) - - @common_mocks - def test_backup_with_container_name(self): - volume_size = self.volume_size * units.Gi - backup_id = fake.BACKUP_ID - self._create_backup_db_entry(backup_id, self.volume_id, 1) - backup = objects.Backup.get_by_id(self.ctxt, backup_id) - backup.container = "test" - with mock.patch.object( - self.service, '_full_backup', - side_effect=exception.BackupOperationError()) as mock_full: - self.assertRaises(exception.BackupOperationError, - self.service.backup, backup, self.volume_file) - mock_full.assert_called_once_with(backup, self.volume_file, - self.volume.name, volume_size) - - @common_mocks - def test_restore(self): - backup_name = self.service._get_backup_base_name(self.backup_id, - diff_format=True) - - self.mock_rbd.RBD.return_value.list.return_value = [backup_name] - - def mock_read_data(offset, length): - return self.volume_file.read(self.data_length) - - 
self.mock_rbd.Image.return_value.read.side_effect = mock_read_data - - self.mock_rbd.Image.return_value.size.return_value = \ - self.chunk_size * self.num_chunks - - with mock.patch.object(self.service, '_restore_metadata') as \ - mock_restore_metadata: - with mock.patch.object(self.service, '_discard_bytes') as \ - mock_discard_bytes: - with tempfile.NamedTemporaryFile() as test_file: - self.volume_file.seek(0) - - self.service.restore(self.backup, self.volume_id, - test_file) - - checksum = hashlib.sha256() - test_file.seek(0) - for _c in range(0, self.num_chunks): - checksum.update(test_file.read(self.chunk_size)) - - # Ensure the files are equal - self.assertEqual(checksum.digest(), self.checksum.digest()) - - self.assertTrue(mock_restore_metadata.called) - self.assertTrue(mock_discard_bytes.called) - self.assertTrue(mock_discard_bytes.called) - - self.assertTrue(self.service.rbd.Image.return_value.read.called) - - @common_mocks - def test_discard_bytes(self): - # Lower the chunksize to a memory manageable number - self.service.chunk_size = 1024 - image = self.mock_rbd.Image.return_value - wrapped_rbd = self._get_wrapped_rbd_io(image) - - self.service._discard_bytes(wrapped_rbd, 0, 0) - self.assertEqual(0, image.discard.call_count) - - self.service._discard_bytes(wrapped_rbd, 0, 1234) - self.assertEqual(1, image.discard.call_count) - image.reset_mock() - - # Test discard with no remainder - with mock.patch.object(self.service, '_file_is_rbd') as \ - mock_file_is_rbd: - mock_file_is_rbd.return_value = False - - self.service._discard_bytes(wrapped_rbd, 0, - self.service.chunk_size * 2) - - self.assertEqual(2, image.write.call_count) - self.assertEqual(2, image.flush.call_count) - self.assertFalse(image.discard.called) - zeroes = '\0' * self.service.chunk_size - image.write.assert_has_calls([mock.call(zeroes, 0), - mock.call(zeroes, self.chunk_size)]) - - image.reset_mock() - image.write.reset_mock() - - # Now test with a remainder. 
- with mock.patch.object(self.service, '_file_is_rbd') as \ - mock_file_is_rbd: - mock_file_is_rbd.return_value = False - - self.service._discard_bytes(wrapped_rbd, 0, - (self.service.chunk_size * 2) + 1) - - self.assertEqual(3, image.write.call_count) - self.assertEqual(3, image.flush.call_count) - self.assertFalse(image.discard.called) - image.write.assert_has_calls([mock.call(zeroes, - self.chunk_size * 2), - mock.call(zeroes, - self.chunk_size * 3), - mock.call('\0', - self.chunk_size * 4)]) - - @common_mocks - def test_delete_backup_snapshot(self): - snap_name = 'backup.%s.snap.3824923.1412' % (uuid.uuid4()) - base_name = self.service._get_backup_base_name(self.volume_id, - diff_format=True) - self.mock_rbd.RBD.remove_snap = mock.Mock() - - with mock.patch.object(self.service, '_get_backup_snap_name') as \ - mock_get_backup_snap_name: - mock_get_backup_snap_name.return_value = snap_name - with mock.patch.object(self.service, 'get_backup_snaps') as \ - mock_get_backup_snaps: - mock_get_backup_snaps.return_value = None - rem = self.service._delete_backup_snapshot(self.mock_rados, - base_name, - self.backup_id) - - self.assertTrue(mock_get_backup_snap_name.called) - self.assertTrue(mock_get_backup_snaps.called) - self.assertEqual((snap_name, 0), rem) - - @common_mocks - @mock.patch('cinder.backup.drivers.ceph.VolumeMetadataBackup', spec=True) - def test_try_delete_base_image_diff_format(self, mock_meta_backup): - backup_name = self.service._get_backup_base_name(self.volume_id, - diff_format=True) - - self.mock_rbd.RBD.return_value.list.return_value = [backup_name] - - with mock.patch.object(self.service, '_delete_backup_snapshot') as \ - mock_del_backup_snap: - snap_name = self.service._get_new_snap_name(self.backup_id) - mock_del_backup_snap.return_value = (snap_name, 0) - - self.service.delete_backup(self.backup) - self.assertTrue(mock_del_backup_snap.called) - - self.assertTrue(self.mock_rbd.RBD.return_value.list.called) - 
self.assertTrue(self.mock_rbd.RBD.return_value.remove.called) - - @common_mocks - @mock.patch('cinder.backup.drivers.ceph.VolumeMetadataBackup', spec=True) - def test_try_delete_base_image(self, mock_meta_backup): - backup_name = self.service._get_backup_base_name(self.volume_id, - self.backup_id) - - self.mock_rbd.RBD.return_value.list.return_value = [backup_name] - - with mock.patch.object(self.service, 'get_backup_snaps'): - self.service.delete_backup(self.backup) - self.assertTrue(self.mock_rbd.RBD.return_value.remove.called) - - @common_mocks - def test_try_delete_base_image_busy(self): - """This should induce retries then raise rbd.ImageBusy.""" - backup_name = self.service._get_backup_base_name(self.volume_id, - self.backup_id) - - rbd = self.mock_rbd.RBD.return_value - rbd.list.return_value = [backup_name] - rbd.remove.side_effect = self.mock_rbd.ImageBusy - - with mock.patch.object(self.service, 'get_backup_snaps') as \ - mock_get_backup_snaps: - self.assertRaises(self.mock_rbd.ImageBusy, - self.service._try_delete_base_image, - self.backup) - self.assertTrue(mock_get_backup_snaps.called) - - self.assertTrue(rbd.list.called) - self.assertTrue(rbd.remove.called) - self.assertIn(MockImageBusyException, RAISED_EXCEPTIONS) - - @common_mocks - @mock.patch('cinder.backup.drivers.ceph.VolumeMetadataBackup', spec=True) - def test_delete(self, mock_meta_backup): - with mock.patch.object(self.service, '_try_delete_base_image'): - self.service.delete_backup(self.backup) - self.assertEqual([], RAISED_EXCEPTIONS) - - @common_mocks - @mock.patch('cinder.backup.drivers.ceph.VolumeMetadataBackup', spec=True) - def test_delete_image_not_found(self, mock_meta_backup): - with mock.patch.object(self.service, '_try_delete_base_image') as \ - mock_del_base: - mock_del_base.side_effect = self.mock_rbd.ImageNotFound - # ImageNotFound exception is caught so that db entry can be cleared - self.service.delete_backup(self.backup) - self.assertEqual([MockImageNotFoundException], 
RAISED_EXCEPTIONS) - - @common_mocks - @mock.patch('cinder.backup.drivers.ceph.VolumeMetadataBackup', spec=True) - def test_delete_pool_not_found(self, mock_meta_backup): - with mock.patch.object( - self.service, '_try_delete_base_image') as mock_del_base: - mock_del_base.side_effect = self.mock_rados.ObjectNotFound - # ObjectNotFound exception is caught so that db entry can be - # cleared - self.service.delete_backup(self.backup) - self.assertEqual([MockObjectNotFoundException], - RAISED_EXCEPTIONS) - mock_del_base.assert_called_once_with(self.backup) - mock_meta_backup.return_value.remove_if_exists.assert_not_called() - - @common_mocks - def test_diff_restore_allowed_with_image_not_exists(self): - """Test diff restore not allowed when backup not diff-format.""" - not_allowed = (False, None) - backup_base = 'backup.base' - rbd_io = self._get_wrapped_rbd_io(self.service.rbd.Image()) - args_vols_different = [backup_base, self.backup, self.alt_volume, - rbd_io, self.mock_rados] - - with mock.patch.object(self.service, '_rbd_image_exists') as \ - mock_rbd_image_exists: - mock_rbd_image_exists.return_value = (False, backup_base) - - resp = self.service._diff_restore_allowed(*args_vols_different) - - self.assertEqual(not_allowed, resp) - mock_rbd_image_exists.assert_called_once_with( - backup_base, - self.backup['volume_id'], - self.mock_rados) - - @common_mocks - def test_diff_restore_allowed_with_no_restore_point(self): - """Test diff restore not allowed when no restore point found. - - Detail conditions: - 1. backup base is diff-format - 2. 
restore point does not exist - """ - not_allowed = (False, None) - backup_base = 'backup.base' - rbd_io = self._get_wrapped_rbd_io(self.service.rbd.Image()) - args_vols_different = [backup_base, self.backup, self.alt_volume, - rbd_io, self.mock_rados] - - with mock.patch.object(self.service, '_rbd_image_exists') as \ - mock_rbd_image_exists: - mock_rbd_image_exists.return_value = (True, backup_base) - with mock.patch.object(self.service, '_get_restore_point') as \ - mock_get_restore_point: - mock_get_restore_point.return_value = None - - args = args_vols_different - resp = self.service._diff_restore_allowed(*args) - - self.assertEqual(not_allowed, resp) - self.assertTrue(mock_rbd_image_exists.called) - mock_get_restore_point.assert_called_once_with( - backup_base, - self.backup['id']) - - @common_mocks - def test_diff_restore_allowed_with_not_rbd(self): - """Test diff restore not allowed when destination volume is not rbd. - - Detail conditions: - 1. backup base is diff-format - 2. restore point exists - 3. destination volume is not an rbd. 
- """ - backup_base = 'backup.base' - restore_point = 'backup.snap.1' - rbd_io = self._get_wrapped_rbd_io(self.service.rbd.Image()) - args_vols_different = [backup_base, self.backup, self.alt_volume, - rbd_io, self.mock_rados] - - with mock.patch.object(self.service, '_rbd_image_exists') as \ - mock_rbd_image_exists: - mock_rbd_image_exists.return_value = (True, backup_base) - with mock.patch.object(self.service, '_get_restore_point') as \ - mock_get_restore_point: - mock_get_restore_point.return_value = restore_point - with mock.patch.object(self.service, '_file_is_rbd') as \ - mock_file_is_rbd: - mock_file_is_rbd.return_value = False - - args = args_vols_different - resp = self.service._diff_restore_allowed(*args) - - self.assertEqual((False, restore_point), resp) - self.assertTrue(mock_rbd_image_exists.called) - self.assertTrue(mock_get_restore_point.called) - mock_file_is_rbd.assert_called_once_with( - rbd_io) - - @common_mocks - def test_diff_restore_allowed_with_same_volume(self): - """Test diff restore not allowed when volumes are same. - - Detail conditions: - 1. backup base is diff-format - 2. restore point exists - 3. destination volume is an rbd - 4. 
source and destination volumes are the same - """ - backup_base = 'backup.base' - restore_point = 'backup.snap.1' - rbd_io = self._get_wrapped_rbd_io(self.service.rbd.Image()) - args_vols_same = [backup_base, self.backup, self.volume, rbd_io, - self.mock_rados] - - with mock.patch.object(self.service, '_rbd_image_exists') as \ - mock_rbd_image_exists: - mock_rbd_image_exists.return_value = (True, backup_base) - with mock.patch.object(self.service, '_get_restore_point') as \ - mock_get_restore_point: - mock_get_restore_point.return_value = restore_point - with mock.patch.object(self.service, '_file_is_rbd') as \ - mock_file_is_rbd: - mock_file_is_rbd.return_value = True - - resp = self.service._diff_restore_allowed(*args_vols_same) - - self.assertEqual((False, restore_point), resp) - self.assertTrue(mock_rbd_image_exists.called) - self.assertTrue(mock_get_restore_point.called) - self.assertTrue(mock_file_is_rbd.called) - - @common_mocks - def test_diff_restore_allowed_with_has_extents(self): - """Test diff restore not allowed when destination volume has data. - - Detail conditions: - 1. backup base is diff-format - 2. restore point exists - 3. destination volume is an rbd - 4. source and destination volumes are different - 5. 
destination volume has data on it - full copy is mandated - """ - backup_base = 'backup.base' - restore_point = 'backup.snap.1' - rbd_io = self._get_wrapped_rbd_io(self.service.rbd.Image()) - args_vols_different = [backup_base, self.backup, self.alt_volume, - rbd_io, self.mock_rados] - - with mock.patch.object(self.service, '_rbd_image_exists') as \ - mock_rbd_image_exists: - mock_rbd_image_exists.return_value = (True, backup_base) - with mock.patch.object(self.service, '_get_restore_point') as \ - mock_get_restore_point: - mock_get_restore_point.return_value = restore_point - with mock.patch.object(self.service, '_file_is_rbd') as \ - mock_file_is_rbd: - mock_file_is_rbd.return_value = True - with mock.patch.object(self.service, '_rbd_has_extents') \ - as mock_rbd_has_extents: - mock_rbd_has_extents.return_value = True - - args = args_vols_different - resp = self.service._diff_restore_allowed(*args) - - self.assertEqual((False, restore_point), resp) - self.assertTrue(mock_rbd_image_exists.called) - self.assertTrue(mock_get_restore_point.called) - self.assertTrue(mock_file_is_rbd.called) - mock_rbd_has_extents.assert_called_once_with( - rbd_io.rbd_image) - - @common_mocks - def test_diff_restore_allowed_with_no_extents(self): - """Test diff restore allowed when no data in destination volume. - - Detail conditions: - 1. backup base is diff-format - 2. restore point exists - 3. destination volume is an rbd - 4. source and destination volumes are different - 5. 
destination volume no data on it - """ - backup_base = 'backup.base' - restore_point = 'backup.snap.1' - rbd_io = self._get_wrapped_rbd_io(self.service.rbd.Image()) - args_vols_different = [backup_base, self.backup, self.alt_volume, - rbd_io, self.mock_rados] - - with mock.patch.object(self.service, '_rbd_image_exists') as \ - mock_rbd_image_exists: - mock_rbd_image_exists.return_value = (True, backup_base) - with mock.patch.object(self.service, '_get_restore_point') as \ - mock_get_restore_point: - mock_get_restore_point.return_value = restore_point - with mock.patch.object(self.service, '_file_is_rbd') as \ - mock_file_is_rbd: - mock_file_is_rbd.return_value = True - with mock.patch.object(self.service, '_rbd_has_extents') \ - as mock_rbd_has_extents: - mock_rbd_has_extents.return_value = False - - args = args_vols_different - resp = self.service._diff_restore_allowed(*args) - - self.assertEqual((True, restore_point), resp) - self.assertTrue(mock_rbd_image_exists.called) - self.assertTrue(mock_get_restore_point.called) - self.assertTrue(mock_file_is_rbd.called) - self.assertTrue(mock_rbd_has_extents.called) - - @common_mocks - @mock.patch('fcntl.fcntl', spec=True) - @mock.patch('subprocess.Popen', spec=True) - def test_piped_execute(self, mock_popen, mock_fcntl): - mock_fcntl.return_value = 0 - self._setup_mock_popen(mock_popen, ['out', 'err']) - self.service._piped_execute(['foo'], ['bar']) - self.assertEqual(['popen_init', 'popen_init', - 'stdout_close', 'communicate'], self.callstack) - - @common_mocks - def test_restore_metdata(self): - version = 2 - - def mock_read(*args): - base_tag = driver.BackupMetadataAPI.TYPE_TAG_VOL_BASE_META - glance_tag = driver.BackupMetadataAPI.TYPE_TAG_VOL_GLANCE_META - return jsonutils.dumps({base_tag: {'image_name': 'image.base'}, - glance_tag: {'image_name': 'image.glance'}, - 'version': version}) - - self.mock_rados.Object.return_value.read.side_effect = mock_read - - self.service._restore_metadata(self.backup, 
self.volume_id) - - self.assertTrue(self.mock_rados.Object.return_value.stat.called) - self.assertTrue(self.mock_rados.Object.return_value.read.called) - - version = 3 - try: - self.service._restore_metadata(self.backup, self.volume_id) - except exception.BackupOperationError as exc: - msg = _("Metadata restore failed due to incompatible version") - self.assertEqual(msg, six.text_type(exc)) - else: - # Force a test failure - self.assertFalse(True) - - @common_mocks - @mock.patch('cinder.backup.drivers.ceph.VolumeMetadataBackup', spec=True) - def test_backup_metadata_already_exists(self, mock_meta_backup): - - def mock_set(json_meta): - msg = (_("Metadata backup object '%s' already exists") % - ("backup.%s.meta" % (self.backup_id))) - raise exception.VolumeMetadataBackupExists(msg) - - mock_meta_backup.return_value.set = mock.Mock() - mock_meta_backup.return_value.set.side_effect = mock_set - - with mock.patch.object(self.service, 'get_metadata') as \ - mock_get_metadata: - mock_get_metadata.return_value = "some.json.metadata" - try: - self.service._backup_metadata(self.backup) - except exception.BackupOperationError as e: - msg = (_("Failed to backup volume metadata - Metadata backup " - "object 'backup.%s.meta' already exists") % - (self.backup_id)) - self.assertEqual(msg, six.text_type(e)) - else: - # Make the test fail - self.assertFalse(True) - - self.assertFalse(mock_meta_backup.set.called) - - @common_mocks - def test_backup_metadata_error(self): - """Ensure that delete_backup() is called if the metadata backup fails. - - Also ensure that the exception is propagated to the caller. 
- """ - with mock.patch.object(self.service, '_backup_metadata') as \ - mock_backup_metadata: - mock_backup_metadata.side_effect = exception.BackupOperationError - with mock.patch.object(self.service, '_get_volume_size_gb'): - with mock.patch.object(self.service, '_file_is_rbd', - return_value=False): - with mock.patch.object(self.service, '_full_backup'): - with mock.patch.object(self.service, 'delete_backup') as \ - mock_delete: - self.assertRaises(exception.BackupOperationError, - self.service.backup, self.backup, - mock.Mock(), - backup_metadata=True) - self.assertTrue(mock_delete.called) - - @common_mocks - def test_restore_invalid_metadata_version(self): - - def mock_read(*args): - base_tag = driver.BackupMetadataAPI.TYPE_TAG_VOL_BASE_META - glance_tag = driver.BackupMetadataAPI.TYPE_TAG_VOL_GLANCE_META - return jsonutils.dumps({base_tag: {'image_name': 'image.base'}, - glance_tag: {'image_name': 'image.glance'}, - 'version': 3}) - - self.mock_rados.Object.return_value.read.side_effect = mock_read - with mock.patch.object(ceph.VolumeMetadataBackup, '_exists') as \ - mock_exists: - mock_exists.return_value = True - - self.assertRaises(exception.BackupOperationError, - self.service._restore_metadata, - self.backup, self.volume_id) - - self.assertTrue(mock_exists.called) - - self.assertTrue(self.mock_rados.Object.return_value.read.called) - - @ddt.data((None, False), - ([{'name': 'test'}], False), - ([{'name': 'test'}, {'name': 'fake'}], True)) - @ddt.unpack - @common_mocks - def test__snap_exists(self, snapshots, snap_exist): - client = mock.Mock() - with mock.patch.object(self.service.rbd.Image(), - 'list_snaps') as snaps: - snaps.return_value = snapshots - exist = self.service._snap_exists(None, 'fake', client) - self.assertEqual(snap_exist, exist) - - -def common_meta_backup_mocks(f): - """Decorator to set mocks common to all metadata backup tests. 
- - The point of doing these mocks here is so that we don't accidentally set - mocks that can't/don't get unset. - """ - def _common_inner_inner1(inst, *args, **kwargs): - @mock.patch('cinder.backup.drivers.ceph.rbd') - @mock.patch('cinder.backup.drivers.ceph.rados') - def _common_inner_inner2(mock_rados, mock_rbd): - inst.mock_rados = mock_rados - inst.mock_rbd = mock_rbd - inst.mock_rados.ObjectNotFound = MockObjectNotFoundException - return f(inst, *args, **kwargs) - - return _common_inner_inner2() - return _common_inner_inner1 - - -class VolumeMetadataBackupTestCase(test.TestCase): - - def setUp(self): - global RAISED_EXCEPTIONS - RAISED_EXCEPTIONS = [] - super(VolumeMetadataBackupTestCase, self).setUp() - self.backup_id = str(uuid.uuid4()) - self.mb = ceph.VolumeMetadataBackup(mock.Mock(), self.backup_id) - - @common_meta_backup_mocks - def test_name(self): - self.assertEqual('backup.%s.meta' % (self.backup_id), self.mb.name) - - @common_meta_backup_mocks - def test_exists(self): - # True - self.assertTrue(self.mb.exists) - self.assertTrue(self.mock_rados.Object.return_value.stat.called) - self.mock_rados.Object.return_value.reset_mock() - - # False - self.mock_rados.Object.return_value.stat.side_effect = ( - self.mock_rados.ObjectNotFound) - self.assertFalse(self.mb.exists) - self.assertTrue(self.mock_rados.Object.return_value.stat.called) - self.assertEqual([MockObjectNotFoundException], RAISED_EXCEPTIONS) - - @common_meta_backup_mocks - def test_set(self): - obj_data = [] - called = [] - - def mock_read(*args): - called.append('read') - self.assertEqual(1, len(obj_data)) - return obj_data[0] - - def _mock_write(data): - obj_data.append(data) - called.append('write') - - self.mb.get = mock.Mock() - self.mb.get.side_effect = mock_read - - with mock.patch.object(ceph.VolumeMetadataBackup, 'set') as mock_write: - mock_write.side_effect = _mock_write - - self.mb.set({'foo': 'bar'}) - self.assertEqual({'foo': 'bar'}, self.mb.get()) - 
self.assertTrue(self.mb.get.called) - - self.mb._exists = mock.Mock() - self.mb._exists.return_value = True - - # use the unmocked set() method. - self.assertRaises(exception.VolumeMetadataBackupExists, self.mb.set, - {'doo': 'dah'}) - - # check the meta obj state has not changed. - self.assertEqual({'foo': 'bar'}, self.mb.get()) - - self.assertEqual(['write', 'read', 'read'], called) - - @common_meta_backup_mocks - def test_get(self): - self.mock_rados.Object.return_value.stat.side_effect = ( - self.mock_rados.ObjectNotFound) - self.mock_rados.Object.return_value.read.return_value = 'meta' - self.assertIsNone(self.mb.get()) - self.mock_rados.Object.return_value.stat.side_effect = None - self.assertEqual('meta', self.mb.get()) - - @common_meta_backup_mocks - def remove_if_exists(self): - with mock.patch.object(self.mock_rados.Object, 'remove') as \ - mock_remove: - mock_remove.side_effect = self.mock_rados.ObjectNotFound - self.mb.remove_if_exists() - self.assertEqual([MockObjectNotFoundException], RAISED_EXCEPTIONS) - - self.mock_rados.Object.remove.side_effect = None - self.mb.remove_if_exists() - self.assertEqual([], RAISED_EXCEPTIONS) diff --git a/cinder/tests/unit/backup/drivers/test_backup_driver_base.py b/cinder/tests/unit/backup/drivers/test_backup_driver_base.py deleted file mode 100644 index 255830d9a..000000000 --- a/cinder/tests/unit/backup/drivers/test_backup_driver_base.py +++ /dev/null @@ -1,424 +0,0 @@ -# Copyright 2013 Canonical Ltd. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the -# License for the specific language governing permissions and limitations -# under the License. -""" Tests for the backup service base driver. """ - -import uuid - -import mock -from oslo_serialization import jsonutils - -from cinder.backup import driver -from cinder import context -from cinder import db -from cinder import exception -from cinder import keymgr as key_manager -from cinder import objects -from cinder import test -from cinder.tests.unit.backup import fake_service -from cinder.volume import volume_types - -_backup_db_fields = ['id', 'user_id', 'project_id', - 'volume_id', 'host', 'availability_zone', - 'display_name', 'display_description', - 'container', 'status', 'fail_reason', - 'service_metadata', 'service', 'size', - 'object_count'] - - -class BackupBaseDriverTestCase(test.TestCase): - - def _create_volume_db_entry(self, id, size): - vol = {'id': id, 'size': size, 'status': 'available'} - return db.volume_create(self.ctxt, vol)['id'] - - def _create_backup_db_entry(self, backupid, volid, size, - userid=str(uuid.uuid4()), - projectid=str(uuid.uuid4())): - backup = {'id': backupid, 'size': size, 'volume_id': volid, - 'user_id': userid, 'project_id': projectid} - return db.backup_create(self.ctxt, backup)['id'] - - def setUp(self): - super(BackupBaseDriverTestCase, self).setUp() - self.ctxt = context.get_admin_context() - - self.volume_id = str(uuid.uuid4()) - self.backup_id = str(uuid.uuid4()) - - self._create_backup_db_entry(self.backup_id, self.volume_id, 1) - self._create_volume_db_entry(self.volume_id, 1) - self.backup = objects.Backup.get_by_id(self.ctxt, self.backup_id) - self.driver = fake_service.FakeBackupService(self.ctxt) - - def test_get_metadata(self): - json_metadata = self.driver.get_metadata(self.volume_id) - metadata = jsonutils.loads(json_metadata) - self.assertEqual(2, metadata['version']) - - def test_put_metadata(self): - metadata = {'version': 1} - self.driver.put_metadata(self.volume_id, jsonutils.dumps(metadata)) - - 
def test_get_put_metadata(self): - json_metadata = self.driver.get_metadata(self.volume_id) - self.driver.put_metadata(self.volume_id, json_metadata) - - def test_export_record(self): - export_record = self.driver.export_record(self.backup) - self.assertDictEqual({}, export_record) - - def test_import_record(self): - export_record = {'key1': 'value1'} - self.assertIsNone(self.driver.import_record(self.backup, - export_record)) - - -class BackupMetadataAPITestCase(test.TestCase): - - def _create_volume_db_entry(self, id, size, display_name, - display_description): - vol = {'id': id, 'size': size, 'status': 'available', - 'display_name': display_name, - 'display_description': display_description} - return db.volume_create(self.ctxt, vol)['id'] - - def setUp(self): - super(BackupMetadataAPITestCase, self).setUp() - self.ctxt = context.get_admin_context() - self.volume_id = str(uuid.uuid4()) - self.backup_id = str(uuid.uuid4()) - self.volume_display_name = 'vol-1' - self.volume_display_description = 'test vol' - self._create_volume_db_entry(self.volume_id, 1, - self.volume_display_name, - self.volume_display_description) - self.bak_meta_api = driver.BackupMetadataAPI(self.ctxt) - - def _add_metadata(self, vol_meta=False, vol_glance_meta=False): - if vol_meta: - # Add some VolumeMetadata - db.volume_metadata_update(self.ctxt, self.volume_id, - {'fee': 'fi'}, False) - db.volume_metadata_update(self.ctxt, self.volume_id, - {'fo': 'fum'}, False) - - if vol_glance_meta: - # Add some GlanceMetadata - db.volume_glance_metadata_create(self.ctxt, self.volume_id, - 'disk_format', 'bare') - db.volume_glance_metadata_create(self.ctxt, self.volume_id, - 'container_type', 'ovf') - - def test_get(self): - # Volume won't have anything other than base by default - meta = self.bak_meta_api.get(self.volume_id) - s1 = set(jsonutils.loads(meta).keys()) - s2 = ['version', self.bak_meta_api.TYPE_TAG_VOL_BASE_META] - self.assertEqual(set(), s1.symmetric_difference(s2)) - - 
self._add_metadata(vol_glance_meta=True) - - meta = self.bak_meta_api.get(self.volume_id) - s1 = set(jsonutils.loads(meta).keys()) - s2 = ['version', self.bak_meta_api.TYPE_TAG_VOL_BASE_META, - self.bak_meta_api.TYPE_TAG_VOL_GLANCE_META] - self.assertEqual(set(), s1.symmetric_difference(s2)) - - self._add_metadata(vol_meta=True) - - meta = self.bak_meta_api.get(self.volume_id) - s1 = set(jsonutils.loads(meta).keys()) - s2 = ['version', self.bak_meta_api.TYPE_TAG_VOL_BASE_META, - self.bak_meta_api.TYPE_TAG_VOL_GLANCE_META, - self.bak_meta_api.TYPE_TAG_VOL_META] - self.assertEqual(set(), s1.symmetric_difference(s2)) - - def test_put(self): - meta = self.bak_meta_api.get(self.volume_id) - self.bak_meta_api.put(self.volume_id, meta) - - self._add_metadata(vol_glance_meta=True) - meta = self.bak_meta_api.get(self.volume_id) - self.bak_meta_api.put(self.volume_id, meta) - - self._add_metadata(vol_meta=True) - meta = self.bak_meta_api.get(self.volume_id) - self.bak_meta_api.put(self.volume_id, meta) - - def test_put_invalid_version(self): - container = jsonutils.dumps({'version': 3}) - self.assertRaises(exception.BackupMetadataUnsupportedVersion, - self.bak_meta_api.put, self.volume_id, container) - - def test_v1_restore_factory(self): - fact = self.bak_meta_api._v1_restore_factory() - - keys = [self.bak_meta_api.TYPE_TAG_VOL_BASE_META, - self.bak_meta_api.TYPE_TAG_VOL_META, - self.bak_meta_api.TYPE_TAG_VOL_GLANCE_META] - - self.assertEqual(set([]), - set(keys).symmetric_difference(set(fact.keys()))) - - meta_container = {self.bak_meta_api.TYPE_TAG_VOL_BASE_META: - {'display_name': 'my-backed-up-volume', - 'display_description': 'backed up description'}, - self.bak_meta_api.TYPE_TAG_VOL_META: {}, - self.bak_meta_api.TYPE_TAG_VOL_GLANCE_META: {}} - - # Emulate restore to new volume - volume_id = str(uuid.uuid4()) - vol_name = 'restore_backup_%s' % (self.backup_id) - self._create_volume_db_entry(volume_id, 1, vol_name, 'fake volume') - - for f in fact: - func = fact[f][0] - 
fields = fact[f][1] - func(meta_container[f], volume_id, fields) - - vol = db.volume_get(self.ctxt, volume_id) - self.assertEqual('my-backed-up-volume', vol['display_name']) - self.assertEqual('backed up description', vol['display_description']) - - def test_v1_restore_factory_no_restore_name(self): - fact = self.bak_meta_api._v1_restore_factory() - - keys = [self.bak_meta_api.TYPE_TAG_VOL_BASE_META, - self.bak_meta_api.TYPE_TAG_VOL_META, - self.bak_meta_api.TYPE_TAG_VOL_GLANCE_META] - - self.assertEqual(set([]), - set(keys).symmetric_difference(set(fact.keys()))) - - meta_container = {self.bak_meta_api.TYPE_TAG_VOL_BASE_META: - {'display_name': 'my-backed-up-volume', - 'display_description': 'backed up description'}, - self.bak_meta_api.TYPE_TAG_VOL_META: {}, - self.bak_meta_api.TYPE_TAG_VOL_GLANCE_META: {}} - for f in fact: - func = fact[f][0] - fields = fact[f][1] - func(meta_container[f], self.volume_id, fields) - - vol = db.volume_get(self.ctxt, self.volume_id) - self.assertEqual(self.volume_display_name, vol['display_name']) - self.assertEqual(self.volume_display_description, - vol['display_description']) - - def test_v2_restore_factory(self): - fact = self.bak_meta_api._v2_restore_factory() - - keys = [self.bak_meta_api.TYPE_TAG_VOL_BASE_META, - self.bak_meta_api.TYPE_TAG_VOL_META, - self.bak_meta_api.TYPE_TAG_VOL_GLANCE_META] - - self.assertEqual(set([]), - set(keys).symmetric_difference(set(fact.keys()))) - - volume_types.create(self.ctxt, 'faketype') - vol_type = volume_types.get_volume_type_by_name(self.ctxt, 'faketype') - - meta_container = {self.bak_meta_api.TYPE_TAG_VOL_BASE_META: - {'encryption_key_id': '123', - 'volume_type_id': vol_type.get('id'), - 'display_name': 'vol-2', - 'display_description': 'description'}, - self.bak_meta_api.TYPE_TAG_VOL_META: {}, - self.bak_meta_api.TYPE_TAG_VOL_GLANCE_META: {}} - - for f in fact: - func = fact[f][0] - fields = fact[f][1] - func(meta_container[f], self.volume_id, fields) - - vol = db.volume_get(self.ctxt, 
self.volume_id) - self.assertEqual(self.volume_display_name, vol['display_name']) - self.assertEqual(self.volume_display_description, - vol['display_description']) - self.assertEqual('123', vol['encryption_key_id']) - - def test_restore_vol_glance_meta(self): - # Fields is an empty list for _restore_vol_glance_meta method. - fields = [] - container = {} - self.bak_meta_api._save_vol_glance_meta(container, self.volume_id) - self.bak_meta_api._restore_vol_glance_meta(container, self.volume_id, - fields) - self._add_metadata(vol_glance_meta=True) - self.bak_meta_api._save_vol_glance_meta(container, self.volume_id) - self.bak_meta_api._restore_vol_glance_meta(container, self.volume_id, - fields) - - def test_restore_vol_meta(self): - # Fields is an empty list for _restore_vol_meta method. - fields = [] - container = {} - self.bak_meta_api._save_vol_meta(container, self.volume_id) - # Extract volume metadata from container. - metadata = container.get('volume-metadata', {}) - self.bak_meta_api._restore_vol_meta(metadata, self.volume_id, - fields) - self._add_metadata(vol_meta=True) - self.bak_meta_api._save_vol_meta(container, self.volume_id) - # Extract volume metadata from container. - metadata = container.get('volume-metadata', {}) - self.bak_meta_api._restore_vol_meta(metadata, self.volume_id, fields) - - def test_restore_vol_base_meta(self): - # Fields is a list with 'encryption_key_id' for - # _restore_vol_base_meta method. 
- fields = ['encryption_key_id'] - container = {} - self.bak_meta_api._save_vol_base_meta(container, self.volume_id) - self.bak_meta_api._restore_vol_base_meta(container, self.volume_id, - fields) - - def _create_encrypted_volume_db_entry(self, id, type_id, encrypted): - if encrypted: - key_id = key_manager.API().key_id - vol = {'id': id, 'size': 1, 'status': 'available', - 'volume_type_id': type_id, 'encryption_key_id': key_id} - else: - vol = {'id': id, 'size': 1, 'status': 'available', - 'volume_type_id': type_id, 'encryption_key_id': None} - return db.volume_create(self.ctxt, vol)['id'] - - def test_restore_encrypted_vol_to_different_volume_type(self): - fields = ['encryption_key_id'] - container = {} - - # Create an encrypted volume - enc_vol1_id = self._create_encrypted_volume_db_entry(str(uuid.uuid4()), - 'enc_vol_type', - True) - - # Create a second encrypted volume, of a different volume type - enc_vol2_id = self._create_encrypted_volume_db_entry(str(uuid.uuid4()), - 'enc_vol_type2', - True) - - # Backup the first volume and attempt to restore to the second - self.bak_meta_api._save_vol_base_meta(container, enc_vol1_id) - self.assertRaises(exception.EncryptedBackupOperationFailed, - self.bak_meta_api._restore_vol_base_meta, - container[self.bak_meta_api.TYPE_TAG_VOL_BASE_META], - enc_vol2_id, fields) - - def test_restore_unencrypted_vol_to_different_volume_type(self): - fields = ['encryption_key_id'] - container = {} - - # Create an unencrypted volume - vol1_id = self._create_encrypted_volume_db_entry(str(uuid.uuid4()), - 'vol_type1', - False) - - # Create a second unencrypted volume, of a different volume type - vol2_id = self._create_encrypted_volume_db_entry(str(uuid.uuid4()), - 'vol_type2', - False) - - # Backup the first volume and restore to the second - self.bak_meta_api._save_vol_base_meta(container, vol1_id) - self.bak_meta_api._restore_vol_base_meta( - container[self.bak_meta_api.TYPE_TAG_VOL_BASE_META], vol2_id, - fields) - self.assertNotEqual( 
- db.volume_get(self.ctxt, vol1_id)['volume_type_id'], - db.volume_get(self.ctxt, vol2_id)['volume_type_id']) - - def test_restore_encrypted_vol_to_same_volume_type(self): - fields = ['encryption_key_id'] - container = {} - - # Create an encrypted volume - enc_vol1_id = self._create_encrypted_volume_db_entry(str(uuid.uuid4()), - 'enc_vol_type', - True) - - # Create an encrypted volume of the same type - enc_vol2_id = self._create_encrypted_volume_db_entry(str(uuid.uuid4()), - 'enc_vol_type', - True) - - # Backup the first volume and restore to the second - self.bak_meta_api._save_vol_base_meta(container, enc_vol1_id) - self.bak_meta_api._restore_vol_base_meta( - container[self.bak_meta_api.TYPE_TAG_VOL_BASE_META], enc_vol2_id, - fields) - - def test_restore_encrypted_vol_to_none_type_source_type_unavailable(self): - fields = ['encryption_key_id'] - container = {} - enc_vol_id = self._create_encrypted_volume_db_entry(str(uuid.uuid4()), - 'enc_vol_type', - True) - undef_vol_id = self._create_encrypted_volume_db_entry( - str(uuid.uuid4()), None, False) - self.bak_meta_api._save_vol_base_meta(container, enc_vol_id) - self.assertRaises(exception.EncryptedBackupOperationFailed, - self.bak_meta_api._restore_vol_base_meta, - container[self.bak_meta_api.TYPE_TAG_VOL_BASE_META], - undef_vol_id, fields) - - def test_restore_encrypted_vol_to_none_type_source_type_available(self): - fields = ['encryption_key_id'] - container = {} - db.volume_type_create(self.ctxt, {'id': 'enc_vol_type_id', - 'name': 'enc_vol_type'}) - enc_vol_id = self._create_encrypted_volume_db_entry(str(uuid.uuid4()), - 'enc_vol_type_id', - True) - undef_vol_id = self._create_encrypted_volume_db_entry( - str(uuid.uuid4()), None, False) - self.bak_meta_api._save_vol_base_meta(container, enc_vol_id) - self.bak_meta_api._restore_vol_base_meta( - container[self.bak_meta_api.TYPE_TAG_VOL_BASE_META], undef_vol_id, - fields) - self.assertEqual( - db.volume_get(self.ctxt, undef_vol_id)['volume_type_id'], - 
db.volume_get(self.ctxt, enc_vol_id)['volume_type_id']) - - def test_filter(self): - metadata = {'a': 1, 'b': 2, 'c': 3} - self.assertEqual(metadata, self.bak_meta_api._filter(metadata, [])) - self.assertEqual({'b': 2}, self.bak_meta_api._filter(metadata, ['b'])) - self.assertEqual({}, self.bak_meta_api._filter(metadata, ['d'])) - self.assertEqual({'a': 1, 'b': 2}, - self.bak_meta_api._filter(metadata, ['a', 'b'])) - - def test_save_vol_glance_meta(self): - container = {} - self.bak_meta_api._save_vol_glance_meta(container, self.volume_id) - - def test_save_vol_meta(self): - container = {} - self.bak_meta_api._save_vol_meta(container, self.volume_id) - - def test_save_vol_base_meta(self): - container = {} - self.bak_meta_api._save_vol_base_meta(container, self.volume_id) - - def test_is_serializable(self): - data = {'foo': 'bar'} - if self.bak_meta_api._is_serializable(data): - jsonutils.dumps(data) - - def test_is_not_serializable(self): - data = {'foo': 'bar'} - with mock.patch.object(jsonutils, 'dumps') as mock_dumps: - mock_dumps.side_effect = TypeError - self.assertFalse(self.bak_meta_api._is_serializable(data)) - mock_dumps.assert_called_once_with(data) diff --git a/cinder/tests/unit/backup/drivers/test_backup_glusterfs.py b/cinder/tests/unit/backup/drivers/test_backup_glusterfs.py deleted file mode 100644 index 975e46a7f..000000000 --- a/cinder/tests/unit/backup/drivers/test_backup_glusterfs.py +++ /dev/null @@ -1,96 +0,0 @@ -# Copyright (c) 2013 Red Hat, Inc. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the -# License for the specific language governing permissions and limitations -# under the License. -""" -Tests for GlusterFS backup driver. - -""" -import os - -import mock -from os_brick.remotefs import remotefs as remotefs_brick - -from cinder.backup.drivers import glusterfs -from cinder import context -from cinder import exception -from cinder import test -from cinder import utils - -FAKE_BACKUP_MOUNT_POINT_BASE = '/fake/mount-point-base' -FAKE_HOST = 'fake_host' -FAKE_VOL_NAME = 'backup_vol' -FAKE_BACKUP_SHARE = '%s:%s' % (FAKE_HOST, FAKE_VOL_NAME) -FAKE_BACKUP_PATH = os.path.join(FAKE_BACKUP_MOUNT_POINT_BASE, - 'e51e43e3c63fd5770e90e58e2eafc709') - - -class BackupGlusterfsShareTestCase(test.TestCase): - - def setUp(self): - super(BackupGlusterfsShareTestCase, self).setUp() - self.ctxt = context.get_admin_context() - - def test_check_configuration(self): - self.override_config('glusterfs_backup_share', FAKE_BACKUP_SHARE) - self.mock_object(glusterfs.GlusterfsBackupDriver, - '_init_backup_repo_path', - return_value=FAKE_BACKUP_PATH) - - with mock.patch.object(glusterfs.GlusterfsBackupDriver, - '_check_configuration'): - driver = glusterfs.GlusterfsBackupDriver(self.ctxt) - driver._check_configuration() - - def test_check_configuration_no_backup_share(self): - self.override_config('glusterfs_backup_share', None) - self.mock_object(glusterfs.GlusterfsBackupDriver, - '_init_backup_repo_path', - return_value=FAKE_BACKUP_PATH) - - with mock.patch.object(glusterfs.GlusterfsBackupDriver, - '_check_configuration'): - driver = glusterfs.GlusterfsBackupDriver(self.ctxt) - self.assertRaises(exception.ConfigNotFound, - driver._check_configuration) - - def test_init_backup_repo_path(self): - self.override_config('glusterfs_backup_share', FAKE_BACKUP_SHARE) - self.override_config('glusterfs_backup_mount_point', - FAKE_BACKUP_MOUNT_POINT_BASE) - mock_remotefsclient = mock.Mock() - mock_remotefsclient.get_mount_point = mock.Mock( - return_value=FAKE_BACKUP_PATH) - 
self.mock_object(glusterfs.GlusterfsBackupDriver, - '_check_configuration') - self.mock_object(remotefs_brick, 'RemoteFsClient', - return_value=mock_remotefsclient) - self.mock_object(os, 'getegid', - return_value=333333) - self.mock_object(utils, 'get_file_gid', - return_value=333333) - self.mock_object(utils, 'get_file_mode', - return_value=00000) - self.mock_object(utils, 'get_root_helper') - - with mock.patch.object(glusterfs.GlusterfsBackupDriver, - '_init_backup_repo_path'): - driver = glusterfs.GlusterfsBackupDriver(self.ctxt) - self.mock_object(driver, '_execute') - path = driver._init_backup_repo_path() - - self.assertEqual(FAKE_BACKUP_PATH, path) - utils.get_root_helper.called_once() - mock_remotefsclient.mount.assert_called_once_with(FAKE_BACKUP_SHARE) - mock_remotefsclient.get_mount_point.assert_called_once_with( - FAKE_BACKUP_SHARE) diff --git a/cinder/tests/unit/backup/drivers/test_backup_google.py b/cinder/tests/unit/backup/drivers/test_backup_google.py deleted file mode 100644 index 5774460c5..000000000 --- a/cinder/tests/unit/backup/drivers/test_backup_google.py +++ /dev/null @@ -1,583 +0,0 @@ -# Copyright (C) 2012 Hewlett-Packard Development Company, L.P. -# Copyright (C) 2016 Vedams Inc. -# Copyright (C) 2016 Google Inc. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -""" -Tests for Google Backup code. 
- -""" - -import bz2 -import filecmp -import hashlib -import os -import shutil -import tempfile -import zlib - -import mock -from oslo_utils import units - -from cinder.backup.drivers import google as google_dr -from cinder import context -from cinder import db -from cinder import exception -from cinder.i18n import _ -from cinder import objects -from cinder import test -from cinder.tests.unit.backup import fake_google_client -from cinder.tests.unit.backup import fake_google_client2 -from cinder.tests.unit import fake_constants as fake - - -class FakeMD5(object): - def __init__(self, *args, **kwargs): - pass - - @classmethod - def digest(self): - return 'gcscindermd5' - - @classmethod - def hexdigest(self): - return 'gcscindermd5' - - -class FakeObjectName(object): - @classmethod - def _fake_generate_object_name_prefix(self, backup): - az = 'az_fake' - backup_name = '%s_backup_%s' % (az, backup.id) - volume = 'volume_%s' % (backup.volume_id) - prefix = volume + '_' + backup_name - return prefix - - -def gcs_client(func): - @mock.patch.object(google_dr.client, 'GoogleCredentials', - fake_google_client.FakeGoogleCredentials) - @mock.patch.object(google_dr.discovery, 'build', - fake_google_client.FakeGoogleDiscovery.Build) - @mock.patch.object(google_dr, 'GoogleMediaIoBaseDownload', - fake_google_client.FakeGoogleMediaIoBaseDownload) - @mock.patch.object(hashlib, 'md5', FakeMD5) - def func_wrapper(self, *args, **kwargs): - return func(self, *args, **kwargs) - - return func_wrapper - - -def gcs_client2(func): - @mock.patch.object(google_dr.client, 'GoogleCredentials', - fake_google_client2.FakeGoogleCredentials) - @mock.patch.object(google_dr.discovery, 'build', - fake_google_client2.FakeGoogleDiscovery.Build) - @mock.patch.object(google_dr, 'GoogleMediaIoBaseDownload', - fake_google_client2.FakeGoogleMediaIoBaseDownload) - @mock.patch.object(google_dr.GoogleBackupDriver, - '_generate_object_name_prefix', - FakeObjectName._fake_generate_object_name_prefix) - 
@mock.patch.object(hashlib, 'md5', FakeMD5) - def func_wrapper(self, *args, **kwargs): - return func(self, *args, **kwargs) - - return func_wrapper - - -def fake_backup_metadata(self, backup, object_meta): - raise exception.BackupDriverException(message=_('fake')) - - -def fake_delete(self, backup): - raise exception.BackupOperationError() - - -def _fake_delete_object(self, bucket_name, object_name): - raise AssertionError('delete_object method should not be called.') - - -class GoogleBackupDriverTestCase(test.TestCase): - """Test Case for Google""" - - _DEFAULT_VOLUME_ID = 'c7eb81f4-bec6-4730-a60f-8888885874df' - - def _create_volume_db_entry(self, volume_id=_DEFAULT_VOLUME_ID): - vol = {'id': volume_id, - 'size': 1, - 'status': 'available'} - return db.volume_create(self.ctxt, vol)['id'] - - def _create_backup_db_entry(self, - volume_id=_DEFAULT_VOLUME_ID, - container=google_dr.CONF.backup_gcs_bucket, - parent_id=None, - service_metadata=None): - - try: - db.volume_get(self.ctxt, volume_id) - except exception.NotFound: - self._create_volume_db_entry(volume_id=volume_id) - - kwargs = {'size': 1, - 'container': container, - 'volume_id': volume_id, - 'parent_id': parent_id, - 'user_id': fake.USER_ID, - 'project_id': fake.PROJECT_ID, - 'service_metadata': service_metadata, - } - backup = objects.Backup(context=self.ctxt, **kwargs) - backup.create() - return backup - - def setUp(self): - super(GoogleBackupDriverTestCase, self).setUp() - self.flags(backup_gcs_bucket='gcscinderbucket') - self.flags(backup_gcs_credential_file='test-file') - self.flags(backup_gcs_project_id='test-gcs') - self.ctxt = context.get_admin_context() - self.volume_file = tempfile.NamedTemporaryFile() - self.temp_dir = tempfile.mkdtemp() - self.addCleanup(self.volume_file.close) - # Remove tempdir. 
- self.addCleanup(shutil.rmtree, self.temp_dir) - for _i in range(0, 64): - self.volume_file.write(os.urandom(units.Ki)) - - @gcs_client - def test_backup(self): - volume_id = 'b09b1ad4-5f0e-4d3f-8b9e-0000004f5ec2' - container_name = 'test-bucket' - backup = self._create_backup_db_entry(volume_id=volume_id, - container=container_name) - service = google_dr.GoogleBackupDriver(self.ctxt) - self.volume_file.seek(0) - result = service.backup(backup, self.volume_file) - self.assertIsNone(result) - - @gcs_client - def test_backup_uncompressed(self): - volume_id = '2b9f10a3-42b4-4fdf-b316-000000ceb039' - backup = self._create_backup_db_entry(volume_id=volume_id) - self.flags(backup_compression_algorithm='none') - service = google_dr.GoogleBackupDriver(self.ctxt) - self.volume_file.seek(0) - service.backup(backup, self.volume_file) - - @gcs_client - def test_backup_bz2(self): - volume_id = 'dc0fee35-b44e-4f13-80d6-000000e1b50c' - backup = self._create_backup_db_entry(volume_id=volume_id) - self.flags(backup_compression_algorithm='bz2') - service = google_dr.GoogleBackupDriver(self.ctxt) - self.volume_file.seek(0) - service.backup(backup, self.volume_file) - - @gcs_client - def test_backup_zlib(self): - volume_id = '5cea0535-b6fb-4531-9a38-000000bea094' - backup = self._create_backup_db_entry(volume_id=volume_id) - self.flags(backup_compression_algorithm='zlib') - service = google_dr.GoogleBackupDriver(self.ctxt) - self.volume_file.seek(0) - service.backup(backup, self.volume_file) - - @gcs_client - def test_backup_default_container(self): - volume_id = '9552017f-c8b9-4e4e-a876-00000053349c' - backup = self._create_backup_db_entry(volume_id=volume_id, - container=None) - service = google_dr.GoogleBackupDriver(self.ctxt) - self.volume_file.seek(0) - service.backup(backup, self.volume_file) - self.assertEqual('gcscinderbucket', backup.container) - - @gcs_client - @mock.patch('httplib2.proxy_info_from_url') - def test_backup_proxy_configured(self, mock_proxy_info): - 
google_dr.CONF.set_override("backup_gcs_proxy_url", - "http://myproxy.example.com") - google_dr.GoogleBackupDriver(self.ctxt) - mock_proxy_info.assert_called_with("http://myproxy.example.com") - - @gcs_client - @mock.patch('httplib2.proxy_info_from_environment') - def test_backup_proxy_environment(self, mock_proxy_env): - google_dr.GoogleBackupDriver(self.ctxt) - mock_proxy_env.assert_called_once_with() - - @gcs_client - @mock.patch('cinder.backup.drivers.google.GoogleBackupDriver.' - '_send_progress_end') - @mock.patch('cinder.backup.drivers.google.GoogleBackupDriver.' - '_send_progress_notification') - def test_backup_default_container_notify(self, _send_progress, - _send_progress_end): - volume_id = '87dd0eed-2598-4ebd-8ebb-000000ac578a' - backup = self._create_backup_db_entry(volume_id=volume_id, - container=None) - # If the backup_object_number_per_notification is set to 1, - # the _send_progress method will be called for sure. - google_dr.CONF.set_override("backup_object_number_per_notification", 1) - google_dr.CONF.set_override("backup_gcs_enable_progress_timer", False) - service = google_dr.GoogleBackupDriver(self.ctxt) - self.volume_file.seek(0) - service.backup(backup, self.volume_file) - self.assertTrue(_send_progress.called) - self.assertTrue(_send_progress_end.called) - - # If the backup_object_number_per_notification is increased to - # another value, the _send_progress method will not be called. - _send_progress.reset_mock() - _send_progress_end.reset_mock() - google_dr.CONF.set_override("backup_object_number_per_notification", - 10) - service = google_dr.GoogleBackupDriver(self.ctxt) - self.volume_file.seek(0) - service.backup(backup, self.volume_file) - self.assertFalse(_send_progress.called) - self.assertTrue(_send_progress_end.called) - - # If the timer is enabled, the _send_progress will be called, - # since the timer can trigger the progress notification. 
- _send_progress.reset_mock() - _send_progress_end.reset_mock() - google_dr.CONF.set_override("backup_object_number_per_notification", - 10) - google_dr.CONF.set_override("backup_gcs_enable_progress_timer", True) - service = google_dr.GoogleBackupDriver(self.ctxt) - self.volume_file.seek(0) - service.backup(backup, self.volume_file) - self.assertTrue(_send_progress.called) - self.assertTrue(_send_progress_end.called) - - @gcs_client - def test_backup_custom_container(self): - volume_id = '1da9859e-77e5-4731-bd58-000000ca119e' - container_name = 'fake99' - backup = self._create_backup_db_entry(volume_id=volume_id, - container=container_name) - service = google_dr.GoogleBackupDriver(self.ctxt) - self.volume_file.seek(0) - service.backup(backup, self.volume_file) - self.assertEqual(container_name, backup.container) - - @gcs_client2 - def test_backup_shafile(self): - volume_id = '6465dad4-22af-48f7-8a1a-000000218907' - - container_name = self.temp_dir.replace(tempfile.gettempdir() + '/', - '', 1) - backup = self._create_backup_db_entry(volume_id=volume_id, - container=container_name) - service = google_dr.GoogleBackupDriver(self.ctxt) - self.volume_file.seek(0) - service.backup(backup, self.volume_file) - self.assertEqual(container_name, backup.container) - - # Verify sha contents - content1 = service._read_sha256file(backup) - self.assertEqual(64 * units.Ki / content1['chunk_size'], - len(content1['sha256s'])) - - @gcs_client2 - def test_backup_cmp_shafiles(self): - volume_id = '1a99ac67-c534-4fe3-b472-0000001785e2' - - container_name = self.temp_dir.replace(tempfile.gettempdir() + '/', - '', 1) - backup = self._create_backup_db_entry(volume_id=volume_id, - container=container_name) - service1 = google_dr.GoogleBackupDriver(self.ctxt) - self.volume_file.seek(0) - service1.backup(backup, self.volume_file) - self.assertEqual(container_name, backup.container) - - # Create incremental backup with no change to contents - deltabackup = 
self._create_backup_db_entry(volume_id=volume_id, - container=container_name, - parent_id=backup.id) - service2 = google_dr.GoogleBackupDriver(self.ctxt) - self.volume_file.seek(0) - service2.backup(deltabackup, self.volume_file) - self.assertEqual(container_name, deltabackup.container) - - # Compare shas from both files - content1 = service1._read_sha256file(backup) - content2 = service2._read_sha256file(deltabackup) - - self.assertEqual(len(content1['sha256s']), len(content2['sha256s'])) - self.assertEqual(set(content1['sha256s']), set(content2['sha256s'])) - - @gcs_client2 - def test_backup_delta_two_objects_change(self): - volume_id = '30dab288-265a-4583-9abe-000000d42c67' - - self.flags(backup_gcs_object_size=8 * units.Ki) - self.flags(backup_gcs_block_size=units.Ki) - - container_name = self.temp_dir.replace(tempfile.gettempdir() + '/', - '', 1) - backup = self._create_backup_db_entry(volume_id=volume_id, - container=container_name) - service1 = google_dr.GoogleBackupDriver(self.ctxt) - self.volume_file.seek(0) - service1.backup(backup, self.volume_file) - self.assertEqual(container_name, backup.container) - - # Create incremental backup with no change to contents - self.volume_file.seek(2 * 8 * units.Ki) - self.volume_file.write(os.urandom(units.Ki)) - self.volume_file.seek(4 * 8 * units.Ki) - self.volume_file.write(os.urandom(units.Ki)) - - deltabackup = self._create_backup_db_entry(volume_id=volume_id, - container=container_name, - parent_id=backup.id) - service2 = google_dr.GoogleBackupDriver(self.ctxt) - self.volume_file.seek(0) - service2.backup(deltabackup, self.volume_file) - self.assertEqual(container_name, deltabackup.container) - - content1 = service1._read_sha256file(backup) - content2 = service2._read_sha256file(deltabackup) - - # Verify that two shas are changed at index 16 and 32 - self.assertNotEqual(content1['sha256s'][16], content2['sha256s'][16]) - self.assertNotEqual(content1['sha256s'][32], content2['sha256s'][32]) - - @gcs_client2 - def 
test_backup_delta_two_blocks_in_object_change(self): - volume_id = 'b943e84f-aa67-4331-9ab2-000000cf19ba' - - self.flags(backup_gcs_object_size=8 * units.Ki) - self.flags(backup_gcs_block_size=units.Ki) - - container_name = self.temp_dir.replace(tempfile.gettempdir() + '/', - '', 1) - backup = self._create_backup_db_entry(volume_id=volume_id, - container=container_name) - - service1 = google_dr.GoogleBackupDriver(self.ctxt) - self.volume_file.seek(0) - service1.backup(backup, self.volume_file) - self.assertEqual(container_name, backup.container) - - # Create incremental backup with no change to contents - self.volume_file.seek(16 * units.Ki) - self.volume_file.write(os.urandom(units.Ki)) - self.volume_file.seek(20 * units.Ki) - self.volume_file.write(os.urandom(units.Ki)) - - deltabackup = self._create_backup_db_entry(volume_id=volume_id, - container=container_name, - parent_id=backup.id) - service2 = google_dr.GoogleBackupDriver(self.ctxt) - self.volume_file.seek(0) - service2.backup(deltabackup, self.volume_file) - self.assertEqual(container_name, deltabackup.container) - - # Verify that two shas are changed at index 16 and 20 - content1 = service1._read_sha256file(backup) - content2 = service2._read_sha256file(deltabackup) - self.assertNotEqual(content1['sha256s'][16], content2['sha256s'][16]) - self.assertNotEqual(content1['sha256s'][20], content2['sha256s'][20]) - - @gcs_client - def test_create_backup_fail(self): - volume_id = 'b09b1ad4-5f0e-4d3f-8b9e-0000004f5ec3' - container_name = 'gcs_api_failure' - backup = self._create_backup_db_entry(volume_id=volume_id, - container=container_name) - service = google_dr.GoogleBackupDriver(self.ctxt) - self.volume_file.seek(0) - self.assertRaises(exception.GCSApiFailure, - service.backup, - backup, self.volume_file) - - @gcs_client - def test_create_backup_fail2(self): - volume_id = 'b09b1ad4-5f0e-4d3f-8b9e-0000004f5ec4' - container_name = 'gcs_oauth2_failure' - backup = self._create_backup_db_entry(volume_id=volume_id, 
- container=container_name) - service = google_dr.GoogleBackupDriver(self.ctxt) - self.volume_file.seek(0) - self.assertRaises(exception.GCSOAuth2Failure, - service.backup, - backup, self.volume_file) - - @gcs_client - @mock.patch.object(google_dr.GoogleBackupDriver, '_backup_metadata', - fake_backup_metadata) - def test_backup_backup_metadata_fail(self): - """Test of when an exception occurs in backup(). - - In backup(), after an exception occurs in - self._backup_metadata(), we want to check the process of an - exception handler. - """ - volume_id = '020d9142-339c-4876-a445-000000f1520c' - - backup = self._create_backup_db_entry(volume_id=volume_id) - self.flags(backup_compression_algorithm='none') - service = google_dr.GoogleBackupDriver(self.ctxt) - self.volume_file.seek(0) - # We expect that an exception be notified directly. - self.assertRaises(exception.BackupDriverException, - service.backup, - backup, self.volume_file) - - @gcs_client - @mock.patch.object(google_dr.GoogleBackupDriver, '_backup_metadata', - fake_backup_metadata) - @mock.patch.object(google_dr.GoogleBackupDriver, 'delete_backup', - fake_delete) - def test_backup_backup_metadata_fail2(self): - """Test of when an exception occurs in an exception handler. - - In backup(), after an exception occurs in - self._backup_metadata(), we want to check the process when the - second exception occurs in self.delete_backup(). - """ - volume_id = '2164421d-f181-4db7-b9bd-000000eeb628' - - backup = self._create_backup_db_entry(volume_id=volume_id) - self.flags(backup_compression_algorithm='none') - service = google_dr.GoogleBackupDriver(self.ctxt) - self.volume_file.seek(0) - # We expect that the second exception is notified. 
- self.assertRaises(exception.BackupOperationError, - service.backup, - backup, self.volume_file) - - @gcs_client - def test_restore(self): - volume_id = 'c2a81f09-f480-4325-8424-00000071685b' - backup = self._create_backup_db_entry(volume_id=volume_id) - service = google_dr.GoogleBackupDriver(self.ctxt) - - with tempfile.NamedTemporaryFile() as volume_file: - service.restore(backup, volume_id, volume_file) - - @gcs_client - def test_restore_fail(self): - volume_id = 'c2a81f09-f480-4325-8424-00000071685b' - container_name = 'gcs_connection_failure' - backup = self._create_backup_db_entry(volume_id=volume_id, - container=container_name) - service = google_dr.GoogleBackupDriver(self.ctxt) - - with tempfile.NamedTemporaryFile() as volume_file: - self.assertRaises(exception.GCSConnectionFailure, - service.restore, - backup, volume_id, volume_file) - - @gcs_client2 - def test_restore_delta(self): - volume_id = '04d83506-bcf7-4ff5-9c65-00000051bd2e' - self.flags(backup_gcs_object_size=8 * units.Ki) - self.flags(backup_gcs_block_size=units.Ki) - container_name = self.temp_dir.replace(tempfile.gettempdir() + '/', - '', 1) - backup = self._create_backup_db_entry(volume_id=volume_id, - container=container_name) - service1 = google_dr.GoogleBackupDriver(self.ctxt) - self.volume_file.seek(0) - service1.backup(backup, self.volume_file) - - # Create incremental backup with no change to contents - self.volume_file.seek(16 * units.Ki) - self.volume_file.write(os.urandom(units.Ki)) - self.volume_file.seek(20 * units.Ki) - self.volume_file.write(os.urandom(units.Ki)) - - deltabackup = self._create_backup_db_entry(volume_id=volume_id, - container=container_name, - parent_id=backup.id) - self.volume_file.seek(0) - service2 = google_dr.GoogleBackupDriver(self.ctxt) - service2.backup(deltabackup, self.volume_file, True) - - with tempfile.NamedTemporaryFile() as restored_file: - service2.restore(deltabackup, volume_id, - restored_file) - self.assertTrue(filecmp.cmp(self.volume_file.name, 
- restored_file.name)) - - @gcs_client - def test_delete(self): - volume_id = '9ab256c8-3175-4ad8-baa1-0000007f9d31' - object_prefix = 'test_prefix' - backup = self._create_backup_db_entry(volume_id=volume_id, - service_metadata=object_prefix) - service = google_dr.GoogleBackupDriver(self.ctxt) - service.delete_backup(backup) - - @gcs_client - @mock.patch.object(google_dr.GoogleBackupDriver, 'delete_object', - _fake_delete_object) - def test_delete_without_object_prefix(self): - volume_id = 'ee30d649-72a6-49a5-b78d-000000edb6b1' - backup = self._create_backup_db_entry(volume_id=volume_id) - service = google_dr.GoogleBackupDriver(self.ctxt) - service.delete_backup(backup) - - @gcs_client - def test_get_compressor(self): - service = google_dr.GoogleBackupDriver(self.ctxt) - compressor = service._get_compressor('None') - self.assertIsNone(compressor) - compressor = service._get_compressor('zlib') - self.assertEqual(zlib, compressor) - compressor = service._get_compressor('bz2') - self.assertEqual(bz2, compressor) - self.assertRaises(ValueError, service._get_compressor, 'fake') - - @gcs_client - def test_prepare_output_data_effective_compression(self): - service = google_dr.GoogleBackupDriver(self.ctxt) - # Set up buffer of 128 zeroed bytes - fake_data = b'\0' * 128 - - result = service._prepare_output_data(fake_data) - - self.assertEqual('zlib', result[0]) - self.assertGreater(len(fake_data), len(result)) - - @gcs_client - def test_prepare_output_data_no_compression(self): - self.flags(backup_compression_algorithm='none') - service = google_dr.GoogleBackupDriver(self.ctxt) - # Set up buffer of 128 zeroed bytes - fake_data = b'\0' * 128 - - result = service._prepare_output_data(fake_data) - - self.assertEqual('none', result[0]) - self.assertEqual(fake_data, result[1]) - - @gcs_client - def test_prepare_output_data_ineffective_compression(self): - service = google_dr.GoogleBackupDriver(self.ctxt) - # Set up buffer of 128 zeroed bytes - fake_data = b'\0' * 128 - # 
Pre-compress so that compression in the driver will be ineffective. - already_compressed_data = service.compressor.compress(fake_data) - - result = service._prepare_output_data(already_compressed_data) - - self.assertEqual('none', result[0]) - self.assertEqual(already_compressed_data, result[1]) diff --git a/cinder/tests/unit/backup/drivers/test_backup_nfs.py b/cinder/tests/unit/backup/drivers/test_backup_nfs.py deleted file mode 100644 index 22433504a..000000000 --- a/cinder/tests/unit/backup/drivers/test_backup_nfs.py +++ /dev/null @@ -1,707 +0,0 @@ -# Copyright (C) 2015 Tom Barron -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -""" -Tests for Backup NFS driver. 
- -""" -import bz2 -import filecmp -import hashlib -import os -import shutil -import tempfile -import zlib - -import mock -from os_brick.remotefs import remotefs as remotefs_brick -from oslo_config import cfg -import six - -from cinder.backup.drivers import nfs -from cinder import context -from cinder import db -from cinder import exception -from cinder.i18n import _ -from cinder import objects -from cinder import test -from cinder.tests.unit import fake_constants as fake -from cinder import utils - -CONF = cfg.CONF - -FAKE_BACKUP_MOUNT_POINT_BASE = '/fake/mount-point-base' -FAKE_HOST = 'fake_host' -FAKE_EXPORT_PATH = 'fake/export/path' -FAKE_BACKUP_SHARE = '%s:/%s' % (FAKE_HOST, FAKE_EXPORT_PATH) -FAKE_BACKUP_PATH = os.path.join(FAKE_BACKUP_MOUNT_POINT_BASE, - FAKE_EXPORT_PATH) -FAKE_BACKUP_ID = fake.BACKUP_ID -FAKE_BACKUP_ID_PART1 = fake.BACKUP_ID[:2] -FAKE_BACKUP_ID_PART2 = fake.BACKUP_ID[2:4] -FAKE_BACKUP_ID_REST = fake.BACKUP_ID[4:] -UPDATED_CONTAINER_NAME = os.path.join(FAKE_BACKUP_ID_PART1, - FAKE_BACKUP_ID_PART2, - FAKE_BACKUP_ID) - - -class BackupNFSShareTestCase(test.TestCase): - - def setUp(self): - super(BackupNFSShareTestCase, self).setUp() - self.ctxt = context.get_admin_context() - self.mock_object(nfs, 'LOG') - - def test_check_configuration_no_backup_share(self): - self.override_config('backup_share', None) - self.mock_object(nfs.NFSBackupDriver, '_init_backup_repo_path', - return_value=FAKE_BACKUP_PATH) - - with mock.patch.object(nfs.NFSBackupDriver, '_check_configuration'): - driver = nfs.NFSBackupDriver(self.ctxt) - self.assertRaises(exception.ConfigNotFound, - driver._check_configuration) - - @mock.patch.object(remotefs_brick, 'RemoteFsClient') - def test_init_backup_repo_path(self, mock_remotefs_client_class): - self.override_config('backup_share', FAKE_BACKUP_SHARE) - self.override_config('backup_mount_point_base', - FAKE_BACKUP_MOUNT_POINT_BASE) - mock_remotefsclient = mock.Mock() - mock_remotefsclient.get_mount_point = mock.Mock( - 
return_value=FAKE_BACKUP_PATH) - self.mock_object(nfs.NFSBackupDriver, '_check_configuration') - mock_remotefs_client_class.return_value = mock_remotefsclient - self.mock_object(utils, 'get_root_helper') - with mock.patch.object(nfs.NFSBackupDriver, '_init_backup_repo_path'): - driver = nfs.NFSBackupDriver(self.ctxt) - - path = driver._init_backup_repo_path() - - self.assertEqual(FAKE_BACKUP_PATH, path) - utils.get_root_helper.called_once() - mock_remotefs_client_class.assert_called_once_with( - 'nfs', - utils.get_root_helper(), - nfs_mount_point_base=FAKE_BACKUP_MOUNT_POINT_BASE, - nfs_mount_options=None - ) - mock_remotefsclient.mount.assert_called_once_with(FAKE_BACKUP_SHARE) - mock_remotefsclient.get_mount_point.assert_called_once_with( - FAKE_BACKUP_SHARE) - - -def fake_md5(arg): - class result(object): - def hexdigest(self): - return 'fake-md5-sum' - - ret = result() - return ret - - -class BackupNFSSwiftBasedTestCase(test.TestCase): - """Test Cases for based on Swift tempest backup tests.""" - - _DEFAULT_VOLUME_ID = fake.VOLUME_ID - - def _create_volume_db_entry(self, volume_id=_DEFAULT_VOLUME_ID): - vol = {'id': volume_id, - 'size': 1, - 'status': 'available'} - return db.volume_create(self.ctxt, vol)['id'] - - def _create_backup_db_entry(self, - volume_id=_DEFAULT_VOLUME_ID, - container='test-container', - backup_id=fake.BACKUP_ID, - parent_id=None): - - try: - db.volume_get(self.ctxt, volume_id) - except exception.NotFound: - self._create_volume_db_entry(volume_id=volume_id) - - backup = {'id': backup_id, - 'size': 1, - 'container': container, - 'volume_id': volume_id, - 'parent_id': parent_id, - 'user_id': fake.USER_ID, - 'project_id': fake.PROJECT_ID, - } - return db.backup_create(self.ctxt, backup)['id'] - - def setUp(self): - super(BackupNFSSwiftBasedTestCase, self).setUp() - - self.ctxt = context.get_admin_context() - self.mock_object(hashlib, 'md5', fake_md5) - self.volume_file = tempfile.NamedTemporaryFile() - self.temp_dir = tempfile.mkdtemp() - 
self.addCleanup(self.volume_file.close) - self.override_config('backup_share', FAKE_BACKUP_SHARE) - self.override_config('backup_mount_point_base', - '/tmp') - self.override_config('backup_file_size', 52428800) - mock_remotefsclient = mock.Mock() - mock_remotefsclient.get_mount_point = mock.Mock( - return_value=self.temp_dir) - self.mock_object(remotefs_brick, 'RemoteFsClient', - return_value=mock_remotefsclient) - # Remove tempdir. - self.addCleanup(shutil.rmtree, self.temp_dir) - for _i in range(0, 32): - self.volume_file.write(os.urandom(1024)) - - def test_backup_uncompressed(self): - volume_id = fake.VOLUME_ID - self._create_backup_db_entry(volume_id=volume_id) - self.flags(backup_compression_algorithm='none') - service = nfs.NFSBackupDriver(self.ctxt) - self.volume_file.seek(0) - backup = objects.Backup.get_by_id(self.ctxt, fake.BACKUP_ID) - service.backup(backup, self.volume_file) - - def test_backup_bz2(self): - volume_id = fake.VOLUME_ID - self._create_backup_db_entry(volume_id=volume_id) - self.flags(backup_compression_algorithm='bz2') - service = nfs.NFSBackupDriver(self.ctxt) - self.volume_file.seek(0) - backup = objects.Backup.get_by_id(self.ctxt, fake.BACKUP_ID) - service.backup(backup, self.volume_file) - - def test_backup_zlib(self): - volume_id = fake.VOLUME_ID - self._create_backup_db_entry(volume_id=volume_id) - self.flags(backup_compression_algorithm='zlib') - service = nfs.NFSBackupDriver(self.ctxt) - self.volume_file.seek(0) - backup = objects.Backup.get_by_id(self.ctxt, fake.BACKUP_ID) - service.backup(backup, self.volume_file) - - def test_backup_default_container(self): - volume_id = fake.VOLUME_ID - self._create_backup_db_entry(volume_id=volume_id, - container=None, - backup_id=FAKE_BACKUP_ID) - service = nfs.NFSBackupDriver(self.ctxt) - self.volume_file.seek(0) - backup = objects.Backup.get_by_id(self.ctxt, FAKE_BACKUP_ID) - service.backup(backup, self.volume_file) - backup = objects.Backup.get_by_id(self.ctxt, FAKE_BACKUP_ID) - 
self.assertEqual(backup['container'], UPDATED_CONTAINER_NAME) - - @mock.patch('cinder.backup.drivers.posix.PosixBackupDriver.' - 'update_container_name', - return_value='testcontainer1') - @mock.patch('cinder.backup.drivers.nfs.NFSBackupDriver.' - '_send_progress_end') - @mock.patch('cinder.backup.drivers.nfs.NFSBackupDriver.' - '_send_progress_notification') - def test_backup_container_notify_1(self, _send_progress, - _send_progress_end, - _mock_update_container_name): - # This unit test writes data to disk. It should be - # updated to not do that. - - volume_id = fake.VOLUME_ID - self._create_backup_db_entry(volume_id=volume_id, - container='testcontainer1') - - # If the backup_object_number_per_notification is set to 1, - # the _send_progress method will be called for sure. - _send_progress.reset_mock() - _send_progress_end.reset_mock() - CONF.set_override("backup_object_number_per_notification", 1) - CONF.set_override("backup_enable_progress_timer", False) - service = nfs.NFSBackupDriver(self.ctxt) - self.volume_file.seek(0) - backup = objects.Backup.get_by_id(self.ctxt, fake.BACKUP_ID) - service.backup(backup, self.volume_file) - self.assertTrue(_send_progress.called) - self.assertTrue(_send_progress_end.called) - - @mock.patch('cinder.backup.drivers.posix.PosixBackupDriver.' - 'update_container_name', - return_value='testcontainer2') - @mock.patch('cinder.backup.drivers.nfs.NFSBackupDriver.' - '_send_progress_end') - @mock.patch('cinder.backup.drivers.nfs.NFSBackupDriver.' - '_send_progress_notification') - def test_backup_container_notify_2(self, _send_progress, - _send_progress_end, - _mock_update_container_name): - # This unit test writes data to disk. It should be - # updated to not do that. - - volume_id = fake.VOLUME_ID - self._create_backup_db_entry(volume_id=volume_id, - container='testcontainer2') - - # If the backup_object_number_per_notification is increased to - # another value, the _send_progress method will not be called. 
- _send_progress.reset_mock() - _send_progress_end.reset_mock() - CONF.set_override("backup_object_number_per_notification", 10) - CONF.set_override("backup_enable_progress_timer", False) - service = nfs.NFSBackupDriver(self.ctxt) - self.volume_file.seek(0) - backup = objects.Backup.get_by_id(self.ctxt, fake.BACKUP_ID) - service.backup(backup, self.volume_file) - self.assertFalse(_send_progress.called) - self.assertTrue(_send_progress_end.called) - - @mock.patch('cinder.backup.drivers.posix.PosixBackupDriver.' - 'update_container_name', - return_value='testcontainer3') - @mock.patch('cinder.backup.drivers.nfs.NFSBackupDriver.' - '_send_progress_end') - @mock.patch('cinder.backup.drivers.nfs.NFSBackupDriver.' - '_send_progress_notification') - def test_backup_container_notify_3(self, _send_progress, - _send_progress_end, - _mock_update_container_name): - # This unit test writes data to disk. It should be - # updated to not do that. - - volume_id = fake.VOLUME_ID - self._create_backup_db_entry(volume_id=volume_id, - container='testcontainer3') - - # If the timer is enabled, the _send_progress will be called, - # since the timer can trigger the progress notification. 
- _send_progress.reset_mock() - _send_progress_end.reset_mock() - CONF.set_override("backup_object_number_per_notification", 10) - CONF.set_override("backup_enable_progress_timer", True) - service = nfs.NFSBackupDriver(self.ctxt) - self.volume_file.seek(0) - backup = objects.Backup.get_by_id(self.ctxt, fake.BACKUP_ID) - service.backup(backup, self.volume_file) - self.assertTrue(_send_progress.called) - self.assertTrue(_send_progress_end.called) - - def test_backup_custom_container(self): - volume_id = fake.VOLUME_ID - container_name = 'fake99' - self._create_backup_db_entry(volume_id=volume_id, - container=container_name) - service = nfs.NFSBackupDriver(self.ctxt) - self.volume_file.seek(0) - backup = objects.Backup.get_by_id(self.ctxt, fake.BACKUP_ID) - service.backup(backup, self.volume_file) - backup = objects.Backup.get_by_id(self.ctxt, fake.BACKUP_ID) - self.assertEqual(backup['container'], container_name) - - def test_backup_shafile(self): - volume_id = fake.VOLUME_ID - - def _fake_generate_object_name_prefix(self, backup): - az = 'az_fake' - backup_name = '%s_backup_%s' % (az, backup['id']) - volume = 'volume_%s' % (backup['volume_id']) - prefix = volume + '_' + backup_name - return prefix - - self.mock_object(nfs.NFSBackupDriver, - '_generate_object_name_prefix', - _fake_generate_object_name_prefix) - - container_name = self.temp_dir.replace(tempfile.gettempdir() + '/', - '', 1) - self._create_backup_db_entry(volume_id=volume_id, - container=container_name) - service = nfs.NFSBackupDriver(self.ctxt) - self.volume_file.seek(0) - backup = objects.Backup.get_by_id(self.ctxt, fake.BACKUP_ID) - service.backup(backup, self.volume_file) - backup = objects.Backup.get_by_id(self.ctxt, fake.BACKUP_ID) - self.assertEqual(backup['container'], container_name) - - # Verify sha contents - content1 = service._read_sha256file(backup) - self.assertEqual(32 * 1024 / content1['chunk_size'], - len(content1['sha256s'])) - - def test_backup_cmp_shafiles(self): - volume_id = 
fake.VOLUME_ID - - def _fake_generate_object_name_prefix(self, backup): - az = 'az_fake' - backup_name = '%s_backup_%s' % (az, backup['id']) - volume = 'volume_%s' % (backup['volume_id']) - prefix = volume + '_' + backup_name - return prefix - - self.mock_object(nfs.NFSBackupDriver, - '_generate_object_name_prefix', - _fake_generate_object_name_prefix) - - container_name = self.temp_dir.replace(tempfile.gettempdir() + '/', - '', 1) - self._create_backup_db_entry(volume_id=volume_id, - container=container_name, - backup_id=fake.BACKUP_ID) - service = nfs.NFSBackupDriver(self.ctxt) - self.volume_file.seek(0) - backup = objects.Backup.get_by_id(self.ctxt, fake.BACKUP_ID) - service.backup(backup, self.volume_file) - backup = objects.Backup.get_by_id(self.ctxt, fake.BACKUP_ID) - self.assertEqual(backup['container'], container_name) - - # Create incremental backup with no change to contents - self._create_backup_db_entry(volume_id=volume_id, - container=container_name, - backup_id=fake.BACKUP2_ID, - parent_id=fake.BACKUP_ID) - service = nfs.NFSBackupDriver(self.ctxt) - self.volume_file.seek(0) - deltabackup = objects.Backup.get_by_id(self.ctxt, fake.BACKUP2_ID) - service.backup(deltabackup, self.volume_file) - deltabackup = objects.Backup.get_by_id(self.ctxt, fake.BACKUP2_ID) - self.assertEqual(deltabackup['container'], container_name) - - # Compare shas from both files - content1 = service._read_sha256file(backup) - content2 = service._read_sha256file(deltabackup) - - self.assertEqual(len(content1['sha256s']), len(content2['sha256s'])) - self.assertEqual(set(content1['sha256s']), set(content2['sha256s'])) - - def test_backup_delta_two_objects_change(self): - volume_id = fake.VOLUME_ID - - def _fake_generate_object_name_prefix(self, backup): - az = 'az_fake' - backup_name = '%s_backup_%s' % (az, backup['id']) - volume = 'volume_%s' % (backup['volume_id']) - prefix = volume + '_' + backup_name - return prefix - - self.mock_object(nfs.NFSBackupDriver, - 
'_generate_object_name_prefix', - _fake_generate_object_name_prefix) - - self.flags(backup_file_size=(8 * 1024)) - self.flags(backup_sha_block_size_bytes=1024) - - container_name = self.temp_dir.replace(tempfile.gettempdir() + '/', - '', 1) - self._create_backup_db_entry(volume_id=volume_id, - container=container_name, - backup_id=fake.BACKUP_ID) - service = nfs.NFSBackupDriver(self.ctxt) - self.volume_file.seek(0) - backup = objects.Backup.get_by_id(self.ctxt, fake.BACKUP_ID) - service.backup(backup, self.volume_file) - backup = objects.Backup.get_by_id(self.ctxt, fake.BACKUP_ID) - self.assertEqual(backup['container'], container_name) - - # Create incremental backup with no change to contents - self.volume_file.seek(16 * 1024) - self.volume_file.write(os.urandom(1024)) - self.volume_file.seek(20 * 1024) - self.volume_file.write(os.urandom(1024)) - - self._create_backup_db_entry(volume_id=volume_id, - container=container_name, - backup_id=fake.BACKUP2_ID, - parent_id=fake.BACKUP_ID) - service = nfs.NFSBackupDriver(self.ctxt) - self.volume_file.seek(0) - deltabackup = objects.Backup.get_by_id(self.ctxt, fake.BACKUP2_ID) - service.backup(deltabackup, self.volume_file) - deltabackup = objects.Backup.get_by_id(self.ctxt, fake.BACKUP2_ID) - self.assertEqual(deltabackup['container'], container_name) - - content1 = service._read_sha256file(backup) - content2 = service._read_sha256file(deltabackup) - - # Verify that two shas are changed at index 16 and 20 - self.assertNotEqual(content1['sha256s'][16], content2['sha256s'][16]) - self.assertNotEqual(content1['sha256s'][20], content2['sha256s'][20]) - - def test_backup_delta_two_blocks_in_object_change(self): - volume_id = fake.VOLUME_ID - - def _fake_generate_object_name_prefix(self, backup): - az = 'az_fake' - backup_name = '%s_backup_%s' % (az, backup['id']) - volume = 'volume_%s' % (backup['volume_id']) - prefix = volume + '_' + backup_name - return prefix - - self.mock_object(nfs.NFSBackupDriver, - 
'_generate_object_name_prefix', - _fake_generate_object_name_prefix) - - self.flags(backup_file_size=(8 * 1024)) - self.flags(backup_sha_block_size_bytes=1024) - - container_name = self.temp_dir.replace(tempfile.gettempdir() + '/', - '', 1) - self._create_backup_db_entry(volume_id=volume_id, - container=container_name, - backup_id=fake.BACKUP_ID) - service = nfs.NFSBackupDriver(self.ctxt) - self.volume_file.seek(0) - backup = objects.Backup.get_by_id(self.ctxt, fake.BACKUP_ID) - service.backup(backup, self.volume_file) - backup = objects.Backup.get_by_id(self.ctxt, fake.BACKUP_ID) - self.assertEqual(backup['container'], container_name) - - # Create incremental backup with no change to contents - self.volume_file.seek(16 * 1024) - self.volume_file.write(os.urandom(1024)) - self.volume_file.seek(20 * 1024) - self.volume_file.write(os.urandom(1024)) - - self._create_backup_db_entry(volume_id=volume_id, - container=container_name, - backup_id=fake.BACKUP2_ID, - parent_id=fake.BACKUP_ID) - service = nfs.NFSBackupDriver(self.ctxt) - self.volume_file.seek(0) - deltabackup = objects.Backup.get_by_id(self.ctxt, fake.BACKUP2_ID) - service.backup(deltabackup, self.volume_file) - deltabackup = objects.Backup.get_by_id(self.ctxt, fake.BACKUP2_ID) - self.assertEqual(deltabackup['container'], container_name) - - # Verify that two shas are changed at index 16 and 20 - content1 = service._read_sha256file(backup) - content2 = service._read_sha256file(deltabackup) - self.assertNotEqual(content1['sha256s'][16], content2['sha256s'][16]) - self.assertNotEqual(content1['sha256s'][20], content2['sha256s'][20]) - - def test_backup_backup_metadata_fail(self): - """Test of when an exception occurs in backup(). - - In backup(), after an exception occurs in - self._backup_metadata(), we want to check the process of an - exception handler. 
- """ - volume_id = fake.VOLUME_ID - - self._create_backup_db_entry(volume_id=volume_id) - self.flags(backup_compression_algorithm='none') - service = nfs.NFSBackupDriver(self.ctxt) - self.volume_file.seek(0) - backup = objects.Backup.get_by_id(self.ctxt, fake.BACKUP_ID) - - def fake_backup_metadata(self, backup, object_meta): - raise exception.BackupDriverException(message=_('fake')) - - # Raise a pseudo exception.BackupDriverException. - self.mock_object(nfs.NFSBackupDriver, '_backup_metadata', - fake_backup_metadata) - - # We expect that an exception be notified directly. - self.assertRaises(exception.BackupDriverException, - service.backup, - backup, self.volume_file) - - def test_backup_backup_metadata_fail2(self): - """Test of when an exception occurs in an exception handler. - - In backup(), after an exception occurs in - self._backup_metadata(), we want to check the process when the - second exception occurs in self.delete_backup(). - """ - volume_id = fake.VOLUME_ID - - self._create_backup_db_entry(volume_id=volume_id) - self.flags(backup_compression_algorithm='none') - service = nfs.NFSBackupDriver(self.ctxt) - self.volume_file.seek(0) - backup = objects.Backup.get_by_id(self.ctxt, fake.BACKUP_ID) - - def fake_backup_metadata(self, backup, object_meta): - raise exception.BackupDriverException(message=_('fake')) - - # Raise a pseudo exception.BackupDriverException. - self.mock_object(nfs.NFSBackupDriver, '_backup_metadata', - fake_backup_metadata) - - def fake_delete(self, backup): - raise exception.BackupOperationError() - - # Raise a pseudo exception.BackupOperationError. - self.mock_object(nfs.NFSBackupDriver, 'delete_backup', fake_delete) - - # We expect that the second exception is notified. 
- self.assertRaises(exception.BackupOperationError, - service.backup, - backup, self.volume_file) - - def test_restore_uncompressed(self): - volume_id = fake.VOLUME_ID - - self._create_backup_db_entry(volume_id=volume_id) - self.flags(backup_compression_algorithm='none') - self.flags(backup_sha_block_size_bytes=32) - service = nfs.NFSBackupDriver(self.ctxt) - self.volume_file.seek(0) - backup = objects.Backup.get_by_id(self.ctxt, fake.BACKUP_ID) - - service.backup(backup, self.volume_file) - - with tempfile.NamedTemporaryFile() as restored_file: - backup = objects.Backup.get_by_id(self.ctxt, fake.BACKUP_ID) - service.restore(backup, volume_id, restored_file) - self.assertTrue(filecmp.cmp(self.volume_file.name, - restored_file.name)) - - def test_restore_bz2(self): - volume_id = fake.VOLUME_ID - - self._create_backup_db_entry(volume_id=volume_id) - self.flags(backup_compression_algorithm='bz2') - self.flags(backup_file_size=(1024 * 3)) - self.flags(backup_sha_block_size_bytes=1024) - service = nfs.NFSBackupDriver(self.ctxt) - self.volume_file.seek(0) - backup = objects.Backup.get_by_id(self.ctxt, fake.BACKUP_ID) - service.backup(backup, self.volume_file) - - with tempfile.NamedTemporaryFile() as restored_file: - backup = objects.Backup.get_by_id(self.ctxt, fake.BACKUP_ID) - service.restore(backup, volume_id, restored_file) - self.assertTrue(filecmp.cmp(self.volume_file.name, - restored_file.name)) - - def test_restore_zlib(self): - volume_id = fake.VOLUME_ID - - self._create_backup_db_entry(volume_id=volume_id) - self.flags(backup_compression_algorithm='zlib') - self.flags(backup_file_size=(1024 * 3)) - self.flags(backup_sha_block_size_bytes=1024) - service = nfs.NFSBackupDriver(self.ctxt) - self.volume_file.seek(0) - backup = objects.Backup.get_by_id(self.ctxt, fake.BACKUP_ID) - service.backup(backup, self.volume_file) - - with tempfile.NamedTemporaryFile() as restored_file: - backup = objects.Backup.get_by_id(self.ctxt, fake.BACKUP_ID) - service.restore(backup, 
volume_id, restored_file) - self.assertTrue(filecmp.cmp(self.volume_file.name, - restored_file.name)) - - def test_restore_delta(self): - volume_id = fake.VOLUME_ID - - def _fake_generate_object_name_prefix(self, backup): - az = 'az_fake' - backup_name = '%s_backup_%s' % (az, backup['id']) - volume = 'volume_%s' % (backup['volume_id']) - prefix = volume + '_' + backup_name - return prefix - - self.mock_object(nfs.NFSBackupDriver, - '_generate_object_name_prefix', - _fake_generate_object_name_prefix) - - self.flags(backup_file_size=(1024 * 8)) - self.flags(backup_sha_block_size_bytes=1024) - - container_name = self.temp_dir.replace(tempfile.gettempdir() + '/', - '', 1) - self._create_backup_db_entry(volume_id=volume_id, - container=container_name, - backup_id=fake.BACKUP_ID) - service = nfs.NFSBackupDriver(self.ctxt) - self.volume_file.seek(0) - backup = objects.Backup.get_by_id(self.ctxt, fake.BACKUP_ID) - service.backup(backup, self.volume_file) - - # Create incremental backup with no change to contents - self.volume_file.seek(16 * 1024) - self.volume_file.write(os.urandom(1024)) - self.volume_file.seek(20 * 1024) - self.volume_file.write(os.urandom(1024)) - - self._create_backup_db_entry(volume_id=volume_id, - container=container_name, - backup_id=fake.BACKUP2_ID, - parent_id=fake.BACKUP_ID) - self.volume_file.seek(0) - deltabackup = objects.Backup.get_by_id(self.ctxt, fake.BACKUP2_ID) - service.backup(deltabackup, self.volume_file, True) - deltabackup = objects.Backup.get_by_id(self.ctxt, fake.BACKUP2_ID) - - with tempfile.NamedTemporaryFile() as restored_file: - backup = objects.Backup.get_by_id(self.ctxt, fake.BACKUP2_ID) - service.restore(backup, volume_id, - restored_file) - self.assertTrue(filecmp.cmp(self.volume_file.name, - restored_file.name)) - - def test_delete(self): - volume_id = fake.VOLUME_ID - self._create_backup_db_entry(volume_id=volume_id) - service = nfs.NFSBackupDriver(self.ctxt) - backup = objects.Backup.get_by_id(self.ctxt, fake.BACKUP_ID) 
- service.delete_backup(backup) - - def test_get_compressor(self): - service = nfs.NFSBackupDriver(self.ctxt) - compressor = service._get_compressor('None') - self.assertIsNone(compressor) - compressor = service._get_compressor('zlib') - self.assertEqual(compressor, zlib) - compressor = service._get_compressor('bz2') - self.assertEqual(compressor, bz2) - self.assertRaises(ValueError, service._get_compressor, 'fake') - - def create_buffer(self, size): - # Set up buffer of zeroed bytes - fake_data = bytearray(size) - if six.PY2: - # On Python 2, zlib.compressor() accepts buffer, but not bytearray - # NOTE(jsbryant): Pep8 fails on py3 based installations as buffer() - # was removed. 'noqa' used here to avoid that failure. - fake_data = buffer(fake_data) # noqa - return fake_data - - def test_prepare_output_data_effective_compression(self): - service = nfs.NFSBackupDriver(self.ctxt) - fake_data = self.create_buffer(128) - result = service._prepare_output_data(fake_data) - - self.assertEqual('zlib', result[0]) - self.assertGreater(len(fake_data), len(result)) - - def test_prepare_output_data_no_compresssion(self): - self.flags(backup_compression_algorithm='none') - service = nfs.NFSBackupDriver(self.ctxt) - fake_data = self.create_buffer(128) - - result = service._prepare_output_data(fake_data) - - self.assertEqual('none', result[0]) - self.assertEqual(fake_data, result[1]) - - def test_prepare_output_data_ineffective_compression(self): - service = nfs.NFSBackupDriver(self.ctxt) - fake_data = self.create_buffer(128) - - # Pre-compress so that compression in the driver will be ineffective. 
- already_compressed_data = service.compressor.compress(fake_data) - - result = service._prepare_output_data(already_compressed_data) - - self.assertEqual('none', result[0]) - self.assertEqual(already_compressed_data, result[1]) diff --git a/cinder/tests/unit/backup/drivers/test_backup_posix.py b/cinder/tests/unit/backup/drivers/test_backup_posix.py deleted file mode 100644 index 70ac5005d..000000000 --- a/cinder/tests/unit/backup/drivers/test_backup_posix.py +++ /dev/null @@ -1,190 +0,0 @@ -# Copyright (c) 2015 Red Hat, Inc. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -""" -Tests for Posix backup driver. 
- -""" - -import os - -import mock -from six.moves import builtins - -from cinder.backup.drivers import posix -from cinder import context -from cinder import objects -from cinder import test -from cinder.tests.unit import fake_constants as fake - - -FAKE_FILE_SIZE = 52428800 -FAKE_SHA_BLOCK_SIZE_BYTES = 1024 -FAKE_BACKUP_ENABLE_PROGRESS_TIMER = True - -FAKE_CONTAINER = 'fake/container' -FAKE_BACKUP_ID = fake.BACKUP_ID -FAKE_BACKUP_ID_PART1 = fake.BACKUP_ID[:2] -FAKE_BACKUP_ID_PART2 = fake.BACKUP_ID[2:4] -FAKE_BACKUP_ID_REST = fake.BACKUP_ID[4:] -FAKE_BACKUP = {'id': FAKE_BACKUP_ID, 'container': None} - -UPDATED_CONTAINER_NAME = os.path.join(FAKE_BACKUP_ID_PART1, - FAKE_BACKUP_ID_PART2, - FAKE_BACKUP_ID) - -FAKE_BACKUP_MOUNT_POINT_BASE = '/fake/mount-point-base' -FAKE_EXPORT_PATH = 'fake/export/path' - -FAKE_BACKUP_POSIX_PATH = os.path.join(FAKE_BACKUP_MOUNT_POINT_BASE, - FAKE_EXPORT_PATH) - -FAKE_PREFIX = 'prefix-' -FAKE_CONTAINER_ENTRIES = [FAKE_PREFIX + 'one', FAKE_PREFIX + 'two', 'three'] -EXPECTED_CONTAINER_ENTRIES = [FAKE_PREFIX + 'one', FAKE_PREFIX + 'two'] -FAKE_OBJECT_NAME = 'fake-object-name' -FAKE_OBJECT_PATH = os.path.join(FAKE_BACKUP_POSIX_PATH, FAKE_CONTAINER, - FAKE_OBJECT_NAME) - - -class PosixBackupDriverTestCase(test.TestCase): - - def setUp(self): - super(PosixBackupDriverTestCase, self).setUp() - self.ctxt = context.get_admin_context() - - self.override_config('backup_file_size', - FAKE_FILE_SIZE) - self.override_config('backup_sha_block_size_bytes', - FAKE_SHA_BLOCK_SIZE_BYTES) - self.override_config('backup_enable_progress_timer', - FAKE_BACKUP_ENABLE_PROGRESS_TIMER) - self.override_config('backup_posix_path', - FAKE_BACKUP_POSIX_PATH) - self.mock_object(posix, 'LOG') - - self.driver = posix.PosixBackupDriver(self.ctxt) - - def test_init(self): - drv = posix.PosixBackupDriver(self.ctxt) - self.assertEqual(FAKE_BACKUP_POSIX_PATH, - drv.backup_path) - - def test_update_container_name_container_passed(self): - result = 
self.driver.update_container_name(FAKE_BACKUP, FAKE_CONTAINER) - - self.assertEqual(FAKE_CONTAINER, result) - - def test_update_container_na_container_passed(self): - result = self.driver.update_container_name(FAKE_BACKUP, None) - - self.assertEqual(UPDATED_CONTAINER_NAME, result) - - def test_put_container(self): - self.mock_object(os.path, 'exists', return_value=False) - self.mock_object(os, 'makedirs') - self.mock_object(os, 'chmod') - path = os.path.join(self.driver.backup_path, FAKE_CONTAINER) - - self.driver.put_container(FAKE_CONTAINER) - - os.path.exists.assert_called_once_with(path) - os.makedirs.assert_called_once_with(path) - os.chmod.assert_called_once_with(path, 0o770) - - def test_put_container_already_exists(self): - self.mock_object(os.path, 'exists', return_value=True) - self.mock_object(os, 'makedirs') - self.mock_object(os, 'chmod') - path = os.path.join(self.driver.backup_path, FAKE_CONTAINER) - - self.driver.put_container(FAKE_CONTAINER) - - os.path.exists.assert_called_once_with(path) - self.assertEqual(0, os.makedirs.call_count) - self.assertEqual(0, os.chmod.call_count) - - def test_put_container_exception(self): - self.mock_object(os.path, 'exists', return_value=False) - self.mock_object(os, 'makedirs', side_effect=OSError) - self.mock_object(os, 'chmod') - path = os.path.join(self.driver.backup_path, FAKE_CONTAINER) - - self.assertRaises(OSError, self.driver.put_container, - FAKE_CONTAINER) - os.path.exists.assert_called_once_with(path) - os.makedirs.assert_called_once_with(path) - self.assertEqual(0, os.chmod.call_count) - - def test_get_container_entries(self): - self.mock_object(os, 'listdir', return_value=FAKE_CONTAINER_ENTRIES) - - result = self.driver.get_container_entries(FAKE_CONTAINER, FAKE_PREFIX) - - self.assertEqual(EXPECTED_CONTAINER_ENTRIES, result) - - def test_get_container_entries_no_list(self): - self.mock_object(os, 'listdir', return_value=[]) - - result = self.driver.get_container_entries(FAKE_CONTAINER, FAKE_PREFIX) - 
- self.assertEqual([], result) - - def test_get_container_entries_no_match(self): - self.mock_object(os, 'listdir', return_value=FAKE_CONTAINER_ENTRIES) - - result = self.driver.get_container_entries(FAKE_CONTAINER, - FAKE_PREFIX + 'garbage') - - self.assertEqual([], result) - - def test_get_object_writer(self): - self.mock_object(builtins, 'open', mock.mock_open()) - self.mock_object(os, 'chmod') - - self.driver.get_object_writer(FAKE_CONTAINER, FAKE_OBJECT_NAME) - - os.chmod.assert_called_once_with(FAKE_OBJECT_PATH, 0o660) - builtins.open.assert_called_once_with(FAKE_OBJECT_PATH, 'wb') - - def test_get_object_reader(self): - self.mock_object(builtins, 'open', mock.mock_open()) - - self.driver.get_object_reader(FAKE_CONTAINER, FAKE_OBJECT_NAME) - - builtins.open.assert_called_once_with(FAKE_OBJECT_PATH, 'rb') - - def test_delete_object(self): - self.mock_object(os, 'remove') - - self.driver.delete_object(FAKE_CONTAINER, FAKE_OBJECT_NAME) - - def test_delete_nonexistent_object(self): - self.mock_object(os, 'remove', side_effect=OSError) - - self.assertRaises(OSError, - self.driver.delete_object, FAKE_CONTAINER, - FAKE_OBJECT_NAME) - - @mock.patch.object(posix.timeutils, 'utcnow') - def test_generate_object_name_prefix(self, utcnow_mock): - timestamp = '20170518102205' - utcnow_mock.return_value.strftime.return_value = timestamp - backup = objects.Backup(self.ctxt, volume_id=fake.VOLUME_ID, - id=fake.BACKUP_ID) - res = self.driver._generate_object_name_prefix(backup) - expected = 'volume_%s_%s_backup_%s' % (backup.volume_id, - timestamp, - backup.id) - self.assertEqual(expected, res) diff --git a/cinder/tests/unit/backup/drivers/test_backup_swift.py b/cinder/tests/unit/backup/drivers/test_backup_swift.py deleted file mode 100644 index e71a72f05..000000000 --- a/cinder/tests/unit/backup/drivers/test_backup_swift.py +++ /dev/null @@ -1,850 +0,0 @@ -# Copyright (C) 2012 Hewlett-Packard Development Company, L.P. -# All Rights Reserved. 
-# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -""" -Tests for Backup swift code. - -""" - -import bz2 -import ddt -import filecmp -import hashlib -import os -import shutil -import tempfile -import zlib - -import mock -from oslo_config import cfg -from swiftclient import client as swift - -from cinder.backup.drivers import swift as swift_dr -from cinder import context -from cinder import db -from cinder import exception -from cinder.i18n import _ -from cinder import objects -from cinder import test -from cinder.tests.unit.backup import fake_swift_client -from cinder.tests.unit.backup import fake_swift_client2 -from cinder.tests.unit import fake_constants as fake - - -CONF = cfg.CONF - -ANY = mock.ANY - - -def fake_md5(arg): - class result(object): - def hexdigest(self): - return 'fake-md5-sum' - - ret = result() - return ret - - -@ddt.ddt -class BackupSwiftTestCase(test.TestCase): - """Test Case for swift.""" - - _DEFAULT_VOLUME_ID = 'c7eb81f4-bec6-4730-a60f-8888885874df' - - def _create_volume_db_entry(self, volume_id=_DEFAULT_VOLUME_ID): - vol = {'id': volume_id, - 'size': 1, - 'status': 'available'} - return db.volume_create(self.ctxt, vol)['id'] - - def _create_backup_db_entry(self, - volume_id=_DEFAULT_VOLUME_ID, - container='test-container', - backup_id=fake.BACKUP_ID, parent_id=None, - service_metadata=None): - - try: - db.volume_get(self.ctxt, volume_id) - except exception.NotFound: - self._create_volume_db_entry(volume_id=volume_id) - - backup = {'id': 
backup_id, - 'size': 1, - 'container': container, - 'volume_id': volume_id, - 'parent_id': parent_id, - 'user_id': fake.USER_ID, - 'project_id': fake.PROJECT_ID, - 'service_metadata': service_metadata, - } - return db.backup_create(self.ctxt, backup)['id'] - - def setUp(self): - super(BackupSwiftTestCase, self).setUp() - service_catalog = [{u'type': u'object-store', u'name': u'swift', - u'endpoints': [{ - u'publicURL': u'http://example.com'}]}, - {u'type': u'identity', u'name': u'keystone', - u'endpoints': [{ - u'publicURL': u'http://example.com'}]}] - self.ctxt = context.get_admin_context() - self.ctxt.service_catalog = service_catalog - - self.mock_object(swift, 'Connection', - fake_swift_client.FakeSwiftClient.Connection) - self.mock_object(hashlib, 'md5', fake_md5) - - self.volume_file = tempfile.NamedTemporaryFile() - self.temp_dir = tempfile.mkdtemp() - self.addCleanup(self.volume_file.close) - # Remove tempdir. - self.addCleanup(shutil.rmtree, self.temp_dir) - for _i in range(0, 64): - self.volume_file.write(os.urandom(1024)) - - notify_patcher = mock.patch( - 'cinder.volume.utils.notify_about_backup_usage') - notify_patcher.start() - self.addCleanup(notify_patcher.stop) - - def test_backup_swift_url(self): - self.ctxt.service_catalog = [{u'type': u'object-store', - u'name': u'swift', - u'endpoints': [{ - u'adminURL': - u'http://example.com'}]}, - {u'type': u'identity', - u'name': u'keystone', - u'endpoints': [{ - u'publicURL': - u'http://example.com'}]}] - self.assertRaises(exception.BackupDriverException, - swift_dr.SwiftBackupDriver, - self.ctxt) - - def test_backup_swift_auth_url(self): - self.ctxt.service_catalog = [{u'type': u'object-store', - u'name': u'swift', - u'endpoints': [{ - u'publicURL': - u'http://example.com'}]}, - {u'type': u'identity', - u'name': u'keystone', - u'endpoints': [{ - u'adminURL': - u'http://example.com'}]}] - self.override_config("backup_swift_auth", - "single_user") - self.override_config("backup_swift_user", - "fake_user") - 
self.assertRaises(exception.BackupDriverException, - swift_dr.SwiftBackupDriver, - self.ctxt) - - def test_backup_swift_url_conf(self): - self.ctxt.service_catalog = [{u'type': u'object-store', - u'name': u'swift', - u'endpoints': [{ - u'adminURL': - u'http://example.com'}]}, - {u'type': u'identity', - u'name': u'keystone', - u'endpoints': [{ - u'publicURL': - u'http://example.com'}]}] - self.ctxt.project_id = fake.PROJECT_ID - self.override_config("backup_swift_url", - "http://public.example.com/") - backup = swift_dr.SwiftBackupDriver(self.ctxt) - self.assertEqual("%s%s" % (CONF.backup_swift_url, - self.ctxt.project_id), - backup.swift_url) - - def test_backup_swift_url_conf_nocatalog(self): - self.ctxt.service_catalog = [] - self.ctxt.project_id = fake.PROJECT_ID - self.override_config("backup_swift_url", - "http://public.example.com/") - backup = swift_dr.SwiftBackupDriver(self.ctxt) - self.assertEqual("%s%s" % (CONF.backup_swift_url, - self.ctxt.project_id), - backup.swift_url) - - def test_backup_swift_auth_url_conf(self): - self.ctxt.service_catalog = [{u'type': u'object-store', - u'name': u'swift', - u'endpoints': [{ - u'publicURL': - u'http://example.com'}]}, - {u'type': u'identity', - u'name': u'keystone', - u'endpoints': [{ - u'adminURL': - u'http://example.com'}]}] - - self.ctxt.project_id = fake.PROJECT_ID - self.override_config("backup_swift_auth_url", - "http://public.example.com") - self.override_config("backup_swift_auth", - "single_user") - self.override_config("backup_swift_user", - "fake_user") - backup = swift_dr.SwiftBackupDriver(self.ctxt) - self.assertEqual(CONF.backup_swift_auth_url, backup.auth_url) - - def test_backup_swift_info(self): - self.override_config("swift_catalog_info", "dummy") - self.assertRaises(exception.BackupDriverException, - swift_dr.SwiftBackupDriver, - self.ctxt) - - @ddt.data( - {'auth': 'single_user', 'insecure': True}, - {'auth': 'single_user', 'insecure': False}, - {'auth': 'per_user', 'insecure': True}, - {'auth': 
'per_user', 'insecure': False}, - ) - @ddt.unpack - def test_backup_swift_auth_insecure(self, auth, insecure): - self.override_config("backup_swift_auth_insecure", insecure) - self.override_config('backup_swift_auth', auth) - if auth == 'single_user': - self.override_config('backup_swift_user', 'swift-user') - - mock_connection = self.mock_object(swift, 'Connection') - - swift_dr.SwiftBackupDriver(self.ctxt) - - if auth == 'single_user': - mock_connection.assert_called_once_with(insecure=insecure, - authurl=ANY, - auth_version=ANY, - tenant_name=ANY, - user=ANY, - key=ANY, - os_options={}, - retries=ANY, - starting_backoff=ANY, - cacert=ANY) - else: - mock_connection.assert_called_once_with(insecure=insecure, - retries=ANY, - preauthurl=ANY, - preauthtoken=ANY, - starting_backoff=ANY, - cacert=ANY) - - @ddt.data( - {'auth_version': '3', 'user_domain': 'UserDomain', - 'project': 'Project', 'project_domain': 'ProjectDomain'}, - {'auth_version': '3', 'user_domain': None, - 'project': 'Project', 'project_domain': 'ProjectDomain'}, - {'auth_version': '3', 'user_domain': 'UserDomain', - 'project': None, 'project_domain': 'ProjectDomain'}, - {'auth_version': '3', 'user_domain': 'UserDomain', - 'project': 'Project', 'project_domain': None}, - {'auth_version': '3', 'user_domain': None, - 'project': None, 'project_domain': None}, - ) - @ddt.unpack - def test_backup_swift_auth_v3_single_user(self, auth_version, user_domain, - project, project_domain): - self.override_config('backup_swift_auth', 'single_user') - self.override_config('backup_swift_user', 'swift-user') - self.override_config('backup_swift_auth_version', auth_version) - self.override_config('backup_swift_user_domain', user_domain) - self.override_config('backup_swift_project', project) - self.override_config('backup_swift_project_domain', project_domain) - - os_options = {} - if user_domain is not None: - os_options['user_domain_name'] = user_domain - if project is not None: - os_options['project_name'] = project 
- if project_domain is not None: - os_options['project_domain_name'] = project_domain - - mock_connection = self.mock_object(swift, 'Connection') - swift_dr.SwiftBackupDriver(self.ctxt) - mock_connection.assert_called_once_with(insecure=ANY, - authurl=ANY, - auth_version=auth_version, - tenant_name=ANY, - user=ANY, - key=ANY, - os_options=os_options, - retries=ANY, - starting_backoff=ANY, - cacert=ANY) - - def test_backup_uncompressed(self): - volume_id = '2b9f10a3-42b4-4fdf-b316-000000ceb039' - self._create_backup_db_entry(volume_id=volume_id) - self.flags(backup_compression_algorithm='none') - service = swift_dr.SwiftBackupDriver(self.ctxt) - self.volume_file.seek(0) - backup = objects.Backup.get_by_id(self.ctxt, fake.BACKUP_ID) - service.backup(backup, self.volume_file) - - def test_backup_bz2(self): - volume_id = 'dc0fee35-b44e-4f13-80d6-000000e1b50c' - self._create_backup_db_entry(volume_id=volume_id) - self.flags(backup_compression_algorithm='bz2') - service = swift_dr.SwiftBackupDriver(self.ctxt) - self.volume_file.seek(0) - backup = objects.Backup.get_by_id(self.ctxt, fake.BACKUP_ID) - service.backup(backup, self.volume_file) - - def test_backup_zlib(self): - volume_id = '5cea0535-b6fb-4531-9a38-000000bea094' - self._create_backup_db_entry(volume_id=volume_id) - self.flags(backup_compression_algorithm='zlib') - service = swift_dr.SwiftBackupDriver(self.ctxt) - self.volume_file.seek(0) - backup = objects.Backup.get_by_id(self.ctxt, fake.BACKUP_ID) - service.backup(backup, self.volume_file) - - @mock.patch.object(db, 'backup_update', wraps=db.backup_update) - def test_backup_default_container(self, backup_update_mock): - volume_id = '9552017f-c8b9-4e4e-a876-00000053349c' - self._create_backup_db_entry(volume_id=volume_id, - container=None) - service = swift_dr.SwiftBackupDriver(self.ctxt) - self.volume_file.seek(0) - backup = objects.Backup.get_by_id(self.ctxt, fake.BACKUP_ID) - service.backup(backup, self.volume_file) - backup = 
objects.Backup.get_by_id(self.ctxt, fake.BACKUP_ID) - self.assertEqual('volumebackups', backup['container']) - self.assertEqual(3, backup_update_mock.call_count) - - @mock.patch.object(db, 'backup_update', wraps=db.backup_update) - def test_backup_db_container(self, backup_update_mock): - volume_id = '9552017f-c8b9-4e4e-a876-00000053349c' - self._create_backup_db_entry(volume_id=volume_id, - container='existing_name') - service = swift_dr.SwiftBackupDriver(self.ctxt) - self.volume_file.seek(0) - backup = objects.Backup.get_by_id(self.ctxt, fake.BACKUP_ID) - - service.backup(backup, self.volume_file) - backup = objects.Backup.get_by_id(self.ctxt, fake.BACKUP_ID) - self.assertEqual('existing_name', backup['container']) - # Make sure we are not making a DB update when we are using the same - # value that's already in the DB. - self.assertEqual(2, backup_update_mock.call_count) - - @mock.patch.object(db, 'backup_update', wraps=db.backup_update) - def test_backup_driver_container(self, backup_update_mock): - volume_id = '9552017f-c8b9-4e4e-a876-00000053349c' - self._create_backup_db_entry(volume_id=volume_id, - container=None) - service = swift_dr.SwiftBackupDriver(self.ctxt) - self.volume_file.seek(0) - backup = objects.Backup.get_by_id(self.ctxt, fake.BACKUP_ID) - with mock.patch.object(service, 'update_container_name', - return_value='driver_name'): - service.backup(backup, self.volume_file) - backup = objects.Backup.get_by_id(self.ctxt, fake.BACKUP_ID) - self.assertEqual('driver_name', backup['container']) - self.assertEqual(3, backup_update_mock.call_count) - - @mock.patch('cinder.backup.drivers.swift.SwiftBackupDriver.' - '_send_progress_end') - @mock.patch('cinder.backup.drivers.swift.SwiftBackupDriver.' 
- '_send_progress_notification') - def test_backup_default_container_notify(self, _send_progress, - _send_progress_end): - volume_id = '87dd0eed-2598-4ebd-8ebb-000000ac578a' - self._create_backup_db_entry(volume_id=volume_id, - container=None) - # If the backup_object_number_per_notification is set to 1, - # the _send_progress method will be called for sure. - CONF.set_override("backup_object_number_per_notification", 1) - CONF.set_override("backup_swift_enable_progress_timer", False) - service = swift_dr.SwiftBackupDriver(self.ctxt) - self.volume_file.seek(0) - backup = objects.Backup.get_by_id(self.ctxt, fake.BACKUP_ID) - service.backup(backup, self.volume_file) - self.assertTrue(_send_progress.called) - self.assertTrue(_send_progress_end.called) - - # If the backup_object_number_per_notification is increased to - # another value, the _send_progress method will not be called. - _send_progress.reset_mock() - _send_progress_end.reset_mock() - CONF.set_override("backup_object_number_per_notification", 10) - service = swift_dr.SwiftBackupDriver(self.ctxt) - self.volume_file.seek(0) - backup = objects.Backup.get_by_id(self.ctxt, fake.BACKUP_ID) - service.backup(backup, self.volume_file) - self.assertFalse(_send_progress.called) - self.assertTrue(_send_progress_end.called) - - # If the timer is enabled, the _send_progress will be called, - # since the timer can trigger the progress notification. 
- _send_progress.reset_mock() - _send_progress_end.reset_mock() - CONF.set_override("backup_object_number_per_notification", 10) - CONF.set_override("backup_swift_enable_progress_timer", True) - service = swift_dr.SwiftBackupDriver(self.ctxt) - self.volume_file.seek(0) - backup = objects.Backup.get_by_id(self.ctxt, fake.BACKUP_ID) - service.backup(backup, self.volume_file) - self.assertTrue(_send_progress.called) - self.assertTrue(_send_progress_end.called) - - def test_backup_custom_container(self): - volume_id = '1da9859e-77e5-4731-bd58-000000ca119e' - container_name = 'fake99' - self._create_backup_db_entry(volume_id=volume_id, - container=container_name) - service = swift_dr.SwiftBackupDriver(self.ctxt) - self.volume_file.seek(0) - backup = objects.Backup.get_by_id(self.ctxt, fake.BACKUP_ID) - service.backup(backup, self.volume_file) - backup = objects.Backup.get_by_id(self.ctxt, fake.BACKUP_ID) - self.assertEqual(container_name, backup['container']) - - def test_backup_shafile(self): - volume_id = '6465dad4-22af-48f7-8a1a-000000218907' - - def _fake_generate_object_name_prefix(self, backup): - az = 'az_fake' - backup_name = '%s_backup_%s' % (az, backup['id']) - volume = 'volume_%s' % (backup['volume_id']) - prefix = volume + '_' + backup_name - return prefix - - self.mock_object(swift_dr.SwiftBackupDriver, - '_generate_object_name_prefix', - _fake_generate_object_name_prefix) - - container_name = self.temp_dir.replace(tempfile.gettempdir() + '/', - '', 1) - self._create_backup_db_entry(volume_id=volume_id, - container=container_name) - self.mock_object(swift, 'Connection', - fake_swift_client2.FakeSwiftClient2.Connection) - service = swift_dr.SwiftBackupDriver(self.ctxt) - self.volume_file.seek(0) - backup = objects.Backup.get_by_id(self.ctxt, fake.BACKUP_ID) - service.backup(backup, self.volume_file) - backup = objects.Backup.get_by_id(self.ctxt, fake.BACKUP_ID) - self.assertEqual(container_name, backup['container']) - - # Verify sha contents - content1 = 
service._read_sha256file(backup) - self.assertEqual(64 * 1024 / content1['chunk_size'], - len(content1['sha256s'])) - - def test_backup_cmp_shafiles(self): - volume_id = '1a99ac67-c534-4fe3-b472-0000001785e2' - - def _fake_generate_object_name_prefix(self, backup): - az = 'az_fake' - backup_name = '%s_backup_%s' % (az, backup['id']) - volume = 'volume_%s' % (backup['volume_id']) - prefix = volume + '_' + backup_name - return prefix - - self.mock_object(swift_dr.SwiftBackupDriver, - '_generate_object_name_prefix', - _fake_generate_object_name_prefix) - - container_name = self.temp_dir.replace(tempfile.gettempdir() + '/', - '', 1) - self._create_backup_db_entry(volume_id=volume_id, - container=container_name, - backup_id=fake.BACKUP_ID) - self.mock_object(swift, 'Connection', - fake_swift_client2.FakeSwiftClient2.Connection) - service = swift_dr.SwiftBackupDriver(self.ctxt) - self.volume_file.seek(0) - backup = objects.Backup.get_by_id(self.ctxt, fake.BACKUP_ID) - service.backup(backup, self.volume_file) - backup = objects.Backup.get_by_id(self.ctxt, fake.BACKUP_ID) - self.assertEqual(container_name, backup['container']) - - # Create incremental backup with no change to contents - self._create_backup_db_entry(volume_id=volume_id, - container=container_name, - backup_id=fake.BACKUP2_ID, - parent_id=fake.BACKUP_ID) - self.mock_object(swift, 'Connection', - fake_swift_client2.FakeSwiftClient2.Connection) - service = swift_dr.SwiftBackupDriver(self.ctxt) - self.volume_file.seek(0) - deltabackup = objects.Backup.get_by_id(self.ctxt, fake.BACKUP2_ID) - service.backup(deltabackup, self.volume_file) - deltabackup = objects.Backup.get_by_id(self.ctxt, fake.BACKUP2_ID) - self.assertEqual(container_name, deltabackup['container']) - - # Compare shas from both files - content1 = service._read_sha256file(backup) - content2 = service._read_sha256file(deltabackup) - - self.assertEqual(len(content1['sha256s']), len(content2['sha256s'])) - self.assertEqual(set(content1['sha256s']), 
set(content2['sha256s'])) - - def test_backup_delta_two_objects_change(self): - volume_id = '30dab288-265a-4583-9abe-000000d42c67' - - def _fake_generate_object_name_prefix(self, backup): - az = 'az_fake' - backup_name = '%s_backup_%s' % (az, backup['id']) - volume = 'volume_%s' % (backup['volume_id']) - prefix = volume + '_' + backup_name - return prefix - - self.mock_object(swift_dr.SwiftBackupDriver, - '_generate_object_name_prefix', - _fake_generate_object_name_prefix) - - self.flags(backup_swift_object_size=8 * 1024) - self.flags(backup_swift_block_size=1024) - - container_name = self.temp_dir.replace(tempfile.gettempdir() + '/', - '', 1) - self._create_backup_db_entry(volume_id=volume_id, - container=container_name, - backup_id=fake.BACKUP_ID) - self.mock_object(swift, 'Connection', - fake_swift_client2.FakeSwiftClient2.Connection) - service = swift_dr.SwiftBackupDriver(self.ctxt) - self.volume_file.seek(0) - backup = objects.Backup.get_by_id(self.ctxt, fake.BACKUP_ID) - service.backup(backup, self.volume_file) - backup = objects.Backup.get_by_id(self.ctxt, fake.BACKUP_ID) - self.assertEqual(container_name, backup['container']) - - # Create incremental backup with no change to contents - self.volume_file.seek(2 * 8 * 1024) - self.volume_file.write(os.urandom(1024)) - self.volume_file.seek(4 * 8 * 1024) - self.volume_file.write(os.urandom(1024)) - - self._create_backup_db_entry(volume_id=volume_id, - container=container_name, - backup_id=fake.BACKUP2_ID, - parent_id=fake.BACKUP_ID) - self.mock_object(swift, 'Connection', - fake_swift_client2.FakeSwiftClient2.Connection) - service = swift_dr.SwiftBackupDriver(self.ctxt) - self.volume_file.seek(0) - deltabackup = objects.Backup.get_by_id(self.ctxt, fake.BACKUP2_ID) - service.backup(deltabackup, self.volume_file) - deltabackup = objects.Backup.get_by_id(self.ctxt, fake.BACKUP2_ID) - self.assertEqual(container_name, deltabackup['container']) - - content1 = service._read_sha256file(backup) - content2 = 
service._read_sha256file(deltabackup) - - # Verify that two shas are changed at index 16 and 32 - self.assertNotEqual(content1['sha256s'][16], content2['sha256s'][16]) - self.assertNotEqual(content1['sha256s'][32], content2['sha256s'][32]) - - def test_backup_delta_two_blocks_in_object_change(self): - volume_id = 'b943e84f-aa67-4331-9ab2-000000cf19ba' - - def _fake_generate_object_name_prefix(self, backup): - az = 'az_fake' - backup_name = '%s_backup_%s' % (az, backup['id']) - volume = 'volume_%s' % (backup['volume_id']) - prefix = volume + '_' + backup_name - return prefix - - self.mock_object(swift_dr.SwiftBackupDriver, - '_generate_object_name_prefix', - _fake_generate_object_name_prefix) - - self.flags(backup_swift_object_size=8 * 1024) - self.flags(backup_swift_block_size=1024) - - container_name = self.temp_dir.replace(tempfile.gettempdir() + '/', - '', 1) - self._create_backup_db_entry(volume_id=volume_id, - container=container_name, - backup_id=fake.BACKUP_ID) - self.mock_object(swift, 'Connection', - fake_swift_client2.FakeSwiftClient2.Connection) - service = swift_dr.SwiftBackupDriver(self.ctxt) - self.volume_file.seek(0) - backup = objects.Backup.get_by_id(self.ctxt, fake.BACKUP_ID) - service.backup(backup, self.volume_file) - backup = objects.Backup.get_by_id(self.ctxt, fake.BACKUP_ID) - self.assertEqual(container_name, backup['container']) - - # Create incremental backup with no change to contents - self.volume_file.seek(16 * 1024) - self.volume_file.write(os.urandom(1024)) - self.volume_file.seek(20 * 1024) - self.volume_file.write(os.urandom(1024)) - - self._create_backup_db_entry(volume_id=volume_id, - container=container_name, - backup_id=fake.BACKUP2_ID, - parent_id=fake.BACKUP_ID) - self.mock_object(swift, 'Connection', - fake_swift_client2.FakeSwiftClient2.Connection) - service = swift_dr.SwiftBackupDriver(self.ctxt) - self.volume_file.seek(0) - deltabackup = objects.Backup.get_by_id(self.ctxt, fake.BACKUP2_ID) - service.backup(deltabackup, 
self.volume_file) - deltabackup = objects.Backup.get_by_id(self.ctxt, fake.BACKUP2_ID) - self.assertEqual(container_name, deltabackup['container']) - - # Verify that two shas are changed at index 16 and 20 - content1 = service._read_sha256file(backup) - content2 = service._read_sha256file(deltabackup) - self.assertNotEqual(content1['sha256s'][16], content2['sha256s'][16]) - self.assertNotEqual(content1['sha256s'][20], content2['sha256s'][20]) - - def test_create_backup_put_object_wraps_socket_error(self): - volume_id = 'c09b1ad4-5f0e-4d3f-8b9e-0000004caec8' - container_name = 'socket_error_on_put' - self._create_backup_db_entry(volume_id=volume_id, - container=container_name) - service = swift_dr.SwiftBackupDriver(self.ctxt) - self.volume_file.seek(0) - backup = objects.Backup.get_by_id(self.ctxt, fake.BACKUP_ID) - self.assertRaises(exception.SwiftConnectionFailed, - service.backup, - backup, self.volume_file) - - def test_backup_backup_metadata_fail(self): - """Test of when an exception occurs in backup(). - - In backup(), after an exception occurs in - self._backup_metadata(), we want to check the process of an - exception handler. - """ - volume_id = '020d9142-339c-4876-a445-000000f1520c' - - self._create_backup_db_entry(volume_id=volume_id) - self.flags(backup_compression_algorithm='none') - service = swift_dr.SwiftBackupDriver(self.ctxt) - self.volume_file.seek(0) - backup = objects.Backup.get_by_id(self.ctxt, fake.BACKUP_ID) - - def fake_backup_metadata(self, backup, object_meta): - raise exception.BackupDriverException(message=_('fake')) - - # Raise a pseudo exception.BackupDriverException. - self.mock_object(swift_dr.SwiftBackupDriver, '_backup_metadata', - fake_backup_metadata) - - # We expect that an exception be notified directly. - self.assertRaises(exception.BackupDriverException, - service.backup, - backup, self.volume_file) - - def test_backup_backup_metadata_fail2(self): - """Test of when an exception occurs in an exception handler. 
- - In backup(), after an exception occurs in - self._backup_metadata(), we want to check the process when the - second exception occurs in self.delete_backup(). - """ - volume_id = '2164421d-f181-4db7-b9bd-000000eeb628' - - self._create_backup_db_entry(volume_id=volume_id) - self.flags(backup_compression_algorithm='none') - service = swift_dr.SwiftBackupDriver(self.ctxt) - self.volume_file.seek(0) - backup = objects.Backup.get_by_id(self.ctxt, fake.BACKUP_ID) - - def fake_backup_metadata(self, backup, object_meta): - raise exception.BackupDriverException(message=_('fake')) - - # Raise a pseudo exception.BackupDriverException. - self.mock_object(swift_dr.SwiftBackupDriver, '_backup_metadata', - fake_backup_metadata) - - def fake_delete(self, backup): - raise exception.BackupOperationError() - - # Raise a pseudo exception.BackupOperationError. - self.mock_object(swift_dr.SwiftBackupDriver, 'delete_backup', - fake_delete) - - # We expect that the second exception is notified. - self.assertRaises(exception.BackupOperationError, - service.backup, - backup, self.volume_file) - - def test_restore(self): - volume_id = 'c2a81f09-f480-4325-8424-00000071685b' - self._create_backup_db_entry(volume_id=volume_id) - service = swift_dr.SwiftBackupDriver(self.ctxt) - - with tempfile.NamedTemporaryFile() as volume_file: - backup = objects.Backup.get_by_id(self.ctxt, fake.BACKUP_ID) - service.restore(backup, volume_id, volume_file) - - def test_restore_delta(self): - volume_id = '04d83506-bcf7-4ff5-9c65-00000051bd2e' - - def _fake_generate_object_name_prefix(self, backup): - az = 'az_fake' - backup_name = '%s_backup_%s' % (az, backup['id']) - volume = 'volume_%s' % (backup['volume_id']) - prefix = volume + '_' + backup_name - return prefix - - self.mock_object(swift_dr.SwiftBackupDriver, - '_generate_object_name_prefix', - _fake_generate_object_name_prefix) - - self.flags(backup_swift_object_size=8 * 1024) - self.flags(backup_swift_block_size=1024) - - container_name = 
self.temp_dir.replace(tempfile.gettempdir() + '/', - '', 1) - self._create_backup_db_entry(volume_id=volume_id, - container=container_name, - backup_id=fake.BACKUP_ID) - self.mock_object(swift, 'Connection', - fake_swift_client2.FakeSwiftClient2.Connection) - service = swift_dr.SwiftBackupDriver(self.ctxt) - self.volume_file.seek(0) - backup = objects.Backup.get_by_id(self.ctxt, fake.BACKUP_ID) - service.backup(backup, self.volume_file) - - # Create incremental backup with no change to contents - self.volume_file.seek(16 * 1024) - self.volume_file.write(os.urandom(1024)) - self.volume_file.seek(20 * 1024) - self.volume_file.write(os.urandom(1024)) - - self._create_backup_db_entry(volume_id=volume_id, - container=container_name, - backup_id=fake.BACKUP2_ID, - parent_id=fake.BACKUP_ID) - self.volume_file.seek(0) - deltabackup = objects.Backup.get_by_id(self.ctxt, fake.BACKUP2_ID) - service.backup(deltabackup, self.volume_file, True) - deltabackup = objects.Backup.get_by_id(self.ctxt, fake.BACKUP2_ID) - - with tempfile.NamedTemporaryFile() as restored_file: - backup = objects.Backup.get_by_id(self.ctxt, fake.BACKUP2_ID) - service.restore(backup, volume_id, - restored_file) - self.assertTrue(filecmp.cmp(self.volume_file.name, - restored_file.name)) - - def test_restore_wraps_socket_error(self): - volume_id = 'c1160de7-2774-4f20-bf14-0000001ac139' - container_name = 'socket_error_on_get' - self._create_backup_db_entry(volume_id=volume_id, - container=container_name) - service = swift_dr.SwiftBackupDriver(self.ctxt) - - with tempfile.NamedTemporaryFile() as volume_file: - backup = objects.Backup.get_by_id(self.ctxt, fake.BACKUP_ID) - self.assertRaises(exception.SwiftConnectionFailed, - service.restore, - backup, volume_id, volume_file) - - def test_restore_unsupported_version(self): - volume_id = '390db8c1-32d3-42ca-82c9-00000010c703' - container_name = 'unsupported_version' - self._create_backup_db_entry(volume_id=volume_id, - container=container_name) - service = 
swift_dr.SwiftBackupDriver(self.ctxt) - - with tempfile.NamedTemporaryFile() as volume_file: - backup = objects.Backup.get_by_id(self.ctxt, fake.BACKUP_ID) - self.assertRaises(exception.InvalidBackup, - service.restore, - backup, volume_id, volume_file) - - def test_delete(self): - volume_id = '9ab256c8-3175-4ad8-baa1-0000007f9d31' - object_prefix = 'test_prefix' - self._create_backup_db_entry(volume_id=volume_id, - service_metadata=object_prefix) - service = swift_dr.SwiftBackupDriver(self.ctxt) - backup = objects.Backup.get_by_id(self.ctxt, fake.BACKUP_ID) - service.delete_backup(backup) - - def test_delete_wraps_socket_error(self): - volume_id = 'f74cb6fa-2900-40df-87ac-0000000f72ea' - container_name = 'socket_error_on_delete' - object_prefix = 'test_prefix' - self._create_backup_db_entry(volume_id=volume_id, - container=container_name, - service_metadata=object_prefix) - service = swift_dr.SwiftBackupDriver(self.ctxt) - backup = objects.Backup.get_by_id(self.ctxt, fake.BACKUP_ID) - self.assertRaises(exception.SwiftConnectionFailed, - service.delete_backup, - backup) - - def test_delete_without_object_prefix(self): - volume_id = 'ee30d649-72a6-49a5-b78d-000000edb6b1' - - def _fake_delete_object(self, container, object_name): - raise AssertionError('delete_object method should not be called.') - - self.mock_object(swift_dr.SwiftBackupDriver, - 'delete_object', - _fake_delete_object) - - self._create_backup_db_entry(volume_id=volume_id) - service = swift_dr.SwiftBackupDriver(self.ctxt) - backup = objects.Backup.get_by_id(self.ctxt, fake.BACKUP_ID) - service.delete_backup(backup) - - def test_get_compressor(self): - service = swift_dr.SwiftBackupDriver(self.ctxt) - compressor = service._get_compressor('None') - self.assertIsNone(compressor) - compressor = service._get_compressor('zlib') - self.assertEqual(zlib, compressor) - compressor = service._get_compressor('bz2') - self.assertEqual(bz2, compressor) - self.assertRaises(ValueError, service._get_compressor, 
'fake') - - def test_prepare_output_data_effective_compression(self): - service = swift_dr.SwiftBackupDriver(self.ctxt) - # Set up buffer of 128 zeroed bytes - fake_data = b'\0' * 128 - - result = service._prepare_output_data(fake_data) - - self.assertEqual('zlib', result[0]) - self.assertGreater(len(fake_data), len(result)) - - def test_prepare_output_data_no_compresssion(self): - self.flags(backup_compression_algorithm='none') - service = swift_dr.SwiftBackupDriver(self.ctxt) - # Set up buffer of 128 zeroed bytes - fake_data = b'\0' * 128 - - result = service._prepare_output_data(fake_data) - - self.assertEqual('none', result[0]) - self.assertEqual(fake_data, result[1]) - - def test_prepare_output_data_ineffective_compression(self): - service = swift_dr.SwiftBackupDriver(self.ctxt) - # Set up buffer of 128 zeroed bytes - fake_data = b'\0' * 128 - # Pre-compress so that compression in the driver will be ineffective. - already_compressed_data = service.compressor.compress(fake_data) - - result = service._prepare_output_data(already_compressed_data) - - self.assertEqual('none', result[0]) - self.assertEqual(already_compressed_data, result[1]) diff --git a/cinder/tests/unit/backup/drivers/test_backup_tsm.py b/cinder/tests/unit/backup/drivers/test_backup_tsm.py deleted file mode 100644 index 3ae39cc5c..000000000 --- a/cinder/tests/unit/backup/drivers/test_backup_tsm.py +++ /dev/null @@ -1,353 +0,0 @@ -# Copyright 2013 IBM Corp -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the -# License for the specific language governing permissions and limitations -# under the License. -# -""" -Tests for volume backup to IBM Tivoli Storage Manager (TSM). -""" - -import json -import mock -import posix - -from oslo_concurrency import processutils as putils -from oslo_utils import timeutils - -from cinder.backup.drivers import tsm -from cinder import context -from cinder import db -from cinder import exception -from cinder import objects -from cinder import test -from cinder.tests.unit import fake_constants as fake - -SIM = None -VOLUME_PATH = '/dev/null' - - -class TSMBackupSimulator(object): - """Simulates TSM dsmc command. - - The simulator simulates the execution of the 'dsmc' command. - This allows the TSM backup test to succeed even if TSM is not installed. - """ - def __init__(self): - self._backup_list = {} - self._hardlinks = [] - self._next_cmd_error = { - 'backup': '', - } - self._intro_msg = ('IBM Tivoli Storage Manager\n' - 'Command Line Backup-Archive Client Interface\n' - '...\n\n') - - def _cmd_backup(self, **kwargs): - # simulates the execution of the dsmc backup command - ret_msg = self._intro_msg - path = kwargs['path'] - - ret_msg += ('Image backup of volume \'%s\'\n\n' - 'Total number of objects inspected: 1\n' - % path) - - if self._next_cmd_error['backup'] == 'fail': - ret_msg += ('ANS1228E Sending of object \'%s\' ' - 'failed\n' % path) - ret_msg += ('ANS1063E The specified path is not a valid file ' - 'system or logical volume name.') - self._next_cmd_error['backup'] = '' - retcode = 12 - else: - ret_msg += 'Total number of objects backed up: 1' - if path not in self._backup_list: - self._backup_list[path] = [] - else: - self._backup_list[path][-1]['active'] = False - date = timeutils.utcnow() - datestr = date.strftime("%m/%d/%Y %H:%M:%S") - self._backup_list[path].append({'date': datestr, 'active': True}) - retcode = 0 - - return (ret_msg, '', retcode) - - def _backup_exists(self, path): - if path not in 
self._backup_list: - return ('ANS4000E Error processing \'%s\': file space does ' - 'not exist.' % path) - - return 'OK' - - def _cmd_restore(self, **kwargs): - - ret_msg = self._intro_msg - path = kwargs['path'] - exists = self._backup_exists(path) - - if exists == 'OK': - ret_msg += ('Total number of objects restored: 1\n' - 'Total number of objects failed: 0') - retcode = 0 - else: - ret_msg += exists - retcode = 12 - - return (ret_msg, '', retcode) - - def _cmd_delete(self, **kwargs): - # simulates the execution of the dsmc delete command - ret_msg = self._intro_msg - path = kwargs['path'] - exists = self._backup_exists(path) - - if exists == 'OK': - ret_msg += ('Total number of objects deleted: 1\n' - 'Total number of objects failed: 0') - retcode = 0 - index = len(self._backup_list[path]) - 1 - del self._backup_list[path][index] - if not len(self._backup_list[path]): - del self._backup_list[path] - else: - ret_msg += exists - retcode = 12 - - return (ret_msg, '', retcode) - - def _cmd_to_dict(self, arg_list): - """Convert command for kwargs (assumes a properly formed command).""" - ret = {'cmd': arg_list[0], - 'type': arg_list[1], - 'path': arg_list[-1]} - - for i in range(2, len(arg_list) - 1): - arg = arg_list[i].split('=') - if len(arg) == 1: - ret[arg[0]] = True - else: - ret[arg[0]] = arg[1] - - return ret - - def _exec_dsmc_cmd(self, cmd): - """Simulates the execution of the dsmc command.""" - cmd_switch = {'backup': self._cmd_backup, - 'restore': self._cmd_restore, - 'delete': self._cmd_delete} - - kwargs = self._cmd_to_dict(cmd) - if kwargs['cmd'] != 'dsmc' or kwargs['type'] not in cmd_switch: - raise putils.ProcessExecutionError(exit_code=1, - stdout='', - stderr='Not dsmc command', - cmd=' '.join(cmd)) - out, err, ret = cmd_switch[kwargs['type']](**kwargs) - return (out, err, ret) - - def exec_cmd(self, cmd): - """Simulates the execution of dsmc, rm, and ln commands.""" - if cmd[0] == 'dsmc': - out, err, ret = self._exec_dsmc_cmd(cmd) - elif cmd[0] 
== 'ln': - dest = cmd[2] - out = '' - if dest in self._hardlinks: - err = ('ln: failed to create hard link `%s\': ' - 'File exists' % dest) - ret = 1 - else: - self._hardlinks.append(dest) - err = '' - ret = 0 - elif cmd[0] == 'rm': - dest = cmd[2] - out = '' - if dest not in self._hardlinks: - err = ('rm: cannot remove `%s\': No such file or ' - 'directory' % dest) - ret = 1 - else: - index = self._hardlinks.index(dest) - del self._hardlinks[index] - err = '' - ret = 0 - else: - raise putils.ProcessExecutionError(exit_code=1, - stdout='', - stderr='Unsupported command', - cmd=' '.join(cmd)) - return (out, err, ret) - - def error_injection(self, cmd, error): - self._next_cmd_error[cmd] = error - - -def fake_exec(*cmd, **kwargs): - # Support only bool - check_exit_code = kwargs.pop('check_exit_code', True) - global SIM - - out, err, ret = SIM.exec_cmd(cmd) - if ret and check_exit_code: - raise putils.ProcessExecutionError( - exit_code=-1, - stdout=out, - stderr=err, - cmd=' '.join(cmd)) - return (out, err) - - -def fake_stat_image(path): - # Simulate stat to return the mode of a block device - # make sure that st_mode (the first in the sequence( - # matches the mode of a block device - return posix.stat_result((25008, 5753, 5, 1, 0, 6, 0, - 1375881199, 1375881197, 1375881197)) - - -def fake_stat_file(path): - # Simulate stat to return the mode of a block device - # make sure that st_mode (the first in the sequence( - # matches the mode of a block device - return posix.stat_result((33188, 5753, 5, 1, 0, 6, 0, - 1375881199, 1375881197, 1375881197)) - - -def fake_stat_illegal(path): - # Simulate stat to return the mode of a block device - # make sure that st_mode (the first in the sequence( - # matches the mode of a block device - return posix.stat_result((17407, 5753, 5, 1, 0, 6, 0, - 1375881199, 1375881197, 1375881197)) - - -@mock.patch('cinder.utils.execute', fake_exec) -class BackupTSMTestCase(test.TestCase): - def setUp(self): - super(BackupTSMTestCase, 
self).setUp() - global SIM - SIM = TSMBackupSimulator() - self.sim = SIM - self.ctxt = context.get_admin_context() - self.driver = tsm.TSMBackupDriver(self.ctxt) - - def _create_volume_db_entry(self, volume_id): - vol = {'id': volume_id, - 'size': 1, - 'status': 'available'} - return db.volume_create(self.ctxt, vol)['id'] - - def _create_backup_db_entry(self, backup_id, mode): - if mode == 'file': - backup_path = VOLUME_PATH - else: - backup_path = '/dev/backup-%s' % backup_id - service_metadata = json.dumps({'backup_mode': mode, - 'backup_path': backup_path}) - backup = {'id': backup_id, - 'size': 1, - 'container': 'test-container', - 'volume_id': fake.VOLUME_ID, - 'service_metadata': service_metadata, - 'user_id': fake.USER_ID, - 'project_id': fake.PROJECT_ID, - } - return db.backup_create(self.ctxt, backup)['id'] - - @mock.patch.object(tsm.os, 'stat', fake_stat_image) - def test_backup_image(self): - volume_id = fake.VOLUME_ID - mode = 'image' - self._create_volume_db_entry(volume_id) - - backup_id1 = fake.BACKUP_ID - backup_id2 = fake.BACKUP2_ID - backup_id3 = fake.BACKUP3_ID - self._create_backup_db_entry(backup_id1, mode) - self._create_backup_db_entry(backup_id2, mode) - self._create_backup_db_entry(backup_id3, mode) - - with open(VOLUME_PATH, 'w+') as volume_file: - # Create two backups of the volume - backup1 = objects.Backup.get_by_id(self.ctxt, backup_id1) - self.driver.backup(backup1, volume_file) - backup2 = objects.Backup.get_by_id(self.ctxt, backup_id2) - self.driver.backup(backup2, volume_file) - - # Create a backup that fails - fail_back = objects.Backup.get_by_id(self.ctxt, backup_id3) - self.sim.error_injection('backup', 'fail') - self.assertRaises(exception.InvalidBackup, - self.driver.backup, fail_back, volume_file) - - # Try to restore one, then the other - self.driver.restore(backup1, volume_id, volume_file) - self.driver.restore(backup2, volume_id, volume_file) - - # Delete both backups - self.driver.delete_backup(backup2) - 
self.driver.delete_backup(backup1) - - @mock.patch.object(tsm.os, 'stat', fake_stat_file) - def test_backup_file(self): - volume_id = fake.VOLUME_ID - mode = 'file' - self._create_volume_db_entry(volume_id) - - self._create_backup_db_entry(fake.BACKUP_ID, mode) - self._create_backup_db_entry(fake.BACKUP2_ID, mode) - - with open(VOLUME_PATH, 'w+') as volume_file: - # Create two backups of the volume - backup1 = objects.Backup.get_by_id(self.ctxt, fake.BACKUP_ID) - self.driver.backup(backup1, volume_file) - backup2 = objects.Backup.get_by_id(self.ctxt, fake.BACKUP2_ID) - self.driver.backup(backup2, volume_file) - - # Create a backup that fails - self._create_backup_db_entry(fake.BACKUP3_ID, mode) - fail_back = objects.Backup.get_by_id(self.ctxt, fake.BACKUP3_ID) - self.sim.error_injection('backup', 'fail') - self.assertRaises(exception.InvalidBackup, - self.driver.backup, fail_back, volume_file) - - # Try to restore one, then the other - self.driver.restore(backup1, volume_id, volume_file) - self.driver.restore(backup2, volume_id, volume_file) - - # Delete both backups - self.driver.delete_backup(backup1) - self.driver.delete_backup(backup2) - - @mock.patch.object(tsm.os, 'stat', fake_stat_illegal) - def test_backup_invalid_mode(self): - volume_id = fake.VOLUME_ID - mode = 'illegal' - self._create_volume_db_entry(volume_id) - - self._create_backup_db_entry(fake.BACKUP_ID, mode) - - with open(VOLUME_PATH, 'w+') as volume_file: - # Create two backups of the volume - backup1 = objects.Backup.get_by_id(self.ctxt, fake.BACKUP_ID) - self.assertRaises(exception.InvalidBackup, - self.driver.backup, backup1, volume_file) - - self.assertRaises(exception.InvalidBackup, - self.driver.restore, - backup1, - volume_id, - volume_file) - - self.assertRaises(exception.InvalidBackup, - self.driver.delete_backup, backup1) diff --git a/cinder/tests/unit/backup/fake_backup.py b/cinder/tests/unit/backup/fake_backup.py deleted file mode 100644 index 41379095c..000000000 --- 
a/cinder/tests/unit/backup/fake_backup.py +++ /dev/null @@ -1,57 +0,0 @@ -# Copyright (c) 2015 Intel Corporation -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from oslo_versionedobjects import fields - -from cinder import objects -from cinder.objects import fields as c_fields -from cinder.tests.unit import fake_constants as fake - - -def fake_db_backup(**updates): - db_backup = { - 'id': fake.BACKUP_ID, - 'user_id': fake.USER_ID, - 'project_id': fake.PROJECT_ID, - 'volume_id': fake.VOLUME_ID, - 'status': c_fields.BackupStatus.CREATING, - 'host': 'fake_host', - 'display_name': 'fake_name', - 'size': 5, - 'display_description': 'fake_description', - 'service_metadata': 'fake_metadata', - 'service': 'fake_service', - 'object_count': 5, - 'num_dependent_backups': 0, - } - - for name, field in objects.Backup.fields.items(): - if name in db_backup: - continue - if field.nullable: - db_backup[name] = None - elif field.default != fields.UnspecifiedDefault: - db_backup[name] = field.default - else: - raise Exception('fake_db_backup needs help with %s' % name) - - if updates: - db_backup.update(updates) - - return db_backup - - -def fake_backup_obj(context, **updates): - return objects.Backup._from_db_object(context, objects.Backup(), - fake_db_backup(**updates)) diff --git a/cinder/tests/unit/backup/fake_google_client.py b/cinder/tests/unit/backup/fake_google_client.py deleted file mode 100644 index 703a030fc..000000000 --- a/cinder/tests/unit/backup/fake_google_client.py 
+++ /dev/null @@ -1,154 +0,0 @@ -# Copyright (C) 2012 Hewlett-Packard Development Company, L.P. -# Copyright (C) 2016 Vedams Inc. -# Copyright (C) 2016 Google Inc. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -import json -import os -import zlib - -from googleapiclient import errors -from oauth2client import client -from oslo_utils import units -import six - - -class FakeGoogleObjectInsertExecute(object): - - def __init__(self, *args, **kwargs): - self.container_name = kwargs['bucket'] - - def execute(self, *args, **kwargs): - if self.container_name == 'gcs_api_failure': - raise errors.Error - return {u'md5Hash': u'Z2NzY2luZGVybWQ1'} - - -class FakeGoogleObjectListExecute(object): - - def __init__(self, *args, **kwargs): - self.container_name = kwargs['bucket'] - - def execute(self, *args, **kwargs): - if self.container_name == 'gcs_connection_failure': - raise Exception - - return {'items': [{'name': 'backup_001'}, - {'name': 'backup_002'}, - {'name': 'backup_003'}]} - - -class FakeGoogleBucketListExecute(object): - - def __init__(self, *args, **kwargs): - self.container_name = kwargs['prefix'] - - def execute(self, *args, **kwargs): - if self.container_name == 'gcs_oauth2_failure': - raise client.Error - return {u'items': [{u'name': u'gcscinderbucket'}, - {u'name': u'gcsbucket'}]} - - -class FakeGoogleBucketInsertExecute(object): - def execute(self, *args, **kwargs): - pass - - -class FakeMediaObject(object): - def __init__(self, *args, 
**kwargs): - self.bucket_name = kwargs['bucket'] - self.object_name = kwargs['object'] - - -class FakeGoogleObject(object): - - def insert(self, *args, **kwargs): - return FakeGoogleObjectInsertExecute(*args, **kwargs) - - def get_media(self, *args, **kwargs): - return FakeMediaObject(*args, **kwargs) - - def list(self, *args, **kwargs): - return FakeGoogleObjectListExecute(*args, **kwargs) - - -class FakeGoogleBucket(object): - - def list(self, *args, **kwargs): - return FakeGoogleBucketListExecute(*args, **kwargs) - - def insert(self, *args, **kwargs): - return FakeGoogleBucketInsertExecute() - - -class FakeGoogleDiscovery(object): - """Logs calls instead of executing.""" - def __init__(self, *args, **kwargs): - pass - - @classmethod - def Build(self, *args, **kargs): - return FakeDiscoveryBuild() - - -class FakeDiscoveryBuild(object): - """Logging calls instead of executing.""" - def __init__(self, *args, **kwargs): - pass - - def objects(self): - return FakeGoogleObject() - - def buckets(self): - return FakeGoogleBucket() - - -class FakeGoogleCredentials(object): - def __init__(self, *args, **kwargs): - pass - - @classmethod - def from_stream(self, *args, **kwargs): - pass - - -class FakeGoogleMediaIoBaseDownload(object): - def __init__(self, fh, req, chunksize=None): - - if 'metadata' in req.object_name: - metadata = {} - metadata['version'] = '1.0.0' - metadata['backup_id'] = 123 - metadata['volume_id'] = 123 - metadata['backup_name'] = 'fake backup' - metadata['backup_description'] = 'fake backup description' - metadata['created_at'] = '2016-01-09 11:20:54,805' - metadata['objects'] = [{ - 'backup_001': {'compression': 'zlib', 'length': 10, - 'offset': 0}, - 'backup_002': {'compression': 'zlib', 'length': 10, - 'offset': 10}, - 'backup_003': {'compression': 'zlib', 'length': 10, - 'offset': 20} - }] - metadata_json = json.dumps(metadata, sort_keys=True, indent=2) - if six.PY3: - metadata_json = metadata_json.encode('utf-8') - fh.write(metadata_json) - else: 
- fh.write(zlib.compress(os.urandom(units.Mi))) - - def next_chunk(self, **kwargs): - return (100, True) diff --git a/cinder/tests/unit/backup/fake_google_client2.py b/cinder/tests/unit/backup/fake_google_client2.py deleted file mode 100644 index fb2e16499..000000000 --- a/cinder/tests/unit/backup/fake_google_client2.py +++ /dev/null @@ -1,131 +0,0 @@ -# Copyright (C) 2012 Hewlett-Packard Development Company, L.P. -# Copyright (C) 2016 Vedams Inc. -# Copyright (C) 2016 Google Inc. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
-import os -import tempfile - - -class FakeGoogleObjectInsertExecute(object): - - def execute(self, *args, **kwargs): - return {u'md5Hash': u'Z2NzY2luZGVybWQ1'} - - -class FakeGoogleObjectListExecute(object): - - def __init__(self, *args, **kwargs): - self.bucket_name = kwargs['bucket'] - self.prefix = kwargs['prefix'] - - def execute(self, *args, **kwargs): - bucket_dir = tempfile.gettempdir() + '/' + self.bucket_name - fake_body = [] - for f in os.listdir(bucket_dir): - try: - f.index(self.prefix) - fake_body.append({'name': f}) - except Exception: - pass - - return {'items': fake_body} - - -class FakeGoogleBucketListExecute(object): - - def execute(self, *args, **kwargs): - return {u'items': [{u'name': u'gcscinderbucket'}, - {u'name': u'gcsbucket'}]} - - -class FakeGoogleBucketInsertExecute(object): - def execute(self, *args, **kwargs): - pass - - -class FakeMediaObject(object): - def __init__(self, *args, **kwargs): - self.bucket_name = kwargs['bucket'] - self.object_name = kwargs['object'] - - -class FakeGoogleObject(object): - - def insert(self, *args, **kwargs): - object_path = (tempfile.gettempdir() + '/' + kwargs['bucket'] + '/' + - kwargs['name']) - kwargs['media_body']._fd.getvalue() - with open(object_path, 'wb') as object_file: - kwargs['media_body']._fd.seek(0) - object_file.write(kwargs['media_body']._fd.read()) - - return FakeGoogleObjectInsertExecute() - - def get_media(self, *args, **kwargs): - return FakeMediaObject(*args, **kwargs) - - def list(self, *args, **kwargs): - return FakeGoogleObjectListExecute(*args, **kwargs) - - -class FakeGoogleBucket(object): - - def list(self, *args, **kwargs): - return FakeGoogleBucketListExecute() - - def insert(self, *args, **kwargs): - return FakeGoogleBucketInsertExecute() - - -class FakeGoogleDiscovery(object): - """Logs calls instead of executing.""" - def __init__(self, *args, **kwargs): - pass - - @classmethod - def Build(self, *args, **kargs): - return FakeDiscoveryBuild() - - -class 
FakeDiscoveryBuild(object): - """Logging calls instead of executing.""" - def __init__(self, *args, **kwargs): - pass - - def objects(self): - return FakeGoogleObject() - - def buckets(self): - return FakeGoogleBucket() - - -class FakeGoogleCredentials(object): - def __init__(self, *args, **kwargs): - pass - - @classmethod - def from_stream(self, *args, **kwargs): - pass - - -class FakeGoogleMediaIoBaseDownload(object): - def __init__(self, fh, req, chunksize=None): - object_path = (tempfile.gettempdir() + '/' + req.bucket_name + '/' + - req.object_name) - with open(object_path, 'rb') as object_file: - fh.write(object_file.read()) - - def next_chunk(self, **kwargs): - return (100, True) diff --git a/cinder/tests/unit/backup/fake_service.py b/cinder/tests/unit/backup/fake_service.py deleted file mode 100644 index 14e48dac5..000000000 --- a/cinder/tests/unit/backup/fake_service.py +++ /dev/null @@ -1,38 +0,0 @@ -# Copyright (C) 2012 Hewlett-Packard Development Company, L.P. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -from cinder.backup import driver - - -class FakeBackupService(driver.BackupDriver): - def __init__(self, context, db_driver=None): - super(FakeBackupService, self).__init__(context, db_driver) - - def backup(self, backup, volume_file): - pass - - def restore(self, backup, volume_id, volume_file): - pass - - def delete_backup(self, backup): - # if backup has magic name of 'fail_on_delete' - # we raise an error - useful for some tests - - # otherwise we return without error - if backup['display_name'] == 'fail_on_delete': - raise IOError('fake') - - -def get_backup_driver(context): - return FakeBackupService(context) diff --git a/cinder/tests/unit/backup/fake_service_with_verify.py b/cinder/tests/unit/backup/fake_service_with_verify.py deleted file mode 100644 index ae6401cd7..000000000 --- a/cinder/tests/unit/backup/fake_service_with_verify.py +++ /dev/null @@ -1,27 +0,0 @@ -# Copyright (C) 2014 Deutsche Telekom AG -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -from cinder.backup import driver -from cinder.tests.unit.backup import fake_service - - -class FakeBackupServiceWithVerify(driver.BackupDriverWithVerify, - fake_service.FakeBackupService): - def verify(self, backup): - pass - - -def get_backup_driver(context): - return FakeBackupServiceWithVerify(context) diff --git a/cinder/tests/unit/backup/fake_swift_client.py b/cinder/tests/unit/backup/fake_swift_client.py deleted file mode 100644 index 8d1b3c2c0..000000000 --- a/cinder/tests/unit/backup/fake_swift_client.py +++ /dev/null @@ -1,109 +0,0 @@ -# Copyright (C) 2012 Hewlett-Packard Development Company, L.P. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -import json -import os -import socket -import zlib - -import six -from six.moves import http_client - -from swiftclient import client as swift - - -class FakeSwiftClient(object): - """Logs calls instead of executing.""" - def __init__(self, *args, **kwargs): - pass - - @classmethod - def Connection(self, *args, **kargs): - return FakeSwiftConnection() - - -class FakeSwiftConnection(object): - """Logging calls instead of executing.""" - def __init__(self, *args, **kwargs): - pass - - def head_container(self, container): - if container == 'missing_container': - raise swift.ClientException('fake exception', - http_status=http_client.NOT_FOUND) - elif container == 'unauthorized_container': - raise swift.ClientException('fake exception', - http_status=http_client.UNAUTHORIZED) - elif container == 'socket_error_on_head': - raise socket.error(111, 'ECONNREFUSED') - pass - - def put_container(self, container): - pass - - def get_container(self, container, **kwargs): - fake_header = None - fake_body = [{'name': 'backup_001'}, - {'name': 'backup_002'}, - {'name': 'backup_003'}] - return fake_header, fake_body - - def head_object(self, container, name): - return {'etag': 'fake-md5-sum'} - - def get_object(self, container, name): - if container == 'socket_error_on_get': - raise socket.error(111, 'ECONNREFUSED') - if 'metadata' in name: - fake_object_header = None - metadata = {} - if container == 'unsupported_version': - metadata['version'] = '9.9.9' - else: - metadata['version'] = '1.0.0' - metadata['backup_id'] = 123 - metadata['volume_id'] = 123 - metadata['backup_name'] = 'fake backup' - metadata['backup_description'] = 'fake backup description' - metadata['created_at'] = '2013-02-19 11:20:54,805' - metadata['objects'] = [{ - 'backup_001': {'compression': 'zlib', 'length': 10, - 'offset': 0}, - 'backup_002': {'compression': 'zlib', 'length': 10, - 'offset': 10}, - 'backup_003': {'compression': 'zlib', 'length': 10, - 'offset': 20} - }] - metadata_json = 
json.dumps(metadata, sort_keys=True, indent=2) - if six.PY3: - metadata_json = metadata_json.encode('utf-8') - fake_object_body = metadata_json - return (fake_object_header, fake_object_body) - - fake_header = None - fake_object_body = os.urandom(1024 * 1024) - return (fake_header, zlib.compress(fake_object_body)) - - def put_object(self, container, name, reader, content_length=None, - etag=None, chunk_size=None, content_type=None, - headers=None, query_string=None): - if container == 'socket_error_on_put': - raise socket.error(111, 'ECONNREFUSED') - return 'fake-md5-sum' - - def delete_object(self, container, name): - if container == 'socket_error_on_delete': - raise socket.error(111, 'ECONNREFUSED') - pass diff --git a/cinder/tests/unit/backup/fake_swift_client2.py b/cinder/tests/unit/backup/fake_swift_client2.py deleted file mode 100644 index 475722405..000000000 --- a/cinder/tests/unit/backup/fake_swift_client2.py +++ /dev/null @@ -1,85 +0,0 @@ -# Copyright (C) 2012 Hewlett-Packard Development Company, L.P. -# Copyright (C) 2014 TrilioData, Inc -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -import hashlib -import os -import socket -import tempfile - -from six.moves import http_client - -from swiftclient import client as swift - - -class FakeSwiftClient2(object): - def __init__(self, *args, **kwargs): - pass - - @classmethod - def Connection(self, *args, **kargs): - return FakeSwiftConnection2() - - -class FakeSwiftConnection2(object): - def __init__(self, *args, **kwargs): - self.tempdir = tempfile.mkdtemp() - - def head_container(self, container): - if container == 'missing_container': - raise swift.ClientException('fake exception', - http_status=http_client.NOT_FOUND) - elif container == 'unauthorized_container': - raise swift.ClientException('fake exception', - http_status=http_client.UNAUTHORIZED) - elif container == 'socket_error_on_head': - raise socket.error(111, 'ECONNREFUSED') - - def put_container(self, container): - pass - - def get_container(self, container, **kwargs): - fake_header = None - container_dir = tempfile.gettempdir() + '/' + container - fake_body = [] - for f in os.listdir(container_dir): - try: - f.index(kwargs['prefix']) - fake_body.append({'name': f}) - except Exception: - pass - - return fake_header, fake_body - - def head_object(self, container, name): - return {'etag': 'fake-md5-sum'} - - def get_object(self, container, name): - if container == 'socket_error_on_get': - raise socket.error(111, 'ECONNREFUSED') - object_path = tempfile.gettempdir() + '/' + container + '/' + name - with open(object_path, 'rb') as object_file: - return (None, object_file.read()) - - def put_object(self, container, name, reader, content_length=None, - etag=None, chunk_size=None, content_type=None, - headers=None, query_string=None): - object_path = tempfile.gettempdir() + '/' + container + '/' + name - with open(object_path, 'wb') as object_file: - object_file.write(reader.read()) - return hashlib.md5(reader.read()).hexdigest() - - def delete_object(self, container, name): - pass diff --git a/cinder/tests/unit/backup/test_backup.py 
b/cinder/tests/unit/backup/test_backup.py deleted file mode 100644 index f9d2fbf57..000000000 --- a/cinder/tests/unit/backup/test_backup.py +++ /dev/null @@ -1,1631 +0,0 @@ -# Copyright (C) 2012 Hewlett-Packard Development Company, L.P. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -"""Tests for Backup code.""" - -import copy -import ddt -import os -import uuid - -import mock -from os_brick.initiator.connectors import fake -from oslo_config import cfg -from oslo_db import exception as db_exc -from oslo_utils import importutils -from oslo_utils import timeutils - -import cinder -from cinder.backup import api -from cinder.backup import manager -from cinder import context -from cinder import db -from cinder import exception -from cinder import objects -from cinder.objects import fields -from cinder import test -from cinder.tests import fake_driver -from cinder.tests.unit.backup import fake_service_with_verify as fake_service -from cinder.tests.unit import utils -from cinder.volume import rpcapi as volume_rpcapi - - -CONF = cfg.CONF - - -class FakeBackupException(Exception): - pass - - -class BaseBackupTest(test.TestCase): - def setUp(self): - super(BaseBackupTest, self).setUp() - self.backup_mgr = importutils.import_object(CONF.backup_manager) - self.backup_mgr.host = 'testhost' - self.ctxt = context.get_admin_context() - - paths = ['cinder.volume.rpcapi.VolumeAPI.delete_snapshot', - 'cinder.volume.rpcapi.VolumeAPI.delete_volume', - 
'cinder.volume.rpcapi.VolumeAPI.detach_volume', - 'cinder.volume.rpcapi.VolumeAPI.' - 'secure_file_operations_enabled'] - self.volume_patches = {} - self.volume_mocks = {} - for path in paths: - name = path.split('.')[-1] - self.volume_patches[name] = mock.patch(path) - self.volume_mocks[name] = self.volume_patches[name].start() - self.addCleanup(self.volume_patches[name].stop) - - def _create_backup_db_entry(self, volume_id=str(uuid.uuid4()), - restore_volume_id=None, - display_name='test_backup', - display_description='this is a test backup', - container='volumebackups', - status=fields.BackupStatus.CREATING, - size=1, - object_count=0, - project_id=str(uuid.uuid4()), - service=None, - temp_volume_id=None, - temp_snapshot_id=None, - snapshot_id=None): - """Create a backup entry in the DB. - - Return the entry ID - """ - kwargs = {} - kwargs['volume_id'] = volume_id - kwargs['restore_volume_id'] = restore_volume_id - kwargs['user_id'] = str(uuid.uuid4()) - kwargs['project_id'] = project_id - kwargs['host'] = 'testhost' - kwargs['availability_zone'] = '1' - kwargs['display_name'] = display_name - kwargs['display_description'] = display_description - kwargs['container'] = container - kwargs['status'] = status - kwargs['fail_reason'] = '' - kwargs['service'] = service or CONF.backup_driver - kwargs['snapshot_id'] = snapshot_id - kwargs['parent_id'] = None - kwargs['size'] = size - kwargs['object_count'] = object_count - kwargs['temp_volume_id'] = temp_volume_id - kwargs['temp_snapshot_id'] = temp_snapshot_id - backup = objects.Backup(context=self.ctxt, **kwargs) - backup.create() - return backup - - def _create_volume_db_entry(self, display_name='test_volume', - display_description='this is a test volume', - status='backing-up', - previous_status='available', - size=1, - host='testhost'): - """Create a volume entry in the DB. 
- - Return the entry ID - """ - vol = {} - vol['size'] = size - vol['host'] = host - vol['user_id'] = str(uuid.uuid4()) - vol['project_id'] = str(uuid.uuid4()) - vol['status'] = status - vol['display_name'] = display_name - vol['display_description'] = display_description - vol['attach_status'] = fields.VolumeAttachStatus.DETACHED - vol['availability_zone'] = '1' - vol['previous_status'] = previous_status - volume = objects.Volume(context=self.ctxt, **vol) - volume.create() - return volume.id - - def _create_snapshot_db_entry(self, display_name='test_snapshot', - display_description='test snapshot', - status=fields.SnapshotStatus.AVAILABLE, - size=1, - volume_id=str(uuid.uuid4()), - provider_location=None): - """Create a snapshot entry in the DB. - - Return the entry ID. - """ - kwargs = {} - kwargs['size'] = size - kwargs['user_id'] = str(uuid.uuid4()) - kwargs['project_id'] = str(uuid.uuid4()) - kwargs['status'] = status - kwargs['display_name'] = display_name - kwargs['display_description'] = display_description - kwargs['volume_id'] = volume_id - kwargs['cgsnapshot_id'] = None - kwargs['volume_size'] = size - kwargs['metadata'] = {} - kwargs['provider_location'] = provider_location - snapshot_obj = objects.Snapshot(context=self.ctxt, **kwargs) - snapshot_obj.create() - return snapshot_obj - - def _create_volume_attach(self, volume_id): - values = {'volume_id': volume_id, - 'attach_status': fields.VolumeAttachStatus.ATTACHED, } - attachment = db.volume_attach(self.ctxt, values) - db.volume_attached(self.ctxt, attachment['id'], None, 'testhost', - '/dev/vd0') - - def _create_exported_record_entry(self, vol_size=1, exported_id=None): - """Create backup metadata export entry.""" - vol_id = self._create_volume_db_entry(status='available', - size=vol_size) - backup = self._create_backup_db_entry( - status=fields.BackupStatus.AVAILABLE, volume_id=vol_id) - - if exported_id is not None: - backup.id = exported_id - - export = self.backup_mgr.export_record(self.ctxt, 
backup) - return export - - def _create_export_record_db_entry(self, - volume_id=str(uuid.uuid4()), - status=fields.BackupStatus.CREATING, - project_id=str(uuid.uuid4()), - backup_id=None): - """Create a backup entry in the DB. - - Return the entry ID - """ - kwargs = {} - kwargs['volume_id'] = volume_id - kwargs['user_id'] = str(uuid.uuid4()) - kwargs['project_id'] = project_id - kwargs['status'] = status - if backup_id: - kwargs['id'] = backup_id - backup = objects.BackupImport(context=self.ctxt, **kwargs) - backup.create() - return backup - - -@ddt.ddt -class BackupTestCase(BaseBackupTest): - """Test Case for backups.""" - - @mock.patch.object(cinder.tests.fake_driver.FakeLoggingVolumeDriver, - 'set_initialized') - @mock.patch.object(cinder.tests.fake_driver.FakeLoggingVolumeDriver, - 'do_setup') - @mock.patch.object(cinder.tests.fake_driver.FakeLoggingVolumeDriver, - 'check_for_setup_error') - @mock.patch('cinder.context.get_admin_context') - def test_init_host(self, mock_get_admin_context, mock_check, mock_setup, - mock_set_initialized): - """Test stuck volumes and backups. 
- - Make sure stuck volumes and backups are reset to correct - states when backup_manager.init_host() is called - """ - def get_admin_context(): - return self.ctxt - - self.override_config('backup_service_inithost_offload', False) - - vol1_id = self._create_volume_db_entry() - self._create_volume_attach(vol1_id) - db.volume_update(self.ctxt, vol1_id, {'status': 'backing-up'}) - vol2_id = self._create_volume_db_entry() - self._create_volume_attach(vol2_id) - db.volume_update(self.ctxt, vol2_id, {'status': 'restoring-backup'}) - vol3_id = self._create_volume_db_entry() - db.volume_update(self.ctxt, vol3_id, {'status': 'available'}) - vol4_id = self._create_volume_db_entry() - db.volume_update(self.ctxt, vol4_id, {'status': 'backing-up'}) - temp_vol_id = self._create_volume_db_entry() - db.volume_update(self.ctxt, temp_vol_id, {'status': 'available'}) - vol5_id = self._create_volume_db_entry() - db.volume_update(self.ctxt, vol5_id, {'status': 'backing-up'}) - temp_snap = self._create_snapshot_db_entry() - temp_snap.status = fields.SnapshotStatus.AVAILABLE - temp_snap.save() - - backup1 = self._create_backup_db_entry( - status=fields.BackupStatus.CREATING, volume_id=vol1_id) - backup2 = self._create_backup_db_entry( - status=fields.BackupStatus.RESTORING, - restore_volume_id=vol2_id) - backup3 = self._create_backup_db_entry( - status=fields.BackupStatus.DELETING, volume_id=vol3_id) - self._create_backup_db_entry(status=fields.BackupStatus.CREATING, - volume_id=vol4_id, - temp_volume_id=temp_vol_id) - self._create_backup_db_entry(status=fields.BackupStatus.CREATING, - volume_id=vol5_id, - temp_snapshot_id=temp_snap.id) - - mock_get_admin_context.side_effect = get_admin_context - self.volume = importutils.import_object(CONF.volume_manager) - self.backup_mgr.init_host() - - self.assertEqual({}, self.backup_mgr.volume_managers) - - vol1 = db.volume_get(self.ctxt, vol1_id) - self.assertEqual('available', vol1['status']) - vol2 = db.volume_get(self.ctxt, vol2_id) - 
self.assertEqual('error_restoring', vol2['status']) - vol3 = db.volume_get(self.ctxt, vol3_id) - self.assertEqual('available', vol3['status']) - vol4 = db.volume_get(self.ctxt, vol4_id) - self.assertEqual('available', vol4['status']) - vol5 = db.volume_get(self.ctxt, vol5_id) - self.assertEqual('available', vol5['status']) - - backup1 = db.backup_get(self.ctxt, backup1.id) - self.assertEqual(fields.BackupStatus.ERROR, backup1['status']) - backup2 = db.backup_get(self.ctxt, backup2.id) - self.assertEqual(fields.BackupStatus.AVAILABLE, backup2['status']) - self.assertRaises(exception.BackupNotFound, - db.backup_get, - self.ctxt, - backup3.id) - - temp_vol = objects.Volume.get_by_id(self.ctxt, temp_vol_id) - self.volume_mocks['delete_volume'].assert_called_once_with( - self.ctxt, temp_vol) - self.assertTrue(self.volume_mocks['detach_volume'].called) - - @mock.patch('cinder.objects.backup.BackupList.get_all_by_host') - @mock.patch('cinder.manager.ThreadPoolManager._add_to_threadpool') - def test_init_host_with_service_inithost_offload(self, - mock_add_threadpool, - mock_get_all_by_host): - vol1_id = self._create_volume_db_entry() - db.volume_update(self.ctxt, vol1_id, {'status': 'available'}) - backup1 = self._create_backup_db_entry( - status=fields.BackupStatus.DELETING, volume_id=vol1_id) - - vol2_id = self._create_volume_db_entry() - db.volume_update(self.ctxt, vol2_id, {'status': 'available'}) - backup2 = self._create_backup_db_entry( - status=fields.BackupStatus.DELETING, volume_id=vol2_id) - mock_get_all_by_host.return_value = [backup1, backup2] - self.backup_mgr.init_host() - calls = [mock.call(self.backup_mgr.delete_backup, mock.ANY, backup1), - mock.call(self.backup_mgr.delete_backup, mock.ANY, backup2)] - mock_add_threadpool.assert_has_calls(calls, any_order=True) - self.assertEqual(2, mock_add_threadpool.call_count) - - @mock.patch('cinder.objects.service.Service.get_minimum_rpc_version') - 
@mock.patch('cinder.objects.service.Service.get_minimum_obj_version') - @mock.patch('cinder.rpc.LAST_RPC_VERSIONS', {'cinder-backup': '1.3', - 'cinder-volume': '1.7'}) - @mock.patch('cinder.rpc.LAST_OBJ_VERSIONS', {'cinder-backup': '1.2', - 'cinder-volume': '1.4'}) - def test_reset(self, get_min_obj, get_min_rpc): - get_min_obj.return_value = 'liberty' - backup_mgr = manager.BackupManager() - - backup_rpcapi = backup_mgr.backup_rpcapi - volume_rpcapi = backup_mgr.volume_rpcapi - self.assertEqual('1.3', backup_rpcapi.client.version_cap) - self.assertEqual('1.2', - backup_rpcapi.client.serializer._base.version_cap) - self.assertEqual('1.7', volume_rpcapi.client.version_cap) - self.assertEqual('1.4', - volume_rpcapi.client.serializer._base.version_cap) - get_min_obj.return_value = objects.base.OBJ_VERSIONS.get_current() - backup_mgr.reset() - - backup_rpcapi = backup_mgr.backup_rpcapi - volume_rpcapi = backup_mgr.volume_rpcapi - self.assertEqual(get_min_rpc.return_value, - backup_rpcapi.client.version_cap) - self.assertEqual(get_min_obj.return_value, - backup_rpcapi.client.serializer._base.version_cap) - self.assertIsNone(backup_rpcapi.client.serializer._base.manifest) - self.assertEqual(get_min_rpc.return_value, - volume_rpcapi.client.version_cap) - self.assertEqual(get_min_obj.return_value, - volume_rpcapi.client.serializer._base.version_cap) - self.assertIsNone(volume_rpcapi.client.serializer._base.manifest) - - def test_is_working(self): - self.assertTrue(self.backup_mgr.is_working()) - - def test_cleanup_incomplete_backup_operations_with_exceptions(self): - """Test cleanup resilience in the face of exceptions.""" - - fake_backup_list = [{'id': str(uuid.uuid4())}, - {'id': str(uuid.uuid4())}, - {'id': str(uuid.uuid4())}] - mock_backup_get_by_host = self.mock_object( - objects.BackupList, 'get_all_by_host') - mock_backup_get_by_host.return_value = fake_backup_list - - mock_backup_cleanup = self.mock_object( - self.backup_mgr, '_cleanup_one_backup') - 
mock_backup_cleanup.side_effect = [Exception] - - mock_temp_cleanup = self.mock_object( - self.backup_mgr, '_cleanup_temp_volumes_snapshots_for_one_backup') - mock_temp_cleanup.side_effect = [Exception] - - self.assertIsNone( - self.backup_mgr._cleanup_incomplete_backup_operations( - self.ctxt)) - - self.assertEqual(len(fake_backup_list), mock_backup_cleanup.call_count) - self.assertEqual(len(fake_backup_list), mock_temp_cleanup.call_count) - - def test_cleanup_one_backing_up_volume(self): - """Test cleanup_one_volume for volume status 'backing-up'.""" - - volume_id = self._create_volume_db_entry(status='backing-up', - previous_status='available') - volume = db.volume_get(self.ctxt, volume_id) - - self.backup_mgr._cleanup_one_volume(self.ctxt, volume) - - volume = db.volume_get(self.ctxt, volume_id) - self.assertEqual('available', volume['status']) - - def test_cleanup_one_restoring_backup_volume(self): - """Test cleanup_one_volume for volume status 'restoring-backup'.""" - - volume_id = self._create_volume_db_entry(status='restoring-backup') - volume = db.volume_get(self.ctxt, volume_id) - - self.backup_mgr._cleanup_one_volume(self.ctxt, volume) - - volume = db.volume_get(self.ctxt, volume_id) - self.assertEqual('error_restoring', volume['status']) - - def test_cleanup_one_creating_backup(self): - """Test cleanup_one_backup for volume status 'creating'.""" - - vol1_id = self._create_volume_db_entry() - self._create_volume_attach(vol1_id) - db.volume_update(self.ctxt, vol1_id, {'status': 'backing-up', }) - - backup = self._create_backup_db_entry( - status=fields.BackupStatus.CREATING, - volume_id=vol1_id) - - self.backup_mgr._cleanup_one_backup(self.ctxt, backup) - - self.assertEqual(fields.BackupStatus.ERROR, backup.status) - volume = objects.Volume.get_by_id(self.ctxt, vol1_id) - self.assertEqual('available', volume.status) - - def test_cleanup_one_restoring_backup(self): - """Test cleanup_one_backup for volume status 'restoring'.""" - - vol1_id = 
self._create_volume_db_entry() - db.volume_update(self.ctxt, vol1_id, {'status': 'restoring-backup', }) - - backup = self._create_backup_db_entry( - status=fields.BackupStatus.RESTORING, - restore_volume_id=vol1_id) - - self.backup_mgr._cleanup_one_backup(self.ctxt, backup) - - self.assertEqual(fields.BackupStatus.AVAILABLE, backup.status) - volume = objects.Volume.get_by_id(self.ctxt, vol1_id) - self.assertEqual('error_restoring', volume.status) - - def test_cleanup_one_deleting_backup(self): - """Test cleanup_one_backup for volume status 'deleting'.""" - - self.override_config('backup_service_inithost_offload', False) - - backup = self._create_backup_db_entry( - status=fields.BackupStatus.DELETING) - - self.backup_mgr._cleanup_one_backup(self.ctxt, backup) - - self.assertRaises(exception.BackupNotFound, - db.backup_get, - self.ctxt, - backup.id) - - def test_detach_all_attachments_handles_exceptions(self): - """Test detach_all_attachments with exceptions.""" - - mock_log = self.mock_object(manager, 'LOG') - self.volume_mocks['detach_volume'].side_effect = [Exception] - - fake_attachments = [ - { - 'id': str(uuid.uuid4()), - 'attached_host': 'testhost', - 'instance_uuid': None, - }, - { - 'id': str(uuid.uuid4()), - 'attached_host': 'testhost', - 'instance_uuid': None, - } - ] - fake_volume = { - 'id': str(uuid.uuid4()), - 'volume_attachment': fake_attachments - } - - self.backup_mgr._detach_all_attachments(self.ctxt, - fake_volume) - - self.assertEqual(len(fake_attachments), mock_log.exception.call_count) - - @ddt.data(KeyError, exception.VolumeNotFound) - def test_cleanup_temp_volumes_snapshots_for_one_backup_volume_not_found( - self, err): - """Ensure we handle missing volume for a backup.""" - - mock_volume_get = self.mock_object(db, 'volume_get') - mock_volume_get.side_effect = [err] - - backup = self._create_backup_db_entry( - status=fields.BackupStatus.CREATING) - - self.assertIsNone( - self.backup_mgr._cleanup_temp_volumes_snapshots_for_one_backup( - 
self.ctxt, - backup)) - - def test_cleanup_temp_snapshot_for_one_backup_not_found(self): - """Ensure we handle missing temp snapshot for a backup.""" - - vol1_id = self._create_volume_db_entry() - self._create_volume_attach(vol1_id) - db.volume_update(self.ctxt, vol1_id, {'status': 'backing-up'}) - backup = self._create_backup_db_entry( - status=fields.BackupStatus.ERROR, - volume_id=vol1_id, - temp_snapshot_id=str(uuid.uuid4())) - - self.assertIsNone( - self.backup_mgr._cleanup_temp_volumes_snapshots_for_one_backup( - self.ctxt, - backup)) - - self.assertFalse(self.volume_mocks['delete_snapshot'].called) - self.assertIsNone(backup.temp_snapshot_id) - - backup.destroy() - db.volume_destroy(self.ctxt, vol1_id) - - def test_cleanup_temp_volume_for_one_backup_not_found(self): - """Ensure we handle missing temp volume for a backup.""" - - vol1_id = self._create_volume_db_entry() - self._create_volume_attach(vol1_id) - db.volume_update(self.ctxt, vol1_id, {'status': 'backing-up'}) - backup = self._create_backup_db_entry(status=fields.BackupStatus.ERROR, - volume_id=vol1_id, - temp_volume_id=str(uuid.uuid4())) - - self.assertIsNone( - self.backup_mgr._cleanup_temp_volumes_snapshots_for_one_backup( - self.ctxt, - backup)) - - self.assertFalse(self.volume_mocks['delete_volume'].called) - self.assertIsNone(backup.temp_volume_id) - - backup.destroy() - db.volume_destroy(self.ctxt, vol1_id) - - def test_create_backup_with_bad_volume_status(self): - """Test creating a backup from a volume with a bad status.""" - vol_id = self._create_volume_db_entry(status='restoring', size=1) - backup = self._create_backup_db_entry(volume_id=vol_id) - self.assertRaises(exception.InvalidVolume, - self.backup_mgr.create_backup, - self.ctxt, - backup) - - def test_create_backup_with_bad_backup_status(self): - """Test creating a backup with a backup with a bad status.""" - vol_id = self._create_volume_db_entry(size=1) - backup = self._create_backup_db_entry( - 
status=fields.BackupStatus.AVAILABLE, volume_id=vol_id) - self.assertRaises(exception.InvalidBackup, - self.backup_mgr.create_backup, - self.ctxt, - backup) - - def test_create_backup_with_error(self): - """Test error handling when error occurs during backup creation.""" - vol_id = self._create_volume_db_entry(size=1) - backup = self._create_backup_db_entry(volume_id=vol_id) - - mock_run_backup = self.mock_object(self.backup_mgr, '_run_backup') - mock_run_backup.side_effect = FakeBackupException(str(uuid.uuid4())) - self.assertRaises(FakeBackupException, - self.backup_mgr.create_backup, - self.ctxt, - backup) - vol = db.volume_get(self.ctxt, vol_id) - self.assertEqual('available', vol['status']) - self.assertEqual('error_backing-up', vol['previous_status']) - backup = db.backup_get(self.ctxt, backup.id) - self.assertEqual(fields.BackupStatus.ERROR, backup['status']) - self.assertTrue(mock_run_backup.called) - - @mock.patch('cinder.backup.manager.BackupManager._run_backup', - side_effect=FakeBackupException(str(uuid.uuid4()))) - def test_create_backup_with_snapshot_error(self, mock_run_backup): - """Test error handling when error occurs during backup creation.""" - vol_id = self._create_volume_db_entry(size=1) - snapshot = self._create_snapshot_db_entry(status='backing-up', - volume_id=vol_id) - backup = self._create_backup_db_entry(volume_id=vol_id, - snapshot_id=snapshot.id) - self.assertRaises(FakeBackupException, - self.backup_mgr.create_backup, - self.ctxt, - backup) - - snapshot.refresh() - self.assertEqual('available', snapshot.status) - - backup.refresh() - self.assertEqual(fields.BackupStatus.ERROR, backup.status) - self.assertTrue(mock_run_backup.called) - - @mock.patch('cinder.utils.brick_get_connector_properties') - @mock.patch('cinder.volume.rpcapi.VolumeAPI.get_backup_device') - @mock.patch('cinder.utils.temporary_chown') - @mock.patch('six.moves.builtins.open') - @mock.patch.object(os.path, 'isdir', return_value=False) - def test_create_backup(self, 
mock_isdir, mock_open, mock_temporary_chown, - mock_get_backup_device, mock_get_conn): - """Test normal backup creation.""" - vol_size = 1 - vol_id = self._create_volume_db_entry(size=vol_size) - backup = self._create_backup_db_entry(volume_id=vol_id) - - vol = objects.Volume.get_by_id(self.ctxt, vol_id) - backup_device_dict = {'backup_device': vol, 'secure_enabled': False, - 'is_snapshot': False, } - mock_get_backup_device.return_value = ( - objects.BackupDeviceInfo.from_primitive(backup_device_dict, - self.ctxt, - ['admin_metadata', - 'metadata'])) - attach_info = {'device': {'path': '/dev/null'}} - mock_detach_device = self.mock_object(self.backup_mgr, - '_detach_device') - mock_attach_device = self.mock_object(self.backup_mgr, - '_attach_device') - mock_attach_device.return_value = attach_info - properties = {} - mock_get_conn.return_value = properties - mock_open.return_value = open('/dev/null', 'rb') - - self.backup_mgr.create_backup(self.ctxt, backup) - - mock_temporary_chown.assert_called_once_with('/dev/null') - mock_attach_device.assert_called_once_with(self.ctxt, vol, - properties, False) - mock_get_backup_device.assert_called_once_with(self.ctxt, backup, vol) - mock_get_conn.assert_called_once_with() - mock_detach_device.assert_called_once_with(self.ctxt, attach_info, - vol, properties, False) - - vol = objects.Volume.get_by_id(self.ctxt, vol_id) - self.assertEqual('available', vol['status']) - self.assertEqual('backing-up', vol['previous_status']) - backup = db.backup_get(self.ctxt, backup.id) - self.assertEqual(fields.BackupStatus.AVAILABLE, backup['status']) - self.assertEqual(vol_size, backup['size']) - - @mock.patch('cinder.utils.brick_get_connector_properties') - @mock.patch('cinder.volume.rpcapi.VolumeAPI.get_backup_device') - @mock.patch('cinder.utils.temporary_chown') - @mock.patch('six.moves.builtins.open') - @mock.patch.object(os.path, 'isdir', return_value=True) - def test_run_backup_with_dir_device_path(self, mock_isdir, - mock_open, - 
mock_chown, - mock_backup_device, - mock_brick): - backup_service = lambda: None - backup_service.backup = mock.Mock() - self.backup_mgr.service.get_backup_driver = lambda x: backup_service - - vol_id = self._create_volume_db_entry() - backup = self._create_backup_db_entry(volume_id=vol_id) - volume = objects.Volume.get_by_id(self.ctxt, vol_id) - - # device_path is represented by a directory - device_path = '/fake/disk/path/' - attach_info = {'device': {'path': device_path}} - self.backup_mgr._attach_device = mock.Mock( - return_value=attach_info) - self.backup_mgr._detach_device = mock.Mock() - self.backup_mgr._run_backup(self.ctxt, backup, volume) - - mock_chown.assert_not_called() - mock_open.assert_not_called() - backup_service.backup.assert_called_once_with( - backup, device_path) - - @mock.patch('cinder.backup.manager.BackupManager._run_backup') - @ddt.data((fields.SnapshotStatus.BACKING_UP, 'available'), - (fields.SnapshotStatus.BACKING_UP, 'in-use'), - (fields.SnapshotStatus.AVAILABLE, 'available'), - (fields.SnapshotStatus.AVAILABLE, 'in-use')) - @ddt.unpack - def test_create_backup_with_snapshot(self, snapshot_status, volume_status, - mock_run_backup): - vol_id = self._create_volume_db_entry(status=volume_status) - snapshot = self._create_snapshot_db_entry(volume_id=vol_id, - status=snapshot_status) - backup = self._create_backup_db_entry(volume_id=vol_id, - snapshot_id=snapshot.id) - if snapshot_status == fields.SnapshotStatus.BACKING_UP: - self.backup_mgr.create_backup(self.ctxt, backup) - - vol = objects.Volume.get_by_id(self.ctxt, vol_id) - snapshot = objects.Snapshot.get_by_id(self.ctxt, snapshot.id) - - self.assertEqual(volume_status, vol.status) - self.assertEqual(fields.SnapshotStatus.AVAILABLE, snapshot.status) - else: - self.assertRaises(exception.InvalidSnapshot, - self.backup_mgr.create_backup, self.ctxt, backup) - - @mock.patch('cinder.utils.brick_get_connector_properties') - @mock.patch('cinder.volume.rpcapi.VolumeAPI.get_backup_device') - 
@mock.patch('cinder.utils.temporary_chown') - @mock.patch('six.moves.builtins.open') - @mock.patch.object(os.path, 'isdir', return_value=False) - def test_create_backup_with_temp_snapshot(self, mock_isdir, - mock_open, - mock_temporary_chown, - mock_get_backup_device, - mock_get_conn): - """Test backup in-use volume using temp snapshot.""" - self.override_config('backup_use_same_host', True) - vol_size = 1 - vol_id = self._create_volume_db_entry(size=vol_size, - previous_status='in-use') - backup = self._create_backup_db_entry(volume_id=vol_id) - snap = self._create_snapshot_db_entry(volume_id=vol_id) - - vol = objects.Volume.get_by_id(self.ctxt, vol_id) - mock_get_backup_device.return_value = ( - objects.BackupDeviceInfo.from_primitive({ - 'backup_device': snap, 'secure_enabled': False, - 'is_snapshot': True, }, - self.ctxt, expected_attrs=['metadata'])) - - attach_info = { - 'device': {'path': '/dev/null'}, - 'conn': {'data': {}}, - 'connector': fake.FakeConnector(None)} - mock_terminate_connection_snapshot = self.mock_object( - volume_rpcapi.VolumeAPI, - 'terminate_connection_snapshot') - mock_initialize_connection_snapshot = self.mock_object( - volume_rpcapi.VolumeAPI, - 'initialize_connection_snapshot') - mock_connect_device = self.mock_object( - manager.BackupManager, - '_connect_device') - mock_connect_device.return_value = attach_info - properties = {} - mock_get_conn.return_value = properties - mock_open.return_value = open('/dev/null', 'rb') - - self.backup_mgr.create_backup(self.ctxt, backup) - mock_temporary_chown.assert_called_once_with('/dev/null') - mock_initialize_connection_snapshot.assert_called_once_with( - self.ctxt, snap, properties) - mock_get_backup_device.assert_called_once_with(self.ctxt, backup, vol) - mock_get_conn.assert_called_once_with() - mock_terminate_connection_snapshot.assert_called_once_with( - self.ctxt, snap, properties, force=False) - vol = objects.Volume.get_by_id(self.ctxt, vol_id) - self.assertEqual('in-use', vol['status']) 
- self.assertEqual('backing-up', vol['previous_status']) - backup = objects.Backup.get_by_id(self.ctxt, backup.id) - self.assertEqual(fields.BackupStatus.AVAILABLE, backup.status) - self.assertEqual(vol_size, backup.size) - - @mock.patch.object(fake_driver.FakeLoggingVolumeDriver, 'create_snapshot') - def test_create_temp_snapshot(self, mock_create_snapshot): - volume_manager = importutils.import_object(CONF.volume_manager) - volume_manager.driver.set_initialized() - vol_size = 1 - vol_id = self._create_volume_db_entry(size=vol_size, - previous_status='in-use') - vol = objects.Volume.get_by_id(self.ctxt, vol_id) - mock_create_snapshot.return_value = {'provider_id': - 'fake_provider_id'} - - temp_snap = volume_manager.driver._create_temp_snapshot( - self.ctxt, vol) - - self.assertEqual('available', temp_snap['status']) - self.assertEqual('fake_provider_id', temp_snap['provider_id']) - - @mock.patch.object(fake_driver.FakeLoggingVolumeDriver, - 'create_cloned_volume') - def test_create_temp_cloned_volume(self, mock_create_cloned_volume): - volume_manager = importutils.import_object(CONF.volume_manager) - volume_manager.driver.set_initialized() - vol_size = 1 - vol_id = self._create_volume_db_entry(size=vol_size, - previous_status='in-use') - vol = objects.Volume.get_by_id(self.ctxt, vol_id) - mock_create_cloned_volume.return_value = {'provider_id': - 'fake_provider_id'} - - temp_vol = volume_manager.driver._create_temp_cloned_volume( - self.ctxt, vol) - - self.assertEqual('available', temp_vol['status']) - self.assertEqual('fake_provider_id', temp_vol['provider_id']) - - @mock.patch.object(fake_driver.FakeLoggingVolumeDriver, - 'create_volume_from_snapshot') - def test_create_temp_volume_from_snapshot(self, mock_create_vol_from_snap): - volume_manager = importutils.import_object(CONF.volume_manager) - volume_manager.driver.set_initialized() - vol_size = 1 - vol_id = self._create_volume_db_entry(size=vol_size, - previous_status='in-use') - vol = 
objects.Volume.get_by_id(self.ctxt, vol_id) - snap = self._create_snapshot_db_entry(volume_id=vol_id) - mock_create_vol_from_snap.return_value = {'provider_id': - 'fake_provider_id'} - - temp_vol = volume_manager.driver._create_temp_volume_from_snapshot( - self.ctxt, vol, snap) - - self.assertEqual('available', temp_vol['status']) - self.assertEqual('fake_provider_id', temp_vol['provider_id']) - - @mock.patch('cinder.volume.utils.notify_about_backup_usage') - def test_create_backup_with_notify(self, notify): - """Test normal backup creation with notifications.""" - vol_size = 1 - vol_id = self._create_volume_db_entry(size=vol_size) - backup = self._create_backup_db_entry(volume_id=vol_id) - - self.mock_object(self.backup_mgr, '_run_backup') - self.backup_mgr.create_backup(self.ctxt, backup) - self.assertEqual(2, notify.call_count) - - def test_restore_backup_with_bad_volume_status(self): - """Test error handling. - - Test error handling when restoring a backup to a volume - with a bad status. - """ - vol_id = self._create_volume_db_entry(status='available', size=1) - backup = self._create_backup_db_entry(volume_id=vol_id) - self.assertRaises(exception.InvalidVolume, - self.backup_mgr.restore_backup, - self.ctxt, - backup, - vol_id) - backup = db.backup_get(self.ctxt, backup.id) - self.assertEqual(fields.BackupStatus.AVAILABLE, backup['status']) - - def test_restore_backup_with_bad_backup_status(self): - """Test error handling. - - Test error handling when restoring a backup with a backup - with a bad status. 
- """ - vol_id = self._create_volume_db_entry(status='restoring-backup', - size=1) - backup = self._create_backup_db_entry( - status=fields.BackupStatus.AVAILABLE, volume_id=vol_id) - self.assertRaises(exception.InvalidBackup, - self.backup_mgr.restore_backup, - self.ctxt, - backup, - vol_id) - vol = db.volume_get(self.ctxt, vol_id) - self.assertEqual('error', vol['status']) - backup = db.backup_get(self.ctxt, backup.id) - self.assertEqual(fields.BackupStatus.ERROR, backup['status']) - - def test_restore_backup_with_driver_error(self): - """Test error handling when an error occurs during backup restore.""" - vol_id = self._create_volume_db_entry(status='restoring-backup', - size=1) - backup = self._create_backup_db_entry( - status=fields.BackupStatus.RESTORING, volume_id=vol_id) - - mock_run_restore = self.mock_object( - self.backup_mgr, - '_run_restore') - mock_run_restore.side_effect = FakeBackupException('fake') - self.assertRaises(FakeBackupException, - self.backup_mgr.restore_backup, - self.ctxt, - backup, - vol_id) - vol = db.volume_get(self.ctxt, vol_id) - self.assertEqual('error_restoring', vol['status']) - backup = db.backup_get(self.ctxt, backup.id) - self.assertEqual(fields.BackupStatus.AVAILABLE, backup['status']) - self.assertTrue(mock_run_restore.called) - - def test_restore_backup_with_bad_service(self): - """Test error handling. - - Test error handling when attempting a restore of a backup - with a different service to that used to create the backup. 
- """ - vol_id = self._create_volume_db_entry(status='restoring-backup', - size=1) - service = 'cinder.tests.backup.bad_service' - backup = self._create_backup_db_entry( - status=fields.BackupStatus.RESTORING, volume_id=vol_id, - service=service) - - self.assertRaises(exception.InvalidBackup, - self.backup_mgr.restore_backup, - self.ctxt, - backup, - vol_id) - vol = db.volume_get(self.ctxt, vol_id) - self.assertEqual('error', vol['status']) - backup = db.backup_get(self.ctxt, backup.id) - self.assertEqual(fields.BackupStatus.AVAILABLE, backup['status']) - - @mock.patch('cinder.utils.brick_get_connector_properties') - @mock.patch('cinder.utils.temporary_chown') - @mock.patch('six.moves.builtins.open') - @mock.patch.object(os.path, 'isdir', return_value=False) - def test_restore_backup(self, mock_isdir, mock_open, - mock_temporary_chown, mock_get_conn): - """Test normal backup restoration.""" - vol_size = 1 - vol_id = self._create_volume_db_entry(status='restoring-backup', - size=vol_size) - backup = self._create_backup_db_entry( - status=fields.BackupStatus.RESTORING, volume_id=vol_id) - - properties = {} - mock_get_conn.return_value = properties - mock_open.return_value = open('/dev/null', 'wb') - mock_secure_enabled = ( - self.volume_mocks['secure_file_operations_enabled']) - mock_secure_enabled.return_value = False - vol = objects.Volume.get_by_id(self.ctxt, vol_id) - attach_info = {'device': {'path': '/dev/null'}} - mock_detach_device = self.mock_object(self.backup_mgr, - '_detach_device') - mock_attach_device = self.mock_object(self.backup_mgr, - '_attach_device') - mock_attach_device.return_value = attach_info - - self.backup_mgr.restore_backup(self.ctxt, backup, vol_id) - - mock_temporary_chown.assert_called_once_with('/dev/null') - mock_get_conn.assert_called_once_with() - mock_secure_enabled.assert_called_once_with(self.ctxt, vol) - mock_attach_device.assert_called_once_with(self.ctxt, vol, - properties) - 
mock_detach_device.assert_called_once_with(self.ctxt, attach_info, - vol, properties) - - vol = objects.Volume.get_by_id(self.ctxt, vol_id) - self.assertEqual('available', vol['status']) - backup = db.backup_get(self.ctxt, backup.id) - self.assertEqual(fields.BackupStatus.AVAILABLE, backup['status']) - - @mock.patch('cinder.volume.utils.notify_about_backup_usage') - def test_restore_backup_with_notify(self, notify): - """Test normal backup restoration with notifications.""" - vol_size = 1 - vol_id = self._create_volume_db_entry(status='restoring-backup', - size=vol_size) - backup = self._create_backup_db_entry( - status=fields.BackupStatus.RESTORING, volume_id=vol_id) - self.backup_mgr._run_restore = mock.Mock() - - self.backup_mgr.restore_backup(self.ctxt, backup, vol_id) - self.assertEqual(2, notify.call_count) - - def test_delete_backup_with_bad_backup_status(self): - """Test error handling. - - Test error handling when deleting a backup with a backup - with a bad status. - """ - vol_id = self._create_volume_db_entry(size=1) - backup = self._create_backup_db_entry( - status=fields.BackupStatus.AVAILABLE, volume_id=vol_id) - self.assertRaises(exception.InvalidBackup, - self.backup_mgr.delete_backup, - self.ctxt, - backup) - backup = db.backup_get(self.ctxt, backup.id) - self.assertEqual(fields.BackupStatus.ERROR, backup['status']) - - def test_delete_backup_with_error(self): - """Test error handling when an error occurs during backup deletion.""" - vol_id = self._create_volume_db_entry(size=1) - backup = self._create_backup_db_entry( - status=fields.BackupStatus.DELETING, - display_name='fail_on_delete', volume_id=vol_id) - self.assertRaises(IOError, - self.backup_mgr.delete_backup, - self.ctxt, - backup) - backup = db.backup_get(self.ctxt, backup.id) - self.assertEqual(fields.BackupStatus.ERROR, backup['status']) - - def test_delete_backup_with_bad_service(self): - """Test error handling. 
- - Test error handling when attempting a delete of a backup - with a different service to that used to create the backup. - """ - vol_id = self._create_volume_db_entry(size=1) - service = 'cinder.tests.backup.bad_service' - backup = self._create_backup_db_entry( - status=fields.BackupStatus.DELETING, volume_id=vol_id, - service=service) - self.assertRaises(exception.InvalidBackup, - self.backup_mgr.delete_backup, - self.ctxt, - backup) - backup = db.backup_get(self.ctxt, backup.id) - self.assertEqual(fields.BackupStatus.ERROR, backup['status']) - - def test_delete_backup_with_no_service(self): - """Test error handling. - - Test error handling when attempting a delete of a backup - with no service defined for that backup, relates to bug #1162908 - """ - vol_id = self._create_volume_db_entry(size=1) - backup = self._create_backup_db_entry( - status=fields.BackupStatus.DELETING, volume_id=vol_id) - backup.service = None - backup.save() - self.backup_mgr.delete_backup(self.ctxt, backup) - - def test_delete_backup(self): - """Test normal backup deletion.""" - vol_id = self._create_volume_db_entry(size=1) - backup = self._create_backup_db_entry( - status=fields.BackupStatus.DELETING, volume_id=vol_id) - self.backup_mgr.delete_backup(self.ctxt, backup) - self.assertRaises(exception.BackupNotFound, - db.backup_get, - self.ctxt, - backup.id) - - ctxt_read_deleted = context.get_admin_context('yes') - backup = db.backup_get(ctxt_read_deleted, backup.id) - self.assertTrue(backup.deleted) - self.assertGreaterEqual(timeutils.utcnow(), backup.deleted_at) - self.assertEqual(fields.BackupStatus.DELETED, backup.status) - - @mock.patch('cinder.volume.utils.notify_about_backup_usage') - def test_delete_backup_with_notify(self, notify): - """Test normal backup deletion with notifications.""" - vol_id = self._create_volume_db_entry(size=1) - backup = self._create_backup_db_entry( - status=fields.BackupStatus.DELETING, volume_id=vol_id) - self.backup_mgr.delete_backup(self.ctxt, backup) 
- self.assertEqual(2, notify.call_count) - - def test_list_backup(self): - project_id = str(uuid.uuid4()) - backups = db.backup_get_all_by_project(self.ctxt, project_id) - self.assertEqual(0, len(backups)) - - self._create_backup_db_entry() - b2 = self._create_backup_db_entry(project_id=project_id) - backups = db.backup_get_all_by_project(self.ctxt, project_id) - self.assertEqual(1, len(backups)) - self.assertEqual(b2.id, backups[0].id) - - def test_backup_get_all_by_project_with_deleted(self): - """Test deleted backups. - - Test deleted backups don't show up in backup_get_all_by_project. - Unless context.read_deleted is 'yes'. - """ - project_id = str(uuid.uuid4()) - backups = db.backup_get_all_by_project(self.ctxt, project_id) - self.assertEqual(0, len(backups)) - - backup_keep = self._create_backup_db_entry(project_id=project_id) - backup = self._create_backup_db_entry(project_id=project_id) - db.backup_destroy(self.ctxt, backup.id) - - backups = db.backup_get_all_by_project(self.ctxt, project_id) - self.assertEqual(1, len(backups)) - self.assertEqual(backup_keep.id, backups[0].id) - - ctxt_read_deleted = context.get_admin_context('yes') - backups = db.backup_get_all_by_project(ctxt_read_deleted, project_id) - self.assertEqual(2, len(backups)) - - def test_backup_get_all_by_host_with_deleted(self): - """Test deleted backups. - - Test deleted backups don't show up in backup_get_all_by_project. 
- Unless context.read_deleted is 'yes' - """ - backups = db.backup_get_all_by_host(self.ctxt, 'testhost') - self.assertEqual(0, len(backups)) - - backup_keep = self._create_backup_db_entry() - backup = self._create_backup_db_entry() - db.backup_destroy(self.ctxt, backup.id) - - backups = db.backup_get_all_by_host(self.ctxt, 'testhost') - self.assertEqual(1, len(backups)) - self.assertEqual(backup_keep.id, backups[0].id) - - ctxt_read_deleted = context.get_admin_context('yes') - backups = db.backup_get_all_by_host(ctxt_read_deleted, 'testhost') - self.assertEqual(2, len(backups)) - - def test_backup_manager_driver_name(self): - """Test mapping between backup services and backup drivers.""" - self.override_config('backup_driver', "cinder.backup.services.swift") - backup_mgr = \ - importutils.import_object(CONF.backup_manager) - self.assertEqual('cinder.backup.drivers.swift', - backup_mgr.driver_name) - - def test_export_record_with_bad_service(self): - """Test error handling. - - Test error handling when attempting an export of a backup - record with a different service to that used to create the backup. - """ - vol_id = self._create_volume_db_entry(size=1) - service = 'cinder.tests.backup.bad_service' - backup = self._create_backup_db_entry( - status=fields.BackupStatus.AVAILABLE, volume_id=vol_id, - service=service) - - self.assertRaises(exception.InvalidBackup, - self.backup_mgr.export_record, - self.ctxt, - backup) - - def test_export_record_with_bad_backup_status(self): - """Test error handling. - - Test error handling when exporting a backup record with a backup - with a bad status. 
- """ - vol_id = self._create_volume_db_entry(status='available', - size=1) - backup = self._create_backup_db_entry(status=fields.BackupStatus.ERROR, - volume_id=vol_id) - self.assertRaises(exception.InvalidBackup, - self.backup_mgr.export_record, - self.ctxt, - backup) - - def test_export_record(self): - """Test normal backup record export.""" - vol_size = 1 - vol_id = self._create_volume_db_entry(status='available', - size=vol_size) - backup = self._create_backup_db_entry( - status=fields.BackupStatus.AVAILABLE, volume_id=vol_id) - - export = self.backup_mgr.export_record(self.ctxt, backup) - self.assertEqual(CONF.backup_driver, export['backup_service']) - self.assertIn('backup_url', export) - - def test_import_record_with_verify_not_implemented(self): - """Test normal backup record import. - - Test the case when import succeeds for the case that the - driver does not support verify. - """ - vol_size = 1 - backup_id = uuid.uuid4() - export = self._create_exported_record_entry(vol_size=vol_size, - exported_id=backup_id) - imported_record = self._create_export_record_db_entry( - backup_id=backup_id) - backup_hosts = [] - self.backup_mgr.import_record(self.ctxt, - imported_record, - export['backup_service'], - export['backup_url'], - backup_hosts) - backup = db.backup_get(self.ctxt, imported_record.id) - self.assertEqual(fields.BackupStatus.AVAILABLE, backup['status']) - self.assertEqual(vol_size, backup['size']) - - def test_import_record_with_wrong_id(self): - """Test normal backup record import. - - Test the case when import succeeds for the case that the - driver does not support verify. 
- """ - vol_size = 1 - export = self._create_exported_record_entry(vol_size=vol_size) - imported_record = self._create_export_record_db_entry() - backup_hosts = [] - self.assertRaises(exception.InvalidBackup, - self.backup_mgr.import_record, - self.ctxt, - imported_record, - export['backup_service'], - export['backup_url'], - backup_hosts) - - def test_import_record_with_bad_service(self): - """Test error handling. - - Test error handling when attempting an import of a backup - record with a different service to that used to create the backup. - """ - export = self._create_exported_record_entry() - export['backup_service'] = 'cinder.tests.unit.backup.bad_service' - imported_record = self._create_export_record_db_entry() - - # Test the case where the additional hosts list is empty - backup_hosts = [] - self.assertRaises(exception.ServiceNotFound, - self.backup_mgr.import_record, - self.ctxt, - imported_record, - export['backup_service'], - export['backup_url'], - backup_hosts) - - # Test that the import backup keeps calling other hosts to find a - # suitable host for the backup service - backup_hosts = ['fake1', 'fake2'] - backup_hosts_expect = list(backup_hosts) - BackupAPI_import = 'cinder.backup.rpcapi.BackupAPI.import_record' - with mock.patch(BackupAPI_import) as _mock_backup_import: - self.backup_mgr.import_record(self.ctxt, - imported_record, - export['backup_service'], - export['backup_url'], - backup_hosts) - - next_host = backup_hosts_expect.pop() - _mock_backup_import.assert_called_once_with( - self.ctxt, - next_host, - imported_record, - export['backup_service'], - export['backup_url'], - backup_hosts_expect) - - def test_import_record_with_invalid_backup(self): - """Test error handling. - - Test error handling when attempting an import of a backup - record where the backup driver returns an exception. 
- """ - export = self._create_exported_record_entry() - backup_driver = self.backup_mgr.service.get_backup_driver(self.ctxt) - _mock_record_import_class = ('%s.%s.%s' % - (backup_driver.__module__, - backup_driver.__class__.__name__, - 'import_record')) - imported_record = self._create_export_record_db_entry() - backup_hosts = [] - with mock.patch(_mock_record_import_class) as _mock_record_import: - _mock_record_import.side_effect = FakeBackupException('fake') - self.assertRaises(exception.InvalidBackup, - self.backup_mgr.import_record, - self.ctxt, - imported_record, - export['backup_service'], - export['backup_url'], - backup_hosts) - self.assertTrue(_mock_record_import.called) - backup = db.backup_get(self.ctxt, imported_record.id) - self.assertEqual(fields.BackupStatus.ERROR, backup['status']) - - def test_not_supported_driver_to_force_delete(self): - """Test force delete check method for not supported drivers.""" - self.override_config('backup_driver', 'cinder.backup.drivers.ceph') - self.backup_mgr = importutils.import_object(CONF.backup_manager) - result = self.backup_mgr.check_support_to_force_delete(self.ctxt) - self.assertFalse(result) - - @mock.patch('cinder.backup.drivers.nfs.NFSBackupDriver.' - '_init_backup_repo_path', return_value=None) - @mock.patch('cinder.backup.drivers.nfs.NFSBackupDriver.' - '_check_configuration', return_value=None) - def test_check_support_to_force_delete(self, mock_check_configuration, - mock_init_backup_repo_path): - """Test force delete check method for supported drivers.""" - self.override_config('backup_driver', 'cinder.backup.drivers.nfs') - self.backup_mgr = importutils.import_object(CONF.backup_manager) - result = self.backup_mgr.check_support_to_force_delete(self.ctxt) - self.assertTrue(result) - - def test_backup_has_dependent_backups(self): - """Test backup has dependent backups. - - Test the query of has_dependent_backups in backup object is correct. 
- """ - vol_size = 1 - vol_id = self._create_volume_db_entry(size=vol_size) - backup = self._create_backup_db_entry(volume_id=vol_id) - self.assertFalse(backup.has_dependent_backups) - - -class BackupTestCaseWithVerify(BaseBackupTest): - """Test Case for backups.""" - - def setUp(self): - self.override_config( - "backup_driver", - "cinder.tests.unit.backup.fake_service_with_verify") - super(BackupTestCaseWithVerify, self).setUp() - - def test_import_record_with_verify(self): - """Test normal backup record import. - - Test the case when import succeeds for the case that the - driver implements verify. - """ - vol_size = 1 - backup_id = uuid.uuid4() - export = self._create_exported_record_entry( - vol_size=vol_size, exported_id=backup_id) - imported_record = self._create_export_record_db_entry( - backup_id=backup_id) - backup_hosts = [] - backup_driver = self.backup_mgr.service.get_backup_driver(self.ctxt) - _mock_backup_verify_class = ('%s.%s.%s' % - (backup_driver.__module__, - backup_driver.__class__.__name__, - 'verify')) - - def mock_verify(backup_id): - backup = db.backup_get(self.ctxt, backup_id) - self.assertEqual(fields.BackupStatus.CREATING, backup['status']) - - with mock.patch(_mock_backup_verify_class) as mock_backup_verify: - mock_backup_verify.side_effect = mock_verify - self.backup_mgr.import_record(self.ctxt, - imported_record, - export['backup_service'], - export['backup_url'], - backup_hosts) - backup = db.backup_get(self.ctxt, imported_record.id) - self.assertEqual(fields.BackupStatus.AVAILABLE, backup['status']) - self.assertEqual(vol_size, backup['size']) - - def test_import_record_with_verify_invalid_backup(self): - """Test error handling. - - Test error handling when attempting an import of a backup - record where the backup driver returns an exception. 
- """ - vol_size = 1 - backup_id = uuid.uuid4() - export = self._create_exported_record_entry( - vol_size=vol_size, exported_id=backup_id) - imported_record = self._create_export_record_db_entry( - backup_id=backup_id) - backup_hosts = [] - backup_driver = self.backup_mgr.service.get_backup_driver(self.ctxt) - _mock_backup_verify_class = ('%s.%s.%s' % - (backup_driver.__module__, - backup_driver.__class__.__name__, - 'verify')) - with mock.patch(_mock_backup_verify_class) as _mock_record_verify: - _mock_record_verify.side_effect = \ - exception.InvalidBackup(reason='fake') - - self.assertRaises(exception.InvalidBackup, - self.backup_mgr.import_record, - self.ctxt, - imported_record, - export['backup_service'], - export['backup_url'], - backup_hosts) - self.assertTrue(_mock_record_verify.called) - backup = db.backup_get(self.ctxt, imported_record.id) - self.assertEqual(fields.BackupStatus.ERROR, backup['status']) - - @mock.patch.object(manager.BackupManager, - '_cleanup_temp_volumes_snapshots_for_one_backup') - def test_backup_reset_status_from_nonrestoring_to_available( - self, mock_clean_temp): - vol_id = self._create_volume_db_entry(status='available', - size=1) - backup = self._create_backup_db_entry(status=fields.BackupStatus.ERROR, - volume_id=vol_id) - with mock.patch.object(manager.BackupManager, - '_map_service_to_driver') as \ - mock_map_service_to_driver: - # It should works when the service name is a string - mock_map_service_to_driver.return_value = 'swift' - self.backup_mgr.reset_status(self.ctxt, - backup, - fields.BackupStatus.AVAILABLE) - mock_clean_temp.assert_called_once_with(self.ctxt, backup) - new_backup = db.backup_get(self.ctxt, backup.id) - self.assertEqual(fields.BackupStatus.AVAILABLE, - new_backup['status']) - - mock_map_service_to_driver.return_value = \ - fake_service.get_backup_driver(self.ctxt) - self.backup_mgr.reset_status(self.ctxt, - backup, - fields.BackupStatus.ERROR) - mock_clean_temp.reset_mock() - - 
self.backup_mgr.reset_status(self.ctxt, - backup, - fields.BackupStatus.AVAILABLE) - mock_clean_temp.assert_called_once_with(self.ctxt, backup) - backup = db.backup_get(self.ctxt, backup.id) - self.assertEqual(fields.BackupStatus.AVAILABLE, backup['status']) - - def test_backup_reset_status_to_available_invalid_backup(self): - volume = db.volume_create(self.ctxt, {'status': 'available', - 'host': 'test', - 'provider_location': '', - 'size': 1}) - backup = self._create_backup_db_entry(status=fields.BackupStatus.ERROR, - volume_id=volume['id']) - - backup_driver = self.backup_mgr.service.get_backup_driver(self.ctxt) - _mock_backup_verify_class = ('%s.%s.%s' % - (backup_driver.__module__, - backup_driver.__class__.__name__, - 'verify')) - with mock.patch(_mock_backup_verify_class) as \ - _mock_record_verify: - _mock_record_verify.side_effect = \ - exception.BackupVerifyUnsupportedDriver(reason='fake') - - self.assertRaises(exception.BackupVerifyUnsupportedDriver, - self.backup_mgr.reset_status, - self.ctxt, - backup, - fields.BackupStatus.AVAILABLE) - backup = db.backup_get(self.ctxt, backup.id) - self.assertEqual(fields.BackupStatus.ERROR, backup['status']) - - @mock.patch.object(manager.BackupManager, - '_cleanup_temp_volumes_snapshots_for_one_backup') - def test_backup_reset_status_from_restoring_to_available( - self, mock_clean_temp): - volume = db.volume_create(self.ctxt, - {'status': 'available', - 'host': 'test', - 'provider_location': '', - 'size': 1}) - backup = self._create_backup_db_entry( - status=fields.BackupStatus.RESTORING, - volume_id=volume['id']) - - self.backup_mgr.reset_status(self.ctxt, backup, - fields.BackupStatus.AVAILABLE) - mock_clean_temp.assert_called_once_with(self.ctxt, backup) - backup = db.backup_get(self.ctxt, backup.id) - self.assertEqual(fields.BackupStatus.AVAILABLE, backup['status']) - - @mock.patch.object(manager.BackupManager, - '_cleanup_temp_volumes_snapshots_for_one_backup') - def test_backup_reset_status_to_error(self, 
mock_clean_temp): - volume = db.volume_create(self.ctxt, - {'status': 'available', - 'host': 'test', - 'provider_location': '', - 'size': 1}) - backup = self._create_backup_db_entry( - status=fields.BackupStatus.CREATING, - volume_id=volume['id']) - self.backup_mgr.reset_status(self.ctxt, backup, - fields.BackupStatus.ERROR) - mock_clean_temp.assert_called_once_with(self.ctxt, backup) - backup = db.backup_get(self.ctxt, backup['id']) - self.assertEqual(fields.BackupStatus.ERROR, backup['status']) - - -@ddt.ddt -class BackupAPITestCase(BaseBackupTest): - def setUp(self): - super(BackupAPITestCase, self).setUp() - self.api = api.API() - - def test_get_all_wrong_all_tenants_value(self): - self.assertRaises(exception.InvalidParameterValue, - self.api.get_all, self.ctxt, {'all_tenants': 'bad'}) - - @mock.patch.object(objects, 'BackupList') - def test_get_all_no_all_tenants_value(self, mock_backuplist): - result = self.api.get_all(self.ctxt, {'key': 'value'}) - self.assertFalse(mock_backuplist.get_all.called) - self.assertEqual(mock_backuplist.get_all_by_project.return_value, - result) - mock_backuplist.get_all_by_project.assert_called_once_with( - self.ctxt, self.ctxt.project_id, {'key': 'value'}, None, None, - None, None, None) - - @mock.patch.object(objects, 'BackupList') - @ddt.data(False, 'false', '0', 0, 'no') - def test_get_all_false_value_all_tenants( - self, false_value, mock_backuplist): - result = self.api.get_all(self.ctxt, {'all_tenants': false_value, - 'key': 'value'}) - self.assertFalse(mock_backuplist.get_all.called) - self.assertEqual(mock_backuplist.get_all_by_project.return_value, - result) - mock_backuplist.get_all_by_project.assert_called_once_with( - self.ctxt, self.ctxt.project_id, {'key': 'value'}, None, None, - None, None, None) - - @mock.patch.object(objects, 'BackupList') - @ddt.data(True, 'true', '1', 1, 'yes') - def test_get_all_true_value_all_tenants( - self, true_value, mock_backuplist): - result = self.api.get_all(self.ctxt, 
{'all_tenants': true_value, - 'key': 'value'}) - self.assertFalse(mock_backuplist.get_all_by_project.called) - self.assertEqual(mock_backuplist.get_all.return_value, - result) - mock_backuplist.get_all.assert_called_once_with( - self.ctxt, {'key': 'value'}, None, None, None, None, None) - - @mock.patch.object(objects, 'BackupList') - def test_get_all_true_value_all_tenants_non_admin(self, mock_backuplist): - ctxt = context.RequestContext(uuid.uuid4(), uuid.uuid4()) - result = self.api.get_all(ctxt, {'all_tenants': '1', - 'key': 'value'}) - self.assertFalse(mock_backuplist.get_all.called) - self.assertEqual(mock_backuplist.get_all_by_project.return_value, - result) - mock_backuplist.get_all_by_project.assert_called_once_with( - ctxt, ctxt.project_id, {'key': 'value'}, None, None, None, None, - None) - - @mock.patch.object(api.API, '_get_available_backup_service_host', - return_value='fake_host') - @mock.patch.object(db, 'backup_create', - side_effect=db_exc.DBError()) - def test_create_when_failed_to_create_backup_object( - self, mock_create, - mock_get_service): - - # Create volume in admin context - volume_id = utils.create_volume(self.ctxt)['id'] - - # Will try to backup from a different context - new_context = copy.copy(self.ctxt) - new_context.user_id = uuid.uuid4() - new_context.project_id = uuid.uuid4() - - # The opposite side of this test case is a "NotImplementedError: - # Cannot load 'id' in the base class" being raised. - # More detailed, in the try clause, if backup.create() failed - # with DB exception, backup.id won't be assigned. However, - # in the except clause, backup.destroy() is invoked to do cleanup, - # which internally tries to access backup.id. 
- self.assertRaises(db_exc.DBError, self.api.create, - context=new_context, - name="test_backup", - description="test backup description", - volume_id=volume_id, - container='volumebackups') - - @mock.patch.object(api.API, '_get_available_backup_service_host', - return_value='fake_host') - @mock.patch.object(objects.Backup, '__init__', - side_effect=exception.InvalidInput( - reason='Failed to new')) - def test_create_when_failed_to_new_backup_object(self, mock_new, - mock_get_service): - volume_id = utils.create_volume(self.ctxt)['id'] - - # The opposite side of this test case is that a "UnboundLocalError: - # local variable 'backup' referenced before assignment" is raised. - # More detailed, in the try clause, backup = objects.Backup(...) - # raises exception, so 'backup' is not assigned. But in the except - # clause, 'backup' is referenced to invoke cleanup methods. - self.assertRaises(exception.InvalidInput, self.api.create, - context=self.ctxt, - name="test_backup", - description="test backup description", - volume_id=volume_id, - container='volumebackups') - - @mock.patch('cinder.backup.rpcapi.BackupAPI.create_backup') - @mock.patch('cinder.backup.api.API._is_backup_service_enabled') - def test_create_backup_in_same_host(self, mock_is_enable, - mock_create): - self.override_config('backup_use_same_host', True) - mock_is_enable.return_value = True - self.ctxt.user_id = 'fake_user' - self.ctxt.project_id = 'fake_project' - volume_id = self._create_volume_db_entry(status='available', - host='testhost#lvm', - size=1) - backup = self.api.create(self.ctxt, None, None, volume_id, None) - self.assertEqual('testhost', backup.host) - - @mock.patch.object(api.API, '_get_available_backup_service_host', - return_value='fake_host') - @mock.patch('cinder.backup.rpcapi.BackupAPI.create_backup') - def test_create_backup_from_snapshot_with_volume_in_use( - self, mock_create, mock_get_service): - self.ctxt.user_id = 'fake_user' - self.ctxt.project_id = 'fake_project' - volume_id 
= self._create_volume_db_entry(status='in-use') - snapshot = self._create_snapshot_db_entry(volume_id=volume_id) - backup = self.api.create(self.ctxt, None, None, volume_id, None, - snapshot_id=snapshot.id) - - self.assertEqual(fields.BackupStatus.CREATING, backup.status) - volume = objects.Volume.get_by_id(self.ctxt, volume_id) - snapshot = objects.Snapshot.get_by_id(self.ctxt, snapshot.id) - self.assertEqual(fields.SnapshotStatus.BACKING_UP, snapshot.status) - self.assertEqual('in-use', volume.status) - - @mock.patch.object(api.API, '_get_available_backup_service_host', - return_value='fake_host') - @mock.patch('cinder.backup.rpcapi.BackupAPI.create_backup') - @ddt.data(True, False) - def test_create_backup_resource_status(self, is_snapshot, mock_create, - mock_get_service): - self.ctxt.user_id = 'fake_user' - self.ctxt.project_id = 'fake_project' - volume_id = self._create_volume_db_entry(status='available') - snapshot = self._create_snapshot_db_entry(volume_id=volume_id) - if is_snapshot: - self.api.create(self.ctxt, None, None, volume_id, None, - snapshot_id=snapshot.id) - volume = objects.Volume.get_by_id(self.ctxt, volume_id) - snapshot = objects.Snapshot.get_by_id(self.ctxt, snapshot.id) - - self.assertEqual('backing-up', snapshot.status) - self.assertEqual('available', volume.status) - else: - self.api.create(self.ctxt, None, None, volume_id, None) - volume = objects.Volume.get_by_id(self.ctxt, volume_id) - snapshot = objects.Snapshot.get_by_id(self.ctxt, snapshot.id) - - self.assertEqual('available', snapshot.status) - self.assertEqual('backing-up', volume.status) - - @mock.patch('cinder.backup.api.API._get_available_backup_service_host') - @mock.patch('cinder.backup.rpcapi.BackupAPI.restore_backup') - def test_restore_volume(self, - mock_rpcapi_restore, - mock_get_backup_host): - volume_id = self._create_volume_db_entry(status='available', - size=1) - backup = self._create_backup_db_entry(size=1, - status='available') - mock_get_backup_host.return_value 
= 'testhost' - self.api.restore(self.ctxt, backup.id, volume_id) - backup = objects.Backup.get_by_id(self.ctxt, backup.id) - self.assertEqual(volume_id, backup.restore_volume_id) diff --git a/cinder/tests/unit/backup/test_rpcapi.py b/cinder/tests/unit/backup/test_rpcapi.py deleted file mode 100644 index 5d6c6f88f..000000000 --- a/cinder/tests/unit/backup/test_rpcapi.py +++ /dev/null @@ -1,104 +0,0 @@ -# Copyright (c) 2015 Intel Corporation -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -""" -Unit Tests for cinder.backup.rpcapi -""" - -import mock - -from cinder.backup import rpcapi as backup_rpcapi -from cinder import objects -from cinder import test -from cinder.tests.unit.backup import fake_backup -from cinder.tests.unit import fake_constants as fake - - -class BackupRPCAPITestCase(test.RPCAPITestCase): - def setUp(self): - super(BackupRPCAPITestCase, self).setUp() - self.rpcapi = backup_rpcapi.BackupAPI - self.fake_backup_obj = fake_backup.fake_backup_obj(self.context) - - def test_create_backup(self): - self._test_rpc_api('create_backup', - rpc_method='cast', - server=self.fake_backup_obj.host, - backup=self.fake_backup_obj) - - def test_restore_backup(self): - self._test_rpc_api('restore_backup', - rpc_method='cast', - server='fake_volume_host', - volume_host='fake_volume_host', - backup=self.fake_backup_obj, - volume_id=fake.VOLUME_ID) - - def test_delete_backup(self): - self._test_rpc_api('delete_backup', - rpc_method='cast', - 
server=self.fake_backup_obj.host, - backup=self.fake_backup_obj) - - def test_export_record(self): - self._test_rpc_api('export_record', - rpc_method='call', - server=self.fake_backup_obj.host, - backup=self.fake_backup_obj, - retval={'backup_service': 'fake_backup_driver', - 'backup_url': 'http://fake_url'}) - - def test_import_record(self): - self._test_rpc_api('import_record', - rpc_method='cast', - server='fake_volume_host', - host='fake_volume_host', - backup=self.fake_backup_obj, - backup_service='fake_service', - backup_url='fake_url', - backup_hosts=['fake_host1', 'fake_host2']) - - def test_reset_status(self): - self._test_rpc_api('reset_status', - rpc_method='cast', - server=self.fake_backup_obj.host, - backup=self.fake_backup_obj, - status='error') - - def test_check_support_to_force_delete(self): - self._test_rpc_api('check_support_to_force_delete', - rpc_method='call', - server='fake_volume_host', - host='fake_volume_host', - retval=True) - - @mock.patch('oslo_messaging.RPCClient.can_send_version', mock.Mock()) - def test_set_log_levels(self): - service = objects.Service(self.context, host='host1') - self._test_rpc_api('set_log_levels', - rpc_method='cast', - server=service.host, - service=service, - log_request='log_request', - version='2.1') - - @mock.patch('oslo_messaging.RPCClient.can_send_version', mock.Mock()) - def test_get_log_levels(self): - service = objects.Service(self.context, host='host1') - self._test_rpc_api('get_log_levels', - rpc_method='call', - server=service.host, - service=service, - log_request='log_request', - version='2.1') diff --git a/cinder/tests/unit/brick/__init__.py b/cinder/tests/unit/brick/__init__.py deleted file mode 100644 index e69de29bb..000000000 diff --git a/cinder/tests/unit/brick/fake_lvm.py b/cinder/tests/unit/brick/fake_lvm.py deleted file mode 100644 index c82408e3d..000000000 --- a/cinder/tests/unit/brick/fake_lvm.py +++ /dev/null @@ -1,62 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the 
"License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - - -class FakeBrickLVM(object): - """Logs and records calls, for unit tests.""" - def __init__(self, vg_name, create, pv_list, vtype, execute=None): - super(FakeBrickLVM, self).__init__() - self.vg_size = '5.00' - self.vg_free_space = '5.00' - self.vg_name = vg_name - - def supports_thin_provisioning(): - return False - - def get_volumes(self): - return ['fake-volume'] - - def get_volume(self, name): - return ['name'] - - def get_all_physical_volumes(vg_name=None): - return [] - - def update_volume_group_info(self): - pass - - def create_thin_pool(self, name=None, size_str=0): - pass - - def create_volume(self, name, size_str, lv_type='default', mirror_count=0): - pass - - def create_lv_snapshot(self, name, source_lv_name, lv_type='default'): - pass - - def delete(self, name): - pass - - def revert(self, snapshot_name): - pass - - def deactivate_lv(self, name): - pass - - def lv_has_snapshot(self, name): - return False - - def activate_lv(self, lv, is_snapshot=False, permanent=False): - pass - - def rename_volume(self, lv_name, new_name): - pass diff --git a/cinder/tests/unit/brick/test_brick_lvm.py b/cinder/tests/unit/brick/test_brick_lvm.py deleted file mode 100644 index e0bec3974..000000000 --- a/cinder/tests/unit/brick/test_brick_lvm.py +++ /dev/null @@ -1,447 +0,0 @@ -# Copyright 2012 OpenStack Foundation -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. 
You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -import ddt -import mock -from oslo_concurrency import processutils - -from cinder.brick.local_dev import lvm as brick -from cinder import exception -from cinder import test -from cinder.volume import configuration as conf - - -@ddt.ddt -class BrickLvmTestCase(test.TestCase): - def setUp(self): - if not hasattr(self, 'configuration'): - self.configuration = mock.Mock(conf.Configuration) - self.configuration.lvm_suppress_fd_warnings = False - self.configuration.volume_group_name = 'fake-vg' - super(BrickLvmTestCase, self).setUp() - - self.mock_object(processutils, 'execute', self.fake_execute) - self.vg = brick.LVM( - self.configuration.volume_group_name, - 'sudo', - False, None, - 'default', - self.fake_execute, - suppress_fd_warn=self.configuration.lvm_suppress_fd_warnings) - - def failed_fake_execute(obj, *cmd, **kwargs): - return ("\n", "fake-error") - - def fake_pretend_lvm_version(obj, *cmd, **kwargs): - return (" LVM version: 2.03.00 (2012-03-06)\n", "") - - def fake_old_lvm_version(obj, *cmd, **kwargs): - # Does not support thin prov or snap activation - return (" LVM version: 2.02.65(2) (2012-03-06)\n", "") - - def fake_customised_lvm_version(obj, *cmd, **kwargs): - return (" LVM version: 2.02.100(2)-RHEL6 (2013-09-12)\n", "") - - def fake_execute(obj, *cmd, **kwargs): # noqa - if obj.configuration.lvm_suppress_fd_warnings: - _lvm_prefix = 'env, LC_ALL=C, LVM_SUPPRESS_FD_WARNINGS=1, ' - else: - _lvm_prefix = 'env, LC_ALL=C, ' - - cmd_string = ', '.join(cmd) - data = "\n" - if (_lvm_prefix + 'vgs, --noheadings, --unit=g, -o, name' == - cmd_string): - 
data = " fake-vg\n" - data += " some-other-vg\n" - elif (_lvm_prefix + 'vgs, --noheadings, -o, name, fake-vg' == - cmd_string): - data = " fake-vg\n" - elif _lvm_prefix + 'vgs, --version' in cmd_string: - data = " LVM version: 2.02.103(2) (2012-03-06)\n" - elif(_lvm_prefix + 'vgs, --noheadings, -o, uuid, fake-vg' in - cmd_string): - data = " kVxztV-dKpG-Rz7E-xtKY-jeju-QsYU-SLG6Z1\n" - elif(_lvm_prefix + 'vgs, --noheadings, --unit=g, ' - '-o, name,size,free,lv_count,uuid, ' - '--separator, :, --nosuffix' in cmd_string): - data = (" test-prov-cap-vg-unit:10.00:10.00:0:" - "mXzbuX-dKpG-Rz7E-xtKY-jeju-QsYU-SLG8Z4\n") - if 'test-prov-cap-vg-unit' in cmd_string: - return (data, "") - data = (" test-prov-cap-vg-no-unit:10.00:10.00:0:" - "mXzbuX-dKpG-Rz7E-xtKY-jeju-QsYU-SLG8Z4\n") - if 'test-prov-cap-vg-no-unit' in cmd_string: - return (data, "") - data = " fake-vg:10.00:10.00:0:"\ - "kVxztV-dKpG-Rz7E-xtKY-jeju-QsYU-SLG6Z1\n" - if 'fake-vg' in cmd_string: - return (data, "") - data += " fake-vg-2:10.00:10.00:0:"\ - "lWyauW-dKpG-Rz7E-xtKY-jeju-QsYU-SLG7Z2\n" - data += " fake-vg-3:10.00:10.00:0:"\ - "mXzbuX-dKpG-Rz7E-xtKY-jeju-QsYU-SLG8Z3\n" - elif (_lvm_prefix + 'lvs, --noheadings, ' - '--unit=g, -o, vg_name,name,size, --nosuffix, ' - 'fake-vg/lv-nothere' in cmd_string): - raise processutils.ProcessExecutionError( - stderr="One or more specified logical volume(s) not found.") - elif (_lvm_prefix + 'lvs, --noheadings, ' - '--unit=g, -o, vg_name,name,size, --nosuffix, ' - 'fake-vg/lv-newerror' in cmd_string): - raise processutils.ProcessExecutionError( - stderr="Failed to find logical volume \"fake-vg/lv-newerror\"") - elif (_lvm_prefix + 'lvs, --noheadings, ' - '--unit=g, -o, vg_name,name,size' in cmd_string): - if 'fake-unknown' in cmd_string: - raise processutils.ProcessExecutionError( - stderr="One or more volume(s) not found." 
- ) - if 'test-prov-cap-vg-unit' in cmd_string: - data = " fake-vg test-prov-cap-pool-unit 9.50g\n" - data += " fake-vg fake-volume-1 1.00g\n" - data += " fake-vg fake-volume-2 2.00g\n" - elif 'test-prov-cap-vg-no-unit' in cmd_string: - data = " fake-vg test-prov-cap-pool-no-unit 9.50\n" - data += " fake-vg fake-volume-1 1.00\n" - data += " fake-vg fake-volume-2 2.00\n" - elif 'test-found-lv-name' in cmd_string: - data = " fake-vg test-found-lv-name 9.50\n" - else: - data = " fake-vg fake-1 1.00g\n" - data += " fake-vg fake-2 1.00g\n" - elif (_lvm_prefix + 'lvdisplay, --noheading, -C, -o, Attr' in - cmd_string): - if 'test-volumes' in cmd_string: - data = ' wi-a-' - elif 'snapshot' in cmd_string: - data = ' swi-a-s--' - elif 'open' in cmd_string: - data = ' -wi-ao---' - else: - data = ' owi-a-' - elif (_lvm_prefix + 'lvdisplay, --noheading, -C, -o, Origin' in - cmd_string): - if 'snapshot' in cmd_string: - data = ' fake-volume-1' - else: - data = ' ' - elif _lvm_prefix + 'pvs, --noheadings' in cmd_string: - data = " fake-vg|/dev/sda|10.00|1.00\n" - data += " fake-vg|/dev/sdb|10.00|1.00\n" - data += " fake-vg|/dev/sdc|10.00|8.99\n" - data += " fake-vg-2|/dev/sdd|10.00|9.99\n" - if '--ignoreskippedcluster' not in cmd_string: - raise processutils.ProcessExecutionError( - stderr="Skipping clustered volume group", - stdout=data, - exit_code=5 - ) - elif _lvm_prefix + 'lvs, --noheadings, --unit=g' \ - ', -o, size,data_percent, --separator, :' in cmd_string: - if 'test-prov-cap-pool' in cmd_string: - data = " 9.5:20\n" - else: - data = " 9:12\n" - elif 'lvcreate, -T, -L, ' in cmd_string: - pass - elif 'lvcreate, -T, -V, ' in cmd_string: - pass - elif 'lvcreate, -n, ' in cmd_string: - pass - elif 'lvcreate, --name, ' in cmd_string: - pass - elif 'lvextend, -L, ' in cmd_string: - pass - else: - raise AssertionError('unexpected command called: %s' % cmd_string) - - return (data, "") - - def test_create_lv_snapshot(self): - 
self.assertIsNone(self.vg.create_lv_snapshot('snapshot-1', 'fake-1')) - - with mock.patch.object(self.vg, 'get_volume', return_value=None): - try: - self.vg.create_lv_snapshot('snapshot-1', 'fake-non-existent') - except exception.VolumeDeviceNotFound as e: - self.assertEqual('fake-non-existent', e.kwargs['device']) - else: - self.fail("Exception not raised") - - def test_vg_exists(self): - self.assertTrue(self.vg._vg_exists()) - - def test_get_all_volumes(self): - out = self.vg.get_volumes() - - self.assertEqual('fake-1', out[0]['name']) - self.assertEqual('1.00g', out[0]['size']) - self.assertEqual('fake-vg', out[0]['vg']) - - def test_get_volume(self): - self.assertEqual('fake-1', self.vg.get_volume('fake-1')['name']) - - def test_get_volume_none(self): - self.assertIsNone(self.vg.get_volume('fake-unknown')) - - def test_get_lv_info_notfound(self): - # lv-nothere will raise lvm < 2.102.112 exception - self.assertEqual( - [], - self.vg.get_lv_info( - 'sudo', vg_name='fake-vg', lv_name='lv-nothere') - ) - # lv-newerror will raise lvm > 2.102.112 exception - self.assertEqual( - [], - self.vg.get_lv_info( - 'sudo', vg_name='fake-vg', lv_name='lv-newerror') - ) - - def test_get_lv_info_found(self): - lv_info = [{'size': '9.50', 'name': 'test-found-lv-name', - 'vg': 'fake-vg'}] - self.assertEqual( - lv_info, - self.vg.get_lv_info( - 'sudo', vg_name='fake-vg', - lv_name='test-found-lv-name') - ) - - def test_get_lv_info_no_lv_name(self): - lv_info = [{'name': 'fake-1', 'size': '1.00g', 'vg': 'fake-vg'}, - {'name': 'fake-2', 'size': '1.00g', 'vg': 'fake-vg'}] - self.assertEqual( - lv_info, - self.vg.get_lv_info( - 'sudo', vg_name='fake-vg') - ) - - def test_get_all_physical_volumes(self): - # Filtered VG version - pvs = self.vg.get_all_physical_volumes('sudo', 'fake-vg') - self.assertEqual(3, len(pvs)) - - # Non-Filtered, all VG's - pvs = self.vg.get_all_physical_volumes('sudo') - self.assertEqual(4, len(pvs)) - - def test_get_volume_groups(self): - self.assertEqual(3, 
len(self.vg.get_all_volume_groups('sudo'))) - self.assertEqual(1, - len(self.vg.get_all_volume_groups('sudo', 'fake-vg'))) - - def test_thin_support(self): - # lvm.supports_thin() is a static method and doesn't - # use the self._executor fake we pass in on init - # so we need to stub processutils.execute appropriately - - self.assertTrue(self.vg.supports_thin_provisioning('sudo')) - - with mock.patch.object(processutils, 'execute', - self.fake_pretend_lvm_version): - self.assertTrue(self.vg.supports_thin_provisioning('sudo')) - - with mock.patch.object(processutils, 'execute', - self.fake_old_lvm_version): - self.assertFalse(self.vg.supports_thin_provisioning('sudo')) - - with mock.patch.object(processutils, 'execute', - self.fake_customised_lvm_version): - self.assertTrue(self.vg.supports_thin_provisioning('sudo')) - - def test_snapshot_lv_activate_support(self): - self.vg._supports_snapshot_lv_activation = None - self.assertTrue(self.vg.supports_snapshot_lv_activation) - - self.vg._supports_snapshot_lv_activation = None - with mock.patch.object(processutils, 'execute', - self.fake_old_lvm_version): - self.assertFalse(self.vg.supports_snapshot_lv_activation) - - self.vg._supports_snapshot_lv_activation = None - - def test_lvchange_ignskipact_support_yes(self): - """Tests if lvchange -K is available via a lvm2 version check.""" - - self.vg._supports_lvchange_ignoreskipactivation = None - with mock.patch.object(processutils, 'execute', - self.fake_pretend_lvm_version): - self.assertTrue(self.vg.supports_lvchange_ignoreskipactivation) - - self.vg._supports_lvchange_ignoreskipactivation = None - with mock.patch.object(processutils, 'execute', - self.fake_old_lvm_version): - self.assertFalse(self.vg.supports_lvchange_ignoreskipactivation) - - self.vg._supports_lvchange_ignoreskipactivation = None - - def test_pvs_ignoreskippedcluster_support(self): - """Tests if lvm support ignoreskippedcluster option.""" - - brick.LVM._supports_pvs_ignoreskippedcluster = None - with 
mock.patch.object(processutils, 'execute', - self.fake_pretend_lvm_version): - self.assertTrue(brick.LVM.supports_pvs_ignoreskippedcluster( - 'sudo')) - - brick.LVM._supports_pvs_ignoreskippedcluster = None - with mock.patch.object(processutils, 'execute', - self.fake_old_lvm_version): - self.assertFalse(brick.LVM.supports_pvs_ignoreskippedcluster( - 'sudo')) - - brick.LVM._supports_pvs_ignoreskippedcluster = None - - def test_thin_pool_creation(self): - - # The size of fake-vg volume group is 10g, so the calculated thin - # pool size should be 9.5g (95% of 10g). - self.assertEqual("9.5g", self.vg.create_thin_pool()) - - # Passing a size parameter should result in a thin pool of that exact - # size. - for size in ("1g", "1.2g", "1.75g"): - self.assertEqual(size, self.vg.create_thin_pool(size_str=size)) - - def test_thin_pool_provisioned_capacity(self): - self.vg.vg_thin_pool = "test-prov-cap-pool-unit" - self.vg.vg_name = 'test-prov-cap-vg-unit' - self.assertEqual( - "9.5g", - self.vg.create_thin_pool(name=self.vg.vg_thin_pool)) - self.assertEqual("9.50", self.vg.vg_thin_pool_size) - self.assertEqual(7.6, self.vg.vg_thin_pool_free_space) - self.assertEqual(3.0, self.vg.vg_provisioned_capacity) - - self.vg.vg_thin_pool = "test-prov-cap-pool-no-unit" - self.vg.vg_name = 'test-prov-cap-vg-no-unit' - self.assertEqual( - "9.5g", - self.vg.create_thin_pool(name=self.vg.vg_thin_pool)) - self.assertEqual("9.50", self.vg.vg_thin_pool_size) - self.assertEqual(7.6, self.vg.vg_thin_pool_free_space) - self.assertEqual(3.0, self.vg.vg_provisioned_capacity) - - def test_thin_pool_free_space(self): - # The size of fake-vg-pool is 9g and the allocated data sums up to - # 12% so the calculated free space should be 7.92 - self.assertEqual(float("7.92"), - self.vg._get_thin_pool_free_space("fake-vg", - "fake-vg-pool")) - - def test_volume_create_after_thin_creation(self): - """Test self.vg.vg_thin_pool is set to pool_name - - See bug #1220286 for more info. 
- """ - - vg_name = "vg-name" - pool_name = vg_name + "-pool" - pool_path = "%s/%s" % (vg_name, pool_name) - - def executor(obj, *cmd, **kwargs): - self.assertEqual(pool_path, cmd[-1]) - - self.vg._executor = executor - self.vg.create_thin_pool(pool_name, "1G") - self.vg.create_volume("test", "1G", lv_type='thin') - - self.assertEqual(pool_name, self.vg.vg_thin_pool) - - def test_volume_create_when_executor_failed(self): - def fail(*args, **kwargs): - raise processutils.ProcessExecutionError() - self.vg._execute = fail - - with mock.patch.object(self.vg, 'get_all_volume_groups') as m_gavg: - self.assertRaises( - processutils.ProcessExecutionError, - self.vg.create_volume, "test", "1G" - ) - m_gavg.assert_called() - - def test_lv_has_snapshot(self): - self.assertTrue(self.vg.lv_has_snapshot('fake-vg')) - self.assertFalse(self.vg.lv_has_snapshot('test-volumes')) - - def test_lv_is_snapshot(self): - self.assertTrue(self.vg.lv_is_snapshot('fake-snapshot')) - self.assertFalse(self.vg.lv_is_snapshot('test-volumes')) - - def test_lv_is_open(self): - self.assertTrue(self.vg.lv_is_open('fake-open')) - self.assertFalse(self.vg.lv_is_open('fake-snapshot')) - - def test_lv_get_origin(self): - self.assertEqual('fake-volume-1', - self.vg.lv_get_origin('fake-snapshot')) - self.assertFalse(None, self.vg.lv_get_origin('test-volumes')) - - def test_activate_lv(self): - with mock.patch.object(self.vg, '_execute'): - self.vg._supports_lvchange_ignoreskipactivation = True - - self.vg._execute('lvchange', '-a', 'y', '--yes', '-K', - 'fake-vg/my-lv', - root_helper='sudo', run_as_root=True) - - self.vg.activate_lv('my-lv') - - def test_get_mirrored_available_capacity(self): - self.assertEqual(2.0, self.vg.vg_mirror_free_space(1)) - - @ddt.data(True, False) - def test_lv_extend(self, has_snapshot): - with mock.patch.object(self.vg, '_execute'): - with mock.patch.object(self.vg, 'lv_has_snapshot'): - self.vg.deactivate_lv = mock.MagicMock() - self.vg.activate_lv = mock.MagicMock() - - 
self.vg.lv_has_snapshot.return_value = has_snapshot - self.vg.extend_volume("test", "2G") - - self.vg.lv_has_snapshot.assert_called_once_with("test") - if has_snapshot: - self.vg.activate_lv.assert_called_once_with("test") - self.vg.deactivate_lv.assert_called_once_with("test") - else: - self.vg.activate_lv.assert_not_called() - self.vg.deactivate_lv.assert_not_called() - - def test_lv_deactivate(self): - with mock.patch.object(self.vg, '_execute'): - is_active_mock = mock.Mock() - is_active_mock.return_value = False - self.vg._lv_is_active = is_active_mock - self.vg.create_volume('test', '1G') - self.vg.deactivate_lv('test') - - @mock.patch('time.sleep') - def test_lv_deactivate_timeout(self, _mock_sleep): - with mock.patch.object(self.vg, '_execute'): - is_active_mock = mock.Mock() - is_active_mock.return_value = True - self.vg._lv_is_active = is_active_mock - self.vg.create_volume('test', '1G') - self.assertRaises(exception.VolumeNotDeactivated, - self.vg.deactivate_lv, 'test') - - -class BrickLvmTestCaseIgnoreFDWarnings(BrickLvmTestCase): - def setUp(self): - self.configuration = mock.Mock(conf.Configuration) - self.configuration.lvm_suppress_fd_warnings = True - super(BrickLvmTestCaseIgnoreFDWarnings, self).setUp() diff --git a/cinder/tests/unit/cast_as_call.py b/cinder/tests/unit/cast_as_call.py deleted file mode 100644 index caf47d442..000000000 --- a/cinder/tests/unit/cast_as_call.py +++ /dev/null @@ -1,35 +0,0 @@ -# Copyright 2013 Red Hat, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the -# License for the specific language governing permissions and limitations -# under the License. - -import mock - - -def mock_cast_as_call(obj=None): - """Use this to mock `cast` as calls. - - :param obj: Either an instance of RPCClient - or an instance of _Context. - """ - orig_prepare = obj.prepare - - def prepare(*args, **kwargs): - cctxt = orig_prepare(*args, **kwargs) - mock_cast_as_call(obj=cctxt) # woo, recurse! - return cctxt - - prepare_patch = mock.patch.object(obj, 'prepare').start() - prepare_patch.side_effect = prepare - - cast_patch = mock.patch.object(obj, 'cast').start() - cast_patch.side_effect = obj.call diff --git a/cinder/tests/unit/compute/__init__.py b/cinder/tests/unit/compute/__init__.py deleted file mode 100644 index e69de29bb..000000000 diff --git a/cinder/tests/unit/compute/test_nova.py b/cinder/tests/unit/compute/test_nova.py deleted file mode 100644 index aa2eae842..000000000 --- a/cinder/tests/unit/compute/test_nova.py +++ /dev/null @@ -1,251 +0,0 @@ -# Copyright 2013 IBM Corp. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -import mock - -from cinder.compute import nova -from cinder import context -from cinder import test -from keystoneauth1 import loading as ks_loading -from novaclient import exceptions as nova_exceptions -from oslo_config import cfg - -CONF = cfg.CONF - - -class NovaClientTestCase(test.TestCase): - def setUp(self): - super(NovaClientTestCase, self).setUp() - - # Register the Password auth plugin options, - # so we can use CONF.set_override - # reset() first, otherwise already registered CLI options will - # prevent unregister in tearDown() - # Use CONF.set_override(), because we'll unregister the opts, - # no need (and not possible) to cleanup. - CONF.reset() - self.password_opts = \ - ks_loading.get_auth_plugin_conf_options('password') - CONF.register_opts(self.password_opts, group='nova') - CONF.set_override('auth_url', - 'http://keystonehost:5000', - group='nova') - CONF.set_override('username', 'adminuser', group='nova') - CONF.set_override('password', 'strongpassword', group='nova') - self.ctx = context.RequestContext('regularuser', 'e3f0833dc08b4cea', - auth_token='token', is_admin=False) - self.ctx.service_catalog = \ - [{'type': 'compute', 'name': 'nova', 'endpoints': - [{'publicURL': 'http://novahost:8774/v2/e3f0833dc08b4cea'}]}, - {'type': 'identity', 'name': 'keystone', 'endpoints': - [{'publicURL': 'http://keystonehostfromsc:5000/v3'}]}] - - self.override_config('auth_type', 'password', group='nova') - self.override_config('cafile', 'my.ca', group='nova') - - def tearDown(self): - super(NovaClientTestCase, self).tearDown() - - CONF.unregister_opts(self.password_opts, group='nova') - - @mock.patch('novaclient.api_versions.APIVersion') - @mock.patch('novaclient.client.Client') - @mock.patch('keystoneauth1.identity.Token') - @mock.patch('keystoneauth1.session.Session') - def test_nova_client_regular(self, p_session, p_token_plugin, p_client, - p_api_version): - - self.override_config('token_auth_url', - 'http://keystonehost:5000', - group='nova') - 
nova.novaclient(self.ctx) - p_token_plugin.assert_called_once_with( - auth_url='http://keystonehost:5000', - token='token', project_name=None, project_domain_id=None - ) - p_client.assert_called_once_with( - p_api_version(nova.NOVA_API_VERSION), - session=p_session.return_value, region_name=None, - insecure=False, endpoint_type='public', cacert='my.ca', - global_request_id=self.ctx.request_id, - timeout=None, extensions=nova.nova_extensions) - - @mock.patch('novaclient.api_versions.APIVersion') - @mock.patch('novaclient.client.Client') - @mock.patch('keystoneauth1.identity.Token') - @mock.patch('keystoneauth1.session.Session') - def test_nova_client_regular_service_catalog(self, p_session, - p_token_plugin, p_client, - p_api_version): - - nova.novaclient(self.ctx) - p_token_plugin.assert_called_once_with( - auth_url='http://keystonehostfromsc:5000/v3', - token='token', project_name=None, project_domain_id=None - ) - p_client.assert_called_once_with( - p_api_version(nova.NOVA_API_VERSION), - session=p_session.return_value, region_name=None, - insecure=False, endpoint_type='public', cacert='my.ca', - global_request_id=self.ctx.request_id, - timeout=None, extensions=nova.nova_extensions) - - @mock.patch('novaclient.api_versions.APIVersion') - @mock.patch('novaclient.client.Client') - @mock.patch('keystoneauth1.identity.Password') - @mock.patch('keystoneauth1.session.Session') - def test_nova_client_privileged_user(self, p_session, p_password_plugin, - p_client, p_api_version): - - nova.novaclient(self.ctx, privileged_user=True) - p_password_plugin.assert_called_once_with( - auth_url='http://keystonehost:5000', default_domain_id=None, - default_domain_name=None, domain_id=None, domain_name=None, - password='strongpassword', project_domain_id=None, - project_domain_name=None, project_id=None, project_name=None, - trust_id=None, user_domain_id=None, user_domain_name=None, - user_id=None, username='adminuser' - ) - p_client.assert_called_once_with( - 
p_api_version(nova.NOVA_API_VERSION), - session=p_session.return_value, region_name=None, - insecure=False, endpoint_type='public', cacert='my.ca', - global_request_id=self.ctx.request_id, - timeout=None, extensions=nova.nova_extensions) - - @mock.patch('novaclient.api_versions.APIVersion') - @mock.patch('novaclient.client.Client') - @mock.patch('keystoneauth1.identity.Password') - @mock.patch('keystoneauth1.session.Session') - def test_nova_client_privileged_user_custom_auth_url(self, p_session, - p_password_plugin, - p_client, - p_api_version): - - CONF.set_override('auth_url', - 'http://privatekeystonehost:5000', - group='nova') - nova.novaclient(self.ctx, privileged_user=True) - p_password_plugin.assert_called_once_with( - auth_url='http://privatekeystonehost:5000', default_domain_id=None, - default_domain_name=None, domain_id=None, domain_name=None, - password='strongpassword', project_domain_id=None, - project_domain_name=None, project_id=None, project_name=None, - trust_id=None, user_domain_id=None, user_domain_name=None, - user_id=None, username='adminuser' - ) - p_client.assert_called_once_with( - p_api_version(nova.NOVA_API_VERSION), - session=p_session.return_value, region_name=None, - insecure=False, endpoint_type='public', cacert='my.ca', - global_request_id=self.ctx.request_id, - timeout=None, extensions=nova.nova_extensions) - - @mock.patch('novaclient.api_versions.APIVersion') - @mock.patch('novaclient.client.Client') - @mock.patch('keystoneauth1.identity.Password') - @mock.patch('keystoneauth1.session.Session') - def test_nova_client_custom_region(self, p_session, p_password_plugin, - p_client, p_api_version): - - CONF.set_override('region_name', 'farfaraway', group='nova') - nova.novaclient(self.ctx, privileged_user=True) - p_password_plugin.assert_called_once_with( - auth_url='http://keystonehost:5000', default_domain_id=None, - default_domain_name=None, domain_id=None, domain_name=None, - password='strongpassword', project_domain_id=None, - 
project_domain_name=None, project_id=None, project_name=None, - trust_id=None, user_domain_id=None, user_domain_name=None, - user_id=None, username='adminuser' - ) - p_client.assert_called_once_with( - p_api_version(nova.NOVA_API_VERSION), - session=p_session.return_value, region_name='farfaraway', - insecure=False, endpoint_type='public', cacert='my.ca', - global_request_id=self.ctx.request_id, - timeout=None, extensions=nova.nova_extensions) - - def test_novaclient_exceptions(self): - # This is to prevent regression if exceptions are - # removed from novaclient since the service catalog - # code does not have thorough tests. - self.assertTrue(hasattr(nova_exceptions, 'EndpointNotFound')) - - -class FakeNovaClient(object): - class ServerExternalEvents(object): - def __getattr__(self, item): - return None - - class Volumes(object): - def __getattr__(self, item): - return None - - def __init__(self): - self.server_external_events = self.ServerExternalEvents() - self.volumes = self.Volumes() - - def create_volume_snapshot(self, *args, **kwargs): - pass - - def delete_volume_snapshot(self, *args, **kwargs): - pass - - -class NovaApiTestCase(test.TestCase): - def setUp(self): - super(NovaApiTestCase, self).setUp() - - self.api = nova.API() - self.novaclient = FakeNovaClient() - self.ctx = context.get_admin_context() - - def test_update_server_volume(self): - with mock.patch.object(nova, 'novaclient') as mock_novaclient, \ - mock.patch.object(self.novaclient.volumes, - 'update_server_volume') as \ - mock_update_server_volume: - mock_novaclient.return_value = self.novaclient - - self.api.update_server_volume(self.ctx, 'server_id', - 'attach_id', 'new_volume_id') - - mock_novaclient.assert_called_once_with(self.ctx, - privileged_user=True) - mock_update_server_volume.assert_called_once_with( - 'server_id', - 'attach_id', - 'new_volume_id' - ) - - def test_extend_volume(self): - server_ids = ['server-id-1', 'server-id-2'] - with mock.patch.object(nova, 'novaclient') as 
mock_novaclient, \ - mock.patch.object(self.novaclient.server_external_events, - 'create') as mock_create_event: - mock_novaclient.return_value = self.novaclient - - self.api.extend_volume(self.ctx, server_ids, 'volume_id') - - mock_novaclient.assert_called_once_with(self.ctx, - privileged_user=True, - api_version='2.51') - mock_create_event.assert_called_once_with([ - {'name': 'volume-extended', - 'server_uuid': 'server-id-1', - 'tag': 'volume_id'}, - {'name': 'volume-extended', - 'server_uuid': 'server-id-2', - 'tag': 'volume_id'}, - ]) diff --git a/cinder/tests/unit/conf_fixture.py b/cinder/tests/unit/conf_fixture.py deleted file mode 100644 index 22464ee18..000000000 --- a/cinder/tests/unit/conf_fixture.py +++ /dev/null @@ -1,59 +0,0 @@ -# Copyright 2010 United States Government as represented by the -# Administrator of the National Aeronautics and Space Administration. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -import os - -from oslo_config import cfg - -from cinder.volume import configuration - -CONF = cfg.CONF - -CONF.import_opt('policy_file', 'cinder.policy', group='oslo_policy') -CONF.import_opt('volume_driver', 'cinder.volume.manager', - group=configuration.SHARED_CONF_GROUP) -CONF.import_opt('backup_driver', 'cinder.backup.manager') -CONF.import_opt('api_class', 'cinder.keymgr', group='key_manager') -CONF.import_opt('fixed_key', 'cinder.keymgr.conf_key_mgr', group='key_manager') -CONF.import_opt('scheduler_driver', 'cinder.scheduler.manager') - -def_vol_type = 'fake_vol_type' - - -def set_defaults(conf): - conf.set_default('default_volume_type', def_vol_type) - conf.set_default('volume_driver', - 'cinder.tests.fake_driver.FakeLoggingVolumeDriver', - group=configuration.SHARED_CONF_GROUP) - conf.set_default('iscsi_helper', 'fake') - conf.set_default('rpc_backend', 'cinder.openstack.common.rpc.impl_fake') - conf.set_default('connection', 'sqlite://', group='database') - conf.set_default('sqlite_synchronous', False, group='database') - conf.set_default('policy_file', 'cinder.tests.unit/policy.json', - group='oslo_policy') - conf.set_default('backup_driver', 'cinder.tests.unit.backup.fake_service') - conf.set_default('api_class', - 'cinder.keymgr.conf_key_mgr.ConfKeyManager', - group='key_manager') - conf.set_default('fixed_key', default='0' * 64, group='key_manager') - conf.set_default('scheduler_driver', - 'cinder.scheduler.filter_scheduler.FilterScheduler') - conf.set_default('state_path', os.path.abspath( - os.path.join(os.path.dirname(__file__), '..', '..', '..'))) - conf.set_default('policy_dirs', [], group='oslo_policy') - # This is where we don't authenticate - conf.set_default('auth_strategy', 'noauth') - conf.set_default('auth_uri', 'fake', 'keystone_authtoken') diff --git a/cinder/tests/unit/consistencygroup/__init__.py b/cinder/tests/unit/consistencygroup/__init__.py deleted file mode 100644 index e69de29bb..000000000 diff --git 
a/cinder/tests/unit/consistencygroup/fake_cgsnapshot.py b/cinder/tests/unit/consistencygroup/fake_cgsnapshot.py deleted file mode 100644 index 598b914c1..000000000 --- a/cinder/tests/unit/consistencygroup/fake_cgsnapshot.py +++ /dev/null @@ -1,51 +0,0 @@ -# Copyright 2016 EMC Corp. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from oslo_versionedobjects import fields - -from cinder import objects -from cinder.tests.unit import fake_constants as fake - - -def fake_db_cgsnapshot(**updates): - db_values = { - 'id': fake.CGSNAPSHOT_ID, - 'consistencygroup_id': fake.CONSISTENCY_GROUP_ID, - 'user_id': fake.USER_ID, - 'project_id': fake.PROJECT_ID, - } - for name, field in objects.CGSnapshot.fields.items(): - if name in db_values: - continue - if field.nullable: - db_values[name] = None - elif field.default != fields.UnspecifiedDefault: - db_values[name] = field.default - else: - raise Exception('fake_db_snapshot needs help with %s' % - name) - - if updates: - db_values.update(updates) - - return db_values - - -def fake_cgsnapshot_obj(context, **updates): - expected_attrs = updates.pop('expected_attrs', None) - return objects.CGSnapshot._from_db_object(context, - objects.CGSnapshot(), - fake_db_cgsnapshot( - **updates), - expected_attrs=expected_attrs) diff --git a/cinder/tests/unit/consistencygroup/fake_consistencygroup.py b/cinder/tests/unit/consistencygroup/fake_consistencygroup.py deleted file mode 100644 index ad81e8c76..000000000 --- 
a/cinder/tests/unit/consistencygroup/fake_consistencygroup.py +++ /dev/null @@ -1,50 +0,0 @@ -# Copyright 2015 SimpliVity Corp. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from oslo_versionedobjects import fields - -from cinder import objects -from cinder.tests.unit import fake_constants as fake - - -def fake_db_consistencygroup(**updates): - db_values = { - 'id': fake.CONSISTENCY_GROUP_ID, - 'user_id': fake.USER_ID, - 'project_id': fake.PROJECT_ID, - 'host': 'FakeHost', - 'volumes': [], - } - for name, field in objects.ConsistencyGroup.fields.items(): - if name in db_values: - continue - if field.nullable: - db_values[name] = None - elif field.default != fields.UnspecifiedDefault: - db_values[name] = field.default - else: - raise Exception('fake_db_consistencygroup needs help with %s' % - name) - - if updates: - db_values.update(updates) - - return db_values - - -def fake_consistencyobject_obj(context, **updates): - return objects.ConsistencyGroup._from_db_object(context, - objects.ConsistencyGroup(), - fake_db_consistencygroup( - **updates)) diff --git a/cinder/tests/unit/db/__init__.py b/cinder/tests/unit/db/__init__.py deleted file mode 100644 index e69de29bb..000000000 diff --git a/cinder/tests/unit/db/test_cluster.py b/cinder/tests/unit/db/test_cluster.py deleted file mode 100644 index 5350da36a..000000000 --- a/cinder/tests/unit/db/test_cluster.py +++ /dev/null @@ -1,266 +0,0 @@ -# Copyright (c) 2016 Red Hat, Inc. -# All Rights Reserved. 
-# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -"""Tests for cluster table related operations.""" - -import mock -from oslo_config import cfg -from sqlalchemy.orm import exc - -from cinder import db -from cinder import exception -from cinder.tests.unit import test_db_api -from cinder.tests.unit import utils - - -CONF = cfg.CONF - - -class ClusterTestCase(test_db_api.BaseTest): - """Unit tests for cinder.db.api.cluster_*.""" - - def test_cluster_create_and_get(self): - """Basic cluster creation test.""" - values = utils.default_cluster_values() - cluster = db.cluster_create(self.ctxt, values) - values['last_heartbeat'] = None - self.assertEqual(0, cluster.race_preventer) - for k, v in values.items(): - self.assertEqual(v, getattr(cluster, k)) - - db_cluster = db.cluster_get(self.ctxt, cluster.id, - services_summary=True) - for k, v in values.items(): - self.assertEqual(v, getattr(db_cluster, k)) - self.assertEqual(0, db_cluster.race_preventer) - - def test_cluster_create_cfg_disabled(self): - """Test that create uses enable_new_services configuration option.""" - self.override_config('enable_new_services', False) - cluster = utils.create_cluster(self.ctxt, disabled=None) - self.assertTrue(cluster.disabled) - - def test_cluster_create_disabled_preference(self): - """Test that provided disabled value has highest priority on create.""" - self.override_config('enable_new_services', False) - cluster = utils.create_cluster(self.ctxt) - self.assertFalse(cluster.disabled) - 
- def test_cluster_create_duplicate(self): - """Test that unique constraints are working. - - To remove potential races on creation we have a constraint set on name - and race_preventer fields, and we set value on creation to 0, so 2 - clusters with the same name will fail this constraint. On deletion we - change this field to the same value as the id which will be unique and - will not conflict with the creation of another cluster with the same - name. - """ - cluster = utils.create_cluster(self.ctxt) - self.assertRaises(exception.ClusterExists, - utils.create_cluster, - self.ctxt, - name=cluster.name) - - def test_cluster_create_not_duplicate(self): - """Test that unique constraints will work with delete operation. - - To remove potential races on creation we have a constraint set on name - and race_preventer fields, and we set value on creation to 0, so 2 - clusters with the same name will fail this constraint. On deletion we - change this field to the same value as the id which will be unique and - will not conflict with the creation of another cluster with the same - name. 
- """ - cluster = utils.create_cluster(self.ctxt) - self.assertIsNone(db.cluster_destroy(self.ctxt, cluster.id)) - self.assertIsNotNone(utils.create_cluster(self.ctxt, - name=cluster.name)) - - def test_cluster_get_fail(self): - """Test that cluster get will fail if the cluster doesn't exists.""" - utils.create_cluster(self.ctxt, name='cluster@backend') - self.assertRaises(exception.ClusterNotFound, - db.cluster_get, self.ctxt, 'name=cluster@backend2') - - def test_cluster_get_by_name(self): - """Getting a cluster by name will include backends if not specified.""" - cluster = utils.create_cluster(self.ctxt, name='cluster@backend') - # Get without the backend - db_cluster = db.cluster_get(self.ctxt, name='cluster') - self.assertEqual(cluster.id, db_cluster.id) - # Get with the backend detail - db_cluster = db.cluster_get(self.ctxt, name='cluster@backend') - self.assertEqual(cluster.id, db_cluster.id) - - def test_cluster_get_without_summary(self): - """Test getting cluster without summary information.""" - cluster = utils.create_cluster(self.ctxt) - db_cluster = db.cluster_get(self.ctxt, cluster.id) - self.assertRaises(exc.DetachedInstanceError, - getattr, db_cluster, 'num_hosts') - self.assertRaises(exc.DetachedInstanceError, - getattr, db_cluster, 'num_down_hosts') - self.assertIsNone(db_cluster.last_heartbeat) - - def test_cluster_get_with_summary_empty_cluster(self): - """Test getting empty cluster with summary information.""" - cluster = utils.create_cluster(self.ctxt) - db_cluster = db.cluster_get(self.ctxt, cluster.id, - services_summary=True) - self.assertEqual(0, db_cluster.num_hosts) - self.assertEqual(0, db_cluster.num_down_hosts) - self.assertIsNone(db_cluster.last_heartbeat) - - def test_cluster_get_with_summary(self): - """Test getting cluster with summary information.""" - cluster, svcs = utils.create_populated_cluster(self.ctxt, 3, 1) - db_cluster = db.cluster_get(self.ctxt, cluster.id, - services_summary=True) - self.assertEqual(3, 
db_cluster.num_hosts) - self.assertEqual(1, db_cluster.num_down_hosts) - self.assertEqual(svcs[1].updated_at, db_cluster.last_heartbeat) - - def test_cluster_get_is_up_on_empty_cluster(self): - """Test is_up filter works on empty clusters.""" - cluster = utils.create_cluster(self.ctxt) - db_cluster = db.cluster_get(self.ctxt, cluster.id, is_up=False) - self.assertEqual(cluster.id, db_cluster.id) - self.assertRaises(exception.ClusterNotFound, - db.cluster_get, self.ctxt, cluster.id, is_up=True) - - def test_cluster_get_services_on_empty_cluster(self): - """Test get_services filter works on empty clusters.""" - cluster = utils.create_cluster(self.ctxt) - db_cluster = db.cluster_get(self.ctxt, cluster.id, get_services=True) - self.assertEqual(cluster.id, db_cluster.id) - self.assertListEqual([], db_cluster.services) - - def test_cluster_get_services(self): - """Test services is properly populated on non empty cluster.""" - # We create another cluster to see we do the selection correctly - utils.create_populated_cluster(self.ctxt, 2, name='cluster2') - # We create our cluster with 2 up nodes and 1 down - cluster, svcs = utils.create_populated_cluster(self.ctxt, 3, 1) - # Add a deleted service to the cluster - db.service_create(self.ctxt, - {'cluster_name': cluster.name, - 'deleted': True}) - db_cluster = db.cluster_get(self.ctxt, name=cluster.name, - get_services=True) - self.assertEqual(3, len(db_cluster.services)) - self.assertSetEqual({svc.id for svc in svcs}, - {svc.id for svc in db_cluster.services}) - - def test_cluster_get_is_up_all_are_down(self): - """Test that is_up filter works when all services are down.""" - cluster, svcs = utils.create_populated_cluster(self.ctxt, 3, 3) - self.assertRaises(exception.ClusterNotFound, - db.cluster_get, self.ctxt, cluster.id, is_up=True) - db_cluster = db.cluster_get(self.ctxt, name=cluster.name, is_up=False) - self.assertEqual(cluster.id, db_cluster.id) - - def test_cluster_get_by_num_down_hosts(self): - """Test cluster_get 
by subquery field num_down_hosts.""" - cluster, svcs = utils.create_populated_cluster(self.ctxt, 3, 2) - result = db.cluster_get(self.ctxt, num_down_hosts=2) - self.assertEqual(cluster.id, result.id) - - def test_cluster_get_by_num_hosts(self): - """Test cluster_get by subquery field num_hosts.""" - cluster, svcs = utils.create_populated_cluster(self.ctxt, 3, 2) - result = db.cluster_get(self.ctxt, num_hosts=3) - self.assertEqual(cluster.id, result.id) - - def test_cluster_destroy(self): - """Test basic cluster destroy.""" - cluster = utils.create_cluster(self.ctxt) - # On creation race_preventer is marked with a 0 - self.assertEqual(0, cluster.race_preventer) - db.cluster_destroy(self.ctxt, cluster.id) - db_cluster = db.cluster_get(self.ctxt, cluster.id, read_deleted='yes') - self.assertTrue(db_cluster.deleted) - self.assertIsNotNone(db_cluster.deleted_at) - # On deletion race_preventer is marked with the id - self.assertEqual(cluster.id, db_cluster.race_preventer) - - def test_cluster_destroy_non_existent(self): - """Test destroying non existent cluster.""" - self.assertRaises(exception.ClusterNotFound, - db.cluster_destroy, self.ctxt, 0) - - def test_cluster_destroy_has_services(self): - """Test that we cannot delete a cluster with non deleted services.""" - cluster, svcs = utils.create_populated_cluster(self.ctxt, 3, 1) - self.assertRaises(exception.ClusterHasHosts, - db.cluster_destroy, self.ctxt, cluster.id) - - def test_cluster_update_non_existent(self): - """Test that we raise an exception on updating non existent cluster.""" - self.assertRaises(exception.ClusterNotFound, - db.cluster_update, self.ctxt, 0, {'disabled': True}) - - def test_cluster_update(self): - """Test basic cluster update.""" - cluster = utils.create_cluster(self.ctxt) - self.assertFalse(cluster.disabled) - db.cluster_update(self.ctxt, cluster.id, {'disabled': True}) - db_cluster = db.cluster_get(self.ctxt, cluster.id) - self.assertTrue(db_cluster.disabled) - - def 
test_cluster_get_all_empty(self): - """Test basic empty cluster get_all.""" - self.assertListEqual([], db.cluster_get_all(self.ctxt)) - - def test_cluster_get_all_matches(self): - """Basic test of get_all with a matching filter.""" - cluster1, svcs = utils.create_populated_cluster(self.ctxt, 3, 1) - cluster2, svcs = utils.create_populated_cluster(self.ctxt, 3, 2, - name='cluster2') - cluster3, svcs = utils.create_populated_cluster(self.ctxt, 3, 3, - name='cluster3') - - expected = {cluster1.id, cluster2.id} - result = db.cluster_get_all(self.ctxt, is_up=True) - self.assertEqual(len(expected), len(result)) - self.assertSetEqual(expected, {cluster.id for cluster in result}) - - def test_cluster_get_all_no_match(self): - """Basic test of get_all with a non matching filter.""" - cluster1, svcs = utils.create_populated_cluster(self.ctxt, 3, 3) - result = db.cluster_get_all(self.ctxt, is_up=True) - self.assertListEqual([], result) - - @mock.patch('cinder.db.sqlalchemy.api._cluster_query') - def test_cluster_get_all_passes_parameters(self, cluster_query_mock): - """Test that get_all passes all parameters. - - Since we have already tested all filters and parameters with - cluster_get method all we have to do for get_all is to check that we - are passing them to the query building method. 
- """ - args = (mock.sentinel.read_deleted, mock.sentinel.get_services, - mock.sentinel.services_summary, mock.sentinel.is_up, - mock.sentinel.name_match_level) - filters = {'session': mock.sentinel.session, - 'name': mock.sentinel.name, - 'disabled': mock.sentinel.disabled, - 'disabled_reason': mock.sentinel.disabled_reason, - 'race_preventer': mock.sentinel.race_preventer, - 'last_heartbeat': mock.sentinel.last_heartbeat, - 'num_hosts': mock.sentinel.num_hosts, - 'num_down_hosts': mock.sentinel.num_down_hosts} - db.cluster_get_all(self.ctxt, *args, **filters) - cluster_query_mock.assert_called_once_with(self.ctxt, *args, **filters) diff --git a/cinder/tests/unit/db/test_name_id.py b/cinder/tests/unit/db/test_name_id.py deleted file mode 100644 index 0bf5c9e31..000000000 --- a/cinder/tests/unit/db/test_name_id.py +++ /dev/null @@ -1,58 +0,0 @@ -# Copyright 2013 IBM Corp. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -"""Tests for volume name_id.""" - -from oslo_config import cfg - -from cinder import context -from cinder import db -from cinder import test -from cinder.tests.unit import fake_constants as fake -from cinder.tests.unit import utils as testutils - - -CONF = cfg.CONF - - -class NameIDsTestCase(test.TestCase): - """Test cases for naming volumes with name_id.""" - - def setUp(self): - super(NameIDsTestCase, self).setUp() - self.ctxt = context.RequestContext(user_id=fake.USER_ID, - project_id=fake.PROJECT_ID) - - def test_name_id_same(self): - """New volume should have same 'id' and 'name_id'.""" - vol_ref = testutils.create_volume(self.ctxt, size=1) - self.assertEqual(vol_ref['name_id'], vol_ref['id']) - expected_name = CONF.volume_name_template % vol_ref['id'] - self.assertEqual(expected_name, vol_ref['name']) - - def test_name_id_diff(self): - """Change name ID to mimic volume after migration.""" - vol_ref = testutils.create_volume(self.ctxt, size=1, - _name_id=fake.VOLUME2_ID) - vol_ref = db.volume_get(self.ctxt, vol_ref['id']) - expected_name = CONF.volume_name_template % fake.VOLUME2_ID - self.assertEqual(expected_name, vol_ref['name']) - - def test_name_id_snapshot_volume_name(self): - """Make sure snapshot['volume_name'] is updated.""" - vol_ref = testutils.create_volume(self.ctxt, size=1, - _name_id=fake.VOLUME2_ID) - snap_ref = testutils.create_snapshot(self.ctxt, vol_ref['id']) - expected_name = CONF.volume_name_template % fake.VOLUME2_ID - self.assertEqual(expected_name, snap_ref['volume_name']) diff --git a/cinder/tests/unit/db/test_purge.py b/cinder/tests/unit/db/test_purge.py deleted file mode 100644 index 194a35ba8..000000000 --- a/cinder/tests/unit/db/test_purge.py +++ /dev/null @@ -1,316 +0,0 @@ -# Copyright (C) 2015 OpenStack Foundation -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. 
You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -"""Tests for db purge.""" - -import datetime -import uuid - -from oslo_db import exception as db_exc -from oslo_utils import timeutils -from sqlalchemy.dialects import sqlite - -from cinder import context -from cinder import db -from cinder.db.sqlalchemy import api as db_api -from cinder import exception -from cinder import test - -from oslo_db.sqlalchemy import utils as sqlalchemyutils - - -class PurgeDeletedTest(test.TestCase): - - def setUp(self): - super(PurgeDeletedTest, self).setUp() - self.context = context.get_admin_context() - self.engine = db_api.get_engine() - self.session = db_api.get_session() - self.conn = self.engine.connect() - self.volumes = sqlalchemyutils.get_table( - self.engine, "volumes") - # The volume_metadata table has a FK of volume_id - self.vm = sqlalchemyutils.get_table( - self.engine, "volume_metadata") - - self.vol_types = sqlalchemyutils.get_table( - self.engine, "volume_types") - # The volume_type_projects table has a FK of volume_type_id - self.vol_type_proj = sqlalchemyutils.get_table( - self.engine, "volume_type_projects") - - self.snapshots = sqlalchemyutils.get_table( - self.engine, "snapshots") - - self.sm = sqlalchemyutils.get_table( - self.engine, "snapshot_metadata") - - self.vgm = sqlalchemyutils.get_table( - self.engine, "volume_glance_metadata") - - self.qos = sqlalchemyutils.get_table( - self.engine, "quality_of_service_specs") - - self.uuidstrs = [] - for unused in range(6): - self.uuidstrs.append(uuid.uuid4().hex) - # Add 6 rows to table - for uuidstr in self.uuidstrs: - ins_stmt = 
self.volumes.insert().values(id=uuidstr) - self.conn.execute(ins_stmt) - ins_stmt = self.vm.insert().values(volume_id=uuidstr) - self.conn.execute(ins_stmt) - ins_stmt = self.vgm.insert().values( - volume_id=uuidstr, key='image_name', value='test') - self.conn.execute(ins_stmt) - - ins_stmt = self.vol_types.insert().values(id=uuidstr) - self.conn.execute(ins_stmt) - ins_stmt = self.vol_type_proj.insert().\ - values(volume_type_id=uuidstr) - self.conn.execute(ins_stmt) - - ins_stmt = self.snapshots.insert().values( - id=uuidstr, volume_id=uuidstr) - self.conn.execute(ins_stmt) - ins_stmt = self.sm.insert().values(snapshot_id=uuidstr) - self.conn.execute(ins_stmt) - - ins_stmt = self.vgm.insert().values( - snapshot_id=uuidstr, key='image_name', value='test') - self.conn.execute(ins_stmt) - - ins_stmt = self.qos.insert().values( - id=uuidstr, key='QoS_Specs_Name', value='test') - self.conn.execute(ins_stmt) - - ins_stmt = self.vol_types.insert().values( - id=uuid.uuid4().hex, qos_specs_id=uuidstr) - self.conn.execute(ins_stmt) - - ins_stmt = self.qos.insert().values( - id=uuid.uuid4().hex, specs_id=uuidstr, key='desc', - value='test') - self.conn.execute(ins_stmt) - - # Set 4 of them deleted, 2 are 60 days ago, 2 are 20 days ago - old = timeutils.utcnow() - datetime.timedelta(days=20) - older = timeutils.utcnow() - datetime.timedelta(days=60) - - make_vol_old = self.volumes.update().\ - where(self.volumes.c.id.in_(self.uuidstrs[1:3]))\ - .values(deleted_at=old) - make_vol_older = self.volumes.update().\ - where(self.volumes.c.id.in_(self.uuidstrs[4:6]))\ - .values(deleted_at=older) - make_vol_meta_old = self.vm.update().\ - where(self.vm.c.volume_id.in_(self.uuidstrs[1:3]))\ - .values(deleted_at=old) - make_vol_meta_older = self.vm.update().\ - where(self.vm.c.volume_id.in_(self.uuidstrs[4:6]))\ - .values(deleted_at=older) - - make_vol_types_old = self.vol_types.update().\ - where(self.vol_types.c.id.in_(self.uuidstrs[1:3]))\ - .values(deleted_at=old) - 
make_vol_types_older = self.vol_types.update().\ - where(self.vol_types.c.id.in_(self.uuidstrs[4:6]))\ - .values(deleted_at=older) - make_vol_type_proj_old = self.vol_type_proj.update().\ - where(self.vol_type_proj.c.volume_type_id.in_(self.uuidstrs[1:3]))\ - .values(deleted_at=old) - make_vol_type_proj_older = self.vol_type_proj.update().\ - where(self.vol_type_proj.c.volume_type_id.in_(self.uuidstrs[4:6]))\ - .values(deleted_at=older) - - make_snap_old = self.snapshots.update().\ - where(self.snapshots.c.id.in_(self.uuidstrs[1:3]))\ - .values(deleted_at=old) - make_snap_older = self.snapshots.update().\ - where(self.snapshots.c.id.in_(self.uuidstrs[4:6]))\ - .values(deleted_at=older) - make_snap_meta_old = self.sm.update().\ - where(self.sm.c.snapshot_id.in_(self.uuidstrs[1:3]))\ - .values(deleted_at=old) - make_snap_meta_older = self.sm.update().\ - where(self.sm.c.snapshot_id.in_(self.uuidstrs[4:6]))\ - .values(deleted_at=older) - - make_vol_glance_meta_old = self.vgm.update().\ - where(self.vgm.c.volume_id.in_(self.uuidstrs[1:3]))\ - .values(deleted_at=old) - make_vol_glance_meta_older = self.vgm.update().\ - where(self.vgm.c.volume_id.in_(self.uuidstrs[4:6]))\ - .values(deleted_at=older) - make_snap_glance_meta_old = self.vgm.update().\ - where(self.vgm.c.snapshot_id.in_(self.uuidstrs[1:3]))\ - .values(deleted_at=old) - make_snap_glance_meta_older = self.vgm.update().\ - where(self.vgm.c.snapshot_id.in_(self.uuidstrs[4:6]))\ - .values(deleted_at=older) - - make_qos_old = self.qos.update().where( - self.qos.c.id.in_(self.uuidstrs[1:3])).values(deleted_at=old) - make_qos_older = self.qos.update().where( - self.qos.c.id.in_(self.uuidstrs[4:6])).values(deleted_at=older) - - make_qos_child_record_old = self.qos.update().where( - self.qos.c.specs_id.in_(self.uuidstrs[1:3])).values( - deleted_at=old) - make_qos_child_record_older = self.qos.update().where( - self.qos.c.specs_id.in_(self.uuidstrs[4:6])).values( - deleted_at=older) - - make_vol_types1_old = 
self.vol_types.update().where( - self.vol_types.c.qos_specs_id.in_(self.uuidstrs[1:3])).values( - deleted_at=old) - make_vol_types1_older = self.vol_types.update().where( - self.vol_types.c.qos_specs_id.in_(self.uuidstrs[4:6])).values( - deleted_at=older) - - self.conn.execute(make_vol_old) - self.conn.execute(make_vol_older) - self.conn.execute(make_vol_meta_old) - self.conn.execute(make_vol_meta_older) - - self.conn.execute(make_vol_types_old) - self.conn.execute(make_vol_types_older) - self.conn.execute(make_vol_type_proj_old) - self.conn.execute(make_vol_type_proj_older) - - self.conn.execute(make_snap_old) - self.conn.execute(make_snap_older) - self.conn.execute(make_snap_meta_old) - self.conn.execute(make_snap_meta_older) - - self.conn.execute(make_vol_glance_meta_old) - self.conn.execute(make_vol_glance_meta_older) - self.conn.execute(make_snap_glance_meta_old) - self.conn.execute(make_snap_glance_meta_older) - - self.conn.execute(make_qos_old) - self.conn.execute(make_qos_older) - - self.conn.execute(make_qos_child_record_old) - self.conn.execute(make_qos_child_record_older) - - self.conn.execute(make_vol_types1_old) - self.conn.execute(make_vol_types1_older) - - def test_purge_deleted_rows_old(self): - dialect = self.engine.url.get_dialect() - if dialect == sqlite.dialect: - # We're seeing issues with foreign key support in SQLite 3.6.20 - # SQLAlchemy doesn't support it at all with < SQLite 3.6.19 - # It works fine in SQLite 3.7. 
- # Force foreign_key checking if running SQLite >= 3.7 - import sqlite3 - tup = sqlite3.sqlite_version_info - if tup[0] > 3 or (tup[0] == 3 and tup[1] >= 7): - self.conn.execute("PRAGMA foreign_keys = ON") - # Purge at 30 days old, should only delete 2 rows - db.purge_deleted_rows(self.context, age_in_days=30) - - vol_rows = self.session.query(self.volumes).count() - vol_meta_rows = self.session.query(self.vm).count() - vol_type_rows = self.session.query(self.vol_types).count() - vol_type_proj_rows = self.session.query(self.vol_type_proj).count() - snap_rows = self.session.query(self.snapshots).count() - snap_meta_rows = self.session.query(self.sm).count() - vol_glance_meta_rows = self.session.query(self.vgm).count() - qos_rows = self.session.query(self.qos).count() - - # Verify that we only deleted 2 - self.assertEqual(4, vol_rows) - self.assertEqual(4, vol_meta_rows) - self.assertEqual(8, vol_type_rows) - self.assertEqual(4, vol_type_proj_rows) - self.assertEqual(4, snap_rows) - self.assertEqual(4, snap_meta_rows) - self.assertEqual(8, vol_glance_meta_rows) - self.assertEqual(8, qos_rows) - - def test_purge_deleted_rows_older(self): - dialect = self.engine.url.get_dialect() - if dialect == sqlite.dialect: - # We're seeing issues with foreign key support in SQLite 3.6.20 - # SQLAlchemy doesn't support it at all with < SQLite 3.6.19 - # It works fine in SQLite 3.7. 
- # Force foreign_key checking if running SQLite >= 3.7 - import sqlite3 - tup = sqlite3.sqlite_version_info - if tup[0] > 3 or (tup[0] == 3 and tup[1] >= 7): - self.conn.execute("PRAGMA foreign_keys = ON") - # Purge at 10 days old now, should delete 2 more rows - db.purge_deleted_rows(self.context, age_in_days=10) - - vol_rows = self.session.query(self.volumes).count() - vol_meta_rows = self.session.query(self.vm).count() - vol_type_rows = self.session.query(self.vol_types).count() - vol_type_proj_rows = self.session.query(self.vol_type_proj).count() - snap_rows = self.session.query(self.snapshots).count() - snap_meta_rows = self.session.query(self.sm).count() - vol_glance_meta_rows = self.session.query(self.vgm).count() - qos_rows = self.session.query(self.qos).count() - - # Verify that we only have 2 rows now - self.assertEqual(2, vol_rows) - self.assertEqual(2, vol_meta_rows) - self.assertEqual(4, vol_type_rows) - self.assertEqual(2, vol_type_proj_rows) - self.assertEqual(2, snap_rows) - self.assertEqual(2, snap_meta_rows) - self.assertEqual(4, vol_glance_meta_rows) - self.assertEqual(4, qos_rows) - - def test_purge_deleted_rows_bad_args(self): - # Test with no age argument - self.assertRaises(TypeError, db.purge_deleted_rows, self.context) - # Test purge with non-integer - self.assertRaises(exception.InvalidParameterValue, - db.purge_deleted_rows, self.context, - age_in_days='ten') - - def test_purge_deleted_rows_integrity_failure(self): - dialect = self.engine.url.get_dialect() - if dialect == sqlite.dialect: - # We're seeing issues with foreign key support in SQLite 3.6.20 - # SQLAlchemy doesn't support it at all with < SQLite 3.6.19 - # It works fine in SQLite 3.7. 
- # So return early to skip this test if running SQLite < 3.7 - import sqlite3 - tup = sqlite3.sqlite_version_info - if tup[0] < 3 or (tup[0] == 3 and tup[1] < 7): - self.skipTest( - 'sqlite version too old for reliable SQLA foreign_keys') - self.conn.execute("PRAGMA foreign_keys = ON") - - # add new entry in volume and volume_admin_metadata for - # integrity check - uuid_str = uuid.uuid4().hex - ins_stmt = self.volumes.insert().values(id=uuid_str) - self.conn.execute(ins_stmt) - ins_stmt = self.vm.insert().values(volume_id=uuid_str) - self.conn.execute(ins_stmt) - - # set volume record to deleted 20 days ago - old = timeutils.utcnow() - datetime.timedelta(days=20) - make_old = self.volumes.update().where( - self.volumes.c.id.in_([uuid_str])).values(deleted_at=old) - self.conn.execute(make_old) - - # Verify that purge_deleted_rows fails due to Foreign Key constraint - self.assertRaises(db_exc.DBReferenceError, db.purge_deleted_rows, - self.context, age_in_days=10) diff --git a/cinder/tests/unit/db/test_qos_specs.py b/cinder/tests/unit/db/test_qos_specs.py deleted file mode 100644 index dcaff931f..000000000 --- a/cinder/tests/unit/db/test_qos_specs.py +++ /dev/null @@ -1,206 +0,0 @@ -# Copyright (C) 2013 eBay Inc. -# Copyright (C) 2013 OpenStack Foundation -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -"""Tests for quality_of_service_specs table.""" - - -import time - -from cinder import context -from cinder import db -from cinder import exception -from cinder import test -from cinder.tests.unit import fake_constants as fake -from cinder.volume import volume_types - - -def fake_qos_specs_get_by_name(context, name, session=None, inactive=False): - pass - - -class QualityOfServiceSpecsTableTestCase(test.TestCase): - """Test case for QualityOfServiceSpecs model.""" - - def setUp(self): - super(QualityOfServiceSpecsTableTestCase, self).setUp() - self.ctxt = context.RequestContext(user_id=fake.USER_ID, - project_id=fake.PROJECT_ID, - is_admin=True) - - def _create_qos_specs(self, name, consumer='back-end', values=None): - """Create a transfer object.""" - if values is None: - values = {'key1': 'value1', 'key2': 'value2'} - - specs = {'name': name, - 'consumer': consumer, - 'specs': values} - return db.qos_specs_create(self.ctxt, specs)['id'] - - def test_qos_specs_create(self): - # If there is qos specs with the same name exists, - # a QoSSpecsExists exception will be raised. 
- name = 'QoSSpecsCreationTest' - self._create_qos_specs(name) - self.assertRaises(exception.QoSSpecsExists, - db.qos_specs_create, self.ctxt, dict(name=name)) - - specs_id = self._create_qos_specs('NewName') - query_id = db.qos_specs_get_by_name( - self.ctxt, 'NewName')['id'] - self.assertEqual(specs_id, query_id) - - def test_qos_specs_get(self): - qos_spec = {'name': 'Name1', - 'consumer': 'front-end', - 'specs': {'key1': 'foo', 'key2': 'bar'}} - specs_id = self._create_qos_specs(qos_spec['name'], - qos_spec['consumer'], - qos_spec['specs']) - - fake_id = fake.WILL_NOT_BE_FOUND_ID - self.assertRaises(exception.QoSSpecsNotFound, - db.qos_specs_get, self.ctxt, fake_id) - - specs_returned = db.qos_specs_get(self.ctxt, specs_id) - qos_spec['id'] = specs_id - self.assertDictEqual(qos_spec, specs_returned) - - def test_qos_specs_get_all(self): - qos_list = [ - {'name': 'Name1', - 'consumer': 'front-end', - 'specs': {'key1': 'v1', 'key2': 'v2'}}, - {'name': 'Name2', - 'consumer': 'back-end', - 'specs': {'key1': 'v3', 'key2': 'v4'}}, - {'name': 'Name3', - 'consumer': 'back-end', - 'specs': {'key1': 'v5', 'key2': 'v6'}}] - - for qos in qos_list: - qos['id'] = self._create_qos_specs(qos['name'], - qos['consumer'], - qos['specs']) - - specs_list_returned = db.qos_specs_get_all(self.ctxt) - self.assertEqual(len(qos_list), len(specs_list_returned), - "Unexpected number of qos specs records") - - for expected_qos in qos_list: - self.assertIn(expected_qos, specs_list_returned) - - def test_qos_specs_delete(self): - name = str(int(time.time())) - specs_id = self._create_qos_specs(name) - - db.qos_specs_delete(self.ctxt, specs_id) - self.assertRaises(exception.QoSSpecsNotFound, - db.qos_specs_get, - self.ctxt, specs_id) - - def test_qos_specs_item_delete(self): - name = str(int(time.time())) - value = dict(foo='Foo', bar='Bar') - specs_id = self._create_qos_specs(name, 'front-end', value) - - del value['foo'] - expected = {'name': name, - 'id': specs_id, - 'consumer': 
'front-end', - 'specs': value} - db.qos_specs_item_delete(self.ctxt, specs_id, 'foo') - specs = db.qos_specs_get(self.ctxt, specs_id) - self.assertDictEqual(expected, specs) - - def test_associate_type_with_qos(self): - self.assertRaises(exception.VolumeTypeNotFound, - db.volume_type_qos_associate, - self.ctxt, fake.VOLUME_ID, fake.QOS_SPEC_ID) - type_id = volume_types.create(self.ctxt, 'TypeName')['id'] - specs_id = self._create_qos_specs('FakeQos') - db.volume_type_qos_associate(self.ctxt, type_id, specs_id) - res = db.qos_specs_associations_get(self.ctxt, specs_id) - self.assertEqual(1, len(res)) - self.assertEqual(type_id, res[0]['id']) - self.assertEqual(specs_id, res[0]['qos_specs_id']) - - def test_qos_associations_get(self): - self.assertRaises(exception.QoSSpecsNotFound, - db.qos_specs_associations_get, - self.ctxt, fake.WILL_NOT_BE_FOUND_ID) - - type_id = volume_types.create(self.ctxt, 'TypeName')['id'] - specs_id = self._create_qos_specs('FakeQos') - res = db.qos_specs_associations_get(self.ctxt, specs_id) - self.assertEqual(0, len(res)) - - db.volume_type_qos_associate(self.ctxt, type_id, specs_id) - res = db.qos_specs_associations_get(self.ctxt, specs_id) - self.assertEqual(1, len(res)) - self.assertEqual(type_id, res[0]['id']) - self.assertEqual(specs_id, res[0]['qos_specs_id']) - - type0_id = volume_types.create(self.ctxt, 'Type0Name')['id'] - db.volume_type_qos_associate(self.ctxt, type0_id, specs_id) - res = db.qos_specs_associations_get(self.ctxt, specs_id) - self.assertEqual(2, len(res)) - self.assertEqual(specs_id, res[0]['qos_specs_id']) - self.assertEqual(specs_id, res[1]['qos_specs_id']) - - def test_qos_specs_disassociate(self): - type_id = volume_types.create(self.ctxt, 'TypeName')['id'] - specs_id = self._create_qos_specs('FakeQos') - db.volume_type_qos_associate(self.ctxt, type_id, specs_id) - res = db.qos_specs_associations_get(self.ctxt, specs_id) - self.assertEqual(type_id, res[0]['id']) - self.assertEqual(specs_id, 
res[0]['qos_specs_id']) - - db.qos_specs_disassociate(self.ctxt, specs_id, type_id) - res = db.qos_specs_associations_get(self.ctxt, specs_id) - self.assertEqual(0, len(res)) - res = db.volume_type_get(self.ctxt, type_id) - self.assertIsNone(res['qos_specs_id']) - - def test_qos_specs_disassociate_all(self): - specs_id = self._create_qos_specs('FakeQos') - type1_id = volume_types.create(self.ctxt, 'Type1Name')['id'] - type2_id = volume_types.create(self.ctxt, 'Type2Name')['id'] - type3_id = volume_types.create(self.ctxt, 'Type3Name')['id'] - db.volume_type_qos_associate(self.ctxt, type1_id, specs_id) - db.volume_type_qos_associate(self.ctxt, type2_id, specs_id) - db.volume_type_qos_associate(self.ctxt, type3_id, specs_id) - - res = db.qos_specs_associations_get(self.ctxt, specs_id) - self.assertEqual(3, len(res)) - - db.qos_specs_disassociate_all(self.ctxt, specs_id) - res = db.qos_specs_associations_get(self.ctxt, specs_id) - self.assertEqual(0, len(res)) - - def test_qos_specs_update(self): - name = 'FakeName' - specs_id = self._create_qos_specs(name) - value = {'consumer': 'both', - 'specs': {'key2': 'new_value2', 'key3': 'value3'}} - - self.assertRaises(exception.QoSSpecsNotFound, db.qos_specs_update, - self.ctxt, fake.WILL_NOT_BE_FOUND_ID, value) - db.qos_specs_update(self.ctxt, specs_id, value) - specs = db.qos_specs_get(self.ctxt, specs_id) - self.assertEqual('new_value2', specs['specs']['key2']) - self.assertEqual('value3', specs['specs']['key3']) - self.assertEqual('both', specs['consumer']) diff --git a/cinder/tests/unit/db/test_transfers.py b/cinder/tests/unit/db/test_transfers.py deleted file mode 100644 index 64b811284..000000000 --- a/cinder/tests/unit/db/test_transfers.py +++ /dev/null @@ -1,120 +0,0 @@ -# Copyright (C) 2013 Hewlett-Packard Development Company, L.P. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. 
You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -"""Tests for transfers table.""" - -from cinder import context -from cinder import db -from cinder import exception -from cinder import test -from cinder.tests.unit import fake_constants as fake -from cinder.tests.unit import utils - - -class TransfersTableTestCase(test.TestCase): - """Test case for transfers model.""" - - def setUp(self): - super(TransfersTableTestCase, self).setUp() - self.ctxt = context.RequestContext(user_id=fake.USER_ID, - project_id=fake.PROJECT_ID) - - def _create_transfer(self, volume_id=None): - """Create a transfer object.""" - transfer = {'display_name': 'display_name', - 'salt': 'salt', - 'crypt_hash': 'crypt_hash'} - if volume_id is not None: - transfer['volume_id'] = volume_id - return db.transfer_create(self.ctxt, transfer)['id'] - - def test_transfer_create(self): - # If the volume_id is Null a KeyError exception will be raised. 
- self.assertRaises(KeyError, - self._create_transfer) - - volume_id = utils.create_volume(self.ctxt)['id'] - self._create_transfer(volume_id) - - def test_transfer_create_not_available(self): - volume_id = utils.create_volume(self.ctxt, size=1, - status='notavailable')['id'] - self.assertRaises(exception.InvalidVolume, - self._create_transfer, - volume_id) - - def test_transfer_get(self): - volume_id1 = utils.create_volume(self.ctxt)['id'] - xfer_id1 = self._create_transfer(volume_id1) - - xfer = db.transfer_get(self.ctxt, xfer_id1) - self.assertEqual(volume_id1, xfer.volume_id, "Unexpected volume_id") - - nctxt = context.RequestContext(user_id='new_user_id', - project_id='new_project_id') - self.assertRaises(exception.TransferNotFound, - db.transfer_get, nctxt, xfer_id1) - - xfer = db.transfer_get(nctxt.elevated(), xfer_id1) - self.assertEqual(volume_id1, xfer.volume_id, "Unexpected volume_id") - - def test_transfer_get_all(self): - volume_id1 = utils.create_volume(self.ctxt)['id'] - volume_id2 = utils.create_volume(self.ctxt)['id'] - self._create_transfer(volume_id1) - self._create_transfer(volume_id2) - - self.assertRaises(exception.NotAuthorized, - db.transfer_get_all, - self.ctxt) - xfer = db.transfer_get_all(context.get_admin_context()) - self.assertEqual(2, len(xfer), "Unexpected number of transfer records") - - xfer = db.transfer_get_all_by_project(self.ctxt, self.ctxt.project_id) - self.assertEqual(2, len(xfer), "Unexpected number of transfer records") - - nctxt = context.RequestContext(user_id=fake.USER2_ID, - project_id=fake.PROJECT2_ID) - self.assertRaises(exception.NotAuthorized, - db.transfer_get_all_by_project, - nctxt, self.ctxt.project_id) - xfer = db.transfer_get_all_by_project(nctxt.elevated(), - self.ctxt.project_id) - self.assertEqual(2, len(xfer), "Unexpected number of transfer records") - - def test_transfer_destroy(self): - volume_id = utils.create_volume(self.ctxt)['id'] - volume_id2 = utils.create_volume(self.ctxt)['id'] - xfer_id1 = 
self._create_transfer(volume_id) - xfer_id2 = self._create_transfer(volume_id2) - - xfer = db.transfer_get_all(context.get_admin_context()) - self.assertEqual(2, len(xfer), "Unexpected number of transfer records") - self.assertFalse(xfer[0]['deleted'], "Deleted flag is set") - - db.transfer_destroy(self.ctxt, xfer_id1) - xfer = db.transfer_get_all(context.get_admin_context()) - self.assertEqual(1, len(xfer), "Unexpected number of transfer records") - self.assertEqual(xfer[0]['id'], xfer_id2, - "Unexpected value for Transfer id") - - nctxt = context.RequestContext(user_id=fake.USER2_ID, - project_id=fake.PROJECT2_ID) - self.assertRaises(exception.TransferNotFound, - db.transfer_destroy, nctxt, xfer_id2) - - db.transfer_destroy(nctxt.elevated(), xfer_id2) - xfer = db.transfer_get_all(context.get_admin_context()) - self.assertEqual(0, len(xfer), "Unexpected number of transfer records") diff --git a/cinder/tests/unit/db/test_volume_type.py b/cinder/tests/unit/db/test_volume_type.py deleted file mode 100644 index e250129d0..000000000 --- a/cinder/tests/unit/db/test_volume_type.py +++ /dev/null @@ -1,119 +0,0 @@ -# Copyright 2016 Intel Corp. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -"""Tests for volume type.""" - -from cinder import context -from cinder import db -from cinder import exception -from cinder import test -from cinder.tests.unit import fake_constants as fake -from cinder.tests.unit import utils -from cinder.volume import volume_types - - -class VolumeTypeTestCase(test.TestCase): - """Test cases for volume type.""" - - def setUp(self): - super(VolumeTypeTestCase, self).setUp() - self.ctxt = context.RequestContext(user_id=fake.USER_ID, - project_id=fake.PROJECT_ID, - is_admin=True) - - def test_volume_type_delete(self): - volume_type = db.volume_type_create(self.ctxt, {'name': - 'fake volume type'}) - volume_types.destroy(self.ctxt, volume_type['id']) - self.assertRaises(exception.VolumeTypeNotFound, - volume_types.get_by_name_or_id, self.ctxt, - volume_type['id']) - - def test_volume_type_delete_with_volume_in_use(self): - volume_type = db.volume_type_create(self.ctxt, {'name': - 'fake volume type'}) - volume = db.volume_create(self.ctxt, {'volume_type_id': - volume_type['id']}) - self.assertRaises(exception.VolumeTypeInUse, volume_types.destroy, - self.ctxt, volume_type['id']) - db.volume_destroy(self.ctxt, volume['id']) - volume_types.destroy(self.ctxt, volume_type['id']) - - def test_volume_type_delete_with_group_in_use(self): - volume_type = db.volume_type_create(self.ctxt, {'name': - 'fake volume type'}) - - group = db.group_create(self.ctxt, {}) - db.group_volume_type_mapping_create(self.ctxt, group['id'], - volume_type['id']) - self.assertRaises(exception.VolumeTypeInUse, volume_types.destroy, - self.ctxt, volume_type['id']) - db.group_destroy(self.ctxt, group['id']) - volume_types.destroy(self.ctxt, volume_type['id']) - - def test_volume_type_delete_with_consistencygroups_in_use(self): - volume_type = db.volume_type_create(self.ctxt, {'name': - 'fake volume type'}) - consistency_group1 = db.consistencygroup_create(self.ctxt, - {'volume_type_id': - volume_type['id']}) - consistency_group2 = 
db.consistencygroup_create(self.ctxt, - {'volume_type_id': - volume_type['id']}) - self.assertRaises(exception.VolumeTypeInUse, volume_types.destroy, - self.ctxt, volume_type['id']) - db.consistencygroup_destroy(self.ctxt, consistency_group1['id']) - self.assertRaises(exception.VolumeTypeInUse, volume_types.destroy, - self.ctxt, volume_type['id']) - db.consistencygroup_destroy(self.ctxt, consistency_group2['id']) - volume_types.destroy(self.ctxt, volume_type['id']) - - def test_volume_type_update(self): - vol_type_ref = volume_types.create(self.ctxt, 'fake volume type') - updates = dict(name='test_volume_type_update', - description=None, - is_public=None) - db.volume_type_update(self.ctxt, vol_type_ref.id, updates) - updated_vol_type = db.volume_type_get(self.ctxt, vol_type_ref.id) - self.assertEqual('test_volume_type_update', updated_vol_type['name']) - volume_types.destroy(self.ctxt, vol_type_ref.id) - - def test_volume_type_get_with_qos_specs(self): - """Ensure volume types get can load qos_specs.""" - qos_data = {'name': 'qos', 'consumer': 'front-end', - 'specs': {'key': 'value', 'key2': 'value2'}} - qos = utils.create_qos(self.ctxt, **qos_data) - vol_type = db.volume_type_create(self.ctxt, - {'name': 'my-vol-type', - 'qos_specs_id': qos['id']}) - - db_vol_type = db.volume_type_get(self.ctxt, vol_type.id, - expected_fields=['qos_specs']) - - expected = {('QoS_Specs_Name', 'qos'), ('consumer', 'front-end'), - ('key', 'value'), ('key2', 'value2')} - actual = {(spec.key, spec.value) for spec in db_vol_type['qos_specs']} - self.assertEqual(expected, actual) - - def test_volume_type_get_with_projects(self): - """Ensure volume types get can load projects.""" - projects = [fake.PROJECT_ID, fake.PROJECT2_ID, fake.PROJECT3_ID] - vol_type = db.volume_type_create(self.ctxt, - {'name': 'my-vol-type'}, - projects=projects) - - db_vol_type = db.volume_type_get(self.ctxt, vol_type.id, - expected_fields=['projects']) - - self.assertEqual(set(projects), 
set(db_vol_type['projects'])) diff --git a/cinder/tests/unit/fake_cluster.py b/cinder/tests/unit/fake_cluster.py deleted file mode 100644 index 7ea3395a4..000000000 --- a/cinder/tests/unit/fake_cluster.py +++ /dev/null @@ -1,70 +0,0 @@ -# Copyright (c) 2016 Red Hat, Inc. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from oslo_utils import timeutils -from oslo_versionedobjects import fields - -from cinder.db.sqlalchemy import models -from cinder import objects - - -def cluster_basic_fields(): - """Return basic fields for a cluster.""" - return { - 'id': 1, - 'created_at': timeutils.utcnow(with_timezone=False), - 'deleted': False, - 'name': 'cluster_name', - 'binary': 'cinder-volume', - 'race_preventer': 0, - } - - -def fake_cluster_orm(**updates): - """Create a fake ORM cluster instance.""" - db_cluster = fake_db_cluster(**updates) - del db_cluster['services'] - cluster = models.Cluster(**db_cluster) - return cluster - - -def fake_db_cluster(**updates): - """Helper method for fake_cluster_orm. - - Creates a complete dictionary filling missing fields based on the Cluster - field definition (defaults and nullable). - """ - db_cluster = cluster_basic_fields() - - for name, field in objects.Cluster.fields.items(): - if name in db_cluster: - continue - if field.default != fields.UnspecifiedDefault: - db_cluster[name] = field.default - elif field.nullable: - db_cluster[name] = None - else: - raise Exception('fake_db_cluster needs help with %s.' 
% name) - - if updates: - db_cluster.update(updates) - - return db_cluster - - -def fake_cluster_ovo(context, **updates): - """Create a fake Cluster versioned object.""" - return objects.Cluster._from_db_object(context, objects.Cluster(), - fake_cluster_orm(**updates)) diff --git a/cinder/tests/unit/fake_constants.py b/cinder/tests/unit/fake_constants.py deleted file mode 100644 index c14367116..000000000 --- a/cinder/tests/unit/fake_constants.py +++ /dev/null @@ -1,91 +0,0 @@ -# Copyright 2016 Red Hat, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -ACTION_FAILED_ID = 'f26f181d-7891-4720-b022-b074ec1733ef' -ACTION2_FAILED_ID = '02f53bd8-3514-485b-ba60-2722ef09c016' -ALREADY_EXISTS_ID = '8f7495fe-5e44-4f33-81af-4b28e9b2952f' -ATTACHMENT_ID = '4dc3bb12-ad75-41b9-ab2c-7609e743e600' -ATTACHMENT2_ID = 'ac2439fe-c071-468f-94e3-547bedb95de0' -BACKUP_ID = '707844eb-6d8a-4ac1-8b98-618e1c0b3a3a' -BACKUP2_ID = '40e8462a-c9d8-462f-a810-b732a1790535' -BACKUP3_ID = '30ae7641-017e-4221-a642-855687c8bd71' -BACKUP4_ID = '23f8605b-8273-4f49-9b3d-1eeca81a63c2' -BACKUP5_ID = '50c97b22-51ea-440b-8d01-ded20a55d7e0' -CGSNAPSHOT_ID = '5e34cce3-bc97-46b7-a127-5cfb95ef445d' -CGSNAPSHOT_NAME = 'cgsnapshot-5e34cce3-bc97-46b7-a127-5cfb95ef445d' -CGSNAPSHOT2_ID = '5c36d762-d6ba-4f04-bd07-88a298cc410a' -CGSNAPSHOT3_ID = '5f392156-fc03-492a-9cb8-e46a7eedaf33' -CONSISTENCY_GROUP_ID = 'f18abf73-79ee-4f2b-8d4f-1c044148f117' -CONSISTENCY_GROUP2_ID = '8afc8952-9dce-4228-9f8a-706c5cb5fc82' -ENCRYPTION_KEY_ID = 'e8387001-745d-45d0-9e4e-0473815ef09a' -IMAGE_ID = 'e79161cd-5f9d-4007-8823-81a807a64332' -INSTANCE_ID = 'fa617131-cdbc-45dc-afff-f21f17ae054e' -IN_USE_ID = '8ee42073-4ac2-4099-8c7a-d416630e6aee' -INVALID_ID = 'f45dcab0-ff2a-46ec-b3b7-74d6f4bb0027' -KEY_ID = '9112ecec-fb9d-4299-a948-ffb52650a5b5' -OBJECT_ID = 'd7c5b12f-d57d-4762-99ab-db5f62ae3569' -OBJECT2_ID = '51f5b8fa-c13c-48ba-8c9d-b470466cbc9c' -OBJECT3_ID = '7bf5ffa9-18a2-4b64-aab4-0798b53ee4e7' -PROJECT_ID = '89afd400-b646-4bbc-b12b-c0a4d63e5bd3' -PROJECT2_ID = '452ebfbc-55d9-402a-87af-65061916c24b' -PROJECT3_ID = 'f6c912d7-bf30-4b12-af81-a9e0b2f85f85' -PROVIDER_ID = '60087173-e899-470a-9e3a-ba4cffa3e3e3' -PROVIDER2_ID = '1060eccd-64bb-4ed2-86ce-aeaf135a97b8' -PROVIDER3_ID = '63736819-1c95-440e-a873-b9d685afede5' -PROVIDER4_ID = '7db06e02-26b6-4282-945d-7f6c9347a7b0' -QOS_SPEC_ID = 'fc0f7527-79d7-44be-a4f6-3b24db8e11ac' -QOS_SPEC2_ID = 'c561b69d-98d9-478c-815b-6de11f5a09c9' -QOS_SPEC3_ID = '6034720b-f586-4302-a1eb-fe30672069f6' -RAISE_ID = 'a56762e1-4a30-4008-b997-5a438ec9c457' 
-SNAPSHOT_ID = '253b2878-ec60-4793-ad19-e65496ec7aab' -SNAPSHOT_NAME = 'snapshot-253b2878-ec60-4793-ad19-e65496ec7aab' -SNAPSHOT2_ID = 'c02c44fa-5665-4a26-9e66-2ebaf25e5d2d' -SNAPSHOT3_ID = '454f9970-1e05-4193-a3ed-5c390c3faa18' -UPDATE_FAILED_ID = '110b29df-5e0f-4dbb-840c-ef5963d06933' -USER_ID = 'c853ca26-e8ea-4797-8a52-ee124a013d0e' -USER2_ID = '95f7b7ed-bd7f-426e-b05f-f1ffeb4f09df' -VOLUME_ID = '1e5177e7-95e5-4a0f-b170-e45f4b469f6a' -VOLUME_NAME = 'volume-1e5177e7-95e5-4a0f-b170-e45f4b469f6a' -VOLUME2_ID = '43a09914-e495-475f-b862-0bda3c8918e4' -VOLUME2_NAME = 'volume-43a09914-e495-475f-b862-0bda3c8918e4' -VOLUME3_ID = '1b1cf149-219c-44ac-aee3-13121a7f86a7' -VOLUME3_NAME = 'volume-1b1cf149-219c-44ac-aee3-13121a7f86a7' -VOLUME4_ID = '904d4602-4301-4e9b-8df1-8133b51904e6' -VOLUME4_NAME = 'volume-904d4602-4301-4e9b-8df1-8133b51904e6' -VOLUME5_ID = '17b0e01d-3d2d-4c31-a1aa-c962420bc3dc' -VOLUME5_NAME = 'volume-17b0e01d-3d2d-4c31-a1aa-c962420bc3dc' -VOLUME6_ID = '84375761-46e0-4df2-a567-02f0113428d7' -VOLUME_NAME_ID = 'ee73d33c-52ed-4cb7-a8a9-2687c1205c22' -VOLUME2_NAME_ID = '63fbdd21-03bc-4309-b867-2893848f86af' -VOLUME_TYPE_ID = '4e9e6d23-eed0-426d-b90a-28f87a94b6fe' -VOLUME_TYPE2_ID = 'c4daaf47-c530-4901-b28e-f5f0a359c4e6' -VOLUME_TYPE3_ID = 'a3d55d15-eeb1-4816-ada9-bf82decc09b3' -VOLUME_TYPE4_ID = '69943076-754d-4da8-8718-0b0117e9cab1' -VOLUME_TYPE5_ID = '1c450d81-8aab-459e-b338-a6569139b835' -WILL_NOT_BE_FOUND_ID = 'ce816f65-c5aa-46d6-bd62-5272752d584a' -GROUP_TYPE_ID = '29514915-5208-46ab-9ece-1cc4688ad0c1' -GROUP_TYPE2_ID = 'f8645498-1323-47a2-9442-5c57724d2e3c' -GROUP_TYPE3_ID = '1b7915f4-b899-4510-9eff-bd67508c3334' -GROUP_ID = '9a965cc6-ee3a-468d-a721-cebb193f696f' -GROUP2_ID = '40a85639-abc3-4461-9230-b131abd8ee07' -GROUP3_ID = '1078414b-380c-474c-bf76-57e2c235841c' -GROUP_SNAPSHOT_ID = '1e2ab152-44f0-11e6-819f-000c29d19d84' -GROUP_SNAPSHOT2_ID = '33e2ff04-44f0-11e6-819f-000c29d19d84' - -# I don't care what it's used for, I just want a damn UUID -UUID1 = 
'84d0c5f7-2349-401c-8672-f76214d13cab' -UUID2 = '25406d50-e645-4e62-a9ef-1f53f9cba13f' -UUID3 = '29c80662-3a9f-4844-a585-55cd3cd180b5' -UUID4 = '4cd72b2b-5a4f-4f24-93dc-7c0212002916' -UUID5 = '0a574d83-cacf-42b9-8f9f-8f4faa6d4746' diff --git a/cinder/tests/unit/fake_group.py b/cinder/tests/unit/fake_group.py deleted file mode 100644 index be44fceb5..000000000 --- a/cinder/tests/unit/fake_group.py +++ /dev/null @@ -1,82 +0,0 @@ -# Copyright 2016 EMC Corporation -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from oslo_versionedobjects import fields - -from cinder import objects -from cinder.tests.unit import fake_constants as fake - - -def fake_db_group(**updates): - db_group = { - 'id': fake.GROUP_ID, - 'name': 'group-1', - 'status': 'available', - 'user_id': fake.USER_ID, - 'project_id': fake.PROJECT_ID, - 'group_type_id': fake.GROUP_TYPE_ID, - 'group_snapshot_id': None, - 'source_group_id': None, - } - - for name, field in objects.Group.fields.items(): - if name in db_group: - continue - if field.nullable: - db_group[name] = None - elif field.default != fields.UnspecifiedDefault: - db_group[name] = field.default - else: - raise Exception('fake_db_group needs help with %s.' 
% name) - - if updates: - db_group.update(updates) - - return db_group - - -def fake_db_group_type(**updates): - db_group_type = { - 'id': fake.GROUP_TYPE_ID, - 'name': 'type-1', - 'description': 'A fake group type', - 'is_public': True, - 'projects': [], - 'group_specs': {}, - } - - for name, field in objects.GroupType.fields.items(): - if name in db_group_type: - continue - if field.nullable: - db_group_type[name] = None - elif field.default != fields.UnspecifiedDefault: - db_group_type[name] = field.default - else: - raise Exception('fake_db_group_type needs help with %s.' % name) - - if updates: - db_group_type.update(updates) - - return db_group_type - - -def fake_group_obj(context, **updates): - return objects.Group._from_db_object( - context, objects.Group(), fake_db_group(**updates)) - - -def fake_group_type_obj(context, **updates): - return objects.GroupType._from_db_object( - context, objects.GroupType(), fake_db_group_type(**updates)) diff --git a/cinder/tests/unit/fake_group_snapshot.py b/cinder/tests/unit/fake_group_snapshot.py deleted file mode 100644 index 0a880ef41..000000000 --- a/cinder/tests/unit/fake_group_snapshot.py +++ /dev/null @@ -1,51 +0,0 @@ -# Copyright 2016 EMC Corporation -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -from oslo_versionedobjects import fields - -from cinder import objects -from cinder.tests.unit import fake_constants as fake - - -def fake_db_group_snapshot(**updates): - db_group_snapshot = { - 'id': fake.GROUP_SNAPSHOT_ID, - 'name': 'group-1', - 'status': 'available', - 'user_id': fake.USER_ID, - 'project_id': fake.PROJECT_ID, - 'group_type_id': fake.GROUP_TYPE_ID, - 'group_id': fake.GROUP_ID, - } - - for name, field in objects.GroupSnapshot.fields.items(): - if name in db_group_snapshot: - continue - if field.nullable: - db_group_snapshot[name] = None - elif field.default != fields.UnspecifiedDefault: - db_group_snapshot[name] = field.default - else: - raise Exception('fake_db_group_snapshot needs help with %s.' - % name) - - if updates: - db_group_snapshot.update(updates) - - return db_group_snapshot - - -def fake_group_snapshot_obj(context, **updates): - return objects.GroupSnapshot._from_db_object( - context, objects.GroupSnapshot(), fake_db_group_snapshot(**updates)) diff --git a/cinder/tests/unit/fake_notifier.py b/cinder/tests/unit/fake_notifier.py deleted file mode 100644 index 7e07a263b..000000000 --- a/cinder/tests/unit/fake_notifier.py +++ /dev/null @@ -1,83 +0,0 @@ -# Copyright 2014 Red Hat, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -import collections -import functools - -import json -import oslo_messaging as messaging - -from cinder import rpc - - -FakeMessage = collections.namedtuple('Message', - ['publisher_id', 'priority', - 'event_type', 'payload']) - - -class FakeNotifier(object): - - def __init__(self, transport, publisher_id=None, serializer=None, - driver=None, topic=None, retry=None): - self.transport = transport - self.publisher_id = publisher_id - for priority in ['debug', 'info', 'warn', 'error', 'critical']: - setattr(self, priority, - functools.partial(self._notify, priority.upper())) - self._serializer = serializer or messaging.serializer.NoOpSerializer() - self._topic = topic - self.retry = retry - self.notifications = [] - - def prepare(self, publisher_id=None): - if publisher_id is None: - publisher_id = self.publisher_id - return self.__class__(self.transport, publisher_id, self._serializer) - - def get_notification_count(self): - return len(self.notifications) - - def _notify(self, priority, ctxt, event_type, payload): - payload = self._serializer.serialize_entity(ctxt, payload) - # NOTE(sileht): simulate the kombu serializer - # this permit to raise an exception if something have not - # been serialized correctly - json.dumps(payload) - msg = dict(publisher_id=self.publisher_id, - priority=priority, - event_type=event_type, - payload=payload) - self.notifications.append(msg) - - def reset(self): - del self.notifications[:] - - -def mock_notifier(testcase): - testcase.mock_object(messaging, 'Notifier', FakeNotifier) - if rpc.NOTIFIER: - serializer = getattr(rpc.NOTIFIER, '_serializer', None) - testcase.mock_object(rpc, 'NOTIFIER', - FakeNotifier(rpc.NOTIFIER.transport, - rpc.NOTIFIER.publisher_id, - serializer=serializer)) - - -def get_fake_notifier(service=None, host=None, publisher_id=None): - if not publisher_id: - publisher_id = "%s.%s" % (service, host) - serializer = getattr(rpc.NOTIFIER, '_serializer', None) - notifier = FakeNotifier(None, 
publisher_id=publisher_id, - serializer=serializer) - return notifier.prepare(publisher_id=publisher_id) diff --git a/cinder/tests/unit/fake_objects.py b/cinder/tests/unit/fake_objects.py deleted file mode 100644 index e1787ff0d..000000000 --- a/cinder/tests/unit/fake_objects.py +++ /dev/null @@ -1,81 +0,0 @@ -# Copyright (c) 2016 Red Hat Inc. -# Copyright (c) 2016 Intel Corp. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from oslo_utils import versionutils - -from cinder import objects - - -@objects.base.CinderObjectRegistry.register_if(False) -class ChildObject(objects.base.CinderObject): - VERSION = '1.2' - - fields = { - 'scheduled_at': objects.base.fields.DateTimeField(nullable=True), - 'uuid': objects.base.fields.UUIDField(), - 'text': objects.base.fields.StringField(nullable=True), - 'integer': objects.base.fields.IntegerField(nullable=True), - } - - def obj_make_compatible(self, primitive, target_version): - super(ChildObject, self).obj_make_compatible(primitive, - target_version) - target_version = versionutils.convert_version_to_tuple(target_version) - if target_version < (1, 1): - primitive.pop('text', None) - if target_version < (1, 2): - primitive.pop('integer', None) - - -@objects.base.CinderObjectRegistry.register_if(False) -class ParentObject(objects.base.CinderObject): - VERSION = '1.1' - - fields = { - 'uuid': objects.base.fields.UUIDField(), - 'child': objects.base.fields.ObjectField('ChildObject', nullable=True), - 
'scheduled_at': objects.base.fields.DateTimeField(nullable=True), - } - - def obj_make_compatible(self, primitive, target_version): - super(ParentObject, self).obj_make_compatible(primitive, - target_version) - target_version = versionutils.convert_version_to_tuple(target_version) - if target_version < (1, 1): - primitive.pop('scheduled_at', None) - - -@objects.base.CinderObjectRegistry.register_if(False) -class ParentObjectList(objects.base.CinderObject, objects.base.ObjectListBase): - VERSION = ParentObject.VERSION - - fields = { - 'objects': objects.base.fields.ListOfObjectsField('ParentObject'), - } - - -class MyHistory(objects.base.CinderObjectVersionsHistory): - linked_objects = {'ParentObject': 'ParentObjectList'} - - def __init__(self): - self.versions = ['1.0'] - self['1.0'] = {'ChildObject': '1.0'} - self.add('1.1', {'ChildObject': '1.1'}) - self.add('1.2', {'ParentObject': '1.0'}) - self.add('1.3', {'ParentObjectList': '1.0'}) - self.add('1.4', {'ParentObject': '1.1'}) - self.add('1.5', {'ParentObjectList': '1.1'}) - self.add('1.6', {'ChildObject': '1.2'}) diff --git a/cinder/tests/unit/fake_service.py b/cinder/tests/unit/fake_service.py deleted file mode 100644 index 455db247c..000000000 --- a/cinder/tests/unit/fake_service.py +++ /dev/null @@ -1,64 +0,0 @@ -# Copyright 2015 Intel Corp. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -from oslo_utils import timeutils -from oslo_versionedobjects import fields - -from cinder.db.sqlalchemy import models -from cinder import objects - - -def fake_service_orm(**updates): - """Create a fake ORM service instance.""" - db_service = fake_db_service(**updates) - service = models.Service(**db_service) - return service - - -def fake_db_service(**updates): - NOW = timeutils.utcnow().replace(microsecond=0) - db_service = { - 'created_at': NOW, - 'updated_at': NOW, - 'deleted_at': None, - 'deleted': False, - 'id': 123, - 'host': 'fake-host', - 'binary': 'fake-service', - 'topic': 'fake-service-topic', - 'report_count': 1, - 'disabled': False, - 'disabled_reason': None, - 'modified_at': NOW, - } - - for name, field in objects.Service.fields.items(): - if name in db_service: - continue - if field.nullable: - db_service[name] = None - elif field.default != fields.UnspecifiedDefault: - db_service[name] = field.default - else: - raise Exception('fake_db_service needs help with %s.' % name) - - if updates: - db_service.update(updates) - - return db_service - - -def fake_service_obj(context, **updates): - return objects.Service._from_db_object(context, objects.Service(), - fake_db_service(**updates)) diff --git a/cinder/tests/unit/fake_snapshot.py b/cinder/tests/unit/fake_snapshot.py deleted file mode 100644 index d7459c516..000000000 --- a/cinder/tests/unit/fake_snapshot.py +++ /dev/null @@ -1,57 +0,0 @@ -# Copyright 2015 SimpliVity Corp. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the -# License for the specific language governing permissions and limitations -# under the License. - -from oslo_versionedobjects import fields - -from cinder.objects import fields as c_fields -from cinder.objects import snapshot -from cinder.tests.unit import fake_constants as fake - - -def fake_db_snapshot(**updates): - db_snapshot = { - 'id': fake.SNAPSHOT_ID, - 'volume_id': fake.VOLUME_ID, - 'status': c_fields.SnapshotStatus.CREATING, - 'progress': '0%', - 'volume_size': 1, - 'display_name': 'fake_name', - 'display_description': 'fake_description', - 'metadata': {}, - 'snapshot_metadata': [], - } - - for name, field in snapshot.Snapshot.fields.items(): - if name in db_snapshot: - continue - if field.nullable: - db_snapshot[name] = None - elif field.default != fields.UnspecifiedDefault: - db_snapshot[name] = field.default - else: - raise Exception('fake_db_snapshot needs help with %s' % name) - - if updates: - db_snapshot.update(updates) - - return db_snapshot - - -def fake_snapshot_obj(context, **updates): - expected_attrs = updates.pop('expected_attrs', None) or [] - if 'volume' in updates and 'volume' not in expected_attrs: - expected_attrs.append('volume') - return snapshot.Snapshot._from_db_object(context, snapshot.Snapshot(), - fake_db_snapshot(**updates), - expected_attrs=expected_attrs) diff --git a/cinder/tests/unit/fake_utils.py b/cinder/tests/unit/fake_utils.py deleted file mode 100644 index a535453ea..000000000 --- a/cinder/tests/unit/fake_utils.py +++ /dev/null @@ -1,88 +0,0 @@ -# Copyright (c) 2011 Citrix Systems, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. 
You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -"""This modules stubs out functions in cinder.utils.""" - -import re - -from eventlet import greenthread -import six - -_fake_execute_repliers = [] -_fake_execute_log = [] - - -def fake_execute_get_log(): - return _fake_execute_log - - -def fake_execute_clear_log(): - global _fake_execute_log - _fake_execute_log = [] - - -def fake_execute_set_repliers(repliers): - """Allows the client to configure replies to commands.""" - global _fake_execute_repliers - _fake_execute_repliers = repliers - - -def fake_execute_default_reply_handler(*ignore_args, **ignore_kwargs): - """A reply handler for commands that haven't been added to the reply list. - - Returns empty strings for stdout and stderr. - - """ - return '', '' - - -def fake_execute(*cmd_parts, **kwargs): - """This function stubs out execute. - - It optionally executes a preconfigued function to return expected data. 
- - """ - global _fake_execute_repliers - - process_input = kwargs.get('process_input', None) - check_exit_code = kwargs.get('check_exit_code', 0) - delay_on_retry = kwargs.get('delay_on_retry', True) - attempts = kwargs.get('attempts', 1) - run_as_root = kwargs.get('run_as_root', False) - cmd_str = ' '.join(str(part) for part in cmd_parts) - - _fake_execute_log.append(cmd_str) - - reply_handler = fake_execute_default_reply_handler - - for fake_replier in _fake_execute_repliers: - if re.match(fake_replier[0], cmd_str): - reply_handler = fake_replier[1] - break - - if isinstance(reply_handler, six.string_types): - # If the reply handler is a string, return it as stdout - reply = reply_handler, '' - else: - # Alternative is a function, so call it - reply = reply_handler(cmd_parts, - process_input=process_input, - delay_on_retry=delay_on_retry, - attempts=attempts, - run_as_root=run_as_root, - check_exit_code=check_exit_code) - - # Replicate the sleep call in the real function - greenthread.sleep(0) - return reply diff --git a/cinder/tests/unit/fake_volume.py b/cinder/tests/unit/fake_volume.py deleted file mode 100644 index d6eb49778..000000000 --- a/cinder/tests/unit/fake_volume.py +++ /dev/null @@ -1,121 +0,0 @@ -# Copyright 2015 SimpliVity Corp. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -from oslo_versionedobjects import fields - -from cinder import objects -from cinder.objects import fields as c_fields -from cinder.tests.unit import fake_constants as fake - - -def fake_db_volume(**updates): - db_volume = { - 'id': fake.VOLUME_ID, - 'size': 1, - 'name': 'volume-%s' % fake.VOLUME_ID, - 'availability_zone': 'fake_availability_zone', - 'status': 'available', - 'attach_status': c_fields.VolumeAttachStatus.DETACHED, - 'previous_status': None, - 'volume_attachment': [], - 'volume_metadata': [], - 'volume_admin_metadata': [], - 'volume_glance_metadata': [], - 'snapshots': [], - } - - for name, field in objects.Volume.fields.items(): - if name in db_volume: - continue - if field.nullable: - db_volume[name] = None - elif field.default != fields.UnspecifiedDefault: - db_volume[name] = field.default - else: - raise Exception('fake_db_volume needs help with %s.' % name) - - if updates: - db_volume.update(updates) - - return db_volume - - -def fake_db_volume_type(**updates): - db_volume_type = { - 'id': fake.VOLUME_TYPE_ID, - 'name': 'type-1', - 'description': 'A fake volume type', - 'is_public': True, - 'projects': [], - 'extra_specs': {}, - } - - for name, field in objects.VolumeType.fields.items(): - if name in db_volume_type: - continue - if field.nullable: - db_volume_type[name] = None - elif field.default != fields.UnspecifiedDefault: - db_volume_type[name] = field.default - else: - raise Exception('fake_db_volume_type needs help with %s.' 
% name) - - if updates: - db_volume_type.update(updates) - - return db_volume_type - - -def fake_db_volume_attachment(**updates): - db_volume_attachment = { - 'id': fake.ATTACHMENT_ID, - 'volume_id': fake.VOLUME_ID, - 'volume': fake_db_volume(), - } - - for name, field in objects.VolumeAttachment.fields.items(): - if name in db_volume_attachment: - continue - if field.nullable: - db_volume_attachment[name] = None - elif field.default != fields.UnspecifiedDefault: - db_volume_attachment[name] = field.default - else: - raise Exception( - 'fake_db_volume_attachment needs help with %s.' % name) - - if updates: - db_volume_attachment.update(updates) - - return db_volume_attachment - - -def fake_volume_obj(context, **updates): - expected_attrs = updates.pop('expected_attrs', - ['metadata', 'admin_metadata']) - vol = objects.Volume._from_db_object(context, objects.Volume(), - fake_db_volume(**updates), - expected_attrs=expected_attrs) - return vol - - -def fake_volume_type_obj(context, **updates): - return objects.VolumeType._from_db_object( - context, objects.VolumeType(), fake_db_volume_type(**updates)) - - -def fake_volume_attachment_obj(context, **updates): - return objects.VolumeAttachment._from_db_object( - context, objects.VolumeAttachment(), - fake_db_volume_attachment(**updates)) diff --git a/cinder/tests/unit/glance/__init__.py b/cinder/tests/unit/glance/__init__.py deleted file mode 100644 index e69de29bb..000000000 diff --git a/cinder/tests/unit/glance/stubs.py b/cinder/tests/unit/glance/stubs.py deleted file mode 100644 index b1b18a687..000000000 --- a/cinder/tests/unit/glance/stubs.py +++ /dev/null @@ -1,146 +0,0 @@ -# Copyright (c) 2011 Citrix Systems, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. 
You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import glanceclient.exc - - -NOW_GLANCE_FORMAT = "2010-10-11T10:30:22" - - -IMAGE_ATTRIBUTES = ['size', 'disk_format', 'owner', - 'container_format', 'checksum', 'id', - 'name', 'created_at', 'updated_at', - 'deleted', 'status', - 'min_disk', 'min_ram', 'visibility', - 'protected'] - - -class StubGlanceClient(object): - - def __init__(self, images=None): - self._images = [] - _images = images or [] - map(lambda image: self.create(**image), _images) - - # NOTE(bcwaldon): HACK to get client.images.* to work - self.images = lambda: None - for fn in ('list', 'get', 'data', 'create', 'update', 'upload', - 'delete'): - setattr(self.images, fn, getattr(self, fn)) - - self.schemas = lambda: None - setattr(self.schemas, 'get', getattr(self, 'schemas_get')) - - # TODO(bcwaldon): implement filters - def list(self, filters=None, marker=None, limit=30): - if marker is None: - index = 0 - else: - for index, image in enumerate(self._images): - if image.id == str(marker): - index += 1 - break - else: - raise glanceclient.exc.BadRequest('Marker not found') - - return self._images[index:index + limit] - - def get(self, image_id): - for image in self._images: - if image.id == str(image_id): - return image - raise glanceclient.exc.NotFound(image_id) - - def data(self, image_id): - image = self.get(image_id) - if getattr(image, 'size', 0): - return ['*' * image.size] - else: - return [] - - def create(self, **metadata): - metadata['created_at'] = NOW_GLANCE_FORMAT - metadata['updated_at'] = NOW_GLANCE_FORMAT - - self._images.append(FakeImage(metadata)) - - try: - image_id = 
str(metadata['id']) - except KeyError: - # auto-generate an id if one wasn't provided - image_id = str(len(self._images)) - - self._images[-1].id = image_id - - return self._images[-1] - - def update(self, image_id, **metadata): - for i, image in enumerate(self._images): - if image.id == str(image_id): - for k, v in metadata.items(): - if k == 'data': - setattr(self._images[i], 'size', len(v)) - else: - setattr(self._images[i], k, v) - return self._images[i] - raise glanceclient.exc.NotFound(image_id) - - def delete(self, image_id): - for i, image in enumerate(self._images): - if image.id == image_id: - del self._images[i] - return - raise glanceclient.exc.NotFound(image_id) - - def upload(self, image_id, data): - for i, image in enumerate(self._images): - if image.id == image_id: - setattr(self._images[i], 'size', len(data)) - return - raise glanceclient.exc.NotFound(image_id) - - def schemas_get(self, schema_name): - if schema_name != 'image': - raise glanceclient.exc.NotFound() - return FakeSchema() - - -class FakeImage(object): - def __init__(self, metadata): - raw = dict.fromkeys(IMAGE_ATTRIBUTES) - raw.update(metadata) - self.__dict__['raw'] = raw - - def __getattr__(self, key): - try: - return self.__dict__['raw'][key] - except KeyError: - raise AttributeError(key) - - def __setattr__(self, key, value): - try: - self.__dict__['raw'][key] = value - except KeyError: - raise AttributeError(key) - - def keys(self): - return self.__dict__['raw'].keys() - - -class FakeSchema(object): - def is_base_property(self, key): - if key in IMAGE_ATTRIBUTES: - return True - else: - return False diff --git a/cinder/tests/unit/group/__init__.py b/cinder/tests/unit/group/__init__.py deleted file mode 100644 index e69de29bb..000000000 diff --git a/cinder/tests/unit/group/test_groups_api.py b/cinder/tests/unit/group/test_groups_api.py deleted file mode 100644 index 07b13e050..000000000 --- a/cinder/tests/unit/group/test_groups_api.py +++ /dev/null @@ -1,644 +0,0 @@ -# Copyright 
(C) 2016 EMC Corporation. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -""" -Tests for group API. -""" - -import ddt -import mock - -from cinder import context -from cinder import exception -import cinder.group -from cinder import objects -from cinder.objects import fields -from cinder import quota -from cinder import test -from cinder.tests.unit import fake_constants as fake -from cinder.tests.unit import fake_volume -from cinder.tests.unit import utils - - -GROUP_QUOTAS = quota.GROUP_QUOTAS - - -@ddt.ddt -class GroupAPITestCase(test.TestCase): - """Test Case for group API.""" - - def setUp(self): - super(GroupAPITestCase, self).setUp() - self.group_api = cinder.group.API() - self.ctxt = context.RequestContext(fake.USER_ID, fake.PROJECT_ID, - auth_token=True, - is_admin=True) - self.user_ctxt = context.RequestContext( - fake.USER_ID, fake.PROJECT_ID, auth_token=True) - - @mock.patch('cinder.objects.Group.get_by_id') - @mock.patch('cinder.group.api.check_policy') - def test_get(self, mock_policy, mock_group_get): - fake_group = 'fake_group' - mock_group_get.return_value = fake_group - grp = self.group_api.get(self.ctxt, fake.GROUP_ID) - self.assertEqual(fake_group, grp) - mock_policy.assert_called_once_with(self.ctxt, 'get', mock.ANY) - - @ddt.data(True, False) - @mock.patch('cinder.objects.GroupList.get_all') - @mock.patch('cinder.objects.GroupList.get_all_by_project') - @mock.patch('cinder.group.api.check_policy') - def test_get_all(self, 
is_admin, mock_policy, mock_get_all_by_project, - mock_get_all): - self.group_api.LOG = mock.Mock() - fake_groups = ['fake_group1', 'fake_group2'] - fake_groups_by_project = ['fake_group1'] - mock_get_all.return_value = fake_groups - mock_get_all_by_project.return_value = fake_groups_by_project - - if is_admin: - grps = self.group_api.get_all(self.ctxt, - filters={'all_tenants': True}) - self.assertEqual(fake_groups, grps) - mock_policy.assert_called_once_with(self.ctxt, 'get_all') - else: - grps = self.group_api.get_all(self.user_ctxt) - self.assertEqual(fake_groups_by_project, grps) - mock_policy.assert_called_once_with(self.user_ctxt, 'get_all') - - @mock.patch('cinder.volume.rpcapi.VolumeAPI.delete_group') - @mock.patch('cinder.db.volume_get_all_by_generic_group') - @mock.patch('cinder.db.volumes_update') - @mock.patch('cinder.group.api.API._cast_create_group') - @mock.patch('cinder.group.api.API.update_quota') - @mock.patch('cinder.objects.Group') - @mock.patch('cinder.db.group_type_get') - @mock.patch('cinder.db.volume_types_get_by_name_or_id') - @mock.patch('cinder.group.api.check_policy') - def test_create_delete(self, mock_policy, mock_volume_types_get, - mock_group_type_get, mock_group, - mock_update_quota, mock_cast_create_group, - mock_volumes_update, mock_volume_get_all, - mock_rpc_delete_group): - mock_volume_types_get.return_value = [{'id': fake.VOLUME_TYPE_ID}] - mock_group_type_get.return_value = {'id': fake.GROUP_TYPE_ID} - name = "test_group" - description = "this is a test group" - grp = utils.create_group(self.ctxt, group_type_id=fake.GROUP_TYPE_ID, - volume_type_ids=[fake.VOLUME_TYPE_ID], - availability_zone='nova', host=None, - name=name, description=description, - status=fields.GroupStatus.CREATING) - mock_group.return_value = grp - - ret_group = self.group_api.create(self.ctxt, name, description, - fake.GROUP_TYPE_ID, - [fake.VOLUME_TYPE_ID], - availability_zone='nova') - mock_policy.assert_called_with(self.ctxt, 'create') - 
self.assertEqual(grp.obj_to_primitive(), ret_group.obj_to_primitive()) - - ret_group.host = "test_host@fakedrv#fakepool" - ret_group.status = fields.GroupStatus.AVAILABLE - ret_group.assert_not_frozen = mock.Mock(return_value=True) - ret_group.group_snapshots = [] - self.group_api.delete(self.ctxt, ret_group, delete_volumes=True) - mock_volume_get_all.assert_called_once_with(mock.ANY, ret_group.id) - mock_volumes_update.assert_called_once_with(self.ctxt, []) - mock_rpc_delete_group.assert_called_once_with(self.ctxt, ret_group) - mock_policy.assert_called_with(self.ctxt, 'delete', mock.ANY) - - @mock.patch('cinder.group.api.API._cast_create_group') - @mock.patch('cinder.group.api.API.update_quota') - @mock.patch('cinder.objects.Group') - @mock.patch('cinder.db.group_type_get_by_name') - @mock.patch('cinder.db.volume_types_get_by_name_or_id') - @mock.patch('cinder.group.api.check_policy') - def test_create_with_group_name(self, mock_policy, mock_volume_types_get, - mock_group_type_get, mock_group, - mock_update_quota, mock_cast_create_group): - mock_volume_types_get.return_value = [{'id': fake.VOLUME_TYPE_ID}] - mock_group_type_get.return_value = {'id': fake.GROUP_TYPE_ID} - name = "test_group" - description = "this is a test group" - grp = utils.create_group(self.ctxt, group_type_id=fake.GROUP_TYPE_ID, - volume_type_ids=[fake.VOLUME_TYPE_ID], - availability_zone='nova', host=None, - name=name, description=description, - status=fields.GroupStatus.CREATING) - mock_group.return_value = grp - - ret_group = self.group_api.create(self.ctxt, name, description, - "fake-grouptype-name", - [fake.VOLUME_TYPE_ID], - availability_zone='nova') - self.assertEqual(grp.obj_to_primitive(), ret_group.obj_to_primitive()) - - mock_group_type_get.assert_called_once_with(self.ctxt, - "fake-grouptype-name") - mock_policy.assert_called_with(self.ctxt, 'create') - - @mock.patch('cinder.group.api.API._cast_create_group') - @mock.patch('cinder.group.api.API.update_quota') - 
@mock.patch('cinder.db.group_type_get_by_name') - @mock.patch('cinder.db.volume_types_get_by_name_or_id') - @mock.patch('cinder.group.api.check_policy') - def test_create_with_multi_types(self, mock_policy, mock_volume_types_get, - mock_group_type_get, - mock_update_quota, - mock_cast_create_group): - volume_types = [{'id': fake.VOLUME_TYPE_ID}, - {'id': fake.VOLUME_TYPE2_ID}] - mock_volume_types_get.return_value = volume_types - mock_group_type_get.return_value = {'id': fake.GROUP_TYPE_ID} - volume_type_names = ['fake-volume-type1', 'fake-volume-type2'] - name = "test_group" - description = "this is a test group" - - group = self.group_api.create(self.ctxt, name, description, - "fake-grouptype-name", - volume_type_names, - availability_zone='nova') - self.assertEqual(group["volume_type_ids"], - [t['id'] for t in volume_types]) - self.assertEqual(group["group_type_id"], fake.GROUP_TYPE_ID) - - mock_group_type_get.assert_called_once_with(self.ctxt, - "fake-grouptype-name") - mock_volume_types_get.assert_called_once_with(mock.ANY, - volume_type_names) - mock_policy.assert_called_with(self.ctxt, 'create') - - @mock.patch('oslo_utils.timeutils.utcnow') - @mock.patch('cinder.objects.Group') - @mock.patch('cinder.group.api.check_policy') - def test_reset_status(self, mock_policy, mock_group, mock_time_util): - mock_time_util.return_value = "time_now" - self.group_api.reset_status(self.ctxt, mock_group, - fields.GroupStatus.AVAILABLE) - - update_field = {'updated_at': "time_now", - 'status': fields.GroupStatus.AVAILABLE} - mock_group.update.assert_called_once_with(update_field) - mock_group.save.assert_called_once_with() - mock_policy.assert_called_once_with(self.ctxt, - 'reset_status', mock.ANY) - - @mock.patch.object(GROUP_QUOTAS, "reserve") - @mock.patch('cinder.objects.Group') - @mock.patch('cinder.db.group_type_get_by_name') - @mock.patch('cinder.db.volume_types_get_by_name_or_id') - @mock.patch('cinder.group.api.check_policy') - def 
test_create_group_failed_update_quota(self, mock_policy, - mock_volume_types_get, - mock_group_type_get, mock_group, - mock_group_quota_reserve): - mock_volume_types_get.return_value = [{'id': fake.VOLUME_TYPE_ID}] - mock_group_type_get.return_value = {'id': fake.GROUP_TYPE_ID} - fake_overs = ['groups'] - fake_quotas = {'groups': 1} - fake_usages = {'groups': {'reserved': 0, 'in_use': 1}} - mock_group_quota_reserve.side_effect = exception.OverQuota( - overs=fake_overs, - quotas=fake_quotas, - usages=fake_usages) - name = "test_group" - description = "this is a test group" - grp = utils.create_group(self.ctxt, group_type_id=fake.GROUP_TYPE_ID, - volume_type_ids=[fake.VOLUME_TYPE_ID], - availability_zone='nova', host=None, - name=name, description=description, - status=fields.GroupStatus.CREATING) - mock_group.return_value = grp - - self.assertRaises(exception.GroupLimitExceeded, - self.group_api.create, - self.ctxt, name, description, - "fake-grouptype-name", - [fake.VOLUME_TYPE_ID], - availability_zone='nova') - mock_policy.assert_called_with(self.ctxt, 'create') - - @mock.patch('cinder.volume.rpcapi.VolumeAPI.update_group') - @mock.patch('cinder.db.volume_get_all_by_generic_group') - @mock.patch('cinder.group.api.API._cast_create_group') - @mock.patch('cinder.group.api.API.update_quota') - @mock.patch('cinder.objects.Group') - @mock.patch('cinder.db.group_type_get') - @mock.patch('cinder.db.volume_types_get_by_name_or_id') - @mock.patch('cinder.group.api.check_policy') - def test_update(self, mock_policy, mock_volume_types_get, - mock_group_type_get, mock_group, - mock_update_quota, mock_cast_create_group, - mock_volume_get_all, mock_rpc_update_group): - vol_type_dict = {'id': fake.VOLUME_TYPE_ID, - 'name': 'fake_volume_type'} - vol_type = objects.VolumeType(self.ctxt, **vol_type_dict) - - mock_volume_types_get.return_value = [{'id': fake.VOLUME_TYPE_ID}] - mock_group_type_get.return_value = {'id': fake.GROUP_TYPE_ID} - name = "test_group" - description = "this is 
a test group" - grp = utils.create_group(self.ctxt, group_type_id=fake.GROUP_TYPE_ID, - volume_type_ids=[fake.VOLUME_TYPE_ID], - availability_zone='nova', host=None, - name=name, description=description, - status=fields.GroupStatus.CREATING) - mock_group.return_value = grp - - ret_group = self.group_api.create(self.ctxt, name, description, - fake.GROUP_TYPE_ID, - [fake.VOLUME_TYPE_ID], - availability_zone='nova') - self.assertEqual(grp.obj_to_primitive(), ret_group.obj_to_primitive()) - - ret_group.volume_types = [vol_type] - ret_group.host = "test_host@fakedrv#fakepool" - ret_group.status = fields.GroupStatus.AVAILABLE - ret_group.id = fake.GROUP_ID - - vol1 = utils.create_volume( - self.ctxt, host=ret_group.host, - availability_zone=ret_group.availability_zone, - volume_type_id=fake.VOLUME_TYPE_ID) - - vol2 = utils.create_volume( - self.ctxt, host=ret_group.host, - availability_zone=ret_group.availability_zone, - volume_type_id=fake.VOLUME_TYPE_ID, - group_id=fake.GROUP_ID) - vol2_dict = { - 'id': vol2.id, - 'group_id': fake.GROUP_ID, - 'volume_type_id': fake.VOLUME_TYPE_ID, - 'availability_zone': ret_group.availability_zone, - 'host': ret_group.host, - 'status': 'available', - } - mock_volume_get_all.return_value = [vol2_dict] - - new_name = "new_group_name" - new_desc = "this is a new group" - self.group_api.update(self.ctxt, ret_group, new_name, new_desc, - vol1.id, vol2.id) - mock_volume_get_all.assert_called_once_with(mock.ANY, ret_group.id) - mock_rpc_update_group.assert_called_once_with(self.ctxt, ret_group, - add_volumes=vol1.id, - remove_volumes=vol2.id) - mock_policy.assert_called_with(self.ctxt, 'update', mock.ANY) - - @mock.patch('cinder.objects.GroupSnapshot.get_by_id') - @mock.patch('cinder.group.api.check_policy') - def test_get_group_snapshot(self, mock_policy, mock_group_snap): - fake_group_snap = 'fake_group_snap' - mock_group_snap.return_value = fake_group_snap - grp_snap = self.group_api.get_group_snapshot( - self.ctxt, fake.GROUP_SNAPSHOT_ID) 
- self.assertEqual(fake_group_snap, grp_snap) - mock_policy.assert_called_with(self.ctxt, 'get_group_snapshot') - - @ddt.data(True, False) - @mock.patch('cinder.objects.GroupSnapshotList.get_all') - @mock.patch('cinder.objects.GroupSnapshotList.get_all_by_project') - @mock.patch('cinder.group.api.check_policy') - def test_get_all_group_snapshots(self, is_admin, mock_policy, - mock_get_all_by_project, - mock_get_all): - fake_group_snaps = ['fake_group_snap1', 'fake_group_snap2'] - fake_group_snaps_by_project = ['fake_group_snap1'] - mock_get_all.return_value = fake_group_snaps - mock_get_all_by_project.return_value = fake_group_snaps_by_project - - if is_admin: - grp_snaps = self.group_api.get_all_group_snapshots( - self.ctxt, filters={'all_tenants': True}) - self.assertEqual(fake_group_snaps, grp_snaps) - mock_policy.assert_called_with(self.ctxt, - 'get_all_group_snapshots') - else: - grp_snaps = self.group_api.get_all_group_snapshots( - self.user_ctxt) - self.assertEqual(fake_group_snaps_by_project, grp_snaps) - mock_policy.assert_called_with(self.user_ctxt, - 'get_all_group_snapshots') - - @mock.patch('cinder.objects.GroupSnapshot') - @mock.patch('cinder.group.api.check_policy') - def test_update_group_snapshot(self, mock_policy, mock_group_snap): - grp_snap_update = {"name": "new_name", - "description": "This is a new description"} - self.group_api.update_group_snapshot(self.ctxt, mock_group_snap, - grp_snap_update) - mock_group_snap.update.assert_called_once_with(grp_snap_update) - mock_group_snap.save.assert_called_once_with() - mock_policy.assert_called_with(self.ctxt, 'update_group_snapshot') - - @mock.patch('cinder.volume.rpcapi.VolumeAPI.delete_group_snapshot') - @mock.patch('cinder.volume.rpcapi.VolumeAPI.create_group_snapshot') - @mock.patch('cinder.volume.api.API.create_snapshots_in_db') - @mock.patch('cinder.objects.Group') - @mock.patch('cinder.objects.GroupSnapshot') - @mock.patch('cinder.objects.SnapshotList.get_all_for_group_snapshot') - 
@mock.patch('cinder.group.api.check_policy') - def test_create_delete_group_snapshot(self, mock_policy, - mock_snap_get_all, - mock_group_snap, mock_group, - mock_create_in_db, - mock_create_api, mock_delete_api): - name = "fake_name" - description = "fake description" - mock_group.id = fake.GROUP_ID - mock_group.group_type_id = fake.GROUP_TYPE_ID - mock_group.assert_not_frozen = mock.Mock(return_value=True) - mock_group.volumes = [] - ret_group_snap = self.group_api.create_group_snapshot( - self.ctxt, mock_group, name, description) - mock_snap_get_all.return_value = [] - - options = {'group_id': fake.GROUP_ID, - 'user_id': self.ctxt.user_id, - 'project_id': self.ctxt.project_id, - 'status': "creating", - 'name': name, - 'description': description, - 'group_type_id': fake.GROUP_TYPE_ID} - mock_group_snap.assert_called_once_with(self.ctxt, **options) - ret_group_snap.create.assert_called_once_with() - mock_create_in_db.assert_called_once_with(self.ctxt, [], - ret_group_snap.name, - ret_group_snap.description, - None, - ret_group_snap.id) - mock_create_api.assert_called_once_with(self.ctxt, ret_group_snap) - - mock_policy.assert_called_once_with(self.ctxt, 'create_group_snapshot', - mock.ANY) - ret_group_snap.assert_not_frozen = mock.Mock(return_value=True) - self.group_api.delete_group_snapshot(self.ctxt, ret_group_snap) - mock_delete_api.assert_called_once_with(mock.ANY, ret_group_snap) - mock_policy.assert_called_with(self.ctxt, 'delete_group_snapshot') - - @mock.patch('cinder.objects.VolumeType.get_by_name_or_id') - @mock.patch('cinder.db.group_volume_type_mapping_create') - @mock.patch('cinder.volume.api.API.create') - @mock.patch('cinder.objects.GroupSnapshot.get_by_id') - @mock.patch('cinder.objects.SnapshotList.get_all_for_group_snapshot') - @mock.patch('cinder.volume.rpcapi.VolumeAPI.create_group_from_src') - @mock.patch('cinder.objects.VolumeList.get_all_by_generic_group') - def test_create_group_from_snap(self, mock_volume_get_all, - 
mock_rpc_create_group_from_src, - mock_snap_get_all, mock_group_snap_get, - mock_volume_api_create, - mock_mapping_create, - mock_get_volume_type): - vol_type = fake_volume.fake_volume_type_obj( - self.ctxt, - id=fake.VOLUME_TYPE_ID, - name='fake_volume_type') - mock_get_volume_type.return_value = vol_type - - grp_snap = utils.create_group_snapshot( - self.ctxt, fake.GROUP_ID, - group_type_id=fake.GROUP_TYPE_ID, - status=fields.GroupStatus.CREATING) - mock_group_snap_get.return_value = grp_snap - vol1 = utils.create_volume( - self.ctxt, - availability_zone='nova', - volume_type_id=vol_type['id'], - group_id=fake.GROUP_ID) - - snap = utils.create_snapshot(self.ctxt, vol1.id, - volume_type_id=vol_type['id'], - status=fields.GroupStatus.CREATING) - mock_snap_get_all.return_value = [snap] - - name = "test_group" - description = "this is a test group" - grp = utils.create_group(self.ctxt, group_type_id=fake.GROUP_TYPE_ID, - volume_type_ids=[vol_type['id']], - availability_zone='nova', - name=name, description=description, - group_snapshot_id=grp_snap.id, - status=fields.GroupStatus.CREATING) - - vol2 = utils.create_volume( - self.ctxt, - availability_zone=grp.availability_zone, - volume_type_id=vol_type['id'], - group_id=grp.id, - snapshot_id=snap.id) - mock_volume_get_all.return_value = [vol2] - - self.group_api._create_group_from_group_snapshot(self.ctxt, grp, - grp_snap.id) - - mock_volume_api_create.assert_called_once_with( - self.ctxt, 1, None, None, - availability_zone=grp.availability_zone, - group_snapshot=grp_snap, - group=grp, - snapshot=snap, - volume_type=vol_type) - - mock_rpc_create_group_from_src.assert_called_once_with( - self.ctxt, grp, grp_snap) - - vol2.destroy() - grp.destroy() - snap.destroy() - vol1.destroy() - grp_snap.destroy() - - @mock.patch('cinder.objects.VolumeType.get_by_name_or_id') - @mock.patch('cinder.db.group_volume_type_mapping_create') - @mock.patch('cinder.volume.api.API.create') - @mock.patch('cinder.objects.Group.get_by_id') - 
@mock.patch('cinder.volume.rpcapi.VolumeAPI.create_group_from_src') - @mock.patch('cinder.objects.VolumeList.get_all_by_generic_group') - @mock.patch('cinder.group.api.check_policy') - def test_create_group_from_group(self, mock_policy, mock_volume_get_all, - mock_rpc_create_group_from_src, - mock_group_get, - mock_volume_api_create, - mock_mapping_create, - mock_get_volume_type): - vol_type = fake_volume.fake_volume_type_obj( - self.ctxt, - id=fake.VOLUME_TYPE_ID, - name='fake_volume_type') - mock_get_volume_type.return_value = vol_type - - grp = utils.create_group(self.ctxt, group_type_id=fake.GROUP_TYPE_ID, - volume_type_ids=[vol_type['id']], - availability_zone='nova', - status=fields.GroupStatus.CREATING) - mock_group_get.return_value = grp - - vol = utils.create_volume( - self.ctxt, - availability_zone=grp.availability_zone, - volume_type_id=fake.VOLUME_TYPE_ID, - group_id=grp.id) - mock_volume_get_all.return_value = [vol] - - grp2 = utils.create_group(self.ctxt, - group_type_id=fake.GROUP_TYPE_ID, - volume_type_ids=[vol_type['id']], - availability_zone='nova', - source_group_id=grp.id, - status=fields.GroupStatus.CREATING) - - vol2 = utils.create_volume( - self.ctxt, - availability_zone=grp.availability_zone, - volume_type_id=vol_type['id'], - group_id=grp2.id, - source_volid=vol.id) - - self.group_api._create_group_from_source_group(self.ctxt, grp2, - grp.id) - - mock_volume_api_create.assert_called_once_with( - self.ctxt, 1, None, None, - availability_zone=grp.availability_zone, - source_group=grp, - group=grp2, - source_volume=vol, - volume_type=vol_type) - - mock_rpc_create_group_from_src.assert_called_once_with( - self.ctxt, grp2, None, grp) - - vol2.destroy() - grp2.destroy() - vol.destroy() - grp.destroy() - - @mock.patch('cinder.group.api.API._create_group_from_group_snapshot') - @mock.patch('cinder.group.api.API._create_group_from_source_group') - @mock.patch('cinder.group.api.API.update_quota') - 
@mock.patch('cinder.objects.GroupSnapshot.get_by_id') - @mock.patch('cinder.objects.SnapshotList.get_all_for_group_snapshot') - @mock.patch('cinder.group.api.check_policy') - def test_create_from_src(self, mock_policy, mock_snap_get_all, - mock_group_snap_get, mock_update_quota, - mock_create_from_group, - mock_create_from_snap): - name = "test_group" - description = "this is a test group" - grp = utils.create_group(self.ctxt, group_type_id=fake.GROUP_TYPE_ID, - volume_type_ids=[fake.VOLUME_TYPE_ID], - availability_zone='nova', - name=name, description=description, - status=fields.GroupStatus.AVAILABLE,) - - vol1 = utils.create_volume( - self.ctxt, - availability_zone='nova', - volume_type_id=fake.VOLUME_TYPE_ID, - group_id=grp.id) - - snap = utils.create_snapshot(self.ctxt, vol1.id, - volume_type_id=fake.VOLUME_TYPE_ID, - status=fields.SnapshotStatus.AVAILABLE) - mock_snap_get_all.return_value = [snap] - - grp_snap = utils.create_group_snapshot( - self.ctxt, grp.id, - group_type_id=fake.GROUP_TYPE_ID, - status=fields.GroupStatus.AVAILABLE) - mock_group_snap_get.return_value = grp_snap - - grp2 = utils.create_group(self.ctxt, - group_type_id=fake.GROUP_TYPE_ID, - volume_type_ids=[fake.VOLUME_TYPE_ID], - availability_zone='nova', - name=name, description=description, - status=fields.GroupStatus.CREATING, - group_snapshot_id=grp_snap.id) - - with mock.patch('cinder.objects.Group') as mock_group: - mock_group.return_value = grp2 - with mock.patch('cinder.objects.group.Group.create'): - ret_group = self.group_api.create_from_src( - self.ctxt, name, description, - group_snapshot_id=grp_snap.id, - source_group_id=None) - self.assertEqual(grp2.obj_to_primitive(), - ret_group.obj_to_primitive()) - mock_create_from_snap.assert_called_once_with( - self.ctxt, grp2, grp_snap.id) - - snap.destroy() - grp_snap.destroy() - vol1.destroy() - grp.destroy() - grp2.destroy() - - @mock.patch('oslo_utils.timeutils.utcnow') - @mock.patch('cinder.objects.GroupSnapshot') - 
@mock.patch('cinder.group.api.check_policy') - def test_reset_group_snapshot_status(self, mock_policy, - mock_group_snapshot, - mock_time_util): - mock_time_util.return_value = "time_now" - self.group_api.reset_group_snapshot_status( - self.ctxt, mock_group_snapshot, fields.GroupSnapshotStatus.ERROR) - - update_field = {'updated_at': "time_now", - 'status': fields.GroupSnapshotStatus.ERROR} - mock_group_snapshot.update.assert_called_once_with(update_field) - mock_group_snapshot.save.assert_called_once_with() - mock_policy.assert_called_once_with(self.ctxt, - 'reset_group_snapshot_status') - - def test_create_group_from_src_frozen(self): - service = utils.create_service(self.ctxt, {'frozen': True}) - group = utils.create_group(self.ctxt, host=service.host, - group_type_id='gt') - group_api = cinder.group.api.API() - self.assertRaises(exception.InvalidInput, - group_api.create_from_src, - self.ctxt, 'group', 'desc', - group_snapshot_id=None, source_group_id=group.id) - - def test_delete_group_frozen(self): - service = utils.create_service(self.ctxt, {'frozen': True}) - group = utils.create_group(self.ctxt, host=service.host, - group_type_id='gt') - group_api = cinder.group.api.API() - self.assertRaises(exception.InvalidInput, - group_api.delete, self.ctxt, group) - - def test_create_group_snapshot_frozen(self): - service = utils.create_service(self.ctxt, {'frozen': True}) - group = utils.create_group(self.ctxt, host=service.host, - group_type_id='gt') - group_api = cinder.group.api.API() - self.assertRaises(exception.InvalidInput, - group_api.create_group_snapshot, - self.ctxt, group, 'group_snapshot', 'desc') - - def test_delete_group_snapshot_frozen(self): - service = utils.create_service(self.ctxt, {'frozen': True}) - group = utils.create_group(self.ctxt, host=service.host, - group_type_id='gt') - gsnap = utils.create_group_snapshot(self.ctxt, group.id) - group_api = cinder.group.api.API() - self.assertRaises(exception.InvalidInput, - 
group_api.delete_group_snapshot, - self.ctxt, gsnap) diff --git a/cinder/tests/unit/group/test_groups_manager.py b/cinder/tests/unit/group/test_groups_manager.py deleted file mode 100644 index 5b5d659f6..000000000 --- a/cinder/tests/unit/group/test_groups_manager.py +++ /dev/null @@ -1,975 +0,0 @@ -# Copyright (C) 2016 EMC Corporation. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import ddt -import mock -from oslo_config import cfg -from oslo_utils import importutils - -from cinder import context -from cinder import db -from cinder import exception -from cinder import objects -from cinder.objects import fields -from cinder import quota -from cinder import test -from cinder.tests.unit import conf_fixture -from cinder.tests.unit import fake_constants as fake -from cinder.tests.unit import fake_group -from cinder.tests.unit import fake_group_snapshot -from cinder.tests.unit import fake_snapshot -from cinder.tests.unit import fake_volume -from cinder.tests.unit import utils as tests_utils -from cinder.volume import api as volume_api -from cinder.volume import configuration as conf -from cinder.volume import driver -from cinder.volume import utils as volutils - -GROUP_QUOTAS = quota.GROUP_QUOTAS -CONF = cfg.CONF - - -@ddt.ddt -class GroupManagerTestCase(test.TestCase): - - def setUp(self): - super(GroupManagerTestCase, self).setUp() - self.volume = importutils.import_object(CONF.volume_manager) - self.configuration = 
mock.Mock(conf.Configuration) - self.context = context.get_admin_context() - self.context.user_id = fake.USER_ID - self.project_id = fake.PROJECT3_ID - self.context.project_id = self.project_id - self.volume.driver.set_initialized() - self.volume.stats = {'allocated_capacity_gb': 0, - 'pools': {}} - self.volume_api = volume_api.API() - - def test_delete_volume_in_group(self): - """Test deleting a volume that's tied to a group fails.""" - volume_params = {'status': 'available', - 'group_id': fake.GROUP_ID} - volume = tests_utils.create_volume(self.context, **volume_params) - self.assertRaises(exception.InvalidVolume, - self.volume_api.delete, self.context, volume) - - @mock.patch.object(GROUP_QUOTAS, "reserve", - return_value=["RESERVATION"]) - @mock.patch.object(GROUP_QUOTAS, "commit") - @mock.patch.object(GROUP_QUOTAS, "rollback") - @mock.patch.object(driver.VolumeDriver, - "delete_group", - return_value=({'status': ( - fields.GroupStatus.DELETED)}, [])) - def test_create_delete_group(self, fake_delete_grp, - fake_rollback, - fake_commit, fake_reserve): - """Test group can be created and deleted.""" - - def fake_driver_create_grp(context, group): - """Make sure that the pool is part of the host.""" - self.assertIn('host', group) - host = group.host - pool = volutils.extract_host(host, level='pool') - self.assertEqual('fakepool', pool) - return {'status': fields.GroupStatus.AVAILABLE} - - self.mock_object(self.volume.driver, 'create_group', - fake_driver_create_grp) - - group = tests_utils.create_group( - self.context, - availability_zone=CONF.storage_availability_zone, - volume_type_ids=[fake.VOLUME_TYPE_ID], - host='fakehost@fakedrv#fakepool', - group_type_id=fake.GROUP_TYPE_ID) - group = objects.Group.get_by_id(self.context, group.id) - self.assertEqual(0, len(self.notifier.notifications), - self.notifier.notifications) - self.volume.create_group(self.context, group) - self.assertEqual(2, len(self.notifier.notifications), - self.notifier.notifications) - msg = 
self.notifier.notifications[0] - self.assertEqual('group.create.start', msg['event_type']) - expected = { - 'status': fields.GroupStatus.AVAILABLE, - 'name': 'test_group', - 'availability_zone': 'nova', - 'tenant_id': self.context.project_id, - 'created_at': mock.ANY, - 'user_id': fake.USER_ID, - 'group_id': group.id, - 'group_type': fake.GROUP_TYPE_ID - } - self.assertDictEqual(expected, msg['payload']) - msg = self.notifier.notifications[1] - self.assertEqual('group.create.end', msg['event_type']) - self.assertDictEqual(expected, msg['payload']) - self.assertEqual( - group.id, - objects.Group.get_by_id(context.get_admin_context(), - group.id).id) - - self.volume.delete_group(self.context, group) - grp = objects.Group.get_by_id( - context.get_admin_context(read_deleted='yes'), group.id) - self.assertEqual(fields.GroupStatus.DELETED, grp.status) - self.assertEqual(4, len(self.notifier.notifications), - self.notifier.notifications) - msg = self.notifier.notifications[2] - self.assertEqual('group.delete.start', msg['event_type']) - self.assertDictEqual(expected, msg['payload']) - msg = self.notifier.notifications[3] - self.assertEqual('group.delete.end', msg['event_type']) - expected['status'] = fields.GroupStatus.DELETED - self.assertDictEqual(expected, msg['payload']) - self.assertRaises(exception.NotFound, - objects.Group.get_by_id, - self.context, - group.id) - - @ddt.data(('', [], 0, None, True), - ('1,2', ['available', 'in-use'], 2, None, True), - ('1,2,3', ['available', 'in-use', 'error_deleting'], 3, - None, False), - ('1,2', ['wrong_status', 'available'], 0, - exception.InvalidVolume, True), - ('1,2', ['available', exception.VolumeNotFound], - 0, exception.VolumeNotFound, True)) - @ddt.unpack - @mock.patch('cinder.objects.Volume.get_by_id') - def test__collect_volumes_for_group(self, add_volumes, returned, expected, - raise_error, add, mock_get): - side_effect = [] - - class FakeVolume(object): - def __init__(self, status): - self.status = status - self.id = 
fake.UUID1 - - for value in returned: - if isinstance(value, str): - value = FakeVolume(value) - else: - value = value(volume_id=fake.UUID1) - side_effect.append(value) - mock_get.side_effect = side_effect - group = tests_utils.create_group( - self.context, - availability_zone=CONF.storage_availability_zone, - volume_type_ids=[fake.VOLUME_TYPE_ID], - group_type_id=fake.GROUP_TYPE_ID, - host=CONF.host) - - with mock.patch.object(self.volume, '_check_is_our_resource', - mock.Mock()) as mock_check: - if raise_error: - self.assertRaises(raise_error, - self.volume._collect_volumes_for_group, - None, group, add_volumes, add) - else: - result = self.volume._collect_volumes_for_group(None, group, - add_volumes, - add=add) - if add: - self.assertEqual(expected, mock_check.call_count) - self.assertEqual(expected, len(result)) - - @ddt.data((False, fake.GROUP_TYPE_ID), - (True, fake.GROUP_TYPE_ID), - (True, fake.GROUP_TYPE2_ID)) - @ddt.unpack - @mock.patch('cinder.volume.group_types.get_default_cgsnapshot_type', - return_value={'id': fake.GROUP_TYPE2_ID}) - @mock.patch.object(GROUP_QUOTAS, "reserve", - return_value=["RESERVATION"]) - @mock.patch.object(GROUP_QUOTAS, "commit") - @mock.patch.object(GROUP_QUOTAS, "rollback") - @mock.patch.object(driver.VolumeDriver, - "create_group", - return_value={'status': 'available'}) - @mock.patch('cinder.volume.manager.VolumeManager._update_group_generic') - @mock.patch.object(driver.VolumeDriver, - 'update_consistencygroup') - @mock.patch.object(driver.VolumeDriver, - "update_group") - def test_update_group(self, raise_error, type_id, - fake_update_grp, fake_update_cg, - fake_generic_update, - fake_create_grp, fake_rollback, - fake_commit, fake_reserve, fake_get_type): - """Test group can be updated.""" - group = tests_utils.create_group( - self.context, - availability_zone=CONF.storage_availability_zone, - volume_type_ids=[fake.VOLUME_TYPE_ID], - group_type_id=type_id, - host=CONF.host) - self.volume.create_group(self.context, group) - 
- volume = tests_utils.create_volume( - self.context, - group_id=group.id, - volume_type_id=fake.VOLUME_TYPE_ID, - status='available', - host=group.host) - self.volume.create_volume(self.context, volume) - - volume2 = tests_utils.create_volume( - self.context, - group_id=None, - volume_type_id=fake.VOLUME_TYPE_ID, - status='available', - host=group.host) - self.volume.create_volume(self.context, volume) - - driver_result = ({'status': fields.GroupStatus.AVAILABLE}, - [{'id': volume2.id, 'status': 'available'}], - [{'id': volume.id, 'status': 'available'}]) - if raise_error: - fake_update_grp.side_effect = [NotImplementedError] - fake_update_cg.return_value = driver_result - fake_generic_update.return_value = driver_result - else: - fake_update_grp.return_value = driver_result - - with mock.patch.object( - self.volume, '_convert_group_to_cg', - mock.Mock()) as mock_convert, mock.patch.object( - self.volume, - '_remove_consistencygroup_id_from_volumes', - mock.Mock()): - mock_convert.return_value = ('fake_cg', [volume]) - self.volume.update_group(self.context, group, - add_volumes=volume2.id, - remove_volumes=volume.id) - if raise_error: - if type_id == fake.GROUP_TYPE2_ID: - fake_update_cg.assert_called_once_with( - self.context, 'fake_cg', - add_volumes=mock.ANY, - remove_volumes=[volume]) - else: - fake_generic_update.assert_called_once_with( - self.context, group, - add_volumes=mock.ANY, - remove_volumes=mock.ANY) - - grp = objects.Group.get_by_id(self.context, group.id) - expected = { - 'status': fields.GroupStatus.AVAILABLE, - 'name': 'test_group', - 'availability_zone': 'nova', - 'tenant_id': self.context.project_id, - 'created_at': mock.ANY, - 'user_id': fake.USER_ID, - 'group_id': group.id, - 'group_type': type_id - } - self.assertEqual(fields.GroupStatus.AVAILABLE, grp.status) - self.assertEqual(10, len(self.notifier.notifications), - self.notifier.notifications) - msg = self.notifier.notifications[6] - self.assertEqual('group.update.start', 
msg['event_type']) - self.assertDictEqual(expected, msg['payload']) - msg = self.notifier.notifications[8] - self.assertEqual('group.update.end', msg['event_type']) - self.assertDictEqual(expected, msg['payload']) - grpvolumes = db.volume_get_all_by_generic_group(self.context, group.id) - grpvol_ids = [grpvol['id'] for grpvol in grpvolumes] - # Verify volume is removed. - self.assertNotIn(volume.id, grpvol_ids) - # Verify volume is added. - self.assertIn(volume2.id, grpvol_ids) - - volume3 = tests_utils.create_volume( - self.context, - group_id=None, - host=group.host, - volume_type_id=fake.VOLUME_TYPE_ID, - status='wrong-status') - volume_id3 = volume3['id'] - - volume_get_orig = self.volume.db.volume_get - self.volume.db.volume_get = mock.Mock( - return_value={'status': 'wrong_status', - 'id': volume_id3}) - # Try to add a volume in wrong status - self.assertRaises(exception.InvalidVolume, - self.volume.update_group, - self.context, - group, - add_volumes=volume_id3, - remove_volumes=None) - self.volume.db.volume_get.reset_mock() - self.volume.db.volume_get = volume_get_orig - - @mock.patch('cinder.db.sqlalchemy.api.' - 'volume_glance_metadata_copy_to_volume') - @mock.patch('cinder.db.sqlalchemy.api.' - 'volume_glance_metadata_copy_from_volume_to_volume') - @mock.patch.object(driver.VolumeDriver, - "create_group", - return_value={'status': 'available'}) - @mock.patch.object(driver.VolumeDriver, - "delete_group", - return_value=({'status': 'deleted'}, [])) - @mock.patch.object(driver.VolumeDriver, - "create_group_snapshot", - return_value={'status': 'available'}) - @mock.patch.object(driver.VolumeDriver, - "delete_group_snapshot", - return_value=({'status': 'deleted'}, [])) - @mock.patch.object(driver.VolumeDriver, - "create_group_from_src", - return_value=(None, None)) - @mock.patch('cinder.volume.drivers.lvm.LVMVolumeDriver.' - 'create_volume_from_snapshot') - @mock.patch('cinder.volume.drivers.lvm.LVMVolumeDriver.' 
- 'create_cloned_volume') - def test_create_group_from_src(self, - mock_create_cloned_vol, - mock_create_vol_from_snap, - mock_create_from_src, - mock_delete_grpsnap, - mock_create_grpsnap, - mock_delete_grp, - mock_create_grp, - mock_metadata_copy_volume_to_volume, - mock_metadata_copy_to_volume): - """Test group can be created and deleted.""" - group = tests_utils.create_group( - self.context, - availability_zone=CONF.storage_availability_zone, - status=fields.GroupStatus.AVAILABLE, - volume_type_ids=[fake.VOLUME_TYPE_ID], - group_type_id=fake.GROUP_TYPE_ID, - host=CONF.host) - volume = tests_utils.create_volume( - self.context, - group_id=group.id, - status='available', - multiattach=True, - bootable=True, - host=group.host, - volume_type_id=fake.VOLUME_TYPE_ID, - size=1) - volume_id = volume['id'] - group_snapshot_returns = self._create_group_snapshot(group.id, - [volume_id]) - group_snapshot = group_snapshot_returns[0] - snapshot_id = group_snapshot_returns[1][0]['id'] - - # Create group from source group snapshot. 
- group2 = tests_utils.create_group( - self.context, - availability_zone=CONF.storage_availability_zone, - group_snapshot_id=group_snapshot.id, - volume_type_ids=[fake.VOLUME_TYPE_ID], - group_type_id=fake.GROUP_TYPE_ID, - host=CONF.host) - group2 = objects.Group.get_by_id(self.context, group2.id) - volume2 = tests_utils.create_volume( - self.context, - group_id=group2.id, - snapshot_id=snapshot_id, - status='available', - host=group2.host, - volume_type_id=fake.VOLUME_TYPE_ID) - self.volume.create_volume(self.context, volume2) - self.volume.create_group_from_src( - self.context, group2, group_snapshot=group_snapshot) - grp2 = objects.Group.get_by_id(self.context, group2.id) - expected = { - 'status': fields.GroupStatus.AVAILABLE, - 'name': 'test_group', - 'availability_zone': 'nova', - 'tenant_id': self.context.project_id, - 'created_at': mock.ANY, - 'user_id': fake.USER_ID, - 'group_id': group2.id, - 'group_type': fake.GROUP_TYPE_ID, - } - self.assertEqual(fields.GroupStatus.AVAILABLE, grp2.status) - self.assertEqual(group2.id, grp2['id']) - self.assertEqual(group_snapshot.id, grp2['group_snapshot_id']) - self.assertIsNone(grp2['source_group_id']) - - msg = self.notifier.notifications[2] - self.assertEqual('group.create.start', msg['event_type']) - self.assertDictEqual(expected, msg['payload']) - msg = self.notifier.notifications[4] - self.assertEqual('group.create.end', msg['event_type']) - self.assertDictEqual(expected, msg['payload']) - - if len(self.notifier.notifications) > 6: - self.assertFalse(self.notifier.notifications[6], - self.notifier.notifications) - self.assertEqual(6, len(self.notifier.notifications), - self.notifier.notifications) - - self.volume.delete_group(self.context, group2) - - if len(self.notifier.notifications) > 9: - self.assertFalse(self.notifier.notifications[10], - self.notifier.notifications) - self.assertEqual(9, len(self.notifier.notifications), - self.notifier.notifications) - - msg = self.notifier.notifications[6] - 
self.assertEqual('group.delete.start', msg['event_type']) - expected['status'] = fields.GroupStatus.AVAILABLE - self.assertDictEqual(expected, msg['payload']) - msg = self.notifier.notifications[8] - self.assertEqual('group.delete.end', msg['event_type']) - expected['status'] = fields.GroupStatus.DELETED - self.assertDictEqual(expected, msg['payload']) - - grp2 = objects.Group.get_by_id( - context.get_admin_context(read_deleted='yes'), group2.id) - self.assertEqual(fields.GroupStatus.DELETED, grp2.status) - self.assertRaises(exception.NotFound, - objects.Group.get_by_id, - self.context, - group2.id) - - # Create group from source group - group3 = tests_utils.create_group( - self.context, - availability_zone=CONF.storage_availability_zone, - source_group_id=group.id, - volume_type_ids=[fake.VOLUME_TYPE_ID], - group_type_id=fake.GROUP_TYPE_ID, - host=CONF.host) - volume3 = tests_utils.create_volume( - self.context, - group_id=group3.id, - source_volid=volume_id, - status='available', - host=group3.host, - volume_type_id=fake.VOLUME_TYPE_ID) - self.volume.create_volume(self.context, volume3) - self.volume.create_group_from_src( - self.context, group3, source_group=group) - - grp3 = objects.Group.get_by_id(self.context, group3.id) - vol3 = objects.Volume.get_by_id(self.context, volume3.id) - - self.assertEqual(fields.GroupStatus.AVAILABLE, grp3.status) - self.assertEqual(group3.id, grp3.id) - self.assertEqual(group.id, grp3.source_group_id) - self.assertIsNone(grp3.group_snapshot_id) - self.assertEqual(volume.multiattach, vol3.multiattach) - self.assertEqual(volume.bootable, vol3.bootable) - - self.volume.delete_group_snapshot(self.context, group_snapshot) - self.volume.delete_group(self.context, group) - - def test_sort_snapshots(self): - vol1 = {'id': fake.VOLUME_ID, 'name': 'volume 1', - 'snapshot_id': fake.SNAPSHOT_ID, - 'group_id': fake.GROUP_ID} - vol2 = {'id': fake.VOLUME2_ID, 'name': 'volume 2', - 'snapshot_id': fake.SNAPSHOT2_ID, - 'group_id': fake.GROUP_ID} - 
vol3 = {'id': fake.VOLUME3_ID, 'name': 'volume 3', - 'snapshot_id': fake.SNAPSHOT3_ID, - 'group_id': fake.GROUP_ID} - snp1 = {'id': fake.SNAPSHOT_ID, 'name': 'snap 1', - 'group_snapshot_id': fake.GROUP_ID} - snp2 = {'id': fake.SNAPSHOT2_ID, 'name': 'snap 2', - 'group_snapshot_id': fake.GROUP_ID} - snp3 = {'id': fake.SNAPSHOT3_ID, 'name': 'snap 3', - 'group_snapshot_id': fake.GROUP_ID} - snp1_obj = fake_snapshot.fake_snapshot_obj(self.context, **snp1) - snp2_obj = fake_snapshot.fake_snapshot_obj(self.context, **snp2) - snp3_obj = fake_snapshot.fake_snapshot_obj(self.context, **snp3) - volumes = [] - snapshots = [] - volumes.append(vol1) - volumes.append(vol2) - volumes.append(vol3) - snapshots.append(snp2_obj) - snapshots.append(snp3_obj) - snapshots.append(snp1_obj) - i = 0 - for vol in volumes: - snap = snapshots[i] - i += 1 - self.assertNotEqual(vol['snapshot_id'], snap.id) - sorted_snaps = self.volume._sort_snapshots(volumes, snapshots) - i = 0 - for vol in volumes: - snap = sorted_snaps[i] - i += 1 - self.assertEqual(vol['snapshot_id'], snap.id) - - snapshots[2]['id'] = fake.WILL_NOT_BE_FOUND_ID - self.assertRaises(exception.SnapshotNotFound, - self.volume._sort_snapshots, - volumes, snapshots) - - self.assertRaises(exception.InvalidInput, - self.volume._sort_snapshots, - volumes, []) - - def test_sort_source_vols(self): - vol1 = {'id': '1', 'name': 'volume 1', - 'source_volid': '1', - 'group_id': '2'} - vol2 = {'id': '2', 'name': 'volume 2', - 'source_volid': '2', - 'group_id': '2'} - vol3 = {'id': '3', 'name': 'volume 3', - 'source_volid': '3', - 'group_id': '2'} - src_vol1 = {'id': '1', 'name': 'source vol 1', - 'group_id': '1'} - src_vol2 = {'id': '2', 'name': 'source vol 2', - 'group_id': '1'} - src_vol3 = {'id': '3', 'name': 'source vol 3', - 'group_id': '1'} - volumes = [] - src_vols = [] - volumes.append(vol1) - volumes.append(vol2) - volumes.append(vol3) - src_vols.append(src_vol2) - src_vols.append(src_vol3) - src_vols.append(src_vol1) - i = 0 - for 
vol in volumes: - src_vol = src_vols[i] - i += 1 - self.assertNotEqual(vol['source_volid'], src_vol['id']) - sorted_src_vols = self.volume._sort_source_vols(volumes, src_vols) - i = 0 - for vol in volumes: - src_vol = sorted_src_vols[i] - i += 1 - self.assertEqual(vol['source_volid'], src_vol['id']) - - src_vols[2]['id'] = '9999' - self.assertRaises(exception.VolumeNotFound, - self.volume._sort_source_vols, - volumes, src_vols) - - self.assertRaises(exception.InvalidInput, - self.volume._sort_source_vols, - volumes, []) - - def _create_group_snapshot(self, group_id, volume_ids, size='0'): - """Create a group_snapshot object.""" - grpsnap = objects.GroupSnapshot(self.context) - grpsnap.user_id = fake.USER_ID - grpsnap.project_id = fake.PROJECT_ID - grpsnap.group_id = group_id - grpsnap.status = fields.GroupStatus.CREATING - grpsnap.create() - - # Create snapshot list - for volume_id in volume_ids: - snaps = [] - snap = objects.Snapshot(context.get_admin_context()) - snap.volume_size = size - snap.user_id = fake.USER_ID - snap.project_id = fake.PROJECT_ID - snap.volume_id = volume_id - snap.status = fields.SnapshotStatus.AVAILABLE - snap.group_snapshot_id = grpsnap.id - snap.create() - snaps.append(snap) - - return grpsnap, snaps - - @ddt.data((CONF.host, None), (CONF.host + 'fake', 'mycluster')) - @ddt.unpack - @mock.patch('cinder.tests.unit.fake_notifier.FakeNotifier._notify') - @mock.patch('cinder.volume.driver.VolumeDriver.create_group', - autospec=True, - return_value={'status': 'available'}) - @mock.patch('cinder.volume.driver.VolumeDriver.delete_group', - autospec=True, - return_value=({'status': 'deleted'}, [])) - @mock.patch('cinder.volume.driver.VolumeDriver.create_group_snapshot', - autospec=True, - return_value=({'status': 'available'}, [])) - @mock.patch('cinder.volume.driver.VolumeDriver.delete_group_snapshot', - autospec=True, - return_value=({'status': 'deleted'}, [])) - def test_create_delete_group_snapshot(self, host, cluster, - mock_del_grpsnap, - 
mock_create_grpsnap, - mock_del_grp, - _mock_create_grp, - mock_notify): - """Test group_snapshot can be created and deleted.""" - self.volume.cluster = cluster - group = tests_utils.create_group( - self.context, - cluster_name=cluster, - availability_zone=CONF.storage_availability_zone, - volume_type_ids=[fake.VOLUME_TYPE_ID], - group_type_id=fake.GROUP_TYPE_ID, - host=host) - volume = tests_utils.create_volume( - self.context, - group_id=group.id, - host=group.host, - cluster_name=group.cluster_name, - volume_type_id=fake.VOLUME_TYPE_ID) - self.volume.create_volume(self.context, volume) - - self.assert_notify_called(mock_notify, - (['INFO', 'volume.create.start'], - ['INFO', 'volume.create.end'])) - - group_snapshot_returns = self._create_group_snapshot(group.id, - [volume.id]) - group_snapshot = group_snapshot_returns[0] - self.volume.create_group_snapshot(self.context, group_snapshot) - self.assertEqual(group_snapshot.id, - objects.GroupSnapshot.get_by_id( - context.get_admin_context(), - group_snapshot.id).id) - - self.assert_notify_called(mock_notify, - (['INFO', 'volume.create.start'], - ['INFO', 'volume.create.end'], - ['INFO', 'group_snapshot.create.start'], - ['INFO', 'snapshot.create.start'], - ['INFO', 'group_snapshot.create.end'], - ['INFO', 'snapshot.create.end'])) - - self.volume.delete_group_snapshot(self.context, group_snapshot) - - self.assert_notify_called(mock_notify, - (['INFO', 'volume.create.start'], - ['INFO', 'volume.create.end'], - ['INFO', 'group_snapshot.create.start'], - ['INFO', 'snapshot.create.start'], - ['INFO', 'group_snapshot.create.end'], - ['INFO', 'snapshot.create.end'], - ['INFO', 'group_snapshot.delete.start'], - ['INFO', 'snapshot.delete.start'], - ['INFO', 'group_snapshot.delete.end'], - ['INFO', 'snapshot.delete.end'])) - - grpsnap = objects.GroupSnapshot.get_by_id( - context.get_admin_context(read_deleted='yes'), - group_snapshot.id) - self.assertEqual('deleted', grpsnap.status) - self.assertRaises(exception.NotFound, - 
objects.GroupSnapshot.get_by_id, - self.context, - group_snapshot.id) - - self.volume.delete_group(self.context, group) - - self.assertTrue(mock_create_grpsnap.called) - self.assertTrue(mock_del_grpsnap.called) - self.assertTrue(mock_del_grp.called) - - @mock.patch('cinder.volume.driver.VolumeDriver.create_group', - return_value={'status': 'available'}) - @mock.patch('cinder.volume.driver.VolumeDriver.delete_group', - return_value=({'status': 'deleted'}, [])) - def test_delete_group_correct_host(self, - mock_del_grp, - _mock_create_grp): - """Test group can be deleted. - - Test group can be deleted when volumes are on - the correct volume node. - """ - group = tests_utils.create_group( - self.context, - availability_zone=CONF.storage_availability_zone, - volume_type_ids=[fake.VOLUME_TYPE_ID], - group_type_id=fake.GROUP_TYPE_ID) - volume = tests_utils.create_volume( - self.context, - group_id=group.id, - host='host1@backend1#pool1', - status='creating', - volume_type_id=fake.VOLUME_TYPE_ID, - size=1) - self.volume.host = 'host1@backend1' - self.volume.create_volume(self.context, volume) - - self.volume.delete_group(self.context, group) - grp = objects.Group.get_by_id( - context.get_admin_context(read_deleted='yes'), - group.id) - self.assertEqual(fields.GroupStatus.DELETED, grp.status) - self.assertRaises(exception.NotFound, - objects.Group.get_by_id, - self.context, - group.id) - - self.assertTrue(mock_del_grp.called) - - @mock.patch('cinder.volume.driver.VolumeDriver.create_group', - mock.Mock(return_value={'status': 'available'})) - @mock.patch('cinder.volume.driver.VolumeDriver.delete_group', - return_value=({'status': 'deleted'}, [])) - def test_delete_group_cluster(self, mock_del_grp): - """Test group can be deleted on another service in the cluster.""" - cluster_name = 'cluster@backend1' - self.volume.host = 'host2@backend1' - self.volume.cluster = cluster_name - group = tests_utils.create_group( - self.context, - host=CONF.host + 'fake', - 
cluster_name=cluster_name, - availability_zone=CONF.storage_availability_zone, - volume_type_ids=[fake.VOLUME_TYPE_ID], - group_type_id=fake.GROUP_TYPE_ID) - volume = tests_utils.create_volume( - self.context, - group_id=group.id, - host='host1@backend1#pool1', - cluster_name=cluster_name, - status='creating', - volume_type_id=fake.VOLUME_TYPE_ID, - size=1) - self.volume.host = 'host2@backend1' - self.volume.create_volume(self.context, volume) - - self.volume.delete_group(self.context, group) - grp = objects.Group.get_by_id( - context.get_admin_context(read_deleted='yes'), - group.id) - self.assertEqual(fields.GroupStatus.DELETED, grp.status) - self.assertRaises(exception.NotFound, - objects.Group.get_by_id, - self.context, - group.id) - - self.assertTrue(mock_del_grp.called) - - @mock.patch('cinder.volume.driver.VolumeDriver.create_group', - return_value={'status': 'available'}) - def test_delete_group_wrong_host(self, *_mock_create_grp): - """Test group cannot be deleted. - - Test group cannot be deleted when volumes in the - group are not local to the volume node. 
- """ - group = tests_utils.create_group( - self.context, - availability_zone=CONF.storage_availability_zone, - volume_type_ids=[fake.VOLUME_TYPE_ID], - group_type_id=fake.GROUP_TYPE_ID) - volume = tests_utils.create_volume( - self.context, - group_id=group.id, - host='host1@backend1#pool1', - status='creating', - volume_type_id=fake.VOLUME_TYPE_ID, - size=1) - self.volume.host = 'host1@backend2' - self.volume.create_volume(self.context, volume) - - self.assertRaises(exception.Invalid, - self.volume.delete_group, - self.context, - group) - grp = objects.Group.get_by_id(self.context, group.id) - # Group is not deleted - self.assertEqual(fields.GroupStatus.AVAILABLE, grp.status) - - def test_create_volume_with_group_invalid_type(self): - """Test volume creation with group & invalid volume type.""" - vol_type = db.volume_type_create( - context.get_admin_context(), - dict(name=conf_fixture.def_vol_type, extra_specs={}) - ) - db_vol_type = db.volume_type_get(context.get_admin_context(), - vol_type.id) - - grp = tests_utils.create_group( - self.context, - availability_zone=CONF.storage_availability_zone, - status=fields.GroupStatus.AVAILABLE, - volume_type_ids=[db_vol_type['id']], - group_type_id=fake.GROUP_TYPE_ID, - host=CONF.host) - - fake_type = fake_volume.fake_volume_type_obj( - self.context, - id=fake.VOLUME_TYPE_ID, - name='fake') - - # Volume type must be provided when creating a volume in a - # group. - self.assertRaises(exception.InvalidInput, - self.volume_api.create, - self.context, 1, 'vol1', 'volume 1', - group=grp) - - # Volume type must be valid. 
- self.assertRaises(exception.InvalidInput, - self.volume_api.create, - self.context, 1, 'vol1', 'volume 1', - volume_type=fake_type, - group=grp) - - @mock.patch('cinder.volume.driver.VolumeDriver.create_group_snapshot', - autospec=True, - return_value=({'status': 'available'}, [])) - def test_create_group_snapshot_with_bootable_volumes(self, - mock_create_grpsnap): - """Test group_snapshot can be created and deleted.""" - group = tests_utils.create_group( - self.context, - availability_zone=CONF.storage_availability_zone, - volume_type_ids=[fake.VOLUME_TYPE_ID], - group_type_id=fake.GROUP_TYPE_ID, - host=CONF.host) - volume = tests_utils.create_volume( - self.context, - group_id=group.id, - host=group.host, - volume_type_id=fake.VOLUME_TYPE_ID) - self.volume.create_volume(self.context, volume) - # Create a bootable volume - bootable_vol_params = {'status': 'creating', 'host': CONF.host, - 'size': 1, 'bootable': True} - bootable_vol = tests_utils.create_volume(self.context, - group_id=group.id, - **bootable_vol_params) - # Create a common volume - self.volume.create_volume(self.context, bootable_vol) - - volume_ids = [volume.id, bootable_vol.id] - group_snapshot_returns = self._create_group_snapshot(group.id, - volume_ids) - group_snapshot = group_snapshot_returns[0] - self.volume.create_group_snapshot(self.context, group_snapshot) - self.assertEqual(group_snapshot.id, - objects.GroupSnapshot.get_by_id( - context.get_admin_context(), - group_snapshot.id).id) - self.assertTrue(mock_create_grpsnap.called) - - @mock.patch( - 'cinder.tests.fake_driver.FakeLoggingVolumeDriver.create_snapshot') - def test_create_group_snapshot_generic(self, mock_create_snap): - grp_snp = {'id': fake.GROUP_SNAPSHOT_ID, 'group_id': fake.GROUP_ID, - 'name': 'group snap 1'} - snp1 = {'id': fake.SNAPSHOT_ID, 'name': 'snap 1', - 'group_snapshot_id': fake.GROUP_SNAPSHOT_ID, - 'volume_id': fake.VOLUME_ID} - snp2 = {'id': fake.SNAPSHOT2_ID, 'name': 'snap 2', - 'group_snapshot_id': 
fake.GROUP_SNAPSHOT_ID, - 'volume_id': fake.VOLUME2_ID} - snp1_obj = fake_snapshot.fake_snapshot_obj(self.context, **snp1) - snp2_obj = fake_snapshot.fake_snapshot_obj(self.context, **snp2) - snapshots = [] - snapshots.append(snp1_obj) - snapshots.append(snp2_obj) - - driver_update = {'test_snap_key': 'test_val'} - mock_create_snap.return_value = driver_update - model_update, snapshot_model_updates = ( - self.volume._create_group_snapshot_generic( - self.context, grp_snp, snapshots)) - for update in snapshot_model_updates: - self.assertEqual(driver_update['test_snap_key'], - update['test_snap_key']) - - @mock.patch( - 'cinder.tests.fake_driver.FakeLoggingVolumeDriver.' - 'create_volume_from_snapshot') - @mock.patch( - 'cinder.tests.fake_driver.FakeLoggingVolumeDriver.' - 'create_cloned_volume') - def test_create_group_from_src_generic(self, mock_create_clone, - mock_create_vol_from_snap): - grp = {'id': fake.GROUP_ID, 'name': 'group 1'} - grp_snp = {'id': fake.GROUP_SNAPSHOT_ID, 'group_id': fake.GROUP_ID, - 'name': 'group snap 1'} - grp2 = {'id': fake.GROUP2_ID, 'name': 'group 2', - 'group_snapshot_id': fake.GROUP_SNAPSHOT_ID} - vol1 = {'id': fake.VOLUME_ID, 'name': 'volume 1', - 'group_id': fake.GROUP_ID} - vol2 = {'id': fake.VOLUME2_ID, 'name': 'volume 2', - 'group_id': fake.GROUP_ID} - snp1 = {'id': fake.SNAPSHOT_ID, 'name': 'snap 1', - 'group_snapshot_id': fake.GROUP_SNAPSHOT_ID, - 'volume_id': fake.VOLUME_ID} - snp2 = {'id': fake.SNAPSHOT2_ID, 'name': 'snap 2', - 'group_snapshot_id': fake.GROUP_SNAPSHOT_ID, - 'volume_id': fake.VOLUME2_ID} - snp1_obj = fake_snapshot.fake_snapshot_obj(self.context, **snp1) - snp2_obj = fake_snapshot.fake_snapshot_obj(self.context, **snp2) - snapshots = [] - snapshots.append(snp1_obj) - snapshots.append(snp2_obj) - vol3 = {'id': fake.VOLUME3_ID, 'name': 'volume 3', - 'snapshot_id': fake.SNAPSHOT_ID, - 'group_id': fake.GROUP2_ID} - vol4 = {'id': fake.VOLUME4_ID, 'name': 'volume 4', - 'snapshot_id': fake.SNAPSHOT2_ID, - 'group_id': 
fake.GROUP2_ID} - vol3_obj = fake_volume.fake_volume_obj(self.context, **vol3) - vol4_obj = fake_volume.fake_volume_obj(self.context, **vol4) - vols2 = [] - vols2.append(vol3_obj) - vols2.append(vol4_obj) - grp2_obj = fake_group.fake_group_obj(self.context, **grp2) - grp_snp_obj = fake_group_snapshot.fake_group_snapshot_obj( - self.context, **grp_snp) - - driver_update = {'test_key': 'test_val'} - mock_create_vol_from_snap.return_value = driver_update - model_update, vol_model_updates = ( - self.volume._create_group_from_src_generic( - self.context, grp2_obj, vols2, grp_snp_obj, snapshots)) - for update in vol_model_updates: - self.assertEqual(driver_update['test_key'], - update['test_key']) - - vol1_obj = fake_volume.fake_volume_obj(self.context, **vol1) - vol2_obj = fake_volume.fake_volume_obj(self.context, **vol2) - vols = [] - vols.append(vol1_obj) - vols.append(vol2_obj) - grp_obj = fake_group.fake_group_obj(self.context, **grp) - - grp3 = {'id': fake.GROUP3_ID, 'name': 'group 3', - 'source_group_id': fake.GROUP_ID} - grp3_obj = fake_group.fake_group_obj(self.context, **grp3) - vol5 = {'id': fake.VOLUME5_ID, 'name': 'volume 5', - 'source_volid': fake.VOLUME_ID, - 'group_id': fake.GROUP3_ID} - vol6 = {'id': fake.VOLUME6_ID, 'name': 'volume 6', - 'source_volid': fake.VOLUME2_ID, - 'group_id': fake.GROUP3_ID} - vol5_obj = fake_volume.fake_volume_obj(self.context, **vol5) - vol6_obj = fake_volume.fake_volume_obj(self.context, **vol6) - vols3 = [] - vols3.append(vol5_obj) - vols3.append(vol6_obj) - - driver_update = {'test_key2': 'test_val2'} - mock_create_clone.return_value = driver_update - model_update, vol_model_updates = ( - self.volume._create_group_from_src_generic( - self.context, grp3_obj, vols3, None, None, grp_obj, vols)) - for update in vol_model_updates: - self.assertEqual(driver_update['test_key2'], - update['test_key2']) diff --git a/cinder/tests/unit/group/test_groups_manager_replication.py 
b/cinder/tests/unit/group/test_groups_manager_replication.py deleted file mode 100644 index cf39d8b1d..000000000 --- a/cinder/tests/unit/group/test_groups_manager_replication.py +++ /dev/null @@ -1,133 +0,0 @@ -# Copyright (C) 2017 Dell Inc. or its subsidiaries. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import ddt -import mock -from oslo_config import cfg -from oslo_utils import importutils - -from cinder import context -from cinder import exception -from cinder import objects -from cinder.objects import fields -from cinder import quota -from cinder import test -from cinder.tests.unit import fake_constants as fake -from cinder.tests.unit import utils as tests_utils -from cinder.volume import api as volume_api -from cinder.volume import configuration as conf -from cinder.volume import driver -from cinder.volume import utils as volutils - -GROUP_QUOTAS = quota.GROUP_QUOTAS -CONF = cfg.CONF - - -@ddt.ddt -class GroupManagerTestCase(test.TestCase): - - def setUp(self): - super(GroupManagerTestCase, self).setUp() - self.volume = importutils.import_object(CONF.volume_manager) - self.configuration = mock.Mock(conf.Configuration) - self.context = context.get_admin_context() - self.context.user_id = fake.USER_ID - self.project_id = fake.PROJECT3_ID - self.context.project_id = self.project_id - self.volume.driver.set_initialized() - self.volume.stats = {'allocated_capacity_gb': 0, - 'pools': {}} - self.volume_api = volume_api.API() - - 
@mock.patch.object(GROUP_QUOTAS, "reserve", - return_value=["RESERVATION"]) - @mock.patch.object(GROUP_QUOTAS, "commit") - @mock.patch.object(GROUP_QUOTAS, "rollback") - @mock.patch.object(driver.VolumeDriver, - "delete_group", - return_value=({'status': ( - fields.GroupStatus.DELETED)}, [])) - @mock.patch.object(driver.VolumeDriver, - "enable_replication", - return_value=(None, [])) - @mock.patch.object(driver.VolumeDriver, - "disable_replication", - return_value=(None, [])) - @mock.patch.object(driver.VolumeDriver, - "failover_replication", - return_value=(None, [])) - def test_replication_group(self, fake_failover_rep, fake_disable_rep, - fake_enable_rep, fake_delete_grp, - fake_rollback, fake_commit, fake_reserve): - """Test enable, disable, and failover replication for group.""" - - def fake_driver_create_grp(context, group): - """Make sure that the pool is part of the host.""" - self.assertIn('host', group) - host = group.host - pool = volutils.extract_host(host, level='pool') - self.assertEqual('fakepool', pool) - return {'status': fields.GroupStatus.AVAILABLE, - 'replication_status': fields.ReplicationStatus.DISABLING} - - self.mock_object(self.volume.driver, 'create_group', - fake_driver_create_grp) - - group = tests_utils.create_group( - self.context, - availability_zone=CONF.storage_availability_zone, - volume_type_ids=[fake.VOLUME_TYPE_ID], - host='fakehost@fakedrv#fakepool', - group_type_id=fake.GROUP_TYPE_ID) - group = objects.Group.get_by_id(self.context, group.id) - self.volume.create_group(self.context, group) - self.assertEqual( - group.id, - objects.Group.get_by_id(context.get_admin_context(), - group.id).id) - - self.volume.disable_replication(self.context, group) - group = objects.Group.get_by_id( - context.get_admin_context(), group.id) - self.assertEqual(fields.ReplicationStatus.DISABLED, - group.replication_status) - - group.replication_status = fields.ReplicationStatus.ENABLING - group.save() - self.volume.enable_replication(self.context, 
group) - group = objects.Group.get_by_id( - context.get_admin_context(), group.id) - self.assertEqual(fields.ReplicationStatus.ENABLED, - group.replication_status) - - group.replication_status = fields.ReplicationStatus.FAILING_OVER - group.save() - self.volume.failover_replication(self.context, group) - group = objects.Group.get_by_id( - context.get_admin_context(), group.id) - self.assertEqual(fields.ReplicationStatus.FAILED_OVER, - group.replication_status) - - targets = self.volume.list_replication_targets(self.context, group) - self.assertIn('replication_targets', targets) - - self.volume.delete_group(self.context, group) - grp = objects.Group.get_by_id( - context.get_admin_context(read_deleted='yes'), group.id) - self.assertEqual(fields.GroupStatus.DELETED, grp.status) - self.assertRaises(exception.NotFound, - objects.Group.get_by_id, - self.context, - group.id) diff --git a/cinder/tests/unit/image/__init__.py b/cinder/tests/unit/image/__init__.py deleted file mode 100644 index e69de29bb..000000000 diff --git a/cinder/tests/unit/image/fake.py b/cinder/tests/unit/image/fake.py deleted file mode 100644 index ef439385b..000000000 --- a/cinder/tests/unit/image/fake.py +++ /dev/null @@ -1,253 +0,0 @@ -# Copyright 2011 Justin Santa Barbara -# Copyright 2012 OpenStack Foundation -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -"""Implementation of a fake image service.""" - -import copy -import datetime -import mock -import uuid - -from cinder import exception -import cinder.image.glance -from cinder.tests.unit import fake_constants - - -class _FakeImageService(object): - """Mock (fake) image service for unit testing.""" - - def __init__(self): - self.images = {} - # NOTE(justinsb): The OpenStack API can't upload an image? - # So, make sure we've got one.. - timestamp = datetime.datetime(2011, 1, 1, 1, 2, 3) - - image1 = {'id': fake_constants.IMAGE_ID, - 'name': 'fakeimage123456', - 'created_at': timestamp, - 'updated_at': timestamp, - 'deleted_at': None, - 'deleted': False, - 'status': 'active', - 'visibility': 'private', - 'protected': False, - 'container_format': 'raw', - 'disk_format': 'raw', - 'properties': {'kernel_id': 'nokernel', - 'ramdisk_id': 'nokernel', - 'architecture': 'x86_64'}, - 'size': 12345678} - - image2 = {'id': 'a2459075-d96c-40d5-893e-577ff92e721c', - 'name': 'fakeimage123456', - 'created_at': timestamp, - 'updated_at': timestamp, - 'deleted_at': None, - 'deleted': False, - 'status': 'active', - 'visibility': 'public', - 'protected': True, - 'container_format': 'ami', - 'disk_format': 'ami', - 'properties': {'kernel_id': 'nokernel', - 'ramdisk_id': 'nokernel'}, - 'size': 1} - - image3 = {'id': '76fa36fc-c930-4bf3-8c8a-ea2a2420deb6', - 'name': 'fakeimage123456', - 'created_at': timestamp, - 'updated_at': timestamp, - 'deleted_at': None, - 'deleted': False, - 'status': 'active', - 'visibility': 'public', - 'protected': True, - 'container_format': None, - 'disk_format': None, - 'properties': {'kernel_id': 'nokernel', - 'ramdisk_id': 'nokernel'}, - 'size': 1000000000000} - - image4 = {'id': 'cedef40a-ed67-4d10-800e-17455edce175', - 'name': 'fakeimage123456', - 'created_at': timestamp, - 'updated_at': timestamp, - 'deleted_at': None, - 'deleted': False, - 'status': 'active', - 'visibility': 'public', - 'protected': True, - 'container_format': 'ami', - 'disk_format': 
'ami', - 'properties': {'kernel_id': 'nokernel', - 'ramdisk_id': 'nokernel'}, - 'size': 20000000} - - image5 = {'id': 'c905cedb-7281-47e4-8a62-f26bc5fc4c77', - 'name': 'fakeimage123456', - 'created_at': timestamp, - 'updated_at': timestamp, - 'deleted_at': None, - 'deleted': False, - 'size': 1024, - 'status': 'active', - 'visibility': 'public', - 'protected': True, - 'container_format': 'ami', - 'disk_format': 'ami', - 'properties': { - 'kernel_id': '155d900f-4e14-4e4c-a73d-069cbf4541e6', - 'ramdisk_id': None}, - 'size': 50000} - - image6 = {'id': 'a440c04b-79fa-479c-bed1-0b816eaec379', - 'name': 'fakeimage6', - 'created_at': timestamp, - 'updated_at': timestamp, - 'deleted_at': None, - 'deleted': False, - 'status': 'active', - 'visibility': 'public', - 'protected': False, - 'container_format': 'ova', - 'disk_format': 'vhd', - 'properties': {'kernel_id': 'nokernel', - 'ramdisk_id': 'nokernel', - 'architecture': 'x86_64', - 'auto_disk_config': 'False'}, - 'size': 7777777} - - image7 = {'id': '70a599e0-31e7-49b7-b260-868f441e862b', - 'name': 'fakeimage7', - 'created_at': timestamp, - 'updated_at': timestamp, - 'deleted_at': None, - 'deleted': False, - 'status': 'active', - 'visibility': 'public', - 'protected': False, - 'container_format': 'ova', - 'disk_format': 'vhd', - 'properties': {'kernel_id': 'nokernel', - 'ramdisk_id': 'nokernel', - 'architecture': 'x86_64', - 'auto_disk_config': 'True'}, - 'size': 1234000000} - - self.create(None, image1) - self.create(None, image2) - self.create(None, image3) - self.create(None, image4) - self.create(None, image5) - self.create(None, image6) - self.create(None, image7) - self._imagedata = {} - self.temp_images = mock.MagicMock() - super(_FakeImageService, self).__init__() - - # TODO(bcwaldon): implement optional kwargs such as limit, sort_dir - def detail(self, context, **kwargs): - """Return list of detailed image information.""" - return copy.deepcopy(self.images.values()) - - def download(self, context, image_id, data): 
- self.show(context, image_id) - data.write(self._imagedata.get(image_id, '')) - - def show(self, context, image_id): - """Get data about specified image. - - Returns a dict containing image data for the given opaque image id. - - """ - image = self.images.get(str(image_id)) - if image: - return copy.deepcopy(image) - raise exception.ImageNotFound(image_id=image_id) - - def create(self, context, metadata, data=None): - """Store the image data and return the new image id. - - :raises Duplicate: if the image already exist. - - """ - image_id = str(metadata.get('id', uuid.uuid4())) - metadata['id'] = image_id - if image_id in self.images: - raise exception.Duplicate() - self.images[image_id] = copy.deepcopy(metadata) - if data: - self._imagedata[image_id] = data.read() - return self.images[image_id] - - def update(self, context, image_id, metadata, data=None, - purge_props=False): - """Replace the contents of the given image with the new data. - - :raises ImageNotFound: if the image does not exist. - - """ - if not self.images.get(image_id): - raise exception.ImageNotFound(image_id=image_id) - if purge_props: - self.images[image_id] = copy.deepcopy(metadata) - else: - image = self.images[image_id] - try: - image['properties'].update(metadata.pop('properties')) - except Exception: - pass - image.update(metadata) - return self.images[image_id] - - def delete(self, context, image_id): - """Delete the given image. - - :raises ImageNotFound: if the image does not exist. 
- - """ - removed = self.images.pop(image_id, None) - if not removed: - raise exception.ImageNotFound(image_id=image_id) - - def get_location(self, context, image_id): - if image_id in self.images: - return 'fake_location' - return None - - def add_location(self, context, image_id, url, metadata): - self.update(context, image_id, {'locations': [{'url': url, - 'metadata': metadata}]}) - return True - - -_fakeImageService = _FakeImageService() - - -def FakeImageService(): - return _fakeImageService - - -def FakeImageService_reset(): - global _fakeImageService - _fakeImageService = _FakeImageService() - - -def mock_image_service(testcase): - testcase.mock_object(cinder.image.glance, 'get_remote_image_service', - lambda x, y: (FakeImageService(), y)) - testcase.mock_object(cinder.image.glance, 'get_default_image_service', - mock.Mock(side_effect=FakeImageService)) diff --git a/cinder/tests/unit/image/test_cache.py b/cinder/tests/unit/image/test_cache.py deleted file mode 100644 index 2e19a4b7b..000000000 --- a/cinder/tests/unit/image/test_cache.py +++ /dev/null @@ -1,307 +0,0 @@ -# Copyright (C) 2015 Pure Storage, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -from datetime import timedelta -import ddt -import mock - -from oslo_utils import timeutils - -from cinder import context as ctxt -from cinder.db.sqlalchemy import models -from cinder.image import cache as image_cache -from cinder import objects -from cinder import test -from cinder.tests.unit import fake_constants as fake - - -@ddt.ddt -class ImageVolumeCacheTestCase(test.TestCase): - - def setUp(self): - super(ImageVolumeCacheTestCase, self).setUp() - self.mock_db = mock.Mock() - self.mock_volume_api = mock.Mock() - self.context = ctxt.get_admin_context() - self.volume = models.Volume() - vol_params = {'id': fake.VOLUME_ID, - 'host': 'foo@bar#whatever', - 'cluster_name': 'cluster', - 'size': 0} - self.volume.update(vol_params) - self.volume_ovo = objects.Volume(self.context, **vol_params) - - def _build_cache(self, max_gb=0, max_count=0): - cache = image_cache.ImageVolumeCache(self.mock_db, - self.mock_volume_api, - max_gb, - max_count) - cache.notifier = self.notifier - return cache - - def _build_entry(self, size=10): - entry = { - 'id': 1, - 'host': 'test@foo#bar', - 'cluster_name': 'cluster@foo#bar', - 'image_id': 'c7a8b8d4-e519-46c7-a0df-ddf1b9b9fff2', - 'image_updated_at': timeutils.utcnow(with_timezone=True), - 'volume_id': '70a599e0-31e7-49b7-b260-868f441e862b', - 'size': size, - 'last_used': timeutils.utcnow(with_timezone=True) - } - return entry - - def test_get_by_image_volume(self): - cache = self._build_cache() - ret = {'id': 1} - volume_id = '70a599e0-31e7-49b7-b260-868f441e862b' - self.mock_db.image_volume_cache_get_by_volume_id.return_value = ret - entry = cache.get_by_image_volume(self.context, volume_id) - self.assertEqual(ret, entry) - - self.mock_db.image_volume_cache_get_by_volume_id.return_value = None - entry = cache.get_by_image_volume(self.context, volume_id) - self.assertIsNone(entry) - - def test_evict(self): - cache = self._build_cache() - entry = self._build_entry() - cache.evict(self.context, entry) - 
self.mock_db.image_volume_cache_delete.assert_called_once_with( - self.context, - entry['volume_id'] - ) - - msg = self.notifier.notifications[0] - self.assertEqual('image_volume_cache.evict', msg['event_type']) - self.assertEqual('INFO', msg['priority']) - self.assertEqual(entry['host'], msg['payload']['host']) - self.assertEqual(entry['image_id'], msg['payload']['image_id']) - self.assertEqual(1, len(self.notifier.notifications)) - - @ddt.data(True, False) - def test_get_entry(self, clustered): - cache = self._build_cache() - entry = self._build_entry() - image_meta = { - 'is_public': True, - 'owner': '70a599e0-31e7-49b7-b260-868f441e862b', - 'properties': { - 'virtual_size': '1.7' - }, - 'updated_at': entry['image_updated_at'] - } - (self.mock_db. - image_volume_cache_get_and_update_last_used.return_value) = entry - if not clustered: - self.volume_ovo.cluster_name = None - expect = {'host': self.volume.host} - else: - expect = {'cluster_name': self.volume.cluster_name} - found_entry = cache.get_entry(self.context, - self.volume_ovo, - entry['image_id'], - image_meta) - self.assertDictEqual(entry, found_entry) - (self.mock_db. - image_volume_cache_get_and_update_last_used.assert_called_once_with)( - self.context, - entry['image_id'], - **expect - ) - - msg = self.notifier.notifications[0] - self.assertEqual('image_volume_cache.hit', msg['event_type']) - self.assertEqual('INFO', msg['priority']) - self.assertEqual(entry['host'], msg['payload']['host']) - self.assertEqual(entry['image_id'], msg['payload']['image_id']) - self.assertEqual(1, len(self.notifier.notifications)) - - def test_get_entry_not_exists(self): - cache = self._build_cache() - image_meta = { - 'is_public': True, - 'owner': '70a599e0-31e7-49b7-b260-868f441e862b', - 'properties': { - 'virtual_size': '1.7' - }, - 'updated_at': timeutils.utcnow(with_timezone=True) - } - image_id = 'c7a8b8d4-e519-46c7-a0df-ddf1b9b9fff2' - (self.mock_db. 
- image_volume_cache_get_and_update_last_used.return_value) = None - - found_entry = cache.get_entry(self.context, - self.volume_ovo, - image_id, - image_meta) - - self.assertIsNone(found_entry) - - msg = self.notifier.notifications[0] - self.assertEqual('image_volume_cache.miss', msg['event_type']) - self.assertEqual('INFO', msg['priority']) - self.assertEqual(self.volume.host, msg['payload']['host']) - self.assertEqual(image_id, msg['payload']['image_id']) - self.assertEqual(1, len(self.notifier.notifications)) - - @mock.patch('cinder.objects.Volume.get_by_id') - def test_get_entry_needs_update(self, mock_volume_by_id): - cache = self._build_cache() - entry = self._build_entry() - image_meta = { - 'is_public': True, - 'owner': '70a599e0-31e7-49b7-b260-868f441e862b', - 'properties': { - 'virtual_size': '1.7' - }, - 'updated_at': entry['image_updated_at'] + timedelta(hours=2) - } - (self.mock_db. - image_volume_cache_get_and_update_last_used.return_value) = entry - - mock_volume = mock.MagicMock() - mock_volume_by_id.return_value = mock_volume - - found_entry = cache.get_entry(self.context, - self.volume_ovo, - entry['image_id'], - image_meta) - - # Expect that the cache entry is not returned and the image-volume - # for it is deleted. 
- self.assertIsNone(found_entry) - self.mock_volume_api.delete.assert_called_with(self.context, - mock_volume) - msg = self.notifier.notifications[0] - self.assertEqual('image_volume_cache.miss', msg['event_type']) - self.assertEqual('INFO', msg['priority']) - self.assertEqual(self.volume.host, msg['payload']['host']) - self.assertEqual(entry['image_id'], msg['payload']['image_id']) - self.assertEqual(1, len(self.notifier.notifications)) - - def test_create_cache_entry(self): - cache = self._build_cache() - entry = self._build_entry() - image_meta = { - 'updated_at': entry['image_updated_at'] - } - self.mock_db.image_volume_cache_create.return_value = entry - created_entry = cache.create_cache_entry(self.context, - self.volume_ovo, - entry['image_id'], - image_meta) - self.assertEqual(entry, created_entry) - self.mock_db.image_volume_cache_create.assert_called_once_with( - self.context, - self.volume_ovo.host, - self.volume_ovo.cluster_name, - entry['image_id'], - entry['image_updated_at'].replace(tzinfo=None), - self.volume_ovo.id, - self.volume_ovo.size - ) - - def test_ensure_space_unlimited(self): - cache = self._build_cache(max_gb=0, max_count=0) - has_space = cache.ensure_space(self.context, self.volume) - self.assertTrue(has_space) - - self.volume.size = 500 - has_space = cache.ensure_space(self.context, self.volume) - self.assertTrue(has_space) - - def test_ensure_space_no_entries(self): - cache = self._build_cache(max_gb=100, max_count=10) - self.mock_db.image_volume_cache_get_all.return_value = [] - - self.volume_ovo.size = 5 - has_space = cache.ensure_space(self.context, self.volume_ovo) - self.assertTrue(has_space) - - self.volume_ovo.size = 101 - has_space = cache.ensure_space(self.context, self.volume_ovo) - self.assertFalse(has_space) - - def test_ensure_space_need_gb(self): - cache = self._build_cache(max_gb=30, max_count=10) - mock_delete = mock.patch.object(cache, '_delete_image_volume').start() - - entries = [] - entry1 = 
self._build_entry(size=12) - entries.append(entry1) - entry2 = self._build_entry(size=5) - entries.append(entry2) - entry3 = self._build_entry(size=10) - entries.append(entry3) - self.mock_db.image_volume_cache_get_all.return_value = entries - - self.volume_ovo.size = 15 - has_space = cache.ensure_space(self.context, self.volume_ovo) - self.assertTrue(has_space) - self.assertEqual(2, mock_delete.call_count) - mock_delete.assert_any_call(self.context, entry2) - mock_delete.assert_any_call(self.context, entry3) - self.mock_db.image_volume_cache_get_all.assert_called_with( - self.context, cluster_name=self.volume_ovo.cluster_name) - - def test_ensure_space_need_count(self): - cache = self._build_cache(max_gb=30, max_count=2) - mock_delete = mock.patch.object(cache, '_delete_image_volume').start() - - entries = [] - entry1 = self._build_entry(size=10) - entries.append(entry1) - entry2 = self._build_entry(size=5) - entries.append(entry2) - self.mock_db.image_volume_cache_get_all.return_value = entries - - self.volume_ovo.size = 12 - has_space = cache.ensure_space(self.context, self.volume_ovo) - self.assertTrue(has_space) - self.assertEqual(1, mock_delete.call_count) - mock_delete.assert_any_call(self.context, entry2) - - def test_ensure_space_need_gb_and_count(self): - cache = self._build_cache(max_gb=30, max_count=3) - mock_delete = mock.patch.object(cache, '_delete_image_volume').start() - - entries = [] - entry1 = self._build_entry(size=10) - entries.append(entry1) - entry2 = self._build_entry(size=5) - entries.append(entry2) - entry3 = self._build_entry(size=12) - entries.append(entry3) - self.mock_db.image_volume_cache_get_all.return_value = entries - - self.volume_ovo.size = 16 - has_space = cache.ensure_space(self.context, self.volume_ovo) - self.assertTrue(has_space) - self.assertEqual(2, mock_delete.call_count) - mock_delete.assert_any_call(self.context, entry2) - mock_delete.assert_any_call(self.context, entry3) - - def 
test_ensure_space_cant_free_enough_gb(self): - cache = self._build_cache(max_gb=30, max_count=10) - mock_delete = mock.patch.object(cache, '_delete_image_volume').start() - - entries = list(self._build_entry(size=25)) - self.mock_db.image_volume_cache_get_all.return_value = entries - - self.volume_ovo.size = 50 - has_space = cache.ensure_space(self.context, self.volume_ovo) - self.assertFalse(has_space) - mock_delete.assert_not_called() diff --git a/cinder/tests/unit/image/test_glance.py b/cinder/tests/unit/image/test_glance.py deleted file mode 100644 index 0724450a6..000000000 --- a/cinder/tests/unit/image/test_glance.py +++ /dev/null @@ -1,954 +0,0 @@ -# Copyright 2011 OpenStack Foundation -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- - -import datetime -import itertools - -import ddt -import glanceclient.exc -import mock -from oslo_config import cfg - -from cinder import context -from cinder import exception -from cinder.image import glance -from cinder import test -from cinder.tests.unit.glance import stubs as glance_stubs - - -CONF = cfg.CONF - - -class NullWriter(object): - """Used to test ImageService.get which takes a writer object.""" - - def write(self, *arg, **kwargs): - pass - - -class TestGlanceSerializer(test.TestCase): - def test_serialize(self): - metadata = {'name': 'image1', - 'visibility': 'public', - 'protected': True, - 'foo': 'bar', - 'properties': { - 'prop1': 'propvalue1', - 'mappings': [ - {'device': 'bbb'}, - {'device': 'yyy'}], - 'block_device_mapping': [ - {'device_name': '/dev/fake'}, - {'device_name': '/dev/fake0'}]}} - - converted_expected = { - 'name': 'image1', - 'visibility': 'public', - 'protected': True, - 'foo': 'bar', - 'properties': { - 'prop1': 'propvalue1', - 'mappings': - '[{"device": "bbb"}, ' - '{"device": "yyy"}]', - 'block_device_mapping': - '[{"device_name": "/dev/fake"}, ' - '{"device_name": "/dev/fake0"}]'}} - converted = glance._convert_to_string(metadata) - self.assertEqual(converted_expected, converted) - self.assertEqual(metadata, glance._convert_from_string(converted)) - - -@ddt.ddt -class TestGlanceImageService(test.TestCase): - """Tests the Glance image service. - - At a high level, the translations involved are: - - 1. Glance -> ImageService - This is needed so we can support - multiple ImageServices (Glance, Local, etc) - - 2. 
ImageService -> API - This is needed so we can support multiple - APIs (OpenStack, EC2) - - """ - NOW_GLANCE_OLD_FORMAT = "2010-10-11T10:30:22" - NOW_GLANCE_FORMAT = "2010-10-11T10:30:22.000000" - - class tzinfo(datetime.tzinfo): - @staticmethod - def utcoffset(*args, **kwargs): - return datetime.timedelta() - - NOW_DATETIME = datetime.datetime(2010, 10, 11, 10, 30, 22, tzinfo=tzinfo()) - - def setUp(self): - super(TestGlanceImageService, self).setUp() - - client = glance_stubs.StubGlanceClient() - service_catalog = [{u'type': u'image', u'name': u'glance', - u'endpoints': [{ - u'publicURL': u'http://example.com:9292'}]}] - self.service = self._create_image_service(client) - self.context = context.RequestContext('fake', 'fake', auth_token=True) - self.context.service_catalog = service_catalog - self.mock_object(glance.time, 'sleep', return_value=None) - - def _create_image_service(self, client): - def _fake_create_glance_client(context, netloc, use_ssl, version): - return client - - self.mock_object(glance, '_create_glance_client', - _fake_create_glance_client) - - client_wrapper = glance.GlanceClientWrapper('fake', 'fake_host', 9292) - return glance.GlanceImageService(client=client_wrapper) - - @staticmethod - def _make_fixture(**kwargs): - fixture = {'name': None, - 'properties': {}, - 'status': None, - 'visibility': None, - 'protected': None} - fixture.update(kwargs) - return fixture - - def _make_datetime_fixture(self): - return self._make_fixture(created_at=self.NOW_GLANCE_FORMAT, - updated_at=self.NOW_GLANCE_FORMAT, - deleted_at=self.NOW_GLANCE_FORMAT) - - def test_get_api_servers(self): - result = glance.get_api_servers(self.context) - expected = (u'example.com:9292', False) - self.assertEqual(expected, next(result)) - - def test_get_api_servers_not_mounted_at_root_and_ssl(self): - service_catalog = [{u'type': u'image', u'name': u'glance', - u'endpoints': [{ - u'publicURL': u'https://example.com/image'}]}] - self.context = context.RequestContext('fake', 
'fake', auth_token=True) - self.context.service_catalog = service_catalog - result = glance.get_api_servers(self.context) - expected = (u'example.com/image', True) - self.assertEqual(expected, next(result)) - - def test_create_with_instance_id(self): - """Ensure instance_id is persisted as an image-property.""" - fixture = {'name': 'test image', - 'is_public': False, - 'protected': False, - 'properties': {'instance_id': '42', 'user_id': 'fake'}} - - image_id = self.service.create(self.context, fixture)['id'] - image_meta = self.service.show(self.context, image_id) - expected = { - 'id': image_id, - 'name': 'test image', - 'protected': False, - 'size': None, - 'min_disk': None, - 'min_ram': None, - 'disk_format': None, - 'container_format': None, - 'checksum': None, - 'created_at': self.NOW_DATETIME, - 'updated_at': self.NOW_DATETIME, - 'deleted': None, - 'status': None, - 'properties': {'instance_id': '42', 'is_public': False, - 'user_id': 'fake'}, - 'owner': None, - 'visibility': None, - } - self.assertDictEqual(expected, image_meta) - - image_metas = self.service.detail(self.context) - self.assertDictEqual(expected, image_metas[0]) - - def test_create_without_instance_id(self): - """Test Creating images without instance_id. - - Ensure we can create an image without having to specify an - instance_id. Public images are an example of an image not tied to an - instance. 
- """ - fixture = {'name': 'test image', 'is_public': False, - 'protected': False} - image_id = self.service.create(self.context, fixture)['id'] - - expected = { - 'id': image_id, - 'name': 'test image', - 'protected': False, - 'size': None, - 'min_disk': None, - 'min_ram': None, - 'disk_format': None, - 'container_format': None, - 'checksum': None, - 'created_at': self.NOW_DATETIME, - 'updated_at': self.NOW_DATETIME, - 'deleted': None, - 'status': None, - 'properties': {'is_public': False}, - 'owner': None, - 'visibility': None, - } - actual = self.service.show(self.context, image_id) - self.assertDictEqual(expected, actual) - - def test_create(self): - fixture = self._make_fixture(name='test image') - num_images = len(self.service.detail(self.context)) - image_id = self.service.create(self.context, fixture)['id'] - - self.assertIsNotNone(image_id) - self.assertEqual(num_images + 1, - len(self.service.detail(self.context))) - - def test_create_and_show_non_existing_image(self): - fixture = self._make_fixture(name='test image') - image_id = self.service.create(self.context, fixture)['id'] - - self.assertIsNotNone(image_id) - self.assertRaises(exception.ImageNotFound, - self.service.show, - self.context, - 'bad image id') - - def test_detail_private_image(self): - fixture = self._make_fixture(name='test image') - fixture['visibility'] = 'private' - fixture['protected'] = False - properties = {'owner_id': 'proj1'} - fixture['properties'] = properties - - self.service.create(self.context, fixture) - - proj = self.context.project_id - self.context.project_id = 'proj1' - - image_metas = self.service.detail(self.context) - - self.context.project_id = proj - - self.assertEqual(1, len(image_metas)) - self.assertEqual('test image', image_metas[0]['name']) - self.assertEqual('private', image_metas[0]['visibility']) - - def test_detail_v1(self): - """Confirm we send is_public = None as default when using Glance v1.""" - self.override_config('glance_api_version', 1) - with 
mock.patch.object(self.service, '_client') as client_mock: - client_mock.return_value = [] - result = self.service.detail(self.context) - self.assertListEqual([], result) - client_mock.call.assert_called_once_with(self.context, 'list', - filters={'is_public': 'none'}) - - def test_detail_v2(self): - """Check we don't send is_public key by default with Glance v2.""" - self.override_config('glance_api_version', 2) - with mock.patch.object(self.service, '_client') as client_mock: - client_mock.return_value = [] - result = self.service.detail(self.context) - self.assertListEqual([], result) - client_mock.call.assert_called_once_with(self.context, 'list') - - def test_detail_marker(self): - fixtures = [] - ids = [] - for i in range(10): - fixture = self._make_fixture(name='TestImage %d' % (i)) - fixtures.append(fixture) - ids.append(self.service.create(self.context, fixture)['id']) - - image_metas = self.service.detail(self.context, marker=ids[1]) - self.assertEqual(8, len(image_metas)) - i = 2 - for meta in image_metas: - expected = { - 'id': ids[i], - 'status': None, - 'protected': None, - 'name': 'TestImage %d' % (i), - 'properties': {'properties': {}}, - 'size': None, - 'min_disk': None, - 'min_ram': None, - 'disk_format': None, - 'container_format': None, - 'checksum': None, - 'created_at': self.NOW_DATETIME, - 'updated_at': self.NOW_DATETIME, - 'deleted': None, - 'owner': None, - 'visibility': None, - } - - self.assertDictEqual(expected, meta) - i = i + 1 - - def test_detail_limit(self): - fixtures = [] - ids = [] - for i in range(10): - fixture = self._make_fixture(name='TestImage %d' % (i)) - fixtures.append(fixture) - ids.append(self.service.create(self.context, fixture)['id']) - - image_metas = self.service.detail(self.context, limit=5) - self.assertEqual(5, len(image_metas)) - - def test_detail_default_limit(self): - fixtures = [] - ids = [] - for i in range(10): - fixture = self._make_fixture(name='TestImage %d' % (i)) - fixtures.append(fixture) - 
ids.append(self.service.create(self.context, fixture)['id']) - - image_metas = self.service.detail(self.context) - for i, meta in enumerate(image_metas): - self.assertEqual(meta['name'], 'TestImage %d' % (i)) - - def test_detail_marker_and_limit(self): - fixtures = [] - ids = [] - for i in range(10): - fixture = self._make_fixture(name='TestImage %d' % (i)) - fixtures.append(fixture) - ids.append(self.service.create(self.context, fixture)['id']) - - image_metas = self.service.detail(self.context, marker=ids[3], limit=5) - self.assertEqual(5, len(image_metas)) - i = 4 - for meta in image_metas: - expected = { - 'id': ids[i], - 'status': None, - 'protected': None, - 'name': 'TestImage %d' % (i), - 'properties': {'properties': {}}, - 'size': None, - 'min_disk': None, - 'min_ram': None, - 'disk_format': None, - 'container_format': None, - 'checksum': None, - 'created_at': self.NOW_DATETIME, - 'updated_at': self.NOW_DATETIME, - 'deleted': None, - 'owner': None, - 'visibility': None, - } - self.assertDictEqual(expected, meta) - i = i + 1 - - def test_detail_invalid_marker(self): - fixtures = [] - ids = [] - for i in range(10): - fixture = self._make_fixture(name='TestImage %d' % (i)) - fixtures.append(fixture) - ids.append(self.service.create(self.context, fixture)['id']) - - self.assertRaises(exception.Invalid, self.service.detail, - self.context, marker='invalidmarker') - - def test_update(self): - fixture = self._make_fixture(name='test image') - image = self.service.create(self.context, fixture) - image_id = image['id'] - fixture['name'] = 'new image name' - self.service.update(self.context, image_id, fixture) - - new_image_data = self.service.show(self.context, image_id) - self.assertEqual('new image name', new_image_data['name']) - - def test_update_v2(self): - self.flags(glance_api_version=2) - self.test_update() - - def test_update_with_data(self): - fixture = self._make_fixture(name='test image') - image = self.service.create(self.context, fixture) - image_id = 
image['id'] - fixture['name'] = 'new image name' - data = '*' * 256 - self.service.update(self.context, image_id, fixture, data=data) - - new_image_data = self.service.show(self.context, image_id) - self.assertEqual(256, new_image_data['size']) - self.assertEqual('new image name', new_image_data['name']) - - def test_update_with_data_v2(self): - self.flags(glance_api_version=2) - self.test_update_with_data() - - @mock.patch.object(glance.GlanceImageService, '_translate_from_glance') - @mock.patch.object(glance.GlanceImageService, 'show') - @ddt.data(1, 2) - def test_update_purge_props(self, ver, show, translate_from_glance): - self.flags(glance_api_version=ver) - - image_id = mock.sentinel.image_id - client = mock.Mock(call=mock.Mock()) - service = glance.GlanceImageService(client=client) - - image_meta = {'properties': {'k1': 'v1'}} - client.call.return_value = {'k1': 'v1'} - if ver == 2: - show.return_value = {'properties': {'k2': 'v2'}} - translate_from_glance.return_value = image_meta.copy() - - ret = service.update(self.context, image_id, image_meta) - self.assertDictEqual(image_meta, ret) - if ver == 2: - client.call.assert_called_once_with( - self.context, 'update', image_id, k1='v1', remove_props=['k2']) - else: - client.call.assert_called_once_with( - self.context, 'update', image_id, properties={'k1': 'v1'}, - purge_props=True) - translate_from_glance.assert_called_once_with(self.context, - {'k1': 'v1'}) - - def test_delete(self): - fixture1 = self._make_fixture(name='test image 1') - fixture2 = self._make_fixture(name='test image 2') - fixtures = [fixture1, fixture2] - - num_images = len(self.service.detail(self.context)) - self.assertEqual(0, num_images) - - ids = [] - for fixture in fixtures: - new_id = self.service.create(self.context, fixture)['id'] - ids.append(new_id) - - num_images = len(self.service.detail(self.context)) - self.assertEqual(2, num_images) - - self.service.delete(self.context, ids[0]) - - num_images = 
len(self.service.detail(self.context)) - self.assertEqual(1, num_images) - - def test_show_passes_through_to_client(self): - fixture = self._make_fixture(name='image1', is_public=True) - image_id = self.service.create(self.context, fixture)['id'] - - image_meta = self.service.show(self.context, image_id) - expected = { - 'id': image_id, - 'name': 'image1', - 'protected': None, - 'size': None, - 'min_disk': None, - 'min_ram': None, - 'disk_format': None, - 'container_format': None, - 'checksum': None, - 'created_at': self.NOW_DATETIME, - 'updated_at': self.NOW_DATETIME, - 'deleted': None, - 'status': None, - 'properties': {'is_public': True, 'properties': {}}, - 'owner': None, - 'visibility': None - } - self.assertEqual(expected, image_meta) - - def test_show_raises_when_no_authtoken_in_the_context(self): - fixture = self._make_fixture(name='image1', - is_public=False, - protected=False) - image_id = self.service.create(self.context, fixture)['id'] - self.context.auth_token = False - self.assertRaises(exception.ImageNotFound, - self.service.show, - self.context, - image_id) - - def test_detail_passes_through_to_client(self): - fixture = self._make_fixture(name='image10', is_public=True) - image_id = self.service.create(self.context, fixture)['id'] - image_metas = self.service.detail(self.context) - expected = [ - { - 'id': image_id, - 'name': 'image10', - 'protected': None, - 'size': None, - 'min_disk': None, - 'min_ram': None, - 'disk_format': None, - 'container_format': None, - 'checksum': None, - 'created_at': self.NOW_DATETIME, - 'updated_at': self.NOW_DATETIME, - 'deleted': None, - 'status': None, - 'properties': {'is_public': True, 'properties': {}}, - 'owner': None, - 'visibility': None - }, - ] - self.assertEqual(expected, image_metas) - - def test_show_makes_datetimes(self): - fixture = self._make_datetime_fixture() - image_id = self.service.create(self.context, fixture)['id'] - image_meta = self.service.show(self.context, image_id) - 
self.assertEqual(self.NOW_DATETIME, image_meta['created_at']) - self.assertEqual(self.NOW_DATETIME, image_meta['updated_at']) - - def test_detail_makes_datetimes(self): - fixture = self._make_datetime_fixture() - self.service.create(self.context, fixture) - image_meta = self.service.detail(self.context)[0] - self.assertEqual(self.NOW_DATETIME, image_meta['created_at']) - self.assertEqual(self.NOW_DATETIME, image_meta['updated_at']) - - def test_download_with_retries(self): - tries = [0] - - class MyGlanceStubClient(glance_stubs.StubGlanceClient): - """A client that fails the first time, then succeeds.""" - def get(self, image_id): - if tries[0] == 0: - tries[0] = 1 - raise glanceclient.exc.ServiceUnavailable('') - else: - return {} - - client = MyGlanceStubClient() - service = self._create_image_service(client) - image_id = 1 # doesn't matter - writer = NullWriter() - - # When retries are disabled, we should get an exception - self.flags(glance_num_retries=0) - self.assertRaises(exception.GlanceConnectionFailed, - service.download, - self.context, - image_id, - writer) - - # Now lets enable retries. No exception should happen now. 
- tries = [0] - self.flags(glance_num_retries=1) - service.download(self.context, image_id, writer) - - def test_client_forbidden_converts_to_imagenotauthed(self): - class MyGlanceStubClient(glance_stubs.StubGlanceClient): - """A client that raises a Forbidden exception.""" - def get(self, image_id): - raise glanceclient.exc.Forbidden(image_id) - - client = MyGlanceStubClient() - service = self._create_image_service(client) - image_id = 1 # doesn't matter - writer = NullWriter() - self.assertRaises(exception.ImageNotAuthorized, service.download, - self.context, image_id, writer) - - def test_client_httpforbidden_converts_to_imagenotauthed(self): - class MyGlanceStubClient(glance_stubs.StubGlanceClient): - """A client that raises a HTTPForbidden exception.""" - def get(self, image_id): - raise glanceclient.exc.HTTPForbidden(image_id) - - client = MyGlanceStubClient() - service = self._create_image_service(client) - image_id = 1 # doesn't matter - writer = NullWriter() - self.assertRaises(exception.ImageNotAuthorized, service.download, - self.context, image_id, writer) - - def test_client_notfound_converts_to_imagenotfound(self): - class MyGlanceStubClient(glance_stubs.StubGlanceClient): - """A client that raises a NotFound exception.""" - def get(self, image_id): - raise glanceclient.exc.NotFound(image_id) - - client = MyGlanceStubClient() - service = self._create_image_service(client) - image_id = 1 # doesn't matter - writer = NullWriter() - self.assertRaises(exception.ImageNotFound, service.download, - self.context, image_id, writer) - - def test_client_httpnotfound_converts_to_imagenotfound(self): - class MyGlanceStubClient(glance_stubs.StubGlanceClient): - """A client that raises a HTTPNotFound exception.""" - def get(self, image_id): - raise glanceclient.exc.HTTPNotFound(image_id) - - client = MyGlanceStubClient() - service = self._create_image_service(client) - image_id = 1 # doesn't matter - writer = NullWriter() - self.assertRaises(exception.ImageNotFound, 
service.download, - self.context, image_id, writer) - - @mock.patch('six.moves.builtins.open') - @mock.patch('shutil.copyfileobj') - @mock.patch('cinder.image.glance.get_api_servers', - return_value=itertools.cycle([(False, 'localhost:9292')])) - def test_download_from_direct_file(self, api_servers, - mock_copyfileobj, mock_open): - fixture = self._make_fixture(name='test image', - locations=[{'url': 'file:///tmp/test'}]) - image_id = self.service.create(self.context, fixture)['id'] - writer = NullWriter() - self.flags(allowed_direct_url_schemes=['file']) - self.flags(glance_api_version=2) - self.service.download(self.context, image_id, writer) - mock_copyfileobj.assert_called_once_with(mock.ANY, writer) - - @mock.patch('six.moves.builtins.open') - @mock.patch('shutil.copyfileobj') - @mock.patch('cinder.image.glance.get_api_servers', - return_value=itertools.cycle([(False, 'localhost:9292')])) - def test_download_from_direct_file_non_file(self, api_servers, - mock_copyfileobj, mock_open): - fixture = self._make_fixture(name='test image', - direct_url='swift+http://test/image') - image_id = self.service.create(self.context, fixture)['id'] - writer = NullWriter() - self.flags(allowed_direct_url_schemes=['file']) - self.flags(glance_api_version=2) - self.service.download(self.context, image_id, writer) - self.assertIsNone(mock_copyfileobj.call_args) - - def test_glance_client_image_id(self): - fixture = self._make_fixture(name='test image') - image_id = self.service.create(self.context, fixture)['id'] - (_service, same_id) = glance.get_remote_image_service(self.context, - image_id) - self.assertEqual(same_id, image_id) - - def test_glance_client_image_ref(self): - fixture = self._make_fixture(name='test image') - image_id = self.service.create(self.context, fixture)['id'] - image_url = 'http://something-less-likely/%s' % image_id - (service, same_id) = glance.get_remote_image_service(self.context, - image_url) - self.assertEqual(same_id, image_id) - 
self.assertEqual('something-less-likely', service._client.netloc) - for ipv6_url in ('[::1]', '::1', '[::1]:444'): - image_url = 'http://%s/%s' % (ipv6_url, image_id) - (service, same_id) = glance.get_remote_image_service(self.context, - image_url) - self.assertEqual(same_id, image_id) - self.assertEqual(ipv6_url, service._client.netloc) - - def test_extracting_missing_attributes(self): - """Verify behavior from glance objects that are missing attributes - - This fakes the image class and is missing the checksum and name - attribute as the client would return if they're not set in the - database. Regression test for bug #1308058. - """ - class MyFakeGlanceImage(glance_stubs.FakeImage): - def __init__(self, metadata): - IMAGE_ATTRIBUTES = ['size', 'disk_format', 'owner', - 'container_format', 'id', 'created_at', - 'updated_at', 'deleted', 'status', - 'min_disk', 'min_ram', 'is_public', - 'visibility', 'protected'] - raw = dict.fromkeys(IMAGE_ATTRIBUTES) - raw.update(metadata) - self.__dict__['raw'] = raw - - metadata = { - 'id': 1, - 'created_at': self.NOW_DATETIME, - 'updated_at': self.NOW_DATETIME, - } - image = MyFakeGlanceImage(metadata) - actual = glance._extract_attributes(image) - expected = { - 'id': 1, - 'name': None, - 'protected': None, - 'size': None, - 'min_disk': None, - 'min_ram': None, - 'disk_format': None, - 'container_format': None, - 'checksum': None, - 'created_at': self.NOW_DATETIME, - 'updated_at': self.NOW_DATETIME, - 'deleted_at': None, - 'deleted': None, - 'status': None, - 'properties': {}, - 'owner': None, - 'visibility': None, - } - self.assertEqual(expected, actual) - - @mock.patch('cinder.image.glance.CONF') - def test_v2_passes_visibility_param(self, config): - - config.glance_api_version = 2 - config.glance_num_retries = 0 - - metadata = { - 'id': 1, - 'size': 2, - 'visibility': 'public', - } - - image = glance_stubs.FakeImage(metadata) - client = glance_stubs.StubGlanceClient() - - service = self._create_image_service(client) - 
service._image_schema = glance_stubs.FakeSchema() - - actual = service._translate_from_glance('fake_context', image) - expected = { - 'id': 1, - 'name': None, - 'visibility': 'public', - 'protected': None, - 'size': 2, - 'min_disk': None, - 'min_ram': None, - 'disk_format': None, - 'container_format': None, - 'checksum': None, - 'deleted': None, - 'status': None, - 'properties': {}, - 'owner': None, - 'created_at': None, - 'updated_at': None - } - - self.assertEqual(expected, actual) - - @mock.patch('cinder.image.glance.CONF') - def test_extracting_v2_boot_properties(self, config): - - config.glance_api_version = 2 - config.glance_num_retries = 0 - - metadata = { - 'id': 1, - 'size': 2, - 'min_disk': 2, - 'min_ram': 2, - 'kernel_id': 'foo', - 'ramdisk_id': 'bar', - } - - image = glance_stubs.FakeImage(metadata) - client = glance_stubs.StubGlanceClient() - - service = self._create_image_service(client) - service._image_schema = glance_stubs.FakeSchema() - - actual = service._translate_from_glance('fake_context', image) - expected = { - 'id': 1, - 'name': None, - 'visibility': None, - 'protected': None, - 'size': 2, - 'min_disk': 2, - 'min_ram': 2, - 'disk_format': None, - 'container_format': None, - 'checksum': None, - 'deleted': None, - 'status': None, - 'properties': {'kernel_id': 'foo', - 'ramdisk_id': 'bar'}, - 'owner': None, - 'created_at': None, - 'updated_at': None - } - - self.assertEqual(expected, actual) - - def test_translate_to_glance(self): - self.flags(glance_api_version=1) - client = glance_stubs.StubGlanceClient() - service = self._create_image_service(client) - - metadata = { - 'id': 1, - 'size': 2, - 'min_disk': 2, - 'min_ram': 2, - 'properties': {'kernel_id': 'foo', - 'ramdisk_id': 'bar', - 'x_billinginfo': '123'}, - } - - actual = service._translate_to_glance(metadata) - expected = metadata - self.assertEqual(expected, actual) - - def test_translate_to_glance_v2(self): - self.flags(glance_api_version=2) - client = glance_stubs.StubGlanceClient() 
- service = self._create_image_service(client) - - metadata = { - 'id': 1, - 'size': 2, - 'min_disk': 2, - 'min_ram': 2, - 'properties': {'kernel_id': 'foo', - 'ramdisk_id': 'bar', - 'x_billinginfo': '123'}, - } - - actual = service._translate_to_glance(metadata) - expected = { - 'id': 1, - 'size': 2, - 'min_disk': 2, - 'min_ram': 2, - 'kernel_id': 'foo', - 'ramdisk_id': 'bar', - 'x_billinginfo': '123', - } - self.assertEqual(expected, actual) - - -class TestGlanceClientVersion(test.TestCase): - """Tests the version of the glance client generated.""" - - @mock.patch('cinder.image.glance.glanceclient.Client') - def test_glance_version_by_flag(self, _mockglanceclient): - """Test glance version set by flag is honoured.""" - ctx = mock.MagicMock() - glance.GlanceClientWrapper(ctx, 'fake_host', 9292) - self.assertEqual('2', _mockglanceclient.call_args[0][0]) - self.flags(glance_api_version=1) - glance.GlanceClientWrapper(ctx, 'fake_host', 9292) - self.assertEqual('1', _mockglanceclient.call_args[0][0]) - CONF.reset() - - @mock.patch('cinder.image.glance.glanceclient.Client') - def test_glance_version_by_arg(self, _mockglanceclient): - """Test glance version set by arg to GlanceClientWrapper""" - ctx = mock.MagicMock() - glance.GlanceClientWrapper(ctx, 'fake_host', 9292, version=1) - self.assertEqual('1', _mockglanceclient.call_args[0][0]) - glance.GlanceClientWrapper(ctx, 'fake_host', 9292, version=2) - self.assertEqual('2', _mockglanceclient.call_args[0][0]) - - @mock.patch('cinder.image.glance.glanceclient.Client') - @mock.patch('cinder.image.glance.get_api_servers', - return_value=itertools.cycle([(False, 'localhost:9292')])) - def test_call_glance_version_by_arg(self, api_servers, _mockglanceclient): - """Test glance version set by arg to GlanceClientWrapper""" - glance_wrapper = glance.GlanceClientWrapper() - ctx = mock.MagicMock() - glance_wrapper.call(ctx, 'method', version=2) - - self.assertEqual('2', _mockglanceclient.call_args[0][0]) - - 
@mock.patch('cinder.image.glance.glanceclient.Client') - @mock.patch('cinder.image.glance.get_api_servers', - return_value=itertools.cycle([(False, 'localhost:9292')])) - def test_call_glance_over_quota(self, api_servers, _mockglanceclient): - """Test glance version set by arg to GlanceClientWrapper""" - glance_wrapper = glance.GlanceClientWrapper() - fake_client = mock.Mock() - fake_client.images.method = mock.Mock( - side_effect=glanceclient.exc.HTTPOverLimit) - self.mock_object(glance_wrapper, 'client', fake_client) - self.assertRaises(exception.ImageLimitExceeded, - glance_wrapper.call, 'fake_context', 'method', - version=2) - - -def _create_failing_glance_client(info): - class MyGlanceStubClient(glance_stubs.StubGlanceClient): - """A client that fails the first time, then succeeds.""" - def get(self, image_id): - info['num_calls'] += 1 - if info['num_calls'] == 1: - raise glanceclient.exc.ServiceUnavailable('') - return {} - - return MyGlanceStubClient() - - -class TestGlanceImageServiceClient(test.TestCase): - - def setUp(self): - super(TestGlanceImageServiceClient, self).setUp() - self.context = context.RequestContext('fake', 'fake', auth_token=True) - self.mock_object(glance.time, 'sleep', return_value=None) - - def test_create_glance_client(self): - self.flags(auth_strategy='keystone') - self.flags(glance_request_timeout=60) - - class MyGlanceStubClient(object): - def __init__(inst, version, *args, **kwargs): - self.assertEqual('2', version) - self.assertEqual("http://fake_host:9292", args[0]) - self.assertTrue(kwargs['token']) - self.assertEqual(60, kwargs['timeout']) - - self.mock_object(glance.glanceclient, 'Client', MyGlanceStubClient) - client = glance._create_glance_client(self.context, 'fake_host:9292', - False) - self.assertIsInstance(client, MyGlanceStubClient) - - def test_create_glance_client_auth_strategy_is_not_keystone(self): - self.flags(auth_strategy='noauth') - self.flags(glance_request_timeout=60) - - class MyGlanceStubClient(object): - 
def __init__(inst, version, *args, **kwargs): - self.assertEqual('2', version) - self.assertEqual('http://fake_host:9292', args[0]) - self.assertNotIn('token', kwargs) - self.assertEqual(60, kwargs['timeout']) - - self.mock_object(glance.glanceclient, 'Client', MyGlanceStubClient) - client = glance._create_glance_client(self.context, 'fake_host:9292', - False) - self.assertIsInstance(client, MyGlanceStubClient) - - def test_create_glance_client_glance_request_default_timeout(self): - self.flags(auth_strategy='keystone') - self.flags(glance_request_timeout=None) - - class MyGlanceStubClient(object): - def __init__(inst, version, *args, **kwargs): - self.assertEqual("2", version) - self.assertEqual("http://fake_host:9292", args[0]) - self.assertTrue(kwargs['token']) - self.assertNotIn('timeout', kwargs) - - self.mock_object(glance.glanceclient, 'Client', MyGlanceStubClient) - client = glance._create_glance_client(self.context, 'fake_host:9292', - False) - self.assertIsInstance(client, MyGlanceStubClient) diff --git a/cinder/tests/unit/keymgr/__init__.py b/cinder/tests/unit/keymgr/__init__.py deleted file mode 100644 index e69de29bb..000000000 diff --git a/cinder/tests/unit/keymgr/fake.py b/cinder/tests/unit/keymgr/fake.py deleted file mode 100644 index 000add23b..000000000 --- a/cinder/tests/unit/keymgr/fake.py +++ /dev/null @@ -1,24 +0,0 @@ -# Copyright 2011 Justin Santa Barbara -# Copyright 2012 OpenStack Foundation -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the -# License for the specific language governing permissions and limitations -# under the License. - -"""Implementation of a fake key manager.""" - - -from castellan.tests.unit.key_manager import mock_key_manager - - -def fake_api(configuration=None): - return mock_key_manager.MockKeyManager(configuration) diff --git a/cinder/tests/unit/keymgr/test_conf_key_mgr.py b/cinder/tests/unit/keymgr/test_conf_key_mgr.py deleted file mode 100644 index f9669940b..000000000 --- a/cinder/tests/unit/keymgr/test_conf_key_mgr.py +++ /dev/null @@ -1,118 +0,0 @@ -# Copyright (c) 2013 The Johns Hopkins University/Applied Physics Laboratory -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -""" -Test cases for the conf key manager. 
-""" - -import binascii - -from castellan.common.objects import symmetric_key as key -from oslo_config import cfg - -from cinder import context -from cinder import exception -from cinder.keymgr import conf_key_mgr -from cinder import test - -CONF = cfg.CONF -CONF.import_opt('fixed_key', 'cinder.keymgr.conf_key_mgr', group='key_manager') - - -class ConfKeyManagerTestCase(test.TestCase): - def __init__(self, *args, **kwargs): - super(ConfKeyManagerTestCase, self).__init__(*args, **kwargs) - - self._hex_key = '1' * 64 - - def _create_key_manager(self): - CONF.set_default('fixed_key', default=self._hex_key, - group='key_manager') - return conf_key_mgr.ConfKeyManager(CONF) - - def setUp(self): - super(ConfKeyManagerTestCase, self).setUp() - self.key_mgr = self._create_key_manager() - - self.ctxt = context.RequestContext('fake', 'fake') - - self.key_id = '00000000-0000-0000-0000-000000000000' - encoded = bytes(binascii.unhexlify(self._hex_key)) - self.key = key.SymmetricKey('AES', len(encoded) * 8, encoded) - - def test___init__(self): - self.assertEqual(self.key_id, self.key_mgr.key_id) - - def test_create_key(self): - key_id_1 = self.key_mgr.create_key(self.ctxt) - key_id_2 = self.key_mgr.create_key(self.ctxt) - # ensure that the UUIDs are the same - self.assertEqual(key_id_1, key_id_2) - - def test_create_null_context(self): - self.assertRaises(exception.NotAuthorized, - self.key_mgr.create_key, None) - - def test_create_key_pair(self): - self.assertRaises(NotImplementedError, - self.key_mgr.create_key_pair, self.ctxt) - - def test_create_key_pair_null_context(self): - self.assertRaises(NotImplementedError, - self.key_mgr.create_key_pair, None) - - def test_store_key(self): - key_id = self.key_mgr.store(self.ctxt, self.key) - - actual_key = self.key_mgr.get(self.ctxt, key_id) - self.assertEqual(self.key, actual_key) - - def test_store_null_context(self): - self.assertRaises(exception.NotAuthorized, - self.key_mgr.store, None, self.key) - - def 
test_store_key_invalid(self): - encoded = bytes(binascii.unhexlify('0' * 64)) - inverse_key = key.SymmetricKey('AES', len(encoded) * 8, encoded) - - self.assertRaises(exception.KeyManagerError, - self.key_mgr.store, self.ctxt, inverse_key) - - def test_delete_key(self): - key_id = self.key_mgr.create_key(self.ctxt) - self.key_mgr.delete(self.ctxt, key_id) - - # cannot delete key -- might have lingering references - self.assertEqual(self.key, - self.key_mgr.get(self.ctxt, self.key_id)) - - def test_delete_null_context(self): - self.assertRaises(exception.NotAuthorized, - self.key_mgr.delete, None, None) - - def test_delete_unknown_key(self): - self.assertRaises(exception.KeyManagerError, - self.key_mgr.delete, self.ctxt, None) - - def test_get_key(self): - self.assertEqual(self.key, - self.key_mgr.get(self.ctxt, self.key_id)) - - def test_get_null_context(self): - self.assertRaises(exception.NotAuthorized, - self.key_mgr.get, None, None) - - def test_get_unknown_key(self): - self.assertRaises(KeyError, self.key_mgr.get, self.ctxt, None) diff --git a/cinder/tests/unit/keymgr/test_init.py b/cinder/tests/unit/keymgr/test_init.py deleted file mode 100644 index b74c91dc7..000000000 --- a/cinder/tests/unit/keymgr/test_init.py +++ /dev/null @@ -1,85 +0,0 @@ -# Copyright (c) 2016 The Johns Hopkins University/Applied Physics Laboratory -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -import castellan -from castellan import key_manager -from castellan import options as castellan_opts - -from oslo_config import cfg - -from cinder import keymgr -from cinder import test - - -class InitTestCase(test.TestCase): - def setUp(self): - super(InitTestCase, self).setUp() - self.config = cfg.ConfigOpts() - castellan_opts.set_defaults(self.config) - self.config.set_default('api_class', - 'cinder.keymgr.conf_key_mgr.ConfKeyManager', - group='key_manager') - - def test_blank_config(self): - kmgr = keymgr.API(self.config) - self.assertEqual(type(kmgr), keymgr.conf_key_mgr.ConfKeyManager) - - def test_set_barbican_key_manager(self): - self.config.set_override( - 'api_class', - 'castellan.key_manager.barbican_key_manager.BarbicanKeyManager', - group='key_manager') - kmgr = keymgr.API(self.config) - self.assertEqual( - type(kmgr), - key_manager.barbican_key_manager.BarbicanKeyManager) - - def test_set_mock_key_manager(self): - self.config.set_override( - 'api_class', - 'castellan.tests.unit.key_manager.mock_key_manager.MockKeyManager', - group='key_manager') - kmgr = keymgr.API(self.config) - self.assertEqual( - type(kmgr), - castellan.tests.unit.key_manager.mock_key_manager.MockKeyManager) - - def test_set_conf_key_manager(self): - self.config.set_override( - 'api_class', - 'cinder.keymgr.conf_key_mgr.ConfKeyManager', - group='key_manager') - kmgr = keymgr.API(self.config) - self.assertEqual(type(kmgr), keymgr.conf_key_mgr.ConfKeyManager) - - def test_deprecated_barbican_key_manager(self): - self.config.set_override( - 'api_class', - 'cinder.keymgr.barbican.BarbicanKeyManager', - group='key_manager') - kmgr = keymgr.API(self.config) - self.assertEqual( - type(kmgr), - key_manager.barbican_key_manager.BarbicanKeyManager) - - def test_deprecated_mock_key_manager(self): - self.config.set_override( - 'api_class', - 'cinder.tests.unit.keymgr.mock_key_mgr.MockKeyManager', - group='key_manager') - kmgr = keymgr.API(self.config) - self.assertEqual( - type(kmgr), - 
castellan.tests.unit.key_manager.mock_key_manager.MockKeyManager) diff --git a/cinder/tests/unit/message/__init__.py b/cinder/tests/unit/message/__init__.py deleted file mode 100644 index e69de29bb..000000000 diff --git a/cinder/tests/unit/message/test_api.py b/cinder/tests/unit/message/test_api.py deleted file mode 100644 index 1233a3328..000000000 --- a/cinder/tests/unit/message/test_api.py +++ /dev/null @@ -1,275 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -import datetime - -import mock -from oslo_config import cfg -from oslo_utils import timeutils - -from cinder.api import extensions -from cinder.api.openstack import api_version_request as api_version -from cinder.api.v3 import messages -from cinder import context -from cinder.message import api as message_api -from cinder.message import message_field -from cinder import test -from cinder.tests.unit.api import fakes -import cinder.tests.unit.fake_constants as fake_constants -from cinder.tests.unit import utils - -CONF = cfg.CONF - -version_header_name = 'OpenStack-API-Version' - - -class MessageApiTest(test.TestCase): - def setUp(self): - super(MessageApiTest, self).setUp() - self.message_api = message_api.API() - self.mock_object(self.message_api, 'db') - self.ctxt = context.RequestContext('admin', 'fakeproject', True) - self.ctxt.request_id = 'fakerequestid' - self.ext_mgr = extensions.ExtensionManager() - self.ext_mgr.extensions = {} - self.controller = messages.MessagesController(self.ext_mgr) - - 
@mock.patch('oslo_utils.timeutils.utcnow') - def test_create(self, mock_utcnow): - CONF.set_override('message_ttl', 300) - mock_utcnow.return_value = datetime.datetime.utcnow() - expected_expires_at = timeutils.utcnow() + datetime.timedelta( - seconds=300) - expected_message_record = { - 'project_id': 'fakeproject', - 'request_id': 'fakerequestid', - 'resource_type': 'fake_resource_type', - 'resource_uuid': None, - 'action_id': - message_field.Action.SCHEDULE_ALLOCATE_VOLUME[0], - 'detail_id': message_field.Detail.UNKNOWN_ERROR[0], - 'message_level': 'ERROR', - 'expires_at': expected_expires_at, - 'event_id': "VOLUME_fake_resource_type_001_001", - } - self.message_api.create(self.ctxt, - message_field.Action.SCHEDULE_ALLOCATE_VOLUME, - detail=message_field.Detail.UNKNOWN_ERROR, - resource_type="fake_resource_type") - - self.message_api.db.message_create.assert_called_once_with( - self.ctxt, expected_message_record) - mock_utcnow.assert_called_with() - - def test_create_swallows_exception(self): - self.mock_object(self.message_api.db, 'create', - side_effect=Exception()) - self.message_api.create(self.ctxt, - message_field.Action.ATTACH_VOLUME, - "fake_resource") - - self.message_api.db.message_create.assert_called_once_with( - self.ctxt, mock.ANY) - - def test_get(self): - self.message_api.get(self.ctxt, 'fake_id') - - self.message_api.db.message_get.assert_called_once_with(self.ctxt, - 'fake_id') - - def test_get_all(self): - self.message_api.get_all(self.ctxt) - - self.message_api.db.message_get_all.assert_called_once_with( - self.ctxt, filters={}, limit=None, marker=None, offset=None, - sort_dirs=None, sort_keys=None) - - def test_delete(self): - admin_context = mock.Mock() - self.mock_object(self.ctxt, 'elevated', return_value=admin_context) - - self.message_api.delete(self.ctxt, 'fake_id') - - self.message_api.db.message_destroy.assert_called_once_with( - admin_context, 'fake_id') - - def test_cleanup_expired_messages(self): - admin_context = mock.Mock() - 
self.mock_object(self.ctxt, 'elevated', return_value=admin_context) - self.message_api.cleanup_expired_messages(self.ctxt) - self.message_api.db.cleanup_expired_messages.assert_called_once_with( - admin_context) - - def create_message_for_tests(self): - """Create messages to test pagination functionality""" - utils.create_message( - self.ctxt, action=message_field.Action.ATTACH_VOLUME) - utils.create_message( - self.ctxt, action=message_field.Action.SCHEDULE_ALLOCATE_VOLUME) - utils.create_message( - self.ctxt, - action=message_field.Action.COPY_VOLUME_TO_IMAGE) - utils.create_message( - self.ctxt, - action=message_field.Action.COPY_VOLUME_TO_IMAGE) - - def test_get_all_messages_with_limit(self): - self.create_message_for_tests() - - url = '/v3/messages?limit=1' - req = fakes.HTTPRequest.blank(url) - req.method = 'GET' - req.content_type = 'application/json' - req.headers = {version_header_name: 'volume 3.5'} - req.api_version_request = api_version.APIVersionRequest('3.30') - req.environ['cinder.context'].is_admin = True - - res = self.controller.index(req) - self.assertEqual(1, len(res['messages'])) - - url = '/v3/messages?limit=3' - req = fakes.HTTPRequest.blank(url) - req.method = 'GET' - req.content_type = 'application/json' - req.headers = {version_header_name: 'volume 3.5'} - req.api_version_request = api_version.APIVersionRequest('3.30') - req.environ['cinder.context'].is_admin = True - - res = self.controller.index(req) - self.assertEqual(3, len(res['messages'])) - - def test_get_all_messages_with_limit_wrong_version(self): - self.create_message_for_tests() - - url = '/v3/messages?limit=1' - req = fakes.HTTPRequest.blank(url) - req.method = 'GET' - req.content_type = 'application/json' - req.headers["OpenStack-API-Version"] = "volume 3.3" - req.api_version_request = api_version.APIVersionRequest('3.3') - req.environ['cinder.context'].is_admin = True - - res = self.controller.index(req) - self.assertEqual(4, len(res['messages'])) - - def 
test_get_all_messages_with_offset(self): - self.create_message_for_tests() - - url = '/v3/messages?offset=1' - req = fakes.HTTPRequest.blank(url) - req.method = 'GET' - req.content_type = 'application/json' - req.headers["OpenStack-API-Version"] = "volume 3.5" - req.api_version_request = api_version.APIVersionRequest('3.5') - req.environ['cinder.context'].is_admin = True - - res = self.controller.index(req) - self.assertEqual(3, len(res['messages'])) - - def test_get_all_messages_with_limit_and_offset(self): - self.create_message_for_tests() - - url = '/v3/messages?limit=2&offset=1' - req = fakes.HTTPRequest.blank(url) - req.method = 'GET' - req.content_type = 'application/json' - req.headers["OpenStack-API-Version"] = "volume 3.5" - req.api_version_request = api_version.APIVersionRequest('3.5') - req.environ['cinder.context'].is_admin = True - - res = self.controller.index(req) - self.assertEqual(2, len(res['messages'])) - - def test_get_all_messages_with_filter(self): - self.create_message_for_tests() - - url = '/v3/messages?action_id=%s' % ( - message_field.Action.ATTACH_VOLUME[0]) - req = fakes.HTTPRequest.blank(url) - req.method = 'GET' - req.content_type = 'application/json' - req.headers["OpenStack-API-Version"] = "volume 3.5" - req.api_version_request = api_version.APIVersionRequest('3.5') - req.environ['cinder.context'].is_admin = True - - res = self.controller.index(req) - self.assertEqual(1, len(res['messages'])) - - def test_get_all_messages_with_sort(self): - self.create_message_for_tests() - - url = '/v3/messages?sort=event_id:asc' - req = fakes.HTTPRequest.blank(url) - req.method = 'GET' - req.content_type = 'application/json' - req.headers["OpenStack-API-Version"] = "volume 3.5" - req.api_version_request = api_version.APIVersionRequest('3.5') - req.environ['cinder.context'].is_admin = True - - res = self.controller.index(req) - - expect_result = [ - "VOLUME_VOLUME_001_002", - "VOLUME_VOLUME_002_002", - "VOLUME_VOLUME_003_002", - 
"VOLUME_VOLUME_003_002", - ] - expect_result.sort() - - self.assertEqual(4, len(res['messages'])) - self.assertEqual(expect_result[0], - res['messages'][0]['event_id']) - self.assertEqual(expect_result[1], - res['messages'][1]['event_id']) - self.assertEqual(expect_result[2], - res['messages'][2]['event_id']) - self.assertEqual(expect_result[3], - res['messages'][3]['event_id']) - - def test_get_all_messages_paging(self): - self.create_message_for_tests() - - # first request of this test - url = '/v3/fake/messages?limit=2' - req = fakes.HTTPRequest.blank(url) - req.method = 'GET' - req.content_type = 'application/json' - req.headers = {version_header_name: 'volume 3.5'} - req.api_version_request = api_version.APIVersionRequest('3.30') - req.environ['cinder.context'].is_admin = True - - res = self.controller.index(req) - self.assertEqual(2, len(res['messages'])) - - next_link = ('http://localhost/v3/%s/messages?limit=' - '2&marker=%s') % (fake_constants.PROJECT_ID, - res['messages'][1]['id']) - self.assertEqual(next_link, - res['messages_links'][0]['href']) - - # Second request in this test - # Test for second page using marker (res['messages][0]['id']) - # values fetched in first request with limit 2 in this test - url = '/v3/fake/messages?limit=1&marker=%s' % ( - res['messages'][0]['id']) - req = fakes.HTTPRequest.blank(url) - req.method = 'GET' - req.content_type = 'application/json' - req.headers = {version_header_name: 'volume 3.5'} - req.api_version_request = api_version.max_api_version() - req.environ['cinder.context'].is_admin = True - - result = self.controller.index(req) - self.assertEqual(1, len(result['messages'])) - - # checking second message of first request in this test with first - # message of second request. 
(to test paging mechanism) - self.assertEqual(res['messages'][1], result['messages'][0]) diff --git a/cinder/tests/unit/message/test_defined_messages.py b/cinder/tests/unit/message/test_defined_messages.py deleted file mode 100644 index 389230913..000000000 --- a/cinder/tests/unit/message/test_defined_messages.py +++ /dev/null @@ -1,43 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -from oslo_config import cfg - -from cinder.message import defined_messages -from cinder import test - -CONF = cfg.CONF - - -class DefinedMessagesTest(test.TestCase): - def test_event_id_formats(self): - """Assert all cinder event ids start with VOLUME_.""" - for attr_name in dir(defined_messages.EventIds): - if not attr_name.startswith('_'): - value = getattr(defined_messages.EventIds, attr_name) - self.assertTrue(value.startswith('VOLUME_')) - - def test_unique_event_ids(self): - """Assert that no event_id is duplicated.""" - event_ids = [] - for attr_name in dir(defined_messages.EventIds): - if not attr_name.startswith('_'): - value = getattr(defined_messages.EventIds, attr_name) - event_ids.append(value) - - self.assertEqual(len(event_ids), len(set(event_ids))) - - def test_event_id_has_message(self): - for attr_name in dir(defined_messages.EventIds): - if not attr_name.startswith('_'): - value = getattr(defined_messages.EventIds, attr_name) - msg = defined_messages.event_id_message_map.get(value) - self.assertGreater(len(msg), 1) diff --git 
a/cinder/tests/unit/message/test_message_field.py b/cinder/tests/unit/message/test_message_field.py deleted file mode 100644 index f983abbd9..000000000 --- a/cinder/tests/unit/message/test_message_field.py +++ /dev/null @@ -1,63 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import ddt - -from oslo_config import cfg - -from cinder import exception -from cinder.message import message_field -from cinder import test - -CONF = cfg.CONF - - -@ddt.ddt -class MessageFieldTest(test.TestCase): - - @ddt.data({'id': '001', 'content': 'schedule allocate volume'}, - {'id': '002', 'content': 'attach volume'}, - {'id': 'invalid', 'content': None}) - @ddt.unpack - def test_translate_action(self, id, content): - result = message_field.translate_action(id) - if content is None: - content = 'unknown action' - self.assertEqual(content, result) - - @ddt.data({'id': '001', - 'content': 'An unknown error occurred.'}, - {'id': '002', - 'content': 'Driver is not initialized at present.'}, - {'id': 'invalid', 'content': None}) - @ddt.unpack - def test_translate_detail(self, id, content): - result = message_field.translate_detail(id) - if content is None: - content = 'An unknown error occurred.' 
- self.assertEqual(content, result) - - @ddt.data({'exception': exception.DriverNotInitialized(), - 'detail': '', - 'expected': '002'}, - {'exception': exception.CinderException(), - 'detail': '', - 'expected': '001'}, - {'exception': exception.CinderException(), - 'detail': message_field.Detail.QUOTA_EXCEED, - 'expected': '007'}, - {'exception': '', 'detail': message_field.Detail.QUOTA_EXCEED, - 'expected': '007'}) - @ddt.unpack - def translate_detail_id(self, exception, detail, expected): - result = message_field.translate_detail_id(exception, detail) - self.assertEqual(expected, result) diff --git a/cinder/tests/unit/monkey_patch_example/__init__.py b/cinder/tests/unit/monkey_patch_example/__init__.py deleted file mode 100644 index d367a4849..000000000 --- a/cinder/tests/unit/monkey_patch_example/__init__.py +++ /dev/null @@ -1,31 +0,0 @@ -# Copyright 2011 OpenStack Foundation -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -"""Example Module for testing utils.monkey_patch().""" - - -CALLED_FUNCTION = [] - - -def example_decorator(name, function): - """decorator for notify which is used from utils.monkey_patch(). 
- - :param name: name of the function - :param function: - object of the function - :returns: function -- decorated function - """ - def wrapped_func(*args, **kwarg): - CALLED_FUNCTION.append(name) - return function(*args, **kwarg) - return wrapped_func diff --git a/cinder/tests/unit/monkey_patch_example/example_a.py b/cinder/tests/unit/monkey_patch_example/example_a.py deleted file mode 100644 index 3844766ea..000000000 --- a/cinder/tests/unit/monkey_patch_example/example_a.py +++ /dev/null @@ -1,27 +0,0 @@ -# Copyright 2011 OpenStack Foundation -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -"""Example Module A for testing utils.monkey_patch().""" - - -def example_function_a(): - return 'Example function' - - -class ExampleClassA(object): - def example_method(self): - return 'Example method' - - def example_method_add(self, arg1, arg2): - return arg1 + arg2 diff --git a/cinder/tests/unit/monkey_patch_example/example_b.py b/cinder/tests/unit/monkey_patch_example/example_b.py deleted file mode 100644 index 52803377f..000000000 --- a/cinder/tests/unit/monkey_patch_example/example_b.py +++ /dev/null @@ -1,28 +0,0 @@ -# Copyright 2011 OpenStack Foundation -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. 
You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -"""Example Module B for testing utils.monkey_patch().""" - - -def example_function_b(): - return 'Example function' - - -class ExampleClassB(object): - def example_method(self): - return 'Example method' - - def example_method_add(self, arg1, arg2): - return arg1 + arg2 diff --git a/cinder/tests/unit/objects/__init__.py b/cinder/tests/unit/objects/__init__.py deleted file mode 100644 index 08d8ffd6a..000000000 --- a/cinder/tests/unit/objects/__init__.py +++ /dev/null @@ -1,56 +0,0 @@ -# Copyright 2015 IBM Corp. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from oslo_utils import timeutils - -from cinder import context -from cinder import exception -from cinder.objects import base as obj_base -from cinder import test - - -class BaseObjectsTestCase(test.TestCase): - def setUp(self, *args, **kwargs): - super(BaseObjectsTestCase, self).setUp(*args, **kwargs) - self.user_id = 'fake-user' - self.project_id = 'fake-project' - self.context = context.RequestContext(self.user_id, self.project_id, - is_admin=False) - # We only test local right now. 
- # TODO(mriedem): Testing remote would be nice... - self.assertIsNone(obj_base.CinderObject.indirection_api) - - # TODO(mriedem): Replace this with - # oslo_versionedobjects.fixture.compare_obj when that is in a released - # version of o.vo. - @staticmethod - def _compare(test, db, obj): - for field, value in db.items(): - try: - getattr(obj, field) - except (AttributeError, exception.CinderException, - NotImplementedError): - # NotImplementedError: ignore "Cannot load 'projects' in the - # base class" error - continue - - obj_field = getattr(obj, field) - if field in ('modified_at', 'created_at', 'updated_at', - 'deleted_at', 'last_heartbeat') and db[field]: - test.assertEqual(db[field], - timeutils.normalize_time(obj_field)) - elif isinstance(obj_field, obj_base.ObjectListBase): - test.assertEqual(db[field], obj_field.objects) - else: - test.assertEqual(db[field], obj_field) diff --git a/cinder/tests/unit/objects/test_backup.py b/cinder/tests/unit/objects/test_backup.py deleted file mode 100644 index fcda7f107..000000000 --- a/cinder/tests/unit/objects/test_backup.py +++ /dev/null @@ -1,368 +0,0 @@ -# Copyright 2015 Intel Corporation -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -import mock -from oslo_utils import timeutils -import pytz -import six - -from cinder.db.sqlalchemy import models -from cinder import exception -from cinder import objects -from cinder.objects import fields -from cinder.tests.unit import fake_constants as fake -from cinder.tests.unit import fake_snapshot -from cinder.tests.unit import fake_volume -from cinder.tests.unit import objects as test_objects -from cinder.tests.unit import utils - - -fake_backup = { - 'id': fake.BACKUP_ID, - 'volume_id': fake.VOLUME_ID, - 'status': fields.BackupStatus.CREATING, - 'size': 1, - 'display_name': 'fake_name', - 'display_description': 'fake_description', - 'user_id': fake.USER_ID, - 'project_id': fake.PROJECT_ID, - 'temp_volume_id': None, - 'temp_snapshot_id': None, - 'snapshot_id': None, - 'data_timestamp': None, - 'restore_volume_id': None, -} - -vol_props = {'status': 'available', 'size': 1} -fake_vol = fake_volume.fake_db_volume(**vol_props) -snap_props = {'status': fields.BackupStatus.AVAILABLE, - 'volume_id': fake_vol['id'], - 'expected_attrs': ['metadata']} -fake_snap = fake_snapshot.fake_db_snapshot(**snap_props) - - -class TestBackup(test_objects.BaseObjectsTestCase): - - @mock.patch('cinder.db.get_by_id', return_value=fake_backup) - def test_get_by_id(self, backup_get): - backup = objects.Backup.get_by_id(self.context, fake.USER_ID) - self._compare(self, fake_backup, backup) - backup_get.assert_called_once_with(self.context, models.Backup, - fake.USER_ID) - - @mock.patch('cinder.db.sqlalchemy.api.model_query') - def test_get_by_id_no_existing_id(self, model_query): - query = mock.Mock() - filter_by = mock.Mock() - filter_by.first.return_value = None - query.filter_by.return_value = filter_by - model_query.return_value = query - self.assertRaises(exception.BackupNotFound, objects.Backup.get_by_id, - self.context, 123) - - @mock.patch('cinder.db.backup_create', return_value=fake_backup) - def test_create(self, backup_create): - backup = 
objects.Backup(context=self.context) - backup.create() - self.assertEqual(fake_backup['id'], backup.id) - self.assertEqual(fake_backup['volume_id'], backup.volume_id) - - @mock.patch('cinder.db.backup_update') - def test_save(self, backup_update): - backup = objects.Backup._from_db_object( - self.context, objects.Backup(), fake_backup) - backup.display_name = 'foobar' - backup.save() - backup_update.assert_called_once_with(self.context, backup.id, - {'display_name': 'foobar'}) - - @mock.patch('oslo_utils.timeutils.utcnow', return_value=timeutils.utcnow()) - @mock.patch('cinder.db.sqlalchemy.api.backup_destroy') - def test_destroy(self, backup_destroy, utcnow_mock): - backup_destroy.return_value = { - 'status': fields.BackupStatus.DELETED, - 'deleted': True, - 'deleted_at': utcnow_mock.return_value} - backup = objects.Backup(context=self.context, id=fake.BACKUP_ID) - backup.destroy() - self.assertTrue(backup_destroy.called) - admin_context = backup_destroy.call_args[0][0] - self.assertTrue(admin_context.is_admin) - self.assertTrue(backup.deleted) - self.assertEqual(fields.BackupStatus.DELETED, backup.status) - self.assertEqual(utcnow_mock.return_value.replace(tzinfo=pytz.UTC), - backup.deleted_at) - - def test_obj_field_temp_volume_snapshot_id(self): - backup = objects.Backup(context=self.context, - temp_volume_id='2', - temp_snapshot_id='3') - self.assertEqual('2', backup.temp_volume_id) - self.assertEqual('3', backup.temp_snapshot_id) - - def test_obj_field_snapshot_id(self): - backup = objects.Backup(context=self.context, - snapshot_id='2') - self.assertEqual('2', backup.snapshot_id) - - def test_obj_field_restore_volume_id(self): - backup = objects.Backup(context=self.context, - restore_volume_id='2') - self.assertEqual('2', backup.restore_volume_id) - - def test_import_record(self): - utils.replace_obj_loader(self, objects.Backup) - backup = objects.Backup(context=self.context, id=fake.BACKUP_ID, - parent_id=None, - num_dependent_backups=0) - export_string = 
backup.encode_record() - imported_backup = objects.Backup.decode_record(export_string) - - # Make sure we don't lose data when converting from string - self.assertDictEqual(self._expected_backup(backup), imported_backup) - - def test_import_record_additional_info(self): - utils.replace_obj_loader(self, objects.Backup) - backup = objects.Backup(context=self.context, id=fake.BACKUP_ID, - parent_id=None, - num_dependent_backups=0) - extra_info = {'driver': {'key1': 'value1', 'key2': 'value2'}} - extra_info_copy = extra_info.copy() - export_string = backup.encode_record(extra_info=extra_info) - imported_backup = objects.Backup.decode_record(export_string) - - # Dictionary passed should not be modified - self.assertDictEqual(extra_info_copy, extra_info) - - # Make sure we don't lose data when converting from string and that - # extra info is still there - expected = self._expected_backup(backup) - expected['extra_info'] = extra_info - self.assertDictEqual(expected, imported_backup) - - def _expected_backup(self, backup): - record = {name: field.to_primitive(backup, name, getattr(backup, name)) - for name, field in backup.fields.items()} - return record - - def test_import_record_additional_info_cant_overwrite(self): - utils.replace_obj_loader(self, objects.Backup) - backup = objects.Backup(context=self.context, id=fake.BACKUP_ID, - parent_id=None, - num_dependent_backups=0) - export_string = backup.encode_record(id='fake_id') - imported_backup = objects.Backup.decode_record(export_string) - - # Make sure the extra_info can't overwrite basic data - self.assertDictEqual(self._expected_backup(backup), imported_backup) - - def test_import_record_decoding_error(self): - export_string = '123456' - self.assertRaises(exception.InvalidInput, - objects.Backup.decode_record, - export_string) - - def test_import_record_parsing_error(self): - export_string = '' - self.assertRaises(exception.InvalidInput, - objects.Backup.decode_record, - export_string) - - 
@mock.patch('cinder.db.sqlalchemy.api.backup_get') - def test_refresh(self, backup_get): - db_backup1 = fake_backup.copy() - db_backup2 = db_backup1.copy() - db_backup2['display_name'] = 'foobar' - - # On the second backup_get, return the backup with an updated - # display_name - backup_get.side_effect = [db_backup1, db_backup2] - backup = objects.Backup.get_by_id(self.context, fake.BACKUP_ID) - self._compare(self, db_backup1, backup) - - # display_name was updated, so a backup refresh should have a new value - # for that field - backup.refresh() - self._compare(self, db_backup2, backup) - if six.PY3: - call_bool = mock.call.__bool__() - else: - call_bool = mock.call.__nonzero__() - backup_get.assert_has_calls([mock.call(self.context, fake.BACKUP_ID), - call_bool, - mock.call(self.context, fake.BACKUP_ID)]) - - -class TestBackupList(test_objects.BaseObjectsTestCase): - @mock.patch('cinder.db.backup_get_all', return_value=[fake_backup]) - def test_get_all(self, backup_get_all): - backups = objects.BackupList.get_all(self.context) - self.assertEqual(1, len(backups)) - TestBackup._compare(self, fake_backup, backups[0]) - - @mock.patch('cinder.db.backup_get_all_by_project', - return_value=[fake_backup]) - def test_get_all_by_project(self, get_all_by_project): - backups = objects.BackupList.get_all_by_project( - self.context, self.project_id) - self.assertEqual(1, len(backups)) - TestBackup._compare(self, fake_backup, backups[0]) - - @mock.patch('cinder.db.backup_get_all_by_host', - return_value=[fake_backup]) - def test_get_all_by_host(self, get_all_by_host): - fake_volume_obj = fake_volume.fake_volume_obj(self.context) - - backups = objects.BackupList.get_all_by_host(self.context, - fake_volume_obj.id) - self.assertEqual(1, len(backups)) - TestBackup._compare(self, fake_backup, backups[0]) - - @mock.patch('cinder.db.backup_get_all', return_value=[fake_backup]) - def test_get_all_tenants(self, backup_get_all): - search_opts = {'all_tenants': 1} - backups = 
objects.BackupList.get_all(self.context, search_opts) - self.assertEqual(1, len(backups)) - TestBackup._compare(self, fake_backup, backups[0]) - - @mock.patch('cinder.db.backup_get_all_by_volume', - return_value=[fake_backup]) - def test_get_all_by_volume(self, get_all_by_volume): - backups = objects.BackupList.get_all_by_volume(self.context, - fake.VOLUME_ID) - self.assertEqual(1, len(backups)) - get_all_by_volume.assert_called_once_with(self.context, - fake.VOLUME_ID, None) - TestBackup._compare(self, fake_backup, backups[0]) - - -class BackupDeviceInfoTestCase(test_objects.BaseObjectsTestCase): - def setUp(self): - super(BackupDeviceInfoTestCase, self).setUp() - self.vol_obj = fake_volume.fake_volume_obj(self.context, **vol_props) - self.snap_obj = fake_snapshot.fake_snapshot_obj(self.context, - **snap_props) - self.backup_device_dict = {'secure_enabled': False, - 'is_snapshot': False, } - - @mock.patch('cinder.db.volume_get', return_value=fake_vol) - def test_from_primitive_with_volume(self, mock_fake_vol): - vol_obj = self.vol_obj - self.backup_device_dict['backup_device'] = vol_obj - backup_device_info = objects.BackupDeviceInfo.from_primitive( - self.backup_device_dict, self.context) - self.assertFalse(backup_device_info.is_snapshot) - self.assertEqual(self.backup_device_dict['secure_enabled'], - backup_device_info.secure_enabled) - self.assertEqual(vol_obj, backup_device_info.volume) - - self.backup_device_dict['backup_device'] = fake_vol - backup_device_info = objects.BackupDeviceInfo.from_primitive( - self.backup_device_dict, self.context) - vol_obj_from_db = objects.Volume._from_db_object(self.context, - objects.Volume(), - fake_vol) - self.assertEqual(vol_obj_from_db, backup_device_info.volume) - - @mock.patch('cinder.db.snapshot_get', return_value=fake_snap) - def test_from_primitive_with_snapshot(self, mock_fake_snap): - snap_obj = self.snap_obj - self.backup_device_dict['is_snapshot'] = True - self.backup_device_dict['backup_device'] = snap_obj - 
backup_device_info = objects.BackupDeviceInfo.from_primitive( - self.backup_device_dict, self.context, expected_attrs=['metadata']) - self.assertTrue(backup_device_info.is_snapshot) - self.assertEqual(self.backup_device_dict['secure_enabled'], - backup_device_info.secure_enabled) - self.assertEqual(snap_obj, backup_device_info.snapshot) - - self.backup_device_dict['backup_device'] = fake_snap - backup_device_info = objects.BackupDeviceInfo.from_primitive( - self.backup_device_dict, self.context, expected_attrs=['metadata']) - self.assertEqual(snap_obj, backup_device_info.snapshot) - - @mock.patch('cinder.db.volume_get', return_value=fake_vol) - def test_to_primitive_with_volume(self, mock_fake_vol): - vol_obj = self.vol_obj - self.backup_device_dict['backup_device'] = fake_vol - backup_device_info = objects.BackupDeviceInfo() - backup_device_info.volume = vol_obj - backup_device_info.secure_enabled = ( - self.backup_device_dict['secure_enabled']) - - backup_device_ret_dict = backup_device_info.to_primitive(self.context) - self.assertEqual(self.backup_device_dict['secure_enabled'], - backup_device_ret_dict['secure_enabled']) - self.assertFalse(backup_device_ret_dict['is_snapshot']) - self.assertEqual(self.backup_device_dict['backup_device'], - backup_device_ret_dict['backup_device']) - - @mock.patch('cinder.db.snapshot_get', return_value=fake_snap) - def test_to_primitive_with_snapshot(self, mock_fake_snap): - snap_obj = self.snap_obj - backup_device_info = objects.BackupDeviceInfo() - backup_device_info.snapshot = snap_obj - backup_device_info.secure_enabled = ( - self.backup_device_dict['secure_enabled']) - - backup_device_ret_dict = backup_device_info.to_primitive(self.context) - self.assertEqual(self.backup_device_dict['secure_enabled'], - backup_device_ret_dict['secure_enabled']) - self.assertTrue(backup_device_ret_dict['is_snapshot']) - # NOTE(sborkows): since volume in sqlalchemy snapshot is a sqlalchemy - # object too, to compare snapshots we need to convert 
their volumes to - # dicts. - snap_actual_dict = fake_snap - snap_ref_dict = backup_device_ret_dict['backup_device'] - snap_actual_dict['volume'] = self.vol_obj.obj_to_primitive() - snap_ref_dict['volume'] = snap_ref_dict['volume'] - self.assertEqual(snap_actual_dict, snap_ref_dict) - - def test_is_snapshot_both_volume_and_snapshot_raises_error(self): - snap = self.snap_obj - vol = self.vol_obj - backup_device_info = objects.BackupDeviceInfo() - backup_device_info.snapshot = snap - backup_device_info.volume = vol - backup_device_info.secure_enabled = ( - self.backup_device_dict['secure_enabled']) - self.assertRaises(exception.ProgrammingError, getattr, - backup_device_info, 'is_snapshot') - - def test_is_snapshot_neither_volume_nor_snapshot_raises_error(self): - backup_device_info = objects.BackupDeviceInfo() - backup_device_info.secure_enabled = ( - self.backup_device_dict['secure_enabled']) - self.assertRaises(exception.ProgrammingError, getattr, - backup_device_info, 'is_snapshot') - - def test_device_obj_with_volume(self): - vol = self.vol_obj - backup_device_info = objects.BackupDeviceInfo() - backup_device_info.volume = vol - backup_device_info.secure_enabled = ( - self.backup_device_dict['secure_enabled']) - backup_device_obj = backup_device_info.device_obj - self.assertIsInstance(backup_device_obj, objects.Volume) - self.assertEqual(vol, backup_device_obj) - - def test_device_obj_with_snapshot(self): - snap = self.snap_obj - backup_device_info = objects.BackupDeviceInfo() - backup_device_info.snapshot = snap - backup_device_info.secure_enabled = ( - self.backup_device_dict['secure_enabled']) - backup_device_obj = backup_device_info.device_obj - self.assertIsInstance(backup_device_obj, objects.Snapshot) - self.assertEqual(snap, backup_device_obj) diff --git a/cinder/tests/unit/objects/test_base.py b/cinder/tests/unit/objects/test_base.py deleted file mode 100644 index 2b7bdc9cf..000000000 --- a/cinder/tests/unit/objects/test_base.py +++ /dev/null @@ -1,912 
+0,0 @@ -# Copyright 2015 Red Hat, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import datetime -import ddt -import uuid - -from iso8601 import iso8601 -import mock -from oslo_versionedobjects import fields -from sqlalchemy import sql - -from cinder import context -from cinder import db -from cinder.db.sqlalchemy import models -from cinder import exception -from cinder import objects -from cinder.objects import fields as c_fields -from cinder import test -from cinder.tests.unit import fake_constants as fake -from cinder.tests.unit import fake_objects -from cinder.tests.unit import objects as test_objects - - -class TestCinderObjectVersionHistory(test_objects.BaseObjectsTestCase): - def test_add(self): - history = test_objects.obj_base.CinderObjectVersionsHistory() - v10 = {'Backup': '2.0'} - v11 = {'Backup': '2.1'} - history.add('1.0', v10) - history.add('1.1', v11) - # We have 3 elements because we have the liberty version by default - self.assertEqual(2 + 1, len(history)) - - expected_v10 = history['liberty'].copy() - expected_v10.update(v10) - expected_v11 = history['liberty'].copy() - expected_v11.update(v11) - - self.assertEqual('1.1', history.get_current()) - self.assertEqual(expected_v11, history.get_current_versions()) - self.assertEqual(expected_v10, history['1.0']) - - def test_add_existing(self): - history = test_objects.obj_base.CinderObjectVersionsHistory() - history.add('1.0', {'Backup': '1.0'}) - self.assertRaises(exception.ProgrammingError, - 
history.add, '1.0', {'Backup': '1.0'}) - - -class TestCinderObject(test_objects.BaseObjectsTestCase): - """Tests methods from CinderObject.""" - - def setUp(self): - super(TestCinderObject, self).setUp() - self.obj = fake_objects.ChildObject( - scheduled_at=None, - uuid=uuid.uuid4(), - text='text') - self.obj.obj_reset_changes() - - def test_cinder_obj_get_changes_no_changes(self): - self.assertDictEqual({}, self.obj.cinder_obj_get_changes()) - - def test_cinder_obj_get_changes_other_changes(self): - self.obj.text = 'text2' - self.assertDictEqual({'text': 'text2'}, - self.obj.cinder_obj_get_changes()) - - def test_cinder_obj_get_changes_datetime_no_tz(self): - now = datetime.datetime.utcnow() - self.obj.scheduled_at = now - self.assertDictEqual({'scheduled_at': now}, - self.obj.cinder_obj_get_changes()) - - def test_cinder_obj_get_changes_datetime_tz_utc(self): - now_tz = iso8601.parse_date('2015-06-26T22:00:01Z') - now = now_tz.replace(tzinfo=None) - self.obj.scheduled_at = now_tz - self.assertDictEqual({'scheduled_at': now}, - self.obj.cinder_obj_get_changes()) - - def test_cinder_obj_get_changes_datetime_tz_non_utc_positive(self): - now_tz = iso8601.parse_date('2015-06-26T22:00:01+01') - now = now_tz.replace(tzinfo=None) - datetime.timedelta(hours=1) - self.obj.scheduled_at = now_tz - self.assertDictEqual({'scheduled_at': now}, - self.obj.cinder_obj_get_changes()) - - def test_cinder_obj_get_changes_datetime_tz_non_utc_negative(self): - now_tz = iso8601.parse_date('2015-06-26T10:00:01-05') - now = now_tz.replace(tzinfo=None) + datetime.timedelta(hours=5) - self.obj.scheduled_at = now_tz - self.assertDictEqual({'scheduled_at': now}, - self.obj.cinder_obj_get_changes()) - - @mock.patch('cinder.objects.base.CinderPersistentObject.get_by_id') - def test_refresh(self, get_by_id): - @objects.base.CinderObjectRegistry.register_if(False) - class MyTestObject(objects.base.CinderObject, - objects.base.CinderObjectDictCompat, - objects.base.CinderComparableObject, - 
objects.base.CinderPersistentObject): - fields = {'id': fields.UUIDField(), - 'name': fields.StringField()} - - test_obj = MyTestObject(id=fake.OBJECT_ID, name='foo') - refresh_obj = MyTestObject(id=fake.OBJECT_ID, name='bar') - get_by_id.return_value = refresh_obj - - test_obj.refresh() - self._compare(self, refresh_obj, test_obj) - - @mock.patch('cinder.objects.base.CinderPersistentObject.get_by_id') - def test_refresh_readonly(self, get_by_id_mock): - @objects.base.CinderObjectRegistry.register_if(False) - class MyTestObject(objects.base.CinderObject, - objects.base.CinderObjectDictCompat, - objects.base.CinderComparableObject, - objects.base.CinderPersistentObject): - fields = {'id': fields.UUIDField(), - 'name': fields.StringField(read_only=True)} - - test_obj = MyTestObject(id=fake.OBJECT_ID, name='foo') - refresh_obj = MyTestObject(id=fake.OBJECT_ID, name='bar') - get_by_id_mock.return_value = refresh_obj - - test_obj.refresh() - self._compare(self, refresh_obj, test_obj) - - def test_refresh_no_id_field(self): - @objects.base.CinderObjectRegistry.register_if(False) - class MyTestObjectNoId(objects.base.CinderObject, - objects.base.CinderObjectDictCompat, - objects.base.CinderComparableObject, - objects.base.CinderPersistentObject): - fields = {'uuid': fields.UUIDField()} - - test_obj = MyTestObjectNoId(uuid=fake.OBJECT_ID, name='foo') - self.assertRaises(NotImplementedError, test_obj.refresh) - - @mock.patch('cinder.objects.base.objects', mock.Mock()) - def test_cls_init(self): - """Test that class init method gets called on registration.""" - @objects.base.CinderObjectRegistry.register - class MyTestObject(objects.base.CinderObject, - objects.base.CinderPersistentObject): - cinder_ovo_cls_init = mock.Mock() - - MyTestObject.cinder_ovo_cls_init.assert_called_once_with() - - -class TestCinderComparableObject(test_objects.BaseObjectsTestCase): - def test_comparable_objects(self): - @objects.base.CinderObjectRegistry.register - class 
MyComparableObj(objects.base.CinderObject, - objects.base.CinderObjectDictCompat, - objects.base.CinderComparableObject): - fields = {'foo': fields.Field(fields.Integer())} - - class NonVersionedObject(object): - pass - - obj1 = MyComparableObj(foo=1) - obj2 = MyComparableObj(foo=1) - obj3 = MyComparableObj(foo=2) - obj4 = NonVersionedObject() - self.assertTrue(obj1 == obj2) - self.assertFalse(obj1 == obj3) - self.assertFalse(obj1 == obj4) - self.assertIsNotNone(obj1) - - -@ddt.ddt -class TestCinderObjectConditionalUpdate(test.TestCase): - - def setUp(self): - super(TestCinderObjectConditionalUpdate, self).setUp() - self.context = context.get_admin_context() - - def _create_volume(self): - vol = { - 'display_description': 'Test Desc', - 'size': 1, - 'status': 'available', - 'availability_zone': 'az', - 'host': 'dummy', - 'attach_status': c_fields.VolumeAttachStatus.DETACHED, - } - volume = objects.Volume(context=self.context, **vol) - volume.create() - return volume - - def _create_snapshot(self, volume): - snapshot = objects.Snapshot(context=self.context, volume_id=volume.id) - snapshot.create() - return snapshot - - def _check_volume(self, volume, status, size, reload=False, dirty_keys=(), - **kwargs): - if reload: - volume = objects.Volume.get_by_id(self.context, volume.id) - self.assertEqual(status, volume.status) - self.assertEqual(size, volume.size) - dirty = volume.cinder_obj_get_changes() - self.assertEqual(list(dirty_keys), list(dirty.keys())) - for key, value in kwargs.items(): - self.assertEqual(value, getattr(volume, key)) - - def test_conditional_update_non_iterable_expected(self): - volume = self._create_volume() - # We also check that we can check for None values - self.assertTrue(volume.conditional_update( - {'status': 'deleting', 'size': 2}, - {'status': 'available', 'migration_status': None})) - - # Check that the object in memory has been updated - self._check_volume(volume, 'deleting', 2) - - # Check that the volume in the DB also has been 
updated - self._check_volume(volume, 'deleting', 2, True) - - def test_conditional_update_non_iterable_expected_model_field(self): - volume = self._create_volume() - # We also check that we can check for None values - self.assertTrue(volume.conditional_update( - {'status': 'deleting', 'size': 2, - 'previous_status': volume.model.status}, - {'status': 'available', 'migration_status': None})) - - # Check that the object in memory has been updated - self._check_volume(volume, 'deleting', 2, previous_status='available') - - # Check that the volume in the DB also has been updated - self._check_volume(volume, 'deleting', 2, True, - previous_status='available') - - def test_conditional_update_non_iterable_expected_save_all(self): - volume = self._create_volume() - volume.size += 1 - # We also check that we can check for not None values - self.assertTrue(volume.conditional_update( - {'status': 'deleting'}, - {'status': 'available', 'availability_zone': volume.Not(None)}, - save_all=True)) - - # Check that the object in memory has been updated and that the size - # is not a dirty key - self._check_volume(volume, 'deleting', 2) - - # Check that the volume in the DB also has been updated - self._check_volume(volume, 'deleting', 2, True) - - def test_conditional_update_non_iterable_expected_dont_save_all(self): - volume = self._create_volume() - volume.size += 1 - self.assertTrue(volume.conditional_update( - {'status': 'deleting'}, - {'status': 'available'}, save_all=False)) - - # Check that the object in memory has been updated with the new status - # but that size has not been saved and is a dirty key - self._check_volume(volume, 'deleting', 2, False, ['size']) - - # Check that the volume in the DB also has been updated but not the - # size - self._check_volume(volume, 'deleting', 1, True) - - def test_conditional_update_fail_non_iterable_expected_save_all(self): - volume = self._create_volume() - volume.size += 1 - self.assertFalse(volume.conditional_update( - {'status': 
'available'}, - {'status': 'deleting'}, save_all=True)) - - # Check that the object in memory has not been updated and that the - # size is still a dirty key - self._check_volume(volume, 'available', 2, False, ['size']) - - # Check that the volume in the DB hasn't been updated - self._check_volume(volume, 'available', 1, True) - - def test_default_conditional_update_non_iterable_expected(self): - volume = self._create_volume() - self.assertTrue(volume.conditional_update({'status': 'deleting'})) - - # Check that the object in memory has been updated - self._check_volume(volume, 'deleting', 1) - - # Check that the volume in the DB also has been updated - self._check_volume(volume, 'deleting', 1, True) - - def test_default_conditional_fail_update_non_iterable_expected(self): - volume_in_db = self._create_volume() - volume = objects.Volume.get_by_id(self.context, volume_in_db.id) - volume_in_db.size += 1 - volume_in_db.save() - # This will fail because size in DB is different - self.assertFalse(volume.conditional_update({'status': 'deleting'})) - - # Check that the object in memory has not been updated - self._check_volume(volume, 'available', 1) - - # Check that the volume in the DB hasn't changed the status but has - # the size we changed before the conditional update - self._check_volume(volume_in_db, 'available', 2, True) - - def test_default_conditional_update_non_iterable_expected_with_dirty(self): - volume_in_db = self._create_volume() - volume = objects.Volume.get_by_id(self.context, volume_in_db.id) - volume_in_db.size += 1 - volume_in_db.save() - volume.size = 33 - # This will fail because even though we have excluded the size from - # the default condition when we dirtied it in the volume object, we - # still have the last update timestamp that will be included in the - # condition - self.assertFalse(volume.conditional_update({'status': 'deleting'})) - - # Check that the object in memory has not been updated - self._check_volume(volume, 'available', 33, 
False, ['size']) - - # Check that the volume in the DB hasn't changed the status but has - # the size we changed before the conditional update - self._check_volume(volume_in_db, 'available', 2, True) - - def test_conditional_update_negated_non_iterable_expected(self): - volume = self._create_volume() - self.assertTrue(volume.conditional_update( - {'status': 'deleting', 'size': 2}, - {'status': db.Not('in-use'), 'size': db.Not(2)})) - - # Check that the object in memory has been updated - self._check_volume(volume, 'deleting', 2) - - # Check that the volume in the DB also has been updated - self._check_volume(volume, 'deleting', 2, True) - - def test_conditional_update_non_iterable_expected_filter(self): - # Volume we want to change - volume = self._create_volume() - - # Another volume that has no snapshots - volume2 = self._create_volume() - - # A volume with snapshots - volume3 = self._create_volume() - self._create_snapshot(volume3) - - # Update only it it has no snapshot - filters = (~sql.exists().where( - models.Snapshot.volume_id == models.Volume.id),) - - self.assertTrue(volume.conditional_update( - {'status': 'deleting', 'size': 2}, - {'status': 'available'}, - filters)) - - # Check that the object in memory has been updated - self._check_volume(volume, 'deleting', 2) - - # Check that the volume in the DB also has been updated - self._check_volume(volume, 'deleting', 2, True) - - # Check that the other volumes in the DB haven't changed - self._check_volume(volume2, 'available', 1, True) - self._check_volume(volume3, 'available', 1, True) - - def test_conditional_update_iterable_expected(self): - volume = self._create_volume() - self.assertTrue(volume.conditional_update( - {'status': 'deleting', 'size': 20}, - {'status': ('error', 'available'), 'size': range(10)})) - - # Check that the object in memory has been updated - self._check_volume(volume, 'deleting', 20) - - # Check that the volume in the DB also has been updated - self._check_volume(volume, 
'deleting', 20, True) - - def test_conditional_update_negated_iterable_expected(self): - volume = self._create_volume() - self.assertTrue(volume.conditional_update( - {'status': 'deleting', 'size': 20}, - {'status': db.Not(('creating', 'in-use')), 'size': range(10)})) - - # Check that the object in memory has been updated - self._check_volume(volume, 'deleting', 20) - - # Check that the volume in the DB also has been updated - self._check_volume(volume, 'deleting', 20, True) - - def test_conditional_update_fail_non_iterable_expected(self): - volume = self._create_volume() - self.assertFalse(volume.conditional_update( - {'status': 'deleting'}, - {'status': 'available', 'size': 2})) - - # Check that the object in memory hasn't changed - self._check_volume(volume, 'available', 1) - - # Check that the volume in the DB hasn't changed either - self._check_volume(volume, 'available', 1, True) - - def test_conditional_update_fail_negated_non_iterable_expected(self): - volume = self._create_volume() - result = volume.conditional_update({'status': 'deleting'}, - {'status': db.Not('in-use'), - 'size': 2}) - self.assertFalse(result) - - # Check that the object in memory hasn't changed - self._check_volume(volume, 'available', 1) - - # Check that the volume in the DB hasn't changed either - self._check_volume(volume, 'available', 1, True) - - def test_conditional_update_fail_iterable_expected(self): - volume = self._create_volume() - self.assertFalse(volume.conditional_update( - {'status': 'available'}, - {'status': ('error', 'creating'), 'size': range(2, 10)})) - - # Check that the object in memory hasn't changed - self._check_volume(volume, 'available', 1) - - # Check that the volume in the DB hasn't changed either - self._check_volume(volume, 'available', 1, True) - - def test_conditional_update_fail_negated_iterable_expected(self): - volume = self._create_volume() - self.assertFalse(volume.conditional_update( - {'status': 'error'}, - {'status': db.Not(('available', 
'in-use')), 'size': range(2, 10)})) - - # Check that the object in memory hasn't changed - self._check_volume(volume, 'available', 1) - - # Check that the volume in the DB hasn't changed either - self._check_volume(volume, 'available', 1, True) - - def test_conditional_update_fail_non_iterable_expected_filter(self): - # Volume we want to change - volume = self._create_volume() - self._create_snapshot(volume) - - # A volume that has no snapshots - volume2 = self._create_volume() - - # Another volume with snapshots - volume3 = self._create_volume() - self._create_snapshot(volume3) - - # Update only it it has no snapshot - filters = (~sql.exists().where( - models.Snapshot.volume_id == models.Volume.id),) - - self.assertFalse(volume.conditional_update( - {'status': 'deleting', 'size': 2}, - {'status': 'available'}, - filters)) - - # Check that the object in memory hasn't been updated - self._check_volume(volume, 'available', 1) - - # Check that no volume in the DB also has been updated - self._check_volume(volume, 'available', 1, True) - self._check_volume(volume2, 'available', 1, True) - self._check_volume(volume3, 'available', 1, True) - - def test_conditional_update_non_iterable_case_value(self): - # Volume we want to change and has snapshots - volume = self._create_volume() - self._create_snapshot(volume) - - # Filter that checks if a volume has snapshots - has_snapshot_filter = sql.exists().where( - models.Snapshot.volume_id == models.Volume.id) - - # We want the updated value to depend on whether it has snapshots or - # not - case_values = volume.Case([(has_snapshot_filter, 'has-snapshot')], - else_='no-snapshot') - self.assertTrue(volume.conditional_update({'status': case_values}, - {'status': 'available'})) - - # Check that the object in memory has been updated - self._check_volume(volume, 'has-snapshot', 1) - - # Check that the volume in the DB also has been updated - self._check_volume(volume, 'has-snapshot', 1, True) - - def 
test_conditional_update_non_iterable_case_value_else(self): - # Volume we want to change - volume = self._create_volume() - - # Filter that checks if a volume has snapshots - has_snapshot_filter = sql.exists().where( - models.Snapshot.volume_id == models.Volume.id) - - # We want the updated value to depend on whether it has snapshots or - # not - case_values = volume.Case([(has_snapshot_filter, 'has-snapshot')], - else_='no-snapshot') - self.assertTrue(volume.conditional_update({'status': case_values}, - {'status': 'available'})) - - # Check that the object in memory has been updated - self._check_volume(volume, 'no-snapshot', 1) - - # Check that the volume in the DB also has been updated - self._check_volume(volume, 'no-snapshot', 1, True) - - def test_conditional_update_non_iterable_case_value_fail(self): - # Volume we want to change doesn't have snapshots - volume = self._create_volume() - - # Filter that checks if a volume has snapshots - has_snapshot_filter = sql.exists().where( - models.Snapshot.volume_id == models.Volume.id) - - # We want the updated value to depend on whether it has snapshots or - # not - case_values = volume.Case([(has_snapshot_filter, 'has-snapshot')], - else_='no-snapshot') - # We won't update because volume status is available - self.assertFalse(volume.conditional_update({'status': case_values}, - {'status': 'deleting'})) - - # Check that the object in memory has not been updated - self._check_volume(volume, 'available', 1) - - # Check that the volume in the DB also hasn't been updated either - self._check_volume(volume, 'available', 1, True) - - def test_conditional_update_iterable_with_none_expected(self): - volume = self._create_volume() - # We also check that we can check for None values in an iterable - self.assertTrue(volume.conditional_update( - {'status': 'deleting'}, - {'status': (None, 'available'), - 'migration_status': (None, 'finished')})) - - # Check that the object in memory has been updated - self._check_volume(volume, 
'deleting', 1) - - # Check that the volume in the DB also has been updated - self._check_volume(volume, 'deleting', 1, True) - - def test_conditional_update_iterable_with_not_none_expected(self): - volume = self._create_volume() - # We also check that we can check for None values in a negated iterable - self.assertTrue(volume.conditional_update( - {'status': 'deleting'}, - {'status': volume.Not((None, 'in-use'))})) - - # Check that the object in memory has been updated - self._check_volume(volume, 'deleting', 1) - - # Check that the volume in the DB also has been updated - self._check_volume(volume, 'deleting', 1, True) - - def test_conditional_update_iterable_with_not_includes_null(self): - volume = self._create_volume() - # We also check that negation includes None values by default like we - # do in Python and not like MySQL does - self.assertTrue(volume.conditional_update( - {'status': 'deleting'}, - {'status': 'available', - 'migration_status': volume.Not(('migrating', 'error'))})) - - # Check that the object in memory has been updated - self._check_volume(volume, 'deleting', 1) - - # Check that the volume in the DB also has been updated - self._check_volume(volume, 'deleting', 1, True) - - def test_conditional_update_iterable_with_not_includes_null_fails(self): - volume = self._create_volume() - # We also check that negation excludes None values if we ask it to - self.assertFalse(volume.conditional_update( - {'status': 'deleting'}, - {'status': 'available', - 'migration_status': volume.Not(('migrating', 'error'), - auto_none=False)})) - - # Check that the object in memory has not been updated - self._check_volume(volume, 'available', 1, False) - - # Check that the volume in the DB hasn't been updated - self._check_volume(volume, 'available', 1, True) - - def test_conditional_update_use_operation_in_value(self): - volume = self._create_volume() - expected_size = volume.size + 1 - - # We also check that using fields in requested changes will work as - # 
expected - self.assertTrue(volume.conditional_update( - {'status': 'deleting', - 'size': volume.model.size + 1}, - {'status': 'available'})) - - # Check that the object in memory has been updated - self._check_volume(volume, 'deleting', expected_size, False) - - # Check that the volume in the DB has also been updated - self._check_volume(volume, 'deleting', expected_size, True) - - def test_conditional_update_auto_order(self): - volume = self._create_volume() - - has_snapshot_filter = sql.exists().where( - models.Snapshot.volume_id == models.Volume.id) - - case_values = volume.Case([(has_snapshot_filter, 'has-snapshot')], - else_='no-snapshot') - - values = {'status': 'deleting', - 'previous_status': volume.model.status, - 'migration_status': case_values} - - with mock.patch('cinder.db.sqlalchemy.api.model_query') as model_query: - update = model_query.return_value.filter.return_value.update - update.return_value = 0 - self.assertFalse(volume.conditional_update( - values, {'status': 'available'})) - - # We check that we are passing values to update to SQLAlchemy in the - # right order - self.assertEqual(1, update.call_count) - self.assertListEqual( - [('previous_status', volume.model.status), - ('migration_status', mock.ANY), - ('status', 'deleting')], - list(update.call_args[0][0])) - self.assertDictEqual( - {'synchronize_session': False, - 'update_args': {'preserve_parameter_order': True}}, - update.call_args[1]) - - def test_conditional_update_force_order(self): - volume = self._create_volume() - - has_snapshot_filter = sql.exists().where( - models.Snapshot.volume_id == models.Volume.id) - - case_values = volume.Case([(has_snapshot_filter, 'has-snapshot')], - else_='no-snapshot') - - values = {'status': 'deleting', - 'previous_status': volume.model.status, - 'migration_status': case_values} - - order = ['status'] - - with mock.patch('cinder.db.sqlalchemy.api.model_query') as model_query: - update = model_query.return_value.filter.return_value.update - 
update.return_value = 0 - self.assertFalse(volume.conditional_update( - values, {'status': 'available'}, order=order)) - - # We check that we are passing values to update to SQLAlchemy in the - # right order - self.assertEqual(1, update.call_count) - self.assertListEqual( - [('status', 'deleting'), - ('previous_status', volume.model.status), - ('migration_status', mock.ANY)], - list(update.call_args[0][0])) - self.assertDictEqual( - {'synchronize_session': False, - 'update_args': {'preserve_parameter_order': True}}, - update.call_args[1]) - - def test_conditional_update_no_order(self): - volume = self._create_volume() - - values = {'status': 'deleting', - 'previous_status': 'available', - 'migration_status': None} - - with mock.patch('cinder.db.sqlalchemy.api.model_query') as model_query: - update = model_query.return_value.filter.return_value.update - update.return_value = 0 - self.assertFalse(volume.conditional_update( - values, {'status': 'available'})) - - # Check that arguments passed to SQLAlchemy's update are correct (order - # is not relevant). 
- self.assertEqual(1, update.call_count) - arg = update.call_args[0][0] - self.assertIsInstance(arg, dict) - self.assertEqual(set(values.keys()), set(arg.keys())) - - def test_conditional_update_multitable_fail(self): - volume = self._create_volume() - self.assertRaises(exception.ProgrammingError, - volume.conditional_update, - {'status': 'deleting', - objects.Snapshot.model.status: 'available'}, - {'status': 'available'}) - - def test_conditional_update_multitable_fail_fields_different_models(self): - volume = self._create_volume() - self.assertRaises(exception.ProgrammingError, - volume.conditional_update, - {objects.Backup.model.status: 'available', - objects.Snapshot.model.status: 'available'}) - - def test_conditional_update_not_multitable(self): - volume = self._create_volume() - with mock.patch('cinder.db.sqlalchemy.api._create_facade_lazily') as m: - res = volume.conditional_update( - {objects.Volume.model.status: 'deleting', - objects.Volume.model.size: 12}, reflect_changes=False) - self.assertTrue(res) - self.assertTrue(m.called) - - @ddt.data(('available', 'error', None), - ('error', 'rolling_back', [{'fake_filter': 'faked'}])) - @ddt.unpack - @mock.patch('cinder.objects.base.' 
- 'CinderPersistentObject.conditional_update') - def test_update_status_where(self, value, expected, filters, mock_update): - volume = self._create_volume() - if filters: - volume.update_single_status_where(value, expected, filters) - mock_update.assert_called_with({'status': value}, - {'status': expected}, - filters) - else: - volume.update_single_status_where(value, expected) - mock_update.assert_called_with({'status': value}, - {'status': expected}, - ()) - - -class TestCinderDictObject(test_objects.BaseObjectsTestCase): - @objects.base.CinderObjectRegistry.register_if(False) - class TestDictObject(objects.base.CinderObjectDictCompat, - objects.base.CinderObject): - obj_extra_fields = ['foo'] - - fields = { - 'abc': fields.StringField(nullable=True), - 'def': fields.IntegerField(nullable=True), - } - - @property - def foo(self): - return 42 - - def test_dict_objects(self): - obj = self.TestDictObject() - self.assertNotIn('non_existing', obj) - self.assertEqual('val', obj.get('abc', 'val')) - self.assertNotIn('abc', obj) - obj.abc = 'val2' - self.assertEqual('val2', obj.get('abc', 'val')) - self.assertEqual(42, obj.get('foo')) - self.assertEqual(42, obj.get('foo', None)) - - self.assertIn('foo', obj) - self.assertIn('abc', obj) - self.assertNotIn('def', obj) - - -@mock.patch('cinder.objects.base.OBJ_VERSIONS', fake_objects.MyHistory()) -class TestCinderObjectSerializer(test_objects.BaseObjectsTestCase): - BACKPORT_MSG = ('Backporting %(obj_name)s from version %(src_vers)s to ' - 'version %(dst_vers)s') - - def setUp(self): - super(TestCinderObjectSerializer, self).setUp() - self.obj = fake_objects.ChildObject(scheduled_at=None, - uuid=uuid.uuid4(), - text='text', - integer=1) - self.parent = fake_objects.ParentObject(uuid=uuid.uuid4(), - child=self.obj, - scheduled_at=None) - self.parent_list = fake_objects.ParentObjectList(objects=[self.parent]) - - def test_serialize_init_current_has_no_manifest(self): - """Test that pinned to current version we have no 
manifest.""" - serializer = objects.base.CinderObjectSerializer('1.6') - # Serializer should not have a manifest - self.assertIsNone(serializer.manifest) - - def test_serialize_init_no_cap_has_no_manifest(self): - """Test that without cap we have no manifest.""" - serializer = objects.base.CinderObjectSerializer() - # Serializer should not have a manifest - self.assertIsNone(serializer.manifest) - - def test_serialize_init_pinned_has_manifest(self): - """Test that pinned to older version we have manifest.""" - objs_version = '1.5' - serializer = objects.base.CinderObjectSerializer(objs_version) - # Serializer should have the right manifest - self.assertDictEqual(fake_objects.MyHistory()[objs_version], - serializer.manifest) - - def test_serialize_entity_unknown_version(self): - """Test that bad cap version will prevent serializer creation.""" - self.assertRaises(exception.CappedVersionUnknown, - objects.base.CinderObjectSerializer, '0.9') - - @mock.patch('cinder.objects.base.LOG.debug') - def test_serialize_entity_basic_no_backport(self, log_debug_mock): - """Test single element serializer with no backport.""" - serializer = objects.base.CinderObjectSerializer('1.6') - primitive = serializer.serialize_entity(self.context, self.obj) - self.assertEqual('1.2', primitive['versioned_object.version']) - data = primitive['versioned_object.data'] - self.assertEqual(1, data['integer']) - self.assertEqual('text', data['text']) - log_debug_mock.assert_not_called() - - @mock.patch('cinder.objects.base.LOG.debug') - def test_serialize_entity_basic_backport(self, log_debug_mock): - """Test single element serializer with backport.""" - serializer = objects.base.CinderObjectSerializer('1.5') - primitive = serializer.serialize_entity(self.context, self.obj) - self.assertEqual('1.1', primitive['versioned_object.version']) - data = primitive['versioned_object.data'] - self.assertNotIn('integer', data) - self.assertEqual('text', data['text']) - 
log_debug_mock.assert_called_once_with(self.BACKPORT_MSG, - {'obj_name': 'ChildObject', - 'src_vers': '1.2', - 'dst_vers': '1.1'}) - - @mock.patch('cinder.objects.base.LOG.debug') - def test_serialize_entity_full_no_backport(self, log_debug_mock): - """Test related elements serialization with no backport.""" - serializer = objects.base.CinderObjectSerializer('1.6') - primitive = serializer.serialize_entity(self.context, self.parent_list) - self.assertEqual('1.1', primitive['versioned_object.version']) - parent = primitive['versioned_object.data']['objects'][0] - self.assertEqual('1.1', parent['versioned_object.version']) - child = parent['versioned_object.data']['child'] - self.assertEqual('1.2', child['versioned_object.version']) - log_debug_mock.assert_not_called() - - @mock.patch('cinder.objects.base.LOG.debug') - def test_serialize_entity_full_backport_last_children(self, - log_debug_mock): - """Test related elements serialization with backport of the last child. - - Test that using the manifest we properly backport a child object even - when all its parents have not changed their version. 
- """ - serializer = objects.base.CinderObjectSerializer('1.5') - primitive = serializer.serialize_entity(self.context, self.parent_list) - self.assertEqual('1.1', primitive['versioned_object.version']) - parent = primitive['versioned_object.data']['objects'][0] - self.assertEqual('1.1', parent['versioned_object.version']) - # Only the child has been backported - child = parent['versioned_object.data']['child'] - self.assertEqual('1.1', child['versioned_object.version']) - # Check that the backport has been properly done - data = child['versioned_object.data'] - self.assertNotIn('integer', data) - self.assertEqual('text', data['text']) - log_debug_mock.assert_called_once_with(self.BACKPORT_MSG, - {'obj_name': 'ChildObject', - 'src_vers': '1.2', - 'dst_vers': '1.1'}) - - @mock.patch('cinder.objects.base.LOG.debug') - def test_serialize_entity_full_backport(self, log_debug_mock): - """Test backport of the whole tree of related elements.""" - serializer = objects.base.CinderObjectSerializer('1.3') - primitive = serializer.serialize_entity(self.context, self.parent_list) - # List has been backported - self.assertEqual('1.0', primitive['versioned_object.version']) - parent = primitive['versioned_object.data']['objects'][0] - # Parent has been backported as well - self.assertEqual('1.0', parent['versioned_object.version']) - # And the backport has been properly done - data = parent['versioned_object.data'] - self.assertNotIn('scheduled_at', data) - # And child as well - child = parent['versioned_object.data']['child'] - self.assertEqual('1.1', child['versioned_object.version']) - # Check that the backport has been properly done - data = child['versioned_object.data'] - self.assertNotIn('integer', data) - self.assertEqual('text', data['text']) - log_debug_mock.assert_has_calls([ - mock.call(self.BACKPORT_MSG, {'obj_name': 'ParentObjectList', - 'src_vers': '1.1', - 'dst_vers': '1.0'}), - mock.call(self.BACKPORT_MSG, {'obj_name': 'ParentObject', - 'src_vers': '1.1', - 
'dst_vers': '1.0'}), - mock.call(self.BACKPORT_MSG, {'obj_name': 'ChildObject', - 'src_vers': '1.2', - 'dst_vers': '1.1'})]) diff --git a/cinder/tests/unit/objects/test_cgsnapshot.py b/cinder/tests/unit/objects/test_cgsnapshot.py deleted file mode 100644 index 42696122b..000000000 --- a/cinder/tests/unit/objects/test_cgsnapshot.py +++ /dev/null @@ -1,178 +0,0 @@ -# Copyright 2015 Intel Corporation -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import mock -from oslo_utils import timeutils -import pytz -import six - -from cinder import exception -from cinder import objects -from cinder.tests.unit import fake_constants as fake -from cinder.tests.unit import objects as test_objects -from cinder.tests.unit.objects.test_consistencygroup import \ - fake_consistencygroup - -fake_cgsnapshot = { - 'id': fake.CGSNAPSHOT_ID, - 'user_id': fake.USER_ID, - 'project_id': fake.PROJECT_ID, - 'name': 'fake_name', - 'description': 'fake_description', - 'status': 'creating', - 'consistencygroup_id': fake.CONSISTENCY_GROUP_ID, -} - - -class TestCGSnapshot(test_objects.BaseObjectsTestCase): - - @mock.patch('cinder.db.sqlalchemy.api.cgsnapshot_get', - return_value=fake_cgsnapshot) - def test_get_by_id(self, cgsnapshot_get): - cgsnapshot = objects.CGSnapshot.get_by_id(self.context, - fake.CGSNAPSHOT_ID) - self._compare(self, fake_cgsnapshot, cgsnapshot) - - @mock.patch('cinder.db.cgsnapshot_create', - return_value=fake_cgsnapshot) - def test_create(self, cgsnapshot_create): - fake_cgsnap 
= fake_cgsnapshot.copy() - del fake_cgsnap['id'] - cgsnapshot = objects.CGSnapshot(context=self.context, **fake_cgsnap) - cgsnapshot.create() - self._compare(self, fake_cgsnapshot, cgsnapshot) - - def test_create_with_id_except_exception(self): - cgsnapshot = objects.CGSnapshot(context=self.context, - **{'id': fake.CONSISTENCY_GROUP_ID}) - self.assertRaises(exception.ObjectActionError, cgsnapshot.create) - - @mock.patch('cinder.db.cgsnapshot_update') - def test_save(self, cgsnapshot_update): - cgsnapshot = objects.CGSnapshot._from_db_object( - self.context, objects.CGSnapshot(), fake_cgsnapshot) - cgsnapshot.status = 'active' - cgsnapshot.save() - cgsnapshot_update.assert_called_once_with(self.context, cgsnapshot.id, - {'status': 'active'}) - - @mock.patch('cinder.db.consistencygroup_update', - return_value=fake_consistencygroup) - @mock.patch('cinder.db.cgsnapshot_update') - def test_save_with_consistencygroup(self, cgsnapshot_update, - cgsnapshot_cg_update): - consistencygroup = objects.ConsistencyGroup._from_db_object( - self.context, objects.ConsistencyGroup(), fake_consistencygroup) - cgsnapshot = objects.CGSnapshot._from_db_object( - self.context, objects.CGSnapshot(), fake_cgsnapshot) - cgsnapshot.name = 'foobar' - cgsnapshot.consistencygroup = consistencygroup - self.assertEqual({'name': 'foobar', - 'consistencygroup': consistencygroup}, - cgsnapshot.obj_get_changes()) - self.assertRaises(exception.ObjectActionError, cgsnapshot.save) - - @mock.patch('oslo_utils.timeutils.utcnow', return_value=timeutils.utcnow()) - @mock.patch('cinder.db.sqlalchemy.api.cgsnapshot_destroy') - def test_destroy(self, cgsnapshot_destroy, utcnow_mock): - cgsnapshot_destroy.return_value = { - 'status': 'deleted', - 'deleted': True, - 'deleted_at': utcnow_mock.return_value} - cgsnapshot = objects.CGSnapshot(context=self.context, - id=fake.CGSNAPSHOT_ID) - cgsnapshot.destroy() - self.assertTrue(cgsnapshot_destroy.called) - admin_context = cgsnapshot_destroy.call_args[0][0] - 
self.assertTrue(admin_context.is_admin) - self.assertTrue(cgsnapshot.deleted) - self.assertEqual('deleted', cgsnapshot.status) - self.assertEqual(utcnow_mock.return_value.replace(tzinfo=pytz.UTC), - cgsnapshot.deleted_at) - - @mock.patch('cinder.objects.consistencygroup.ConsistencyGroup.get_by_id') - @mock.patch('cinder.objects.snapshot.SnapshotList.get_all_for_cgsnapshot') - def test_obj_load_attr(self, snapshotlist_get_for_cgs, - consistencygroup_get_by_id): - cgsnapshot = objects.CGSnapshot._from_db_object( - self.context, objects.CGSnapshot(), fake_cgsnapshot) - # Test consistencygroup lazy-loaded field - consistencygroup = objects.ConsistencyGroup( - context=self.context, id=fake.CONSISTENCY_GROUP_ID) - consistencygroup_get_by_id.return_value = consistencygroup - self.assertEqual(consistencygroup, cgsnapshot.consistencygroup) - consistencygroup_get_by_id.assert_called_once_with( - self.context, cgsnapshot.consistencygroup_id) - # Test snapshots lazy-loaded field - snapshots_objs = [objects.Snapshot(context=self.context, id=i) - for i in [fake.SNAPSHOT_ID, fake.SNAPSHOT2_ID, - fake.SNAPSHOT3_ID]] - snapshots = objects.SnapshotList(context=self.context, - objects=snapshots_objs) - snapshotlist_get_for_cgs.return_value = snapshots - self.assertEqual(snapshots, cgsnapshot.snapshots) - snapshotlist_get_for_cgs.assert_called_once_with( - self.context, cgsnapshot.id) - - @mock.patch('cinder.db.sqlalchemy.api.cgsnapshot_get') - def test_refresh(self, cgsnapshot_get): - db_cgsnapshot1 = fake_cgsnapshot.copy() - db_cgsnapshot2 = db_cgsnapshot1.copy() - db_cgsnapshot2['description'] = 'foobar' - - # On the second cgsnapshot_get, return the CGSnapshot with an updated - # description - cgsnapshot_get.side_effect = [db_cgsnapshot1, db_cgsnapshot2] - cgsnapshot = objects.CGSnapshot.get_by_id(self.context, - fake.CGSNAPSHOT_ID) - self._compare(self, db_cgsnapshot1, cgsnapshot) - - # description was updated, so a CGSnapshot refresh should have a new - # value for that field - 
cgsnapshot.refresh() - self._compare(self, db_cgsnapshot2, cgsnapshot) - if six.PY3: - call_bool = mock.call.__bool__() - else: - call_bool = mock.call.__nonzero__() - cgsnapshot_get.assert_has_calls([mock.call(self.context, - fake.CGSNAPSHOT_ID), - call_bool, - mock.call(self.context, - fake.CGSNAPSHOT_ID)]) - - -class TestCGSnapshotList(test_objects.BaseObjectsTestCase): - @mock.patch('cinder.db.cgsnapshot_get_all', - return_value=[fake_cgsnapshot]) - def test_get_all(self, cgsnapshot_get_all): - cgsnapshots = objects.CGSnapshotList.get_all(self.context) - self.assertEqual(1, len(cgsnapshots)) - TestCGSnapshot._compare(self, fake_cgsnapshot, cgsnapshots[0]) - - @mock.patch('cinder.db.cgsnapshot_get_all_by_project', - return_value=[fake_cgsnapshot]) - def test_get_all_by_project(self, cgsnapshot_get_all_by_project): - cgsnapshots = objects.CGSnapshotList.get_all_by_project( - self.context, self.project_id) - self.assertEqual(1, len(cgsnapshots)) - TestCGSnapshot._compare(self, fake_cgsnapshot, cgsnapshots[0]) - - @mock.patch('cinder.db.cgsnapshot_get_all_by_group', - return_value=[fake_cgsnapshot]) - def test_get_all_by_group(self, cgsnapshot_get_all_by_group): - cgsnapshots = objects.CGSnapshotList.get_all_by_group( - self.context, self.project_id) - self.assertEqual(1, len(cgsnapshots)) - TestCGSnapshot._compare(self, fake_cgsnapshot, cgsnapshots[0]) diff --git a/cinder/tests/unit/objects/test_cleanable.py b/cinder/tests/unit/objects/test_cleanable.py deleted file mode 100644 index 7830f3902..000000000 --- a/cinder/tests/unit/objects/test_cleanable.py +++ /dev/null @@ -1,379 +0,0 @@ -# Copyright (c) 2016 Red Hat, Inc. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. 
You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import inspect - -import mock - -from cinder import context -from cinder import exception -from cinder.objects import cleanable -from cinder import rpc -from cinder import service -from cinder.tests.unit import objects as test_objects -from cinder.volume import rpcapi - - -# NOTE(geguileo): We use Backup because we have version changes from 1.0 to 1.3 - -class Backup(cleanable.CinderCleanableObject): - def __init__(self, *args, **kwargs): - super(Backup, self).__init__(*args) - for attr, value in kwargs.items(): - setattr(self, attr, value) - - @staticmethod - def _is_cleanable(status, obj_version): - if obj_version and obj_version <= 1003: - return False - return status == 'cleanable' - - -class TestCleanable(test_objects.BaseObjectsTestCase): - MOCK_WORKER = False - - def setUp(self): - super(TestCleanable, self).setUp() - self.context = context.RequestContext(self.user_id, self.project_id, - is_admin=True) - - def test_get_rpc_api(self): - """Test get_rpc_api.""" - vol_rpcapi = cleanable.CinderCleanableObject.get_rpc_api() - self.assertEqual(rpcapi.VolumeAPI, vol_rpcapi) - - def test_get_pinned_version(self): - """Test that we get the pinned version for this specific object.""" - rpc.LAST_OBJ_VERSIONS[Backup.get_rpc_api().BINARY] = '1.0' - version = Backup.get_pinned_version() - self.assertEqual(1003, version) - - def test_is_cleanable_pinned_pinned_too_old(self): - """Test is_cleanable with pinned version with uncleanable version.""" - rpc.LAST_OBJ_VERSIONS[Backup.get_rpc_api().BINARY] = '1.0' - backup = Backup(status='cleanable') - 
self.assertFalse(backup.is_cleanable(pinned=True)) - - def test_is_cleanable_pinned_result_true(self): - """Test with pinned version with cleanable version and status.""" - rpc.LAST_OBJ_VERSIONS[Backup.get_rpc_api().BINARY] = '1.3' - backup = Backup(status='cleanable') - self.assertTrue(backup.is_cleanable(pinned=True)) - - def test_is_cleanable_pinned_result_false(self): - """Test with pinned version with cleanable version but not status.""" - rpc.LAST_OBJ_VERSIONS[Backup.get_rpc_api().BINARY] = '1.3' - backup = Backup(status='not_cleanable') - self.assertFalse(backup.is_cleanable(pinned=True)) - - def test_is_cleanable_unpinned_result_false(self): - """Test unpinned version with old version and non cleanable status.""" - rpc.LAST_OBJ_VERSIONS[Backup.get_rpc_api().BINARY] = '1.0' - backup = Backup(status='not_cleanable') - self.assertFalse(backup.is_cleanable(pinned=False)) - - def test_is_cleanable_unpinned_result_true(self): - """Test unpinned version with old version and cleanable status.""" - rpc.LAST_OBJ_VERSIONS[Backup.get_rpc_api().BINARY] = '1.0' - backup = Backup(status='cleanable') - self.assertTrue(backup.is_cleanable(pinned=False)) - - @mock.patch('cinder.db.worker_create', autospec=True) - def test_create_worker(self, mock_create): - """Test worker creation as if it were from an rpc call.""" - rpc.LAST_OBJ_VERSIONS[Backup.get_rpc_api().BINARY] = '1.3' - mock_create.return_value = mock.sentinel.worker - backup = Backup(_context=self.context, status='cleanable', - id=mock.sentinel.id) - res = backup.create_worker() - self.assertTrue(res) - mock_create.assert_called_once_with(self.context, - status='cleanable', - resource_type='Backup', - resource_id=mock.sentinel.id) - - @mock.patch('cinder.db.worker_create', autospec=True) - def test_create_worker_pinned_too_old(self, mock_create): - """Test worker creation when we are pinnned with an old version.""" - rpc.LAST_OBJ_VERSIONS[Backup.get_rpc_api().BINARY] = '1.0' - mock_create.return_value = 
mock.sentinel.worker - backup = Backup(_context=self.context, status='cleanable', - id=mock.sentinel.id) - res = backup.create_worker() - self.assertFalse(res) - self.assertFalse(mock_create.called) - - @mock.patch('cinder.db.worker_create', autospec=True) - def test_create_worker_non_cleanable(self, mock_create): - """Test worker creation when status is non cleanable.""" - rpc.LAST_OBJ_VERSIONS[Backup.get_rpc_api().BINARY] = '1.3' - mock_create.return_value = mock.sentinel.worker - backup = Backup(_context=self.context, status='non_cleanable', - id=mock.sentinel.id) - res = backup.create_worker() - self.assertFalse(res) - self.assertFalse(mock_create.called) - - @mock.patch('cinder.db.worker_update', autospec=True) - @mock.patch('cinder.db.worker_create', autospec=True) - def test_create_worker_already_exists(self, mock_create, mock_update): - """Test worker creation when a worker for the resource exists.""" - rpc.LAST_OBJ_VERSIONS[Backup.get_rpc_api().BINARY] = '1.3' - mock_create.side_effect = exception.WorkerExists(type='type', id='id') - - backup = Backup(_context=self.context, status='cleanable', - id=mock.sentinel.id) - res = backup.create_worker() - self.assertTrue(res) - self.assertTrue(mock_create.called) - mock_update.assert_called_once_with( - self.context, None, - filters={'resource_type': 'Backup', - 'resource_id': mock.sentinel.id}, - service_id=None, status='cleanable') - - @mock.patch('cinder.db.worker_update', autospec=True) - @mock.patch('cinder.db.worker_create', autospec=True) - def test_create_worker_cleaning(self, mock_create, mock_update): - """Test worker creation on race condition. - - Test that we still create an entry if there is a rare race condition - that the entry gets removed from the DB between our failure to create - it and our try to update the entry. 
- """ - rpc.LAST_OBJ_VERSIONS[Backup.get_rpc_api().BINARY] = '1.3' - mock_create.side_effect = [ - exception.WorkerExists(type='type', id='id'), mock.sentinel.worker] - mock_update.side_effect = exception.WorkerNotFound - - backup = Backup(_context=self.context, status='cleanable', - id=mock.sentinel.id) - self.assertTrue(backup.create_worker()) - self.assertEqual(2, mock_create.call_count) - self.assertTrue(mock_update.called) - - @mock.patch('cinder.db.worker_update', autospec=True) - @mock.patch('cinder.db.worker_get', autospec=True) - def test_set_worker(self, mock_get, mock_update): - """Test set worker for a normal job received from an rpc call.""" - service.Service.service_id = mock.sentinel.service_id - mock_get.return_value.cleaning = False - backup = Backup(_context=self.context, status=mock.sentinel.status, - id=mock.sentinel.id) - - backup.set_worker() - mock_get.assert_called_once_with(self.context, resource_type='Backup', - resource_id=mock.sentinel.id) - worker = mock_get.return_value - mock_update.assert_called_once_with( - self.context, worker.id, - filters={'service_id': worker.service_id, - 'status': worker.status, - 'race_preventer': worker.race_preventer, - 'updated_at': worker.updated_at}, - service_id=mock.sentinel.service_id, - status=mock.sentinel.status, - orm_worker=worker) - self.assertEqual(worker, backup.worker) - - @mock.patch('cinder.db.worker_create', autospec=True) - @mock.patch('cinder.db.worker_get', autospec=True) - def test_set_worker_direct(self, mock_get, mock_create): - """Test set worker for direct call (non rpc call).""" - mock_get.side_effect = exception.WorkerNotFound - service_id = mock.sentinel.service_id - service.Service.service_id = service_id - mock_create.return_value = mock.Mock(service_id=service_id, - status=mock.sentinel.status, - deleted=False, cleaning=False) - - backup = Backup(_context=self.context, status=mock.sentinel.status, - id=mock.sentinel.id) - - backup.set_worker() - 
mock_get.assert_called_once_with(self.context, resource_type='Backup', - resource_id=mock.sentinel.id) - mock_create.assert_called_once_with(self.context, - status=mock.sentinel.status, - resource_type='Backup', - resource_id=mock.sentinel.id, - service_id=service_id) - self.assertEqual(mock_create.return_value, backup.worker) - - @mock.patch('cinder.db.worker_update', autospec=True) - @mock.patch('cinder.db.worker_get', autospec=True) - def test_set_worker_claim_from_another_host(self, mock_get, mock_update): - """Test set worker when the job was started on another failed host.""" - service_id = mock.sentinel.service_id - service.Service.service_id = service_id - worker = mock.Mock(service_id=mock.sentinel.service_id2, - status=mock.sentinel.status, cleaning=False, - updated_at=mock.sentinel.updated_at) - mock_get.return_value = worker - - backup = Backup(_context=self.context, status=mock.sentinel.status, - id=mock.sentinel.id) - - backup.set_worker() - - mock_update.assert_called_once_with( - self.context, worker.id, - filters={'service_id': mock.sentinel.service_id2, - 'status': mock.sentinel.status, - 'race_preventer': worker.race_preventer, - 'updated_at': mock.sentinel.updated_at}, - service_id=service_id, status=mock.sentinel.status, - orm_worker=worker) - self.assertEqual(worker, backup.worker) - - @mock.patch('cinder.db.worker_create', autospec=True) - @mock.patch('cinder.db.worker_get', autospec=True) - def test_set_worker_race_condition_fail(self, mock_get, mock_create): - """Test we cannot claim a work if we lose race condition.""" - service.Service.service_id = mock.sentinel.service_id - mock_get.side_effect = exception.WorkerNotFound - mock_create.side_effect = exception.WorkerExists(type='type', id='id') - - backup = Backup(_context=self.context, status=mock.sentinel.status, - id=mock.sentinel.id) - - self.assertRaises(exception.CleanableInUse, backup.set_worker) - self.assertTrue(mock_get.called) - self.assertTrue(mock_create.called) - - 
@mock.patch('cinder.db.worker_update', autospec=True) - @mock.patch('cinder.db.worker_get', autospec=True) - def test_set_worker_claim_fail_after_get(self, mock_get, mock_update): - """Test we don't have race condition if worker changes after get.""" - service.Service.service_id = mock.sentinel.service_id - worker = mock.Mock(service_id=mock.sentinel.service_id2, - status=mock.sentinel.status, deleted=False, - cleaning=False) - mock_get.return_value = worker - mock_update.side_effect = exception.WorkerNotFound - - backup = Backup(_context=self.context, status=mock.sentinel.status, - id=mock.sentinel.id) - - self.assertRaises(exception.CleanableInUse, backup.set_worker) - self.assertTrue(mock_get.called) - self.assertTrue(mock_update.called) - - @mock.patch('cinder.db.worker_destroy') - def test_unset_worker(self, destroy_mock): - backup = Backup(_context=self.context, status=mock.sentinel.status, - id=mock.sentinel.id) - worker = mock.Mock() - backup.worker = worker - backup.unset_worker() - destroy_mock.assert_called_once_with(self.context, id=worker.id, - status=worker.status, - service_id=worker.service_id) - self.assertIsNone(backup.worker) - - @mock.patch('cinder.db.worker_destroy') - def test_unset_worker_not_set(self, destroy_mock): - backup = Backup(_context=self.context, status=mock.sentinel.status, - id=mock.sentinel.id) - backup.unset_worker() - self.assertFalse(destroy_mock.called) - - @mock.patch('cinder.db.worker_update', autospec=True) - @mock.patch('cinder.db.worker_get', autospec=True) - def test_set_workers_no_arguments(self, mock_get, mock_update): - """Test set workers decorator without arguments.""" - @Backup.set_workers - def my_function(arg1, arg2, kwarg1=None, kwarg2=True): - return arg1, arg2, kwarg1, kwarg2 - - # Decorator with no args must preserve the method's signature - self.assertEqual('my_function', my_function.__name__) - call_args = inspect.getcallargs( - my_function, mock.sentinel.arg1, mock.sentinel.arg2, - mock.sentinel.kwargs1, 
kwarg2=mock.sentinel.kwarg2) - expected = {'arg1': mock.sentinel.arg1, - 'arg2': mock.sentinel.arg2, - 'kwarg1': mock.sentinel.kwargs1, - 'kwarg2': mock.sentinel.kwarg2} - self.assertDictEqual(expected, call_args) - - service.Service.service_id = mock.sentinel.service_id - mock_get.return_value.cleaning = False - backup = Backup(_context=self.context, status='cleanable', - id=mock.sentinel.id) - backup2 = Backup(_context=self.context, status='non-cleanable', - id=mock.sentinel.id2) - - res = my_function(backup, backup2) - self.assertEqual((backup, backup2, None, True), res) - - mock_get.assert_called_once_with(self.context, resource_type='Backup', - resource_id=mock.sentinel.id) - worker = mock_get.return_value - mock_update.assert_called_once_with( - self.context, worker.id, - filters={'service_id': worker.service_id, - 'status': worker.status, - 'race_preventer': worker.race_preventer, - 'updated_at': worker.updated_at}, - service_id=mock.sentinel.service_id, - status='cleanable', - orm_worker=worker) - self.assertEqual(worker, backup.worker) - - @mock.patch('cinder.db.worker_update', autospec=True) - @mock.patch('cinder.db.worker_get', autospec=True) - def test_set_workers_with_arguments(self, mock_get, mock_update): - """Test set workers decorator with an argument.""" - @Backup.set_workers('arg2', 'kwarg1') - def my_function(arg1, arg2, kwarg1=None, kwarg2=True): - return arg1, arg2, kwarg1, kwarg2 - - # Decorator with args must preserve the method's signature - self.assertEqual('my_function', my_function.__name__) - call_args = inspect.getcallargs( - my_function, mock.sentinel.arg1, mock.sentinel.arg2, - mock.sentinel.kwargs1, kwarg2=mock.sentinel.kwarg2) - expected = {'arg1': mock.sentinel.arg1, - 'arg2': mock.sentinel.arg2, - 'kwarg1': mock.sentinel.kwargs1, - 'kwarg2': mock.sentinel.kwarg2} - self.assertDictEqual(expected, call_args) - - service.Service.service_id = mock.sentinel.service_id - mock_get.return_value.cleaning = False - backup = 
Backup(_context=self.context, status='cleanable', - id=mock.sentinel.id) - backup2 = Backup(_context=self.context, status='non-cleanable', - id=mock.sentinel.id2) - backup3 = Backup(_context=self.context, status='cleanable', - id=mock.sentinel.id3) - - res = my_function(backup, backup2, backup3) - self.assertEqual((backup, backup2, backup3, True), res) - - mock_get.assert_called_once_with(self.context, resource_type='Backup', - resource_id=mock.sentinel.id3) - worker = mock_get.return_value - mock_update.assert_called_once_with( - self.context, worker.id, - filters={'service_id': worker.service_id, - 'status': worker.status, - 'race_preventer': worker.race_preventer, - 'updated_at': worker.updated_at}, - service_id=mock.sentinel.service_id, - status='cleanable', - orm_worker=worker) - self.assertEqual(worker, backup3.worker) diff --git a/cinder/tests/unit/objects/test_cleanup_request.py b/cinder/tests/unit/objects/test_cleanup_request.py deleted file mode 100644 index 5ec6d65c9..000000000 --- a/cinder/tests/unit/objects/test_cleanup_request.py +++ /dev/null @@ -1,71 +0,0 @@ -# Copyright (c) 2016 Red Hat, Inc. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -import mock - -from oslo_utils import timeutils - -from cinder import objects -from cinder.tests.unit import fake_constants as fake -from cinder.tests.unit import objects as test_objects - - -class TestCleanupRequest(test_objects.BaseObjectsTestCase): - - all_fields = ('service_id', 'cluster_name', 'host', 'binary', 'service_id', - 'is_up', 'disabled', 'resource_id', 'resource_type', 'until') - - default = {'is_up': False} - - def setUp(self): - super(TestCleanupRequest, self).setUp() - - self.fields = dict(service_id=1, cluster_name='cluster_name', - host='host_name', binary='binary_name', is_up=False, - resource_id=fake.VOLUME_ID, resource_type='Volume', - until=timeutils.utcnow(with_timezone=True), - disabled=True) - - def _req_as_dict(self, req): - return {field: getattr(req, field) for field in self.all_fields} - - def _req_default(self, field): - return self.default.get(field, None) - - def test_init_all_set(self): - """Test __init__ when setting all field values.""" - req = objects.CleanupRequest(mock.sentinel.context, **self.fields) - self.assertDictEqual(self.fields, self._req_as_dict(req)) - - def test_init_default(self): - """Test __init__ when one field is missing.""" - for field in self.fields: - fields = self.fields.copy() - del fields[field] - req = objects.CleanupRequest(mock.sentinel.context, **fields) - fields[field] = self._req_default(field) - self.assertDictEqual(fields, self._req_as_dict(req)) - - def test_init_defaults(self): - """Test __init__ when only one field is set.""" - all_defaults = {field: self._req_default(field) - for field in self.all_fields} - - for field in self.fields: - fields = {field: self.fields[field]} - req = objects.CleanupRequest(mock.sentinel.context, **fields) - expected = all_defaults.copy() - expected.update(fields) - self.assertDictEqual(expected, self._req_as_dict(req)) diff --git a/cinder/tests/unit/objects/test_cluster.py b/cinder/tests/unit/objects/test_cluster.py deleted file mode 100644 index 
7cef0d318..000000000 --- a/cinder/tests/unit/objects/test_cluster.py +++ /dev/null @@ -1,150 +0,0 @@ -# Copyright (c) 2016 Red Hat, Inc. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import ddt -import mock -from oslo_utils import timeutils - -from cinder import objects -from cinder.tests.unit import fake_cluster -from cinder.tests.unit import objects as test_objects -from cinder import utils - - -def _get_filters_sentinel(): - return {'session': mock.sentinel.session, - 'read_deleted': mock.sentinel.read_deleted, - 'get_services': mock.sentinel.get_services, - 'services_summary': mock.sentinel.services_summary, - 'name': mock.sentinel.name, - 'binary': mock.sentinel.binary, - 'is_up': mock.sentinel.is_up, - 'disabled': mock.sentinel.disabled, - 'disabled_reason': mock.sentinel.disabled_reason, - 'race_preventer': mock.sentinel.race_preventer, - 'last_heartbeat': mock.sentinel.last_heartbeat, - 'num_hosts': mock.sentinel.num_hosts, - 'name_match_level': mock.sentinel.name_match_level, - 'num_down_hosts': mock.sentinel.num_down_hosts} - - -@ddt.ddt -class TestCluster(test_objects.BaseObjectsTestCase): - """Test Cluster Versioned Object methods.""" - cluster = fake_cluster.fake_cluster_orm() - - @mock.patch('cinder.db.sqlalchemy.api.cluster_get', return_value=cluster) - def test_get_by_id(self, cluster_get_mock): - filters = _get_filters_sentinel() - cluster = objects.Cluster.get_by_id(self.context, - mock.sentinel.cluster_id, - **filters) - 
self.assertIsInstance(cluster, objects.Cluster) - self._compare(self, self.cluster, cluster) - cluster_get_mock.assert_called_once_with(self.context, - mock.sentinel.cluster_id, - **filters) - - @mock.patch('cinder.db.sqlalchemy.api.cluster_create', - return_value=cluster) - def test_create(self, cluster_create_mock): - cluster = objects.Cluster(context=self.context, name='cluster_name') - cluster.create() - self.assertEqual(self.cluster.id, cluster.id) - cluster_create_mock.assert_called_once_with(self.context, - {'name': 'cluster_name'}) - - @mock.patch('cinder.db.sqlalchemy.api.cluster_update', - return_value=cluster) - def test_save(self, cluster_update_mock): - cluster = fake_cluster.fake_cluster_ovo(self.context) - cluster.disabled = True - cluster.save() - cluster_update_mock.assert_called_once_with(self.context, cluster.id, - {'disabled': True}) - - @mock.patch('cinder.db.sqlalchemy.api.cluster_destroy') - def test_destroy(self, cluster_destroy_mock): - cluster = fake_cluster.fake_cluster_ovo(self.context) - cluster.destroy() - cluster_destroy_mock.assert_called_once_with(mock.ANY, cluster.id) - - @mock.patch('cinder.db.sqlalchemy.api.cluster_get', return_value=cluster) - def test_refresh(self, cluster_get_mock): - cluster = fake_cluster.fake_cluster_ovo(self.context) - cluster.refresh() - cluster_get_mock.assert_called_once_with(self.context, cluster.id) - - def test_is_up_no_last_hearbeat(self): - cluster = fake_cluster.fake_cluster_ovo(self.context, - last_heartbeat=None) - self.assertFalse(cluster.is_up) - - def test_is_up(self): - cluster = fake_cluster.fake_cluster_ovo( - self.context, - last_heartbeat=timeutils.utcnow(with_timezone=True)) - self.assertTrue(cluster.is_up) - - def test_is_up_limit(self): - limit_expired = (utils.service_expired_time(True) + - timeutils.datetime.timedelta(seconds=1)) - cluster = fake_cluster.fake_cluster_ovo(self.context, - last_heartbeat=limit_expired) - self.assertTrue(cluster.is_up) - - def test_is_up_down(self): - 
expired_time = (utils.service_expired_time(True) - - timeutils.datetime.timedelta(seconds=1)) - cluster = fake_cluster.fake_cluster_ovo(self.context, - last_heartbeat=expired_time) - self.assertFalse(cluster.is_up) - - @ddt.data('1.0', '1.1') - def tests_obj_make_compatible(self, version): - new_fields = {'replication_status': 'error', 'frozen': True, - 'active_backend_id': 'replication'} - cluster = objects.Cluster(self.context, **new_fields) - primitive = cluster.obj_to_primitive(version) - converted_cluster = objects.Cluster.obj_from_primitive(primitive) - for key, value in new_fields.items(): - if version == '1.0': - self.assertFalse(converted_cluster.obj_attr_is_set(key)) - else: - self.assertEqual(value, getattr(converted_cluster, key)) - - -class TestClusterList(test_objects.BaseObjectsTestCase): - """Test ClusterList Versioned Object methods.""" - - @mock.patch('cinder.db.sqlalchemy.api.cluster_get_all') - def test_cluster_get_all(self, cluster_get_all_mock): - orm_values = [ - fake_cluster.fake_cluster_orm(), - fake_cluster.fake_cluster_orm(id=2, name='cluster_name2'), - ] - cluster_get_all_mock.return_value = orm_values - filters = _get_filters_sentinel() - - result = objects.ClusterList.get_all(self.context, **filters) - - cluster_get_all_mock.assert_called_once_with( - self.context, filters.pop('is_up'), filters.pop('get_services'), - filters.pop('services_summary'), filters.pop('read_deleted'), - filters.pop('name_match_level'), **filters) - self.assertEqual(2, len(result)) - for i in range(len(result)): - self.assertIsInstance(result[i], objects.Cluster) - self._compare(self, orm_values[i], result[i]) diff --git a/cinder/tests/unit/objects/test_consistencygroup.py b/cinder/tests/unit/objects/test_consistencygroup.py deleted file mode 100644 index e27d66fa6..000000000 --- a/cinder/tests/unit/objects/test_consistencygroup.py +++ /dev/null @@ -1,309 +0,0 @@ -# Copyright 2015 Yahoo Inc. 
-# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import mock -from oslo_utils import timeutils -import pytz -import six - -from cinder import exception -from cinder import objects -from cinder.objects import fields -from cinder.tests.unit import fake_constants as fake -from cinder.tests.unit import fake_volume -from cinder.tests.unit import objects as test_objects - -fake_consistencygroup = { - 'id': fake.CONSISTENCY_GROUP_ID, - 'user_id': fake.USER_ID, - 'project_id': fake.PROJECT_ID, - 'host': 'fake_host', - 'availability_zone': 'fake_az', - 'name': 'fake_name', - 'description': 'fake_description', - 'volume_type_id': fake.VOLUME_TYPE_ID, - 'status': fields.ConsistencyGroupStatus.CREATING, - 'cgsnapshot_id': fake.CGSNAPSHOT_ID, - 'source_cgid': None, -} - -fake_cgsnapshot = { - 'id': fake.CGSNAPSHOT_ID, - 'user_id': fake.USER_ID, - 'project_id': fake.PROJECT_ID, - 'name': 'fake_name', - 'description': 'fake_description', - 'status': 'creating', - 'consistencygroup_id': fake.CONSISTENCY_GROUP_ID, -} - -fake_group = { - 'id': fake.GROUP_ID, - 'user_id': fake.USER_ID, - 'project_id': fake.PROJECT_ID, - 'host': 'fake_host', - 'availability_zone': 'fake_az', - 'name': 'fake_name', - 'description': 'fake_description', - 'group_type_id': fake.GROUP_TYPE_ID, - 'status': fields.GroupStatus.CREATING, -} - - -class TestConsistencyGroup(test_objects.BaseObjectsTestCase): - - @mock.patch('cinder.db.sqlalchemy.api.consistencygroup_get', - return_value=fake_consistencygroup) - 
def test_get_by_id(self, consistencygroup_get): - consistencygroup = objects.ConsistencyGroup.get_by_id( - self.context, fake.CONSISTENCY_GROUP_ID) - self._compare(self, fake_consistencygroup, consistencygroup) - consistencygroup_get.assert_called_once_with( - self.context, fake.CONSISTENCY_GROUP_ID) - - @mock.patch('cinder.db.sqlalchemy.api.model_query') - def test_get_by_id_no_existing_id(self, model_query): - model_query().filter_by().first.return_value = None - self.assertRaises(exception.ConsistencyGroupNotFound, - objects.ConsistencyGroup.get_by_id, self.context, - 123) - - @mock.patch('cinder.db.consistencygroup_create', - return_value=fake_consistencygroup) - def test_create(self, consistencygroup_create): - fake_cg = fake_consistencygroup.copy() - del fake_cg['id'] - consistencygroup = objects.ConsistencyGroup(context=self.context, - **fake_cg) - consistencygroup.create() - self._compare(self, fake_consistencygroup, consistencygroup) - - @mock.patch('cinder.db.group_create', - return_value=fake_group) - def test_create_from_group(self, group_create): - fake_grp = fake_group.copy() - del fake_grp['id'] - group = objects.Group(context=self.context, - **fake_grp) - group.create() - volumes_objs = [objects.Volume(context=self.context, id=i) - for i in [fake.VOLUME_ID, fake.VOLUME2_ID, - fake.VOLUME3_ID]] - volumes = objects.VolumeList(objects=volumes_objs) - group.volumes = volumes - consistencygroup = objects.ConsistencyGroup() - consistencygroup.from_group(group) - self.assertEqual(group.id, consistencygroup.id) - self.assertEqual(group.name, consistencygroup.name) - - def test_create_with_id_except_exception(self, ): - consistencygroup = objects.ConsistencyGroup( - context=self.context, **{'id': fake.CONSISTENCY_GROUP_ID}) - self.assertRaises(exception.ObjectActionError, consistencygroup.create) - - @mock.patch('cinder.db.consistencygroup_update') - def test_save(self, consistencygroup_update): - consistencygroup = objects.ConsistencyGroup._from_db_object( 
- self.context, objects.ConsistencyGroup(), fake_consistencygroup) - consistencygroup.status = fields.ConsistencyGroupStatus.AVAILABLE - consistencygroup.save() - consistencygroup_update.assert_called_once_with( - self.context, - consistencygroup.id, - {'status': fields.ConsistencyGroupStatus.AVAILABLE}) - - def test_save_with_cgsnapshots(self): - consistencygroup = objects.ConsistencyGroup._from_db_object( - self.context, objects.ConsistencyGroup(), fake_consistencygroup) - cgsnapshots_objs = [objects.CGSnapshot(context=self.context, id=i) - for i in [fake.CGSNAPSHOT_ID, fake.CGSNAPSHOT2_ID, - fake.CGSNAPSHOT3_ID]] - cgsnapshots = objects.CGSnapshotList(objects=cgsnapshots_objs) - consistencygroup.name = 'foobar' - consistencygroup.cgsnapshots = cgsnapshots - self.assertEqual({'name': 'foobar', - 'cgsnapshots': cgsnapshots}, - consistencygroup.obj_get_changes()) - self.assertRaises(exception.ObjectActionError, consistencygroup.save) - - def test_save_with_volumes(self): - consistencygroup = objects.ConsistencyGroup._from_db_object( - self.context, objects.ConsistencyGroup(), fake_consistencygroup) - volumes_objs = [objects.Volume(context=self.context, id=i) - for i in [fake.VOLUME_ID, fake.VOLUME2_ID, - fake.VOLUME3_ID]] - volumes = objects.VolumeList(objects=volumes_objs) - consistencygroup.name = 'foobar' - consistencygroup.volumes = volumes - self.assertEqual({'name': 'foobar', - 'volumes': volumes}, - consistencygroup.obj_get_changes()) - self.assertRaises(exception.ObjectActionError, consistencygroup.save) - - @mock.patch('cinder.objects.cgsnapshot.CGSnapshotList.get_all_by_group') - @mock.patch('cinder.objects.volume.VolumeList.get_all_by_group') - def test_obj_load_attr(self, mock_vol_get_all_by_group, - mock_cgsnap_get_all_by_group): - consistencygroup = objects.ConsistencyGroup._from_db_object( - self.context, objects.ConsistencyGroup(), fake_consistencygroup) - # Test cgsnapshots lazy-loaded field - cgsnapshots_objs = 
[objects.CGSnapshot(context=self.context, id=i) - for i in [fake.CGSNAPSHOT_ID, fake.CGSNAPSHOT2_ID, - fake.CGSNAPSHOT3_ID]] - cgsnapshots = objects.CGSnapshotList(context=self.context, - objects=cgsnapshots_objs) - mock_cgsnap_get_all_by_group.return_value = cgsnapshots - self.assertEqual(cgsnapshots, consistencygroup.cgsnapshots) - mock_cgsnap_get_all_by_group.assert_called_once_with( - self.context, consistencygroup.id) - - # Test volumes lazy-loaded field - volume_objs = [objects.Volume(context=self.context, id=i) - for i in [fake.VOLUME_ID, fake.VOLUME2_ID, - fake.VOLUME3_ID]] - volumes = objects.VolumeList(context=self.context, objects=volume_objs) - mock_vol_get_all_by_group.return_value = volumes - self.assertEqual(volumes, consistencygroup.volumes) - mock_vol_get_all_by_group.assert_called_once_with(self.context, - consistencygroup.id) - - @mock.patch('oslo_utils.timeutils.utcnow', return_value=timeutils.utcnow()) - @mock.patch('cinder.db.sqlalchemy.api.consistencygroup_destroy') - def test_destroy(self, consistencygroup_destroy, utcnow_mock): - consistencygroup_destroy.return_value = { - 'status': fields.ConsistencyGroupStatus.DELETED, - 'deleted': True, - 'deleted_at': utcnow_mock.return_value} - consistencygroup = objects.ConsistencyGroup( - context=self.context, id=fake.CONSISTENCY_GROUP_ID) - consistencygroup.destroy() - self.assertTrue(consistencygroup_destroy.called) - admin_context = consistencygroup_destroy.call_args[0][0] - self.assertTrue(admin_context.is_admin) - self.assertTrue(consistencygroup.deleted) - self.assertEqual(fields.ConsistencyGroupStatus.DELETED, - consistencygroup.status) - self.assertEqual(utcnow_mock.return_value.replace(tzinfo=pytz.UTC), - consistencygroup.deleted_at) - - @mock.patch('cinder.db.sqlalchemy.api.consistencygroup_get') - def test_refresh(self, consistencygroup_get): - db_cg1 = fake_consistencygroup.copy() - db_cg2 = db_cg1.copy() - db_cg2['description'] = 'foobar' - - # On the second consistencygroup_get, return 
the ConsistencyGroup with - # an updated description - consistencygroup_get.side_effect = [db_cg1, db_cg2] - cg = objects.ConsistencyGroup.get_by_id(self.context, - fake.CONSISTENCY_GROUP_ID) - self._compare(self, db_cg1, cg) - - # description was updated, so a ConsistencyGroup refresh should have a - # new value for that field - cg.refresh() - self._compare(self, db_cg2, cg) - if six.PY3: - call_bool = mock.call.__bool__() - else: - call_bool = mock.call.__nonzero__() - consistencygroup_get.assert_has_calls([ - mock.call( - self.context, - fake.CONSISTENCY_GROUP_ID), - call_bool, - mock.call( - self.context, - fake.CONSISTENCY_GROUP_ID)]) - - def test_from_db_object_with_all_expected_attributes(self): - expected_attrs = ['volumes', 'cgsnapshots'] - db_volumes = [fake_volume.fake_db_volume(admin_metadata={}, - volume_metadata={})] - db_cgsnaps = [fake_cgsnapshot.copy()] - db_cg = fake_consistencygroup.copy() - db_cg['volumes'] = db_volumes - db_cg['cgsnapshots'] = db_cgsnaps - cg = objects.ConsistencyGroup._from_db_object( - self.context, objects.ConsistencyGroup(), db_cg, expected_attrs) - self.assertEqual(len(db_volumes), len(cg.volumes)) - self._compare(self, db_volumes[0], cg.volumes[0]) - self.assertEqual(len(db_cgsnaps), len(cg.cgsnapshots)) - self._compare(self, db_cgsnaps[0], cg.cgsnapshots[0]) - - -class TestConsistencyGroupList(test_objects.BaseObjectsTestCase): - @mock.patch('cinder.db.consistencygroup_get_all', - return_value=[fake_consistencygroup]) - def test_get_all(self, consistencygroup_get_all): - consistencygroups = objects.ConsistencyGroupList.get_all(self.context) - self.assertEqual(1, len(consistencygroups)) - TestConsistencyGroup._compare(self, fake_consistencygroup, - consistencygroups[0]) - - @mock.patch('cinder.db.consistencygroup_get_all_by_project', - return_value=[fake_consistencygroup]) - def test_get_all_by_project(self, consistencygroup_get_all_by_project): - consistencygroups = objects.ConsistencyGroupList.get_all_by_project( - 
self.context, self.project_id) - self.assertEqual(1, len(consistencygroups)) - TestConsistencyGroup._compare(self, fake_consistencygroup, - consistencygroups[0]) - - @mock.patch('cinder.db.consistencygroup_get_all', - return_value=[fake_consistencygroup]) - def test_get_all_with_pagination(self, consistencygroup_get_all): - consistencygroups = objects.ConsistencyGroupList.get_all( - self.context, filters={'id': 'fake'}, marker=None, limit=1, - offset=None, sort_keys='id', sort_dirs='asc') - self.assertEqual(1, len(consistencygroups)) - consistencygroup_get_all.assert_called_once_with( - self.context, filters={'id': 'fake'}, marker=None, limit=1, - offset=None, sort_keys='id', sort_dirs='asc') - TestConsistencyGroup._compare(self, fake_consistencygroup, - consistencygroups[0]) - - @mock.patch('cinder.db.consistencygroup_get_all_by_project', - return_value=[fake_consistencygroup]) - def test_get_all_by_project_with_pagination( - self, consistencygroup_get_all_by_project): - consistencygroups = objects.ConsistencyGroupList.get_all_by_project( - self.context, self.project_id, filters={'id': 'fake'}, marker=None, - limit=1, offset=None, sort_keys='id', sort_dirs='asc') - self.assertEqual(1, len(consistencygroups)) - consistencygroup_get_all_by_project.assert_called_once_with( - self.context, self.project_id, filters={'id': 'fake'}, marker=None, - limit=1, offset=None, sort_keys='id', sort_dirs='asc') - TestConsistencyGroup._compare(self, fake_consistencygroup, - consistencygroups[0]) - - @mock.patch('cinder.db.consistencygroup_include_in_cluster') - def test_include_in_cluster(self, include_mock): - filters = {'host': mock.sentinel.host, - 'cluster_name': mock.sentinel.cluster_name} - cluster = 'new_cluster' - objects.ConsistencyGroupList.include_in_cluster(self.context, cluster, - **filters) - include_mock.assert_called_once_with(self.context, cluster, True, - **filters) - - @mock.patch('cinder.db.consistencygroup_include_in_cluster') - def 
test_include_in_cluster_specify_partial(self, include_mock): - filters = {'host': mock.sentinel.host, - 'cluster_name': mock.sentinel.cluster_name} - cluster = 'new_cluster' - objects.ConsistencyGroupList.include_in_cluster( - self.context, cluster, mock.sentinel.partial_rename, **filters) - include_mock.assert_called_once_with( - self.context, cluster, mock.sentinel.partial_rename, **filters) diff --git a/cinder/tests/unit/objects/test_fields.py b/cinder/tests/unit/objects/test_fields.py deleted file mode 100644 index 21ba3c9f5..000000000 --- a/cinder/tests/unit/objects/test_fields.py +++ /dev/null @@ -1,158 +0,0 @@ -# Copyright 2015 IBM Corp. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from cinder.objects import fields -from cinder import test - - -class FakeFieldType(fields.FieldType): - def coerce(self, obj, attr, value): - return '*%s*' % value - - def to_primitive(self, obj, attr, value): - return '!%s!' 
% value - - def from_primitive(self, obj, attr, value): - return value[1:-1] - - -class TestField(test.TestCase): - def setUp(self): - super(TestField, self).setUp() - self.field = fields.Field(FakeFieldType()) - self.coerce_good_values = [('foo', '*foo*')] - self.coerce_bad_values = [] - self.to_primitive_values = [('foo', '!foo!')] - self.from_primitive_values = [('!foo!', 'foo')] - - def test_coerce_good_values(self): - for in_val, out_val in self.coerce_good_values: - self.assertEqual(out_val, self.field.coerce('obj', 'attr', in_val)) - - def test_coerce_bad_values(self): - for in_val in self.coerce_bad_values: - self.assertRaises((TypeError, ValueError), - self.field.coerce, 'obj', 'attr', in_val) - - def test_to_primitive(self): - for in_val, prim_val in self.to_primitive_values: - self.assertEqual(prim_val, self.field.to_primitive('obj', 'attr', - in_val)) - - def test_from_primitive(self): - class ObjectLikeThing(object): - _context = 'context' - - for prim_val, out_val in self.from_primitive_values: - self.assertEqual(out_val, self.field.from_primitive( - ObjectLikeThing, 'attr', prim_val)) - - def test_stringify(self): - self.assertEqual('123', self.field.stringify(123)) - - -class TestBackupStatus(TestField): - def setUp(self): - super(TestBackupStatus, self).setUp() - self.field = fields.BackupStatusField() - self.coerce_good_values = [('error', fields.BackupStatus.ERROR), - ('error_deleting', - fields.BackupStatus.ERROR_DELETING), - ('creating', fields.BackupStatus.CREATING), - ('available', - fields.BackupStatus.AVAILABLE), - ('deleting', fields.BackupStatus.DELETING), - ('deleted', fields.BackupStatus.DELETED), - ('restoring', - fields.BackupStatus.RESTORING)] - self.coerce_bad_values = ['acme'] - self.to_primitive_values = self.coerce_good_values[0:1] - self.from_primitive_values = self.coerce_good_values[0:1] - - def test_stringify(self): - self.assertEqual("'error'", self.field.stringify('error')) - - def test_stringify_invalid(self): - 
self.assertRaises(ValueError, self.field.stringify, 'not_a_status') - - -class TestConsistencyGroupStatus(TestField): - def setUp(self): - super(TestConsistencyGroupStatus, self).setUp() - self.field = fields.ConsistencyGroupStatusField() - self.coerce_good_values = [ - ('error', fields.ConsistencyGroupStatus.ERROR), - ('available', fields.ConsistencyGroupStatus.AVAILABLE), - ('creating', fields.ConsistencyGroupStatus.CREATING), - ('deleting', fields.ConsistencyGroupStatus.DELETING), - ('deleted', fields.ConsistencyGroupStatus.DELETED), - ('updating', fields.ConsistencyGroupStatus.UPDATING), - ('error_deleting', fields.ConsistencyGroupStatus.ERROR_DELETING)] - self.coerce_bad_values = ['acme'] - self.to_primitive_values = self.coerce_good_values[0:1] - self.from_primitive_values = self.coerce_good_values[0:1] - - def test_stringify(self): - self.assertEqual("'error'", self.field.stringify('error')) - - def test_stringify_invalid(self): - self.assertRaises(ValueError, self.field.stringify, 'not_a_status') - - -class TestSnapshotStatus(TestField): - def setUp(self): - super(TestSnapshotStatus, self).setUp() - self.field = fields.SnapshotStatusField() - self.coerce_good_values = [ - ('error', fields.SnapshotStatus.ERROR), - ('available', fields.SnapshotStatus.AVAILABLE), - ('creating', fields.SnapshotStatus.CREATING), - ('deleting', fields.SnapshotStatus.DELETING), - ('deleted', fields.SnapshotStatus.DELETED), - ('updating', fields.SnapshotStatus.UPDATING), - ('error_deleting', fields.SnapshotStatus.ERROR_DELETING)] - self.coerce_bad_values = ['acme'] - self.to_primitive_values = self.coerce_good_values[0:1] - self.from_primitive_values = self.coerce_good_values[0:1] - - def test_stringify(self): - self.assertEqual("'error'", self.field.stringify('error')) - - def test_stringify_invalid(self): - self.assertRaises(ValueError, self.field.stringify, 'not_a_status') - - -class TestVolumeAttachStatus(TestField): - def setUp(self): - super(TestVolumeAttachStatus, 
self).setUp() - self.field = fields.VolumeAttachStatusField() - self.coerce_good_values = [('attaching', - fields.VolumeAttachStatus.ATTACHING), - ('attached', - fields.VolumeAttachStatus.ATTACHED), - ('detached', - fields.VolumeAttachStatus.DETACHED), - ('error_attaching', - fields.VolumeAttachStatus.ERROR_ATTACHING), - ('error_detaching', - fields.VolumeAttachStatus.ERROR_DETACHING)] - self.coerce_bad_values = ['acme'] - self.to_primitive_values = self.coerce_good_values[0:1] - self.from_primitive_values = self.coerce_good_values[0:1] - - def test_stringify(self): - self.assertEqual("'attaching'", self.field.stringify('attaching')) - - def test_stringify_invalid(self): - self.assertRaises(ValueError, self.field.stringify, 'not_a_status') diff --git a/cinder/tests/unit/objects/test_group.py b/cinder/tests/unit/objects/test_group.py deleted file mode 100644 index 6aed446c7..000000000 --- a/cinder/tests/unit/objects/test_group.py +++ /dev/null @@ -1,274 +0,0 @@ -# Copyright 2016 EMC Corporation -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -import ddt -import mock -import six - -from cinder import exception -from cinder import objects -from cinder.objects import base as ovo_base -from cinder.objects import fields -from cinder.tests.unit import fake_constants as fake -from cinder.tests.unit import fake_volume -from cinder.tests.unit import objects as test_objects - -fake_group = { - 'id': fake.GROUP_ID, - 'user_id': fake.USER_ID, - 'project_id': fake.PROJECT_ID, - 'host': 'fake_host', - 'availability_zone': 'fake_az', - 'name': 'fake_name', - 'description': 'fake_description', - 'group_type_id': fake.GROUP_TYPE_ID, - 'status': fields.GroupStatus.CREATING, -} - - -@ddt.ddt -class TestGroup(test_objects.BaseObjectsTestCase): - - @mock.patch('cinder.db.sqlalchemy.api.group_get', - return_value=fake_group) - def test_get_by_id(self, group_get): - group = objects.Group.get_by_id( - self.context, fake.GROUP_ID) - self._compare(self, fake_group, group) - group_get.assert_called_once_with( - self.context, fake.GROUP_ID) - - @mock.patch('cinder.db.sqlalchemy.api.model_query') - def test_get_by_id_no_existing_id(self, model_query): - model_query().filter_by().first.return_value = None - self.assertRaises(exception.GroupNotFound, - objects.Group.get_by_id, self.context, - 123) - - @mock.patch('cinder.db.group_create', - return_value=fake_group) - def test_create(self, group_create): - fake_grp = fake_group.copy() - del fake_grp['id'] - group = objects.Group(context=self.context, - **fake_grp) - group.create() - self._compare(self, fake_group, group) - - def test_create_with_id_except_exception(self, ): - group = objects.Group( - context=self.context, **{'id': fake.GROUP_ID}) - self.assertRaises(exception.ObjectActionError, group.create) - - @mock.patch('cinder.db.group_update') - def test_save(self, group_update): - group = objects.Group._from_db_object( - self.context, objects.Group(), fake_group) - group.status = fields.GroupStatus.AVAILABLE - group.save() - group_update.assert_called_once_with( - 
self.context, - group.id, - {'status': fields.GroupStatus.AVAILABLE}) - - def test_save_with_volumes(self): - group = objects.Group._from_db_object( - self.context, objects.Group(), fake_group) - volumes_objs = [objects.Volume(context=self.context, id=i) - for i in [fake.VOLUME_ID, fake.VOLUME2_ID, - fake.VOLUME3_ID]] - volumes = objects.VolumeList(objects=volumes_objs) - group.name = 'foobar' - group.volumes = volumes - self.assertEqual({'name': 'foobar', - 'volumes': volumes}, - group.obj_get_changes()) - self.assertRaises(exception.ObjectActionError, group.save) - - @mock.patch('cinder.objects.volume_type.VolumeTypeList.get_all_by_group') - @mock.patch('cinder.objects.volume.VolumeList.get_all_by_generic_group') - def test_obj_load_attr(self, mock_vol_get_all_by_group, - mock_vol_type_get_all_by_group): - group = objects.Group._from_db_object( - self.context, objects.Group(), fake_group) - - # Test volumes lazy-loaded field - volume_objs = [objects.Volume(context=self.context, id=i) - for i in [fake.VOLUME_ID, fake.VOLUME2_ID, - fake.VOLUME3_ID]] - volumes = objects.VolumeList(context=self.context, objects=volume_objs) - mock_vol_get_all_by_group.return_value = volumes - self.assertEqual(volumes, group.volumes) - mock_vol_get_all_by_group.assert_called_once_with(self.context, - group.id) - - @mock.patch('cinder.db.group_destroy') - def test_destroy(self, group_destroy): - group = objects.Group( - context=self.context, id=fake.GROUP_ID) - group.destroy() - self.assertTrue(group_destroy.called) - admin_context = group_destroy.call_args[0][0] - self.assertTrue(admin_context.is_admin) - - @mock.patch('cinder.db.sqlalchemy.api.group_get') - def test_refresh(self, group_get): - db_group1 = fake_group.copy() - db_group2 = db_group1.copy() - db_group2['description'] = 'foobar' - - # On the second group_get, return the Group with - # an updated description - group_get.side_effect = [db_group1, db_group2] - group = objects.Group.get_by_id(self.context, - fake.GROUP_ID) - 
self._compare(self, db_group1, group) - - # description was updated, so a Group refresh should have a - # new value for that field - group.refresh() - self._compare(self, db_group2, group) - if six.PY3: - call_bool = mock.call.__bool__() - else: - call_bool = mock.call.__nonzero__() - group_get.assert_has_calls([ - mock.call( - self.context, - fake.GROUP_ID), - call_bool, - mock.call( - self.context, - fake.GROUP_ID)]) - - def test_from_db_object_with_all_expected_attributes(self): - expected_attrs = ['volumes'] - db_volumes = [fake_volume.fake_db_volume(admin_metadata={}, - volume_metadata={})] - db_group = fake_group.copy() - db_group['volumes'] = db_volumes - group = objects.Group._from_db_object( - self.context, objects.Group(), db_group, expected_attrs) - self.assertEqual(len(db_volumes), len(group.volumes)) - self._compare(self, db_volumes[0], group.volumes[0]) - - @ddt.data('1.10', '1.11') - def test_obj_make_compatible(self, version): - extra_data = {'group_snapshot_id': fake.GROUP_SNAPSHOT_ID, - 'source_group_id': fake.GROUP_ID, - 'group_snapshots': objects.GroupSnapshotList()} - group = objects.Group(self.context, name='name', **extra_data) - - serializer = ovo_base.CinderObjectSerializer(version) - primitive = serializer.serialize_entity(self.context, group) - - converted_group = objects.Group.obj_from_primitive(primitive) - is_set = version == '1.11' - for key in extra_data: - self.assertEqual(is_set, converted_group.obj_attr_is_set(key)) - self.assertEqual('name', converted_group.name) - - @mock.patch('cinder.volume.group_types.get_group_type_specs') - def test_is_replicated_true(self, mock_get_specs): - mock_get_specs.return_value = ' True' - group = objects.Group(self.context, group_type_id=fake.GROUP_TYPE_ID) - # NOTE(xyang): Changed the following from self.assertTrue( - # group.is_replicated) to self.assertEqual(True, group.is_replicated) - # to address a review comment. 
This way this test will still pass - # even if is_replicated is a method and not a property. - self.assertTrue(True, group.is_replicated) - - @ddt.data(' False', None, 'notASpecValueWeCareAbout') - def test_is_replicated_false(self, spec_value): - with mock.patch('cinder.volume.group_types' - '.get_group_type_specs') as mock_get_specs: - mock_get_specs.return_value = spec_value - group = objects.Group(self.context, - group_type_id=fake.GROUP_TYPE_ID) - # NOTE(xyang): Changed the following from self.assertFalse( - # group.is_replicated) to self.assertEqual(False, - # group.is_replicated) to address a review comment. This way this - # test will still pass even if is_replicated is a method and not - # a property. - self.assertEqual(False, group.is_replicated) - - -@ddt.ddt -class TestGroupList(test_objects.BaseObjectsTestCase): - @mock.patch('cinder.db.group_get_all', - return_value=[fake_group]) - def test_get_all(self, group_get_all): - groups = objects.GroupList.get_all(self.context) - self.assertEqual(1, len(groups)) - TestGroup._compare(self, fake_group, - groups[0]) - - @mock.patch('cinder.db.group_get_all_by_project', - return_value=[fake_group]) - def test_get_all_by_project(self, group_get_all_by_project): - groups = objects.GroupList.get_all_by_project( - self.context, self.project_id) - self.assertEqual(1, len(groups)) - TestGroup._compare(self, fake_group, - groups[0]) - - @mock.patch('cinder.db.group_get_all', - return_value=[fake_group]) - def test_get_all_with_pagination(self, group_get_all): - groups = objects.GroupList.get_all( - self.context, filters={'id': 'fake'}, marker=None, limit=1, - offset=None, sort_keys='id', sort_dirs='asc') - self.assertEqual(1, len(groups)) - group_get_all.assert_called_once_with( - self.context, filters={'id': 'fake'}, marker=None, limit=1, - offset=None, sort_keys='id', sort_dirs='asc') - TestGroup._compare(self, fake_group, - groups[0]) - - @mock.patch('cinder.db.group_get_all_by_project', - return_value=[fake_group]) 
- def test_get_all_by_project_with_pagination( - self, group_get_all_by_project): - groups = objects.GroupList.get_all_by_project( - self.context, self.project_id, filters={'id': 'fake'}, marker=None, - limit=1, offset=None, sort_keys='id', sort_dirs='asc') - self.assertEqual(1, len(groups)) - group_get_all_by_project.assert_called_once_with( - self.context, self.project_id, filters={'id': 'fake'}, marker=None, - limit=1, offset=None, sort_keys='id', sort_dirs='asc') - TestGroup._compare(self, fake_group, - groups[0]) - - @ddt.data({'cluster_name': 'fake_cluster'}, {'host': 'fake_host'}) - @mock.patch('cinder.volume.group_types.get_group_type_specs') - @mock.patch('cinder.db.group_get_all') - def test_get_all_replicated(self, filters, mock_get_groups, - mock_get_specs): - mock_get_specs.return_value = ' True' - fake_group2 = fake_group.copy() - fake_group2['id'] = fake.GROUP2_ID - fake_group2['cluster_name'] = 'fake_cluster' - if filters.get('cluster_name'): - mock_get_groups.return_value = [fake_group2] - else: - mock_get_groups.return_value = [fake_group] - res = objects.GroupList.get_all_replicated(self.context, - filters=filters) - self.assertEqual(1, len(res)) - if filters.get('cluster_name'): - self.assertEqual(fake.GROUP2_ID, res[0].id) - self.assertEqual('fake_cluster', res[0].cluster_name) - else: - self.assertEqual(fake.GROUP_ID, res[0].id) - self.assertIsNone(res[0].cluster_name) diff --git a/cinder/tests/unit/objects/test_group_snapshot.py b/cinder/tests/unit/objects/test_group_snapshot.py deleted file mode 100644 index b51924f5a..000000000 --- a/cinder/tests/unit/objects/test_group_snapshot.py +++ /dev/null @@ -1,189 +0,0 @@ -# Copyright 2016 EMC Corporation -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. 
You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import mock -from oslo_utils import timeutils -import pytz -import six - -from cinder import exception -from cinder import objects -from cinder.objects import fields -from cinder.tests.unit import fake_constants as fake -from cinder.tests.unit import objects as test_objects -from cinder.tests.unit.objects.test_group import fake_group - -fake_group_snapshot = { - 'id': fake.GROUP_SNAPSHOT_ID, - 'user_id': fake.USER_ID, - 'project_id': fake.PROJECT_ID, - 'name': 'fake_name', - 'description': 'fake_description', - 'status': fields.GroupSnapshotStatus.CREATING, - 'group_id': fake.GROUP_ID, -} - - -class TestGroupSnapshot(test_objects.BaseObjectsTestCase): - - @mock.patch('cinder.db.sqlalchemy.api.group_snapshot_get', - return_value=fake_group_snapshot) - def test_get_by_id(self, group_snapshot_get): - group_snapshot = objects.GroupSnapshot.get_by_id( - self.context, - fake.GROUP_SNAPSHOT_ID) - self._compare(self, fake_group_snapshot, group_snapshot) - - @mock.patch('cinder.db.group_snapshot_create', - return_value=fake_group_snapshot) - def test_create(self, group_snapshot_create): - fake_group_snap = fake_group_snapshot.copy() - del fake_group_snap['id'] - group_snapshot = objects.GroupSnapshot(context=self.context, - **fake_group_snap) - group_snapshot.create() - self._compare(self, fake_group_snapshot, group_snapshot) - - def test_create_with_id_except_exception(self): - group_snapshot = objects.GroupSnapshot( - context=self.context, - **{'id': fake.GROUP_ID}) - self.assertRaises(exception.ObjectActionError, group_snapshot.create) - - 
@mock.patch('cinder.db.group_snapshot_update') - def test_save(self, group_snapshot_update): - group_snapshot = objects.GroupSnapshot._from_db_object( - self.context, objects.GroupSnapshot(), fake_group_snapshot) - group_snapshot.status = 'active' - group_snapshot.save() - group_snapshot_update.assert_called_once_with(self.context, - group_snapshot.id, - {'status': 'active'}) - - @mock.patch('cinder.db.group_update', - return_value=fake_group) - @mock.patch('cinder.db.group_snapshot_update') - def test_save_with_group(self, group_snapshot_update, - group_snapshot_cg_update): - group = objects.Group._from_db_object( - self.context, objects.Group(), fake_group) - group_snapshot = objects.GroupSnapshot._from_db_object( - self.context, objects.GroupSnapshot(), fake_group_snapshot) - group_snapshot.name = 'foobar' - group_snapshot.group = group - self.assertEqual({'name': 'foobar', - 'group': group}, - group_snapshot.obj_get_changes()) - self.assertRaises(exception.ObjectActionError, group_snapshot.save) - - @mock.patch('oslo_utils.timeutils.utcnow', return_value=timeutils.utcnow()) - @mock.patch('cinder.db.sqlalchemy.api.group_snapshot_destroy') - def test_destroy(self, group_snapshot_destroy, utcnow_mock): - group_snapshot_destroy.return_value = { - 'status': fields.GroupSnapshotStatus.DELETED, - 'deleted': True, - 'deleted_at': utcnow_mock.return_value} - group_snapshot = objects.GroupSnapshot(context=self.context, - id=fake.GROUP_SNAPSHOT_ID) - group_snapshot.destroy() - self.assertTrue(group_snapshot_destroy.called) - admin_context = group_snapshot_destroy.call_args[0][0] - self.assertTrue(admin_context.is_admin) - self.assertTrue(group_snapshot.deleted) - self.assertEqual(fields.GroupSnapshotStatus.DELETED, - group_snapshot.status) - self.assertEqual(utcnow_mock.return_value.replace(tzinfo=pytz.UTC), - group_snapshot.deleted_at) - - @mock.patch('cinder.objects.group.Group.get_by_id') - @mock.patch( - 
'cinder.objects.snapshot.SnapshotList.get_all_for_group_snapshot') - def test_obj_load_attr(self, snapshotlist_get_for_cgs, - group_get_by_id): - group_snapshot = objects.GroupSnapshot._from_db_object( - self.context, objects.GroupSnapshot(), fake_group_snapshot) - # Test group lazy-loaded field - group = objects.Group( - context=self.context, id=fake.GROUP_ID) - group_get_by_id.return_value = group - self.assertEqual(group, group_snapshot.group) - group_get_by_id.assert_called_once_with( - self.context, group_snapshot.group_id) - # Test snapshots lazy-loaded field - snapshots_objs = [objects.Snapshot(context=self.context, id=i) - for i in [fake.SNAPSHOT_ID, fake.SNAPSHOT2_ID, - fake.SNAPSHOT3_ID]] - snapshots = objects.SnapshotList(context=self.context, - objects=snapshots_objs) - snapshotlist_get_for_cgs.return_value = snapshots - self.assertEqual(snapshots, group_snapshot.snapshots) - snapshotlist_get_for_cgs.assert_called_once_with( - self.context, group_snapshot.id) - - @mock.patch('cinder.db.sqlalchemy.api.group_snapshot_get') - def test_refresh(self, group_snapshot_get): - db_group_snapshot1 = fake_group_snapshot.copy() - db_group_snapshot2 = db_group_snapshot1.copy() - db_group_snapshot2['description'] = 'foobar' - - # On the second group_snapshot_get, return the GroupSnapshot with an - # updated description - group_snapshot_get.side_effect = [db_group_snapshot1, - db_group_snapshot2] - group_snapshot = objects.GroupSnapshot.get_by_id( - self.context, fake.GROUP_SNAPSHOT_ID) - self._compare(self, db_group_snapshot1, group_snapshot) - - # description was updated, so a GroupSnapshot refresh should have a new - # value for that field - group_snapshot.refresh() - self._compare(self, db_group_snapshot2, group_snapshot) - if six.PY3: - call_bool = mock.call.__bool__() - else: - call_bool = mock.call.__nonzero__() - group_snapshot_get.assert_has_calls( - [mock.call(self.context, - fake.GROUP_SNAPSHOT_ID), - call_bool, - mock.call(self.context, - 
fake.GROUP_SNAPSHOT_ID)]) - - -class TestGroupSnapshotList(test_objects.BaseObjectsTestCase): - @mock.patch('cinder.db.group_snapshot_get_all', - return_value=[fake_group_snapshot]) - def test_get_all(self, group_snapshot_get_all): - group_snapshots = objects.GroupSnapshotList.get_all(self.context) - self.assertEqual(1, len(group_snapshots)) - TestGroupSnapshot._compare(self, fake_group_snapshot, - group_snapshots[0]) - - @mock.patch('cinder.db.group_snapshot_get_all_by_project', - return_value=[fake_group_snapshot]) - def test_get_all_by_project(self, group_snapshot_get_all_by_project): - group_snapshots = objects.GroupSnapshotList.get_all_by_project( - self.context, self.project_id) - self.assertEqual(1, len(group_snapshots)) - TestGroupSnapshot._compare(self, fake_group_snapshot, - group_snapshots[0]) - - @mock.patch('cinder.db.group_snapshot_get_all_by_group', - return_value=[fake_group_snapshot]) - def test_get_all_by_group(self, group_snapshot_get_all_by_group): - group_snapshots = objects.GroupSnapshotList.get_all_by_group( - self.context, self.project_id) - self.assertEqual(1, len(group_snapshots)) - TestGroupSnapshot._compare(self, fake_group_snapshot, - group_snapshots[0]) diff --git a/cinder/tests/unit/objects/test_group_type.py b/cinder/tests/unit/objects/test_group_type.py deleted file mode 100644 index 71c1877e9..000000000 --- a/cinder/tests/unit/objects/test_group_type.py +++ /dev/null @@ -1,127 +0,0 @@ -# Copyright 2016 EMC Corporation -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the -# License for the specific language governing permissions and limitations -# under the License. - -import mock -import six - -from cinder import objects -from cinder.tests.unit import fake_constants as fake -from cinder.tests.unit import fake_group -from cinder.tests.unit import objects as test_objects - - -class TestGroupType(test_objects.BaseObjectsTestCase): - - @mock.patch('cinder.db.sqlalchemy.api._group_type_get_full') - def test_get_by_id(self, group_type_get): - db_group_type = fake_group.fake_db_group_type() - group_type_get.return_value = db_group_type - group_type = objects.GroupType.get_by_id(self.context, - fake.GROUP_TYPE_ID) - self._compare(self, db_group_type, group_type) - - @mock.patch('cinder.volume.group_types.create') - def test_create(self, group_type_create): - db_group_type = fake_group.fake_db_group_type() - group_type_create.return_value = db_group_type - - group_type = objects.GroupType(context=self.context) - group_type.name = db_group_type['name'] - group_type.group_specs = db_group_type['group_specs'] - group_type.is_public = db_group_type['is_public'] - group_type.projects = db_group_type['projects'] - group_type.description = db_group_type['description'] - group_type.create() - - group_type_create.assert_called_once_with( - self.context, db_group_type['name'], - db_group_type['group_specs'], db_group_type['is_public'], - db_group_type['projects'], db_group_type['description']) - - @mock.patch('cinder.volume.group_types.update') - def test_save(self, group_type_update): - db_group_type = fake_group.fake_db_group_type() - group_type = objects.GroupType._from_db_object(self.context, - objects.GroupType(), - db_group_type) - group_type.description = 'foobar' - group_type.save() - group_type_update.assert_called_once_with(self.context, - group_type.id, - group_type.name, - group_type.description) - - @mock.patch('cinder.volume.group_types.destroy') - def test_destroy(self, group_type_destroy): - db_group_type = 
fake_group.fake_db_group_type() - group_type = objects.GroupType._from_db_object(self.context, - objects.GroupType(), - db_group_type) - group_type.destroy() - self.assertTrue(group_type_destroy.called) - admin_context = group_type_destroy.call_args[0][0] - self.assertTrue(admin_context.is_admin) - - @mock.patch('cinder.db.sqlalchemy.api._group_type_get_full') - def test_refresh(self, group_type_get): - db_type1 = fake_group.fake_db_group_type() - db_type2 = db_type1.copy() - db_type2['description'] = 'foobar' - - # updated description - group_type_get.side_effect = [db_type1, db_type2] - group_type = objects.GroupType.get_by_id(self.context, - fake.GROUP_TYPE_ID) - self._compare(self, db_type1, group_type) - - # description was updated, so a group type refresh should have a new - # value for that field - group_type.refresh() - self._compare(self, db_type2, group_type) - if six.PY3: - call_bool = mock.call.__bool__() - else: - call_bool = mock.call.__nonzero__() - group_type_get.assert_has_calls([mock.call(self.context, - fake.GROUP_TYPE_ID), - call_bool, - mock.call(self.context, - fake.GROUP_TYPE_ID)]) - - -class TestGroupTypeList(test_objects.BaseObjectsTestCase): - @mock.patch('cinder.volume.group_types.get_all_group_types') - def test_get_all(self, get_all_types): - db_group_type = fake_group.fake_db_group_type() - get_all_types.return_value = {db_group_type['name']: db_group_type} - - group_types = objects.GroupTypeList.get_all(self.context) - self.assertEqual(1, len(group_types)) - TestGroupType._compare(self, db_group_type, group_types[0]) - - @mock.patch('cinder.volume.group_types.get_all_group_types') - def test_get_all_with_pagination(self, get_all_types): - db_group_type = fake_group.fake_db_group_type() - get_all_types.return_value = {db_group_type['name']: db_group_type} - - group_types = objects.GroupTypeList.get_all(self.context, - filters={'is_public': - True}, - marker=None, - limit=1, - sort_keys='id', - sort_dirs='desc', - offset=None) - 
self.assertEqual(1, len(group_types)) - TestGroupType._compare(self, db_group_type, group_types[0]) diff --git a/cinder/tests/unit/objects/test_manageable_volumes_snapshots.py b/cinder/tests/unit/objects/test_manageable_volumes_snapshots.py deleted file mode 100644 index a2b45ad3b..000000000 --- a/cinder/tests/unit/objects/test_manageable_volumes_snapshots.py +++ /dev/null @@ -1,107 +0,0 @@ -# Copyright 2016 Intel Corporation -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import ddt - -from cinder import objects -from cinder.tests.unit import objects as test_objects - - -@ddt.ddt -class TestManageableResources(test_objects.BaseObjectsTestCase): - - def resource_test(self, resource, resource_type): - if resource_type == "manageable_volume_obj": - resource.manageable_volume_obj.wrong_key - elif resource_type == "manageable_snapshot_obj": - resource.manageable_snapshot_obj.wrong_key - - def setUp(self): - super(TestManageableResources, self).setUp() - self.manageable_volume_dict = [ - {'cinder_id': - 'e334aab4-c987-4eb0-9c81-d4a773b4f7a6', - 'extra_info': None, - 'reason_not_safe': 'already managed', - 'reference': - {'source-name': - 'volume-e334aab4-c987-4eb0-9c81-d4a773b4f7a6'}, - 'safe_to_manage': False, - 'size': 1, - 'foo': 'bar'}, - {'cinder_id': - 'da25ac53-3fe0-4f56-9369-4d289d8902fd', - 'extra_info': None, - 'reason_not_safe': 'already managed', - 'reference': - {'source-name': - 'volume-da25ac53-3fe0-4f56-9369-4d289d8902fd'}, - 'safe_to_manage': False, - 
'size': 2} - ] - - self.manageable_snapshot_dict = [ - {'cinder_id': - 'e334aab4-c987-4eb0-9c81-d4a773b4f7a6', - 'reference': - {'source-name': - 'volume-e334aab4-c987-4eb0-9c81-d4a773b4f7a6'}, - 'extra_info': None, - 'reason_not_safe': 'already managed', - 'source_reference': - {'source-name': - 'volume-e334aab4-c987-4eb0-9c81-d4a773b4f7a6'}, - 'safe_to_manage': False, - 'size': 1, - 'foo': 'bar'}, - {'cinder_id': - 'da25ac53-3fe0-4f56-9369-4d289d8902fd', - 'reference': - {'source-name': - 'volume-da25ac53-3fe0-4f56-9369-4d289d8902fd'}, - 'extra_info': None, - 'reason_not_safe': 'already managed', - 'source_reference': - {'source-name': - 'da25ac53-3fe0-4f56-9369-4d289d8902fd'}, - 'safe_to_manage': False, - 'size': 2} - ] - - vol_mang_list = (objects.ManageableVolumeList.from_primitives - (self.context, self.manageable_volume_dict)) - self.manageable_volume_obj_list = vol_mang_list - - snap_mang_list = (objects.ManageableSnapshotList.from_primitives - (self.context, self.manageable_snapshot_dict)) - self.manageable_snapshot_obj_list = snap_mang_list - - self.manageable_volume_obj = self.manageable_volume_obj_list[0] - self.manageable_snapshot_obj = self.manageable_snapshot_obj_list[0] - - @ddt.data('manageable_volume_obj', 'manageable_snapshot_obj') - def test_extra_info(self, obj): - # Making sure that any new key assignment gets stored in extra_info - # field of manageable_volume_object & manageable_snapshot_object - self.assertEqual( - 'bar', - getattr(self, obj).extra_info['foo']) - - @ddt.data('manageable_volume_obj', 'manageable_snapshot_obj') - def test_extra_info_wrong_key(self, obj): - # Making sure referring an attribute before setting it raises an - # Attribute Error for manageable_volume_object & - # manageable_snapshot_object - getattr(self, obj).foo = "test" - self.assertRaises(AttributeError, self.resource_test, self, obj) diff --git a/cinder/tests/unit/objects/test_objects.py b/cinder/tests/unit/objects/test_objects.py deleted file mode 100644 
index 03efd416d..000000000 --- a/cinder/tests/unit/objects/test_objects.py +++ /dev/null @@ -1,133 +0,0 @@ -# Copyright 2015 IBM Corp. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from oslo_versionedobjects import fixture - -from cinder import db -from cinder import objects -from cinder.objects import base -from cinder import test - - -# NOTE: The hashes in this list should only be changed if they come with a -# corresponding version bump in the affected objects. -object_data = { - 'Backup': '1.4-c50f7a68bb4c400dd53dd219685b3992', - 'BackupDeviceInfo': '1.0-74b3950676c690538f4bc6796bd0042e', - 'BackupImport': '1.4-c50f7a68bb4c400dd53dd219685b3992', - 'BackupList': '1.0-15ecf022a68ddbb8c2a6739cfc9f8f5e', - 'CleanupRequest': '1.0-e7c688b893e1d5537ccf65cc3eb10a28', - 'Cluster': '1.1-e2c533eb8cdd8d229b6c45c6cf3a9e2c', - 'ClusterList': '1.0-15ecf022a68ddbb8c2a6739cfc9f8f5e', - 'CGSnapshot': '1.1-3212ac2b4c2811b7134fb9ba2c49ff74', - 'CGSnapshotList': '1.0-15ecf022a68ddbb8c2a6739cfc9f8f5e', - 'ConsistencyGroup': '1.4-7bf01a79b82516639fc03cd3ab6d9c01', - 'ConsistencyGroupList': '1.1-15ecf022a68ddbb8c2a6739cfc9f8f5e', - 'LogLevel': '1.0-7a8200b6b5063b33ec7b569dc6be66d2', - 'LogLevelList': '1.0-15ecf022a68ddbb8c2a6739cfc9f8f5e', - 'ManageableSnapshot': '1.0-5be933366eb17d12db0115c597158d0d', - 'ManageableSnapshotList': '1.0-15ecf022a68ddbb8c2a6739cfc9f8f5e', - 'ManageableVolume': '1.0-5fd0152237ec9dfb7b5c7095b8b09ffa', - 'ManageableVolumeList': 
'1.0-15ecf022a68ddbb8c2a6739cfc9f8f5e', - 'QualityOfServiceSpecs': '1.0-0b212e0a86ee99092229874e03207fe8', - 'QualityOfServiceSpecsList': '1.0-15ecf022a68ddbb8c2a6739cfc9f8f5e', - 'RequestSpec': '1.1-b0bd1a28d191d75648901fa853e8a733', - 'Service': '1.4-a6727ccda6d4043f5e38e75c7c518c7f', - 'ServiceList': '1.1-15ecf022a68ddbb8c2a6739cfc9f8f5e', - 'Snapshot': '1.5-ac1cdbd5b89588f6a8f44afdf6b8b201', - 'SnapshotList': '1.0-15ecf022a68ddbb8c2a6739cfc9f8f5e', - 'Volume': '1.6-7d3bc8577839d5725670d55e480fe95f', - 'VolumeList': '1.1-15ecf022a68ddbb8c2a6739cfc9f8f5e', - 'VolumeAttachment': '1.2-b68b357a1756582b706006ea9de40c9a', - 'VolumeAttachmentList': '1.1-15ecf022a68ddbb8c2a6739cfc9f8f5e', - 'VolumeProperties': '1.1-cadac86b2bdc11eb79d1dcea988ff9e8', - 'VolumeType': '1.3-a5d8c3473db9bc3bbcdbab9313acf4d1', - 'VolumeTypeList': '1.1-15ecf022a68ddbb8c2a6739cfc9f8f5e', - 'GroupType': '1.0-d4a7b272199d0b0d6fc3ceed58539d30', - 'GroupTypeList': '1.0-15ecf022a68ddbb8c2a6739cfc9f8f5e', - 'Group': '1.2-2ade6acf2e55687b980048fc3f51dad9', - 'GroupList': '1.0-15ecf022a68ddbb8c2a6739cfc9f8f5e', - 'GroupSnapshot': '1.0-9af3e994e889cbeae4427c3e351fa91d', - 'GroupSnapshotList': '1.0-15ecf022a68ddbb8c2a6739cfc9f8f5e', -} - - -class TestObjectVersions(test.TestCase): - - def test_versions(self): - checker = fixture.ObjectVersionChecker( - base.CinderObjectRegistry.obj_classes()) - expected, actual = checker.test_hashes(object_data) - self.assertEqual(expected, actual, - "Some objects have changed; please make sure the " - "versions have been bumped and backporting " - "compatibility code has been added to " - "obj_make_compatible if necessary, and then update " - "their hashes in the object_data map in this test " - "module. 
If we don't need to add backporting code " - "then it means we also don't need the version bump " - "and we just have to change the hash in this module.") - - def test_versions_history(self): - classes = base.CinderObjectRegistry.obj_classes() - versions = base.OBJ_VERSIONS.get_current_versions() - expected = {} - actual = {} - for name, cls in classes.items(): - if name not in versions: - expected[name] = cls[0].VERSION - elif cls[0].VERSION != versions[name]: - expected[name] = cls[0].VERSION - actual[name] = versions[name] - - self.assertEqual(expected, actual, - 'Some objects versions have changed; please make ' - 'sure a new objects history version was added in ' - 'cinder.objects.base.OBJ_VERSIONS.') - - def test_object_nullable_match_db(self): - # This test is to keep nullable of every field in corresponding - # db model and object match. - def _check_table_matched(db_model, cls): - for column in db_model.__table__.columns: - # NOTE(xyang): Skip the comparison of the colume name - # group_type_id in table Group because group_type_id - # is in the object Group but it is stored in a different - # table in the database, not in the Group table. - if (column.name in cls.fields and - (column.name != 'group_type_id' and name != 'Group')): - self.assertEqual( - column.nullable, - cls.fields[column.name].nullable, - 'Column %(c)s in table %(t)s not match.' - % {'c': column.name, - 't': name}) - - classes = base.CinderObjectRegistry.obj_classes() - for name, cls in classes.items(): - if issubclass(cls[0], base.CinderPersistentObject): - db_model = db.get_model_for_versioned_object(cls[0]) - _check_table_matched(db_model, cls[0]) - - def test_obj_make_compatible(self): - # Go through all of the object classes and run obj_to_primitive() with - # a target version of all previous minor versions. It doesn't test - # the converted data, but at least ensures the method doesn't blow - # up on something simple. 
- init_args = {} - init_kwargs = {objects.Snapshot: {'context': 'ctxt'}} - checker = fixture.ObjectVersionChecker( - base.CinderObjectRegistry.obj_classes()) - checker.test_compatibility_routines(init_args=init_args, - init_kwargs=init_kwargs) diff --git a/cinder/tests/unit/objects/test_qos.py b/cinder/tests/unit/objects/test_qos.py deleted file mode 100644 index 8f900d24d..000000000 --- a/cinder/tests/unit/objects/test_qos.py +++ /dev/null @@ -1,127 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -import mock -from oslo_utils import timeutils -import pytz - -from cinder.db.sqlalchemy import models -from cinder import exception -from cinder import objects -from cinder.tests.unit import fake_constants as fake -from cinder.tests.unit import objects as test_objects - -fake_qos = {'consumer': 'front-end', - 'id': fake.OBJECT_ID, - 'name': 'qos_name', - 'specs': {'key1': 'val1', 'key2': 'val2'}} - -fake_qos_no_id = fake_qos.copy() -del fake_qos_no_id['id'] - - -class TestQos(test_objects.BaseObjectsTestCase): - @mock.patch('cinder.db.get_by_id', return_value=fake_qos) - def test_get_by_id(self, qos_get): - qos_object = objects.QualityOfServiceSpecs.get_by_id( - self.context, fake.OBJECT_ID) - self._compare(self, fake_qos, qos_object) - qos_get.assert_called_once_with( - self.context, models.QualityOfServiceSpecs, fake.OBJECT_ID) - - @mock.patch('cinder.db.qos_specs_create', - return_value={'name': 'qos_name', 'id': fake.OBJECT_ID}) - def test_create(self, qos_fake_create): - qos_object = objects.QualityOfServiceSpecs( - self.context, **fake_qos_no_id) - qos_object.create() - self._compare(self, fake_qos, qos_object) - - # Fail to create a second time - self.assertRaises(exception.ObjectActionError, qos_object.create) - - self.assertEqual(1, len(qos_fake_create.mock_calls)) - - @mock.patch('cinder.db.qos_specs_item_delete') - @mock.patch('cinder.db.qos_specs_update') - def test_save(self, qos_fake_update, qos_fake_delete): - qos_dict = fake_qos.copy() - qos_dict['specs']['key_to_remove1'] = 'val' - qos_dict['specs']['key_to_remove2'] = 'val' - qos_object = objects.QualityOfServiceSpecs._from_db_object( - self.context, objects.QualityOfServiceSpecs(), qos_dict) - - qos_object.specs['key1'] = 'val1' - qos_object.save() - # No values have changed so no updates should be made - self.assertFalse(qos_fake_update.called) - - qos_object.consumer = 'back-end' - qos_object.specs['key1'] = 'val2' - qos_object.specs['new_key'] = 'val3' - - del 
qos_object.specs['key_to_remove1'] - del qos_object.specs['key_to_remove2'] - qos_object.save() - qos_fake_update.assert_called_once_with( - self.context, fake.OBJECT_ID, - {'specs': {'key1': 'val2', 'new_key': 'val3'}, - 'consumer': 'back-end'}) - qos_fake_delete.assert_has_calls([ - mock.call(self.context, fake.OBJECT_ID, 'key_to_remove1'), - mock.call(self.context, fake.OBJECT_ID, 'key_to_remove2')], - any_order=True) - - @mock.patch('oslo_utils.timeutils.utcnow', return_value=timeutils.utcnow()) - @mock.patch('cinder.objects.VolumeTypeList.get_all_types_for_qos', - return_value=None) - @mock.patch('cinder.db.sqlalchemy.api.qos_specs_delete') - def test_destroy_no_vol_types(self, qos_fake_delete, fake_get_vol_types, - utcnow_mock): - qos_fake_delete.return_value = { - 'deleted': True, - 'deleted_at': utcnow_mock.return_value} - qos_object = objects.QualityOfServiceSpecs._from_db_object( - self.context, objects.QualityOfServiceSpecs(), fake_qos) - qos_object.destroy() - - qos_fake_delete.assert_called_once_with(mock.ANY, fake_qos['id']) - self.assertTrue(qos_object.deleted) - self.assertEqual(utcnow_mock.return_value.replace(tzinfo=pytz.UTC), - qos_object.deleted_at) - - @mock.patch('cinder.db.sqlalchemy.api.qos_specs_delete') - @mock.patch('cinder.db.qos_specs_disassociate_all') - @mock.patch('cinder.objects.VolumeTypeList.get_all_types_for_qos') - def test_destroy_with_vol_types(self, fake_get_vol_types, - qos_fake_disassociate, qos_fake_delete): - qos_object = objects.QualityOfServiceSpecs._from_db_object( - self.context, objects.QualityOfServiceSpecs(), fake_qos) - fake_get_vol_types.return_value = objects.VolumeTypeList( - objects=[objects.VolumeType(id=fake.VOLUME_TYPE_ID)]) - self.assertRaises(exception.QoSSpecsInUse, qos_object.destroy) - - qos_object.destroy(force=True) - qos_fake_delete.assert_called_once_with(mock.ANY, fake_qos['id']) - qos_fake_disassociate.assert_called_once_with( - self.context, fake_qos['id']) - - 
@mock.patch('cinder.objects.VolumeTypeList.get_all_types_for_qos', - return_value=None) - @mock.patch('cinder.db.get_by_id', return_value=fake_qos) - def test_get_volume_type(self, fake_get_by_id, fake_get_vol_types): - qos_object = objects.QualityOfServiceSpecs.get_by_id( - self.context, fake.OBJECT_ID) - self.assertFalse(fake_get_vol_types.called) - # Access lazy-loadable attribute - qos_object.volume_types - self.assertTrue(fake_get_vol_types.called) diff --git a/cinder/tests/unit/objects/test_service.py b/cinder/tests/unit/objects/test_service.py deleted file mode 100644 index 1babfb584..000000000 --- a/cinder/tests/unit/objects/test_service.py +++ /dev/null @@ -1,238 +0,0 @@ -# Copyright 2015 Intel Corp. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -import datetime -import mock -from oslo_utils import timeutils -import pytz -import six - -from cinder import exception -from cinder import objects -from cinder.tests.unit import fake_cluster -from cinder.tests.unit import fake_service -from cinder.tests.unit import objects as test_objects - - -class TestService(test_objects.BaseObjectsTestCase): - - @mock.patch('cinder.db.sqlalchemy.api.service_get') - def test_get_by_id(self, service_get): - db_service = fake_service.fake_db_service() - service_get.return_value = db_service - service = objects.Service.get_by_id(self.context, 1) - self._compare(self, db_service, service) - service_get.assert_called_once_with(self.context, 1) - - @mock.patch('cinder.db.service_get') - def test_get_by_host_and_topic(self, service_get): - db_service = fake_service.fake_db_service() - service_get.return_value = db_service - service = objects.Service.get_by_host_and_topic( - self.context, 'fake-host', 'fake-topic') - self._compare(self, db_service, service) - service_get.assert_called_once_with( - self.context, disabled=False, host='fake-host', topic='fake-topic') - - @mock.patch('cinder.db.service_get') - def test_get_by_args(self, service_get): - db_service = fake_service.fake_db_service() - service_get.return_value = db_service - service = objects.Service.get_by_args( - self.context, 'fake-host', 'fake-key') - self._compare(self, db_service, service) - service_get.assert_called_once_with( - self.context, host='fake-host', binary='fake-key') - - @mock.patch('cinder.db.service_create') - def test_create(self, service_create): - db_service = fake_service.fake_db_service() - service_create.return_value = db_service - service = objects.Service(context=self.context) - service.create() - self.assertEqual(db_service['id'], service.id) - service_create.assert_called_once_with(self.context, {}) - - @mock.patch('cinder.db.service_update') - def test_save(self, service_update): - db_service = fake_service.fake_db_service() - service = 
objects.Service._from_db_object( - self.context, objects.Service(), db_service) - service.topic = 'foobar' - service.save() - service_update.assert_called_once_with(self.context, service.id, - {'topic': 'foobar'}) - - @mock.patch('oslo_utils.timeutils.utcnow', return_value=timeutils.utcnow()) - @mock.patch('cinder.db.sqlalchemy.api.service_destroy') - def test_destroy(self, service_destroy, utcnow_mock): - service_destroy.return_value = { - 'deleted': True, - 'deleted_at': utcnow_mock.return_value} - db_service = fake_service.fake_db_service() - service = objects.Service._from_db_object( - self.context, objects.Service(), db_service) - with mock.patch.object(service._context, 'elevated') as elevated_ctx: - service.destroy() - service_destroy.assert_called_once_with(elevated_ctx(), 123) - self.assertTrue(service.deleted) - self.assertEqual(utcnow_mock.return_value.replace(tzinfo=pytz.UTC), - service.deleted_at) - - @mock.patch('cinder.db.sqlalchemy.api.service_get') - def test_refresh(self, service_get): - db_service1 = fake_service.fake_db_service() - db_service2 = db_service1.copy() - db_service2['availability_zone'] = 'foobar' - - # On the second service_get, return the service with an updated - # availability_zone - service_get.side_effect = [db_service1, db_service2] - service = objects.Service.get_by_id(self.context, 123) - self._compare(self, db_service1, service) - - # availability_zone was updated, so a service refresh should have a - # new value for that field - service.refresh() - self._compare(self, db_service2, service) - if six.PY3: - call_bool = mock.call.__bool__() - else: - call_bool = mock.call.__nonzero__() - service_get.assert_has_calls([mock.call(self.context, 123), - call_bool, - mock.call(self.context, 123)]) - - @mock.patch('cinder.db.service_get_all') - def test_get_minimum_version(self, service_get_all): - services_update = [ - {'rpc_current_version': '1.0', 'object_current_version': '1.3'}, - {'rpc_current_version': '1.1', 
'object_current_version': '1.2'}, - {'rpc_current_version': '2.0', 'object_current_version': '2.5'}, - ] - expected = ('1.0', '1.2') - services = [fake_service.fake_db_service(**s) for s in services_update] - service_get_all.return_value = services - - min_rpc = objects.Service.get_minimum_rpc_version(self.context, 'foo') - self.assertEqual(expected[0], min_rpc) - min_obj = objects.Service.get_minimum_obj_version(self.context, 'foo') - self.assertEqual(expected[1], min_obj) - service_get_all.assert_has_calls( - [mock.call(self.context, binary='foo', disabled=None)] * 2) - - @mock.patch('cinder.db.service_get_all') - def test_get_minimum_version_liberty(self, service_get_all): - services_update = [ - {'rpc_current_version': '1.0', 'object_current_version': '1.3'}, - {'rpc_current_version': '1.1', 'object_current_version': None}, - {'rpc_current_version': None, 'object_current_version': '2.5'}, - ] - services = [fake_service.fake_db_service(**s) for s in services_update] - service_get_all.return_value = services - - self.assertRaises(exception.ServiceTooOld, - objects.Service.get_minimum_rpc_version, - self.context, 'foo') - self.assertRaises(exception.ServiceTooOld, - objects.Service.get_minimum_obj_version, - self.context, 'foo') - - @mock.patch('cinder.db.service_get_all') - def test_get_minimum_version_no_binary(self, service_get_all): - services_update = [ - {'rpc_current_version': '1.0', 'object_current_version': '1.3'}, - {'rpc_current_version': '1.1', 'object_current_version': '1.2'}, - {'rpc_current_version': '2.0', 'object_current_version': '2.5'}, - ] - services = [fake_service.fake_db_service(**s) for s in services_update] - service_get_all.return_value = services - - min_obj = objects.Service.get_minimum_obj_version(self.context) - self.assertEqual('1.2', min_obj) - service_get_all.assert_called_once_with(self.context, binary=None, - disabled=None) - - @mock.patch('cinder.db.sqlalchemy.api.cluster_get') - def test_lazy_loading_cluster_field(self, 
class TestServiceList(test_objects.BaseObjectsTestCase):
    """Tests for the ServiceList object's DB-backed query helpers."""

    @mock.patch('cinder.db.service_get_all')
    def test_get_all(self, mock_get_all):
        """get_all forwards the filters dict verbatim to the DB API."""
        db_service = fake_service.fake_db_service()
        mock_get_all.return_value = [db_service]

        filters = {'host': 'host', 'binary': 'foo', 'disabled': False}
        result = objects.ServiceList.get_all(self.context, filters)

        mock_get_all.assert_called_once_with(self.context, **filters)
        self.assertEqual(1, len(result))
        TestService._compare(self, db_service, result[0])

    @mock.patch('cinder.db.service_get_all')
    def test_get_all_by_topic(self, mock_get_all):
        """get_all_by_topic translates its args into topic/disabled kwargs."""
        db_service = fake_service.fake_db_service()
        mock_get_all.return_value = [db_service]

        result = objects.ServiceList.get_all_by_topic(
            self.context, 'foo', 'bar')

        mock_get_all.assert_called_once_with(
            self.context, topic='foo', disabled='bar')
        self.assertEqual(1, len(result))
        TestService._compare(self, db_service, result[0])

    @mock.patch('cinder.db.service_get_all')
    def test_get_all_by_binary(self, mock_get_all):
        """get_all_by_binary translates its args into binary/disabled kwargs."""
        db_service = fake_service.fake_db_service()
        mock_get_all.return_value = [db_service]

        result = objects.ServiceList.get_all_by_binary(
            self.context, 'foo', 'bar')

        mock_get_all.assert_called_once_with(
            self.context, binary='foo', disabled='bar')
        self.assertEqual(1, len(result))
        TestService._compare(self, db_service, result[0])
- -import copy - -import ddt -import mock -from oslo_utils import timeutils -import pytz -import six - -from cinder.db.sqlalchemy import models -from cinder import exception -from cinder import objects -from cinder.objects import fields -from cinder.tests.unit import fake_constants as fake -from cinder.tests.unit import fake_snapshot -from cinder.tests.unit import fake_volume -from cinder.tests.unit import objects as test_objects - - -fake_db_snapshot = fake_snapshot.fake_db_snapshot( - cgsnapshot_id=fake.CGSNAPSHOT_ID) -del fake_db_snapshot['metadata'] -del fake_db_snapshot['volume'] - - -# NOTE(andrey-mp): make Snapshot object here to check object algorithms -fake_snapshot_obj = { - 'id': fake.SNAPSHOT_ID, - 'volume_id': fake.VOLUME_ID, - 'status': fields.SnapshotStatus.CREATING, - 'progress': '0%', - 'volume_size': 1, - 'display_name': 'fake_name', - 'display_description': 'fake_description', - 'metadata': {}, -} - - -@ddt.ddt -class TestSnapshot(test_objects.BaseObjectsTestCase): - - @mock.patch('cinder.db.get_by_id', return_value=fake_db_snapshot) - def test_get_by_id(self, snapshot_get): - snapshot = objects.Snapshot.get_by_id(self.context, 1) - self._compare(self, fake_snapshot_obj, snapshot) - snapshot_get.assert_called_once_with(self.context, models.Snapshot, 1) - - @mock.patch('cinder.db.sqlalchemy.api.model_query') - def test_get_by_id_no_existing_id(self, model_query): - query = model_query().options().options().filter_by().first - query.return_value = None - self.assertRaises(exception.SnapshotNotFound, - objects.Snapshot.get_by_id, self.context, 123) - - def test_reset_changes(self): - snapshot = objects.Snapshot() - snapshot.metadata = {'key1': 'value1'} - self.assertEqual({}, snapshot._orig_metadata) - snapshot.obj_reset_changes(['metadata']) - self.assertEqual({'key1': 'value1'}, snapshot._orig_metadata) - - @mock.patch('cinder.db.snapshot_create', return_value=fake_db_snapshot) - def test_create(self, snapshot_create): - snapshot = 
objects.Snapshot(context=self.context) - snapshot.create() - self.assertEqual(fake_snapshot_obj['id'], snapshot.id) - self.assertEqual(fake_snapshot_obj['volume_id'], snapshot.volume_id) - - @mock.patch('cinder.db.snapshot_create') - def test_create_with_provider_id(self, snapshot_create): - snapshot_create.return_value = copy.deepcopy(fake_db_snapshot) - snapshot_create.return_value['provider_id'] = fake.PROVIDER_ID - - snapshot = objects.Snapshot(context=self.context) - snapshot.create() - self.assertEqual(fake.PROVIDER_ID, snapshot.provider_id) - - @mock.patch('cinder.db.snapshot_update') - def test_save(self, snapshot_update): - snapshot = objects.Snapshot._from_db_object( - self.context, objects.Snapshot(), fake_db_snapshot) - snapshot.display_name = 'foobar' - snapshot.save() - snapshot_update.assert_called_once_with(self.context, snapshot.id, - {'display_name': 'foobar'}) - - @mock.patch('cinder.db.snapshot_metadata_update', - return_value={'key1': 'value1'}) - @mock.patch('cinder.db.snapshot_update') - def test_save_with_metadata(self, snapshot_update, - snapshot_metadata_update): - snapshot = objects.Snapshot._from_db_object( - self.context, objects.Snapshot(), fake_db_snapshot) - snapshot.display_name = 'foobar' - snapshot.metadata = {'key1': 'value1'} - self.assertEqual({'display_name': 'foobar', - 'metadata': {'key1': 'value1'}}, - snapshot.obj_get_changes()) - snapshot.save() - snapshot_update.assert_called_once_with(self.context, snapshot.id, - {'display_name': 'foobar'}) - snapshot_metadata_update.assert_called_once_with(self.context, - fake.SNAPSHOT_ID, - {'key1': 'value1'}, - True) - - @mock.patch('oslo_utils.timeutils.utcnow', return_value=timeutils.utcnow()) - @mock.patch('cinder.db.sqlalchemy.api.snapshot_destroy') - def test_destroy(self, snapshot_destroy, utcnow_mock): - snapshot_destroy.return_value = { - 'status': 'deleted', - 'deleted': True, - 'deleted_at': utcnow_mock.return_value} - snapshot = objects.Snapshot(context=self.context, 
id=fake.SNAPSHOT_ID) - snapshot.destroy() - snapshot_destroy.assert_called_once_with(self.context, - fake.SNAPSHOT_ID) - self.assertTrue(snapshot.deleted) - self.assertEqual(fields.SnapshotStatus.DELETED, snapshot.status) - self.assertEqual(utcnow_mock.return_value.replace(tzinfo=pytz.UTC), - snapshot.deleted_at) - - @mock.patch('cinder.db.snapshot_metadata_delete') - def test_delete_metadata_key(self, snapshot_metadata_delete): - snapshot = objects.Snapshot(self.context, id=fake.SNAPSHOT_ID) - snapshot.metadata = {'key1': 'value1', 'key2': 'value2'} - self.assertEqual({}, snapshot._orig_metadata) - snapshot.delete_metadata_key(self.context, 'key2') - self.assertEqual({'key1': 'value1'}, snapshot.metadata) - snapshot_metadata_delete.assert_called_once_with(self.context, - fake.SNAPSHOT_ID, - 'key2') - - def test_obj_fields(self): - volume = objects.Volume(context=self.context, id=fake.VOLUME_ID, - _name_id=fake.VOLUME_NAME_ID) - snapshot = objects.Snapshot(context=self.context, id=fake.VOLUME_ID, - volume=volume) - self.assertEqual(['name', 'volume_name'], snapshot.obj_extra_fields) - self.assertEqual('snapshot-%s' % fake.VOLUME_ID, snapshot.name) - self.assertEqual('volume-%s' % fake.VOLUME_NAME_ID, - snapshot.volume_name) - - @mock.patch('cinder.objects.volume.Volume.get_by_id') - @mock.patch('cinder.objects.cgsnapshot.CGSnapshot.get_by_id') - def test_obj_load_attr(self, cgsnapshot_get_by_id, volume_get_by_id): - snapshot = objects.Snapshot._from_db_object( - self.context, objects.Snapshot(), fake_db_snapshot) - # Test volume lazy-loaded field - volume = objects.Volume(context=self.context, id=fake.VOLUME_ID) - volume_get_by_id.return_value = volume - self.assertEqual(volume, snapshot.volume) - volume_get_by_id.assert_called_once_with(self.context, - snapshot.volume_id) - # Test cgsnapshot lazy-loaded field - cgsnapshot = objects.CGSnapshot(context=self.context, - id=fake.CGSNAPSHOT_ID) - cgsnapshot_get_by_id.return_value = cgsnapshot - 
self.assertEqual(cgsnapshot, snapshot.cgsnapshot) - cgsnapshot_get_by_id.assert_called_once_with(self.context, - snapshot.cgsnapshot_id) - - @mock.patch('cinder.objects.cgsnapshot.CGSnapshot.get_by_id') - def test_obj_load_attr_cgroup_not_exist(self, cgsnapshot_get_by_id): - fake_non_cg_db_snapshot = fake_snapshot.fake_db_snapshot( - cgsnapshot_id=None) - snapshot = objects.Snapshot._from_db_object( - self.context, objects.Snapshot(), fake_non_cg_db_snapshot) - self.assertIsNone(snapshot.cgsnapshot) - cgsnapshot_get_by_id.assert_not_called() - - @mock.patch('cinder.objects.group_snapshot.GroupSnapshot.get_by_id') - def test_obj_load_attr_group_not_exist(self, group_snapshot_get_by_id): - fake_non_cg_db_snapshot = fake_snapshot.fake_db_snapshot( - group_snapshot_id=None) - snapshot = objects.Snapshot._from_db_object( - self.context, objects.Snapshot(), fake_non_cg_db_snapshot) - self.assertIsNone(snapshot.group_snapshot) - group_snapshot_get_by_id.assert_not_called() - - @mock.patch('cinder.db.snapshot_data_get_for_project') - def test_snapshot_data_get_for_project(self, snapshot_data_get): - snapshot = objects.Snapshot._from_db_object( - self.context, objects.Snapshot(), fake_db_snapshot) - volume_type_id = mock.sentinel.volume_type_id - snapshot.snapshot_data_get_for_project(self.context, - self.project_id, - volume_type_id) - snapshot_data_get.assert_called_once_with(self.context, - self.project_id, - volume_type_id) - - @mock.patch('cinder.db.sqlalchemy.api.snapshot_get') - def test_refresh(self, snapshot_get): - db_snapshot1 = fake_snapshot.fake_db_snapshot() - db_snapshot2 = db_snapshot1.copy() - db_snapshot2['display_name'] = 'foobar' - - # On the second snapshot_get, return the snapshot with an updated - # display_name - snapshot_get.side_effect = [db_snapshot1, db_snapshot2] - snapshot = objects.Snapshot.get_by_id(self.context, fake.SNAPSHOT_ID) - self._compare(self, db_snapshot1, snapshot) - - # display_name was updated, so a snapshot refresh should have a 
new - # value for that field - snapshot.refresh() - self._compare(self, db_snapshot2, snapshot) - if six.PY3: - call_bool = mock.call.__bool__() - else: - call_bool = mock.call.__nonzero__() - snapshot_get.assert_has_calls([ - mock.call(self.context, - fake.SNAPSHOT_ID), - call_bool, - mock.call(self.context, - fake.SNAPSHOT_ID)]) - - @ddt.data('1.1', '1.3') - def test_obj_make_compatible_1_3(self, version): - snapshot = objects.Snapshot(context=self.context) - snapshot.status = fields.SnapshotStatus.UNMANAGING - primitive = snapshot.obj_to_primitive(version) - snapshot = objects.Snapshot.obj_from_primitive(primitive) - if version == '1.3': - status = fields.SnapshotStatus.UNMANAGING - else: - status = fields.SnapshotStatus.DELETING - self.assertEqual(status, snapshot.status) - - @ddt.data('1.3', '1.4') - def test_obj_make_compatible_1_4(self, version): - snapshot = objects.Snapshot(context=self.context) - snapshot.status = fields.SnapshotStatus.BACKING_UP - primitive = snapshot.obj_to_primitive(version) - snapshot = objects.Snapshot.obj_from_primitive(primitive) - if version == '1.4': - status = fields.SnapshotStatus.BACKING_UP - else: - status = fields.SnapshotStatus.AVAILABLE - self.assertEqual(status, snapshot.status) - - -class TestSnapshotList(test_objects.BaseObjectsTestCase): - @mock.patch('cinder.objects.volume.Volume.get_by_id') - @mock.patch('cinder.db.snapshot_get_all', return_value=[fake_db_snapshot]) - def test_get_all(self, snapshot_get_all, volume_get_by_id): - fake_volume_obj = fake_volume.fake_volume_obj(self.context) - volume_get_by_id.return_value = fake_volume_obj - - search_opts = mock.sentinel.search_opts - snapshots = objects.SnapshotList.get_all( - self.context, search_opts) - self.assertEqual(1, len(snapshots)) - TestSnapshot._compare(self, fake_snapshot_obj, snapshots[0]) - snapshot_get_all.assert_called_once_with(self.context, search_opts, - None, None, None, None, None) - - @mock.patch('cinder.objects.Volume.get_by_id') - 
@mock.patch('cinder.db.snapshot_get_all_by_host', - return_value=[fake_db_snapshot]) - def test_get_by_host(self, get_by_host, volume_get_by_id): - fake_volume_obj = fake_volume.fake_volume_obj(self.context) - volume_get_by_id.return_value = fake_volume_obj - - snapshots = objects.SnapshotList.get_by_host( - self.context, 'fake-host') - self.assertEqual(1, len(snapshots)) - TestSnapshot._compare(self, fake_snapshot_obj, snapshots[0]) - - @mock.patch('cinder.objects.volume.Volume.get_by_id') - @mock.patch('cinder.db.snapshot_get_all_by_project', - return_value=[fake_db_snapshot]) - def test_get_all_by_project(self, get_all_by_project, volume_get_by_id): - fake_volume_obj = fake_volume.fake_volume_obj(self.context) - volume_get_by_id.return_value = fake_volume_obj - - search_opts = mock.sentinel.search_opts - snapshots = objects.SnapshotList.get_all_by_project( - self.context, self.project_id, search_opts) - self.assertEqual(1, len(snapshots)) - TestSnapshot._compare(self, fake_snapshot_obj, snapshots[0]) - get_all_by_project.assert_called_once_with(self.context, - self.project_id, - search_opts, None, None, - None, None, None) - - @mock.patch('cinder.objects.volume.Volume.get_by_id') - @mock.patch('cinder.db.snapshot_get_all_for_volume', - return_value=[fake_db_snapshot]) - def test_get_all_for_volume(self, get_all_for_volume, volume_get_by_id): - fake_volume_obj = fake_volume.fake_volume_obj(self.context) - volume_get_by_id.return_value = fake_volume_obj - - snapshots = objects.SnapshotList.get_all_for_volume( - self.context, fake_volume_obj.id) - self.assertEqual(1, len(snapshots)) - TestSnapshot._compare(self, fake_snapshot_obj, snapshots[0]) - - @mock.patch('cinder.objects.volume.Volume.get_by_id') - @mock.patch('cinder.db.snapshot_get_all_active_by_window', - return_value=[fake_db_snapshot]) - def test_get_all_active_by_window(self, get_all_active_by_window, - volume_get_by_id): - fake_volume_obj = fake_volume.fake_volume_obj(self.context) - 
volume_get_by_id.return_value = fake_volume_obj - - snapshots = objects.SnapshotList.get_all_active_by_window( - self.context, mock.sentinel.begin, mock.sentinel.end) - self.assertEqual(1, len(snapshots)) - TestSnapshot._compare(self, fake_snapshot_obj, snapshots[0]) - - @mock.patch('cinder.objects.volume.Volume.get_by_id') - @mock.patch('cinder.db.snapshot_get_all_for_cgsnapshot', - return_value=[fake_db_snapshot]) - def test_get_all_for_cgsnapshot(self, get_all_for_cgsnapshot, - volume_get_by_id): - fake_volume_obj = fake_volume.fake_volume_obj(self.context) - volume_get_by_id.return_value = fake_volume_obj - - snapshots = objects.SnapshotList.get_all_for_cgsnapshot( - self.context, mock.sentinel.cgsnapshot_id) - self.assertEqual(1, len(snapshots)) - TestSnapshot._compare(self, fake_snapshot_obj, snapshots[0]) - - @mock.patch('cinder.objects.volume.Volume.get_by_id') - @mock.patch('cinder.db.snapshot_get_all') - def test_get_all_without_metadata(self, snapshot_get_all, - volume_get_by_id): - fake_volume_obj = fake_volume.fake_volume_obj(self.context) - volume_get_by_id.return_value = fake_volume_obj - - snapshot = copy.deepcopy(fake_db_snapshot) - del snapshot['snapshot_metadata'] - snapshot_get_all.return_value = [snapshot] - - search_opts = mock.sentinel.search_opts - self.assertRaises(exception.MetadataAbsent, - objects.SnapshotList.get_all, - self.context, search_opts) - - @mock.patch('cinder.objects.volume.Volume.get_by_id') - @mock.patch('cinder.db.snapshot_get_all') - def test_get_all_with_metadata(self, snapshot_get_all, volume_get_by_id): - fake_volume_obj = fake_volume.fake_volume_obj(self.context) - volume_get_by_id.return_value = fake_volume_obj - - db_snapshot = copy.deepcopy(fake_db_snapshot) - db_snapshot['snapshot_metadata'] = [{'key': 'fake_key', - 'value': 'fake_value'}] - snapshot_get_all.return_value = [db_snapshot] - - search_opts = mock.sentinel.search_opts - snapshots = objects.SnapshotList.get_all( - self.context, search_opts) - 
self.assertEqual(1, len(snapshots)) - - snapshot_obj = copy.deepcopy(fake_snapshot_obj) - snapshot_obj['metadata'] = {'fake_key': 'fake_value'} - TestSnapshot._compare(self, snapshot_obj, snapshots[0]) - snapshot_get_all.assert_called_once_with(self.context, search_opts, - None, None, None, None, None) diff --git a/cinder/tests/unit/objects/test_volume.py b/cinder/tests/unit/objects/test_volume.py deleted file mode 100644 index 6aceea4dd..000000000 --- a/cinder/tests/unit/objects/test_volume.py +++ /dev/null @@ -1,662 +0,0 @@ -# Copyright 2015 SimpliVity Corp. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -import ddt -import mock -from oslo_utils import timeutils -import pytz -import six - -from cinder import context -from cinder import exception -from cinder import objects -from cinder.objects import base as ovo_base -from cinder.objects import fields -from cinder.tests.unit.consistencygroup import fake_consistencygroup -from cinder.tests.unit import fake_constants as fake -from cinder.tests.unit import fake_snapshot -from cinder.tests.unit import fake_volume -from cinder.tests.unit import objects as test_objects - - -@ddt.ddt -class TestVolume(test_objects.BaseObjectsTestCase): - @staticmethod - def _compare(test, db, obj): - db = {k: v for k, v in db.items() - if not k.endswith('metadata') or k.startswith('volume')} - test_objects.BaseObjectsTestCase._compare(test, db, obj) - - @mock.patch('cinder.db.sqlalchemy.api.volume_get') - def test_get_by_id(self, volume_get): - db_volume = fake_volume.fake_db_volume() - volume_get.return_value = db_volume - volume = objects.Volume.get_by_id(self.context, fake.VOLUME_ID) - volume_get.assert_called_once_with(self.context, fake.VOLUME_ID) - self._compare(self, db_volume, volume) - - @mock.patch('cinder.db.sqlalchemy.api.model_query') - def test_get_by_id_no_existing_id(self, model_query): - pf = (model_query().options().options().options().options().options(). 
- options()) - pf.filter_by().first.return_value = None - self.assertRaises(exception.VolumeNotFound, - objects.Volume.get_by_id, self.context, 123) - - @mock.patch('cinder.db.volume_create') - def test_create(self, volume_create): - db_volume = fake_volume.fake_db_volume() - volume_create.return_value = db_volume - volume = objects.Volume(context=self.context) - volume.create() - self.assertEqual(db_volume['id'], volume.id) - - @mock.patch('cinder.db.volume_update') - @ddt.data(False, True) - def test_save(self, test_cg, volume_update): - db_volume = fake_volume.fake_db_volume() - volume = objects.Volume._from_db_object(self.context, - objects.Volume(), db_volume) - volume.display_name = 'foobar' - if test_cg: - volume.consistencygroup = None - volume.save() - volume_update.assert_called_once_with(self.context, volume.id, - {'display_name': 'foobar'}) - - def test_save_error(self): - db_volume = fake_volume.fake_db_volume() - volume = objects.Volume._from_db_object(self.context, - objects.Volume(), db_volume) - volume.display_name = 'foobar' - volume.consistencygroup = ( - fake_consistencygroup.fake_consistencyobject_obj(self.context)) - self.assertRaises(exception.ObjectActionError, - volume.save) - - @mock.patch('cinder.db.volume_metadata_update', - return_value={'key1': 'value1'}) - @mock.patch('cinder.db.volume_update') - def test_save_with_metadata(self, volume_update, metadata_update): - db_volume = fake_volume.fake_db_volume() - volume = objects.Volume._from_db_object(self.context, - objects.Volume(), db_volume) - volume.display_name = 'foobar' - volume.metadata = {'key1': 'value1'} - self.assertEqual({'display_name': 'foobar', - 'metadata': {'key1': 'value1'}}, - volume.obj_get_changes()) - volume.save() - volume_update.assert_called_once_with(self.context, volume.id, - {'display_name': 'foobar'}) - metadata_update.assert_called_once_with(self.context, volume.id, - {'key1': 'value1'}, True) - - @mock.patch('cinder.db.volume_admin_metadata_update', - 
return_value={'key1': 'value1'}) - @mock.patch('cinder.db.volume_update') - def test_save_with_admin_metadata(self, volume_update, - admin_metadata_update): - # Test with no admin context - db_volume = fake_volume.fake_db_volume() - volume = objects.Volume._from_db_object(self.context, - objects.Volume(), db_volume) - volume.admin_metadata = {'key1': 'value1'} - volume.save() - self.assertFalse(admin_metadata_update.called) - - # Test with admin context - admin_context = context.RequestContext(self.user_id, self.project_id, - is_admin=True) - volume = objects.Volume._from_db_object(admin_context, - objects.Volume(), db_volume) - volume.admin_metadata = {'key1': 'value1'} - volume.save() - admin_metadata_update.assert_called_once_with( - admin_context, volume.id, {'key1': 'value1'}, True) - - def test_save_with_glance_metadata(self): - db_volume = fake_volume.fake_db_volume() - volume = objects.Volume._from_db_object(self.context, - objects.Volume(), db_volume) - volume.display_name = 'foobar' - volume.glance_metadata = {'key1': 'value1'} - self.assertRaises(exception.ObjectActionError, volume.save) - - def test_save_with_consistencygroup(self): - db_volume = fake_volume.fake_db_volume() - volume = objects.Volume._from_db_object(self.context, - objects.Volume(), db_volume) - volume.display_name = 'foobar' - volume.consistencygroup = objects.ConsistencyGroup() - self.assertRaises(exception.ObjectActionError, volume.save) - - def test_save_with_snapshots(self): - db_volume = fake_volume.fake_db_volume() - volume = objects.Volume._from_db_object(self.context, - objects.Volume(), db_volume) - volume.display_name = 'foobar' - volume.snapshots = objects.SnapshotList() - self.assertRaises(exception.ObjectActionError, volume.save) - - @mock.patch('oslo_utils.timeutils.utcnow', return_value=timeutils.utcnow()) - @mock.patch('cinder.db.sqlalchemy.api.volume_destroy') - def test_destroy(self, volume_destroy, utcnow_mock): - volume_destroy.return_value = { - 'status': 
'deleted', - 'deleted': True, - 'deleted_at': utcnow_mock.return_value} - db_volume = fake_volume.fake_db_volume() - volume = objects.Volume._from_db_object(self.context, - objects.Volume(), db_volume) - volume.destroy() - self.assertTrue(volume_destroy.called) - admin_context = volume_destroy.call_args[0][0] - self.assertTrue(admin_context.is_admin) - self.assertTrue(volume.deleted) - self.assertEqual('deleted', volume.status) - self.assertEqual(utcnow_mock.return_value.replace(tzinfo=pytz.UTC), - volume.deleted_at) - self.assertIsNone(volume.migration_status) - - def test_obj_fields(self): - volume = objects.Volume(context=self.context, id=fake.VOLUME_ID, - name_id=fake.VOLUME_NAME_ID) - self.assertEqual(['name', 'name_id', 'volume_metadata', - 'volume_admin_metadata', 'volume_glance_metadata'], - volume.obj_extra_fields) - self.assertEqual('volume-%s' % fake.VOLUME_NAME_ID, volume.name) - self.assertEqual(fake.VOLUME_NAME_ID, volume.name_id) - - def test_obj_field_previous_status(self): - volume = objects.Volume(context=self.context, - previous_status='backing-up') - self.assertEqual('backing-up', volume.previous_status) - - @mock.patch('cinder.db.volume_metadata_delete') - def test_delete_metadata_key(self, metadata_delete): - volume = objects.Volume(self.context, id=fake.VOLUME_ID) - volume.metadata = {'key1': 'value1', 'key2': 'value2'} - self.assertEqual({}, volume._orig_metadata) - volume.delete_metadata_key('key2') - self.assertEqual({'key1': 'value1'}, volume.metadata) - metadata_delete.assert_called_once_with(self.context, fake.VOLUME_ID, - 'key2') - - @mock.patch('cinder.db.volume_metadata_get') - @mock.patch('cinder.db.volume_glance_metadata_get') - @mock.patch('cinder.db.volume_admin_metadata_get') - @mock.patch('cinder.objects.volume_type.VolumeType.get_by_id') - @mock.patch('cinder.objects.volume_attachment.VolumeAttachmentList.' 
- 'get_all_by_volume_id') - @mock.patch('cinder.objects.consistencygroup.ConsistencyGroup.get_by_id') - @mock.patch('cinder.objects.snapshot.SnapshotList.get_all_for_volume') - def test_obj_load_attr(self, mock_sl_get_all_for_volume, mock_cg_get_by_id, - mock_va_get_all_by_vol, mock_vt_get_by_id, - mock_admin_metadata_get, mock_glance_metadata_get, - mock_metadata_get): - fake_db_volume = fake_volume.fake_db_volume( - consistencygroup_id=fake.CONSISTENCY_GROUP_ID) - volume = objects.Volume._from_db_object( - self.context, objects.Volume(), fake_db_volume) - - # Test metadata lazy-loaded field - metadata = {'foo': 'bar'} - mock_metadata_get.return_value = metadata - self.assertEqual(metadata, volume.metadata) - mock_metadata_get.assert_called_once_with(self.context, volume.id) - - # Test glance_metadata lazy-loaded field - glance_metadata = [{'key': 'foo', 'value': 'bar'}] - mock_glance_metadata_get.return_value = glance_metadata - self.assertEqual({'foo': 'bar'}, volume.glance_metadata) - mock_glance_metadata_get.assert_called_once_with( - self.context, volume.id) - - # Test volume_type lazy-loaded field - # Case1. volume.volume_type_id = None - self.assertIsNone(volume.volume_type) - - # Case2. 
volume2.volume_type_id = 1 - fake2 = fake_volume.fake_db_volume() - fake2.update({'volume_type_id': fake.VOLUME_ID}) - volume2 = objects.Volume._from_db_object( - self.context, objects.Volume(), fake2) - volume_type = objects.VolumeType(context=self.context, - id=fake.VOLUME_TYPE_ID) - mock_vt_get_by_id.return_value = volume_type - self.assertEqual(volume_type, volume2.volume_type) - mock_vt_get_by_id.assert_called_once_with(self.context, - volume2.volume_type_id) - - # Test consistencygroup lazy-loaded field - consistencygroup = objects.ConsistencyGroup( - context=self.context, id=fake.CONSISTENCY_GROUP_ID) - mock_cg_get_by_id.return_value = consistencygroup - self.assertEqual(consistencygroup, volume.consistencygroup) - mock_cg_get_by_id.assert_called_once_with(self.context, - volume.consistencygroup_id) - - # Test snapshots lazy-loaded field - snapshots = objects.SnapshotList(context=self.context, - id=fake.SNAPSHOT_ID) - mock_sl_get_all_for_volume.return_value = snapshots - self.assertEqual(snapshots, volume.snapshots) - mock_sl_get_all_for_volume.assert_called_once_with(self.context, - volume.id) - - # Test volume_attachment lazy-loaded field - va_objs = [objects.VolumeAttachment(context=self.context, id=i) - for i in [fake.OBJECT_ID, fake.OBJECT2_ID, fake.OBJECT3_ID]] - va_list = objects.VolumeAttachmentList(context=self.context, - objects=va_objs) - mock_va_get_all_by_vol.return_value = va_list - self.assertEqual(va_list, volume.volume_attachment) - mock_va_get_all_by_vol.assert_called_once_with(self.context, volume.id) - - # Test admin_metadata lazy-loaded field - user context - adm_metadata = {'bar': 'foo'} - mock_admin_metadata_get.return_value = adm_metadata - self.assertEqual({}, volume.admin_metadata) - self.assertFalse(mock_admin_metadata_get.called) - - # Test admin_metadata lazy-loaded field - admin context - adm_context = self.context.elevated() - volume = objects.Volume._from_db_object(adm_context, objects.Volume(), - fake_volume.fake_db_volume()) 
- adm_metadata = {'bar': 'foo'} - mock_admin_metadata_get.return_value = adm_metadata - self.assertEqual(adm_metadata, volume.admin_metadata) - mock_admin_metadata_get.assert_called_once_with(adm_context, volume.id) - - @mock.patch('cinder.objects.consistencygroup.ConsistencyGroup.get_by_id') - def test_obj_load_attr_cgroup_not_exist(self, mock_cg_get_by_id): - fake_db_volume = fake_volume.fake_db_volume(consistencygroup_id=None) - volume = objects.Volume._from_db_object( - self.context, objects.Volume(), fake_db_volume) - - self.assertIsNone(volume.consistencygroup) - mock_cg_get_by_id.assert_not_called() - - @mock.patch('cinder.objects.group.Group.get_by_id') - def test_obj_load_attr_group_not_exist(self, mock_group_get_by_id): - fake_db_volume = fake_volume.fake_db_volume(group_id=None) - volume = objects.Volume._from_db_object( - self.context, objects.Volume(), fake_db_volume) - - self.assertIsNone(volume.group) - mock_group_get_by_id.assert_not_called() - - def test_from_db_object_with_all_expected_attributes(self): - expected_attrs = ['metadata', 'admin_metadata', 'glance_metadata', - 'volume_type', 'volume_attachment', - 'consistencygroup'] - - db_metadata = [{'key': 'foo', 'value': 'bar'}] - db_admin_metadata = [{'key': 'admin_foo', 'value': 'admin_bar'}] - db_glance_metadata = [{'key': 'glance_foo', 'value': 'glance_bar'}] - db_volume_type = fake_volume.fake_db_volume_type() - db_volume_attachments = fake_volume.fake_db_volume_attachment() - db_consistencygroup = fake_consistencygroup.fake_db_consistencygroup() - db_snapshots = fake_snapshot.fake_db_snapshot() - - db_volume = fake_volume.fake_db_volume( - volume_metadata=db_metadata, - volume_admin_metadata=db_admin_metadata, - volume_glance_metadata=db_glance_metadata, - volume_type=db_volume_type, - volume_attachment=[db_volume_attachments], - consistencygroup=db_consistencygroup, - snapshots=[db_snapshots], - ) - volume = objects.Volume._from_db_object(self.context, objects.Volume(), - db_volume, 
expected_attrs) - - self.assertEqual({'foo': 'bar'}, volume.metadata) - self.assertEqual({'admin_foo': 'admin_bar'}, volume.admin_metadata) - self.assertEqual({'glance_foo': 'glance_bar'}, volume.glance_metadata) - self._compare(self, db_volume_type, volume.volume_type) - self._compare(self, db_volume_attachments, volume.volume_attachment) - self._compare(self, db_consistencygroup, volume.consistencygroup) - self._compare(self, db_snapshots, volume.snapshots) - - @mock.patch('cinder.db.volume_glance_metadata_get', return_value={}) - @mock.patch('cinder.db.sqlalchemy.api.volume_get') - def test_refresh(self, volume_get, volume_metadata_get): - db_volume1 = fake_volume.fake_db_volume() - db_volume2 = db_volume1.copy() - db_volume2['display_name'] = 'foobar' - - # On the second volume_get, return the volume with an updated - # display_name - volume_get.side_effect = [db_volume1, db_volume2] - volume = objects.Volume.get_by_id(self.context, fake.VOLUME_ID) - self._compare(self, db_volume1, volume) - - # display_name was updated, so a volume refresh should have a new value - # for that field - volume.refresh() - self._compare(self, db_volume2, volume) - if six.PY3: - call_bool = mock.call.__bool__() - else: - call_bool = mock.call.__nonzero__() - volume_get.assert_has_calls([mock.call(self.context, fake.VOLUME_ID), - call_bool, - mock.call(self.context, fake.VOLUME_ID)]) - - def test_metadata_aliases(self): - volume = objects.Volume(context=self.context) - # metadata<->volume_metadata - volume.metadata = {'abc': 'def'} - self.assertEqual([{'key': 'abc', 'value': 'def'}], - volume.volume_metadata) - - md = [{'key': 'def', 'value': 'abc'}] - volume.volume_metadata = md - self.assertEqual({'def': 'abc'}, volume.metadata) - - # admin_metadata<->volume_admin_metadata - volume.admin_metadata = {'foo': 'bar'} - self.assertEqual([{'key': 'foo', 'value': 'bar'}], - volume.volume_admin_metadata) - - volume.volume_admin_metadata = [{'key': 'xyz', 'value': '42'}] - 
self.assertEqual({'xyz': '42'}, volume.admin_metadata) - - # glance_metadata<->volume_glance_metadata - volume.glance_metadata = {'jkl': 'mno'} - self.assertEqual([{'key': 'jkl', 'value': 'mno'}], - volume.volume_glance_metadata) - - volume.volume_glance_metadata = [{'key': 'prs', 'value': 'tuw'}] - self.assertEqual({'prs': 'tuw'}, volume.glance_metadata) - - @mock.patch('cinder.db.volume_metadata_update', return_value={}) - @mock.patch('cinder.db.volume_update') - @ddt.data({'src_vol_type_id': fake.VOLUME_TYPE_ID, - 'dest_vol_type_id': fake.VOLUME_TYPE2_ID}, - {'src_vol_type_id': None, - 'dest_vol_type_id': fake.VOLUME_TYPE2_ID}) - @ddt.unpack - def test_finish_volume_migration(self, volume_update, metadata_update, - src_vol_type_id, dest_vol_type_id): - src_volume_db = fake_volume.fake_db_volume( - **{'id': fake.VOLUME_ID, 'volume_type_id': src_vol_type_id}) - if src_vol_type_id: - src_volume_db['volume_type'] = fake_volume.fake_db_volume_type( - id=src_vol_type_id) - dest_volume_db = fake_volume.fake_db_volume( - **{'id': fake.VOLUME2_ID, 'volume_type_id': dest_vol_type_id}) - if dest_vol_type_id: - dest_volume_db['volume_type'] = fake_volume.fake_db_volume_type( - id=dest_vol_type_id) - expected_attrs = objects.Volume._get_expected_attrs(self.context) - src_volume = objects.Volume._from_db_object( - self.context, objects.Volume(), src_volume_db, - expected_attrs=expected_attrs) - dest_volume = objects.Volume._from_db_object( - self.context, objects.Volume(), dest_volume_db, - expected_attrs=expected_attrs) - updated_dest_volume = src_volume.finish_volume_migration( - dest_volume) - self.assertEqual('deleting', updated_dest_volume.migration_status) - self.assertEqual('migration src for ' + src_volume.id, - updated_dest_volume.display_description) - self.assertEqual(src_volume.id, updated_dest_volume._name_id) - self.assertTrue(volume_update.called) - volume_update.assert_has_calls([ - mock.call(self.context, src_volume.id, mock.ANY), - mock.call(self.context, 
dest_volume.id, mock.ANY)]) - ctxt, vol_id, updates = volume_update.call_args[0] - self.assertNotIn('volume_type', updates) - - # Ensure that the destination volume type has not been overwritten - self.assertEqual(dest_vol_type_id, - getattr(updated_dest_volume, 'volume_type_id')) - # Ignore these attributes, since they were updated by - # finish_volume_migration - ignore_keys = ('id', 'provider_location', '_name_id', - 'migration_status', 'display_description', 'status', - 'volume_glance_metadata', 'volume_type') - - dest_vol_dict = {k: updated_dest_volume[k] for k in - updated_dest_volume.keys() if k not in ignore_keys} - src_vol_dict = {k: src_volume[k] for k in src_volume.keys() - if k not in ignore_keys} - self.assertEqual(src_vol_dict, dest_vol_dict) - - def test_volume_with_metadata_serialize_deserialize_no_changes(self): - updates = {'volume_glance_metadata': [{'key': 'foo', 'value': 'bar'}], - 'expected_attrs': ['glance_metadata']} - volume = fake_volume.fake_volume_obj(self.context, **updates) - serializer = objects.base.CinderObjectSerializer() - serialized_volume = serializer.serialize_entity(self.context, volume) - volume = serializer.deserialize_entity(self.context, serialized_volume) - self.assertDictEqual({}, volume.obj_get_changes()) - - @mock.patch('cinder.db.volume_admin_metadata_update') - @mock.patch('cinder.db.sqlalchemy.api.volume_attach') - def test_begin_attach(self, volume_attach, metadata_update): - volume = fake_volume.fake_volume_obj(self.context) - db_attachment = fake_volume.fake_db_volume_attachment( - volume_id=volume.id, - attach_status=fields.VolumeAttachStatus.ATTACHING) - volume_attach.return_value = db_attachment - metadata_update.return_value = {'attached_mode': 'rw'} - - with mock.patch.object(self.context, 'elevated') as mock_elevated: - mock_elevated.return_value = context.get_admin_context() - attachment = volume.begin_attach("rw") - self.assertIsInstance(attachment, objects.VolumeAttachment) - self.assertEqual(volume.id, 
attachment.volume_id) - self.assertEqual(fields.VolumeAttachStatus.ATTACHING, - attachment.attach_status) - metadata_update.assert_called_once_with(self.context.elevated(), - volume.id, - {'attached_mode': u'rw'}, - True) - self.assertEqual('rw', volume.admin_metadata['attached_mode']) - - @mock.patch('cinder.db.volume_admin_metadata_delete') - @mock.patch('cinder.db.sqlalchemy.api.volume_detached') - @mock.patch('cinder.objects.volume_attachment.VolumeAttachmentList.' - 'get_all_by_volume_id') - def test_volume_detached_with_attachment( - self, volume_attachment_get, - volume_detached, - metadata_delete): - va_objs = [objects.VolumeAttachment(context=self.context, id=i) - for i in [fake.OBJECT_ID, fake.OBJECT2_ID, fake.OBJECT3_ID]] - # As changes are not saved, we need reset it here. Later changes - # will be checked. - for obj in va_objs: - obj.obj_reset_changes() - va_list = objects.VolumeAttachmentList(context=self.context, - objects=va_objs) - va_list.obj_reset_changes() - volume_attachment_get.return_value = va_list - admin_context = context.get_admin_context() - volume = fake_volume.fake_volume_obj( - admin_context, - volume_attachment=va_list, - volume_admin_metadata=[{'key': 'attached_mode', - 'value': 'rw'}]) - self.assertEqual(3, len(volume.volume_attachment)) - volume_detached.return_value = ({'status': 'in-use'}, - {'attached_mode': 'rw'}) - with mock.patch.object(admin_context, 'elevated') as mock_elevated: - mock_elevated.return_value = admin_context - volume.finish_detach(fake.OBJECT_ID) - volume_detached.assert_called_once_with(admin_context, - volume.id, - fake.OBJECT_ID) - metadata_delete.assert_called_once_with(admin_context, - volume.id, - 'attached_mode') - self.assertEqual('in-use', volume.status) - self.assertEqual({}, volume.cinder_obj_get_changes()) - self.assertEqual(2, len(volume.volume_attachment)) - self.assertIsNone(volume.admin_metadata.get('attached_mode')) - - @mock.patch('cinder.db.volume_admin_metadata_delete') - 
@mock.patch('cinder.db.sqlalchemy.api.volume_detached') - @mock.patch('cinder.objects.volume_attachment.VolumeAttachmentList.' - 'get_all_by_volume_id') - def test_volume_detached_without_attachment( - self, volume_attachment_get, volume_detached, metadata_delete): - admin_context = context.get_admin_context() - volume = fake_volume.fake_volume_obj( - admin_context, - volume_admin_metadata=[{'key': 'attached_mode', - 'value': 'rw'}]) - self.assertFalse(volume.obj_attr_is_set('volume_attachment')) - volume_detached.return_value = ({'status': 'in-use'}, None) - with mock.patch.object(admin_context, 'elevated') as mock_elevated: - mock_elevated.return_value = admin_context - volume.finish_detach(fake.OBJECT_ID) - metadata_delete.assert_called_once_with(admin_context, - volume.id, - 'attached_mode') - volume_detached.assert_called_once_with(admin_context, - volume.id, - fake.OBJECT_ID) - self.assertEqual('in-use', volume.status) - self.assertEqual({}, volume.cinder_obj_get_changes()) - self.assertFalse(volume_attachment_get.called) - - @ddt.data('1.6', '1.7') - def test_obj_make_compatible_cluster_added(self, version): - extra_data = {'cluster_name': 'cluster_name', - 'cluster': objects.Cluster()} - volume = objects.Volume(self.context, host='host', **extra_data) - - serializer = ovo_base.CinderObjectSerializer(version) - primitive = serializer.serialize_entity(self.context, volume) - - converted_volume = objects.Volume.obj_from_primitive(primitive) - is_set = version == '1.7' - for key in extra_data: - self.assertEqual(is_set, converted_volume.obj_attr_is_set(key)) - self.assertEqual('host', converted_volume.host) - - @ddt.data('1.9', '1.10') - def test_obj_make_compatible_groups_added(self, version): - extra_data = {'group_id': fake.GROUP_ID, - 'group': objects.Group()} - volume = objects.Volume(self.context, host='host', **extra_data) - - serializer = ovo_base.CinderObjectSerializer(version) - primitive = serializer.serialize_entity(self.context, volume) - - 
converted_volume = objects.Volume.obj_from_primitive(primitive) - is_set = version == '1.10' - for key in extra_data: - self.assertEqual(is_set, converted_volume.obj_attr_is_set(key)) - self.assertEqual('host', converted_volume.host) - - @ddt.data(True, False) - def test_is_replicated(self, result): - volume_type = fake_volume.fake_volume_type_obj(self.context) - volume = fake_volume.fake_volume_obj( - self.context, volume_type_id=volume_type.id) - volume.volume_type = volume_type - with mock.patch.object(volume_type, 'is_replicated', - return_value=result) as is_replicated: - self.assertEqual(result, volume.is_replicated()) - is_replicated.assert_called_once_with() - - def test_is_replicated_no_type(self): - volume = fake_volume.fake_volume_obj( - self.context, volume_type_id=None, volume_type=None) - self.assertFalse(volume.is_replicated()) - - -@ddt.ddt -class TestVolumeList(test_objects.BaseObjectsTestCase): - @mock.patch('cinder.db.volume_get_all') - def test_get_all(self, volume_get_all): - db_volume = fake_volume.fake_db_volume() - volume_get_all.return_value = [db_volume] - - volumes = objects.VolumeList.get_all(self.context, - mock.sentinel.marker, - mock.sentinel.limit, - mock.sentinel.sort_key, - mock.sentinel.sort_dir) - self.assertEqual(1, len(volumes)) - TestVolume._compare(self, db_volume, volumes[0]) - - @mock.patch('cinder.db.volume_get_all_by_host') - def test_get_by_host(self, get_all_by_host): - db_volume = fake_volume.fake_db_volume() - get_all_by_host.return_value = [db_volume] - - volumes = objects.VolumeList.get_all_by_host( - self.context, 'fake-host') - self.assertEqual(1, len(volumes)) - TestVolume._compare(self, db_volume, volumes[0]) - - @mock.patch('cinder.db.volume_get_all_by_group') - def test_get_by_group(self, get_all_by_group): - db_volume = fake_volume.fake_db_volume() - get_all_by_group.return_value = [db_volume] - - volumes = objects.VolumeList.get_all_by_group( - self.context, 'fake-host') - self.assertEqual(1, len(volumes)) - 
TestVolume._compare(self, db_volume, volumes[0]) - - @mock.patch('cinder.db.volume_get_all_by_project') - def test_get_by_project(self, get_all_by_project): - db_volume = fake_volume.fake_db_volume() - get_all_by_project.return_value = [db_volume] - - volumes = objects.VolumeList.get_all_by_project( - self.context, mock.sentinel.project_id, mock.sentinel.marker, - mock.sentinel.limit, mock.sentinel.sorted_keys, - mock.sentinel.sorted_dirs, mock.sentinel.filters) - self.assertEqual(1, len(volumes)) - TestVolume._compare(self, db_volume, volumes[0]) - - @ddt.data(['name_id'], ['__contains__']) - def test_get_by_project_with_sort_key(self, sort_keys): - fake_volume.fake_db_volume() - - self.assertRaises(exception.InvalidInput, - objects.VolumeList.get_all_by_project, - self.context, - self.context.project_id, - sort_keys=sort_keys) - - @mock.patch('cinder.db.volume_include_in_cluster') - def test_include_in_cluster(self, include_mock): - filters = {'host': mock.sentinel.host, - 'cluster_name': mock.sentinel.cluster_name} - cluster = 'new_cluster' - objects.VolumeList.include_in_cluster(self.context, cluster, **filters) - include_mock.assert_called_once_with(self.context, cluster, True, - **filters) - - @mock.patch('cinder.db.volume_include_in_cluster') - def test_include_in_cluster_specify_partial(self, include_mock): - filters = {'host': mock.sentinel.host, - 'cluster_name': mock.sentinel.cluster_name} - cluster = 'new_cluster' - objects.VolumeList.include_in_cluster(self.context, cluster, - mock.sentinel.partial_rename, - **filters) - include_mock.assert_called_once_with(self.context, cluster, - mock.sentinel.partial_rename, - **filters) diff --git a/cinder/tests/unit/objects/test_volume_attachment.py b/cinder/tests/unit/objects/test_volume_attachment.py deleted file mode 100644 index eccbd58c7..000000000 --- a/cinder/tests/unit/objects/test_volume_attachment.py +++ /dev/null @@ -1,149 +0,0 @@ -# Copyright 2015 SimpliVity Corp. 
-# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import ddt -import mock -import six - -from cinder import objects -from cinder.objects import fields -from cinder.tests.unit import fake_constants as fake -from cinder.tests.unit import fake_volume -from cinder.tests.unit import objects as test_objects - - -@ddt.ddt -class TestVolumeAttachment(test_objects.BaseObjectsTestCase): - - @mock.patch('cinder.db.sqlalchemy.api.volume_attachment_get') - def test_get_by_id(self, volume_attachment_get): - db_attachment = fake_volume.fake_db_volume_attachment() - attachment_obj = fake_volume.fake_volume_attachment_obj(self.context) - volume_attachment_get.return_value = db_attachment - attachment = objects.VolumeAttachment.get_by_id(self.context, - fake.ATTACHMENT_ID) - self._compare(self, attachment_obj, attachment) - - @mock.patch('cinder.db.volume_attachment_update') - def test_save(self, volume_attachment_update): - attachment = fake_volume.fake_volume_attachment_obj(self.context) - attachment.attach_status = fields.VolumeAttachStatus.ATTACHING - attachment.save() - volume_attachment_update.assert_called_once_with( - self.context, attachment.id, - {'attach_status': fields.VolumeAttachStatus.ATTACHING}) - - @mock.patch('cinder.db.sqlalchemy.api.volume_attachment_get') - def test_refresh(self, attachment_get): - db_attachment1 = fake_volume.fake_db_volume_attachment() - attachment_obj1 = fake_volume.fake_volume_attachment_obj(self.context) - db_attachment2 = 
db_attachment1.copy() - db_attachment2['mountpoint'] = '/dev/sdc' - attachment_obj2 = fake_volume.fake_volume_attachment_obj( - self.context, mountpoint='/dev/sdc') - - # On the second volume_attachment_get, return the volume attachment - # with an updated mountpoint - attachment_get.side_effect = [db_attachment1, db_attachment2] - attachment = objects.VolumeAttachment.get_by_id(self.context, - fake.ATTACHMENT_ID) - self._compare(self, attachment_obj1, attachment) - - # mountpoint was updated, so a volume attachment refresh should have a - # new value for that field - attachment.refresh() - self._compare(self, attachment_obj2, attachment) - if six.PY3: - call_bool = mock.call.__bool__() - else: - call_bool = mock.call.__nonzero__() - attachment_get.assert_has_calls([mock.call(self.context, - fake.ATTACHMENT_ID), - call_bool, - mock.call(self.context, - fake.ATTACHMENT_ID)]) - - @mock.patch('cinder.db.sqlalchemy.api.volume_attached') - def test_volume_attached(self, volume_attached): - attachment = fake_volume.fake_volume_attachment_obj(self.context) - updated_values = {'mountpoint': '/dev/sda', - 'attach_status': fields.VolumeAttachStatus.ATTACHED, - 'instance_uuid': fake.INSTANCE_ID} - volume_attached.return_value = (fake_volume.fake_db_volume(), - updated_values) - volume = attachment.finish_attach(fake.INSTANCE_ID, - 'fake_host', - '/dev/sda', - 'rw') - self.assertIsInstance(volume, objects.Volume) - volume_attached.assert_called_once_with(mock.ANY, - attachment.id, - fake.INSTANCE_ID, - 'fake_host', - '/dev/sda', - 'rw') - self.assertEqual('/dev/sda', attachment.mountpoint) - self.assertEqual(fake.INSTANCE_ID, attachment.instance_uuid) - self.assertEqual(fields.VolumeAttachStatus.ATTACHED, - attachment.attach_status) - - @ddt.data('1.0', '1.1', '1.2') - def test_obj_make_compatible(self, version): - connection_info = {'field': 'value'} - vol_attach = objects.VolumeAttachment(self.context, - connection_info=connection_info) - primitive = 
vol_attach.obj_to_primitive(version) - converted_vol_attach = objects.VolumeAttachment.obj_from_primitive( - primitive) - if version == '1.2': - self.assertEqual(connection_info, - converted_vol_attach.connection_info) - else: - self.assertFalse(converted_vol_attach.obj_attr_is_set( - 'connection_info')) - - -class TestVolumeAttachmentList(test_objects.BaseObjectsTestCase): - @mock.patch('cinder.db.volume_attachment_get_all_by_volume_id') - def test_get_all_by_volume_id(self, get_used_by_volume_id): - db_attachment = fake_volume.fake_db_volume_attachment() - get_used_by_volume_id.return_value = [db_attachment] - attachment_obj = fake_volume.fake_volume_attachment_obj(self.context) - - attachments = objects.VolumeAttachmentList.get_all_by_volume_id( - self.context, mock.sentinel.volume_id) - self.assertEqual(1, len(attachments)) - TestVolumeAttachment._compare(self, attachment_obj, attachments[0]) - - @mock.patch('cinder.db.volume_attachment_get_all_by_host') - def test_get_all_by_host(self, get_by_host): - db_attachment = fake_volume.fake_db_volume_attachment() - attachment_obj = fake_volume.fake_volume_attachment_obj(self.context) - get_by_host.return_value = [db_attachment] - - attachments = objects.VolumeAttachmentList.get_all_by_host( - self.context, mock.sentinel.host) - self.assertEqual(1, len(attachments)) - TestVolumeAttachment._compare(self, attachment_obj, attachments[0]) - - @mock.patch('cinder.db.volume_attachment_get_all_by_instance_uuid') - def test_get_all_by_instance_uuid(self, get_by_instance_uuid): - db_attachment = fake_volume.fake_db_volume_attachment() - get_by_instance_uuid.return_value = [db_attachment] - attachment_obj = fake_volume.fake_volume_attachment_obj(self.context) - - attachments = objects.VolumeAttachmentList.get_all_by_instance_uuid( - self.context, mock.sentinel.uuid) - self.assertEqual(1, len(attachments)) - TestVolumeAttachment._compare(self, attachment_obj, attachments[0]) diff --git 
a/cinder/tests/unit/objects/test_volume_type.py b/cinder/tests/unit/objects/test_volume_type.py deleted file mode 100644 index bcf523e53..000000000 --- a/cinder/tests/unit/objects/test_volume_type.py +++ /dev/null @@ -1,255 +0,0 @@ -# Copyright 2015 SimpliVity Corp. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import ddt -import mock -from oslo_utils import timeutils -import pytz -import six - -from cinder.db.sqlalchemy import models -from cinder import objects -from cinder.tests.unit import fake_constants as fake -from cinder.tests.unit import fake_volume -from cinder.tests.unit import objects as test_objects - - -@ddt.ddt -class TestVolumeType(test_objects.BaseObjectsTestCase): - - @mock.patch('cinder.db.sqlalchemy.api._volume_type_get_full') - def test_get_by_id(self, volume_type_get): - db_volume_type = fake_volume.fake_db_volume_type() - volume_type_get.return_value = db_volume_type - volume_type = objects.VolumeType.get_by_id(self.context, - fake.VOLUME_TYPE_ID) - self._compare(self, db_volume_type, volume_type) - - @mock.patch('cinder.db.sqlalchemy.api._volume_type_get_full') - def test_get_by_id_with_projects(self, volume_type_get): - projects = [models.VolumeTypeProjects(project_id=fake.PROJECT_ID), - models.VolumeTypeProjects(project_id=fake.PROJECT2_ID)] - db_volume_type = fake_volume.fake_db_volume_type(projects=projects) - volume_type_get.return_value = db_volume_type - volume_type = objects.VolumeType.get_by_id(self.context, - fake.VOLUME_TYPE_ID) - 
db_volume_type['projects'] = [p.project_id for p in projects] - self._compare(self, db_volume_type, volume_type) - - @mock.patch('cinder.db.sqlalchemy.api._volume_type_get_full') - def test_get_by_id_with_string_projects(self, volume_type_get): - projects = [fake.PROJECT_ID, fake.PROJECT2_ID] - db_volume_type = fake_volume.fake_db_volume_type(projects=projects) - volume_type_get.return_value = db_volume_type - volume_type = objects.VolumeType.get_by_id(self.context, - fake.VOLUME_TYPE_ID) - self._compare(self, db_volume_type, volume_type) - - @mock.patch('cinder.db.sqlalchemy.api._volume_type_get_full') - def test_get_by_id_null_spec(self, volume_type_get): - db_volume_type = fake_volume.fake_db_volume_type( - extra_specs={'foo': None}) - volume_type_get.return_value = db_volume_type - volume_type = objects.VolumeType.get_by_id(self.context, - fake.VOLUME_TYPE_ID) - self._compare(self, db_volume_type, volume_type) - - @mock.patch('cinder.volume.volume_types.get_by_name_or_id') - def test_get_by_name_or_id(self, volume_type_get): - db_volume_type = fake_volume.fake_db_volume_type() - volume_type_get.return_value = db_volume_type - volume_type = objects.VolumeType.get_by_name_or_id( - self.context, fake.VOLUME_TYPE_ID) - self._compare(self, db_volume_type, volume_type) - - @ddt.data('1.0', '1.1') - def test_obj_make_compatible(self, version): - volume_type = objects.VolumeType(context=self.context) - volume_type.extra_specs = {'foo': None, 'bar': 'baz'} - volume_type.qos_specs_id = fake.QOS_SPEC_ID - primitive = volume_type.obj_to_primitive(version) - volume_type = objects.VolumeType.obj_from_primitive(primitive) - foo = '' if version == '1.0' else None - self.assertEqual(foo, volume_type.extra_specs['foo']) - self.assertEqual('baz', volume_type.extra_specs['bar']) - self.assertFalse(volume_type.obj_attr_is_set('qos_specs_id')) - - @mock.patch('cinder.volume.volume_types.create') - def test_create(self, volume_type_create): - db_volume_type = 
fake_volume.fake_db_volume_type() - volume_type_create.return_value = db_volume_type - - volume_type = objects.VolumeType(context=self.context) - volume_type.name = db_volume_type['name'] - volume_type.extra_specs = db_volume_type['extra_specs'] - volume_type.is_public = db_volume_type['is_public'] - volume_type.projects = db_volume_type['projects'] - volume_type.description = db_volume_type['description'] - volume_type.create() - - volume_type_create.assert_called_once_with( - self.context, db_volume_type['name'], - db_volume_type['extra_specs'], db_volume_type['is_public'], - db_volume_type['projects'], db_volume_type['description']) - - @mock.patch('cinder.volume.volume_types.update') - def test_save(self, volume_type_update): - db_volume_type = fake_volume.fake_db_volume_type() - volume_type = objects.VolumeType._from_db_object(self.context, - objects.VolumeType(), - db_volume_type) - volume_type.description = 'foobar' - volume_type.save() - volume_type_update.assert_called_once_with(self.context, - volume_type.id, - volume_type.name, - volume_type.description) - - @mock.patch('oslo_utils.timeutils.utcnow', return_value=timeutils.utcnow()) - @mock.patch('cinder.db.sqlalchemy.api.volume_type_destroy') - def test_destroy(self, volume_type_destroy, utcnow_mock): - volume_type_destroy.return_value = { - 'deleted': True, - 'deleted_at': utcnow_mock.return_value} - db_volume_type = fake_volume.fake_db_volume_type() - volume_type = objects.VolumeType._from_db_object(self.context, - objects.VolumeType(), - db_volume_type) - volume_type.destroy() - self.assertTrue(volume_type_destroy.called) - admin_context = volume_type_destroy.call_args[0][0] - self.assertTrue(admin_context.is_admin) - self.assertTrue(volume_type.deleted) - self.assertEqual(utcnow_mock.return_value.replace(tzinfo=pytz.UTC), - volume_type.deleted_at) - - @mock.patch('cinder.db.sqlalchemy.api._volume_type_get_full') - def test_refresh(self, volume_type_get): - db_type1 = 
fake_volume.fake_db_volume_type() - db_type2 = db_type1.copy() - db_type2['description'] = 'foobar' - - # updated description - volume_type_get.side_effect = [db_type1, db_type2] - volume_type = objects.VolumeType.get_by_id(self.context, - fake.VOLUME_TYPE_ID) - self._compare(self, db_type1, volume_type) - - # description was updated, so a volume type refresh should have a new - # value for that field - volume_type.refresh() - self._compare(self, db_type2, volume_type) - if six.PY3: - call_bool = mock.call.__bool__() - else: - call_bool = mock.call.__nonzero__() - volume_type_get.assert_has_calls([mock.call(self.context, - fake.VOLUME_TYPE_ID), - call_bool, - mock.call(self.context, - fake.VOLUME_TYPE_ID)]) - - @mock.patch('cinder.objects.QualityOfServiceSpecs.get_by_id') - @mock.patch('cinder.db.sqlalchemy.api._volume_type_get') - def test_lazy_loading_qos(self, get_mock, qos_get_mock): - qos_get_mock.return_value = objects.QualityOfServiceSpecs( - id=fake.QOS_SPEC_ID) - vol_type = fake_volume.fake_db_volume_type( - qos_specs_id=fake.QOS_SPEC_ID) - get_mock.return_value = vol_type - - volume_type = objects.VolumeType.get_by_id(self.context, - vol_type['id']) - self._compare(self, qos_get_mock.return_value, volume_type.qos_specs) - qos_get_mock.assert_called_once_with(self.context, fake.QOS_SPEC_ID) - - @mock.patch('cinder.db.volume_type_access_get_all') - @mock.patch('cinder.db.sqlalchemy.api._volume_type_get') - def test_lazy_loading_projects(self, get_mock, get_projects_mock): - vol_type = fake_volume.fake_db_volume_type( - qos_specs_id=fake.QOS_SPEC_ID) - get_mock.return_value = vol_type - - projects = [models.VolumeTypeProjects(project_id=fake.PROJECT_ID), - models.VolumeTypeProjects(project_id=fake.PROJECT2_ID)] - get_projects_mock.return_value = projects - - volume_type = objects.VolumeType.get_by_id(self.context, - vol_type['id']) - # Simulate this type has been loaded by a volume get_all method - del volume_type.projects - - self.assertEqual([p.project_id 
for p in projects], - volume_type.projects) - get_projects_mock.assert_called_once_with(self.context, vol_type['id']) - - @mock.patch('cinder.db.volume_type_extra_specs_get') - @mock.patch('cinder.db.sqlalchemy.api._volume_type_get') - def test_lazy_loading_extra_specs(self, get_mock, get_specs_mock): - get_specs_mock.return_value = {'key': 'value', 'key2': 'value2'} - vol_type = fake_volume.fake_db_volume_type( - qos_specs_id=fake.QOS_SPEC_ID) - get_mock.return_value = vol_type - - volume_type = objects.VolumeType.get_by_id(self.context, - vol_type['id']) - # Simulate this type has been loaded by a volume get_all method - del volume_type.extra_specs - - self.assertEqual(get_specs_mock.return_value, volume_type.extra_specs) - get_specs_mock.assert_called_once_with(self.context, vol_type['id']) - - @ddt.data(' True', ' true', ' yes') - def test_is_replicated_true(self, enabled): - volume_type = fake_volume.fake_volume_type_obj( - self.context, extra_specs={'replication_enabled': enabled}) - self.assertTrue(volume_type.is_replicated()) - - def test_is_replicated_no_specs(self): - volume_type = fake_volume.fake_volume_type_obj( - self.context, extra_specs={}) - self.assertFalse(volume_type.is_replicated()) - - @ddt.data(' False', ' false', ' f', 'baddata', 'bad data') - def test_is_replicated_specs_false(self, not_enabled): - volume_type = fake_volume.fake_volume_type_obj( - self.context, extra_specs={'replication_enabled': not_enabled}) - self.assertFalse(volume_type.is_replicated()) - - -class TestVolumeTypeList(test_objects.BaseObjectsTestCase): - @mock.patch('cinder.volume.volume_types.get_all_types') - def test_get_all(self, get_all_types): - db_volume_type = fake_volume.fake_db_volume_type() - get_all_types.return_value = {db_volume_type['name']: db_volume_type} - - volume_types = objects.VolumeTypeList.get_all(self.context) - self.assertEqual(1, len(volume_types)) - TestVolumeType._compare(self, db_volume_type, volume_types[0]) - - 
@mock.patch('cinder.volume.volume_types.get_all_types') - def test_get_all_with_pagination(self, get_all_types): - db_volume_type = fake_volume.fake_db_volume_type() - get_all_types.return_value = {db_volume_type['name']: db_volume_type} - - volume_types = objects.VolumeTypeList.get_all(self.context, - filters={'is_public': - True}, - marker=None, - limit=1, - sort_keys='id', - sort_dirs='desc', - offset=None) - self.assertEqual(1, len(volume_types)) - TestVolumeType._compare(self, db_volume_type, volume_types[0]) diff --git a/cinder/tests/unit/policy.json b/cinder/tests/unit/policy.json deleted file mode 100644 index 0912de6b2..000000000 --- a/cinder/tests/unit/policy.json +++ /dev/null @@ -1,165 +0,0 @@ -{ - "context_is_admin": "role:admin", - "admin_api": "is_admin:True", - "admin_or_owner": "is_admin:True or project_id:%(project_id)s", - - "volume:create": "", - "volume:create_from_image": "", - "volume:get": "rule:admin_or_owner", - "volume:get_all": "", - "volume:get_volume_metadata": "", - "volume:get_volume_image_metadata": "", - "volume:create_volume_metadata": "", - "volume:delete_volume_metadata": "", - "volume:update_volume_metadata": "", - "volume:get_volume_admin_metadata": "rule:admin_api", - "volume:update_volume_admin_metadata": "rule:admin_api", - "volume:delete": "", - "volume:force_delete": "rule:admin_api", - "volume:update": "", - "volume:attach": "", - "volume:detach": "", - "volume:reserve_volume": "", - "volume:unreserve_volume": "", - "volume:begin_detaching": "", - "volume:roll_detaching": "", - "volume:initialize_connection": "", - "volume:terminate_connection": "", - "volume:create_snapshot": "", - "volume:delete_snapshot": "", - "volume:get_snapshot": "", - "volume:get_all_snapshots": "", - "volume:update_snapshot": "", - "volume:get_snapshot_metadata": "", - "volume:delete_snapshot_metadata": "", - "volume:update_snapshot_metadata": "", - "volume:extend": "", - "volume:extend_attached_volume": "", - "volume:migrate_volume": 
"rule:admin_api", - "volume:migrate_volume_completion": "rule:admin_api", - "volume:update_readonly_flag": "", - "volume:retype": "", - "volume:copy_volume_to_image": "", - "volume:failover_host": "rule:admin_api", - "volume:freeze_host": "rule:admin_api", - "volume:thaw_host": "rule:admin_api", - "volume:revert_to_snapshot": "", - "volume_extension:volume_admin_actions:reset_status": "rule:admin_api", - "volume_extension:snapshot_admin_actions:reset_status": "rule:admin_api", - "volume_extension:backup_admin_actions:reset_status": "rule:admin_api", - "volume_extension:backup_admin_actions:force_delete": "rule:admin_api", - "volume_extension:volume_admin_actions:force_delete": "rule:admin_api", - "volume_extension:snapshot_admin_actions:force_delete": "rule:admin_api", - "volume_extension:volume_admin_actions:force_detach": "rule:admin_api", - "volume_extension:volume_admin_actions:migrate_volume": "rule:admin_api", - "volume_extension:volume_admin_actions:migrate_volume_completion": "rule:admin_api", - "volume_extension:volume_actions:upload_image": "", - "volume_extension:volume_actions:upload_public": "rule:admin_api", - "volume_extension:types_manage": "", - "volume_extension:types_extra_specs:create": "", - "volume_extension:types_extra_specs:delete": "", - "volume_extension:types_extra_specs:index": "", - "volume_extension:types_extra_specs:show": "", - "volume_extension:types_extra_specs:update": "", - "volume_extension:access_types_qos_specs_id": "rule:admin_api", - "volume_extension:access_types_extra_specs": "rule:admin_api", - "volume_extension:volume_type_access": "", - "volume_extension:volume_type_access:addProjectAccess": "rule:admin_api", - "volume_extension:volume_type_access:removeProjectAccess": "rule:admin_api", - "volume_extension:volume_type_encryption": "rule:admin_api", - "volume_extension:volume_encryption_metadata": "rule:admin_or_owner", - "volume_extension:qos_specs_manage:create": "rule:admin_api", - 
"volume_extension:qos_specs_manage:get": "rule:admin_api", - "volume_extension:qos_specs_manage:get_all": "rule:admin_api", - "volume_extension:qos_specs_manage:update": "rule:admin_api", - "volume_extension:qos_specs_manage:delete": "rule:admin_api", - "volume_extension:extended_snapshot_attributes": "", - "volume_extension:volume_image_metadata": "", - "volume_extension:volume_host_attribute": "rule:admin_api", - "volume_extension:volume_tenant_attribute": "rule:admin_api", - "volume_extension:volume_mig_status_attribute": "rule:admin_api", - "volume_extension:hosts": "rule:admin_api", - "volume_extension:quotas:show": "", - "volume_extension:quotas:update": "rule:admin_api", - "volume_extension:quotas:delete": "rule:admin_api", - "volume_extension:quota_classes": "rule:admin_api", - "volume_extension:services:index": "", - "volume_extension:services:update" : "rule:admin_api", - "volume_extension:volume_manage": "rule:admin_api", - "volume_extension:volume_unmanage": "rule:admin_api", - "volume_extension:list_manageable": "rule:admin_api", - "volume_extension:capabilities": "rule:admin_api", - - "limits_extension:used_limits": "", - - "snapshot_extension:snapshot_actions:update_snapshot_status": "", - "snapshot_extension:snapshot_manage": "rule:admin_api", - "snapshot_extension:snapshot_unmanage": "rule:admin_api", - "snapshot_extension:list_manageable": "rule:admin_api", - - "volume:create_transfer": "", - "volume:accept_transfer": "", - "volume:delete_transfer": "", - "volume:get_transfer": "", - "volume:get_all_transfers": "", - - "backup:create" : "", - "backup:delete": "", - "backup:get": "", - "backup:get_all": "", - "backup:restore": "", - "backup:backup-import": "rule:admin_api", - "backup:backup-export": "rule:admin_api", - "backup:update": "rule:admin_or_owner", - "backup:backup_project_attribute": "rule:admin_api", - - "volume:attachment_create": "", - "volume:attachment_update": "rule:admin_or_owner", - "volume:attachment_delete": 
"rule:admin_or_owner", - - "consistencygroup:create" : "", - "consistencygroup:delete": "", - "consistencygroup:update": "", - "consistencygroup:get": "", - "consistencygroup:get_all": "", - - "consistencygroup:create_cgsnapshot" : "", - "consistencygroup:delete_cgsnapshot": "", - "consistencygroup:get_cgsnapshot": "", - "consistencygroup:get_all_cgsnapshots": "", - - "group:group_types_manage": "rule:admin_api", - "group:group_types_specs": "rule:admin_api", - "group:access_group_types_specs": "rule:admin_api", - "group:group_type_access": "rule:admin_or_owner", - - "group:create" : "", - "group:delete": "", - "group:update": "", - "group:get": "", - "group:get_all": "", - - "group:create_group_snapshot": "", - "group:delete_group_snapshot": "", - "group:update_group_snapshot": "", - "group:get_group_snapshot": "", - "group:get_all_group_snapshots": "", - "group:reset_group_snapshot_status":"", - "group:reset_status":"", - "group:enable_replication": "", - "group:disable_replication": "", - "group:failover_replication": "", - "group:list_replication_targets": "", - - "scheduler_extension:scheduler_stats:get_pools" : "rule:admin_api", - - "message:delete": "rule:admin_or_owner", - "message:get": "rule:admin_or_owner", - "message:get_all": "rule:admin_or_owner", - - "clusters:get": "rule:admin_api", - "clusters:get_all": "rule:admin_api", - "clusters:update": "rule:admin_api", - - "workers:cleanup": "rule:admin_api" -} diff --git a/cinder/tests/unit/scheduler/__init__.py b/cinder/tests/unit/scheduler/__init__.py deleted file mode 100644 index e69de29bb..000000000 diff --git a/cinder/tests/unit/scheduler/fake_hosts.py b/cinder/tests/unit/scheduler/fake_hosts.py deleted file mode 100644 index 04660c976..000000000 --- a/cinder/tests/unit/scheduler/fake_hosts.py +++ /dev/null @@ -1,51 +0,0 @@ -# Copyright 2012 Intel Inc, OpenStack Foundation. -# All Rights Reserved. 
-# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -""" -Fakes For filters tests. -""" - - -class FakeHostManager(object): - """Defines fake hosts. - - host1: free_ram_mb=1024-512-512=0, free_disk_gb=1024-512-512=0 - host2: free_ram_mb=2048-512=1536 free_disk_gb=2048-512=1536 - host3: free_ram_mb=4096-1024=3072 free_disk_gb=4096-1024=3072 - host4: free_ram_mb=8192 free_disk_gb=8192 - """ - - def __init__(self): - self.service_states = { - 'host1': { - 'compute': {'host_memory_free': 1073741824}, - }, - 'host2': { - 'compute': {'host_memory_free': 2147483648}, - }, - 'host3': { - 'compute': {'host_memory_free': 3221225472}, - }, - 'host4': { - 'compute': {'host_memory_free': 999999999}, - }, - } - - -class FakeHostState(object): - def __init__(self, host, attribute_dict): - self.host = host - for (key, val) in attribute_dict.items(): - setattr(self, key, val) diff --git a/cinder/tests/unit/scheduler/fakes.py b/cinder/tests/unit/scheduler/fakes.py deleted file mode 100644 index 532815c00..000000000 --- a/cinder/tests/unit/scheduler/fakes.py +++ /dev/null @@ -1,167 +0,0 @@ -# Copyright 2011 OpenStack Foundation -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. 
You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -""" -Fakes For Scheduler tests. -""" - -from oslo_utils import timeutils -from oslo_utils import uuidutils - -from cinder.scheduler import filter_scheduler -from cinder.scheduler import host_manager - - -UTC_NOW = timeutils.utcnow() - - -class FakeFilterScheduler(filter_scheduler.FilterScheduler): - def __init__(self, *args, **kwargs): - super(FakeFilterScheduler, self).__init__(*args, **kwargs) - self.host_manager = host_manager.HostManager() - - -class FakeHostManager(host_manager.HostManager): - def __init__(self): - super(FakeHostManager, self).__init__() - - self.service_states = { - 'host1': {'total_capacity_gb': 1024, - 'free_capacity_gb': 1024, - 'allocated_capacity_gb': 0, - 'provisioned_capacity_gb': 0, - 'max_over_subscription_ratio': 1.0, - 'thin_provisioning_support': False, - 'thick_provisioning_support': True, - 'reserved_percentage': 10, - 'volume_backend_name': 'lvm1', - 'timestamp': UTC_NOW, - 'multiattach': True}, - 'host2': {'total_capacity_gb': 2048, - 'free_capacity_gb': 300, - 'allocated_capacity_gb': 1748, - 'provisioned_capacity_gb': 1748, - 'max_over_subscription_ratio': 1.5, - 'thin_provisioning_support': True, - 'thick_provisioning_support': False, - 'reserved_percentage': 10, - 'volume_backend_name': 'lvm2', - 'timestamp': UTC_NOW}, - 'host3': {'total_capacity_gb': 512, - 'free_capacity_gb': 256, - 'allocated_capacity_gb': 256, - 'provisioned_capacity_gb': 256, - 'max_over_subscription_ratio': 2.0, - 'thin_provisioning_support': False, - 'thick_provisioning_support': True, - 'reserved_percentage': 0, - 'volume_backend_name': 
'lvm3', - 'timestamp': UTC_NOW}, - 'host4': {'total_capacity_gb': 2048, - 'free_capacity_gb': 200, - 'allocated_capacity_gb': 1848, - 'provisioned_capacity_gb': 2047, - 'max_over_subscription_ratio': 1.0, - 'thin_provisioning_support': True, - 'thick_provisioning_support': False, - 'reserved_percentage': 5, - 'volume_backend_name': 'lvm4', - 'timestamp': UTC_NOW, - 'consistent_group_snapshot_enabled': True}, - 'host5': {'total_capacity_gb': 'infinite', - 'free_capacity_gb': 'unknown', - 'allocated_capacity_gb': 1548, - 'provisioned_capacity_gb': 1548, - 'max_over_subscription_ratio': 1.0, - 'thin_provisioning_support': True, - 'thick_provisioning_support': False, - 'reserved_percentage': 5, - 'timestamp': UTC_NOW}, - } - - -class FakeBackendState(host_manager.BackendState): - def __init__(self, host, attribute_dict): - super(FakeBackendState, self).__init__(host, None) - for (key, val) in attribute_dict.items(): - setattr(self, key, val) - - -class FakeNovaClient(object): - class Server(object): - def __init__(self, host): - self.uuid = uuidutils.generate_uuid() - self.host = host - setattr(self, 'OS-EXT-SRV-ATTR:host', host) - - class ServerManager(object): - def __init__(self): - self._servers = [] - - def create(self, host): - self._servers.append(FakeNovaClient.Server(host)) - return self._servers[-1].uuid - - def get(self, server_uuid): - for s in self._servers: - if s.uuid == server_uuid: - return s - return None - - def list(self, detailed=True, search_opts=None): - matching = list(self._servers) - if search_opts: - for opt, val in search_opts.items(): - matching = [m for m in matching - if getattr(m, opt, None) == val] - return matching - - class ListExtResource(object): - def __init__(self, ext_name): - self.name = ext_name - - class ListExtManager(object): - def __init__(self, ext_srv_attr=True): - self.ext_srv_attr = ext_srv_attr - - def show_all(self): - if self.ext_srv_attr: - return [ - FakeNovaClient.ListExtResource('ExtendedServerAttributes')] - 
return [] - - def __init__(self, ext_srv_attr=True): - self.servers = FakeNovaClient.ServerManager() - self.list_extensions = FakeNovaClient.ListExtManager( - ext_srv_attr=ext_srv_attr) - - -def mock_host_manager_db_calls(mock_obj, disabled=None): - services = [ - dict(id=1, host='host1', topic='volume', disabled=False, - availability_zone='zone1', updated_at=timeutils.utcnow()), - dict(id=2, host='host2', topic='volume', disabled=False, - availability_zone='zone1', updated_at=timeutils.utcnow()), - dict(id=3, host='host3', topic='volume', disabled=False, - availability_zone='zone2', updated_at=timeutils.utcnow()), - dict(id=4, host='host4', topic='volume', disabled=False, - availability_zone='zone3', updated_at=timeutils.utcnow()), - dict(id=5, host='host5', topic='volume', disabled=False, - availability_zone='zone3', updated_at=timeutils.utcnow()), - ] - if disabled is None: - mock_obj.return_value = services - else: - mock_obj.return_value = [service for service in services - if service['disabled'] == disabled] diff --git a/cinder/tests/unit/scheduler/test_allocated_capacity_weigher.py b/cinder/tests/unit/scheduler/test_allocated_capacity_weigher.py deleted file mode 100644 index f212f755f..000000000 --- a/cinder/tests/unit/scheduler/test_allocated_capacity_weigher.py +++ /dev/null @@ -1,101 +0,0 @@ -# Copyright 2013 eBay Inc. -# -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -""" -Tests For Allocated Capacity Weigher. 
-""" - -import mock - -from cinder.common import constants -from cinder import context -from cinder.scheduler import weights -from cinder import test -from cinder.tests.unit.scheduler import fakes -from cinder.volume import utils - - -class AllocatedCapacityWeigherTestCase(test.TestCase): - def setUp(self): - super(AllocatedCapacityWeigherTestCase, self).setUp() - self.host_manager = fakes.FakeHostManager() - self.weight_handler = weights.OrderedHostWeightHandler( - 'cinder.scheduler.weights') - - def _get_weighed_host(self, hosts, weight_properties=None): - if weight_properties is None: - weight_properties = {} - return self.weight_handler.get_weighed_objects( - [weights.capacity.AllocatedCapacityWeigher], hosts, - weight_properties)[0] - - @mock.patch('cinder.db.sqlalchemy.api.service_get_all') - def _get_all_backends(self, _mock_service_get_all, disabled=False): - ctxt = context.get_admin_context() - fakes.mock_host_manager_db_calls(_mock_service_get_all, - disabled=disabled) - host_states = self.host_manager.get_all_backend_states(ctxt) - _mock_service_get_all.assert_called_once_with( - ctxt, - None, # backend_match_level - topic=constants.VOLUME_TOPIC, frozen=False, disabled=disabled) - return host_states - - def test_default_of_spreading_first(self): - hostinfo_list = self._get_all_backends() - - # host1: allocated_capacity_gb=0, weight=0 Norm=0.0 - # host2: allocated_capacity_gb=1748, weight=-1748 - # host3: allocated_capacity_gb=256, weight=-256 - # host4: allocated_capacity_gb=1848, weight=-1848 Norm=-1.0 - # host5: allocated_capacity_gb=1548, weight=-1540 - - # so, host1 should win: - weighed_host = self._get_weighed_host(hostinfo_list) - self.assertEqual(0.0, weighed_host.weight) - self.assertEqual( - 'host1', utils.extract_host(weighed_host.obj.host)) - - def test_capacity_weight_multiplier1(self): - self.flags(allocated_capacity_weight_multiplier=1.0) - hostinfo_list = self._get_all_backends() - - # host1: allocated_capacity_gb=0, weight=0 Norm=0.0 - # 
host2: allocated_capacity_gb=1748, weight=1748 - # host3: allocated_capacity_gb=256, weight=256 - # host4: allocated_capacity_gb=1848, weight=1848 Norm=1.0 - # host5: allocated_capacity_gb=1548, weight=1540 - - # so, host4 should win: - weighed_host = self._get_weighed_host(hostinfo_list) - self.assertEqual(1.0, weighed_host.weight) - self.assertEqual( - 'host4', utils.extract_host(weighed_host.obj.host)) - - def test_capacity_weight_multiplier2(self): - self.flags(allocated_capacity_weight_multiplier=-2.0) - hostinfo_list = self._get_all_backends() - - # host1: allocated_capacity_gb=0, weight=0 Norm=0.0 - # host2: allocated_capacity_gb=1748, weight=-3496 - # host3: allocated_capacity_gb=256, weight=-512 - # host4: allocated_capacity_gb=1848, weight=-3696 Norm=-2.0 - # host5: allocated_capacity_gb=1548, weight=-3080 - - # so, host1 should win: - weighed_host = self._get_weighed_host(hostinfo_list) - self.assertEqual(0.0, weighed_host.weight) - self.assertEqual( - 'host1', utils.extract_host(weighed_host.obj.host)) diff --git a/cinder/tests/unit/scheduler/test_base_filter.py b/cinder/tests/unit/scheduler/test_base_filter.py deleted file mode 100644 index cf950b464..000000000 --- a/cinder/tests/unit/scheduler/test_base_filter.py +++ /dev/null @@ -1,174 +0,0 @@ -# Copyright (c) 2013 OpenStack Foundation. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -import mock - -from cinder.scheduler import base_filter -from cinder import test - - -class TestBaseFilter(test.TestCase): - - def setUp(self): - super(TestBaseFilter, self).setUp() - self.filter = base_filter.BaseFilter() - - def test_filter_one_is_called(self): - filters = [1, 2, 3, 4] - filter_properties = {'x': 'y'} - - self.filter._filter_one = mock.Mock() - self.filter._filter_one.side_effect = [False, True, True, False] - calls = [mock.call(i, filter_properties) for i in filters] - - result = list(self.filter.filter_all(filters, filter_properties)) - self.assertEqual([2, 3], result) - self.filter._filter_one.assert_has_calls(calls) - - -class FakeExtension(object): - - def __init__(self, plugin): - self.plugin = plugin - - -class BaseFakeFilter(base_filter.BaseFilter): - pass - - -class FakeFilter1(BaseFakeFilter): - """Derives from BaseFakeFilter and has a fake entry point defined. - - Entry point is returned by fake ExtensionManager. - Should be included in the output of all_classes. - """ - pass - - -class FakeFilter2(BaseFakeFilter): - """Derives from BaseFakeFilter but has no entry point. - - Should be not included in all_classes. - """ - pass - - -class FakeFilter3(base_filter.BaseFilter): - """Does not derive from BaseFakeFilter. - - Should not be included. - """ - pass - - -class FakeFilter4(BaseFakeFilter): - """Derives from BaseFakeFilter and has an entry point. - - Should be included. - """ - pass - - -class FakeFilter5(BaseFakeFilter): - """Derives from BaseFakeFilter but has no entry point. - - Should not be included. 
- """ - run_filter_once_per_request = True - pass - - -class FilterA(base_filter.BaseFilter): - def filter_all(self, list_objs, filter_properties): - # return all but the first object - return list_objs[1:] - - -class FilterB(base_filter.BaseFilter): - def filter_all(self, list_objs, filter_properties): - # return an empty list - return None - - -class FakeExtensionManager(list): - - def __init__(self, namespace): - classes = [FakeFilter1, FakeFilter3, FakeFilter4] - exts = map(FakeExtension, classes) - super(FakeExtensionManager, self).__init__(exts) - self.namespace = namespace - - -class TestBaseFilterHandler(test.TestCase): - - def setUp(self): - super(TestBaseFilterHandler, self).setUp() - self.mock_object(base_filter.base_handler.extension, - 'ExtensionManager', FakeExtensionManager) - self.handler = base_filter.BaseFilterHandler(BaseFakeFilter, - 'fake_filters') - - def test_get_all_classes(self): - # In order for a FakeFilter to be returned by get_all_classes, it has - # to comply with these rules: - # * It must be derived from BaseFakeFilter - # AND - # * It must have a python entrypoint assigned (returned by - # FakeExtensionManager) - expected = [FakeFilter1, FakeFilter4] - result = self.handler.get_all_classes() - self.assertEqual(expected, result) - - def _get_filtered_objects(self, filter_classes, index=0): - filter_objs_initial = [1, 2, 3, 4] - filter_properties = {'x': 'y'} - return self.handler.get_filtered_objects(filter_classes, - filter_objs_initial, - filter_properties, - index) - - @mock.patch.object(FakeFilter4, 'filter_all') - @mock.patch.object(FakeFilter3, 'filter_all', return_value=None) - def test_get_filtered_objects_return_none(self, fake3_filter_all, - fake4_filter_all): - filter_classes = [FakeFilter1, FakeFilter2, FakeFilter3, FakeFilter4] - result = self._get_filtered_objects(filter_classes) - self.assertIsNone(result) - self.assertFalse(fake4_filter_all.called) - - def test_get_filtered_objects(self): - filter_objs_expected = [1, 
2, 3, 4] - filter_classes = [FakeFilter1, FakeFilter2, FakeFilter3, FakeFilter4] - result = self._get_filtered_objects(filter_classes) - self.assertEqual(filter_objs_expected, result) - - def test_get_filtered_objects_with_filter_run_once(self): - filter_objs_expected = [1, 2, 3, 4] - filter_classes = [FakeFilter5] - - with mock.patch.object(FakeFilter5, 'filter_all', - return_value=filter_objs_expected - ) as fake5_filter_all: - result = self._get_filtered_objects(filter_classes) - self.assertEqual(filter_objs_expected, result) - self.assertEqual(1, fake5_filter_all.call_count) - - result = self._get_filtered_objects(filter_classes, index=1) - self.assertEqual(filter_objs_expected, result) - self.assertEqual(1, fake5_filter_all.call_count) - - result = self._get_filtered_objects(filter_classes, index=2) - self.assertEqual(filter_objs_expected, result) - self.assertEqual(1, fake5_filter_all.call_count) diff --git a/cinder/tests/unit/scheduler/test_capacity_weigher.py b/cinder/tests/unit/scheduler/test_capacity_weigher.py deleted file mode 100644 index b7474a2f4..000000000 --- a/cinder/tests/unit/scheduler/test_capacity_weigher.py +++ /dev/null @@ -1,407 +0,0 @@ -# Copyright 2011-2012 OpenStack Foundation -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -""" -Tests For Capacity Weigher. 
-""" -from datetime import datetime - -import ddt -import mock - -from cinder.common import constants -from cinder import context -from cinder.scheduler import weights -from cinder import test -from cinder.tests.unit.scheduler import fakes -from cinder.volume import utils - - -@ddt.ddt -class CapacityWeigherTestCase(test.TestCase): - def setUp(self): - super(CapacityWeigherTestCase, self).setUp() - self.host_manager = fakes.FakeHostManager() - self.weight_handler = weights.OrderedHostWeightHandler( - 'cinder.scheduler.weights') - - def _get_weighed_hosts(self, hosts, weight_properties=None): - if weight_properties is None: - weight_properties = {'size': 1} - return self.weight_handler.get_weighed_objects( - [weights.capacity.CapacityWeigher], - hosts, - weight_properties) - - @mock.patch('cinder.db.sqlalchemy.api.service_get_all') - def _get_all_backends(self, _mock_service_get_all, disabled=False): - ctxt = context.get_admin_context() - fakes.mock_host_manager_db_calls(_mock_service_get_all, - disabled=disabled) - backend_states = self.host_manager.get_all_backend_states(ctxt) - _mock_service_get_all.assert_called_once_with( - ctxt, - None, # backend_match_level - topic=constants.VOLUME_TOPIC, frozen=False, disabled=disabled) - return backend_states - - # If thin and thin_provisioning_support are True, - # use the following formula: - # free = (total * host_state.max_over_subscription_ratio - # - host_state.provisioned_capacity_gb - # - math.floor(total * reserved)) - # Otherwise, use the following formula: - # free = free_space - math.floor(total * reserved) - - @ddt.data( - {'volume_type': {'extra_specs': {'provisioning:type': 'thin'}}, - 'winner': 'host2'}, - {'volume_type': {'extra_specs': {'provisioning:type': 'thick'}}, - 'winner': 'host1'}, - {'volume_type': {'extra_specs': {}}, - 'winner': 'host2'}, - {'volume_type': {}, - 'winner': 'host2'}, - {'volume_type': None, - 'winner': 'host2'}, - ) - @ddt.unpack - def test_default_of_spreading_first(self, 
volume_type, winner): - backend_info_list = self._get_all_backends() - - # Results for the 1st test - # {'provisioning:type': 'thin'}: - # host1: thin_provisioning_support = False - # free_capacity_gb=1024, - # free=1024-math.floor(1024*0.1)=922 - # Norm=0.837837837838 - # host2: thin_provisioning_support = True - # free_capacity_gb=300, - # free=2048*1.5-1748-math.floor(2048*0.1)=1120 - # Norm=1.0 - # host3: thin_provisioning_support = False - # free_capacity_gb=512, free=256-512*0=256 - # Norm=0.292383292383 - # host4: thin_provisioning_support = True - # free_capacity_gb=200, - # free=2048*1.0-2047-math.floor(2048*0.05)=-101 - # Norm=0.0 - # host5: free_capacity_gb=unknown free=-1 - # Norm=0.0819000819001 - - # so, host2 should win: - weight_properties = { - 'size': 1, - 'volume_type': volume_type, - } - weighed_host = self._get_weighed_hosts( - backend_info_list, - weight_properties=weight_properties)[0] - self.assertEqual(1.0, weighed_host.weight) - self.assertEqual(winner, utils.extract_host(weighed_host.obj.host)) - - @ddt.data( - {'volume_type': {'extra_specs': {'provisioning:type': 'thin'}}, - 'winner': 'host4'}, - {'volume_type': {'extra_specs': {'provisioning:type': 'thick'}}, - 'winner': 'host2'}, - {'volume_type': {'extra_specs': {}}, - 'winner': 'host4'}, - {'volume_type': {}, - 'winner': 'host4'}, - {'volume_type': None, - 'winner': 'host4'}, - ) - @ddt.unpack - def test_capacity_weight_multiplier1(self, volume_type, winner): - self.flags(capacity_weight_multiplier=-1.0) - backend_info_list = self._get_all_backends() - - # Results for the 1st test - # {'provisioning:type': 'thin'}: - # host1: thin_provisioning_support = False - # free_capacity_gb=1024, - # free=-(1024-math.floor(1024*0.1))=-922 - # Norm=-0.00829542413701 - # host2: thin_provisioning_support = True - # free_capacity_gb=300, - # free=-(2048*1.5-1748-math.floor(2048*0.1))=-1120 - # Norm=-0.00990099009901 - # host3: thin_provisioning_support = False - # free_capacity_gb=512, 
free=-(256-512*0)=-256 - # Norm=--0.002894884083 - # host4: thin_provisioning_support = True - # free_capacity_gb=200, - # free=-(2048*1.0-2047-math.floor(2048*0.05))=101 - # Norm=0.0 - # host5: free_capacity_gb=unknown free=-float('inf') - # Norm=-1.0 - - # so, host4 should win: - weight_properties = { - 'size': 1, - 'volume_type': volume_type, - } - weighed_host = self._get_weighed_hosts( - backend_info_list, - weight_properties=weight_properties)[0] - self.assertEqual(0.0, weighed_host.weight) - self.assertEqual(winner, utils.extract_host(weighed_host.obj.host)) - - @ddt.data( - {'volume_type': {'extra_specs': {'provisioning:type': 'thin'}}, - 'winner': 'host2'}, - {'volume_type': {'extra_specs': {'provisioning:type': 'thick'}}, - 'winner': 'host1'}, - {'volume_type': {'extra_specs': {}}, - 'winner': 'host2'}, - {'volume_type': {}, - 'winner': 'host2'}, - {'volume_type': None, - 'winner': 'host2'}, - ) - @ddt.unpack - def test_capacity_weight_multiplier2(self, volume_type, winner): - self.flags(capacity_weight_multiplier=2.0) - backend_info_list = self._get_all_backends() - - # Results for the 1st test - # {'provisioning:type': 'thin'}: - # host1: thin_provisioning_support = False - # free_capacity_gb=1024, - # free=(1024-math.floor(1024*0.1))*2=1844 - # Norm=1.67567567568 - # host2: thin_provisioning_support = True - # free_capacity_gb=300, - # free=(2048*1.5-1748-math.floor(2048*0.1))*2=2240 - # Norm=2.0 - # host3: thin_provisioning_support = False - # free_capacity_gb=512, free=(256-512*0)*2=512 - # Norm=0.584766584767 - # host4: thin_provisioning_support = True - # free_capacity_gb=200, - # free=(2048*1.0-2047-math.floor(2048*0.05))*2=-202 - # Norm=0.0 - # host5: free_capacity_gb=unknown free=-2 - # Norm=0.1638001638 - - # so, host2 should win: - weight_properties = { - 'size': 1, - 'volume_type': volume_type, - } - weighed_host = self._get_weighed_hosts( - backend_info_list, - weight_properties=weight_properties)[0] - self.assertEqual(1.0 * 2, 
weighed_host.weight) - self.assertEqual(winner, utils.extract_host(weighed_host.obj.host)) - - def test_capacity_weight_no_unknown_or_infinite(self): - self.flags(capacity_weight_multiplier=-1.0) - del self.host_manager.service_states['host5'] - backend_info_list = self._get_all_backends() - - # host1: thin_provisioning_support = False - # free_capacity_gb=1024, - # free=(1024-math.floor(1024*0.1))=-922 - # Norm=-0.837837837838 - # host2: thin_provisioning_support = True - # free_capacity_gb=300, - # free=(2048*1.5-1748-math.floor(2048*0.1))=-1120 - # Norm=-1.0 - # host3: thin_provisioning_support = False - # free_capacity_gb=512, free=(256-512*0)=-256 - # Norm=-0.292383292383 - # host4: thin_provisioning_support = True - # free_capacity_gb=200, - # free=(2048*1.0-2047-math.floor(2048*0.05))=101 - # Norm=0.0 - - # so, host4 should win: - weighed_hosts = self._get_weighed_hosts(backend_info_list) - best_host = weighed_hosts[0] - self.assertEqual(0.0, best_host.weight) - self.assertEqual('host4', utils.extract_host(best_host.obj.host)) - # and host2 is the worst: - worst_host = weighed_hosts[-1] - self.assertEqual(-1.0, worst_host.weight) - self.assertEqual('host2', utils.extract_host(worst_host.obj.host)) - - def test_capacity_weight_free_unknown(self): - self.flags(capacity_weight_multiplier=-1.0) - self.host_manager.service_states['host5'] = { - 'total_capacity_gb': 3000, - 'free_capacity_gb': 'unknown', - 'allocated_capacity_gb': 1548, - 'provisioned_capacity_gb': 1548, - 'max_over_subscription_ratio': 1.0, - 'thin_provisioning_support': True, - 'thick_provisioning_support': False, - 'reserved_percentage': 5, - 'timestamp': datetime.utcnow()} - backend_info_list = self._get_all_backends() - - # host1: thin_provisioning_support = False - # free_capacity_gb=1024, - # free=(1024-math.floor(1024*0.1))=-922 - # Norm= -0.00829542413701 - # host2: thin_provisioning_support = True - # free_capacity_gb=300, - # free=(2048*1.5-1748-math.floor(2048*0.1))=-1120 - # 
Norm=-0.00990099009901 - # host3: thin_provisioning_support = False - # free_capacity_gb=512, free=(256-512*0)=-256 - # Norm=-0.002894884083 - # host4: thin_provisioning_support = True - # free_capacity_gb=200, - # free=(2048*1.0-2047-math.floor(2048*0.05))=101 - # Norm=0.0 - # host5: free_capacity_gb=unknown free=3000 - # Norm=-1.0 - - # so, host4 should win: - weighed_hosts = self._get_weighed_hosts(backend_info_list) - best_host = weighed_hosts[0] - self.assertEqual(0.0, best_host.weight) - self.assertEqual('host4', utils.extract_host(best_host.obj.host)) - # and host5 is the worst: - worst_host = weighed_hosts[-1] - self.assertEqual(-1.0, worst_host.weight) - self.assertEqual('host5', utils.extract_host(worst_host.obj.host)) - - def test_capacity_weight_cap_unknown(self): - self.flags(capacity_weight_multiplier=-1.0) - self.host_manager.service_states['host5'] = { - 'total_capacity_gb': 'unknown', - 'free_capacity_gb': 3000, - 'allocated_capacity_gb': 1548, - 'provisioned_capacity_gb': 1548, - 'max_over_subscription_ratio': 1.0, - 'thin_provisioning_support': True, - 'thick_provisioning_support': False, - 'reserved_percentage': 5, - 'timestamp': datetime.utcnow()} - backend_info_list = self._get_all_backends() - - # host1: thin_provisioning_support = False - # free_capacity_gb=1024, - # free=(1024-math.floor(1024*0.1))=-922 - # Norm= -0.00829542413701 - # host2: thin_provisioning_support = True - # free_capacity_gb=300, - # free=(2048*1.5-1748-math.floor(2048*0.1))=-1120 - # Norm=-0.00990099009901 - # host3: thin_provisioning_support = False - # free_capacity_gb=512, free=(256-512*0)=-256 - # Norm=-0.002894884083 - # host4: thin_provisioning_support = True - # free_capacity_gb=200, - # free=(2048*1.0-2047-math.floor(2048*0.05))=101 - # Norm=0.0 - # host5: free_capacity_gb=3000 free=unknown - # Norm=-1.0 - - # so, host4 should win: - weighed_hosts = self._get_weighed_hosts(backend_info_list) - best_host = weighed_hosts[0] - self.assertEqual(0.0, 
best_host.weight) - self.assertEqual('host4', utils.extract_host(best_host.obj.host)) - # and host5 is the worst: - worst_host = weighed_hosts[-1] - self.assertEqual(-1.0, worst_host.weight) - self.assertEqual('host5', utils.extract_host(worst_host.obj.host)) - - def test_capacity_weight_free_infinite(self): - self.flags(capacity_weight_multiplier=-1.0) - self.host_manager.service_states['host5'] = { - 'total_capacity_gb': 3000, - 'free_capacity_gb': 'infinite', - 'allocated_capacity_gb': 1548, - 'provisioned_capacity_gb': 1548, - 'max_over_subscription_ratio': 1.0, - 'thin_provisioning_support': True, - 'thick_provisioning_support': False, - 'reserved_percentage': 5, - 'timestamp': datetime.utcnow()} - backend_info_list = self._get_all_backends() - - # host1: thin_provisioning_support = False - # free_capacity_gb=1024, - # free=(1024-math.floor(1024*0.1))=-922 - # Norm= -0.00829542413701 - # host2: thin_provisioning_support = True - # free_capacity_gb=300, - # free=(2048*1.5-1748-math.floor(2048*0.1))=-1120 - # Norm=-0.00990099009901 - # host3: thin_provisioning_support = False - # free_capacity_gb=512, free=(256-512*0)=-256 - # Norm=-0.002894884083 - # host4: thin_provisioning_support = True - # free_capacity_gb=200, - # free=(2048*1.0-2047-math.floor(2048*0.05))=101 - # Norm=0.0 - # host5: free_capacity_gb=infinite free=3000 - # Norm=-1.0 - - # so, host4 should win: - weighed_hosts = self._get_weighed_hosts(backend_info_list) - best_host = weighed_hosts[0] - self.assertEqual(0.0, best_host.weight) - self.assertEqual('host4', utils.extract_host(best_host.obj.host)) - # and host5 is the worst: - worst_host = weighed_hosts[-1] - self.assertEqual(-1.0, worst_host.weight) - self.assertEqual('host5', utils.extract_host(worst_host.obj.host)) - - def test_capacity_weight_cap_infinite(self): - self.flags(capacity_weight_multiplier=-1.0) - self.host_manager.service_states['host5'] = { - 'total_capacity_gb': 'infinite', - 'free_capacity_gb': 3000, - 
'allocated_capacity_gb': 1548, - 'provisioned_capacity_gb': 1548, - 'max_over_subscription_ratio': 1.0, - 'thin_provisioning_support': True, - 'thick_provisioning_support': False, - 'reserved_percentage': 5, - 'timestamp': datetime.utcnow()} - backend_info_list = self._get_all_backends() - - # host1: thin_provisioning_support = False - # free_capacity_gb=1024, - # free=(1024-math.floor(1024*0.1))=-922 - # Norm= -0.00829542413701 - # host2: thin_provisioning_support = True - # free_capacity_gb=300, - # free=(2048*1.5-1748-math.floor(2048*0.1))=-1120 - # Norm=-0.00990099009901 - # host3: thin_provisioning_support = False - # free_capacity_gb=512, free=(256-512*0)=-256 - # Norm=-0.002894884083 - # host4: thin_provisioning_support = True - # free_capacity_gb=200, - # free=(2048*1.0-2047-math.floor(2048*0.05))=101 - # Norm=0.0 - # host5: free_capacity_gb=3000 free=infinite - # Norm=-1.0 - - # so, host4 should win: - weighed_hosts = self._get_weighed_hosts(backend_info_list) - best_host = weighed_hosts[0] - self.assertEqual(0.0, best_host.weight) - self.assertEqual('host4', utils.extract_host(best_host.obj.host)) - # and host5 is the worst: - worst_host = weighed_hosts[-1] - self.assertEqual(-1.0, worst_host.weight) - self.assertEqual('host5', utils.extract_host(worst_host.obj.host)) diff --git a/cinder/tests/unit/scheduler/test_chance_weigher.py b/cinder/tests/unit/scheduler/test_chance_weigher.py deleted file mode 100644 index e99c42d13..000000000 --- a/cinder/tests/unit/scheduler/test_chance_weigher.py +++ /dev/null @@ -1,65 +0,0 @@ -# Copyright (C) 2013 Hewlett-Packard Development Company, L.P. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. 
You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -""" -Tests For Chance Weigher. -""" - -import mock - -from cinder.scheduler import host_manager -from cinder.scheduler.weights import chance -from cinder import test - - -class ChanceWeigherTestCase(test.TestCase): - def fake_random(self, reset=False): - if reset: - self.not_random_float = 0.0 - else: - self.not_random_float += 1.0 - return self.not_random_float - - @mock.patch('random.random') - def test_chance_weigher(self, _mock_random): - # stub random.random() to verify the ChanceWeigher - # is using random.random() (repeated calls to weigh should - # return incrementing weights) - weigher = chance.ChanceWeigher() - _mock_random.side_effect = self.fake_random - self.fake_random(reset=True) - host_state = {'host': 'host.example.com', 'free_capacity_gb': 99999} - weight = weigher._weigh_object(host_state, None) - self.assertEqual(1.0, weight) - weight = weigher._weigh_object(host_state, None) - self.assertEqual(2.0, weight) - weight = weigher._weigh_object(host_state, None) - self.assertEqual(3.0, weight) - - def test_host_manager_choosing_chance_weigher(self): - # ensure HostManager can load the ChanceWeigher - # via the entry points mechanism - hm = host_manager.HostManager() - weighers = hm._choose_backend_weighers('ChanceWeigher') - self.assertEqual(1, len(weighers)) - self.assertEqual(weighers[0], chance.ChanceWeigher) - - def test_use_of_chance_weigher_via_host_manager(self): - # ensure we don't lose any hosts when weighing with - # the ChanceWeigher - hm = host_manager.HostManager() - fake_backends = [host_manager.BackendState('fake_be%s' % x, 
None) - for x in range(1, 5)] - weighed_backends = hm.get_weighed_backends(fake_backends, {}, - 'ChanceWeigher') - self.assertEqual(4, len(weighed_backends)) diff --git a/cinder/tests/unit/scheduler/test_filter_scheduler.py b/cinder/tests/unit/scheduler/test_filter_scheduler.py deleted file mode 100644 index 51dbe4615..000000000 --- a/cinder/tests/unit/scheduler/test_filter_scheduler.py +++ /dev/null @@ -1,495 +0,0 @@ -# Copyright 2011 OpenStack Foundation -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -""" -Tests For Filter Scheduler. -""" - -import ddt -import mock - -from cinder import context -from cinder import exception -from cinder import objects -from cinder.scheduler import filter_scheduler -from cinder.scheduler import host_manager -from cinder.tests.unit import fake_constants as fake -from cinder.tests.unit.scheduler import fakes -from cinder.tests.unit.scheduler import test_scheduler -from cinder.volume import utils - - -@ddt.ddt -class FilterSchedulerTestCase(test_scheduler.SchedulerTestCase): - """Test case for Filter Scheduler.""" - - driver_cls = filter_scheduler.FilterScheduler - - def test_create_group_no_hosts(self): - # Ensure empty hosts result in NoValidBackend exception. 
- sched = fakes.FakeFilterScheduler() - - fake_context = context.RequestContext('user', 'project') - request_spec = {'volume_properties': {'project_id': 1, - 'size': 0}, - 'volume_type': {'name': 'Type1', - 'extra_specs': {}}} - request_spec2 = {'volume_properties': {'project_id': 1, - 'size': 0}, - 'volume_type': {'name': 'Type2', - 'extra_specs': {}}} - request_spec_list = [request_spec, request_spec2] - group_spec = {'group_type': {'name': 'GrpType'}, - 'volume_properties': {'project_id': 1, - 'size': 0}} - self.assertRaises(exception.NoValidBackend, - sched.schedule_create_group, - fake_context, 'faki-id1', group_spec, - request_spec_list, {}, []) - - @ddt.data( - {'capabilities:consistent_group_snapshot_enabled': ' True'}, - {'consistent_group_snapshot_enabled': ' True'} - ) - @mock.patch('cinder.db.service_get_all') - def test_schedule_group(self, specs, _mock_service_get_all): - # Make sure _schedule_group() can find host successfully. - sched = fakes.FakeFilterScheduler() - sched.host_manager = fakes.FakeHostManager() - fake_context = context.RequestContext('user', 'project', - is_admin=True) - - fakes.mock_host_manager_db_calls(_mock_service_get_all) - - request_spec = {'volume_properties': {'project_id': 1, - 'size': 0}, - 'volume_type': {'name': 'Type1', - 'extra_specs': specs}} - request_spec2 = {'volume_properties': {'project_id': 1, - 'size': 0}, - 'volume_type': {'name': 'Type2', - 'extra_specs': specs}} - request_spec_list = [request_spec, request_spec2] - group_spec = {'group_type': {'name': 'GrpType'}, - 'volume_properties': {'project_id': 1, - 'size': 0}} - weighed_host = sched._schedule_generic_group(fake_context, - group_spec, - request_spec_list, - {}, []) - self.assertIsNotNone(weighed_host.obj) - self.assertTrue(_mock_service_get_all.called) - - def test_create_volume_no_hosts(self): - # Ensure empty hosts/child_zones result in NoValidBackend exception. 
- sched = fakes.FakeFilterScheduler() - - fake_context = context.RequestContext('user', 'project') - request_spec = {'volume_properties': {'project_id': 1, - 'size': 1}, - 'volume_type': {'name': 'LVM_iSCSI'}, - 'volume_id': fake.VOLUME_ID} - request_spec = objects.RequestSpec.from_primitives(request_spec) - self.assertRaises(exception.NoValidBackend, - sched.schedule_create_volume, fake_context, - request_spec, {}) - - def test_create_volume_no_hosts_invalid_req(self): - sched = fakes.FakeFilterScheduler() - - fake_context = context.RequestContext('user', 'project') - - # request_spec is missing 'volume_id' - request_spec = {'volume_properties': {'project_id': 1, - 'size': 1}, - 'volume_type': {'name': 'LVM_iSCSI'}} - request_spec = objects.RequestSpec.from_primitives(request_spec) - self.assertRaises(exception.NoValidBackend, - sched.schedule_create_volume, - fake_context, - request_spec, - {}) - - def test_create_volume_no_volume_type(self): - sched = fakes.FakeFilterScheduler() - - fake_context = context.RequestContext('user', 'project') - - # request_spec is missing 'volume_type' - request_spec = {'volume_properties': {'project_id': 1, - 'size': 1}, - 'volume_id': fake.VOLUME_ID} - request_spec = objects.RequestSpec.from_primitives(request_spec) - self.assertRaises(exception.NoValidBackend, - sched.schedule_create_volume, - fake_context, - request_spec, - {}) - - @mock.patch('cinder.scheduler.host_manager.HostManager.' - 'get_all_backend_states') - def test_create_volume_non_admin(self, _mock_get_all_backend_states): - # Test creating a volume locally using create_volume, passing - # a non-admin context. DB actions should work. - self.was_admin = False - - def fake_get(ctxt): - # Make sure this is called with admin context, even though - # we're using user context below. 
- self.was_admin = ctxt.is_admin - return {} - - sched = fakes.FakeFilterScheduler() - _mock_get_all_backend_states.side_effect = fake_get - - fake_context = context.RequestContext('user', 'project') - - request_spec = {'volume_properties': {'project_id': 1, - 'size': 1}, - 'volume_type': {'name': 'LVM_iSCSI'}, - 'volume_id': fake.VOLUME_ID} - request_spec = objects.RequestSpec.from_primitives(request_spec) - self.assertRaises(exception.NoValidBackend, - sched.schedule_create_volume, fake_context, - request_spec, {}) - self.assertTrue(self.was_admin) - - @mock.patch('cinder.db.service_get_all') - def test_schedule_happy_day(self, _mock_service_get_all): - # Make sure there's nothing glaringly wrong with _schedule() - # by doing a happy day pass through. - sched = fakes.FakeFilterScheduler() - sched.host_manager = fakes.FakeHostManager() - fake_context = context.RequestContext('user', 'project', - is_admin=True) - - fakes.mock_host_manager_db_calls(_mock_service_get_all) - - request_spec = {'volume_type': {'name': 'LVM_iSCSI'}, - 'volume_properties': {'project_id': 1, - 'size': 1}} - request_spec = objects.RequestSpec.from_primitives(request_spec) - weighed_host = sched._schedule(fake_context, request_spec, {}) - self.assertIsNotNone(weighed_host.obj) - self.assertTrue(_mock_service_get_all.called) - - @mock.patch('cinder.db.service_get_all') - def test_create_volume_clear_host_different_with_group( - self, _mock_service_get_all): - # Ensure we clear those hosts whose backend is not same as - # group's backend. 
- sched = fakes.FakeFilterScheduler() - sched.host_manager = fakes.FakeHostManager() - fakes.mock_host_manager_db_calls(_mock_service_get_all) - fake_context = context.RequestContext('user', 'project') - request_spec = {'volume_properties': {'project_id': 1, - 'size': 1}, - 'volume_type': {'name': 'LVM_iSCSI'}, - 'group_backend': 'host@lvmdriver'} - weighed_host = sched._schedule(fake_context, request_spec, {}) - self.assertIsNone(weighed_host) - - @mock.patch('cinder.db.service_get_all') - def test_create_volume_host_same_as_group(self, _mock_service_get_all): - # Ensure we don't clear the host whose backend is same as - # group's backend. - sched = fakes.FakeFilterScheduler() - sched.host_manager = fakes.FakeHostManager() - fakes.mock_host_manager_db_calls(_mock_service_get_all) - fake_context = context.RequestContext('user', 'project') - request_spec = {'volume_properties': {'project_id': 1, - 'size': 1}, - 'volume_type': {'name': 'LVM_iSCSI'}, - 'group_backend': 'host1'} - weighed_host = sched._schedule(fake_context, request_spec, {}) - self.assertEqual('host1#lvm1', weighed_host.obj.host) - - def test_max_attempts(self): - self.flags(scheduler_max_attempts=4) - - sched = fakes.FakeFilterScheduler() - self.assertEqual(4, sched._max_attempts()) - - def test_invalid_max_attempts(self): - self.flags(scheduler_max_attempts=0) - - self.assertRaises(exception.InvalidParameterValue, - fakes.FakeFilterScheduler) - - def test_retry_disabled(self): - # Retry info should not get populated when re-scheduling is off. - self.flags(scheduler_max_attempts=1) - sched = fakes.FakeFilterScheduler() - - request_spec = {'volume_type': {'name': 'LVM_iSCSI'}, - 'volume_properties': {'project_id': 1, - 'size': 1}} - request_spec = objects.RequestSpec.from_primitives(request_spec) - filter_properties = {} - - sched._schedule(self.context, request_spec, - filter_properties=filter_properties) - - # Should not have retry info in the populated filter properties. 
- self.assertNotIn("retry", filter_properties) - - def test_retry_attempt_one(self): - # Test retry logic on initial scheduling attempt. - self.flags(scheduler_max_attempts=2) - sched = fakes.FakeFilterScheduler() - - request_spec = {'volume_type': {'name': 'LVM_iSCSI'}, - 'volume_properties': {'project_id': 1, - 'size': 1}} - request_spec = objects.RequestSpec.from_primitives(request_spec) - filter_properties = {} - - sched._schedule(self.context, request_spec, - filter_properties=filter_properties) - - num_attempts = filter_properties['retry']['num_attempts'] - self.assertEqual(1, num_attempts) - - def test_retry_attempt_two(self): - # Test retry logic when re-scheduling. - self.flags(scheduler_max_attempts=2) - sched = fakes.FakeFilterScheduler() - - request_spec = {'volume_type': {'name': 'LVM_iSCSI'}, - 'volume_properties': {'project_id': 1, - 'size': 1}} - request_spec = objects.RequestSpec.from_primitives(request_spec) - - retry = dict(num_attempts=1) - filter_properties = dict(retry=retry) - - sched._schedule(self.context, request_spec, - filter_properties=filter_properties) - - num_attempts = filter_properties['retry']['num_attempts'] - self.assertEqual(2, num_attempts) - - def test_retry_exceeded_max_attempts(self): - # Test for necessary explosion when max retries is exceeded. 
- self.flags(scheduler_max_attempts=2) - sched = fakes.FakeFilterScheduler() - - request_spec = {'volume_type': {'name': 'LVM_iSCSI'}, - 'volume_properties': {'project_id': 1, - 'size': 1}} - request_spec = objects.RequestSpec.from_primitives(request_spec) - - retry = dict(num_attempts=2) - filter_properties = dict(retry=retry) - - self.assertRaises(exception.NoValidBackend, sched._schedule, - self.context, request_spec, - filter_properties=filter_properties) - - def test_add_retry_backend(self): - retry = dict(num_attempts=1, backends=[]) - filter_properties = dict(retry=retry) - backend = "fakehost" - - sched = fakes.FakeFilterScheduler() - sched._add_retry_backend(filter_properties, backend) - - backends = filter_properties['retry']['backends'] - self.assertListEqual([backend], backends) - - def test_post_select_populate(self): - # Test addition of certain filter props after a node is selected. - retry = {'backends': [], 'num_attempts': 1} - filter_properties = {'retry': retry} - sched = fakes.FakeFilterScheduler() - - backend_state = host_manager.BackendState('host', None) - backend_state.total_capacity_gb = 1024 - sched._post_select_populate_filter_properties(filter_properties, - backend_state) - - self.assertEqual('host', - filter_properties['retry']['backends'][0]) - - self.assertEqual(1024, backend_state.total_capacity_gb) - - def _backend_passes_filters_setup(self, mock_obj): - sched = fakes.FakeFilterScheduler() - sched.host_manager = fakes.FakeHostManager() - fake_context = context.RequestContext('user', 'project', - is_admin=True) - - fakes.mock_host_manager_db_calls(mock_obj) - - return (sched, fake_context) - - @ddt.data(None, {'name': 'LVM_iSCSI'}) - @mock.patch('cinder.db.service_get_all') - def test_backend_passes_filters_happy_day(self, volume_type, - _mock_service_get_topic): - """Do a successful pass through of with backend_passes_filters().""" - sched, ctx = self._backend_passes_filters_setup( - _mock_service_get_topic) - request_spec = 
{'volume_id': fake.VOLUME_ID, - 'volume_type': volume_type, - 'volume_properties': {'project_id': 1, - 'size': 1, - 'multiattach': True}} - request_spec = objects.RequestSpec.from_primitives(request_spec) - ret_host = sched.backend_passes_filters(ctx, 'host1#lvm1', - request_spec, {}) - self.assertEqual('host1', utils.extract_host(ret_host.host)) - self.assertTrue(_mock_service_get_topic.called) - - @mock.patch('cinder.db.service_get_all') - def test_backend_passes_filters_default_pool_happy_day( - self, _mock_service_get_topic): - """Do a successful pass through of with backend_passes_filters().""" - sched, ctx = self._backend_passes_filters_setup( - _mock_service_get_topic) - request_spec = {'volume_id': fake.VOLUME_ID, - 'volume_type': {'name': 'LVM_iSCSI'}, - 'volume_properties': {'project_id': 1, - 'size': 1}} - request_spec = objects.RequestSpec.from_primitives(request_spec) - ret_host = sched.backend_passes_filters(ctx, 'host5#_pool0', - request_spec, {}) - self.assertEqual('host5', utils.extract_host(ret_host.host)) - self.assertTrue(_mock_service_get_topic.called) - - @mock.patch('cinder.db.service_get_all') - def test_backend_passes_filters_without_pool(self, mock_service_get_all): - """Do a successful pass through of with backend_passes_filters().""" - sched, ctx = self._backend_passes_filters_setup(mock_service_get_all) - request_spec = {'volume_id': fake.VOLUME_ID, - 'volume_type': {'name': 'LVM_iSCSI'}, - 'volume_properties': {'project_id': 1, - 'size': 1}} - request_spec = objects.RequestSpec.from_primitives(request_spec) - ret_host = sched.backend_passes_filters(ctx, 'host1', request_spec, {}) - self.assertEqual('host1', utils.extract_host(ret_host.host)) - self.assertTrue(mock_service_get_all.called) - - @mock.patch('cinder.db.service_get_all') - def test_backend_passes_filters_no_capacity(self, _mock_service_get_topic): - """Fail the host due to insufficient capacity.""" - sched, ctx = self._backend_passes_filters_setup( - _mock_service_get_topic) 
- request_spec = {'volume_id': fake.VOLUME_ID, - 'volume_type': {'name': 'LVM_iSCSI'}, - 'volume_properties': {'project_id': 1, - 'size': 1024}} - request_spec = objects.RequestSpec.from_primitives(request_spec) - self.assertRaises(exception.NoValidBackend, - sched.backend_passes_filters, - ctx, 'host1#lvm1', request_spec, {}) - self.assertTrue(_mock_service_get_topic.called) - - @mock.patch('cinder.db.service_get_all') - def test_retype_policy_never_migrate_pass(self, _mock_service_get_topic): - # Retype should pass if current host passes filters and - # policy=never. host4 doesn't have enough space to hold an additional - # 200GB, but it is already the host of this volume and should not be - # counted twice. - sched, ctx = self._backend_passes_filters_setup( - _mock_service_get_topic) - extra_specs = {'volume_backend_name': 'lvm4'} - request_spec = {'volume_id': fake.VOLUME_ID, - 'volume_type': {'name': 'LVM_iSCSI', - 'extra_specs': extra_specs}, - 'volume_properties': {'project_id': 1, - 'size': 200, - 'host': 'host4#lvm4'}} - request_spec = objects.RequestSpec.from_primitives(request_spec) - host_state = sched.find_retype_backend(ctx, request_spec, - filter_properties={}, - migration_policy='never') - self.assertEqual('host4', utils.extract_host(host_state.host)) - - @mock.patch('cinder.db.service_get_all') - def test_retype_with_pool_policy_never_migrate_pass( - self, _mock_service_get_topic): - # Retype should pass if current host passes filters and - # policy=never. host4 doesn't have enough space to hold an additional - # 200GB, but it is already the host of this volume and should not be - # counted twice. 
- sched, ctx = self._backend_passes_filters_setup( - _mock_service_get_topic) - extra_specs = {'volume_backend_name': 'lvm3'} - request_spec = {'volume_id': fake.VOLUME_ID, - 'volume_type': {'name': 'LVM_iSCSI', - 'extra_specs': extra_specs}, - 'volume_properties': {'project_id': 1, - 'size': 200, - 'host': 'host3#lvm3'}} - request_spec = objects.RequestSpec.from_primitives(request_spec) - host_state = sched.find_retype_backend(ctx, request_spec, - filter_properties={}, - migration_policy='never') - self.assertEqual('host3#lvm3', host_state.host) - - @mock.patch('cinder.db.service_get_all') - def test_retype_policy_never_migrate_fail(self, _mock_service_get_topic): - # Retype should fail if current host doesn't pass filters and - # policy=never. - sched, ctx = self._backend_passes_filters_setup( - _mock_service_get_topic) - extra_specs = {'volume_backend_name': 'lvm1'} - request_spec = {'volume_id': fake.VOLUME_ID, - 'volume_type': {'name': 'LVM_iSCSI', - 'extra_specs': extra_specs}, - 'volume_properties': {'project_id': 1, - 'size': 200, - 'host': 'host4'}} - request_spec = objects.RequestSpec.from_primitives(request_spec) - self.assertRaises(exception.NoValidBackend, sched.find_retype_backend, - ctx, request_spec, filter_properties={}, - migration_policy='never') - - @mock.patch('cinder.db.service_get_all') - def test_retype_policy_demand_migrate_pass(self, _mock_service_get_topic): - # Retype should pass if current host fails filters but another host - # is suitable when policy=on-demand. 
- sched, ctx = self._backend_passes_filters_setup( - _mock_service_get_topic) - extra_specs = {'volume_backend_name': 'lvm1'} - request_spec = {'volume_id': fake.VOLUME_ID, - 'volume_type': {'name': 'LVM_iSCSI', - 'extra_specs': extra_specs}, - 'volume_properties': {'project_id': 1, - 'size': 200, - 'host': 'host4'}} - request_spec = objects.RequestSpec.from_primitives(request_spec) - host_state = sched.find_retype_backend(ctx, request_spec, - filter_properties={}, - migration_policy='on-demand') - self.assertEqual('host1', utils.extract_host(host_state.host)) - - @mock.patch('cinder.db.service_get_all') - def test_retype_policy_demand_migrate_fail(self, _mock_service_get_topic): - # Retype should fail if current host doesn't pass filters and - # no other suitable candidates exist even if policy=on-demand. - sched, ctx = self._backend_passes_filters_setup( - _mock_service_get_topic) - extra_specs = {'volume_backend_name': 'lvm1'} - request_spec = {'volume_id': fake.VOLUME_ID, - 'volume_type': {'name': 'LVM_iSCSI', - 'extra_specs': extra_specs}, - 'volume_properties': {'project_id': 1, - 'size': 2048, - 'host': 'host4'}} - request_spec = objects.RequestSpec.from_primitives(request_spec) - self.assertRaises(exception.NoValidBackend, sched.find_retype_backend, - ctx, request_spec, filter_properties={}, - migration_policy='on-demand') diff --git a/cinder/tests/unit/scheduler/test_goodness_weigher.py b/cinder/tests/unit/scheduler/test_goodness_weigher.py deleted file mode 100644 index 80f14118f..000000000 --- a/cinder/tests/unit/scheduler/test_goodness_weigher.py +++ /dev/null @@ -1,195 +0,0 @@ -# Copyright (C) 2014 Hewlett-Packard Development Company, L.P. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. 
You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -""" -Tests For Goodness Weigher. -""" - -from cinder.scheduler.weights import goodness -from cinder import test -from cinder.tests.unit.scheduler import fakes - - -class GoodnessWeigherTestCase(test.TestCase): - def test_goodness_weigher_with_no_goodness_function(self): - weigher = goodness.GoodnessWeigher() - host_state = fakes.FakeBackendState('host1', { - 'host': 'host.example.com', - 'capabilities': { - 'foo': '50' - } - }) - - weight_properties = {} - weight = weigher._weigh_object(host_state, weight_properties) - self.assertEqual(0, weight) - - def test_goodness_weigher_passing_host(self): - weigher = goodness.GoodnessWeigher() - host_state = fakes.FakeBackendState('host1', { - 'host': 'host.example.com', - 'capabilities': { - 'goodness_function': '100' - } - }) - host_state_2 = fakes.FakeBackendState('host2', { - 'host': 'host2.example.com', - 'capabilities': { - 'goodness_function': '0' - } - }) - host_state_3 = fakes.FakeBackendState('host3', { - 'host': 'host3.example.com', - 'capabilities': { - 'goodness_function': '100 / 2' - } - }) - - weight_properties = {} - weight = weigher._weigh_object(host_state, weight_properties) - self.assertEqual(100, weight) - weight = weigher._weigh_object(host_state_2, weight_properties) - self.assertEqual(0, weight) - weight = weigher._weigh_object(host_state_3, weight_properties) - self.assertEqual(50, weight) - - def test_goodness_weigher_capabilities_substitution(self): - weigher = goodness.GoodnessWeigher() - host_state = fakes.FakeBackendState('host1', { - 'host': 'host.example.com', - 'capabilities': { - 
'foo': 50, - 'goodness_function': '10 + capabilities.foo' - } - }) - - weight_properties = {} - weight = weigher._weigh_object(host_state, weight_properties) - self.assertEqual(60, weight) - - def test_goodness_weigher_extra_specs_substitution(self): - weigher = goodness.GoodnessWeigher() - host_state = fakes.FakeBackendState('host1', { - 'host': 'host.example.com', - 'capabilities': { - 'goodness_function': '10 + extra.foo' - } - }) - - weight_properties = { - 'volume_type': { - 'extra_specs': { - 'foo': 50 - } - } - } - weight = weigher._weigh_object(host_state, weight_properties) - self.assertEqual(60, weight) - - def test_goodness_weigher_volume_substitution(self): - weigher = goodness.GoodnessWeigher() - host_state = fakes.FakeBackendState('host1', { - 'host': 'host.example.com', - 'capabilities': { - 'goodness_function': '10 + volume.foo' - } - }) - - weight_properties = { - 'request_spec': { - 'volume_properties': { - 'foo': 50 - } - } - } - weight = weigher._weigh_object(host_state, weight_properties) - self.assertEqual(60, weight) - - def test_goodness_weigher_qos_substitution(self): - weigher = goodness.GoodnessWeigher() - host_state = fakes.FakeBackendState('host1', { - 'host': 'host.example.com', - 'capabilities': { - 'goodness_function': '10 + qos.foo' - } - }) - - weight_properties = { - 'qos_specs': { - 'foo': 50 - } - } - weight = weigher._weigh_object(host_state, weight_properties) - self.assertEqual(60, weight) - - def test_goodness_weigher_stats_substitution(self): - weigher = goodness.GoodnessWeigher() - host_state = fakes.FakeBackendState('host1', { - 'host': 'host.example.com', - 'capabilities': { - 'goodness_function': 'stats.free_capacity_gb > 20' - }, - 'free_capacity_gb': 50 - }) - - weight_properties = {} - weight = weigher._weigh_object(host_state, weight_properties) - self.assertEqual(100, weight) - - def test_goodness_weigher_invalid_substitution(self): - weigher = goodness.GoodnessWeigher() - host_state = 
fakes.FakeBackendState('host1', { - 'host': 'host.example.com', - 'capabilities': { - 'goodness_function': '10 + stats.my_val' - }, - 'foo': 50 - }) - - weight_properties = {} - weight = weigher._weigh_object(host_state, weight_properties) - self.assertEqual(0, weight) - - def test_goodness_weigher_host_rating_out_of_bounds(self): - weigher = goodness.GoodnessWeigher() - host_state = fakes.FakeBackendState('host1', { - 'host': 'host.example.com', - 'capabilities': { - 'goodness_function': '-10' - } - }) - host_state_2 = fakes.FakeBackendState('host2', { - 'host': 'host2.example.com', - 'capabilities': { - 'goodness_function': '200' - } - }) - - weight_properties = {} - weight = weigher._weigh_object(host_state, weight_properties) - self.assertEqual(0, weight) - weight = weigher._weigh_object(host_state_2, weight_properties) - self.assertEqual(0, weight) - - def test_goodness_weigher_invalid_goodness_function(self): - weigher = goodness.GoodnessWeigher() - host_state = fakes.FakeBackendState('host1', { - 'host': 'host.example.com', - 'capabilities': { - 'goodness_function': '50 / 0' - } - }) - - weight_properties = {} - weight = weigher._weigh_object(host_state, weight_properties) - self.assertEqual(0, weight) diff --git a/cinder/tests/unit/scheduler/test_host_filters.py b/cinder/tests/unit/scheduler/test_host_filters.py deleted file mode 100644 index c9f60d660..000000000 --- a/cinder/tests/unit/scheduler/test_host_filters.py +++ /dev/null @@ -1,1774 +0,0 @@ -# Copyright 2011 OpenStack Foundation # All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the -# License for the specific language governing permissions and limitations -# under the License. -""" -Tests For Scheduler Host Filters. -""" - -import ddt -import mock -from oslo_serialization import jsonutils -from requests import exceptions as request_exceptions - -from cinder.compute import nova -from cinder import context -from cinder import db -from cinder import exception -from cinder.scheduler import filters -from cinder.scheduler.filters import extra_specs_ops -from cinder import test -from cinder.tests.unit import fake_constants as fake -from cinder.tests.unit.scheduler import fakes -from cinder.tests.unit import utils - - -class BackendFiltersTestCase(test.TestCase): - """Test case for backend filters.""" - - def setUp(self): - super(BackendFiltersTestCase, self).setUp() - self.context = context.RequestContext(fake.USER_ID, fake.PROJECT_ID) - # This has a side effect of testing 'get_filter_classes' - # when specifying a method (in this case, our standard filters) - filter_handler = filters.BackendFilterHandler( - 'cinder.scheduler.filters') - classes = filter_handler.get_all_classes() - self.class_map = {} - for cls in classes: - self.class_map[cls.__name__] = cls - - -@ddt.ddt -@mock.patch('cinder.objects.service.Service.is_up', - new_callable=mock.PropertyMock) -class CapacityFilterTestCase(BackendFiltersTestCase): - def setUp(self): - super(CapacityFilterTestCase, self).setUp() - self.json_query = jsonutils.dumps( - ['and', - ['>=', '$free_capacity_gb', 1024], - ['>=', '$total_capacity_gb', 10 * 1024]]) - - def test_filter_passes(self, _mock_serv_is_up): - _mock_serv_is_up.return_value = True - filt_cls = self.class_map['CapacityFilter']() - filter_properties = {'size': 100, - 'request_spec': {'volume_id': fake.VOLUME_ID}} - service = {'disabled': False} - host = fakes.FakeBackendState('host1', - {'total_capacity_gb': 500, - 'free_capacity_gb': 200, - 'updated_at': None, - 'service': service}) - self.assertTrue(filt_cls.backend_passes(host, 
filter_properties)) - - def test_filter_passes_without_volume_id(self, _mock_serv_is_up): - _mock_serv_is_up.return_value = True - filter_cls = self.class_map['CapacityFilter']() - filter_properties = {'size': 100} - service = {'disabled': False} - host = fakes.FakeBackendState('host1', - {'total_capacity_gb': 500, - 'free_capacity_gb': 200, - 'updated_at': None, - 'service': service}) - self.assertTrue(filter_cls.backend_passes(host, filter_properties)) - - def test_filter_current_backend_passes(self, _mock_serv_is_up): - _mock_serv_is_up.return_value = True - filt_cls = self.class_map['CapacityFilter']() - filter_properties = {'size': 100, 'vol_exists_on': 'host1', - 'request_spec': {'volume_id': fake.VOLUME_ID}} - service = {'disabled': False} - host = fakes.FakeBackendState('host1', - {'total_capacity_gb': 100, - 'free_capacity_gb': 10, - 'updated_at': None, - 'service': service}) - self.assertTrue(filt_cls.backend_passes(host, filter_properties)) - - def test_filter_fails(self, _mock_serv_is_up): - _mock_serv_is_up.return_value = True - filt_cls = self.class_map['CapacityFilter']() - filter_properties = {'size': 100, - 'request_spec': {'volume_id': fake.VOLUME_ID}} - service = {'disabled': False} - host = fakes.FakeBackendState('host1', - {'total_capacity_gb': 200, - 'free_capacity_gb': 120, - 'reserved_percentage': 20, - 'updated_at': None, - 'service': service}) - self.assertFalse(filt_cls.backend_passes(host, filter_properties)) - - def test_filter_fails_free_capacity_None(self, _mock_serv_is_up): - _mock_serv_is_up.return_value = True - filt_cls = self.class_map['CapacityFilter']() - filter_properties = {'size': 100, - 'request_spec': {'volume_id': fake.VOLUME_ID}} - service = {'disabled': False} - host = fakes.FakeBackendState('host1', - {'free_capacity_gb': None, - 'updated_at': None, - 'service': service}) - self.assertFalse(filt_cls.backend_passes(host, filter_properties)) - - def test_filter_with_size_0(self, _mock_serv_is_up): - 
_mock_serv_is_up.return_value = True - filt_cls = self.class_map['CapacityFilter']() - filter_properties = {'size': 0, - 'request_spec': {'volume_id': fake.VOLUME_ID}} - service = {'disabled': False} - host = fakes.FakeBackendState('host1', - {'total_capacity_gb': 500, - 'free_capacity_gb': 200, - 'provisioned_capacity_gb': 1500, - 'max_over_subscription_ratio': 2.0, - 'reserved_percentage': 5, - 'thin_provisioning_support': True, - 'thick_provisioning_support': False, - 'updated_at': None, - 'service': service}) - self.assertTrue(filt_cls.backend_passes(host, filter_properties)) - - def test_filter_passes_infinite(self, _mock_serv_is_up): - _mock_serv_is_up.return_value = True - filt_cls = self.class_map['CapacityFilter']() - filter_properties = {'size': 100, - 'request_spec': {'volume_id': fake.VOLUME_ID}} - service = {'disabled': False} - host = fakes.FakeBackendState('host1', - {'free_capacity_gb': 'infinite', - 'updated_at': None, - 'service': service}) - self.assertTrue(filt_cls.backend_passes(host, filter_properties)) - - def test_filter_extend_request(self, _mock_serv_is_up): - _mock_serv_is_up.return_value = True - filt_cls = self.class_map['CapacityFilter']() - filter_properties = {'new_size': 100, 'size': 50, - 'request_spec': {'volume_id': fake.VOLUME_ID}} - service = {'disabled': False} - host = fakes.FakeBackendState('host1', - {'free_capacity_gb': 200, - 'updated_at': None, - 'total_capacity_gb': 500, - 'service': service}) - self.assertTrue(filt_cls.backend_passes(host, filter_properties)) - - def test_filter_extend_request_negative(self, _mock_serv_is_up): - _mock_serv_is_up.return_value = True - filt_cls = self.class_map['CapacityFilter']() - filter_properties = {'size': 50, - 'request_spec': {'volume_id': fake.VOLUME_ID}} - service = {'disabled': False} - host = fakes.FakeBackendState('host1', - {'free_capacity_gb': 49, - 'updated_at': None, - 'total_capacity_gb': 500, - 'service': service}) - self.assertFalse(filt_cls.backend_passes(host, 
filter_properties)) - - def test_filter_passes_unknown(self, _mock_serv_is_up): - _mock_serv_is_up.return_value = True - filt_cls = self.class_map['CapacityFilter']() - filter_properties = {'size': 100, - 'request_spec': {'volume_id': fake.VOLUME_ID}} - service = {'disabled': False} - host = fakes.FakeBackendState('host1', - {'free_capacity_gb': 'unknown', - 'updated_at': None, - 'service': service}) - self.assertTrue(filt_cls.backend_passes(host, filter_properties)) - - def test_filter_passes_total_infinite(self, _mock_serv_is_up): - _mock_serv_is_up.return_value = True - filt_cls = self.class_map['CapacityFilter']() - filter_properties = {'size': 100, - 'request_spec': {'volume_id': fake.VOLUME_ID}} - service = {'disabled': False} - host = fakes.FakeBackendState('host1', - {'free_capacity_gb': 'infinite', - 'total_capacity_gb': 'infinite', - 'reserved_percentage': 0, - 'updated_at': None, - 'service': service}) - self.assertTrue(filt_cls.backend_passes(host, filter_properties)) - - def test_filter_passes_total_unknown(self, _mock_serv_is_up): - _mock_serv_is_up.return_value = True - filt_cls = self.class_map['CapacityFilter']() - filter_properties = {'size': 100, - 'request_spec': {'volume_id': fake.VOLUME_ID}} - service = {'disabled': False} - host = fakes.FakeBackendState('host1', - {'free_capacity_gb': 'unknown', - 'total_capacity_gb': 'unknown', - 'reserved_percentage': 0, - 'updated_at': None, - 'service': service}) - self.assertTrue(filt_cls.backend_passes(host, filter_properties)) - - def test_filter_fails_total_infinite(self, _mock_serv_is_up): - _mock_serv_is_up.return_value = True - filt_cls = self.class_map['CapacityFilter']() - filter_properties = {'size': 100, - 'request_spec': {'volume_id': fake.VOLUME_ID}} - service = {'disabled': False} - host = fakes.FakeBackendState('host1', - {'total_capacity_gb': 'infinite', - 'reserved_percentage': 5, - 'updated_at': None, - 'service': service}) - self.assertFalse(filt_cls.backend_passes(host, 
filter_properties)) - - def test_filter_fails_total_unknown(self, _mock_serv_is_up): - _mock_serv_is_up.return_value = True - filt_cls = self.class_map['CapacityFilter']() - filter_properties = {'size': 100, - 'request_spec': {'volume_id': fake.VOLUME_ID}} - service = {'disabled': False} - host = fakes.FakeBackendState('host1', - {'total_capacity_gb': 'unknown', - 'reserved_percentage': 5, - 'updated_at': None, - 'service': service}) - self.assertFalse(filt_cls.backend_passes(host, filter_properties)) - - def test_filter_fails_total_zero(self, _mock_serv_is_up): - _mock_serv_is_up.return_value = True - filt_cls = self.class_map['CapacityFilter']() - filter_properties = {'size': 100, - 'request_spec': {'volume_id': fake.VOLUME_ID}} - service = {'disabled': False} - host = fakes.FakeBackendState('host1', - {'total_capacity_gb': 0, - 'reserved_percentage': 5, - 'updated_at': None, - 'service': service}) - self.assertFalse(filt_cls.backend_passes(host, filter_properties)) - - def test_filter_thin_true_passes(self, _mock_serv_is_up): - _mock_serv_is_up.return_value = True - filt_cls = self.class_map['CapacityFilter']() - filter_properties = {'size': 100, - 'capabilities:thin_provisioning_support': - ' True', - 'capabilities:thick_provisioning_support': - ' False', - 'request_spec': {'volume_id': fake.VOLUME_ID}} - service = {'disabled': False} - host = fakes.FakeBackendState('host1', - {'total_capacity_gb': 500, - 'free_capacity_gb': 200, - 'provisioned_capacity_gb': 500, - 'max_over_subscription_ratio': 2.0, - 'reserved_percentage': 5, - 'thin_provisioning_support': True, - 'thick_provisioning_support': False, - 'updated_at': None, - 'service': service}) - self.assertTrue(filt_cls.backend_passes(host, filter_properties)) - - def test_filter_thin_true_passes2(self, _mock_serv_is_up): - _mock_serv_is_up.return_value = True - filt_cls = self.class_map['CapacityFilter']() - filter_properties = {'size': 3000, - 'capabilities:thin_provisioning_support': - ' True', - 
'capabilities:thick_provisioning_support': - ' False', - 'request_spec': {'volume_id': fake.VOLUME_ID}} - service = {'disabled': False} - host = fakes.FakeBackendState('host1', - {'total_capacity_gb': 500, - 'free_capacity_gb': 200, - 'provisioned_capacity_gb': 7000, - 'max_over_subscription_ratio': 20, - 'reserved_percentage': 5, - 'thin_provisioning_support': True, - 'thick_provisioning_support': False, - 'updated_at': None, - 'service': service}) - self.assertTrue(filt_cls.backend_passes(host, filter_properties)) - - def test_filter_thin_false_passes(self, _mock_serv_is_up): - _mock_serv_is_up.return_value = True - filt_cls = self.class_map['CapacityFilter']() - filter_properties = {'size': 100, - 'capabilities:thin_provisioning_support': - ' False', - 'capabilities:thick_provisioning_support': - ' True', - 'request_spec': {'volume_id': fake.VOLUME_ID}} - service = {'disabled': False} - # If "thin_provisioning_support" is False, - # "max_over_subscription_ratio" will be ignored. - host = fakes.FakeBackendState('host1', - {'total_capacity_gb': 500, - 'free_capacity_gb': 200, - 'provisioned_capacity_gb': 300, - 'max_over_subscription_ratio': 1.0, - 'reserved_percentage': 5, - 'thin_provisioning_support': False, - 'thick_provisioning_support': True, - 'updated_at': None, - 'service': service}) - self.assertTrue(filt_cls.backend_passes(host, filter_properties)) - - def test_filter_over_subscription_less_than_1(self, _mock_serv_is_up): - _mock_serv_is_up.return_value = True - filt_cls = self.class_map['CapacityFilter']() - filter_properties = {'size': 200, - 'capabilities:thin_provisioning_support': - ' True', - 'capabilities:thick_provisioning_support': - ' False', - 'request_spec': {'volume_id': fake.VOLUME_ID}} - service = {'disabled': False} - host = fakes.FakeBackendState('host1', - {'total_capacity_gb': 500, - 'free_capacity_gb': 100, - 'provisioned_capacity_gb': 400, - 'max_over_subscription_ratio': 0.8, - 'reserved_percentage': 0, - 
'thin_provisioning_support': True, - 'thick_provisioning_support': False, - 'updated_at': None, - 'service': service}) - self.assertFalse(filt_cls.backend_passes(host, filter_properties)) - - def test_filter_over_subscription_equal_to_1(self, _mock_serv_is_up): - _mock_serv_is_up.return_value = True - filt_cls = self.class_map['CapacityFilter']() - filter_properties = {'size': 150, - 'capabilities:thin_provisioning_support': - ' True', - 'capabilities:thick_provisioning_support': - ' False', - 'request_spec': {'volume_id': fake.VOLUME_ID}} - service = {'disabled': False} - host = fakes.FakeBackendState('host1', - {'total_capacity_gb': 500, - 'free_capacity_gb': 200, - 'provisioned_capacity_gb': 400, - 'max_over_subscription_ratio': 1.0, - 'reserved_percentage': 0, - 'thin_provisioning_support': True, - 'thick_provisioning_support': False, - 'updated_at': None, - 'service': service}) - self.assertFalse(filt_cls.backend_passes(host, filter_properties)) - - def test_filter_over_subscription_fails(self, _mock_serv_is_up): - _mock_serv_is_up.return_value = True - filt_cls = self.class_map['CapacityFilter']() - filter_properties = {'size': 100, - 'capabilities:thin_provisioning_support': - ' True', - 'capabilities:thick_provisioning_support': - ' False', - 'request_spec': {'volume_id': fake.VOLUME_ID}} - service = {'disabled': False} - host = fakes.FakeBackendState('host1', - {'total_capacity_gb': 500, - 'free_capacity_gb': 200, - 'provisioned_capacity_gb': 700, - 'max_over_subscription_ratio': 1.5, - 'reserved_percentage': 5, - 'thin_provisioning_support': True, - 'thick_provisioning_support': False, - 'updated_at': None, - 'service': service}) - self.assertFalse(filt_cls.backend_passes(host, filter_properties)) - - def test_filter_over_subscription_fails2(self, _mock_serv_is_up): - _mock_serv_is_up.return_value = True - filt_cls = self.class_map['CapacityFilter']() - filter_properties = {'size': 2000, - 'capabilities:thin_provisioning_support': - ' True', - 
'capabilities:thick_provisioning_support': - ' False', - 'request_spec': {'volume_id': fake.VOLUME_ID}} - service = {'disabled': False} - host = fakes.FakeBackendState('host1', - {'total_capacity_gb': 500, - 'free_capacity_gb': 30, - 'provisioned_capacity_gb': 9000, - 'max_over_subscription_ratio': 20, - 'reserved_percentage': 0, - 'thin_provisioning_support': True, - 'thick_provisioning_support': False, - 'updated_at': None, - 'service': service}) - self.assertFalse(filt_cls.backend_passes(host, filter_properties)) - - def test_filter_reserved_thin_true_fails(self, _mock_serv_is_up): - _mock_serv_is_up.return_value = True - filt_cls = self.class_map['CapacityFilter']() - filter_properties = {'size': 100, - 'capabilities:thin_provisioning_support': - ' True', - 'capabilities:thick_provisioning_support': - ' False', - 'request_spec': {'volume_id': fake.VOLUME_ID}} - service = {'disabled': False} - host = fakes.FakeBackendState('host1', - {'total_capacity_gb': 500, - 'free_capacity_gb': 100, - 'provisioned_capacity_gb': 1000, - 'max_over_subscription_ratio': 2.0, - 'reserved_percentage': 5, - 'thin_provisioning_support': True, - 'thick_provisioning_support': False, - 'updated_at': None, - 'service': service}) - self.assertFalse(filt_cls.backend_passes(host, filter_properties)) - - def test_filter_reserved_thin_false_fails(self, _mock_serv_is_up): - _mock_serv_is_up.return_value = True - filt_cls = self.class_map['CapacityFilter']() - filter_properties = {'size': 100, - 'capabilities:thin_provisioning_support': - ' False', - 'capabilities:thick_provisioning_support': - ' True', - 'request_spec': {'volume_id': fake.VOLUME_ID}} - service = {'disabled': False} - # If "thin_provisioning_support" is False, - # "max_over_subscription_ratio" will be ignored. 
- host = fakes.FakeBackendState('host1', - {'total_capacity_gb': 500, - 'free_capacity_gb': 100, - 'provisioned_capacity_gb': 400, - 'max_over_subscription_ratio': 1.0, - 'reserved_percentage': 5, - 'thin_provisioning_support': False, - 'thick_provisioning_support': True, - 'updated_at': None, - 'service': service}) - self.assertFalse(filt_cls.backend_passes(host, filter_properties)) - - def test_filter_reserved_thin_thick_true_fails(self, _mock_serv_is_up): - _mock_serv_is_up.return_value = True - filt_cls = self.class_map['CapacityFilter']() - filter_properties = {'size': 100, - 'capabilities:thin_provisioning_support': - ' True', - 'capabilities:thick_provisioning_support': - ' True', - 'request_spec': {'volume_id': fake.VOLUME_ID}} - service = {'disabled': False} - host = fakes.FakeBackendState('host1', - {'total_capacity_gb': 500, - 'free_capacity_gb': 0, - 'provisioned_capacity_gb': 800, - 'max_over_subscription_ratio': 2.0, - 'reserved_percentage': 5, - 'thin_provisioning_support': True, - 'thick_provisioning_support': True, - 'updated_at': None, - 'service': service}) - self.assertFalse(filt_cls.backend_passes(host, filter_properties)) - - def test_filter_reserved_thin_thick_true_passes(self, _mock_serv_is_up): - _mock_serv_is_up.return_value = True - filt_cls = self.class_map['CapacityFilter']() - filter_properties = {'size': 100, - 'capabilities:thin_provisioning_support': - ' True', - 'capabilities:thick_provisioning_support': - ' True', - 'request_spec': {'volume_id': fake.VOLUME_ID}} - service = {'disabled': False} - host = fakes.FakeBackendState('host1', - {'total_capacity_gb': 500, - 'free_capacity_gb': 125, - 'provisioned_capacity_gb': 400, - 'max_over_subscription_ratio': 2.0, - 'reserved_percentage': 5, - 'thin_provisioning_support': True, - 'thick_provisioning_support': True, - 'updated_at': None, - 'service': service}) - self.assertTrue(filt_cls.backend_passes(host, filter_properties)) - - def test_filter_reserved_thin_true_passes(self, 
_mock_serv_is_up): - _mock_serv_is_up.return_value = True - filt_cls = self.class_map['CapacityFilter']() - filter_properties = {'size': 100, - 'capabilities:thin_provisioning_support': - ' True', - 'capabilities:thick_provisioning_support': - ' False', - 'request_spec': {'volume_id': fake.VOLUME_ID}} - service = {'disabled': False} - host = fakes.FakeBackendState('host1', - {'total_capacity_gb': 500, - 'free_capacity_gb': 80, - 'provisioned_capacity_gb': 600, - 'max_over_subscription_ratio': 2.0, - 'reserved_percentage': 5, - 'thin_provisioning_support': True, - 'thick_provisioning_support': False, - 'updated_at': None, - 'service': service}) - self.assertTrue(filt_cls.backend_passes(host, filter_properties)) - - def test_filter_reserved_thin_thick_true_fails2(self, _mock_serv_is_up): - _mock_serv_is_up.return_value = True - filt_cls = self.class_map['CapacityFilter']() - filter_properties = {'size': 100, - 'capabilities:thin_provisioning_support': - ' True', - 'capabilities:thick_provisioning_support': - ' True', - 'request_spec': {'volume_id': fake.VOLUME_ID}} - service = {'disabled': False} - host = fakes.FakeBackendState('host1', - {'total_capacity_gb': 500, - 'free_capacity_gb': 99, - 'provisioned_capacity_gb': 1000, - 'max_over_subscription_ratio': 2.0, - 'reserved_percentage': 5, - 'thin_provisioning_support': True, - 'thick_provisioning_support': True, - 'updated_at': None, - 'service': service}) - self.assertFalse(filt_cls.backend_passes(host, filter_properties)) - - def test_filter_reserved_thin_thick_true_passes2(self, _mock_serv_is_up): - _mock_serv_is_up.return_value = True - filt_cls = self.class_map['CapacityFilter']() - filter_properties = {'size': 100, - 'capabilities:thin_provisioning_support': - ' True', - 'capabilities:thick_provisioning_support': - ' True', - 'request_spec': {'volume_id': fake.VOLUME_ID}} - service = {'disabled': False} - host = fakes.FakeBackendState('host1', - {'total_capacity_gb': 500, - 'free_capacity_gb': 100, - 
'provisioned_capacity_gb': 400, - 'max_over_subscription_ratio': 2.0, - 'reserved_percentage': 0, - 'thin_provisioning_support': True, - 'thick_provisioning_support': True, - 'updated_at': None, - 'service': service}) - self.assertTrue(filt_cls.backend_passes(host, filter_properties)) - - @ddt.data( - {'volume_type': {'extra_specs': {'provisioning:type': 'thick'}}}, - {'volume_type': {'extra_specs': {'provisioning:type': 'thin'}}}, - {'volume_type': {'extra_specs': {}}}, - {'volume_type': {}}, - {'volume_type': None}, - ) - @ddt.unpack - def test_filter_provisioning_type(self, _mock_serv_is_up, volume_type): - _mock_serv_is_up.return_value = True - filt_cls = self.class_map['CapacityFilter']() - filter_properties = {'size': 100, - 'volume_type': volume_type, - 'request_spec': {'volume_id': fake.VOLUME_ID}} - service = {'disabled': False} - host = fakes.FakeBackendState('host1', - {'total_capacity_gb': 500, - 'free_capacity_gb': 100, - 'provisioned_capacity_gb': 400, - 'max_over_subscription_ratio': 2.0, - 'reserved_percentage': 0, - 'thin_provisioning_support': True, - 'thick_provisioning_support': True, - 'updated_at': None, - 'service': service}) - self.assertTrue(filt_cls.backend_passes(host, filter_properties)) - - -class AffinityFilterTestCase(BackendFiltersTestCase): - @mock.patch('cinder.objects.service.Service.is_up', - new_callable=mock.PropertyMock) - def test_different_filter_passes(self, _mock_serv_is_up): - _mock_serv_is_up.return_value = True - filt_cls = self.class_map['DifferentBackendFilter']() - service = {'disabled': False} - host = fakes.FakeBackendState('host1:pool0', - {'free_capacity_gb': '1000', - 'updated_at': None, - 'service': service}) - volume = utils.create_volume(self.context, host='host1:pool1') - vol_id = volume.id - - filter_properties = {'context': self.context.elevated(), - 'scheduler_hints': {'different_host': [vol_id], }, - 'request_spec': {'volume_id': fake.VOLUME_ID}} - - self.assertTrue(filt_cls.backend_passes(host, 
filter_properties)) - - @mock.patch('cinder.objects.service.Service.is_up', - new_callable=mock.PropertyMock) - def test_different_filter_legacy_volume_hint_passes( - self, _mock_serv_is_up): - _mock_serv_is_up.return_value = True - filt_cls = self.class_map['DifferentBackendFilter']() - service = {'disabled': False} - host = fakes.FakeBackendState('host1:pool0', - {'free_capacity_gb': '1000', - 'updated_at': None, - 'service': service}) - volume = utils.create_volume(self.context, host='host1') - vol_id = volume.id - - filter_properties = {'context': self.context.elevated(), - 'scheduler_hints': {'different_host': [vol_id], }, - 'request_spec': {'volume_id': fake.VOLUME_ID}} - - self.assertTrue(filt_cls.backend_passes(host, filter_properties)) - - def test_different_filter_non_list_fails(self): - filt_cls = self.class_map['DifferentBackendFilter']() - host = fakes.FakeBackendState('host2', {}) - volume = utils.create_volume(self.context, host='host2') - vol_id = volume.id - - filter_properties = {'context': self.context.elevated(), - 'scheduler_hints': { - 'different_host': vol_id}} - - self.assertFalse(filt_cls.backend_passes(host, filter_properties)) - - def test_different_filter_fails(self): - filt_cls = self.class_map['DifferentBackendFilter']() - host = fakes.FakeBackendState('host1', {}) - volume = utils.create_volume(self.context, host='host1') - vol_id = volume.id - - filter_properties = {'context': self.context.elevated(), - 'scheduler_hints': {'different_host': [vol_id], }, - 'request_spec': {'volume_id': fake.VOLUME_ID}} - - self.assertFalse(filt_cls.backend_passes(host, filter_properties)) - - def test_different_filter_handles_none(self): - filt_cls = self.class_map['DifferentBackendFilter']() - host = fakes.FakeBackendState('host1', {}) - - filter_properties = {'context': self.context.elevated(), - 'scheduler_hints': None, - 'request_spec': {'volume_id': fake.VOLUME_ID}} - - self.assertTrue(filt_cls.backend_passes(host, filter_properties)) - - def 
test_different_filter_handles_deleted_instance(self): - filt_cls = self.class_map['DifferentBackendFilter']() - host = fakes.FakeBackendState('host1', {}) - volume = utils.create_volume(self.context, host='host1') - vol_id = volume.id - db.volume_destroy(utils.get_test_admin_context(), vol_id) - - filter_properties = {'context': self.context.elevated(), - 'scheduler_hints': { - 'different_host': [vol_id], }} - - self.assertTrue(filt_cls.backend_passes(host, filter_properties)) - - def test_different_filter_fail_nonuuid_hint(self): - filt_cls = self.class_map['DifferentBackendFilter']() - host = fakes.FakeBackendState('host1', {}) - - filter_properties = {'context': self.context.elevated(), - 'scheduler_hints': { - 'different_host': "NOT-a-valid-UUID", }} - - self.assertFalse(filt_cls.backend_passes(host, filter_properties)) - - def test_different_filter_handles_multiple_uuids(self): - filt_cls = self.class_map['DifferentBackendFilter']() - host = fakes.FakeBackendState('host1#pool0', {}) - volume1 = utils.create_volume(self.context, host='host1:pool1') - vol_id1 = volume1.id - volume2 = utils.create_volume(self.context, host='host1:pool3') - vol_id2 = volume2.id - - filter_properties = {'context': self.context.elevated(), - 'scheduler_hints': { - 'different_host': [vol_id1, vol_id2], }} - - self.assertTrue(filt_cls.backend_passes(host, filter_properties)) - - def test_different_filter_handles_invalid_uuids(self): - filt_cls = self.class_map['DifferentBackendFilter']() - host = fakes.FakeBackendState('host1', {}) - volume = utils.create_volume(self.context, host='host2') - vol_id = volume.id - - filter_properties = {'context': self.context.elevated(), - 'scheduler_hints': { - 'different_host': [vol_id, "NOT-a-valid-UUID"], }} - - self.assertFalse(filt_cls.backend_passes(host, filter_properties)) - - def test_same_filter_no_list_passes(self): - filt_cls = self.class_map['SameBackendFilter']() - host = fakes.FakeBackendState('host1', {}) - volume = 
utils.create_volume(self.context, host='host1') - vol_id = volume.id - - filter_properties = {'context': self.context.elevated(), - 'scheduler_hints': { - 'same_host': vol_id}} - - self.assertTrue(filt_cls.backend_passes(host, filter_properties)) - - def test_same_filter_passes(self): - filt_cls = self.class_map['SameBackendFilter']() - host = fakes.FakeBackendState('host1#pool0', {}) - volume = utils.create_volume(self.context, host='host1#pool0') - vol_id = volume.id - - filter_properties = {'context': self.context.elevated(), - 'scheduler_hints': { - 'same_host': [vol_id], }} - - self.assertTrue(filt_cls.backend_passes(host, filter_properties)) - - def test_same_filter_legacy_vol_fails(self): - filt_cls = self.class_map['SameBackendFilter']() - host = fakes.FakeBackendState('host1#pool0', {}) - volume = utils.create_volume(self.context, host='host1') - vol_id = volume.id - - filter_properties = {'context': self.context.elevated(), - 'scheduler_hints': { - 'same_host': [vol_id], }} - - self.assertFalse(filt_cls.backend_passes(host, filter_properties)) - - def test_same_filter_fails(self): - filt_cls = self.class_map['SameBackendFilter']() - host = fakes.FakeBackendState('host1#pool0', {}) - volume = utils.create_volume(self.context, host='host1#pool1') - vol_id = volume.id - - filter_properties = {'context': self.context.elevated(), - 'scheduler_hints': { - 'same_host': [vol_id], }} - - self.assertFalse(filt_cls.backend_passes(host, filter_properties)) - - def test_same_filter_vol_list_pass(self): - filt_cls = self.class_map['SameBackendFilter']() - host = fakes.FakeBackendState('host1', {}) - volume1 = utils.create_volume(self.context, host='host1') - vol_id1 = volume1.id - volume2 = utils.create_volume(self.context, host='host2') - vol_id2 = volume2.id - - filter_properties = {'context': self.context.elevated(), - 'scheduler_hints': { - 'same_host': [vol_id1, vol_id2], }} - - self.assertTrue(filt_cls.backend_passes(host, filter_properties)) - - def 
test_same_filter_handles_none(self): - filt_cls = self.class_map['SameBackendFilter']() - host = fakes.FakeBackendState('host1', {}) - - filter_properties = {'context': self.context.elevated(), - 'scheduler_hints': None} - - self.assertTrue(filt_cls.backend_passes(host, filter_properties)) - - def test_same_filter_handles_deleted_instance(self): - filt_cls = self.class_map['SameBackendFilter']() - host = fakes.FakeBackendState('host1', {}) - volume = utils.create_volume(self.context, host='host2') - vol_id = volume.id - db.volume_destroy(utils.get_test_admin_context(), vol_id) - - filter_properties = {'context': self.context.elevated(), - 'scheduler_hints': { - 'same_host': [vol_id], }} - - self.assertFalse(filt_cls.backend_passes(host, filter_properties)) - - def test_same_filter_fail_nonuuid_hint(self): - filt_cls = self.class_map['SameBackendFilter']() - host = fakes.FakeBackendState('host1', {}) - - filter_properties = {'context': self.context.elevated(), - 'scheduler_hints': { - 'same_host': "NOT-a-valid-UUID", }} - - self.assertFalse(filt_cls.backend_passes(host, filter_properties)) - - -class DriverFilterTestCase(BackendFiltersTestCase): - def test_passing_function(self): - filt_cls = self.class_map['DriverFilter']() - host1 = fakes.FakeBackendState( - 'host1', { - 'capabilities': { - 'filter_function': '1 == 1', - } - }) - - filter_properties = {'volume_type': {}} - - self.assertTrue(filt_cls.backend_passes(host1, filter_properties)) - - def test_failing_function(self): - filt_cls = self.class_map['DriverFilter']() - host1 = fakes.FakeBackendState( - 'host1', { - 'capabilities': { - 'filter_function': '1 == 2', - } - }) - - filter_properties = {'volume_type': {}} - - self.assertFalse(filt_cls.backend_passes(host1, filter_properties)) - - def test_no_filter_function(self): - filt_cls = self.class_map['DriverFilter']() - host1 = fakes.FakeBackendState( - 'host1', { - 'capabilities': { - 'filter_function': None, - } - }) - - filter_properties = {'volume_type': 
{}} - - self.assertTrue(filt_cls.backend_passes(host1, filter_properties)) - - def test_not_implemented(self): - filt_cls = self.class_map['DriverFilter']() - host1 = fakes.FakeBackendState( - 'host1', { - 'capabilities': {} - }) - - filter_properties = {'volume_type': {}} - - self.assertTrue(filt_cls.backend_passes(host1, filter_properties)) - - def test_no_volume_extra_specs(self): - filt_cls = self.class_map['DriverFilter']() - host1 = fakes.FakeBackendState( - 'host1', { - 'capabilities': { - 'filter_function': '1 == 1', - } - }) - - filter_properties = {'volume_type': {}} - - self.assertTrue(filt_cls.backend_passes(host1, filter_properties)) - - def test_function_extra_spec_replacement(self): - filt_cls = self.class_map['DriverFilter']() - host1 = fakes.FakeBackendState( - 'host1', { - 'capabilities': { - 'filter_function': 'extra.var == 1', - } - }) - - filter_properties = { - 'volume_type': { - 'extra_specs': { - 'var': 1, - } - } - } - - self.assertTrue(filt_cls.backend_passes(host1, filter_properties)) - - def test_function_stats_replacement(self): - filt_cls = self.class_map['DriverFilter']() - host1 = fakes.FakeBackendState( - 'host1', { - 'total_capacity_gb': 100, - 'capabilities': { - 'filter_function': 'stats.total_capacity_gb < 200', - } - }) - - filter_properties = {'volume_type': {}} - - self.assertTrue(filt_cls.backend_passes(host1, filter_properties)) - - def test_function_volume_replacement(self): - filt_cls = self.class_map['DriverFilter']() - host1 = fakes.FakeBackendState( - 'host1', { - 'capabilities': { - 'filter_function': 'volume.size < 5', - } - }) - - filter_properties = { - 'request_spec': { - 'volume_properties': { - 'size': 1 - } - } - } - - self.assertTrue(filt_cls.backend_passes(host1, filter_properties)) - - def test_function_qos_spec_replacement(self): - filt_cls = self.class_map['DriverFilter']() - host1 = fakes.FakeBackendState( - 'host1', { - 'capabilities': { - 'filter_function': 'qos.var == 1', - } - }) - - filter_properties 
= { - 'qos_specs': { - 'var': 1 - } - } - - self.assertTrue(filt_cls.backend_passes(host1, filter_properties)) - - def test_function_exception_caught(self): - filt_cls = self.class_map['DriverFilter']() - host1 = fakes.FakeBackendState( - 'host1', { - 'capabilities': { - 'filter_function': '1 / 0 == 0', - } - }) - - filter_properties = {} - - self.assertFalse(filt_cls.backend_passes(host1, filter_properties)) - - def test_function_empty_qos(self): - filt_cls = self.class_map['DriverFilter']() - host1 = fakes.FakeBackendState( - 'host1', { - 'capabilities': { - 'filter_function': 'qos.maxiops == 1', - } - }) - - filter_properties = { - 'qos_specs': None - } - - self.assertFalse(filt_cls.backend_passes(host1, filter_properties)) - - def test_capabilities(self): - filt_cls = self.class_map['DriverFilter']() - host1 = fakes.FakeBackendState( - 'host1', { - 'capabilities': { - 'foo': 10, - 'filter_function': 'capabilities.foo == 10', - }, - }) - - filter_properties = {} - - self.assertTrue(filt_cls.backend_passes(host1, filter_properties)) - - def test_wrong_capabilities(self): - filt_cls = self.class_map['DriverFilter']() - host1 = fakes.FakeBackendState( - 'host1', { - 'capabilities': { - 'bar': 10, - 'filter_function': 'capabilities.foo == 10', - }, - }) - - filter_properties = {} - - self.assertFalse(filt_cls.backend_passes(host1, filter_properties)) - - -class InstanceLocalityFilterTestCase(BackendFiltersTestCase): - def setUp(self): - super(InstanceLocalityFilterTestCase, self).setUp() - self.override_config('nova_endpoint_template', - 'http://novahost:8774/v2/%(project_id)s') - self.context.service_catalog = \ - [{'type': 'compute', 'name': 'nova', 'endpoints': - [{'publicURL': 'http://novahost:8774/v2/e3f0833dc08b4cea'}]}, - {'type': 'identity', 'name': 'keystone', 'endpoints': - [{'publicURL': 'http://keystonehost:5000/v2.0'}]}] - - @mock.patch('novaclient.client.discover_extensions') - @mock.patch('cinder.compute.nova.novaclient') - def test_same_host(self, 
_mock_novaclient, fake_extensions): - _mock_novaclient.return_value = fakes.FakeNovaClient() - fake_extensions.return_value = ( - fakes.FakeNovaClient().list_extensions.show_all()) - filt_cls = self.class_map['InstanceLocalityFilter']() - host = fakes.FakeBackendState('host1', {}) - uuid = nova.novaclient().servers.create('host1') - - filter_properties = {'context': self.context, - 'scheduler_hints': {'local_to_instance': uuid}, - 'request_spec': {'volume_id': fake.VOLUME_ID}} - self.assertTrue(filt_cls.backend_passes(host, filter_properties)) - - @mock.patch('novaclient.client.discover_extensions') - @mock.patch('cinder.compute.nova.novaclient') - def test_different_host(self, _mock_novaclient, fake_extensions): - _mock_novaclient.return_value = fakes.FakeNovaClient() - fake_extensions.return_value = ( - fakes.FakeNovaClient().list_extensions.show_all()) - filt_cls = self.class_map['InstanceLocalityFilter']() - host = fakes.FakeBackendState('host1', {}) - uuid = nova.novaclient().servers.create('host2') - - filter_properties = {'context': self.context, - 'scheduler_hints': {'local_to_instance': uuid}, - 'request_spec': {'volume_id': fake.VOLUME_ID}} - self.assertFalse(filt_cls.backend_passes(host, filter_properties)) - - def test_handles_none(self): - filt_cls = self.class_map['InstanceLocalityFilter']() - host = fakes.FakeBackendState('host1', {}) - - filter_properties = {'context': self.context, - 'scheduler_hints': None, - 'request_spec': {'volume_id': fake.VOLUME_ID}} - self.assertTrue(filt_cls.backend_passes(host, filter_properties)) - - def test_invalid_uuid(self): - filt_cls = self.class_map['InstanceLocalityFilter']() - host = fakes.FakeBackendState('host1', {}) - - filter_properties = {'context': self.context, - 'scheduler_hints': - {'local_to_instance': 'e29b11d4-not-valid-a716'}, - 'request_spec': {'volume_id': fake.VOLUME_ID}} - self.assertRaises(exception.InvalidUUID, - filt_cls.backend_passes, host, filter_properties) - - 
@mock.patch('cinder.compute.nova.novaclient') - def test_nova_no_extended_server_attributes(self, _mock_novaclient): - _mock_novaclient.return_value = fakes.FakeNovaClient( - ext_srv_attr=False) - filt_cls = self.class_map['InstanceLocalityFilter']() - host = fakes.FakeBackendState('host1', {}) - uuid = nova.novaclient().servers.create('host1') - - filter_properties = {'context': self.context, - 'scheduler_hints': {'local_to_instance': uuid}, - 'request_spec': {'volume_id': fake.VOLUME_ID}} - self.assertRaises(exception.CinderException, - filt_cls.backend_passes, host, filter_properties) - - @mock.patch('cinder.compute.nova.novaclient') - def test_nova_down_does_not_alter_other_filters(self, _mock_novaclient): - # Simulate Nova API is not available - _mock_novaclient.side_effect = Exception - - filt_cls = self.class_map['InstanceLocalityFilter']() - host = fakes.FakeBackendState('host1', {}) - - filter_properties = {'context': self.context, 'size': 100, - 'request_spec': {'volume_id': fake.VOLUME_ID}} - self.assertTrue(filt_cls.backend_passes(host, filter_properties)) - - @mock.patch('cinder.compute.nova.novaclient') - def test_nova_timeout(self, mock_novaclient): - # Simulate a HTTP timeout - mock_show_all = mock_novaclient.return_value.list_extensions.show_all - mock_show_all.side_effect = request_exceptions.Timeout - - filt_cls = self.class_map['InstanceLocalityFilter']() - host = fakes.FakeBackendState('host1', {}) - - filter_properties = \ - {'context': self.context, 'scheduler_hints': - {'local_to_instance': 'e29b11d4-15ef-34a9-a716-598a6f0b5467'}, - 'request_spec': {'volume_id': fake.VOLUME_ID}} - self.assertRaises(exception.APITimeout, - filt_cls.backend_passes, host, filter_properties) - - -class TestFilter(filters.BaseBackendFilter): - pass - - -class TestBogusFilter(object): - """Class that doesn't inherit from BaseBackendFilter.""" - pass - - -@ddt.ddt -class ExtraSpecsOpsTestCase(test.TestCase): - def _do_extra_specs_ops_test(self, value, req, 
matches): - assertion = self.assertTrue if matches else self.assertFalse - assertion(extra_specs_ops.match(value, req)) - - def test_extra_specs_fails_with_bogus_ops(self): - self._do_extra_specs_ops_test( - value='4', - req='> 2', - matches=False) - - @ddt.data({'value': '1', 'req': '1', 'matches': True}, - {'value': '', 'req': '1', 'matches': False}, - {'value': '3', 'req': '1', 'matches': False}, - {'value': '222', 'req': '2', 'matches': False}) - @ddt.unpack - def test_extra_specs_matches_simple(self, value, req, matches): - self._do_extra_specs_ops_test( - value=value, - req=req, - matches=matches) - - @ddt.data({'value': '123', 'req': '= 123', 'matches': True}, - {'value': '124', 'req': '= 123', 'matches': True}, - {'value': '34', 'req': '= 234', 'matches': False}, - {'value': '34', 'req': '=', 'matches': False}) - @ddt.unpack - def test_extra_specs_matches_with_op_eq(self, value, req, matches): - self._do_extra_specs_ops_test( - value=value, - req=req, - matches=matches) - - @ddt.data({'value': '2', 'req': '<= 10', 'matches': True}, - {'value': '3', 'req': '<= 2', 'matches': False}, - {'value': '3', 'req': '>= 1', 'matches': True}, - {'value': '2', 'req': '>= 3', 'matches': False}) - @ddt.unpack - def test_extra_specs_matches_with_op_not_eq(self, value, req, matches): - self._do_extra_specs_ops_test( - value=value, - req=req, - matches=matches) - - @ddt.data({'value': '123', 'req': 's== 123', 'matches': True}, - {'value': '1234', 'req': 's== 123', 'matches': False}, - {'value': '1234', 'req': 's!= 123', 'matches': True}, - {'value': '123', 'req': 's!= 123', 'matches': False}) - @ddt.unpack - def test_extra_specs_matches_with_op_seq(self, value, req, matches): - self._do_extra_specs_ops_test( - value=value, - req=req, - matches=matches) - - @ddt.data({'value': '1000', 'req': 's>= 234', 'matches': False}, - {'value': '1234', 'req': 's<= 1000', 'matches': False}, - {'value': '2', 'req': 's< 12', 'matches': False}, - {'value': '12', 'req': 's> 2', 'matches': 
False}) - @ddt.unpack - def test_extra_specs_fails_with_op_not_seq(self, value, req, matches): - self._do_extra_specs_ops_test( - value=value, - req=req, - matches=matches) - - @ddt.data({'value': '12311321', 'req': ' 11', 'matches': True}, - {'value': '12311321', 'req': ' 12311321', 'matches': True}, - {'value': '12311321', 'req': - ' 12311321 ', 'matches': True}, - {'value': '12310321', 'req': ' 11', 'matches': False}, - {'value': '12310321', 'req': ' 11 ', 'matches': False}) - @ddt.unpack - def test_extra_specs_matches_with_op_in(self, value, req, matches): - self._do_extra_specs_ops_test( - value=value, - req=req, - matches=matches) - - @ddt.data({'value': True, 'req': ' True', 'matches': True}, - {'value': False, 'req': ' False', 'matches': True}, - {'value': False, 'req': ' Nonsense', 'matches': True}, - {'value': True, 'req': ' False', 'matches': False}, - {'value': False, 'req': ' True', 'matches': False}) - @ddt.unpack - def test_extra_specs_matches_with_op_is(self, value, req, matches): - self._do_extra_specs_ops_test( - value=value, - req=req, - matches=matches) - - @ddt.data({'value': '12', 'req': ' 11 12', 'matches': True}, - {'value': '12', 'req': ' 11 12 ', 'matches': True}, - {'value': '13', 'req': ' 11 12', 'matches': False}, - {'value': '13', 'req': ' 11 12 ', 'matches': False}) - @ddt.unpack - def test_extra_specs_matches_with_op_or(self, value, req, matches): - self._do_extra_specs_ops_test( - value=value, - req=req, - matches=matches) - - @ddt.data({'value': None, 'req': None, 'matches': True}, - {'value': 'foo', 'req': None, 'matches': False}) - @ddt.unpack - def test_extra_specs_matches_none_req(self, value, req, matches): - self._do_extra_specs_ops_test( - value=value, - req=req, - matches=matches) - - -@ddt.ddt -class BasicFiltersTestCase(BackendFiltersTestCase): - """Test case for host filters.""" - - def setUp(self): - super(BasicFiltersTestCase, self).setUp() - self.json_query = jsonutils.dumps( - ['and', ['>=', '$free_ram_mb', 1024], - 
['>=', '$free_disk_mb', 200 * 1024]]) - - def test_all_filters(self): - # Double check at least a couple of known filters exist - self.assertIn('JsonFilter', self.class_map) - self.assertIn('CapabilitiesFilter', self.class_map) - self.assertIn('AvailabilityZoneFilter', self.class_map) - self.assertIn('IgnoreAttemptedHostsFilter', self.class_map) - - def _do_test_type_filter_extra_specs(self, ecaps, especs, passes): - filt_cls = self.class_map['CapabilitiesFilter']() - capabilities = {'enabled': True} - capabilities.update(ecaps) - service = {'disabled': False} - filter_properties = {'resource_type': {'name': 'fake_type', - 'extra_specs': especs}, - 'request_spec': {'volume_id': fake.VOLUME_ID}} - host = fakes.FakeBackendState('host1', - {'free_capacity_gb': 1024, - 'capabilities': capabilities, - 'service': service}) - assertion = self.assertTrue if passes else self.assertFalse - assertion(filt_cls.backend_passes(host, filter_properties)) - - def test_capability_filter_passes_extra_specs_simple(self): - self._do_test_type_filter_extra_specs( - ecaps={'opt1': '1', 'opt2': '2'}, - especs={'opt1': '1', 'opt2': '2'}, - passes=True) - - def test_capability_filter_fails_extra_specs_simple(self): - self._do_test_type_filter_extra_specs( - ecaps={'opt1': '1', 'opt2': '2'}, - especs={'opt1': '1', 'opt2': '222'}, - passes=False) - - def test_capability_filter_passes_extra_specs_complex(self): - self._do_test_type_filter_extra_specs( - ecaps={'opt1': 10, 'opt2': 5}, - especs={'opt1': '>= 2', 'opt2': '<= 8'}, - passes=True) - - def test_capability_filter_fails_extra_specs_complex(self): - self._do_test_type_filter_extra_specs( - ecaps={'opt1': 10, 'opt2': 5}, - especs={'opt1': '>= 2', 'opt2': '>= 8'}, - passes=False) - - def test_capability_filter_passes_extra_specs_list_simple(self): - self._do_test_type_filter_extra_specs( - ecaps={'opt1': ['1', '2'], 'opt2': '2'}, - especs={'opt1': '1', 'opt2': '2'}, - passes=True) - - @ddt.data(' True', ' False') - def 
test_capability_filter_passes_extra_specs_list_complex(self, opt1): - self._do_test_type_filter_extra_specs( - ecaps={'opt1': [True, False], 'opt2': ['1', '2']}, - especs={'opt1': opt1, 'opt2': '<= 8'}, - passes=True) - - def test_capability_filter_fails_extra_specs_list_simple(self): - self._do_test_type_filter_extra_specs( - ecaps={'opt1': ['1', '2'], 'opt2': ['2']}, - especs={'opt1': '3', 'opt2': '2'}, - passes=False) - - def test_capability_filter_fails_extra_specs_list_complex(self): - self._do_test_type_filter_extra_specs( - ecaps={'opt1': [True, False], 'opt2': ['1', '2']}, - especs={'opt1': 'fake', 'opt2': '<= 8'}, - passes=False) - - def test_capability_filter_passes_scope_extra_specs(self): - self._do_test_type_filter_extra_specs( - ecaps={'scope_lv1': {'opt1': 10}}, - especs={'capabilities:scope_lv1:opt1': '>= 2'}, - passes=True) - - def test_capability_filter_passes_fakescope_extra_specs(self): - self._do_test_type_filter_extra_specs( - ecaps={'scope_lv1': {'opt1': 10}, 'opt2': 5}, - especs={'scope_lv1:opt1': '= 2', 'opt2': '>= 3'}, - passes=True) - - def test_capability_filter_fails_scope_extra_specs(self): - self._do_test_type_filter_extra_specs( - ecaps={'scope_lv1': {'opt1': 10}}, - especs={'capabilities:scope_lv1:opt1': '<= 2'}, - passes=False) - - def test_capability_filter_passes_multi_level_scope_extra_specs(self): - self._do_test_type_filter_extra_specs( - ecaps={'scope_lv0': {'scope_lv1': - {'scope_lv2': {'opt1': 10}}}}, - especs={'capabilities:scope_lv0:scope_lv1:scope_lv2:opt1': '>= 2'}, - passes=True) - - def test_capability_filter_fails_unenough_level_scope_extra_specs(self): - self._do_test_type_filter_extra_specs( - ecaps={'scope_lv0': {'scope_lv1': None}}, - especs={'capabilities:scope_lv0:scope_lv1:scope_lv2:opt1': '>= 2'}, - passes=False) - - def test_capability_filter_fails_wrong_scope_extra_specs(self): - self._do_test_type_filter_extra_specs( - ecaps={'scope_lv0': {'opt1': 10}}, - especs={'capabilities:scope_lv1:opt1': '>= 2'}, - 
passes=False) - - def test_capability_filter_passes_none_extra_specs(self): - self._do_test_type_filter_extra_specs( - ecaps={'scope_lv0': {'opt1': None}}, - especs={'capabilities:scope_lv0:opt1': None}, - passes=True) - - def test_capability_filter_fails_none_extra_specs(self): - self._do_test_type_filter_extra_specs( - ecaps={'scope_lv0': {'opt1': 10}}, - especs={'capabilities:scope_lv0:opt1': None}, - passes=False) - - def test_capability_filter_fails_none_caps(self): - self._do_test_type_filter_extra_specs( - ecaps={'scope_lv0': {'opt1': None}}, - especs={'capabilities:scope_lv0:opt1': 'foo'}, - passes=False) - - def test_capability_filter_passes_multi_level_scope_extra_specs_list(self): - self._do_test_type_filter_extra_specs( - ecaps={ - 'scope_lv0': { - 'scope_lv1': { - 'scope_lv2': { - 'opt1': [True, False], - }, - }, - }, - }, - especs={ - 'capabilities:scope_lv0:scope_lv1:scope_lv2:opt1': ' True', - }, - passes=True) - - def test_capability_filter_fails_multi_level_scope_extra_specs_list(self): - self._do_test_type_filter_extra_specs( - ecaps={ - 'scope_lv0': { - 'scope_lv1': { - 'scope_lv2': { - 'opt1': [True, False], - 'opt2': ['1', '2'], - }, - }, - }, - }, - especs={ - 'capabilities:scope_lv0:scope_lv1:scope_lv2:opt1': ' True', - 'capabilities:scope_lv0:scope_lv1:scope_lv2:opt2': '3', - }, - passes=False) - - def test_capability_filter_fails_wrong_scope_extra_specs_list(self): - self._do_test_type_filter_extra_specs( - ecaps={'scope_lv0': {'opt1': [True, False]}}, - especs={'capabilities:scope_lv1:opt1': ' True'}, - passes=False) - - def test_json_filter_passes(self): - filt_cls = self.class_map['JsonFilter']() - filter_properties = {'resource_type': {'memory_mb': 1024, - 'root_gb': 200, - 'ephemeral_gb': 0}, - 'scheduler_hints': {'query': self.json_query}, - 'request_spec': {'volume_id': fake.VOLUME_ID}} - capabilities = {'enabled': True} - host = fakes.FakeBackendState('host1', - {'free_ram_mb': 1024, - 'free_disk_mb': 200 * 1024, - 'capabilities': 
capabilities}) - self.assertTrue(filt_cls.backend_passes(host, filter_properties)) - - def test_json_filter_passes_with_no_query(self): - filt_cls = self.class_map['JsonFilter']() - filter_properties = {'resource_type': {'memory_mb': 1024, - 'root_gb': 200, - 'ephemeral_gb': 0}, - 'request_spec': {'volume_id': fake.VOLUME_ID}} - capabilities = {'enabled': True} - host = fakes.FakeBackendState('host1', - {'free_ram_mb': 0, - 'free_disk_mb': 0, - 'capabilities': capabilities}) - self.assertTrue(filt_cls.backend_passes(host, filter_properties)) - - def test_json_filter_fails_on_memory(self): - filt_cls = self.class_map['JsonFilter']() - filter_properties = {'resource_type': {'memory_mb': 1024, - 'root_gb': 200, - 'ephemeral_gb': 0}, - 'scheduler_hints': {'query': self.json_query}, - 'request_spec': {'volume_id': fake.VOLUME_ID}} - capabilities = {'enabled': True} - host = fakes.FakeBackendState('host1', - {'free_ram_mb': 1023, - 'free_disk_mb': 200 * 1024, - 'capabilities': capabilities}) - self.assertFalse(filt_cls.backend_passes(host, filter_properties)) - - def test_json_filter_fails_on_disk(self): - filt_cls = self.class_map['JsonFilter']() - filter_properties = {'resource_type': {'memory_mb': 1024, - 'root_gb': 200, - 'ephemeral_gb': 0}, - 'scheduler_hints': {'query': self.json_query}, - 'request_spec': {'volume_id': fake.VOLUME_ID}} - capabilities = {'enabled': True} - host = fakes.FakeBackendState('host1', - {'free_ram_mb': 1024, - 'free_disk_mb': (200 * 1024) - 1, - 'capabilities': capabilities}) - self.assertFalse(filt_cls.backend_passes(host, filter_properties)) - - def test_json_filter_fails_on_caps_disabled(self): - filt_cls = self.class_map['JsonFilter']() - json_query = jsonutils.dumps( - ['and', ['>=', '$free_ram_mb', 1024], - ['>=', '$free_disk_mb', 200 * 1024], - '$capabilities.enabled']) - filter_properties = {'resource_type': {'memory_mb': 1024, - 'root_gb': 200, - 'ephemeral_gb': 0}, - 'scheduler_hints': {'query': json_query}, - 'request_spec': 
{'volume_id': fake.VOLUME_ID}} - capabilities = {'enabled': False} - host = fakes.FakeBackendState('host1', - {'free_ram_mb': 1024, - 'free_disk_mb': 200 * 1024, - 'capabilities': capabilities}) - self.assertFalse(filt_cls.backend_passes(host, filter_properties)) - - def test_json_filter_fails_on_service_disabled(self): - filt_cls = self.class_map['JsonFilter']() - json_query = jsonutils.dumps( - ['and', ['>=', '$free_ram_mb', 1024], - ['>=', '$free_disk_mb', 200 * 1024], - ['not', '$service.disabled']]) - filter_properties = {'resource_type': {'memory_mb': 1024, - 'local_gb': 200}, - 'scheduler_hints': {'query': json_query}, - 'request_spec': {'volume_id': fake.VOLUME_ID}} - capabilities = {'enabled': True} - host = fakes.FakeBackendState('host1', - {'free_ram_mb': 1024, - 'free_disk_mb': 200 * 1024, - 'capabilities': capabilities}) - self.assertFalse(filt_cls.backend_passes(host, filter_properties)) - - def test_json_filter_happy_day(self): - """Test json filter more thoroughly.""" - filt_cls = self.class_map['JsonFilter']() - raw = ['and', - '$capabilities.enabled', - ['=', '$capabilities.opt1', 'match'], - ['or', - ['and', - ['<', '$free_ram_mb', 30], - ['<', '$free_disk_mb', 300]], - ['and', - ['>', '$free_ram_mb', 30], - ['>', '$free_disk_mb', 300]]]] - filter_properties = { - 'scheduler_hints': { - 'query': jsonutils.dumps(raw), - }, - 'request_spec': {'volume_id': fake.VOLUME_ID} - } - - # Passes - capabilities = {'enabled': True, 'opt1': 'match'} - service = {'disabled': False} - host = fakes.FakeBackendState('host1', - {'free_ram_mb': 10, - 'free_disk_mb': 200, - 'capabilities': capabilities, - 'service': service}) - self.assertTrue(filt_cls.backend_passes(host, filter_properties)) - - # Passes - capabilities = {'enabled': True, 'opt1': 'match'} - service = {'disabled': False} - host = fakes.FakeBackendState('host1', - {'free_ram_mb': 40, - 'free_disk_mb': 400, - 'capabilities': capabilities, - 'service': service}) - 
self.assertTrue(filt_cls.backend_passes(host, filter_properties)) - - # Fails due to capabilities being disabled - capabilities = {'enabled': False, 'opt1': 'match'} - service = {'disabled': False} - host = fakes.FakeBackendState('host1', - {'free_ram_mb': 40, - 'free_disk_mb': 400, - 'capabilities': capabilities, - 'service': service}) - self.assertFalse(filt_cls.backend_passes(host, filter_properties)) - - # Fails due to being exact memory/disk we don't want - capabilities = {'enabled': True, 'opt1': 'match'} - service = {'disabled': False} - host = fakes.FakeBackendState('host1', - {'free_ram_mb': 30, - 'free_disk_mb': 300, - 'capabilities': capabilities, - 'service': service}) - self.assertFalse(filt_cls.backend_passes(host, filter_properties)) - - # Fails due to memory lower but disk higher - capabilities = {'enabled': True, 'opt1': 'match'} - service = {'disabled': False} - host = fakes.FakeBackendState('host1', - {'free_ram_mb': 20, - 'free_disk_mb': 400, - 'capabilities': capabilities, - 'service': service}) - self.assertFalse(filt_cls.backend_passes(host, filter_properties)) - - # Fails due to capabilities 'opt1' not equal - capabilities = {'enabled': True, 'opt1': 'no-match'} - service = {'enabled': True} - host = fakes.FakeBackendState('host1', - {'free_ram_mb': 20, - 'free_disk_mb': 400, - 'capabilities': capabilities, - 'service': service}) - self.assertFalse(filt_cls.backend_passes(host, filter_properties)) - - def test_json_filter_basic_operators(self): - filt_cls = self.class_map['JsonFilter']() - host = fakes.FakeBackendState('host1', - {'capabilities': {'enabled': True}}) - # (operator, arguments, expected_result) - ops_to_test = [ - ['=', [1, 1], True], - ['=', [1, 2], False], - ['<', [1, 2], True], - ['<', [1, 1], False], - ['<', [2, 1], False], - ['>', [2, 1], True], - ['>', [2, 2], False], - ['>', [2, 3], False], - ['<=', [1, 2], True], - ['<=', [1, 1], True], - ['<=', [2, 1], False], - ['>=', [2, 1], True], - ['>=', [2, 2], True], - ['>=', 
[2, 3], False], - ['in', [1, 1], True], - ['in', [1, 1, 2, 3], True], - ['in', [4, 1, 2, 3], False], - ['not', [True], False], - ['not', [False], True], - ['or', [True, False], True], - ['or', [False, False], False], - ['and', [True, True], True], - ['and', [False, False], False], - ['and', [True, False], False], - # Nested ((True or False) and (2 > 1)) == Passes - ['and', [['or', True, False], ['>', 2, 1]], True]] - - for (op, args, expected) in ops_to_test: - raw = [op] + args - filter_properties = { - 'scheduler_hints': { - 'query': jsonutils.dumps(raw), - }, - 'request_spec': {'volume_id': fake.VOLUME_ID} - } - self.assertEqual(expected, - filt_cls.backend_passes(host, filter_properties)) - - # This results in [False, True, False, True] and if any are True - # then it passes... - raw = ['not', True, False, True, False] - filter_properties = { - 'scheduler_hints': { - 'query': jsonutils.dumps(raw), - }, - } - self.assertTrue(filt_cls.backend_passes(host, filter_properties)) - - # This results in [False, False, False] and if any are True - # then it passes...which this doesn't - raw = ['not', True, True, True] - filter_properties = { - 'scheduler_hints': { - 'query': jsonutils.dumps(raw), - }, - } - self.assertFalse(filt_cls.backend_passes(host, filter_properties)) - - def test_json_filter_unknown_operator_raises(self): - filt_cls = self.class_map['JsonFilter']() - raw = ['!=', 1, 2] - filter_properties = { - 'scheduler_hints': { - 'query': jsonutils.dumps(raw), - }, - } - host = fakes.FakeBackendState('host1', - {'capabilities': {'enabled': True}}) - self.assertRaises(KeyError, - filt_cls.backend_passes, host, filter_properties) - - def test_json_filter_empty_filters_pass(self): - filt_cls = self.class_map['JsonFilter']() - host = fakes.FakeBackendState('host1', - {'capabilities': {'enabled': True}}) - - raw = [] - filter_properties = { - 'scheduler_hints': { - 'query': jsonutils.dumps(raw), - }, - } - self.assertTrue(filt_cls.backend_passes(host, 
filter_properties)) - raw = {} - filter_properties = { - 'scheduler_hints': { - 'query': jsonutils.dumps(raw), - }, - } - self.assertTrue(filt_cls.backend_passes(host, filter_properties)) - - def test_json_filter_invalid_num_arguments_fails(self): - filt_cls = self.class_map['JsonFilter']() - host = fakes.FakeBackendState('host1', - {'capabilities': {'enabled': True}}) - - raw = ['>', ['and', ['or', ['not', ['<', ['>=', ['<=', ['in', ]]]]]]]] - filter_properties = { - 'scheduler_hints': { - 'query': jsonutils.dumps(raw), - }, - } - self.assertFalse(filt_cls.backend_passes(host, filter_properties)) - - raw = ['>', 1] - filter_properties = { - 'scheduler_hints': { - 'query': jsonutils.dumps(raw), - }, - } - self.assertFalse(filt_cls.backend_passes(host, filter_properties)) - - def test_json_filter_unknown_variable_ignored(self): - filt_cls = self.class_map['JsonFilter']() - host = fakes.FakeBackendState('host1', - {'capabilities': {'enabled': True}}) - - raw = ['=', '$........', 1, 1] - filter_properties = { - 'scheduler_hints': { - 'query': jsonutils.dumps(raw), - }, - } - self.assertTrue(filt_cls.backend_passes(host, filter_properties)) - - raw = ['=', '$foo', 2, 2] - filter_properties = { - 'scheduler_hints': { - 'query': jsonutils.dumps(raw), - }, - } - self.assertTrue(filt_cls.backend_passes(host, filter_properties)) - - @staticmethod - def _make_zone_request(zone, is_admin=False): - ctxt = context.RequestContext('fake', 'fake', is_admin=is_admin) - return { - 'context': ctxt, - 'request_spec': { - 'resource_properties': { - 'availability_zone': zone - } - } - } - - def test_availability_zone_filter_same(self): - filt_cls = self.class_map['AvailabilityZoneFilter']() - service = {'availability_zone': 'nova'} - request = self._make_zone_request('nova') - host = fakes.FakeBackendState('host1', {'service': service}) - self.assertTrue(filt_cls.backend_passes(host, request)) - - def test_availability_zone_filter_different(self): - filt_cls = 
self.class_map['AvailabilityZoneFilter']() - service = {'availability_zone': 'nova'} - request = self._make_zone_request('bad') - host = fakes.FakeBackendState('host1', {'service': service}) - self.assertFalse(filt_cls.backend_passes(host, request)) - - def test_availability_zone_filter_empty(self): - filt_cls = self.class_map['AvailabilityZoneFilter']() - service = {'availability_zone': 'nova'} - request = {} - host = fakes.FakeBackendState('host1', {'service': service}) - self.assertTrue(filt_cls.backend_passes(host, request)) - - def test_ignore_attempted_hosts_filter_disabled(self): - # Test case where re-scheduling is disabled. - filt_cls = self.class_map['IgnoreAttemptedHostsFilter']() - host = fakes.FakeBackendState('host1', {}) - filter_properties = {} - self.assertTrue(filt_cls.backend_passes(host, filter_properties)) - - def test_ignore_attempted_hosts_filter_pass(self): - # Node not previously tried. - filt_cls = self.class_map['IgnoreAttemptedHostsFilter']() - host = fakes.FakeBackendState('host1', {}) - attempted = dict(num_attempts=2, hosts=['host2']) - filter_properties = dict(retry=attempted) - self.assertTrue(filt_cls.backend_passes(host, filter_properties)) - - def test_ignore_attempted_hosts_filter_fail(self): - # Node was already tried. - filt_cls = self.class_map['IgnoreAttemptedHostsFilter']() - host = fakes.FakeBackendState('host1', {}) - attempted = dict(num_attempts=2, backends=['host1']) - filter_properties = dict(retry=attempted) - self.assertFalse(filt_cls.backend_passes(host, filter_properties)) diff --git a/cinder/tests/unit/scheduler/test_host_manager.py b/cinder/tests/unit/scheduler/test_host_manager.py deleted file mode 100644 index 81b5db813..000000000 --- a/cinder/tests/unit/scheduler/test_host_manager.py +++ /dev/null @@ -1,1383 +0,0 @@ -# Copyright (c) 2011 OpenStack Foundation -# All Rights Reserved. 
-# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -""" -Tests For HostManager -""" - -from datetime import datetime -from datetime import timedelta -import ddt - -import mock -from oslo_serialization import jsonutils -from oslo_utils import timeutils - -from cinder.common import constants -from cinder import context -from cinder import db -from cinder import exception -from cinder import objects -from cinder.scheduler import filters -from cinder.scheduler import host_manager -from cinder import test -from cinder.tests.unit import fake_constants as fake -from cinder.tests.unit.objects import test_service - - -class FakeFilterClass1(filters.BaseBackendFilter): - def backend_passes(self, host_state, filter_properties): - pass - - -class FakeFilterClass2(filters.BaseBackendFilter): - def backend_passes(self, host_state, filter_properties): - pass - - -class FakeFilterClass3(filters.BaseHostFilter): - def host_passes(self, host_state, filter_properties): - return host_state.get('volume_backend_name') == \ - filter_properties.get('volume_type')['volume_backend_name'] - - -@ddt.ddt -class HostManagerTestCase(test.TestCase): - """Test case for HostManager class.""" - - def setUp(self): - super(HostManagerTestCase, self).setUp() - self.host_manager = host_manager.HostManager() - self.fake_backends = [host_manager.BackendState('fake_be%s' % x, None) - for x in range(1, 5)] - # For a second scheduler service. 
- self.host_manager_1 = host_manager.HostManager() - - def test_choose_backend_filters_not_found(self): - self.flags(scheduler_default_filters='FakeFilterClass3') - self.host_manager.filter_classes = [FakeFilterClass1, - FakeFilterClass2] - self.assertRaises(exception.SchedulerHostFilterNotFound, - self.host_manager._choose_backend_filters, None) - - def test_choose_backend_filters(self): - self.flags(scheduler_default_filters=['FakeFilterClass2']) - self.host_manager.filter_classes = [FakeFilterClass1, - FakeFilterClass2] - - # Test 'volume' returns 1 correct function - filter_classes = self.host_manager._choose_backend_filters(None) - self.assertEqual(1, len(filter_classes)) - self.assertEqual('FakeFilterClass2', filter_classes[0].__name__) - - @mock.patch('cinder.scheduler.host_manager.HostManager.' - '_choose_backend_filters') - def test_get_filtered_backends(self, _mock_choose_backend_filters): - filter_class = FakeFilterClass1 - mock_func = mock.Mock() - mock_func.return_value = True - filter_class._filter_one = mock_func - _mock_choose_backend_filters.return_value = [filter_class] - - fake_properties = {'moo': 1, 'cow': 2} - expected = [] - for fake_backend in self.fake_backends: - expected.append(mock.call(fake_backend, fake_properties)) - - result = self.host_manager.get_filtered_backends(self.fake_backends, - fake_properties) - self.assertEqual(expected, mock_func.call_args_list) - self.assertEqual(set(self.fake_backends), set(result)) - - @mock.patch('cinder.scheduler.host_manager.HostManager._get_updated_pools') - @mock.patch('oslo_utils.timeutils.utcnow') - def test_update_service_capabilities(self, _mock_utcnow, - _mock_get_updated_pools): - service_states = self.host_manager.service_states - self.assertDictEqual({}, service_states) - _mock_utcnow.side_effect = [31338, 31339] - - _mock_get_updated_pools.return_value = [] - timestamp = jsonutils.to_primitive(datetime.utcnow()) - host1_volume_capabs = dict(free_capacity_gb=4321, timestamp=timestamp) - 
host1_old_volume_capabs = dict(free_capacity_gb=1, timestamp=timestamp) - host2_volume_capabs = dict(free_capacity_gb=5432) - host3_volume_capabs = dict(free_capacity_gb=6543) - - service_name = 'volume' - # The host manager receives a deserialized timestamp - timestamp = datetime.strptime(timestamp, timeutils.PERFECT_TIME_FORMAT) - self.host_manager.update_service_capabilities(service_name, 'host1', - host1_volume_capabs, - None, timestamp) - # It'll ignore older updates - old_timestamp = timestamp - timedelta(hours=1) - self.host_manager.update_service_capabilities(service_name, 'host1', - host1_old_volume_capabs, - None, old_timestamp) - self.host_manager.update_service_capabilities(service_name, 'host2', - host2_volume_capabs, - None, None) - self.host_manager.update_service_capabilities(service_name, 'host3', - host3_volume_capabs, - None, None) - - # Make sure dictionary isn't re-assigned - self.assertEqual(service_states, self.host_manager.service_states) - - host1_volume_capabs['timestamp'] = timestamp - host2_volume_capabs['timestamp'] = 31338 - host3_volume_capabs['timestamp'] = 31339 - - expected = {'host1': host1_volume_capabs, - 'host2': host2_volume_capabs, - 'host3': host3_volume_capabs} - self.assertDictEqual(expected, service_states) - - @mock.patch( - 'cinder.scheduler.host_manager.HostManager.get_usage_and_notify') - @mock.patch('oslo_utils.timeutils.utcnow') - def test_update_and_notify_service_capabilities_case1( - self, _mock_utcnow, - _mock_get_usage_and_notify): - - _mock_utcnow.side_effect = [31337, 31338, 31339] - service_name = 'volume' - - capab1 = {'pools': [{ - 'pool_name': 'pool1', 'thick_provisioning_support': True, - 'thin_provisioning_support': False, 'total_capacity_gb': 10, - 'free_capacity_gb': 10, 'max_over_subscription_ratio': 1, - 'provisioned_capacity_gb': 0, 'allocated_capacity_gb': 0, - 'reserved_percentage': 0}]} - - # Run 1: - # capa: capa1 - # S0: update_service_capabilities() - # S0: notify_service_capabilities() - # 
S1: update_service_capabilities() - # - # notify capab1 to ceilometer by S0 - # - - # S0: update_service_capabilities() - self.host_manager.update_service_capabilities(service_name, 'host1', - capab1, None, None) - self.assertDictEqual(dict(dict(timestamp=31337), **capab1), - self.host_manager.service_states['host1']) - - # S0: notify_service_capabilities() - self.host_manager.notify_service_capabilities(service_name, 'host1', - capab1, None) - self.assertDictEqual(dict(dict(timestamp=31337), **capab1), - self.host_manager.service_states['host1']) - self.assertDictEqual( - dict(dict(timestamp=31338), **capab1), - self.host_manager.service_states_last_update['host1']) - - # notify capab1 to ceilometer by S0 - self.assertTrue(1, _mock_get_usage_and_notify.call_count) - - # S1: update_service_capabilities() - self.host_manager_1.update_service_capabilities(service_name, 'host1', - capab1, None, None) - - self.assertDictEqual(dict(dict(timestamp=31339), **capab1), - self.host_manager_1.service_states['host1']) - - @mock.patch( - 'cinder.scheduler.host_manager.HostManager.get_usage_and_notify') - @mock.patch('oslo_utils.timeutils.utcnow') - def test_update_and_notify_service_capabilities_case2( - self, _mock_utcnow, - _mock_get_usage_and_notify): - - _mock_utcnow.side_effect = [31340, 31341, 31342] - - service_name = 'volume' - - capab1 = {'pools': [{ - 'pool_name': 'pool1', 'thick_provisioning_support': True, - 'thin_provisioning_support': False, 'total_capacity_gb': 10, - 'free_capacity_gb': 10, 'max_over_subscription_ratio': 1, - 'provisioned_capacity_gb': 0, 'allocated_capacity_gb': 0, - 'reserved_percentage': 0}]} - - self.host_manager.service_states['host1'] = ( - dict(dict(timestamp=31337), **capab1)) - self.host_manager.service_states_last_update['host1'] = ( - dict(dict(timestamp=31338), **capab1)) - self.host_manager_1.service_states['host1'] = ( - dict(dict(timestamp=31339), **capab1)) - - # Run 2: - # capa: capa1 - # S0: update_service_capabilities() - # S1: 
update_service_capabilities() - # S1: notify_service_capabilities() - # - # Don't notify capab1 to ceilometer. - - # S0: update_service_capabilities() - self.host_manager.update_service_capabilities(service_name, 'host1', - capab1, None, None) - - self.assertDictEqual(dict(dict(timestamp=31340), **capab1), - self.host_manager.service_states['host1']) - - self.assertDictEqual( - dict(dict(timestamp=31338), **capab1), - self.host_manager.service_states_last_update['host1']) - - # S1: update_service_capabilities() - self.host_manager_1.update_service_capabilities(service_name, 'host1', - capab1, None, None) - - self.assertDictEqual(dict(dict(timestamp=31341), **capab1), - self.host_manager_1.service_states['host1']) - - self.assertDictEqual( - dict(dict(timestamp=31339), **capab1), - self.host_manager_1.service_states_last_update['host1']) - - # S1: notify_service_capabilities() - self.host_manager_1.notify_service_capabilities(service_name, 'host1', - capab1, None) - - self.assertDictEqual(dict(dict(timestamp=31341), **capab1), - self.host_manager_1.service_states['host1']) - - self.assertDictEqual( - self.host_manager_1.service_states_last_update['host1'], - dict(dict(timestamp=31339), **capab1)) - - # Don't notify capab1 to ceilometer. 
- self.assertTrue(1, _mock_get_usage_and_notify.call_count) - - @mock.patch( - 'cinder.scheduler.host_manager.HostManager.get_usage_and_notify') - @mock.patch('oslo_utils.timeutils.utcnow') - def test_update_and_notify_service_capabilities_case3( - self, _mock_utcnow, - _mock_get_usage_and_notify): - - _mock_utcnow.side_effect = [31343, 31344, 31345] - - service_name = 'volume' - - capab1 = {'pools': [{ - 'pool_name': 'pool1', 'thick_provisioning_support': True, - 'thin_provisioning_support': False, 'total_capacity_gb': 10, - 'free_capacity_gb': 10, 'max_over_subscription_ratio': 1, - 'provisioned_capacity_gb': 0, 'allocated_capacity_gb': 0, - 'reserved_percentage': 0}]} - - self.host_manager.service_states['host1'] = ( - dict(dict(timestamp=31340), **capab1)) - self.host_manager.service_states_last_update['host1'] = ( - dict(dict(timestamp=31338), **capab1)) - self.host_manager_1.service_states['host1'] = ( - dict(dict(timestamp=31341), **capab1)) - self.host_manager_1.service_states_last_update['host1'] = ( - dict(dict(timestamp=31339), **capab1)) - - # Run 3: - # capa: capab1 - # S0: notify_service_capabilities() - # S0: update_service_capabilities() - # S1: update_service_capabilities() - # - # Don't notify capab1 to ceilometer. - - # S0: notify_service_capabilities() - self.host_manager.notify_service_capabilities(service_name, 'host1', - capab1, None) - self.assertDictEqual( - dict(dict(timestamp=31338), **capab1), - self.host_manager.service_states_last_update['host1']) - - self.assertDictEqual(dict(dict(timestamp=31340), **capab1), - self.host_manager.service_states['host1']) - - # Don't notify capab1 to ceilometer. 
- self.assertTrue(1, _mock_get_usage_and_notify.call_count) - - # S0: update_service_capabilities() - self.host_manager.update_service_capabilities(service_name, 'host1', - capab1, None, None) - - self.assertDictEqual( - dict(dict(timestamp=31340), **capab1), - self.host_manager.service_states_last_update['host1']) - - self.assertDictEqual(dict(dict(timestamp=31344), **capab1), - self.host_manager.service_states['host1']) - - # S1: update_service_capabilities() - self.host_manager_1.update_service_capabilities(service_name, 'host1', - capab1, None, None) - self.assertDictEqual(dict(dict(timestamp=31345), **capab1), - self.host_manager_1.service_states['host1']) - - self.assertDictEqual( - dict(dict(timestamp=31341), **capab1), - self.host_manager_1.service_states_last_update['host1']) - - @mock.patch( - 'cinder.scheduler.host_manager.HostManager.get_usage_and_notify') - @mock.patch('oslo_utils.timeutils.utcnow') - def test_update_and_notify_service_capabilities_case4( - self, _mock_utcnow, - _mock_get_usage_and_notify): - - _mock_utcnow.side_effect = [31346, 31347, 31348] - - service_name = 'volume' - - capab1 = {'pools': [{ - 'pool_name': 'pool1', 'thick_provisioning_support': True, - 'thin_provisioning_support': False, 'total_capacity_gb': 10, - 'free_capacity_gb': 10, 'max_over_subscription_ratio': 1, - 'provisioned_capacity_gb': 0, 'allocated_capacity_gb': 0, - 'reserved_percentage': 0}]} - - self.host_manager.service_states['host1'] = ( - dict(dict(timestamp=31344), **capab1)) - self.host_manager.service_states_last_update['host1'] = ( - dict(dict(timestamp=31340), **capab1)) - self.host_manager_1.service_states['host1'] = ( - dict(dict(timestamp=31345), **capab1)) - self.host_manager_1.service_states_last_update['host1'] = ( - dict(dict(timestamp=31341), **capab1)) - - capab2 = {'pools': [{ - 'pool_name': 'pool1', 'thick_provisioning_support': True, - 'thin_provisioning_support': False, 'total_capacity_gb': 10, - 'free_capacity_gb': 9, 
'max_over_subscription_ratio': 1, - 'provisioned_capacity_gb': 1, 'allocated_capacity_gb': 1, - 'reserved_percentage': 0}]} - - # Run 4: - # capa: capab2 - # S0: update_service_capabilities() - # S1: notify_service_capabilities() - # S1: update_service_capabilities() - # - # notify capab2 to ceilometer. - - # S0: update_service_capabilities() - self.host_manager.update_service_capabilities(service_name, 'host1', - capab2, None, None) - self.assertDictEqual( - dict(dict(timestamp=31340), **capab1), - self.host_manager.service_states_last_update['host1']) - - self.assertDictEqual(dict(dict(timestamp=31346), **capab2), - self.host_manager.service_states['host1']) - - # S1: notify_service_capabilities() - self.host_manager_1.notify_service_capabilities(service_name, 'host1', - capab2, None) - self.assertDictEqual(dict(dict(timestamp=31345), **capab1), - self.host_manager_1.service_states['host1']) - - self.assertDictEqual( - dict(dict(timestamp=31347), **capab2), - self.host_manager_1.service_states_last_update['host1']) - - # notify capab2 to ceilometer. 
- self.assertTrue(2, _mock_get_usage_and_notify.call_count) - - # S1: update_service_capabilities() - self.host_manager_1.update_service_capabilities(service_name, 'host1', - capab2, None, None) - self.assertDictEqual(dict(dict(timestamp=31348), **capab2), - self.host_manager_1.service_states['host1']) - - self.assertDictEqual( - dict(dict(timestamp=31347), **capab2), - self.host_manager_1.service_states_last_update['host1']) - - @mock.patch( - 'cinder.scheduler.host_manager.HostManager.get_usage_and_notify') - @mock.patch('oslo_utils.timeutils.utcnow') - def test_update_and_notify_service_capabilities_case5( - self, _mock_utcnow, - _mock_get_usage_and_notify): - - _mock_utcnow.side_effect = [31349, 31350, 31351] - - service_name = 'volume' - - capab1 = {'pools': [{ - 'pool_name': 'pool1', 'thick_provisioning_support': True, - 'thin_provisioning_support': False, 'total_capacity_gb': 10, - 'free_capacity_gb': 10, 'max_over_subscription_ratio': 1, - 'provisioned_capacity_gb': 0, 'allocated_capacity_gb': 0, - 'reserved_percentage': 0}]} - - capab2 = {'pools': [{ - 'pool_name': 'pool1', 'thick_provisioning_support': True, - 'thin_provisioning_support': False, 'total_capacity_gb': 10, - 'free_capacity_gb': 9, 'max_over_subscription_ratio': 1, - 'provisioned_capacity_gb': 1, 'allocated_capacity_gb': 1, - 'reserved_percentage': 0}]} - - self.host_manager.service_states['host1'] = ( - dict(dict(timestamp=31346), **capab2)) - self.host_manager.service_states_last_update['host1'] = ( - dict(dict(timestamp=31340), **capab1)) - self.host_manager_1.service_states['host1'] = ( - dict(dict(timestamp=31348), **capab2)) - self.host_manager_1.service_states_last_update['host1'] = ( - dict(dict(timestamp=31347), **capab2)) - - # Run 5: - # capa: capa2 - # S0: notify_service_capabilities() - # S0: update_service_capabilities() - # S1: update_service_capabilities() - # - # This is the special case not handled. 
- # 1) capab is changed (from capab1 to capab2) - # 2) S1 has already notify the capab2 in Run 4. - # 3) S0 just got update_service_capabilities() in Run 4. - # 4) S0 got notify_service_capabilities() immediately in next run, - # here is Run 5. - # S0 has no ways to know whether other scheduler (here is S1) who - # has noitified the changed capab2 or not. S0 just thinks it's his - # own turn to notify the changed capab2. - # In this case, we have notified the same capabilities twice. - # - # S0: notify_service_capabilities() - self.host_manager.notify_service_capabilities(service_name, 'host1', - capab2, None) - self.assertDictEqual( - dict(dict(timestamp=31349), **capab2), - self.host_manager.service_states_last_update['host1']) - - self.assertDictEqual(dict(dict(timestamp=31346), **capab2), - self.host_manager.service_states['host1']) - - # S0 notify capab2 to ceilometer. - self.assertTrue(3, _mock_get_usage_and_notify.call_count) - - # S0: update_service_capabilities() - self.host_manager.update_service_capabilities(service_name, 'host1', - capab2, None, None) - self.assertDictEqual( - dict(dict(timestamp=31349), **capab2), - self.host_manager.service_states_last_update['host1']) - - self.assertDictEqual(dict(dict(timestamp=31350), **capab2), - self.host_manager.service_states['host1']) - - # S1: update_service_capabilities() - self.host_manager_1.update_service_capabilities(service_name, 'host1', - capab2, None, None) - - self.assertDictEqual( - dict(dict(timestamp=31348), **capab2), - self.host_manager_1.service_states_last_update['host1']) - - self.assertDictEqual(dict(dict(timestamp=31351), **capab2), - self.host_manager_1.service_states['host1']) - - @mock.patch('cinder.objects.service.Service.is_up', - new_callable=mock.PropertyMock) - @mock.patch('cinder.db.service_get_all') - def test_has_all_capabilities(self, _mock_service_get_all, - _mock_service_is_up): - _mock_service_is_up.return_value = True - services = [ - dict(id=1, host='host1', 
topic='volume', disabled=False, - availability_zone='zone1', updated_at=timeutils.utcnow()), - dict(id=2, host='host2', topic='volume', disabled=False, - availability_zone='zone1', updated_at=timeutils.utcnow()), - dict(id=3, host='host3', topic='volume', disabled=False, - availability_zone='zone1', updated_at=timeutils.utcnow()), - ] - _mock_service_get_all.return_value = services - # Create host_manager again to let db.service_get_all mock run - self.host_manager = host_manager.HostManager() - self.assertFalse(self.host_manager.has_all_capabilities()) - - timestamp = jsonutils.to_primitive(datetime.utcnow()) - host1_volume_capabs = dict(free_capacity_gb=4321) - host2_volume_capabs = dict(free_capacity_gb=5432) - host3_volume_capabs = dict(free_capacity_gb=6543) - - service_name = 'volume' - self.host_manager.update_service_capabilities(service_name, 'host1', - host1_volume_capabs, - None, timestamp) - self.assertFalse(self.host_manager.has_all_capabilities()) - self.host_manager.update_service_capabilities(service_name, 'host2', - host2_volume_capabs, - None, timestamp) - self.assertFalse(self.host_manager.has_all_capabilities()) - self.host_manager.update_service_capabilities(service_name, 'host3', - host3_volume_capabs, - None, timestamp) - self.assertTrue(self.host_manager.has_all_capabilities()) - - @mock.patch('cinder.db.service_get_all') - @mock.patch('cinder.objects.service.Service.is_up', - new_callable=mock.PropertyMock) - @mock.patch('oslo_utils.timeutils.utcnow') - def test_update_and_get_pools(self, _mock_utcnow, - _mock_service_is_up, - _mock_service_get_all): - """Test interaction between update and get_pools - - This test verifies that each time that get_pools is called it gets the - latest copy of service_capabilities, which is timestamped with the - current date/time. 
- """ - context = 'fake_context' - dates = [datetime.fromtimestamp(400), datetime.fromtimestamp(401), - datetime.fromtimestamp(402)] - _mock_utcnow.side_effect = dates - - services = [ - # This is the first call to utcnow() - dict(id=1, host='host1', topic='volume', disabled=False, - availability_zone='zone1', updated_at=timeutils.utcnow()), - ] - - mocked_service_states = { - 'host1': dict(volume_backend_name='AAA', - total_capacity_gb=512, free_capacity_gb=200, - timestamp=dates[1], reserved_percentage=0), - } - - _mock_service_get_all.return_value = services - _mock_service_is_up.return_value = True - _mock_warning = mock.Mock() - host_manager.LOG.warn = _mock_warning - - host_volume_capabs = dict(free_capacity_gb=4321) - - service_name = 'volume' - with mock.patch.dict(self.host_manager.service_states, - mocked_service_states): - self.host_manager.update_service_capabilities(service_name, - 'host1', - host_volume_capabs, - None, None) - res = self.host_manager.get_pools(context) - self.assertEqual(1, len(res)) - self.assertEqual(dates[1], res[0]['capabilities']['timestamp']) - - @mock.patch('cinder.objects.Service.is_up', True) - def test_get_all_backend_states_cluster(self): - """Test get_all_backend_states when we have clustered services. - - Confirm that clustered services are grouped and that only the latest - of the capability reports is relevant. - """ - ctxt = context.RequestContext(fake.USER_ID, fake.PROJECT_ID, True) - - cluster_name = 'cluster' - db.cluster_create(ctxt, {'name': cluster_name, - 'binary': constants.VOLUME_BINARY}) - - services = ( - db.service_create(ctxt, - {'host': 'clustered_host_1', - 'topic': constants.VOLUME_TOPIC, - 'binary': constants.VOLUME_BINARY, - 'cluster_name': cluster_name, - 'created_at': timeutils.utcnow()}), - # Even if this service is disabled, since it belongs to an enabled - # cluster, it's not really disabled. 
- db.service_create(ctxt, - {'host': 'clustered_host_2', - 'topic': constants.VOLUME_TOPIC, - 'binary': constants.VOLUME_BINARY, - 'disabled': True, - 'cluster_name': cluster_name, - 'created_at': timeutils.utcnow()}), - db.service_create(ctxt, - {'host': 'clustered_host_3', - 'topic': constants.VOLUME_TOPIC, - 'binary': constants.VOLUME_BINARY, - 'cluster_name': cluster_name, - 'created_at': timeutils.utcnow()}), - db.service_create(ctxt, - {'host': 'non_clustered_host', - 'topic': constants.VOLUME_TOPIC, - 'binary': constants.VOLUME_BINARY, - 'created_at': timeutils.utcnow()}), - # This service has no capabilities - db.service_create(ctxt, - {'host': 'no_capabilities_host', - 'topic': constants.VOLUME_TOPIC, - 'binary': constants.VOLUME_BINARY, - 'created_at': timeutils.utcnow()}), - ) - - capabilities = ((1, {'free_capacity_gb': 1000}), - # This is the capacity that will be selected for the - # cluster because is the one with the latest timestamp. - (3, {'free_capacity_gb': 2000}), - (2, {'free_capacity_gb': 3000}), - (1, {'free_capacity_gb': 4000})) - - for i in range(len(capabilities)): - self.host_manager.update_service_capabilities( - 'volume', services[i].host, capabilities[i][1], - services[i].cluster_name, capabilities[i][0]) - - res = self.host_manager.get_all_backend_states(ctxt) - result = {(s.cluster_name or s.host, s.free_capacity_gb) for s in res} - expected = {(cluster_name + '#_pool0', 2000), - ('non_clustered_host#_pool0', 4000)} - self.assertSetEqual(expected, result) - - @mock.patch('cinder.db.service_get_all') - @mock.patch('cinder.objects.service.Service.is_up', - new_callable=mock.PropertyMock) - def test_get_all_backend_states(self, _mock_service_is_up, - _mock_service_get_all): - context = 'fake_context' - timestamp = datetime.utcnow() - topic = constants.VOLUME_TOPIC - - services = [ - dict(id=1, host='host1', topic='volume', disabled=False, - availability_zone='zone1', updated_at=timeutils.utcnow(), - binary=None, deleted=False, 
created_at=None, modified_at=None, - report_count=0, deleted_at=None, disabled_reason=None), - dict(id=2, host='host2', topic='volume', disabled=False, - availability_zone='zone1', updated_at=timeutils.utcnow(), - binary=None, deleted=False, created_at=None, modified_at=None, - report_count=0, deleted_at=None, disabled_reason=None), - dict(id=3, host='host3', topic='volume', disabled=False, - availability_zone='zone2', updated_at=timeutils.utcnow(), - binary=None, deleted=False, created_at=None, modified_at=None, - report_count=0, deleted_at=None, disabled_reason=None), - dict(id=4, host='host4', topic='volume', disabled=False, - availability_zone='zone3', updated_at=timeutils.utcnow(), - binary=None, deleted=False, created_at=None, modified_at=None, - report_count=0, deleted_at=None, disabled_reason=None), - ] - - service_objs = [] - for db_service in services: - service_obj = objects.Service() - service_objs.append(objects.Service._from_db_object(context, - service_obj, - db_service)) - - service_states = { - 'host1': dict(volume_backend_name='AAA', - total_capacity_gb=512, free_capacity_gb=200, - timestamp=timestamp, reserved_percentage=0, - provisioned_capacity_gb=312), - 'host2': dict(volume_backend_name='BBB', - total_capacity_gb=256, free_capacity_gb=100, - timestamp=timestamp, reserved_percentage=0, - provisioned_capacity_gb=156), - 'host3': dict(volume_backend_name='CCC', - total_capacity_gb=10000, free_capacity_gb=700, - timestamp=timestamp, reserved_percentage=0, - provisioned_capacity_gb=9300), - } - # First test: service.is_up is always True, host5 is disabled, - # host4 has no capabilities - self.host_manager.service_states = service_states - _mock_service_get_all.return_value = services - _mock_service_is_up.return_value = True - _mock_warning = mock.Mock() - host_manager.LOG.warning = _mock_warning - - # Get all states - self.host_manager.get_all_backend_states(context) - _mock_service_get_all.assert_called_with(context, - disabled=False, - 
frozen=False, - topic=topic) - - # verify that Service.is_up was called for each srv - expected = [mock.call() for s in service_objs] - self.assertEqual(expected, _mock_service_is_up.call_args_list) - - # Get backend_state_map and make sure we have the first 3 hosts - backend_state_map = self.host_manager.backend_state_map - self.assertEqual(3, len(backend_state_map)) - for i in range(3): - volume_node = services[i] - host = volume_node['host'] - test_service.TestService._compare(self, volume_node, - backend_state_map[host].service) - - # Second test: Now service.is_up returns False for host3 - _mock_service_is_up.reset_mock() - _mock_service_is_up.side_effect = [True, True, False, True] - _mock_service_get_all.reset_mock() - _mock_warning.reset_mock() - - # Get all states, make sure host 3 is reported as down - self.host_manager.get_all_backend_states(context) - _mock_service_get_all.assert_called_with(context, - disabled=False, - frozen=False, - topic=topic) - - self.assertEqual(expected, _mock_service_is_up.call_args_list) - self.assertGreater(_mock_warning.call_count, 0) - - # Get backend_state_map and make sure we have the first 2 hosts (host3 - # is down, host4 is missing capabilities) - backend_state_map = self.host_manager.backend_state_map - self.assertEqual(2, len(backend_state_map)) - for i in range(2): - volume_node = services[i] - host = volume_node['host'] - test_service.TestService._compare(self, volume_node, - backend_state_map[host].service) - - @mock.patch('cinder.db.service_get_all') - @mock.patch('cinder.objects.service.Service.is_up', - new_callable=mock.PropertyMock) - def test_get_pools(self, _mock_service_is_up, - _mock_service_get_all): - context = 'fake_context' - timestamp = datetime.utcnow() - - services = [ - dict(id=1, host='host1', topic='volume', disabled=False, - availability_zone='zone1', updated_at=timeutils.utcnow()), - dict(id=2, host='host2@back1', topic='volume', disabled=False, - availability_zone='zone1', 
updated_at=timeutils.utcnow()), - dict(id=3, host='host2@back2', topic='volume', disabled=False, - availability_zone='zone2', updated_at=timeutils.utcnow()), - ] - - mocked_service_states = { - 'host1': dict(volume_backend_name='AAA', - total_capacity_gb=512, free_capacity_gb=200, - timestamp=timestamp, reserved_percentage=0, - provisioned_capacity_gb=312), - 'host2@back1': dict(volume_backend_name='BBB', - total_capacity_gb=256, free_capacity_gb=100, - timestamp=timestamp, reserved_percentage=0, - provisioned_capacity_gb=156), - 'host2@back2': dict(volume_backend_name='CCC', - total_capacity_gb=10000, free_capacity_gb=700, - timestamp=timestamp, reserved_percentage=0, - provisioned_capacity_gb=9300), - } - - _mock_service_get_all.return_value = services - _mock_service_is_up.return_value = True - _mock_warning = mock.Mock() - host_manager.LOG.warn = _mock_warning - - with mock.patch.dict(self.host_manager.service_states, - mocked_service_states): - res = self.host_manager.get_pools(context) - - # check if get_pools returns all 3 pools - self.assertEqual(3, len(res)) - - expected = [ - { - 'name': 'host1#AAA', - 'capabilities': { - 'timestamp': timestamp, - 'volume_backend_name': 'AAA', - 'free_capacity_gb': 200, - 'driver_version': None, - 'total_capacity_gb': 512, - 'reserved_percentage': 0, - 'vendor_name': None, - 'storage_protocol': None, - 'provisioned_capacity_gb': 312}, - }, - { - 'name': 'host2@back1#BBB', - 'capabilities': { - 'timestamp': timestamp, - 'volume_backend_name': 'BBB', - 'free_capacity_gb': 100, - 'driver_version': None, - 'total_capacity_gb': 256, - 'reserved_percentage': 0, - 'vendor_name': None, - 'storage_protocol': None, - 'provisioned_capacity_gb': 156}, - }, - { - 'name': 'host2@back2#CCC', - 'capabilities': { - 'timestamp': timestamp, - 'volume_backend_name': 'CCC', - 'free_capacity_gb': 700, - 'driver_version': None, - 'total_capacity_gb': 10000, - 'reserved_percentage': 0, - 'vendor_name': None, - 'storage_protocol': None, - 
'provisioned_capacity_gb': 9300}, - } - ] - - def sort_func(data): - return data['name'] - - self.assertEqual(len(expected), len(res)) - self.assertEqual(sorted(expected, key=sort_func), - sorted(res, key=sort_func)) - - def test_get_usage(self): - host = "host1@backend1" - timestamp = 40000 - volume_stats1 = {'pools': [ - {'pool_name': 'pool1', - 'total_capacity_gb': 30.01, - 'free_capacity_gb': 28.01, - 'allocated_capacity_gb': 2.0, - 'provisioned_capacity_gb': 2.0, - 'max_over_subscription_ratio': 1.0, - 'thin_provisioning_support': False, - 'thick_provisioning_support': True, - 'reserved_percentage': 5}, - {'pool_name': 'pool2', - 'total_capacity_gb': 20.01, - 'free_capacity_gb': 18.01, - 'allocated_capacity_gb': 2.0, - 'provisioned_capacity_gb': 2.0, - 'max_over_subscription_ratio': 2.0, - 'thin_provisioning_support': True, - 'thick_provisioning_support': False, - 'reserved_percentage': 5}]} - - updated_pools1 = [{'pool_name': 'pool1', - 'total_capacity_gb': 30.01, - 'free_capacity_gb': 28.01, - 'allocated_capacity_gb': 2.0, - 'provisioned_capacity_gb': 2.0, - 'max_over_subscription_ratio': 1.0, - 'thin_provisioning_support': False, - 'thick_provisioning_support': True, - 'reserved_percentage': 5}, - {'pool_name': 'pool2', - 'total_capacity_gb': 20.01, - 'free_capacity_gb': 18.01, - 'allocated_capacity_gb': 2.0, - 'provisioned_capacity_gb': 2.0, - 'max_over_subscription_ratio': 2.0, - 'thin_provisioning_support': True, - 'thick_provisioning_support': False, - 'reserved_percentage': 5}] - - volume_stats2 = {'pools': [ - {'pool_name': 'pool1', - 'total_capacity_gb': 30.01, - 'free_capacity_gb': 28.01, - 'allocated_capacity_gb': 2.0, - 'provisioned_capacity_gb': 2.0, - 'max_over_subscription_ratio': 2.0, - 'thin_provisioning_support': True, - 'thick_provisioning_support': False, - 'reserved_percentage': 0}, - {'pool_name': 'pool2', - 'total_capacity_gb': 20.01, - 'free_capacity_gb': 18.01, - 'allocated_capacity_gb': 2.0, - 'provisioned_capacity_gb': 2.0, - 
'max_over_subscription_ratio': 2.0, - 'thin_provisioning_support': True, - 'thick_provisioning_support': False, - 'reserved_percentage': 5}]} - - updated_pools2 = [{'pool_name': 'pool1', - 'total_capacity_gb': 30.01, - 'free_capacity_gb': 28.01, - 'allocated_capacity_gb': 2.0, - 'provisioned_capacity_gb': 2.0, - 'max_over_subscription_ratio': 2.0, - 'thin_provisioning_support': True, - 'thick_provisioning_support': False, - 'reserved_percentage': 0}] - - expected1 = [ - {"name_to_id": 'host1@backend1#pool1', - "type": "pool", - "total": 30.01, - "free": 28.01, - "allocated": 2.0, - "provisioned": 2.0, - "virtual_free": 27.01, - "reported_at": 40000}, - {"name_to_id": 'host1@backend1#pool2', - "type": "pool", - "total": 20.01, - "free": 18.01, - "allocated": 2.0, - "provisioned": 2.0, - "virtual_free": 37.02, - "reported_at": 40000}, - {"name_to_id": 'host1@backend1', - "type": "backend", - "total": 50.02, - "free": 46.02, - "allocated": 4.0, - "provisioned": 4.0, - "virtual_free": 64.03, - "reported_at": 40000}] - - expected2 = [ - {"name_to_id": 'host1@backend1#pool1', - "type": "pool", - "total": 30.01, - "free": 28.01, - "allocated": 2.0, - "provisioned": 2.0, - "virtual_free": 58.02, - "reported_at": 40000}, - {"name_to_id": 'host1@backend1', - "type": "backend", - "total": 50.02, - "free": 46.02, - "allocated": 4.0, - "provisioned": 4.0, - "virtual_free": 95.04, - "reported_at": 40000}] - - def sort_func(data): - return data['name_to_id'] - - res1 = self.host_manager._get_usage(volume_stats1, - updated_pools1, host, timestamp) - self.assertEqual(len(expected1), len(res1)) - self.assertEqual(sorted(expected1, key=sort_func), - sorted(res1, key=sort_func)) - - res2 = self.host_manager._get_usage(volume_stats2, - updated_pools2, host, timestamp) - self.assertEqual(len(expected2), len(res2)) - self.assertEqual(sorted(expected2, key=sort_func), - sorted(res2, key=sort_func)) - - @mock.patch('cinder.db.service_get_all') - 
@mock.patch('cinder.objects.service.Service.is_up', - new_callable=mock.PropertyMock) - def test_get_pools_filter_name(self, _mock_service_is_up, - _mock_service_get_all_by_topic): - context = 'fake_context' - - services = [ - dict(id=1, host='host1', topic='volume', disabled=False, - availability_zone='zone1', updated_at=timeutils.utcnow()), - dict(id=2, host='host2@back1', topic='volume', disabled=False, - availability_zone='zone1', updated_at=timeutils.utcnow()) - ] - - mocked_service_states = { - 'host1': dict(volume_backend_name='AAA', - total_capacity_gb=512, free_capacity_gb=200, - timestamp=None, reserved_percentage=0, - provisioned_capacity_gb=312), - 'host2@back1': dict(volume_backend_name='BBB', - total_capacity_gb=256, free_capacity_gb=100, - timestamp=None, reserved_percentage=0, - provisioned_capacity_gb=156) - } - - _mock_service_get_all_by_topic.return_value = services - _mock_service_is_up.return_value = True - _mock_warning = mock.Mock() - host_manager.LOG.warn = _mock_warning - - with mock.patch.dict(self.host_manager.service_states, - mocked_service_states): - filters = {'name': 'host1#AAA'} - res = self.host_manager.get_pools(context, filters=filters) - - expected = [ - { - 'name': 'host1#AAA', - 'capabilities': { - 'timestamp': None, - 'volume_backend_name': 'AAA', - 'free_capacity_gb': 200, - 'driver_version': None, - 'total_capacity_gb': 512, - 'reserved_percentage': 0, - 'vendor_name': None, - 'storage_protocol': None, - 'provisioned_capacity_gb': 312}, - } - ] - - self.assertEqual(expected, res) - - @mock.patch('cinder.scheduler.host_manager.HostManager.' 
- '_choose_backend_filters') - def test_get_pools_filtered_by_volume_type(self, - _mock_choose_backend_filters): - context = 'fake_context' - filter_class = FakeFilterClass3 - _mock_choose_backend_filters.return_value = [filter_class] - - hosts = { - 'host1': {'volume_backend_name': 'AAA', - 'total_capacity_gb': 512, - 'free_capacity_gb': 200, - 'timestamp': None, - 'reserved_percentage': 0, - 'provisioned_capacity_gb': 312}, - 'host2@back1': {'volume_backend_name': 'BBB', - 'total_capacity_gb': 256, - 'free_capacity_gb': 100, - 'timestamp': None, - 'reserved_percentage': 0, - 'provisioned_capacity_gb': 156}} - mock_warning = mock.Mock() - host_manager.LOG.warn = mock_warning - mock_volume_type = { - 'volume_backend_name': 'AAA', - 'qos_specs': 'BBB', - } - - res = self.host_manager._filter_pools_by_volume_type(context, - mock_volume_type, - hosts) - expected = {'host1': {'volume_backend_name': 'AAA', - 'total_capacity_gb': 512, - 'free_capacity_gb': 200, - 'timestamp': None, 'reserved_percentage': 0, - 'provisioned_capacity_gb': 312}} - - self.assertEqual(expected, res) - - @mock.patch('cinder.db.service_get_all') - @mock.patch('cinder.objects.service.Service.is_up', - new_callable=mock.PropertyMock) - def test_get_pools_filter_mulitattach(self, _mock_service_is_up, - _mock_service_get_all_by_topic): - context = 'fake_context' - - services = [ - dict(id=1, host='host1', topic='volume', disabled=False, - availability_zone='zone1', updated_at=timeutils.utcnow()), - dict(id=2, host='host2@back1', topic='volume', disabled=False, - availability_zone='zone1', updated_at=timeutils.utcnow()) - ] - - mocked_service_states = { - 'host1': dict(volume_backend_name='AAA', - total_capacity_gb=512, free_capacity_gb=200, - timestamp=None, reserved_percentage=0, - multiattach=True), - 'host2@back1': dict(volume_backend_name='BBB', - total_capacity_gb=256, free_capacity_gb=100, - timestamp=None, reserved_percentage=0, - multiattach=False) - } - - 
_mock_service_get_all_by_topic.return_value = services - _mock_service_is_up.return_value = True - _mock_warning = mock.Mock() - host_manager.LOG.warn = _mock_warning - - with mock.patch.dict(self.host_manager.service_states, - mocked_service_states): - filters_t = {'multiattach': 'true'} - filters_f = {'multiattach': False} - res_t = self.host_manager.get_pools(context, filters=filters_t) - res_f = self.host_manager.get_pools(context, filters=filters_f) - - expected_t = [ - { - 'name': 'host1#AAA', - 'capabilities': { - 'timestamp': None, - 'volume_backend_name': 'AAA', - 'free_capacity_gb': 200, - 'driver_version': None, - 'total_capacity_gb': 512, - 'reserved_percentage': 0, - 'vendor_name': None, - 'storage_protocol': None, - 'multiattach': True}, - } - ] - expected_f = [ - { - 'name': 'host2@back1#BBB', - 'capabilities': { - 'timestamp': None, - 'volume_backend_name': 'BBB', - 'free_capacity_gb': 100, - 'driver_version': None, - 'total_capacity_gb': 256, - 'reserved_percentage': 0, - 'vendor_name': None, - 'storage_protocol': None, - 'multiattach': False}, - } - ] - - self.assertEqual(expected_t, res_t) - self.assertEqual(expected_f, res_f) - - @ddt.data( - (None, None, True), - (None, 'value', False), - ('cap', None, False), - (False, 'True', False), - (True, 'True', True), - (True, True, True), - (False, 'false', True), - (1.1, '1.1', True), - (0, '0', True), - (1.1, '1.11', False), - ('str', 'str', True), - ('str1', 'str2', False), - ('str', 'StR', False), - ([], [], True), - (['hdd', 'ssd'], ['ssd'], False), - (['hdd', 'ssd'], ['ssd', 'hdd'], False), - (['hdd', 'ssd'], "['hdd', 'ssd']", True), - ({}, {}, True), - ({'a': 'a', 'b': 'b'}, {'b': 'b', 'a': 'a'}, True), - ({'a': 'a', 'b': 'b'}, {'b': 'b'}, False), - ({'a': 'a'}, "{'a': 'a'}", True), - ) - @ddt.unpack - def test_equal_after_convert(self, cap, value, ret_value): - self.assertEqual(ret_value, - self.host_manager._equal_after_convert(cap, value)) - - -class BackendStateTestCase(test.TestCase): - 
"""Test case for BackendState class.""" - - def test_update_from_volume_capability_nopool(self): - fake_backend = host_manager.BackendState('be1', None) - self.assertIsNone(fake_backend.free_capacity_gb) - - volume_capability = {'total_capacity_gb': 1024, - 'free_capacity_gb': 512, - 'provisioned_capacity_gb': 512, - 'reserved_percentage': 0, - 'timestamp': None} - - fake_backend.update_from_volume_capability(volume_capability) - # Backend level stats remain uninitialized - self.assertEqual(0, fake_backend.total_capacity_gb) - self.assertIsNone(fake_backend.free_capacity_gb) - # Pool stats has been updated - self.assertEqual(1024, fake_backend.pools['_pool0'].total_capacity_gb) - self.assertEqual(512, fake_backend.pools['_pool0'].free_capacity_gb) - self.assertEqual(512, - fake_backend.pools['_pool0'].provisioned_capacity_gb) - - # Test update for existing host state - volume_capability.update(dict(total_capacity_gb=1000)) - fake_backend.update_from_volume_capability(volume_capability) - self.assertEqual(1000, fake_backend.pools['_pool0'].total_capacity_gb) - - # Test update for existing host state with different backend name - volume_capability.update(dict(volume_backend_name='magic')) - fake_backend.update_from_volume_capability(volume_capability) - self.assertEqual(1000, fake_backend.pools['magic'].total_capacity_gb) - self.assertEqual(512, fake_backend.pools['magic'].free_capacity_gb) - self.assertEqual(512, - fake_backend.pools['magic'].provisioned_capacity_gb) - # 'pool0' becomes nonactive pool, and is deleted - self.assertRaises(KeyError, lambda: fake_backend.pools['pool0']) - - def test_update_from_volume_capability_with_pools(self): - fake_backend = host_manager.BackendState('host1', None) - self.assertIsNone(fake_backend.free_capacity_gb) - capability = { - 'volume_backend_name': 'Local iSCSI', - 'vendor_name': 'OpenStack', - 'driver_version': '1.0.1', - 'storage_protocol': 'iSCSI', - 'pools': [ - {'pool_name': '1st pool', - 'total_capacity_gb': 500, - 
'free_capacity_gb': 230, - 'allocated_capacity_gb': 270, - 'provisioned_capacity_gb': 270, - 'QoS_support': 'False', - 'reserved_percentage': 0, - 'dying_disks': 100, - 'super_hero_1': 'spider-man', - 'super_hero_2': 'flash', - 'super_hero_3': 'neoncat', - }, - {'pool_name': '2nd pool', - 'total_capacity_gb': 1024, - 'free_capacity_gb': 1024, - 'allocated_capacity_gb': 0, - 'provisioned_capacity_gb': 0, - 'QoS_support': 'False', - 'reserved_percentage': 0, - 'dying_disks': 200, - 'super_hero_1': 'superman', - 'super_hero_2': 'Hulk', - } - ], - 'timestamp': None, - } - - fake_backend.update_from_volume_capability(capability) - - self.assertEqual('Local iSCSI', fake_backend.volume_backend_name) - self.assertEqual('iSCSI', fake_backend.storage_protocol) - self.assertEqual('OpenStack', fake_backend.vendor_name) - self.assertEqual('1.0.1', fake_backend.driver_version) - - # Backend level stats remain uninitialized - self.assertEqual(0, fake_backend.total_capacity_gb) - self.assertIsNone(fake_backend.free_capacity_gb) - # Pool stats has been updated - self.assertEqual(2, len(fake_backend.pools)) - - self.assertEqual(500, fake_backend.pools['1st pool'].total_capacity_gb) - self.assertEqual(230, fake_backend.pools['1st pool'].free_capacity_gb) - self.assertEqual( - 270, fake_backend.pools['1st pool'].provisioned_capacity_gb) - self.assertEqual( - 1024, fake_backend.pools['2nd pool'].total_capacity_gb) - self.assertEqual(1024, fake_backend.pools['2nd pool'].free_capacity_gb) - self.assertEqual( - 0, fake_backend.pools['2nd pool'].provisioned_capacity_gb) - - capability = { - 'volume_backend_name': 'Local iSCSI', - 'vendor_name': 'OpenStack', - 'driver_version': '1.0.2', - 'storage_protocol': 'iSCSI', - 'pools': [ - {'pool_name': '3rd pool', - 'total_capacity_gb': 10000, - 'free_capacity_gb': 10000, - 'allocated_capacity_gb': 0, - 'provisioned_capacity_gb': 0, - 'QoS_support': 'False', - 'reserved_percentage': 0, - }, - ], - 'timestamp': None, - } - - # test update 
BackendState Record - fake_backend.update_from_volume_capability(capability) - - self.assertEqual('1.0.2', fake_backend.driver_version) - - # Non-active pool stats has been removed - self.assertEqual(1, len(fake_backend.pools)) - - self.assertRaises(KeyError, lambda: fake_backend.pools['1st pool']) - self.assertRaises(KeyError, lambda: fake_backend.pools['2nd pool']) - - self.assertEqual(10000, - fake_backend.pools['3rd pool'].total_capacity_gb) - self.assertEqual(10000, - fake_backend.pools['3rd pool'].free_capacity_gb) - self.assertEqual( - 0, fake_backend.pools['3rd pool'].provisioned_capacity_gb) - - def test_update_from_volume_infinite_capability(self): - fake_backend = host_manager.BackendState('host1', None) - self.assertIsNone(fake_backend.free_capacity_gb) - - volume_capability = {'total_capacity_gb': 'infinite', - 'free_capacity_gb': 'infinite', - 'reserved_percentage': 0, - 'timestamp': None} - - fake_backend.update_from_volume_capability(volume_capability) - # Backend level stats remain uninitialized - self.assertEqual(0, fake_backend.total_capacity_gb) - self.assertIsNone(fake_backend.free_capacity_gb) - # Pool stats has been updated - self.assertEqual( - 'infinite', - fake_backend.pools['_pool0'].total_capacity_gb) - self.assertEqual( - 'infinite', - fake_backend.pools['_pool0'].free_capacity_gb) - - def test_update_from_volume_unknown_capability(self): - fake_backend = host_manager.BackendState('host1', None) - self.assertIsNone(fake_backend.free_capacity_gb) - - volume_capability = {'total_capacity_gb': 'infinite', - 'free_capacity_gb': 'unknown', - 'reserved_percentage': 0, - 'timestamp': None} - - fake_backend.update_from_volume_capability(volume_capability) - # Backend level stats remain uninitialized - self.assertEqual(0, fake_backend.total_capacity_gb) - self.assertIsNone(fake_backend.free_capacity_gb) - # Pool stats has been updated - self.assertEqual( - 'infinite', - fake_backend.pools['_pool0'].total_capacity_gb) - self.assertEqual( - 
'unknown', - fake_backend.pools['_pool0'].free_capacity_gb) - - def test_update_from_empty_volume_capability(self): - fake_backend = host_manager.BackendState('host1', None) - - vol_cap = {'timestamp': None} - - fake_backend.update_from_volume_capability(vol_cap) - self.assertEqual(0, fake_backend.total_capacity_gb) - self.assertIsNone(fake_backend.free_capacity_gb) - # Pool stats has been updated - self.assertEqual(0, - fake_backend.pools['_pool0'].total_capacity_gb) - self.assertEqual(0, - fake_backend.pools['_pool0'].free_capacity_gb) - self.assertEqual(0, - fake_backend.pools['_pool0'].provisioned_capacity_gb) - - -class PoolStateTestCase(test.TestCase): - """Test case for BackendState class.""" - - def test_update_from_volume_capability(self): - fake_pool = host_manager.PoolState('host1', None, None, 'pool0') - self.assertIsNone(fake_pool.free_capacity_gb) - - volume_capability = {'total_capacity_gb': 1024, - 'free_capacity_gb': 512, - 'reserved_percentage': 0, - 'provisioned_capacity_gb': 512, - 'timestamp': None, - 'cap1': 'val1', - 'cap2': 'val2'} - - fake_pool.update_from_volume_capability(volume_capability) - self.assertEqual('host1#pool0', fake_pool.host) - self.assertEqual('pool0', fake_pool.pool_name) - self.assertEqual(1024, fake_pool.total_capacity_gb) - self.assertEqual(512, fake_pool.free_capacity_gb) - self.assertEqual(512, - fake_pool.provisioned_capacity_gb) - - self.assertDictEqual(volume_capability, dict(fake_pool.capabilities)) diff --git a/cinder/tests/unit/scheduler/test_rpcapi.py b/cinder/tests/unit/scheduler/test_rpcapi.py deleted file mode 100644 index d3863dd91..000000000 --- a/cinder/tests/unit/scheduler/test_rpcapi.py +++ /dev/null @@ -1,245 +0,0 @@ - -# Copyright 2012, Red Hat, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. 
You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -""" -Unit Tests for cinder.scheduler.rpcapi -""" - -from datetime import datetime - -import ddt -import mock - -from cinder import exception -from cinder import objects -from cinder.scheduler import rpcapi as scheduler_rpcapi -from cinder import test -from cinder.tests.unit import fake_constants -from cinder.tests.unit import fake_volume - - -@ddt.ddt -class SchedulerRPCAPITestCase(test.RPCAPITestCase): - def setUp(self): - super(SchedulerRPCAPITestCase, self).setUp() - self.rpcapi = scheduler_rpcapi.SchedulerAPI - self.base_version = '3.0' - self.volume_id = fake_constants.VOLUME_ID - self.fake_volume = fake_volume.fake_volume_obj( - self.context, expected_attrs=['metadata', 'admin_metadata', - 'glance_metadata']) - self.fake_rs_obj = objects.RequestSpec.from_primitives({}) - self.fake_rs_dict = {'volume_id': self.volume_id} - self.fake_fp_dict = {'availability_zone': 'fake_az'} - - @ddt.data('3.0', '3.3') - @mock.patch('oslo_messaging.RPCClient.can_send_version') - def test_update_service_capabilities(self, version, can_send_version): - can_send_version.side_effect = lambda x: x == version - self._test_rpc_api('update_service_capabilities', - rpc_method='cast', - service_name='fake_name', - host='fake_host', - cluster_name='cluster_name', - capabilities={}, - fanout=True, - version=version, - timestamp='123') - can_send_version.assert_called_once_with('3.3') - - def test_create_volume(self): - create_worker_mock = self.mock_object(self.fake_volume, - 'create_worker') - self._test_rpc_api('create_volume', - rpc_method='cast', - volume=self.fake_volume, 
- snapshot_id=fake_constants.SNAPSHOT_ID, - image_id=fake_constants.IMAGE_ID, - request_spec=self.fake_rs_obj, - filter_properties=self.fake_fp_dict) - create_worker_mock.assert_called_once() - - @mock.patch('oslo_messaging.RPCClient.can_send_version', return_value=True) - def test_notify_service_capabilities_backend(self, can_send_version_mock): - """Test sending new backend by RPC instead of old host parameter.""" - capabilities = {'host': 'fake_host', - 'total': '10.01', } - with mock.patch('oslo_utils.timeutils.utcnow', - return_value=datetime(1970, 1, 1)): - self._test_rpc_api('notify_service_capabilities', - rpc_method='cast', - service_name='fake_name', - backend='fake_host', - capabilities=capabilities, - timestamp='1970-01-01T00:00:00.000000', - version='3.5') - - @mock.patch('oslo_messaging.RPCClient.can_send_version', - side_effect=(True, False)) - def test_notify_service_capabilities_host(self, can_send_version_mock): - """Test sending old host RPC parameter instead of backend.""" - capabilities = {'host': 'fake_host', - 'total': '10.01', } - self._test_rpc_api('notify_service_capabilities', - rpc_method='cast', - service_name='fake_name', - server='fake_host', - expected_kwargs_diff={'host': 'fake_host'}, - backend='fake_host', - capabilities=capabilities, - version='3.1') - - @mock.patch('oslo_messaging.RPCClient.can_send_version', - return_value=False) - def test_notify_service_capabilities_capped(self, can_send_version_mock): - capabilities = {'host': 'fake_host', - 'total': '10.01', } - self.assertRaises(exception.ServiceTooOld, - self._test_rpc_api, - 'notify_service_capabilities', - rpc_method='cast', - service_name='fake_name', - backend='fake_host', - server='fake_host', - # ignore_for_method=['host'], - # ignore_for_rpc=['backend'], - capabilities=capabilities, - version='3.1') - - @mock.patch('oslo_messaging.RPCClient.can_send_version') - def test_migrate_volume(self, can_send_version): - create_worker_mock = 
self.mock_object(self.fake_volume, - 'create_worker') - self._test_rpc_api('migrate_volume', - rpc_method='cast', - backend='host', - force_copy=True, - request_spec='fake_request_spec', - filter_properties='filter_properties', - volume=self.fake_volume, - version='3.3') - create_worker_mock.assert_not_called() - - def test_retype(self): - self._test_rpc_api('retype', - rpc_method='cast', - request_spec=self.fake_rs_dict, - filter_properties=self.fake_fp_dict, - volume=self.fake_volume) - - def test_manage_existing(self): - self._test_rpc_api('manage_existing', - rpc_method='cast', - request_spec=self.fake_rs_dict, - filter_properties=self.fake_fp_dict, - volume=self.fake_volume) - - @mock.patch('oslo_messaging.RPCClient.can_send_version', - return_value=False) - def test_extend_volume_capped(self, can_send_version_mock): - self.assertRaises(exception.ServiceTooOld, - self._test_rpc_api, - 'extend_volume', - rpc_method='cast', - request_spec='fake_request_spec', - filter_properties='filter_properties', - volume=self.fake_volume, - new_size=4, - reservations=['RESERVATIONS'], - version='3.0') - - @mock.patch('oslo_messaging.RPCClient.can_send_version', return_value=True) - def test_extend_volume(self, can_send_version_mock): - create_worker_mock = self.mock_object(self.fake_volume, - 'create_worker') - self._test_rpc_api('extend_volume', - rpc_method='cast', - request_spec='fake_request_spec', - filter_properties='filter_properties', - volume=self.fake_volume, - new_size=4, - reservations=['RESERVATIONS']) - create_worker_mock.assert_not_called() - - def test_get_pools(self): - self._test_rpc_api('get_pools', - rpc_method='call', - filters=None, - retval=[{ - 'name': 'fake_pool', - 'capabilities': {}, - }]) - - def test_create_group(self): - self._test_rpc_api('create_group', - rpc_method='cast', - group='group', - group_spec=self.fake_rs_dict, - request_spec_list=[self.fake_rs_dict], - group_filter_properties=[self.fake_fp_dict], - 
filter_properties_list=[self.fake_fp_dict]) - - @ddt.data(('work_cleanup', 'myhost', None), - ('work_cleanup', 'myhost', 'mycluster'), - ('do_cleanup', 'myhost', None), - ('do_cleanup', 'myhost', 'mycluster')) - @ddt.unpack - @mock.patch('cinder.rpc.get_client') - def test_cleanup(self, method, host, cluster, get_client): - cleanup_request = objects.CleanupRequest(self.context, - host=host, - cluster_name=cluster) - rpcapi = scheduler_rpcapi.SchedulerAPI() - getattr(rpcapi, method)(self.context, cleanup_request) - - prepare = get_client.return_value.prepare - - prepare.assert_called_once_with( - version='3.4') - rpc_call = 'cast' if method == 'do_cleanup' else 'call' - getattr(prepare.return_value, rpc_call).assert_called_once_with( - self.context, method, cleanup_request=cleanup_request) - - @ddt.data('do_cleanup', 'work_cleanup') - def test_cleanup_too_old(self, method): - cleanup_request = objects.CleanupRequest(self.context) - rpcapi = scheduler_rpcapi.SchedulerAPI() - with mock.patch.object(rpcapi.client, 'can_send_version', - return_value=False) as can_send_mock: - self.assertRaises(exception.ServiceTooOld, - getattr(rpcapi, method), - self.context, - cleanup_request) - can_send_mock.assert_called_once_with('3.4') - - @mock.patch('oslo_messaging.RPCClient.can_send_version', mock.Mock()) - def test_set_log_levels(self): - service = objects.Service(self.context, host='host1') - self._test_rpc_api('set_log_levels', - rpc_method='cast', - server=service.host, - service=service, - log_request='log_request', - version='3.7') - - @mock.patch('oslo_messaging.RPCClient.can_send_version', mock.Mock()) - def test_get_log_levels(self): - service = objects.Service(self.context, host='host1') - self._test_rpc_api('get_log_levels', - rpc_method='call', - server=service.host, - service=service, - log_request='log_request', - version='3.7') diff --git a/cinder/tests/unit/scheduler/test_scheduler.py b/cinder/tests/unit/scheduler/test_scheduler.py deleted file mode 100644 index 
1e548f4e9..000000000 --- a/cinder/tests/unit/scheduler/test_scheduler.py +++ /dev/null @@ -1,520 +0,0 @@ -# Copyright 2010 United States Government as represented by the -# Administrator of the National Aeronautics and Space Administration. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -""" -Tests For Scheduler -""" - -import collections -from datetime import datetime - -import mock -from oslo_config import cfg - -from cinder import context -from cinder import exception -from cinder.message import message_field -from cinder import objects -from cinder.scheduler import driver -from cinder.scheduler import manager -from cinder import test -from cinder.tests.unit import fake_constants as fake -from cinder.tests.unit import fake_volume -from cinder.tests.unit import utils as tests_utils - -CONF = cfg.CONF - - -class SchedulerManagerTestCase(test.TestCase): - """Test case for scheduler manager.""" - - manager_cls = manager.SchedulerManager - driver_cls = driver.Scheduler - driver_cls_name = 'cinder.scheduler.driver.Scheduler' - - class AnException(Exception): - pass - - def setUp(self): - super(SchedulerManagerTestCase, self).setUp() - self.flags(scheduler_driver=self.driver_cls_name) - self.manager = self.manager_cls() - self.manager._startup_delay = False - self.context = context.get_admin_context() - self.topic = 'fake_topic' - self.fake_args = (1, 2, 3) - self.fake_kwargs = {'cat': 'meow', 'dog': 'woof'} - - def test_1_correct_init(self): - # 
Correct scheduler driver - manager = self.manager - self.assertIsInstance(manager.driver, self.driver_cls) - - @mock.patch('eventlet.sleep') - @mock.patch('cinder.volume.rpcapi.VolumeAPI.publish_service_capabilities') - def test_init_host_with_rpc(self, publish_capabilities_mock, sleep_mock): - self.manager._startup_delay = True - self.manager.init_host_with_rpc() - publish_capabilities_mock.assert_called_once_with(mock.ANY) - sleep_mock.assert_called_once_with(CONF.periodic_interval) - self.assertFalse(self.manager._startup_delay) - - @mock.patch('cinder.objects.service.Service.get_minimum_rpc_version') - @mock.patch('cinder.objects.service.Service.get_minimum_obj_version') - @mock.patch('cinder.rpc.LAST_RPC_VERSIONS', {'cinder-volume': '1.3'}) - @mock.patch('cinder.rpc.LAST_OBJ_VERSIONS', {'cinder-volume': '1.4', - 'cinder-scheduler': '1.4'}) - def test_reset(self, get_min_obj, get_min_rpc): - mgr = self.manager_cls() - - volume_rpcapi = mgr.driver.volume_rpcapi - self.assertEqual('1.3', volume_rpcapi.client.version_cap) - self.assertEqual('1.4', - volume_rpcapi.client.serializer._base.version_cap) - get_min_obj.return_value = objects.base.OBJ_VERSIONS.get_current() - mgr.reset() - - volume_rpcapi = mgr.driver.volume_rpcapi - self.assertEqual(get_min_rpc.return_value, - volume_rpcapi.client.version_cap) - self.assertEqual(get_min_obj.return_value, - volume_rpcapi.client.serializer._base.version_cap) - self.assertIsNone(volume_rpcapi.client.serializer._base.manifest) - - @mock.patch('cinder.message.api.API.cleanup_expired_messages') - def test_clean_expired_messages(self, mock_clean): - - self.manager._clean_expired_messages(self.context) - - mock_clean.assert_called_once_with(self.context) - - @mock.patch('cinder.quota.QuotaEngine.expire') - def test_clean_expired_reservation(self, mock_clean): - - self.manager._clean_expired_reservation(self.context) - - mock_clean.assert_called_once_with(self.context) - - @mock.patch('cinder.scheduler.driver.Scheduler.' 
- 'update_service_capabilities') - def test_update_service_capabilities_empty_dict(self, _mock_update_cap): - # Test no capabilities passes empty dictionary - service = 'fake_service' - host = 'fake_host' - - self.manager.update_service_capabilities(self.context, - service_name=service, - host=host) - _mock_update_cap.assert_called_once_with(service, host, {}, None, None) - - @mock.patch('cinder.scheduler.driver.Scheduler.' - 'update_service_capabilities') - def test_update_service_capabilities_correct(self, _mock_update_cap): - # Test capabilities passes correctly - service = 'fake_service' - host = 'fake_host' - capabilities = {'fake_capability': 'fake_value'} - - self.manager.update_service_capabilities(self.context, - service_name=service, - host=host, - capabilities=capabilities) - _mock_update_cap.assert_called_once_with(service, host, capabilities, - None, None) - - @mock.patch('cinder.scheduler.driver.Scheduler.' - 'notify_service_capabilities') - def test_notify_service_capabilities_no_timestamp(self, _mock_notify_cap): - """Test old interface that receives host.""" - service = 'volume' - host = 'fake_host' - capabilities = {'fake_capability': 'fake_value'} - - self.manager.notify_service_capabilities(self.context, - service_name=service, - host=host, - capabilities=capabilities) - _mock_notify_cap.assert_called_once_with(service, host, capabilities, - None) - - @mock.patch('cinder.scheduler.driver.Scheduler.' 
- 'notify_service_capabilities') - def test_notify_service_capabilities_timestamp(self, _mock_notify_cap): - """Test new interface that receives backend and timestamp.""" - service = 'volume' - backend = 'fake_cluster' - capabilities = {'fake_capability': 'fake_value'} - - timestamp = '1970-01-01T00:00:00.000000' - - self.manager.notify_service_capabilities(self.context, - service_name=service, - backend=backend, - capabilities=capabilities, - timestamp=timestamp) - _mock_notify_cap.assert_called_once_with(service, backend, - capabilities, - datetime(1970, 1, 1)) - - @mock.patch('cinder.scheduler.driver.Scheduler.schedule_create_volume') - @mock.patch('cinder.message.api.API.create') - @mock.patch('cinder.db.volume_update') - def test_create_volume_exception_puts_volume_in_error_state( - self, _mock_volume_update, _mock_message_create, - _mock_sched_create): - # Test NoValidBackend exception behavior for create_volume. - # Puts the volume in 'error' state and eats the exception. - _mock_sched_create.side_effect = exception.NoValidBackend(reason="") - volume = fake_volume.fake_volume_obj(self.context) - request_spec = {'volume_id': volume.id, - 'volume': {'id': volume.id, '_name_id': None, - 'metadata': {}, 'admin_metadata': {}, - 'glance_metadata': {}}} - request_spec_obj = objects.RequestSpec.from_primitives(request_spec) - - self.manager.create_volume(self.context, volume, - request_spec=request_spec_obj, - filter_properties={}) - _mock_volume_update.assert_called_once_with(self.context, - volume.id, - {'status': 'error'}) - _mock_sched_create.assert_called_once_with(self.context, - request_spec_obj, {}) - - _mock_message_create.assert_called_once_with( - self.context, message_field.Action.SCHEDULE_ALLOCATE_VOLUME, - resource_uuid=volume.id, - exception=mock.ANY) - - @mock.patch('cinder.scheduler.driver.Scheduler.schedule_create_volume') - @mock.patch('eventlet.sleep') - def test_create_volume_no_delay(self, _mock_sleep, _mock_sched_create): - volume = 
fake_volume.fake_volume_obj(self.context) - - request_spec = {'volume_id': volume.id} - request_spec_obj = objects.RequestSpec.from_primitives(request_spec) - - self.manager.create_volume(self.context, volume, - request_spec=request_spec_obj, - filter_properties={}) - _mock_sched_create.assert_called_once_with(self.context, - request_spec_obj, {}) - self.assertFalse(_mock_sleep.called) - - @mock.patch('cinder.scheduler.driver.Scheduler.schedule_create_volume') - @mock.patch('eventlet.sleep') - def test_create_volume_set_worker(self, _mock_sleep, _mock_sched_create): - """Make sure that the worker is created when creating a volume.""" - volume = tests_utils.create_volume(self.context, status='creating') - - request_spec = {'volume_id': volume.id} - - self.manager.create_volume(self.context, volume, - request_spec=request_spec, - filter_properties={}) - volume.set_worker.assert_called_once_with() - - @mock.patch('cinder.scheduler.driver.Scheduler.schedule_create_volume') - @mock.patch('cinder.scheduler.driver.Scheduler.is_ready') - @mock.patch('eventlet.sleep') - def test_create_volume_delay_scheduled_after_3_tries(self, _mock_sleep, - _mock_is_ready, - _mock_sched_create): - self.manager._startup_delay = True - volume = fake_volume.fake_volume_obj(self.context) - - request_spec = {'volume_id': volume.id} - request_spec_obj = objects.RequestSpec.from_primitives(request_spec) - - _mock_is_ready.side_effect = [False, False, True] - - self.manager.create_volume(self.context, volume, - request_spec=request_spec_obj, - filter_properties={}) - _mock_sched_create.assert_called_once_with(self.context, - request_spec_obj, {}) - calls = [mock.call(1)] * 2 - _mock_sleep.assert_has_calls(calls) - self.assertEqual(2, _mock_sleep.call_count) - - @mock.patch('cinder.scheduler.driver.Scheduler.schedule_create_volume') - @mock.patch('cinder.scheduler.driver.Scheduler.is_ready') - @mock.patch('eventlet.sleep') - def test_create_volume_delay_scheduled_in_1_try(self, _mock_sleep, - 
_mock_is_ready, - _mock_sched_create): - self.manager._startup_delay = True - volume = fake_volume.fake_volume_obj(self.context) - - request_spec = {'volume_id': volume.id} - request_spec_obj = objects.RequestSpec.from_primitives(request_spec) - - _mock_is_ready.return_value = True - - self.manager.create_volume(self.context, volume, - request_spec=request_spec_obj, - filter_properties={}) - _mock_sched_create.assert_called_once_with(self.context, - request_spec_obj, {}) - self.assertFalse(_mock_sleep.called) - - @mock.patch('cinder.db.volume_get') - @mock.patch('cinder.scheduler.driver.Scheduler.backend_passes_filters') - @mock.patch('cinder.db.volume_update') - def test_migrate_volume_exception_returns_volume_state( - self, _mock_volume_update, _mock_backend_passes, - _mock_volume_get): - # Test NoValidBackend exception behavior for migrate_volume_to_host. - # Puts the volume in 'error_migrating' state and eats the exception. - fake_updates = {'migration_status': 'error'} - self._test_migrate_volume_exception_returns_volume_state( - _mock_volume_update, _mock_backend_passes, _mock_volume_get, - 'available', fake_updates) - - @mock.patch('cinder.db.volume_get') - @mock.patch('cinder.scheduler.driver.Scheduler.backend_passes_filters') - @mock.patch('cinder.db.volume_update') - def test_migrate_volume_exception_returns_volume_state_maintenance( - self, _mock_volume_update, _mock_backend_passes, - _mock_volume_get): - fake_updates = {'status': 'available', - 'migration_status': 'error'} - self._test_migrate_volume_exception_returns_volume_state( - _mock_volume_update, _mock_backend_passes, _mock_volume_get, - 'maintenance', fake_updates) - - def _test_migrate_volume_exception_returns_volume_state( - self, _mock_volume_update, _mock_backend_passes, - _mock_volume_get, status, fake_updates): - volume = tests_utils.create_volume(self.context, - status=status, - previous_status='available') - fake_volume_id = volume.id - request_spec = {'volume_id': fake_volume_id} - 
_mock_backend_passes.side_effect = exception.NoValidBackend(reason="") - _mock_volume_get.return_value = volume - - self.manager.migrate_volume_to_host(self.context, volume, 'host', True, - request_spec=request_spec, - filter_properties={}) - _mock_volume_update.assert_called_once_with(self.context, - fake_volume_id, - fake_updates) - _mock_backend_passes.assert_called_once_with(self.context, 'host', - request_spec, {}) - - @mock.patch('cinder.db.volume_update') - @mock.patch('cinder.db.volume_attachment_get_all_by_volume_id') - @mock.patch('cinder.quota.QUOTAS.rollback') - def test_retype_volume_exception_returns_volume_state( - self, quota_rollback, _mock_vol_attachment_get, _mock_vol_update): - # Test NoValidBackend exception behavior for retype. - # Puts the volume in original state and eats the exception. - volume = tests_utils.create_volume(self.context, - status='retyping', - previous_status='in-use') - instance_uuid = '12345678-1234-5678-1234-567812345678' - volume_attach = tests_utils.attach_volume(self.context, volume.id, - instance_uuid, None, - '/dev/fake') - _mock_vol_attachment_get.return_value = [volume_attach] - reservations = mock.sentinel.reservations - request_spec = {'volume_id': volume.id, 'volume_type': {'id': 3}, - 'migration_policy': 'on-demand', - 'quota_reservations': reservations} - _mock_vol_update.return_value = {'status': 'in-use'} - _mock_find_retype_backend = mock.Mock( - side_effect=exception.NoValidBackend(reason="")) - orig_retype = self.manager.driver.find_retype_backend - self.manager.driver.find_retype_backend = _mock_find_retype_backend - - self.manager.retype(self.context, volume, request_spec=request_spec, - filter_properties={}) - - _mock_find_retype_backend.assert_called_once_with(self.context, - request_spec, {}, - 'on-demand') - quota_rollback.assert_called_once_with(self.context, reservations) - _mock_vol_update.assert_called_once_with(self.context, volume.id, - {'status': 'in-use'}) - 
self.manager.driver.find_retype_host = orig_retype - - def test_do_cleanup(self): - vol = tests_utils.create_volume(self.context, status='creating') - self.manager._do_cleanup(self.context, vol) - - vol.refresh() - self.assertEqual('error', vol.status) - - @mock.patch('cinder.scheduler.rpcapi.SchedulerAPI' - '.determine_rpc_version_cap', mock.Mock(return_value='2.0')) - def test_upgrading_cloud(self): - self.assertTrue(self.manager.upgrading_cloud) - - @mock.patch('cinder.scheduler.rpcapi.SchedulerAPI' - '.determine_rpc_version_cap') - def test_upgrading_cloud_not(self, cap_mock): - cap_mock.return_value = self.manager.RPC_API_VERSION - self.assertFalse(self.manager.upgrading_cloud) - - def test_cleanup_destination_scheduler(self): - service = objects.Service(id=1, host='hostname', - binary='cinder-scheduler') - result = self.manager._cleanup_destination(None, service) - expected = self.manager.sch_api.do_cleanup, None, service.host - self.assertEqual(expected, result) - - def test_cleanup_destination_volume(self): - service = objects.Service(id=1, host='hostname', cluster_name=None, - binary='cinder-volume') - result = self.manager._cleanup_destination(None, service) - expected = self.manager.volume_api.do_cleanup, service, service.host - self.assertEqual(expected, result) - - def test_cleanup_destination_volume_cluster_cache_hit(self): - cluster = objects.Cluster(id=1, name='mycluster', - binary='cinder-volume') - service = objects.Service(id=2, host='hostname', - cluster_name=cluster.name, - binary='cinder-volume') - cluster_cache = {'cinder-volume': {'mycluster': cluster}} - result = self.manager._cleanup_destination(cluster_cache, service) - expected = self.manager.volume_api.do_cleanup, cluster, cluster.name - self.assertEqual(expected, result) - - @mock.patch('cinder.objects.Cluster.get_by_id') - def test_cleanup_destination_volume_cluster_cache_miss(self, get_mock): - cluster = objects.Cluster(id=1, name='mycluster', - binary='cinder-volume') - service = 
objects.Service(self.context, - id=2, host='hostname', - cluster_name=cluster.name, - binary='cinder-volume') - get_mock.return_value = cluster - cluster_cache = collections.defaultdict(dict) - result = self.manager._cleanup_destination(cluster_cache, service) - expected = self.manager.volume_api.do_cleanup, cluster, cluster.name - self.assertEqual(expected, result) - - @mock.patch('cinder.scheduler.manager.SchedulerManager.upgrading_cloud') - def test_work_cleanup_upgrading(self, upgrading_mock): - cleanup_request = objects.CleanupRequest(host='myhost') - upgrading_mock.return_value = True - self.assertRaises(exception.UnavailableDuringUpgrade, - self.manager.work_cleanup, - self.context, - cleanup_request) - - @mock.patch('cinder.objects.Cluster.is_up', True) - @mock.patch('cinder.objects.Service.is_up', False) - @mock.patch('cinder.scheduler.rpcapi.SchedulerAPI.do_cleanup') - @mock.patch('cinder.volume.rpcapi.VolumeAPI.do_cleanup') - @mock.patch('cinder.objects.ServiceList.get_all') - def test_work_cleanup(self, get_mock, vol_clean_mock, sch_clean_mock): - args = dict(service_id=1, cluster_name='cluster_name', host='host', - binary='cinder-volume', is_up=False, disabled=True, - resource_id=fake.VOLUME_ID, resource_type='Volume') - - cluster = objects.Cluster(id=1, name=args['cluster_name'], - binary='cinder-volume') - services = [objects.Service(self.context, - id=2, host='hostname', - cluster_name=cluster.name, - binary='cinder-volume', - cluster=cluster), - objects.Service(self.context, - id=3, host='hostname', - cluster_name=None, - binary='cinder-scheduler'), - objects.Service(self.context, - id=4, host='hostname', - cluster_name=None, - binary='cinder-volume')] - get_mock.return_value = services - - cleanup_request = objects.CleanupRequest(self.context, **args) - res = self.manager.work_cleanup(self.context, cleanup_request) - self.assertEqual((services[:2], services[2:]), res) - self.assertEqual(1, vol_clean_mock.call_count) - self.assertEqual(1, 
sch_clean_mock.call_count) - - -class SchedulerTestCase(test.TestCase): - """Test case for base scheduler driver class.""" - - # So we can subclass this test and re-use tests if we need. - driver_cls = driver.Scheduler - - def setUp(self): - super(SchedulerTestCase, self).setUp() - self.driver = self.driver_cls() - self.context = context.RequestContext(fake.USER_ID, fake.PROJECT_ID) - self.topic = 'fake_topic' - - @mock.patch('cinder.scheduler.driver.Scheduler.' - 'update_service_capabilities') - def test_update_service_capabilities(self, _mock_update_cap): - service_name = 'fake_service' - host = 'fake_host' - capabilities = {'fake_capability': 'fake_value'} - self.driver.update_service_capabilities(service_name, host, - capabilities, None) - _mock_update_cap.assert_called_once_with(service_name, host, - capabilities, None) - - @mock.patch('cinder.scheduler.host_manager.HostManager.' - 'has_all_capabilities', return_value=False) - def test_is_ready(self, _mock_has_caps): - ready = self.driver.is_ready() - _mock_has_caps.assert_called_once_with() - self.assertFalse(ready) - - -class SchedulerDriverBaseTestCase(SchedulerTestCase): - """Test schedule driver class. - - Test cases for base scheduler driver class methods - that will fail if the driver is changed. 
- """ - - def test_unimplemented_schedule(self): - fake_args = (1, 2, 3) - fake_kwargs = {'cat': 'meow'} - - self.assertRaises(NotImplementedError, self.driver.schedule, - self.context, self.topic, 'schedule_something', - *fake_args, **fake_kwargs) - - -class SchedulerDriverModuleTestCase(test.TestCase): - """Test case for scheduler driver module methods.""" - - def setUp(self): - super(SchedulerDriverModuleTestCase, self).setUp() - self.context = context.RequestContext(fake.USER_ID, fake.PROJECT_ID) - - @mock.patch('cinder.db.volume_update') - @mock.patch('cinder.objects.volume.Volume.get_by_id') - def test_volume_host_update_db(self, _mock_volume_get, _mock_vol_update): - volume = fake_volume.fake_volume_obj(self.context) - _mock_volume_get.return_value = volume - - driver.volume_update_db(self.context, volume.id, 'fake_host', - 'fake_cluster') - scheduled_at = volume.scheduled_at.replace(tzinfo=None) - _mock_vol_update.assert_called_once_with( - self.context, volume.id, {'host': 'fake_host', - 'cluster_name': 'fake_cluster', - 'scheduled_at': scheduled_at}) diff --git a/cinder/tests/unit/scheduler/test_scheduler_options.py b/cinder/tests/unit/scheduler/test_scheduler_options.py deleted file mode 100644 index ec758840e..000000000 --- a/cinder/tests/unit/scheduler/test_scheduler_options.py +++ /dev/null @@ -1,138 +0,0 @@ -# Copyright 2011 OpenStack Foundation -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -""" -Tests For PickledScheduler. 
-""" - -import datetime - -from oslo_serialization import jsonutils -import six - -from cinder.scheduler import scheduler_options -from cinder import test - - -class FakeSchedulerOptions(scheduler_options.SchedulerOptions): - def __init__(self, last_checked, now, file_old, file_now, data, filedata): - super(FakeSchedulerOptions, self).__init__() - # Change internals ... - self.last_modified = file_old - self.last_checked = last_checked - self.data = data - - # For overrides ... - self._time_now = now - self._file_now = file_now - self._file_data = filedata - - self.file_was_loaded = False - - def _get_file_timestamp(self, filename): - return self._file_now - - def _get_file_handle(self, filename): - self.file_was_loaded = True - return six.StringIO(self._file_data) - - def _get_time_now(self): - return self._time_now - - -class SchedulerOptionsTestCase(test.TestCase): - def test_get_configuration_first_time_no_flag(self): - last_checked = None - now = datetime.datetime(2012, 1, 1, 1, 1, 1) - file_old = None - file_now = datetime.datetime(2012, 1, 1, 1, 1, 1) - - data = dict(a=1, b=2, c=3) - jdata = jsonutils.dumps(data) - - fake = FakeSchedulerOptions(last_checked, now, file_old, file_now, - {}, jdata) - self.assertEqual({}, fake.get_configuration()) - self.assertFalse(fake.file_was_loaded) - - def test_get_configuration_first_time_empty_file(self): - last_checked = None - now = datetime.datetime(2012, 1, 1, 1, 1, 1) - file_old = None - file_now = datetime.datetime(2012, 1, 1, 1, 1, 1) - - jdata = "" - - fake = FakeSchedulerOptions(last_checked, now, file_old, file_now, - {}, jdata) - self.assertEqual({}, fake.get_configuration('foo.json')) - self.assertTrue(fake.file_was_loaded) - - def test_get_configuration_first_time_happy_day(self): - last_checked = None - now = datetime.datetime(2012, 1, 1, 1, 1, 1) - file_old = None - file_now = datetime.datetime(2012, 1, 1, 1, 1, 1) - - data = dict(a=1, b=2, c=3) - jdata = jsonutils.dumps(data) - - fake = 
FakeSchedulerOptions(last_checked, now, file_old, file_now, - {}, jdata) - self.assertEqual(data, fake.get_configuration('foo.json')) - self.assertTrue(fake.file_was_loaded) - - def test_get_configuration_second_time_no_change(self): - last_checked = datetime.datetime(2011, 1, 1, 1, 1, 1) - now = datetime.datetime(2012, 1, 1, 1, 1, 1) - file_old = datetime.datetime(2012, 1, 1, 1, 1, 1) - file_now = datetime.datetime(2012, 1, 1, 1, 1, 1) - - data = dict(a=1, b=2, c=3) - jdata = jsonutils.dumps(data) - - fake = FakeSchedulerOptions(last_checked, now, file_old, file_now, - data, jdata) - self.assertEqual(data, fake.get_configuration('foo.json')) - self.assertFalse(fake.file_was_loaded) - - def test_get_configuration_second_time_too_fast(self): - last_checked = datetime.datetime(2011, 1, 1, 1, 1, 1) - now = datetime.datetime(2011, 1, 1, 1, 1, 2) - file_old = datetime.datetime(2012, 1, 1, 1, 1, 1) - file_now = datetime.datetime(2013, 1, 1, 1, 1, 1) - - old_data = dict(a=1, b=2, c=3) - data = dict(a=11, b=12, c=13) - jdata = jsonutils.dumps(data) - - fake = FakeSchedulerOptions(last_checked, now, file_old, file_now, - old_data, jdata) - self.assertEqual(old_data, fake.get_configuration('foo.json')) - self.assertFalse(fake.file_was_loaded) - - def test_get_configuration_second_time_change(self): - last_checked = datetime.datetime(2011, 1, 1, 1, 1, 1) - now = datetime.datetime(2012, 1, 1, 1, 1, 1) - file_old = datetime.datetime(2012, 1, 1, 1, 1, 1) - file_now = datetime.datetime(2013, 1, 1, 1, 1, 1) - - old_data = dict(a=1, b=2, c=3) - data = dict(a=11, b=12, c=13) - jdata = jsonutils.dumps(data) - - fake = FakeSchedulerOptions(last_checked, now, file_old, file_now, - old_data, jdata) - self.assertEqual(data, fake.get_configuration('foo.json')) - self.assertTrue(fake.file_was_loaded) diff --git a/cinder/tests/unit/scheduler/test_stochastic_weight_handler.py b/cinder/tests/unit/scheduler/test_stochastic_weight_handler.py deleted file mode 100644 index 9cd7e786a..000000000 
--- a/cinder/tests/unit/scheduler/test_stochastic_weight_handler.py +++ /dev/null @@ -1,63 +0,0 @@ -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -""" -Tests for stochastic weight handler -""" - -import ddt -import random - -from cinder.scheduler import base_weight -from cinder.scheduler.weights.stochastic import StochasticHostWeightHandler -from cinder import test - - -@ddt.ddt -class StochasticWeightHandlerTestCase(test.TestCase): - """Test case for StochasticHostWeightHandler.""" - - @ddt.data( - (0.0, 'A'), - (0.1, 'A'), - (0.2, 'B'), - (0.3, 'B'), - (0.4, 'B'), - (0.5, 'B'), - (0.6, 'B'), - (0.7, 'C'), - (0.8, 'C'), - (0.9, 'C'), - ) - @ddt.unpack - def test_get_weighed_objects_correct(self, rand_value, expected_obj): - self.mock_object(random, - 'random', - return_value=rand_value) - - class MapWeigher(base_weight.BaseWeigher): - minval = 0 - maxval = 100 - - def _weigh_object(self, obj, weight_map): - return weight_map[obj] - - weight_map = {'A': 1, 'B': 3, 'C': 2} - objs = sorted(weight_map.keys()) - - weigher_classes = [MapWeigher] - handler = StochasticHostWeightHandler('fake_namespace') - weighted_objs = handler.get_weighed_objects(weigher_classes, - objs, - weight_map) - winner = weighted_objs[0].obj - self.assertEqual(expected_obj, winner) diff --git a/cinder/tests/unit/scheduler/test_volume_number_weigher.py b/cinder/tests/unit/scheduler/test_volume_number_weigher.py deleted file mode 100644 index 6aceaaab8..000000000 --- 
a/cinder/tests/unit/scheduler/test_volume_number_weigher.py +++ /dev/null @@ -1,116 +0,0 @@ -# Copyright 2014 OpenStack Foundation -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -""" -Tests For Volume Number Weigher. -""" - -import mock - -from cinder.common import constants -from cinder import context -from cinder.db.sqlalchemy import api -from cinder.scheduler import weights -from cinder import test -from cinder.tests.unit import fake_constants -from cinder.tests.unit.scheduler import fakes -from cinder.volume import utils - - -def fake_volume_data_get_for_host(context, host, count_only=False): - host = utils.extract_host(host) - if host == 'host1': - return 1 - elif host == 'host2': - return 2 - elif host == 'host3': - return 3 - elif host == 'host4': - return 4 - elif host == 'host5': - return 5 - else: - return 6 - - -class VolumeNumberWeigherTestCase(test.TestCase): - - def setUp(self): - super(VolumeNumberWeigherTestCase, self).setUp() - uid = fake_constants.USER_ID - pid = fake_constants.PROJECT_ID - self.context = context.RequestContext(user_id=uid, - project_id=pid, - is_admin=False, - read_deleted="no", - overwrite=False) - self.host_manager = fakes.FakeHostManager() - self.weight_handler = weights.OrderedHostWeightHandler( - 'cinder.scheduler.weights') - - def _get_weighed_host(self, hosts, weight_properties=None): - if weight_properties is None: - weight_properties = {'context': self.context} - return 
self.weight_handler.get_weighed_objects( - [weights.volume_number.VolumeNumberWeigher], - hosts, - weight_properties)[0] - - @mock.patch('cinder.db.sqlalchemy.api.service_get_all') - def _get_all_backends(self, _mock_service_get_all, disabled=False): - ctxt = context.get_admin_context() - fakes.mock_host_manager_db_calls(_mock_service_get_all, - disabled=disabled) - backend_states = self.host_manager.get_all_backend_states(ctxt) - _mock_service_get_all.assert_called_once_with( - ctxt, - None, # backend_match_level - topic=constants.VOLUME_TOPIC, - frozen=False, - disabled=disabled) - return backend_states - - def test_volume_number_weight_multiplier1(self): - self.flags(volume_number_multiplier=-1.0) - backend_info_list = self._get_all_backends() - - # host1: 1 volume Norm=0.0 - # host2: 2 volumes - # host3: 3 volumes - # host4: 4 volumes - # host5: 5 volumes Norm=-1.0 - # so, host1 should win: - with mock.patch.object(api, 'volume_data_get_for_host', - fake_volume_data_get_for_host): - weighed_host = self._get_weighed_host(backend_info_list) - self.assertEqual(0.0, weighed_host.weight) - self.assertEqual('host1', - utils.extract_host(weighed_host.obj.host)) - - def test_volume_number_weight_multiplier2(self): - self.flags(volume_number_multiplier=1.0) - backend_info_list = self._get_all_backends() - - # host1: 1 volume Norm=0 - # host2: 2 volumes - # host3: 3 volumes - # host4: 4 volumes - # host5: 5 volumes Norm=1 - # so, host5 should win: - with mock.patch.object(api, 'volume_data_get_for_host', - fake_volume_data_get_for_host): - weighed_host = self._get_weighed_host(backend_info_list) - self.assertEqual(1.0, weighed_host.weight) - self.assertEqual('host5', - utils.extract_host(weighed_host.obj.host)) diff --git a/cinder/tests/unit/scheduler/test_weights.py b/cinder/tests/unit/scheduler/test_weights.py deleted file mode 100644 index 687bd1d03..000000000 --- a/cinder/tests/unit/scheduler/test_weights.py +++ /dev/null @@ -1,54 +0,0 @@ -# Copyright 2011-2012 
OpenStack Foundation. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -""" -Tests For Scheduler weights. -""" - -from cinder.scheduler import base_weight -from cinder import test - - -class TestWeightHandler(test.TestCase): - def test_no_multiplier(self): - class FakeWeigher(base_weight.BaseWeigher): - def _weigh_object(self, *args, **kwargs): - pass - - self.assertEqual(1.0, - FakeWeigher().weight_multiplier()) - - def test_no_weight_object(self): - class FakeWeigher(base_weight.BaseWeigher): - def weight_multiplier(self, *args, **kwargs): - pass - self.assertRaises(TypeError, - FakeWeigher) - - def test_normalization(self): - # weight_list, expected_result, minval, maxval - map_ = ( - ((), (), None, None), - ((0.0, 0.0), (0.0, 0.0), None, None), - ((1.0, 1.0), (0.0, 0.0), None, None), - - ((20.0, 50.0), (0.0, 1.0), None, None), - ((20.0, 50.0), (0.0, 0.375), None, 100.0), - ((20.0, 50.0), (0.4, 1.0), 0.0, None), - ((20.0, 50.0), (0.2, 0.5), 0.0, 100.0), - ) - for seq, result, minval, maxval in map_: - ret = base_weight.normalize(seq, minval=minval, maxval=maxval) - self.assertEqual(result, tuple(ret)) diff --git a/cinder/tests/unit/targets/__init__.py b/cinder/tests/unit/targets/__init__.py deleted file mode 100644 index e69de29bb..000000000 diff --git a/cinder/tests/unit/targets/targets_fixture.py b/cinder/tests/unit/targets/targets_fixture.py deleted file mode 100644 index 1d1bf2066..000000000 --- 
a/cinder/tests/unit/targets/targets_fixture.py +++ /dev/null @@ -1,113 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import copy -import os -import shutil -import tempfile - -import mock -from oslo_utils import fileutils -from oslo_utils import timeutils - -from cinder import test -from cinder.volume import configuration as conf - - -class TargetDriverFixture(test.TestCase): - def setUp(self): - super(TargetDriverFixture, self).setUp() - self.configuration = conf.Configuration(None) - self.configuration.append_config_values = mock.Mock(return_value=0) - self.configuration.safe_get = mock.Mock(side_effect=self.fake_safe_get) - self.configuration.iscsi_ip_address = '10.9.8.7' - self.configuration.iscsi_port = 3260 - - self.fake_volumes_dir = tempfile.mkdtemp() - fileutils.ensure_tree(self.fake_volumes_dir) - - self.fake_project_id = 'ed2c1fd4-5fc0-11e4-aa15-123b93f75cba' - self.fake_project_id_2 = 'ed2c1fd4-5fc0-11e4-aa15-123b93f75cba' - self.fake_volume_id = 'ed2c2222-5fc0-11e4-aa15-123b93f75cba' - - self.addCleanup(self._cleanup) - - self.testvol =\ - {'project_id': self.fake_project_id, - 'name': 'testvol', - 'size': 1, - 'id': self.fake_volume_id, - 'volume_type_id': None, - 'provider_location': '10.10.7.1:3260 ' - 'iqn.2010-10.org.openstack:' - 'volume-%s 0' % self.fake_volume_id, - 'provider_auth': 'CHAP stack-1-a60e2611875f40199931f2' - 'c76370d66b 2FE0CQ8J196R', - 'provider_geometry': '512 512', - 'created_at': timeutils.utcnow(), - 'host': 
'fake_host@lvm#lvm'} - - self.testvol_no_prov_loc = copy.copy(self.testvol) - self.testvol_no_prov_loc['provider_location'] = None - - self.iscsi_target_prefix = 'iqn.2010-10.org.openstack:' - self.target_string = ('127.0.0.1:3260,1 ' + - self.iscsi_target_prefix + - 'volume-%s' % self.testvol['id']) - - self.testvol_2 =\ - {'project_id': self.fake_project_id_2, - 'name': 'testvol2', - 'size': 1, - 'id': self.fake_volume_id, - 'volume_type_id': None, - 'provider_location': ('%(ip)s:%(port)d%(iqn)svolume-%(vol)s 2' % - {'ip': self.configuration.iscsi_ip_address, - 'port': self.configuration.iscsi_port, - 'iqn': self.iscsi_target_prefix, - 'vol': self.fake_volume_id}), - 'provider_auth': 'CHAP stack-1-a60e2611875f40199931f2' - 'c76370d66b 2FE0CQ8J196R', - 'provider_geometry': '512 512', - 'created_at': timeutils.utcnow(), - 'host': 'fake_host@lvm#lvm'} - - self.expected_iscsi_properties = \ - {'auth_method': 'CHAP', - 'auth_password': '2FE0CQ8J196R', - 'auth_username': 'stack-1-a60e2611875f40199931f2c76370d66b', - 'encrypted': False, - 'logical_block_size': '512', - 'physical_block_size': '512', - 'target_discovered': False, - 'target_iqn': 'iqn.2010-10.org.openstack:volume-%s' % - self.fake_volume_id, - 'target_lun': 0, - 'target_portal': '10.10.7.1:3260', - 'volume_id': self.fake_volume_id} - - self.VOLUME_ID = '83c2e877-feed-46be-8435-77884fe55b45' - self.VOLUME_NAME = 'volume-' + self.VOLUME_ID - self.test_vol = (self.iscsi_target_prefix + - self.VOLUME_NAME) - - def _cleanup(self): - if os.path.exists(self.fake_volumes_dir): - shutil.rmtree(self.fake_volumes_dir) - - def fake_safe_get(self, value): - if value == 'volumes_dir': - return self.fake_volumes_dir - elif value == 'iscsi_protocol': - return self.configuration.iscsi_protocol - elif value == 'iscsi_target_prefix': - return self.iscsi_target_prefix diff --git a/cinder/tests/unit/targets/test_base_iscsi_driver.py b/cinder/tests/unit/targets/test_base_iscsi_driver.py deleted file mode 100644 index 
8c935a2e4..000000000 --- a/cinder/tests/unit/targets/test_base_iscsi_driver.py +++ /dev/null @@ -1,167 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import mock -from oslo_config import cfg - -from cinder import context -from cinder import exception -from cinder.tests.unit.targets import targets_fixture as tf -from cinder import utils -from cinder.volume import configuration as conf -from cinder.volume.targets import fake -from cinder.volume.targets import iscsi - - -class FakeIncompleteDriver(iscsi.ISCSITarget): - def null_method(): - pass - - -class TestBaseISCSITargetDriver(tf.TargetDriverFixture): - - def setUp(self): - super(TestBaseISCSITargetDriver, self).setUp() - self.target = fake.FakeTarget(root_helper=utils.get_root_helper(), - configuration=self.configuration) - self.target.db = mock.MagicMock( - volume_get=mock.MagicMock(return_value={'provider_auth': - 'CHAP otzL 234Z'})) - - def test_abc_methods_not_present_fails(self): - configuration = conf.Configuration(cfg.StrOpt('iscsi_target_prefix', - default='foo', - help='you wish')) - self.assertRaises(TypeError, - FakeIncompleteDriver, - configuration=configuration) - - def test_get_iscsi_properties(self): - self.assertEqual(self.expected_iscsi_properties, - self.target._get_iscsi_properties(self.testvol)) - - def test_get_iscsi_properties_multiple_targets(self): - testvol = self.testvol.copy() - expected_iscsi_properties = self.expected_iscsi_properties.copy() - iqn = 
expected_iscsi_properties['target_iqn'] - testvol.update( - {'provider_location': '10.10.7.1:3260;10.10.8.1:3260 ' - 'iqn.2010-10.org.openstack:' - 'volume-%s 0' % self.fake_volume_id}) - expected_iscsi_properties.update( - {'target_portals': ['10.10.7.1:3260', '10.10.8.1:3260'], - 'target_iqns': [iqn, iqn], - 'target_luns': [0, 0]}) - self.assertEqual(expected_iscsi_properties, - self.target._get_iscsi_properties(testvol)) - - def test_build_iscsi_auth_string(self): - auth_string = 'chap chap-user chap-password' - self.assertEqual(auth_string, - self.target._iscsi_authentication('chap', - 'chap-user', - 'chap-password')) - - def test_do_iscsi_discovery(self): - with mock.patch.object(self.configuration, - 'safe_get', return_value='127.0.0.1'),\ - mock.patch('cinder.utils.execute', - return_value=(self.target_string, '')): - self.assertEqual(self.target_string, - self.target._do_iscsi_discovery(self.testvol)) - - def test_remove_export(self): - - with mock.patch.object(self.target, '_get_target_and_lun') as \ - mock_get_target,\ - mock.patch.object(self.target, 'show_target'),\ - mock.patch.object(self.target, 'remove_iscsi_target') as \ - mock_remove_target: - - mock_get_target.return_value = (0, 1) - iscsi_target, lun = mock_get_target.return_value - ctxt = context.get_admin_context() - self.target.remove_export(ctxt, self.testvol) - mock_remove_target.assert_called_once_with( - iscsi_target, - lun, - 'ed2c2222-5fc0-11e4-aa15-123b93f75cba', - 'testvol') - - def test_remove_export_notfound(self): - - with mock.patch.object(self.target, '_get_target_and_lun') as \ - mock_get_target,\ - mock.patch.object(self.target, 'show_target'),\ - mock.patch.object(self.target, 'remove_iscsi_target'): - - mock_get_target.side_effect = exception.NotFound - ctxt = context.get_admin_context() - self.assertIsNone(self.target.remove_export(ctxt, - self.testvol)) - - def test_remove_export_show_error(self): - - with mock.patch.object(self.target, '_get_target_and_lun') as \ - 
mock_get_target,\ - mock.patch.object(self.target, 'show_target') as mshow,\ - mock.patch.object(self.target, 'remove_iscsi_target'): - - mock_get_target.return_value = (0, 1) - iscsi_target, lun = mock_get_target.return_value - mshow.side_effect = Exception - ctxt = context.get_admin_context() - self.assertIsNone(self.target.remove_export(ctxt, - self.testvol)) - - def test_initialize_connection(self): - expected = {'driver_volume_type': 'iscsi', - 'data': self.expected_iscsi_properties} - self.assertEqual(expected, - self.target.initialize_connection(self.testvol, {})) - - def test_validate_connector(self): - bad_connector = {'no_initiator': 'nada'} - self.assertRaises(exception.InvalidConnectorException, - self.target.validate_connector, - bad_connector) - - connector = {'initiator': 'fake_init'} - self.assertTrue(self.target.validate_connector, - connector) - - def test_show_target_error(self): - self.assertRaises(exception.InvalidParameterValue, - self.target.show_target, - 0, None) - - with mock.patch.object(self.target, '_get_target') as mock_get_target: - mock_get_target.side_effect = exception.NotFound() - self.assertRaises(exception.NotFound, - self.target.show_target, 0, - self.expected_iscsi_properties['target_iqn']) - - def test_iscsi_location(self): - location = self.target._iscsi_location('portal', 1, 'target', 2) - self.assertEqual('portal:3260,1 target 2', location) - - location = self.target._iscsi_location('portal', 1, 'target', 2, - ['portal2']) - self.assertEqual('portal:3260;portal2:3260,1 target 2', location) - - def test_get_target_chap_auth(self): - ctxt = context.get_admin_context() - self.assertEqual(('otzL', '234Z'), - self.target._get_target_chap_auth(ctxt, - self.testvol)) - self.target.db.volume_get.assert_called_once_with( - ctxt, self.testvol['id']) diff --git a/cinder/tests/unit/targets/test_cxt_driver.py b/cinder/tests/unit/targets/test_cxt_driver.py deleted file mode 100644 index 0a164a624..000000000 --- 
a/cinder/tests/unit/targets/test_cxt_driver.py +++ /dev/null @@ -1,185 +0,0 @@ -# Copyright 2015 Chelsio Communications Inc. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import os -import sys - -import mock - -from cinder import context -from cinder import test -from cinder.tests.unit.targets import targets_fixture as tf -from cinder import utils -from cinder.volume.targets import cxt - - -class TestCxtAdmDriver(tf.TargetDriverFixture): - def setUp(self): - super(TestCxtAdmDriver, self).setUp() - self.cxt_subdir = cxt.CxtAdm.cxt_subdir - self.target = cxt.CxtAdm(root_helper=utils.get_root_helper(), - configuration=self.configuration) - self.VG = 'stack-volumes-lvmdriver-1' - self.fake_iscsi_scan = \ - ('\n' - 'TARGET: iqn.2010-10.org.openstack:%(vol)s, id=1, login_ip=0\n' - ' PortalGroup=1@10.9.8.7:3260,timeout=0\n' - ' TargetDevice=/dev/%(vg)s/%(vol)s' - ',BLK,PROD=CHISCSI ' - 'Target,SN=0N0743000000000,ID=0D074300000000000000000,' - 'WWN=:W00743000000000\n' - % {'vol': self.VOLUME_NAME, 'vg': self.VG}) - - def test_get_target(self): - with mock.patch.object(self.target, '_get_volumes_dir', - return_value=self.fake_volumes_dir),\ - mock.patch('cinder.utils.execute', - return_value=(self.fake_iscsi_scan, None)) as m_exec: - self.assertEqual( - '1', - self.target._get_target( - 'iqn.2010-10.org.openstack:volume-%s' % self.VOLUME_ID - ) - ) - self.assertTrue(m_exec.called) - - @test.testtools.skipIf(sys.platform == "darwin", "SKIP on OSX") - 
@mock.patch('cinder.volume.targets.cxt.CxtAdm._get_target', - return_value=1) - @mock.patch('cinder.utils.execute') - def test_create_iscsi_target(self, mock_execute, mock_get_targ): - mock_execute.return_value = ('', '') - with mock.patch.object(self.target, '_get_volumes_dir') as mock_get: - mock_get.return_value = self.fake_volumes_dir - self.assertEqual( - 1, - self.target.create_iscsi_target( - self.test_vol, - 1, - 0, - self.fake_volumes_dir, - portals_ips=[self.configuration.iscsi_ip_address])) - self.assertTrue(mock_get.called) - self.assertTrue(mock_execute.called) - self.assertTrue(mock_get_targ.called) - - @test.testtools.skipIf(sys.platform == "darwin", "SKIP on OSX") - @mock.patch('cinder.volume.targets.cxt.CxtAdm._get_target', - return_value=1) - @mock.patch('cinder.utils.execute', return_value=('fake out', 'fake err')) - def test_create_iscsi_target_port_ips(self, mock_execute, mock_get_targ): - ips = ['10.0.0.15', '127.0.0.1'] - port = 3261 - mock_execute.return_value = ('', '') - with mock.patch.object(self.target, '_get_volumes_dir') as mock_get: - mock_get.return_value = self.fake_volumes_dir - test_vol = 'iqn.2010-10.org.openstack:'\ - 'volume-83c2e877-feed-46be-8435-77884fe55b45' - self.assertEqual( - 1, - self.target.create_iscsi_target( - test_vol, - 1, - 0, - self.fake_volumes_dir, - portals_port=port, - portals_ips=ips)) - - self.assertTrue(mock_get.called) - self.assertTrue(mock_execute.called) - self.assertTrue(mock_get_targ.called) - - file_path = os.path.join(self.fake_volumes_dir, - test_vol.split(':')[1]) - - expected_cfg = { - 'name': test_vol, - 'device': self.fake_volumes_dir, - 'ips': ','.join(map(lambda ip: '%s:%s' % (ip, port), ips)), - 'spaces': ' ' * 14, - 'spaces2': ' ' * 23} - - expected_file = ('\n%(spaces)starget:' - '\n%(spaces2)sTargetName=%(name)s' - '\n%(spaces2)sTargetDevice=%(device)s' - '\n%(spaces2)sPortalGroup=1@%(ips)s' - '\n%(spaces)s ') % expected_cfg - - with open(file_path, 'r') as cfg_file: - result = 
cfg_file.read() - self.assertEqual(expected_file, result) - - @test.testtools.skipIf(sys.platform == "darwin", "SKIP on OSX") - @mock.patch('cinder.volume.targets.cxt.CxtAdm._get_target', - return_value=1) - @mock.patch('cinder.utils.execute', return_value=('fake out', 'fake err')) - def test_create_iscsi_target_already_exists(self, mock_execute, - mock_get_targ): - with mock.patch.object(self.target, '_get_volumes_dir') as mock_get: - mock_get.return_value = self.fake_volumes_dir - self.assertEqual( - 1, - self.target.create_iscsi_target( - self.test_vol, - 1, - 0, - self.fake_volumes_dir, - portals_ips=[self.configuration.iscsi_ip_address])) - self.assertTrue(mock_get.called) - self.assertTrue(mock_get_targ.called) - self.assertTrue(mock_execute.called) - - @test.testtools.skipIf(sys.platform == "darwin", "SKIP on OSX") - @mock.patch('cinder.volume.targets.cxt.CxtAdm._get_target', - return_value=1) - @mock.patch('cinder.utils.execute') - @mock.patch.object(cxt.CxtAdm, '_get_target_chap_auth') - def test_create_export(self, mock_chap, mock_execute, - mock_get_targ): - mock_execute.return_value = ('', '') - mock_chap.return_value = ('QZJbisGmn9AL954FNF4D', - 'P68eE7u9eFqDGexd28DQ') - with mock.patch.object(self.target, '_get_volumes_dir') as mock_get: - mock_get.return_value = self.fake_volumes_dir - - expected_result = {'location': '10.9.8.7:3260,1 ' - 'iqn.2010-10.org.openstack:testvol 0', - 'auth': 'CHAP ' - 'QZJbisGmn9AL954FNF4D P68eE7u9eFqDGexd28DQ'} - - ctxt = context.get_admin_context() - self.assertEqual(expected_result, - self.target.create_export(ctxt, - self.testvol, - self.fake_volumes_dir)) - self.assertTrue(mock_get.called) - self.assertTrue(mock_execute.called) - - @mock.patch('cinder.volume.targets.cxt.CxtAdm._get_target_chap_auth') - def test_ensure_export(self, mock_get_chap): - fake_creds = ('asdf', 'qwert') - mock_get_chap.return_value = fake_creds - ctxt = context.get_admin_context() - with mock.patch.object(self.target, 'create_iscsi_target'): 
- self.target.ensure_export(ctxt, - self.testvol, - self.fake_volumes_dir) - self.target.create_iscsi_target.assert_called_once_with( - 'iqn.2010-10.org.openstack:testvol', - 1, 0, self.fake_volumes_dir, fake_creds, - check_exit_code=False, - old_name=None, - portals_ips=[self.configuration.iscsi_ip_address], - portals_port=self.configuration.iscsi_port) diff --git a/cinder/tests/unit/targets/test_iet_driver.py b/cinder/tests/unit/targets/test_iet_driver.py deleted file mode 100644 index 8dfc48242..000000000 --- a/cinder/tests/unit/targets/test_iet_driver.py +++ /dev/null @@ -1,202 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -import contextlib - -import mock -from oslo_concurrency import processutils as putils -import six - -from cinder import context -from cinder import exception - -from cinder.tests.unit.targets import targets_fixture as tf -from cinder import utils -from cinder.volume.targets import iet - - -class TestIetAdmDriver(tf.TargetDriverFixture): - - def setUp(self): - super(TestIetAdmDriver, self).setUp() - self.target = iet.IetAdm(root_helper=utils.get_root_helper(), - configuration=self.configuration) - - def test_get_target(self): - tmp_file = six.StringIO() - tmp_file.write( - 'tid:1 name:iqn.2010-10.org.openstack:volume-83c2e877-feed-46be-8435-77884fe55b45\n' # noqa - ' sid:844427031282176 initiator:iqn.1994-05.com.redhat:5a6894679665\n' # noqa - ' cid:0 ip:10.9.8.7 state:active hd:none dd:none') - tmp_file.seek(0) - with mock.patch('six.moves.builtins.open') as mock_open: - mock_open.return_value = contextlib.closing(tmp_file) - self.assertEqual('1', - self.target._get_target( - 'iqn.2010-10.org.openstack:volume-83c2e877-feed-46be-8435-77884fe55b45' # noqa - )) - - # Test the failure case: Failed to handle the config file - mock_open.side_effect = MemoryError() - self.assertRaises(MemoryError, - self.target._get_target, - '') - - @mock.patch('cinder.volume.targets.iet.IetAdm._get_target', - return_value=0) - @mock.patch('cinder.utils.execute') - @mock.patch('os.path.exists', return_value=True) - @mock.patch('cinder.utils.temporary_chown') - @mock.patch.object(iet, 'LOG') - def test_create_iscsi_target(self, mock_log, mock_chown, mock_exists, - mock_execute, mock_get_targ): - mock_execute.return_value = ('', '') - tmp_file = six.StringIO() - with mock.patch('six.moves.builtins.open') as mock_open: - mock_open.return_value = contextlib.closing(tmp_file) - self.assertEqual( - 0, - self.target.create_iscsi_target( - self.test_vol, - 0, - 0, - self.fake_volumes_dir)) - self.assertTrue(mock_execute.called) - self.assertTrue(mock_open.called) - 
self.assertTrue(mock_get_targ.called) - - # Test the failure case: Failed to chown the config file - mock_open.side_effect = putils.ProcessExecutionError - self.assertRaises(exception.ISCSITargetCreateFailed, - self.target.create_iscsi_target, - self.test_vol, - 0, - 0, - self.fake_volumes_dir) - - # Test the failure case: Failed to set new auth - mock_execute.side_effect = putils.ProcessExecutionError - self.assertRaises(exception.ISCSITargetCreateFailed, - self.target.create_iscsi_target, - self.test_vol, - 0, - 0, - self.fake_volumes_dir) - - @mock.patch('cinder.utils.execute') - @mock.patch('os.path.exists', return_value=True) - def test_update_config_file_failure(self, mock_exists, mock_execute): - # Test the failure case: conf file does not exist - mock_exists.return_value = False - mock_execute.side_effect = putils.ProcessExecutionError - self.assertRaises(exception.ISCSITargetCreateFailed, - self.target.update_config_file, - self.test_vol, - 0, - self.fake_volumes_dir, - "foo bar") - - @mock.patch('cinder.volume.targets.iet.IetAdm._get_target', - return_value=1) - @mock.patch('cinder.utils.execute') - def test_create_iscsi_target_already_exists(self, mock_execute, - mock_get_targ): - mock_execute.return_value = ('fake out', 'fake err') - self.assertEqual( - 1, - self.target.create_iscsi_target( - self.test_vol, - 1, - 0, - self.fake_volumes_dir)) - self.assertTrue(mock_get_targ.called) - self.assertTrue(mock_execute.called) - - @mock.patch('cinder.volume.targets.iet.IetAdm._find_sid_cid_for_target', - return_value=None) - @mock.patch('os.path.exists', return_value=False) - @mock.patch('cinder.utils.execute') - def test_remove_iscsi_target(self, mock_execute, mock_exists, mock_find): - - # Test the normal case - self.target.remove_iscsi_target(1, - 0, - self.testvol['id'], - self.testvol['name']) - mock_execute.assert_any_call('ietadm', - '--op', - 'delete', - '--tid=1', - run_as_root=True) - - # Test the failure case: putils.ProcessExecutionError - 
mock_execute.side_effect = putils.ProcessExecutionError - self.assertRaises(exception.ISCSITargetRemoveFailed, - self.target.remove_iscsi_target, - 1, - 0, - self.testvol['id'], - self.testvol['name']) - - def test_find_sid_cid_for_target(self): - tmp_file = six.StringIO() - tmp_file.write( - 'tid:1 name:iqn.2010-10.org.openstack:volume-83c2e877-feed-46be-8435-77884fe55b45\n' # noqa - ' sid:844427031282176 initiator:iqn.1994-05.com.redhat:5a6894679665\n' # noqa - ' cid:0 ip:10.9.8.7 state:active hd:none dd:none') - tmp_file.seek(0) - with mock.patch('six.moves.builtins.open') as mock_open: - mock_open.return_value = contextlib.closing(tmp_file) - self.assertEqual(('844427031282176', '0'), - self.target._find_sid_cid_for_target( - '1', - 'iqn.2010-10.org.openstack:volume-83c2e877-feed-46be-8435-77884fe55b45', # noqa - 'volume-83c2e877-feed-46be-8435-77884fe55b45' # noqa - )) - - @mock.patch('cinder.volume.targets.iet.IetAdm._get_target', - return_value=1) - @mock.patch('cinder.utils.execute') - @mock.patch.object(iet.IetAdm, '_get_target_chap_auth') - def test_create_export(self, mock_get_chap, mock_execute, - mock_get_targ): - mock_execute.return_value = ('', '') - mock_get_chap.return_value = ('QZJbisGmn9AL954FNF4D', - 'P68eE7u9eFqDGexd28DQ') - expected_result = {'location': '10.9.8.7:3260,1 ' - 'iqn.2010-10.org.openstack:testvol 0', - 'auth': 'CHAP ' - 'QZJbisGmn9AL954FNF4D P68eE7u9eFqDGexd28DQ'} - ctxt = context.get_admin_context() - self.assertEqual(expected_result, - self.target.create_export(ctxt, - self.testvol, - self.fake_volumes_dir)) - self.assertTrue(mock_execute.called) - - @mock.patch('cinder.volume.targets.iet.IetAdm._get_target_chap_auth', - return_value=None) - @mock.patch('cinder.volume.targets.iet.IetAdm._get_target', - return_value=1) - def test_ensure_export(self, mock_get_targetm, mock_get_chap): - ctxt = context.get_admin_context() - with mock.patch.object(self.target, 'create_iscsi_target'): - self.target.ensure_export(ctxt, - self.testvol, 
- self.fake_volumes_dir) - self.target.create_iscsi_target.assert_called_once_with( - 'iqn.2010-10.org.openstack:testvol', - 1, 0, self.fake_volumes_dir, None, - portals_ips=[self.configuration.iscsi_ip_address], - portals_port=int(self.configuration.iscsi_port), - check_exit_code=False, - old_name=None) diff --git a/cinder/tests/unit/targets/test_iser_driver.py b/cinder/tests/unit/targets/test_iser_driver.py deleted file mode 100644 index 82e36106c..000000000 --- a/cinder/tests/unit/targets/test_iser_driver.py +++ /dev/null @@ -1,69 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -import mock - -from cinder.tests.unit.targets import targets_fixture as tf -from cinder import utils -from cinder.volume.targets import lio -from cinder.volume.targets import tgt - - -class TestIserTgtDriver(tf.TargetDriverFixture): - """Unit tests for the iSER TGT flow""" - - def setUp(self): - super(TestIserTgtDriver, self).setUp() - self.configuration.iscsi_protocol = 'iser' - self.target = tgt.TgtAdm(root_helper=utils.get_root_helper(), - configuration=self.configuration) - - def test_iscsi_protocol(self): - self.assertEqual('iser', self.target.iscsi_protocol) - - @mock.patch.object(tgt.TgtAdm, '_get_iscsi_properties') - def test_initialize_connection(self, mock_get_iscsi): - - connector = {'initiator': 'fake_init'} - - mock_get_iscsi.return_value = {} - expected_return = {'driver_volume_type': 'iser', - 'data': {}} - self.assertEqual(expected_return, - self.target.initialize_connection(self.testvol, - connector)) - - -class TestIserLioAdmDriver(tf.TargetDriverFixture): - """Unit tests for the iSER LIO flow""" - def setUp(self): - super(TestIserLioAdmDriver, self).setUp() - self.configuration.iscsi_protocol = 'iser' - with mock.patch.object(lio.LioAdm, '_verify_rtstool'): - self.target = lio.LioAdm(root_helper=utils.get_root_helper(), - configuration=self.configuration) - self.target.db = mock.MagicMock( - volume_get=lambda x, y: {'provider_auth': 'IncomingUser foo bar'}) - - def test_iscsi_protocol(self): - self.assertEqual('iser', self.target.iscsi_protocol) - - @mock.patch('cinder.utils.execute') - @mock.patch.object(lio.LioAdm, '_get_iscsi_properties') - def test_initialize_connection(self, mock_get_iscsi, mock_execute): - - connector = {'initiator': 'fake_init'} - - mock_get_iscsi.return_value = {} - ret = self.target.initialize_connection(self.testvol, connector) - driver_volume_type = ret['driver_volume_type'] - self.assertEqual('iser', driver_volume_type) diff --git a/cinder/tests/unit/targets/test_lio_driver.py 
b/cinder/tests/unit/targets/test_lio_driver.py deleted file mode 100644 index a542aa908..000000000 --- a/cinder/tests/unit/targets/test_lio_driver.py +++ /dev/null @@ -1,354 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import mock -from oslo_concurrency import processutils as putils - -from cinder import context -from cinder import exception -from cinder.tests.unit.targets import targets_fixture as tf -from cinder import utils -from cinder.volume.targets import lio - - -class TestLioAdmDriver(tf.TargetDriverFixture): - - def setUp(self): - super(TestLioAdmDriver, self).setUp() - - with mock.patch.object(lio.LioAdm, '_verify_rtstool'): - self.target = lio.LioAdm(root_helper=utils.get_root_helper(), - configuration=self.configuration) - - @mock.patch.object(lio.LioAdm, '_execute', side_effect=lio.LioAdm._execute) - @mock.patch.object(lio.LioAdm, '_persist_configuration') - @mock.patch('cinder.utils.execute') - def test_get_target(self, mexecute, mpersist_cfg, mlock_exec): - mexecute.return_value = (self.test_vol, None) - self.assertEqual(self.test_vol, self.target._get_target(self.test_vol)) - self.assertFalse(mpersist_cfg.called) - expected_args = ('cinder-rtstool', 'get-targets') - mlock_exec.assert_called_once_with(*expected_args, run_as_root=True) - mexecute.assert_called_once_with(*expected_args, run_as_root=True) - - def test_get_iscsi_target(self): - ctxt = context.get_admin_context() - expected = 0 - self.assertEqual(expected, - 
self.target._get_iscsi_target(ctxt, - self.testvol['id'])) - - def test_get_target_and_lun(self): - lun = 0 - iscsi_target = 0 - ctxt = context.get_admin_context() - expected = (iscsi_target, lun) - self.assertEqual(expected, - self.target._get_target_and_lun(ctxt, self.testvol)) - - @mock.patch.object(lio.LioAdm, '_execute', side_effect=lio.LioAdm._execute) - @mock.patch.object(lio.LioAdm, '_persist_configuration') - @mock.patch('cinder.utils.execute') - @mock.patch.object(lio.LioAdm, '_get_target') - def test_create_iscsi_target(self, mget_target, mexecute, mpersist_cfg, - mlock_exec): - - mget_target.return_value = 1 - # create_iscsi_target sends volume_name instead of volume_id on error - self.assertEqual( - 1, - self.target.create_iscsi_target( - self.test_vol, - 1, - 0, - self.fake_volumes_dir)) - mpersist_cfg.assert_called_once_with(self.VOLUME_NAME) - mexecute.assert_called_once_with( - 'cinder-rtstool', - 'create', - self.fake_volumes_dir, - self.test_vol, - '', - '', - self.target.iscsi_protocol == 'iser', - run_as_root=True) - - @mock.patch.object(lio.LioAdm, '_execute', side_effect=lio.LioAdm._execute) - @mock.patch.object(lio.LioAdm, '_persist_configuration') - @mock.patch.object(utils, 'execute') - @mock.patch.object(lio.LioAdm, '_get_target', return_value=1) - def test_create_iscsi_target_port_ip(self, mget_target, mexecute, - mpersist_cfg, mlock_exec): - ip = '10.0.0.15' - port = 3261 - - self.assertEqual( - 1, - self.target.create_iscsi_target( - name=self.test_vol, - tid=1, - lun=0, - path=self.fake_volumes_dir, - **{'portals_port': port, 'portals_ips': [ip]})) - - expected_args = ( - 'cinder-rtstool', - 'create', - self.fake_volumes_dir, - self.test_vol, - '', - '', - self.target.iscsi_protocol == 'iser', - '-p%s' % port, - '-a' + ip) - - mlock_exec.assert_any_call(*expected_args, run_as_root=True) - mexecute.assert_any_call(*expected_args, run_as_root=True) - mpersist_cfg.assert_called_once_with(self.VOLUME_NAME) - - 
@mock.patch.object(lio.LioAdm, '_execute', side_effect=lio.LioAdm._execute) - @mock.patch.object(lio.LioAdm, '_persist_configuration') - @mock.patch.object(utils, 'execute') - @mock.patch.object(lio.LioAdm, '_get_target', return_value=1) - def test_create_iscsi_target_port_ips(self, mget_target, mexecute, - mpersist_cfg, mlock_exec): - test_vol = 'iqn.2010-10.org.openstack:' + self.VOLUME_NAME - ips = ['10.0.0.15', '127.0.0.1'] - port = 3261 - - self.assertEqual( - 1, - self.target.create_iscsi_target( - name=test_vol, - tid=1, - lun=0, - path=self.fake_volumes_dir, - **{'portals_port': port, 'portals_ips': ips})) - - expected_args = ( - 'cinder-rtstool', - 'create', - self.fake_volumes_dir, - test_vol, - '', - '', - self.target.iscsi_protocol == 'iser', - '-p%s' % port, - '-a' + ','.join(ips)) - - mlock_exec.assert_any_call(*expected_args, run_as_root=True) - mexecute.assert_any_call(*expected_args, run_as_root=True) - mpersist_cfg.assert_called_once_with(self.VOLUME_NAME) - - @mock.patch.object(lio.LioAdm, '_execute', side_effect=lio.LioAdm._execute) - @mock.patch.object(lio.LioAdm, '_persist_configuration') - @mock.patch('cinder.utils.execute', - side_effect=putils.ProcessExecutionError) - @mock.patch.object(lio.LioAdm, '_get_target') - def test_create_iscsi_target_already_exists(self, mget_target, mexecute, - mpersist_cfg, mlock_exec): - chap_auth = ('foo', 'bar') - self.assertRaises(exception.ISCSITargetCreateFailed, - self.target.create_iscsi_target, - self.test_vol, - 1, - 0, - self.fake_volumes_dir, - chap_auth) - self.assertFalse(mpersist_cfg.called) - expected_args = ('cinder-rtstool', 'create', self.fake_volumes_dir, - self.test_vol, chap_auth[0], chap_auth[1], False) - mlock_exec.assert_called_once_with(*expected_args, run_as_root=True) - mexecute.assert_called_once_with(*expected_args, run_as_root=True) - - @mock.patch.object(lio.LioAdm, '_execute', side_effect=lio.LioAdm._execute) - @mock.patch.object(lio.LioAdm, '_persist_configuration') - 
@mock.patch('cinder.utils.execute') - def test_remove_iscsi_target(self, mexecute, mpersist_cfg, mlock_exec): - # Test the normal case - self.target.remove_iscsi_target(0, - 0, - self.testvol['id'], - self.testvol['name']) - expected_args = ('cinder-rtstool', 'delete', - self.iscsi_target_prefix + self.testvol['name']) - - mlock_exec.assert_called_once_with(*expected_args, run_as_root=True) - mexecute.assert_called_once_with(*expected_args, run_as_root=True) - mpersist_cfg.assert_called_once_with(self.fake_volume_id) - - # Test the failure case: putils.ProcessExecutionError - mlock_exec.reset_mock() - mpersist_cfg.reset_mock() - mexecute.side_effect = putils.ProcessExecutionError - self.assertRaises(exception.ISCSITargetRemoveFailed, - self.target.remove_iscsi_target, - 0, - 0, - self.testvol['id'], - self.testvol['name']) - mlock_exec.assert_called_once_with(*expected_args, run_as_root=True) - - # Ensure there have been no calls to persist configuration - self.assertFalse(mpersist_cfg.called) - - @mock.patch.object(lio.LioAdm, '_get_targets') - @mock.patch.object(lio.LioAdm, '_execute', side_effect=lio.LioAdm._execute) - @mock.patch('cinder.utils.execute') - def test_ensure_export(self, mock_exec, mock_execute, mock_get_targets): - - ctxt = context.get_admin_context() - mock_get_targets.return_value = None - self.target.ensure_export(ctxt, - self.testvol, - self.fake_volumes_dir) - - expected_args = ('cinder-rtstool', 'restore') - mock_exec.assert_called_once_with(*expected_args, run_as_root=True) - - @mock.patch.object(lio.LioAdm, '_get_targets') - @mock.patch.object(lio.LioAdm, '_restore_configuration') - def test_ensure_export_target_exist(self, mock_restore, mock_get_targets): - - ctxt = context.get_admin_context() - mock_get_targets.return_value = 'target' - self.target.ensure_export(ctxt, - self.testvol, - self.fake_volumes_dir) - self.assertFalse(mock_restore.called) - - @mock.patch.object(lio.LioAdm, '_execute', side_effect=lio.LioAdm._execute) - 
@mock.patch.object(lio.LioAdm, '_persist_configuration') - @mock.patch('cinder.utils.execute') - @mock.patch.object(lio.LioAdm, '_get_iscsi_properties') - def test_initialize_connection(self, mock_get_iscsi, mock_execute, - mpersist_cfg, mlock_exec): - target_id = self.iscsi_target_prefix + 'volume-' + self.fake_volume_id - connector = {'initiator': 'fake_init'} - - # Test the normal case - mock_get_iscsi.return_value = 'foo bar' - expected_return = {'driver_volume_type': 'iscsi', - 'data': 'foo bar'} - self.assertEqual(expected_return, - self.target.initialize_connection(self.testvol, - connector)) - - expected_args = ('cinder-rtstool', 'add-initiator', target_id, - self.expected_iscsi_properties['auth_username'], - '2FE0CQ8J196R', connector['initiator']) - - mlock_exec.assert_called_once_with(*expected_args, run_as_root=True) - mock_execute.assert_called_once_with(*expected_args, run_as_root=True) - mpersist_cfg.assert_called_once_with(self.fake_volume_id) - - # Test the failure case: putils.ProcessExecutionError - mlock_exec.reset_mock() - mpersist_cfg.reset_mock() - mock_execute.side_effect = putils.ProcessExecutionError - self.assertRaises(exception.ISCSITargetAttachFailed, - self.target.initialize_connection, - self.testvol, - connector) - - mlock_exec.assert_called_once_with(*expected_args, run_as_root=True) - - # Ensure there have been no calls to persist configuration - self.assertFalse(mpersist_cfg.called) - - @mock.patch.object(lio.LioAdm, '_execute', side_effect=lio.LioAdm._execute) - @mock.patch.object(lio.LioAdm, '_persist_configuration') - @mock.patch('cinder.utils.execute') - def test_terminate_connection(self, mock_execute, mpersist_cfg, - mlock_exec): - - target_id = self.iscsi_target_prefix + 'volume-' + self.fake_volume_id - - connector = {'initiator': 'fake_init'} - self.target.terminate_connection(self.testvol, - connector) - expected_args = ('cinder-rtstool', 'delete-initiator', target_id, - connector['initiator']) - - 
mlock_exec.assert_called_once_with(*expected_args, run_as_root=True) - mock_execute.assert_called_once_with(*expected_args, run_as_root=True) - mpersist_cfg.assert_called_once_with(self.fake_volume_id) - - @mock.patch.object(lio.LioAdm, '_execute', side_effect=lio.LioAdm._execute) - @mock.patch.object(lio.LioAdm, '_persist_configuration') - @mock.patch('cinder.utils.execute') - def test_terminate_connection_no_prov_loc(self, - mock_execute, - mpersist_cfg, - mlock_exec): - """terminate_connection does nothing if provider_location is None""" - - connector = {'initiator': 'fake_init'} - self.target.terminate_connection(self.testvol_no_prov_loc, - connector) - - mlock_exec.assert_not_called() - mock_execute.assert_not_called() - mpersist_cfg.assert_not_called() - - @mock.patch.object(lio.LioAdm, '_execute', side_effect=lio.LioAdm._execute) - @mock.patch.object(lio.LioAdm, '_persist_configuration') - @mock.patch('cinder.utils.execute') - def test_terminate_connection_fail(self, mock_execute, mpersist_cfg, - mlock_exec): - - target_id = self.iscsi_target_prefix + 'volume-' + self.fake_volume_id - mock_execute.side_effect = putils.ProcessExecutionError - connector = {'initiator': 'fake_init'} - self.assertRaises(exception.ISCSITargetDetachFailed, - self.target.terminate_connection, - self.testvol, - connector) - mlock_exec.assert_called_once_with('cinder-rtstool', - 'delete-initiator', target_id, - connector['initiator'], - run_as_root=True) - self.assertFalse(mpersist_cfg.called) - - def test_iscsi_protocol(self): - self.assertEqual('iscsi', self.target.iscsi_protocol) - - @mock.patch.object(lio.LioAdm, '_get_target_and_lun', return_value=(1, 2)) - @mock.patch.object(lio.LioAdm, 'create_iscsi_target', return_value=3) - @mock.patch.object(lio.LioAdm, '_get_target_chap_auth', - return_value=(mock.sentinel.user, mock.sentinel.pwd)) - def test_create_export(self, mock_chap, mock_create, mock_get_target): - ctxt = context.get_admin_context() - result = 
self.target.create_export(ctxt, self.testvol_2, - self.fake_volumes_dir) - - loc = (u'%(ip)s:%(port)d,3 %(prefix)s%(name)s 2' % - {'ip': self.configuration.iscsi_ip_address, - 'port': self.configuration.iscsi_port, - 'prefix': self.iscsi_target_prefix, - 'name': self.testvol_2['name']}) - - expected_result = { - 'location': loc, - 'auth': 'CHAP %s %s' % (mock.sentinel.user, mock.sentinel.pwd), - } - - self.assertEqual(expected_result, result) - - mock_create.assert_called_once_with( - self.iscsi_target_prefix + self.testvol_2['name'], - 1, - 2, - self.fake_volumes_dir, - (mock.sentinel.user, mock.sentinel.pwd), - portals_ips=[self.configuration.iscsi_ip_address], - portals_port=self.configuration.iscsi_port) diff --git a/cinder/tests/unit/targets/test_scst_driver.py b/cinder/tests/unit/targets/test_scst_driver.py deleted file mode 100644 index c45a56a0b..000000000 --- a/cinder/tests/unit/targets/test_scst_driver.py +++ /dev/null @@ -1,233 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -import mock - -from cinder import context -from cinder.tests.unit.targets import targets_fixture as tf -from cinder import utils -from cinder.volume.targets import scst -from cinder.volume import utils as vutils - - -class TestSCSTAdmDriver(tf.TargetDriverFixture): - - def setUp(self): - super(TestSCSTAdmDriver, self).setUp() - self.target = scst.SCSTAdm(root_helper=utils.get_root_helper(), - configuration=self.configuration) - - self.fake_iscsi_scan = \ - ('Collecting current configuration: done.\n' - 'Driver Target\n' - '----------------------------------------------\n' - 'iscsi iqn.2010-10.org.openstack:' - 'volume-ed2c2222-5fc0-11e4-aa15-123b93f75cba\n' - 'All done.\n') - - self.fake_iscsi_attribute_scan = \ - ('Collecting current configuration: done.\n' - 'Attribute Value Writable KEY\n' - '------------------------------------------\n' - 'rel_tgt_id 1 Yes Yes\n' - 'Dynamic attributes available\n' - '----------------------------\n' - 'IncomingUser\n' - 'OutgoingUser\n' - 'allowed_portal\n' - 'LUN CREATE attributes available\n' - '-------------------------------\n' - 'read_only\n' - 'All done.\n') - self.fake_list_group = \ - ('org.openstack:volume-vedams\n' - 'Collecting current configuration: done.\n' - 'Driver: iscsi\n' - 'Target: iqn.2010-10.org.openstack:volume-vedams\n' - 'Driver/target \'iscsi/iqn.2010-10.org.openstack:volume-vedams\'' - 'has no associated LUNs.\n' - 'Group: iqn.1993-08.org.debian:01:626bf14ebdc\n' - 'Assigned LUNs:\n' - 'LUN Device\n' - '------------------\n' - '1 1b67387810256\n' - '2 2a0f1cc9cd595\n' - 'Assigned Initiators:\n' - 'Initiator\n' - '-------------------------------------\n' - 'iqn.1993-08.org.debian:01:626bf14ebdc\n' - 'All done.\n') - - self.target.db = mock.MagicMock( - volume_get=lambda x, y: {'provider_auth': 'IncomingUser foo bar'}) - - @mock.patch.object(utils, 'execute') - @mock.patch.object(scst.SCSTAdm, '_target_attribute') - @mock.patch.object(scst.SCSTAdm, 'scst_execute') - def test_get_target(self, 
mock_execute, - mock_target_attribute, - mock_scst_execute): - mock_target_attribute.return_value = 1 - mock_execute.return_value = (self.fake_iscsi_scan, None) - expected = 1 - self.assertEqual(expected, self.target._get_target( - 'iqn.2010-10.org.openstack:' - 'volume-ed2c2222-5fc0-11e4-aa15-123b93f75cba')) - - @mock.patch.object(utils, 'execute') - def test_target_attribute(self, mock_execute): - mock_execute.return_value = (self.fake_iscsi_attribute_scan, None) - self.assertEqual(str(1), self.target._target_attribute( - 'iqn.2010-10.org.openstack:' - 'volume-ed2c2222-5fc0-11e4-aa15-123b93f75cba')) - - def test_single_lun_get_target_and_lun(self): - ctxt = context.get_admin_context() - self.assertEqual((0, 1), self.target._get_target_and_lun( - ctxt, self.testvol)) - - @mock.patch.object(utils, 'execute') - @mock.patch.object(scst.SCSTAdm, '_get_group') - @mock.patch.object(scst.SCSTAdm, 'scst_execute') - def test_multi_lun_get_target_and_lun(self, mock_execute, mock_get_group, - mock_scst_execute): - mock_execute.return_value = (self.fake_list_group, None) - mock_get_group.return_value = self.fake_list_group - - ctxt = context.get_admin_context() - with mock.patch.object(self.target, 'target_name', - return_value='iqn.2010-10.org.openstack:' - 'volume-vedams'): - self.assertEqual((0, 3), self.target._get_target_and_lun( - ctxt, self.testvol)) - - @mock.patch.object(utils, 'execute') - @mock.patch.object(scst.SCSTAdm, '_get_target') - @mock.patch.object(scst.SCSTAdm, 'scst_execute') - def test_create_iscsi_target(self, mock_execute, mock_get_target, - mock_scst_execute): - mock_execute.return_value = (None, None) - mock_get_target.return_value = 1 - - self.assertEqual(1, - self.target.create_iscsi_target( - 'iqn.2010-10.org.openstack:' - 'volume-ed2c2222-5fc0-11e4-aa15-123b93f75cba', - 'vol1', - 0, 1, self.fake_volumes_dir)) - - @mock.patch.object(utils, 'execute') - @mock.patch.object(scst.SCSTAdm, '_get_target') - @mock.patch.object(scst.SCSTAdm, 
'scst_execute') - def test_create_export(self, mock_execute, - mock_get_target, - mock_scst_execute): - mock_execute.return_value = (None, None) - mock_scst_execute.return_value = (None, None) - mock_get_target.return_value = 1 - - def _fake_get_target_and_lun(*args, **kwargs): - return 0, 1 - - def _fake_iscsi_location(*args, **kwargs): - return '10.9.8.7:3260,1 iqn.2010-10.org.openstack:' \ - 'volume-ed2c2222-5fc0-11e4-aa15-123b93f75cba 1' - - def _fake_get_target_chap_auth(*args, **kwargs): - return ('QZJbisGmn9AL954FNF4D', 'P68eE7u9eFqDGexd28DQ') - - ctxt = context.get_admin_context() - expected_result = {'location': '10.9.8.7:3260,1 ' - 'iqn.2010-10.org.openstack:' - 'volume-ed2c2222-5fc0-11e4-aa15-123b93f75cba 1', - 'auth': 'CHAP ' - 'QZJbisGmn9AL954FNF4D P68eE7u9eFqDGexd28DQ'} - - with mock.patch.object(self.target, '_get_target_and_lun', - side_effect=_fake_get_target_and_lun),\ - mock.patch.object(self.target, '_get_target_chap_auth', - side_effect=_fake_get_target_chap_auth),\ - mock.patch.object(self.target, 'initiator_iqn', - return_value='iqn.1993-08.org.debian:' - '01:626bf14ebdc'),\ - mock.patch.object(self.target, '_iscsi_location', - side_effect=_fake_iscsi_location),\ - mock.patch.object(self.target, 'target_driver', - return_value='iscsi'),\ - mock.patch.object(vutils, 'generate_username', - side_effect=lambda: 'QZJbisGmn9AL954FNF4D'),\ - mock.patch.object(vutils, 'generate_password', - side_effect=lambda: 'P68eE7u9eFqDGexd28DQ'): - self.assertEqual(expected_result, - self.target.create_export(ctxt, - self.testvol, - self.fake_volumes_dir)) - - @mock.patch('cinder.utils.execute') - @mock.patch.object(scst.SCSTAdm, '_get_target') - @mock.patch.object(scst.SCSTAdm, 'scst_execute') - def test_ensure_export(self, mock_execute, - mock_get_target, - mock_scst_execute): - mock_execute.return_value = (None, None) - mock_scst_execute.return_value = (None, None) - mock_get_target.return_value = 1 - ctxt = context.get_admin_context() - - def 
_fake_get_target_and_lun(*args, **kwargs): - return 0, 1 - - def _fake_get_target_chap_auth(*args, **kwargs): - return ('QZJbisGmn9AL954FNF4D', 'P68eE7u9eFqDGexd28DQ') - - with mock.patch.object(self.target, 'create_iscsi_target'),\ - mock.patch.object(self.target, '_get_target_chap_auth', - side_effect=_fake_get_target_chap_auth),\ - mock.patch.object(self.target, '_get_target_and_lun', - side_effect=_fake_get_target_and_lun): - self.target.ensure_export(ctxt, - self.testvol, - self.fake_volumes_dir) - self.target.create_iscsi_target.assert_called_once_with( - 'iqn.2010-10.org.openstack:testvol', - 'ed2c2222-5fc0-11e4-aa15-123b93f75cba', - 0, 1, self.fake_volumes_dir, _fake_get_target_chap_auth()) - - @mock.patch('cinder.utils.execute') - @mock.patch.object(scst.SCSTAdm, '_get_target') - @mock.patch.object(scst.SCSTAdm, 'scst_execute') - def test_ensure_export_chap(self, mock_execute, - mock_get_target, - mock_scst_execute): - mock_execute.return_value = (None, None) - mock_scst_execute.return_value = (None, None) - mock_get_target.return_value = 1 - ctxt = context.get_admin_context() - - def _fake_get_target_and_lun(*args, **kwargs): - return 0, 1 - - def _fake_get_target_chap_auth(*args, **kwargs): - return None - - with mock.patch.object(self.target, 'create_iscsi_target'),\ - mock.patch.object(self.target, '_get_target_chap_auth', - side_effect=_fake_get_target_chap_auth),\ - mock.patch.object(self.target, '_get_target_and_lun', - side_effect=_fake_get_target_and_lun): - self.target.ensure_export(ctxt, - self.testvol, - self.fake_volumes_dir) - self.target.create_iscsi_target.assert_called_once_with( - 'iqn.2010-10.org.openstack:testvol', - 'ed2c2222-5fc0-11e4-aa15-123b93f75cba', - 0, 1, self.fake_volumes_dir, None) diff --git a/cinder/tests/unit/targets/test_tgt_driver.py b/cinder/tests/unit/targets/test_tgt_driver.py deleted file mode 100644 index 21caab324..000000000 --- a/cinder/tests/unit/targets/test_tgt_driver.py +++ /dev/null @@ -1,406 +0,0 @@ -# 
Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import os -import sys -import time - -import mock -from oslo_concurrency import processutils as putils - -from cinder import context -from cinder import exception -from cinder import test -from cinder.tests.unit.targets import targets_fixture as tf -from cinder import utils -from cinder.volume.targets import tgt -from cinder.volume import utils as vutils - - -class TestTgtAdmDriver(tf.TargetDriverFixture): - - def setUp(self): - super(TestTgtAdmDriver, self).setUp() - self.configuration.get = mock.Mock(side_effect=self.fake_get) - - self.target = tgt.TgtAdm(root_helper=utils.get_root_helper(), - configuration=self.configuration) - self.testvol_path = \ - '/dev/stack-volumes-lvmdriver-1/%s' % self.VOLUME_NAME - - self.fake_iscsi_scan =\ - ('Target 1: %(test_vol)s\n' - ' System information:\n' - ' Driver: iscsi\n' - ' State: ready\n' - ' I_T nexus information:\n' - ' LUN information:\n' - ' LUN: 0\n' - ' Type: controller\n' - ' SCSI ID: IET 00010000\n' - ' SCSI SN: beaf10\n' - ' Size: 0 MB, Block size: 1\n' - ' Online: Yes\n' - ' Removable media: No\n' - ' Prevent removal: No\n' - ' Readonly: No\n' - ' SWP: No\n' - ' Thin-provisioning: No\n' - ' Backing store type: null\n' - ' Backing store path: None\n' - ' Backing store flags:\n' - ' LUN: 1\n' - ' Type: disk\n' - ' SCSI ID: IET 00010001\n' - ' SCSI SN: beaf11\n' - ' Size: 1074 MB, Block size: 512\n' - ' Online: Yes\n' - ' Removable media: No\n' - ' Prevent removal: No\n' - ' 
Readonly: No\n' - ' SWP: No\n' - ' Thin-provisioning: No\n' - ' Backing store type: rdwr\n' - ' Backing store path: %(bspath)s\n' - ' Backing store flags:\n' - ' Account information:\n' - ' mDVpzk8cZesdahJC9h73\n' - ' ACL information:\n' - ' ALL"\n' % {'test_vol': self.test_vol, - 'bspath': self.testvol_path}) - self.patch('time.sleep') - - def fake_get(self, value, default): - if value in ('iscsi_target_flags', 'iscsi_write_cache'): - return getattr(self, value, default) - - def test_iscsi_protocol(self): - self.assertEqual('iscsi', self.target.iscsi_protocol) - - def test_get_target(self): - with mock.patch('cinder.utils.execute', - return_value=(self.fake_iscsi_scan, None)): - iqn = self.test_vol - self.assertEqual('1', self.target._get_target(iqn)) - - def test_verify_backing_lun(self): - iqn = self.test_vol - - with mock.patch('cinder.utils.execute', - return_value=(self.fake_iscsi_scan, None)): - self.assertTrue(self.target._verify_backing_lun(iqn, '1')) - - # Test the failure case - bad_scan = self.fake_iscsi_scan.replace('LUN: 1', 'LUN: 3') - - with mock.patch('cinder.utils.execute', - return_value=(bad_scan, None)): - self.assertFalse(self.target._verify_backing_lun(iqn, '1')) - - @mock.patch.object(time, 'sleep') - @mock.patch('cinder.utils.execute') - def test_recreate_backing_lun(self, mock_execute, mock_sleep): - mock_execute.return_value = ('out', 'err') - self.target._recreate_backing_lun(self.test_vol, '1', - self.testvol['name'], - self.testvol_path) - - expected_command = ('tgtadm', '--lld', 'iscsi', '--op', 'new', - '--mode', 'logicalunit', '--tid', '1', - '--lun', '1', '-b', - self.testvol_path) - - mock_execute.assert_called_once_with(*expected_command, - run_as_root=True) - - # Test the failure case - mock_execute.side_effect = putils.ProcessExecutionError - self.assertIsNone( - self.target._recreate_backing_lun(self.test_vol, - '1', - self.testvol['name'], - self.testvol_path)) - - def test_get_iscsi_target(self): - ctxt = 
context.get_admin_context() - expected = 0 - self.assertEqual(expected, - self.target._get_iscsi_target(ctxt, - self.testvol['id'])) - - def test_get_target_and_lun(self): - lun = 1 - iscsi_target = 0 - ctxt = context.get_admin_context() - expected = (iscsi_target, lun) - self.assertEqual(expected, - self.target._get_target_and_lun(ctxt, self.testvol)) - - @test.testtools.skipIf(sys.platform == "darwin", "SKIP on OSX") - def test_create_iscsi_target(self): - with mock.patch('cinder.utils.execute', return_value=('', '')),\ - mock.patch.object(self.target, '_get_target', - side_effect=lambda x: 1),\ - mock.patch.object(self.target, '_verify_backing_lun', - side_effect=lambda x, y: True): - self.assertEqual( - 1, - self.target.create_iscsi_target( - self.test_vol, - 1, - 0, - self.fake_volumes_dir)) - - @test.testtools.skipIf(sys.platform == "darwin", "SKIP on OSX") - def test_create_iscsi_target_content(self): - - self.iscsi_target_flags = 'foo' - self.iscsi_write_cache = 'bar' - - mock_open = mock.mock_open() - with mock.patch('cinder.utils.execute', return_value=('', '')),\ - mock.patch.object(self.target, '_get_target', - side_effect=lambda x: 1),\ - mock.patch.object(self.target, '_verify_backing_lun', - side_effect=lambda x, y: True),\ - mock.patch('cinder.volume.targets.tgt.open', - mock_open, create=True): - self.assertEqual( - 1, - self.target.create_iscsi_target( - self.test_vol, - 1, - 0, - self.testvol_path, - chap_auth=('chap_foo', 'chap_bar'))) - - @test.testtools.skipIf(sys.platform == "darwin", "SKIP on OSX") - def test_create_iscsi_target_already_exists(self): - def _fake_execute(*args, **kwargs): - if 'update' in args: - raise putils.ProcessExecutionError( - exit_code=1, - stdout='', - stderr='target already exists', - cmd='tgtad --lld iscsi --op show --mode target') - else: - return 'fake out', 'fake err' - - with mock.patch.object(self.target, '_get_target', - side_effect=lambda x: 1),\ - mock.patch.object(self.target, '_verify_backing_lun', - 
side_effect=lambda x, y: True),\ - mock.patch('cinder.utils.execute', _fake_execute): - self.assertEqual( - 1, - self.target.create_iscsi_target( - self.test_vol, - 1, - 0, - self.fake_volumes_dir)) - - @mock.patch('os.path.isfile', return_value=True) - @mock.patch('os.path.exists', return_value=True) - @mock.patch('cinder.utils.execute') - @mock.patch('os.unlink', return_value=None) - def test_delete_target_not_found(self, - mock_unlink, - mock_exec, - mock_pathexists, - mock_isfile): - def _fake_execute(*args, **kwargs): - raise putils.ProcessExecutionError( - exit_code=1, - stdout='', - stderr='can\'t find the target', - cmd='tgt-admin --force --delete') - - def _fake_execute_wrong_message(*args, **kwargs): - raise putils.ProcessExecutionError( - exit_code=1, - stdout='', - stderr='this is not the error you are looking for', - cmd='tgt-admin --force --delete') - - mock_exec.side_effect = _fake_execute - - with mock.patch.object(self.target, '_get_target', return_value=False): - self.assertIsNone(self.target.remove_iscsi_target( - 1, - 0, - self.VOLUME_ID, - self.VOLUME_NAME)) - - mock_exec.side_effect = _fake_execute_wrong_message - self.assertRaises(exception.ISCSITargetRemoveFailed, - self.target.remove_iscsi_target, - 1, - 0, - self.VOLUME_ID, - self.VOLUME_NAME) - - @mock.patch('os.path.isfile', return_value=True) - @mock.patch('os.path.exists', return_value=True) - @mock.patch('cinder.utils.execute') - @mock.patch('os.unlink', return_value=None) - def test_delete_target_acl_not_found(self, - mock_unlink, - mock_exec, - mock_pathexists, - mock_isfile): - def _fake_execute(*args, **kwargs): - raise putils.ProcessExecutionError( - exit_code=1, - stdout='', - stderr='this access control rule does not exist', - cmd='tgt-admin --force --delete') - - def _fake_execute_wrong_message(*args, **kwargs): - raise putils.ProcessExecutionError( - exit_code=1, - stdout='', - stderr='this is not the error you are looking for', - cmd='tgt-admin --force --delete') - - 
mock_exec.side_effect = _fake_execute - - with mock.patch.object(self.target, '_get_target', return_value=False): - self.assertIsNone(self.target.remove_iscsi_target( - 1, - 0, - self.VOLUME_ID, - self.VOLUME_NAME)) - - mock_exec.side_effect = _fake_execute_wrong_message - self.assertRaises(exception.ISCSITargetRemoveFailed, - self.target.remove_iscsi_target, - 1, - 0, - self.VOLUME_ID, - self.VOLUME_NAME) - - @mock.patch.object(tgt.TgtAdm, '_get_iscsi_properties') - def test_initialize_connection(self, mock_get_iscsi): - - connector = {'initiator': 'fake_init'} - - # Test the normal case - mock_get_iscsi.return_value = 'foo bar' - expected_return = {'driver_volume_type': 'iscsi', - 'data': 'foo bar'} - self.assertEqual(expected_return, - self.target.initialize_connection(self.testvol, - connector)) - - @mock.patch('cinder.utils.execute') - @mock.patch.object(tgt.TgtAdm, '_get_target') - @mock.patch.object(os.path, 'exists') - @mock.patch.object(os.path, 'isfile') - @mock.patch.object(os, 'unlink') - def test_remove_iscsi_target(self, - mock_unlink, - mock_isfile, - mock_path_exists, - mock_get_target, - mock_execute): - - # Test the failure case: path does not exist - mock_path_exists.return_value = None - self.assertIsNone(self.target.remove_iscsi_target( - 0, - 1, - self.testvol['id'], - self.testvol['name'])) - - # Test the normal case - mock_path_exists.return_value = True - mock_isfile.return_value = True - self.target.remove_iscsi_target(0, - 1, - self.testvol['id'], - self.testvol['name']) - calls = [mock.call('tgt-admin', '--force', '--delete', - self.iscsi_target_prefix + self.testvol['name'], - run_as_root=True), - mock.call('tgt-admin', '--delete', - self.iscsi_target_prefix + self.testvol['name'], - run_as_root=True)] - - mock_execute.assert_has_calls(calls) - - @test.testtools.skipIf(sys.platform == "darwin", "SKIP on OSX") - def test_create_export(self): - expected_result = {'location': '10.9.8.7:3260,1 ' + - self.iscsi_target_prefix + - 
self.testvol['name'] + ' 1', - 'auth': 'CHAP QZJb P68e'} - - with mock.patch('cinder.utils.execute', return_value=('', '')),\ - mock.patch.object(self.target, '_get_target', - side_effect=lambda x: 1),\ - mock.patch.object(self.target, '_verify_backing_lun', - side_effect=lambda x, y: True),\ - mock.patch.object(self.target, '_get_target_chap_auth', - side_effect=lambda x, y: None) as m_chap,\ - mock.patch.object(vutils, 'generate_username', - side_effect=lambda: 'QZJb'),\ - mock.patch.object(vutils, 'generate_password', - side_effect=lambda: 'P68e'): - - ctxt = context.get_admin_context() - self.assertEqual(expected_result, - self.target.create_export(ctxt, - self.testvol, - self.fake_volumes_dir)) - - m_chap.side_effect = lambda x, y: ('otzL', '234Z') - - expected_result['auth'] = ('CHAP otzL 234Z') - - self.assertEqual(expected_result, - self.target.create_export(ctxt, - self.testvol, - self.fake_volumes_dir)) - - @mock.patch.object(tgt.TgtAdm, '_get_target_chap_auth') - @mock.patch.object(tgt.TgtAdm, 'create_iscsi_target') - def test_ensure_export(self, _mock_create, mock_get_chap): - ctxt = context.get_admin_context() - mock_get_chap.return_value = ('foo', 'bar') - self.target.ensure_export(ctxt, - self.testvol, - self.fake_volumes_dir) - - _mock_create.assert_called_once_with( - self.iscsi_target_prefix + self.testvol['name'], - 0, 1, self.fake_volumes_dir, ('foo', 'bar'), - check_exit_code=False, - old_name=None, - portals_ips=[self.configuration.iscsi_ip_address], - portals_port=self.configuration.iscsi_port) - - @test.testtools.skipIf(sys.platform == "darwin", "SKIP on OSX") - def test_create_iscsi_target_retry(self): - with mock.patch('cinder.utils.execute', return_value=('', '')),\ - mock.patch.object(self.target, '_get_target', - side_effect=[None, None, 1]) as get_target,\ - mock.patch.object(self.target, '_verify_backing_lun', - side_effect=lambda x, y: True): - self.assertEqual( - 1, - self.target.create_iscsi_target( - self.test_vol, - 1, - 0, - 
self.fake_volumes_dir)) - # 3 - default retries count value for utils.retry - self.assertEqual(3, get_target.call_count) diff --git a/cinder/tests/unit/test_api.py b/cinder/tests/unit/test_api.py deleted file mode 100644 index bb96cb2fa..000000000 --- a/cinder/tests/unit/test_api.py +++ /dev/null @@ -1,73 +0,0 @@ - -# Copyright 2010 United States Government as represented by the -# Administrator of the National Aeronautics and Space Administration. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -"""Unit tests for the API endpoint.""" - -import six -from six.moves import http_client -import webob - - -class FakeHttplibSocket(object): - """A fake socket implementation for http_client.HTTPResponse, trivial.""" - def __init__(self, response_string): - self.response_string = response_string - self._buffer = six.StringIO(response_string) - - def makefile(self, _mode, _other): - """Returns the socket's internal buffer.""" - return self._buffer - - -class FakeHttplibConnection(object): - """A fake http_client.HTTPConnection for boto. - - requests made via this connection actually get translated and routed into - our WSGI app, we then wait for the response and turn it back into - the http_client.HTTPResponse that boto expects. 
- """ - def __init__(self, app, host, is_secure=False): - self.app = app - self.host = host - - def request(self, method, path, data, headers): - req = webob.Request.blank(path) - req.method = method - req.body = data - req.headers = headers - req.headers['Accept'] = 'text/html' - req.host = self.host - # Call the WSGI app, get the HTTP response - resp = str(req.get_response(self.app)) - # For some reason, the response doesn't have "HTTP/1.0 " prepended; I - # guess that's a function the web server usually provides. - resp = "HTTP/1.0 %s" % resp - self.sock = FakeHttplibSocket(resp) - self.http_response = http_client.HTTPResponse(self.sock) - # NOTE(vish): boto is accessing private variables for some reason - self._HTTPConnection__response = self.http_response - self.http_response.begin() - - def getresponse(self): - return self.http_response - - def getresponsebody(self): - return self.sock.response_string - - def close(self): - """Required for compatibility with boto/tornado.""" - pass diff --git a/cinder/tests/unit/test_api_urlmap.py b/cinder/tests/unit/test_api_urlmap.py deleted file mode 100644 index dfa508cfd..000000000 --- a/cinder/tests/unit/test_api_urlmap.py +++ /dev/null @@ -1,192 +0,0 @@ -# Copyright (c) 2013 OpenStack Foundation -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -""" -Tests for cinder.api.urlmap.py -""" - -import mock - -from cinder.api import urlmap -from cinder import test - - -class TestParseFunctions(test.TestCase): - def test_unquote_header_value_without_quotes(self): - arg = 'TestString' - result = urlmap.unquote_header_value(arg) - self.assertEqual(arg, result) - - def test_unquote_header_value_with_quotes(self): - result = urlmap.unquote_header_value('"TestString"') - self.assertEqual('TestString', result) - - def test_parse_list_header(self): - arg = 'token, "quoted value"' - result = urlmap.parse_list_header(arg) - self.assertEqual(['token', 'quoted value'], result) - - def test_parse_options_header(self): - result = urlmap.parse_options_header('Content-Type: text/html;' - ' mimetype=text/html') - self.assertEqual(('Content-Type:', {'mimetype': 'text/html'}), result) - - def test_parse_options_header_without_value(self): - result = urlmap.parse_options_header(None) - self.assertEqual(('', {}), result) - - -class TestAccept(test.TestCase): - def test_best_match_ValueError(self): - arg = 'text/html; q=some_invalud_value' - accept = urlmap.Accept(arg) - self.assertEqual((None, {}), accept.best_match(['text/html'])) - - def test_best_match(self): - arg = '*/*; q=0.7, application/json; q=0.7, text/html; q=-0.8' - accept = urlmap.Accept(arg) - self.assertEqual(('application/json', {'q': '0.7'}), - accept.best_match(['application/json', - 'text/html'])) - - def test_match_mask_one_asterisk(self): - arg = 'text/*; q=0.7' - accept = urlmap.Accept(arg) - self.assertEqual(('text/html', {'q': '0.7'}), - accept.best_match(['text/html'])) - - def test_match_mask_two_asterisk(self): - arg = '*/*; q=0.7' - accept = urlmap.Accept(arg) - self.assertEqual(('text/html', {'q': '0.7'}), - accept.best_match(['text/html'])) - - def test_match_mask_no_asterisk(self): - arg = 'application/json; q=0.7' - accept = urlmap.Accept(arg) - self.assertEqual((None, {}), accept.best_match(['text/html'])) - - def test_content_type_params(self): - 
arg = "application/json; q=0.2," \ - " text/html; q=0.3" - accept = urlmap.Accept(arg) - self.assertEqual({'q': '0.2'}, - accept.content_type_params('application/json')) - - def test_content_type_params_wrong_content_type(self): - arg = 'text/html; q=0.1' - accept = urlmap.Accept(arg) - self.assertEqual({}, accept.content_type_params('application/json')) - - -class TestUrlMapFactory(test.TestCase): - def setUp(self): - super(TestUrlMapFactory, self).setUp() - self.global_conf = {'not_found_app': 'app_global', - 'domain hoobar.com port 10 /': 'some_app_global'} - self.loader = mock.Mock() - - def test_not_found_app_in_local_conf(self): - local_conf = {'not_found_app': 'app_local', - 'domain foobar.com port 20 /': 'some_app_local'} - - self.loader.get_app.side_effect = ['app_local_loader', - 'some_app_loader'] - calls = [mock.call('app_local', global_conf=self.global_conf), - mock.call('some_app_local', global_conf=self.global_conf)] - - expected_urlmap = urlmap.URLMap(not_found_app='app_local_loader') - expected_urlmap['http://foobar.com:20'] = 'some_app_loader' - self.assertEqual(expected_urlmap, - urlmap.urlmap_factory(self.loader, self.global_conf, - **local_conf)) - self.loader.get_app.assert_has_calls(calls) - - def test_not_found_app_not_in_local_conf(self): - local_conf = {'domain foobar.com port 20 /': 'some_app_local'} - - self.loader.get_app.side_effect = ['app_global_loader', - 'some_app_returned_by_loader'] - calls = [mock.call('app_global', global_conf=self.global_conf), - mock.call('some_app_local', global_conf=self.global_conf)] - - expected_urlmap = urlmap.URLMap(not_found_app='app_global_loader') - expected_urlmap['http://foobar.com:20'] = 'some_app_returned'\ - '_by_loader' - self.assertEqual(expected_urlmap, - urlmap.urlmap_factory(self.loader, self.global_conf, - **local_conf)) - self.loader.get_app.assert_has_calls(calls) - - def test_not_found_app_is_none(self): - local_conf = {'not_found_app': None, - 'domain foobar.com port 20 /': 
'some_app_local'} - self.loader.get_app.return_value = 'some_app_returned_by_loader' - - expected_urlmap = urlmap.URLMap(not_found_app=None) - expected_urlmap['http://foobar.com:20'] = 'some_app_returned'\ - '_by_loader' - self.assertEqual(expected_urlmap, - urlmap.urlmap_factory(self.loader, self.global_conf, - **local_conf)) - self.loader.get_app.assert_called_once_with( - 'some_app_local', global_conf=self.global_conf) - - -class TestURLMap(test.TestCase): - def setUp(self): - super(TestURLMap, self).setUp() - self.urlmap = urlmap.URLMap() - - def test_match_with_applications(self): - self.urlmap[('http://10.20.30.40:50', '/path/somepath')] = 'app' - self.assertEqual((None, None), - self.urlmap._match('20.30.40.50', '20', - 'path/somepath')) - - def test_match_without_applications(self): - self.assertEqual((None, None), - self.urlmap._match('host', 20, 'app_url/somepath')) - - def test_match_path_info_equals_app_url(self): - self.urlmap[('http://20.30.40.50:60', '/app_url/somepath')] = 'app' - self.assertEqual(('app', '/app_url/somepath'), - self.urlmap._match('http://20.30.40.50', '60', - '/app_url/somepath')) - - def test_match_path_info_equals_app_url_many_app(self): - self.urlmap[('http://20.30.40.50:60', '/path')] = 'app1' - self.urlmap[('http://20.30.40.50:60', '/path/somepath')] = 'app2' - self.urlmap[('http://20.30.40.50:60', '/path/somepath/elsepath')] = \ - 'app3' - self.assertEqual(('app3', '/path/somepath/elsepath'), - self.urlmap._match('http://20.30.40.50', '60', - '/path/somepath/elsepath')) - - def test_path_strategy_wrong_path_info(self): - self.assertEqual((None, None, None), - self.urlmap._path_strategy('http://10.20.30.40', '50', - '/resource')) - - def test_path_strategy_wrong_mime_type(self): - self.urlmap[('http://10.20.30.40:50', '/path/elsepath/')] = 'app' - with mock.patch.object(self.urlmap, '_munge_path') as mock_munge_path: - mock_munge_path.return_value = 'value' - self.assertEqual( - (None, 'value', '/path/elsepath'), - 
self.urlmap._path_strategy('http://10.20.30.40', '50', - '/path/elsepath/resource.abc')) - mock_munge_path.assert_called_once_with( - 'app', '/path/elsepath/resource.abc', '/path/elsepath') diff --git a/cinder/tests/unit/test_cleanable_manager.py b/cinder/tests/unit/test_cleanable_manager.py deleted file mode 100644 index e682501d5..000000000 --- a/cinder/tests/unit/test_cleanable_manager.py +++ /dev/null @@ -1,227 +0,0 @@ -# Copyright (c) 2016 Red Hat, Inc. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -import mock - -from oslo_utils import timeutils - -from cinder import context -from cinder import db -from cinder import manager -from cinder import objects -from cinder import test -from cinder.tests.unit import fake_constants -from cinder.tests.unit import utils - - -class FakeManager(manager.CleanableManager): - def __init__(self, service_id=None, keep_after_clean=False): - if service_id: - self.service_id = service_id - self.keep_after_clean = keep_after_clean - - def _do_cleanup(self, ctxt, vo_resource): - vo_resource.status += '_cleaned' - vo_resource.save() - return self.keep_after_clean - - -class TestCleanableManager(test.TestCase): - def setUp(self): - super(TestCleanableManager, self).setUp() - self.user_id = fake_constants.USER_ID - self.project_id = fake_constants.PROJECT_ID - self.context = context.RequestContext(self.user_id, self.project_id, - is_admin=True) - self.service = db.service_create(self.context, {}) - - @mock.patch('cinder.db.workers_init', autospec=True) - @mock.patch('cinder.manager.CleanableManager.do_cleanup', autospec=True) - def test_init_host_with_service(self, mock_cleanup, mock_workers_init): - mngr = FakeManager() - self.assertFalse(hasattr(mngr, 'service_id')) - mngr.init_host(service_id=self.service.id) - - self.assertEqual(self.service.id, mngr.service_id) - mock_cleanup.assert_called_once_with(mngr, mock.ANY, mock.ANY) - clean_req = mock_cleanup.call_args[0][2] - self.assertIsInstance(clean_req, objects.CleanupRequest) - self.assertEqual(self.service.id, clean_req.service_id) - mock_workers_init.assert_called_once_with() - - def test_do_cleanup(self): - """Basic successful cleanup.""" - vol = utils.create_volume(self.context, status='creating') - db.worker_create(self.context, status='creating', - resource_type='Volume', resource_id=vol.id, - service_id=self.service.id) - - clean_req = objects.CleanupRequest(service_id=self.service.id) - mngr = FakeManager(self.service.id) - mngr.do_cleanup(self.context, clean_req) - - 
self.assertListEqual([], db.worker_get_all(self.context)) - vol.refresh() - self.assertEqual('creating_cleaned', vol.status) - - def test_do_cleanup_not_cleaning_already_claimed(self): - """Basic cleanup that doesn't touch already cleaning works.""" - vol = utils.create_volume(self.context, status='creating') - worker1 = db.worker_create(self.context, status='creating', - resource_type='Volume', resource_id=vol.id, - service_id=self.service.id) - worker1 = db.worker_get(self.context, id=worker1.id) - vol2 = utils.create_volume(self.context, status='deleting') - worker2 = db.worker_create(self.context, status='deleting', - resource_type='Volume', resource_id=vol2.id, - service_id=self.service.id + 1) - worker2 = db.worker_get(self.context, id=worker2.id) - - # Simulate that the change to vol2 worker happened between - # worker_get_all and trying to claim a work for cleanup - worker2.service_id = self.service.id - - clean_req = objects.CleanupRequest(service_id=self.service.id) - mngr = FakeManager(self.service.id) - with mock.patch('cinder.db.worker_get_all') as get_all_mock: - get_all_mock.return_value = [worker1, worker2] - mngr.do_cleanup(self.context, clean_req) - - workers = db.worker_get_all(self.context) - self.assertEqual(1, len(workers)) - self.assertEqual(worker2.id, workers[0].id) - - vol.refresh() - self.assertEqual('creating_cleaned', vol.status) - vol2.refresh() - self.assertEqual('deleting', vol2.status) - - def test_do_cleanup_not_cleaning_already_claimed_by_us(self): - """Basic cleanup that doesn't touch other thread's claimed works.""" - original_time = timeutils.utcnow() - other_thread_claimed_time = timeutils.utcnow() - vol = utils.create_volume(self.context, status='creating') - worker1 = db.worker_create(self.context, status='creating', - resource_type='Volume', resource_id=vol.id, - service_id=self.service.id, - updated_at=original_time) - worker1 = db.worker_get(self.context, id=worker1.id) - vol2 = utils.create_volume(self.context, 
status='deleting') - worker2 = db.worker_create(self.context, status='deleting', - resource_type='Volume', resource_id=vol2.id, - service_id=self.service.id, - updated_at=other_thread_claimed_time) - worker2 = db.worker_get(self.context, id=worker2.id) - - # Simulate that the change to vol2 worker happened between - # worker_get_all and trying to claim a work for cleanup - worker2.updated_at = original_time - - clean_req = objects.CleanupRequest(service_id=self.service.id) - mngr = FakeManager(self.service.id) - with mock.patch('cinder.db.worker_get_all') as get_all_mock: - get_all_mock.return_value = [worker1, worker2] - mngr.do_cleanup(self.context, clean_req) - - workers = db.worker_get_all(self.context) - self.assertEqual(1, len(workers)) - self.assertEqual(worker2.id, workers[0].id) - - vol.refresh() - self.assertEqual('creating_cleaned', vol.status) - vol2.refresh() - self.assertEqual('deleting', vol2.status) - - def test_do_cleanup_resource_deleted(self): - """Cleanup on a resource that's been already deleted.""" - vol = utils.create_volume(self.context, status='creating') - db.worker_create(self.context, status='creating', - resource_type='Volume', resource_id=vol.id, - service_id=self.service.id) - vol.destroy() - - clean_req = objects.CleanupRequest(service_id=self.service.id) - mngr = FakeManager(self.service.id) - mngr.do_cleanup(self.context, clean_req) - - workers = db.worker_get_all(self.context) - self.assertListEqual([], workers) - - def test_do_cleanup_resource_on_another_service(self): - """Cleanup on a resource that's been claimed by other service.""" - vol = utils.create_volume(self.context, status='deleting') - db.worker_create(self.context, status='deleting', - resource_type='Volume', resource_id=vol.id, - service_id=self.service.id + 1) - - clean_req = objects.CleanupRequest(service_id=self.service.id) - mngr = FakeManager(self.service.id) - mngr.do_cleanup(self.context, clean_req) - - workers = db.worker_get_all(self.context) - 
self.assertEqual(1, len(workers)) - - vol.refresh() - self.assertEqual('deleting', vol.status) - - def test_do_cleanup_resource_changed_status(self): - """Cleanup on a resource that's changed status.""" - vol = utils.create_volume(self.context, status='available') - db.worker_create(self.context, status='creating', - resource_type='Volume', resource_id=vol.id, - service_id=self.service.id) - - clean_req = objects.CleanupRequest(service_id=self.service.id) - mngr = FakeManager(self.service.id) - mngr.do_cleanup(self.context, clean_req) - - workers = db.worker_get_all(self.context) - self.assertListEqual([], workers) - - vol.refresh() - self.assertEqual('available', vol.status) - - def test_do_cleanup_keep_worker(self): - """Cleanup on a resource that will remove worker when cleaning up.""" - vol = utils.create_volume(self.context, status='deleting') - db.worker_create(self.context, status='deleting', - resource_type='Volume', resource_id=vol.id, - service_id=self.service.id) - - clean_req = objects.CleanupRequest(service_id=self.service.id) - mngr = FakeManager(self.service.id, keep_after_clean=True) - mngr.do_cleanup(self.context, clean_req) - - workers = db.worker_get_all(self.context) - self.assertEqual(1, len(workers)) - vol.refresh() - self.assertEqual('deleting_cleaned', vol.status) - - @mock.patch.object(FakeManager, '_do_cleanup', side_effect=Exception) - def test_do_cleanup_revive_on_cleanup_fail(self, mock_clean): - """Cleanup will revive a worker if cleanup fails.""" - vol = utils.create_volume(self.context, status='creating') - db.worker_create(self.context, status='creating', - resource_type='Volume', resource_id=vol.id, - service_id=self.service.id) - - clean_req = objects.CleanupRequest(service_id=self.service.id) - mngr = FakeManager(self.service.id) - mngr.do_cleanup(self.context, clean_req) - - workers = db.worker_get_all(self.context) - self.assertEqual(1, len(workers)) - vol.refresh() - self.assertEqual('creating', vol.status) diff --git 
a/cinder/tests/unit/test_cmd.py b/cinder/tests/unit/test_cmd.py deleted file mode 100644 index 27a9ab973..000000000 --- a/cinder/tests/unit/test_cmd.py +++ /dev/null @@ -1,2012 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import datetime -from iso8601 import iso8601 -import sys -import time - -import ddt -import fixtures -import mock -from oslo_config import cfg -from oslo_db import exception as oslo_exception -from oslo_utils import timeutils -import six -from six.moves import StringIO - -try: - import rtslib_fb -except ImportError: - import rtslib as rtslib_fb - - -from cinder.cmd import api as cinder_api -from cinder.cmd import backup as cinder_backup -from cinder.cmd import manage as cinder_manage -from cinder.cmd import rtstool as cinder_rtstool -from cinder.cmd import scheduler as cinder_scheduler -from cinder.cmd import volume as cinder_volume -from cinder.cmd import volume_usage_audit -from cinder.common import constants -from cinder import context -from cinder import exception -from cinder.objects import fields -from cinder import test -from cinder.tests.unit import fake_cluster -from cinder.tests.unit import fake_constants as fake -from cinder.tests.unit import fake_service -from cinder.tests.unit import fake_volume -from cinder import version - -CONF = cfg.CONF - - -class TestCinderApiCmd(test.TestCase): - """Unit test cases for python modules under cinder/cmd.""" - - def setUp(self): - super(TestCinderApiCmd, self).setUp() - sys.argv = 
['cinder-api'] - - @mock.patch('cinder.service.WSGIService') - @mock.patch('cinder.service.process_launcher') - @mock.patch('cinder.rpc.init') - @mock.patch('cinder.utils.monkey_patch') - @mock.patch('oslo_log.log.setup') - def test_main(self, log_setup, monkey_patch, rpc_init, process_launcher, - wsgi_service): - launcher = process_launcher.return_value - server = wsgi_service.return_value - server.workers = mock.sentinel.worker_count - - cinder_api.main() - - self.assertEqual('cinder', CONF.project) - self.assertEqual(CONF.version, version.version_string()) - log_setup.assert_called_once_with(CONF, "cinder") - monkey_patch.assert_called_once_with() - rpc_init.assert_called_once_with(CONF) - process_launcher.assert_called_once_with() - wsgi_service.assert_called_once_with('osapi_volume') - launcher.launch_service.assert_called_once_with(server, - workers=server.workers) - launcher.wait.assert_called_once_with() - - -class TestCinderBackupCmd(test.TestCase): - - def setUp(self): - super(TestCinderBackupCmd, self).setUp() - sys.argv = ['cinder-backup'] - - @mock.patch('cinder.service.wait') - @mock.patch('cinder.service.serve') - @mock.patch('cinder.service.Service.create') - @mock.patch('cinder.utils.monkey_patch') - @mock.patch('oslo_log.log.setup') - def test_main(self, log_setup, monkey_patch, service_create, service_serve, - service_wait): - server = service_create.return_value - - cinder_backup.main() - - self.assertEqual('cinder', CONF.project) - self.assertEqual(CONF.version, version.version_string()) - log_setup.assert_called_once_with(CONF, "cinder") - monkey_patch.assert_called_once_with() - service_create.assert_called_once_with(binary='cinder-backup', - coordination=True) - service_serve.assert_called_once_with(server) - service_wait.assert_called_once_with() - - -class TestCinderSchedulerCmd(test.TestCase): - - def setUp(self): - super(TestCinderSchedulerCmd, self).setUp() - sys.argv = ['cinder-scheduler'] - - @mock.patch('cinder.service.wait') - 
@mock.patch('cinder.service.serve') - @mock.patch('cinder.service.Service.create') - @mock.patch('cinder.utils.monkey_patch') - @mock.patch('oslo_log.log.setup') - def test_main(self, log_setup, monkey_patch, service_create, - service_serve, service_wait): - server = service_create.return_value - - cinder_scheduler.main() - - self.assertEqual('cinder', CONF.project) - self.assertEqual(CONF.version, version.version_string()) - log_setup.assert_called_once_with(CONF, "cinder") - monkey_patch.assert_called_once_with() - service_create.assert_called_once_with(binary='cinder-scheduler') - service_serve.assert_called_once_with(server) - service_wait.assert_called_once_with() - - -class TestCinderVolumeCmd(test.TestCase): - - def setUp(self): - super(TestCinderVolumeCmd, self).setUp() - sys.argv = ['cinder-volume'] - - @mock.patch('cinder.service.get_launcher') - @mock.patch('cinder.service.Service.create') - @mock.patch('cinder.utils.monkey_patch') - @mock.patch('oslo_log.log.setup') - def test_main(self, log_setup, monkey_patch, service_create, - get_launcher): - CONF.set_override('enabled_backends', None) - self.assertRaises(SystemExit, cinder_volume.main) - self.assertFalse(service_create.called) - - @mock.patch('cinder.service.get_launcher') - @mock.patch('cinder.service.Service.create') - @mock.patch('cinder.utils.monkey_patch') - @mock.patch('oslo_log.log.setup') - def test_main_with_backends(self, log_setup, monkey_patch, service_create, - get_launcher): - backends = ['', 'backend1', 'backend2', ''] - CONF.set_override('enabled_backends', backends) - CONF.set_override('host', 'host') - launcher = get_launcher.return_value - - cinder_volume.main() - - self.assertEqual('cinder', CONF.project) - self.assertEqual(CONF.version, version.version_string()) - log_setup.assert_called_once_with(CONF, "cinder") - monkey_patch.assert_called_once_with() - get_launcher.assert_called_once_with() - c1 = mock.call(binary='cinder-volume', host='host@backend1', - 
service_name='backend1', coordination=True, - cluster=None) - c2 = mock.call(binary='cinder-volume', host='host@backend2', - service_name='backend2', coordination=True, - cluster=None) - service_create.assert_has_calls([c1, c2]) - self.assertEqual(2, launcher.launch_service.call_count) - launcher.wait.assert_called_once_with() - - -@ddt.ddt -class TestCinderManageCmd(test.TestCase): - - def setUp(self): - super(TestCinderManageCmd, self).setUp() - sys.argv = ['cinder-manage'] - - def _test_purge_invalid_age_in_days(self, age_in_days): - db_cmds = cinder_manage.DbCommands() - ex = self.assertRaises(SystemExit, db_cmds.purge, age_in_days) - self.assertEqual(1, ex.code) - - @mock.patch('cinder.db.migration.db_sync') - def test_db_commands_sync(self, db_sync): - version = 11 - db_cmds = cinder_manage.DbCommands() - db_cmds.sync(version=version) - db_sync.assert_called_once_with(version) - - @mock.patch('oslo_db.sqlalchemy.migration.db_version') - def test_db_commands_version(self, db_version): - db_cmds = cinder_manage.DbCommands() - with mock.patch('sys.stdout', new=six.StringIO()): - db_cmds.version() - self.assertEqual(1, db_version.call_count) - - def test_db_commands_upgrade_out_of_range(self): - version = 2147483647 - db_cmds = cinder_manage.DbCommands() - exit = self.assertRaises(SystemExit, db_cmds.sync, version + 1) - self.assertEqual(1, exit.code) - - @mock.patch("oslo_db.sqlalchemy.migration.db_sync") - def test_db_commands_script_not_present(self, db_sync): - db_sync.side_effect = oslo_exception.DbMigrationError - db_cmds = cinder_manage.DbCommands() - exit = self.assertRaises(SystemExit, db_cmds.sync, 101) - self.assertEqual(1, exit.code) - - @mock.patch('cinder.cmd.manage.DbCommands.online_migrations', - (mock.Mock(side_effect=((2, 2), (0, 0)), __name__='foo'),)) - def test_db_commands_online_data_migrations(self): - db_cmds = cinder_manage.DbCommands() - exit = self.assertRaises(SystemExit, db_cmds.online_data_migrations) - self.assertEqual(0, exit.code) 
- cinder_manage.DbCommands.online_migrations[0].assert_has_calls( - (mock.call(mock.ANY, 50, False),) * 2) - - def _fake_db_command(self, migrations=None): - if migrations is None: - mock_mig_1 = mock.MagicMock(__name__="mock_mig_1") - mock_mig_2 = mock.MagicMock(__name__="mock_mig_2") - mock_mig_1.return_value = (5, 4) - mock_mig_2.return_value = (6, 6) - migrations = (mock_mig_1, mock_mig_2) - - class _CommandSub(cinder_manage.DbCommands): - online_migrations = migrations - - return _CommandSub - - @mock.patch('cinder.context.get_admin_context') - def test_online_migrations(self, mock_get_context): - self.useFixture(fixtures.MonkeyPatch('sys.stdout', StringIO())) - ctxt = mock_get_context.return_value - db_cmds = self._fake_db_command() - command = db_cmds() - exit = self.assertRaises(SystemExit, - command.online_data_migrations, 10) - self.assertEqual(1, exit.code) - expected = """\ -5 rows matched query mock_mig_1, 4 migrated, 1 remaining -6 rows matched query mock_mig_2, 6 migrated, 0 remaining -+------------+-------+------+-----------+ -| Migration | Found | Done | Remaining | -+------------+-------+------+-----------+ -| mock_mig_1 | 5 | 4 | 1 | -| mock_mig_2 | 6 | 6 | 0 | -+------------+-------+------+-----------+ -""" - command.online_migrations[0].assert_has_calls([mock.call(ctxt, - 10, False)]) - command.online_migrations[1].assert_has_calls([mock.call(ctxt, - 6, False)]) - - self.assertEqual(expected, sys.stdout.getvalue()) - - @mock.patch('cinder.cmd.manage.DbCommands.online_migrations', - (mock.Mock(side_effect=((2, 2), (0, 0)), __name__='foo'),)) - def test_db_commands_online_data_migrations_ignore_state_and_max(self): - db_cmds = cinder_manage.DbCommands() - exit = self.assertRaises(SystemExit, db_cmds.online_data_migrations, - 2, True) - self.assertEqual(1, exit.code) - cinder_manage.DbCommands.online_migrations[0].assert_called_once_with( - mock.ANY, 2, True) - - @mock.patch('cinder.cmd.manage.DbCommands.online_migrations', - 
(mock.Mock(side_effect=((2, 2), (0, 0)), __name__='foo'),)) - def test_db_commands_online_data_migrations_max_negative(self): - db_cmds = cinder_manage.DbCommands() - exit = self.assertRaises(SystemExit, db_cmds.online_data_migrations, - -1) - self.assertEqual(127, exit.code) - cinder_manage.DbCommands.online_migrations[0].assert_not_called() - - @mock.patch('cinder.version.version_string') - def test_versions_commands_list(self, version_string): - version_cmds = cinder_manage.VersionCommands() - with mock.patch('sys.stdout', new=six.StringIO()): - version_cmds.list() - version_string.assert_called_once_with() - - @mock.patch('cinder.version.version_string') - def test_versions_commands_call(self, version_string): - version_cmds = cinder_manage.VersionCommands() - with mock.patch('sys.stdout', new=six.StringIO()): - version_cmds.__call__() - version_string.assert_called_once_with() - - def test_purge_age_in_days_value_equal_to_zero(self): - age_in_days = 0 - self._test_purge_invalid_age_in_days(age_in_days) - - def test_purge_with_negative_age_in_days(self): - age_in_days = -1 - self._test_purge_invalid_age_in_days(age_in_days) - - def test_purge_exceeded_age_in_days_limit(self): - age_in_days = int(time.time() / 86400) + 1 - self._test_purge_invalid_age_in_days(age_in_days) - - @mock.patch('cinder.db.sqlalchemy.api.purge_deleted_rows') - @mock.patch('cinder.context.get_admin_context') - def test_purge_less_than_age_in_days_limit(self, get_admin_context, - purge_deleted_rows): - age_in_days = int(time.time() / 86400) - 1 - ctxt = context.RequestContext(fake.USER_ID, fake.PROJECT_ID, - is_admin=True) - get_admin_context.return_value = ctxt - - purge_deleted_rows.return_value = None - - db_cmds = cinder_manage.DbCommands() - db_cmds.purge(age_in_days) - - get_admin_context.assert_called_once_with() - purge_deleted_rows.assert_called_once_with( - ctxt, age_in_days=age_in_days) - - @mock.patch('cinder.db.service_get_all') - 
@mock.patch('cinder.context.get_admin_context') - def test_host_commands_list(self, get_admin_context, service_get_all): - get_admin_context.return_value = mock.sentinel.ctxt - service_get_all.return_value = [{'host': 'fake-host', - 'availability_zone': 'fake-az'}] - - with mock.patch('sys.stdout', new=six.StringIO()) as fake_out: - expected_out = ("%(host)-25s\t%(zone)-15s\n" % - {'host': 'host', 'zone': 'zone'}) - expected_out += ("%(host)-25s\t%(availability_zone)-15s\n" % - {'host': 'fake-host', - 'availability_zone': 'fake-az'}) - host_cmds = cinder_manage.HostCommands() - host_cmds.list() - - get_admin_context.assert_called_once_with() - service_get_all.assert_called_once_with(mock.sentinel.ctxt) - self.assertEqual(expected_out, fake_out.getvalue()) - - @mock.patch('cinder.db.service_get_all') - @mock.patch('cinder.context.get_admin_context') - def test_host_commands_list_with_zone(self, get_admin_context, - service_get_all): - get_admin_context.return_value = mock.sentinel.ctxt - service_get_all.return_value = [{'host': 'fake-host', - 'availability_zone': 'fake-az1'}, - {'host': 'fake-host', - 'availability_zone': 'fake-az2'}] - - with mock.patch('sys.stdout', new=six.StringIO()) as fake_out: - expected_out = ("%(host)-25s\t%(zone)-15s\n" % - {'host': 'host', 'zone': 'zone'}) - expected_out += ("%(host)-25s\t%(availability_zone)-15s\n" % - {'host': 'fake-host', - 'availability_zone': 'fake-az1'}) - host_cmds = cinder_manage.HostCommands() - host_cmds.list(zone='fake-az1') - - get_admin_context.assert_called_once_with() - service_get_all.assert_called_once_with(mock.sentinel.ctxt) - self.assertEqual(expected_out, fake_out.getvalue()) - - @mock.patch('cinder.objects.base.CinderObjectSerializer') - @mock.patch('cinder.rpc.get_client') - @mock.patch('cinder.rpc.init') - @mock.patch('cinder.rpc.initialized', return_value=False) - @mock.patch('oslo_messaging.Target') - def test_volume_commands_init(self, messaging_target, rpc_initialized, - rpc_init, get_client, 
object_serializer): - mock_target = messaging_target.return_value - mock_rpc_client = get_client.return_value - - volume_cmds = cinder_manage.VolumeCommands() - rpc_client = volume_cmds._rpc_client() - - rpc_initialized.assert_called_once_with() - rpc_init.assert_called_once_with(CONF) - messaging_target.assert_called_once_with(topic=constants.VOLUME_TOPIC) - get_client.assert_called_once_with(mock_target, - serializer=object_serializer()) - self.assertEqual(mock_rpc_client, rpc_client) - - @mock.patch('cinder.db.sqlalchemy.api.volume_get') - @mock.patch('cinder.context.get_admin_context') - @mock.patch('cinder.rpc.get_client') - @mock.patch('cinder.rpc.init') - def test_volume_commands_delete(self, rpc_init, get_client, - get_admin_context, volume_get): - ctxt = context.RequestContext('admin', 'fake', True) - get_admin_context.return_value = ctxt - mock_client = mock.MagicMock() - cctxt = mock.MagicMock() - mock_client.prepare.return_value = cctxt - get_client.return_value = mock_client - host = 'fake@host' - db_volume = {'host': host + '#pool1'} - volume = fake_volume.fake_db_volume(**db_volume) - volume_obj = fake_volume.fake_volume_obj(ctxt, **volume) - volume_id = volume['id'] - volume_get.return_value = volume - - volume_cmds = cinder_manage.VolumeCommands() - volume_cmds._client = mock_client - volume_cmds.delete(volume_id) - - volume_get.assert_called_once_with(ctxt, volume_id) - mock_client.prepare.assert_called_once_with(server=host) - cctxt.cast.assert_called_once_with(ctxt, 'delete_volume', - volume_id=volume['id'], - volume=volume_obj) - - @mock.patch('cinder.db.volume_destroy') - @mock.patch('cinder.db.sqlalchemy.api.volume_get') - @mock.patch('cinder.context.get_admin_context') - @mock.patch('cinder.rpc.init') - def test_volume_commands_delete_no_host(self, rpc_init, get_admin_context, - volume_get, volume_destroy): - ctxt = context.RequestContext(fake.USER_ID, fake.PROJECT_ID, - is_admin=True) - get_admin_context.return_value = ctxt - volume = 
fake_volume.fake_db_volume() - volume_id = volume['id'] - volume_get.return_value = volume - - with mock.patch('sys.stdout', new=six.StringIO()) as fake_out: - expected_out = ('Volume not yet assigned to host.\n' - 'Deleting volume from database and skipping' - ' rpc.\n') - volume_cmds = cinder_manage.VolumeCommands() - volume_cmds.delete(volume_id) - - get_admin_context.assert_called_once_with() - volume_get.assert_called_once_with(ctxt, volume_id) - self.assertTrue(volume_destroy.called) - admin_context = volume_destroy.call_args[0][0] - self.assertTrue(admin_context.is_admin) - self.assertEqual(expected_out, fake_out.getvalue()) - - @mock.patch('cinder.db.volume_destroy') - @mock.patch('cinder.db.sqlalchemy.api.volume_get') - @mock.patch('cinder.context.get_admin_context') - @mock.patch('cinder.rpc.init') - def test_volume_commands_delete_volume_in_use(self, rpc_init, - get_admin_context, - volume_get, volume_destroy): - ctxt = context.RequestContext(fake.USER_ID, fake.PROJECT_ID) - get_admin_context.return_value = ctxt - db_volume = {'status': 'in-use', 'host': 'fake-host'} - volume = fake_volume.fake_db_volume(**db_volume) - volume_id = volume['id'] - volume_get.return_value = volume - - with mock.patch('sys.stdout', new=six.StringIO()) as fake_out: - expected_out = ('Volume is in-use.\n' - 'Detach volume from instance and then try' - ' again.\n') - volume_cmds = cinder_manage.VolumeCommands() - volume_cmds.delete(volume_id) - - volume_get.assert_called_once_with(ctxt, volume_id) - self.assertEqual(expected_out, fake_out.getvalue()) - - def test_config_commands_list(self): - with mock.patch('sys.stdout', new=six.StringIO()) as fake_out: - expected_out = '' - for key, value in CONF.items(): - expected_out += '%s = %s' % (key, value) + '\n' - - config_cmds = cinder_manage.ConfigCommands() - config_cmds.list() - - self.assertEqual(expected_out, fake_out.getvalue()) - - def test_config_commands_list_param(self): - with mock.patch('sys.stdout', new=six.StringIO()) 
as fake_out: - CONF.set_override('host', 'fake') - expected_out = 'host = fake\n' - - config_cmds = cinder_manage.ConfigCommands() - config_cmds.list(param='host') - - self.assertEqual(expected_out, fake_out.getvalue()) - - def test_get_log_commands_no_errors(self): - with mock.patch('sys.stdout', new=six.StringIO()) as fake_out: - CONF.set_override('log_dir', None) - expected_out = 'No errors in logfiles!\n' - - get_log_cmds = cinder_manage.GetLogCommands() - get_log_cmds.errors() - - self.assertEqual(expected_out, fake_out.getvalue()) - - @mock.patch('six.moves.builtins.open') - @mock.patch('os.listdir') - def test_get_log_commands_errors(self, listdir, open): - CONF.set_override('log_dir', 'fake-dir') - listdir.return_value = ['fake-error.log'] - - with mock.patch('sys.stdout', new=six.StringIO()) as fake_out: - open.return_value = six.StringIO( - '[ ERROR ] fake-error-message') - expected_out = ('fake-dir/fake-error.log:-\n' - 'Line 1 : [ ERROR ] fake-error-message\n') - - get_log_cmds = cinder_manage.GetLogCommands() - get_log_cmds.errors() - - self.assertEqual(expected_out, fake_out.getvalue()) - open.assert_called_once_with('fake-dir/fake-error.log', 'r') - listdir.assert_called_once_with(CONF.log_dir) - - @mock.patch('six.moves.builtins.open') - @mock.patch('os.path.exists') - def test_get_log_commands_syslog_no_log_file(self, path_exists, open): - path_exists.return_value = False - - get_log_cmds = cinder_manage.GetLogCommands() - with mock.patch('sys.stdout', new=six.StringIO()): - exit = self.assertRaises(SystemExit, get_log_cmds.syslog) - self.assertEqual(1, exit.code) - - path_exists.assert_any_call('/var/log/syslog') - path_exists.assert_any_call('/var/log/messages') - - @mock.patch('cinder.db.backup_get_all') - @mock.patch('cinder.context.get_admin_context') - def test_backup_commands_list(self, get_admin_context, backup_get_all): - ctxt = context.RequestContext(fake.USER_ID, fake.PROJECT_ID) - get_admin_context.return_value = ctxt - backup = {'id': 
fake.BACKUP_ID, - 'user_id': fake.USER_ID, - 'project_id': fake.PROJECT_ID, - 'host': 'fake-host', - 'display_name': 'fake-display-name', - 'container': 'fake-container', - 'status': fields.BackupStatus.AVAILABLE, - 'size': 123, - 'object_count': 1, - 'volume_id': fake.VOLUME_ID, - } - backup_get_all.return_value = [backup] - with mock.patch('sys.stdout', new=six.StringIO()) as fake_out: - hdr = ('%-32s\t%-32s\t%-32s\t%-24s\t%-24s\t%-12s\t%-12s\t%-12s' - '\t%-12s') - header = hdr % ('ID', - 'User ID', - 'Project ID', - 'Host', - 'Name', - 'Container', - 'Status', - 'Size', - 'Object Count') - res = ('%-32s\t%-32s\t%-32s\t%-24s\t%-24s\t%-12s\t%-12s\t%-12d' - '\t%-12s') - resource = res % (backup['id'], - backup['user_id'], - backup['project_id'], - backup['host'], - backup['display_name'], - backup['container'], - backup['status'], - backup['size'], - 1) - expected_out = header + '\n' + resource + '\n' - - backup_cmds = cinder_manage.BackupCommands() - backup_cmds.list() - - get_admin_context.assert_called_once_with() - backup_get_all.assert_called_once_with(ctxt, None, None, None, - None, None, None) - self.assertEqual(expected_out, fake_out.getvalue()) - - @mock.patch('cinder.db.backup_update') - @mock.patch('cinder.db.backup_get_all_by_host') - @mock.patch('cinder.context.get_admin_context') - def test_update_backup_host(self, get_admin_context, - backup_get_by_host, - backup_update): - ctxt = context.RequestContext(fake.USER_ID, fake.PROJECT_ID) - get_admin_context.return_value = ctxt - backup = {'id': fake.BACKUP_ID, - 'user_id': fake.USER_ID, - 'project_id': fake.PROJECT_ID, - 'host': 'fake-host', - 'display_name': 'fake-display-name', - 'container': 'fake-container', - 'status': fields.BackupStatus.AVAILABLE, - 'size': 123, - 'object_count': 1, - 'volume_id': fake.VOLUME_ID, - } - backup_get_by_host.return_value = [backup] - backup_cmds = cinder_manage.BackupCommands() - backup_cmds.update_backup_host('fake_host', 'fake_host2') - - 
get_admin_context.assert_called_once_with() - backup_get_by_host.assert_called_once_with(ctxt, 'fake_host') - backup_update.assert_called_once_with(ctxt, fake.BACKUP_ID, - {'host': 'fake_host2'}) - - @mock.patch('cinder.db.consistencygroup_update') - @mock.patch('cinder.db.consistencygroup_get_all') - @mock.patch('cinder.context.get_admin_context') - def test_update_consisgroup_host(self, get_admin_context, - consisgroup_get_all, - consisgroup_update): - ctxt = context.RequestContext(fake.USER_ID, fake.PROJECT_ID) - get_admin_context.return_value = ctxt - consisgroup = {'id': fake.CONSISTENCY_GROUP_ID, - 'user_id': fake.USER_ID, - 'project_id': fake.PROJECT_ID, - 'host': 'fake-host', - 'status': fields.ConsistencyGroupStatus.AVAILABLE - } - consisgroup_get_all.return_value = [consisgroup] - consisgrup_cmds = cinder_manage.ConsistencyGroupCommands() - consisgrup_cmds.update_cg_host('fake_host', 'fake_host2') - - get_admin_context.assert_called_once_with() - consisgroup_get_all.assert_called_once_with( - ctxt, filters={'host': 'fake_host'}, limit=None, marker=None, - offset=None, sort_dirs=None, sort_keys=None) - consisgroup_update.assert_called_once_with( - ctxt, fake.CONSISTENCY_GROUP_ID, {'host': 'fake_host2'}) - - @mock.patch('cinder.objects.service.Service.is_up', - new_callable=mock.PropertyMock) - @mock.patch('cinder.db.service_get_all') - @mock.patch('cinder.context.get_admin_context') - def _test_service_commands_list(self, service, get_admin_context, - service_get_all, service_is_up): - ctxt = context.RequestContext(fake.USER_ID, fake.PROJECT_ID) - get_admin_context.return_value = ctxt - service_get_all.return_value = [service] - service_is_up.return_value = True - with mock.patch('sys.stdout', new=six.StringIO()) as fake_out: - format = "%-16s %-36s %-16s %-10s %-5s %-20s %-12s %-15s %-36s" - print_format = format % ('Binary', - 'Host', - 'Zone', - 'Status', - 'State', - 'Updated At', - 'RPC Version', - 'Object Version', - 'Cluster') - rpc_version = 
service['rpc_current_version'] - object_version = service['object_current_version'] - cluster = service.get('cluster_name', '') - service_format = format % (service['binary'], - service['host'].partition('.')[0], - service['availability_zone'], - 'enabled', - ':-)', - service['updated_at'], - rpc_version, - object_version, - cluster) - expected_out = print_format + '\n' + service_format + '\n' - - service_cmds = cinder_manage.ServiceCommands() - service_cmds.list() - - self.assertEqual(expected_out, fake_out.getvalue()) - get_admin_context.assert_called_with() - service_get_all.assert_called_with(ctxt) - - def test_service_commands_list(self): - service = {'binary': 'cinder-binary', - 'host': 'fake-host.fake-domain', - 'availability_zone': 'fake-zone', - 'updated_at': '2014-06-30 11:22:33', - 'disabled': False, - 'rpc_current_version': '1.1', - 'object_current_version': '1.1', - 'cluster_name': 'my_cluster'} - for binary in ('volume', 'scheduler', 'backup'): - service['binary'] = 'cinder-%s' % binary - self._test_service_commands_list(service) - - def test_service_commands_list_no_updated_at_or_cluster(self): - service = {'binary': 'cinder-binary', - 'host': 'fake-host.fake-domain', - 'availability_zone': 'fake-zone', - 'updated_at': None, - 'disabled': False, - 'rpc_current_version': '1.1', - 'object_current_version': '1.1'} - for binary in ('volume', 'scheduler', 'backup'): - service['binary'] = 'cinder-%s' % binary - self._test_service_commands_list(service) - - @ddt.data(('foobar', 'foobar'), ('-foo bar', 'foo bar'), - ('--foo bar', 'foo bar'), ('--foo-bar', 'foo_bar'), - ('---foo-bar', '_foo_bar')) - @ddt.unpack - def test_get_arg_string(self, arg, expected): - self.assertEqual(expected, cinder_manage.get_arg_string(arg)) - - def test_fetch_func_args(self): - @cinder_manage.args('--full-rename') - @cinder_manage.args('--different-dest', dest='my_dest') - @cinder_manage.args('current') - def my_func(): - pass - - expected = {'full_rename': 
mock.sentinel.full_rename, - 'my_dest': mock.sentinel.my_dest, - 'current': mock.sentinel.current} - - with mock.patch.object(cinder_manage, 'CONF') as mock_conf: - mock_conf.category = mock.Mock(**expected) - self.assertDictEqual(expected, - cinder_manage.fetch_func_args(my_func)) - - @mock.patch('cinder.context.get_admin_context') - @mock.patch('cinder.db.cluster_get_all') - def tests_cluster_commands_list(self, get_all_mock, get_admin_mock, - ): - now = timeutils.utcnow() - cluster = fake_cluster.fake_cluster_orm(num_hosts=4, num_down_hosts=2, - created_at=now, - last_heartbeat=now) - get_all_mock.return_value = [cluster] - - ctxt = context.RequestContext(fake.USER_ID, fake.PROJECT_ID) - get_admin_mock.return_value = ctxt - - with mock.patch('sys.stdout', new=six.StringIO()) as fake_out: - format_ = "%-36s %-16s %-10s %-5s %-20s %-7s %-12s %-20s" - print_format = format_ % ('Name', - 'Binary', - 'Status', - 'State', - 'Heartbeat', - 'Hosts', - 'Down Hosts', - 'Updated At') - cluster_format = format_ % (cluster.name, cluster.binary, - 'enabled', ':-)', - cluster.last_heartbeat, - cluster.num_hosts, - cluster.num_down_hosts, - None) - expected_out = print_format + '\n' + cluster_format + '\n' - - cluster_cmds = cinder_manage.ClusterCommands() - cluster_cmds.list() - - self.assertEqual(expected_out, fake_out.getvalue()) - get_admin_mock.assert_called_with() - get_all_mock.assert_called_with(ctxt, is_up=None, - get_services=False, - services_summary=True, - read_deleted='no') - - @mock.patch('cinder.db.sqlalchemy.api.cluster_get', auto_specs=True) - @mock.patch('cinder.context.get_admin_context') - def test_cluster_commands_remove_not_found(self, admin_ctxt_mock, - cluster_get_mock): - cluster_get_mock.side_effect = exception.ClusterNotFound(id=1) - cluster_commands = cinder_manage.ClusterCommands() - exit = cluster_commands.remove(False, 'abinary', 'acluster') - self.assertEqual(2, exit) - cluster_get_mock.assert_called_once_with(admin_ctxt_mock.return_value, - 
None, name='acluster', - binary='abinary', - get_services=False) - - @mock.patch('cinder.db.sqlalchemy.api.service_destroy', auto_specs=True) - @mock.patch('cinder.db.sqlalchemy.api.cluster_destroy', auto_specs=True) - @mock.patch('cinder.db.sqlalchemy.api.cluster_get', auto_specs=True) - @mock.patch('cinder.context.get_admin_context') - def test_cluster_commands_remove_fail_has_hosts(self, admin_ctxt_mock, - cluster_get_mock, - cluster_destroy_mock, - service_destroy_mock): - cluster = fake_cluster.fake_cluster_ovo(mock.Mock()) - cluster_get_mock.return_value = cluster - cluster_destroy_mock.side_effect = exception.ClusterHasHosts(id=1) - cluster_commands = cinder_manage.ClusterCommands() - exit = cluster_commands.remove(False, 'abinary', 'acluster') - self.assertEqual(2, exit) - cluster_get_mock.assert_called_once_with(admin_ctxt_mock.return_value, - None, name='acluster', - binary='abinary', - get_services=False) - cluster_destroy_mock.assert_called_once_with( - admin_ctxt_mock.return_value.elevated.return_value, cluster.id) - service_destroy_mock.assert_not_called() - - @mock.patch('cinder.db.sqlalchemy.api.service_destroy', auto_specs=True) - @mock.patch('cinder.db.sqlalchemy.api.cluster_destroy', auto_specs=True) - @mock.patch('cinder.db.sqlalchemy.api.cluster_get', auto_specs=True) - @mock.patch('cinder.context.get_admin_context') - def test_cluster_commands_remove_success_no_hosts(self, admin_ctxt_mock, - cluster_get_mock, - cluster_destroy_mock, - service_destroy_mock): - cluster = fake_cluster.fake_cluster_orm() - cluster_get_mock.return_value = cluster - cluster_commands = cinder_manage.ClusterCommands() - exit = cluster_commands.remove(False, 'abinary', 'acluster') - self.assertIsNone(exit) - cluster_get_mock.assert_called_once_with(admin_ctxt_mock.return_value, - None, name='acluster', - binary='abinary', - get_services=False) - cluster_destroy_mock.assert_called_once_with( - admin_ctxt_mock.return_value.elevated.return_value, cluster.id) - 
service_destroy_mock.assert_not_called() - - @mock.patch('cinder.db.sqlalchemy.api.service_destroy', auto_specs=True) - @mock.patch('cinder.db.sqlalchemy.api.cluster_destroy', auto_specs=True) - @mock.patch('cinder.db.sqlalchemy.api.cluster_get', auto_specs=True) - @mock.patch('cinder.context.get_admin_context') - def test_cluster_commands_remove_recursive(self, admin_ctxt_mock, - cluster_get_mock, - cluster_destroy_mock, - service_destroy_mock): - cluster = fake_cluster.fake_cluster_orm() - cluster.services = [fake_service.fake_service_orm()] - cluster_get_mock.return_value = cluster - cluster_commands = cinder_manage.ClusterCommands() - exit = cluster_commands.remove(True, 'abinary', 'acluster') - self.assertIsNone(exit) - cluster_get_mock.assert_called_once_with(admin_ctxt_mock.return_value, - None, name='acluster', - binary='abinary', - get_services=True) - cluster_destroy_mock.assert_called_once_with( - admin_ctxt_mock.return_value.elevated.return_value, cluster.id) - service_destroy_mock.assert_called_once_with( - admin_ctxt_mock.return_value.elevated.return_value, - cluster.services[0]['id']) - - @mock.patch('cinder.db.sqlalchemy.api.volume_include_in_cluster', - auto_specs=True, return_value=1) - @mock.patch('cinder.db.sqlalchemy.api.consistencygroup_include_in_cluster', - auto_specs=True, return_value=2) - @mock.patch('cinder.context.get_admin_context') - def test_cluster_commands_rename(self, admin_ctxt_mock, - volume_include_mock, cg_include_mock): - """Test that cluster rename changes volumes and cgs.""" - current_cluster_name = mock.sentinel.old_cluster_name - new_cluster_name = mock.sentinel.new_cluster_name - partial = mock.sentinel.partial - cluster_commands = cinder_manage.ClusterCommands() - exit = cluster_commands.rename(partial, current_cluster_name, - new_cluster_name) - - self.assertIsNone(exit) - volume_include_mock.assert_called_once_with( - admin_ctxt_mock.return_value, new_cluster_name, partial, - cluster_name=current_cluster_name) - 
cg_include_mock.assert_called_once_with( - admin_ctxt_mock.return_value, new_cluster_name, partial, - cluster_name=current_cluster_name) - - @mock.patch('cinder.db.sqlalchemy.api.volume_include_in_cluster', - auto_specs=True, return_value=0) - @mock.patch('cinder.db.sqlalchemy.api.consistencygroup_include_in_cluster', - auto_specs=True, return_value=0) - @mock.patch('cinder.context.get_admin_context') - def test_cluster_commands_rename_no_changes(self, admin_ctxt_mock, - volume_include_mock, - cg_include_mock): - """Test that we return an error when cluster rename has no effect.""" - cluster_commands = cinder_manage.ClusterCommands() - exit = cluster_commands.rename(False, 'cluster', 'new_cluster') - self.assertEqual(2, exit) - - @mock.patch('oslo_config.cfg.ConfigOpts.register_cli_opt') - def test_main_argv_lt_2(self, register_cli_opt): - script_name = 'cinder-manage' - sys.argv = [script_name] - CONF(sys.argv[1:], project='cinder', version=version.version_string()) - - with mock.patch('sys.stdout', new=six.StringIO()): - exit = self.assertRaises(SystemExit, cinder_manage.main) - self.assertTrue(register_cli_opt.called) - self.assertEqual(2, exit.code) - - @mock.patch('oslo_config.cfg.ConfigOpts.__call__') - @mock.patch('oslo_log.log.setup') - @mock.patch('oslo_config.cfg.ConfigOpts.register_cli_opt') - def test_main_sudo_failed(self, register_cli_opt, log_setup, - config_opts_call): - script_name = 'cinder-manage' - sys.argv = [script_name, 'fake_category', 'fake_action'] - config_opts_call.side_effect = cfg.ConfigFilesNotFoundError( - mock.sentinel._namespace) - - with mock.patch('sys.stdout', new=six.StringIO()): - exit = self.assertRaises(SystemExit, cinder_manage.main) - - self.assertTrue(register_cli_opt.called) - config_opts_call.assert_called_once_with( - sys.argv[1:], project='cinder', - version=version.version_string()) - self.assertFalse(log_setup.called) - self.assertEqual(2, exit.code) - - @mock.patch('oslo_config.cfg.ConfigOpts.__call__') - 
@mock.patch('oslo_config.cfg.ConfigOpts.register_cli_opt') - def test_main(self, register_cli_opt, config_opts_call): - script_name = 'cinder-manage' - sys.argv = [script_name, 'config', 'list'] - action_fn = mock.MagicMock() - CONF.category = mock.MagicMock(action_fn=action_fn) - - cinder_manage.main() - - self.assertTrue(register_cli_opt.called) - config_opts_call.assert_called_once_with( - sys.argv[1:], project='cinder', version=version.version_string()) - self.assertTrue(action_fn.called) - - @mock.patch('oslo_config.cfg.ConfigOpts.__call__') - @mock.patch('oslo_log.log.setup') - @mock.patch('oslo_config.cfg.ConfigOpts.register_cli_opt') - def test_main_invalid_dir(self, register_cli_opt, log_setup, - config_opts_call): - script_name = 'cinder-manage' - fake_dir = 'fake-dir' - invalid_dir = 'Invalid directory:' - sys.argv = [script_name, '--config-dir', fake_dir] - config_opts_call.side_effect = cfg.ConfigDirNotFoundError(fake_dir) - - with mock.patch('sys.stdout', new=six.StringIO()) as fake_out: - exit = self.assertRaises(SystemExit, cinder_manage.main) - self.assertTrue(register_cli_opt.called) - config_opts_call.assert_called_once_with( - sys.argv[1:], project='cinder', - version=version.version_string()) - self.assertIn(invalid_dir, fake_out.getvalue()) - self.assertIn(fake_dir, fake_out.getvalue()) - self.assertFalse(log_setup.called) - self.assertEqual(2, exit.code) - - @mock.patch('cinder.db') - def test_remove_service_failure(self, mock_db): - mock_db.service_destroy.side_effect = SystemExit(1) - service_commands = cinder_manage.ServiceCommands() - exit = service_commands.remove('abinary', 'ahost') - self.assertEqual(2, exit) - - @mock.patch('cinder.db.service_destroy') - @mock.patch('cinder.db.service_get', return_value = {'id': '12'}) - def test_remove_service_success(self, mock_get_by_args, - mock_service_destroy): - service_commands = cinder_manage.ServiceCommands() - self.assertIsNone(service_commands.remove('abinary', 'ahost')) - - -class 
TestCinderRtstoolCmd(test.TestCase): - - def setUp(self): - super(TestCinderRtstoolCmd, self).setUp() - sys.argv = ['cinder-rtstool'] - - self.INITIATOR_IQN = 'iqn.2015.12.com.example.openstack.i:UNIT1' - self.TARGET_IQN = 'iqn.2015.12.com.example.openstack.i:TARGET1' - - @mock.patch.object(rtslib_fb.root, 'RTSRoot') - def test_create_rtslib_error(self, rtsroot): - rtsroot.side_effect = rtslib_fb.utils.RTSLibError() - - with mock.patch('sys.stdout', new=six.StringIO()): - self.assertRaises(rtslib_fb.utils.RTSLibError, - cinder_rtstool.create, - mock.sentinel.backing_device, - mock.sentinel.name, - mock.sentinel.userid, - mock.sentinel.password, - mock.sentinel.iser_enabled) - - def _test_create_rtslib_error_network_portal(self, ip): - with mock.patch.object(rtslib_fb, 'NetworkPortal') as network_portal, \ - mock.patch.object(rtslib_fb, 'LUN') as lun, \ - mock.patch.object(rtslib_fb, 'TPG') as tpg, \ - mock.patch.object(rtslib_fb, 'FabricModule') as fabric_module, \ - mock.patch.object(rtslib_fb, 'Target') as target, \ - mock.patch.object(rtslib_fb, 'BlockStorageObject') as \ - block_storage_object, \ - mock.patch.object(rtslib_fb.root, 'RTSRoot') as rts_root: - root_new = mock.MagicMock(storage_objects=mock.MagicMock()) - rts_root.return_value = root_new - block_storage_object.return_value = mock.sentinel.so_new - target.return_value = mock.sentinel.target_new - fabric_module.return_value = mock.sentinel.fabric_new - tpg_new = tpg.return_value - lun.return_value = mock.sentinel.lun_new - - if ip == '0.0.0.0': - network_portal.side_effect = rtslib_fb.utils.RTSLibError() - self.assertRaises(rtslib_fb.utils.RTSLibError, - cinder_rtstool.create, - mock.sentinel.backing_device, - mock.sentinel.name, - mock.sentinel.userid, - mock.sentinel.password, - mock.sentinel.iser_enabled) - else: - cinder_rtstool.create(mock.sentinel.backing_device, - mock.sentinel.name, - mock.sentinel.userid, - mock.sentinel.password, - mock.sentinel.iser_enabled) - - 
rts_root.assert_called_once_with() - block_storage_object.assert_called_once_with( - name=mock.sentinel.name, dev=mock.sentinel.backing_device) - target.assert_called_once_with(mock.sentinel.fabric_new, - mock.sentinel.name, 'create') - fabric_module.assert_called_once_with('iscsi') - tpg.assert_called_once_with(mock.sentinel.target_new, - mode='create') - tpg_new.set_attribute.assert_called_once_with('authentication', - '1') - lun.assert_called_once_with(tpg_new, - storage_object=mock.sentinel.so_new) - self.assertEqual(1, tpg_new.enable) - - if ip == '::0': - ip = '[::0]' - - network_portal.assert_any_call(tpg_new, ip, 3260, mode='any') - - def test_create_rtslib_error_network_portal_ipv4(self): - with mock.patch('sys.stdout', new=six.StringIO()): - self._test_create_rtslib_error_network_portal('0.0.0.0') - - def test_create_rtslib_error_network_portal_ipv6(self): - with mock.patch('sys.stdout', new=six.StringIO()): - self._test_create_rtslib_error_network_portal('::0') - - def _test_create(self, ip): - with mock.patch.object(rtslib_fb, 'NetworkPortal') as network_portal, \ - mock.patch.object(rtslib_fb, 'LUN') as lun, \ - mock.patch.object(rtslib_fb, 'TPG') as tpg, \ - mock.patch.object(rtslib_fb, 'FabricModule') as fabric_module, \ - mock.patch.object(rtslib_fb, 'Target') as target, \ - mock.patch.object(rtslib_fb, 'BlockStorageObject') as \ - block_storage_object, \ - mock.patch.object(rtslib_fb.root, 'RTSRoot') as rts_root: - root_new = mock.MagicMock(storage_objects=mock.MagicMock()) - rts_root.return_value = root_new - block_storage_object.return_value = mock.sentinel.so_new - target.return_value = mock.sentinel.target_new - fabric_module.return_value = mock.sentinel.fabric_new - tpg_new = tpg.return_value - lun.return_value = mock.sentinel.lun_new - - cinder_rtstool.create(mock.sentinel.backing_device, - mock.sentinel.name, - mock.sentinel.userid, - mock.sentinel.password, - mock.sentinel.iser_enabled) - - rts_root.assert_called_once_with() - 
block_storage_object.assert_called_once_with( - name=mock.sentinel.name, dev=mock.sentinel.backing_device) - target.assert_called_once_with(mock.sentinel.fabric_new, - mock.sentinel.name, 'create') - fabric_module.assert_called_once_with('iscsi') - tpg.assert_called_once_with(mock.sentinel.target_new, - mode='create') - tpg_new.set_attribute.assert_called_once_with('authentication', - '1') - lun.assert_called_once_with(tpg_new, - storage_object=mock.sentinel.so_new) - self.assertEqual(1, tpg_new.enable) - - if ip == '::0': - ip = '[::0]' - - network_portal.assert_any_call(tpg_new, ip, 3260, mode='any') - - def test_create_ipv4(self): - self._test_create('0.0.0.0') - - def test_create_ipv6(self): - self._test_create('::0') - - def _test_create_ips_and_port(self, mock_rtslib, port, ips, expected_ips): - mock_rtslib.BlockStorageObject.return_value = mock.sentinel.bso - mock_rtslib.Target.return_value = mock.sentinel.target_new - mock_rtslib.FabricModule.return_value = mock.sentinel.iscsi_fabric - tpg_new = mock_rtslib.TPG.return_value - - cinder_rtstool.create(mock.sentinel.backing_device, - mock.sentinel.name, - mock.sentinel.userid, - mock.sentinel.password, - mock.sentinel.iser_enabled, - portals_ips=ips, - portals_port=port) - - mock_rtslib.Target.assert_called_once_with(mock.sentinel.iscsi_fabric, - mock.sentinel.name, - 'create') - mock_rtslib.TPG.assert_called_once_with(mock.sentinel.target_new, - mode='create') - mock_rtslib.LUN.assert_called_once_with( - tpg_new, - storage_object=mock.sentinel.bso) - - mock_rtslib.NetworkPortal.assert_has_calls( - map(lambda ip: mock.call(tpg_new, ip, port, mode='any'), - expected_ips), any_order=True - ) - - @mock.patch.object(cinder_rtstool, 'rtslib_fb', autospec=True) - def test_create_ips_and_port_ipv4(self, mock_rtslib): - ips = ['10.0.0.2', '10.0.0.3', '10.0.0.4'] - port = 3261 - self._test_create_ips_and_port(mock_rtslib, port, ips, ips) - - @mock.patch.object(cinder_rtstool, 'rtslib_fb', autospec=True) - def 
test_create_ips_and_port_ipv6(self, mock_rtslib): - ips = ['fe80::fc16:3eff:fecb:ad2f'] - expected_ips = ['[fe80::fc16:3eff:fecb:ad2f]'] - port = 3261 - self._test_create_ips_and_port(mock_rtslib, port, ips, - expected_ips) - - @mock.patch.object(rtslib_fb.root, 'RTSRoot') - def test_add_initiator_rtslib_error(self, rtsroot): - rtsroot.side_effect = rtslib_fb.utils.RTSLibError() - - with mock.patch('sys.stdout', new=six.StringIO()): - self.assertRaises(rtslib_fb.utils.RTSLibError, - cinder_rtstool.add_initiator, - mock.sentinel.target_iqn, - self.INITIATOR_IQN, - mock.sentinel.userid, - mock.sentinel.password) - - @mock.patch.object(rtslib_fb.root, 'RTSRoot') - def test_add_initiator_rtstool_error(self, rtsroot): - rtsroot.targets.return_value = {} - - self.assertRaises(cinder_rtstool.RtstoolError, - cinder_rtstool.add_initiator, - mock.sentinel.target_iqn, - self.INITIATOR_IQN, - mock.sentinel.userid, - mock.sentinel.password) - - @mock.patch.object(rtslib_fb, 'MappedLUN') - @mock.patch.object(rtslib_fb, 'NodeACL') - @mock.patch.object(rtslib_fb.root, 'RTSRoot') - def test_add_initiator_acl_exists(self, rtsroot, node_acl, mapped_lun): - target_iqn = mock.MagicMock() - target_iqn.tpgs.return_value = \ - [{'node_acls': self.INITIATOR_IQN}] - acl = mock.MagicMock(node_wwn=self.INITIATOR_IQN) - tpg = mock.MagicMock(node_acls=[acl]) - tpgs = iter([tpg]) - target = mock.MagicMock(tpgs=tpgs, wwn=self.TARGET_IQN) - rtsroot.return_value = mock.MagicMock(targets=[target]) - - cinder_rtstool.add_initiator(self.TARGET_IQN, - self.INITIATOR_IQN, - mock.sentinel.userid, - mock.sentinel.password) - self.assertFalse(node_acl.called) - self.assertFalse(mapped_lun.called) - - @mock.patch.object(rtslib_fb, 'MappedLUN') - @mock.patch.object(rtslib_fb, 'NodeACL') - @mock.patch.object(rtslib_fb.root, 'RTSRoot') - def test_add_initiator_acl_exists_case_1(self, - rtsroot, - node_acl, - mapped_lun): - """Ensure initiator iqns are handled in a case-insensitive manner.""" - target_iqn = 
mock.MagicMock() - target_iqn.tpgs.return_value = \ - [{'node_acls': self.INITIATOR_IQN.lower()}] - acl = mock.MagicMock(node_wwn=self.INITIATOR_IQN) - tpg = mock.MagicMock(node_acls=[acl]) - tpgs = iter([tpg]) - target = mock.MagicMock(tpgs=tpgs, wwn=target_iqn) - rtsroot.return_value = mock.MagicMock(targets=[target]) - - cinder_rtstool.add_initiator(target_iqn, - self.INITIATOR_IQN, - mock.sentinel.userid, - mock.sentinel.password) - self.assertFalse(node_acl.called) - self.assertFalse(mapped_lun.called) - - @mock.patch.object(rtslib_fb, 'MappedLUN') - @mock.patch.object(rtslib_fb, 'NodeACL') - @mock.patch.object(rtslib_fb.root, 'RTSRoot') - def test_add_initiator_acl_exists_case_2(self, - rtsroot, - node_acl, - mapped_lun): - """Ensure initiator iqns are handled in a case-insensitive manner.""" - iqn_lower = self.INITIATOR_IQN.lower() - target_iqn = mock.MagicMock() - target_iqn.tpgs.return_value = \ - [{'node_acls': self.INITIATOR_IQN}] - acl = mock.MagicMock(node_wwn=iqn_lower) - tpg = mock.MagicMock(node_acls=[acl]) - tpgs = iter([tpg]) - target = mock.MagicMock(tpgs=tpgs, wwn=target_iqn) - rtsroot.return_value = mock.MagicMock(targets=[target]) - - cinder_rtstool.add_initiator(target_iqn, - self.INITIATOR_IQN, - mock.sentinel.userid, - mock.sentinel.password) - self.assertFalse(node_acl.called) - self.assertFalse(mapped_lun.called) - - @mock.patch.object(rtslib_fb, 'MappedLUN') - @mock.patch.object(rtslib_fb, 'NodeACL') - @mock.patch.object(rtslib_fb.root, 'RTSRoot') - def test_add_initiator(self, rtsroot, node_acl, mapped_lun): - target_iqn = mock.MagicMock() - target_iqn.tpgs.return_value = \ - [{'node_acls': self.INITIATOR_IQN}] - tpg = mock.MagicMock() - tpgs = iter([tpg]) - target = mock.MagicMock(tpgs=tpgs, wwn=target_iqn) - rtsroot.return_value = mock.MagicMock(targets=[target]) - - acl_new = mock.MagicMock(chap_userid=mock.sentinel.userid, - chap_password=mock.sentinel.password) - node_acl.return_value = acl_new - - 
cinder_rtstool.add_initiator(target_iqn, - self.INITIATOR_IQN, - mock.sentinel.userid, - mock.sentinel.password) - node_acl.assert_called_once_with(tpg, - self.INITIATOR_IQN, - mode='create') - mapped_lun.assert_called_once_with(acl_new, 0, tpg_lun=0) - - @mock.patch.object(rtslib_fb.root, 'RTSRoot') - def test_get_targets(self, rtsroot): - target = mock.MagicMock() - target.dump.return_value = {'wwn': 'fake-wwn'} - rtsroot.return_value = mock.MagicMock(targets=[target]) - - with mock.patch('sys.stdout', new=six.StringIO()) as fake_out: - cinder_rtstool.get_targets() - - self.assertEqual(str(target.wwn), fake_out.getvalue().strip()) - - @mock.patch.object(rtslib_fb.root, 'RTSRoot') - def test_delete(self, rtsroot): - target = mock.MagicMock(wwn=mock.sentinel.iqn) - storage_object = mock.MagicMock() - name = mock.PropertyMock(return_value=mock.sentinel.iqn) - type(storage_object).name = name - rtsroot.return_value = mock.MagicMock( - targets=[target], storage_objects=[storage_object]) - - cinder_rtstool.delete(mock.sentinel.iqn) - - target.delete.assert_called_once_with() - storage_object.delete.assert_called_once_with() - - @mock.patch.object(rtslib_fb, 'MappedLUN') - @mock.patch.object(rtslib_fb, 'NodeACL') - @mock.patch.object(rtslib_fb.root, 'RTSRoot') - def test_delete_initiator(self, rtsroot, node_acl, mapped_lun): - target_iqn = mock.MagicMock() - target_iqn.tpgs.return_value = \ - [{'node_acls': self.INITIATOR_IQN}] - acl = mock.MagicMock(node_wwn=self.INITIATOR_IQN) - tpg = mock.MagicMock(node_acls=[acl]) - tpgs = iter([tpg]) - target = mock.MagicMock(tpgs=tpgs, wwn=target_iqn) - rtsroot.return_value = mock.MagicMock(targets=[target]) - - cinder_rtstool.delete_initiator(target_iqn, - self.INITIATOR_IQN) - - @mock.patch.object(rtslib_fb, 'MappedLUN') - @mock.patch.object(rtslib_fb, 'NodeACL') - @mock.patch.object(rtslib_fb.root, 'RTSRoot') - def test_delete_initiator_case(self, rtsroot, node_acl, mapped_lun): - """Ensure iqns are handled in a 
case-insensitive manner.""" - initiator_iqn_lower = self.INITIATOR_IQN.lower() - target_iqn = mock.MagicMock() - target_iqn.tpgs.return_value = \ - [{'node_acls': initiator_iqn_lower}] - acl = mock.MagicMock(node_wwn=self.INITIATOR_IQN) - tpg = mock.MagicMock(node_acls=[acl]) - tpgs = iter([tpg]) - target = mock.MagicMock(tpgs=tpgs, wwn=target_iqn) - rtsroot.return_value = mock.MagicMock(targets=[target]) - - cinder_rtstool.delete_initiator(target_iqn, - self.INITIATOR_IQN) - - @mock.patch.object(cinder_rtstool, 'os', autospec=True) - @mock.patch.object(cinder_rtstool, 'rtslib_fb', autospec=True) - def test_save_with_filename(self, mock_rtslib, mock_os): - filename = mock.sentinel.filename - cinder_rtstool.save_to_file(filename) - rtsroot = mock_rtslib.root.RTSRoot - rtsroot.assert_called_once_with() - self.assertEqual(0, mock_os.path.dirname.call_count) - self.assertEqual(0, mock_os.path.exists.call_count) - self.assertEqual(0, mock_os.makedirs.call_count) - rtsroot.return_value.save_to_file.assert_called_once_with(filename) - - @mock.patch.object(cinder_rtstool, 'os', - **{'path.exists.return_value': True, - 'path.dirname.return_value': mock.sentinel.dirname}) - @mock.patch.object(cinder_rtstool, 'rtslib_fb', - **{'root.default_save_file': mock.sentinel.filename}) - def test_save(self, mock_rtslib, mock_os): - """Test that we check path exists with default file.""" - cinder_rtstool.save_to_file(None) - rtsroot = mock_rtslib.root.RTSRoot - rtsroot.assert_called_once_with() - rtsroot.return_value.save_to_file.assert_called_once_with( - mock.sentinel.filename) - mock_os.path.dirname.assert_called_once_with(mock.sentinel.filename) - mock_os.path.exists.assert_called_once_with(mock.sentinel.dirname) - self.assertEqual(0, mock_os.makedirs.call_count) - - @mock.patch.object(cinder_rtstool, 'os', - **{'path.exists.return_value': False, - 'path.dirname.return_value': mock.sentinel.dirname}) - @mock.patch.object(cinder_rtstool, 'rtslib_fb', - **{'root.default_save_file': 
mock.sentinel.filename}) - def test_save_no_targetcli(self, mock_rtslib, mock_os): - """Test that we create path if it doesn't exist with default file.""" - cinder_rtstool.save_to_file(None) - rtsroot = mock_rtslib.root.RTSRoot - rtsroot.assert_called_once_with() - rtsroot.return_value.save_to_file.assert_called_once_with( - mock.sentinel.filename) - mock_os.path.dirname.assert_called_once_with(mock.sentinel.filename) - mock_os.path.exists.assert_called_once_with(mock.sentinel.dirname) - mock_os.makedirs.assert_called_once_with(mock.sentinel.dirname, 0o755) - - @mock.patch.object(cinder_rtstool, 'os', autospec=True) - @mock.patch.object(cinder_rtstool, 'rtslib_fb', autospec=True) - def test_save_error_creating_dir(self, mock_rtslib, mock_os): - mock_os.path.dirname.return_value = 'dirname' - mock_os.path.exists.return_value = False - mock_os.makedirs.side_effect = OSError('error') - - regexp = (u'targetcli not installed and could not create default ' - 'directory \(dirname\): error$') - self.assertRaisesRegexp(cinder_rtstool.RtstoolError, regexp, - cinder_rtstool.save_to_file, None) - - @mock.patch.object(cinder_rtstool, 'os', autospec=True) - @mock.patch.object(cinder_rtstool, 'rtslib_fb', autospec=True) - def test_save_error_saving(self, mock_rtslib, mock_os): - save = mock_rtslib.root.RTSRoot.return_value.save_to_file - save.side_effect = OSError('error') - regexp = u'Could not save configuration to myfile: error' - self.assertRaisesRegexp(cinder_rtstool.RtstoolError, regexp, - cinder_rtstool.save_to_file, 'myfile') - - @mock.patch.object(cinder_rtstool, 'rtslib_fb', - **{'root.default_save_file': mock.sentinel.filename}) - def test_restore(self, mock_rtslib): - """Test that we restore target configuration with default file.""" - cinder_rtstool.restore_from_file(None) - rtsroot = mock_rtslib.root.RTSRoot - rtsroot.assert_called_once_with() - rtsroot.return_value.restore_from_file.assert_called_once_with( - mock.sentinel.filename) - - 
@mock.patch.object(cinder_rtstool, 'rtslib_fb') - def test_restore_with_file(self, mock_rtslib): - """Test that we restore target configuration with specified file.""" - cinder_rtstool.restore_from_file('saved_file') - rtsroot = mock_rtslib.root.RTSRoot - rtsroot.return_value.restore_from_file.assert_called_once_with( - 'saved_file') - - @mock.patch('cinder.cmd.rtstool.restore_from_file') - def test_restore_error(self, restore_from_file): - """Test that we fail to restore target configuration.""" - restore_from_file.side_effect = OSError - self.assertRaises(OSError, - cinder_rtstool.restore_from_file, - mock.sentinel.filename) - - def test_usage(self): - with mock.patch('sys.stdout', new=six.StringIO()): - exit = self.assertRaises(SystemExit, cinder_rtstool.usage) - self.assertEqual(1, exit.code) - - @mock.patch('cinder.cmd.rtstool.usage') - def test_main_argc_lt_2(self, usage): - usage.side_effect = SystemExit(1) - sys.argv = ['cinder-rtstool'] - - exit = self.assertRaises(SystemExit, cinder_rtstool.usage) - - self.assertTrue(usage.called) - self.assertEqual(1, exit.code) - - def test_main_create_argv_lt_6(self): - sys.argv = ['cinder-rtstool', 'create'] - self._test_main_check_argv() - - def test_main_create_argv_gt_7(self): - sys.argv = ['cinder-rtstool', 'create', 'fake-arg1', 'fake-arg2', - 'fake-arg3', 'fake-arg4', 'fake-arg5', 'fake-arg6'] - self._test_main_check_argv() - - def test_main_add_initiator_argv_lt_6(self): - sys.argv = ['cinder-rtstool', 'add-initiator'] - self._test_main_check_argv() - - def test_main_delete_argv_lt_3(self): - sys.argv = ['cinder-rtstool', 'delete'] - self._test_main_check_argv() - - def test_main_no_action(self): - sys.argv = ['cinder-rtstool'] - self._test_main_check_argv() - - def _test_main_check_argv(self): - with mock.patch('cinder.cmd.rtstool.usage') as usage: - usage.side_effect = SystemExit(1) - sys.argv = ['cinder-rtstool', 'create'] - - exit = self.assertRaises(SystemExit, cinder_rtstool.main) - - 
self.assertTrue(usage.called) - self.assertEqual(1, exit.code) - - @mock.patch('cinder.cmd.rtstool.save_to_file') - def test_main_save(self, mock_save): - sys.argv = ['cinder-rtstool', - 'save'] - rc = cinder_rtstool.main() - mock_save.assert_called_once_with(None) - self.assertEqual(0, rc) - - @mock.patch('cinder.cmd.rtstool.save_to_file') - def test_main_save_with_file(self, mock_save): - sys.argv = ['cinder-rtstool', - 'save', - mock.sentinel.filename] - rc = cinder_rtstool.main() - mock_save.assert_called_once_with(mock.sentinel.filename) - self.assertEqual(0, rc) - - def test_main_create(self): - with mock.patch('cinder.cmd.rtstool.create') as create: - sys.argv = ['cinder-rtstool', - 'create', - mock.sentinel.backing_device, - mock.sentinel.name, - mock.sentinel.userid, - mock.sentinel.password, - mock.sentinel.iser_enabled, - str(mock.sentinel.initiator_iqns)] - - rc = cinder_rtstool.main() - - create.assert_called_once_with( - mock.sentinel.backing_device, - mock.sentinel.name, - mock.sentinel.userid, - mock.sentinel.password, - mock.sentinel.iser_enabled, - initiator_iqns=str(mock.sentinel.initiator_iqns)) - self.assertEqual(0, rc) - - @mock.patch('cinder.cmd.rtstool.create') - def test_main_create_ips_and_port(self, mock_create): - sys.argv = ['cinder-rtstool', - 'create', - mock.sentinel.backing_device, - mock.sentinel.name, - mock.sentinel.userid, - mock.sentinel.password, - mock.sentinel.iser_enabled, - str(mock.sentinel.initiator_iqns), - '-p3261', - '-aip1,ip2,ip3'] - - rc = cinder_rtstool.main() - - mock_create.assert_called_once_with( - mock.sentinel.backing_device, - mock.sentinel.name, - mock.sentinel.userid, - mock.sentinel.password, - mock.sentinel.iser_enabled, - initiator_iqns=str(mock.sentinel.initiator_iqns), - portals_ips=['ip1', 'ip2', 'ip3'], - portals_port=3261) - self.assertEqual(0, rc) - - def test_main_add_initiator(self): - with mock.patch('cinder.cmd.rtstool.add_initiator') as add_initiator: - sys.argv = ['cinder-rtstool', - 
'add-initiator', - mock.sentinel.target_iqn, - mock.sentinel.userid, - mock.sentinel.password, - mock.sentinel.initiator_iqns] - - rc = cinder_rtstool.main() - - add_initiator.assert_called_once_with( - mock.sentinel.target_iqn, mock.sentinel.initiator_iqns, - mock.sentinel.userid, mock.sentinel.password) - self.assertEqual(0, rc) - - def test_main_get_targets(self): - with mock.patch('cinder.cmd.rtstool.get_targets') as get_targets: - sys.argv = ['cinder-rtstool', 'get-targets'] - - rc = cinder_rtstool.main() - - get_targets.assert_called_once_with() - self.assertEqual(0, rc) - - def test_main_delete(self): - with mock.patch('cinder.cmd.rtstool.delete') as delete: - sys.argv = ['cinder-rtstool', 'delete', mock.sentinel.iqn] - - rc = cinder_rtstool.main() - - delete.assert_called_once_with(mock.sentinel.iqn) - self.assertEqual(0, rc) - - @mock.patch.object(cinder_rtstool, 'verify_rtslib') - def test_main_verify(self, mock_verify_rtslib): - sys.argv = ['cinder-rtstool', 'verify'] - - rc = cinder_rtstool.main() - - mock_verify_rtslib.assert_called_once_with() - self.assertEqual(0, rc) - - -class TestCinderVolumeUsageAuditCmd(test.TestCase): - - def setUp(self): - super(TestCinderVolumeUsageAuditCmd, self).setUp() - sys.argv = ['cinder-volume-usage-audit'] - - @mock.patch('cinder.utils.last_completed_audit_period') - @mock.patch('cinder.rpc.init') - @mock.patch('cinder.version.version_string') - @mock.patch('oslo_log.log.getLogger') - @mock.patch('oslo_log.log.setup') - @mock.patch('cinder.context.get_admin_context') - def test_main_time_error(self, get_admin_context, log_setup, get_logger, - version_string, rpc_init, - last_completed_audit_period): - CONF.set_override('start_time', '2014-01-01 01:00:00') - CONF.set_override('end_time', '2013-01-01 01:00:00') - last_completed_audit_period.return_value = (mock.sentinel.begin, - mock.sentinel.end) - - exit = self.assertRaises(SystemExit, volume_usage_audit.main) - - get_admin_context.assert_called_once_with() - 
self.assertEqual('cinder', CONF.project) - self.assertEqual(CONF.version, version.version_string()) - log_setup.assert_called_once_with(CONF, "cinder") - get_logger.assert_called_once_with('cinder') - self.assertEqual(-1, exit.code) - rpc_init.assert_called_once_with(CONF) - last_completed_audit_period.assert_called_once_with() - - @mock.patch('cinder.volume.utils.notify_about_volume_usage') - @mock.patch('cinder.objects.volume.VolumeList.get_all_active_by_window') - @mock.patch('cinder.utils.last_completed_audit_period') - @mock.patch('cinder.rpc.init') - @mock.patch('cinder.version.version_string') - @mock.patch('oslo_log.log.getLogger') - @mock.patch('oslo_log.log.setup') - @mock.patch('cinder.context.get_admin_context') - def test_main_send_create_volume_error(self, get_admin_context, log_setup, - get_logger, version_string, - rpc_init, - last_completed_audit_period, - volume_get_all_active_by_window, - notify_about_volume_usage): - CONF.set_override('send_actions', True) - CONF.set_override('start_time', '2014-01-01 01:00:00') - CONF.set_override('end_time', '2014-02-02 02:00:00') - begin = datetime.datetime(2014, 1, 1, 1, 0, tzinfo=iso8601.Utc()) - end = datetime.datetime(2014, 2, 2, 2, 0, tzinfo=iso8601.Utc()) - ctxt = context.RequestContext(fake.USER_ID, fake.PROJECT_ID) - get_admin_context.return_value = ctxt - last_completed_audit_period.return_value = (begin, end) - volume1_created = datetime.datetime(2014, 1, 1, 2, 0, - tzinfo=iso8601.Utc()) - volume1_deleted = datetime.datetime(2014, 1, 1, 3, 0, - tzinfo=iso8601.Utc()) - volume1 = mock.MagicMock(id=fake.VOLUME_ID, project_id=fake.PROJECT_ID, - created_at=volume1_created, - deleted_at=volume1_deleted) - volume_get_all_active_by_window.return_value = [volume1] - extra_info = { - 'audit_period_beginning': str(begin), - 'audit_period_ending': str(end), - } - local_extra_info = { - 'audit_period_beginning': str(volume1.created_at), - 'audit_period_ending': str(volume1.created_at), - } - - def 
_notify_about_volume_usage(*args, **kwargs): - if 'create.end' in args: - raise Exception() - else: - pass - - notify_about_volume_usage.side_effect = _notify_about_volume_usage - - volume_usage_audit.main() - - get_admin_context.assert_called_once_with() - self.assertEqual('cinder', CONF.project) - self.assertEqual(CONF.version, version.version_string()) - log_setup.assert_called_once_with(CONF, "cinder") - get_logger.assert_called_once_with('cinder') - rpc_init.assert_called_once_with(CONF) - last_completed_audit_period.assert_called_once_with() - volume_get_all_active_by_window.assert_called_once_with(ctxt, begin, - end) - notify_about_volume_usage.assert_has_calls([ - mock.call(ctxt, volume1, 'exists', extra_usage_info=extra_info), - mock.call(ctxt, volume1, 'create.start', - extra_usage_info=local_extra_info), - mock.call(ctxt, volume1, 'create.end', - extra_usage_info=local_extra_info) - ]) - - @mock.patch('cinder.volume.utils.notify_about_volume_usage') - @mock.patch('cinder.objects.volume.VolumeList.get_all_active_by_window') - @mock.patch('cinder.utils.last_completed_audit_period') - @mock.patch('cinder.rpc.init') - @mock.patch('cinder.version.version_string') - @mock.patch('oslo_log.log.getLogger') - @mock.patch('oslo_log.log.setup') - @mock.patch('cinder.context.get_admin_context') - def test_main_send_delete_volume_error(self, get_admin_context, log_setup, - get_logger, version_string, - rpc_init, - last_completed_audit_period, - volume_get_all_active_by_window, - notify_about_volume_usage): - CONF.set_override('send_actions', True) - CONF.set_override('start_time', '2014-01-01 01:00:00') - CONF.set_override('end_time', '2014-02-02 02:00:00') - begin = datetime.datetime(2014, 1, 1, 1, 0, tzinfo=iso8601.Utc()) - end = datetime.datetime(2014, 2, 2, 2, 0, tzinfo=iso8601.Utc()) - ctxt = context.RequestContext(fake.USER_ID, fake.PROJECT_ID) - get_admin_context.return_value = ctxt - last_completed_audit_period.return_value = (begin, end) - volume1_created = 
datetime.datetime(2014, 1, 1, 2, 0, - tzinfo=iso8601.Utc()) - volume1_deleted = datetime.datetime(2014, 1, 1, 3, 0, - tzinfo=iso8601.Utc()) - volume1 = mock.MagicMock(id=fake.VOLUME_ID, project_id=fake.PROJECT_ID, - created_at=volume1_created, - deleted_at=volume1_deleted) - volume_get_all_active_by_window.return_value = [volume1] - extra_info = { - 'audit_period_beginning': str(begin), - 'audit_period_ending': str(end), - } - local_extra_info_create = { - 'audit_period_beginning': str(volume1.created_at), - 'audit_period_ending': str(volume1.created_at), - } - local_extra_info_delete = { - 'audit_period_beginning': str(volume1.deleted_at), - 'audit_period_ending': str(volume1.deleted_at), - } - - def _notify_about_volume_usage(*args, **kwargs): - if 'delete.end' in args: - raise Exception() - else: - pass - - notify_about_volume_usage.side_effect = _notify_about_volume_usage - - volume_usage_audit.main() - - get_admin_context.assert_called_once_with() - self.assertEqual('cinder', CONF.project) - self.assertEqual(CONF.version, version.version_string()) - log_setup.assert_called_once_with(CONF, "cinder") - get_logger.assert_called_once_with('cinder') - rpc_init.assert_called_once_with(CONF) - last_completed_audit_period.assert_called_once_with() - volume_get_all_active_by_window.assert_called_once_with(ctxt, begin, - end) - notify_about_volume_usage.assert_has_calls([ - mock.call(ctxt, volume1, 'exists', extra_usage_info=extra_info), - mock.call(ctxt, volume1, 'create.start', - extra_usage_info=local_extra_info_create), - mock.call(ctxt, volume1, 'create.end', - extra_usage_info=local_extra_info_create), - mock.call(ctxt, volume1, 'delete.start', - extra_usage_info=local_extra_info_delete), - mock.call(ctxt, volume1, 'delete.end', - extra_usage_info=local_extra_info_delete) - ]) - - @mock.patch('cinder.volume.utils.notify_about_snapshot_usage') - @mock.patch('cinder.objects.snapshot.SnapshotList.' 
- 'get_all_active_by_window') - @mock.patch('cinder.volume.utils.notify_about_volume_usage') - @mock.patch('cinder.objects.volume.VolumeList.get_all_active_by_window') - @mock.patch('cinder.utils.last_completed_audit_period') - @mock.patch('cinder.rpc.init') - @mock.patch('cinder.version.version_string') - @mock.patch('oslo_log.log.getLogger') - @mock.patch('oslo_log.log.setup') - @mock.patch('cinder.context.get_admin_context') - def test_main_send_snapshot_error(self, get_admin_context, - log_setup, get_logger, - version_string, rpc_init, - last_completed_audit_period, - volume_get_all_active_by_window, - notify_about_volume_usage, - snapshot_get_all_active_by_window, - notify_about_snapshot_usage): - CONF.set_override('send_actions', True) - CONF.set_override('start_time', '2014-01-01 01:00:00') - CONF.set_override('end_time', '2014-02-02 02:00:00') - begin = datetime.datetime(2014, 1, 1, 1, 0, tzinfo=iso8601.Utc()) - end = datetime.datetime(2014, 2, 2, 2, 0, tzinfo=iso8601.Utc()) - ctxt = context.RequestContext(fake.USER_ID, fake.PROJECT_ID) - get_admin_context.return_value = ctxt - last_completed_audit_period.return_value = (begin, end) - snapshot1_created = datetime.datetime(2014, 1, 1, 2, 0, - tzinfo=iso8601.Utc()) - snapshot1_deleted = datetime.datetime(2014, 1, 1, 3, 0, - tzinfo=iso8601.Utc()) - snapshot1 = mock.MagicMock(id=fake.VOLUME_ID, - project_id=fake.PROJECT_ID, - created_at=snapshot1_created, - deleted_at=snapshot1_deleted) - volume_get_all_active_by_window.return_value = [] - snapshot_get_all_active_by_window.return_value = [snapshot1] - extra_info = { - 'audit_period_beginning': str(begin), - 'audit_period_ending': str(end), - } - local_extra_info_create = { - 'audit_period_beginning': str(snapshot1.created_at), - 'audit_period_ending': str(snapshot1.created_at), - } - local_extra_info_delete = { - 'audit_period_beginning': str(snapshot1.deleted_at), - 'audit_period_ending': str(snapshot1.deleted_at), - } - - def 
_notify_about_snapshot_usage(*args, **kwargs): - # notify_about_snapshot_usage raises an exception, but does not - # block - raise Exception() - - notify_about_snapshot_usage.side_effect = _notify_about_snapshot_usage - - volume_usage_audit.main() - - get_admin_context.assert_called_once_with() - self.assertEqual('cinder', CONF.project) - self.assertEqual(CONF.version, version.version_string()) - log_setup.assert_called_once_with(CONF, "cinder") - get_logger.assert_called_once_with('cinder') - rpc_init.assert_called_once_with(CONF) - last_completed_audit_period.assert_called_once_with() - volume_get_all_active_by_window.assert_called_once_with(ctxt, begin, - end) - self.assertFalse(notify_about_volume_usage.called) - notify_about_snapshot_usage.assert_has_calls([ - mock.call(ctxt, snapshot1, 'exists', extra_info), - mock.call(ctxt, snapshot1, 'create.start', - extra_usage_info=local_extra_info_create), - mock.call(ctxt, snapshot1, 'delete.start', - extra_usage_info=local_extra_info_delete) - ]) - - @mock.patch('cinder.volume.utils.notify_about_backup_usage') - @mock.patch('cinder.objects.backup.BackupList.get_all_active_by_window') - @mock.patch('cinder.volume.utils.notify_about_volume_usage') - @mock.patch('cinder.objects.volume.VolumeList.get_all_active_by_window') - @mock.patch('cinder.utils.last_completed_audit_period') - @mock.patch('cinder.rpc.init') - @mock.patch('cinder.version.version_string') - @mock.patch('cinder.context.get_admin_context') - def test_main_send_backup_error(self, get_admin_context, - version_string, rpc_init, - last_completed_audit_period, - volume_get_all_active_by_window, - notify_about_volume_usage, - backup_get_all_active_by_window, - notify_about_backup_usage): - CONF.set_override('send_actions', True) - CONF.set_override('start_time', '2014-01-01 01:00:00') - CONF.set_override('end_time', '2014-02-02 02:00:00') - begin = datetime.datetime(2014, 1, 1, 1, 0, tzinfo=iso8601.Utc()) - end = datetime.datetime(2014, 2, 2, 2, 0, 
tzinfo=iso8601.Utc()) - ctxt = context.RequestContext('fake-user', 'fake-project') - get_admin_context.return_value = ctxt - last_completed_audit_period.return_value = (begin, end) - backup1_created = datetime.datetime(2014, 1, 1, 2, 0, - tzinfo=iso8601.Utc()) - backup1_deleted = datetime.datetime(2014, 1, 1, 3, 0, - tzinfo=iso8601.Utc()) - backup1 = mock.MagicMock(id=fake.BACKUP_ID, - project_id=fake.PROJECT_ID, - created_at=backup1_created, - deleted_at=backup1_deleted) - volume_get_all_active_by_window.return_value = [] - backup_get_all_active_by_window.return_value = [backup1] - extra_info = { - 'audit_period_beginning': str(begin), - 'audit_period_ending': str(end), - } - local_extra_info_create = { - 'audit_period_beginning': str(backup1.created_at), - 'audit_period_ending': str(backup1.created_at), - } - local_extra_info_delete = { - 'audit_period_beginning': str(backup1.deleted_at), - 'audit_period_ending': str(backup1.deleted_at), - } - - notify_about_backup_usage.side_effect = Exception() - - volume_usage_audit.main() - - get_admin_context.assert_called_once_with() - self.assertEqual('cinder', CONF.project) - self.assertEqual(CONF.version, version.version_string()) - rpc_init.assert_called_once_with(CONF) - last_completed_audit_period.assert_called_once_with() - volume_get_all_active_by_window.assert_called_once_with(ctxt, - begin, end) - self.assertFalse(notify_about_volume_usage.called) - notify_about_backup_usage.assert_any_call(ctxt, backup1, 'exists', - extra_info) - notify_about_backup_usage.assert_any_call( - ctxt, backup1, 'create.start', - extra_usage_info=local_extra_info_create) - notify_about_backup_usage.assert_any_call( - ctxt, backup1, 'delete.start', - extra_usage_info=local_extra_info_delete) - - @mock.patch('cinder.volume.utils.notify_about_backup_usage') - @mock.patch('cinder.objects.backup.BackupList.get_all_active_by_window') - @mock.patch('cinder.volume.utils.notify_about_snapshot_usage') - 
@mock.patch('cinder.objects.snapshot.SnapshotList.' - 'get_all_active_by_window') - @mock.patch('cinder.volume.utils.notify_about_volume_usage') - @mock.patch('cinder.objects.volume.VolumeList.get_all_active_by_window') - @mock.patch('cinder.utils.last_completed_audit_period') - @mock.patch('cinder.rpc.init') - @mock.patch('cinder.version.version_string') - @mock.patch('oslo_log.log.getLogger') - @mock.patch('oslo_log.log.setup') - @mock.patch('cinder.context.get_admin_context') - def test_main(self, get_admin_context, log_setup, get_logger, - version_string, rpc_init, last_completed_audit_period, - volume_get_all_active_by_window, notify_about_volume_usage, - snapshot_get_all_active_by_window, - notify_about_snapshot_usage, backup_get_all_active_by_window, - notify_about_backup_usage): - CONF.set_override('send_actions', True) - CONF.set_override('start_time', '2014-01-01 01:00:00') - CONF.set_override('end_time', '2014-02-02 02:00:00') - begin = datetime.datetime(2014, 1, 1, 1, 0, tzinfo=iso8601.Utc()) - end = datetime.datetime(2014, 2, 2, 2, 0, tzinfo=iso8601.Utc()) - ctxt = context.RequestContext(fake.USER_ID, fake.PROJECT_ID) - get_admin_context.return_value = ctxt - last_completed_audit_period.return_value = (begin, end) - - volume1_created = datetime.datetime(2014, 1, 1, 2, 0, - tzinfo=iso8601.Utc()) - volume1_deleted = datetime.datetime(2014, 1, 1, 3, 0, - tzinfo=iso8601.Utc()) - volume1 = mock.MagicMock(id=fake.VOLUME_ID, project_id=fake.PROJECT_ID, - created_at=volume1_created, - deleted_at=volume1_deleted) - volume_get_all_active_by_window.return_value = [volume1] - extra_info = { - 'audit_period_beginning': str(begin), - 'audit_period_ending': str(end), - } - extra_info_volume_create = { - 'audit_period_beginning': str(volume1.created_at), - 'audit_period_ending': str(volume1.created_at), - } - extra_info_volume_delete = { - 'audit_period_beginning': str(volume1.deleted_at), - 'audit_period_ending': str(volume1.deleted_at), - } - - snapshot1_created = 
datetime.datetime(2014, 1, 1, 2, 0, - tzinfo=iso8601.Utc()) - snapshot1_deleted = datetime.datetime(2014, 1, 1, 3, 0, - tzinfo=iso8601.Utc()) - snapshot1 = mock.MagicMock(id=fake.VOLUME_ID, - project_id=fake.PROJECT_ID, - created_at=snapshot1_created, - deleted_at=snapshot1_deleted) - snapshot_get_all_active_by_window.return_value = [snapshot1] - extra_info_snapshot_create = { - 'audit_period_beginning': str(snapshot1.created_at), - 'audit_period_ending': str(snapshot1.created_at), - } - extra_info_snapshot_delete = { - 'audit_period_beginning': str(snapshot1.deleted_at), - 'audit_period_ending': str(snapshot1.deleted_at), - } - - backup1_created = datetime.datetime(2014, 1, 1, 2, 0, - tzinfo=iso8601.Utc()) - backup1_deleted = datetime.datetime(2014, 1, 1, 3, 0, - tzinfo=iso8601.Utc()) - backup1 = mock.MagicMock(id=fake.BACKUP_ID, - project_id=fake.PROJECT_ID, - created_at=backup1_created, - deleted_at=backup1_deleted) - backup_get_all_active_by_window.return_value = [backup1] - extra_info_backup_create = { - 'audit_period_beginning': str(backup1.created_at), - 'audit_period_ending': str(backup1.created_at), - } - extra_info_backup_delete = { - 'audit_period_beginning': str(backup1.deleted_at), - 'audit_period_ending': str(backup1.deleted_at), - } - - volume_usage_audit.main() - - get_admin_context.assert_called_once_with() - self.assertEqual('cinder', CONF.project) - self.assertEqual(CONF.version, version.version_string()) - log_setup.assert_called_once_with(CONF, "cinder") - get_logger.assert_called_once_with('cinder') - rpc_init.assert_called_once_with(CONF) - last_completed_audit_period.assert_called_once_with() - volume_get_all_active_by_window.assert_called_once_with(ctxt, - begin, end) - notify_about_volume_usage.assert_has_calls([ - mock.call(ctxt, volume1, 'exists', extra_usage_info=extra_info), - mock.call(ctxt, volume1, 'create.start', - extra_usage_info=extra_info_volume_create), - mock.call(ctxt, volume1, 'create.end', - 
extra_usage_info=extra_info_volume_create), - mock.call(ctxt, volume1, 'delete.start', - extra_usage_info=extra_info_volume_delete), - mock.call(ctxt, volume1, 'delete.end', - extra_usage_info=extra_info_volume_delete) - ]) - - notify_about_snapshot_usage.assert_has_calls([ - mock.call(ctxt, snapshot1, 'exists', extra_info), - mock.call(ctxt, snapshot1, 'create.start', - extra_usage_info=extra_info_snapshot_create), - mock.call(ctxt, snapshot1, 'create.end', - extra_usage_info=extra_info_snapshot_create), - mock.call(ctxt, snapshot1, 'delete.start', - extra_usage_info=extra_info_snapshot_delete), - mock.call(ctxt, snapshot1, 'delete.end', - extra_usage_info=extra_info_snapshot_delete) - ]) - - notify_about_backup_usage.assert_has_calls([ - mock.call(ctxt, backup1, 'exists', extra_info), - mock.call(ctxt, backup1, 'create.start', - extra_usage_info=extra_info_backup_create), - mock.call(ctxt, backup1, 'create.end', - extra_usage_info=extra_info_backup_create), - mock.call(ctxt, backup1, 'delete.start', - extra_usage_info=extra_info_backup_delete), - mock.call(ctxt, backup1, 'delete.end', - extra_usage_info=extra_info_backup_delete) - ]) diff --git a/cinder/tests/unit/test_context.py b/cinder/tests/unit/test_context.py deleted file mode 100644 index 68f89282d..000000000 --- a/cinder/tests/unit/test_context.py +++ /dev/null @@ -1,130 +0,0 @@ - -# Copyright 2011 OpenStack Foundation -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -import ddt -import mock - -from cinder import context -from cinder import test - - -@ddt.ddt -class ContextTestCase(test.TestCase): - - def test_request_context_sets_is_admin(self): - ctxt = context.RequestContext('111', - '222', - roles=['admin', 'weasel']) - self.assertTrue(ctxt.is_admin) - - def test_request_context_sets_is_admin_upcase(self): - ctxt = context.RequestContext('111', - '222', - roles=['Admin', 'weasel']) - self.assertTrue(ctxt.is_admin) - - def test_request_context_read_deleted(self): - ctxt = context.RequestContext('111', - '222', - read_deleted='yes') - self.assertEqual('yes', ctxt.read_deleted) - - ctxt.read_deleted = 'no' - self.assertEqual('no', ctxt.read_deleted) - - def test_request_context_read_deleted_invalid(self): - self.assertRaises(ValueError, - context.RequestContext, - '111', - '222', - read_deleted=True) - - ctxt = context.RequestContext('111', '222') - self.assertRaises(ValueError, - setattr, - ctxt, - 'read_deleted', - True) - - def test_request_context_elevated(self): - user_context = context.RequestContext( - 'fake_user', 'fake_project', is_admin=False) - self.assertFalse(user_context.is_admin) - admin_context = user_context.elevated() - self.assertFalse(user_context.is_admin) - self.assertTrue(admin_context.is_admin) - self.assertNotIn('admin', user_context.roles) - self.assertIn('admin', admin_context.roles) - - def test_service_catalog_nova_and_swift(self): - service_catalog = [ - {u'type': u'compute', u'name': u'nova'}, - {u'type': u's3', u'name': u's3'}, - {u'type': u'image', u'name': u'glance'}, - {u'type': u'volume', u'name': u'cinder'}, - {u'type': u'ec2', u'name': u'ec2'}, - {u'type': u'object-store', u'name': u'swift'}, - {u'type': u'identity', u'name': u'keystone'}, - {u'type': None, u'name': u'S_withtypeNone'}, - {u'type': u'co', u'name': u'S_partofcompute'}] - - compute_catalog = [{u'type': u'compute', u'name': u'nova'}] - object_catalog = [{u'name': u'swift', u'type': u'object-store'}] - ctxt = 
context.RequestContext('111', '222', - service_catalog=service_catalog) - self.assertEqual(4, len(ctxt.service_catalog)) - return_compute = [v for v in ctxt.service_catalog if - v['type'] == u'compute'] - return_object = [v for v in ctxt.service_catalog if - v['type'] == u'object-store'] - self.assertEqual(compute_catalog, return_compute) - self.assertEqual(object_catalog, return_object) - - def test_user_identity(self): - ctx = context.RequestContext("user", "tenant", - domain="domain", - user_domain="user-domain", - project_domain="project-domain") - self.assertEqual('user tenant domain user-domain project-domain', - ctx.to_dict()["user_identity"]) - - @ddt.data(('ec729e9946bc43c39ece6dfa7de70eea', - 'c466a48309794261b64a4f02cfcc3d64'), - ('ec729e9946bc43c39ece6dfa7de70eea', None), - (None, 'c466a48309794261b64a4f02cfcc3d64'), - (None, None)) - @ddt.unpack - @mock.patch('cinder.context.CONF') - def test_cinder_internal_context(self, project_id, user_id, mock_conf): - mock_conf.cinder_internal_tenant_project_id = project_id - mock_conf.cinder_internal_tenant_user_id = user_id - ctx = context.get_internal_tenant_context() - if project_id is None or user_id is None: - self.assertIsNone(ctx) - else: - self.assertEqual(user_id, ctx.user_id) - self.assertEqual(project_id, ctx.project_id) - - def test_request_context_no_roles(self): - ctxt = context.RequestContext('111', - '222') - self.assertEqual([], ctxt.roles) - - def test_request_context_with_roles(self): - roles = ['alpha', 'beta'] - ctxt = context.RequestContext('111', - '222', - roles=roles) - self.assertEqual(roles, ctxt.roles) diff --git a/cinder/tests/unit/test_coordination.py b/cinder/tests/unit/test_coordination.py deleted file mode 100644 index 10d43e8ce..000000000 --- a/cinder/tests/unit/test_coordination.py +++ /dev/null @@ -1,109 +0,0 @@ -# Copyright 2015 Intel -# All Rights Reserved. 
-# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import inspect - -import mock -import tooz.coordination -import tooz.locking - -from cinder import coordination -from cinder import test - - -class Locked(Exception): - pass - - -class MockToozLock(tooz.locking.Lock): - active_locks = set() - - def acquire(self, blocking=True): - if self.name not in self.active_locks: - self.active_locks.add(self.name) - return True - elif not blocking: - return False - else: - raise Locked - - def release(self): - self.active_locks.remove(self.name) - - -@mock.patch('tooz.coordination.get_coordinator') -class CoordinatorTestCase(test.TestCase): - MOCK_TOOZ = False - - def test_coordinator_start(self, get_coordinator): - crd = get_coordinator.return_value - - agent = coordination.Coordinator() - agent.start() - self.assertTrue(get_coordinator.called) - self.assertTrue(crd.start.called) - - def test_coordinator_stop(self, get_coordinator): - crd = get_coordinator.return_value - - agent = coordination.Coordinator() - agent.start() - self.assertIsNotNone(agent.coordinator) - agent.stop() - self.assertTrue(crd.stop.called) - self.assertIsNone(agent.coordinator) - - def test_coordinator_lock(self, get_coordinator): - crd = get_coordinator.return_value - crd.get_lock.side_effect = lambda n: MockToozLock(n) - - agent1 = coordination.Coordinator() - agent1.start() - agent2 = coordination.Coordinator() - agent2.start() - - lock_name = 'lock' - expected_name = lock_name.encode('ascii') - - 
self.assertNotIn(expected_name, MockToozLock.active_locks) - with agent1.get_lock(lock_name): - self.assertIn(expected_name, MockToozLock.active_locks) - self.assertRaises(Locked, agent1.get_lock(lock_name).acquire) - self.assertRaises(Locked, agent2.get_lock(lock_name).acquire) - self.assertNotIn(expected_name, MockToozLock.active_locks) - - def test_coordinator_offline(self, get_coordinator): - crd = get_coordinator.return_value - crd.start.side_effect = tooz.coordination.ToozConnectionError('err') - - agent = coordination.Coordinator() - self.assertRaises(tooz.coordination.ToozError, agent.start) - self.assertFalse(agent.started) - - -@mock.patch.object(coordination.COORDINATOR, 'get_lock') -class CoordinationTestCase(test.TestCase): - def test_synchronized(self, get_lock): - @coordination.synchronized('lock-{f_name}-{foo.val}-{bar[val]}') - def func(foo, bar): - pass - - foo = mock.Mock() - foo.val = 7 - bar = mock.MagicMock() - bar.__getitem__.return_value = 8 - func(foo, bar) - get_lock.assert_called_with('lock-func-7-8') - self.assertEqual(['foo', 'bar'], inspect.getargspec(func)[0]) diff --git a/cinder/tests/unit/test_db_api.py b/cinder/tests/unit/test_db_api.py deleted file mode 100644 index 8f074c910..000000000 --- a/cinder/tests/unit/test_db_api.py +++ /dev/null @@ -1,3150 +0,0 @@ -# Copyright 2014 IBM Corp. -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -"""Unit tests for cinder.db.api.""" - - -import datetime - -import ddt -import enum -import mock -from mock import call -from oslo_config import cfg -from oslo_utils import timeutils -from oslo_utils import uuidutils -import six -from sqlalchemy.sql import operators - -from cinder.api import common -from cinder import context -from cinder import db -from cinder.db.sqlalchemy import api as sqlalchemy_api -from cinder.db.sqlalchemy import models -from cinder import exception -from cinder import objects -from cinder.objects import fields -from cinder import quota -from cinder import test -from cinder.tests.unit import fake_constants as fake -from cinder.tests.unit import utils - -CONF = cfg.CONF -THREE = 3 -THREE_HUNDREDS = 300 -ONE_HUNDREDS = 100 -UTC_NOW = timeutils.utcnow() - - -def _quota_reserve(context, project_id): - """Create sample Quota, QuotaUsage and Reservation objects. - - There is no method db.quota_usage_create(), so we have to use - db.quota_reserve() for creating QuotaUsage objects. - - Returns reservations uuids. 
- - """ - def get_sync(resource, usage): - def sync(elevated, project_id, session): - return {resource: usage} - return sync - quotas = {} - resources = {} - deltas = {} - for i, resource in enumerate(('volumes', 'gigabytes')): - quota_obj = db.quota_create(context, project_id, resource, i + 1) - quotas[resource] = quota_obj.hard_limit - resources[resource] = quota.ReservableResource(resource, - '_sync_%s' % resource) - deltas[resource] = i + 1 - return db.quota_reserve( - context, resources, quotas, deltas, - datetime.datetime.utcnow(), datetime.datetime.utcnow(), - datetime.timedelta(days=1), project_id - ) - - -class BaseTest(test.TestCase, test.ModelsObjectComparatorMixin): - def setUp(self): - super(BaseTest, self).setUp() - self.ctxt = context.get_admin_context() - - -@ddt.ddt -class DBCommonFilterTestCase(BaseTest): - - def setUp(self): - super(DBCommonFilterTestCase, self).setUp() - self.fake_volume = db.volume_create(self.ctxt, - {'display_name': 'fake_name'}) - self.fake_group = utils.create_group( - self.ctxt, - group_type_id=fake.GROUP_TYPE_ID, - volume_type_ids=[fake.VOLUME_TYPE_ID]) - - @mock.patch('sqlalchemy.orm.query.Query.filter') - def test__process_model_like_filter(self, mock_filter): - filters = {'display_name': 'fake_name', - 'display_description': 'fake_description', - 'host': 123, - 'status': []} - session = sqlalchemy_api.get_session() - query = session.query(models.Volume) - mock_filter.return_value = query - with mock.patch.object(operators.Operators, 'op') as mock_op: - def fake_operator(value): - return value - mock_op.return_value = fake_operator - sqlalchemy_api._process_model_like_filter(models.Volume, - query, filters) - calls = [call('%fake_description%'), - call('%fake_name%'), call('%123%')] - mock_filter.assert_has_calls(calls, any_order=True) - - @ddt.data({'handler': [db.volume_create, db.volume_get_all], - 'column': 'display_name', - 'resource': 'volume'}, - {'handler': [db.snapshot_create, db.snapshot_get_all], - 'column': 
'display_name', - 'resource': 'snapshot'}, - {'handler': [db.message_create, db.message_get_all], - 'column': 'message_level', - 'resource': 'message'}, - {'handler': [db.backup_create, db.backup_get_all], - 'column': 'display_name', - 'resource': 'backup'}, - {'handler': [db.group_create, db.group_get_all], - 'column': 'name', - 'resource': 'group'}, - {'handler': [utils.create_group_snapshot, - db.group_snapshot_get_all], - 'column': 'name', - 'resource': 'group_snapshot'}) - @ddt.unpack - def test_resource_get_all_like_filter(self, handler, column, resource): - for index in ['001', '002']: - option = {column: "fake_%s_%s" % (column, index)} - if resource in ['snapshot', 'backup']: - option['volume_id'] = self.fake_volume.id - if resource in ['message']: - option['project_id'] = fake.PROJECT_ID - option['event_id'] = fake.UUID1 - if resource in ['group_snapshot']: - handler[0](self.ctxt, self.fake_group.id, - name="fake_%s_%s" % (column, index)) - else: - handler[0](self.ctxt, option) - - # test exact match - exact_filter = {column: 'fake_%s' % column} - resources = handler[1](self.ctxt, filters=exact_filter) - self.assertEqual(0, len(resources)) - - # test inexact match - inexact_filter = {"%s~" % column: 'fake_%s' % column} - resources = handler[1](self.ctxt, filters=inexact_filter) - self.assertEqual(2, len(resources)) - - -@ddt.ddt -class DBAPIServiceTestCase(BaseTest): - - """Unit tests for cinder.db.api.service_*.""" - - def test_service_create(self): - # Add a cluster value to the service - values = {'cluster_name': 'cluster'} - service = utils.create_service(self.ctxt, values) - self.assertIsNotNone(service['id']) - expected = utils.default_service_values() - expected.update(values) - for key, value in expected.items(): - self.assertEqual(value, service[key]) - - def test_service_destroy(self): - service1 = utils.create_service(self.ctxt, {}) - service2 = utils.create_service(self.ctxt, {'host': 'fake_host2'}) - - self.assertDictEqual( - {'deleted': True, 
'deleted_at': mock.ANY}, - db.service_destroy(self.ctxt, service1['id'])) - self.assertRaises(exception.ServiceNotFound, - db.service_get, self.ctxt, service1['id']) - self._assertEqualObjects( - service2, - db.service_get(self.ctxt, service2['id'])) - - def test_service_update(self): - service = utils.create_service(self.ctxt, {}) - new_values = { - 'host': 'fake_host1', - 'binary': 'fake_binary1', - 'topic': 'fake_topic1', - 'report_count': 4, - 'disabled': True - } - db.service_update(self.ctxt, service['id'], new_values) - updated_service = db.service_get(self.ctxt, service['id']) - for key, value in new_values.items(): - self.assertEqual(value, updated_service[key]) - - def test_service_update_not_found_exception(self): - self.assertRaises(exception.ServiceNotFound, - db.service_update, self.ctxt, 100500, {}) - - def test_service_get(self): - service1 = utils.create_service(self.ctxt, {}) - real_service1 = db.service_get(self.ctxt, service1['id']) - self._assertEqualObjects(service1, real_service1) - - def test_service_get_by_cluster(self): - service = utils.create_service(self.ctxt, - {'cluster_name': 'cluster@backend'}) - # Search with an exact match - real_service = db.service_get(self.ctxt, - cluster_name='cluster@backend') - self._assertEqualObjects(service, real_service) - # Search without the backend - real_service = db.service_get(self.ctxt, cluster_name='cluster') - self._assertEqualObjects(service, real_service) - - def test_service_get_not_found_exception(self): - self.assertRaises(exception.ServiceNotFound, - db.service_get, self.ctxt, 100500) - - def test_service_get_by_host_and_topic(self): - service1 = utils.create_service(self.ctxt, - {'host': 'host1', 'topic': 'topic1'}) - - real_service1 = db.service_get(self.ctxt, host='host1', topic='topic1') - self._assertEqualObjects(service1, real_service1) - - @ddt.data('disabled', 'frozen') - def test_service_get_all_boolean_by_cluster(self, field_name): - values = [ - # Enabled/Unfrozen services - 
{'host': 'host1', 'binary': 'b1', field_name: False}, - {'host': 'host2', 'binary': 'b1', field_name: False, - 'cluster_name': 'enabled_unfrozen_cluster'}, - {'host': 'host3', 'binary': 'b1', field_name: True, - 'cluster_name': 'enabled_unfrozen_cluster'}, - - # Disabled/Frozen services - {'host': 'host4', 'binary': 'b1', field_name: True}, - {'host': 'host5', 'binary': 'b1', field_name: False, - 'cluster_name': 'disabled_frozen_cluster'}, - {'host': 'host6', 'binary': 'b1', field_name: True, - 'cluster_name': 'disabled_frozen_cluster'}, - ] - - db.cluster_create(self.ctxt, {'name': 'enabled_unfrozen_cluster', - 'binary': 'b1', - field_name: False}), - db.cluster_create(self.ctxt, {'name': 'disabled_frozen_cluster', - 'binary': 'b1', - field_name: True}), - services = [utils.create_service(self.ctxt, vals) for vals in values] - - false_services = db.service_get_all(self.ctxt, **{field_name: False}) - true_services = db.service_get_all(self.ctxt, **{field_name: True}) - - self.assertSetEqual({s.host for s in services[:3]}, - {s.host for s in false_services}) - self.assertSetEqual({s.host for s in services[3:]}, - {s.host for s in true_services}) - - def test_service_get_all(self): - expired = (datetime.datetime.utcnow() - - datetime.timedelta(seconds=CONF.service_down_time + 1)) - db.cluster_create(self.ctxt, {'name': 'cluster_disabled', - 'binary': 'fake_binary', - 'disabled': True}) - db.cluster_create(self.ctxt, {'name': 'cluster_enabled', - 'binary': 'fake_binary', - 'disabled': False}) - values = [ - # Now we are updating updated_at at creation as well so this one - # is up. 
- {'host': 'host1', 'binary': 'b1', 'created_at': expired}, - {'host': 'host1@ceph', 'binary': 'b2'}, - {'host': 'host2', 'binary': 'b2'}, - {'disabled': False, 'cluster_name': 'cluster_enabled'}, - {'disabled': True, 'cluster_name': 'cluster_enabled'}, - {'disabled': False, 'cluster_name': 'cluster_disabled'}, - {'disabled': True, 'cluster_name': 'cluster_disabled'}, - {'disabled': True, 'created_at': expired, 'updated_at': expired}, - ] - services = [utils.create_service(self.ctxt, vals) for vals in values] - - disabled_services = services[-3:] - non_disabled_services = services[:-3] - up_services = services[:7] - down_services = [services[7]] - expected = services[:2] - expected_bin = services[1:3] - compares = [ - (services, db.service_get_all(self.ctxt)), - (expected, db.service_get_all(self.ctxt, host='host1')), - (expected_bin, db.service_get_all(self.ctxt, binary='b2')), - (disabled_services, db.service_get_all(self.ctxt, disabled=True)), - (non_disabled_services, db.service_get_all(self.ctxt, - disabled=False)), - (up_services, db.service_get_all(self.ctxt, is_up=True)), - (down_services, db.service_get_all(self.ctxt, is_up=False)), - ] - for i, comp in enumerate(compares): - self._assertEqualListsOfObjects(*comp, - msg='Error comparing %s' % i) - - def test_service_get_all_by_topic(self): - values = [ - {'host': 'host1', 'topic': 't1'}, - {'host': 'host2', 'topic': 't1'}, - {'host': 'host4', 'disabled': True, 'topic': 't1'}, - {'host': 'host3', 'topic': 't2'} - ] - services = [utils.create_service(self.ctxt, vals) for vals in values] - expected = services[:3] - real = db.service_get_all(self.ctxt, topic='t1') - self._assertEqualListsOfObjects(expected, real) - - def test_service_get_all_by_binary(self): - values = [ - {'host': 'host1', 'binary': 'b1'}, - {'host': 'host2', 'binary': 'b1'}, - {'host': 'host4', 'disabled': True, 'binary': 'b1'}, - {'host': 'host3', 'binary': 'b2'} - ] - services = [utils.create_service(self.ctxt, vals) for vals in values] - 
expected = services[:3] - real = db.service_get_all(self.ctxt, binary='b1') - self._assertEqualListsOfObjects(expected, real) - - def test_service_get_by_args(self): - values = [ - {'host': 'host1', 'binary': 'a'}, - {'host': 'host2', 'binary': 'b'} - ] - services = [utils.create_service(self.ctxt, vals) for vals in values] - - service1 = db.service_get(self.ctxt, host='host1', binary='a') - self._assertEqualObjects(services[0], service1) - - service2 = db.service_get(self.ctxt, host='host2', binary='b') - self._assertEqualObjects(services[1], service2) - - def test_service_get_all_by_cluster(self): - values = [ - {'host': 'host1', 'cluster_name': 'cluster'}, - {'host': 'host2', 'cluster_name': 'cluster'}, - {'host': 'host3', 'cluster_name': 'cluster@backend'}, - {'host': 'host4', 'cluster_name': 'cluster2'}, - ] - services = [utils.create_service(self.ctxt, vals) for vals in values] - expected = services[:3] - real = db.service_get_all(self.ctxt, cluster_name='cluster') - self._assertEqualListsOfObjects(expected, real) - - def test_service_get_all_by_host_or_cluster(self): - values = [ - {'host': 'host1', 'cluster_name': 'cluster'}, - {'host': 'host2', 'cluster_name': 'host1'}, - {'host': 'host3', 'cluster_name': 'cluster@backend'}, - {'host': 'host4', 'cluster_name': 'cluster2'}, - ] - services = [utils.create_service(self.ctxt, vals) for vals in values] - expected = services[0:2] - real = db.service_get_all(self.ctxt, host_or_cluster='host1') - self._assertEqualListsOfObjects(expected, real) - - def test_service_get_by_args_not_found_exception(self): - self.assertRaises(exception.ServiceNotFound, - db.service_get, - self.ctxt, host='non-exists-host', binary='a') - - @mock.patch('sqlalchemy.orm.query.Query.filter_by') - def test_service_get_by_args_with_case_insensitive(self, filter_by): - CONF.set_default('connection', 'mysql://', 'database') - db.service_get(self.ctxt, host='host', binary='a') - - self.assertNotEqual(0, filter_by.call_count) - 
self.assertEqual(1, filter_by.return_value.filter.call_count) - or_op = filter_by.return_value.filter.call_args[0][0].clauses[0] - self.assertIsInstance(or_op, - sqlalchemy_api.sql.elements.BinaryExpression) - binary_op = or_op.right - self.assertIsInstance(binary_op, sqlalchemy_api.sql.functions.Function) - self.assertEqual('binary', binary_op.name) - - -@ddt.ddt -class DBAPIVolumeTestCase(BaseTest): - - """Unit tests for cinder.db.api.volume_*.""" - - def test_volume_create(self): - volume = db.volume_create(self.ctxt, {'host': 'host1'}) - self.assertTrue(uuidutils.is_uuid_like(volume['id'])) - self.assertEqual('host1', volume.host) - - def test_volume_attached_invalid_uuid(self): - self.assertRaises(exception.InvalidUUID, db.volume_attached, self.ctxt, - 42, 'invalid-uuid', None, '/tmp') - - def test_volume_attached_to_instance(self): - volume = db.volume_create(self.ctxt, {'host': 'host1'}) - instance_uuid = fake.INSTANCE_ID - values = {'volume_id': volume['id'], - 'instance_uuid': instance_uuid, - 'attach_status': fields.VolumeAttachStatus.ATTACHING, } - attachment = db.volume_attach(self.ctxt, values) - volume_db, updated_values = db.volume_attached( - self.ctxt, - attachment['id'], - instance_uuid, None, '/tmp') - expected_updated_values = { - 'mountpoint': '/tmp', - 'attach_status': fields.VolumeAttachStatus.ATTACHED, - 'instance_uuid': instance_uuid, - 'attached_host': None, - 'attach_time': mock.ANY, - 'attach_mode': 'rw'} - self.assertDictEqual(expected_updated_values, updated_values) - - volume = db.volume_get(self.ctxt, volume['id']) - attachment = db.volume_attachment_get(self.ctxt, attachment['id']) - self._assertEqualObjects(volume, volume_db, - ignored_keys='volume_attachment') - self._assertEqualListsOfObjects(volume.volume_attachment, - volume_db.volume_attachment, 'volume') - self.assertEqual('in-use', volume['status']) - self.assertEqual('/tmp', attachment['mountpoint']) - self.assertEqual(fields.VolumeAttachStatus.ATTACHED, - 
attachment['attach_status']) - self.assertEqual(instance_uuid, attachment['instance_uuid']) - self.assertIsNone(attachment['attached_host']) - self.assertEqual(volume.project_id, attachment['volume']['project_id']) - - def test_volume_attached_to_host(self): - volume = db.volume_create(self.ctxt, {'host': 'host1'}) - host_name = 'fake_host' - values = {'volume_id': volume['id'], - 'attached_host': host_name, - 'attach_status': fields.VolumeAttachStatus.ATTACHING, } - attachment = db.volume_attach(self.ctxt, values) - volume_db, updated_values = db.volume_attached( - self.ctxt, attachment['id'], - None, host_name, '/tmp') - expected_updated_values = { - 'mountpoint': '/tmp', - 'attach_status': fields.VolumeAttachStatus.ATTACHED, - 'instance_uuid': None, - 'attached_host': host_name, - 'attach_time': mock.ANY, - 'attach_mode': 'rw'} - self.assertDictEqual(expected_updated_values, updated_values) - volume = db.volume_get(self.ctxt, volume['id']) - self._assertEqualObjects(volume, volume_db, - ignored_keys='volume_attachment') - self._assertEqualListsOfObjects(volume.volume_attachment, - volume_db.volume_attachment, 'volume') - attachment = db.volume_attachment_get(self.ctxt, attachment['id']) - self.assertEqual('in-use', volume['status']) - self.assertEqual('/tmp', attachment['mountpoint']) - self.assertEqual(fields.VolumeAttachStatus.ATTACHED, - attachment['attach_status']) - self.assertIsNone(attachment['instance_uuid']) - self.assertEqual(attachment['attached_host'], host_name) - self.assertEqual(volume.project_id, attachment['volume']['project_id']) - - def test_volume_data_get_for_host(self): - for i in range(THREE): - for j in range(THREE): - db.volume_create(self.ctxt, {'host': 'h%d' % i, - 'size': ONE_HUNDREDS}) - for i in range(THREE): - self.assertEqual((THREE, THREE_HUNDREDS), - db.volume_data_get_for_host( - self.ctxt, 'h%d' % i)) - - def test_volume_data_get_for_host_for_multi_backend(self): - for i in range(THREE): - for j in range(THREE): - 
db.volume_create(self.ctxt, {'host': - 'h%d@lvmdriver-1#lvmdriver-1' % i, - 'size': ONE_HUNDREDS}) - for i in range(THREE): - self.assertEqual((THREE, THREE_HUNDREDS), - db.volume_data_get_for_host( - self.ctxt, 'h%d@lvmdriver-1' % i)) - - def test_volume_data_get_for_project(self): - for i in range(THREE): - for j in range(THREE): - db.volume_create(self.ctxt, {'project_id': 'p%d' % i, - 'size': ONE_HUNDREDS, - 'host': 'h-%d-%d' % (i, j), - }) - for i in range(THREE): - self.assertEqual((THREE, THREE_HUNDREDS), - db.volume_data_get_for_project( - self.ctxt, 'p%d' % i)) - - def test_volume_detached_from_instance(self): - volume = db.volume_create(self.ctxt, {}) - instance_uuid = 'aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa' - values = {'volume_id': volume['id'], - 'instance_uuid': instance_uuid, - 'attach_status': fields.VolumeAttachStatus.ATTACHING, } - attachment = db.volume_attach(self.ctxt, values) - db.volume_attached(self.ctxt, attachment.id, - instance_uuid, - None, '/tmp') - volume_updates, attachment_updates = ( - db.volume_detached(self.ctxt, volume.id, attachment.id)) - expected_attachment = { - 'attach_status': fields.VolumeAttachStatus.DETACHED, - 'detach_time': mock.ANY, - 'deleted': True, - 'deleted_at': mock.ANY, } - self.assertDictEqual(expected_attachment, attachment_updates) - expected_volume = { - 'status': 'available', - 'attach_status': fields.VolumeAttachStatus.DETACHED, } - self.assertDictEqual(expected_volume, volume_updates) - volume = db.volume_get(self.ctxt, volume.id) - self.assertRaises(exception.VolumeAttachmentNotFound, - db.volume_attachment_get, - self.ctxt, - attachment.id) - self.assertEqual('available', volume.status) - - def test_volume_detached_two_attachments(self): - volume = db.volume_create(self.ctxt, {}) - instance_uuid = fake.INSTANCE_ID - values = {'volume_id': volume.id, - 'instance_uuid': instance_uuid, - 'attach_status': fields.VolumeAttachStatus.ATTACHING, } - attachment = db.volume_attach(self.ctxt, values) - values2 = 
{'volume_id': volume.id, - 'instance_uuid': fake.OBJECT_ID, - 'attach_status': fields.VolumeAttachStatus.ATTACHING, } - db.volume_attach(self.ctxt, values2) - db.volume_attached(self.ctxt, attachment.id, - instance_uuid, - None, '/tmp') - volume_updates, attachment_updates = ( - db.volume_detached(self.ctxt, volume.id, attachment.id)) - expected_attachment = { - 'attach_status': fields.VolumeAttachStatus.DETACHED, - 'detach_time': mock.ANY, - 'deleted': True, - 'deleted_at': mock.ANY, } - self.assertDictEqual(expected_attachment, attachment_updates) - expected_volume = { - 'status': 'in-use', - 'attach_status': fields.VolumeAttachStatus.ATTACHED, } - self.assertDictEqual(expected_volume, volume_updates) - volume = db.volume_get(self.ctxt, volume.id) - self.assertRaises(exception.VolumeAttachmentNotFound, - db.volume_attachment_get, - self.ctxt, - attachment.id) - self.assertEqual('in-use', volume.status) - - def test_volume_detached_invalid_attachment(self): - volume = db.volume_create(self.ctxt, {}) - # detach it again - volume_updates, attachment_updates = ( - db.volume_detached(self.ctxt, volume.id, fake.ATTACHMENT_ID)) - self.assertIsNone(attachment_updates) - expected_volume = { - 'status': 'available', - 'attach_status': fields.VolumeAttachStatus.DETACHED, } - self.assertDictEqual(expected_volume, volume_updates) - volume = db.volume_get(self.ctxt, volume.id) - self.assertEqual('available', volume.status) - - def test_volume_detached_from_host(self): - volume = db.volume_create(self.ctxt, {}) - host_name = 'fake_host' - values = {'volume_id': volume.id, - 'attach_host': host_name, - 'attach_status': fields.VolumeAttachStatus.ATTACHING, } - attachment = db.volume_attach(self.ctxt, values) - db.volume_attached(self.ctxt, attachment.id, - None, host_name, '/tmp') - volume_updates, attachment_updates = ( - db.volume_detached(self.ctxt, volume.id, attachment.id)) - expected_attachment = { - 'attach_status': fields.VolumeAttachStatus.DETACHED, - 'detach_time': 
mock.ANY, - 'deleted': True, - 'deleted_at': mock.ANY} - self.assertDictEqual(expected_attachment, attachment_updates) - expected_volume = { - 'status': 'available', - 'attach_status': fields.VolumeAttachStatus.DETACHED, } - self.assertDictEqual(expected_volume, volume_updates) - volume = db.volume_get(self.ctxt, volume.id) - self.assertRaises(exception.VolumeAttachmentNotFound, - db.volume_attachment_get, - self.ctxt, - attachment.id) - self.assertEqual('available', volume.status) - - def test_volume_get(self): - volume = db.volume_create(self.ctxt, {}) - self._assertEqualObjects(volume, db.volume_get(self.ctxt, - volume['id'])) - - @mock.patch('oslo_utils.timeutils.utcnow', return_value=UTC_NOW) - def test_volume_destroy(self, utcnow_mock): - volume = db.volume_create(self.ctxt, {}) - self.assertDictEqual( - {'status': 'deleted', 'deleted': True, 'deleted_at': UTC_NOW, - 'migration_status': None}, - db.volume_destroy(self.ctxt, volume['id'])) - self.assertRaises(exception.VolumeNotFound, db.volume_get, - self.ctxt, volume['id']) - - def test_volume_get_all(self): - volumes = [db.volume_create(self.ctxt, - {'host': 'h%d' % i, 'size': i}) - for i in range(3)] - self._assertEqualListsOfObjects(volumes, db.volume_get_all( - self.ctxt, None, None, ['host'], None)) - - @ddt.data('cluster_name', 'host') - def test_volume_get_all_filter_host_and_cluster(self, field): - volumes = [] - for i in range(2): - for value in ('host%d@backend#pool', 'host%d@backend', 'host%d'): - kwargs = {field: value % i} - volumes.append(utils.create_volume(self.ctxt, **kwargs)) - - for i in range(3): - filters = {field: getattr(volumes[i], field)} - result = db.volume_get_all(self.ctxt, filters=filters) - self.assertEqual(i + 1, len(result)) - self.assertSetEqual({v.id for v in volumes[:i + 1]}, - {v.id for v in result}) - - def test_volume_get_all_marker_passed(self): - volumes = [ - db.volume_create(self.ctxt, {'id': 1}), - db.volume_create(self.ctxt, {'id': 2}), - 
db.volume_create(self.ctxt, {'id': 3}), - db.volume_create(self.ctxt, {'id': 4}), - ] - - self._assertEqualListsOfObjects(volumes[2:], db.volume_get_all( - self.ctxt, 2, 2, ['id'], ['asc'])) - - def test_volume_get_all_by_host(self): - volumes = [] - for i in range(3): - volumes.append([db.volume_create(self.ctxt, {'host': 'h%d' % i}) - for j in range(3)]) - for i in range(3): - self._assertEqualListsOfObjects(volumes[i], - db.volume_get_all_by_host( - self.ctxt, 'h%d' % i)) - - def test_volume_get_all_by_host_with_pools(self): - volumes = [] - vol_on_host_wo_pool = [db.volume_create(self.ctxt, {'host': 'foo'}) - for j in range(3)] - vol_on_host_w_pool = [db.volume_create( - self.ctxt, {'host': 'foo#pool0'})] - volumes.append((vol_on_host_wo_pool + - vol_on_host_w_pool)) - # insert an additional record that doesn't belongs to the same - # host as 'foo' and test if it is included in the result - db.volume_create(self.ctxt, {'host': 'foobar'}) - self._assertEqualListsOfObjects(volumes[0], - db.volume_get_all_by_host( - self.ctxt, 'foo')) - - def test_volume_get_all_by_host_with_filters(self): - v1 = db.volume_create(self.ctxt, {'host': 'h1', 'display_name': 'v1', - 'status': 'available'}) - v2 = db.volume_create(self.ctxt, {'host': 'h1', 'display_name': 'v2', - 'status': 'available'}) - v3 = db.volume_create(self.ctxt, {'host': 'h2', 'display_name': 'v1', - 'status': 'available'}) - self._assertEqualListsOfObjects( - [v1], - db.volume_get_all_by_host(self.ctxt, 'h1', - filters={'display_name': 'v1'})) - self._assertEqualListsOfObjects( - [v1, v2], - db.volume_get_all_by_host( - self.ctxt, 'h1', - filters={'display_name': ['v1', 'v2', 'foo']})) - self._assertEqualListsOfObjects( - [v1, v2], - db.volume_get_all_by_host(self.ctxt, 'h1', - filters={'status': 'available'})) - self._assertEqualListsOfObjects( - [v3], - db.volume_get_all_by_host(self.ctxt, 'h2', - filters={'display_name': 'v1'})) - # No match - vols = db.volume_get_all_by_host(self.ctxt, 'h1', - 
filters={'status': 'foo'}) - self.assertEqual([], vols) - # Bogus filter, should return empty list - vols = db.volume_get_all_by_host(self.ctxt, 'h1', - filters={'foo': 'bar'}) - self.assertEqual([], vols) - - def test_volume_get_all_by_group(self): - volumes = [] - for i in range(3): - volumes.append([db.volume_create(self.ctxt, { - 'consistencygroup_id': 'g%d' % i}) for j in range(3)]) - for i in range(3): - self._assertEqualListsOfObjects(volumes[i], - db.volume_get_all_by_group( - self.ctxt, 'g%d' % i)) - - def test_volume_get_all_by_group_with_filters(self): - v1 = db.volume_create(self.ctxt, {'consistencygroup_id': 'g1', - 'display_name': 'v1'}) - v2 = db.volume_create(self.ctxt, {'consistencygroup_id': 'g1', - 'display_name': 'v2'}) - v3 = db.volume_create(self.ctxt, {'consistencygroup_id': 'g2', - 'display_name': 'v1'}) - self._assertEqualListsOfObjects( - [v1], - db.volume_get_all_by_group(self.ctxt, 'g1', - filters={'display_name': 'v1'})) - self._assertEqualListsOfObjects( - [v1, v2], - db.volume_get_all_by_group(self.ctxt, 'g1', - filters={'display_name': ['v1', 'v2']})) - self._assertEqualListsOfObjects( - [v3], - db.volume_get_all_by_group(self.ctxt, 'g2', - filters={'display_name': 'v1'})) - # No match - vols = db.volume_get_all_by_group(self.ctxt, 'g1', - filters={'display_name': 'foo'}) - self.assertEqual([], vols) - # Bogus filter, should return empty list - vols = db.volume_get_all_by_group(self.ctxt, 'g1', - filters={'foo': 'bar'}) - self.assertEqual([], vols) - - def test_volume_get_all_by_project(self): - volumes = [] - for i in range(3): - volumes.append([db.volume_create(self.ctxt, { - 'project_id': 'p%d' % i}) for j in range(3)]) - for i in range(3): - self._assertEqualListsOfObjects(volumes[i], - db.volume_get_all_by_project( - self.ctxt, 'p%d' % i, None, - None, ['host'], None)) - - def test_volume_get_by_name(self): - db.volume_create(self.ctxt, {'display_name': 'vol1'}) - db.volume_create(self.ctxt, {'display_name': 'vol2'}) - 
db.volume_create(self.ctxt, {'display_name': 'vol3'}) - - # no name filter - volumes = db.volume_get_all(self.ctxt, None, None, ['created_at'], - ['asc']) - self.assertEqual(3, len(volumes)) - # filter on name - volumes = db.volume_get_all(self.ctxt, None, None, ['created_at'], - ['asc'], {'display_name': 'vol2'}) - self.assertEqual(1, len(volumes)) - self.assertEqual('vol2', volumes[0]['display_name']) - # filter no match - volumes = db.volume_get_all(self.ctxt, None, None, ['created_at'], - ['asc'], {'display_name': 'vol4'}) - self.assertEqual(0, len(volumes)) - - def test_volume_list_by_status(self): - db.volume_create(self.ctxt, {'display_name': 'vol1', - 'status': 'available'}) - db.volume_create(self.ctxt, {'display_name': 'vol2', - 'status': 'available'}) - db.volume_create(self.ctxt, {'display_name': 'vol3', - 'status': 'in-use'}) - - # no status filter - volumes = db.volume_get_all(self.ctxt, None, None, ['created_at'], - ['asc']) - self.assertEqual(3, len(volumes)) - # single match - volumes = db.volume_get_all(self.ctxt, None, None, ['created_at'], - ['asc'], {'status': 'in-use'}) - self.assertEqual(1, len(volumes)) - self.assertEqual('in-use', volumes[0]['status']) - # multiple match - volumes = db.volume_get_all(self.ctxt, None, None, ['created_at'], - ['asc'], {'status': 'available'}) - self.assertEqual(2, len(volumes)) - for volume in volumes: - self.assertEqual('available', volume['status']) - # multiple filters - volumes = db.volume_get_all(self.ctxt, None, None, ['created_at'], - ['asc'], {'status': 'available', - 'display_name': 'vol1'}) - self.assertEqual(1, len(volumes)) - self.assertEqual('vol1', volumes[0]['display_name']) - self.assertEqual('available', volumes[0]['status']) - # no match - volumes = db.volume_get_all(self.ctxt, None, None, ['created_at'], - ['asc'], {'status': 'in-use', - 'display_name': 'vol1'}) - self.assertEqual(0, len(volumes)) - - def _assertEqualsVolumeOrderResult(self, correct_order, limit=None, - sort_keys=None, 
sort_dirs=None, - filters=None, project_id=None, - marker=None, - match_keys=['id', 'display_name', - 'volume_metadata', - 'created_at']): - """Verifies that volumes are returned in the correct order.""" - if project_id: - result = db.volume_get_all_by_project(self.ctxt, project_id, - marker, limit, - sort_keys=sort_keys, - sort_dirs=sort_dirs, - filters=filters) - else: - result = db.volume_get_all(self.ctxt, marker, limit, - sort_keys=sort_keys, - sort_dirs=sort_dirs, - filters=filters) - self.assertEqual(len(correct_order), len(result)) - for vol1, vol2 in zip(result, correct_order): - for key in match_keys: - val1 = vol1.get(key) - val2 = vol2.get(key) - # metadata is a dict, compare the 'key' and 'value' of each - if key == 'volume_metadata': - self.assertEqual(len(val1), len(val2)) - val1_dict = {x.key: x.value for x in val1} - val2_dict = {x.key: x.value for x in val2} - self.assertDictEqual(val1_dict, val2_dict) - else: - self.assertEqual(val1, val2) - return result - - def test_volume_get_by_filter(self): - """Verifies that all filtering is done at the DB layer.""" - vols = [] - vols.extend([db.volume_create(self.ctxt, - {'project_id': 'g1', - 'display_name': 'name_%d' % i, - 'size': 1}) - for i in range(2)]) - vols.extend([db.volume_create(self.ctxt, - {'project_id': 'g1', - 'display_name': 'name_%d' % i, - 'size': 2}) - for i in range(2)]) - vols.extend([db.volume_create(self.ctxt, - {'project_id': 'g1', - 'display_name': 'name_%d' % i}) - for i in range(2)]) - vols.extend([db.volume_create(self.ctxt, - {'project_id': 'g2', - 'display_name': 'name_%d' % i, - 'size': 1}) - for i in range(2)]) - - # By project, filter on size and name - filters = {'size': '1'} - correct_order = [vols[1], vols[0]] - self._assertEqualsVolumeOrderResult(correct_order, filters=filters, - project_id='g1') - filters = {'size': '1', 'display_name': 'name_1'} - correct_order = [vols[1]] - self._assertEqualsVolumeOrderResult(correct_order, filters=filters, - project_id='g1') - - # 
Remove project scope - filters = {'size': '1'} - correct_order = [vols[7], vols[6], vols[1], vols[0]] - self._assertEqualsVolumeOrderResult(correct_order, filters=filters) - filters = {'size': '1', 'display_name': 'name_1'} - correct_order = [vols[7], vols[1]] - self._assertEqualsVolumeOrderResult(correct_order, filters=filters) - - # Remove size constraint - filters = {'display_name': 'name_1'} - correct_order = [vols[5], vols[3], vols[1]] - self._assertEqualsVolumeOrderResult(correct_order, filters=filters, - project_id='g1') - correct_order = [vols[7], vols[5], vols[3], vols[1]] - self._assertEqualsVolumeOrderResult(correct_order, filters=filters) - - # Verify bogus values return nothing - filters = {'display_name': 'name_1', 'bogus_value': 'foo'} - self._assertEqualsVolumeOrderResult([], filters=filters, - project_id='g1') - self._assertEqualsVolumeOrderResult([], project_id='bogus') - self._assertEqualsVolumeOrderResult([], filters=filters) - self._assertEqualsVolumeOrderResult([], filters={'metadata': - 'not valid'}) - self._assertEqualsVolumeOrderResult([], filters={'metadata': - ['not', 'valid']}) - - # Verify that relationship property keys return nothing, these - # exist on the Volumes model but are not columns - filters = {'volume_type': 'bogus_type'} - self._assertEqualsVolumeOrderResult([], filters=filters) - - def test_volume_get_all_filters_limit(self): - vol1 = db.volume_create(self.ctxt, {'display_name': 'test1'}) - vol2 = db.volume_create(self.ctxt, {'display_name': 'test2'}) - vol3 = db.volume_create(self.ctxt, {'display_name': 'test2', - 'metadata': {'key1': 'val1'}}) - vol4 = db.volume_create(self.ctxt, {'display_name': 'test3', - 'metadata': {'key1': 'val1', - 'key2': 'val2'}}) - vol5 = db.volume_create(self.ctxt, {'display_name': 'test3', - 'metadata': {'key2': 'val2', - 'key3': 'val3'}, - 'host': 'host5'}) - db.volume_admin_metadata_update(self.ctxt, vol5.id, - {"readonly": "True"}, False) - - vols = [vol5, vol4, vol3, vol2, vol1] - - # 
Ensure we have 5 total instances - self._assertEqualsVolumeOrderResult(vols) - - # No filters, test limit - self._assertEqualsVolumeOrderResult(vols[:1], limit=1) - self._assertEqualsVolumeOrderResult(vols[:4], limit=4) - - # Just the test2 volumes - filters = {'display_name': 'test2'} - self._assertEqualsVolumeOrderResult([vol3, vol2], filters=filters) - self._assertEqualsVolumeOrderResult([vol3], limit=1, - filters=filters) - self._assertEqualsVolumeOrderResult([vol3, vol2], limit=2, - filters=filters) - self._assertEqualsVolumeOrderResult([vol3, vol2], limit=100, - filters=filters) - - # metadata filters - filters = {'metadata': {'key1': 'val1'}} - self._assertEqualsVolumeOrderResult([vol4, vol3], filters=filters) - self._assertEqualsVolumeOrderResult([vol4], limit=1, - filters=filters) - self._assertEqualsVolumeOrderResult([vol4, vol3], limit=10, - filters=filters) - - filters = {'metadata': {'readonly': 'True'}} - self._assertEqualsVolumeOrderResult([vol5], filters=filters) - - filters = {'metadata': {'key1': 'val1', - 'key2': 'val2'}} - self._assertEqualsVolumeOrderResult([vol4], filters=filters) - self._assertEqualsVolumeOrderResult([vol4], limit=1, - filters=filters) - - # No match - filters = {'metadata': {'key1': 'val1', - 'key2': 'val2', - 'key3': 'val3'}} - self._assertEqualsVolumeOrderResult([], filters=filters) - filters = {'metadata': {'key1': 'val1', - 'key2': 'bogus'}} - self._assertEqualsVolumeOrderResult([], filters=filters) - filters = {'metadata': {'key1': 'val1', - 'key2': 'val1'}} - self._assertEqualsVolumeOrderResult([], filters=filters) - - # Combination - filters = {'display_name': 'test2', - 'metadata': {'key1': 'val1'}} - self._assertEqualsVolumeOrderResult([vol3], filters=filters) - self._assertEqualsVolumeOrderResult([vol3], limit=1, - filters=filters) - self._assertEqualsVolumeOrderResult([vol3], limit=100, - filters=filters) - filters = {'display_name': 'test3', - 'metadata': {'key2': 'val2', - 'key3': 'val3'}, - 'host': 'host5'} - 
self._assertEqualsVolumeOrderResult([vol5], filters=filters) - self._assertEqualsVolumeOrderResult([vol5], limit=1, - filters=filters) - - def test_volume_get_no_migration_targets(self): - """Verifies the unique 'no_migration_targets'=True filter. - - This filter returns volumes with either a NULL 'migration_status' - or a non-NULL value that does not start with 'target:'. - """ - vol1 = db.volume_create(self.ctxt, {'display_name': 'test1'}) - vol2 = db.volume_create(self.ctxt, {'display_name': 'test2', - 'migration_status': 'bogus'}) - vol3 = db.volume_create(self.ctxt, {'display_name': 'test3', - 'migration_status': 'btarget:'}) - vol4 = db.volume_create(self.ctxt, {'display_name': 'test4', - 'migration_status': 'target:'}) - - # Ensure we have 4 total instances, default sort of created_at (desc) - self._assertEqualsVolumeOrderResult([vol4, vol3, vol2, vol1]) - - # Apply the unique filter - filters = {'no_migration_targets': True} - self._assertEqualsVolumeOrderResult([vol3, vol2, vol1], - filters=filters) - self._assertEqualsVolumeOrderResult([vol3, vol2], limit=2, - filters=filters) - - filters = {'no_migration_targets': True, - 'display_name': 'test4'} - self._assertEqualsVolumeOrderResult([], filters=filters) - - def test_volume_get_all_by_filters_sort_keys(self): - # Volumes that will reply to the query - test_h1_avail = db.volume_create(self.ctxt, {'display_name': 'test', - 'status': 'available', - 'host': 'h1'}) - test_h1_error = db.volume_create(self.ctxt, {'display_name': 'test', - 'status': 'error', - 'host': 'h1'}) - test_h1_error2 = db.volume_create(self.ctxt, {'display_name': 'test', - 'status': 'error', - 'host': 'h1'}) - test_h2_avail = db.volume_create(self.ctxt, {'display_name': 'test', - 'status': 'available', - 'host': 'h2'}) - test_h2_error = db.volume_create(self.ctxt, {'display_name': 'test', - 'status': 'error', - 'host': 'h2'}) - test_h2_error2 = db.volume_create(self.ctxt, {'display_name': 'test', - 'status': 'error', - 'host': 'h2'}) - # 
Other volumes in the DB, will not match name filter - other_error = db.volume_create(self.ctxt, {'display_name': 'other', - 'status': 'error', - 'host': 'a'}) - other_active = db.volume_create(self.ctxt, {'display_name': 'other', - 'status': 'available', - 'host': 'a'}) - filters = {'display_name': 'test'} - - # Verify different sort key/direction combinations - sort_keys = ['host', 'status', 'created_at'] - sort_dirs = ['asc', 'asc', 'asc'] - correct_order = [test_h1_avail, test_h1_error, test_h1_error2, - test_h2_avail, test_h2_error, test_h2_error2] - self._assertEqualsVolumeOrderResult(correct_order, filters=filters, - sort_keys=sort_keys, - sort_dirs=sort_dirs) - - sort_dirs = ['asc', 'desc', 'asc'] - correct_order = [test_h1_error, test_h1_error2, test_h1_avail, - test_h2_error, test_h2_error2, test_h2_avail] - self._assertEqualsVolumeOrderResult(correct_order, filters=filters, - sort_keys=sort_keys, - sort_dirs=sort_dirs) - - sort_dirs = ['desc', 'desc', 'asc'] - correct_order = [test_h2_error, test_h2_error2, test_h2_avail, - test_h1_error, test_h1_error2, test_h1_avail] - self._assertEqualsVolumeOrderResult(correct_order, filters=filters, - sort_keys=sort_keys, - sort_dirs=sort_dirs) - - # created_at is added by default if not supplied, descending order - sort_keys = ['host', 'status'] - sort_dirs = ['desc', 'desc'] - correct_order = [test_h2_error2, test_h2_error, test_h2_avail, - test_h1_error2, test_h1_error, test_h1_avail] - self._assertEqualsVolumeOrderResult(correct_order, filters=filters, - sort_keys=sort_keys, - sort_dirs=sort_dirs) - - sort_dirs = ['asc', 'asc'] - correct_order = [test_h1_avail, test_h1_error, test_h1_error2, - test_h2_avail, test_h2_error, test_h2_error2] - self._assertEqualsVolumeOrderResult(correct_order, filters=filters, - sort_keys=sort_keys, - sort_dirs=sort_dirs) - - # Remove name filter - correct_order = [other_active, other_error, - test_h1_avail, test_h1_error, test_h1_error2, - test_h2_avail, test_h2_error, 
test_h2_error2] - self._assertEqualsVolumeOrderResult(correct_order, sort_keys=sort_keys, - sort_dirs=sort_dirs) - - # No sort data, default sort of created_at, id (desc) - correct_order = [other_active, other_error, - test_h2_error2, test_h2_error, test_h2_avail, - test_h1_error2, test_h1_error, test_h1_avail] - self._assertEqualsVolumeOrderResult(correct_order) - - def test_volume_get_all_by_filters_sort_keys_paginate(self): - """Verifies sort order with pagination.""" - # Volumes that will reply to the query - test1_avail = db.volume_create(self.ctxt, {'display_name': 'test', - 'size': 1, - 'status': 'available'}) - test1_error = db.volume_create(self.ctxt, {'display_name': 'test', - 'size': 1, - 'status': 'error'}) - test1_error2 = db.volume_create(self.ctxt, {'display_name': 'test', - 'size': 1, - 'status': 'error'}) - test2_avail = db.volume_create(self.ctxt, {'display_name': 'test', - 'size': 2, - 'status': 'available'}) - test2_error = db.volume_create(self.ctxt, {'display_name': 'test', - 'size': 2, - 'status': 'error'}) - test2_error2 = db.volume_create(self.ctxt, {'display_name': 'test', - 'size': 2, - 'status': 'error'}) - - # Other volumes in the DB, will not match name filter - db.volume_create(self.ctxt, {'display_name': 'other'}) - db.volume_create(self.ctxt, {'display_name': 'other'}) - filters = {'display_name': 'test'} - # Common sort information for every query - sort_keys = ['size', 'status', 'created_at'] - sort_dirs = ['asc', 'desc', 'asc'] - # Overall correct volume order based on the sort keys - correct_order = [test1_error, test1_error2, test1_avail, - test2_error, test2_error2, test2_avail] - - # Limits of 1, 2, and 3, verify that the volumes returned are in the - # correct sorted order, update the marker to get the next correct page - for limit in range(1, 4): - marker = None - # Include the maximum number of volumes (ie, 6) to ensure that - # the last query (with marker pointing to the last volume) - # returns 0 servers - for i in 
range(0, 7, limit): - if i == len(correct_order): - correct = [] - else: - correct = correct_order[i:i + limit] - vols = self._assertEqualsVolumeOrderResult( - correct, filters=filters, - sort_keys=sort_keys, sort_dirs=sort_dirs, - limit=limit, marker=marker) - if correct: - marker = vols[-1]['id'] - self.assertEqual(correct[-1]['id'], marker) - - def test_volume_get_all_invalid_sort_key(self): - for keys in (['foo'], ['display_name', 'foo']): - self.assertRaises(exception.InvalidInput, db.volume_get_all, - self.ctxt, None, None, sort_keys=keys) - - def test_volume_update(self): - volume = db.volume_create(self.ctxt, {'host': 'h1'}) - db.volume_update(self.ctxt, volume.id, - {'host': 'h2', - 'metadata': {'m1': 'v1'}}) - volume = db.volume_get(self.ctxt, volume.id) - self.assertEqual('h2', volume.host) - self.assertEqual(1, len(volume.volume_metadata)) - db_metadata = volume.volume_metadata[0] - self.assertEqual('m1', db_metadata.key) - self.assertEqual('v1', db_metadata.value) - - def test_volume_update_nonexistent(self): - self.assertRaises(exception.VolumeNotFound, db.volume_update, - self.ctxt, 42, {}) - - def test_volume_metadata_get(self): - metadata = {'a': 'b', 'c': 'd'} - db.volume_create(self.ctxt, {'id': 1, 'metadata': metadata}) - - self.assertEqual(metadata, db.volume_metadata_get(self.ctxt, 1)) - - def test_volume_metadata_update(self): - metadata1 = {'a': '1', 'c': '2'} - metadata2 = {'a': '3', 'd': '5'} - should_be = {'a': '3', 'c': '2', 'd': '5'} - - db.volume_create(self.ctxt, {'id': 1, 'metadata': metadata1}) - db_meta = db.volume_metadata_update(self.ctxt, 1, metadata2, False) - - self.assertEqual(should_be, db_meta) - - @mock.patch.object(db.sqlalchemy.api, - '_volume_glance_metadata_key_to_id', - return_value = '1') - def test_volume_glance_metadata_key_to_id_called(self, - metadata_key_to_id_mock): - image_metadata = {'abc': '123'} - - # create volume with metadata. 
- db.volume_create(self.ctxt, {'id': 1, - 'metadata': image_metadata}) - - # delete metadata associated with the volume. - db.volume_metadata_delete(self.ctxt, - 1, - 'abc', - meta_type=common.METADATA_TYPES.image) - - # assert _volume_glance_metadata_key_to_id() was called exactly once - metadata_key_to_id_mock.assert_called_once_with(self.ctxt, 1, 'abc') - - def test_case_sensitive_glance_metadata_delete(self): - user_metadata = {'a': '1', 'c': '2'} - image_metadata = {'abc': '123', 'ABC': '123'} - - # create volume with metadata. - db.volume_create(self.ctxt, {'id': 1, - 'metadata': user_metadata}) - - # delete user metadata associated with the volume. - db.volume_metadata_delete(self.ctxt, 1, 'c', - meta_type=common.METADATA_TYPES.user) - user_metadata.pop('c') - - self.assertEqual(user_metadata, - db.volume_metadata_get(self.ctxt, 1)) - - # create image metadata associated with the volume. - db.volume_metadata_update( - self.ctxt, - 1, - image_metadata, - False, - meta_type=common.METADATA_TYPES.image) - - # delete image metadata associated with the volume. - db.volume_metadata_delete( - self.ctxt, - 1, - 'abc', - meta_type=common.METADATA_TYPES.image) - - image_metadata.pop('abc') - - # parse the result to build the dict. - rows = db.volume_glance_metadata_get(self.ctxt, 1) - result = {} - for row in rows: - result[row['key']] = row['value'] - self.assertEqual(image_metadata, result) - - def test_volume_metadata_update_with_metatype(self): - user_metadata1 = {'a': '1', 'c': '2'} - user_metadata2 = {'a': '3', 'd': '5'} - expected1 = {'a': '3', 'c': '2', 'd': '5'} - image_metadata1 = {'e': '1', 'f': '2'} - image_metadata2 = {'e': '3', 'g': '5'} - expected2 = {'e': '3', 'f': '2', 'g': '5'} - FAKE_METADATA_TYPE = enum.Enum('METADATA_TYPES', 'fake_type') - - db.volume_create(self.ctxt, {'id': 1, 'metadata': user_metadata1}) - - # update user metatdata associated with volume. 
- db_meta = db.volume_metadata_update( - self.ctxt, - 1, - user_metadata2, - False, - meta_type=common.METADATA_TYPES.user) - self.assertEqual(expected1, db_meta) - - # create image metatdata associated with volume. - db_meta = db.volume_metadata_update( - self.ctxt, - 1, - image_metadata1, - False, - meta_type=common.METADATA_TYPES.image) - self.assertEqual(image_metadata1, db_meta) - - # update image metatdata associated with volume. - db_meta = db.volume_metadata_update( - self.ctxt, - 1, - image_metadata2, - False, - meta_type=common.METADATA_TYPES.image) - self.assertEqual(expected2, db_meta) - - # update volume with invalid metadata type. - self.assertRaises(exception.InvalidMetadataType, - db.volume_metadata_update, - self.ctxt, - 1, - image_metadata1, - False, - FAKE_METADATA_TYPE.fake_type) - - @ddt.data(common.METADATA_TYPES.user, common.METADATA_TYPES.image) - @mock.patch.object(timeutils, 'utcnow') - @mock.patch.object(sqlalchemy_api, 'resource_exists') - @mock.patch.object(sqlalchemy_api, 'conditional_update') - @mock.patch.object(sqlalchemy_api, '_volume_x_metadata_get_query') - def test_volume_metadata_delete_deleted_at_updated(self, - meta_type, - mock_query, - mock_update, - mock_resource, - mock_utc): - mock_query.all.return_value = {} - mock_utc.return_value = 'fake_time' - - db.volume_metadata_update(self.ctxt, 1, {}, True, meta_type=meta_type) - - mock_update.assert_called_once_with(mock.ANY, mock.ANY, - {'deleted': True, - 'deleted_at': 'fake_time'}, - mock.ANY) - - def test_volume_metadata_update_delete(self): - metadata1 = {'a': '1', 'c': '2'} - metadata2 = {'a': '3', 'd': '4'} - should_be = metadata2 - - db.volume_create(self.ctxt, {'id': 1, 'metadata': metadata1}) - db_meta = db.volume_metadata_update(self.ctxt, 1, metadata2, True) - - self.assertEqual(should_be, db_meta) - - def test_volume_metadata_delete(self): - metadata = {'a': 'b', 'c': 'd'} - db.volume_create(self.ctxt, {'id': 1, 'metadata': metadata}) - 
db.volume_metadata_delete(self.ctxt, 1, 'c') - metadata.pop('c') - self.assertEqual(metadata, db.volume_metadata_get(self.ctxt, 1)) - - def test_volume_metadata_delete_with_metatype(self): - user_metadata = {'a': '1', 'c': '2'} - image_metadata = {'e': '1', 'f': '2'} - FAKE_METADATA_TYPE = enum.Enum('METADATA_TYPES', 'fake_type') - - # test that user metadata deleted with meta_type specified. - db.volume_create(self.ctxt, {'id': 1, 'metadata': user_metadata}) - db.volume_metadata_delete(self.ctxt, 1, 'c', - meta_type=common.METADATA_TYPES.user) - user_metadata.pop('c') - self.assertEqual(user_metadata, db.volume_metadata_get(self.ctxt, 1)) - - # update the image metadata associated with the volume. - db.volume_metadata_update( - self.ctxt, - 1, - image_metadata, - False, - meta_type=common.METADATA_TYPES.image) - - # test that image metadata deleted with meta_type specified. - db.volume_metadata_delete(self.ctxt, 1, 'e', - meta_type=common.METADATA_TYPES.image) - image_metadata.pop('e') - - # parse the result to build the dict. - rows = db.volume_glance_metadata_get(self.ctxt, 1) - result = {} - for row in rows: - result[row['key']] = row['value'] - self.assertEqual(image_metadata, result) - - # delete volume with invalid metadata type. 
- self.assertRaises(exception.InvalidMetadataType, - db.volume_metadata_delete, - self.ctxt, - 1, - 'f', - FAKE_METADATA_TYPE.fake_type) - - def test_volume_glance_metadata_create(self): - volume = db.volume_create(self.ctxt, {'host': 'h1'}) - db.volume_glance_metadata_create(self.ctxt, volume['id'], - 'image_name', - u'\xe4\xbd\xa0\xe5\xa5\xbd') - glance_meta = db.volume_glance_metadata_get(self.ctxt, volume['id']) - for meta_entry in glance_meta: - if meta_entry.key == 'image_name': - image_name = meta_entry.value - self.assertEqual(u'\xe4\xbd\xa0\xe5\xa5\xbd', image_name) - - def test_volume_glance_metadata_list_get(self): - """Test volume_glance_metadata_list_get in DB API.""" - db.volume_create(self.ctxt, {'id': 'fake1', 'status': 'available', - 'host': 'test', 'provider_location': '', - 'size': 1}) - db.volume_glance_metadata_create(self.ctxt, 'fake1', 'key1', 'value1') - db.volume_glance_metadata_create(self.ctxt, 'fake1', 'key2', 'value2') - - db.volume_create(self.ctxt, {'id': 'fake2', 'status': 'available', - 'host': 'test', 'provider_location': '', - 'size': 1}) - db.volume_glance_metadata_create(self.ctxt, 'fake2', 'key3', 'value3') - db.volume_glance_metadata_create(self.ctxt, 'fake2', 'key4', 'value4') - - expect_result = [{'volume_id': 'fake1', 'key': 'key1', - 'value': 'value1'}, - {'volume_id': 'fake1', 'key': 'key2', - 'value': 'value2'}, - {'volume_id': 'fake2', 'key': 'key3', - 'value': 'value3'}, - {'volume_id': 'fake2', 'key': 'key4', - 'value': 'value4'}] - self._assertEqualListsOfObjects(expect_result, - db.volume_glance_metadata_list_get( - self.ctxt, ['fake1', 'fake2']), - ignored_keys=['id', - 'snapshot_id', - 'created_at', - 'deleted', 'deleted_at', - 'updated_at']) - - def _create_volume_with_image_metadata(self): - vol1 = db.volume_create(self.ctxt, {'display_name': 'test1'}) - db.volume_glance_metadata_create(self.ctxt, vol1.id, 'image_name', - 'imageTestOne') - db.volume_glance_metadata_create(self.ctxt, vol1.id, 'test_image_key', - 
'test_image_value') - vol2 = db.volume_create(self.ctxt, {'display_name': 'test2'}) - db.volume_glance_metadata_create(self.ctxt, vol2.id, 'image_name', - 'imageTestTwo') - db.volume_glance_metadata_create(self.ctxt, vol2.id, 'disk_format', - 'qcow2') - return [vol1, vol2] - - def test_volume_get_all_by_image_name_and_key(self): - vols = self._create_volume_with_image_metadata() - filters = {'glance_metadata': {'image_name': 'imageTestOne', - 'test_image_key': 'test_image_value'}} - volumes = db.volume_get_all(self.ctxt, None, None, ['created_at'], - ['desc'], filters=filters) - self._assertEqualListsOfObjects([vols[0]], volumes) - - def test_volume_get_all_by_image_name_and_disk_format(self): - vols = self._create_volume_with_image_metadata() - filters = {'glance_metadata': {'image_name': 'imageTestTwo', - 'disk_format': 'qcow2'}} - volumes = db.volume_get_all(self.ctxt, None, None, ['created_at'], - ['desc'], filters=filters) - self._assertEqualListsOfObjects([vols[1]], volumes) - - def test_volume_get_all_by_invalid_image_metadata(self): - # Test with invalid image metadata - self._create_volume_with_image_metadata() - filters = {'glance_metadata': {'invalid_key': 'invalid_value', - 'test_image_key': 'test_image_value'}} - volumes = db.volume_get_all(self.ctxt, None, None, ['created_at'], - ['desc'], filters=filters) - self._assertEqualListsOfObjects([], volumes) - - def _create_volumes_to_test_include_in(self): - """Helper method for test_volume_include_in_* tests.""" - return [ - db.volume_create(self.ctxt, - {'host': 'host1@backend1#pool1', - 'cluster_name': 'cluster1@backend1#pool1'}), - db.volume_create(self.ctxt, - {'host': 'host1@backend2#pool2', - 'cluster_name': 'cluster1@backend2#pool2'}), - db.volume_create(self.ctxt, - {'host': 'host2@backend#poo1', - 'cluster_name': 'cluster2@backend#pool'}), - ] - - @ddt.data('host1@backend1#pool1', 'host1@backend1') - def test_volume_include_in_cluster_by_host(self, host): - """Basic volume include test filtering 
by host and with full rename.""" - vol = self._create_volumes_to_test_include_in()[0] - - cluster_name = 'my_cluster' - result = db.volume_include_in_cluster(self.ctxt, cluster_name, - partial_rename=False, - host=host) - self.assertEqual(1, result) - db_vol = db.volume_get(self.ctxt, vol.id) - self.assertEqual(cluster_name, db_vol.cluster_name) - - def test_volume_include_in_cluster_by_host_multiple(self): - """Partial cluster rename filtering with host level info.""" - vols = self._create_volumes_to_test_include_in()[0:2] - - host = 'host1' - cluster_name = 'my_cluster' - result = db.volume_include_in_cluster(self.ctxt, cluster_name, - partial_rename=True, - host=host) - self.assertEqual(2, result) - db_vols = [db.volume_get(self.ctxt, vols[0].id), - db.volume_get(self.ctxt, vols[1].id)] - for i in range(2): - self.assertEqual(cluster_name + vols[i].host[len(host):], - db_vols[i].cluster_name) - - @ddt.data('cluster1@backend1#pool1', 'cluster1@backend1') - def test_volume_include_in_cluster_by_cluster_name(self, cluster_name): - """Basic volume include test filtering by cluster with full rename.""" - vol = self._create_volumes_to_test_include_in()[0] - - new_cluster_name = 'cluster_new@backend1#pool' - result = db.volume_include_in_cluster(self.ctxt, new_cluster_name, - partial_rename=False, - cluster_name=cluster_name) - self.assertEqual(1, result) - db_vol = db.volume_get(self.ctxt, vol.id) - self.assertEqual(new_cluster_name, db_vol.cluster_name) - - def test_volume_include_in_cluster_by_cluster_multiple(self): - """Partial rename filtering with cluster with host level info.""" - vols = self._create_volumes_to_test_include_in()[0:2] - - cluster_name = 'cluster1' - new_cluster_name = 'my_cluster' - result = db.volume_include_in_cluster(self.ctxt, new_cluster_name, - partial_rename=True, - cluster_name=cluster_name) - self.assertEqual(2, result) - db_vols = [db.volume_get(self.ctxt, vols[0].id), - db.volume_get(self.ctxt, vols[1].id)] - for i in range(2): - 
self.assertEqual( - new_cluster_name + vols[i].cluster_name[len(cluster_name):], - db_vols[i].cluster_name) - - -@ddt.ddt -class DBAPISnapshotTestCase(BaseTest): - - """Tests for cinder.db.api.snapshot_*.""" - - def test_snapshot_data_get_for_project(self): - actual = db.snapshot_data_get_for_project(self.ctxt, 'project1') - self.assertEqual((0, 0), actual) - db.volume_create(self.ctxt, {'id': 1, - 'project_id': 'project1', - 'size': 42}) - db.snapshot_create(self.ctxt, {'id': 1, 'volume_id': 1, - 'project_id': 'project1', - 'volume_size': 42}) - actual = db.snapshot_data_get_for_project(self.ctxt, 'project1') - self.assertEqual((1, 42), actual) - - @ddt.data({'time_collection': [1, 2, 3], - 'latest': 1}, - {'time_collection': [4, 2, 6], - 'latest': 2}, - {'time_collection': [8, 2, 1], - 'latest': 1}) - @ddt.unpack - def test_snapshot_get_latest_for_volume(self, time_collection, latest): - def hours_ago(hour): - return timeutils.utcnow() - datetime.timedelta( - hours=hour) - db.volume_create(self.ctxt, {'id': 1}) - for snapshot in time_collection: - db.snapshot_create(self.ctxt, - {'id': snapshot, 'volume_id': 1, - 'display_name': 'one', - 'created_at': hours_ago(snapshot), - 'status': fields.SnapshotStatus.AVAILABLE}) - - snapshot = db.snapshot_get_latest_for_volume(self.ctxt, 1) - - self.assertEqual(six.text_type(latest), snapshot['id']) - - def test_snapshot_get_latest_for_volume_not_found(self): - - db.volume_create(self.ctxt, {'id': 1}) - for t_id in [2, 3]: - db.snapshot_create(self.ctxt, - {'id': t_id, 'volume_id': t_id, - 'display_name': 'one', - 'status': fields.SnapshotStatus.AVAILABLE}) - - self.assertRaises(exception.VolumeSnapshotNotFound, - db.snapshot_get_latest_for_volume, self.ctxt, 1) - - def test_snapshot_get_all_by_filter(self): - db.volume_create(self.ctxt, {'id': 1}) - db.volume_create(self.ctxt, {'id': 2}) - snapshot1 = db.snapshot_create(self.ctxt, - {'id': 1, 'volume_id': 1, - 'display_name': 'one', - 'status': - 
fields.SnapshotStatus.AVAILABLE}) - snapshot2 = db.snapshot_create(self.ctxt, - {'id': 2, 'volume_id': 1, - 'display_name': 'two', - 'status': - fields.SnapshotStatus.CREATING}) - snapshot3 = db.snapshot_create(self.ctxt, - {'id': 3, 'volume_id': 2, - 'display_name': 'three', - 'status': - fields.SnapshotStatus.AVAILABLE}) - # no filter - filters = {} - snapshots = db.snapshot_get_all(self.ctxt, filters=filters) - self.assertEqual(3, len(snapshots)) - # single match - filters = {'display_name': 'two'} - self._assertEqualListsOfObjects([snapshot2], - db.snapshot_get_all( - self.ctxt, - filters), - ignored_keys=['metadata', 'volume']) - filters = {'volume_id': 2} - self._assertEqualListsOfObjects([snapshot3], - db.snapshot_get_all( - self.ctxt, - filters), - ignored_keys=['metadata', 'volume']) - # filter no match - filters = {'volume_id': 5} - self._assertEqualListsOfObjects([], - db.snapshot_get_all( - self.ctxt, - filters), - ignored_keys=['metadata', 'volume']) - filters = {'status': fields.SnapshotStatus.ERROR} - self._assertEqualListsOfObjects([], - db.snapshot_get_all( - self.ctxt, - filters), - ignored_keys=['metadata', 'volume']) - # multiple match - filters = {'volume_id': 1} - self._assertEqualListsOfObjects([snapshot1, snapshot2], - db.snapshot_get_all( - self.ctxt, - filters), - ignored_keys=['metadata', 'volume']) - filters = {'status': fields.SnapshotStatus.AVAILABLE} - self._assertEqualListsOfObjects([snapshot1, snapshot3], - db.snapshot_get_all( - self.ctxt, - filters), - ignored_keys=['metadata', 'volume']) - filters = {'volume_id': 1, 'status': fields.SnapshotStatus.AVAILABLE} - self._assertEqualListsOfObjects([snapshot1], - db.snapshot_get_all( - self.ctxt, - filters), - ignored_keys=['metadata', 'volume']) - filters = {'fake_key': 'fake'} - self._assertEqualListsOfObjects([], - db.snapshot_get_all( - self.ctxt, - filters), - ignored_keys=['metadata', 'volume']) - - @ddt.data('cluster_name', 'host') - def 
test_snapshot_get_all_filter_host_and_cluster(self, field): - volumes = [] - snapshots = [] - for i in range(2): - for value in ('host%d@backend#pool', 'host%d@backend', 'host%d'): - kwargs = {field: value % i} - vol = utils.create_volume(self.ctxt, **kwargs) - volumes.append(vol) - snapshots.append(utils.create_snapshot(self.ctxt, vol.id)) - - for i in range(3): - filters = {field: getattr(volumes[i], field)} - result = db.snapshot_get_all(self.ctxt, filters=filters) - self.assertEqual(i + 1, len(result)) - self.assertSetEqual({s.id for s in snapshots[:i + 1]}, - {s.id for s in result}) - - def test_snapshot_get_all_by_host(self): - db.volume_create(self.ctxt, {'id': 1, 'host': 'host1'}) - db.volume_create(self.ctxt, {'id': 2, 'host': 'host2'}) - snapshot1 = db.snapshot_create(self.ctxt, {'id': 1, 'volume_id': 1}) - snapshot2 = db.snapshot_create(self.ctxt, - {'id': 2, - 'volume_id': 2, - 'status': - fields.SnapshotStatus.ERROR}) - - self._assertEqualListsOfObjects([snapshot1], - db.snapshot_get_all_by_host( - self.ctxt, - 'host1'), - ignored_keys='volume') - self._assertEqualListsOfObjects([snapshot2], - db.snapshot_get_all_by_host( - self.ctxt, - 'host2'), - ignored_keys='volume') - self._assertEqualListsOfObjects( - [], db.snapshot_get_all_by_host(self.ctxt, 'host2', { - 'status': fields.SnapshotStatus.AVAILABLE}), - ignored_keys='volume') - self._assertEqualListsOfObjects( - [snapshot2], db.snapshot_get_all_by_host(self.ctxt, 'host2', { - 'status': fields.SnapshotStatus.ERROR}), - ignored_keys='volume') - self._assertEqualListsOfObjects([], - db.snapshot_get_all_by_host( - self.ctxt, - 'host2', {'fake_key': 'fake'}), - ignored_keys='volume') - # If host is None or empty string, empty list should be returned. 
- self.assertEqual([], db.snapshot_get_all_by_host(self.ctxt, None)) - self.assertEqual([], db.snapshot_get_all_by_host(self.ctxt, '')) - - def test_snapshot_get_all_by_host_with_pools(self): - db.volume_create(self.ctxt, {'id': 1, 'host': 'host1#pool1'}) - db.volume_create(self.ctxt, {'id': 2, 'host': 'host1#pool2'}) - - snapshot1 = db.snapshot_create(self.ctxt, {'id': 1, 'volume_id': 1}) - snapshot2 = db.snapshot_create(self.ctxt, {'id': 2, 'volume_id': 2}) - - self._assertEqualListsOfObjects([snapshot1, snapshot2], - db.snapshot_get_all_by_host( - self.ctxt, - 'host1'), - ignored_keys='volume') - self._assertEqualListsOfObjects([snapshot1], - db.snapshot_get_all_by_host( - self.ctxt, - 'host1#pool1'), - ignored_keys='volume') - - self._assertEqualListsOfObjects([], - db.snapshot_get_all_by_host( - self.ctxt, - 'host1#pool0'), - ignored_keys='volume') - - def test_snapshot_get_all_by_project(self): - db.volume_create(self.ctxt, {'id': 1}) - db.volume_create(self.ctxt, {'id': 2}) - snapshot1 = db.snapshot_create(self.ctxt, {'id': 1, 'volume_id': 1, - 'project_id': 'project1'}) - snapshot2 = db.snapshot_create( - self.ctxt, {'id': 2, 'volume_id': 2, 'status': - fields.SnapshotStatus.ERROR, 'project_id': 'project2'}) - - self._assertEqualListsOfObjects([snapshot1], - db.snapshot_get_all_by_project( - self.ctxt, - 'project1'), - ignored_keys='volume') - self._assertEqualListsOfObjects([snapshot2], - db.snapshot_get_all_by_project( - self.ctxt, - 'project2'), - ignored_keys='volume') - self._assertEqualListsOfObjects( - [], db.snapshot_get_all_by_project( - self.ctxt, - 'project2', - {'status': fields.SnapshotStatus.AVAILABLE}), - ignored_keys='volume') - self._assertEqualListsOfObjects( - [snapshot2], db.snapshot_get_all_by_project( - self.ctxt, 'project2', { - 'status': fields.SnapshotStatus.ERROR}), - ignored_keys='volume') - self._assertEqualListsOfObjects([], - db.snapshot_get_all_by_project( - self.ctxt, - 'project2', - {'fake_key': 'fake'}), - 
ignored_keys='volume') - - def test_snapshot_metadata_get(self): - metadata = {'a': 'b', 'c': 'd'} - db.volume_create(self.ctxt, {'id': 1}) - db.snapshot_create(self.ctxt, - {'id': 1, 'volume_id': 1, 'metadata': metadata}) - - self.assertEqual(metadata, db.snapshot_metadata_get(self.ctxt, 1)) - - def test_snapshot_metadata_update(self): - metadata1 = {'a': '1', 'c': '2'} - metadata2 = {'a': '3', 'd': '5'} - should_be = {'a': '3', 'c': '2', 'd': '5'} - - db.volume_create(self.ctxt, {'id': 1}) - db.snapshot_create(self.ctxt, - {'id': 1, 'volume_id': 1, 'metadata': metadata1}) - db_meta = db.snapshot_metadata_update(self.ctxt, 1, metadata2, False) - - self.assertEqual(should_be, db_meta) - - def test_snapshot_metadata_update_delete(self): - metadata1 = {'a': '1', 'c': '2'} - metadata2 = {'a': '3', 'd': '5'} - should_be = metadata2 - - db.volume_create(self.ctxt, {'id': 1}) - db.snapshot_create(self.ctxt, - {'id': 1, 'volume_id': 1, 'metadata': metadata1}) - db_meta = db.snapshot_metadata_update(self.ctxt, 1, metadata2, True) - - self.assertEqual(should_be, db_meta) - - @mock.patch.object(timeutils, 'utcnow') - @mock.patch.object(sqlalchemy_api, 'resource_exists') - @mock.patch.object(sqlalchemy_api, '_snapshot_metadata_get') - @mock.patch.object(sqlalchemy_api, '_snapshot_metadata_get_item') - def test_snapshot_metadata_delete_deleted_at_updated(self, - mock_metadata_item, - mock_metadata, - mock_resource, - mock_utc): - fake_metadata = {'fake_key1': 'fake_value1'} - mock_item = mock.Mock() - mock_metadata.return_value = fake_metadata - mock_utc.return_value = 'fake_time' - mock_metadata_item.side_effect = [mock_item] - - db.snapshot_metadata_update(self.ctxt, 1, {}, True) - - mock_item.update.assert_called_once_with({'deleted': True, - 'deleted_at': 'fake_time'}) - - def test_snapshot_metadata_delete(self): - metadata = {'a': '1', 'c': '2'} - should_be = {'a': '1'} - - db.volume_create(self.ctxt, {'id': 1}) - db.snapshot_create(self.ctxt, - {'id': 1, 'volume_id': 1, 
'metadata': metadata}) - db.snapshot_metadata_delete(self.ctxt, 1, 'c') - - self.assertEqual(should_be, db.snapshot_metadata_get(self.ctxt, 1)) - - -@ddt.ddt -class DBAPIConsistencygroupTestCase(BaseTest): - def _create_cgs_to_test_include_in(self): - """Helper method for test_consistencygroup_include_in_* tests.""" - return [ - db.consistencygroup_create( - self.ctxt, {'host': 'host1@backend1#pool1', - 'cluster_name': 'cluster1@backend1#pool1'}), - db.consistencygroup_create( - self.ctxt, {'host': 'host1@backend2#pool2', - 'cluster_name': 'cluster1@backend2#pool1'}), - db.consistencygroup_create( - self.ctxt, {'host': 'host2@backend#poo1', - 'cluster_name': 'cluster2@backend#pool'}), - ] - - @ddt.data('host1@backend1#pool1', 'host1@backend1') - def test_consistencygroup_include_in_cluster_by_host(self, host): - """Basic CG include test filtering by host and with full rename.""" - cg = self._create_cgs_to_test_include_in()[0] - - cluster_name = 'my_cluster' - result = db.consistencygroup_include_in_cluster(self.ctxt, - cluster_name, - partial_rename=False, - host=host) - self.assertEqual(1, result) - db_cg = db.consistencygroup_get(self.ctxt, cg.id) - self.assertEqual(cluster_name, db_cg.cluster_name) - - def test_consistencygroup_include_in_cluster_by_host_multiple(self): - """Partial cluster rename filtering with host level info.""" - cgs = self._create_cgs_to_test_include_in()[0:2] - - host = 'host1' - cluster_name = 'my_cluster' - result = db.consistencygroup_include_in_cluster(self.ctxt, - cluster_name, - partial_rename=True, - host=host) - self.assertEqual(2, result) - db_cgs = [db.consistencygroup_get(self.ctxt, cgs[0].id), - db.consistencygroup_get(self.ctxt, cgs[1].id)] - for i in range(2): - self.assertEqual(cluster_name + cgs[i].host[len(host):], - db_cgs[i].cluster_name) - - @ddt.data('cluster1@backend1#pool1', 'cluster1@backend1') - def test_consistencygroup_include_in_cluster_by_cluster_name(self, - cluster_name): - """Basic CG include test filtering 
by cluster with full rename.""" - cg = self._create_cgs_to_test_include_in()[0] - - new_cluster_name = 'cluster_new@backend1#pool' - result = db.consistencygroup_include_in_cluster( - self.ctxt, new_cluster_name, partial_rename=False, - cluster_name=cluster_name) - - self.assertEqual(1, result) - db_cg = db.consistencygroup_get(self.ctxt, cg.id) - self.assertEqual(new_cluster_name, db_cg.cluster_name) - - def test_consistencygroup_include_in_cluster_by_cluster_multiple(self): - """Partial rename filtering with cluster with host level info.""" - cgs = self._create_cgs_to_test_include_in()[0:2] - - cluster_name = 'cluster1' - new_cluster_name = 'my_cluster' - result = db.consistencygroup_include_in_cluster( - self.ctxt, new_cluster_name, partial_rename=True, - cluster_name=cluster_name) - - self.assertEqual(2, result) - db_cgs = [db.consistencygroup_get(self.ctxt, cgs[0].id), - db.consistencygroup_get(self.ctxt, cgs[1].id)] - for i in range(2): - self.assertEqual( - new_cluster_name + cgs[i].cluster_name[len(cluster_name):], - db_cgs[i].cluster_name) - - -class DBAPICgsnapshotTestCase(BaseTest): - """Tests for cinder.db.api.cgsnapshot_*.""" - - def _cgsnapshot_create(self, values): - return utils.create_cgsnapshot(self.ctxt, return_vo=False, **values) - - def test_cgsnapshot_get_all_by_filter(self): - cgsnapshot1 = self._cgsnapshot_create( - {'id': fake.CGSNAPSHOT_ID, - 'consistencygroup_id': fake.CONSISTENCY_GROUP_ID}) - cgsnapshot2 = self._cgsnapshot_create( - {'id': fake.CGSNAPSHOT2_ID, - 'consistencygroup_id': fake.CONSISTENCY_GROUP_ID}) - cgsnapshot3 = self._cgsnapshot_create( - {'id': fake.CGSNAPSHOT3_ID, - 'consistencygroup_id': fake.CONSISTENCY_GROUP2_ID}) - tests = [ - ({'consistencygroup_id': fake.CONSISTENCY_GROUP_ID}, - [cgsnapshot1, cgsnapshot2]), - ({'id': fake.CGSNAPSHOT3_ID}, [cgsnapshot3]), - ({'fake_key': 'fake'}, []) - ] - - # no filter - filters = None - cgsnapshots = db.cgsnapshot_get_all(self.ctxt, filters=filters) - self.assertEqual(3, 
len(cgsnapshots)) - - for filters, expected in tests: - self._assertEqualListsOfObjects(expected, - db.cgsnapshot_get_all( - self.ctxt, - filters)) - - def test_cgsnapshot_get_all_by_group(self): - cgsnapshot1 = self._cgsnapshot_create( - {'id': fake.CGSNAPSHOT_ID, - 'consistencygroup_id': fake.CONSISTENCY_GROUP_ID}) - cgsnapshot2 = self._cgsnapshot_create( - {'id': fake.CGSNAPSHOT2_ID, - 'consistencygroup_id': fake.CONSISTENCY_GROUP_ID}) - self._cgsnapshot_create( - {'id': fake.CGSNAPSHOT3_ID, - 'consistencygroup_id': fake.CONSISTENCY_GROUP2_ID}) - tests = [ - ({'consistencygroup_id': fake.CONSISTENCY_GROUP_ID}, - [cgsnapshot1, cgsnapshot2]), - ({'id': fake.CGSNAPSHOT3_ID}, []), - ({'consistencygroup_id': fake.CONSISTENCY_GROUP2_ID}, []), - (None, [cgsnapshot1, cgsnapshot2]), - ] - - for filters, expected in tests: - self._assertEqualListsOfObjects(expected, - db.cgsnapshot_get_all_by_group( - self.ctxt, - fake.CONSISTENCY_GROUP_ID, - filters)) - - db.cgsnapshot_destroy(self.ctxt, '1') - db.cgsnapshot_destroy(self.ctxt, '2') - db.cgsnapshot_destroy(self.ctxt, '3') - - def test_cgsnapshot_get_all_by_project(self): - cgsnapshot1 = self._cgsnapshot_create( - {'id': fake.CGSNAPSHOT_ID, - 'consistencygroup_id': fake.CONSISTENCY_GROUP_ID, - 'project_id': fake.PROJECT_ID}) - cgsnapshot2 = self._cgsnapshot_create( - {'id': fake.CGSNAPSHOT2_ID, - 'consistencygroup_id': fake.CONSISTENCY_GROUP_ID, - 'project_id': fake.PROJECT_ID}) - tests = [ - ({'id': fake.CGSNAPSHOT_ID}, [cgsnapshot1]), - ({'consistencygroup_id': fake.CONSISTENCY_GROUP_ID}, - [cgsnapshot1, cgsnapshot2]), - ({'fake_key': 'fake'}, []) - ] - - for filters, expected in tests: - self._assertEqualListsOfObjects(expected, - db.cgsnapshot_get_all_by_project( - self.ctxt, - fake.PROJECT_ID, - filters)) - - -class DBAPIVolumeTypeTestCase(BaseTest): - - """Tests for the db.api.volume_type_* methods.""" - - def setUp(self): - self.ctxt = context.get_admin_context() - super(DBAPIVolumeTypeTestCase, self).setUp() - - 
def test_volume_type_create_exists(self): - vt = db.volume_type_create(self.ctxt, {'name': 'n1'}) - self.assertRaises(exception.VolumeTypeExists, - db.volume_type_create, - self.ctxt, - {'name': 'n1'}) - self.assertRaises(exception.VolumeTypeExists, - db.volume_type_create, - self.ctxt, - {'name': 'n2', 'id': vt['id']}) - - def test_volume_type_access_remove(self): - vt = db.volume_type_create(self.ctxt, {'name': 'n1'}) - db.volume_type_access_add(self.ctxt, vt['id'], 'fake_project') - vtas = db.volume_type_access_get_all(self.ctxt, vt['id']) - self.assertEqual(1, len(vtas)) - db.volume_type_access_remove(self.ctxt, vt['id'], 'fake_project') - vtas = db.volume_type_access_get_all(self.ctxt, vt['id']) - self.assertEqual(0, len(vtas)) - - def test_volume_type_access_remove_high_id(self): - vt = db.volume_type_create(self.ctxt, {'name': 'n1'}) - vta = db.volume_type_access_add(self.ctxt, vt['id'], 'fake_project') - vtas = db.volume_type_access_get_all(self.ctxt, vt['id']) - self.assertEqual(1, len(vtas)) - - # NOTE(dulek): Bug 1496747 uncovered problems when deleting accesses - # with id column higher than 128. This is regression test for that - # case. - - session = sqlalchemy_api.get_session() - vta.id = 150 - vta.save(session=session) - session.close() - - db.volume_type_access_remove(self.ctxt, vt['id'], 'fake_project') - vtas = db.volume_type_access_get_all(self.ctxt, vt['id']) - self.assertEqual(0, len(vtas)) - - def test_get_volume_type_extra_specs(self): - # Ensure that volume type extra specs can be accessed after - # the DB session is closed. 
- vt_extra_specs = {'mock_key': 'mock_value'} - vt = db.volume_type_create(self.ctxt, - {'name': 'n1', - 'extra_specs': vt_extra_specs}) - volume_ref = db.volume_create(self.ctxt, {'volume_type_id': vt.id}) - - session = sqlalchemy_api.get_session() - volume = sqlalchemy_api._volume_get(self.ctxt, volume_ref.id, - session=session) - session.close() - - actual_specs = {} - for spec in volume.volume_type.extra_specs: - actual_specs[spec.key] = spec.value - self.assertEqual(vt_extra_specs, actual_specs) - - -class DBAPIEncryptionTestCase(BaseTest): - - """Tests for the db.api.volume_(type_)?encryption_* methods.""" - - _ignored_keys = [ - 'deleted', - 'deleted_at', - 'created_at', - 'updated_at', - 'encryption_id', - ] - - def setUp(self): - super(DBAPIEncryptionTestCase, self).setUp() - self.created = \ - [db.volume_type_encryption_create(self.ctxt, - values['volume_type_id'], values) - for values in self._get_values()] - - def _get_values(self, one=False, updated=False): - base_values = { - 'cipher': 'fake_cipher', - 'key_size': 256, - 'provider': 'fake_provider', - 'volume_type_id': 'fake_type', - 'control_location': 'front-end', - } - updated_values = { - 'cipher': 'fake_updated_cipher', - 'key_size': 512, - 'provider': 'fake_updated_provider', - 'volume_type_id': 'fake_type', - 'control_location': 'front-end', - } - - if one: - return base_values - - if updated: - values = updated_values - else: - values = base_values - - def compose(val, step): - if isinstance(val, str): - step = str(step) - return val + step - - return [{k: compose(v, i) for k, v in values.items()} - for i in range(1, 4)] - - def test_volume_type_encryption_create(self): - values = self._get_values() - for i, encryption in enumerate(self.created): - self._assertEqualObjects(values[i], encryption, self._ignored_keys) - - def test_volume_type_encryption_update(self): - for values in self._get_values(updated=True): - db.volume_type_encryption_update(self.ctxt, - values['volume_type_id'], values) - 
db_enc = db.volume_type_encryption_get(self.ctxt, - values['volume_type_id']) - self._assertEqualObjects(values, db_enc, self._ignored_keys) - - def test_volume_type_encryption_get(self): - for encryption in self.created: - encryption_get = \ - db.volume_type_encryption_get(self.ctxt, - encryption['volume_type_id']) - self._assertEqualObjects(encryption, encryption_get, - self._ignored_keys) - - def test_volume_type_encryption_update_with_no_create(self): - self.assertRaises(exception.VolumeTypeEncryptionNotFound, - db.volume_type_encryption_update, - self.ctxt, - 'fake_no_create_type', - {'cipher': 'fake_updated_cipher'}) - - def test_volume_type_encryption_delete(self): - values = { - 'cipher': 'fake_cipher', - 'key_size': 256, - 'provider': 'fake_provider', - 'volume_type_id': 'fake_type', - 'control_location': 'front-end', - } - - encryption = db.volume_type_encryption_create(self.ctxt, 'fake_type', - values) - self._assertEqualObjects(values, encryption, self._ignored_keys) - - db.volume_type_encryption_delete(self.ctxt, - encryption['volume_type_id']) - encryption_get = \ - db.volume_type_encryption_get(self.ctxt, - encryption['volume_type_id']) - self.assertIsNone(encryption_get) - - def test_volume_type_encryption_delete_no_create(self): - self.assertRaises(exception.VolumeTypeEncryptionNotFound, - db.volume_type_encryption_delete, - self.ctxt, - 'fake_no_create_type') - - def test_volume_encryption_get(self): - # normal volume -- metadata should be None - volume = db.volume_create(self.ctxt, {}) - values = db.volume_encryption_metadata_get(self.ctxt, volume.id) - - self.assertEqual({'encryption_key_id': None}, values) - - # encrypted volume -- metadata should match volume type - volume_type = self.created[0] - - volume = db.volume_create(self.ctxt, {'volume_type_id': - volume_type['volume_type_id']}) - values = db.volume_encryption_metadata_get(self.ctxt, volume.id) - - expected = { - 'encryption_key_id': volume.encryption_key_id, - 'control_location': 
volume_type['control_location'], - 'cipher': volume_type['cipher'], - 'key_size': volume_type['key_size'], - 'provider': volume_type['provider'], - } - self.assertEqual(expected, values) - - -class DBAPIReservationTestCase(BaseTest): - - """Tests for db.api.reservation_* methods.""" - - def setUp(self): - super(DBAPIReservationTestCase, self).setUp() - self.values = { - 'uuid': 'sample-uuid', - 'project_id': 'project1', - 'resource': 'resource', - 'delta': 42, - 'expire': (datetime.datetime.utcnow() + - datetime.timedelta(days=1)), - 'usage': {'id': 1} - } - - def test_reservation_commit(self): - reservations = _quota_reserve(self.ctxt, 'project1') - expected = {'project_id': 'project1', - 'volumes': {'reserved': 1, 'in_use': 0}, - 'gigabytes': {'reserved': 2, 'in_use': 0}, - } - self.assertEqual(expected, - db.quota_usage_get_all_by_project( - self.ctxt, 'project1')) - db.reservation_commit(self.ctxt, reservations, 'project1') - expected = {'project_id': 'project1', - 'volumes': {'reserved': 0, 'in_use': 1}, - 'gigabytes': {'reserved': 0, 'in_use': 2}, - } - self.assertEqual(expected, - db.quota_usage_get_all_by_project( - self.ctxt, - 'project1')) - - def test_reservation_rollback(self): - reservations = _quota_reserve(self.ctxt, 'project1') - expected = {'project_id': 'project1', - 'volumes': {'reserved': 1, 'in_use': 0}, - 'gigabytes': {'reserved': 2, 'in_use': 0}, - } - self.assertEqual(expected, - db.quota_usage_get_all_by_project( - self.ctxt, - 'project1')) - db.reservation_rollback(self.ctxt, reservations, 'project1') - expected = {'project_id': 'project1', - 'volumes': {'reserved': 0, 'in_use': 0}, - 'gigabytes': {'reserved': 0, 'in_use': 0}, - } - self.assertEqual(expected, - db.quota_usage_get_all_by_project( - self.ctxt, - 'project1')) - - def test_reservation_expire(self): - self.values['expire'] = datetime.datetime.utcnow() + \ - datetime.timedelta(days=1) - _quota_reserve(self.ctxt, 'project1') - db.reservation_expire(self.ctxt) - - expected = 
{'project_id': 'project1', - 'gigabytes': {'reserved': 0, 'in_use': 0}, - 'volumes': {'reserved': 0, 'in_use': 0}} - self.assertEqual(expected, - db.quota_usage_get_all_by_project( - self.ctxt, - 'project1')) - - -class DBAPIMessageTestCase(BaseTest): - - """Tests for message operations""" - def setUp(self): - super(DBAPIMessageTestCase, self).setUp() - self.context = context.get_admin_context() - - def _create_fake_messages(self, m_id, time): - db.message_create(self.context, - {'id': m_id, - 'event_id': m_id, - 'message_level': 'error', - 'project_id': 'fake_id', - 'expires_at': time}) - - def test_cleanup_expired_messages(self): - now = timeutils.utcnow() - # message expired 1 day ago - self._create_fake_messages( - uuidutils.generate_uuid(), now - datetime.timedelta(days=1)) - # message expired now - self._create_fake_messages( - uuidutils.generate_uuid(), now) - # message expired 1 day after - self._create_fake_messages( - uuidutils.generate_uuid(), now + datetime.timedelta(days=1)) - - with mock.patch.object(timeutils, 'utcnow') as mock_time_now: - mock_time_now.return_value = now - db.cleanup_expired_messages(self.context) - messages = db.message_get_all(self.context) - self.assertEqual(2, len(messages)) - - -class DBAPIQuotaClassTestCase(BaseTest): - - """Tests for db.api.quota_class_* methods.""" - - def setUp(self): - super(DBAPIQuotaClassTestCase, self).setUp() - self.sample_qc = db.quota_class_create(self.ctxt, 'test_qc', - 'test_resource', 42) - - def test_quota_class_get(self): - qc = db.quota_class_get(self.ctxt, 'test_qc', 'test_resource') - self._assertEqualObjects(self.sample_qc, qc) - - @mock.patch('oslo_utils.timeutils.utcnow', return_value=UTC_NOW) - def test_quota_class_destroy(self, utcnow_mock): - self.assertDictEqual( - {'deleted': True, 'deleted_at': UTC_NOW}, - db.quota_class_destroy(self.ctxt, 'test_qc', 'test_resource')) - self.assertRaises(exception.QuotaClassNotFound, - db.quota_class_get, self.ctxt, - 'test_qc', 'test_resource') - - 
def test_quota_class_get_not_found(self): - self.assertRaises(exception.QuotaClassNotFound, - db.quota_class_get, self.ctxt, 'nonexistent', - 'nonexistent') - - def test_quota_class_get_all_by_name(self): - db.quota_class_create(self.ctxt, 'test2', 'res1', 43) - db.quota_class_create(self.ctxt, 'test2', 'res2', 44) - self.assertEqual({'class_name': 'test_qc', 'test_resource': 42}, - db.quota_class_get_all_by_name(self.ctxt, 'test_qc')) - self.assertEqual({'class_name': 'test2', 'res1': 43, 'res2': 44}, - db.quota_class_get_all_by_name(self.ctxt, 'test2')) - - def test_quota_class_update(self): - db.quota_class_update(self.ctxt, 'test_qc', 'test_resource', 43) - updated = db.quota_class_get(self.ctxt, 'test_qc', 'test_resource') - self.assertEqual(43, updated['hard_limit']) - - def test_quota_class_update_resource(self): - old = db.quota_class_get(self.ctxt, 'test_qc', 'test_resource') - db.quota_class_update_resource(self.ctxt, - 'test_resource', - 'test_resource1') - new = db.quota_class_get(self.ctxt, 'test_qc', 'test_resource1') - self.assertEqual(old.id, new.id) - self.assertEqual('test_resource1', new.resource) - - def test_quota_class_destroy_all_by_name(self): - db.quota_class_create(self.ctxt, 'test2', 'res1', 43) - db.quota_class_create(self.ctxt, 'test2', 'res2', 44) - db.quota_class_destroy_all_by_name(self.ctxt, 'test2') - self.assertEqual({'class_name': 'test2'}, - db.quota_class_get_all_by_name(self.ctxt, 'test2')) - - -class DBAPIQuotaTestCase(BaseTest): - - """Tests for db.api.reservation_* methods.""" - - def test_quota_create(self): - quota = db.quota_create(self.ctxt, 'project1', 'resource', 99) - self.assertEqual('resource', quota.resource) - self.assertEqual(99, quota.hard_limit) - self.assertEqual('project1', quota.project_id) - - def test_quota_get(self): - quota = db.quota_create(self.ctxt, 'project1', 'resource', 99) - quota_db = db.quota_get(self.ctxt, 'project1', 'resource') - self._assertEqualObjects(quota, quota_db) - - def 
test_quota_get_all_by_project(self): - for i in range(3): - for j in range(3): - db.quota_create(self.ctxt, 'proj%d' % i, 'res%d' % j, j) - for i in range(3): - quotas_db = db.quota_get_all_by_project(self.ctxt, 'proj%d' % i) - self.assertEqual({'project_id': 'proj%d' % i, - 'res0': 0, - 'res1': 1, - 'res2': 2}, quotas_db) - - def test_quota_update(self): - db.quota_create(self.ctxt, 'project1', 'resource1', 41) - db.quota_update(self.ctxt, 'project1', 'resource1', 42) - quota = db.quota_get(self.ctxt, 'project1', 'resource1') - self.assertEqual(42, quota.hard_limit) - self.assertEqual('resource1', quota.resource) - self.assertEqual('project1', quota.project_id) - - def test_quota_update_resource(self): - old = db.quota_create(self.ctxt, 'project1', 'resource1', 41) - db.quota_update_resource(self.ctxt, 'resource1', 'resource2') - new = db.quota_get(self.ctxt, 'project1', 'resource2') - self.assertEqual(old.id, new.id) - self.assertEqual('resource2', new.resource) - - def test_quota_update_nonexistent(self): - self.assertRaises(exception.ProjectQuotaNotFound, - db.quota_update, - self.ctxt, - 'project1', - 'resource1', - 42) - - def test_quota_get_nonexistent(self): - self.assertRaises(exception.ProjectQuotaNotFound, - db.quota_get, - self.ctxt, - 'project1', - 'resource1') - - def test_quota_reserve(self): - reservations = _quota_reserve(self.ctxt, 'project1') - self.assertEqual(2, len(reservations)) - quota_usage = db.quota_usage_get_all_by_project(self.ctxt, 'project1') - self.assertEqual({'project_id': 'project1', - 'gigabytes': {'reserved': 2, 'in_use': 0}, - 'volumes': {'reserved': 1, 'in_use': 0}}, - quota_usage) - - @mock.patch('oslo_utils.timeutils.utcnow', return_value=UTC_NOW) - def test_quota_destroy(self, utcnow_mock): - db.quota_create(self.ctxt, 'project1', 'resource1', 41) - self.assertDictEqual( - {'deleted': True, 'deleted_at': UTC_NOW}, - db.quota_destroy(self.ctxt, 'project1', 'resource1')) - self.assertRaises(exception.ProjectQuotaNotFound, 
db.quota_get, - self.ctxt, 'project1', 'resource1') - - def test_quota_destroy_by_project(self): - # Create limits, reservations and usage for project - project = 'project1' - _quota_reserve(self.ctxt, project) - expected_usage = {'project_id': project, - 'volumes': {'reserved': 1, 'in_use': 0}, - 'gigabytes': {'reserved': 2, 'in_use': 0}} - expected = {'project_id': project, 'gigabytes': 2, 'volumes': 1} - - # Check that quotas are there - self.assertEqual(expected, - db.quota_get_all_by_project(self.ctxt, project)) - self.assertEqual(expected_usage, - db.quota_usage_get_all_by_project(self.ctxt, project)) - - # Destroy only the limits - db.quota_destroy_by_project(self.ctxt, project) - - # Confirm that limits have been removed - self.assertEqual({'project_id': project}, - db.quota_get_all_by_project(self.ctxt, project)) - - # But that usage and reservations are the same - self.assertEqual(expected_usage, - db.quota_usage_get_all_by_project(self.ctxt, project)) - - def test_quota_destroy_sqlalchemy_all_by_project_(self): - # Create limits, reservations and usage for project - project = 'project1' - _quota_reserve(self.ctxt, project) - expected_usage = {'project_id': project, - 'volumes': {'reserved': 1, 'in_use': 0}, - 'gigabytes': {'reserved': 2, 'in_use': 0}} - expected = {'project_id': project, 'gigabytes': 2, 'volumes': 1} - expected_result = {'project_id': project} - - # Check that quotas are there - self.assertEqual(expected, - db.quota_get_all_by_project(self.ctxt, project)) - self.assertEqual(expected_usage, - db.quota_usage_get_all_by_project(self.ctxt, project)) - - # Destroy all quotas using SQLAlchemy Implementation - sqlalchemy_api.quota_destroy_all_by_project(self.ctxt, project, - only_quotas=False) - - # Check that all quotas have been deleted - self.assertEqual(expected_result, - db.quota_get_all_by_project(self.ctxt, project)) - self.assertEqual(expected_result, - db.quota_usage_get_all_by_project(self.ctxt, project)) - - def 
test_quota_usage_get_nonexistent(self): - self.assertRaises(exception.QuotaUsageNotFound, - db.quota_usage_get, - self.ctxt, - 'p1', - 'nonexitent_resource') - - def test_quota_usage_get(self): - _quota_reserve(self.ctxt, 'p1') - quota_usage = db.quota_usage_get(self.ctxt, 'p1', 'gigabytes') - expected = {'resource': 'gigabytes', 'project_id': 'p1', - 'in_use': 0, 'reserved': 2, 'total': 2} - for key, value in expected.items(): - self.assertEqual(value, quota_usage[key], key) - - def test_quota_usage_get_all_by_project(self): - _quota_reserve(self.ctxt, 'p1') - expected = {'project_id': 'p1', - 'volumes': {'in_use': 0, 'reserved': 1}, - 'gigabytes': {'in_use': 0, 'reserved': 2}} - self.assertEqual(expected, db.quota_usage_get_all_by_project( - self.ctxt, 'p1')) - - -class DBAPIBackupTestCase(BaseTest): - - """Tests for db.api.backup_* methods.""" - - _ignored_keys = ['id', 'deleted', 'deleted_at', 'created_at', - 'updated_at', 'data_timestamp'] - - def setUp(self): - super(DBAPIBackupTestCase, self).setUp() - self.created = [db.backup_create(self.ctxt, values) - for values in self._get_values()] - - def _get_values(self, one=False): - base_values = { - 'user_id': fake.USER_ID, - 'project_id': fake.PROJECT_ID, - 'volume_id': 'volume', - 'host': 'host', - 'availability_zone': 'zone', - 'display_name': 'display', - 'display_description': 'description', - 'container': 'container', - 'status': 'status', - 'fail_reason': 'test', - 'service_metadata': 'metadata', - 'service': 'service', - 'parent_id': "parent_id", - 'size': 1000, - 'object_count': 100, - 'temp_volume_id': 'temp_volume_id', - 'temp_snapshot_id': 'temp_snapshot_id', - 'num_dependent_backups': 0, - 'snapshot_id': 'snapshot_id', - 'restore_volume_id': 'restore_volume_id'} - if one: - return base_values - - def compose(val, step): - if isinstance(val, bool): - return val - if isinstance(val, str): - step = str(step) - return val + step - - return [{k: compose(v, i) for k, v in base_values.items()} - for i in 
range(1, 4)] - - def test_backup_create(self): - values = self._get_values() - for i, backup in enumerate(self.created): - self.assertEqual(36, len(backup['id'])) # dynamic UUID - self._assertEqualObjects(values[i], backup, self._ignored_keys) - - def test_backup_get(self): - for backup in self.created: - backup_get = db.backup_get(self.ctxt, backup['id']) - self._assertEqualObjects(backup, backup_get) - - def test_backup_get_deleted(self): - backup_dic = {'user_id': fake.USER_ID, - 'project_id': fake.PROJECT_ID, - 'volume_id': fake.VOLUME_ID, - 'size': 1, - 'object_count': 1} - backup = objects.Backup(self.ctxt, **backup_dic) - backup.create() - backup.destroy() - backup_get = db.backup_get(self.ctxt, backup.id, read_deleted='yes') - self.assertEqual(backup.id, backup_get.id) - - def tests_backup_get_all(self): - all_backups = db.backup_get_all(self.ctxt) - self._assertEqualListsOfObjects(self.created, all_backups) - - def tests_backup_get_all_by_filter(self): - filters = {'status': self.created[1]['status']} - filtered_backups = db.backup_get_all(self.ctxt, filters=filters) - self._assertEqualListsOfObjects([self.created[1]], filtered_backups) - - filters = {'display_name': self.created[1]['display_name']} - filtered_backups = db.backup_get_all(self.ctxt, filters=filters) - self._assertEqualListsOfObjects([self.created[1]], filtered_backups) - - filters = {'volume_id': self.created[1]['volume_id']} - filtered_backups = db.backup_get_all(self.ctxt, filters=filters) - self._assertEqualListsOfObjects([self.created[1]], filtered_backups) - - filters = {'fake_key': 'fake'} - filtered_backups = db.backup_get_all(self.ctxt, filters=filters) - self._assertEqualListsOfObjects([], filtered_backups) - - def test_backup_get_all_by_host(self): - byhost = db.backup_get_all_by_host(self.ctxt, - self.created[1]['host']) - self._assertEqualObjects(self.created[1], byhost[0]) - - def test_backup_get_all_by_project(self): - byproj = db.backup_get_all_by_project(self.ctxt, - 
self.created[1]['project_id']) - self._assertEqualObjects(self.created[1], byproj[0]) - - byproj = db.backup_get_all_by_project(self.ctxt, - self.created[1]['project_id'], - {'fake_key': 'fake'}) - self._assertEqualListsOfObjects([], byproj) - - def test_backup_get_all_by_volume(self): - byvol = db.backup_get_all_by_volume(self.ctxt, - self.created[1]['volume_id']) - self._assertEqualObjects(self.created[1], byvol[0]) - - byvol = db.backup_get_all_by_volume(self.ctxt, - self.created[1]['volume_id'], - {'fake_key': 'fake'}) - self._assertEqualListsOfObjects([], byvol) - - def test_backup_update_nonexistent(self): - self.assertRaises(exception.BackupNotFound, - db.backup_update, - self.ctxt, 'nonexistent', {}) - - def test_backup_update(self): - updated_values = self._get_values(one=True) - update_id = self.created[1]['id'] - db.backup_update(self.ctxt, update_id, updated_values) - updated_backup = db.backup_get(self.ctxt, update_id) - self._assertEqualObjects(updated_values, updated_backup, - self._ignored_keys) - - def test_backup_update_with_fail_reason_truncation(self): - updated_values = self._get_values(one=True) - fail_reason = '0' * 512 - updated_values['fail_reason'] = fail_reason - - update_id = self.created[1]['id'] - db.backup_update(self.ctxt, update_id, updated_values) - updated_backup = db.backup_get(self.ctxt, update_id) - updated_values['fail_reason'] = fail_reason[:255] - self._assertEqualObjects(updated_values, updated_backup, - self._ignored_keys) - - @mock.patch('oslo_utils.timeutils.utcnow', return_value=UTC_NOW) - def test_backup_destroy(self, utcnow_mock): - for backup in self.created: - self.assertDictEqual( - {'status': fields.BackupStatus.DELETED, 'deleted': True, - 'deleted_at': UTC_NOW}, - db.backup_destroy(self.ctxt, backup['id'])) - self.assertFalse(db.backup_get_all(self.ctxt)) - - def test_backup_not_found(self): - self.assertRaises(exception.BackupNotFound, db.backup_get, self.ctxt, - 'notinbase') - - -class 
DBAPIProcessSortParamTestCase(test.TestCase): - - def test_process_sort_params_defaults(self): - """Verifies default sort parameters.""" - sort_keys, sort_dirs = sqlalchemy_api.process_sort_params([], []) - self.assertEqual(['created_at', 'id'], sort_keys) - self.assertEqual(['asc', 'asc'], sort_dirs) - - sort_keys, sort_dirs = sqlalchemy_api.process_sort_params(None, None) - self.assertEqual(['created_at', 'id'], sort_keys) - self.assertEqual(['asc', 'asc'], sort_dirs) - - def test_process_sort_params_override_default_keys(self): - """Verifies that the default keys can be overridden.""" - sort_keys, sort_dirs = sqlalchemy_api.process_sort_params( - [], [], default_keys=['key1', 'key2', 'key3']) - self.assertEqual(['key1', 'key2', 'key3'], sort_keys) - self.assertEqual(['asc', 'asc', 'asc'], sort_dirs) - - def test_process_sort_params_override_default_dir(self): - """Verifies that the default direction can be overridden.""" - sort_keys, sort_dirs = sqlalchemy_api.process_sort_params( - [], [], default_dir='dir1') - self.assertEqual(['created_at', 'id'], sort_keys) - self.assertEqual(['dir1', 'dir1'], sort_dirs) - - def test_process_sort_params_override_default_key_and_dir(self): - """Verifies that the default key and dir can be overridden.""" - sort_keys, sort_dirs = sqlalchemy_api.process_sort_params( - [], [], default_keys=['key1', 'key2', 'key3'], - default_dir='dir1') - self.assertEqual(['key1', 'key2', 'key3'], sort_keys) - self.assertEqual(['dir1', 'dir1', 'dir1'], sort_dirs) - - sort_keys, sort_dirs = sqlalchemy_api.process_sort_params( - [], [], default_keys=[], default_dir='dir1') - self.assertEqual([], sort_keys) - self.assertEqual([], sort_dirs) - - def test_process_sort_params_non_default(self): - """Verifies that non-default keys are added correctly.""" - sort_keys, sort_dirs = sqlalchemy_api.process_sort_params( - ['key1', 'key2'], ['asc', 'desc']) - self.assertEqual(['key1', 'key2', 'created_at', 'id'], sort_keys) - # First sort_dir in list is used 
when adding the default keys - self.assertEqual(['asc', 'desc', 'asc', 'asc'], sort_dirs) - - def test_process_sort_params_default(self): - """Verifies that default keys are added correctly.""" - sort_keys, sort_dirs = sqlalchemy_api.process_sort_params( - ['id', 'key2'], ['asc', 'desc']) - self.assertEqual(['id', 'key2', 'created_at'], sort_keys) - self.assertEqual(['asc', 'desc', 'asc'], sort_dirs) - - # Include default key value, rely on default direction - sort_keys, sort_dirs = sqlalchemy_api.process_sort_params( - ['id', 'key2'], []) - self.assertEqual(['id', 'key2', 'created_at'], sort_keys) - self.assertEqual(['asc', 'asc', 'asc'], sort_dirs) - - def test_process_sort_params_default_dir(self): - """Verifies that the default dir is applied to all keys.""" - # Direction is set, ignore default dir - sort_keys, sort_dirs = sqlalchemy_api.process_sort_params( - ['id', 'key2'], ['desc'], default_dir='dir') - self.assertEqual(['id', 'key2', 'created_at'], sort_keys) - self.assertEqual(['desc', 'desc', 'desc'], sort_dirs) - - # But should be used if no direction is set - sort_keys, sort_dirs = sqlalchemy_api.process_sort_params( - ['id', 'key2'], [], default_dir='dir') - self.assertEqual(['id', 'key2', 'created_at'], sort_keys) - self.assertEqual(['dir', 'dir', 'dir'], sort_dirs) - - def test_process_sort_params_unequal_length(self): - """Verifies that a sort direction list is applied correctly.""" - sort_keys, sort_dirs = sqlalchemy_api.process_sort_params( - ['id', 'key2', 'key3'], ['desc']) - self.assertEqual(['id', 'key2', 'key3', 'created_at'], sort_keys) - self.assertEqual(['desc', 'desc', 'desc', 'desc'], sort_dirs) - - # Default direction is the first key in the list - sort_keys, sort_dirs = sqlalchemy_api.process_sort_params( - ['id', 'key2', 'key3'], ['desc', 'asc']) - self.assertEqual(['id', 'key2', 'key3', 'created_at'], sort_keys) - self.assertEqual(['desc', 'asc', 'desc', 'desc'], sort_dirs) - - sort_keys, sort_dirs = 
sqlalchemy_api.process_sort_params( - ['id', 'key2', 'key3'], ['desc', 'asc', 'asc']) - self.assertEqual(['id', 'key2', 'key3', 'created_at'], sort_keys) - self.assertEqual(['desc', 'asc', 'asc', 'desc'], sort_dirs) - - def test_process_sort_params_extra_dirs_lengths(self): - """InvalidInput raised if more directions are given.""" - self.assertRaises(exception.InvalidInput, - sqlalchemy_api.process_sort_params, - ['key1', 'key2'], - ['asc', 'desc', 'desc']) - - def test_process_sort_params_invalid_sort_dir(self): - """InvalidInput raised if invalid directions are given.""" - for dirs in [['foo'], ['asc', 'foo'], ['asc', 'desc', 'foo']]: - self.assertRaises(exception.InvalidInput, - sqlalchemy_api.process_sort_params, - ['key'], - dirs) - - -class DBAPIDriverInitiatorDataTestCase(BaseTest): - initiator = 'iqn.1993-08.org.debian:01:222' - namespace = 'test_ns' - - def _test_insert(self, key, value, expected_result=True): - result = db.driver_initiator_data_insert_by_key( - self.ctxt, self.initiator, self.namespace, key, value) - self.assertEqual(expected_result, result) - - data = db.driver_initiator_data_get(self.ctxt, self.initiator, - self.namespace) - self.assertEqual(data[0].key, key) - self.assertEqual(data[0].value, value) - - def test_insert(self): - self._test_insert('key1', 'foo') - - def test_insert_already_exists(self): - self._test_insert('key2', 'bar') - self._test_insert('key2', 'bar', expected_result=False) - - -@ddt.ddt -class DBAPIImageVolumeCacheEntryTestCase(BaseTest): - - def _validate_entry(self, entry, host, cluster_name, image_id, - image_updated_at, volume_id, size): - self.assertIsNotNone(entry) - self.assertIsNotNone(entry['id']) - self.assertEqual(host, entry['host']) - self.assertEqual(cluster_name, entry['cluster_name']) - self.assertEqual(image_id, entry['image_id']) - self.assertEqual(image_updated_at, entry['image_updated_at']) - self.assertEqual(volume_id, entry['volume_id']) - self.assertEqual(size, entry['size']) - 
self.assertIsNotNone(entry['last_used']) - - def test_create_delete_query_cache_entry(self): - host = 'abc@123#poolz' - cluster_name = 'def@123#poolz' - image_id = 'c06764d7-54b0-4471-acce-62e79452a38b' - image_updated_at = datetime.datetime.utcnow() - volume_id = 'e0e4f819-24bb-49e6-af1e-67fb77fc07d1' - size = 6 - - entry = db.image_volume_cache_create(self.ctxt, host, cluster_name, - image_id, image_updated_at, - volume_id, size) - self._validate_entry(entry, host, cluster_name, image_id, - image_updated_at, volume_id, size) - - entry = db.image_volume_cache_get_and_update_last_used(self.ctxt, - image_id, - host=host) - self._validate_entry(entry, host, cluster_name, image_id, - image_updated_at, volume_id, size) - - entry = db.image_volume_cache_get_by_volume_id(self.ctxt, volume_id) - self._validate_entry(entry, host, cluster_name, image_id, - image_updated_at, volume_id, size) - - db.image_volume_cache_delete(self.ctxt, entry['volume_id']) - - entry = db.image_volume_cache_get_and_update_last_used(self.ctxt, - image_id, - host=host) - self.assertIsNone(entry) - - def test_cache_entry_get_multiple(self): - host = 'abc@123#poolz' - cluster_name = 'def@123#poolz' - image_id = 'c06764d7-54b0-4471-acce-62e79452a38b' - image_updated_at = datetime.datetime.utcnow() - volume_id = 'e0e4f819-24bb-49e6-af1e-67fb77fc07d1' - size = 6 - - entries = [] - for i in range(0, 3): - entries.append(db.image_volume_cache_create(self.ctxt, - host, - cluster_name, - image_id, - image_updated_at, - volume_id, - size)) - # It is considered OK for the cache to have multiple of the same - # entries. Expect only a single one from the query. - entry = db.image_volume_cache_get_and_update_last_used(self.ctxt, - image_id, - host=host) - self._validate_entry(entry, host, cluster_name, image_id, - image_updated_at, volume_id, size) - - # We expect to get the same one on subsequent queries due to the - # last_used field being updated each time and ordering by it. 
- entry_id = entry['id'] - entry = db.image_volume_cache_get_and_update_last_used(self.ctxt, - image_id, - host=host) - self._validate_entry(entry, host, cluster_name, image_id, - image_updated_at, volume_id, size) - self.assertEqual(entry_id, entry['id']) - - # Cleanup - for entry in entries: - db.image_volume_cache_delete(self.ctxt, entry['volume_id']) - - def test_cache_entry_get_none(self): - host = 'abc@123#poolz' - image_id = 'c06764d7-54b0-4471-acce-62e79452a38b' - entry = db.image_volume_cache_get_and_update_last_used(self.ctxt, - image_id, - host=host) - self.assertIsNone(entry) - - def test_cache_entry_get_by_volume_id_none(self): - volume_id = 'e0e4f819-24bb-49e6-af1e-67fb77fc07d1' - entry = db.image_volume_cache_get_by_volume_id(self.ctxt, volume_id) - self.assertIsNone(entry) - - def test_cache_entry_get_all_for_host(self): - host = 'abc@123#poolz' - image_updated_at = datetime.datetime.utcnow() - size = 6 - - entries = [] - for i in range(0, 3): - entries.append(db.image_volume_cache_create(self.ctxt, - host, - 'cluster-%s' % i, - 'image-' + str(i), - image_updated_at, - 'vol-' + str(i), - size)) - - other_entry = db.image_volume_cache_create(self.ctxt, - 'someOtherHost', - 'someOtherCluster', - 'image-12345', - image_updated_at, - 'vol-1234', - size) - - found_entries = db.image_volume_cache_get_all(self.ctxt, host=host) - self.assertIsNotNone(found_entries) - self.assertEqual(len(entries), len(found_entries)) - for found_entry in found_entries: - for entry in entries: - if found_entry['id'] == entry['id']: - self._validate_entry(found_entry, - entry['host'], - entry['cluster_name'], - entry['image_id'], - entry['image_updated_at'], - entry['volume_id'], - entry['size']) - - # Cleanup - db.image_volume_cache_delete(self.ctxt, other_entry['volume_id']) - for entry in entries: - db.image_volume_cache_delete(self.ctxt, entry['volume_id']) - - def test_cache_entry_get_all_for_host_none(self): - host = 'abc@123#poolz' - entries = 
db.image_volume_cache_get_all(self.ctxt, host=host) - self.assertEqual([], entries) - - @ddt.data('host1@backend1#pool1', 'host1@backend1') - def test_cache_entry_include_in_cluster_by_host(self, host): - """Basic cache include test filtering by host and with full rename.""" - image_updated_at = datetime.datetime.utcnow() - image_cache = ( - db.image_volume_cache_create( - self.ctxt, 'host1@backend1#pool1', 'cluster1@backend1#pool1', - 'image-1', image_updated_at, 'vol-1', 6), - db.image_volume_cache_create( - self.ctxt, 'host1@backend2#pool2', 'cluster1@backend2#pool2', - 'image-2', image_updated_at, 'vol-2', 6), - db.image_volume_cache_create( - self.ctxt, 'host2@backend#pool', 'cluster2@backend#pool', - 'image-3', image_updated_at, 'vol-3', 6), - - ) - - cluster_name = 'my_cluster' - result = db.image_volume_cache_include_in_cluster(self.ctxt, - cluster_name, - partial_rename=False, - host=host) - self.assertEqual(1, result) - db_image_cache = db.image_volume_cache_get_by_volume_id( - self.ctxt, image_cache[0].volume_id) - self.assertEqual(cluster_name, db_image_cache.cluster_name) - - -class DBAPIGenericTestCase(BaseTest): - def test_resource_exists_volume(self): - # NOTE(geguileo): We create 2 volumes in this test (even if the second - # one is not being used) to confirm that the DB exists subquery is - # properly formulated and doesn't result in multiple rows, as such - # case would raise an exception when converting the result to an - # scalar. 
This would happen if for example the query wasn't generated - # directly using get_session but using model_query like this: - # query = model_query(context, model, - # sql.exists().where(and_(*conditions))) - # Instead of what we do: - # query = get_session().query(sql.exists().where(and_(*conditions))) - db.volume_create(self.ctxt, {'id': fake.VOLUME_ID}) - db.volume_create(self.ctxt, {'id': fake.VOLUME2_ID}) - model = db.get_model_for_versioned_object(objects.Volume) - res = sqlalchemy_api.resource_exists(self.ctxt, model, fake.VOLUME_ID) - self.assertTrue(res, msg="Couldn't find existing Volume") - - def test_resource_exists_volume_fails(self): - db.volume_create(self.ctxt, {'id': fake.VOLUME_ID}) - model = db.get_model_for_versioned_object(objects.Volume) - res = sqlalchemy_api.resource_exists(self.ctxt, model, fake.VOLUME2_ID) - self.assertFalse(res, msg='Found nonexistent Volume') - - def test_resource_exists_snapshot(self): - # Read NOTE in test_resource_exists_volume on why we create 2 snapshots - vol = db.volume_create(self.ctxt, {'id': fake.VOLUME_ID}) - db.snapshot_create(self.ctxt, {'id': fake.SNAPSHOT_ID, - 'volume_id': vol.id}) - db.snapshot_create(self.ctxt, {'id': fake.SNAPSHOT2_ID, - 'volume_id': vol.id}) - model = db.get_model_for_versioned_object(objects.Snapshot) - res = sqlalchemy_api.resource_exists(self.ctxt, model, - fake.SNAPSHOT_ID) - self.assertTrue(res, msg="Couldn't find existing Snapshot") - - def test_resource_exists_snapshot_fails(self): - vol = db.volume_create(self.ctxt, {'id': fake.VOLUME_ID}) - db.snapshot_create(self.ctxt, {'id': fake.SNAPSHOT_ID, - 'volume_id': vol.id}) - model = db.get_model_for_versioned_object(objects.Snapshot) - res = sqlalchemy_api.resource_exists(self.ctxt, model, - fake.SNAPSHOT2_ID) - self.assertFalse(res, msg='Found nonexistent Snapshot') - - def test_resource_exists_volume_project_separation(self): - user_context = context.RequestContext(fake.USER_ID, fake.PROJECT_ID, - is_admin=False) - user2_context 
= context.RequestContext(fake.USER2_ID, fake.PROJECT2_ID, - is_admin=False) - volume = db.volume_create(user_context, - {'project_id': fake.PROJECT_ID}) - model = db.get_model_for_versioned_object(objects.Volume) - - # Owner can find it - res = sqlalchemy_api.resource_exists(user_context, model, volume.id) - self.assertTrue(res, msg='Owner cannot find its own Volume') - - # Non admin user that is not the owner cannot find it - res = sqlalchemy_api.resource_exists(user2_context, model, volume.id) - self.assertFalse(res, msg="Non admin user can find somebody else's " - "volume") - - # Admin can find it - res = sqlalchemy_api.resource_exists(self.ctxt, model, volume.id) - self.assertTrue(res, msg="Admin cannot find the volume") - - def test_resource_exists_snapshot_project_separation(self): - user_context = context.RequestContext(fake.USER_ID, fake.PROJECT_ID, - is_admin=False) - user2_context = context.RequestContext(fake.USER2_ID, fake.PROJECT2_ID, - is_admin=False) - vol = db.volume_create(user_context, {'project_id': fake.PROJECT_ID}) - snap = db.snapshot_create(self.ctxt, {'project_id': fake.PROJECT_ID, - 'volume_id': vol.id}) - model = db.get_model_for_versioned_object(objects.Snapshot) - - # Owner can find it - res = sqlalchemy_api.resource_exists(user_context, model, snap.id) - self.assertTrue(res, msg='Owner cannot find its own Snapshot') - - # Non admin user that is not the owner cannot find it - res = sqlalchemy_api.resource_exists(user2_context, model, snap.id) - self.assertFalse(res, msg="Non admin user can find somebody else's " - "Snapshot") - - # Admin can find it - res = sqlalchemy_api.resource_exists(self.ctxt, model, snap.id) - self.assertTrue(res, msg="Admin cannot find the Snapshot") - - -@ddt.ddt -class DBAPIBackendTestCase(BaseTest): - @ddt.data((True, True), (True, False), (False, True), (False, False)) - @ddt.unpack - def test_is_backend_frozen_service(self, frozen, pool): - service = utils.create_service(self.ctxt, {'frozen': frozen}) - 
utils.create_service(self.ctxt, {'host': service.host + '2', - 'frozen': not frozen}) - host = service.host - if pool: - host += '#poolname' - self.assertEqual(frozen, db.is_backend_frozen(self.ctxt, host, - service.cluster_name)) - - @ddt.data((True, True), (True, False), (False, True), (False, False)) - @ddt.unpack - def test_is_backend_frozen_cluster(self, frozen, pool): - cluster = utils.create_cluster(self.ctxt, frozen=frozen) - utils.create_service(self.ctxt, {'frozen': frozen, 'host': 'hostA', - 'cluster_name': cluster.name}) - service = utils.create_service(self.ctxt, - {'frozen': not frozen, - 'host': 'hostB', - 'cluster_name': cluster.name}) - utils.create_populated_cluster(self.ctxt, 3, 0, frozen=not frozen, - name=cluster.name + '2') - host = service.host - cluster = service.cluster_name - if pool: - host += '#poolname' - cluster += '#poolname' - self.assertEqual(frozen, - db.is_backend_frozen(self.ctxt, host, cluster)) - - -class DBAPIGroupTestCase(BaseTest): - def test_group_get_all_by_host(self): - grp_type = db.group_type_create(self.ctxt, {'name': 'my_group_type'}) - groups = [] - backend = 'host1@lvm' - for i in range(3): - groups.append([db.group_create( - self.ctxt, - {'host': '%(b)s%(n)d' % {'b': backend, 'n': i}, - 'group_type_id': grp_type['id']}) - for j in range(3)]) - - for i in range(3): - host = '%(b)s%(n)d' % {'b': backend, 'n': i} - filters = {'host': host, 'backend_match_level': 'backend'} - grps = db.group_get_all( - self.ctxt, filters=filters) - self._assertEqualListsOfObjects(groups[i], grps) - for grp in grps: - db.group_destroy(self.ctxt, grp['id']) - - db.group_type_destroy(self.ctxt, grp_type['id']) - - def test_group_get_all_by_host_with_pools(self): - grp_type = db.group_type_create(self.ctxt, {'name': 'my_group_type'}) - groups = [] - backend = 'host1@lvm' - pool = '%s#pool1' % backend - grp_on_host_wo_pool = [db.group_create( - self.ctxt, - {'host': backend, - 'group_type_id': grp_type['id']}) - for j in range(3)] - 
grp_on_host_w_pool = [db.group_create( - self.ctxt, - {'host': pool, - 'group_type_id': grp_type['id']})] - groups.append(grp_on_host_wo_pool + grp_on_host_w_pool) - # insert an additional record that doesn't belongs to the same - # host as 'foo' and test if it is included in the result - grp_foobar = db.group_create(self.ctxt, - {'host': '%sfoo' % backend, - 'group_type_id': grp_type['id']}) - - filters = {'host': backend, 'backend_match_level': 'backend'} - grps = db.group_get_all(self.ctxt, filters=filters) - self._assertEqualListsOfObjects(groups[0], grps) - for grp in grps: - db.group_destroy(self.ctxt, grp['id']) - - db.group_destroy(self.ctxt, grp_foobar['id']) - - db.group_type_destroy(self.ctxt, grp_type['id']) diff --git a/cinder/tests/unit/test_db_worker_api.py b/cinder/tests/unit/test_db_worker_api.py deleted file mode 100644 index 44721c449..000000000 --- a/cinder/tests/unit/test_db_worker_api.py +++ /dev/null @@ -1,305 +0,0 @@ -# Copyright (c) 2016 Red Hat, Inc. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -"""Unit tests for cinder.db.api.Worker""" - -from datetime import datetime -import time -import uuid - -import mock -from oslo_db import exception as db_exception -import six - -from cinder import context -from cinder import db -from cinder import exception -from cinder import test -from cinder.tests.unit import fake_constants as fake - - -class DBAPIWorkerTestCase(test.TestCase, test.ModelsObjectComparatorMixin): - worker_fields = {'resource_type': 'Volume', - 'resource_id': fake.VOLUME_ID, - 'status': 'creating'} - - def _uuid(self): - return six.text_type(uuid.uuid4()) - - def setUp(self): - super(DBAPIWorkerTestCase, self).setUp() - self.ctxt = context.get_admin_context() - - def tearDown(self): - db.sqlalchemy.api.DB_SUPPORTS_SUBSECOND_RESOLUTION = True - super(DBAPIWorkerTestCase, self).tearDown() - - def test_workers_init(self): - # SQLite supports subsecond resolution so result is True - db.sqlalchemy.api.DB_SUPPORTS_SUBSECOND_RESOLUTION = None - db.workers_init() - self.assertTrue(db.sqlalchemy.api.DB_SUPPORTS_SUBSECOND_RESOLUTION) - - def test_workers_init_not_supported(self): - # Fake a Db that doesn't support sub-second resolution in datetimes - db.worker_update( - self.ctxt, None, - {'resource_type': 'SENTINEL', 'ignore_sentinel': False}, - updated_at=datetime.utcnow().replace(microsecond=0)) - db.workers_init() - self.assertFalse(db.sqlalchemy.api.DB_SUPPORTS_SUBSECOND_RESOLUTION) - - def test_worker_create_and_get(self): - """Test basic creation of a worker record.""" - worker = db.worker_create(self.ctxt, **self.worker_fields) - db_worker = db.worker_get(self.ctxt, id=worker.id) - self._assertEqualObjects(worker, db_worker) - - @mock.patch('oslo_utils.timeutils.utcnow', - return_value=datetime.utcnow().replace(microsecond=123)) - def test_worker_create_no_subsecond(self, mock_utcnow): - """Test basic creation of a worker record.""" - db.sqlalchemy.api.DB_SUPPORTS_SUBSECOND_RESOLUTION = False - worker = db.worker_create(self.ctxt, 
**self.worker_fields) - db_worker = db.worker_get(self.ctxt, id=worker.id) - self._assertEqualObjects(worker, db_worker) - self.assertEqual(0, db_worker.updated_at.microsecond) - - def test_worker_create_unique_constrains(self): - """Test when we use an already existing resource type and id.""" - db.worker_create(self.ctxt, **self.worker_fields) - self.assertRaises(exception.WorkerExists, db.worker_create, - self.ctxt, - resource_type=self.worker_fields['resource_type'], - resource_id=self.worker_fields['resource_id'], - status='not_' + self.worker_fields['status']) - - def test_worker_create_missing_required_field(self): - """Try creating a worker with a missing required field.""" - for field in self.worker_fields: - params = self.worker_fields.copy() - del params[field] - self.assertRaises(db_exception.DBError, db.worker_create, - self.ctxt, **params) - - def test_worker_create_invalid_field(self): - """Try creating a worker with a non existent db field.""" - self.assertRaises(TypeError, db.worker_create, self.ctxt, - myfield='123', **self.worker_fields) - - def test_worker_get_non_existent(self): - """Check basic non existent worker record get method.""" - db.worker_create(self.ctxt, **self.worker_fields) - self.assertRaises(exception.WorkerNotFound, db.worker_get, - self.ctxt, service_id='1', **self.worker_fields) - - def _create_workers(self, num, read_back=False, **fields): - workers = [] - base_params = self.worker_fields.copy() - base_params.update(fields) - - for i in range(num): - params = base_params.copy() - params['resource_id'] = self._uuid() - workers.append(db.worker_create(self.ctxt, **params)) - - if read_back: - for i in range(len(workers)): - workers[i] = db.worker_get(self.ctxt, id=workers[i].id) - - return workers - - def test_worker_get_all(self): - """Test basic get_all method.""" - self._create_workers(1) - service = db.service_create(self.ctxt, {}) - workers = self._create_workers(3, service_id=service.id) - - db_workers = 
db.worker_get_all(self.ctxt, service_id=service.id) - self._assertEqualListsOfObjects(workers, db_workers) - - def test_worker_get_all_until(self): - """Test get_all until a specific time.""" - workers = self._create_workers(3, read_back=True) - timestamp = workers[-1].updated_at - time.sleep(0.1) - self._create_workers(3) - - db_workers = db.worker_get_all(self.ctxt, until=timestamp) - self._assertEqualListsOfObjects(workers, db_workers) - - def test_worker_get_all_returns_empty(self): - """Test that get_all returns an empty list when there's no results.""" - self._create_workers(3, deleted=True) - db_workers = db.worker_get_all(self.ctxt) - self.assertListEqual([], db_workers) - - def test_worker_update_not_exists(self): - """Test worker update when the worker doesn't exist.""" - self.assertRaises(exception.WorkerNotFound, db.worker_update, - self.ctxt, 1) - - def test_worker_update(self): - """Test basic worker update.""" - worker = self._create_workers(1)[0] - worker = db.worker_get(self.ctxt, id=worker.id) - res = db.worker_update(self.ctxt, worker.id, service_id=1) - self.assertEqual(1, res) - worker.service_id = 1 - - db_worker = db.worker_get(self.ctxt, id=worker.id) - self._assertEqualObjects(worker, db_worker, - ['updated_at', 'race_preventer']) - self.assertEqual(worker.race_preventer + 1, db_worker.race_preventer) - - def test_worker_update_no_subsecond(self): - """Test basic worker update.""" - db.sqlalchemy.api.DB_SUPPORTS_SUBSECOND_RESOLUTION = False - worker = self._create_workers(1)[0] - worker = db.worker_get(self.ctxt, id=worker.id) - now = datetime.utcnow().replace(microsecond=123) - with mock.patch('oslo_utils.timeutils.utcnow', return_value=now): - res = db.worker_update(self.ctxt, worker.id, service_id=1) - self.assertEqual(1, res) - worker.service_id = 1 - - db_worker = db.worker_get(self.ctxt, id=worker.id) - self._assertEqualObjects(worker, db_worker, - ['updated_at', 'race_preventer']) - self.assertEqual(0, 
db_worker.updated_at.microsecond) - self.assertEqual(worker.race_preventer + 1, db_worker.race_preventer) - - def test_worker_update_update_orm(self): - """Test worker update updating the worker orm object.""" - worker = self._create_workers(1)[0] - res = db.worker_update(self.ctxt, worker.id, orm_worker=worker, - service_id=1) - self.assertEqual(1, res) - - db_worker = db.worker_get(self.ctxt, id=worker.id) - # If we are updating the ORM object we don't ignore the update_at field - # because it will get updated in the ORM instance. - self._assertEqualObjects(worker, db_worker) - - def test_worker_destroy(self): - """Test that worker destroy really deletes the DB entry.""" - worker = self._create_workers(1)[0] - res = db.worker_destroy(self.ctxt, id=worker.id) - self.assertEqual(1, res) - - db_workers = db.worker_get_all(self.ctxt, read_deleted='yes') - self.assertListEqual([], db_workers) - - def test_worker_destroy_non_existent(self): - """Test that worker destroy returns 0 when entry doesn't exist.""" - res = db.worker_destroy(self.ctxt, id=100) - self.assertEqual(0, res) - - def test_worker_claim(self): - """Test worker claim of normal DB entry.""" - service_id = 1 - worker = db.worker_create(self.ctxt, resource_type='Volume', - resource_id=fake.VOLUME_ID, - status='deleting') - - res = db.worker_claim_for_cleanup(self.ctxt, service_id, worker) - self.assertEqual(1, res) - - db_worker = db.worker_get(self.ctxt, id=worker.id) - - self._assertEqualObjects(worker, db_worker, ['updated_at']) - self.assertEqual(service_id, db_worker.service_id) - self.assertEqual(worker.service_id, db_worker.service_id) - - def test_worker_claim_fails_status_change(self): - """Test that claim fails if the work entry has changed its status.""" - worker = db.worker_create(self.ctxt, resource_type='Volume', - resource_id=fake.VOLUME_ID, - status='deleting') - worker.status = 'creating' - - res = db.worker_claim_for_cleanup(self.ctxt, 1, worker) - self.assertEqual(0, res) - - db_worker 
= db.worker_get(self.ctxt, id=worker.id) - self._assertEqualObjects(worker, db_worker, ['status']) - self.assertIsNone(db_worker.service_id) - - def test_worker_claim_fails_service_change(self): - """Test that claim fails on worker service change.""" - failed_service = 1 - working_service = 2 - this_service = 3 - worker = db.worker_create(self.ctxt, resource_type='Volume', - resource_id=fake.VOLUME_ID, - status='deleting', - service_id=working_service) - - worker.service_id = failed_service - res = db.worker_claim_for_cleanup(self.ctxt, this_service, worker) - self.assertEqual(0, res) - db_worker = db.worker_get(self.ctxt, id=worker.id) - self.assertEqual(working_service, db_worker.service_id) - - def test_worker_claim_same_service(self): - """Test worker claim of a DB entry that has our service_id.""" - service_id = 1 - worker = db.worker_create(self.ctxt, resource_type='Volume', - resource_id=fake.VOLUME_ID, - status='deleting', service_id=service_id) - # Read from DB to get updated_at field - worker = db.worker_get(self.ctxt, id=worker.id) - claimed_worker = db.worker_get(self.ctxt, id=worker.id) - - res = db.worker_claim_for_cleanup(self.ctxt, - service_id, - claimed_worker) - self.assertEqual(1, res) - - db_worker = db.worker_get(self.ctxt, id=worker.id) - - self._assertEqualObjects(claimed_worker, db_worker) - self._assertEqualObjects(worker, db_worker, - ['updated_at', 'race_preventer']) - self.assertNotEqual(worker.updated_at, db_worker.updated_at) - self.assertEqual(worker.race_preventer + 1, db_worker.race_preventer) - - def test_worker_claim_fails_this_service_claimed(self): - """Test claim fails when worker was already claimed by this service.""" - service_id = 1 - worker = db.worker_create(self.ctxt, resource_type='Volume', - resource_id=fake.VOLUME_ID, - status='creating', - service_id=service_id) - - # Read it back to have the updated_at value - worker = db.worker_get(self.ctxt, id=worker.id) - claimed_worker = db.worker_get(self.ctxt, id=worker.id) 
- - time.sleep(0.1) - # Simulate that this service starts processing this entry - res = db.worker_claim_for_cleanup(self.ctxt, - service_id, - claimed_worker) - self.assertEqual(1, res) - - res = db.worker_claim_for_cleanup(self.ctxt, service_id, worker) - self.assertEqual(0, res) - db_worker = db.worker_get(self.ctxt, id=worker.id) - self._assertEqualObjects(claimed_worker, db_worker) - self._assertEqualObjects(worker, db_worker, - ['updated_at', 'race_preventer']) - self.assertNotEqual(worker.updated_at, db_worker.updated_at) - self.assertEqual(worker.race_preventer + 1, db_worker.race_preventer) diff --git a/cinder/tests/unit/test_evaluator.py b/cinder/tests/unit/test_evaluator.py deleted file mode 100644 index 4ff3384ee..000000000 --- a/cinder/tests/unit/test_evaluator.py +++ /dev/null @@ -1,139 +0,0 @@ -# Copyright (c) 2014 Hewlett-Packard Development Company, L.P. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -from cinder import exception -from cinder.scheduler.evaluator import evaluator -from cinder import test - - -class EvaluatorTestCase(test.TestCase): - def test_simple_integer(self): - self.assertEqual(2, evaluator.evaluate("1+1")) - self.assertEqual(9, evaluator.evaluate("2+3+4")) - self.assertEqual(23, evaluator.evaluate("11+12")) - self.assertEqual(30, evaluator.evaluate("5*6")) - self.assertEqual(2, evaluator.evaluate("22/11")) - self.assertEqual(38, evaluator.evaluate("109-71")) - self.assertEqual( - 493, evaluator.evaluate("872 - 453 + 44 / 22 * 4 + 66")) - - def test_simple_float(self): - self.assertEqual(2.0, evaluator.evaluate("1.0 + 1.0")) - self.assertEqual(2.5, evaluator.evaluate("1.5 + 1.0")) - self.assertEqual(3.0, evaluator.evaluate("1.5 * 2.0")) - - def test_int_float_mix(self): - self.assertEqual(2.5, evaluator.evaluate("1.5 + 1")) - self.assertEqual(4.25, evaluator.evaluate("8.5 / 2")) - self.assertEqual(5.25, evaluator.evaluate("10/4+0.75 + 2")) - - def test_negative_numbers(self): - self.assertEqual(-2, evaluator.evaluate("-2")) - self.assertEqual(-1, evaluator.evaluate("-2+1")) - self.assertEqual(3, evaluator.evaluate("5+-2")) - - def test_exponent(self): - self.assertEqual(8, evaluator.evaluate("2^3")) - self.assertEqual(-8, evaluator.evaluate("-2 ^ 3")) - self.assertEqual(15.625, evaluator.evaluate("2.5 ^ 3")) - self.assertEqual(8, evaluator.evaluate("4 ^ 1.5")) - - def test_function(self): - self.assertEqual(5, evaluator.evaluate("abs(-5)")) - self.assertEqual(2, evaluator.evaluate("abs(2)")) - self.assertEqual(1, evaluator.evaluate("min(1, 100)")) - self.assertEqual(100, evaluator.evaluate("max(1, 100)")) - - def test_parentheses(self): - self.assertEqual(1, evaluator.evaluate("(1)")) - self.assertEqual(-1, evaluator.evaluate("(-1)")) - self.assertEqual(2, evaluator.evaluate("(1+1)")) - self.assertEqual(15, evaluator.evaluate("(1+2) * 5")) - self.assertEqual(3, evaluator.evaluate("(1+2)*(3-1)/((1+(2-1)))")) - self.assertEqual( - -8.0, 
evaluator. evaluate("((1.0 / 0.5) * (2)) *(-2)")) - - def test_comparisons(self): - self.assertTrue(evaluator.evaluate("1 < 2")) - self.assertTrue(evaluator.evaluate("2 > 1")) - self.assertTrue(evaluator.evaluate("2 != 1")) - self.assertFalse(evaluator.evaluate("1 > 2")) - self.assertFalse(evaluator.evaluate("2 < 1")) - self.assertFalse(evaluator.evaluate("2 == 1")) - self.assertTrue(evaluator.evaluate("(1 == 1) == !(1 == 2)")) - - def test_logic_ops(self): - self.assertTrue(evaluator.evaluate("(1 == 1) AND (2 == 2)")) - self.assertTrue(evaluator.evaluate("(1 == 1) and (2 == 2)")) - self.assertTrue(evaluator.evaluate("(1 == 1) && (2 == 2)")) - self.assertFalse(evaluator.evaluate("(1 == 1) && (5 == 2)")) - - self.assertTrue(evaluator.evaluate("(1 == 1) OR (5 == 2)")) - self.assertTrue(evaluator.evaluate("(1 == 1) or (5 == 2)")) - self.assertTrue(evaluator.evaluate("(1 == 1) || (5 == 2)")) - self.assertFalse(evaluator.evaluate("(5 == 1) || (5 == 2)")) - - self.assertFalse(evaluator.evaluate("(1 == 1) AND NOT (2 == 2)")) - self.assertFalse(evaluator.evaluate("(1 == 1) AND not (2 == 2)")) - self.assertFalse(evaluator.evaluate("(1 == 1) AND !(2 == 2)")) - self.assertTrue(evaluator.evaluate("(1 == 1) AND NOT (5 == 2)")) - self.assertTrue(evaluator.evaluate("(1 == 1) OR NOT (2 == 2) " - "AND (5 == 5)")) - - def test_ternary_conditional(self): - self.assertEqual(5, evaluator.evaluate("(1 < 2) ? 5 : 10")) - self.assertEqual(10, evaluator.evaluate("(1 > 2) ? 
5 : 10")) - - def test_variables_dict(self): - stats = {'iops': 1000, 'usage': 0.65, 'count': 503, 'free_space': 407} - request = {'iops': 500, 'size': 4} - self.assertEqual(1500, evaluator.evaluate("stats.iops + request.iops", - stats=stats, - request=request)) - - def test_missing_var(self): - stats = {'iops': 1000, 'usage': 0.65, 'count': 503, 'free_space': 407} - request = {'iops': 500, 'size': 4} - self.assertRaises(exception.EvaluatorParseException, - evaluator.evaluate, - "foo.bob + 5", - stats=stats, request=request) - self.assertRaises(exception.EvaluatorParseException, - evaluator.evaluate, - "stats.bob + 5", - stats=stats, request=request) - self.assertRaises(exception.EvaluatorParseException, - evaluator.evaluate, - "fake.var + 1", - stats=stats, request=request, fake=None) - - def test_bad_expression(self): - self.assertRaises(exception.EvaluatorParseException, - evaluator.evaluate, - "1/*1") - - def test_nonnumber_comparison(self): - nonnumber = {'test': 'foo'} - request = {'test': 'bar'} - self.assertRaises( - exception.EvaluatorParseException, - evaluator.evaluate, - "nonnumber.test != request.test", - nonnumber=nonnumber, request=request) - - def test_div_zero(self): - self.assertRaises(exception.EvaluatorParseException, - evaluator.evaluate, - "7 / 0") diff --git a/cinder/tests/unit/test_exception.py b/cinder/tests/unit/test_exception.py deleted file mode 100644 index 909618a86..000000000 --- a/cinder/tests/unit/test_exception.py +++ /dev/null @@ -1,141 +0,0 @@ - -# Copyright 2010 United States Government as represented by the -# Administrator of the National Aeronautics and Space Administration. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. 
You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from cinder import exception -from cinder import test - -import mock -import six -from six.moves import http_client -import webob.util - - -class ExceptionTestCase(test.TestCase): - @staticmethod - def _raise_exc(exc): - raise exc() - - def test_exceptions_raise(self): - # NOTE(dprince): disable format errors since we are not passing kwargs - self.flags(fatal_exception_format_errors=False) - for name in dir(exception): - exc = getattr(exception, name) - if isinstance(exc, type): - self.assertRaises(exc, self._raise_exc, exc) - - -class CinderExceptionTestCase(test.TestCase): - def test_default_error_msg(self): - class FakeCinderException(exception.CinderException): - message = "default message" - - exc = FakeCinderException() - self.assertEqual('default message', six.text_type(exc)) - - def test_error_msg(self): - self.assertEqual('test', - six.text_type(exception.CinderException('test'))) - - def test_default_error_msg_with_kwargs(self): - class FakeCinderException(exception.CinderException): - message = "default message: %(code)s" - - exc = FakeCinderException(code=int(http_client.INTERNAL_SERVER_ERROR)) - self.assertEqual('default message: 500', six.text_type(exc)) - - def test_error_msg_exception_with_kwargs(self): - # NOTE(dprince): disable format errors for this test - self.flags(fatal_exception_format_errors=False) - - class FakeCinderException(exception.CinderException): - message = "default message: %(misspelled_code)s" - - exc = FakeCinderException(code=http_client.INTERNAL_SERVER_ERROR) - self.assertEqual('default message: 
%(misspelled_code)s', - six.text_type(exc)) - - def test_default_error_code(self): - class FakeCinderException(exception.CinderException): - code = http_client.NOT_FOUND - - exc = FakeCinderException() - self.assertEqual(http_client.NOT_FOUND, exc.kwargs['code']) - - def test_error_code_from_kwarg(self): - class FakeCinderException(exception.CinderException): - code = http_client.INTERNAL_SERVER_ERROR - - exc = FakeCinderException(code=http_client.NOT_FOUND) - self.assertEqual(http_client.NOT_FOUND, exc.kwargs['code']) - - def test_error_msg_is_exception_to_string(self): - msg = 'test message' - exc1 = Exception(msg) - exc2 = exception.CinderException(exc1) - self.assertEqual(msg, exc2.msg) - - def test_exception_kwargs_to_string(self): - msg = 'test message' - exc1 = Exception(msg) - exc2 = exception.CinderException(kwarg1=exc1) - self.assertEqual(msg, exc2.kwargs['kwarg1']) - - def test_message_in_format_string(self): - class FakeCinderException(exception.CinderException): - message = 'FakeCinderException: %(message)s' - - exc = FakeCinderException(message='message') - self.assertEqual('FakeCinderException: message', six.text_type(exc)) - - def test_message_and_kwarg_in_format_string(self): - class FakeCinderException(exception.CinderException): - message = 'Error %(code)d: %(message)s' - - exc = FakeCinderException(message='message', - code=http_client.NOT_FOUND) - self.assertEqual('Error 404: message', six.text_type(exc)) - - def test_message_is_exception_in_format_string(self): - class FakeCinderException(exception.CinderException): - message = 'Exception: %(message)s' - - msg = 'test message' - exc1 = Exception(msg) - exc2 = FakeCinderException(message=exc1) - self.assertEqual('Exception: test message', six.text_type(exc2)) - - -class CinderConvertedExceptionTestCase(test.TestCase): - def test_default_args(self): - exc = exception.ConvertedException() - self.assertNotEqual('', exc.title) - self.assertEqual(http_client.INTERNAL_SERVER_ERROR, exc.code) - 
self.assertEqual('', exc.explanation) - - def test_standard_status_code(self): - with mock.patch.dict(webob.util.status_reasons, - {http_client.OK: 'reason'}): - exc = exception.ConvertedException(code=int(http_client.OK)) - self.assertEqual('reason', exc.title) - - @mock.patch.dict(webob.util.status_reasons, { - http_client.INTERNAL_SERVER_ERROR: 'reason'}) - def test_generic_status_code(self): - with mock.patch.dict(webob.util.status_generic_reasons, - {5: 'generic_reason'}): - exc = exception.ConvertedException(code=599) - self.assertEqual('generic_reason', exc.title) diff --git a/cinder/tests/unit/test_fixtures.py b/cinder/tests/unit/test_fixtures.py deleted file mode 100644 index 79d159ebf..000000000 --- a/cinder/tests/unit/test_fixtures.py +++ /dev/null @@ -1,59 +0,0 @@ -# Copyright 2010 United States Government as represented by the -# Administrator of the National Aeronautics and Space Administration. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -import fixtures as fx -from oslo_log import log as logging -import testtools - -from cinder.tests import fixtures - - -class TestLogging(testtools.TestCase): - def test_default_logging(self): - stdlog = self.useFixture(fixtures.StandardLogging()) - root = logging.getLogger() - # there should be a null handler as well at DEBUG - self.assertEqual(2, len(root.handlers), root.handlers) - log = logging.getLogger(__name__) - log.info("at info") - log.debug("at debug") - self.assertIn("at info", stdlog.logger.output) - self.assertNotIn("at debug", stdlog.logger.output) - - # broken debug messages should still explode, even though we - # aren't logging them in the regular handler - self.assertRaises(TypeError, log.debug, "this is broken %s %s", "foo") - - # and, ensure that one of the terrible log messages isn't - # output at info - warn_log = logging.getLogger('migrate.versioning.api') - warn_log.info("warn_log at info, should be skipped") - warn_log.error("warn_log at error") - self.assertIn("warn_log at error", stdlog.logger.output) - self.assertNotIn("warn_log at info", stdlog.logger.output) - - def test_debug_logging(self): - self.useFixture(fx.EnvironmentVariable('OS_DEBUG', '1')) - - stdlog = self.useFixture(fixtures.StandardLogging()) - root = logging.getLogger() - # there should no longer be a null handler - self.assertEqual(1, len(root.handlers), root.handlers) - log = logging.getLogger(__name__) - log.info("at info") - log.debug("at debug") - self.assertIn("at info", stdlog.logger.output) - self.assertIn("at debug", stdlog.logger.output) diff --git a/cinder/tests/unit/test_hacking.py b/cinder/tests/unit/test_hacking.py deleted file mode 100644 index 959644e19..000000000 --- a/cinder/tests/unit/test_hacking.py +++ /dev/null @@ -1,389 +0,0 @@ -# Copyright 2014 Red Hat, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. 
You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import ddt -import textwrap - -import mock -import pep8 - -from cinder.hacking import checks -from cinder import test - - -@ddt.ddt -class HackingTestCase(test.TestCase): - """This class tests the hacking checks in cinder.hacking.checks - - This class ensures that Cinder's hacking checks are working by passing - strings to the check methods like the pep8/flake8 parser would. The parser - loops over each line in the file and then passes the parameters to the - check method. The parameter names in the check method dictate what type of - object is passed to the check method. The parameter types are:: - - logical_line: A processed line with the following modifications: - - Multi-line statements converted to a single line. - - Stripped left and right. - - Contents of strings replaced with "xxx" of same length. - - Comments removed. - physical_line: Raw line of text from the input file. - lines: a list of the raw lines from the input file - tokens: the tokens that contribute to this logical line - line_number: line number in the input file - total_lines: number of lines in the input file - blank_lines: blank lines before this one - indent_char: indentation character in this file (" " or "\t") - indent_level: indentation (with tabs expanded to multiples of 8) - previous_indent_level: indentation on previous line - previous_logical: previous logical line - filename: Path of the file being run through pep8 - - When running a test on a check method the return will be False/None if - there is no violation in the sample input. 
If there is an error a tuple is - returned with a position in the line, and a message. So to check the result - just assertTrue if the check is expected to fail and assertFalse if it - should pass. - """ - - def test_no_vi_headers(self): - - lines = ['Line 1\n', 'Line 2\n', 'Line 3\n', 'Line 4\n', 'Line 5\n', - 'Line 6\n', 'Line 7\n', 'Line 8\n', 'Line 9\n', 'Line 10\n', - 'Line 11\n'] - - self.assertIsNone(checks.no_vi_headers( - "Test string foo", 1, lines)) - self.assertEqual(2, len(list(checks.no_vi_headers( - "# vim: et tabstop=4 shiftwidth=4 softtabstop=4", - 2, lines)))) - self.assertEqual(2, len(list(checks.no_vi_headers( - "# vim: et tabstop=4 shiftwidth=4 softtabstop=4", - 8, lines)))) - self.assertIsNone(checks.no_vi_headers( - "Test end string for vi", - 9, lines)) - # vim header outside of boundary (first/last 5 lines) - self.assertIsNone(checks.no_vi_headers( - "# vim: et tabstop=4 shiftwidth=4 softtabstop=4", - 6, lines)) - - def test_no_translate_logs(self): - self.assertEqual(1, len(list(checks.no_translate_logs( - "LOG.audit(_('foo'))", "cinder/scheduler/foo.py")))) - self.assertEqual(1, len(list(checks.no_translate_logs( - "LOG.debug(_('foo'))", "cinder/scheduler/foo.py")))) - self.assertEqual(1, len(list(checks.no_translate_logs( - "LOG.error(_('foo'))", "cinder/scheduler/foo.py")))) - self.assertEqual(1, len(list(checks.no_translate_logs( - "LOG.info(_('foo'))", "cinder/scheduler/foo.py")))) - self.assertEqual(1, len(list(checks.no_translate_logs( - "LOG.warning(_('foo'))", "cinder/scheduler/foo.py")))) - self.assertEqual(1, len(list(checks.no_translate_logs( - "LOG.exception(_('foo'))", "cinder/scheduler/foo.py")))) - self.assertEqual(1, len(list(checks.no_translate_logs( - "LOG.critical(_('foo'))", "cinder/scheduler/foo.py")))) - - def test_check_explicit_underscore_import(self): - self.assertEqual(1, len(list(checks.check_explicit_underscore_import( - "LOG.info(_('My info message'))", - "cinder.tests.unit/other_files.py")))) - 
self.assertEqual(1, len(list(checks.check_explicit_underscore_import( - "msg = _('My message')", - "cinder.tests.unit/other_files.py")))) - self.assertEqual(0, len(list(checks.check_explicit_underscore_import( - "from cinder.i18n import _", - "cinder.tests.unit/other_files.py")))) - self.assertEqual(0, len(list(checks.check_explicit_underscore_import( - "LOG.info(_('My info message'))", - "cinder.tests.unit/other_files.py")))) - self.assertEqual(0, len(list(checks.check_explicit_underscore_import( - "msg = _('My message')", - "cinder.tests.unit/other_files.py")))) - self.assertEqual(0, len(list(checks.check_explicit_underscore_import( - "from cinder.i18n import _", - "cinder.tests.unit/other_files2.py")))) - self.assertEqual(0, len(list(checks.check_explicit_underscore_import( - "msg = _('My message')", - "cinder.tests.unit/other_files2.py")))) - self.assertEqual(0, len(list(checks.check_explicit_underscore_import( - "_ = translations.ugettext", - "cinder.tests.unit/other_files3.py")))) - self.assertEqual(0, len(list(checks.check_explicit_underscore_import( - "msg = _('My message')", - "cinder.tests.unit/other_files3.py")))) - # Complete code coverage by falling through all checks - self.assertEqual(0, len(list(checks.check_explicit_underscore_import( - "LOG.info('My info message')", - "cinder.tests.unit/other_files4.py")))) - self.assertEqual(1, len(list(checks.check_explicit_underscore_import( - "msg = _('My message')", - "cinder.tests.unit/other_files5.py")))) - - # We are patching pep8 so that only the check under test is actually - # installed. 
- @mock.patch('pep8._checks', - {'physical_line': {}, 'logical_line': {}, 'tree': {}}) - def _run_check(self, code, checker, filename=None): - pep8.register_check(checker) - - lines = textwrap.dedent(code).strip().splitlines(True) - - checker = pep8.Checker(filename=filename, lines=lines) - checker.check_all() - checker.report._deferred_print.sort() - return checker.report._deferred_print - - def _assert_has_errors(self, code, checker, expected_errors=None, - filename=None): - actual_errors = [e[:3] for e in - self._run_check(code, checker, filename)] - self.assertEqual(expected_errors or [], actual_errors) - - def _assert_has_no_errors(self, code, checker, filename=None): - self._assert_has_errors(code, checker, filename=filename) - - def test_logging_format_args(self): - checker = checks.CheckLoggingFormatArgs - code = """ - import logging - LOG = logging.getLogger() - LOG.info("Message without a second argument.") - LOG.critical("Message with %s arguments.", 'two') - LOG.debug("Volume %s caught fire and is at %d degrees C and" - " climbing.", 'volume1', 500) - """ - self._assert_has_no_errors(code, checker) - - code = """ - import logging - LOG = logging.getLogger() - LOG.{0}("Volume %s caught fire and is at %d degrees C and " - "climbing.", ('volume1', 500)) - """ - for method in checker.LOG_METHODS: - self._assert_has_errors(code.format(method), checker, - expected_errors=[(4, 21, 'C310')]) - - code = """ - import logging - LOG = logging.getLogger() - LOG.log(logging.DEBUG, "Volume %s caught fire and is at %d" - " degrees C and climbing.", ('volume1', 500)) - """ - self._assert_has_errors(code, checker, - expected_errors=[(4, 37, 'C310')]) - - def test_opt_type_registration_args(self): - checker = checks.CheckOptRegistrationArgs - code = """ - CONF.register_opts([opt1, opt2, opt3]) - CONF.register_opts((opt4, opt5)) - CONF.register_opt(lonely_opt) - CONF.register_opts([OPT1, OPT2], group="group_of_opts") - CONF.register_opt(single_opt, group=blah) - """ - 
self._assert_has_no_errors(code, checker) - - code = """ - CONF.register_opt([opt4, opt5, opt6]) - CONF.register_opt((opt7, opt8)) - CONF.register_opts(lonely_opt) - CONF.register_opt((an_opt, another_opt)) - """ - self._assert_has_errors(code, checker, - expected_errors=[(1, 18, 'C311'), - (2, 19, 'C311'), - (3, 19, 'C311'), - (4, 19, 'C311')]) - - code = """ - CONF.register_opt(single_opt) - CONF.register_opts(other_opt) - CONF.register_opt(multiple_opts) - tuple_opts = (one_opt, two_opt) - CONF.register_opts(tuple_opts) - """ - self._assert_has_errors(code, checker, - expected_errors=[(2, 19, 'C311'), - (3, 18, 'C311')]) - - def test_str_unicode_exception(self): - - checker = checks.CheckForStrUnicodeExc - code = """ - def f(a, b): - try: - p = str(a) + str(b) - except ValueError as e: - p = str(e) - return p - """ - errors = [(5, 16, 'N325')] - self._assert_has_errors(code, checker, expected_errors=errors) - - code = """ - def f(a, b): - try: - p = unicode(a) + str(b) - except ValueError as e: - p = e - return p - """ - self._assert_has_no_errors(code, checker) - - code = """ - def f(a, b): - try: - p = str(a) + str(b) - except ValueError as e: - p = unicode(e) - return p - """ - errors = [(5, 20, 'N325')] - self._assert_has_errors(code, checker, expected_errors=errors) - - code = """ - def f(a, b): - try: - p = str(a) + str(b) - except ValueError as e: - try: - p = unicode(a) + unicode(b) - except ValueError as ve: - p = str(e) + str(ve) - p = e - return p - """ - errors = [(8, 20, 'N325'), (8, 29, 'N325')] - self._assert_has_errors(code, checker, expected_errors=errors) - - code = """ - def f(a, b): - try: - p = str(a) + str(b) - except ValueError as e: - try: - p = unicode(a) + unicode(b) - except ValueError as ve: - p = str(e) + unicode(ve) - p = str(e) - return p - """ - errors = [(8, 20, 'N325'), (8, 33, 'N325'), (9, 16, 'N325')] - self._assert_has_errors(code, checker, expected_errors=errors) - - def test_check_no_log_audit(self): - self.assertEqual(1, 
len(list(checks.check_no_log_audit( - "LOG.audit('My test audit log')")))) - self.assertEqual(0, len(list(checks.check_no_log_audit( - "LOG.info('My info test log.')")))) - - def test_no_mutable_default_args(self): - self.assertEqual(0, len(list(checks.no_mutable_default_args( - "def foo (bar):")))) - self.assertEqual(1, len(list(checks.no_mutable_default_args( - "def foo (bar=[]):")))) - self.assertEqual(1, len(list(checks.no_mutable_default_args( - "def foo (bar={}):")))) - - def test_check_datetime_now(self): - self.assertEqual(1, len(list(checks.check_datetime_now( - "datetime.now", False)))) - self.assertEqual(0, len(list(checks.check_datetime_now( - "timeutils.utcnow", False)))) - - def test_check_datetime_now_noqa(self): - self.assertEqual(0, len(list(checks.check_datetime_now( - "datetime.now() # noqa", True)))) - - def test_check_timeutils_strtime(self): - self.assertEqual(1, len(list(checks.check_timeutils_strtime( - "timeutils.strtime")))) - self.assertEqual(0, len(list(checks.check_timeutils_strtime( - "strftime")))) - - def test_check_unicode_usage(self): - self.assertEqual(1, len(list(checks.check_unicode_usage( - "unicode(msg)", False)))) - self.assertEqual(0, len(list(checks.check_unicode_usage( - "unicode(msg) # noqa", True)))) - - def test_no_print_statements(self): - self.assertEqual(0, len(list(checks.check_no_print_statements( - "a line with no print statement", - "cinder/file.py", False)))) - self.assertEqual(1, len(list(checks.check_no_print_statements( - "print('My print statement')", - "cinder/file.py", False)))) - self.assertEqual(0, len(list(checks.check_no_print_statements( - "print('My print statement in cinder/cmd, which is ok.')", - "cinder/cmd/file.py", False)))) - self.assertEqual(0, len(list(checks.check_no_print_statements( - "print('My print statement that I just must have.')", - "cinder.tests.unit/file.py", True)))) - self.assertEqual(1, len(list(checks.check_no_print_statements( - "print ('My print with space')", - 
"cinder/volume/anotherFile.py", False)))) - - def test_dict_constructor_with_list_copy(self): - self.assertEqual(1, len(list(checks.dict_constructor_with_list_copy( - " dict([(i, connect_info[i])")))) - - self.assertEqual(1, len(list(checks.dict_constructor_with_list_copy( - " attrs = dict([(k, _from_json(v))")))) - - self.assertEqual(1, len(list(checks.dict_constructor_with_list_copy( - " type_names = dict((value, key) for key, value in")))) - - self.assertEqual(1, len(list(checks.dict_constructor_with_list_copy( - " dict((value, key) for key, value in")))) - - self.assertEqual(1, len(list(checks.dict_constructor_with_list_copy( - "foo(param=dict((k, v) for k, v in bar.items()))")))) - - self.assertEqual(1, len(list(checks.dict_constructor_with_list_copy( - " dict([[i,i] for i in range(3)])")))) - - self.assertEqual(1, len(list(checks.dict_constructor_with_list_copy( - " dd = dict([i,i] for i in range(3))")))) - - self.assertEqual(0, len(list(checks.dict_constructor_with_list_copy( - " dict()")))) - - self.assertEqual(0, len(list(checks.dict_constructor_with_list_copy( - " create_kwargs = dict(snapshot=snapshot,")))) - - self.assertEqual(0, len(list(checks.dict_constructor_with_list_copy( - " self._render_dict(xml, data_el, data.__dict__)")))) - - def test_validate_assertTrue(self): - test_value = True - self.assertEqual(0, len(list(checks.validate_assertTrue( - "assertTrue(True)")))) - self.assertEqual(1, len(list(checks.validate_assertTrue( - "assertEqual(True, %s)" % test_value)))) - - @ddt.unpack - @ddt.data( - (1, 'LOG.info', "cinder/tests/unit/fake.py", False), - (1, 'LOG.warning', "cinder/tests/fake.py", False), - (1, 'LOG.error', "cinder/tests/fake.py", False), - (1, 'LOG.exception', "cinder/tests/fake.py", False), - (1, 'LOG.debug', "cinder/tests/fake.py", False), - (0, 'LOG.info.assert_called_once_with', "cinder/tests/fake.py", False), - (0, 'some.LOG.error.call', "cinder/tests/fake.py", False), - (0, 'LOG.warning', "cinder/tests/unit/fake.py", True)) - 
def test_no_test_log(self, first, second, third, fourth): - self.assertEqual(first, len(list(checks.no_test_log( - "%s('arg')" % second, third, fourth)))) diff --git a/cinder/tests/unit/test_image_utils.py b/cinder/tests/unit/test_image_utils.py deleted file mode 100644 index 17518424c..000000000 --- a/cinder/tests/unit/test_image_utils.py +++ /dev/null @@ -1,1618 +0,0 @@ - -# Copyright (c) 2013 eNovance , Inc. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -"""Unit tests for image utils.""" - -import errno -import math - -import mock -from oslo_concurrency import processutils -from oslo_utils import units - -from cinder import exception -from cinder.image import image_utils -from cinder import test -from cinder.tests.unit import fake_constants as fake -from cinder.volume import throttling - - -class TestQemuImgInfo(test.TestCase): - @mock.patch('oslo_utils.imageutils.QemuImgInfo') - @mock.patch('cinder.utils.execute') - def test_qemu_img_info(self, mock_exec, mock_info): - mock_out = mock.sentinel.out - mock_err = mock.sentinel.err - test_path = mock.sentinel.path - mock_exec.return_value = (mock_out, mock_err) - - output = image_utils.qemu_img_info(test_path) - mock_exec.assert_called_once_with('env', 'LC_ALL=C', 'qemu-img', - 'info', test_path, run_as_root=True, - prlimit=image_utils.QEMU_IMG_LIMITS) - self.assertEqual(mock_info.return_value, output) - - @mock.patch('oslo_utils.imageutils.QemuImgInfo') - @mock.patch('cinder.utils.execute') - 
def test_qemu_img_info_not_root(self, mock_exec, mock_info): - mock_out = mock.sentinel.out - mock_err = mock.sentinel.err - test_path = mock.sentinel.path - mock_exec.return_value = (mock_out, mock_err) - - output = image_utils.qemu_img_info(test_path, run_as_root=False) - mock_exec.assert_called_once_with('env', 'LC_ALL=C', 'qemu-img', - 'info', test_path, run_as_root=False, - prlimit=image_utils.QEMU_IMG_LIMITS) - self.assertEqual(mock_info.return_value, output) - - @mock.patch('cinder.image.image_utils.os') - @mock.patch('oslo_utils.imageutils.QemuImgInfo') - @mock.patch('cinder.utils.execute') - def test_qemu_img_info_on_nt(self, mock_exec, mock_info, mock_os): - mock_out = mock.sentinel.out - mock_err = mock.sentinel.err - test_path = mock.sentinel.path - mock_exec.return_value = (mock_out, mock_err) - mock_os.name = 'nt' - - output = image_utils.qemu_img_info(test_path) - mock_exec.assert_called_once_with('qemu-img', 'info', test_path, - run_as_root=True, - prlimit=image_utils.QEMU_IMG_LIMITS) - self.assertEqual(mock_info.return_value, output) - - @mock.patch('cinder.utils.execute') - def test_get_qemu_img_version(self, mock_exec): - mock_out = "qemu-img version 2.0.0" - mock_err = mock.sentinel.err - mock_exec.return_value = (mock_out, mock_err) - - expected_version = [2, 0, 0] - version = image_utils.get_qemu_img_version() - - mock_exec.assert_called_once_with('qemu-img', '--version', - check_exit_code=False) - self.assertEqual(expected_version, version) - - @mock.patch.object(image_utils, 'get_qemu_img_version') - def test_validate_qemu_img_version(self, mock_get_qemu_img_version): - fake_current_version = [1, 8] - mock_get_qemu_img_version.return_value = fake_current_version - minimum_version = '1.8' - - image_utils.check_qemu_img_version(minimum_version) - - mock_get_qemu_img_version.assert_called_once_with() - - @mock.patch.object(image_utils, 'get_qemu_img_version') - def _test_validate_unsupported_qemu_img_version(self, - mock_get_qemu_img_version, - 
current_version=None): - mock_get_qemu_img_version.return_value = current_version - minimum_version = '2.0' - - self.assertRaises(exception.VolumeBackendAPIException, - image_utils.check_qemu_img_version, - minimum_version) - - mock_get_qemu_img_version.assert_called_once_with() - - def test_validate_qemu_img_version_not_installed(self): - self._test_validate_unsupported_qemu_img_version() - - def test_validate_older_qemu_img_version(self): - self._test_validate_unsupported_qemu_img_version( - current_version=[1, 8]) - - -class TestConvertImage(test.TestCase): - @mock.patch('cinder.image.image_utils.qemu_img_info') - @mock.patch('cinder.utils.execute') - @mock.patch('cinder.utils.is_blk_device', return_value=True) - def test_defaults_block_dev_with_size_info(self, mock_isblk, - mock_exec, mock_info): - source = mock.sentinel.source - dest = mock.sentinel.dest - out_format = mock.sentinel.out_format - mock_info.return_value.virtual_size = 1048576 - throttle = throttling.Throttle(prefix=['cgcmd']) - - with mock.patch('cinder.volume.utils.check_for_odirect_support', - return_value=True): - output = image_utils.convert_image(source, dest, out_format, - throttle=throttle) - - self.assertIsNone(output) - mock_exec.assert_called_once_with('cgcmd', 'qemu-img', 'convert', - '-t', 'none', '-O', out_format, - source, dest, run_as_root=True) - - mock_exec.reset_mock() - - with mock.patch('cinder.volume.utils.check_for_odirect_support', - return_value=False): - output = image_utils.convert_image(source, dest, out_format) - - self.assertIsNone(output) - mock_exec.assert_called_once_with('qemu-img', 'convert', - '-O', out_format, source, dest, - run_as_root=True) - - @mock.patch('cinder.image.image_utils.qemu_img_info') - @mock.patch('cinder.utils.execute') - @mock.patch('cinder.utils.is_blk_device', return_value=True) - def test_defaults_block_dev_without_size_info(self, mock_isblk, - mock_exec, - mock_info): - source = mock.sentinel.source - dest = mock.sentinel.dest - 
out_format = mock.sentinel.out_format - mock_info.side_effect = ValueError - throttle = throttling.Throttle(prefix=['cgcmd']) - - with mock.patch('cinder.volume.utils.check_for_odirect_support', - return_value=True): - output = image_utils.convert_image(source, dest, out_format, - throttle=throttle) - - mock_info.assert_called_once_with(source, run_as_root=True) - self.assertIsNone(output) - mock_exec.assert_called_once_with('cgcmd', 'qemu-img', 'convert', - '-t', 'none', '-O', out_format, - source, dest, run_as_root=True) - - mock_exec.reset_mock() - - with mock.patch('cinder.volume.utils.check_for_odirect_support', - return_value=False): - output = image_utils.convert_image(source, dest, out_format) - - self.assertIsNone(output) - mock_exec.assert_called_once_with('qemu-img', 'convert', - '-O', out_format, source, dest, - run_as_root=True) - - @mock.patch('cinder.volume.utils.check_for_odirect_support', - return_value=True) - @mock.patch('cinder.image.image_utils.qemu_img_info') - @mock.patch('cinder.utils.execute') - @mock.patch('cinder.utils.is_blk_device', return_value=False) - def test_defaults_not_block_dev_with_size_info(self, mock_isblk, - mock_exec, - mock_info, - mock_odirect): - source = mock.sentinel.source - dest = mock.sentinel.dest - out_format = mock.sentinel.out_format - mock_info.return_value.virtual_size = 1048576 - - output = image_utils.convert_image(source, dest, out_format) - - self.assertIsNone(output) - mock_exec.assert_called_once_with('qemu-img', 'convert', '-O', - out_format, source, dest, - run_as_root=True) - - @mock.patch('cinder.volume.utils.check_for_odirect_support', - return_value=True) - @mock.patch('cinder.image.image_utils.qemu_img_info') - @mock.patch('cinder.utils.execute') - @mock.patch('cinder.utils.is_blk_device', return_value=False) - def test_defaults_not_block_dev_without_size_info(self, - mock_isblk, - mock_exec, - mock_info, - mock_odirect): - source = mock.sentinel.source - dest = mock.sentinel.dest - out_format = 
mock.sentinel.out_format - mock_info.side_effect = ValueError - - output = image_utils.convert_image(source, dest, out_format) - - self.assertIsNone(output) - mock_exec.assert_called_once_with('qemu-img', 'convert', '-O', - out_format, source, dest, - run_as_root=True) - - @mock.patch('cinder.image.image_utils.qemu_img_info') - @mock.patch('cinder.utils.execute') - @mock.patch('cinder.utils.is_blk_device', return_value=True) - def test_defaults_block_dev_ami_img(self, mock_isblk, mock_exec, - mock_info): - source = mock.sentinel.source - dest = mock.sentinel.dest - out_format = mock.sentinel.out_format - mock_info.return_value.virtual_size = 1048576 - - with mock.patch('cinder.volume.utils.check_for_odirect_support', - return_value=True): - output = image_utils.convert_image(source, dest, out_format, - src_format='AMI') - - self.assertIsNone(output) - mock_exec.assert_called_once_with('qemu-img', 'convert', - '-t', 'none', '-O', out_format, - source, dest, run_as_root=True) - - -class TestResizeImage(test.TestCase): - @mock.patch('cinder.utils.execute') - def test_defaults(self, mock_exec): - source = mock.sentinel.source - size = mock.sentinel.size - output = image_utils.resize_image(source, size) - self.assertIsNone(output) - mock_exec.assert_called_once_with('qemu-img', 'resize', source, - 'sentinel.sizeG', run_as_root=False) - - @mock.patch('cinder.utils.execute') - def test_run_as_root(self, mock_exec): - source = mock.sentinel.source - size = mock.sentinel.size - output = image_utils.resize_image(source, size, run_as_root=True) - self.assertIsNone(output) - mock_exec.assert_called_once_with('qemu-img', 'resize', source, - 'sentinel.sizeG', run_as_root=True) - - -class TestFetch(test.TestCase): - @mock.patch('os.stat') - @mock.patch('cinder.image.image_utils.fileutils') - def test_defaults(self, mock_fileutils, mock_stat): - ctxt = mock.sentinel.context - image_service = mock.Mock() - image_id = mock.sentinel.image_id - path = 'test_path' - _user_id = 
mock.sentinel._user_id - _project_id = mock.sentinel._project_id - mock_open = mock.mock_open() - mock_stat.return_value.st_size = 1048576 - - with mock.patch('cinder.image.image_utils.open', - new=mock_open, create=True): - output = image_utils.fetch(ctxt, image_service, image_id, path, - _user_id, _project_id) - self.assertIsNone(output) - image_service.download.assert_called_once_with(ctxt, image_id, - mock_open.return_value) - mock_open.assert_called_once_with(path, 'wb') - mock_fileutils.remove_path_on_error.assert_called_once_with(path) - (mock_fileutils.remove_path_on_error.return_value.__enter__ - .assert_called_once_with()) - (mock_fileutils.remove_path_on_error.return_value.__exit__ - .assert_called_once_with(None, None, None)) - - def test_fetch_enospc(self): - context = mock.sentinel.context - image_service = mock.Mock() - image_id = mock.sentinel.image_id - e = exception.ImageTooBig(image_id=image_id, reason = "fake") - e.errno = errno.ENOSPC - image_service.download.side_effect = e - path = '/test_path' - _user_id = mock.sentinel._user_id - _project_id = mock.sentinel._project_id - - with mock.patch('cinder.image.image_utils.open', - new=mock.mock_open(), create=True): - self.assertRaises(exception.ImageTooBig, - image_utils.fetch, - context, image_service, image_id, path, - _user_id, _project_id) - - -class TestVerifyImage(test.TestCase): - @mock.patch('cinder.image.image_utils.qemu_img_info') - @mock.patch('cinder.image.image_utils.fileutils') - @mock.patch('cinder.image.image_utils.fetch') - def test_defaults(self, mock_fetch, mock_fileutils, mock_info): - ctxt = mock.sentinel.context - image_service = mock.Mock() - image_id = mock.sentinel.image_id - dest = mock.sentinel.dest - mock_data = mock_info.return_value - mock_data.file_format = 'test_format' - mock_data.backing_file = None - - output = image_utils.fetch_verify_image(ctxt, image_service, - image_id, dest) - self.assertIsNone(output) - mock_fetch.assert_called_once_with(ctxt, 
image_service, image_id, - dest, None, None) - mock_info.assert_called_once_with(dest, run_as_root=True) - mock_fileutils.remove_path_on_error.assert_called_once_with(dest) - (mock_fileutils.remove_path_on_error.return_value.__enter__ - .assert_called_once_with()) - (mock_fileutils.remove_path_on_error.return_value.__exit__ - .assert_called_once_with(None, None, None)) - - @mock.patch('cinder.image.image_utils.check_available_space') - @mock.patch('cinder.image.image_utils.qemu_img_info') - @mock.patch('cinder.image.image_utils.fileutils') - @mock.patch('cinder.image.image_utils.fetch') - def test_kwargs(self, mock_fetch, mock_fileutils, mock_info, - mock_check_space): - ctxt = mock.sentinel.context - image_service = FakeImageService() - image_id = mock.sentinel.image_id - dest = mock.sentinel.dest - user_id = mock.sentinel.user_id - project_id = mock.sentinel.project_id - size = 2 - run_as_root = mock.sentinel.run_as_root - mock_data = mock_info.return_value - mock_data.file_format = 'test_format' - mock_data.backing_file = None - mock_data.virtual_size = 1 - - output = image_utils.fetch_verify_image( - ctxt, image_service, image_id, dest, user_id=user_id, - project_id=project_id, size=size, run_as_root=run_as_root) - self.assertIsNone(output) - mock_fetch.assert_called_once_with(ctxt, image_service, image_id, - dest, None, None) - mock_fileutils.remove_path_on_error.assert_called_once_with(dest) - (mock_fileutils.remove_path_on_error.return_value.__enter__ - .assert_called_once_with()) - (mock_fileutils.remove_path_on_error.return_value.__exit__ - .assert_called_once_with(None, None, None)) - - @mock.patch('cinder.image.image_utils.qemu_img_info') - @mock.patch('cinder.image.image_utils.fileutils') - @mock.patch('cinder.image.image_utils.fetch') - def test_format_error(self, mock_fetch, mock_fileutils, mock_info): - ctxt = mock.sentinel.context - image_service = mock.Mock() - image_id = mock.sentinel.image_id - dest = mock.sentinel.dest - mock_data = 
mock_info.return_value - mock_data.file_format = None - mock_data.backing_file = None - - self.assertRaises(exception.ImageUnacceptable, - image_utils.fetch_verify_image, - ctxt, image_service, image_id, dest) - - @mock.patch('cinder.image.image_utils.qemu_img_info') - @mock.patch('cinder.image.image_utils.fileutils') - @mock.patch('cinder.image.image_utils.fetch') - def test_backing_file_error(self, mock_fetch, mock_fileutils, mock_info): - ctxt = mock.sentinel.context - image_service = mock.Mock() - image_id = mock.sentinel.image_id - dest = mock.sentinel.dest - mock_data = mock_info.return_value - mock_data.file_format = 'test_format' - mock_data.backing_file = 'test_backing_file' - - self.assertRaises(exception.ImageUnacceptable, - image_utils.fetch_verify_image, - ctxt, image_service, image_id, dest) - - @mock.patch('cinder.image.image_utils.qemu_img_info') - @mock.patch('cinder.image.image_utils.fileutils') - @mock.patch('cinder.image.image_utils.fetch') - def test_size_error(self, mock_fetch, mock_fileutils, mock_info): - ctxt = mock.sentinel.context - image_service = mock.Mock() - image_id = mock.sentinel.image_id - dest = mock.sentinel.dest - size = 1 - mock_data = mock_info.return_value - mock_data.file_format = 'test_format' - mock_data.backing_file = None - mock_data.virtual_size = 2 - - self.assertRaises(exception.ImageUnacceptable, - image_utils.fetch_verify_image, - ctxt, image_service, image_id, dest, size=size) - - -class TestTemporaryDir(test.TestCase): - @mock.patch('cinder.image.image_utils.CONF') - @mock.patch('os.makedirs') - @mock.patch('os.path.exists', return_value=True) - @mock.patch('cinder.image.image_utils.utils.tempdir') - def test_conv_dir_exists(self, mock_tempdir, mock_exists, mock_make, - mock_conf): - mock_conf.image_conversion_dir = mock.sentinel.conv_dir - - output = image_utils.temporary_dir() - - self.assertFalse(mock_make.called) - mock_tempdir.assert_called_once_with(dir=mock.sentinel.conv_dir) - self.assertEqual(output, 
mock_tempdir.return_value) - - @mock.patch('cinder.image.image_utils.CONF') - @mock.patch('os.makedirs') - @mock.patch('os.path.exists', return_value=False) - @mock.patch('cinder.image.image_utils.utils.tempdir') - def test_create_conv_dir(self, mock_tempdir, mock_exists, mock_make, - mock_conf): - mock_conf.image_conversion_dir = mock.sentinel.conv_dir - - output = image_utils.temporary_dir() - - mock_make.assert_called_once_with(mock.sentinel.conv_dir) - mock_tempdir.assert_called_once_with(dir=mock.sentinel.conv_dir) - self.assertEqual(output, mock_tempdir.return_value) - - @mock.patch('cinder.image.image_utils.CONF') - @mock.patch('os.makedirs') - @mock.patch('os.path.exists', return_value=False) - @mock.patch('cinder.image.image_utils.utils.tempdir') - def test_no_conv_dir(self, mock_tempdir, mock_exists, mock_make, - mock_conf): - mock_conf.image_conversion_dir = None - - output = image_utils.temporary_dir() - - self.assertFalse(mock_make.called) - mock_tempdir.assert_called_once_with(dir=None) - self.assertEqual(output, mock_tempdir.return_value) - - -class TestUploadVolume(test.TestCase): - @mock.patch('cinder.image.image_utils.CONF') - @mock.patch('six.moves.builtins.open') - @mock.patch('cinder.image.image_utils.qemu_img_info') - @mock.patch('cinder.image.image_utils.convert_image') - @mock.patch('cinder.image.image_utils.temporary_file') - @mock.patch('cinder.image.image_utils.os') - def test_diff_format(self, mock_os, mock_temp, mock_convert, mock_info, - mock_open, mock_conf): - ctxt = mock.sentinel.context - image_service = mock.Mock() - image_meta = {'id': 'test_id', - 'disk_format': mock.sentinel.disk_format} - volume_path = mock.sentinel.volume_path - mock_os.name = 'posix' - data = mock_info.return_value - data.file_format = mock.sentinel.disk_format - data.backing_file = None - temp_file = mock_temp.return_value.__enter__.return_value - - output = image_utils.upload_volume(ctxt, image_service, image_meta, - volume_path) - - 
self.assertIsNone(output) - mock_convert.assert_called_once_with(volume_path, - temp_file, - mock.sentinel.disk_format, - run_as_root=True) - mock_info.assert_called_with(temp_file, run_as_root=True) - self.assertEqual(2, mock_info.call_count) - mock_open.assert_called_once_with(temp_file, 'rb') - image_service.update.assert_called_once_with( - ctxt, image_meta['id'], {}, - mock_open.return_value.__enter__.return_value) - - @mock.patch('cinder.image.image_utils.utils.temporary_chown') - @mock.patch('cinder.image.image_utils.CONF') - @mock.patch('six.moves.builtins.open') - @mock.patch('cinder.image.image_utils.qemu_img_info') - @mock.patch('cinder.image.image_utils.convert_image') - @mock.patch('cinder.image.image_utils.temporary_file') - @mock.patch('cinder.image.image_utils.os') - def test_same_format(self, mock_os, mock_temp, mock_convert, mock_info, - mock_open, mock_conf, mock_chown): - ctxt = mock.sentinel.context - image_service = mock.Mock() - image_meta = {'id': 'test_id', - 'disk_format': 'raw'} - volume_path = mock.sentinel.volume_path - mock_os.name = 'posix' - mock_os.access.return_value = False - - output = image_utils.upload_volume(ctxt, image_service, image_meta, - volume_path) - - self.assertIsNone(output) - self.assertFalse(mock_convert.called) - self.assertFalse(mock_info.called) - mock_chown.assert_called_once_with(volume_path) - mock_open.assert_called_once_with(volume_path, 'rb') - image_service.update.assert_called_once_with( - ctxt, image_meta['id'], {}, - mock_open.return_value.__enter__.return_value) - - @mock.patch('cinder.image.image_utils.utils.temporary_chown') - @mock.patch('cinder.image.image_utils.CONF') - @mock.patch('six.moves.builtins.open') - @mock.patch('cinder.image.image_utils.qemu_img_info') - @mock.patch('cinder.image.image_utils.convert_image') - @mock.patch('cinder.image.image_utils.temporary_file') - @mock.patch('cinder.image.image_utils.os') - def test_same_format_on_nt(self, mock_os, mock_temp, mock_convert, - 
mock_info, mock_open, mock_conf, mock_chown): - ctxt = mock.sentinel.context - image_service = mock.Mock() - image_meta = {'id': 'test_id', - 'disk_format': 'raw'} - volume_path = mock.sentinel.volume_path - mock_os.name = 'nt' - mock_os.access.return_value = False - - output = image_utils.upload_volume(ctxt, image_service, image_meta, - volume_path) - - self.assertIsNone(output) - self.assertFalse(mock_convert.called) - self.assertFalse(mock_info.called) - mock_open.assert_called_once_with(volume_path, 'rb') - image_service.update.assert_called_once_with( - ctxt, image_meta['id'], {}, - mock_open.return_value.__enter__.return_value) - - @mock.patch('cinder.image.image_utils.CONF') - @mock.patch('six.moves.builtins.open') - @mock.patch('cinder.image.image_utils.qemu_img_info') - @mock.patch('cinder.image.image_utils.convert_image') - @mock.patch('cinder.image.image_utils.temporary_file') - @mock.patch('cinder.image.image_utils.os') - def test_convert_error(self, mock_os, mock_temp, mock_convert, mock_info, - mock_open, mock_conf): - ctxt = mock.sentinel.context - image_service = mock.Mock() - image_meta = {'id': 'test_id', - 'disk_format': mock.sentinel.disk_format} - volume_path = mock.sentinel.volume_path - mock_os.name = 'posix' - data = mock_info.return_value - data.file_format = mock.sentinel.other_disk_format - data.backing_file = None - temp_file = mock_temp.return_value.__enter__.return_value - - self.assertRaises(exception.ImageUnacceptable, - image_utils.upload_volume, - ctxt, image_service, image_meta, volume_path) - mock_convert.assert_called_once_with(volume_path, - temp_file, - mock.sentinel.disk_format, - run_as_root=True) - mock_info.assert_called_with(temp_file, run_as_root=True) - self.assertEqual(2, mock_info.call_count) - self.assertFalse(image_service.update.called) - - -class TestFetchToVhd(test.TestCase): - @mock.patch('cinder.image.image_utils.fetch_to_volume_format') - def test_defaults(self, mock_fetch_to): - ctxt = mock.sentinel.context - 
image_service = mock.sentinel.image_service - image_id = mock.sentinel.image_id - dest = mock.sentinel.dest - blocksize = mock.sentinel.blocksize - - output = image_utils.fetch_to_vhd(ctxt, image_service, image_id, - dest, blocksize) - self.assertIsNone(output) - mock_fetch_to.assert_called_once_with(ctxt, image_service, image_id, - dest, 'vpc', blocksize, None, - None, run_as_root=True) - - @mock.patch('cinder.image.image_utils.check_available_space') - @mock.patch('cinder.image.image_utils.fetch_to_volume_format') - def test_kwargs(self, mock_fetch_to, mock_check_space): - ctxt = mock.sentinel.context - image_service = mock.sentinel.image_service - image_id = mock.sentinel.image_id - dest = mock.sentinel.dest - blocksize = mock.sentinel.blocksize - user_id = mock.sentinel.user_id - project_id = mock.sentinel.project_id - run_as_root = mock.sentinel.run_as_root - - output = image_utils.fetch_to_vhd(ctxt, image_service, image_id, - dest, blocksize, user_id=user_id, - project_id=project_id, - run_as_root=run_as_root) - self.assertIsNone(output) - mock_fetch_to.assert_called_once_with(ctxt, image_service, image_id, - dest, 'vpc', blocksize, user_id, - project_id, - run_as_root=run_as_root) - - -class TestFetchToRaw(test.TestCase): - @mock.patch('cinder.image.image_utils.fetch_to_volume_format') - def test_defaults(self, mock_fetch_to): - ctxt = mock.sentinel.context - image_service = mock.sentinel.image_service - image_id = mock.sentinel.image_id - dest = mock.sentinel.dest - blocksize = mock.sentinel.blocksize - - output = image_utils.fetch_to_raw(ctxt, image_service, image_id, - dest, blocksize) - self.assertIsNone(output) - mock_fetch_to.assert_called_once_with(ctxt, image_service, image_id, - dest, 'raw', blocksize, None, - None, None, run_as_root=True) - - @mock.patch('cinder.image.image_utils.check_available_space') - @mock.patch('cinder.image.image_utils.fetch_to_volume_format') - def test_kwargs(self, mock_fetch_to, mock_check_space): - ctxt = 
mock.sentinel.context - image_service = mock.sentinel.image_service - image_id = mock.sentinel.image_id - dest = mock.sentinel.dest - blocksize = mock.sentinel.blocksize - user_id = mock.sentinel.user_id - project_id = mock.sentinel.project_id - size = mock.sentinel.size - run_as_root = mock.sentinel.run_as_root - - output = image_utils.fetch_to_raw(ctxt, image_service, image_id, - dest, blocksize, user_id=user_id, - project_id=project_id, size=size, - run_as_root=run_as_root) - self.assertIsNone(output) - mock_fetch_to.assert_called_once_with(ctxt, image_service, image_id, - dest, 'raw', blocksize, user_id, - project_id, size, - run_as_root=run_as_root) - - -class FakeImageService(object): - def __init__(self, db_driver=None, image_service=None, disk_format='raw'): - self.temp_images = None - self.disk_format = disk_format - - def show(self, context, image_id): - return {'size': 2 * units.Gi, - 'disk_format': self.disk_format, - 'container_format': 'bare', - 'status': 'active'} - - -class TestFetchToVolumeFormat(test.TestCase): - @mock.patch('cinder.image.image_utils.check_available_space') - @mock.patch('cinder.image.image_utils.convert_image') - @mock.patch('cinder.image.image_utils.volume_utils.copy_volume') - @mock.patch( - 'cinder.image.image_utils.replace_xenserver_image_with_coalesced_vhd') - @mock.patch('cinder.image.image_utils.is_xenserver_format', - return_value=False) - @mock.patch('cinder.image.image_utils.fetch') - @mock.patch('cinder.image.image_utils.qemu_img_info') - @mock.patch('cinder.image.image_utils.temporary_file') - @mock.patch('cinder.image.image_utils.CONF') - def test_defaults(self, mock_conf, mock_temp, mock_info, mock_fetch, - mock_is_xen, mock_repl_xen, mock_copy, mock_convert, - mock_check_space): - ctxt = mock.sentinel.context - ctxt.user_id = mock.sentinel.user_id - image_service = FakeImageService() - image_id = mock.sentinel.image_id - dest = mock.sentinel.dest - volume_format = mock.sentinel.volume_format - blocksize = 
mock.sentinel.blocksize - - data = mock_info.return_value - data.file_format = volume_format - data.backing_file = None - data.virtual_size = 1234 - tmp = mock_temp.return_value.__enter__.return_value - - output = image_utils.fetch_to_volume_format(ctxt, image_service, - image_id, dest, - volume_format, blocksize) - - self.assertIsNone(output) - mock_temp.assert_called_once_with() - mock_info.assert_has_calls([ - mock.call(tmp, run_as_root=True), - mock.call(tmp, run_as_root=True)]) - mock_fetch.assert_called_once_with(ctxt, image_service, image_id, - tmp, None, None) - self.assertFalse(mock_repl_xen.called) - self.assertFalse(mock_copy.called) - mock_convert.assert_called_once_with(tmp, dest, volume_format, - run_as_root=True, - src_format='raw') - - @mock.patch('cinder.image.image_utils.check_available_space') - @mock.patch('cinder.image.image_utils.convert_image') - @mock.patch('cinder.image.image_utils.volume_utils.copy_volume') - @mock.patch( - 'cinder.image.image_utils.replace_xenserver_image_with_coalesced_vhd') - @mock.patch('cinder.image.image_utils.is_xenserver_format', - return_value=False) - @mock.patch('cinder.image.image_utils.fetch') - @mock.patch('cinder.image.image_utils.qemu_img_info') - @mock.patch('cinder.image.image_utils.temporary_file') - @mock.patch('cinder.image.image_utils.CONF') - def test_kwargs(self, mock_conf, mock_temp, mock_info, mock_fetch, - mock_is_xen, mock_repl_xen, mock_copy, mock_convert, - mock_check_space): - ctxt = mock.sentinel.context - image_service = FakeImageService() - image_id = mock.sentinel.image_id - dest = mock.sentinel.dest - volume_format = mock.sentinel.volume_format - blocksize = mock.sentinel.blocksize - ctxt.user_id = user_id = mock.sentinel.user_id - project_id = mock.sentinel.project_id - size = 4321 - run_as_root = mock.sentinel.run_as_root - - data = mock_info.return_value - data.file_format = volume_format - data.backing_file = None - data.virtual_size = 1234 - tmp = 
mock_temp.return_value.__enter__.return_value - - output = image_utils.fetch_to_volume_format( - ctxt, image_service, image_id, dest, volume_format, blocksize, - user_id=user_id, project_id=project_id, size=size, - run_as_root=run_as_root) - - self.assertIsNone(output) - mock_temp.assert_called_once_with() - mock_info.assert_has_calls([ - mock.call(tmp, run_as_root=run_as_root), - mock.call(tmp, run_as_root=run_as_root)]) - mock_fetch.assert_called_once_with(ctxt, image_service, image_id, - tmp, user_id, project_id) - self.assertFalse(mock_repl_xen.called) - self.assertFalse(mock_copy.called) - mock_convert.assert_called_once_with(tmp, dest, volume_format, - run_as_root=run_as_root, - src_format='raw') - - @mock.patch('cinder.image.image_utils.check_available_space') - @mock.patch('cinder.image.image_utils.convert_image') - @mock.patch('cinder.image.image_utils.volume_utils.copy_volume') - @mock.patch( - 'cinder.image.image_utils.replace_xenserver_image_with_coalesced_vhd') - @mock.patch('cinder.image.image_utils.is_xenserver_format', - return_value=True) - @mock.patch('cinder.image.image_utils.fetch') - @mock.patch('cinder.image.image_utils.qemu_img_info') - @mock.patch('cinder.image.image_utils.temporary_file') - @mock.patch('cinder.image.image_utils.CONF') - def test_convert_from_vhd(self, mock_conf, mock_temp, mock_info, - mock_fetch, mock_is_xen, mock_repl_xen, - mock_copy, mock_convert, mock_check_space): - ctxt = mock.sentinel.context - image_id = mock.sentinel.image_id - dest = mock.sentinel.dest - volume_format = mock.sentinel.volume_format - blocksize = mock.sentinel.blocksize - ctxt.user_id = user_id = mock.sentinel.user_id - project_id = mock.sentinel.project_id - size = 4321 - run_as_root = mock.sentinel.run_as_root - - data = mock_info.return_value - data.file_format = volume_format - data.backing_file = None - data.virtual_size = 1234 - tmp = mock_temp.return_value.__enter__.return_value - image_service = FakeImageService(disk_format='vhd') - 
expect_format = 'vpc' - - output = image_utils.fetch_to_volume_format( - ctxt, image_service, image_id, dest, volume_format, blocksize, - user_id=user_id, project_id=project_id, size=size, - run_as_root=run_as_root) - - self.assertIsNone(output) - mock_temp.assert_called_once_with() - mock_info.assert_has_calls([ - mock.call(tmp, run_as_root=run_as_root), - mock.call(tmp, run_as_root=run_as_root)]) - mock_fetch.assert_called_once_with(ctxt, image_service, image_id, - tmp, user_id, project_id) - mock_repl_xen.assert_called_once_with(tmp) - self.assertFalse(mock_copy.called) - mock_convert.assert_called_once_with(tmp, dest, volume_format, - run_as_root=run_as_root, - src_format=expect_format) - - @mock.patch('cinder.image.image_utils.check_available_space', - new=mock.Mock()) - @mock.patch('cinder.image.image_utils.is_xenserver_format', - new=mock.Mock(return_value=False)) - @mock.patch('cinder.image.image_utils.convert_image') - @mock.patch('cinder.image.image_utils.volume_utils.copy_volume') - @mock.patch( - 'cinder.image.image_utils.replace_xenserver_image_with_coalesced_vhd') - @mock.patch('cinder.image.image_utils.fetch') - @mock.patch('cinder.image.image_utils.qemu_img_info') - @mock.patch('cinder.image.image_utils.temporary_file') - @mock.patch('cinder.image.image_utils.CONF') - def test_temporary_images(self, mock_conf, mock_temp, mock_info, - mock_fetch, mock_repl_xen, - mock_copy, mock_convert): - ctxt = mock.sentinel.context - ctxt.user_id = mock.sentinel.user_id - image_service = FakeImageService() - image_id = mock.sentinel.image_id - dest = mock.sentinel.dest - volume_format = mock.sentinel.volume_format - blocksize = mock.sentinel.blocksize - - data = mock_info.return_value - data.file_format = volume_format - data.backing_file = None - data.virtual_size = 1234 - tmp = mock.sentinel.tmp - dummy = mock.sentinel.dummy - mock_temp.return_value.__enter__.side_effect = [tmp, dummy] - - with image_utils.TemporaryImages.fetch(image_service, ctxt, - image_id) 
as tmp_img: - self.assertEqual(tmp_img, tmp) - output = image_utils.fetch_to_volume_format(ctxt, image_service, - image_id, dest, - volume_format, - blocksize) - - self.assertIsNone(output) - self.assertEqual(2, mock_temp.call_count) - mock_info.assert_has_calls([ - mock.call(tmp, run_as_root=True), - mock.call(dummy, run_as_root=True), - mock.call(tmp, run_as_root=True)]) - mock_fetch.assert_called_once_with(ctxt, image_service, image_id, - tmp, None, None) - self.assertFalse(mock_repl_xen.called) - self.assertFalse(mock_copy.called) - mock_convert.assert_called_once_with(tmp, dest, volume_format, - run_as_root=True, - src_format='raw') - - @mock.patch('cinder.image.image_utils.convert_image') - @mock.patch('cinder.image.image_utils.volume_utils.copy_volume') - @mock.patch( - 'cinder.image.image_utils.replace_xenserver_image_with_coalesced_vhd') - @mock.patch('cinder.image.image_utils.is_xenserver_format', - return_value=False) - @mock.patch('cinder.image.image_utils.fetch') - @mock.patch('cinder.image.image_utils.qemu_img_info', - side_effect=processutils.ProcessExecutionError) - @mock.patch('cinder.image.image_utils.temporary_file') - @mock.patch('cinder.image.image_utils.CONF') - def test_no_qemu_img_and_is_raw(self, mock_conf, mock_temp, mock_info, - mock_fetch, mock_is_xen, mock_repl_xen, - mock_copy, mock_convert): - ctxt = mock.sentinel.context - image_service = mock.Mock(temp_images=None) - image_id = mock.sentinel.image_id - dest = mock.sentinel.dest - volume_format = mock.sentinel.volume_format - blocksize = mock.sentinel.blocksize - ctxt.user_id = user_id = mock.sentinel.user_id - project_id = mock.sentinel.project_id - size = 4321 - run_as_root = mock.sentinel.run_as_root - - tmp = mock_temp.return_value.__enter__.return_value - image_service.show.return_value = {'disk_format': 'raw', - 'size': 41126400} - image_size_m = math.ceil(float(41126400) / units.Mi) - - output = image_utils.fetch_to_volume_format( - ctxt, image_service, image_id, dest, 
volume_format, blocksize, - user_id=user_id, project_id=project_id, size=size, - run_as_root=run_as_root) - - self.assertIsNone(output) - image_service.show.assert_called_once_with(ctxt, image_id) - mock_temp.assert_called_once_with() - mock_info.assert_called_once_with(tmp, run_as_root=run_as_root) - mock_fetch.assert_called_once_with(ctxt, image_service, image_id, - tmp, user_id, project_id) - self.assertFalse(mock_repl_xen.called) - mock_copy.assert_called_once_with(tmp, dest, image_size_m, - blocksize) - self.assertFalse(mock_convert.called) - - @mock.patch('cinder.image.image_utils.convert_image') - @mock.patch('cinder.image.image_utils.volume_utils.copy_volume') - @mock.patch( - 'cinder.image.image_utils.replace_xenserver_image_with_coalesced_vhd') - @mock.patch('cinder.image.image_utils.is_xenserver_format', - return_value=False) - @mock.patch('cinder.image.image_utils.fetch') - @mock.patch('cinder.image.image_utils.qemu_img_info', - side_effect=processutils.ProcessExecutionError) - @mock.patch('cinder.image.image_utils.temporary_file') - @mock.patch('cinder.image.image_utils.CONF') - def test_no_qemu_img_not_raw(self, mock_conf, mock_temp, mock_info, - mock_fetch, mock_is_xen, mock_repl_xen, - mock_copy, mock_convert): - ctxt = mock.sentinel.context - image_service = mock.Mock() - image_id = mock.sentinel.image_id - dest = mock.sentinel.dest - volume_format = mock.sentinel.volume_format - blocksize = mock.sentinel.blocksize - user_id = mock.sentinel.user_id - project_id = mock.sentinel.project_id - size = 4321 - run_as_root = mock.sentinel.run_as_root - - tmp = mock_temp.return_value.__enter__.return_value - image_service.show.return_value = {'disk_format': 'not_raw'} - - self.assertRaises( - exception.ImageUnacceptable, - image_utils.fetch_to_volume_format, - ctxt, image_service, image_id, dest, volume_format, blocksize, - user_id=user_id, project_id=project_id, size=size, - run_as_root=run_as_root) - - image_service.show.assert_called_once_with(ctxt, 
image_id) - mock_temp.assert_called_once_with() - mock_info.assert_called_once_with(tmp, run_as_root=run_as_root) - self.assertFalse(mock_fetch.called) - self.assertFalse(mock_repl_xen.called) - self.assertFalse(mock_copy.called) - self.assertFalse(mock_convert.called) - - @mock.patch('cinder.image.image_utils.convert_image') - @mock.patch('cinder.image.image_utils.volume_utils.copy_volume') - @mock.patch( - 'cinder.image.image_utils.replace_xenserver_image_with_coalesced_vhd') - @mock.patch('cinder.image.image_utils.is_xenserver_format', - return_value=False) - @mock.patch('cinder.image.image_utils.fetch') - @mock.patch('cinder.image.image_utils.qemu_img_info', - side_effect=processutils.ProcessExecutionError) - @mock.patch('cinder.image.image_utils.temporary_file') - @mock.patch('cinder.image.image_utils.CONF') - def test_no_qemu_img_no_metadata(self, mock_conf, mock_temp, mock_info, - mock_fetch, mock_is_xen, mock_repl_xen, - mock_copy, mock_convert): - ctxt = mock.sentinel.context - image_service = mock.Mock(temp_images=None) - image_id = mock.sentinel.image_id - dest = mock.sentinel.dest - volume_format = mock.sentinel.volume_format - blocksize = mock.sentinel.blocksize - ctxt.user_id = user_id = mock.sentinel.user_id - project_id = mock.sentinel.project_id - size = 4321 - run_as_root = mock.sentinel.run_as_root - - tmp = mock_temp.return_value.__enter__.return_value - image_service.show.return_value = None - - self.assertRaises( - exception.ImageUnacceptable, - image_utils.fetch_to_volume_format, - ctxt, image_service, image_id, dest, volume_format, blocksize, - user_id=user_id, project_id=project_id, size=size, - run_as_root=run_as_root) - - image_service.show.assert_called_once_with(ctxt, image_id) - mock_temp.assert_called_once_with() - mock_info.assert_called_once_with(tmp, run_as_root=run_as_root) - self.assertFalse(mock_fetch.called) - self.assertFalse(mock_repl_xen.called) - self.assertFalse(mock_copy.called) - self.assertFalse(mock_convert.called) - - 
@mock.patch('cinder.image.image_utils.convert_image') - @mock.patch('cinder.image.image_utils.volume_utils.copy_volume') - @mock.patch( - 'cinder.image.image_utils.replace_xenserver_image_with_coalesced_vhd') - @mock.patch('cinder.image.image_utils.is_xenserver_format', - return_value=False) - @mock.patch('cinder.image.image_utils.fetch') - @mock.patch('cinder.image.image_utils.qemu_img_info') - @mock.patch('cinder.image.image_utils.temporary_file') - @mock.patch('cinder.image.image_utils.CONF') - def test_size_error(self, mock_conf, mock_temp, mock_info, mock_fetch, - mock_is_xen, mock_repl_xen, mock_copy, mock_convert): - ctxt = mock.sentinel.context - image_service = mock.Mock(temp_images=None) - image_id = mock.sentinel.image_id - dest = mock.sentinel.dest - volume_format = mock.sentinel.volume_format - blocksize = mock.sentinel.blocksize - ctxt.user_id = user_id = mock.sentinel.user_id - project_id = mock.sentinel.project_id - size = 1234 - run_as_root = mock.sentinel.run_as_root - - data = mock_info.return_value - data.file_format = volume_format - data.backing_file = None - data.virtual_size = int(1234.5 * units.Gi) - tmp = mock_temp.return_value.__enter__.return_value - - self.assertRaises( - exception.ImageUnacceptable, - image_utils.fetch_to_volume_format, - ctxt, image_service, image_id, dest, volume_format, blocksize, - user_id=user_id, project_id=project_id, size=size, - run_as_root=run_as_root) - - image_service.show.assert_called_once_with(ctxt, image_id) - mock_temp.assert_called_once_with() - mock_info.assert_has_calls([ - mock.call(tmp, run_as_root=run_as_root), - mock.call(tmp, run_as_root=run_as_root)]) - mock_fetch.assert_called_once_with(ctxt, image_service, image_id, - tmp, user_id, project_id) - self.assertFalse(mock_repl_xen.called) - self.assertFalse(mock_copy.called) - self.assertFalse(mock_convert.called) - - @mock.patch('psutil.disk_usage') - @mock.patch('cinder.image.image_utils.check_available_space') - 
@mock.patch('cinder.image.image_utils.convert_image') - @mock.patch('cinder.image.image_utils.volume_utils.copy_volume') - @mock.patch( - 'cinder.image.image_utils.replace_xenserver_image_with_coalesced_vhd') - @mock.patch('cinder.image.image_utils.is_xenserver_format', - return_value=False) - @mock.patch('cinder.image.image_utils.fetch') - @mock.patch('cinder.image.image_utils.qemu_img_info') - @mock.patch('cinder.image.image_utils.temporary_file') - @mock.patch('cinder.image.image_utils.CONF') - def test_check_no_available_space_error(self, mock_conf, mock_temp, - mock_info, mock_fetch, mock_is_xen, - mock_repl_xen, mock_copy, - mock_convert, mock_check_space, - mock_disk_usage): - ctxt = mock.sentinel.context - image_service = mock.Mock(temp_images=None) - image_id = mock.sentinel.image_id - dest = mock.sentinel.dest - volume_format = mock.sentinel.volume_format - blocksize = mock.sentinel.blocksize - ctxt.user_id = user_id = mock.sentinel.user_id - project_id = mock.sentinel.project_id - size = 1234 - run_as_root = mock.sentinel.run_as_root - - mock_disk_usage.return_value = units.Gi - 1 - - data = mock_info.return_value - data.file_format = volume_format - data.backing_file = None - data.virtual_size = units.Gi - - mock_check_space.side_effect = exception.ImageTooBig( - image_id='fake_image_id', reason='test') - - self.assertRaises( - exception.ImageTooBig, - image_utils.fetch_to_volume_format, - ctxt, image_service, image_id, dest, volume_format, blocksize, - user_id=user_id, project_id=project_id, size=size, - run_as_root=run_as_root) - - @mock.patch('cinder.image.image_utils.convert_image') - @mock.patch('cinder.image.image_utils.volume_utils.copy_volume') - @mock.patch( - 'cinder.image.image_utils.replace_xenserver_image_with_coalesced_vhd') - @mock.patch('cinder.image.image_utils.is_xenserver_format', - return_value=False) - @mock.patch('cinder.image.image_utils.fetch') - @mock.patch('cinder.image.image_utils.qemu_img_info') - 
@mock.patch('cinder.image.image_utils.temporary_file') - @mock.patch('cinder.image.image_utils.CONF') - def test_qemu_img_parse_error(self, mock_conf, mock_temp, mock_info, - mock_fetch, mock_is_xen, mock_repl_xen, - mock_copy, mock_convert): - ctxt = mock.sentinel.context - image_service = mock.Mock(temp_images=None) - image_id = mock.sentinel.image_id - dest = mock.sentinel.dest - volume_format = mock.sentinel.volume_format - blocksize = mock.sentinel.blocksize - ctxt.user_id = user_id = mock.sentinel.user_id - project_id = mock.sentinel.project_id - size = 4321 - run_as_root = mock.sentinel.run_as_root - - data = mock_info.return_value - data.file_format = None - data.backing_file = None - data.virtual_size = 1234 - tmp = mock_temp.return_value.__enter__.return_value - - self.assertRaises( - exception.ImageUnacceptable, - image_utils.fetch_to_volume_format, - ctxt, image_service, image_id, dest, volume_format, blocksize, - user_id=user_id, project_id=project_id, size=size, - run_as_root=run_as_root) - - image_service.show.assert_called_once_with(ctxt, image_id) - mock_temp.assert_called_once_with() - mock_info.assert_has_calls([ - mock.call(tmp, run_as_root=run_as_root), - mock.call(tmp, run_as_root=run_as_root)]) - mock_fetch.assert_called_once_with(ctxt, image_service, image_id, - tmp, user_id, project_id) - self.assertFalse(mock_repl_xen.called) - self.assertFalse(mock_copy.called) - self.assertFalse(mock_convert.called) - - @mock.patch('cinder.image.image_utils.convert_image') - @mock.patch('cinder.image.image_utils.volume_utils.copy_volume') - @mock.patch( - 'cinder.image.image_utils.replace_xenserver_image_with_coalesced_vhd') - @mock.patch('cinder.image.image_utils.is_xenserver_format', - return_value=False) - @mock.patch('cinder.image.image_utils.fetch') - @mock.patch('cinder.image.image_utils.qemu_img_info') - @mock.patch('cinder.image.image_utils.temporary_file') - @mock.patch('cinder.image.image_utils.CONF') - def test_backing_file_error(self, 
mock_conf, mock_temp, mock_info, - mock_fetch, mock_is_xen, mock_repl_xen, - mock_copy, mock_convert): - ctxt = mock.sentinel.context - image_service = mock.Mock(temp_images=None) - image_id = mock.sentinel.image_id - dest = mock.sentinel.dest - volume_format = mock.sentinel.volume_format - blocksize = mock.sentinel.blocksize - ctxt.user_id = user_id = mock.sentinel.user_id - project_id = mock.sentinel.project_id - size = 4321 - run_as_root = mock.sentinel.run_as_root - - data = mock_info.return_value - data.file_format = volume_format - data.backing_file = mock.sentinel.backing_file - data.virtual_size = 1234 - tmp = mock_temp.return_value.__enter__.return_value - - self.assertRaises( - exception.ImageUnacceptable, - image_utils.fetch_to_volume_format, - ctxt, image_service, image_id, dest, volume_format, blocksize, - user_id=user_id, project_id=project_id, size=size, - run_as_root=run_as_root) - - image_service.show.assert_called_once_with(ctxt, image_id) - mock_temp.assert_called_once_with() - mock_info.assert_has_calls([ - mock.call(tmp, run_as_root=run_as_root), - mock.call(tmp, run_as_root=run_as_root)]) - mock_fetch.assert_called_once_with(ctxt, image_service, image_id, - tmp, user_id, project_id) - self.assertFalse(mock_repl_xen.called) - self.assertFalse(mock_copy.called) - self.assertFalse(mock_convert.called) - - @mock.patch('cinder.image.image_utils.check_available_space') - @mock.patch('cinder.image.image_utils.convert_image') - @mock.patch('cinder.image.image_utils.volume_utils.copy_volume') - @mock.patch( - 'cinder.image.image_utils.replace_xenserver_image_with_coalesced_vhd') - @mock.patch('cinder.image.image_utils.is_xenserver_format', - return_value=True) - @mock.patch('cinder.image.image_utils.fetch') - @mock.patch('cinder.image.image_utils.qemu_img_info') - @mock.patch('cinder.image.image_utils.temporary_file') - @mock.patch('cinder.image.image_utils.CONF') - def test_xenserver_to_vhd(self, mock_conf, mock_temp, mock_info, - mock_fetch, 
mock_is_xen, mock_repl_xen, - mock_copy, mock_convert, mock_check_space): - ctxt = mock.sentinel.context - image_service = FakeImageService() - image_id = mock.sentinel.image_id - dest = mock.sentinel.dest - volume_format = mock.sentinel.volume_format - blocksize = mock.sentinel.blocksize - ctxt.user_id = user_id = mock.sentinel.user_id - project_id = mock.sentinel.project_id - size = 4321 - run_as_root = mock.sentinel.run_as_root - - data = mock_info.return_value - data.file_format = volume_format - data.backing_file = None - data.virtual_size = 1234 - tmp = mock_temp.return_value.__enter__.return_value - - output = image_utils.fetch_to_volume_format( - ctxt, image_service, image_id, dest, volume_format, blocksize, - user_id=user_id, project_id=project_id, size=size, - run_as_root=run_as_root) - - self.assertIsNone(output) - mock_temp.assert_called_once_with() - mock_info.assert_has_calls([ - mock.call(tmp, run_as_root=run_as_root), - mock.call(tmp, run_as_root=run_as_root)]) - mock_fetch.assert_called_once_with(ctxt, image_service, image_id, - tmp, user_id, project_id) - mock_repl_xen.assert_called_once_with(tmp) - self.assertFalse(mock_copy.called) - mock_convert.assert_called_once_with(tmp, dest, volume_format, - run_as_root=run_as_root, - src_format='raw') - - @mock.patch('cinder.image.image_utils.fetch') - @mock.patch('cinder.image.image_utils.qemu_img_info', - side_effect=processutils.ProcessExecutionError) - @mock.patch('cinder.image.image_utils.temporary_file') - @mock.patch('cinder.image.image_utils.CONF') - def test_no_qemu_img_fetch_verify_image(self, mock_conf, - mock_temp, mock_info, - mock_fetch): - ctxt = mock.sentinel.context - image_service = mock.Mock(temp_images=None) - image_id = mock.sentinel.image_id - dest = mock.sentinel.dest - ctxt.user_id = user_id = mock.sentinel.user_id - project_id = mock.sentinel.project_id - size = 4321 - run_as_root = mock.sentinel.run_as_root - - image_service.show.return_value = {'disk_format': 'raw', - 'size': 
41126400} - - image_utils.fetch_verify_image( - ctxt, image_service, image_id, dest, - user_id=user_id, project_id=project_id, size=size, - run_as_root=run_as_root) - - image_service.show.assert_called_once_with(ctxt, image_id) - mock_info.assert_called_once_with(dest, run_as_root=run_as_root) - mock_fetch.assert_called_once_with(ctxt, image_service, image_id, - dest, None, None) - - @mock.patch('cinder.image.image_utils.qemu_img_info', - side_effect=processutils.ProcessExecutionError) - @mock.patch('cinder.image.image_utils.temporary_file') - @mock.patch('cinder.image.image_utils.CONF') - def test_get_qemu_data_returns_none(self, mock_conf, mock_temp, mock_info): - image_id = mock.sentinel.image_id - dest = mock.sentinel.dest - run_as_root = mock.sentinel.run_as_root - disk_format_raw = True - has_meta = True - - output = image_utils.get_qemu_data(image_id, has_meta, - disk_format_raw, dest, - run_as_root=run_as_root) - - self.assertIsNone(output) - - @mock.patch('cinder.image.image_utils.qemu_img_info', - side_effect=processutils.ProcessExecutionError) - @mock.patch('cinder.image.image_utils.temporary_file') - @mock.patch('cinder.image.image_utils.CONF') - def test_get_qemu_data_with_image_meta_exception(self, mock_conf, - mock_temp, mock_info): - image_id = mock.sentinel.image_id - dest = mock.sentinel.dest - run_as_root = mock.sentinel.run_as_root - disk_format_raw = False - has_meta = True - self.assertRaises( - exception.ImageUnacceptable, - image_utils.get_qemu_data, image_id, has_meta, disk_format_raw, - dest, run_as_root=run_as_root) - - @mock.patch('cinder.image.image_utils.qemu_img_info', - side_effect=processutils.ProcessExecutionError) - @mock.patch('cinder.image.image_utils.temporary_file') - @mock.patch('cinder.image.image_utils.CONF') - def test_get_qemu_data_without_image_meta_except(self, mock_conf, - mock_temp, mock_info): - image_id = mock.sentinel.image_id - dest = mock.sentinel.dest - run_as_root = mock.sentinel.run_as_root - - disk_format_raw 
= False - has_meta = False - self.assertRaises( - exception.ImageUnacceptable, - image_utils.get_qemu_data, image_id, has_meta, disk_format_raw, - dest, run_as_root=run_as_root) - - -class TestXenserverUtils(test.TestCase): - def test_is_xenserver_format(self): - image_meta1 = {'disk_format': 'vhd', 'container_format': 'ovf'} - self.assertTrue(image_utils.is_xenserver_format(image_meta1)) - - image_meta2 = {'disk_format': 'test_disk_format', - 'container_format': 'test_cont_format'} - self.assertFalse(image_utils.is_xenserver_format(image_meta2)) - - @mock.patch('cinder.image.image_utils.utils.execute') - def test_extract_targz(self, mock_exec): - name = mock.sentinel.archive_name - target = mock.sentinel.target - - output = image_utils.extract_targz(name, target) - - mock_exec.assert_called_once_with('tar', '-xzf', name, '-C', target) - self.assertIsNone(output) - - -class TestVhdUtils(test.TestCase): - @mock.patch('cinder.image.image_utils.utils.execute') - def test_set_vhd_parent(self, mock_exec): - vhd_path = mock.sentinel.vhd_path - parentpath = mock.sentinel.parentpath - - output = image_utils.set_vhd_parent(vhd_path, parentpath) - - mock_exec.assert_called_once_with('vhd-util', 'modify', '-n', vhd_path, - '-p', parentpath) - self.assertIsNone(output) - - @mock.patch('cinder.image.image_utils.set_vhd_parent') - def test_fix_vhd_chain(self, mock_set_parent): - vhd_chain = (mock.sentinel.first, - mock.sentinel.second, - mock.sentinel.third, - mock.sentinel.fourth, - mock.sentinel.fifth) - - output = image_utils.fix_vhd_chain(vhd_chain) - - self.assertIsNone(output) - mock_set_parent.assert_has_calls([ - mock.call(mock.sentinel.first, mock.sentinel.second), - mock.call(mock.sentinel.second, mock.sentinel.third), - mock.call(mock.sentinel.third, mock.sentinel.fourth), - mock.call(mock.sentinel.fourth, mock.sentinel.fifth)]) - - @mock.patch('cinder.image.image_utils.utils.execute', - return_value=(98765.43210, mock.sentinel.error)) - def test_get_vhd_size(self, 
mock_exec): - vhd_path = mock.sentinel.vhd_path - - output = image_utils.get_vhd_size(vhd_path) - - mock_exec.assert_called_once_with('vhd-util', 'query', '-n', vhd_path, - '-v') - self.assertEqual(98765, output) - - @mock.patch('cinder.image.image_utils.utils.execute') - def test_resize_vhd(self, mock_exec): - vhd_path = mock.sentinel.vhd_path - size = 387549349 - journal = mock.sentinel.journal - - output = image_utils.resize_vhd(vhd_path, size, journal) - - self.assertIsNone(output) - mock_exec.assert_called_once_with('vhd-util', 'resize', '-n', vhd_path, - '-s', str(size), '-j', journal) - - @mock.patch('cinder.image.image_utils.utils.execute') - def test_coalesce_vhd(self, mock_exec): - vhd_path = mock.sentinel.vhd_path - - output = image_utils.coalesce_vhd(vhd_path) - - self.assertIsNone(output) - mock_exec.assert_called_once_with('vhd-util', 'coalesce', '-n', - vhd_path) - - @mock.patch('cinder.image.image_utils.temporary_dir') - @mock.patch('cinder.image.image_utils.coalesce_vhd') - @mock.patch('cinder.image.image_utils.resize_vhd') - @mock.patch('cinder.image.image_utils.get_vhd_size') - @mock.patch('cinder.image.image_utils.utils.execute') - def test_coalesce_chain(self, mock_exec, mock_size, mock_resize, - mock_coal, mock_temp): - vhd_chain = (mock.sentinel.first, - mock.sentinel.second, - mock.sentinel.third, - mock.sentinel.fourth, - mock.sentinel.fifth) - - output = image_utils.coalesce_chain(vhd_chain) - - self.assertEqual(mock.sentinel.fifth, output) - mock_size.assert_has_calls([ - mock.call(mock.sentinel.first), - mock.call(mock.sentinel.second), - mock.call(mock.sentinel.third), - mock.call(mock.sentinel.fourth)]) - mock_resize.assert_has_calls([ - mock.call(mock.sentinel.second, mock_size.return_value, mock.ANY), - mock.call(mock.sentinel.third, mock_size.return_value, mock.ANY), - mock.call(mock.sentinel.fourth, mock_size.return_value, mock.ANY), - mock.call(mock.sentinel.fifth, mock_size.return_value, mock.ANY)]) - mock_coal.assert_has_calls([ 
- mock.call(mock.sentinel.first), - mock.call(mock.sentinel.second), - mock.call(mock.sentinel.third), - mock.call(mock.sentinel.fourth)]) - - @mock.patch('cinder.image.image_utils.os.path') - def test_discover_vhd_chain(self, mock_path): - directory = '/some/test/directory' - mock_path.join.side_effect = lambda x, y: '/'.join((x, y)) - mock_path.exists.side_effect = (True, True, True, False) - - output = image_utils.discover_vhd_chain(directory) - - expected_output = ['/some/test/directory/0.vhd', - '/some/test/directory/1.vhd', - '/some/test/directory/2.vhd'] - self.assertEqual(expected_output, output) - - @mock.patch('cinder.image.image_utils.temporary_dir') - @mock.patch('cinder.image.image_utils.os.rename') - @mock.patch('cinder.image.image_utils.fileutils.delete_if_exists') - @mock.patch('cinder.image.image_utils.coalesce_chain') - @mock.patch('cinder.image.image_utils.fix_vhd_chain') - @mock.patch('cinder.image.image_utils.discover_vhd_chain') - @mock.patch('cinder.image.image_utils.extract_targz') - def test_replace_xenserver_image_with_coalesced_vhd( - self, mock_targz, mock_discover, mock_fix, mock_coal, mock_delete, - mock_rename, mock_temp): - image_file = mock.sentinel.image_file - tmp = mock_temp.return_value.__enter__.return_value - - output = image_utils.replace_xenserver_image_with_coalesced_vhd( - image_file) - - self.assertIsNone(output) - mock_targz.assert_called_once_with(image_file, tmp) - mock_discover.assert_called_once_with(tmp) - mock_fix.assert_called_once_with(mock_discover.return_value) - mock_coal.assert_called_once_with(mock_discover.return_value) - mock_delete.assert_called_once_with(image_file) - mock_rename.assert_called_once_with(mock_coal.return_value, image_file) - - -class TestCreateTemporaryFile(test.TestCase): - @mock.patch('cinder.image.image_utils.os.close') - @mock.patch('cinder.image.image_utils.CONF') - @mock.patch('cinder.image.image_utils.os.path.exists') - @mock.patch('cinder.image.image_utils.os.makedirs') - 
@mock.patch('cinder.image.image_utils.tempfile.mkstemp') - def test_create_temporary_file_no_dir(self, mock_mkstemp, mock_dirs, - mock_path, mock_conf, mock_close): - mock_conf.image_conversion_dir = None - fd = mock.sentinel.file_descriptor - path = mock.sentinel.absolute_pathname - mock_mkstemp.return_value = (fd, path) - - output = image_utils.create_temporary_file() - - self.assertEqual(path, output) - mock_mkstemp.assert_called_once_with(dir=None) - mock_close.assert_called_once_with(fd) - - @mock.patch('cinder.image.image_utils.os.close') - @mock.patch('cinder.image.image_utils.CONF') - @mock.patch('cinder.image.image_utils.os.path.exists', return_value=True) - @mock.patch('cinder.image.image_utils.os.makedirs') - @mock.patch('cinder.image.image_utils.tempfile.mkstemp') - def test_create_temporary_file_with_dir(self, mock_mkstemp, mock_dirs, - mock_path, mock_conf, mock_close): - conv_dir = mock.sentinel.image_conversion_dir - mock_conf.image_conversion_dir = conv_dir - fd = mock.sentinel.file_descriptor - path = mock.sentinel.absolute_pathname - mock_mkstemp.return_value = (fd, path) - - output = image_utils.create_temporary_file() - - self.assertEqual(path, output) - self.assertFalse(mock_dirs.called) - mock_mkstemp.assert_called_once_with(dir=conv_dir) - mock_close.assert_called_once_with(fd) - - @mock.patch('cinder.image.image_utils.os.close') - @mock.patch('cinder.image.image_utils.CONF') - @mock.patch('cinder.image.image_utils.os.path.exists', return_value=False) - @mock.patch('cinder.image.image_utils.os.makedirs') - @mock.patch('cinder.image.image_utils.tempfile.mkstemp') - def test_create_temporary_file_and_dir(self, mock_mkstemp, mock_dirs, - mock_path, mock_conf, mock_close): - conv_dir = mock.sentinel.image_conversion_dir - mock_conf.image_conversion_dir = conv_dir - fd = mock.sentinel.file_descriptor - path = mock.sentinel.absolute_pathname - mock_mkstemp.return_value = (fd, path) - - output = image_utils.create_temporary_file() - - 
self.assertEqual(path, output) - mock_dirs.assert_called_once_with(conv_dir) - mock_mkstemp.assert_called_once_with(dir=conv_dir) - mock_close.assert_called_once_with(fd) - - @mock.patch('cinder.image.image_utils.os.remove') - @mock.patch('cinder.image.image_utils.os.path.join') - @mock.patch('cinder.image.image_utils.CONF') - @mock.patch('cinder.image.image_utils.os.listdir') - @mock.patch('cinder.image.image_utils.os.path.exists', return_value=True) - def test_cleanup_temporary_file(self, mock_path, mock_listdir, mock_conf, - mock_join, mock_remove): - mock_listdir.return_value = ['tmphost@backend1', 'tmphost@backend2'] - conv_dir = mock.sentinel.image_conversion_dir - mock_conf.image_conversion_dir = conv_dir - mock_join.return_value = '/test/tmp/tmphost@backend1' - image_utils.cleanup_temporary_file('host@backend1') - mock_listdir.assert_called_once_with(conv_dir) - mock_remove.assert_called_once_with('/test/tmp/tmphost@backend1') - - @mock.patch('cinder.image.image_utils.os.remove') - @mock.patch('cinder.image.image_utils.os.listdir') - @mock.patch('cinder.image.image_utils.CONF') - @mock.patch('cinder.image.image_utils.os.path.exists', return_value=False) - def test_cleanup_temporary_file_with_not_exist_path(self, mock_path, - mock_conf, - mock_listdir, - mock_remove): - conv_dir = mock.sentinel.image_conversion_dir - mock_conf.image_conversion_dir = conv_dir - image_utils.cleanup_temporary_file('host@backend1') - self.assertFalse(mock_listdir.called) - self.assertFalse(mock_remove.called) - - @mock.patch('cinder.image.image_utils.os.remove') - @mock.patch('cinder.image.image_utils.os.path.join') - @mock.patch('cinder.image.image_utils.CONF') - @mock.patch('cinder.image.image_utils.os.listdir') - @mock.patch('cinder.image.image_utils.os.path.exists', return_value=True) - def test_cleanup_temporary_file_with_exception(self, mock_path, - mock_listdir, mock_conf, - mock_join, mock_remove): - mock_listdir.return_value = ['tmphost@backend1', 'tmphost@backend2'] - 
conv_dir = mock.sentinel.image_conversion_dir - mock_conf.image_conversion_dir = conv_dir - mock_join.return_value = '/test/tmp/tmphost@backend1' - mock_remove.side_effect = OSError - image_utils.cleanup_temporary_file('host@backend1') - mock_listdir.assert_called_once_with(conv_dir) - mock_remove.assert_called_once_with('/test/tmp/tmphost@backend1') - - -class TestTemporaryFileContextManager(test.TestCase): - @mock.patch('cinder.image.image_utils.create_temporary_file', - return_value=mock.sentinel.temporary_file) - @mock.patch('cinder.image.image_utils.fileutils.delete_if_exists') - def test_temporary_file(self, mock_delete, mock_create): - with image_utils.temporary_file() as tmp_file: - self.assertEqual(mock.sentinel.temporary_file, tmp_file) - self.assertFalse(mock_delete.called) - mock_delete.assert_called_once_with(mock.sentinel.temporary_file) - - -class TestImageUtils(test.TestCase): - def test_get_virtual_size(self): - image_id = fake.IMAGE_ID - virtual_size = 1073741824 - volume_size = 2 - virt_size = image_utils.check_virtual_size(virtual_size, - volume_size, - image_id) - self.assertEqual(1, virt_size) - - def test_get_bigger_virtual_size(self): - image_id = fake.IMAGE_ID - virtual_size = 3221225472 - volume_size = 2 - self.assertRaises(exception.ImageUnacceptable, - image_utils.check_virtual_size, - virtual_size, - volume_size, - image_id) diff --git a/cinder/tests/unit/test_manager.py b/cinder/tests/unit/test_manager.py deleted file mode 100644 index f51e3cc34..000000000 --- a/cinder/tests/unit/test_manager.py +++ /dev/null @@ -1,57 +0,0 @@ -# Copyright (c) 2017 Red Hat, Inc. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. 
You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import mock -import six - -from cinder import manager -from cinder import objects -from cinder import test - - -class FakeManager(manager.CleanableManager): - def __init__(self, service_id=None, keep_after_clean=False): - if service_id: - self.service_id = service_id - self.keep_after_clean = keep_after_clean - - def _do_cleanup(self, ctxt, vo_resource): - vo_resource.status += '_cleaned' - vo_resource.save() - return self.keep_after_clean - - -class TestManager(test.TestCase): - @mock.patch('cinder.utils.set_log_levels') - def test_set_log_levels(self, set_log_mock): - service = manager.Manager() - log_request = objects.LogLevel(prefix='sqlalchemy.', level='debug') - service.set_log_levels(mock.sentinel.context, log_request) - set_log_mock.assert_called_once_with(log_request.prefix, - log_request.level) - - @mock.patch('cinder.utils.get_log_levels') - def test_get_log_levels(self, get_log_mock): - get_log_mock.return_value = {'cinder': 'DEBUG', 'cinder.api': 'ERROR'} - service = manager.Manager() - log_request = objects.LogLevel(prefix='sqlalchemy.') - result = service.get_log_levels(mock.sentinel.context, log_request) - get_log_mock.assert_called_once_with(log_request.prefix) - - expected = (objects.LogLevel(prefix='cinder', level='DEBUG'), - objects.LogLevel(prefix='cinder.api', level='ERROR')) - - self.assertEqual(set(six.text_type(r) for r in result.objects), - set(six.text_type(e) for e in expected)) diff --git a/cinder/tests/unit/test_migrations.py b/cinder/tests/unit/test_migrations.py deleted file mode 100644 index 6f78cfc85..000000000 --- 
a/cinder/tests/unit/test_migrations.py +++ /dev/null @@ -1,1310 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -""" -Tests for database migrations. This test case reads the configuration -file test_migrations.conf for database connection settings -to use in the tests. For each connection found in the config file, -the test case runs a series of test cases to ensure that migrations work -properly both upgrading and downgrading, and that no data loss occurs -if possible. -""" - -import os -import uuid - -import fixtures -from migrate.versioning import api as migration_api -from migrate.versioning import repository -from oslo_db.sqlalchemy import test_base -from oslo_db.sqlalchemy import test_migrations -from oslo_db.sqlalchemy import utils as db_utils -import sqlalchemy -from sqlalchemy.engine import reflection -from sqlalchemy import func, select - -from cinder.db import migration -import cinder.db.sqlalchemy.migrate_repo -from cinder.volume import group_types as volume_group_types - - -class MigrationsMixin(test_migrations.WalkVersionsMixin): - """Test sqlalchemy-migrate migrations.""" - - BOOL_TYPE = sqlalchemy.types.BOOLEAN - TIME_TYPE = sqlalchemy.types.DATETIME - INTEGER_TYPE = sqlalchemy.types.INTEGER - VARCHAR_TYPE = sqlalchemy.types.VARCHAR - TEXT_TYPE = sqlalchemy.types.Text - - @property - def INIT_VERSION(self): - return migration.INIT_VERSION - - @property - def REPOSITORY(self): - migrate_file = cinder.db.sqlalchemy.migrate_repo.__file__ - return 
repository.Repository( - os.path.abspath(os.path.dirname(migrate_file))) - - @property - def migration_api(self): - return migration_api - - @property - def migrate_engine(self): - return self.engine - - def get_table_ref(self, engine, name, metadata): - metadata.bind = engine - return sqlalchemy.Table(name, metadata, autoload=True) - - class BannedDBSchemaOperations(fixtures.Fixture): - """Ban some operations for migrations""" - def __init__(self, banned_resources=None): - super(MigrationsMixin.BannedDBSchemaOperations, self).__init__() - self._banned_resources = banned_resources or [] - - @staticmethod - def _explode(resource, op): - print('%s.%s()' % (resource, op)) # noqa - raise Exception( - 'Operation %s.%s() is not allowed in a database migration' % ( - resource, op)) - - def setUp(self): - super(MigrationsMixin.BannedDBSchemaOperations, self).setUp() - for thing in self._banned_resources: - self.useFixture(fixtures.MonkeyPatch( - 'sqlalchemy.%s.drop' % thing, - lambda *a, **k: self._explode(thing, 'drop'))) - self.useFixture(fixtures.MonkeyPatch( - 'sqlalchemy.%s.alter' % thing, - lambda *a, **k: self._explode(thing, 'alter'))) - - def migrate_up(self, version, with_data=False): - # NOTE(dulek): This is a list of migrations where we allow dropping - # things. The rules for adding things here are very very specific. - # Insight on how to drop things from the DB in a backward-compatible - # manner is provided in Cinder's developer documentation. - # Reviewers: DO NOT ALLOW THINGS TO BE ADDED HERE WITHOUT CARE - exceptions = [ - # NOTE(dulek): 62 alters the column type from boolean to integer to - # fix the bug 1518363. If we've followed the guidelines for live - # schema upgrades we would end up either waiting 3 releases to fix - # a simple bug or trigger a rebuild index operation in migration - # (because constraint was impossible to delete without deleting - # other foreign key constraints). Either way it's harsh... 
We've - # decided to go with alter to minimise upgrade impact. The only - # consequence for deployments running recent MySQL is inability - # to perform volume-type-access modifications while running this - # migration. - 62, - # NOTE(dulek): 66 sets reservations.usage_id to nullable. This is - # 100% backward compatible and according to MySQL docs such ALTER - # is performed with the same restrictions as column addition, which - # we of course allow. - 66, - # NOTE(dulek): 73 drops tables and columns we've stopped using a - # release ago. - 73, - # NOTE(ameade): 87 sets messages.request_id to nullable. This - # should be safe for the same reason as migration 66. - 87, - # NOTE : 104 modifies size of messages.project_id to 255. - # This should be safe for the same reason as migration 87. - 104, - ] - - # NOTE(dulek): We only started requiring things be additive in - # Mitaka, so ignore all migrations before that point. - MITAKA_START = 61 - - if version >= MITAKA_START and version not in exceptions: - banned = ['Table', 'Column'] - else: - banned = None - with MigrationsMixin.BannedDBSchemaOperations(banned): - super(MigrationsMixin, self).migrate_up(version, with_data) - - def _pre_upgrade_004(self, engine): - """Change volume types to UUID """ - data = { - 'volumes': [{'id': str(uuid.uuid4()), 'host': 'test1', - 'volume_type_id': 1}, - {'id': str(uuid.uuid4()), 'host': 'test2', - 'volume_type_id': 1}, - {'id': str(uuid.uuid4()), 'host': 'test3', - 'volume_type_id': 3}, - ], - 'volume_types': [{'name': 'vtype1'}, - {'name': 'vtype2'}, - {'name': 'vtype3'}, - ], - 'volume_type_extra_specs': [{'volume_type_id': 1, - 'key': 'v1', - 'value': 'hotep', - }, - {'volume_type_id': 1, - 'key': 'v2', - 'value': 'bending rodrigez', - }, - {'volume_type_id': 2, - 'key': 'v3', - 'value': 'bending rodrigez', - }, - ]} - - volume_types = db_utils.get_table(engine, 'volume_types') - for vtype in data['volume_types']: - r = volume_types.insert().values(vtype).execute() - 
vtype['id'] = r.inserted_primary_key[0] - - volume_type_es = db_utils.get_table(engine, 'volume_type_extra_specs') - for vtes in data['volume_type_extra_specs']: - r = volume_type_es.insert().values(vtes).execute() - vtes['id'] = r.inserted_primary_key[0] - - volumes = db_utils.get_table(engine, 'volumes') - for vol in data['volumes']: - r = volumes.insert().values(vol).execute() - vol['id'] = r.inserted_primary_key[0] - - return data - - def _check_004(self, engine, data): - volumes = db_utils.get_table(engine, 'volumes') - v1 = volumes.select(volumes.c.id == - data['volumes'][0]['id'] - ).execute().first() - v2 = volumes.select(volumes.c.id == - data['volumes'][1]['id'] - ).execute().first() - v3 = volumes.select(volumes.c.id == - data['volumes'][2]['id'] - ).execute().first() - - volume_types = db_utils.get_table(engine, 'volume_types') - vt1 = volume_types.select(volume_types.c.name == - data['volume_types'][0]['name'] - ).execute().first() - vt2 = volume_types.select(volume_types.c.name == - data['volume_types'][1]['name'] - ).execute().first() - vt3 = volume_types.select(volume_types.c.name == - data['volume_types'][2]['name'] - ).execute().first() - - vtes = db_utils.get_table(engine, 'volume_type_extra_specs') - vtes1 = vtes.select(vtes.c.key == - data['volume_type_extra_specs'][0]['key'] - ).execute().first() - vtes2 = vtes.select(vtes.c.key == - data['volume_type_extra_specs'][1]['key'] - ).execute().first() - vtes3 = vtes.select(vtes.c.key == - data['volume_type_extra_specs'][2]['key'] - ).execute().first() - - self.assertEqual(v1['volume_type_id'], vt1['id']) - self.assertEqual(v2['volume_type_id'], vt1['id']) - self.assertEqual(v3['volume_type_id'], vt3['id']) - - self.assertEqual(vtes1['volume_type_id'], vt1['id']) - self.assertEqual(vtes2['volume_type_id'], vt1['id']) - self.assertEqual(vtes3['volume_type_id'], vt2['id']) - - def _check_005(self, engine, data): - """Test that adding source_volid column works correctly.""" - volumes = 
db_utils.get_table(engine, 'volumes') - self.assertIsInstance(volumes.c.source_volid.type, - self.VARCHAR_TYPE) - - def _check_006(self, engine, data): - snapshots = db_utils.get_table(engine, 'snapshots') - self.assertIsInstance(snapshots.c.provider_location.type, - self.VARCHAR_TYPE) - - def _check_007(self, engine, data): - snapshots = db_utils.get_table(engine, 'snapshots') - fkey, = snapshots.c.volume_id.foreign_keys - - self.assertIsNotNone(fkey) - - def _pre_upgrade_008(self, engine): - self.assertFalse(engine.dialect.has_table(engine.connect(), - "backups")) - - def _check_008(self, engine, data): - """Test that adding and removing the backups table works correctly.""" - - self.assertTrue(engine.dialect.has_table(engine.connect(), - "backups")) - backups = db_utils.get_table(engine, 'backups') - - self.assertIsInstance(backups.c.created_at.type, - self.TIME_TYPE) - self.assertIsInstance(backups.c.updated_at.type, - self.TIME_TYPE) - self.assertIsInstance(backups.c.deleted_at.type, - self.TIME_TYPE) - self.assertIsInstance(backups.c.deleted.type, - self.BOOL_TYPE) - self.assertIsInstance(backups.c.id.type, - self.VARCHAR_TYPE) - self.assertIsInstance(backups.c.volume_id.type, - self.VARCHAR_TYPE) - self.assertIsInstance(backups.c.user_id.type, - self.VARCHAR_TYPE) - self.assertIsInstance(backups.c.project_id.type, - self.VARCHAR_TYPE) - self.assertIsInstance(backups.c.host.type, - self.VARCHAR_TYPE) - self.assertIsInstance(backups.c.availability_zone.type, - self.VARCHAR_TYPE) - self.assertIsInstance(backups.c.display_name.type, - self.VARCHAR_TYPE) - self.assertIsInstance(backups.c.display_description.type, - self.VARCHAR_TYPE) - self.assertIsInstance(backups.c.container.type, - self.VARCHAR_TYPE) - self.assertIsInstance(backups.c.status.type, - self.VARCHAR_TYPE) - self.assertIsInstance(backups.c.fail_reason.type, - self.VARCHAR_TYPE) - self.assertIsInstance(backups.c.service_metadata.type, - self.VARCHAR_TYPE) - 
self.assertIsInstance(backups.c.service.type, - self.VARCHAR_TYPE) - self.assertIsInstance(backups.c.size.type, - self.INTEGER_TYPE) - self.assertIsInstance(backups.c.object_count.type, - self.INTEGER_TYPE) - - def _check_009(self, engine, data): - """Test adding snapshot_metadata table works correctly.""" - self.assertTrue(engine.dialect.has_table(engine.connect(), - "snapshot_metadata")) - snapshot_metadata = db_utils.get_table(engine, 'snapshot_metadata') - - self.assertIsInstance(snapshot_metadata.c.created_at.type, - self.TIME_TYPE) - self.assertIsInstance(snapshot_metadata.c.updated_at.type, - self.TIME_TYPE) - self.assertIsInstance(snapshot_metadata.c.deleted_at.type, - self.TIME_TYPE) - self.assertIsInstance(snapshot_metadata.c.deleted.type, - self.BOOL_TYPE) - self.assertIsInstance(snapshot_metadata.c.deleted.type, - self.BOOL_TYPE) - self.assertIsInstance(snapshot_metadata.c.id.type, - self.INTEGER_TYPE) - self.assertIsInstance(snapshot_metadata.c.snapshot_id.type, - self.VARCHAR_TYPE) - self.assertIsInstance(snapshot_metadata.c.key.type, - self.VARCHAR_TYPE) - self.assertIsInstance(snapshot_metadata.c.value.type, - self.VARCHAR_TYPE) - - def _check_010(self, engine, data): - """Test adding transfers table works correctly.""" - self.assertTrue(engine.dialect.has_table(engine.connect(), - "transfers")) - transfers = db_utils.get_table(engine, 'transfers') - - self.assertIsInstance(transfers.c.created_at.type, - self.TIME_TYPE) - self.assertIsInstance(transfers.c.updated_at.type, - self.TIME_TYPE) - self.assertIsInstance(transfers.c.deleted_at.type, - self.TIME_TYPE) - self.assertIsInstance(transfers.c.deleted.type, - self.BOOL_TYPE) - self.assertIsInstance(transfers.c.id.type, - self.VARCHAR_TYPE) - self.assertIsInstance(transfers.c.volume_id.type, - self.VARCHAR_TYPE) - self.assertIsInstance(transfers.c.display_name.type, - self.VARCHAR_TYPE) - self.assertIsInstance(transfers.c.salt.type, - self.VARCHAR_TYPE) - 
self.assertIsInstance(transfers.c.crypt_hash.type, - self.VARCHAR_TYPE) - self.assertIsInstance(transfers.c.expires_at.type, - self.TIME_TYPE) - - def _check_011(self, engine, data): - """Test adding transfers table works correctly.""" - volumes = db_utils.get_table(engine, 'volumes') - self.assertIn('bootable', volumes.c) - self.assertIsInstance(volumes.c.bootable.type, - self.BOOL_TYPE) - - def _check_012(self, engine, data): - """Test that adding attached_host column works correctly.""" - volumes = db_utils.get_table(engine, 'volumes') - self.assertIsInstance(volumes.c.attached_host.type, - self.VARCHAR_TYPE) - - def _check_013(self, engine, data): - """Test that adding provider_geometry column works correctly.""" - volumes = db_utils.get_table(engine, 'volumes') - self.assertIsInstance(volumes.c.provider_geometry.type, - self.VARCHAR_TYPE) - - def _check_014(self, engine, data): - """Test that adding _name_id column works correctly.""" - volumes = db_utils.get_table(engine, 'volumes') - self.assertIsInstance(volumes.c._name_id.type, - self.VARCHAR_TYPE) - - def _check_015(self, engine, data): - """Test removing migrations table works correctly.""" - self.assertFalse(engine.dialect.has_table(engine.connect(), - "migrations")) - - def _check_016(self, engine, data): - """Test that dropping xen storage manager tables works correctly.""" - self.assertFalse(engine.dialect.has_table(engine.connect(), - 'sm_flavors')) - self.assertFalse(engine.dialect.has_table(engine.connect(), - 'sm_backend_config')) - self.assertFalse(engine.dialect.has_table(engine.connect(), - 'sm_volume')) - - def _check_017(self, engine, data): - """Test that added encryption information works correctly.""" - # encryption key UUID - volumes = db_utils.get_table(engine, 'volumes') - self.assertIn('encryption_key_id', volumes.c) - self.assertIsInstance(volumes.c.encryption_key_id.type, - self.VARCHAR_TYPE) - - snapshots = db_utils.get_table(engine, 'snapshots') - 
self.assertIn('encryption_key_id', snapshots.c) - self.assertIsInstance(snapshots.c.encryption_key_id.type, - self.VARCHAR_TYPE) - self.assertIn('volume_type_id', snapshots.c) - self.assertIsInstance(snapshots.c.volume_type_id.type, - self.VARCHAR_TYPE) - - # encryption types table - encryption = db_utils.get_table(engine, 'encryption') - self.assertIsInstance(encryption.c.volume_type_id.type, - self.VARCHAR_TYPE) - self.assertIsInstance(encryption.c.cipher.type, - self.VARCHAR_TYPE) - self.assertIsInstance(encryption.c.key_size.type, - self.INTEGER_TYPE) - self.assertIsInstance(encryption.c.provider.type, - self.VARCHAR_TYPE) - - def _check_018(self, engine, data): - """Test that added qos_specs table works correctly.""" - self.assertTrue(engine.dialect.has_table( - engine.connect(), "quality_of_service_specs")) - qos_specs = db_utils.get_table(engine, 'quality_of_service_specs') - self.assertIsInstance(qos_specs.c.created_at.type, - self.TIME_TYPE) - self.assertIsInstance(qos_specs.c.updated_at.type, - self.TIME_TYPE) - self.assertIsInstance(qos_specs.c.deleted_at.type, - self.TIME_TYPE) - self.assertIsInstance(qos_specs.c.deleted.type, - self.BOOL_TYPE) - self.assertIsInstance(qos_specs.c.id.type, - self.VARCHAR_TYPE) - self.assertIsInstance(qos_specs.c.specs_id.type, - self.VARCHAR_TYPE) - self.assertIsInstance(qos_specs.c.key.type, - self.VARCHAR_TYPE) - self.assertIsInstance(qos_specs.c.value.type, - self.VARCHAR_TYPE) - - def _check_019(self, engine, data): - """Test that adding migration_status column works correctly.""" - volumes = db_utils.get_table(engine, 'volumes') - self.assertIsInstance(volumes.c.migration_status.type, - self.VARCHAR_TYPE) - - def _check_020(self, engine, data): - """Test adding volume_admin_metadata table works correctly.""" - self.assertTrue(engine.dialect.has_table(engine.connect(), - "volume_admin_metadata")) - volume_admin_metadata = db_utils.get_table(engine, - 'volume_admin_metadata') - - 
self.assertIsInstance(volume_admin_metadata.c.created_at.type, - self.TIME_TYPE) - self.assertIsInstance(volume_admin_metadata.c.updated_at.type, - self.TIME_TYPE) - self.assertIsInstance(volume_admin_metadata.c.deleted_at.type, - self.TIME_TYPE) - self.assertIsInstance(volume_admin_metadata.c.deleted.type, - self.BOOL_TYPE) - self.assertIsInstance(volume_admin_metadata.c.id.type, - self.INTEGER_TYPE) - self.assertIsInstance(volume_admin_metadata.c.volume_id.type, - self.VARCHAR_TYPE) - self.assertIsInstance(volume_admin_metadata.c.key.type, - self.VARCHAR_TYPE) - self.assertIsInstance(volume_admin_metadata.c.value.type, - self.VARCHAR_TYPE) - - def _verify_quota_defaults(self, engine): - quota_class_metadata = db_utils.get_table(engine, 'quota_classes') - - num_defaults = quota_class_metadata.count().\ - where(quota_class_metadata.c.class_name == 'default').\ - execute().scalar() - - self.assertEqual(3, num_defaults) - - def _check_021(self, engine, data): - """Test adding default data for quota classes works correctly.""" - self._verify_quota_defaults(engine) - - def _check_022(self, engine, data): - """Test that adding disabled_reason column works correctly.""" - services = db_utils.get_table(engine, 'services') - self.assertIsInstance(services.c.disabled_reason.type, - self.VARCHAR_TYPE) - - def _check_023(self, engine, data): - """Test that adding reservations index works correctly.""" - reservations = db_utils.get_table(engine, 'reservations') - index_columns = [] - for idx in reservations.indexes: - if idx.name == 'reservations_deleted_expire_idx': - index_columns = idx.columns.keys() - break - - self.assertEqual(sorted(['deleted', 'expire']), - sorted(index_columns)) - - def _check_024(self, engine, data): - """Test adding replication columns to volume table.""" - volumes = db_utils.get_table(engine, 'volumes') - self.assertIsInstance(volumes.c.replication_status.type, - self.VARCHAR_TYPE) - self.assertIsInstance(volumes.c.replication_extended_status.type, 
- self.VARCHAR_TYPE) - self.assertIsInstance(volumes.c.replication_driver_data.type, - self.VARCHAR_TYPE) - - def _check_025(self, engine, data): - """Test adding table and columns for consistencygroups.""" - # Test consistencygroup_id is in Table volumes - metadata = sqlalchemy.MetaData() - volumes = self.get_table_ref(engine, 'volumes', metadata) - self.assertIsInstance(volumes.c.consistencygroup_id.type, - self.VARCHAR_TYPE) - - # Test cgsnapshot_id is in Table snapshots - snapshots = self.get_table_ref(engine, 'snapshots', metadata) - self.assertIsInstance(snapshots.c.cgsnapshot_id.type, - self.VARCHAR_TYPE) - - # Test Table consistencygroups exists - self.assertTrue(engine.dialect.has_table(engine.connect(), - "consistencygroups")) - consistencygroups = self.get_table_ref(engine, - 'consistencygroups', - metadata) - self.assertIsInstance(consistencygroups.c.created_at.type, - self.TIME_TYPE) - self.assertIsInstance(consistencygroups.c.updated_at.type, - self.TIME_TYPE) - self.assertIsInstance(consistencygroups.c.deleted_at.type, - self.TIME_TYPE) - self.assertIsInstance(consistencygroups.c.deleted.type, - self.BOOL_TYPE) - self.assertIsInstance(consistencygroups.c.id.type, - self.VARCHAR_TYPE) - self.assertIsInstance(consistencygroups.c.user_id.type, - self.VARCHAR_TYPE) - self.assertIsInstance(consistencygroups.c.project_id.type, - self.VARCHAR_TYPE) - self.assertIsInstance(consistencygroups.c.host.type, - self.VARCHAR_TYPE) - self.assertIsInstance(consistencygroups.c.availability_zone.type, - self.VARCHAR_TYPE) - self.assertIsInstance(consistencygroups.c.name.type, - self.VARCHAR_TYPE) - self.assertIsInstance(consistencygroups.c.description.type, - self.VARCHAR_TYPE) - self.assertIsInstance(consistencygroups.c.volume_type_id.type, - self.VARCHAR_TYPE) - self.assertIsInstance(consistencygroups.c.status.type, - self.VARCHAR_TYPE) - - # Test Table cgsnapshots exists - self.assertTrue(engine.dialect.has_table(engine.connect(), - "cgsnapshots")) - cgsnapshots = 
self.get_table_ref(engine, - 'cgsnapshots', - metadata) - - self.assertIsInstance(cgsnapshots.c.created_at.type, - self.TIME_TYPE) - self.assertIsInstance(cgsnapshots.c.updated_at.type, - self.TIME_TYPE) - self.assertIsInstance(cgsnapshots.c.deleted_at.type, - self.TIME_TYPE) - self.assertIsInstance(cgsnapshots.c.deleted.type, - self.BOOL_TYPE) - self.assertIsInstance(cgsnapshots.c.id.type, - self.VARCHAR_TYPE) - self.assertIsInstance(cgsnapshots.c.user_id.type, - self.VARCHAR_TYPE) - self.assertIsInstance(cgsnapshots.c.project_id.type, - self.VARCHAR_TYPE) - self.assertIsInstance(cgsnapshots.c.consistencygroup_id.type, - self.VARCHAR_TYPE) - self.assertIsInstance(cgsnapshots.c.name.type, - self.VARCHAR_TYPE) - self.assertIsInstance(cgsnapshots.c.description.type, - self.VARCHAR_TYPE) - self.assertIsInstance(cgsnapshots.c.status.type, - self.VARCHAR_TYPE) - - # Verify foreign keys are created - fkey, = volumes.c.consistencygroup_id.foreign_keys - self.assertEqual(consistencygroups.c.id, fkey.column) - self.assertEqual(1, len(volumes.foreign_keys)) - - fkey, = snapshots.c.cgsnapshot_id.foreign_keys - self.assertEqual(cgsnapshots.c.id, fkey.column) - fkey, = snapshots.c.volume_id.foreign_keys - self.assertEqual(volumes.c.id, fkey.column) - # 2 foreign keys in Table snapshots - self.assertEqual(2, len(snapshots.foreign_keys)) - - def _pre_upgrade_026(self, engine): - """Test adding default data for consistencygroups quota class.""" - quota_class_metadata = db_utils.get_table(engine, 'quota_classes') - - num_defaults = quota_class_metadata.count().\ - where(quota_class_metadata.c.class_name == 'default').\ - execute().scalar() - - self.assertEqual(3, num_defaults) - - def _check_026(self, engine, data): - quota_class_metadata = db_utils.get_table(engine, 'quota_classes') - num_defaults = quota_class_metadata.count().\ - where(quota_class_metadata.c.class_name == 'default').\ - execute().scalar() - - self.assertEqual(4, num_defaults) - - def _check_032(self, engine, 
data): - """Test adding volume_type_projects table works correctly.""" - volume_type_projects = db_utils.get_table(engine, - 'volume_type_projects') - self.assertIsInstance(volume_type_projects.c.created_at.type, - self.TIME_TYPE) - self.assertIsInstance(volume_type_projects.c.updated_at.type, - self.TIME_TYPE) - self.assertIsInstance(volume_type_projects.c.deleted_at.type, - self.TIME_TYPE) - self.assertIsInstance(volume_type_projects.c.deleted.type, - self.BOOL_TYPE) - self.assertIsInstance(volume_type_projects.c.id.type, - self.INTEGER_TYPE) - self.assertIsInstance(volume_type_projects.c.volume_type_id.type, - self.VARCHAR_TYPE) - self.assertIsInstance(volume_type_projects.c.project_id.type, - self.VARCHAR_TYPE) - - volume_types = db_utils.get_table(engine, 'volume_types') - self.assertIsInstance(volume_types.c.is_public.type, - self.BOOL_TYPE) - - def _check_033(self, engine, data): - """Test adding encryption_id column to encryption table.""" - encryptions = db_utils.get_table(engine, 'encryption') - self.assertIsInstance(encryptions.c.encryption_id.type, - self.VARCHAR_TYPE) - - def _check_034(self, engine, data): - """Test adding description columns to volume_types table.""" - volume_types = db_utils.get_table(engine, 'volume_types') - self.assertIsInstance(volume_types.c.description.type, - self.VARCHAR_TYPE) - - def _check_035(self, engine, data): - volumes = db_utils.get_table(engine, 'volumes') - self.assertIsInstance(volumes.c.provider_id.type, - self.VARCHAR_TYPE) - - def _check_036(self, engine, data): - snapshots = db_utils.get_table(engine, 'snapshots') - self.assertIsInstance(snapshots.c.provider_id.type, - self.VARCHAR_TYPE) - - def _check_037(self, engine, data): - consistencygroups = db_utils.get_table(engine, 'consistencygroups') - self.assertIsInstance(consistencygroups.c.cgsnapshot_id.type, - self.VARCHAR_TYPE) - - def _check_038(self, engine, data): - """Test adding and removing driver_initiator_data table.""" - - has_table = 
engine.dialect.has_table(engine.connect(), - "driver_initiator_data") - self.assertTrue(has_table) - - private_data = db_utils.get_table( - engine, - 'driver_initiator_data' - ) - - self.assertIsInstance(private_data.c.created_at.type, - self.TIME_TYPE) - self.assertIsInstance(private_data.c.updated_at.type, - self.TIME_TYPE) - self.assertIsInstance(private_data.c.id.type, - self.INTEGER_TYPE) - self.assertIsInstance(private_data.c.initiator.type, - self.VARCHAR_TYPE) - self.assertIsInstance(private_data.c.namespace.type, - self.VARCHAR_TYPE) - self.assertIsInstance(private_data.c.key.type, - self.VARCHAR_TYPE) - self.assertIsInstance(private_data.c.value.type, - self.VARCHAR_TYPE) - - def _check_039(self, engine, data): - backups = db_utils.get_table(engine, 'backups') - self.assertIsInstance(backups.c.parent_id.type, - self.VARCHAR_TYPE) - - def _check_040(self, engine, data): - volumes = db_utils.get_table(engine, 'volumes') - self.assertNotIn('instance_uuid', volumes.c) - self.assertNotIn('attached_host', volumes.c) - self.assertNotIn('attach_time', volumes.c) - self.assertNotIn('mountpoint', volumes.c) - self.assertIsInstance(volumes.c.multiattach.type, - self.BOOL_TYPE) - - attachments = db_utils.get_table(engine, 'volume_attachment') - self.assertIsInstance(attachments.c.attach_mode.type, - self.VARCHAR_TYPE) - self.assertIsInstance(attachments.c.instance_uuid.type, - self.VARCHAR_TYPE) - self.assertIsInstance(attachments.c.attached_host.type, - self.VARCHAR_TYPE) - self.assertIsInstance(attachments.c.mountpoint.type, - self.VARCHAR_TYPE) - self.assertIsInstance(attachments.c.attach_status.type, - self.VARCHAR_TYPE) - - def _check_041(self, engine, data): - """Test that adding modified_at column works correctly.""" - services = db_utils.get_table(engine, 'services') - self.assertIsInstance(services.c.modified_at.type, - self.TIME_TYPE) - - def _check_048(self, engine, data): - quotas = db_utils.get_table(engine, 'quotas') - 
self.assertIsInstance(quotas.c.allocated.type, - self.INTEGER_TYPE) - - def _check_049(self, engine, data): - backups = db_utils.get_table(engine, 'backups') - self.assertIsInstance(backups.c.temp_volume_id.type, - self.VARCHAR_TYPE) - self.assertIsInstance(backups.c.temp_snapshot_id.type, - self.VARCHAR_TYPE) - - def _check_050(self, engine, data): - volumes = db_utils.get_table(engine, 'volumes') - self.assertIsInstance(volumes.c.previous_status.type, - self.VARCHAR_TYPE) - - def _check_051(self, engine, data): - consistencygroups = db_utils.get_table(engine, 'consistencygroups') - self.assertIsInstance(consistencygroups.c.source_cgid.type, - self.VARCHAR_TYPE) - - def _check_052(self, engine, data): - snapshots = db_utils.get_table(engine, 'snapshots') - self.assertIsInstance(snapshots.c.provider_auth.type, - self.VARCHAR_TYPE) - - def _check_053(self, engine, data): - services = db_utils.get_table(engine, 'services') - self.assertIsInstance(services.c.rpc_current_version.type, - self.VARCHAR_TYPE) - self.assertIsInstance(services.c.rpc_available_version.type, - self.VARCHAR_TYPE) - self.assertIsInstance(services.c.object_current_version.type, - self.VARCHAR_TYPE) - self.assertIsInstance(services.c.object_available_version.type, - self.VARCHAR_TYPE) - - def _check_054(self, engine, data): - backups = db_utils.get_table(engine, 'backups') - self.assertIsInstance(backups.c.num_dependent_backups.type, - self.INTEGER_TYPE) - - def _check_055(self, engine, data): - """Test adding image_volume_cache_entries table.""" - has_table = engine.dialect.has_table(engine.connect(), - "image_volume_cache_entries") - self.assertTrue(has_table) - - private_data = db_utils.get_table( - engine, - 'image_volume_cache_entries' - ) - - self.assertIsInstance(private_data.c.id.type, - self.INTEGER_TYPE) - self.assertIsInstance(private_data.c.host.type, - self.VARCHAR_TYPE) - self.assertIsInstance(private_data.c.image_id.type, - self.VARCHAR_TYPE) - 
self.assertIsInstance(private_data.c.image_updated_at.type, - self.TIME_TYPE) - self.assertIsInstance(private_data.c.volume_id.type, - self.VARCHAR_TYPE) - self.assertIsInstance(private_data.c.size.type, - self.INTEGER_TYPE) - self.assertIsInstance(private_data.c.last_used.type, - self.TIME_TYPE) - - def _check_061(self, engine, data): - backups = db_utils.get_table(engine, 'backups') - self.assertIsInstance(backups.c.snapshot_id.type, - self.VARCHAR_TYPE) - self.assertIsInstance(backups.c.data_timestamp.type, - self.TIME_TYPE) - - def _check_062(self, engine, data): - volume_type_projects = db_utils.get_table(engine, - 'volume_type_projects') - self.assertIsInstance(volume_type_projects.c.id.type, - self.INTEGER_TYPE) - - def _check_064(self, engine, data): - backups = db_utils.get_table(engine, 'backups') - self.assertIsInstance(backups.c.restore_volume_id.type, - self.VARCHAR_TYPE) - - def _check_065(self, engine, data): - services = db_utils.get_table(engine, 'services') - self.assertIsInstance(services.c.replication_status.type, - self.VARCHAR_TYPE) - self.assertIsInstance(services.c.frozen.type, - self.BOOL_TYPE) - self.assertIsInstance(services.c.active_backend_id.type, - self.VARCHAR_TYPE) - - def _check_066(self, engine, data): - reservations = db_utils.get_table(engine, 'reservations') - self.assertIsInstance(reservations.c.allocated_id.type, - self.INTEGER_TYPE) - - def __check_cinderbase_fields(self, columns): - """Check fields inherited from CinderBase ORM class.""" - self.assertIsInstance(columns.created_at.type, self.TIME_TYPE) - self.assertIsInstance(columns.updated_at.type, self.TIME_TYPE) - self.assertIsInstance(columns.deleted_at.type, self.TIME_TYPE) - self.assertIsInstance(columns.deleted.type, self.BOOL_TYPE) - - def _check_067(self, engine, data): - iscsi_targets = db_utils.get_table(engine, 'iscsi_targets') - fkey, = iscsi_targets.c.volume_id.foreign_keys - self.assertIsNotNone(fkey) - - def _check_074(self, engine, data): - """Test adding 
message table.""" - self.assertTrue(engine.dialect.has_table(engine.connect(), - "messages")) - messages = db_utils.get_table(engine, 'messages') - - self.assertIsInstance(messages.c.created_at.type, - self.TIME_TYPE) - self.assertIsInstance(messages.c.deleted_at.type, - self.TIME_TYPE) - self.assertIsInstance(messages.c.deleted.type, - self.BOOL_TYPE) - self.assertIsInstance(messages.c.message_level.type, - self.VARCHAR_TYPE) - self.assertIsInstance(messages.c.project_id.type, - self.VARCHAR_TYPE) - self.assertIsInstance(messages.c.id.type, - self.VARCHAR_TYPE) - self.assertIsInstance(messages.c.request_id.type, - self.VARCHAR_TYPE) - self.assertIsInstance(messages.c.resource_uuid.type, - self.VARCHAR_TYPE) - self.assertIsInstance(messages.c.event_id.type, - self.VARCHAR_TYPE) - self.assertIsInstance(messages.c.resource_type.type, - self.VARCHAR_TYPE) - - def _check_075(self, engine, data): - """Test adding cluster table and cluster_id fields.""" - self.assertTrue(engine.dialect.has_table(engine.connect(), 'clusters')) - clusters = db_utils.get_table(engine, 'clusters') - columns = clusters.c - self.__check_cinderbase_fields(columns) - - # Cluster specific fields - self.assertIsInstance(columns.id.type, self.INTEGER_TYPE) - self.assertIsInstance(columns.name.type, self.VARCHAR_TYPE) - self.assertIsInstance(columns.binary.type, self.VARCHAR_TYPE) - self.assertIsInstance(columns.disabled.type, self.BOOL_TYPE) - self.assertIsInstance(columns.disabled_reason.type, self.VARCHAR_TYPE) - - # Check that we have added cluster_name field to all required tables - for table_name in ('services', 'consistencygroups', 'volumes'): - table = db_utils.get_table(engine, table_name) - self.assertIsInstance(table.c.cluster_name.type, - self.VARCHAR_TYPE) - - def _check_076(self, engine, data): - workers = db_utils.get_table(engine, 'workers') - columns = workers.c - self.__check_cinderbase_fields(columns) - - # Workers specific fields - self.assertIsInstance(columns.id.type, 
self.INTEGER_TYPE) - self.assertIsInstance(columns.resource_type.type, self.VARCHAR_TYPE) - self.assertIsInstance(columns.resource_id.type, self.VARCHAR_TYPE) - self.assertIsInstance(columns.status.type, self.VARCHAR_TYPE) - self.assertIsInstance(columns.service_id.type, self.INTEGER_TYPE) - - def _check_077(self, engine, data): - """Test adding group types and specs tables.""" - self.assertTrue(engine.dialect.has_table(engine.connect(), - "group_types")) - group_types = db_utils.get_table(engine, 'group_types') - - self.assertIsInstance(group_types.c.id.type, - self.VARCHAR_TYPE) - self.assertIsInstance(group_types.c.name.type, - self.VARCHAR_TYPE) - self.assertIsInstance(group_types.c.description.type, - self.VARCHAR_TYPE) - self.assertIsInstance(group_types.c.created_at.type, - self.TIME_TYPE) - self.assertIsInstance(group_types.c.updated_at.type, - self.TIME_TYPE) - self.assertIsInstance(group_types.c.deleted_at.type, - self.TIME_TYPE) - self.assertIsInstance(group_types.c.deleted.type, - self.BOOL_TYPE) - self.assertIsInstance(group_types.c.is_public.type, - self.BOOL_TYPE) - - self.assertTrue(engine.dialect.has_table(engine.connect(), - "group_type_specs")) - group_specs = db_utils.get_table(engine, 'group_type_specs') - - self.assertIsInstance(group_specs.c.id.type, - self.INTEGER_TYPE) - self.assertIsInstance(group_specs.c.key.type, - self.VARCHAR_TYPE) - self.assertIsInstance(group_specs.c.value.type, - self.VARCHAR_TYPE) - self.assertIsInstance(group_specs.c.group_type_id.type, - self.VARCHAR_TYPE) - self.assertIsInstance(group_specs.c.created_at.type, - self.TIME_TYPE) - self.assertIsInstance(group_specs.c.updated_at.type, - self.TIME_TYPE) - self.assertIsInstance(group_specs.c.deleted_at.type, - self.TIME_TYPE) - self.assertIsInstance(group_specs.c.deleted.type, - self.BOOL_TYPE) - - self.assertTrue(engine.dialect.has_table(engine.connect(), - "group_type_projects")) - type_projects = db_utils.get_table(engine, 'group_type_projects') - - 
self.assertIsInstance(type_projects.c.id.type, - self.INTEGER_TYPE) - self.assertIsInstance(type_projects.c.created_at.type, - self.TIME_TYPE) - self.assertIsInstance(type_projects.c.updated_at.type, - self.TIME_TYPE) - self.assertIsInstance(type_projects.c.deleted_at.type, - self.TIME_TYPE) - self.assertIsInstance(type_projects.c.deleted.type, - self.BOOL_TYPE) - self.assertIsInstance(type_projects.c.group_type_id.type, - self.VARCHAR_TYPE) - self.assertIsInstance(type_projects.c.project_id.type, - self.VARCHAR_TYPE) - - def _check_078(self, engine, data): - """Test adding groups tables.""" - self.assertTrue(engine.dialect.has_table(engine.connect(), - "groups")) - groups = db_utils.get_table(engine, 'groups') - - self.assertIsInstance(groups.c.id.type, - self.VARCHAR_TYPE) - self.assertIsInstance(groups.c.name.type, - self.VARCHAR_TYPE) - self.assertIsInstance(groups.c.description.type, - self.VARCHAR_TYPE) - self.assertIsInstance(groups.c.created_at.type, - self.TIME_TYPE) - self.assertIsInstance(groups.c.updated_at.type, - self.TIME_TYPE) - self.assertIsInstance(groups.c.deleted_at.type, - self.TIME_TYPE) - self.assertIsInstance(groups.c.deleted.type, - self.BOOL_TYPE) - self.assertIsInstance(groups.c.user_id.type, - self.VARCHAR_TYPE) - self.assertIsInstance(groups.c.project_id.type, - self.VARCHAR_TYPE) - self.assertIsInstance(groups.c.host.type, - self.VARCHAR_TYPE) - self.assertIsInstance(groups.c.availability_zone.type, - self.VARCHAR_TYPE) - self.assertIsInstance(groups.c.group_type_id.type, - self.VARCHAR_TYPE) - self.assertIsInstance(groups.c.status.type, - self.VARCHAR_TYPE) - - self.assertTrue(engine.dialect.has_table(engine.connect(), - "group_volume_type_mapping")) - mapping = db_utils.get_table(engine, 'group_volume_type_mapping') - - self.assertIsInstance(mapping.c.id.type, - self.INTEGER_TYPE) - self.assertIsInstance(mapping.c.created_at.type, - self.TIME_TYPE) - self.assertIsInstance(mapping.c.updated_at.type, - self.TIME_TYPE) - 
self.assertIsInstance(mapping.c.deleted_at.type, - self.TIME_TYPE) - self.assertIsInstance(mapping.c.deleted.type, - self.BOOL_TYPE) - self.assertIsInstance(mapping.c.volume_type_id.type, - self.VARCHAR_TYPE) - self.assertIsInstance(mapping.c.group_id.type, - self.VARCHAR_TYPE) - - volumes = db_utils.get_table(engine, 'volumes') - self.assertIsInstance(volumes.c.group_id.type, - self.VARCHAR_TYPE) - - quota_classes = db_utils.get_table(engine, 'quota_classes') - rows = select([func.count()]).select_from( - quota_classes).where(quota_classes.c.resource == 'groups').\ - execute().scalar() - self.assertEqual(1, rows) - - def _check_079(self, engine, data): - """Test adding group_snapshots tables.""" - self.assertTrue(engine.dialect.has_table(engine.connect(), - "group_snapshots")) - group_snapshots = db_utils.get_table(engine, 'group_snapshots') - - self.assertIsInstance(group_snapshots.c.id.type, - self.VARCHAR_TYPE) - self.assertIsInstance(group_snapshots.c.name.type, - self.VARCHAR_TYPE) - self.assertIsInstance(group_snapshots.c.description.type, - self.VARCHAR_TYPE) - self.assertIsInstance(group_snapshots.c.created_at.type, - self.TIME_TYPE) - self.assertIsInstance(group_snapshots.c.updated_at.type, - self.TIME_TYPE) - self.assertIsInstance(group_snapshots.c.deleted_at.type, - self.TIME_TYPE) - self.assertIsInstance(group_snapshots.c.deleted.type, - self.BOOL_TYPE) - self.assertIsInstance(group_snapshots.c.user_id.type, - self.VARCHAR_TYPE) - self.assertIsInstance(group_snapshots.c.project_id.type, - self.VARCHAR_TYPE) - self.assertIsInstance(group_snapshots.c.group_id.type, - self.VARCHAR_TYPE) - self.assertIsInstance(group_snapshots.c.group_type_id.type, - self.VARCHAR_TYPE) - self.assertIsInstance(group_snapshots.c.status.type, - self.VARCHAR_TYPE) - - snapshots = db_utils.get_table(engine, 'snapshots') - self.assertIsInstance(snapshots.c.group_snapshot_id.type, - self.VARCHAR_TYPE) - - groups = db_utils.get_table(engine, 'groups') - 
self.assertIsInstance(groups.c.group_snapshot_id.type, - self.VARCHAR_TYPE) - self.assertIsInstance(groups.c.source_group_id.type, - self.VARCHAR_TYPE) - - def _check_086(self, engine, data): - """Test inserting default cgsnapshot group type.""" - self.assertTrue(engine.dialect.has_table(engine.connect(), - "group_types")) - group_types = db_utils.get_table(engine, 'group_types') - t1 = (group_types.select(group_types.c.name == - volume_group_types.DEFAULT_CGSNAPSHOT_TYPE). - execute().first()) - self.assertIsNotNone(t1) - - group_specs = db_utils.get_table(engine, 'group_type_specs') - specs = group_specs.select( - group_specs.c.group_type_id == t1.id and - group_specs.c.key == 'consistent_group_snapshot_enabled' - ).execute().first() - self.assertIsNotNone(specs) - self.assertEqual(' True', specs.value) - - def _check_087(self, engine, data): - """Test request_id column in messages is nullable.""" - self.assertTrue(engine.dialect.has_table(engine.connect(), - "messages")) - messages = db_utils.get_table(engine, 'messages') - - self.assertIsInstance(messages.c.request_id.type, - self.VARCHAR_TYPE) - self.assertTrue(messages.c.request_id.nullable) - - def _check_088(self, engine, data): - """Test adding replication data to cluster table.""" - clusters = db_utils.get_table(engine, 'clusters') - self.assertIsInstance(clusters.c.replication_status.type, - self.VARCHAR_TYPE) - self.assertIsInstance(clusters.c.active_backend_id.type, - self.VARCHAR_TYPE) - self.assertIsInstance(clusters.c.frozen.type, - self.BOOL_TYPE) - - def _check_089(self, engine, data): - """Test adding cluster_name to image volume cache table.""" - image_cache = db_utils.get_table(engine, 'image_volume_cache_entries') - self.assertIsInstance(image_cache.c.cluster_name.type, - self.VARCHAR_TYPE) - - def _check_090(self, engine, data): - """Test adding race_preventer to workers table.""" - workers = db_utils.get_table(engine, 'workers') - self.assertIsInstance(workers.c.race_preventer.type, - 
self.INTEGER_TYPE) - - def _check_091(self, engine, data): - self.assertTrue(engine.dialect.has_table(engine.connect(), - "attachment_specs")) - attachment = db_utils.get_table(engine, 'attachment_specs') - - self.assertIsInstance(attachment.c.created_at.type, - self.TIME_TYPE) - self.assertIsInstance(attachment.c.updated_at.type, - self.TIME_TYPE) - self.assertIsInstance(attachment.c.deleted_at.type, - self.TIME_TYPE) - self.assertIsInstance(attachment.c.deleted.type, - self.BOOL_TYPE) - self.assertIsInstance(attachment.c.id.type, - self.INTEGER_TYPE) - self.assertIsInstance(attachment.c.key.type, - self.VARCHAR_TYPE) - self.assertIsInstance(attachment.c.value.type, - self.VARCHAR_TYPE) - self.assertIsInstance(attachment.c.attachment_id.type, - self.VARCHAR_TYPE) - f_keys = self.get_foreign_key_columns(engine, 'attachment_specs') - self.assertEqual({'attachment_id'}, f_keys) - - def _check_098(self, engine, data): - self.assertTrue(engine.dialect.has_table(engine.connect(), - "messages")) - ids = self.get_indexed_columns(engine, 'messages') - self.assertTrue('expires_at' in ids) - - def _check_099(self, engine, data): - self.assertTrue(engine.dialect.has_table(engine.connect(), - "volume_attachment")) - attachment = db_utils.get_table(engine, 'volume_attachment') - - self.assertIsInstance(attachment.c.connection_info.type, - self.TEXT_TYPE) - - def get_table_names(self, engine): - inspector = reflection.Inspector.from_engine(engine) - return inspector.get_table_names() - - def get_foreign_key_columns(self, engine, table_name): - foreign_keys = set() - table = db_utils.get_table(engine, table_name) - inspector = reflection.Inspector.from_engine(engine) - for column_dict in inspector.get_columns(table_name): - column_name = column_dict['name'] - column = getattr(table.c, column_name) - if column.foreign_keys: - foreign_keys.add(column_name) - return foreign_keys - - def get_indexed_columns(self, engine, table_name): - indexed_columns = set() - for index in 
db_utils.get_indexes(engine, table_name): - for column_name in index['column_names']: - indexed_columns.add(column_name) - return indexed_columns - - def assert_each_foreign_key_is_part_of_an_index(self): - engine = self.migrate_engine - - non_indexed_foreign_keys = set() - - for table_name in self.get_table_names(engine): - indexed_columns = self.get_indexed_columns(engine, table_name) - foreign_key_columns = self.get_foreign_key_columns( - engine, table_name - ) - for column_name in foreign_key_columns - indexed_columns: - non_indexed_foreign_keys.add(table_name + '.' + column_name) - - self.assertSetEqual(set(), non_indexed_foreign_keys) - - def _pre_upgrade_101(self, engine): - """Add data to test the SQL migration.""" - types_table = db_utils.get_table(engine, 'volume_types') - for i in range(1, 5): - types_table.insert().execute({'id': str(i)}) - - specs_table = db_utils.get_table(engine, 'volume_type_extra_specs') - specs = [ - {'volume_type_id': '1', 'key': 'key', 'value': ' False'}, - {'volume_type_id': '2', 'key': 'replication_enabled', - 'value': ' False'}, - {'volume_type_id': '3', 'key': 'replication_enabled', - 'value': ' True', 'deleted': True}, - {'volume_type_id': '3', 'key': 'key', 'value': ' True'}, - {'volume_type_id': '4', 'key': 'replication_enabled', - 'value': ' True'}, - {'volume_type_id': '4', 'key': 'key', 'value': ' True'}, - ] - for spec in specs: - specs_table.insert().execute(spec) - - volumes_table = db_utils.get_table(engine, 'volumes') - volumes = [ - {'id': '1', 'replication_status': 'disabled', - 'volume_type_id': None}, - {'id': '2', 'replication_status': 'disabled', - 'volume_type_id': ''}, - {'id': '3', 'replication_status': 'disabled', - 'volume_type_id': '1'}, - {'id': '4', 'replication_status': 'disabled', - 'volume_type_id': '2'}, - {'id': '5', 'replication_status': 'disabled', - 'volume_type_id': '2'}, - {'id': '6', 'replication_status': 'disabled', - 'volume_type_id': '3'}, - {'id': '7', 'replication_status': 'error', 
'volume_type_id': '4'}, - {'id': '8', 'deleted': True, 'replication_status': 'disabled', - 'volume_type_id': '4'}, - {'id': '9', 'replication_status': 'disabled', 'deleted': None, - 'volume_type_id': '4'}, - {'id': '10', 'replication_status': 'disabled', 'deleted': False, - 'volume_type_id': '4'}, - ] - for volume in volumes: - volumes_table.insert().execute(volume) - - # Only the last volume should be changed to enabled - expected = {v['id']: v['replication_status'] for v in volumes} - expected['9'] = 'enabled' - expected['10'] = 'enabled' - return expected - - def _check_101(self, engine, data): - # Get existing volumes after the migration - volumes_table = db_utils.get_table(engine, 'volumes') - volumes = volumes_table.select().execute() - # Check that the replication_status is the one we expect according to - # _pre_upgrade_098 - for volume in volumes: - self.assertEqual(data[volume.id], volume.replication_status, - 'id %s' % volume.id) - - def _check_102(self, engine, data): - """Test adding replication_status to groups table.""" - groups = db_utils.get_table(engine, 'groups') - self.assertIsInstance(groups.c.replication_status.type, - self.VARCHAR_TYPE) - - def _check_103(self, engine, data): - self.assertTrue(engine.dialect.has_table(engine.connect(), - "messages")) - attachment = db_utils.get_table(engine, 'messages') - - self.assertIsInstance(attachment.c.detail_id.type, - self.VARCHAR_TYPE) - self.assertIsInstance(attachment.c.action_id.type, - self.VARCHAR_TYPE) - - def _check_104(self, engine, data): - messages = db_utils.get_table(engine, 'messages') - self.assertEqual(255, messages.c.project_id.type.length) - - def test_walk_versions(self): - self.walk_versions(False, False) - self.assert_each_foreign_key_is_part_of_an_index() - - -class TestSqliteMigrations(test_base.DbTestCase, - MigrationsMixin): - def assert_each_foreign_key_is_part_of_an_index(self): - # Skip the test for SQLite because SQLite does not list - # UniqueConstraints as indexes, which 
makes this test fail. - # Given that SQLite is only for testing purposes, it is safe to skip - pass - - -class TestMysqlMigrations(test_base.MySQLOpportunisticTestCase, - MigrationsMixin): - - BOOL_TYPE = sqlalchemy.dialects.mysql.TINYINT - - def test_mysql_innodb(self): - """Test that table creation on mysql only builds InnoDB tables.""" - # add this to the global lists to make reset work with it, it's removed - # automatically in tearDown so no need to clean it up here. - # sanity check - migration.db_sync(engine=self.migrate_engine) - - total = self.migrate_engine.execute( - "SELECT count(*) " - "from information_schema.TABLES " - "where TABLE_SCHEMA='{0}'".format( - self.migrate_engine.url.database)) - self.assertGreater(total.scalar(), 0, - msg="No tables found. Wrong schema?") - - noninnodb = self.migrate_engine.execute( - "SELECT count(*) " - "from information_schema.TABLES " - "where TABLE_SCHEMA='openstack_citest' " - "and ENGINE!='InnoDB' " - "and TABLE_NAME!='migrate_version'") - count = noninnodb.scalar() - self.assertEqual(count, 0, "%d non InnoDB tables created" % count) - - -class TestPostgresqlMigrations(test_base.PostgreSQLOpportunisticTestCase, - MigrationsMixin): - TIME_TYPE = sqlalchemy.types.TIMESTAMP diff --git a/cinder/tests/unit/test_paginate_query.py b/cinder/tests/unit/test_paginate_query.py deleted file mode 100644 index 05a6c9856..000000000 --- a/cinder/tests/unit/test_paginate_query.py +++ /dev/null @@ -1,41 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the -# License for the specific language governing permissions and limitations -# under the License. -from cinder.common import sqlalchemyutils -from cinder import context -from cinder.db.sqlalchemy import api as db_api -from cinder.db.sqlalchemy import models -from cinder import test -from cinder.tests.unit import fake_constants as fake - - -class TestPaginateQuery(test.TestCase): - def setUp(self): - super(TestPaginateQuery, self).setUp() - self.ctxt = context.RequestContext(fake.USER_ID, fake.PROJECT_ID, - auth_token=True, - is_admin=True) - self.query = db_api._volume_get_query(self.ctxt) - self.model = models.Volume - - def test_paginate_query_marker_null(self): - marker_object = self.model() - self.assertIsNone(marker_object.display_name) - self.assertIsNone(marker_object.updated_at) - - marker_object.size = 1 - # There is no error raised here. - sqlalchemyutils.paginate_query(self.query, self.model, 10, - sort_keys=['display_name', - 'updated_at', - 'size'], - marker=marker_object, - sort_dirs=['desc', 'asc', 'desc']) diff --git a/cinder/tests/unit/test_qos_specs.py b/cinder/tests/unit/test_qos_specs.py deleted file mode 100644 index 100fccda4..000000000 --- a/cinder/tests/unit/test_qos_specs.py +++ /dev/null @@ -1,396 +0,0 @@ - -# Copyright (c) 2013 eBay Inc. -# Copyright (c) 2013 OpenStack Foundation -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
-""" -Unit Tests for qos specs internal API -""" - -import mock -import six -import time - -from oslo_db import exception as db_exc -from oslo_utils import timeutils - -from cinder import context -from cinder import db -from cinder import exception -from cinder import test -from cinder.tests.unit import fake_constants as fake -from cinder.volume import qos_specs -from cinder.volume import volume_types - - -def fake_db_qos_specs_create(context, values): - if values['name'] == 'DupQoSName': - raise exception.QoSSpecsExists(specs_id=values['name']) - elif values['name'] == 'FailQoSName': - raise db_exc.DBError() - - pass - - -def fake_db_get_vol_type(vol_type_number=1): - return {'name': 'type-' + six.text_type(vol_type_number), - 'id': fake.QOS_SPEC_ID, - 'updated_at': None, - 'created_at': None, - 'deleted_at': None, - 'description': 'desc', - 'deleted': False, - 'is_public': True, - 'projects': [], - 'qos_specs_id': fake.QOS_SPEC_ID, - 'extra_specs': None} - - -class QoSSpecsTestCase(test.TestCase): - """Test cases for qos specs code.""" - def setUp(self): - super(QoSSpecsTestCase, self).setUp() - self.ctxt = context.get_admin_context() - - def _create_qos_specs(self, name, consumer='back-end', values=None): - """Create a transfer object.""" - if values is None: - values = {'key1': 'value1', 'key2': 'value2'} - - specs = {'name': name, - 'consumer': consumer, - 'specs': values} - return db.qos_specs_create(self.ctxt, specs)['id'] - - def test_create(self): - input = {'key1': 'value1', - 'key2': 'value2', - 'key3': 'value3'} - ref = qos_specs.create(self.ctxt, 'FakeName', input) - specs_obj = qos_specs.get_qos_specs(self.ctxt, ref['id']) - specs_obj_dic = {'consumer': specs_obj['consumer'], - 'id': specs_obj['id'], - 'name': specs_obj['name'], - 'specs': specs_obj['specs']} - expected = {'consumer': 'back-end', - 'id': ref['id'], - 'name': 'FakeName', - 'specs': input} - self.assertDictEqual(expected, - specs_obj_dic) - - # qos specs must have unique name - 
self.assertRaises(exception.QoSSpecsExists, - qos_specs.create, self.ctxt, 'FakeName', input) - - # consumer must be one of: front-end, back-end, both - input['consumer'] = 'fake' - self.assertRaises(exception.InvalidQoSSpecs, - qos_specs.create, self.ctxt, 'QoSName', input) - - del input['consumer'] - - self.mock_object(db, 'qos_specs_create', - fake_db_qos_specs_create) - # able to catch DBError - self.assertRaises(exception.QoSSpecsCreateFailed, - qos_specs.create, self.ctxt, 'FailQoSName', input) - - def test_update(self): - def fake_db_update(context, specs_id, values): - raise db_exc.DBError() - - qos = {'consumer': 'back-end', - 'specs': {'key1': 'value1'}} - - # qos specs must exists - self.assertRaises(exception.QoSSpecsNotFound, - qos_specs.update, self.ctxt, 'fake_id', qos) - - specs_id = self._create_qos_specs('Name', - qos['consumer'], - qos['specs']) - - qos_specs.update(self.ctxt, specs_id, - {'key1': 'newvalue1', 'key2': 'value2'}) - - specs = qos_specs.get_qos_specs(self.ctxt, specs_id) - self.assertEqual('newvalue1', specs['specs']['key1']) - self.assertEqual('value2', specs['specs']['key2']) - - # consumer must be one of: front-end, back-end, both - self.assertRaises(exception.InvalidQoSSpecs, - qos_specs.update, self.ctxt, specs_id, - {'consumer': 'not-real'}) - - self.mock_object(db, 'qos_specs_update', fake_db_update) - self.assertRaises(exception.QoSSpecsUpdateFailed, - qos_specs.update, self.ctxt, specs_id, {'key': - 'new_key'}) - - def test_delete(self): - qos_id = self._create_qos_specs('my_qos') - - def fake_db_associations_get(context, id): - vol_types = [] - if id == qos_id: - vol_types = [fake_db_get_vol_type(id)] - return vol_types - - def fake_db_delete(context, id): - return {'deleted': True, - 'deleted_at': timeutils.utcnow()} - - def fake_disassociate_all(context, id): - pass - - self.mock_object(db, 'qos_specs_associations_get', - fake_db_associations_get) - self.mock_object(qos_specs, 'disassociate_all', - fake_disassociate_all) 
- self.mock_object(db, 'qos_specs_delete', fake_db_delete) - self.assertRaises(exception.InvalidQoSSpecs, - qos_specs.delete, self.ctxt, None) - self.assertRaises(exception.QoSSpecsNotFound, - qos_specs.delete, self.ctxt, 'NotFound') - self.assertRaises(exception.QoSSpecsInUse, - qos_specs.delete, self.ctxt, qos_id) - # able to delete in-use qos specs if force=True - qos_specs.delete(self.ctxt, qos_id, force=True) - - # Can delete without forcing when no volume types - qos_id_with_no_vol_types = self._create_qos_specs('no_vol_types') - qos_specs.delete(self.ctxt, qos_id_with_no_vol_types, force=False) - - def test_delete_keys(self): - def fake_db_qos_delete_key(context, id, key): - if key == 'NotFound': - raise exception.QoSSpecsKeyNotFound(specs_id=id, - specs_key=key) - else: - pass - - value = {'foo': 'Foo', 'bar': 'Bar', 'zoo': 'tiger'} - name = 'QoSName' - consumer = 'front-end' - specs_id = self._create_qos_specs(name, consumer, value) - qos_specs.delete_keys(self.ctxt, specs_id, ['foo', 'bar']) - - del value['foo'] - del value['bar'] - expected = {'name': name, - 'id': specs_id, - 'consumer': consumer, - 'specs': value} - specs = qos_specs.get_qos_specs(self.ctxt, specs_id) - specs_dic = {'consumer': specs['consumer'], - 'id': specs['id'], - 'name': specs['name'], - 'specs': specs['specs']} - self.assertDictEqual(expected, specs_dic) - - self.mock_object(db, 'qos_specs_item_delete', fake_db_qos_delete_key) - self.assertRaises(exception.InvalidQoSSpecs, - qos_specs.delete_keys, self.ctxt, None, []) - self.assertRaises(exception.QoSSpecsNotFound, - qos_specs.delete_keys, self.ctxt, 'NotFound', []) - self.assertRaises(exception.QoSSpecsKeyNotFound, - qos_specs.delete_keys, self.ctxt, - specs_id, ['NotFound']) - self.assertRaises(exception.QoSSpecsKeyNotFound, - qos_specs.delete_keys, self.ctxt, specs_id, - ['foo', 'bar', 'NotFound']) - - @mock.patch.object(db, 'qos_specs_associations_get') - def test_get_associations(self, mock_qos_specs_associations_get): - 
vol_types = [fake_db_get_vol_type(x) for x in range(2)] - - mock_qos_specs_associations_get.return_value = vol_types - specs_id = self._create_qos_specs('new_spec') - res = qos_specs.get_associations(self.ctxt, specs_id) - for vol_type in vol_types: - expected_type = { - 'association_type': 'volume_type', - 'id': vol_type['id'], - 'name': vol_type['name'] - } - self.assertIn(expected_type, res) - - e = exception.QoSSpecsNotFound(specs_id='Trouble') - mock_qos_specs_associations_get.side_effect = e - self.assertRaises(exception.CinderException, - qos_specs.get_associations, self.ctxt, - 'Trouble') - - def test_associate_qos_with_type(self): - def fake_qos_specs_get(context, id): - if id == 'NotFound': - raise exception.QoSSpecsNotFound(specs_id=id) - else: - pass - - def fake_db_associate(context, id, type_id): - if id == 'Trouble': - raise db_exc.DBError() - elif type_id == 'NotFound': - raise exception.VolumeTypeNotFound(volume_type_id=type_id) - pass - - def fake_vol_type_qos_get(type_id): - if type_id == 'Invalid': - return {'qos_specs': {'id': 'Invalid'}} - else: - return {'qos_specs': None} - - type_ref = volume_types.create(self.ctxt, 'TypeName') - specs_id = self._create_qos_specs('QoSName') - - qos_specs.associate_qos_with_type(self.ctxt, specs_id, - type_ref['id']) - res = qos_specs.get_associations(self.ctxt, specs_id) - self.assertEqual(1, len(res)) - self.assertEqual('TypeName', res[0]['name']) - self.assertEqual(type_ref['id'], res[0]['id']) - - self.mock_object(db, 'qos_specs_associate', - fake_db_associate) - self.mock_object(qos_specs, 'get_qos_specs', fake_qos_specs_get) - self.mock_object(volume_types, 'get_volume_type_qos_specs', - fake_vol_type_qos_get) - self.assertRaises(exception.VolumeTypeNotFound, - qos_specs.associate_qos_with_type, - self.ctxt, 'specs-id', 'NotFound') - self.assertRaises(exception.QoSSpecsAssociateFailed, - qos_specs.associate_qos_with_type, - self.ctxt, 'Trouble', 'id') - self.assertRaises(exception.QoSSpecsNotFound, - 
qos_specs.associate_qos_with_type, - self.ctxt, 'NotFound', 'id') - self.assertRaises(exception.InvalidVolumeType, - qos_specs.associate_qos_with_type, - self.ctxt, 'specs-id', 'Invalid') - - def test_disassociate_qos_specs(self): - def fake_db_disassociate(context, id, type_id): - raise db_exc.DBError() - - type_ref = volume_types.create(self.ctxt, 'TypeName') - specs_id = self._create_qos_specs('QoSName') - - qos_specs.associate_qos_with_type(self.ctxt, specs_id, - type_ref['id']) - res = qos_specs.get_associations(self.ctxt, specs_id) - self.assertEqual(1, len(res)) - - qos_specs.disassociate_qos_specs(self.ctxt, specs_id, type_ref['id']) - res = qos_specs.get_associations(self.ctxt, specs_id) - self.assertEqual(0, len(res)) - - self.assertRaises(exception.VolumeTypeNotFound, - qos_specs.disassociate_qos_specs, - self.ctxt, specs_id, 'NotFound') - - # Verify we can disassociate specs from volume_type even if they are - # not associated with no error - qos_specs.disassociate_qos_specs(self.ctxt, specs_id, type_ref['id']) - qos_specs.associate_qos_with_type(self.ctxt, specs_id, type_ref['id']) - self.mock_object(db, 'qos_specs_disassociate', - fake_db_disassociate) - self.assertRaises(exception.QoSSpecsDisassociateFailed, - qos_specs.disassociate_qos_specs, - self.ctxt, specs_id, type_ref['id']) - - def test_disassociate_all(self): - def fake_db_disassociate_all(context, id): - if id == 'Trouble': - raise db_exc.DBError() - pass - - def fake_qos_specs_get(context, id): - if id == 'NotFound': - raise exception.QoSSpecsNotFound(specs_id=id) - else: - pass - - type1_ref = volume_types.create(self.ctxt, 'TypeName1') - type2_ref = volume_types.create(self.ctxt, 'TypeName2') - specs_id = self._create_qos_specs('QoSName') - - qos_specs.associate_qos_with_type(self.ctxt, specs_id, - type1_ref['id']) - qos_specs.associate_qos_with_type(self.ctxt, specs_id, - type2_ref['id']) - res = qos_specs.get_associations(self.ctxt, specs_id) - self.assertEqual(2, len(res)) - - 
qos_specs.disassociate_all(self.ctxt, specs_id) - res = qos_specs.get_associations(self.ctxt, specs_id) - self.assertEqual(0, len(res)) - - self.mock_object(db, 'qos_specs_disassociate_all', - fake_db_disassociate_all) - self.mock_object(qos_specs, 'get_qos_specs', - fake_qos_specs_get) - self.assertRaises(exception.QoSSpecsDisassociateFailed, - qos_specs.disassociate_all, - self.ctxt, 'Trouble') - - def test_get_all_specs(self): - qos_specs_list = [{'name': 'Specs1', - 'created_at': None, - 'updated_at': None, - 'deleted_at': None, - 'deleted': None, - 'consumer': 'both', - 'specs': {'key1': 'value1', - 'key2': 'value2', - 'key3': 'value3'}}, - {'name': 'Specs2', - 'created_at': None, - 'updated_at': None, - 'deleted_at': None, - 'deleted': None, - 'consumer': 'both', - 'specs': {'key1': 'value1', - 'key2': 'value2', - 'key3': 'value3', - 'key4': 'value4'}}] - - for qos_specs_dict in qos_specs_list: - qos_specs_id = self._create_qos_specs( - qos_specs_dict['name'], - qos_specs_dict['consumer'], - qos_specs_dict['specs']) - qos_specs_dict['id'] = qos_specs_id - - res = qos_specs.get_all_specs(self.ctxt) - self.assertEqual(len(qos_specs_list), len(res)) - - qos_res_simple_dict = [] - # Need to make list of dictionaries instead of VOs for assertIn to work - for qos in res: - qos_res_simple_dict.append( - qos.obj_to_primitive()['versioned_object.data']) - for qos_spec in qos_specs_list: - self.assertIn(qos_spec, qos_res_simple_dict) - - def test_get_qos_specs(self): - one_time_value = str(int(time.time())) - specs = {'key1': one_time_value, - 'key2': 'value2', - 'key3': 'value3'} - qos_id = self._create_qos_specs('Specs1', 'both', specs) - specs = qos_specs.get_qos_specs(self.ctxt, qos_id) - self.assertEqual(one_time_value, specs['specs']['key1']) - self.assertRaises(exception.InvalidQoSSpecs, - qos_specs.get_qos_specs, self.ctxt, None) diff --git a/cinder/tests/unit/test_quota.py b/cinder/tests/unit/test_quota.py deleted file mode 100644 index 99f91c9c3..000000000 
--- a/cinder/tests/unit/test_quota.py +++ /dev/null @@ -1,2212 +0,0 @@ - -# Copyright 2010 United States Government as represented by the -# Administrator of the National Aeronautics and Space Administration. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import datetime - -import mock -from oslo_config import cfg -from oslo_config import fixture as config_fixture -from oslo_utils import timeutils -import six - -from cinder import backup -from cinder.backup import api as backup_api -from cinder import context -from cinder import db -from cinder.db.sqlalchemy import api as sqa_api -from cinder.db.sqlalchemy import models as sqa_models -from cinder import exception -from cinder import objects -from cinder.objects import fields -from cinder import quota -from cinder import quota_utils -from cinder import test -from cinder.tests.unit import fake_constants as fake -import cinder.tests.unit.image.fake -from cinder import volume - - -CONF = cfg.CONF - - -class QuotaIntegrationTestCase(test.TestCase): - - def setUp(self): - objects.register_all() - super(QuotaIntegrationTestCase, self).setUp() - self.volume_type_name = CONF.default_volume_type - self.volume_type = objects.VolumeType(context.get_admin_context(), - name=self.volume_type_name, - description='', - is_public=False, - projects=[], - extra_specs={}) - self.volume_type.create() - - self.addCleanup(db.volume_type_destroy, context.get_admin_context(), - self.volume_type['id']) - - 
self.flags(quota_volumes=2, - quota_snapshots=2, - quota_gigabytes=20, - quota_backups=2, - quota_backup_gigabytes=20) - - self.user_id = fake.USER_ID - self.project_id = fake.PROJECT_ID - self.context = context.RequestContext(self.user_id, - self.project_id, - is_admin=True) - - # Destroy the 'default' quota_class in the database to avoid - # conflicts with the test cases here that are setting up their own - # defaults. - db.quota_class_destroy_all_by_name(self.context, 'default') - self.addCleanup(cinder.tests.unit.image.fake.FakeImageService_reset) - - def _create_volume(self, size=1): - """Create a test volume.""" - vol = {} - vol['user_id'] = self.user_id - vol['project_id'] = self.project_id - vol['size'] = size - vol['status'] = 'available' - vol['volume_type_id'] = self.volume_type['id'] - vol['host'] = 'fake_host' - vol['availability_zone'] = 'fake_zone' - vol['attach_status'] = fields.VolumeAttachStatus.DETACHED - volume = objects.Volume(context=self.context, **vol) - volume.create() - return volume - - def _create_snapshot(self, volume): - snapshot = objects.Snapshot(self.context) - snapshot.user_id = self.user_id or fake.USER_ID - snapshot.project_id = self.project_id or fake.PROJECT_ID - snapshot.volume_id = volume['id'] - snapshot.volume_size = volume['size'] - snapshot.status = fields.SnapshotStatus.AVAILABLE - snapshot.create() - return snapshot - - def _create_backup(self, volume): - backup = {} - backup['user_id'] = self.user_id - backup['project_id'] = self.project_id - backup['volume_id'] = volume['id'] - backup['volume_size'] = volume['size'] - backup['status'] = fields.BackupStatus.AVAILABLE - return db.backup_create(self.context, backup) - - def test_volume_size_limit_exceeds(self): - resource = 'volumes_%s' % self.volume_type_name - db.quota_class_create(self.context, 'default', resource, 1) - flag_args = { - 'quota_volumes': 10, - 'quota_gigabytes': 1000, - 'per_volume_size_limit': 5 - } - self.flags(**flag_args) - 
self.assertRaises(exception.VolumeSizeExceedsLimit, - volume.API().create, - self.context, 10, '', '',) - - def test_too_many_volumes(self): - volume_ids = [] - for _i in range(CONF.quota_volumes): - vol_ref = self._create_volume() - volume_ids.append(vol_ref['id']) - ex = self.assertRaises(exception.VolumeLimitExceeded, - volume.API().create, - self.context, 1, '', '', - volume_type=self.volume_type) - msg = ("Maximum number of volumes allowed (%d) exceeded for" - " quota 'volumes'." % CONF.quota_volumes) - self.assertEqual(msg, six.text_type(ex)) - for volume_id in volume_ids: - db.volume_destroy(self.context, volume_id) - - def test_too_many_volumes_of_type(self): - resource = 'volumes_%s' % self.volume_type_name - db.quota_class_create(self.context, 'default', resource, 1) - flag_args = { - 'quota_volumes': 2000, - 'quota_gigabytes': 2000 - } - self.flags(**flag_args) - vol_ref = self._create_volume() - ex = self.assertRaises(exception.VolumeLimitExceeded, - volume.API().create, - self.context, 1, '', '', - volume_type=self.volume_type) - msg = ("Maximum number of volumes allowed (1) exceeded for" - " quota '%s'." 
% resource) - self.assertEqual(msg, six.text_type(ex)) - vol_ref.destroy() - - def test_too_many_snapshots_of_type(self): - resource = 'snapshots_%s' % self.volume_type_name - db.quota_class_create(self.context, 'default', resource, 1) - flag_args = { - 'quota_volumes': 2000, - 'quota_gigabytes': 2000, - } - self.flags(**flag_args) - vol_ref = self._create_volume() - snap_ref = self._create_snapshot(vol_ref) - self.assertRaises(exception.SnapshotLimitExceeded, - volume.API().create_snapshot, - self.context, vol_ref, '', '') - snap_ref.destroy() - vol_ref.destroy() - - def test_too_many_backups(self): - resource = 'backups' - db.quota_class_create(self.context, 'default', resource, 1) - flag_args = { - 'quota_backups': 2000, - 'quota_backup_gigabytes': 2000 - } - self.flags(**flag_args) - vol_ref = self._create_volume() - backup_ref = self._create_backup(vol_ref) - with mock.patch.object(backup_api.API, - '_get_available_backup_service_host') as \ - mock__get_available_backup_service: - mock__get_available_backup_service.return_value = 'host' - self.assertRaises(exception.BackupLimitExceeded, - backup.API().create, - self.context, - 'name', - 'description', - vol_ref['id'], - 'container', - False, - None) - db.backup_destroy(self.context, backup_ref['id']) - db.volume_destroy(self.context, vol_ref['id']) - - def test_too_many_gigabytes(self): - volume_ids = [] - vol_ref = self._create_volume(size=20) - volume_ids.append(vol_ref['id']) - raised_exc = self.assertRaises( - exception.VolumeSizeExceedsAvailableQuota, volume.API().create, - self.context, 1, '', '', volume_type=self.volume_type) - expected = exception.VolumeSizeExceedsAvailableQuota( - requested=1, quota=20, consumed=20) - self.assertEqual(str(expected), str(raised_exc)) - for volume_id in volume_ids: - db.volume_destroy(self.context, volume_id) - - def test_too_many_combined_gigabytes(self): - vol_ref = self._create_volume(size=10) - snap_ref = self._create_snapshot(vol_ref) - 
self.assertRaises(exception.QuotaError, - volume.API().create_snapshot, - self.context, vol_ref, '', '') - usages = db.quota_usage_get_all_by_project(self.context, - self.project_id) - self.assertEqual(20, usages['gigabytes']['in_use']) - snap_ref.destroy() - vol_ref.destroy() - - def test_too_many_combined_backup_gigabytes(self): - vol_ref = self._create_volume(size=10000) - backup_ref = self._create_backup(vol_ref) - with mock.patch.object(backup_api.API, - '_get_available_backup_service_host') as \ - mock__get_available_backup_service: - mock__get_available_backup_service.return_value = 'host' - self.assertRaises( - exception.VolumeBackupSizeExceedsAvailableQuota, - backup.API().create, - context=self.context, - name='name', - description='description', - volume_id=vol_ref['id'], - container='container', - incremental=False) - db.backup_destroy(self.context, backup_ref['id']) - vol_ref.destroy() - - def test_no_snapshot_gb_quota_flag(self): - self.flags(quota_volumes=2, - quota_snapshots=2, - quota_gigabytes=20, - no_snapshot_gb_quota=True) - vol_ref = self._create_volume(size=10) - snap_ref = self._create_snapshot(vol_ref) - snap_ref2 = volume.API().create_snapshot(self.context, - vol_ref, '', '') - - # Make sure the snapshot volume_size isn't included in usage. 
- vol_ref2 = volume.API().create(self.context, 10, '', '') - usages = db.quota_usage_get_all_by_project(self.context, - self.project_id) - self.assertEqual(20, usages['gigabytes']['in_use']) - self.assertEqual(0, usages['gigabytes']['reserved']) - - snap_ref.destroy() - snap_ref2.destroy() - vol_ref.destroy() - vol_ref2.destroy() - - def test_backup_gb_quota_flag(self): - self.flags(quota_volumes=2, - quota_snapshots=2, - quota_backups=2, - quota_gigabytes=20 - ) - vol_ref = self._create_volume(size=10) - backup_ref = self._create_backup(vol_ref) - with mock.patch.object(backup_api.API, - '_get_available_backup_service_host') as \ - mock_mock__get_available_backup_service: - mock_mock__get_available_backup_service.return_value = 'host' - backup_ref2 = backup.API().create(self.context, - 'name', - 'description', - vol_ref['id'], - 'container', - False, - None) - - # Make sure the backup volume_size isn't included in usage. - vol_ref2 = volume.API().create(self.context, 10, '', '') - usages = db.quota_usage_get_all_by_project(self.context, - self.project_id) - self.assertEqual(20, usages['gigabytes']['in_use']) - self.assertEqual(0, usages['gigabytes']['reserved']) - - db.backup_destroy(self.context, backup_ref['id']) - db.backup_destroy(self.context, backup_ref2['id']) - vol_ref.destroy() - vol_ref2.destroy() - - def test_too_many_gigabytes_of_type(self): - resource = 'gigabytes_%s' % self.volume_type_name - db.quota_class_create(self.context, 'default', resource, 10) - flag_args = { - 'quota_volumes': 2000, - 'quota_gigabytes': 2000, - } - self.flags(**flag_args) - vol_ref = self._create_volume(size=10) - raised_exc = self.assertRaises( - exception.VolumeSizeExceedsAvailableQuota, volume.API().create, - self.context, 1, '', '', volume_type=self.volume_type) - expected = exception.VolumeSizeExceedsAvailableQuota( - requested=1, quota=10, consumed=10, name=resource) - self.assertEqual(str(expected), str(raised_exc)) - vol_ref.destroy() - - -class FakeContext(object): 
- def __init__(self, project_id, quota_class): - self.is_admin = False - self.user_id = 'fake_user' - self.project_id = project_id - self.quota_class = quota_class - - def elevated(self): - elevated = self.__class__(self.project_id, self.quota_class) - elevated.is_admin = True - return elevated - - -class FakeDriver(object): - def __init__(self, by_project=None, by_class=None, reservations=None): - self.called = [] - self.by_project = by_project or {} - self.by_class = by_class or {} - self.reservations = reservations or [] - - def get_by_project(self, context, project_id, resource): - self.called.append(('get_by_project', context, project_id, resource)) - try: - return self.by_project[project_id][resource] - except KeyError: - raise exception.ProjectQuotaNotFound(project_id=project_id) - - def get_by_class(self, context, quota_class, resource): - self.called.append(('get_by_class', context, quota_class, resource)) - try: - return self.by_class[quota_class][resource] - except KeyError: - raise exception.QuotaClassNotFound(class_name=quota_class) - - def get_default(self, context, resource, parent_project_id=None): - self.called.append(('get_default', context, resource, - parent_project_id)) - return resource.default - - def get_defaults(self, context, resources, parent_project_id=None): - self.called.append(('get_defaults', context, resources, - parent_project_id)) - return resources - - def get_class_quotas(self, context, resources, quota_class, - defaults=True): - self.called.append(('get_class_quotas', context, resources, - quota_class, defaults)) - return resources - - def get_project_quotas(self, context, resources, project_id, - quota_class=None, defaults=True, usages=True): - self.called.append(('get_project_quotas', context, resources, - project_id, quota_class, defaults, usages)) - return resources - - def limit_check(self, context, resources, values, project_id=None): - self.called.append(('limit_check', context, resources, - values, project_id)) - - def 
reserve(self, context, resources, deltas, expire=None, - project_id=None): - self.called.append(('reserve', context, resources, deltas, - expire, project_id)) - return self.reservations - - def commit(self, context, reservations, project_id=None): - self.called.append(('commit', context, reservations, project_id)) - - def rollback(self, context, reservations, project_id=None): - self.called.append(('rollback', context, reservations, project_id)) - - def destroy_by_project(self, context, project_id): - self.called.append(('destroy_by_project', context, project_id)) - - def expire(self, context): - self.called.append(('expire', context)) - - -class BaseResourceTestCase(test.TestCase): - def test_no_flag(self): - resource = quota.BaseResource('test_resource') - self.assertEqual('test_resource', resource.name) - self.assertIsNone(resource.flag) - self.assertEqual(-1, resource.default) - - def test_with_flag(self): - # We know this flag exists, so use it... - self.flags(quota_volumes=10) - resource = quota.BaseResource('test_resource', 'quota_volumes') - self.assertEqual('test_resource', resource.name) - self.assertEqual('quota_volumes', resource.flag) - self.assertEqual(10, resource.default) - - def test_with_flag_no_quota(self): - self.flags(quota_volumes=-1) - resource = quota.BaseResource('test_resource', 'quota_volumes') - - self.assertEqual('test_resource', resource.name) - self.assertEqual('quota_volumes', resource.flag) - self.assertEqual(-1, resource.default) - - def test_quota_no_project_no_class(self): - self.flags(quota_volumes=10) - resource = quota.BaseResource('test_resource', 'quota_volumes') - driver = FakeDriver() - context = FakeContext(None, None) - quota_value = resource.quota(driver, context) - - self.assertEqual(10, quota_value) - - def test_quota_with_project_no_class(self): - self.flags(quota_volumes=10) - resource = quota.BaseResource('test_resource', 'quota_volumes') - driver = FakeDriver( - by_project=dict( - 
test_project=dict(test_resource=15), )) - context = FakeContext('test_project', None) - quota_value = resource.quota(driver, context) - - self.assertEqual(15, quota_value) - - def test_quota_no_project_with_class(self): - self.flags(quota_volumes=10) - resource = quota.BaseResource('test_resource', 'quota_volumes') - driver = FakeDriver( - by_class=dict( - test_class=dict(test_resource=20), )) - context = FakeContext(None, 'test_class') - quota_value = resource.quota(driver, context) - - self.assertEqual(20, quota_value) - - def test_quota_with_project_with_class(self): - self.flags(quota_volumes=10) - resource = quota.BaseResource('test_resource', 'quota_volumes') - driver = FakeDriver(by_project=dict( - test_project=dict(test_resource=15), ), - by_class=dict(test_class=dict(test_resource=20), )) - context = FakeContext('test_project', 'test_class') - quota_value = resource.quota(driver, context) - - self.assertEqual(15, quota_value) - - def test_quota_override_project_with_class(self): - self.flags(quota_volumes=10) - resource = quota.BaseResource('test_resource', 'quota_volumes') - driver = FakeDriver(by_project=dict( - test_project=dict(test_resource=15), - override_project=dict(test_resource=20), )) - context = FakeContext('test_project', 'test_class') - quota_value = resource.quota(driver, context, - project_id='override_project') - - self.assertEqual(20, quota_value) - - def test_quota_override_subproject_no_class(self): - self.flags(quota_volumes=10) - resource = quota.BaseResource('test_resource', 'quota_volumes', - parent_project_id='test_parent_project') - driver = FakeDriver() - context = FakeContext('test_project', None) - quota_value = resource.quota(driver, context) - - self.assertEqual(0, quota_value) - - def test_quota_with_project_override_class(self): - self.flags(quota_volumes=10) - resource = quota.BaseResource('test_resource', 'quota_volumes') - driver = FakeDriver(by_class=dict( - test_class=dict(test_resource=15), - 
override_class=dict(test_resource=20), )) - context = FakeContext('test_project', 'test_class') - quota_value = resource.quota(driver, context, - quota_class='override_class') - - self.assertEqual(20, quota_value) - - -class VolumeTypeResourceTestCase(test.TestCase): - def test_name_and_flag(self): - volume_type_name = 'foo' - volume = {'name': volume_type_name, 'id': 'myid'} - resource = quota.VolumeTypeResource('volumes', volume) - - self.assertEqual('volumes_%s' % volume_type_name, resource.name) - self.assertIsNone(resource.flag) - self.assertEqual(-1, resource.default) - - -class QuotaEngineTestCase(test.TestCase): - def test_init(self): - quota_obj = quota.QuotaEngine() - - self.assertEqual({}, quota_obj.resources) - self.assertIsInstance(quota_obj._driver, quota.DbQuotaDriver) - - def test_init_override_string(self): - quota_obj = quota.QuotaEngine( - quota_driver_class='cinder.tests.unit.test_quota.FakeDriver') - - self.assertEqual({}, quota_obj.resources) - self.assertIsInstance(quota_obj._driver, FakeDriver) - - def test_init_override_obj(self): - quota_obj = quota.QuotaEngine(quota_driver_class=FakeDriver) - - self.assertEqual({}, quota_obj.resources) - self.assertEqual(FakeDriver, quota_obj._driver) - - def test_register_resource(self): - quota_obj = quota.QuotaEngine() - resource = quota.AbsoluteResource('test_resource') - quota_obj.register_resource(resource) - - self.assertEqual(dict(test_resource=resource), quota_obj.resources) - - def test_register_resources(self): - quota_obj = quota.QuotaEngine() - resources = [ - quota.AbsoluteResource('test_resource1'), - quota.AbsoluteResource('test_resource2'), - quota.AbsoluteResource('test_resource3'), ] - quota_obj.register_resources(resources) - - self.assertEqual(dict(test_resource1=resources[0], - test_resource2=resources[1], - test_resource3=resources[2], ), - quota_obj.resources) - - def test_get_by_project(self): - context = FakeContext('test_project', 'test_class') - driver = FakeDriver( - 
by_project=dict( - test_project=dict(test_resource=42))) - quota_obj = quota.QuotaEngine(quota_driver_class=driver) - result = quota_obj.get_by_project(context, 'test_project', - 'test_resource') - - self.assertEqual([('get_by_project', - context, - 'test_project', - 'test_resource'), ], driver.called) - self.assertEqual(42, result) - - def test_get_by_class(self): - context = FakeContext('test_project', 'test_class') - driver = FakeDriver( - by_class=dict( - test_class=dict(test_resource=42))) - quota_obj = quota.QuotaEngine(quota_driver_class=driver) - result = quota_obj.get_by_class(context, 'test_class', 'test_resource') - - self.assertEqual([('get_by_class', - context, - 'test_class', - 'test_resource'), ], driver.called) - self.assertEqual(42, result) - - def _make_quota_obj(self, driver): - quota_obj = quota.QuotaEngine(quota_driver_class=driver) - resources = [ - quota.AbsoluteResource('test_resource4'), - quota.AbsoluteResource('test_resource3'), - quota.AbsoluteResource('test_resource2'), - quota.AbsoluteResource('test_resource1'), ] - quota_obj.register_resources(resources) - - return quota_obj - - def test_get_defaults(self): - context = FakeContext(None, None) - parent_project_id = None - driver = FakeDriver() - quota_obj = self._make_quota_obj(driver) - result = quota_obj.get_defaults(context) - - self.assertEqual([('get_defaults', - context, - quota_obj.resources, - parent_project_id), ], driver.called) - self.assertEqual(quota_obj.resources, result) - - def test_get_class_quotas(self): - context = FakeContext(None, None) - driver = FakeDriver() - quota_obj = self._make_quota_obj(driver) - result1 = quota_obj.get_class_quotas(context, 'test_class') - result2 = quota_obj.get_class_quotas(context, 'test_class', False) - - self.assertEqual([ - ('get_class_quotas', - context, - quota_obj.resources, - 'test_class', True), - ('get_class_quotas', - context, quota_obj.resources, - 'test_class', False), ], driver.called) - 
self.assertEqual(quota_obj.resources, result1) - self.assertEqual(quota_obj.resources, result2) - - def test_get_project_quotas(self): - context = FakeContext(None, None) - driver = FakeDriver() - quota_obj = self._make_quota_obj(driver) - result1 = quota_obj.get_project_quotas(context, 'test_project') - result2 = quota_obj.get_project_quotas(context, 'test_project', - quota_class='test_class', - defaults=False, - usages=False) - - self.assertEqual([ - ('get_project_quotas', - context, - quota_obj.resources, - 'test_project', - None, - True, - True), - ('get_project_quotas', - context, - quota_obj.resources, - 'test_project', - 'test_class', - False, - False), ], driver.called) - self.assertEqual(quota_obj.resources, result1) - self.assertEqual(quota_obj.resources, result2) - - def test_get_subproject_quotas(self): - context = FakeContext(None, None) - driver = FakeDriver() - quota_obj = self._make_quota_obj(driver) - result1 = quota_obj.get_project_quotas(context, 'test_project') - result2 = quota_obj.get_project_quotas(context, 'test_project', - quota_class='test_class', - defaults=False, - usages=False) - - self.assertEqual([ - ('get_project_quotas', - context, - quota_obj.resources, - 'test_project', - None, - True, - True), - ('get_project_quotas', - context, - quota_obj.resources, - 'test_project', - 'test_class', - False, - False), ], driver.called) - self.assertEqual(quota_obj.resources, result1) - self.assertEqual(quota_obj.resources, result2) - - def test_count_no_resource(self): - context = FakeContext(None, None) - driver = FakeDriver() - quota_obj = self._make_quota_obj(driver) - self.assertRaises(exception.QuotaResourceUnknown, - quota_obj.count, context, 'test_resource5', - True, foo='bar') - - def test_count_wrong_resource(self): - context = FakeContext(None, None) - driver = FakeDriver() - quota_obj = self._make_quota_obj(driver) - self.assertRaises(exception.QuotaResourceUnknown, - quota_obj.count, context, 'test_resource1', - True, foo='bar') - - 
def test_count(self): - def fake_count(context, *args, **kwargs): - self.assertEqual((True,), args) - self.assertEqual(dict(foo='bar'), kwargs) - return 5 - - context = FakeContext(None, None) - driver = FakeDriver() - quota_obj = self._make_quota_obj(driver) - quota_obj.register_resource(quota.CountableResource('test_resource5', - fake_count)) - result = quota_obj.count(context, 'test_resource5', True, foo='bar') - - self.assertEqual(5, result) - - def test_limit_check(self): - context = FakeContext(None, None) - driver = FakeDriver() - quota_obj = self._make_quota_obj(driver) - quota_obj.limit_check(context, test_resource1=4, test_resource2=3, - test_resource3=2, test_resource4=1) - - self.assertEqual([ - ('limit_check', - context, - quota_obj.resources, - dict( - test_resource1=4, - test_resource2=3, - test_resource3=2, - test_resource4=1,), - None), ], - driver.called) - - def test_reserve(self): - context = FakeContext(None, None) - driver = FakeDriver(reservations=['resv-01', - 'resv-02', - 'resv-03', - 'resv-04', ]) - quota_obj = self._make_quota_obj(driver) - result1 = quota_obj.reserve(context, test_resource1=4, - test_resource2=3, test_resource3=2, - test_resource4=1) - result2 = quota_obj.reserve(context, expire=3600, - test_resource1=1, test_resource2=2, - test_resource3=3, test_resource4=4) - result3 = quota_obj.reserve(context, project_id='fake_project', - test_resource1=1, test_resource2=2, - test_resource3=3, test_resource4=4) - - self.assertEqual([ - ('reserve', - context, - quota_obj.resources, - dict( - test_resource1=4, - test_resource2=3, - test_resource3=2, - test_resource4=1, ), - None, - None), - ('reserve', - context, - quota_obj.resources, - dict( - test_resource1=1, - test_resource2=2, - test_resource3=3, - test_resource4=4, ), - 3600, - None), - ('reserve', - context, - quota_obj.resources, - dict( - test_resource1=1, - test_resource2=2, - test_resource3=3, - test_resource4=4, ), - None, - 'fake_project'), ], - driver.called) - 
self.assertEqual(['resv-01', - 'resv-02', - 'resv-03', - 'resv-04', ], result1) - self.assertEqual(['resv-01', - 'resv-02', - 'resv-03', - 'resv-04', ], result2) - self.assertEqual(['resv-01', - 'resv-02', - 'resv-03', - 'resv-04', ], result3) - - def test_commit(self): - context = FakeContext(None, None) - driver = FakeDriver() - quota_obj = self._make_quota_obj(driver) - quota_obj.commit(context, ['resv-01', 'resv-02', 'resv-03']) - - self.assertEqual([('commit', - context, - ['resv-01', - 'resv-02', - 'resv-03'], - None), ], - driver.called) - - def test_rollback(self): - context = FakeContext(None, None) - driver = FakeDriver() - quota_obj = self._make_quota_obj(driver) - quota_obj.rollback(context, ['resv-01', 'resv-02', 'resv-03']) - - self.assertEqual([('rollback', - context, - ['resv-01', - 'resv-02', - 'resv-03'], - None), ], - driver.called) - - def test_destroy_by_project(self): - context = FakeContext(None, None) - driver = FakeDriver() - quota_obj = self._make_quota_obj(driver) - quota_obj.destroy_by_project(context, 'test_project') - - self.assertEqual([('destroy_by_project', - context, - 'test_project'), ], - driver.called) - - def test_expire(self): - context = FakeContext(None, None) - driver = FakeDriver() - quota_obj = self._make_quota_obj(driver) - quota_obj.expire(context) - - self.assertEqual([('expire', context), ], driver.called) - - def test_resource_names(self): - quota_obj = self._make_quota_obj(None) - - self.assertEqual(['test_resource1', 'test_resource2', - 'test_resource3', 'test_resource4'], - quota_obj.resource_names) - - -class VolumeTypeQuotaEngineTestCase(test.TestCase): - def test_default_resources(self): - def fake_vtga(context, inactive=False, filters=None): - return {} - self.mock_object(db, 'volume_type_get_all', fake_vtga) - - engine = quota.VolumeTypeQuotaEngine() - self.assertEqual(['backup_gigabytes', 'backups', - 'gigabytes', 'per_volume_gigabytes', - 'snapshots', 'volumes'], - engine.resource_names) - - def 
test_volume_type_resources(self): - ctx = context.RequestContext('admin', 'admin', is_admin=True) - vtype = db.volume_type_create(ctx, {'name': 'type1'}) - vtype2 = db.volume_type_create(ctx, {'name': 'type_2'}) - - def fake_vtga(context, inactive=False, filters=None): - return { - 'type1': { - 'id': vtype['id'], - 'name': 'type1', - 'extra_specs': {}, - }, - 'type_2': { - 'id': vtype['id'], - 'name': 'type_2', - 'extra_specs': {}, - }, - } - self.mock_object(db, 'volume_type_get_all', fake_vtga) - - engine = quota.VolumeTypeQuotaEngine() - self.assertEqual(['backup_gigabytes', 'backups', - 'gigabytes', 'gigabytes_type1', 'gigabytes_type_2', - 'per_volume_gigabytes', 'snapshots', - 'snapshots_type1', 'snapshots_type_2', 'volumes', - 'volumes_type1', 'volumes_type_2', - ], engine.resource_names) - db.volume_type_destroy(ctx, vtype['id']) - db.volume_type_destroy(ctx, vtype2['id']) - - def test_update_quota_resource(self): - ctx = context.RequestContext('admin', 'admin', is_admin=True) - - engine = quota.VolumeTypeQuotaEngine() - engine.update_quota_resource(ctx, 'type1', 'type2') - - -class DbQuotaDriverBaseTestCase(test.TestCase): - def setUp(self): - super(DbQuotaDriverBaseTestCase, self).setUp() - - self.flags(quota_volumes=10, - quota_snapshots=10, - quota_gigabytes=1000, - quota_backups=10, - quota_backup_gigabytes=1000, - reservation_expire=86400, - until_refresh=0, - max_age=0, - ) - - # These can be used for expected defaults for child/non-child - self._default_quotas_non_child = dict( - volumes=10, - snapshots=10, - gigabytes=1000, - backups=10, - backup_gigabytes=1000, - per_volume_gigabytes=-1) - self._default_quotas_child = dict( - volumes=0, - snapshots=0, - gigabytes=0, - backups=0, - backup_gigabytes=0, - per_volume_gigabytes=0) - - self.calls = [] - - patcher = mock.patch.object(timeutils, 'utcnow') - self.addCleanup(patcher.stop) - self.mock_utcnow = patcher.start() - self.mock_utcnow.return_value = datetime.datetime.utcnow() - - def 
_mock_quota_class_get_default(self): - # Mock quota_class_get_default - def fake_qcgd(context): - self.calls.append('quota_class_get_defaults') - return dict(volumes=10, - snapshots=10, - gigabytes=1000, - backups=10, - backup_gigabytes=1000 - ) - self.mock_object(db, 'quota_class_get_defaults', fake_qcgd) - - def _mock_volume_type_get_all(self): - def fake_vtga(context, inactive=False, filters=None): - return {} - self.mock_object(db, 'volume_type_get_all', fake_vtga) - - def _mock_quota_class_get_all_by_name(self): - # Mock quota_class_get_all_by_name - def fake_qcgabn(context, quota_class): - self.calls.append('quota_class_get_all_by_name') - self.assertEqual('test_class', quota_class) - return dict(gigabytes=500, volumes=10, snapshots=10, backups=10, - backup_gigabytes=500) - self.mock_object(db, 'quota_class_get_all_by_name', fake_qcgabn) - - def _mock_allocated_get_all_by_project(self, allocated_quota=False): - def fake_qagabp(context, project_id, session=None): - self.calls.append('quota_allocated_get_all_by_project') - if allocated_quota: - return dict(project_id=project_id, volumes=3) - return dict(project_id=project_id) - - self.mock_object(db, 'quota_allocated_get_all_by_project', fake_qagabp) - - -class DbQuotaDriverTestCase(DbQuotaDriverBaseTestCase): - def setUp(self): - super(DbQuotaDriverTestCase, self).setUp() - - self.driver = quota.DbQuotaDriver() - - def test_get_defaults(self): - # Use our pre-defined resources - self._mock_quota_class_get_default() - self._mock_volume_type_get_all() - result = self.driver.get_defaults(None, quota.QUOTAS.resources) - - self.assertEqual( - dict( - volumes=10, - snapshots=10, - gigabytes=1000, - backups=10, - backup_gigabytes=1000, - per_volume_gigabytes=-1), result) - - def test_get_class_quotas(self): - self._mock_quota_class_get_all_by_name() - self._mock_volume_type_get_all() - result = self.driver.get_class_quotas(None, quota.QUOTAS.resources, - 'test_class') - - 
self.assertEqual(['quota_class_get_all_by_name'], self.calls) - self.assertEqual(dict(volumes=10, - gigabytes=500, - snapshots=10, - backups=10, - backup_gigabytes=500, - per_volume_gigabytes=-1), result) - - def test_get_class_quotas_no_defaults(self): - self._mock_quota_class_get_all_by_name() - result = self.driver.get_class_quotas(None, quota.QUOTAS.resources, - 'test_class', False) - - self.assertEqual(['quota_class_get_all_by_name'], self.calls) - self.assertEqual(dict(volumes=10, - gigabytes=500, - snapshots=10, - backups=10, - backup_gigabytes=500), result) - - def _mock_get_by_project(self): - def fake_qgabp(context, project_id): - self.calls.append('quota_get_all_by_project') - self.assertEqual('test_project', project_id) - return dict(volumes=10, gigabytes=50, reserved=0, - snapshots=10, backups=10, - backup_gigabytes=50) - - def fake_qugabp(context, project_id): - self.calls.append('quota_usage_get_all_by_project') - self.assertEqual('test_project', project_id) - return dict(volumes=dict(in_use=2, reserved=0), - snapshots=dict(in_use=2, reserved=0), - gigabytes=dict(in_use=10, reserved=0), - backups=dict(in_use=2, reserved=0), - backup_gigabytes=dict(in_use=10, reserved=0) - ) - - self.mock_object(db, 'quota_get_all_by_project', fake_qgabp) - self.mock_object(db, 'quota_usage_get_all_by_project', fake_qugabp) - - self._mock_quota_class_get_all_by_name() - self._mock_quota_class_get_default() - - def test_get_project_quotas(self): - self._mock_get_by_project() - self._mock_volume_type_get_all() - self._mock_allocated_get_all_by_project() - result = self.driver.get_project_quotas( - FakeContext('test_project', 'test_class'), - quota.QUOTAS.resources, 'test_project') - - self.assertEqual(['quota_get_all_by_project', - 'quota_usage_get_all_by_project', - 'quota_allocated_get_all_by_project', - 'quota_class_get_all_by_name', - 'quota_class_get_defaults', ], self.calls) - self.assertEqual(dict(volumes=dict(limit=10, - in_use=2, - reserved=0, ), - 
snapshots=dict(limit=10, - in_use=2, - reserved=0, ), - gigabytes=dict(limit=50, - in_use=10, - reserved=0, ), - backups=dict(limit=10, - in_use=2, - reserved=0, ), - backup_gigabytes=dict(limit=50, - in_use=10, - reserved=0, ), - per_volume_gigabytes=dict(in_use=0, - limit=-1, - reserved= 0) - ), result) - - @mock.patch('cinder.quota.db.quota_get_all_by_project') - @mock.patch('cinder.quota.db.quota_class_get_defaults') - def test_get_project_quotas_lazy_load_defaults( - self, mock_defaults, mock_quotas): - mock_quotas.return_value = self._default_quotas_non_child - self.driver.get_project_quotas( - FakeContext('test_project', None), - quota.QUOTAS.resources, 'test_project', usages=False) - # Shouldn't load a project's defaults if all the quotas are already - # defined in the DB - self.assertFalse(mock_defaults.called) - - mock_quotas.return_value = {} - self.driver.get_project_quotas( - FakeContext('test_project', None), - quota.QUOTAS.resources, 'test_project', usages=False) - self.assertTrue(mock_defaults.called) - - def test_get_root_project_with_subprojects_quotas(self): - self._mock_get_by_project() - self._mock_volume_type_get_all() - self._mock_allocated_get_all_by_project(allocated_quota=True) - result = self.driver.get_project_quotas( - FakeContext('test_project', None), - quota.QUOTAS.resources, 'test_project') - - self.assertEqual(['quota_get_all_by_project', - 'quota_usage_get_all_by_project', - 'quota_allocated_get_all_by_project', - 'quota_class_get_defaults', ], self.calls) - self.assertEqual(dict(volumes=dict(limit=10, - in_use=2, - reserved=0, - allocated=3, ), - snapshots=dict(limit=10, - in_use=2, - reserved=0, - allocated=0, ), - gigabytes=dict(limit=50, - in_use=10, - reserved=0, - allocated=0, ), - backups=dict(limit=10, - in_use=2, - reserved=0, - allocated=0, ), - backup_gigabytes=dict(limit=50, - in_use=10, - reserved=0, - allocated=0, ), - per_volume_gigabytes=dict(in_use=0, - limit=-1, - reserved=0, - allocated=0) - ), result) - - def 
test_get_project_quotas_alt_context_no_class(self): - self._mock_get_by_project() - self._mock_volume_type_get_all() - result = self.driver.get_project_quotas( - FakeContext('other_project', 'other_class'), - quota.QUOTAS.resources, 'test_project') - - self.assertEqual(['quota_get_all_by_project', - 'quota_usage_get_all_by_project', - 'quota_class_get_defaults', ], self.calls) - self.assertEqual(dict(volumes=dict(limit=10, - in_use=2, - reserved=0, ), - snapshots=dict(limit=10, - in_use=2, - reserved=0, ), - gigabytes=dict(limit=50, - in_use=10, - reserved=0, ), - backups=dict(limit=10, - in_use=2, - reserved=0, ), - backup_gigabytes=dict(limit=50, - in_use=10, - reserved=0, ), - per_volume_gigabytes=dict(in_use=0, - limit=-1, - reserved=0) - ), result) - - def test_get_project_quotas_alt_context_with_class(self): - self._mock_get_by_project() - self._mock_volume_type_get_all() - result = self.driver.get_project_quotas( - FakeContext('other_project', 'other_class'), - quota.QUOTAS.resources, 'test_project', quota_class='test_class') - - self.assertEqual(['quota_get_all_by_project', - 'quota_usage_get_all_by_project', - 'quota_class_get_all_by_name', - 'quota_class_get_defaults', ], self.calls) - self.assertEqual(dict(volumes=dict(limit=10, - in_use=2, - reserved=0, ), - snapshots=dict(limit=10, - in_use=2, - reserved=0, ), - gigabytes=dict(limit=50, - in_use=10, - reserved=0, ), - backups=dict(limit=10, - in_use=2, - reserved=0, ), - backup_gigabytes=dict(limit=50, - in_use=10, - reserved=0, ), - per_volume_gigabytes=dict(in_use=0, - limit=-1, - reserved= 0)), - result) - - def test_get_project_quotas_no_defaults(self): - self._mock_get_by_project() - self._mock_volume_type_get_all() - result = self.driver.get_project_quotas( - FakeContext('test_project', 'test_class'), - quota.QUOTAS.resources, 'test_project', defaults=False) - - self.assertEqual(['quota_get_all_by_project', - 'quota_usage_get_all_by_project', - 'quota_class_get_all_by_name'], self.calls) - 
self.assertEqual(dict(backups=dict(limit=10, - in_use=2, - reserved=0, ), - backup_gigabytes=dict(limit=50, - in_use=10, - reserved=0, ), - gigabytes=dict(limit=50, - in_use=10, - reserved=0, ), - snapshots=dict(limit=10, - in_use=2, - reserved=0, ), - volumes=dict(limit=10, - in_use=2, - reserved=0, ), - - ), result) - - def test_get_project_quotas_no_usages(self): - self._mock_get_by_project() - self._mock_volume_type_get_all() - result = self.driver.get_project_quotas( - FakeContext('test_project', 'test_class'), - quota.QUOTAS.resources, 'test_project', usages=False) - - self.assertEqual(['quota_get_all_by_project', - 'quota_class_get_all_by_name', - 'quota_class_get_defaults', ], self.calls) - self.assertEqual(dict(volumes=dict(limit=10, ), - snapshots=dict(limit=10, ), - backups=dict(limit=10, ), - gigabytes=dict(limit=50, ), - backup_gigabytes=dict(limit=50, ), - per_volume_gigabytes=dict(limit=-1, )), result) - - def _mock_get_project_quotas(self): - def fake_get_project_quotas(context, resources, project_id, - quota_class=None, defaults=True, - usages=True, parent_project_id=None): - self.calls.append('get_project_quotas') - return {k: dict(limit=v.default) for k, v in resources.items()} - - self.mock_object(self.driver, 'get_project_quotas', - fake_get_project_quotas) - - def test_get_quotas_has_sync_unknown(self): - self._mock_get_project_quotas() - self.assertRaises(exception.QuotaResourceUnknown, - self.driver._get_quotas, - None, quota.QUOTAS.resources, - ['unknown'], True) - self.assertEqual([], self.calls) - - def test_get_quotas_no_sync_unknown(self): - self._mock_get_project_quotas() - self.assertRaises(exception.QuotaResourceUnknown, - self.driver._get_quotas, - None, quota.QUOTAS.resources, - ['unknown'], False) - self.assertEqual([], self.calls) - - def test_get_quotas_has_sync_no_sync_resource(self): - self._mock_get_project_quotas() - self.assertRaises(exception.QuotaResourceUnknown, - self.driver._get_quotas, - None, quota.QUOTAS.resources, 
- ['metadata_items'], True) - self.assertEqual([], self.calls) - - def test_get_quotas_no_sync_has_sync_resource(self): - self._mock_get_project_quotas() - self.assertRaises(exception.QuotaResourceUnknown, - self.driver._get_quotas, - None, quota.QUOTAS.resources, - ['volumes'], False) - self.assertEqual([], self.calls) - - def test_get_quotas_has_sync(self): - self._mock_get_project_quotas() - result = self.driver._get_quotas(FakeContext('test_project', - 'test_class'), - quota.QUOTAS.resources, - ['volumes', 'gigabytes'], - True) - - self.assertEqual(['get_project_quotas'], self.calls) - self.assertEqual(dict(volumes=10, gigabytes=1000, ), result) - - def _mock_quota_reserve(self): - def fake_quota_reserve(context, resources, quotas, deltas, expire, - until_refresh, max_age, project_id=None): - self.calls.append(('quota_reserve', expire, until_refresh, - max_age)) - return ['resv-1', 'resv-2', 'resv-3'] - self.mock_object(db, 'quota_reserve', fake_quota_reserve) - - def test_reserve_bad_expire(self): - self._mock_get_project_quotas() - self._mock_quota_reserve() - self.assertRaises(exception.InvalidReservationExpiration, - self.driver.reserve, - FakeContext('test_project', 'test_class'), - quota.QUOTAS.resources, - dict(volumes=2), expire='invalid') - self.assertEqual([], self.calls) - - def test_reserve_default_expire(self): - self._mock_get_project_quotas() - self._mock_quota_reserve() - result = self.driver.reserve(FakeContext('test_project', 'test_class'), - quota.QUOTAS.resources, - dict(volumes=2)) - - expire = timeutils.utcnow() + datetime.timedelta(seconds=86400) - self.assertEqual(['get_project_quotas', - ('quota_reserve', expire, 0, 0), ], self.calls) - self.assertEqual(['resv-1', 'resv-2', 'resv-3'], result) - - def test_reserve_int_expire(self): - self._mock_get_project_quotas() - self._mock_quota_reserve() - result = self.driver.reserve(FakeContext('test_project', 'test_class'), - quota.QUOTAS.resources, - dict(volumes=2), expire=3600) - - expire = 
timeutils.utcnow() + datetime.timedelta(seconds=3600) - self.assertEqual(['get_project_quotas', - ('quota_reserve', expire, 0, 0), ], self.calls) - self.assertEqual(['resv-1', 'resv-2', 'resv-3'], result) - - def test_reserve_timedelta_expire(self): - self._mock_get_project_quotas() - self._mock_quota_reserve() - expire_delta = datetime.timedelta(seconds=60) - result = self.driver.reserve(FakeContext('test_project', 'test_class'), - quota.QUOTAS.resources, - dict(volumes=2), expire=expire_delta) - - expire = timeutils.utcnow() + expire_delta - self.assertEqual(['get_project_quotas', - ('quota_reserve', expire, 0, 0), ], self.calls) - self.assertEqual(['resv-1', 'resv-2', 'resv-3'], result) - - def test_reserve_datetime_expire(self): - self._mock_get_project_quotas() - self._mock_quota_reserve() - expire = timeutils.utcnow() + datetime.timedelta(seconds=120) - result = self.driver.reserve(FakeContext('test_project', 'test_class'), - quota.QUOTAS.resources, - dict(volumes=2), expire=expire) - - self.assertEqual(['get_project_quotas', - ('quota_reserve', expire, 0, 0), ], self.calls) - self.assertEqual(['resv-1', 'resv-2', 'resv-3'], result) - - def test_reserve_until_refresh(self): - self._mock_get_project_quotas() - self._mock_quota_reserve() - self.flags(until_refresh=500) - expire = timeutils.utcnow() + datetime.timedelta(seconds=120) - result = self.driver.reserve(FakeContext('test_project', 'test_class'), - quota.QUOTAS.resources, - dict(volumes=2), expire=expire) - - self.assertEqual(['get_project_quotas', - ('quota_reserve', expire, 500, 0), ], self.calls) - self.assertEqual(['resv-1', 'resv-2', 'resv-3'], result) - - def test_reserve_max_age(self): - self._mock_get_project_quotas() - self._mock_quota_reserve() - self.flags(max_age=86400) - expire = timeutils.utcnow() + datetime.timedelta(seconds=120) - result = self.driver.reserve(FakeContext('test_project', 'test_class'), - quota.QUOTAS.resources, - dict(volumes=2), expire=expire) - - 
self.assertEqual(['get_project_quotas', - ('quota_reserve', expire, 0, 86400), ], self.calls) - self.assertEqual(['resv-1', 'resv-2', 'resv-3'], result) - - def _mock_quota_destroy_by_project(self): - def fake_quota_destroy_by_project(context, project_id): - self.calls.append(('quota_destroy_by_project', project_id)) - return None - self.mock_object(sqa_api, 'quota_destroy_by_project', - fake_quota_destroy_by_project) - - def test_destroy_quota_by_project(self): - self._mock_quota_destroy_by_project() - self.driver.destroy_by_project(FakeContext('test_project', - 'test_class'), - 'test_project') - self.assertEqual([('quota_destroy_by_project', ('test_project')), ], - self.calls) - - -class NestedDbQuotaDriverBaseTestCase(DbQuotaDriverBaseTestCase): - def setUp(self): - super(NestedDbQuotaDriverBaseTestCase, self).setUp() - self.context = context.RequestContext('user_id', - 'project_id', - is_admin=True, - auth_token="fake_token") - self.auth_url = 'http://localhost:5000' - self._child_proj_id = 'child_id' - self._non_child_proj_id = 'non_child_id' - - keystone_mock = mock.Mock() - keystone_mock.version = 'v3' - - class FakeProject(object): - def __init__(self, parent_id): - self.parent_id = parent_id - self.parents = {parent_id: None} - self.domain_id = 'default' - - def fake_get_project(project_id, subtree_as_ids=False, - parents_as_ids=False): - # Enable imitation of projects with and without parents - if project_id == self._child_proj_id: - return FakeProject('parent_id') - else: - return FakeProject(None) - - keystone_mock.projects.get.side_effect = fake_get_project - - def _keystone_mock(self): - return keystone_mock - - keystone_patcher = mock.patch('cinder.quota_utils._keystone_client', - _keystone_mock) - keystone_patcher.start() - self.addCleanup(keystone_patcher.stop) - - self.fixture = self.useFixture(config_fixture.Config(CONF)) - self.fixture.config(auth_uri=self.auth_url, group='keystone_authtoken') - self.driver = quota.NestedDbQuotaDriver() - - def 
_mock_get_by_subproject(self): - def fake_qgabp(context, project_id): - self.calls.append('quota_get_all_by_project') - return dict(volumes=10, gigabytes=50, reserved=0) - - def fake_qugabp(context, project_id): - self.calls.append('quota_usage_get_all_by_project') - return dict(volumes=dict(in_use=2, reserved=0), - gigabytes=dict(in_use=10, reserved=0)) - - self.mock_object(db, 'quota_get_all_by_project', fake_qgabp) - self.mock_object(db, 'quota_usage_get_all_by_project', fake_qugabp) - - self._mock_quota_class_get_all_by_name() - - -class NestedDbQuotaDriverTestCase(NestedDbQuotaDriverBaseTestCase): - def test_get_defaults(self): - self._mock_volume_type_get_all() - - # Test for child project defaults - result = self.driver.get_defaults(self.context, - quota.QUOTAS.resources, - self._child_proj_id) - self.assertEqual(self._default_quotas_child, result) - - # Test for non-child project defaults - result = self.driver.get_defaults(self.context, - quota.QUOTAS.resources, - self._non_child_proj_id) - self.assertEqual(self._default_quotas_non_child, result) - - def test_subproject_enforce_defaults(self): - # Non-child defaults should allow volume to get created - self.driver.reserve(self.context, - quota.QUOTAS.resources, - {'volumes': 1, 'gigabytes': 1}, - project_id=self._non_child_proj_id) - - # Child defaults should not allow volume to be created - self.assertRaises(exception.OverQuota, - self.driver.reserve, self.context, - quota.QUOTAS.resources, - {'volumes': 1, 'gigabytes': 1}, - project_id=self._child_proj_id) - - def test_get_subproject_quotas(self): - self._mock_get_by_subproject() - self._mock_volume_type_get_all() - self._mock_allocated_get_all_by_project(allocated_quota=True) - result = self.driver.get_project_quotas( - self.context, - quota.QUOTAS.resources, self._child_proj_id) - - self.assertEqual(['quota_get_all_by_project', - 'quota_usage_get_all_by_project', - 'quota_allocated_get_all_by_project', ], self.calls) - 
self.assertEqual(dict(volumes=dict(limit=10, - in_use=2, - reserved=0, - allocated=3, ), - snapshots=dict(limit=0, - in_use=0, - reserved=0, - allocated=0, ), - gigabytes=dict(limit=50, - in_use=10, - reserved=0, - allocated=0, ), - backups=dict(limit=0, - in_use=0, - reserved=0, - allocated=0, ), - backup_gigabytes=dict(limit=0, - in_use=0, - reserved=0, - allocated=0, ), - per_volume_gigabytes=dict(in_use=0, - limit=0, - reserved=0, - allocated=0) - ), result) - - -class NestedQuotaValidation(NestedDbQuotaDriverBaseTestCase): - def setUp(self): - super(NestedQuotaValidation, self).setUp() - r""" - Quota hierarchy setup like so - +-----------+ - | | - | A | - | / \ | - | B C | - | / | - | D | - +-----------+ - """ - self.project_tree = {'A': {'B': {'D': None}, 'C': None}} - self.proj_vals = { - 'A': {'limit': 7, 'in_use': 1, 'alloc': 6}, - 'B': {'limit': 3, 'in_use': 1, 'alloc': 2}, - 'D': {'limit': 2, 'in_use': 0}, - 'C': {'limit': 3, 'in_use': 3}, - } - - # Just using one resource currently for simplicity of test - self.resources = {'volumes': quota.ReservableResource( - 'volumes', '_sync_volumes', 'quota_volumes')} - - to_patch = [('cinder.db.quota_allocated_get_all_by_project', - self._fake_quota_allocated_get_all_by_project), - ('cinder.db.quota_get_all_by_project', - self._fake_quota_get_all_by_project), - ('cinder.db.quota_usage_get_all_by_project', - self._fake_quota_usage_get_all_by_project)] - - for patch_path, patch_obj in to_patch: - patcher = mock.patch(patch_path, patch_obj) - patcher.start() - self.addCleanup(patcher.stop) - - def _fake_quota_get_all_by_project(self, context, project_id): - return {'volumes': self.proj_vals[project_id]['limit']} - - def _fake_quota_usage_get_all_by_project(self, context, project_id): - return {'volumes': self.proj_vals[project_id]} - - def _fake_quota_allocated_get_all_by_project(self, context, project_id, - session=None): - ret = {'project_id': project_id} - proj_val = self.proj_vals[project_id] - if 'alloc' in 
proj_val: - ret['volumes'] = proj_val['alloc'] - return ret - - def test_validate_nested_quotas(self): - self.driver.validate_nested_setup(self.context, - self.resources, self.project_tree) - - # Fail because 7 - 2 < 3 + 3 - self.proj_vals['A']['in_use'] = 2 - self.assertRaises(exception.InvalidNestedQuotaSetup, - self.driver.validate_nested_setup, - self.context, - self.resources, self.project_tree) - self.proj_vals['A']['in_use'] = 1 - - # Fail because 7 - 1 < 3 + 7 - self.proj_vals['C']['limit'] = 7 - self.assertRaises(exception.InvalidNestedQuotaSetup, - self.driver.validate_nested_setup, - self.context, - self.resources, self.project_tree) - self.proj_vals['C']['limit'] = 3 - - # Fail because 3 < 4 - self.proj_vals['D']['limit'] = 4 - self.assertRaises(exception.InvalidNestedQuotaSetup, - self.driver.validate_nested_setup, - self.context, - self.resources, self.project_tree) - self.proj_vals['D']['limit'] = 2 - - def test_validate_nested_quotas_usage_over_limit(self): - self.proj_vals['D']['in_use'] = 5 - self.assertRaises(exception.InvalidNestedQuotaSetup, - self.driver.validate_nested_setup, - self.context, self.resources, self.project_tree) - - def test_validate_nested_quota_bad_allocated_quotas(self): - self.proj_vals['A']['alloc'] = 5 - self.proj_vals['B']['alloc'] = 8 - self.assertRaises(exception.InvalidNestedQuotaSetup, - self.driver.validate_nested_setup, - self.context, self.resources, self.project_tree) - - def test_validate_nested_quota_negative_child_limits(self): - # Redefining the project limits with -1, doing it all in this test - # for readability - self.proj_vals = { - 'A': {'limit': 8, 'in_use': 1}, - 'B': {'limit': -1, 'in_use': 3}, - 'D': {'limit': 4, 'in_use': 0}, - 'C': {'limit': 2, 'in_use': 2}, - } - - # A's child usage is 3 (from B) + 4 (from D) + 2 (from C) = 9 - self.assertRaises(exception.InvalidNestedQuotaSetup, - self.driver.validate_nested_setup, - self.context, self.resources, self.project_tree) - - self.proj_vals['D']['limit'] 
= 2 - self.driver.validate_nested_setup( - self.context, self.resources, self.project_tree, - fix_allocated_quotas=True) - - def test_get_cur_project_allocated(self): - # Redefining the project limits with -1, doing it all in this test - # for readability - self.proj_vals = { - # Allocated are here to simulate a bad existing value - 'A': {'limit': 8, 'in_use': 1, 'alloc': 6}, - 'B': {'limit': -1, 'in_use': 3, 'alloc': 2}, - 'D': {'limit': 1, 'in_use': 0}, - 'C': {'limit': 2, 'in_use': 2}, - } - - self.driver._allocated = {} - allocated_a = self.driver._get_cur_project_allocated( - self.context, self.resources['volumes'], - self.project_tree) - - # A's allocated will be: - # 2 (from C's limit) + 3 (from B's in-use) + 1 (from D's limit) = 6 - self.assertEqual(6, allocated_a) - - # B's allocated value should also be calculated and cached as part - # of A's calculation - self.assertEqual(1, self.driver._allocated['B']['volumes']) - - -class FakeSession(object): - def begin(self): - return self - - def __enter__(self): - return self - - def __exit__(self, exc_type, exc_value, exc_traceback): - return False - - def query(self, *args, **kwargs): - pass - - -class FakeUsage(sqa_models.QuotaUsage): - def save(self, *args, **kwargs): - pass - - -class QuotaReserveSqlAlchemyTestCase(test.TestCase): - # cinder.db.sqlalchemy.api.quota_reserve is so complex it needs its - # own test case, and since it's a quota manipulator, this is the - # best place to put it... 
- - def setUp(self): - super(QuotaReserveSqlAlchemyTestCase, self).setUp() - - self.sync_called = set() - - def make_sync(res_name): - def fake_sync(context, project_id, volume_type_id=None, - volume_type_name=None, session=None): - self.sync_called.add(res_name) - if res_name in self.usages: - if self.usages[res_name].in_use < 0: - return {res_name: 2} - else: - return {res_name: self.usages[res_name].in_use - 1} - return {res_name: 0} - return fake_sync - - self.resources = {} - QUOTA_SYNC_FUNCTIONS = {} - for res_name in ('volumes', 'gigabytes'): - res = quota.ReservableResource(res_name, '_sync_%s' % res_name) - QUOTA_SYNC_FUNCTIONS['_sync_%s' % res_name] = make_sync(res_name) - self.resources[res_name] = res - - self.mock_object(sqa_api, 'QUOTA_SYNC_FUNCTIONS', QUOTA_SYNC_FUNCTIONS) - self.expire = timeutils.utcnow() + datetime.timedelta(seconds=3600) - - self.usages = {} - self.usages_created = {} - self.reservations_created = {} - - def fake_get_session(): - return FakeSession() - - def fake_get_quota_usages(context, session, project_id): - return self.usages.copy() - - def fake_quota_usage_create(context, project_id, resource, in_use, - reserved, until_refresh, session=None, - save=True): - quota_usage_ref = self._make_quota_usage( - project_id, resource, in_use, reserved, until_refresh, - timeutils.utcnow(), timeutils.utcnow()) - - self.usages_created[resource] = quota_usage_ref - - return quota_usage_ref - - def fake_reservation_create(context, uuid, usage_id, project_id, - resource, delta, expire, session=None, - allocated_id=None): - reservation_ref = self._make_reservation( - uuid, usage_id, project_id, resource, delta, expire, - timeutils.utcnow(), timeutils.utcnow(), allocated_id) - - self.reservations_created[resource] = reservation_ref - - return reservation_ref - - self.mock_object(sqa_api, 'get_session', - fake_get_session) - self.mock_object(sqa_api, '_get_quota_usages', - fake_get_quota_usages) - self.mock_object(sqa_api, '_quota_usage_create', 
- fake_quota_usage_create) - self.mock_object(sqa_api, '_reservation_create', - fake_reservation_create) - - patcher = mock.patch.object(timeutils, 'utcnow') - self.addCleanup(patcher.stop) - self.mock_utcnow = patcher.start() - self.mock_utcnow.return_value = datetime.datetime.utcnow() - - def _make_quota_usage(self, project_id, resource, in_use, reserved, - until_refresh, created_at, updated_at): - quota_usage_ref = FakeUsage() - quota_usage_ref.id = len(self.usages) + len(self.usages_created) - quota_usage_ref.project_id = project_id - quota_usage_ref.resource = resource - quota_usage_ref.in_use = in_use - quota_usage_ref.reserved = reserved - quota_usage_ref.until_refresh = until_refresh - quota_usage_ref.created_at = created_at - quota_usage_ref.updated_at = updated_at - quota_usage_ref.deleted_at = None - quota_usage_ref.deleted = False - - return quota_usage_ref - - def init_usage(self, project_id, resource, in_use, reserved, - until_refresh=None, created_at=None, updated_at=None): - if created_at is None: - created_at = timeutils.utcnow() - if updated_at is None: - updated_at = timeutils.utcnow() - - quota_usage_ref = self._make_quota_usage(project_id, resource, in_use, - reserved, until_refresh, - created_at, updated_at) - - self.usages[resource] = quota_usage_ref - - def compare_usage(self, usage_dict, expected): - for usage in expected: - resource = usage['resource'] - for key, value in usage.items(): - actual = getattr(usage_dict[resource], key) - self.assertEqual(value, actual, - "%s != %s on usage for resource %s" % - (actual, value, resource)) - - def _make_reservation(self, uuid, usage_id, project_id, resource, - delta, expire, created_at, updated_at, alloc_id): - reservation_ref = sqa_models.Reservation() - reservation_ref.id = len(self.reservations_created) - reservation_ref.uuid = uuid - reservation_ref.usage_id = usage_id - reservation_ref.project_id = project_id - reservation_ref.resource = resource - reservation_ref.delta = delta - 
reservation_ref.expire = expire - reservation_ref.created_at = created_at - reservation_ref.updated_at = updated_at - reservation_ref.deleted_at = None - reservation_ref.deleted = False - reservation_ref.allocated_id = alloc_id - - return reservation_ref - - def compare_reservation(self, reservations, expected): - reservations = set(reservations) - for resv in expected: - resource = resv['resource'] - resv_obj = self.reservations_created[resource] - - self.assertIn(resv_obj.uuid, reservations) - reservations.discard(resv_obj.uuid) - - for key, value in resv.items(): - actual = getattr(resv_obj, key) - self.assertEqual(value, actual, - "%s != %s on reservation for resource %s" % - (actual, value, resource)) - - self.assertEqual(0, len(reservations)) - - def _mock_allocated_get_all_by_project(self, allocated_quota=False): - def fake_qagabp(context, project_id, session=None): - self.assertEqual('test_project', project_id) - self.assertIsNotNone(session) - if allocated_quota: - return dict(project_id=project_id, volumes=3, - gigabytes = 2 * 1024) - return dict(project_id=project_id) - - self.mock_object(sqa_api, 'quota_allocated_get_all_by_project', - fake_qagabp) - - def test_quota_reserve_with_allocated(self): - context = FakeContext('test_project', 'test_class') - # Allocated quota for volume will be updated for 3 - self._mock_allocated_get_all_by_project(allocated_quota=True) - # Quota limited for volume updated for 10 - quotas = dict(volumes=10, - gigabytes=10 * 1024, ) - # Try reserve 7 volumes - deltas = dict(volumes=7, - gigabytes=2 * 1024, ) - result = sqa_api.quota_reserve(context, self.resources, quotas, - deltas, self.expire, 5, 0) - # The reservation works - self.compare_reservation( - result, - [dict(resource='volumes', - usage_id=self.usages_created['volumes'], - project_id='test_project', - delta=7), - dict(resource='gigabytes', - usage_id=self.usages_created['gigabytes'], - delta=2 * 1024), ]) - - # But if we try reserve 8 volumes(more free quota that 
we have) - deltas = dict(volumes=8, - gigabytes=2 * 1024, ) - - self.assertRaises(exception.OverQuota, - sqa_api.quota_reserve, - context, self.resources, quotas, - deltas, self.expire, 0, 0) - - def test_quota_reserve_create_usages(self): - context = FakeContext('test_project', 'test_class') - quotas = dict(volumes=5, - gigabytes=10 * 1024, ) - deltas = dict(volumes=2, - gigabytes=2 * 1024, ) - self._mock_allocated_get_all_by_project() - result = sqa_api.quota_reserve(context, self.resources, quotas, - deltas, self.expire, 0, 0) - - self.assertEqual(set(['volumes', 'gigabytes']), self.sync_called) - self.compare_usage(self.usages_created, - [dict(resource='volumes', - project_id='test_project', - in_use=0, - reserved=2, - until_refresh=None), - dict(resource='gigabytes', - project_id='test_project', - in_use=0, - reserved=2 * 1024, - until_refresh=None), ]) - self.compare_reservation( - result, - [dict(resource='volumes', - usage_id=self.usages_created['volumes'], - project_id='test_project', - delta=2), - dict(resource='gigabytes', - usage_id=self.usages_created['gigabytes'], - delta=2 * 1024), ]) - - def test_quota_reserve_negative_in_use(self): - self.init_usage('test_project', 'volumes', -1, 0, until_refresh=1) - self.init_usage('test_project', 'gigabytes', -1, 0, until_refresh=1) - context = FakeContext('test_project', 'test_class') - quotas = dict(volumes=5, - gigabytes=10 * 1024, ) - deltas = dict(volumes=2, - gigabytes=2 * 1024, ) - self._mock_allocated_get_all_by_project() - result = sqa_api.quota_reserve(context, self.resources, quotas, - deltas, self.expire, 5, 0) - - self.assertEqual(set(['volumes', 'gigabytes']), self.sync_called) - self.compare_usage(self.usages, [dict(resource='volumes', - project_id='test_project', - in_use=2, - reserved=2, - until_refresh=5), - dict(resource='gigabytes', - project_id='test_project', - in_use=2, - reserved=2 * 1024, - until_refresh=5), ]) - self.assertEqual({}, self.usages_created) - 
self.compare_reservation(result, - [dict(resource='volumes', - usage_id=self.usages['volumes'], - project_id='test_project', - delta=2), - dict(resource='gigabytes', - usage_id=self.usages['gigabytes'], - delta=2 * 1024), ]) - - def test_quota_reserve_until_refresh(self): - self.init_usage('test_project', 'volumes', 3, 0, until_refresh=1) - self.init_usage('test_project', 'gigabytes', 3, 0, until_refresh=1) - context = FakeContext('test_project', 'test_class') - quotas = dict(volumes=5, gigabytes=10 * 1024, ) - deltas = dict(volumes=2, gigabytes=2 * 1024, ) - self._mock_allocated_get_all_by_project() - result = sqa_api.quota_reserve(context, self.resources, quotas, - deltas, self.expire, 5, 0) - - self.assertEqual(set(['volumes', 'gigabytes']), self.sync_called) - self.compare_usage(self.usages, [dict(resource='volumes', - project_id='test_project', - in_use=2, - reserved=2, - until_refresh=5), - dict(resource='gigabytes', - project_id='test_project', - in_use=2, - reserved=2 * 1024, - until_refresh=5), ]) - self.assertEqual({}, self.usages_created) - self.compare_reservation(result, - [dict(resource='volumes', - usage_id=self.usages['volumes'], - project_id='test_project', - delta=2), - dict(resource='gigabytes', - usage_id=self.usages['gigabytes'], - delta=2 * 1024), ]) - - def test_quota_reserve_max_age(self): - max_age = 3600 - record_created = (timeutils.utcnow() - - datetime.timedelta(seconds=max_age)) - self.init_usage('test_project', 'volumes', 3, 0, - created_at=record_created, updated_at=record_created) - self.init_usage('test_project', 'gigabytes', 3, 0, - created_at=record_created, updated_at=record_created) - context = FakeContext('test_project', 'test_class') - quotas = dict(volumes=5, gigabytes=10 * 1024, ) - deltas = dict(volumes=2, gigabytes=2 * 1024, ) - self._mock_allocated_get_all_by_project() - result = sqa_api.quota_reserve(context, self.resources, quotas, - deltas, self.expire, 0, max_age) - - self.assertEqual(set(['volumes', 'gigabytes']), 
self.sync_called) - self.compare_usage(self.usages, [dict(resource='volumes', - project_id='test_project', - in_use=2, - reserved=2, - until_refresh=None), - dict(resource='gigabytes', - project_id='test_project', - in_use=2, - reserved=2 * 1024, - until_refresh=None), ]) - self.assertEqual({}, self.usages_created) - self.compare_reservation(result, - [dict(resource='volumes', - usage_id=self.usages['volumes'], - project_id='test_project', - delta=2), - dict(resource='gigabytes', - usage_id=self.usages['gigabytes'], - delta=2 * 1024), ]) - - def test_quota_reserve_max_age_negative(self): - max_age = 3600 - record_created = (timeutils.utcnow() + - datetime.timedelta(seconds=max_age)) - self.init_usage('test_project', 'volumes', 3, 0, - created_at=record_created, updated_at=record_created) - self.init_usage('test_project', 'gigabytes', 3, 0, - created_at=record_created, updated_at=record_created) - context = FakeContext('test_project', 'test_class') - quotas = dict(volumes=5, gigabytes=10 * 1024, ) - deltas = dict(volumes=2, gigabytes=2 * 1024, ) - self._mock_allocated_get_all_by_project() - result = sqa_api.quota_reserve(context, self.resources, quotas, - deltas, self.expire, 0, max_age) - - self.assertEqual(set(), self.sync_called) - self.compare_usage(self.usages, [dict(resource='volumes', - project_id='test_project', - in_use=3, - reserved=2, - until_refresh=None), - dict(resource='gigabytes', - project_id='test_project', - in_use=3, - reserved=2 * 1024, - until_refresh=None), ]) - self.assertEqual({}, self.usages_created) - self.compare_reservation(result, - [dict(resource='volumes', - usage_id=self.usages['volumes'], - project_id='test_project', - delta=2), - dict(resource='gigabytes', - usage_id=self.usages['gigabytes'], - delta=2 * 1024), ]) - - def test_quota_reserve_no_refresh(self): - self.init_usage('test_project', 'volumes', 3, 0) - self.init_usage('test_project', 'gigabytes', 3, 0) - context = FakeContext('test_project', 'test_class') - quotas = 
dict(volumes=5, gigabytes=10 * 1024, ) - deltas = dict(volumes=2, gigabytes=2 * 1024, ) - self._mock_allocated_get_all_by_project() - result = sqa_api.quota_reserve(context, self.resources, quotas, - deltas, self.expire, 0, 0) - - self.assertEqual(set([]), self.sync_called) - self.compare_usage(self.usages, [dict(resource='volumes', - project_id='test_project', - in_use=3, - reserved=2, - until_refresh=None), - dict(resource='gigabytes', - project_id='test_project', - in_use=3, - reserved=2 * 1024, - until_refresh=None), ]) - self.assertEqual({}, self.usages_created) - self.compare_reservation(result, - [dict(resource='volumes', - usage_id=self.usages['volumes'], - project_id='test_project', - delta=2), - dict(resource='gigabytes', - usage_id=self.usages['gigabytes'], - delta=2 * 1024), ]) - - def test_quota_reserve_unders(self): - self.init_usage('test_project', 'volumes', 1, 0) - self.init_usage('test_project', 'gigabytes', 1 * 1024, 0) - context = FakeContext('test_project', 'test_class') - quotas = dict(volumes=5, gigabytes=10 * 1024, ) - deltas = dict(volumes=-2, gigabytes=-2 * 1024, ) - self._mock_allocated_get_all_by_project() - result = sqa_api.quota_reserve(context, self.resources, quotas, - deltas, self.expire, 0, 0) - - self.assertEqual(set([]), self.sync_called) - self.compare_usage(self.usages, [dict(resource='volumes', - project_id='test_project', - in_use=1, - reserved=0, - until_refresh=None), - dict(resource='gigabytes', - project_id='test_project', - in_use=1 * 1024, - reserved=0, - until_refresh=None), ]) - self.assertEqual({}, self.usages_created) - self.compare_reservation(result, - [dict(resource='volumes', - usage_id=self.usages['volumes'], - project_id='test_project', - delta=-2), - dict(resource='gigabytes', - usage_id=self.usages['gigabytes'], - delta=-2 * 1024), ]) - - def test_quota_reserve_overs(self): - self.init_usage('test_project', 'volumes', 4, 0) - self.init_usage('test_project', 'gigabytes', 10 * 1024, 0) - context = 
FakeContext('test_project', 'test_class') - quotas = dict(volumes=5, gigabytes=10 * 1024, ) - deltas = dict(volumes=2, gigabytes=2 * 1024, ) - self._mock_allocated_get_all_by_project() - self.assertRaises(exception.OverQuota, - sqa_api.quota_reserve, - context, self.resources, quotas, - deltas, self.expire, 0, 0) - - self.assertEqual(set([]), self.sync_called) - self.compare_usage(self.usages, [dict(resource='volumes', - project_id='test_project', - in_use=4, - reserved=0, - until_refresh=None), - dict(resource='gigabytes', - project_id='test_project', - in_use=10 * 1024, - reserved=0, - until_refresh=None), ]) - self.assertEqual({}, self.usages_created) - self.assertEqual({}, self.reservations_created) - - def test_quota_reserve_reduction(self): - self.init_usage('test_project', 'volumes', 10, 0) - self.init_usage('test_project', 'gigabytes', 20 * 1024, 0) - context = FakeContext('test_project', 'test_class') - quotas = dict(volumes=5, gigabytes=10 * 1024, ) - deltas = dict(volumes=-2, gigabytes=-2 * 1024, ) - self._mock_allocated_get_all_by_project() - result = sqa_api.quota_reserve(context, self.resources, quotas, - deltas, self.expire, 0, 0) - - self.assertEqual(set([]), self.sync_called) - self.compare_usage(self.usages, [dict(resource='volumes', - project_id='test_project', - in_use=10, - reserved=0, - until_refresh=None), - dict(resource='gigabytes', - project_id='test_project', - in_use=20 * 1024, - reserved=0, - until_refresh=None), ]) - self.assertEqual({}, self.usages_created) - self.compare_reservation(result, - [dict(resource='volumes', - usage_id=self.usages['volumes'], - project_id='test_project', - delta=-2), - dict(resource='gigabytes', - usage_id=self.usages['gigabytes'], - project_id='test_project', - delta=-2 * 1024), ]) - - -class QuotaVolumeTypeReservationTestCase(test.TestCase): - - def setUp(self): - super(QuotaVolumeTypeReservationTestCase, self).setUp() - - self.volume_type_name = CONF.default_volume_type - self.volume_type = 
db.volume_type_create( - context.get_admin_context(), - dict(name=self.volume_type_name)) - - @mock.patch.object(quota.QUOTAS, 'reserve') - @mock.patch.object(quota.QUOTAS, 'add_volume_type_opts') - def test_volume_type_reservation(self, - mock_add_volume_type_opts, - mock_reserve): - my_context = FakeContext('MyProject', None) - volume = {'name': 'my_vol_name', - 'id': 'my_vol_id', - 'size': '1', - 'project_id': 'vol_project_id', - } - reserve_opts = {'volumes': 1, 'gigabytes': volume['size']} - quota_utils.get_volume_type_reservation(my_context, - volume, - self.volume_type['id']) - mock_add_volume_type_opts.assert_called_once_with( - my_context, - reserve_opts, - self.volume_type['id']) - mock_reserve.assert_called_once_with(my_context, - project_id='vol_project_id', - gigabytes='1', - volumes=1) - - @mock.patch.object(quota.QUOTAS, 'reserve') - def test_volume_type_reservation_with_type_only(self, mock_reserve): - my_context = FakeContext('MyProject', None) - volume = {'name': 'my_vol_name', - 'id': 'my_vol_id', - 'size': '1', - 'project_id': 'vol_project_id', - } - quota_utils.get_volume_type_reservation(my_context, - volume, - self.volume_type['id'], - reserve_vol_type_only=True) - vtype_volume_quota = "%s_%s" % ('volumes', self.volume_type['name']) - vtype_size_quota = "%s_%s" % ('gigabytes', self.volume_type['name']) - reserve_opts = {vtype_volume_quota: 1, - vtype_size_quota: volume['size']} - mock_reserve.assert_called_once_with(my_context, - project_id='vol_project_id', - **reserve_opts) diff --git a/cinder/tests/unit/test_quota_utils.py b/cinder/tests/unit/test_quota_utils.py deleted file mode 100644 index d1b4b4b37..000000000 --- a/cinder/tests/unit/test_quota_utils.py +++ /dev/null @@ -1,252 +0,0 @@ -# Copyright 2016 OpenStack Foundation -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. 
You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import mock - -from cinder import context -from cinder import exception -from cinder import quota_utils -from cinder import test - -from keystoneclient import exceptions - -from oslo_config import cfg -from oslo_config import fixture as config_fixture - -CONF = cfg.CONF - - -class QuotaUtilsTest(test.TestCase): - class FakeProject(object): - def __init__(self, id='foo', parent_id=None): - self.id = id - self.parent_id = parent_id - self.subtree = None - self.parents = None - self.domain_id = 'default' - - def setUp(self): - super(QuotaUtilsTest, self).setUp() - - self.auth_url = 'http://localhost:5000' - self.context = context.RequestContext('fake_user', 'fake_proj_id') - self.fixture = self.useFixture(config_fixture.Config(CONF)) - self.fixture.config(auth_uri=self.auth_url, group='keystone_authtoken') - - @mock.patch('keystoneclient.client.Client') - @mock.patch('keystoneauth1.session.Session') - def test_keystone_client_instantiation(self, ksclient_session, - ksclient_class): - quota_utils._keystone_client(self.context) - ksclient_class.assert_called_once_with(auth_url=self.auth_url, - session=ksclient_session(), - version=(3, 0)) - - @mock.patch('keystoneclient.client.Client') - def test_get_project_keystoneclient_v2(self, ksclient_class): - keystoneclient = ksclient_class.return_value - keystoneclient.version = 'v2.0' - expected_project = quota_utils.GenericProjectInfo( - self.context.project_id, 'v2.0') - project = quota_utils.get_project_hierarchy( - self.context, self.context.project_id) - self.assertEqual(expected_project.__dict__, 
project.__dict__) - - @mock.patch('keystoneclient.client.Client') - def test_get_project_keystoneclient_v3(self, ksclient_class): - keystoneclient = ksclient_class.return_value - keystoneclient.version = 'v3' - returned_project = self.FakeProject(self.context.project_id, 'bar') - del returned_project.subtree - keystoneclient.projects.get.return_value = returned_project - expected_project = quota_utils.GenericProjectInfo( - self.context.project_id, 'v3', 'bar') - project = quota_utils.get_project_hierarchy( - self.context, self.context.project_id) - self.assertEqual(expected_project.__dict__, project.__dict__) - - @mock.patch('keystoneclient.client.Client') - def test_get_project_keystoneclient_v3_with_subtree(self, ksclient_class): - keystoneclient = ksclient_class.return_value - keystoneclient.version = 'v3' - returned_project = self.FakeProject(self.context.project_id, 'bar') - subtree_dict = {'baz': {'quux': None}} - returned_project.subtree = subtree_dict - keystoneclient.projects.get.return_value = returned_project - expected_project = quota_utils.GenericProjectInfo( - self.context.project_id, 'v3', 'bar', subtree_dict) - project = quota_utils.get_project_hierarchy( - self.context, self.context.project_id, subtree_as_ids=True) - keystoneclient.projects.get.assert_called_once_with( - self.context.project_id, parents_as_ids=False, subtree_as_ids=True) - self.assertEqual(expected_project.__dict__, project.__dict__) - - def _setup_mock_ksclient(self, mock_client, version='v3', - subtree=None, parents=None): - keystoneclient = mock_client.return_value - keystoneclient.version = version - proj = self.FakeProject(self.context.project_id) - proj.subtree = subtree - if parents: - proj.parents = parents - proj.parent_id = next(iter(parents.keys())) - keystoneclient.projects.get.return_value = proj - - @mock.patch('keystoneclient.client.Client') - def test__filter_domain_id_from_parents_domain_as_parent( - self, mock_client): - # Test with a top level project (domain is 
direct parent) - self._setup_mock_ksclient(mock_client, parents={'default': None}) - project = quota_utils.get_project_hierarchy( - self.context, self.context.project_id, parents_as_ids=True) - self.assertIsNone(project.parent_id) - self.assertIsNone(project.parents) - - @mock.patch('keystoneclient.client.Client') - def test__filter_domain_id_from_parents_domain_as_grandparent( - self, mock_client): - # Test with a child project (domain is more than a parent) - self._setup_mock_ksclient(mock_client, - parents={'bar': {'default': None}}) - project = quota_utils.get_project_hierarchy( - self.context, self.context.project_id, parents_as_ids=True) - self.assertEqual('bar', project.parent_id) - self.assertEqual({'bar': None}, project.parents) - - @mock.patch('keystoneclient.client.Client') - def test__filter_domain_id_from_parents_no_domain_in_parents( - self, mock_client): - # Test that if top most parent is not a domain (to simulate an older - # keystone version) nothing gets removed from the tree - parents = {'bar': {'foo': None}} - self._setup_mock_ksclient(mock_client, parents=parents) - project = quota_utils.get_project_hierarchy( - self.context, self.context.project_id, parents_as_ids=True) - self.assertEqual('bar', project.parent_id) - self.assertEqual(parents, project.parents) - - @mock.patch('keystoneclient.client.Client') - def test__filter_domain_id_from_parents_no_parents( - self, mock_client): - # Test that if top no parents are present (to simulate an older - # keystone version) things don't blow up - self._setup_mock_ksclient(mock_client) - project = quota_utils.get_project_hierarchy( - self.context, self.context.project_id, parents_as_ids=True) - self.assertIsNone(project.parent_id) - self.assertIsNone(project.parents) - - @mock.patch('cinder.quota_utils._keystone_client') - def test_validate_nested_projects_with_keystone_v2(self, _keystone_client): - _keystone_client.side_effect = exceptions.VersionNotAvailable - - 
self.assertRaises(exception.CinderException, - quota_utils.validate_setup_for_nested_quota_use, - self.context, [], None) - - @mock.patch('cinder.quota_utils._keystone_client') - def test_validate_nested_projects_non_cloud_admin(self, _keystone_client): - # Covers not cloud admin or using old policy.json - _keystone_client.side_effect = exceptions.Forbidden - - self.assertRaises(exception.CinderException, - quota_utils.validate_setup_for_nested_quota_use, - self.context, [], None) - - def _process_reserve_over_quota(self, overs, usages, quotas, - expected_ex, - resource='volumes'): - ctxt = context.get_admin_context() - ctxt.project_id = 'fake' - size = 1 - kwargs = {'overs': overs, - 'usages': usages, - 'quotas': quotas} - exc = exception.OverQuota(**kwargs) - - self.assertRaises(expected_ex, - quota_utils.process_reserve_over_quota, - ctxt, exc, - resource=resource, - size=size) - - def test_volume_size_exceed_quota(self): - overs = ['gigabytes'] - usages = {'gigabytes': {'reserved': 1, 'in_use': 9}} - quotas = {'gigabytes': 10, 'snapshots': 10} - self._process_reserve_over_quota( - overs, usages, quotas, - exception.VolumeSizeExceedsAvailableQuota) - - def test_snapshot_limit_exceed_quota(self): - overs = ['snapshots'] - usages = {'snapshots': {'reserved': 1, 'in_use': 9}} - quotas = {'gigabytes': 10, 'snapshots': 10} - self._process_reserve_over_quota( - overs, usages, quotas, - exception.SnapshotLimitExceeded, - resource='snapshots') - - def test_backup_gigabytes_exceed_quota(self): - overs = ['backup_gigabytes'] - usages = {'backup_gigabytes': {'reserved': 1, 'in_use': 9}} - quotas = {'backup_gigabytes': 10} - self._process_reserve_over_quota( - overs, usages, quotas, - exception.VolumeBackupSizeExceedsAvailableQuota, - resource='backups') - - def test_backup_limit_quota(self): - overs = ['backups'] - usages = {'backups': {'reserved': 1, 'in_use': 9}} - quotas = {'backups': 9} - self._process_reserve_over_quota( - overs, usages, quotas, - 
exception.BackupLimitExceeded, - resource='backups') - - def test_volumes_limit_quota(self): - overs = ['volumes'] - usages = {'volumes': {'reserved': 1, 'in_use': 9}} - quotas = {'volumes': 9} - self._process_reserve_over_quota( - overs, usages, quotas, - exception.VolumeLimitExceeded) - - def test_groups_limit_quota(self): - overs = ['groups'] - usages = {'groups': {'reserved': 1, 'in_use': 9}} - quotas = {'groups': 9} - self._process_reserve_over_quota( - overs, usages, quotas, - exception.GroupLimitExceeded, - resource='groups') - - def test_unknown_quota(self): - overs = ['unknown'] - usages = {'volumes': {'reserved': 1, 'in_use': 9}} - quotas = {'volumes': 9} - self._process_reserve_over_quota( - overs, usages, quotas, - exception.UnexpectedOverQuota) - - def test_unknown_quota2(self): - overs = ['volumes'] - usages = {'volumes': {'reserved': 1, 'in_use': 9}} - quotas = {'volumes': 9} - self._process_reserve_over_quota( - overs, usages, quotas, - exception.UnexpectedOverQuota, - resource='snapshots') diff --git a/cinder/tests/unit/test_rpc.py b/cinder/tests/unit/test_rpc.py deleted file mode 100644 index 87c4f4a0c..000000000 --- a/cinder/tests/unit/test_rpc.py +++ /dev/null @@ -1,104 +0,0 @@ -# Copyright 2015 Intel Corp. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -import ddt -import mock - -from cinder.objects import base -from cinder import rpc -from cinder import test - - -class FakeAPI(rpc.RPCAPI): - RPC_API_VERSION = '1.5' - TOPIC = 'cinder-scheduler-topic' - BINARY = 'cinder-scheduler' - - -@ddt.ddt -class RPCAPITestCase(test.TestCase): - """Tests RPCAPI mixin aggregating stuff related to RPC compatibility.""" - - def setUp(self): - super(RPCAPITestCase, self).setUp() - # Reset cached version pins - rpc.LAST_RPC_VERSIONS = {} - rpc.LAST_OBJ_VERSIONS = {} - - @mock.patch('cinder.objects.Service.get_minimum_rpc_version', - return_value='1.2') - @mock.patch('cinder.objects.Service.get_minimum_obj_version', - return_value='1.4') - @mock.patch('cinder.rpc.get_client') - def test_init(self, get_client, get_min_obj, get_min_rpc): - def fake_get_client(target, version_cap, serializer): - self.assertEqual(FakeAPI.TOPIC, target.topic) - self.assertEqual(FakeAPI.RPC_API_VERSION, target.version) - self.assertEqual('1.2', version_cap) - self.assertEqual('1.4', serializer.version_cap) - - get_client.side_effect = fake_get_client - FakeAPI() - - @mock.patch('cinder.objects.Service.get_minimum_rpc_version', - return_value=None) - @mock.patch('cinder.objects.Service.get_minimum_obj_version', - return_value=None) - @mock.patch('cinder.objects.base.CinderObjectSerializer') - @mock.patch('cinder.rpc.get_client') - def test_init_none_caps(self, get_client, serializer, get_min_obj, - get_min_rpc): - """Test that with no service latest versions are selected.""" - FakeAPI() - serializer.assert_called_once_with(base.OBJ_VERSIONS.get_current()) - get_client.assert_called_once_with(mock.ANY, - version_cap=FakeAPI.RPC_API_VERSION, - serializer=serializer.return_value) - self.assertTrue(get_min_obj.called) - self.assertTrue(get_min_rpc.called) - - @mock.patch('cinder.objects.Service.get_minimum_rpc_version') - @mock.patch('cinder.objects.Service.get_minimum_obj_version') - @mock.patch('cinder.rpc.get_client') - 
@mock.patch('cinder.rpc.LAST_RPC_VERSIONS', {'cinder-scheduler': '1.4'}) - @mock.patch('cinder.rpc.LAST_OBJ_VERSIONS', {'cinder-scheduler': '1.3'}) - def test_init_cached_caps(self, get_client, get_min_obj, get_min_rpc): - def fake_get_client(target, version_cap, serializer): - self.assertEqual(FakeAPI.TOPIC, target.topic) - self.assertEqual(FakeAPI.RPC_API_VERSION, target.version) - self.assertEqual('1.4', version_cap) - self.assertEqual('1.3', serializer.version_cap) - - get_client.side_effect = fake_get_client - FakeAPI() - - self.assertFalse(get_min_obj.called) - self.assertFalse(get_min_rpc.called) - - @ddt.data([], ['noop'], ['noop', 'noop']) - @mock.patch('oslo_messaging.JsonPayloadSerializer', wraps=True) - def test_init_no_notifications(self, driver, serializer_mock): - """Test short-circuiting notifications with default and noop driver.""" - self.override_config('driver', driver, - group='oslo_messaging_notifications') - rpc.init(test.CONF) - self.assertEqual(rpc.utils.DO_NOTHING, rpc.NOTIFIER) - serializer_mock.assert_not_called() - - @mock.patch.object(rpc, 'messaging') - def test_init_notifications(self, messaging_mock): - rpc.init(test.CONF) - self.assertTrue(messaging_mock.JsonPayloadSerializer.called) - self.assertTrue(messaging_mock.Notifier.called) - self.assertEqual(rpc.NOTIFIER, messaging_mock.Notifier.return_value) diff --git a/cinder/tests/unit/test_service.py b/cinder/tests/unit/test_service.py deleted file mode 100644 index 170992eff..000000000 --- a/cinder/tests/unit/test_service.py +++ /dev/null @@ -1,672 +0,0 @@ - -# Copyright 2010 United States Government as represented by the -# Administrator of the National Aeronautics and Space Administration. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. 
You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -""" -Unit Tests for remote procedure calls using queue -""" - -import ddt -import mock -from oslo_concurrency import processutils -from oslo_config import cfg -from oslo_db import exception as db_exc - -from cinder import context -from cinder import db -from cinder import exception -from cinder import manager -from cinder import objects -from cinder.objects import fields -from cinder import rpc -from cinder import service -from cinder import test - - -test_service_opts = [ - cfg.StrOpt("fake_manager", - default="cinder.tests.unit.test_service.FakeManager", - help="Manager for testing"), - cfg.StrOpt("test_service_listen", - help="Host to bind test service to"), - cfg.IntOpt("test_service_listen_port", - default=0, - help="Port number to bind test service to"), ] - -CONF = cfg.CONF -CONF.register_opts(test_service_opts) - - -class FakeManager(manager.Manager): - """Fake manager for tests.""" - def __init__(self, host=None, - db_driver=None, service_name=None, cluster=None): - super(FakeManager, self).__init__(host=host, - db_driver=db_driver, - cluster=cluster) - - def test_method(self): - return 'manager' - - -class ExtendedService(service.Service): - def test_method(self): - return 'service' - - -class ServiceManagerTestCase(test.TestCase): - """Test cases for Services.""" - - @mock.patch('cinder.service.Service.is_svc_upgrading_to_n', - return_value=False) - def test_message_gets_to_manager(self, is_upgrading_mock): - serv = service.Service('test', - 'test', - 'test', - 'cinder.tests.unit.test_service.FakeManager') - serv.start() - 
self.assertEqual('manager', serv.test_method()) - - @mock.patch('cinder.service.Service.is_svc_upgrading_to_n', - return_value=False) - def test_override_manager_method(self, is_upgrading_mock): - serv = ExtendedService('test', - 'test', - 'test', - 'cinder.tests.unit.test_service.FakeManager') - serv.start() - self.assertEqual('service', serv.test_method()) - - @mock.patch('cinder.service.Service.is_svc_upgrading_to_n', - return_value=False) - @mock.patch('cinder.rpc.LAST_OBJ_VERSIONS', {'test': '1.5'}) - @mock.patch('cinder.rpc.LAST_RPC_VERSIONS', {'test': '1.3'}) - def test_reset(self, is_upgrading_mock): - serv = service.Service('test', - 'test', - 'test', - 'cinder.tests.unit.test_service.FakeManager') - serv.start() - serv.reset() - self.assertEqual({}, rpc.LAST_OBJ_VERSIONS) - self.assertEqual({}, rpc.LAST_RPC_VERSIONS) - - -class ServiceFlagsTestCase(test.TestCase): - @mock.patch('cinder.service.Service.is_svc_upgrading_to_n', - return_value=False) - def test_service_enabled_on_create_based_on_flag(self, - is_upgrading_mock=False): - ctxt = context.get_admin_context() - self.flags(enable_new_services=True) - host = 'foo' - binary = 'cinder-fake' - cluster = 'cluster' - app = service.Service.create(host=host, binary=binary, cluster=cluster) - ref = db.service_get(ctxt, app.service_id) - db.service_destroy(ctxt, app.service_id) - self.assertFalse(ref.disabled) - - # Check that the cluster is also enabled - db_cluster = objects.ClusterList.get_all(ctxt)[0] - self.assertFalse(db_cluster.disabled) - db.cluster_destroy(ctxt, db_cluster.id) - - @mock.patch('cinder.service.Service.is_svc_upgrading_to_n', - return_value=False) - def test_service_disabled_on_create_based_on_flag(self, is_upgrading_mock): - ctxt = context.get_admin_context() - self.flags(enable_new_services=False) - host = 'foo' - binary = 'cinder-fake' - cluster = 'cluster' - app = service.Service.create(host=host, binary=binary, cluster=cluster) - ref = db.service_get(ctxt, app.service_id) - 
db.service_destroy(ctxt, app.service_id) - self.assertTrue(ref.disabled) - - # Check that the cluster is also enabled - db_cluster = objects.ClusterList.get_all(ctxt)[0] - self.assertTrue(db_cluster.disabled) - db.cluster_destroy(ctxt, db_cluster.id) - - -@ddt.ddt -class ServiceTestCase(test.TestCase): - """Test cases for Services.""" - - def setUp(self): - super(ServiceTestCase, self).setUp() - self.host = 'foo' - self.binary = 'cinder-fake' - self.topic = 'fake' - self.service_ref = {'host': self.host, - 'binary': self.binary, - 'topic': self.topic, - 'report_count': 0, - 'availability_zone': 'nova', - 'id': 1} - self.ctxt = context.get_admin_context() - - def _check_app(self, app, cluster=None, cluster_exists=None, - is_upgrading=False, svc_id=None, added_to_cluster=None): - """Check that Service instance and DB service and cluster are ok.""" - self.assertIsNotNone(app) - - # Check that we have the service ID - self.assertTrue(hasattr(app, 'service_id')) - - if svc_id: - self.assertEqual(svc_id, app.service_id) - - # Check that cluster has been properly set - self.assertEqual(cluster, app.cluster) - # Check that the entry has been really created in the DB - svc = objects.Service.get_by_id(self.ctxt, app.service_id) - - cluster_name = cluster if cluster_exists is not False else None - - # Check that cluster name matches - self.assertEqual(cluster_name, svc.cluster_name) - - clusters = objects.ClusterList.get_all(self.ctxt) - - if added_to_cluster is None: - added_to_cluster = not is_upgrading - - if cluster_name: - # Make sure we have created the cluster in the DB - self.assertEqual(1, len(clusters)) - cluster = clusters[0] - self.assertEqual(cluster_name, cluster.name) - self.assertEqual(self.binary, cluster.binary) - else: - # Make sure we haven't created any cluster in the DB - self.assertListEqual([], clusters.objects) - - self.assertEqual(added_to_cluster, app.added_to_cluster) - - @ddt.data(False, True) - 
@mock.patch('cinder.service.Service.is_svc_upgrading_to_n') - def test_create(self, is_upgrading, is_upgrading_mock): - """Test non clustered service creation.""" - is_upgrading_mock.return_value = is_upgrading - - # NOTE(vish): Create was moved out of mock replay to make sure that - # the looping calls are created in StartService. - app = service.Service.create(host=self.host, - binary=self.binary, - topic=self.topic) - self._check_app(app, is_upgrading=is_upgrading) - - @mock.patch('cinder.service.Service.is_svc_upgrading_to_n', - return_value=False) - def test_create_with_cluster_not_upgrading(self, is_upgrading_mock): - """Test DB cluster creation when service is created.""" - cluster_name = 'cluster' - app = service.Service.create(host=self.host, binary=self.binary, - cluster=cluster_name, topic=self.topic) - self._check_app(app, cluster_name) - - @mock.patch('cinder.service.Service.is_svc_upgrading_to_n', - return_value=True) - def test_create_with_cluster_upgrading(self, is_upgrading_mock): - """Test that we don't create the cluster while we are upgrading.""" - cluster_name = 'cluster' - app = service.Service.create(host=self.host, binary=self.binary, - cluster=cluster_name, topic=self.topic) - self._check_app(app, cluster_name, cluster_exists=False, - is_upgrading=True) - - @mock.patch('cinder.service.Service.is_svc_upgrading_to_n', - return_value=False) - def test_create_svc_exists_upgrade_cluster(self, is_upgrading_mock): - """Test that we update cluster_name field when cfg has changed.""" - # Create the service in the DB - db_svc = db.service_create(context.get_admin_context(), - {'host': self.host, 'binary': self.binary, - 'topic': self.topic, - 'cluster_name': None}) - cluster_name = 'cluster' - app = service.Service.create(host=self.host, binary=self.binary, - cluster=cluster_name, topic=self.topic) - self._check_app(app, cluster_name, svc_id=db_svc.id) - - @mock.patch('cinder.service.Service.is_svc_upgrading_to_n', - return_value=True) - def 
test_create_svc_exists_not_upgrade_cluster(self, is_upgrading_mock): - """Test we don't update cluster_name on cfg change when upgrading.""" - # Create the service in the DB - db_svc = db.service_create(context.get_admin_context(), - {'host': self.host, 'binary': self.binary, - 'topic': self.topic, - 'cluster': None}) - cluster_name = 'cluster' - app = service.Service.create(host=self.host, binary=self.binary, - cluster=cluster_name, topic=self.topic) - self._check_app(app, cluster_name, cluster_exists=False, - is_upgrading=True, svc_id=db_svc.id) - - @mock.patch('cinder.service.Service.is_svc_upgrading_to_n', - return_value=False) - @mock.patch.object(objects.service.Service, 'get_by_args') - @mock.patch.object(objects.service.Service, 'get_by_id') - def test_report_state_newly_disconnected(self, get_by_id, get_by_args, - is_upgrading_mock): - get_by_args.side_effect = exception.NotFound() - get_by_id.side_effect = db_exc.DBConnectionError() - with mock.patch.object(objects.service, 'db') as mock_db: - mock_db.service_create.return_value = self.service_ref - - serv = service.Service( - self.host, - self.binary, - self.topic, - 'cinder.tests.unit.test_service.FakeManager' - ) - serv.start() - serv.report_state() - self.assertTrue(serv.model_disconnected) - self.assertFalse(mock_db.service_update.called) - - @mock.patch('cinder.service.Service.is_svc_upgrading_to_n', - return_value=False) - @mock.patch.object(objects.service.Service, 'get_by_args') - @mock.patch.object(objects.service.Service, 'get_by_id') - def test_report_state_disconnected_DBError(self, get_by_id, get_by_args, - is_upgrading_mock): - get_by_args.side_effect = exception.NotFound() - get_by_id.side_effect = db_exc.DBError() - with mock.patch.object(objects.service, 'db') as mock_db: - mock_db.service_create.return_value = self.service_ref - - serv = service.Service( - self.host, - self.binary, - self.topic, - 'cinder.tests.unit.test_service.FakeManager' - ) - serv.start() - serv.report_state() - 
self.assertTrue(serv.model_disconnected) - self.assertFalse(mock_db.service_update.called) - - @mock.patch('cinder.service.Service.is_svc_upgrading_to_n', - return_value=False) - @mock.patch('cinder.db.sqlalchemy.api.service_update') - @mock.patch('cinder.db.sqlalchemy.api.service_get') - def test_report_state_newly_connected(self, get_by_id, service_update, - is_upgrading_mock): - get_by_id.return_value = self.service_ref - - serv = service.Service( - self.host, - self.binary, - self.topic, - 'cinder.tests.unit.test_service.FakeManager' - ) - serv.start() - serv.model_disconnected = True - serv.report_state() - - self.assertFalse(serv.model_disconnected) - self.assertTrue(service_update.called) - - @mock.patch('cinder.service.Service.is_svc_upgrading_to_n', - return_value=False) - def test_report_state_manager_not_working(self, is_upgrading_mock): - with mock.patch('cinder.db') as mock_db: - mock_db.service_get.return_value = self.service_ref - - serv = service.Service( - self.host, - self.binary, - self.topic, - 'cinder.tests.unit.test_service.FakeManager' - ) - serv.manager.is_working = mock.Mock(return_value=False) - serv.start() - serv.report_state() - - serv.manager.is_working.assert_called_once_with() - self.assertFalse(mock_db.service_update.called) - - @mock.patch('cinder.service.Service.is_svc_upgrading_to_n', - return_value=False) - def test_service_with_long_report_interval(self, is_upgrading_mock): - self.override_config('service_down_time', 10) - self.override_config('report_interval', 10) - service.Service.create( - binary="test_service", - manager="cinder.tests.unit.test_service.FakeManager") - self.assertEqual(25, CONF.service_down_time) - - @mock.patch('cinder.service.Service.is_svc_upgrading_to_n', - return_value=False) - @mock.patch.object(rpc, 'get_server') - @mock.patch('cinder.db') - def test_service_stop_waits_for_rpcserver(self, mock_db, mock_rpc, - is_upgrading_mock): - serv = service.Service( - self.host, - self.binary, - self.topic, - 
'cinder.tests.unit.test_service.FakeManager' - ) - serv.start() - serv.stop() - serv.wait() - serv.rpcserver.start.assert_called_once_with() - serv.rpcserver.stop.assert_called_once_with() - serv.rpcserver.wait.assert_called_once_with() - - @mock.patch('cinder.service.Service.is_svc_upgrading_to_n', - return_value=False) - @mock.patch('cinder.service.Service.report_state') - @mock.patch('cinder.service.Service.periodic_tasks') - @mock.patch.object(service.loopingcall, 'FixedIntervalLoopingCall') - @mock.patch.object(rpc, 'get_server') - @mock.patch('cinder.db') - def test_service_stop_waits_for_timers(self, mock_db, mock_rpc, - mock_loopcall, mock_periodic, - mock_report, is_upgrading_mock): - """Test that we wait for loopcalls only if stop succeeds.""" - serv = service.Service( - self.host, - self.binary, - self.topic, - 'cinder.tests.unit.test_service.FakeManager', - report_interval=5, - periodic_interval=10, - ) - - # One of the loopcalls will raise an exception on stop - mock_loopcall.side_effect = ( - mock.Mock(**{'stop.side_effect': Exception}), - mock.Mock()) - - serv.start() - serv.stop() - serv.wait() - serv.rpcserver.start.assert_called_once_with() - serv.rpcserver.stop.assert_called_once_with() - serv.rpcserver.wait.assert_called_once_with() - - # The first loopcall will have failed on the stop call, so we will not - # have waited for it to stop - self.assertEqual(1, serv.timers[0].start.call_count) - self.assertEqual(1, serv.timers[0].stop.call_count) - self.assertFalse(serv.timers[0].wait.called) - - # We will wait for the second loopcall - self.assertEqual(1, serv.timers[1].start.call_count) - self.assertEqual(1, serv.timers[1].stop.call_count) - self.assertEqual(1, serv.timers[1].wait.call_count) - - @mock.patch('cinder.manager.Manager.init_host') - @mock.patch.object(service.loopingcall, 'FixedIntervalLoopingCall') - @mock.patch('oslo_messaging.Target') - @mock.patch.object(rpc, 'get_server') - def _check_rpc_servers_and_init_host(self, app, 
added_to_cluster, cluster, - rpc_mock, target_mock, loop_mock, - init_host_mock): - app.start() - - # Since we have created the service entry we call init_host with - # added_to_cluster=True - init_host_mock.assert_called_once_with( - added_to_cluster=added_to_cluster, - service_id=self.service_ref['id']) - - expected_target_calls = [mock.call(topic=self.topic, server=self.host)] - expected_rpc_calls = [mock.call(target_mock.return_value, mock.ANY, - mock.ANY), - mock.call().start()] - - if cluster and added_to_cluster: - self.assertIsNotNone(app.cluster_rpcserver) - expected_target_calls.append(mock.call( - topic=self.topic + '.' + cluster, - server=cluster.split('@')[0])) - expected_rpc_calls.extend(expected_rpc_calls[:]) - - # Check that we create message targets for host and cluster - target_mock.assert_has_calls(expected_target_calls) - - # Check we get and start rpc services for host and cluster - rpc_mock.assert_has_calls(expected_rpc_calls) - - self.assertIsNotNone(app.rpcserver) - - app.stop() - - @mock.patch('cinder.objects.Service.get_minimum_obj_version', - return_value='1.6') - def test_start_rpc_and_init_host_no_cluster(self, is_upgrading_mock): - """Test that without cluster we don't create rpc service.""" - app = service.Service.create(host=self.host, binary='cinder-volume', - cluster=None, topic=self.topic) - self._check_rpc_servers_and_init_host(app, True, None) - - @ddt.data('1.3', '1.7') - @mock.patch('cinder.objects.Service.get_minimum_obj_version') - def test_start_rpc_and_init_host_cluster(self, obj_version, - get_min_obj_mock): - """Test that with cluster we create the rpc service.""" - get_min_obj_mock.return_value = obj_version - cluster = 'cluster@backend#pool' - self.host = 'host@backend#pool' - app = service.Service.create(host=self.host, binary='cinder-volume', - cluster=cluster, topic=self.topic) - self._check_rpc_servers_and_init_host(app, obj_version != '1.3', - cluster) - - 
@mock.patch('cinder.service.Service.is_svc_upgrading_to_n', - mock.Mock(return_value=False)) - @mock.patch('cinder.objects.Cluster.get_by_id') - def test_ensure_cluster_exists_no_cluster(self, get_mock): - app = service.Service.create(host=self.host, - binary=self.binary, - topic=self.topic) - svc = objects.Service.get_by_id(self.ctxt, app.service_id) - app._ensure_cluster_exists(self.ctxt, svc) - get_mock.assert_not_called() - self.assertEqual({}, svc.cinder_obj_get_changes()) - - @mock.patch('cinder.service.Service.is_svc_upgrading_to_n', - mock.Mock(return_value=False)) - @mock.patch('cinder.objects.Cluster.get_by_id') - def test_ensure_cluster_exists_cluster_exists_non_relicated(self, - get_mock): - cluster = objects.Cluster( - name='cluster_name', active_backend_id=None, frozen=False, - replication_status=fields.ReplicationStatus.NOT_CAPABLE) - get_mock.return_value = cluster - - app = service.Service.create(host=self.host, - binary=self.binary, - topic=self.topic) - svc = objects.Service.get_by_id(self.ctxt, app.service_id) - app.cluster = cluster.name - app._ensure_cluster_exists(self.ctxt, svc) - get_mock.assert_called_once_with(self.ctxt, None, name=cluster.name, - binary=app.binary) - self.assertEqual({}, svc.cinder_obj_get_changes()) - - @mock.patch('cinder.service.Service.is_svc_upgrading_to_n', - mock.Mock(return_value=False)) - @mock.patch('cinder.objects.Cluster.get_by_id') - def test_ensure_cluster_exists_cluster_change(self, get_mock): - """We copy replication fields from the cluster to the service.""" - changes = dict(replication_status=fields.ReplicationStatus.FAILED_OVER, - active_backend_id='secondary', - frozen=True) - cluster = objects.Cluster(name='cluster_name', **changes) - get_mock.return_value = cluster - - app = service.Service.create(host=self.host, - binary=self.binary, - topic=self.topic) - svc = objects.Service.get_by_id(self.ctxt, app.service_id) - app.cluster = cluster.name - app._ensure_cluster_exists(self.ctxt, svc) - 
get_mock.assert_called_once_with(self.ctxt, None, name=cluster.name, - binary=app.binary) - self.assertEqual(changes, svc.cinder_obj_get_changes()) - - @mock.patch('cinder.service.Service.is_svc_upgrading_to_n', - mock.Mock(return_value=False)) - @mock.patch('cinder.objects.Cluster.get_by_id') - def test_ensure_cluster_exists_cluster_no_change(self, get_mock): - """Don't copy replication fields from cluster if replication error.""" - changes = dict(replication_status=fields.ReplicationStatus.FAILED_OVER, - active_backend_id='secondary', - frozen=True) - cluster = objects.Cluster(name='cluster_name', **changes) - get_mock.return_value = cluster - - app = service.Service.create(host=self.host, - binary=self.binary, - topic=self.topic) - svc = objects.Service.get_by_id(self.ctxt, app.service_id) - svc.replication_status = fields.ReplicationStatus.ERROR - svc.obj_reset_changes() - app.cluster = cluster.name - app._ensure_cluster_exists(self.ctxt, svc) - get_mock.assert_called_once_with(self.ctxt, None, name=cluster.name, - binary=app.binary) - self.assertEqual({}, svc.cinder_obj_get_changes()) - - @mock.patch('cinder.service.Service.is_svc_upgrading_to_n', - mock.Mock(return_value=False)) - def test_ensure_cluster_exists_cluster_create_replicated_and_non(self): - """We use service replication fields to create the cluster.""" - changes = dict(replication_status=fields.ReplicationStatus.FAILED_OVER, - active_backend_id='secondary', - frozen=True) - - app = service.Service.create(host=self.host, - binary=self.binary, - topic=self.topic) - svc = objects.Service.get_by_id(self.ctxt, app.service_id) - for key, value in changes.items(): - setattr(svc, key, value) - - app.cluster = 'cluster_name' - app._ensure_cluster_exists(self.ctxt, svc) - - cluster = objects.Cluster.get_by_id(self.ctxt, None, name=app.cluster) - for key, value in changes.items(): - self.assertEqual(value, getattr(cluster, key)) - - -class TestWSGIService(test.TestCase): - - 
@mock.patch('oslo_service.wsgi.Loader') - def test_service_random_port(self, mock_loader): - test_service = service.WSGIService("test_service") - self.assertEqual(0, test_service.port) - test_service.start() - self.assertNotEqual(0, test_service.port) - test_service.stop() - self.assertTrue(mock_loader.called) - - @mock.patch('oslo_service.wsgi.Loader') - def test_reset_pool_size_to_default(self, mock_loader): - test_service = service.WSGIService("test_service") - test_service.start() - - # Stopping the service, which in turn sets pool size to 0 - test_service.stop() - self.assertEqual(0, test_service.server._pool.size) - - # Resetting pool size to default - test_service.reset() - test_service.start() - self.assertEqual(cfg.CONF.wsgi_default_pool_size, - test_service.server._pool.size) - self.assertTrue(mock_loader.called) - - @mock.patch('oslo_service.wsgi.Loader') - def test_workers_set_default(self, mock_loader): - self.override_config('osapi_volume_listen_port', - CONF.test_service_listen_port) - test_service = service.WSGIService("osapi_volume") - self.assertEqual(processutils.get_worker_count(), - test_service.workers) - self.assertTrue(mock_loader.called) - - @mock.patch('oslo_service.wsgi.Loader') - def test_workers_set_good_user_setting(self, mock_loader): - self.override_config('osapi_volume_listen_port', - CONF.test_service_listen_port) - self.override_config('osapi_volume_workers', 8) - test_service = service.WSGIService("osapi_volume") - self.assertEqual(8, test_service.workers) - self.assertTrue(mock_loader.called) - - @mock.patch('oslo_service.wsgi.Loader') - def test_workers_set_zero_user_setting(self, mock_loader): - self.override_config('osapi_volume_listen_port', - CONF.test_service_listen_port) - self.override_config('osapi_volume_workers', 0) - test_service = service.WSGIService("osapi_volume") - # If a value less than 1 is used, defaults to number of procs - # available - self.assertEqual(processutils.get_worker_count(), - 
test_service.workers) - self.assertTrue(mock_loader.called) - - @mock.patch('oslo_service.wsgi.Loader') - def test_workers_set_negative_user_setting(self, mock_loader): - self.override_config('osapi_volume_workers', -1) - self.assertRaises(exception.InvalidInput, - service.WSGIService, "osapi_volume") - self.assertTrue(mock_loader.called) - - @mock.patch('oslo_service.wsgi.Server') - @mock.patch('oslo_service.wsgi.Loader') - def test_ssl_enabled(self, mock_loader, mock_server): - self.override_config('osapi_volume_use_ssl', True) - - service.WSGIService("osapi_volume") - mock_server.assert_called_once_with(mock.ANY, mock.ANY, mock.ANY, - port=mock.ANY, host=mock.ANY, - use_ssl=True) - - self.assertTrue(mock_loader.called) - - -class OSCompatibilityTestCase(test.TestCase): - def _test_service_launcher(self, fake_os): - # Note(lpetrut): The cinder-volume service needs to be spawned - # differently on Windows due to an eventlet bug. For this reason, - # we must check the process launcher used. - fake_process_launcher = mock.MagicMock() - with mock.patch('os.name', fake_os): - with mock.patch('cinder.service.process_launcher', - fake_process_launcher): - launcher = service.get_launcher() - if fake_os == 'nt': - self.assertEqual(service.Launcher, type(launcher)) - else: - self.assertEqual(fake_process_launcher(), launcher) - - def test_process_launcher_on_windows(self): - self._test_service_launcher('nt') - - def test_process_launcher_on_linux(self): - self._test_service_launcher('posix') diff --git a/cinder/tests/unit/test_setup_profiler.py b/cinder/tests/unit/test_setup_profiler.py deleted file mode 100644 index f29ba96f8..000000000 --- a/cinder/tests/unit/test_setup_profiler.py +++ /dev/null @@ -1,52 +0,0 @@ -# Copyright 2016 Mirantis Inc. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. 
You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import mock - -from cinder import service -from cinder import test - - -class SetupProfilerTestCase(test.TestCase): - def setUp(self): - super(SetupProfilerTestCase, self).setUp() - service.osprofiler_initializer = mock.MagicMock() - service.profiler = mock.MagicMock() - service.profiler_opts = mock.MagicMock() - service.osprofiler_initializer.init_from_conf = mock.MagicMock() - - def test_profiler_not_present(self): - service.profiler = None - service.LOG.debug = mock.MagicMock() - service.setup_profiler("cinder-volume", "localhost") - service.LOG.debug.assert_called_once_with("osprofiler is not present") - - @mock.patch("cinder.service.context") - def test_profiler_enabled(self, context): - service.CONF.profiler.enabled = True - return_value = {"Meaning Of Life": 42} - context.get_admin_context().to_dict.return_value = return_value - service.setup_profiler("cinder-volume", "localhost") - service.osprofiler_initializer.init_from_conf.assert_called_once_with( - conf=service.CONF, - context=return_value, - project="cinder", - service="cinder-volume", - host="localhost") - - def test_profiler_disabled(self): - service.CONF.profiler.enabled = False - service.setup_profiler("cinder-volume", "localhost") - service.osprofiler_initializer.init_from_conf.assert_not_called() diff --git a/cinder/tests/unit/test_ssh_utils.py b/cinder/tests/unit/test_ssh_utils.py deleted file mode 100644 index 393139b01..000000000 --- a/cinder/tests/unit/test_ssh_utils.py +++ /dev/null @@ -1,349 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# 
not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import mock -import paramiko -import uuid - -from cinder import exception -from cinder import ssh_utils -from cinder import test - - -class FakeSock(object): - def settimeout(self, timeout): - pass - - -class FakeTransport(object): - - def __init__(self): - self.active = True - self.sock = FakeSock() - - def set_keepalive(self, timeout): - pass - - def is_active(self): - return self.active - - -class FakeSSHClient(object): - - def __init__(self): - self.id = uuid.uuid4() - self.transport = FakeTransport() - - def set_missing_host_key_policy(self, policy): - self.policy = policy - - def load_system_host_keys(self): - self.system_host_keys = 'system_host_keys' - - def load_host_keys(self, hosts_key_file): - self.hosts_key_file = hosts_key_file - - def connect(self, ip, port=22, username=None, password=None, - pkey=None, timeout=10): - pass - - def get_transport(self): - return self.transport - - def get_policy(self): - return self.policy - - def get_host_keys(self): - return '127.0.0.1 ssh-rsa deadbeef' - - def close(self): - pass - - def __call__(self, *args, **kwargs): - pass - - -class SSHPoolTestCase(test.TestCase): - """Unit test for SSH Connection Pool.""" - @mock.patch('six.moves.builtins.open') - @mock.patch('paramiko.SSHClient') - @mock.patch('os.path.isfile', return_value=True) - def test_sshpool_remove(self, mock_isfile, mock_sshclient, mock_open): - ssh_to_remove = mock.MagicMock() - mock_sshclient.side_effect = [mock.MagicMock(), - ssh_to_remove, mock.MagicMock()] - 
self.override_config('ssh_hosts_key_file', 'dummy') - sshpool = ssh_utils.SSHPool("127.0.0.1", 22, 10, - "test", - password="test", - min_size=3, - max_size=3) - self.assertIn(ssh_to_remove, list(sshpool.free_items)) - sshpool.remove(ssh_to_remove) - self.assertNotIn(ssh_to_remove, list(sshpool.free_items)) - - @mock.patch('six.moves.builtins.open') - @mock.patch('paramiko.SSHClient') - @mock.patch('os.path.isfile', return_value=True) - def test_sshpool_remove_object_not_in_pool(self, mock_isfile, - mock_sshclient, mock_open): - # create an SSH Client that is not a part of sshpool. - ssh_to_remove = mock.MagicMock() - mock_sshclient.side_effect = [mock.MagicMock(), mock.MagicMock()] - - self.override_config('ssh_hosts_key_file', 'dummy') - sshpool = ssh_utils.SSHPool("127.0.0.1", 22, 10, - "test", - password="test", - min_size=2, - max_size=2) - listBefore = list(sshpool.free_items) - self.assertNotIn(ssh_to_remove, listBefore) - sshpool.remove(ssh_to_remove) - self.assertEqual(listBefore, list(sshpool.free_items)) - - @mock.patch('six.moves.builtins.open') - @mock.patch('paramiko.SSHClient') - @mock.patch('os.path.isfile', return_value=True) - def test_ssh_default_hosts_key_file(self, mock_isfile, mock_sshclient, - mock_open): - mock_ssh = mock.MagicMock() - mock_sshclient.return_value = mock_ssh - self.override_config('ssh_hosts_key_file', - '/var/lib/cinder/ssh_known_hosts') - - # create with customized setting - sshpool = ssh_utils.SSHPool("127.0.0.1", 22, 10, - "test", - password="test", - min_size=1, - max_size=1) - - host_key_files = sshpool.hosts_key_file - - self.assertEqual('/var/lib/cinder/ssh_known_hosts', host_key_files) - - mock_ssh.load_host_keys.assert_called_once_with( - '/var/lib/cinder/ssh_known_hosts') - - @mock.patch('six.moves.builtins.open') - @mock.patch('paramiko.SSHClient') - @mock.patch('os.path.isfile', return_value=True) - def test_ssh_host_key_file_kwargs(self, mock_isfile, mock_sshclient, - mock_open): - mock_ssh = mock.MagicMock() - 
mock_sshclient.return_value = mock_ssh - self.override_config('ssh_hosts_key_file', - '/var/lib/cinder/ssh_known_hosts') - - # create with customized setting - sshpool = ssh_utils.SSHPool("127.0.0.1", 22, 10, - "test", - password="test", - min_size=1, - max_size=1, - hosts_key_file='dummy_host_keyfile') - - host_key_files = sshpool.hosts_key_file - - self.assertIn('dummy_host_keyfile', host_key_files) - self.assertIn('/var/lib/cinder/ssh_known_hosts', host_key_files) - - expected = [ - mock.call.load_host_keys('dummy_host_keyfile'), - mock.call.load_host_keys('/var/lib/cinder/ssh_known_hosts')] - - mock_ssh.assert_has_calls(expected, any_order=True) - - @mock.patch('six.moves.builtins.open') - @mock.patch('os.path.isfile', return_value=True) - @mock.patch('paramiko.RSAKey.from_private_key_file') - @mock.patch('paramiko.SSHClient') - def test_single_ssh_connect(self, mock_sshclient, mock_pkey, mock_isfile, - mock_open): - self.override_config( - 'ssh_hosts_key_file', '/var/lib/cinder/ssh_known_hosts') - - # create with password - sshpool = ssh_utils.SSHPool("127.0.0.1", 22, 10, - "test", - password="test", - min_size=1, - max_size=1) - with sshpool.item() as ssh: - first_id = ssh.id - - with sshpool.item() as ssh: - second_id = ssh.id - - self.assertEqual(first_id, second_id) - self.assertEqual(1, mock_sshclient.return_value.connect.call_count) - - # create with private key - sshpool = ssh_utils.SSHPool("127.0.0.1", 22, 10, - "test", - privatekey="test", - min_size=1, - max_size=1) - self.assertEqual(2, mock_sshclient.return_value.connect.call_count) - - # attempt to create with no password or private key - self.assertRaises(paramiko.SSHException, - ssh_utils.SSHPool, - "127.0.0.1", 22, 10, - "test", - min_size=1, - max_size=1) - - @mock.patch('six.moves.builtins.open') - @mock.patch('paramiko.SSHClient') - def test_closed_reopened_ssh_connections(self, mock_sshclient, mock_open): - mock_sshclient.return_value = FakeSSHClient() - sshpool = 
ssh_utils.SSHPool("127.0.0.1", 22, 10, - "test", - password="test", - min_size=1, - max_size=4) - with sshpool.item() as ssh: - mock_sshclient.reset_mock() - first_id = ssh.id - - with sshpool.item() as ssh: - second_id = ssh.id - ssh.get_transport().active = False - sshpool.remove(ssh) - - self.assertEqual(first_id, second_id) - - # create a new client - mock_sshclient.return_value = FakeSSHClient() - with sshpool.item() as ssh: - third_id = ssh.id - - self.assertNotEqual(first_id, third_id) - - @mock.patch('six.moves.builtins.open') - @mock.patch('paramiko.SSHClient') - def test_missing_ssh_hosts_key_config(self, mock_sshclient, mock_open): - mock_sshclient.return_value = FakeSSHClient() - self.override_config('ssh_hosts_key_file', None) - # create with password - self.assertRaises(exception.ParameterNotFound, - ssh_utils.SSHPool, - "127.0.0.1", 22, 10, - "test", - password="test", - min_size=1, - max_size=1) - - @mock.patch('six.moves.builtins.open') - @mock.patch('paramiko.SSHClient') - def test_create_default_known_hosts_file(self, mock_sshclient, - mock_open): - mock_sshclient.return_value = FakeSSHClient() - - self.flags(state_path='/var/lib/cinder', - ssh_hosts_key_file='/var/lib/cinder/ssh_known_hosts') - - default_file = '/var/lib/cinder/ssh_known_hosts' - - ssh_pool = ssh_utils.SSHPool("127.0.0.1", 22, 10, - "test", - password="test", - min_size=1, - max_size=1) - - with ssh_pool.item() as ssh: - mock_open.assert_called_once_with(default_file, 'a') - ssh_pool.remove(ssh) - - @mock.patch('os.path.isfile', return_value=False) - @mock.patch('six.moves.builtins.open') - @mock.patch('paramiko.SSHClient') - def test_ssh_missing_hosts_key_file(self, mock_sshclient, mock_open, - mock_isfile): - mock_sshclient.return_value = FakeSSHClient() - - self.flags(state_path='/var/lib/cinder', - ssh_hosts_key_file='/tmp/blah') - - self.assertRaises(exception.InvalidInput, - ssh_utils.SSHPool, - "127.0.0.1", 22, 10, - "test", - password="test", - min_size=1, - max_size=1) 
- - @mock.patch('six.moves.builtins.open') - @mock.patch('paramiko.SSHClient') - @mock.patch('os.path.isfile', return_value=True) - def test_ssh_strict_host_key_policy(self, mock_isfile, mock_sshclient, - mock_open): - mock_sshclient.return_value = FakeSSHClient() - - self.flags(strict_ssh_host_key_policy=True, - ssh_hosts_key_file='/var/lib/cinder/ssh_known_hosts') - - # create with customized setting - sshpool = ssh_utils.SSHPool("127.0.0.1", 22, 10, - "test", - password="test", - min_size=1, - max_size=1) - - with sshpool.item() as ssh: - self.assertIsInstance(ssh.get_policy(), - paramiko.RejectPolicy) - - @mock.patch('six.moves.builtins.open') - @mock.patch('paramiko.SSHClient') - @mock.patch('os.path.isfile', return_value=True) - def test_ssh_not_strict_host_key_policy(self, mock_isfile, mock_sshclient, - mock_open): - mock_sshclient.return_value = FakeSSHClient() - - self.override_config('strict_ssh_host_key_policy', False) - - # create with customized setting - sshpool = ssh_utils.SSHPool("127.0.0.1", 22, 10, - "test", - password="test", - min_size=1, - max_size=1) - - with sshpool.item() as ssh: - self.assertIsInstance(ssh.get_policy(), - paramiko.AutoAddPolicy) - - @mock.patch('paramiko.SSHClient') - @mock.patch('six.moves.builtins.open') - @mock.patch('os.path.isfile', return_value=False) - def test_ssh_timeout(self, mock_isfile, mock_open, mock_sshclient): - sshpool = ssh_utils.SSHPool("127.0.0.1", 22, 10, - "test", - password="test", - min_size=1, - max_size=1) - self.assertEqual(1, sshpool.current_size) - conn = sshpool.get() - conn.connect = mock.MagicMock() - # create failed due to time out - conn.connect.side_effect = paramiko.SSHException("time out") - mock_transport = mock.MagicMock() - conn.get_transport.return_value = mock_transport - # connection is down - mock_transport.is_active.return_value = False - sshpool.put(conn) - self.assertRaises(paramiko.SSHException, - sshpool.get) - self.assertEqual(0, sshpool.current_size) diff --git 
a/cinder/tests/unit/test_test.py b/cinder/tests/unit/test_test.py deleted file mode 100644 index b59244c1a..000000000 --- a/cinder/tests/unit/test_test.py +++ /dev/null @@ -1,73 +0,0 @@ - -# Copyright 2010 United States Government as represented by the -# Administrator of the National Aeronautics and Space Administration. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -"""Tests for the testing base code.""" - -import mock -from oslo_config import cfg -import oslo_messaging as messaging - -from cinder import rpc -from cinder import test - - -class IsolationTestCase(test.TestCase): - """Ensure that things are cleaned up after failed tests. - - These tests don't really do much here, but if isolation fails a bunch - of other tests should fail. 
- - """ - def test_service_isolation(self): - self.start_service('volume') - - def test_rpc_consumer_isolation(self): - class NeverCalled(object): - - def __getattribute__(*args): - self.fail(msg="I should never get called.") - - server = rpc.get_server(messaging.Target(topic='volume', - server=cfg.CONF.host), - endpoints=[NeverCalled()]) - server.start() - - -class MockAssertTestCase(test.TestCase): - """Ensure that valid mock assert methods are used.""" - def test_assert_has_calls(self): - mock_call = mock.MagicMock(return_value=None) - mock_call(1) - mock_call(2) - mock_call.assert_has_calls([mock.call(1), mock.call(2)]) - - def test_assert_any_call(self): - mock_call = mock.MagicMock(return_value=None) - mock_call(1) - mock_call(2) - mock_call(3) - mock_call.assert_any_call(1) - - def test_assert_called_with(self): - mock_call = mock.MagicMock(return_value=None) - mock_call(1, 'foo', a='123') - mock_call.assert_called_with(1, 'foo', a='123') - - def test_assert_called_once_with(self): - mock_call = mock.MagicMock(return_value=None) - mock_call(1, 'foobar', a='123') - mock_call.assert_called_once_with(1, 'foobar', a='123') diff --git a/cinder/tests/unit/test_test_utils.py b/cinder/tests/unit/test_test_utils.py deleted file mode 100644 index ae3606eb2..000000000 --- a/cinder/tests/unit/test_test_utils.py +++ /dev/null @@ -1,42 +0,0 @@ -# -# Copyright 2010 OpenStack Foundation -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -from cinder import test -from cinder.tests.unit import utils as test_utils - - -class TestUtilsTestCase(test.TestCase): - def test_get_test_admin_context(self): - """get_test_admin_context's return value behaves like admin context.""" - ctxt = test_utils.get_test_admin_context() - - self.assertIsNone(ctxt.project_id) - self.assertIsNone(ctxt.user_id) - self.assertIsNone(ctxt.domain) - self.assertIsNone(ctxt.project_domain) - self.assertIsNone(ctxt.user_domain) - self.assertIsNone(ctxt.project_name) - self.assertIsNone(ctxt.remote_address) - self.assertIsNone(ctxt.auth_token) - self.assertIsNone(ctxt.quota_class) - - self.assertIsNotNone(ctxt.request_id) - self.assertIsNotNone(ctxt.timestamp) - - self.assertEqual(['admin'], ctxt.roles) - self.assertEqual([], ctxt.service_catalog) - self.assertEqual('no', ctxt.read_deleted) - - self.assertTrue(ctxt.is_admin) diff --git a/cinder/tests/unit/test_utils.py b/cinder/tests/unit/test_utils.py deleted file mode 100644 index b25bff637..000000000 --- a/cinder/tests/unit/test_utils.py +++ /dev/null @@ -1,1535 +0,0 @@ -# Copyright 2011 Justin Santa Barbara -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- - -import datetime -import functools -import json -import os -import sys -import time - -import ddt -import mock -from oslo_concurrency import processutils as putils -from oslo_utils import timeutils -import six -from six.moves import range -import webob.exc - -import cinder -from cinder import exception -from cinder import test -from cinder.tests.unit import fake_constants as fake -from cinder import utils - - -class ExecuteTestCase(test.TestCase): - @mock.patch('cinder.utils.processutils.execute') - def test_execute(self, mock_putils_exe): - output = utils.execute('a', 1, foo='bar') - self.assertEqual(mock_putils_exe.return_value, output) - mock_putils_exe.assert_called_once_with('a', 1, foo='bar') - - @mock.patch('cinder.utils.get_root_helper') - @mock.patch('cinder.utils.processutils.execute') - def test_execute_root(self, mock_putils_exe, mock_get_helper): - output = utils.execute('a', 1, foo='bar', run_as_root=True) - self.assertEqual(mock_putils_exe.return_value, output) - mock_helper = mock_get_helper.return_value - mock_putils_exe.assert_called_once_with('a', 1, foo='bar', - run_as_root=True, - root_helper=mock_helper) - - @mock.patch('cinder.utils.get_root_helper') - @mock.patch('cinder.utils.processutils.execute') - def test_execute_root_and_helper(self, mock_putils_exe, mock_get_helper): - mock_helper = mock.Mock() - output = utils.execute('a', 1, foo='bar', run_as_root=True, - root_helper=mock_helper) - self.assertEqual(mock_putils_exe.return_value, output) - self.assertFalse(mock_get_helper.called) - mock_putils_exe.assert_called_once_with('a', 1, foo='bar', - run_as_root=True, - root_helper=mock_helper) - - -@ddt.ddt -class GenericUtilsTestCase(test.TestCase): - def test_as_int(self): - test_obj_int = '2' - test_obj_float = '2.2' - for obj in [test_obj_int, test_obj_float]: - self.assertEqual(2, utils.as_int(obj)) - - obj = 'not_a_number' - self.assertEqual(obj, utils.as_int(obj)) - self.assertRaises(TypeError, - utils.as_int, - obj, - quiet=False) 
- - def test_check_exclusive_options(self): - utils.check_exclusive_options() - utils.check_exclusive_options(something=None, - pretty_keys=True, - unit_test=True) - - self.assertRaises(exception.InvalidInput, - utils.check_exclusive_options, - test=True, - unit=False, - pretty_keys=True) - - self.assertRaises(exception.InvalidInput, - utils.check_exclusive_options, - test=True, - unit=False, - pretty_keys=False) - - def test_require_driver_intialized(self): - driver = mock.Mock() - driver.initialized = True - utils.require_driver_initialized(driver) - - driver.initialized = False - self.assertRaises(exception.DriverNotInitialized, - utils.require_driver_initialized, - driver) - - def test_hostname_unicode_sanitization(self): - hostname = u"\u7684.test.example.com" - self.assertEqual("test.example.com", - utils.sanitize_hostname(hostname)) - - def test_hostname_sanitize_periods(self): - hostname = "....test.example.com..." - self.assertEqual("test.example.com", - utils.sanitize_hostname(hostname)) - - def test_hostname_sanitize_dashes(self): - hostname = "----test.example.com---" - self.assertEqual("test.example.com", - utils.sanitize_hostname(hostname)) - - def test_hostname_sanitize_characters(self): - hostname = "(#@&$!(@*--#&91)(__=+--test-host.example!!.com-0+" - self.assertEqual("91----test-host.example.com-0", - utils.sanitize_hostname(hostname)) - - def test_hostname_translate(self): - hostname = "<}\x1fh\x10e\x08l\x02l\x05o\x12!{>" - self.assertEqual("hello", utils.sanitize_hostname(hostname)) - - @mock.patch('os.path.join', side_effect=lambda x, y: '/'.join((x, y))) - def test_make_dev_path(self, mock_join): - self.assertEqual('/dev/xvda', utils.make_dev_path('xvda')) - self.assertEqual('/dev/xvdb1', utils.make_dev_path('xvdb', 1)) - self.assertEqual('/foo/xvdc1', utils.make_dev_path('xvdc', 1, '/foo')) - - @mock.patch('cinder.utils.execute') - def test_read_file_as_root(self, mock_exec): - out = mock.Mock() - err = mock.Mock() - mock_exec.return_value = 
(out, err) - test_filepath = '/some/random/path' - output = utils.read_file_as_root(test_filepath) - mock_exec.assert_called_once_with('cat', test_filepath, - run_as_root=True) - self.assertEqual(out, output) - - @mock.patch('cinder.utils.execute', - side_effect=putils.ProcessExecutionError) - def test_read_file_as_root_fails(self, mock_exec): - test_filepath = '/some/random/path' - self.assertRaises(exception.FileNotFound, - utils.read_file_as_root, - test_filepath) - - @test.testtools.skipIf(sys.platform == "darwin", "SKIP on OSX") - @mock.patch('tempfile.NamedTemporaryFile') - @mock.patch.object(os, 'open') - @mock.patch.object(os, 'fdatasync') - @mock.patch.object(os, 'fsync') - @mock.patch.object(os, 'rename') - @mock.patch.object(os, 'close') - @mock.patch.object(os.path, 'isfile') - @mock.patch.object(os, 'unlink') - def test_write_configfile(self, mock_unlink, mock_isfile, mock_close, - mock_rename, mock_fsync, mock_fdatasync, - mock_open, mock_tmp): - filename = 'foo' - directory = '/some/random/path' - filepath = os.path.join(directory, filename) - expected = ('\n\n' - ' backing-store %(bspath)s\n' - ' driver iscsi\n' - ' incominguser chap_foo chap_bar\n' - ' bsoflags foo\n' - ' write-cache bar\n' - '\n' % {'id': filename, - 'bspath': filepath}) - - # Normal case - utils.robust_file_write(directory, filename, expected) - mock_open.assert_called_once_with(directory, os.O_DIRECTORY) - mock_rename.assert_called_once_with(mock.ANY, filepath) - self.assertEqual( - expected.encode('utf-8'), - mock_tmp.return_value.__enter__.return_value.write.call_args[0][0] - ) - - # Failure to write persistent file. 
- tempfile = '/some/tempfile' - mock_tmp.return_value.__enter__.return_value.name = tempfile - mock_rename.side_effect = OSError - self.assertRaises(OSError, - utils.robust_file_write, - directory, - filename, - mock.MagicMock()) - mock_isfile.assert_called_once_with(tempfile) - mock_unlink.assert_called_once_with(tempfile) - - def test_check_ssh_injection(self): - cmd_list = ['ssh', '-D', 'my_name@name_of_remote_computer'] - self.assertIsNone(utils.check_ssh_injection(cmd_list)) - cmd_list = ['echo', '"quoted arg with space"'] - self.assertIsNone(utils.check_ssh_injection(cmd_list)) - cmd_list = ['echo', "'quoted arg with space'"] - self.assertIsNone(utils.check_ssh_injection(cmd_list)) - - def test_check_ssh_injection_on_error(self): - with_unquoted_space = ['ssh', 'my_name@ name_of_remote_computer'] - self.assertRaises(exception.SSHInjectionThreat, - utils.check_ssh_injection, - with_unquoted_space) - with_danger_chars = ['||', 'my_name@name_of_remote_computer'] - self.assertRaises(exception.SSHInjectionThreat, - utils.check_ssh_injection, - with_danger_chars) - with_danger_char = [';', 'my_name@name_of_remote_computer'] - self.assertRaises(exception.SSHInjectionThreat, - utils.check_ssh_injection, - with_danger_char) - with_special = ['cmd', 'virus;ls'] - self.assertRaises(exception.SSHInjectionThreat, - utils.check_ssh_injection, - with_special) - quoted_with_unescaped = ['cmd', '"arg\"withunescaped"'] - self.assertRaises(exception.SSHInjectionThreat, - utils.check_ssh_injection, - quoted_with_unescaped) - bad_before_quotes = ['cmd', 'virus;"quoted argument"'] - self.assertRaises(exception.SSHInjectionThreat, - utils.check_ssh_injection, - bad_before_quotes) - bad_after_quotes = ['echo', '"quoted argument";rm -rf'] - self.assertRaises(exception.SSHInjectionThreat, - utils.check_ssh_injection, - bad_after_quotes) - bad_within_quotes = ['echo', "'quoted argument `rm -rf`'"] - self.assertRaises(exception.SSHInjectionThreat, - utils.check_ssh_injection, - 
bad_within_quotes) - with_multiple_quotes = ['echo', '"quoted";virus;"quoted"'] - self.assertRaises(exception.SSHInjectionThreat, - utils.check_ssh_injection, - with_multiple_quotes) - with_multiple_quotes = ['echo', '"quoted";virus;\'quoted\''] - self.assertRaises(exception.SSHInjectionThreat, - utils.check_ssh_injection, - with_multiple_quotes) - - @mock.patch('os.stat') - def test_get_file_mode(self, mock_stat): - class stat_result(object): - st_mode = 0o777 - st_gid = 33333 - - test_file = '/var/tmp/made_up_file' - mock_stat.return_value = stat_result - mode = utils.get_file_mode(test_file) - self.assertEqual(0o777, mode) - mock_stat.assert_called_once_with(test_file) - - @mock.patch('os.stat') - def test_get_file_gid(self, mock_stat): - - class stat_result(object): - st_mode = 0o777 - st_gid = 33333 - - test_file = '/var/tmp/made_up_file' - mock_stat.return_value = stat_result - gid = utils.get_file_gid(test_file) - self.assertEqual(33333, gid) - mock_stat.assert_called_once_with(test_file) - - @mock.patch('cinder.utils.CONF') - def test_get_root_helper(self, mock_conf): - mock_conf.rootwrap_config = '/path/to/conf' - self.assertEqual('sudo cinder-rootwrap /path/to/conf', - utils.get_root_helper()) - - @ddt.data({'path_a': 'test', 'path_b': 'test', 'exp_eq': True}) - @ddt.data({'path_a': 'test', 'path_b': 'other', 'exp_eq': False}) - @ddt.unpack - @mock.patch('os.path.normcase') - def test_paths_normcase_equal(self, mock_normcase, path_a, - path_b, exp_eq): - # os.path.normcase will lower the path string on Windows - # while doing nothing on other platforms. 
- mock_normcase.side_effect = lambda x: x - - result = utils.paths_normcase_equal(path_a, path_b) - self.assertEqual(exp_eq, result) - - mock_normcase.assert_has_calls([mock.call(path_a), mock.call(path_b)]) - - -class TemporaryChownTestCase(test.TestCase): - @mock.patch('os.stat') - @mock.patch('os.getuid', return_value=1234) - @mock.patch('cinder.utils.execute') - def test_get_uid(self, mock_exec, mock_getuid, mock_stat): - mock_stat.return_value.st_uid = 5678 - test_filename = 'a_file' - with utils.temporary_chown(test_filename): - mock_exec.assert_called_once_with('chown', 1234, test_filename, - run_as_root=True) - mock_getuid.assert_called_once_with() - mock_stat.assert_called_once_with(test_filename) - calls = [mock.call('chown', 1234, test_filename, run_as_root=True), - mock.call('chown', 5678, test_filename, run_as_root=True)] - mock_exec.assert_has_calls(calls) - - @mock.patch('os.stat') - @mock.patch('os.getuid', return_value=1234) - @mock.patch('cinder.utils.execute') - def test_supplied_owner_uid(self, mock_exec, mock_getuid, mock_stat): - mock_stat.return_value.st_uid = 5678 - test_filename = 'a_file' - with utils.temporary_chown(test_filename, owner_uid=9101): - mock_exec.assert_called_once_with('chown', 9101, test_filename, - run_as_root=True) - self.assertFalse(mock_getuid.called) - mock_stat.assert_called_once_with(test_filename) - calls = [mock.call('chown', 9101, test_filename, run_as_root=True), - mock.call('chown', 5678, test_filename, run_as_root=True)] - mock_exec.assert_has_calls(calls) - - @mock.patch('os.stat') - @mock.patch('os.getuid', return_value=5678) - @mock.patch('cinder.utils.execute') - def test_matching_uid(self, mock_exec, mock_getuid, mock_stat): - mock_stat.return_value.st_uid = 5678 - test_filename = 'a_file' - with utils.temporary_chown(test_filename): - pass - mock_getuid.assert_called_once_with() - mock_stat.assert_called_once_with(test_filename) - self.assertFalse(mock_exec.called) - - -class 
TempdirTestCase(test.TestCase): - @mock.patch('tempfile.mkdtemp') - @mock.patch('shutil.rmtree') - def test_tempdir(self, mock_rmtree, mock_mkdtemp): - with utils.tempdir(a='1', b=2) as td: - self.assertEqual(mock_mkdtemp.return_value, td) - self.assertFalse(mock_rmtree.called) - mock_mkdtemp.assert_called_once_with(a='1', b=2) - mock_rmtree.assert_called_once_with(mock_mkdtemp.return_value) - - @mock.patch('tempfile.mkdtemp') - @mock.patch('shutil.rmtree', side_effect=OSError) - def test_tempdir_error(self, mock_rmtree, mock_mkdtemp): - with utils.tempdir(a='1', b=2) as td: - self.assertEqual(mock_mkdtemp.return_value, td) - self.assertFalse(mock_rmtree.called) - mock_mkdtemp.assert_called_once_with(a='1', b=2) - mock_rmtree.assert_called_once_with(mock_mkdtemp.return_value) - - -class WalkClassHierarchyTestCase(test.TestCase): - def test_walk_class_hierarchy(self): - class A(object): - pass - - class B(A): - pass - - class C(A): - pass - - class D(B): - pass - - class E(A): - pass - - class_pairs = zip((D, B, E), - utils.walk_class_hierarchy(A, encountered=[C])) - for actual, expected in class_pairs: - self.assertEqual(expected, actual) - - class_pairs = zip((D, B, C, E), utils.walk_class_hierarchy(A)) - for actual, expected in class_pairs: - self.assertEqual(expected, actual) - - -class GetDiskOfPartitionTestCase(test.TestCase): - def test_devpath_is_diskpath(self): - devpath = '/some/path' - st_mock = mock.Mock() - output = utils._get_disk_of_partition(devpath, st_mock) - self.assertEqual('/some/path', output[0]) - self.assertIs(st_mock, output[1]) - - with mock.patch('os.stat') as mock_stat: - devpath = '/some/path' - output = utils._get_disk_of_partition(devpath) - mock_stat.assert_called_once_with(devpath) - self.assertEqual(devpath, output[0]) - self.assertIs(mock_stat.return_value, output[1]) - - @mock.patch('os.stat', side_effect=OSError) - def test_stat_oserror(self, mock_stat): - st_mock = mock.Mock() - devpath = '/some/path1' - output = 
utils._get_disk_of_partition(devpath, st_mock) - mock_stat.assert_called_once_with('/some/path') - self.assertEqual(devpath, output[0]) - self.assertIs(st_mock, output[1]) - - @mock.patch('stat.S_ISBLK', return_value=True) - @mock.patch('os.stat') - def test_diskpath_is_block_device(self, mock_stat, mock_isblk): - st_mock = mock.Mock() - devpath = '/some/path1' - output = utils._get_disk_of_partition(devpath, st_mock) - self.assertEqual('/some/path', output[0]) - self.assertEqual(mock_stat.return_value, output[1]) - - @mock.patch('stat.S_ISBLK', return_value=False) - @mock.patch('os.stat') - def test_diskpath_is_not_block_device(self, mock_stat, mock_isblk): - st_mock = mock.Mock() - devpath = '/some/path1' - output = utils._get_disk_of_partition(devpath, st_mock) - self.assertEqual(devpath, output[0]) - self.assertEqual(st_mock, output[1]) - - -class GetBlkdevMajorMinorTestCase(test.TestCase): - @mock.patch('os.stat') - def test_get_file_size(self, mock_stat): - - class stat_result(object): - st_mode = 0o777 - st_size = 1074253824 - - test_file = '/var/tmp/made_up_file' - mock_stat.return_value = stat_result - size = utils.get_file_size(test_file) - self.assertEqual(size, stat_result.st_size) - mock_stat.assert_called_once_with(test_file) - - @mock.patch('os.stat') - def test_get_blkdev_major_minor(self, mock_stat): - - class stat_result(object): - st_mode = 0o60660 - st_rdev = os.makedev(253, 7) - - test_device = '/dev/made_up_blkdev' - mock_stat.return_value = stat_result - dev = utils.get_blkdev_major_minor(test_device) - self.assertEqual('253:7', dev) - mock_stat.assert_called_once_with(test_device) - - @mock.patch('os.stat') - @mock.patch.object(utils, 'execute') - def _test_get_blkdev_major_minor_file(self, test_partition, - mock_exec, mock_stat): - mock_exec.return_value = ( - 'Filesystem Size Used Avail Use%% Mounted on\n' - '%s 4096 2048 2048 50%% /tmp\n' % test_partition, None) - - test_file = '/tmp/file' - test_disk = '/dev/made_up_disk' - - class 
stat_result_file(object): - st_mode = 0o660 - - class stat_result_partition(object): - st_mode = 0o60660 - st_rdev = os.makedev(8, 65) - - class stat_result_disk(object): - st_mode = 0o60660 - st_rdev = os.makedev(8, 64) - - def fake_stat(path): - try: - return {test_file: stat_result_file, - test_partition: stat_result_partition, - test_disk: stat_result_disk}[path] - except KeyError: - raise OSError - - mock_stat.side_effect = fake_stat - - dev = utils.get_blkdev_major_minor(test_file) - mock_stat.assert_any_call(test_file) - mock_exec.assert_called_once_with('df', test_file) - if test_partition.startswith('/'): - mock_stat.assert_any_call(test_partition) - mock_stat.assert_any_call(test_disk) - return dev - - def test_get_blkdev_major_minor_file(self): - dev = self._test_get_blkdev_major_minor_file('/dev/made_up_disk1') - self.assertEqual('8:64', dev) - - def test_get_blkdev_major_minor_file_nfs(self): - dev = self._test_get_blkdev_major_minor_file('nfs-server:/export/path') - self.assertIsNone(dev) - - @mock.patch('os.stat') - @mock.patch('stat.S_ISCHR', return_value=False) - @mock.patch('stat.S_ISBLK', return_value=False) - def test_get_blkdev_failure(self, mock_isblk, mock_ischr, mock_stat): - path = '/some/path' - self.assertRaises(exception.Error, - utils.get_blkdev_major_minor, - path, lookup_for_file=False) - mock_stat.assert_called_once_with(path) - mock_isblk.assert_called_once_with(mock_stat.return_value.st_mode) - mock_ischr.assert_called_once_with(mock_stat.return_value.st_mode) - - @mock.patch('os.stat') - @mock.patch('stat.S_ISCHR', return_value=True) - @mock.patch('stat.S_ISBLK', return_value=False) - def test_get_blkdev_is_chr(self, mock_isblk, mock_ischr, mock_stat): - path = '/some/path' - output = utils.get_blkdev_major_minor(path, lookup_for_file=False) - mock_stat.assert_called_once_with(path) - mock_isblk.assert_called_once_with(mock_stat.return_value.st_mode) - mock_ischr.assert_called_once_with(mock_stat.return_value.st_mode) - 
self.assertIsNone(output) - - -class MonkeyPatchTestCase(test.TestCase): - """Unit test for utils.monkey_patch().""" - def setUp(self): - super(MonkeyPatchTestCase, self).setUp() - self.example_package = 'cinder.tests.unit.monkey_patch_example.' - self.flags( - monkey_patch=True, - monkey_patch_modules=[self.example_package + 'example_a' + ':' - + self.example_package - + 'example_decorator']) - - def test_monkey_patch(self): - utils.monkey_patch() - cinder.tests.unit.monkey_patch_example.CALLED_FUNCTION = [] - from cinder.tests.unit.monkey_patch_example import example_a - from cinder.tests.unit.monkey_patch_example import example_b - - self.assertEqual('Example function', example_a.example_function_a()) - exampleA = example_a.ExampleClassA() - exampleA.example_method() - ret_a = exampleA.example_method_add(3, 5) - self.assertEqual(8, ret_a) - - self.assertEqual('Example function', example_b.example_function_b()) - exampleB = example_b.ExampleClassB() - exampleB.example_method() - ret_b = exampleB.example_method_add(3, 5) - - self.assertEqual(8, ret_b) - package_a = self.example_package + 'example_a.' - self.assertIn(package_a + 'example_function_a', - cinder.tests.unit.monkey_patch_example.CALLED_FUNCTION) - - self.assertIn(package_a + 'ExampleClassA.example_method', - cinder.tests.unit.monkey_patch_example.CALLED_FUNCTION) - self.assertIn(package_a + 'ExampleClassA.example_method_add', - cinder.tests.unit.monkey_patch_example.CALLED_FUNCTION) - package_b = self.example_package + 'example_b.' 
- self.assertNotIn( - package_b + 'example_function_b', - cinder.tests.unit.monkey_patch_example.CALLED_FUNCTION) - self.assertNotIn( - package_b + 'ExampleClassB.example_method', - cinder.tests.unit.monkey_patch_example.CALLED_FUNCTION) - self.assertNotIn( - package_b + 'ExampleClassB.example_method_add', - cinder.tests.unit.monkey_patch_example.CALLED_FUNCTION) - - -class AuditPeriodTest(test.TestCase): - - def setUp(self): - super(AuditPeriodTest, self).setUp() - test_time = datetime.datetime(second=23, - minute=12, - hour=8, - day=5, - month=3, - year=2012) - patcher = mock.patch.object(timeutils, 'utcnow') - self.addCleanup(patcher.stop) - self.mock_utcnow = patcher.start() - self.mock_utcnow.return_value = test_time - - def test_hour(self): - begin, end = utils.last_completed_audit_period(unit='hour') - self.assertEqual(datetime.datetime(hour=7, day=5, month=3, year=2012), - begin) - self.assertEqual(datetime.datetime(hour=8, day=5, month=3, year=2012), - end) - - def test_hour_with_offset_before_current(self): - begin, end = utils.last_completed_audit_period(unit='hour@10') - self.assertEqual(datetime.datetime(minute=10, - hour=7, - day=5, - month=3, - year=2012), - begin) - self.assertEqual(datetime.datetime(minute=10, - hour=8, - day=5, - month=3, - year=2012), - end) - - def test_hour_with_offset_after_current(self): - begin, end = utils.last_completed_audit_period(unit='hour@30') - self.assertEqual(datetime.datetime(minute=30, - hour=6, - day=5, - month=3, - year=2012), - begin) - self.assertEqual(datetime.datetime(minute=30, - hour=7, - day=5, - month=3, - year=2012), - end) - - def test_day(self): - begin, end = utils.last_completed_audit_period(unit='day') - self.assertEqual(datetime.datetime(day=4, month=3, year=2012), begin) - self.assertEqual(datetime.datetime(day=5, month=3, year=2012), end) - - def test_day_with_offset_before_current(self): - begin, end = utils.last_completed_audit_period(unit='day@6') - self.assertEqual(datetime.datetime(hour=6, 
day=4, month=3, year=2012), - begin) - self.assertEqual(datetime.datetime(hour=6, day=5, month=3, year=2012), - end) - - def test_day_with_offset_after_current(self): - begin, end = utils.last_completed_audit_period(unit='day@10') - self.assertEqual(datetime.datetime(hour=10, day=3, month=3, year=2012), - begin) - self.assertEqual(datetime.datetime(hour=10, day=4, month=3, year=2012), - end) - - def test_month(self): - begin, end = utils.last_completed_audit_period(unit='month') - self.assertEqual(datetime.datetime(day=1, month=2, year=2012), begin) - self.assertEqual(datetime.datetime(day=1, month=3, year=2012), end) - - def test_month_with_offset_before_current(self): - begin, end = utils.last_completed_audit_period(unit='month@2') - self.assertEqual(datetime.datetime(day=2, month=2, year=2012), begin) - self.assertEqual(datetime.datetime(day=2, month=3, year=2012), end) - - def test_month_with_offset_after_current(self): - begin, end = utils.last_completed_audit_period(unit='month@15') - self.assertEqual(datetime.datetime(day=15, month=1, year=2012), begin) - self.assertEqual(datetime.datetime(day=15, month=2, year=2012), end) - - @mock.patch('oslo_utils.timeutils.utcnow', - return_value=datetime.datetime(day=1, - month=1, - year=2012)) - def test_month_jan_day_first(self, mock_utcnow): - begin, end = utils.last_completed_audit_period(unit='month') - self.assertEqual(datetime.datetime(day=1, month=11, year=2011), begin) - self.assertEqual(datetime.datetime(day=1, month=12, year=2011), end) - - @mock.patch('oslo_utils.timeutils.utcnow', - return_value=datetime.datetime(day=2, - month=1, - year=2012)) - def test_month_jan_day_not_first(self, mock_utcnow): - begin, end = utils.last_completed_audit_period(unit='month') - self.assertEqual(datetime.datetime(day=1, month=12, year=2011), begin) - self.assertEqual(datetime.datetime(day=1, month=1, year=2012), end) - - def test_year(self): - begin, end = utils.last_completed_audit_period(unit='year') - 
self.assertEqual(datetime.datetime(day=1, month=1, year=2011), begin) - self.assertEqual(datetime.datetime(day=1, month=1, year=2012), end) - - def test_year_with_offset_before_current(self): - begin, end = utils.last_completed_audit_period(unit='year@2') - self.assertEqual(datetime.datetime(day=1, month=2, year=2011), begin) - self.assertEqual(datetime.datetime(day=1, month=2, year=2012), end) - - def test_year_with_offset_after_current(self): - begin, end = utils.last_completed_audit_period(unit='year@6') - self.assertEqual(datetime.datetime(day=1, month=6, year=2010), begin) - self.assertEqual(datetime.datetime(day=1, month=6, year=2011), end) - - def test_invalid_unit(self): - self.assertRaises(ValueError, - utils.last_completed_audit_period, - unit='invalid_unit') - - @mock.patch('cinder.utils.CONF') - def test_uses_conf_unit(self, mock_conf): - mock_conf.volume_usage_audit_period = 'hour' - begin1, end1 = utils.last_completed_audit_period() - self.assertEqual(60.0 * 60, (end1 - begin1).total_seconds()) - - mock_conf.volume_usage_audit_period = 'day' - begin2, end2 = utils.last_completed_audit_period() - - self.assertEqual(60.0 * 60 * 24, (end2 - begin2).total_seconds()) - - -class BrickUtils(test.TestCase): - """Unit test to test the brick utility wrapper functions.""" - - @mock.patch('cinder.utils.CONF') - @mock.patch('os_brick.initiator.connector.get_connector_properties') - @mock.patch('cinder.utils.get_root_helper') - def test_brick_get_connector_properties(self, mock_helper, mock_get, - mock_conf): - mock_conf.my_ip = '1.2.3.4' - output = utils.brick_get_connector_properties() - mock_helper.assert_called_once_with() - mock_get.assert_called_once_with(mock_helper.return_value, '1.2.3.4', - False, False) - self.assertEqual(mock_get.return_value, output) - - @mock.patch('os_brick.initiator.connector.InitiatorConnector.factory') - @mock.patch('cinder.utils.get_root_helper') - def test_brick_get_connector(self, mock_helper, mock_factory): - output = 
utils.brick_get_connector('protocol') - mock_helper.assert_called_once_with() - self.assertEqual(mock_factory.return_value, output) - mock_factory.assert_called_once_with( - 'protocol', mock_helper.return_value, driver=None, - use_multipath=False, device_scan_attempts=3) - - @mock.patch('os_brick.encryptors.get_volume_encryptor') - @mock.patch('cinder.utils.get_root_helper') - def test_brick_attach_volume_encryptor(self, mock_helper, - mock_get_encryptor): - attach_info = {'device': {'path': 'dev/sda'}, - 'conn': {'driver_volume_type': 'iscsi', - 'data': {}, }} - encryption = {'encryption_key_id': fake.ENCRYPTION_KEY_ID} - ctxt = mock.Mock(name='context') - mock_encryptor = mock.Mock() - mock_get_encryptor.return_value = mock_encryptor - utils.brick_attach_volume_encryptor(ctxt, attach_info, encryption) - - connection_info = attach_info['conn'] - connection_info['data']['device_path'] = attach_info['device']['path'] - mock_helper.assert_called_once_with() - mock_get_encryptor.assert_called_once_with( - root_helper=mock_helper.return_value, - connection_info=connection_info, - keymgr=mock.ANY, - **encryption) - mock_encryptor.attach_volume.assert_called_once_with( - ctxt, **encryption) - - @mock.patch('os_brick.encryptors.get_volume_encryptor') - @mock.patch('cinder.utils.get_root_helper') - def test_brick_detach_volume_encryptor(self, - mock_helper, mock_get_encryptor): - attach_info = {'device': {'path': 'dev/sda'}, - 'conn': {'driver_volume_type': 'iscsi', - 'data': {}, }} - encryption = {'encryption_key_id': fake.ENCRYPTION_KEY_ID} - mock_encryptor = mock.Mock() - mock_get_encryptor.return_value = mock_encryptor - utils.brick_detach_volume_encryptor(attach_info, encryption) - - mock_helper.assert_called_once_with() - connection_info = attach_info['conn'] - connection_info['data']['device_path'] = attach_info['device']['path'] - mock_get_encryptor.assert_called_once_with( - root_helper=mock_helper.return_value, - connection_info=connection_info, - 
keymgr=mock.ANY, - **encryption) - mock_encryptor.detach_volume.assert_called_once_with(**encryption) - - -class StringLengthTestCase(test.TestCase): - def test_check_string_length(self): - self.assertIsNone(utils.check_string_length( - 'test', 'name', max_length=255)) - self.assertRaises(exception.InvalidInput, - utils.check_string_length, - 11, 'name', max_length=255) - self.assertRaises(exception.InvalidInput, - utils.check_string_length, - '', 'name', min_length=1) - self.assertRaises(exception.InvalidInput, - utils.check_string_length, - 'a' * 256, 'name', max_length=255) - self.assertRaises(exception.InvalidInput, - utils.check_string_length, - dict(), 'name', max_length=255) - - -class AddVisibleAdminMetadataTestCase(test.TestCase): - def test_add_visible_admin_metadata_visible_key_only(self): - admin_metadata = [{"key": "invisible_key", "value": "invisible_value"}, - {"key": "readonly", "value": "visible"}, - {"key": "attached_mode", "value": "visible"}] - metadata = [{"key": "key", "value": "value"}, - {"key": "readonly", "value": "existing"}] - volume = {'volume_admin_metadata': admin_metadata, - 'volume_metadata': metadata} - utils.add_visible_admin_metadata(volume) - self.assertEqual([{"key": "key", "value": "value"}, - {"key": "readonly", "value": "visible"}, - {"key": "attached_mode", "value": "visible"}], - volume['volume_metadata']) - - admin_metadata = {"invisible_key": "invisible_value", - "readonly": "visible", - "attached_mode": "visible"} - metadata = {"key": "value", "readonly": "existing"} - volume = {'admin_metadata': admin_metadata, - 'metadata': metadata} - utils.add_visible_admin_metadata(volume) - self.assertEqual({'key': 'value', - 'attached_mode': 'visible', - 'readonly': 'visible'}, - volume['metadata']) - - def test_add_visible_admin_metadata_no_visible_keys(self): - admin_metadata = [ - {"key": "invisible_key1", "value": "invisible_value1"}, - {"key": "invisible_key2", "value": "invisible_value2"}, - {"key": "invisible_key3", 
"value": "invisible_value3"}] - metadata = [{"key": "key", "value": "value"}] - volume = {'volume_admin_metadata': admin_metadata, - 'volume_metadata': metadata} - utils.add_visible_admin_metadata(volume) - self.assertEqual([{"key": "key", "value": "value"}], - volume['volume_metadata']) - - admin_metadata = {"invisible_key1": "invisible_value1", - "invisible_key2": "invisible_value2", - "invisible_key3": "invisible_value3"} - metadata = {"key": "value"} - volume = {'admin_metadata': admin_metadata, - 'metadata': metadata} - utils.add_visible_admin_metadata(volume) - self.assertEqual({'key': 'value'}, volume['metadata']) - - def test_add_visible_admin_metadata_no_existing_metadata(self): - admin_metadata = [{"key": "invisible_key", "value": "invisible_value"}, - {"key": "readonly", "value": "visible"}, - {"key": "attached_mode", "value": "visible"}] - volume = {'volume_admin_metadata': admin_metadata} - utils.add_visible_admin_metadata(volume) - self.assertEqual({'attached_mode': 'visible', 'readonly': 'visible'}, - volume['metadata']) - - admin_metadata = {"invisible_key": "invisible_value", - "readonly": "visible", - "attached_mode": "visible"} - volume = {'admin_metadata': admin_metadata} - utils.add_visible_admin_metadata(volume) - self.assertEqual({'attached_mode': 'visible', 'readonly': 'visible'}, - volume['metadata']) - - -class InvalidFilterTestCase(test.TestCase): - def test_admin_allows_all_options(self): - ctxt = mock.Mock(name='context') - ctxt.is_admin = True - - filters = {'allowed1': None, 'allowed2': None, 'not_allowed1': None} - fltrs_orig = {'allowed1': None, 'allowed2': None, 'not_allowed1': None} - allowed_search_options = ('allowed1', 'allowed2') - allowed_orig = ('allowed1', 'allowed2') - - utils.remove_invalid_filter_options(ctxt, filters, - allowed_search_options) - - self.assertEqual(allowed_orig, allowed_search_options) - self.assertEqual(fltrs_orig, filters) - - def test_admin_allows_some_options(self): - ctxt = mock.Mock(name='context') 
- ctxt.is_admin = False - - filters = {'allowed1': None, 'allowed2': None, 'not_allowed1': None} - fltrs_orig = {'allowed1': None, 'allowed2': None, 'not_allowed1': None} - allowed_search_options = ('allowed1', 'allowed2') - allowed_orig = ('allowed1', 'allowed2') - - utils.remove_invalid_filter_options(ctxt, filters, - allowed_search_options) - - self.assertEqual(allowed_orig, allowed_search_options) - self.assertNotEqual(fltrs_orig, filters) - self.assertEqual(allowed_search_options, tuple(sorted(filters.keys()))) - - -class IsBlkDeviceTestCase(test.TestCase): - @mock.patch('stat.S_ISBLK', return_value=True) - @mock.patch('os.stat') - def test_is_blk_device(self, mock_os_stat, mock_S_ISBLK): - dev = 'some_device' - self.assertTrue(utils.is_blk_device(dev)) - - @mock.patch('stat.S_ISBLK', return_value=False) - @mock.patch('os.stat') - def test_not_is_blk_device(self, mock_os_stat, mock_S_ISBLK): - dev = 'not_some_device' - self.assertFalse(utils.is_blk_device(dev)) - - @mock.patch('stat.S_ISBLK', side_effect=Exception) - @mock.patch('os.stat') - def test_fail_is_blk_device(self, mock_os_stat, mock_S_ISBLK): - dev = 'device_exception' - self.assertFalse(utils.is_blk_device(dev)) - - -class WrongException(Exception): - pass - - -class TestRetryDecorator(test.TestCase): - def test_no_retry_required(self): - self.counter = 0 - - with mock.patch.object(time, 'sleep') as mock_sleep: - @utils.retry(exception.VolumeBackendAPIException, - interval=2, - retries=3, - backoff_rate=2) - def succeeds(): - self.counter += 1 - return 'success' - - ret = succeeds() - self.assertFalse(mock_sleep.called) - self.assertEqual('success', ret) - self.assertEqual(1, self.counter) - - def test_no_retry_required_random(self): - self.counter = 0 - - with mock.patch.object(time, 'sleep') as mock_sleep: - @utils.retry(exception.VolumeBackendAPIException, - interval=2, - retries=3, - backoff_rate=2, - wait_random=True) - def succeeds(): - self.counter += 1 - return 'success' - - ret = 
succeeds() - self.assertFalse(mock_sleep.called) - self.assertEqual('success', ret) - self.assertEqual(1, self.counter) - - def test_retries_once(self): - self.counter = 0 - interval = 2 - backoff_rate = 2 - retries = 3 - - with mock.patch.object(time, 'sleep') as mock_sleep: - @utils.retry(exception.VolumeBackendAPIException, - interval, - retries, - backoff_rate) - def fails_once(): - self.counter += 1 - if self.counter < 2: - raise exception.VolumeBackendAPIException(data='fake') - else: - return 'success' - - ret = fails_once() - self.assertEqual('success', ret) - self.assertEqual(2, self.counter) - self.assertEqual(1, mock_sleep.call_count) - mock_sleep.assert_called_with(interval * backoff_rate) - - def test_retries_once_random(self): - self.counter = 0 - interval = 2 - backoff_rate = 2 - retries = 3 - - with mock.patch.object(time, 'sleep') as mock_sleep: - @utils.retry(exception.VolumeBackendAPIException, - interval, - retries, - backoff_rate, - wait_random=True) - def fails_once(): - self.counter += 1 - if self.counter < 2: - raise exception.VolumeBackendAPIException(data='fake') - else: - return 'success' - - ret = fails_once() - self.assertEqual('success', ret) - self.assertEqual(2, self.counter) - self.assertEqual(1, mock_sleep.call_count) - self.assertTrue(mock_sleep.called) - - def test_limit_is_reached(self): - self.counter = 0 - retries = 3 - interval = 2 - backoff_rate = 4 - - with mock.patch.object(time, 'sleep') as mock_sleep: - @utils.retry(exception.VolumeBackendAPIException, - interval, - retries, - backoff_rate) - def always_fails(): - self.counter += 1 - raise exception.VolumeBackendAPIException(data='fake') - - self.assertRaises(exception.VolumeBackendAPIException, - always_fails) - self.assertEqual(retries, self.counter) - - expected_sleep_arg = [] - - for i in range(retries): - if i > 0: - interval *= backoff_rate - expected_sleep_arg.append(float(interval)) - - mock_sleep.assert_has_calls(map(mock.call, expected_sleep_arg)) - - def 
test_wrong_exception_no_retry(self): - - with mock.patch.object(time, 'sleep') as mock_sleep: - @utils.retry(exception.VolumeBackendAPIException) - def raise_unexpected_error(): - raise WrongException("wrong exception") - - self.assertRaises(WrongException, raise_unexpected_error) - self.assertFalse(mock_sleep.called) - - -@ddt.ddt -class LogTracingTestCase(test.TestCase): - - def test_utils_setup_tracing(self): - self.mock_object(utils, 'LOG') - - utils.setup_tracing(None) - self.assertFalse(utils.TRACE_API) - self.assertFalse(utils.TRACE_METHOD) - self.assertEqual(0, utils.LOG.warning.call_count) - - utils.setup_tracing(['method']) - self.assertFalse(utils.TRACE_API) - self.assertTrue(utils.TRACE_METHOD) - self.assertEqual(0, utils.LOG.warning.call_count) - - utils.setup_tracing(['method', 'api']) - self.assertTrue(utils.TRACE_API) - self.assertTrue(utils.TRACE_METHOD) - self.assertEqual(0, utils.LOG.warning.call_count) - - def test_utils_setup_tracing_invalid_key(self): - self.mock_object(utils, 'LOG') - - utils.setup_tracing(['fake']) - - self.assertFalse(utils.TRACE_API) - self.assertFalse(utils.TRACE_METHOD) - self.assertEqual(1, utils.LOG.warning.call_count) - - def test_utils_setup_tracing_valid_and_invalid_key(self): - self.mock_object(utils, 'LOG') - - utils.setup_tracing(['method', 'fake']) - - self.assertFalse(utils.TRACE_API) - self.assertTrue(utils.TRACE_METHOD) - self.assertEqual(1, utils.LOG.warning.call_count) - - def test_trace_no_tracing(self): - self.mock_object(utils, 'LOG') - - @utils.trace_method - def _trace_test_method(*args, **kwargs): - return 'OK' - - utils.setup_tracing(None) - - result = _trace_test_method() - - self.assertEqual('OK', result) - self.assertEqual(0, utils.LOG.debug.call_count) - - def test_utils_trace_method(self): - self.mock_object(utils, 'LOG') - - @utils.trace_method - def _trace_test_method(*args, **kwargs): - return 'OK' - - utils.setup_tracing(['method']) - - result = _trace_test_method() - self.assertEqual('OK', 
result) - self.assertEqual(2, utils.LOG.debug.call_count) - - def test_utils_trace_api(self): - self.mock_object(utils, 'LOG') - - @utils.trace_api - def _trace_test_api(*args, **kwargs): - return 'OK' - - utils.setup_tracing(['api']) - - result = _trace_test_api() - self.assertEqual('OK', result) - self.assertEqual(2, utils.LOG.debug.call_count) - - def test_utils_trace_method_default_logger(self): - mock_log = self.mock_object(utils, 'LOG') - - @utils.trace_method - def _trace_test_method_custom_logger(*args, **kwargs): - return 'OK' - utils.setup_tracing(['method']) - - result = _trace_test_method_custom_logger() - - self.assertEqual('OK', result) - self.assertEqual(2, mock_log.debug.call_count) - - def test_utils_trace_method_inner_decorator(self): - mock_logging = self.mock_object(utils, 'logging') - mock_log = mock.Mock() - mock_log.isEnabledFor = lambda x: True - mock_logging.getLogger = mock.Mock(return_value=mock_log) - - def _test_decorator(f): - def blah(*args, **kwargs): - return f(*args, **kwargs) - return blah - - @_test_decorator - @utils.trace_method - def _trace_test_method(*args, **kwargs): - return 'OK' - - utils.setup_tracing(['method']) - - result = _trace_test_method(self) - - self.assertEqual('OK', result) - self.assertEqual(2, mock_log.debug.call_count) - # Ensure the correct function name was logged - for call in mock_log.debug.call_args_list: - self.assertIn('_trace_test_method', str(call)) - self.assertNotIn('blah', str(call)) - - def test_utils_trace_method_outer_decorator(self): - mock_logging = self.mock_object(utils, 'logging') - mock_log = mock.Mock() - mock_log.isEnabledFor = lambda x: True - mock_logging.getLogger = mock.Mock(return_value=mock_log) - - def _test_decorator(f): - def blah(*args, **kwargs): - return f(*args, **kwargs) - return blah - - @utils.trace_method - @_test_decorator - def _trace_test_method(*args, **kwargs): - return 'OK' - - utils.setup_tracing(['method']) - - result = _trace_test_method(self) - - 
self.assertEqual('OK', result) - self.assertEqual(2, mock_log.debug.call_count) - # Ensure the incorrect function name was logged - for call in mock_log.debug.call_args_list: - self.assertNotIn('_trace_test_method', str(call)) - self.assertIn('blah', str(call)) - - def test_utils_trace_method_outer_decorator_with_functools(self): - mock_log = mock.Mock() - mock_log.isEnabledFor = lambda x: True - self.mock_object(utils.logging, 'getLogger', mock_log) - mock_log = self.mock_object(utils, 'LOG') - - def _test_decorator(f): - @functools.wraps(f) - def wraps(*args, **kwargs): - return f(*args, **kwargs) - return wraps - - @utils.trace_method - @_test_decorator - def _trace_test_method(*args, **kwargs): - return 'OK' - - utils.setup_tracing(['method']) - - result = _trace_test_method() - - self.assertEqual('OK', result) - self.assertEqual(2, mock_log.debug.call_count) - # Ensure the incorrect function name was logged - for call in mock_log.debug.call_args_list: - self.assertIn('_trace_test_method', str(call)) - self.assertNotIn('wraps', str(call)) - - def test_utils_trace_method_with_exception(self): - self.LOG = self.mock_object(utils, 'LOG') - - @utils.trace_method - def _trace_test_method(*args, **kwargs): - raise exception.APITimeout('test message') - - utils.setup_tracing(['method']) - - self.assertRaises(exception.APITimeout, _trace_test_method) - - exception_log = self.LOG.debug.call_args_list[1] - self.assertIn('exception', str(exception_log)) - self.assertIn('test message', str(exception_log)) - - def test_utils_trace_method_with_time(self): - mock_logging = self.mock_object(utils, 'logging') - mock_log = mock.Mock() - mock_log.isEnabledFor = lambda x: True - mock_logging.getLogger = mock.Mock(return_value=mock_log) - - mock_time = mock.Mock(side_effect=[3.1, 6]) - self.mock_object(time, 'time', mock_time) - - @utils.trace_method - def _trace_test_method(*args, **kwargs): - return 'OK' - - utils.setup_tracing(['method']) - - result = _trace_test_method(self) - 
- self.assertEqual('OK', result) - return_log = mock_log.debug.call_args_list[1] - self.assertIn('2900', str(return_log)) - - def test_utils_trace_wrapper_class(self): - mock_logging = self.mock_object(utils, 'logging') - mock_log = mock.Mock() - mock_log.isEnabledFor = lambda x: True - mock_logging.getLogger = mock.Mock(return_value=mock_log) - - utils.setup_tracing(['method']) - - @six.add_metaclass(utils.TraceWrapperMetaclass) - class MyClass(object): - def trace_test_method(self): - return 'OK' - - test_class = MyClass() - result = test_class.trace_test_method() - - self.assertEqual('OK', result) - self.assertEqual(2, mock_log.debug.call_count) - - def test_utils_trace_method_with_password_dict(self): - mock_logging = self.mock_object(utils, 'logging') - mock_log = mock.Mock() - mock_log.isEnabledFor = lambda x: True - mock_logging.getLogger = mock.Mock(return_value=mock_log) - - @utils.trace_method - def _trace_test_method(*args, **kwargs): - return {'something': 'test', - 'password': 'Now you see me'} - - utils.setup_tracing(['method']) - result = _trace_test_method(self) - expected_unmasked_dict = {'something': 'test', - 'password': 'Now you see me'} - - self.assertEqual(expected_unmasked_dict, result) - self.assertEqual(2, mock_log.debug.call_count) - self.assertIn("'password': '***'", - str(mock_log.debug.call_args_list[1])) - - def test_utils_trace_method_with_password_str(self): - mock_logging = self.mock_object(utils, 'logging') - mock_log = mock.Mock() - mock_log.isEnabledFor = lambda x: True - mock_logging.getLogger = mock.Mock(return_value=mock_log) - - @utils.trace_method - def _trace_test_method(*args, **kwargs): - return "'adminPass': 'Now you see me'" - - utils.setup_tracing(['method']) - result = _trace_test_method(self) - expected_unmasked_str = "'adminPass': 'Now you see me'" - - self.assertEqual(expected_unmasked_str, result) - self.assertEqual(2, mock_log.debug.call_count) - self.assertIn("'adminPass': '***'", - 
str(mock_log.debug.call_args_list[1])) - - @ddt.data( - {'total': 30.01, 'free': 28.01, 'provisioned': 2.0, 'max_ratio': 1.0, - 'thin_support': False, 'thick_support': True, - 'is_thin_lun': False, 'expected': 27.01}, - {'total': 20.01, 'free': 18.01, 'provisioned': 2.0, 'max_ratio': 2.0, - 'thin_support': True, 'thick_support': False, - 'is_thin_lun': True, 'expected': 37.02}, - {'total': 20.01, 'free': 18.01, 'provisioned': 2.0, 'max_ratio': 2.0, - 'thin_support': True, 'thick_support': True, - 'is_thin_lun': True, 'expected': 37.02}, - {'total': 30.01, 'free': 28.01, 'provisioned': 2.0, 'max_ratio': 2.0, - 'thin_support': True, 'thick_support': True, - 'is_thin_lun': False, 'expected': 27.01}, - ) - @ddt.unpack - def test_utils_calculate_virtual_free_capacity_provision_type( - self, total, free, provisioned, max_ratio, thin_support, - thick_support, is_thin_lun, expected): - host_stat = {'total_capacity_gb': total, - 'free_capacity_gb': free, - 'provisioned_capacity_gb': provisioned, - 'max_over_subscription_ratio': max_ratio, - 'thin_provisioning_support': thin_support, - 'thick_provisioning_support': thick_support, - 'reserved_percentage': 5} - - free_capacity = utils.calculate_virtual_free_capacity( - host_stat['total_capacity_gb'], - host_stat['free_capacity_gb'], - host_stat['provisioned_capacity_gb'], - host_stat['thin_provisioning_support'], - host_stat['max_over_subscription_ratio'], - host_stat['reserved_percentage'], - is_thin_lun) - - self.assertEqual(expected, free_capacity) - - -class Comparable(utils.ComparableMixin): - def __init__(self, value): - self.value = value - - def _cmpkey(self): - return self.value - - -class TestComparableMixin(test.TestCase): - - def setUp(self): - super(TestComparableMixin, self).setUp() - self.one = Comparable(1) - self.two = Comparable(2) - - def test_lt(self): - self.assertTrue(self.one < self.two) - self.assertFalse(self.two < self.one) - self.assertFalse(self.one < self.one) - - def test_le(self): - 
self.assertTrue(self.one <= self.two) - self.assertFalse(self.two <= self.one) - self.assertTrue(self.one <= self.one) - - def test_eq(self): - self.assertFalse(self.one == self.two) - self.assertFalse(self.two == self.one) - self.assertTrue(self.one == self.one) - - def test_ge(self): - self.assertFalse(self.one >= self.two) - self.assertTrue(self.two >= self.one) - self.assertTrue(self.one >= self.one) - - def test_gt(self): - self.assertFalse(self.one > self.two) - self.assertTrue(self.two > self.one) - self.assertFalse(self.one > self.one) - - def test_ne(self): - self.assertTrue(self.one != self.two) - self.assertTrue(self.two != self.one) - self.assertFalse(self.one != self.one) - - def test_compare(self): - self.assertEqual(NotImplemented, - self.one._compare(1, self.one._cmpkey)) - - -@ddt.ddt -class TestValidateInteger(test.TestCase): - - @ddt.data( - (2 ** 31) + 1, # More than max value - -12, # Less than min value - 2.05, # Float value - "12.05", # Float value in string format - "should be int", # String - u"test" # String in unicode format - ) - def test_validate_integer_raise_assert(self, value): - self.assertRaises(webob.exc.HTTPBadRequest, - utils.validate_integer, - value, 'limit', min_value=-1, max_value=(2 ** 31)) - - @ddt.data( - "123", # integer in string format - 123, # integer - u"123" # integer in unicode format - ) - def test_validate_integer(self, value): - res = utils.validate_integer(value, 'limit', min_value=-1, - max_value=(2 ** 31)) - self.assertEqual(123, res) - - -@ddt.ddt -class TestNotificationShortCircuit(test.TestCase): - def test_do_nothing_getter(self): - """Test any attribute will always return the same instance (self).""" - donothing = utils.DoNothing() - self.assertIs(donothing, donothing.anyname) - - def test_do_nothing_caller(self): - """Test calling the object will always return the same instance.""" - donothing = utils.DoNothing() - self.assertIs(donothing, donothing()) - - def test_do_nothing_json_serializable(self): - 
"""Test calling the object will always return the same instance.""" - donothing = utils.DoNothing() - self.assertEqual('""', json.dumps(donothing)) - - @utils.if_notifications_enabled - def _decorated_method(self): - return mock.sentinel.success - - def test_if_notification_enabled_when_enabled(self): - """Test method is called when notifications are enabled.""" - result = self._decorated_method() - self.assertEqual(mock.sentinel.success, result) - - @ddt.data([], ['noop'], ['noop', 'noop']) - def test_if_notification_enabled_when_disabled(self, driver): - """Test method is not called when notifications are disabled.""" - self.override_config('driver', driver, - group='oslo_messaging_notifications') - result = self._decorated_method() - self.assertEqual(utils.DO_NOTHING, result) - - -@ddt.ddt -class TestLogLevels(test.TestCase): - @ddt.data(None, '', 'wronglevel') - def test_get_log_method_invalid(self, level): - self.assertRaises(exception.InvalidInput, - utils.get_log_method, level) - - @ddt.data(('info', utils.logging.INFO), ('warning', utils.logging.WARNING), - ('INFO', utils.logging.INFO), ('wArNiNg', utils.logging.WARNING), - ('error', utils.logging.ERROR), ('debug', utils.logging.DEBUG)) - @ddt.unpack - def test_get_log_method(self, level, logger): - result = utils.get_log_method(level) - self.assertEqual(logger, result) - - def test_get_log_levels(self): - levels = utils.get_log_levels('cinder.api') - self.assertTrue(len(levels) > 1) - self.assertSetEqual({'DEBUG'}, set(levels.values())) - - @ddt.data(None, '', 'wronglevel') - def test_set_log_levels_invalid(self, level): - self.assertRaises(exception.InvalidInput, - utils.set_log_levels, '', level) - - def test_set_log_levels(self): - prefix = 'cinder.utils' - levels = utils.get_log_levels(prefix) - self.assertEqual('DEBUG', levels[prefix]) - - utils.set_log_levels(prefix, 'warning') - levels = utils.get_log_levels(prefix) - self.assertEqual('WARNING', levels[prefix]) - - utils.set_log_levels(prefix, 
'debug') - levels = utils.get_log_levels(prefix) - self.assertEqual('DEBUG', levels[prefix]) - - -@ddt.ddt -class TestCheckMetadataProperties(test.TestCase): - @ddt.data( - {'a': {'foo': 'bar'}}, # value is a nested dict - {'a': 123}, # value is an integer - {'a': 123.4}, # value is a float - {'a': True}, # value is a bool - {'a': ('foo', 'bar')}, # value is a tuple - {'a': []}, # value is a list - {'a': None} # value is None - ) - def test_metadata_value_not_string_raise(self, meta): - self.assertRaises(exception.InvalidVolumeMetadata, - utils.check_metadata_properties, - meta) - - def test_metadata_value_not_dict_raise(self): - meta = 123 - self.assertRaises(exception.InvalidInput, - utils.check_metadata_properties, - meta) diff --git a/cinder/tests/unit/test_volume_cleanup.py b/cinder/tests/unit/test_volume_cleanup.py deleted file mode 100644 index 190e9ea15..000000000 --- a/cinder/tests/unit/test_volume_cleanup.py +++ /dev/null @@ -1,184 +0,0 @@ -# Copyright 2010 United States Government as represented by the -# Administrator of the National Aeronautics and Space Administration. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
-import mock -from oslo_config import cfg - -from cinder import context -from cinder import db -from cinder import exception -from cinder import objects -from cinder.objects import fields -from cinder import service -from cinder.tests.unit.api import fakes -from cinder.tests.unit import utils as tests_utils -from cinder.tests.unit import volume as base - - -CONF = cfg.CONF - - -class VolumeCleanupTestCase(base.BaseVolumeTestCase): - MOCK_WORKER = False - - def setUp(self): - super(VolumeCleanupTestCase, self).setUp() - self.service_id = 1 - self.mock_object(service.Service, 'service_id', self.service_id) - self.patch('cinder.volume.utils.clear_volume', autospec=True) - - def _assert_workers_are_removed(self): - workers = db.worker_get_all(self.context, read_deleted='yes') - self.assertListEqual([], workers) - - def test_init_host_clears_uploads_available_volume(self): - """init_host will clean an available volume stuck in uploading.""" - volume = tests_utils.create_volume(self.context, status='uploading', - size=0, host=CONF.host) - - db.worker_create(self.context, resource_type='Volume', - resource_id=volume.id, status=volume.status, - service_id=self.service_id) - - self.volume.init_host(service_id=service.Service.service_id) - volume.refresh() - self.assertEqual("available", volume.status) - self._assert_workers_are_removed() - - @mock.patch('cinder.manager.CleanableManager.init_host') - def test_init_host_clears_uploads_in_use_volume(self, init_host_mock): - """init_host will clean an in-use volume stuck in uploading.""" - volume = tests_utils.create_volume(self.context, status='uploading', - size=0, host=CONF.host) - - db.worker_create(self.context, resource_type='Volume', - resource_id=volume.id, status=volume.status, - service_id=self.service_id) - - fake_uuid = fakes.get_fake_uuid() - tests_utils.attach_volume(self.context, volume.id, fake_uuid, - 'fake_host', '/dev/vda') - self.volume.init_host(service_id=mock.sentinel.service_id) - 
init_host_mock.assert_called_once_with( - service_id=mock.sentinel.service_id, added_to_cluster=None) - volume.refresh() - self.assertEqual("in-use", volume.status) - self._assert_workers_are_removed() - - @mock.patch('cinder.image.image_utils.cleanup_temporary_file') - def test_init_host_clears_downloads(self, mock_cleanup_tmp_file): - """Test that init_host will unwedge a volume stuck in downloading.""" - volume = tests_utils.create_volume(self.context, status='downloading', - size=0, host=CONF.host) - db.worker_create(self.context, resource_type='Volume', - resource_id=volume.id, status=volume.status, - service_id=self.service_id) - mock_clear = self.mock_object(self.volume.driver, 'clear_download') - - self.volume.init_host(service_id=service.Service.service_id) - self.assertEqual(1, mock_clear.call_count) - self.assertEqual(volume.id, mock_clear.call_args[0][1].id) - volume.refresh() - self.assertEqual("error", volume['status']) - mock_cleanup_tmp_file.assert_called_once_with(CONF.host) - - self.volume.delete_volume(self.context, volume=volume) - self._assert_workers_are_removed() - - @mock.patch('cinder.image.image_utils.cleanup_temporary_file') - def test_init_host_resumes_deletes(self, mock_cleanup_tmp_file): - """init_host will resume deleting volume in deleting status.""" - volume = tests_utils.create_volume(self.context, status='deleting', - size=0, host=CONF.host) - - db.worker_create(self.context, resource_type='Volume', - resource_id=volume.id, status=volume.status, - service_id=self.service_id) - - self.volume.init_host(service_id=service.Service.service_id) - - self.assertRaises(exception.VolumeNotFound, db.volume_get, - context.get_admin_context(), volume.id) - mock_cleanup_tmp_file.assert_called_once_with(CONF.host) - self._assert_workers_are_removed() - - @mock.patch('cinder.image.image_utils.cleanup_temporary_file') - def test_create_volume_fails_with_creating_and_downloading_status( - self, mock_cleanup_tmp_file): - """Test 
init_host_with_service in case of volume. - - While the status of volume is 'creating' or 'downloading', - volume process down. - After process restarting this 'creating' status is changed to 'error'. - """ - for status in ('creating', 'downloading'): - volume = tests_utils.create_volume(self.context, status=status, - size=0, host=CONF.host) - - db.worker_create(self.context, resource_type='Volume', - resource_id=volume.id, status=volume.status, - service_id=self.service_id) - - self.volume.init_host(service_id=service.Service.service_id) - volume.refresh() - - self.assertEqual('error', volume['status']) - self.volume.delete_volume(self.context, volume) - self.assertTrue(mock_cleanup_tmp_file.called) - self._assert_workers_are_removed() - - def test_create_snapshot_fails_with_creating_status(self): - """Test init_host_with_service in case of snapshot. - - While the status of snapshot is 'creating', volume process - down. After process restarting this 'creating' status is - changed to 'error'. 
- """ - volume = tests_utils.create_volume(self.context, - **self.volume_params) - snapshot = tests_utils.create_snapshot( - self.context, - volume.id, - status=fields.SnapshotStatus.CREATING) - db.worker_create(self.context, resource_type='Snapshot', - resource_id=snapshot.id, status=snapshot.status, - service_id=self.service_id) - - self.volume.init_host(service_id=service.Service.service_id) - - snapshot_obj = objects.Snapshot.get_by_id(self.context, snapshot.id) - - self.assertEqual(fields.SnapshotStatus.ERROR, snapshot_obj.status) - self.assertEqual(service.Service.service_id, - self.volume.service_id) - self._assert_workers_are_removed() - - self.volume.delete_snapshot(self.context, snapshot_obj) - self.volume.delete_volume(self.context, volume) - - def test_init_host_clears_deleting_snapshots(self): - """Test that init_host will delete a snapshot stuck in deleting.""" - volume = tests_utils.create_volume(self.context, status='deleting', - size=1, host=CONF.host) - snapshot = tests_utils.create_snapshot(self.context, - volume.id, status='deleting') - - db.worker_create(self.context, resource_type='Volume', - resource_id=volume.id, status=volume.status, - service_id=self.service_id) - - self.volume.init_host(service_id=self.service_id) - self.assertRaises(exception.VolumeNotFound, volume.refresh) - self.assertRaises(exception.SnapshotNotFound, snapshot.refresh) diff --git a/cinder/tests/unit/test_volume_configuration.py b/cinder/tests/unit/test_volume_configuration.py deleted file mode 100644 index 2abb78b12..000000000 --- a/cinder/tests/unit/test_volume_configuration.py +++ /dev/null @@ -1,85 +0,0 @@ -# Copyright (c) 2012 Rackspace Hosting -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. 
You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -"""Tests for the configuration wrapper in volume drivers.""" - -from oslo_config import cfg - -from cinder import test -from cinder.volume import configuration - - -volume_opts = [ - cfg.StrOpt('str_opt', default='STR_OPT'), - cfg.BoolOpt('bool_opt', default=False) -] -more_volume_opts = [ - cfg.IntOpt('int_opt', default=1), -] - -CONF = cfg.CONF -CONF.register_opts(volume_opts) -CONF.register_opts(more_volume_opts) - - -class VolumeConfigurationTest(test.TestCase): - - def test_group_grafts_opts(self): - c = configuration.Configuration(volume_opts, config_group='foo') - self.assertEqual(c.str_opt, 'STR_OPT') - self.assertEqual(c.bool_opt, False) - self.assertEqual(c.str_opt, CONF.backend_defaults.str_opt) - self.assertEqual(c.bool_opt, CONF.backend_defaults.bool_opt) - self.assertIsNone(CONF.foo.str_opt) - self.assertIsNone(CONF.foo.bool_opt) - - def test_opts_no_group(self): - c = configuration.Configuration(volume_opts) - self.assertEqual(c.str_opt, CONF.str_opt) - self.assertEqual(c.bool_opt, CONF.bool_opt) - - def test_grafting_multiple_opts(self): - c = configuration.Configuration(volume_opts, config_group='foo') - c.append_config_values(more_volume_opts) - self.assertEqual(c.str_opt, 'STR_OPT') - self.assertEqual(c.bool_opt, False) - self.assertEqual(c.int_opt, 1) - - # We get the right values, but they are coming from the backend_default - # group of CONF no the 'foo' one. 
- self.assertEqual(c.str_opt, CONF.backend_defaults.str_opt) - self.assertEqual(c.bool_opt, CONF.backend_defaults.bool_opt) - self.assertEqual(c.int_opt, CONF.backend_defaults.int_opt) - self.assertIsNone(CONF.foo.str_opt) - self.assertIsNone(CONF.foo.bool_opt) - self.assertIsNone(CONF.foo.int_opt) - - def test_safe_get(self): - c = configuration.Configuration(volume_opts, config_group='foo') - self.assertIsNone(c.safe_get('none_opt')) - - def test_backend_specific_value(self): - c = configuration.Configuration(volume_opts, config_group='foo') - - # Set some new non-default value - CONF.set_override('str_opt', 'bar', group='backend_defaults') - actual_value = c.str_opt - self.assertEqual('bar', actual_value) - - CONF.set_override('str_opt', 'notbar', group='foo') - actual_value = c.str_opt - # Make sure that we pick up the backend value and not the shared group - # value... - self.assertEqual('notbar', actual_value) diff --git a/cinder/tests/unit/test_volume_glance_metadata.py b/cinder/tests/unit/test_volume_glance_metadata.py deleted file mode 100644 index ec2145a9f..000000000 --- a/cinder/tests/unit/test_volume_glance_metadata.py +++ /dev/null @@ -1,183 +0,0 @@ -# Copyright (c) 2011 Zadara Storage Inc. -# Copyright (c) 2011 OpenStack Foundation -# Copyright 2011 University of Southern California -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
-""" -Unit Tests for volume types extra specs code -""" - -from cinder import context -from cinder import db -from cinder import exception -from cinder import objects -from cinder import test -from cinder.tests.unit import fake_constants as fake - - -class VolumeGlanceMetadataTestCase(test.TestCase): - - def setUp(self): - super(VolumeGlanceMetadataTestCase, self).setUp() - self.ctxt = context.get_admin_context() - objects.register_all() - - def test_vol_glance_metadata_bad_vol_id(self): - ctxt = context.get_admin_context() - self.assertRaises(exception.VolumeNotFound, - db.volume_glance_metadata_create, - ctxt, fake.VOLUME_ID, 'key1', 'value1') - self.assertRaises(exception.VolumeNotFound, - db.volume_glance_metadata_get, ctxt, fake.VOLUME_ID) - db.volume_glance_metadata_delete_by_volume(ctxt, fake.VOLUME2_ID) - - def test_vol_update_glance_metadata(self): - ctxt = context.get_admin_context() - db.volume_create(ctxt, {'id': fake.VOLUME_ID}) - db.volume_create(ctxt, {'id': fake.VOLUME2_ID}) - db.volume_glance_metadata_create(ctxt, fake.VOLUME_ID, 'key1', - 'value1') - db.volume_glance_metadata_create(ctxt, fake.VOLUME2_ID, 'key1', - 'value1') - db.volume_glance_metadata_create(ctxt, fake.VOLUME2_ID, 'key2', - 'value2') - db.volume_glance_metadata_create(ctxt, fake.VOLUME2_ID, 'key3', 123) - - expected_metadata_1 = {'volume_id': fake.VOLUME_ID, - 'key': 'key1', - 'value': 'value1'} - - metadata = db.volume_glance_metadata_get(ctxt, fake.VOLUME_ID) - self.assertEqual(1, len(metadata)) - for key, value in expected_metadata_1.items(): - self.assertEqual(value, metadata[0][key]) - - expected_metadata_2 = ({'volume_id': fake.VOLUME2_ID, - 'key': 'key1', - 'value': 'value1'}, - {'volume_id': fake.VOLUME2_ID, - 'key': 'key2', - 'value': 'value2'}, - {'volume_id': fake.VOLUME2_ID, - 'key': 'key3', - 'value': '123'}) - - metadata = db.volume_glance_metadata_get(ctxt, fake.VOLUME2_ID) - self.assertEqual(3, len(metadata)) - for expected, meta in zip(expected_metadata_2, 
metadata): - for key, value in expected.items(): - self.assertEqual(value, meta[key]) - - self.assertRaises(exception.GlanceMetadataExists, - db.volume_glance_metadata_create, - ctxt, fake.VOLUME_ID, 'key1', 'value1a') - - metadata = db.volume_glance_metadata_get(ctxt, fake.VOLUME_ID) - self.assertEqual(1, len(metadata)) - for key, value in expected_metadata_1.items(): - self.assertEqual(value, metadata[0][key]) - - def test_vols_get_glance_metadata(self): - ctxt = context.get_admin_context() - db.volume_create(ctxt, {'id': fake.VOLUME_ID}) - db.volume_create(ctxt, {'id': fake.VOLUME2_ID}) - db.volume_create(ctxt, {'id': '3'}) - db.volume_glance_metadata_create(ctxt, fake.VOLUME_ID, 'key1', - 'value1') - db.volume_glance_metadata_create(ctxt, fake.VOLUME2_ID, 'key2', - 'value2') - db.volume_glance_metadata_create(ctxt, fake.VOLUME2_ID, 'key22', - 'value22') - - metadata = db.volume_glance_metadata_get_all(ctxt) - self.assertEqual(3, len(metadata)) - self._assert_metadata_equals(fake.VOLUME_ID, 'key1', 'value1', - metadata[0]) - self._assert_metadata_equals(fake.VOLUME2_ID, 'key2', 'value2', - metadata[1]) - self._assert_metadata_equals(fake.VOLUME2_ID, 'key22', 'value22', - metadata[2]) - - def _assert_metadata_equals(self, volume_id, key, value, observed): - self.assertEqual(volume_id, observed.volume_id) - self.assertEqual(key, observed.key) - self.assertEqual(value, observed.value) - - def test_vol_delete_glance_metadata(self): - ctxt = context.get_admin_context() - db.volume_create(ctxt, {'id': fake.VOLUME_ID}) - db.volume_glance_metadata_delete_by_volume(ctxt, fake.VOLUME_ID) - db.volume_glance_metadata_create(ctxt, fake.VOLUME_ID, 'key1', - 'value1') - db.volume_glance_metadata_delete_by_volume(ctxt, fake.VOLUME_ID) - self.assertRaises(exception.GlanceMetadataNotFound, - db.volume_glance_metadata_get, ctxt, fake.VOLUME_ID) - - def test_vol_glance_metadata_copy_to_snapshot(self): - ctxt = context.get_admin_context() - db.volume_create(ctxt, {'id': 
fake.VOLUME_ID}) - snap = objects.Snapshot(ctxt, volume_id=fake.VOLUME_ID) - snap.create() - db.volume_glance_metadata_create(ctxt, fake.VOLUME_ID, 'key1', - 'value1') - db.volume_glance_metadata_copy_to_snapshot(ctxt, snap.id, - fake.VOLUME_ID) - - expected_meta = {'snapshot_id': snap.id, - 'key': 'key1', - 'value': 'value1'} - - for meta in db.volume_snapshot_glance_metadata_get(ctxt, snap.id): - for (key, value) in expected_meta.items(): - self.assertEqual(value, meta[key]) - snap.destroy() - - def test_vol_glance_metadata_copy_from_volume_to_volume(self): - ctxt = context.get_admin_context() - db.volume_create(ctxt, {'id': fake.VOLUME_ID}) - db.volume_create(ctxt, {'id': fake.VOLUME2_ID, - 'source_volid': fake.VOLUME_ID}) - db.volume_glance_metadata_create(ctxt, fake.VOLUME_ID, 'key1', - 'value1') - db.volume_glance_metadata_copy_from_volume_to_volume(ctxt, - fake.VOLUME_ID, - fake.VOLUME2_ID) - - expected_meta = {'key': 'key1', - 'value': 'value1'} - - for meta in db.volume_glance_metadata_get(ctxt, fake.VOLUME2_ID): - for (key, value) in expected_meta.items(): - self.assertEqual(value, meta[key]) - - def test_volume_glance_metadata_copy_to_volume(self): - vol1 = db.volume_create(self.ctxt, {}) - vol2 = db.volume_create(self.ctxt, {}) - db.volume_glance_metadata_create(self.ctxt, vol1['id'], 'm1', 'v1') - snapshot = objects.Snapshot(self.ctxt, volume_id=vol1['id']) - snapshot.create() - db.volume_glance_metadata_copy_to_snapshot(self.ctxt, snapshot.id, - vol1['id']) - db.volume_glance_metadata_copy_to_volume(self.ctxt, vol2['id'], - snapshot.id) - metadata = db.volume_glance_metadata_get(self.ctxt, vol2['id']) - metadata = {m['key']: m['value'] for m in metadata} - self.assertEqual({'m1': 'v1'}, metadata) - - def test_volume_snapshot_glance_metadata_get_nonexistent(self): - vol = db.volume_create(self.ctxt, {}) - snapshot = objects.Snapshot(self.ctxt, volume_id=vol['id']) - snapshot.create() - self.assertRaises(exception.GlanceMetadataNotFound, - 
db.volume_snapshot_glance_metadata_get, - self.ctxt, snapshot.id) - snapshot.destroy() diff --git a/cinder/tests/unit/test_volume_throttling.py b/cinder/tests/unit/test_volume_throttling.py deleted file mode 100644 index 82e264557..000000000 --- a/cinder/tests/unit/test_volume_throttling.py +++ /dev/null @@ -1,74 +0,0 @@ -# Copyright (c) 2015 Hitachi Data Systems, Inc. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -"""Tests for volume copy throttling helpers.""" - -import mock - -from cinder import test -from cinder import utils -from cinder.volume import throttling - - -class ThrottleTestCase(test.TestCase): - - def test_NoThrottle(self): - with throttling.Throttle().subcommand('volume1', 'volume2') as cmd: - self.assertEqual([], cmd['prefix']) - - @mock.patch.object(utils, 'get_blkdev_major_minor') - def test_BlkioCgroup(self, mock_major_minor): - - def fake_get_blkdev_major_minor(path): - return {'src_volume1': "253:0", 'dst_volume1': "253:1", - 'src_volume2': "253:2", 'dst_volume2': "253:3"}[path] - - mock_major_minor.side_effect = fake_get_blkdev_major_minor - - self.exec_cnt = 0 - - def fake_execute(*cmd, **kwargs): - cmd_set = ['cgset', '-r', - 'blkio.throttle.%s_bps_device=%s %d', 'fake_group'] - set_order = [None, - ('read', '253:0', 1024), - ('write', '253:1', 1024), - # a nested job starts; bps limit are set to the half - ('read', '253:0', 512), - ('read', '253:2', 512), - ('write', '253:1', 512), - ('write', '253:3', 512), - # a 
nested job ends; bps limit is resumed - ('read', '253:0', 1024), - ('write', '253:1', 1024)] - - if set_order[self.exec_cnt] is None: - self.assertEqual(('cgcreate', '-g', 'blkio:fake_group'), cmd) - else: - cmd_set[2] %= set_order[self.exec_cnt] - self.assertEqual(tuple(cmd_set), cmd) - - self.exec_cnt += 1 - - with mock.patch.object(utils, 'execute', side_effect=fake_execute): - throttle = throttling.BlkioCgroup(1024, 'fake_group') - with throttle.subcommand('src_volume1', 'dst_volume1') as cmd: - self.assertEqual(['cgexec', '-g', 'blkio:fake_group'], - cmd['prefix']) - - # a nested job - with throttle.subcommand('src_volume2', 'dst_volume2') as cmd: - self.assertEqual(['cgexec', '-g', 'blkio:fake_group'], - cmd['prefix']) diff --git a/cinder/tests/unit/test_volume_transfer.py b/cinder/tests/unit/test_volume_transfer.py deleted file mode 100644 index c4c40a61b..000000000 --- a/cinder/tests/unit/test_volume_transfer.py +++ /dev/null @@ -1,297 +0,0 @@ -# Copyright (c) 2013 OpenStack Foundation -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
-"""Unit Tests for volume transfers.""" - - -import mock -from oslo_utils import timeutils - -from cinder import context -from cinder import exception -from cinder import objects -from cinder import quota -from cinder import test -from cinder.tests.unit import fake_constants as fake -from cinder.tests.unit import utils -from cinder.transfer import api as transfer_api - - -QUOTAS = quota.QUOTAS - - -class VolumeTransferTestCase(test.TestCase): - """Test cases for volume transfer code.""" - def setUp(self): - super(VolumeTransferTestCase, self).setUp() - self.ctxt = context.RequestContext(user_id=fake.USER_ID, - project_id=fake.PROJECT_ID) - self.updated_at = timeutils.utcnow() - - @mock.patch('cinder.volume.utils.notify_about_volume_usage') - def test_transfer_volume_create_delete(self, mock_notify): - tx_api = transfer_api.API() - volume = utils.create_volume(self.ctxt, updated_at=self.updated_at) - response = tx_api.create(self.ctxt, volume.id, 'Description') - volume = objects.Volume.get_by_id(self.ctxt, volume.id) - self.assertEqual('awaiting-transfer', volume['status'], - 'Unexpected state') - calls = [mock.call(self.ctxt, mock.ANY, "transfer.create.start"), - mock.call(self.ctxt, mock.ANY, "transfer.create.end")] - mock_notify.assert_has_calls(calls) - self.assertEqual(2, mock_notify.call_count) - - tx_api.delete(self.ctxt, response['id']) - volume = objects.Volume.get_by_id(self.ctxt, volume.id) - self.assertEqual('available', volume['status'], 'Unexpected state') - calls = [mock.call(self.ctxt, mock.ANY, "transfer.delete.start"), - mock.call(self.ctxt, mock.ANY, "transfer.delete.end")] - mock_notify.assert_has_calls(calls) - self.assertEqual(4, mock_notify.call_count) - - def test_transfer_invalid_volume(self): - tx_api = transfer_api.API() - volume = utils.create_volume(self.ctxt, status='in-use', - updated_at=self.updated_at) - self.assertRaises(exception.InvalidVolume, - tx_api.create, - self.ctxt, volume.id, 'Description') - volume = 
objects.Volume.get_by_id(self.ctxt, volume.id) - self.assertEqual('in-use', volume['status'], 'Unexpected state') - - @mock.patch('cinder.volume.utils.notify_about_volume_usage') - def test_transfer_accept_invalid_authkey(self, mock_notify): - svc = self.start_service('volume', host='test_host') - self.addCleanup(svc.stop) - tx_api = transfer_api.API() - volume = utils.create_volume(self.ctxt, updated_at=self.updated_at) - transfer = tx_api.create(self.ctxt, volume.id, 'Description') - volume = objects.Volume.get_by_id(self.ctxt, volume.id) - self.assertEqual('awaiting-transfer', volume['status'], - 'Unexpected state') - - self.assertRaises(exception.TransferNotFound, - tx_api.accept, - self.ctxt, '2', transfer['auth_key']) - - self.assertRaises(exception.InvalidAuthKey, - tx_api.accept, - self.ctxt, transfer['id'], 'wrong') - - @mock.patch('cinder.volume.utils.notify_about_volume_usage') - def test_transfer_accept_invalid_volume(self, mock_notify): - svc = self.start_service('volume', host='test_host') - self.addCleanup(svc.stop) - tx_api = transfer_api.API() - volume = utils.create_volume(self.ctxt, updated_at=self.updated_at) - transfer = tx_api.create(self.ctxt, volume.id, 'Description') - volume = objects.Volume.get_by_id(self.ctxt, volume.id) - self.assertEqual('awaiting-transfer', volume['status'], - 'Unexpected state') - - calls = [mock.call(self.ctxt, mock.ANY, "transfer.create.start"), - mock.call(self.ctxt, mock.ANY, "transfer.create.end")] - mock_notify.assert_has_calls(calls) - self.assertEqual(2, mock_notify.call_count) - - volume.status = 'wrong' - volume.save() - self.assertRaises(exception.InvalidVolume, - tx_api.accept, - self.ctxt, transfer['id'], transfer['auth_key']) - volume.status = 'awaiting-transfer' - volume.save() - - # Because the InvalidVolume exception is raised in tx_api, so there is - # only transfer.accept.start called and missing transfer.accept.end. 
- calls = [mock.call(self.ctxt, mock.ANY, "transfer.accept.start")] - mock_notify.assert_has_calls(calls) - self.assertEqual(3, mock_notify.call_count) - - @mock.patch('cinder.volume.utils.notify_about_volume_usage') - def test_transfer_accept_volume_in_consistencygroup(self, mock_notify): - svc = self.start_service('volume', host='test_host') - self.addCleanup(svc.stop) - tx_api = transfer_api.API() - consistencygroup = utils.create_consistencygroup(self.ctxt) - volume = utils.create_volume(self.ctxt, - updated_at=self.updated_at, - consistencygroup_id= - consistencygroup.id) - transfer = tx_api.create(self.ctxt, volume.id, 'Description') - - self.assertRaises(exception.InvalidVolume, - tx_api.accept, - self.ctxt, transfer['id'], transfer['auth_key']) - - @mock.patch.object(QUOTAS, "limit_check") - @mock.patch.object(QUOTAS, "reserve") - @mock.patch.object(QUOTAS, "add_volume_type_opts") - @mock.patch('cinder.volume.utils.notify_about_volume_usage') - def test_transfer_accept(self, mock_notify, mock_quota_voltype, - mock_quota_reserve, mock_quota_limit): - svc = self.start_service('volume', host='test_host') - self.addCleanup(svc.stop) - tx_api = transfer_api.API() - volume = utils.create_volume(self.ctxt, - volume_type_id=fake.VOLUME_TYPE_ID, - updated_at=self.updated_at) - transfer = tx_api.create(self.ctxt, volume.id, 'Description') - - self.ctxt.user_id = fake.USER2_ID - self.ctxt.project_id = fake.PROJECT2_ID - response = tx_api.accept(self.ctxt, - transfer['id'], - transfer['auth_key']) - volume = objects.Volume.get_by_id(self.ctxt, volume.id) - self.assertEqual(fake.PROJECT2_ID, volume.project_id) - self.assertEqual(fake.USER2_ID, volume.user_id) - - self.assertEqual(response['volume_id'], volume.id, - 'Unexpected volume id in response.') - self.assertEqual(response['id'], transfer['id'], - 'Unexpected transfer id in response.') - - calls = [mock.call(self.ctxt, mock.ANY, "transfer.accept.start"), - mock.call(self.ctxt, mock.ANY, "transfer.accept.end")] - 
mock_notify.assert_has_calls(calls) - # The notify_about_volume_usage is called twice at create(), - # and twice at accept(). - self.assertEqual(4, mock_notify.call_count) - - # Check QUOTAS reservation calls - # QUOTAS.add_volume_type_opts - reserve_opt = {'volumes': 1, 'gigabytes': 1} - release_opt = {'volumes': -1, 'gigabytes': -1} - calls = [mock.call(self.ctxt, reserve_opt, fake.VOLUME_TYPE_ID), - mock.call(self.ctxt, release_opt, fake.VOLUME_TYPE_ID)] - mock_quota_voltype.assert_has_calls(calls) - - # QUOTAS.reserve - calls = [mock.call(mock.ANY, **reserve_opt), - mock.call(mock.ANY, project_id=fake.PROJECT_ID, - **release_opt)] - mock_quota_reserve.assert_has_calls(calls) - - # QUOTAS.limit_check - values = {'per_volume_gigabytes': 1} - mock_quota_limit.assert_called_once_with(self.ctxt, - project_id=fake.PROJECT2_ID, - **values) - - @mock.patch.object(QUOTAS, "reserve") - @mock.patch.object(QUOTAS, "add_volume_type_opts") - @mock.patch('cinder.volume.utils.notify_about_volume_usage') - def test_transfer_accept_over_quota(self, mock_notify, mock_quota_voltype, - mock_quota_reserve): - svc = self.start_service('volume', host='test_host') - self.addCleanup(svc.stop) - tx_api = transfer_api.API() - volume = utils.create_volume(self.ctxt, - volume_type_id=fake.VOLUME_TYPE_ID, - updated_at=self.updated_at) - transfer = tx_api.create(self.ctxt, volume.id, 'Description') - fake_overs = ['volumes_lvmdriver-3'] - fake_quotas = {'gigabytes_lvmdriver-3': 1, - 'volumes_lvmdriver-3': 10} - fake_usages = {'gigabytes_lvmdriver-3': {'reserved': 0, 'in_use': 1}, - 'volumes_lvmdriver-3': {'reserved': 0, 'in_use': 1}} - - mock_quota_reserve.side_effect = exception.OverQuota( - overs=fake_overs, - quotas=fake_quotas, - usages=fake_usages) - - self.ctxt.user_id = fake.USER2_ID - self.ctxt.project_id = fake.PROJECT2_ID - self.assertRaises(exception.VolumeLimitExceeded, - tx_api.accept, - self.ctxt, - transfer['id'], - transfer['auth_key']) - # notification of transfer.accept is 
sent only after quota check - # passes - self.assertEqual(2, mock_notify.call_count) - - @mock.patch.object(QUOTAS, "limit_check") - @mock.patch('cinder.volume.utils.notify_about_volume_usage') - def test_transfer_accept_over_quota_check_limit(self, mock_notify, - mock_quota_limit): - svc = self.start_service('volume', host='test_host') - self.addCleanup(svc.stop) - tx_api = transfer_api.API() - volume = utils.create_volume(self.ctxt, - volume_type_id=fake.VOLUME_TYPE_ID, - updated_at=self.updated_at) - transfer = tx_api.create(self.ctxt, volume.id, 'Description') - fake_overs = ['per_volume_gigabytes'] - fake_quotas = {'per_volume_gigabytes': 1} - fake_usages = {} - - mock_quota_limit.side_effect = exception.OverQuota( - overs=fake_overs, - quotas=fake_quotas, - usages=fake_usages) - - self.ctxt.user_id = fake.USER2_ID - self.ctxt.project_id = fake.PROJECT2_ID - self.assertRaises(exception.VolumeSizeExceedsLimit, - tx_api.accept, - self.ctxt, - transfer['id'], - transfer['auth_key']) - # notification of transfer.accept is sent only after quota check - # passes - self.assertEqual(2, mock_notify.call_count) - - def test_transfer_get(self): - tx_api = transfer_api.API() - volume = utils.create_volume(self.ctxt, updated_at=self.updated_at) - transfer = tx_api.create(self.ctxt, volume['id'], 'Description') - t = tx_api.get(self.ctxt, transfer['id']) - self.assertEqual(t['id'], transfer['id'], 'Unexpected transfer id') - - ts = tx_api.get_all(self.ctxt) - self.assertEqual(1, len(ts), 'Unexpected number of transfers.') - - nctxt = context.RequestContext(user_id=fake.USER2_ID, - project_id=fake.PROJECT2_ID) - utils.create_volume(nctxt, updated_at=self.updated_at) - self.assertRaises(exception.TransferNotFound, - tx_api.get, - nctxt, - transfer['id']) - - ts = tx_api.get_all(nctxt) - self.assertEqual(0, len(ts), 'Unexpected transfers listed.') - - @mock.patch('cinder.volume.utils.notify_about_volume_usage') - def test_delete_transfer_with_deleted_volume(self, mock_notify): 
- # create a volume - volume = utils.create_volume(self.ctxt, updated_at=self.updated_at) - # create a transfer - tx_api = transfer_api.API() - transfer = tx_api.create(self.ctxt, volume['id'], 'Description') - t = tx_api.get(self.ctxt, transfer['id']) - self.assertEqual(t['id'], transfer['id'], 'Unexpected transfer id') - - calls = [mock.call(self.ctxt, mock.ANY, "transfer.create.start"), - mock.call(self.ctxt, mock.ANY, "transfer.create.end")] - mock_notify.assert_has_calls(calls) - self.assertEqual(2, mock_notify.call_count) - # force delete volume - volume.destroy() - # Make sure transfer has been deleted. - self.assertRaises(exception.TransferNotFound, - tx_api.get, - self.ctxt, - transfer['id']) diff --git a/cinder/tests/unit/test_volume_types.py b/cinder/tests/unit/test_volume_types.py deleted file mode 100644 index cd4572583..000000000 --- a/cinder/tests/unit/test_volume_types.py +++ /dev/null @@ -1,577 +0,0 @@ -# Copyright (c) 2011 Zadara Storage Inc. -# Copyright (c) 2011 OpenStack Foundation -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
-"""Unit Tests for volume types code.""" - - -import datetime -import mock -import time - -from oslo_config import cfg - -from cinder import context -from cinder import db -from cinder.db.sqlalchemy import api as db_api -from cinder.db.sqlalchemy import models -from cinder import exception -from cinder import test -from cinder.tests.unit import conf_fixture -from cinder.tests.unit import fake_constants as fake -from cinder.volume import qos_specs -from cinder.volume import volume_types - - -class VolumeTypeTestCase(test.TestCase): - """Test cases for volume type code.""" - def setUp(self): - super(VolumeTypeTestCase, self).setUp() - - self.ctxt = context.get_admin_context() - self.vol_type1_name = str(int(time.time())) - self.vol_type1_specs = dict(type="physical drive", - drive_type="SAS", - size="300", - rpm="7200", - visible="True") - self.vol_type1_description = self.vol_type1_name + '_desc' - - def test_volume_type_create_then_destroy(self): - """Ensure volume types can be created and deleted.""" - project_id = fake.PROJECT_ID - prev_all_vtypes = volume_types.get_all_types(self.ctxt) - - # create - type_ref = volume_types.create(self.ctxt, - self.vol_type1_name, - self.vol_type1_specs, - description=self.vol_type1_description, - projects=[project_id], is_public=False) - new = volume_types.get_volume_type_by_name(self.ctxt, - self.vol_type1_name) - - self.assertEqual(self.vol_type1_description, new['description']) - - for k, v in self.vol_type1_specs.items(): - self.assertEqual(v, new['extra_specs'][k], - 'one of fields does not match') - - new_all_vtypes = volume_types.get_all_types(self.ctxt) - self.assertEqual(len(prev_all_vtypes) + 1, - len(new_all_vtypes), - 'drive type was not created') - # Assert that volume type is associated to a project - vol_type_access = db.volume_type_access_get_all(self.ctxt, - type_ref['id']) - self.assertIn(project_id, [a.project_id for a in vol_type_access]) - - # update - new_type_name = self.vol_type1_name + '_updated' - 
new_type_desc = self.vol_type1_description + '_updated' - volume_types.update(self.ctxt, type_ref.id, new_type_name, - new_type_desc) - type_ref_updated = volume_types.get_volume_type(self.ctxt, type_ref.id) - self.assertEqual(new_type_name, type_ref_updated['name']) - self.assertEqual(new_type_desc, type_ref_updated['description']) - - # destroy - volume_types.destroy(self.ctxt, type_ref['id']) - new_all_vtypes = volume_types.get_all_types(self.ctxt) - self.assertEqual(prev_all_vtypes, - new_all_vtypes, - 'drive type was not deleted') - # Assert that associated volume type access is deleted successfully - # on destroying the volume type - vol_type_access = db_api._volume_type_access_query( - self.ctxt).filter_by(volume_type_id=type_ref['id']).all() - self.assertFalse(vol_type_access) - - @mock.patch('cinder.quota.VolumeTypeQuotaEngine.' - 'update_quota_resource') - def test_update_volume_type_name(self, mock_update_quota): - type_ref = volume_types.create(self.ctxt, - self.vol_type1_name, - self.vol_type1_specs, - description=self.vol_type1_description) - new_type_name = self.vol_type1_name + '_updated' - volume_types.update(self.ctxt, - type_ref.id, - new_type_name, - None) - mock_update_quota.assert_called_once_with(self.ctxt, - self.vol_type1_name, - new_type_name) - volume_types.destroy(self.ctxt, type_ref.id) - - def test_volume_type_create_then_destroy_with_non_admin(self): - """Ensure volume types can be created and deleted by non-admin user. - - If a non-admn user is authorized at API, volume type operations - should be permitted. 
- """ - prev_all_vtypes = volume_types.get_all_types(self.ctxt) - self.ctxt = context.RequestContext('fake', 'fake', is_admin=False) - - # create - type_ref = volume_types.create(self.ctxt, - self.vol_type1_name, - self.vol_type1_specs, - description=self.vol_type1_description) - new = volume_types.get_volume_type_by_name(self.ctxt, - self.vol_type1_name) - self.assertEqual(self.vol_type1_description, new['description']) - new_all_vtypes = volume_types.get_all_types(self.ctxt) - self.assertEqual(len(prev_all_vtypes) + 1, - len(new_all_vtypes), - 'drive type was not created') - - # update - new_type_name = self.vol_type1_name + '_updated' - new_type_desc = self.vol_type1_description + '_updated' - volume_types.update(self.ctxt, type_ref.id, new_type_name, - new_type_desc) - type_ref_updated = volume_types.get_volume_type(self.ctxt, type_ref.id) - self.assertEqual(new_type_name, type_ref_updated['name']) - self.assertEqual(new_type_desc, type_ref_updated['description']) - - # destroy - volume_types.destroy(self.ctxt, type_ref['id']) - new_all_vtypes = volume_types.get_all_types(self.ctxt) - self.assertEqual(prev_all_vtypes, - new_all_vtypes, - 'drive type was not deleted') - - def test_create_volume_type_with_invalid_params(self): - """Ensure exception will be returned.""" - vol_type_invalid_specs = "invalid_extra_specs" - - self.assertRaises(exception.VolumeTypeCreateFailed, - volume_types.create, self.ctxt, - self.vol_type1_name, - vol_type_invalid_specs) - - def test_get_all_volume_types(self): - """Ensures that all volume types can be retrieved.""" - session = db_api.get_session() - total_volume_types = session.query(models.VolumeTypes).count() - vol_types = volume_types.get_all_types(self.ctxt) - self.assertEqual(total_volume_types, len(vol_types)) - - def test_get_default_volume_type(self): - """Ensures default volume type can be retrieved.""" - volume_types.create(self.ctxt, conf_fixture.def_vol_type, {}) - default_vol_type = 
volume_types.get_default_volume_type() - self.assertEqual(conf_fixture.def_vol_type, - default_vol_type.get('name')) - - def test_default_volume_type_missing_in_db(self): - """Test default volume type is missing in database. - - Ensures proper exception raised if default volume type - is not in database. - """ - default_vol_type = volume_types.get_default_volume_type() - self.assertEqual({}, default_vol_type) - - def test_get_default_volume_type_under_non_default(self): - cfg.CONF.set_default('default_volume_type', None) - - self.assertEqual({}, volume_types.get_default_volume_type()) - - def test_non_existent_vol_type_shouldnt_delete(self): - """Ensures that volume type creation fails with invalid args.""" - self.assertRaises(exception.VolumeTypeNotFound, - volume_types.destroy, self.ctxt, "sfsfsdfdfs") - - def test_volume_type_with_volumes_shouldnt_delete(self): - """Ensures volume type deletion with associated volumes fail.""" - type_ref = volume_types.create(self.ctxt, self.vol_type1_name) - db.volume_create(self.ctxt, - {'id': '1', - 'updated_at': datetime.datetime(1, 1, 1, 1, 1, 1), - 'display_description': 'Test Desc', - 'size': 20, - 'status': 'available', - 'volume_type_id': type_ref['id']}) - self.assertRaises(exception.VolumeTypeInUse, - volume_types.destroy, self.ctxt, type_ref['id']) - - def test_repeated_vol_types_shouldnt_raise(self): - """Ensures that volume duplicates don't raise.""" - new_name = self.vol_type1_name + "dup" - type_ref = volume_types.create(self.ctxt, new_name) - volume_types.destroy(self.ctxt, type_ref['id']) - type_ref = volume_types.create(self.ctxt, new_name) - - def test_invalid_volume_types_params(self): - """Ensures that volume type creation fails with invalid args.""" - self.assertRaises(exception.InvalidVolumeType, - volume_types.destroy, self.ctxt, None) - self.assertRaises(exception.InvalidVolumeType, - volume_types.get_volume_type, self.ctxt, None) - self.assertRaises(exception.InvalidVolumeType, - 
volume_types.get_volume_type_by_name, - self.ctxt, None) - - def test_volume_type_get_by_id_and_name(self): - """Ensure volume types get returns same entry.""" - volume_types.create(self.ctxt, - self.vol_type1_name, - self.vol_type1_specs) - new = volume_types.get_volume_type_by_name(self.ctxt, - self.vol_type1_name) - - new2 = volume_types.get_volume_type(self.ctxt, new['id']) - self.assertEqual(new, new2) - - def test_volume_type_search_by_extra_spec(self): - """Ensure volume types get by extra spec returns correct type.""" - volume_types.create(self.ctxt, "type1", {"key1": "val1", - "key2": "val2"}) - volume_types.create(self.ctxt, "type2", {"key2": "val2", - "key3": "val3"}) - volume_types.create(self.ctxt, "type3", {"key3": "another_value", - "key4": "val4"}) - - vol_types = volume_types.get_all_types( - self.ctxt, - filters={'extra_specs': {"key1": "val1"}}) - self.assertEqual(1, len(vol_types)) - self.assertIn("type1", vol_types.keys()) - self.assertEqual({"key1": "val1", "key2": "val2"}, - vol_types['type1']['extra_specs']) - - vol_types = volume_types.get_all_types( - self.ctxt, - filters={'extra_specs': {"key2": "val2"}}) - self.assertEqual(2, len(vol_types)) - self.assertIn("type1", vol_types.keys()) - self.assertIn("type2", vol_types.keys()) - - vol_types = volume_types.get_all_types( - self.ctxt, - filters={'extra_specs': {"key3": "val3"}}) - self.assertEqual(1, len(vol_types)) - self.assertIn("type2", vol_types.keys()) - - def test_volume_type_search_by_extra_spec_multiple(self): - """Ensure volume types get by extra spec returns correct type.""" - volume_types.create(self.ctxt, "type1", {"key1": "val1", - "key2": "val2", - "key3": "val3"}) - volume_types.create(self.ctxt, "type2", {"key2": "val2", - "key3": "val3"}) - volume_types.create(self.ctxt, "type3", {"key1": "val1", - "key3": "val3", - "key4": "val4"}) - - vol_types = volume_types.get_all_types( - self.ctxt, - filters={'extra_specs': {"key1": "val1", "key3": "val3"}}) - self.assertEqual(2, 
len(vol_types)) - self.assertIn("type1", vol_types.keys()) - self.assertIn("type3", vol_types.keys()) - self.assertEqual({"key1": "val1", "key2": "val2", "key3": "val3"}, - vol_types['type1']['extra_specs']) - self.assertEqual({"key1": "val1", "key3": "val3", "key4": "val4"}, - vol_types['type3']['extra_specs']) - - def test_is_encrypted(self): - volume_type = volume_types.create(self.ctxt, "type1") - volume_type_id = volume_type.get('id') - self.assertFalse(volume_types.is_encrypted(self.ctxt, volume_type_id)) - - encryption = { - 'control_location': 'front-end', - 'provider': 'fake_provider', - } - db_api.volume_type_encryption_create(self.ctxt, volume_type_id, - encryption) - self.assertTrue(volume_types.is_encrypted(self.ctxt, volume_type_id)) - - def test_add_access(self): - project_id = fake.PROJECT_ID - vtype = volume_types.create(self.ctxt, 'type1', is_public=False) - vtype_id = vtype.get('id') - - volume_types.add_volume_type_access(self.ctxt, vtype_id, project_id) - vtype_access = db.volume_type_access_get_all(self.ctxt, vtype_id) - self.assertIn(project_id, [a.project_id for a in vtype_access]) - - def test_remove_access(self): - project_id = fake.PROJECT_ID - vtype = volume_types.create(self.ctxt, 'type1', projects=[project_id], - is_public=False) - vtype_id = vtype.get('id') - - volume_types.remove_volume_type_access(self.ctxt, vtype_id, project_id) - vtype_access = db.volume_type_access_get_all(self.ctxt, vtype_id) - self.assertNotIn(project_id, vtype_access) - - def test_add_access_with_non_admin(self): - self.ctxt = context.RequestContext('fake', 'fake', is_admin=False) - project_id = fake.PROJECT_ID - vtype = volume_types.create(self.ctxt, 'type1', is_public=False) - vtype_id = vtype.get('id') - - volume_types.add_volume_type_access(self.ctxt, vtype_id, project_id) - vtype_access = db.volume_type_access_get_all(self.ctxt.elevated(), - vtype_id) - self.assertIn(project_id, [a.project_id for a in vtype_access]) - - def 
test_remove_access_with_non_admin(self): - self.ctxt = context.RequestContext('fake', 'fake', is_admin=False) - project_id = fake.PROJECT_ID - vtype = volume_types.create(self.ctxt, 'type1', projects=[project_id], - is_public=False) - vtype_id = vtype.get('id') - - volume_types.remove_volume_type_access(self.ctxt, vtype_id, project_id) - vtype_access = db.volume_type_access_get_all(self.ctxt.elevated(), - vtype_id) - self.assertNotIn(project_id, vtype_access) - - def test_get_volume_type_qos_specs(self): - qos_ref = qos_specs.create(self.ctxt, 'qos-specs-1', {'k1': 'v1', - 'k2': 'v2', - 'k3': 'v3'}) - type_ref = volume_types.create(self.ctxt, "type1", {"key2": "val2", - "key3": "val3"}) - res = volume_types.get_volume_type_qos_specs(type_ref['id']) - self.assertIsNone(res['qos_specs']) - qos_specs.associate_qos_with_type(self.ctxt, - qos_ref['id'], - type_ref['id']) - - expected = {'qos_specs': {'id': qos_ref['id'], - 'name': 'qos-specs-1', - 'consumer': 'back-end', - 'specs': { - 'k1': 'v1', - 'k2': 'v2', - 'k3': 'v3'}}} - res = volume_types.get_volume_type_qos_specs(type_ref['id']) - self.assertDictEqual(expected, res) - - def test_volume_types_diff(self): - # type_ref 1 and 2 have the same extra_specs, while 3 has different - keyvals1 = {"key1": "val1", "key2": "val2"} - keyvals2 = {"key1": "val0", "key2": "val2"} - type_ref1 = volume_types.create(self.ctxt, "type1", keyvals1) - type_ref2 = volume_types.create(self.ctxt, "type2", keyvals1) - type_ref3 = volume_types.create(self.ctxt, "type3", keyvals2) - - # Check equality with only extra_specs - diff, same = volume_types.volume_types_diff(self.ctxt, type_ref1['id'], - type_ref2['id']) - self.assertTrue(same) - self.assertEqual(('val1', 'val1'), diff['extra_specs']['key1']) - diff, same = volume_types.volume_types_diff(self.ctxt, type_ref1['id'], - type_ref3['id']) - self.assertFalse(same) - self.assertEqual(('val1', 'val0'), diff['extra_specs']['key1']) - - # qos_ref 1 and 2 have the same specs, while 3 has 
different - qos_keyvals1 = {'k1': 'v1', 'k2': 'v2', 'k3': 'v3'} - qos_keyvals2 = {'k1': 'v0', 'k2': 'v2', 'k3': 'v3'} - qos_ref1 = qos_specs.create(self.ctxt, 'qos-specs-1', qos_keyvals1) - qos_ref2 = qos_specs.create(self.ctxt, 'qos-specs-2', qos_keyvals1) - qos_ref3 = qos_specs.create(self.ctxt, 'qos-specs-3', qos_keyvals2) - - # Check equality with qos specs too - qos_specs.associate_qos_with_type(self.ctxt, qos_ref1['id'], - type_ref1['id']) - qos_specs.associate_qos_with_type(self.ctxt, qos_ref2['id'], - type_ref2['id']) - diff, same = volume_types.volume_types_diff(self.ctxt, type_ref1['id'], - type_ref2['id']) - self.assertTrue(same) - self.assertEqual(('val1', 'val1'), diff['extra_specs']['key1']) - self.assertEqual(('v1', 'v1'), diff['qos_specs']['k1']) - qos_specs.disassociate_qos_specs(self.ctxt, qos_ref2['id'], - type_ref2['id']) - qos_specs.associate_qos_with_type(self.ctxt, qos_ref3['id'], - type_ref2['id']) - diff, same = volume_types.volume_types_diff(self.ctxt, type_ref1['id'], - type_ref2['id']) - self.assertFalse(same) - self.assertEqual(('val1', 'val1'), diff['extra_specs']['key1']) - self.assertEqual(('v1', 'v0'), diff['qos_specs']['k1']) - qos_specs.disassociate_qos_specs(self.ctxt, qos_ref3['id'], - type_ref2['id']) - qos_specs.associate_qos_with_type(self.ctxt, qos_ref2['id'], - type_ref2['id']) - - # And add encryption for good measure - enc_keyvals1 = {'cipher': 'c1', 'key_size': 256, 'provider': 'p1', - 'control_location': 'front-end', - 'encryption_id': 'uuid1'} - enc_keyvals2 = {'cipher': 'c1', 'key_size': 128, 'provider': 'p1', - 'control_location': 'front-end', - 'encryption_id': 'uuid2'} - db.volume_type_encryption_create(self.ctxt, type_ref1['id'], - enc_keyvals1) - db.volume_type_encryption_create(self.ctxt, type_ref2['id'], - enc_keyvals2) - diff, same = volume_types.volume_types_diff(self.ctxt, type_ref1['id'], - type_ref2['id']) - self.assertFalse(same) - self.assertEqual(('val1', 'val1'), diff['extra_specs']['key1']) - 
self.assertEqual(('v1', 'v1'), diff['qos_specs']['k1']) - self.assertEqual((256, 128), diff['encryption']['key_size']) - - # Check diff equals type specs when one type is None - diff, same = volume_types.volume_types_diff(self.ctxt, None, - type_ref1['id']) - self.assertFalse(same) - self.assertEqual({'key1': (None, 'val1'), 'key2': (None, 'val2')}, - diff['extra_specs']) - self.assertEqual({'consumer': (None, 'back-end'), - 'k1': (None, 'v1'), - 'k2': (None, 'v2'), - 'k3': (None, 'v3')}, diff['qos_specs']) - self.assertEqual({'cipher': (None, 'c1'), - 'control_location': (None, 'front-end'), - 'deleted': (None, False), - 'key_size': (None, 256), - 'provider': (None, 'p1'), - 'encryption_id': (None, 'uuid1')}, - diff['encryption']) - - def test_encryption_create(self): - volume_type = volume_types.create(self.ctxt, "type1") - volume_type_id = volume_type.get('id') - encryption = { - 'control_location': 'front-end', - 'provider': 'fake_provider', - } - db_api.volume_type_encryption_create(self.ctxt, volume_type_id, - encryption) - self.assertTrue(volume_types.is_encrypted(self.ctxt, volume_type_id)) - - def test_get_volume_type_encryption(self): - volume_type = volume_types.create(self.ctxt, "type1") - volume_type_id = volume_type.get('id') - encryption = { - 'control_location': 'front-end', - 'provider': 'fake_provider', - } - db.volume_type_encryption_create(self.ctxt, volume_type_id, - encryption) - - ret = volume_types.get_volume_type_encryption(self.ctxt, - volume_type_id) - self.assertIsNotNone(ret) - - def test_get_volume_type_encryption_without_volume_type_id(self): - ret = volume_types.get_volume_type_encryption(self.ctxt, None) - self.assertIsNone(ret) - - def test_check_public_volume_type_failed(self): - project_id = fake.PROJECT_ID - volume_type = volume_types.create(self.ctxt, "type1") - volume_type_id = volume_type.get('id') - self.assertRaises(exception.InvalidVolumeType, - volume_types.add_volume_type_access, - self.ctxt, volume_type_id, project_id) 
- self.assertRaises(exception.InvalidVolumeType, - volume_types.remove_volume_type_access, - self.ctxt, volume_type_id, project_id) - - def test_check_private_volume_type(self): - volume_type = volume_types.create(self.ctxt, "type1", is_public=False) - volume_type_id = volume_type.get('id') - self.assertFalse(volume_types.is_public_volume_type(self.ctxt, - volume_type_id)) - - def test_ensure__extra_specs_for_non_admin(self): - # non-admin users get extra-specs back in type-get/list etc at DB layer - ctxt = context.RequestContext('average-joe', - 'd802f078-0af1-4e6b-8c02-7fac8d4339aa', - auth_token='token', - is_admin=False) - volume_types.create(self.ctxt, "type-test", is_public=False) - vtype = volume_types.get_volume_type_by_name(ctxt, 'type-test') - self.assertIsNotNone(vtype.get('extra_specs', None)) - - def test_ensure_extra_specs_for_admin(self): - # admin users should get extra-specs back in type-get/list etc - volume_types.create(self.ctxt, "type-test", is_public=False) - vtype = volume_types.get_volume_type_by_name(self.ctxt, 'type-test') - self.assertIsNotNone(vtype.get('extra_specs', None)) - - @mock.patch('cinder.volume.volume_types.get_volume_type_encryption') - def _exec_volume_types_encryption_changed(self, enc1, enc2, - expected_result, - mock_get_encryption): - def _get_encryption(ctxt, type_id): - if enc1 and enc1['volume_type_id'] == type_id: - return enc1 - if enc2 and enc2['volume_type_id'] == type_id: - return enc2 - return None - - mock_get_encryption.side_effect = _get_encryption - actual_result = volume_types.volume_types_encryption_changed( - self.ctxt, fake.VOLUME_TYPE_ID, fake.VOLUME_TYPE2_ID) - self.assertEqual(expected_result, actual_result) - - def test_volume_types_encryption_changed(self): - enc1 = {'volume_type_id': fake.VOLUME_TYPE_ID, - 'cipher': 'fake', - 'created_at': 'time1', } - enc2 = {'volume_type_id': fake.VOLUME_TYPE2_ID, - 'cipher': 'fake', - 'created_at': 'time2', } - self._exec_volume_types_encryption_changed(enc1, 
enc2, False) - - def test_volume_types_encryption_changed2(self): - enc1 = {'volume_type_id': fake.VOLUME_TYPE_ID, - 'cipher': 'fake1', - 'created_at': 'time1', } - enc2 = {'volume_type_id': fake.VOLUME_TYPE2_ID, - 'cipher': 'fake2', - 'created_at': 'time1', } - self._exec_volume_types_encryption_changed(enc1, enc2, True) - - def test_volume_types_encryption_changed3(self): - self._exec_volume_types_encryption_changed(None, None, False) - - def test_volume_types_encryption_changed4(self): - enc1 = {'volume_type_id': fake.VOLUME_TYPE_ID, - 'cipher': 'fake1', - 'created_at': 'time1', } - self._exec_volume_types_encryption_changed(enc1, None, True) - - @mock.patch('cinder.volume.volume_types.CONF') - @mock.patch('cinder.volume.volume_types.rpc') - def test_notify_about_volume_type_access_usage(self, mock_rpc, - mock_conf): - mock_conf.host = 'host1' - project_id = fake.PROJECT_ID - volume_type_id = fake.VOLUME_TYPE_ID - - output = volume_types.notify_about_volume_type_access_usage( - mock.sentinel.context, - volume_type_id, - project_id, - 'test_suffix') - - self.assertIsNone(output) - mock_rpc.get_notifier.assert_called_once_with('volume_type_project', - 'host1') - mock_rpc.get_notifier.return_value.info.assert_called_once_with( - mock.sentinel.context, - 'volume_type_project.test_suffix', - {'volume_type_id': volume_type_id, - 'project_id': project_id}) diff --git a/cinder/tests/unit/test_volume_types_extra_specs.py b/cinder/tests/unit/test_volume_types_extra_specs.py deleted file mode 100644 index 07ae7258a..000000000 --- a/cinder/tests/unit/test_volume_types_extra_specs.py +++ /dev/null @@ -1,122 +0,0 @@ -# Copyright (c) 2011 Zadara Storage Inc. -# Copyright (c) 2011 OpenStack Foundation -# Copyright 2011 University of Southern California -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. 
You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -""" -Unit Tests for volume types extra specs code -""" - -from cinder import context -from cinder import db -from cinder import test - - -class VolumeTypeExtraSpecsTestCase(test.TestCase): - - def setUp(self): - super(VolumeTypeExtraSpecsTestCase, self).setUp() - self.context = context.get_admin_context() - self.vol_type1 = dict(name="TEST: Regular volume test") - self.vol_type1_specs = dict(vol_extra1="value1", - vol_extra2="value2", - vol_extra3=3) - self.vol_type1['extra_specs'] = self.vol_type1_specs - ref = db.volume_type_create(self.context, self.vol_type1) - self.addCleanup(db.volume_type_destroy, context.get_admin_context(), - self.vol_type1['id']) - self.volume_type1_id = ref.id - for k, v in self.vol_type1_specs.items(): - self.vol_type1_specs[k] = str(v) - - self.vol_type2_noextra = dict(name="TEST: Volume type without extra") - ref = db.volume_type_create(self.context, self.vol_type2_noextra) - self.addCleanup(db.volume_type_destroy, context.get_admin_context(), - self.vol_type2_noextra['id']) - self.vol_type2_id = ref.id - - def test_volume_type_specs_get(self): - expected_specs = self.vol_type1_specs.copy() - actual_specs = db.volume_type_extra_specs_get( - context.get_admin_context(), - self.volume_type1_id) - self.assertEqual(expected_specs, actual_specs) - - def test_volume_type_extra_specs_delete(self): - expected_specs = self.vol_type1_specs.copy() - del expected_specs['vol_extra2'] - db.volume_type_extra_specs_delete(context.get_admin_context(), - self.volume_type1_id, - 'vol_extra2') - actual_specs = db.volume_type_extra_specs_get( - 
context.get_admin_context(), - self.volume_type1_id) - self.assertEqual(expected_specs, actual_specs) - - def test_volume_type_extra_specs_update(self): - expected_specs = self.vol_type1_specs.copy() - expected_specs['vol_extra3'] = "4" - db.volume_type_extra_specs_update_or_create( - context.get_admin_context(), - self.volume_type1_id, - dict(vol_extra3=4)) - actual_specs = db.volume_type_extra_specs_get( - context.get_admin_context(), - self.volume_type1_id) - self.assertEqual(expected_specs, actual_specs) - - def test_volume_type_extra_specs_create(self): - expected_specs = self.vol_type1_specs.copy() - expected_specs['vol_extra4'] = 'value4' - expected_specs['vol_extra5'] = 'value5' - db.volume_type_extra_specs_update_or_create( - context.get_admin_context(), - self.volume_type1_id, - dict(vol_extra4="value4", - vol_extra5="value5")) - actual_specs = db.volume_type_extra_specs_get( - context.get_admin_context(), - self.volume_type1_id) - self.assertEqual(expected_specs, actual_specs) - - def test_volume_type_get_with_extra_specs(self): - volume_type = db.volume_type_get( - context.get_admin_context(), - self.volume_type1_id) - self.assertEqual(self.vol_type1_specs, volume_type['extra_specs']) - - volume_type = db.volume_type_get( - context.get_admin_context(), - self.vol_type2_id) - self.assertEqual({}, volume_type['extra_specs']) - - def test_volume_type_get_by_name_with_extra_specs(self): - volume_type = db.volume_type_get_by_name( - context.get_admin_context(), - self.vol_type1['name']) - self.assertEqual(self.vol_type1_specs, volume_type['extra_specs']) - - volume_type = db.volume_type_get_by_name( - context.get_admin_context(), - self.vol_type2_noextra['name']) - self.assertEqual({}, volume_type['extra_specs']) - - def test_volume_type_get_all(self): - expected_specs = self.vol_type1_specs.copy() - - types = db.volume_type_get_all(context.get_admin_context()) - - self.assertEqual(expected_specs, - types[self.vol_type1['name']]['extra_specs']) - - 
self.assertEqual({}, - types[self.vol_type2_noextra['name']]['extra_specs']) diff --git a/cinder/tests/unit/test_volume_utils.py b/cinder/tests/unit/test_volume_utils.py deleted file mode 100644 index 67028a9c7..000000000 --- a/cinder/tests/unit/test_volume_utils.py +++ /dev/null @@ -1,1058 +0,0 @@ -# Copyright 2011 OpenStack Foundation -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -"""Tests For miscellaneous util methods used with volume.""" - - -import datetime -import io -import mock -import six - -import ddt -from oslo_concurrency import processutils -from oslo_config import cfg -from oslo_utils import units - -from cinder import context -from cinder import db -from cinder.db.sqlalchemy import models -from cinder import exception -from cinder import keymgr -from cinder.objects import fields -from cinder import test -from cinder.tests.unit.backup import fake_backup -from cinder.tests.unit import fake_constants as fake -from cinder.tests.unit import fake_group -from cinder.tests.unit import fake_snapshot -from cinder.tests.unit import fake_volume -from cinder import utils -from cinder.volume import throttling -from cinder.volume import utils as volume_utils -from cinder.volume import volume_types - - -CONF = cfg.CONF - - -class NotifyUsageTestCase(test.TestCase): - @mock.patch('cinder.volume.utils._usage_from_volume') - @mock.patch('cinder.volume.utils.CONF') - @mock.patch('cinder.volume.utils.rpc') - def 
test_notify_about_volume_usage(self, mock_rpc, mock_conf, mock_usage): - mock_conf.host = 'host1' - output = volume_utils.notify_about_volume_usage(mock.sentinel.context, - mock.sentinel.volume, - 'test_suffix') - self.assertIsNone(output) - mock_usage.assert_called_once_with(mock.sentinel.context, - mock.sentinel.volume) - mock_rpc.get_notifier.assert_called_once_with('volume', 'host1') - mock_rpc.get_notifier.return_value.info.assert_called_once_with( - mock.sentinel.context, - 'volume.test_suffix', - mock_usage.return_value) - - @mock.patch('cinder.volume.utils._usage_from_volume') - @mock.patch('cinder.volume.utils.CONF') - @mock.patch('cinder.volume.utils.rpc') - def test_notify_about_volume_usage_with_kwargs(self, mock_rpc, mock_conf, - mock_usage): - mock_conf.host = 'host1' - output = volume_utils.notify_about_volume_usage( - mock.sentinel.context, - mock.sentinel.volume, - 'test_suffix', - extra_usage_info={'a': 'b', 'c': 'd'}, - host='host2') - self.assertIsNone(output) - mock_usage.assert_called_once_with(mock.sentinel.context, - mock.sentinel.volume, a='b', c='d') - mock_rpc.get_notifier.assert_called_once_with('volume', 'host2') - mock_rpc.get_notifier.return_value.info.assert_called_once_with( - mock.sentinel.context, - 'volume.test_suffix', - mock_usage.return_value) - - @mock.patch('cinder.volume.utils._usage_from_snapshot') - @mock.patch('cinder.volume.utils.CONF') - @mock.patch('cinder.volume.utils.rpc') - def test_notify_about_snapshot_usage(self, mock_rpc, - mock_conf, mock_usage): - mock_conf.host = 'host1' - output = volume_utils.notify_about_snapshot_usage( - mock.sentinel.context, - mock.sentinel.snapshot, - 'test_suffix') - self.assertIsNone(output) - mock_usage.assert_called_once_with(mock.sentinel.snapshot, - mock.sentinel.context) - mock_rpc.get_notifier.assert_called_once_with('snapshot', 'host1') - mock_rpc.get_notifier.return_value.info.assert_called_once_with( - mock.sentinel.context, - 'snapshot.test_suffix', - 
mock_usage.return_value) - - @mock.patch('cinder.volume.utils._usage_from_snapshot') - @mock.patch('cinder.volume.utils.CONF') - @mock.patch('cinder.volume.utils.rpc') - def test_notify_about_snapshot_usage_with_kwargs(self, mock_rpc, mock_conf, - mock_usage): - mock_conf.host = 'host1' - output = volume_utils.notify_about_snapshot_usage( - mock.sentinel.context, - mock.sentinel.snapshot, - 'test_suffix', - extra_usage_info={'a': 'b', 'c': 'd'}, - host='host2') - self.assertIsNone(output) - mock_usage.assert_called_once_with(mock.sentinel.snapshot, - mock.sentinel.context, - a='b', c='d') - mock_rpc.get_notifier.assert_called_once_with('snapshot', 'host2') - mock_rpc.get_notifier.return_value.info.assert_called_once_with( - mock.sentinel.context, - 'snapshot.test_suffix', - mock_usage.return_value) - - @mock.patch('cinder.db.volume_get') - def test_usage_from_snapshot(self, volume_get): - raw_volume = { - 'id': fake.VOLUME_ID, - 'availability_zone': 'nova' - } - ctxt = context.get_admin_context() - volume_obj = fake_volume.fake_volume_obj(ctxt, **raw_volume) - volume_get.return_value = volume_obj - raw_snapshot = { - 'project_id': fake.PROJECT_ID, - 'user_id': fake.USER_ID, - 'volume': volume_obj, - 'volume_id': fake.VOLUME_ID, - 'volume_size': 1, - 'id': fake.SNAPSHOT_ID, - 'display_name': '11', - 'created_at': '2014-12-11T10:10:00', - 'status': fields.SnapshotStatus.ERROR, - 'deleted': '', - 'snapshot_metadata': [{'key': 'fake_snap_meta_key', - 'value': 'fake_snap_meta_value'}], - 'expected_attrs': ['metadata'], - } - - snapshot_obj = fake_snapshot.fake_snapshot_obj(ctxt, **raw_snapshot) - usage_info = volume_utils._usage_from_snapshot(snapshot_obj, ctxt) - expected_snapshot = { - 'tenant_id': fake.PROJECT_ID, - 'user_id': fake.USER_ID, - 'availability_zone': 'nova', - 'volume_id': fake.VOLUME_ID, - 'volume_size': 1, - 'snapshot_id': fake.SNAPSHOT_ID, - 'display_name': '11', - 'created_at': mock.ANY, - 'status': fields.SnapshotStatus.ERROR, - 'deleted': '', - 
'metadata': six.text_type({'fake_snap_meta_key': - u'fake_snap_meta_value'}), - } - self.assertDictEqual(expected_snapshot, usage_info) - - @mock.patch('cinder.db.volume_get') - def test_usage_from_deleted_snapshot(self, volume_get): - raw_volume = { - 'id': fake.VOLUME_ID, - 'availability_zone': 'nova', - 'deleted': 1 - } - ctxt = context.get_admin_context() - volume_obj = fake_volume.fake_volume_obj(ctxt, **raw_volume) - volume_get.return_value = volume_obj - - raw_snapshot = { - 'project_id': fake.PROJECT_ID, - 'user_id': fake.USER_ID, - 'volume': volume_obj, - 'volume_id': fake.VOLUME_ID, - 'volume_size': 1, - 'id': fake.SNAPSHOT_ID, - 'display_name': '11', - 'created_at': '2014-12-11T10:10:00', - 'status': fields.SnapshotStatus.ERROR, - 'deleted': '', - 'snapshot_metadata': [{'key': 'fake_snap_meta_key', - 'value': 'fake_snap_meta_value'}], - 'expected_attrs': ['metadata'], - } - - snapshot_obj = fake_snapshot.fake_snapshot_obj(ctxt, **raw_snapshot) - usage_info = volume_utils._usage_from_snapshot(snapshot_obj, ctxt) - expected_snapshot = { - 'tenant_id': fake.PROJECT_ID, - 'user_id': fake.USER_ID, - 'availability_zone': 'nova', - 'volume_id': fake.VOLUME_ID, - 'volume_size': 1, - 'snapshot_id': fake.SNAPSHOT_ID, - 'display_name': '11', - 'created_at': mock.ANY, - 'status': fields.SnapshotStatus.ERROR, - 'deleted': '', - 'metadata': six.text_type({'fake_snap_meta_key': - u'fake_snap_meta_value'}), - } - self.assertDictEqual(expected_snapshot, usage_info) - - @mock.patch('cinder.db.volume_glance_metadata_get') - @mock.patch('cinder.db.volume_attachment_get_all_by_volume_id') - def test_usage_from_volume(self, mock_attachment, mock_image_metadata): - mock_image_metadata.return_value = {'image_id': 'fake_image_id'} - mock_attachment.return_value = [{'instance_uuid': 'fake_instance_id'}] - raw_volume = { - 'project_id': '12b0330ec2584a', - 'user_id': '158cba1b8c2bb6008e', - 'host': 'fake_host', - 'availability_zone': 'nova', - 'volume_type_id': 
'fake_volume_type_id', - 'id': 'fake_volume_id', - 'size': 1, - 'display_name': 'test_volume', - 'created_at': datetime.datetime(2015, 1, 1, 1, 1, 1), - 'launched_at': datetime.datetime(2015, 1, 1, 1, 1, 1), - 'snapshot_id': None, - 'replication_status': None, - 'replication_extended_status': None, - 'replication_driver_data': None, - 'status': 'available', - 'volume_metadata': {'fake_metadata_key': 'fake_metadata_value'}, - } - usage_info = volume_utils._usage_from_volume( - mock.sentinel.context, - raw_volume) - expected_volume = { - 'tenant_id': '12b0330ec2584a', - 'user_id': '158cba1b8c2bb6008e', - 'host': 'fake_host', - 'availability_zone': 'nova', - 'volume_type': 'fake_volume_type_id', - 'volume_id': 'fake_volume_id', - 'size': 1, - 'display_name': 'test_volume', - 'created_at': '2015-01-01T01:01:01', - 'launched_at': '2015-01-01T01:01:01', - 'snapshot_id': None, - 'replication_status': None, - 'replication_extended_status': None, - 'replication_driver_data': None, - 'status': 'available', - 'metadata': {'fake_metadata_key': 'fake_metadata_value'}, - 'glance_metadata': {'image_id': 'fake_image_id'}, - 'volume_attachment': [{'instance_uuid': 'fake_instance_id'}], - } - self.assertEqual(expected_volume, usage_info) - - @mock.patch('cinder.volume.utils._usage_from_consistencygroup') - @mock.patch('cinder.volume.utils.CONF') - @mock.patch('cinder.volume.utils.rpc') - def test_notify_about_consistencygroup_usage(self, mock_rpc, - mock_conf, mock_usage): - mock_conf.host = 'host1' - output = volume_utils.notify_about_consistencygroup_usage( - mock.sentinel.context, - mock.sentinel.consistencygroup, - 'test_suffix') - self.assertIsNone(output) - mock_usage.assert_called_once_with(mock.sentinel.consistencygroup) - mock_rpc.get_notifier.assert_called_once_with('consistencygroup', - 'host1') - mock_rpc.get_notifier.return_value.info.assert_called_once_with( - mock.sentinel.context, - 'consistencygroup.test_suffix', - mock_usage.return_value) - - 
@mock.patch('cinder.volume.utils._usage_from_consistencygroup') - @mock.patch('cinder.volume.utils.CONF') - @mock.patch('cinder.volume.utils.rpc') - def test_notify_about_consistencygroup_usage_with_kwargs(self, mock_rpc, - mock_conf, - mock_usage): - mock_conf.host = 'host1' - output = volume_utils.notify_about_consistencygroup_usage( - mock.sentinel.context, - mock.sentinel.consistencygroup, - 'test_suffix', - extra_usage_info={'a': 'b', 'c': 'd'}, - host='host2') - self.assertIsNone(output) - mock_usage.assert_called_once_with(mock.sentinel.consistencygroup, - a='b', c='d') - mock_rpc.get_notifier.assert_called_once_with('consistencygroup', - 'host2') - mock_rpc.get_notifier.return_value.info.assert_called_once_with( - mock.sentinel.context, - 'consistencygroup.test_suffix', - mock_usage.return_value) - - @mock.patch('cinder.volume.utils._usage_from_cgsnapshot') - @mock.patch('cinder.volume.utils.CONF') - @mock.patch('cinder.volume.utils.rpc') - def test_notify_about_cgsnapshot_usage(self, mock_rpc, - mock_conf, mock_usage): - mock_conf.host = 'host1' - output = volume_utils.notify_about_cgsnapshot_usage( - mock.sentinel.context, - mock.sentinel.cgsnapshot, - 'test_suffix') - self.assertIsNone(output) - mock_usage.assert_called_once_with(mock.sentinel.cgsnapshot) - mock_rpc.get_notifier.assert_called_once_with('cgsnapshot', 'host1') - mock_rpc.get_notifier.return_value.info.assert_called_once_with( - mock.sentinel.context, - 'cgsnapshot.test_suffix', - mock_usage.return_value) - - @mock.patch('cinder.volume.utils._usage_from_cgsnapshot') - @mock.patch('cinder.volume.utils.CONF') - @mock.patch('cinder.volume.utils.rpc') - def test_notify_about_cgsnapshot_usage_with_kwargs(self, mock_rpc, - mock_conf, mock_usage): - mock_conf.host = 'host1' - output = volume_utils.notify_about_cgsnapshot_usage( - mock.sentinel.context, - mock.sentinel.cgsnapshot, - 'test_suffix', - extra_usage_info={'a': 'b', 'c': 'd'}, - host='host2') - self.assertIsNone(output) - 
mock_usage.assert_called_once_with(mock.sentinel.cgsnapshot, - a='b', c='d') - mock_rpc.get_notifier.assert_called_once_with('cgsnapshot', 'host2') - mock_rpc.get_notifier.return_value.info.assert_called_once_with( - mock.sentinel.context, - 'cgsnapshot.test_suffix', - mock_usage.return_value) - - def test_usage_from_backup(self): - raw_backup = { - 'project_id': fake.PROJECT_ID, - 'user_id': fake.USER_ID, - 'availability_zone': 'nova', - 'id': fake.BACKUP_ID, - 'host': 'fake_host', - 'display_name': 'test_backup', - 'created_at': datetime.datetime(2015, 1, 1, 1, 1, 1), - 'status': 'available', - 'volume_id': fake.VOLUME_ID, - 'size': 1, - 'service_metadata': None, - 'service': 'cinder.backup.drivers.swift', - 'fail_reason': None, - 'parent_id': fake.BACKUP2_ID, - 'num_dependent_backups': 0, - 'snapshot_id': None, - } - - ctxt = context.get_admin_context() - backup_obj = fake_backup.fake_backup_obj(ctxt, **raw_backup) - - # Make it easier to find out differences between raw and expected. 
- expected_backup = raw_backup.copy() - expected_backup['tenant_id'] = expected_backup.pop('project_id') - expected_backup['backup_id'] = expected_backup.pop('id') - expected_backup['created_at'] = ( - six.text_type(expected_backup['created_at']) + '+00:00') - - usage_info = volume_utils._usage_from_backup(backup_obj) - self.assertDictEqual(expected_backup, usage_info) - - -class LVMVolumeDriverTestCase(test.TestCase): - def test_convert_blocksize_option(self): - # Test valid volume_dd_blocksize - bs = volume_utils._check_blocksize('10M') - self.assertEqual('10M', bs) - - bs = volume_utils._check_blocksize('1xBBB') - self.assertEqual('1M', bs) - - # Test 'volume_dd_blocksize' with fraction - bs = volume_utils._check_blocksize('1.3M') - self.assertEqual('1M', bs) - - # Test zero-size 'volume_dd_blocksize' - bs = volume_utils._check_blocksize('0M') - self.assertEqual('1M', bs) - - # Test negative 'volume_dd_blocksize' - bs = volume_utils._check_blocksize('-1M') - self.assertEqual('1M', bs) - - # Test non-digital 'volume_dd_blocksize' - bs = volume_utils._check_blocksize('ABM') - self.assertEqual('1M', bs) - - @mock.patch('cinder.volume.utils._usage_from_capacity') - @mock.patch('cinder.volume.utils.CONF') - @mock.patch('cinder.volume.utils.rpc') - def test_notify_about_capacity_usage(self, mock_rpc, - mock_conf, mock_usage): - mock_conf.host = 'host1' - output = volume_utils.notify_about_capacity_usage( - mock.sentinel.context, - mock.sentinel.capacity, - 'test_suffix') - self.assertIsNone(output) - mock_usage.assert_called_once_with(mock.sentinel.capacity) - mock_rpc.get_notifier.assert_called_once_with('capacity', 'host1') - mock_rpc.get_notifier.return_value.info.assert_called_once_with( - mock.sentinel.context, - 'capacity.test_suffix', - mock_usage.return_value) - - @mock.patch('cinder.volume.utils._usage_from_capacity') - @mock.patch('cinder.volume.utils.CONF') - @mock.patch('cinder.volume.utils.rpc') - def test_notify_about_capacity_usage_with_kwargs(self, 
mock_rpc, mock_conf, - mock_usage): - mock_conf.host = 'host1' - output = volume_utils.notify_about_capacity_usage( - mock.sentinel.context, - mock.sentinel.capacity, - 'test_suffix', - extra_usage_info={'a': 'b', 'c': 'd'}, - host='host2') - self.assertIsNone(output) - mock_usage.assert_called_once_with(mock.sentinel.capacity, - a='b', c='d') - mock_rpc.get_notifier.assert_called_once_with('capacity', 'host2') - mock_rpc.get_notifier.return_value.info.assert_called_once_with( - mock.sentinel.context, - 'capacity.test_suffix', - mock_usage.return_value) - - def test_usage_from_capacity(self): - test_capacity = { - 'name_to_id': 'host1@backend1#pool1', - 'type': 'pool', - 'total': '10.01', - 'free': '8.01', - 'allocated': '2', - 'provisioned': '2', - 'virtual_free': '8.01', - 'reported_at': '2014-12-11T10:10:00', - } - - usage_info = volume_utils._usage_from_capacity( - test_capacity) - expected_capacity = { - 'name_to_id': 'host1@backend1#pool1', - 'total': '10.01', - 'free': '8.01', - 'allocated': '2', - 'provisioned': '2', - 'virtual_free': '8.01', - 'reported_at': '2014-12-11T10:10:00', - } - self.assertEqual(expected_capacity, usage_info) - - -class OdirectSupportTestCase(test.TestCase): - @mock.patch('cinder.utils.execute') - def test_check_for_odirect_support(self, mock_exec): - output = volume_utils.check_for_odirect_support('/dev/abc', '/dev/def') - self.assertTrue(output) - mock_exec.assert_called_once_with('dd', 'count=0', 'if=/dev/abc', - 'of=/dev/def', 'oflag=direct', - run_as_root=True) - mock_exec.reset_mock() - - output = volume_utils.check_for_odirect_support('/dev/abc', '/dev/def', - 'iflag=direct') - self.assertTrue(output) - mock_exec.assert_called_once_with('dd', 'count=0', 'if=/dev/abc', - 'of=/dev/def', 'iflag=direct', - run_as_root=True) - mock_exec.reset_mock() - - output = volume_utils.check_for_odirect_support('/dev/zero', - '/dev/def', - 'iflag=direct') - self.assertFalse(output) - mock_exec.reset_mock() - - output = 
volume_utils.check_for_odirect_support('/dev/zero', - '/dev/def') - self.assertTrue(output) - mock_exec.assert_called_once_with('dd', 'count=0', 'if=/dev/zero', - 'of=/dev/def', 'oflag=direct', - run_as_root=True) - - @mock.patch('cinder.utils.execute', - side_effect=processutils.ProcessExecutionError) - def test_check_for_odirect_support_error(self, mock_exec): - output = volume_utils.check_for_odirect_support('/dev/abc', '/dev/def') - self.assertFalse(output) - mock_exec.assert_called_once_with('dd', 'count=0', 'if=/dev/abc', - 'of=/dev/def', 'oflag=direct', - run_as_root=True) - mock_exec.reset_mock() - output = volume_utils.check_for_odirect_support('/dev/zero', - '/dev/def') - self.assertFalse(output) - mock_exec.assert_called_once_with('dd', 'count=0', 'if=/dev/zero', - 'of=/dev/def', 'oflag=direct', - run_as_root=True) - - -class ClearVolumeTestCase(test.TestCase): - @mock.patch('cinder.volume.utils.copy_volume', return_value=None) - @mock.patch('cinder.volume.utils.CONF') - def test_clear_volume_conf(self, mock_conf, mock_copy): - mock_conf.volume_clear = 'zero' - mock_conf.volume_clear_size = 0 - mock_conf.volume_dd_blocksize = '1M' - mock_conf.volume_clear_ionice = '-c3' - output = volume_utils.clear_volume(1024, 'volume_path') - self.assertIsNone(output) - mock_copy.assert_called_once_with('/dev/zero', 'volume_path', 1024, - '1M', sync=True, - execute=utils.execute, ionice='-c3', - throttle=None, sparse=False) - - @mock.patch('cinder.volume.utils.copy_volume', return_value=None) - @mock.patch('cinder.volume.utils.CONF') - def test_clear_volume_args(self, mock_conf, mock_copy): - mock_conf.volume_clear = 'should_override_with_arg' - mock_conf.volume_clear_size = 0 - mock_conf.volume_dd_blocksize = '1M' - mock_conf.volume_clear_ionice = '-c3' - output = volume_utils.clear_volume(1024, 'volume_path', 'zero', 1, - '-c0') - self.assertIsNone(output) - mock_copy.assert_called_once_with('/dev/zero', 'volume_path', 1, - '1M', sync=True, - execute=utils.execute, 
ionice='-c0', - throttle=None, sparse=False) - - @mock.patch('cinder.volume.utils.CONF') - def test_clear_volume_invalid_opt(self, mock_conf): - mock_conf.volume_clear = 'non_existent_volume_clearer' - mock_conf.volume_clear_size = 0 - mock_conf.volume_clear_ionice = None - self.assertRaises(exception.InvalidConfigurationValue, - volume_utils.clear_volume, - 1024, "volume_path") - - -class CopyVolumeTestCase(test.TestCase): - @mock.patch('cinder.volume.utils.check_for_odirect_support', - return_value=True) - @mock.patch('cinder.utils.execute') - @mock.patch('cinder.volume.utils.CONF') - def test_copy_volume_dd_iflag_and_oflag(self, mock_conf, mock_exec, - mock_support): - fake_throttle = throttling.Throttle(['fake_throttle']) - output = volume_utils.copy_volume('/dev/zero', '/dev/null', 1024, '3M', - sync=True, execute=utils.execute, - ionice=None, throttle=fake_throttle) - self.assertIsNone(output) - mock_exec.assert_called_once_with('fake_throttle', 'dd', - 'if=/dev/zero', - 'of=/dev/null', - 'count=%s' % units.Gi, - 'bs=3M', 'iflag=count_bytes,direct', - 'oflag=direct', run_as_root=True) - - mock_exec.reset_mock() - - output = volume_utils.copy_volume('/dev/zero', '/dev/null', 1024, '3M', - sync=False, execute=utils.execute, - ionice=None, throttle=fake_throttle) - self.assertIsNone(output) - mock_exec.assert_called_once_with('fake_throttle', 'dd', - 'if=/dev/zero', - 'of=/dev/null', - 'count=%s' % units.Gi, - 'bs=3M', 'iflag=count_bytes,direct', - 'oflag=direct', run_as_root=True) - - @mock.patch('cinder.volume.utils.check_for_odirect_support', - return_value=False) - @mock.patch('cinder.utils.execute') - def test_copy_volume_dd_no_iflag_or_oflag(self, mock_exec, mock_support): - fake_throttle = throttling.Throttle(['fake_throttle']) - output = volume_utils.copy_volume('/dev/zero', '/dev/null', 1024, '3M', - sync=True, execute=utils.execute, - ionice=None, throttle=fake_throttle) - self.assertIsNone(output) - mock_exec.assert_called_once_with('fake_throttle', 
'dd', - 'if=/dev/zero', - 'of=/dev/null', - 'count=%s' % units.Gi, - 'bs=3M', 'iflag=count_bytes', - 'conv=fdatasync', run_as_root=True) - - mock_exec.reset_mock() - - output = volume_utils.copy_volume('/dev/zero', '/dev/null', 1024, '3M', - sync=False, execute=utils.execute, - ionice=None, throttle=fake_throttle) - self.assertIsNone(output) - mock_exec.assert_called_once_with('fake_throttle', 'dd', - 'if=/dev/zero', - 'of=/dev/null', - 'count=%s' % units.Gi, - 'bs=3M', 'iflag=count_bytes', - run_as_root=True) - - @mock.patch('cinder.volume.utils.check_for_odirect_support', - return_value=False) - @mock.patch('cinder.utils.execute') - def test_copy_volume_dd_no_throttle(self, mock_exec, mock_support): - output = volume_utils.copy_volume('/dev/zero', '/dev/null', 1024, '3M', - sync=True, execute=utils.execute, - ionice=None) - self.assertIsNone(output) - mock_exec.assert_called_once_with('dd', 'if=/dev/zero', 'of=/dev/null', - 'count=%s' % units.Gi, 'bs=3M', - 'iflag=count_bytes', - 'conv=fdatasync', run_as_root=True) - - @mock.patch('cinder.volume.utils.check_for_odirect_support', - return_value=False) - @mock.patch('cinder.utils.execute') - def test_copy_volume_dd_with_ionice(self, mock_exec, mock_support): - output = volume_utils.copy_volume('/dev/zero', '/dev/null', 1024, '3M', - sync=True, execute=utils.execute, - ionice='-c3') - self.assertIsNone(output) - mock_exec.assert_called_once_with('ionice', '-c3', 'dd', - 'if=/dev/zero', 'of=/dev/null', - 'count=%s' % units.Gi, 'bs=3M', - 'iflag=count_bytes', - 'conv=fdatasync', run_as_root=True) - - @mock.patch('cinder.volume.utils.check_for_odirect_support', - return_value=False) - @mock.patch('cinder.utils.execute') - def test_copy_volume_dd_with_sparse(self, mock_exec, mock_support): - output = volume_utils.copy_volume('/dev/zero', '/dev/null', 1024, '3M', - sync=True, execute=utils.execute, - sparse=True) - self.assertIsNone(output) - mock_exec.assert_called_once_with('dd', 'if=/dev/zero', 'of=/dev/null', - 
'count=%s' % units.Gi, 'bs=3M', - 'iflag=count_bytes', - 'conv=fdatasync,sparse', - run_as_root=True) - - @mock.patch('cinder.volume.utils.check_for_odirect_support', - return_value=True) - @mock.patch('cinder.utils.execute') - def test_copy_volume_dd_with_sparse_iflag_and_oflag(self, mock_exec, - mock_support): - output = volume_utils.copy_volume('/dev/zero', '/dev/null', 1024, '3M', - sync=True, execute=utils.execute, - sparse=True) - self.assertIsNone(output) - mock_exec.assert_called_once_with('dd', 'if=/dev/zero', 'of=/dev/null', - 'count=%s' % units.Gi, 'bs=3M', - 'iflag=count_bytes,direct', - 'oflag=direct', 'conv=sparse', - run_as_root=True) - - @mock.patch('cinder.volume.utils._copy_volume_with_file') - def test_copy_volume_handles(self, mock_copy): - handle1 = io.RawIOBase() - handle2 = io.RawIOBase() - output = volume_utils.copy_volume(handle1, handle2, 1024, 1) - self.assertIsNone(output) - mock_copy.assert_called_once_with(handle1, handle2, 1024) - - @mock.patch('cinder.volume.utils._transfer_data') - @mock.patch('cinder.volume.utils._open_volume_with_path') - def test_copy_volume_handle_transfer(self, mock_open, mock_transfer): - handle = io.RawIOBase() - output = volume_utils.copy_volume('/foo/bar', handle, 1024, 1) - self.assertIsNone(output) - mock_transfer.assert_called_once_with(mock.ANY, mock.ANY, - 1073741824, mock.ANY) - - -@ddt.ddt -class VolumeUtilsTestCase(test.TestCase): - def test_null_safe_str(self): - self.assertEqual('', volume_utils.null_safe_str(None)) - self.assertEqual('', volume_utils.null_safe_str(False)) - self.assertEqual('', volume_utils.null_safe_str(0)) - self.assertEqual('', volume_utils.null_safe_str([])) - self.assertEqual('', volume_utils.null_safe_str(())) - self.assertEqual('', volume_utils.null_safe_str({})) - self.assertEqual('', volume_utils.null_safe_str(set())) - self.assertEqual('a', volume_utils.null_safe_str('a')) - self.assertEqual('1', volume_utils.null_safe_str(1)) - self.assertEqual('True', 
volume_utils.null_safe_str(True)) - - @mock.patch('cinder.utils.get_root_helper') - @mock.patch('cinder.brick.local_dev.lvm.LVM.supports_thin_provisioning') - def test_supports_thin_provisioning(self, mock_supports_thin, mock_helper): - self.assertEqual(mock_supports_thin.return_value, - volume_utils.supports_thin_provisioning()) - mock_helper.assert_called_once_with() - - @mock.patch('cinder.utils.get_root_helper') - @mock.patch('cinder.brick.local_dev.lvm.LVM.get_all_physical_volumes') - def test_get_all_physical_volumes(self, mock_get_vols, mock_helper): - self.assertEqual(mock_get_vols.return_value, - volume_utils.get_all_physical_volumes()) - mock_helper.assert_called_once_with() - - @mock.patch('cinder.utils.get_root_helper') - @mock.patch('cinder.brick.local_dev.lvm.LVM.get_all_volume_groups') - def test_get_all_volume_groups(self, mock_get_groups, mock_helper): - self.assertEqual(mock_get_groups.return_value, - volume_utils.get_all_volume_groups()) - mock_helper.assert_called_once_with() - - def test_generate_password(self): - password = volume_utils.generate_password() - self.assertTrue(any(c for c in password if c in '23456789')) - self.assertTrue(any(c for c in password - if c in 'abcdefghijkmnopqrstuvwxyz')) - self.assertTrue(any(c for c in password - if c in 'ABCDEFGHJKLMNPQRSTUVWXYZ')) - self.assertEqual(16, len(password)) - self.assertEqual(10, len(volume_utils.generate_password(10))) - - @mock.patch('cinder.volume.utils.generate_password') - def test_generate_username(self, mock_gen_pass): - output = volume_utils.generate_username() - self.assertEqual(mock_gen_pass.return_value, output) - - def test_extract_host(self): - host = 'Host' - # default level is 'backend' - self.assertEqual(host, - volume_utils.extract_host(host)) - self.assertEqual(host, - volume_utils.extract_host(host, 'host')) - self.assertEqual(host, - volume_utils.extract_host(host, 'backend')) - # default_pool_name doesn't work for level other than 'pool' - self.assertEqual(host, - 
volume_utils.extract_host(host, 'host', True)) - self.assertEqual(host, - volume_utils.extract_host(host, 'host', False)) - self.assertEqual(host, - volume_utils.extract_host(host, 'backend', True)) - self.assertEqual(host, - volume_utils.extract_host(host, 'backend', False)) - self.assertIsNone(volume_utils.extract_host(host, 'pool')) - self.assertEqual('_pool0', - volume_utils.extract_host(host, 'pool', True)) - - host = 'Host@Backend' - self.assertEqual('Host@Backend', - volume_utils.extract_host(host)) - self.assertEqual('Host', - volume_utils.extract_host(host, 'host')) - self.assertEqual(host, - volume_utils.extract_host(host, 'backend')) - self.assertIsNone(volume_utils.extract_host(host, 'pool')) - self.assertEqual('_pool0', - volume_utils.extract_host(host, 'pool', True)) - - host = 'Host@Backend#Pool' - pool = 'Pool' - self.assertEqual('Host@Backend', - volume_utils.extract_host(host)) - self.assertEqual('Host', - volume_utils.extract_host(host, 'host')) - self.assertEqual('Host@Backend', - volume_utils.extract_host(host, 'backend')) - self.assertEqual(pool, - volume_utils.extract_host(host, 'pool')) - self.assertEqual(pool, - volume_utils.extract_host(host, 'pool', True)) - - host = 'Host#Pool' - self.assertEqual('Host', - volume_utils.extract_host(host)) - self.assertEqual('Host', - volume_utils.extract_host(host, 'host')) - self.assertEqual('Host', - volume_utils.extract_host(host, 'backend')) - self.assertEqual(pool, - volume_utils.extract_host(host, 'pool')) - self.assertEqual(pool, - volume_utils.extract_host(host, 'pool', True)) - - def test_extract_host_none_string(self): - self.assertRaises(exception.InvalidVolume, - volume_utils.extract_host, - None) - - def test_append_host(self): - host = 'Host' - pool = 'Pool' - expected = 'Host#Pool' - self.assertEqual(expected, - volume_utils.append_host(host, pool)) - - pool = None - expected = 'Host' - self.assertEqual(expected, - volume_utils.append_host(host, pool)) - - host = None - pool = 'pool' - 
expected = None - self.assertEqual(expected, - volume_utils.append_host(host, pool)) - - host = None - pool = None - expected = None - self.assertEqual(expected, - volume_utils.append_host(host, pool)) - - def test_compare_hosts(self): - host_1 = 'fake_host@backend1' - host_2 = 'fake_host@backend1#pool1' - self.assertTrue(volume_utils.hosts_are_equivalent(host_1, host_2)) - - host_2 = 'fake_host@backend1' - self.assertTrue(volume_utils.hosts_are_equivalent(host_1, host_2)) - - host_2 = 'fake_host2@backend1' - self.assertFalse(volume_utils.hosts_are_equivalent(host_1, host_2)) - - @mock.patch('cinder.volume.utils.CONF') - def test_extract_id_from_volume_name_vol_id_pattern(self, conf_mock): - conf_mock.volume_name_template = 'volume-%s' - vol_id = 'd8cd1feb-2dcc-404d-9b15-b86fe3bec0a1' - vol_name = conf_mock.volume_name_template % vol_id - result = volume_utils.extract_id_from_volume_name(vol_name) - self.assertEqual(vol_id, result) - - @mock.patch('cinder.volume.utils.CONF') - def test_extract_id_from_volume_name_vol_id_vol_pattern(self, conf_mock): - conf_mock.volume_name_template = 'volume-%s-volume' - vol_id = 'd8cd1feb-2dcc-404d-9b15-b86fe3bec0a1' - vol_name = conf_mock.volume_name_template % vol_id - result = volume_utils.extract_id_from_volume_name(vol_name) - self.assertEqual(vol_id, result) - - @mock.patch('cinder.volume.utils.CONF') - def test_extract_id_from_volume_name_id_vol_pattern(self, conf_mock): - conf_mock.volume_name_template = '%s-volume' - vol_id = 'd8cd1feb-2dcc-404d-9b15-b86fe3bec0a1' - vol_name = conf_mock.volume_name_template % vol_id - result = volume_utils.extract_id_from_volume_name(vol_name) - self.assertEqual(vol_id, result) - - @mock.patch('cinder.volume.utils.CONF') - def test_extract_id_from_volume_name_no_match(self, conf_mock): - conf_mock.volume_name_template = '%s-volume' - vol_name = 'd8cd1feb-2dcc-404d-9b15-b86fe3bec0a1' - result = volume_utils.extract_id_from_volume_name(vol_name) - self.assertIsNone(result) - vol_name = 
'blahblahblah' - result = volume_utils.extract_id_from_volume_name(vol_name) - self.assertIsNone(result) - - @mock.patch('cinder.db.sqlalchemy.api.resource_exists', return_value=True) - def test_check_managed_volume_already_managed(self, exists_mock): - id_ = 'd8cd1feb-2dcc-404d-9b15-b86fe3bec0a1' - result = volume_utils.check_already_managed_volume(id_) - self.assertTrue(result) - exists_mock.assert_called_once_with(mock.ANY, models.Volume, id_) - - @mock.patch('cinder.db.sqlalchemy.api.resource_exists', return_value=False) - def test_check_managed_volume_not_managed_proper_uuid(self, exists_mock): - id_ = 'd8cd1feb-2dcc-404d-9b15-b86fe3bec0a1' - result = volume_utils.check_already_managed_volume(id_) - self.assertFalse(result) - exists_mock.assert_called_once_with(mock.ANY, models.Volume, id_) - - def test_check_managed_volume_not_managed_invalid_id(self): - result = volume_utils.check_already_managed_volume(1) - self.assertFalse(result) - result = volume_utils.check_already_managed_volume('not-a-uuid') - self.assertFalse(result) - - @mock.patch('cinder.volume.utils.CONF') - def test_extract_id_from_snapshot_name(self, conf_mock): - conf_mock.snapshot_name_template = '%s-snapshot' - snap_id = 'd8cd1feb-2dcc-404d-9b15-b86fe3bec0a1' - snap_name = conf_mock.snapshot_name_template % snap_id - result = volume_utils.extract_id_from_snapshot_name(snap_name) - self.assertEqual(snap_id, result) - - @mock.patch('cinder.volume.utils.CONF') - def test_extract_id_from_snapshot_name_no_match(self, conf_mock): - conf_mock.snapshot_name_template = '%s-snapshot' - snap_name = 'd8cd1feb-2dcc-404d-9b15-b86fe3bec0a1' - result = volume_utils.extract_id_from_snapshot_name(snap_name) - self.assertIsNone(result) - snap_name = 'blahblahblah' - result = volume_utils.extract_id_from_snapshot_name(snap_name) - self.assertIsNone(result) - - @ddt.data({"name": "vol02"}, '{"name": "vol02"}') - def test_paginate_entries_list_with_marker(self, marker): - entries = [{'reference': {'name': 
'vol03'}, 'size': 1}, - {'reference': {'name': 'vol01'}, 'size': 3}, - {'reference': {'name': 'vol02'}, 'size': 3}, - {'reference': {'name': 'vol04'}, 'size': 2}, - {'reference': {'name': 'vol06'}, 'size': 3}, - {'reference': {'name': 'vol07'}, 'size': 1}, - {'reference': {'name': 'vol05'}, 'size': 1}] - expected = [{'reference': {'name': 'vol04'}, 'size': 2}, - {'reference': {'name': 'vol03'}, 'size': 1}, - {'reference': {'name': 'vol05'}, 'size': 1}] - res = volume_utils.paginate_entries_list(entries, marker, 3, - 1, ['size', 'reference'], - ['desc', 'asc']) - self.assertEqual(expected, res) - - def test_paginate_entries_list_without_marker(self): - entries = [{'reference': {'name': 'vol03'}, 'size': 1}, - {'reference': {'name': 'vol01'}, 'size': 3}, - {'reference': {'name': 'vol02'}, 'size': 3}, - {'reference': {'name': 'vol04'}, 'size': 2}, - {'reference': {'name': 'vol06'}, 'size': 3}, - {'reference': {'name': 'vol07'}, 'size': 1}, - {'reference': {'name': 'vol05'}, 'size': 1}] - expected = [{'reference': {'name': 'vol07'}, 'size': 1}, - {'reference': {'name': 'vol06'}, 'size': 3}, - {'reference': {'name': 'vol05'}, 'size': 1}] - res = volume_utils.paginate_entries_list(entries, None, 3, None, - ['reference'], ['desc']) - self.assertEqual(expected, res) - - def test_paginate_entries_list_marker_invalid_format(self): - entries = [{'reference': {'name': 'vol03'}, 'size': 1}, - {'reference': {'name': 'vol01'}, 'size': 3}] - self.assertRaises(exception.InvalidInput, - volume_utils.paginate_entries_list, - entries, "invalid_format", 3, None, - ['size', 'reference'], ['desc', 'asc']) - - def test_paginate_entries_list_marker_not_found(self): - entries = [{'reference': {'name': 'vol03'}, 'size': 1}, - {'reference': {'name': 'vol01'}, 'size': 3}] - self.assertRaises(exception.InvalidInput, - volume_utils.paginate_entries_list, - entries, {'name': 'vol02'}, 3, None, - ['size', 'reference'], ['desc', 'asc']) - - def test_convert_config_string_to_dict(self): - 
test_string = "{'key-1'='val-1' 'key-2'='val-2' 'key-3'='val-3'}" - expected_dict = {'key-1': 'val-1', 'key-2': 'val-2', 'key-3': 'val-3'} - - self.assertEqual( - expected_dict, - volume_utils.convert_config_string_to_dict(test_string)) - - @mock.patch('cinder.volume.volume_types.is_encrypted', return_value=False) - def test_create_encryption_key_unencrypted(self, is_encrypted): - result = volume_utils.create_encryption_key(mock.ANY, - mock.ANY, - fake.VOLUME_TYPE_ID) - self.assertIsNone(result) - - @mock.patch('cinder.volume.volume_types.is_encrypted', return_value=True) - @mock.patch('cinder.volume.volume_types.get_volume_type_encryption') - @mock.patch('cinder.keymgr.conf_key_mgr.ConfKeyManager.create_key') - def test_create_encryption_key_encrypted(self, create_key, - get_volume_type_encryption, - is_encryption): - enc_key = {'cipher': 'aes-xts-plain64', - 'key_size': 256, - 'provider': 'p1', - 'control_location': 'front-end', - 'encryption_id': 'uuid1'} - ctxt = context.get_admin_context() - type_ref1 = volume_types.create(ctxt, "type1") - encryption = db.volume_type_encryption_create( - ctxt, type_ref1['id'], enc_key) - get_volume_type_encryption.return_value = encryption - CONF.set_override( - 'api_class', - 'cinder.keymgr.conf_key_mgr.ConfKeyManager', - group='key_manager') - key_manager = keymgr.API() - volume_utils.create_encryption_key(ctxt, - key_manager, - fake.VOLUME_TYPE_ID) - is_encryption.assert_called_once_with(ctxt, - fake.VOLUME_TYPE_ID) - get_volume_type_encryption.assert_called_once_with( - ctxt, - fake.VOLUME_TYPE_ID) - create_key.assert_called_once_with(ctxt, - algorithm='aes', - length=256) - - @ddt.data(' True', ' true', ' yes') - def test_is_replicated_spec_true(self, enabled): - res = volume_utils.is_replicated_spec({'replication_enabled': enabled}) - self.assertTrue(res) - - @ddt.data({}, None, {'key': 'value'}) - def test_is_replicated_no_specs(self, extra_specs): - res = volume_utils.is_replicated_spec(extra_specs) - 
self.assertFalse(res) - - @ddt.data(' False', ' false', ' f', 'baddata', 'bad data') - def test_is_replicated_spec_false(self, enabled): - res = volume_utils.is_replicated_spec({'replication_enabled': enabled}) - self.assertFalse(res) - - @mock.patch('cinder.db.group_get') - def test_group_get_by_id(self, mock_db_group_get): - expected = mock.Mock() - mock_db_group_get.return_value = expected - group_id = fake.GROUP_ID - actual = volume_utils.group_get_by_id(group_id) - self.assertEqual(expected, actual) - - @mock.patch('cinder.db.group_get') - def test_group_get_by_id_group_not_found(self, mock_db_group_get): - group_id = fake.GROUP_ID - mock_db_group_get.side_effect = exception.GroupNotFound( - group_id=group_id) - self.assertRaises( - exception.GroupNotFound, - volume_utils.group_get_by_id, - group_id - ) - - @ddt.data(' False', None, 'notASpecValueWeCareAbout') - def test_is_group_a_cg_snapshot_type_is_false(self, spec_value): - with mock.patch('cinder.volume.group_types' - '.get_group_type_specs') as mock_get_specs: - mock_get_specs.return_value = spec_value - group = fake_group.fake_group_obj( - None, group_type_id=fake.GROUP_TYPE_ID) - self.assertFalse(volume_utils.is_group_a_cg_snapshot_type(group)) - - @mock.patch('cinder.volume.group_types.get_group_type_specs') - def test_is_group_a_cg_snapshot_type_is_true(self, mock_get_specs): - mock_get_specs.return_value = ' True' - group = fake_group.fake_group_obj( - None, group_type_id=fake.GROUP_TYPE_ID) - self.assertTrue(volume_utils.is_group_a_cg_snapshot_type(group)) diff --git a/cinder/tests/unit/utils.py b/cinder/tests/unit/utils.py deleted file mode 100644 index 2d1528bed..000000000 --- a/cinder/tests/unit/utils.py +++ /dev/null @@ -1,542 +0,0 @@ -# Copyright 2011 OpenStack Foundation -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. 
You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -# - -import datetime -import socket -import sys -import uuid - -import mock -from oslo_config import cfg -from oslo_service import loopingcall -from oslo_utils import timeutils -import oslo_versionedobjects - -from cinder import context -from cinder import db -from cinder import exception -from cinder import objects -from cinder.objects import fields -from cinder.tests.unit import fake_constants as fake - - -CONF = cfg.CONF - - -def get_test_admin_context(): - return context.get_admin_context() - - -def obj_attr_is_set(obj_class): - """Method to allow setting the ID on an OVO on creation.""" - original_method = obj_class.obj_attr_is_set - - def wrapped(self, attr): - if attr == 'id' and not hasattr(self, 'id_first_call'): - self.id_first_call = False - return False - else: - original_method(self, attr) - return wrapped - - -def create_volume(ctxt, - host='test_host', - display_name='test_volume', - display_description='this is a test volume', - status='available', - migration_status=None, - size=1, - availability_zone='fake_az', - volume_type_id=None, - replication_status='disabled', - replication_extended_status=None, - replication_driver_data=None, - consistencygroup_id=None, - group_id=None, - previous_status=None, - testcase_instance=None, - id=None, - metadata=None, - **kwargs): - """Create a volume object in the DB.""" - vol = {} - vol['size'] = size - vol['host'] = host - vol['user_id'] = ctxt.user_id - vol['project_id'] = ctxt.project_id - vol['status'] = status - if migration_status: - vol['migration_status'] = migration_status - 
vol['display_name'] = display_name - vol['display_description'] = display_description - vol['attach_status'] = fields.VolumeAttachStatus.DETACHED - vol['availability_zone'] = availability_zone - if consistencygroup_id: - vol['consistencygroup_id'] = consistencygroup_id - if group_id: - vol['group_id'] = group_id - if volume_type_id: - vol['volume_type_id'] = volume_type_id - if metadata: - vol['metadata'] = metadata - for key in kwargs: - vol[key] = kwargs[key] - vol['replication_status'] = replication_status - - if replication_extended_status: - vol['replication_extended_status'] = replication_extended_status - if replication_driver_data: - vol['replication_driver_data'] = replication_driver_data - if previous_status: - vol['previous_status'] = previous_status - - if id: - with mock.patch('cinder.objects.Volume.obj_attr_is_set', - obj_attr_is_set(objects.Volume)): - volume = objects.Volume(ctxt, id=id, **vol) - volume.create() - else: - volume = objects.Volume(ctxt, **vol) - volume.create() - - # If we get a TestCase instance we add cleanup - if testcase_instance: - testcase_instance.addCleanup(volume.destroy) - return volume - - -def attach_volume(ctxt, volume_id, instance_uuid, attached_host, - mountpoint, mode='rw'): - - now = timeutils.utcnow() - values = {} - values['volume_id'] = volume_id - values['attached_host'] = attached_host - values['mountpoint'] = mountpoint - values['attach_time'] = now - - attachment = db.volume_attach(ctxt, values) - volume, updated_values = db.volume_attached( - ctxt, attachment['id'], instance_uuid, - attached_host, mountpoint, mode) - return volume - - -def create_snapshot(ctxt, - volume_id, - display_name='test_snapshot', - display_description='this is a test snapshot', - cgsnapshot_id = None, - status=fields.SnapshotStatus.CREATING, - testcase_instance=None, - id=None, - **kwargs): - vol = db.volume_get(ctxt, volume_id) - snap = objects.Snapshot(ctxt) - snap.volume_id = volume_id - snap.user_id = ctxt.user_id or fake.USER_ID 
- snap.project_id = ctxt.project_id or fake.PROJECT_ID - snap.status = status - snap.metadata = {} - snap.volume_size = vol['size'] - snap.display_name = display_name - snap.display_description = display_description - snap.cgsnapshot_id = cgsnapshot_id - - if id: - with mock.patch('cinder.objects.Snapshot.obj_attr_is_set', - obj_attr_is_set(objects.Snapshot)): - snap.id = id - snap.create() - else: - snap.create() - - # We do the update after creating the snapshot in case we want to set - # deleted field - snap.update(kwargs) - snap.save() - - # If we get a TestCase instance we add cleanup - if testcase_instance: - testcase_instance.addCleanup(snap.destroy) - return snap - - -def create_consistencygroup(ctxt, - host='test_host@fakedrv#fakepool', - name='test_cg', - description='this is a test cg', - status=fields.ConsistencyGroupStatus.AVAILABLE, - availability_zone='fake_az', - volume_type_id=None, - cgsnapshot_id=None, - source_cgid=None, - **kwargs): - """Create a consistencygroup object in the DB.""" - - cg = objects.ConsistencyGroup(ctxt) - cg.host = host - cg.user_id = ctxt.user_id or fake.USER_ID - cg.project_id = ctxt.project_id or fake.PROJECT_ID - cg.status = status - cg.name = name - cg.description = description - cg.availability_zone = availability_zone - - if volume_type_id: - cg.volume_type_id = volume_type_id - cg.cgsnapshot_id = cgsnapshot_id - cg.source_cgid = source_cgid - new_id = kwargs.pop('id', None) - cg.update(kwargs) - cg.create() - if new_id and new_id != cg.id: - db.consistencygroup_update(ctxt, cg.id, {'id': new_id}) - cg = objects.ConsistencyGroup.get_by_id(ctxt, new_id) - return cg - - -def create_group(ctxt, - host='test_host@fakedrv#fakepool', - name='test_group', - description='this is a test group', - status=fields.GroupStatus.AVAILABLE, - availability_zone='fake_az', - group_type_id=None, - volume_type_ids=None, - **kwargs): - """Create a group object in the DB.""" - - grp = objects.Group(ctxt) - grp.host = host - grp.user_id = 
ctxt.user_id or fake.USER_ID - grp.project_id = ctxt.project_id or fake.PROJECT_ID - grp.status = status - grp.name = name - grp.description = description - grp.availability_zone = availability_zone - if group_type_id: - grp.group_type_id = group_type_id - if volume_type_ids: - grp.volume_type_ids = volume_type_ids - new_id = kwargs.pop('id', None) - grp.update(kwargs) - grp.create() - if new_id and new_id != grp.id: - db.group_update(ctxt, grp.id, {'id': new_id}) - grp = objects.Group.get_by_id(ctxt, new_id) - return grp - - -def create_cgsnapshot(ctxt, - consistencygroup_id, - name='test_cgsnapshot', - description='this is a test cgsnapshot', - status='creating', - recursive_create_if_needed=True, - return_vo=True, - **kwargs): - """Create a cgsnapshot object in the DB.""" - values = { - 'user_id': ctxt.user_id or fake.USER_ID, - 'project_id': ctxt.project_id or fake.PROJECT_ID, - 'status': status, - 'name': name, - 'description': description, - 'consistencygroup_id': consistencygroup_id} - values.update(kwargs) - - if recursive_create_if_needed and consistencygroup_id: - create_cg = False - try: - objects.ConsistencyGroup.get_by_id(ctxt, - consistencygroup_id) - create_vol = not db.volume_get_all_by_group( - ctxt, consistencygroup_id) - except exception.ConsistencyGroupNotFound: - create_cg = True - create_vol = True - if create_cg: - create_consistencygroup(ctxt, id=consistencygroup_id) - if create_vol: - create_volume(ctxt, consistencygroup_id=consistencygroup_id) - - cgsnap = db.cgsnapshot_create(ctxt, values) - - if not return_vo: - return cgsnap - - return objects.CGSnapshot.get_by_id(ctxt, cgsnap.id) - - -def create_group_snapshot(ctxt, - group_id, - group_type_id=None, - name='test_group_snapshot', - description='this is a test group snapshot', - status='creating', - recursive_create_if_needed=True, - return_vo=True, - **kwargs): - """Create a group snapshot object in the DB.""" - values = { - 'user_id': ctxt.user_id or fake.USER_ID, - 'project_id': 
ctxt.project_id or fake.PROJECT_ID, - 'status': status, - 'name': name, - 'description': description, - 'group_id': group_id, - 'group_type_id': group_type_id} - values.update(kwargs) - - if recursive_create_if_needed and group_id: - create_grp = False - try: - objects.Group.get_by_id(ctxt, - group_id) - create_vol = not db.volume_get_all_by_generic_group( - ctxt, group_id) - except exception.GroupNotFound: - create_grp = True - create_vol = True - if create_grp: - create_group(ctxt, id=group_id, group_type_id=group_type_id) - if create_vol: - create_volume(ctxt, group_id=group_id) - - if not return_vo: - return db.group_snapshot_create(ctxt, values) - else: - group_snapshot = objects.GroupSnapshot(ctxt) - new_id = values.pop('id', None) - group_snapshot.update(values) - group_snapshot.create() - if new_id and new_id != group_snapshot.id: - db.group_snapshot_update(ctxt, group_snapshot.id, {'id': new_id}) - group_snapshot = objects.GroupSnapshot.get_by_id(ctxt, new_id) - return group_snapshot - - -def create_backup(ctxt, - volume_id=fake.VOLUME_ID, - display_name='test_backup', - display_description='This is a test backup', - status=fields.BackupStatus.CREATING, - parent_id=None, - temp_volume_id=None, - temp_snapshot_id=None, - snapshot_id=None, - data_timestamp=None, - **kwargs): - """Create a backup object.""" - values = { - 'user_id': ctxt.user_id or fake.USER_ID, - 'project_id': ctxt.project_id or fake.PROJECT_ID, - 'volume_id': volume_id, - 'status': status, - 'display_name': display_name, - 'display_description': display_description, - 'container': 'fake', - 'availability_zone': 'fake', - 'service': 'fake', - 'size': 5 * 1024 * 1024, - 'object_count': 22, - 'host': socket.gethostname(), - 'parent_id': parent_id, - 'temp_volume_id': temp_volume_id, - 'temp_snapshot_id': temp_snapshot_id, - 'snapshot_id': snapshot_id, - 'data_timestamp': data_timestamp, } - - values.update(kwargs) - backup = objects.Backup(ctxt, **values) - backup.create() - return backup - - 
-def create_message(ctxt, - project_id='fake_project', - request_id='test_backup', - resource_type='This is a test backup', - resource_uuid='3asf434-3s433df43-434adf3-343df443', - action=None, - message_level='Error'): - """Create a message in the DB.""" - expires_at = (timeutils.utcnow() + datetime.timedelta( - seconds=30)) - message_record = {'project_id': project_id, - 'request_id': request_id, - 'resource_type': resource_type, - 'resource_uuid': resource_uuid, - 'action_id': action[0] if action else '', - 'event_id': "VOLUME_VOLUME_%s_002" % action[0], - 'message_level': message_level, - 'expires_at': expires_at} - return db.message_create(ctxt, message_record) - - -def create_volume_type(ctxt, testcase_instance=None, **kwargs): - vol_type = db.volume_type_create(ctxt, kwargs) - - # If we get a TestCase instance we add cleanup - if testcase_instance: - testcase_instance.addCleanup(db.volume_type_destroy, ctxt, vol_type.id) - - return vol_type - - -def create_encryption(ctxt, vol_type_id, testcase_instance=None, **kwargs): - encrypt = db.volume_type_encryption_create(ctxt, vol_type_id, kwargs) - - # If we get a TestCase instance we add cleanup - if testcase_instance: - testcase_instance.addCleanup(db.volume_type_encryption_delete, ctxt, - vol_type_id) - return encrypt - - -def create_qos(ctxt, testcase_instance=None, **kwargs): - qos = db.qos_specs_create(ctxt, kwargs) - if testcase_instance: - testcase_instance.addCleanup(db.qos_specs_delete, ctxt, qos['id']) - return qos - - -class ZeroIntervalLoopingCall(loopingcall.FixedIntervalLoopingCall): - def start(self, interval, **kwargs): - kwargs['initial_delay'] = 0 - return super(ZeroIntervalLoopingCall, self).start(0, **kwargs) - - -def replace_obj_loader(testcase, obj): - def fake_obj_load_attr(self, name): - # This will raise KeyError for non existing fields as expected - field = self.fields[name] - - if field.default != oslo_versionedobjects.fields.UnspecifiedDefault: - value = field.default - elif 
field.nullable: - value = None - elif isinstance(field, oslo_versionedobjects.fields.StringField): - value = '' - elif isinstance(field, oslo_versionedobjects.fields.IntegerField): - value = 1 - elif isinstance(field, oslo_versionedobjects.fields.UUIDField): - value = uuid.uuid4() - setattr(self, name, value) - - testcase.addCleanup(setattr, obj, 'obj_load_attr', obj.obj_load_attr) - obj.obj_load_attr = fake_obj_load_attr - - -file_spec = None - - -def get_file_spec(): - """Return a Python 2 and 3 compatible version of a 'file' spec. - - This is to be used anywhere that you need to do something such as - mock.MagicMock(spec=file) to mock out something with the file attributes. - - Due to the 'file' built-in method being removed in Python 3 we need to do - some special handling for it. - """ - global file_spec - # set on first use - if file_spec is None: - if sys.version_info[0] == 3: - import _io - file_spec = list(set(dir(_io.TextIOWrapper)).union( - set(dir(_io.BytesIO)))) - else: - # NOTE(jsbryant): Pep8 on py3 based systems will fail because - # 'file' has been removed. Using noqa here to avoid the failure. - file_spec = file # noqa - - -def generate_timeout_series(timeout): - """Generate a series of times that exceeds the given timeout. - - Yields a series of fake time.time() floating point numbers - such that the difference between each pair in the series just - exceeds the timeout value that is passed in. Useful for - mocking time.time() in methods that otherwise wait for timeout - seconds. 
- """ - iteration = 0 - while True: - iteration += 1 - yield (iteration * timeout) + iteration - - -def default_service_values(): - return { - 'host': 'fake_host', - 'cluster_name': None, - 'binary': 'fake_binary', - 'topic': 'fake_topic', - 'report_count': 3, - 'disabled': False, - } - - -def create_service(ctxt, values=None): - values = values or {} - v = default_service_values() - v.update(values) - service = db.service_create(ctxt, v) - # We need to read the contents from the DB if we have set updated_at - # or created_at fields - if 'updated_at' in values or 'created_at' in values: - service = db.service_get(ctxt, service.id) - return service - - -def default_cluster_values(): - return { - 'name': 'cluster_name', - 'binary': 'cinder-volume', - 'disabled': False, - 'disabled_reason': None, - 'deleted': False, - 'updated_at': None, - 'deleted_at': None, - } - - -def create_cluster(ctxt, **values): - create_values = default_cluster_values() - create_values.update(values) - cluster = db.cluster_create(ctxt, create_values) - return db.cluster_get(ctxt, cluster.id, services_summary=True) - - -def create_populated_cluster(ctxt, num_services, num_down_svcs=0, **values): - """Helper method that creates a cluster with up and down services.""" - up_time = timeutils.utcnow() - down_time = (up_time - - datetime.timedelta(seconds=CONF.service_down_time + 1)) - cluster = create_cluster(ctxt, **values) - - svcs = [ - db.service_create( - ctxt, - {'cluster_name': cluster.name, - 'host': 'host' + str(i), - 'updated_at': down_time if i < num_down_svcs else up_time}) - for i in range(num_services) - ] - return cluster, svcs diff --git a/cinder/tests/unit/volume/__init__.py b/cinder/tests/unit/volume/__init__.py deleted file mode 100644 index 97824a88f..000000000 --- a/cinder/tests/unit/volume/__init__.py +++ /dev/null @@ -1,156 +0,0 @@ -# Copyright 2010 United States Government as represented by the -# Administrator of the National Aeronautics and Space Administration. 
-# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -import os -import shutil -import tempfile - -import mock -from oslo_config import cfg -from oslo_utils import importutils -from stevedore import extension - -from cinder.brick.local_dev import lvm as brick_lvm -from cinder import context -from cinder.image import image_utils -from cinder import objects -from cinder import test -from cinder.tests.unit import fake_constants as fake -from cinder.tests.unit.image import fake as fake_image -from cinder.tests.unit import utils as tests_utils -from cinder.volume import api as volume_api -from cinder.volume import configuration as conf - - -CONF = cfg.CONF - - -class BaseVolumeTestCase(test.TestCase): - """Test Case for volumes.""" - - FAKE_UUID = fake.IMAGE_ID - - def setUp(self, *args, **kwargs): - super(BaseVolumeTestCase, self).setUp(*args, **kwargs) - self.extension_manager = extension.ExtensionManager( - "BaseVolumeTestCase") - vol_tmpdir = tempfile.mkdtemp() - self.flags(volumes_dir=vol_tmpdir) - self.addCleanup(self._cleanup) - self.volume = importutils.import_object(CONF.volume_manager) - self.volume.message_api = mock.Mock() - self.configuration = mock.Mock(conf.Configuration) - self.context = context.get_admin_context() - self.context.user_id = fake.USER_ID - # NOTE(mriedem): The id is hard-coded here for tracking race fail - # assertions with the notification code, it's part of an - # elastic-recheck query so don't remove it or change it. 
- self.project_id = '7f265bd4-3a85-465e-a899-5dc4854a86d3' - self.user_context = context.RequestContext(user_id=fake.USER_ID, - project_id=self.project_id, - is_admin=False) - self.context.project_id = self.project_id - self.volume_params = { - 'status': 'creating', - 'host': CONF.host, - 'size': 1} - self.mock_object(brick_lvm.LVM, - 'get_all_volume_groups', - self.fake_get_all_volume_groups) - fake_image.mock_image_service(self) - self.mock_object(brick_lvm.LVM, '_vg_exists', lambda x: True) - self.mock_object(os.path, 'exists', lambda x: True) - self.mock_object(image_utils, 'check_available_space', - lambda x, y, z: True) - self.volume.driver.set_initialized() - self.volume.stats = {'allocated_capacity_gb': 0, - 'pools': {}} - # keep ordered record of what we execute - self.called = [] - self.volume_api = volume_api.API() - - def _cleanup(self): - try: - shutil.rmtree(CONF.volumes_dir) - except OSError: - pass - - def fake_get_all_volume_groups(obj, vg_name=None, no_suffix=True): - return [{'name': 'cinder-volumes', - 'size': '5.00', - 'available': '2.50', - 'lv_count': '2', - 'uuid': 'vR1JU3-FAKE-C4A9-PQFh-Mctm-9FwA-Xwzc1m'}] - - @mock.patch('cinder.image.image_utils.TemporaryImages.fetch') - @mock.patch('cinder.volume.flows.manager.create_volume.' - 'CreateVolumeFromSpecTask._clone_image_volume') - def _create_volume_from_image(self, mock_clone_image_volume, - mock_fetch_img, - fakeout_copy_image_to_volume=False, - fakeout_clone_image=False, - clone_image_volume=False): - """Test function of create_volume_from_image. - - Test cases call this function to create a volume from image, caller - can choose whether to fake out copy_image_to_volume and clone_image, - after calling this, test cases should check status of the volume. 
- """ - def fake_local_path(volume): - return dst_path - - def fake_copy_image_to_volume(context, volume, - image_service, image_id): - pass - - def fake_fetch_to_raw(ctx, image_service, image_id, path, blocksize, - size=None, throttle=None): - pass - - def fake_clone_image(ctx, volume_ref, - image_location, image_meta, - image_service): - return {'provider_location': None}, True - - dst_fd, dst_path = tempfile.mkstemp() - os.close(dst_fd) - self.mock_object(self.volume.driver, 'local_path', fake_local_path) - if fakeout_clone_image: - self.mock_object(self.volume.driver, 'clone_image', - fake_clone_image) - self.mock_object(image_utils, 'fetch_to_raw', fake_fetch_to_raw) - if fakeout_copy_image_to_volume: - self.mock_object(self.volume.driver, 'copy_image_to_volume', - fake_copy_image_to_volume) - mock_clone_image_volume.return_value = ({}, clone_image_volume) - mock_fetch_img.return_value = mock.MagicMock( - spec=tests_utils.get_file_spec()) - - image_id = 'c905cedb-7281-47e4-8a62-f26bc5fc4c77' - volume = tests_utils.create_volume(self.context, **self.volume_params) - # creating volume testdata - try: - request_spec = { - 'volume_properties': self.volume_params, - 'image_id': image_id, - 'image_size': 1 - } - self.volume.create_volume(self.context, volume, request_spec) - finally: - # cleanup - os.unlink(dst_path) - volume = objects.Volume.get_by_id(self.context, volume.id) - - return volume diff --git a/cinder/tests/unit/volume/drivers/__init__.py b/cinder/tests/unit/volume/drivers/__init__.py deleted file mode 100644 index e69de29bb..000000000 diff --git a/cinder/tests/unit/volume/drivers/dell_emc/__init__.py b/cinder/tests/unit/volume/drivers/dell_emc/__init__.py deleted file mode 100644 index e69de29bb..000000000 diff --git a/cinder/tests/unit/volume/drivers/dell_emc/sc/__init__.py b/cinder/tests/unit/volume/drivers/dell_emc/sc/__init__.py deleted file mode 100644 index e69de29bb..000000000 diff --git a/cinder/tests/unit/volume/drivers/dell_emc/sc/test_fc.py 
b/cinder/tests/unit/volume/drivers/dell_emc/sc/test_fc.py deleted file mode 100644 index fec94a365..000000000 --- a/cinder/tests/unit/volume/drivers/dell_emc/sc/test_fc.py +++ /dev/null @@ -1,888 +0,0 @@ -# Copyright (c) 2014 Dell Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import mock - -from cinder import context -from cinder import exception -from cinder import test -from cinder.tests.unit import fake_constants as fake -from cinder.volume.drivers.dell_emc.sc import storagecenter_api -from cinder.volume.drivers.dell_emc.sc import storagecenter_fc - - -# We patch these here as they are used by every test to keep -# from trying to contact a Dell Storage Center. 
-@mock.patch.object(storagecenter_api.HttpClient, - '__init__', - return_value=None) -@mock.patch.object(storagecenter_api.SCApi, - 'open_connection') -@mock.patch.object(storagecenter_api.SCApi, - 'close_connection') -class DellSCSanFCDriverTestCase(test.TestCase): - - VOLUME = {u'instanceId': u'64702.4829', - u'scSerialNumber': 64702, - u'replicationSource': False, - u'liveVolume': False, - u'vpdId': 4831, - u'objectType': u'ScVolume', - u'index': 4829, - u'volumeFolderPath': u'dopnstktst/', - u'hostCacheEnabled': False, - u'usedByLegacyFluidFsNasVolume': False, - u'inRecycleBin': False, - u'volumeFolderIndex': 17, - u'instanceName': u'5729f1db-4c45-416c-bc15-c8ea13a4465d', - u'statusMessage': u'', - u'status': u'Down', - u'storageType': {u'instanceId': u'64702.1', - u'instanceName': u'Assigned - Redundant - 2 MB', - u'objectType': u'ScStorageType'}, - u'cmmDestination': False, - u'replicationDestination': False, - u'volumeFolder': {u'instanceId': u'64702.17', - u'instanceName': u'opnstktst', - u'objectType': u'ScVolumeFolder'}, - u'deviceId': u'6000d31000fcbe0000000000000012df', - u'active': False, - u'portableVolumeDestination': False, - u'deleteAllowed': True, - u'name': u'5729f1db-4c45-416c-bc15-c8ea13a4465d', - u'scName': u'Storage Center 64702', - u'secureDataUsed': False, - u'serialNumber': u'0000fcbe-000012df', - u'replayAllowed': False, - u'flashOptimized': False, - u'configuredSize': u'1.073741824E9 Bytes', - u'mapped': False, - u'cmmSource': False} - - SCSERVER = {u'scName': u'Storage Center 64702', - u'volumeCount': 0, - u'removeHbasAllowed': True, - u'legacyFluidFs': False, - u'serverFolderIndex': 4, - u'alertOnConnectivity': True, - u'objectType': u'ScPhysicalServer', - u'instanceName': u'Server_21000024ff30441d', - u'instanceId': u'64702.47', - u'serverFolderPath': u'opnstktst/', - u'portType': [u'FibreChannel'], - u'type': u'Physical', - u'statusMessage': u'Only 5 of 6 expected paths are up', - u'status': u'Degraded', - u'scSerialNumber': 64702, - 
u'serverFolder': {u'instanceId': u'64702.4', - u'instanceName': u'opnstktst', - u'objectType': u'ScServerFolder'}, - u'parentIndex': 0, - u'connectivity': u'Partial', - u'hostCacheIndex': 0, - u'deleteAllowed': True, - u'pathCount': 5, - u'name': u'Server_21000024ff30441d', - u'hbaPresent': True, - u'hbaCount': 2, - u'notes': u'Created by Dell EMC Cinder Driver', - u'mapped': False, - u'operatingSystem': {u'instanceId': u'64702.38', - u'instanceName': u'Red Hat Linux 6.x', - u'objectType': u'ScServerOperatingSystem'} - } - - MAPPING = {u'instanceId': u'64702.2183', - u'scName': u'Storage Center 64702', - u'scSerialNumber': 64702, - u'controller': {u'instanceId': u'64702.64702', - u'instanceName': u'SN 64702', - u'objectType': u'ScController'}, - u'lunUsed': [1], - u'server': {u'instanceId': u'64702.47', - u'instanceName': u'Server_21000024ff30441d', - u'objectType': u'ScPhysicalServer'}, - u'volume': {u'instanceId': u'64702.4829', - u'instanceName': - u'5729f1db-4c45-416c-bc15-c8ea13a4465d', - u'objectType': u'ScVolume'}, - u'connectivity': u'Up', - u'readOnly': False, - u'objectType': u'ScMappingProfile', - u'hostCache': False, - u'mappedVia': u'Server', - u'mapCount': 2, - u'instanceName': u'4829-47', - u'lunRequested': u'N/A' - } - - def setUp(self): - super(DellSCSanFCDriverTestCase, self).setUp() - - # configuration is a mock. A mock is pretty much a blank - # slate. I believe mock's done in setup are not happy time - # mocks. So we just do a few things like driver config here. 
- self.configuration = mock.Mock() - - self.configuration.san_is_local = False - self.configuration.san_ip = "192.168.0.1" - self.configuration.san_login = "admin" - self.configuration.san_password = "pwd" - self.configuration.dell_sc_ssn = 64702 - self.configuration.dell_sc_server_folder = 'opnstktst' - self.configuration.dell_sc_volume_folder = 'opnstktst' - self.configuration.dell_sc_api_port = 3033 - self._context = context.get_admin_context() - - self.driver = storagecenter_fc.SCFCDriver( - configuration=self.configuration) - - self.driver.do_setup(None) - - self.driver._stats = {'QoS_support': False, - 'volume_backend_name': 'dell-1', - 'free_capacity_gb': 12123, - 'driver_version': '1.0.1', - 'total_capacity_gb': 12388, - 'reserved_percentage': 0, - 'vendor_name': 'Dell', - 'storage_protocol': 'FC'} - - # Start with none. Add in the specific tests later. - # Mock tests bozo this. - self.driver.backends = None - self.driver.replication_enabled = False - - self.volid = '5729f1db-4c45-416c-bc15-c8ea13a4465d' - self.volume_name = "volume" + self.volid - self.connector = {'ip': '192.168.0.77', - 'host': 'cinderfc-vm', - 'wwnns': ['20000024ff30441c', '20000024ff30441d'], - 'initiator': 'iqn.1993-08.org.debian:01:e1b1312f9e1', - 'wwpns': ['21000024ff30441c', '21000024ff30441d']} - - @mock.patch.object(storagecenter_api.SCApi, - 'find_server', - return_value=None) - @mock.patch.object(storagecenter_api.SCApi, - 'create_server', - return_value=SCSERVER) - @mock.patch.object(storagecenter_api.SCApi, - 'find_volume', - return_value=VOLUME) - @mock.patch.object(storagecenter_api.SCApi, - 'get_volume', - return_value=VOLUME) - @mock.patch.object(storagecenter_api.SCApi, - 'map_volume', - return_value=MAPPING) - @mock.patch.object(storagecenter_api.SCApi, - 'find_wwns', - return_value=(1, - [u'5000D31000FCBE3D', - u'5000D31000FCBE35'], - {u'21000024FF30441C': - [u'5000D31000FCBE35'], - u'21000024FF30441D': - [u'5000D31000FCBE3D']})) - def test_initialize_connection(self, 
- mock_find_wwns, - mock_map_volume, - mock_get_volume, - mock_find_volume, - mock_create_server, - mock_find_server, - mock_close_connection, - mock_open_connection, - mock_init): - volume = {'id': fake.VOLUME_ID} - connector = self.connector - res = self.driver.initialize_connection(volume, connector) - expected = {'data': - {'discard': True, - 'initiator_target_map': - {u'21000024FF30441C': [u'5000D31000FCBE35'], - u'21000024FF30441D': [u'5000D31000FCBE3D']}, - 'target_discovered': True, - 'target_lun': 1, - 'target_wwn': - [u'5000D31000FCBE3D', u'5000D31000FCBE35']}, - 'driver_volume_type': 'fibre_channel'} - - self.assertEqual(expected, res, 'Unexpected return data') - # verify find_volume has been called and that is has been called twice - mock_find_volume.assert_called_once_with(fake.VOLUME_ID, None, False) - mock_get_volume.assert_called_once_with(self.VOLUME[u'instanceId']) - - @mock.patch.object(storagecenter_api.SCApi, - 'find_server', - return_value=SCSERVER) - @mock.patch.object(storagecenter_api.SCApi, - 'find_volume', - return_value=VOLUME) - @mock.patch.object(storagecenter_api.SCApi, - 'get_volume', - return_value=VOLUME) - @mock.patch.object(storagecenter_api.SCApi, - 'map_volume', - return_value=MAPPING) - @mock.patch.object(storagecenter_fc.SCFCDriver, - '_is_live_vol') - @mock.patch.object(storagecenter_api.SCApi, - 'find_wwns') - @mock.patch.object(storagecenter_fc.SCFCDriver, - 'initialize_secondary') - @mock.patch.object(storagecenter_api.SCApi, - 'get_live_volume') - def test_initialize_connection_live_vol(self, - mock_get_live_volume, - mock_initialize_secondary, - mock_find_wwns, - mock_is_live_volume, - mock_map_volume, - mock_get_volume, - mock_find_volume, - mock_find_server, - mock_close_connection, - mock_open_connection, - mock_init): - volume = {'id': fake.VOLUME_ID} - connector = self.connector - sclivevol = {'instanceId': '101.101', - 'secondaryVolume': {'instanceId': '102.101', - 'instanceName': fake.VOLUME_ID}, - 
'secondaryScSerialNumber': 102, - 'secondaryRole': 'Secondary'} - mock_is_live_volume.return_value = True - mock_find_wwns.return_value = ( - 1, [u'5000D31000FCBE3D', u'5000D31000FCBE35'], - {u'21000024FF30441C': [u'5000D31000FCBE35'], - u'21000024FF30441D': [u'5000D31000FCBE3D']}) - mock_initialize_secondary.return_value = ( - 1, [u'5000D31000FCBE3E', u'5000D31000FCBE36'], - {u'21000024FF30441E': [u'5000D31000FCBE36'], - u'21000024FF30441F': [u'5000D31000FCBE3E']}) - mock_get_live_volume.return_value = sclivevol - res = self.driver.initialize_connection(volume, connector) - expected = {'data': - {'discard': True, - 'initiator_target_map': - {u'21000024FF30441C': [u'5000D31000FCBE35'], - u'21000024FF30441D': [u'5000D31000FCBE3D'], - u'21000024FF30441E': [u'5000D31000FCBE36'], - u'21000024FF30441F': [u'5000D31000FCBE3E']}, - 'target_discovered': True, - 'target_lun': 1, - 'target_wwn': [u'5000D31000FCBE3D', u'5000D31000FCBE35', - u'5000D31000FCBE3E', u'5000D31000FCBE36']}, - 'driver_volume_type': 'fibre_channel'} - - self.assertEqual(expected, res, 'Unexpected return data') - # verify find_volume has been called and that is has been called twice - mock_find_volume.assert_called_once_with(fake.VOLUME_ID, None, True) - mock_get_volume.assert_called_once_with(self.VOLUME[u'instanceId']) - - @mock.patch.object(storagecenter_api.SCApi, - 'find_server', - return_value=SCSERVER) - @mock.patch.object(storagecenter_api.SCApi, - 'find_volume') - @mock.patch.object(storagecenter_api.SCApi, - 'get_volume') - @mock.patch.object(storagecenter_api.SCApi, - 'map_volume', - return_value=MAPPING) - @mock.patch.object(storagecenter_fc.SCFCDriver, - '_is_live_vol') - @mock.patch.object(storagecenter_api.SCApi, - 'find_wwns') - @mock.patch.object(storagecenter_fc.SCFCDriver, - 'initialize_secondary') - @mock.patch.object(storagecenter_api.SCApi, - 'get_live_volume') - def test_initialize_connection_live_vol_afo(self, - mock_get_live_volume, - mock_initialize_secondary, - mock_find_wwns, 
- mock_is_live_volume, - mock_map_volume, - mock_get_volume, - mock_find_volume, - mock_find_server, - mock_close_connection, - mock_open_connection, - mock_init): - volume = {'id': fake.VOLUME_ID, 'provider_id': '101.101'} - scvol = {'instanceId': '102.101'} - mock_find_volume.return_value = scvol - mock_get_volume.return_value = scvol - connector = self.connector - sclivevol = {'instanceId': '101.10001', - 'primaryVolume': {'instanceId': '102.101', - 'instanceName': fake.VOLUME_ID}, - 'primaryScSerialNumber': 102, - 'secondaryVolume': {'instanceId': '101.101', - 'instanceName': fake.VOLUME_ID}, - 'secondaryScSerialNumber': 101, - 'secondaryRole': 'Activated'} - - mock_is_live_volume.return_value = True - mock_find_wwns.return_value = ( - 1, [u'5000D31000FCBE3D', u'5000D31000FCBE35'], - {u'21000024FF30441C': [u'5000D31000FCBE35'], - u'21000024FF30441D': [u'5000D31000FCBE3D']}) - mock_get_live_volume.return_value = sclivevol - res = self.driver.initialize_connection(volume, connector) - expected = {'data': - {'discard': True, - 'initiator_target_map': - {u'21000024FF30441C': [u'5000D31000FCBE35'], - u'21000024FF30441D': [u'5000D31000FCBE3D']}, - 'target_discovered': True, - 'target_lun': 1, - 'target_wwn': [u'5000D31000FCBE3D', u'5000D31000FCBE35']}, - 'driver_volume_type': 'fibre_channel'} - - self.assertEqual(expected, res, 'Unexpected return data') - # verify find_volume has been called and that is has been called twice - self.assertFalse(mock_initialize_secondary.called) - mock_find_volume.assert_called_once_with( - fake.VOLUME_ID, '101.101', True) - mock_get_volume.assert_called_once_with('102.101') - - @mock.patch.object(storagecenter_api.SCApi, - 'find_server', - return_value=SCSERVER) - @mock.patch.object(storagecenter_api.SCApi, - 'find_volume', - return_value=VOLUME) - @mock.patch.object(storagecenter_api.SCApi, - 'get_volume', - return_value=VOLUME) - @mock.patch.object(storagecenter_api.SCApi, - 'map_volume', - return_value=MAPPING) - 
@mock.patch.object(storagecenter_api.SCApi, - 'find_wwns', - return_value=(None, [], {})) - def test_initialize_connection_no_wwns(self, - mock_find_wwns, - mock_map_volume, - mock_get_volume, - mock_find_volume, - mock_find_server, - mock_close_connection, - mock_open_connection, - mock_init): - volume = {'id': fake.VOLUME_ID} - connector = self.connector - self.assertRaises(exception.VolumeBackendAPIException, - self.driver.initialize_connection, - volume, - connector) - - @mock.patch.object(storagecenter_api.SCApi, - 'find_server', - return_value=None) - @mock.patch.object(storagecenter_api.SCApi, - 'create_server', - return_value=None) - @mock.patch.object(storagecenter_api.SCApi, - 'find_volume', - return_value=VOLUME) - @mock.patch.object(storagecenter_api.SCApi, - 'map_volume', - return_value=MAPPING) - @mock.patch.object(storagecenter_api.SCApi, - 'find_wwns', - return_value=(None, [], {})) - def test_initialize_connection_no_server(self, - mock_find_wwns, - mock_map_volume, - mock_find_volume, - mock_create_server, - mock_find_server, - mock_close_connection, - mock_open_connection, - mock_init): - volume = {'id': fake.VOLUME_ID} - connector = self.connector - self.assertRaises(exception.VolumeBackendAPIException, - self.driver.initialize_connection, - volume, - connector) - - @mock.patch.object(storagecenter_api.SCApi, - 'find_server', - return_value=SCSERVER) - @mock.patch.object(storagecenter_api.SCApi, - 'find_volume', - return_value=None) - @mock.patch.object(storagecenter_api.SCApi, - 'map_volume', - return_value=MAPPING) - @mock.patch.object(storagecenter_api.SCApi, - 'find_wwns', - return_value=(None, [], {})) - def test_initialize_connection_vol_not_found(self, - mock_find_wwns, - mock_map_volume, - mock_find_volume, - mock_find_server, - mock_close_connection, - mock_open_connection, - mock_init): - volume = {'id': fake.VOLUME_ID} - connector = self.connector - self.assertRaises(exception.VolumeBackendAPIException, - 
self.driver.initialize_connection, - volume, - connector) - - @mock.patch.object(storagecenter_api.SCApi, - 'find_server', - return_value=SCSERVER) - @mock.patch.object(storagecenter_api.SCApi, - 'find_volume', - return_value=VOLUME) - @mock.patch.object(storagecenter_api.SCApi, - 'map_volume', - return_value=None) - @mock.patch.object(storagecenter_api.SCApi, - 'find_wwns', - return_value=(None, [], {})) - def test_initialize_connection_map_vol_fail(self, - mock_find_wwns, - mock_map_volume, - mock_find_volume, - mock_find_server, - mock_close_connection, - mock_open_connection, - mock_init): - # Test case where map_volume returns None (no mappings) - volume = {'id': fake.VOLUME_ID} - connector = self.connector - self.assertRaises(exception.VolumeBackendAPIException, - self.driver.initialize_connection, - volume, - connector) - - def test_initialize_secondary(self, - mock_close_connection, - mock_open_connection, - mock_init): - sclivevol = {'instanceId': '101.101', - 'secondaryVolume': {'instanceId': '102.101', - 'instanceName': fake.VOLUME_ID}, - 'secondaryScSerialNumber': 102} - - mock_api = mock.MagicMock() - mock_api.find_server = mock.MagicMock(return_value=self.SCSERVER) - mock_api.map_secondary_volume = mock.MagicMock( - return_value=self.VOLUME) - find_wwns_ret = (1, [u'5000D31000FCBE3D', u'5000D31000FCBE35'], - {u'21000024FF30441C': [u'5000D31000FCBE35'], - u'21000024FF30441D': [u'5000D31000FCBE3D']}) - mock_api.find_wwns = mock.MagicMock(return_value=find_wwns_ret) - mock_api.get_volume = mock.MagicMock(return_value=self.VOLUME) - ret = self.driver.initialize_secondary(mock_api, sclivevol, - ['wwn1', 'wwn2']) - - self.assertEqual(find_wwns_ret, ret) - - def test_initialize_secondary_create_server(self, - mock_close_connection, - mock_open_connection, - mock_init): - sclivevol = {'instanceId': '101.101', - 'secondaryVolume': {'instanceId': '102.101', - 'instanceName': fake.VOLUME_ID}, - 'secondaryScSerialNumber': 102} - mock_api = mock.MagicMock() - 
mock_api.find_server = mock.MagicMock(return_value=None) - mock_api.create_server = mock.MagicMock(return_value=self.SCSERVER) - mock_api.map_secondary_volume = mock.MagicMock( - return_value=self.VOLUME) - find_wwns_ret = (1, [u'5000D31000FCBE3D', u'5000D31000FCBE35'], - {u'21000024FF30441C': [u'5000D31000FCBE35'], - u'21000024FF30441D': [u'5000D31000FCBE3D']}) - mock_api.find_wwns = mock.MagicMock(return_value=find_wwns_ret) - mock_api.get_volume = mock.MagicMock(return_value=self.VOLUME) - ret = self.driver.initialize_secondary(mock_api, sclivevol, - ['wwn1', 'wwn2']) - self.assertEqual(find_wwns_ret, ret) - - def test_initialize_secondary_no_server(self, - mock_close_connection, - mock_open_connection, - mock_init): - sclivevol = {'instanceId': '101.101', - 'secondaryVolume': {'instanceId': '102.101', - 'instanceName': fake.VOLUME_ID}, - 'secondaryScSerialNumber': 102} - mock_api = mock.MagicMock() - mock_api.find_server = mock.MagicMock(return_value=None) - mock_api.create_server = mock.MagicMock(return_value=None) - ret = self.driver.initialize_secondary(mock_api, sclivevol, - ['wwn1', 'wwn2']) - expected = (None, [], {}) - self.assertEqual(expected, ret) - - def test_initialize_secondary_map_fail(self, - mock_close_connection, - mock_open_connection, - mock_init): - sclivevol = {'instanceId': '101.101', - 'secondaryVolume': {'instanceId': '102.101', - 'instanceName': fake.VOLUME_ID}, - 'secondaryScSerialNumber': 102} - mock_api = mock.MagicMock() - mock_api.find_server = mock.MagicMock(return_value=self.SCSERVER) - mock_api.map_secondary_volume = mock.MagicMock(return_value=None) - ret = self.driver.initialize_secondary(mock_api, sclivevol, - ['wwn1', 'wwn2']) - expected = (None, [], {}) - self.assertEqual(expected, ret) - - def test_initialize_secondary_vol_not_found(self, - mock_close_connection, - mock_open_connection, - mock_init): - sclivevol = {'instanceId': '101.101', - 'secondaryVolume': {'instanceId': '102.101', - 'instanceName': fake.VOLUME_ID}, - 
'secondaryScSerialNumber': 102} - mock_api = mock.MagicMock() - mock_api.find_server = mock.MagicMock(return_value=self.SCSERVER) - mock_api.map_secondary_volume = mock.MagicMock( - return_value=self.VOLUME) - mock_api.get_volume = mock.MagicMock(return_value=None) - ret = self.driver.initialize_secondary(mock_api, sclivevol, - ['wwn1', 'wwn2']) - expected = (None, [], {}) - self.assertEqual(expected, ret) - - @mock.patch.object(storagecenter_api.SCApi, - 'find_server', - return_value=SCSERVER) - @mock.patch.object(storagecenter_api.SCApi, - 'find_volume', - return_value=VOLUME) - @mock.patch.object(storagecenter_api.SCApi, - 'unmap_volume', - return_value=True) - @mock.patch.object(storagecenter_api.SCApi, - 'find_wwns', - return_value=(1, - [u'5000D31000FCBE3D', - u'5000D31000FCBE35'], - {u'21000024FF30441C': - [u'5000D31000FCBE35'], - u'21000024FF30441D': - [u'5000D31000FCBE3D']})) - @mock.patch.object(storagecenter_api.SCApi, - 'get_volume_count', - return_value=1) - def test_terminate_connection(self, - mock_get_volume_count, - mock_find_wwns, - mock_unmap_volume, - mock_find_volume, - mock_find_server, - mock_close_connection, - mock_open_connection, - mock_init): - volume = {'id': fake.VOLUME_ID} - connector = self.connector - res = self.driver.terminate_connection(volume, connector) - mock_unmap_volume.assert_called_once_with(self.VOLUME, self.SCSERVER) - expected = {'driver_volume_type': 'fibre_channel', - 'data': {}} - self.assertEqual(expected, res, 'Unexpected return data') - - @mock.patch.object(storagecenter_api.SCApi, - 'find_server', - return_value=SCSERVER) - @mock.patch.object(storagecenter_api.SCApi, - 'find_volume', - return_value=VOLUME) - @mock.patch.object(storagecenter_api.SCApi, - 'unmap_volume', - return_value=True) - @mock.patch.object(storagecenter_api.SCApi, - 'find_wwns', - return_value=(1, - [u'5000D31000FCBE3D', - u'5000D31000FCBE35'], - {u'21000024FF30441C': - [u'5000D31000FCBE35'], - u'21000024FF30441D': - [u'5000D31000FCBE3D']})) 
- @mock.patch.object(storagecenter_api.SCApi, - 'get_volume_count', - return_value=1) - @mock.patch.object(storagecenter_fc.SCFCDriver, - '_is_live_vol') - @mock.patch.object(storagecenter_fc.SCFCDriver, - 'terminate_secondary') - def test_terminate_connection_live_vol(self, - mock_terminate_secondary, - mock_is_live_vol, - mock_get_volume_count, - mock_find_wwns, - mock_unmap_volume, - mock_find_volume, - mock_find_server, - mock_close_connection, - mock_open_connection, - mock_init): - volume = {'id': fake.VOLUME_ID} - connector = self.connector - mock_terminate_secondary.return_value = (None, [], {}) - mock_is_live_vol.return_value = True - res = self.driver.terminate_connection(volume, connector) - mock_unmap_volume.assert_called_once_with(self.VOLUME, self.SCSERVER) - expected = {'driver_volume_type': 'fibre_channel', - 'data': {}} - self.assertEqual(expected, res, 'Unexpected return data') - - @mock.patch.object(storagecenter_api.SCApi, - 'find_server', - return_value=None) - @mock.patch.object(storagecenter_api.SCApi, - 'find_volume', - return_value=VOLUME) - @mock.patch.object(storagecenter_api.SCApi, - 'unmap_volume', - return_value=True) - @mock.patch.object(storagecenter_api.SCApi, - 'find_wwns', - return_value=(1, - [u'5000D31000FCBE3D', - u'5000D31000FCBE35'], - {u'21000024FF30441C': - [u'5000D31000FCBE35'], - u'21000024FF30441D': - [u'5000D31000FCBE3D']})) - @mock.patch.object(storagecenter_api.SCApi, - 'get_volume_count', - return_value=1) - def test_terminate_connection_no_server(self, - mock_get_volume_count, - mock_find_wwns, - mock_unmap_volume, - mock_find_volume, - mock_find_server, - mock_close_connection, - mock_open_connection, - mock_init): - volume = {'id': fake.VOLUME_ID} - connector = self.connector - self.assertRaises(exception.VolumeBackendAPIException, - self.driver.terminate_connection, - volume, - connector) - - @mock.patch.object(storagecenter_api.SCApi, - 'find_server', - return_value=SCSERVER) - 
@mock.patch.object(storagecenter_api.SCApi, - 'find_volume', - return_value=None) - @mock.patch.object(storagecenter_api.SCApi, - 'unmap_volume', - return_value=True) - @mock.patch.object(storagecenter_api.SCApi, - 'find_wwns', - return_value=(1, - [u'5000D31000FCBE3D', - u'5000D31000FCBE35'], - {u'21000024FF30441C': - [u'5000D31000FCBE35'], - u'21000024FF30441D': - [u'5000D31000FCBE3D']})) - @mock.patch.object(storagecenter_api.SCApi, - 'get_volume_count', - return_value=1) - def test_terminate_connection_no_volume(self, - mock_get_volume_count, - mock_find_wwns, - mock_unmap_volume, - mock_find_volume, - mock_find_server, - mock_close_connection, - mock_open_connection, - mock_init): - volume = {'id': fake.VOLUME_ID} - connector = self.connector - self.assertRaises(exception.VolumeBackendAPIException, - self.driver.terminate_connection, - volume, - connector) - - @mock.patch.object(storagecenter_api.SCApi, - 'find_server', - return_value=SCSERVER) - @mock.patch.object(storagecenter_api.SCApi, - 'find_volume', - return_value=VOLUME) - @mock.patch.object(storagecenter_api.SCApi, - 'unmap_volume', - return_value=True) - @mock.patch.object(storagecenter_api.SCApi, - 'find_wwns', - return_value=(None, - [], - {})) - @mock.patch.object(storagecenter_api.SCApi, - 'get_volume_count', - return_value=1) - def test_terminate_connection_no_wwns(self, - mock_get_volume_count, - mock_find_wwns, - mock_unmap_volume, - mock_find_volume, - mock_find_server, - mock_close_connection, - mock_open_connection, - mock_init): - volume = {'id': fake.VOLUME_ID} - connector = self.connector - res = self.driver.terminate_connection(volume, connector) - expected = {'driver_volume_type': 'fibre_channel', - 'data': {}} - self.assertEqual(expected, res, 'Unexpected return data') - - @mock.patch.object(storagecenter_api.SCApi, - 'find_server', - return_value=SCSERVER) - @mock.patch.object(storagecenter_api.SCApi, - 'find_volume', - return_value=VOLUME) - 
@mock.patch.object(storagecenter_api.SCApi, - 'unmap_volume', - return_value=False) - @mock.patch.object(storagecenter_api.SCApi, - 'find_wwns', - return_value=(1, - [u'5000D31000FCBE3D', - u'5000D31000FCBE35'], - {u'21000024FF30441C': - [u'5000D31000FCBE35'], - u'21000024FF30441D': - [u'5000D31000FCBE3D']})) - @mock.patch.object(storagecenter_api.SCApi, - 'get_volume_count', - return_value=1) - def test_terminate_connection_failure(self, - mock_get_volume_count, - mock_find_wwns, - mock_unmap_volume, - mock_find_volume, - mock_find_server, - mock_close_connection, - mock_open_connection, - mock_init): - volume = {'id': fake.VOLUME_ID} - connector = self.connector - self.assertRaises(exception.VolumeBackendAPIException, - self.driver.terminate_connection, - volume, - connector) - - @mock.patch.object(storagecenter_api.SCApi, - 'find_server', - return_value=SCSERVER) - @mock.patch.object(storagecenter_api.SCApi, - 'find_volume', - return_value=VOLUME) - @mock.patch.object(storagecenter_api.SCApi, - 'unmap_volume', - return_value=True) - @mock.patch.object(storagecenter_api.SCApi, - 'find_wwns', - return_value=(1, - [u'5000D31000FCBE3D', - u'5000D31000FCBE35'], - {u'21000024FF30441C': - [u'5000D31000FCBE35'], - u'21000024FF30441D': - [u'5000D31000FCBE3D']})) - @mock.patch.object(storagecenter_api.SCApi, - 'get_volume_count', - return_value=0) - def test_terminate_connection_vol_count_zero(self, - mock_get_volume_count, - mock_find_wwns, - mock_unmap_volume, - mock_find_volume, - mock_find_server, - mock_close_connection, - mock_open_connection, - mock_init): - # Test case where get_volume_count is zero - volume = {'id': fake.VOLUME_ID} - connector = self.connector - res = self.driver.terminate_connection(volume, connector) - mock_unmap_volume.assert_called_once_with(self.VOLUME, self.SCSERVER) - expected = {'data': - {'initiator_target_map': - {u'21000024FF30441C': [u'5000D31000FCBE35'], - u'21000024FF30441D': [u'5000D31000FCBE3D']}, - 'target_wwn': - 
[u'5000D31000FCBE3D', u'5000D31000FCBE35']}, - 'driver_volume_type': 'fibre_channel'} - self.assertEqual(expected, res, 'Unexpected return data') - - def test_terminate_secondary(self, - mock_close_connection, - mock_open_connection, - mock_init): - mock_api = mock.MagicMock() - mock_api.find_server = mock.MagicMock(return_value=self.SCSERVER) - mock_api.get_volume = mock.MagicMock(return_value=self.VOLUME) - mock_api.find_wwns = mock.MagicMock(return_value=(None, [], {})) - mock_api.unmap_volume = mock.MagicMock(return_value=True) - sclivevol = {'instanceId': '101.101', - 'secondaryVolume': {'instanceId': '102.101', - 'instanceName': fake.VOLUME_ID}, - 'secondaryScSerialNumber': 102} - ret = self.driver.terminate_secondary(mock_api, sclivevol, - ['wwn1', 'wwn2']) - expected = (None, [], {}) - self.assertEqual(expected, ret) - - @mock.patch.object(storagecenter_api.SCApi, - 'get_storage_usage', - return_value={'availableSpace': 100, 'freeSpace': 50}) - def test_update_volume_stats_with_refresh(self, - mock_get_storage_usage, - mock_close_connection, - mock_open_connection, - mock_init): - stats = self.driver.get_volume_stats(True) - self.assertEqual('FC', stats['storage_protocol']) - mock_get_storage_usage.assert_called_once_with() - - @mock.patch.object(storagecenter_api.SCApi, - 'get_storage_usage', - return_value={'availableSpace': 100, 'freeSpace': 50}) - def test_get_volume_stats_no_refresh(self, - mock_get_storage_usage, - mock_close_connection, - mock_open_connection, - mock_init): - stats = self.driver.get_volume_stats(False) - self.assertEqual('FC', stats['storage_protocol']) - mock_get_storage_usage.assert_not_called() diff --git a/cinder/tests/unit/volume/drivers/dell_emc/sc/test_sc.py b/cinder/tests/unit/volume/drivers/dell_emc/sc/test_sc.py deleted file mode 100644 index 28f44230d..000000000 --- a/cinder/tests/unit/volume/drivers/dell_emc/sc/test_sc.py +++ /dev/null @@ -1,4326 +0,0 @@ -# Copyright (c) 2014 Dell Inc. 
-# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import eventlet -import mock -import uuid - -from cinder import context -from cinder import exception -from cinder.objects import fields -from cinder import test -from cinder.tests.unit import fake_constants as fake -from cinder.tests.unit import fake_snapshot -from cinder.tests.unit import fake_volume -from cinder.volume.drivers.dell_emc.sc import storagecenter_api -from cinder.volume.drivers.dell_emc.sc import storagecenter_iscsi -from cinder.volume import volume_types - -# We patch these here as they are used by every test to keep -# from trying to contact a Dell Storage Center. 
-MOCKAPI = mock.MagicMock() - - -@mock.patch.object(storagecenter_api.HttpClient, - '__init__', - return_value=None) -@mock.patch.object(storagecenter_api.SCApi, - 'open_connection', - return_value=MOCKAPI) -@mock.patch.object(storagecenter_api.SCApi, - 'close_connection') -class DellSCSanISCSIDriverTestCase(test.TestCase): - - VOLUME = {u'instanceId': u'64702.3494', - u'scSerialNumber': 64702, - u'replicationSource': False, - u'liveVolume': False, - u'vpdId': 3496, - u'objectType': u'ScVolume', - u'index': 3494, - u'volumeFolderPath': u'devstackvol/fcvm/', - u'hostCacheEnabled': False, - u'usedByLegacyFluidFsNasVolume': False, - u'inRecycleBin': False, - u'volumeFolderIndex': 17, - u'instanceName': u'volume-37883deb-85cd-426a-9a98-62eaad8671ea', - u'statusMessage': u'', - u'status': u'Up', - u'storageType': {u'instanceId': u'64702.1', - u'instanceName': u'Assigned - Redundant - 2 MB', - u'objectType': u'ScStorageType'}, - u'cmmDestination': False, - u'replicationDestination': False, - u'volumeFolder': {u'instanceId': u'64702.17', - u'instanceName': u'fcvm', - u'objectType': u'ScVolumeFolder'}, - u'deviceId': u'6000d31000fcbe000000000000000da8', - u'active': True, - u'portableVolumeDestination': False, - u'deleteAllowed': True, - u'name': u'volume-37883deb-85cd-426a-9a98-62eaad8671ea', - u'scName': u'Storage Center 64702', - u'secureDataUsed': False, - u'serialNumber': u'0000fcbe-00000da8', - u'replayAllowed': True, - u'flashOptimized': False, - u'configuredSize': u'1.073741824E9 Bytes', - u'mapped': False, - u'cmmSource': False} - - SCSERVER = {u'scName': u'Storage Center 64702', - u'volumeCount': 0, - u'removeHbasAllowed': True, - u'legacyFluidFs': False, - u'serverFolderIndex': 4, - u'alertOnConnectivity': True, - u'objectType': u'ScPhysicalServer', - u'instanceName': u'Server_21000024ff30441d', - u'instanceId': u'64702.47', - u'serverFolderPath': u'devstacksrv/', - u'portType': [u'FibreChannel'], - u'type': u'Physical', - u'statusMessage': u'Only 5 of 6 
expected paths are up', - u'status': u'Degraded', - u'scSerialNumber': 64702, - u'serverFolder': {u'instanceId': u'64702.4', - u'instanceName': u'devstacksrv', - u'objectType': u'ScServerFolder'}, - u'parentIndex': 0, - u'connectivity': u'Partial', - u'hostCacheIndex': 0, - u'deleteAllowed': True, - u'pathCount': 5, - u'name': u'Server_21000024ff30441d', - u'hbaPresent': True, - u'hbaCount': 2, - u'notes': u'Created by Dell EMC Cinder Driver', - u'mapped': False, - u'operatingSystem': {u'instanceId': u'64702.38', - u'instanceName': u'Red Hat Linux 6.x', - u'objectType': u'ScServerOperatingSystem'} - } - - MAPPINGS = [{u'profile': {u'instanceId': u'64702.104', - u'instanceName': u'92-30', - u'objectType': u'ScMappingProfile'}, - u'status': u'Down', - u'statusMessage': u'', - u'instanceId': u'64702.969.64702', - u'scName': u'Storage Center 64702', - u'scSerialNumber': 64702, - u'controller': {u'instanceId': u'64702.64702', - u'instanceName': u'SN 64702', - u'objectType': u'ScController'}, - u'server': {u'instanceId': u'64702.30', - u'instanceName': - u'Server_iqn.1993-08.org.debian:01:3776df826e4f', - u'objectType': u'ScPhysicalServer'}, - u'volume': {u'instanceId': u'64702.92', - u'instanceName': - u'volume-74a21934-60ad-4cf2-b89b-1f0dda309ddf', - u'objectType': u'ScVolume'}, - u'readOnly': False, - u'lun': 1, - u'lunUsed': [1], - u'serverHba': {u'instanceId': u'64702.3454975614', - u'instanceName': - u'iqn.1993-08.org.debian:01:3776df826e4f', - u'objectType': u'ScServerHba'}, - u'path': {u'instanceId': u'64702.64702.64702.31.8', - u'instanceName': - u'iqn.1993-08.org.debian:' - '01:3776df826e4f-5000D31000FCBE43', - u'objectType': u'ScServerHbaPath'}, - u'controllerPort': {u'instanceId': - u'64702.5764839588723736131.91', - u'instanceName': u'5000D31000FCBE43', - u'objectType': u'ScControllerPort'}, - u'instanceName': u'64702-969', - u'transport': u'Iscsi', - u'objectType': u'ScMapping'}] - - RPLAY = {u'scSerialNumber': 64702, - u'globalIndex': u'64702-46-250', - 
u'description': u'Cinder Clone Replay', - u'parent': {u'instanceId': u'64702.46.249', - u'instanceName': u'64702-46-249', - u'objectType': u'ScReplay'}, - u'instanceId': u'64702.46.250', - u'scName': u'Storage Center 64702', - u'consistent': False, - u'expires': True, - u'freezeTime': u'12/09/2014 03:52:08 PM', - u'createVolume': {u'instanceId': u'64702.46', - u'instanceName': - u'volume-ff9589d3-2d41-48d5-9ef5-2713a875e85b', - u'objectType': u'ScVolume'}, - u'expireTime': u'12/09/2014 04:52:08 PM', - u'source': u'Manual', - u'spaceRecovery': False, - u'writesHeldDuration': 7910, - u'active': False, - u'markedForExpiration': False, - u'objectType': u'ScReplay', - u'instanceName': u'12/09/2014 03:52:08 PM', - u'size': u'0.0 Bytes' - } - - SCRPLAYPROFILE = {u'ruleCount': 0, - u'name': u'fc8f2fec-fab2-4e34-9148-c094c913b9a3', - u'volumeCount': 0, - u'scName': u'Storage Center 64702', - u'notes': u'Created by Dell EMC Cinder Driver', - u'scSerialNumber': 64702, - u'userCreated': True, - u'instanceName': u'fc8f2fec-fab2-4e34-9148-c094c913b9a3', - u'instanceId': u'64702.11', - u'enforceReplayCreationTimeout': False, - u'replayCreationTimeout': 20, - u'objectType': u'ScReplayProfile', - u'type': u'Consistent', - u'expireIncompleteReplaySets': True} - - IQN = 'iqn.2002-03.com.compellent:5000D31000000001' - - ISCSI_PROPERTIES = {'access_mode': 'rw', - 'discard': True, - 'target_discovered': False, - 'target_iqn': - u'iqn.2002-03.com.compellent:5000d31000fcbe43', - 'target_iqns': - [u'iqn.2002-03.com.compellent:5000d31000fcbe43', - u'iqn.2002-03.com.compellent:5000d31000fcbe44'], - 'target_lun': 1, - 'target_luns': [1, 1], - 'target_portal': u'192.168.0.21:3260', - 'target_portals': [u'192.168.0.21:3260', - u'192.168.0.22:3260']} - - def setUp(self): - super(DellSCSanISCSIDriverTestCase, self).setUp() - - # configuration is a mock. A mock is pretty much a blank - # slate. I believe mock's done in setup are not happy time - # mocks. 
So we just do a few things like driver config here. - self.configuration = mock.Mock() - - self.configuration.san_is_local = False - self.configuration.san_ip = "192.168.0.1" - self.configuration.san_login = "admin" - self.configuration.san_password = "mmm" - self.configuration.dell_sc_ssn = 12345 - self.configuration.dell_sc_server_folder = 'opnstktst' - self.configuration.dell_sc_volume_folder = 'opnstktst' - self.configuration.dell_sc_api_port = 3033 - self.configuration.iscsi_ip_address = '192.168.1.1' - self.configuration.iscsi_port = 3260 - self._context = context.get_admin_context() - - self.driver = storagecenter_iscsi.SCISCSIDriver( - configuration=self.configuration) - - self.driver.do_setup(None) - - self.driver._stats = {'QoS_support': False, - 'volume_backend_name': 'dell-1', - 'free_capacity_gb': 12123, - 'driver_version': '1.0.1', - 'total_capacity_gb': 12388, - 'reserved_percentage': 0, - 'vendor_name': 'Dell EMC', - 'storage_protocol': 'iSCSI'} - - # Start with none. Add in the specific tests later. - # Mock tests bozo this. 
- self.driver.backends = None - self.driver.replication_enabled = False - - self.mock_sleep = self.mock_object(eventlet, 'sleep') - - self.volid = str(uuid.uuid4()) - self.volume_name = "volume" + self.volid - self.connector = { - 'ip': '10.0.0.2', - 'initiator': 'iqn.1993-08.org.debian:01:2227dab76162', - 'host': 'fakehost'} - self.connector_multipath = { - 'ip': '10.0.0.2', - 'initiator': 'iqn.1993-08.org.debian:01:2227dab76162', - 'host': 'fakehost', - 'multipath': True} - self.access_record_output = [ - "ID Initiator Ipaddress AuthMethod UserName Apply-To", - "--- --------------- ------------- ---------- ---------- --------", - "1 iqn.1993-08.org.debian:01:222 *.*.*.* none both", - " 7dab76162"] - - self.fake_iqn = 'iqn.2002-03.com.compellent:5000D31000000001' - self.properties = { - 'target_discovered': True, - 'target_portal': '%s:3260' - % self.driver.configuration.dell_sc_iscsi_ip, - 'target_iqn': self.fake_iqn, - 'volume_id': 1} - self._model_update = { - 'provider_location': "%s:3260,1 %s 0" - % (self.driver.configuration.dell_sc_iscsi_ip, - self.fake_iqn) - } - - @mock.patch.object(storagecenter_api.SCApi, - 'find_sc') - def test_check_for_setup_error(self, - mock_find_sc, - mock_close_connection, - mock_open_connection, - mock_init): - # Fail, Fail due to repl partner not found, success. - mock_find_sc.side_effect = [exception.VolumeBackendAPIException(''), - 10000, - 12345, - exception.VolumeBackendAPIException(''), - 10000, - 12345, - 67890] - - # Find SC throws - self.assertRaises(exception.VolumeBackendAPIException, - self.driver.check_for_setup_error) - # Replication enabled but one backend is down. - self.driver.replication_enabled = True - self.driver.backends = [{'target_device_id': '12345', - 'managed_backend_name': 'host@dell1', - 'qosnode': 'cinderqos'}, - {'target_device_id': '67890', - 'managed_backend_name': 'host@dell2', - 'qosnode': 'otherqos'}] - self.assertRaises(exception.InvalidHost, - self.driver.check_for_setup_error) - # Good run. 
Should run without exceptions. - self.driver.check_for_setup_error() - # failed over run - mock_find_sc.side_effect = None - mock_find_sc.reset_mock() - mock_find_sc.return_value = 10000 - self.driver.failed_over = True - self.driver.check_for_setup_error() - # find sc should be called exactly once - mock_find_sc.assert_called_once_with() - # No repl run - mock_find_sc.reset_mock() - mock_find_sc.return_value = 10000 - self.driver.failed_over = False - self.driver.replication_enabled = False - self.driver.backends = None - self.driver.check_for_setup_error() - mock_find_sc.assert_called_once_with() - - @mock.patch.object(storagecenter_iscsi.SCISCSIDriver, - '_get_volume_extra_specs') - def test__create_replications(self, - mock_get_volume_extra_specs, - mock_close_connection, - mock_open_connection, - mock_init): - backends = self.driver.backends - mock_get_volume_extra_specs.return_value = { - 'replication_enabled': ' True'} - model_update = {'replication_status': 'enabled', - 'replication_driver_data': '12345,67890'} - - vol = {'id': fake.VOLUME_ID, 'replication_driver_data': ''} - scvol = {'name': fake.VOLUME_ID} - self.driver.backends = [{'target_device_id': '12345', - 'managed_backend_name': 'host@dell1', - 'qosnode': 'cinderqos'}, - {'target_device_id': '67890', - 'managed_backend_name': 'host@dell2', - 'qosnode': 'otherqos'}] - mock_api = mock.MagicMock() - mock_api.create_replication = mock.MagicMock( - return_value={'instanceId': '1'}) - # Create regular replication test. - res = self.driver._create_replications(mock_api, vol, scvol) - mock_api.create_replication.assert_any_call( - scvol, '12345', 'cinderqos', False, None, False) - mock_api.create_replication.assert_any_call( - scvol, '67890', 'otherqos', False, None, False) - self.assertEqual(model_update, res) - # Create replication with activereplay set. 
- mock_get_volume_extra_specs.return_value = { - 'replication:activereplay': ' True', - 'replication_enabled': ' True'} - res = self.driver._create_replications(mock_api, vol, scvol) - mock_api.create_replication.assert_any_call( - scvol, '12345', 'cinderqos', False, None, True) - mock_api.create_replication.assert_any_call( - scvol, '67890', 'otherqos', False, None, True) - self.assertEqual(model_update, res) - # Create replication with sync set. - mock_get_volume_extra_specs.return_value = { - 'replication:activereplay': ' True', - 'replication_enabled': ' True', - 'replication_type': ' sync'} - res = self.driver._create_replications(mock_api, vol, scvol) - mock_api.create_replication.assert_any_call( - scvol, '12345', 'cinderqos', True, None, True) - mock_api.create_replication.assert_any_call( - scvol, '67890', 'otherqos', True, None, True) - self.assertEqual(model_update, res) - # Create replication with disk folder set. - self.driver.backends = [{'target_device_id': '12345', - 'managed_backend_name': 'host@dell1', - 'qosnode': 'cinderqos', - 'diskfolder': 'ssd'}, - {'target_device_id': '67890', - 'managed_backend_name': 'host@dell2', - 'qosnode': 'otherqos', - 'diskfolder': 'ssd'}] - mock_get_volume_extra_specs.return_value = { - 'replication:activereplay': ' True', - 'replication_enabled': ' True', - 'replication_type': ' sync'} - res = self.driver._create_replications(mock_api, vol, scvol) - mock_api.create_replication.assert_any_call( - scvol, '12345', 'cinderqos', True, 'ssd', True) - mock_api.create_replication.assert_any_call( - scvol, '67890', 'otherqos', True, 'ssd', True) - self.assertEqual(model_update, res) - # Failed to create replication test. 
- mock_api.create_replication.return_value = None - self.assertRaises(exception.VolumeBackendAPIException, - self.driver._create_replications, - mock_api, - vol, - scvol) - # Replication not enabled test - mock_get_volume_extra_specs.return_value = {} - res = self.driver._create_replications(mock_api, vol, scvol) - self.assertEqual({}, res) - self.driver.backends = backends - - @mock.patch.object(storagecenter_iscsi.SCISCSIDriver, - '_get_volume_extra_specs') - def test__create_replications_live_volume(self, - mock_get_volume_extra_specs, - mock_close_connection, - mock_open_connection, - mock_init): - backends = self.driver.backends - model_update = {'replication_status': 'enabled', - 'replication_driver_data': '12345'} - - vol = {'id': fake.VOLUME_ID, 'replication_driver_data': ''} - scvol = {'name': fake.VOLUME_ID} - - mock_api = mock.MagicMock() - mock_api.create_live_volume = mock.MagicMock( - return_value={'instanceId': '1'}) - # Live volume with two backends defined. - self.driver.backends = [{'target_device_id': '12345', - 'managed_backend_name': 'host@dell1', - 'qosnode': 'cinderqos', - 'remoteqos': 'remoteqos'}, - {'target_device_id': '67890', - 'managed_backend_name': 'host@dell2', - 'qosnode': 'otherqos', - 'remoteqos': 'remoteqos'}] - mock_get_volume_extra_specs.return_value = { - 'replication:activereplay': ' True', - 'replication_enabled': ' True', - 'replication:livevolume': ' True'} - self.assertRaises(exception.ReplicationError, - self.driver._create_replications, - mock_api, - vol, - scvol) - # Live volume - self.driver.backends = [{'target_device_id': '12345', - 'managed_backend_name': 'host@dell1', - 'qosnode': 'cinderqos', - 'diskfolder': 'ssd', - 'remoteqos': 'remoteqos'}] - res = self.driver._create_replications(mock_api, vol, scvol) - mock_api.create_live_volume.assert_called_once_with( - scvol, '12345', True, False, False, 'cinderqos', 'remoteqos') - self.assertEqual(model_update, res) - # Active replay False - 
mock_get_volume_extra_specs.return_value = { - 'replication_enabled': ' True', - 'replication:livevolume': ' True'} - res = self.driver._create_replications(mock_api, vol, scvol) - mock_api.create_live_volume.assert_called_with( - scvol, '12345', False, False, False, 'cinderqos', 'remoteqos') - self.assertEqual(model_update, res) - # Sync - mock_get_volume_extra_specs.return_value = { - 'replication_enabled': ' True', - 'replication:livevolume': ' True', - 'replication_type': ' sync'} - res = self.driver._create_replications(mock_api, vol, scvol) - mock_api.create_live_volume.assert_called_with( - scvol, '12345', False, True, False, 'cinderqos', 'remoteqos') - self.assertEqual(model_update, res) - - self.driver.backends = backends - - @mock.patch.object(storagecenter_iscsi.SCISCSIDriver, - '_get_volume_extra_specs') - def test__delete_replications(self, - mock_get_volume_extra_specs, - mock_close_connection, - mock_open_connection, - mock_init): - backends = self.driver.backends - vol = {'id': fake.VOLUME_ID} - scvol = {'instanceId': '1'} - mock_api = mock.MagicMock() - mock_api.delete_replication = mock.MagicMock() - mock_api.find_volume = mock.MagicMock(return_value=scvol) - # Start replication disabled. Should fail immediately. - mock_get_volume_extra_specs.return_value = {} - self.driver._delete_replications(mock_api, vol) - self.assertFalse(mock_api.delete_replication.called) - # Replication enabled. No replications listed. - mock_get_volume_extra_specs.return_value = { - 'replication_enabled': ' True'} - vol = {'id': fake.VOLUME_ID, 'replication_driver_data': ''} - self.driver._delete_replications(mock_api, vol) - self.assertFalse(mock_api.delete_replication.called) - # Something to call. 
- vol = {'id': fake.VOLUME_ID, 'replication_driver_data': '12345,67890'} - self.driver._delete_replications(mock_api, vol) - mock_api.delete_replication.assert_any_call(scvol, 12345) - mock_api.delete_replication.assert_any_call(scvol, 67890) - self.driver.backends = backends - - @mock.patch.object(storagecenter_iscsi.SCISCSIDriver, - '_get_volume_extra_specs') - def test__delete_live_volume(self, - mock_get_volume_extra_specs, - mock_close_connection, - mock_open_connection, - mock_init): - backends = self.driver.backends - vol = {'id': fake.VOLUME_ID, - 'provider_id': '101.101'} - mock_api = mock.MagicMock() - sclivevol = {'instanceId': '101.102', - 'secondaryVolume': {'instanceId': '102.101', - 'instanceName': fake.VOLUME_ID}, - 'secondaryScSerialNumber': 102, - 'secondaryRole': 'Secondary'} - mock_api.get_live_volume = mock.MagicMock(return_value=sclivevol) - # No replication driver data. - ret = self.driver._delete_live_volume(mock_api, vol) - self.assertFalse(mock_api.get_live_volume.called) - self.assertFalse(ret) - # Bogus rdd - vol = {'id': fake.VOLUME_ID, - 'provider_id': '101.101', - 'replication_driver_data': ''} - ret = self.driver._delete_live_volume(mock_api, vol) - self.assertFalse(mock_api.get_live_volume.called) - self.assertFalse(ret) - # Valid delete. - mock_api.delete_live_volume = mock.MagicMock(return_value=True) - vol = {'id': fake.VOLUME_ID, - 'provider_id': '101.101', - 'replication_driver_data': '102'} - ret = self.driver._delete_live_volume(mock_api, vol) - mock_api.get_live_volume.assert_called_with('101.101', fake.VOLUME_ID) - self.assertTrue(ret) - # Wrong ssn. - vol = {'id': fake.VOLUME_ID, - 'provider_id': '101.101', - 'replication_driver_data': '103'} - ret = self.driver._delete_live_volume(mock_api, vol) - mock_api.get_live_volume.assert_called_with('101.101', fake.VOLUME_ID) - self.assertFalse(ret) - # No live volume found. 
- mock_api.get_live_volume.return_value = None - ret = self.driver._delete_live_volume(mock_api, vol) - mock_api.get_live_volume.assert_called_with('101.101', fake.VOLUME_ID) - self.assertFalse(ret) - - self.driver.backends = backends - - @mock.patch.object(storagecenter_api.SCApi, - 'create_volume', - return_value=VOLUME) - def test_create_volume(self, - mock_create_volume, - mock_close_connection, - mock_open_connection, - mock_init): - volume = {'id': fake.VOLUME_ID, 'size': 1} - self.driver.create_volume(volume) - mock_create_volume.assert_called_once_with( - fake.VOLUME_ID, 1, None, None, None, None, None) - - @mock.patch.object(storagecenter_api.SCApi, - 'find_replay_profile', - return_value='fake') - @mock.patch.object(storagecenter_api.SCApi, - 'update_cg_volumes') - @mock.patch.object(storagecenter_api.SCApi, - 'create_volume', - return_value=VOLUME) - def test_create_volume_with_group(self, - mock_create_volume, - mock_update_cg_volumes, - mock_find_replay_profile, - mock_close_connection, - mock_open_connection, - mock_init): - volume = {'id': fake.VOLUME_ID, 'size': 1, - 'group_id': fake.GROUP_ID} - self.driver.create_volume(volume) - mock_create_volume.assert_called_once_with( - fake.VOLUME_ID, 1, None, None, None, None, None) - self.assertTrue(mock_find_replay_profile.called) - self.assertTrue(mock_update_cg_volumes.called) - - @mock.patch.object(storagecenter_api.SCApi, - 'create_volume', - return_value=VOLUME) - @mock.patch.object( - volume_types, - 'get_volume_type_extra_specs', - return_value={'storagetype:volumeqos': 'volumeqos'}) - def test_create_volume_volumeqos_profile(self, - mock_extra, - mock_create_volume, - mock_close_connection, - mock_open_connection, - mock_init): - volume = {'id': fake.VOLUME_ID, 'size': 1, 'volume_type_id': 'abc'} - self.driver.create_volume(volume) - mock_create_volume.assert_called_once_with( - fake.VOLUME_ID, 1, None, None, 'volumeqos', None, None) - - @mock.patch.object(storagecenter_api.SCApi, - 
'create_volume', - return_value=VOLUME) - @mock.patch.object( - volume_types, - 'get_volume_type_extra_specs', - return_value={'storagetype:groupqos': 'groupqos'}) - def test_create_volume_groupqos_profile(self, - mock_extra, - mock_create_volume, - mock_close_connection, - mock_open_connection, - mock_init): - volume = {'id': fake.VOLUME_ID, 'size': 1, 'volume_type_id': 'abc'} - self.driver.create_volume(volume) - mock_create_volume.assert_called_once_with( - fake.VOLUME_ID, 1, None, None, None, 'groupqos', None) - - @mock.patch.object(storagecenter_api.SCApi, - 'create_volume', - return_value=VOLUME) - @mock.patch.object( - volume_types, - 'get_volume_type_extra_specs', - return_value={'storagetype:datareductionprofile': 'drprofile'}) - def test_create_volume_data_reduction_profile(self, - mock_extra, - mock_create_volume, - mock_close_connection, - mock_open_connection, - mock_init): - volume = {'id': fake.VOLUME_ID, 'size': 1, 'volume_type_id': 'abc'} - self.driver.create_volume(volume) - mock_create_volume.assert_called_once_with( - fake.VOLUME_ID, 1, None, None, None, None, 'drprofile') - - @mock.patch.object(storagecenter_api.SCApi, - 'create_volume', - return_value=VOLUME) - @mock.patch.object( - volume_types, - 'get_volume_type_extra_specs', - return_value={'storagetype:storageprofile': 'HighPriority'}) - def test_create_volume_storage_profile(self, - mock_extra, - mock_create_volume, - mock_close_connection, - mock_open_connection, - mock_init): - volume = {'id': fake.VOLUME_ID, 'size': 1, 'volume_type_id': 'abc'} - self.driver.create_volume(volume) - mock_create_volume.assert_called_once_with( - fake.VOLUME_ID, 1, "HighPriority", None, None, None, None) - - @mock.patch.object(storagecenter_api.SCApi, - 'create_volume', - return_value=VOLUME) - @mock.patch.object( - volume_types, - 'get_volume_type_extra_specs', - return_value={'storagetype:replayprofiles': 'Daily'}) - def test_create_volume_replay_profiles(self, - mock_extra, - mock_create_volume, - 
mock_close_connection, - mock_open_connection, - mock_init): - volume = {'id': fake.VOLUME_ID, 'size': 1, 'volume_type_id': 'abc'} - self.driver.create_volume(volume) - mock_create_volume.assert_called_once_with( - fake.VOLUME_ID, 1, None, 'Daily', None, None, None) - - @mock.patch.object(storagecenter_api.SCApi, - 'create_volume', - return_value=VOLUME) - @mock.patch.object(storagecenter_iscsi.SCISCSIDriver, - '_create_replications', - return_value={'replication_status': 'enabled', - 'replication_driver_data': 'ssn'}) - def test_create_volume_replication(self, - mock_create_replications, - mock_create_volume, - mock_close_connection, - mock_open_connection, - mock_init): - volume = {'id': fake.VOLUME_ID, 'size': 1} - ret = self.driver.create_volume(volume) - self.assertEqual({'replication_status': 'enabled', - 'replication_driver_data': 'ssn', - 'provider_id': self.VOLUME[u'instanceId']}, ret) - - @mock.patch.object(storagecenter_api.SCApi, - 'create_volume', - return_value=VOLUME) - @mock.patch.object(storagecenter_api.SCApi, - 'delete_volume') - @mock.patch.object(storagecenter_iscsi.SCISCSIDriver, - '_create_replications') - def test_create_volume_replication_raises(self, - mock_create_replications, - mock_delete_volume, - mock_create_volume, - mock_close_connection, - mock_open_connection, - mock_init): - volume = {'id': fake.VOLUME_ID, 'size': 1} - mock_create_replications.side_effect = ( - exception.VolumeBackendAPIException(data='abc')) - self.assertRaises(exception.VolumeBackendAPIException, - self.driver.create_volume, - volume) - self.assertTrue(mock_delete_volume.called) - - @mock.patch.object(storagecenter_api.SCApi, - 'create_volume', - return_value=None) - @mock.patch.object(storagecenter_api.SCApi, - 'delete_volume') - def test_create_volume_failure(self, - mock_delete_volume, - mock_create_volume, - mock_close_connection, - mock_open_connection, - mock_init): - volume = {'id': fake.VOLUME_ID, 'size': 1} - 
self.assertRaises(exception.VolumeBackendAPIException, - self.driver.create_volume, volume) - self.assertTrue(mock_delete_volume.called) - - @mock.patch.object(storagecenter_iscsi.SCISCSIDriver, - '_delete_replications') - @mock.patch.object(storagecenter_api.SCApi, - 'delete_volume', - return_value=True) - @mock.patch.object(storagecenter_iscsi.SCISCSIDriver, - '_get_replication_specs', - return_value={'enabled': True, - 'live': False}) - def test_delete_volume(self, - mock_get_replication_specs, - mock_delete_volume, - mock_delete_replications, - mock_close_connection, - mock_open_connection, - mock_init): - volume = {'id': fake.VOLUME_ID} - self.driver.delete_volume(volume) - mock_delete_volume.assert_called_once_with(fake.VOLUME_ID, None) - self.assertTrue(mock_delete_replications.called) - self.assertEqual(1, mock_delete_replications.call_count) - volume = {'id': fake.VOLUME_ID, 'provider_id': '1.1'} - self.driver.delete_volume(volume) - mock_delete_volume.assert_called_with(fake.VOLUME_ID, '1.1') - self.assertTrue(mock_delete_replications.called) - self.assertEqual(2, mock_delete_replications.call_count) - - @mock.patch.object(storagecenter_iscsi.SCISCSIDriver, - '_delete_replications') - @mock.patch.object(storagecenter_api.SCApi, - 'delete_volume', - return_value=True) - @mock.patch.object(storagecenter_iscsi.SCISCSIDriver, - '_get_replication_specs', - return_value={'enabled': True, - 'live': False}) - def test_delete_volume_migrating(self, - mock_get_replication_specs, - mock_delete_volume, - mock_delete_replications, - mock_close_connection, - mock_open_connection, - mock_init): - volume = {'id': fake.VOLUME_ID, '_name_id': fake.VOLUME2_ID, - 'provider_id': '12345.100', 'migration_status': 'deleting'} - self.driver.delete_volume(volume) - mock_delete_volume.assert_called_once_with(fake.VOLUME2_ID, None) - - @mock.patch.object(storagecenter_iscsi.SCISCSIDriver, - '_delete_live_volume') - @mock.patch.object(storagecenter_api.SCApi, - 'delete_volume', - 
return_value=True) - @mock.patch.object(storagecenter_iscsi.SCISCSIDriver, - '_get_replication_specs', - return_value={'enabled': True, - 'live': True}) - def test_delete_volume_live_volume(self, - mock_get_replication_specs, - mock_delete_volume, - mock_delete_live_volume, - mock_close_connection, - mock_open_connection, - mock_init): - volume = {'id': fake.VOLUME_ID, 'provider_id': '1.1'} - self.driver.delete_volume(volume) - mock_delete_volume.assert_called_with(fake.VOLUME_ID, '1.1') - self.assertTrue(mock_delete_live_volume.called) - - @mock.patch.object(storagecenter_iscsi.SCISCSIDriver, - '_delete_replications') - @mock.patch.object(storagecenter_api.SCApi, - 'delete_volume', - return_value=False) - @mock.patch.object(storagecenter_iscsi.SCISCSIDriver, - '_get_replication_specs', - return_value={'enabled': True, - 'live': False}) - def test_delete_volume_failure(self, - mock_get_replication_specs, - mock_delete_volume, - mock_delete_replications, - mock_close_connection, - mock_open_connection, - mock_init): - volume = {'id': fake.VOLUME_ID, 'size': 1} - self.assertRaises(exception.VolumeIsBusy, - self.driver.delete_volume, - volume) - self.assertTrue(mock_delete_replications.called) - - @mock.patch.object(storagecenter_api.SCApi, - 'find_server', - return_value=None) - @mock.patch.object(storagecenter_api.SCApi, - 'create_server', - return_value=SCSERVER) - @mock.patch.object(storagecenter_api.SCApi, - 'find_volume', - return_value=VOLUME) - @mock.patch.object(storagecenter_api.SCApi, - 'get_volume', - return_value=VOLUME) - @mock.patch.object(storagecenter_api.SCApi, - 'map_volume', - return_value=MAPPINGS[0]) - @mock.patch.object(storagecenter_api.SCApi, - 'find_iscsi_properties', - return_value=ISCSI_PROPERTIES) - def test_initialize_connection(self, - mock_find_iscsi_props, - mock_map_volume, - mock_get_volume, - mock_find_volume, - mock_create_server, - mock_find_server, - mock_close_connection, - mock_open_connection, - mock_init): - provider_id = 
self.VOLUME[u'instanceId'] - volume = {'id': fake.VOLUME_ID, - 'provider_id': provider_id} - connector = self.connector - data = self.driver.initialize_connection(volume, connector) - self.assertEqual('iscsi', data['driver_volume_type']) - # verify find_volume has been called and that is has been called twice - mock_find_volume.assert_called_once_with( - fake.VOLUME_ID, provider_id, False) - mock_get_volume.assert_called_once_with(provider_id) - expected = {'data': self.ISCSI_PROPERTIES, - 'driver_volume_type': 'iscsi'} - self.assertEqual(expected, data, 'Unexpected return value') - - @mock.patch.object(storagecenter_api.SCApi, - 'find_server', - return_value=None) - @mock.patch.object(storagecenter_api.SCApi, - 'create_server', - return_value=SCSERVER) - @mock.patch.object(storagecenter_api.SCApi, - 'find_volume', - return_value=VOLUME) - @mock.patch.object(storagecenter_api.SCApi, - 'get_volume', - return_value=VOLUME) - @mock.patch.object(storagecenter_api.SCApi, - 'map_volume', - return_value=MAPPINGS[0]) - @mock.patch.object(storagecenter_api.SCApi, - 'find_iscsi_properties', - return_value=ISCSI_PROPERTIES) - def test_initialize_connection_multi_path(self, - mock_find_iscsi_props, - mock_map_volume, - mock_get_volume, - mock_find_volume, - mock_create_server, - mock_find_server, - mock_close_connection, - mock_open_connection, - mock_init): - # Test case where connection is multipath - provider_id = self.VOLUME[u'instanceId'] - volume = {'id': fake.VOLUME_ID, - 'provider_id': provider_id} - connector = self.connector_multipath - - data = self.driver.initialize_connection(volume, connector) - self.assertEqual('iscsi', data['driver_volume_type']) - # verify find_volume has been called and that is has been called twice - mock_find_volume.called_once_with(fake.VOLUME_ID, provider_id) - mock_get_volume.called_once_with(provider_id) - props = self.ISCSI_PROPERTIES.copy() - expected = {'data': props, - 'driver_volume_type': 'iscsi'} - self.assertEqual(expected, 
data, 'Unexpected return value') - - @mock.patch.object(storagecenter_api.SCApi, - 'find_server', - return_value=SCSERVER) - @mock.patch.object(storagecenter_api.SCApi, - 'find_volume', - return_value=VOLUME) - @mock.patch.object(storagecenter_api.SCApi, - 'map_volume', - return_value=MAPPINGS) - @mock.patch.object(storagecenter_api.SCApi, - 'find_iscsi_properties', - return_value=None) - def test_initialize_connection_no_iqn(self, - mock_find_iscsi_properties, - mock_map_volume, - mock_find_volume, - mock_find_server, - mock_close_connection, - mock_open_connection, - mock_init): - volume = {'id': fake.VOLUME_ID} - connector = {} - mock_find_iscsi_properties.side_effect = Exception('abc') - self.assertRaises(exception.VolumeBackendAPIException, - self.driver.initialize_connection, - volume, - connector) - - @mock.patch.object(storagecenter_api.SCApi, - 'find_server', - return_value=None) - @mock.patch.object(storagecenter_api.SCApi, - 'create_server', - return_value=None) - @mock.patch.object(storagecenter_api.SCApi, - 'find_volume', - return_value=VOLUME) - @mock.patch.object(storagecenter_api.SCApi, - 'map_volume', - return_value=MAPPINGS) - @mock.patch.object(storagecenter_api.SCApi, - 'find_iscsi_properties', - return_value=None) - def test_initialize_connection_no_server(self, - mock_find_iscsi_properties, - mock_map_volume, - mock_find_volume, - mock_create_server, - mock_find_server, - mock_close_connection, - mock_open_connection, - mock_init): - volume = {'id': fake.VOLUME_ID} - connector = {} - self.assertRaises(exception.VolumeBackendAPIException, - self.driver.initialize_connection, - volume, - connector) - - @mock.patch.object(storagecenter_api.SCApi, - 'find_server', - return_value=SCSERVER) - @mock.patch.object(storagecenter_api.SCApi, - 'find_volume', - return_value=None) - @mock.patch.object(storagecenter_api.SCApi, - 'map_volume', - return_value=MAPPINGS) - @mock.patch.object(storagecenter_api.SCApi, - 'find_iscsi_properties', - 
return_value=None) - def test_initialize_connection_vol_not_found(self, - mock_find_iscsi_properties, - mock_map_volume, - mock_find_volume, - mock_find_server, - mock_close_connection, - mock_open_connection, - mock_init): - volume = {'name': fake.VOLUME_ID} - connector = {} - self.assertRaises(exception.VolumeBackendAPIException, - self.driver.initialize_connection, - volume, - connector) - - @mock.patch.object(storagecenter_api.SCApi, - 'find_server', - return_value=None) - @mock.patch.object(storagecenter_api.SCApi, - 'create_server', - return_value=SCSERVER) - @mock.patch.object(storagecenter_api.SCApi, - 'find_volume', - return_value=VOLUME) - @mock.patch.object(storagecenter_api.SCApi, - 'map_volume', - return_value=None) - @mock.patch.object(storagecenter_api.SCApi, - 'find_iscsi_properties', - return_value=ISCSI_PROPERTIES) - def test_initialize_connection_map_vol_fail(self, - mock_find_iscsi_props, - mock_map_volume, - mock_find_volume, - mock_create_server, - mock_find_server, - mock_close_connection, - mock_open_connection, - mock_init): - # Test case where map_volume returns None (no mappings) - volume = {'id': fake.VOLUME_ID} - connector = self.connector - self.assertRaises(exception.VolumeBackendAPIException, - self.driver.initialize_connection, - volume, - connector) - - @mock.patch.object(storagecenter_api.SCApi, - 'find_server', - return_value=None) - @mock.patch.object(storagecenter_api.SCApi, - 'create_server', - return_value=SCSERVER) - @mock.patch.object(storagecenter_api.SCApi, - 'find_volume', - return_value=VOLUME) - @mock.patch.object(storagecenter_api.SCApi, - 'get_volume', - return_value=VOLUME) - @mock.patch.object(storagecenter_api.SCApi, - 'map_volume', - return_value=MAPPINGS[0]) - @mock.patch.object(storagecenter_api.SCApi, - 'find_iscsi_properties', - return_value=ISCSI_PROPERTIES) - @mock.patch.object(storagecenter_iscsi.SCISCSIDriver, - '_is_live_vol') - @mock.patch.object(storagecenter_iscsi.SCISCSIDriver, - 
'initialize_secondary') - def test_initialize_connection_live_volume(self, - mock_initialize_secondary, - mock_is_live_vol, - mock_find_iscsi_props, - mock_map_volume, - mock_get_volume, - mock_find_volume, - mock_create_server, - mock_find_server, - mock_close_connection, - mock_open_connection, - mock_init): - volume = {'id': fake.VOLUME_ID} - connector = self.connector - mock_is_live_vol.return_value = True - lvol_properties = {'access_mode': 'rw', - 'target_discovered': False, - 'target_iqn': - u'iqn:1', - 'target_iqns': - [ - u'iqn:1', - u'iqn:2'], - 'target_lun': 1, - 'target_luns': [1, 1], - 'target_portal': u'192.168.1.21:3260', - 'target_portals': [u'192.168.1.21:3260', - u'192.168.1.22:3260']} - mock_initialize_secondary.return_value = lvol_properties - props = self.ISCSI_PROPERTIES.copy() - props['target_iqns'] += lvol_properties['target_iqns'] - props['target_luns'] += lvol_properties['target_luns'] - props['target_portals'] += lvol_properties['target_portals'] - ret = self.driver.initialize_connection(volume, connector) - expected = {'data': props, - 'driver_volume_type': 'iscsi'} - self.assertEqual(expected, ret) - - @mock.patch.object(storagecenter_api.SCApi, - 'find_server', - return_value=None) - @mock.patch.object(storagecenter_api.SCApi, - 'create_server', - return_value=SCSERVER) - @mock.patch.object(storagecenter_api.SCApi, - 'find_volume') - @mock.patch.object(storagecenter_api.SCApi, - 'get_volume') - @mock.patch.object(storagecenter_api.SCApi, - 'map_volume', - return_value=MAPPINGS[0]) - @mock.patch.object(storagecenter_api.SCApi, - 'find_iscsi_properties') - @mock.patch.object(storagecenter_api.SCApi, - 'get_live_volume') - @mock.patch.object(storagecenter_iscsi.SCISCSIDriver, - '_is_live_vol') - @mock.patch.object(storagecenter_iscsi.SCISCSIDriver, - 'initialize_secondary') - def test_initialize_connection_live_volume_afo(self, - mock_initialize_secondary, - mock_is_live_vol, - mock_get_live_vol, - mock_find_iscsi_props, - 
mock_map_volume, - mock_get_volume, - mock_find_volume, - mock_create_server, - mock_find_server, - mock_close_connection, - mock_open_connection, - mock_init): - volume = {'id': fake.VOLUME_ID, 'provider_id': '101.101'} - scvol = {'instanceId': '102.101'} - mock_find_volume.return_value = scvol - mock_get_volume.return_value = scvol - connector = self.connector - sclivevol = {'instanceId': '101.10001', - 'primaryVolume': {'instanceId': '101.101', - 'instanceName': fake.VOLUME_ID}, - 'primaryScSerialNumber': 101, - 'secondaryVolume': {'instanceId': '102.101', - 'instanceName': fake.VOLUME_ID}, - 'secondaryScSerialNumber': 102, - 'secondaryRole': 'Activated'} - mock_is_live_vol.return_value = True - mock_get_live_vol.return_value = sclivevol - props = { - 'access_mode': 'rw', - 'target_discovered': False, - 'target_iqn': u'iqn:1', - 'target_iqns': [u'iqn:1', - u'iqn:2'], - 'target_lun': 1, - 'target_luns': [1, 1], - 'target_portal': u'192.168.1.21:3260', - 'target_portals': [u'192.168.1.21:3260', - u'192.168.1.22:3260'] - } - mock_find_iscsi_props.return_value = props - ret = self.driver.initialize_connection(volume, connector) - expected = {'data': props, - 'driver_volume_type': 'iscsi'} - expected['data']['discard'] = True - self.assertEqual(expected, ret) - self.assertFalse(mock_initialize_secondary.called) - - @mock.patch.object(storagecenter_iscsi.SCISCSIDriver, - '_get_replication_specs', - return_value={'enabled': True, 'live': True}) - def test_is_live_vol(self, - mock_get_replication_specs, - mock_close_connection, - mock_open_connection, - mock_init): - volume = {'id': fake.VOLUME_ID, - 'provider_id': '101.1'} - ret = self.driver._is_live_vol(volume) - self.assertTrue(ret) - - @mock.patch.object(storagecenter_iscsi.SCISCSIDriver, - '_get_replication_specs', - return_value={'enabled': True, 'live': False}) - def test_is_live_vol_repl_not_live(self, - mock_get_replication_specs, - mock_close_connection, - mock_open_connection, - mock_init): - volume = {'id': 
fake.VOLUME_ID} - ret = self.driver._is_live_vol(volume) - self.assertFalse(ret) - - @mock.patch.object(storagecenter_iscsi.SCISCSIDriver, - '_get_replication_specs', - return_value={'enabled': False, 'live': False}) - def test_is_live_vol_no_repl(self, - mock_get_replication_specs, - mock_close_connection, - mock_open_connection, - mock_init): - volume = {'id': fake.VOLUME_ID} - ret = self.driver._is_live_vol(volume) - self.assertFalse(ret) - - def test_initialize_secondary(self, - mock_close_connection, - mock_open_connection, - mock_init): - sclivevol = {'instanceId': '101.101', - 'secondaryVolume': {'instanceId': '102.101', - 'instanceName': fake.VOLUME_ID}, - 'secondaryScSerialNumber': 102} - - mock_api = mock.MagicMock() - mock_api.find_server = mock.MagicMock(return_value=self.SCSERVER) - mock_api.map_secondary_volume = mock.MagicMock( - return_value=self.VOLUME) - mock_api.find_iscsi_properties = mock.MagicMock( - return_value=self.ISCSI_PROPERTIES) - mock_api.get_volume = mock.MagicMock(return_value=self.VOLUME) - ret = self.driver.initialize_secondary(mock_api, sclivevol, 'iqn') - self.assertEqual(self.ISCSI_PROPERTIES, ret) - - def test_initialize_secondary_create_server(self, - mock_close_connection, - mock_open_connection, - mock_init): - sclivevol = {'instanceId': '101.101', - 'secondaryVolume': {'instanceId': '102.101', - 'instanceName': fake.VOLUME_ID}, - 'secondaryScSerialNumber': 102} - mock_api = mock.MagicMock() - mock_api.find_server = mock.MagicMock(return_value=None) - mock_api.create_server = mock.MagicMock(return_value=self.SCSERVER) - mock_api.map_secondary_volume = mock.MagicMock( - return_value=self.VOLUME) - mock_api.find_iscsi_properties = mock.MagicMock( - return_value=self.ISCSI_PROPERTIES) - mock_api.get_volume = mock.MagicMock(return_value=self.VOLUME) - ret = self.driver.initialize_secondary(mock_api, sclivevol, 'iqn') - self.assertEqual(self.ISCSI_PROPERTIES, ret) - - def test_initialize_secondary_no_server(self, - 
mock_close_connection, - mock_open_connection, - mock_init): - sclivevol = {'instanceId': '101.101', - 'secondaryVolume': {'instanceId': '102.101', - 'instanceName': fake.VOLUME_ID}, - 'secondaryScSerialNumber': 102} - mock_api = mock.MagicMock() - mock_api.find_server = mock.MagicMock(return_value=None) - mock_api.create_server = mock.MagicMock(return_value=None) - expected = {'target_discovered': False, - 'target_iqn': None, - 'target_iqns': [], - 'target_portal': None, - 'target_portals': [], - 'target_lun': None, - 'target_luns': [], - } - ret = self.driver.initialize_secondary(mock_api, sclivevol, 'iqn') - self.assertEqual(expected, ret) - - def test_initialize_secondary_map_fail(self, - mock_close_connection, - mock_open_connection, - mock_init): - sclivevol = {'instanceId': '101.101', - 'secondaryVolume': {'instanceId': '102.101', - 'instanceName': fake.VOLUME_ID}, - 'secondaryScSerialNumber': 102} - mock_api = mock.MagicMock() - mock_api.find_server = mock.MagicMock(return_value=self.SCSERVER) - mock_api.map_secondary_volume = mock.MagicMock(return_value=None) - expected = {'target_discovered': False, - 'target_iqn': None, - 'target_iqns': [], - 'target_portal': None, - 'target_portals': [], - 'target_lun': None, - 'target_luns': [], - } - ret = self.driver.initialize_secondary(mock_api, sclivevol, 'iqn') - self.assertEqual(expected, ret) - - def test_initialize_secondary_vol_not_found(self, - mock_close_connection, - mock_open_connection, - mock_init): - sclivevol = {'instanceId': '101.101', - 'secondaryVolume': {'instanceId': '102.101', - 'instanceName': fake.VOLUME_ID}, - 'secondaryScSerialNumber': 102} - mock_api = mock.MagicMock() - mock_api.find_server = mock.MagicMock(return_value=self.SCSERVER) - mock_api.map_secondary_volume = mock.MagicMock( - return_value=self.VOLUME) - mock_api.get_volume = mock.MagicMock(return_value=None) - expected = {'target_discovered': False, - 'target_iqn': None, - 'target_iqns': [], - 'target_portal': None, - 
'target_portals': [], - 'target_lun': None, - 'target_luns': [], - } - ret = self.driver.initialize_secondary(mock_api, sclivevol, 'iqn') - self.assertEqual(expected, ret) - - def test_terminate_secondary(self, - mock_close_connection, - mock_open_connection, - mock_init): - sclivevol = {'instanceId': '101.101', - 'secondaryVolume': {'instanceId': '102.101', - 'instanceName': fake.VOLUME_ID}, - 'secondaryScSerialNumber': 102} - mock_api = mock.MagicMock() - mock_api.find_server = mock.MagicMock(return_value=self.SCSERVER) - mock_api.get_volume = mock.MagicMock(return_value=self.VOLUME) - mock_api.unmap_volume = mock.MagicMock() - self.driver.terminate_secondary(mock_api, sclivevol, 'iqn') - mock_api.find_server.assert_called_once_with('iqn', 102) - mock_api.get_volume.assert_called_once_with('102.101') - mock_api.unmap_volume.assert_called_once_with(self.VOLUME, - self.SCSERVER) - - @mock.patch.object(storagecenter_api.SCApi, - 'find_server', - return_value=SCSERVER) - @mock.patch.object(storagecenter_api.SCApi, - 'find_volume', - return_value=VOLUME) - @mock.patch.object(storagecenter_api.SCApi, - 'unmap_volume', - return_value=True) - def test_terminate_connection(self, - mock_unmap_volume, - mock_find_volume, - mock_find_server, - mock_close_connection, - mock_open_connection, - mock_init): - volume = {'id': fake.VOLUME_ID} - connector = self.connector - res = self.driver.terminate_connection(volume, connector) - mock_unmap_volume.assert_called_once_with(self.VOLUME, self.SCSERVER) - self.assertIsNone(res, 'None expected') - - @mock.patch.object(storagecenter_api.SCApi, - 'find_server', - return_value=SCSERVER) - @mock.patch.object(storagecenter_api.SCApi, - 'find_volume', - return_value=VOLUME) - @mock.patch.object(storagecenter_api.SCApi, - 'unmap_volume', - return_value=True) - @mock.patch.object(storagecenter_iscsi.SCISCSIDriver, - '_is_live_vol') - @mock.patch.object(storagecenter_iscsi.SCISCSIDriver, - 'terminate_secondary') - 
@mock.patch.object(storagecenter_api.SCApi, - 'get_live_volume') - def test_terminate_connection_live_volume(self, - mock_get_live_vol, - mock_terminate_secondary, - mock_is_live_vol, - mock_unmap_volume, - mock_find_volume, - mock_find_server, - mock_close_connection, - mock_open_connection, - mock_init): - volume = {'id': fake.VOLUME_ID} - sclivevol = {'instanceId': '101.101', - 'secondaryVolume': {'instanceId': '102.101', - 'instanceName': fake.VOLUME_ID}, - 'secondaryScSerialNumber': 102, - 'secondaryRole': 'Secondary'} - mock_is_live_vol.return_value = True - mock_get_live_vol.return_value = sclivevol - connector = self.connector - res = self.driver.terminate_connection(volume, connector) - mock_unmap_volume.assert_called_once_with(self.VOLUME, self.SCSERVER) - self.assertIsNone(res, 'None expected') - self.assertTrue(mock_terminate_secondary.called) - - @mock.patch.object(storagecenter_api.SCApi, - 'find_volume', - return_value=VOLUME) - @mock.patch.object(storagecenter_api.SCApi, - 'unmap_all', - return_value=True) - def test_terminate_connection_no_server(self, - mock_unmap_all, - mock_find_volume, - mock_close_connection, - mock_open_connection, - mock_init): - volume = {'id': fake.VOLUME_ID, 'provider_id': '101.101'} - connector = {'initiator': ''} - res = self.driver.terminate_connection(volume, connector) - mock_find_volume.assert_called_once_with(fake.VOLUME_ID, '101.101', - False) - mock_unmap_all.assert_called_once_with(self.VOLUME) - self.assertIsNone(res) - - @mock.patch.object(storagecenter_api.SCApi, - 'find_server', - return_value=SCSERVER) - @mock.patch.object(storagecenter_api.SCApi, - 'find_volume', - return_value=None) - @mock.patch.object(storagecenter_api.SCApi, - 'unmap_volume', - return_value=True) - def test_terminate_connection_no_volume(self, - mock_unmap_volume, - mock_find_volume, - mock_find_server, - mock_close_connection, - mock_open_connection, - mock_init): - volume = {'id': fake.VOLUME_ID} - connector = {'initiator': 'fake'} - 
self.assertRaises(exception.VolumeBackendAPIException, - self.driver.terminate_connection, - volume, - connector) - - @mock.patch.object(storagecenter_api.SCApi, - 'find_server', - return_value=SCSERVER) - @mock.patch.object(storagecenter_api.SCApi, - 'find_volume', - return_value=VOLUME) - @mock.patch.object(storagecenter_api.SCApi, - 'unmap_volume', - return_value=False) - def test_terminate_connection_failure(self, - mock_unmap_volume, - mock_find_volume, - mock_find_server, - mock_close_connection, - mock_open_connection, - mock_init): - volume = {'id': fake.VOLUME_ID} - connector = {'initiator': 'fake'} - self.assertRaises(exception.VolumeBackendAPIException, - self.driver.terminate_connection, - volume, - connector) - - def _simple_volume(self, **kwargs): - updates = {'display_name': fake.VOLUME_NAME, - 'id': fake.VOLUME_ID, - 'provider_id': self.VOLUME[u'instanceId']} - updates.update(kwargs) - - return fake_volume.fake_volume_obj(self._context, **updates) - - def _simple_snapshot(self, **kwargs): - updates = {'id': fake.SNAPSHOT_ID, - 'display_name': fake.SNAPSHOT_NAME, - 'status': 'available', - 'provider_location': None, - 'volume_size': 1} - - updates.update(kwargs) - snapshot = fake_snapshot.fake_snapshot_obj(self._context, **updates) - volume = self._simple_volume() - snapshot.volume = volume - - return snapshot - - @mock.patch.object(storagecenter_api.SCApi, - 'find_volume', - return_value=VOLUME) - @mock.patch.object(storagecenter_api.SCApi, - 'create_replay', - return_value='fake') - def test_create_snapshot(self, - mock_create_replay, - mock_find_volume, - mock_close_connection, - mock_open_connection, - mock_init): - provider_id = self.VOLUME[u'instanceId'] - snapshot = self._simple_snapshot() - expected = {'status': 'available', - 'provider_id': provider_id} - ret = self.driver.create_snapshot(snapshot) - self.assertEqual(expected, ret) - - @mock.patch.object(storagecenter_api.SCApi, - 'find_volume', - return_value=None) - 
@mock.patch.object(storagecenter_api.SCApi, - 'create_replay', - return_value=None) - def test_create_snapshot_no_volume(self, - mock_create_replay, - mock_find_volume, - mock_close_connection, - mock_open_connection, - mock_init): - snapshot = self._simple_snapshot() - self.assertRaises(exception.VolumeBackendAPIException, - self.driver.create_snapshot, - snapshot) - - @mock.patch.object(storagecenter_api.SCApi, - 'find_volume', - return_value=VOLUME) - @mock.patch.object(storagecenter_api.SCApi, - 'create_replay', - return_value=None) - def test_create_snapshot_failure(self, - mock_create_replay, - mock_find_volume, - mock_close_connection, - mock_open_connection, - mock_init): - snapshot = self._simple_snapshot() - self.assertRaises(exception.VolumeBackendAPIException, - self.driver.create_snapshot, - snapshot) - - @mock.patch.object(storagecenter_iscsi.SCISCSIDriver, - '_create_replications') - @mock.patch.object(storagecenter_api.SCApi, - 'find_replay_profile') - @mock.patch.object(storagecenter_api.SCApi, - 'find_volume', - return_value=VOLUME) - @mock.patch.object(storagecenter_api.SCApi, - 'find_replay', - return_value='fake') - @mock.patch.object(storagecenter_api.SCApi, - 'create_view_volume', - return_value=VOLUME) - def test_create_volume_from_snapshot(self, - mock_create_view_volume, - mock_find_replay, - mock_find_volume, - mock_find_replay_profile, - mock_create_replications, - mock_close_connection, - mock_open_connection, - mock_init): - model_update = {'something': 'something'} - mock_create_replications.return_value = model_update - volume = {'id': fake.VOLUME_ID, 'size': 1} - snapshot = {'id': fake.SNAPSHOT_ID, 'volume_id': fake.VOLUME_ID, - 'volume_size': 1} - res = self.driver.create_volume_from_snapshot(volume, snapshot) - mock_create_view_volume.assert_called_once_with( - fake.VOLUME_ID, 'fake', None, None, None, None) - self.assertTrue(mock_find_replay.called) - self.assertTrue(mock_find_volume.called) - 
self.assertFalse(mock_find_replay_profile.called) - # This just makes sure that we created - self.assertTrue(mock_create_replications.called) - self.assertEqual(model_update, res) - - @mock.patch.object(storagecenter_iscsi.SCISCSIDriver, - '_create_replications') - @mock.patch.object(storagecenter_api.SCApi, - 'find_replay_profile') - @mock.patch.object(storagecenter_api.SCApi, - 'find_volume') - @mock.patch.object(storagecenter_api.SCApi, - 'find_replay') - @mock.patch.object(storagecenter_api.SCApi, - 'create_view_volume') - @mock.patch.object(storagecenter_iscsi.SCISCSIDriver, - '_get_volume_extra_specs') - def test_create_volume_from_snapshot_with_profiles( - self, mock_get_volume_extra_specs, mock_create_view_volume, - mock_find_replay, mock_find_volume, mock_find_replay_profile, - mock_create_replications, mock_close_connection, - mock_open_connection, mock_init): - mock_get_volume_extra_specs.return_value = { - 'storagetype:replayprofiles': 'replayprofiles', - 'storagetype:volumeqos': 'volumeqos', - 'storagetype:groupqos': 'groupqos', - 'storagetype:datareductionprofile': 'drprofile'} - - mock_create_view_volume.return_value = self.VOLUME - mock_find_replay.return_value = 'fake' - mock_find_volume.return_value = self.VOLUME - model_update = {'something': 'something'} - mock_create_replications.return_value = model_update - volume = {'id': fake.VOLUME_ID, 'size': 1} - snapshot = {'id': fake.SNAPSHOT_ID, 'volume_id': fake.VOLUME_ID, - 'volume_size': 1} - res = self.driver.create_volume_from_snapshot(volume, snapshot) - mock_create_view_volume.assert_called_once_with( - fake.VOLUME_ID, 'fake', 'replayprofiles', 'volumeqos', 'groupqos', - 'drprofile') - self.assertTrue(mock_find_replay.called) - self.assertTrue(mock_find_volume.called) - self.assertFalse(mock_find_replay_profile.called) - # This just makes sure that we created - self.assertTrue(mock_create_replications.called) - self.assertEqual(model_update, res) - - 
@mock.patch.object(storagecenter_iscsi.SCISCSIDriver, - '_create_replications') - @mock.patch.object(storagecenter_api.SCApi, - 'find_replay_profile') - @mock.patch.object(storagecenter_api.SCApi, - 'find_volume', - return_value=VOLUME) - @mock.patch.object(storagecenter_api.SCApi, - 'find_replay', - return_value='fake') - @mock.patch.object(storagecenter_api.SCApi, - 'create_view_volume', - return_value=VOLUME) - @mock.patch.object(storagecenter_api.SCApi, - 'expand_volume', - return_value=VOLUME) - def test_create_volume_from_snapshot_expand(self, - mock_expand_volume, - mock_create_view_volume, - mock_find_replay, - mock_find_volume, - mock_find_replay_profile, - mock_create_replications, - mock_close_connection, - mock_open_connection, - mock_init): - model_update = {'something': 'something'} - mock_create_replications.return_value = model_update - volume = {'id': fake.VOLUME_ID, 'size': 2} - snapshot = {'id': fake.SNAPSHOT_ID, 'volume_id': fake.VOLUME_ID, - 'volume_size': 1} - res = self.driver.create_volume_from_snapshot(volume, snapshot) - mock_create_view_volume.assert_called_once_with( - fake.VOLUME_ID, 'fake', None, None, None, None) - self.assertTrue(mock_find_replay.called) - self.assertTrue(mock_find_volume.called) - self.assertFalse(mock_find_replay_profile.called) - # This just makes sure that we created - self.assertTrue(mock_create_replications.called) - mock_expand_volume.assert_called_once_with(self.VOLUME, 2) - self.assertEqual(model_update, res) - - @mock.patch.object(storagecenter_iscsi.SCISCSIDriver, - '_create_replications') - @mock.patch.object(storagecenter_api.SCApi, - 'find_replay_profile', - return_value='fake') - @mock.patch.object(storagecenter_api.SCApi, - 'update_cg_volumes') - @mock.patch.object(storagecenter_api.SCApi, - 'find_volume', - return_value=VOLUME) - @mock.patch.object(storagecenter_api.SCApi, - 'find_replay', - return_value='fake') - @mock.patch.object(storagecenter_api.SCApi, - 'create_view_volume', - 
return_value=VOLUME) - def test_create_volume_from_snapshot_cg(self, - mock_create_view_volume, - mock_find_replay, - mock_find_volume, - mock_update_cg_volumes, - mock_find_replay_profile, - mock_create_replications, - mock_close_connection, - mock_open_connection, - mock_init): - model_update = {'something': 'something'} - mock_create_replications.return_value = model_update - volume = {'id': fake.VOLUME_ID, - 'group_id': fake.GROUP_ID, 'size': 1} - snapshot = {'id': fake.SNAPSHOT_ID, 'volume_id': fake.VOLUME_ID, - 'volume_size': 1} - res = self.driver.create_volume_from_snapshot(volume, snapshot) - mock_create_view_volume.assert_called_once_with( - fake.VOLUME_ID, 'fake', None, None, None, None) - self.assertTrue(mock_find_replay.called) - self.assertTrue(mock_find_volume.called) - self.assertTrue(mock_find_replay_profile.called) - self.assertTrue(mock_update_cg_volumes.called) - # This just makes sure that we created - self.assertTrue(mock_create_replications.called) - self.assertEqual(model_update, res) - - @mock.patch.object(storagecenter_api.SCApi, - 'find_volume', - return_value=VOLUME) - @mock.patch.object(storagecenter_api.SCApi, - 'find_replay', - return_value='fake') - @mock.patch.object(storagecenter_api.SCApi, - 'find_replay_profile') - @mock.patch.object(storagecenter_api.SCApi, - 'create_view_volume', - return_value=None) - @mock.patch.object(storagecenter_api.SCApi, - 'delete_volume') - def test_create_volume_from_snapshot_failed(self, - mock_delete_volume, - mock_create_view_volume, - mock_find_replay_profile, - mock_find_replay, - mock_find_volume, - mock_close_connection, - mock_open_connection, - mock_init): - volume = {'id': fake.VOLUME_ID} - snapshot = {'id': fake.SNAPSHOT_ID, 'volume_id': fake.VOLUME_ID} - self.assertRaises(exception.VolumeBackendAPIException, - self.driver.create_volume_from_snapshot, - volume, snapshot) - self.assertTrue(mock_find_replay.called) - self.assertTrue(mock_find_volume.called) - 
self.assertFalse(mock_find_replay_profile.called) - self.assertTrue(mock_delete_volume.called) - - @mock.patch.object(storagecenter_iscsi.SCISCSIDriver, - '_create_replications') - @mock.patch.object(storagecenter_api.SCApi, - 'find_volume', - return_value=VOLUME) - @mock.patch.object(storagecenter_api.SCApi, - 'find_replay', - return_value='fake') - @mock.patch.object(storagecenter_api.SCApi, - 'create_view_volume', - return_value=VOLUME) - @mock.patch.object(storagecenter_api.SCApi, - 'delete_volume') - def test_create_volume_from_snapshot_failed_replication( - self, - mock_delete_volume, - mock_create_view_volume, - mock_find_replay, - mock_find_volume, - mock_create_replications, - mock_close_connection, - mock_open_connection, - mock_init): - mock_create_replications.side_effect = ( - exception.VolumeBackendAPIException(data='abc')) - volume = {'id': fake.VOLUME_ID, 'size': 1} - snapshot = {'id': fake.SNAPSHOT_ID, 'volume_id': fake.VOLUME_ID, - 'volume_size': 1} - self.assertRaises(exception.VolumeBackendAPIException, - self.driver.create_volume_from_snapshot, - volume, snapshot) - self.assertTrue(mock_delete_volume.called) - - @mock.patch.object(storagecenter_api.SCApi, - 'find_volume', - return_value=VOLUME) - @mock.patch.object(storagecenter_api.SCApi, - 'find_replay', - return_value=None) - @mock.patch.object(storagecenter_api.SCApi, - 'create_view_volume', - return_value=VOLUME) - def test_create_volume_from_snapshot_no_replay(self, - mock_create_view_volume, - mock_find_replay, - mock_find_volume, - mock_close_connection, - mock_open_connection, - mock_init): - volume = {'id': fake.VOLUME_ID} - snapshot = {'id': fake.SNAPSHOT_ID, 'volume_id': fake.VOLUME2_ID} - self.assertRaises(exception.VolumeBackendAPIException, - self.driver.create_volume_from_snapshot, - volume, snapshot) - self.assertTrue(mock_find_volume.called) - self.assertTrue(mock_find_replay.called) - self.assertFalse(mock_create_view_volume.called) - - 
@mock.patch.object(storagecenter_iscsi.SCISCSIDriver, - '_create_replications', - return_value={}) - @mock.patch.object(storagecenter_api.SCApi, - 'find_volume', - return_value=VOLUME) - @mock.patch.object(storagecenter_api.SCApi, - 'create_cloned_volume', - return_value=VOLUME) - def test_create_cloned_volume(self, - mock_create_cloned_volume, - mock_find_volume, - mock_create_replications, - mock_close_connection, - mock_open_connection, - mock_init): - provider_id = self.VOLUME[u'instanceId'] - volume = {'id': fake.VOLUME_ID, 'size': 1} - src_vref = {'id': fake.VOLUME2_ID, 'size': 1} - ret = self.driver.create_cloned_volume(volume, src_vref) - mock_create_cloned_volume.assert_called_once_with( - fake.VOLUME_ID, self.VOLUME, None, None, None, None, None) - self.assertTrue(mock_find_volume.called) - self.assertEqual({'provider_id': provider_id}, ret) - - @mock.patch.object(storagecenter_iscsi.SCISCSIDriver, - '_create_replications') - @mock.patch.object(storagecenter_api.SCApi, - 'find_volume') - @mock.patch.object(storagecenter_api.SCApi, - 'create_cloned_volume') - @mock.patch.object(storagecenter_iscsi.SCISCSIDriver, - '_get_volume_extra_specs') - def test_create_cloned_volume_with_profiles( - self, mock_get_volume_extra_specs, mock_create_cloned_volume, - mock_find_volume, mock_create_replications, mock_close_connection, - mock_open_connection, mock_init): - mock_get_volume_extra_specs.return_value = { - 'storagetype:storageprofile': 'storageprofile', - 'storagetype:replayprofiles': 'replayprofiles', - 'storagetype:volumeqos': 'volumeqos', - 'storagetype:groupqos': 'groupqos', - 'storagetype:datareductionprofile': 'drprofile'} - mock_find_volume.return_value = self.VOLUME - mock_create_cloned_volume.return_value = self.VOLUME - mock_create_replications.return_value = {} - provider_id = self.VOLUME[u'instanceId'] - volume = {'id': fake.VOLUME_ID, 'size': 1} - src_vref = {'id': fake.VOLUME2_ID, 'size': 1} - ret = self.driver.create_cloned_volume(volume, 
src_vref) - mock_create_cloned_volume.assert_called_once_with( - fake.VOLUME_ID, self.VOLUME, 'storageprofile', 'replayprofiles', - 'volumeqos', 'groupqos', 'drprofile') - self.assertTrue(mock_find_volume.called) - self.assertEqual({'provider_id': provider_id}, ret) - - @mock.patch.object(storagecenter_iscsi.SCISCSIDriver, - '_create_replications', - return_value={}) - @mock.patch.object(storagecenter_api.SCApi, - 'find_volume', - return_value=VOLUME) - @mock.patch.object(storagecenter_api.SCApi, - 'create_cloned_volume', - return_value=VOLUME) - @mock.patch.object(storagecenter_api.SCApi, - 'expand_volume', - return_value=VOLUME) - def test_create_cloned_volume_expand(self, - mock_expand_volume, - mock_create_cloned_volume, - mock_find_volume, - mock_create_replications, - mock_close_connection, - mock_open_connection, - mock_init): - provider_id = self.VOLUME[u'instanceId'] - volume = {'id': fake.VOLUME_ID, 'size': 2} - src_vref = {'id': fake.VOLUME2_ID, 'size': 1} - ret = self.driver.create_cloned_volume(volume, src_vref) - mock_create_cloned_volume.assert_called_once_with( - fake.VOLUME_ID, self.VOLUME, None, None, None, None, None) - self.assertTrue(mock_find_volume.called) - self.assertEqual({'provider_id': provider_id}, ret) - self.assertTrue(mock_expand_volume.called) - - @mock.patch.object(storagecenter_iscsi.SCISCSIDriver, - '_create_replications', - return_value={}) - @mock.patch.object(storagecenter_api.SCApi, - 'find_volume', - return_value=VOLUME) - @mock.patch.object(storagecenter_api.SCApi, - 'create_cloned_volume', - return_value=None) - @mock.patch.object(storagecenter_api.SCApi, - 'delete_volume') - def test_create_cloned_volume_failed(self, - mock_delete_volume, - mock_create_cloned_volume, - mock_find_volume, - mock_create_replications, - mock_close_connection, - mock_open_connection, - mock_init): - volume = {'id': fake.VOLUME_ID} - src_vref = {'id': fake.VOLUME2_ID} - self.assertRaises(exception.VolumeBackendAPIException, - 
self.driver.create_cloned_volume, - volume, src_vref) - self.assertTrue(mock_delete_volume.called) - - @mock.patch.object(storagecenter_iscsi.SCISCSIDriver, - '_create_replications', - return_value={}) - @mock.patch.object(storagecenter_api.SCApi, - 'find_volume', - return_value=VOLUME) - @mock.patch.object(storagecenter_api.SCApi, - 'create_cloned_volume', - return_value=VOLUME) - @mock.patch.object(storagecenter_api.SCApi, - 'delete_volume') - @mock.patch.object(storagecenter_api.SCApi, - 'expand_volume') - def test_create_cloned_volume_expand_failed(self, - mock_expand_volume, - mock_delete_volume, - mock_create_cloned_volume, - mock_find_volume, - mock_create_replications, - mock_close_connection, - mock_open_connection, - mock_init): - volume = {'id': fake.VOLUME_ID, 'size': 2} - src_vref = {'id': fake.VOLUME2_ID, 'size': 1} - mock_create_replications.side_effect = ( - exception.VolumeBackendAPIException(data='abc')) - self.assertRaises(exception.VolumeBackendAPIException, - self.driver.create_cloned_volume, - volume, src_vref) - self.assertTrue(mock_delete_volume.called) - - @mock.patch.object(storagecenter_api.SCApi, - 'delete_volume') - @mock.patch.object(storagecenter_iscsi.SCISCSIDriver, - '_create_replications') - @mock.patch.object(storagecenter_api.SCApi, - 'find_volume', - return_value=VOLUME) - @mock.patch.object(storagecenter_api.SCApi, - 'create_cloned_volume', - return_value=VOLUME) - def test_create_cloned_volume_replication_fail(self, - mock_create_cloned_volume, - mock_find_volume, - mock_create_replications, - mock_delete_volume, - mock_close_connection, - mock_open_connection, - mock_init): - mock_create_replications.side_effect = ( - exception.VolumeBackendAPIException(data='abc')) - volume = {'id': fake.VOLUME_ID, 'size': 1} - src_vref = {'id': fake.VOLUME2_ID, 'size': 1} - self.assertRaises(exception.VolumeBackendAPIException, - self.driver.create_cloned_volume, - volume, src_vref) - self.assertTrue(mock_delete_volume.called) - - 
@mock.patch.object(storagecenter_api.SCApi, - 'find_replay_profile', - return_value='fake') - @mock.patch.object(storagecenter_api.SCApi, - 'update_cg_volumes') - @mock.patch.object(storagecenter_api.SCApi, - 'find_volume', - return_value=VOLUME) - @mock.patch.object(storagecenter_api.SCApi, - 'create_cloned_volume', - return_value=VOLUME) - def test_create_cloned_volume_consistency_group(self, - mock_create_cloned_volume, - mock_find_volume, - mock_update_cg_volumes, - mock_find_replay_profile, - mock_close_connection, - mock_open_connection, - mock_init): - volume = {'id': fake.VOLUME_ID, - 'group_id': fake.CONSISTENCY_GROUP_ID, - 'size': 1} - src_vref = {'id': fake.VOLUME2_ID, 'size': 1} - self.driver.create_cloned_volume(volume, src_vref) - mock_create_cloned_volume.assert_called_once_with( - fake.VOLUME_ID, self.VOLUME, None, None, None, None, None) - self.assertTrue(mock_find_volume.called) - self.assertTrue(mock_find_replay_profile.called) - self.assertTrue(mock_update_cg_volumes.called) - - @mock.patch.object(storagecenter_api.SCApi, - 'find_volume', - return_value=None) - @mock.patch.object(storagecenter_api.SCApi, - 'create_cloned_volume', - return_value=VOLUME) - def test_create_cloned_volume_no_volume(self, - mock_create_cloned_volume, - mock_find_volume, - mock_close_connection, - mock_open_connection, - mock_init): - volume = {'id': fake.VOLUME_ID} - src_vref = {'id': fake.VOLUME2_ID} - self.assertRaises(exception.VolumeBackendAPIException, - self.driver.create_cloned_volume, - volume, src_vref) - self.assertTrue(mock_find_volume.called) - self.assertFalse(mock_create_cloned_volume.called) - - @mock.patch.object(storagecenter_api.SCApi, - 'find_volume', - return_value=VOLUME) - @mock.patch.object(storagecenter_api.SCApi, - 'delete_replay', - return_value=True) - def test_delete_snapshot(self, - mock_delete_replay, - mock_find_volume, - mock_close_connection, - mock_open_connection, - mock_init): - snapshot = {'volume_id': fake.VOLUME_ID, - 'id': 
fake.SNAPSHOT_ID} - self.driver.delete_snapshot(snapshot) - mock_delete_replay.assert_called_once_with( - self.VOLUME, fake.SNAPSHOT_ID) - - @mock.patch.object(storagecenter_api.SCApi, - 'find_volume', - return_value=None) - @mock.patch.object(storagecenter_api.SCApi, - 'delete_replay', - return_value=True) - def test_delete_snapshot_no_volume(self, - mock_delete_replay, - mock_find_volume, - mock_close_connection, - mock_open_connection, - mock_init): - snapshot = {'volume_id': fake.VOLUME_ID, - 'id': fake.SNAPSHOT_ID} - self.assertRaises(exception.VolumeBackendAPIException, - self.driver.delete_snapshot, - snapshot) - - @mock.patch.object(storagecenter_api.SCApi, - 'find_volume', - return_value=VOLUME) - def test_ensure_export(self, - mock_find_volume, - mock_close_connection, - mock_open_connection, - mock_init): - context = {} - volume = {'id': fake.VOLUME_ID, 'provider_id': 'fake'} - self.driver.ensure_export(context, volume) - mock_find_volume.assert_called_once_with(fake.VOLUME_ID, 'fake', False) - - @mock.patch.object(storagecenter_api.SCApi, - 'find_volume', - return_value=None) - def test_ensure_export_failed(self, - mock_find_volume, - mock_close_connection, - mock_open_connection, - mock_init): - context = {} - volume = {'id': fake.VOLUME_ID} - self.assertRaises(exception.VolumeBackendAPIException, - self.driver.ensure_export, - context, volume) - mock_find_volume.assert_called_once_with(fake.VOLUME_ID, None, False) - - @mock.patch.object(storagecenter_api.SCApi, - 'find_volume', - return_value=None) - def test_ensure_export_no_volume(self, - mock_find_volume, - mock_close_connection, - mock_open_connection, - mock_init): - context = {} - volume = {'id': fake.VOLUME_ID, 'provider_id': 'fake'} - self.assertRaises(exception.VolumeBackendAPIException, - self.driver.ensure_export, context, volume) - mock_find_volume.assert_called_once_with(fake.VOLUME_ID, 'fake', False) - - @mock.patch.object(storagecenter_api.SCApi, - 'find_volume', - return_value=VOLUME) 
- @mock.patch.object(storagecenter_api.SCApi, - 'expand_volume', - return_value=VOLUME) - def test_extend_volume(self, - mock_expand_volume, - mock_find_volume, - mock_close_connection, - mock_open_connection, - mock_init): - volume = {'id': fake.VOLUME_ID, 'size': 1} - new_size = 2 - self.driver.extend_volume(volume, new_size) - mock_find_volume.assert_called_once_with(fake.VOLUME_ID, None) - mock_expand_volume.assert_called_once_with(self.VOLUME, new_size) - - @mock.patch.object(storagecenter_api.SCApi, - 'find_volume', - return_value=None) - @mock.patch.object(storagecenter_api.SCApi, - 'expand_volume', - return_value=None) - def test_extend_volume_no_volume(self, - mock_expand_volume, - mock_find_volume, - mock_close_connection, - mock_open_connection, - mock_init): - volume = {'id': fake.VOLUME_ID, 'provider_id': 'fake', 'size': 1} - new_size = 2 - self.assertRaises(exception.VolumeBackendAPIException, - self.driver.extend_volume, - volume, new_size) - mock_find_volume.assert_called_once_with(fake.VOLUME_ID, 'fake') - - @mock.patch.object(storagecenter_api.SCApi, - 'find_volume', - return_value=VOLUME) - @mock.patch.object(storagecenter_api.SCApi, - 'expand_volume', - return_value=None) - def test_extend_volume_fail(self, - mock_expand_volume, - mock_find_volume, - mock_close_connection, - mock_open_connection, - mock_init): - volume = {'id': fake.VOLUME_ID, 'size': 1} - new_size = 2 - self.assertRaises(exception.VolumeBackendAPIException, - self.driver.extend_volume, volume, new_size) - mock_find_volume.assert_called_once_with(fake.VOLUME_ID, None) - mock_expand_volume.assert_called_once_with(self.VOLUME, new_size) - - @mock.patch.object(storagecenter_api.SCApi, - 'get_storage_usage', - return_value={'availableSpace': 100, 'freeSpace': 50}) - def test_update_volume_stats_with_refresh(self, - mock_get_storage_usage, - mock_close_connection, - mock_open_connection, - mock_init): - stats = self.driver.get_volume_stats(True) - self.assertEqual('iSCSI', 
stats['storage_protocol']) - self.assertTrue(mock_get_storage_usage.called) - - @mock.patch.object(storagecenter_api.SCApi, - 'get_storage_usage', - return_value={'availableSpace': 100, 'freeSpace': 50}) - def test_update_volume_stats_with_refresh_and_repl( - self, - mock_get_storage_usage, - mock_close_connection, - mock_open_connection, - mock_init): - backends = self.driver.backends - repliation_enabled = self.driver.replication_enabled - self.driver.backends = [{'a': 'a'}, {'b': 'b'}, {'c': 'c'}] - self.driver.replication_enabled = True - stats = self.driver.get_volume_stats(True) - self.assertEqual(3, stats['replication_count']) - self.assertEqual(['async', 'sync'], stats['replication_type']) - self.assertTrue(stats['replication_enabled']) - self.assertTrue(mock_get_storage_usage.called) - self.driver.backends = backends - self.driver.replication_enabled = repliation_enabled - - @mock.patch.object(storagecenter_api.SCApi, - 'get_storage_usage', - return_value={'availableSpace': 100, 'freeSpace': 50}) - def test_get_volume_stats_no_refresh(self, - mock_get_storage_usage, - mock_close_connection, - mock_open_connection, - mock_init): - stats = self.driver.get_volume_stats(False) - self.assertEqual('iSCSI', stats['storage_protocol']) - self.assertFalse(mock_get_storage_usage.called) - - @mock.patch.object(storagecenter_api.SCApi, - 'find_volume', - return_value=VOLUME) - @mock.patch.object(storagecenter_api.SCApi, - 'rename_volume', - return_value=True) - def test_update_migrated_volume(self, - mock_rename_volume, - mock_find_volume, - mock_close_connection, - mock_open_connection, - mock_init): - volume = {'id': fake.VOLUME_ID} - backend_volume = {'id': fake.VOLUME2_ID} - model_update = {'_name_id': None, - 'provider_id': self.VOLUME['instanceId']} - rt = self.driver.update_migrated_volume(None, volume, backend_volume, - 'available') - mock_rename_volume.assert_called_once_with(self.VOLUME, fake.VOLUME_ID) - self.assertEqual(model_update, rt) - - 
@mock.patch.object(storagecenter_api.SCApi, - 'find_volume', - return_value=VOLUME) - @mock.patch.object(storagecenter_api.SCApi, - 'rename_volume', - return_value=False) - def test_update_migrated_volume_rename_fail(self, - mock_rename_volume, - mock_find_volume, - mock_close_connection, - mock_open_connection, - mock_init): - volume = {'id': fake.VOLUME_ID} - backend_volume = {'id': fake.VOLUME2_ID, - '_name_id': fake.VOLUME2_NAME_ID} - rt = self.driver.update_migrated_volume(None, volume, backend_volume, - 'available') - mock_rename_volume.assert_called_once_with(self.VOLUME, fake.VOLUME_ID) - self.assertEqual({'_name_id': fake.VOLUME2_NAME_ID}, rt) - - def test_update_migrated_volume_no_volume_id(self, - mock_close_connection, - mock_open_connection, - mock_init): - volume = {'id': None} - backend_volume = {'id': fake.VOLUME2_ID, - '_name_id': fake.VOLUME2_NAME_ID} - rt = self.driver.update_migrated_volume(None, volume, backend_volume, - 'available') - self.assertEqual({'_name_id': fake.VOLUME2_NAME_ID}, rt) - - @mock.patch.object(storagecenter_api.SCApi, - 'find_volume', - return_value=None) - def test_update_migrated_volume_no_backend_id(self, - mock_find_volume, - mock_close_connection, - mock_open_connection, - mock_init): - volume = {'id': fake.VOLUME_ID} - backend_volume = {'id': None, '_name_id': None} - rt = self.driver.update_migrated_volume(None, volume, backend_volume, - 'available') - mock_find_volume.assert_called_once_with(None, None) - self.assertEqual({'_name_id': None}, rt) - - @mock.patch.object(storagecenter_api.SCApi, - 'create_replay_profile', - return_value=SCRPLAYPROFILE) - @mock.patch('cinder.volume.utils.is_group_a_cg_snapshot_type', - return_value=True) - def test_create_group(self, - mock_is_cg, - mock_create_replay_profile, - mock_close_connection, - mock_open_connection, - mock_init): - context = {} - group = {'id': fake.GROUP_ID} - model_update = self.driver.create_group(context, group) - 
mock_create_replay_profile.assert_called_once_with(fake.GROUP_ID) - self.assertEqual({'status': 'available'}, model_update) - - @mock.patch('cinder.volume.utils.is_group_a_cg_snapshot_type', - return_value=False) - def test_create_group_not_a_cg(self, - mock_is_cg, - mock_close_connection, - mock_open_connection, - mock_init): - context = {} - group = {'id': fake.GROUP_ID} - self.assertRaises(NotImplementedError, self.driver.create_group, - context, group) - - @mock.patch.object(storagecenter_api.SCApi, - 'create_replay_profile', - return_value=None) - @mock.patch('cinder.volume.utils.is_group_a_cg_snapshot_type', - return_value=True) - def test_create_group_fail(self, - mock_is_cg, - mock_create_replay_profile, - mock_close_connection, - mock_open_connection, - mock_init): - context = {} - group = {'id': fake.GROUP_ID} - model_update = self.driver.create_group(context, group) - mock_create_replay_profile.assert_called_once_with(fake.GROUP_ID) - self.assertEqual({'status': 'error'}, model_update) - - @mock.patch.object(storagecenter_api.SCApi, - 'delete_replay_profile') - @mock.patch.object(storagecenter_api.SCApi, - 'find_replay_profile', - return_value=SCRPLAYPROFILE) - @mock.patch.object(storagecenter_iscsi.SCISCSIDriver, - 'delete_volume') - @mock.patch('cinder.volume.utils.is_group_a_cg_snapshot_type', - return_value=True) - def test_delete_group(self, - mock_is_cg, - mock_delete_volume, - mock_find_replay_profile, - mock_delete_replay_profile, - mock_close_connection, - mock_open_connection, - mock_init): - volume = {'id': fake.VOLUME_ID} - expected_volumes = [{'id': fake.VOLUME_ID, - 'status': 'deleted'}] - context = {} - group = {'id': fake.GROUP_ID, - 'status': fields.ConsistencyGroupStatus.DELETED} - model_update, volumes = self.driver.delete_group( - context, group, [volume]) - mock_find_replay_profile.assert_called_once_with(fake.GROUP_ID) - mock_delete_replay_profile.assert_called_once_with(self.SCRPLAYPROFILE) - 
mock_delete_volume.assert_called_once_with(volume) - self.assertEqual(group['status'], model_update['status']) - self.assertEqual(expected_volumes, volumes) - - @mock.patch('cinder.volume.utils.is_group_a_cg_snapshot_type', - return_value=False) - def test_delete_group_not_a_cg( - self, mock_is_cg, mock_close_connection, - mock_open_connection, mock_init): - - volume = {'id': fake.VOLUME_ID} - context = {} - group = {'id': fake.GROUP_ID, - 'status': fields.ConsistencyGroupStatus.DELETED} - self.assertRaises(NotImplementedError, self.driver.delete_group, - context, group, [volume]) - - @mock.patch.object(storagecenter_api.SCApi, - 'delete_replay_profile') - @mock.patch.object(storagecenter_api.SCApi, - 'find_replay_profile', - return_value=None) - @mock.patch.object(storagecenter_iscsi.SCISCSIDriver, - 'delete_volume') - @mock.patch('cinder.volume.utils.is_group_a_cg_snapshot_type', - return_value=True) - def test_delete_group_not_found(self, - mock_is_cg, - mock_delete_volume, - mock_find_replay_profile, - mock_delete_replay_profile, - mock_close_connection, - mock_open_connection, - mock_init): - context = {} - group = {'id': fake.GROUP_ID, - 'status': fields.ConsistencyGroupStatus.DELETED} - model_update, volumes = self.driver.delete_group(context, group, []) - mock_find_replay_profile.assert_called_once_with(fake.GROUP_ID) - self.assertFalse(mock_delete_replay_profile.called) - self.assertFalse(mock_delete_volume.called) - self.assertEqual(group['status'], model_update['status']) - self.assertEqual([], volumes) - - @mock.patch.object(storagecenter_api.SCApi, - 'update_cg_volumes', - return_value=True) - @mock.patch.object(storagecenter_api.SCApi, - 'find_replay_profile', - return_value=SCRPLAYPROFILE) - @mock.patch('cinder.volume.utils.is_group_a_cg_snapshot_type', - return_value=True) - def test_update_group(self, - mock_is_cg, - mock_find_replay_profile, - mock_update_cg_volumes, - mock_close_connection, - mock_open_connection, - mock_init): - context = {} - 
group = {'id': fake.GROUP_ID} - add_volumes = [{'id': fake.VOLUME_ID}] - remove_volumes = [{'id': fake.VOLUME2_ID}] - rt1, rt2, rt3 = self.driver.update_group(context, group, add_volumes, - remove_volumes) - mock_update_cg_volumes.assert_called_once_with(self.SCRPLAYPROFILE, - add_volumes, - remove_volumes) - mock_find_replay_profile.assert_called_once_with(fake.GROUP_ID) - self.assertIsNone(rt1) - self.assertIsNone(rt2) - self.assertIsNone(rt3) - - @mock.patch('cinder.volume.utils.is_group_a_cg_snapshot_type', - return_value=False) - def test_update_group_not_a_cg(self, - mock_is_cg, - mock_close_connection, - mock_open_connection, - mock_init): - context = {} - group = {'id': fake.GROUP_ID} - add_volumes = [{'id': fake.VOLUME_ID}] - remove_volumes = [{'id': fake.VOLUME2_ID}] - self.assertRaises(NotImplementedError, self.driver.update_group, - context, group, add_volumes, remove_volumes) - - @mock.patch.object(storagecenter_api.SCApi, - 'find_replay_profile', - return_value=None) - @mock.patch('cinder.volume.utils.is_group_a_cg_snapshot_type', - return_value=True) - def test_update_group_not_found(self, - mock_is_cg, - mock_find_replay_profile, - mock_close_connection, - mock_open_connection, - mock_init): - context = {} - group = {'id': fake.GROUP_ID} - add_volumes = [{'id': fake.VOLUME_ID}] - remove_volumes = [{'id': fake.VOLUME2_ID}] - self.assertRaises(exception.VolumeBackendAPIException, - self.driver.update_group, - context, - group, - add_volumes, - remove_volumes) - mock_find_replay_profile.assert_called_once_with(fake.GROUP_ID) - - @mock.patch.object(storagecenter_api.SCApi, - 'update_cg_volumes', - return_value=False) - @mock.patch.object(storagecenter_api.SCApi, - 'find_replay_profile', - return_value=SCRPLAYPROFILE) - @mock.patch('cinder.volume.utils.is_group_a_cg_snapshot_type', - return_value=True) - def test_update_group_error(self, - mock_is_cg, - mock_find_replay_profile, - mock_update_cg_volumes, - mock_close_connection, - mock_open_connection, - 
mock_init): - context = {} - group = {'id': fake.GROUP_ID} - add_volumes = [{'id': fake.VOLUME_ID}] - remove_volumes = [{'id': fake.VOLUME2_ID}] - self.assertRaises(exception.VolumeBackendAPIException, - self.driver.update_group, - context, - group, - add_volumes, - remove_volumes) - mock_find_replay_profile.assert_called_once_with(fake.GROUP_ID) - mock_update_cg_volumes.assert_called_once_with(self.SCRPLAYPROFILE, - add_volumes, - remove_volumes) - - @mock.patch.object(storagecenter_iscsi.SCISCSIDriver, - 'update_group') - @mock.patch.object(storagecenter_iscsi.SCISCSIDriver, - 'create_group') - @mock.patch.object(storagecenter_iscsi.SCISCSIDriver, - 'create_cloned_volume') - @mock.patch('cinder.volume.utils.is_group_a_cg_snapshot_type', - return_value=True) - def test_create_group_from_src( - self, mock_is_cg, mock_create_cloned_volume, mock_create_group, - mock_update_group, mock_close_connection, mock_open_connection, - mock_init): - context = {} - group = {'id': fake.GROUP2_ID} - volumes = [{'id': fake.VOLUME3_ID}, {'id': fake.VOLUME4_ID}] - source_group = {'id': fake.GROUP_ID} - source_volumes = [{'id': fake.VOLUME_ID}, {'id': fake.VOLUME2_ID}] - mock_create_cloned_volume.side_effect = [{'id': fake.VOLUME3_ID}, - {'id': fake.VOLUME4_ID}] - mock_create_group.return_value = {'status': 'available'} - model_update, volumes_model_update = self.driver.create_group_from_src( - context, group, volumes, group_snapshot=None, snapshots=None, - source_group=source_group, source_vols=source_volumes) - expected = [{'id': fake.VOLUME3_ID, 'status': 'available'}, - {'id': fake.VOLUME4_ID, 'status': 'available'}] - self.assertEqual({'status': 'available'}, model_update) - self.assertEqual(expected, volumes_model_update) - - @mock.patch.object(storagecenter_iscsi.SCISCSIDriver, - 'update_group') - @mock.patch.object(storagecenter_iscsi.SCISCSIDriver, - 'create_group') - @mock.patch.object(storagecenter_iscsi.SCISCSIDriver, - 'create_volume_from_snapshot') - 
@mock.patch('cinder.volume.utils.is_group_a_cg_snapshot_type', - return_value=True) - def test_create_group_from_src_from_snapshot( - self, mock_is_cg, mock_create_volume_from_snapshot, - mock_create_group, mock_update_group, mock_close_connection, - mock_open_connection, - mock_init): - context = {} - group = {'id': fake.GROUP_ID} - volumes = [{'id': fake.VOLUME_ID}, {'id': fake.VOLUME2_ID}] - group_snapshot = {'id': fake.GROUP_SNAPSHOT_ID} - source_snapshots = [{'id': fake.SNAPSHOT_ID}, - {'id': fake.SNAPSHOT2_ID}] - mock_create_volume_from_snapshot.side_effect = [ - {'id': fake.VOLUME_ID}, {'id': fake.VOLUME2_ID}] - mock_create_group.return_value = {'status': 'available'} - model_update, volumes_model_update = self.driver.create_group_from_src( - context, group, volumes, - group_snapshot=group_snapshot, snapshots=source_snapshots, - source_group=None, source_vols=None) - expected = [{'id': fake.VOLUME_ID, 'status': 'available'}, - {'id': fake.VOLUME2_ID, 'status': 'available'}] - self.assertEqual({'status': 'available'}, model_update) - self.assertEqual(expected, volumes_model_update) - - def test_create_group_from_src_bad_input( - self, mock_close_connection, mock_open_connection, mock_init): - context = {} - group = {'id': fake.GROUP2_ID} - volumes = [{'id': fake.VOLUME3_ID}, {'id': fake.VOLUME4_ID}] - self.assertRaises(exception.InvalidInput, - self.driver.create_group_from_src, - context, group, volumes, None, None, None, None) - - @mock.patch('cinder.volume.utils.is_group_a_cg_snapshot_type', - return_value=False) - def test_create_group_from_src_not_a_cg( - self, mock_is_cg, mock_close_connection, - mock_open_connection, mock_init): - context = {} - group = {'id': fake.GROUP2_ID} - volumes = [{'id': fake.VOLUME3_ID}, {'id': fake.VOLUME4_ID}] - source_group = {'id': fake.GROUP_ID} - source_volumes = [{'id': fake.VOLUME_ID}, {'id': fake.VOLUME2_ID}] - self.assertRaises(NotImplementedError, - self.driver.create_group_from_src, - context, group, volumes, None, 
None, - source_group, source_volumes) - - @mock.patch.object(storagecenter_api.SCApi, - 'snap_cg_replay', - return_value={'instanceId': '100'}) - @mock.patch.object(storagecenter_api.SCApi, - 'find_replay_profile', - return_value=SCRPLAYPROFILE) - @mock.patch('cinder.volume.utils.is_group_a_cg_snapshot_type', - return_value=True) - def test_create_group_snapshot(self, - mock_is_cg, - mock_find_replay_profile, - mock_snap_cg_replay, - mock_close_connection, - mock_open_connection, - mock_init): - mock_snapshot = mock.MagicMock() - mock_snapshot.id = fake.SNAPSHOT_ID - expected_snapshots = [{'id': fake.SNAPSHOT_ID, 'status': 'available'}] - - context = {} - cggrp = {'group_id': fake.GROUP_ID, 'id': fake.GROUP_SNAPSHOT_ID} - model_update, snapshots = self.driver.create_group_snapshot( - context, cggrp, [mock_snapshot]) - mock_find_replay_profile.assert_called_once_with(fake.GROUP_ID) - mock_snap_cg_replay.assert_called_once_with(self.SCRPLAYPROFILE, - fake.GROUP_SNAPSHOT_ID, - 0) - self.assertEqual('available', model_update['status']) - self.assertEqual(expected_snapshots, snapshots) - - @mock.patch.object(storagecenter_api.SCApi, - 'find_replay_profile', - return_value=None) - @mock.patch('cinder.volume.utils.is_group_a_cg_snapshot_type', - return_value=True) - def test_create_group_snapshot_profile_not_found(self, - mock_is_cg, - mock_find_replay_profile, - mock_close_connection, - mock_open_connection, - mock_init): - context = {} - cggrp = {'group_id': fake.GROUP_ID, 'id': fake.GROUP_SNAPSHOT_ID} - model_update, snapshot_updates = self.driver.create_group_snapshot( - context, cggrp, []) - self.assertEqual({'status': 'error'}, model_update) - self.assertIsNone(snapshot_updates) - - mock_find_replay_profile.assert_called_once_with(fake.GROUP_ID) - - @mock.patch('cinder.volume.utils.is_group_a_cg_snapshot_type', - return_value=False) - def test_create_group_snapshot_not_a_cg( - self, mock_is_cg, mock_close_connection, - mock_open_connection, mock_init): - context = 
{} - cggrp = {'group_id': fake.GROUP_ID, 'id': fake.GROUP_SNAPSHOT_ID} - self.assertRaises(NotImplementedError, - self.driver.create_group_snapshot, - context, cggrp, []) - - @mock.patch.object(storagecenter_api.SCApi, - 'snap_cg_replay', - return_value=None) - @mock.patch.object(storagecenter_api.SCApi, - 'find_replay_profile', - return_value=SCRPLAYPROFILE) - @mock.patch('cinder.volume.utils.is_group_a_cg_snapshot_type', - return_value=True) - def test_create_group_snapshot_fail(self, - mock_is_cg, - mock_find_replay_profile, - mock_snap_cg_replay, - mock_close_connection, - mock_open_connection, - mock_init): - context = {} - cggrp = {'group_id': fake.GROUP_ID, 'id': fake.GROUP_SNAPSHOT_ID} - model_update, snapshot_updates = self.driver.create_group_snapshot( - context, cggrp, []) - mock_find_replay_profile.assert_called_once_with(fake.GROUP_ID) - mock_snap_cg_replay.assert_called_once_with(self.SCRPLAYPROFILE, - fake.GROUP_SNAPSHOT_ID, 0) - self.assertEqual({'status': 'error'}, model_update) - self.assertIsNone(snapshot_updates) - - @mock.patch.object(storagecenter_api.SCApi, - 'delete_cg_replay', - return_value=True) - @mock.patch.object(storagecenter_api.SCApi, - 'find_replay_profile', - return_value=SCRPLAYPROFILE) - @mock.patch('cinder.volume.utils.is_group_a_cg_snapshot_type', - return_value=True) - def test_delete_group_snapshot(self, - mock_is_cg, - mock_find_replay_profile, - mock_delete_cg_replay, - mock_close_connection, - mock_open_connection, - mock_init): - mock_snapshot = {'id': fake.SNAPSHOT_ID, 'status': 'available'} - context = {} - cgsnap = {'group_id': fake.GROUP_ID, - 'id': fake.GROUP_SNAPSHOT_ID, 'status': 'deleted'} - model_update, snapshots = self.driver.delete_group_snapshot( - context, cgsnap, [mock_snapshot]) - mock_find_replay_profile.assert_called_once_with(fake.GROUP_ID) - mock_delete_cg_replay.assert_called_once_with(self.SCRPLAYPROFILE, - fake.GROUP_SNAPSHOT_ID) - self.assertEqual({'status': cgsnap['status']}, model_update) - 
self.assertEqual([{'id': fake.SNAPSHOT_ID, 'status': 'deleted'}], - snapshots) - - @mock.patch.object(storagecenter_api.SCApi, - 'delete_cg_replay') - @mock.patch.object(storagecenter_api.SCApi, - 'find_replay_profile', - return_value=None) - @mock.patch('cinder.volume.utils.is_group_a_cg_snapshot_type', - return_value=True) - def test_delete_group_snapshot_profile_not_found(self, - mock_is_cg, - mock_find_replay_profile, - mock_delete_cg_replay, - mock_close_connection, - mock_open_connection, - mock_init): - snapshot = {'id': fake.SNAPSHOT_ID, 'status': 'available'} - context = {} - cgsnap = {'group_id': fake.GROUP_ID, - 'id': fake.GROUP_SNAPSHOT_ID, 'status': 'available'} - model_update, snapshots = self.driver.delete_group_snapshot( - context, cgsnap, [snapshot]) - mock_find_replay_profile.assert_called_once_with(fake.GROUP_ID) - self.assertFalse(mock_delete_cg_replay.called) - self.assertEqual({'status': 'error'}, model_update) - self.assertIsNone(snapshots) - - @mock.patch.object(storagecenter_api.SCApi, - 'delete_cg_replay', - return_value=False) - @mock.patch.object(storagecenter_api.SCApi, - 'find_replay_profile', - return_value=SCRPLAYPROFILE) - @mock.patch('cinder.volume.utils.is_group_a_cg_snapshot_type', - return_value=True) - def test_delete_group_snapshot_profile_failed_delete( - self, mock_is_cg, mock_find_replay_profile, mock_delete_cg_replay, - mock_close_connection, mock_open_connection, mock_init): - context = {} - cgsnap = {'group_id': fake.GROUP_ID, - 'id': fake.GROUP_SNAPSHOT_ID, 'status': 'available'} - model_update, snapshot_updates = self.driver.delete_group_snapshot( - context, cgsnap, []) - self.assertEqual({'status': 'error_deleting'}, model_update) - mock_find_replay_profile.assert_called_once_with(fake.GROUP_ID) - mock_delete_cg_replay.assert_called_once_with(self.SCRPLAYPROFILE, - fake.GROUP_SNAPSHOT_ID) - - @mock.patch('cinder.volume.utils.is_group_a_cg_snapshot_type', - return_value=False) - def test_delete_group_snapshot_not_a_cg( 
- self, mock_is_cg, mock_close_connection, - mock_open_connection, mock_init): - context = {} - cgsnap = {'group_id': fake.GROUP_ID, - 'id': fake.GROUP_SNAPSHOT_ID, 'status': 'available'} - self.assertRaises(NotImplementedError, - self.driver.delete_group_snapshot, - context, cgsnap, []) - - @mock.patch.object(storagecenter_api.SCApi, - 'find_volume', - return_value={'id': 'guid'}) - @mock.patch.object(storagecenter_iscsi.SCISCSIDriver, - '_create_replications', - return_value=None) - @mock.patch.object(storagecenter_api.SCApi, - 'manage_existing') - def test_manage_existing(self, - mock_manage_existing, - mock_create_replications, - mock_find_volume, - mock_close_connection, - mock_open_connection, - mock_init): - # Very little to do in this one. The call is sent - # straight down. - volume = {'id': fake.VOLUME_ID} - existing_ref = {'source-name': 'imavolumename'} - self.driver.manage_existing(volume, existing_ref) - mock_manage_existing.assert_called_once_with(fake.VOLUME_ID, - existing_ref) - - @mock.patch.object(storagecenter_api.SCApi, - 'find_volume', - return_value={'id': 'guid'}) - @mock.patch.object(storagecenter_iscsi.SCISCSIDriver, - '_create_replications', - return_value=None) - @mock.patch.object(storagecenter_api.SCApi, - 'manage_existing') - def test_manage_existing_id(self, - mock_manage_existing, - mock_create_replications, - mock_find_volume, - mock_close_connection, - mock_open_connection, - mock_init): - # Very little to do in this one. The call is sent - # straight down. 
- volume = {'id': fake.VOLUME_ID} - existing_ref = {'source-id': 'imadeviceid'} - self.driver.manage_existing(volume, existing_ref) - mock_manage_existing.assert_called_once_with(fake.VOLUME_ID, - existing_ref) - - def test_manage_existing_bad_ref(self, - mock_close_connection, - mock_open_connection, - mock_init): - volume = {'id': fake.VOLUME_ID} - existing_ref = {'banana-name': 'imavolumename'} - self.assertRaises(exception.ManageExistingInvalidReference, - self.driver.manage_existing, - volume, existing_ref) - - @mock.patch.object(storagecenter_api.SCApi, - 'get_unmanaged_volume_size', - return_value=4) - def test_manage_existing_get_size(self, - mock_get_unmanaged_volume_size, - mock_close_connection, - mock_open_connection, - mock_init): - # Almost nothing to test here. Just that we call our function. - volume = {'id': fake.VOLUME_ID} - existing_ref = {'source-name': 'imavolumename'} - res = self.driver.manage_existing_get_size(volume, existing_ref) - mock_get_unmanaged_volume_size.assert_called_once_with(existing_ref) - # The above is 4GB and change. - self.assertEqual(4, res) - - @mock.patch.object(storagecenter_api.SCApi, - 'get_unmanaged_volume_size', - return_value=4) - def test_manage_existing_get_size_id(self, - mock_get_unmanaged_volume_size, - mock_close_connection, - mock_open_connection, - mock_init): - # Almost nothing to test here. Just that we call our function. - volume = {'id': fake.VOLUME_ID} - existing_ref = {'source-id': 'imadeviceid'} - res = self.driver.manage_existing_get_size(volume, existing_ref) - mock_get_unmanaged_volume_size.assert_called_once_with(existing_ref) - # The above is 4GB and change. 
- self.assertEqual(4, res) - - def test_manage_existing_get_size_bad_ref(self, - mock_close_connection, - mock_open_connection, - mock_init): - volume = {'id': fake.VOLUME_ID} - existing_ref = {'banana-name': 'imavolumename'} - self.assertRaises(exception.ManageExistingInvalidReference, - self.driver.manage_existing_get_size, - volume, existing_ref) - - @mock.patch.object(storagecenter_api.SCApi, - 'find_volume', - return_value=VOLUME) - @mock.patch.object(storagecenter_api.SCApi, - 'update_storage_profile') - @mock.patch.object(storagecenter_api.SCApi, - 'update_replay_profiles') - @mock.patch.object(storagecenter_iscsi.SCISCSIDriver, - '_create_replications') - @mock.patch.object(storagecenter_api.SCApi, - 'update_replicate_active_replay') - def test_retype_not_our_extra_specs(self, - mock_update_replicate_active_replay, - mock_create_replications, - mock_update_replay_profile, - mock_update_storage_profile, - mock_find_volume, - mock_close_connection, - mock_open_connection, - mock_init): - res = self.driver.retype( - None, {'id': fake.VOLUME_ID}, None, {'extra_specs': None}, None) - self.assertTrue(res) - self.assertFalse(mock_update_replicate_active_replay.called) - self.assertFalse(mock_create_replications.called) - self.assertFalse(mock_update_replay_profile.called) - self.assertFalse(mock_update_storage_profile.called) - - @mock.patch.object(storagecenter_api.SCApi, - 'find_volume', - return_value=VOLUME) - @mock.patch.object(storagecenter_api.SCApi, - 'update_replay_profiles') - def test_retype_replay_profiles(self, - mock_update_replay_profiles, - mock_find_volume, - mock_close_connection, - mock_open_connection, - mock_init): - mock_update_replay_profiles.side_effect = [True, False] - # Normal successful run. - res = self.driver.retype( - None, {'id': fake.VOLUME_ID}, None, - {'extra_specs': {'storagetype:replayprofiles': ['A', 'B']}}, - None) - mock_update_replay_profiles.assert_called_once_with(self.VOLUME, 'B') - self.assertTrue(res) - # Run fails. 
Make sure this returns False. - res = self.driver.retype( - None, {'id': fake.VOLUME_ID}, None, - {'extra_specs': {'storagetype:replayprofiles': ['B', 'A']}}, - None) - self.assertFalse(res) - - @mock.patch.object(storagecenter_api.SCApi, - 'find_volume', - return_value=VOLUME) - @mock.patch.object(storagecenter_iscsi.SCISCSIDriver, - '_create_replications', - return_value={'replication_status': 'enabled', - 'replication_driver_data': '54321'}) - @mock.patch.object(storagecenter_iscsi.SCISCSIDriver, - '_delete_replications') - def test_retype_create_replications(self, - mock_delete_replications, - mock_create_replications, - mock_find_volume, - mock_close_connection, - mock_open_connection, - mock_init): - - res = self.driver.retype( - None, {'id': fake.VOLUME_ID}, - {'extra_specs': {'replication_enabled': [None, ' True']}}, - {'extra_specs': {'replication_enabled': [None, ' True']}}, - None) - self.assertTrue(mock_create_replications.called) - self.assertFalse(mock_delete_replications.called) - self.assertEqual((True, {'replication_status': 'enabled', - 'replication_driver_data': '54321'}), res) - res = self.driver.retype( - None, {'id': fake.VOLUME_ID}, None, - {'extra_specs': {'replication_enabled': [' True', None]}}, - None) - self.assertTrue(mock_delete_replications.called) - self.assertEqual((True, {'replication_status': 'disabled', - 'replication_driver_data': ''}), res) - - @mock.patch.object(storagecenter_api.SCApi, - 'update_replicate_active_replay') - @mock.patch.object(storagecenter_api.SCApi, - 'find_volume', - return_value=VOLUME) - def test_retype_active_replay(self, - mock_find_volume, - mock_update_replicate_active_replay, - mock_close_connection, - mock_open_connection, - mock_init): - # Success, Success, Not called and fail. 
- mock_update_replicate_active_replay.side_effect = [True, True, False] - res = self.driver.retype( - None, {'id': fake.VOLUME_ID}, None, - {'extra_specs': {'replication:activereplay': ['', ' True']}}, - None) - self.assertTrue(res) - res = self.driver.retype( - None, {'id': fake.VOLUME_ID}, None, - {'extra_specs': {'replication:activereplay': [' True', '']}}, - None) - self.assertTrue(res) - res = self.driver.retype( - None, {'id': fake.VOLUME_ID}, None, - {'extra_specs': {'replication:activereplay': ['', '']}}, - None) - self.assertTrue(res) - res = self.driver.retype( - None, {'id': fake.VOLUME_ID}, None, - {'extra_specs': {'replication:activereplay': ['', ' True']}}, - None) - self.assertFalse(res) - - @mock.patch.object(storagecenter_api.SCApi, - 'find_volume', - return_value=VOLUME) - def test_retype_same(self, - mock_find_volume, - mock_close_connection, - mock_open_connection, - mock_init): - res = self.driver.retype( - None, {'id': fake.VOLUME_ID}, None, - {'extra_specs': {'storagetype:storageprofile': ['A', 'A']}}, - None) - self.assertTrue(res) - - @mock.patch.object(storagecenter_api.SCApi, - 'find_volume', - return_value=VOLUME) - @mock.patch.object(storagecenter_api.SCApi, - 'unmanage') - def test_unmanage(self, - mock_unmanage, - mock_find_volume, - mock_close_connection, - mock_open_connection, - mock_init): - volume = {'id': fake.VOLUME_ID, 'provider_id': '11111.1'} - self.driver.unmanage(volume) - mock_find_volume.assert_called_once_with(fake.VOLUME_ID, '11111.1') - mock_unmanage.assert_called_once_with(self.VOLUME) - - @mock.patch.object(storagecenter_api.SCApi, - 'find_volume', - return_value=None) - @mock.patch.object(storagecenter_api.SCApi, - 'unmanage') - def test_unmanage_volume_not_found(self, - mock_unmanage, - mock_find_volume, - mock_close_connection, - mock_open_connection, - mock_init): - volume = {'id': fake.VOLUME_ID, 'provider_id': '11111.1'} - self.driver.unmanage(volume) - mock_find_volume.assert_called_once_with(fake.VOLUME_ID, 
'11111.1') - self.assertFalse(mock_unmanage.called) - - @mock.patch.object(storagecenter_api.SCApi, - 'update_storage_profile') - @mock.patch.object(storagecenter_api.SCApi, - 'find_volume', - return_value=VOLUME) - def test_retype(self, - mock_find_volume, - mock_update_storage_profile, - mock_close_connection, - mock_open_connection, - mock_init): - res = self.driver.retype( - None, {'id': fake.VOLUME_ID}, None, - {'extra_specs': {'storagetype:storageprofile': ['A', 'B']}}, - None) - mock_update_storage_profile.assert_called_once_with( - self.VOLUME, 'B') - self.assertTrue(res) - - def test__parse_secondary(self, - mock_close_connection, - mock_open_connection, - mock_init): - backends = self.driver.backends - self.driver.backends = [{'target_device_id': '12345', - 'qosnode': 'cinderqos'}, - {'target_device_id': '67890', - 'qosnode': 'cinderqos'}] - mock_api = mock.MagicMock() - # Good run. Secondary in replication_driver_data and backend. sc up. - destssn = self.driver._parse_secondary(mock_api, '67890') - self.assertEqual(67890, destssn) - # Bad run. Secondary not in backend. - destssn = self.driver._parse_secondary(mock_api, '99999') - self.assertIsNone(destssn) - # Good run. - destssn = self.driver._parse_secondary(mock_api, '12345') - self.assertEqual(12345, destssn) - self.driver.backends = backends - - @mock.patch.object(storagecenter_api.SCApi, - 'find_sc') - def test__parse_secondary_sc_down(self, - mock_find_sc, - mock_close_connection, - mock_open_connection, - mock_init): - backends = self.driver.backends - self.driver.backends = [{'target_device_id': '12345', - 'qosnode': 'cinderqos'}, - {'target_device_id': '67890', - 'qosnode': 'cinderqos'}] - mock_api = mock.MagicMock() - # Bad run. Good selection. SC down. 
- mock_api.find_sc = mock.MagicMock( - side_effect=exception.VolumeBackendAPIException(data='1234')) - destssn = self.driver._parse_secondary(mock_api, '12345') - self.assertIsNone(destssn) - self.driver.backends = backends - - def test__failover_live_volume(self, - mock_close_connection, - mock_open_connection, - mock_init): - mock_api = mock.MagicMock() - sclivevol = {'instanceId': '101.100', - 'primaryVolume': {'instanceId': '101.101', - 'instanceName': fake.VOLUME2_ID}, - 'secondaryVolume': {'instanceId': '102.101', - 'instanceName': fake.VOLUME_ID}, - 'secondaryScSerialNumber': 102, - 'secondaryRole': 'Secondary'} - postfail = {'instanceId': '101.100', - 'primaryVolume': {'instanceId': '102.101', - 'instanceName': fake.VOLUME_ID}, - 'secondaryVolume': {'instanceId': '101.101', - 'instanceName': fake.VOLUME2_ID}, - 'secondaryScSerialNumber': 102, - 'secondaryRole': 'Secondary'} - mock_api.get_live_volume = mock.MagicMock() - mock_api.get_live_volume.side_effect = [sclivevol, postfail, - sclivevol, sclivevol] - # Good run. - mock_api.is_swapped = mock.MagicMock(return_value=False) - mock_api.swap_roles_live_volume = mock.MagicMock(return_value=True) - model_update = {'provider_id': '102.101', - 'replication_status': 'failed-over'} - ret = self.driver._failover_live_volume(mock_api, fake.VOLUME_ID, - '101.101') - self.assertEqual(model_update, ret) - # Swap fail - mock_api.swap_roles_live_volume.return_value = False - model_update = {'status': 'error'} - ret = self.driver._failover_live_volume(mock_api, fake.VOLUME_ID, - '101.101') - self.assertEqual(model_update, ret) - # Can't find live volume. 
- mock_api.get_live_volume.return_value = None - ret = self.driver._failover_live_volume(mock_api, fake.VOLUME_ID, - '101.101') - self.assertEqual(model_update, ret) - - def test__failover_replication(self, - mock_close_connection, - mock_open_connection, - mock_init): - rvol = {'instanceId': '102.101'} - mock_api = mock.MagicMock() - mock_api.break_replication = mock.MagicMock(return_value=rvol) - # Good run. - model_update = {'replication_status': 'failed-over', - 'provider_id': '102.101'} - ret = self.driver._failover_replication(mock_api, fake.VOLUME_ID, - '101.100', 102) - self.assertEqual(model_update, ret) - # break fail - mock_api.break_replication.return_value = None - model_update = {'status': 'error'} - ret = self.driver._failover_replication(mock_api, fake.VOLUME_ID, - '101.100', 102) - self.assertEqual(model_update, ret) - - @mock.patch.object(storagecenter_iscsi.SCISCSIDriver, - '_failover_replication') - @mock.patch.object(storagecenter_iscsi.SCISCSIDriver, - '_parse_secondary') - @mock.patch.object(storagecenter_api.SCApi, - 'find_volume') - @mock.patch.object(storagecenter_api.SCApi, - 'remove_mappings') - @mock.patch.object(storagecenter_iscsi.SCISCSIDriver, - 'failback_volumes') - @mock.patch.object(storagecenter_iscsi.SCISCSIDriver, - '_get_replication_specs') - def test_failover_host(self, - mock_get_replication_specs, - mock_failback_volumes, - mock_remove_mappings, - mock_find_volume, - mock_parse_secondary, - mock_failover_replication, - mock_close_connection, - mock_open_connection, - mock_init): - mock_get_replication_specs.return_value = {'enabled': False, - 'live': False} - self.driver.replication_enabled = False - self.driver.failed_over = False - volumes = [{'id': fake.VOLUME_ID, - 'replication_driver_data': '12345', - 'provider_id': '1.1'}, - {'id': fake.VOLUME2_ID, - 'replication_driver_data': '12345', - 'provider_id': '1.2'}] - # No run. Not doing repl. Should raise. 
- self.assertRaises(exception.VolumeBackendAPIException, - self.driver.failover_host, - {}, - volumes, - '12345') - # Good run - self.driver.replication_enabled = True - mock_get_replication_specs.return_value = {'enabled': True, - 'live': False} - mock_parse_secondary.return_value = 12345 - expected_destssn = 12345 - mock_failover_replication.side_effect = [ - {'provider_id': '2.1', 'replication_status': 'failed-over'}, # 1 - {'provider_id': '2.2', 'replication_status': 'failed-over'}, - {'provider_id': '2.1', 'replication_status': 'failed-over'}, # 2 - {'provider_id': '2.1', 'replication_status': 'failed-over'}] # 3 - expected_volume_update = [{'volume_id': fake.VOLUME_ID, 'updates': - {'replication_status': 'failed-over', - 'provider_id': '2.1'}}, - {'volume_id': fake.VOLUME2_ID, 'updates': - {'replication_status': 'failed-over', - 'provider_id': '2.2'}}] - destssn, volume_update, __ = self.driver.failover_host( - {}, volumes, '12345', []) - self.assertEqual(expected_destssn, destssn) - self.assertEqual(expected_volume_update, volume_update) - # Good run. Not all volumes replicated. - volumes = [{'id': fake.VOLUME_ID, 'replication_driver_data': '12345'}, - {'id': fake.VOLUME2_ID, 'replication_driver_data': ''}] - expected_volume_update = [{'volume_id': fake.VOLUME_ID, 'updates': - {'replication_status': 'failed-over', - 'provider_id': '2.1'}}, - {'volume_id': fake.VOLUME2_ID, 'updates': - {'status': 'error'}}] - self.driver.failed_over = False - self.driver.active_backend_id = None - destssn, volume_update, __ = self.driver.failover_host( - {}, volumes, '12345', []) - self.assertEqual(expected_destssn, destssn) - self.assertEqual(expected_volume_update, volume_update) - # Good run. Not all volumes replicated. No replication_driver_data. 
- volumes = [{'id': fake.VOLUME_ID, 'replication_driver_data': '12345'}, - {'id': fake.VOLUME2_ID}] - expected_volume_update = [{'volume_id': fake.VOLUME_ID, 'updates': - {'replication_status': 'failed-over', - 'provider_id': '2.1'}}, - {'volume_id': fake.VOLUME2_ID, 'updates': - {'status': 'error'}}] - self.driver.failed_over = False - self.driver.active_backend_id = None - destssn, volume_update, __ = self.driver.failover_host( - {}, volumes, '12345', []) - self.assertEqual(expected_destssn, destssn) - self.assertEqual(expected_volume_update, volume_update) - # Good run. No volumes replicated. No replication_driver_data. - volumes = [{'id': fake.VOLUME_ID}, - {'id': fake.VOLUME2_ID}] - expected_volume_update = [{'volume_id': fake.VOLUME_ID, 'updates': - {'status': 'error'}}, - {'volume_id': fake.VOLUME2_ID, 'updates': - {'status': 'error'}}] - self.driver.failed_over = False - self.driver.active_backend_id = None - destssn, volume_update, __ = self.driver.failover_host( - {}, volumes, '12345', []) - self.assertEqual(expected_destssn, destssn) - self.assertEqual(expected_volume_update, volume_update) - # Secondary not found. - mock_parse_secondary.return_value = None - self.driver.failed_over = False - self.driver.active_backend_id = None - self.assertRaises(exception.InvalidReplicationTarget, - self.driver.failover_host, - {}, - volumes, - '54321', - []) - # Already failed over. - self.driver.failed_over = True - self.driver.failover_host({}, volumes, 'default') - mock_failback_volumes.assert_called_once_with(volumes) - # Already failed over. 
- self.assertRaises(exception.InvalidReplicationTarget, - self.driver.failover_host, {}, volumes, '67890', []) - self.driver.replication_enabled = False - - @mock.patch.object(storagecenter_iscsi.SCISCSIDriver, - '_failover_live_volume') - @mock.patch.object(storagecenter_iscsi.SCISCSIDriver, - '_parse_secondary') - @mock.patch.object(storagecenter_api.SCApi, - 'find_volume') - @mock.patch.object(storagecenter_api.SCApi, - 'remove_mappings') - @mock.patch.object(storagecenter_iscsi.SCISCSIDriver, - 'failback_volumes') - @mock.patch.object(storagecenter_iscsi.SCISCSIDriver, - '_get_replication_specs') - def test_failover_host_live_volume(self, - mock_get_replication_specs, - mock_failback_volumes, - mock_remove_mappings, - mock_find_volume, - mock_parse_secondary, - mock_failover_live_volume, - mock_close_connection, - mock_open_connection, - mock_init): - mock_get_replication_specs.return_value = {'enabled': False, - 'live': False} - self.driver.replication_enabled = False - self.driver.failed_over = False - volumes = [{'id': fake.VOLUME_ID, - 'replication_driver_data': '12345', - 'provider_id': '1.1'}, - {'id': fake.VOLUME2_ID, - 'replication_driver_data': '12345', - 'provider_id': '1.2'}] - # No run. Not doing repl. Should raise. 
- self.assertRaises(exception.VolumeBackendAPIException, - self.driver.failover_host, - {}, - volumes, - '12345') - # Good run - self.driver.replication_enabled = True - mock_get_replication_specs.return_value = {'enabled': True, - 'live': True} - mock_parse_secondary.return_value = 12345 - expected_destssn = 12345 - mock_failover_live_volume.side_effect = [ - {'provider_id': '2.1', 'replication_status': 'failed-over'}, # 1 - {'provider_id': '2.2', 'replication_status': 'failed-over'}, - {'provider_id': '2.1', 'replication_status': 'failed-over'}, # 2 - {'provider_id': '2.1', 'replication_status': 'failed-over'}] # 3 - expected_volume_update = [{'volume_id': fake.VOLUME_ID, 'updates': - {'replication_status': 'failed-over', - 'provider_id': '2.1'}}, - {'volume_id': fake.VOLUME2_ID, 'updates': - {'replication_status': 'failed-over', - 'provider_id': '2.2'}}] - destssn, volume_update, __ = self.driver.failover_host( - {}, volumes, '12345', []) - self.assertEqual(expected_destssn, destssn) - self.assertEqual(expected_volume_update, volume_update) - # Good run. Not all volumes replicated. - volumes = [{'id': fake.VOLUME_ID, 'replication_driver_data': '12345'}, - {'id': fake.VOLUME2_ID, 'replication_driver_data': ''}] - expected_volume_update = [{'volume_id': fake.VOLUME_ID, 'updates': - {'replication_status': 'failed-over', - 'provider_id': '2.1'}}, - {'volume_id': fake.VOLUME2_ID, 'updates': - {'status': 'error'}}] - self.driver.failed_over = False - self.driver.active_backend_id = None - destssn, volume_update, __ = self.driver.failover_host( - {}, volumes, '12345', []) - self.assertEqual(expected_destssn, destssn) - self.assertEqual(expected_volume_update, volume_update) - # Good run. Not all volumes replicated. No replication_driver_data. 
- volumes = [{'id': fake.VOLUME_ID, 'replication_driver_data': '12345'}, - {'id': fake.VOLUME2_ID}] - expected_volume_update = [{'volume_id': fake.VOLUME_ID, 'updates': - {'replication_status': 'failed-over', - 'provider_id': '2.1'}}, - {'volume_id': fake.VOLUME2_ID, 'updates': - {'status': 'error'}}] - self.driver.failed_over = False - self.driver.active_backend_id = None - destssn, volume_update, __ = self.driver.failover_host( - {}, volumes, '12345', []) - self.assertEqual(expected_destssn, destssn) - self.assertEqual(expected_volume_update, volume_update) - # Good run. No volumes replicated. No replication_driver_data. - volumes = [{'id': fake.VOLUME_ID}, - {'id': fake.VOLUME2_ID}] - expected_volume_update = [{'volume_id': fake.VOLUME_ID, 'updates': - {'status': 'error'}}, - {'volume_id': fake.VOLUME2_ID, 'updates': - {'status': 'error'}}] - self.driver.failed_over = False - self.driver.active_backend_id = None - destssn, volume_update, __ = self.driver.failover_host( - {}, volumes, '12345', []) - self.assertEqual(expected_destssn, destssn) - self.assertEqual(expected_volume_update, volume_update) - # Secondary not found. - mock_parse_secondary.return_value = None - self.driver.failed_over = False - self.driver.active_backend_id = None - self.assertRaises(exception.InvalidReplicationTarget, - self.driver.failover_host, - {}, - volumes, - '54321', - []) - # Already failed over. 
- self.driver.failed_over = True - self.driver.failover_host({}, volumes, 'default') - mock_failback_volumes.assert_called_once_with(volumes) - self.driver.replication_enabled = False - - def test__get_unmanaged_replay(self, - mock_close_connection, - mock_open_connection, - mock_init): - mock_api = mock.MagicMock() - existing_ref = None - self.assertRaises(exception.ManageExistingInvalidReference, - self.driver._get_unmanaged_replay, - mock_api, - fake.VOLUME_ID, - '11111.1', - existing_ref) - existing_ref = {'source-id': 'Not a source-name'} - self.assertRaises(exception.ManageExistingInvalidReference, - self.driver._get_unmanaged_replay, - mock_api, - fake.VOLUME_ID, - '11111.1', - existing_ref) - existing_ref = {'source-name': 'name'} - mock_api.find_volume = mock.MagicMock(return_value=None) - self.assertRaises(exception.VolumeBackendAPIException, - self.driver._get_unmanaged_replay, - mock_api, - fake.VOLUME_ID, - '11111.1', - existing_ref) - mock_api.find_volume.return_value = {'instanceId': '11111.1'} - mock_api.find_replay = mock.MagicMock(return_value=None) - self.assertRaises(exception.ManageExistingInvalidReference, - self.driver._get_unmanaged_replay, - mock_api, - fake.VOLUME_ID, - '11111.1', - existing_ref) - mock_api.find_replay.return_value = {'instanceId': '11111.101'} - ret = self.driver._get_unmanaged_replay(mock_api, fake.VOLUME_ID, - '11111.1', existing_ref) - self.assertEqual({'instanceId': '11111.101'}, ret) - - @mock.patch.object(storagecenter_iscsi.SCISCSIDriver, - '_get_unmanaged_replay') - @mock.patch.object(storagecenter_api.SCApi, - 'manage_replay') - def test_manage_existing_snapshot(self, - mock_manage_replay, - mock_get_unmanaged_replay, - mock_close_connection, - mock_open_connection, - mock_init): - snapshot = {'volume_id': fake.VOLUME_ID, - 'id': fake.SNAPSHOT_ID} - existing_ref = {'source-name': 'name'} - screplay = {'description': 'name', 'createVolume': {'instanceId': '1'}} - expected = {'provider_id': '1'} - 
mock_get_unmanaged_replay.return_value = screplay - mock_manage_replay.return_value = True - ret = self.driver.manage_existing_snapshot(snapshot, existing_ref) - self.assertEqual(expected, ret) - self.assertEqual(1, mock_get_unmanaged_replay.call_count) - mock_manage_replay.assert_called_once_with(screplay, fake.SNAPSHOT_ID) - mock_manage_replay.return_value = False - self.assertRaises(exception.VolumeBackendAPIException, - self.driver.manage_existing_snapshot, - snapshot, - existing_ref) - - @mock.patch.object(storagecenter_iscsi.SCISCSIDriver, - '_get_unmanaged_replay') - def test_manage_existing_snapshot_get_size(self, - mock_get_unmanaged_replay, - mock_close_connection, - mock_open_connection, - mock_init): - snapshot = {'volume_id': fake.VOLUME_ID, - 'id': fake.SNAPSHOT_ID} - existing_ref = {'source-name'} - # Good size. - mock_get_unmanaged_replay.return_value = {'size': - '1.073741824E9 Bytes'} - ret = self.driver.manage_existing_snapshot_get_size(snapshot, - existing_ref) - self.assertEqual(1, ret) - # Not on 1GB boundries. 
- mock_get_unmanaged_replay.return_value = {'size': - '2.073741824E9 Bytes'} - self.assertRaises(exception.VolumeBackendAPIException, - self.driver.manage_existing_snapshot_get_size, - snapshot, - existing_ref) - - @mock.patch.object(storagecenter_api.SCApi, - 'find_volume') - @mock.patch.object(storagecenter_api.SCApi, - 'find_replay') - @mock.patch.object(storagecenter_api.SCApi, - 'unmanage_replay') - def test_unmanage_snapshot(self, - mock_unmanage_replay, - mock_find_replay, - mock_find_volume, - mock_close_connection, - mock_open_connection, - mock_init): - snapshot = {'volume_id': fake.VOLUME_ID, - 'id': fake.SNAPSHOT_ID} - mock_find_volume.return_value = None - self.assertRaises(exception.VolumeBackendAPIException, - self.driver.unmanage_snapshot, - snapshot) - mock_find_volume.return_value = {'name': fake.VOLUME_ID} - mock_find_replay.return_value = None - self.assertRaises(exception.VolumeBackendAPIException, - self.driver.unmanage_snapshot, - snapshot) - screplay = {'description': fake.SNAPSHOT_ID} - mock_find_replay.return_value = screplay - self.driver.unmanage_snapshot(snapshot) - mock_unmanage_replay.assert_called_once_with(screplay) - - @mock.patch.object(storagecenter_iscsi.SCISCSIDriver, - '_get_qos', - return_value='cinderqos') - @mock.patch.object(storagecenter_iscsi.SCISCSIDriver, - '_parse_extraspecs', - return_value={'replay_profile_string': 'pro'}) - @mock.patch.object(storagecenter_api.SCApi, - 'find_volume') - @mock.patch.object(storagecenter_api.SCApi, - 'find_repl_volume') - @mock.patch.object(storagecenter_api.SCApi, - 'delete_replication') - @mock.patch.object(storagecenter_api.SCApi, - 'replicate_to_common') - @mock.patch.object(storagecenter_api.SCApi, - 'remove_mappings') - @mock.patch.object(storagecenter_iscsi.SCISCSIDriver, - '_wait_for_replication') - @mock.patch.object(storagecenter_iscsi.SCISCSIDriver, - '_reattach_remaining_replications') - @mock.patch.object(storagecenter_iscsi.SCISCSIDriver, - '_fixup_types') - 
@mock.patch.object(storagecenter_iscsi.SCISCSIDriver, - '_volume_updates', - return_value=[]) - @mock.patch.object(storagecenter_iscsi.SCISCSIDriver, - '_update_backend') - def test_failback_volumes(self, - mock_update_backend, - mock_volume_updates, - mock_fixup_types, - mock_reattach_remaining_replications, - mock_wait_for_replication, - mock_remove_mappings, - mock_replicate_to_common, - mock_delete_replication, - mock_find_repl_volume, - mock_find_volume, - mock_parse_extraspecs, - mock_get_qos, - mock_close_connection, - mock_open_connection, - mock_init): - self.driver.replication_enabled = True - self.driver.failed_over = True - self.driver.active_backend_id = 12345 - self.driver.primaryssn = 11111 - backends = self.driver.backends - self.driver.backends = [{'target_device_id': '12345', - 'qosnode': 'cinderqos'}, - {'target_device_id': '67890', - 'qosnode': 'cinderqos'}] - volumes = [{'id': fake.VOLUME_ID, - 'replication_driver_data': '12345', - 'provider_id': '12345.1'}, - {'id': fake.VOLUME2_ID, - 'replication_driver_data': '12345', - 'provider_id': '12345.2'}] - mock_find_volume.side_effect = [{'instanceId': '12345.1'}, - {'instanceId': '12345.2'}] - mock_find_repl_volume.side_effect = [{'instanceId': '11111.1'}, - {'instanceId': '11111.2'}] - mock_replicate_to_common.side_effect = [{'instanceId': '12345.100', - 'destinationVolume': - {'instanceId': '11111.3'} - }, - {'instanceId': '12345.200', - 'destinationVolume': - {'instanceId': '11111.4'} - }] - # we don't care about the return. We just want to make sure that - # _wait_for_replication is called with the proper replitems. 
- self.driver.failback_volumes(volumes) - expected = [{'volume': volumes[0], - 'specs': {'replay_profile_string': 'pro'}, - 'qosnode': 'cinderqos', - 'screpl': '12345.100', - 'cvol': '12345.1', - 'ovol': '11111.1', - 'nvol': '11111.3', - 'rdd': '12345', - 'status': 'inprogress'}, - {'volume': volumes[1], - 'specs': {'replay_profile_string': 'pro'}, - 'qosnode': 'cinderqos', - 'screpl': '12345.200', - 'cvol': '12345.2', - 'ovol': '11111.2', - 'nvol': '11111.4', - 'rdd': '12345', - 'status': 'inprogress'} - ] - # We are stubbing everything out so we just want to be sure this hits - # _volume_updates as expected. (Ordinarily this would be modified by - # the time it hit this but since it isn't we use this to our advantage - # and check that our replitems was set correctly coming out of the - # main loop.) - mock_volume_updates.assert_called_once_with(expected) - - self.driver.replication_enabled = False - self.driver.failed_over = False - self.driver.backends = backends - - @mock.patch.object(storagecenter_iscsi.SCISCSIDriver, - '_get_qos', - return_value='cinderqos') - @mock.patch.object(storagecenter_api.SCApi, - 'find_volume') - @mock.patch.object(storagecenter_iscsi.SCISCSIDriver, - '_update_backend') - @mock.patch.object(storagecenter_api.SCApi, - 'get_live_volume') - @mock.patch.object(storagecenter_api.SCApi, - 'swap_roles_live_volume') - @mock.patch.object(storagecenter_iscsi.SCISCSIDriver, - '_get_replication_specs') - def test_failback_volumes_live_vol(self, - mock_get_replication_specs, - mock_swap_roles_live_volume, - mock_get_live_volume, - mock_update_backend, - mock_find_volume, - mock_get_qos, - mock_close_connection, - mock_open_connection, - mock_init): - self.driver.replication_enabled = True - self.driver.failed_over = True - self.driver.active_backend_id = 12345 - self.driver.primaryssn = 11111 - backends = self.driver.backends - self.driver.backends = [{'target_device_id': '12345', - 'qosnode': 'cinderqos', - 'remoteqos': 'remoteqos'}] - volumes 
= [{'id': fake.VOLUME_ID, - 'replication_driver_data': '12345', - 'provider_id': '12345.1'}, - {'id': fake.VOLUME2_ID, - 'replication_driver_data': '12345', - 'provider_id': '12345.2'}] - mock_get_live_volume.side_effect = [ - {'instanceId': '11111.101', - 'secondaryVolume': {'instanceId': '11111.1001', - 'instanceName': fake.VOLUME_ID}, - 'secondaryScSerialNumber': 11111}, - {'instanceId': '11111.102', - 'secondaryVolume': {'instanceId': '11111.1002', - 'instanceName': fake.VOLUME2_ID}, - 'secondaryScSerialNumber': 11111} - ] - mock_get_replication_specs.return_value = {'enabled': True, - 'live': True} - mock_swap_roles_live_volume.side_effect = [True, True] - mock_find_volume.side_effect = [{'instanceId': '12345.1'}, - {'instanceId': '12345.2'}] - - # we don't care about the return. We just want to make sure that - # _wait_for_replication is called with the proper replitems. - ret = self.driver.failback_volumes(volumes) - expected = [{'updates': {'provider_id': '11111.1001', - 'replication_status': 'enabled', - 'status': 'available'}, - 'volume_id': fake.VOLUME_ID}, - {'updates': {'provider_id': '11111.1002', - 'replication_status': 'enabled', - 'status': 'available'}, - 'volume_id': fake.VOLUME2_ID}] - - self.assertEqual(expected, ret) - - self.driver.replication_enabled = False - self.driver.failed_over = False - self.driver.backends = backends - - @mock.patch.object(storagecenter_iscsi.SCISCSIDriver, - '_get_qos', - return_value='cinderqos') - @mock.patch.object(storagecenter_iscsi.SCISCSIDriver, - '_parse_extraspecs', - return_value={'replay_profile_string': 'pro'}) - @mock.patch.object(storagecenter_api.SCApi, - 'find_volume') - @mock.patch.object(storagecenter_api.SCApi, - 'find_repl_volume') - @mock.patch.object(storagecenter_api.SCApi, - 'delete_replication') - @mock.patch.object(storagecenter_api.SCApi, - 'replicate_to_common') - @mock.patch.object(storagecenter_api.SCApi, - 'remove_mappings') - @mock.patch.object(storagecenter_iscsi.SCISCSIDriver, - 
'_wait_for_replication') - @mock.patch.object(storagecenter_iscsi.SCISCSIDriver, - '_reattach_remaining_replications') - @mock.patch.object(storagecenter_iscsi.SCISCSIDriver, - '_fixup_types') - @mock.patch.object(storagecenter_iscsi.SCISCSIDriver, - '_volume_updates', - return_value=[]) - @mock.patch.object(storagecenter_iscsi.SCISCSIDriver, - '_update_backend') - def test_failback_volumes_with_some_not_replicated( - self, - mock_update_backend, - mock_volume_updates, - mock_fixup_types, - mock_reattach_remaining_replications, - mock_wait_for_replication, - mock_remove_mappings, - mock_replicate_to_common, - mock_delete_replication, - mock_find_repl_volume, - mock_find_volume, - mock_parse_extraspecs, - mock_get_qos, - mock_close_connection, - mock_open_connection, - mock_init): - self.driver.replication_enabled = True - self.driver.failed_over = True - self.driver.active_backend_id = 12345 - self.driver.primaryssn = 11111 - backends = self.driver.backends - self.driver.backends = [{'target_device_id': '12345', - 'qosnode': 'cinderqos'}, - {'target_device_id': '67890', - 'qosnode': 'cinderqos'}] - volumes = [{'id': fake.VOLUME_ID, - 'replication_driver_data': '12345', - 'provider_id': '12345.1'}, - {'id': fake.VOLUME2_ID, - 'replication_driver_data': '12345', - 'provider_id': '12345.2'}, - {'id': fake.VOLUME3_ID, 'provider_id': '11111.10'}] - mock_find_volume.side_effect = [{'instanceId': '12345.1'}, - {'instanceId': '12345.2'}] - mock_find_repl_volume.side_effect = [{'instanceId': '11111.1'}, - {'instanceId': '11111.2'}] - mock_replicate_to_common.side_effect = [{'instanceId': '12345.100', - 'destinationVolume': - {'instanceId': '11111.3'} - }, - {'instanceId': '12345.200', - 'destinationVolume': - {'instanceId': '11111.4'} - }] - expected = [{'volume': volumes[0], - 'specs': {'replay_profile_string': 'pro'}, - 'qosnode': 'cinderqos', - 'screpl': '12345.100', - 'cvol': '12345.1', - 'ovol': '11111.1', - 'nvol': '11111.3', - 'rdd': '12345', - 'status': 
'inprogress'}, - {'volume': volumes[1], - 'specs': {'replay_profile_string': 'pro'}, - 'qosnode': 'cinderqos', - 'screpl': '12345.200', - 'cvol': '12345.2', - 'ovol': '11111.2', - 'nvol': '11111.4', - 'rdd': '12345', - 'status': 'inprogress'} - ] - ret = self.driver.failback_volumes(volumes) - mock_volume_updates.assert_called_once_with(expected) - - # make sure ret is right. In this case just the unreplicated volume - # as our volume updates elsewhere return nothing. - expected_updates = [{'volume_id': fake.VOLUME3_ID, - 'updates': {'status': 'available'}}] - self.assertEqual(expected_updates, ret) - self.driver.replication_enabled = False - self.driver.failed_over = False - self.driver.backends = backends - - @mock.patch.object(storagecenter_iscsi.SCISCSIDriver, - '_get_qos', - return_value='cinderqos') - @mock.patch.object(storagecenter_iscsi.SCISCSIDriver, - '_update_backend') - def test_failback_volumes_with_none_replicated( - self, - mock_update_backend, - mock_get_qos, - mock_close_connection, - mock_open_connection, - mock_init): - self.driver.replication_enabled = True - self.driver.failed_over = True - self.driver.active_backend_id = 12345 - self.driver.primaryssn = 11111 - backends = self.driver.backends - self.driver.backends = [{'target_device_id': '12345', - 'qosnode': 'cinderqos'}, - {'target_device_id': '67890', - 'qosnode': 'cinderqos'}] - volumes = [{'id': fake.VOLUME_ID, - 'provider_id': '11111.1'}, - {'id': fake.VOLUME2_ID, 'provider_id': '11111.2'}, - {'id': fake.VOLUME3_ID, 'provider_id': '11111.10'}] - - ret = self.driver.failback_volumes(volumes) - - # make sure ret is right. In this case just the unreplicated volume - # as our volume updates elsewhere return nothing. 
- expected_updates = [{'volume_id': fake.VOLUME_ID, - 'updates': {'status': 'available'}}, - {'volume_id': fake.VOLUME2_ID, - 'updates': {'status': 'available'}}, - {'volume_id': fake.VOLUME3_ID, - 'updates': {'status': 'available'}}] - self.assertEqual(expected_updates, ret) - self.driver.replication_enabled = False - self.driver.failed_over = False - self.driver.backends = backends - - def test_volume_updates(self, - mock_close_connection, - mock_open_connection, - mock_init): - items = [{'volume': {'id': fake.VOLUME_ID}, - 'specs': {'replay_profile_string': 'pro'}, - 'qosnode': 'cinderqos', - 'screpl': '12345.100', - 'cvol': '12345.1', - 'ovol': '11111.1', - 'nvol': '11111.3', - 'rdd': '12345,67890', - 'status': 'available'}, - {'volume': {'id': fake.VOLUME2_ID}, - 'specs': {'replay_profile_string': 'pro'}, - 'qosnode': 'cinderqos', - 'screpl': '12345.200', - 'cvol': '12345.2', - 'ovol': '11111.2', - 'nvol': '11111.4', - 'rdd': '12345,67890', - 'status': 'available'} - ] - ret = self.driver._volume_updates(items) - expected = [{'volume_id': fake.VOLUME_ID, - 'updates': {'status': 'available', - 'replication_status': 'enabled', - 'provider_id': '11111.3', - 'replication_driver_data': '12345,67890'}}, - {'volume_id': fake.VOLUME2_ID, - 'updates': {'status': 'available', - 'replication_status': 'enabled', - 'provider_id': '11111.4', - 'replication_driver_data': '12345,67890'}} - ] - self.assertEqual(expected, ret) - items.append({'volume': {'id': fake.VOLUME3_ID}, - 'specs': {'replay_profile_string': 'pro'}, - 'qosnode': 'cinderqos', - 'screpl': '12345.300', - 'cvol': '12345.5', - 'ovol': '11111.5', - 'nvol': '11111.6', - 'rdd': '12345', - 'status': 'error'}) - - ret = self.driver._volume_updates(items) - expected.append({'volume_id': fake.VOLUME3_ID, - 'updates': {'status': 'error', - 'replication_status': 'error', - 'provider_id': '11111.6', - 'replication_driver_data': '12345'}}) - self.assertEqual(expected, ret) - - @mock.patch.object(storagecenter_api.SCApi, - 
'get_volume', - return_value=VOLUME) - def test_fixup_types(self, - mock_get_volume, - mock_close_connection, - mock_open_connection, - mock_init): - items = [{'volume': {'id': fake.VOLUME_ID}, - 'specs': {'replay_profile_string': 'pro'}, - 'qosnode': 'cinderqos', - 'screpl': '12345.100', - 'cvol': '12345.1', - 'ovol': '11111.1', - 'nvol': '11111.3', - 'rdd': '12345,67890', - 'status': 'reattached'}, - {'volume': {'id': fake.VOLUME2_ID}, - 'specs': {'replay_profile_string': 'pro'}, - 'qosnode': 'cinderqos', - 'screpl': '12345.200', - 'cvol': '12345.2', - 'ovol': '11111.2', - 'nvol': '11111.4', - 'rdd': '12345,67890', - 'status': 'reattached'} - ] - mock_api = mock.Mock() - mock_api.update_replay_profiles.return_value = True - self.driver._fixup_types(mock_api, items) - expected = [{'volume': {'id': fake.VOLUME_ID}, - 'specs': {'replay_profile_string': 'pro'}, - 'qosnode': 'cinderqos', - 'screpl': '12345.100', - 'cvol': '12345.1', - 'ovol': '11111.1', - 'nvol': '11111.3', - 'rdd': '12345,67890', - 'status': 'available'}, - {'volume': {'id': fake.VOLUME2_ID}, - 'specs': {'replay_profile_string': 'pro'}, - 'qosnode': 'cinderqos', - 'screpl': '12345.200', - 'cvol': '12345.2', - 'ovol': '11111.2', - 'nvol': '11111.4', - 'rdd': '12345,67890', - 'status': 'available'}] - self.assertEqual(expected, items) - - @mock.patch.object(storagecenter_api.SCApi, - 'get_volume', - return_value=VOLUME) - def test_fixup_types_with_error(self, - mock_get_volume, - mock_close_connection, - mock_open_connection, - mock_init): - items = [{'volume': {'id': fake.VOLUME_ID}, - 'specs': {'replay_profile_string': 'pro'}, - 'qosnode': 'cinderqos', - 'screpl': '12345.100', - 'cvol': '12345.1', - 'ovol': '11111.1', - 'nvol': '11111.3', - 'rdd': '12345,67890', - 'status': 'reattached'}, - {'volume': {'id': fake.VOLUME2_ID}, - 'specs': {'replay_profile_string': 'pro'}, - 'qosnode': 'cinderqos', - 'screpl': '12345.200', - 'cvol': '12345.2', - 'ovol': '11111.2', - 'nvol': '11111.4', - 'rdd': 
'12345,67890', - 'status': 'reattached'} - ] - # One good one fail. - mock_api = mock.Mock() - mock_api.update_replay_profiles.side_effect = [True, False] - self.driver._fixup_types(mock_api, items) - expected = [{'volume': {'id': fake.VOLUME_ID}, - 'specs': {'replay_profile_string': 'pro'}, - 'qosnode': 'cinderqos', - 'screpl': '12345.100', - 'cvol': '12345.1', - 'ovol': '11111.1', - 'nvol': '11111.3', - 'rdd': '12345,67890', - 'status': 'available'}, - {'volume': {'id': fake.VOLUME2_ID}, - 'specs': {'replay_profile_string': 'pro'}, - 'qosnode': 'cinderqos', - 'screpl': '12345.200', - 'cvol': '12345.2', - 'ovol': '11111.2', - 'nvol': '11111.4', - 'rdd': '12345,67890', - 'status': 'error'}] - self.assertEqual(expected, items) - - @mock.patch.object(storagecenter_api.SCApi, - 'get_volume', - return_value=VOLUME) - def test_fixup_types_with_previous_error(self, - mock_get_volume, - mock_close_connection, - mock_open_connection, - mock_init): - items = [{'volume': {'id': fake.VOLUME_ID}, - 'specs': {'replay_profile_string': 'pro'}, - 'qosnode': 'cinderqos', - 'screpl': '12345.100', - 'cvol': '12345.1', - 'ovol': '11111.1', - 'nvol': '11111.3', - 'rdd': '12345,67890', - 'status': 'reattached'}, - {'volume': {'id': fake.VOLUME2_ID}, - 'specs': {'replay_profile_string': 'pro'}, - 'qosnode': 'cinderqos', - 'screpl': '12345.200', - 'cvol': '12345.2', - 'ovol': '11111.2', - 'nvol': '11111.4', - 'rdd': '12345,67890', - 'status': 'error'} - ] - mock_api = mock.Mock() - mock_api.update_replay_profiles.return_value = True - self.driver._fixup_types(mock_api, items) - expected = [{'volume': {'id': fake.VOLUME_ID}, - 'specs': {'replay_profile_string': 'pro'}, - 'qosnode': 'cinderqos', - 'screpl': '12345.100', - 'cvol': '12345.1', - 'ovol': '11111.1', - 'nvol': '11111.3', - 'rdd': '12345,67890', - 'status': 'available'}, - {'volume': {'id': fake.VOLUME2_ID}, - 'specs': {'replay_profile_string': 'pro'}, - 'qosnode': 'cinderqos', - 'screpl': '12345.200', - 'cvol': '12345.2', - 
'ovol': '11111.2', - 'nvol': '11111.4', - 'rdd': '12345,67890', - 'status': 'error'}] - self.assertEqual(expected, items) - - def test_reattach_remaining_replications(self, - mock_close_connection, - mock_open_connection, - mock_init): - self.driver.replication_enabled = True - self.driver.failed_over = True - self.driver.active_backend_id = 12345 - self.driver.primaryssn = 11111 - backends = self.driver.backends - self.driver.backends = [{'target_device_id': '12345', - 'qosnode': 'cinderqos'}, - {'target_device_id': '67890', - 'qosnode': 'cinderqos'}] - items = [{'volume': {'id': fake.VOLUME_ID}, - 'specs': {'replicationtype': 'Synchronous', - 'activereplay': False}, - 'qosnode': 'cinderqos', - 'screpl': '12345.100', - 'cvol': '12345.1', - 'ovol': '11111.1', - 'nvol': '11111.3', - 'rdd': '12345', - 'status': 'synced'}, - {'volume': {'id': fake.VOLUME2_ID}, - 'specs': {'replicationtype': 'Asynchronous', - 'activereplay': True}, - 'qosnode': 'cinderqos', - 'screpl': '12345.200', - 'cvol': '12345.2', - 'ovol': '11111.2', - 'nvol': '11111.4', - 'rdd': '12345', - 'status': 'synced'} - ] - mock_api = mock.Mock() - mock_api.ssn = self.driver.active_backend_id - mock_api.get_volume.return_value = self.VOLUME - mock_api.find_repl_volume.return_value = self.VOLUME - mock_api.start_replication.side_effect = [{'instanceId': '11111.1001'}, - {'instanceId': '11111.1002'}, - None, - {'instanceId': '11111.1001'}] - self.driver._reattach_remaining_replications(mock_api, items) - - expected = [{'volume': {'id': fake.VOLUME_ID}, - 'specs': {'replicationtype': 'Synchronous', - 'activereplay': False}, - 'qosnode': 'cinderqos', - 'screpl': '12345.100', - 'cvol': '12345.1', - 'ovol': '11111.1', - 'nvol': '11111.3', - 'rdd': '12345,67890', - 'status': 'reattached'}, - {'volume': {'id': fake.VOLUME2_ID}, - 'specs': {'replicationtype': 'Asynchronous', - 'activereplay': True}, - 'qosnode': 'cinderqos', - 'screpl': '12345.200', - 'cvol': '12345.2', - 'ovol': '11111.2', - 'nvol': '11111.4', - 
'rdd': '12345,67890', - 'status': 'reattached'}] - self.assertEqual(expected, items) - mock_api.start_replication.assert_any_call(self.VOLUME, self.VOLUME, - 'Synchronous', 'cinderqos', - False) - - mock_api.start_replication.assert_any_call(self.VOLUME, self.VOLUME, - 'Asynchronous', 'cinderqos', - True) - items = [{'volume': {'id': fake.VOLUME_ID}, - 'specs': {'replicationtype': 'Synchronous', - 'activereplay': False}, - 'qosnode': 'cinderqos', - 'screpl': '12345.100', - 'cvol': '12345.1', - 'ovol': '11111.1', - 'nvol': '11111.3', - 'rdd': '12345', - 'status': 'synced'}, - {'volume': {'id': fake.VOLUME2_ID}, - 'specs': {'replicationtype': 'Asynchronous', - 'activereplay': True}, - 'qosnode': 'cinderqos', - 'screpl': '12345.200', - 'cvol': '12345.2', - 'ovol': '11111.2', - 'nvol': '11111.4', - 'rdd': '12345', - 'status': 'synced'} - ] - self.driver._reattach_remaining_replications(mock_api, items) - - expected = [{'volume': {'id': fake.VOLUME_ID}, - 'specs': {'replicationtype': 'Synchronous', - 'activereplay': False}, - 'qosnode': 'cinderqos', - 'screpl': '12345.100', - 'cvol': '12345.1', - 'ovol': '11111.1', - 'nvol': '11111.3', - 'rdd': '12345', - 'status': 'error'}, - {'volume': {'id': fake.VOLUME2_ID}, - 'specs': {'replicationtype': 'Asynchronous', - 'activereplay': True}, - 'qosnode': 'cinderqos', - 'screpl': '12345.200', - 'cvol': '12345.2', - 'ovol': '11111.2', - 'nvol': '11111.4', - 'rdd': '12345,67890', - 'status': 'reattached'}] - self.assertEqual(expected, items) - mock_api.start_replication.assert_any_call(self.VOLUME, self.VOLUME, - 'Synchronous', 'cinderqos', - False) - - mock_api.start_replication.assert_any_call(self.VOLUME, self.VOLUME, - 'Asynchronous', 'cinderqos', - True) - - self.driver.backends = backends - - def _setup_items(self): - self.driver.replication_enabled = True - self.driver.failed_over = True - self.driver.active_backend_id = 12345 - self.driver.primaryssn = 11111 - backends = self.driver.backends - self.driver.backends = 
[{'target_device_id': '12345', - 'qosnode': 'cinderqos'}, - {'target_device_id': '67890', - 'qosnode': 'cinderqos'}] - volumes = [{'id': fake.VOLUME_ID, - 'replication_driver_data': '12345', - 'provider_id': '12345.1'}, - {'id': fake.VOLUME2_ID, - 'replication_driver_data': '12345', - 'provider_id': '12345.2'}] - - items = [{'volume': volumes[0], - 'specs': {'replay_profile_string': 'pro', - 'replicationtype': 'Asynchronous', - 'activereplay': True}, - 'qosnode': 'cinderqos', - 'screpl': '12345.100', - 'cvol': '12345.1', - 'ovol': '11111.1', - 'nvol': '11111.3', - 'rdd': '12345', - 'status': 'inprogress'}, - {'volume': volumes[1], - 'specs': {'replay_profile_string': 'pro', - 'replicationtype': 'Asynchronous', - 'activereplay': True}, - 'qosnode': 'cinderqos', - 'screpl': '12345.200', - 'cvol': '12345.2', - 'ovol': '11111.2', - 'nvol': '11111.4', - 'rdd': '12345', - 'status': 'inprogress'} - ] - return items, backends - - def test_wait_for_replication(self, - mock_close_connection, - mock_open_connection, - mock_init): - items, backends = self._setup_items() - expected = [] - for item in items: - expected.append(dict(item)) - expected[0]['status'] = 'synced' - expected[1]['status'] = 'synced' - mock_api = mock.Mock() - mock_api.flip_replication.return_value = True - mock_api.get_volume.return_value = self.VOLUME - mock_api.replication_progress.return_value = (True, 0) - mock_api.rename_volume.return_value = True - self.driver._wait_for_replication(mock_api, items) - self.assertEqual(expected, items) - self.backends = backends - - def test_wait_for_replication_flip_flops(self, - mock_close_connection, - mock_open_connection, - mock_init): - items, backends = self._setup_items() - expected = [] - for item in items: - expected.append(dict(item)) - expected[0]['status'] = 'synced' - expected[1]['status'] = 'error' - mock_api = mock.Mock() - mock_api.flip_replication.side_effect = [True, False] - mock_api.get_volume.return_value = self.VOLUME - 
mock_api.replication_progress.return_value = (True, 0) - mock_api.rename_volume.return_value = True - self.driver._wait_for_replication(mock_api, items) - self.assertEqual(expected, items) - self.backends = backends - - def test_wait_for_replication_flip_no_vol(self, - mock_close_connection, - mock_open_connection, - mock_init): - items, backends = self._setup_items() - expected = [] - for item in items: - expected.append(dict(item)) - expected[0]['status'] = 'synced' - expected[1]['status'] = 'error' - mock_api = mock.Mock() - mock_api.flip_replication.return_value = True - mock_api.get_volume.side_effect = [self.VOLUME, self.VOLUME, - self.VOLUME, - self.VOLUME, None] - mock_api.replication_progress.return_value = (True, 0) - mock_api.rename_volume.return_value = True - self.driver._wait_for_replication(mock_api, items) - self.assertEqual(expected, items) - self.backends = backends - - def test_wait_for_replication_cant_find_orig(self, - mock_close_connection, - mock_open_connection, - mock_init): - items, backends = self._setup_items() - expected = [] - for item in items: - expected.append(dict(item)) - expected[0]['status'] = 'synced' - expected[1]['status'] = 'synced' - mock_api = mock.Mock() - mock_api.flip_replication.return_value = True - mock_api.get_volume.side_effect = [self.VOLUME, self.VOLUME, - None, - self.VOLUME, self.VOLUME, - None] - mock_api.replication_progress.return_value = (True, 0) - mock_api.rename_volume.return_value = True - self.driver._wait_for_replication(mock_api, items) - self.assertEqual(expected, items) - self.backends = backends - - def test_wait_for_replication_rename_fail(self, - mock_close_connection, - mock_open_connection, - mock_init): - items, backends = self._setup_items() - expected = [] - for item in items: - expected.append(dict(item)) - expected[0]['status'] = 'synced' - expected[1]['status'] = 'synced' - mock_api = mock.Mock() - mock_api.flip_replication.return_value = True - mock_api.get_volume.return_value = 
self.VOLUME - mock_api.replication_progress.return_value = (True, 0) - mock_api.rename_volume.return_value = True - self.driver._wait_for_replication(mock_api, items) - self.assertEqual(expected, items) - self.backends = backends - - def test_wait_for_replication_timeout(self, - mock_close_connection, - mock_open_connection, - mock_init): - items, backends = self._setup_items() - expected = [] - for item in items: - expected.append(dict(item)) - expected[0]['status'] = 'error' - expected[1]['status'] = 'error' - self.assertNotEqual(items, expected) - mock_api = mock.Mock() - mock_api.get_volume.side_effect = [self.VOLUME, self.VOLUME, - self.VOLUME, - self.VOLUME, None] - mock_api.replication_progress.return_value = (False, 500) - self.driver.failback_timeout = 1 - self.driver._wait_for_replication(mock_api, items) - self.assertEqual(expected, items) - calls = [mock.call(1)] * 5 - self.mock_sleep.assert_has_calls(calls) - self.backends = backends - - @mock.patch.object(storagecenter_iscsi.SCISCSIDriver, - '_get_volume_extra_specs') - def test_parse_extraspecs(self, - mock_get_volume_extra_specs, - mock_close_connection, - mock_open_connection, - mock_init): - volume = {'id': fake.VOLUME_ID} - mock_get_volume_extra_specs.return_value = {} - ret = self.driver._parse_extraspecs(volume) - expected = {'replicationtype': 'Asynchronous', - 'activereplay': False, - 'storage_profile': None, - 'replay_profile_string': None} - self.assertEqual(expected, ret) - - def test_get_qos(self, - mock_close_connection, - mock_open_connection, - mock_init): - backends = self.driver.backends - self.driver.backends = [{'target_device_id': '12345', - 'qosnode': 'cinderqos1'}, - {'target_device_id': '67890', - 'qosnode': 'cinderqos2'}] - ret = self.driver._get_qos(12345) - self.assertEqual('cinderqos1', ret) - ret = self.driver._get_qos(67890) - self.assertEqual('cinderqos2', ret) - ret = self.driver._get_qos(11111) - self.assertIsNone(ret) - self.driver.backends[0] = {'target_device_id': 
'12345'} - ret = self.driver._get_qos(12345) - self.assertEqual('cinderqos', ret) - self.driver.backends = backends - - def test_thaw_backend(self, - mock_close_connection, - mock_open_connection, - mock_init): - self.driver.failed_over = False - ret = self.driver.thaw_backend(self._context) - self.assertTrue(ret) - - def test_thaw_backend_failed_over(self, - mock_close_connection, - mock_open_connection, - mock_init): - self.driver.failed_over = True - self.assertRaises(exception.Invalid, - self.driver.thaw_backend, - self._context) diff --git a/cinder/tests/unit/volume/drivers/dell_emc/sc/test_scapi.py b/cinder/tests/unit/volume/drivers/dell_emc/sc/test_scapi.py deleted file mode 100644 index 25f7d72d7..000000000 --- a/cinder/tests/unit/volume/drivers/dell_emc/sc/test_scapi.py +++ /dev/null @@ -1,8886 +0,0 @@ -# Copyright (c) 2015 Dell Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import ddt -import eventlet -import mock -import requests -from requests import models -import uuid - -from cinder import context -from cinder import exception -from cinder import test -from cinder.tests.unit import fake_constants as fake -from cinder.volume.drivers.dell_emc.sc import storagecenter_api - - -# We patch these here as they are used by every test to keep -# from trying to contact a Dell Storage Center. 
-@ddt.ddt -@mock.patch.object(storagecenter_api.SCApi, - '__init__', - return_value=None) -@mock.patch.object(storagecenter_api.SCApi, - 'open_connection') -@mock.patch.object(storagecenter_api.SCApi, - 'close_connection') -class DellSCSanAPITestCase(test.TestCase): - - """DellSCSanAPITestCase - - Class to test the Storage Center API using Mock. - """ - - SC = {u'IPv6ManagementIPPrefix': 128, - u'connectionError': u'', - u'instanceId': u'64702', - u'scSerialNumber': 64702, - u'dataProgressionRunning': False, - u'hostOrIpAddress': u'192.168.0.80', - u'userConnected': True, - u'portsBalanced': True, - u'managementIp': u'192.168.0.80', - u'version': u'6.5.1.269', - u'location': u'', - u'objectType': u'StorageCenter', - u'instanceName': u'Storage Center 64702', - u'statusMessage': u'', - u'status': u'Up', - u'flashOptimizedConfigured': False, - u'connected': True, - u'operationMode': u'Normal', - u'userName': u'Admin', - u'nonFlashOptimizedConfigured': True, - u'name': u'Storage Center 64702', - u'scName': u'Storage Center 64702', - u'notes': u'', - u'serialNumber': 64702, - u'raidRebalanceRunning': False, - u'userPasswordExpired': False, - u'contact': u'', - u'IPv6ManagementIP': u'::'} - - VOLUME = {u'instanceId': u'64702.3494', - u'scSerialNumber': 64702, - u'replicationSource': False, - u'liveVolume': False, - u'vpdId': 3496, - u'objectType': u'ScVolume', - u'index': 3494, - u'volumeFolderPath': u'devstackvol/fcvm/', - u'hostCacheEnabled': False, - u'usedByLegacyFluidFsNasVolume': False, - u'inRecycleBin': False, - u'volumeFolderIndex': 17, - u'instanceName': u'volume-37883deb-85cd-426a-9a98-62eaad8671ea', - u'statusMessage': u'', - u'status': u'Up', - u'storageType': {u'instanceId': u'64702.1', - u'instanceName': u'Assigned - Redundant - 2 MB', - u'objectType': u'ScStorageType'}, - u'cmmDestination': False, - u'replicationDestination': False, - u'volumeFolder': {u'instanceId': u'64702.17', - u'instanceName': u'fcvm', - u'objectType': u'ScVolumeFolder'}, - 
u'deviceId': u'6000d31000fcbe000000000000000da8', - u'active': True, - u'portableVolumeDestination': False, - u'deleteAllowed': True, - u'name': u'volume-37883deb-85cd-426a-9a98-62eaad8671ea', - u'scName': u'Storage Center 64702', - u'secureDataUsed': False, - u'serialNumber': u'0000fcbe-00000da8', - u'replayAllowed': True, - u'flashOptimized': False, - u'configuredSize': u'1.073741824E9 Bytes', - u'mapped': False, - u'cmmSource': False} - - VOLUME_LIST = [{u'instanceId': u'64702.3494', - u'scSerialNumber': 64702, - u'replicationSource': False, - u'liveVolume': False, - u'vpdId': 3496, - u'objectType': u'ScVolume', - u'index': 3494, - u'volumeFolderPath': u'devstackvol/fcvm/', - u'hostCacheEnabled': False, - u'usedByLegacyFluidFsNasVolume': False, - u'inRecycleBin': False, - u'volumeFolderIndex': 17, - u'instanceName': - u'volume-37883deb-85cd-426a-9a98-62eaad8671ea', - u'statusMessage': u'', - u'status': u'Up', - u'storageType': {u'instanceId': u'64702.1', - u'instanceName': - u'Assigned - Redundant - 2 MB', - u'objectType': u'ScStorageType'}, - u'cmmDestination': False, - u'replicationDestination': False, - u'volumeFolder': {u'instanceId': u'64702.17', - u'instanceName': u'fcvm', - u'objectType': u'ScVolumeFolder'}, - u'deviceId': u'6000d31000fcbe000000000000000da8', - u'active': True, - u'portableVolumeDestination': False, - u'deleteAllowed': True, - u'name': u'volume-37883deb-85cd-426a-9a98-62eaad8671ea', - u'scName': u'Storage Center 64702', - u'secureDataUsed': False, - u'serialNumber': u'0000fcbe-00000da8', - u'replayAllowed': True, - u'flashOptimized': False, - u'configuredSize': u'1.073741824E9 Bytes', - u'mapped': False, - u'cmmSource': False}] - - # Volume list that contains multiple volumes - VOLUME_LIST_MULTI_VOLS = [ - {u'instanceId': u'64702.3494', - u'scSerialNumber': 64702, - u'replicationSource': False, - u'liveVolume': False, - u'vpdId': 3496, - u'objectType': u'ScVolume', - u'index': 3494, - u'volumeFolderPath': u'devstackvol/fcvm/', - 
u'hostCacheEnabled': False, - u'usedByLegacyFluidFsNasVolume': False, - u'inRecycleBin': False, - u'volumeFolderIndex': 17, - u'instanceName': - u'volume-37883deb-85cd-426a-9a98-62eaad8671ea', - u'statusMessage': u'', - u'status': u'Up', - u'storageType': {u'instanceId': u'64702.1', - u'instanceName': - u'Assigned - Redundant - 2 MB', - u'objectType': u'ScStorageType'}, - u'cmmDestination': False, - u'replicationDestination': False, - u'volumeFolder': {u'instanceId': u'64702.17', - u'instanceName': u'fcvm', - u'objectType': u'ScVolumeFolder'}, - u'deviceId': u'6000d31000fcbe000000000000000da8', - u'active': True, - u'portableVolumeDestination': False, - u'deleteAllowed': True, - u'name': u'volume-37883deb-85cd-426a-9a98-62eaad8671ea', - u'scName': u'Storage Center 64702', - u'secureDataUsed': False, - u'serialNumber': u'0000fcbe-00000da8', - u'replayAllowed': True, - u'flashOptimized': False, - u'configuredSize': u'1.073741824E9 Bytes', - u'mapped': False, - u'cmmSource': False}, - {u'instanceId': u'64702.3495', - u'scSerialNumber': 64702, - u'replicationSource': False, - u'liveVolume': False, - u'vpdId': 3496, - u'objectType': u'ScVolume', - u'index': 3495, - u'volumeFolderPath': u'devstackvol/fcvm/', - u'hostCacheEnabled': False, - u'usedByLegacyFluidFsNasVolume': False, - u'inRecycleBin': False, - u'volumeFolderIndex': 17, - u'instanceName': - u'volume-37883deb-85cd-426a-9a98-62eaad8671ea', - u'statusMessage': u'', - u'status': u'Up', - u'storageType': {u'instanceId': u'64702.1', - u'instanceName': - u'Assigned - Redundant - 2 MB', - u'objectType': u'ScStorageType'}, - u'cmmDestination': False, - u'replicationDestination': False, - u'volumeFolder': {u'instanceId': u'64702.17', - u'instanceName': u'fcvm', - u'objectType': u'ScVolumeFolder'}, - u'deviceId': u'6000d31000fcbe000000000000000da9', - u'active': True, - u'portableVolumeDestination': False, - u'deleteAllowed': True, - u'name': u'volume-37883deb-85cd-426a-9a98-62eaad8671ea', - u'scName': u'Storage Center 
64702', - u'secureDataUsed': False, - u'serialNumber': u'0000fcbe-00000da8', - u'replayAllowed': True, - u'flashOptimized': False, - u'configuredSize': u'1.073741824E9 Bytes', - u'mapped': False, - u'cmmSource': False}] - - VOLUME_CONFIG = \ - {u'instanceId': u'64702.3494', - u'scSerialNumber': 64702, - u'maximumSiblingCount': 100, - u'writeCacheStatus': u'Up', - u'objectType': u'ScVolumeConfiguration', - u'currentSiblingConfiguredSize': u'2.147483648E9 Bytes', - u'compressionPaused': False, - u'enforceConsumptionLimit': False, - u'volumeSpaceConsumptionLimit': u'2.147483648E9 Bytes', - u'readCacheEnabled': True, - u'writeCacheEnabled': True, - u'instanceName': u'volume-ff9589d3-2d41-48d5-9ef5-2713a875e85b', - u'dateModified': u'04/03/2015 12:01:08 AM', - u'modifyUser': u'Admin', - u'replayExpirationPaused': False, - u'currentSiblingCount': 1, - u'replayCreationPaused': False, - u'replayProfileList': [{u'instanceId': u'64702.2', - u'instanceName': u'Daily', - u'objectType': u'ScReplayProfile'}], - u'dateCreated': u'04/04/2014 03:54:26 AM', - u'volume': {u'instanceId': u'64702.3494', - u'instanceName': - u'volume-37883deb-85cd-426a-9a98-62eaad8671ea', - u'objectType': u'ScVolume'}, - u'controller': {u'instanceId': u'64702.64703', - u'instanceName': u'SN 64703', - u'objectType': u'ScController'}, - u'coalesceIntoActive': False, - u'createUser': u'Admin', - u'importToLowestTier': False, - u'readCacheStatus': u'Up', - u'maximumSiblingConfiguredSpace': u'5.49755813888E14 Bytes', - u'storageProfile': {u'instanceId': u'64702.1', - u'instanceName': u'Recommended', - u'objectType': u'ScStorageProfile'}, - u'scName': u'Storage Center 64702', - u'notes': u'', - u'diskFolder': {u'instanceId': u'64702.3', - u'instanceName': u'Assigned', - u'objectType': u'ScDiskFolder'}, - u'openVmsUniqueDiskId': 48, - u'compressionEnabled': False} - - INACTIVE_VOLUME = \ - {u'instanceId': u'64702.3494', - u'scSerialNumber': 64702, - u'replicationSource': False, - u'liveVolume': False, - 
u'vpdId': 3496, - u'objectType': u'ScVolume', - u'index': 3494, - u'volumeFolderPath': u'devstackvol/fcvm/', - u'hostCacheEnabled': False, - u'usedByLegacyFluidFsNasVolume': False, - u'inRecycleBin': False, - u'volumeFolderIndex': 17, - u'instanceName': u'volume-37883deb-85cd-426a-9a98-62eaad8671ea', - u'statusMessage': u'', - u'status': u'Up', - u'storageType': {u'instanceId': u'64702.1', - u'instanceName': u'Assigned - Redundant - 2 MB', - u'objectType': u'ScStorageType'}, - u'cmmDestination': False, - u'replicationDestination': False, - u'volumeFolder': {u'instanceId': u'64702.17', - u'instanceName': u'fcvm', - u'objectType': u'ScVolumeFolder'}, - u'deviceId': u'6000d31000fcbe000000000000000da8', - u'active': False, - u'portableVolumeDestination': False, - u'deleteAllowed': True, - u'name': u'volume-37883deb-85cd-426a-9a98-62eaad8671ea', - u'scName': u'Storage Center 64702', - u'secureDataUsed': False, - u'serialNumber': u'0000fcbe-00000da8', - u'replayAllowed': True, - u'flashOptimized': False, - u'configuredSize': u'1.073741824E9 Bytes', - u'mapped': False, - u'cmmSource': False} - - SCSERVER = {u'scName': u'Storage Center 64702', - u'volumeCount': 0, - u'removeHbasAllowed': True, - u'legacyFluidFs': False, - u'serverFolderIndex': 4, - u'alertOnConnectivity': True, - u'objectType': u'ScPhysicalServer', - u'instanceName': u'Server_21000024ff30441d', - u'instanceId': u'64702.47', - u'serverFolderPath': u'devstacksrv/', - u'portType': [u'FibreChannel'], - u'type': u'Physical', - u'statusMessage': u'Only 5 of 6 expected paths are up', - u'status': u'Degraded', - u'scSerialNumber': 64702, - u'serverFolder': {u'instanceId': u'64702.4', - u'instanceName': u'devstacksrv', - u'objectType': u'ScServerFolder'}, - u'parentIndex': 0, - u'connectivity': u'Partial', - u'hostCacheIndex': 0, - u'deleteAllowed': True, - u'pathCount': 5, - u'name': u'Server_21000024ff30441d', - u'hbaPresent': True, - u'hbaCount': 2, - u'notes': u'Created by Dell EMC Cinder Driver', - u'mapped': 
False, - u'operatingSystem': {u'instanceId': u'64702.38', - u'instanceName': u'Red Hat Linux 6.x', - u'objectType': u'ScServerOperatingSystem'} - } - - # ScServer where deletedAllowed=False (not allowed to be deleted) - SCSERVER_NO_DEL = {u'scName': u'Storage Center 64702', - u'volumeCount': 0, - u'removeHbasAllowed': True, - u'legacyFluidFs': False, - u'serverFolderIndex': 4, - u'alertOnConnectivity': True, - u'objectType': u'ScPhysicalServer', - u'instanceName': u'Server_21000024ff30441d', - u'instanceId': u'64702.47', - u'serverFolderPath': u'devstacksrv/', - u'portType': [u'FibreChannel'], - u'type': u'Physical', - u'statusMessage': u'Only 5 of 6 expected paths are up', - u'status': u'Degraded', - u'scSerialNumber': 64702, - u'serverFolder': {u'instanceId': u'64702.4', - u'instanceName': u'devstacksrv', - u'objectType': u'ScServerFolder'}, - u'parentIndex': 0, - u'connectivity': u'Partial', - u'hostCacheIndex': 0, - u'deleteAllowed': False, - u'pathCount': 5, - u'name': u'Server_21000024ff30441d', - u'hbaPresent': True, - u'hbaCount': 2, - u'notes': u'Created by Dell EMC Cinder Driver', - u'mapped': False, - u'operatingSystem': - {u'instanceId': u'64702.38', - u'instanceName': u'Red Hat Linux 6.x', - u'objectType': u'ScServerOperatingSystem'} - } - - SCSERVERS = [{u'scName': u'Storage Center 64702', - u'volumeCount': 5, - u'removeHbasAllowed': True, - u'legacyFluidFs': False, - u'serverFolderIndex': 0, - u'alertOnConnectivity': True, - u'objectType': u'ScPhysicalServer', - u'instanceName': u'openstack4', - u'instanceId': u'64702.1', - u'serverFolderPath': u'', - u'portType': [u'Iscsi'], - u'type': u'Physical', - u'statusMessage': u'', - u'status': u'Up', - u'scSerialNumber': 64702, - u'serverFolder': {u'instanceId': u'64702.0', - u'instanceName': u'Servers', - u'objectType': u'ScServerFolder'}, - u'parentIndex': 0, - u'connectivity': u'Up', - u'hostCacheIndex': 0, - u'deleteAllowed': True, - u'pathCount': 0, - u'name': u'openstack4', - u'hbaPresent': True, - 
u'hbaCount': 1, - u'notes': u'', - u'mapped': True, - u'operatingSystem': - {u'instanceId': u'64702.3', - u'instanceName': u'Other Multipath', - u'objectType': u'ScServerOperatingSystem'}}, - {u'scName': u'Storage Center 64702', - u'volumeCount': 1, - u'removeHbasAllowed': True, - u'legacyFluidFs': False, - u'serverFolderIndex': 0, - u'alertOnConnectivity': True, - u'objectType': u'ScPhysicalServer', - u'instanceName': u'openstack5', - u'instanceId': u'64702.2', - u'serverFolderPath': u'', - u'portType': [u'Iscsi'], - u'type': u'Physical', - u'statusMessage': u'', - u'status': u'Up', - u'scSerialNumber': 64702, - u'serverFolder': {u'instanceId': u'64702.0', - u'instanceName': u'Servers', - u'objectType': u'ScServerFolder'}, - u'parentIndex': 0, - u'connectivity': u'Up', - u'hostCacheIndex': 0, - u'deleteAllowed': True, - u'pathCount': 0, u'name': u'openstack5', - u'hbaPresent': True, - u'hbaCount': 1, - u'notes': u'', - u'mapped': True, - u'operatingSystem': - {u'instanceId': u'64702.2', - u'instanceName': u'Other Singlepath', - u'objectType': u'ScServerOperatingSystem'}}] - - # ScServers list where status = Down - SCSERVERS_DOWN = \ - [{u'scName': u'Storage Center 64702', - u'volumeCount': 5, - u'removeHbasAllowed': True, - u'legacyFluidFs': False, - u'serverFolderIndex': 0, - u'alertOnConnectivity': True, - u'objectType': u'ScPhysicalServer', - u'instanceName': u'openstack4', - u'instanceId': u'64702.1', - u'serverFolderPath': u'', - u'portType': [u'Iscsi'], - u'type': u'Physical', - u'statusMessage': u'', - u'status': u'Down', - u'scSerialNumber': 64702, - u'serverFolder': {u'instanceId': u'64702.0', - u'instanceName': u'Servers', - u'objectType': u'ScServerFolder'}, - u'parentIndex': 0, - u'connectivity': u'Up', - u'hostCacheIndex': 0, - u'deleteAllowed': True, - u'pathCount': 0, - u'name': u'openstack4', - u'hbaPresent': True, - u'hbaCount': 1, - u'notes': u'', - u'mapped': True, - u'operatingSystem': - {u'instanceId': u'64702.3', - u'instanceName': u'Other 
Multipath', - u'objectType': u'ScServerOperatingSystem'}}] - - MAP_PROFILE = {u'instanceId': u'64702.2941', - u'scName': u'Storage Center 64702', - u'scSerialNumber': 64702, - u'controller': {u'instanceId': u'64702.64703', - u'instanceName': u'SN 64703', - u'objectType': u'ScController'}, - u'lunUsed': [1], - u'server': {u'instanceId': u'64702.47', - u'instanceName': u'Server_21000024ff30441d', - u'objectType': u'ScPhysicalServer'}, - u'volume': - {u'instanceId': u'64702.6025', - u'instanceName': u'Server_21000024ff30441d Test Vol', - u'objectType': u'ScVolume'}, - u'connectivity': u'Up', - u'readOnly': False, - u'objectType': u'ScMappingProfile', - u'hostCache': False, - u'mappedVia': u'Server', - u'mapCount': 3, - u'instanceName': u'6025-47', - u'lunRequested': u'N/A'} - - MAP_PROFILES = [MAP_PROFILE] - - MAPPINGS = [{u'profile': {u'instanceId': u'64702.104', - u'instanceName': u'92-30', - u'objectType': u'ScMappingProfile'}, - u'status': u'Down', - u'statusMessage': u'', - u'instanceId': u'64702.969.64702', - u'scName': u'Storage Center 64702', - u'scSerialNumber': 64702, - u'controller': {u'instanceId': u'64702.64702', - u'instanceName': u'SN 64702', - u'objectType': u'ScController'}, - u'server': {u'instanceId': u'64702.30', - u'instanceName': - u'Server_iqn.1993-08.org.debian:01:3776df826e4f', - u'objectType': u'ScPhysicalServer'}, - u'volume': {u'instanceId': u'64702.92', - u'instanceName': - u'volume-74a21934-60ad-4cf2-b89b-1f0dda309ddf', - u'objectType': u'ScVolume'}, - u'readOnly': False, - u'lun': 1, - u'lunUsed': [1], - u'serverHba': {u'instanceId': u'64702.3454975614', - u'instanceName': - u'iqn.1993-08.org.debian:01:3776df826e4f', - u'objectType': u'ScServerHba'}, - u'path': {u'instanceId': u'64702.64702.64702.31.8', - u'instanceName': - u'iqn.1993-08.org.debian:' - '01:3776df826e4f-5000D31000FCBE43', - u'objectType': u'ScServerHbaPath'}, - u'controllerPort': {u'instanceId': - u'64702.5764839588723736131.91', - u'instanceName': u'5000D31000FCBE43', - 
u'objectType': u'ScControllerPort'}, - u'instanceName': u'64702-969', - u'transport': u'Iscsi', - u'objectType': u'ScMapping'}] - - # Multiple mappings to test find_iscsi_properties with multiple portals - MAPPINGS_MULTI_PORTAL = \ - [{u'profile': {u'instanceId': u'64702.104', - u'instanceName': u'92-30', - u'objectType': u'ScMappingProfile'}, - u'status': u'Down', - u'statusMessage': u'', - u'instanceId': u'64702.969.64702', - u'scName': u'Storage Center 64702', - u'scSerialNumber': 64702, - u'controller': {u'instanceId': u'64702.64702', - u'instanceName': u'SN 64702', - u'objectType': u'ScController'}, - u'server': {u'instanceId': u'64702.30', - u'instanceName': - u'Server_iqn.1993-08.org.debian:01:3776df826e4f', - u'objectType': u'ScPhysicalServer'}, - u'volume': {u'instanceId': u'64702.92', - u'instanceName': - u'volume-74a21934-60ad-4cf2-b89b-1f0dda309ddf', - u'objectType': u'ScVolume'}, - u'readOnly': False, - u'lun': 1, - u'lunUsed': [1], - u'serverHba': {u'instanceId': u'64702.3454975614', - u'instanceName': - u'iqn.1993-08.org.debian:01:3776df826e4f', - u'objectType': u'ScServerHba'}, - u'path': {u'instanceId': u'64702.64702.64702.31.8', - u'instanceName': - u'iqn.1993-08.org.debian:' - '01:3776df826e4f-5000D31000FCBE43', - u'objectType': u'ScServerHbaPath'}, - u'controllerPort': {u'instanceId': - u'64702.5764839588723736131.91', - u'instanceName': u'5000D31000FCBE43', - u'objectType': u'ScControllerPort'}, - u'instanceName': u'64702-969', - u'transport': u'Iscsi', - u'objectType': u'ScMapping'}, - {u'profile': {u'instanceId': u'64702.104', - u'instanceName': u'92-30', - u'objectType': u'ScMappingProfile'}, - u'status': u'Down', - u'statusMessage': u'', - u'instanceId': u'64702.969.64702', - u'scName': u'Storage Center 64702', - u'scSerialNumber': 64702, - u'controller': {u'instanceId': u'64702.64702', - u'instanceName': u'SN 64702', - u'objectType': u'ScController'}, - u'server': {u'instanceId': u'64702.30', - u'instanceName': - 
u'Server_iqn.1993-08.org.debian:01:3776df826e4f', - u'objectType': u'ScPhysicalServer'}, - u'volume': {u'instanceId': u'64702.92', - u'instanceName': - u'volume-74a21934-60ad-4cf2-b89b-1f0dda309ddf', - u'objectType': u'ScVolume'}, - u'readOnly': False, - u'lun': 1, - u'lunUsed': [1], - u'serverHba': {u'instanceId': u'64702.3454975614', - u'instanceName': - u'iqn.1993-08.org.debian:01:3776df826e4f', - u'objectType': u'ScServerHba'}, - u'path': {u'instanceId': u'64702.64702.64702.31.8', - u'instanceName': - u'iqn.1993-08.org.debian:' - '01:3776df826e4f-5000D31000FCBE43', - u'objectType': u'ScServerHbaPath'}, - u'controllerPort': {u'instanceId': - u'64702.5764839588723736131.91', - u'instanceName': u'5000D31000FCBE43', - u'objectType': u'ScControllerPort'}, - u'instanceName': u'64702-969', - u'transport': u'Iscsi', - u'objectType': u'ScMapping'}] - - MAPPINGS_READ_ONLY = \ - [{u'profile': {u'instanceId': u'64702.104', - u'instanceName': u'92-30', - u'objectType': u'ScMappingProfile'}, - u'status': u'Down', - u'statusMessage': u'', - u'instanceId': u'64702.969.64702', - u'scName': u'Storage Center 64702', - u'scSerialNumber': 64702, - u'controller': {u'instanceId': u'64702.64702', - u'instanceName': u'SN 64702', - u'objectType': u'ScController'}, - u'server': {u'instanceId': u'64702.30', - u'instanceName': - u'Server_iqn.1993-08.org.debian:01:3776df826e4f', - u'objectType': u'ScPhysicalServer'}, - u'volume': {u'instanceId': u'64702.92', - u'instanceName': - u'volume-74a21934-60ad-4cf2-b89b-1f0dda309ddf', - u'objectType': u'ScVolume'}, - u'readOnly': True, - u'lun': 1, - u'lunUsed': [1], - u'serverHba': {u'instanceId': u'64702.3454975614', - u'instanceName': - u'iqn.1993-08.org.debian:01:3776df826e4f', - u'objectType': u'ScServerHba'}, - u'path': {u'instanceId': u'64702.64702.64702.31.8', - u'instanceName': - u'iqn.1993-08.org.debian:' - '01:3776df826e4f-5000D31000FCBE43', - u'objectType': u'ScServerHbaPath'}, - u'controllerPort': {u'instanceId': - 
u'64702.5764839588723736131.91', - u'instanceName': - u'5000D31000FCBE43', - u'objectType': u'ScControllerPort'}, - u'instanceName': u'64702-969', - u'transport': u'Iscsi', - u'objectType': u'ScMapping'}] - - FC_MAPPINGS = [{u'profile': {u'instanceId': u'64702.2941', - u'instanceName': u'6025-47', - u'objectType': u'ScMappingProfile'}, - u'status': u'Up', - u'statusMessage': u'', - u'instanceId': u'64702.7639.64702', - u'scName': u'Storage Center 64702', - u'scSerialNumber': 64702, - u'controller': {u'instanceId': u'64702.64703', - u'instanceName': u'SN 64703', - u'objectType': u'ScController'}, - u'server': {u'instanceId': u'64702.47', - u'instanceName': u'Server_21000024ff30441d', - u'objectType': u'ScPhysicalServer'}, - u'volume': {u'instanceId': u'64702.6025', - u'instanceName': - u'Server_21000024ff30441d Test Vol', - u'objectType': u'ScVolume'}, - u'readOnly': False, - u'lun': 1, - u'serverHba': {u'instanceId': u'64702.3282218607', - u'instanceName': u'21000024FF30441C', - u'objectType': u'ScServerHba'}, - u'path': {u'instanceId': u'64702.64702.64703.27.73', - u'instanceName': - u'21000024FF30441C-5000D31000FCBE36', - u'objectType': u'ScServerHbaPath'}, - u'controllerPort': - {u'instanceId': u'64702.5764839588723736118.50', - u'instanceName': u'5000D31000FCBE36', - u'objectType': u'ScControllerPort'}, - u'instanceName': u'64702-7639', - u'transport': u'FibreChannel', - u'objectType': u'ScMapping'}, - {u'profile': {u'instanceId': u'64702.2941', - u'instanceName': u'6025-47', - u'objectType': u'ScMappingProfile'}, - u'status': u'Up', - u'statusMessage': u'', - u'instanceId': u'64702.7640.64702', - u'scName': u'Storage Center 64702', - u'scSerialNumber': 64702, - u'controller': {u'instanceId': u'64702.64703', - u'instanceName': u'SN 64703', - u'objectType': u'ScController'}, - u'server': {u'instanceId': u'64702.47', - u'instanceName': u'Server_21000024ff30441d', - u'objectType': u'ScPhysicalServer'}, - u'volume': - {u'instanceId': u'64702.6025', - 
u'instanceName': u'Server_21000024ff30441d Test Vol', - u'objectType': u'ScVolume'}, - u'readOnly': False, - u'lun': 1, - u'serverHba': {u'instanceId': u'64702.3282218606', - u'instanceName': u'21000024FF30441D', - u'objectType': u'ScServerHba'}, - u'path': - {u'instanceId': u'64702.64702.64703.27.78', - u'instanceName': u'21000024FF30441D-5000D31000FCBE36', - u'objectType': u'ScServerHbaPath'}, - u'controllerPort': - {u'instanceId': u'64702.5764839588723736118.50', - u'instanceName': u'5000D31000FCBE36', - u'objectType': u'ScControllerPort'}, - u'instanceName': u'64702-7640', - u'transport': u'FibreChannel', - u'objectType': u'ScMapping'}, - {u'profile': {u'instanceId': u'64702.2941', - u'instanceName': u'6025-47', - u'objectType': u'ScMappingProfile'}, - u'status': u'Up', - u'statusMessage': u'', - u'instanceId': u'64702.7638.64702', - u'scName': u'Storage Center 64702', - u'scSerialNumber': 64702, - u'controller': {u'instanceId': u'64702.64703', - u'instanceName': u'SN 64703', - u'objectType': u'ScController'}, - u'server': {u'instanceId': u'64702.47', - u'instanceName': u'Server_21000024ff30441d', - u'objectType': u'ScPhysicalServer'}, - u'volume': {u'instanceId': u'64702.6025', - u'instanceName': - u'Server_21000024ff30441d Test Vol', - u'objectType': u'ScVolume'}, - u'readOnly': False, - u'lun': 1, - u'serverHba': {u'instanceId': u'64702.3282218606', - u'instanceName': u'21000024FF30441D', - u'objectType': u'ScServerHba'}, - u'path': - {u'instanceId': u'64702.64702.64703.28.76', - u'instanceName': u'21000024FF30441D-5000D31000FCBE3E', - u'objectType': u'ScServerHbaPath'}, - u'controllerPort': {u'instanceId': - u'64702.5764839588723736126.60', - u'instanceName': u'5000D31000FCBE3E', - u'objectType': u'ScControllerPort'}, - u'instanceName': u'64702-7638', - u'transport': u'FibreChannel', - u'objectType': u'ScMapping'}] - - FC_MAPPINGS_LUN_MISMATCH = \ - [{u'profile': {u'instanceId': u'64702.2941', - u'instanceName': u'6025-47', - u'objectType': 
u'ScMappingProfile'}, - u'status': u'Up', - u'statusMessage': u'', - u'instanceId': u'64702.7639.64702', - u'scName': u'Storage Center 64702', - u'scSerialNumber': 64702, - u'controller': {u'instanceId': u'64702.64703', - u'instanceName': u'SN 64703', - u'objectType': u'ScController'}, - u'server': {u'instanceId': u'64702.47', - u'instanceName': u'Server_21000024ff30441d', - u'objectType': u'ScPhysicalServer'}, - u'volume': {u'instanceId': u'64702.6025', - u'instanceName': - u'Server_21000024ff30441d Test Vol', - u'objectType': u'ScVolume'}, - u'readOnly': False, - u'lun': 1, - u'serverHba': {u'instanceId': u'64702.3282218607', - u'instanceName': u'21000024FF30441C', - u'objectType': u'ScServerHba'}, - u'path': {u'instanceId': u'64702.64702.64703.27.73', - u'instanceName': - u'21000024FF30441C-5000D31000FCBE36', - u'objectType': u'ScServerHbaPath'}, - u'controllerPort': - {u'instanceId': u'64702.5764839588723736118.50', - u'instanceName': u'5000D31000FCBE36', - u'objectType': u'ScControllerPort'}, - u'instanceName': u'64702-7639', - u'transport': u'FibreChannel', - u'objectType': u'ScMapping'}, - {u'profile': {u'instanceId': u'64702.2941', - u'instanceName': u'6025-47', - u'objectType': u'ScMappingProfile'}, - u'status': u'Up', - u'statusMessage': u'', - u'instanceId': u'64702.7640.64702', - u'scName': u'Storage Center 64702', - u'scSerialNumber': 64702, - u'controller': {u'instanceId': u'64702.64703', - u'instanceName': u'SN 64703', - u'objectType': u'ScController'}, - u'server': {u'instanceId': u'64702.47', - u'instanceName': u'Server_21000024ff30441d', - u'objectType': u'ScPhysicalServer'}, - u'volume': - {u'instanceId': u'64702.6025', - u'instanceName': u'Server_21000024ff30441d Test Vol', - u'objectType': u'ScVolume'}, - u'readOnly': False, - u'lun': 1, - u'serverHba': {u'instanceId': u'64702.3282218606', - u'instanceName': u'21000024FF30441D', - u'objectType': u'ScServerHba'}, - u'path': - {u'instanceId': u'64702.64702.64703.27.78', - u'instanceName': 
u'21000024FF30441D-5000D31000FCBE36', - u'objectType': u'ScServerHbaPath'}, - u'controllerPort': - {u'instanceId': u'64702.5764839588723736118.50', - u'instanceName': u'5000D31000FCBE36', - u'objectType': u'ScControllerPort'}, - u'instanceName': u'64702-7640', - u'transport': u'FibreChannel', - u'objectType': u'ScMapping'}, - {u'profile': {u'instanceId': u'64702.2941', - u'instanceName': u'6025-47', - u'objectType': u'ScMappingProfile'}, - u'status': u'Up', - u'statusMessage': u'', - u'instanceId': u'64702.7638.64702', - u'scName': u'Storage Center 64702', - u'scSerialNumber': 64702, - u'controller': {u'instanceId': u'64702.64703', - u'instanceName': u'SN 64703', - u'objectType': u'ScController'}, - u'server': {u'instanceId': u'64702.47', - u'instanceName': u'Server_21000024ff30441d', - u'objectType': u'ScPhysicalServer'}, - u'volume': {u'instanceId': u'64702.6025', - u'instanceName': - u'Server_21000024ff30441d Test Vol', - u'objectType': u'ScVolume'}, - u'readOnly': False, - u'lun': 2, - u'serverHba': {u'instanceId': u'64702.3282218606', - u'instanceName': u'21000024FF30441D', - u'objectType': u'ScServerHba'}, - u'path': - {u'instanceId': u'64702.64702.64703.28.76', - u'instanceName': u'21000024FF30441D-5000D31000FCBE3E', - u'objectType': u'ScServerHbaPath'}, - u'controllerPort': {u'instanceId': - u'64702.5764839588723736126.60', - u'instanceName': u'5000D31000FCBE3E', - u'objectType': u'ScControllerPort'}, - u'instanceName': u'64702-7638', - u'transport': u'FibreChannel', - u'objectType': u'ScMapping'}] - - RPLAY = {u'scSerialNumber': 64702, - u'globalIndex': u'64702-46-250', - u'description': u'Cinder Clone Replay', - u'parent': {u'instanceId': u'64702.46.249', - u'instanceName': u'64702-46-249', - u'objectType': u'ScReplay'}, - u'instanceId': u'64702.46.250', - u'scName': u'Storage Center 64702', - u'consistent': False, - u'expires': True, - u'freezeTime': u'12/09/2014 03:52:08 PM', - u'createVolume': {u'instanceId': u'64702.46', - u'instanceName': - 
u'volume-ff9589d3-2d41-48d5-9ef5-2713a875e85b', - u'objectType': u'ScVolume'}, - u'expireTime': u'12/09/2014 04:52:08 PM', - u'source': u'Manual', - u'spaceRecovery': False, - u'writesHeldDuration': 7910, - u'active': False, - u'markedForExpiration': False, - u'objectType': u'ScReplay', - u'instanceName': u'12/09/2014 03:52:08 PM', - u'size': u'0.0 Bytes' - } - - RPLAYS = [{u'scSerialNumber': 64702, - u'globalIndex': u'64702-6025-5', - u'description': u'Manually Created', - u'parent': {u'instanceId': u'64702.6025.4', - u'instanceName': u'64702-6025-4', - u'objectType': u'ScReplay'}, - u'instanceId': u'64702.6025.5', - u'scName': u'Storage Center 64702', - u'consistent': False, - u'expires': True, - u'freezeTime': u'02/02/2015 08:23:55 PM', - u'createVolume': {u'instanceId': u'64702.6025', - u'instanceName': - u'Server_21000024ff30441d Test Vol', - u'objectType': u'ScVolume'}, - u'expireTime': u'02/02/2015 09:23:55 PM', - u'source': u'Manual', - u'spaceRecovery': False, - u'writesHeldDuration': 7889, - u'active': False, - u'markedForExpiration': False, - u'objectType': u'ScReplay', - u'instanceName': u'02/02/2015 08:23:55 PM', - u'size': u'0.0 Bytes'}, - {u'scSerialNumber': 64702, - u'globalIndex': u'64702-6025-4', - u'description': u'Cinder Test Replay012345678910', - u'parent': {u'instanceId': u'64702.6025.3', - u'instanceName': u'64702-6025-3', - u'objectType': u'ScReplay'}, - u'instanceId': u'64702.6025.4', - u'scName': u'Storage Center 64702', - u'consistent': False, - u'expires': True, - u'freezeTime': u'02/02/2015 08:23:47 PM', - u'createVolume': {u'instanceId': u'64702.6025', - u'instanceName': - u'Server_21000024ff30441d Test Vol', - u'objectType': u'ScVolume'}, - u'expireTime': u'02/02/2015 09:23:47 PM', - u'source': u'Manual', - u'spaceRecovery': False, - u'writesHeldDuration': 7869, - u'active': False, - u'markedForExpiration': False, - u'objectType': u'ScReplay', - u'instanceName': u'02/02/2015 08:23:47 PM', - u'size': u'0.0 Bytes'}] - - TST_RPLAY = 
{u'scSerialNumber': 64702, - u'globalIndex': u'64702-6025-4', - u'description': u'Cinder Test Replay012345678910', - u'parent': {u'instanceId': u'64702.6025.3', - u'instanceName': u'64702-6025-3', - u'objectType': u'ScReplay'}, - u'instanceId': u'64702.6025.4', - u'scName': u'Storage Center 64702', - u'consistent': False, - u'expires': True, - u'freezeTime': u'02/02/2015 08:23:47 PM', - u'createVolume': {u'instanceId': u'64702.6025', - u'instanceName': - u'Server_21000024ff30441d Test Vol', - u'objectType': u'ScVolume'}, - u'expireTime': u'02/02/2015 09:23:47 PM', - u'source': u'Manual', - u'spaceRecovery': False, - u'writesHeldDuration': 7869, - u'active': False, - u'markedForExpiration': False, - u'objectType': u'ScReplay', - u'instanceName': u'02/02/2015 08:23:47 PM', - u'size': u'0.0 Bytes'} - - FLDR = {u'status': u'Up', - u'instanceName': u'opnstktst', - u'name': u'opnstktst', - u'parent': - {u'instanceId': u'64702.0', - u'instanceName': u'Volumes', - u'objectType': u'ScVolumeFolder'}, - u'instanceId': u'64702.43', - u'scName': u'Storage Center 64702', - u'notes': u'Folder for OpenStack Cinder Driver', - u'scSerialNumber': 64702, - u'parentIndex': 0, - u'okToDelete': True, - u'folderPath': u'', - u'root': False, - u'statusMessage': u'', - u'objectType': u'ScVolumeFolder'} - - SVR_FLDR = {u'status': u'Up', - u'instanceName': u'devstacksrv', - u'name': u'devstacksrv', - u'parent': {u'instanceId': u'64702.0', - u'instanceName': u'Servers', - u'objectType': u'ScServerFolder'}, - u'instanceId': u'64702.4', - u'scName': u'Storage Center 64702', - u'notes': u'Folder for OpenStack Cinder Driver', - u'scSerialNumber': 64702, - u'parentIndex': 0, - u'okToDelete': False, - u'folderPath': u'', - u'root': False, - u'statusMessage': u'', - u'objectType': u'ScServerFolder'} - - ISCSI_HBA = {u'portWwnList': [], - u'iscsiIpAddress': u'0.0.0.0', - u'pathCount': 1, - u'name': u'iqn.1993-08.org.debian:01:52332b70525', - u'connectivity': u'Down', - u'instanceId': 
u'64702.3786433166', - u'scName': u'Storage Center 64702', - u'notes': u'', - u'scSerialNumber': 64702, - u'server': - {u'instanceId': u'64702.38', - u'instanceName': - u'Server_iqn.1993-08.org.debian:01:52332b70525', - u'objectType': u'ScPhysicalServer'}, - u'remoteStorageCenter': False, - u'iscsiName': u'', - u'portType': u'Iscsi', - u'instanceName': u'iqn.1993-08.org.debian:01:52332b70525', - u'objectType': u'ScServerHba'} - - FC_HBAS = [{u'portWwnList': [], - u'iscsiIpAddress': u'0.0.0.0', - u'pathCount': 2, - u'name': u'21000024FF30441C', - u'connectivity': u'Up', - u'instanceId': u'64702.3282218607', - u'scName': u'Storage Center 64702', - u'notes': u'', - u'scSerialNumber': 64702, - u'server': {u'instanceId': u'64702.47', - u'instanceName': u'Server_21000024ff30441d', - u'objectType': u'ScPhysicalServer'}, - u'remoteStorageCenter': False, - u'iscsiName': u'', - u'portType': u'FibreChannel', - u'instanceName': u'21000024FF30441C', - u'objectType': u'ScServerHba'}, - {u'portWwnList': [], - u'iscsiIpAddress': u'0.0.0.0', - u'pathCount': 3, - u'name': u'21000024FF30441D', - u'connectivity': u'Partial', - u'instanceId': u'64702.3282218606', - u'scName': u'Storage Center 64702', - u'notes': u'', - u'scSerialNumber': 64702, - u'server': {u'instanceId': u'64702.47', - u'instanceName': u'Server_21000024ff30441d', - u'objectType': u'ScPhysicalServer'}, - u'remoteStorageCenter': False, - u'iscsiName': u'', - u'portType': u'FibreChannel', - u'instanceName': u'21000024FF30441D', - u'objectType': u'ScServerHba'}] - - FC_HBA = {u'portWwnList': [], - u'iscsiIpAddress': u'0.0.0.0', - u'pathCount': 3, - u'name': u'21000024FF30441D', - u'connectivity': u'Partial', - u'instanceId': u'64702.3282218606', - u'scName': u'Storage Center 64702', - u'notes': u'', - u'scSerialNumber': 64702, - u'server': {u'instanceId': u'64702.47', - u'instanceName': u'Server_21000024ff30441d', - u'objectType': u'ScPhysicalServer'}, - u'remoteStorageCenter': False, - u'iscsiName': u'', - u'portType': 
u'FibreChannel', - u'instanceName': u'21000024FF30441D', - u'objectType': u'ScServerHba'} - - SVR_OS_S = [{u'allowsLunGaps': True, - u'product': u'Red Hat Linux', - u'supportsActiveMappingDeletion': True, - u'version': u'6.x', - u'requiresLunZero': False, - u'scName': u'Storage Center 64702', - u'virtualMachineGuest': True, - u'virtualMachineHost': False, - u'allowsCrossTransportMapping': False, - u'objectType': u'ScServerOperatingSystem', - u'instanceId': u'64702.38', - u'lunCanVaryAcrossPaths': False, - u'scSerialNumber': 64702, - u'maximumVolumeSize': u'0.0 Bytes', - u'multipath': True, - u'instanceName': u'Red Hat Linux 6.x', - u'supportsActiveMappingCreation': True, - u'name': u'Red Hat Linux 6.x'}] - - ISCSI_FLT_DOMAINS = [{u'headerDigestEnabled': False, - u'classOfServicePriority': 0, - u'wellKnownIpAddress': u'192.168.0.21', - u'scSerialNumber': 64702, - u'iscsiName': - u'iqn.2002-03.com.compellent:5000d31000fcbe42', - u'portNumber': 3260, - u'subnetMask': u'255.255.255.0', - u'gateway': u'192.168.0.1', - u'objectType': u'ScIscsiFaultDomain', - u'chapEnabled': False, - u'instanceId': u'64702.6.5.3', - u'childStatus': u'Up', - u'defaultTimeToRetain': u'SECONDS_20', - u'dataDigestEnabled': False, - u'instanceName': u'iSCSI 10G 2', - u'statusMessage': u'', - u'status': u'Up', - u'transportType': u'Iscsi', - u'vlanId': 0, - u'windowSize': u'131072.0 Bytes', - u'defaultTimeToWait': u'SECONDS_2', - u'scsiCommandTimeout': u'MINUTES_1', - u'deleteAllowed': False, - u'name': u'iSCSI 10G 2', - u'immediateDataWriteEnabled': False, - u'scName': u'Storage Center 64702', - u'notes': u'', - u'mtu': u'MTU_1500', - u'bidirectionalChapSecret': u'', - u'keepAliveTimeout': u'SECONDS_30'}] - - # For testing find_iscsi_properties where multiple portals are found - ISCSI_FLT_DOMAINS_MULTI_PORTALS = \ - [{u'headerDigestEnabled': False, - u'classOfServicePriority': 0, - u'wellKnownIpAddress': u'192.168.0.21', - u'scSerialNumber': 64702, - u'iscsiName': - 
u'iqn.2002-03.com.compellent:5000d31000fcbe42', - u'portNumber': 3260, - u'subnetMask': u'255.255.255.0', - u'gateway': u'192.168.0.1', - u'objectType': u'ScIscsiFaultDomain', - u'chapEnabled': False, - u'instanceId': u'64702.6.5.3', - u'childStatus': u'Up', - u'defaultTimeToRetain': u'SECONDS_20', - u'dataDigestEnabled': False, - u'instanceName': u'iSCSI 10G 2', - u'statusMessage': u'', - u'status': u'Up', - u'transportType': u'Iscsi', - u'vlanId': 0, - u'windowSize': u'131072.0 Bytes', - u'defaultTimeToWait': u'SECONDS_2', - u'scsiCommandTimeout': u'MINUTES_1', - u'deleteAllowed': False, - u'name': u'iSCSI 10G 2', - u'immediateDataWriteEnabled': False, - u'scName': u'Storage Center 64702', - u'notes': u'', - u'mtu': u'MTU_1500', - u'bidirectionalChapSecret': u'', - u'keepAliveTimeout': u'SECONDS_30'}, - {u'headerDigestEnabled': False, - u'classOfServicePriority': 0, - u'wellKnownIpAddress': u'192.168.0.25', - u'scSerialNumber': 64702, - u'iscsiName': - u'iqn.2002-03.com.compellent:5000d31000fcbe42', - u'portNumber': 3260, - u'subnetMask': u'255.255.255.0', - u'gateway': u'192.168.0.1', - u'objectType': u'ScIscsiFaultDomain', - u'chapEnabled': False, - u'instanceId': u'64702.6.5.3', - u'childStatus': u'Up', - u'defaultTimeToRetain': u'SECONDS_20', - u'dataDigestEnabled': False, - u'instanceName': u'iSCSI 10G 2', - u'statusMessage': u'', - u'status': u'Up', - u'transportType': u'Iscsi', - u'vlanId': 0, - u'windowSize': u'131072.0 Bytes', - u'defaultTimeToWait': u'SECONDS_2', - u'scsiCommandTimeout': u'MINUTES_1', - u'deleteAllowed': False, - u'name': u'iSCSI 10G 2', - u'immediateDataWriteEnabled': False, - u'scName': u'Storage Center 64702', - u'notes': u'', - u'mtu': u'MTU_1500', - u'bidirectionalChapSecret': u'', - u'keepAliveTimeout': u'SECONDS_30'}] - - ISCSI_FLT_DOMAIN = {u'headerDigestEnabled': False, - u'classOfServicePriority': 0, - u'wellKnownIpAddress': u'192.168.0.21', - u'scSerialNumber': 64702, - u'iscsiName': - 
u'iqn.2002-03.com.compellent:5000d31000fcbe42', - u'portNumber': 3260, - u'subnetMask': u'255.255.255.0', - u'gateway': u'192.168.0.1', - u'objectType': u'ScIscsiFaultDomain', - u'chapEnabled': False, - u'instanceId': u'64702.6.5.3', - u'childStatus': u'Up', - u'defaultTimeToRetain': u'SECONDS_20', - u'dataDigestEnabled': False, - u'instanceName': u'iSCSI 10G 2', - u'statusMessage': u'', - u'status': u'Up', - u'transportType': u'Iscsi', - u'vlanId': 0, - u'windowSize': u'131072.0 Bytes', - u'defaultTimeToWait': u'SECONDS_2', - u'scsiCommandTimeout': u'MINUTES_1', - u'deleteAllowed': False, - u'name': u'iSCSI 10G 2', - u'immediateDataWriteEnabled': False, - u'scName': u'Storage Center 64702', - u'notes': u'', - u'mtu': u'MTU_1500', - u'bidirectionalChapSecret': u'', - u'keepAliveTimeout': u'SECONDS_30'} - - CTRLR_PORT = {u'status': u'Up', - u'iscsiIpAddress': u'0.0.0.0', - u'WWN': u'5000D31000FCBE06', - u'name': u'5000D31000FCBE06', - u'iscsiGateway': u'0.0.0.0', - u'instanceId': u'64702.5764839588723736070.51', - u'scName': u'Storage Center 64702', - u'scSerialNumber': 64702, - u'transportType': u'FibreChannel', - u'virtual': False, - u'controller': {u'instanceId': u'64702.64702', - u'instanceName': u'SN 64702', - u'objectType': u'ScController'}, - u'iscsiName': u'', - u'purpose': u'FrontEnd', - u'iscsiSubnetMask': u'0.0.0.0', - u'faultDomain': - {u'instanceId': u'64702.4.3', - u'instanceName': u'Domain 1', - u'objectType': u'ScControllerPortFaultDomain'}, - u'instanceName': u'5000D31000FCBE06', - u'statusMessage': u'', - u'objectType': u'ScControllerPort'} - - ISCSI_CTRLR_PORT = {u'preferredParent': - {u'instanceId': u'64702.5764839588723736074.69', - u'instanceName': u'5000D31000FCBE0A', - u'objectType': u'ScControllerPort'}, - u'status': u'Up', - u'iscsiIpAddress': u'10.23.8.235', - u'WWN': u'5000D31000FCBE43', - u'name': u'5000D31000FCBE43', - u'parent': - {u'instanceId': u'64702.5764839588723736074.69', - u'instanceName': u'5000D31000FCBE0A', - u'objectType': 
u'ScControllerPort'}, - u'iscsiGateway': u'0.0.0.0', - u'instanceId': u'64702.5764839588723736131.91', - u'scName': u'Storage Center 64702', - u'scSerialNumber': 64702, - u'transportType': u'Iscsi', - u'virtual': True, - u'controller': {u'instanceId': u'64702.64702', - u'instanceName': u'SN 64702', - u'objectType': u'ScController'}, - u'iscsiName': - u'iqn.2002-03.com.compellent:5000d31000fcbe43', - u'purpose': u'FrontEnd', - u'iscsiSubnetMask': u'0.0.0.0', - u'faultDomain': - {u'instanceId': u'64702.6.5', - u'instanceName': u'iSCSI 10G 2', - u'objectType': u'ScControllerPortFaultDomain'}, - u'instanceName': u'5000D31000FCBE43', - u'childStatus': u'Up', - u'statusMessage': u'', - u'objectType': u'ScControllerPort'} - - FC_CTRLR_PORT = {u'preferredParent': - {u'instanceId': u'64702.5764839588723736093.57', - u'instanceName': u'5000D31000FCBE1D', - u'objectType': u'ScControllerPort'}, - u'status': u'Up', - u'iscsiIpAddress': u'0.0.0.0', - u'WWN': u'5000D31000FCBE36', - u'name': u'5000D31000FCBE36', - u'parent': - {u'instanceId': u'64702.5764839588723736093.57', - u'instanceName': u'5000D31000FCBE1D', - u'objectType': u'ScControllerPort'}, - u'iscsiGateway': u'0.0.0.0', - u'instanceId': u'64702.5764839588723736118.50', - u'scName': u'Storage Center 64702', - u'scSerialNumber': 64702, - u'transportType': u'FibreChannel', - u'virtual': True, - u'controller': {u'instanceId': u'64702.64703', - u'instanceName': u'SN 64703', - u'objectType': u'ScController'}, - u'iscsiName': u'', - u'purpose': u'FrontEnd', - u'iscsiSubnetMask': u'0.0.0.0', - u'faultDomain': - {u'instanceId': u'64702.1.0', - u'instanceName': u'Domain 0', - u'objectType': u'ScControllerPortFaultDomain'}, - u'instanceName': u'5000D31000FCBE36', - u'childStatus': u'Up', - u'statusMessage': u'', - u'objectType': u'ScControllerPort'} - - FC_CTRLR_PORT_WWN_ERROR = \ - {u'preferredParent': - {u'instanceId': u'64702.5764839588723736093.57', - u'instanceName': u'5000D31000FCBE1D', - u'objectType': 
u'ScControllerPort'}, - u'status': u'Up', - u'iscsiIpAddress': u'0.0.0.0', - u'Wwn': u'5000D31000FCBE36', - u'name': u'5000D31000FCBE36', - u'parent': - {u'instanceId': u'64702.5764839588723736093.57', - u'instanceName': u'5000D31000FCBE1D', - u'objectType': u'ScControllerPort'}, - u'iscsiGateway': u'0.0.0.0', - u'instanceId': u'64702.5764839588723736118.50', - u'scName': u'Storage Center 64702', - u'scSerialNumber': 64702, - u'transportType': u'FibreChannel', - u'virtual': True, - u'controller': {u'instanceId': u'64702.64703', - u'instanceName': u'SN 64703', - u'objectType': u'ScController'}, - u'iscsiName': u'', - u'purpose': u'FrontEnd', - u'iscsiSubnetMask': u'0.0.0.0', - u'faultDomain': - {u'instanceId': u'64702.1.0', - u'instanceName': u'Domain 0', - u'objectType': u'ScControllerPortFaultDomain'}, - u'instanceName': u'5000D31000FCBE36', - u'childStatus': u'Up', - u'statusMessage': u'', - u'objectType': u'ScControllerPort'} - - STRG_USAGE = {u'systemSpace': u'7.38197504E8 Bytes', - u'freeSpace': u'1.297659461632E13 Bytes', - u'oversubscribedSpace': u'0.0 Bytes', - u'instanceId': u'64702', - u'scName': u'Storage Center 64702', - u'savingVsRaidTen': u'1.13737990144E11 Bytes', - u'allocatedSpace': u'1.66791217152E12 Bytes', - u'usedSpace': u'3.25716017152E11 Bytes', - u'configuredSpace': u'9.155796533248E12 Bytes', - u'alertThresholdSpace': u'1.197207956992E13 Bytes', - u'availableSpace': u'1.3302310633472E13 Bytes', - u'badSpace': u'0.0 Bytes', - u'time': u'02/02/2015 02:23:39 PM', - u'scSerialNumber': 64702, - u'instanceName': u'Storage Center 64702', - u'storageAlertThreshold': 10, - u'objectType': u'StorageCenterStorageUsage'} - - RPLAY_PROFILE = {u'name': u'fc8f2fec-fab2-4e34-9148-c094c913b9a3', - u'type': u'Consistent', - u'notes': u'Created by Dell EMC Cinder Driver', - u'volumeCount': 0, - u'expireIncompleteReplaySets': True, - u'replayCreationTimeout': 20, - u'enforceReplayCreationTimeout': False, - u'ruleCount': 0, - u'userCreated': True, - 
u'scSerialNumber': 64702, - u'scName': u'Storage Center 64702', - u'objectType': u'ScReplayProfile', - u'instanceId': u'64702.11', - u'instanceName': u'fc8f2fec-fab2-4e34-9148-c094c913b9a3'} - STORAGE_PROFILE_LIST = [ - {u'allowedForFlashOptimized': False, - u'allowedForNonFlashOptimized': True, - u'index': 1, - u'instanceId': u'64158.1', - u'instanceName': u'Recommended', - u'name': u'Recommended', - u'notes': u'', - u'objectType': u'ScStorageProfile', - u'raidTypeDescription': u'RAID 10 Active, RAID 5 or RAID 6 Replay', - u'raidTypeUsed': u'Mixed', - u'scName': u'Storage Center 64158', - u'scSerialNumber': 64158, - u'tiersUsedDescription': u'Tier 1, Tier 2, Tier 3', - u'useTier1Storage': True, - u'useTier2Storage': True, - u'useTier3Storage': True, - u'userCreated': False, - u'volumeCount': 125}, - {u'allowedForFlashOptimized': False, - u'allowedForNonFlashOptimized': True, - u'index': 2, - u'instanceId': u'64158.2', - u'instanceName': u'High Priority', - u'name': u'High Priority', - u'notes': u'', - u'objectType': u'ScStorageProfile', - u'raidTypeDescription': u'RAID 10 Active, RAID 5 or RAID 6 Replay', - u'raidTypeUsed': u'Mixed', - u'scName': u'Storage Center 64158', - u'scSerialNumber': 64158, - u'tiersUsedDescription': u'Tier 1', - u'useTier1Storage': True, - u'useTier2Storage': False, - u'useTier3Storage': False, - u'userCreated': False, - u'volumeCount': 0}, - {u'allowedForFlashOptimized': False, - u'allowedForNonFlashOptimized': True, - u'index': 3, - u'instanceId': u'64158.3', - u'instanceName': u'Medium Priority', - u'name': u'Medium Priority', - u'notes': u'', - u'objectType': u'ScStorageProfile', - u'raidTypeDescription': u'RAID 10 Active, RAID 5 or RAID 6 Replay', - u'raidTypeUsed': u'Mixed', - u'scName': u'Storage Center 64158', - u'scSerialNumber': 64158, - u'tiersUsedDescription': u'Tier 2', - u'useTier1Storage': False, - u'useTier2Storage': True, - u'useTier3Storage': False, - u'userCreated': False, - u'volumeCount': 0}, - 
{u'allowedForFlashOptimized': True, - u'allowedForNonFlashOptimized': True, - u'index': 4, - u'instanceId': u'64158.4', - u'instanceName': u'Low Priority', - u'name': u'Low Priority', - u'notes': u'', - u'objectType': u'ScStorageProfile', - u'raidTypeDescription': u'RAID 10 Active, RAID 5 or RAID 6 Replay', - u'raidTypeUsed': u'Mixed', - u'scName': u'Storage Center 64158', - u'scSerialNumber': 64158, - u'tiersUsedDescription': u'Tier 3', - u'useTier1Storage': False, - u'useTier2Storage': False, - u'useTier3Storage': True, - u'userCreated': False, - u'volumeCount': 0}] - - CGS = [{u'profile': - {u'instanceId': u'65690.4', - u'instanceName': u'0869559e-6881-454e-ba18-15c6726d33c1', - u'objectType': u'ScReplayProfile'}, - u'scSerialNumber': 65690, - u'globalIndex': u'65690-4-2', - u'description': u'GUID1-0869559e-6881-454e-ba18-15c6726d33c1', - u'instanceId': u'65690.65690.4.2', - u'scName': u'Storage Center 65690', - u'expires': False, - u'freezeTime': u'2015-09-28T14:00:59-05:00', - u'expireTime': u'1969-12-31T18:00:00-06:00', - u'expectedReplayCount': 2, - u'writesHeldDuration': 19809, - u'replayCount': 2, - u'instanceName': u'Name1', - u'objectType': u'ScReplayConsistencyGroup'}, - {u'profile': - {u'instanceId': u'65690.4', - u'instanceName': u'0869559e-6881-454e-ba18-15c6726d33c1', - u'objectType': u'ScReplayProfile'}, - u'scSerialNumber': 65690, - u'globalIndex': u'65690-4-3', - u'description': u'GUID2-0869559e-6881-454e-ba18-15c6726d33c1', - u'instanceId': u'65690.65690.4.3', - u'scName': u'Storage Center 65690', - u'expires': False, - u'freezeTime': u'2015-09-28T14:00:59-05:00', - u'expireTime': u'1969-12-31T18:00:00-06:00', - u'expectedReplayCount': 2, - u'writesHeldDuration': 19809, - u'replayCount': 2, - u'instanceName': u'Name2', - u'objectType': u'ScReplayConsistencyGroup'} - ] - - ISCSI_CONFIG = { - u'initialReadyToTransfer': True, - u'scSerialNumber': 64065, - u'macAddress': u'00c0dd-1da173', - u'instanceId': u'64065.5764839588723573038.6', - 
u'vlanTagging': False, - u'mapCount': 8, - u'cardModel': u'Qle4062', - u'portNumber': 3260, - u'firstBurstSize': 256, - u'deviceName': u'PCIDEV09', - u'subnetMask': u'255.255.255.0', - u'speed': u'1 Gbps', - u'maximumVlanCount': 0, - u'gatewayIpAddress': u'192.168.0.1', - u'slot': 4, - u'sfpData': u'', - u'dataDigest': False, - u'chapEnabled': False, - u'firmwareVersion': u'03.00.01.77', - u'preferredControllerIndex': 64066, - u'defaultTimeToRetain': 20, - u'objectType': u'ScControllerPortIscsiConfiguration', - u'instanceName': u'5000d31000FCBE43', - u'scName': u'sc64065', - u'revision': u'0', - u'controllerPortIndex': 5764839588723573038, - u'maxBurstSize': 512, - u'targetCount': 20, - u'description': u'QLogic QLE4062 iSCSI Adapter Rev 0 Copper', - u'vlanSupported': True, - u'chapName': u'iqn.2002-03.com.compellent:5000d31000fcbe43', - u'windowSize': 128, - u'vlanId': 0, - u'defaultTimeToWait': 2, - u'headerDigest': False, - u'slotPort': 2, - u'immediateDataWrite': False, - u'storageCenterTargetCount': 20, - u'vlanCount': 0, - u'scsiCommandTimeout': 60, - u'slotType': u'PCI4', - u'ipAddress': u'192.168.0.21', - u'vlanUserPriority': 0, - u'bothCount': 0, - u'initiatorCount': 33, - u'keepAliveTimeout': 30, - u'homeControllerIndex': 64066, - u'chapSecret': u'', - u'maximumTransmissionUnit': 1500} - - SCQOS = {u'linkSpeed': u'1 Gbps', - u'numberDevices': 1, - u'bandwidthLimited': False, - u'name': u'Cinder QoS', - u'instanceId': u'64702.2', - u'scName': u'Storage Center 64702', - u'scSerialNumber': 64702, - u'instanceName': u'Cinder QoS', - u'advancedSettings': {u'globalMaxSectorPerIo': 512, - u'destinationMaxSectorCount': 65536, - u'queuePassMaxSectorCount': 65536, - u'destinationMaxIoCount': 18, - u'globalMaxIoCount': 32, - u'queuePassMaxIoCount': 8}, - u'objectType': u'ScReplicationQosNode'} - - SCREPL = [{u'destinationVolume': {u'instanceId': u'65495.167', - u'instanceName': u'Cinder repl of abcd9' - u'5b2-1284-4cf0-a397-9' - u'70fa6c68092', - u'objectType': 
u'ScVolume'}, - u'instanceId': u'64702.9', - u'scSerialNumber': 64702, - u'syncStatus': u'NotApplicable', - u'objectType': u'ScReplication', - u'sourceStorageCenter': {u'instanceId': u'64702', - u'instanceName': u'Storage Center ' - '64702', - u'objectType': u'StorageCenter'}, - u'secondaryTransportTypes': [], - u'dedup': False, - u'state': u'Up', - u'replicateActiveReplay': False, - u'qosNode': {u'instanceId': u'64702.2', - u'instanceName': u'Cinder QoS', - u'objectType': u'ScReplicationQosNode'}, - u'sourceVolume': {u'instanceId': u'64702.13108', - u'instanceName': u'abcd95b2-1284-4cf0-a397-' - u'970fa6c68092', - u'objectType': u'ScVolume'}, - u'type': u'Asynchronous', - u'statusMessage': u'', - u'status': u'Up', - u'syncMode': u'None', - u'stateMessage': u'', - u'managedByLiveVolume': False, - u'destinationScSerialNumber': 65495, - u'pauseAllowed': True, - u'instanceName': u"Replication of 'abcd95b2-1284-4cf0-" - u"a397-970fa6c68092'", - u'simulation': False, - u'transportTypes': [u'FibreChannel'], - u'replicateStorageToLowestTier': True, - u'scName': u'Storage Center 64702', - u'destinationStorageCenter': {u'instanceId': u'65495', - u'instanceName': u'Storage Center' - u' 65495', - u'objectType': u'StorageCenter'}}] - - IQN = 'iqn.2002-03.com.compellent:5000D31000000001' - WWN = u'21000024FF30441C' - - WWNS = [u'21000024FF30441C', - u'21000024FF30441D'] - - # Used to test finding no match in find_wwns - WWNS_NO_MATCH = [u'21000024FF30451C', - u'21000024FF30451D'] - - FLDR_PATH = 'StorageCenter/ScVolumeFolder/' - - # Create a Response object that indicates OK - - response_ok = models.Response() - response_ok.status_code = 200 - response_ok.reason = u'ok' - response_ok._content = '' - response_ok._content_consumed = True - RESPONSE_200 = response_ok - - # Create a Response object that indicates created - response_created = models.Response() - response_created.status_code = 201 - response_created.reason = u'created' - response_created._content = '' - 
response_created._content_consumed = True - RESPONSE_201 = response_created - - # Create a Response object that can indicate a failure. Although - # 204 can be a success with no return. (Know your calls!) - response_nc = models.Response() - response_nc.status_code = 204 - response_nc.reason = u'duplicate' - response_nc._content = '' - response_nc._content_consumed = True - RESPONSE_204 = response_nc - - # Create a Response object is a pure error. - response_bad = models.Response() - response_bad.status_code = 400 - response_bad.reason = u'bad request' - response_bad._content = '' - response_bad._content_consumed = True - RESPONSE_400 = response_bad - - # Create a Response object is a pure error. - response_bad = models.Response() - response_bad.status_code = 404 - response_bad.reason = u'not found' - response_bad._content = '' - response_bad._content_consumed = True - RESPONSE_404 = response_bad - - def setUp(self): - super(DellSCSanAPITestCase, self).setUp() - - # Configuration is a mock. A mock is pretty much a blank - # slate. I believe mock's done in setup are not happy time - # mocks. So we just do a few things like driver config here. - self.configuration = mock.Mock() - - self.configuration.san_is_local = False - self.configuration.san_ip = "192.168.0.1" - self.configuration.san_login = "admin" - self.configuration.san_password = "mmm" - self.configuration.dell_sc_ssn = 12345 - self.configuration.dell_sc_server_folder = 'opnstktst' - self.configuration.dell_sc_volume_folder = 'opnstktst' - # Note that we set this to True even though we do not - # test this functionality. This is sent directly to - # the requests calls as the verify parameter and as - # that is a third party library deeply stubbed out is - # not directly testable by this code. Note that in the - # case that this fails the driver fails to even come - # up. 
- self.configuration.dell_sc_verify_cert = True - self.configuration.dell_sc_api_port = 3033 - self.configuration.iscsi_ip_address = '192.168.1.1' - self.configuration.iscsi_port = 3260 - self._context = context.get_admin_context() - self.apiversion = '2.0' - - # Set up the SCApi - self.scapi = storagecenter_api.SCApi( - self.configuration.san_ip, - self.configuration.dell_sc_api_port, - self.configuration.san_login, - self.configuration.san_password, - self.configuration.dell_sc_verify_cert, - self.apiversion) - - # Set up the scapi configuration vars - self.scapi.ssn = self.configuration.dell_sc_ssn - self.scapi.sfname = self.configuration.dell_sc_server_folder - self.scapi.vfname = self.configuration.dell_sc_volume_folder - # Note that we set this to True (or not) on the replication tests. - self.scapi.failed_over = False - - self.volid = str(uuid.uuid4()) - self.volume_name = "volume" + self.volid - self.repl_name = "Cinder repl of volume" + self.volid - - def test_path_to_array(self, - mock_close_connection, - mock_open_connection, - mock_init): - res = self.scapi._path_to_array(u'folder1/folder2/folder3') - expected = [u'folder1', u'folder2', u'folder3'] - self.assertEqual(expected, res, 'Unexpected folder path') - - @mock.patch.object(storagecenter_api.SCApi, - '_get_result', - return_value=SC) - @mock.patch.object(storagecenter_api.HttpClient, - 'get', - return_value=RESPONSE_200) - def test_find_sc(self, - mock_get, - mock_get_result, - mock_close_connection, - mock_open_connection, - mock_init): - res = self.scapi.find_sc() - mock_get.assert_called_once_with('StorageCenter/StorageCenter') - self.assertTrue(mock_get_result.called) - self.assertEqual(u'64702', res, 'Unexpected SSN') - - @mock.patch.object(storagecenter_api.HttpClient, - 'get', - return_value=None) - @mock.patch.object(storagecenter_api.SCApi, - '_get_result', - return_value=None) - def test_find_sc_failure(self, - mock_get_result, - mock_get, - mock_close_connection, - mock_open_connection, 
- mock_init): - self.assertRaises(exception.VolumeBackendAPIException, - self.scapi.find_sc) - - @mock.patch.object(storagecenter_api.SCApi, - '_first_result', - return_value=FLDR) - @mock.patch.object(storagecenter_api.HttpClient, - 'post', - return_value=RESPONSE_201) - def test_create_folder(self, - mock_post, - mock_first_result, - mock_close_connection, - mock_open_connection, - mock_init): - res = self.scapi._create_folder( - 'StorageCenter/ScVolumeFolder', - '', - self.configuration.dell_sc_volume_folder) - self.assertTrue(mock_post.called) - self.assertTrue(mock_first_result.called) - self.assertEqual(self.FLDR, res, 'Unexpected Folder') - - @mock.patch.object(storagecenter_api.SCApi, - '_first_result', - return_value=FLDR) - @mock.patch.object(storagecenter_api.HttpClient, - 'post', - return_value=RESPONSE_201) - def test_create_folder_with_parent(self, - mock_post, - mock_first_result, - mock_close_connection, - mock_open_connection, - mock_init): - # Test case where parent folder name is specified - res = self.scapi._create_folder( - 'StorageCenter/ScVolumeFolder', 'parentFolder', - self.configuration.dell_sc_volume_folder) - self.assertTrue(mock_post.called) - self.assertTrue(mock_first_result.called) - self.assertEqual(self.FLDR, res, 'Unexpected Folder') - - @mock.patch.object(storagecenter_api.HttpClient, - 'post', - return_value=RESPONSE_204) - def test_create_folder_failure(self, - mock_post, - mock_close_connection, - mock_open_connection, - mock_init): - res = self.scapi._create_folder( - 'StorageCenter/ScVolumeFolder', '', - self.configuration.dell_sc_volume_folder) - self.assertIsNone(res, 'Test Create folder - None expected') - - @mock.patch.object(storagecenter_api.SCApi, - '_find_folder', - return_value=FLDR) - @mock.patch.object(storagecenter_api.SCApi, - '_path_to_array', - return_value=['Cinder_Test_Folder']) - def test_create_folder_path(self, - mock_path_to_array, - mock_find_folder, - mock_close_connection, - mock_open_connection, - 
mock_init): - res = self.scapi._create_folder_path( - 'StorageCenter/ScVolumeFolder', - self.configuration.dell_sc_volume_folder) - mock_path_to_array.assert_called_once_with( - self.configuration.dell_sc_volume_folder) - self.assertTrue(mock_find_folder.called) - self.assertEqual(self.FLDR, res, 'Unexpected ScFolder') - - @mock.patch.object(storagecenter_api.SCApi, - '_create_folder', - return_value=FLDR) - @mock.patch.object(storagecenter_api.SCApi, - '_find_folder', - return_value=None) - @mock.patch.object(storagecenter_api.SCApi, - '_path_to_array', - return_value=['Cinder_Test_Folder']) - def test_create_folder_path_create_fldr(self, - mock_path_to_array, - mock_find_folder, - mock_create_folder, - mock_close_connection, - mock_open_connection, - mock_init): - # Test case where folder is not found and must be created - res = self.scapi._create_folder_path( - 'StorageCenter/ScVolumeFolder', - self.configuration.dell_sc_volume_folder) - mock_path_to_array.assert_called_once_with( - self.configuration.dell_sc_volume_folder) - self.assertTrue(mock_find_folder.called) - self.assertTrue(mock_create_folder.called) - self.assertEqual(self.FLDR, res, 'Unexpected ScFolder') - - @mock.patch.object(storagecenter_api.SCApi, - '_create_folder', - return_value=None) - @mock.patch.object(storagecenter_api.SCApi, - '_find_folder', - return_value=None) - @mock.patch.object(storagecenter_api.SCApi, - '_path_to_array', - return_value=['Cinder_Test_Folder']) - def test_create_folder_path_failure(self, - mock_path_to_array, - mock_find_folder, - mock_create_folder, - mock_close_connection, - mock_open_connection, - mock_init): - # Test case where folder is not found, must be created - # and creation fails - res = self.scapi._create_folder_path( - 'StorageCenter/ScVolumeFolder', - self.configuration.dell_sc_volume_folder) - mock_path_to_array.assert_called_once_with( - self.configuration.dell_sc_volume_folder) - self.assertTrue(mock_find_folder.called) - 
self.assertTrue(mock_create_folder.called) - self.assertIsNone(res, 'Expected None') - - @mock.patch.object(storagecenter_api.SCApi, - '_get_result', - return_value=u'devstackvol/fcvm/') - @mock.patch.object(storagecenter_api.HttpClient, - 'post', - return_value=RESPONSE_200) - def test_find_folder(self, - mock_post, - mock_get_result, - mock_close_connection, - mock_open_connection, - mock_init): - res = self.scapi._find_folder( - 'StorageCenter/ScVolumeFolder', - self.configuration.dell_sc_volume_folder) - self.assertTrue(mock_post.called) - self.assertTrue(mock_get_result.called) - self.assertEqual(u'devstackvol/fcvm/', res, 'Unexpected folder') - - @mock.patch.object(storagecenter_api.SCApi, - '_get_result', - return_value=u'devstackvol/fcvm/') - @mock.patch.object(storagecenter_api.HttpClient, - 'post', - return_value=RESPONSE_200) - def test_find_folder_multi_fldr(self, - mock_post, - mock_get_result, - mock_close_connection, - mock_open_connection, - mock_init): - # Test case for folder path with multiple folders - res = self.scapi._find_folder( - 'StorageCenter/ScVolumeFolder', - u'testParentFolder/opnstktst') - self.assertTrue(mock_post.called) - self.assertTrue(mock_get_result.called) - self.assertEqual(u'devstackvol/fcvm/', res, 'Unexpected folder') - - @mock.patch.object(storagecenter_api.HttpClient, - 'post', - return_value=RESPONSE_204) - def test_find_folder_failure(self, - mock_post, - mock_close_connection, - mock_open_connection, - mock_init): - res = self.scapi._find_folder( - 'StorageCenter/ScVolumeFolder', - self.configuration.dell_sc_volume_folder) - self.assertIsNone(res, 'Test find folder - None expected') - - @mock.patch.object(storagecenter_api.SCApi, - '_find_folder', - return_value=None) - def test_find_volume_folder_fail(self, - mock_find_folder, - mock_close_connection, - mock_open_connection, - mock_init): - # Test case where _find_volume_folder returns none - res = self.scapi._find_volume_folder( - False) - 
mock_find_folder.assert_called_once_with( - 'StorageCenter/ScVolumeFolder/GetList', - self.configuration.dell_sc_volume_folder, -1) - self.assertIsNone(res, 'Expected None') - - @mock.patch.object(storagecenter_api.SCApi, - '_find_folder', - return_value=FLDR) - def test_find_volume_folder(self, - mock_find_folder, - mock_close_connection, - mock_open_connection, - mock_init): - res = self.scapi._find_volume_folder( - False) - mock_find_folder.assert_called_once_with( - 'StorageCenter/ScVolumeFolder/GetList', - self.configuration.dell_sc_volume_folder, -1) - self.assertEqual(self.FLDR, res, 'Unexpected Folder') - - @mock.patch.object(storagecenter_api.SCApi, - '_get_json', - return_value=STORAGE_PROFILE_LIST) - @mock.patch.object(storagecenter_api.HttpClient, - 'post', - return_value=RESPONSE_200) - def test_find_storage_profile_fail(self, - mock_json, - mock_find_folder, - mock_close_connection, - mock_open_connection, - mock_init): - # Test case where _find_volume_folder returns none - res = self.scapi._find_storage_profile("Blah") - self.assertIsNone(res) - - @mock.patch.object(storagecenter_api.SCApi, - '_get_json', - return_value=STORAGE_PROFILE_LIST) - @mock.patch.object(storagecenter_api.HttpClient, - 'post', - return_value=RESPONSE_200) - def test_find_storage_profile_none(self, - mock_json, - mock_find_folder, - mock_close_connection, - mock_open_connection, - mock_init): - # Test case where _find_storage_profile returns none - res = self.scapi._find_storage_profile(None) - self.assertIsNone(res) - - @mock.patch.object(storagecenter_api.SCApi, - '_get_json', - return_value=STORAGE_PROFILE_LIST) - @mock.patch.object(storagecenter_api.HttpClient, - 'post', - return_value=RESPONSE_200) - @ddt.data('HighPriority', 'highpriority', 'High Priority') - def test_find_storage_profile(self, - value, - mock_json, - mock_find_folder, - mock_close_connection, - mock_open_connection, - mock_init): - res = self.scapi._find_storage_profile(value) - 
self.assertIsNotNone(res, 'Expected matching storage profile!') - self.assertEqual(self.STORAGE_PROFILE_LIST[1]['instanceId'], - res.get('instanceId')) - - @mock.patch.object(storagecenter_api.SCApi, - '_create_folder_path', - return_value=FLDR) - @mock.patch.object(storagecenter_api.SCApi, - '_find_folder', - return_value=None) - def test_find_volume_folder_create_folder(self, - mock_find_folder, - mock_create_folder_path, - mock_close_connection, - mock_open_connection, - mock_init): - # Test case where _find_volume_folder returns none and folder must be - # created - res = self.scapi._find_volume_folder( - True) - mock_find_folder.assert_called_once_with( - 'StorageCenter/ScVolumeFolder/GetList', - self.configuration.dell_sc_volume_folder, -1) - self.assertTrue(mock_create_folder_path.called) - self.assertEqual(self.FLDR, res, 'Unexpected Folder') - - @mock.patch.object(storagecenter_api.SCApi, - 'get_volume', - return_value=VOLUME) - @mock.patch.object(storagecenter_api.SCApi, - 'unmap_volume', - return_value=True) - @mock.patch.object(storagecenter_api.SCApi, - 'map_volume', - return_value=MAPPINGS) - @mock.patch.object(storagecenter_api.SCApi, - '_get_json', - return_value=SCSERVERS) - @mock.patch.object(storagecenter_api.HttpClient, - 'post', - return_value=RESPONSE_200) - def test_init_volume(self, - mock_post, - mock_get_json, - mock_map_volume, - mock_unmap_volume, - mock_get_volume, - mock_close_connection, - mock_open_connection, - mock_init): - self.scapi._init_volume(self.VOLUME) - self.assertTrue(mock_map_volume.called) - self.assertTrue(mock_unmap_volume.called) - - @mock.patch.object(storagecenter_api.SCApi, - 'get_volume') - @mock.patch.object(storagecenter_api.SCApi, - 'unmap_volume') - @mock.patch.object(storagecenter_api.SCApi, - 'map_volume', - return_value=MAPPINGS) - @mock.patch.object(storagecenter_api.SCApi, - '_get_json') - @mock.patch.object(storagecenter_api.HttpClient, - 'post', - return_value=RESPONSE_200) - def 
test_init_volume_retry(self, - mock_post, - mock_get_json, - mock_map_volume, - mock_unmap_volume, - mock_get_volume, - mock_close_connection, - mock_open_connection, - mock_init): - mock_get_json.return_value = [{'name': 'srv1', 'status': 'up', - 'type': 'physical'}, - {'name': 'srv2', 'status': 'up', - 'type': 'physical'}] - mock_get_volume.side_effect = [{'name': 'guid', 'active': False, - 'instanceId': '12345.1'}, - {'name': 'guid', 'active': True, - 'instanceId': '12345.1'}] - self.scapi._init_volume(self.VOLUME) - # First return wasn't active. So try second. - self.assertEqual(2, mock_map_volume.call_count) - self.assertEqual(2, mock_unmap_volume.call_count) - - @mock.patch.object(storagecenter_api.HttpClient, - 'post', - return_value=RESPONSE_400) - def test_init_volume_failure(self, - mock_post, - mock_close_connection, - mock_open_connection, - mock_init): - # Test case where ScServer list fails - self.scapi._init_volume(self.VOLUME) - self.assertTrue(mock_post.called) - - @mock.patch.object(storagecenter_api.SCApi, - 'unmap_volume', - return_value=True) - @mock.patch.object(storagecenter_api.SCApi, - 'map_volume', - return_value=MAPPINGS) - @mock.patch.object(storagecenter_api.SCApi, - '_get_json', - return_value=SCSERVERS_DOWN) - @mock.patch.object(storagecenter_api.HttpClient, - 'post', - return_value=RESPONSE_200) - def test_init_volume_servers_down(self, - mock_post, - mock_get_json, - mock_map_volume, - mock_unmap_volume, - mock_close_connection, - mock_open_connection, - mock_init): - # Test case where ScServer Status = Down - self.scapi._init_volume(self.VOLUME) - self.assertFalse(mock_map_volume.called) - self.assertFalse(mock_unmap_volume.called) - - @mock.patch.object(storagecenter_api.SCApi, - '_get_json', - return_value=VOLUME) - @mock.patch.object(storagecenter_api.SCApi, - '_find_volume_folder', - return_value=FLDR) - @mock.patch.object(storagecenter_api.HttpClient, - 'post', - return_value=RESPONSE_201) - def test_create_volume(self, - 
mock_post, - mock_find_volume_folder, - mock_get_json, - mock_close_connection, - mock_open_connection, - mock_init): - res = self.scapi.create_volume( - self.volume_name, - 1) - self.assertTrue(mock_post.called) - self.assertTrue(mock_get_json.called) - mock_find_volume_folder.assert_called_once_with(True) - self.assertEqual(self.VOLUME, res, 'Unexpected ScVolume') - - @mock.patch.object(storagecenter_api.SCApi, - '_get_json') - @mock.patch.object(storagecenter_api.SCApi, - '_find_volume_folder') - @mock.patch.object(storagecenter_api.HttpClient, - 'post') - @mock.patch.object(storagecenter_api.SCApi, - '_find_qos_profile') - @mock.patch.object(storagecenter_api.SCApi, - '_find_data_reduction_profile') - @mock.patch.object(storagecenter_api.SCApi, - '_find_storage_profile') - @mock.patch.object(storagecenter_api.SCApi, - '_find_replay_profiles') - def test_create_volume_with_profiles(self, - mock_find_replay_profiles, - mock_find_storage_profile, - mock_find_data_reduction_profile, - mock_find_qos_profile, - mock_post, - mock_find_volume_folder, - mock_get_json, - mock_close_connection, - mock_open_connection, - mock_init): - mock_find_replay_profiles.return_value = (['12345.4'], []) - mock_get_json.return_value = self.VOLUME - mock_find_volume_folder.return_value = {'instanceId': '12345.200'} - mock_post.return_value = self.RESPONSE_201 - mock_find_storage_profile.return_value = {'instanceId': '12345.0'} - mock_find_data_reduction_profile.return_value = {'instanceId': - '12345.1'} - mock_find_qos_profile.side_effect = [{'instanceId': '12345.2'}, - {'instanceId': '12345.3'}] - res = self.scapi.create_volume(self.volume_name, 1, 'storage_profile', - 'replay_profile_string', 'volume_qos', - 'group_qos', 'datareductionprofile') - expected_payload = {'Name': self.volume_name, - 'Notes': 'Created by Dell EMC Cinder Driver', - 'Size': '1 GB', - 'StorageCenter': 12345, - 'VolumeFolder': '12345.200', - 'StorageProfile': '12345.0', - 'VolumeQosProfile': '12345.2', - 
'GroupQosProfile': '12345.3', - 'DataReductionProfile': '12345.1', - 'ReplayProfileList': ['12345.4']} - mock_find_volume_folder.assert_called_once_with(True) - mock_post.assert_called_once_with('StorageCenter/ScVolume', - expected_payload, True) - self.assertEqual(self.VOLUME, res) - - @mock.patch.object(storagecenter_api.SCApi, - '_find_volume_folder') - @mock.patch.object(storagecenter_api.SCApi, - '_find_qos_profile') - @mock.patch.object(storagecenter_api.SCApi, - '_find_storage_profile') - @mock.patch.object(storagecenter_api.SCApi, - '_find_replay_profiles') - def test_create_volume_profile_not_found(self, - mock_find_replay_profiles, - mock_find_storage_profile, - mock_find_qos_profile, - mock_find_volume_folder, - mock_close_connection, - mock_open_connection, - mock_init): - mock_find_replay_profiles.return_value = (['12345.4'], []) - mock_find_volume_folder.return_value = self.FLDR - mock_find_storage_profile.return_value = [{'instanceId': '12345.0'}] - # Failure is on the volumeqosprofile. 
- mock_find_qos_profile.return_value = None - self.assertRaises(exception.VolumeBackendAPIException, - self.scapi.create_volume, self.volume_name, 1, - 'storage_profile', 'replay_profile_string', - 'volume_qos', 'group_qos', 'datareductionprofile') - - @mock.patch.object(storagecenter_api.SCApi, - '_find_storage_profile', - return_value=None) - @mock.patch.object(storagecenter_api.SCApi, - '_find_volume_folder', - return_value=FLDR) - def test_create_volume_storage_profile_missing(self, - mock_find_volume_folder, - mock_find_storage_profile, - mock_close_connection, - mock_open_connection, - mock_init): - self.assertRaises(exception.VolumeBackendAPIException, - self.scapi.create_volume, - self.volume_name, - 1, - 'Blah') - - @mock.patch.object(storagecenter_api.SCApi, - '_get_json', - return_value=VOLUME) - @mock.patch.object(storagecenter_api.SCApi, - '_find_storage_profile', - return_value=STORAGE_PROFILE_LIST[0]) - @mock.patch.object(storagecenter_api.SCApi, - '_find_volume_folder', - return_value=FLDR) - @mock.patch.object(storagecenter_api.HttpClient, - 'post', - return_value=RESPONSE_201) - def test_create_volume_storage_profile(self, - mock_post, - mock_find_volume_folder, - mock_find_storage_profile, - mock_get_json, - mock_close_connection, - mock_open_connection, - mock_init): - self.scapi.create_volume( - self.volume_name, - 1, - 'Recommended') - actual = mock_post.call_args[0][1]['StorageProfile'] - expected = self.STORAGE_PROFILE_LIST[0]['instanceId'] - self.assertEqual(expected, actual) - - @mock.patch.object(storagecenter_api.SCApi, - '_search_for_volume', - return_value=VOLUME) - @mock.patch.object(storagecenter_api.SCApi, - '_get_json', - return_value=None) - @mock.patch.object(storagecenter_api.SCApi, - '_find_volume_folder', - return_value=FLDR) - @mock.patch.object(storagecenter_api.HttpClient, - 'post', - return_value=RESPONSE_201) - def test_create_volume_retry_find(self, - mock_post, - mock_find_volume_folder, - mock_get_json, - 
mock_search_for_volume, - mock_close_connection, - mock_open_connection, - mock_init): - # Test case where find_volume is used to do a retry of finding the - # created volume - res = self.scapi.create_volume( - self.volume_name, - 1) - self.assertTrue(mock_post.called) - self.assertTrue(mock_get_json.called) - mock_search_for_volume.assert_called_once_with(self.volume_name) - mock_find_volume_folder.assert_called_once_with(True) - self.assertEqual(self.VOLUME, res, 'Unexpected ScVolume') - - @mock.patch.object(storagecenter_api.SCApi, - '_get_json', - return_value=VOLUME) - @mock.patch.object(storagecenter_api.SCApi, - '_find_volume_folder', - return_value=None) - @mock.patch.object(storagecenter_api.HttpClient, - 'post', - return_value=RESPONSE_201) - def test_create_vol_folder_fail(self, - mock_post, - mock_find_volume_folder, - mock_get_json, - mock_close_connection, - mock_open_connection, - mock_init): - # Test calling create_volume where volume folder does not exist and - # fails to be created - res = self.scapi.create_volume( - self.volume_name, - 1) - self.assertTrue(mock_post.called) - self.assertTrue(mock_get_json.called) - mock_find_volume_folder.assert_called_once_with(True) - self.assertEqual(self.VOLUME, res, 'Unexpected ScVolume') - - @mock.patch.object(storagecenter_api.SCApi, - '_get_json', - return_value=None) - @mock.patch.object(storagecenter_api.SCApi, - '_find_volume_folder', - return_value=FLDR) - @mock.patch.object(storagecenter_api.HttpClient, - 'post', - return_value=RESPONSE_400) - def test_create_volume_failure(self, - mock_post, - mock_find_volume_folder, - mock_get_json, - mock_close_connection, - mock_open_connection, - mock_init): - res = self.scapi.create_volume( - self.volume_name, - 1) - mock_find_volume_folder.assert_called_once_with(True) - self.assertIsNone(res, 'None expected') - - @mock.patch.object(storagecenter_api.SCApi, - '_get_json', - return_value=VOLUME_LIST) - @mock.patch.object(storagecenter_api.HttpClient, - 'post', 
- return_value=RESPONSE_200) - def test__get_volume_list_enforce_vol_fldr(self, - mock_post, - mock_get_json, - mock_close_connection, - mock_open_connection, - mock_init): - # Test case to find volume in the configured volume folder - res = self.scapi._get_volume_list(self.volume_name, None, True) - self.assertTrue(mock_post.called) - self.assertTrue(mock_get_json.called) - self.assertEqual(self.VOLUME_LIST, res, 'Unexpected volume list') - - @mock.patch.object(storagecenter_api.SCApi, - '_get_json', - return_value=VOLUME_LIST) - @mock.patch.object(storagecenter_api.HttpClient, - 'post', - return_value=RESPONSE_200) - def test__get_volume_list_any_fldr(self, - mock_post, - mock_get_json, - mock_close_connection, - mock_open_connection, - mock_init): - # Test case to find volume anywhere in the configured SC - res = self.scapi._get_volume_list(self.volume_name, None, False) - self.assertTrue(mock_post.called) - self.assertTrue(mock_get_json.called) - self.assertEqual(self.VOLUME_LIST, res, 'Unexpected volume list') - - def test_get_volume_list_no_name_no_id(self, - mock_close_connection, - mock_open_connection, - mock_init): - # Test case specified volume name is None and device id is None. 
- res = self.scapi._get_volume_list(None, None, True) - self.assertIsNone(res, 'None expected') - - @mock.patch.object(storagecenter_api.HttpClient, - 'post', - return_value=RESPONSE_204) - def test__get_volume_list_failure(self, - mock_post, - mock_close_connection, - mock_open_connection, - mock_init): - # Test case to find volume in the configured volume folder - res = self.scapi._get_volume_list(self.volume_name, None, True) - self.assertTrue(mock_post.called) - self.assertIsNone(res, 'None expected') - - @mock.patch.object(storagecenter_api.SCApi, - '_search_for_volume', - return_value=VOLUME) - def test_find_volume(self, - mock_search_for_volume, - mock_close_connection, - mock_open_connection, - mock_init): - # Test case to find volume by name - res = self.scapi.find_volume(self.volume_name, None) - mock_search_for_volume.assert_called_once_with(self.volume_name) - self.assertEqual(self.VOLUME, res) - - @mock.patch.object(storagecenter_api.SCApi, - '_search_for_volume', - return_value=None) - def test_find_volume_not_found(self, - mock_search_for_volume, - mock_close_connection, - mock_open_connection, - mock_init): - # Test case to find volume by name - res = self.scapi.find_volume(self.volume_name, None) - mock_search_for_volume.assert_called_once_with(self.volume_name) - self.assertIsNone(res) - - @mock.patch.object(storagecenter_api.SCApi, - 'get_volume', - return_value=VOLUME) - def test_find_volume_with_provider_id(self, - mock_get_volume, - mock_close_connection, - mock_open_connection, - mock_init): - provider_id = str(self.scapi.ssn) + '.1' - res = self.scapi.find_volume(self.volume_name, provider_id) - mock_get_volume.assert_called_once_with(provider_id) - self.assertEqual(self.VOLUME, res) - - @mock.patch.object(storagecenter_api.SCApi, - 'get_volume') - @mock.patch.object(storagecenter_api.SCApi, - '_search_for_volume', - return_value=VOLUME) - def test_find_volume_with_invalid_provider_id(self, - mock_search_for_volume, - mock_get_volume, - 
mock_close_connection, - mock_open_connection, - mock_init): - provider_id = 'WrongSSN.1' - res = self.scapi.find_volume(self.volume_name, provider_id) - mock_search_for_volume.assert_called_once_with(self.volume_name) - self.assertFalse(mock_get_volume.called) - self.assertEqual(self.VOLUME, res) - - @mock.patch.object(storagecenter_api.SCApi, - 'get_volume', - return_value=None) - def test_find_volume_with_provider_id_not_found(self, - mock_get_volume, - mock_close_connection, - mock_open_connection, - mock_init): - provider_id = str(self.scapi.ssn) + '.1' - res = self.scapi.find_volume(self.volume_name, provider_id) - mock_get_volume.assert_called_once_with(provider_id) - self.assertIsNone(res) - - @mock.patch.object(storagecenter_api.SCApi, - 'get_volume') - @mock.patch.object(storagecenter_api.SCApi, - '_import_one', - return_value=VOLUME) - def test_find_volume_with_provider_id_complete_replication( - self, - mock_import_one, - mock_get_volume, - mock_close_connection, - mock_open_connection, - mock_init): - provider_id = str(self.scapi.ssn) + '.1' - # Configure to middle of failover. - self.scapi.failed_over = True - mock_get_volume.return_value = {'name': self.repl_name} - res = self.scapi.find_volume(self.volume_name, provider_id) - self.scapi.failed_over = False - mock_import_one.assert_called_once_with(mock_get_volume.return_value, - self.volume_name) - mock_get_volume.assert_called_once_with(provider_id) - self.assertEqual(self.VOLUME, res, 'Unexpected volume') - - @mock.patch.object(storagecenter_api.SCApi, - 'get_volume') - @mock.patch.object(storagecenter_api.SCApi, - '_import_one', - return_value=None) - def test_find_volume_with_provider_id_import_fail(self, - mock_import_one, - mock_get_volume, - mock_close_connection, - mock_open_connection, - mock_init): - provider_id = str(self.scapi.ssn) + '.1' - # Configure to middle of failover. 
- self.scapi.failed_over = True - mock_get_volume.return_value = {'name': self.repl_name} - self.assertRaises(exception.VolumeBackendAPIException, - self.scapi.find_volume, self.volume_name, - provider_id) - self.scapi.failed_over = False - mock_import_one.assert_called_once_with(mock_get_volume.return_value, - self.volume_name) - mock_get_volume.assert_called_once_with(provider_id) - - @mock.patch.object(storagecenter_api.SCApi, - '_get_volume_list', - return_value=None) - def test_search_for_volume_no_name(self, - mock_get_volume_list, - mock_close_connection, - mock_open_connection, - mock_init): - # Test calling find_volume with no name or instanceid - res = self.scapi._search_for_volume(None) - self.assertIsNone(res) - - @mock.patch.object(storagecenter_api.SCApi, - '_get_volume_list') - def test_search_for_volume_not_found(self, - mock_get_volume_list, - mock_close_connection, - mock_open_connection, - mock_init): - # Test calling find_volume with result of no volume found - mock_get_volume_list.side_effect = [[], []] - res = self.scapi._search_for_volume(self.volume_name) - self.assertIsNone(res) - - @mock.patch.object(storagecenter_api.SCApi, - '_get_volume_list', - return_value=VOLUME_LIST_MULTI_VOLS) - def test_search_for_volume_multi_vols_found(self, - mock_get_volume_list, - mock_close_connection, - mock_open_connection, - mock_init): - # Test case where multiple volumes are found - self.assertRaises(exception.VolumeBackendAPIException, - self.scapi._search_for_volume, self.volume_name) - - @mock.patch.object(storagecenter_api.HttpClient, - 'get', - return_value=RESPONSE_200) - @mock.patch.object(storagecenter_api.SCApi, - '_get_json', - return_value=VOLUME) - def test_get_volume(self, - mock_get_json, - mock_get, - mock_close_connection, - mock_open_connection, - mock_init): - provider_id = str(self.scapi.ssn) + '.1' - res = self.scapi.get_volume(provider_id) - mock_get.assert_called_once_with( - 'StorageCenter/ScVolume/' + provider_id) - 
self.assertEqual(self.VOLUME, res) - - @mock.patch.object(storagecenter_api.HttpClient, - 'get', - return_value=RESPONSE_400) - def test_get_volume_error(self, - mock_get, - mock_close_connection, - mock_open_connection, - mock_init): - provider_id = str(self.scapi.ssn) + '.1' - res = self.scapi.get_volume(provider_id) - mock_get.assert_called_once_with( - 'StorageCenter/ScVolume/' + provider_id) - self.assertIsNone(res) - - def test_get_volume_no_id(self, - mock_close_connection, - mock_open_connection, - mock_init): - provider_id = None - res = self.scapi.get_volume(provider_id) - self.assertIsNone(res) - - @mock.patch.object(storagecenter_api.SCApi, - '_get_json', - return_value=True) - @mock.patch.object(storagecenter_api.HttpClient, - 'delete', - return_value=RESPONSE_200) - @mock.patch.object(storagecenter_api.SCApi, - 'find_volume', - return_value=VOLUME) - def test_delete_volume(self, - mock_find_volume, - mock_delete, - mock_get_json, - mock_close_connection, - mock_open_connection, - mock_init): - res = self.scapi.delete_volume(self.volume_name) - self.assertTrue(mock_delete.called) - mock_find_volume.assert_called_once_with(self.volume_name, None) - self.assertTrue(mock_get_json.called) - self.assertTrue(res) - - @mock.patch.object(storagecenter_api.SCApi, - '_get_json', - return_value=True) - @mock.patch.object(storagecenter_api.HttpClient, - 'delete', - return_value=RESPONSE_200) - @mock.patch.object(storagecenter_api.SCApi, - 'find_volume', - return_value=VOLUME) - def test_delete_volume_with_provider_id(self, - mock_find_volume, - mock_delete, - mock_get_json, - mock_close_connection, - mock_open_connection, - mock_init): - provider_id = str(self.scapi.ssn) + '.1' - res = self.scapi.delete_volume(self.volume_name, provider_id) - mock_find_volume.assert_called_once_with(self.volume_name, provider_id) - self.assertTrue(mock_delete.called) - self.assertTrue(mock_get_json.called) - self.assertTrue(res) - - @mock.patch.object(storagecenter_api.HttpClient, 
- 'delete', - return_value=RESPONSE_400) - @mock.patch.object(storagecenter_api.SCApi, - 'find_volume', - return_value=VOLUME) - def test_delete_volume_failure(self, - mock_find_volume, - mock_delete, - mock_close_connection, - mock_open_connection, - mock_init): - provider_id = str(self.scapi.ssn) + '.1' - self.assertRaises(exception.VolumeBackendAPIException, - self.scapi.delete_volume, self.volume_name, - provider_id) - mock_find_volume.assert_called_once_with(self.volume_name, provider_id) - - @mock.patch.object(storagecenter_api.SCApi, - 'find_volume', - return_value=None) - def test_delete_volume_no_vol_found(self, - mock_find_volume, - mock_close_connection, - mock_open_connection, - mock_init): - # Test case where volume to be deleted does not exist - res = self.scapi.delete_volume(self.volume_name, None) - mock_find_volume.assert_called_once_with(self.volume_name, None) - self.assertTrue(res, 'Expected True') - - @mock.patch.object(storagecenter_api.SCApi, - '_find_folder', - return_value=SVR_FLDR) - def test_find_server_folder(self, - mock_find_folder, - mock_close_connection, - mock_open_connection, - mock_init): - res = self.scapi._find_server_folder(False) - mock_find_folder.assert_called_once_with( - 'StorageCenter/ScServerFolder/GetList', - self.configuration.dell_sc_server_folder, 12345) - self.assertEqual(self.SVR_FLDR, res, 'Unexpected server folder') - - @mock.patch.object(storagecenter_api.SCApi, - '_create_folder_path', - return_value=SVR_FLDR) - @mock.patch.object(storagecenter_api.SCApi, - '_find_folder', - return_value=None) - def test_find_server_folder_create_folder(self, - mock_find_folder, - mock_create_folder_path, - mock_close_connection, - mock_open_connection, - mock_init): - # Test case where specified server folder is not found and must be - # created - res = self.scapi._find_server_folder(True) - mock_find_folder.assert_called_once_with( - 'StorageCenter/ScServerFolder/GetList', - self.configuration.dell_sc_server_folder, 12345) - 
self.assertTrue(mock_create_folder_path.called) - self.assertEqual(self.SVR_FLDR, res, 'Unexpected server folder') - - @mock.patch.object(storagecenter_api.SCApi, - '_find_folder', - return_value=None) - def test_find_server_folder_fail(self, - mock_find_folder, - mock_close_connection, - mock_open_connection, - mock_init): - # Test case where _find_server_folder returns none - res = self.scapi._find_server_folder( - False) - mock_find_folder.assert_called_once_with( - 'StorageCenter/ScServerFolder/GetList', - self.configuration.dell_sc_volume_folder, 12345) - self.assertIsNone(res, 'Expected None') - - @mock.patch.object(storagecenter_api.HttpClient, - 'post', - return_value=RESPONSE_200) - def test_add_hba(self, - mock_post, - mock_close_connection, - mock_open_connection, - mock_init): - res = self.scapi._add_hba(self.SCSERVER, - self.IQN) - self.assertTrue(mock_post.called) - self.assertTrue(res) - - @mock.patch.object(storagecenter_api.HttpClient, - 'post', - return_value=RESPONSE_200) - def test_add_hba_fc(self, - mock_post, - mock_close_connection, - mock_open_connection, - mock_init): - saveproto = self.scapi.protocol - self.scapi.protocol = 'FibreChannel' - res = self.scapi._add_hba(self.SCSERVER, - self.WWN) - self.assertTrue(mock_post.called) - self.assertTrue(res) - self.scapi.protocol = saveproto - - @mock.patch.object(storagecenter_api.HttpClient, - 'post', - return_value=RESPONSE_400) - def test_add_hba_failure(self, - mock_post, - mock_close_connection, - mock_open_connection, - mock_init): - res = self.scapi._add_hba(self.SCSERVER, - self.IQN) - self.assertTrue(mock_post.called) - self.assertFalse(res) - - @mock.patch.object(storagecenter_api.SCApi, - '_get_json', - return_value=SVR_OS_S) - @mock.patch.object(storagecenter_api.HttpClient, - 'post', - return_value=RESPONSE_200) - def test_find_serveros(self, - mock_post, - mock_get_json, - mock_close_connection, - mock_open_connection, - mock_init): - res = self.scapi._find_serveros('Red Hat Linux 
6.x') - self.assertTrue(mock_get_json.called) - self.assertTrue(mock_post.called) - self.assertEqual('64702.38', res, 'Wrong InstanceId') - - @mock.patch.object(storagecenter_api.SCApi, - '_get_json', - return_value=SVR_OS_S) - @mock.patch.object(storagecenter_api.HttpClient, - 'post', - return_value=RESPONSE_200) - def test_find_serveros_not_found(self, - mock_post, - mock_get_json, - mock_close_connection, - mock_open_connection, - mock_init): - # Test requesting a Server OS that will not be found - res = self.scapi._find_serveros('Non existent OS') - self.assertTrue(mock_get_json.called) - self.assertTrue(mock_post.called) - self.assertIsNone(res, 'None expected') - - @mock.patch.object(storagecenter_api.HttpClient, - 'post', - return_value=RESPONSE_400) - def test_find_serveros_failed(self, - mock_post, - mock_close_connection, - mock_open_connection, - mock_init): - res = self.scapi._find_serveros('Red Hat Linux 6.x') - self.assertIsNone(res, 'None expected') - - @mock.patch.object(storagecenter_api.SCApi, - '_find_server_folder', - return_value=SVR_FLDR) - @mock.patch.object(storagecenter_api.SCApi, - '_add_hba', - return_value=FC_HBA) - @mock.patch.object(storagecenter_api.SCApi, - '_create_server', - return_value=SCSERVER) - def test_create_server_multiple_hbas(self, - mock_create_server, - mock_add_hba, - mock_find_server_folder, - mock_close_connection, - mock_open_connection, - mock_init): - res = self.scapi.create_server(self.WWNS, 'Red Hat Linux 6.x') - self.assertTrue(mock_create_server.called) - self.assertTrue(mock_add_hba.called) - self.assertEqual(self.SCSERVER, res, 'Unexpected ScServer') - - @mock.patch.object(storagecenter_api.SCApi, - '_add_hba', - return_value=True) - @mock.patch.object(storagecenter_api.SCApi, - '_first_result', - return_value=SCSERVER) - @mock.patch.object(storagecenter_api.SCApi, - '_find_server_folder', - return_value=SVR_FLDR) - @mock.patch.object(storagecenter_api.SCApi, - '_find_serveros', - return_value='64702.38') - 
@mock.patch.object(storagecenter_api.HttpClient, - 'post', - return_value=RESPONSE_201) - def test_create_server(self, - mock_post, - mock_find_serveros, - mock_find_server_folder, - mock_first_result, - mock_add_hba, - mock_close_connection, - mock_open_connection, - mock_init): - res = self.scapi.create_server(self.IQN, 'Red Hat Linux 6.x') - self.assertTrue(mock_find_serveros.called) - self.assertTrue(mock_find_server_folder.called) - self.assertTrue(mock_first_result.called) - self.assertTrue(mock_add_hba.called) - self.assertEqual(self.SCSERVER, res, 'Unexpected ScServer') - - @mock.patch.object(storagecenter_api.SCApi, - '_add_hba', - return_value=True) - @mock.patch.object(storagecenter_api.SCApi, - '_first_result', - return_value=SCSERVER) - @mock.patch.object(storagecenter_api.SCApi, - '_find_server_folder', - return_value=SVR_FLDR) - @mock.patch.object(storagecenter_api.SCApi, - '_find_serveros', - return_value=None) - @mock.patch.object(storagecenter_api.HttpClient, - 'post', - return_value=RESPONSE_201) - def test_create_server_os_not_found(self, - mock_post, - mock_find_serveros, - mock_find_server_folder, - mock_first_result, - mock_add_hba, - mock_close_connection, - mock_open_connection, - mock_init): - res = self.scapi.create_server(self.IQN, 'Red Hat Binux 6.x') - self.assertTrue(mock_find_serveros.called) - self.assertEqual(self.SCSERVER, res, 'Unexpected ScServer') - - @mock.patch.object(storagecenter_api.SCApi, - '_add_hba', - return_value=True) - @mock.patch.object(storagecenter_api.SCApi, - '_first_result', - return_value=SCSERVER) - @mock.patch.object(storagecenter_api.SCApi, - '_find_server_folder', - return_value=None) - @mock.patch.object(storagecenter_api.SCApi, - '_find_serveros', - return_value='64702.38') - @mock.patch.object(storagecenter_api.HttpClient, - 'post', - return_value=RESPONSE_201) - def test_create_server_fldr_not_found(self, - mock_post, - mock_find_serveros, - mock_find_server_folder, - mock_first_result, - 
mock_add_hba, - mock_close_connection, - mock_open_connection, - mock_init): - res = self.scapi.create_server(self.IQN, 'Red Hat Linux 6.x') - self.assertTrue(mock_find_server_folder.called) - self.assertEqual(self.SCSERVER, res, 'Unexpected ScServer') - - @mock.patch.object(storagecenter_api.SCApi, - '_add_hba', - return_value=True) - @mock.patch.object(storagecenter_api.SCApi, - '_first_result', - return_value=SCSERVER) - @mock.patch.object(storagecenter_api.SCApi, - '_find_server_folder', - return_value=None) - @mock.patch.object(storagecenter_api.SCApi, - '_find_serveros', - return_value='64702.38') - @mock.patch.object(storagecenter_api.HttpClient, - 'post', - return_value=RESPONSE_400) - def test_create_server_failure(self, - mock_post, - mock_find_serveros, - mock_find_server_folder, - mock_first_result, - mock_add_hba, - mock_close_connection, - mock_open_connection, - mock_init): - res = self.scapi.create_server(self.IQN, 'Red Hat Linux 6.x') - self.assertIsNone(res, 'None expected') - - @mock.patch.object(storagecenter_api.SCApi, - '_add_hba', - return_value=True) - @mock.patch.object(storagecenter_api.SCApi, - '_first_result', - return_value=None) - @mock.patch.object(storagecenter_api.SCApi, - '_find_server_folder', - return_value=None) - @mock.patch.object(storagecenter_api.SCApi, - '_find_serveros', - return_value='64702.38') - @mock.patch.object(storagecenter_api.HttpClient, - 'post', - return_value=RESPONSE_201) - def test_create_server_not_found(self, - mock_post, - mock_find_serveros, - mock_find_server_folder, - mock_first_result, - mock_add_hba, - mock_close_connection, - mock_open_connection, - mock_init): - # Test create server where _first_result is None - res = self.scapi.create_server(self.IQN, 'Red Hat Linux 6.x') - self.assertIsNone(res, 'None expected') - - @mock.patch.object(storagecenter_api.SCApi, - '_delete_server', - return_value=None) - @mock.patch.object(storagecenter_api.SCApi, - '_add_hba', - return_value=False) - 
@mock.patch.object(storagecenter_api.SCApi, - '_first_result', - return_value=SCSERVER) - @mock.patch.object(storagecenter_api.SCApi, - '_find_server_folder', - return_value=SVR_FLDR) - @mock.patch.object(storagecenter_api.SCApi, - '_find_serveros', - return_value='64702.38') - @mock.patch.object(storagecenter_api.HttpClient, - 'post', - return_value=RESPONSE_201) - def test_create_server_addhba_fail(self, - mock_post, - mock_find_serveros, - mock_find_server_folder, - mock_first_result, - mock_add_hba, - mock_delete_server, - mock_close_connection, - mock_open_connection, - mock_init): - # Tests create server where add hba fails - res = self.scapi.create_server(self.IQN, 'Red Hat Linux 6.x') - self.assertTrue(mock_delete_server.called) - self.assertIsNone(res, 'None expected') - - @mock.patch.object(storagecenter_api.SCApi, - '_first_result', - return_value=SCSERVER) - @mock.patch.object(storagecenter_api.SCApi, - '_find_serverhba', - return_value=ISCSI_HBA) - @mock.patch.object(storagecenter_api.HttpClient, - 'post', - return_value=RESPONSE_200) - def test_find_server(self, - mock_post, - mock_find_serverhba, - mock_first_result, - mock_close_connection, - mock_open_connection, - mock_init): - res = self.scapi.find_server(self.IQN) - self.assertTrue(mock_find_serverhba.called) - self.assertTrue(mock_first_result.called) - self.assertIsNotNone(res, 'Expected ScServer') - - @mock.patch.object(storagecenter_api.SCApi, - '_find_serverhba', - return_value=None) - @mock.patch.object(storagecenter_api.HttpClient, - 'post', - return_value=RESPONSE_200) - def test_find_server_no_hba(self, - mock_post, - mock_find_serverhba, - mock_close_connection, - mock_open_connection, - mock_init): - # Test case where a ScServer HBA does not exist with the specified IQN - # or WWN - res = self.scapi.find_server(self.IQN) - self.assertTrue(mock_find_serverhba.called) - self.assertIsNone(res, 'Expected None') - - @mock.patch.object(storagecenter_api.SCApi, - '_find_serverhba', - 
return_value=ISCSI_HBA) - @mock.patch.object(storagecenter_api.HttpClient, - 'post', - return_value=RESPONSE_204) - def test_find_server_failure(self, - mock_post, - mock_find_serverhba, - mock_close_connection, - mock_open_connection, - mock_init): - # Test case where a ScServer does not exist with the specified - # ScServerHba - res = self.scapi.find_server(self.IQN) - self.assertTrue(mock_find_serverhba.called) - self.assertIsNone(res, 'Expected None') - - @mock.patch.object(storagecenter_api.SCApi, - '_first_result', - return_value=ISCSI_HBA) - @mock.patch.object(storagecenter_api.HttpClient, - 'post', - return_value=RESPONSE_200) - def test_find_serverhba(self, - mock_post, - mock_first_result, - mock_close_connection, - mock_open_connection, - mock_init): - res = self.scapi.find_server(self.IQN) - self.assertTrue(mock_post.called) - self.assertTrue(mock_first_result.called) - self.assertIsNotNone(res, 'Expected ScServerHba') - - @mock.patch.object(storagecenter_api.HttpClient, - 'post', - return_value=RESPONSE_204) - def test_find_serverhba_failure(self, - mock_post, - mock_close_connection, - mock_open_connection, - mock_init): - # Test case where a ScServer does not exist with the specified - # ScServerHba - res = self.scapi.find_server(self.IQN) - self.assertIsNone(res, 'Expected None') - - @mock.patch.object(storagecenter_api.SCApi, - '_get_json', - return_value=ISCSI_FLT_DOMAINS) - @mock.patch.object(storagecenter_api.HttpClient, - 'get', - return_value=RESPONSE_200) - def test_find_domains(self, - mock_get, - mock_get_json, - mock_close_connection, - mock_open_connection, - mock_init): - res = self.scapi._find_domains(u'64702.5764839588723736074.69') - self.assertTrue(mock_get.called) - self.assertTrue(mock_get_json.called) - self.assertEqual( - self.ISCSI_FLT_DOMAINS, res, 'Unexpected ScIscsiFaultDomain') - - @mock.patch.object(storagecenter_api.HttpClient, - 'get', - return_value=RESPONSE_204) - def test_find_domains_error(self, - mock_get, - 
mock_close_connection, - mock_open_connection, - mock_init): - # Test case where get of ScControllerPort FaultDomainList fails - res = self.scapi._find_domains(u'64702.5764839588723736074.69') - self.assertIsNone(res, 'Expected None') - - @mock.patch.object(storagecenter_api.SCApi, - '_get_json', - return_value=FC_HBAS) - @mock.patch.object(storagecenter_api.HttpClient, - 'get', - return_value=RESPONSE_200) - def test_find_initiators(self, - mock_get, - mock_get_json, - mock_close_connection, - mock_open_connection, - mock_init): - res = self.scapi._find_initiators(self.SCSERVER) - self.assertTrue(mock_get.called) - self.assertTrue(mock_get_json.called) - self.assertIsNotNone(res, 'Expected WWN list') - - @mock.patch.object(storagecenter_api.HttpClient, - 'get', - return_value=RESPONSE_400) - def test_find_initiators_error(self, - mock_get, - mock_close_connection, - mock_open_connection, - mock_init): - # Test case where get of ScServer HbaList fails - res = self.scapi._find_initiators(self.SCSERVER) - self.assertListEqual([], res, 'Expected empty list') - - @mock.patch.object(storagecenter_api.SCApi, - '_get_json', - return_value=MAPPINGS) - @mock.patch.object(storagecenter_api.HttpClient, - 'get', - return_value=RESPONSE_200) - def test_get_volume_count(self, - mock_get, - mock_get_json, - mock_close_connection, - mock_open_connection, - mock_init): - res = self.scapi.get_volume_count(self.SCSERVER) - self.assertTrue(mock_get.called) - self.assertTrue(mock_get_json.called) - self.assertEqual(len(self.MAPPINGS), res, 'Mapping count mismatch') - - @mock.patch.object(storagecenter_api.HttpClient, - 'get', - return_value=RESPONSE_400) - def test_get_volume_count_failure(self, - mock_get, - mock_close_connection, - mock_open_connection, - mock_init): - # Test case of where get of ScServer MappingList fails - res = self.scapi.get_volume_count(self.SCSERVER) - self.assertTrue(mock_get.called) - self.assertEqual(-1, res, 'Mapping count not -1') - - 
@mock.patch.object(storagecenter_api.SCApi, - '_get_json', - return_value=[]) - @mock.patch.object(storagecenter_api.HttpClient, - 'get', - return_value=RESPONSE_200) - def test_get_volume_count_no_volumes(self, - mock_get, - mock_get_json, - mock_close_connection, - mock_open_connection, - mock_init): - res = self.scapi.get_volume_count(self.SCSERVER) - self.assertTrue(mock_get.called) - self.assertTrue(mock_get_json.called) - self.assertEqual(len([]), res, 'Mapping count mismatch') - - @mock.patch.object(storagecenter_api.SCApi, - '_get_json', - return_value=MAPPINGS) - @mock.patch.object(storagecenter_api.HttpClient, - 'get', - return_value=RESPONSE_200) - def test_find_mappings(self, - mock_get, - mock_get_json, - mock_close_connection, - mock_open_connection, - mock_init): - res = self.scapi._find_mappings(self.VOLUME) - self.assertTrue(mock_get.called) - self.assertTrue(mock_get_json.called) - self.assertEqual(self.MAPPINGS, res, 'Mapping mismatch') - - @mock.patch.object(storagecenter_api.HttpClient, - 'get', - return_value=RESPONSE_200) - def test_find_mappings_inactive_vol(self, - mock_get, - mock_close_connection, - mock_open_connection, - mock_init): - # Test getting volume mappings on inactive volume - res = self.scapi._find_mappings(self.INACTIVE_VOLUME) - self.assertFalse(mock_get.called) - self.assertEqual([], res, 'No mappings expected') - - @mock.patch.object(storagecenter_api.HttpClient, - 'get', - return_value=RESPONSE_400) - def test_find_mappings_failure(self, - mock_get, - mock_close_connection, - mock_open_connection, - mock_init): - # Test case of where get of ScVolume MappingList fails - res = self.scapi._find_mappings(self.VOLUME) - self.assertTrue(mock_get.called) - self.assertEqual([], res, 'Mapping count not empty') - - @mock.patch.object(storagecenter_api.SCApi, - '_get_json', - return_value=[]) - @mock.patch.object(storagecenter_api.HttpClient, - 'get', - return_value=RESPONSE_200) - def test_find_mappings_no_mappings(self, - 
mock_get, - mock_get_json, - mock_close_connection, - mock_open_connection, - mock_init): - # Test case where ScVolume has no mappings - res = self.scapi._find_mappings(self.VOLUME) - self.assertTrue(mock_get.called) - self.assertTrue(mock_get_json.called) - self.assertEqual([], res, 'Mapping count mismatch') - - @mock.patch.object(storagecenter_api.SCApi, - '_get_json', - return_value=MAP_PROFILES) - @mock.patch.object(storagecenter_api.HttpClient, - 'get', - return_value=RESPONSE_200) - def test_find_mapping_profiles(self, - mock_get, - mock_get_json, - mock_close_connection, - mock_open_connection, - mock_init): - # Test case where ScVolume has no mappings - res = self.scapi._find_mapping_profiles(self.VOLUME) - self.assertTrue(mock_get.called) - self.assertTrue(mock_get_json.called) - self.assertEqual(self.MAP_PROFILES, res) - - @mock.patch.object(storagecenter_api.HttpClient, - 'get', - return_value=RESPONSE_400) - def test_find_mapping_profiles_error(self, - mock_get, - mock_close_connection, - mock_open_connection, - mock_init): - # Test case where ScVolume has no mappings - res = self.scapi._find_mapping_profiles(self.VOLUME) - self.assertTrue(mock_get.called) - self.assertEqual([], res) - - @mock.patch.object(storagecenter_api.SCApi, - '_first_result', - return_value=CTRLR_PORT) - @mock.patch.object(storagecenter_api.HttpClient, - 'get', - return_value=RESPONSE_200) - def test_find_controller_port(self, - mock_get, - mock_first_result, - mock_close_connection, - mock_open_connection, - mock_init): - res = self.scapi._find_controller_port(u'64702.5764839588723736070.51') - self.assertTrue(mock_get.called) - self.assertTrue(mock_first_result.called) - self.assertEqual(self.CTRLR_PORT, res, 'ScControllerPort mismatch') - - @mock.patch.object(storagecenter_api.HttpClient, - 'get', - return_value=RESPONSE_204) - def test_find_controller_port_failure(self, - mock_get, - mock_close_connection, - mock_open_connection, - mock_init): - # Test case where get of 
ScVolume MappingList fails - res = self.scapi._find_controller_port(self.VOLUME) - self.assertTrue(mock_get.called) - self.assertIsNone(res, 'None expected') - - @mock.patch.object(storagecenter_api.SCApi, - '_find_controller_port', - return_value=FC_CTRLR_PORT) - @mock.patch.object(storagecenter_api.SCApi, - '_find_mappings', - return_value=FC_MAPPINGS) - @mock.patch.object(storagecenter_api.SCApi, - '_find_initiators', - return_value=WWNS) - def test_find_wwns(self, - mock_find_initiators, - mock_find_mappings, - mock_find_controller_port, - mock_close_connection, - mock_open_connection, - mock_init): - lun, wwns, itmap = self.scapi.find_wwns(self.VOLUME, - self.SCSERVER) - self.assertTrue(mock_find_initiators.called) - self.assertTrue(mock_find_mappings.called) - self.assertTrue(mock_find_controller_port.called) - - # The _find_controller_port is Mocked, so all mapping pairs - # will have the same WWN for the ScControllerPort - itmapCompare = {u'21000024FF30441C': [u'5000D31000FCBE36'], - u'21000024FF30441D': - [u'5000D31000FCBE36', u'5000D31000FCBE36']} - self.assertEqual(1, lun, 'Incorrect LUN') - self.assertIsNotNone(wwns, 'WWNs is None') - self.assertEqual(itmapCompare, itmap, 'WWN mapping incorrect') - - @mock.patch.object(storagecenter_api.SCApi, - '_find_mappings', - return_value=[]) - @mock.patch.object(storagecenter_api.SCApi, - '_find_initiators', - return_value=FC_HBAS) - def test_find_wwns_no_mappings(self, - mock_find_initiators, - mock_find_mappings, - mock_close_connection, - mock_open_connection, - mock_init): - # Test case where there are no ScMapping(s) - lun, wwns, itmap = self.scapi.find_wwns(self.VOLUME, - self.SCSERVER) - self.assertTrue(mock_find_initiators.called) - self.assertTrue(mock_find_mappings.called) - self.assertIsNone(lun, 'Incorrect LUN') - self.assertEqual([], wwns, 'WWNs is not empty') - self.assertEqual({}, itmap, 'WWN mapping not empty') - - @mock.patch.object(storagecenter_api.SCApi, - '_find_controller_port', - 
return_value=None) - @mock.patch.object(storagecenter_api.SCApi, - '_find_mappings', - return_value=FC_MAPPINGS) - @mock.patch.object(storagecenter_api.SCApi, - '_find_initiators', - return_value=WWNS) - def test_find_wwns_no_ctlr_port(self, - mock_find_initiators, - mock_find_mappings, - mock_find_controller_port, - mock_close_connection, - mock_open_connection, - mock_init): - # Test case where ScControllerPort is none - lun, wwns, itmap = self.scapi.find_wwns(self.VOLUME, - self.SCSERVER) - self.assertTrue(mock_find_initiators.called) - self.assertTrue(mock_find_mappings.called) - self.assertTrue(mock_find_controller_port.called) - self.assertIsNone(lun, 'Incorrect LUN') - self.assertEqual([], wwns, 'WWNs is not empty') - self.assertEqual({}, itmap, 'WWN mapping not empty') - - @mock.patch.object(storagecenter_api.SCApi, - '_find_controller_port', - return_value=FC_CTRLR_PORT_WWN_ERROR) - @mock.patch.object(storagecenter_api.SCApi, - '_find_mappings', - return_value=FC_MAPPINGS) - @mock.patch.object(storagecenter_api.SCApi, - '_find_initiators', - return_value=WWNS) - def test_find_wwns_wwn_error(self, - mock_find_initiators, - mock_find_mappings, - mock_find_controller_port, - mock_close_connection, - mock_open_connection, - mock_init): - # Test case where ScControllerPort object has WWn instead of wwn for a - # property - lun, wwns, itmap = self.scapi.find_wwns(self.VOLUME, - self.SCSERVER) - self.assertTrue(mock_find_initiators.called) - self.assertTrue(mock_find_mappings.called) - self.assertTrue(mock_find_controller_port.called) - - self.assertIsNone(lun, 'Incorrect LUN') - self.assertEqual([], wwns, 'WWNs is not empty') - self.assertEqual({}, itmap, 'WWN mapping not empty') - - @mock.patch.object(storagecenter_api.SCApi, - '_find_controller_port', - return_value=FC_CTRLR_PORT) - @mock.patch.object(storagecenter_api.SCApi, - '_find_mappings', - return_value=FC_MAPPINGS) - @mock.patch.object(storagecenter_api.SCApi, - '_find_initiators', - 
return_value=WWNS_NO_MATCH) - # Test case where HBA name is not found in list of initiators - def test_find_wwns_hbaname_not_found(self, - mock_find_initiators, - mock_find_mappings, - mock_find_controller_port, - mock_close_connection, - mock_open_connection, - mock_init): - lun, wwns, itmap = self.scapi.find_wwns(self.VOLUME, - self.SCSERVER) - self.assertTrue(mock_find_initiators.called) - self.assertTrue(mock_find_mappings.called) - self.assertTrue(mock_find_controller_port.called) - - self.assertIsNone(lun, 'Incorrect LUN') - self.assertEqual([], wwns, 'WWNs is not empty') - self.assertEqual({}, itmap, 'WWN mapping not empty') - - @mock.patch.object(storagecenter_api.SCApi, - '_find_controller_port', - return_value=FC_CTRLR_PORT) - @mock.patch.object(storagecenter_api.SCApi, - '_find_mappings', - return_value=FC_MAPPINGS_LUN_MISMATCH) - @mock.patch.object(storagecenter_api.SCApi, - '_find_initiators', - return_value=WWNS) - # Test case where FC mappings contain a LUN mismatch - def test_find_wwns_lun_mismatch(self, - mock_find_initiators, - mock_find_mappings, - mock_find_controller_port, - mock_close_connection, - mock_open_connection, - mock_init): - lun, wwns, itmap = self.scapi.find_wwns(self.VOLUME, - self.SCSERVER) - self.assertTrue(mock_find_initiators.called) - self.assertTrue(mock_find_mappings.called) - self.assertTrue(mock_find_controller_port.called) - # The _find_controller_port is Mocked, so all mapping pairs - # will have the same WWN for the ScControllerPort - itmapCompare = {u'21000024FF30441C': [u'5000D31000FCBE36'], - u'21000024FF30441D': - [u'5000D31000FCBE36', u'5000D31000FCBE36']} - self.assertEqual(1, lun, 'Incorrect LUN') - self.assertIsNotNone(wwns, 'WWNs is None') - self.assertEqual(itmapCompare, itmap, 'WWN mapping incorrect') - - @mock.patch.object(storagecenter_api.SCApi, - '_first_result', - return_value=VOLUME_CONFIG) - @mock.patch.object(storagecenter_api.HttpClient, - 'get', - return_value=RESPONSE_200) - def 
test_find_active_controller(self, - mock_get, - mock_first_result, - mock_close_connection, - mock_open_connection, - mock_init): - res = self.scapi._find_active_controller(self.VOLUME) - self.assertTrue(mock_get.called) - self.assertTrue(mock_first_result.called) - self.assertEqual('64702.64703', res, 'Unexpected Active Controller') - - @mock.patch.object(storagecenter_api.HttpClient, - 'get', - return_value=RESPONSE_400) - def test_find_active_controller_failure(self, - mock_get, - mock_close_connection, - mock_open_connection, - mock_init): - # Test case of where get of ScVolume MappingList fails - res = self.scapi._find_active_controller(self.VOLUME) - self.assertTrue(mock_get.called) - self.assertIsNone(res, 'Expected None') - - @mock.patch.object(storagecenter_api.SCApi, - '_find_active_controller', - return_value='64702.5764839588723736131.91') - @mock.patch.object(storagecenter_api.SCApi, - '_find_controller_port', - return_value=ISCSI_CTRLR_PORT) - @mock.patch.object(storagecenter_api.SCApi, - '_find_domains', - return_value=ISCSI_FLT_DOMAINS) - @mock.patch.object(storagecenter_api.SCApi, - '_find_mappings', - return_value=MAPPINGS) - @mock.patch.object(storagecenter_api.SCApi, - '_is_virtualport_mode', - return_value=True) - def test_find_iscsi_properties_mappings(self, - mock_is_virtualport_mode, - mock_find_mappings, - mock_find_domains, - mock_find_ctrl_port, - mock_find_active_controller, - mock_close_connection, - mock_open_connection, - mock_init): - res = self.scapi.find_iscsi_properties(self.VOLUME) - self.assertTrue(mock_is_virtualport_mode.called) - self.assertTrue(mock_find_mappings.called) - self.assertTrue(mock_find_domains.called) - self.assertTrue(mock_find_ctrl_port.called) - self.assertTrue(mock_find_active_controller.called) - expected = {'target_discovered': False, - 'target_iqn': - u'iqn.2002-03.com.compellent:5000d31000fcbe43', - 'target_iqns': - [u'iqn.2002-03.com.compellent:5000d31000fcbe43'], - 'target_lun': 1, - 'target_luns': 
[1], - 'target_portal': u'192.168.0.21:3260', - 'target_portals': [u'192.168.0.21:3260']} - self.assertEqual(expected, res, 'Wrong Target Info') - - @mock.patch.object(storagecenter_api.SCApi, - '_find_mappings', - return_value=[]) - def test_find_iscsi_properties_no_mapping(self, - mock_find_mappings, - mock_close_connection, - mock_open_connection, - mock_init): - # Test case where there are no ScMapping(s) - self.assertRaises(exception.VolumeBackendAPIException, - self.scapi.find_iscsi_properties, - self.VOLUME) - self.assertTrue(mock_find_mappings.called) - - @mock.patch.object(storagecenter_api.SCApi, - '_find_active_controller', - return_value='64702.64702') - @mock.patch.object(storagecenter_api.SCApi, - '_find_controller_port', - return_value=ISCSI_CTRLR_PORT) - @mock.patch.object(storagecenter_api.SCApi, - '_find_domains', - return_value=None) - @mock.patch.object(storagecenter_api.SCApi, - '_find_mappings', - return_value=MAPPINGS) - @mock.patch.object(storagecenter_api.SCApi, - '_is_virtualport_mode', - return_value=True) - def test_find_iscsi_properties_no_domain(self, - mock_is_virtualport_mode, - mock_find_mappings, - mock_find_domains, - mock_find_ctrl_port, - mock_find_active_controller, - mock_close_connection, - mock_open_connection, - mock_init): - # Test case where there are no ScFaultDomain(s) - self.assertRaises(exception.VolumeBackendAPIException, - self.scapi.find_iscsi_properties, - self.VOLUME) - self.assertTrue(mock_is_virtualport_mode.called) - self.assertTrue(mock_find_mappings.called) - self.assertTrue(mock_find_domains.called) - self.assertTrue(mock_find_ctrl_port.called) - self.assertTrue(mock_find_active_controller.called) - - @mock.patch.object(storagecenter_api.SCApi, - '_find_active_controller', - return_value='64702.64702') - @mock.patch.object(storagecenter_api.SCApi, - '_find_controller_port', - return_value=None) - @mock.patch.object(storagecenter_api.SCApi, - '_find_mappings', - return_value=MAPPINGS) - 
@mock.patch.object(storagecenter_api.SCApi, - '_is_virtualport_mode', - return_value=True) - def test_find_iscsi_properties_no_ctrl_port(self, - mock_is_virtualport_mode, - mock_find_mappings, - mock_find_ctrl_port, - mock_find_active_controller, - mock_close_connection, - mock_open_connection, - mock_init): - # Test case where there are no ScFaultDomain(s) - self.assertRaises(exception.VolumeBackendAPIException, - self.scapi.find_iscsi_properties, - self.VOLUME) - self.assertTrue(mock_is_virtualport_mode.called) - self.assertTrue(mock_find_mappings.called) - self.assertTrue(mock_find_ctrl_port.called) - self.assertTrue(mock_find_active_controller.called) - - @mock.patch.object(storagecenter_api.SCApi, - '_find_active_controller', - return_value='64702.64702') - @mock.patch.object(storagecenter_api.SCApi, - '_find_controller_port', - return_value=ISCSI_CTRLR_PORT) - @mock.patch.object(storagecenter_api.SCApi, - '_find_domains', - return_value=ISCSI_FLT_DOMAINS) - @mock.patch.object(storagecenter_api.SCApi, - '_find_mappings', - return_value=MAPPINGS_READ_ONLY) - @mock.patch.object(storagecenter_api.SCApi, - '_is_virtualport_mode', - return_value=True) - def test_find_iscsi_properties_ro(self, - mock_is_virtualport_mode, - mock_find_mappings, - mock_find_domains, - mock_find_ctrl_port, - mock_find_active_controller, - mock_close_connection, - mock_open_connection, - mock_init): - # Test case where Read Only mappings are found - res = self.scapi.find_iscsi_properties(self.VOLUME) - self.assertTrue(mock_is_virtualport_mode.called) - self.assertTrue(mock_find_mappings.called) - self.assertTrue(mock_find_domains.called) - self.assertTrue(mock_find_ctrl_port.called) - self.assertTrue(mock_find_active_controller.called) - expected = {'target_discovered': False, - 'target_iqn': - u'iqn.2002-03.com.compellent:5000d31000fcbe43', - 'target_iqns': - [u'iqn.2002-03.com.compellent:5000d31000fcbe43'], - 'target_lun': 1, - 'target_luns': [1], - 'target_portal': 
u'192.168.0.21:3260', - 'target_portals': [u'192.168.0.21:3260']} - self.assertEqual(expected, res, 'Wrong Target Info') - - @mock.patch.object(storagecenter_api.SCApi, - '_find_active_controller', - return_value='64702.64702') - @mock.patch.object(storagecenter_api.SCApi, - '_find_controller_port') - @mock.patch.object(storagecenter_api.SCApi, - '_find_domains', - return_value=ISCSI_FLT_DOMAINS_MULTI_PORTALS) - @mock.patch.object(storagecenter_api.SCApi, - '_find_mappings', - return_value=MAPPINGS_MULTI_PORTAL) - @mock.patch.object(storagecenter_api.SCApi, - '_is_virtualport_mode', - return_value=True) - def test_find_iscsi_properties_multi_portals(self, - mock_is_virtualport_mode, - mock_find_mappings, - mock_find_domains, - mock_find_ctrl_port, - mock_find_active_controller, - mock_close_connection, - mock_open_connection, - mock_init): - # Test case where there are multiple portals - mock_find_ctrl_port.side_effect = [ - {'iscsiName': 'iqn.2002-03.com.compellent:5000d31000fcbe43'}, - {'iscsiName': 'iqn.2002-03.com.compellent:5000d31000fcbe44'}] - res = self.scapi.find_iscsi_properties(self.VOLUME) - self.assertTrue(mock_find_mappings.called) - self.assertTrue(mock_find_domains.called) - self.assertTrue(mock_find_ctrl_port.called) - self.assertTrue(mock_find_active_controller.called) - self.assertTrue(mock_is_virtualport_mode.called) - expected = {'target_discovered': False, - 'target_iqn': - u'iqn.2002-03.com.compellent:5000d31000fcbe44', - 'target_iqns': - [u'iqn.2002-03.com.compellent:5000d31000fcbe44', - u'iqn.2002-03.com.compellent:5000d31000fcbe43', - u'iqn.2002-03.com.compellent:5000d31000fcbe43', - u'iqn.2002-03.com.compellent:5000d31000fcbe44'], - 'target_lun': 1, - 'target_luns': [1, 1, 1, 1], - 'target_portal': u'192.168.0.25:3260', - 'target_portals': [u'192.168.0.25:3260', - u'192.168.0.21:3260', - u'192.168.0.25:3260', - u'192.168.0.21:3260']} - self.assertEqual(expected, res, 'Wrong Target Info') - - @mock.patch.object(storagecenter_api.SCApi, - 
'_find_active_controller', - return_value='64702.64702') - @mock.patch.object(storagecenter_api.SCApi, - '_find_controller_port') - @mock.patch.object(storagecenter_api.SCApi, - '_find_domains', - return_value=ISCSI_FLT_DOMAINS_MULTI_PORTALS) - @mock.patch.object(storagecenter_api.SCApi, - '_find_mappings', - return_value=MAPPINGS_MULTI_PORTAL) - @mock.patch.object(storagecenter_api.SCApi, - '_is_virtualport_mode', - return_value=True) - def test_find_iscsi_properties_multi_portals_duplicates( - self, - mock_is_virtualport_mode, - mock_find_mappings, - mock_find_domains, - mock_find_ctrl_port, - mock_find_active_controller, - mock_close_connection, - mock_open_connection, - mock_init): - # Test case where there are multiple portals and - mock_find_ctrl_port.return_value = { - 'iscsiName': 'iqn.2002-03.com.compellent:5000d31000fcbe43'} - res = self.scapi.find_iscsi_properties(self.VOLUME) - self.assertTrue(mock_find_mappings.called) - self.assertTrue(mock_find_domains.called) - self.assertTrue(mock_find_ctrl_port.called) - self.assertTrue(mock_find_active_controller.called) - self.assertTrue(mock_is_virtualport_mode.called) - expected = {'target_discovered': False, - 'target_iqn': - u'iqn.2002-03.com.compellent:5000d31000fcbe43', - 'target_iqns': - [u'iqn.2002-03.com.compellent:5000d31000fcbe43', - u'iqn.2002-03.com.compellent:5000d31000fcbe43'], - 'target_lun': 1, - 'target_luns': [1, 1], - 'target_portal': u'192.168.0.25:3260', - 'target_portals': [u'192.168.0.25:3260', - u'192.168.0.21:3260']} - self.assertEqual(expected, res, 'Wrong Target Info') - - @mock.patch.object(storagecenter_api.SCApi, - '_find_active_controller', - return_value='64702.5764839588723736131.91') - @mock.patch.object(storagecenter_api.SCApi, - '_find_controller_port', - return_value=ISCSI_CTRLR_PORT) - @mock.patch.object(storagecenter_api.SCApi, - '_find_mappings', - return_value=MAPPINGS) - @mock.patch.object(storagecenter_api.SCApi, - '_is_virtualport_mode', - return_value=False) - 
@mock.patch.object(storagecenter_api.SCApi, - '_find_controller_port_iscsi_config', - return_value=ISCSI_CONFIG) - def test_find_iscsi_properties_mappings_legacy( - self, - mock_find_controller_port_iscsi_config, - mock_is_virtualport_mode, - mock_find_mappings, - mock_find_ctrl_port, - mock_find_active_controller, - mock_close_connection, - mock_open_connection, - mock_init): - res = self.scapi.find_iscsi_properties(self.VOLUME) - self.assertTrue(mock_is_virtualport_mode.called) - self.assertTrue(mock_find_mappings.called) - self.assertTrue(mock_find_ctrl_port.called) - self.assertTrue(mock_find_controller_port_iscsi_config.called) - self.assertTrue(mock_find_active_controller.called) - expected = {'target_discovered': False, - 'target_iqn': - u'iqn.2002-03.com.compellent:5000d31000fcbe43', - 'target_iqns': - [u'iqn.2002-03.com.compellent:5000d31000fcbe43'], - 'target_lun': 1, - 'target_luns': [1], - 'target_portal': u'192.168.0.21:3260', - 'target_portals': [u'192.168.0.21:3260']} - self.assertEqual(expected, res, 'Wrong Target Info') - - @mock.patch.object(storagecenter_api.SCApi, - '_find_active_controller', - return_value='64702.5764839588723736131.91') - @mock.patch.object(storagecenter_api.SCApi, - '_find_controller_port', - return_value=ISCSI_CTRLR_PORT) - @mock.patch.object(storagecenter_api.SCApi, - '_find_mappings', - return_value=MAPPINGS) - @mock.patch.object(storagecenter_api.SCApi, - '_is_virtualport_mode', - return_value=False) - @mock.patch.object(storagecenter_api.SCApi, - '_find_controller_port_iscsi_config', - return_value=None) - def test_find_iscsi_properties_mappings_legacy_no_iscsi_config( - self, - mock_find_controller_port_iscsi_config, - mock_is_virtualport_mode, - mock_find_mappings, - mock_find_ctrl_port, - mock_find_active_controller, - mock_close_connection, - mock_open_connection, - mock_init): - self.assertRaises(exception.VolumeBackendAPIException, - self.scapi.find_iscsi_properties, - self.VOLUME) - 
self.assertTrue(mock_is_virtualport_mode.called) - self.assertTrue(mock_find_mappings.called) - self.assertTrue(mock_find_ctrl_port.called) - self.assertTrue(mock_find_controller_port_iscsi_config.called) - self.assertTrue(mock_find_active_controller.called) - - @mock.patch.object(storagecenter_api.SCApi, - '_find_active_controller', - return_value='64702.64702') - @mock.patch.object(storagecenter_api.SCApi, - '_find_controller_port', - return_value=ISCSI_CTRLR_PORT) - @mock.patch.object(storagecenter_api.SCApi, - '_find_mappings', - return_value=MAPPINGS_READ_ONLY) - @mock.patch.object(storagecenter_api.SCApi, - '_is_virtualport_mode', - return_value=False) - @mock.patch.object(storagecenter_api.SCApi, - '_find_controller_port_iscsi_config', - return_value=ISCSI_CONFIG) - def test_find_iscsi_properties_ro_legacy(self, - mock_find_iscsi_config, - mock_is_virtualport_mode, - mock_find_mappings, - mock_find_ctrl_port, - mock_find_active_controller, - mock_close_connection, - mock_open_connection, - mock_init): - # Test case where Read Only mappings are found - res = self.scapi.find_iscsi_properties(self.VOLUME) - self.assertTrue(mock_is_virtualport_mode.called) - self.assertTrue(mock_find_mappings.called) - self.assertTrue(mock_find_ctrl_port.called) - self.assertTrue(mock_find_active_controller.called) - self.assertTrue(mock_find_iscsi_config.called) - expected = {'target_discovered': False, - 'target_iqn': - u'iqn.2002-03.com.compellent:5000d31000fcbe43', - 'target_iqns': - [u'iqn.2002-03.com.compellent:5000d31000fcbe43'], - 'target_lun': 1, - 'target_luns': [1], - 'target_portal': u'192.168.0.21:3260', - 'target_portals': [u'192.168.0.21:3260']} - self.assertEqual(expected, res, 'Wrong Target Info') - - @mock.patch.object(storagecenter_api.SCApi, - '_find_active_controller', - return_value='64702.64702') - @mock.patch.object(storagecenter_api.SCApi, - '_find_controller_port') - @mock.patch.object(storagecenter_api.SCApi, - '_find_mappings', - 
return_value=MAPPINGS_MULTI_PORTAL) - @mock.patch.object(storagecenter_api.SCApi, - '_is_virtualport_mode', - return_value=False) - @mock.patch.object(storagecenter_api.SCApi, - '_find_controller_port_iscsi_config', - return_value=ISCSI_CONFIG) - def test_find_iscsi_properties_multi_portals_legacy( - self, - mock_find_controller_port_iscsi_config, - mock_is_virtualport_mode, - mock_find_mappings, - mock_find_ctrl_port, - mock_find_active_controller, - mock_close_connection, - mock_open_connection, - mock_init): - mock_find_ctrl_port.side_effect = [ - {'iscsiName': 'iqn.2002-03.com.compellent:5000d31000fcbe43'}, - {'iscsiName': 'iqn.2002-03.com.compellent:5000d31000fcbe44'}] - # Test case where there are multiple portals - res = self.scapi.find_iscsi_properties(self.VOLUME) - self.assertTrue(mock_find_mappings.called) - self.assertTrue(mock_find_ctrl_port.called) - self.assertTrue(mock_find_active_controller.called) - self.assertTrue(mock_is_virtualport_mode.called) - self.assertTrue(mock_find_controller_port_iscsi_config.called) - # We're feeding the same info back multiple times the information - # will be scrubbed to a single item. 
- expected = {'target_discovered': False, - 'target_iqn': - u'iqn.2002-03.com.compellent:5000d31000fcbe44', - 'target_iqns': - [u'iqn.2002-03.com.compellent:5000d31000fcbe44', - u'iqn.2002-03.com.compellent:5000d31000fcbe43'], - 'target_lun': 1, - 'target_luns': [1, 1], - 'target_portal': u'192.168.0.21:3260', - 'target_portals': [u'192.168.0.21:3260', - u'192.168.0.21:3260']} - self.assertEqual(expected, res, 'Wrong Target Info') - - @mock.patch.object(storagecenter_api.SCApi, - '_first_result', - return_value=MAP_PROFILE) - @mock.patch.object(storagecenter_api.HttpClient, - 'post', - return_value=RESPONSE_200) - @mock.patch.object(storagecenter_api.SCApi, - '_find_mapping_profiles', - return_value=[]) - def test_map_volume(self, - mock_find_mapping_profiles, - mock_post, - mock_first_result, - mock_close_connection, - mock_open_connection, - mock_init): - res = self.scapi.map_volume(self.VOLUME, - self.SCSERVER) - self.assertTrue(mock_find_mapping_profiles.called) - self.assertTrue(mock_post.called) - self.assertTrue(mock_first_result.called) - self.assertEqual(self.MAP_PROFILE, res, 'Incorrect ScMappingProfile') - - @mock.patch.object(storagecenter_api.SCApi, - '_first_result', - return_value=MAP_PROFILE) - @mock.patch.object(storagecenter_api.HttpClient, - 'post', - return_value=RESPONSE_200) - @mock.patch.object(storagecenter_api.SCApi, - '_find_mapping_profiles', - return_value=MAP_PROFILES) - def test_map_volume_existing_mapping(self, - mock_find_mappings, - mock_post, - mock_first_result, - mock_close_connection, - mock_open_connection, - mock_init): - res = self.scapi.map_volume(self.VOLUME, - self.SCSERVER) - self.assertTrue(mock_find_mappings.called) - self.assertFalse(mock_post.called) - self.assertFalse(mock_first_result.called) - self.assertEqual(self.MAP_PROFILE, res, 'Incorrect ScMappingProfile') - - @mock.patch.object(storagecenter_api.SCApi, - '_first_result', - return_value=MAP_PROFILE) - @mock.patch.object(storagecenter_api.HttpClient, - 'post', 
- return_value=RESPONSE_200) - @mock.patch.object(storagecenter_api.SCApi, - '_find_mapping_profiles', - return_value=[]) - def test_map_volume_existing_mapping_not_us(self, - mock_find_mappings, - mock_post, - mock_first_result, - mock_close_connection, - mock_open_connection, - mock_init): - server = {'instanceId': 64702.48, 'name': 'Server X'} - res = self.scapi.map_volume(self.VOLUME, - server) - self.assertTrue(mock_find_mappings.called) - self.assertTrue(mock_post.called) - self.assertTrue(mock_first_result.called) - self.assertEqual(self.MAP_PROFILE, res, 'Incorrect ScMappingProfile') - - @mock.patch.object(storagecenter_api.SCApi, - '_get_id') - @mock.patch.object(storagecenter_api.SCApi, - '_first_result') - @mock.patch.object(storagecenter_api.HttpClient, - 'post') - def test_map_volume_no_vol_id(self, - mock_post, - mock_first_result, - mock_get_id, - mock_close_connection, - mock_open_connection, - mock_init): - # Test case where ScVolume instanceId is None - mock_get_id.side_effect = [None, '64702.47'] - res = self.scapi.map_volume(self.VOLUME, - self.SCSERVER) - self.assertFalse(mock_post.called) - self.assertFalse(mock_first_result.called) - self.assertIsNone(res, 'None expected') - - @mock.patch.object(storagecenter_api.SCApi, - '_get_id') - @mock.patch.object(storagecenter_api.SCApi, - '_first_result') - @mock.patch.object(storagecenter_api.HttpClient, - 'post') - def test_map_volume_no_server_id(self, - mock_post, - mock_first_result, - mock_get_id, - mock_close_connection, - mock_open_connection, - mock_init): - # Test case where ScVolume instanceId is None - mock_get_id.side_effect = ['64702.3494', None] - res = self.scapi.map_volume(self.VOLUME, - self.SCSERVER) - self.assertFalse(mock_post.called) - self.assertFalse(mock_first_result.called) - self.assertIsNone(res, 'None expected') - - @mock.patch.object(storagecenter_api.HttpClient, - 'post', - return_value=RESPONSE_204) - @mock.patch.object(storagecenter_api.SCApi, - 
'_find_mapping_profiles', - return_value=[]) - def test_map_volume_failure(self, - mock_find_mapping_profiles, - mock_post, - mock_close_connection, - mock_open_connection, - mock_init): - # Test case where mapping volume to server fails - res = self.scapi.map_volume(self.VOLUME, - self.SCSERVER) - self.assertTrue(mock_find_mapping_profiles.called) - self.assertTrue(mock_post.called) - self.assertIsNone(res, 'None expected') - - @mock.patch.object(storagecenter_api.HttpClient, - 'delete', - return_value=RESPONSE_200) - @mock.patch.object(storagecenter_api.SCApi, - '_find_mapping_profiles', - return_value=MAP_PROFILES) - @mock.patch.object(storagecenter_api.SCApi, - '_get_json', - return_value={'result': True}) - def test_unmap_volume(self, - mock_get_json, - mock_find_mapping_profiles, - mock_delete, - mock_close_connection, - mock_open_connection, - mock_init): - res = self.scapi.unmap_volume(self.VOLUME, - self.SCSERVER) - self.assertTrue(mock_find_mapping_profiles.called) - self.assertTrue(mock_delete.called) - self.assertTrue(res) - - @mock.patch.object(storagecenter_api.SCApi, - '_find_mapping_profiles', - return_value=MAP_PROFILES) - @mock.patch.object(storagecenter_api.HttpClient, - 'delete', - return_value=RESPONSE_204) - def test_unmap_volume_failure(self, - mock_delete, - mock_find_mapping_profiles, - mock_close_connection, - mock_open_connection, - mock_init): - res = self.scapi.unmap_volume(self.VOLUME, - self.SCSERVER) - self.assertTrue(mock_find_mapping_profiles.called) - self.assertTrue(mock_delete.called) - self.assertFalse(res) - - @mock.patch.object(storagecenter_api.SCApi, - '_find_mapping_profiles', - return_value=[]) - def test_unmap_volume_no_map_profile(self, - mock_find_mapping_profiles, - mock_close_connection, - mock_open_connection, - mock_init): - res = self.scapi.unmap_volume(self.VOLUME, - self.SCSERVER) - self.assertTrue(mock_find_mapping_profiles.called) - self.assertTrue(res) - - @mock.patch.object(storagecenter_api.HttpClient, - 
'delete', - return_value=RESPONSE_204) - @mock.patch.object(storagecenter_api.SCApi, - '_find_mapping_profiles', - return_value=MAP_PROFILES) - def test_unmap_volume_del_fail(self, - mock_find_mapping_profiles, - mock_delete, - mock_close_connection, - mock_open_connection, - mock_init): - res = self.scapi.unmap_volume(self.VOLUME, - self.SCSERVER) - self.assertTrue(mock_find_mapping_profiles.called) - self.assertTrue(mock_delete.called) - self.assertFalse(res, False) - - @mock.patch.object(storagecenter_api.SCApi, - '_get_id') - @mock.patch.object(storagecenter_api.HttpClient, - 'delete', - return_value=RESPONSE_200) - @mock.patch.object(storagecenter_api.SCApi, - '_find_mapping_profiles', - return_value=MAP_PROFILES) - def test_unmap_volume_no_vol_id(self, - mock_find_mapping_profiles, - mock_delete, - mock_get_id, - mock_close_connection, - mock_open_connection, - mock_init): - # Test case where ScVolume instanceId = None - mock_get_id.side_effect = [None, '64702.47'] - res = self.scapi.unmap_volume(self.VOLUME, - self.SCSERVER) - self.assertFalse(mock_find_mapping_profiles.called) - self.assertFalse(mock_delete.called) - self.assertTrue(res) - - @mock.patch.object(storagecenter_api.SCApi, - '_get_id') - @mock.patch.object(storagecenter_api.HttpClient, - 'delete', - return_value=RESPONSE_200) - @mock.patch.object(storagecenter_api.SCApi, - '_find_mapping_profiles', - return_value=MAP_PROFILES) - def test_unmap_volume_no_server_id(self, - mock_find_mapping_profiles, - mock_delete, - mock_get_id, - mock_close_connection, - mock_open_connection, - mock_init): - # Test case where ScVolume instanceId = None - mock_get_id.side_effect = ['64702.3494', None] - res = self.scapi.unmap_volume(self.VOLUME, - self.SCSERVER) - self.assertFalse(mock_find_mapping_profiles.called) - self.assertFalse(mock_delete.called) - self.assertTrue(res) - - @mock.patch.object(storagecenter_api.SCApi, - '_get_json', - return_value=[{'a': 1}, {'a': 2}]) - 
@mock.patch.object(storagecenter_api.HttpClient, - 'get', - return_value=RESPONSE_200) - def test_find_controller_port_iscsi_config(self, - mock_get, - mock_get_json, - mock_close_connection, - mock_open_connection, - mock_init): - # Not much to test here. Just make sure we call our stuff and - # that we return the first item returned to us. - res = self.scapi._find_controller_port_iscsi_config('guid') - self.assertTrue(mock_get.called) - self.assertTrue(mock_get_json.called) - self.assertEqual({'a': 1}, res) - - @mock.patch.object(storagecenter_api.HttpClient, - 'get', - return_value=RESPONSE_400) - def test_find_controller_port_iscsi_config_err(self, - mock_get, - mock_close_connection, - mock_open_connection, - mock_init): - res = self.scapi._find_controller_port_iscsi_config('guid') - self.assertTrue(mock_get.called) - self.assertIsNone(res) - - @mock.patch.object(storagecenter_api.SCApi, - '_get_json', - return_value=STRG_USAGE) - @mock.patch.object(storagecenter_api.HttpClient, - 'get', - return_value=RESPONSE_200) - def test_get_storage_usage(self, - mock_get, - mock_get_json, - mock_close_connection, - mock_open_connection, - mock_init): - res = self.scapi.get_storage_usage() - self.assertTrue(mock_get.called) - self.assertTrue(mock_get_json.called) - self.assertEqual(self.STRG_USAGE, res, 'Unexpected ScStorageUsage') - - @mock.patch.object(storagecenter_api.HttpClient, - 'get', - return_value=RESPONSE_204) - def test_get_storage_usage_no_ssn(self, - mock_get, - mock_close_connection, - mock_open_connection, - mock_init): - # Test case where SSN is none - self.scapi.ssn = None - res = self.scapi.get_storage_usage() - self.scapi.ssn = 12345 - self.assertFalse(mock_get.called) - self.assertIsNone(res, 'None expected') - - @mock.patch.object(storagecenter_api.HttpClient, - 'get', - return_value=RESPONSE_204) - # Test case where get of Storage Usage fails - def test_get_storage_usage_failure(self, - mock_get, - mock_close_connection, - mock_open_connection, - 
mock_init): - res = self.scapi.get_storage_usage() - self.assertTrue(mock_get.called) - self.assertIsNone(res, 'None expected') - - @mock.patch.object(storagecenter_api.SCApi, - '_first_result', - return_value=RPLAY) - @mock.patch.object(storagecenter_api.HttpClient, - 'post', - return_value=RESPONSE_200) - def test_create_replay(self, - mock_post, - mock_first_result, - mock_close_connection, - mock_open_connection, - mock_init): - res = self.scapi.create_replay(self.VOLUME, - 'Test Replay', - 60) - self.assertTrue(mock_post.called) - self.assertTrue(mock_first_result.called) - self.assertEqual(self.RPLAY, res, 'Unexpected ScReplay') - - @mock.patch.object(storagecenter_api.SCApi, - '_first_result', - return_value=RPLAY) - @mock.patch.object(storagecenter_api.SCApi, - '_init_volume') - @mock.patch.object(storagecenter_api.SCApi, - 'get_volume') - @mock.patch.object(storagecenter_api.HttpClient, - 'post', - return_value=RESPONSE_200) - def test_create_replay_inact_vol(self, - mock_post, - mock_get_volume, - mock_init_volume, - mock_first_result, - mock_close_connection, - mock_open_connection, - mock_init): - # Test case where the specified volume is inactive - mock_get_volume.return_value = self.VOLUME - res = self.scapi.create_replay(self.INACTIVE_VOLUME, - 'Test Replay', - 60) - self.assertTrue(mock_post.called) - mock_init_volume.assert_called_once_with(self.INACTIVE_VOLUME) - self.assertTrue(mock_first_result.called) - self.assertEqual(self.RPLAY, res, 'Unexpected ScReplay') - - @mock.patch.object(storagecenter_api.SCApi, - '_init_volume') - @mock.patch.object(storagecenter_api.SCApi, - 'get_volume') - def test_create_replay_inact_vol_init_fail( - self, mock_get_volume, mock_init_volume, mock_close_connection, - mock_open_connection, mock_init): - # Test case where the specified volume is inactive - mock_get_volume.return_value = self.INACTIVE_VOLUME - self.assertRaises(exception.VolumeBackendAPIException, - self.scapi.create_replay, - self.INACTIVE_VOLUME, 
'Test Replay', 60) - mock_init_volume.assert_called_once_with(self.INACTIVE_VOLUME) - - @mock.patch.object(storagecenter_api.SCApi, - '_first_result', - return_value=RPLAY) - @mock.patch.object(storagecenter_api.HttpClient, - 'post', - return_value=RESPONSE_200) - def test_create_replay_no_expire(self, - mock_post, - mock_first_result, - mock_close_connection, - mock_open_connection, - mock_init): - res = self.scapi.create_replay(self.VOLUME, - 'Test Replay', - 0) - self.assertTrue(mock_post.called) - self.assertTrue(mock_first_result.called) - self.assertEqual(self.RPLAY, res, 'Unexpected ScReplay') - - @mock.patch.object(storagecenter_api.HttpClient, - 'post', - return_value=RESPONSE_200) - def test_create_replay_no_volume(self, - mock_post, - mock_close_connection, - mock_open_connection, - mock_init): - # Test case where no ScVolume is specified - res = self.scapi.create_replay(None, - 'Test Replay', - 60) - self.assertIsNone(res, 'Expected None') - - @mock.patch.object(storagecenter_api.HttpClient, - 'post', - return_value=RESPONSE_204) - def test_create_replay_failure(self, - mock_post, - mock_close_connection, - mock_open_connection, - mock_init): - # Test case where create ScReplay fails - res = self.scapi.create_replay(self.VOLUME, - 'Test Replay', - 60) - self.assertTrue(mock_post.called) - self.assertIsNone(res, 'Expected None') - - @mock.patch.object(storagecenter_api.SCApi, - '_get_json', - return_value=RPLAYS) - @mock.patch.object(storagecenter_api.HttpClient, - 'get', - return_value=RESPONSE_200) - def test_find_replay(self, - mock_post, - mock_get_json, - mock_close_connection, - mock_open_connection, - mock_init): - res = self.scapi.find_replay(self.VOLUME, - u'Cinder Test Replay012345678910') - self.assertTrue(mock_post.called) - self.assertTrue(mock_get_json.called) - self.assertEqual(self.TST_RPLAY, res, 'Unexpected ScReplay') - - @mock.patch.object(storagecenter_api.SCApi, - '_get_json', - return_value=[]) - 
@mock.patch.object(storagecenter_api.HttpClient, - 'get', - return_value=RESPONSE_200) - def test_find_replay_no_replays(self, - mock_post, - mock_get_json, - mock_close_connection, - mock_open_connection, - mock_init): - # Test case where no replays are found - res = self.scapi.find_replay(self.VOLUME, - u'Cinder Test Replay012345678910') - self.assertTrue(mock_post.called) - self.assertTrue(mock_get_json.called) - self.assertIsNone(res, 'Expected None') - - @mock.patch.object(storagecenter_api.SCApi, - '_get_json', - return_value=None) - @mock.patch.object(storagecenter_api.HttpClient, - 'get', - return_value=RESPONSE_204) - def test_find_replay_failure(self, - mock_post, - mock_get_json, - mock_close_connection, - mock_open_connection, - mock_init): - # Test case where None is returned for replays - res = self.scapi.find_replay(self.VOLUME, - u'Cinder Test Replay012345678910') - self.assertTrue(mock_post.called) - self.assertTrue(mock_get_json.called) - self.assertIsNone(res, 'Expected None') - - @mock.patch.object(storagecenter_api.SCApi, - 'find_replay', - return_value=RPLAYS) - @mock.patch.object(storagecenter_api.HttpClient, - 'post', - return_value=RESPONSE_204) - def test_delete_replay(self, - mock_post, - mock_find_replay, - mock_close_connection, - mock_open_connection, - mock_init): - replayId = u'Cinder Test Replay012345678910' - res = self.scapi.delete_replay(self.VOLUME, - replayId) - self.assertTrue(mock_post.called) - mock_find_replay.assert_called_once_with(self.VOLUME, replayId) - self.assertTrue(res, 'Expected True') - - @mock.patch.object(storagecenter_api.SCApi, - 'find_replay', - return_value=None) - @mock.patch.object(storagecenter_api.HttpClient, - 'post', - return_value=RESPONSE_204) - def test_delete_replay_no_replay(self, - mock_post, - mock_find_replay, - mock_close_connection, - mock_open_connection, - mock_init): - # Test case where specified ScReplay does not exist - replayId = u'Cinder Test Replay012345678910' - res = 
self.scapi.delete_replay(self.VOLUME, - replayId) - self.assertFalse(mock_post.called) - mock_find_replay.assert_called_once_with(self.VOLUME, replayId) - self.assertTrue(res, 'Expected True') - - @mock.patch.object(storagecenter_api.SCApi, - 'find_replay', - return_value=TST_RPLAY) - @mock.patch.object(storagecenter_api.HttpClient, - 'post', - return_value=RESPONSE_400) - def test_delete_replay_failure(self, - mock_post, - mock_find_replay, - mock_close_connection, - mock_open_connection, - mock_init): - # Test case where delete ScReplay results in an error - replayId = u'Cinder Test Replay012345678910' - res = self.scapi.delete_replay(self.VOLUME, - replayId) - self.assertTrue(mock_post.called) - mock_find_replay.assert_called_once_with(self.VOLUME, replayId) - self.assertFalse(res, 'Expected False') - - @mock.patch.object(storagecenter_api.SCApi, - '_first_result', - return_value=VOLUME) - @mock.patch.object(storagecenter_api.SCApi, - '_find_volume_folder', - return_value=FLDR) - @mock.patch.object(storagecenter_api.HttpClient, - 'post', - return_value=RESPONSE_200) - def test_create_view_volume(self, - mock_post, - mock_find_volume_folder, - mock_first_result, - mock_close_connection, - mock_open_connection, - mock_init): - vol_name = u'Test_create_vol' - res = self.scapi.create_view_volume( - vol_name, self.TST_RPLAY, None, None, None, None) - self.assertTrue(mock_post.called) - mock_find_volume_folder.assert_called_once_with(True) - self.assertTrue(mock_first_result.called) - self.assertEqual(self.VOLUME, res, 'Unexpected ScVolume') - - @mock.patch.object(storagecenter_api.SCApi, - '_first_result', - return_value=VOLUME) - @mock.patch.object(storagecenter_api.SCApi, - '_find_volume_folder', - return_value=None) - @mock.patch.object(storagecenter_api.HttpClient, - 'post', - return_value=RESPONSE_200) - def test_create_view_volume_create_fldr(self, - mock_post, - mock_find_volume_folder, - mock_first_result, - mock_close_connection, - mock_open_connection, - 
mock_init): - # Test case where volume folder does not exist and must be created - vol_name = u'Test_create_vol' - res = self.scapi.create_view_volume( - vol_name, self.TST_RPLAY, None, None, None, None) - self.assertTrue(mock_post.called) - mock_find_volume_folder.assert_called_once_with(True) - self.assertTrue(mock_first_result.called) - self.assertEqual(self.VOLUME, res, 'Unexpected ScVolume') - - @mock.patch.object(storagecenter_api.SCApi, - '_first_result', - return_value=VOLUME) - @mock.patch.object(storagecenter_api.SCApi, - '_find_volume_folder', - return_value=None) - @mock.patch.object(storagecenter_api.HttpClient, - 'post', - return_value=RESPONSE_200) - def test_create_view_volume_no_vol_fldr(self, - mock_post, - mock_find_volume_folder, - mock_first_result, - mock_close_connection, - mock_open_connection, - mock_init): - # Test case where volume folder does not exist and cannot be created - vol_name = u'Test_create_vol' - res = self.scapi.create_view_volume( - vol_name, self.TST_RPLAY, None, None, None, None) - self.assertTrue(mock_post.called) - mock_find_volume_folder.assert_called_once_with(True) - self.assertTrue(mock_first_result.called) - self.assertEqual(self.VOLUME, res, 'Unexpected ScVolume') - - @mock.patch.object(storagecenter_api.SCApi, - '_find_volume_folder', - return_value=FLDR) - @mock.patch.object(storagecenter_api.HttpClient, - 'post', - return_value=RESPONSE_204) - def test_create_view_volume_failure(self, - mock_post, - mock_find_volume_folder, - mock_close_connection, - mock_open_connection, - mock_init): - # Test case where view volume create fails - vol_name = u'Test_create_vol' - res = self.scapi.create_view_volume( - vol_name, self.TST_RPLAY, None, None, None, None) - self.assertTrue(mock_post.called) - mock_find_volume_folder.assert_called_once_with(True) - self.assertIsNone(res, 'Expected None') - - @mock.patch.object(storagecenter_api.SCApi, - '_first_result') - @mock.patch.object(storagecenter_api.SCApi, - 
'_find_volume_folder') - @mock.patch.object(storagecenter_api.HttpClient, - 'post') - @mock.patch.object(storagecenter_api.SCApi, - '_find_qos_profile') - @mock.patch.object(storagecenter_api.SCApi, - '_find_replay_profiles') - @mock.patch.object(storagecenter_api.SCApi, - 'update_datareduction_profile') - def test_create_view_volume_with_profiles( - self, mock_update_datareduction_profile, mock_find_replay_profiles, - mock_find_qos_profile, mock_post, mock_find_volume_folder, - mock_first_result, mock_close_connection, mock_open_connection, - mock_init): - mock_find_replay_profiles.return_value = (['12345.4'], []) - mock_first_result.return_value = {'name': 'name'} - mock_post.return_value = self.RESPONSE_200 - mock_find_volume_folder.return_value = {'instanceId': '12345.200'} - mock_find_qos_profile.side_effect = [{'instanceId': '12345.2'}, - {'instanceId': '12345.3'}] - screplay = {'instanceId': '12345.100.1'} - res = self.scapi.create_view_volume( - 'name', screplay, 'replay_profile_string', 'volume_qos', - 'group_qos', 'datareductionprofile') - expected_payload = {'Name': 'name', - 'Notes': 'Created by Dell EMC Cinder Driver', - 'VolumeFolder': '12345.200', - 'ReplayProfileList': ['12345.4'], - 'VolumeQosProfile': '12345.2', - 'GroupQosProfile': '12345.3'} - mock_find_volume_folder.assert_called_once_with(True) - mock_post.assert_called_once_with( - 'StorageCenter/ScReplay/12345.100.1/CreateView', expected_payload, - True) - mock_update_datareduction_profile.assert_called_once_with( - {'name': 'name'}, 'datareductionprofile') - self.assertEqual({'name': 'name'}, res) - - @mock.patch.object(storagecenter_api.SCApi, - '_first_result') - @mock.patch.object(storagecenter_api.SCApi, - '_find_volume_folder') - @mock.patch.object(storagecenter_api.HttpClient, - 'post') - @mock.patch.object(storagecenter_api.SCApi, - '_find_qos_profile') - @mock.patch.object(storagecenter_api.SCApi, - '_find_replay_profiles') - @mock.patch.object(storagecenter_api.SCApi, - 
'update_datareduction_profile') - def test_create_view_volume_with_profiles_no_dr( - self, mock_update_datareduction_profile, mock_find_replay_profiles, - mock_find_qos_profile, mock_post, mock_find_volume_folder, - mock_first_result, mock_close_connection, mock_open_connection, - mock_init): - mock_find_replay_profiles.return_value = (['12345.4'], []) - mock_first_result.return_value = {'name': 'name'} - mock_post.return_value = self.RESPONSE_200 - mock_find_volume_folder.return_value = {'instanceId': '12345.200'} - mock_find_qos_profile.side_effect = [{'instanceId': '12345.2'}, - {'instanceId': '12345.3'}] - screplay = {'instanceId': '12345.100.1'} - res = self.scapi.create_view_volume('name', screplay, - 'replay_profile_string', - 'volume_qos', - 'group_qos', - None) - expected_payload = {'Name': 'name', - 'Notes': 'Created by Dell EMC Cinder Driver', - 'VolumeFolder': '12345.200', - 'ReplayProfileList': ['12345.4'], - 'VolumeQosProfile': '12345.2', - 'GroupQosProfile': '12345.3'} - mock_find_volume_folder.assert_called_once_with(True) - mock_post.assert_called_once_with( - 'StorageCenter/ScReplay/12345.100.1/CreateView', expected_payload, - True) - mock_update_datareduction_profile.assert_not_called() - self.assertEqual({'name': 'name'}, res) - - @mock.patch.object(storagecenter_api.SCApi, - '_first_result') - @mock.patch.object(storagecenter_api.SCApi, - '_find_volume_folder') - @mock.patch.object(storagecenter_api.HttpClient, - 'post') - @mock.patch.object(storagecenter_api.SCApi, - '_find_qos_profile') - def test_create_view_volume_with_profiles_no_replayprofiles( - self, mock_find_qos_profile, mock_post, mock_find_volume_folder, - mock_first_result, mock_close_connection, mock_open_connection, - mock_init): - mock_first_result.return_value = {'name': 'name'} - mock_post.return_value = self.RESPONSE_200 - mock_find_volume_folder.return_value = {'instanceId': '12345.200'} - mock_find_qos_profile.side_effect = [{'instanceId': '12345.2'}, - {'instanceId': 
'12345.3'}] - screplay = {'instanceId': '12345.100.1'} - res = self.scapi.create_view_volume('name', screplay, - None, - 'volume_qos', - 'group_qos', - None) - expected_payload = {'Name': 'name', - 'Notes': 'Created by Dell EMC Cinder Driver', - 'VolumeFolder': '12345.200', - 'VolumeQosProfile': '12345.2', - 'GroupQosProfile': '12345.3'} - mock_find_volume_folder.assert_called_once_with(True) - mock_post.assert_called_once_with( - 'StorageCenter/ScReplay/12345.100.1/CreateView', expected_payload, - True) - self.assertEqual({'name': 'name'}, res) - - @mock.patch.object(storagecenter_api.SCApi, - '_find_volume_folder') - @mock.patch.object(storagecenter_api.SCApi, - '_find_qos_profile') - @mock.patch.object(storagecenter_api.SCApi, - '_find_replay_profiles') - def test_create_view_volume_with_profiles_not_found( - self, mock_find_replay_profiles, mock_find_qos_profile, - mock_find_volume_folder, mock_close_connection, - mock_open_connection, mock_init): - mock_find_replay_profiles.return_value = (['12345.4'], []) - mock_find_volume_folder.return_value = {'instanceId': '12345.200'} - # Our qos profile isn't found. 
- mock_find_qos_profile.return_value = None - screplay = {'instanceId': '12345.100.1'} - self.assertRaises(exception.VolumeBackendAPIException, - self.scapi.create_view_volume, - 'name', screplay, 'replay_profile_string', - 'volume_qos', 'group_qos', 'datareductionprofile') - - @mock.patch.object(storagecenter_api.HttpClient, - 'post') - @mock.patch.object(storagecenter_api.HttpClient, - 'get') - @mock.patch.object(storagecenter_api.SCApi, - '_get_json') - def test__expire_all_replays(self, - mock_get_json, - mock_get, - mock_post, - mock_close_connection, - mock_open_connection, - mock_init): - scvolume = {'instanceId': '12345.1'} - mock_get.return_value = self.RESPONSE_200 - mock_get_json.return_value = [{'instanceId': '12345.100', - 'active': False}, - {'instanceId': '12345.101', - 'active': True}] - self.scapi._expire_all_replays(scvolume) - mock_get.assert_called_once_with( - 'StorageCenter/ScVolume/12345.1/ReplayList') - mock_post.assert_called_once_with( - 'StorageCenter/ScReplay/12345.100/Expire', {}, True) - - @mock.patch.object(storagecenter_api.HttpClient, - 'post') - @mock.patch.object(storagecenter_api.HttpClient, - 'get') - def test__expire_all_replays_error(self, - mock_get, - mock_post, - mock_close_connection, - mock_open_connection, - mock_init): - scvolume = {'instanceId': '12345.1'} - mock_get.return_value = self.RESPONSE_400 - self.scapi._expire_all_replays(scvolume) - mock_get.assert_called_once_with( - 'StorageCenter/ScVolume/12345.1/ReplayList') - self.assertFalse(mock_post.called) - - @mock.patch.object(storagecenter_api.HttpClient, - 'post') - @mock.patch.object(storagecenter_api.HttpClient, - 'get') - @mock.patch.object(storagecenter_api.SCApi, - '_get_json') - def test__expire_all_replays_no_replays(self, - mock_get_json, - mock_get, - mock_post, - mock_close_connection, - mock_open_connection, - mock_init): - scvolume = {'instanceId': '12345.1'} - mock_get.return_value = self.RESPONSE_200 - mock_get_json.return_value = None - 
self.scapi._expire_all_replays(scvolume) - mock_get.assert_called_once_with( - 'StorageCenter/ScVolume/12345.1/ReplayList') - self.assertFalse(mock_post.called) - - @mock.patch.object(storagecenter_api.HttpClient, - 'get') - @mock.patch.object(storagecenter_api.SCApi, - '_get_json') - def test__wait_for_cmm( - self, - mock_get_json, - mock_get, - mock_close_connection, - mock_open_connection, - mock_init): - cmm = {'instanceId': '12345.300'} - scvolume = {'name': fake.VOLUME2_ID, - 'instanceId': '12345.1'} - replayid = '12345.200' - mock_get.return_value = self.RESPONSE_200 - mock_get_json.return_value = {'instanceId': '12345.300', - 'state': 'Finished'} - ret = self.scapi._wait_for_cmm(cmm, scvolume, replayid) - self.assertTrue(ret) - mock_get_json.return_value['state'] = 'Erred' - ret = self.scapi._wait_for_cmm(cmm, scvolume, replayid) - self.assertFalse(ret) - mock_get_json.return_value['state'] = 'Paused' - ret = self.scapi._wait_for_cmm(cmm, scvolume, replayid) - self.assertFalse(ret) - - @mock.patch.object(storagecenter_api.HttpClient, - 'get') - @mock.patch.object(storagecenter_api.SCApi, - 'find_replay') - def test__wait_for_cmm_404( - self, - mock_find_replay, - mock_get, - mock_close_connection, - mock_open_connection, - mock_init): - cmm = {'instanceId': '12345.300'} - scvolume = {'name': fake.VOLUME2_ID, - 'instanceId': '12345.1'} - replayid = '12345.200' - mock_get.return_value = self.RESPONSE_404 - mock_find_replay.return_value = {'instanceId': '12345.200'} - ret = self.scapi._wait_for_cmm(cmm, scvolume, replayid) - self.assertTrue(ret) - - @mock.patch.object(storagecenter_api.HttpClient, - 'get') - @mock.patch.object(storagecenter_api.SCApi, - 'find_replay') - @mock.patch.object(eventlet, 'sleep') - def test__wait_for_cmm_timeout( - self, - mock_sleep, - mock_find_replay, - mock_get, - mock_close_connection, - mock_open_connection, - mock_init): - cmm = {'instanceId': '12345.300'} - scvolume = {'name': fake.VOLUME2_ID, - 'instanceId': '12345.1'} - 
replayid = '12345.200' - mock_get.return_value = self.RESPONSE_404 - mock_find_replay.return_value = None - ret = self.scapi._wait_for_cmm(cmm, scvolume, replayid) - self.assertFalse(ret) - self.assertEqual(21, mock_sleep.call_count) - - @mock.patch.object(storagecenter_api.SCApi, - 'create_volume') - @mock.patch.object(storagecenter_api.SCApi, - 'create_replay') - @mock.patch.object(uuid, 'uuid4') - @mock.patch.object(storagecenter_api.HttpClient, - 'post') - @mock.patch.object(storagecenter_api.SCApi, - '_get_json') - @mock.patch.object(storagecenter_api.SCApi, - '_wait_for_cmm') - @mock.patch.object(storagecenter_api.SCApi, - '_expire_all_replays') - def test_create_cloned_volume( - self, - mock_expire_all_replays, - mock_wait_for_cmm, - mock_get_json, - mock_post, - mock_uuid4, - mock_create_replay, - mock_create_volume, - mock_close_connection, - mock_open_connection, - mock_init): - # our state. - vol_name = fake.VOLUME_ID - scvolume = {'name': fake.VOLUME2_ID, - 'instanceId': '12345.1', - 'configuredSize': '1073741824 Bytes'} - newvol = {'instanceId': '12345.2', - 'configuredSize': '1073741824 Bytes'} - storage_profile = 'profile1' - replay_profile_list = ['profile2'] - volume_qos = 'vqos' - group_qos = 'gqos' - dr_profile = 'dqos' - cmm = {'state': 'Running'} - - # our call returns - replayuuid = uuid.uuid4() - mock_uuid4.return_value = replayuuid - mock_post.return_value = self.RESPONSE_200 - mock_get_json.return_value = cmm - mock_create_replay.return_value = {'instanceId': '12345.100'} - mock_create_volume.return_value = newvol - mock_wait_for_cmm.return_value = True - - # our call - res = self.scapi.create_cloned_volume( - vol_name, scvolume, storage_profile, replay_profile_list, - volume_qos, group_qos, dr_profile) - - # assert expected - mock_create_volume.assert_called_once_with( - vol_name, 1, storage_profile, replay_profile_list, - volume_qos, group_qos, dr_profile) - mock_create_replay.assert_called_once_with( - scvolume, str(replayuuid), 60) - 
expected_payload = {} - expected_payload['CopyReplays'] = True - expected_payload['DestinationVolume'] = '12345.2' - expected_payload['SourceVolume'] = '12345.1' - expected_payload['StorageCenter'] = 12345 - expected_payload['Priority'] = 'High' - mock_post.assert_called_once_with( - 'StorageCenter/ScCopyMirrorMigrate/Copy', expected_payload, True) - mock_wait_for_cmm.assert_called_once_with(cmm, newvol, str(replayuuid)) - mock_expire_all_replays.assert_called_once_with(newvol) - self.assertEqual(newvol, res) - - @mock.patch.object(storagecenter_api.SCApi, - 'create_volume') - def test_create_cloned_volume_create_vol_fail( - self, - mock_create_volume, - mock_close_connection, - mock_open_connection, - mock_init): - # our state. - vol_name = fake.VOLUME_ID - scvolume = {'name': fake.VOLUME2_ID, - 'instanceId': '12345.1', - 'configuredSize': '1073741824 Bytes'} - newvol = None - storage_profile = 'profile1' - replay_profile_list = ['profile2'] - volume_qos = 'vqos' - group_qos = 'gqos' - dr_profile = 'dqos' - - # our call returns - mock_create_volume.return_value = newvol - - # our call - res = self.scapi.create_cloned_volume( - vol_name, scvolume, storage_profile, replay_profile_list, - volume_qos, group_qos, dr_profile) - - # assert expected - mock_create_volume.assert_called_once_with( - vol_name, 1, storage_profile, replay_profile_list, - volume_qos, group_qos, dr_profile) - self.assertIsNone(res) - - @mock.patch.object(storagecenter_api.SCApi, - 'create_volume') - @mock.patch.object(storagecenter_api.SCApi, - 'create_replay') - @mock.patch.object(uuid, 'uuid4') - @mock.patch.object(storagecenter_api.SCApi, - 'delete_volume') - def test_create_cloned_volume_replay_fail( - self, - mock_delete_volume, - mock_uuid4, - mock_create_replay, - mock_create_volume, - mock_close_connection, - mock_open_connection, - mock_init): - # our state. 
- vol_name = fake.VOLUME_ID - scvolume = {'name': fake.VOLUME2_ID, - 'instanceId': '12345.1', - 'configuredSize': '1073741824 Bytes'} - newvol = {'instanceId': '12345.2', - 'configuredSize': '1073741824 Bytes'} - storage_profile = 'profile1' - replay_profile_list = ['profile2'] - volume_qos = 'vqos' - group_qos = 'gqos' - dr_profile = 'dqos' - - # our call returns - replayuuid = uuid.uuid4() - mock_uuid4.return_value = replayuuid - mock_create_replay.return_value = None - mock_create_volume.return_value = newvol - - # our call - self.assertRaises(exception.VolumeBackendAPIException, - self.scapi.create_cloned_volume, vol_name, - scvolume, storage_profile, replay_profile_list, - volume_qos, group_qos, dr_profile) - - # assert expected - mock_create_volume.assert_called_once_with( - vol_name, 1, storage_profile, replay_profile_list, - volume_qos, group_qos, dr_profile) - mock_create_replay.assert_called_once_with( - scvolume, str(replayuuid), 60) - mock_delete_volume.assert_called_once_with(vol_name, '12345.2') - - @mock.patch.object(storagecenter_api.SCApi, - 'create_volume') - @mock.patch.object(storagecenter_api.SCApi, - 'create_replay') - @mock.patch.object(uuid, 'uuid4') - @mock.patch.object(storagecenter_api.HttpClient, - 'post') - @mock.patch.object(storagecenter_api.SCApi, - 'delete_volume') - def test_create_cloned_volume_copy_fail( - self, - mock_delete_volume, - mock_post, - mock_uuid4, - mock_create_replay, - mock_create_volume, - mock_close_connection, - mock_open_connection, - mock_init): - # our state. 
- vol_name = fake.VOLUME_ID - scvolume = {'name': fake.VOLUME2_ID, - 'instanceId': '12345.1', - 'configuredSize': '1073741824 Bytes'} - newvol = {'instanceId': '12345.2', - 'configuredSize': '1073741824 Bytes'} - storage_profile = 'profile1' - replay_profile_list = ['profile2'] - volume_qos = 'vqos' - group_qos = 'gqos' - dr_profile = 'dqos' - - # our call returns - replayuuid = uuid.uuid4() - mock_uuid4.return_value = replayuuid - mock_post.return_value = self.RESPONSE_400 - mock_create_replay.return_value = {'instanceId': '12345.100'} - mock_create_volume.return_value = newvol - - # our call - self.assertRaises(exception.VolumeBackendAPIException, - self.scapi.create_cloned_volume, vol_name, - scvolume, storage_profile, replay_profile_list, - volume_qos, group_qos, dr_profile) - - # assert expected - mock_create_volume.assert_called_once_with( - vol_name, 1, storage_profile, replay_profile_list, - volume_qos, group_qos, dr_profile) - mock_create_replay.assert_called_once_with( - scvolume, str(replayuuid), 60) - expected_payload = {} - expected_payload['CopyReplays'] = True - expected_payload['DestinationVolume'] = '12345.2' - expected_payload['SourceVolume'] = '12345.1' - expected_payload['StorageCenter'] = 12345 - expected_payload['Priority'] = 'High' - mock_post.assert_called_once_with( - 'StorageCenter/ScCopyMirrorMigrate/Copy', expected_payload, True) - mock_delete_volume.assert_called_once_with(vol_name, '12345.2') - - @mock.patch.object(storagecenter_api.SCApi, - 'create_volume') - @mock.patch.object(storagecenter_api.SCApi, - 'create_replay') - @mock.patch.object(uuid, 'uuid4') - @mock.patch.object(storagecenter_api.HttpClient, - 'post') - @mock.patch.object(storagecenter_api.SCApi, - '_get_json') - @mock.patch.object(storagecenter_api.SCApi, - 'delete_volume') - def test_create_cloned_volume_cmm_erred( - self, - mock_delete_volume, - mock_get_json, - mock_post, - mock_uuid4, - mock_create_replay, - mock_create_volume, - mock_close_connection, - 
mock_open_connection, - mock_init): - # our state. - vol_name = fake.VOLUME_ID - scvolume = {'name': fake.VOLUME2_ID, - 'instanceId': '12345.1', - 'configuredSize': '1073741824 Bytes'} - newvol = {'instanceId': '12345.2', - 'configuredSize': '1073741824 Bytes'} - storage_profile = 'profile1' - replay_profile_list = ['profile2'] - volume_qos = 'vqos' - group_qos = 'gqos' - dr_profile = 'dqos' - cmm = {'state': 'Erred'} - - # our call returns - replayuuid = uuid.uuid4() - mock_uuid4.return_value = replayuuid - mock_post.return_value = self.RESPONSE_200 - mock_get_json.return_value = cmm - mock_create_replay.return_value = {'instanceId': '12345.100'} - mock_create_volume.return_value = newvol - - # our call - self.assertRaises(exception.VolumeBackendAPIException, - self.scapi.create_cloned_volume, vol_name, - scvolume, storage_profile, replay_profile_list, - volume_qos, group_qos, dr_profile) - - # assert expected - mock_create_volume.assert_called_once_with( - vol_name, 1, storage_profile, replay_profile_list, - volume_qos, group_qos, dr_profile) - mock_create_replay.assert_called_once_with( - scvolume, str(replayuuid), 60) - expected_payload = {} - expected_payload['CopyReplays'] = True - expected_payload['DestinationVolume'] = '12345.2' - expected_payload['SourceVolume'] = '12345.1' - expected_payload['StorageCenter'] = 12345 - expected_payload['Priority'] = 'High' - mock_post.assert_called_once_with( - 'StorageCenter/ScCopyMirrorMigrate/Copy', expected_payload, True) - mock_delete_volume.assert_called_once_with(vol_name, '12345.2') - - @mock.patch.object(storagecenter_api.SCApi, - 'create_volume') - @mock.patch.object(storagecenter_api.SCApi, - 'create_replay') - @mock.patch.object(uuid, 'uuid4') - @mock.patch.object(storagecenter_api.HttpClient, - 'post') - @mock.patch.object(storagecenter_api.SCApi, - '_get_json') - @mock.patch.object(storagecenter_api.SCApi, - 'delete_volume') - def test_create_cloned_volume_cmm_paused( - self, - mock_delete_volume, - 
mock_get_json, - mock_post, - mock_uuid4, - mock_create_replay, - mock_create_volume, - mock_close_connection, - mock_open_connection, - mock_init): - # our state. - vol_name = fake.VOLUME_ID - scvolume = {'name': fake.VOLUME2_ID, - 'instanceId': '12345.1', - 'configuredSize': '1073741824 Bytes'} - newvol = {'instanceId': '12345.2', - 'configuredSize': '1073741824 Bytes'} - storage_profile = 'profile1' - replay_profile_list = ['profile2'] - volume_qos = 'vqos' - group_qos = 'gqos' - dr_profile = 'dqos' - cmm = {'state': 'Paused'} - - # our call returns - replayuuid = uuid.uuid4() - mock_uuid4.return_value = replayuuid - mock_post.return_value = self.RESPONSE_200 - mock_get_json.return_value = cmm - mock_create_replay.return_value = {'instanceId': '12345.100'} - mock_create_volume.return_value = newvol - - # our call - self.assertRaises(exception.VolumeBackendAPIException, - self.scapi.create_cloned_volume, vol_name, - scvolume, storage_profile, replay_profile_list, - volume_qos, group_qos, dr_profile) - - # assert expected - mock_create_volume.assert_called_once_with( - vol_name, 1, storage_profile, replay_profile_list, - volume_qos, group_qos, dr_profile) - mock_create_replay.assert_called_once_with( - scvolume, str(replayuuid), 60) - expected_payload = {} - expected_payload['CopyReplays'] = True - expected_payload['DestinationVolume'] = '12345.2' - expected_payload['SourceVolume'] = '12345.1' - expected_payload['StorageCenter'] = 12345 - expected_payload['Priority'] = 'High' - mock_post.assert_called_once_with( - 'StorageCenter/ScCopyMirrorMigrate/Copy', expected_payload, True) - mock_delete_volume.assert_called_once_with(vol_name, '12345.2') - - @mock.patch.object(storagecenter_api.SCApi, - 'create_volume') - @mock.patch.object(storagecenter_api.SCApi, - 'create_replay') - @mock.patch.object(uuid, 'uuid4') - @mock.patch.object(storagecenter_api.HttpClient, - 'post') - @mock.patch.object(storagecenter_api.SCApi, - '_get_json') - 
@mock.patch.object(storagecenter_api.SCApi, - '_wait_for_cmm') - @mock.patch.object(storagecenter_api.SCApi, - 'delete_volume') - def test_create_cloned_volume_cmm_wait_for_cmm_fail( - self, - mock_delete_volume, - mock_wait_for_cmm, - mock_get_json, - mock_post, - mock_uuid4, - mock_create_replay, - mock_create_volume, - mock_close_connection, - mock_open_connection, - mock_init): - # our state. - vol_name = fake.VOLUME_ID - scvolume = {'name': fake.VOLUME2_ID, - 'instanceId': '12345.1', - 'configuredSize': '1073741824 Bytes'} - newvol = {'instanceId': '12345.2', - 'configuredSize': '1073741824 Bytes'} - storage_profile = 'profile1' - replay_profile_list = ['profile2'] - volume_qos = 'vqos' - group_qos = 'gqos' - dr_profile = 'dqos' - cmm = {'state': 'Running'} - - # our call returns - replayuuid = uuid.uuid4() - mock_uuid4.return_value = replayuuid - mock_post.return_value = self.RESPONSE_200 - mock_get_json.return_value = cmm - mock_create_replay.return_value = {'instanceId': '12345.100'} - mock_create_volume.return_value = newvol - mock_wait_for_cmm.return_value = False - - # our call - self.assertRaises(exception.VolumeBackendAPIException, - self.scapi.create_cloned_volume, vol_name, - scvolume, storage_profile, replay_profile_list, - volume_qos, group_qos, dr_profile) - - # assert expected - mock_create_volume.assert_called_once_with( - vol_name, 1, storage_profile, replay_profile_list, - volume_qos, group_qos, dr_profile) - mock_create_replay.assert_called_once_with( - scvolume, str(replayuuid), 60) - expected_payload = {} - expected_payload['CopyReplays'] = True - expected_payload['DestinationVolume'] = '12345.2' - expected_payload['SourceVolume'] = '12345.1' - expected_payload['StorageCenter'] = 12345 - expected_payload['Priority'] = 'High' - mock_post.assert_called_once_with( - 'StorageCenter/ScCopyMirrorMigrate/Copy', expected_payload, True) - mock_wait_for_cmm.assert_called_once_with(cmm, newvol, str(replayuuid)) - 
mock_delete_volume.assert_called_once_with(vol_name, '12345.2') - - @mock.patch.object(storagecenter_api.SCApi, - '_get_json', - return_value=VOLUME) - @mock.patch.object(storagecenter_api.HttpClient, - 'post', - return_value=RESPONSE_200) - def test_expand_volume(self, - mock_post, - mock_get_json, - mock_close_connection, - mock_open_connection, - mock_init): - res = self.scapi.expand_volume(self.VOLUME, 550) - self.assertTrue(mock_post.called) - self.assertTrue(mock_get_json.called) - self.assertEqual(self.VOLUME, res, 'Unexpected ScVolume') - - @mock.patch.object(storagecenter_api.HttpClient, - 'post', - return_value=RESPONSE_204) - def test_expand_volume_failure(self, - mock_post, - mock_close_connection, - mock_open_connection, - mock_init): - res = self.scapi.expand_volume(self.VOLUME, 550) - self.assertTrue(mock_post.called) - self.assertIsNone(res, 'Expected None') - - @mock.patch.object(storagecenter_api.HttpClient, - 'put', - return_value=RESPONSE_200) - def test_rename_volume(self, - mock_put, - mock_close_connection, - mock_open_connection, - mock_init): - res = self.scapi.rename_volume(self.VOLUME, 'newname') - self.assertTrue(mock_put.called) - self.assertTrue(res) - - @mock.patch.object(storagecenter_api.HttpClient, - 'put', - return_value=RESPONSE_400) - def test_rename_volume_failure(self, - mock_put, - mock_close_connection, - mock_open_connection, - mock_init): - res = self.scapi.rename_volume(self.VOLUME, 'newname') - self.assertTrue(mock_put.called) - self.assertFalse(res) - - @mock.patch.object(storagecenter_api.HttpClient, - 'delete', - return_value=RESPONSE_200) - def test_delete_server(self, - mock_delete, - mock_close_connection, - mock_open_connection, - mock_init): - res = self.scapi._delete_server(self.SCSERVER) - self.assertTrue(mock_delete.called) - self.assertIsNone(res, 'Expected None') - - @mock.patch.object(storagecenter_api.HttpClient, - 'delete', - return_value=RESPONSE_200) - def test_delete_server_del_not_allowed(self, - 
mock_delete, - mock_close_connection, - mock_open_connection, - mock_init): - # Test case where delete of ScServer not allowed - res = self.scapi._delete_server(self.SCSERVER_NO_DEL) - self.assertFalse(mock_delete.called) - self.assertIsNone(res, 'Expected None') - - @mock.patch.object(storagecenter_api.SCApi, - '_get_json', - return_value={'test': 'test'}) - @mock.patch.object(storagecenter_api.HttpClient, - 'get', - return_value=RESPONSE_200) - def test_get_user_preferences(self, - mock_get, - mock_get_json, - mock_close_connection, - mock_open_connection, - mock_init): - # Not really testing anything other than the ability to mock, but - # including for completeness. - res = self.scapi._get_user_preferences() - self.assertEqual({'test': 'test'}, res) - - @mock.patch.object(storagecenter_api.HttpClient, - 'get', - return_value=RESPONSE_400) - def test_get_user_preferences_failure(self, - mock_get, - mock_close_connection, - mock_open_connection, - mock_init): - res = self.scapi._get_user_preferences() - self.assertEqual({}, res) - - @mock.patch.object(storagecenter_api.SCApi, - '_get_user_preferences', - return_value=None) - def test_update_storage_profile_noprefs(self, - mock_prefs, - mock_close_connection, - mock_open_connection, - mock_init): - res = self.scapi.update_storage_profile(None, None) - self.assertFalse(res) - - @mock.patch.object(storagecenter_api.SCApi, - '_get_user_preferences', - return_value={'allowStorageProfileSelection': False}) - def test_update_storage_profile_not_allowed(self, - mock_prefs, - mock_close_connection, - mock_open_connection, - mock_init): - LOG = self.mock_object(storagecenter_api, "LOG") - res = self.scapi.update_storage_profile(None, None) - self.assertFalse(res) - self.assertEqual(1, LOG.error.call_count) - - @mock.patch.object(storagecenter_api.SCApi, - '_find_storage_profile', - return_value=None) - @mock.patch.object(storagecenter_api.SCApi, - '_get_user_preferences', - return_value={'allowStorageProfileSelection': 
True}) - def test_update_storage_profile_prefs_not_found(self, - mock_profile, - mock_prefs, - mock_close_connection, - mock_open_connection, - mock_init): - LOG = self.mock_object(storagecenter_api, "LOG") - res = self.scapi.update_storage_profile(None, 'Fake') - self.assertFalse(res) - self.assertEqual(1, LOG.error.call_count) - - @mock.patch.object(storagecenter_api.SCApi, - '_get_user_preferences', - return_value={'allowStorageProfileSelection': True, - 'storageProfile': None}) - def test_update_storage_profile_default_not_found(self, - mock_prefs, - mock_close_connection, - mock_open_connection, - mock_init): - LOG = self.mock_object(storagecenter_api, "LOG") - res = self.scapi.update_storage_profile(None, None) - self.assertFalse(res) - self.assertEqual(1, LOG.error.call_count) - - @mock.patch.object( - storagecenter_api.SCApi, - '_get_user_preferences', - return_value={'allowStorageProfileSelection': True, - 'storageProfile': {'name': 'Fake', - 'instanceId': 'fakeId'}}) - @mock.patch.object(storagecenter_api.HttpClient, - 'put', - return_value=RESPONSE_200) - def test_update_storage_profile(self, - mock_put, - mock_prefs, - mock_close_connection, - mock_open_connection, - mock_init): - LOG = self.mock_object(storagecenter_api, "LOG") - fake_scvolume = {'name': 'name', 'instanceId': 'id'} - res = self.scapi.update_storage_profile(fake_scvolume, None) - self.assertTrue(res) - self.assertIn('fakeId', repr(mock_put.call_args_list[0])) - self.assertEqual(1, LOG.info.call_count) - - @mock.patch.object(storagecenter_api.SCApi, - '_get_json', - return_value=[RPLAY_PROFILE]) - @mock.patch.object(storagecenter_api.HttpClient, - 'post', - return_value=RESPONSE_200) - def test_find_replay_profile(self, - mock_post, - mock_get_json, - mock_close_connection, - mock_open_connection, - mock_init): - res = self.scapi.find_replay_profile('guid') - self.assertTrue(mock_post.called) - self.assertTrue(mock_get_json.called) - self.assertEqual(self.RPLAY_PROFILE, res, 'Unexpected 
Profile') - - @mock.patch.object(storagecenter_api.SCApi, - '_get_json', - return_value=[RPLAY_PROFILE, RPLAY_PROFILE]) - @mock.patch.object(storagecenter_api.HttpClient, - 'post', - return_value=RESPONSE_200) - def test_find_replay_profile_more_than_one(self, - mock_post, - mock_get_json, - mock_close_connection, - mock_open_connection, - mock_init): - self.assertRaises(exception.VolumeBackendAPIException, - self.scapi.find_replay_profile, - 'guid') - self.assertTrue(mock_post.called) - self.assertTrue(mock_get_json.called) - - @mock.patch.object(storagecenter_api.SCApi, - '_get_json', - return_value=[]) - @mock.patch.object(storagecenter_api.HttpClient, - 'post', - return_value=RESPONSE_200) - def test_find_replay_profile_empty_list(self, - mock_post, - mock_get_json, - mock_close_connection, - mock_open_connection, - mock_init): - res = self.scapi.find_replay_profile('guid') - self.assertTrue(mock_post.called) - self.assertTrue(mock_get_json.called) - self.assertIsNone(res, 'Unexpected return') - - @mock.patch.object(storagecenter_api.HttpClient, - 'post', - return_value=RESPONSE_400) - def test_find_replay_profile_error(self, - mock_post, - mock_close_connection, - mock_open_connection, - mock_init): - res = self.scapi.find_replay_profile('guid') - self.assertTrue(mock_post.called) - self.assertIsNone(res, 'Unexpected return') - - @mock.patch.object(storagecenter_api.SCApi, - 'find_replay_profile', - return_value=None) - @mock.patch.object(storagecenter_api.SCApi, - '_first_result', - return_value=RPLAY_PROFILE) - @mock.patch.object(storagecenter_api.HttpClient, - 'post', - return_value=RESPONSE_201) - def test_create_replay_profile(self, - mock_post, - mock_first_result, - mock_find_replay_profile, - mock_close_connection, - mock_open_connection, - mock_init): - res = self.scapi.create_replay_profile('guid') - self.assertTrue(mock_find_replay_profile.called) - self.assertTrue(mock_post.called) - self.assertTrue(mock_first_result.called) - 
self.assertEqual(self.RPLAY_PROFILE, res, 'Unexpected Profile') - - @mock.patch.object(storagecenter_api.SCApi, - 'find_replay_profile', - return_value=RPLAY_PROFILE) - def test_create_replay_profile_exists(self, - mock_find_replay_profile, - mock_close_connection, - mock_open_connection, - mock_init): - res = self.scapi.create_replay_profile('guid') - self.assertTrue(mock_find_replay_profile.called) - self.assertEqual(self.RPLAY_PROFILE, res, 'Unexpected Profile') - - @mock.patch.object(storagecenter_api.SCApi, - 'find_replay_profile', - return_value=None) - @mock.patch.object(storagecenter_api.HttpClient, - 'post', - return_value=RESPONSE_400) - def test_create_replay_profile_fail(self, - mock_post, - mock_find_replay_profile, - mock_close_connection, - mock_open_connection, - mock_init): - res = self.scapi.create_replay_profile('guid') - self.assertTrue(mock_find_replay_profile.called) - self.assertTrue(mock_post.called) - self.assertIsNone(res, 'Unexpected return') - - @mock.patch.object(storagecenter_api.HttpClient, - 'delete', - return_value=RESPONSE_200) - @mock.patch.object(storagecenter_api.SCApi, - '_get_id') - def test_delete_replay_profile(self, - mock_get_id, - mock_delete, - mock_close_connection, - mock_open_connection, - mock_init): - profile = {'name': 'guid'} - self.scapi.delete_replay_profile(profile) - self.assertTrue(mock_get_id.called) - self.assertTrue(mock_delete.called) - - @mock.patch.object(storagecenter_api.HttpClient, - 'delete', - return_value=RESPONSE_400) - @mock.patch.object(storagecenter_api.SCApi, - '_get_id') - def test_delete_replay_profile_fail(self, - mock_get_id, - mock_delete, - mock_close_connection, - mock_open_connection, - mock_init): - profile = {'name': 'guid'} - self.assertRaises(exception.VolumeBackendAPIException, - self.scapi.delete_replay_profile, - profile) - self.assertTrue(mock_get_id.called) - self.assertTrue(mock_delete.called) - - @mock.patch.object(storagecenter_api.SCApi, - '_first_result', - 
return_value=VOLUME_CONFIG) - @mock.patch.object(storagecenter_api.HttpClient, - 'get', - return_value=RESPONSE_200) - @mock.patch.object(storagecenter_api.SCApi, - '_get_id') - def test_get_volume_configuration(self, - mock_get_id, - mock_get, - mock_first_result, - mock_close_connection, - mock_open_connection, - mock_init): - res = self.scapi._get_volume_configuration({}) - self.assertTrue(mock_get_id.called) - self.assertTrue(mock_get.called) - self.assertEqual(self.VOLUME_CONFIG, res, 'Unexpected config') - - @mock.patch.object(storagecenter_api.HttpClient, - 'get', - return_value=RESPONSE_400) - @mock.patch.object(storagecenter_api.SCApi, - '_get_id') - def test_get_volume_configuration_bad_response(self, - mock_get_id, - mock_get, - mock_close_connection, - mock_open_connection, - mock_init): - res = self.scapi._get_volume_configuration({}) - self.assertTrue(mock_get_id.called) - self.assertTrue(mock_get.called) - self.assertIsNone(res, 'Unexpected result') - - @mock.patch.object(storagecenter_api.SCApi, - '_get_volume_configuration', - return_value=VOLUME_CONFIG) - @mock.patch.object(storagecenter_api.HttpClient, - 'put', - return_value=RESPONSE_200) - @mock.patch.object(storagecenter_api.SCApi, - '_get_id') - def test_update_volume_profiles(self, - mock_get_id, - mock_put, - mock_get_volume_configuration, - mock_close_connection, - mock_open_connection, - mock_init): - scvolume = {'instanceId': '1'} - existingid = self.VOLUME_CONFIG[u'replayProfileList'][0][u'instanceId'] - vcid = self.VOLUME_CONFIG[u'instanceId'] - # First get_id is for our existing replay profile id and the second - # is for the volume config and the last is for the volume id. And - # then we do this again for the second call below. 
- mock_get_id.side_effect = [existingid, - vcid, - scvolume['instanceId'], - existingid, - vcid, - scvolume['instanceId']] - newid = '64702.1' - expected_payload = {'ReplayProfileList': [newid, existingid]} - expected_url = 'StorageCenter/ScVolumeConfiguration/' + vcid - res = self.scapi._update_volume_profiles(scvolume, newid, None) - self.assertTrue(mock_get_id.called) - self.assertTrue(mock_get_volume_configuration.called) - mock_put.assert_called_once_with(expected_url, expected_payload, True) - self.assertTrue(res) - - # Now do a remove. (Restarting with the original config so this will - # end up as an empty list.) - expected_payload['ReplayProfileList'] = [] - res = self.scapi._update_volume_profiles(scvolume, None, existingid) - self.assertTrue(mock_get_id.called) - self.assertTrue(mock_get_volume_configuration.called) - mock_put.assert_called_with(expected_url, expected_payload, True) - self.assertTrue(res) - - @mock.patch.object(storagecenter_api.SCApi, - '_get_volume_configuration', - return_value=VOLUME_CONFIG) - @mock.patch.object(storagecenter_api.HttpClient, - 'put', - return_value=RESPONSE_400) - # We set this to 1 so we can check our payload - @mock.patch.object(storagecenter_api.SCApi, - '_get_id') - def test_update_volume_profiles_bad_response(self, - mock_get_id, - mock_put, - mock_get_volume_configuration, - mock_close_connection, - mock_open_connection, - mock_init): - scvolume = {'instanceId': '1'} - existingid = self.VOLUME_CONFIG[u'replayProfileList'][0][u'instanceId'] - vcid = self.VOLUME_CONFIG[u'instanceId'] - # First get_id is for our existing replay profile id and the second - # is for the volume config and the last is for the volume id. And - # then we do this again for the second call below. 
- mock_get_id.side_effect = [existingid, - vcid, - scvolume['instanceId'], - existingid, - vcid, - scvolume['instanceId']] - newid = '64702.1' - expected_payload = {'ReplayProfileList': [newid, existingid]} - expected_url = 'StorageCenter/ScVolumeConfiguration/' + vcid - res = self.scapi._update_volume_profiles(scvolume, newid, None) - self.assertTrue(mock_get_id.called) - self.assertTrue(mock_get_volume_configuration.called) - mock_put.assert_called_once_with(expected_url, expected_payload, True) - self.assertFalse(res) - - # Now do a remove. (Restarting with the original config so this will - # end up as an empty list.) - expected_payload['ReplayProfileList'] = [] - res = self.scapi._update_volume_profiles(scvolume, None, existingid) - self.assertTrue(mock_get_id.called) - self.assertTrue(mock_get_volume_configuration.called) - mock_put.assert_called_with(expected_url, expected_payload, True) - self.assertFalse(res) - - @mock.patch.object(storagecenter_api.SCApi, - '_get_volume_configuration', - return_value=None) - def test_update_volume_profiles_no_config(self, - mock_get_volume_configuration, - mock_close_connection, - mock_open_connection, - mock_init): - scvolume = {'instanceId': '1'} - res = self.scapi._update_volume_profiles(scvolume, '64702.2', None) - self.assertTrue(mock_get_volume_configuration.called) - self.assertFalse(res) - - @mock.patch.object(storagecenter_api.SCApi, - 'find_volume', - return_value=999) - @mock.patch.object(storagecenter_api.SCApi, - '_update_volume_profiles', - return_value=True) - def test_add_cg_volumes(self, - mock_update_volume_profiles, - mock_find_volume, - mock_close_connection, - mock_open_connection, - mock_init): - profileid = '100' - add_volumes = [{'id': '1', 'provider_id': '1'}] - res = self.scapi._add_cg_volumes(profileid, add_volumes) - self.assertTrue(mock_find_volume.called) - mock_update_volume_profiles.assert_called_once_with(999, - addid=profileid, - removeid=None) - self.assertTrue(res) - - 
@mock.patch.object(storagecenter_api.SCApi, - 'find_volume', - return_value=999) - @mock.patch.object(storagecenter_api.SCApi, - '_update_volume_profiles', - return_value=False) - def test_add_cg_volumes_fail(self, - mock_update_volume_profiles, - mock_find_volume, - mock_close_connection, - mock_open_connection, - mock_init): - profileid = '100' - add_volumes = [{'id': '1', 'provider_id': '1'}] - res = self.scapi._add_cg_volumes(profileid, add_volumes) - self.assertTrue(mock_find_volume.called) - mock_update_volume_profiles.assert_called_once_with(999, - addid=profileid, - removeid=None) - self.assertFalse(res) - - @mock.patch.object(storagecenter_api.SCApi, - 'find_volume', - return_value=999) - @mock.patch.object(storagecenter_api.SCApi, - '_update_volume_profiles', - return_value=True) - def test_remove_cg_volumes(self, - mock_update_volume_profiles, - mock_find_volume, - mock_close_connection, - mock_open_connection, - mock_init): - profileid = '100' - remove_volumes = [{'id': '1', 'provider_id': '1'}] - res = self.scapi._remove_cg_volumes(profileid, remove_volumes) - self.assertTrue(mock_find_volume.called) - mock_update_volume_profiles.assert_called_once_with(999, - addid=None, - removeid=profileid) - self.assertTrue(res) - - @mock.patch.object(storagecenter_api.SCApi, - 'find_volume', - return_value=999) - @mock.patch.object(storagecenter_api.SCApi, - '_update_volume_profiles', - return_value=False) - def test_remove_cg_volumes_false(self, - mock_update_volume_profiles, - mock_find_volume, - mock_close_connection, - mock_open_connection, - mock_init): - profileid = '100' - remove_volumes = [{'id': '1', 'provider_id': '1'}] - res = self.scapi._remove_cg_volumes(profileid, remove_volumes) - self.assertTrue(mock_find_volume.called) - mock_update_volume_profiles.assert_called_once_with(999, - addid=None, - removeid=profileid) - self.assertFalse(res) - - @mock.patch.object(storagecenter_api.SCApi, - '_remove_cg_volumes', - return_value=True) - 
@mock.patch.object(storagecenter_api.SCApi, - '_add_cg_volumes', - return_value=True) - @mock.patch.object(storagecenter_api.SCApi, - '_get_id', - return_value='100') - def test_update_cg_volumes(self, - mock_get_id, - mock_add_cg_volumes, - mock_remove_cg_volumes, - mock_close_connection, - mock_open_connection, - mock_init): - profile = {'name': 'guid'} - add_volumes = [{'id': '1'}] - remove_volumes = [{'id': '2'}] - res = self.scapi.update_cg_volumes(profile, - add_volumes, - remove_volumes) - self.assertTrue(mock_get_id.called) - mock_add_cg_volumes.assert_called_once_with('100', add_volumes) - mock_remove_cg_volumes.assert_called_once_with('100', - remove_volumes) - self.assertTrue(res) - - @mock.patch.object(storagecenter_api.SCApi, - '_remove_cg_volumes', - return_value=True) - @mock.patch.object(storagecenter_api.SCApi, - '_add_cg_volumes', - return_value=True) - @mock.patch.object(storagecenter_api.SCApi, - '_get_id', - return_value='100') - def test_update_cg_volumes_no_remove(self, - mock_get_id, - mock_add_cg_volumes, - mock_remove_cg_volumes, - mock_close_connection, - mock_open_connection, - mock_init): - profile = {'name': 'guid'} - add_volumes = [{'id': '1'}] - remove_volumes = [] - res = self.scapi.update_cg_volumes(profile, - add_volumes, - remove_volumes) - self.assertTrue(mock_get_id.called) - mock_add_cg_volumes.assert_called_once_with('100', add_volumes) - self.assertFalse(mock_remove_cg_volumes.called) - self.assertTrue(res) - - @mock.patch.object(storagecenter_api.SCApi, - '_remove_cg_volumes', - return_value=True) - @mock.patch.object(storagecenter_api.SCApi, - '_add_cg_volumes', - return_value=True) - @mock.patch.object(storagecenter_api.SCApi, - '_get_id', - return_value='100') - def test_update_cg_volumes_no_add(self, - mock_get_id, - mock_add_cg_volumes, - mock_remove_cg_volumes, - mock_close_connection, - mock_open_connection, - mock_init): - profile = {'name': 'guid'} - add_volumes = [] - remove_volumes = [{'id': '1'}] - res = 
self.scapi.update_cg_volumes(profile, - add_volumes, - remove_volumes) - self.assertTrue(mock_get_id.called) - mock_remove_cg_volumes.assert_called_once_with('100', remove_volumes) - self.assertFalse(mock_add_cg_volumes.called) - self.assertTrue(res) - - @mock.patch.object(storagecenter_api.SCApi, - '_remove_cg_volumes') - @mock.patch.object(storagecenter_api.SCApi, - '_add_cg_volumes', - return_value=False) - @mock.patch.object(storagecenter_api.SCApi, - '_get_id', - return_value='100') - def test_update_cg_volumes_add_fail(self, - mock_get_id, - mock_add_cg_volumes, - mock_remove_cg_volumes, - mock_close_connection, - mock_open_connection, - mock_init): - profile = {'name': 'guid'} - add_volumes = [{'id': '1'}] - remove_volumes = [{'id': '2'}] - res = self.scapi.update_cg_volumes(profile, - add_volumes, - remove_volumes) - self.assertTrue(mock_get_id.called) - mock_add_cg_volumes.assert_called_once_with('100', add_volumes) - self.assertTrue(not mock_remove_cg_volumes.called) - self.assertFalse(res) - - @mock.patch.object(storagecenter_api.SCApi, - '_remove_cg_volumes', - return_value=False) - @mock.patch.object(storagecenter_api.SCApi, - '_add_cg_volumes', - return_value=True) - @mock.patch.object(storagecenter_api.SCApi, - '_get_id', - return_value='100') - def test_update_cg_volumes_remove_fail(self, - mock_get_id, - mock_add_cg_volumes, - mock_remove_cg_volumes, - mock_close_connection, - mock_open_connection, - mock_init): - profile = {'name': 'guid'} - add_volumes = [{'id': '1'}] - remove_volumes = [{'id': '2'}] - res = self.scapi.update_cg_volumes(profile, - add_volumes, - remove_volumes) - self.assertTrue(mock_get_id.called) - mock_add_cg_volumes.assert_called_once_with('100', add_volumes) - mock_remove_cg_volumes.assert_called_once_with('100', - remove_volumes) - self.assertFalse(res) - - @mock.patch.object(storagecenter_api.HttpClient, - 'get', - return_value=RESPONSE_200) - @mock.patch.object(storagecenter_api.SCApi, - '_get_json', - 
return_value=[INACTIVE_VOLUME]) - @mock.patch.object(storagecenter_api.SCApi, - '_init_volume') - def test_init_cg_volumes_inactive(self, - mock_init_volume, - mock_get_json, - mock_get, - mock_close_connection, - mock_open_connection, - mock_init): - profileid = 100 - self.scapi._init_cg_volumes(profileid) - self.assertTrue(mock_get.called) - self.assertTrue(mock_get_json.called) - mock_init_volume.assert_called_once_with(self.INACTIVE_VOLUME) - - @mock.patch.object(storagecenter_api.HttpClient, - 'get', - return_value=RESPONSE_200) - @mock.patch.object(storagecenter_api.SCApi, - '_get_json', - return_value=[VOLUME]) - @mock.patch.object(storagecenter_api.SCApi, - '_init_volume') - def test_init_cg_volumes_active(self, - mock_init_volume, - mock_get_json, - mock_get, - mock_close_connection, - mock_open_connection, - mock_init): - profileid = 100 - self.scapi._init_cg_volumes(profileid) - self.assertTrue(mock_get.called) - self.assertTrue(mock_get_json.called) - self.assertFalse(mock_init_volume.called) - - @mock.patch.object(storagecenter_api.HttpClient, - 'post', - return_value=RESPONSE_204) - @mock.patch.object(storagecenter_api.SCApi, - '_get_id', - return_value='100') - @mock.patch.object(storagecenter_api.SCApi, - '_init_cg_volumes') - def test_snap_cg_replay(self, - mock_init_cg_volumes, - mock_get_id, - mock_post, - mock_close_connection, - mock_open_connection, - mock_init): - replayid = 'guid' - expire = 0 - profile = {'instanceId': '100'} - # See the 100 from get_id above? 
- expected_url = 'StorageCenter/ScReplayProfile/100/CreateReplay' - expected_payload = {'description': replayid, 'expireTime': expire} - res = self.scapi.snap_cg_replay(profile, replayid, expire) - mock_post.assert_called_once_with(expected_url, expected_payload, True) - self.assertTrue(mock_get_id.called) - self.assertTrue(mock_init_cg_volumes.called) - self.assertTrue(res) - - @mock.patch.object(storagecenter_api.HttpClient, - 'post', - return_value=RESPONSE_400) - @mock.patch.object(storagecenter_api.SCApi, - '_get_id', - return_value='100') - @mock.patch.object(storagecenter_api.SCApi, - '_init_cg_volumes') - def test_snap_cg_replay_bad_return(self, - mock_init_cg_volumes, - mock_get_id, - mock_post, - mock_close_connection, - mock_open_connection, - mock_init): - replayid = 'guid' - expire = 0 - profile = {'instanceId': '100'} - # See the 100 from get_id above? - expected_url = 'StorageCenter/ScReplayProfile/100/CreateReplay' - expected_payload = {'description': replayid, 'expireTime': expire} - res = self.scapi.snap_cg_replay(profile, replayid, expire) - mock_post.assert_called_once_with(expected_url, expected_payload, True) - self.assertTrue(mock_get_id.called) - self.assertTrue(mock_init_cg_volumes.called) - self.assertFalse(res) - - @mock.patch.object(storagecenter_api.SCApi, - '_get_json', - return_value=CGS) - @mock.patch.object(storagecenter_api.HttpClient, - 'get', - return_value=RESPONSE_200) - def test_find_sc_cg(self, - mock_get, - mock_get_json, - mock_close_connection, - mock_open_connection, - mock_init): - res = self.scapi._find_sc_cg( - {}, - 'GUID1-0869559e-6881-454e-ba18-15c6726d33c1') - self.assertEqual(self.CGS[0], res) - - @mock.patch.object(storagecenter_api.SCApi, - '_get_json', - return_value=CGS) - @mock.patch.object(storagecenter_api.HttpClient, - 'get', - return_value=RESPONSE_200) - def test_find_sc_cg_not_found(self, - mock_get, - mock_get_json, - mock_close_connection, - mock_open_connection, - mock_init): - res = 
self.scapi._find_sc_cg( - {}, - 'GUID3-0869559e-6881-454e-ba18-15c6726d33c1') - self.assertIsNone(res) - - @mock.patch.object(storagecenter_api.HttpClient, - 'get', - return_value=RESPONSE_400) - def test_find_sc_cg_fail(self, - mock_get, - mock_close_connection, - mock_open_connection, - mock_init): - res = self.scapi._find_sc_cg( - {}, - 'GUID1-0869559e-6881-454e-ba18-15c6726d33c1') - self.assertIsNone(res) - - @mock.patch.object(storagecenter_api.SCApi, - '_find_sc_cg', - return_value={'instanceId': 101}) - @mock.patch.object(storagecenter_api.SCApi, - '_get_json', - return_value=RPLAYS) - @mock.patch.object(storagecenter_api.HttpClient, - 'get') - def test_find_cg_replays(self, - mock_get, - mock_get_json, - mock_find_sc_cg, - mock_close_connection, - mock_open_connection, - mock_init): - profile = {'instanceId': '100'} - replayid = 'Cinder Test Replay012345678910' - res = self.scapi._find_cg_replays(profile, replayid) - expected_url = 'StorageCenter/ScReplayConsistencyGroup/101/ReplayList' - mock_get.assert_called_once_with(expected_url) - self.assertTrue(mock_find_sc_cg.called) - self.assertTrue(mock_get_json.called) - # We should fine RPLAYS - self.assertEqual(self.RPLAYS, res) - - @mock.patch.object(storagecenter_api.SCApi, - '_find_sc_cg', - return_value=None) - def test_find_cg_replays_no_cg(self, - mock_find_sc_cg, - mock_close_connection, - mock_open_connection, - mock_init): - profile = {'instanceId': '100'} - replayid = 'Cinder Test Replay012345678910' - res = self.scapi._find_cg_replays(profile, replayid) - self.assertTrue(mock_find_sc_cg.called) - # We should return an empty list. 
- self.assertEqual([], res) - - @mock.patch.object(storagecenter_api.SCApi, - '_find_sc_cg', - return_value={'instanceId': 101}) - @mock.patch.object(storagecenter_api.SCApi, - '_get_json', - return_value=None) - @mock.patch.object(storagecenter_api.HttpClient, - 'get') - def test_find_cg_replays_bad_json(self, - mock_get, - mock_get_json, - mock_find_sc_cg, - mock_close_connection, - mock_open_connection, - mock_init): - profile = {'instanceId': '100'} - replayid = 'Cinder Test Replay012345678910' - res = self.scapi._find_cg_replays(profile, replayid) - expected_url = 'StorageCenter/ScReplayConsistencyGroup/101/ReplayList' - mock_get.assert_called_once_with(expected_url) - self.assertTrue(mock_find_sc_cg.called) - self.assertTrue(mock_get_json.called) - self.assertIsNone(res) - - @mock.patch.object(storagecenter_api.SCApi, - '_find_cg_replays', - return_value=RPLAYS) - @mock.patch.object(storagecenter_api.HttpClient, - 'post', - return_value=RESPONSE_204) - def test_delete_cg_replay(self, - mock_post, - mock_find_cg_replays, - mock_close_connection, - mock_open_connection, - mock_init): - res = self.scapi.delete_cg_replay({}, '') - expected_url = ('StorageCenter/ScReplay/' + - self.RPLAYS[0]['instanceId'] + - '/Expire') - mock_post.assert_any_call(expected_url, {}, True) - expected_url = ('StorageCenter/ScReplay/' + - self.RPLAYS[1]['instanceId'] + - '/Expire') - mock_post.assert_any_call(expected_url, {}, True) - self.assertTrue(mock_find_cg_replays.called) - self.assertTrue(res) - - @mock.patch.object(storagecenter_api.SCApi, - '_find_cg_replays', - return_value=RPLAYS) - @mock.patch.object(storagecenter_api.HttpClient, - 'post', - return_value=RESPONSE_400) - def test_delete_cg_replay_error(self, - mock_post, - mock_find_cg_replays, - mock_close_connection, - mock_open_connection, - mock_init): - expected_url = ('StorageCenter/ScReplay/' + - self.RPLAYS[0]['instanceId'] + - '/Expire') - res = self.scapi.delete_cg_replay({}, '') - 
mock_post.assert_called_once_with(expected_url, {}, True) - self.assertTrue(mock_find_cg_replays.called) - self.assertFalse(res) - - @mock.patch.object(storagecenter_api.SCApi, - '_find_cg_replays', - return_value=[]) - def test_delete_cg_replay_cant_find(self, - mock_find_cg_replays, - mock_close_connection, - mock_open_connection, - mock_init): - res = self.scapi.delete_cg_replay({}, '') - self.assertTrue(mock_find_cg_replays.called) - self.assertTrue(res) - - def test_size_to_gb(self, - mock_close_connection, - mock_open_connection, - mock_init): - gb, rem = self.scapi.size_to_gb('1.073741824E9 Byte') - self.assertEqual(1, gb) - self.assertEqual(0, rem) - self.assertRaises(exception.VolumeBackendAPIException, - self.scapi.size_to_gb, - 'banana') - gb, rem = self.scapi.size_to_gb('1.073741924E9 Byte') - self.assertEqual(1, gb) - self.assertEqual(100, rem) - - @mock.patch.object(storagecenter_api.SCApi, - '_find_volume_folder') - @mock.patch.object(storagecenter_api.HttpClient, - 'put', - return_value=RESPONSE_200) - @mock.patch.object(storagecenter_api.SCApi, - '_get_json', - return_value=VOLUME) - def test_import_one(self, - mock_get_json, - mock_put, - mock_find_volume_folder, - mock_close_connection, - mock_open_connection, - mock_init): - newname = 'guid' - # First test is folder found. Second ist is not found. 
- mock_find_volume_folder.side_effect = [{'instanceId': '1'}, None] - expected_url = 'StorageCenter/ScVolume/100' - expected_payload = {'Name': newname, - 'VolumeFolder': '1'} - self.scapi._import_one({'instanceId': '100'}, newname) - mock_put.assert_called_once_with(expected_url, expected_payload, True) - self.assertTrue(mock_find_volume_folder.called) - expected_payload = {'Name': newname} - self.scapi._import_one({'instanceId': '100'}, newname) - - @mock.patch.object(storagecenter_api.SCApi, - '_get_volume_list', - return_value=[{'configuredSize': - '1.073741824E9 Bytes'}]) - @mock.patch.object(storagecenter_api.SCApi, - 'size_to_gb', - return_value=(1, 0)) - @mock.patch.object(storagecenter_api.SCApi, - '_find_mappings', - return_value=[]) - @mock.patch.object(storagecenter_api.SCApi, - '_import_one', - return_value=VOLUME) - def test_manage_existing(self, - mock_import_one, - mock_find_mappings, - mock_size_to_gb, - mock_get_volume_list, - mock_close_connection, - mock_open_connection, - mock_init): - newname = 'guid' - existing = {'source-name': 'scvolname'} - self.scapi.manage_existing(newname, existing) - mock_get_volume_list.assert_called_once_with( - existing.get('source-name'), None, False) - self.assertTrue(mock_find_mappings.called) - self.assertTrue(mock_size_to_gb.called) - - @mock.patch.object(storagecenter_api.SCApi, - '_get_volume_list', - return_value=[]) - def test_manage_existing_vol_not_found(self, - mock_get_volume_list, - mock_close_connection, - mock_open_connection, - mock_init): - - # Same as above only we don't have a volume folder. 
- newname = 'guid' - existing = {'source-name': 'scvolname'} - self.assertRaises(exception.ManageExistingInvalidReference, - self.scapi.manage_existing, - newname, - existing) - mock_get_volume_list.assert_called_once_with( - existing.get('source-name'), - existing.get('source-id'), - False) - - @mock.patch.object(storagecenter_api.SCApi, - '_get_volume_list', - return_value=[{}, {}, {}]) - def test_manage_existing_vol_multiple_found(self, - mock_get_volume_list, - mock_close_connection, - mock_open_connection, - mock_init): - - # Same as above only we don't have a volume folder. - newname = 'guid' - existing = {'source-name': 'scvolname'} - self.assertRaises(exception.ManageExistingInvalidReference, - self.scapi.manage_existing, - newname, - existing) - mock_get_volume_list.assert_called_once_with( - existing.get('source-name'), - existing.get('source-id'), - False) - - @mock.patch.object(storagecenter_api.SCApi, - '_get_volume_list', - return_value=[{'configuredSize': - '1.073741924E9 Bytes'}]) - @mock.patch.object(storagecenter_api.SCApi, - 'size_to_gb', - return_value=(1, 100)) - def test_manage_existing_bad_size(self, - mock_size_to_gb, - mock_get_volume_list, - mock_close_connection, - mock_open_connection, - mock_init): - - # Same as above only we don't have a volume folder. 
- newname = 'guid' - existing = {'source-name': 'scvolname'} - self.assertRaises(exception.VolumeBackendAPIException, - self.scapi.manage_existing, - newname, - existing) - mock_get_volume_list.assert_called_once_with( - existing.get('source-name'), - existing.get('source-id'), - False) - self.assertTrue(mock_size_to_gb.called) - - @mock.patch.object(storagecenter_api.SCApi, - '_get_volume_list', - return_value=[{'configuredSize': - '1.073741824E9 Bytes'}]) - @mock.patch.object(storagecenter_api.SCApi, - 'size_to_gb', - return_value=(1, 0)) - @mock.patch.object(storagecenter_api.SCApi, - '_find_mappings', - return_value=[{}, {}]) - def test_manage_existing_already_mapped(self, - mock_find_mappings, - mock_size_to_gb, - mock_get_volume_list, - mock_close_connection, - mock_open_connection, - mock_init): - - newname = 'guid' - existing = {'source-name': 'scvolname'} - self.assertRaises(exception.VolumeBackendAPIException, - self.scapi.manage_existing, - newname, - existing) - mock_get_volume_list.assert_called_once_with( - existing.get('source-name'), - existing.get('source-id'), - False) - self.assertTrue(mock_find_mappings.called) - self.assertTrue(mock_size_to_gb.called) - - @mock.patch.object(storagecenter_api.SCApi, - '_get_volume_list', - return_value=[{'configuredSize': - '1.073741824E9 Bytes'}]) - @mock.patch.object(storagecenter_api.SCApi, - 'size_to_gb', - return_value=(1, 0)) - @mock.patch.object(storagecenter_api.SCApi, - '_find_mappings', - return_value=[]) - @mock.patch.object(storagecenter_api.SCApi, - '_import_one', - return_value=None) - def test_manage_existing_import_fail(self, - mock_import_one, - mock_find_mappings, - mock_size_to_gb, - mock_get_volume_list, - mock_close_connection, - mock_open_connection, - mock_init): - # We fail on the _find_volume_folder to make this easier. 
- newname = 'guid' - existing = {'source-name': 'scvolname'} - self.assertRaises(exception.VolumeBackendAPIException, - self.scapi.manage_existing, - newname, - existing) - mock_get_volume_list.assert_called_once_with( - existing.get('source-name'), - existing.get('source-id'), - False) - self.assertTrue(mock_find_mappings.called) - self.assertTrue(mock_size_to_gb.called) - - @mock.patch.object(storagecenter_api.SCApi, - '_get_volume_list', - return_value=[{'configuredSize': - '1.073741824E9 Bytes'}]) - @mock.patch.object(storagecenter_api.SCApi, - 'size_to_gb', - return_value=(1, 0)) - def test_get_unmanaged_volume_size(self, - mock_size_to_gb, - mock_get_volume_list, - mock_close_connection, - mock_open_connection, - mock_init): - existing = {'source-name': 'scvolname'} - res = self.scapi.get_unmanaged_volume_size(existing) - mock_get_volume_list.assert_called_once_with( - existing.get('source-name'), - existing.get('source-id'), - False) - self.assertTrue(mock_size_to_gb.called) - self.assertEqual(1, res) - - @mock.patch.object(storagecenter_api.SCApi, - '_get_volume_list', - return_value=[]) - def test_get_unmanaged_volume_size_not_found(self, - mock_get_volume_list, - mock_close_connection, - mock_open_connection, - mock_init): - existing = {'source-name': 'scvolname'} - self.assertRaises(exception.ManageExistingInvalidReference, - self.scapi.get_unmanaged_volume_size, - existing) - mock_get_volume_list.assert_called_once_with( - existing.get('source-name'), - existing.get('source-id'), - False) - - @mock.patch.object(storagecenter_api.SCApi, - '_get_volume_list', - return_value=[{}, {}, {}]) - def test_get_unmanaged_volume_size_many_found(self, - mock_get_volume_list, - mock_close_connection, - mock_open_connection, - mock_init): - existing = {'source-name': 'scvolname'} - self.assertRaises(exception.ManageExistingInvalidReference, - self.scapi.get_unmanaged_volume_size, - existing) - mock_get_volume_list.assert_called_once_with( - 
existing.get('source-name'), - existing.get('source-id'), - False) - - @mock.patch.object(storagecenter_api.SCApi, - '_get_volume_list', - return_value=[{'configuredSize': - '1.073741924E9 Bytes'}]) - @mock.patch.object(storagecenter_api.SCApi, - 'size_to_gb', - return_value=(1, 100)) - def test_get_unmanaged_volume_size_bad_size(self, - mock_size_to_gb, - mock_get_volume_list, - mock_close_connection, - mock_open_connection, - mock_init): - existing = {'source-name': 'scvolname'} - self.assertRaises(exception.VolumeBackendAPIException, - self.scapi.get_unmanaged_volume_size, - existing) - self.assertTrue(mock_size_to_gb.called) - mock_get_volume_list.assert_called_once_with( - existing.get('source-name'), - existing.get('source-id'), - False) - - @mock.patch.object(storagecenter_api.HttpClient, - 'put', - return_value=RESPONSE_200) - @mock.patch.object(storagecenter_api.SCApi, - '_get_id', - return_value='100') - def test_unmanage(self, - mock_get_id, - mock_put, - mock_close_connection, - mock_open_connection, - mock_init): - # Same as above only we don't have a volume folder. - scvolume = {'name': 'guid'} - expected_url = 'StorageCenter/ScVolume/100' - newname = 'Unmanaged_' + scvolume['name'] - expected_payload = {'Name': newname} - self.scapi.unmanage(scvolume) - self.assertTrue(mock_get_id.called) - mock_put.assert_called_once_with(expected_url, expected_payload, True) - - @mock.patch.object(storagecenter_api.HttpClient, - 'put', - return_value=RESPONSE_400) - @mock.patch.object(storagecenter_api.SCApi, - '_get_id', - return_value='100') - def test_unmanage_fail(self, - mock_get_id, - mock_put, - mock_close_connection, - mock_open_connection, - mock_init): - # Same as above only we don't have a volume folder. 
- scvolume = {'name': 'guid'} - expected_url = 'StorageCenter/ScVolume/100' - newname = 'Unmanaged_' + scvolume['name'] - expected_payload = {'Name': newname} - self.assertRaises(exception.VolumeBackendAPIException, - self.scapi.unmanage, - scvolume) - self.assertTrue(mock_get_id.called) - mock_put.assert_called_once_with(expected_url, expected_payload, True) - - @mock.patch.object(storagecenter_api.HttpClient, - 'post', - return_value=RESPONSE_200) - @mock.patch.object(storagecenter_api.SCApi, - '_get_json', - return_value=[SCQOS]) - # def _find_qos(self, qosnode): - def test__find_qos(self, - mock_get_json, - mock_post, - mock_close_connection, - mock_open_connection, - mock_init): - ret = self.scapi._find_qos('Cinder QoS') - self.assertDictEqual(self.SCQOS, ret) - - @mock.patch.object(storagecenter_api.HttpClient, - 'post', - return_value=RESPONSE_200) - @mock.patch.object(storagecenter_api.SCApi, - '_get_json') - # def _find_qos(self, qosnode): - def test__find_qos_not_found(self, - mock_get_json, - mock_post, - mock_close_connection, - mock_open_connection, - mock_init): - # set side effect for posts. 
- # first empty second returns qosnode - mock_get_json.side_effect = [[], self.SCQOS] - ret = self.scapi._find_qos('Cinder QoS') - self.assertDictEqual(self.SCQOS, ret) - - @mock.patch.object(storagecenter_api.HttpClient, - 'post', - return_value=RESPONSE_400) - # def _find_qos(self, qosnode): - def test__find_qos_find_fail(self, - mock_post, - mock_close_connection, - mock_open_connection, - mock_init): - self.assertRaises(exception.VolumeBackendAPIException, - self.scapi._find_qos, - 'Cinder QoS') - - @mock.patch.object(storagecenter_api.HttpClient, - 'post') - @mock.patch.object(storagecenter_api.SCApi, - '_get_json', - return_value=[]) - # def _find_qos(self, qosnode): - def test__find_qos_create_fail(self, - mock_get_json, - mock_post, - mock_close_connection, - mock_open_connection, - mock_init): - mock_post.side_effect = [self.RESPONSE_200, self.RESPONSE_400] - self.assertRaises(exception.VolumeBackendAPIException, - self.scapi._find_qos, - 'Cinder QoS') - - @mock.patch.object(storagecenter_api.HttpClient, - 'put', - return_value=RESPONSE_400) - @mock.patch.object(storagecenter_api.HttpClient, - 'get', - return_value=RESPONSE_200) - @mock.patch.object(storagecenter_api.SCApi, - '_get_json', - return_value=SCREPL) - def test_update_replicate_active_replay_fail(self, - mock_get_json, - mock_get, - mock_put, - mock_close_connection, - mock_open_connection, - mock_init): - ret = self.scapi.update_replicate_active_replay({'instanceId': '1'}, - True) - self.assertFalse(ret) - - @mock.patch.object(storagecenter_api.HttpClient, - 'get', - return_value=RESPONSE_200) - @mock.patch.object(storagecenter_api.SCApi, - '_get_json', - return_value=SCREPL) - def test_update_replicate_active_replay_nothing_to_do( - self, mock_get_json, mock_get, mock_close_connection, - mock_open_connection, mock_init): - ret = self.scapi.update_replicate_active_replay({'instanceId': '1'}, - False) - self.assertTrue(ret) - - @mock.patch.object(storagecenter_api.HttpClient, - 'get', - 
return_value=RESPONSE_200) - @mock.patch.object(storagecenter_api.SCApi, - '_get_json', - return_value=[]) - def test_update_replicate_active_replay_not_found(self, - mock_get_json, - mock_get, - mock_close_connection, - mock_open_connection, - mock_init): - ret = self.scapi.update_replicate_active_replay({'instanceId': '1'}, - True) - self.assertTrue(ret) - - @mock.patch.object(storagecenter_api.HttpClient, - 'get', - return_value=RESPONSE_400) - @mock.patch.object(storagecenter_api.SCApi, - '_get_json', - return_value=[]) - def test_update_replicate_active_replay_not_found2(self, - mock_get_json, - mock_get, - mock_close_connection, - mock_open_connection, - mock_init): - ret = self.scapi.update_replicate_active_replay({'instanceId': '1'}, - True) - self.assertTrue(ret) - - @mock.patch.object(storagecenter_api.HttpClient, - 'post', - return_value=RESPONSE_200) - @mock.patch.object(storagecenter_api.SCApi, - '_get_json', - return_value=[{'instanceId': '12345.1'}]) - def test_get_disk_folder(self, - mock_get_json, - mock_post, - mock_close_connection, - mock_open_connection, - mock_init): - ret = self.scapi._get_disk_folder(12345, 'name') - expected_payload = {'filter': {'filterType': 'AND', 'filters': [ - {'filterType': 'Equals', 'attributeName': 'scSerialNumber', - 'attributeValue': 12345}, - {'filterType': 'Equals', 'attributeName': 'name', - 'attributeValue': 'name'}]}} - mock_post.assert_called_once_with('StorageCenter/ScDiskFolder/GetList', - expected_payload) - self.assertEqual({'instanceId': '12345.1'}, ret) - - @mock.patch.object(storagecenter_api.HttpClient, - 'post', - return_value=RESPONSE_400) - def test_get_disk_folder_fail(self, - mock_post, - mock_close_connection, - mock_open_connection, - mock_init): - ret = self.scapi._get_disk_folder(12345, 'name') - expected_payload = {'filter': {'filterType': 'AND', 'filters': [ - {'filterType': 'Equals', 'attributeName': 'scSerialNumber', - 'attributeValue': 12345}, - {'filterType': 'Equals', 'attributeName': 
'name', - 'attributeValue': 'name'}]}} - mock_post.assert_called_once_with('StorageCenter/ScDiskFolder/GetList', - expected_payload) - self.assertIsNone(ret) - - @mock.patch.object(storagecenter_api.HttpClient, - 'post', - return_value=RESPONSE_200) - @mock.patch.object(storagecenter_api.SCApi, - '_get_json') - def test_get_disk_folder_fail_bad_json(self, - mock_get_json, - mock_post, - mock_close_connection, - mock_open_connection, - mock_init): - mock_get_json.side_effect = (exception.VolumeBackendAPIException('')) - ret = self.scapi._get_disk_folder(12345, 'name') - expected_payload = {'filter': {'filterType': 'AND', 'filters': [ - {'filterType': 'Equals', 'attributeName': 'scSerialNumber', - 'attributeValue': 12345}, - {'filterType': 'Equals', 'attributeName': 'name', - 'attributeValue': 'name'}]}} - mock_post.assert_called_once_with('StorageCenter/ScDiskFolder/GetList', - expected_payload) - self.assertIsNone(ret) - - @mock.patch.object(storagecenter_api.HttpClient, - 'get', - return_value=RESPONSE_200) - @mock.patch.object(storagecenter_api.SCApi, - '_get_json', - return_value=SCREPL) - def test_get_screplication(self, - mock_get_json, - mock_get, - mock_close_connection, - mock_open_connection, - mock_init): - ret = self.scapi.get_screplication({'instanceId': '1'}, 65495) - self.assertDictEqual(self.SCREPL[0], ret) - - @mock.patch.object(storagecenter_api.HttpClient, - 'get', - return_value=RESPONSE_200) - @mock.patch.object(storagecenter_api.SCApi, - '_get_json', - return_value=[]) - def test_get_screplication_not_found(self, - mock_get_json, - mock_get, - mock_close_connection, - mock_open_connection, - mock_init): - ret = self.scapi.get_screplication({'instanceId': '1'}, 65496) - self.assertIsNone(ret) - - @mock.patch.object(storagecenter_api.HttpClient, - 'get', - return_value=RESPONSE_400) - def test_get_screplication_error(self, - mock_get, - mock_close_connection, - mock_open_connection, - mock_init): - ret = 
self.scapi.get_screplication({'instanceId': '1'}, 65495) - self.assertIsNone(ret) - - @mock.patch.object(storagecenter_api.SCApi, - 'get_screplication', - return_value=SCREPL[0]) - @mock.patch.object(storagecenter_api.HttpClient, - 'delete', - return_value=RESPONSE_200) - def test_delete_replication(self, - mock_delete, - mock_get_screplication, - mock_close_connection, - mock_open_connection, - mock_init): - destssn = 65495 - expected = 'StorageCenter/ScReplication/%s' % ( - self.SCREPL[0]['instanceId']) - expected_payload = {'DeleteDestinationVolume': True, - 'RecycleDestinationVolume': True, - 'DeleteRestorePoint': True} - ret = self.scapi.delete_replication(self.VOLUME, destssn) - mock_delete.assert_any_call(expected, payload=expected_payload, - async=True) - self.assertTrue(ret) - - @mock.patch.object(storagecenter_api.SCApi, - 'get_screplication', - return_value=None) - def test_delete_replication_not_found(self, - mock_get_screplication, - mock_close_connection, - mock_open_connection, - mock_init): - destssn = 65495 - ret = self.scapi.delete_replication(self.VOLUME, destssn) - self.assertFalse(ret) - ret = self.scapi.delete_replication(self.VOLUME, destssn) - self.assertFalse(ret) - - @mock.patch.object(storagecenter_api.SCApi, - 'get_screplication', - return_value=SCREPL[0]) - @mock.patch.object(storagecenter_api.HttpClient, - 'delete', - return_value=RESPONSE_400) - def test_delete_replication_error(self, - mock_delete, - mock_get_screplication, - mock_close_connection, - mock_open_connection, - mock_init): - destssn = 65495 - expected = 'StorageCenter/ScReplication/%s' % ( - self.SCREPL[0]['instanceId']) - expected_payload = {'DeleteDestinationVolume': True, - 'RecycleDestinationVolume': True, - 'DeleteRestorePoint': True} - ret = self.scapi.delete_replication(self.VOLUME, destssn) - mock_delete.assert_any_call(expected, payload=expected_payload, - async=True) - self.assertFalse(ret) - - @mock.patch.object(storagecenter_api.SCApi, - '_find_qos', - 
return_value=SCQOS) - @mock.patch.object(storagecenter_api.SCApi, - 'find_sc') - @mock.patch.object(storagecenter_api.HttpClient, - 'post', - return_value=RESPONSE_200) - @mock.patch.object(storagecenter_api.SCApi, - '_get_json', - return_value=SCREPL[0]) - def test_create_replication(self, - mock_get_json, - mock_post, - mock_find_sc, - mock_find_qos, - mock_close_connection, - mock_open_connection, - mock_init): - # We don't test diskfolder. If one is found we include it. If not - # then we leave it out. Checking for disk folder is tested elsewhere. - ssn = 64702 - destssn = 65495 - qosnode = 'Cinder QoS' - notes = 'Created by Dell EMC Cinder Driver' - repl_prefix = 'Cinder repl of ' - - mock_find_sc.side_effect = [destssn, ssn, destssn, ssn, destssn, ssn] - payload = {'DestinationStorageCenter': destssn, - 'QosNode': self.SCQOS['instanceId'], - 'SourceVolume': self.VOLUME['instanceId'], - 'StorageCenter': ssn, - 'ReplicateActiveReplay': False, - 'Type': 'Asynchronous', - 'DestinationVolumeAttributes': - {'CreateSourceVolumeFolderPath': True, - 'Notes': notes, - 'Name': repl_prefix + self.VOLUME['name']} - } - ret = self.scapi.create_replication(self.VOLUME, - str(destssn), - qosnode, - False, - None, - False) - mock_post.assert_any_call('StorageCenter/ScReplication', payload, True) - self.assertDictEqual(self.SCREPL[0], ret) - payload['Type'] = 'Synchronous' - payload['ReplicateActiveReplay'] = True - payload['SyncMode'] = 'HighAvailability' - ret = self.scapi.create_replication(self.VOLUME, - str(destssn), - qosnode, - True, - None, - False) - mock_post.assert_any_call('StorageCenter/ScReplication', payload, True) - self.assertDictEqual(self.SCREPL[0], ret) - ret = self.scapi.create_replication(self.VOLUME, - str(destssn), - qosnode, - True, - None, - True) - mock_post.assert_any_call('StorageCenter/ScReplication', payload, True) - self.assertDictEqual(self.SCREPL[0], ret) - - @mock.patch.object(storagecenter_api.SCApi, - '_find_qos', - return_value=SCQOS) - 
@mock.patch.object(storagecenter_api.SCApi, - 'find_sc') - @mock.patch.object(storagecenter_api.HttpClient, - 'post') - @mock.patch.object(storagecenter_api.SCApi, - '_get_json', - return_value=SCREPL[0]) - def test_create_replication_error(self, - mock_get_json, - mock_post, - mock_find_sc, - mock_find_qos, - mock_close_connection, - mock_open_connection, - mock_init): - ssn = 64702 - destssn = 65495 - qosnode = 'Cinder QoS' - notes = 'Created by Dell EMC Cinder Driver' - repl_prefix = 'Cinder repl of ' - - mock_find_sc.side_effect = [destssn, ssn, destssn, ssn] - mock_post.side_effect = [self.RESPONSE_400, self.RESPONSE_400, - self.RESPONSE_400, self.RESPONSE_400] - payload = {'DestinationStorageCenter': destssn, - 'QosNode': self.SCQOS['instanceId'], - 'SourceVolume': self.VOLUME['instanceId'], - 'StorageCenter': ssn, - 'ReplicateActiveReplay': False, - 'Type': 'Asynchronous', - 'DestinationVolumeAttributes': - {'CreateSourceVolumeFolderPath': True, - 'Notes': notes, - 'Name': repl_prefix + self.VOLUME['name']} - } - ret = self.scapi.create_replication(self.VOLUME, - str(destssn), - qosnode, - False, - None, - False) - mock_post.assert_any_call('StorageCenter/ScReplication', payload, True) - self.assertIsNone(ret) - - payload['Type'] = 'Synchronous' - payload['ReplicateActiveReplay'] = True - payload['SyncMode'] = 'HighAvailability' - ret = self.scapi.create_replication(self.VOLUME, - str(destssn), - qosnode, - True, - None, - True) - mock_post.assert_any_call('StorageCenter/ScReplication', payload, True) - self.assertIsNone(ret) - - @mock.patch.object(storagecenter_api.HttpClient, - 'post', - return_value=RESPONSE_200) - @mock.patch.object(storagecenter_api.SCApi, - '_get_json', - return_value=SCREPL) - def test_find_repl_volume(self, - mock_get_json, - mock_post, - mock_close_connection, - mock_open_connection, - mock_init): - ret = self.scapi.find_repl_volume('guid', 65495) - self.assertDictEqual(self.SCREPL[0], ret) - - 
@mock.patch.object(storagecenter_api.HttpClient, - 'post', - return_value=RESPONSE_200) - @mock.patch.object(storagecenter_api.SCApi, - '_get_json', - return_value=[]) - def test_find_repl_volume_empty_list(self, - mock_get_json, - mock_post, - mock_close_connection, - mock_open_connection, - mock_init): - ret = self.scapi.find_repl_volume('guid', 65495) - self.assertIsNone(ret) - - @mock.patch.object(storagecenter_api.HttpClient, - 'post', - return_value=RESPONSE_200) - @mock.patch.object(storagecenter_api.SCApi, - '_get_json', - return_value=[{'instanceId': '1'}, {'instanceId': '2'}]) - def test_find_repl_volume_multiple_results(self, - mock_get_json, - mock_post, - mock_close_connection, - mock_open_connection, - mock_init): - ret = self.scapi.find_repl_volume('guid', 65495) - self.assertIsNone(ret) - - @mock.patch.object(storagecenter_api.HttpClient, - 'post', - return_value=RESPONSE_400) - def test_find_repl_volume_error(self, - mock_post, - mock_close_connection, - mock_open_connection, - mock_init): - ret = self.scapi.find_repl_volume('guid', 65495) - self.assertIsNone(ret) - - @mock.patch.object(storagecenter_api.SCApi, - 'get_screplication') - @mock.patch.object(storagecenter_api.SCApi, - 'find_repl_volume') - @mock.patch.object(storagecenter_api.SCApi, - 'find_volume') - @mock.patch.object(storagecenter_api.SCApi, - 'remove_mappings') - def test_break_replication(self, - mock_remove_mappings, - mock_find_volume, - mock_find_repl_volume, - mock_get_screplication, - mock_close_connection, - mock_open_connection, - mock_init): - # Find_volume doesn't actually matter. We do not gate on this. - # Switch it up just to prove that. - mock_find_volume.side_effect = [self.VOLUME, # 1 - self.VOLUME, # 2 - None, # 3 - None] # 4 - # Much like find volume we do not gate on this. 
- mock_get_screplication.side_effect = [self.SCREPL[0], # 1 - None] # 2 - # This - mock_find_repl_volume.side_effect = [self.VOLUME, # 1 - self.VOLUME, # 2 - self.VOLUME, # 3 - self.VOLUME] # 4 - mock_remove_mappings.side_effect = [True, # 1 - True, - True, # 2 - False, - True, # 3 - True, - False] # 4 - # Good path. - ret = self.scapi.break_replication('name', None, 65495) - self.assertEqual(self.VOLUME, ret) - # Source found, screpl not found. - ret = self.scapi.break_replication('name', None, 65495) - self.assertEqual(self.VOLUME, ret) - # No source vol good path. - ret = self.scapi.break_replication('name', None, 65495) - self.assertEqual(self.VOLUME, ret) - # fail remove mappings - ret = self.scapi.break_replication('name', None, 65495) - self.assertEqual(self.VOLUME, ret) - - @mock.patch.object(storagecenter_api.SCApi, - '_get_user_preferences') - def test__find_user_replay_profiles(self, - mock_get_user_preferences, - mock_close_connection, - mock_open_connection, - mock_init): - mock_get_user_preferences.return_value = {} - ret = self.scapi._find_user_replay_profiles() - self.assertEqual([], ret) - mock_get_user_preferences.return_value = {'test': 'test', - 'replayProfileList': []} - ret = self.scapi._find_user_replay_profiles() - self.assertEqual([], ret) - mock_get_user_preferences.return_value = { - 'test': 'test', 'replayProfileList': [{'instanceId': 'a'}, - {'instanceId': 'b'}]} - ret = self.scapi._find_user_replay_profiles() - self.assertEqual(['a', 'b'], ret) - - @mock.patch.object(storagecenter_api.HttpClient, - 'post') - @mock.patch.object(storagecenter_api.SCApi, - '_get_json') - def test__find_daily_replay_profile(self, - mock_get_json, - mock_post, - mock_close_connection, - mock_open_connection, - mock_init): - mock_post.return_value = self.RESPONSE_200 - mock_get_json.return_value = [{'instanceId': 'a'}] - ret = self.scapi._find_daily_replay_profile() - self.assertEqual('a', ret) - mock_get_json.return_value = [] - ret = 
self.scapi._find_daily_replay_profile() - self.assertIsNone(ret) - mock_get_json.return_value = None - ret = self.scapi._find_daily_replay_profile() - self.assertIsNone(ret) - mock_post.return_value = self.RESPONSE_400 - ret = self.scapi._find_daily_replay_profile() - self.assertIsNone(ret) - - @mock.patch.object(storagecenter_api.HttpClient, - 'post') - @mock.patch.object(storagecenter_api.SCApi, - '_get_json') - def test__find_replay_profiles(self, - mock_get_json, - mock_post, - mock_close_connection, - mock_open_connection, - mock_init): - # Good run. - rps = 'a,b' - mock_post.return_value = self.RESPONSE_200 - mock_get_json.return_value = [{'name': 'a', 'instanceId': 'a'}, - {'name': 'b', 'instanceId': 'b'}, - {'name': 'c', 'instanceId': 'c'}] - reta, retb = self.scapi._find_replay_profiles(rps) - self.assertEqual(['a', 'b'], reta) - self.assertEqual(['c'], retb) - # Looking for profile that doesn't exist. - rps = 'a,b,d' - self.assertRaises(exception.VolumeBackendAPIException, - self.scapi._find_replay_profiles, - rps) - # Looking for nothing. - rps = '' - reta, retb = self.scapi._find_replay_profiles(rps) - self.assertEqual([], reta) - self.assertEqual([], retb) - # Still Looking for nothing. - rps = None - reta, retb = self.scapi._find_replay_profiles(rps) - self.assertEqual([], reta) - self.assertEqual([], retb) - # Bad call. 
- rps = 'a,b' - mock_post.return_value = self.RESPONSE_400 - self.assertRaises(exception.VolumeBackendAPIException, - self.scapi._find_replay_profiles, - rps) - - @mock.patch.object(storagecenter_api.SCApi, - '_find_replay_profiles') - @mock.patch.object(storagecenter_api.SCApi, - '_find_user_replay_profiles') - @mock.patch.object(storagecenter_api.SCApi, - '_find_daily_replay_profile') - @mock.patch.object(storagecenter_api.SCApi, - '_update_volume_profiles') - def test_update_replay_profiles(self, - mock_update_volume_profiles, - mock_find_daily_replay_profile, - mock_find_user_replay_profiles, - mock_find_replay_profiles, - mock_close_connection, - mock_open_connection, - mock_init): - scvol = {} - mock_find_replay_profiles.return_value = (['a', 'b'], ['c']) - mock_update_volume_profiles.side_effect = [ - True, True, True, - False, - True, True, False, - True, True, True, True, True, - True, True, True, True, - False] - ret = self.scapi.update_replay_profiles(scvol, 'a,b') - # Two adds and one remove - self.assertEqual(3, mock_update_volume_profiles.call_count) - self.assertTrue(ret) - # Now update fails. - ret = self.scapi.update_replay_profiles(scvol, 'a,b') - # 1 failed update plus 3 from before. - self.assertEqual(4, mock_update_volume_profiles.call_count) - self.assertFalse(ret) - # Fail adding Ids.. - ret = self.scapi.update_replay_profiles(scvol, 'a,b') - # 3 more 4 from before. - self.assertEqual(7, mock_update_volume_profiles.call_count) - self.assertFalse(ret) - # User clearing profiles. - mock_find_replay_profiles.return_value = ([], ['a', 'b', 'c']) - mock_find_user_replay_profiles.return_value = ['d', 'u'] - ret = self.scapi.update_replay_profiles(scvol, '') - # 3 removes and 2 adds plus 7 from before - self.assertEqual(12, mock_update_volume_profiles.call_count) - self.assertTrue(ret) - # User clearing profiles and no defaults. (Probably not possible.) 
- mock_find_user_replay_profiles.return_value = [] - mock_find_daily_replay_profile.return_value = 'd' - ret = self.scapi.update_replay_profiles(scvol, '') - # 3 removes and 1 add plus 12 from before. - self.assertEqual(16, mock_update_volume_profiles.call_count) - self.assertTrue(ret) - # _find_replay_profiles blows up so we do too. - mock_find_replay_profiles.side_effect = ( - exception.VolumeBackendAPIException('aaa')) - self.assertRaises(exception.VolumeBackendAPIException, - self.scapi.update_replay_profiles, - scvol, - 'a,b') - - @mock.patch.object(storagecenter_api.SCApi, - '_sc_live_volumes') - @mock.patch.object(storagecenter_api.SCApi, - '_get_live_volumes') - def test_get_live_volume(self, - mock_get_live_volumes, - mock_sc_live_volumes, - mock_close_connection, - mock_open_connection, - mock_init): - # Basic check - retlv = self.scapi.get_live_volume(None) - self.assertIsNone(retlv) - lv1 = {'primaryVolume': {'instanceId': '12345.1'}, - 'secondaryVolume': {'instanceId': '67890.1'}} - lv2 = {'primaryVolume': {'instanceId': '12345.2'}} - mock_sc_live_volumes.return_value = [lv1, lv2] - # Good Run - retlv = self.scapi.get_live_volume('12345.2') - self.assertEqual(lv2, retlv) - mock_sc_live_volumes.assert_called_once_with('12345') - self.assertFalse(mock_get_live_volumes.called) - - @mock.patch.object(storagecenter_api.SCApi, - '_sc_live_volumes') - @mock.patch.object(storagecenter_api.SCApi, - '_get_live_volumes') - def test_get_live_volume_on_secondary(self, - mock_get_live_volumes, - mock_sc_live_volumes, - mock_close_connection, - mock_open_connection, - mock_init): - # Basic check - retlv = self.scapi.get_live_volume(None) - self.assertIsNone(retlv) - lv1 = {'primaryVolume': {'instanceId': '12345.1'}, - 'secondaryVolume': {'instanceId': '67890.1'}} - lv2 = {'primaryVolume': {'instanceId': '12345.2'}} - mock_sc_live_volumes.return_value = [] - mock_get_live_volumes.return_value = [lv1, lv2] - # Good Run - retlv = self.scapi.get_live_volume('12345.2') - 
self.assertEqual(lv2, retlv) - mock_sc_live_volumes.assert_called_once_with('12345') - mock_get_live_volumes.assert_called_once_with() - - @mock.patch.object(storagecenter_api.SCApi, - '_sc_live_volumes') - @mock.patch.object(storagecenter_api.SCApi, - '_get_live_volumes') - def test_get_live_volume_not_found(self, - mock_get_live_volumes, - mock_sc_live_volumes, - mock_close_connection, - mock_open_connection, - mock_init): - lv1 = {'primaryVolume': {'instanceId': '12345.1'}, - 'secondaryVolume': {'instanceId': '67890.1'}} - lv2 = {'primaryVolume': {'instanceId': '12345.2'}, - 'secondaryVolume': {'instanceId': '67890.2'}} - mock_get_live_volumes.return_value = [lv1, lv2] - mock_sc_live_volumes.return_value = [] - retlv = self.scapi.get_live_volume('12345.3') - self.assertIsNone(retlv) - mock_sc_live_volumes.assert_called_once_with('12345') - mock_get_live_volumes.assert_called_once_with() - - @mock.patch.object(storagecenter_api.SCApi, - '_sc_live_volumes') - @mock.patch.object(storagecenter_api.SCApi, - '_get_live_volumes') - def test_get_live_volume_swapped(self, - mock_get_live_volumes, - mock_sc_live_volumes, - mock_close_connection, - mock_open_connection, - mock_init): - lv1 = {'primaryVolume': {'instanceId': '12345.1'}, - 'secondaryVolume': {'instanceId': '67890.1'}} - lv2 = {'primaryVolume': {'instanceId': '67890.2'}, - 'secondaryVolume': {'instanceId': '12345.2'}} - mock_get_live_volumes.return_value = [lv1, lv2] - mock_sc_live_volumes.return_value = [] - retlv = self.scapi.get_live_volume('12345.2') - self.assertEqual(lv2, retlv) - mock_sc_live_volumes.assert_called_once_with('12345') - mock_get_live_volumes.assert_called_once_with() - - @mock.patch.object(storagecenter_api.SCApi, - '_sc_live_volumes') - @mock.patch.object(storagecenter_api.SCApi, - '_get_live_volumes') - def test_get_live_volume_error(self, - mock_get_live_volumes, - mock_sc_live_volumes, - mock_close_connection, - mock_open_connection, - mock_init): - mock_get_live_volumes.return_value 
= [] - mock_sc_live_volumes.return_value = [] - retlv = self.scapi.get_live_volume('12345.2') - self.assertIsNone(retlv) - - @mock.patch.object(storagecenter_api.SCApi, - '_sc_live_volumes') - @mock.patch.object(storagecenter_api.SCApi, - '_get_live_volumes') - def test_get_live_volume_by_name(self, - mock_get_live_volumes, - mock_sc_live_volumes, - mock_close_connection, - mock_open_connection, - mock_init): - lv1 = {'primaryVolume': {'instanceId': '12345.1'}, - 'secondaryVolume': {'instanceId': '67890.1', - 'instanceName': fake.VOLUME2_ID}, - 'instanceName': 'Live volume of ' + fake.VOLUME2_ID} - lv2 = {'primaryVolume': {'instanceId': '67890.2'}, - 'secondaryVolume': {'instanceId': '12345.2', - 'instanceName': fake.VOLUME_ID}, - 'instanceName': 'Live volume of ' + fake.VOLUME_ID} - mock_get_live_volumes.return_value = [lv1, lv2] - mock_sc_live_volumes.return_value = [] - retlv = self.scapi.get_live_volume('12345.2', fake.VOLUME_ID) - self.assertEqual(lv2, retlv) - mock_sc_live_volumes.assert_called_once_with('12345') - mock_get_live_volumes.assert_called_once_with() - - @mock.patch.object(storagecenter_api.SCApi, - '_sc_live_volumes') - @mock.patch.object(storagecenter_api.SCApi, - '_get_live_volumes') - def test_get_live_volume_by_name_unknown(self, - mock_get_live_volumes, - mock_sc_live_volumes, - mock_close_connection, - mock_open_connection, - mock_init): - lv1 = {'primaryVolume': {'instanceId': '12345.1'}, - 'secondaryVolume': {'instanceId': '67890.1', - 'instanceName': fake.VOLUME2_ID}, - 'instanceName': 'Live volume of ' + fake.VOLUME2_ID} - lv2 = {'secondaryVolume': {'instanceId': '12345.2', - 'instanceName': fake.VOLUME_ID}, - 'instanceName': 'unknown'} - mock_get_live_volumes.return_value = [lv1, lv2] - mock_sc_live_volumes.return_value = [] - retlv = self.scapi.get_live_volume('12345.3', fake.VOLUME_ID) - self.assertEqual(lv2, retlv) - mock_sc_live_volumes.assert_called_once_with('12345') - mock_get_live_volumes.assert_called_once_with() - - 
@mock.patch.object(storagecenter_api.HttpClient, - 'post') - @mock.patch.object(storagecenter_api.SCApi, - '_get_json') - def test_map_secondary_volume(self, - mock_get_json, - mock_post, - mock_close_connection, - mock_open_connection, - mock_init): - sclivevol = {'instanceId': '101.101', - 'secondaryVolume': {'instanceId': '102.101'}, - 'secondaryScSerialNumber': 102} - scdestsrv = {'instanceId': '102.1000'} - mock_post.return_value = self.RESPONSE_200 - mock_get_json.return_value = {'instanceId': '102.101.1'} - ret = self.scapi.map_secondary_volume(sclivevol, scdestsrv) - expected_payload = {'Server': '102.1000', - 'Advanced': {'MapToDownServerHbas': True}} - mock_post.assert_called_once_with( - 'StorageCenter/ScLiveVolume/101.101/MapSecondaryVolume', - expected_payload, True - ) - self.assertEqual({'instanceId': '102.101.1'}, ret) - - @mock.patch.object(storagecenter_api.HttpClient, - 'post') - def test_map_secondary_volume_err(self, - mock_post, - mock_close_connection, - mock_open_connection, - mock_init): - sclivevol = {'instanceId': '101.101', - 'secondaryVolume': {'instanceId': '102.101'}, - 'secondaryScSerialNumber': 102} - scdestsrv = {'instanceId': '102.1000'} - mock_post.return_value = self.RESPONSE_400 - ret = self.scapi.map_secondary_volume(sclivevol, scdestsrv) - expected_payload = {'Server': '102.1000', - 'Advanced': {'MapToDownServerHbas': True}} - mock_post.assert_called_once_with( - 'StorageCenter/ScLiveVolume/101.101/MapSecondaryVolume', - expected_payload, True - ) - self.assertIsNone(ret) - - @mock.patch.object(storagecenter_api.HttpClient, - 'post') - @mock.patch.object(storagecenter_api.SCApi, - '_get_json') - @mock.patch.object(storagecenter_api.SCApi, - '_find_qos') - @mock.patch.object(storagecenter_api.SCApi, - 'find_sc') - def test_create_live_volume(self, - mock_find_sc, - mock_find_qos, - mock_get_json, - mock_post, - mock_close_connection, - mock_open_connection, - mock_init): - scvol = {'instanceId': '101.1', - 'name': 'name'} - 
sclivevol = {'instanceId': '101.101', - 'secondaryVolume': {'instanceId': '102.101'}, - 'secondaryScSerialNumber': 102} - - remotessn = '102' - active = True - sync = False - primaryqos = 'fast' - secondaryqos = 'slow' - mock_find_sc.return_value = 102 - mock_find_qos.side_effect = [{'instanceId': '101.1001'}, - {'instanceId': '102.1001'}] - mock_post.return_value = self.RESPONSE_200 - mock_get_json.return_value = sclivevol - ret = self.scapi.create_live_volume(scvol, remotessn, active, sync, - False, primaryqos, secondaryqos) - mock_find_sc.assert_called_once_with(102) - mock_find_qos.assert_any_call(primaryqos) - mock_find_qos.assert_any_call(secondaryqos, 102) - self.assertEqual(sclivevol, ret) - - @mock.patch.object(storagecenter_api.HttpClient, - 'post') - @mock.patch.object(storagecenter_api.SCApi, - '_get_json') - @mock.patch.object(storagecenter_api.SCApi, - '_find_qos') - @mock.patch.object(storagecenter_api.SCApi, - 'find_sc') - def test_create_live_volume_autofailover(self, - mock_find_sc, - mock_find_qos, - mock_get_json, - mock_post, - mock_close_connection, - mock_open_connection, - mock_init): - scvol = {'instanceId': '101.1', - 'name': 'name'} - sclivevol = {'instanceId': '101.101', - 'secondaryVolume': {'instanceId': '102.101'}, - 'secondaryScSerialNumber': 102} - - remotessn = '102' - active = True - sync = False - primaryqos = 'fast' - secondaryqos = 'slow' - mock_find_sc.return_value = 102 - mock_find_qos.side_effect = [{'instanceId': '101.1001'}, - {'instanceId': '102.1001'}] - mock_post.return_value = self.RESPONSE_200 - mock_get_json.return_value = sclivevol - ret = self.scapi.create_live_volume(scvol, remotessn, active, sync, - True, primaryqos, secondaryqos) - mock_find_sc.assert_called_once_with(102) - mock_find_qos.assert_any_call(primaryqos) - mock_find_qos.assert_any_call(secondaryqos, 102) - self.assertEqual(sclivevol, ret) - # Make sure sync flipped and that we set HighAvailability. 
- expected = {'SyncMode': 'HighAvailability', - 'SwapRolesAutomaticallyEnabled': False, - 'SecondaryStorageCenter': 102, - 'FailoverAutomaticallyEnabled': True, - 'StorageCenter': 12345, - 'RestoreAutomaticallyEnabled': True, - 'SecondaryQosNode': '102.1001', - 'ReplicateActiveReplay': True, - 'PrimaryQosNode': '101.1001', - 'Type': 'Synchronous', - 'PrimaryVolume': '101.1', - 'SecondaryVolumeAttributes': - {'Notes': 'Created by Dell EMC Cinder Driver', - 'CreateSourceVolumeFolderPath': True, - 'Name': 'name'} - } - mock_post.assert_called_once_with('StorageCenter/ScLiveVolume', - expected, True) - - @mock.patch.object(storagecenter_api.HttpClient, - 'post') - @mock.patch.object(storagecenter_api.SCApi, - '_find_qos') - @mock.patch.object(storagecenter_api.SCApi, - 'find_sc') - def test_create_live_volume_error(self, - mock_find_sc, - mock_find_qos, - mock_post, - mock_close_connection, - mock_open_connection, - mock_init): - scvol = {'instanceId': '101.1', - 'name': 'name'} - remotessn = '102' - active = True - sync = False - primaryqos = 'fast' - secondaryqos = 'slow' - mock_find_sc.return_value = 102 - mock_find_qos.side_effect = [{'instanceId': '101.1001'}, - {'instanceId': '102.1001'}] - mock_post.return_value = self.RESPONSE_400 - ret = self.scapi.create_live_volume(scvol, remotessn, active, sync, - False, primaryqos, secondaryqos) - mock_find_sc.assert_called_once_with(102) - mock_find_qos.assert_any_call(primaryqos) - mock_find_qos.assert_any_call(secondaryqos, 102) - self.assertIsNone(ret) - - @mock.patch.object(storagecenter_api.SCApi, - '_find_qos') - @mock.patch.object(storagecenter_api.SCApi, - 'find_sc') - def test_create_live_volume_no_dest(self, - mock_find_sc, - mock_find_qos, - mock_close_connection, - mock_open_connection, - mock_init): - scvol = {'instanceId': '101.1', - 'name': 'name'} - remotessn = '102' - active = True - sync = False - primaryqos = 'fast' - secondaryqos = 'slow' - mock_find_sc.return_value = 102 - mock_find_qos.return_value = 
{} - ret = self.scapi.create_live_volume(scvol, remotessn, active, sync, - False, primaryqos, secondaryqos) - mock_find_sc.assert_called_once_with(102) - mock_find_qos.assert_any_call(primaryqos) - mock_find_qos.assert_any_call(secondaryqos, 102) - self.assertIsNone(ret) - - @mock.patch.object(storagecenter_api.SCApi, - '_find_qos') - @mock.patch.object(storagecenter_api.SCApi, - 'find_sc') - def test_create_live_volume_no_qos(self, - mock_find_sc, - mock_find_qos, - mock_close_connection, - mock_open_connection, - mock_init): - scvol = {'instanceId': '101.1', - 'name': 'name'} - remotessn = '102' - active = True - sync = False - primaryqos = 'fast' - secondaryqos = 'slow' - mock_find_sc.return_value = 102 - mock_find_qos.return_value = None - ret = self.scapi.create_live_volume(scvol, remotessn, active, sync, - False, primaryqos, secondaryqos) - mock_find_sc.assert_called_once_with(102) - mock_find_qos.assert_any_call(primaryqos) - mock_find_qos.assert_any_call(secondaryqos, 102) - self.assertIsNone(ret) - - @mock.patch.object(storagecenter_api.SCApi, - '_find_qos') - @mock.patch.object(storagecenter_api.SCApi, - 'find_sc') - def test_create_live_volume_no_secondary_qos(self, - mock_find_sc, - mock_find_qos, - mock_close_connection, - mock_open_connection, - mock_init): - scvol = {'instanceId': '101.1', - 'name': 'name'} - remotessn = '102' - active = True - sync = False - primaryqos = 'fast' - secondaryqos = 'slow' - mock_find_sc.return_value = 102 - mock_find_qos.side_effect = [{'instanceId': '101.1001'}, - None] - ret = self.scapi.create_live_volume(scvol, remotessn, active, sync, - False, primaryqos, secondaryqos) - mock_find_sc.assert_called_once_with(102) - mock_find_qos.assert_any_call(primaryqos) - mock_find_qos.assert_any_call(secondaryqos, 102) - self.assertIsNone(ret) - - @mock.patch.object(storagecenter_api.HttpClient, - 'put') - def test_manage_replay(self, - mock_put, - mock_close_connection, - mock_open_connection, - mock_init): - screplay = 
{'description': 'notguid', - 'instanceId': 1} - payload = {'description': 'guid', - 'expireTime': 0} - mock_put.return_value = self.RESPONSE_200 - ret = self.scapi.manage_replay(screplay, 'guid') - self.assertTrue(ret) - mock_put.assert_called_once_with('StorageCenter/ScReplay/1', payload, - True) - mock_put.return_value = self.RESPONSE_400 - ret = self.scapi.manage_replay(screplay, 'guid') - self.assertFalse(ret) - - @mock.patch.object(storagecenter_api.HttpClient, - 'put') - def test_unmanage_replay(self, - mock_put, - mock_close_connection, - mock_open_connection, - mock_init): - screplay = {'description': 'guid', - 'instanceId': 1} - payload = {'expireTime': 1440} - mock_put.return_value = self.RESPONSE_200 - ret = self.scapi.unmanage_replay(screplay) - self.assertTrue(ret) - mock_put.assert_called_once_with('StorageCenter/ScReplay/1', payload, - True) - mock_put.return_value = self.RESPONSE_400 - ret = self.scapi.unmanage_replay(screplay) - self.assertFalse(ret) - - @mock.patch.object(storagecenter_api.SCApi, - '_get_replay_list') - def test_find_common_replay(self, - mock_get_replay_list, - mock_close_connection, - mock_open_connection, - mock_init): - dreplays = [{'globalIndex': '11111.113'}, - {'globalIndex': '11111.112'}, - {'globalIndex': '11111.111'}] - sreplays = [{'globalIndex': '12345.112'}, - {'globalIndex': '12345.111'}, - {'globalIndex': '11111.112'}, - {'globalIndex': '11111.111'}] - xreplays = [{'globalIndex': '12345.112'}, - {'globalIndex': '12345.111'}] - mock_get_replay_list.side_effect = [dreplays, sreplays, - dreplays, xreplays] - ret = self.scapi.find_common_replay({'instanceId': '12345.1'}, - {'instanceId': '11111.1'}) - self.assertEqual({'globalIndex': '11111.112'}, ret) - ret = self.scapi.find_common_replay(None, {'instanceId': '11111.1'}) - self.assertIsNone(ret) - ret = self.scapi.find_common_replay({'instanceId': '12345.1'}, None) - self.assertIsNone(ret) - ret = self.scapi.find_common_replay({'instanceId': '12345.1'}, - 
{'instanceId': '11111.1'}) - self.assertIsNone(ret) - - @mock.patch.object(storagecenter_api.SCApi, - '_find_qos') - @mock.patch.object(storagecenter_api.SCApi, - '_get_json') - @mock.patch.object(storagecenter_api.HttpClient, - 'post') - def test_start_replication(self, - mock_post, - mock_get_json, - mock_find_qos, - mock_close_connection, - mock_open_connection, - mock_init): - svolume = {'name': 'guida', 'instanceId': '12345.101', - 'scSerialNumber': 12345} - dvolume = {'name': 'guidb', 'instanceId': '11111.101', - 'scSerialNumber': 11111} - mock_post.return_value = self.RESPONSE_200 - mock_get_json.return_value = {'instanceId': '12345.201'} - mock_find_qos.return_value = {'instanceId': '12345.1'} - expected = {'QosNode': '12345.1', - 'SourceVolume': '12345.101', - 'StorageCenter': 12345, - 'ReplicateActiveReplay': False, - 'Type': 'Asynchronous', - 'DestinationVolume': '11111.101', - 'DestinationStorageCenter': 11111} - ret = self.scapi.start_replication(svolume, dvolume, 'Asynchronous', - 'cinderqos', False) - self.assertEqual(mock_get_json.return_value, ret) - mock_post.assert_called_once_with('StorageCenter/ScReplication', - expected, True) - mock_post.return_value = self.RESPONSE_400 - ret = self.scapi.start_replication(svolume, dvolume, 'Asynchronous', - 'cinderqos', False) - self.assertIsNone(ret) - mock_post.return_value = self.RESPONSE_200 - mock_find_qos.return_value = None - ret = self.scapi.start_replication(svolume, dvolume, 'Asynchronous', - 'cinderqos', False) - self.assertIsNone(ret) - mock_find_qos.return_value = {'instanceId': '12345.1'} - ret = self.scapi.start_replication(None, dvolume, 'Asynchronous', - 'cinderqos', False) - self.assertIsNone(ret) - ret = self.scapi.start_replication(svolume, None, 'Asynchronous', - 'cinderqos', False) - self.assertIsNone(ret) - - @mock.patch.object(storagecenter_api.SCApi, - 'find_common_replay') - @mock.patch.object(storagecenter_api.SCApi, - 'create_replay') - @mock.patch.object(storagecenter_api.SCApi, 
- 'start_replication') - @mock.patch.object(storagecenter_api.SCApi, - '_get_json') - @mock.patch.object(storagecenter_api.HttpClient, - 'post') - def test_replicate_to_common(self, - mock_post, - mock_get_json, - mock_start_replication, - mock_create_replay, - mock_find_common_replay, - mock_close_connection, - mock_open_connection, - mock_init): - creplay = {'instanceId': '11111.201'} - svolume = {'name': 'guida'} - dvolume = {'name': 'guidb', 'volumeFolder': {'instanceId': '11111.1'}} - vvolume = {'name': 'guidc'} - mock_find_common_replay.return_value = creplay - mock_post.return_value = self.RESPONSE_200 - mock_get_json.return_value = vvolume - mock_create_replay.return_value = {'instanceId': '12345.202'} - mock_start_replication.return_value = {'instanceId': '12345.203'} - # Simple common test. - ret = self.scapi.replicate_to_common(svolume, dvolume, 'cinderqos') - self.assertEqual(mock_start_replication.return_value, ret) - mock_post.assert_called_once_with( - 'StorageCenter/ScReplay/11111.201/CreateView', - {'Name': 'fback:guidb', - 'Notes': 'Created by Dell EMC Cinder Driver', - 'VolumeFolder': '11111.1'}, - True) - mock_create_replay.assert_called_once_with(svolume, 'failback', 600) - mock_start_replication.assert_called_once_with(svolume, vvolume, - 'Asynchronous', - 'cinderqos', - False) - mock_create_replay.return_value = None - # Unable to create a replay. - ret = self.scapi.replicate_to_common(svolume, dvolume, 'cinderqos') - self.assertIsNone(ret) - mock_create_replay.return_value = {'instanceId': '12345.202'} - mock_get_json.return_value = None - # Create view volume fails. - ret = self.scapi.replicate_to_common(svolume, dvolume, 'cinderqos') - self.assertIsNone(ret) - mock_get_json.return_value = vvolume - mock_post.return_value = self.RESPONSE_400 - # Post call returns an error. 
- ret = self.scapi.replicate_to_common(svolume, dvolume, 'cinderqos') - self.assertIsNone(ret) - mock_post.return_value = self.RESPONSE_200 - mock_find_common_replay.return_value = None - # No common replay found. - ret = self.scapi.replicate_to_common(svolume, dvolume, 'cinderqos') - self.assertIsNone(ret) - - @mock.patch.object(storagecenter_api.SCApi, - 'delete_replication') - @mock.patch.object(storagecenter_api.SCApi, - 'start_replication') - @mock.patch.object(storagecenter_api.SCApi, - 'rename_volume') - def test_flip_replication(self, - mock_rename_volume, - mock_start_replication, - mock_delete_replication, - mock_close_connection, - mock_open_connection, - mock_init): - svolume = {'scSerialNumber': '12345.1'} - dvolume = {'scSerialNumber': '11111.1'} - name = 'guid' - replicationtype = 'Synchronous' - qosnode = 'cinderqos' - activereplay = True - mock_delete_replication.return_value = True - mock_start_replication.return_value = {'instanceId': '11111.101'} - mock_rename_volume.return_value = True - # Good run. - ret = self.scapi.flip_replication(svolume, dvolume, name, - replicationtype, qosnode, - activereplay) - self.assertTrue(ret) - mock_delete_replication.assert_called_once_with(svolume, '11111.1', - False) - mock_start_replication.assert_called_once_with(dvolume, svolume, - replicationtype, - qosnode, activereplay) - mock_rename_volume.assert_any_call(svolume, 'Cinder repl of guid') - mock_rename_volume.assert_any_call(dvolume, 'guid') - mock_rename_volume.return_value = False - # Unable to rename volumes. - ret = self.scapi.flip_replication(svolume, dvolume, name, - replicationtype, qosnode, - activereplay) - self.assertFalse(ret) - mock_rename_volume.return_value = True - mock_start_replication.return_value = None - # Start replication call fails. 
- ret = self.scapi.flip_replication(svolume, dvolume, name, - replicationtype, qosnode, - activereplay) - self.assertFalse(ret) - mock_delete_replication.return_value = False - mock_start_replication.return_value = {'instanceId': '11111.101'} - # Delete old replication call fails. - ret = self.scapi.flip_replication(svolume, dvolume, name, - replicationtype, qosnode, - activereplay) - self.assertFalse(ret) - - @mock.patch.object(storagecenter_api.SCApi, - '_get_json') - @mock.patch.object(storagecenter_api.HttpClient, - 'get') - def test_replication_progress(self, - mock_get, - mock_get_json, - mock_close_connection, - mock_open_connection, - mock_init): - mock_get.return_value = self.RESPONSE_200 - mock_get_json.return_value = {'synced': True, - 'amountRemaining': '0 Bytes'} - # Good run - retbool, retnum = self.scapi.replication_progress('11111.101') - self.assertTrue(retbool) - self.assertEqual(0.0, retnum) - # SC replication ID is None. - retbool, retnum = self.scapi.replication_progress(None) - self.assertIsNone(retbool) - self.assertIsNone(retnum) - mock_get.return_value = self.RESPONSE_400 - # Get progress call fails. 
- retbool, retnum = self.scapi.replication_progress('11111.101') - self.assertIsNone(retbool) - self.assertIsNone(retnum) - - @mock.patch.object(storagecenter_api.HttpClient, - 'delete') - def test_delete_live_volume(self, - mock_delete, - mock_close_connection, - mock_open_connection, - mock_init): - mock_delete.return_value = self.RESPONSE_200 - ret = self.scapi.delete_live_volume({'instanceId': '12345.101'}, - True) - self.assertTrue(ret) - mock_delete.return_value = self.RESPONSE_400 - ret = self.scapi.delete_live_volume({'instanceId': '12345.101'}, - True) - self.assertFalse(ret) - - @mock.patch.object(storagecenter_api.HttpClient, - 'post') - def test_swap_roles_live_volume(self, - mock_post, - mock_close_connection, - mock_open_connection, - mock_init): - mock_post.return_value = self.RESPONSE_200 - lv = {'instanceId': '12345.0'} - ret = self.scapi.swap_roles_live_volume(lv) - self.assertTrue(ret) - - @mock.patch.object(storagecenter_api.HttpClient, - 'post') - def test_swap_roles_live_volume_fail(self, - mock_post, - mock_close_connection, - mock_open_connection, - mock_init): - mock_post.return_value = self.RESPONSE_400 - lv = {'instanceId': '12345.0'} - ret = self.scapi.swap_roles_live_volume(lv) - self.assertFalse(ret) - - @mock.patch.object(storagecenter_api.SCApi, - '_get_json') - @mock.patch.object(storagecenter_api.HttpClient, - 'post') - def test__find_qos_profile(self, - mock_post, - mock_get_json, - mock_close_connection, - mock_open_connection, - mock_init): - mock_post.return_value = self.RESPONSE_200 - mock_get_json.return_value = [{'instanceId': '12345.0'}] - expected_payload = {'filter': {'filterType': 'AND', 'filters': [ - {'filterType': 'Equals', 'attributeName': 'ScSerialNumber', - 'attributeValue': 12345}, - {'filterType': 'Equals', 'attributeName': 'Name', - 'attributeValue': 'Default'}, - {'filterType': 'Equals', 'attributeName': 'profileType', - 'attributeValue': 'VolumeQosProfile'}]}} - ret = self.scapi._find_qos_profile('Default', 
False) - self.assertEqual({'instanceId': '12345.0'}, ret) - mock_post.assert_called_once_with('StorageCenter/ScQosProfile/GetList', - expected_payload) - - def test__find_qos_no_qosprofile(self, - mock_close_connection, - mock_open_connection, - mock_init): - ret = self.scapi._find_qos_profile('', False) - self.assertIsNone(ret) - - @mock.patch.object(storagecenter_api.HttpClient, - 'post') - def test__find_qos_error(self, - mock_post, - mock_close_connection, - mock_open_connection, - mock_init): - mock_post.return_value = self.RESPONSE_400 - ret = self.scapi._find_qos_profile('Default', False) - self.assertIsNone(ret) - - @mock.patch.object(storagecenter_api.SCApi, - '_get_json') - @mock.patch.object(storagecenter_api.HttpClient, - 'post') - def test__find_qos_profile_empty_list(self, - mock_post, - mock_get_json, - mock_close_connection, - mock_open_connection, - mock_init): - mock_post.return_value = self.RESPONSE_200 - mock_get_json.return_value = [] - ret = self.scapi._find_qos_profile('Default', False) - self.assertIsNone(ret) - - @mock.patch.object(storagecenter_api.SCApi, - '_get_json') - @mock.patch.object(storagecenter_api.HttpClient, - 'post') - def test__find_qos_profile_group(self, - mock_post, - mock_get_json, - mock_close_connection, - mock_open_connection, - mock_init): - mock_post.return_value = self.RESPONSE_200 - mock_get_json.return_value = [{'instanceId': '12345.0'}] - expected_payload = {'filter': {'filterType': 'AND', 'filters': [ - {'filterType': 'Equals', 'attributeName': 'ScSerialNumber', - 'attributeValue': 12345}, - {'filterType': 'Equals', 'attributeName': 'Name', - 'attributeValue': 'Default'}, - {'filterType': 'Equals', 'attributeName': 'profileType', - 'attributeValue': 'GroupQosProfile'}]}} - ret = self.scapi._find_qos_profile('Default', True) - self.assertEqual({'instanceId': '12345.0'}, ret) - mock_post.assert_called_once_with('StorageCenter/ScQosProfile/GetList', - expected_payload) - - 
@mock.patch.object(storagecenter_api.SCApi, - '_get_json') - @mock.patch.object(storagecenter_api.HttpClient, - 'post') - def test__find_datareduction_profile(self, - mock_post, - mock_get_json, - mock_close_connection, - mock_open_connection, - mock_init): - mock_post.return_value = self.RESPONSE_200 - mock_get_json.return_value = [{'instanceId': '12345.0'}] - expected_payload = {'filter': {'filterType': 'AND', 'filters': [ - {'filterType': 'Equals', 'attributeName': 'ScSerialNumber', - 'attributeValue': 12345}, - {'filterType': 'Equals', 'attributeName': 'type', - 'attributeValue': 'Compression'}]}} - ret = self.scapi._find_data_reduction_profile('Compression') - self.assertEqual({'instanceId': '12345.0'}, ret) - mock_post.assert_called_once_with( - 'StorageCenter/ScDataReductionProfile/GetList', expected_payload) - - def test__find_datareduction_profile_no_drprofile(self, - mock_close_connection, - mock_open_connection, - mock_init): - ret = self.scapi._find_data_reduction_profile('') - self.assertIsNone(ret) - - @mock.patch.object(storagecenter_api.HttpClient, - 'post') - def test__find_datareduction_profile_error(self, - mock_post, - mock_close_connection, - mock_open_connection, - mock_init): - mock_post.return_value = self.RESPONSE_400 - ret = self.scapi._find_data_reduction_profile('Compression') - self.assertIsNone(ret) - - @mock.patch.object(storagecenter_api.SCApi, - '_get_json') - @mock.patch.object(storagecenter_api.HttpClient, - 'post') - def test__find_datareduction_profile_empty_list(self, - mock_post, - mock_get_json, - mock_close_connection, - mock_open_connection, - mock_init): - mock_post.return_value = self.RESPONSE_200 - mock_get_json.return_value = [] - ret = self.scapi._find_data_reduction_profile('Compression') - self.assertIsNone(ret) - - def test__check_add_profile_payload(self, - mock_close_connection, - mock_open_connection, - mock_init): - payload = {} - profile = {'instanceId': '12345.0'} - 
self.scapi._check_add_profile_payload(payload, profile, - 'Profile1', 'GroupQosProfile') - self.assertEqual({'GroupQosProfile': '12345.0'}, payload) - - def test__check_add_profile_payload_no_name(self, - mock_close_connection, - mock_open_connection, - mock_init): - payload = {} - profile = {'instanceId': '12345.0'} - self.scapi._check_add_profile_payload(payload, profile, - None, 'GroupQosProfile') - self.assertEqual({}, payload) - - def test__check_add_profile_payload_no_profile(self, - mock_close_connection, - mock_open_connection, - mock_init): - payload = {} - profile = None - self.assertRaises(exception.VolumeBackendAPIException, - self.scapi._check_add_profile_payload, - payload, profile, 'Profile1', - 'VolumeQosProfile') - - @mock.patch.object(storagecenter_api.SCApi, - '_get_user_preferences') - @mock.patch.object(storagecenter_api.HttpClient, - 'put') - @mock.patch.object(storagecenter_api.SCApi, - '_find_data_reduction_profile') - def test_update_datareduction_profile( - self, mock_find_datareduction_profile, mock_put, mock_prefs, - mock_close_connection, mock_open_connection, mock_init): - # Test we get and set our default - mock_find_datareduction_profile.return_value = {} - mock_prefs.return_value = { - 'allowDataReductionSelection': True, - 'dataReductionProfile': {'name': 'Default', - 'instanceId': '12345.0'}} - scvolume = {'name': fake.VOLUME_ID, 'instanceId': '12345.101'} - mock_put.return_value = self.RESPONSE_200 - expected = {'dataReductionProfile': '12345.0'} - res = self.scapi.update_datareduction_profile(scvolume, None) - self.assertTrue(res) - mock_put.assert_called_once_with( - 'StorageCenter/ScVolumeConfiguration/12345.101', expected, True) - - @mock.patch.object(storagecenter_api.SCApi, - '_get_user_preferences') - @mock.patch.object(storagecenter_api.HttpClient, - 'put') - @mock.patch.object(storagecenter_api.SCApi, - '_find_data_reduction_profile') - def test_update_datareduction_profile_error( - self, mock_find_datareduction_profile, 
mock_put, mock_prefs, - mock_close_connection, mock_open_connection, mock_init): - # Test we get and set our default - mock_find_datareduction_profile.return_value = {} - mock_prefs.return_value = { - 'allowDataReductionSelection': True, - 'dataReductionProfile': {'name': 'Default', - 'instanceId': '12345.0'}} - scvolume = {'name': fake.VOLUME_ID, 'instanceId': '12345.101'} - mock_put.return_value = self.RESPONSE_400 - expected = {'dataReductionProfile': '12345.0'} - res = self.scapi.update_datareduction_profile(scvolume, None) - self.assertFalse(res) - mock_put.assert_called_once_with( - 'StorageCenter/ScVolumeConfiguration/12345.101', expected, True) - - @mock.patch.object(storagecenter_api.SCApi, - '_get_user_preferences') - @mock.patch.object(storagecenter_api.SCApi, - '_find_data_reduction_profile') - def test_update_datareduction_profile_not_found( - self, mock_find_datareduction_profile, mock_prefs, - mock_close_connection, mock_open_connection, - mock_init): - mock_find_datareduction_profile.return_value = None - mock_prefs.return_value = {'allowDataReductionSelection': True} - scvolume = {'name': fake.VOLUME_ID, 'instanceId': '12345.101'} - res = self.scapi.update_datareduction_profile(scvolume, 'Profile') - self.assertFalse(res) - - @mock.patch.object(storagecenter_api.SCApi, - '_get_user_preferences') - @mock.patch.object(storagecenter_api.SCApi, - '_find_data_reduction_profile') - def test_update_datareduction_profile_not_allowed( - self, mock_find_datareduction_profile, mock_prefs, - mock_close_connection, mock_open_connection, - mock_init): - mock_find_datareduction_profile.return_value = None - mock_prefs.return_value = {'allowDataReductionSelection': False} - scvolume = {'name': fake.VOLUME_ID, 'instanceId': '12345.101'} - res = self.scapi.update_datareduction_profile(scvolume, None) - self.assertFalse(res) - - @mock.patch.object(storagecenter_api.SCApi, - '_get_user_preferences') - @mock.patch.object(storagecenter_api.SCApi, - 
'_find_data_reduction_profile') - def test_update_datareduction_profile_prefs_not_found( - self, mock_find_datareduction_profile, mock_prefs, - mock_close_connection, mock_open_connection, - mock_init): - mock_find_datareduction_profile.return_value = None - mock_prefs.return_value = None - scvolume = {'name': fake.VOLUME_ID, 'instanceId': '12345.101'} - res = self.scapi.update_datareduction_profile(scvolume, None) - self.assertFalse(res) - - @mock.patch.object(storagecenter_api.SCApi, - '_get_user_preferences') - @mock.patch.object(storagecenter_api.SCApi, - '_find_data_reduction_profile') - def test_update_datareduction_profile_default_not_found( - self, mock_find_datareduction_profile, mock_prefs, - mock_close_connection, mock_open_connection, - mock_init): - mock_find_datareduction_profile.return_value = None - mock_prefs.return_value = {'allowDataReductionSelection': True} - scvolume = {'name': fake.VOLUME_ID, 'instanceId': '12345.101'} - res = self.scapi.update_datareduction_profile(scvolume, None) - self.assertFalse(res) - - @mock.patch.object(storagecenter_api.SCApi, - '_get_user_preferences') - @mock.patch.object(storagecenter_api.HttpClient, - 'put', - return_value=RESPONSE_200) - @mock.patch.object(storagecenter_api.SCApi, - '_find_data_reduction_profile') - def test_update_datareduction_profile_default( - self, mock_find_datareduction_profile, mock_put, mock_prefs, - mock_close_connection, mock_open_connection, mock_init): - # Test we get and set our default - mock_find_datareduction_profile.return_value = None - mock_prefs.return_value = { - 'allowDataReductionSelection': True, - 'dataReductionProfile': {'name': 'Default', - 'instanceId': '12345.0'}} - scvolume = {'name': fake.VOLUME_ID, 'instanceId': '12345.101'} - res = self.scapi.update_datareduction_profile(scvolume, None) - self.assertTrue(res) - - @mock.patch.object(storagecenter_api.SCApi, - '_get_user_preferences') - @mock.patch.object(storagecenter_api.HttpClient, - 'put') - 
@mock.patch.object(storagecenter_api.SCApi, - '_find_qos_profile') - def test_update_qos_profile( - self, mock_find_qos_profile, mock_put, mock_prefs, - mock_close_connection, mock_open_connection, mock_init): - # Test we get and set our default - mock_find_qos_profile.return_value = {} - mock_prefs.return_value = { - 'allowQosProfileSelection': True, - 'volumeQosProfile': {'name': 'Default', - 'instanceId': '12345.0'}} - scvolume = {'name': fake.VOLUME_ID, 'instanceId': '12345.101'} - mock_put.return_value = self.RESPONSE_200 - expected = {'volumeQosProfile': '12345.0'} - res = self.scapi.update_qos_profile(scvolume, None) - self.assertTrue(res) - mock_put.assert_called_once_with( - 'StorageCenter/ScVolumeConfiguration/12345.101', expected, True) - - @mock.patch.object(storagecenter_api.SCApi, - '_get_user_preferences') - @mock.patch.object(storagecenter_api.HttpClient, - 'put') - @mock.patch.object(storagecenter_api.SCApi, - '_find_data_reduction_profile') - def test_update_qos_profile_error( - self, mock_find_qos_profile, mock_put, mock_prefs, - mock_close_connection, mock_open_connection, mock_init): - # Test we get and set our default - mock_find_qos_profile.return_value = {} - mock_prefs.return_value = { - 'allowQosProfileSelection': True, - 'volumeQosProfile': {'name': 'Default', - 'instanceId': '12345.0'}} - scvolume = {'name': fake.VOLUME_ID, 'instanceId': '12345.101'} - mock_put.return_value = self.RESPONSE_400 - expected = {'volumeQosProfile': '12345.0'} - res = self.scapi.update_qos_profile(scvolume, None) - self.assertFalse(res) - mock_put.assert_called_once_with( - 'StorageCenter/ScVolumeConfiguration/12345.101', expected, True) - - @mock.patch.object(storagecenter_api.SCApi, - '_get_user_preferences') - @mock.patch.object(storagecenter_api.SCApi, - '_find_qos_profile') - def test_update_qos_profile_not_found( - self, mock_find_qos_profile, mock_prefs, - mock_close_connection, mock_open_connection, - mock_init): - mock_find_qos_profile.return_value = 
None - mock_prefs.return_value = {'allowQosProfileSelection': True} - scvolume = {'name': fake.VOLUME_ID, 'instanceId': '12345.101'} - res = self.scapi.update_qos_profile(scvolume, 'Profile') - self.assertFalse(res) - - @mock.patch.object(storagecenter_api.SCApi, - '_get_user_preferences') - @mock.patch.object(storagecenter_api.SCApi, - '_find_qos_profile') - def test_update_qos_profile_not_allowed( - self, mock_find_qos_profile, mock_prefs, - mock_close_connection, mock_open_connection, - mock_init): - mock_find_qos_profile.return_value = None - mock_prefs.return_value = {'allowQosProfileSelection': False} - scvolume = {'name': fake.VOLUME_ID, 'instanceId': '12345.101'} - res = self.scapi.update_qos_profile(scvolume, None) - self.assertFalse(res) - - @mock.patch.object(storagecenter_api.SCApi, - '_get_user_preferences') - @mock.patch.object(storagecenter_api.SCApi, - '_find_qos_profile') - def test_update_qos_profile_prefs_not_found( - self, mock_find_qos_profile, mock_prefs, - mock_close_connection, mock_open_connection, - mock_init): - mock_find_qos_profile.return_value = None - mock_prefs.return_value = None - scvolume = {'name': fake.VOLUME_ID, 'instanceId': '12345.101'} - res = self.scapi.update_qos_profile(scvolume, None) - self.assertFalse(res) - - @mock.patch.object(storagecenter_api.SCApi, - '_get_user_preferences') - @mock.patch.object(storagecenter_api.SCApi, - '_find_qos_profile') - def test_update_qos_profile_default_not_found( - self, mock_find_qos_profile, mock_prefs, - mock_close_connection, mock_open_connection, - mock_init): - mock_find_qos_profile.return_value = None - mock_prefs.return_value = {'allowQosProfileSelection': True} - scvolume = {'name': fake.VOLUME_ID, 'instanceId': '12345.101'} - res = self.scapi.update_qos_profile(scvolume, None) - self.assertFalse(res) - - @mock.patch.object(storagecenter_api.SCApi, - '_get_user_preferences') - @mock.patch.object(storagecenter_api.HttpClient, - 'put') - 
@mock.patch.object(storagecenter_api.SCApi, - '_find_qos_profile') - def test_update_qos_profile_default( - self, mock_find_qos_profile, mock_put, mock_prefs, - mock_close_connection, mock_open_connection, mock_init): - # Test we get and set our default - mock_find_qos_profile.return_value = None - mock_prefs.return_value = { - 'allowQosProfileSelection': True, - 'volumeQosProfile': {'name': 'Default', - 'instanceId': '12345.0'}} - mock_put.return_value = self.RESPONSE_200 - scvolume = {'name': fake.VOLUME_ID, 'instanceId': '12345.101'} - res = self.scapi.update_qos_profile(scvolume, None) - self.assertTrue(res) - - -class DellSCSanAPIConnectionTestCase(test.TestCase): - - """DellSCSanAPIConnectionTestCase - - Class to test the Storage Center API connection using Mock. - """ - - # Create a Response object that indicates OK - response_ok = models.Response() - response_ok.status_code = 200 - response_ok.reason = u'ok' - RESPONSE_200 = response_ok - - # Create a Response object with no content - response_nc = models.Response() - response_nc.status_code = 204 - response_nc.reason = u'duplicate' - RESPONSE_204 = response_nc - - # Create a Response object is a pure error. 
- response_bad = models.Response() - response_bad.status_code = 400 - response_bad._content = '' - response_bad._content_consumed = True - response_bad.reason = u'bad request' - response_bad._content = '' - response_bad._content_consumed = True - RESPONSE_400 = response_bad - - APIDICT = {u'instanceId': u'0', - u'hostName': u'192.168.0.200', - u'userId': 434226, - u'connectionKey': u'', - u'minApiVersion': u'0.1', - u'webServicesPort': 3033, - u'locale': u'en_US', - u'objectType': u'ApiConnection', - u'secureString': u'', - u'applicationVersion': u'2.0.1', - u'source': u'REST', - u'commandLine': False, - u'application': u'Cinder REST Driver', - u'sessionKey': 1436460614863, - u'provider': u'EnterpriseManager', - u'instanceName': u'ApiConnection', - u'connected': True, - u'userName': u'Admin', - u'useHttps': False, - u'providerVersion': u'15.3.1.186', - u'apiVersion': u'2.2', - u'apiBuild': 199} - - def setUp(self): - super(DellSCSanAPIConnectionTestCase, self).setUp() - - # Configuration is a mock. A mock is pretty much a blank - # slate. I believe mock's done in setup are not happy time - # mocks. So we just do a few things like driver config here. - self.configuration = mock.Mock() - - self.configuration.san_is_local = False - self.configuration.san_ip = "192.168.0.1" - self.configuration.san_login = "admin" - self.configuration.san_password = "mmm" - self.configuration.dell_sc_ssn = 12345 - self.configuration.dell_sc_server_folder = 'openstack' - self.configuration.dell_sc_volume_folder = 'openstack' - # Note that we set this to True even though we do not - # test this functionality. This is sent directly to - # the requests calls as the verify parameter and as - # that is a third party library deeply stubbed out is - # not directly testable by this code. Note that in the - # case that this fails the driver fails to even come - # up. 
- self.configuration.dell_sc_verify_cert = True - self.configuration.dell_sc_api_port = 3033 - self.configuration.iscsi_ip_address = '192.168.1.1' - self.configuration.iscsi_port = 3260 - self._context = context.get_admin_context() - self.apiversion = '2.0' - - # Set up the SCApi - self.scapi = storagecenter_api.SCApi( - self.configuration.san_ip, - self.configuration.dell_sc_api_port, - self.configuration.san_login, - self.configuration.san_password, - self.configuration.dell_sc_verify_cert, - self.apiversion) - - # Set up the scapi configuration vars - self.scapi.ssn = self.configuration.dell_sc_ssn - self.scapi.sfname = self.configuration.dell_sc_server_folder - self.scapi.vfname = self.configuration.dell_sc_volume_folder - - @mock.patch.object(storagecenter_api.HttpClient, - 'post', - return_value=RESPONSE_200) - @mock.patch.object(storagecenter_api.SCApi, - '_get_json', - return_value=APIDICT) - def test_open_connection(self, - mock_get_json, - mock_post): - self.scapi.open_connection() - self.assertTrue(mock_post.called) - - @mock.patch.object(storagecenter_api.HttpClient, - 'post', - return_value=RESPONSE_400) - @mock.patch.object(storagecenter_api.SCApi, - '_check_version_fail', - return_value=RESPONSE_400) - def test_open_connection_failure(self, - mock_check_version_fail, - mock_post): - - self.assertRaises(exception.VolumeBackendAPIException, - self.scapi.open_connection) - self.assertTrue(mock_check_version_fail.called) - - @mock.patch.object(storagecenter_api.SCApi, - '_check_version_fail', - return_value=RESPONSE_200) - @mock.patch.object(storagecenter_api.SCApi, - '_get_json', - return_value=APIDICT) - @mock.patch.object(storagecenter_api.HttpClient, - 'post', - return_value=RESPONSE_400) - def test_open_connection_sc(self, - mock_post, - mock_get_json, - mock_check_version_fail): - self.scapi.open_connection() - self.assertTrue(mock_check_version_fail.called) - - @mock.patch.object(storagecenter_api.HttpClient, - 'post', - return_value=RESPONSE_204) 
- def test_close_connection(self, - mock_post): - self.scapi.close_connection() - self.assertTrue(mock_post.called) - - @mock.patch.object(storagecenter_api.HttpClient, - 'post', - return_value=RESPONSE_200) - def test_close_connection_failure(self, - mock_post): - self.scapi.close_connection() - self.assertTrue(mock_post.called) - - -class DellHttpClientTestCase(test.TestCase): - - """DellSCSanAPIConnectionTestCase - - Class to test the Storage Center API connection using Mock. - """ - - ASYNCTASK = {"state": "Running", - "methodName": "GetScUserPreferencesDefaults", - "error": "", - "started": True, - "userName": "", - "localizedError": "", - "returnValue": "https://localhost:3033/api/rest/" - "ApiConnection/AsyncTask/1418394170395", - "storageCenter": 0, - "errorState": "None", - "successful": False, - "stepMessage": "Running Method [Object: ScUserPreferences] " - "[Method: GetScUserPreferencesDefaults]", - "localizedStepMessage": "", - "warningList": [], - "totalSteps": 2, - "timeFinished": "1969-12-31T18:00:00-06:00", - "timeStarted": "2015-01-07T14:07:10-06:00", - "currentStep": 1, - "objectTypeName": "ScUserPreferences", - "objectType": "AsyncTask", - "instanceName": "1418394170395", - "instanceId": "1418394170395"} - - # Create a Response object that indicates OK - response_ok = models.Response() - response_ok.status_code = 200 - response_ok.reason = u'ok' - response_ok._content = '' - response_ok._content_consumed = True - RESPONSE_200 = response_ok - - # Create a Response object with no content - response_nc = models.Response() - response_nc.status_code = 204 - response_nc.reason = u'duplicate' - response_nc._content = '' - response_nc._content_consumed = True - RESPONSE_204 = response_nc - - # Create a Response object is a pure error. 
- response_bad = models.Response() - response_bad.status_code = 400 - response_bad.reason = u'bad request' - response_bad._content = '' - response_bad._content_consumed = True - RESPONSE_400 = response_bad - - def setUp(self): - super(DellHttpClientTestCase, self).setUp() - self.host = 'localhost' - self.port = '3033' - self.user = 'johnnyuser' - self.password = 'password' - self.verify = False - self.apiversion = '3.1' - self.httpclient = storagecenter_api.HttpClient( - self.host, self.port, self.user, self.password, - self.verify, self.apiversion) - - def test_get_async_url(self): - url = self.httpclient._get_async_url(self.ASYNCTASK) - self.assertEqual('api/rest/ApiConnection/AsyncTask/1418394170395', url) - - def test_get_async_url_no_id_on_url(self): - badTask = self.ASYNCTASK.copy() - badTask['returnValue'] = ('https://localhost:3033/api/rest/' - 'ApiConnection/AsyncTask/') - url = self.httpclient._get_async_url(badTask) - self.assertEqual('api/rest/ApiConnection/AsyncTask/1418394170395', url) - - def test_get_async_url_none(self): - self.assertRaises(AttributeError, self.httpclient._get_async_url, None) - - def test_get_async_url_no_id(self): - badTask = self.ASYNCTASK.copy() - badTask['returnValue'] = ('https://localhost:3033/api/rest/' - 'ApiConnection/AsyncTask/') - badTask['instanceId'] = '' - self.assertRaises(exception.VolumeBackendAPIException, - self.httpclient._get_async_url, badTask) - - def test_get_async_url_no_returnvalue(self): - badTask = self.ASYNCTASK.copy() - badTask['returnValue'] = None - url = self.httpclient._get_async_url(badTask) - self.assertEqual('api/rest/ApiConnection/AsyncTask/1418394170395', url) - - def test_get_async_url_no_blank_returnvalue(self): - badTask = self.ASYNCTASK.copy() - badTask['returnValue'] = '' - url = self.httpclient._get_async_url(badTask) - self.assertEqual('api/rest/ApiConnection/AsyncTask/1418394170395', url) - - def test_get_async_url_xml_returnvalue(self): - badTask = self.ASYNCTASK.copy() - 
badTask['returnValue'] = ('' - '1' - '' - '1' - '' - 'ApiMethodReturn' - '1' - 'True' - '' - 'false') - self.assertRaises(exception.VolumeBackendAPIException, - self.httpclient._get_async_url, badTask) - - def test_rest_ret(self): - rest_response = self.RESPONSE_200 - response = self.httpclient._rest_ret(rest_response, False) - self.assertEqual(self.RESPONSE_200, response) - - @mock.patch.object(storagecenter_api.HttpClient, - '_wait_for_async_complete', - return_value=RESPONSE_200) - def test_rest_ret_async(self, - mock_wait_for_async_complete): - mock_rest_response = mock.MagicMock() - mock_rest_response.status_code = 202 - response = self.httpclient._rest_ret(mock_rest_response, True) - self.assertEqual(self.RESPONSE_200, response) - self.assertTrue(mock_wait_for_async_complete.called) - - def test_rest_ret_async_error(self): - mock_rest_response = mock.MagicMock() - mock_rest_response.status_code = 400 - self.assertRaises(exception.VolumeBackendAPIException, - self.httpclient._rest_ret, mock_rest_response, True) - - @mock.patch.object(storagecenter_api.HttpClient, - 'get', - return_value=RESPONSE_200) - def test_wait_for_async_complete(self, - mock_get): - ret = self.httpclient._wait_for_async_complete(self.ASYNCTASK) - self.assertEqual(self.RESPONSE_200, ret) - - @mock.patch.object(storagecenter_api.HttpClient, - '_get_async_url', - return_value=None) - def test_wait_for_async_complete_bad_url(self, - mock_get_async_url): - ret = self.httpclient._wait_for_async_complete(self.ASYNCTASK) - self.assertIsNone(ret) - - @mock.patch.object(storagecenter_api.HttpClient, - 'get', - return_value=RESPONSE_400) - def test_wait_for_async_complete_bad_result(self, - mock_get): - ret = self.httpclient._wait_for_async_complete(self.ASYNCTASK) - self.assertEqual(self.RESPONSE_400, ret) - - @mock.patch.object(storagecenter_api.HttpClient, - 'get', - return_value=RESPONSE_200) - def test_wait_for_async_complete_loop(self, - mock_get): - mock_response = mock.MagicMock() - 
mock_response.content = mock.MagicMock() - mock_response.json = mock.MagicMock() - mock_response.json.side_effect = [self.ASYNCTASK, - {'objectType': 'ScVol'}] - ret = self.httpclient._wait_for_async_complete(self.ASYNCTASK) - self.assertEqual(self.RESPONSE_200, ret) - - @mock.patch.object(storagecenter_api.HttpClient, - 'get') - def test_wait_for_async_complete_get_raises(self, - mock_get): - mock_get.side_effect = (exception.DellDriverRetryableException()) - self.assertRaises(exception.VolumeBackendAPIException, - self.httpclient._wait_for_async_complete, - self.ASYNCTASK) - - @mock.patch.object(requests.Session, - 'get', - return_value=RESPONSE_200) - def test_get(self, - mock_get): - ret = self.httpclient.get('url') - self.assertEqual(self.RESPONSE_200, ret) - expected_headers = self.httpclient.header.copy() - mock_get.assert_called_once_with('https://localhost:3033/api/rest/url', - headers=expected_headers, - verify=False) - - -class DellStorageCenterApiHelperTestCase(test.TestCase): - - """DellStorageCenterApiHelper test case - - Class to test the Storage Center API helper using Mock. 
- """ - - @mock.patch.object(storagecenter_api.SCApi, - 'open_connection') - def test_setup_connection(self, - mock_open_connection): - config = mock.MagicMock() - config.dell_sc_ssn = 12345 - config.san_ip = '192.168.0.101' - config.san_login = 'username' - config.san_password = 'password' - config.dell_sc_volume_folder = 'a' - config.dell_sc_server_folder = 'a' - config.dell_sc_verify_cert = False - config.san_port = 3033 - helper = storagecenter_api.SCApiHelper(config, None, 'FC') - ret = helper._setup_connection() - self.assertEqual(12345, ret.primaryssn) - self.assertEqual(12345, ret.ssn) - self.assertEqual('FibreChannel', ret.protocol) - mock_open_connection.assert_called_once_with() - - @mock.patch.object(storagecenter_api.SCApi, - 'open_connection') - def test_setup_connection_iscsi(self, - mock_open_connection): - config = mock.MagicMock() - config.dell_sc_ssn = 12345 - config.san_ip = '192.168.0.101' - config.san_login = 'username' - config.san_password = 'password' - config.dell_sc_volume_folder = 'a' - config.dell_sc_server_folder = 'a' - config.dell_sc_verify_cert = False - config.san_port = 3033 - helper = storagecenter_api.SCApiHelper(config, None, 'iSCSI') - ret = helper._setup_connection() - self.assertEqual(12345, ret.primaryssn) - self.assertEqual(12345, ret.ssn) - self.assertEqual('Iscsi', ret.protocol) - mock_open_connection.assert_called_once_with() - - @mock.patch.object(storagecenter_api.SCApi, - 'open_connection') - def test_setup_connection_failover(self, - mock_open_connection): - config = mock.MagicMock() - config.dell_sc_ssn = 12345 - config.san_ip = '192.168.0.101' - config.san_login = 'username' - config.san_password = 'password' - config.dell_sc_volume_folder = 'a' - config.dell_sc_server_folder = 'a' - config.dell_sc_verify_cert = False - config.san_port = 3033 - helper = storagecenter_api.SCApiHelper(config, '67890', 'iSCSI') - ret = helper._setup_connection() - self.assertEqual(12345, ret.primaryssn) - self.assertEqual(67890, 
ret.ssn) - self.assertEqual('Iscsi', ret.protocol) - mock_open_connection.assert_called_once_with() - - @mock.patch.object(storagecenter_api.SCApiHelper, - '_setup_connection') - def test_open_connection(self, - mock_setup_connection): - config = mock.MagicMock() - config.dell_sc_ssn = 12345 - config.san_ip = '192.168.0.101' - config.san_login = 'username' - config.san_password = 'password' - config.san_port = 3033 - helper = storagecenter_api.SCApiHelper(config, None, 'FC') - mock_connection = mock.MagicMock() - mock_connection.apiversion = '3.1' - mock_setup_connection.return_value = mock_connection - ret = helper.open_connection() - self.assertEqual('3.1', ret.apiversion) - self.assertEqual('192.168.0.101', helper.san_ip) - self.assertEqual('username', helper.san_login) - self.assertEqual('password', helper.san_password) - - @mock.patch.object(storagecenter_api.SCApiHelper, - '_setup_connection') - def test_open_connection_fail_no_secondary(self, - mock_setup_connection): - - config = mock.MagicMock() - config.dell_sc_ssn = 12345 - config.san_ip = '192.168.0.101' - config.san_login = 'username' - config.san_password = 'password' - config.san_port = 3033 - config.secondary_san_ip = '' - helper = storagecenter_api.SCApiHelper(config, None, 'FC') - mock_setup_connection.side_effect = ( - exception.VolumeBackendAPIException('abc')) - self.assertRaises(exception.VolumeBackendAPIException, - helper.open_connection) - mock_setup_connection.assert_called_once_with() - self.assertEqual('192.168.0.101', helper.san_ip) - self.assertEqual('username', helper.san_login) - self.assertEqual('password', helper.san_password) - - @mock.patch.object(storagecenter_api.SCApiHelper, - '_setup_connection') - def test_open_connection_secondary(self, - mock_setup_connection): - - config = mock.MagicMock() - config.dell_sc_ssn = 12345 - config.san_ip = '192.168.0.101' - config.san_login = 'username' - config.san_password = 'password' - config.san_port = 3033 - config.secondary_san_ip = 
'192.168.0.102' - config.secondary_san_login = 'username2' - config.secondary_san_password = 'password2' - helper = storagecenter_api.SCApiHelper(config, None, 'FC') - mock_connection = mock.MagicMock() - mock_connection.apiversion = '3.1' - mock_setup_connection.side_effect = [ - (exception.VolumeBackendAPIException('abc')), mock_connection] - ret = helper.open_connection() - self.assertEqual('3.1', ret.apiversion) - self.assertEqual(2, mock_setup_connection.call_count) - self.assertEqual('192.168.0.102', helper.san_ip) - self.assertEqual('username2', helper.san_login) - self.assertEqual('password2', helper.san_password) - - @mock.patch.object(storagecenter_api.SCApiHelper, - '_setup_connection') - def test_open_connection_fail_partial_secondary_config( - self, mock_setup_connection): - - config = mock.MagicMock() - config.dell_sc_ssn = 12345 - config.san_ip = '192.168.0.101' - config.san_login = 'username' - config.san_password = 'password' - config.san_port = 3033 - config.secondary_san_ip = '192.168.0.102' - config.secondary_san_login = 'username2' - config.secondary_san_password = '' - helper = storagecenter_api.SCApiHelper(config, None, 'FC') - mock_setup_connection.side_effect = ( - exception.VolumeBackendAPIException('abc')) - self.assertRaises(exception.VolumeBackendAPIException, - helper.open_connection) - mock_setup_connection.assert_called_once_with() - self.assertEqual('192.168.0.101', helper.san_ip) - self.assertEqual('username', helper.san_login) - self.assertEqual('password', helper.san_password) - - @mock.patch.object(storagecenter_api.SCApiHelper, - '_setup_connection') - def test_open_connection_to_secondary_and_back(self, - mock_setup_connection): - - config = mock.MagicMock() - config.dell_sc_ssn = 12345 - config.san_ip = '192.168.0.101' - config.san_login = 'username' - config.san_password = 'password' - config.san_port = 3033 - config.secondary_san_ip = '192.168.0.102' - config.secondary_san_login = 'username2' - config.secondary_san_password 
= 'password2' - helper = storagecenter_api.SCApiHelper(config, None, 'FC') - mock_connection = mock.MagicMock() - mock_connection.apiversion = '3.1' - mock_setup_connection.side_effect = [ - (exception.VolumeBackendAPIException('abc')), mock_connection, - (exception.VolumeBackendAPIException('abc')), mock_connection] - helper.open_connection() - self.assertEqual('192.168.0.102', helper.san_ip) - self.assertEqual('username2', helper.san_login) - self.assertEqual('password2', helper.san_password) - self.assertEqual(2, mock_setup_connection.call_count) - helper.open_connection() - self.assertEqual('192.168.0.101', helper.san_ip) - self.assertEqual('username', helper.san_login) - self.assertEqual('password', helper.san_password) diff --git a/cinder/tests/unit/volume/drivers/dell_emc/scaleio/__init__.py b/cinder/tests/unit/volume/drivers/dell_emc/scaleio/__init__.py deleted file mode 100644 index ad24802ef..000000000 --- a/cinder/tests/unit/volume/drivers/dell_emc/scaleio/__init__.py +++ /dev/null @@ -1,201 +0,0 @@ -# Copyright (c) 2013 - 2015 EMC Corporation. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
-import copy -import requests -import six - -from cinder import test -from cinder.tests.unit.volume.drivers.dell_emc.scaleio import mocks -from cinder.volume import configuration as conf -from cinder.volume.drivers.dell_emc.scaleio import driver - - -class CustomResponseMode(object): - """A context manager to define a custom set of per-request response modes. - - Example: - - with CustomResponseMode(self, **{ - 'some/api/path': RESPONSE_MODE.Valid, - 'another/api/path': RESPONSE_MODE.BadStatus, - 'last/api/path': MockResponse('some data', - status_code=403), - }): - self.assertRaises(SomeException, self.driver.api_call, data) - """ - def __init__(self, test_instance, **kwargs): - self.test_instance = test_instance - self.custom_responses = kwargs - self.current_responses = None - - def __enter__(self): - self.current_responses = self.test_instance.HTTPS_MOCK_RESPONSES - - https_responses = copy.deepcopy( - self.test_instance.HTTPS_MOCK_RESPONSES - ) - current_mode = self.test_instance.current_https_response_mode - - for call, new_mode in self.custom_responses.items(): - if isinstance(new_mode, mocks.MockHTTPSResponse): - https_responses[current_mode][call] = new_mode - else: - https_responses[current_mode][call] = \ - self.test_instance.get_https_response(call, new_mode) - - self.test_instance.HTTPS_MOCK_RESPONSES = https_responses - - def __exit__(self, exc_type, exc_val, exc_tb): - self.test_instance.HTTPS_MOCK_RESPONSES = self.current_responses - - -class TestScaleIODriver(test.TestCase): - """Base ``TestCase`` subclass for the ``ScaleIODriver``""" - RESPONSE_MODE = type(str('ResponseMode'), (object, ), dict( - Valid='0', - Invalid='1', - BadStatus='2', - ValidVariant='3', - )) - __RESPONSE_MODE_NAMES = { - '0': 'Valid', - '1': 'Invalid', - '2': 'BadStatus', - '3': 'ValidVariant', - } - - BAD_STATUS_RESPONSE = mocks.MockHTTPSResponse( - { - 'errorCode': 500, - 'message': 'BadStatus Response Test', - }, 500 - ) - - OLD_VOLUME_NOT_FOUND_ERROR = 78 - 
VOLUME_NOT_FOUND_ERROR = 79 - - HTTPS_MOCK_RESPONSES = {} - __COMMON_HTTPS_MOCK_RESPONSES = { - RESPONSE_MODE.Valid: { - 'login': 'login_token', - }, - RESPONSE_MODE.BadStatus: { - 'login': mocks.MockHTTPSResponse( - { - 'errorCode': 403, - 'message': 'Bad Login Response Test', - }, 403 - ), - }, - } - __https_response_mode = RESPONSE_MODE.Valid - log = None - - STORAGE_POOL_ID = six.text_type('1') - STORAGE_POOL_NAME = 'SP1' - - PROT_DOMAIN_ID = six.text_type('1') - PROT_DOMAIN_NAME = 'PD1' - - def setUp(self): - """Setup a test case environment. - - Creates a ``ScaleIODriver`` instance - Mocks the ``requests.get/post`` methods to return - ``MockHTTPSResponse``'s instead. - """ - super(TestScaleIODriver, self).setUp() - self.configuration = conf.Configuration(driver.scaleio_opts, - conf.SHARED_CONF_GROUP) - self._set_overrides() - self.driver = mocks.ScaleIODriver(configuration=self.configuration) - - self.mock_object(requests, 'get', self.do_request) - self.mock_object(requests, 'post', self.do_request) - - def _set_overrides(self): - # Override the defaults to fake values - self.override_config('san_ip', override='127.0.0.1', - group=conf.SHARED_CONF_GROUP) - self.override_config('sio_rest_server_port', override='8888', - group=conf.SHARED_CONF_GROUP) - self.override_config('san_login', override='test', - group=conf.SHARED_CONF_GROUP) - self.override_config('san_password', override='pass', - group=conf.SHARED_CONF_GROUP) - self.override_config('sio_storage_pool_id', override='test_pool', - group=conf.SHARED_CONF_GROUP) - self.override_config('sio_protection_domain_id', - override='test_domain', - group=conf.SHARED_CONF_GROUP) - self.override_config('sio_storage_pools', - override='test_domain:test_pool', - group=conf.SHARED_CONF_GROUP) - self.override_config('max_over_subscription_ratio', - override=5.0, group=conf.SHARED_CONF_GROUP) - self.override_config('sio_server_api_version', - override='2.0.0', group=conf.SHARED_CONF_GROUP) - - def do_request(self, url, 
*args, **kwargs): - """Do a fake GET/POST API request. - - Splits `url` on '/api/' to get the what API call is, then returns - the value of `self.HTTPS_MOCK_RESPONSES[][]` - converting to a `MockHTTPSResponse` if necessary. - - :raises test.TestingException: If the current mode/api_call does not - exist. - :returns MockHTTPSResponse: - """ - return self.get_https_response(url.split('/api/')[1]) - - def set_https_response_mode(self, mode=RESPONSE_MODE.Valid): - """Set the HTTPS response mode. - - RESPONSE_MODE.Valid: Respond with valid data - RESPONSE_MODE.Invalid: Respond with invalid data - RESPONSE_MODE.BadStatus: Response with not-OK status code. - """ - self.__https_response_mode = mode - - def get_https_response(self, api_path, mode=None): - if mode is None: - mode = self.__https_response_mode - - try: - response = self.HTTPS_MOCK_RESPONSES[mode][api_path] - except KeyError: - try: - response = self.__COMMON_HTTPS_MOCK_RESPONSES[mode][api_path] - except KeyError: - raise test.TestingException( - 'Mock API Endpoint not implemented: [{}]{}'.format( - self.__RESPONSE_MODE_NAMES[mode], api_path - ) - ) - - if not isinstance(response, mocks.MockHTTPSResponse): - return mocks.MockHTTPSResponse(response, 200) - return response - - @property - def current_https_response_mode(self): - return self.__https_response_mode - - def https_response_mode_name(self, mode): - return self.__RESPONSE_MODE_NAMES[mode] - - def custom_response_mode(self, **kwargs): - return CustomResponseMode(self, **kwargs) diff --git a/cinder/tests/unit/volume/drivers/dell_emc/scaleio/mocks.py b/cinder/tests/unit/volume/drivers/dell_emc/scaleio/mocks.py deleted file mode 100644 index dd3911cce..000000000 --- a/cinder/tests/unit/volume/drivers/dell_emc/scaleio/mocks.py +++ /dev/null @@ -1,71 +0,0 @@ -# Copyright (c) 2013 - 2015 EMC Corporation. -# All Rights Reserved. 
-# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -import json -import requests -import six - -from oslo_config import cfg - -from cinder.volume.drivers.dell_emc.scaleio import driver - -CONF = cfg.CONF - - -class ScaleIODriver(driver.ScaleIODriver): - """Mock ScaleIO Driver class. - - Provides some fake configuration options - """ - def local_path(self, volume): - pass - - def reenable_replication(self, context, volume): - pass - - def promote_replica(self, context, volume): - pass - - def create_replica_test_volume(self, volume, src_vref): - pass - - def unmanage(self, volume): - pass - - -class MockHTTPSResponse(requests.Response): - """Mock HTTP Response - - Defines the https replies from the mocked calls to do_request() - """ - def __init__(self, content, status_code=200): - super(MockHTTPSResponse, self).__init__() - - if isinstance(content, six.text_type): - content = content.encode('utf-8') - self._content = content - self.status_code = status_code - - def json(self, **kwargs): - if isinstance(self._content, (bytes, six.text_type)): - return super(MockHTTPSResponse, self).json(**kwargs) - - return self._content - - @property - def text(self): - if not isinstance(self._content, (bytes, six.text_type)): - return json.dumps(self._content) - - return super(MockHTTPSResponse, self).text diff --git a/cinder/tests/unit/volume/drivers/dell_emc/scaleio/test_attach_detach_volume.py b/cinder/tests/unit/volume/drivers/dell_emc/scaleio/test_attach_detach_volume.py deleted 
file mode 100644 index 0cb158ac6..000000000 --- a/cinder/tests/unit/volume/drivers/dell_emc/scaleio/test_attach_detach_volume.py +++ /dev/null @@ -1,45 +0,0 @@ -# Copyright (c) 2016 EMC Corporation. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from cinder import context -from cinder.tests.unit import fake_constants as fake -from cinder.tests.unit import fake_volume -from cinder.tests.unit.volume.drivers.dell_emc import scaleio - - -class TestAttachDetachVolume(scaleio.TestScaleIODriver): - - def setUp(self): - super(TestAttachDetachVolume, self).setUp() - ctx = context.RequestContext('fake', 'fake', auth_token=True) - self.fake_path = '/fake/path/vol-xx' - self.volume = fake_volume.fake_volume_obj( - ctx, **{'provider_id': fake.PROVIDER_ID}) - self.driver.connector = FakeConnector() - - def test_attach_volume(self): - path = self.driver._sio_attach_volume(self.volume) - self.assertEqual(self.fake_path, path) - - def test_detach_volume(self): - self.driver._sio_detach_volume(self.volume) - - -class FakeConnector(object): - def connect_volume(self, connection_properties): - return {'path': '/fake/path/vol-xx'} - - def disconnect_volume(self, connection_properties, volume): - return None diff --git a/cinder/tests/unit/volume/drivers/dell_emc/scaleio/test_create_cloned_volume.py b/cinder/tests/unit/volume/drivers/dell_emc/scaleio/test_create_cloned_volume.py deleted file mode 100644 index 4edad7ffd..000000000 --- 
a/cinder/tests/unit/volume/drivers/dell_emc/scaleio/test_create_cloned_volume.py +++ /dev/null @@ -1,106 +0,0 @@ -# Copyright (c) 2013 - 2015 EMC Corporation. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import json - -from six.moves import urllib - -from cinder import context -from cinder import exception -from cinder.tests.unit import fake_constants as fake -from cinder.tests.unit import fake_volume -from cinder.tests.unit.volume.drivers.dell_emc import scaleio -from cinder.tests.unit.volume.drivers.dell_emc.scaleio import mocks - - -class TestCreateClonedVolume(scaleio.TestScaleIODriver): - """Test cases for ``ScaleIODriver.create_cloned_volume()``""" - def setUp(self): - """Setup a test case environment. - - Creates fake volume objects and sets up the required API responses. 
- """ - super(TestCreateClonedVolume, self).setUp() - ctx = context.RequestContext('fake', 'fake', auth_token=True) - - self.src_volume = fake_volume.fake_volume_obj( - ctx, **{'provider_id': fake.PROVIDER_ID}) - - self.src_volume_name_2x_enc = urllib.parse.quote( - urllib.parse.quote( - self.driver._id_to_base64(self.src_volume.id) - ) - ) - - self.new_volume_extras = { - 'volumeIdList': ['cloned'], - 'snapshotGroupId': 'cloned_snapshot' - } - - self.new_volume = fake_volume.fake_volume_obj( - ctx, **self.new_volume_extras - ) - - self.new_volume_name_2x_enc = urllib.parse.quote( - urllib.parse.quote( - self.driver._id_to_base64(self.new_volume.id) - ) - ) - self.HTTPS_MOCK_RESPONSES = { - self.RESPONSE_MODE.Valid: { - 'types/Volume/instances/getByName::' + - self.src_volume_name_2x_enc: self.src_volume.id, - 'instances/System/action/snapshotVolumes': '{}'.format( - json.dumps(self.new_volume_extras)), - }, - self.RESPONSE_MODE.BadStatus: { - 'instances/System/action/snapshotVolumes': - self.BAD_STATUS_RESPONSE, - 'types/Volume/instances/getByName::' + - self.src_volume['provider_id']: self.BAD_STATUS_RESPONSE, - }, - self.RESPONSE_MODE.Invalid: { - 'types/Volume/instances/getByName::' + - self.src_volume_name_2x_enc: None, - 'instances/System/action/snapshotVolumes': - mocks.MockHTTPSResponse( - { - 'errorCode': 400, - 'message': 'Invalid Volume Snapshot Test' - }, 400 - ), - }, - } - - def test_bad_login(self): - self.set_https_response_mode(self.RESPONSE_MODE.BadStatus) - self.assertRaises(exception.VolumeBackendAPIException, - self.driver.create_cloned_volume, - self.new_volume, self.src_volume) - - def test_invalid_source_volume(self): - self.set_https_response_mode(self.RESPONSE_MODE.Invalid) - self.assertRaises(exception.VolumeBackendAPIException, - self.driver.create_cloned_volume, - self.new_volume, self.src_volume) - - def test_create_cloned_volume(self): - self.set_https_response_mode(self.RESPONSE_MODE.Valid) - 
self.driver.create_cloned_volume(self.new_volume, self.src_volume) - - def test_create_cloned_volume_larger_size(self): - self.set_https_response_mode(self.RESPONSE_MODE.Valid) - self.new_volume.size = 2 - self.driver.create_cloned_volume(self.new_volume, self.src_volume) diff --git a/cinder/tests/unit/volume/drivers/dell_emc/scaleio/test_create_snapshot.py b/cinder/tests/unit/volume/drivers/dell_emc/scaleio/test_create_snapshot.py deleted file mode 100644 index 816b89a21..000000000 --- a/cinder/tests/unit/volume/drivers/dell_emc/scaleio/test_create_snapshot.py +++ /dev/null @@ -1,117 +0,0 @@ -# Copyright (c) 2013 - 2015 EMC Corporation. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -import json - -from six.moves import urllib - -from cinder import context -from cinder import db -from cinder import exception -from cinder.tests.unit import fake_constants as fake -from cinder.tests.unit import fake_snapshot -from cinder.tests.unit import fake_volume -from cinder.tests.unit.volume.drivers.dell_emc import scaleio -from cinder.tests.unit.volume.drivers.dell_emc.scaleio import mocks - - -class TestCreateSnapShot(scaleio.TestScaleIODriver): - """Test cases for ``ScaleIODriver.create_snapshot()``""" - def return_fake_volume(self, ctx, id): - return self.fake_volume - - def setUp(self): - """Setup a test case environment. - - Creates fake volume and snapshot objects and sets up the required - API responses. 
- """ - super(TestCreateSnapShot, self).setUp() - ctx = context.RequestContext('fake', 'fake', auth_token=True) - - self.fake_volume = fake_volume.fake_volume_obj( - ctx, **{'provider_id': fake.PROVIDER_ID}) - - self.snapshot = fake_snapshot.fake_snapshot_obj( - ctx, **{'volume': self.fake_volume}) - - self.mock_object(db.sqlalchemy.api, 'volume_get', - self.return_fake_volume) - - snap_vol_id = self.snapshot.volume_id - self.volume_name_2x_enc = urllib.parse.quote( - urllib.parse.quote(self.driver._id_to_base64(snap_vol_id)) - ) - self.snapshot_name_2x_enc = urllib.parse.quote( - urllib.parse.quote(self.driver._id_to_base64(self.snapshot.id)) - ) - - self.snapshot_reply = json.dumps( - { - 'volumeIdList': ['cloned'], - 'snapshotGroupId': 'cloned_snapshot' - } - ) - - self.HTTPS_MOCK_RESPONSES = { - self.RESPONSE_MODE.Valid: { - 'types/Volume/instances/getByName::' + - self.volume_name_2x_enc: '"{}"'.format( - self.snapshot.volume_id - ), - 'instances/System/action/snapshotVolumes': - self.snapshot_reply, - 'types/Volume/instances/getByName::' + - self.snapshot_name_2x_enc: self.snapshot.id, - }, - self.RESPONSE_MODE.BadStatus: { - 'types/Volume/instances/getByName::' + - self.volume_name_2x_enc: self.BAD_STATUS_RESPONSE, - 'types/Volume/instances/getByName::' + - self.snapshot_name_2x_enc: self.BAD_STATUS_RESPONSE, - 'instances/System/action/snapshotVolumes': - self.BAD_STATUS_RESPONSE, - }, - self.RESPONSE_MODE.Invalid: { - 'types/Volume/instances/getByName::' + - self.volume_name_2x_enc: None, - 'instances/System/action/snapshotVolumes': - mocks.MockHTTPSResponse( - { - 'errorCode': 400, - 'message': 'Invalid Volume Snapshot Test' - }, 400 - ), - }, - } - - def test_bad_login(self): - self.set_https_response_mode(self.RESPONSE_MODE.BadStatus) - self.assertRaises( - exception.VolumeBackendAPIException, - self.driver.create_snapshot, - self.snapshot - ) - - def test_invalid_volume(self): - self.set_https_response_mode(self.RESPONSE_MODE.Invalid) - 
self.assertRaises( - exception.VolumeBackendAPIException, - self.driver.create_snapshot, - self.snapshot - ) - - def test_create_snapshot(self): - self.set_https_response_mode(self.RESPONSE_MODE.Valid) - self.driver.create_snapshot(self.snapshot) diff --git a/cinder/tests/unit/volume/drivers/dell_emc/scaleio/test_create_volume.py b/cinder/tests/unit/volume/drivers/dell_emc/scaleio/test_create_volume.py deleted file mode 100644 index 17fd77afe..000000000 --- a/cinder/tests/unit/volume/drivers/dell_emc/scaleio/test_create_volume.py +++ /dev/null @@ -1,144 +0,0 @@ -# Copyright (c) 2013 - 2015 EMC Corporation. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import ddt -import mock - -from cinder import context -from cinder import exception -from cinder.tests.unit import fake_volume -from cinder.tests.unit.volume.drivers.dell_emc import scaleio - - -@ddt.ddt -class TestCreateVolume(scaleio.TestScaleIODriver): - """Test cases for ``ScaleIODriver.create_volume()``""" - def setUp(self): - """Setup a test case environment. - - Creates a fake volume object and sets up the required API responses. 
- """ - super(TestCreateVolume, self).setUp() - ctx = context.RequestContext('fake', 'fake', auth_token=True) - - self.volume = fake_volume.fake_volume_obj(ctx) - host = 'host@backend#{}:{}'.format( - self.PROT_DOMAIN_NAME, - self.STORAGE_POOL_NAME) - self.volume.host = host - - self.HTTPS_MOCK_RESPONSES = { - self.RESPONSE_MODE.Valid: { - 'types/Volume/instances/getByName::' + - self.volume.name: '"{}"'.format(self.volume.id), - 'types/Volume/instances': {'id': self.volume.id}, - 'types/Domain/instances/getByName::' + - self.PROT_DOMAIN_NAME: - '"{}"'.format(self.PROT_DOMAIN_ID), - 'types/Pool/instances/getByName::{},{}'.format( - self.PROT_DOMAIN_ID, - self.STORAGE_POOL_NAME - ): '"{}"'.format(self.STORAGE_POOL_ID), - }, - self.RESPONSE_MODE.Invalid: { - 'types/Domain/instances/getByName::' + - self.PROT_DOMAIN_NAME: None, - 'types/Pool/instances/getByName::{},{}'.format( - self.PROT_DOMAIN_ID, - self.STORAGE_POOL_NAME - ): None, - }, - self.RESPONSE_MODE.BadStatus: { - 'types/Volume/instances': self.BAD_STATUS_RESPONSE, - 'types/Domain/instances/getByName::' + - self.PROT_DOMAIN_NAME: self.BAD_STATUS_RESPONSE, - 'types/Pool/instances/getByName::{},{}'.format( - self.PROT_DOMAIN_ID, - self.STORAGE_POOL_NAME - ): self.BAD_STATUS_RESPONSE, - }, - } - - def test_no_domain(self): - """No protection domain name or ID provided.""" - self.driver.configuration.sio_protection_domain_name = None - self.driver.configuration.sio_protection_domain_id = None - self.driver.storage_pools = None - self.volume.host = "host@backend" - self.assertRaises(exception.VolumeBackendAPIException, - self.test_create_volume) - - def test_no_domain_id(self): - """Only protection domain name provided.""" - self.driver.protection_domain_id = None - self.driver.protection_domain_name = self.PROT_DOMAIN_NAME - self.driver.storage_pool_name = None - self.driver.storage_pool_id = self.STORAGE_POOL_ID - self.test_create_volume() - - def test_no_domain_id_invalid_response(self): - 
self.set_https_response_mode(self.RESPONSE_MODE.Invalid) - self.assertRaises(exception.VolumeBackendAPIException, - self.test_no_domain_id) - - def test_no_domain_id_badstatus_response(self): - self.set_https_response_mode(self.RESPONSE_MODE.BadStatus) - self.assertRaises(exception.VolumeBackendAPIException, - self.test_no_domain_id) - - def test_no_storage_id(self): - """Only protection domain name provided.""" - self.driver.storage_pool_id = None - self.driver.storage_pool_name = self.STORAGE_POOL_NAME - self.driver.protection_domain_id = self.PROT_DOMAIN_ID - self.driver.protection_domain_name = None - self.test_create_volume() - - def test_no_storage_id_invalid_response(self): - self.set_https_response_mode(self.RESPONSE_MODE.Invalid) - self.assertRaises(exception.VolumeBackendAPIException, - self.test_no_storage_id) - - def test_no_storage_id_badstatus_response(self): - self.set_https_response_mode(self.RESPONSE_MODE.BadStatus) - self.assertRaises(exception.VolumeBackendAPIException, - self.test_no_storage_id) - - def test_create_volume(self): - """Valid create volume parameters""" - self.driver.create_volume(self.volume) - - def test_create_volume_non_8_gran(self): - self.volume.size = 14 - model_update = self.driver.create_volume(self.volume) - self.assertEqual(16, model_update['size']) - - def test_create_volume_badstatus_response(self): - self.set_https_response_mode(self.RESPONSE_MODE.BadStatus) - self.assertRaises(exception.VolumeBackendAPIException, - self.test_create_volume) - - @ddt.data({'provisioning:type': 'thin'}, {'provisioning:type': 'thin'}) - def test_create_thin_thick_volume(self, extraspecs): - self.driver._get_volumetype_extraspecs = mock.MagicMock() - self.driver._get_volumetype_extraspecs.return_value = extraspecs - self.driver.create_volume(self.volume) - - def test_create_volume_bad_provisioning_type(self): - extraspecs = {'provisioning:type': 'other'} - self.driver._get_volumetype_extraspecs = mock.MagicMock() - 
self.driver._get_volumetype_extraspecs.return_value = extraspecs - self.assertRaises(exception.VolumeBackendAPIException, - self.test_create_volume) diff --git a/cinder/tests/unit/volume/drivers/dell_emc/scaleio/test_create_volume_from_snapshot.py b/cinder/tests/unit/volume/drivers/dell_emc/scaleio/test_create_volume_from_snapshot.py deleted file mode 100644 index 9623b2ca0..000000000 --- a/cinder/tests/unit/volume/drivers/dell_emc/scaleio/test_create_volume_from_snapshot.py +++ /dev/null @@ -1,100 +0,0 @@ -# Copyright (c) 2013 - 2015 EMC Corporation. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -import json - -from six.moves import urllib - -from cinder import context -from cinder import exception -from cinder.tests.unit import fake_snapshot -from cinder.tests.unit import fake_volume -from cinder.tests.unit.volume.drivers.dell_emc import scaleio -from cinder.tests.unit.volume.drivers.dell_emc.scaleio import mocks - - -class TestCreateVolumeFromSnapShot(scaleio.TestScaleIODriver): - """Test cases for ``ScaleIODriver.create_volume_from_snapshot()``""" - def setUp(self): - """Setup a test case environment. - - Creates fake volume and snapshot objects and sets up the required - API responses. 
- """ - super(TestCreateVolumeFromSnapShot, self).setUp() - ctx = context.RequestContext('fake', 'fake', auth_token=True) - - self.snapshot = fake_snapshot.fake_snapshot_obj(ctx) - self.snapshot_name_2x_enc = urllib.parse.quote( - urllib.parse.quote(self.driver._id_to_base64(self.snapshot.id)) - ) - self.volume = fake_volume.fake_volume_obj(ctx) - self.volume_name_2x_enc = urllib.parse.quote( - urllib.parse.quote(self.driver._id_to_base64(self.volume.id)) - ) - - self.snapshot_reply = json.dumps( - { - 'volumeIdList': [self.volume.id], - 'snapshotGroupId': 'snap_group' - } - ) - - self.HTTPS_MOCK_RESPONSES = { - self.RESPONSE_MODE.Valid: { - 'types/Volume/instances/getByName::' + - self.snapshot_name_2x_enc: self.snapshot.id, - 'instances/System/action/snapshotVolumes': - self.snapshot_reply, - }, - self.RESPONSE_MODE.BadStatus: { - 'instances/System/action/snapshotVolumes': - self.BAD_STATUS_RESPONSE, - 'types/Volume/instances/getByName::' + - self.snapshot_name_2x_enc: self.BAD_STATUS_RESPONSE, - }, - self.RESPONSE_MODE.Invalid: { - 'instances/System/action/snapshotVolumes': - mocks.MockHTTPSResponse( - { - 'errorCode': self.OLD_VOLUME_NOT_FOUND_ERROR, - 'message': 'BadStatus Volume Test', - }, 400 - ), - 'types/Volume/instances/getByName::' + - self.snapshot_name_2x_enc: None, - }, - } - - def test_bad_login(self): - self.set_https_response_mode(self.RESPONSE_MODE.BadStatus) - self.assertRaises( - exception.VolumeBackendAPIException, - self.driver.create_volume_from_snapshot, - self.volume, - self.snapshot - ) - - def test_invalid_snapshot(self): - self.set_https_response_mode(self.RESPONSE_MODE.Invalid) - self.assertRaises( - exception.VolumeBackendAPIException, - self.driver.create_volume_from_snapshot, - self.volume, - self.snapshot - ) - - def test_create_volume_from_snapshot(self): - self.set_https_response_mode(self.RESPONSE_MODE.Valid) - self.driver.create_volume_from_snapshot(self.volume, self.snapshot) diff --git 
a/cinder/tests/unit/volume/drivers/dell_emc/scaleio/test_delete_snapshot.py b/cinder/tests/unit/volume/drivers/dell_emc/scaleio/test_delete_snapshot.py deleted file mode 100644 index 217c738ee..000000000 --- a/cinder/tests/unit/volume/drivers/dell_emc/scaleio/test_delete_snapshot.py +++ /dev/null @@ -1,95 +0,0 @@ -# Copyright (c) 2013 - 2015 EMC Corporation. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -from six.moves import urllib - -from cinder import context -from cinder import exception -from cinder.tests.unit import fake_constants as fake -from cinder.tests.unit.fake_snapshot import fake_snapshot_obj -from cinder.tests.unit.volume.drivers.dell_emc import scaleio -from cinder.tests.unit.volume.drivers.dell_emc.scaleio import mocks -from cinder.volume import configuration - - -class TestDeleteSnapShot(scaleio.TestScaleIODriver): - """Test cases for ``ScaleIODriver.delete_snapshot()``""" - def setUp(self): - """Setup a test case environment. - - Creates fake volume and snapshot objects and sets up the required - API responses. 
- """ - super(TestDeleteSnapShot, self).setUp() - ctx = context.RequestContext('fake', 'fake', auth_token=True) - - self.snapshot = fake_snapshot_obj( - ctx, **{'provider_id': fake.SNAPSHOT_ID}) - self.snapshot_name_2x_enc = urllib.parse.quote( - urllib.parse.quote( - self.driver._id_to_base64(self.snapshot.id) - ) - ) - - self.HTTPS_MOCK_RESPONSES = { - self.RESPONSE_MODE.Valid: { - 'types/Volume/instances/getByName::' + - self.snapshot_name_2x_enc: self.snapshot.id, - 'instances/Volume::{}/action/removeMappedSdc'.format( - self.snapshot.provider_id - ): self.snapshot.id, - 'instances/Volume::{}/action/removeVolume'.format( - self.snapshot.provider_id - ): self.snapshot.id, - }, - self.RESPONSE_MODE.BadStatus: { - 'types/Volume/instances/getByName::' + - self.snapshot_name_2x_enc: self.BAD_STATUS_RESPONSE, - 'instances/Volume::{}/action/removeVolume'.format( - self.snapshot.provider_id - ): self.BAD_STATUS_RESPONSE, - }, - self.RESPONSE_MODE.Invalid: { - 'types/Volume/instances/getByName::' + - self.snapshot_name_2x_enc: mocks.MockHTTPSResponse( - { - 'errorCode': self.OLD_VOLUME_NOT_FOUND_ERROR, - 'message': 'Test Delete Invalid Snapshot', - }, 400 - ), - 'instances/Volume::{}/action/removeVolume'.format( - self.snapshot.provider_id): mocks.MockHTTPSResponse( - { - 'errorCode': self.OLD_VOLUME_NOT_FOUND_ERROR, - 'message': 'Test Delete Invalid Snapshot', - }, 400, - ) - }, - } - - def test_bad_login(self): - self.set_https_response_mode(self.RESPONSE_MODE.BadStatus) - self.assertRaises(exception.VolumeBackendAPIException, - self.driver.delete_snapshot, self.snapshot) - - def test_delete_invalid_snapshot(self): - self.set_https_response_mode(self.RESPONSE_MODE.Valid) - self.driver.delete_snapshot(self.snapshot) - - def test_delete_snapshot(self): - """Setting the unmap volume before delete flag for tests """ - self.override_config('sio_unmap_volume_before_deletion', True, - configuration.SHARED_CONF_GROUP) - self.set_https_response_mode(self.RESPONSE_MODE.Valid) - 
self.driver.delete_snapshot(self.snapshot) diff --git a/cinder/tests/unit/volume/drivers/dell_emc/scaleio/test_delete_volume.py b/cinder/tests/unit/volume/drivers/dell_emc/scaleio/test_delete_volume.py deleted file mode 100644 index aa55ca70d..000000000 --- a/cinder/tests/unit/volume/drivers/dell_emc/scaleio/test_delete_volume.py +++ /dev/null @@ -1,82 +0,0 @@ -# Copyright (c) 2013 - 2015 EMC Corporation. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -from six.moves import urllib - -from cinder import context -from cinder import exception -from cinder.tests.unit import fake_constants as fake -from cinder.tests.unit import fake_volume -from cinder.tests.unit.volume.drivers.dell_emc import scaleio -from cinder.tests.unit.volume.drivers.dell_emc.scaleio import mocks -from cinder.volume import configuration - - -class TestDeleteVolume(scaleio.TestScaleIODriver): - """Test cases for ``ScaleIODriver.delete_volume()``""" - def setUp(self): - """Setup a test case environment. - - Creates a fake volume object and sets up the required API responses. 
- """ - super(TestDeleteVolume, self).setUp() - ctx = context.RequestContext('fake', 'fake', auth_token=True) - - self.volume = fake_volume.fake_volume_obj( - ctx, **{'provider_id': fake.PROVIDER_ID}) - - self.volume_name_2x_enc = urllib.parse.quote( - urllib.parse.quote(self.driver._id_to_base64(self.volume.id)) - ) - - self.HTTPS_MOCK_RESPONSES = { - self.RESPONSE_MODE.Valid: { - 'types/Volume/instances/getByName::' + - self.volume_name_2x_enc: self.volume.id, - 'instances/Volume::{}/action/removeMappedSdc'.format( - self.volume.provider_id): self.volume.provider_id, - 'instances/Volume::{}/action/removeVolume'.format( - self.volume.provider_id - ): self.volume.provider_id, - }, - self.RESPONSE_MODE.BadStatus: { - 'types/Volume/instances/getByName::' + - self.volume_name_2x_enc: mocks.MockHTTPSResponse( - { - 'errorCode': 401, - 'message': 'BadStatus Volume Test', - }, 401 - ), - 'instances/Volume::{}/action/removeVolume'.format( - self.volume.provider_id - ): mocks.MockHTTPSResponse( - { - 'errorCode': 401, - 'message': 'BadStatus Volume Test', - }, 401 - ), - }, - } - - def test_bad_login_and_volume(self): - self.set_https_response_mode(self.RESPONSE_MODE.BadStatus) - self.assertRaises(exception.VolumeBackendAPIException, - self.driver.delete_volume, - self.volume) - - def test_delete_volume(self): - """Setting the unmap volume before delete flag for tests """ - self.override_config('sio_unmap_volume_before_deletion', True, - configuration.SHARED_CONF_GROUP) - self.driver.delete_volume(self.volume) diff --git a/cinder/tests/unit/volume/drivers/dell_emc/scaleio/test_extend_volume.py b/cinder/tests/unit/volume/drivers/dell_emc/scaleio/test_extend_volume.py deleted file mode 100644 index 90e1adcc5..000000000 --- a/cinder/tests/unit/volume/drivers/dell_emc/scaleio/test_extend_volume.py +++ /dev/null @@ -1,107 +0,0 @@ -# Copyright (c) 2013 - 2015 EMC Corporation. -# All Rights Reserved. 
-# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -from six.moves import urllib - -from cinder import context -from cinder import exception -from cinder.tests.unit import fake_constants as fake -from cinder.tests.unit.fake_volume import fake_volume_obj -from cinder.tests.unit.volume.drivers.dell_emc import scaleio -from cinder.tests.unit.volume.drivers.dell_emc.scaleio import mocks -from cinder.volume import configuration - - -class TestExtendVolume(scaleio.TestScaleIODriver): - """Test cases for ``ScaleIODriver.extend_volume()``""" - - """ New sizes for the volume. - Since ScaleIO has a granularity of 8 GB, multiples of 8 always work. - The 7 size should be either rounded up to 8 or raise an exception - based on the round_volume_capacity config setting. - """ - NEW_SIZE = 16 - BAD_SIZE = 7 - - def setUp(self): - """Setup a test case environment. - - Creates fake volume object and sets up the required API responses. 
- """ - super(TestExtendVolume, self).setUp() - ctx = context.RequestContext('fake', 'fake', auth_token=True) - - self.volume = fake_volume_obj(ctx, **{'id': fake.VOLUME_ID, - 'provider_id': fake.PROVIDER_ID}) - self.volume_name_2x_enc = urllib.parse.quote( - urllib.parse.quote(self.driver._id_to_base64(self.volume.id)) - ) - - self.HTTPS_MOCK_RESPONSES = { - self.RESPONSE_MODE.Valid: { - 'types/Volume/instances/getByName::' + - self.volume_name_2x_enc: '"{}"'.format(self.volume.id), - 'instances/Volume::{}/action/setVolumeSize'.format( - self.volume.provider_id - ): mocks.MockHTTPSResponse({}, 200), - }, - self.RESPONSE_MODE.BadStatus: { - 'types/Volume/instances/getByName::' + - self.volume_name_2x_enc: self.BAD_STATUS_RESPONSE, - 'types/Volume/instances/getByName::' + - self.volume_name_2x_enc: self.BAD_STATUS_RESPONSE, - 'instances/Volume::{}/action/setVolumeSize'.format( - self.volume.provider_id): self.BAD_STATUS_RESPONSE, - }, - self.RESPONSE_MODE.Invalid: { - 'types/Volume/instances/getByName::' + - self.volume_name_2x_enc: None, - 'instances/Volume::{}/action/setVolumeSize'.format( - self.volume.provider_id): mocks.MockHTTPSResponse( - { - 'errorCode': self.OLD_VOLUME_NOT_FOUND_ERROR, - 'message': 'BadStatus Volume Test', - }, 400 - ), - }, - } - - def test_bad_login(self): - self.set_https_response_mode(self.RESPONSE_MODE.BadStatus) - self.assertRaises(exception.VolumeBackendAPIException, - self.driver.extend_volume, - self.volume, - self.NEW_SIZE) - - def test_invalid_volume(self): - self.set_https_response_mode(self.RESPONSE_MODE.Invalid) - self.assertRaises(exception.VolumeBackendAPIException, - self.driver.extend_volume, - self.volume, - self.NEW_SIZE) - - def test_extend_volume_bad_size_no_round(self): - self.override_config('sio_round_volume_capacity', False, - configuration.SHARED_CONF_GROUP) - self.set_https_response_mode(self.RESPONSE_MODE.Valid) - self.driver.extend_volume(self.volume, self.BAD_SIZE) - - def 
test_extend_volume_bad_size_round(self): - self.override_config('sio_round_volume_capacity', True, - configuration.SHARED_CONF_GROUP) - self.driver.extend_volume(self.volume, self.BAD_SIZE) - - def test_extend_volume(self): - self.set_https_response_mode(self.RESPONSE_MODE.Valid) - self.driver.extend_volume(self.volume, self.NEW_SIZE) diff --git a/cinder/tests/unit/volume/drivers/dell_emc/scaleio/test_get_manageable.py b/cinder/tests/unit/volume/drivers/dell_emc/scaleio/test_get_manageable.py deleted file mode 100644 index ca290975f..000000000 --- a/cinder/tests/unit/volume/drivers/dell_emc/scaleio/test_get_manageable.py +++ /dev/null @@ -1,201 +0,0 @@ -# Copyright (C) 2017 Dell Inc. or its subsidiaries. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -from copy import deepcopy - -import ddt -import mock - -from cinder.tests.unit import fake_volume -from cinder.tests.unit.volume.drivers.dell_emc import scaleio - - -VOLUME_ID = "abcdabcd-1234-abcd-1234-abcdabcdabcd" -PROVIDER_ID = "0000000000000001" - -MANAGEABLE_SCALEIO_VOLS = [ - { - "volumeType": "ThinProvisioned", - "storagePoolId": "6c6dc54500000000", - "sizeInKb": 8388608, - "name": "volume1", - "id": PROVIDER_ID, - "mappedSdcInfo": [], - }, - { - "volumeType": "ThinProvisioned", - "storagePoolId": "6c6dc54500000000", - "sizeInKb": 8388608, - "name": "volume2", - "id": "0000000000000002", - "mappedSdcInfo": [], - }, - { - "volumeType": "ThickProvisioned", - "storagePoolId": "6c6dc54500000000", - "sizeInKb": 8388608, - "name": "volume3", - "id": "0000000000000003", - "mappedSdcInfo": [], - } -] - -SCALEIO_SNAPSHOT = { - "volumeType": "Snapshot", - "storagePoolId": "6c6dc54500000000", - "sizeInKb": 8388608, - "name": "snapshot1", - "id": "1000000000000001", - "mappedSdcInfo": [], -} - -MANAGEABLE_SCALEIO_VOL_REFS = [ - { - 'reference': {'source-id': PROVIDER_ID}, - 'size': 8, - 'safe_to_manage': True, - 'reason_not_safe': None, - 'cinder_id': None, - 'extra_info': { - "volumeType": "ThinProvisioned", - "name": "volume1" - } - }, - { - 'reference': {'source-id': '0000000000000002'}, - 'size': 8, - 'safe_to_manage': True, - 'reason_not_safe': None, - 'cinder_id': None, - 'extra_info': { - "volumeType": "ThinProvisioned", - "name": "volume2" - } - }, - { - 'reference': {'source-id': '0000000000000003'}, - 'size': 8, - 'safe_to_manage': True, - 'reason_not_safe': None, - 'cinder_id': None, - 'extra_info': { - "volumeType": "ThickProvisioned", - "name": "volume3" - } - } -] - - -@ddt.ddt -class ScaleIOManageableCase(scaleio.TestScaleIODriver): - - def setUp(self): - """Setup a test case environment.""" - super(ScaleIOManageableCase, self).setUp() - - def _test_get_manageable_things(self, - scaleio_objects=MANAGEABLE_SCALEIO_VOLS, - 
expected_refs=MANAGEABLE_SCALEIO_VOL_REFS, - cinder_objs=list()): - marker = mock.Mock() - limit = mock.Mock() - offset = mock.Mock() - sort_keys = mock.Mock() - sort_dirs = mock.Mock() - - self.HTTPS_MOCK_RESPONSES = { - self.RESPONSE_MODE.Valid: { - 'instances/StoragePool::test_pool/relationships/Volume': - scaleio_objects, - 'types/Pool/instances/getByName::{},{}'.format( - "test_domain", - "test_pool" - ): '"{}"'.format("test_pool").encode('ascii', 'ignore'), - 'types/Domain/instances/getByName::' + - "test_domain": '"{}"'.format("test_domain").encode( - 'ascii', - 'ignore' - ), - }, - - } - - with mock.patch('cinder.volume.utils.paginate_entries_list') as mpage: - test_func = self.driver.get_manageable_volumes - test_func(cinder_objs, marker, limit, offset, sort_keys, sort_dirs) - mpage.assert_called_once_with( - expected_refs, - marker, - limit, - offset, - sort_keys, - sort_dirs - ) - - def test_get_manageable_volumes(self): - """Default success case. - - Given a list of scaleio volumes from the REST API, give back a list - of volume references. - """ - - self._test_get_manageable_things() - - def test_get_manageable_volumes_connected_vol(self): - """Make sure volumes connected to hosts are flagged as unsafe.""" - mapped_sdc = deepcopy(MANAGEABLE_SCALEIO_VOLS) - mapped_sdc[0]['mappedSdcInfo'] = ["host1"] - mapped_sdc[1]['mappedSdcInfo'] = ["host1", "host2"] - - # change up the expected results - expected_refs = deepcopy(MANAGEABLE_SCALEIO_VOL_REFS) - for x in range(len(mapped_sdc)): - sdc = mapped_sdc[x]['mappedSdcInfo'] - if sdc and len(sdc) > 0: - expected_refs[x]['safe_to_manage'] = False - expected_refs[x]['reason_not_safe'] \ - = 'Volume mapped to %d host(s).' 
% len(sdc) - - self._test_get_manageable_things(expected_refs=expected_refs, - scaleio_objects=mapped_sdc) - - def test_get_manageable_volumes_already_managed(self): - """Make sure volumes already owned by cinder are flagged as unsafe.""" - cinder_vol = fake_volume.fake_volume_obj(mock.MagicMock()) - cinder_vol.id = VOLUME_ID - cinder_vol.provider_id = PROVIDER_ID - cinders_vols = [cinder_vol] - - # change up the expected results - expected_refs = deepcopy(MANAGEABLE_SCALEIO_VOL_REFS) - expected_refs[0]['reference'] = {'source-id': PROVIDER_ID} - expected_refs[0]['safe_to_manage'] = False - expected_refs[0]['reason_not_safe'] = 'Volume already managed.' - expected_refs[0]['cinder_id'] = VOLUME_ID - - self._test_get_manageable_things(expected_refs=expected_refs, - cinder_objs=cinders_vols) - - def test_get_manageable_volumes_no_snapshots(self): - """Make sure refs returned do not include snapshots.""" - volumes = deepcopy(MANAGEABLE_SCALEIO_VOLS) - volumes.append(SCALEIO_SNAPSHOT) - - self._test_get_manageable_things(scaleio_objects=volumes) - - def test_get_manageable_volumes_no_scaleio_volumes(self): - """Expect no refs to be found if no volumes are on ScaleIO.""" - self._test_get_manageable_things(scaleio_objects=[], - expected_refs=[]) diff --git a/cinder/tests/unit/volume/drivers/dell_emc/scaleio/test_groups.py b/cinder/tests/unit/volume/drivers/dell_emc/scaleio/test_groups.py deleted file mode 100644 index 2bdb666d3..000000000 --- a/cinder/tests/unit/volume/drivers/dell_emc/scaleio/test_groups.py +++ /dev/null @@ -1,279 +0,0 @@ -# Copyright (C) 2017 Dell Inc. or its subsidiaries. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. 
You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import json -import mock - -from cinder import context -from cinder.objects import fields - -from cinder.tests.unit import fake_constants as fake -from cinder.tests.unit import fake_group -from cinder.tests.unit import fake_snapshot -from cinder.tests.unit import fake_volume -from cinder.tests.unit.volume.drivers.dell_emc import scaleio -from cinder.tests.unit.volume.drivers.dell_emc.scaleio import mocks - - -class TestGroups(scaleio.TestScaleIODriver): - """Test cases for ``ScaleIODriver groups support``""" - - def setUp(self): - """Setup a test case environment. - - Creates a fake volume object and sets up the required API responses. 
- """ - super(TestGroups, self).setUp() - self.ctx = context.RequestContext('fake', 'fake', auth_token=True) - self.fake_grp_snap = {'id': 'group_snap_id', - 'name': 'test_group_snapshot', - 'group_id': fake.GROUP_ID, - 'status': fields.GroupSnapshotStatus.AVAILABLE - } - self.group = ( - fake_group.fake_group_obj( - self.ctx, **{'id': fake.GROUP_ID})) - fake_volume1 = fake_volume.fake_volume_obj( - self.ctx, - **{'id': fake.VOLUME_ID, 'provider_id': fake.PROVIDER_ID}) - fake_volume2 = fake_volume.fake_volume_obj( - self.ctx, - **{'id': fake.VOLUME2_ID, 'provider_id': fake.PROVIDER2_ID}) - fake_volume3 = fake_volume.fake_volume_obj( - self.ctx, - **{'id': fake.VOLUME3_ID, 'provider_id': fake.PROVIDER3_ID}) - fake_volume4 = fake_volume.fake_volume_obj( - self.ctx, - **{'id': fake.VOLUME4_ID, 'provider_id': fake.PROVIDER4_ID}) - self.volumes = [fake_volume1, fake_volume2] - self.volumes2 = [fake_volume3, fake_volume4] - fake_snapshot1 = fake_snapshot.fake_snapshot_obj( - self.ctx, - **{'id': fake.SNAPSHOT_ID, 'volume_id': fake.VOLUME_ID, - 'volume': fake_volume1}) - fake_snapshot2 = fake_snapshot.fake_snapshot_obj( - self.ctx, - **{'id': fake.SNAPSHOT2_ID, 'volume_id': fake.VOLUME2_ID, 'volume': - fake_volume2}) - self.snapshots = [fake_snapshot1, fake_snapshot2] - self.snapshot_reply = json.dumps({ - 'volumeIdList': ['sid1', 'sid2'], - 'snapshotGroupId': 'sgid1'}) - self.HTTPS_MOCK_RESPONSES = { - self.RESPONSE_MODE.Valid: { - 'instances/Volume::{}/action/removeVolume'.format( - fake_volume1['provider_id'] - ): fake_volume1['provider_id'], - 'instances/Volume::{}/action/removeVolume'.format( - fake_volume2['provider_id'] - ): fake_volume2['provider_id'], - 'instances/Volume::{}/action/removeMappedSdc'.format( - fake_volume1['provider_id'] - ): fake_volume1['provider_id'], - 'instances/Volume::{}/action/removeMappedSdc'.format( - fake_volume2['provider_id'] - ): fake_volume2['provider_id'], - 'instances/System/action/snapshotVolumes': - self.snapshot_reply, - }, - 
self.RESPONSE_MODE.BadStatus: { - 'instances/Volume::{}/action/removeVolume'.format( - fake_volume1['provider_id'] - ): mocks.MockHTTPSResponse( - { - 'errorCode': 401, - 'message': 'BadStatus Volume Test', - }, 401 - ), - 'instances/Volume::{}/action/removeVolume'.format( - fake_volume2['provider_id'] - ): mocks.MockHTTPSResponse( - { - 'errorCode': 401, - 'message': 'BadStatus Volume Test', - }, 401 - ), - 'instances/System/action/snapshotVolumes': - self.BAD_STATUS_RESPONSE - }, - } - - @mock.patch('cinder.volume.utils.is_group_a_cg_snapshot_type') - def test_create_group(self, is_group_a_cg_snapshot_type): - """Test group create. - - should throw NotImplementedError, is_group_a_cg_snapshot_type=False - otherwise returns status of 'available' - """ - is_group_a_cg_snapshot_type.side_effect = [False, True] - - self.assertRaises(NotImplementedError, - self.driver.create_group, self.ctx, self.group) - - model_update = self.driver.create_group(self.ctx, self.group) - self.assertEqual(fields.GroupStatus.AVAILABLE, - model_update['status']) - - @mock.patch('cinder.volume.utils.is_group_a_cg_snapshot_type') - def test_delete_group(self, is_group_a_cg_snapshot_type): - """Test group deletion. 
- - should throw NotImplementedError, is_group_a_cg_snapshot_type=False - otherwise returns status of 'deleted' - """ - is_group_a_cg_snapshot_type.side_effect = [False, True] - - self.assertRaises(NotImplementedError, - self.driver.delete_group, - self.ctx, self.group, self.volumes) - - model_update = self.driver.delete_group(self.ctx, - self.group, - self.volumes) - self.assertEqual(fields.GroupStatus.DELETED, - model_update[0]['status']) - - @mock.patch('cinder.volume.utils.is_group_a_cg_snapshot_type') - def test_update_group(self, is_group_a_cg_snapshot_type): - """Test updating a group - - should throw NotImplementedError, is_group_a_cg_snapshot_type=False - otherwise returns 'None' for each of the updates - """ - is_group_a_cg_snapshot_type.side_effect = [False, True] - - self.assertRaises(NotImplementedError, - self.driver.update_group, self.ctx, self.group) - - mod_up, add_up, remove_up = self.driver.update_group(self.ctx, - self.group) - self.assertIsNone(mod_up) - self.assertIsNone(add_up) - self.assertIsNone(remove_up) - - @mock.patch('cinder.volume.utils.is_group_a_cg_snapshot_type') - def test_create_group_from_src_group(self, is_group_a_cg_snapshot_type): - """Test creating group from source group - - should throw NotImplementedError, is_group_a_cg_snapshot_type=False - otherwise returns list of volumes in 'available' state - """ - self.set_https_response_mode(self.RESPONSE_MODE.Valid) - - is_group_a_cg_snapshot_type.side_effect = [False, True] - - self.assertRaises(NotImplementedError, - self.driver.create_group_from_src, - self.ctx, self.group, self.volumes, - source_group=self.group, source_vols=self.volumes) - - result_model_update, result_volumes_model_update = ( - self.driver.create_group_from_src( - self.ctx, self.group, self.volumes, - source_group=self.group, source_vols=self.volumes)) - self.assertEqual(fields.GroupStatus.AVAILABLE, - result_model_update['status']) - get_pid = lambda snapshot: snapshot['provider_id'] - volume_provider_list 
= list(map(get_pid, result_volumes_model_update)) - self.assertListEqual(volume_provider_list, ['sid1', 'sid2']) - - @mock.patch('cinder.volume.utils.is_group_a_cg_snapshot_type') - def test_create_group_from_src_snapshot(self, is_group_a_cg_snapshot_type): - """Test creating group from snapshot - - """ - self.set_https_response_mode(self.RESPONSE_MODE.Valid) - is_group_a_cg_snapshot_type.side_effect = [False, True] - - self.assertRaises(NotImplementedError, - self.driver.create_group_from_src, - self.ctx, self.group, self.volumes, - group_snapshot=self.fake_grp_snap, - snapshots=self.snapshots) - - result_model_update, result_volumes_model_update = ( - self.driver.create_group_from_src( - self.ctx, self.group, self.volumes, - group_snapshot=self.fake_grp_snap, - snapshots=self.snapshots)) - self.assertEqual(fields.GroupStatus.AVAILABLE, - result_model_update['status']) - get_pid = lambda snapshot: snapshot['provider_id'] - volume_provider_list = list(map(get_pid, result_volumes_model_update)) - self.assertListEqual(volume_provider_list, ['sid1', 'sid2']) - - @mock.patch('cinder.volume.utils.is_group_a_cg_snapshot_type') - def test_delete_group_snapshot(self, is_group_a_cg_snapshot_type): - """Test deleting group snapshot - - should throw NotImplementedError, is_group_a_cg_snapshot_type=False - otherwise returns model updates - """ - is_group_a_cg_snapshot_type.side_effect = [False, True] - self.set_https_response_mode(self.RESPONSE_MODE.Valid) - - self.snapshots[0].volume = self.volumes[0] - self.snapshots[1].volume = self.volumes[1] - self.snapshots[0].provider_id = fake.PROVIDER_ID - self.snapshots[1].provider_id = fake.PROVIDER2_ID - - self.assertRaises(NotImplementedError, - self.driver.delete_group_snapshot, - self.ctx, - self.group, - self.snapshots) - - result_model_update, result_snapshot_model_update = ( - self.driver.delete_group_snapshot( - self.ctx, - self.group, - self.snapshots - )) - self.assertEqual(fields.GroupSnapshotStatus.DELETED, - 
result_model_update['status']) - self.assertTrue(all(snapshot['status'] == 'deleted' for snapshot in - result_snapshot_model_update)) - - @mock.patch('cinder.volume.utils.is_group_a_cg_snapshot_type') - def test_create_group_snapshot(self, is_group_a_cg_snapshot_type): - """Test creating group snapshot - - should throw NotImplementedError, is_group_a_cg_snapshot_type=False - otherwise returns model updates - """ - is_group_a_cg_snapshot_type.side_effect = [False, True] - self.set_https_response_mode(self.RESPONSE_MODE.Valid) - - self.assertRaises(NotImplementedError, - self.driver.create_group_snapshot, - self.ctx, - self.group, - self.snapshots) - - result_model_update, result_snapshot_model_update = ( - self.driver.create_group_snapshot( - self.ctx, - self.group, - self.snapshots - )) - self.assertEqual(fields.GroupSnapshotStatus.AVAILABLE, - result_model_update['status']) - self.assertTrue(all(snapshot['status'] == 'available' for snapshot in - result_snapshot_model_update)) - get_pid = lambda snapshot: snapshot['provider_id'] - snapshot_provider_list = list(map(get_pid, - result_snapshot_model_update)) - - self.assertListEqual(['sid1', 'sid2'], snapshot_provider_list) diff --git a/cinder/tests/unit/volume/drivers/dell_emc/scaleio/test_initialize_connection.py b/cinder/tests/unit/volume/drivers/dell_emc/scaleio/test_initialize_connection.py deleted file mode 100644 index 4cf97fb63..000000000 --- a/cinder/tests/unit/volume/drivers/dell_emc/scaleio/test_initialize_connection.py +++ /dev/null @@ -1,116 +0,0 @@ -# Copyright (c) 2015 EMC Corporation. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. 
You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -import mock - -from cinder import context -from cinder.tests.unit import fake_constants as fake -from cinder.tests.unit import fake_volume -from cinder.tests.unit.volume.drivers.dell_emc import scaleio - - -class TestInitializeConnection(scaleio.TestScaleIODriver): - def setUp(self): - """Setup a test case environment.""" - - super(TestInitializeConnection, self).setUp() - self.connector = {} - self.ctx = ( - context.RequestContext('fake', 'fake', True, auth_token=True)) - self.volume = fake_volume.fake_volume_obj( - self.ctx, **{'provider_id': fake.PROVIDER_ID}) - - def test_only_qos(self): - qos = {'maxIOPS': 1000, 'maxBWS': 2048} - extraspecs = {} - connection_properties = ( - self._initialize_connection(qos, extraspecs)['data']) - self.assertEqual(1000, int(connection_properties['iopsLimit'])) - self.assertEqual(2048, int(connection_properties['bandwidthLimit'])) - - def test_no_qos(self): - qos = {} - extraspecs = {} - connection_properties = ( - self._initialize_connection(qos, extraspecs)['data']) - self.assertIsNone(connection_properties['iopsLimit']) - self.assertIsNone(connection_properties['bandwidthLimit']) - - def test_only_extraspecs(self): - qos = {} - extraspecs = {'sio:iops_limit': 2000, 'sio:bandwidth_limit': 4096} - connection_properties = ( - self._initialize_connection(qos, extraspecs)['data']) - self.assertEqual(2000, int(connection_properties['iopsLimit'])) - self.assertEqual(4096, int(connection_properties['bandwidthLimit'])) - - def test_qos_and_extraspecs(self): - qos = {'maxIOPS': 1000, 'maxBWS': 3072} - extraspecs = 
{'sio:iops_limit': 2000, 'sio:bandwidth_limit': 4000} - connection_properties = ( - self._initialize_connection(qos, extraspecs)['data']) - self.assertEqual(1000, int(connection_properties['iopsLimit'])) - self.assertEqual(3072, int(connection_properties['bandwidthLimit'])) - - def test_qos_scaling_and_max(self): - qos = {'maxIOPS': 100, 'maxBWS': 2048, 'maxIOPSperGB': 10, - 'maxBWSperGB': 128} - extraspecs = {} - self.volume.size = 8 - connection_properties = ( - self._initialize_connection(qos, extraspecs)['data']) - self.assertEqual(80, int(connection_properties['iopsLimit'])) - self.assertEqual(1024, int(connection_properties['bandwidthLimit'])) - - self.volume.size = 24 - connection_properties = ( - self._initialize_connection(qos, extraspecs)['data']) - self.assertEqual(100, int(connection_properties['iopsLimit'])) - self.assertEqual(2048, int(connection_properties['bandwidthLimit'])) - - def test_qos_scaling_no_max(self): - qos = {'maxIOPSperGB': 10, 'maxBWSperGB': 128} - extraspecs = {} - self.volume.size = 8 - connection_properties = ( - self._initialize_connection(qos, extraspecs)['data']) - self.assertEqual(80, int(connection_properties['iopsLimit'])) - self.assertEqual(1024, int(connection_properties['bandwidthLimit'])) - - def test_qos_round_up(self): - qos = {'maxBWS': 2000, 'maxBWSperGB': 100} - extraspecs = {} - self.volume.size = 8 - connection_properties = ( - self._initialize_connection(qos, extraspecs)['data']) - self.assertEqual(1024, int(connection_properties['bandwidthLimit'])) - - self.volume.size = 24 - connection_properties = ( - self._initialize_connection(qos, extraspecs)['data']) - self.assertEqual(2048, int(connection_properties['bandwidthLimit'])) - - def test_vol_id(self): - extraspecs = qos = {} - connection_properties = ( - self._initialize_connection(extraspecs, qos)['data']) - self.assertEqual(fake.PROVIDER_ID, - connection_properties['scaleIO_volume_id']) - - def _initialize_connection(self, qos, extraspecs): - 
self.driver._get_volumetype_qos = mock.MagicMock() - self.driver._get_volumetype_qos.return_value = qos - self.driver._get_volumetype_extraspecs = mock.MagicMock() - self.driver._get_volumetype_extraspecs.return_value = extraspecs - return self.driver.initialize_connection(self.volume, self.connector) diff --git a/cinder/tests/unit/volume/drivers/dell_emc/scaleio/test_manage_existing.py b/cinder/tests/unit/volume/drivers/dell_emc/scaleio/test_manage_existing.py deleted file mode 100644 index 4a693e89c..000000000 --- a/cinder/tests/unit/volume/drivers/dell_emc/scaleio/test_manage_existing.py +++ /dev/null @@ -1,127 +0,0 @@ -# Copyright (c) 2016 EMC Corporation. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from cinder import context -from cinder import exception -from cinder.tests.unit import fake_constants as fake -from cinder.tests.unit import fake_volume -from cinder.tests.unit.volume.drivers.dell_emc import scaleio -from cinder.tests.unit.volume.drivers.dell_emc.scaleio import mocks -from cinder.volume import volume_types -from mock import patch -from six.moves import urllib - - -class TestManageExisting(scaleio.TestScaleIODriver): - """Test cases for ``ScaleIODriver.manage_existing()``""" - - def setUp(self): - """Setup a test case environment. - - Creates a fake volume object and sets up the required API responses. 
- """ - super(TestManageExisting, self).setUp() - ctx = context.RequestContext('fake', 'fake', auth_token=True) - self.volume = fake_volume.fake_volume_obj( - ctx, **{'provider_id': fake.PROVIDER_ID}) - self.volume_attached = fake_volume.fake_volume_obj( - ctx, **{'provider_id': fake.PROVIDER2_ID}) - self.volume_no_provider_id = fake_volume.fake_volume_obj(ctx) - self.volume_name_2x_enc = urllib.parse.quote( - urllib.parse.quote(self.driver._id_to_base64(self.volume.id)) - ) - - self.HTTPS_MOCK_RESPONSES = { - self.RESPONSE_MODE.Valid: { - 'instances/Volume::' + self.volume['provider_id']: - mocks.MockHTTPSResponse({ - 'id': fake.PROVIDER_ID, - 'sizeInKb': 8000000, - 'mappedSdcInfo': None - }, 200) - }, - self.RESPONSE_MODE.BadStatus: { - 'instances/Volume::' + self.volume['provider_id']: - mocks.MockHTTPSResponse({ - 'errorCode': 401, - 'message': 'BadStatus Volume Test', - }, 401), - 'instances/Volume::' + self.volume_attached['provider_id']: - mocks.MockHTTPSResponse({ - 'id': fake.PROVIDER2_ID, - 'sizeInKb': 8388608, - 'mappedSdcInfo': 'Mapped' - }, 200) - } - } - - def test_no_source_id(self): - existing_ref = {'source-name': 'scaleioVolName'} - self.assertRaises(exception.ManageExistingInvalidReference, - self.driver.manage_existing, self.volume, - existing_ref) - - def test_no_type_id(self): - self.volume['volume_type_id'] = None - existing_ref = {'source-id': fake.PROVIDER_ID} - self.assertRaises(exception.ManageExistingVolumeTypeMismatch, - self.driver.manage_existing, self.volume, - existing_ref) - - @patch.object( - volume_types, - 'get_volume_type', - return_value={'extra_specs': {'volume_backend_name': 'ScaleIO'}}) - def test_volume_not_found(self, _mock_volume_type): - self.volume['volume_type_id'] = fake.VOLUME_TYPE_ID - existing_ref = {'source-id': fake.PROVIDER_ID} - self.set_https_response_mode(self.RESPONSE_MODE.BadStatus) - self.assertRaises(exception.ManageExistingInvalidReference, - self.driver.manage_existing, self.volume, - existing_ref) - - 
@patch.object( - volume_types, - 'get_volume_type', - return_value={'extra_specs': {'volume_backend_name': 'ScaleIO'}}) - def test_volume_attached(self, _mock_volume_type): - self.volume_attached['volume_type_id'] = fake.VOLUME_TYPE_ID - existing_ref = {'source-id': fake.PROVIDER2_ID} - self.set_https_response_mode(self.RESPONSE_MODE.BadStatus) - self.assertRaises(exception.ManageExistingInvalidReference, - self.driver.manage_existing, self.volume_attached, - existing_ref) - - @patch.object( - volume_types, - 'get_volume_type', - return_value={'extra_specs': {'volume_backend_name': 'ScaleIO'}}) - def test_manage_get_size_calc(self, _mock_volume_type): - self.volume['volume_type_id'] = fake.VOLUME_TYPE_ID - existing_ref = {'source-id': fake.PROVIDER_ID} - self.set_https_response_mode(self.RESPONSE_MODE.Valid) - result = self.driver.manage_existing_get_size(self.volume, - existing_ref) - self.assertEqual(8, result) - - @patch.object( - volume_types, - 'get_volume_type', - return_value={'extra_specs': {'volume_backend_name': 'ScaleIO'}}) - def test_manage_existing_valid(self, _mock_volume_type): - self.volume['volume_type_id'] = fake.VOLUME_TYPE_ID - existing_ref = {'source-id': fake.PROVIDER_ID} - result = self.driver.manage_existing(self.volume, existing_ref) - self.assertEqual(fake.PROVIDER_ID, result['provider_id']) diff --git a/cinder/tests/unit/volume/drivers/dell_emc/scaleio/test_manage_existing_snapshot.py b/cinder/tests/unit/volume/drivers/dell_emc/scaleio/test_manage_existing_snapshot.py deleted file mode 100644 index e3e65ded8..000000000 --- a/cinder/tests/unit/volume/drivers/dell_emc/scaleio/test_manage_existing_snapshot.py +++ /dev/null @@ -1,154 +0,0 @@ -# Copyright (c) 2016 EMC Corporation. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. 
You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -from mock import patch - -from cinder import context -from cinder import exception -from cinder.tests.unit import fake_constants as fake -from cinder.tests.unit import fake_snapshot -from cinder.tests.unit import fake_volume -from cinder.tests.unit.volume.drivers.dell_emc import scaleio -from cinder.tests.unit.volume.drivers.dell_emc.scaleio import mocks -from cinder.volume import volume_types - - -class TestManageExistingSnapshot(scaleio.TestScaleIODriver): - """Test cases for ``ScaleIODriver.manage_existing_snapshot()``""" - - def setUp(self): - """Setup a test case environment. - - Creates a fake volume object and sets up the required API responses. 
- """ - super(TestManageExistingSnapshot, self).setUp() - ctx = context.RequestContext('fake', 'fake', auth_token=True) - self.volume = fake_volume.fake_volume_obj( - ctx, **{'provider_id': fake.PROVIDER_ID}) - self.snapshot = fake_snapshot.fake_snapshot_obj( - ctx, **{'provider_id': fake.PROVIDER2_ID}) - self.snapshot2 = fake_snapshot.fake_snapshot_obj( - ctx, **{'provider_id': fake.PROVIDER3_ID}) - self.snapshot.volume = self.snapshot2.volume = self.volume - self.snapshot['volume_type_id'] = fake.VOLUME_TYPE_ID - self.snapshot2['volume_type_id'] = fake.VOLUME_TYPE_ID - self.snapshot_attached = fake_snapshot.fake_snapshot_obj( - ctx, **{'provider_id': fake.PROVIDER3_ID}) - - self.HTTPS_MOCK_RESPONSES = { - self.RESPONSE_MODE.Valid: { - 'instances/Volume::' + self.volume['provider_id']: - mocks.MockHTTPSResponse({ - 'id': fake.PROVIDER_ID, - 'sizeInKb': 8388608, - 'mappedSdcInfo': None, - 'ancestorVolumeId': None - }, 200), - 'instances/Volume::' + self.snapshot['provider_id']: - mocks.MockHTTPSResponse({ - 'id': fake.PROVIDER2_ID, - 'sizeInKb': 8000000, - 'mappedSdcInfo': None, - 'ancestorVolumeId': fake.PROVIDER_ID - }, 200), - 'instances/Volume::' + self.snapshot2['provider_id']: - mocks.MockHTTPSResponse({ - 'id': fake.PROVIDER3_ID, - 'sizeInKb': 8388608, - 'mappedSdcInfo': None, - 'ancestorVolumeId': fake.PROVIDER2_ID - }, 200) - }, - self.RESPONSE_MODE.BadStatus: { - 'instances/Volume::' + self.snapshot['provider_id']: - mocks.MockHTTPSResponse({ - 'errorCode': 401, - 'message': 'BadStatus Volume Test', - }, 401), - 'instances/Volume::' + self.snapshot2['provider_id']: - mocks.MockHTTPSResponse({ - 'id': fake.PROVIDER3_ID, - 'sizeInKb': 8388608, - 'ancestorVolumeId': fake.PROVIDER2_ID - }, 200), - 'instances/Volume::' + self.snapshot_attached['provider_id']: - mocks.MockHTTPSResponse({ - 'id': fake.PROVIDER3_ID, - 'sizeInKb': 8388608, - 'mappedSdcInfo': 'Mapped', - 'ancestorVolumeId': fake.PROVIDER_ID - }, 200) - } - } - - def test_no_source_id(self): - 
existing_ref = {'source-name': 'scaleioSnapName'} - self.assertRaises(exception.ManageExistingInvalidReference, - self.driver.manage_existing_snapshot, self.snapshot, - existing_ref) - - @patch.object( - volume_types, - 'get_volume_type', - return_value={'extra_specs': {'volume_backend_name': 'ScaleIO'}}) - def test_snapshot_not_found(self, _mock_volume_type): - existing_ref = {'source-id': fake.PROVIDER2_ID} - self.set_https_response_mode(self.RESPONSE_MODE.BadStatus) - self.assertRaises(exception.ManageExistingInvalidReference, - self.driver.manage_existing_snapshot, self.snapshot, - existing_ref) - - @patch.object( - volume_types, - 'get_volume_type', - return_value={'extra_specs': {'volume_backend_name': 'ScaleIO'}}) - def test_snapshot_attached(self, _mock_volume_type): - self.snapshot_attached['volume_type_id'] = fake.VOLUME_TYPE_ID - existing_ref = {'source-id': fake.PROVIDER2_ID} - self.set_https_response_mode(self.RESPONSE_MODE.BadStatus) - self.assertRaises(exception.ManageExistingInvalidReference, - self.driver.manage_existing_snapshot, - self.snapshot_attached, existing_ref) - - @patch.object( - volume_types, - 'get_volume_type', - return_value={'extra_specs': {'volume_backend_name': 'ScaleIO'}}) - def test_different_ancestor(self, _mock_volume_type): - existing_ref = {'source-id': fake.PROVIDER3_ID} - self.set_https_response_mode(self.RESPONSE_MODE.Valid) - self.assertRaises(exception.ManageExistingInvalidReference, - self.driver.manage_existing_snapshot, - self.snapshot2, existing_ref) - - @patch.object( - volume_types, - 'get_volume_type', - return_value={'extra_specs': {'volume_backend_name': 'ScaleIO'}}) - def test_manage_snapshot_get_size_calc(self, _mock_volume_type): - existing_ref = {'source-id': fake.PROVIDER2_ID} - self.set_https_response_mode(self.RESPONSE_MODE.Valid) - result = self.driver.manage_existing_snapshot_get_size( - self.snapshot, existing_ref) - self.assertEqual(8, result) - - @patch.object( - volume_types, - 'get_volume_type', - 
return_value={'extra_specs': {'volume_backend_name': 'ScaleIO'}}) - def test_manage_existing_snapshot_valid(self, _mock_volume_type): - existing_ref = {'source-id': fake.PROVIDER2_ID} - result = self.driver.manage_existing_snapshot( - self.snapshot, existing_ref) - self.assertEqual(fake.PROVIDER2_ID, result['provider_id']) diff --git a/cinder/tests/unit/volume/drivers/dell_emc/scaleio/test_misc.py b/cinder/tests/unit/volume/drivers/dell_emc/scaleio/test_misc.py deleted file mode 100644 index b686d5ae3..000000000 --- a/cinder/tests/unit/volume/drivers/dell_emc/scaleio/test_misc.py +++ /dev/null @@ -1,305 +0,0 @@ -# Copyright (c) 2013 - 2015 EMC Corporation. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import ddt -import mock -from six.moves import urllib - -from cinder import context -from cinder import exception -from cinder.tests.unit import fake_constants as fake -from cinder.tests.unit import fake_volume -from cinder.tests.unit.volume.drivers.dell_emc import scaleio -from cinder.tests.unit.volume.drivers.dell_emc.scaleio import mocks -from cinder.volume import configuration - - -@ddt.ddt -class TestMisc(scaleio.TestScaleIODriver): - DOMAIN_NAME = 'PD1' - POOL_NAME = 'SP1' - STORAGE_POOLS = ['{}:{}'.format(DOMAIN_NAME, POOL_NAME)] - - def setUp(self): - """Set up the test case environment. - - Defines the mock HTTPS responses for the REST API calls. 
- """ - super(TestMisc, self).setUp() - self.domain_name_enc = urllib.parse.quote(self.DOMAIN_NAME) - self.pool_name_enc = urllib.parse.quote(self.POOL_NAME) - self.ctx = context.RequestContext('fake', 'fake', auth_token=True) - - self.volume = fake_volume.fake_volume_obj( - self.ctx, **{'name': 'vol1', 'provider_id': fake.PROVIDER_ID} - ) - self.new_volume = fake_volume.fake_volume_obj( - self.ctx, **{'name': 'vol2', 'provider_id': fake.PROVIDER2_ID} - ) - - self.HTTPS_MOCK_RESPONSES = { - self.RESPONSE_MODE.Valid: { - 'types/Domain/instances/getByName::' + - self.domain_name_enc: '"{}"'.format(self.DOMAIN_NAME).encode( - 'ascii', - 'ignore' - ), - 'types/Pool/instances/getByName::{},{}'.format( - self.DOMAIN_NAME, - self.POOL_NAME - ): '"{}"'.format(self.POOL_NAME).encode('ascii', 'ignore'), - 'types/StoragePool/instances/action/querySelectedStatistics': { - '"{}"'.format(self.POOL_NAME): { - 'capacityAvailableForVolumeAllocationInKb': 5000000, - 'capacityLimitInKb': 16000000, - 'spareCapacityInKb': 6000000, - 'thickCapacityInUseInKb': 266, - 'thinCapacityAllocatedInKm': 0, - }, - }, - 'instances/Volume::{}/action/setVolumeName'.format( - self.volume['provider_id']): - self.new_volume['provider_id'], - 'instances/Volume::{}/action/setVolumeName'.format( - self.new_volume['provider_id']): - self.volume['provider_id'], - 'version': '"{}"'.format('2.0.1'), - 'instances/StoragePool::{}'.format( - "test_pool" - ): { - 'name': 'test_pool', - 'protectionDomainId': 'test_domain', - }, - 'instances/ProtectionDomain::{}'.format( - "test_domain" - ): { - 'name': 'test_domain', - }, - }, - self.RESPONSE_MODE.BadStatus: { - 'types/Domain/instances/getByName::' + - self.domain_name_enc: self.BAD_STATUS_RESPONSE, - 'instances/Volume::{}/action/setVolumeName'.format( - self.volume['provider_id']): mocks.MockHTTPSResponse( - { - 'message': 'Invalid volume.', - 'httpStatusCode': 400, - 'errorCode': self.VOLUME_NOT_FOUND_ERROR - }, 400), - }, - self.RESPONSE_MODE.Invalid: { - 
'types/Domain/instances/getByName::' + - self.domain_name_enc: None, - 'instances/Volume::{}/action/setVolumeName'.format( - self.volume['provider_id']): mocks.MockHTTPSResponse( - { - 'message': 'Invalid volume.', - 'httpStatusCode': 400, - 'errorCode': 0 - }, 400), - }, - } - - def test_valid_configuration(self): - self.driver.check_for_setup_error() - - def test_both_storage_pool(self): - """Both storage name and ID provided. - - INVALID - """ - self.driver.configuration.sio_storage_pool_id = "test_pool_id" - self.driver.configuration.sio_storage_pool_name = "test_pool_name" - self.assertRaises(exception.InvalidInput, - self.driver.check_for_setup_error) - - def test_no_storage_pool(self): - """No storage name or ID provided. - - VALID as storage_pools are defined - """ - self.driver.configuration.sio_storage_pool_name = None - self.driver.configuration.sio_storage_pool_id = None - self.driver.check_for_setup_error() - - def test_both_domain(self): - """Both domain and ID are provided - - INVALID - """ - self.driver.configuration.sio_protection_domain_name = ( - "test_domain_name") - self.driver.configuration.sio_protection_domain_id = ( - "test_domain_id") - self.assertRaises(exception.InvalidInput, - self.driver.check_for_setup_error) - - def test_no_storage_pools(self): - """No storage pools. 
- - VALID as domain and storage pool names are provided - """ - self.driver.storage_pools = None - self.driver.check_for_setup_error() - - def test_volume_size_round_true(self): - self.driver._check_volume_size(1) - - def test_volume_size_round_false(self): - self.override_config('sio_round_volume_capacity', False, - configuration.SHARED_CONF_GROUP) - self.assertRaises(exception.VolumeBackendAPIException, - self.driver._check_volume_size, 1) - - def test_get_volume_stats_bad_status(self): - self.driver.storage_pools = self.STORAGE_POOLS - self.set_https_response_mode(self.RESPONSE_MODE.BadStatus) - self.assertRaises(exception.VolumeBackendAPIException, - self.driver.get_volume_stats, True) - - def test_get_volume_stats_invalid_domain(self): - self.driver.storage_pools = self.STORAGE_POOLS - self.set_https_response_mode(self.RESPONSE_MODE.Invalid) - self.assertRaises(exception.VolumeBackendAPIException, - self.driver.get_volume_stats, True) - - def test_get_volume_stats(self): - self.driver.storage_pools = self.STORAGE_POOLS - self.driver.get_volume_stats(True) - - def _setup_valid_variant_property(self, property): - """Setup valid response that returns a variety of property name - - """ - self.HTTPS_MOCK_RESPONSES = { - self.RESPONSE_MODE.ValidVariant: { - 'types/Domain/instances/getByName::' + - self.domain_name_enc: '"{}"'.format(self.DOMAIN_NAME).encode( - 'ascii', - 'ignore' - ), - 'types/Pool/instances/getByName::{},{}'.format( - self.DOMAIN_NAME, - self.POOL_NAME - ): '"{}"'.format(self.POOL_NAME).encode('ascii', 'ignore'), - 'types/StoragePool/instances/action/querySelectedStatistics': { - '"{}"'.format(self.POOL_NAME): { - 'capacityAvailableForVolumeAllocationInKb': 5000000, - 'capacityLimitInKb': 16000000, - 'spareCapacityInKb': 6000000, - 'thickCapacityInUseInKb': 266, - property: 0, - }, - }, - 'instances/Volume::{}/action/setVolumeName'.format( - self.volume['provider_id']): - self.new_volume['provider_id'], - 
'instances/Volume::{}/action/setVolumeName'.format( - self.new_volume['provider_id']): - self.volume['provider_id'], - 'version': '"{}"'.format('2.0.1'), - 'instances/StoragePool::{}'.format( - self.STORAGE_POOL_NAME - ): '"{}"'.format(self.STORAGE_POOL_ID), - } - } - - def test_get_volume_stats_with_varying_properties(self): - """Test getting volume stats with various property names - - In SIO 3.0, a property was renamed. - The change is backwards compatible for now but this tests - ensures that the driver is tolerant of that change - """ - self.driver.storage_pools = self.STORAGE_POOLS - self._setup_valid_variant_property("thinCapacityAllocatedInKb") - self.set_https_response_mode(self.RESPONSE_MODE.ValidVariant) - self.driver.get_volume_stats(True) - self._setup_valid_variant_property("nonexistentProperty") - self.set_https_response_mode(self.RESPONSE_MODE.ValidVariant) - self.driver.get_volume_stats(True) - - @mock.patch( - 'cinder.volume.drivers.dell_emc.scaleio.driver.ScaleIODriver.' - '_rename_volume', - return_value=None) - def test_update_migrated_volume(self, mock_rename): - test_vol = self.driver.update_migrated_volume( - self.ctx, self.volume, self.new_volume, 'available') - mock_rename.assert_called_with(self.new_volume, self.volume['id']) - self.assertEqual({'_name_id': None, 'provider_location': None}, - test_vol) - - @mock.patch( - 'cinder.volume.drivers.dell_emc.scaleio.driver.ScaleIODriver.' - '_rename_volume', - return_value=None) - def test_update_unavailable_migrated_volume(self, mock_rename): - test_vol = self.driver.update_migrated_volume( - self.ctx, self.volume, self.new_volume, 'unavailable') - self.assertFalse(mock_rename.called) - self.assertEqual({'_name_id': fake.VOLUME_ID, - 'provider_location': None}, - test_vol) - - @mock.patch( - 'cinder.volume.drivers.dell_emc.scaleio.driver.ScaleIODriver.' 
- '_rename_volume', - side_effect=exception.VolumeBackendAPIException(data='Error!')) - def test_fail_update_migrated_volume(self, mock_rename): - self.assertRaises( - exception.VolumeBackendAPIException, - self.driver.update_migrated_volume, - self.ctx, - self.volume, - self.new_volume, - 'available' - ) - mock_rename.assert_called_with(self.volume, "ff" + self.volume['id']) - - def test_rename_volume(self): - rc = self.driver._rename_volume( - self.volume, self.new_volume['id']) - self.assertIsNone(rc) - - def test_rename_volume_illegal_syntax(self): - self.set_https_response_mode(self.RESPONSE_MODE.Invalid) - rc = self.driver._rename_volume( - self.volume, self.new_volume['id']) - self.assertIsNone(rc) - - def test_rename_volume_non_sio(self): - self.set_https_response_mode(self.RESPONSE_MODE.BadStatus) - rc = self.driver._rename_volume( - self.volume, self.new_volume['id']) - self.assertIsNone(rc) - - def test_default_provisioning_type_unspecified(self): - empty_storage_type = {} - self.assertEqual( - 'thin', - self.driver._find_provisioning_type(empty_storage_type)) - - @ddt.data((True, 'thin'), (False, 'thick')) - @ddt.unpack - def test_default_provisioning_type_thin(self, config_provisioning_type, - expected_provisioning_type): - self.override_config('san_thin_provision', config_provisioning_type, - configuration.SHARED_CONF_GROUP) - self.driver = mocks.ScaleIODriver(configuration=self.configuration) - empty_storage_type = {} - self.assertEqual( - expected_provisioning_type, - self.driver._find_provisioning_type(empty_storage_type)) diff --git a/cinder/tests/unit/volume/drivers/dell_emc/scaleio/test_versions.py b/cinder/tests/unit/volume/drivers/dell_emc/scaleio/test_versions.py deleted file mode 100644 index 7c70ac5e1..000000000 --- a/cinder/tests/unit/volume/drivers/dell_emc/scaleio/test_versions.py +++ /dev/null @@ -1,93 +0,0 @@ -# Copyright (C) 2017 Dell Inc. or its subsidiaries. -# All Rights Reserved. 
-# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import ddt - -from cinder import exception -from cinder.tests.unit.volume.drivers.dell_emc import scaleio - - -@ddt.ddt -class TestMultipleVersions(scaleio.TestScaleIODriver): - - version = '1.2.3.4' - good_versions = ['1.2.3.4', - '101.102.103.104.105.106.107', - '1.0' - ] - bad_versions = ['bad', - 'bad.version.number', - '1.0b', - '.6' - ] - - # Test cases for ``ScaleIODriver._get_server_api_version()`` - def setUp(self): - """Setup a test case environment.""" - super(TestMultipleVersions, self).setUp() - - self.HTTPS_MOCK_RESPONSES = { - self.RESPONSE_MODE.Valid: { - 'version': '"{}"'.format(self.version), - }, - self.RESPONSE_MODE.Invalid: { - 'version': None, - }, - self.RESPONSE_MODE.BadStatus: { - 'version': self.BAD_STATUS_RESPONSE, - }, - } - - def test_version_api_fails(self): - """version api returns a non-200 response.""" - self.set_https_response_mode(self.RESPONSE_MODE.Invalid) - self.assertRaises(exception.VolumeBackendAPIException, - self.test_version) - - def test_version(self): - """Valid version request.""" - self.driver._get_server_api_version(False) - - def test_version_badstatus_response(self): - """Version api returns a bad response.""" - self.set_https_response_mode(self.RESPONSE_MODE.BadStatus) - self.assertRaises(exception.VolumeBackendAPIException, - self.test_version) - - def setup_response(self): - self.HTTPS_MOCK_RESPONSES = { - self.RESPONSE_MODE.Valid: { - 'version': 
'"{}"'.format(self.version), - }, - } - - def test_version_badversions(self): - """Version api returns an invalid version number.""" - for vers in self.bad_versions: - self.version = vers - self.setup_response() - self.assertRaises(exception.VolumeBackendAPIException, - self.test_version) - - def test_version_goodversions(self): - """Version api returns a valid version number.""" - for vers in self.good_versions: - self.version = vers - self.setup_response() - self.driver._get_server_api_version(False) - self.assertEqual( - self.driver._get_server_api_version(False), - vers - ) diff --git a/cinder/tests/unit/volume/drivers/dell_emc/test_ps.py b/cinder/tests/unit/volume/drivers/dell_emc/test_ps.py deleted file mode 100644 index 0ddccc8d2..000000000 --- a/cinder/tests/unit/volume/drivers/dell_emc/test_ps.py +++ /dev/null @@ -1,586 +0,0 @@ -# Copyright (c) 2013-2017 Dell Inc, or its subsidiaries. -# Copyright 2013 OpenStack Foundation -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -import time -import unittest - -from eventlet import greenthread -import mock -from oslo_concurrency import processutils -import paramiko -import six - -from cinder import context -from cinder import exception -from cinder import ssh_utils -from cinder import test -from cinder import utils -from cinder.volume import configuration as conf -from cinder.volume.drivers.dell_emc import ps - - -class PSSeriesISCSIDriverTestCase(test.TestCase): - - def setUp(self): - super(PSSeriesISCSIDriverTestCase, self).setUp() - self.configuration = mock.Mock(conf.Configuration) - self.configuration.san_is_local = False - self.configuration.san_ip = "10.0.0.1" - self.configuration.san_login = "foo" - self.configuration.san_password = "bar" - self.configuration.san_ssh_port = 16022 - self.configuration.san_thin_provision = True - self.configuration.san_private_key = 'foo' - self.configuration.ssh_min_pool_conn = 1 - self.configuration.ssh_max_pool_conn = 5 - self.configuration.ssh_conn_timeout = 30 - self.configuration.eqlx_pool = 'non-default' - self.configuration.eqlx_group_name = 'group-0' - self.configuration.eqlx_cli_max_retries = 5 - - self.configuration.use_chap_auth = True - self.configuration.chap_username = 'admin' - self.configuration.chap_password = 'password' - - self.configuration.max_over_subscription_ratio = 1.0 - - self.driver_stats_output = ['TotalCapacity: 111GB', - 'FreeSpace: 11GB', - 'VolumeReserve: 80GB'] - self.cmd = 'this is dummy command' - self._context = context.get_admin_context() - self.driver = ps.PSSeriesISCSIDriver( - configuration=self.configuration) - self.volume_name = "fakevolume" - self.volid = "fakeid" - self.volume = {'name': self.volume_name, - 'display_name': 'fake_display_name'} - self.connector = { - 'ip': '10.0.0.2', - 'initiator': 'iqn.1993-08.org.debian:01:2227dab76162', - 'host': 'fakehost'} - self.access_record_output = [ - "ID Initiator Ipaddress AuthMethod UserName Apply-To", - "--- --------------- ------------- ---------- 
---------- --------", - "1 iqn.1993-08.org.debian:01:222 *.*.*.* none both", - " 7dab76162"] - - self.fake_iqn = 'iqn.2003-10.com.equallogic:group01:25366:fakev' - self.fake_iqn_return = ['iSCSI target name is %s.' % self.fake_iqn] - self.fake_volume_output = ["Size: 5GB", - "iSCSI Name: %s" % self.fake_iqn, - "Description: "] - self.fake_volume_info = {'size': 5.0, - 'iSCSI_Name': self.fake_iqn} - self.driver._group_ip = '10.0.1.6' - self.properties = { - 'target_discovered': True, - 'target_portal': '%s:3260' % self.driver._group_ip, - 'target_iqn': self.fake_iqn, - 'volume_id': 1, - 'discard': True} - self._model_update = { - 'provider_location': "%s:3260,1 %s 0" % (self.driver._group_ip, - self.fake_iqn), - 'provider_auth': 'CHAP %s %s' % ( - self.configuration.chap_username, - self.configuration.chap_password) - } - - def _fake_get_iscsi_properties(self, volume): - return self.properties - - def test_create_volume(self): - volume = {'name': self.volume_name, 'size': 1} - mock_attrs = {'args': ['volume', 'create', volume['name'], - "%sG" % (volume['size']), 'pool', - self.configuration.eqlx_pool, - 'thin-provision']} - with mock.patch.object(self.driver, - '_eql_execute') as mock_eql_execute: - mock_eql_execute.configure_mock(**mock_attrs) - mock_eql_execute.return_value = self.fake_iqn_return - model_update = self.driver.create_volume(volume) - self.assertEqual(self._model_update, model_update) - - def test_delete_volume(self): - volume = {'name': self.volume_name, 'size': 1} - show_attrs = {'args': ['volume', 'select', volume['name'], 'show']} - off_attrs = {'args': ['volume', 'select', volume['name'], 'offline']} - delete_attrs = {'args': ['volume', 'delete', volume['name']]} - with mock.patch.object(self.driver, - '_eql_execute') as mock_eql_execute: - mock_eql_execute.configure_mock(**show_attrs) - mock_eql_execute.configure_mock(**off_attrs) - mock_eql_execute.configure_mock(**delete_attrs) - self.driver.delete_volume(volume) - - def 
test_delete_absent_volume(self): - volume = {'name': self.volume_name, 'size': 1, 'id': self.volid} - mock_attrs = {'args': ['volume', 'select', volume['name'], 'show']} - with mock.patch.object(self.driver, - '_eql_execute') as mock_eql_execute: - mock_eql_execute.configure_mock(**mock_attrs) - mock_eql_execute.side_effect = processutils.ProcessExecutionError( - stdout='% Error ..... does not exist.\n') - self.driver.delete_volume(volume) - - def test_ensure_export(self): - volume = {'name': self.volume_name, 'size': 1} - mock_attrs = {'args': ['volume', 'select', volume['name'], 'show']} - with mock.patch.object(self.driver, - '_eql_execute') as mock_eql_execute: - mock_eql_execute.configure_mock(**mock_attrs) - self.driver.ensure_export({}, volume) - - def test_create_snapshot(self): - snapshot = {'name': 'fakesnap', 'volume_name': 'fakevolume_name'} - snap_name = 'fake_snap_name' - with mock.patch.object(self.driver, - '_eql_execute') as mock_eql_execute: - mock_eql_execute.return_value = ['Snapshot name is %s' % snap_name] - self.driver.create_snapshot(snapshot) - - def test_create_volume_from_snapshot(self): - snapshot = {'name': 'fakesnap', 'volume_name': 'fakevolume_name', - 'volume_size': '1'} - volume = {'name': self.volume_name, 'size': '1'} - mock_attrs = {'args': ['volume', 'select', snapshot['volume_name'], - 'snapshot', 'select', snapshot['name'], - 'clone', volume['name']]} - - with mock.patch.object(self.driver, - '_eql_execute') as mock_eql_execute: - with mock.patch.object(self.driver, - 'extend_volume') as mock_extend_volume: - mock_eql_execute.configure_mock(**mock_attrs) - mock_eql_execute.return_value = self.fake_iqn_return - mock_extend_volume.return_value = self.fake_iqn_return - model_update = self.driver.create_volume_from_snapshot( - volume, snapshot) - self.assertEqual(self._model_update, model_update) - self.assertFalse(self.driver.extend_volume.called) - - def test_create_volume_from_snapshot_extend(self): - snapshot = {'name': 
'fakesnap', 'volume_name': 'fakevolume_name', - 'volume_size': '100'} - volume = {'name': self.volume_name, 'size': '200'} - mock_attrs = {'args': ['volume', 'select', snapshot['volume_name'], - 'snapshot', 'select', snapshot['name'], - 'clone', volume['name']]} - - with mock.patch.object(self.driver, - '_eql_execute') as mock_eql_execute: - with mock.patch.object(self.driver, - 'extend_volume') as mock_extend_volume: - mock_eql_execute.configure_mock(**mock_attrs) - mock_eql_execute.return_value = self.fake_iqn_return - mock_extend_volume.return_value = self.fake_iqn_return - model_update = self.driver.create_volume_from_snapshot( - volume, snapshot) - self.assertEqual(self._model_update, model_update) - self.assertTrue(self.driver.extend_volume.called) - self.driver.extend_volume.assert_called_once_with( - volume, volume['size']) - - def test_create_cloned_volume(self): - src_vref = {'name': 'fake_uuid', 'size': '1'} - volume = {'name': self.volume_name, 'size': '1'} - mock_attrs = {'args': ['volume', 'select', volume['name'], - 'multihost-access', 'enable']} - with mock.patch.object(self.driver, - '_eql_execute') as mock_eql_execute: - with mock.patch.object(self.driver, - 'extend_volume') as mock_extend_volume: - mock_eql_execute.configure_mock(**mock_attrs) - mock_eql_execute.return_value = self.fake_iqn_return - mock_extend_volume.return_value = self.fake_iqn_return - model_update = self.driver.create_cloned_volume( - volume, src_vref) - self.assertEqual(self._model_update, model_update) - self.assertFalse(self.driver.extend_volume.called) - - def test_create_cloned_volume_extend(self): - src_vref = {'name': 'fake_uuid', 'size': '100'} - volume = {'name': self.volume_name, 'size': '200'} - mock_attrs = {'args': ['volume', 'select', volume['name'], - 'multihost-access', 'enable']} - with mock.patch.object(self.driver, - '_eql_execute') as mock_eql_execute: - with mock.patch.object(self.driver, - 'extend_volume') as mock_extend_volume: - 
mock_eql_execute.configure_mock(**mock_attrs) - mock_eql_execute.return_value = self.fake_iqn_return - mock_extend_volume.return_value = self.fake_iqn_return - cloned_vol = self.driver.create_cloned_volume(volume, src_vref) - self.assertEqual(self._model_update, cloned_vol) - self.assertTrue(self.driver.extend_volume.called) - - def test_delete_snapshot(self): - snapshot = {'name': 'fakesnap', 'volume_name': 'fakevolume_name'} - mock_attrs = {'args': ['volume', 'select', snapshot['volume_name'], - 'snapshot', 'delete', snapshot['name']]} - with mock.patch.object(self.driver, - '_eql_execute') as mock_eql_execute: - mock_eql_execute.configure_mock(**mock_attrs) - self.driver.delete_snapshot(snapshot) - - def test_delete_absent_snapshot(self): - snapshot = {'name': 'fakesnap', 'volume_name': 'fakevolume_name'} - mock_attrs = {'args': ['volume', 'select', snapshot['volume_name'], - 'snapshot', 'delete', snapshot['name']]} - with mock.patch.object(self.driver, - '_eql_execute') as mock_eql_execute: - mock_eql_execute.configure_mock(**mock_attrs) - mock_eql_execute.side_effect = processutils.ProcessExecutionError( - stdout='% Error ..... 
does not exist.\n') - self.driver.delete_snapshot(snapshot) - - def test_extend_volume(self): - new_size = '200' - volume = {'name': self.volume_name, 'size': 100} - mock_attrs = {'args': ['volume', 'select', volume['name'], - 'size', "%sG" % new_size]} - with mock.patch.object(self.driver, - '_eql_execute') as mock_eql_execute: - mock_eql_execute.configure_mock(**mock_attrs) - self.driver.extend_volume(volume, new_size) - - def test_get_volume_info(self): - attrs = ('volume', 'select', self.volume, 'show') - with mock.patch.object(self.driver, - '_eql_execute') as mock_eql_execute: - mock_eql_execute.return_value = self.fake_volume_output - data = self.driver._get_volume_info(self.volume) - mock_eql_execute.assert_called_with(*attrs) - self.assertEqual(self.fake_volume_info, data) - - def test_get_volume_info_negative(self): - with mock.patch.object(self.driver, - '_eql_execute') as mock_eql_execute: - mock_eql_execute.side_effect = processutils.ProcessExecutionError( - stdout='% Error ..... 
does not exist.\n') - self.assertRaises(exception.ManageExistingInvalidReference, - self.driver._get_volume_info, self.volume_name) - - def test_manage_existing(self): - ref = {'source-name': self.volume_name} - attrs = ('volume', 'select', self.volume_name, - 'multihost-access', 'enable') - with mock.patch.object(self.driver, - '_eql_execute') as mock_eql_execute: - with mock.patch.object(self.driver, - '_get_volume_info') as mock_volume_info: - mock_volume_info.return_value = self.fake_volume_info - mock_eql_execute.return_value = self.fake_iqn_return - model_update = self.driver.manage_existing(self.volume, ref) - mock_eql_execute.assert_called_with(*attrs) - self.assertEqual(self._model_update, model_update) - - def test_manage_existing_invalid_ref(self): - ref = {} - self.assertRaises(exception.InvalidInput, - self.driver.manage_existing, self.volume, ref) - - def test_manage_existing_get_size(self): - ref = {'source-name': self.volume_name} - with mock.patch.object(self.driver, - '_eql_execute') as mock_eql_execute: - mock_eql_execute.return_value = self.fake_volume_output - size = self.driver.manage_existing_get_size(self.volume, ref) - self.assertEqual(float('5.0'), size) - - def test_manage_existing_get_size_invalid_ref(self): - """Error on manage with invalid reference.""" - ref = {} - self.assertRaises(exception.InvalidInput, - self.driver.manage_existing_get_size, - self.volume, ref) - - def test_unmanage(self): - with mock.patch.object(self.driver, - '_eql_execute') as mock_eql_execute: - mock_eql_execute.return_value = None - self.driver.unmanage(self.volume) - - def test_initialize_connection(self): - volume = {'name': self.volume_name} - mock_attrs = {'args': ['volume', 'select', volume['name'], 'access', - 'create', 'initiator', - self.connector['initiator'], - 'authmethod', 'chap', - 'username', - self.configuration.chap_username]} - with mock.patch.object(self.driver, - '_eql_execute') as mock_eql_execute: - with mock.patch.object(self.driver, - 
'_get_iscsi_properties') as mock_iscsi: - mock_eql_execute.configure_mock(**mock_attrs) - mock_iscsi.return_value = self.properties - iscsi_properties = self.driver.initialize_connection( - volume, self.connector) - self.assertEqual(self._fake_get_iscsi_properties(volume), - iscsi_properties['data']) - self.assertTrue(iscsi_properties['data']['discard']) - - def test_terminate_connection(self): - def my_side_effect(*args, **kwargs): - if args[4] == 'show': - return self.access_record_output - else: - return '' - volume = {'name': self.volume_name} - with mock.patch.object(self.driver, - '_eql_execute') as mock_eql_execute: - mock_eql_execute.side_effect = my_side_effect - self.driver.terminate_connection(volume, self.connector) - - def test_do_setup(self): - fake_group_ip = '10.1.2.3' - - def my_side_effect(*args, **kwargs): - if args[0] == 'grpparams': - return ['Group-Ipaddress: %s' % fake_group_ip] - else: - return '' - - with mock.patch.object(self.driver, - '_eql_execute') as mock_eql_execute: - mock_eql_execute.side_effect = my_side_effect - self.driver.do_setup(self._context) - self.assertEqual(fake_group_ip, self.driver._group_ip) - - def test_update_volume_stats_thin(self): - mock_attrs = {'args': ['pool', 'select', - self.configuration.eqlx_pool, 'show']} - self.configuration.san_thin_provision = True - with mock.patch.object(self.driver, - '_eql_execute') as mock_eql_execute: - mock_eql_execute.configure_mock(**mock_attrs) - mock_eql_execute.return_value = self.driver_stats_output - self.driver._update_volume_stats() - self.assert_volume_stats(self.driver._stats) - - def test_update_volume_stats_thick(self): - mock_attrs = {'args': ['pool', 'select', - self.configuration.eqlx_pool, 'show']} - self.configuration.san_thin_provision = False - with mock.patch.object(self.driver, - '_eql_execute') as mock_eql_execute: - mock_eql_execute.configure_mock(**mock_attrs) - mock_eql_execute.return_value = self.driver_stats_output - self.driver._update_volume_stats() 
- self.assert_volume_stats(self.driver._stats) - - def test_get_volume_stats_thin(self): - mock_attrs = {'args': ['pool', 'select', - self.configuration.eqlx_pool, 'show']} - self.configuration.san_thin_provision = True - with mock.patch.object(self.driver, - '_eql_execute') as mock_eql_execute: - mock_eql_execute.configure_mock(**mock_attrs) - mock_eql_execute.return_value = self.driver_stats_output - stats = self.driver.get_volume_stats(refresh=True) - self.assert_volume_stats(stats) - - def test_get_volume_stats_thick(self): - mock_attrs = {'args': ['pool', 'select', - self.configuration.eqlx_pool, 'show']} - self.configuration.san_thin_provision = False - with mock.patch.object(self.driver, - '_eql_execute') as mock_eql_execute: - mock_eql_execute.configure_mock(**mock_attrs) - mock_eql_execute.return_value = self.driver_stats_output - stats = self.driver.get_volume_stats(refresh=True) - self.assert_volume_stats(stats) - - def assert_volume_stats(self, stats): - thin_enabled = self.configuration.san_thin_provision - self.assertEqual(float('111.0'), stats['total_capacity_gb']) - self.assertEqual(float('11.0'), stats['free_capacity_gb']) - - if thin_enabled: - self.assertEqual(80.0, stats['provisioned_capacity_gb']) - else: - space = stats['total_capacity_gb'] - stats['free_capacity_gb'] - self.assertEqual(space, stats['provisioned_capacity_gb']) - - self.assertEqual(thin_enabled, stats['thin_provisioning_support']) - self.assertEqual(not thin_enabled, - stats['thick_provisioning_support']) - self.assertEqual('Dell EMC', stats['vendor_name']) - self.assertFalse(stats['multiattach']) - - def test_get_space_in_gb(self): - self.assertEqual(123.0, self.driver._get_space_in_gb('123.0GB')) - self.assertEqual(124.0, self.driver._get_space_in_gb('123.5GB')) - self.assertEqual(123.0 * 1024, self.driver._get_space_in_gb('123.0TB')) - self.assertEqual(1.0, self.driver._get_space_in_gb('1024.0MB')) - self.assertEqual(2.0, self.driver._get_space_in_gb('1536.0MB')) - - def 
test_get_output(self): - - def _fake_recv(ignore_arg): - return '%s> ' % self.configuration.eqlx_group_name - - chan = mock.Mock(paramiko.Channel) - mock_recv = self.mock_object(chan, 'recv') - mock_recv.return_value = '%s> ' % self.configuration.eqlx_group_name - self.assertEqual([_fake_recv(None)], self.driver._get_output(chan)) - - def test_get_prefixed_value(self): - lines = ['Line1 passed', 'Line1 failed'] - prefix = ['Line1', 'Line2'] - expected_output = [' passed', None] - self.assertEqual(expected_output[0], - self.driver._get_prefixed_value(lines, prefix[0])) - self.assertEqual(expected_output[1], - self.driver._get_prefixed_value(lines, prefix[1])) - - def test_ssh_execute(self): - ssh = mock.Mock(paramiko.SSHClient) - chan = mock.Mock(paramiko.Channel) - transport = mock.Mock(paramiko.Transport) - mock_get_output = self.mock_object(self.driver, '_get_output') - self.mock_object(chan, 'invoke_shell') - expected_output = ['NoError: test run'] - mock_get_output.return_value = expected_output - ssh.get_transport.return_value = transport - transport.open_session.return_value = chan - chan.invoke_shell() - chan.send('stty columns 255' + '\r') - chan.send(self.cmd + '\r') - chan.close() - self.assertEqual(expected_output, - self.driver._ssh_execute(ssh, self.cmd)) - - def test_ssh_execute_error(self): - self.mock_object(self.driver, '_ssh_execute', - side_effect=processutils.ProcessExecutionError) - ssh = mock.Mock(paramiko.SSHClient) - chan = mock.Mock(paramiko.Channel) - transport = mock.Mock(paramiko.Transport) - mock_get_output = self.mock_object(self.driver, '_get_output') - self.mock_object(ssh, 'get_transport') - self.mock_object(chan, 'invoke_shell') - expected_output = ['Error: test run', '% Error'] - mock_get_output.return_value = expected_output - ssh.get_transport().return_value = transport - transport.open_session.return_value = chan - chan.invoke_shell() - chan.send('stty columns 255' + '\r') - chan.send(self.cmd + '\r') - chan.close() - 
self.assertRaises(processutils.ProcessExecutionError, - self.driver._ssh_execute, ssh, self.cmd) - - @mock.patch.object(greenthread, 'sleep') - def test_ensure_retries(self, _gt_sleep): - num_attempts = 3 - self.driver.configuration.eqlx_cli_max_retries = num_attempts - self.mock_object(self.driver, '_ssh_execute', - side_effect=exception.VolumeBackendAPIException( - "some error")) - # mocks for calls in _run_ssh - self.mock_object(utils, 'check_ssh_injection') - self.mock_object(ssh_utils, 'SSHPool') - - sshpool = ssh_utils.SSHPool("127.0.0.1", 22, 10, - "test", - password="test", - min_size=1, - max_size=1) - self.driver.sshpool = mock.Mock(return_value=sshpool) - ssh = mock.Mock(paramiko.SSHClient) - self.driver.sshpool.item().__enter__ = mock.Mock(return_value=ssh) - self.driver.sshpool.item().__exit__ = mock.Mock(return_value=False) - # now call the execute - self.assertRaises(exception.VolumeBackendAPIException, - self.driver._eql_execute, "fake command") - self.assertEqual(num_attempts + 1, - self.driver._ssh_execute.call_count) - - @mock.patch.object(greenthread, 'sleep') - def test_ensure_connection_retries(self, _gt_sleep): - num_attempts = 3 - self.driver.configuration.eqlx_cli_max_retries = num_attempts - self.mock_object(self.driver, '_ssh_execute', - side_effect=processutils.ProcessExecutionError( - stdout='% Error ... 
some error.\n')) - # mocks for calls in _run_ssh - self.mock_object(utils, 'check_ssh_injection') - self.mock_object(ssh_utils, 'SSHPool') - - sshpool = ssh_utils.SSHPool("127.0.0.1", 22, 10, - "test", - password="test", - min_size=1, - max_size=1) - self.driver.sshpool = mock.Mock(return_value=sshpool) - ssh = mock.Mock(paramiko.SSHClient) - self.driver.sshpool.item().__enter__ = mock.Mock(return_value=ssh) - self.driver.sshpool.item().__exit__ = mock.Mock(return_value=False) - # now call the execute - self.assertRaises(exception.VolumeBackendAPIException, - self.driver._eql_execute, "fake command") - self.assertEqual(num_attempts + 1, - self.driver._ssh_execute.call_count) - - @unittest.skip("Skip until bug #1578986 is fixed") - @mock.patch.object(greenthread, 'sleep') - def test_ensure_retries_on_channel_timeout(self, _gt_sleep): - num_attempts = 3 - self.driver.configuration.eqlx_cli_max_retries = num_attempts - - # mocks for calls and objects in _run_ssh - self.mock_object(utils, 'check_ssh_injection') - self.mock_object(ssh_utils, 'SSHPool') - - sshpool = ssh_utils.SSHPool("127.0.0.1", 22, 10, - "test", - password="test", - min_size=1, - max_size=1) - self.driver.sshpool = mock.Mock(return_value=sshpool) - ssh = mock.Mock(paramiko.SSHClient) - self.driver.sshpool.item().__enter__ = mock.Mock(return_value=ssh) - self.driver.sshpool.item().__exit__ = mock.Mock(return_value=False) - # mocks for _ssh_execute and _get_output - self.mock_object(self.driver, '_get_output', - side_effect=exception.VolumeBackendAPIException( - "some error")) - # now call the execute - with mock.patch('sys.stderr', new=six.StringIO()): - self.assertRaises(exception.VolumeBackendAPIException, - self.driver._eql_execute, "fake command") - - self.assertEqual(num_attempts + 1, self.driver._get_output.call_count) - - @unittest.skip("Skip until bug #1578986 is fixed") - def test_with_timeout(self): - @ps.with_timeout - def no_timeout(cmd, *args, **kwargs): - return 'no timeout' - - 
@ps.with_timeout - def w_timeout(cmd, *args, **kwargs): - time.sleep(1) - - self.assertEqual('no timeout', no_timeout('fake cmd')) - self.assertRaises(exception.VolumeBackendAPIException, - w_timeout, 'fake cmd', timeout=0.1) - - def test_local_path(self): - self.assertRaises(NotImplementedError, self.driver.local_path, '') diff --git a/cinder/tests/unit/volume/drivers/dell_emc/test_xtremio.py b/cinder/tests/unit/volume/drivers/dell_emc/test_xtremio.py deleted file mode 100644 index 974e9a82f..000000000 --- a/cinder/tests/unit/volume/drivers/dell_emc/test_xtremio.py +++ /dev/null @@ -1,1400 +0,0 @@ -# Copyright (c) 2012 - 2014 EMC Corporation, Inc. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -import copy -import time - -import mock -import six - -from cinder import exception -from cinder import test -from cinder.tests.unit.consistencygroup import fake_consistencygroup as fake_cg -from cinder.tests.unit import fake_constants as fake -from cinder.tests.unit import fake_snapshot -from cinder.tests.unit.fake_volume import fake_volume_obj -from cinder.volume.drivers.dell_emc import xtremio - -typ2id = {'volumes': 'vol-id', - 'snapshots': 'vol-id', - 'initiators': 'initiator-id', - 'initiator-groups': 'ig-id', - 'lun-maps': 'mapping-id', - 'consistency-groups': 'cg-id', - 'consistency-group-volumes': 'cg-vol-id', - } - -xms_init = {'xms': {1: {'version': '4.2.0', - 'sw-version': '4.2.0-30'}}, - 'clusters': {1: {'name': 'brick1', - 'sys-sw-version': "4.2.0-devel_ba23ee5381eeab73", - 'ud-ssd-space': '8146708710', - 'ud-ssd-space-in-use': '708710', - 'vol-size': '29884416', - 'chap-authentication-mode': 'disabled', - 'chap-discovery-mode': 'disabled', - "index": 1, - }, - }, - 'target-groups': {'Default': {"index": 1, "name": "Default"}, - }, - 'iscsi-portals': {'10.205.68.5/16': - {"port-address": - "iqn.2008-05.com.xtremio:001e67939c34", - "ip-port": 3260, - "ip-addr": "10.205.68.5/16", - "name": "10.205.68.5/16", - "index": 1, - }, - }, - 'targets': {'X1-SC2-target1': {'index': 1, "name": "X1-SC2-fc1", - "port-address": - "21:00:00:24:ff:57:b2:36", - 'port-type': 'fc', - 'port-state': 'up', - }, - 'X1-SC2-target2': {'index': 2, "name": "X1-SC2-fc2", - "port-address": - "21:00:00:24:ff:57:b2:55", - 'port-type': 'fc', - 'port-state': 'up', - } - }, - 'volumes': {}, - 'initiator-groups': {}, - 'initiators': {}, - 'lun-maps': {}, - 'consistency-groups': {}, - 'consistency-group-volumes': {}, - } - -xms_data = None - -xms_filters = { - 'eq': lambda x, y: x == y, - 'ne': lambda x, y: x != y, - 'gt': lambda x, y: x > y, - 'ge': lambda x, y: x >= y, - 'lt': lambda x, y: x < y, - 'le': lambda x, y: x <= y, -} - - -def get_xms_obj_by_name(typ, name): - for item in 
xms_data[typ].values(): - if 'name' in item and item['name'] == name: - return item - raise exception.NotFound() - - -def clean_xms_data(): - global xms_data - xms_data = copy.deepcopy(xms_init) - - -def fix_data(data, object_type): - d = {} - for key, value in data.items(): - if 'name' in key: - key = 'name' - d[key] = value - - if object_type == 'lun-maps': - d['lun'] = 1 - - vol_idx = get_xms_obj_by_name('volumes', data['vol-id'])['index'] - ig_idx = get_xms_obj_by_name('initiator-groups', - data['ig-id'])['index'] - - d['name'] = '_'.join([six.text_type(vol_idx), - six.text_type(ig_idx), - '1']) - - d[typ2id[object_type]] = ["a91e8c81c2d14ae4865187ce4f866f8a", - d.get('name'), - len(xms_data.get(object_type, [])) + 1] - d['index'] = len(xms_data[object_type]) + 1 - return d - - -def get_xms_obj_key(data): - for key in data.keys(): - if 'name' in key: - return key - - -def get_obj(typ, name, idx): - if name: - return {"content": get_xms_obj_by_name(typ, name)} - elif idx: - if idx not in xms_data.get(typ, {}): - raise exception.NotFound() - return {"content": xms_data[typ][idx]} - - -def xms_request(object_type='volumes', method='GET', data=None, - name=None, idx=None, ver='v1'): - if object_type == 'snapshots': - object_type = 'volumes' - - try: - res = xms_data[object_type] - except KeyError: - raise exception.VolumeDriverException - if method == 'GET': - if name or idx: - return get_obj(object_type, name, idx) - else: - if data and data.get('full') == 1: - filter_term = data.get('filter') - if not filter_term: - entities = list(res.values()) - else: - field, oper, value = filter_term.split(':', 2) - comp = xms_filters[oper] - entities = [o for o in res.values() - if comp(o.get(field), value)] - return {object_type: entities} - else: - return {object_type: [{"href": "/%s/%d" % (object_type, - obj['index']), - "name": obj.get('name')} - for obj in res.values()]} - elif method == 'POST': - data = fix_data(data, object_type) - name_key = get_xms_obj_key(data) - 
try: - if name_key and get_xms_obj_by_name(object_type, data[name_key]): - raise (exception - .VolumeBackendAPIException - ('Volume by this name already exists')) - except exception.NotFound: - pass - data['index'] = len(xms_data[object_type]) + 1 - xms_data[object_type][data['index']] = data - # find the name key - if name_key: - data['name'] = data[name_key] - if object_type == 'lun-maps': - data['ig-name'] = data['ig-id'] - - return {"links": [{"href": "/%s/%d" % - (object_type, data[typ2id[object_type]][2])}]} - elif method == 'DELETE': - if object_type == 'consistency-group-volumes': - data = [cgv for cgv in - xms_data['consistency-group-volumes'].values() - if cgv['vol-id'] == data['vol-id'] - and cgv['cg-id'] == data['cg-id']][0] - else: - data = get_obj(object_type, name, idx)['content'] - if data: - del xms_data[object_type][data['index']] - else: - raise exception.NotFound() - elif method == 'PUT': - obj = get_obj(object_type, name, idx)['content'] - data = fix_data(data, object_type) - del data['index'] - obj.update(data) - - -def xms_bad_request(object_type='volumes', method='GET', data=None, - name=None, idx=None, ver='v1'): - if method == 'GET': - raise exception.NotFound() - elif method == 'POST': - raise exception.VolumeBackendAPIException('Failed to create ig') - - -def xms_failed_rename_snapshot_request(object_type='volumes', - method='GET', data=None, - name=None, idx=None, ver='v1'): - if method == 'POST': - xms_data['volumes'][27] = {} - return { - "links": [ - { - "href": "https://host/api/json/v2/types/snapshots/27", - "rel": "self"}]} - elif method == 'PUT': - raise exception.VolumeBackendAPIException(data='Failed to delete') - elif method == 'DELETE': - del xms_data['volumes'][27] - - -class D(dict): - def update(self, *args, **kwargs): - self.__dict__.update(*args, **kwargs) - return dict.update(self, *args, **kwargs) - - -class CommonData(object): - context = {'user': 'admin', } - connector = {'ip': '10.0.0.2', - 'initiator': 
'iqn.1993-08.org.debian:01:222', - 'wwpns': ["123456789012345", "123456789054321"], - 'wwnns': ["223456789012345", "223456789054321"], - 'host': 'fakehost', - } - - test_volume = fake_volume_obj(context, - name='vol1', - size=1, - volume_name='vol1', - id='192eb39b-6c2f-420c-bae3-3cfd117f0001', - provider_auth=None, - project_id='project', - display_name='vol1', - display_description='test volume', - volume_type_id=None, - consistencygroup_id= - '192eb39b-6c2f-420c-bae3-3cfd117f0345', - ) - test_snapshot = D() - test_snapshot.update({'name': 'snapshot1', - 'size': 1, - 'id': '192eb39b-6c2f-420c-bae3-3cfd117f0002', - 'volume_name': 'vol-vol1', - 'volume_id': '192eb39b-6c2f-420c-bae3-3cfd117f0001', - 'project_id': 'project', - 'consistencygroup_id': - '192eb39b-6c2f-420c-bae3-3cfd117f0345', - }) - test_snapshot.__dict__.update(test_snapshot) - test_volume2 = {'name': 'vol2', - 'size': 1, - 'volume_name': 'vol2', - 'id': '192eb39b-6c2f-420c-bae3-3cfd117f0004', - 'provider_auth': None, - 'project_id': 'project', - 'display_name': 'vol2', - 'display_description': 'test volume 2', - 'volume_type_id': None, - 'consistencygroup_id': - '192eb39b-6c2f-420c-bae3-3cfd117f0345', - } - test_clone = {'name': 'clone1', - 'size': 1, - 'volume_name': 'vol3', - 'id': '192eb39b-6c2f-420c-bae3-3cfd117f0003', - 'provider_auth': None, - 'project_id': 'project', - 'display_name': 'clone1', - 'display_description': 'volume created from snapshot', - 'volume_type_id': None, - 'consistencygroup_id': - '192eb39b-6c2f-420c-bae3-3cfd117f0345', - } - unmanaged1 = {'id': 'unmanaged1', - 'name': 'unmanaged1', - 'size': 3, - } - group = {'id': '192eb39b-6c2f-420c-bae3-3cfd117f0345', - 'name': 'cg1', - 'status': 'OK', - } - - cgsnapshot = { - 'id': '192eb39b-6c2f-420c-bae3-3cfd117f9876', - 'consistencygroup_id': group['id'], - 'group_id': None, } - - cgsnapshot_as_group_id = { - 'id': '192eb39b-6c2f-420c-bae3-3cfd117f9876', - 'consistencygroup_id': None, - 'group_id': group['id'], } - - -class 
BaseXtremIODriverTestCase(test.TestCase): - def __init__(self, *args, **kwargs): - super(BaseXtremIODriverTestCase, self).__init__(*args, **kwargs) - self.config = mock.Mock(san_login='', - san_password='', - san_ip='', - xtremio_cluster_name='brick1', - xtremio_provisioning_factor=20.0, - max_over_subscription_ratio=20.0, - xtremio_volumes_per_glance_cache=100, - driver_ssl_cert_verify=True, - driver_ssl_cert_path= '/test/path/root_ca.crt', - xtremio_array_busy_retry_count=5, - xtremio_array_busy_retry_interval=5) - - def safe_get(key): - return getattr(self.config, key) - self.config.safe_get = safe_get - - def setUp(self): - super(BaseXtremIODriverTestCase, self).setUp() - clean_xms_data() - - self.driver = xtremio.XtremIOISCSIDriver(configuration=self.config) - self.driver.client = xtremio.XtremIOClient42(self.config, - self.config - .xtremio_cluster_name) - self.data = CommonData() - - -@mock.patch('cinder.volume.drivers.dell_emc.xtremio.XtremIOClient.req') -class XtremIODriverISCSITestCase(BaseXtremIODriverTestCase): - # ##### SetUp Check ##### - def test_check_for_setup_error(self, req): - req.side_effect = xms_request - self.driver.check_for_setup_error() - self.assertEqual(self.driver.client.__class__.__name__, - 'XtremIOClient42') - - def test_fail_check_for_setup_error(self, req): - req.side_effect = xms_request - clusters = xms_data.pop('clusters') - self.assertRaises(exception.VolumeDriverException, - self.driver.check_for_setup_error) - xms_data['clusters'] = clusters - - def test_check_for_setup_error_ver4(self, req): - req.side_effect = xms_request - xms_data['xms'][1]['sw-version'] = '4.0.10-34.hotfix1' - self.driver.check_for_setup_error() - self.assertEqual(self.driver.client.__class__.__name__, - 'XtremIOClient4') - - def test_fail_check_for_array_version(self, req): - req.side_effect = xms_request - cluster = xms_data['clusters'][1] - ver = cluster['sys-sw-version'] - cluster['sys-sw-version'] = '2.0.0-test' - 
self.assertRaises(exception.VolumeBackendAPIException, - self.driver.check_for_setup_error) - cluster['sys-sw-version'] = ver - - def test_client4_uses_v2(self, req): - def base_req(*args, **kwargs): - self.assertIn('v2', args) - req.side_effect = base_req - self.driver.client.req('volumes') - - def test_get_stats(self, req): - req.side_effect = xms_request - stats = self.driver.get_volume_stats(True) - self.assertEqual(self.driver.backend_name, - stats['volume_backend_name']) - -# ##### Volumes ##### - def test_create_volume_with_cg(self, req): - req.side_effect = xms_request - self.driver.create_volume(self.data.test_volume) - - def test_extend_volume(self, req): - req.side_effect = xms_request - self.driver.create_volume(self.data.test_volume) - self.driver.extend_volume(self.data.test_volume, 5) - - def test_fail_extend_volume(self, req): - req.side_effect = xms_request - self.assertRaises(exception.VolumeDriverException, - self.driver.extend_volume, self.data.test_volume, 5) - - def test_delete_volume(self, req): - req.side_effect = xms_request - self.driver.create_volume(self.data.test_volume) - self.driver.delete_volume(self.data.test_volume) - - def test_duplicate_volume(self, req): - req.side_effect = xms_request - self.driver.create_volume(self.data.test_volume) - self.assertRaises(exception.VolumeBackendAPIException, - self.driver.create_volume, self.data.test_volume) - -# ##### Snapshots ##### - def test_create_snapshot(self, req): - req.side_effect = xms_request - self.driver.create_volume(self.data.test_volume) - self.driver.create_snapshot(self.data.test_snapshot) - self.assertEqual(self.data.test_snapshot['id'], - xms_data['volumes'][2]['name']) - - def test_create_delete_snapshot(self, req): - req.side_effect = xms_request - self.driver.create_volume(self.data.test_volume) - self.driver.create_snapshot(self.data.test_snapshot) - self.assertEqual(self.data.test_snapshot['id'], - xms_data['volumes'][2]['name']) - 
self.driver.delete_snapshot(self.data.test_snapshot) - - def test_failed_rename_snapshot(self, req): - req.side_effect = xms_failed_rename_snapshot_request - self.assertRaises(exception.VolumeBackendAPIException, - self.driver.create_snapshot, - self.data.test_snapshot) - self.assertEqual(0, len(xms_data['volumes'])) - - def test_volume_from_snapshot(self, req): - req.side_effect = xms_request - xms_data['volumes'] = {} - self.driver.create_volume(self.data.test_volume) - self.driver.create_snapshot(self.data.test_snapshot) - self.driver.create_volume_from_snapshot(self.data.test_volume2, - self.data.test_snapshot) - -# ##### Clone Volume ##### - def test_clone_volume(self, req): - req.side_effect = xms_request - self.driver.db = mock.Mock() - (self.driver.db. - image_volume_cache_get_by_volume_id.return_value) = mock.MagicMock() - self.driver.create_volume(self.data.test_volume) - xms_data['volumes'][1]['num-of-dest-snaps'] = 50 - self.driver.create_cloned_volume(self.data.test_clone, - self.data.test_volume) - - def test_clone_volume_exceed_conf_limit(self, req): - req.side_effect = xms_request - self.driver.db = mock.Mock() - (self.driver.db. - image_volume_cache_get_by_volume_id.return_value) = mock.MagicMock() - self.driver.create_volume(self.data.test_volume) - xms_data['volumes'][1]['num-of-dest-snaps'] = 200 - self.assertRaises(exception.CinderException, - self.driver.create_cloned_volume, - self.data.test_clone, - self.data.test_volume) - - @mock.patch.object(xtremio.XtremIOClient4, 'create_snapshot') - def test_clone_volume_exceed_array_limit(self, create_snap, req): - create_snap.side_effect = exception.XtremIOSnapshotsLimitExceeded() - req.side_effect = xms_request - self.driver.db = mock.Mock() - (self.driver.db. 
- image_volume_cache_get_by_volume_id.return_value) = mock.MagicMock() - self.driver.create_volume(self.data.test_volume) - xms_data['volumes'][1]['num-of-dest-snaps'] = 50 - self.assertRaises(exception.CinderException, - self.driver.create_cloned_volume, - self.data.test_clone, - self.data.test_volume) - - def test_clone_volume_too_many_snaps(self, req): - req.side_effect = xms_request - response = mock.MagicMock() - response.status_code = 400 - response.json.return_value = { - "message": "too_many_snapshots_per_vol", - "error_code": 400 - } - self.assertRaises(exception.XtremIOSnapshotsLimitExceeded, - self.driver.client.handle_errors, - response, '', '') - - def test_clone_volume_too_many_objs(self, req): - req.side_effect = xms_request - response = mock.MagicMock() - response.status_code = 400 - response.json.return_value = { - "message": "too_many_objs", - "error_code": 400 - } - self.assertRaises(exception.XtremIOSnapshotsLimitExceeded, - self.driver.client.handle_errors, - response, '', '') - - def test_update_migrated_volume(self, req): - original = self.data.test_volume - new = self.data.test_volume2 - update = (self.driver. - update_migrated_volume({}, - original, new, 'available')) - req.assert_called_once_with('volumes', 'PUT', - {'name': original['id']}, new['id'], - None, 'v2') - self.assertEqual({'_name_id': None, - 'provider_location': None}, update) - - def test_update_migrated_volume_failed_rename(self, req): - req.side_effect = exception.VolumeBackendAPIException( - data='failed rename') - original = self.data.test_volume - new = copy.deepcopy(self.data.test_volume2) - fake_provider = '__provider' - new['provider_location'] = fake_provider - new['_name_id'] = None - update = (self.driver. 
- update_migrated_volume({}, - original, new, 'available')) - self.assertEqual({'_name_id': new['id'], - 'provider_location': fake_provider}, - update) - - def test_clone_volume_and_resize(self, req): - req.side_effect = xms_request - self.driver.db = mock.Mock() - (self.driver.db. - image_volume_cache_get_by_volume_id.return_value) = mock.MagicMock() - self.driver.create_volume(self.data.test_volume) - vol = xms_data['volumes'][1] - vol['num-of-dest-snaps'] = 0 - clone = self.data.test_clone.copy() - clone['size'] = 2 - with mock.patch.object(self.driver, - 'extend_volume') as extend: - self.driver.create_cloned_volume(clone, self.data.test_volume) - extend.assert_called_once_with(clone, clone['size']) - - def test_clone_volume_and_resize_fail(self, req): - req.side_effect = xms_request - self.driver.create_volume(self.data.test_volume) - vol = xms_data['volumes'][1] - - def failed_extend(obj_type='volumes', method='GET', data=None, - *args, **kwargs): - if method == 'GET': - return {'content': vol} - elif method == 'POST': - return {'links': [{'href': 'volume/2'}]} - elif method == 'PUT': - if 'name' in data: - return - raise exception.VolumeBackendAPIException('Failed Clone') - - req.side_effect = failed_extend - self.driver.db = mock.Mock() - (self.driver.db. 
- image_volume_cache_get_by_volume_id.return_value) = mock.MagicMock() - vol['num-of-dest-snaps'] = 0 - clone = self.data.test_clone.copy() - clone['size'] = 2 - with mock.patch.object(self.driver, - 'delete_volume') as delete: - self.assertRaises(exception.VolumeBackendAPIException, - self.driver.create_cloned_volume, - clone, - self.data.test_volume) - self.assertTrue(delete.called) - -# ##### Connection ##### - def test_no_portals_configured(self, req): - req.side_effect = xms_request - portals = xms_data['iscsi-portals'].copy() - xms_data['iscsi-portals'].clear() - lunmap = {'lun': 4} - self.assertRaises(exception.VolumeDriverException, - self.driver._get_iscsi_properties, lunmap) - xms_data['iscsi-portals'] = portals - - def test_initialize_connection(self, req): - req.side_effect = xms_request - self.driver.create_volume(self.data.test_volume) - self.driver.create_volume(self.data.test_volume2) - map_data = self.driver.initialize_connection(self.data.test_volume, - self.data.connector) - self.assertEqual(1, map_data['data']['target_lun']) - - def test_initialize_connection_existing_ig(self, req): - req.side_effect = xms_request - self.driver.create_volume(self.data.test_volume) - self.driver.create_volume(self.data.test_volume2) - self.driver.initialize_connection(self.data.test_volume, - self.data.connector) - i1 = xms_data['initiators'][1] - i1['ig-id'] = ['', i1['ig-id'], 1] - i1['chap-authentication-initiator-password'] = 'chap_password1' - i1['chap-discovery-initiator-password'] = 'chap_password2' - self.driver.initialize_connection(self.data.test_volume2, - self.data.connector) - - def test_terminate_connection(self, req): - req.side_effect = xms_request - self.driver.create_volume(self.data.test_volume) - self.driver.create_volume(self.data.test_volume2) - self.driver.initialize_connection(self.data.test_volume, - self.data.connector) - self.driver.terminate_connection(self.data.test_volume, - self.data.connector) - - def 
test_terminate_connection_fail_on_bad_volume(self, req): - req.side_effect = xms_request - self.assertRaises(exception.NotFound, - self.driver.terminate_connection, - self.data.test_volume, - self.data.connector) - - def test_get_ig_indexes_from_initiators_called_once(self, req): - req.side_effect = xms_request - self.driver.create_volume(self.data.test_volume) - map_data = self.driver.initialize_connection(self.data.test_volume, - self.data.connector) - i1 = xms_data['initiators'][1] - i1['ig-id'] = ['', i1['ig-id'], 1] - self.assertEqual(1, map_data['data']['target_lun']) - - with mock.patch.object(self.driver, - '_get_ig_indexes_from_initiators') as get_idx: - get_idx.return_value = [1] - self.driver.terminate_connection(self.data.test_volume, - self.data.connector) - get_idx.assert_called_once_with(self.data.connector) - - def test_initialize_connection_after_enabling_chap(self, req): - req.side_effect = xms_request - self.driver.create_volume(self.data.test_volume) - self.driver.create_volume(self.data.test_volume2) - map_data = self.driver.initialize_connection(self.data.test_volume, - self.data.connector) - self.assertIsNone(map_data['data'].get('access_mode')) - c1 = xms_data['clusters'][1] - c1['chap-authentication-mode'] = 'initiator' - c1['chap-discovery-mode'] = 'initiator' - i1 = xms_data['initiators'][1] - i1['ig-id'] = ['', i1['ig-id'], 1] - i1['chap-authentication-initiator-password'] = 'chap_password1' - i1['chap-discovery-initiator-password'] = 'chap_password2' - map_data = self.driver.initialize_connection(self.data.test_volume2, - self.data.connector) - self.assertEqual('chap_password1', map_data['data']['auth_password']) - self.assertEqual('chap_password2', - map_data['data']['discovery_auth_password']) - - def test_initialize_connection_after_disabling_chap(self, req): - req.side_effect = xms_request - self.driver.create_volume(self.data.test_volume) - self.driver.create_volume(self.data.test_volume2) - c1 = xms_data['clusters'][1] - 
c1['chap-authentication-mode'] = 'initiator' - c1['chap-discovery-mode'] = 'initiator' - self.driver.initialize_connection(self.data.test_volume, - self.data.connector) - i1 = xms_data['initiators'][1] - i1['ig-id'] = ['', i1['ig-id'], 1] - i1['chap-authentication-initiator-password'] = 'chap_password1' - i1['chap-discovery-initiator-password'] = 'chap_password2' - i1['chap-authentication-initiator-password'] = None - i1['chap-discovery-initiator-password'] = None - self.driver.initialize_connection(self.data.test_volume2, - self.data.connector) - - @mock.patch('oslo_utils.strutils.mask_dict_password') - def test_initialize_connection_masks_password(self, mask_dict, req): - req.side_effect = xms_request - self.driver.create_volume(self.data.test_volume) - self.driver.initialize_connection(self.data.test_volume, - self.data.connector) - self.assertTrue(mask_dict.called) - - def test_add_auth(self, req): - req.side_effect = xms_request - data = {} - self.driver._add_auth(data, True, True) - self.assertIn('initiator-discovery-user-name', data, - 'Missing discovery user in data') - self.assertIn('initiator-discovery-password', data, - 'Missing discovery password in data') - - def test_initialize_connection_bad_ig(self, req): - req.side_effect = xms_bad_request - self.assertRaises(exception.VolumeBackendAPIException, - self.driver.initialize_connection, - self.data.test_volume, - self.data.connector) - self.driver.delete_volume(self.data.test_volume) - -# ##### Manage Volumes ##### - def test_manage_volume(self, req): - req.side_effect = xms_request - xms_data['volumes'] = {1: {'name': 'unmanaged1', - 'index': 1, - 'vol-size': '3', - }, - } - ref_vol = {"source-name": "unmanaged1"} - self.driver.manage_existing(self.data.test_volume, ref_vol) - - def test_failed_manage_volume(self, req): - req.side_effect = xms_request - xms_data['volumes'] = {1: {'name': 'unmanaged1', - 'index': 1, - 'vol-size': '3', - }, - } - invalid_ref = {"source-name": "invalid"} - 
self.assertRaises(exception.ManageExistingInvalidReference, - self.driver.manage_existing, - self.data.test_volume, invalid_ref) - - def test_get_manage_volume_size(self, req): - req.side_effect = xms_request - xms_data['volumes'] = {1: {'name': 'unmanaged1', - 'index': 1, - 'vol-size': '1000000', - }, - } - ref_vol = {"source-name": "unmanaged1"} - size = self.driver.manage_existing_get_size(self.data.test_volume, - ref_vol) - self.assertEqual(1, size) - - def test_manage_volume_size_invalid_input(self, req): - self.assertRaises(exception.ManageExistingInvalidReference, - self.driver.manage_existing_get_size, - self.data.test_volume, {}) - - def test_failed_manage_volume_size(self, req): - req.side_effect = xms_request - xms_data['volumes'] = {1: {'name': 'unmanaged1', - 'index': 1, - 'vol-size': '3', - }, - } - invalid_ref = {"source-name": "invalid"} - self.assertRaises(exception.ManageExistingInvalidReference, - self.driver.manage_existing_get_size, - self.data.test_volume, invalid_ref) - - def test_unmanage_volume(self, req): - req.side_effect = xms_request - self.driver.create_volume(self.data.test_volume) - self.driver.unmanage(self.data.test_volume) - - def test_failed_unmanage_volume(self, req): - req.side_effect = xms_request - self.assertRaises(exception.VolumeNotFound, self.driver.unmanage, - self.data.test_volume2) - - def test_manage_snapshot(self, req): - req.side_effect = xms_request - vol_uid = self.data.test_snapshot.volume_id - xms_data['volumes'] = {1: {'name': vol_uid, - 'index': 1, - 'vol-size': '3', - }, - 2: {'name': 'unmanaged', - 'index': 2, - 'ancestor-vol-id': ['', vol_uid, 1], - 'vol-size': '3'} - } - ref_vol = {"source-name": "unmanaged"} - self.driver.manage_existing_snapshot(self.data.test_snapshot, ref_vol) - - def test_get_manage_snapshot_size(self, req): - req.side_effect = xms_request - vol_uid = self.data.test_snapshot.volume_id - xms_data['volumes'] = {1: {'name': vol_uid, - 'index': 1, - 'vol-size': '3', - }, - 2: {'name': 
'unmanaged', - 'index': 2, - 'ancestor-vol-id': ['', vol_uid, 1], - 'vol-size': '3'} - } - ref_vol = {"source-name": "unmanaged"} - self.driver.manage_existing_snapshot_get_size(self.data.test_snapshot, - ref_vol) - - def test_manage_snapshot_invalid_snapshot(self, req): - req.side_effect = xms_request - xms_data['volumes'] = {1: {'name': 'unmanaged1', - 'index': 1, - 'vol-size': '3', - 'ancestor-vol-id': []} - } - ref_vol = {"source-name": "unmanaged1"} - self.assertRaises(exception.ManageExistingInvalidReference, - self.driver.manage_existing_snapshot, - self.data.test_snapshot, ref_vol) - - def test_unmanage_snapshot(self, req): - req.side_effect = xms_request - vol_uid = self.data.test_snapshot.volume_id - xms_data['volumes'] = {1: {'name': vol_uid, - 'index': 1, - 'vol-size': '3', - }, - 2: {'name': 'unmanaged', - 'index': 2, - 'ancestor-vol-id': ['', vol_uid, 1], - 'vol-size': '3'} - } - ref_vol = {"source-name": "unmanaged"} - self.driver.manage_existing_snapshot(self.data.test_snapshot, ref_vol) - self.driver.unmanage_snapshot(self.data.test_snapshot) - -# ##### Consistancy Groups ##### - @mock.patch('cinder.objects.snapshot.SnapshotList.get_all_for_cgsnapshot') - def test_cg_create(self, get_all_for_cgsnapshot, req): - req.side_effect = xms_request - d = self.data - snapshot_obj = fake_snapshot.fake_snapshot_obj(d.context) - snapshot_obj.consistencygroup_id = d.group['id'] - get_all_for_cgsnapshot.return_value = [snapshot_obj] - - self.driver.create_consistencygroup(d.context, d.group) - self.assertEqual(1, len(xms_data['consistency-groups'])) - - @mock.patch('cinder.objects.snapshot.SnapshotList.get_all_for_cgsnapshot') - def test_cg_update(self, get_all_for_cgsnapshot, req): - req.side_effect = xms_request - d = self.data - snapshot_obj = fake_snapshot.fake_snapshot_obj(d.context) - snapshot_obj.consistencygroup_id = d.group['id'] - get_all_for_cgsnapshot.return_value = [snapshot_obj] - - self.driver.create_consistencygroup(d.context, d.group) - 
self.driver.update_consistencygroup(d.context, d.group, - add_volumes=[d.test_volume, - d.test_volume2]) - self.assertEqual(2, len(xms_data['consistency-group-volumes'])) - self.driver.update_consistencygroup(d.context, d.group, - remove_volumes=[d.test_volume2]) - self.assertEqual(1, len(xms_data['consistency-group-volumes'])) - - @mock.patch('cinder.objects.snapshot.SnapshotList.get_all_for_cgsnapshot') - def test_create_cg(self, get_all_for_cgsnapshot, req): - req.side_effect = xms_request - d = self.data - snapshot_obj = fake_snapshot.fake_snapshot_obj(d.context) - snapshot_obj.consistencygroup_id = d.group['id'] - get_all_for_cgsnapshot.return_value = [snapshot_obj] - self.driver.create_consistencygroup(d.context, d.group) - self.driver.update_consistencygroup(d.context, d.group, - add_volumes=[d.test_volume, - d.test_volume2]) - self.driver.db = mock.Mock() - (self.driver.db. - volume_get_all_by_group.return_value) = [mock.MagicMock()] - res = self.driver.create_cgsnapshot(d.context, d.cgsnapshot, - [snapshot_obj]) - self.assertEqual((None, None), res) - - @mock.patch('cinder.objects.snapshot.SnapshotList.get_all_for_cgsnapshot') - def test_cg_delete(self, get_all_for_cgsnapshot, req): - req.side_effect = xms_request - d = self.data - snapshot_obj = fake_snapshot.fake_snapshot_obj(d.context) - snapshot_obj.consistencygroup_id = d.group['id'] - get_all_for_cgsnapshot.return_value = [snapshot_obj] - self.driver.create_consistencygroup(d.context, d.group) - self.driver.update_consistencygroup(d.context, d.group, - add_volumes=[d.test_volume, - d.test_volume2]) - self.driver.db = mock.Mock() - self.driver.create_cgsnapshot(d.context, d.cgsnapshot, [snapshot_obj]) - self.driver.delete_consistencygroup(d.context, d.group, []) - - def test_cg_delete_with_volume(self, req): - req.side_effect = xms_request - d = self.data - self.driver.create_consistencygroup(d.context, d.group) - self.driver.create_volume(d.test_volume) - 
self.driver.update_consistencygroup(d.context, d.group, - add_volumes=[d.test_volume]) - self.driver.db = mock.Mock() - - results, volumes = \ - self.driver.delete_consistencygroup(d.context, - d.group, - [d.test_volume]) - - self.assertTrue(all(volume['status'] == 'deleted' for volume in - volumes)) - - @mock.patch('cinder.objects.snapshot.SnapshotList.get_all_for_cgsnapshot') - def test_cg_snapshot(self, get_all_for_cgsnapshot, req): - req.side_effect = xms_request - d = self.data - snapshot_obj = fake_snapshot.fake_snapshot_obj(d.context) - snapshot_obj.consistencygroup_id = d.group['id'] - get_all_for_cgsnapshot.return_value = [snapshot_obj] - self.driver.create_consistencygroup(d.context, d.group) - self.driver.update_consistencygroup(d.context, d.group, - add_volumes=[d.test_volume, - d.test_volume2]) - snapset_name = self.driver._get_cgsnap_name(d.cgsnapshot) - self.assertEqual(snapset_name, - '192eb39b6c2f420cbae33cfd117f0345192eb39b6c2f420cbae' - '33cfd117f9876') - snapset1 = {'ancestor-vol-id': ['', d.test_volume['id'], 2], - 'consistencygroup_id': d.group['id'], - 'name': snapset_name, - 'index': 1} - xms_data['snapshot-sets'] = {snapset_name: snapset1, 1: snapset1} - res = self.driver.delete_cgsnapshot(d.context, d.cgsnapshot, - [snapshot_obj]) - self.assertEqual((None, None), res) - - def test_delete_cgsnapshot(self, req): - d = self.data - snapshot_obj = fake_snapshot.fake_snapshot_obj(d.context) - snapshot_obj.consistencygroup_id = d.group['id'] - self.driver.delete_cgsnapshot(d.context, d.cgsnapshot, - [snapshot_obj]) - req.assert_called_once_with('snapshot-sets', 'DELETE', None, - '192eb39b6c2f420cbae33cfd117f0345192eb39' - 'b6c2f420cbae33cfd117f9876', None, 'v2') - - @mock.patch('cinder.objects.snapshot.SnapshotList.get_all_for_cgsnapshot') - def test_cg_from_src_snapshot(self, get_all_for_cgsnapshot, req): - req.side_effect = xms_request - d = self.data - - snapshot_obj = fake_snapshot.fake_snapshot_obj(d.context) - 
snapshot_obj.consistencygroup_id = d.group['id'] - snapshot_obj.volume_id = d.test_volume['id'] - get_all_for_cgsnapshot.return_value = [snapshot_obj] - - self.driver.create_consistencygroup(d.context, d.group) - self.driver.create_volume(d.test_volume) - self.driver.create_cgsnapshot(d.context, d.cgsnapshot, []) - xms_data['volumes'][2]['ancestor-vol-id'] = (xms_data['volumes'][1] - ['vol-id']) - snapset_name = self.driver._get_cgsnap_name(d.cgsnapshot) - - snapset1 = {'vol-list': [xms_data['volumes'][2]['vol-id']], - 'name': snapset_name, - 'index': 1} - xms_data['snapshot-sets'] = {snapset_name: snapset1, 1: snapset1} - cg_obj = fake_cg.fake_consistencyobject_obj(d.context) - new_vol1 = fake_volume_obj(d.context) - snapshot1 = (fake_snapshot - .fake_snapshot_obj - (d.context, volume_id=d.test_volume['id'])) - res = self.driver.create_consistencygroup_from_src(d.context, cg_obj, - [new_vol1], - d.cgsnapshot, - [snapshot1]) - self.assertEqual((None, None), res) - - @mock.patch('cinder.objects.snapshot.SnapshotList.get_all_for_cgsnapshot') - def test_cg_from_src_cg(self, get_all_for_cgsnapshot, req): - req.side_effect = xms_request - d = self.data - - snapshot_obj = fake_snapshot.fake_snapshot_obj(d.context) - snapshot_obj.consistencygroup_id = d.group['id'] - snapshot_obj.volume_id = d.test_volume['id'] - get_all_for_cgsnapshot.return_value = [snapshot_obj] - - self.driver.create_consistencygroup(d.context, d.group) - self.driver.create_volume(d.test_volume) - self.driver.create_cgsnapshot(d.context, d.cgsnapshot, []) - xms_data['volumes'][2]['ancestor-vol-id'] = (xms_data['volumes'][1] - ['vol-id']) - snapset_name = self.driver._get_cgsnap_name(d.cgsnapshot) - - snapset1 = {'vol-list': [xms_data['volumes'][2]['vol-id']], - 'name': snapset_name, - 'index': 1} - xms_data['snapshot-sets'] = {snapset_name: snapset1, 1: snapset1} - cg_obj = fake_cg.fake_consistencyobject_obj(d.context) - new_vol1 = fake_volume_obj(d.context) - new_cg_obj = 
fake_cg.fake_consistencyobject_obj( - d.context, id=fake.CONSISTENCY_GROUP2_ID) - snapset2_name = new_cg_obj.id - new_vol1.id = '192eb39b-6c2f-420c-bae3-3cfd117f0001' - new_vol2 = fake_volume_obj(d.context) - snapset2 = {'vol-list': [xms_data['volumes'][2]['vol-id']], - 'name': snapset2_name, - 'index': 1} - xms_data['snapshot-sets'].update({5: snapset2, - snapset2_name: snapset2}) - self.driver.create_consistencygroup_from_src(d.context, new_cg_obj, - [new_vol2], - None, None, - cg_obj, [new_vol1]) - - @mock.patch('cinder.objects.snapshot.SnapshotList.get_all_for_cgsnapshot') - def test_invalid_cg_from_src_input(self, get_all_for_cgsnapshot, req): - req.side_effect = xms_request - d = self.data - - self.assertRaises(exception.InvalidInput, - self.driver.create_consistencygroup_from_src, - d.context, d.group, [], None, None, None, None) - -# #### Groups #### - def test_group_create(self, req): - """Test group create.""" - - req.side_effect = xms_request - d = self.data - - self.driver.create_group(d.context, d.group) - self.assertEqual(1, len(xms_data['consistency-groups'])) - - def test_group_update(self, req): - """Test group update.""" - - req.side_effect = xms_request - d = self.data - - self.driver.create_consistencygroup(d.context, d.group) - self.driver.update_consistencygroup(d.context, d.group, - add_volumes=[d.test_volume, - d.test_volume2]) - self.assertEqual(2, len(xms_data['consistency-group-volumes'])) - self.driver.update_group(d.context, d.group, - remove_volumes=[d.test_volume2]) - self.assertEqual(1, len(xms_data['consistency-group-volumes'])) - - def test_create_group_snapshot(self, req): - """Test create group snapshot.""" - - req.side_effect = xms_request - d = self.data - snapshot_obj = fake_snapshot.fake_snapshot_obj(d.context) - snapshot_obj.consistencygroup_id = d.group['id'] - - self.driver.create_group(d.context, d.group) - self.driver.update_group(d.context, d.group, - add_volumes=[d.test_volume, - d.test_volume2]) - - res = 
self.driver.create_group_snapshot(d.context, d.cgsnapshot, - [snapshot_obj]) - self.assertEqual((None, None), res) - - def test_group_delete(self, req): - """"Test delete group.""" - req.side_effect = xms_request - d = self.data - snapshot_obj = fake_snapshot.fake_snapshot_obj(d.context) - snapshot_obj.consistencygroup_id = d.group['id'] - self.driver.create_group(d.context, d.group) - self.driver.update_group(d.context, d.group, - add_volumes=[d.test_volume, - d.test_volume2]) - self.driver.db = mock.Mock() - (self.driver.db. - volume_get_all_by_group.return_value) = [mock.MagicMock()] - self.driver.create_group_snapshot(d.context, d.cgsnapshot, - [snapshot_obj]) - self.driver.delete_group(d.context, d.group, []) - - def test_group_delete_with_volume(self, req): - req.side_effect = xms_request - d = self.data - self.driver.create_consistencygroup(d.context, d.group) - self.driver.create_volume(d.test_volume) - self.driver.update_consistencygroup(d.context, d.group, - add_volumes=[d.test_volume]) - self.driver.db = mock.Mock() - - results, volumes = \ - self.driver.delete_group(d.context, d.group, [d.test_volume]) - - self.assertTrue(all(volume['status'] == 'deleted' for volume in - volumes)) - - def test_group_snapshot(self, req): - """test group snapshot.""" - req.side_effect = xms_request - d = self.data - snapshot_obj = fake_snapshot.fake_snapshot_obj(d.context) - snapshot_obj.consistencygroup_id = d.group['id'] - self.driver.create_group(d.context, d.group) - self.driver.update_group(d.context, d.group, - add_volumes=[d.test_volume, - d.test_volume2]) - - snapset_name = self.driver._get_cgsnap_name(d.cgsnapshot) - self.assertEqual(snapset_name, - '192eb39b6c2f420cbae33cfd117f0345192eb39b6c2f420cbae' - '33cfd117f9876') - snapset1 = {'ancestor-vol-id': ['', d.test_volume['id'], 2], - 'consistencygroup_id': d.group['id'], - 'name': snapset_name, - 'index': 1} - xms_data['snapshot-sets'] = {snapset_name: snapset1, 1: snapset1} - res = 
self.driver.delete_group_snapshot(d.context, d.cgsnapshot, - [snapshot_obj]) - self.assertEqual((None, None), res) - - def test_group_snapshot_with_generic_group(self, req): - """test group snapshot shot with generic group .""" - req.side_effect = xms_request - d = self.data - snapshot_obj = fake_snapshot.fake_snapshot_obj(d.context) - snapshot_obj.consistencygroup_id = d.group['id'] - self.driver.create_group(d.context, d.group) - self.driver.update_group(d.context, d.group, - add_volumes=[d.test_volume, - d.test_volume2]) - - snapset_name = self.driver._get_cgsnap_name(d.cgsnapshot_as_group_id) - self.assertEqual(snapset_name, - '192eb39b6c2f420cbae33cfd117f0345192eb39b6c2f420cbae' - '33cfd117f9876') - snapset1 = {'ancestor-vol-id': ['', d.test_volume['id'], 2], - 'consistencygroup_id': d.group['id'], - 'name': snapset_name, - 'index': 1} - xms_data['snapshot-sets'] = {snapset_name: snapset1, 1: snapset1} - res = self.driver.delete_group_snapshot(d.context, d.cgsnapshot, - [snapshot_obj]) - self.assertEqual((None, None), res) - - def test_delete_group_snapshot(self, req): - """test delete group snapshot.""" - d = self.data - snapshot_obj = fake_snapshot.fake_snapshot_obj(d.context) - snapshot_obj.consistencygroup_id = d.group['id'] - self.driver.delete_group_snapshot(d.context, d.cgsnapshot, - [snapshot_obj]) - req.assert_called_once_with('snapshot-sets', 'DELETE', None, - '192eb39b6c2f420cbae33cfd117f0345192eb39' - 'b6c2f420cbae33cfd117f9876', None, 'v2') - - def test_delete_group_snapshot_with_generic_group(self, req): - """test delete group snapshot.""" - d = self.data - snapshot_obj = fake_snapshot.fake_snapshot_obj(d.context) - snapshot_obj.consistencygroup_id = d.group['id'] - self.driver.delete_group_snapshot(d.context, d.cgsnapshot_as_group_id, - [snapshot_obj]) - req.assert_called_once_with('snapshot-sets', 'DELETE', None, - '192eb39b6c2f420cbae33cfd117f0345192eb39' - 'b6c2f420cbae33cfd117f9876', None, 'v2') - - def test_group_from_src_snapshot(self, 
req): - """test group from source snapshot.""" - req.side_effect = xms_request - d = self.data - - self.driver.create_group(d.context, d.group) - self.driver.create_volume(d.test_volume) - self.driver.create_group_snapshot(d.context, d.cgsnapshot, []) - xms_data['volumes'][2]['ancestor-vol-id'] = (xms_data['volumes'][1] - ['vol-id']) - snapset_name = self.driver._get_cgsnap_name(d.cgsnapshot) - - snapset1 = {'vol-list': [xms_data['volumes'][2]['vol-id']], - 'name': snapset_name, - 'index': 1} - xms_data['snapshot-sets'] = {snapset_name: snapset1, 1: snapset1} - cg_obj = fake_cg.fake_consistencyobject_obj(d.context) - new_vol1 = fake_volume_obj(d.context) - snapshot1 = (fake_snapshot - .fake_snapshot_obj - (d.context, volume_id=d.test_volume['id'])) - res = self.driver.create_group_from_src(d.context, cg_obj, - [new_vol1], - d.cgsnapshot, - [snapshot1]) - self.assertEqual((None, None), res) - - def test_group_from_src_group(self, req): - """test group from source group.""" - req.side_effect = xms_request - d = self.data - - self.driver.create_group(d.context, d.group) - self.driver.create_volume(d.test_volume) - self.driver.create_group_snapshot(d.context, d.cgsnapshot, []) - xms_data['volumes'][2]['ancestor-vol-id'] = (xms_data['volumes'][1] - ['vol-id']) - snapset_name = self.driver._get_cgsnap_name(d.cgsnapshot) - - snapset1 = {'vol-list': [xms_data['volumes'][2]['vol-id']], - 'name': snapset_name, - 'index': 1} - xms_data['snapshot-sets'] = {snapset_name: snapset1, 1: snapset1} - cg_obj = fake_cg.fake_consistencyobject_obj(d.context) - new_vol1 = fake_volume_obj(d.context) - new_cg_obj = fake_cg.fake_consistencyobject_obj( - d.context, id=fake.CONSISTENCY_GROUP2_ID) - snapset2_name = new_cg_obj.id - new_vol1.id = '192eb39b-6c2f-420c-bae3-3cfd117f0001' - new_vol2 = fake_volume_obj(d.context) - snapset2 = {'vol-list': [xms_data['volumes'][2]['vol-id']], - 'name': snapset2_name, - 'index': 1} - xms_data['snapshot-sets'].update({5: snapset2, - snapset2_name: 
snapset2}) - self.driver.create_group_from_src(d.context, new_cg_obj, - [new_vol2], - None, None, - cg_obj, [new_vol1]) - - def test_invalid_group_from_src_input(self, req): - """test invalid group from source.""" - req.side_effect = xms_request - d = self.data - - self.assertRaises(exception.InvalidInput, - self.driver.create_group_from_src, - d.context, d.group, [], None, None, None, None) - - -@mock.patch('requests.request') -class XtremIODriverTestCase(BaseXtremIODriverTestCase): - # ##### XMS Client ##### - @mock.patch.object(time, 'sleep', mock.Mock(return_value=0)) - def test_retry_request(self, req): - busy_response = mock.MagicMock() - busy_response.status_code = 400 - busy_response.json.return_value = { - "message": "system_is_busy", - "error_code": 400 - } - good_response = mock.MagicMock() - good_response.status_code = 200 - - XtremIODriverTestCase.req_count = 0 - - def busy_request(*args, **kwargs): - if XtremIODriverTestCase.req_count < 1: - XtremIODriverTestCase.req_count += 1 - return busy_response - return good_response - - req.side_effect = busy_request - self.driver.create_volume(self.data.test_volume) - - def test_verify_cert(self, req): - good_response = mock.MagicMock() - good_response.status_code = 200 - - def request_verify_cert(*args, **kwargs): - self.assertEqual(kwargs['verify'], '/test/path/root_ca.crt') - return good_response - - req.side_effect = request_verify_cert - self.driver.client.req('volumes') - - -@mock.patch('cinder.volume.drivers.dell_emc.xtremio.XtremIOClient.req') -class XtremIODriverFCTestCase(BaseXtremIODriverTestCase): - def setUp(self): - super(XtremIODriverFCTestCase, self).setUp() - self.driver = xtremio.XtremIOFCDriver( - configuration=self.config) - -# ##### Connection FC##### - def test_initialize_connection(self, req): - req.side_effect = xms_request - - self.driver.create_volume(self.data.test_volume) - map_data = self.driver.initialize_connection(self.data.test_volume, - self.data.connector) - 
self.assertEqual(1, map_data['data']['target_lun']) - - def test_terminate_connection(self, req): - req.side_effect = xms_request - - self.driver.create_volume(self.data.test_volume) - self.driver.initialize_connection(self.data.test_volume, - self.data.connector) - for i1 in xms_data['initiators'].values(): - i1['ig-id'] = ['', i1['ig-id'], 1] - self.driver.terminate_connection(self.data.test_volume, - self.data.connector) - - def test_force_terminate_connection(self, req): - req.side_effect = xms_request - self.driver.create_volume(self.data.test_volume) - self.driver.initialize_connection(self.data.test_volume, - self.data.connector) - vol1 = xms_data['volumes'][1] - # lun mapping list is a list of triplets (IG OID, TG OID, lun number) - vol1['lun-mapping-list'] = [[['a91e8c81c2d14ae4865187ce4f866f8a', - 'iqn.1993-08.org.debian:01:222', - 1], - ['', 'Default', 1], - 1]] - self.driver.terminate_connection(self.data.test_volume, None) - - def test_initialize_existing_ig_connection(self, req): - req.side_effect = xms_request - self.driver.create_volume(self.data.test_volume) - - pre_existing = 'pre_existing_host' - self.driver._create_ig(pre_existing) - wwpns = self.driver._get_initiator_names(self.data.connector) - for wwpn in wwpns: - data = {'initiator-name': wwpn, 'ig-id': pre_existing, - 'port-address': wwpn} - self.driver.client.req('initiators', 'POST', data) - - def get_fake_initiator(wwpn): - return {'port-address': wwpn, 'ig-id': ['', pre_existing, 1]} - with mock.patch.object(self.driver.client, 'get_initiator', - side_effect=get_fake_initiator): - map_data = self.driver.initialize_connection(self.data.test_volume, - self.data.connector) - self.assertEqual(1, map_data['data']['target_lun']) - self.assertEqual(1, len(xms_data['initiator-groups'])) - - def test_get_initiator_igs_ver4(self, req): - req.side_effect = xms_request - wwpn1 = '11:22:33:44:55:66:77:88' - wwpn2 = '11:22:33:44:55:66:77:89' - port_addresses = [wwpn1, wwpn2] - ig_id = ['', 'my_ig', 
1] - self.driver.client = xtremio.XtremIOClient4(self.config, - self.config - .xtremio_cluster_name) - - def get_fake_initiator(wwpn): - return {'port-address': wwpn, 'ig-id': ig_id} - with mock.patch.object(self.driver.client, 'get_initiator', - side_effect=get_fake_initiator): - self.driver.client.get_initiators_igs(port_addresses) - - def test_get_free_lun(self, req): - def lm_response(*args, **kwargs): - return {'lun-maps': [{'lun': 1}]} - req.side_effect = lm_response - - ig_names = ['test1', 'test2'] - self.driver._get_free_lun(ig_names) - - def test_race_on_terminate_connection(self, req): - """Test for race conditions on num_of_mapped_volumes. - - This test confirms that num_of_mapped_volumes won't break even if we - receive a NotFound exception when retrieving info on a specific - mapping, as that specific mapping could have been deleted between - the request to get the list of exiting mappings and the request to get - the info on one of them. - """ - req.side_effect = xms_request - self.driver.client = xtremio.XtremIOClient3( - self.config, self.config.xtremio_cluster_name) - # We'll wrap num_of_mapped_volumes, we'll store here original method - original_method = self.driver.client.num_of_mapped_volumes - - def fake_num_of_mapped_volumes(*args, **kwargs): - # Add a nonexistent mapping - mappings = [{'href': 'volumes/1'}, {'href': 'volumes/12'}] - - # Side effects will be: 1st call returns the list, then we return - # data for existing mappings, and on the nonexistent one we added - # we return NotFound - side_effect = [{'lun-maps': mappings}, - {'content': xms_data['lun-maps'][1]}, - exception.NotFound] - - with mock.patch.object(self.driver.client, 'req', - side_effect=side_effect): - return original_method(*args, **kwargs) - - self.driver.create_volume(self.data.test_volume) - map_data = self.driver.initialize_connection(self.data.test_volume, - self.data.connector) - self.assertEqual(1, map_data['data']['target_lun']) - with 
mock.patch.object(self.driver.client, 'num_of_mapped_volumes', - side_effect=fake_num_of_mapped_volumes): - self.driver.terminate_connection(self.data.test_volume, - self.data.connector) - self.driver.delete_volume(self.data.test_volume) diff --git a/cinder/tests/unit/volume/drivers/dell_emc/unity/__init__.py b/cinder/tests/unit/volume/drivers/dell_emc/unity/__init__.py deleted file mode 100644 index e69de29bb..000000000 diff --git a/cinder/tests/unit/volume/drivers/dell_emc/unity/fake_exception.py b/cinder/tests/unit/volume/drivers/dell_emc/unity/fake_exception.py deleted file mode 100644 index 3f7f3fcce..000000000 --- a/cinder/tests/unit/volume/drivers/dell_emc/unity/fake_exception.py +++ /dev/null @@ -1,74 +0,0 @@ -# Copyright (c) 2016 Dell Inc. or its subsidiaries. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - - -class StoropsException(Exception): - message = 'Storops Error.' 
- - -class UnityLunNameInUseError(StoropsException): - pass - - -class UnityResourceNotFoundError(StoropsException): - pass - - -class UnitySnapNameInUseError(StoropsException): - pass - - -class UnityDeleteAttachedSnapError(StoropsException): - pass - - -class UnityResourceAlreadyAttachedError(StoropsException): - pass - - -class UnityPolicyNameInUseError(StoropsException): - pass - - -class UnityNothingToModifyError(StoropsException): - pass - - -class UnityThinCloneLimitExceededError(StoropsException): - pass - - -class ExtendLunError(Exception): - pass - - -class DetachIsCalled(Exception): - pass - - -class LunDeleteIsCalled(Exception): - pass - - -class SnapDeleteIsCalled(Exception): - pass - - -class UnexpectedLunDeletion(Exception): - pass - - -class AdapterSetupError(Exception): - pass diff --git a/cinder/tests/unit/volume/drivers/dell_emc/unity/test_adapter.py b/cinder/tests/unit/volume/drivers/dell_emc/unity/test_adapter.py deleted file mode 100644 index e353c2cad..000000000 --- a/cinder/tests/unit/volume/drivers/dell_emc/unity/test_adapter.py +++ /dev/null @@ -1,845 +0,0 @@ -# Copyright (c) 2016 Dell Inc. or its subsidiaries. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -import contextlib -import functools -import unittest - -import mock -from oslo_utils import units - -from cinder import exception -from cinder.tests.unit.volume.drivers.dell_emc.unity \ - import fake_exception as ex -from cinder.tests.unit.volume.drivers.dell_emc.unity import test_client -from cinder.volume.drivers.dell_emc.unity import adapter - - -######################## -# -# Start of Mocks -# -######################## -class MockConfig(object): - def __init__(self): - self.config_group = 'test_backend' - self.unity_storage_pool_names = ['pool1', 'pool2'] - self.unity_io_ports = None - self.reserved_percentage = 5 - self.max_over_subscription_ratio = 300 - self.volume_backend_name = 'backend' - self.san_ip = '1.2.3.4' - self.san_login = 'user' - self.san_password = 'pass' - self.driver_ssl_cert_verify = False - self.driver_ssl_cert_path = None - - def safe_get(self, name): - return getattr(self, name) - - -class MockConnector(object): - @staticmethod - def disconnect_volume(data, device): - pass - - -class MockDriver(object): - def __init__(self): - self.configuration = mock.Mock(volume_dd_blocksize='1M') - - @staticmethod - def _connect_device(conn): - return {'connector': MockConnector(), - 'device': {'path': 'dev'}, - 'conn': {'data': {}}} - - -class MockClient(object): - def __init__(self): - self._system = test_client.MockSystem() - - @staticmethod - def get_pools(): - return test_client.MockResourceList(['pool0', 'pool1']) - - @staticmethod - def create_lun(name, size, pool, description=None, io_limit_policy=None): - return test_client.MockResource(_id=name, name=name) - - @staticmethod - def get_lun(name=None, lun_id=None): - if lun_id is None: - lun_id = 'lun_4' - if lun_id in ('lun_43',): # for thin clone cases - return test_client.MockResource(_id=lun_id, name=name) - if name == 'not_exists': - ret = test_client.MockResource(name=lun_id) - ret.existed = False - else: - if name is None: - name = lun_id - ret = test_client.MockResource(_id=lun_id, 
name=name) - return ret - - @staticmethod - def delete_lun(lun_id): - if lun_id != 'lun_4': - raise ex.UnexpectedLunDeletion() - - @staticmethod - def get_serial(): - return 'CLIENT_SERIAL' - - @staticmethod - def create_snap(src_lun_id, name=None): - if src_lun_id in ('lun_53', 'lun_55'): # for thin clone cases - return test_client.MockResource( - _id='snap_clone_{}'.format(src_lun_id)) - return test_client.MockResource(name=name, _id=src_lun_id) - - @staticmethod - def get_snap(name=None): - if name in ('snap_50',): # for thin clone cases - return name - snap = test_client.MockResource(name=name, _id=name) - if name is not None: - ret = snap - else: - ret = [snap] - return ret - - @staticmethod - def delete_snap(snap): - if snap.name in ('abc-def_snap',): - raise ex.SnapDeleteIsCalled() - - @staticmethod - def create_host(name, uids): - return test_client.MockResource(name=name) - - @staticmethod - def get_host(name): - return test_client.MockResource(name=name) - - @staticmethod - def attach(host, lun_or_snap): - return 10 - - @staticmethod - def detach(host, lun_or_snap): - error_ids = ['lun_43', 'snap_0'] - if host.name == 'host1' and lun_or_snap.get_id() in error_ids: - raise ex.DetachIsCalled() - - @staticmethod - def get_iscsi_target_info(allowed_ports=None): - return [{'portal': '1.2.3.4:1234', 'iqn': 'iqn.1-1.com.e:c.a.a0'}, - {'portal': '1.2.3.5:1234', 'iqn': 'iqn.1-1.com.e:c.a.a1'}] - - @staticmethod - def get_fc_target_info(host=None, logged_in_only=False, - allowed_ports=None): - if host and host.name == 'no_target': - ret = [] - else: - ret = ['8899AABBCCDDEEFF', '8899AABBCCDDFFEE'] - return ret - - @staticmethod - def create_lookup_service(): - return {} - - @staticmethod - def get_io_limit_policy(specs): - return None - - @staticmethod - def extend_lun(lun_id, size_gib): - if size_gib <= 0: - raise ex.ExtendLunError - - @staticmethod - def get_fc_ports(): - return test_client.MockResourceList(ids=['spa_iom_0_fc0', - 'spa_iom_0_fc1']) - - 
@staticmethod - def get_ethernet_ports(): - return test_client.MockResourceList(ids=['spa_eth0', 'spb_eth0']) - - @staticmethod - def thin_clone(obj, name, io_limit_policy, description, new_size_gb): - if (obj.name, name) in ( - ('snap_61', 'lun_60'), ('lun_63', 'lun_60')): - return test_client.MockResource(_id=name) - else: - raise ex.UnityThinCloneLimitExceededError - - @property - def system(self): - return self._system - - -class MockLookupService(object): - @staticmethod - def get_device_mapping_from_network(initiator_wwns, target_wwns): - return { - 'san_1': { - 'initiator_port_wwn_list': - ('200000051e55a100', '200000051e55a121'), - 'target_port_wwn_list': - ('100000051e55a100', '100000051e55a121') - } - } - - -class MockOSResource(mock.Mock): - def __init__(self, *args, **kwargs): - super(MockOSResource, self).__init__(*args, **kwargs) - if 'name' in kwargs: - self.name = kwargs['name'] - - -def mock_adapter(driver_clz): - ret = driver_clz() - ret._client = MockClient() - with mock.patch('cinder.volume.drivers.dell_emc.unity.adapter.' - 'CommonAdapter.validate_ports'), \ - patch_storops(): - ret.do_setup(MockDriver(), MockConfig()) - ret.lookup_service = MockLookupService() - return ret - - -def get_backend_qos_specs(volume): - return None - - -def get_connector_properties(): - return {'host': 'host1', 'wwpns': 'abcdefg'} - - -def get_lun_pl(name): - return 'id^%s|system^CLIENT_SERIAL|type^lun|version^None' % name - - -def get_snap_lun_pl(name): - return 'id^%s|system^CLIENT_SERIAL|type^snap_lun|version^None' % name - - -def get_snap_pl(name): - return 'id^%s|system^CLIENT_SERIAL|type^snapshot|version^None' % name - - -def get_connector_uids(adapter, connector): - return [] - - -def get_connection_info(adapter, hlu, host, connector): - return {} - - -def patch_for_unity_adapter(func): - @functools.wraps(func) - @mock.patch('cinder.volume.drivers.dell_emc.unity.utils.' 
- 'get_backend_qos_specs', - new=get_backend_qos_specs) - @mock.patch('cinder.utils.brick_get_connector_properties', - new=get_connector_properties) - def func_wrapper(*args, **kwargs): - return func(*args, **kwargs) - - return func_wrapper - - -def patch_for_concrete_adapter(clz_str): - def inner_decorator(func): - @functools.wraps(func) - @mock.patch('%s.get_connector_uids' % clz_str, - new=get_connector_uids) - @mock.patch('%s.get_connection_info' % clz_str, - new=get_connection_info) - def func_wrapper(*args, **kwargs): - return func(*args, **kwargs) - return func_wrapper - - return inner_decorator - - -patch_for_iscsi_adapter = patch_for_concrete_adapter( - 'cinder.volume.drivers.dell_emc.unity.adapter.ISCSIAdapter') - - -patch_for_fc_adapter = patch_for_concrete_adapter( - 'cinder.volume.drivers.dell_emc.unity.adapter.FCAdapter') - - -@contextlib.contextmanager -def patch_thin_clone(cloned_lun): - with mock.patch.object(adapter.CommonAdapter, '_thin_clone') as tc: - tc.return_value = cloned_lun - yield tc - - -@contextlib.contextmanager -def patch_dd_copy(copied_lun): - with mock.patch.object(adapter.CommonAdapter, '_dd_copy') as dd: - dd.return_value = copied_lun - yield dd - - -@contextlib.contextmanager -def patch_copy_volume(): - with mock.patch('cinder.volume.utils.copy_volume') as mocked: - yield mocked - - -@contextlib.contextmanager -def patch_storops(): - with mock.patch.object(adapter, 'storops') as storops: - storops.ThinCloneActionEnum = mock.Mock(DD_COPY='DD_COPY') - yield storops - - -class IdMatcher(object): - def __init__(self, obj): - self._obj = obj - - def __eq__(self, other): - return self._obj._id == other._id - - -######################## -# -# Start of Tests -# -######################## - -@mock.patch.object(adapter, 'storops_ex', new=ex) -class CommonAdapterTest(unittest.TestCase): - def setUp(self): - self.adapter = mock_adapter(adapter.CommonAdapter) - - def test_get_managed_pools(self): - ret = self.adapter.get_managed_pools() - 
self.assertIn('pool1', ret) - self.assertNotIn('pool0', ret) - self.assertNotIn('pool2', ret) - - @patch_for_unity_adapter - def test_create_volume(self): - volume = MockOSResource(name='lun_3', size=5, host='unity#pool1') - ret = self.adapter.create_volume(volume) - expected = get_lun_pl('lun_3') - self.assertEqual(expected, ret['provider_location']) - - def test_create_snapshot(self): - volume = MockOSResource(provider_location='id^lun_43') - snap = MockOSResource(volume=volume, name='abc-def_snap') - result = self.adapter.create_snapshot(snap) - self.assertEqual(get_snap_pl('lun_43'), result['provider_location']) - self.assertEqual('lun_43', result['provider_id']) - - def test_delete_snap(self): - def f(): - snap = MockOSResource(name='abc-def_snap') - self.adapter.delete_snapshot(snap) - - self.assertRaises(ex.SnapDeleteIsCalled, f) - - def test_get_lun_id_has_location(self): - volume = MockOSResource(provider_location='id^lun_43') - self.assertEqual('lun_43', self.adapter.get_lun_id(volume)) - - def test_get_lun_id_no_location(self): - volume = MockOSResource(provider_location=None) - self.assertEqual('lun_4', self.adapter.get_lun_id(volume)) - - def test_delete_volume(self): - volume = MockOSResource(provider_location='id^lun_4') - self.adapter.delete_volume(volume) - - def test_get_pool_stats(self): - stats_list = self.adapter.get_pools_stats() - self.assertEqual(1, len(stats_list)) - - stats = stats_list[0] - self.assertEqual('pool1', stats['pool_name']) - self.assertEqual(5, stats['total_capacity_gb']) - self.assertEqual('pool1|CLIENT_SERIAL', stats['location_info']) - self.assertEqual(6, stats['provisioned_capacity_gb']) - self.assertEqual(2, stats['free_capacity_gb']) - self.assertEqual(300, stats['max_over_subscription_ratio']) - self.assertEqual(5, stats['reserved_percentage']) - self.assertFalse(stats['thick_provisioning_support']) - self.assertTrue(stats['thin_provisioning_support']) - - def test_update_volume_stats(self): - stats = 
self.adapter.update_volume_stats() - self.assertEqual('backend', stats['volume_backend_name']) - self.assertEqual('unknown', stats['storage_protocol']) - self.assertTrue(stats['thin_provisioning_support']) - self.assertFalse(stats['thick_provisioning_support']) - self.assertEqual(1, len(stats['pools'])) - - def test_serial_number(self): - self.assertEqual('CLIENT_SERIAL', self.adapter.serial_number) - - def test_do_setup(self): - self.assertEqual('1.2.3.4', self.adapter.ip) - self.assertEqual('user', self.adapter.username) - self.assertEqual('pass', self.adapter.password) - self.assertFalse(self.adapter.array_cert_verify) - self.assertIsNone(self.adapter.array_ca_cert_path) - - def test_do_setup_version_before_4_1(self): - def f(): - with mock.patch('cinder.volume.drivers.dell_emc.unity.adapter.' - 'CommonAdapter.validate_ports'): - self.adapter._client.system.system_version = '4.0.0' - self.adapter.do_setup(self.adapter.driver, MockConfig()) - self.assertRaises(exception.VolumeBackendAPIException, f) - - def test_verify_cert_false_path_none(self): - self.adapter.array_cert_verify = False - self.adapter.array_ca_cert_path = None - self.assertFalse(self.adapter.verify_cert) - - def test_verify_cert_false_path_not_none(self): - self.adapter.array_cert_verify = False - self.adapter.array_ca_cert_path = '/tmp/array_ca.crt' - self.assertFalse(self.adapter.verify_cert) - - def test_verify_cert_true_path_none(self): - self.adapter.array_cert_verify = True - self.adapter.array_ca_cert_path = None - self.assertTrue(self.adapter.verify_cert) - - def test_verify_cert_true_path_valide(self): - self.adapter.array_cert_verify = True - self.adapter.array_ca_cert_path = '/tmp/array_ca.crt' - self.assertEqual(self.adapter.array_ca_cert_path, - self.adapter.verify_cert) - - def test_terminate_connection_volume(self): - def f(): - volume = MockOSResource(provider_location='id^lun_43', id='id_43') - connector = {'host': 'host1'} - self.adapter.terminate_connection(volume, connector) - 
- self.assertRaises(ex.DetachIsCalled, f) - - def test_terminate_connection_snapshot(self): - def f(): - connector = {'host': 'host1'} - snap = MockOSResource(name='snap_0', id='snap_0') - self.adapter.terminate_connection_snapshot(snap, connector) - - self.assertRaises(ex.DetachIsCalled, f) - - def test_manage_existing_by_name(self): - ref = {'source-id': 12} - volume = MockOSResource(name='lun1') - ret = self.adapter.manage_existing(volume, ref) - expected = get_lun_pl('12') - self.assertEqual(expected, ret['provider_location']) - - def test_manage_existing_by_id(self): - ref = {'source-name': 'lunx'} - volume = MockOSResource(name='lun1') - ret = self.adapter.manage_existing(volume, ref) - expected = get_lun_pl('lun_4') - self.assertEqual(expected, ret['provider_location']) - - def test_manage_existing_invalid_ref(self): - def f(): - ref = {} - volume = MockOSResource(name='lun1') - self.adapter.manage_existing(volume, ref) - - self.assertRaises(exception.ManageExistingInvalidReference, f) - - def test_manage_existing_lun_not_found(self): - def f(): - ref = {'source-name': 'not_exists'} - volume = MockOSResource(name='lun1') - self.adapter.manage_existing(volume, ref) - - self.assertRaises(exception.ManageExistingInvalidReference, f) - - @patch_for_unity_adapter - def test_manage_existing_get_size_invalid_backend(self): - def f(): - volume = MockOSResource(volume_type_id='thin', - host='host@backend#pool1') - ref = {'source-id': 12} - self.adapter.manage_existing_get_size(volume, ref) - - self.assertRaises(exception.ManageExistingInvalidReference, f) - - @patch_for_unity_adapter - def test_manage_existing_get_size_success(self): - volume = MockOSResource(volume_type_id='thin', - host='host@backend#pool0') - ref = {'source-id': 12} - volume_size = self.adapter.manage_existing_get_size(volume, ref) - self.assertEqual(5, volume_size) - - @patch_for_unity_adapter - def test_create_volume_from_snapshot(self): - lun_id = 'lun_50' - volume = MockOSResource(name=lun_id, 
id=lun_id, host='unity#pool1') - snap_id = 'snap_50' - snap = MockOSResource(name=snap_id) - with patch_thin_clone(test_client.MockResource(_id=lun_id)) as tc: - ret = self.adapter.create_volume_from_snapshot(volume, snap) - self.assertEqual(get_snap_lun_pl(lun_id), - ret['provider_location']) - tc.assert_called_with(adapter.VolumeParams(self.adapter, volume), - snap_id) - - @patch_for_unity_adapter - def test_create_cloned_volume_attached(self): - lun_id = 'lun_51' - src_lun_id = 'lun_53' - volume = MockOSResource(name=lun_id, id=lun_id, host='unity#pool1') - src_vref = MockOSResource(id=src_lun_id, name=src_lun_id, - provider_location=get_lun_pl(src_lun_id), - volume_attachment=['not_care']) - with patch_dd_copy(test_client.MockResource(_id=lun_id)) as dd: - ret = self.adapter.create_cloned_volume(volume, src_vref) - dd.assert_called_with( - adapter.VolumeParams(self.adapter, volume), - IdMatcher(test_client.MockResource( - _id='snap_clone_{}'.format(src_lun_id))), - src_lun=IdMatcher(test_client.MockResource(_id=src_lun_id))) - self.assertEqual(get_lun_pl(lun_id), ret['provider_location']) - - @patch_for_unity_adapter - def test_create_cloned_volume_available(self): - lun_id = 'lun_54' - src_lun_id = 'lun_55' - volume = MockOSResource(id=lun_id, host='unity#pool1', size=3, - provider_location=get_lun_pl(lun_id)) - src_vref = MockOSResource(id=src_lun_id, name=src_lun_id, - provider_location=get_lun_pl(src_lun_id), - volume_attachment=None) - with patch_thin_clone(test_client.MockResource(_id=lun_id)) as tc: - ret = self.adapter.create_cloned_volume(volume, src_vref) - tc.assert_called_with( - adapter.VolumeParams(self.adapter, volume), - IdMatcher(test_client.MockResource( - _id='snap_clone_{}'.format(src_lun_id))), - src_lun=IdMatcher(test_client.MockResource(_id=src_lun_id))) - self.assertEqual(get_snap_lun_pl(lun_id), ret['provider_location']) - - @patch_for_unity_adapter - def test_dd_copy_with_src_lun(self): - lun_id = 'lun_56' - src_lun_id = 'lun_57' - 
src_snap_id = 'snap_57' - volume = MockOSResource(name=lun_id, id=lun_id, host='unity#pool1', - provider_location=get_lun_pl(lun_id)) - src_snap = test_client.MockResource(name=src_snap_id, _id=src_snap_id) - src_lun = test_client.MockResource(name=src_lun_id, _id=src_lun_id) - src_lun.size_total = 6 * units.Gi - with patch_copy_volume() as copy_volume: - ret = self.adapter._dd_copy( - adapter.VolumeParams(self.adapter, volume), src_snap, - src_lun=src_lun) - copy_volume.assert_called_with('dev', 'dev', 6144, '1M', - sparse=True) - self.assertEqual(IdMatcher(test_client.MockResource(_id=lun_id)), - ret) - - @patch_for_unity_adapter - def test_dd_copy_wo_src_lun(self): - lun_id = 'lun_58' - src_lun_id = 'lun_59' - src_snap_id = 'snap_59' - volume = MockOSResource(name=lun_id, id=lun_id, host='unity#pool1', - provider_location=get_lun_pl(lun_id)) - src_snap = test_client.MockResource(name=src_snap_id, _id=src_snap_id) - src_snap.storage_resource = test_client.MockResource(name=src_lun_id, - _id=src_lun_id) - with patch_copy_volume() as copy_volume: - ret = self.adapter._dd_copy( - adapter.VolumeParams(self.adapter, volume), src_snap) - copy_volume.assert_called_with('dev', 'dev', 5120, '1M', - sparse=True) - self.assertEqual(IdMatcher(test_client.MockResource(_id=lun_id)), - ret) - - @patch_for_unity_adapter - def test_dd_copy_raise(self): - lun_id = 'lun_58' - src_snap_id = 'snap_59' - volume = MockOSResource(name=lun_id, id=lun_id, host='unity#pool1', - provider_location=get_lun_pl(lun_id)) - src_snap = test_client.MockResource(name=src_snap_id, _id=src_snap_id) - with patch_copy_volume() as copy_volume: - copy_volume.side_effect = AttributeError - self.assertRaises(AttributeError, - self.adapter._dd_copy, volume, src_snap) - - @patch_for_unity_adapter - def test_thin_clone(self): - lun_id = 'lun_60' - src_snap_id = 'snap_61' - volume = MockOSResource(name=lun_id, id=lun_id, size=1, - provider_location=get_snap_lun_pl(lun_id)) - src_snap = 
test_client.MockResource(name=src_snap_id, _id=src_snap_id) - ret = self.adapter._thin_clone(volume, src_snap) - self.assertEqual(IdMatcher(test_client.MockResource(_id=lun_id)), ret) - - @patch_for_unity_adapter - def test_thin_clone_downgraded_with_src_lun(self): - lun_id = 'lun_60' - src_snap_id = 'snap_62' - src_lun_id = 'lun_62' - volume = MockOSResource(name=lun_id, id=lun_id, size=1, - provider_location=get_snap_lun_pl(lun_id)) - src_snap = test_client.MockResource(name=src_snap_id, _id=src_snap_id) - src_lun = test_client.MockResource(name=src_lun_id, _id=src_lun_id) - new_dd_lun = test_client.MockResource(name='lun_63') - with patch_storops() as mocked_storops, \ - patch_dd_copy(new_dd_lun) as dd: - ret = self.adapter._thin_clone( - adapter.VolumeParams(self.adapter, volume), - src_snap, src_lun=src_lun) - vol_params = adapter.VolumeParams(self.adapter, volume) - vol_params.name = 'hidden-{}'.format(volume.name) - vol_params.description = 'hidden-{}'.format(volume.description) - dd.assert_called_with(vol_params, src_snap, src_lun=src_lun) - mocked_storops.TCHelper.notify.assert_called_with(src_lun, - 'DD_COPY', - new_dd_lun) - self.assertEqual(IdMatcher(test_client.MockResource(_id=lun_id)), ret) - - @patch_for_unity_adapter - def test_thin_clone_downgraded_wo_src_lun(self): - lun_id = 'lun_60' - src_snap_id = 'snap_62' - volume = MockOSResource(name=lun_id, id=lun_id, size=1, - provider_location=get_snap_lun_pl(lun_id)) - src_snap = test_client.MockResource(name=src_snap_id, _id=src_snap_id) - new_dd_lun = test_client.MockResource(name='lun_63') - with patch_storops() as mocked_storops, \ - patch_dd_copy(new_dd_lun) as dd: - ret = self.adapter._thin_clone( - adapter.VolumeParams(self.adapter, volume), src_snap) - vol_params = adapter.VolumeParams(self.adapter, volume) - vol_params.name = 'hidden-{}'.format(volume.name) - vol_params.description = 'hidden-{}'.format(volume.description) - dd.assert_called_with(vol_params, src_snap, src_lun=None) - 
mocked_storops.TCHelper.notify.assert_called_with(src_snap, - 'DD_COPY', - new_dd_lun) - self.assertEqual(IdMatcher(test_client.MockResource(_id=lun_id)), ret) - - def test_extend_volume_error(self): - def f(): - volume = MockOSResource(id='l56', - provider_location=get_lun_pl('lun56')) - self.adapter.extend_volume(volume, -1) - - self.assertRaises(ex.ExtendLunError, f) - - def test_extend_volume_no_id(self): - def f(): - volume = MockOSResource(provider_location='type^lun') - self.adapter.extend_volume(volume, 5) - - self.assertRaises(exception.VolumeBackendAPIException, f) - - def test_normalize_config(self): - config = MockConfig() - config.unity_storage_pool_names = [' pool_1 ', '', ' '] - config.unity_io_ports = [' spa_eth2 ', '', ' '] - normalized = self.adapter.normalize_config(config) - self.assertEqual(['pool_1'], normalized.unity_storage_pool_names) - self.assertEqual(['spa_eth2'], normalized.unity_io_ports) - - def test_normalize_config_raise(self): - with self.assertRaisesRegexp(exception.InvalidConfigurationValue, - 'unity_storage_pool_names'): - config = MockConfig() - config.unity_storage_pool_names = ['', ' '] - self.adapter.normalize_config(config) - with self.assertRaisesRegexp(exception.InvalidConfigurationValue, - 'unity_io_ports'): - config = MockConfig() - config.unity_io_ports = ['', ' '] - self.adapter.normalize_config(config) - - -class FCAdapterTest(unittest.TestCase): - def setUp(self): - self.adapter = mock_adapter(adapter.FCAdapter) - - def test_setup(self): - self.assertIsNotNone(self.adapter.lookup_service) - - def test_auto_zone_enabled(self): - self.assertTrue(self.adapter.auto_zone_enabled) - - def test_fc_protocol(self): - stats = mock_adapter(adapter.FCAdapter).update_volume_stats() - self.assertEqual('FC', stats['storage_protocol']) - - def test_get_connector_uids(self): - connector = {'host': 'fake_host', - 'wwnns': ['1111111111111111', - '2222222222222222'], - 'wwpns': ['3333333333333333', - '4444444444444444'] - } - expected 
= ['11:11:11:11:11:11:11:11:33:33:33:33:33:33:33:33', - '22:22:22:22:22:22:22:22:44:44:44:44:44:44:44:44'] - ret = self.adapter.get_connector_uids(connector) - self.assertListEqual(expected, ret) - - def test_get_connection_info_no_targets(self): - def f(): - host = test_client.MockResource('no_target') - self.adapter.get_connection_info(12, host, {}) - - self.assertRaises(exception.VolumeBackendAPIException, f) - - def test_get_connection_info_auto_zone_enabled(self): - host = test_client.MockResource('host1') - connector = {'wwpns': 'abcdefg'} - ret = self.adapter.get_connection_info(10, host, connector) - target_wwns = ['100000051e55a100', '100000051e55a121'] - self.assertListEqual(target_wwns, ret['target_wwn']) - init_target_map = { - '200000051e55a100': ('100000051e55a100', '100000051e55a121'), - '200000051e55a121': ('100000051e55a100', '100000051e55a121')} - self.assertDictEqual(init_target_map, ret['initiator_target_map']) - self.assertEqual(10, ret['target_lun']) - - def test_get_connection_info_auto_zone_disabled(self): - self.adapter.lookup_service = None - host = test_client.MockResource('host1') - connector = {'wwpns': 'abcdefg'} - ret = self.adapter.get_connection_info(10, host, connector) - self.assertEqual(10, ret['target_lun']) - wwns = ['8899AABBCCDDEEFF', '8899AABBCCDDFFEE'] - self.assertListEqual(wwns, ret['target_wwn']) - - @patch_for_fc_adapter - def test_initialize_connection_volume(self): - volume = MockOSResource(provider_location='id^lun_43', id='id_43') - connector = {'host': 'host1'} - conn_info = self.adapter.initialize_connection(volume, connector) - self.assertEqual('fibre_channel', conn_info['driver_volume_type']) - self.assertTrue(conn_info['data']['target_discovered']) - self.assertEqual('id_43', conn_info['data']['volume_id']) - - @patch_for_fc_adapter - def test_initialize_connection_snapshot(self): - snap = MockOSResource(id='snap_1', name='snap_1') - connector = {'host': 'host1'} - conn_info = 
self.adapter.initialize_connection_snapshot( - snap, connector) - self.assertEqual('fibre_channel', conn_info['driver_volume_type']) - self.assertTrue(conn_info['data']['target_discovered']) - self.assertEqual('snap_1', conn_info['data']['volume_id']) - - def test_terminate_connection_auto_zone_enabled(self): - connector = {'host': 'host1', 'wwpns': 'abcdefg'} - volume = MockOSResource(provider_location='id^lun_41', id='id_41') - ret = self.adapter.terminate_connection(volume, connector) - self.assertEqual('fibre_channel', ret['driver_volume_type']) - data = ret['data'] - target_map = { - '200000051e55a100': ('100000051e55a100', '100000051e55a121'), - '200000051e55a121': ('100000051e55a100', '100000051e55a121')} - self.assertDictEqual(target_map, data['initiator_target_map']) - target_wwn = ['100000051e55a100', '100000051e55a121'] - self.assertListEqual(target_wwn, data['target_wwn']) - - def test_validate_ports_whitelist_none(self): - ports = self.adapter.validate_ports(None) - self.assertEqual(set(('spa_iom_0_fc0', 'spa_iom_0_fc1')), set(ports)) - - def test_validate_ports(self): - ports = self.adapter.validate_ports(['spa_iom_0_fc0']) - self.assertEqual(set(('spa_iom_0_fc0',)), set(ports)) - - def test_validate_ports_asterisk(self): - ports = self.adapter.validate_ports(['spa*']) - self.assertEqual(set(('spa_iom_0_fc0', 'spa_iom_0_fc1')), set(ports)) - - def test_validate_ports_question_mark(self): - ports = self.adapter.validate_ports(['spa_iom_0_fc?']) - self.assertEqual(set(('spa_iom_0_fc0', 'spa_iom_0_fc1')), set(ports)) - - def test_validate_ports_no_matched(self): - with self.assertRaisesRegexp(exception.InvalidConfigurationValue, - 'unity_io_ports'): - self.adapter.validate_ports(['spc_invalid']) - - def test_validate_ports_unmatched_whitelist(self): - with self.assertRaisesRegexp(exception.InvalidConfigurationValue, - 'unity_io_ports'): - self.adapter.validate_ports(['spa_iom*', 'spc_invalid']) - - -class ISCSIAdapterTest(unittest.TestCase): - def 
setUp(self): - self.adapter = mock_adapter(adapter.ISCSIAdapter) - - def test_iscsi_protocol(self): - stats = self.adapter.update_volume_stats() - self.assertEqual('iSCSI', stats['storage_protocol']) - - def test_get_connector_uids(self): - connector = {'host': 'fake_host', 'initiator': 'fake_iqn'} - ret = self.adapter.get_connector_uids(connector) - self.assertListEqual(['fake_iqn'], ret) - - def test_get_connection_info(self): - connector = {'host': 'fake_host', 'initiator': 'fake_iqn'} - hlu = 10 - info = self.adapter.get_connection_info(hlu, None, connector) - target_iqns = ['iqn.1-1.com.e:c.a.a0', 'iqn.1-1.com.e:c.a.a1'] - target_portals = ['1.2.3.4:1234', '1.2.3.5:1234'] - self.assertListEqual(target_iqns, info['target_iqns']) - self.assertListEqual([hlu, hlu], info['target_luns']) - self.assertListEqual(target_portals, info['target_portals']) - self.assertEqual(hlu, info['target_lun']) - self.assertTrue(info['target_portal'] in target_portals) - self.assertTrue(info['target_iqn'] in target_iqns) - - @patch_for_iscsi_adapter - def test_initialize_connection_volume(self): - volume = MockOSResource(provider_location='id^lun_43', id='id_43') - connector = {'host': 'host1'} - conn_info = self.adapter.initialize_connection(volume, connector) - self.assertEqual('iscsi', conn_info['driver_volume_type']) - self.assertTrue(conn_info['data']['target_discovered']) - self.assertEqual('id_43', conn_info['data']['volume_id']) - - @patch_for_iscsi_adapter - def test_initialize_connection_snapshot(self): - snap = MockOSResource(id='snap_1', name='snap_1') - connector = {'host': 'host1'} - conn_info = self.adapter.initialize_connection_snapshot( - snap, connector) - self.assertEqual('iscsi', conn_info['driver_volume_type']) - self.assertTrue(conn_info['data']['target_discovered']) - self.assertEqual('snap_1', conn_info['data']['volume_id']) diff --git a/cinder/tests/unit/volume/drivers/dell_emc/unity/test_client.py 
b/cinder/tests/unit/volume/drivers/dell_emc/unity/test_client.py deleted file mode 100644 index 4094d2f83..000000000 --- a/cinder/tests/unit/volume/drivers/dell_emc/unity/test_client.py +++ /dev/null @@ -1,505 +0,0 @@ -# Copyright (c) 2016 Dell Inc. or its subsidiaries. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import unittest - -from mock import mock -from oslo_utils import units - -from cinder.tests.unit.volume.drivers.dell_emc.unity \ - import fake_exception as ex -from cinder.volume.drivers.dell_emc.unity import client - - -######################## -# -# Start of Mocks -# -######################## - - -class MockResource(object): - def __init__(self, name=None, _id=None): - self.name = name - self._id = _id - self.existed = True - self.size_total = 5 * units.Gi - self.size_subscribed = 6 * units.Gi - self.size_free = 2 * units.Gi - self.is_auto_delete = None - self.initiator_id = [] - self.alu_hlu_map = {'already_attached': 99} - self.ip_address = None - self.is_logged_in = None - self.wwn = None - self.max_iops = None - self.max_kbps = None - self.pool_name = 'Pool0' - self._storage_resource = None - - @property - def id(self): - return self._id - - def get_id(self): - return self._id - - def delete(self): - if self.get_id() in ['snap_2']: - raise ex.SnapDeleteIsCalled() - elif self.get_id() == 'not_found': - raise ex.UnityResourceNotFoundError() - elif self.get_id() == 'snap_in_use': - raise ex.UnityDeleteAttachedSnapError() - - 
@property - def pool(self): - return MockResource('pool0') - - @property - def iscsi_host_initiators(self): - iscsi_initiator = MockResource('iscsi_initiator') - iscsi_initiator.initiator_id = ['iqn.1-1.com.e:c.host.0', - 'iqn.1-1.com.e:c.host.1'] - return iscsi_initiator - - @property - def total_size_gb(self): - return self.size_total / units.Gi - - @total_size_gb.setter - def total_size_gb(self, value): - if value == self.total_size_gb: - raise ex.UnityNothingToModifyError() - else: - self.size_total = value * units.Gi - - def add_initiator(self, uid, force_create=None): - self.initiator_id.append(uid) - - def attach(self, lun_or_snap, skip_hlu_0=True): - if lun_or_snap.get_id() == 'already_attached': - raise ex.UnityResourceAlreadyAttachedError() - self.alu_hlu_map[lun_or_snap.get_id()] = len(self.alu_hlu_map) - return self.get_hlu(lun_or_snap) - - @staticmethod - def detach(lun_or_snap): - if lun_or_snap.name == 'detach_failure': - raise ex.DetachIsCalled() - - def get_hlu(self, lun): - return self.alu_hlu_map.get(lun.get_id(), None) - - @staticmethod - def create_lun(lun_name, size_gb, description=None, io_limit_policy=None): - if lun_name == 'in_use': - raise ex.UnityLunNameInUseError() - ret = MockResource(lun_name, 'lun_2') - if io_limit_policy is not None: - ret.max_iops = io_limit_policy.max_iops - ret.max_kbps = io_limit_policy.max_kbps - return ret - - @staticmethod - def create_snap(name, is_auto_delete=False): - if name == 'in_use': - raise ex.UnitySnapNameInUseError() - ret = MockResource(name) - ret.is_auto_delete = is_auto_delete - return ret - - @staticmethod - def update(data=None): - pass - - @property - def iscsi_node(self): - name = 'iqn.1-1.com.e:c.%s.0' % self.name - return MockResource(name) - - @property - def fc_host_initiators(self): - init0 = MockResource('fhi_0') - init0.initiator_id = '00:11:22:33:44:55:66:77:88:99:AA:BB:CC:CD:EE:FF' - init1 = MockResource('fhi_1') - init1.initiator_id = 
'00:11:22:33:44:55:66:77:88:99:AA:BB:BC:CD:EE:FF' - return MockResourceList.create(init0, init1) - - @property - def paths(self): - path0 = MockResource('%s_path_0' % self.name) - path0.is_logged_in = True - path1 = MockResource('%s_path_1' % self.name) - path1.is_logged_in = False - path2 = MockResource('%s_path_2' % self.name) - path2.is_logged_in = True - return MockResourceList.create(path0, path1) - - @property - def fc_port(self): - ret = MockResource(_id='spa_iom_0_fc0') - ret.wwn = '00:11:22:33:44:55:66:77:88:99:AA:BB:CC:DD:EE:FF' - return ret - - @property - def host_luns(self): - return [] - - @property - def storage_resource(self): - if self._storage_resource is None: - self._storage_resource = MockResource(_id='sr_%s' % self._id, - name='sr_%s' % self.name) - return self._storage_resource - - @storage_resource.setter - def storage_resource(self, value): - self._storage_resource = value - - def modify(self, name=None): - self.name = name - - def thin_clone(self, name, io_limit_policy=None, description=None): - if name == 'thin_clone_name_in_use': - raise ex.UnityLunNameInUseError - return MockResource(_id=name, name=name) - - -class MockResourceList(object): - def __init__(self, names=None, ids=None): - if names is not None: - self.resources = [MockResource(name=name) for name in names] - elif ids is not None: - self.resources = [MockResource(_id=_id) for _id in ids] - - @staticmethod - def create(*rsc_list): - ret = MockResourceList([]) - ret.resources = rsc_list - return ret - - @property - def name(self): - return map(lambda i: i.name, self.resources) - - def __iter__(self): - return self.resources.__iter__() - - def __len__(self): - return len(self.resources) - - def __getattr__(self, item): - return [getattr(i, item) for i in self.resources] - - def shadow_copy(self, **kwargs): - if list(filter(None, kwargs.values())): - return MockResourceList.create(self.resources[0]) - else: - return self - - -class MockSystem(object): - def __init__(self): - 
self.serial_number = 'SYSTEM_SERIAL' - self.system_version = '4.1.0' - - @property - def info(self): - mocked_info = mock.Mock() - mocked_info.name = self.serial_number - return mocked_info - - @staticmethod - def get_lun(_id=None, name=None): - if _id == 'not_found': - raise ex.UnityResourceNotFoundError() - if _id == 'tc_80': # for thin clone with extending size - lun = MockResource(name=_id, _id=_id) - lun.total_size_gb = 7 - return lun - return MockResource(name, _id) - - @staticmethod - def get_pool(): - return MockResourceList(['Pool 1', 'Pool 2']) - - @staticmethod - def get_snap(name): - if name == 'not_found': - raise ex.UnityResourceNotFoundError() - return MockResource(name) - - @staticmethod - def create_host(name): - return MockResource(name) - - @staticmethod - def get_host(name): - if name == 'not_found': - raise ex.UnityResourceNotFoundError() - return MockResource(name) - - @staticmethod - def get_iscsi_portal(): - portal0 = MockResource('p0') - portal0.ip_address = '1.1.1.1' - portal1 = MockResource('p1') - portal1.ip_address = '1.1.1.2' - return MockResourceList.create(portal0, portal1) - - @staticmethod - def get_fc_port(): - port0 = MockResource('fcp0') - port0.wwn = '00:11:22:33:44:55:66:77:88:99:AA:BB:CC:DD:EE:FF' - port1 = MockResource('fcp1') - port1.wwn = '00:11:22:33:44:55:66:77:88:99:AA:BB:CC:DD:FF:EE' - return MockResourceList.create(port0, port1) - - @staticmethod - def create_io_limit_policy(name, max_iops=None, max_kbps=None): - if name == 'in_use': - raise ex.UnityPolicyNameInUseError() - ret = MockResource(name) - ret.max_iops = max_iops - ret.max_kbps = max_kbps - return ret - - @staticmethod - def get_io_limit_policy(name): - return MockResource(name=name) - - -@mock.patch.object(client, 'storops', new='True') -def get_client(): - ret = client.UnityClient('1.2.3.4', 'user', 'pass') - ret._system = MockSystem() - return ret - - -######################## -# -# Start of Tests -# -######################## -@mock.patch.object(client, 
'storops_ex', new=ex) -class ClientTest(unittest.TestCase): - def setUp(self): - self.client = get_client() - - def test_get_serial(self): - self.assertEqual('SYSTEM_SERIAL', self.client.get_serial()) - - def test_create_lun_success(self): - name = 'LUN 3' - pool = MockResource('Pool 0') - lun = self.client.create_lun(name, 5, pool) - self.assertEqual(name, lun.name) - - def test_create_lun_name_in_use(self): - name = 'in_use' - pool = MockResource('Pool 0') - lun = self.client.create_lun(name, 6, pool) - self.assertEqual('in_use', lun.name) - - def test_create_lun_with_io_limit(self): - pool = MockResource('Pool 0') - limit = MockResource('limit') - limit.max_kbps = 100 - lun = self.client.create_lun('LUN 4', 6, pool, io_limit_policy=limit) - self.assertEqual(100, lun.max_kbps) - - def test_thin_clone_success(self): - name = 'tc_77' - src_lun = MockResource(_id='id_77') - lun = self.client.thin_clone(src_lun, name) - self.assertEqual(name, lun.name) - - def test_thin_clone_name_in_used(self): - name = 'thin_clone_name_in_use' - src_lun = MockResource(_id='id_79') - lun = self.client.thin_clone(src_lun, name) - self.assertEqual(name, lun.name) - - def test_thin_clone_extend_size(self): - name = 'tc_80' - src_lun = MockResource(_id='id_80') - lun = self.client.thin_clone(src_lun, name, io_limit_policy=None, - new_size_gb=7) - self.assertEqual(name, lun.name) - self.assertEqual(7, lun.total_size_gb) - - def test_delete_lun_normal(self): - self.assertIsNone(self.client.delete_lun('lun3')) - - def test_delete_lun_not_found(self): - try: - self.client.delete_lun('not_found') - except ex.StoropsException: - self.fail('not found error should be dealt with silently.') - - def test_get_lun_with_id(self): - lun = self.client.get_lun('lun4') - self.assertEqual('lun4', lun.get_id()) - - def test_get_lun_with_name(self): - lun = self.client.get_lun(name='LUN 4') - self.assertEqual('LUN 4', lun.name) - - def test_get_lun_not_found(self): - ret = 
self.client.get_lun(lun_id='not_found') - self.assertIsNone(ret) - - def test_get_pools(self): - pools = self.client.get_pools() - self.assertEqual(2, len(pools)) - - def test_create_snap_normal(self): - snap = self.client.create_snap('lun_1', 'snap_1') - self.assertEqual('snap_1', snap.name) - - def test_create_snap_in_use(self): - snap = self.client.create_snap('lun_1', 'in_use') - self.assertEqual('in_use', snap.name) - - def test_delete_snap_error(self): - def f(): - snap = MockResource(_id='snap_2') - self.client.delete_snap(snap) - - self.assertRaises(ex.SnapDeleteIsCalled, f) - - def test_delete_snap_not_found(self): - try: - snap = MockResource(_id='not_found') - self.client.delete_snap(snap) - except ex.StoropsException: - self.fail('snap not found should not raise exception.') - - def test_delete_snap_none(self): - try: - ret = self.client.delete_snap(None) - self.assertIsNone(ret) - except ex.StoropsException: - self.fail('delete none should not raise exception.') - - def test_delete_snap_in_use(self): - def f(): - snap = MockResource(_id='snap_in_use') - self.client.delete_snap(snap) - - self.assertRaises(ex.UnityDeleteAttachedSnapError, f) - - def test_get_snap_found(self): - snap = self.client.get_snap('snap_2') - self.assertEqual('snap_2', snap.name) - - def test_get_snap_not_found(self): - ret = self.client.get_snap('not_found') - self.assertIsNone(ret) - - def test_create_host_found(self): - iqns = ['iqn.1-1.com.e:c.a.a0'] - host = self.client.create_host('host1', iqns) - - self.assertEqual('host1', host.name) - self.assertLessEqual(['iqn.1-1.com.e:c.a.a0'], host.initiator_id) - - def test_create_host_not_found(self): - host = self.client.create_host('not_found', []) - self.assertEqual('not_found', host.name) - - def test_attach_lun(self): - lun = MockResource(_id='lun1', name='l1') - host = MockResource('host1') - self.assertEqual(1, self.client.attach(host, lun)) - - def test_attach_already_attached(self): - lun = 
MockResource(_id='already_attached') - host = MockResource('host1') - hlu = self.client.attach(host, lun) - self.assertEqual(99, hlu) - - def test_detach_lun(self): - def f(): - lun = MockResource('detach_failure') - host = MockResource('host1') - self.client.detach(host, lun) - - self.assertRaises(ex.DetachIsCalled, f) - - def test_get_host(self): - self.assertEqual('host2', self.client.get_host('host2').name) - - def test_get_iscsi_target_info(self): - ret = self.client.get_iscsi_target_info() - expected = [{'iqn': 'iqn.1-1.com.e:c.p0.0', 'portal': '1.1.1.1:3260'}, - {'iqn': 'iqn.1-1.com.e:c.p1.0', 'portal': '1.1.1.2:3260'}] - self.assertListEqual(expected, ret) - - def test_get_iscsi_target_info_allowed_ports(self): - ret = self.client.get_iscsi_target_info(allowed_ports=['spa_eth0']) - expected = [{'iqn': 'iqn.1-1.com.e:c.p0.0', 'portal': '1.1.1.1:3260'}] - self.assertListEqual(expected, ret) - - def test_get_fc_target_info_without_host(self): - ret = self.client.get_fc_target_info() - self.assertListEqual(['8899AABBCCDDEEFF', '8899AABBCCDDFFEE'], - sorted(ret)) - - def test_get_fc_target_info_without_host_but_allowed_ports(self): - ret = self.client.get_fc_target_info(allowed_ports=['spa_fc0']) - self.assertListEqual(['8899AABBCCDDEEFF'], ret) - - def test_get_fc_target_info_with_host(self): - host = MockResource('host0') - ret = self.client.get_fc_target_info(host, True) - self.assertListEqual(['8899AABBCCDDEEFF'], ret) - - def test_get_fc_target_info_with_host_and_allowed_ports(self): - host = MockResource('host0') - ret = self.client.get_fc_target_info(host, True, - allowed_ports=['spb_iom_0_fc0']) - self.assertListEqual([], ret) - - def test_get_io_limit_policy_none(self): - ret = self.client.get_io_limit_policy(None) - self.assertIsNone(ret) - - def test_get_io_limit_policy_create_new(self): - specs = {'maxBWS': 2, 'id': 'max_2_mbps', 'maxIOPS': None} - limit = self.client.get_io_limit_policy(specs) - self.assertEqual('max_2_mbps', limit.name) - 
self.assertEqual(2, limit.max_kbps) - - def test_create_io_limit_policy_success(self): - limit = self.client.create_io_limit_policy('3kiops', max_iops=3000) - self.assertEqual('3kiops', limit.name) - self.assertEqual(3000, limit.max_iops) - - def test_create_io_limit_policy_in_use(self): - limit = self.client.create_io_limit_policy('in_use', max_iops=100) - self.assertEqual('in_use', limit.name) - - def test_expand_lun_success(self): - lun = self.client.extend_lun('ev_3', 6) - self.assertEqual(6, lun.total_size_gb) - - def test_expand_lun_nothing_to_modify(self): - lun = self.client.extend_lun('ev_4', 5) - self.assertEqual(5, lun.total_size_gb) - - def test_get_pool_name(self): - self.assertEqual('Pool0', self.client.get_pool_name('lun_0')) diff --git a/cinder/tests/unit/volume/drivers/dell_emc/unity/test_driver.py b/cinder/tests/unit/volume/drivers/dell_emc/unity/test_driver.py deleted file mode 100644 index 3cb44c593..000000000 --- a/cinder/tests/unit/volume/drivers/dell_emc/unity/test_driver.py +++ /dev/null @@ -1,258 +0,0 @@ -# Copyright (c) 2016 Dell Inc. or its subsidiaries. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -import unittest - -from cinder.tests.unit.volume.drivers.dell_emc.unity \ - import fake_exception as ex -from cinder.tests.unit.volume.drivers.dell_emc.unity import test_adapter -from cinder.volume import configuration as conf -from cinder.volume.drivers.dell_emc.unity import driver - - -######################## -# -# Start of Mocks -# -######################## - -class MockAdapter(object): - def do_setup(self, driver_object, configuration): - raise ex.AdapterSetupError() - - @staticmethod - def create_volume(volume): - return volume - - @staticmethod - def create_volume_from_snapshot(volume, snapshot): - return volume - - @staticmethod - def create_cloned_volume(volume, src_vref): - return volume - - @staticmethod - def extend_volume(volume, new_size): - volume.size = new_size - - @staticmethod - def delete_volume(volume): - volume.exists = False - - @staticmethod - def create_snapshot(snapshot): - snapshot.exists = True - return snapshot - - @staticmethod - def delete_snapshot(snapshot): - snapshot.exists = False - - @staticmethod - def initialize_connection(volume, connector): - return {'volume': volume, 'connector': connector} - - @staticmethod - def terminate_connection(volume, connector): - return {'volume': volume, 'connector': connector} - - @staticmethod - def update_volume_stats(): - return {'stats': 123} - - @staticmethod - def manage_existing(volume, existing_ref): - volume.managed = True - return volume - - @staticmethod - def manage_existing_get_size(volume, existing_ref): - volume.managed = True - volume.size = 7 - return volume - - @staticmethod - def get_pool_name(volume): - return 'pool_0' - - @staticmethod - def initialize_connection_snapshot(snapshot, connector): - return {'snapshot': snapshot, 'connector': connector} - - @staticmethod - def terminate_connection_snapshot(snapshot, connector): - return {'snapshot': snapshot, 'connector': connector} - - -######################## -# -# Start of Tests -# -######################## - -class 
UnityDriverTest(unittest.TestCase): - @staticmethod - def get_volume(): - return test_adapter.MockOSResource(provider_location='id^lun_43', - id='id_43') - - @classmethod - def get_snapshot(cls): - return test_adapter.MockOSResource(volume=cls.get_volume()) - - @staticmethod - def get_context(): - return None - - @staticmethod - def get_connector(): - return {'host': 'host1'} - - def setUp(self): - self.config = conf.Configuration(None) - self.driver = driver.UnityDriver(configuration=self.config) - self.driver.adapter = MockAdapter() - - def test_default_initialize(self): - config = conf.Configuration(None) - iscsi_driver = driver.UnityDriver(configuration=config) - self.assertIsNone(config.unity_storage_pool_names) - self.assertTrue(config.san_thin_provision) - self.assertEqual('', config.san_ip) - self.assertEqual('admin', config.san_login) - self.assertEqual('', config.san_password) - self.assertEqual('', config.san_private_key) - self.assertEqual('', config.san_clustername) - self.assertEqual(22, config.san_ssh_port) - self.assertEqual(False, config.san_is_local) - self.assertEqual(30, config.ssh_conn_timeout) - self.assertEqual(1, config.ssh_min_pool_conn) - self.assertEqual(5, config.ssh_max_pool_conn) - self.assertEqual('iSCSI', iscsi_driver.protocol) - - def test_fc_initialize(self): - config = conf.Configuration(None) - config.storage_protocol = 'fc' - fc_driver = driver.UnityDriver(configuration=config) - self.assertEqual('FC', fc_driver.protocol) - - def test_do_setup(self): - def f(): - self.driver.do_setup(None) - - self.assertRaises(ex.AdapterSetupError, f) - - def test_create_volume(self): - volume = self.get_volume() - self.assertEqual(volume, self.driver.create_volume(volume)) - - def test_create_volume_from_snapshot(self): - volume = self.get_volume() - snap = self.get_snapshot() - self.assertEqual( - volume, self.driver.create_volume_from_snapshot(volume, snap)) - - def test_create_cloned_volume(self): - volume = self.get_volume() - 
self.assertEqual( - volume, self.driver.create_cloned_volume(volume, None)) - - def test_extend_volume(self): - volume = self.get_volume() - self.driver.extend_volume(volume, 6) - self.assertEqual(6, volume.size) - - def test_delete_volume(self): - volume = self.get_volume() - self.driver.delete_volume(volume) - self.assertFalse(volume.exists) - - def test_create_snapshot(self): - snapshot = self.get_snapshot() - self.driver.create_snapshot(snapshot) - self.assertTrue(snapshot.exists) - - def test_delete_snapshot(self): - snapshot = self.get_snapshot() - self.driver.delete_snapshot(snapshot) - self.assertFalse(snapshot.exists) - - def test_ensure_export(self): - self.assertIsNone(self.driver.ensure_export( - self.get_context(), self.get_volume())) - - def test_create_export(self): - self.assertIsNone(self.driver.create_export( - self.get_context(), self.get_volume(), self.get_connector())) - - def test_remove_export(self): - self.assertIsNone(self.driver.remove_export( - self.get_context(), self.get_volume())) - - def test_check_for_export(self): - self.assertIsNone(self.driver.check_for_export( - self.get_context(), self.get_volume())) - - def test_initialize_connection(self): - volume = self.get_volume() - connector = self.get_connector() - conn_info = self.driver.initialize_connection(volume, connector) - self.assertEqual(volume, conn_info['volume']) - self.assertEqual(connector, conn_info['connector']) - - def test_terminate_connection(self): - volume = self.get_volume() - connector = self.get_connector() - conn_info = self.driver.terminate_connection(volume, connector) - self.assertEqual(volume, conn_info['volume']) - self.assertEqual(connector, conn_info['connector']) - - def test_update_volume_stats(self): - stats = self.driver.get_volume_stats(True) - self.assertEqual(123, stats['stats']) - self.assertEqual(self.driver.VERSION, stats['driver_version']) - self.assertEqual(self.driver.VENDOR, stats['vendor_name']) - - def test_manage_existing(self): - volume 
= self.driver.manage_existing(self.get_volume(), None) - self.assertTrue(volume.managed) - - def test_manage_existing_get_size(self): - volume = self.driver.manage_existing_get_size(self.get_volume(), None) - self.assertTrue(volume.managed) - self.assertEqual(7, volume.size) - - def test_get_pool(self): - self.assertEqual('pool_0', self.driver.get_pool(self.get_volume())) - - def test_unmanage(self): - ret = self.driver.unmanage(None) - self.assertIsNone(ret) - - def test_backup_use_temp_snapshot(self): - self.assertTrue(self.driver.backup_use_temp_snapshot()) - - def test_initialize_connection_snapshot(self): - snapshot = self.get_snapshot() - conn_info = self.driver.initialize_connection_snapshot( - snapshot, self.get_connector()) - self.assertEqual(snapshot, conn_info['snapshot']) - - def test_terminate_connection_snapshot(self): - snapshot = self.get_snapshot() - conn_info = self.driver.terminate_connection_snapshot( - snapshot, self.get_connector()) - self.assertEqual(snapshot, conn_info['snapshot']) diff --git a/cinder/tests/unit/volume/drivers/dell_emc/unity/test_utils.py b/cinder/tests/unit/volume/drivers/dell_emc/unity/test_utils.py deleted file mode 100644 index 7eb0c2fcc..000000000 --- a/cinder/tests/unit/volume/drivers/dell_emc/unity/test_utils.py +++ /dev/null @@ -1,256 +0,0 @@ -# Copyright (c) 2016 Dell Inc. or its subsidiaries. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -import functools -import unittest - -import mock -from oslo_utils import units - -from cinder import exception -from cinder.tests.unit.volume.drivers.dell_emc.unity import test_adapter -from cinder.volume.drivers.dell_emc.unity import utils - - -def get_volume_type_extra_specs(volume_type): - return {'provisioning:type': volume_type} - - -def get_volume_type_qos_specs(type_id): - if type_id == 'invalid_backend_qos_consumer': - ret = {'qos_specs': {'consumer': 'invalid'}} - elif type_id == 'both_none': - ret = {'qos_specs': {'consumer': 'back-end', 'specs': {}}} - elif type_id == 'max_1000_iops': - ret = { - 'qos_specs': { - 'id': 'max_1000_iops', - 'consumer': 'both', - 'specs': { - 'maxIOPS': 1000 - } - } - } - elif type_id == 'max_2_mbps': - ret = { - 'qos_specs': { - 'id': 'max_2_mbps', - 'consumer': 'back-end', - 'specs': { - 'maxBWS': 2 - } - } - } - else: - ret = None - return ret - - -def patch_volume_types(func): - @functools.wraps(func) - @mock.patch(target=('cinder.volume.volume_types' - '.get_volume_type_extra_specs'), - new=get_volume_type_extra_specs) - @mock.patch(target=('cinder.volume.volume_types' - '.get_volume_type_qos_specs'), - new=get_volume_type_qos_specs) - def func_wrapper(*args, **kwargs): - return func(*args, **kwargs) - - return func_wrapper - - -class UnityUtilsTest(unittest.TestCase): - def test_validate_pool_names_filter(self): - all_pools = list('acd') - pool_names = utils.validate_pool_names(list('abc'), all_pools) - self.assertIn('a', pool_names) - self.assertIn('c', pool_names) - self.assertNotIn('b', pool_names) - self.assertNotIn('d', pool_names) - - def test_validate_pool_names_non_exists(self): - def f(): - all_pools = list('abc') - utils.validate_pool_names(list('efg'), all_pools) - - self.assertRaises(exception.VolumeBackendAPIException, f) - - def test_validate_pool_names_default(self): - all_pools = list('ab') - pool_names = utils.validate_pool_names([], all_pools) - self.assertEqual(2, len(pool_names)) - - pool_names = 
utils.validate_pool_names(None, all_pools) - self.assertEqual(2, len(pool_names)) - - def test_build_provider_location(self): - location = utils.build_provider_location('unity', 'thin', 'ev_1', '3') - expected = 'id^ev_1|system^unity|type^thin|version^3' - self.assertEqual(expected, location) - - def test_extract_provider_location_version(self): - location = 'id^ev_1|system^unity|type^thin|version^3' - self.assertEqual('3', - utils.extract_provider_location(location, 'version')) - - def test_extract_provider_location_type(self): - location = 'id^ev_1|system^unity|type^thin|version^3' - self.assertEqual('thin', - utils.extract_provider_location(location, 'type')) - - def test_extract_provider_location_system(self): - location = 'id^ev_1|system^unity|type^thin|version^3' - self.assertEqual('unity', - utils.extract_provider_location(location, 'system')) - - def test_extract_provider_location_id(self): - location = 'id^ev_1|system^unity|type^thin|version^3' - self.assertEqual('ev_1', - utils.extract_provider_location(location, 'id')) - - def test_extract_provider_location_not_found(self): - location = 'id^ev_1|system^unity|type^thin|version^3' - self.assertIsNone(utils.extract_provider_location(location, 'na')) - - def test_extract_provider_location_none(self): - self.assertIsNone(utils.extract_provider_location(None, 'abc')) - - def test_extract_iscsi_uids(self): - connector = {'host': 'fake_host', - 'initiator': 'fake_iqn'} - self.assertEqual(['fake_iqn'], - utils.extract_iscsi_uids(connector)) - - def test_extract_iscsi_uids_not_found(self): - connector = {'host': 'fake_host'} - self.assertRaises(exception.VolumeBackendAPIException, - utils.extract_iscsi_uids, - connector) - - def test_extract_fc_uids(self): - connector = {'host': 'fake_host', - 'wwnns': ['1111111111111111', - '2222222222222222'], - 'wwpns': ['3333333333333333', - '4444444444444444'] - } - self.assertEqual(['11:11:11:11:11:11:11:11:33:33:33:33:33:33:33:33', - 
'22:22:22:22:22:22:22:22:44:44:44:44:44:44:44:44', ], - utils.extract_fc_uids(connector)) - - def test_extract_fc_uids_not_found(self): - connector = {'host': 'fake_host'} - self.assertRaises(exception.VolumeBackendAPIException, - utils.extract_iscsi_uids, - connector) - - def test_byte_to_gib(self): - self.assertEqual(5, utils.byte_to_gib(5 * units.Gi)) - - def test_byte_to_mib(self): - self.assertEqual(5, utils.byte_to_mib(5 * units.Mi)) - - def test_gib_to_mib(self): - self.assertEqual(5 * units.Gi / units.Mi, utils.gib_to_mib(5)) - - def test_convert_ip_to_portal(self): - self.assertEqual('1.2.3.4:3260', utils.convert_ip_to_portal('1.2.3.4')) - - def test_convert_to_itor_tgt_map(self): - zone_mapping = { - 'san_1': { - 'initiator_port_wwn_list': - ('200000051e55a100', '200000051e55a121'), - 'target_port_wwn_list': - ('100000051e55a100', '100000051e55a121') - } - } - ret = utils.convert_to_itor_tgt_map(zone_mapping) - self.assertEqual(['100000051e55a100', '100000051e55a121'], ret[0]) - mapping = ret[1] - targets = ('100000051e55a100', '100000051e55a121') - self.assertEqual(targets, mapping['200000051e55a100']) - self.assertEqual(targets, mapping['200000051e55a121']) - - def test_get_pool_name(self): - volume = test_adapter.MockOSResource(host='host@backend#pool_name') - self.assertEqual('pool_name', utils.get_pool_name(volume)) - - def test_ignore_exception(self): - class IgnoredException(Exception): - pass - - def f(): - raise IgnoredException('any exception') - - try: - utils.ignore_exception(f) - except IgnoredException: - self.fail('should not raise any exception.') - - def test_assure_cleanup(self): - data = [0] - - def _enter(): - data[0] += 10 - return data[0] - - def _exit(x): - data[0] = x - 1 - - ctx = utils.assure_cleanup(_enter, _exit, True) - with ctx as r: - self.assertEqual(10, r) - - self.assertEqual(9, data[0]) - - def test_get_backend_qos_specs_type_none(self): - volume = test_adapter.MockOSResource(volume_type_id=None) - ret = 
utils.get_backend_qos_specs(volume) - self.assertIsNone(ret) - - @patch_volume_types - def test_get_backend_qos_specs_none(self): - volume = test_adapter.MockOSResource(volume_type_id='no_qos') - ret = utils.get_backend_qos_specs(volume) - self.assertIsNone(ret) - - @patch_volume_types - def test_get_backend_qos_invalid_consumer(self): - volume = test_adapter.MockOSResource( - volume_type_id='invalid_backend_qos_consumer') - ret = utils.get_backend_qos_specs(volume) - self.assertIsNone(ret) - - @patch_volume_types - def test_get_backend_qos_both_none(self): - volume = test_adapter.MockOSResource(volume_type_id='both_none') - ret = utils.get_backend_qos_specs(volume) - self.assertIsNone(ret) - - @patch_volume_types - def test_get_backend_qos_iops(self): - volume = test_adapter.MockOSResource(volume_type_id='max_1000_iops') - ret = utils.get_backend_qos_specs(volume) - expected = {'maxBWS': None, 'id': 'max_1000_iops', 'maxIOPS': 1000} - self.assertEqual(expected, ret) - - @patch_volume_types - def test_get_backend_qos_mbps(self): - volume = test_adapter.MockOSResource(volume_type_id='max_2_mbps') - ret = utils.get_backend_qos_specs(volume) - expected = {'maxBWS': 2, 'id': 'max_2_mbps', 'maxIOPS': None} - self.assertEqual(expected, ret) diff --git a/cinder/tests/unit/volume/drivers/dell_emc/vmax/__init__.py b/cinder/tests/unit/volume/drivers/dell_emc/vmax/__init__.py deleted file mode 100644 index e69de29bb..000000000 diff --git a/cinder/tests/unit/volume/drivers/dell_emc/vmax/test_vmax.py b/cinder/tests/unit/volume/drivers/dell_emc/vmax/test_vmax.py deleted file mode 100644 index 8e7f91dd0..000000000 --- a/cinder/tests/unit/volume/drivers/dell_emc/vmax/test_vmax.py +++ /dev/null @@ -1,6009 +0,0 @@ -# Copyright (c) 2017 Dell Inc. or its subsidiaries. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. 
You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import ast -from copy import deepcopy -import datetime -import tempfile -import time -from xml.dom import minidom - -import mock -import requests -import six - -from cinder import context -from cinder import exception -from cinder.objects import fields -from cinder.objects import group -from cinder.objects import group_snapshot -from cinder.objects import volume_type -from cinder import test -from cinder.tests.unit import fake_group -from cinder.tests.unit import fake_snapshot -from cinder.tests.unit import fake_volume -from cinder.volume.drivers.dell_emc.vmax import common -from cinder.volume.drivers.dell_emc.vmax import fc -from cinder.volume.drivers.dell_emc.vmax import iscsi -from cinder.volume.drivers.dell_emc.vmax import masking -from cinder.volume.drivers.dell_emc.vmax import provision -from cinder.volume.drivers.dell_emc.vmax import rest -from cinder.volume.drivers.dell_emc.vmax import utils -from cinder.volume import utils as volume_utils -from cinder.volume import volume_types -from cinder.zonemanager import utils as fczm_utils - -CINDER_EMC_CONFIG_DIR = '/etc/cinder/' - - -class VMAXCommonData(object): - # array info - array = '000197800123' - srp = 'SRP_1' - srp2 = 'SRP_2' - slo = 'Diamond' - workload = 'DSS' - port_group_name_f = 'OS-fibre-PG' - port_group_name_i = 'OS-iscsi-PG' - masking_view_name_f = 'OS-HostX-F-OS-fibre-PG-MV' - masking_view_name_i = 'OS-HostX-SRP_1-I-OS-iscsi-PG-MV' - initiatorgroup_name_f = 'OS-HostX-F-IG' - initiatorgroup_name_i = 'OS-HostX-I-IG' - parent_sg_f = 'OS-HostX-F-OS-fibre-PG-SG' - parent_sg_i = 
'OS-HostX-I-OS-iscsi-PG-SG' - storagegroup_name_f = 'OS-HostX-SRP_1-DiamondDSS-OS-fibre-PG' - storagegroup_name_i = 'OS-HostX-SRP_1-Diamond-DSS-OS-iscsi-PG' - defaultstoragegroup_name = 'OS-SRP_1-Diamond-DSS-SG' - default_sg_no_slo = 'OS-no_SLO-SG' - default_sg_compr_disabled = 'OS-SRP_1-Diamond-DSS-CD-SG' - default_sg_re_enabled = 'OS-SRP_1-Diamond-DSS-RE-SG' - failed_resource = 'OS-failed-resource' - fake_host = 'HostX@Backend#Diamond+DSS+SRP_1+000197800123' - new_host = 'HostX@Backend#Silver+OLTP+SRP_1+000197800123' - version = '3.0.0' - volume_wwn = '600000345' - remote_array = '000197800124' - device_id = '00001' - device_id2 = '00002' - rdf_group_name = '23_24_007' - rdf_group_no = '70' - u4v_version = '84' - storagegroup_name_source = 'Grp_source_sg' - storagegroup_name_target = 'Grp_target_sg' - group_snapshot_name = 'Grp_snapshot' - target_group_name = 'Grp_target' - storagegroup_name_with_id = 'GrpId_group_name' - - # connector info - wwpn1 = "123456789012345" - wwpn2 = "123456789054321" - wwnn1 = "223456789012345" - initiator = 'iqn.1993-08.org.debian: 01: 222' - ip = u'123.456.7.8' - iqn = u'iqn.1992-04.com.emc:600009700bca30c01e3e012e00000001,t,0x0001' - connector = {'ip': ip, - 'initiator': initiator, - 'wwpns': [wwpn1, wwpn2], - 'wwnns': [wwnn1], - 'host': 'HostX'} - - fabric_name_prefix = "fakeFabric" - end_point_map = {connector['wwpns'][0]: [wwnn1], - connector['wwpns'][1]: [wwnn1]} - target_wwns = [wwnn1] - zoning_mappings = { - 'array': u'000197800123', - 'init_targ_map': end_point_map, - 'initiator_group': initiatorgroup_name_f, - 'port_group': port_group_name_f, - 'target_wwns': target_wwns} - - device_map = {} - for wwn in connector['wwpns']: - fabric_name = ''.join([fabric_name_prefix, - wwn[-2:]]) - target_wwn = wwn[::-1] - fabric_map = {'initiator_port_wwn_list': [wwn], - 'target_port_wwn_list': [target_wwn] - } - device_map[fabric_name] = fabric_map - - iscsi_device_info = {'maskingview': masking_view_name_i, - 'ip_and_iqn': [{'ip': ip, - 
'iqn': initiator}], - 'is_multipath': True, - 'array': array, - 'controller': {'host': '10.00.00.00'}, - 'hostlunid': 3} - fc_device_info = {'maskingview': masking_view_name_f, - 'array': array, - 'controller': {'host': '10.00.00.00'}, - 'hostlunid': 3} - - # cinder volume info - ctx = context.RequestContext('admin', 'fake', True) - provider_location = {'array': six.text_type(array), - 'device_id': device_id} - - provider_location2 = {'array': six.text_type(array), - 'device_id': device_id2} - - provider_location3 = {'array': six.text_type(remote_array), - 'device_id': device_id2} - - snap_location = {'snap_name': '12345', - 'source_id': device_id} - - test_volume_type = fake_volume.fake_volume_type_obj( - context=ctx - ) - - test_volume = fake_volume.fake_volume_obj( - context=ctx, name='vol1', size=2, provider_auth=None, - provider_location=six.text_type(provider_location), - volume_type=test_volume_type, host=fake_host, - replication_driver_data=six.text_type(provider_location3)) - - test_clone_volume = fake_volume.fake_volume_obj( - context=ctx, name='vol1', size=2, provider_auth=None, - provider_location=six.text_type(provider_location2), - host=fake_host) - - test_snapshot = fake_snapshot.fake_snapshot_obj( - context=ctx, id='12345', name='my_snap', size=2, - provider_location=six.text_type(snap_location), - host=fake_host, volume=test_volume) - - test_failed_snap = fake_snapshot.fake_snapshot_obj( - context=ctx, id='12345', name=failed_resource, size=2, - provider_location=six.text_type(snap_location), - host=fake_host, volume=test_volume) - - location_info = {'location_info': '000197800123#SRP_1#Diamond#DSS', - 'storage_protocol': 'FC'} - test_host = {'capabilities': location_info, - 'host': fake_host} - - # extra-specs - vol_type_extra_specs = {'pool_name': u'Diamond+DSS+SRP_1+000197800123'} - vol_type_extra_specs_compr_disabled = { - 'pool_name': u'Diamond+DSS+SRP_1+000197800123', - 'storagetype:disablecompression': "true"} - 
vol_type_extra_specs_rep_enabled = { - 'pool_name': u'Diamond+DSS+SRP_1+000197800123', - 'replication_enabled': ' True'} - extra_specs = {'pool_name': u'Diamond+DSS+SRP_1+000197800123', - 'slo': slo, - 'workload': workload, - 'srp': srp, - 'array': array, - 'interval': 3, - 'retries': 120} - extra_specs_disable_compression = deepcopy(extra_specs) - extra_specs_disable_compression[utils.DISABLECOMPRESSION] = "true" - extra_specs_intervals_set = deepcopy(extra_specs) - extra_specs_intervals_set['interval'] = 1 - extra_specs_intervals_set['retries'] = 1 - extra_specs_rep_enabled = deepcopy(extra_specs) - extra_specs_rep_enabled['replication_enabled'] = True - rep_extra_specs = deepcopy(extra_specs_rep_enabled) - rep_extra_specs['array'] = remote_array - rep_extra_specs['interval'] = 0 - rep_extra_specs['retries'] = 0 - rep_extra_specs['srp'] = srp2 - - test_volume_type_1 = volume_type.VolumeType( - id='abc', name='abc', - extra_specs=extra_specs - ) - test_volume_type_list = volume_type.VolumeTypeList( - objects=[test_volume_type_1]) - test_group_1 = group.Group( - context=None, name=storagegroup_name_source, - group_id='abc', size=1, - id='12345', status='available', - provider_auth=None, volume_type_ids=['abc'], - group_type_id='grptypeid', - volume_types=test_volume_type_list, - host=fake_host, provider_location=six.text_type(provider_location)) - - test_group_failed = group.Group( - context=None, name=failed_resource, - group_id='abc', size=1, - id='12345', status='available', - provider_auth=None, volume_type_ids=['abc'], - group_type_id='grptypeid', - volume_types=test_volume_type_list, - host=fake_host, provider_location=six.text_type(provider_location)) - - test_group = fake_group.fake_group_obj( - context=ctx, name=storagegroup_name_source, - id='12345', host=fake_host) - - test_group_without_name = fake_group.fake_group_obj( - context=ctx, name=None, - id='12345', host=fake_host) - - test_vol_grp_name = 'Grp_source_sg_12345' - test_vol_grp_name_id_only = 
'12345' - - test_group_snapshot_1 = group_snapshot.GroupSnapshot( - context=None, id='123456', - group_id='12345', name=group_snapshot_name, - group_type_id='grptypeid', status='available', - group=test_group_1) - - test_group_snapshot_failed = group_snapshot.GroupSnapshot( - context=None, id='123456', - group_id='12345', name=failed_resource, - group_type_id='grptypeid', status='available', - group=test_group_failed) - - # masking view dict - masking_view_dict = { - 'array': array, - 'connector': connector, - 'device_id': device_id, - 'init_group_name': initiatorgroup_name_f, - 'initiator_check': False, - 'maskingview_name': masking_view_name_f, - 'parent_sg_name': parent_sg_f, - 'srp': srp, - 'storagetype:disablecompression': False, - 'port_group_name': port_group_name_f, - 'slo': slo, - 'storagegroup_name': storagegroup_name_f, - 'volume_name': test_volume.name, - 'workload': workload, - 'replication_enabled': False} - - masking_view_dict_no_slo = deepcopy(masking_view_dict) - masking_view_dict_no_slo.update( - {'slo': None, 'workload': None, - 'storagegroup_name': 'OS-HostX-No_SLO-OS-fibre-PG'}) - - masking_view_dict_compression_disabled = deepcopy(masking_view_dict) - masking_view_dict_compression_disabled.update( - {'storagetype:disablecompression': True, - 'storagegroup_name': 'OS-HostX-SRP_1-DiamondDSS-OS-fibre-PG-CD'}) - - masking_view_dict_replication_enabled = deepcopy(masking_view_dict) - masking_view_dict_replication_enabled.update( - {'replication_enabled': True, - 'storagegroup_name': 'OS-HostX-SRP_1-DiamondDSS-OS-fibre-PG-RE'}) - - # vmax data - # sloprovisioning - compression_info = {"symmetrixId": ["000197800128"]} - inititiatorgroup = [{"initiator": [wwpn1], - "hostId": initiatorgroup_name_f, - "maskingview": [masking_view_name_f]}, - {"initiator": [initiator], - "hostId": initiatorgroup_name_i, - "maskingview": [masking_view_name_i]}] - - initiator_list = [{"host": initiatorgroup_name_f, - "initiatorId": wwpn1, - "maskingview": 
[masking_view_name_f]}, - {"host": initiatorgroup_name_i, - "initiatorId": initiator, - "maskingview": [masking_view_name_i]}, - {"initiatorId": [ - "FA-1D:4:" + wwpn1, - "SE-4E:0:" + initiator]}] - - maskingview = [{"maskingViewId": masking_view_name_f, - "portGroupId": port_group_name_f, - "storageGroupId": storagegroup_name_f, - "hostId": initiatorgroup_name_f, - "maskingViewConnection": [ - {"host_lun_address": "0003"}]}, - {"maskingViewId": masking_view_name_i, - "portGroupId": port_group_name_i, - "storageGroupId": storagegroup_name_i, - "hostId": initiatorgroup_name_i, - "maskingViewConnection": [ - {"host_lun_address": "0003"}]}, - {}] - - portgroup = [{"portGroupId": port_group_name_f, - "symmetrixPortKey": [ - {"directorId": "FA-1D", - "portId": "FA-1D:4"}], - "maskingview": [masking_view_name_f]}, - {"portGroupId": port_group_name_i, - "symmetrixPortKey": [ - {"directorId": "SE-4E", - "portId": "SE-4E:0"}], - "maskingview": [masking_view_name_i]}] - - port_list = [ - {"symmetrixPort": {"num_of_masking_views": 1, - "maskingview": [masking_view_name_f], - "identifier": wwnn1, - "symmetrixPortKey": { - "directorId": "FA-1D", - "portId": "4"}, - "portgroup": [port_group_name_f]}}, - {"symmetrixPort": {"identifier": initiator, - "symmetrixPortKey": { - "directorId": "SE-4E", - "portId": "0"}, - "ip_addresses": [ip], - "num_of_masking_views": 1, - "maskingview": [masking_view_name_i], - "portgroup": [port_group_name_i]}}] - - sg_details = [{"srp": srp, - "num_of_vols": 2, - "cap_gb": 2, - "storageGroupId": defaultstoragegroup_name, - "slo": slo, - "workload": workload}, - {"srp": srp, - "num_of_vols": 2, - "cap_gb": 2, - "storageGroupId": storagegroup_name_f, - "slo": slo, - "workload": workload, - "maskingview": [masking_view_name_f], - "parent_storage_group": [parent_sg_f]}, - {"srp": srp, - "num_of_vols": 2, - "cap_gb": 2, - "storageGroupId": storagegroup_name_i, - "slo": slo, - "workload": workload, - "maskingview": [masking_view_name_i], - 
"parent_storage_group": [parent_sg_i]}, - {"num_of_vols": 2, - "cap_gb": 2, - "storageGroupId": parent_sg_f, - "num_of_child_sgs": 1, - "child_storage_group": [storagegroup_name_f], - "maskingview": [masking_view_name_f]}, - {"num_of_vols": 2, - "cap_gb": 2, - "storageGroupId": parent_sg_i, - "num_of_child_sgs": 1, - "child_storage_group": [storagegroup_name_i], - "maskingview": [masking_view_name_i], } - ] - - sg_details_rep = [{"childNames": [], - "numDevicesNonGk": 2, - "isLinkTarget": False, - "rdf": False, - "capacityGB": 2.0, - "name": storagegroup_name_source, - "snapVXSnapshots": ['12345'], - "symmetrixId": array, - "numSnapVXSnapshots": 1}] - - sg_list = {"storageGroupId": [storagegroup_name_f, - defaultstoragegroup_name]} - - sg_list_rep = [storagegroup_name_with_id] - - srp_details = {"srpSloDemandId": ["Bronze", "Diamond", "Gold", - "None", "Optimized", "Silver"], - "srpId": srp, - "total_allocated_cap_gb": 5244.7, - "total_usable_cap_gb": 20514.4, - "total_subscribed_cap_gb": 84970.1, - "reserved_cap_percent": 10} - - volume_details = [{"cap_gb": 2, - "num_of_storage_groups": 1, - "volumeId": device_id, - "volume_identifier": "1", - "wwn": volume_wwn, - "snapvx_target": 'false', - "snapvx_source": 'false', - "storageGroupId": [defaultstoragegroup_name, - storagegroup_name_f]}, - {"cap_gb": 1, - "num_of_storage_groups": 1, - "volumeId": device_id2, - "volume_identifier": "OS-2", - "wwn": '600012345', - "storageGroupId": [defaultstoragegroup_name, - storagegroup_name_f]}] - - volume_list = [ - {"resultList": {"result": [{"volumeId": device_id}]}}, - {"resultList": {"result": [{"volumeId": device_id2}]}}, - {"resultList": {"result": [{"volumeId": device_id}, - {"volumeId": device_id2}]}}] - - private_vol_details = { - "resultList": { - "result": [{ - "timeFinderInfo": { - "snapVXSession": [ - {"srcSnapshotGenInfo": [ - {"snapshotHeader": { - "snapshotName": "temp-1", - "device": device_id}, - "lnkSnapshotGenInfo": [ - {"targetDevice": device_id2}]}]}, - 
{"tgtSrcSnapshotGenInfo": { - "snapshotName": "temp-1", - "targetDevice": device_id2, - "sourceDevice": device_id}}], - "snapVXSrc": 'true', - "snapVXTgt": 'true'}}]}} - - workloadtype = {"workloadId": ["OLTP", "OLTP_REP", "DSS", "DSS_REP"]} - slo_details = {"sloId": ["Bronze", "Diamond", "Gold", - "Optimized", "Platinum", "Silver"]} - - # replication - volume_snap_vx = {"snapshotLnks": [], - "snapshotSrcs": [ - {"generation": 0, - "linkedDevices": [ - {"targetDevice": device_id2, - "percentageCopied": 100, - "state": "Copied", - "copy": True, - "defined": True, - "linked": True}], - "snapshotName": '12345', - "state": "Established"}]} - capabilities = {"symmetrixCapability": [{"rdfCapable": True, - "snapVxCapable": True, - "symmetrixId": "0001111111"}, - {"symmetrixId": array, - "snapVxCapable": True, - "rdfCapable": True}]} - group_snap_vx = {"generation": 0, - "isLinked": False, - "numUniqueTracks": 0, - "isRestored": False, - "name": group_snapshot_name, - "numStorageGroupVolumes": 1, - "state": ["Established"], - "timeToLiveExpiryDate": "N/A", - "isExpired": False, - "numSharedTracks": 0, - "timestamp": "00:30:50 Fri, 02 Jun 2017 IST +0100", - "numSourceVolumes": 1 - } - group_snap_vx_1 = {"generation": 0, - "isLinked": False, - "numUniqueTracks": 0, - "isRestored": False, - "name": group_snapshot_name, - "numStorageGroupVolumes": 1, - "state": ["Copied"], - "timeToLiveExpiryDate": "N/A", - "isExpired": False, - "numSharedTracks": 0, - "timestamp": "00:30:50 Fri, 02 Jun 2017 IST +0100", - "numSourceVolumes": 1, - "linkedStorageGroup": - {"name": target_group_name, - "percentageCopied": 100}, - } - grp_snapvx_links = [{"name": target_group_name, - "percentageCopied": 100}, - {"name": "another-target", - "percentageCopied": 90}] - - rdf_group_list = {"rdfGroupID": [{"rdfgNumber": rdf_group_no, - "label": rdf_group_name}]} - rdf_group_details = {"modes": ["Synchronous"], - "remoteSymmetrix": remote_array, - "label": rdf_group_name, - "type": "Dynamic", - 
"numDevices": 1, - "remoteRdfgNumber": rdf_group_no, - "rdfgNumber": rdf_group_no} - rdf_group_vol_details = {"remoteRdfGroupNumber": rdf_group_no, - "localSymmetrixId": array, - "volumeConfig": "RDF1+TDEV", - "localRdfGroupNumber": rdf_group_no, - "localVolumeName": device_id, - "rdfpairState": "Synchronized", - "remoteVolumeName": device_id2, - "localVolumeState": "Ready", - "rdfMode": "Synchronous", - "remoteVolumeState": "Write Disabled", - "remoteSymmetrixId": remote_array} - - # system - job_list = [{"status": "SUCCEEDED", - "jobId": "12345", - "result": "created", - "resourceLink": "storagegroup/%s" % storagegroup_name_f}, - {"status": "RUNNING", "jobId": "55555"}, - {"status": "FAILED", "jobId": "09999"}] - symmetrix = {"symmetrixId": array, - "model": "VMAX250F", - "ucode": "5977.1091.1092"} - - headroom = {"headroom": [{"headroomCapacity": 20348.29}]} - - -class FakeLookupService(object): - def get_device_mapping_from_network(self, initiator_wwns, target_wwns): - return VMAXCommonData.device_map - - -class FakeResponse(object): - - def __init__(self, status_code, return_object): - self.status_code = status_code - self.return_object = return_object - - def json(self): - if self.return_object: - return self.return_object - else: - raise ValueError - - -class FakeRequestsSession(object): - - def __init__(self, *args, **kwargs): - self.data = VMAXCommonData() - - def request(self, method, url, params=None, data=None): - return_object = '' - status_code = 200 - if method == 'GET': - status_code, return_object = self._get_request(url, params) - - elif method == 'POST' or method == 'PUT': - status_code, return_object = self._post_or_put(url, data) - - elif method == 'DELETE': - status_code, return_object = self._delete(url) - - elif method == 'TIMEOUT': - raise requests.Timeout - - elif method == 'EXCEPTION': - raise Exception - - return FakeResponse(status_code, return_object) - - def _get_request(self, url, params): - status_code = 200 - return_object = None - 
if self.data.failed_resource in url: - status_code = 500 - return_object = self.data.job_list[2] - elif 'sloprovisioning' in url: - if 'volume' in url: - return_object = self._sloprovisioning_volume(url, params) - elif 'storagegroup' in url: - return_object = self._sloprovisioning_sg(url) - elif 'maskingview' in url: - return_object = self._sloprovisioning_mv(url) - elif 'portgroup' in url: - return_object = self._sloprovisioning_pg(url) - elif 'director' in url: - return_object = self._sloprovisioning_port(url) - elif 'host' in url: - return_object = self._sloprovisioning_ig(url) - elif 'initiator' in url: - return_object = self._sloprovisioning_initiator(url) - elif 'srp' in url: - return_object = self.data.srp_details - elif 'workloadtype' in url: - return_object = self.data.workloadtype - elif 'compressionCapable' in url: - return_object = self.data.compression_info - else: - return_object = self.data.slo_details - - elif 'replication' in url: - return_object = self._replication(url) - - elif 'system' in url: - return_object = self._system(url) - - elif 'headroom' in url: - return_object = self.data.headroom - - return status_code, return_object - - def _sloprovisioning_volume(self, url, params): - return_object = self.data.volume_list[2] - if '/private' in url: - return_object = self.data.private_vol_details - elif params: - if '1' in params.values(): - return_object = self.data.volume_list[0] - elif '2' in params.values(): - return_object = self.data.volume_list[1] - else: - for vol in self.data.volume_details: - if vol['volumeId'] in url: - return_object = vol - break - return return_object - - def _sloprovisioning_sg(self, url): - return_object = self.data.sg_list - for sg in self.data.sg_details: - if sg['storageGroupId'] in url: - return_object = sg - break - return return_object - - def _sloprovisioning_mv(self, url): - if self.data.masking_view_name_i in url: - return_object = self.data.maskingview[1] - else: - return_object = self.data.maskingview[0] - 
return return_object - - def _sloprovisioning_pg(self, url): - return_object = None - for pg in self.data.portgroup: - if pg['portGroupId'] in url: - return_object = pg - break - return return_object - - def _sloprovisioning_port(self, url): - return_object = None - for port in self.data.port_list: - if port['symmetrixPort']['symmetrixPortKey']['directorId'] in url: - return_object = port - break - return return_object - - def _sloprovisioning_ig(self, url): - return_object = None - for ig in self.data.inititiatorgroup: - if ig['hostId'] in url: - return_object = ig - break - return return_object - - def _sloprovisioning_initiator(self, url): - return_object = self.data.initiator_list[2] - if self.data.wwpn1 in url: - return_object = self.data.initiator_list[0] - elif self.data.initiator in url: - return_object = self.data.initiator_list[1] - return return_object - - def _replication(self, url): - return_object = None - if 'rdf_group' in url: - if self.data.device_id in url: - return_object = self.data.rdf_group_vol_details - elif self.data.rdf_group_no in url: - return_object = self.data.rdf_group_details - else: - return_object = self.data.rdf_group_list - elif 'storagegroup' in url: - return_object = self._replication_sg(url) - elif 'snapshot' in url: - return_object = self.data.volume_snap_vx - elif 'capabilities' in url: - return_object = self.data.capabilities - return return_object - - def _replication_sg(self, url): - return_object = None - if 'generation' in url: - return_object = self.data.group_snap_vx - elif 'storagegroup' in url: - return_object = self.data.sg_details_rep[0] - return return_object - - def _system(self, url): - return_object = None - if 'job' in url: - for job in self.data.job_list: - if job['jobId'] in url: - return_object = job - break - else: - return_object = self.data.symmetrix - return return_object - - def _post_or_put(self, url, payload): - return_object = self.data.job_list[0] - status_code = 201 - if self.data.failed_resource 
in url: - status_code = 500 - return_object = self.data.job_list[2] - elif payload: - payload = ast.literal_eval(payload) - if self.data.failed_resource in payload.values(): - status_code = 500 - return_object = self.data.job_list[2] - if payload.get('executionOption'): - status_code = 202 - return status_code, return_object - - def _delete(self, url): - if self.data.failed_resource in url: - status_code = 500 - return_object = self.data.job_list[2] - else: - status_code = 204 - return_object = None - return status_code, return_object - - def session(self): - return FakeRequestsSession() - - -class FakeConfiguration(object): - - def __init__(self, emc_file=None, volume_backend_name=None, - interval=0, retries=0, replication_device=None): - self.cinder_dell_emc_config_file = emc_file - self.interval = interval - self.retries = retries - self.volume_backend_name = volume_backend_name - self.config_group = volume_backend_name - if replication_device: - self.replication_device = [replication_device] - - def safe_get(self, key): - try: - return getattr(self, key) - except Exception: - return None - - def append_config_values(self, values): - pass - - -class FakeXML(object): - - def __init__(self): - """""" - self.tempdir = tempfile.mkdtemp() - self.data = VMAXCommonData() - - def create_fake_config_file(self, config_group, portgroup, - ssl_verify=False): - - doc = minidom.Document() - emc = doc.createElement("EMC") - doc.appendChild(emc) - doc = self.add_array_info(doc, emc, portgroup, ssl_verify) - filename = 'cinder_dell_emc_config_%s.xml' % config_group - config_file_path = self.tempdir + '/' + filename - - f = open(config_file_path, 'w') - doc.writexml(f) - f.close() - return config_file_path - - def add_array_info(self, doc, emc, portgroup_name, ssl_verify): - array = doc.createElement("Array") - arraytext = doc.createTextNode(self.data.array) - emc.appendChild(array) - array.appendChild(arraytext) - - ecomserverip = doc.createElement("RestServerIp") - 
ecomserveriptext = doc.createTextNode("1.1.1.1") - emc.appendChild(ecomserverip) - ecomserverip.appendChild(ecomserveriptext) - - ecomserverport = doc.createElement("RestServerPort") - ecomserverporttext = doc.createTextNode("8443") - emc.appendChild(ecomserverport) - ecomserverport.appendChild(ecomserverporttext) - - ecomusername = doc.createElement("RestUserName") - ecomusernametext = doc.createTextNode("smc") - emc.appendChild(ecomusername) - ecomusername.appendChild(ecomusernametext) - - ecompassword = doc.createElement("RestPassword") - ecompasswordtext = doc.createTextNode("smc") - emc.appendChild(ecompassword) - ecompassword.appendChild(ecompasswordtext) - - portgroup = doc.createElement("PortGroup") - portgrouptext = doc.createTextNode(portgroup_name) - portgroup.appendChild(portgrouptext) - - portgroups = doc.createElement("PortGroups") - portgroups.appendChild(portgroup) - emc.appendChild(portgroups) - - srp = doc.createElement("SRP") - srptext = doc.createTextNode("SRP_1") - emc.appendChild(srp) - srp.appendChild(srptext) - - if ssl_verify: - restcert = doc.createElement("SSLCert") - restcerttext = doc.createTextNode("/path/cert.crt") - emc.appendChild(restcert) - restcert.appendChild(restcerttext) - - restverify = doc.createElement("SSLVerify") - restverifytext = doc.createTextNode("/path/cert.pem") - emc.appendChild(restverify) - restverify.appendChild(restverifytext) - return doc - - -class VMAXUtilsTest(test.TestCase): - def setUp(self): - self.data = VMAXCommonData() - - super(VMAXUtilsTest, self).setUp() - config_group = 'UtilsTests' - fake_xml = FakeXML().create_fake_config_file( - config_group, self.data.port_group_name_i, True) - configuration = FakeConfiguration(fake_xml, config_group) - rest.VMAXRest._establish_rest_session = mock.Mock( - return_value=FakeRequestsSession()) - driver = iscsi.VMAXISCSIDriver(configuration=configuration) - self.driver = driver - self.common = self.driver.common - self.utils = self.common.utils - - def 
test_get_volumetype_extra_specs(self): - with mock.patch.object(volume_types, 'get_volume_type_extra_specs', - return_value={'specs'}) as type_mock: - # path 1: volume_type_id not passed in - self.data.test_volume.volume_type_id = ( - self.data.test_volume_type.id) - self.utils.get_volumetype_extra_specs(self.data.test_volume) - volume_types.get_volume_type_extra_specs.assert_called_once_with( - self.data.test_volume_type.id) - type_mock.reset_mock() - # path 2: volume_type_id passed in - self.utils.get_volumetype_extra_specs(self.data.test_volume, '123') - volume_types.get_volume_type_extra_specs.assert_called_once_with( - '123') - type_mock.reset_mock() - # path 3: no type_id - self.utils.get_volumetype_extra_specs(self.data.test_clone_volume) - (volume_types.get_volume_type_extra_specs. - assert_not_called()) - - def test_get_volumetype_extra_specs_exception(self): - extra_specs = self.utils.get_volumetype_extra_specs( - {'name': 'no_type_id'}) - self.assertEqual({}, extra_specs) - - def test_get_random_portgroup(self): - # 4 portgroups - data = ("\n\n" - "" - "OS-PG1\n" - "OS-PG2\n" - "OS-PG3\n" - "OS-PG4\n" - "" - "") - dom = minidom.parseString(data) - portgroup = self.utils._get_random_portgroup(dom) - self.assertIn('OS-PG', portgroup) - - # Duplicate portgroups - data = ("\n\n" - "" - "OS-PG1\n" - "OS-PG1\n" - "OS-PG1\n" - "OS-PG2\n" - "" - "") - dom = minidom.parseString(data) - portgroup = self.utils._get_random_portgroup(dom) - self.assertIn('OS-PG', portgroup) - - def test_get_random_portgroup_none(self): - # Missing PortGroup tag - data = ("\n\n" - "") - dom = minidom.parseString(data) - self.assertIsNone(self.utils._get_random_portgroup(dom)) - - # Missing portgroups - data = ("\n\n" - "" - "" - "") - dom = minidom.parseString(data) - self.assertIsNone(self.utils._get_random_portgroup(dom)) - - def test_get_host_short_name(self): - host_under_16_chars = 'host_13_chars' - host1 = self.utils.get_host_short_name( - host_under_16_chars) - 
self.assertEqual(host_under_16_chars, host1) - - host_over_16_chars = ( - 'host_over_16_chars_host_over_16_chars_host_over_16_chars') - # Check that the same md5 value is retrieved from multiple calls - host2 = self.utils.get_host_short_name( - host_over_16_chars) - host3 = self.utils.get_host_short_name( - host_over_16_chars) - self.assertEqual(host2, host3) - host_with_period = 'hostname.with.many.parts' - ref_host_name = self.utils.generate_unique_trunc_host('hostname') - host4 = self.utils.get_host_short_name(host_with_period) - self.assertEqual(ref_host_name, host4) - - def test_get_volume_element_name(self): - volume_id = 'ea95aa39-080b-4f11-9856-a03acf9112ad' - volume_element_name = self.utils.get_volume_element_name(volume_id) - expect_vol_element_name = ('OS-' + volume_id) - self.assertEqual(expect_vol_element_name, volume_element_name) - - def test_parse_file_to_get_array_map(self): - kwargs = ( - {'RestServerIp': '1.1.1.1', - 'RestServerPort': '8443', - 'RestUserName': 'smc', - 'RestPassword': 'smc', - 'SSLCert': '/path/cert.crt', - 'SSLVerify': '/path/cert.pem', - 'SerialNumber': self.data.array, - 'srpName': 'SRP_1', - 'PortGroup': self.data.port_group_name_i}) - array_info = self.utils.parse_file_to_get_array_map( - self.common.configuration.cinder_dell_emc_config_file) - self.assertEqual(kwargs, array_info) - - @mock.patch.object(utils.VMAXUtils, - '_get_connection_info') - @mock.patch.object(utils.VMAXUtils, - '_get_random_portgroup') - def test_parse_file_to_get_array_map_errors(self, mock_port, mock_conn): - tempdir = tempfile.mkdtemp() - doc = minidom.Document() - emc = doc.createElement("EMC") - doc.appendChild(emc) - filename = 'cinder_dell_emc_config_%s.xml' % 'fake_xml' - config_file_path = tempdir + '/' + filename - f = open(config_file_path, 'w') - doc.writexml(f) - f.close() - array_info = self.utils.parse_file_to_get_array_map( - config_file_path) - self.assertIsNone(array_info['SerialNumber']) - - def 
test_parse_file_to_get_array_map_conn_errors(self): - tempdir = tempfile.mkdtemp() - doc = minidom.Document() - emc = doc.createElement("EMC") - doc.appendChild(emc) - filename = 'cinder_dell_emc_config_%s.xml' % 'fake_xml' - config_file_path = tempdir + '/' + filename - f = open(config_file_path, 'w') - doc.writexml(f) - f.close() - self.assertRaises(exception.VolumeBackendAPIException, - self.utils.parse_file_to_get_array_map, - config_file_path) - - def test_truncate_string(self): - # string is less than max number - str_to_truncate = 'string' - response = self.utils.truncate_string(str_to_truncate, 10) - self.assertEqual(str_to_truncate, response) - - def test_get_default_oversubscription_ratio(self): - default_ratio = 20.0 - max_over_sub_ratio1 = 30.0 - returned_max = self.utils.get_default_oversubscription_ratio( - max_over_sub_ratio1) - self.assertEqual(max_over_sub_ratio1, returned_max) - max_over_sub_ratio2 = 0.5 - returned_max = self.utils.get_default_oversubscription_ratio( - max_over_sub_ratio2) - self.assertEqual(default_ratio, returned_max) - - def test_get_default_storage_group_name_slo_workload(self): - srp_name = self.data.srp - slo = self.data.slo - workload = self.data.workload - sg_name = self.utils.get_default_storage_group_name( - srp_name, slo, workload) - self.assertEqual(self.data.defaultstoragegroup_name, sg_name) - - def test_get_default_storage_group_name_no_slo(self): - srp_name = self.data.srp - slo = None - workload = None - sg_name = self.utils.get_default_storage_group_name( - srp_name, slo, workload) - self.assertEqual(self.data.default_sg_no_slo, sg_name) - - def test_get_default_storage_group_name_compr_disabled(self): - srp_name = self.data.srp - slo = self.data.slo - workload = self.data.workload - sg_name = self.utils.get_default_storage_group_name( - srp_name, slo, workload, True) - self.assertEqual(self.data.default_sg_compr_disabled, sg_name) - - def test_get_time_delta(self): - start_time = 1487781721.09 - end_time = 
1487781758.16 - delta = end_time - start_time - ref_delta = six.text_type(datetime.timedelta(seconds=int(delta))) - time_delta = self.utils.get_time_delta(start_time, end_time) - self.assertEqual(ref_delta, time_delta) - - def test_get_short_protocol_type(self): - # iscsi - short_i_protocol = self.utils.get_short_protocol_type('iscsi') - self.assertEqual('I', short_i_protocol) - # fc - short_f_protocol = self.utils.get_short_protocol_type('FC') - self.assertEqual('F', short_f_protocol) - # else - other_protocol = self.utils.get_short_protocol_type('OTHER') - self.assertEqual('OTHER', other_protocol) - - def test_get_temp_snap_name(self): - clone_name = "12345" - source_device_id = self.data.device_id - ref_name = "temp-00001-12345" - snap_name = self.utils.get_temp_snap_name( - clone_name, source_device_id) - self.assertEqual(ref_name, snap_name) - - def test_get_array_and_device_id(self): - volume = deepcopy(self.data.test_volume) - external_ref = {u'source-name': u'00002'} - array, device_id = self.utils.get_array_and_device_id( - volume, external_ref) - self.assertEqual(self.data.array, array) - self.assertEqual('00002', device_id) - - def test_get_array_and_device_id_exception(self): - volume = deepcopy(self.data.test_volume) - external_ref = {u'source-name': None} - self.assertRaises(exception.VolumeBackendAPIException, - self.utils.get_array_and_device_id, - volume, external_ref) - - def test_get_pg_short_name(self): - pg_under_12_chars = 'pg_11_chars' - pg1 = self.utils.get_pg_short_name(pg_under_12_chars) - self.assertEqual(pg_under_12_chars, pg1) - - pg_over_12_chars = 'portgroup_over_12_characters' - # Check that the same md5 value is retrieved from multiple calls - pg2 = self.utils.get_pg_short_name(pg_over_12_chars) - pg3 = self.utils.get_pg_short_name(pg_over_12_chars) - self.assertEqual(pg2, pg3) - - def test_is_compression_disabled_true(self): - extra_specs = self.data.extra_specs_disable_compression - do_disable_compression = 
self.utils.is_compression_disabled( - extra_specs) - self.assertTrue(do_disable_compression) - - def test_is_compression_disabled_false(self): - # Path 1: no compression extra spec set - extra_specs = self.data.extra_specs - do_disable_compression = self.utils.is_compression_disabled( - extra_specs) - self.assertFalse(do_disable_compression) - # Path 2: compression extra spec set to false - extra_specs2 = deepcopy(extra_specs) - extra_specs2.update({utils.DISABLECOMPRESSION: 'false'}) - do_disable_compression2 = self.utils.is_compression_disabled( - extra_specs) - self.assertFalse(do_disable_compression2) - - def test_change_compression_type_true(self): - source_compr_disabled_true = 'true' - new_type_compr_disabled = { - 'extra_specs': {utils.DISABLECOMPRESSION: 'no'}} - ans = self.utils.change_compression_type( - source_compr_disabled_true, new_type_compr_disabled) - self.assertTrue(ans) - - def test_change_compression_type_false(self): - source_compr_disabled_true = True - new_type_compr_disabled = { - 'extra_specs': {utils.DISABLECOMPRESSION: 'true'}} - ans = self.utils.change_compression_type( - source_compr_disabled_true, new_type_compr_disabled) - self.assertFalse(ans) - - def test_is_replication_enabled(self): - is_re = self.utils.is_replication_enabled( - self.data.vol_type_extra_specs_rep_enabled) - self.assertTrue(is_re) - is_re2 = self.utils.is_replication_enabled(self.data.extra_specs) - self.assertFalse(is_re2) - - def test_get_replication_config(self): - # Success, allow_extend false - rep_device_list1 = [{'target_device_id': self.data.remote_array, - 'remote_pool': self.data.srp, - 'remote_port_group': self.data.port_group_name_f, - 'rdf_group_label': self.data.rdf_group_name}] - rep_config1 = self.utils.get_replication_config(rep_device_list1) - self.assertEqual(self.data.remote_array, rep_config1['array']) - # Success, allow_extend true - rep_device_list2 = [{'target_device_id': self.data.remote_array, - 'remote_pool': self.data.srp, - 
'rdf_group_label': self.data.rdf_group_name, - 'remote_port_group': self.data.port_group_name_f, - 'allow_extend': 'true'}] - rep_config2 = self.utils.get_replication_config(rep_device_list2) - self.assertTrue(rep_config2['allow_extend']) - # No rep_device_list - rep_device_list3 = [] - rep_config3 = self.utils.get_replication_config(rep_device_list3) - self.assertIsNone(rep_config3) - # Exception - rep_device_list4 = [{'target_device_id': self.data.remote_array, - 'remote_pool': self.data.srp}] - self.assertRaises(exception.VolumeBackendAPIException, - self.utils.get_replication_config, rep_device_list4) - - def test_is_volume_failed_over(self): - vol = deepcopy(self.data.test_volume) - vol.replication_status = fields.ReplicationStatus.FAILED_OVER - is_fo1 = self.utils.is_volume_failed_over(vol) - self.assertTrue(is_fo1) - is_fo2 = self.utils.is_volume_failed_over(self.data.test_volume) - self.assertFalse(is_fo2) - is_fo3 = self.utils.is_volume_failed_over(None) - self.assertFalse(is_fo3) - - def test_update_volume_group_name(self): - group = self.data.test_group_1 - ref_group_name = self.data.test_vol_grp_name - vol_grp_name = self.utils.update_volume_group_name(group) - self.assertEqual(ref_group_name, vol_grp_name) - - def test_update_volume_group_name_id_only(self): - group = self.data.test_group_without_name - ref_group_name = self.data.test_vol_grp_name_id_only - vol_grp_name = self.utils.update_volume_group_name(group) - self.assertEqual(ref_group_name, vol_grp_name) - - def test_update_admin_metadata(self): - admin_metadata = {'targetVolumeName': '123456'} - ref_model_update = [{'id': '12345', - 'admin_metadata': admin_metadata}] - volume_model_update = {'id': '12345'} - volumes_model_update = [volume_model_update] - key = 'targetVolumeName' - values = {} - values['12345'] = '123456' - self.utils.update_admin_metadata( - volumes_model_update, key, values) - self.assertEqual(ref_model_update, volumes_model_update) - - def test_get_volume_group_utils(self): 
- group = self.data.test_group_1 - array, extraspecs_dict = self.utils.get_volume_group_utils( - group, interval=1, retries=1) - ref_array = self.data.array - self.assertEqual(ref_array, array) - - def test_update_extra_specs_list(self): - extra_specs = self.data.extra_specs - volume_type_id = 'abc' - extraspecs_dict = self.utils._update_extra_specs_list( - extra_specs, volume_type_id, interval=1, retries=1) - self.assertEqual(extra_specs, extraspecs_dict['extra_specs']) - - def test_update_intervals_and_retries(self): - extra_specs = self.data.extra_specs - ref_interval = 1 - extraspecs = self.utils._update_intervals_and_retries( - extra_specs, interval=1, retries=1) - self.assertEqual(ref_interval, extraspecs['interval']) - - def test_get_intervals_retries_dict(self): - ref_value = {'interval': 1, 'retries': 1} - ret_dict = self.utils.get_intervals_retries_dict( - interval=1, retries=1) - self.assertEqual(ref_value, ret_dict) - - def test_update_volume_model_updates(self): - volume_model_updates = [{'id': '1', 'status': 'available'}] - volumes = [self.data.test_volume] - ref_val = {'id': self.data.test_volume.id, - 'status': 'error_deleting'} - ret_val = self.utils.update_volume_model_updates( - volume_model_updates, volumes, 'abc', status='error_deleting') - self.assertEqual(ref_val, ret_val[1]) - - def test_update_volume_model_updates_empty_update_list(self): - volume_model_updates = [] - volumes = [self.data.test_volume] - ref_val = [{'id': self.data.test_volume.id, - 'status': 'available'}] - ret_val = self.utils.update_volume_model_updates( - volume_model_updates, volumes, 'abc') - self.assertEqual(ref_val, ret_val) - - def test_update_volume_model_updates_empty_vol_list(self): - volume_model_updates = [] - volumes = [] - ref_val = [] - ret_val = self.utils.update_volume_model_updates( - volume_model_updates, volumes, 'abc') - self.assertEqual(ref_val, ret_val) - - -class VMAXRestTest(test.TestCase): - def setUp(self): - self.data = VMAXCommonData() - - 
super(VMAXRestTest, self).setUp() - config_group = 'RestTests' - fake_xml = FakeXML().create_fake_config_file( - config_group, self.data.port_group_name_f) - configuration = FakeConfiguration(fake_xml, config_group) - rest.VMAXRest._establish_rest_session = mock.Mock( - return_value=FakeRequestsSession()) - driver = fc.VMAXFCDriver(configuration=configuration) - self.driver = driver - self.common = self.driver.common - self.rest = self.common.rest - self.utils = self.common.utils - - def test_rest_request_exception(self): - sc, msg = self.rest.request('/fake_url', 'TIMEOUT') - self.assertIsNone(sc) - self.assertIsNone(msg) - self.assertRaises(exception.VolumeBackendAPIException, - self.rest.request, '', 'EXCEPTION') - - def test_wait_for_job_complete(self): - rc, job, status, task = self.rest.wait_for_job_complete( - {'status': 'created', 'jobId': '12345'}, self.data.extra_specs) - self.assertEqual(0, rc) - - def test_wait_for_job_complete_failed(self): - with mock.patch.object(self.rest, '_is_job_finished', - side_effect=exception.BadHTTPResponseStatus): - self.assertRaises(exception.VolumeBackendAPIException, - self.rest.wait_for_job_complete, - self.data.job_list[0], self.data.extra_specs) - - def test_is_job_finished_false(self): - job_id = "55555" - complete, response, rc, status, task = self.rest._is_job_finished( - job_id) - self.assertFalse(complete) - - def test_is_job_finished_failed(self): - job_id = "55555" - complete, response, rc, status, task = self.rest._is_job_finished( - job_id) - self.assertFalse(complete) - with mock.patch.object(self.rest, 'request', - return_value=(200, {'status': 'FAILED'})): - complete, response, rc, status, task = ( - self.rest._is_job_finished(job_id)) - self.assertTrue(complete) - self.assertEqual(-1, rc) - - def test_check_status_code_success(self): - status_code = 200 - self.rest.check_status_code_success( - 'test success', status_code, "") - - def test_check_status_code_not_success(self): - status_code = 500 - 
self.assertRaises(exception.VolumeBackendAPIException, - self.rest.check_status_code_success, - 'test exception', status_code, "") - - def test_wait_for_job_success(self): - operation = 'test' - status_code = 202 - job = self.data.job_list[0] - extra_specs = self.data.extra_specs - self.rest.wait_for_job( - operation, status_code, job, extra_specs) - - def test_wait_for_job_failed(self): - operation = 'test' - status_code = 202 - job = self.data.job_list[2] - extra_specs = self.data.extra_specs - with mock.patch.object(self.rest, 'wait_for_job_complete', - return_value=(-1, '', '', '')): - self.assertRaises(exception.VolumeBackendAPIException, - self.rest.wait_for_job, - operation, status_code, job, extra_specs) - - def test_get_resource_present(self): - array = self.data.array - category = 'sloprovisioning' - resource_type = 'storagegroup' - resource = self.rest.get_resource(array, category, resource_type) - self.assertEqual(self.data.sg_list, resource) - - def test_get_resource_not_present(self): - array = self.data.array - category = 'sloprovisioning' - resource_type = self.data.failed_resource - resource = self.rest.get_resource(array, category, resource_type) - self.assertIsNone(resource) - - def test_create_resource_success(self): - array = self.data.array - category = '' - resource_type = '' - payload = {'someKey': 'someValue'} - status_code, message = self.rest.create_resource( - array, category, resource_type, payload) - self.assertEqual(self.data.job_list[0], message) - - def test_create_resource_failed(self): - array = self.data.array - category = '' - resource_type = '' - payload = {'someKey': self.data.failed_resource} - self.assertRaises( - exception.VolumeBackendAPIException, - self.rest.create_resource, array, category, - resource_type, payload) - - def test_modify_resource(self): - array = self.data.array - category = '' - resource_type = '' - payload = {'someKey': 'someValue'} - status_code, message = self.rest.modify_resource( - array, category, 
resource_type, payload) - self.assertEqual(self.data.job_list[0], message) - - def test_modify_resource_failed(self): - array = self.data.array - category = '' - resource_type = '' - payload = {'someKey': self.data.failed_resource} - self.assertRaises( - exception.VolumeBackendAPIException, - self.rest.modify_resource, array, category, - resource_type, payload) - - def test_delete_resource(self): - operation = 'delete res resource' - status_code = 204 - message = None - array = self.data.array - category = 'cat' - resource_type = 'res' - resource_name = 'name' - with mock.patch.object(self.rest, 'check_status_code_success'): - self.rest.delete_resource( - array, category, resource_type, resource_name) - self.rest.check_status_code_success.assert_called_with( - operation, status_code, message) - - def test_delete_resource_failed(self): - array = self.data.array - category = self.data.failed_resource - resource_type = self.data.failed_resource - resource_name = self.data.failed_resource - self.assertRaises( - exception.VolumeBackendAPIException, - self.rest.modify_resource, array, category, - resource_type, resource_name) - - def test_get_array_serial(self): - ref_details = self.data.symmetrix - array_details = self.rest.get_array_serial(self.data.array) - self.assertEqual(ref_details, array_details) - - def test_get_array_serial_failed(self): - array_details = self.rest.get_array_serial(self.data.failed_resource) - self.assertIsNone(array_details) - - def test_get_srp_by_name(self): - ref_details = self.data.srp_details - srp_details = self.rest.get_srp_by_name( - self.data.array, self.data.srp) - self.assertEqual(ref_details, srp_details) - - def test_get_slo_list(self): - ref_settings = self.data.slo_details['sloId'] - slo_settings = self.rest.get_slo_list(self.data.array) - self.assertEqual(ref_settings, slo_settings) - - def test_get_workload_settings(self): - ref_settings = self.data.workloadtype['workloadId'] - wl_settings = self.rest.get_workload_settings( - 
self.data.array) - self.assertEqual(ref_settings, wl_settings) - - def test_get_workload_settings_failed(self): - wl_settings = self.rest.get_workload_settings( - self.data.failed_resource) - self.assertFalse(wl_settings) - - def test_get_headroom_capacity(self): - ref_headroom = self.data.headroom['headroom'][0]['headroomCapacity'] - headroom_cap = self.rest.get_headroom_capacity( - self.data.array, self.data.srp, - self.data.slo, self.data.workload) - self.assertEqual(ref_headroom, headroom_cap) - - def test_get_headroom_capacity_failed(self): - headroom_cap = self.rest.get_headroom_capacity( - self.data.failed_resource, self.data.srp, - self.data.slo, self.data.workload) - self.assertIsNone(headroom_cap) - - def test_is_compression_capable_true(self): - compr_capable = self.rest.is_compression_capable('000197800128') - self.assertTrue(compr_capable) - - def test_is_compression_capable_false(self): - compr_capable = self.rest.is_compression_capable(self.data.array) - self.assertFalse(compr_capable) - with mock.patch.object(self.rest, 'request', return_value=(200, {})): - compr_capable = self.rest.is_compression_capable(self.data.array) - self.assertFalse(compr_capable) - - def test_get_storage_group(self): - ref_details = self.data.sg_details[0] - sg_details = self.rest.get_storage_group( - self.data.array, self.data.defaultstoragegroup_name) - self.assertEqual(ref_details, sg_details) - - def test_get_storage_group_list(self): - ref_details = self.data.sg_list['storageGroupId'] - sg_list = self.rest.get_storage_group_list( - self.data.array, {}) - self.assertEqual(ref_details, sg_list) - - def test_get_storage_group_list_none(self): - with mock.patch.object(self.rest, 'get_resource', return_value=None): - sg_list = self.rest.get_storage_group_list( - self.data.array, {}) - self.assertFalse(sg_list) - - def test_create_storage_group(self): - with mock.patch.object(self.rest, 'create_resource'): - payload = {'someKey': 'someValue'} - 
self.rest._create_storagegroup(self.data.array, payload) - self.rest.create_resource.assert_called_once_with( - self.data.array, 'sloprovisioning', 'storagegroup', payload) - - def test_create_storage_group_success(self): - sg_name = self.rest.create_storage_group( - self.data.array, self.data.storagegroup_name_f, self.data.srp, - self.data.slo, self.data.workload, self.data.extra_specs) - self.assertEqual(self.data.storagegroup_name_f, sg_name) - - def test_create_storage_group_failed(self): - self.assertRaises( - exception.VolumeBackendAPIException, - self.rest.create_storage_group, self.data.array, - self.data.failed_resource, self.data.srp, self.data.slo, - self.data.workload, self.data.extra_specs) - - def test_create_storage_group_no_slo(self): - sg_name = self.rest.create_storage_group( - self.data.array, self.data.default_sg_no_slo, self.data.srp, - None, None, self.data.extra_specs) - self.assertEqual(self.data.default_sg_no_slo, sg_name) - - def test_create_storage_group_compression_disabled(self): - with mock.patch.object(self.rest, '_create_storagegroup', - return_value=(200, self.data.job_list[0])): - self.rest.create_storage_group( - self.data.array, self.data.default_sg_compr_disabled, - self.data.srp, self.data.slo, self.data.workload, - self.data.extra_specs, True) - payload = {"srpId": self.data.srp, - "storageGroupId": self.data.default_sg_compr_disabled, - "emulation": "FBA", - "sloBasedStorageGroupParam": [ - {"num_of_vols": 0, - "sloId": self.data.slo, - "workloadSelection": self.data.workload, - "volumeAttribute": { - "volume_size": "0", - "capacityUnit": "GB"}, - "noCompression": "true"}]} - self.rest._create_storagegroup.assert_called_once_with( - self.data.array, payload) - - def test_modify_storage_group(self): - array = self.data.array - storagegroup = self.data.defaultstoragegroup_name - payload = {'someKey': 'someValue'} - version = self.data.u4v_version - with mock.patch.object(self.rest, 'modify_resource'): - 
self.rest.modify_storage_group(array, storagegroup, payload) - self.rest.modify_resource.assert_called_once_with( - self.data.array, 'sloprovisioning', 'storagegroup', - payload, version, resource_name=storagegroup) - - def test_create_volume_from_sg_success(self): - volume_name = self.data.volume_details[0]['volume_identifier'] - ref_dict = self.data.provider_location - volume_dict = self.rest.create_volume_from_sg( - self.data.array, volume_name, self.data.defaultstoragegroup_name, - self.data.test_volume.size, self.data.extra_specs) - self.assertEqual(ref_dict, volume_dict) - - def test_create_volume_from_sg_failed(self): - volume_name = self.data.volume_details[0]['volume_identifier'] - self.assertRaises( - exception.VolumeBackendAPIException, - self.rest.create_volume_from_sg, self.data.array, - volume_name, self.data.failed_resource, - self.data.test_volume.size, self.data.extra_specs) - - def test_create_volume_from_sg_cannot_retrieve_device_id(self): - with mock.patch.object(self.rest, 'find_volume_device_id', - return_value=None): - volume_name = self.data.volume_details[0]['volume_identifier'] - self.assertRaises( - exception.VolumeBackendAPIException, - self.rest.create_volume_from_sg, self.data.array, - volume_name, self.data.failed_resource, - self.data.test_volume.size, self.data.extra_specs) - - def test_add_vol_to_sg_success(self): - operation = 'Add volume to sg' - status_code = 202 - message = self.data.job_list[0] - with mock.patch.object(self.rest, 'wait_for_job'): - device_id = self.data.device_id - self.rest.add_vol_to_sg( - self.data.array, self.data.storagegroup_name_f, device_id, - self.data.extra_specs) - self.rest.wait_for_job.assert_called_with( - operation, status_code, message, self.data.extra_specs) - - def test_add_vol_to_sg_failed(self): - device_id = [self.data.device_id] - self.assertRaises( - exception.VolumeBackendAPIException, - self.rest.add_vol_to_sg, self.data.array, - self.data.failed_resource, device_id, - 
self.data.extra_specs) - - def test_remove_vol_from_sg_success(self): - operation = 'Remove vol from sg' - status_code = 202 - message = self.data.job_list[0] - with mock.patch.object(self.rest, 'wait_for_job'): - device_id = self.data.device_id - self.rest.remove_vol_from_sg( - self.data.array, self.data.storagegroup_name_f, device_id, - self.data.extra_specs) - self.rest.wait_for_job.assert_called_with( - operation, status_code, message, self.data.extra_specs) - - @mock.patch.object(time, 'sleep') - def test_remove_vol_from_sg_failed(self, mock_sleep): - device_id = [self.data.volume_details[0]['volumeId']] - self.assertRaises( - exception.VolumeBackendAPIException, - self.rest.remove_vol_from_sg, self.data.array, - self.data.failed_resource, device_id, - self.data.extra_specs) - - def test_get_vmax_default_storage_group(self): - ref_storage_group = self.data.sg_details[0] - ref_sg_name = self.data.defaultstoragegroup_name - storagegroup, storagegroup_name = ( - self.rest.get_vmax_default_storage_group( - self.data.array, self.data.srp, - self.data.slo, self.data.workload)) - self.assertEqual(ref_sg_name, storagegroup_name) - self.assertEqual(ref_storage_group, storagegroup) - - def test_delete_storage_group(self): - operation = 'delete storagegroup resource' - status_code = 204 - message = None - with mock.patch.object(self.rest, 'check_status_code_success'): - self.rest.delete_storage_group( - self.data.array, self.data.storagegroup_name_f) - self.rest.check_status_code_success.assert_called_with( - operation, status_code, message) - - def test_is_child_sg_in_parent_sg(self): - is_child1 = self.rest.is_child_sg_in_parent_sg( - self.data.array, self.data.storagegroup_name_f, - self.data.parent_sg_f) - is_child2 = self.rest.is_child_sg_in_parent_sg( - self.data.array, self.data.defaultstoragegroup_name, - self.data.parent_sg_f) - self.assertTrue(is_child1) - self.assertFalse(is_child2) - - def test_add_child_sg_to_parent_sg(self): - payload = 
{"editStorageGroupActionParam": { - "expandStorageGroupParam": { - "addExistingStorageGroupParam": { - "storageGroupId": [self.data.storagegroup_name_f]}}}} - with mock.patch.object(self.rest, 'modify_storage_group', - return_value=(202, self.data.job_list[0])): - self.rest.add_child_sg_to_parent_sg( - self.data.array, self.data.storagegroup_name_f, - self.data.parent_sg_f, self.data.extra_specs) - self.rest.modify_storage_group.assert_called_once_with( - self.data.array, self.data.parent_sg_f, payload) - - def test_remove_child_sg_from_parent_sg(self): - payload = {"editStorageGroupActionParam": { - "removeStorageGroupParam": { - "storageGroupId": [self.data.storagegroup_name_f], - "force": 'true'}}} - with mock.patch.object(self.rest, 'modify_storage_group', - return_value=(202, self.data.job_list[0])): - self.rest.remove_child_sg_from_parent_sg( - self.data.array, self.data.storagegroup_name_f, - self.data.parent_sg_f, self.data.extra_specs) - self.rest.modify_storage_group.assert_called_once_with( - self.data.array, self.data.parent_sg_f, payload) - - def test_get_volume_list(self): - ref_volumes = [self.data.device_id, self.data.device_id2] - volumes = self.rest.get_volume_list(self.data.array, {}) - self.assertEqual(ref_volumes, volumes) - - def test_get_volume(self): - ref_volumes = self.data.volume_details[0] - device_id = self.data.device_id - volumes = self.rest.get_volume(self.data.array, device_id) - self.assertEqual(ref_volumes, volumes) - - def test_get_private_volume(self): - device_id = self.data.device_id - ref_volume = self.data.private_vol_details['resultList']['result'][0] - volume = self.rest._get_private_volume(self.data.array, device_id) - self.assertEqual(ref_volume, volume) - - def test_get_private_volume_exception(self): - device_id = self.data.device_id - with mock.patch.object(self.rest, 'get_resource', - return_value={}): - self.assertRaises(exception.VolumeBackendAPIException, - self.rest._get_private_volume, - self.data.array, 
device_id) - - def test_modify_volume_success(self): - array = self.data.array - device_id = self.data.device_id - payload = {'someKey': 'someValue'} - with mock.patch.object(self.rest, 'modify_resource'): - self.rest._modify_volume(array, device_id, payload) - self.rest.modify_resource.assert_called_once_with( - self.data.array, 'sloprovisioning', 'volume', - payload, resource_name=device_id) - - def test_modify_volume_failed(self): - payload = {'someKey': self.data.failed_resource} - device_id = self.data.device_id - self.assertRaises( - exception.VolumeBackendAPIException, - self.rest._modify_volume, self.data.array, - device_id, payload) - - def test_extend_volume(self): - device_id = self.data.device_id - new_size = '3' - extend_vol_payload = {"executionOption": "ASYNCHRONOUS", - "editVolumeActionParam": { - "expandVolumeParam": { - "volumeAttribute": { - "volume_size": new_size, - "capacityUnit": "GB"}}}} - with mock.patch.object(self.rest, '_modify_volume', - return_value=(202, self.data.job_list[0])): - self.rest.extend_volume(self.data.array, device_id, new_size, - self.data.extra_specs) - self.rest._modify_volume.assert_called_once_with( - self.data.array, device_id, extend_vol_payload) - - def test_delete_volume(self): - device_id = self.data.device_id - deallocate_vol_payload = {"editVolumeActionParam": { - "freeVolumeParam": {"free_volume": 'true'}}} - with mock.patch.object(self.rest, 'delete_resource'): - with mock.patch.object(self.rest, '_modify_volume'): - self.rest.delete_volume(self.data.array, device_id) - self.rest._modify_volume.assert_called_once_with( - self.data.array, device_id, deallocate_vol_payload) - self.rest.delete_resource.assert_called_once_with( - self.data.array, 'sloprovisioning', 'volume', device_id) - - def test_rename_volume(self): - device_id = self.data.device_id - payload = {"editVolumeActionParam": { - "modifyVolumeIdentifierParam": { - "volumeIdentifier": { - "identifier_name": 'new_name', - "volumeIdentifierChoice": 
"identifier_name"}}}} - with mock.patch.object(self.rest, '_modify_volume'): - self.rest.rename_volume(self.data.array, device_id, 'new_name') - self.rest._modify_volume.assert_called_once_with( - self.data.array, device_id, payload) - - def test_find_mv_connections_for_vol(self): - device_id = self.data.device_id - ref_lun_id = int((self.data.maskingview[0]['maskingViewConnection'] - [0]['host_lun_address']), 16) - host_lun_id = self.rest.find_mv_connections_for_vol( - self.data.array, self.data.masking_view_name_f, device_id) - self.assertEqual(ref_lun_id, host_lun_id) - - def test_find_mv_connections_for_vol_failed(self): - # no masking view info retrieved - device_id = self.data.volume_details[0]['volumeId'] - host_lun_id = self.rest.find_mv_connections_for_vol( - self.data.array, self.data.failed_resource, device_id) - self.assertIsNone(host_lun_id) - # no connection info received - with mock.patch.object(self.rest, 'get_resource', - return_value={'no_conn': 'no_info'}): - host_lun_id2 = self.rest.find_mv_connections_for_vol( - self.data.array, self.data.masking_view_name_f, device_id) - self.assertIsNone(host_lun_id2) - - def test_get_storage_groups_from_volume(self): - array = self.data.array - device_id = self.data.device_id - ref_list = self.data.volume_details[0]['storageGroupId'] - sg_list = self.rest.get_storage_groups_from_volume(array, device_id) - self.assertEqual(ref_list, sg_list) - - def test_get_num_vols_in_sg(self): - num_vol = self.rest.get_num_vols_in_sg( - self.data.array, self.data.defaultstoragegroup_name) - self.assertEqual(2, num_vol) - - def test_get_num_vols_in_sg_no_num(self): - with mock.patch.object(self.rest, 'get_storage_group', - return_value={}): - num_vol = self.rest.get_num_vols_in_sg( - self.data.array, self.data.defaultstoragegroup_name) - self.assertEqual(0, num_vol) - - def test_is_volume_in_storagegroup(self): - # True - array = self.data.array - device_id = self.data.device_id - storagegroup = 
self.data.defaultstoragegroup_name - is_vol1 = self.rest.is_volume_in_storagegroup( - array, device_id, storagegroup) - # False - with mock.patch.object(self.rest, 'get_storage_groups_from_volume', - return_value=[]): - is_vol2 = self.rest.is_volume_in_storagegroup( - array, device_id, storagegroup) - self.assertTrue(is_vol1) - self.assertFalse(is_vol2) - - def test_find_volume_device_number(self): - array = self.data.array - volume_name = self.data.volume_details[0]['volume_identifier'] - ref_device = self.data.device_id - device_number = self.rest.find_volume_device_id(array, volume_name) - self.assertEqual(ref_device, device_number) - - def test_find_volume_device_number_failed(self): - array = self.data.array - with mock.patch.object(self.rest, 'get_volume_list', - return_value=[]): - device_number = self.rest.find_volume_device_id( - array, 'name') - self.assertIsNone(device_number) - - def test_get_volume_success(self): - array = self.data.array - device_id = self.data.device_id - ref_volume = self.data.volume_details[0] - volume = self.rest.get_volume(array, device_id) - self.assertEqual(ref_volume, volume) - - def test_get_volume_failed(self): - array = self.data.array - device_id = self.data.failed_resource - self.assertRaises(exception.VolumeBackendAPIException, - self.rest.get_volume, - array, device_id) - - def test_find_volume_identifier(self): - array = self.data.array - device_id = self.data.device_id - ref_name = self.data.volume_details[0]['volume_identifier'] - vol_name = self.rest.find_volume_identifier(array, device_id) - self.assertEqual(ref_name, vol_name) - - def test_get_volume_size(self): - array = self.data.array - device_id = self.data.device_id - ref_size = self.data.test_volume.size - size = self.rest.get_size_of_device_on_array(array, device_id) - self.assertEqual(ref_size, size) - - def test_get_volume_size_exception(self): - array = self.data.array - device_id = self.data.device_id - with mock.patch.object(self.rest, 'get_volume', - 
return_value=None): - size = self.rest.get_size_of_device_on_array( - array, device_id) - self.assertIsNone(size) - - def test_get_portgroup(self): - array = self.data.array - pg_name = self.data.port_group_name_f - ref_pg = self.data.portgroup[0] - portgroup = self.rest.get_portgroup(array, pg_name) - self.assertEqual(ref_pg, portgroup) - - def test_get_port_ids(self): - array = self.data.array - pg_name = self.data.port_group_name_f - ref_ports = ["FA-1D:4"] - port_ids = self.rest.get_port_ids(array, pg_name) - self.assertEqual(ref_ports, port_ids) - - def test_get_port_ids_no_portgroup(self): - array = self.data.array - pg_name = self.data.port_group_name_f - with mock.patch.object(self.rest, 'get_portgroup', - return_value=None): - port_ids = self.rest.get_port_ids(array, pg_name) - self.assertFalse(port_ids) - - def test_get_port(self): - array = self.data.array - port_id = "FA-1D:4" - ref_port = self.data.port_list[0] - port = self.rest.get_port(array, port_id) - self.assertEqual(ref_port, port) - - def test_get_iscsi_ip_address_and_iqn(self): - array = self.data.array - port_id = "SE-4E:0" - ref_ip = [self.data.ip] - ref_iqn = self.data.initiator - ip_addresses, iqn = self.rest.get_iscsi_ip_address_and_iqn( - array, port_id) - self.assertEqual(ref_ip, ip_addresses) - self.assertEqual(ref_iqn, iqn) - - def test_get_iscsi_ip_address_and_iqn_no_port(self): - array = self.data.array - port_id = "SE-4E:0" - with mock.patch.object(self.rest, 'get_port', return_value=None): - ip_addresses, iqn = self.rest.get_iscsi_ip_address_and_iqn( - array, port_id) - self.assertIsNone(ip_addresses) - self.assertIsNone(iqn) - - def test_get_target_wwns(self): - array = self.data.array - pg_name = self.data.port_group_name_f - ref_wwns = [self.data.wwnn1] - target_wwns = self.rest.get_target_wwns(array, pg_name) - self.assertEqual(ref_wwns, target_wwns) - - def test_get_target_wwns_failed(self): - array = self.data.array - pg_name = self.data.port_group_name_f - with 
mock.patch.object(self.rest, 'get_port', - return_value=None): - target_wwns = self.rest.get_target_wwns(array, pg_name) - self.assertFalse(target_wwns) - - def test_get_initiator_group(self): - array = self.data.array - ig_name = self.data.initiatorgroup_name_f - ref_ig = self.data.inititiatorgroup[0] - response_ig = self.rest.get_initiator_group(array, ig_name) - self.assertEqual(ref_ig, response_ig) - - def test_get_initiator(self): - array = self.data.array - initiator_name = self.data.initiator - ref_initiator = self.data.initiator_list[1] - response_initiator = self.rest.get_initiator(array, initiator_name) - self.assertEqual(ref_initiator, response_initiator) - - def test_get_initiator_list(self): - array = self.data.array - with mock.patch.object(self.rest, 'get_resource', - return_value={'initiatorId': '1234'}): - init_list = self.rest.get_initiator_list(array) - self.assertIsNotNone(init_list) - - def test_get_initiator_list_none(self): - array = self.data.array - with mock.patch.object(self.rest, 'get_resource', return_value={}): - init_list = self.rest.get_initiator_list(array) - self.assertFalse(init_list) - - def test_get_in_use_initiator_list_from_array(self): - ref_list = self.data.initiator_list[2]['initiatorId'] - init_list = self.rest.get_in_use_initiator_list_from_array( - self.data.array) - self.assertEqual(ref_list, init_list) - - def test_get_in_use_initiator_list_from_array_failed(self): - array = self.data.array - with mock.patch.object(self.rest, 'get_initiator_list', - return_value=[]): - init_list = self.rest.get_in_use_initiator_list_from_array(array) - self.assertFalse(init_list) - - def test_get_initiator_group_from_initiator(self): - initiator = self.data.wwpn1 - ref_group = self.data.initiatorgroup_name_f - init_group = self.rest.get_initiator_group_from_initiator( - self.data.array, initiator) - self.assertEqual(ref_group, init_group) - - def test_get_initiator_group_from_initiator_failed(self): - initiator = self.data.wwpn1 - with 
mock.patch.object(self.rest, 'get_initiator', - return_value=None): - init_group = self.rest.get_initiator_group_from_initiator( - self.data.array, initiator) - self.assertIsNone(init_group) - with mock.patch.object(self.rest, 'get_initiator', - return_value={'name': 'no_host'}): - init_group = self.rest.get_initiator_group_from_initiator( - self.data.array, initiator) - self.assertIsNone(init_group) - - def test_create_initiator_group(self): - init_group_name = self.data.initiatorgroup_name_f - init_list = [self.data.wwpn1] - extra_specs = self.data.extra_specs - with mock.patch.object(self.rest, 'create_resource', - return_value=(202, self.data.job_list[0])): - payload = ({"executionOption": "ASYNCHRONOUS", - "hostId": init_group_name, "initiatorId": init_list}) - self.rest.create_initiator_group( - self.data.array, init_group_name, init_list, extra_specs) - self.rest.create_resource.assert_called_once_with( - self.data.array, 'sloprovisioning', 'host', payload) - - def test_delete_initiator_group(self): - with mock.patch.object(self.rest, 'delete_resource'): - self.rest.delete_initiator_group( - self.data.array, self.data.initiatorgroup_name_f) - self.rest.delete_resource.assert_called_once_with( - self.data.array, 'sloprovisioning', 'host', - self.data.initiatorgroup_name_f) - - def test_get_masking_view(self): - array = self.data.array - masking_view_name = self.data.masking_view_name_f - ref_mask_view = self.data.maskingview[0] - masking_view = self.rest.get_masking_view(array, masking_view_name) - self.assertEqual(ref_mask_view, masking_view) - - def test_get_masking_views_from_storage_group(self): - array = self.data.array - storagegroup_name = self.data.storagegroup_name_f - ref_mask_view = [self.data.masking_view_name_f] - masking_view = self.rest.get_masking_views_from_storage_group( - array, storagegroup_name) - self.assertEqual(ref_mask_view, masking_view) - - def test_get_masking_views_by_initiator_group(self): - array = self.data.array - 
initiatorgroup_name = self.data.initiatorgroup_name_f - ref_mask_view = [self.data.masking_view_name_f] - masking_view = self.rest.get_masking_views_by_initiator_group( - array, initiatorgroup_name) - self.assertEqual(ref_mask_view, masking_view) - - def test_get_masking_views_by_initiator_group_failed(self): - array = self.data.array - initiatorgroup_name = self.data.initiatorgroup_name_f - with mock.patch.object(self.rest, 'get_initiator_group', - return_value=None): - masking_view = self.rest.get_masking_views_by_initiator_group( - array, initiatorgroup_name) - self.assertFalse(masking_view) - with mock.patch.object(self.rest, 'get_initiator_group', - return_value={'name': 'no_mv'}): - masking_view = self.rest.get_masking_views_by_initiator_group( - array, initiatorgroup_name) - self.assertFalse(masking_view) - - def test_get_element_from_masking_view(self): - array = self.data.array - maskingview_name = self.data.masking_view_name_f - # storage group - ref_sg = self.data.storagegroup_name_f - storagegroup = self.rest.get_element_from_masking_view( - array, maskingview_name, storagegroup=True) - self.assertEqual(ref_sg, storagegroup) - # initiator group - ref_ig = self.data.initiatorgroup_name_f - initiatorgroup = self.rest.get_element_from_masking_view( - array, maskingview_name, host=True) - self.assertEqual(ref_ig, initiatorgroup) - # portgroup - ref_pg = self.data.port_group_name_f - portgroup = self.rest.get_element_from_masking_view( - array, maskingview_name, portgroup=True) - self.assertEqual(ref_pg, portgroup) - - def test_get_element_from_masking_view_failed(self): - array = self.data.array - maskingview_name = self.data.masking_view_name_f - # no element chosen - element = self.rest.get_element_from_masking_view( - array, maskingview_name) - self.assertIsNone(element) - # cannot retrieve maskingview - with mock.patch.object(self.rest, 'get_masking_view', - return_value=None): - self.assertRaises(exception.VolumeBackendAPIException, - 
self.rest.get_element_from_masking_view, - array, maskingview_name) - - def test_get_common_masking_views(self): - array = self.data.array - initiatorgroup = self.data.initiatorgroup_name_f - portgroup = self.data.port_group_name_f - ref_maskingview = self.data.masking_view_name_f - maskingview_list = self.rest.get_common_masking_views( - array, portgroup, initiatorgroup) - self.assertEqual(ref_maskingview, maskingview_list) - - def test_get_common_masking_views_none(self): - array = self.data.array - initiatorgroup = self.data.initiatorgroup_name_f - portgroup = self.data.port_group_name_f - with mock.patch.object(self.rest, 'get_masking_view_list', - return_value=[]): - maskingview_list = self.rest.get_common_masking_views( - array, portgroup, initiatorgroup) - self.assertFalse(maskingview_list) - - def test_create_masking_view(self): - maskingview_name = self.data.masking_view_name_f - storagegroup_name = self.data.storagegroup_name_f - port_group_name = self.data.port_group_name_f - init_group_name = self.data.initiatorgroup_name_f - extra_specs = self.data.extra_specs - with mock.patch.object(self.rest, 'create_resource', - return_value=(202, self.data.job_list[0])): - payload = ({"executionOption": "ASYNCHRONOUS", - "portGroupSelection": { - "useExistingPortGroupParam": { - "portGroupId": port_group_name}}, - "maskingViewId": maskingview_name, - "hostOrHostGroupSelection": { - "useExistingHostParam": { - "hostId": init_group_name}}, - "storageGroupSelection": { - "useExistingStorageGroupParam": { - "storageGroupId": storagegroup_name}}}) - self.rest.create_masking_view( - self.data.array, maskingview_name, storagegroup_name, - port_group_name, init_group_name, extra_specs) - self.rest.create_resource.assert_called_once_with( - self.data.array, 'sloprovisioning', 'maskingview', payload) - - def test_delete_masking_view(self): - with mock.patch.object(self.rest, 'delete_resource'): - self.rest.delete_masking_view( - self.data.array, 
self.data.masking_view_name_f) - self.rest.delete_resource.assert_called_once_with( - self.data.array, 'sloprovisioning', 'maskingview', - self.data.masking_view_name_f) - - def test_get_replication_capabilities(self): - ref_response = self.data.capabilities['symmetrixCapability'][1] - capabilities = self.rest.get_replication_capabilities(self.data.array) - self.assertEqual(ref_response, capabilities) - - def test_is_clone_licenced(self): - licence = self.rest.is_snapvx_licensed(self.data.array) - self.assertTrue(licence) - false_response = {'rdfCapable': True, - 'snapVxCapable': False, - 'symmetrixId': '000197800123'} - with mock.patch.object(self.rest, 'get_replication_capabilities', - return_value=false_response): - licence2 = self.rest.is_snapvx_licensed(self.data.array) - self.assertFalse(licence2) - - def test_is_clone_licenced_error(self): - with mock.patch.object(self.rest, 'get_replication_capabilities', - return_value=None): - licence3 = self.rest.is_snapvx_licensed(self.data.array) - self.assertFalse(licence3) - - def test_create_volume_snap(self): - snap_name = (self.data.volume_snap_vx - ['snapshotSrcs'][0]['snapshotName']) - device_id = self.data.device_id - extra_specs = self.data.extra_specs - payload = {"deviceNameListSource": [{"name": device_id}], - "bothSides": 'false', "star": 'false', - "force": 'false'} - resource_type = 'snapshot/%(snap)s' % {'snap': snap_name} - with mock.patch.object(self.rest, 'create_resource', - return_value=(202, self.data.job_list[0])): - self.rest.create_volume_snap( - self.data.array, snap_name, device_id, extra_specs) - self.rest.create_resource.assert_called_once_with( - self.data.array, 'replication', resource_type, - payload, private='/private') - - def test_modify_volume_snap(self): - array = self.data.array - source_id = self.data.device_id - target_id = (self.data.volume_snap_vx - ['snapshotSrcs'][0]['linkedDevices'][0]['targetDevice']) - snap_name = (self.data.volume_snap_vx - 
['snapshotSrcs'][0]['snapshotName']) - extra_specs = self.data.extra_specs - payload = {"deviceNameListSource": [{"name": source_id}], - "deviceNameListTarget": [ - {"name": target_id}], - "copy": 'true', "action": "", - "star": 'false', "force": 'false', - "exact": 'false', "remote": 'false', - "symforce": 'false', "nocopy": 'false'} - with mock.patch.object( - self.rest, 'modify_resource', return_value=( - 202, self.data.job_list[0])) as mock_modify: - # link - payload["action"] = "Link" - self.rest.modify_volume_snap( - array, source_id, target_id, snap_name, extra_specs, link=True) - self.rest.modify_resource.assert_called_once_with( - array, 'replication', 'snapshot', payload, - resource_name=snap_name, private='/private') - # unlink - mock_modify.reset_mock() - payload["action"] = "Unlink" - self.rest.modify_volume_snap( - array, source_id, target_id, snap_name, - extra_specs, unlink=True) - self.rest.modify_resource.assert_called_once_with( - array, 'replication', 'snapshot', payload, - resource_name=snap_name, private='/private') - # none selected - mock_modify.reset_mock() - self.rest.modify_volume_snap( - array, source_id, target_id, snap_name, - extra_specs) - self.rest.modify_resource.assert_not_called() - - def test_delete_volume_snap(self): - array = self.data.array - snap_name = (self.data.volume_snap_vx - ['snapshotSrcs'][0]['snapshotName']) - source_device_id = self.data.device_id - payload = {"deviceNameListSource": [{"name": source_device_id}]} - with mock.patch.object(self.rest, 'delete_resource'): - self.rest.delete_volume_snap(array, snap_name, source_device_id) - self.rest.delete_resource.assert_called_once_with( - array, 'replication', 'snapshot', snap_name, - payload=payload, private='/private') - - def test_get_volume_snap_info(self): - array = self.data.array - source_device_id = self.data.device_id - ref_snap_info = self.data.volume_snap_vx - snap_info = self.rest.get_volume_snap_info(array, source_device_id) - 
self.assertEqual(ref_snap_info, snap_info) - - def test_get_volume_snap(self): - array = self.data.array - snap_name = (self.data.volume_snap_vx - ['snapshotSrcs'][0]['snapshotName']) - device_id = self.data.device_id - ref_snap = self.data.volume_snap_vx['snapshotSrcs'][0] - snap = self.rest.get_volume_snap(array, device_id, snap_name) - self.assertEqual(ref_snap, snap) - - def test_get_volume_snap_none(self): - array = self.data.array - snap_name = (self.data.volume_snap_vx - ['snapshotSrcs'][0]['snapshotName']) - device_id = self.data.device_id - with mock.patch.object(self.rest, 'get_volume_snap_info', - return_value=None): - snap = self.rest.get_volume_snap(array, device_id, snap_name) - self.assertIsNone(snap) - with mock.patch.object(self.rest, 'get_volume_snap_info', - return_value={'snapshotSrcs': []}): - snap = self.rest.get_volume_snap(array, device_id, snap_name) - self.assertIsNone(snap) - - def test_get_sync_session(self): - array = self.data.array - source_id = self.data.device_id - target_id = (self.data.volume_snap_vx - ['snapshotSrcs'][0]['linkedDevices'][0]['targetDevice']) - snap_name = (self.data.volume_snap_vx - ['snapshotSrcs'][0]['snapshotName']) - ref_sync = (self.data.volume_snap_vx - ['snapshotSrcs'][0]['linkedDevices'][0]) - sync = self.rest.get_sync_session( - array, source_id, snap_name, target_id) - self.assertEqual(ref_sync, sync) - - def test_find_snap_vx_sessions(self): - array = self.data.array - source_id = self.data.device_id - ref_sessions = [{'snap_name': 'temp-1', - 'source_vol': self.data.device_id, - 'target_vol_list': [self.data.device_id2]}, - {'snap_name': 'temp-1', - 'source_vol': self.data.device_id, - 'target_vol_list': [self.data.device_id2]}] - sessions = self.rest.find_snap_vx_sessions(array, source_id) - self.assertEqual(ref_sessions, sessions) - - def test_find_snap_vx_sessions_tgt_only(self): - array = self.data.array - source_id = self.data.device_id - ref_sessions = [{'snap_name': 'temp-1', - 'source_vol': 
self.data.device_id, - 'target_vol_list': [self.data.device_id2]}] - sessions = self.rest.find_snap_vx_sessions( - array, source_id, tgt_only=True) - self.assertEqual(ref_sessions, sessions) - - def test_update_storagegroup_qos(self): - sg_qos = {"srp": self.data.srp, "num_of_vols": 2, "cap_gb": 2, - "storageGroupId": "OS-QOS-SG", - "slo": self.data.slo, "workload": self.data.workload, - "hostIOLimit": {"host_io_limit_io_sec": "4000", - "dynamicDistribution": "Always", - "host_io_limit_mb_sec": "4000"}} - self.data.sg_details.append(sg_qos) - array = self.data.array - extra_specs = self.data.extra_specs - extra_specs['qos'] = { - 'maxIOPS': '4000', 'DistributionType': 'Always'} - return_value = self.rest.update_storagegroup_qos( - array, "OS-QOS-SG", extra_specs) - self.assertEqual(False, return_value) - extra_specs['qos'] = { - 'DistributionType': 'onFailure', 'maxMBPS': '4000'} - return_value = self.rest.update_storagegroup_qos( - array, "OS-QOS-SG", extra_specs) - self.assertTrue(return_value) - - def test_update_storagegroup_qos_exception(self): - array = self.data.array - storage_group = self.data.defaultstoragegroup_name - extra_specs = self.data.extra_specs - extra_specs['qos'] = { - 'maxIOPS': '4000', 'DistributionType': 'Wrong', 'maxMBPS': '4000'} - with mock.patch.object(self.rest, 'check_status_code_success', - side_effect=[None, None, None, Exception]): - self.assertRaises(exception.VolumeBackendAPIException, - self.rest.update_storagegroup_qos, array, - storage_group, extra_specs) - extra_specs['qos']['DistributionType'] = 'Always' - return_value = self.rest.update_storagegroup_qos( - array, "OS-QOS-SG", extra_specs) - self.assertFalse(return_value) - - def test_get_rdf_group(self): - with mock.patch.object(self.rest, 'get_resource') as mock_get: - self.rest.get_rdf_group(self.data.array, self.data.rdf_group_no) - mock_get.assert_called_once_with( - self.data.array, 'replication', 'rdf_group', - self.data.rdf_group_no) - - def 
test_get_rdf_group_list(self): - rdf_list = self.rest.get_rdf_group_list(self.data.array) - self.assertEqual(self.data.rdf_group_list, rdf_list) - - def test_get_rdf_group_volume(self): - with mock.patch.object(self.rest, 'get_resource') as mock_get: - self.rest.get_rdf_group_volume( - self.data.array, self.data.rdf_group_no, self.data.device_id) - mock_get.assert_called_once_with( - self.data.array, 'replication', 'rdf_group', "70/volume/00001") - - def test_are_vols_rdf_paired(self): - are_vols1, local_state, pair_state = self.rest.are_vols_rdf_paired( - self.data.array, self.data.remote_array, self.data.device_id, - self.data.device_id2, self.data.rdf_group_no) - self.assertTrue(are_vols1) - are_vols2, local_state, pair_state = self.rest.are_vols_rdf_paired( - self.data.array, "00012345", self.data.device_id, - self.data.device_id2, self.data.rdf_group_no) - self.assertFalse(are_vols2) - with mock.patch.object(self.rest, "get_rdf_group_volume", - return_value=None): - are_vols3, local, pair = self.rest.are_vols_rdf_paired( - self.data.array, self.data.remote_array, self.data.device_id, - self.data.device_id2, self.data.rdf_group_no) - self.assertFalse(are_vols3) - - def test_get_rdf_group_number(self): - rdfg_num = self.rest.get_rdf_group_number( - self.data.array, self.data.rdf_group_name) - self.assertEqual(self.data.rdf_group_no, rdfg_num) - with mock.patch.object(self.rest, 'get_rdf_group_list', - return_value=None): - rdfg_num2 = self.rest.get_rdf_group_number( - self.data.array, self.data.rdf_group_name) - self.assertIsNone(rdfg_num2) - with mock.patch.object(self.rest, 'get_rdf_group', - return_value=None): - rdfg_num3 = self.rest.get_rdf_group_number( - self.data.array, self.data.rdf_group_name) - self.assertIsNone(rdfg_num3) - - def test_create_rdf_device_pair(self): - ref_dict = {'array': self.data.remote_array, - 'device_id': self.data.device_id2} - rdf_dict = self.rest.create_rdf_device_pair( - self.data.array, self.data.device_id, 
self.data.rdf_group_no, - self.data.device_id2, self.data.remote_array, "OS-2", - self.data.extra_specs) - self.assertEqual(ref_dict, rdf_dict) - - def test_modify_rdf_device_pair(self): - resource_name = "70/volume/00001" - common_opts = {"force": 'false', - "symForce": 'false', - "star": 'false', - "hop2": 'false', - "bypass": 'false'} - split_opts = deepcopy(common_opts) - split_opts.update({"immediate": 'false'}) - split_payload = {"action": "Split", - 'executionOption': 'ASYNCHRONOUS', - "split": split_opts} - - failover_opts = deepcopy(common_opts) - failover_opts.update({"establish": 'true', - "restore": 'false', - "remote": 'false', - "immediate": 'false'}) - failover_payload = {"action": "Failover", - 'executionOption': 'ASYNCHRONOUS', - "failover": failover_opts} - with mock.patch.object( - self.rest, "modify_resource", - return_value=(200, self.data.job_list[0])) as mock_mod: - self.rest.modify_rdf_device_pair( - self.data.array, self.data.device_id, self.data.rdf_group_no, - self.data.extra_specs, split=True) - mock_mod.assert_called_once_with( - self.data.array, 'replication', 'rdf_group', - split_payload, resource_name=resource_name, - private='/private') - mock_mod.reset_mock() - self.rest.modify_rdf_device_pair( - self.data.array, self.data.device_id, self.data.rdf_group_no, - self.data.extra_specs, split=False) - mock_mod.assert_called_once_with( - self.data.array, 'replication', 'rdf_group', - failover_payload, resource_name=resource_name, - private='/private') - - def test_get_storage_group_rep(self): - array = self.data.array - source_group_name = self.data.storagegroup_name_source - ref_details = self.data.sg_details_rep[0] - volume_group = self.rest.get_storage_group_rep(array, - source_group_name) - self.assertEqual(volume_group, ref_details) - - def test_get_volumes_in_storage_group(self): - array = self.data.array - storagegroup_name = self.data.storagegroup_name_source - ref_volumes = [self.data.device_id, self.data.device_id2] - 
volume_list = self.rest.get_volumes_in_storage_group( - array, storagegroup_name) - self.assertEqual(ref_volumes, volume_list) - - def test_create_storagegroup_snap(self): - array = self.data.array - extra_specs = self.data.extra_specs - source_group = self.data.storagegroup_name_source - snap_name = self.data.group_snapshot_name - with mock.patch.object( - self.rest, "create_storagegroup_snap") as mock_create: - self.rest.create_storagegroup_snap( - array, source_group, snap_name, extra_specs) - mock_create.assert_called_once_with(array, - source_group, - snap_name, - extra_specs) - - -class VMAXProvisionTest(test.TestCase): - def setUp(self): - self.data = VMAXCommonData() - - super(VMAXProvisionTest, self).setUp() - config_group = 'ProvisionTests' - self.fake_xml = FakeXML().create_fake_config_file( - config_group, self.data.port_group_name_i) - configuration = FakeConfiguration(self.fake_xml, config_group) - rest.VMAXRest._establish_rest_session = mock.Mock( - return_value=FakeRequestsSession()) - provision.UNLINK_INTERVAL = 0 - driver = iscsi.VMAXISCSIDriver(configuration=configuration) - self.driver = driver - self.common = self.driver.common - self.provision = self.common.provision - self.utils = self.common.utils - self.rest = self.common.rest - - def test_create_storage_group(self): - array = self.data.array - storagegroup_name = self.data.storagegroup_name_f - srp = self.data.srp - slo = self.data.slo - workload = self.data.workload - extra_specs = self.data.extra_specs - storagegroup = self.provision.create_storage_group( - array, storagegroup_name, srp, slo, workload, extra_specs) - self.assertEqual(storagegroup_name, storagegroup) - - def test_create_volume_from_sg(self): - array = self.data.array - storagegroup_name = self.data.storagegroup_name_f - volumeId = self.data.test_volume.id - volume_name = self.utils.get_volume_element_name(volumeId) - volume_size = self.data.test_volume.size - extra_specs = self.data.extra_specs - ref_dict = 
self.data.provider_location - volume_dict = self.provision.create_volume_from_sg( - array, volume_name, storagegroup_name, volume_size, extra_specs) - self.assertEqual(ref_dict, volume_dict) - - def test_delete_volume_from_srp(self): - array = self.data.array - device_id = self.data.device_id - volume_name = self.data.volume_details[0]['volume_identifier'] - with mock.patch.object(self.provision.rest, 'delete_volume'): - self.provision.delete_volume_from_srp( - array, device_id, volume_name) - self.provision.rest.delete_volume.assert_called_once_with( - array, device_id) - - def test_create_volume_snap_vx(self): - array = self.data.array - source_device_id = self.data.device_id - snap_name = self.data.snap_location['snap_name'] - extra_specs = self.data.extra_specs - with mock.patch.object(self.provision.rest, 'create_volume_snap'): - self.provision.create_volume_snapvx( - array, source_device_id, snap_name, extra_specs) - self.provision.rest.create_volume_snap.assert_called_once_with( - array, snap_name, source_device_id, extra_specs) - - def test_create_volume_replica_create_snap_true(self): - array = self.data.array - source_device_id = self.data.device_id - target_device_id = self.data.device_id2 - snap_name = self.data.snap_location['snap_name'] - extra_specs = self.data.extra_specs - with mock.patch.object(self.provision, 'create_volume_snapvx'): - with mock.patch.object(self.provision.rest, 'modify_volume_snap'): - self.provision.create_volume_replica( - array, source_device_id, target_device_id, - snap_name, extra_specs, create_snap=True) - self.provision.rest.modify_volume_snap.assert_called_once_with( - array, source_device_id, target_device_id, snap_name, - extra_specs, link=True) - self.provision.create_volume_snapvx.assert_called_once_with( - array, source_device_id, snap_name, extra_specs) - - def test_create_volume_replica_create_snap_false(self): - array = self.data.array - source_device_id = self.data.device_id - target_device_id = 
self.data.device_id2 - snap_name = self.data.snap_location['snap_name'] - extra_specs = self.data.extra_specs - with mock.patch.object(self.provision, 'create_volume_snapvx'): - with mock.patch.object(self.provision.rest, 'modify_volume_snap'): - self.provision.create_volume_replica( - array, source_device_id, target_device_id, - snap_name, extra_specs, create_snap=False) - self.provision.rest.modify_volume_snap.assert_called_once_with( - array, source_device_id, target_device_id, snap_name, - extra_specs, link=True) - self.provision.create_volume_snapvx.assert_not_called() - - def test_break_replication_relationship(self): - array = self.data.array - source_device_id = self.data.device_id - target_device_id = self.data.device_id2 - snap_name = self.data.snap_location['snap_name'] - extra_specs = self.data.extra_specs - with mock.patch.object(self.provision.rest, 'modify_volume_snap'): - self.provision.break_replication_relationship( - array, target_device_id, source_device_id, snap_name, - extra_specs) - (self.provision.rest.modify_volume_snap. 
- assert_called_once_with( - array, source_device_id, target_device_id, - snap_name, extra_specs, unlink=True)) - - def test_unlink_volume(self): - with mock.patch.object(self.rest, 'modify_volume_snap') as mock_mod: - self.provision._unlink_volume( - self.data.array, self.data.device_id, self.data.device_id2, - self.data.snap_location['snap_name'], self.data.extra_specs) - mock_mod.assert_called_once_with( - self.data.array, self.data.device_id, self.data.device_id2, - self.data.snap_location['snap_name'], self.data.extra_specs, - unlink=True) - - def test_unlink_volume_exception(self): - with mock.patch.object( - self.rest, 'modify_volume_snap', side_effect=[ - exception.VolumeBackendAPIException(data=''), ''] - ) as mock_mod: - self.provision._unlink_volume( - self.data.array, self.data.device_id, self.data.device_id2, - self.data.snap_location['snap_name'], self.data.extra_specs) - self.assertEqual(2, mock_mod.call_count) - - def test_delete_volume_snap(self): - array = self.data.array - source_device_id = self.data.device_id - snap_name = self.data.snap_location['snap_name'] - with mock.patch.object(self.provision.rest, 'delete_volume_snap'): - self.provision.delete_volume_snap( - array, snap_name, source_device_id) - self.provision.rest.delete_volume_snap.assert_called_once_with( - array, snap_name, source_device_id) - - def test_extend_volume(self): - array = self.data.array - device_id = self.data.device_id - new_size = '3' - extra_specs = self.data.extra_specs - with mock.patch.object(self.provision.rest, 'extend_volume'): - self.provision.extend_volume(array, device_id, new_size, - extra_specs) - self.provision.rest.extend_volume.assert_called_once_with( - array, device_id, new_size, extra_specs) - - def test_get_srp_pool_stats_no_wlp(self): - array = self.data.array - array_info = self.common.pool_info['arrays_info'][0] - ref_stats = (self.data.srp_details['total_usable_cap_gb'], - float(self.data.srp_details['total_usable_cap_gb'] - - 
self.data.srp_details['total_allocated_cap_gb']), - self.data.srp_details['total_subscribed_cap_gb'], - self.data.srp_details['reserved_cap_percent'], False) - with mock.patch.object(self.provision, - '_get_remaining_slo_capacity_wlp', - return_value=-1): - stats = self.provision.get_srp_pool_stats(array, array_info) - self.assertEqual(ref_stats, stats) - - def test_get_srp_pool_stats_wlp_enabled(self): - array = self.data.array - array_info = self.common.pool_info['arrays_info'][0] - srp = self.data.srp - headroom_capacity = self.provision.rest.get_headroom_capacity( - array, srp, array_info['SLO'], array_info['Workload']) - ref_stats = (self.data.srp_details['total_usable_cap_gb'], - float(headroom_capacity - - self.data.srp_details['total_allocated_cap_gb']), - self.data.srp_details['total_subscribed_cap_gb'], - self.data.srp_details['reserved_cap_percent'], True) - stats = self.provision.get_srp_pool_stats(array, array_info) - self.assertEqual(ref_stats, stats) - - def test_get_srp_pool_stats_errors(self): - # cannot retrieve srp - array = self.data.array - array_info = {'srpName': self.data.failed_resource} - ref_stats = (0, 0, 0, 0, False) - stats = self.provision.get_srp_pool_stats(array, array_info) - self.assertEqual(ref_stats, stats) - # cannot report on all stats - with mock.patch.object(self.provision.rest, 'get_srp_by_name', - return_value={'total_usable_cap_gb': 33}): - with mock.patch.object(self.provision, - '_get_remaining_slo_capacity_wlp', - return_value=(-1)): - ref_stats = (33, 0, 0, 0, False) - stats = self.provision.get_srp_pool_stats(array, array_info) - self.assertEqual(ref_stats, stats) - - def test_get_remaining_slo_capacity_wlp(self): - array = self.data.array - array_info = self.common.pool_info['arrays_info'][0] - srp = self.data.srp - ref_capacity = self.provision.rest.get_headroom_capacity( - array, srp, array_info['SLO'], array_info['Workload']) - remaining_capacity = ( - self.provision._get_remaining_slo_capacity_wlp( - array, srp, 
array_info)) - self.assertEqual(ref_capacity, remaining_capacity) - - def test_get_remaining_slo_capacity_no_slo_or_wlp(self): - array = self.data.array - array_info = self.common.pool_info['arrays_info'][0] - srp = self.data.srp - ref_capacity = -1 - with mock.patch.object(self.provision.rest, 'get_headroom_capacity', - return_value=None): - remaining_capacity = ( - self.provision._get_remaining_slo_capacity_wlp( - array, srp, {'SLO': None})) - self.assertEqual(ref_capacity, remaining_capacity) - self.provision.rest.get_headroom_capacity.assert_not_called() - remaining_capacity = ( - self.provision._get_remaining_slo_capacity_wlp( - array, srp, array_info)) - self.assertEqual(ref_capacity, remaining_capacity) - - def test_verify_slo_workload_true(self): - # with slo and workload - array = self.data.array - slo = self.data.slo - workload = self.data.workload - srp = self.data.srp - valid_slo, valid_workload = self.provision.verify_slo_workload( - array, slo, workload, srp) - self.assertTrue(valid_slo) - self.assertTrue(valid_workload) - # slo and workload = none - slo2 = None - workload2 = None - valid_slo2, valid_workload2 = self.provision.verify_slo_workload( - array, slo2, workload2, srp) - self.assertTrue(valid_slo2) - self.assertTrue(valid_workload2) - slo2 = None - workload2 = 'None' - valid_slo2, valid_workload2 = self.provision.verify_slo_workload( - array, slo2, workload2, srp) - self.assertTrue(valid_slo2) - self.assertTrue(valid_workload2) - - def test_verify_slo_workload_false(self): - # Both wrong - array = self.data.array - slo = 'Diamante' - workload = 'DSSS' - srp = self.data.srp - valid_slo, valid_workload = self.provision.verify_slo_workload( - array, slo, workload, srp) - self.assertFalse(valid_slo) - self.assertFalse(valid_workload) - # Workload set, no slo set - valid_slo, valid_workload = self.provision.verify_slo_workload( - array, None, self.data.workload, srp) - self.assertTrue(valid_slo) - self.assertFalse(valid_workload) - - def 
test_get_slo_workload_settings_from_storage_group(self): - ref_settings = "Diamond+DSS" - sg_slo_settings = ( - self.provision.get_slo_workload_settings_from_storage_group( - self.data.array, self.data.defaultstoragegroup_name)) - self.assertEqual(ref_settings, sg_slo_settings) - # No workload - with mock.patch.object(self.provision.rest, 'get_storage_group', - return_value={'slo': 'Silver'}): - ref_settings2 = "Silver+NONE" - sg_slo_settings2 = ( - self.provision.get_slo_workload_settings_from_storage_group( - self.data.array, 'no_workload_sg')) - self.assertEqual(ref_settings2, sg_slo_settings2) - - def test_break_rdf_relationship(self): - array = self.data.array - device_id = self.data.device_id - target_device = self.data.device_id2 - rdf_group_name = self.data.rdf_group_name - rep_extra_specs = self.data.rep_extra_specs - with mock.patch.object( - self.provision.rest, 'modify_rdf_device_pair') as mod_rdf: - with mock.patch.object( - self.provision.rest, 'delete_rdf_pair') as del_rdf: - self.provision.break_rdf_relationship( - array, device_id, target_device, - rdf_group_name, rep_extra_specs, "Synchronized") - mod_rdf.assert_called_once_with( - array, device_id, rdf_group_name, rep_extra_specs, - split=True) - del_rdf.assert_called_once_with( - array, device_id, rdf_group_name) - - def test_failover_volume(self): - array = self.data.array - device_id = self.data.device_id - rdf_group_name = self.data.rdf_group_name - extra_specs = self.data.extra_specs - with mock.patch.object( - self.provision.rest, 'modify_rdf_device_pair') as mod_rdf: - self.provision.failover_volume( - array, device_id, rdf_group_name, - extra_specs, '', True) - mod_rdf.assert_called_once_with( - array, device_id, rdf_group_name, extra_specs, - split=False) - mod_rdf.reset_mock() - self.provision.failover_volume( - array, device_id, rdf_group_name, - extra_specs, '', False) - mod_rdf.assert_called_once_with( - array, device_id, rdf_group_name, extra_specs, - split=False) - - def 
test_create_volume_group_success(self): - array = self.data.array - group_name = self.data.storagegroup_name_source - extra_specs = self.data.extra_specs - ref_value = self.data.storagegroup_name_source - storagegroup = self.provision.create_volume_group(array, - group_name, - extra_specs) - self.assertEqual(ref_value, storagegroup) - - def test_create_group_replica(self): - array = self.data.array - source_group = self.data.storagegroup_name_source - snap_name = self.data.group_snapshot_name - extra_specs = self.data.extra_specs - with mock.patch.object( - self.provision, - 'create_group_replica') as mock_create_replica: - self.provision.create_group_replica( - array, source_group, snap_name, extra_specs) - mock_create_replica.assert_called_once_with( - array, source_group, snap_name, extra_specs) - - def test_delete_group_replica(self): - array = self.data.array - snap_name = self.data.group_snapshot_name - source_group_name = self.data.storagegroup_name_source - with mock.patch.object( - self.provision, - 'delete_group_replica') as mock_delete_replica: - self.provision.delete_group_replica(array, - snap_name, - source_group_name) - mock_delete_replica.assert_called_once_with( - array, snap_name, source_group_name) - - def test_link_and_break_replica(self): - array = self.data.array - source_group_name = self.data.storagegroup_name_source - target_group_name = self.data.target_group_name - snap_name = self.data.group_snapshot_name - extra_specs = self.data.extra_specs - deleteSnapshot = False - with mock.patch.object( - self.provision, - 'link_and_break_replica') as mock_link_and_break_replica: - self.provision.link_and_break_replica( - array, source_group_name, - target_group_name, snap_name, - extra_specs, deleteSnapshot) - mock_link_and_break_replica.assert_called_once_with( - array, source_group_name, - target_group_name, snap_name, - extra_specs, deleteSnapshot) - - def test_unlink_group(self): - with mock.patch.object(self.rest, - 
'modify_storagegroup_snap') as mock_mod: - self.provision._unlink_group( - self.data.array, self.data.storagegroup_name_source, - self.data.target_group_name, - self.data.group_snapshot_name, self.data.extra_specs) - mock_mod.assert_called_once_with( - self.data.array, self.data.storagegroup_name_source, - self.data.target_group_name, - self.data.group_snapshot_name, self.data.extra_specs, - unlink=True) - - -class VMAXCommonTest(test.TestCase): - def setUp(self): - self.data = VMAXCommonData() - - super(VMAXCommonTest, self).setUp() - config_group = 'CommonTests' - self.fake_xml = FakeXML().create_fake_config_file( - config_group, self.data.port_group_name_f) - configuration = FakeConfiguration(self.fake_xml, config_group, - 1, 1) - rest.VMAXRest._establish_rest_session = mock.Mock( - return_value=FakeRequestsSession()) - driver = fc.VMAXFCDriver(configuration=configuration) - self.driver = driver - self.common = self.driver.common - self.masking = self.common.masking - self.provision = self.common.provision - self.rest = self.common.rest - self.utils = self.common.utils - self.utils.get_volumetype_extra_specs = ( - mock.Mock(return_value=self.data.vol_type_extra_specs)) - - @mock.patch.object(rest.VMAXRest, - 'set_rest_credentials') - @mock.patch.object(common.VMAXCommon, - '_get_slo_workload_combinations', - return_value=[]) - @mock.patch.object(utils.VMAXUtils, - 'parse_file_to_get_array_map', - return_value=[]) - def test_gather_info_no_opts(self, mock_parse, mock_combo, mock_rest): - configuration = FakeConfiguration(None, 'config_group', None, None) - fc.VMAXFCDriver(configuration=configuration) - - def test_get_slo_workload_combinations_success(self): - array_info = self.utils.parse_file_to_get_array_map( - self.common.pool_info['config_file']) - finalarrayinfolist = self.common._get_slo_workload_combinations( - array_info) - self.assertTrue(len(finalarrayinfolist) > 1) - - def test_get_slo_workload_combinations_failed(self): - array_info = {} - 
self.assertRaises(exception.VolumeBackendAPIException, - self.common._get_slo_workload_combinations, - array_info) - - def test_create_volume(self): - ref_model_update = ( - {'provider_location': six.text_type(self.data.provider_location)}) - model_update = self.common.create_volume(self.data.test_volume) - self.assertEqual(ref_model_update, model_update) - - def test_create_volume_from_snapshot(self): - ref_model_update = ( - {'provider_location': six.text_type( - self.data.provider_location)}) - model_update = self.common.create_volume_from_snapshot( - self.data.test_clone_volume, self.data.test_snapshot) - self.assertEqual(ref_model_update, model_update) - - def test_cloned_volume(self): - ref_model_update = ( - {'provider_location': six.text_type( - self.data.provider_location)}) - model_update = self.common.create_cloned_volume( - self.data.test_clone_volume, self.data.test_volume) - self.assertEqual(ref_model_update, model_update) - - def test_delete_volume(self): - with mock.patch.object(self.common, '_delete_volume'): - self.common.delete_volume(self.data.test_volume) - self.common._delete_volume.assert_called_once_with( - self.data.test_volume) - - def test_create_snapshot(self): - ref_model_update = ( - {'provider_location': six.text_type( - self.data.snap_location)}) - model_update = self.common.create_snapshot( - self.data.test_snapshot, self.data.test_volume) - self.assertEqual(ref_model_update, model_update) - - def test_delete_snapshot(self): - snap_name = self.data.snap_location['snap_name'] - sourcedevice_id = self.data.snap_location['source_id'] - with mock.patch.object(self.provision, 'delete_volume_snap'): - self.common.delete_snapshot(self.data.test_snapshot, - self.data.test_volume) - self.provision.delete_volume_snap.assert_called_once_with( - self.data.array, snap_name, sourcedevice_id) - - def test_delete_snapshot_not_found(self): - with mock.patch.object(self.common, '_parse_snap_info', - return_value=(None, None)): - with 
mock.patch.object(self.provision, 'delete_volume_snap'): - self.common.delete_snapshot(self.data.test_snapshot, - self.data.test_volume) - self.provision.delete_volume_snap.assert_not_called() - - def test_remove_members(self): - array = self.data.array - device_id = self.data.device_id - volume = self.data.test_volume - volume_name = self.data.test_volume.name - extra_specs = self.data.extra_specs - with mock.patch.object(self.masking, - 'remove_and_reset_members') as mock_rm: - self.common._remove_members(array, volume, device_id, - extra_specs, self.data.connector) - mock_rm.assert_called_once_with( - array, device_id, volume_name, - extra_specs, True, self.data.connector) - - def test_unmap_lun(self): - array = self.data.array - device_id = self.data.device_id - volume = self.data.test_volume - extra_specs = deepcopy(self.data.extra_specs_intervals_set) - extra_specs['port_group_name'] = self.data.port_group_name_f - connector = self.data.connector - with mock.patch.object(self.common, '_remove_members'): - self.common._unmap_lun(volume, connector) - self.common._remove_members.assert_called_once_with( - array, volume, device_id, extra_specs, connector) - - def test_unmap_lun_not_mapped(self): - volume = self.data.test_volume - connector = self.data.connector - with mock.patch.object(self.common, 'find_host_lun_id', - return_value=({}, False, [])): - with mock.patch.object(self.common, '_remove_members'): - self.common._unmap_lun(volume, connector) - self.common._remove_members.assert_not_called() - - def test_initialize_connection_already_mapped(self): - volume = self.data.test_volume - connector = self.data.connector - host_lun = (self.data.maskingview[0]['maskingViewConnection'][0] - ['host_lun_address']) - ref_dict = {'hostlunid': int(host_lun, 16), - 'maskingview': self.data.masking_view_name_f, - 'array': self.data.array, - 'device_id': self.data.device_id} - device_info_dict = self.common.initialize_connection(volume, connector) - 
self.assertEqual(ref_dict, device_info_dict) - - def test_initialize_connection_not_mapped(self): - volume = self.data.test_volume - connector = self.data.connector - extra_specs = deepcopy(self.data.extra_specs_intervals_set) - extra_specs['port_group_name'] = self.data.port_group_name_f - masking_view_dict = self.common._populate_masking_dict( - volume, connector, extra_specs) - with mock.patch.object(self.common, 'find_host_lun_id', - return_value=({}, False, [])): - with mock.patch.object( - self.common, '_attach_volume', return_value=( - {}, self.data.port_group_name_f)): - device_info_dict = self.common.initialize_connection(volume, - connector) - self.assertEqual({}, device_info_dict) - self.common._attach_volume.assert_called_once_with( - volume, connector, extra_specs, masking_view_dict, False) - - def test_attach_volume_success(self): - volume = self.data.test_volume - connector = self.data.connector - extra_specs = deepcopy(self.data.extra_specs) - extra_specs['port_group_name'] = self.data.port_group_name_f - masking_view_dict = self.common._populate_masking_dict( - volume, connector, extra_specs) - host_lun = (self.data.maskingview[0]['maskingViewConnection'][0] - ['host_lun_address']) - ref_dict = {'hostlunid': int(host_lun, 16), - 'maskingview': self.data.masking_view_name_f, - 'array': self.data.array, - 'device_id': self.data.device_id} - with mock.patch.object(self.masking, 'setup_masking_view', - return_value={ - 'port_group_name': - self.data.port_group_name_f}): - device_info_dict, pg = self.common._attach_volume( - volume, connector, extra_specs, masking_view_dict) - self.assertEqual(ref_dict, device_info_dict) - - def test_attach_volume_failed(self): - volume = self.data.test_volume - connector = self.data.connector - extra_specs = deepcopy(self.data.extra_specs) - extra_specs['port_group_name'] = self.data.port_group_name_f - masking_view_dict = self.common._populate_masking_dict( - volume, connector, extra_specs) - with 
mock.patch.object(self.masking, 'setup_masking_view', - return_value={}): - with mock.patch.object(self.common, 'find_host_lun_id', - return_value=({}, False, [])): - with mock.patch.object( - self.masking, - 'check_if_rollback_action_for_masking_required'): - self.assertRaises(exception.VolumeBackendAPIException, - self.common._attach_volume, volume, - connector, extra_specs, - masking_view_dict) - device_id = self.data.device_id - (self.masking. - check_if_rollback_action_for_masking_required. - assert_called_once_with(self.data.array, device_id, {})) - - def test_terminate_connection(self): - volume = self.data.test_volume - connector = self.data.connector - with mock.patch.object(self.common, '_unmap_lun'): - self.common.terminate_connection(volume, connector) - self.common._unmap_lun.assert_called_once_with( - volume, connector) - - def test_extend_volume_success(self): - volume = self.data.test_volume - array = self.data.array - device_id = self.data.device_id - new_size = self.data.test_volume.size - ref_extra_specs = deepcopy(self.data.extra_specs_intervals_set) - ref_extra_specs['port_group_name'] = self.data.port_group_name_f - with mock.patch.object(self.common, '_sync_check'): - with mock.patch.object(self.provision, 'extend_volume'): - self.common.extend_volume(volume, new_size) - self.provision.extend_volume.assert_called_once_with( - array, device_id, new_size, ref_extra_specs) - - def test_extend_volume_failed_snap_src(self): - volume = self.data.test_volume - new_size = self.data.test_volume.size - with mock.patch.object(self.rest, 'is_vol_in_rep_session', - return_value=(False, True, None)): - self.assertRaises(exception.VolumeBackendAPIException, - self.common.extend_volume, volume, new_size) - - def test_extend_volume_failed_no_device_id(self): - volume = self.data.test_volume - new_size = self.data.test_volume.size - with mock.patch.object(self.common, '_find_device_on_array', - return_value=None): - 
self.assertRaises(exception.VolumeBackendAPIException, - self.common.extend_volume, volume, new_size) - - def test_extend_volume_failed_wrong_size(self): - volume = self.data.test_volume - new_size = 1 - self.assertRaises(exception.VolumeBackendAPIException, - self.common.extend_volume, volume, new_size) - - def test_update_volume_stats(self): - data = self.common.update_volume_stats() - self.assertEqual('CommonTests', data['volume_backend_name']) - - def test_update_volume_stats_no_wlp(self): - with mock.patch.object(self.common, '_update_srp_stats', - return_value=('123s#SRP_1#None#None', - 100, 90, 90, 10, False)): - data = self.common.update_volume_stats() - self.assertEqual('CommonTests', data['volume_backend_name']) - - def test_set_config_file_and_get_extra_specs(self): - volume = self.data.test_volume - extra_specs, config_file, qos_specs = ( - self.common._set_config_file_and_get_extra_specs(volume)) - self.assertEqual(self.data.vol_type_extra_specs, extra_specs) - self.assertEqual(self.fake_xml, config_file) - - def test_set_config_file_and_get_extra_specs_no_specs(self): - volume = self.data.test_volume - ref_config = '/etc/cinder/cinder_dell_emc_config.xml' - with mock.patch.object(self.utils, 'get_volumetype_extra_specs', - return_value=None): - extra_specs, config_file, qos_specs = ( - self.common._set_config_file_and_get_extra_specs(volume)) - self.assertIsNone(extra_specs) - self.assertEqual(ref_config, config_file) - - def test_find_device_on_array_success(self): - volume = self.data.test_volume - extra_specs = self.data.extra_specs - ref_device_id = self.data.device_id - founddevice_id = self.common._find_device_on_array(volume, extra_specs) - self.assertEqual(ref_device_id, founddevice_id) - - def test_find_device_on_array_different_device_id(self): - volume = self.data.test_volume - extra_specs = self.data.extra_specs - with mock.patch.object( - self.rest, 'find_volume_device_id', - return_value='01234'): - founddevice_id = 
self.common._find_device_on_array( - volume, extra_specs) - self.assertIsNone(founddevice_id) - - def test_find_device_on_array_provider_location_not_string(self): - volume = fake_volume.fake_volume_obj( - context='cxt', provider_location=None) - extra_specs = self.data.extra_specs - founddevice_id = self.common._find_device_on_array( - volume, extra_specs) - self.assertIsNone(founddevice_id) - - def test_find_host_lun_id_attached(self): - volume = self.data.test_volume - extra_specs = self.data.extra_specs - host = 'HostX' - host_lun = (self.data.maskingview[0]['maskingViewConnection'][0] - ['host_lun_address']) - ref_masked = {'hostlunid': int(host_lun, 16), - 'maskingview': self.data.masking_view_name_f, - 'array': self.data.array, - 'device_id': self.data.device_id} - maskedvols, __, __ = self.common.find_host_lun_id( - volume, host, extra_specs) - self.assertEqual(ref_masked, maskedvols) - - def test_find_host_lun_id_not_attached(self): - volume = self.data.test_volume - extra_specs = self.data.extra_specs - host = 'HostX' - with mock.patch.object(self.rest, 'find_mv_connections_for_vol', - return_value=None): - maskedvols, __, __ = self.common.find_host_lun_id( - volume, host, extra_specs) - self.assertEqual({}, maskedvols) - - def test_get_masking_views_from_volume(self): - array = self.data.array - device_id = self.data.device_id - host = 'HostX' - ref_mv_list = [self.data.masking_view_name_f] - maskingview_list = self.common.get_masking_views_from_volume( - array, device_id, host) - self.assertEqual(ref_mv_list, maskingview_list) - - def test_get_masking_views_from_volume_wrong_host(self): - array = self.data.array - device_id = self.data.device_id - host = 'DifferentHost' - maskingview_list = self.common.get_masking_views_from_volume( - array, device_id, host) - self.assertFalse(maskingview_list) - - def test_register_config_file_from_config_group_exists(self): - config_group_name = 'CommonTests' - config_file = 
self.common._register_config_file_from_config_group( - config_group_name) - self.assertEqual(self.fake_xml, config_file) - - def test_register_config_file_from_config_group_does_not_exist(self): - config_group_name = 'IncorrectName' - self.assertRaises(exception.VolumeBackendAPIException, - self.common._register_config_file_from_config_group, - config_group_name) - - def test_initial_setup_success(self): - volume = self.data.test_volume - ref_extra_specs = deepcopy(self.data.extra_specs_intervals_set) - ref_extra_specs['port_group_name'] = self.data.port_group_name_f - extra_specs = self.common._initial_setup(volume) - self.assertEqual(ref_extra_specs, extra_specs) - - def test_initial_setup_failed(self): - volume = self.data.test_volume - with mock.patch.object(self.utils, 'parse_file_to_get_array_map', - return_value=None): - self.assertRaises(exception.VolumeBackendAPIException, - self.common._initial_setup, volume) - - def test_populate_masking_dict(self): - volume = self.data.test_volume - connector = self.data.connector - extra_specs = deepcopy(self.data.extra_specs) - extra_specs['port_group_name'] = self.data.port_group_name_f - ref_mv_dict = self.data.masking_view_dict - masking_view_dict = self.common._populate_masking_dict( - volume, connector, extra_specs) - self.assertEqual(ref_mv_dict, masking_view_dict) - - def test_populate_masking_dict_no_slo(self): - volume = self.data.test_volume - connector = self.data.connector - extra_specs = { - 'slo': None, - 'workload': None, - 'srp': self.data.srp, - 'array': self.data.array, - 'port_group_name': self.data.port_group_name_f} - ref_mv_dict = self.data.masking_view_dict_no_slo - masking_view_dict = self.common._populate_masking_dict( - volume, connector, extra_specs) - self.assertEqual(ref_mv_dict, masking_view_dict) - - def test_populate_masking_dict_compr_disabled(self): - volume = self.data.test_volume - connector = self.data.connector - extra_specs = deepcopy(self.data.extra_specs) - 
extra_specs['port_group_name'] = self.data.port_group_name_f - extra_specs[utils.DISABLECOMPRESSION] = "true" - ref_mv_dict = self.data.masking_view_dict_compression_disabled - masking_view_dict = self.common._populate_masking_dict( - volume, connector, extra_specs) - self.assertEqual(ref_mv_dict, masking_view_dict) - - def test_create_cloned_volume(self): - volume = self.data.test_clone_volume - source_volume = self.data.test_volume - extra_specs = self.data.extra_specs - ref_dict = self.data.provider_location - clone_dict = self.common._create_cloned_volume( - volume, source_volume, extra_specs) - self.assertEqual(ref_dict, clone_dict) - - def test_create_cloned_volume_is_snapshot(self): - volume = self.data.test_snapshot - source_volume = self.data.test_volume - extra_specs = self.data.extra_specs - ref_dict = self.data.snap_location - clone_dict = self.common._create_cloned_volume( - volume, source_volume, extra_specs, True, False) - self.assertEqual(ref_dict, clone_dict) - - def test_create_cloned_volume_from_snapshot(self): - volume = self.data.test_clone_volume - source_volume = self.data.test_snapshot - extra_specs = self.data.extra_specs - ref_dict = self.data.provider_location - clone_dict = self.common._create_cloned_volume( - volume, source_volume, extra_specs, False, True) - self.assertEqual(ref_dict, clone_dict) - - def test_create_cloned_volume_not_licenced(self): - volume = self.data.test_clone_volume - source_volume = self.data.test_volume - extra_specs = self.data.extra_specs - with mock.patch.object(self.rest, 'is_snapvx_licensed', - return_value=False): - self.assertRaises(exception.VolumeBackendAPIException, - self.common._create_cloned_volume, - volume, source_volume, extra_specs) - - def test_parse_snap_info_found(self): - ref_device_id = self.data.device_id - ref_snap_name = self.data.snap_location['snap_name'] - sourcedevice_id, foundsnap_name = self.common._parse_snap_info( - self.data.array, self.data.test_snapshot) - 
self.assertEqual(ref_device_id, sourcedevice_id) - self.assertEqual(ref_snap_name, foundsnap_name) - - def test_parse_snap_info_not_found(self): - ref_snap_name = None - with mock.patch.object(self.rest, 'get_volume_snap', - return_value=None): - __, foundsnap_name = self.common._parse_snap_info( - self.data.array, self.data.test_snapshot) - self.assertIsNone(ref_snap_name, foundsnap_name) - - def test_parse_snap_info_exception(self): - with mock.patch.object( - self.rest, 'get_volume_snap', - side_effect=exception.VolumeBackendAPIException): - __, foundsnap_name = self.common._parse_snap_info( - self.data.array, self.data.test_snapshot) - self.assertIsNone(foundsnap_name) - - def test_parse_snap_info_provider_location_not_string(self): - snapshot = fake_snapshot.fake_snapshot_obj( - context='ctxt', provider_loaction={'not': 'string'}) - sourcedevice_id, foundsnap_name = self.common._parse_snap_info( - self.data.array, snapshot) - self.assertIsNone(foundsnap_name) - - def test_create_snapshot_success(self): - array = self.data.array - snapshot = self.data.test_snapshot - source_device_id = self.data.device_id - extra_specs = self.data.extra_specs - ref_dict = {'snap_name': '12345', 'source_id': self.data.device_id} - snap_dict = self.common._create_snapshot( - array, snapshot, source_device_id, extra_specs) - self.assertEqual(ref_dict, snap_dict) - - def test_create_snapshot_exception(self): - array = self.data.array - snapshot = self.data.test_snapshot - source_device_id = self.data.device_id - extra_specs = self.data.extra_specs - with mock.patch.object( - self.provision, 'create_volume_snapvx', - side_effect=exception.VolumeBackendAPIException): - self.assertRaises(exception.VolumeBackendAPIException, - self.common._create_snapshot, - array, snapshot, source_device_id, extra_specs) - - @mock.patch.object(masking.VMAXMasking, 'remove_vol_from_storage_group') - def test_delete_volume_from_srp(self, mock_rm): - array = self.data.array - device_id = 
self.data.device_id - volume_name = self.data.test_volume.name - ref_extra_specs = self.data.extra_specs_intervals_set - ref_extra_specs['port_group_name'] = self.data.port_group_name_f - volume = self.data.test_volume - with mock.patch.object(self.common, '_sync_check'): - with mock.patch.object(self.common, '_delete_from_srp'): - self.common._delete_volume(volume) - self.common._delete_from_srp.assert_called_once_with( - array, device_id, volume_name, ref_extra_specs) - - def test_delete_volume_not_found(self): - volume = self.data.test_volume - with mock.patch.object(self.common, '_find_device_on_array', - return_value=None): - with mock.patch.object(self.common, '_delete_from_srp'): - self.common._delete_volume(volume) - self.common._delete_from_srp.assert_not_called() - - def test_create_volume_success(self): - volume_name = '1' - volume_size = self.data.test_volume.size - extra_specs = self.data.extra_specs - ref_dict = self.data.provider_location - volume_dict = self.common._create_volume( - volume_name, volume_size, extra_specs) - self.assertEqual(ref_dict, volume_dict) - - def test_create_volume_failed(self): - volume_name = self.data.test_volume.name - volume_size = self.data.test_volume.size - extra_specs = self.data.extra_specs - with mock.patch.object(self.masking, - 'get_or_create_default_storage_group', - return_value=self.data.failed_resource): - with mock.patch.object(self.rest, 'delete_storage_group'): - # path 1: not last vol in sg - with mock.patch.object(self.rest, 'get_num_vols_in_sg', - return_value=2): - self.assertRaises(exception.VolumeBackendAPIException, - self.common._create_volume, - volume_name, volume_size, extra_specs) - self.rest.delete_storage_group.assert_not_called() - # path 2: last vol in sg, delete sg - with mock.patch.object(self.rest, 'get_num_vols_in_sg', - return_value=0): - self.assertRaises(exception.VolumeBackendAPIException, - self.common._create_volume, - volume_name, volume_size, extra_specs) - 
(self.rest.delete_storage_group. - assert_called_once_with(self.data.array, - self.data.failed_resource)) - - def test_create_volume_incorrect_slo(self): - volume_name = self.data.test_volume.name - volume_size = self.data.test_volume.size - extra_specs = {'slo': 'Diamondz', - 'workload': 'DSSSS', - 'srp': self.data.srp, - 'array': self.data.array} - self.assertRaises( - exception.VolumeBackendAPIException, - self.common._create_volume, - volume_name, volume_size, extra_specs) - - def test_set_vmax_extra_specs(self): - srp_record = self.utils.parse_file_to_get_array_map( - self.fake_xml) - extra_specs = self.common._set_vmax_extra_specs( - self.data.vol_type_extra_specs, srp_record) - ref_extra_specs = deepcopy(self.data.extra_specs_intervals_set) - ref_extra_specs['port_group_name'] = self.data.port_group_name_f - self.assertEqual(ref_extra_specs, extra_specs) - - def test_set_vmax_extra_specs_no_srp_name(self): - srp_record = self.utils.parse_file_to_get_array_map( - self.fake_xml) - extra_specs = self.common._set_vmax_extra_specs({}, srp_record) - self.assertEqual('Optimized', extra_specs['slo']) - - def test_set_vmax_extra_specs_compr_disabled(self): - with mock.patch.object(self.rest, 'is_compression_capable', - return_value=True): - srp_record = self.utils.parse_file_to_get_array_map( - self.fake_xml) - extra_specs = self.common._set_vmax_extra_specs( - self.data.vol_type_extra_specs_compr_disabled, srp_record) - ref_extra_specs = deepcopy(self.data.extra_specs_intervals_set) - ref_extra_specs['port_group_name'] = self.data.port_group_name_f - ref_extra_specs[utils.DISABLECOMPRESSION] = "true" - self.assertEqual(ref_extra_specs, extra_specs) - - def test_set_vmax_extra_specs_compr_disabled_not_compr_capable(self): - srp_record = self.utils.parse_file_to_get_array_map( - self.fake_xml) - extra_specs = self.common._set_vmax_extra_specs( - self.data.vol_type_extra_specs_compr_disabled, srp_record) - ref_extra_specs = deepcopy(self.data.extra_specs_intervals_set) 
- ref_extra_specs['port_group_name'] = self.data.port_group_name_f - self.assertEqual(ref_extra_specs, extra_specs) - - def test_set_vmax_extra_specs_portgroup_as_spec(self): - srp_record = self.utils.parse_file_to_get_array_map( - self.fake_xml) - extra_specs = self.common._set_vmax_extra_specs( - {'port_group_name': 'extra_spec_pg'}, srp_record) - self.assertEqual('extra_spec_pg', extra_specs['port_group_name']) - - def test_set_vmax_extra_specs_no_portgroup_set(self): - fake_xml = FakeXML().create_fake_config_file( - 'test_no_pg_set', '') - srp_record = self.utils.parse_file_to_get_array_map(fake_xml) - self.assertRaises(exception.VolumeBackendAPIException, - self.common._set_vmax_extra_specs, - {}, srp_record) - - def test_delete_volume_from_srp_success(self): - array = self.data.array - device_id = self.data.device_id - volume_name = self.data.test_volume.name - extra_specs = self.data.extra_specs - with mock.patch.object( - self.provision, 'delete_volume_from_srp') as mock_del: - self.common._delete_from_srp(array, device_id, volume_name, - extra_specs) - mock_del.assert_called_once_with(array, device_id, volume_name) - - def test_delete_volume_from_srp_failed(self): - array = self.data.array - device_id = self.data.failed_resource - volume_name = self.data.test_volume.name - extra_specs = self.data.extra_specs - with mock.patch.object(self.masking, - 'add_volume_to_default_storage_group'): - self.assertRaises(exception.VolumeBackendAPIException, - self.common._delete_from_srp, array, - device_id, volume_name, extra_specs) - (self.masking.add_volume_to_default_storage_group. 
- assert_called_once_with( - array, device_id, volume_name, extra_specs)) - - @mock.patch.object(utils.VMAXUtils, 'is_replication_enabled', - side_effect=[False, True]) - def test_remove_vol_and_cleanup_replication(self, mock_rep_enabled): - array = self.data.array - device_id = self.data.device_id - volume = self.data.test_volume - volume_name = self.data.test_volume.name - extra_specs = self.data.extra_specs - with mock.patch.object( - self.masking, 'remove_and_reset_members') as mock_rm: - with mock.patch.object( - self.common, 'cleanup_lun_replication') as mock_clean: - self.common._remove_vol_and_cleanup_replication( - array, device_id, volume_name, extra_specs) - mock_rm.assert_called_once_with( - array, device_id, volume_name, extra_specs, False) - mock_clean.assert_not_called() - self.common._remove_vol_and_cleanup_replication( - array, device_id, volume_name, extra_specs, volume) - mock_clean.assert_called_once_with( - volume, volume_name, device_id, extra_specs) - - def test_get_target_wwns_from_masking_view(self): - target_wwns = self.common.get_target_wwns_from_masking_view( - self.data.test_volume, self.data.connector) - ref_wwns = [self.data.wwnn1] - self.assertEqual(ref_wwns, target_wwns) - - def test_get_target_wwns_from_masking_view_no_mv(self): - with mock.patch.object(self.common, 'get_masking_views_from_volume', - return_value=None): - target_wwns = self.common.get_target_wwns_from_masking_view( - self.data.test_volume, self.data.connector) - self.assertFalse(target_wwns) - - def test_get_port_group_from_masking_view(self): - array = self.data.array - maskingview_name = self.data.masking_view_name_f - with mock.patch.object(self.rest, - 'get_element_from_masking_view'): - self.common.get_port_group_from_masking_view( - array, maskingview_name) - self.rest.get_element_from_masking_view.assert_called_once_with( - array, maskingview_name, portgroup=True) - - def test_get_initiator_group_from_masking_view(self): - array = self.data.array - 
maskingview_name = self.data.masking_view_name_f - with mock.patch.object(self.rest, - 'get_element_from_masking_view'): - self.common.get_initiator_group_from_masking_view( - array, maskingview_name) - self.rest.get_element_from_masking_view.assert_called_once_with( - array, maskingview_name, host=True) - - def test_get_common_masking_views(self): - array = self.data.array - portgroup_name = self.data.port_group_name_f - initiator_group_name = self.data.initiatorgroup_name_f - with mock.patch.object(self.rest, 'get_common_masking_views'): - self.common.get_common_masking_views( - array, portgroup_name, initiator_group_name) - self.rest.get_common_masking_views.assert_called_once_with( - array, portgroup_name, initiator_group_name) - - def test_get_ip_and_iqn(self): - ref_ip_iqn = [{'iqn': self.data.initiator, - 'ip': self.data.ip}] - port = self.data.portgroup[1]['symmetrixPortKey'][0]['portId'] - ip_iqn_list = self.common._get_ip_and_iqn(self.data.array, port) - self.assertEqual(ref_ip_iqn, ip_iqn_list) - - def test_find_ip_and_iqns(self): - ref_ip_iqn = [{'iqn': self.data.initiator, - 'ip': self.data.ip}] - ip_iqn_list = self.common._find_ip_and_iqns( - self.data.array, self.data.port_group_name_i) - self.assertEqual(ref_ip_iqn, ip_iqn_list) - - def test_create_replica_snap_name(self): - array = self.data.array - clone_volume = self.data.test_clone_volume - source_device_id = self.data.device_id - snap_name = self.data.snap_location['snap_name'] - ref_dict = self.data.provider_location - clone_dict = self.common._create_replica( - array, clone_volume, source_device_id, - self.data.extra_specs, snap_name) - self.assertEqual(ref_dict, clone_dict) - - def test_create_replica_no_snap_name(self): - array = self.data.array - clone_volume = self.data.test_clone_volume - source_device_id = self.data.device_id - snap_name = "temp-" + source_device_id + clone_volume.id - ref_dict = self.data.provider_location - with mock.patch.object(self.utils, 'get_temp_snap_name', - 
return_value=snap_name): - clone_dict = self.common._create_replica( - array, clone_volume, source_device_id, - self.data.extra_specs) - self.assertEqual(ref_dict, clone_dict) - self.utils.get_temp_snap_name.assert_called_once_with( - ('OS-' + clone_volume.id), source_device_id) - - def test_create_replica_failed_cleanup_target(self): - array = self.data.array - clone_volume = self.data.test_clone_volume - device_id = self.data.device_id - snap_name = self.data.failed_resource - clone_name = 'OS-' + clone_volume.id - extra_specs = self.data.extra_specs - with mock.patch.object(self.common, '_cleanup_target'): - self.assertRaises( - exception.VolumeBackendAPIException, - self.common._create_replica, array, clone_volume, - device_id, self.data.extra_specs, snap_name) - self.common._cleanup_target.assert_called_once_with( - array, device_id, device_id, clone_name, - snap_name, extra_specs) - - def test_create_replica_failed_no_target(self): - array = self.data.array - clone_volume = self.data.test_clone_volume - source_device_id = self.data.device_id - snap_name = self.data.failed_resource - with mock.patch.object(self.common, '_create_volume', - return_value={'device_id': None}): - with mock.patch.object(self.common, '_cleanup_target'): - self.assertRaises( - exception.VolumeBackendAPIException, - self.common._create_replica, array, clone_volume, - source_device_id, self.data.extra_specs, snap_name) - self.common._cleanup_target.assert_not_called() - - @mock.patch.object( - masking.VMAXMasking, - 'remove_and_reset_members') - def test_cleanup_target_sync_present(self, mock_remove): - array = self.data.array - clone_volume = self.data.test_clone_volume - source_device_id = self.data.device_id - target_device_id = self.data.device_id2 - snap_name = self.data.failed_resource - clone_name = clone_volume.name - extra_specs = self.data.extra_specs - with mock.patch.object(self.rest, 'get_sync_session', - return_value='session'): - with mock.patch.object(self.provision, - 
'break_replication_relationship'): - self.common._cleanup_target( - array, target_device_id, source_device_id, - clone_name, snap_name, extra_specs) - (self.provision.break_replication_relationship. - assert_called_with( - array, target_device_id, source_device_id, - snap_name, extra_specs)) - - def test_cleanup_target_no_sync(self): - array = self.data.array - clone_volume = self.data.test_clone_volume - source_device_id = self.data.device_id - target_device_id = self.data.device_id2 - snap_name = self.data.failed_resource - clone_name = clone_volume.name - extra_specs = self.data.extra_specs - with mock.patch.object(self.rest, 'get_sync_session', - return_value=None): - with mock.patch.object(self.common, - '_delete_from_srp'): - self.common._cleanup_target( - array, target_device_id, source_device_id, - clone_name, snap_name, extra_specs) - self.common._delete_from_srp.assert_called_once_with( - array, target_device_id, clone_name, - extra_specs) - - @mock.patch.object( - provision.VMAXProvision, - 'delete_volume_snap') - @mock.patch.object( - provision.VMAXProvision, - 'break_replication_relationship') - def test_sync_check_temp_snap(self, mock_break, mock_delete): - array = self.data.array - device_id = self.data.device_id - target = self.data.volume_details[1]['volumeId'] - volume_name = self.data.test_volume.name - extra_specs = self.data.extra_specs - snap_name = 'temp-1' - with mock.patch.object(self.rest, 'get_volume_snap', - return_value=snap_name): - self.common._sync_check(array, device_id, volume_name, - extra_specs) - mock_break.assert_called_with( - array, target, device_id, snap_name, extra_specs) - mock_delete.assert_called_with( - array, snap_name, device_id) - - @mock.patch.object( - provision.VMAXProvision, - 'delete_volume_snap') - @mock.patch.object( - provision.VMAXProvision, - 'break_replication_relationship') - def test_sync_check_not_temp_snap(self, mock_break, mock_delete): - array = self.data.array - device_id = self.data.device_id - 
target = self.data.volume_details[1]['volumeId'] - volume_name = self.data.test_volume.name - extra_specs = self.data.extra_specs - snap_name = 'OS-1' - sessions = [{'source_vol': device_id, - 'snap_name': snap_name, - 'target_vol_list': [target]}] - with mock.patch.object(self.rest, 'find_snap_vx_sessions', - return_value=sessions): - self.common._sync_check(array, device_id, volume_name, - extra_specs) - mock_break.assert_called_with( - array, target, device_id, snap_name, extra_specs) - mock_delete.assert_not_called() - - @mock.patch.object( - provision.VMAXProvision, - 'break_replication_relationship') - def test_sync_check_no_sessions(self, mock_break): - array = self.data.array - device_id = self.data.device_id - volume_name = self.data.test_volume.name - extra_specs = self.data.extra_specs - with mock.patch.object(self.rest, 'find_snap_vx_sessions', - return_value=None): - self.common._sync_check(array, device_id, volume_name, - extra_specs) - mock_break.assert_not_called() - - def test_manage_existing_success(self): - external_ref = {u'source-name': u'00002'} - provider_location = {'device_id': u'00002', 'array': u'000197800123'} - ref_update = {'provider_location': six.text_type(provider_location)} - with mock.patch.object( - self.common, '_check_lun_valid_for_cinder_management'): - model_update = self.common.manage_existing( - self.data.test_volume, external_ref) - self.assertEqual(ref_update, model_update) - - @mock.patch.object( - rest.VMAXRest, 'get_masking_views_from_storage_group', - return_value=None) - def test_check_lun_valid_for_cinder_management(self, mock_mv): - external_ref = {u'source-name': u'00001'} - self.common._check_lun_valid_for_cinder_management( - self.data.array, '00001', - self.data.test_volume.id, external_ref) - - @mock.patch.object( - rest.VMAXRest, 'get_volume', - side_effect=[ - None, - VMAXCommonData.volume_details[0], - VMAXCommonData.volume_details[0], - VMAXCommonData.volume_details[1]]) - @mock.patch.object( - 
rest.VMAXRest, 'get_masking_views_from_storage_group', - side_effect=[VMAXCommonData.sg_details[1]['maskingview'], - None]) - @mock.patch.object(rest.VMAXRest, 'get_storage_groups_from_volume', - return_value=[VMAXCommonData.defaultstoragegroup_name]) - @mock.patch.object(rest.VMAXRest, 'is_vol_in_rep_session', - side_effect=[(True, False, []), (False, False, None)]) - def test_check_lun_valid_for_cinder_management_exception( - self, mock_rep, mock_sg, mock_mvs, mock_get_vol): - external_ref = {u'source-name': u'00001'} - for x in range(0, 3): - self.assertRaises( - exception.ManageExistingInvalidReference, - self.common._check_lun_valid_for_cinder_management, - self.data.array, '00001', - self.data.test_volume.id, external_ref) - self.assertRaises(exception.ManageExistingAlreadyManaged, - self.common._check_lun_valid_for_cinder_management, - self.data.array, '00001', - self.data.test_volume.id, external_ref) - - def test_manage_existing_get_size(self): - external_ref = {u'source-name': u'00001'} - size = self.common.manage_existing_get_size( - self.data.test_volume, external_ref) - self.assertEqual(2, size) - - def test_manage_existing_get_size_exception(self): - external_ref = {u'source-name': u'00001'} - with mock.patch.object(self.rest, 'get_size_of_device_on_array', - return_value=3.5): - self.assertRaises(exception.ManageExistingInvalidReference, - self.common.manage_existing_get_size, - self.data.test_volume, external_ref) - - @mock.patch.object(common.VMAXCommon, - '_remove_vol_and_cleanup_replication') - def test_unmanage_success(self, mock_rm): - volume = self.data.test_volume - with mock.patch.object(self.rest, 'rename_volume'): - self.common.unmanage(volume) - self.rest.rename_volume.assert_called_once_with( - self.data.array, self.data.device_id, - self.data.test_volume.id) - - def test_unmanage_device_not_found(self): - volume = self.data.test_volume - with mock.patch.object(self.common, '_find_device_on_array', - return_value=None): - with 
mock.patch.object(self.rest, 'rename_volume'): - self.common.unmanage(volume) - self.rest.rename_volume.assert_not_called() - - @mock.patch.object(common.VMAXCommon, - '_slo_workload_migration') - def test_retype(self, mock_migrate): - device_id = self.data.device_id - volume_name = self.data.test_volume.name - extra_specs = self.data.extra_specs_intervals_set - extra_specs['port_group_name'] = self.data.port_group_name_f - volume = self.data.test_volume - new_type = {'extra_specs': {}} - host = {'host': self.data.new_host} - self.common.retype(volume, new_type, host) - mock_migrate.assert_called_once_with( - device_id, volume, host, volume_name, new_type, extra_specs) - mock_migrate.reset_mock() - with mock.patch.object( - self.common, '_find_device_on_array', return_value=None): - self.common.retype(volume, new_type, host) - mock_migrate.assert_not_called() - - def test_slo_workload_migration_valid(self): - device_id = self.data.device_id - volume_name = self.data.test_volume.name - extra_specs = self.data.extra_specs - new_type = {'extra_specs': {}} - volume = self.data.test_volume - host = {'host': self.data.new_host} - with mock.patch.object(self.common, '_migrate_volume'): - self.common._slo_workload_migration( - device_id, volume, host, volume_name, new_type, extra_specs) - self.common._migrate_volume.assert_called_once_with( - extra_specs[utils.ARRAY], device_id, - extra_specs[utils.SRP], 'Silver', - 'OLTP', volume_name, new_type, extra_specs) - - def test_slo_workload_migration_not_valid(self): - device_id = self.data.device_id - volume_name = self.data.test_volume.name - extra_specs = self.data.extra_specs - volume = self.data.test_volume - new_type = {'extra_specs': {}} - host = {'host': self.data.new_host} - with mock.patch.object(self.common, - '_is_valid_for_storage_assisted_migration', - return_value=(False, 'Silver', 'OLTP')): - migrate_status = self.common._slo_workload_migration( - device_id, volume, host, volume_name, new_type, extra_specs) - 
            # …continuation from the previous hunk: the not-valid migration
            # path must report failure.
            self.assertFalse(migrate_status)

    def test_slo_workload_migration_same_hosts(self):
        """No migration when source and destination host are identical."""
        device_id = self.data.device_id
        volume_name = self.data.test_volume.name
        extra_specs = self.data.extra_specs
        volume = self.data.test_volume
        host = {'host': self.data.fake_host}  # same backend/pool as volume
        new_type = {'extra_specs': {}}
        migrate_status = self.common._slo_workload_migration(
            device_id, volume, host, volume_name, new_type, extra_specs)
        self.assertFalse(migrate_status)

    def test_slo_workload_migration_same_host_change_compression(self):
        """Same host but a compression change still triggers migration.

        Forces the validity check to pass and asserts _migrate_volume is
        invoked once with the current slo/workload and the new type.
        """
        device_id = self.data.device_id
        volume_name = self.data.test_volume.name
        extra_specs = self.data.extra_specs
        volume = self.data.test_volume
        host = {'host': self.data.fake_host}
        new_type = {'extra_specs': {utils.DISABLECOMPRESSION: "true"}}
        with mock.patch.object(
                self.common, '_is_valid_for_storage_assisted_migration',
                return_value=(True, self.data.slo, self.data.workload)):
            with mock.patch.object(self.common, '_migrate_volume'):
                migrate_status = self.common._slo_workload_migration(
                    device_id, volume, host, volume_name, new_type,
                    extra_specs)
                self.assertTrue(migrate_status)
                self.common._migrate_volume.assert_called_once_with(
                    extra_specs[utils.ARRAY], device_id,
                    extra_specs[utils.SRP], self.data.slo,
                    self.data.workload, volume_name, new_type, extra_specs)

    @mock.patch.object(masking.VMAXMasking, 'remove_and_reset_members')
    def test_migrate_volume_success(self, mock_remove):
        """_migrate_volume succeeds and resets masking-view membership."""
        with mock.patch.object(self.rest, 'is_volume_in_storagegroup',
                               return_value=True):
            device_id = self.data.device_id
            volume_name = self.data.test_volume.name
            extra_specs = self.data.extra_specs
            new_type = {'extra_specs': {}}
            migrate_status = self.common._migrate_volume(
                self.data.array, device_id, self.data.srp, self.data.slo,
                self.data.workload, volume_name, new_type, extra_specs)
            self.assertTrue(migrate_status)
            mock_remove.assert_called_once_with(
                self.data.array, device_id, None, extra_specs, False)
            # …continues in the next hunk (reset_mock + no-storage-group case)
mock_remove.reset_mock() - with mock.patch.object( - self.rest, 'get_storage_groups_from_volume', - return_value=[]): - migrate_status = self.common._migrate_volume( - self.data.array, device_id, self.data.srp, self.data.slo, - self.data.workload, volume_name, new_type, extra_specs) - self.assertTrue(migrate_status) - mock_remove.assert_not_called() - - @mock.patch.object(masking.VMAXMasking, 'remove_and_reset_members') - def test_migrate_volume_failed_get_new_sg_failed(self, mock_remove): - device_id = self.data.device_id - volume_name = self.data.test_volume.name - extra_specs = self.data.extra_specs - new_type = {'extra_specs': {}} - with mock.patch.object( - self.masking, 'get_or_create_default_storage_group', - side_effect=exception.VolumeBackendAPIException): - migrate_status = self.common._migrate_volume( - self.data.array, device_id, self.data.srp, self.data.slo, - self.data.workload, volume_name, new_type, extra_specs) - self.assertFalse(migrate_status) - - def test_migrate_volume_failed_vol_not_added(self): - device_id = self.data.device_id - volume_name = self.data.test_volume.name - extra_specs = self.data.extra_specs - new_type = {'extra_specs': {}} - with mock.patch.object( - self.rest, 'is_volume_in_storagegroup', - return_value=False): - migrate_status = self.common._migrate_volume( - self.data.array, device_id, self.data.srp, self.data.slo, - self.data.workload, volume_name, new_type, extra_specs) - self.assertFalse(migrate_status) - - def test_is_valid_for_storage_assisted_migration_true(self): - device_id = self.data.device_id - host = {'host': self.data.new_host} - volume_name = self.data.test_volume.name - ref_return = (True, 'Silver', 'OLTP') - return_val = self.common._is_valid_for_storage_assisted_migration( - device_id, host, self.data.array, - self.data.srp, volume_name, False) - self.assertEqual(ref_return, return_val) - # No current sgs found - with mock.patch.object(self.rest, 'get_storage_groups_from_volume', - return_value=None): - 
return_val = self.common._is_valid_for_storage_assisted_migration( - device_id, host, self.data.array, self.data.srp, - volume_name, False) - self.assertEqual(ref_return, return_val) - - def test_is_valid_for_storage_assisted_migration_false(self): - device_id = self.data.device_id - volume_name = self.data.test_volume.name - ref_return = (False, None, None) - # IndexError - host = {'host': 'HostX@Backend#Silver+SRP_1+000197800123'} - return_val = self.common._is_valid_for_storage_assisted_migration( - device_id, host, self.data.array, - self.data.srp, volume_name, False) - self.assertEqual(ref_return, return_val) - # Wrong array - host2 = {'host': 'HostX@Backend#Silver+OLTP+SRP_1+00012345678'} - return_val = self.common._is_valid_for_storage_assisted_migration( - device_id, host2, self.data.array, - self.data.srp, volume_name, False) - self.assertEqual(ref_return, return_val) - # Wrong srp - host3 = {'host': 'HostX@Backend#Silver+OLTP+SRP_2+000197800123'} - return_val = self.common._is_valid_for_storage_assisted_migration( - device_id, host3, self.data.array, - self.data.srp, volume_name, False) - self.assertEqual(ref_return, return_val) - # Already in correct sg - host4 = {'host': self.data.fake_host} - return_val = self.common._is_valid_for_storage_assisted_migration( - device_id, host4, self.data.array, - self.data.srp, volume_name, False) - self.assertEqual(ref_return, return_val) - - def test_find_volume_group_name_from_id(self): - array = self.data.array - group_id = 'GrpId' - group_name = None - ref_group_name = self.data.storagegroup_name_with_id - with mock.patch.object( - self.rest, 'get_storage_group_list', - return_value=self.data.sg_list_rep): - group_name = self.common._find_volume_group_name_from_id( - array, group_id) - self.assertEqual(ref_group_name, group_name) - - def test_find_volume_group_name_from_id_not_found(self): - array = self.data.array - group_id = 'GrpId' - group_name = None - group_name = self.common._find_volume_group_name_from_id( 
                # …continuation from the previous hunk: lookup with no
                # matching storage group must return None.
                array, group_id)
        self.assertIsNone(group_name)

    def test_find_volume_group(self):
        """_find_volume_group returns the canned replication SG details."""
        group = self.data.test_group_1
        array = self.data.array
        volume_group = self.common._find_volume_group(array, group)
        ref_group = self.data.sg_details_rep[0]
        self.assertEqual(ref_group, volume_group)

    def test_get_volume_device_ids(self):
        """_get_volume_device_ids maps volume objects to array device ids."""
        array = self.data.array
        volumes = [self.data.test_volume]
        ref_device_ids = [self.data.device_id]
        device_ids = self.common._get_volume_device_ids(volumes, array)
        self.assertEqual(ref_device_ids, device_ids)

    def test_get_members_of_volume_group(self):
        """Members of a storage group come back as a device-id list."""
        array = self.data.array
        group_name = self.data.storagegroup_name_source
        ref_volumes = [self.data.device_id, self.data.device_id2]
        member_device_ids = self.common._get_members_of_volume_group(
            array, group_name)
        self.assertEqual(ref_volumes, member_device_ids)

    def test_get_members_of_volume_group_empty(self):
        """An empty storage group yields None, not an empty list."""
        array = self.data.array
        group_name = self.data.storagegroup_name_source
        with mock.patch.object(
                self.rest, 'get_volumes_in_storage_group',
                return_value=None):
            member_device_ids = self.common._get_members_of_volume_group(
                array, group_name
            )
        self.assertIsNone(member_device_ids)

    @mock.patch.object(volume_utils, 'is_group_a_cg_snapshot_type',
                       return_value=True)
    def test_create_group_replica(self, mock_check):
        # NOTE(review): this test patches self.common._create_group_replica
        # and then calls that same mocked attribute, so it only asserts that
        # the mock called itself — no production code is exercised. It
        # presumably meant to invoke a public wrapper (e.g.
        # create_group_replica) while mocking the private helper; confirm
        # against the driver's actual API before fixing.
        source_group = self.data.test_group_1
        snap_name = self.data.group_snapshot_name
        with mock.patch.object(
                self.common,
                '_create_group_replica') as mock_create_replica:
            self.common._create_group_replica(
                source_group, snap_name)
            mock_create_replica.assert_called_once_with(
                source_group, snap_name)

    def test_create_group_replica_exception(self):
        """A failing group fixture makes replica creation raise."""
        source_group = self.data.test_group_failed
        snap_name = self.data.group_snapshot_name
        with mock.patch.object(
                volume_utils, 'is_group_a_cg_snapshot_type',
                return_value=True):
            self.assertRaises(exception.VolumeBackendAPIException,
                              # …continues in the next hunk (call + args)
self.common._create_group_replica, - source_group, - snap_name) - - def test_create_group_snapshot(self): - context = None - group_snapshot = self.data.test_group_snapshot_1 - snapshots = [] - ref_model_update = {'status': fields.GroupStatus.AVAILABLE} - with mock.patch.object( - volume_utils, 'is_group_a_cg_snapshot_type', - return_value=True): - model_update, snapshots_model_update = ( - self.common.create_group_snapshot( - context, group_snapshot, snapshots)) - self.assertEqual(ref_model_update, model_update) - - def test_create_group_snapshot_exception(self): - context = None - group_snapshot = self.data.test_group_snapshot_failed - snapshots = [] - with mock.patch.object( - volume_utils, 'is_group_a_cg_snapshot_type', - return_value=True): - self.assertRaises(exception.VolumeBackendAPIException, - self.common.create_group_snapshot, - context, - group_snapshot, - snapshots) - - def test_create_group(self): - ref_model_update = {'status': fields.GroupStatus.AVAILABLE} - context = None - group = self.data.test_group_1 - with mock.patch.object( - volume_utils, 'is_group_a_cg_snapshot_type', - return_value=True): - model_update = self.common.create_group(context, group) - self.assertEqual(ref_model_update, model_update) - - def test_create_group_exception(self): - context = None - group = self.data.test_group_snapshot_failed - with mock.patch.object( - volume_utils, 'is_group_a_cg_snapshot_type', - return_value=True): - self.assertRaises(exception.VolumeBackendAPIException, - self.common.create_group, - context, - group) - - def test_delete_group_snapshot(self): - group_snapshot = self.data.test_group_snapshot_1 - snapshots = [] - context = None - ref_model_update = {'status': fields.GroupSnapshotStatus.DELETED} - with mock.patch.object(volume_utils, 'is_group_a_cg_snapshot_type', - return_value=True): - model_update, snapshots_model_update = ( - self.common.delete_group_snapshot(context, - group_snapshot, snapshots)) - self.assertEqual(ref_model_update, 
model_update) - - def test_delete_group_snapshot_success(self): - group_snapshot = self.data.test_group_snapshot_1 - snapshots = [] - ref_model_update = {'status': fields.GroupSnapshotStatus.DELETED} - with mock.patch.object(volume_utils, 'is_group_a_cg_snapshot_type', - return_value=True): - model_update, snapshots_model_update = ( - self.common._delete_group_snapshot(group_snapshot, - snapshots)) - self.assertEqual(ref_model_update, model_update) - - def test_delete_group_snapshot_failed(self): - group_snapshot = self.data.test_group_snapshot_failed - snapshots = [] - ref_model_update = ( - {'status': fields.GroupSnapshotStatus.ERROR_DELETING}) - with mock.patch.object(volume_utils, 'is_group_a_cg_snapshot_type', - return_value=True): - model_update, snapshots_model_update = ( - self.common._delete_group_snapshot(group_snapshot, - snapshots)) - self.assertEqual(ref_model_update, model_update) - - def test_update_group(self): - group = self.data.test_group_1 - add_vols = [self.data.test_volume] - remove_vols = [] - ref_model_update = {'status': fields.GroupStatus.AVAILABLE} - with mock.patch.object(volume_utils, 'is_group_a_cg_snapshot_type', - return_value=True): - model_update, __, __ = self.common.update_group(group, - add_vols, - remove_vols) - self.assertEqual(ref_model_update, model_update) - - @mock.patch.object(volume_utils, 'is_group_a_cg_snapshot_type', - return_value=True) - def test_update_group_not_found(self, mock_check): - group = self.data.test_group_1 - add_vols = [] - remove_vols = [] - with mock.patch.object( - self.common, '_find_volume_group', - return_value=None): - self.assertRaises(exception.GroupNotFound, - self.common.update_group, - group, - add_vols, - remove_vols) - - @mock.patch.object(volume_utils, 'is_group_a_cg_snapshot_type', - return_value=True) - def test_update_group_exception(self, mock_check): - group = self.data.test_group_1 - add_vols = [] - remove_vols = [] - with mock.patch.object( - self.common, '_find_volume_group', - 
side_effect=exception.VolumeBackendAPIException): - self.assertRaises(exception.VolumeBackendAPIException, - self.common.update_group, - group, add_vols, remove_vols) - - def test_delete_group(self): - group = self.data.test_group_1 - volumes = [self.data.test_volume] - context = None - ref_model_update = {'status': fields.GroupStatus.DELETED} - with mock.patch.object(volume_utils, 'is_group_a_cg_snapshot_type', - return_value=True),\ - mock.patch.object(self.rest, 'get_volumes_in_storage_group', - return_value=[]): - model_update, __ = self.common.delete_group( - context, group, volumes) - self.assertEqual(ref_model_update, model_update) - - def test_delete_group_success(self): - group = self.data.test_group_1 - volumes = [] - ref_model_update = {'status': fields.GroupStatus.DELETED} - with mock.patch.object(volume_utils, 'is_group_a_cg_snapshot_type', - return_value=True),\ - mock.patch.object(self.rest, 'get_volumes_in_storage_group', - return_value=[]): - model_update, __ = self.common._delete_group(group, volumes) - self.assertEqual(ref_model_update, model_update) - - def test_delete_group_already_deleted(self): - group = self.data.test_group_failed - ref_model_update = {'status': fields.GroupStatus.DELETED} - volumes = [] - with mock.patch.object(volume_utils, 'is_group_a_cg_snapshot_type', - return_value=True): - model_update, __ = self.common._delete_group(group, volumes) - self.assertEqual(ref_model_update, model_update) - - @mock.patch.object(volume_utils, 'is_group_a_cg_snapshot_type', - return_value=True) - def test_delete_group_failed(self, mock_check): - group = self.data.test_group_1 - volumes = [] - ref_model_update = {'status': fields.GroupStatus.ERROR_DELETING} - with mock.patch.object( - self.rest, 'delete_storage_group', - side_effect=exception.VolumeBackendAPIException): - model_update, __ = self.common._delete_group( - group, volumes) - self.assertEqual(ref_model_update, model_update) - - def test_create_group_from_src_success(self): - context 
        # …continuation of `context = None` split across the hunk boundary.
        = None
        group = self.data.test_group_1
        group_snapshot = self.data.test_group_snapshot_1
        snapshots = []
        volumes = [self.data.test_volume]
        source_group = None
        source_vols = []
        ref_model_update = {'status': fields.GroupStatus.AVAILABLE}
        with mock.patch.object(volume_utils, 'is_group_a_cg_snapshot_type',
                               return_value=True):
            model_update, volumes_model_update = (
                self.common.create_group_from_src(
                    context, group, volumes,
                    group_snapshot, snapshots,
                    source_group, source_vols))
            self.assertEqual(ref_model_update, model_update)


class VMAXFCTest(test.TestCase):
    """Tests for the VMAX FC driver's delegation to the common layer."""

    def setUp(self):
        self.data = VMAXCommonData()

        super(VMAXFCTest, self).setUp()
        config_group = 'FCTests'
        # Build a throwaway XML config file and fake configuration so the
        # driver can be instantiated without a real backend.
        self.fake_xml = FakeXML().create_fake_config_file(
            config_group, self.data.port_group_name_f)
        self.configuration = FakeConfiguration(self.fake_xml, config_group)
        # Replace the REST session factory so no HTTP requests are made.
        rest.VMAXRest._establish_rest_session = mock.Mock(
            return_value=FakeRequestsSession())
        driver = fc.VMAXFCDriver(configuration=self.configuration)
        self.driver = driver
        self.common = self.driver.common
        self.masking = self.common.masking
        self.utils = self.common.utils
        self.utils.get_volumetype_extra_specs = (
            mock.Mock(return_value=self.data.vol_type_extra_specs))

    def test_create_volume(self):
        """Driver create_volume forwards the volume to the common layer."""
        with mock.patch.object(self.common, 'create_volume'):
            self.driver.create_volume(self.data.test_volume)
            self.common.create_volume.assert_called_once_with(
                self.data.test_volume)

    def test_create_volume_from_snapshot(self):
        """Driver create_volume_from_snapshot delegates (volume, snapshot)."""
        volume = self.data.test_clone_volume
        snapshot = self.data.test_snapshot
        with mock.patch.object(self.common, 'create_volume_from_snapshot'):
            self.driver.create_volume_from_snapshot(volume, snapshot)
            self.common.create_volume_from_snapshot.assert_called_once_with(
                volume, snapshot)

    def test_create_cloned_volume(self):
        """Driver create_cloned_volume delegates (clone, source)."""
        volume = self.data.test_clone_volume
        src_volume = self.data.test_volume
        with mock.patch.object(self.common, 'create_cloned_volume'):
            # …continues in the next hunk (call + assert_called_once_with)
self.driver.create_cloned_volume(volume, src_volume) - self.common.create_cloned_volume.assert_called_once_with( - volume, src_volume) - - def test_delete_volume(self): - with mock.patch.object(self.common, 'delete_volume'): - self.driver.delete_volume(self.data.test_volume) - self.common.delete_volume.assert_called_once_with( - self.data.test_volume) - - def test_create_snapshot(self): - with mock.patch.object(self.common, 'create_snapshot'): - self.driver.create_snapshot(self.data.test_snapshot) - self.common.create_snapshot.assert_called_once_with( - self.data.test_snapshot, self.data.test_snapshot.volume) - - def test_delete_snapshot(self): - with mock.patch.object(self.common, 'delete_snapshot'): - self.driver.delete_snapshot(self.data.test_snapshot) - self.common.delete_snapshot.assert_called_once_with( - self.data.test_snapshot, self.data.test_snapshot.volume) - - def test_initialize_connection(self): - with mock.patch.object(self.common, 'initialize_connection', - return_value=self.data.fc_device_info): - with mock.patch.object(self.driver, 'populate_data'): - self.driver.initialize_connection(self.data.test_volume, - self.data.connector) - self.common.initialize_connection.assert_called_once_with( - self.data.test_volume, self.data.connector) - self.driver.populate_data.assert_called_once_with( - self.data.fc_device_info, self.data.test_volume, - self.data.connector) - - def test_populate_data(self): - with mock.patch.object(self.driver, '_build_initiator_target_map', - return_value=([], {})): - ref_data = { - 'driver_volume_type': 'fibre_channel', - 'data': {'target_lun': self.data.fc_device_info['hostlunid'], - 'target_discovered': True, - 'target_wwn': [], - 'initiator_target_map': {}}} - data = self.driver.populate_data(self.data.fc_device_info, - self.data.test_volume, - self.data.connector) - self.assertEqual(ref_data, data) - self.driver._build_initiator_target_map.assert_called_once_with( - self.data.test_volume, self.data.connector) - - def 
test_terminate_connection(self): - with mock.patch.object(self.common, 'terminate_connection'): - self.driver.terminate_connection(self.data.test_volume, - self.data.connector) - self.common.terminate_connection.assert_called_once_with( - self.data.test_volume, self.data.connector) - - def test_terminate_connection_no_zoning_mappings(self): - with mock.patch.object(self.driver, '_get_zoning_mappings', - return_value=None): - with mock.patch.object(self.common, 'terminate_connection'): - self.driver.terminate_connection(self.data.test_volume, - self.data.connector) - self.common.terminate_connection.assert_not_called() - - def test_get_zoning_mappings(self): - ref_mappings = self.data.zoning_mappings - zoning_mappings = self.driver._get_zoning_mappings( - self.data.test_volume, self.data.connector) - self.assertEqual(ref_mappings, zoning_mappings) - - def test_get_zoning_mappings_no_mv(self): - ref_mappings = {'port_group': None, - 'initiator_group': None, - 'target_wwns': None, - 'init_targ_map': None, - 'array': None} - with mock.patch.object(self.common, 'get_masking_views_from_volume', - return_value=None): - zoning_mappings = self.driver._get_zoning_mappings( - self.data.test_volume, self.data.connector) - self.assertEqual(ref_mappings, zoning_mappings) - - def test_cleanup_zones_other_vols_mapped(self): - ref_data = {'driver_volume_type': 'fibre_channel', - 'data': {}} - data = self.driver._cleanup_zones(self.data.zoning_mappings) - self.assertEqual(ref_data, data) - - def test_cleanup_zones_no_vols_mapped(self): - zoning_mappings = self.data.zoning_mappings - ref_data = {'driver_volume_type': 'fibre_channel', - 'data': {'target_wwn': zoning_mappings['target_wwns'], - 'initiator_target_map': - zoning_mappings['init_targ_map']}} - with mock.patch.object(self.common, 'get_common_masking_views', - return_value=[]): - data = self.driver._cleanup_zones(self.data.zoning_mappings) - self.assertEqual(ref_data, data) - - def test_build_initiator_target_map(self): - 
ref_target_map = {'123456789012345': ['543210987654321'], - '123456789054321': ['123450987654321']} - with mock.patch.object(fczm_utils, 'create_lookup_service', - return_value=FakeLookupService()): - driver = fc.VMAXFCDriver(configuration=self.configuration) - with mock.patch.object(driver.common, - 'get_target_wwns_from_masking_view', - return_value=self.data.target_wwns): - targets, target_map = driver._build_initiator_target_map( - self.data.test_volume, self.data.connector) - self.assertEqual(ref_target_map, target_map) - - def test_extend_volume(self): - with mock.patch.object(self.common, 'extend_volume'): - self.driver.extend_volume(self.data.test_volume, '3') - self.common.extend_volume.assert_called_once_with( - self.data.test_volume, '3') - - def test_get_volume_stats(self): - with mock.patch.object(self.driver, 'update_volume_stats'): - # no refresh - self.driver.get_volume_stats() - self.driver.update_volume_stats.assert_not_called() - # with refresh - self.driver.get_volume_stats(True) - self.driver.update_volume_stats.assert_called_once_with() - - def test_update_volume_stats(self): - with mock.patch.object(self.common, 'update_volume_stats', - return_value={}): - self.driver.update_volume_stats() - self.common.update_volume_stats.assert_called_once_with() - - def test_check_for_setup_error(self): - self.driver.check_for_setup_error() - - def test_ensure_export(self): - self.driver.ensure_export('context', 'volume') - - def test_create_export(self): - self.driver.create_export('context', 'volume', 'connector') - - def test_remove_export(self): - self.driver.remove_export('context', 'volume') - - def test_check_for_export(self): - self.driver.check_for_export('context', 'volume_id') - - def test_manage_existing(self): - with mock.patch.object(self.common, 'manage_existing', - return_value={}): - external_ref = {u'source-name': u'00002'} - self.driver.manage_existing(self.data.test_volume, external_ref) - 
            # …continuation from the previous hunk: manage_existing must be
            # delegated once with the volume and external reference.
            self.common.manage_existing.assert_called_once_with(
                self.data.test_volume, external_ref)

    def test_manage_existing_get_size(self):
        """Driver manage_existing_get_size delegates to the common layer."""
        with mock.patch.object(self.common, 'manage_existing_get_size',
                               return_value='1'):
            external_ref = {u'source-name': u'00002'}
            self.driver.manage_existing_get_size(
                self.data.test_volume, external_ref)
            self.common.manage_existing_get_size.assert_called_once_with(
                self.data.test_volume, external_ref)

    def test_unmanage_volume(self):
        """Driver unmanage delegates to common.unmanage."""
        with mock.patch.object(self.common, 'unmanage',
                               return_value={}):
            self.driver.unmanage(self.data.test_volume)
            self.common.unmanage.assert_called_once_with(
                self.data.test_volume)

    def test_retype(self):
        """Driver retype forwards (volume, new_type, host); ctx/diff dropped."""
        host = {'host': self.data.new_host}
        new_type = {'extra_specs': {}}
        with mock.patch.object(self.common, 'retype',
                               return_value=True):
            self.driver.retype({}, self.data.test_volume, new_type, '', host)
            self.common.retype.assert_called_once_with(
                self.data.test_volume, new_type, host)

    def test_failover_host(self):
        """Driver failover_host forwards volumes with default id/group args."""
        with mock.patch.object(
                self.common, 'failover_host',
                return_value=(self.data.remote_array, [], [])) as mock_fo:
            self.driver.failover_host(self.data.ctx, [self.data.test_volume])
            mock_fo.assert_called_once_with([self.data.test_volume], None,
                                            None)


class VMAXISCSITest(test.TestCase):
    """Tests for the VMAX iSCSI driver's delegation to the common layer."""

    def setUp(self):
        self.data = VMAXCommonData()

        super(VMAXISCSITest, self).setUp()
        config_group = 'ISCSITests'
        # Fake config file/object so the driver initializes offline.
        self.fake_xml = FakeXML().create_fake_config_file(
            config_group, self.data.port_group_name_i)
        configuration = FakeConfiguration(self.fake_xml, config_group)
        # Stub the REST session so no network traffic occurs.
        rest.VMAXRest._establish_rest_session = mock.Mock(
            return_value=FakeRequestsSession())
        driver = iscsi.VMAXISCSIDriver(configuration=configuration)
        self.driver = driver
        self.common = self.driver.common
        self.masking = self.common.masking
        self.utils = self.common.utils
        self.utils.get_volumetype_extra_specs = (
            mock.Mock(return_value=self.data.vol_type_extra_specs))
- def test_create_volume(self): - with mock.patch.object(self.common, 'create_volume'): - self.driver.create_volume(self.data.test_volume) - self.common.create_volume.assert_called_once_with( - self.data.test_volume) - - def test_create_volume_from_snapshot(self): - volume = self.data.test_clone_volume - snapshot = self.data.test_snapshot - with mock.patch.object(self.common, 'create_volume_from_snapshot'): - self.driver.create_volume_from_snapshot(volume, snapshot) - self.common.create_volume_from_snapshot.assert_called_once_with( - volume, snapshot) - - def test_create_cloned_volume(self): - volume = self.data.test_clone_volume - src_volume = self.data.test_volume - with mock.patch.object(self.common, 'create_cloned_volume'): - self.driver.create_cloned_volume(volume, src_volume) - self.common.create_cloned_volume.assert_called_once_with( - volume, src_volume) - - def test_delete_volume(self): - with mock.patch.object(self.common, 'delete_volume'): - self.driver.delete_volume(self.data.test_volume) - self.common.delete_volume.assert_called_once_with( - self.data.test_volume) - - def test_create_snapshot(self): - with mock.patch.object(self.common, 'create_snapshot'): - self.driver.create_snapshot(self.data.test_snapshot) - self.common.create_snapshot.assert_called_once_with( - self.data.test_snapshot, self.data.test_snapshot.volume) - - def test_delete_snapshot(self): - with mock.patch.object(self.common, 'delete_snapshot'): - self.driver.delete_snapshot(self.data.test_snapshot) - self.common.delete_snapshot.assert_called_once_with( - self.data.test_snapshot, self.data.test_snapshot.volume) - - def test_initialize_connection(self): - ref_dict = {'maskingview': self.data.masking_view_name_f, - 'array': self.data.array, - 'hostlunid': 3, - 'device_id': self.data.device_id, - 'ip_and_iqn': [{'ip': self.data.ip, - 'iqn': self.data.initiator}], - 'is_multipath': False} - with mock.patch.object(self.driver, 'get_iscsi_dict'): - with mock.patch.object( - self.common, 
'get_port_group_from_masking_view', - return_value=self.data.port_group_name_i): - self.driver.initialize_connection(self.data.test_volume, - self.data.connector) - self.driver.get_iscsi_dict.assert_called_once_with( - ref_dict, self.data.test_volume) - - def test_get_iscsi_dict_success(self): - ip_and_iqn = self.common._find_ip_and_iqns( - self.data.array, self.data.port_group_name_i) - host_lun_id = self.data.iscsi_device_info['hostlunid'] - volume = self.data.test_volume - device_info = self.data.iscsi_device_info - ref_data = {'driver_volume_type': 'iscsi', 'data': {}} - with mock.patch.object( - self.driver, 'vmax_get_iscsi_properties', return_value={}): - data = self.driver.get_iscsi_dict(device_info, volume) - self.assertEqual(ref_data, data) - self.driver.vmax_get_iscsi_properties.assert_called_once_with( - volume, ip_and_iqn, True, host_lun_id) - - def test_get_iscsi_dict_exception(self): - device_info = {'ip_and_iqn': ''} - self.assertRaises(exception.VolumeBackendAPIException, - self.driver.get_iscsi_dict, - device_info, self.data.test_volume) - - def test_vmax_get_iscsi_properties_one_target_no_auth(self): - vol = deepcopy(self.data.test_volume) - ip_and_iqn = self.common._find_ip_and_iqns( - self.data.array, self.data.port_group_name_i) - host_lun_id = self.data.iscsi_device_info['hostlunid'] - ref_properties = { - 'target_discovered': True, - 'target_iqn': ip_and_iqn[0]['iqn'].split(",")[0], - 'target_portal': ip_and_iqn[0]['ip'] + ":3260", - 'target_lun': host_lun_id, - 'volume_id': self.data.test_volume.id} - iscsi_properties = self.driver.vmax_get_iscsi_properties( - vol, ip_and_iqn, True, host_lun_id) - self.assertEqual(type(ref_properties), type(iscsi_properties)) - self.assertEqual(ref_properties, iscsi_properties) - - def test_vmax_get_iscsi_properties_multiple_targets(self): - ip_and_iqn = [{'ip': self.data.ip, 'iqn': self.data.initiator}, - {'ip': self.data.ip, 'iqn': self.data.iqn}] - host_lun_id = self.data.iscsi_device_info['hostlunid'] - 
ref_properties = { - 'target_portals': ( - [t['ip'] + ":3260" for t in ip_and_iqn]), - 'target_iqns': ( - [t['iqn'].split(",")[0] for t in ip_and_iqn]), - 'target_luns': [host_lun_id] * len(ip_and_iqn), - 'target_discovered': True, - 'target_iqn': ip_and_iqn[0]['iqn'].split(",")[0], - 'target_portal': ip_and_iqn[0]['ip'] + ":3260", - 'target_lun': host_lun_id, - 'volume_id': self.data.test_volume.id} - iscsi_properties = self.driver.vmax_get_iscsi_properties( - self.data.test_volume, ip_and_iqn, True, host_lun_id) - self.assertEqual(ref_properties, iscsi_properties) - - def test_vmax_get_iscsi_properties_auth(self): - vol = deepcopy(self.data.test_volume) - vol.provider_auth = "auth_method auth_username auth_secret" - ip_and_iqn = [{'ip': self.data.ip, 'iqn': self.data.initiator}, - {'ip': self.data.ip, 'iqn': self.data.iqn}] - host_lun_id = self.data.iscsi_device_info['hostlunid'] - ref_properties = { - 'target_portals': ( - [t['ip'] + ":3260" for t in ip_and_iqn]), - 'target_iqns': ( - [t['iqn'].split(",")[0] for t in ip_and_iqn]), - 'target_luns': [host_lun_id] * len(ip_and_iqn), - 'target_discovered': True, - 'target_iqn': ip_and_iqn[0]['iqn'].split(",")[0], - 'target_portal': ip_and_iqn[0]['ip'] + ":3260", - 'target_lun': host_lun_id, - 'volume_id': self.data.test_volume.id, - 'auth_method': 'auth_method', - 'auth_username': 'auth_username', - 'auth_password': 'auth_secret'} - iscsi_properties = self.driver.vmax_get_iscsi_properties( - vol, ip_and_iqn, True, host_lun_id) - self.assertEqual(ref_properties, iscsi_properties) - - def test_terminate_connection(self): - with mock.patch.object(self.common, 'terminate_connection'): - self.driver.terminate_connection(self.data.test_volume, - self.data.connector) - self.common.terminate_connection.assert_called_once_with( - self.data.test_volume, self.data.connector) - - def test_extend_volume(self): - with mock.patch.object(self.common, 'extend_volume'): - self.driver.extend_volume(self.data.test_volume, '3') - 
self.common.extend_volume.assert_called_once_with( - self.data.test_volume, '3') - - def test_get_volume_stats(self): - with mock.patch.object(self.driver, 'update_volume_stats'): - # no refresh - self.driver.get_volume_stats() - self.driver.update_volume_stats.assert_not_called() - # with refresh - self.driver.get_volume_stats(True) - self.driver.update_volume_stats.assert_called_once_with() - - def test_update_volume_stats(self): - with mock.patch.object(self.common, 'update_volume_stats', - return_value={}): - self.driver.update_volume_stats() - self.common.update_volume_stats.assert_called_once_with() - - def test_check_for_setup_error(self): - self.driver.check_for_setup_error() - - def test_ensure_export(self): - self.driver.ensure_export('context', 'volume') - - def test_create_export(self): - self.driver.create_export('context', 'volume', 'connector') - - def test_remove_export(self): - self.driver.remove_export('context', 'volume') - - def test_check_for_export(self): - self.driver.check_for_export('context', 'volume_id') - - def test_manage_existing(self): - with mock.patch.object(self.common, 'manage_existing', - return_value={}): - external_ref = {u'source-name': u'00002'} - self.driver.manage_existing(self.data.test_volume, external_ref) - self.common.manage_existing.assert_called_once_with( - self.data.test_volume, external_ref) - - def test_manage_existing_get_size(self): - with mock.patch.object(self.common, 'manage_existing_get_size', - return_value='1'): - external_ref = {u'source-name': u'00002'} - self.driver.manage_existing_get_size( - self.data.test_volume, external_ref) - self.common.manage_existing_get_size.assert_called_once_with( - self.data.test_volume, external_ref) - - def test_unmanage_volume(self): - with mock.patch.object(self.common, 'unmanage', - return_value={}): - self.driver.unmanage(self.data.test_volume) - self.common.unmanage.assert_called_once_with( - self.data.test_volume) - - def test_retype(self): - host = {'host': 
self.data.new_host} - new_type = {'extra_specs': {}} - with mock.patch.object(self.common, 'retype', - return_value=True): - self.driver.retype({}, self.data.test_volume, new_type, '', host) - self.common.retype.assert_called_once_with( - self.data.test_volume, new_type, host) - - def test_failover_host(self): - with mock.patch.object(self.common, 'failover_host', - return_value={}) as mock_fo: - self.driver.failover_host({}, [self.data.test_volume]) - mock_fo.assert_called_once_with([self.data.test_volume], None, - None) - - -class VMAXMaskingTest(test.TestCase): - def setUp(self): - self.data = VMAXCommonData() - - super(VMAXMaskingTest, self).setUp() - - configuration = mock.Mock() - configuration.safe_get.return_value = 'MaskingTests' - configuration.config_group = 'MaskingTests' - self._gather_info = common.VMAXCommon._gather_info - common.VMAXCommon._gather_info = mock.Mock() - driver = common.VMAXCommon( - 'iSCSI', common.VMAXCommon.VERSION, configuration=configuration) - driver_fc = common.VMAXCommon( - 'FC', common.VMAXCommon.VERSION, configuration=configuration) - self.driver = driver - self.driver_fc = driver_fc - self.mask = self.driver.masking - self.extra_specs = self.data.extra_specs - self.extra_specs['port_group_name'] = self.data.port_group_name_i - self.maskingviewdict = self.driver._populate_masking_dict( - self.data.test_volume, self.data.connector, self.extra_specs) - self.maskingviewdict['extra_specs'] = self.extra_specs - self.device_id = self.data.device_id - self.volume_name = self.data.volume_details[0]['volume_identifier'] - - def tearDown(self): - super(VMAXMaskingTest, self).tearDown() - common.VMAXCommon._gather_info = self._gather_info - - @mock.patch.object( - masking.VMAXMasking, - 'get_or_create_masking_view_and_map_lun') - def test_setup_masking_view(self, mock_get_or_create_mv): - self.driver.masking.setup_masking_view( - self.data.array, self.maskingviewdict, self.extra_specs) - mock_get_or_create_mv.assert_called_once() - - 
@mock.patch.object( - masking.VMAXMasking, - '_check_adding_volume_to_storage_group') - @mock.patch.object( - masking.VMAXMasking, - '_get_default_storagegroup_and_remove_vol', - return_value=VMAXCommonData.defaultstoragegroup_name) - @mock.patch.object( - masking.VMAXMasking, - '_get_or_create_masking_view', - side_effect=[None, "Error in masking view retrieval", - exception.VolumeBackendAPIException]) - @mock.patch.object( - rest.VMAXRest, - 'get_element_from_masking_view', - side_effect=[VMAXCommonData.port_group_name_i, Exception]) - def test_get_or_create_masking_view_and_map_lun( - self, mock_masking_view_element, mock_masking, mock_default_sg, - mock_add_volume): - rollback_dict = ( - self.driver.masking.get_or_create_masking_view_and_map_lun( - self.data.array, self.maskingviewdict['maskingview_name'], - self.maskingviewdict, self.extra_specs)) - self.assertEqual(self.maskingviewdict, rollback_dict) - self.assertRaises( - exception.VolumeBackendAPIException, - self.driver.masking.get_or_create_masking_view_and_map_lun, - self.data.array, self.maskingviewdict['maskingview_name'], - self.maskingviewdict, self.extra_specs) - self.maskingviewdict['slo'] = None - self.assertRaises( - exception.VolumeBackendAPIException, - self.driver.masking.get_or_create_masking_view_and_map_lun, - self.data.array, self.maskingviewdict['maskingview_name'], - self.maskingviewdict, self.extra_specs) - - @mock.patch.object( - masking.VMAXMasking, - 'remove_vol_from_storage_group') - @mock.patch.object( - rest.VMAXRest, - 'is_volume_in_storagegroup', - side_effect=[True, False]) - def test_get_default_storagegroup_and_remove_vol( - self, mock_volume_in_sg, mock_remove_volume): - - self.driver.masking._get_default_storagegroup_and_remove_vol( - self.data.array, self.device_id, self.maskingviewdict, - self.volume_name, self.extra_specs) - mock_remove_volume.assert_called_once() - default_sg_name = ( - self.driver.masking._get_default_storagegroup_and_remove_vol( - self.data.array, 
self.device_id, self.maskingviewdict, - self.volume_name, self.extra_specs)) - self.assertEqual(self.data.defaultstoragegroup_name, default_sg_name) - - @mock.patch.object( - rest.VMAXRest, - 'get_masking_view', - side_effect=[VMAXCommonData.maskingview, - VMAXCommonData.maskingview, None]) - @mock.patch.object( - masking.VMAXMasking, - '_validate_existing_masking_view', - side_effect=[(VMAXCommonData.maskingview[1]['storageGroupId'], - None), (None, "Error Message")]) - @mock.patch.object( - masking.VMAXMasking, - '_create_new_masking_view', - return_value=None) - def test_get_or_create_masking_view( - self, mock_create_mv, mock_validate_mv, - mock_get_mv): - for x in range(0, 3): - self.driver.masking._get_or_create_masking_view( - self.data.array, self.maskingviewdict, self.extra_specs) - mock_create_mv.assert_called_once() - - @mock.patch.object( - masking.VMAXMasking, - '_get_or_create_storage_group', - side_effect=["Storage group not found", None, - "Storage group not found", None, None, None, - None, None, None, None, None]) - @mock.patch.object( - masking.VMAXMasking, - '_check_port_group', - side_effect=[(None, "Port group error"), (None, None), (None, None), - (None, None)]) - @mock.patch.object( - masking.VMAXMasking, - '_get_or_create_initiator_group', - side_effect=[(None, "Initiator group error"), (None, None), - (None, None)]) - @mock.patch.object( - masking.VMAXMasking, - '_check_adding_volume_to_storage_group', - side_effect=["Storage group error", None]) - @mock.patch.object( - masking.VMAXMasking, - 'create_masking_view', - return_value=None) - def test_create_new_masking_view( - self, mock_create_mv, mock_add_volume, mock_create_IG, - mock_check_PG, mock_create_SG): - for x in range(0, 6): - self.driver.masking._create_new_masking_view( - self.data.array, self.maskingviewdict, - self.maskingviewdict['maskingview_name'], self.extra_specs) - mock_create_mv.assert_called_once() - - @mock.patch.object( - masking.VMAXMasking, - 
'_check_existing_storage_group', - side_effect=[(VMAXCommonData.storagegroup_name_i, None), - (VMAXCommonData.storagegroup_name_i, None), - (None, "Error Checking existing storage group")]) - @mock.patch.object( - rest.VMAXRest, - 'get_element_from_masking_view', - return_value=VMAXCommonData.port_group_name_i) - @mock.patch.object( - masking.VMAXMasking, - '_check_port_group', - side_effect=[(None, None), (None, "Error checking pg")]) - @mock.patch.object( - masking.VMAXMasking, - '_check_existing_initiator_group', - return_value=(VMAXCommonData.initiatorgroup_name_i, None)) - def test_validate_existing_masking_view( - self, mock_check_ig, mock_check_pg, mock_get_mv_element, - mock_check_sg): - for x in range(0, 3): - self.driver.masking._validate_existing_masking_view( - self.data.array, self.maskingviewdict, - self.maskingviewdict['maskingview_name'], self.extra_specs) - self.assertEqual(3, mock_check_sg.call_count) - mock_get_mv_element.assert_called_with( - self.data.array, self.maskingviewdict['maskingview_name'], - portgroup=True) - mock_check_ig.assert_called_once() - - @mock.patch.object( - rest.VMAXRest, - 'get_storage_group', - side_effect=[VMAXCommonData.storagegroup_name_i, None, None]) - @mock.patch.object( - provision.VMAXProvision, - 'create_storage_group', - side_effect=[VMAXCommonData.storagegroup_name_i, None]) - def test_get_or_create_storage_group(self, mock_sg, mock_get_sg): - for x in range(0, 2): - self.driver.masking._get_or_create_storage_group( - self.data.array, self.maskingviewdict, - self.data.storagegroup_name_i, self.extra_specs) - self.driver.masking._get_or_create_storage_group( - self.data.array, self.maskingviewdict, - self.data.storagegroup_name_i, self.extra_specs, True) - self.assertEqual(3, mock_get_sg.call_count) - self.assertEqual(2, mock_sg.call_count) - - @mock.patch.object( - masking.VMAXMasking, - '_check_adding_volume_to_storage_group', - return_value=None) - @mock.patch.object( - masking.VMAXMasking, - 
'_get_or_create_storage_group', - return_value=None) - @mock.patch.object( - rest.VMAXRest, - 'get_element_from_masking_view', - return_value=VMAXCommonData.parent_sg_i) - @mock.patch.object( - rest.VMAXRest, - 'is_child_sg_in_parent_sg', - side_effect=[True, False]) - @mock.patch.object( - masking.VMAXMasking, - '_check_add_child_sg_to_parent_sg', - return_value=None) - def test_check_existing_storage_group_success( - self, mock_add_sg, mock_is_child, mock_get_mv_element, - mock_create_sg, mock_add): - masking_view_dict = deepcopy(self.data.masking_view_dict) - masking_view_dict['extra_specs'] = self.data.extra_specs - with mock.patch.object(self.driver.rest, 'get_storage_group', - side_effect=[ - VMAXCommonData.parent_sg_i, - VMAXCommonData.storagegroup_name_i]): - _, msg = ( - self.driver.masking._check_existing_storage_group( - self.data.array, self.maskingviewdict['maskingview_name'], - masking_view_dict)) - self.assertIsNone(msg) - mock_create_sg.assert_not_called() - with mock.patch.object(self.driver.rest, 'get_storage_group', - side_effect=[ - VMAXCommonData.parent_sg_i, None]): - _, msg = ( - self.driver.masking._check_existing_storage_group( - self.data.array, self.maskingviewdict['maskingview_name'], - masking_view_dict)) - self.assertIsNone(msg) - mock_create_sg.assert_called_once_with( - self.data.array, masking_view_dict, - VMAXCommonData.storagegroup_name_f, - self.data.extra_specs) - - @mock.patch.object( - masking.VMAXMasking, - '_check_adding_volume_to_storage_group', - side_effect=[None, "Error Message"]) - @mock.patch.object( - rest.VMAXRest, - 'is_child_sg_in_parent_sg', - side_effect=[True, False, False]) - @mock.patch.object( - rest.VMAXRest, - 'get_element_from_masking_view', - return_value=VMAXCommonData.parent_sg_i) - @mock.patch.object( - rest.VMAXRest, - 'get_storage_group', - side_effect=[None, VMAXCommonData.parent_sg_i, None, - VMAXCommonData.parent_sg_i, None, - VMAXCommonData.parent_sg_i, None]) - def 
test_check_existing_storage_group_failed( - self, mock_get_sg, mock_get_mv_element, mock_child, mock_check): - masking_view_dict = deepcopy(self.data.masking_view_dict) - masking_view_dict['extra_specs'] = self.data.extra_specs - for x in range(0, 4): - _, msg = ( - self.driver.masking._check_existing_storage_group( - self.data.array, self.maskingviewdict['maskingview_name'], - masking_view_dict)) - self.assertIsNotNone(msg) - self.assertEqual(7, mock_get_sg.call_count) - self.assertEqual(1, mock_check.call_count) - - @mock.patch.object(rest.VMAXRest, 'get_portgroup', - side_effect=[VMAXCommonData.port_group_name_i, None]) - def test_check_port_group( - self, mock_get_pg): - for x in range(0, 2): - _, msg = self.driver.masking._check_port_group( - self.data.array, self.maskingviewdict['maskingview_name']) - self.assertIsNotNone(msg) - self.assertEqual(2, mock_get_pg.call_count) - - @mock.patch.object( - masking.VMAXMasking, '_find_initiator_group', - side_effect=[VMAXCommonData.initiatorgroup_name_i, None, None]) - @mock.patch.object(masking.VMAXMasking, '_create_initiator_group', - side_effect=[VMAXCommonData.initiatorgroup_name_i, None] - ) - def test_get_or_create_initiator_group(self, mock_create_ig, mock_find_ig): - self.driver.masking._get_or_create_initiator_group( - self.data.array, self.data.initiatorgroup_name_i, - self.data.connector, self.extra_specs) - mock_create_ig.assert_not_called() - found_init_group, msg = ( - self.driver.masking._get_or_create_initiator_group( - self.data.array, self.data.initiatorgroup_name_i, - self.data.connector, self.extra_specs)) - self.assertIsNone(msg) - found_init_group, msg = ( - self.driver.masking._get_or_create_initiator_group( - self.data.array, self.data.initiatorgroup_name_i, - self.data.connector, self.extra_specs)) - self.assertIsNotNone(msg) - - def test_check_existing_initiator_group(self): - with mock.patch.object( - rest.VMAXRest, 'get_element_from_masking_view', - 
return_value=VMAXCommonData.inititiatorgroup): - ig_from_mv, msg = ( - self.driver.masking._check_existing_initiator_group( - self.data.array, self.maskingviewdict['maskingview_name'], - self.maskingviewdict, self.data.storagegroup_name_i, - self.data.port_group_name_i, self.extra_specs)) - self.assertEqual(self.data.inititiatorgroup, ig_from_mv) - - def test_check_adding_volume_to_storage_group(self): - with mock.patch.object( - masking.VMAXMasking, '_create_initiator_group'): - with mock.patch.object( - rest.VMAXRest, 'is_volume_in_storagegroup', - side_effect=[True, False]): - msg = ( - self.driver.masking._check_adding_volume_to_storage_group( - self.data.array, self.device_id, - self.data.storagegroup_name_i, - self.maskingviewdict[utils.VOL_NAME], - self.maskingviewdict[utils.EXTRA_SPECS])) - self.assertIsNone(msg) - msg = ( - self.driver.masking._check_adding_volume_to_storage_group( - self.data.array, self.device_id, - self.data.storagegroup_name_i, - self.maskingviewdict[utils.VOL_NAME], - self.maskingviewdict[utils.EXTRA_SPECS])) - - @mock.patch.object(rest.VMAXRest, 'add_vol_to_sg') - def test_add_volume_to_storage_group(self, mock_add_volume): - self.driver.masking.add_volume_to_storage_group( - self.data.array, self.device_id, self.data.storagegroup_name_i, - self.volume_name, self.extra_specs) - mock_add_volume.assert_called_once() - - @mock.patch.object(rest.VMAXRest, 'remove_vol_from_sg') - def test_remove_vol_from_storage_group(self, mock_remove_volume): - with mock.patch.object( - rest.VMAXRest, 'is_volume_in_storagegroup', - side_effect=[False, True]): - self.driver.masking.remove_vol_from_storage_group( - self.data.array, self.device_id, self.data.storagegroup_name_i, - self.volume_name, self.extra_specs) - mock_remove_volume.assert_called_once() - self.assertRaises( - exception.VolumeBackendAPIException, - self.driver.masking.remove_vol_from_storage_group, - self.data.array, self.device_id, self.data.storagegroup_name_i, - self.volume_name, 
self.extra_specs) - - def test_find_initiator_names(self): - foundinitiatornames = self.driver.masking.find_initiator_names( - self.data.connector) - self.assertEqual(self.data.connector['initiator'], - foundinitiatornames[0]) - foundinitiatornames = self.driver_fc.masking.find_initiator_names( - self.data.connector) - self.assertEqual(self.data.connector['wwpns'][0], - foundinitiatornames[0]) - connector = {'ip': self.data.ip, 'initiator': None, 'host': 'HostX'} - self.assertRaises( - exception.VolumeBackendAPIException, - self.driver.masking.find_initiator_names, connector) - self.assertRaises( - exception.VolumeBackendAPIException, - self.driver_fc.masking.find_initiator_names, connector) - - def test_find_initiator_group(self): - with mock.patch.object( - rest.VMAXRest, 'get_in_use_initiator_list_from_array', - return_value=self.data.initiator_list[2]['initiatorId']): - with mock.patch.object( - rest.VMAXRest, 'get_initiator_group_from_initiator', - return_value=self.data.initiator_list): - found_init_group_nam = ( - self.driver.masking._find_initiator_group( - self.data.array, ['FA-1D:4:123456789012345'])) - self.assertEqual(self.data.initiator_list, - found_init_group_nam) - found_init_group_nam = ( - self.driver.masking._find_initiator_group( - self.data.array, ['Error'])) - self.assertIsNone(found_init_group_nam) - - def test_create_masking_view(self): - with mock.patch.object(rest.VMAXRest, 'create_masking_view', - side_effect=[None, Exception]): - error_message = self.driver.masking.create_masking_view( - self.data.array, self.maskingviewdict['maskingview_name'], - self.data.storagegroup_name_i, self.data.port_group_name_i, - self.data.initiatorgroup_name_i, self.extra_specs) - self.assertIsNone(error_message) - error_message = self.driver.masking.create_masking_view( - self.data.array, self.maskingviewdict['maskingview_name'], - self.data.storagegroup_name_i, self.data.port_group_name_i, - self.data.initiatorgroup_name_i, self.extra_specs) - 
self.assertIsNotNone(error_message) - - @mock.patch.object(masking.VMAXMasking, '_check_ig_rollback') - def test_check_if_rollback_action_for_masking_required(self, - mock_check_ig): - with mock.patch.object(rest.VMAXRest, - 'get_storage_groups_from_volume', - side_effect=[ - exception.VolumeBackendAPIException, - self.data.defaultstoragegroup_name, - self.data.defaultstoragegroup_name, None, - None, ]): - self.assertRaises( - exception.VolumeBackendAPIException, - self.mask.check_if_rollback_action_for_masking_required, - self.data.array, self.device_id, self.maskingviewdict) - with mock.patch.object(masking.VMAXMasking, - 'remove_and_reset_members'): - self.maskingviewdict[ - 'default_sg_name'] = self.data.defaultstoragegroup_name - error_message = ( - self.mask.check_if_rollback_action_for_masking_required( - self.data.array, self.device_id, self.maskingviewdict)) - self.assertIsNone(error_message) - - @mock.patch.object(rest.VMAXRest, 'delete_masking_view') - @mock.patch.object(rest.VMAXRest, 'delete_initiator_group') - @mock.patch.object(rest.VMAXRest, 'get_initiator_group') - @mock.patch.object(masking.VMAXMasking, '_find_initiator_group', - return_value=VMAXCommonData.initiatorgroup_name_i) - def test_verify_initiator_group_from_masking_view( - self, mock_find_ig, mock_get_ig, mock_delete_ig, mock_delete_mv): - self.mask._verify_initiator_group_from_masking_view( - self.data.array, self.maskingviewdict['maskingview_name'], - self.maskingviewdict, self.data.initiatorgroup_name_i, - self.data.storagegroup_name_i, self.data.port_group_name_i, - self.extra_specs) - mock_get_ig.assert_not_called() - mock_get_ig.return_value = False - self.mask._verify_initiator_group_from_masking_view( - self.data.array, self.maskingviewdict['maskingview_name'], - self.maskingviewdict, 'OS-Wrong-Host-I-IG', - self.data.storagegroup_name_i, self.data.port_group_name_i, - self.extra_specs) - mock_get_ig.assert_called() - - @mock.patch.object(rest.VMAXRest, 'delete_masking_view') - 
@mock.patch.object(rest.VMAXRest, 'delete_initiator_group') - @mock.patch.object(rest.VMAXRest, 'get_initiator_group', - return_value=True) - @mock.patch.object(masking.VMAXMasking, '_find_initiator_group', - return_value=VMAXCommonData.initiatorgroup_name_i) - def test_verify_initiator_group_from_masking_view2( - self, mock_find_ig, mock_get_ig, mock_delete_ig, mock_delete_mv): - mock_delete_mv.side_effect = [None, Exception] - self.mask._verify_initiator_group_from_masking_view( - self.data.array, self.maskingviewdict['maskingview_name'], - self.maskingviewdict, 'OS-Wrong-Host-I-IG', - self.data.storagegroup_name_i, self.data.port_group_name_i, - self.extra_specs) - mock_delete_mv.assert_called() - _, found_ig_from_connector = ( - self.mask._verify_initiator_group_from_masking_view( - self.data.array, self.maskingviewdict['maskingview_name'], - self.maskingviewdict, 'OS-Wrong-Host-I-IG', - self.data.storagegroup_name_i, self.data.port_group_name_i, - self.extra_specs)) - self.assertEqual(self.data.initiatorgroup_name_i, - found_ig_from_connector) - - @mock.patch.object(rest.VMAXRest, 'create_initiator_group') - def test_create_initiator_group(self, mock_create_ig): - initiator_names = self.mask.find_initiator_names(self.data.connector) - ret_init_group_name = self.mask._create_initiator_group( - self.data.array, self.data.initiatorgroup_name_i, initiator_names, - self.extra_specs) - self.assertEqual(self.data.initiatorgroup_name_i, ret_init_group_name) - - @mock.patch.object(masking.VMAXMasking, - '_last_volume_delete_initiator_group') - def test_check_ig_rollback(self, mock_last_volume): - with mock.patch.object(masking.VMAXMasking, '_find_initiator_group', - side_effect=[ - None, 'FAKE-I-IG', - self.data.initiatorgroup_name_i]): - for x in range(0, 2): - self.mask._check_ig_rollback(self.data.array, - self.data.initiatorgroup_name_i, - self.data.connector) - mock_last_volume.assert_not_called() - self.mask._check_ig_rollback( - self.data.array, 
self.data.initiatorgroup_name_i, - self.data.connector) - mock_last_volume.assert_called() - - @mock.patch.object(masking.VMAXMasking, '_cleanup_deletion') - @mock.patch.object(masking.VMAXMasking, - 'add_volume_to_default_storage_group') - def test_remove_and_reset_members(self, mock_ret_to_sg, mock_cleanup): - self.mask.remove_and_reset_members(self.data.array, self.device_id, - self.volume_name, self.extra_specs, - reset=False) - mock_ret_to_sg.assert_not_called() - self.mask.remove_and_reset_members(self.data.array, self.device_id, - self.volume_name, self.extra_specs) - mock_ret_to_sg.assert_called_once() - - @mock.patch.object(rest.VMAXRest, 'get_storage_groups_from_volume', - return_value=[VMAXCommonData.storagegroup_name_i]) - @mock.patch.object(masking.VMAXMasking, 'remove_volume_from_sg') - def test_cleanup_deletion(self, mock_remove_vol, mock_get_sg): - self.mask._cleanup_deletion(self.data.array, self.device_id, - self.volume_name, self.extra_specs, None) - mock_get_sg.assert_called_once() - - @mock.patch.object(masking.VMAXMasking, '_last_vol_in_sg') - @mock.patch.object(masking.VMAXMasking, '_multiple_vols_in_sg') - def test_remove_volume_from_sg(self, mock_multiple_vols, mock_last_vol): - with mock.patch.object( - rest.VMAXRest, 'get_masking_views_from_storage_group', - return_value=None): - with mock.patch.object( - rest.VMAXRest, 'get_num_vols_in_sg', - side_effect=[2, 1]): - self.mask.remove_volume_from_sg( - self.data.array, self.device_id, self.volume_name, - self.data.defaultstoragegroup_name, self.extra_specs) - mock_last_vol.assert_not_called() - self.mask.remove_volume_from_sg( - self.data.array, self.device_id, self.volume_name, - self.data.defaultstoragegroup_name, self.extra_specs) - mock_last_vol.assert_called() - - @mock.patch.object(masking.VMAXMasking, '_last_vol_in_sg') - @mock.patch.object(masking.VMAXMasking, '_multiple_vols_in_sg') - def test_remove_volume_from_sg_2(self, mock_multiple_vols, mock_last_vol): - with 
mock.patch.object( - rest.VMAXRest, 'is_volume_in_storagegroup', - return_value=True): - with mock.patch.object( - rest.VMAXRest, 'get_masking_views_from_storage_group', - return_value=[self.data.masking_view_name_i]): - with mock.patch.object( - rest.VMAXRest, 'get_num_vols_in_sg', - side_effect=[2, 1]): - self.mask.remove_volume_from_sg( - self.data.array, self.device_id, self.volume_name, - self.data.storagegroup_name_i, self.extra_specs) - mock_last_vol.assert_not_called() - self.mask.remove_volume_from_sg( - self.data.array, self.device_id, self.volume_name, - self.data.storagegroup_name_i, self.extra_specs) - mock_last_vol.assert_called() - - @mock.patch.object(masking.VMAXMasking, '_last_vol_masking_views', - return_value=True) - @mock.patch.object(masking.VMAXMasking, '_last_vol_no_masking_views', - return_value=True) - def test_last_vol_in_sg(self, mock_no_mv, mock_mv): - mv_list = [self.data.masking_view_name_i, - self.data.masking_view_name_f] - with mock.patch.object(rest.VMAXRest, - 'get_masking_views_from_storage_group', - side_effect=[mv_list, []]): - for x in range(0, 2): - self.mask._last_vol_in_sg( - self.data.array, self.device_id, self.volume_name, - self.data.storagegroup_name_i, self.extra_specs, - self.data.connector) - self.assertEqual(1, mock_mv.call_count) - self.assertEqual(1, mock_no_mv.call_count) - - @mock.patch.object(masking.VMAXMasking, '_remove_last_vol_and_delete_sg') - @mock.patch.object(masking.VMAXMasking, '_delete_cascaded_storage_groups') - @mock.patch.object(rest.VMAXRest, 'get_num_vols_in_sg', - side_effect=[1, 3]) - @mock.patch.object(rest.VMAXRest, 'delete_storage_group') - @mock.patch.object(masking.VMAXMasking, 'get_parent_sg_from_child', - side_effect=[None, 'parent_sg_name', 'parent_sg_name']) - def test_last_vol_no_masking_views( - self, mock_get_parent, mock_delete, mock_num_vols, - mock_delete_casc, mock_remove): - for x in range(0, 3): - self.mask._last_vol_no_masking_views( - self.data.array, 
self.data.storagegroup_name_i, - self.device_id, self.volume_name, self.extra_specs) - self.assertEqual(1, mock_delete.call_count) - self.assertEqual(1, mock_delete_casc.call_count) - self.assertEqual(1, mock_remove.call_count) - - @mock.patch.object(masking.VMAXMasking, '_remove_last_vol_and_delete_sg') - @mock.patch.object(masking.VMAXMasking, '_delete_mv_ig_and_sg') - @mock.patch.object(masking.VMAXMasking, '_get_num_vols_from_mv', - side_effect=[(1, 'parent_name'), (3, 'parent_name')]) - def test_last_vol_masking_views( - self, mock_num_vols, mock_delete_all, mock_remove): - for x in range(0, 2): - self.mask._last_vol_masking_views( - self.data.array, self.data.storagegroup_name_i, - [self.data.masking_view_name_i], self.device_id, - self.volume_name, self.extra_specs, self.data.connector) - self.assertEqual(1, mock_delete_all.call_count) - self.assertEqual(1, mock_remove.call_count) - - @mock.patch.object(rest.VMAXRest, 'get_num_vols_in_sg') - @mock.patch.object(masking.VMAXMasking, 'remove_vol_from_storage_group') - def test_multiple_vols_in_sg(self, mock_remove_vol, mock_get_volumes): - self.mask._multiple_vols_in_sg( - self.data.array, self.device_id, self.data.storagegroup_name_i, - self.volume_name, self.extra_specs) - mock_get_volumes.assert_called_once() - - @mock.patch.object(rest.VMAXRest, 'get_element_from_masking_view') - @mock.patch.object(masking.VMAXMasking, '_last_volume_delete_masking_view') - @mock.patch.object(masking.VMAXMasking, - '_last_volume_delete_initiator_group') - @mock.patch.object(masking.VMAXMasking, '_delete_cascaded_storage_groups') - def test_delete_mv_ig_and_sg(self, mock_delete_sg, mock_delete_ig, - mock_delete_mv, mock_get_element): - self.mask._delete_mv_ig_and_sg( - self.data.array, self.data.masking_view_name_i, - self.data.storagegroup_name_i, self.data.parent_sg_i, - self.data.connector) - mock_delete_sg.assert_called_once() - - @mock.patch.object(rest.VMAXRest, 'delete_masking_view') - def 
test_last_volume_delete_masking_view(self, mock_delete_mv): - self.mask._last_volume_delete_masking_view( - self.data.array, self.data.masking_view_name_i) - mock_delete_mv.assert_called_once() - - @mock.patch.object(masking.VMAXMasking, - 'get_or_create_default_storage_group') - @mock.patch.object(masking.VMAXMasking, 'add_volume_to_storage_group') - def test_add_volume_to_default_storage_group( - self, mock_add_sg, mock_get_sg): - self.mask.add_volume_to_default_storage_group( - self.data.array, self.device_id, self.volume_name, - self.extra_specs) - mock_add_sg.assert_called_once() - - @mock.patch.object(provision.VMAXProvision, 'create_storage_group') - def test_get_or_create_default_storage_group(self, mock_create_sg): - with mock.patch.object( - rest.VMAXRest, 'get_vmax_default_storage_group', - return_value=(None, self.data.storagegroup_name_i)): - storage_group_name = self.mask.get_or_create_default_storage_group( - self.data.array, self.data.srp, self.data.slo, - self.data.workload, self.extra_specs) - self.assertEqual(self.data.storagegroup_name_i, storage_group_name) - with mock.patch.object( - rest.VMAXRest, 'get_vmax_default_storage_group', - return_value=("test_sg", self.data.storagegroup_name_i)): - with mock.patch.object( - rest.VMAXRest, 'get_masking_views_from_storage_group', - return_value=self.data.masking_view_name_i): - self.assertRaises( - exception.VolumeBackendAPIException, - self.mask.get_or_create_default_storage_group, - self.data.array, self.data.srp, self.data.slo, - self.data.workload, self.extra_specs) - - @mock.patch.object(rest.VMAXRest, 'delete_storage_group') - @mock.patch.object(masking.VMAXMasking, 'remove_vol_from_storage_group') - def test_remove_last_vol_and_delete_sg(self, mock_delete_sg, mock_vol_sg): - self.mask._remove_last_vol_and_delete_sg( - self.data.array, self.device_id, self.volume_name, - self.data.storagegroup_name_i, self.extra_specs) - mock_delete_sg.assert_called_once() - - @mock.patch.object(rest.VMAXRest, 
'delete_initiator_group') - def test_last_volume_delete_initiator_group(self, mock_delete_ig): - self.mask._last_volume_delete_initiator_group( - self.data.array, self.data.initiatorgroup_name_f, 'Wrong_Host') - mock_delete_ig.assert_not_called() - mv_list = [self.data.masking_view_name_i, - self.data.masking_view_name_f] - with mock.patch.object(rest.VMAXRest, - 'get_masking_views_by_initiator_group', - side_effect=[mv_list, []]): - self.mask._last_volume_delete_initiator_group( - self.data.array, self.data.initiatorgroup_name_i, - self.data.connector['host']) - mock_delete_ig.assert_not_called() - self.mask._last_volume_delete_initiator_group( - self.data.array, self.data.initiatorgroup_name_i, - self.data.connector['host']) - mock_delete_ig.assert_called_once() - - def test_populate_masking_dict_init_check_false(self): - extra_specs = self.data.extra_specs - connector = self.data.connector - with mock.patch.object(self.driver, '_get_initiator_check_flag', - return_value=False): - masking_view_dict = self.driver._populate_masking_dict( - self.data.test_volume, connector, extra_specs) - self.assertFalse(masking_view_dict['initiator_check']) - - def test_populate_masking_dict_init_check_true(self): - extra_specs = self.data.extra_specs - connector = self.data.connector - with mock.patch.object(self.driver, '_get_initiator_check_flag', - return_value=True): - masking_view_dict = self.driver._populate_masking_dict( - self.data.test_volume, connector, extra_specs) - self.assertTrue(masking_view_dict['initiator_check']) - - def test_check_existing_initiator_group_verify_true(self): - mv_dict = deepcopy(self.data.masking_view_dict) - mv_dict['initiator_check'] = True - with mock.patch.object( - rest.VMAXRest, 'get_element_from_masking_view', - return_value=VMAXCommonData.initiatorgroup_name_f): - with mock.patch.object( - self.mask, '_verify_initiator_group_from_masking_view', - return_value=(True, self.data.initiatorgroup_name_f)): - 
self.mask._check_existing_initiator_group( - self.data.array, self.data.masking_view_name_f, - mv_dict, self.data.storagegroup_name_f, - self.data.port_group_name_f, self.data.extra_specs) - (self.mask._verify_initiator_group_from_masking_view. - assert_called_once_with( - self.data.array, self.data.masking_view_name_f, - mv_dict, self.data.initiatorgroup_name_f, - self.data.storagegroup_name_f, - self.data.port_group_name_f, self.data.extra_specs)) - - @mock.patch.object(masking.VMAXMasking, 'add_child_sg_to_parent_sg', - side_effect=[ - None, exception.VolumeBackendAPIException]) - @mock.patch.object(rest.VMAXRest, 'is_child_sg_in_parent_sg', - side_effect=[True, False, False]) - def test_check_add_child_sg_to_parent_sg(self, mock_is_child, mock_add): - for x in range(0, 3): - message = self.mask._check_add_child_sg_to_parent_sg( - self.data.array, self.data.storagegroup_name_i, - self.data.parent_sg_i, self.data.extra_specs) - self.assertIsNotNone(message) - - @mock.patch.object(rest.VMAXRest, 'add_child_sg_to_parent_sg') - @mock.patch.object(rest.VMAXRest, 'is_child_sg_in_parent_sg', - side_effect=[True, False]) - def test_add_child_sg_to_parent_sg(self, mock_is_child, mock_add): - for x in range(0, 2): - self.mask.add_child_sg_to_parent_sg( - self.data.array, self.data.storagegroup_name_i, - self.data.parent_sg_i, self.data.extra_specs) - self.assertEqual(1, mock_add.call_count) - - def test_get_parent_sg_from_child(self): - with mock.patch.object(self.driver.rest, 'get_storage_group', - side_effect=[None, self.data.sg_details[1]]): - sg_name = self.mask.get_parent_sg_from_child( - self.data.array, self.data.storagegroup_name_i) - self.assertIsNone(sg_name) - sg_name2 = self.mask.get_parent_sg_from_child( - self.data.array, self.data.storagegroup_name_f) - self.assertEqual(self.data.parent_sg_f, sg_name2) - - @mock.patch.object(rest.VMAXRest, 'get_element_from_masking_view', - return_value='parent_sg') - @mock.patch.object(rest.VMAXRest, 'get_num_vols_in_sg', 
- return_value=2) - def test_get_num_vols_from_mv(self, mock_num, mock_element): - num_vols, sg = self.mask._get_num_vols_from_mv( - self.data.array, self.data.masking_view_name_f) - self.assertEqual(2, num_vols) - - @mock.patch.object(rest.VMAXRest, 'delete_storage_group') - def test_delete_cascaded(self, mock_delete): - self.mask._delete_cascaded_storage_groups( - self.data.array, self.data.masking_view_name_f, - self.data.parent_sg_f) - self.assertEqual(2, mock_delete.call_count) - - @mock.patch.object(masking.VMAXMasking, 'add_child_sg_to_parent_sg') - @mock.patch.object(masking.VMAXMasking, - 'move_volume_between_storage_groups') - @mock.patch.object(provision.VMAXProvision, 'create_storage_group') - def test_pre_live_migration(self, mock_create_sg, mock_move, mock_add): - with mock.patch.object( - rest.VMAXRest, 'get_storage_group', - side_effect=[None, self.data.sg_details[1]["storageGroupId"]] - ): - source_sg = self.data.sg_details[2]["storageGroupId"] - source_parent_sg = self.data.sg_details[4]["storageGroupId"] - source_nf_sg = source_parent_sg[:-2] + 'NONFAST' - self.data.iscsi_device_info['device_id'] = self.data.device_id - self.mask.pre_live_migration( - source_nf_sg, source_sg, source_parent_sg, False, - self.data.iscsi_device_info, None) - mock_create_sg.assert_called_once() - - @mock.patch.object(rest.VMAXRest, 'delete_storage_group') - @mock.patch.object(rest.VMAXRest, 'remove_child_sg_from_parent_sg') - def test_post_live_migration(self, mock_remove_child_sg, mock_delete_sg): - self.data.iscsi_device_info['source_sg'] = self.data.sg_details[2][ - "storageGroupId"] - self.data.iscsi_device_info['source_parent_sg'] = self.data.sg_details[ - 4]["storageGroupId"] - with mock.patch.object( - rest.VMAXRest, 'get_num_vols_in_sg', side_effect=[0, 1]): - self.mask.post_live_migration(self.data.iscsi_device_info, None) - mock_remove_child_sg.assert_called_once() - mock_delete_sg.assert_called_once() - - @mock.patch.object(masking.VMAXMasking, - 
'move_volume_between_storage_groups') - @mock.patch.object(rest.VMAXRest, 'delete_storage_group') - @mock.patch.object(rest.VMAXRest, 'remove_child_sg_from_parent_sg') - @mock.patch.object(masking.VMAXMasking, 'remove_volume_from_sg') - def test_failed_live_migration( - self, mock_remove_volume, mock_remove_child_sg, mock_delete_sg, - mock_move): - device_dict = self.data.iscsi_device_info - device_dict['device_id'] = self.data.device_id - device_dict['source_sg'] = self.data.sg_details[2]["storageGroupId"] - device_dict['source_parent_sg'] = self.data.sg_details[4][ - "storageGroupId"] - device_dict['source_nf_sg'] = ( - self.data.sg_details[4]["storageGroupId"][:-2] + 'NONFAST') - sg_list = [device_dict['source_nf_sg']] - with mock.patch.object( - rest.VMAXRest, 'is_child_sg_in_parent_sg', - side_effect=[True, False]): - self.mask.failed_live_migration(device_dict, sg_list, None) - mock_remove_volume.assert_not_called() - mock_remove_child_sg.assert_called_once() - - -class VMAXCommonReplicationTest(test.TestCase): - def setUp(self): - self.data = VMAXCommonData() - - super(VMAXCommonReplicationTest, self).setUp() - config_group = 'CommonReplicationTests' - self.fake_xml = FakeXML().create_fake_config_file( - config_group, self.data.port_group_name_f) - self.replication_device = { - 'target_device_id': self.data.remote_array, - 'remote_port_group': self.data.port_group_name_f, - 'remote_pool': self.data.srp2, - 'rdf_group_label': self.data.rdf_group_name, - 'allow_extend': 'True'} - configuration = FakeConfiguration( - self.fake_xml, config_group, - replication_device=self.replication_device) - rest.VMAXRest._establish_rest_session = mock.Mock( - return_value=FakeRequestsSession()) - driver = fc.VMAXFCDriver(configuration=configuration) - self.driver = driver - self.common = self.driver.common - self.masking = self.common.masking - self.provision = self.common.provision - self.rest = self.common.rest - self.utils = self.common.utils - 
self.utils.get_volumetype_extra_specs = ( - mock.Mock( - return_value=self.data.vol_type_extra_specs_rep_enabled)) - self.extra_specs = deepcopy(self.data.extra_specs_rep_enabled) - self.extra_specs['retries'] = 0 - self.extra_specs['interval'] = 0 - - def test_get_replication_info(self): - self.common._get_replication_info() - self.assertTrue(self.common.replication_enabled) - - def test_create_replicated_volume(self): - extra_specs = deepcopy(self.extra_specs) - extra_specs['port_group_name'] = self.data.port_group_name_f - vol_identifier = self.utils.get_volume_element_name( - self.data.test_volume.id) - with mock.patch.object(self.common, '_replicate_volume', - return_value={}) as mock_rep: - self.common.create_volume(self.data.test_volume) - volume_dict = self.data.provider_location - mock_rep.assert_called_once_with( - self.data.test_volume, vol_identifier, volume_dict, - extra_specs) - - def test_create_cloned_replicated_volume(self): - extra_specs = deepcopy(self.extra_specs) - extra_specs['port_group_name'] = self.data.port_group_name_f - with mock.patch.object(self.common, '_replicate_volume', - return_value={}) as mock_rep: - self.common.create_cloned_volume( - self.data.test_clone_volume, self.data.test_volume) - volume_dict = self.data.provider_location - mock_rep.assert_called_once_with( - self.data.test_clone_volume, - self.data.test_clone_volume.name, volume_dict, extra_specs) - - def test_create_replicated_volume_from_snap(self): - extra_specs = deepcopy(self.extra_specs) - extra_specs['port_group_name'] = self.data.port_group_name_f - with mock.patch.object(self.common, '_replicate_volume', - return_value={}) as mock_rep: - self.common.create_volume_from_snapshot( - self.data.test_clone_volume, self.data.test_snapshot) - volume_dict = self.data.provider_location - mock_rep.assert_called_once_with( - self.data.test_clone_volume, "snapshot-12345", volume_dict, - extra_specs) - - def test_replicate_volume(self): - volume_dict = 
self.data.provider_location - rs_enabled = fields.ReplicationStatus.ENABLED - with mock.patch.object(self.common, 'setup_volume_replication', - return_value=(rs_enabled, {})) as mock_setup: - self.common._replicate_volume( - self.data.test_volume, "1", volume_dict, self.extra_specs) - mock_setup.assert_called_once_with( - self.data.array, self.data.test_volume, - self.data.device_id, self.extra_specs) - - def test_replicate_volume_exception(self): - volume_dict = self.data.provider_location - with mock.patch.object( - self.common, 'setup_volume_replication', - side_effect=exception.VolumeBackendAPIException(data='')): - with mock.patch.object( - self.common, '_cleanup_replication_source') as mock_clean: - self.assertRaises(exception.VolumeBackendAPIException, - self.common._replicate_volume, - self.data.test_volume, - "1", volume_dict, self.extra_specs) - mock_clean.assert_called_once_with( - self.data.array, self.data.test_volume, "1", - volume_dict, self.extra_specs) - - @mock.patch.object(common.VMAXCommon, '_remove_members') - @mock.patch.object(common.VMAXCommon, - '_get_replication_extra_specs', - return_value=VMAXCommonData.rep_extra_specs) - @mock.patch.object(utils.VMAXUtils, 'is_volume_failed_over', - return_value=True) - def test_unmap_lun_volume_failed_over(self, mock_fo, mock_es, mock_rm): - extra_specs = deepcopy(self.extra_specs) - extra_specs['port_group_name'] = self.data.port_group_name_f - rep_config = self.utils.get_replication_config( - [self.replication_device]) - self.common._unmap_lun(self.data.test_volume, self.data.connector) - mock_es.assert_called_once_with(extra_specs, rep_config) - - @mock.patch.object(utils.VMAXUtils, 'is_volume_failed_over', - return_value=True) - def test_initialize_connection_vol_failed_over(self, mock_fo): - extra_specs = deepcopy(self.extra_specs) - extra_specs['port_group_name'] = self.data.port_group_name_f - rep_extra_specs = deepcopy(VMAXCommonData.rep_extra_specs) - rep_extra_specs['port_group_name'] = 
self.data.port_group_name_f - rep_config = self.utils.get_replication_config( - [self.replication_device]) - with mock.patch.object(self.common, '_get_replication_extra_specs', - return_value=rep_extra_specs) as mock_es: - self.common.initialize_connection( - self.data.test_volume, self.data.connector) - mock_es.assert_called_once_with(extra_specs, rep_config) - - @mock.patch.object(common.VMAXCommon, '_sync_check') - def test_extend_volume_rep_enabled(self, mock_sync): - extra_specs = deepcopy(self.extra_specs) - extra_specs['port_group_name'] = self.data.port_group_name_f - volume_name = self.data.test_volume.name - with mock.patch.object(self.rest, 'is_vol_in_rep_session', - return_value=(False, False, None)): - with mock.patch.object( - self.common, 'extend_volume_is_replicated') as mock_ex_re: - self.common.extend_volume(self.data.test_volume, '5') - mock_ex_re.assert_called_once_with( - self.data.array, self.data.test_volume, - self.data.device_id, volume_name, "5", extra_specs) - - def test_set_config_file_get_extra_specs_rep_enabled(self): - extra_specs, _, _ = self.common._set_config_file_and_get_extra_specs( - self.data.test_volume) - self.assertTrue(extra_specs['replication_enabled']) - - def test_populate_masking_dict_is_re(self): - extra_specs = deepcopy(self.extra_specs) - extra_specs['port_group_name'] = self.data.port_group_name_f - masking_dict = self.common._populate_masking_dict( - self.data.test_volume, self.data.connector, extra_specs) - self.assertTrue(masking_dict['replication_enabled']) - self.assertEqual('OS-HostX-SRP_1-DiamondDSS-OS-fibre-PG-RE', - masking_dict[utils.SG_NAME]) - - @mock.patch.object(common.VMAXCommon, - '_replicate_volume', - return_value={}) - def test_manage_existing_is_replicated(self, mock_rep): - extra_specs = deepcopy(self.extra_specs) - extra_specs['port_group_name'] = self.data.port_group_name_f - external_ref = {u'source-name': u'00002'} - volume_name = self.utils.get_volume_element_name( - 
self.data.test_volume.id) - provider_location = {'device_id': u'00002', 'array': self.data.array} - with mock.patch.object( - self.common, '_check_lun_valid_for_cinder_management'): - self.common.manage_existing( - self.data.test_volume, external_ref) - mock_rep.assert_called_once_with( - self.data.test_volume, volume_name, provider_location, - extra_specs, delete_src=False) - - @mock.patch.object(masking.VMAXMasking, 'remove_and_reset_members') - def test_setup_volume_replication(self, mock_rm): - rep_status, rep_data = self.common.setup_volume_replication( - self.data.array, self.data.test_volume, self.data.device_id, - self.extra_specs) - self.assertEqual(fields.ReplicationStatus.ENABLED, rep_status) - self.assertEqual({'array': self.data.remote_array, - 'device_id': self.data.device_id}, rep_data) - - @mock.patch.object(masking.VMAXMasking, 'remove_and_reset_members') - @mock.patch.object(common.VMAXCommon, '_create_volume') - def test_setup_volume_replication_target(self, mock_create, mock_rm): - rep_status, rep_data = self.common.setup_volume_replication( - self.data.array, self.data.test_volume, self.data.device_id, - self.extra_specs, self.data.device_id2) - self.assertEqual(fields.ReplicationStatus.ENABLED, rep_status) - self.assertEqual({'array': self.data.remote_array, - 'device_id': self.data.device_id2}, rep_data) - mock_create.assert_not_called() - - @mock.patch.object(masking.VMAXMasking, 'remove_and_reset_members') - @mock.patch.object(common.VMAXCommon, '_cleanup_remote_target') - def test_cleanup_lun_replication_success(self, mock_clean, mock_rm): - rep_extra_specs = deepcopy(self.data.rep_extra_specs) - rep_extra_specs['port_group_name'] = self.data.port_group_name_f - self.common.cleanup_lun_replication( - self.data.test_volume, "1", self.data.device_id, - self.extra_specs) - mock_clean.assert_called_once_with( - self.data.array, self.data.remote_array, self.data.device_id, - self.data.device_id2, self.data.rdf_group_no, "1", - rep_extra_specs) 
- mock_rm.assert_called_once_with( - self.data.remote_array, self.data.device_id2, "1", - rep_extra_specs, False) - - @mock.patch.object(common.VMAXCommon, '_cleanup_remote_target') - def test_cleanup_lun_replication_no_target(self, mock_clean): - with mock.patch.object(self.common, 'get_remote_target_device', - return_value=(None, '', '', '', '')): - self.common.cleanup_lun_replication( - self.data.test_volume, "1", self.data.device_id, - self.extra_specs) - mock_clean.assert_not_called() - - def test_cleanup_lun_replication_exception(self): - self.assertRaises(exception.VolumeBackendAPIException, - self.common.cleanup_lun_replication, - self.data.test_volume, "1", self.data.device_id, - self.extra_specs) - - @mock.patch.object(common.VMAXCommon, '_delete_from_srp') - @mock.patch.object(provision.VMAXProvision, 'break_rdf_relationship') - def test_cleanup_remote_target(self, mock_break, mock_del): - with mock.patch.object(self.rest, 'are_vols_rdf_paired', - return_value=(False, '', '')): - self.common._cleanup_remote_target( - self.data.array, self.data.remote_array, self.data.device_id, - self.data.device_id2, self.data.rdf_group_name, - "vol1", self.data.rep_extra_specs) - mock_break.assert_not_called() - self.common._cleanup_remote_target( - self.data.array, self.data.remote_array, self.data.device_id, - self.data.device_id2, self.data.rdf_group_name, - "vol1", self.data.rep_extra_specs) - mock_break.assert_called_once_with( - self.data.array, self.data.device_id, - self.data.device_id2, self.data.rdf_group_name, - self.data.rep_extra_specs, "Synchronized") - - @mock.patch.object(common.VMAXCommon, - '_remove_vol_and_cleanup_replication') - @mock.patch.object(masking.VMAXMasking, 'remove_vol_from_storage_group') - @mock.patch.object(common.VMAXCommon, '_delete_from_srp') - def test_cleanup_replication_source(self, mock_del, mock_rm, mock_clean): - self.common._cleanup_replication_source( - self.data.array, self.data.test_volume, "vol1", - {'device_id': 
self.data.device_id}, self.extra_specs) - mock_del.assert_called_once_with( - self.data.array, self.data.device_id, "vol1", self.extra_specs) - - def test_get_rdf_details(self): - rdf_group_no, remote_array = self.common.get_rdf_details( - self.data.array) - self.assertEqual(self.data.rdf_group_no, rdf_group_no) - self.assertEqual(self.data.remote_array, remote_array) - - def test_get_rdf_details_exception(self): - with mock.patch.object(self.rest, 'get_rdf_group_number', - return_value=None): - self.assertRaises(exception.VolumeBackendAPIException, - self.common.get_rdf_details, self.data.array) - - def test_failover_host(self): - volumes = [self.data.test_volume, self.data.test_clone_volume] - with mock.patch.object(self.common, '_failover_volume', - return_value={}) as mock_fo: - self.common.failover_host(volumes) - self.assertEqual(2, mock_fo.call_count) - - def test_failover_host_exception(self): - volumes = [self.data.test_volume, self.data.test_clone_volume] - self.assertRaises(exception.VolumeBackendAPIException, - self.common.failover_host, - volumes, secondary_id="default") - - def test_failover_volume(self): - ref_model_update = { - 'volume_id': self.data.test_volume.id, - 'updates': - {'replication_status': fields.ReplicationStatus.FAILED_OVER, - 'replication_driver_data': six.text_type( - self.data.provider_location), - 'provider_location': six.text_type( - self.data.provider_location3)}} - model_update = self.common._failover_volume( - self.data.test_volume, True, self.extra_specs) - self.assertEqual(ref_model_update, model_update) - ref_model_update2 = { - 'volume_id': self.data.test_volume.id, - 'updates': - {'replication_status': fields.ReplicationStatus.ENABLED, - 'replication_driver_data': six.text_type( - self.data.provider_location), - 'provider_location': six.text_type( - self.data.provider_location3)}} - model_update2 = self.common._failover_volume( - self.data.test_volume, False, self.extra_specs) - self.assertEqual(ref_model_update2, 
model_update2) - - def test_failover_volume_exception(self): - with mock.patch.object( - self.provision, 'failover_volume', - side_effect=exception.VolumeBackendAPIException): - ref_model_update = { - 'volume_id': self.data.test_volume.id, - 'updates': {'replication_status': - fields.ReplicationStatus.FAILOVER_ERROR, - 'replication_driver_data': six.text_type( - self.data.provider_location3), - 'provider_location': six.text_type( - self.data.provider_location)}} - model_update = self.common._failover_volume( - self.data.test_volume, True, self.extra_specs) - self.assertEqual(ref_model_update, model_update) - - @mock.patch.object( - common.VMAXCommon, '_find_device_on_array', - side_effect=[None, VMAXCommonData.device_id, - VMAXCommonData.device_id, VMAXCommonData.device_id]) - @mock.patch.object( - common.VMAXCommon, 'get_masking_views_from_volume', - side_effect=['OS-host-MV', None, exception.VolumeBackendAPIException]) - def test_recover_volumes_on_failback(self, mock_mv, mock_dev): - recovery1 = self.common.recover_volumes_on_failback( - self.data.test_volume, self.extra_specs) - self.assertEqual('error', recovery1['updates']['status']) - recovery2 = self.common.recover_volumes_on_failback( - self.data.test_volume, self.extra_specs) - self.assertEqual('in-use', recovery2['updates']['status']) - recovery3 = self.common.recover_volumes_on_failback( - self.data.test_volume, self.extra_specs) - self.assertEqual('available', recovery3['updates']['status']) - recovery4 = self.common.recover_volumes_on_failback( - self.data.test_volume, self.extra_specs) - self.assertEqual('available', recovery4['updates']['status']) - - def test_get_remote_target_device(self): - target_device1, _, _, _, _ = ( - self.common.get_remote_target_device( - self.data.array, self.data.test_volume, self.data.device_id)) - self.assertEqual(self.data.device_id2, target_device1) - target_device2, _, _, _, _ = ( - self.common.get_remote_target_device( - self.data.array, 
self.data.test_clone_volume, - self.data.device_id)) - self.assertIsNone(target_device2) - with mock.patch.object(self.rest, 'are_vols_rdf_paired', - return_value=(False, '')): - target_device3, _, _, _, _ = ( - self.common.get_remote_target_device( - self.data.array, self.data.test_volume, - self.data.device_id)) - self.assertIsNone(target_device3) - with mock.patch.object(self.rest, 'get_volume', - return_value=None): - target_device4, _, _, _, _ = ( - self.common.get_remote_target_device( - self.data.array, self.data.test_volume, - self.data.device_id)) - self.assertIsNone(target_device4) - - @mock.patch.object(common.VMAXCommon, 'setup_volume_replication') - @mock.patch.object(provision.VMAXProvision, 'extend_volume') - @mock.patch.object(provision.VMAXProvision, 'break_rdf_relationship') - @mock.patch.object(masking.VMAXMasking, 'remove_and_reset_members') - def test_extend_volume_is_replicated(self, mock_remove, - mock_break, mock_extend, mock_setup): - self.common.extend_volume_is_replicated( - self.data.array, self.data.test_volume, self.data.device_id, - 'vol1', '5', self.data.extra_specs_rep_enabled) - self.assertEqual(2, mock_remove.call_count) - self.assertEqual(2, mock_extend.call_count) - - def test_extend_volume_is_replicated_exception(self): - self.assertRaises(exception.VolumeBackendAPIException, - self.common.extend_volume_is_replicated, - self.data.failed_resource, self.data.test_volume, - self.data.device_id, 'vol1', '1', - self.data.extra_specs_rep_enabled) - - @mock.patch.object(common.VMAXCommon, 'add_volume_to_replication_group') - @mock.patch.object(masking.VMAXMasking, 'remove_and_reset_members') - def test_enable_rdf(self, mock_remove, mock_add): - rep_config = self.utils.get_replication_config( - [self.replication_device]) - self.common.enable_rdf( - self.data.array, self.data.device_id, self.data.rdf_group_no, - rep_config, 'OS-1', self.data.remote_array, self.data.device_id2, - self.extra_specs) - self.assertEqual(2, 
mock_remove.call_count) - self.assertEqual(2, mock_add.call_count) - - @mock.patch.object(masking.VMAXMasking, 'remove_vol_from_storage_group') - @mock.patch.object(common.VMAXCommon, '_cleanup_remote_target') - def test_enable_rdf_exception(self, mock_cleanup, mock_rm): - rep_config = self.utils.get_replication_config( - [self.replication_device]) - self.assertRaises( - exception.VolumeBackendAPIException, self.common.enable_rdf, - self.data.array, self.data.device_id, - self.data.failed_resource, rep_config, 'OS-1', - self.data.remote_array, self.data.device_id2, self.extra_specs) - self.assertEqual(1, mock_cleanup.call_count) - - def test_add_volume_to_replication_group(self): - sg_name = self.common.add_volume_to_replication_group( - self.data.array, self.data.device_id, 'vol1', - self.extra_specs) - self.assertEqual(self.data.default_sg_re_enabled, sg_name) - - @mock.patch.object(masking.VMAXMasking, - 'get_or_create_default_storage_group', - side_effect=exception.VolumeBackendAPIException) - def test_add_volume_to_replication_group_exception(self, mock_get): - self.assertRaises( - exception.VolumeBackendAPIException, - self.common.add_volume_to_replication_group, - self.data.array, self.data.device_id, 'vol1', - self.extra_specs) - - def test_get_replication_extra_specs(self): - rep_config = self.utils.get_replication_config( - [self.replication_device]) - # Path one - disable compression - extra_specs1 = deepcopy(self.extra_specs) - extra_specs1[utils.DISABLECOMPRESSION] = "true" - ref_specs1 = deepcopy(self.data.rep_extra_specs) - ref_specs1['port_group_name'] = self.data.port_group_name_f - rep_extra_specs1 = self.common._get_replication_extra_specs( - extra_specs1, rep_config) - self.assertEqual(ref_specs1, rep_extra_specs1) - # Path two - disable compression, not all flash - ref_specs2 = deepcopy(self.data.rep_extra_specs) - ref_specs2['port_group_name'] = self.data.port_group_name_f - with mock.patch.object(self.rest, 'is_compression_capable', - 
return_value=False): - rep_extra_specs2 = self.common._get_replication_extra_specs( - extra_specs1, rep_config) - self.assertEqual(ref_specs2, rep_extra_specs2) - # Path three - slo not valid - extra_specs3 = deepcopy(self.extra_specs) - ref_specs3 = deepcopy(ref_specs1) - ref_specs3['slo'] = None - ref_specs3['workload'] = None - with mock.patch.object(self.provision, 'verify_slo_workload', - return_value=(False, False)): - rep_extra_specs3 = self.common._get_replication_extra_specs( - extra_specs3, rep_config) - self.assertEqual(ref_specs3, rep_extra_specs3) - - def test_get_secondary_stats(self): - rep_config = self.utils.get_replication_config( - [self.replication_device]) - array_map = self.utils.parse_file_to_get_array_map( - self.common.pool_info['config_file']) - finalarrayinfolist = self.common._get_slo_workload_combinations( - array_map) - array_info = finalarrayinfolist[0] - ref_info = deepcopy(array_info) - ref_info['SerialNumber'] = six.text_type(rep_config['array']) - ref_info['srpName'] = rep_config['srp'] - secondary_info = self.common.get_secondary_stats_info( - rep_config, array_info) - self.assertEqual(ref_info, secondary_info) diff --git a/cinder/tests/unit/volume/drivers/dell_emc/vnx/__init__.py b/cinder/tests/unit/volume/drivers/dell_emc/vnx/__init__.py deleted file mode 100644 index 69a8bb962..000000000 --- a/cinder/tests/unit/volume/drivers/dell_emc/vnx/__init__.py +++ /dev/null @@ -1,26 +0,0 @@ -# Copyright (c) 2016 EMC Corporation. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the -# License for the specific language governing permissions and limitations -# under the License. - -import sys - -import mock - -from cinder.tests.unit.volume.drivers.dell_emc.vnx import fake_exception -from cinder.tests.unit.volume.drivers.dell_emc.vnx import fake_storops - -fake_vnx = mock.Mock() -fake_storops.exception = fake_exception -fake_storops.vnx = fake_vnx -sys.modules['storops'] = fake_storops diff --git a/cinder/tests/unit/volume/drivers/dell_emc/vnx/fake_enum.py b/cinder/tests/unit/volume/drivers/dell_emc/vnx/fake_enum.py deleted file mode 100644 index 9a20687b6..000000000 --- a/cinder/tests/unit/volume/drivers/dell_emc/vnx/fake_enum.py +++ /dev/null @@ -1,128 +0,0 @@ -# Copyright (c) 2016 EMC Corporation. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -import enum -import six - - -class Enum(enum.Enum): - @classmethod - def verify(cls, value, allow_none=True): - if value is None and not allow_none: - raise ValueError( - 'None is not allowed here for %s.') % cls.__name__ - elif value is not None and not isinstance(value, cls): - raise ValueError('%(value)s is not an instance of %(name)s.') % { - 'value': value, 'name': cls.__name__} - - @classmethod - def get_all(cls): - return list(cls) - - @classmethod - def get_opt(cls, value): - option_map = cls.get_option_map() - if option_map is None: - raise NotImplementedError( - 'Option map is not defined for %s.') % cls.__name__ - - ret = option_map.get(value, None) - if ret is None: - raise ValueError('%(value)s is not a valid option for %(name)s.' - ) % {'value': value, 'name': cls.__name__} - return ret - - @classmethod - def parse(cls, value): - if isinstance(value, six.string_types): - ret = cls.from_str(value) - elif isinstance(value, six.integer_types): - ret = cls.from_int(value) - elif isinstance(value, cls): - ret = value - elif value is None: - ret = None - else: - raise ValueError( - 'Not supported value type: %s.') % type(value) - return ret - - def is_equal(self, value): - if isinstance(value, six.string_types): - ret = self.value.lower() == value.lower() - else: - ret = self.value == value - return ret - - @classmethod - def from_int(cls, value): - ret = None - int_index = cls.get_int_index() - if int_index is not None: - try: - ret = int_index[value] - except IndexError: - pass - else: - try: - ret = next(i for i in cls.get_all() if i.is_equal(value)) - except StopIteration: - pass - if ret is None: - raise ValueError - return ret - - @classmethod - def from_str(cls, value): - ret = None - if value is not None: - for item in cls.get_all(): - if item.is_equal(value): - ret = item - break - else: - cls._raise_invalid_value(value) - return ret - - @classmethod - def _raise_invalid_value(cls, value): - msg = ('%(value)s is not a valid value for %(name)s.' 
- ) % {'value': value, 'name': cls.__name__} - raise ValueError(msg) - - @classmethod - def get_option_map(cls): - raise None - - @classmethod - def get_int_index(cls): - return None - - @classmethod - def values(cls): - return [m.value for m in cls.__members__.values()] - - @classmethod - def enum_name(cls): - return cls.__name__ - - -class VNXCtrlMethod(object): - LIMIT_CTRL = 'limit' - - def __init__(self, method, metric, value): - self.method = method - self.metric = metric - self.value = value diff --git a/cinder/tests/unit/volume/drivers/dell_emc/vnx/fake_exception.py b/cinder/tests/unit/volume/drivers/dell_emc/vnx/fake_exception.py deleted file mode 100644 index 0ed631309..000000000 --- a/cinder/tests/unit/volume/drivers/dell_emc/vnx/fake_exception.py +++ /dev/null @@ -1,204 +0,0 @@ -# Copyright (c) 2016 EMC Corporation. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - - -class StoropsException(Exception): - message = 'Storops Error.' - - -class VNXException(StoropsException): - message = "VNX Error." 
- - -class VNXStorageGroupError(VNXException): - pass - - -class VNXAttachAluError(VNXException): - pass - - -class VNXAluAlreadyAttachedError(VNXAttachAluError): - message = ( - 'LUN already exists in the specified storage group', - 'Requested LUN has already been added to this Storage Group') - - -class VNXDetachAluError(VNXStorageGroupError): - pass - - -class VNXDetachAluNotFoundError(VNXDetachAluError): - message = 'No such Host LUN in this Storage Group' - - -class VNXCreateStorageGroupError(VNXStorageGroupError): - pass - - -class VNXStorageGroupNameInUseError(VNXCreateStorageGroupError): - message = 'Storage Group name already in use' - - -class VNXNoHluAvailableError(VNXStorageGroupError): - pass - - -class VNXMigrationError(VNXException): - pass - - -class VNXLunNotMigratingError(VNXException): - pass - - -class VNXLunSyncCompletedError(VNXMigrationError): - error_code = 0x714a8021 - - -class VNXTargetNotReadyError(VNXMigrationError): - message = 'The destination LUN is not available for migration' - - -class VNXSnapError(VNXException): - pass - - -class VNXDeleteAttachedSnapError(VNXSnapError): - error_code = 0x716d8003 - - -class VNXCreateSnapError(VNXException): - message = 'Cannot create the snapshot.' - - -class VNXAttachSnapError(VNXSnapError): - message = 'Cannot attach the snapshot.' - - -class VNXDetachSnapError(VNXSnapError): - message = 'Cannot detach the snapshot.' - - -class VNXSnapAlreadyMountedError(VNXSnapError): - error_code = 0x716d8055 - - -class VNXSnapNameInUseError(VNXSnapError): - error_code = 0x716d8005 - - -class VNXSnapNotExistsError(VNXSnapError): - message = 'The specified snapshot does not exist.' 
- - -class VNXLunError(VNXException): - pass - - -class VNXCreateLunError(VNXLunError): - pass - - -class VNXLunNameInUseError(VNXCreateLunError): - error_code = 0x712d8d04 - - -class VNXLunExtendError(VNXLunError): - pass - - -class VNXLunExpandSizeError(VNXLunExtendError): - error_code = 0x712d8e04 - - -class VNXLunPreparingError(VNXLunError): - error_code = 0x712d8e0e - - -class VNXLunNotFoundError(VNXLunError): - message = 'Could not retrieve the specified (pool lun).' - - -class VNXDeleteLunError(VNXLunError): - pass - - -class VNXLunUsedByFeatureError(VNXLunError): - pass - - -class VNXCompressionError(VNXLunError): - pass - - -class VNXCompressionAlreadyEnabledError(VNXCompressionError): - message = 'Compression on the specified LUN is already turned on.' - - -class VNXConsistencyGroupError(VNXException): - pass - - -class VNXCreateConsistencyGroupError(VNXConsistencyGroupError): - pass - - -class VNXConsistencyGroupNameInUseError(VNXCreateConsistencyGroupError): - error_code = 0x716d8021 - - -class VNXConsistencyGroupNotFoundError(VNXConsistencyGroupError): - message = 'Cannot find the consistency group' - - -class VNXPingNodeError(VNXException): - pass - - -class VNXMirrorException(VNXException): - pass - - -class VNXMirrorNameInUseError(VNXMirrorException): - message = 'Mirror name already in use' - - -class VNXMirrorPromotePrimaryError(VNXMirrorException): - message = 'Cannot remove or promote a primary image.' 
- - -class VNXMirrorNotFoundError(VNXMirrorException): - message = 'Mirror not found' - - -class VNXMirrorGroupNameInUseError(VNXMirrorException): - message = 'Mirror Group name already in use' - - -class VNXMirrorGroupNotFoundError(VNXMirrorException): - message = 'Unable to locate the specified group' - - -class VNXMirrorGroupAlreadyMemberError(VNXMirrorException): - message = 'The mirror is already a member of a group' - - -class VNXMirrorGroupMirrorNotMemberError(VNXMirrorException): - message = 'The specified mirror is not a member of the group' - - -class VNXMirrorGroupAlreadyPromotedError(VNXMirrorException): - message = 'The Consistency Group has no secondary images to promote' diff --git a/cinder/tests/unit/volume/drivers/dell_emc/vnx/fake_storops/__init__.py b/cinder/tests/unit/volume/drivers/dell_emc/vnx/fake_storops/__init__.py deleted file mode 100644 index b50af7e37..000000000 --- a/cinder/tests/unit/volume/drivers/dell_emc/vnx/fake_storops/__init__.py +++ /dev/null @@ -1,79 +0,0 @@ -# Copyright (c) 2016 EMC Corporation. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -from cinder.tests.unit.volume.drivers.dell_emc.vnx import fake_enum - - -class VNXSystem(object): - pass - - -class VNXEnum(fake_enum.Enum): - pass - - -class VNXSPEnum(VNXEnum): - SP_A = 'SP A' - SP_B = 'SP B' - CONTROL_STATION = 'Celerra' - - -class VNXProvisionEnum(VNXEnum): - # value of spec "provisioning:type" - THIN = 'thin' - THICK = 'thick' - COMPRESSED = 'compressed' - DEDUPED = 'deduplicated' - - -class VNXMigrationRate(VNXEnum): - LOW = 'low' - MEDIUM = 'medium' - HIGH = 'high' - ASAP = 'asap' - - -class VNXTieringEnum(VNXEnum): - NONE = 'none' - HIGH_AUTO = 'starthighthenauto' - AUTO = 'auto' - HIGH = 'highestavailable' - LOW = 'lowestavailable' - NO_MOVE = 'nomovement' - - -class VNXMirrorViewRecoveryPolicy(VNXEnum): - MANUAL = 'manual' - AUTO = 'automatic' - - -class VNXMirrorViewSyncRate(VNXEnum): - HIGH = 'high' - MEDIUM = 'medium' - LOW = 'low' - - -class VNXMirrorImageState(VNXEnum): - SYNCHRONIZED = 'Synchronized' - OUT_OF_SYNC = 'Out-of-Sync' - SYNCHRONIZING = 'Synchronizing' - CONSISTENT = 'Consistent' - SCRAMBLED = 'Scrambled' - INCOMPLETE = 'Incomplete' - LOCAL_ONLY = 'Local Only' - EMPTY = 'Empty' - - -VNXCtrlMethod = fake_enum.VNXCtrlMethod diff --git a/cinder/tests/unit/volume/drivers/dell_emc/vnx/fake_storops/lib/__init__.py b/cinder/tests/unit/volume/drivers/dell_emc/vnx/fake_storops/lib/__init__.py deleted file mode 100644 index e69de29bb..000000000 diff --git a/cinder/tests/unit/volume/drivers/dell_emc/vnx/fake_storops/lib/tasks.py b/cinder/tests/unit/volume/drivers/dell_emc/vnx/fake_storops/lib/tasks.py deleted file mode 100644 index 04375297d..000000000 --- a/cinder/tests/unit/volume/drivers/dell_emc/vnx/fake_storops/lib/tasks.py +++ /dev/null @@ -1,28 +0,0 @@ -# Copyright (c) 2017 Dell Inc. or its subsidiaries. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. 
You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - - -class PQueue(object): - - def __init__(self, path, interval=None): - self.path = path - self._interval = interval - self.started = False - - def put(self, item): - return item - - def start(self): - self.started = True diff --git a/cinder/tests/unit/volume/drivers/dell_emc/vnx/mocked_cinder.yaml b/cinder/tests/unit/volume/drivers/dell_emc/vnx/mocked_cinder.yaml deleted file mode 100644 index 639004f80..000000000 --- a/cinder/tests/unit/volume/drivers/dell_emc/vnx/mocked_cinder.yaml +++ /dev/null @@ -1,600 +0,0 @@ -########################################################### -# Common -########################################################### - -volume: &volume_base - _type: 'volume' - _properties: &volume_base_properties - status: 'creating' - size: 1 - id: - _uuid: volume_id - provider_auth: 'None' - host: 'host@backendsec#unit_test_pool' - project_id: - _uuid: project_id - provider_location: &provider_location - _build_provider_location: &provider_location_dict - id: 1 - type: 'lun' - system: 'fake_serial' - base_lun_name: 'test' - version: '07.00.00' - display_name: 'volume-1' - display_description: 'test volume' - volume_type_id: - consistencygroup_id: - group_id: - volume_attachment: - _properties: {} - volume_metadata: - _properties: {} - group: - _type: 'group' - _properties: {} - -host: &host_base - _properties: - host: 'host@backendsec#unit_test_pool' - -consistency_group: &cg_base - _type: 'cg' - _properties: &cg_base_properties - id: - _uuid: consistency_group_id - status: 'creating' - name: 'cg_name' - host: 'host@backend#unit_test_pool' - 
-consistency_group_with_type: &cg_base_with_type - _type: 'cg' - _properties: - <<: *cg_base_properties - volume_type_id: 'type1' - -snapshot: &snapshot_base - _type: 'snapshot' - _properties: &snapshot_base_properties - id: - _uuid: snapshot_id - status: available - name: 'snapshot_name' - volume: - _type: 'volume' - _properties: - <<: *volume_base_properties - name: 'attached_volume_name' - volume_name: 'attached_volume_name' - -cg_snapshot: &cg_snapshot_base - _type: 'cg_snapshot' - _properties: &cg_snapshot_base_properties - id: - _uuid: cgsnapshot_id - status: 'creating' - -group: &group_base - _type: 'group' - _properties: &group_base_properties - id: - _uuid: group_id - name: 'test_group' - status: 'creating' - replication_status: 'enabled' - - - -########################################################### -# TestCommonAdapter, TestISCSIAdapter, TestFCAdapter -########################################################### -test_mock_driver_input_inner: - volume: *volume_base - -test_create_volume: &test_create_volume - volume: *volume_base - -test_create_volume_error: *test_create_volume - -test_create_thick_volume: *test_create_volume - -test_create_volume_with_qos: - volume: - _type: 'volume' - _properties: - <<: *volume_base_properties - name: "volume_with_qos" - volume_type_id: - _uuid: volume_type_id - -test_migrate_volume: - volume: *volume_base - -test_migrate_volume_host_assisted: - volume: *volume_base - -test_delete_volume_not_force: &test_delete_volume_not_force - volume: *volume_base - -test_delete_volume_force: *test_delete_volume_not_force - -test_delete_async_volume: - volume: *volume_base - -test_delete_async_volume_migrating: - volume: *volume_base - -test_retype_need_migration_when_host_changed: - volume: *volume_base - host: - _properties: - host: 'host@backendsec#another_pool' - -test_retype_need_migration_for_smp_volume: - volume: - _type: 'volume' - _properties: - <<: *volume_base_properties - provider_location: - _build_provider_location: 
- <<: *provider_location_dict - type: 'smp' - host: *host_base - -test_retype_need_migration_when_provision_changed: - volume: *volume_base - host: *host_base - -test_retype_not_need_migration_when_provision_changed: - volume: *volume_base - host: *host_base - -test_retype_not_need_migration: - volume: *volume_base - host: *host_base - -test_retype_need_migration: - volume: - _type: 'volume' - _properties: - <<: *volume_base_properties - volume_type_id: - _uuid: volume_type_id - host: *host_base - -test_retype_lun_has_snap: - volume: *volume_base - host: *host_base - -test_retype_turn_on_compression_change_tier: - volume: *volume_base - host: *host_base - -test_retype_change_tier: - volume: *volume_base - host: *host_base - -test_create_consistencygroup: - cg: *cg_base - -test_delete_consistencygroup: - cg: *cg_base - -test_delete_consistencygroup_with_volume: - cg: *cg_base - vol1: *volume_base - vol2: *volume_base - -test_delete_consistencygroup_error: - cg: *cg_base - vol1: *volume_base - vol2: *volume_base - -test_delete_consistencygroup_volume_error: - cg: *cg_base - vol1: *volume_base - vol2: *volume_base - -test_extend_volume: - volume: *volume_base - -test_create_snapshot_adapter: - snapshot: *snapshot_base - -test_delete_snapshot_adapter: - snapshot: *snapshot_base - -test_do_create_cgsnap: &cg_snap_and_snaps - cg_snap: *cg_snapshot_base - snap1: *snapshot_base - snap2: *snapshot_base - -test_do_delete_cgsnap: *cg_snap_and_snaps - -test_manage_existing_lun_no_exist: - volume: *volume_base - -test_manage_existing_invalid_pool: - volume: *volume_base - -test_manage_existing_get_size: - volume: *volume_base - -test_manage_existing_type_mismatch: - volume: - _type: 'volume' - _properties: - <<: *volume_base_properties - volume_type_id: - _uuid: volume_type_id - -test_manage_existing: - volume: - _type: 'volume' - _properties: - <<: *volume_base_properties - volume_type_id: - _uuid: volume_type_id - -test_manage_existing_smp: - volume: *volume_base - 
-test_create_cloned_volume: - volume: *volume_base - src_vref: - _type: volume - _properties: - <<: *volume_base_properties - id: - _uuid: volume2_id - size: 2 - -test_create_cloned_volume_snapcopy: - volume: - _type: volume - _properties: - <<: *volume_base_properties - src_vref: - _type: volume - _properties: - <<: *volume_base_properties - id: - _uuid: volume2_id - size: 2 - -test_create_volume_from_snapshot: - volume: *volume_base - snapshot: *snapshot_base - -test_create_volume_from_snapshot_snapcopy: - volume: *volume_base - snapshot: *snapshot_base - -test_get_base_lun_name: - volume: *volume_base - -test_do_create_cg_from_cgsnap: - vol1: - _type: 'volume' - _properties: - <<: *volume_base_properties - id: - _uuid: volume_id - vol2: - _type: 'volume' - _properties: - <<: *volume_base_properties - id: - _uuid: volume2_id - snap1: - _type: 'snapshot' - _properties: - <<: *snapshot_base_properties - id: - _uuid: snapshot_id - snap2: - _type: 'snapshot' - _properties: - <<: *snapshot_base_properties - id: - _uuid: snapshot2_id - -test_do_clone_cg: - vol1: - _type: 'volume' - _properties: - <<: *volume_base_properties - id: - _uuid: consistency_group_id - - src_vol1: - _type: 'volume' - _properties: - <<: *volume_base_properties - id: - _uuid: consistency_group2_id - -test_assure_host_access: - volume: *volume_base - -test_assure_host_access_without_auto_register_new_sg: - volume: *volume_base - -test_assure_host_access_without_auto_register: - volume: *volume_base - -test_auto_register_initiator: - volume: *volume_base - -test_auto_register_initiator_no_white_list: - volume: *volume_base - -test_auto_register_initiator_no_port_to_reg: - volume: *volume_base - -test_remove_host_access: - volume: *volume_base - -test_remove_host_access_sg_absent: - volume: *volume_base - -test_remove_host_access_volume_not_in_sg: - volume: *volume_base - -test_do_update_cg: - cg: *cg_base - volume_add: - <<: *volume_base - _properties: - <<: *volume_base_properties - 
provider_location: - _build_provider_location: - <<: *provider_location_dict - id: 1 - volume_remove: - <<: *volume_base - _properties: - <<: *volume_base_properties - provider_location: - _build_provider_location: - <<: *provider_location_dict - id: 2 - -test_create_export_snapshot: - snapshot: *snapshot_base - -test_remove_export_snapshot: - snapshot: *snapshot_base - -test_initialize_connection_snapshot: - snapshot: *snapshot_base - -test_terminate_connection_snapshot: - snapshot: *snapshot_base - -test_setup_lun_replication: - vol1: &volume_for_replication - _type: 'volume' - _properties: - <<: *volume_base_properties - id: - _uuid: volume_id - volume_type_id: - _uuid: volume_type_id - -test_setup_lun_replication_in_group: - group1: - _type: 'group' - _properties: - <<: *group_base_properties - group_type_id: - _uuid: group_type_id - vol1: *volume_for_replication - -test_cleanup_replication: - vol1: - _type: 'volume' - _properties: - <<: *volume_base_properties - id: - _uuid: volume2_id - volume_type_id: - _uuid: volume_type_id - -test_failover_host: - vol1: - _type: 'volume' - _properties: - <<: *volume_base_properties - id: - _uuid: volume3_id - volume_type_id: - _uuid: volume_type_id - -test_failover_host_invalid_backend_id: - vol1: - _type: 'volume' - _properties: - <<: *volume_base_properties - id: - _uuid: volume4_id - volume_type_id: - _uuid: volume_type_id - -test_failover_host_failback: - vol1: - _type: 'volume' - _properties: - <<: *volume_base_properties - id: - _uuid: volume5_id - volume_type_id: - _uuid: volume_type_id - replication_status: enabled - -test_failover_host_groups: - group1: - _type: 'group' - _properties: - <<: *group_base_properties - id: - _uuid: group_id - group_type_id: - _uuid: group_type_id - replication_status: failed-over - volumes: [*volume_base, *volume_base] - - vol1: - _type: 'volume' - _properties: - <<: *volume_base_properties - id: - _uuid: volume4_id - volume_type_id: - _uuid: volume_type_id - replication_status: 
failed-over - - vol2: - _type: 'volume' - _properties: - <<: *volume_base_properties - id: - _uuid: volume4_id - volume_type_id: - _uuid: volume_type_id - replication_status: failed-over - -test_get_pool_name: - volume: *volume_base - -test_update_migrated_volume: - volume: *volume_base - new_volume: *volume_base - -test_update_migrated_volume_smp: - volume: *volume_base - new_volume: - <<: *volume_base - _properties: - <<: *volume_base_properties - provider_location: - _build_provider_location: - <<: *provider_location_dict - type: smp - -test_create_group_snap: - -test_create_cgsnapshot: - -test_create_cloned_cg: - -test_create_cloned_group: - -test_create_cg_from_cgsnapshot: - -test_create_group_from_group_snapshot: - -test_create_cgsnapshot: - -test_create_group_snapshot: - -test_delete_group_snapshot: - -test_delete_cgsnapshot: - -########################################################### -# TestReplicationAdaper -########################################################### - -test_enable_replication: - volume1: *volume_base - volume2: *volume_base - group: *group_base - -test_disable_replication: - volume1: *volume_base - volume2: *volume_base - group: *group_base - -test_failover_replication: - volume1: *volume_base - volume2: *volume_base - group: *group_base - -########################################################### -# TestUtils -########################################################### - -test_validate_cg_type: - cg: - _properties: - id: - _uuid: GROUP_ID - volume_type_ids: ['type1'] - -test_require_consistent_group_snapshot_enabled: - group: - _type: 'group' - _properties: - id: - _uuid: group_id - group_type_id: - _uuid: group_type_id - -test_is_image_cache_volume_false: - volume: *volume_base - -test_is_image_cache_volume_true: - volume: *volume_base - -test_calc_migrate_and_provision_image_cache: - volume: *volume_base - -test_calc_migrate_and_provision: - volume: *volume_base - -test_get_backend_qos_specs: - volume: - _type: 'volume' - 
_properties: - <<: *volume_base_properties - volume_type_id: - _uuid: volume_type_id - -test_check_type_matched_invalid: - volume: - _type: 'volume' - _properties: - <<: *volume_base_properties - volume_type_id: - _uuid: volume_type_id - group: - _type: 'group' - _properties: - id: - _uuid: group_id - group_type_id: - _uuid: group_type_id - -test_check_rep_status_matched_disabled: - group: - _type: 'group' - _properties: - id: - _uuid: group_id - group_type_id: - _uuid: group_type_id - replication_status: 'disabled' - -########################################################### -# TestClient -########################################################### - -test_get_lun_id: - volume: *volume_base - -test_get_lun_id_without_provider_location: - volume: - <<: *volume_base - _properties: - <<: *volume_base_properties - provider_location: diff --git a/cinder/tests/unit/volume/drivers/dell_emc/vnx/mocked_vnx.yaml b/cinder/tests/unit/volume/drivers/dell_emc/vnx/mocked_vnx.yaml deleted file mode 100644 index d6e4c97bb..000000000 --- a/cinder/tests/unit/volume/drivers/dell_emc/vnx/mocked_vnx.yaml +++ /dev/null @@ -1,2406 +0,0 @@ -########################################################### -# Example: -# vnx: -# _properties: # properties -# serial: serial_1 -# name: lun_1 -# state: -# _side_effect: [Ready, Offline] # side effect for property -# -# _methods: # methods -# get_pool: *pool_1 # return value of method -# get_lun: -# _raise: -# GetLunError: Unkown Error # method raise exception -# get_cg: -# _side_effect: [cg_1, cg_2] # side effect for method -# -########################################################### - -########################################################### -# Common -########################################################### -lun_base: &lun_base - _properties: &lun_base_prop - name: lun_name - lun_id: lun_id - wwn: 'fake_wwn' - poll: False - operation: None - state: Ready - existed: true - -cg_base: - _properties: &cg_base_prop - fake_prop: 
fake_prop_value - -cg_snap_base: - _properties: &cg_snap_base_prop - id: 'cg_snap_id' - -pool_base: &pool_base - _properties: &pool_base_prop - name: pool_name - pool_id: 0 - state: Ready - user_capacity_gbs: 1311 - total_subscribed_capacity_gbs: 131 - available_capacity_gbs: 132 - percent_full_threshold: 70 - fast_cache: True - -pool_feature_base: - _properties: &pool_feature_base_prop - max_pool_luns: 3000 - total_pool_luns: 151 - -vnx_base: &vnx_base - _properties: &vnx_base_prop - serial: fake_serial - -snapshot_base: &snapshot_base - _properties: &snapshot_base_prop - status: - existed: true - name: snapshot_name - state: - -sg: &sg_base - _properties: &sg_base_prop - existed: true - name: sg_name - -spa: &spa - _enum: - VNXSPEnum: SP A - -spb: &spb - _enum: - VNXSPEnum: SP B - -iscsi_port_base: &iscsi_port_base - _type: 'VNXPort' - _properties: &iscsi_port_base_prop - sp: *spa - port_id: 0 - vport_id: 0 - -all_iscsi_ports: &all_iscsi_ports - - &iscsi_port_a-0-0 - <<: *iscsi_port_base - _properties: - <<: *iscsi_port_base_prop - port_id: 0 - vport_id: 0 - - &iscsi_port_a-0-1 - <<: *iscsi_port_base - _properties: - <<: *iscsi_port_base_prop - port_id: 0 - vport_id: 1 - - &iscsi_port_a-1-0 - <<: *iscsi_port_base - _properties: - <<: *iscsi_port_base_prop - port_id: 1 - vport_id: 0 - - &iscsi_port_b-0-1 - <<: *iscsi_port_base - _properties: - <<: *iscsi_port_base_prop - sp: *spb - port_id: 0 - vport_id: 1 - -fc_port_base: &fc_port_base - _type: 'VNXPort' - _properties: &fc_port_base_prop - sp: *spa - port_id: 1 - vport_id: None - wwn: 'wwn' - link_status: 'Up' - port_status: 'Online' - -all_fc_ports: &all_fc_ports - - &fc_port_a-1 - <<: *fc_port_base - _properties: - <<: *fc_port_base_prop - port_id: 1 - wwn: '50:06:01:60:B6:E0:1C:F4:50:06:01:66:36:E0:1C:A1' - - &fc_port_a-2 - <<: *fc_port_base - _properties: - <<: *fc_port_base_prop - port_id: 2 - wwn: '50:06:01:60:B6:E0:1C:F4:50:06:01:66:36:E0:1C:A2' - port_status: 'Offline' - - &fc_port_b-2 - <<: *fc_port_base 
- _properties: - <<: *fc_port_base_prop - sp: *spb - port_id: 2 - wwn: '50:06:01:60:B6:E0:1C:F4:50:06:01:66:36:E0:1C:B2' - -mirror_base: &mirror_base - _properties: &mirror_base_prop - image_state: - _type: VNXMirrorImageState - value: 'SYNCHRONIZED' - -mirror_group_base: &mirror_group_base - _properties: &mirror_group_base_prop - condition: 'Active' - existed: true - name: 'base_group' - role: 'Primary' - state: 'Synchronized' - -########################################################### -# TestClient -########################################################### -test_create_lun: &test_create_lun - lun: &lun_test_create_lun - _properties: - <<: *lun_base_prop - name: lun1 - _methods: - update: - with_no_poll: _context - - pool: &pool_test_create_lun - _properties: - <<: *pool_base_prop - name: pool1 - _methods: - create_lun: *lun_test_create_lun - - vnx: - _properties: - <<: *vnx_base_prop - _methods: - get_pool: *pool_test_create_lun - -test_create_lun_error: &test_create_lun_error - pool: &pool_test_create_lun_error - _properties: - <<: *pool_base_prop - _methods: - create_lun: - _raise: - VNXCreateLunError: Unkown Error - vnx: - _properties: - <<: *vnx_base_prop - _methods: - get_pool: *pool_test_create_lun_error - -test_is_lun_io_ready_false: - lun: - _properties: - <<: *lun_base_prop - state: Initializing - _methods: - update: - with_no_poll: _context - -test_is_lun_io_ready_true: - lun: - _properties: - <<: *lun_base_prop - state: Ready - operation: None - _methods: - update: - with_no_poll: _context - -test_is_lun_io_ready_exception: - lun: - _properties: - <<: *lun_base_prop - state: Deleting - _methods: - update: - with_no_poll: _context - -test_create_lun_in_cg: - cg: &cg_test_create_lun_in_cg - _properties: - <<: *cg_base_prop - _methods: - add_member: - - vnx: - _properties: - <<: *vnx_base_prop - _methods: - get_lun: *lun_test_create_lun - get_pool: *pool_test_create_lun - get_cg: *cg_test_create_lun_in_cg - -test_create_lun_compression: - lun: 
&lun_test_create_lun_compression - _properties: - <<: *lun_base_prop - name: lun2 - _methods: - update: - with_no_poll: _context - - pool: &pool_test_create_lun_compression - _properties: - <<: *pool_base_prop - _methods: - create_lun: *lun_test_create_lun_compression - - vnx: - _properties: - <<: *vnx_base_prop - _methods: - get_pool: *pool_test_create_lun_compression - -test_create_lun_already_existed: - lun: &lun_test_create_lun_already_existed - _properties: - <<: *lun_base_prop - name: lun3 - _methods: - update: - with_no_poll: _context - - pool: &pool_test_create_lun_already_existed - _properties: - <<: *pool_base_prop - _methods: - create_lun: - _raise: - VNXLunNameInUseError: Lun already exists(0x712d8d04) - - vnx: - _properties: - <<: *vnx_base_prop - _methods: - get_lun: *lun_test_create_lun_already_existed - get_pool: *pool_test_create_lun_already_existed - -test_migrate_lun: - lun: &lun_migrate - _properties: - <<: *lun_base_prop - _methods: - migrate: - - vnx: - _methods: - get_lun: *lun_migrate - -test_migrate_lun_with_retry: - lun: &lun_migrate_retry - _properties: - <<: *lun_base_prop - _methods: - migrate: - _raise: - VNXTargetNotReadyError: 'The destination LUN is not available for migration' - - vnx: - _methods: - get_lun: *lun_migrate_retry - -test_session_finished_faulted: - session: &session_faulted - _properties: - existed: true - current_state: 'FAULTED' - vnx: - _methods: - get_lun: *lun_base - get_migration_session: *session_faulted - -test_session_finished_migrating: - session: &session_migrating - _properties: - existed: true - current_state: 'MIGRATING' - vnx: - _methods: - get_lun: *lun_base - get_migration_session: *session_migrating - -test_session_finished_not_existed: - session: &session_not_existed - _properties: - existed: false - vnx: - _methods: - get_lun: *lun_base - get_migration_session: *session_not_existed - -test_migrate_lun_error: - lun1: &lun_migrate_error - <<: *lun_base_prop - _methods: - migrate: - _raise: - 
VNXMigrationError: 'Unknown Error' - vnx: - _methods: - get_lun: *lun_migrate_error - -test_verify_migration: - lun1: &src_lun - _propertis: - <<: *lun_base_prop - lun2: &dst_lun - _properties: - poll: false - wwn: 'fake_wwn' - session: &session_verify - _properties: - existed: false - vnx: - _methods: - get_lun: - _side_effect: [*src_lun, *dst_lun] - get_migration_session: *session_verify - -test_verify_migration_false: - vnx: - _methods: - get_lun: - _side_effect: [*src_lun, *dst_lun] - get_migration_session: *session_verify - -test_cleanup_migration: - session: &session_cancel - _properties: - existed: true - dest_lu_id: 2 - - lun: &lun_cancel_migrate - _methods: - cancel_migrate: - vnx: - _methods: - get_migration_session: *session_cancel - get_lun: *lun_cancel_migrate - -test_cleanup_migration_not_migrating: - lun: &lun_cancel_migrate_not_migrating - _methods: - cancel_migrate: - _raise: - VNXLunNotMigratingError: The LUN is not migrating - vnx: - _methods: - get_migration_session: *session_cancel - get_lun: *lun_cancel_migrate_not_migrating - -test_cleanup_migration_cancel_failed: - lun: &lun_cancel_migrate_cancel_failed - _methods: - cancel_migrate: - _raise: - VNXLunSyncCompletedError: The LUN is not migrating - _properties: - wwn: test - vnx: - _methods: - get_migration_session: - _side_effect: [*session_cancel, *session_verify] - get_lun: *lun_cancel_migrate_cancel_failed - -test_get_lun_by_name: - lun: &lun_test_get_lun_by_name - _properties: - <<: *lun_base_prop - lun_id: 888 - name: lun_name_test_get_lun_by_name - - vnx: - _properties: - <<: *vnx_base_prop - _methods: - get_lun: *lun_test_get_lun_by_name - -test_delete_lun: &test_delete_lun - lun: &lun_test_delete_lun - _properties: - <<: *lun_base_prop - name: lun_test_delete_lun - is_snap_mount_point: False - _methods: - delete: - - vnx: - _properties: - <<: *vnx_base_prop - _methods: - get_lun: *lun_test_delete_lun - -test_delete_smp: &test_delete_smp - snapshot: &snapshot_test_delete_smp - 
_properties: - name: snapshot_test_delete_smp - _methods: - delete: - - lun: &lun_test_delete_smp - _properties: - <<: *lun_base_prop - name: lun_test_delete_smp - attached_snapshot: *snapshot_test_delete_smp - is_snap_mount_point: True - _methods: - delete: - - vnx: - _properties: - <<: *vnx_base_prop - _methods: - get_lun: *lun_test_delete_smp - -test_delete_lun_not_exist: - lun: &lun_test_delete_lun_not_exist - _properties: - <<: *lun_base_prop - name: lun_test_delete_lun_not_exist - is_snap_mount_point: False - _methods: - delete: - _raise: - VNXLunNotFoundError: - Lun to delete doesn't exist. - - vnx: - _properties: - <<: *vnx_base_prop - _methods: - get_lun: *lun_test_delete_lun_not_exist - -test_delete_lun_exception: - lun: &lun_test_delete_lun_exception - _properties: - <<: *lun_base_prop - name: lun_test_delete_lun_exception - is_snap_mount_point: False - _methods: - delete: - _raise: - VNXDeleteLunError: - General lun delete error. - - vnx: - _properties: - <<: *vnx_base_prop - _methods: - get_lun: *lun_test_delete_lun_exception - -test_cleanup_async_lun: - lun: &lun_test_cleanup_async_lun - _properties: - <<: *lun_base_prop - name: lun_test_cleanup_async_lun - is_snap_mount_point: True - _methods: - delete: - cancel_migrate: - snap: &snap_test_cleanup_async_lun - _methods: - delete: - vnx: - _properties: - <<: *vnx_base_prop - _methods: - get_lun: *lun_test_cleanup_async_lun - get_snap: *snap_test_cleanup_async_lun - get_migration_session: *session_migrating - -test_create_cg: &test_create_cg - cg: &cg_for_create - _properties: - existed: True - _methods: - update: - with_no_poll: _context - - vnx: - _methods: - create_cg: *cg_for_create - -test_create_cg_already_existed: - vnx: - _methods: - create_cg: - _raise: - VNXConsistencyGroupNameInUseError: Already in use - get_cg: *cg_for_create - -test_delete_cg: - cg: &cg_for_deletion - _methods: - delete: - vnx: - _methods: - get_cg: *cg_for_deletion - -test_delete_cg_not_existed: - cg: &cg_delete_no_existed 
- _methods: - delete: - _raise: - VNXConsistencyGroupNotFoundError: CG not found - vnx: - _methods: - get_cg: *cg_delete_no_existed - -test_expand_lun: &test_expand_lun - lun: &lun_test_expand_lun - _properties: - <<: *lun_base_prop - name: lun_test_expand_lun - total_capacity_gb: 10 - _methods: - expand: - update: - with_no_poll: _context - vnx: - _properties: *vnx_base_prop - _methods: - get_lun: *lun_test_expand_lun - -test_expand_lun_not_poll: *test_expand_lun - -test_expand_lun_already_expanded: - lun: &lun_test_expand_lun_already_expanded - _properties: - <<: *lun_base_prop - total_capacity_gb: 10 - _methods: - update: - with_no_poll: _context - expand: - _raise: - VNXLunExpandSizeError: - LUN already expanded. - vnx: - _properties: *vnx_base_prop - _methods: - get_lun: *lun_test_expand_lun_already_expanded - -test_expand_lun_not_ops_ready: - lun: &lun_test_expand_lun_not_ops_ready - _properties: - <<: *lun_base_prop - total_capacity_gb: 10 - operation: 'None' - _methods: - update: - with_no_poll: _context - expand: - _raise: - VNXLunPreparingError: - LUN operation not ready. - vnx: - _properties: *vnx_base_prop - _methods: - get_lun: *lun_test_expand_lun_not_ops_ready - -test_create_snapshot: &test_create_snapshot - lun: &lun_test_create_snapshot - <<: *lun_base - _methods: - create_snap: - vnx: - <<: *vnx_base - _methods: - get_lun: *lun_test_create_snapshot - -test_create_snapshot_snap_name_exist_error: - lun: &lun_test_create_snapshot_snap_name_exist_error - <<: *lun_base - _methods: - create_snap: - _raise: - VNXSnapNameInUseError: Snapshot name is in use. 
- vnx: - <<: *vnx_base - _methods: - get_lun: *lun_test_create_snapshot_snap_name_exist_error - -test_delete_snapshot: &test_delete_snapshot - snapshot: &snapshot_test_delete_snapshot - <<: *snapshot_base - _methods: - delete: - - vnx: - <<: *vnx_base - _methods: - get_snap: *snapshot_test_delete_snapshot - -test_delete_snapshot_delete_attached_error: - snapshot: &snapshot_test_delete_snapshot_delete_attached_error - <<: *snapshot_base - _methods: - delete: - _raise: - VNXDeleteAttachedSnapError: - Snapshot is attached to a LUN. - - vnx: - <<: *vnx_base - _methods: - get_snap: *snapshot_test_delete_snapshot_delete_attached_error - -test_copy_snapshot: - snap: &snap_copy - _methods: - copy: - vnx: - _methods: - get_snap: *snap_copy - -test_create_mount_point: - lun: &lun_mount_point - _methods: - create_mount_point: - vnx: - _methods: - get_lun: *lun_mount_point - -test_attach_mount_point: - lun: &lun_attach_snap - _methods: - attach_snap: - vnx: - _methods: - get_lun: *lun_attach_snap - -test_detach_mount_point: - lun: &lun_detach - _methods: - detach_snap: - vnx: - _methods: - get_lun: *lun_detach - -test_modify_snapshot: - snap: &snap_modify - _methods: - modify: - - vnx: - _methods: - get_snap: *snap_modify - -test_create_cg_snapshot: &test_create_cg_snapshot - cg_snap: &cg_snap_exist - _properties: - existed: True - _methods: - update: - with_no_poll: _context - cg: &cg_test_create_cg_snapshot - _methods: - create_snap: *cg_snap_exist - vnx: - _methods: - get_cg: *cg_test_create_cg_snapshot - -test_create_cg_snapshot_already_existed: - cg: &cg_create_cg_snapshot_in_use_error - _methods: - with_no_poll: _context - create_snap: - _raise: - VNXSnapNameInUseError: 'Already in use' - vnx: - _methods: - get_cg: *cg_create_cg_snapshot_in_use_error - get_snap: *cg_snap_exist - -test_delete_cg_snapshot: *test_delete_snapshot - -test_create_sg: - sg: &sg_test_create_sg - <<: *sg_base - vnx: - <<: *vnx_base - _methods: - create_sg: *sg_test_create_sg - 
-test_create_sg_name_in_use: - vnx: - <<: *vnx_base - _methods: - create_sg: - _raise: - VNXStorageGroupNameInUseError: Storage group name is in use. - get_sg: *sg_base - -test_get_storage_group: - sg: &sg_test_get_storage_group - <<: *sg_base - vnx: - <<: *vnx_base - _methods: - get_sg: *sg_test_get_storage_group - -test_register_initiator: - sg: &sg_test_register_initiator - <<: *sg_base - _methods: - connect_hba: - update: - with_poll: _context - vnx: *vnx_base - -test_register_initiator_exception: - sg: &sg_test_register_initiator_exception - <<: *sg_base - _methods: - connect_hba: - _raise: - VNXStorageGroupError: set_path error. - update: - with_poll: _context - vnx: *vnx_base - -test_ping_node: - iscsi_port: &iscsi_port_test_ping_node - <<: *iscsi_port_base - _methods: - ping_node: - vnx: - <<: *vnx_base - _methods: - get_iscsi_port: *iscsi_port_test_ping_node - -test_ping_node_fail: - iscsi_port: &iscsi_port_test_ping_node_fail - <<: *iscsi_port_base - _methods: - ping_node: - _raise: - VNXPingNodeError: Failed to ping node. - vnx: - <<: *vnx_base - _methods: - get_iscsi_port: *iscsi_port_test_ping_node_fail - -test_add_lun_to_sg: - sg: &sg_test_add_lun_to_sg - <<: *sg_base - _methods: - attach_alu: 1 - vnx: *vnx_base - -test_add_lun_to_sg_alu_already_attached: - sg: &sg_test_add_lun_to_sg_alu_already_attached - <<: *sg_base - _methods: - attach_alu: - _raise: - VNXAluAlreadyAttachedError: ALU is already attached. - get_hlu: 1 - vnx: *vnx_base - -test_add_lun_to_sg_alu_in_use: - lun: - _properties: - <<: *lun_base_prop - lun_id: 1 - sg: &sg_test_add_lun_to_sg_alu_in_use - <<: *sg_base - _methods: - attach_alu: - _raise: - VNXNoHluAvailableError: No HLU available. 
- get_hlu: 1 - vnx: *vnx_base - -test_update_consistencygroup_no_lun_in_cg: - cg: - _properties: - <<: *cg_base_prop - lun_list: [] - _methods: - replace_member: - - lun_1: - _properties: - <<: *lun_base_prop - lun_id: 1 - - lun_2: - _properties: - <<: *lun_base_prop - lun_id: 2 - - vnx: *vnx_base - -test_update_consistencygroup_lun_in_cg: - lun_1: &lun_1_test_update_consistencygroup_lun_in_cg - _properties: - <<: *lun_base_prop - lun_id: 1 - - lun_2: - _properties: - <<: *lun_base_prop - lun_id: 2 - - cg: - _properties: - <<: *cg_base_prop - lun_list: - - *lun_1_test_update_consistencygroup_lun_in_cg - _methods: - replace_member: - - vnx: *vnx_base - -test_update_consistencygroup_remove_all: - lun_1: &lun_1_test_update_consistencygroup_remove_all - _properties: - <<: *lun_base_prop - lun_id: 1 - - cg: - _properties: - <<: *cg_base_prop - lun_list: - - *lun_1_test_update_consistencygroup_remove_all - _methods: - delete_member: - - vnx: *vnx_base - -test_create_export_snapshot: - -test_remove_export_snapshot: - -test_initialize_connection_snapshot: - lun: &lun_test_initialize_connection_snapshot - _properties: - <<: *lun_base_prop - lun_id: 100 - - vnx: - <<: *vnx_base - _methods: - get_lun: *lun_test_initialize_connection_snapshot - -test_terminate_connection_snapshot: - lun: &lun_test_terminate_connection_snapshot - _properties: - <<: *lun_base_prop - lun_id: 100 - - vnx: - <<: *vnx_base - _methods: - get_lun: *lun_test_terminate_connection_snapshot - -test_get_available_ip: - vnx: - _properties: - alive_sp_ip: '192.168.1.5' - -test_create_mirror: - vnx: - _methods: - get_lun: *lun_base - create_mirror_view: *mirror_base - -test_create_mirror_already_created: - vnx: - _methods: - get_lun: *lun_base - create_mirror_view: - _raise: - VNXMirrorNameInUseError: 'name in use' - get_mirror_view: *mirror_base - -test_delete_mirror: - mirror: &mirror_test_delete_mirror - _methods: - delete: - vnx: - _methods: - get_mirror_view: *mirror_test_delete_mirror - - 
-test_delete_mirror_already_deleted: - mirror: &mirror_delete_error - _methods: - delete: - _raise: - VNXMirrorNotFoundError: 'not found' - vnx: - _methods: - get_mirror_view: *mirror_delete_error - -test_add_image: - mirror: &mirror_test_add_image - _methods: - add_image: - update: - with_no_poll: _context - with_poll: _context - _properties: - secondary_image: - _properties: - state: - _enum: - VNXMirrorImageState: 'Synchronized' - vnx: - _methods: - get_mirror_view: *mirror_test_add_image - -test_remove_image: - mirror: &mirror_remove_image - _methods: - remove_image: - - vnx: - _methods: - get_mirror_view: *mirror_remove_image - -test_fracture_image: - mirror: &mirror_fracture_image - _methods: - fracture_image: - vnx: - _methods: - get_mirror_view: *mirror_fracture_image - -test_sync_image: - mirror: &mirror_sync_image - _properties: - <<: *mirror_base_prop - secondary_image: - _properties: - state: - _enum: - VNXMirrorImageState: 'SYNCHRONIZED' - _methods: - sync_image: - with_no_poll: _context - update: - - vnx: - _methods: - get_mirror_view: *mirror_sync_image - -test_promote_image: - mirror: &mirror_promote_image - _methods: - promote_image: - - vnx: - _methods: - get_mirror_view: *mirror_promote_image - -# Mirror group tests start -test_create_mirror_group: - vnx: - _methods: - create_mirror_group: *mirror_group_base - -test_create_mirror_group_name_in_use: - vnx: - _methods: - create_mirror_group: - _raise: - VNXMirrorGroupNameInUseError: Mirror Group name already in use - get_mirror_group: *mirror_group_base - -test_delete_mirror_group: - group: &group_to_delete - _methods: - delete: - vnx: - _methods: - get_mirror_group: *group_to_delete - -test_delete_mirror_group_not_found: - group: &group_to_delete_not_found - _methods: - delete: - _raise: - VNXMirrorGroupNotFoundError: Unable to locate - - vnx: - _methods: - get_mirror_group: *group_to_delete_not_found - -test_add_mirror: - group: &group_to_add - _methods: - add_mirror: - vnx: - _methods: - 
get_mirror_group: *group_to_add - get_mirror_view: *mirror_base - -test_add_mirror_already_added: - group: &group_to_add_added - _methods: - add_mirror: - _raise: - VNXMirrorGroupAlreadyMemberError: already a member of a group - vnx: - _methods: - get_mirror_group: *group_to_add_added - get_mirror_view: *mirror_base - -test_remove_mirror: - group: &group_to_remove - _methods: - remove_mirror: - vnx: - _methods: - get_mirror_group: *group_to_remove - get_mirror_view: *mirror_base - -test_remove_mirror_not_member: - group: &group_to_remove_not_member - _methods: - remove_mirror: - _raise: - VNXMirrorGroupMirrorNotMemberError: not a member of the group - vnx: - _methods: - get_mirror_group: *group_to_remove_not_member - get_mirror_view: *mirror_base - -test_promote_mirror_group: - group: &group_to_promote - _methods: - promote_group: - vnx: - _methods: - get_mirror_group: *group_to_promote - -test_promote_mirror_group_already_promoted: - group: &group_to_promote_already_promoted - _methods: - promote_group: - _raise: - VNXMirrorGroupAlreadyPromotedError: no secondary images to promote - vnx: - _methods: - get_mirror_group: *group_to_promote_already_promoted - -test_sync_mirror_group: - group: &group_to_sync - _methods: - sync_group: - vnx: - _methods: - get_mirror_group: *group_to_sync - -test_fracture_mirror_group: - group: &group_to_fracture - _methods: - fracture_group: - vnx: - _methods: - get_mirror_group: *group_to_fracture - -test_get_lun_id: - -test_get_lun_id_without_provider_location: - lun: &test_get_lun_id_without_provider_location - <<: *lun_base - _properties: - <<: *lun_base_prop - lun_id: 1 - - vnx: - _methods: - get_lun: *test_get_lun_id_without_provider_location - -test_get_ioclass: - ioclass_false: &ioclass_false - _properties: - existed: False - ioclass_true: &ioclass_true - _properties: - existed: True - _methods: - add_lun: - vnx: - _methods: - get_ioclass: *ioclass_false - create_ioclass: *ioclass_true - -test_create_ioclass_iops: - vnx: - 
_methods: - create_ioclass: *ioclass_true - -test_create_ioclass_bws: - vnx: - _methods: - create_ioclass: *ioclass_true - -test_create_policy: - policy: &policy - _properties: - state: "Running" - existed: False - _methods: - add_class: - run_policy: - vnx: - _methods: - get_policy: *policy - create_policy: *policy - -test_get_running_policy: - vnx: - _methods: - get_policy: [*policy, *policy] - -test_add_lun_to_ioclass: - vnx: - _methods: - get_ioclass: *ioclass_true - -########################################################### -# TestCommonAdapter -########################################################### -test_create_volume: *test_create_lun - -test_create_volume_error: *test_create_lun_error - -test_create_thick_volume: *test_create_lun - -test_create_volume_with_qos: - vnx: - _properties: - <<: *vnx_base_prop - _methods: - get_pool: *pool_test_create_lun - get_ioclass: *ioclass_true - get_policy: [*policy] - -test_migrate_volume: - lun: &src_lun_1 - _properties: - <<: *lun_base_prop - lun_id: 4 - wwn: 'src_wwn' - poll: false - _methods: - migrate: - update: - with_no_poll: _context - - - lun2: &lun_migrate_1 - _properties: - lun_id: 5 - wwn: 'dst_wwn' - _methods: - cancel_migrate: - - lun3: &lun_not_existed - _properties: - wwn: - - session: &session - _properties: - existed: false - pool: &migrate_pool - _methods: - create_lun: *src_lun_1 - vnx: - _methods: - get_lun: - _side_effect: [*lun_migrate_1, *src_lun_1, *src_lun_1, *lun_not_existed] - get_migration_session: *session - get_pool: *migrate_pool - -test_migrate_volume_host_assisted: - vnx: - _methods: - -test_create_cloned_volume: - snap: &snap_for_clone - _methods: - delete: - thick_base_lun: &test_create_cloned_volume_thick_base_lun - _properties: - is_thin_lun: fase - smp: &smp_migrate - _properties: - <<: *lun_base_prop - lun_id: 4 - wwn: 'src_wwn' - poll: false - is_thin_lun: false - total_capacity_gb: 10 - primary_lun: *test_create_cloned_volume_thick_base_lun - _methods: - migrate: - update: - 
with_no_poll: _context - lun2: &lun_migrate_2 - _properties: - lun_id: 5 - wwn: 'dst_wwn' - _methods: - cancel_migrate: - create_snap: - create_mount_point: - attach_snap: - lun3: &lun_not_existed_2 - _properties: - wwn: - session: &session_2 - _properties: - existed: false - pool: &migrate_pool_2 - _methods: - create_lun: *smp_migrate - vnx: - _properties: - serial: fake_serial - _methods: - get_lun: - _side_effect: [*lun_migrate_2, - *lun_migrate_2, *smp_migrate, *lun_migrate_2, *smp_migrate, - *lun_not_existed_2, *smp_migrate, *smp_migrate, - *lun_not_existed_2] - get_migration_session: *session_2 - get_pool: *migrate_pool_2 - get_snap: *snap_for_clone - -test_create_cloned_volume_snapcopy: - lun: &lun_for_snapcopy - _methods: - create_mount_point: - create_snap: - smp: &smp_for_snapcopy - _properties: - lun_id: 11 - _methods: - attach_snap: - vnx: - _properties: - serial: fake_serial - _methods: - get_lun: - _side_effect: [*lun_for_snapcopy, *lun_for_snapcopy, - *smp_for_snapcopy, *smp_for_snapcopy] - get_pool: *pool_base - -test_create_volume_from_snapshot: - lun: &lun_from_snapshot - _properties: - lun_id: 16 - _methods: - create_mount_point: - smp: &smp_from_lun - _properties: - is_thin_lun: false - total_capacity_gb: 10 - primary_lun: *test_create_cloned_volume_thick_base_lun - _methods: - attach_snap: - vnx: - _properties: - serial: fake_serial - _methods: - get_lun: - _side_effect: [*lun_from_snapshot, *lun_from_snapshot, *smp_from_lun, - *smp_from_lun, *dst_lun, *src_lun_1, *src_lun_1, *lun_not_existed] - get_pool: *pool_test_create_lun - get_migration_session: *session - -test_create_volume_from_snapshot_snapcopy: - snap: &snap_for_snapcopy - _methods: - copy: - modify: - vnx: - _properties: - serial: fake_serial - _methods: - get_snap: - _side_effect: [*snap_for_snapcopy, *snap_for_snapcopy] - get_lun: - _side_effect: [*lun_from_snapshot, *lun_from_snapshot, *smp_from_lun] - -test_parse_pools: &test_parse_pools - pool1: &pool_test_parse_pools_1 - 
_properties: - <<: *pool_base_prop - name: 'pool5' - pool2: &pool_test_parse_pools_2 - _properties: - <<: *pool_base_prop - name: 'pool6' - vnx: - _properties: - <<: *vnx_base_prop - _methods: - get_pool: [*pool_test_parse_pools_1, *pool_test_parse_pools_2] - -test_parse_pools_one_invalid_pool: *test_parse_pools - -test_parse_pools_all_invalid_pools: *test_parse_pools - -test_get_enabler_stats: &test_get_enabler_stats - vnx: &vnx_test_get_enabler_stats - _properties: - <<: *vnx_base_prop - _methods: - is_compression_enabled: True - is_dedup_enabled: True - is_fast_cache_enabled: True - is_thin_enabled: True - is_snap_enabled: True - is_auto_tiering_enabled: True - -test_get_pool_stats: - pool_feature: &pool_feature_test_get_pool_stats - _properties: - <<: *pool_feature_base_prop - - vnx: - _properties: - <<: *vnx_base_prop - _methods: - get_pool: [*pool_test_parse_pools_1, *pool_test_parse_pools_2] - get_pool_feature: *pool_feature_test_get_pool_stats - is_auto_tiering_enabled: True - -test_get_pool_stats_max_luns_reached: - pool_feature: &pool_feature_test_get_pool_stats_max_luns_reached - _properties: - <<: *pool_feature_base_prop - total_pool_luns: 3001 - - vnx: - _properties: - <<: *vnx_base_prop - _methods: - get_pool: [*pool_test_parse_pools_1, *pool_test_parse_pools_2] - get_pool_feature: *pool_feature_test_get_pool_stats_max_luns_reached - is_auto_tiering_enabled: True - -test_get_pool_stats_with_reserved: - vnx: - _properties: - <<: *vnx_base_prop - _methods: - get_pool: [*pool_test_parse_pools_1, *pool_test_parse_pools_2] - get_pool_feature: *pool_feature_test_get_pool_stats - is_auto_tiering_enabled: True - -test_get_pool_stats_offline: - pool1: &pool_test_get_pool_stats_offline_1 - _properties: - <<: *pool_base_prop - name: 'pool7' - state: 'Offline' - pool2: &pool_test_get_pool_stats_offline_2 - _properties: - <<: *pool_base_prop - name: 'pool8' - state: 'Offline' - vnx: - _properties: - <<: *vnx_base_prop - _methods: - get_pool: 
[*pool_test_get_pool_stats_offline_1, *pool_test_get_pool_stats_offline_2] - get_pool_feature: *pool_feature_test_get_pool_stats - is_compression_enabled: False - is_dedup_enabled: True - is_fast_cache_enabled: False - is_thin_enabled: True - is_snap_enabled: False - is_auto_tiering_enabled: True - -test_update_volume_stats: *test_get_enabler_stats - -test_append_volume_stats: - vnx: - _properties: - serial: fake_serial - -test_delete_volume_not_force: *test_delete_lun -test_delete_volume_force: *test_delete_lun - -test_delete_async_volume: - snap: &snap_test_delete_async_volume - _methods: - delete: - vnx: - _methods: - get_lun: *lun_test_delete_lun - get_snap: *snap_test_delete_async_volume - -test_delete_async_volume_migrating: - lun: &lun_used_by_feature - _properties: - is_snap_mount_point: false - _methods: - cancel_migrate: - delete: - _raise: - VNXLunUsedByFeatureError: - vnx: - _methods: - get_lun: *lun_used_by_feature - get_snap: *snap_test_delete_async_volume - -test_enable_compression: - lun: - _properties: - <<: *lun_base_prop - _methods: - enable_compression: - -test_enable_compression_on_compressed_lun: - lun: - _properties: - <<: *lun_base_prop - _methods: - enable_compression: - _raise: - VNXCompressionAlreadyEnabledError: - -test_lun_has_snapshot_false: - lun: - _properties: - <<: *lun_base_prop - _methods: - get_snap: [] - -test_lun_has_snapshot_true: - lun: - _properties: - <<: *lun_base_prop - _methods: - get_snap: ['fake_snap'] - -test_get_vnx_enabler_status: - vnx: - _methods: - is_dedup_enabled: True - is_compression_enabled: False - is_thin_enabled: True - is_fast_cache_enabled: True - is_auto_tiering_enabled: False - is_snap_enabled: True - -test_retype_type_invalid: - vnx: - _methods: - is_dedup_enabled: True - is_compression_enabled: True - is_thin_enabled: True - is_fast_cache_enabled: True - is_auto_tiering_enabled: True - is_snap_enabled: True - -test_retype_need_migration: - lun: &lun_retype_need_migration - _properties: - <<: 
*lun_base_prop - _methods: - get_snap: [] - with_no_poll: _context - update: - vnx: - _methods: - get_lun: - _side_effect: [*lun_retype_need_migration] - -test_retype_turn_on_compression_change_tier: - lun: &lun_retype_turn_on_compression_change_tier - _properties: - <<: *lun_base_prop - provision: - _enum: - VNXProvisionEnum: 'thin' - tier: - _enum: - VNXTieringEnum: 'auto' - _methods: - enable_compression: - get_snap: [] - with_no_poll: _context - update: - vnx: - _methods: - get_lun: *lun_retype_turn_on_compression_change_tier - -test_retype_lun_has_snap: - lun: &lun_retype_lun_has_snap - _properties: - <<: *lun_base_prop - provision: - _enum: - VNXProvisionEnum: 'thick' - tier: - _enum: - VNXTieringEnum: 'auto' - _methods: - get_snap: ['fake_snap'] - with_no_poll: _context - update: - vnx: - _methods: - get_lun: *lun_retype_lun_has_snap - -test_retype_change_tier: - lun: &lun_retype_change_tier - _properties: - <<: *lun_base_prop - provision: - _enum: - VNXProvisionEnum: 'thick' - tier: - _enum: - VNXTieringEnum: 'nomovement' - _methods: - with_no_poll: _context - update: - vnx: - _methods: - get_lun: *lun_retype_change_tier - -test_create_consistencygroup: *test_create_cg - -test_delete_consistencygroup: - vnx: - _methods: - get_cg: *cg_for_deletion - -test_delete_consistencygroup_with_volume: - vnx: - _methods: - get_cg: *cg_for_deletion - get_lun: *lun_test_delete_lun - -test_delete_consistencygroup_error: - cg: &cg_delete_error - _methods: - delete: - _raise: - VNXConsistencyGroupError: Unable to delete cg - vnx: - _methods: - get_cg: *cg_delete_error - -test_delete_consistencygroup_volume_error: - vnx: - _methods: - get_cg: *cg_for_deletion - get_lun: *lun_test_delete_lun_exception - -test_extend_volume: *test_expand_lun - -test_create_snapshot_adapter: *test_create_snapshot - -test_delete_snapshot_adapter: *test_delete_snapshot - -test_create_cgsnapshot: *test_create_cg_snapshot - -test_do_create_cgsnap: *test_create_cg_snapshot - -test_do_delete_cgsnap: 
- cg_snap: &cg_snap_delete - _methods: - delete: - vnx: - _methods: - get_snap: *cg_snap_delete - -test_do_create_cg_from_cgsnap: - snap: &copied_cg_snap - _methods: - copy: - modify: - smp: &smp_from_src_lun - _properties: - lun_id: 12 - _methods: - attach_snap: - - lun: &src_lun_in_cg - _methods: - create_mount_point: *smp_from_src_lun - vnx: - _properties: - _methods: - get_snap: - _side_effect: [*copied_cg_snap, *copied_cg_snap, - *snapshot_test_delete_snapshot] - get_lun: - _side_effect: [*src_lun_in_cg, *smp_from_src_lun, - *smp_from_src_lun, *lun_migrate, *src_lun, *dst_lun] - get_pool: *pool_test_create_lun - get_migration_session: *session_verify - create_cg: *cg_for_create - -test_do_clone_cg: - vnx: - _properties: - _methods: - get_cg: *cg_test_create_cg_snapshot - get_snap: *snapshot_test_delete_snapshot - get_lun: - _side_effect: [*src_lun_in_cg, *smp_from_src_lun, - *smp_from_src_lun, *lun_migrate, *src_lun, *dst_lun] - get_pool: *pool_test_create_lun - get_migration_session: *session_verify - create_cg: *cg_for_create - -test_validate_ports_iscsi: &test_validate_ports_iscsi - iscsi_port_a-0-0: *iscsi_port_a-0-0 - vnx: - <<: *vnx_base - _methods: - get_iscsi_port: *all_iscsi_ports - -test_validate_ports_iscsi_invalid: *test_validate_ports_iscsi - -test_validate_ports_iscsi_not_exist: *test_validate_ports_iscsi - -test_validate_ports_fc: &test_validate_ports_fc - fc_port_a-1: *fc_port_a-1 - vnx: - <<: *vnx_base - _methods: - get_fc_port: *all_fc_ports - -test_validate_ports_fc_invalid: *test_validate_ports_fc - -test_validate_ports_fc_not_exist: *test_validate_ports_fc - -test_manage_existing_lun_no_exist: - lun: &lun_manage_lun_not_exist - _properties: - existed: False - vnx: - _methods: - get_lun: *lun_manage_lun_not_exist - -test_manage_existing_invalid_ref: - lun: *lun_manage_lun_not_exist - -test_manage_existing_invalid_pool: - lun: &lun_manage_in_other_pool - _properties: - existed: True - pool_name: 'unmanaged_pool' - vnx: - _methods: - get_lun: 
*lun_manage_in_other_pool - -test_manage_existing_get_size: - lun: &lun_manage_get_size - _properties: - existed: True - pool_name: 'unit_test_pool' - total_capacity_gb: 5 - vnx: - _methods: - get_lun: *lun_manage_get_size - -test_manage_existing_type_mismatch: - lun: &lun_manage_type_mismatch - _properties: - existed: True - pool_name: 'unit_test_pool' - provision: - _enum: - VNXProvisionEnum: 'thick' - tier: - _enum: - VNXTieringEnum: 'highestavailable' - total_capacity_gb: 5 - vnx: - _methods: - get_lun: *lun_manage_type_mismatch - -test_manage_existing: - lun: &lun_manage_existing - _properties: &lun_manage_existing_props - lun_id: 1 - existed: True - pool_name: 'unit_test_pool' - provision: - _enum: - VNXProvisionEnum: 'deduplicated' - tier: - _enum: - VNXTieringEnum: 'auto' - total_capacity_gb: 5 - primary_lun: 'N/A' - is_snap_mount_point: False - _methods: - rename: - -test_manage_existing_smp: - lun: &manage_existing_smp - _properties: - lun_id: 2 - existed: True - pool_name: 'unit_test_pool' - primary_lun: 'src_lun' - is_snap_mount_point: True - _methods: - rename: - vnx: - _methods: - get_lun: *manage_existing_smp - -test_assure_storage_group: - sg: &sg_test_assure_storage_group - _properties: - <<: *sg_base_prop - existed: True - _methods: - update: - with_poll: _context - vnx: - <<: *vnx_base - _methods: - get_sg: *sg_test_assure_storage_group - -test_assure_storage_group_create_new: - sg: &sg_test_assure_storage_group_create_new - _properties: - <<: *sg_base_prop - existed: False - _methods: - update: - with_poll: _context - vnx: - <<: *vnx_base - _methods: - get_sg: *sg_test_assure_storage_group_create_new - create_sg: *sg_test_assure_storage_group_create_new - -test_assure_host_access: - sg: &sg_test_assure_host_access - <<: *sg_base - _methods: - update: - with_poll: _context - lun: &lun_test_assure_host_access - <<: *lun_base - vnx: - <<: *vnx_base - _methods: - get_lun: *lun_test_assure_host_access - 
-test_assure_host_access_without_auto_register_new_sg: &test_assure_host_access_without_auto_register_new_sg - sg: &sg_test_assure_host_access_without_auto_register_new_sg - <<: *sg_base - _methods: - update: - connect_host: - with_poll: _context - lun: &lun_test_assure_host_access_without_auto_register_new_sg - <<: *lun_base - vnx: - <<: *vnx_base - _methods: - get_lun: *lun_test_assure_host_access_without_auto_register_new_sg - -test_assure_host_access_without_auto_register: *test_assure_host_access_without_auto_register_new_sg - -test_auto_register_initiator: &test_auto_register_initiator - allowed_ports: *all_iscsi_ports - reg_ports: [*iscsi_port_a-0-0] - sg: &sg_auto_register_initiator - _properties: - <<: *sg_base_prop - initiator_uid_list: ['iqn-reg-1', 'iqn-reg-2'] - _methods: - get_ports: [*iscsi_port_a-0-0] - vnx: - <<: *vnx_base - -test_auto_register_initiator_no_white_list: *test_auto_register_initiator - -test_auto_register_initiator_no_port_to_reg: - allowed_ports: [*iscsi_port_a-0-0] - reg_ports: [*iscsi_port_a-0-0] - sg: - _properties: - <<: *sg_base_prop - initiator_uid_list: ['iqn-reg-1', 'iqn-reg-2'] - _methods: - get_ports: [*iscsi_port_a-0-0] - vnx: - <<: *vnx_base - -test_build_provider_location: - vnx: - _properties: - serial: 'vnx-serial' - -test_remove_host_access: - sg: &sg_remove_host_access - _properties: - existed: True - _methods: - detach_alu: - vnx: - _methods: - get_sg: *sg_remove_host_access - get_lun: *lun_base - -test_set_extra_spec_defaults: - vnx: - <<: *vnx_base_prop - _methods: - is_auto_tiering_enabled: True - -test_remove_host_access_sg_absent: - sg: &sg_remove_host_access_sg_absent - _properties: - existed: False - vnx: - _methods: - get_sg: *sg_remove_host_access_sg_absent - get_lun: *lun_base - -test_setup_lun_replication: - vnx: - _properties: - serial: 'vnx-serial' - lun: - _properties: - lun_id: 222 - wwn: fake_wwn - -test_setup_lun_replication_in_group: - group: &group_for_enable - _methods: - add_mirror: - vnx: - 
_properties: - serial: 'vnx-serial' - _methods: - get_mirror_view: *mirror_base - get_mirror_group: *group_for_enable - - lun: - _properties: - lun_id: 222 - wwn: fake_wwn - -test_cleanup_replication: - vnx: - _properties: - serial: 'vnx-serial' - _methods: - is_mirror_view_sync_enabled: True - -test_build_mirror_view: - vnx: - _properties: - serial: 'vnx-serial' - _methods: - is_mirror_view_sync_enabled: True - -test_build_mirror_view_no_device: - vnx: - _properties: - serial: 'vnx-serial' - -test_build_mirror_view_2_device: - vnx: - _properties: - serial: 'vnx-serial' - _methods: - is_mirror_view_sync_enabled: True - -test_build_mirror_view_no_enabler: - vnx: - _properties: - serial: 'vnx-serial' - _methods: - is_mirror_view_sync_enabled: False - -test_build_mirror_view_failover_false: - vnx: - _properties: - serial: 'vnx-serial' - _methods: - is_mirror_view_sync_enabled: True - -test_failover_host: - lun1: - _properties: - lun_id: 11 - -test_failover_host_invalid_backend_id: - -test_failover_host_failback: - lun1: - _properties: - lun_id: 22 - -test_failover_host_groups: - lun1: - _properties: - lun_id: 22 - -test_get_pool_name: - lun: &lun_test_get_pool_name - _properties: - <<: *lun_base_prop - pool_name: pool_1 - _methods: - with_no_poll: _context - update: - - vnx: - _methods: - get_lun: *lun_test_get_pool_name - -test_normalize_config_naviseccli_path: - -test_normalize_config_queue_path: - -test_normalize_config_naviseccli_path_none: - -test_normalize_config_pool_names: - -test_normalize_config_pool_names_none: - -test_normalize_config_pool_names_empty_list: - -test_normalize_config_io_port_list: - -test_normalize_config_io_port_list_none: - -test_normalize_config_io_port_list_empty_list: - - -########################################################### -# TestISCSIAdapter -########################################################### -test_parse_ports_iscsi: &test_parse_ports_iscsi - connection_port: &port_test_parse_ports_iscsi - - _properties: - existed: 
False - vnx: - _methods: - get_sg: *sg_remove_host_access_sg_absent - get_lun: *lun_base - -test_remove_host_access_volume_not_in_sg: - sg: &remove_host_access_volume_not_in_sg - _properties: *sg_base_prop - _methods: - detach_alu: - _raises: VNXDetachAluNotFoundError - vnx: - _methods: - get_sg: *remove_host_access_volume_not_in_sg - get_lun: *lun_base - -test_terminate_connection_cleanup_remove_sg: - sg: - _properties: *sg_base_prop - _methods: - delete: - disconnect_host: - get_alu_hlu_map: {} - -test_terminate_connection_cleanup_sg_absent: - sg: - _properties: - existed: False - -test_terminate_connection_cleanup_deregister: - sg: - _properties: *sg_base_prop - _methods: - delete: - disconnect_host: - get_alu_hlu_map: {} - vnx: - _methods: - remove_hba: - -test_terminate_connection_cleanup_sg_is_not_empty: - sg: - _properties: *sg_base_prop - _methods: - get_alu_hlu_map: {'1': '1'} - -test_update_consistencygroup: - -test_do_update_cg: - -test_update_migrated_volume: - -test_update_migrated_volume_smp: - -test_normalize_config_iscsi_initiators: - -test_normalize_config_iscsi_initiators_none: - -test_normalize_config_iscsi_initiators_empty_str: - -test_normalize_config_iscsi_initiators_not_dict: - -test_create_group_snap: - -test_create_cgsnapshot: - -test_create_cloned_cg: - -test_create_cloned_group: - -test_create_cg_from_cgsnapshot: - -test_create_group_from_group_snapshot: - -test_create_cgsnapshot: - -test_create_group_snapshot: - -test_delete_group_snapshot: - -test_delete_cgsnapshot: - - -########################################################### -# TestISCSIAdapter -########################################################### -test_update_volume_stats_iscsi: - vnx: - _properties: - <<: *vnx_base_prop - _methods: - get_iscsi_port: *all_iscsi_ports - -test_build_terminate_connection_return_data_iscsi: - - -########################################################### -# TestFCAdapter -########################################################### 
-test_build_terminate_connection_return_data_without_autozone: - -test_build_terminate_connection_return_data_sg_absent: - sg: - _properties: - <<: *sg_base_prop - existed: False - -test_build_terminate_connection_return_data_auto_zone: - sg: - _properties: - <<: *sg_base_prop - name: 'fake_host' - fc_ports: - - *fc_port_a-1 - _methods: - get_alu_hlu_map: {} - vnx: - _methods: - get_fc_port: *all_fc_ports - -test_mock_vnx_objects_foo: *test_create_lun - -test_get_tgt_list_and_initiator_tgt_map_allow_port_only: - sg: - _properties: - <<: *sg_base_prop - fc_ports: - - *fc_port_a-1 - - <<: *fc_port_base - _properties: - <<: *fc_port_base_prop - sp: *spb - port_id: 1 - wwn: '50:06:01:60:B6:E0:1C:F4:50:06:01:66:36:E0:1C:B1' - - *fc_port_b-2 - adapter: - _properties: - allowed_ports: - - <<: *fc_port_base - _properties: - <<: *fc_port_base_prop - sp: *spb - port_id: 1 - wwn: '50:06:01:60:B6:E0:1C:F4:50:06:01:66:36:E0:1C:B1' - - *fc_port_b-2 - - vnx: - _methods: - get_fc_port: *all_fc_ports - - -########################################################## -# TestReplicationAdapter -########################################################## - -test_enable_replication: - vnx: - _methods: - create_mirror_group: *mirror_group_base - get_mirror_view: *mirror_base - get_mirror_group: *group_for_enable - -test_disable_replication: - group: &group_for_disable - _methods: - remove_mirror: - delete: - - vnx: - _methods: - get_mirror_view: *mirror_base - get_mirror_group: *group_for_disable - -test_failover_replication: - - lun1: *lun_base - -########################################################## -# TestTaskflow -########################################################## -test_copy_snapshot_task: - vnx: - _methods: - get_snap: *snap_copy - -test_copy_snapshot_task_revert: - snap: &snap_copy_error - _methods: - copy: - _raise: - VNXSnapError: Unable to copy snap - delete: - vnx: - _methods: - get_snap: *snap_copy_error - -test_create_smp_task: - smp: &smp - _properties: - lun_id: 
15 - lun: &lun_create_smp - _methods: - create_mount_point: *smp - vnx: - _methods: - get_lun: - _side_effect: [*lun_create_smp, *smp] - -test_create_smp_task_revert: - lun: &lun_create_smp_error - _methods: - create_mount_point: - _raise: - VNXCreateLunError: 'Unable to create mount point' - delete: - _properties: - is_snap_mount_point: False - vnx: - _methods: - get_lun: *lun_create_smp_error - -test_attach_snap_task: - vnx: - _methods: - get_lun: *lun_attach_snap - -test_attach_snap_task_revert: - lun: &lun_attach_snap_error - _methods: - attach_snap: - _raise: - VNXAttachSnapError: 'Unable to attach snapshot' - detach_snap: - vnx: - _methods: - get_lun: *lun_attach_snap_error - -test_create_snapshot_task: - lun: &lun_snap - _methods: - create_snap: - vnx: - _methods: - get_lun: *lun_snap - -test_create_snapshot_task_revert: - snap: &snap_delete - _methods: - delete: - lun: &lun_snap_error - _methods: - create_snap: - _raise: - VNXCreateSnapError: 'Unable to create snap' - vnx: - _methods: - get_lun: *lun_snap_error - get_snap: *snap_delete - -test_allow_read_write_task: - vnx: - _methods: - get_snap: *snap_modify - -test_allow_read_write_task_revert: - snap: &snap_modify_error - _methods: - modify: - _raise: - VNXSnapError: Unable to modify snap - vnx: - _methods: - get_snap: *snap_modify_error - -test_wait_migrations_task: - vnx: - -test_create_consistency_group_task: - vnx: - -test_create_consistency_group_task_revert: - vnx: - -test_create_cg_snapshot_task: *test_create_cg_snapshot - - -test_create_cg_snapshot_task_revert: - cg: &create_cg_snapshot_error - _methods: - create_snap: - _raise: - VNXCreateSnapError: 'Create failed' - snap: &snap_create_cg_revert - _methods: - delete: - vnx: - _methods: - get_cg: *create_cg_snapshot_error - get_snap: *snap_create_cg_revert - -test_extend_smp_task: - thin_base_lun: &test_extend_smp_task_thin_base_lun - _properties: - is_thin_lun: true - lun: &lun_test_extend_smp_task - _properties: - <<: *lun_base_prop - name: 
lun_test_extend_smp_task - is_thin_lun: true - total_capacity_gb: 10 - primary_lun: *test_extend_smp_task_thin_base_lun - new_lun: &new_lun_test_extend_smp_task - _properties: - <<: *lun_base_prop - name: new_lun_test_extend_smp_task - is_thin_lun: true - total_capacity_gb: 100 - _methods: - expand: - with_no_poll: _context - update: - vnx: - _methods: - get_lun: - _side_effect: [*lun_test_extend_smp_task, *new_lun_test_extend_smp_task] - -test_extend_smp_task_skip_small_size: - lun: &lun_test_extend_smp_task_skip_small_size - _properties: - <<: *lun_base_prop - name: lun_test_extend_smp_task_skip_small_size - is_thin_lun: true - total_capacity_gb: 1 - primary_lun: *test_extend_smp_task_thin_base_lun - vnx: - _methods: - get_lun: *lun_test_extend_smp_task_skip_small_size - -test_extend_smp_task_skip_thick: &test_extend_smp_task_skip_thick - thick_base_lun: &test_extend_smp_task_thick_base_lun - _properties: - is_thin_lun: false - lun: &lun_test_extend_smp_task_skip_thick - _properties: - <<: *lun_base_prop - name: lun_test_extend_smp_task_skip_thick - is_thin_lun: false - total_capacity_gb: 10 - primary_lun: *test_extend_smp_task_thick_base_lun - vnx: - _methods: - get_lun: *lun_test_extend_smp_task_skip_thick - -########################################################### -# TestExtraSpecs -########################################################### -test_generate_extra_specs_from_lun: - lun: - _properties: - provision: - _enum: - VNXProvisionEnum: 'compressed' - tier: - _enum: - VNXTieringEnum: 'highestavailable' - - deduped_lun: - _properties: - provision: - _enum: - VNXProvisionEnum: 'deduplicated' - tier: - _enum: - VNXTieringEnum: 'auto' - -test_extra_specs_match_with_lun: - lun: - _properties: - provision: - _enum: - VNXProvisionEnum: 'thin' - tier: - _enum: - VNXTieringEnum: 'nomovement' - - deduped_lun: - _properties: - provision: - _enum: - VNXProvisionEnum: 'deduplicated' - tier: - _enum: - VNXTieringEnum: 'nomovement' - 
-test_extra_specs_not_match_with_lun: - lun: - _properties: - provision: - _enum: - VNXProvisionEnum: 'thick' - tier: - _enum: - VNXTieringEnum: 'lowestavailable' diff --git a/cinder/tests/unit/volume/drivers/dell_emc/vnx/res_mock.py b/cinder/tests/unit/volume/drivers/dell_emc/vnx/res_mock.py deleted file mode 100644 index 83596b12b..000000000 --- a/cinder/tests/unit/volume/drivers/dell_emc/vnx/res_mock.py +++ /dev/null @@ -1,456 +0,0 @@ -# Copyright (c) 2016 EMC Corporation. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -import mock -import six - -from cinder import context -from cinder.tests.unit.consistencygroup import fake_cgsnapshot -from cinder.tests.unit.consistencygroup import fake_consistencygroup -from cinder.tests.unit import fake_constants -from cinder.tests.unit import fake_group -from cinder.tests.unit import fake_snapshot -from cinder.tests.unit import fake_volume -from cinder.tests.unit.volume.drivers.dell_emc.vnx import fake_exception as \ - lib_ex -from cinder.tests.unit.volume.drivers.dell_emc.vnx import fake_storops as \ - storops -from cinder.tests.unit.volume.drivers.dell_emc.vnx import utils -from cinder.volume.drivers.dell_emc.vnx import adapter -from cinder.volume.drivers.dell_emc.vnx import client -from cinder.volume.drivers.dell_emc.vnx import common -from cinder.volume.drivers.dell_emc.vnx import driver -from cinder.volume.drivers.dell_emc.vnx import utils as vnx_utils - -SYMBOL_TYPE = '_type' -SYMBOL_PROPERTIES = '_properties' -SYMBOL_METHODS = '_methods' -SYMBOL_SIDE_EFFECT = '_side_effect' -SYMBOL_RAISE = '_raise' -SYMBOL_CONTEXT = '_context' -UUID = '_uuid' -SYMBOL_ENUM = '_enum' - - -def _is_driver_object(obj_body): - return isinstance(obj_body, dict) and SYMBOL_PROPERTIES in obj_body - - -class DriverResourceMock(dict): - fake_func_mapping = {} - - def __init__(self, yaml_file): - yaml_dict = utils.load_yaml(yaml_file) - if not isinstance(yaml_dict, dict): - return - for case_name, case_res in yaml_dict.items(): - if not isinstance(case_res, dict): - continue - self[case_name] = {} - for obj_name, obj_body in case_res.items(): - self[case_name][obj_name] = self._parse_driver_object(obj_body) - - def _parse_driver_object(self, obj_body): - if isinstance(obj_body, dict): - obj_body = {k: self._parse_driver_object(v) - for k, v in obj_body.items()} - if _is_driver_object(obj_body): - return self._create_object(obj_body) - else: - return obj_body - elif isinstance(obj_body, list): - return map(self._parse_driver_object, obj_body) - else: - return 
obj_body - - def _create_object(self, obj_body): - props = obj_body[SYMBOL_PROPERTIES] - for prop_name, prop_value in props.items(): - if isinstance(prop_value, dict) and prop_value: - # get the first key as the convert function - func_name = list(prop_value.keys())[0] - if func_name.startswith('_'): - func = getattr(self, func_name) - props[prop_name] = func(prop_value[func_name]) - - if (SYMBOL_TYPE in obj_body and - obj_body[SYMBOL_TYPE] in self.fake_func_mapping): - return self.fake_func_mapping[obj_body[SYMBOL_TYPE]](**props) - else: - return props - - @staticmethod - def _uuid(uuid_key): - uuid_key = uuid_key.upper() - return getattr(fake_constants, uuid_key) - - -def _fake_volume_wrapper(*args, **kwargs): - expected_attrs_key = {'volume_attachment': 'volume_attachment', - 'volume_metadata': 'metadata'} - if 'group' in kwargs: - expected_attrs_key['group'] = kwargs['group'] - - return fake_volume.fake_volume_obj( - context.get_admin_context(), - expected_attrs=[ - v for (k, v) in expected_attrs_key.items() if k in kwargs], - **kwargs) - - -def _fake_cg_wrapper(*args, **kwargs): - return fake_consistencygroup.fake_consistencyobject_obj( - 'fake_context', **kwargs) - - -def _fake_snapshot_wrapper(*args, **kwargs): - return fake_snapshot.fake_snapshot_obj('fake_context', - expected_attrs=( - ['volume'] if 'volume' in kwargs - else None), - **kwargs) - - -def _fake_cg_snapshot_wrapper(*args, **kwargs): - return fake_cgsnapshot.fake_cgsnapshot_obj(None, **kwargs) - - -def _fake_group_wrapper(*args, **kwargs): - return fake_group.fake_group_obj(None, **kwargs) - - -class EnumBuilder(object): - def __init__(self, enum_dict): - enum_dict = enum_dict[SYMBOL_ENUM] - for k, v in enum_dict.items(): - self.klazz = k - self.value = v - - def __call__(self, *args, **kwargs): - return getattr(storops, self.klazz).parse(self.value) - - -class CinderResourceMock(DriverResourceMock): - # fake_func in the mapping should be like func(*args, **kwargs) - fake_func_mapping = 
{'volume': _fake_volume_wrapper, - 'cg': _fake_cg_wrapper, - 'snapshot': _fake_snapshot_wrapper, - 'cg_snapshot': _fake_cg_snapshot_wrapper, - 'group': _fake_group_wrapper} - - def __init__(self, yaml_file): - super(CinderResourceMock, self).__init__(yaml_file) - - @staticmethod - def _build_provider_location(props): - return vnx_utils.build_provider_location( - props.get('system'), props.get('type'), - six.text_type(props.get('id')), - six.text_type(props.get('base_lun_name')), - props.get('version')) - - -class ContextMock(object): - """Mocks the return value of a context function.""" - - def __enter__(self): - pass - - def __exit__(self, exc_type, exc_valu, exc_tb): - pass - - -class MockBase(object): - """Base object of all the Mocks. - - This mock convert the dict to object when the '_type' is - included in the dict - """ - - def _is_mock_object(self, yaml_info): - return (isinstance(yaml_info, dict) and - (SYMBOL_PROPERTIES in yaml_info or - SYMBOL_METHODS in yaml_info)) - - def _is_object_with_type(self, yaml_dict): - return isinstance(yaml_dict, dict) and SYMBOL_TYPE in yaml_dict - - def _is_object_with_enum(self, yaml_dict): - return isinstance(yaml_dict, dict) and SYMBOL_ENUM in yaml_dict - - def _build_mock_object(self, yaml_dict): - if self._is_object_with_type(yaml_dict): - return FakePort(yaml_dict) - elif self._is_object_with_enum(yaml_dict): - return EnumBuilder(yaml_dict)() - elif self._is_mock_object(yaml_dict): - return StorageObjectMock(yaml_dict) - elif isinstance(yaml_dict, dict): - return {k: self._build_mock_object(v) - for k, v in yaml_dict.items()} - elif isinstance(yaml_dict, list): - return [self._build_mock_object(each) for each in yaml_dict] - else: - return yaml_dict - - -class StorageObjectMock(object): - PROPS = 'props' - - def __init__(self, yaml_dict): - self.__dict__[StorageObjectMock.PROPS] = {} - props = yaml_dict.get(SYMBOL_PROPERTIES, None) - if props: - for k, v in props.items(): - setattr(self, k, StoragePropertyMock(k, 
v)()) - - methods = yaml_dict.get(SYMBOL_METHODS, None) - if methods: - for k, v in methods.items(): - setattr(self, k, StorageMethodMock(k, v)) - - def __setattr__(self, key, value): - self.__dict__[StorageObjectMock.PROPS][key] = value - - def __getattr__(self, item): - try: - super(StorageObjectMock, self).__getattr__(item) - except AttributeError: - return self.__dict__[StorageObjectMock.PROPS][item] - except KeyError: - raise KeyError('%(item)s not exist in mock object.' - ) % {'item': item} - - -class FakePort(StorageObjectMock): - - def __eq__(self, other): - o_sp = other.sp - o_port_id = other.port_id - o_vport_id = other.vport_id - - ret = True - ret &= self.sp == o_sp - ret &= self.port_id == o_port_id - ret &= self.vport_id == o_vport_id - - return ret - - def __hash__(self): - return hash((self.sp, self.port_id, self.vport_id)) - - -class StoragePropertyMock(mock.PropertyMock, MockBase): - def __init__(self, name, property_body): - return_value = property_body - side_effect = None - - # only support return_value and side_effect for property - if (isinstance(property_body, dict) and - SYMBOL_SIDE_EFFECT in property_body): - side_effect = self._build_mock_object( - property_body[SYMBOL_SIDE_EFFECT]) - return_value = None - - if side_effect is not None: - super(StoragePropertyMock, self).__init__( - name=name, - side_effect=side_effect) - else: - return_value = self._build_mock_object(return_value) - - super(StoragePropertyMock, self).__init__( - name=name, - return_value=return_value) - - -class StorageMethodMock(mock.Mock, MockBase): - def __init__(self, name, method_body): - return_value = method_body - exception = None - side_effect = None - - # support return_value, side_effect and exception for method - if isinstance(method_body, dict): - if (SYMBOL_SIDE_EFFECT in method_body or - SYMBOL_RAISE in method_body): - exception = method_body.get(SYMBOL_RAISE, None) - side_effect = method_body.get(SYMBOL_SIDE_EFFECT, None) - return_value = None - - if 
exception is not None: - ex = None - if isinstance(exception, dict) and exception: - ex_name = list(exception.keys())[0] - ex_tmp = [getattr(ex_module, ex_name, None) - for ex_module in [lib_ex, common]] - try: - ex = [each for each in ex_tmp if each is not None][0] - super(StorageMethodMock, self).__init__( - name=name, - side_effect=ex(exception[ex_name])) - except IndexError: - raise KeyError('Exception %(ex_name)s not found.' - % {'ex_name': ex_name}) - else: - raise KeyError('Invalid Exception body, should be a dict.') - elif side_effect is not None: - super(StorageMethodMock, self).__init__( - name=name, - side_effect=self._build_mock_object(side_effect)) - elif return_value is not None: - super(StorageMethodMock, self).__init__( - name=name, - return_value=(ContextMock() if return_value == SYMBOL_CONTEXT - else self._build_mock_object(return_value))) - else: - super(StorageMethodMock, self).__init__( - name=name, return_value=None) - - -class StorageResourceMock(dict, MockBase): - def __init__(self, yaml_file): - yaml_dict = utils.load_yaml(yaml_file) - if not isinstance(yaml_dict, dict): - return - for section, sec_body in yaml_dict.items(): - if isinstance(sec_body, dict): - self[section] = {obj_name: self._build_mock_object(obj_body) - for obj_name, obj_body - in sec_body.items()} - else: - self[section] = {} - - -cinder_res = CinderResourceMock('mocked_cinder.yaml') -DRIVER_RES_MAPPING = { - 'TestResMock': cinder_res, - 'TestCommonAdapter': cinder_res, - 'TestReplicationAdapter': cinder_res, - 'TestISCSIAdapter': cinder_res, - 'TestFCAdapter': cinder_res, - 'TestUtils': cinder_res, - 'TestClient': cinder_res -} - - -def mock_driver_input(func): - @six.wraps(func) - def decorated(cls, *args, **kwargs): - return func(cls, - DRIVER_RES_MAPPING[cls.__class__.__name__][func.__name__], - *args, **kwargs) - return decorated - - -vnx_res = StorageResourceMock('mocked_vnx.yaml') -STORAGE_RES_MAPPING = { - 'TestResMock': StorageResourceMock('test_res_mock.yaml'), 
- 'TestCondition': vnx_res, - 'TestClient': vnx_res, - 'TestCommonAdapter': vnx_res, - 'TestReplicationAdapter': vnx_res, - 'TestISCSIAdapter': vnx_res, - 'TestFCAdapter': vnx_res, - 'TestTaskflow': vnx_res, - 'TestExtraSpecs': vnx_res, -} -DEFAULT_STORAGE_RES = 'vnx' - - -def _build_client(): - return client.Client(ip='192.168.1.2', - username='sysadmin', - password='sysadmin', - scope='global', - naviseccli=None, - sec_file=None, - queue_path='vnx-cinder') - - -def patch_client(func): - @six.wraps(func) - def decorated(cls, *args, **kwargs): - storage_res = ( - STORAGE_RES_MAPPING[cls.__class__.__name__][func.__name__]) - with utils.patch_vnxsystem as patched_vnx: - if DEFAULT_STORAGE_RES in storage_res: - patched_vnx.return_value = storage_res[DEFAULT_STORAGE_RES] - client = _build_client() - return func(cls, client, storage_res, *args, **kwargs) - return decorated - - -PROTOCOL_COMMON = 'Common' -PROTOCOL_MAPPING = { - PROTOCOL_COMMON: adapter.CommonAdapter, - common.PROTOCOL_ISCSI: adapter.ISCSIAdapter, - common.PROTOCOL_FC: adapter.FCAdapter -} - - -def patch_adapter_init(protocol): - def inner_patch_adapter(func): - @six.wraps(func) - def decorated(cls, *args, **kwargs): - storage_res = ( - STORAGE_RES_MAPPING[cls.__class__.__name__][func.__name__]) - with utils.patch_vnxsystem as patched_vnx: - if DEFAULT_STORAGE_RES in storage_res: - patched_vnx.return_value = storage_res[DEFAULT_STORAGE_RES] - adapter = PROTOCOL_MAPPING[protocol](cls.configuration) - return func(cls, adapter, storage_res, *args, **kwargs) - return decorated - return inner_patch_adapter - - -def _patch_adapter_prop(adapter, client): - try: - adapter.serial_number = client.get_serial() - except KeyError: - adapter.serial_number = 'faked_serial_number' - adapter.VERSION = driver.VNXDriver.VERSION - - -def patch_adapter(protocol): - def inner_patch_adapter(func): - @six.wraps(func) - def decorated(cls, *args, **kwargs): - storage_res = ( - 
STORAGE_RES_MAPPING[cls.__class__.__name__][func.__name__]) - with utils.patch_vnxsystem: - client = _build_client() - adapter = PROTOCOL_MAPPING[protocol](cls.configuration, None) - if DEFAULT_STORAGE_RES in storage_res: - client.vnx = storage_res[DEFAULT_STORAGE_RES] - adapter.client = client - _patch_adapter_prop(adapter, client) - return func(cls, adapter, storage_res, *args, **kwargs) - return decorated - return inner_patch_adapter - - -patch_common_adapter = patch_adapter(PROTOCOL_COMMON) -patch_iscsi_adapter = patch_adapter(common.PROTOCOL_ISCSI) -patch_fc_adapter = patch_adapter(common.PROTOCOL_FC) - - -def mock_storage_resources(func): - @six.wraps(func) - def decorated(cls, *args, **kwargs): - storage_res = ( - STORAGE_RES_MAPPING[cls.__class__.__name__][func.__name__]) - return func(cls, storage_res, *args, **kwargs) - return decorated diff --git a/cinder/tests/unit/volume/drivers/dell_emc/vnx/test_adapter.py b/cinder/tests/unit/volume/drivers/dell_emc/vnx/test_adapter.py deleted file mode 100644 index 2b50a7d53..000000000 --- a/cinder/tests/unit/volume/drivers/dell_emc/vnx/test_adapter.py +++ /dev/null @@ -1,1628 +0,0 @@ -# Copyright (c) 2016 EMC Corporation. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
-import mock -import os -import re - -from oslo_config import cfg - -from cinder import context -from cinder import exception -from cinder.objects import fields -from cinder import test -from cinder.tests.unit import fake_constants -from cinder.tests.unit import utils as test_utils -from cinder.tests.unit.volume.drivers.dell_emc.vnx import fake_exception \ - as storops_ex -from cinder.tests.unit.volume.drivers.dell_emc.vnx import fake_storops \ - as storops -from cinder.tests.unit.volume.drivers.dell_emc.vnx import res_mock -from cinder.tests.unit.volume.drivers.dell_emc.vnx import utils -from cinder.volume import configuration as conf -from cinder.volume.drivers.dell_emc.vnx import adapter -from cinder.volume.drivers.dell_emc.vnx import client -from cinder.volume.drivers.dell_emc.vnx import common -from cinder.volume.drivers.dell_emc.vnx import utils as vnx_utils - - -class TestCommonAdapter(test.TestCase): - - def setUp(self): - super(TestCommonAdapter, self).setUp() - self.configuration = conf.Configuration(None) - vnx_utils.init_ops(self.configuration) - self.configuration.san_ip = '192.168.1.1' - self.configuration.storage_vnx_authentication_type = 'global' - self.configuration.config_group = 'vnx_backend' - self.ctxt = context.get_admin_context() - - def tearDown(self): - super(TestCommonAdapter, self).tearDown() - - @res_mock.mock_driver_input - @res_mock.patch_common_adapter - def test_create_volume(self, vnx_common, _ignore, mocked_input): - volume = mocked_input['volume'] - with mock.patch.object(vnx_utils, 'get_backend_qos_specs', - return_value=None): - model_update = vnx_common.create_volume(volume) - self.assertEqual('False', model_update.get('metadata')['snapcopy']) - - @res_mock.mock_driver_input - @res_mock.patch_common_adapter - def test_create_volume_error(self, vnx_common, _ignore, mocked_input): - def inner(): - with mock.patch.object(vnx_utils, 'get_backend_qos_specs', - return_value=None): - vnx_common.create_volume(mocked_input['volume']) - 
self.assertRaises(storops_ex.VNXCreateLunError, inner) - - @utils.patch_extra_specs({'provisioning:type': 'thick'}) - @res_mock.mock_driver_input - @res_mock.patch_common_adapter - def test_create_thick_volume(self, vnx_common, _ignore, mocked_input): - volume = mocked_input['volume'] - expected_pool = volume.host.split('#')[1] - with mock.patch.object(vnx_utils, 'get_backend_qos_specs', - return_value=None): - vnx_common.create_volume(volume) - vnx_common.client.vnx.get_pool.assert_called_with( - name=expected_pool) - - @utils.patch_extra_specs({'provisioning:type': 'thin'}) - @res_mock.mock_driver_input - @res_mock.patch_common_adapter - def test_create_volume_with_qos(self, vnx_common, _ignore, mocked_input): - volume = mocked_input['volume'] - with mock.patch.object(vnx_utils, 'get_backend_qos_specs', - return_value={'id': 'test', - 'maxBWS': 100, - 'maxIOPS': 123}): - model_update = vnx_common.create_volume(volume) - self.assertEqual('False', model_update.get('metadata')['snapcopy']) - - @res_mock.mock_driver_input - @res_mock.patch_common_adapter - def test_migrate_volume(self, vnx_common, mocked, cinder_input): - volume = cinder_input['volume'] - host = {'capabilities': - {'location_info': 'pool_name|fake_serial', - 'storage_protocol': 'iscsi'}, - 'host': 'hostname@backend_name#pool_name'} - vnx_common.serial_number = 'fake_serial' - migrated, update = vnx_common.migrate_volume(None, volume, host) - self.assertTrue(migrated) - self.assertEqual('False', update['metadata']['snapcopy']) - - @res_mock.mock_driver_input - @res_mock.patch_common_adapter - def test_migrate_volume_host_assisted(self, vnx_common, mocked, - cinder_input): - volume1 = cinder_input['volume'] - host = { - 'capabilities': { - 'location_info': 'pool_name|fake_serial', - 'storage_protocol': 'iscsi'}, - 'host': 'hostname@backend_name#pool_name'} - vnx_common.serial_number = 'new_serial' - migrated, update = vnx_common.migrate_volume(None, volume1, host) - self.assertFalse(migrated) - 
self.assertIsNone(update) - - @res_mock.mock_driver_input - @res_mock.patch_common_adapter - def test_create_cloned_volume( - self, vnx_common, mocked, cinder_input): - volume = cinder_input['volume'] - src_vref = cinder_input['src_vref'] - model_update = vnx_common.create_cloned_volume(volume, src_vref) - self.assertEqual('False', model_update['metadata']['snapcopy']) - - @res_mock.mock_driver_input - @res_mock.patch_common_adapter - def test_create_cloned_volume_snapcopy( - self, vnx_common, mocked, cinder_input): - volume = cinder_input['volume'] - volume.metadata = {'snapcopy': 'True'} - src_vref = cinder_input['src_vref'] - model_update = vnx_common.create_cloned_volume(volume, src_vref) - self.assertEqual('True', model_update['metadata']['snapcopy']) - - @res_mock.mock_driver_input - @res_mock.patch_common_adapter - def test_create_volume_from_snapshot( - self, vnx_common, mocked, cinder_input): - volume = cinder_input['volume'] - volume['metadata'] = {'async_migrate': 'False'} - snapshot = cinder_input['snapshot'] - snapshot.volume = volume - update = vnx_common.create_volume_from_snapshot(volume, snapshot) - self.assertEqual('False', update['metadata']['snapcopy']) - - @res_mock.mock_driver_input - @res_mock.patch_common_adapter - def test_create_volume_from_snapshot_snapcopy( - self, vnx_common, mocked, cinder_input): - volume = cinder_input['volume'] - volume.metadata = {'snapcopy': 'True'} - snapshot = cinder_input['snapshot'] - snapshot.volume = volume - update = vnx_common.create_volume_from_snapshot(volume, snapshot) - self.assertEqual('True', update['metadata']['snapcopy']) - - @res_mock.patch_common_adapter - def test_create_cg_from_cgsnapshot(self, common, _): - common.do_create_cg_from_cgsnap = mock.Mock( - return_value='fake_return') - new_cg = test_utils.create_consistencygroup( - self.ctxt, - id=fake_constants.CONSISTENCY_GROUP_ID, - host='host@backend#unit_test_pool', - group_type_id=fake_constants.VOLUME_TYPE_ID) - cg_snapshot = 
test_utils.create_cgsnapshot( - self.ctxt, - fake_constants.CONSISTENCY_GROUP2_ID) - vol = test_utils.create_volume(self.ctxt) - snaps = [ - test_utils.create_snapshot(self.ctxt, vol.id)] - vol_new = test_utils.create_volume(self.ctxt) - ret = common.create_cg_from_cgsnapshot( - None, new_cg, [vol_new], cg_snapshot, snaps) - self.assertEqual('fake_return', ret) - common.do_create_cg_from_cgsnap.assert_called_once_with( - new_cg.id, new_cg.host, [vol_new], cg_snapshot.id, snaps) - - @res_mock.patch_common_adapter - def test_create_group_from_group_snapshot(self, common, _): - common.do_create_cg_from_cgsnap = mock.Mock( - return_value='fake_return') - group = test_utils.create_group( - self.ctxt, - id=fake_constants.CONSISTENCY_GROUP_ID, - host='host@backend#unit_test_pool', - group_type_id=fake_constants.VOLUME_TYPE_ID) - group_snapshot = test_utils.create_group_snapshot( - self.ctxt, - fake_constants.CGSNAPSHOT_ID, - group_type_id=fake_constants.VOLUME_TYPE_ID) - vol = test_utils.create_volume(self.ctxt) - snaps = [ - test_utils.create_snapshot(self.ctxt, vol.id)] - vol_new = test_utils.create_volume(self.ctxt) - ret = common.create_group_from_group_snapshot( - None, group, [vol_new], group_snapshot, snaps) - self.assertEqual('fake_return', ret) - common.do_create_cg_from_cgsnap.assert_called_once_with( - group.id, group.host, [vol_new], group_snapshot.id, snaps) - - @res_mock.mock_driver_input - @res_mock.patch_common_adapter - def test_do_create_cg_from_cgsnap( - self, vnx_common, mocked, cinder_input): - cg_id = fake_constants.CONSISTENCY_GROUP_ID - cg_host = 'host@backend#unit_test_pool' - volumes = [cinder_input['vol1']] - cgsnap_id = fake_constants.CGSNAPSHOT_ID - snaps = [cinder_input['snap1']] - - model_update, volume_updates = ( - vnx_common.do_create_cg_from_cgsnap( - cg_id, cg_host, volumes, cgsnap_id, snaps)) - self.assertIsNone(model_update) - self.assertIsNotNone( - re.findall('id^12', - volume_updates[0]['provider_location'])) - - 
@res_mock.patch_common_adapter - def test_create_cloned_cg(self, common, _): - common.do_clone_cg = mock.Mock( - return_value='fake_return') - group = test_utils.create_consistencygroup( - self.ctxt, - id=fake_constants.CONSISTENCY_GROUP_ID, - host='host@backend#unit_test_pool', - group_type_id=fake_constants.VOLUME_TYPE_ID) - src_group = test_utils.create_consistencygroup( - self.ctxt, - id=fake_constants.CONSISTENCY_GROUP2_ID, - host='host@backend#unit_test_pool2', - group_type_id=fake_constants.VOLUME_TYPE_ID) - vol = test_utils.create_volume(self.ctxt) - src_vol = test_utils.create_volume(self.ctxt) - ret = common.create_cloned_group( - None, group, [vol], src_group, [src_vol]) - self.assertEqual('fake_return', ret) - common.do_clone_cg.assert_called_once_with( - group.id, group.host, [vol], src_group.id, [src_vol]) - - @res_mock.patch_common_adapter - def test_create_cloned_group(self, common, _): - common.do_clone_cg = mock.Mock( - return_value='fake_return') - group = test_utils.create_group( - self.ctxt, - id=fake_constants.GROUP_ID, - host='host@backend#unit_test_pool', - group_type_id=fake_constants.VOLUME_TYPE_ID) - src_group = test_utils.create_group( - self.ctxt, - id=fake_constants.GROUP2_ID, - host='host@backend#unit_test_pool2', - group_type_id=fake_constants.VOLUME_TYPE_ID) - vol = test_utils.create_volume(self.ctxt) - src_vol = test_utils.create_volume(self.ctxt) - ret = common.create_cloned_group( - None, group, [vol], src_group, [src_vol]) - self.assertEqual('fake_return', ret) - common.do_clone_cg.assert_called_once_with( - group.id, group.host, [vol], src_group.id, [src_vol]) - - @res_mock.mock_driver_input - @res_mock.patch_common_adapter - def test_do_clone_cg(self, vnx_common, _, cinder_input): - cg_id = fake_constants.CONSISTENCY_GROUP_ID - cg_host = 'host@backend#unit_test_pool' - volumes = [cinder_input['vol1']] - src_cg_id = fake_constants.CONSISTENCY_GROUP2_ID - src_volumes = [cinder_input['src_vol1']] - model_update, volume_updates = 
vnx_common.do_clone_cg( - cg_id, cg_host, volumes, src_cg_id, src_volumes) - self.assertIsNone(model_update) - self.assertIsNotNone( - re.findall('id^12', - volume_updates[0]['provider_location'])) - - @res_mock.patch_common_adapter - def test_parse_pools(self, vnx_common, mocked): - vnx_common.config.storage_vnx_pool_names = ['pool5', 'pool6'] - parsed = vnx_common.parse_pools() - self.assertEqual( - len(vnx_common.config.storage_vnx_pool_names), - len(parsed)) - pools = vnx_common.client.get_pools() - self.assertEqual(pools, parsed) - - @res_mock.patch_common_adapter - def test_parse_pools_one_invalid_pool(self, vnx_common, mocked): - vnx_common.config.storage_vnx_pool_names = ['pool5', 'pool7'] - parsed = vnx_common.parse_pools() - pools = vnx_common.client.get_pools() - self.assertIn(parsed[0], pools) - - @res_mock.patch_common_adapter - def test_parse_pools_all_invalid_pools(self, vnx_common, mocked): - vnx_common.config.storage_vnx_pool_names = ['pool7', 'pool8'] - self.assertRaises(exception.VolumeBackendAPIException, - vnx_common.parse_pools) - - @res_mock.patch_common_adapter - def test_get_enabler_stats(self, vnx_common, mocked): - stats = vnx_common.get_enabler_stats() - self.assertTrue(stats['compression_support']) - self.assertTrue(stats['fast_support']) - self.assertTrue(stats['deduplication_support']) - self.assertTrue(stats['thin_provisioning_support']) - self.assertTrue(stats['consistent_group_snapshot_enabled']) - - @res_mock.patch_common_adapter - def test_get_pool_stats(self, vnx_common, mocked): - pools = vnx_common.client.vnx.get_pool() - vnx_common.config.storage_vnx_pool_names = [ - pool.name for pool in pools] - stats = { - 'compression_support': True, - 'fast_support': True, - 'deduplication_support': True, - 'thin_provisioning_support': True, - 'consistent_group_snapshot_enabled': True, - 'consistencygroup_support': True - - } - pool_stats = vnx_common.get_pool_stats(stats) - self.assertEqual(2, len(pool_stats)) - for stat in pool_stats: 
- self.assertTrue(stat['fast_cache_enabled']) - self.assertTrue(stat['QoS_support']) - self.assertIn(stat['pool_name'], [pools[0].name, - pools[1].name]) - self.assertFalse(stat['replication_enabled']) - self.assertEqual([], stat['replication_targets']) - - @res_mock.patch_common_adapter - def test_get_pool_stats_offline(self, vnx_common, mocked): - vnx_common.config.storage_vnx_pool_names = [] - pool_stats = vnx_common.get_pool_stats() - for stat in pool_stats: - self.assertTrue(stat['fast_cache_enabled']) - self.assertEqual(0, stat['free_capacity_gb']) - - @res_mock.patch_common_adapter - def test_get_pool_stats_max_luns_reached(self, vnx_common, mocked): - pools = vnx_common.client.vnx.get_pool() - vnx_common.config.storage_vnx_pool_names = [ - pool.name for pool in pools] - stats = { - 'compression_support': True, - 'fast_support': True, - 'deduplication_support': True, - 'thin_provisioning_support': True, - 'consistent_group_snapshot_enabled': True, - 'consistencygroup_support': True - - } - pool_stats = vnx_common.get_pool_stats(stats) - for stat in pool_stats: - self.assertTrue(stat['fast_cache_enabled']) - self.assertEqual(0, stat['free_capacity_gb']) - - @res_mock.patch_common_adapter - def test_get_pool_stats_with_reserved(self, vnx_common, mocked): - pools = vnx_common.client.vnx.get_pool() - vnx_common.config.storage_vnx_pool_names = [ - pool.name for pool in pools] - stats = { - 'compression_support': True, - 'fast_support': True, - 'deduplication_support': True, - 'thin_provisioning_support': True, - 'consistent_group_snapshot_enabled': True, - 'consistencygroup_support': True - - } - vnx_common.reserved_percentage = 15 - pool_stats = vnx_common.get_pool_stats(stats) - for stat in pool_stats: - self.assertTrue(stat['fast_cache_enabled']) - self.assertIsNot(0, stat['free_capacity_gb']) - self.assertEqual(15, stat['reserved_percentage']) - - @res_mock.patch_common_adapter - def test_update_volume_stats(self, vnx_common, mocked): - with 
mock.patch.object(adapter.CommonAdapter, 'get_pool_stats'): - stats = vnx_common.update_volume_stats() - pools_stats = stats['pools'] - for stat in pools_stats: - self.assertFalse(stat['replication_enabled']) - self.assertEqual([], stat['replication_targets']) - - @res_mock.patch_common_adapter - def test_append_volume_stats(self, vnx_common, mocked): - device = utils.get_replication_device() - vnx_common.config.replication_device = [device] - vnx_common.mirror_view = utils.build_fake_mirror_view() - stats = {} - vnx_common.append_replication_stats(stats) - self.assertTrue(stats['replication_enabled']) - self.assertEqual(1, stats['replication_count']) - self.assertEqual(['sync'], stats['replication_type']) - self.assertEqual([device['backend_id']], - stats['replication_targets']) - - @res_mock.mock_driver_input - @res_mock.patch_common_adapter - def test_delete_volume_not_force(self, vnx_common, mocked, mocked_input): - vnx_common.force_delete_lun_in_sg = False - volume = mocked_input['volume'] - volume['metadata'] = {'async_migrate': 'False'} - vnx_common.delete_volume(volume) - lun = vnx_common.client.vnx.get_lun() - lun.delete.assert_called_with(force_detach=True, detach_from_sg=False) - - @res_mock.mock_driver_input - @res_mock.patch_common_adapter - def test_delete_volume_force(self, vnx_common, mocked, mocked_input): - vnx_common.force_delete_lun_in_sg = True - volume = mocked_input['volume'] - volume['metadata'] = {'async_migrate': 'False'} - vnx_common.delete_volume(volume) - lun = vnx_common.client.vnx.get_lun() - lun.delete.assert_called_with(force_detach=True, detach_from_sg=True) - - @res_mock.mock_driver_input - @res_mock.patch_common_adapter - def test_delete_async_volume(self, vnx_common, mocked, mocked_input): - volume = mocked_input['volume'] - volume.metadata = {'async_migrate': 'True'} - vnx_common.force_delete_lun_in_sg = True - vnx_common.delete_volume(volume) - lun = vnx_common.client.vnx.get_lun() - 
lun.delete.assert_called_with(force_detach=True, detach_from_sg=True) - - @res_mock.mock_driver_input - @res_mock.patch_common_adapter - def test_delete_async_volume_migrating(self, vnx_common, mocked, - mocked_input): - - volume = mocked_input['volume'] - volume.metadata = {'async_migrate': 'True'} - vnx_common.force_delete_lun_in_sg = True - vnx_common.client.cleanup_async_lun = mock.Mock() - vnx_common.delete_volume(volume) - lun = vnx_common.client.vnx.get_lun() - lun.delete.assert_called_with(force_detach=True, detach_from_sg=True) - - @utils.patch_extra_specs_validate(side_effect=exception.InvalidVolumeType( - reason='fake_reason')) - @res_mock.patch_common_adapter - def test_retype_type_invalid(self, vnx_common, mocked): - self.assertRaises(exception.InvalidVolumeType, - vnx_common.retype, - None, None, - {'extra_specs': 'fake_spec'}, - None, None) - - @mock.patch.object(client.Client, 'get_vnx_enabler_status') - @utils.patch_extra_specs_validate(return_value=True) - @utils.patch_extra_specs({'storagetype:tiering': 'auto', - 'provisioning:type': 'thin'}) - @res_mock.mock_driver_input - @res_mock.patch_common_adapter - def test_retype_need_migration( - self, vnx_common, mocked, driver_in, - enabler_status): - new_type = { - 'extra_specs': {'provisioning:type': 'deduplicated', - 'storagetype:tiering': 'starthighthenauto'}} - volume = driver_in['volume'] - host = driver_in['host'] - fake_migrate_return = (True, ['fake_model_update']) - vnx_common._migrate_volume = mock.Mock( - return_value=fake_migrate_return) - ret = vnx_common.retype(None, volume, new_type, None, host) - self.assertEqual(fake_migrate_return, ret) - vnx_common._migrate_volume.assert_called_once_with( - volume, host, common.ExtraSpecs(new_type['extra_specs'])) - - @mock.patch.object(client.Client, 'get_vnx_enabler_status') - @utils.patch_extra_specs_validate(return_value=True) - @res_mock.mock_driver_input - @res_mock.patch_common_adapter - def test_retype_turn_on_compression_change_tier( - 
self, vnx_common, mocked, driver_in, - enabler_status): - new_type = { - 'extra_specs': {'provisioning:type': 'compressed', - 'storagetype:tiering': 'starthighthenauto'}} - volume = driver_in['volume'] - host = driver_in['host'] - lun = mocked['lun'] - vnx_common.client.get_lun = mock.Mock(return_value=lun) - ret = vnx_common.retype(None, volume, new_type, None, host) - self.assertTrue(ret) - lun.enable_compression.assert_called_once_with(ignore_thresholds=True) - self.assertEqual(storops.VNXTieringEnum.HIGH_AUTO, lun.tier) - - @mock.patch.object(client.Client, 'get_vnx_enabler_status') - @utils.patch_extra_specs_validate(return_value=True) - @res_mock.mock_driver_input - @res_mock.patch_common_adapter - def test_retype_lun_has_snap( - self, vnx_common, mocked, driver_in, - enabler_status): - new_type = { - 'extra_specs': {'provisioning:type': 'thin', - 'storagetype:tiering': 'auto'}} - volume = driver_in['volume'] - host = driver_in['host'] - new_type = { - 'extra_specs': {'provisioning:type': 'thin', - 'storagetype:tiering': 'auto'}} - ret = vnx_common.retype(None, volume, new_type, None, host) - self.assertFalse(ret) - new_type = { - 'extra_specs': {'provisioning:type': 'compressed', - 'storagetype:tiering': 'auto'}} - ret = vnx_common.retype(None, volume, new_type, None, host) - self.assertFalse(ret) - - @mock.patch.object(client.Client, 'get_vnx_enabler_status') - @utils.patch_extra_specs_validate(return_value=True) - @res_mock.mock_driver_input - @res_mock.patch_common_adapter - def test_retype_change_tier( - self, vnx_common, mocked, driver_in, - enabler_status): - new_type = { - 'extra_specs': {'storagetype:tiering': 'auto'}} - volume = driver_in['volume'] - host = driver_in['host'] - lun = mocked['lun'] - vnx_common.client.get_lun = mock.Mock(return_value=lun) - ret = vnx_common.retype(None, volume, new_type, None, host) - self.assertTrue(ret) - self.assertEqual(storops.VNXTieringEnum.AUTO, lun.tier) - - @res_mock.mock_driver_input - 
@res_mock.patch_common_adapter - def test_create_consistencygroup(self, vnx_common, mocked, mocked_input): - cg = mocked_input['cg'] - model_update = vnx_common.create_consistencygroup(None, group=cg) - self.assertEqual(fields.ConsistencyGroupStatus.AVAILABLE, - model_update['status']) - - @res_mock.mock_driver_input - @res_mock.patch_common_adapter - def test_delete_consistencygroup(self, vnx_common, mocked, mocked_input): - cg = mocked_input['cg'] - model_update, vol_update_list = vnx_common.delete_consistencygroup( - None, group=cg, volumes=[]) - self.assertEqual(cg.status, - model_update['status']) - self.assertEqual([], vol_update_list) - - @res_mock.mock_driver_input - @res_mock.patch_common_adapter - def test_delete_consistencygroup_with_volume( - self, vnx_common, mocked, mocked_input): - cg = mocked_input['cg'] - vol1 = mocked_input['vol1'] - vol2 = mocked_input['vol2'] - model_update, vol_update_list = vnx_common.delete_consistencygroup( - None, group=cg, volumes=[vol1, vol2]) - self.assertEqual(cg.status, - model_update['status']) - for update in vol_update_list: - self.assertEqual(fields.ConsistencyGroupStatus.DELETED, - update['status']) - - @res_mock.mock_driver_input - @res_mock.patch_common_adapter - def test_delete_consistencygroup_error(self, vnx_common, - mocked, mocked_input): - cg = mocked_input['cg'] - self.assertRaises( - storops_ex.VNXConsistencyGroupError, - vnx_common.delete_consistencygroup, - context=None, group=cg, volumes=[]) - - @res_mock.mock_driver_input - @res_mock.patch_common_adapter - def test_delete_consistencygroup_volume_error(self, vnx_common, - mocked, mocked_input): - cg = mocked_input['cg'] - vol1 = mocked_input['vol1'] - vol2 = mocked_input['vol2'] - model_update, vol_update_list = vnx_common.delete_consistencygroup( - None, group=cg, volumes=[vol1, vol2]) - self.assertEqual(cg.status, - model_update['status']) - for update in vol_update_list: - self.assertEqual(fields.ConsistencyGroupStatus.ERROR_DELETING, - 
update['status']) - - @res_mock.mock_driver_input - @res_mock.patch_common_adapter - def test_extend_volume(self, common_adapter, _ignore, mocked_input): - common_adapter.extend_volume(mocked_input['volume'], 10) - - lun = common_adapter.client.vnx.get_lun() - lun.expand.assert_called_once_with(10, ignore_thresholds=True) - - @res_mock.mock_driver_input - @res_mock.patch_common_adapter - def test_create_snapshot_adapter(self, common_adapter, _ignore, - mocked_input): - common_adapter.create_snapshot(mocked_input['snapshot']) - - @res_mock.mock_driver_input - @res_mock.patch_common_adapter - def test_delete_snapshot_adapter(self, common_adapter, _ignore, - mocked_input): - common_adapter.delete_snapshot(mocked_input['snapshot']) - - @res_mock.patch_common_adapter - def test_create_cgsnapshot(self, common_adapter, _): - common_adapter.do_create_cgsnap = mock.Mock( - return_value='fake_return') - cg_snapshot = test_utils.create_cgsnapshot( - self.ctxt, - fake_constants.CONSISTENCY_GROUP_ID) - vol = test_utils.create_volume(self.ctxt) - snaps = [ - test_utils.create_snapshot(self.ctxt, vol.id)] - ret = common_adapter.create_cgsnapshot( - None, cg_snapshot, snaps) - self.assertEqual('fake_return', ret) - common_adapter.do_create_cgsnap.assert_called_once_with( - cg_snapshot.consistencygroup_id, - cg_snapshot.id, - snaps) - - @res_mock.patch_common_adapter - def test_create_group_snap(self, common_adapter, _): - common_adapter.do_create_cgsnap = mock.Mock( - return_value='fake_return') - group_snapshot = test_utils.create_group_snapshot( - self.ctxt, - fake_constants.GROUP_ID, - group_type_id=fake_constants.VOLUME_TYPE_ID) - vol = test_utils.create_volume(self.ctxt) - snaps = [ - test_utils.create_snapshot(self.ctxt, vol.id)] - ret = common_adapter.create_group_snapshot( - None, group_snapshot, snaps) - self.assertEqual('fake_return', ret) - common_adapter.do_create_cgsnap.assert_called_once_with( - group_snapshot.group_id, - group_snapshot.id, - snaps) - - 
@res_mock.mock_driver_input - @res_mock.patch_common_adapter - def test_do_create_cgsnap(self, common_adapter, _, mocked_input): - group_name = fake_constants.CONSISTENCY_GROUP_ID - snap_name = fake_constants.CGSNAPSHOT_ID - snap1 = mocked_input['snap1'] - snap2 = mocked_input['snap2'] - model_update, snapshots_model_update = ( - common_adapter.do_create_cgsnap(group_name, snap_name, - [snap1, snap2])) - self.assertEqual('available', model_update['status']) - for update in snapshots_model_update: - self.assertEqual(fields.SnapshotStatus.AVAILABLE, update['status']) - - @res_mock.patch_common_adapter - def test_delete_group_snapshot(self, common_adapter, _): - common_adapter.do_delete_cgsnap = mock.Mock( - return_value='fake_return') - group_snapshot = test_utils.create_group_snapshot( - self.ctxt, - fake_constants.GROUP_ID, - group_type_id=fake_constants.VOLUME_TYPE_ID) - vol = test_utils.create_volume(self.ctxt) - snaps = [ - test_utils.create_snapshot(self.ctxt, vol.id)] - ret = common_adapter.delete_group_snapshot( - None, group_snapshot, snaps) - self.assertEqual('fake_return', ret) - common_adapter.do_delete_cgsnap.assert_called_once_with( - group_snapshot.group_id, - group_snapshot.id, - group_snapshot.status, - snaps) - - @res_mock.patch_common_adapter - def test_delete_cgsnapshot(self, common_adapter, _): - common_adapter.do_delete_cgsnap = mock.Mock( - return_value='fake_return') - cg_snapshot = test_utils.create_cgsnapshot( - self.ctxt, - fake_constants.CONSISTENCY_GROUP_ID) - vol = test_utils.create_volume(self.ctxt) - snaps = [ - test_utils.create_snapshot(self.ctxt, vol.id)] - ret = common_adapter.delete_cgsnapshot(None, cg_snapshot, snaps) - self.assertEqual('fake_return', ret) - common_adapter.do_delete_cgsnap.assert_called_once_with( - cg_snapshot.consistencygroup_id, - cg_snapshot.id, - cg_snapshot.status, - snaps) - - @res_mock.mock_driver_input - @res_mock.patch_common_adapter - def test_do_delete_cgsnap(self, common_adapter, _, mocked_input): - 
group_name = fake_constants.CGSNAPSHOT_ID - snap_name = fake_constants.CGSNAPSHOT_ID - model_update, snapshot_updates = ( - common_adapter.do_delete_cgsnap( - group_name, snap_name, 'available', - [mocked_input['snap1'], mocked_input['snap2']])) - self.assertEqual('deleted', model_update['status']) - for snap in snapshot_updates: - self.assertEqual(fields.SnapshotStatus.DELETED, snap['status']) - - @res_mock.mock_driver_input - @res_mock.patch_common_adapter - def test_manage_existing_lun_no_exist( - self, common_adapter, _ignore, mocked_input): - self.assertRaises( - exception.ManageExistingInvalidReference, - common_adapter.manage_existing_get_size, - mocked_input['volume'], {'source-name': 'fake'}) - common_adapter.client.vnx.get_lun.assert_called_once_with( - name='fake', lun_id=None) - - @res_mock.patch_common_adapter - def test_manage_existing_invalid_ref( - self, common_adapter, _ignore): - self.assertRaises( - exception.ManageExistingInvalidReference, - common_adapter.manage_existing_get_size, - None, {'invalidkey': 'fake'}) - - @res_mock.mock_driver_input - @res_mock.patch_common_adapter - def test_manage_existing_invalid_pool( - self, common_adapter, _ignore, mocked_input): - self.assertRaises( - exception.ManageExistingInvalidReference, - common_adapter.manage_existing_get_size, - mocked_input['volume'], {'source-id': '6'}) - common_adapter.client.vnx.get_lun.assert_called_once_with( - lun_id='6', name=None) - - @res_mock.mock_driver_input - @res_mock.patch_common_adapter - def test_manage_existing_get_size( - self, common_adapter, mocked_res, mocked_input): - size = common_adapter.manage_existing_get_size( - mocked_input['volume'], {'source-name': 'test_lun'}) - self.assertEqual(size, mocked_res['lun'].total_capacity_gb) - - @utils.patch_extra_specs({'provisioning:type': 'thin', - 'storagetype:tiering': 'auto'}) - @res_mock.mock_driver_input - @res_mock.patch_common_adapter - def test_manage_existing_type_mismatch( - self, common_adapter, mocked_res, 
mocked_input): - self.assertRaises(exception.ManageExistingVolumeTypeMismatch, - common_adapter.manage_existing, - mocked_input['volume'], - {'source-name': 'test_lun'}) - - @utils.patch_extra_specs({'provisioning:type': 'deduplicated'}) - @res_mock.mock_driver_input - @res_mock.patch_common_adapter - def test_manage_existing( - self, common_adapter, mocked_res, mocked_input): - test_lun = mocked_res['lun'] - common_adapter.client.get_lun = mock.Mock(return_value=test_lun) - lun_name = mocked_input['volume'].name - common_adapter._build_provider_location = mock.Mock( - return_value="fake_pl") - pl = common_adapter.manage_existing( - mocked_input['volume'], - {'source-name': 'test_lun'}) - common_adapter._build_provider_location.assert_called_with( - lun_type='lun', - lun_id=1, - base_lun_name=lun_name) - self.assertEqual('fake_pl', pl['provider_location']) - test_lun.rename.assert_called_once_with( - lun_name) - - @res_mock.mock_driver_input - @res_mock.patch_common_adapter - def test_manage_existing_smp( - self, common_adapter, mocked_res, mocked_input): - common_adapter._build_provider_location = mock.Mock( - return_value="fake_pl") - pl = common_adapter.manage_existing( - mocked_input['volume'], {'source-name': 'test_lun'}) - common_adapter._build_provider_location.assert_called_with( - lun_id=2, lun_type='smp', base_lun_name='src_lun') - self.assertEqual('fake_pl', pl['provider_location']) - - @res_mock.patch_common_adapter - def test_assure_storage_group(self, common_adapter, mocked_res): - host = common.Host('host', ['initiators']) - common_adapter.assure_storage_group(host) - - @res_mock.patch_common_adapter - def test_assure_storage_group_create_new(self, common_adapter, mocked_res): - host = common.Host('host', ['initiators']) - common_adapter.assure_storage_group(host) - common_adapter.client.vnx.create_sg.assert_called_once_with(host.name) - - @res_mock.mock_driver_input - @res_mock.patch_common_adapter - def test_assure_host_access(self, common_adapter, 
- mocked_res, mocked_input): - common_adapter.config.initiator_auto_registration = True - common_adapter.max_retries = 3 - common_adapter.auto_register_initiator = mock.Mock() - common_adapter.client.add_lun_to_sg = mock.Mock() - sg = mocked_res['sg'] - host = common.Host('host', ['initiators']) - cinder_volume = mocked_input['volume'] - volume = common.Volume(cinder_volume.name, cinder_volume.id, - common_adapter.client.get_lun_id(cinder_volume)) - lun = common_adapter.client.get_lun() - common_adapter.assure_host_access(sg, host, volume, True) - common_adapter.auto_register_initiator.assert_called_once_with( - sg, host) - common_adapter.client.add_lun_to_sg.assert_called_once_with( - sg, lun, common_adapter.max_retries) - - @res_mock.mock_driver_input - @res_mock.patch_common_adapter - def test_assure_host_access_without_auto_register_new_sg( - self, common_adapter, mocked_res, mocked_input): - common_adapter.config.initiator_auto_registration = False - common_adapter.max_retries = 3 - common_adapter.client.add_lun_to_sg = mock.Mock() - sg = mocked_res['sg'] - host = common.Host('host', ['initiators']) - cinder_volume = mocked_input['volume'] - volume = common.Volume(cinder_volume.name, cinder_volume.id, - common_adapter.client.get_lun_id(cinder_volume)) - lun = common_adapter.client.get_lun() - common_adapter.assure_host_access(sg, host, volume, True) - sg.connect_host.assert_called_once_with(host.name) - common_adapter.client.add_lun_to_sg.assert_called_once_with( - sg, lun, common_adapter.max_retries) - - @res_mock.mock_driver_input - @res_mock.patch_common_adapter - def test_assure_host_access_without_auto_register( - self, common_adapter, mocked_res, mocked_input): - common_adapter.config.initiator_auto_registration = False - common_adapter.max_retries = 3 - common_adapter.client.add_lun_to_sg = mock.Mock() - sg = mocked_res['sg'] - host = common.Host('host', ['initiators']) - cinder_volume = mocked_input['volume'] - volume = 
common.Volume(cinder_volume.name, cinder_volume.id, - common_adapter.client.get_lun_id(cinder_volume)) - lun = common_adapter.client.get_lun() - common_adapter.assure_host_access(sg, host, volume, False) - sg.connect_host.assert_not_called() - common_adapter.client.add_lun_to_sg.assert_called_once_with( - sg, lun, common_adapter.max_retries) - - @res_mock.mock_driver_input - @res_mock.patch_common_adapter - def test_auto_register_initiator( - self, common_adapter, mocked_res, mocked_input): - common_adapter.client.register_initiator = mock.Mock() - - common_adapter.config.io_port_list = ['a-0-0', 'a-0-1', 'a-1-0', - 'b-0-1'] - allowed_ports = mocked_res['allowed_ports'] - common_adapter.allowed_ports = allowed_ports - reg_ports = mocked_res['reg_ports'] - sg = mocked_res['sg'] - host = common.Host('host', ['iqn-host-1', 'iqn-reg-2']) - common_adapter.auto_register_initiator(sg, host) - - initiator_port_map = {'iqn-host-1': set(allowed_ports), - 'iqn-reg-2': set(allowed_ports) - set(reg_ports)} - common_adapter.client.register_initiator.assert_called_once_with( - sg, host, initiator_port_map) - - @res_mock.mock_driver_input - @res_mock.patch_common_adapter - def test_auto_register_initiator_no_white_list( - self, common_adapter, mocked_res, mocked_input): - for io_port_list in (None, ): - common_adapter.client.register_initiator = mock.Mock() - - common_adapter.config.io_port_list = io_port_list - allowed_ports = mocked_res['allowed_ports'] - common_adapter.allowed_ports = allowed_ports - sg = mocked_res['sg'] - host = common.Host('host', ['iqn-host-1', 'iqn-reg-2']) - common_adapter.auto_register_initiator(sg, host) - - initiator_port_map = {'iqn-host-1': set(allowed_ports)} - common_adapter.client.register_initiator.assert_called_once_with( - sg, host, initiator_port_map) - - @res_mock.mock_driver_input - @res_mock.patch_common_adapter - def test_auto_register_initiator_no_port_to_reg( - self, common_adapter, mocked_res, mocked_input): - 
common_adapter.config.io_port_list = ['a-0-0'] - allowed_ports = mocked_res['allowed_ports'] - common_adapter.allowed_ports = allowed_ports - sg = mocked_res['sg'] - host = common.Host('host', ['iqn-reg-1', 'iqn-reg-2']) - with mock.patch.object(common_adapter.client, 'register_initiator'): - common_adapter.auto_register_initiator(sg, host) - common_adapter.client.register_initiator.assert_called_once_with( - sg, host, {}) - - @res_mock.patch_common_adapter - def test_build_provider_location(self, common_adapter, mocked_res): - common_adapter.serial_number = 'vnx-serial' - pl = common_adapter._build_provider_location( - lun_id='fake_id', lun_type='smp', base_lun_name='fake_name') - expected_pl = vnx_utils.build_provider_location( - system='vnx-serial', - lun_type='smp', - lun_id='fake_id', - base_lun_name='fake_name', - version=common_adapter.VERSION) - self.assertEqual(expected_pl, pl) - - @res_mock.mock_driver_input - @res_mock.patch_common_adapter - def test_remove_host_access( - self, common_adapter, mocked_res, mocked_input): - host = common.Host('fake_host', ['fake_initiator']) - cinder_volume = mocked_input['volume'] - volume = common.Volume(cinder_volume.name, cinder_volume.id, - common_adapter.client.get_lun_id(cinder_volume)) - sg = mocked_res['sg'] - common_adapter.remove_host_access(volume, host, sg) - - @res_mock.mock_driver_input - @res_mock.patch_common_adapter - def test_remove_host_access_sg_absent( - self, common_adapter, mocked_res, mocked_input): - host = common.Host('fake_host', ['fake_initiator']) - cinder_volume = mocked_input['volume'] - volume = common.Volume(cinder_volume.name, cinder_volume.id, - common_adapter.client.get_lun_id(cinder_volume)) - sg = mocked_res['sg'] - common_adapter.remove_host_access(volume, host, sg) - - @res_mock.mock_driver_input - @res_mock.patch_common_adapter - def test_remove_host_access_volume_not_in_sg( - self, common_adapter, mocked_res, mocked_input): - host = common.Host('fake_host', ['fake_initiator']) - 
cinder_volume = mocked_input['volume'] - volume = common.Volume(cinder_volume.name, cinder_volume.id, - common_adapter.client.get_lun_id(cinder_volume)) - sg = mocked_res['sg'] - common_adapter.remove_host_access(volume, host, sg) - - @res_mock.patch_common_adapter - def test_terminate_connection_cleanup_sg_absent( - self, common_adapter, mocked_res): - common_adapter.destroy_empty_sg = True - common_adapter.itor_auto_dereg = True - host = common.Host('fake_host', ['fake_initiator']) - sg = mocked_res['sg'] - common_adapter.terminate_connection_cleanup(host, sg) - - @res_mock.patch_common_adapter - def test_terminate_connection_cleanup_remove_sg( - self, common_adapter, mocked_res): - common_adapter.destroy_empty_sg = True - common_adapter.itor_auto_dereg = False - host = common.Host('fake_host', ['fake_initiator']) - sg = mocked_res['sg'] - common_adapter.terminate_connection_cleanup(host, sg) - - @res_mock.patch_common_adapter - def test_terminate_connection_cleanup_deregister( - self, common_adapter, mocked_res): - common_adapter.destroy_empty_sg = True - common_adapter.itor_auto_dereg = True - host = common.Host('fake_host', ['fake_initiator1', 'fake_initiator2']) - sg = mocked_res['sg'] - common_adapter.terminate_connection_cleanup(host, sg) - common_adapter.client.vnx.remove_hba.assert_any_call( - 'fake_initiator1') - common_adapter.client.vnx.remove_hba.assert_any_call( - 'fake_initiator2') - - @res_mock.patch_common_adapter - def test_terminate_connection_cleanup_sg_is_not_empty( - self, common_adapter, mocked_res): - common_adapter.destroy_empty_sg = True - common_adapter.itor_auto_dereg = True - host = common.Host('fake_host', ['fake_initiator']) - sg = mocked_res['sg'] - common_adapter.terminate_connection_cleanup(host, sg) - - @res_mock.patch_common_adapter - def test_set_extra_spec_defaults(self, common_adapter, mocked_res): - common_adapter.set_extra_spec_defaults() - self.assertEqual(storops.VNXTieringEnum.HIGH_AUTO, - common.ExtraSpecs.TIER_DEFAULT) 
- - @res_mock.mock_driver_input - @res_mock.patch_common_adapter - def test_do_update_cg(self, common_adapter, _, mocked_input): - common_adapter.client.update_consistencygroup = mock.Mock() - cg = mocked_input['cg'] - common_adapter.client.get_cg = mock.Mock(return_value=cg) - common_adapter.do_update_cg(cg.id, - [mocked_input['volume_add']], - [mocked_input['volume_remove']]) - - common_adapter.client.update_consistencygroup.assert_called_once_with( - cg, [1], [2]) - - @res_mock.mock_driver_input - @res_mock.patch_common_adapter - def test_create_export_snapshot(self, common_adapter, mocked_res, - mocked_input): - common_adapter.client.create_mount_point = mock.Mock() - snapshot = mocked_input['snapshot'] - common_adapter.create_export_snapshot(None, snapshot, None) - common_adapter.client.create_mount_point.assert_called_once_with( - snapshot.volume_name, 'tmp-smp-' + snapshot.id) - - @res_mock.mock_driver_input - @res_mock.patch_common_adapter - def test_remove_export_snapshot(self, common_adapter, mocked_res, - mocked_input): - common_adapter.client.delete_lun = mock.Mock() - snapshot = mocked_input['snapshot'] - common_adapter.remove_export_snapshot(None, snapshot) - common_adapter.client.delete_lun.assert_called_once_with( - 'tmp-smp-' + snapshot.id) - - @res_mock.mock_driver_input - @res_mock.patch_common_adapter - def test_initialize_connection_snapshot(self, common_adapter, mocked_res, - mocked_input): - common_adapter.client.attach_snapshot = mock.Mock() - common_adapter._initialize_connection = mock.Mock(return_value='fake') - - snapshot = mocked_input['snapshot'] - smp_name = 'tmp-smp-' + snapshot.id - conn = common_adapter.initialize_connection_snapshot(snapshot, None) - common_adapter.client.attach_snapshot.assert_called_once_with( - smp_name, snapshot.name) - lun = mocked_res['lun'] - called_volume = common_adapter._initialize_connection.call_args[0][0] - self.assertEqual((smp_name, snapshot.id, lun.lun_id), - (called_volume.name, called_volume.id, 
- called_volume.vnx_lun_id)) - self.assertIsNone( - common_adapter._initialize_connection.call_args[0][1]) - self.assertIs(common_adapter._initialize_connection(), conn) - - @res_mock.mock_driver_input - @res_mock.patch_common_adapter - def test_terminate_connection_snapshot(self, common_adapter, mocked_res, - mocked_input): - common_adapter.client.detach_snapshot = mock.Mock() - common_adapter._terminate_connection = mock.Mock() - - snapshot = mocked_input['snapshot'] - smp_name = 'tmp-smp-' + snapshot.id - common_adapter.terminate_connection_snapshot(snapshot, None) - lun = mocked_res['lun'] - called_volume = common_adapter._terminate_connection.call_args[0][0] - self.assertEqual((smp_name, snapshot.id, lun.lun_id), - (called_volume.name, called_volume.id, - called_volume.vnx_lun_id)) - self.assertIsNone(common_adapter._terminate_connection.call_args[0][1]) - common_adapter.client.detach_snapshot.assert_called_once_with( - smp_name) - - @utils.patch_extra_specs({'replication_enabled': ' True'}) - @res_mock.mock_driver_input - @res_mock.patch_common_adapter - def test_setup_lun_replication(self, common_adapter, - mocked_res, mocked_input): - vol1 = mocked_input['vol1'] - fake_mirror = utils.build_fake_mirror_view() - fake_mirror.secondary_client.create_lun.return_value = ( - mocked_res['lun']) - common_adapter.mirror_view = fake_mirror - common_adapter.config.replication_device = ( - [utils.get_replication_device()]) - rep_update = common_adapter.setup_lun_replication( - vol1, 111) - fake_mirror.create_mirror.assert_called_once_with( - 'mirror_' + vol1.id, 111) - fake_mirror.add_image.assert_called_once_with( - 'mirror_' + vol1.id, mocked_res['lun'].lun_id) - self.assertEqual(fields.ReplicationStatus.ENABLED, - rep_update['replication_status']) - - @utils.patch_extra_specs({'replication_enabled': ' True'}) - @utils.patch_group_specs({'consistent_group_replication_enabled': - ' True'}) - @res_mock.mock_driver_input - @res_mock.patch_common_adapter - def 
test_setup_lun_replication_in_group( - self, common_adapter, mocked_res, mocked_input): - vol1 = mocked_input['vol1'] - group1 = mocked_input['group1'] - vol1.group = group1 - fake_mirror = utils.build_fake_mirror_view() - fake_mirror.secondary_client.create_lun.return_value = ( - mocked_res['lun']) - common_adapter.mirror_view = fake_mirror - common_adapter.config.replication_device = ( - [utils.get_replication_device()]) - rep_update = common_adapter.setup_lun_replication( - vol1, 111) - fake_mirror.create_mirror.assert_called_once_with( - 'mirror_' + vol1.id, 111) - fake_mirror.add_image.assert_called_once_with( - 'mirror_' + vol1.id, mocked_res['lun'].lun_id) - self.assertEqual(fields.ReplicationStatus.ENABLED, - rep_update['replication_status']) - - @utils.patch_extra_specs({'replication_enabled': ' True'}) - @res_mock.mock_driver_input - @res_mock.patch_common_adapter - def test_cleanup_replication(self, common_adapter, - mocked_res, mocked_input): - fake_mirror = utils.build_fake_mirror_view() - vol1 = mocked_input['vol1'] - with mock.patch.object(common_adapter, 'build_mirror_view') as fake: - fake.return_value = fake_mirror - common_adapter.cleanup_lun_replication(vol1) - fake_mirror.destroy_mirror.assert_called_once_with( - 'mirror_' + vol1.id, vol1.name) - - @res_mock.patch_common_adapter - def test_build_mirror_view(self, common_adapter, - mocked_res): - common_adapter.config.replication_device = [ - utils.get_replication_device()] - with utils.patch_vnxsystem: - mirror = common_adapter.build_mirror_view( - common_adapter.config) - self.assertIsNotNone(mirror) - - @res_mock.patch_common_adapter - def test_build_mirror_view_no_device( - self, common_adapter, mocked_res): - common_adapter.config.replication_device = [] - mirror = common_adapter.build_mirror_view( - common_adapter.config) - self.assertIsNone(mirror) - - @res_mock.patch_common_adapter - def test_build_mirror_view_2_device(self, common_adapter, mocked_res): - device = 
utils.get_replication_device() - device1 = device.copy() - common_adapter.config.replication_device = [device, device1] - self.assertRaises(exception.InvalidInput, - common_adapter.build_mirror_view, - common_adapter.config) - - @res_mock.patch_common_adapter - def test_build_mirror_view_no_enabler(self, common_adapter, mocked_res): - common_adapter.config.replication_device = [ - utils.get_replication_device()] - self.assertRaises(exception.InvalidInput, - common_adapter.build_mirror_view, - common_adapter.config) - - @res_mock.patch_common_adapter - def test_build_mirror_view_failover_false(self, common_adapter, - mocked_res): - common_adapter.config.replication_device = [ - utils.get_replication_device()] - with utils.patch_vnxsystem: - failover_mirror = common_adapter.build_mirror_view( - common_adapter.config, failover=False) - self.assertIsNotNone(failover_mirror) - - @utils.patch_extra_specs({'replication_enabled': ' True'}) - @res_mock.mock_driver_input - @res_mock.patch_common_adapter - def test_failover_host(self, common_adapter, mocked_res, mocked_input): - device = utils.get_replication_device() - common_adapter.config.replication_device = [device] - vol1 = mocked_input['vol1'] - lun1 = mocked_res['lun1'] - with mock.patch.object(common_adapter, 'build_mirror_view') as fake: - fake_mirror = utils.build_fake_mirror_view() - fake_mirror.secondary_client.get_lun.return_value = lun1 - fake_mirror.secondary_client.get_serial.return_value = ( - device['backend_id']) - fake.return_value = fake_mirror - backend_id, updates, __ = common_adapter.failover_host( - None, [vol1], device['backend_id'], []) - fake_mirror.promote_image.assert_called_once_with( - 'mirror_' + vol1.id) - fake_mirror.secondary_client.get_serial.assert_called_with() - fake_mirror.secondary_client.get_lun.assert_called_with( - name=vol1.name) - self.assertEqual(fake_mirror.secondary_client, - common_adapter.client) - self.assertEqual(device['backend_id'], - common_adapter.active_backend_id) - 
self.assertEqual(device['backend_id'], backend_id) - for update in updates: - self.assertEqual(fields.ReplicationStatus.FAILED_OVER, - update['updates']['replication_status']) - - @res_mock.mock_driver_input - @res_mock.patch_common_adapter - def test_failover_host_invalid_backend_id(self, common_adapter, - mocked_res, mocked_input): - common_adapter.config.replication_device = [ - utils.get_replication_device()] - vol1 = mocked_input['vol1'] - self.assertRaises(exception.InvalidReplicationTarget, - common_adapter.failover_host, - None, [vol1], 'new_id', []) - - @utils.patch_extra_specs({'replication_enabled': ' True'}) - @res_mock.mock_driver_input - @res_mock.patch_common_adapter - def test_failover_host_failback(self, common_adapter, mocked_res, - mocked_input): - device = utils.get_replication_device() - common_adapter.config.replication_device = [device] - common_adapter.active_backend_id = device['backend_id'] - vol1 = mocked_input['vol1'] - lun1 = mocked_res['lun1'] - with mock.patch.object(common_adapter, 'build_mirror_view') as fake: - fake_mirror = utils.build_fake_mirror_view() - fake_mirror.secondary_client.get_lun.return_value = lun1 - fake_mirror.secondary_client.get_serial.return_value = ( - device['backend_id']) - fake.return_value = fake_mirror - backend_id, updates, __ = common_adapter.failover_host( - None, [vol1], 'default', []) - fake_mirror.promote_image.assert_called_once_with( - 'mirror_' + vol1.id) - fake_mirror.secondary_client.get_serial.assert_called_with() - fake_mirror.secondary_client.get_lun.assert_called_with( - name=vol1.name) - self.assertEqual(fake_mirror.secondary_client, - common_adapter.client) - self.assertEqual('default', backend_id) - for update in updates: - self.assertEqual(fields.ReplicationStatus.ENABLED, - update['updates']['replication_status']) - - @utils.patch_group_specs({'consistent_group_replication_enabled': - ' True'}) - @res_mock.mock_driver_input - @res_mock.patch_common_adapter - def 
test_failover_host_groups(self, common_adapter, mocked_res, - mocked_input): - device = utils.get_replication_device() - common_adapter.config.replication_device = [device] - common_adapter.active_backend_id = device['backend_id'] - mocked_group = mocked_input['group1'] - group1 = mock.Mock() - - group1.id = mocked_group.id - group1.replication_status = mocked_group.replication_status - group1.volumes = [mocked_input['vol1'], mocked_input['vol2']] - lun1 = mocked_res['lun1'] - with mock.patch.object(common_adapter, 'build_mirror_view') as fake: - fake_mirror = utils.build_fake_mirror_view() - fake_mirror.secondary_client.get_lun.return_value = lun1 - fake_mirror.secondary_client.get_serial.return_value = ( - device['backend_id']) - fake.return_value = fake_mirror - backend_id, updates, group_update_list = ( - common_adapter.failover_host(None, [], 'default', [group1])) - fake_mirror.promote_mirror_group.assert_called_once_with( - group1.id.replace('-', '')) - fake_mirror.secondary_client.get_serial.assert_called_with() - fake_mirror.secondary_client.get_lun.assert_called_with( - name=mocked_input['vol1'].name) - self.assertEqual(fake_mirror.secondary_client, - common_adapter.client) - self.assertEqual([{ - 'group_id': group1.id, - 'updates': {'replication_status': - fields.ReplicationStatus.ENABLED}}], - group_update_list) - self.assertEqual(2, len(updates)) - self.assertIsNone(common_adapter.active_backend_id) - self.assertEqual('default', backend_id) - for update in updates: - self.assertEqual(fields.ReplicationStatus.ENABLED, - update['updates']['replication_status']) - - @res_mock.mock_driver_input - @res_mock.patch_common_adapter - def test_get_pool_name(self, common_adapter, mocked_res, mocked_input): - self.assertEqual(mocked_res['lun'].pool_name, - common_adapter.get_pool_name(mocked_input['volume'])) - - @res_mock.mock_driver_input - @res_mock.patch_common_adapter - def test_update_migrated_volume(self, common_adapter, mocked_res, - mocked_input): - data = 
common_adapter.update_migrated_volume( - None, mocked_input['volume'], mocked_input['new_volume']) - self.assertEqual(mocked_input['new_volume'].provider_location, - data['provider_location']) - self.assertEqual('False', data['metadata']['snapcopy']) - - @res_mock.mock_driver_input - @res_mock.patch_common_adapter - def test_update_migrated_volume_smp(self, common_adapter, mocked_res, - mocked_input): - data = common_adapter.update_migrated_volume( - None, mocked_input['volume'], mocked_input['new_volume']) - self.assertEqual(mocked_input['new_volume'].provider_location, - data['provider_location']) - self.assertEqual('True', data['metadata']['snapcopy']) - - @res_mock.patch_common_adapter - def test_normalize_config_queue_path(self, common_adapter, - mocked_res): - common_adapter._normalize_config() - self.assertEqual(os.path.join(cfg.CONF.state_path, - 'vnx', - 'vnx_backend'), - common_adapter.queue_path) - - @res_mock.patch_common_adapter - def test_normalize_config_naviseccli_path(self, common_adapter, - mocked_res): - old_value = common_adapter.config.naviseccli_path - common_adapter._normalize_config() - self.assertEqual(old_value, common_adapter.config.naviseccli_path) - - @res_mock.patch_common_adapter - def test_normalize_config_naviseccli_path_none(self, common_adapter, - mocked_res): - common_adapter.config.naviseccli_path = "" - common_adapter._normalize_config() - self.assertIsNone(common_adapter.config.naviseccli_path) - - common_adapter.config.naviseccli_path = " " - common_adapter._normalize_config() - self.assertIsNone(common_adapter.config.naviseccli_path) - - common_adapter.config.naviseccli_path = None - common_adapter._normalize_config() - self.assertIsNone(common_adapter.config.naviseccli_path) - - @res_mock.patch_common_adapter - def test_normalize_config_pool_names(self, common_adapter, - mocked_res): - common_adapter.config.storage_vnx_pool_names = [ - 'pool_1', ' pool_2 ', '', ' '] - common_adapter._normalize_config() - 
self.assertEqual(['pool_1', 'pool_2'], - common_adapter.config.storage_vnx_pool_names) - - @res_mock.patch_common_adapter - def test_normalize_config_pool_names_none(self, common_adapter, - mocked_res): - common_adapter.config.storage_vnx_pool_names = None - common_adapter._normalize_config() - self.assertIsNone(common_adapter.config.storage_vnx_pool_names) - - @res_mock.patch_common_adapter - def test_normalize_config_pool_names_empty_list(self, common_adapter, - mocked_res): - common_adapter.config.storage_vnx_pool_names = [] - self.assertRaises(exception.InvalidConfigurationValue, - common_adapter._normalize_config) - - common_adapter.config.storage_vnx_pool_names = [' ', ''] - self.assertRaises(exception.InvalidConfigurationValue, - common_adapter._normalize_config) - - @res_mock.patch_common_adapter - def test_normalize_config_io_port_list(self, common_adapter, - mocked_res): - common_adapter.config.io_port_list = [ - 'a-0-1', ' b-1 ', '', ' '] - common_adapter._normalize_config() - self.assertEqual(['A-0-1', 'B-1'], - common_adapter.config.io_port_list) - - @res_mock.patch_common_adapter - def test_normalize_config_io_port_list_none(self, common_adapter, - mocked_res): - common_adapter.config.io_port_list = None - common_adapter._normalize_config() - self.assertIsNone(common_adapter.config.io_port_list) - - @res_mock.patch_common_adapter - def test_normalize_config_io_port_list_empty_list(self, common_adapter, - mocked_res): - common_adapter.config.io_port_list = [] - self.assertRaises(exception.InvalidConfigurationValue, - common_adapter._normalize_config) - - common_adapter.config.io_port_list = [' ', ''] - self.assertRaises(exception.InvalidConfigurationValue, - common_adapter._normalize_config) - - -class TestISCSIAdapter(test.TestCase): - STORAGE_PROTOCOL = common.PROTOCOL_ISCSI - - def setUp(self): - super(TestISCSIAdapter, self).setUp() - self.configuration = conf.Configuration(None) - vnx_utils.init_ops(self.configuration) - 
self.configuration.storage_protocol = self.STORAGE_PROTOCOL - - def tearDown(self): - super(TestISCSIAdapter, self).tearDown() - - @res_mock.patch_iscsi_adapter - def test_validate_ports_iscsi(self, vnx_iscsi, mocked): - all_iscsi_ports = vnx_iscsi.client.get_iscsi_targets() - valid_ports = vnx_iscsi.validate_ports(all_iscsi_ports, ['A-0-0']) - self.assertEqual([mocked['iscsi_port_a-0-0']], valid_ports) - - @res_mock.patch_iscsi_adapter - def test_validate_ports_iscsi_invalid(self, vnx_iscsi, mocked): - invalid_white_list = ['A-0-0', 'A-B-0'] - all_iscsi_ports = vnx_iscsi.client.get_iscsi_targets() - self.assertRaisesRegex( - exception.VolumeBackendAPIException, - 'Invalid iscsi ports %s specified for io_port_list.' - % 'A-B-0', - vnx_iscsi.validate_ports, - all_iscsi_ports, - invalid_white_list) - - @res_mock.patch_iscsi_adapter - def test_validate_ports_iscsi_not_exist(self, vnx_iscsi, mocked): - nonexistent_ports = ['A-0-0', 'A-6-1'] - all_iscsi_ports = vnx_iscsi.client.get_iscsi_targets() - self.assertRaisesRegex( - exception.VolumeBackendAPIException, - 'Invalid iscsi ports %s specified for io_port_list' - % 'A-6-1', - vnx_iscsi.validate_ports, - all_iscsi_ports, - nonexistent_ports) - - @res_mock.patch_iscsi_adapter - def test_update_volume_stats_iscsi(self, vnx_iscsi, mocked): - with mock.patch.object(adapter.CommonAdapter, 'update_volume_stats', - return_value={'storage_protocol': - self.STORAGE_PROTOCOL}): - stats = vnx_iscsi.update_volume_stats() - self.assertEqual(self.STORAGE_PROTOCOL, stats['storage_protocol']) - self.assertEqual('VNXISCSIDriver', stats['volume_backend_name']) - - @res_mock.patch_iscsi_adapter - def test_build_terminate_connection_return_data_iscsi( - self, vnx_iscsi, mocked): - re = vnx_iscsi.build_terminate_connection_return_data(None, None) - self.assertIsNone(re) - - @res_mock.patch_iscsi_adapter - def test_normalize_config_iscsi_initiators( - self, vnx_iscsi, mocked): - vnx_iscsi.config.iscsi_initiators = ( - 
'{"host1":["10.0.0.1", "10.0.0.2"],"host2":["10.0.0.3"]}') - vnx_iscsi._normalize_config() - expected = {"host1": ["10.0.0.1", "10.0.0.2"], - "host2": ["10.0.0.3"]} - self.assertEqual(expected, vnx_iscsi.config.iscsi_initiators) - - vnx_iscsi.config.iscsi_initiators = '{}' - vnx_iscsi._normalize_config() - expected = {} - self.assertEqual(expected, vnx_iscsi.config.iscsi_initiators) - - @res_mock.patch_iscsi_adapter - def test_normalize_config_iscsi_initiators_none( - self, vnx_iscsi, mocked): - vnx_iscsi.config.iscsi_initiators = None - vnx_iscsi._normalize_config() - self.assertIsNone(vnx_iscsi.config.iscsi_initiators) - - @res_mock.patch_iscsi_adapter - def test_normalize_config_iscsi_initiators_empty_str( - self, vnx_iscsi, mocked): - vnx_iscsi.config.iscsi_initiators = '' - self.assertRaises(exception.InvalidConfigurationValue, - vnx_iscsi._normalize_config) - - vnx_iscsi.config.iscsi_initiators = ' ' - self.assertRaises(exception.InvalidConfigurationValue, - vnx_iscsi._normalize_config) - - @res_mock.patch_iscsi_adapter - def test_normalize_config_iscsi_initiators_not_dict( - self, vnx_iscsi, mocked): - vnx_iscsi.config.iscsi_initiators = '["a", "b"]' - self.assertRaises(exception.InvalidConfigurationValue, - vnx_iscsi._normalize_config) - - -class TestFCAdapter(test.TestCase): - STORAGE_PROTOCOL = common.PROTOCOL_FC - - def setUp(self): - super(TestFCAdapter, self).setUp() - self.configuration = conf.Configuration(None) - vnx_utils.init_ops(self.configuration) - self.configuration.storage_protocol = self.STORAGE_PROTOCOL - - def tearDown(self): - super(TestFCAdapter, self).tearDown() - - @res_mock.patch_fc_adapter - def test_validate_ports_fc(self, vnx_fc, mocked): - all_fc_ports = vnx_fc.client.get_fc_targets() - valid_ports = vnx_fc.validate_ports(all_fc_ports, ['A-1']) - self.assertEqual([mocked['fc_port_a-1']], valid_ports) - - @res_mock.patch_fc_adapter - def test_validate_ports_fc_invalid(self, vnx_fc, mocked): - invalid_white_list = ['A-1', 'A-B'] - 
all_fc_ports = vnx_fc.client.get_fc_targets() - self.assertRaisesRegex( - exception.VolumeBackendAPIException, - 'Invalid fc ports %s specified for io_port_list.' - % 'A-B', - vnx_fc.validate_ports, - all_fc_ports, - invalid_white_list) - - @res_mock.patch_fc_adapter - def test_validate_ports_fc_not_exist(self, vnx_fc, mocked): - nonexistent_ports = ['A-1', 'A-6'] - all_fc_ports = vnx_fc.client.get_fc_targets() - self.assertRaisesRegex( - exception.VolumeBackendAPIException, - 'Invalid fc ports %s specified for io_port_list' - % 'A-6', - vnx_fc.validate_ports, - all_fc_ports, - nonexistent_ports) - - @res_mock.patch_fc_adapter - def test_update_volume_stats(self, vnx_fc, mocked): - with mock.patch.object(adapter.CommonAdapter, 'get_pool_stats'): - stats = vnx_fc.update_volume_stats() - self.assertEqual(self.STORAGE_PROTOCOL, stats['storage_protocol']) - self.assertEqual('VNXFCDriver', stats['volume_backend_name']) - - @mock.patch.object(vnx_utils, 'convert_to_tgt_list_and_itor_tgt_map') - @res_mock.patch_fc_adapter - def test_build_terminate_connection_return_data_auto_zone( - self, vnx_fc, mocked, converter): - vnx_fc.lookup_service = mock.Mock() - get_mapping = vnx_fc.lookup_service.get_device_mapping_from_network - - itor_tgt_map = { - 'wwn1': ['wwnt1', 'wwnt2', 'wwnt3'], - 'wwn2': ['wwnt1', 'wwnt2'] - } - converter.return_value = ([], itor_tgt_map) - host = common.Host('fake_host', - ['fake_hba1'], - wwpns=['wwn1', 'wwn2']) - sg = mocked['sg'] - re = vnx_fc.build_terminate_connection_return_data(host, sg) - get_mapping.assert_called_once_with( - ['wwn1', 'wwn2'], ['5006016636E01CA1']) - self.assertEqual(itor_tgt_map, - re['data']['initiator_target_map']) - - @res_mock.patch_fc_adapter - def test_build_terminate_connection_return_data_sg_absent( - self, vnx_fc, mocked): - sg = mocked['sg'] - re = vnx_fc.build_terminate_connection_return_data(None, sg) - self.assertEqual('fibre_channel', re['driver_volume_type']) - self.assertEqual({}, re['data']) - - 
@res_mock.patch_fc_adapter - def test_build_terminate_connection_return_data_without_autozone( - self, vnx_fc, mocked): - self.lookup_service = None - re = vnx_fc.build_terminate_connection_return_data(None, None) - self.assertEqual('fibre_channel', re['driver_volume_type']) - self.assertEqual({}, re['data']) - - @res_mock.patch_fc_adapter - def test_get_tgt_list_and_initiator_tgt_map_allow_port_only( - self, vnx_fc, mocked): - sg = mocked['sg'] - host = common.Host('fake_host', - ['fake_hba1'], - wwpns=['wwn1', 'wwn2']) - mapping = { - 'san_1': {'initiator_port_wwn_list': ['wwn1'], - 'target_port_wwn_list': ['5006016636E01CB2']}} - vnx_fc.lookup_service = mock.Mock() - vnx_fc.lookup_service.get_device_mapping_from_network = mock.Mock( - return_value=mapping) - get_mapping = vnx_fc.lookup_service.get_device_mapping_from_network - vnx_fc.allowed_ports = mocked['adapter'].allowed_ports - targets, tgt_map = vnx_fc._get_tgt_list_and_initiator_tgt_map( - sg, host, True) - self.assertEqual(['5006016636E01CB2'], targets) - self.assertEqual({'wwn1': ['5006016636E01CB2']}, tgt_map) - get_mapping.assert_called_once_with( - ['wwn1', 'wwn2'], ['5006016636E01CB2']) diff --git a/cinder/tests/unit/volume/drivers/dell_emc/vnx/test_client.py b/cinder/tests/unit/volume/drivers/dell_emc/vnx/test_client.py deleted file mode 100644 index d69cc9375..000000000 --- a/cinder/tests/unit/volume/drivers/dell_emc/vnx/test_client.py +++ /dev/null @@ -1,576 +0,0 @@ -# Copyright (c) 2016 EMC Corporation. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the -# License for the specific language governing permissions and limitations -# under the License. - -import unittest - -from cinder import exception -from cinder import test -from cinder.tests.unit.volume.drivers.dell_emc.vnx import fake_exception \ - as storops_ex -from cinder.tests.unit.volume.drivers.dell_emc.vnx import fake_storops \ - as storops -from cinder.tests.unit.volume.drivers.dell_emc.vnx import res_mock -from cinder.tests.unit.volume.drivers.dell_emc.vnx import utils -from cinder.volume.drivers.dell_emc.vnx import client as vnx_client -from cinder.volume.drivers.dell_emc.vnx import common as vnx_common - - -class TestCondition(test.TestCase): - @res_mock.patch_client - def test_is_lun_io_ready_false(self, client, mocked): - r = vnx_client.Condition.is_lun_io_ready(mocked['lun']) - self.assertFalse(r) - - @res_mock.patch_client - def test_is_lun_io_ready_true(self, client, mocked): - r = vnx_client.Condition.is_lun_io_ready(mocked['lun']) - self.assertTrue(r) - - @res_mock.patch_client - def test_is_lun_io_ready_exception(self, client, mocked): - self.assertRaises(exception.VolumeBackendAPIException, - vnx_client.Condition.is_lun_io_ready, - mocked['lun']) - - -class TestClient(test.TestCase): - def setUp(self): - super(TestClient, self).setUp() - self.origin_timeout = vnx_common.DEFAULT_TIMEOUT - vnx_common.DEFAULT_TIMEOUT = 0 - - def tearDown(self): - super(TestClient, self).tearDown() - vnx_common.DEFAULT_TIMEOUT = self.origin_timeout - - @res_mock.patch_client - def test_create_lun(self, client, mocked): - client.create_lun(pool='pool1', name='test', size=1, provision=None, - tier=None, cg_id=None, ignore_thresholds=False) - client.vnx.get_pool.assert_called_once_with(name='pool1') - pool = client.vnx.get_pool(name='pool1') - pool.create_lun.assert_called_with(lun_name='test', - size_gb=1, - provision=None, - tier=None, - ignore_thresholds=False) - - @res_mock.patch_client - def test_create_lun_error(self, client, mocked): - 
self.assertRaises(storops_ex.VNXCreateLunError, - client.create_lun, - pool='pool1', - name='test', - size=1, - provision=None, - tier=None, - cg_id=None, - ignore_thresholds=False) - client.vnx.get_pool.assert_called_once_with(name='pool1') - - @res_mock.patch_client - def test_create_lun_already_existed(self, client, mocked): - client.create_lun(pool='pool1', name='lun3', size=1, provision=None, - tier=None, cg_id=None, ignore_thresholds=False) - client.vnx.get_lun.assert_called_once_with(name='lun3') - - @res_mock.patch_client - def test_create_lun_in_cg(self, client, mocked): - client.create_lun( - pool='pool1', name='test', size=1, provision=None, - tier=None, cg_id='cg1', ignore_thresholds=False) - - @res_mock.patch_client - def test_create_lun_compression(self, client, mocked): - client.create_lun(pool='pool1', name='lun2', size=1, - provision=storops.VNXProvisionEnum.COMPRESSED, - tier=None, cg_id=None, - ignore_thresholds=False) - - @res_mock.patch_client - def test_migrate_lun(self, client, mocked): - client.migrate_lun(src_id=1, - dst_id=2) - lun = client.vnx.get_lun() - lun.migrate.assert_called_with(2, storops.VNXMigrationRate.HIGH) - - @unittest.skip("Skip until bug #1578986 is fixed") - @utils.patch_sleep - @res_mock.patch_client - def test_migrate_lun_with_retry(self, client, mocked, mock_sleep): - lun = client.vnx.get_lun() - self.assertRaises(storops_ex.VNXTargetNotReadyError, - client.migrate_lun, - src_id=4, - dst_id=5) - lun.migrate.assert_called_with(5, storops.VNXMigrationRate.HIGH) - mock_sleep.assert_called_with(15) - - @res_mock.patch_client - def test_session_finished_faulted(self, client, mocked): - lun = client.vnx.get_lun() - r = client.session_finished(lun) - self.assertTrue(r) - - @res_mock.patch_client - def test_session_finished_migrating(self, client, mocked): - lun = client.vnx.get_lun() - r = client.session_finished(lun) - self.assertFalse(r) - - @res_mock.patch_client - def test_session_finished_not_existed(self, client, 
mocked): - lun = client.vnx.get_lun() - r = client.session_finished(lun) - self.assertTrue(r) - - @res_mock.patch_client - def test_migrate_lun_error(self, client, mocked): - lun = client.vnx.get_lun() - self.assertRaises(storops_ex.VNXMigrationError, - client.migrate_lun, - src_id=4, - dst_id=5) - lun.migrate.assert_called_with(5, storops.VNXMigrationRate.HIGH) - - @res_mock.patch_client - def test_verify_migration(self, client, mocked): - r = client.verify_migration(1, 2, 'test_wwn') - self.assertTrue(r) - - @res_mock.patch_client - def test_verify_migration_false(self, client, mocked): - r = client.verify_migration(1, 2, 'fake_wwn') - self.assertFalse(r) - - @res_mock.patch_client - def test_cleanup_migration(self, client, mocked): - client.cleanup_migration(1, 2) - - @res_mock.patch_client - def test_cleanup_migration_not_migrating(self, client, mocked): - client.cleanup_migration(1, 2) - - @res_mock.patch_client - def test_cleanup_migration_cancel_failed(self, client, mocked): - client.cleanup_migration(1, 2) - - @res_mock.patch_client - def test_get_lun_by_name(self, client, mocked): - lun = client.get_lun(name='lun_name_test_get_lun_by_name') - self.assertEqual(888, lun.lun_id) - - @res_mock.patch_client - def test_delete_lun(self, client, mocked): - client.delete_lun(mocked['lun'].name) - - @res_mock.patch_client - def test_delete_smp(self, client, mocked): - client.delete_lun(mocked['lun'].name) - - @res_mock.patch_client - def test_delete_lun_not_exist(self, client, mocked): - client.delete_lun(mocked['lun'].name) - - @res_mock.patch_client - def test_delete_lun_exception(self, client, mocked): - self.assertRaisesRegexp(storops_ex.VNXDeleteLunError, - 'General lun delete error.', - client.delete_lun, mocked['lun'].name) - - @res_mock.patch_client - def test_cleanup_async_lun(self, client, mocked): - client.cleanup_async_lun( - mocked['lun'].name, - force=True) - - @res_mock.patch_client - def test_enable_compression(self, client, mocked): - lun_obj = 
mocked['lun'] - client.enable_compression(lun_obj) - lun_obj.enable_compression.assert_called_with(ignore_thresholds=True) - - @res_mock.patch_client - def test_enable_compression_on_compressed_lun(self, client, mocked): - lun_obj = mocked['lun'] - client.enable_compression(lun_obj) - - @res_mock.patch_client - def test_get_vnx_enabler_status(self, client, mocked): - re = client.get_vnx_enabler_status() - self.assertTrue(re.dedup_enabled) - self.assertFalse(re.compression_enabled) - self.assertTrue(re.thin_enabled) - self.assertFalse(re.fast_enabled) - self.assertTrue(re.snap_enabled) - - @res_mock.patch_client - def test_lun_has_snapshot_true(self, client, mocked): - re = client.lun_has_snapshot(mocked['lun']) - self.assertTrue(re) - - @res_mock.patch_client - def test_lun_has_snapshot_false(self, client, mocked): - re = client.lun_has_snapshot(mocked['lun']) - self.assertFalse(re) - - @res_mock.patch_client - def test_create_cg(self, client, mocked): - cg = client.create_consistency_group('cg_name') - self.assertIsNotNone(cg) - - @res_mock.patch_client - def test_create_cg_already_existed(self, client, mocked): - cg = client.create_consistency_group('cg_name_already_existed') - self.assertIsNotNone(cg) - - @res_mock.patch_client - def test_delete_cg(self, client, mocked): - client.delete_consistency_group('deleted_name') - - @res_mock.patch_client - def test_delete_cg_not_existed(self, client, mocked): - client.delete_consistency_group('not_existed') - - @res_mock.patch_client - def test_expand_lun(self, client, _ignore): - client.expand_lun('lun', 10, poll=True) - - @res_mock.patch_client - def test_expand_lun_not_poll(self, client, _ignore): - client.expand_lun('lun', 10, poll=False) - - @res_mock.patch_client - def test_expand_lun_already_expanded(self, client, _ignore): - client.expand_lun('lun', 10) - - @unittest.skip("Skip until bug #1578986 is fixed") - @utils.patch_sleep - @res_mock.patch_client - def test_expand_lun_not_ops_ready(self, client, _ignore, 
sleep_mock): - self.assertRaises(storops_ex.VNXLunPreparingError, - client.expand_lun, 'lun', 10) - lun = client.vnx.get_lun() - lun.expand.assert_called_once_with(10, ignore_thresholds=True) - # Called twice - lun.expand.assert_called_once_with(10, ignore_thresholds=True) - - @res_mock.patch_client - def test_create_snapshot(self, client, _ignore): - client.create_snapshot('lun_test_create_snapshot', - 'snap_test_create_snapshot') - - lun = client.vnx.get_lun() - lun.create_snap.assert_called_once_with('snap_test_create_snapshot', - allow_rw=True, - auto_delete=False, - keep_for=None) - - @res_mock.patch_client - def test_create_snapshot_snap_name_exist_error(self, client, _ignore): - client.create_snapshot('lun_name', 'snapshot_name') - - @res_mock.patch_client - def test_delete_snapshot(self, client, _ignore): - client.delete_snapshot('snapshot_name') - - @res_mock.patch_client - def test_delete_snapshot_delete_attached_error(self, client, _ignore): - self.assertRaises(storops_ex.VNXDeleteAttachedSnapError, - client.delete_snapshot, 'snapshot_name') - - @res_mock.patch_client - def test_copy_snapshot(self, client, mocked): - client.copy_snapshot('old_name', 'new_name') - - @res_mock.patch_client - def test_create_mount_point(self, client, mocked): - client.create_mount_point('lun_name', 'smp_name') - - @res_mock.patch_client - def test_attach_mount_point(self, client, mocked): - client.attach_snapshot('smp_name', 'snap_name') - - @res_mock.patch_client - def test_detach_mount_point(self, client, mocked): - client.detach_snapshot('smp_name') - - @res_mock.patch_client - def test_modify_snapshot(self, client, mocked): - client.modify_snapshot('snap_name', True, True) - - @res_mock.patch_client - def test_create_cg_snapshot(self, client, mocked): - snap = client.create_cg_snapshot('cg_snap_name', 'cg_name') - self.assertIsNotNone(snap) - - @res_mock.patch_client - def test_create_cg_snapshot_already_existed(self, client, mocked): - snap = 
client.create_cg_snapshot('cg_snap_name', 'cg_name') - self.assertIsNotNone(snap) - - @res_mock.patch_client - def test_delete_cg_snapshot(self, client, mocked): - client.delete_cg_snapshot(cg_snap_name='test_snap') - - @res_mock.patch_client - def test_create_sg(self, client, mocked): - client.create_storage_group('sg_name') - - @res_mock.patch_client - def test_create_sg_name_in_use(self, client, mocked): - client.create_storage_group('sg_name') - self.assertIsNotNone(client.sg_cache['sg_name']) - self.assertTrue(client.sg_cache['sg_name'].existed) - - @res_mock.patch_client - def test_get_storage_group(self, client, mocked): - sg = client.get_storage_group('sg_name') - self.assertEqual('sg_name', sg.name) - - @res_mock.patch_client - def test_register_initiator(self, client, mocked): - host = vnx_common.Host('host_name', ['host_initiator'], 'host_ip') - client.register_initiator(mocked['sg'], host, - {'host_initiator': 'port_1'}) - - @res_mock.patch_client - def test_register_initiator_exception(self, client, mocked): - host = vnx_common.Host('host_name', ['host_initiator'], 'host_ip') - client.register_initiator(mocked['sg'], host, - {'host_initiator': 'port_1'}) - - @res_mock.patch_client - def test_ping_node(self, client, mocked): - self.assertTrue(client.ping_node(mocked['iscsi_port'], 'ip')) - - @res_mock.patch_client - def test_ping_node_fail(self, client, mocked): - self.assertFalse(client.ping_node(mocked['iscsi_port'], 'ip')) - - @res_mock.patch_client - def test_add_lun_to_sg(self, client, mocked): - lun = 'not_care' - self.assertEqual(1, client.add_lun_to_sg(mocked['sg'], lun, 3)) - - @res_mock.patch_client - def test_add_lun_to_sg_alu_already_attached(self, client, mocked): - lun = 'not_care' - self.assertEqual(1, client.add_lun_to_sg(mocked['sg'], lun, 3)) - - @res_mock.patch_client - def test_add_lun_to_sg_alu_in_use(self, client, mocked): - self.assertRaisesRegexp(storops_ex.VNXNoHluAvailableError, - 'No HLU available.', - client.add_lun_to_sg, - 
mocked['sg'], - mocked['lun'], - 3) - - @res_mock.patch_client - def test_update_consistencygroup_no_lun_in_cg(self, client, mocked): - lun_1 = mocked['lun_1'] - lun_2 = mocked['lun_2'] - - def _get_lun(lun_id): - return [x for x in (lun_1, lun_2) if x.lun_id == lun_id][0] - - client.get_lun = _get_lun - cg = mocked['cg'] - - client.update_consistencygroup(cg, [lun_1.lun_id, lun_2.lun_id], []) - cg.replace_member.assert_called_once_with(lun_1, lun_2) - - @res_mock.patch_client - def test_update_consistencygroup_lun_in_cg(self, client, mocked): - lun_1 = mocked['lun_1'] - lun_2 = mocked['lun_2'] - - def _get_lun(lun_id): - return [x for x in (lun_1, lun_2) if x.lun_id == lun_id][0] - - client.get_lun = _get_lun - cg = mocked['cg'] - - client.update_consistencygroup(cg, [lun_2.lun_id], [lun_1.lun_id]) - cg.replace_member.assert_called_once_with(lun_2) - - @res_mock.patch_client - def test_update_consistencygroup_remove_all(self, client, mocked): - lun_1 = mocked['lun_1'] - - def _get_lun(lun_id): - return [x for x in (lun_1,) if x.lun_id == lun_id][0] - - client.get_lun = _get_lun - cg = mocked['cg'] - - client.update_consistencygroup(cg, [], [lun_1.lun_id]) - cg.delete_member.assert_called_once_with(lun_1) - - @res_mock.patch_client - def test_get_available_ip(self, client, mocked): - ip = client.get_available_ip() - self.assertEqual('192.168.1.5', ip) - - @res_mock.patch_client - def test_create_mirror(self, client, mocked): - mv = client.create_mirror('test_mirror_name', 11) - self.assertIsNotNone(mv) - - @res_mock.patch_client - def test_create_mirror_already_created(self, client, mocked): - mv = client.create_mirror('error_mirror', 12) - self.assertIsNotNone(mv) - - @res_mock.patch_client - def test_delete_mirror(self, client, mocked): - client.delete_mirror('mirror_name') - - @res_mock.patch_client - def test_delete_mirror_already_deleted(self, client, mocked): - client.delete_mirror('mirror_name_deleted') - - @res_mock.patch_client - def test_add_image(self, 
client, mocked): - client.add_image('mirror_namex', '192.168.1.11', 31) - - @res_mock.patch_client - def test_remove_image(self, client, mocked): - client.remove_image('mirror_remove') - - @res_mock.patch_client - def test_fracture_image(self, client, mocked): - client.fracture_image('mirror_fracture') - - @res_mock.patch_client - def test_sync_image(self, client, mocked): - client.sync_image('mirror_sync') - - @res_mock.patch_client - def test_promote_image(self, client, mocked): - client.promote_image('mirror_promote') - - @res_mock.patch_client - def test_create_mirror_group(self, client, mocked): - group_name = 'test_mg' - mg = client.create_mirror_group(group_name) - self.assertIsNotNone(mg) - - @res_mock.patch_client - def test_create_mirror_group_name_in_use(self, client, mocked): - group_name = 'test_mg_name_in_use' - mg = client.create_mirror_group(group_name) - self.assertIsNotNone(mg) - - @res_mock.patch_client - def test_delete_mirror_group(self, client, mocked): - group_name = 'delete_name' - client.delete_mirror_group(group_name) - - @res_mock.patch_client - def test_delete_mirror_group_not_found(self, client, mocked): - group_name = 'group_not_found' - client.delete_mirror_group(group_name) - - @res_mock.patch_client - def test_add_mirror(self, client, mocked): - group_name = 'group_add_mirror' - mirror_name = 'mirror_name' - client.add_mirror(group_name, mirror_name) - - @res_mock.patch_client - def test_add_mirror_already_added(self, client, mocked): - group_name = 'group_already_added' - mirror_name = 'mirror_name' - client.add_mirror(group_name, mirror_name) - - @res_mock.patch_client - def test_remove_mirror(self, client, mocked): - group_name = 'group_mirror' - mirror_name = 'mirror_name' - client.remove_mirror(group_name, mirror_name) - - @res_mock.patch_client - def test_remove_mirror_not_member(self, client, mocked): - group_name = 'group_mirror' - mirror_name = 'mirror_name_not_member' - client.remove_mirror(group_name, mirror_name) - - 
@res_mock.patch_client - def test_promote_mirror_group(self, client, mocked): - group_name = 'group_promote' - client.promote_mirror_group(group_name) - - @res_mock.patch_client - def test_promote_mirror_group_already_promoted(self, client, mocked): - group_name = 'group_promote' - client.promote_mirror_group(group_name) - - @res_mock.patch_client - def test_sync_mirror_group(self, client, mocked): - group_name = 'group_sync' - client.sync_mirror_group(group_name) - - @res_mock.patch_client - def test_fracture_mirror_group(self, client, mocked): - group_name = 'group_fracture' - client.fracture_mirror_group(group_name) - - @res_mock.mock_driver_input - @res_mock.patch_client - def test_get_lun_id(self, client, mocked, cinder_input): - lun_id = client.get_lun_id(cinder_input['volume']) - self.assertEqual(1, lun_id) - - @res_mock.mock_driver_input - @res_mock.patch_client - def test_get_lun_id_without_provider_location(self, client, mocked, - cinder_input): - lun_id = client.get_lun_id(cinder_input['volume']) - self.assertIsInstance(lun_id, int) - self.assertEqual(mocked['lun'].lun_id, lun_id) - - @res_mock.patch_client - def test_get_ioclass(self, client, mocked): - qos_specs = {'id': 'qos', vnx_common.QOS_MAX_IOPS: 10, - vnx_common.QOS_MAX_BWS: 100} - ioclasses = client.get_ioclass(qos_specs) - self.assertEqual(2, len(ioclasses)) - - @res_mock.patch_client - def test_create_ioclass_iops(self, client, mocked): - ioclass = client.create_ioclass_iops('test', 1000) - self.assertIsNotNone(ioclass) - - @res_mock.patch_client - def test_create_ioclass_bws(self, client, mocked): - ioclass = client.create_ioclass_bws('test', 100) - self.assertIsNotNone(ioclass) - - @res_mock.patch_client - def test_create_policy(self, client, mocked): - policy = client.create_policy('policy_name') - self.assertIsNotNone(policy) - - @res_mock.patch_client - def test_get_running_policy(self, client, mocked): - policy, is_new = client.get_running_policy() - self.assertEqual(policy.state in 
['Running', 'Measuring'], True) - self.assertFalse(is_new) - - @res_mock.patch_client - def test_add_lun_to_ioclass(self, client, mocked): - client.add_lun_to_ioclass('test_ioclass', 1) diff --git a/cinder/tests/unit/volume/drivers/dell_emc/vnx/test_common.py b/cinder/tests/unit/volume/drivers/dell_emc/vnx/test_common.py deleted file mode 100644 index 8e9222305..000000000 --- a/cinder/tests/unit/volume/drivers/dell_emc/vnx/test_common.py +++ /dev/null @@ -1,354 +0,0 @@ -# Copyright (c) 2016 EMC Corporation. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -import mock - -from cinder import exception -from cinder import test -from cinder.tests.unit.volume.drivers.dell_emc.vnx import fake_storops \ - as storops -from cinder.tests.unit.volume.drivers.dell_emc.vnx import res_mock -from cinder.volume.drivers.dell_emc.vnx import client -from cinder.volume.drivers.dell_emc.vnx import common - - -class TestExtraSpecs(test.TestCase): - def test_valid_extra_spec(self): - extra_spec = { - 'provisioning:type': 'deduplicated', - 'storagetype:tiering': 'nomovement', - } - spec_obj = common.ExtraSpecs(extra_spec) - self.assertEqual(storops.VNXProvisionEnum.DEDUPED, - spec_obj.provision) - self.assertEqual(storops.VNXTieringEnum.NO_MOVE, - spec_obj.tier) - - def test_extra_spec_case_insensitive(self): - extra_spec = { - 'provisioning:type': 'Thin', - 'storagetype:tiering': 'StartHighThenAuto', - } - spec_obj = common.ExtraSpecs(extra_spec) - self.assertEqual(storops.VNXProvisionEnum.THIN, - spec_obj.provision) - self.assertEqual(storops.VNXTieringEnum.HIGH_AUTO, - spec_obj.tier) - - def test_empty_extra_spec(self): - extra_spec = {} - common.ExtraSpecs.set_defaults(storops.VNXProvisionEnum.THICK, - storops.VNXTieringEnum.HIGH_AUTO) - spec_obj = common.ExtraSpecs(extra_spec) - self.assertEqual(storops.VNXProvisionEnum.THICK, spec_obj.provision) - self.assertEqual(storops.VNXTieringEnum.HIGH_AUTO, spec_obj.tier) - - def test_invalid_provision(self): - extra_spec = { - 'provisioning:type': 'invalid', - } - self.assertRaises(exception.InvalidVolumeType, - common.ExtraSpecs, - extra_spec) - - def test_invalid_tiering(self): - extra_spec = { - 'storagetype:tiering': 'invalid', - } - self.assertRaises(exception.InvalidVolumeType, - common.ExtraSpecs, - extra_spec) - - def test_validate_extra_spec_dedup_and_tier_failed(self): - spec_obj = common.ExtraSpecs({ - 'storagetype:pool': 'fake_pool', - 'provisioning:type': 'deduplicated', - 'storagetype:tiering': 'auto', - }) - enabler_status = common.VNXEnablerStatus( - dedup=True, fast=True, 
thin=True) - self.assertRaises(exception.InvalidVolumeType, - spec_obj.validate, - enabler_status) - - def test_tier_is_not_set_to_default_for_dedup_provision(self): - common.ExtraSpecs.set_defaults(storops.VNXProvisionEnum.THICK, - storops.VNXTieringEnum.HIGH_AUTO) - spec_obj = common.ExtraSpecs({'provisioning:type': 'deduplicated'}) - self.assertEqual(storops.VNXProvisionEnum.DEDUPED, spec_obj.provision) - self.assertIsNone(spec_obj.tier) - - def test_validate_extra_spec_is_valid(self): - spec_obj = common.ExtraSpecs({ - 'storagetype:pool': 'fake_pool', - 'provisioning:type': 'thin', - 'storagetype:tiering': 'auto', - }) - enabler_status = common.VNXEnablerStatus( - dedup=True, fast=True, thin=True) - re = spec_obj.validate(enabler_status) - self.assertTrue(re) - - def test_validate_extra_spec_dedup_invalid(self): - spec_obj = common.ExtraSpecs({ - 'provisioning:type': 'deduplicated', - }) - enabler_status = common.VNXEnablerStatus(dedup=False) - self.assertRaises(exception.InvalidVolumeType, - spec_obj.validate, - enabler_status) - - def test_validate_extra_spec_compress_invalid(self): - spec_obj = common.ExtraSpecs({ - 'provisioning:type': 'compressed', - }) - enabler_status = common.VNXEnablerStatus(compression=False) - self.assertRaises(exception.InvalidVolumeType, - spec_obj.validate, - enabler_status) - - def test_validate_extra_spec_no_thin_invalid(self): - spec_obj = common.ExtraSpecs({ - 'provisioning:type': 'compressed', - }) - enabler_status = common.VNXEnablerStatus(compression=True, thin=False) - self.assertRaises(exception.InvalidVolumeType, - spec_obj.validate, - enabler_status) - - def test_validate_extra_spec_tier_invalid(self): - spec_obj = common.ExtraSpecs({ - 'storagetype:tiering': 'auto', - }) - enabler_status = common.VNXEnablerStatus( - dedup=True, fast=False, compression=True, snap=True, thin=True) - self.assertRaises(exception.InvalidVolumeType, - spec_obj.validate, - enabler_status) - - def test_get_raw_data(self): - spec_obj = 
common.ExtraSpecs({'key1': 'value1'}) - self.assertIn('key1', spec_obj) - self.assertNotIn('key2', spec_obj) - self.assertEqual('value1', spec_obj['key1']) - - @res_mock.mock_storage_resources - def test_generate_extra_specs_from_lun(self, mocked_res): - lun = mocked_res['lun'] - spec = common.ExtraSpecs.from_lun(lun) - self.assertEqual(storops.VNXProvisionEnum.COMPRESSED, spec.provision) - self.assertEqual(storops.VNXTieringEnum.HIGH, spec.tier) - - lun = mocked_res['deduped_lun'] - spec = common.ExtraSpecs.from_lun(lun) - self.assertEqual(storops.VNXProvisionEnum.DEDUPED, spec.provision) - self.assertIsNone(spec.tier) - - @res_mock.mock_storage_resources - def test_extra_specs_match_with_lun(self, mocked_res): - lun = mocked_res['lun'] - spec_obj = common.ExtraSpecs({ - 'provisioning:type': 'thin', - 'storagetype:tiering': 'nomovement', - }) - self.assertTrue(spec_obj.match_with_lun(lun)) - - lun = mocked_res['deduped_lun'] - spec_obj = common.ExtraSpecs({ - 'provisioning:type': 'deduplicated', - }) - self.assertTrue(spec_obj.match_with_lun(lun)) - - @res_mock.mock_storage_resources - def test_extra_specs_not_match_with_lun(self, mocked_res): - lun = mocked_res['lun'] - spec_obj = common.ExtraSpecs({ - 'provisioning:type': 'thick', - 'storagetype:tiering': 'nomovement', - }) - self.assertFalse(spec_obj.match_with_lun(lun)) - - -class FakeConfiguration(object): - def __init__(self): - self.replication_device = [] - - -class TestReplicationDeviceList(test.TestCase): - def setUp(self): - super(TestReplicationDeviceList, self).setUp() - self.configuration = FakeConfiguration() - replication_devices = [] - device = {'backend_id': 'array_id_1', - 'san_ip': '192.168.1.1', - 'san_login': 'admin', - 'san_password': 'admin', - 'storage_vnx_authentication_type': 'global', - 'storage_vnx_security_file_dir': '/home/stack/'} - replication_devices.append(device) - self.configuration.replication_device = replication_devices - - def test_get_device(self): - devices_list = 
common.ReplicationDeviceList(self.configuration) - device = devices_list.get_device('array_id_1') - self.assertIsNotNone(device) - self.assertEqual('192.168.1.1', device.san_ip) - self.assertEqual('admin', device.san_login) - self.assertEqual('admin', device.san_password) - self.assertEqual('global', device.storage_vnx_authentication_type) - self.assertEqual('/home/stack/', device.storage_vnx_security_file_dir) - - def test_device_no_backend_id(self): - device = {'san_ip': '192.168.1.2'} - config = FakeConfiguration() - config.replication_device = [device] - self.assertRaises( - exception.InvalidInput, - common.ReplicationDeviceList, config) - - def test_device_no_secfile(self): - device = {'backend_id': 'test_id', - 'san_ip': '192.168.1.2'} - config = FakeConfiguration() - config.replication_device = [device] - rep_list = common.ReplicationDeviceList(config) - self.assertIsNone(rep_list[0].storage_vnx_security_file_dir) - - def test_get_device_not_found(self): - devices_list = common.ReplicationDeviceList(self.configuration) - device = devices_list.get_device('array_id_not_existed') - self.assertIsNone(device) - - def test_devices(self): - devices_list = common.ReplicationDeviceList(self.configuration) - self.assertEqual(1, len(devices_list.devices)) - self.assertEqual(1, len(devices_list)) - self.assertIsNotNone(devices_list[0]) - - def test_get_backend_ids(self): - backend_ids = common.ReplicationDeviceList.get_backend_ids( - self.configuration) - self.assertEqual(1, len(backend_ids)) - self.assertIn('array_id_1', backend_ids) - - -class TestVNXMirrorView(test.TestCase): - def setUp(self): - super(TestVNXMirrorView, self).setUp() - self.primary_client = mock.create_autospec(client.Client) - self.secondary_client = mock.create_autospec(client.Client) - self.mirror_view = common.VNXMirrorView( - self.primary_client, self.secondary_client) - - def test_create_mirror(self): - self.mirror_view.create_mirror('mirror_test', 11) - 
self.primary_client.create_mirror.assert_called_once_with( - 'mirror_test', 11) - - def test_create_secondary_lun(self): - self.mirror_view.create_secondary_lun('pool_name', 'lun_name', - 10, 'thick', 'auto') - self.secondary_client.create_lun.assert_called_once_with( - 'pool_name', 'lun_name', 10, 'thick', 'auto') - - def test_delete_secondary_lun(self): - self.mirror_view.delete_secondary_lun('lun_name') - self.secondary_client.delete_lun.assert_called_once_with('lun_name') - - def test_delete_mirror(self): - self.mirror_view.delete_mirror('mirror_name') - self.primary_client.delete_mirror.assert_called_once_with( - 'mirror_name') - - def test_add_image(self): - self.secondary_client.get_available_ip.return_value = '192.168.1.2' - self.mirror_view.add_image('mirror_name', 111) - self.secondary_client.get_available_ip.assert_called_once_with() - self.primary_client.add_image.assert_called_once_with( - 'mirror_name', '192.168.1.2', 111) - - def test_remove_image(self): - self.mirror_view.remove_image('mirror_remove') - self.primary_client.remove_image.assert_called_once_with( - 'mirror_remove') - - def test_fracture_image(self): - self.mirror_view.fracture_image('mirror_fracture') - self.primary_client.fracture_image.assert_called_once_with( - 'mirror_fracture') - - def test_promote_image(self): - self.mirror_view.promote_image('mirror_promote') - self.secondary_client.promote_image.assert_called_once_with( - 'mirror_promote') - - def test_destroy_mirror(self): - mv = mock.Mock() - mv.existed = True - self.primary_client.get_mirror.return_value = mv - self.mirror_view.destroy_mirror('mirror_name', 'sec_lun_name') - self.primary_client.get_mirror.assert_called_once_with( - 'mirror_name') - self.primary_client.fracture_image.assert_called_once_with( - 'mirror_name') - self.primary_client.remove_image.assert_called_once_with( - 'mirror_name') - self.primary_client.delete_mirror.assert_called_once_with( - 'mirror_name') - 
self.secondary_client.delete_lun.assert_called_once_with( - 'sec_lun_name') - - def test_destroy_mirror_not_existed(self): - mv = mock.Mock() - mv.existed = False - self.primary_client.get_mirror.return_value = mv - self.mirror_view.destroy_mirror('mirror_name', 'sec_lun_name') - self.primary_client.get_mirror.assert_called_once_with( - 'mirror_name') - self.assertFalse(self.primary_client.fracture_image.called) - - def test_create_mirror_group(self): - self.mirror_view.create_mirror_group('test_group') - self.primary_client.create_mirror_group.assert_called_once_with( - 'test_group') - - def test_delete_mirror_group(self): - self.mirror_view.delete_mirror_group('test_group') - self.primary_client.delete_mirror_group.assert_called_once_with( - 'test_group') - - def test_add_mirror(self): - self.mirror_view.add_mirror('test_group', 'test_mirror') - self.primary_client.add_mirror.assert_called_once_with( - 'test_group', 'test_mirror') - - def test_remove_mirror(self): - self.mirror_view.remove_mirror('test_group', 'test_mirror') - self.primary_client.remove_mirror('test_group', 'test_mirror') - - def test_sync_mirror_group(self): - self.mirror_view.sync_mirror_group('test_group') - self.primary_client.sync_mirror_group.assert_called_once_with( - 'test_group') - - def test_promote_mirror_group(self): - self.mirror_view.promote_mirror_group('test_group') - self.secondary_client.promote_mirror_group.assert_called_once_with( - 'test_group') - - def test_fracture_mirror_group(self): - self.mirror_view.fracture_mirror_group('test_group') - self.primary_client.fracture_mirror_group.assert_called_once_with( - 'test_group') diff --git a/cinder/tests/unit/volume/drivers/dell_emc/vnx/test_driver.py b/cinder/tests/unit/volume/drivers/dell_emc/vnx/test_driver.py deleted file mode 100644 index 22733a6f3..000000000 --- a/cinder/tests/unit/volume/drivers/dell_emc/vnx/test_driver.py +++ /dev/null @@ -1,93 +0,0 @@ -# Copyright (c) 2016 EMC Corporation. -# All Rights Reserved. 
-# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import mock - -from cinder import test -from cinder.volume import configuration as conf -from cinder.volume.drivers.dell_emc.vnx import driver - - -class TestVNXDriver(test.TestCase): - def setUp(self): - super(TestVNXDriver, self).setUp() - self.configuration = conf.Configuration(None) - self.fc_adapter_patcher = mock.patch( - 'cinder.volume.drivers.dell_emc.vnx.adapter.FCAdapter', - autospec=True) - self.fc_adapter_patcher.start() - self.iscsi_adapter_patcher = mock.patch( - 'cinder.volume.drivers.dell_emc.vnx.adapter.ISCSIAdapter', - autospec=True) - self.iscsi_adapter_patcher.start() - self.driver = None - self.addCleanup(self.fc_adapter_patcher.stop) - self.addCleanup(self.iscsi_adapter_patcher.stop) - - def _get_driver(self, protocol): - self.configuration.storage_protocol = protocol - drv = driver.VNXDriver(configuration=self.configuration, - active_backend_id=None) - drv.do_setup(None) - return drv - - def test_init_iscsi_driver(self): - _driver = self._get_driver('iscsi') - driver_name = str(_driver.adapter) - self.assertIn('ISCSIAdapter', driver_name) - self.assertEqual(driver.VNXDriver.VERSION, _driver.VERSION) - - def test_init_fc_driver(self): - _driver = self._get_driver('FC') - driver_name = str(_driver.adapter) - self.assertIn('FCAdapter', driver_name) - self.assertEqual(driver.VNXDriver.VERSION, _driver.VERSION) - - def test_create_volume(self): - _driver = self._get_driver('iscsi') - 
_driver.create_volume('fake_volume') - _driver.adapter.create_volume.assert_called_once_with('fake_volume') - - def test_initialize_connection(self): - _driver = self._get_driver('iscsi') - _driver.initialize_connection('fake_volume', {'host': 'fake_host'}) - _driver.adapter.initialize_connection.assert_called_once_with( - 'fake_volume', {'host': 'fake_host'}) - - def test_terminate_connection(self): - _driver = self._get_driver('iscsi') - _driver.terminate_connection('fake_volume', {'host': 'fake_host'}) - _driver.adapter.terminate_connection.assert_called_once_with( - 'fake_volume', {'host': 'fake_host'}) - - def test_is_consistent_group_snapshot_enabled(self): - _driver = self._get_driver('iscsi') - _driver._stats = {'consistent_group_snapshot_enabled': True} - self.assertTrue(_driver.is_consistent_group_snapshot_enabled()) - _driver._stats = {'consistent_group_snapshot_enabled': False} - self.assertFalse(_driver.is_consistent_group_snapshot_enabled()) - self.assertFalse(_driver.is_consistent_group_snapshot_enabled()) - - def test_enable_replication(self): - _driver = self._get_driver('iscsi') - _driver.enable_replication(None, 'group', 'volumes') - - def test_disable_replication(self): - _driver = self._get_driver('iscsi') - _driver.disable_replication(None, 'group', 'volumes') - - def test_failover_replication(self): - _driver = self._get_driver('iscsi') - _driver.failover_replication(None, 'group', 'volumes', 'backend_id') diff --git a/cinder/tests/unit/volume/drivers/dell_emc/vnx/test_replication.py b/cinder/tests/unit/volume/drivers/dell_emc/vnx/test_replication.py deleted file mode 100644 index 4995c1cc2..000000000 --- a/cinder/tests/unit/volume/drivers/dell_emc/vnx/test_replication.py +++ /dev/null @@ -1,96 +0,0 @@ -# Copyright (c) 2017 Dell Inc. or its subsidiaries. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. 
You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import mock - -from cinder import context -from cinder.objects import fields -from cinder import test -from cinder.tests.unit.volume.drivers.dell_emc.vnx import res_mock -from cinder.tests.unit.volume.drivers.dell_emc.vnx import utils -from cinder.volume import configuration as conf -from cinder.volume.drivers.dell_emc.vnx import utils as vnx_utils - - -class TestReplicationAdapter(test.TestCase): - - def setUp(self): - super(TestReplicationAdapter, self).setUp() - self.configuration = conf.Configuration(None) - vnx_utils.init_ops(self.configuration) - self.configuration.san_ip = '192.168.1.1' - self.configuration.storage_vnx_authentication_type = 'global' - self.ctxt = context.get_admin_context() - - def tearDown(self): - super(TestReplicationAdapter, self).tearDown() - - @utils.patch_group_specs({ - 'consistent_group_replication_enabled': ' True'}) - @res_mock.mock_driver_input - @res_mock.patch_common_adapter - def test_enable_replication(self, common_adapter, mocked_res, - mocked_input): - group = mocked_input['group'] - volumes = [mocked_input['volume1'], - mocked_input['volume2']] - volumes[0].group = group - volumes[1].group = group - common_adapter.enable_replication(self.ctxt, group, volumes) - - @utils.patch_group_specs({ - 'consistent_group_replication_enabled': ' True'}) - @res_mock.mock_driver_input - @res_mock.patch_common_adapter - def test_disable_replication(self, common_adapter, mocked_res, - mocked_input): - group = mocked_input['group'] - volumes = [mocked_input['volume1'], - mocked_input['volume2']] - volumes[0].group = group - 
volumes[1].group = group - common_adapter.disable_replication(self.ctxt, group, volumes) - - @utils.patch_group_specs({ - 'consistent_group_replication_enabled': ' True'}) - @res_mock.mock_driver_input - @res_mock.patch_common_adapter - def test_failover_replication(self, common_adapter, mocked_res, - mocked_input): - device = utils.get_replication_device() - common_adapter.config.replication_device = [device] - group = mocked_input['group'] - volumes = [mocked_input['volume1'], mocked_input['volume2']] - lun1 = mocked_res['lun1'] - volumes[0].group = group - volumes[1].group = group - secondary_backend_id = 'fake_serial' - with mock.patch.object(common_adapter, - 'build_mirror_view') as fake: - fake_mirror = utils.build_fake_mirror_view() - fake_mirror.secondary_client.get_lun.return_value = lun1 - fake_mirror.secondary_client.get_serial.return_value = ( - device['backend_id']) - fake.return_value = fake_mirror - model_update, volume_updates = common_adapter.failover_replication( - self.ctxt, group, volumes, secondary_backend_id) - - fake_mirror.promote_mirror_group.assert_called_with( - group.id.replace('-', '')) - self.assertEqual(fields.ReplicationStatus.FAILED_OVER, - model_update['replication_status']) - for update in volume_updates: - self.assertEqual(fields.ReplicationStatus.FAILED_OVER, - update['replication_status']) diff --git a/cinder/tests/unit/volume/drivers/dell_emc/vnx/test_res_mock.py b/cinder/tests/unit/volume/drivers/dell_emc/vnx/test_res_mock.py deleted file mode 100644 index 3e65a5c09..000000000 --- a/cinder/tests/unit/volume/drivers/dell_emc/vnx/test_res_mock.py +++ /dev/null @@ -1,84 +0,0 @@ -# Copyright (c) 2016 EMC Corporation. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. 
You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from cinder import test -from cinder.tests.unit.volume.drivers.dell_emc.vnx import res_mock -from cinder.volume import configuration as conf -from cinder.volume.drivers.dell_emc.vnx import utils - - -class TestResMock(test.TestCase): - def test_load_cinder_resource(self): - cinder_res = res_mock.CinderResourceMock('mocked_cinder.yaml') - - volume = cinder_res['test_mock_driver_input_inner']['volume'] - - items = ['base_lun_name^test', 'version^07.00.00', 'type^lun', - 'system^fake_serial', 'id^1'] - self.assertEqual(sorted(items), - sorted(volume.provider_location.split('|'))) - - def test_mock_driver_input(self): - @res_mock.mock_driver_input - def test_mock_driver_input_inner(self, mocked_input): - items = ['base_lun_name^test', 'version^07.00.00', 'type^lun', - 'system^fake_serial', 'id^1'] - mocked_items = mocked_input['volume'].provider_location.split('|') - self.assertEqual(sorted(items), - sorted(mocked_items)) - - test_mock_driver_input_inner(self) - - def test_load_storage_resource(self): - vnx_res = res_mock.StorageResourceMock('test_res_mock.yaml') - lun = vnx_res['test_load_storage_resource']['lun'] - pool = vnx_res['test_load_storage_resource']['pool'] - created_lun = pool.create_lun() - self.assertEqual(lun.lun_id, created_lun.lun_id) - self.assertEqual(lun.poll, created_lun.poll) - self.assertEqual(lun.state, created_lun.state) - - def test_patch_client(self): - @res_mock.patch_client - def test_patch_client_inner(self, patched_client, mocked): - vnx = patched_client.vnx - self.assertEqual('fake_serial', vnx.serial) - - pool = 
vnx.get_pool() - self.assertEqual('pool_name', pool.name) - - test_patch_client_inner(self) - - def test_patch_client_mocked(self): - @res_mock.patch_client - def test_patch_client_mocked_inner(self, patched_client, mocked): - lun = mocked['lun'] - self.assertEqual('Offline', lun.state) - - test_patch_client_mocked_inner(self) - - def test_patch_adapter_common(self): - self.configuration = conf.Configuration(None) - utils.init_ops(self.configuration) - self.configuration.san_ip = '192.168.1.1' - self.configuration.storage_vnx_authentication_type = 'global' - self.configuration.storage_vnx_pool_names = 'pool1,unit_test_pool' - - @res_mock.patch_common_adapter - def test_patch_common_adapter_inner(self, patched_adapter, mocked): - pool = patched_adapter.client.vnx.get_pool() - self.assertEqual('pool_name', pool.name) - - test_patch_common_adapter_inner(self) diff --git a/cinder/tests/unit/volume/drivers/dell_emc/vnx/test_res_mock.yaml b/cinder/tests/unit/volume/drivers/dell_emc/vnx/test_res_mock.yaml deleted file mode 100644 index f835ed0f3..000000000 --- a/cinder/tests/unit/volume/drivers/dell_emc/vnx/test_res_mock.yaml +++ /dev/null @@ -1,59 +0,0 @@ -################################################# -# Storage resource -################################################# - -# Common -lun_base: - _properties: &lun_base_prop - lun_id: lun_id - poll: False - operation: None - state: Ready - -pool_base: - _properties: &pool_base_prop - name: pool_name - pool_id: 0 - state: Ready - user_capacity_gbs: 1311 - total_subscribed_capacity_gbs: 131 - available_capacity_gbs: 132 - percent_full_threshold: 70 - fast_cache: True - -vnx_base: - _properties: &vnx_base_prop - serial: fake_serial - -test_load_storage_resource: &test_load_storage_resource - lun: &lun1 - _properties: - <<: *lun_base_prop - state: Offline - _methods: - update: - - pool: &pool1 - _properties: - <<: *pool_base_prop - _methods: - create_lun: *lun1 - - vnx: - _properties: - <<: *vnx_base_prop - _methods: - 
get_pool: *pool1 - -test_patch_client_inner: *test_load_storage_resource - -test_patch_client_mocked_inner: *test_load_storage_resource - -test_patch_common_adapter_inner: *test_load_storage_resource - -test_property_side_effect_inner: - lun: - _properties: - <<: *lun_base_prop - total_capacity_gb: - _side_effect: [5, 10] diff --git a/cinder/tests/unit/volume/drivers/dell_emc/vnx/test_taskflows.py b/cinder/tests/unit/volume/drivers/dell_emc/vnx/test_taskflows.py deleted file mode 100644 index 559075c5f..000000000 --- a/cinder/tests/unit/volume/drivers/dell_emc/vnx/test_taskflows.py +++ /dev/null @@ -1,218 +0,0 @@ -# Copyright (c) 2016 EMC Corporation, Inc. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -import taskflow.engines -from taskflow.patterns import linear_flow -from taskflow.types import failure - -from cinder import test -from cinder.tests.unit.volume.drivers.dell_emc.vnx import fake_exception \ - as vnx_ex -from cinder.tests.unit.volume.drivers.dell_emc.vnx import res_mock -import cinder.volume.drivers.dell_emc.vnx.taskflows as vnx_taskflow - - -class TestTaskflow(test.TestCase): - def setUp(self): - super(TestTaskflow, self).setUp() - self.work_flow = linear_flow.Flow('test_task') - - @res_mock.patch_client - def test_copy_snapshot_task(self, client, mocked): - store_spec = {'client': client, - 'snap_name': 'original_name', - 'new_snap_name': 'new_name' - } - self.work_flow.add(vnx_taskflow.CopySnapshotTask()) - engine = taskflow.engines.load(self.work_flow, - store=store_spec) - engine.run() - - @res_mock.patch_client - def test_copy_snapshot_task_revert(self, client, mocked): - store_spec = {'client': client, - 'snap_name': 'original_name', - 'new_snap_name': 'new_name' - } - self.work_flow.add(vnx_taskflow.CopySnapshotTask()) - engine = taskflow.engines.load(self.work_flow, - store=store_spec) - self.assertRaises(vnx_ex.VNXSnapError, - engine.run) - - @res_mock.patch_client - def test_create_smp_task(self, client, mocked): - store_spec = { - 'client': client, - 'smp_name': 'mount_point_name', - 'base_lun_name': 'base_name' - } - self.work_flow.add(vnx_taskflow.CreateSMPTask()) - engine = taskflow.engines.load(self.work_flow, - store=store_spec) - engine.run() - smp_id = engine.storage.fetch('smp_id') - self.assertEqual(15, smp_id) - - @res_mock.patch_client - def test_create_smp_task_revert(self, client, mocked): - store_spec = { - 'client': client, - 'smp_name': 'mount_point_name', - 'base_lun_name': 'base_name' - } - self.work_flow.add(vnx_taskflow.CreateSMPTask()) - engine = taskflow.engines.load(self.work_flow, - store=store_spec) - self.assertRaises(vnx_ex.VNXCreateLunError, - engine.run) - smp_id = engine.storage.fetch('smp_id') - 
self.assertIsInstance(smp_id, failure.Failure) - - @res_mock.patch_client - def test_attach_snap_task(self, client, mocked): - store_spec = { - 'client': client, - 'smp_name': 'mount_point_name', - 'snap_name': 'snap_name' - } - self.work_flow.add(vnx_taskflow.AttachSnapTask()) - engine = taskflow.engines.load(self.work_flow, - store=store_spec) - engine.run() - - @res_mock.patch_client - def test_attach_snap_task_revert(self, client, mocked): - store_spec = { - 'client': client, - 'smp_name': 'mount_point_name', - 'snap_name': 'snap_name' - } - self.work_flow.add(vnx_taskflow.AttachSnapTask()) - engine = taskflow.engines.load(self.work_flow, - store=store_spec) - self.assertRaises(vnx_ex.VNXAttachSnapError, - engine.run) - - @res_mock.patch_client - def test_create_snapshot_task(self, client, mocked): - store_spec = { - 'client': client, - 'lun_id': 12, - 'snap_name': 'snap_name' - } - self.work_flow.add(vnx_taskflow.CreateSnapshotTask()) - engine = taskflow.engines.load(self.work_flow, - store=store_spec) - engine.run() - - @res_mock.patch_client - def test_create_snapshot_task_revert(self, client, mocked): - store_spec = { - 'client': client, - 'lun_id': 13, - 'snap_name': 'snap_name' - } - self.work_flow.add(vnx_taskflow.CreateSnapshotTask()) - engine = taskflow.engines.load(self.work_flow, - store=store_spec) - self.assertRaises(vnx_ex.VNXCreateSnapError, - engine.run) - - @res_mock.patch_client - def test_allow_read_write_task(self, client, mocked): - store_spec = { - 'client': client, - 'snap_name': 'snap_name' - } - self.work_flow.add(vnx_taskflow.ModifySnapshotTask()) - engine = taskflow.engines.load(self.work_flow, - store=store_spec) - engine.run() - - @res_mock.patch_client - def test_allow_read_write_task_revert(self, client, mocked): - store_spec = { - 'client': client, - 'snap_name': 'snap_name' - } - self.work_flow.add(vnx_taskflow.ModifySnapshotTask()) - engine = taskflow.engines.load(self.work_flow, - store=store_spec) - 
self.assertRaises(vnx_ex.VNXSnapError, - engine.run) - - @res_mock.patch_client - def test_create_cg_snapshot_task(self, client, mocked): - store_spec = { - 'client': client, - 'cg_name': 'test_cg', - 'cg_snap_name': 'my_snap_name' - } - self.work_flow.add(vnx_taskflow.CreateCGSnapshotTask()) - engine = taskflow.engines.load(self.work_flow, - store=store_spec) - engine.run() - snap_name = engine.storage.fetch('new_cg_snap_name') - self.assertIsInstance(snap_name, res_mock.StorageObjectMock) - - @res_mock.patch_client - def test_create_cg_snapshot_task_revert(self, client, mocked): - store_spec = { - 'client': client, - 'cg_name': 'test_cg', - 'cg_snap_name': 'my_snap_name' - } - self.work_flow.add(vnx_taskflow.CreateCGSnapshotTask()) - engine = taskflow.engines.load(self.work_flow, - store=store_spec) - self.assertRaises(vnx_ex.VNXCreateSnapError, - engine.run) - - @res_mock.patch_client - def test_extend_smp_task(self, client, mocked): - store_spec = { - 'client': client, - 'smp_name': 'lun_test_extend_smp_task', - 'lun_size': 100 - } - self.work_flow.add(vnx_taskflow.ExtendSMPTask()) - engine = taskflow.engines.load(self.work_flow, - store=store_spec) - engine.run() - - @res_mock.patch_client - def test_extend_smp_task_skip_small_size(self, client, mocked): - store_spec = { - 'client': client, - 'smp_name': 'lun_test_extend_smp_task', - 'lun_size': 1 - } - self.work_flow.add(vnx_taskflow.ExtendSMPTask()) - engine = taskflow.engines.load(self.work_flow, - store=store_spec) - engine.run() - - @res_mock.patch_client - def test_extend_smp_task_skip_thick(self, client, mocked): - store_spec = { - 'client': client, - 'smp_name': 'lun_test_extend_smp_task_skip_thick', - 'lun_size': 100 - } - self.work_flow.add(vnx_taskflow.ExtendSMPTask()) - engine = taskflow.engines.load(self.work_flow, - store=store_spec) - engine.run() diff --git a/cinder/tests/unit/volume/drivers/dell_emc/vnx/test_utils.py b/cinder/tests/unit/volume/drivers/dell_emc/vnx/test_utils.py deleted file 
mode 100644 index 3ffd40bf8..000000000 --- a/cinder/tests/unit/volume/drivers/dell_emc/vnx/test_utils.py +++ /dev/null @@ -1,257 +0,0 @@ -# Copyright (c) 2016 EMC Corporation, Inc. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import mock - -from cinder import exception -from cinder import test -from cinder.tests.unit.volume.drivers.dell_emc.vnx import fake_exception \ - as storops_ex -from cinder.tests.unit.volume.drivers.dell_emc.vnx import fake_storops \ - as storops -from cinder.tests.unit.volume.drivers.dell_emc.vnx import res_mock -from cinder.tests.unit.volume.drivers.dell_emc.vnx import utils as ut_utils -from cinder.volume.drivers.dell_emc.vnx import common -from cinder.volume.drivers.dell_emc.vnx import utils as vnx_utils - - -class FakeDriver(object): - - @vnx_utils.require_consistent_group_snapshot_enabled - def fake_group_method(self, context, group_or_snap): - return True - - -class TestUtils(test.TestCase): - def setUp(self): - super(TestUtils, self).setUp() - self.origin_timeout = common.DEFAULT_TIMEOUT - common.DEFAULT_TIMEOUT = 0.05 - - def tearDown(self): - super(TestUtils, self).tearDown() - common.DEFAULT_TIMEOUT = self.origin_timeout - - def test_wait_until(self): - mock_testmethod = mock.Mock(return_value=True) - vnx_utils.wait_until(mock_testmethod, interval=0) - mock_testmethod.assert_has_calls([mock.call()]) - - def test_wait_until_with_exception(self): - mock_testmethod = mock.Mock( - 
side_effect=storops_ex.VNXAttachSnapError('Unknown error')) - mock_testmethod.__name__ = 'test_method' - self.assertRaises(storops_ex.VNXAttachSnapError, - vnx_utils.wait_until, - mock_testmethod, - timeout=1, - interval=0, - reraise_arbiter=( - lambda ex: not isinstance( - ex, storops_ex.VNXCreateLunError))) - mock_testmethod.assert_has_calls([mock.call()]) - - def test_wait_until_with_params(self): - mock_testmethod = mock.Mock(return_value=True) - vnx_utils.wait_until(mock_testmethod, - param1=1, - param2='test') - mock_testmethod.assert_has_calls( - [mock.call(param1=1, param2='test')]) - mock_testmethod.assert_has_calls([mock.call(param1=1, param2='test')]) - - @res_mock.mock_driver_input - def test_retype_need_migration_when_host_changed(self, driver_in): - volume = driver_in['volume'] - another_host = driver_in['host'] - re = vnx_utils.retype_need_migration( - volume, None, None, another_host) - self.assertTrue(re) - - @res_mock.mock_driver_input - def test_retype_need_migration_for_smp_volume(self, driver_in): - volume = driver_in['volume'] - host = driver_in['host'] - re = vnx_utils.retype_need_migration( - volume, None, None, host) - self.assertTrue(re) - - @res_mock.mock_driver_input - def test_retype_need_migration_when_provision_changed( - self, driver_in): - volume = driver_in['volume'] - host = driver_in['host'] - old_spec = common.ExtraSpecs({'provisioning:type': 'thin'}) - new_spec = common.ExtraSpecs({'provisioning:type': 'deduplicated'}) - re = vnx_utils.retype_need_migration( - volume, old_spec.provision, new_spec.provision, host) - self.assertTrue(re) - - @res_mock.mock_driver_input - def test_retype_not_need_migration_when_provision_changed( - self, driver_in): - volume = driver_in['volume'] - host = driver_in['host'] - old_spec = common.ExtraSpecs({'provisioning:type': 'thick'}) - new_spec = common.ExtraSpecs({'provisioning:type': 'compressed'}) - re = vnx_utils.retype_need_migration( - volume, old_spec.provision, new_spec.provision, host) - 
self.assertFalse(re) - - @res_mock.mock_driver_input - def test_retype_not_need_migration(self, driver_in): - volume = driver_in['volume'] - host = driver_in['host'] - old_spec = common.ExtraSpecs({'storagetype:tiering': 'auto'}) - new_spec = common.ExtraSpecs( - {'storagetype:tiering': 'starthighthenauto'}) - re = vnx_utils.retype_need_migration( - volume, old_spec.provision, new_spec.provision, host) - self.assertFalse(re) - - def test_retype_need_change_tier(self): - re = vnx_utils.retype_need_change_tier( - storops.VNXTieringEnum.AUTO, storops.VNXTieringEnum.HIGH_AUTO) - self.assertTrue(re) - - def test_retype_need_turn_on_compression(self): - re = vnx_utils.retype_need_turn_on_compression( - storops.VNXProvisionEnum.THIN, - storops.VNXProvisionEnum.COMPRESSED) - self.assertTrue(re) - re = vnx_utils.retype_need_turn_on_compression( - storops.VNXProvisionEnum.THICK, - storops.VNXProvisionEnum.COMPRESSED) - self.assertTrue(re) - - def test_retype_not_need_turn_on_compression(self): - re = vnx_utils.retype_need_turn_on_compression( - storops.VNXProvisionEnum.DEDUPED, - storops.VNXProvisionEnum.COMPRESSED) - self.assertFalse(re) - re = vnx_utils.retype_need_turn_on_compression( - storops.VNXProvisionEnum.DEDUPED, - storops.VNXProvisionEnum.COMPRESSED) - self.assertFalse(re) - - @res_mock.mock_driver_input - def test_get_base_lun_name(self, mocked): - volume = mocked['volume'] - self.assertEqual( - 'test', - vnx_utils.get_base_lun_name(volume)) - - def test_convert_to_tgt_list_and_itor_tgt_map(self): - zone_mapping = { - 'san_1': {'initiator_port_wwn_list': - ['wwn1_1'], - 'target_port_wwn_list': - ['wwnt_1', 'wwnt_2']}, - 'san_2': {'initiator_port_wwn_list': - ['wwn2_1', 'wwn2_2'], - 'target_port_wwn_list': - ['wwnt_1', 'wwnt_3']}, - } - - tgt_wwns, itor_tgt_map = ( - vnx_utils.convert_to_tgt_list_and_itor_tgt_map(zone_mapping)) - self.assertEqual({'wwnt_1', 'wwnt_2', 'wwnt_3'}, set(tgt_wwns)) - self.assertEqual({'wwn1_1': ['wwnt_1', 'wwnt_2'], - 'wwn2_1': 
['wwnt_1', 'wwnt_3'], - 'wwn2_2': ['wwnt_1', 'wwnt_3']}, - itor_tgt_map) - - @ut_utils.patch_group_specs(' True') - @res_mock.mock_driver_input - def test_require_consistent_group_snapshot_enabled(self, input): - driver = FakeDriver() - is_called = driver.fake_group_method('context', input['group']) - self.assertTrue(is_called) - - @res_mock.mock_driver_input - def test_is_image_cache_volume_false(self, mocked): - volume = mocked['volume'] - volume.display_name = 'volume-ca86b9a0-d0d5-4267-8cd5-c62274056cc0' - self.assertFalse(vnx_utils.is_image_cache_volume(volume)) - volume.display_name = 'volume-ca86b9a0-d0d5-c62274056cc0' - self.assertFalse(vnx_utils.is_image_cache_volume(volume)) - - @res_mock.mock_driver_input - def test_is_image_cache_volume_true(self, mocked): - volume = mocked['volume'] - volume.display_name = 'image-ca86b9a0-d0d5-4267-8cd5-c62274056cc0' - self.assertTrue(vnx_utils.is_image_cache_volume(volume)) - - @res_mock.mock_driver_input - def test_calc_migrate_and_provision_image_cache(self, mocked): - volume = mocked['volume'] - volume.display_name = 'image-ca86b9a0-d0d5-4267-8cd5-c62274056cc0' - self.assertTrue(vnx_utils.is_image_cache_volume(volume)) - async_migrate, provision = vnx_utils.calc_migrate_and_provision(volume) - self.assertFalse(async_migrate) - self.assertEqual(provision.name, 'THIN') - - @res_mock.mock_driver_input - def test_calc_migrate_and_provision(self, mocked): - volume = mocked['volume'] - volume.display_name = 'volume-ca86b9a0-d0d5-4267-8cd5-c62274056cc0' - async_migrate, provision = vnx_utils.calc_migrate_and_provision(volume) - self.assertEqual(vnx_utils.is_async_migrate_enabled(volume), - async_migrate) - self.assertEqual(provision.name, 'THICK') - - @ut_utils.patch_extra_specs({}) - @res_mock.mock_driver_input - def test_get_backend_qos_specs(self, cinder_input): - volume = mock.Mock() - volume.volume_type.qos_specs = mock.Mock() - volume.volume_type.qos_specs.__getitem__ = mock.Mock(return_value=None) - r = 
vnx_utils.get_backend_qos_specs(volume) - self.assertIsNone(r) - - volume.volume_type.qos_specs.__getitem__ = mock.Mock( - return_value={'consumer': 'frontend'}) - r = vnx_utils.get_backend_qos_specs(volume) - self.assertIsNone(r) - - volume.volume_type.qos_specs.__getitem__ = mock.Mock( - return_value={'id': 'test', 'consumer': 'back-end', - 'specs': {common.QOS_MAX_BWS: 100, - common.QOS_MAX_IOPS: 10}}) - r = vnx_utils.get_backend_qos_specs(volume) - self.assertIsNotNone(r) - self.assertEqual(100, r[common.QOS_MAX_BWS]) - self.assertEqual(10, r[common.QOS_MAX_IOPS]) - - @ut_utils.patch_group_specs({ - 'consistent_group_replication_enabled': ' True'}) - @ut_utils.patch_extra_specs({ - 'replication_enabled': ' False'}) - @res_mock.mock_driver_input - def test_check_type_matched_invalid(self, mocked): - volume = mocked['volume'] - volume.group = mocked['group'] - self.assertRaises(exception.InvalidInput, - vnx_utils.check_type_matched, - volume) - - @ut_utils.patch_group_specs({ - 'consistent_group_replication_enabled': ' True'}) - @res_mock.mock_driver_input - def test_check_rep_status_matched_disabled(self, mocked): - group = mocked['group'] - self.assertRaises(exception.InvalidInput, - vnx_utils.check_rep_status_matched, - group) diff --git a/cinder/tests/unit/volume/drivers/dell_emc/vnx/utils.py b/cinder/tests/unit/volume/drivers/dell_emc/vnx/utils.py deleted file mode 100644 index ec567a2e5..000000000 --- a/cinder/tests/unit/volume/drivers/dell_emc/vnx/utils.py +++ /dev/null @@ -1,92 +0,0 @@ -# Copyright (c) 2016 EMC Corporation, Inc. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. 
You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - - -from os import path - -import mock -import six -import yaml - -from cinder.volume.drivers.dell_emc.vnx import client -from cinder.volume.drivers.dell_emc.vnx import common - - -patch_sleep = mock.patch('time.sleep') - - -patch_vnxsystem = mock.patch('storops.VNXSystem') - - -def load_yaml(file_name): - yaml_file = '{}/{}'.format(path.dirname( - path.abspath(__file__)), file_name) - with open(yaml_file) as f: - res = yaml.safe_load(f) - return res - - -def patch_extra_specs(specs): - return _build_patch_decorator( - 'cinder.volume.volume_types.get_volume_type_extra_specs', - return_value=specs) - - -def patch_group_specs(specs): - return _build_patch_decorator( - 'cinder.volume.group_types.get_group_type_specs', - return_value=specs) - - -def patch_extra_specs_validate(return_value=None, side_effect=None): - return _build_patch_decorator( - 'cinder.volume.drivers.dell_emc.vnx.common.ExtraSpecs.validate', - return_value=return_value, - side_effect=side_effect) - - -def _build_patch_decorator(module_str, return_value=None, side_effect=None): - def _inner_mock(func): - @six.wraps(func) - def decorator(*args, **kwargs): - with mock.patch( - module_str, - return_value=return_value, - side_effect=side_effect): - return func(*args, **kwargs) - return decorator - return _inner_mock - - -def build_fake_mirror_view(): - primary_client = mock.create_autospec(spec=client.Client) - secondary_client = mock.create_autospec(spec=client.Client) - - mirror_view = mock.create_autospec(spec=common.VNXMirrorView) - mirror_view.primary_client = primary_client - 
mirror_view.secondary_client = secondary_client - return mirror_view - - -def get_replication_device(): - return { - 'backend_id': 'fake_serial', - 'san_ip': '192.168.1.12', - 'san_login': 'admin', - 'san_password': 'admin', - 'storage_vnx_authentication_type': 'global', - 'storage_vnx_security_file_dir': None, - 'pool_name': 'remote_pool', - } diff --git a/cinder/tests/unit/volume/drivers/disco/__init__.py b/cinder/tests/unit/volume/drivers/disco/__init__.py deleted file mode 100644 index 42c795e47..000000000 --- a/cinder/tests/unit/volume/drivers/disco/__init__.py +++ /dev/null @@ -1,161 +0,0 @@ -# Copyright (c) 2015 Industrial Technology Research Institute. -# -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -"""Parent class for the DISCO driver unit test.""" - -import mock -from suds import client - -from os_brick.initiator import connector - -from cinder import context -from cinder import test -from cinder.tests.unit import fake_volume -from cinder.volume import configuration as conf -import cinder.volume.drivers.disco.disco as driver -import cinder.volume.drivers.disco.disco_api as disco_api -import cinder.volume.drivers.disco.disco_attach_detach as attach_detach - - -class TestDISCODriver(test.TestCase): - """Generic class for the DISCO test case.""" - - DETAIL_OPTIONS = { - 'success': 1, - 'pending': 2, - 'failure': 3 - } - - ERROR_STATUS = 1 - - def setUp(self): - """Initialise variable common to all the test cases.""" - super(TestDISCODriver, self).setUp() - - mock_exec = mock.Mock() - mock_exec.return_value = ('', '') - self.cfg = mock.Mock(spec=conf.Configuration) - self.cfg.disco_client = '127.0.0.1' - self.cfg.disco_client_port = '9898' - self.cfg.disco_wsdl_path = 'somewhere' - self.cfg.disco_volume_name_prefix = 'openstack-' - self.cfg.disco_snapshot_check_timeout = 3600 - self.cfg.disco_restore_check_timeout = 3600 - self.cfg.disco_clone_check_timeout = 3600 - self.cfg.disco_retry_interval = 2 - self.cfg.num_volume_device_scan_tries = 3 - self.cfg.disco_choice_client = 'SOAP' - self.cfg.disco_rest_ip = '127.0.0.1' - - self.FAKE_RESPONSE = { - 'standard': { - 'success': {'status': 0, 'result': 'a normal message'}, - 'fail': {'status': 1, 'result': 'an error message'}} - } - - mock.patch.object(client, - 'Client', - self.create_client).start() - - mock.patch.object(disco_api, - 'DiscoApi', - self.create_client).start() - - mock.patch.object(connector.InitiatorConnector, - 'factory', - self.get_mock_connector).start() - - self.driver = driver.DiscoDriver(execute=mock_exec, - configuration=self.cfg) - self.driver.do_setup(None) - - self.attach_detach = attach_detach.AttachDetachDiscoVolume(self.cfg) - - self.ctx = context.RequestContext('fake', 'fake', 
auth_token=True) - self.volume = fake_volume.fake_volume_obj(self.ctx) - self.volume['volume_id'] = '1234567' - - self.requester = self.driver.client - - def create_client(self, *cmd, **kwargs): - """Mock the client's methods.""" - return FakeClient() - - def get_mock_connector(self, *cmd, **kwargs): - """Mock the os_brick connector.""" - return None - - def get_mock_attribute(self, *cmd, **kwargs): - """Mock the os_brick connector.""" - return 'DISCO' - - def get_fake_volume(self, *cmd, **kwards): - """Return a volume object for the tests.""" - return self.volume - - -class FakeClient(object): - """Fake class to mock client.""" - - def __init__(self, *args, **kwargs): - """Create a fake service attribute.""" - self.service = FakeMethod() - - -class FakeMethod(object): - """Fake class recensing some of the method of the rest client.""" - - def __init__(self, *args, **kwargs): - """Fake class to mock the client.""" - - def volumeCreate(self, *args, **kwargs): - """"Mock function to create a volume.""" - - def volumeDelete(self, *args, **kwargs): - """"Mock function to delete a volume.""" - - def snapshotCreate(self, *args, **kwargs): - """"Mock function to create a snapshot.""" - - def snapshotDetail(self, *args, **kwargs): - """"Mock function to get the snapshot detail.""" - - def snapshotDelete(self, *args, **kwargs): - """"Mock function to delete snapshot.""" - - def restoreFromSnapshot(self, *args, **kwargs): - """"Mock function to create a volume from a snapshot.""" - - def restoreDetail(self, *args, **kwargs): - """"Mock function to detail the restore operation.""" - - def volumeDetail(self, *args, **kwargs): - """Mock function to get the volume detail from its id.""" - - def volumeDetailByName(self, *args, **kwargs): - """"Mock function to get the volume detail from its name.""" - - def volumeClone(self, *args, **kwargs): - """"Mock function to clone a volume.""" - - def cloneDetail(self, *args, **kwargs): - """Mock function to get the clone detail.""" - - 
def volumeExtend(self, *args, **kwargs): - """Mock function to extend a volume.""" - - def systemInformationList(self, *args, **kwargs): - """Mock function to get the backend properties.""" diff --git a/cinder/tests/unit/volume/drivers/disco/test_create_cloned_volume.py b/cinder/tests/unit/volume/drivers/disco/test_create_cloned_volume.py deleted file mode 100644 index 4a77a6f7b..000000000 --- a/cinder/tests/unit/volume/drivers/disco/test_create_cloned_volume.py +++ /dev/null @@ -1,158 +0,0 @@ -# (c) Copyright 2015 Industrial Technology Research Institute. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -"""Test cases for create cloned volume.""" - -import copy -import mock -import six -import time - - -from cinder import exception -from cinder.tests.unit import fake_volume -from cinder.tests.unit import utils as utils -from cinder.tests.unit.volume.drivers import disco - - -class CreateCloneVolumeTestCase(disco.TestDISCODriver): - """Test cases for DISCO connector.""" - - def setUp(self): - """Initialise variables and mock functions.""" - super(CreateCloneVolumeTestCase, self).setUp() - - self.dest_volume = fake_volume.fake_volume_obj(self.ctx) - # Create mock functions for all the call done by the driver.""" - mock.patch.object(self.requester, - 'volumeClone', - self.clone_request).start() - - mock.patch.object(self.requester, - 'cloneDetail', - self.clone_detail_request).start() - - mock.patch.object(self.requester, - 'volumeDetailByName', - self.volume_detail_request).start() - - self.volume_detail_response = { - 'status': 0, - 'volumeInfoResult': - {'volumeId': 1234567} - } - - clone_success = ( - copy.deepcopy(self.FAKE_RESPONSE['standard']['success'])) - clone_pending = ( - copy.deepcopy(self.FAKE_RESPONSE['standard']['success'])) - clone_fail = ( - copy.deepcopy(self.FAKE_RESPONSE['standard']['success'])) - clone_response_fail = ( - copy.deepcopy(self.FAKE_RESPONSE['standard']['success'])) - - clone_success['result'] = ( - six.text_type(self.DETAIL_OPTIONS['success'])) - clone_pending['result'] = ( - six.text_type(self.DETAIL_OPTIONS['pending'])) - clone_fail['result'] = ( - six.text_type(self.DETAIL_OPTIONS['failure'])) - clone_response_fail['status'] = 1 - - self.FAKE_RESPONSE['clone_detail'] = { - 'success': clone_success, - 'fail': clone_fail, - 'pending': clone_pending, - 'request_fail': clone_response_fail - } - - self.response = self.FAKE_RESPONSE['standard']['success'] - self.response['result'] = '1234' - - self.response_detail = ( - self.FAKE_RESPONSE['clone_detail']['success']) - self.test_pending = False - self.test_pending_count = 0 - - def 
clone_request(self, *cmd, **kwargs): - """Mock function for the createVolumeFromSnapshot function.""" - return self.response - - def clone_detail_request(self, *cmd, **kwargs): - """Mock function for the restoreDetail function.""" - if self.test_pending: - if self.test_pending_count == 0: - self.test_pending_count += 1 - return self.FAKE_RESPONSE['clone_detail']['pending'] - else: - return self.FAKE_RESPONSE['clone_detail']['success'] - else: - return self.response_detail - - def volume_detail_request(self, *cmd, **kwargs): - """Mock function for the volumeDetail function.""" - return self.volume_detail_response - - def test_create_cloned_volume(self): - """Normal case.""" - expected = 1234567 - actual = self.driver.create_cloned_volume(self.dest_volume, - self.volume) - self.assertEqual(expected, actual['provider_location']) - - def test_create_clone_volume_fail(self): - """Clone volume request to DISCO fails.""" - self.response = self.FAKE_RESPONSE['standard']['fail'] - self.assertRaises(exception.VolumeBackendAPIException, - self.test_create_cloned_volume) - - def test_create_cloned_volume_fail_not_immediate(self): - """Get clone detail returns that the clone fails.""" - self.response = self.FAKE_RESPONSE['standard']['success'] - self.response_detail = ( - self.FAKE_RESPONSE['clone_detail']['fail']) - self.assertRaises(exception.VolumeBackendAPIException, - self.test_create_cloned_volume) - - def test_create_cloned_volume_fail_not_immediate_response_fail(self): - """Get clone detail request to DISCO fails.""" - self.response = self.FAKE_RESPONSE['standard']['success'] - self.response_detail = ( - self.FAKE_RESPONSE['clone_detail']['request_fail']) - self.assertRaises(exception.VolumeBackendAPIException, - self.test_create_cloned_volume) - - def test_create_cloned_volume_fail_not_immediate_request_fail(self): - """Get clone detail returns the task is pending then complete.""" - self.response = self.FAKE_RESPONSE['standard']['success'] - self.test_pending = True - 
self.test_create_cloned_volume() - - @mock.patch.object(time, 'time') - def test_create_cloned_volume_timeout(self, mock_time): - """Clone request timeout.""" - timeout = 3 - mock_time.side_effect = utils.generate_timeout_series(timeout) - self.driver.configuration.disco_clone_check_timeout = timeout - self.response = self.FAKE_RESPONSE['standard']['success'] - self.response_detail = ( - self.FAKE_RESPONSE['clone_detail']['pending']) - self.assertRaises(exception.VolumeBackendAPIException, - self.test_create_cloned_volume) - - def test_create_cloned_volume_volume_detail_fail(self): - """Get volume detail request to DISCO fails.""" - self.volume_detail_response['status'] = 1 - self.assertRaises(exception.VolumeBackendAPIException, - self.test_create_cloned_volume) diff --git a/cinder/tests/unit/volume/drivers/disco/test_create_snapshot.py b/cinder/tests/unit/volume/drivers/disco/test_create_snapshot.py deleted file mode 100644 index 80945add4..000000000 --- a/cinder/tests/unit/volume/drivers/disco/test_create_snapshot.py +++ /dev/null @@ -1,153 +0,0 @@ -# (c) Copyright 2015 Industrial Technology Research Institute. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -"""Test case for the function create snapshot.""" - - -import copy -import mock -import time - -from cinder import db -from cinder import exception -from cinder.tests.unit import fake_snapshot -from cinder.tests.unit import utils as utils -from cinder.tests.unit.volume.drivers import disco - - -class CreateSnapshotTestCase(disco.TestDISCODriver): - """Test cases for DISCO connector.""" - - def get_fake_volume(self, ctx, id): - """Return fake volume from db calls.""" - return self.volume - - def setUp(self): - """Initialise variables and mock functions.""" - super(CreateSnapshotTestCase, self).setUp() - - self.snapshot = fake_snapshot.fake_snapshot_obj( - self.ctx, **{'volume': self.volume}) - - # Mock db call in the cinder driver - self.mock_object(db.sqlalchemy.api, 'volume_get', - self.get_fake_volume) - - mock.patch.object(self.requester, - 'snapshotCreate', - self.snapshot_request).start() - - mock.patch.object(self.requester, - 'snapshotDetail', - self.snapshot_detail_request).start() - - snapshot_detail_response = { - 'status': 0, - 'snapshotInfoResult': - {'snapshotId': 1234, - 'description': 'a description', - 'createTime': '', - 'expireTime': '', - 'isDeleted': False, - 'status': 0} - } - - snap_success = copy.deepcopy(snapshot_detail_response) - snap_pending = copy.deepcopy(snapshot_detail_response) - snap_fail = copy.deepcopy(snapshot_detail_response) - snap_response_fail = copy.deepcopy(snapshot_detail_response) - snap_success['snapshotInfoResult']['status'] = ( - self.DETAIL_OPTIONS['success']) - snap_pending['snapshotInfoResult']['status'] = ( - self.DETAIL_OPTIONS['pending']) - snap_fail['snapshotInfoResult']['status'] = ( - self.DETAIL_OPTIONS['failure']) - snap_response_fail['status'] = 1 - - self.FAKE_RESPONSE['snapshot_detail'] = { - 'success': snap_success, - 'fail': snap_fail, - 'pending': snap_pending, - 'request_fail': snap_response_fail} - - self.response = ( - self.FAKE_RESPONSE['standard']['success']) - self.response['result'] = 1234 - 
- self.response_detail = ( - self.FAKE_RESPONSE['snapshot_detail']['success']) - self.test_pending = False - - self.test_pending_count = 0 - - def snapshot_request(self, *cmd, **kwargs): - """Mock function for the createSnapshot call.""" - return self.response - - def snapshot_detail_request(self, *cmd, **kwargs): - """Mock function for the snapshotDetail call.""" - if self.test_pending: - if self.test_pending_count == 0: - self.test_pending_count += 1 - return self.FAKE_RESPONSE['snapshot_detail']['pending'] - else: - return self.FAKE_RESPONSE['snapshot_detail']['success'] - else: - return self.response_detail - - def test_create_snapshot(self): - """Normal test case.""" - expected = 1234 - actual = self.driver.create_snapshot(self.volume) - self.assertEqual(expected, actual['provider_location']) - - def test_create_snapshot_fail(self): - """Request to DISCO failed.""" - self.response = self.FAKE_RESPONSE['standard']['fail'] - self.assertRaises(exception.VolumeBackendAPIException, - self.test_create_snapshot) - - def test_create_snapshot_fail_not_immediate(self): - """Request to DISCO failed when monitoring the snapshot details.""" - self.response = self.FAKE_RESPONSE['standard']['success'] - self.response_detail = ( - self.FAKE_RESPONSE['snapshot_detail']['fail']) - self.assertRaises(exception.VolumeBackendAPIException, - self.test_create_snapshot) - - def test_create_snapshot_fail_not_immediate_response_fail(self): - """Request to get the snapshot details returns a failure.""" - self.response = self.FAKE_RESPONSE['standard']['success'] - self.response_detail = ( - self.FAKE_RESPONSE['snapshot_detail']['request_fail']) - self.assertRaises(exception.VolumeBackendAPIException, - self.test_create_snapshot) - - def test_create_snapshot_detail_pending(self): - """Request to get the snapshot detail return pending then success.""" - self.response = self.FAKE_RESPONSE['standard']['success'] - self.test_pending = True - self.test_create_snapshot() - - 
@mock.patch.object(time, 'time') - def test_create_snapshot_timeout(self, mock_time): - """Snapshot request timeout.""" - timeout = 3 - mock_time.side_effect = utils.generate_timeout_series(timeout) - self.driver.configuration.disco_snapshot_check_timeout = timeout - self.response = self.FAKE_RESPONSE['standard']['success'] - self.response_detail = ( - self.FAKE_RESPONSE['snapshot_detail']['pending']) - self.assertRaises(exception.VolumeBackendAPIException, - self.test_create_snapshot) diff --git a/cinder/tests/unit/volume/drivers/disco/test_create_volume.py b/cinder/tests/unit/volume/drivers/disco/test_create_volume.py deleted file mode 100644 index d4006d289..000000000 --- a/cinder/tests/unit/volume/drivers/disco/test_create_volume.py +++ /dev/null @@ -1,53 +0,0 @@ -# (c) Copyright 2015 Industrial Technology Research Institute. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -"""Test case for the create volume function.""" - -import mock - -from cinder import exception -from cinder.tests.unit.volume.drivers import disco - - -class CreateVolumeTestCase(disco.TestDISCODriver): - """Test cases for DISCO connector.""" - - def setUp(self): - """Prepare variables and mock functions.""" - super(CreateVolumeTestCase, self).setUp() - - # Mock the method volumeCreate. 
- mock.patch.object(self.requester, - 'volumeCreate', - self.perform_disco_request).start() - - self.response = self.FAKE_RESPONSE['standard']['success'] - - def perform_disco_request(self, *cmd, **kwargs): - """Mock function for the suds client.""" - return self.response - - def test_create_volume(self): - """Normal case.""" - expected = '1234567' - self.response['result'] = expected - ret = self.driver.create_volume(self.volume) - actual = ret['provider_location'] - self.assertEqual(expected, actual) - - def test_create_volume_fail(self): - """Request to DISCO failed.""" - self.response = self.FAKE_RESPONSE['standard']['fail'] - self.assertRaises(exception.VolumeBackendAPIException, - self.test_create_volume) diff --git a/cinder/tests/unit/volume/drivers/disco/test_create_volume_from_snapshot.py b/cinder/tests/unit/volume/drivers/disco/test_create_volume_from_snapshot.py deleted file mode 100644 index 70fcbe516..000000000 --- a/cinder/tests/unit/volume/drivers/disco/test_create_volume_from_snapshot.py +++ /dev/null @@ -1,166 +0,0 @@ -# (c) Copyright 2015 Industrial Technology Research Institute. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -"""Test case for create volume from snapshot.""" - -import copy -import mock -import time - -from cinder import exception -from cinder.tests.unit import fake_snapshot -from cinder.tests.unit import utils as utils -from cinder.tests.unit.volume.drivers import disco - - -class CreateVolumeFromSnapshotTestCase(disco.TestDISCODriver): - """Test cases for the create volume from snapshot of DISCO connector.""" - - def setUp(self): - """Initialise variables and mock functions.""" - super(CreateVolumeFromSnapshotTestCase, self).setUp() - - self.snapshot = fake_snapshot.fake_snapshot_obj( - self.ctx, **{'volume': self.volume}) - - # Mock restoreFromSnapshot, restoreDetail - # and volume detail since they are in the function path - mock.patch.object(self.requester, - 'restoreFromSnapshot', - self.restore_request).start() - - mock.patch.object(self.requester, - 'restoreDetail', - self.restore_detail_request).start() - - mock.patch.object(self.requester, - 'volumeDetailByName', - self.volume_detail_request).start() - - restore_detail_response = { - 'status': 0, - 'restoreInfoResult': - {'restoreId': 1234, - 'startTime': '', - 'statusPercent': '', - 'volumeName': 'aVolumeName', - 'snapshotId': 1234, - 'status': 0} - } - - self.volume_detail_response = { - 'status': 0, - 'volumeInfoResult': - {'volumeId': 1234567} - } - - rest_success = copy.deepcopy(restore_detail_response) - rest_pending = copy.deepcopy(restore_detail_response) - rest_fail = copy.deepcopy(restore_detail_response) - rest_response_fail = copy.deepcopy(restore_detail_response) - rest_success['restoreInfoResult']['status'] = ( - self.DETAIL_OPTIONS['success']) - rest_pending['restoreInfoResult']['status'] = ( - self.DETAIL_OPTIONS['pending']) - rest_fail['restoreInfoResult']['status'] = ( - self.DETAIL_OPTIONS['failure']) - rest_response_fail['status'] = 1 - - self.FAKE_RESPONSE['restore_detail'] = { - 'success': rest_success, - 'fail': rest_fail, - 'pending': rest_pending, - 'request_fail': rest_response_fail 
- } - - self.response = self.FAKE_RESPONSE['standard']['success'] - self.response['result'] = '1234' - - self.response_detail = ( - self.FAKE_RESPONSE['restore_detail']['success']) - self.test_pending = False - - self.test_pending_count = 0 - - def restore_request(self, *cmd, **kwargs): - """Mock function for the createVolumeFromSnapshot function.""" - return self.response - - def restore_detail_request(self, *cmd, **kwargs): - """Mock function for the restoreDetail function.""" - if self.test_pending: - if self.test_pending_count == 0: - self.test_pending_count += 1 - return self.FAKE_RESPONSE['restore_detail']['pending'] - else: - return self.FAKE_RESPONSE['restore_detail']['success'] - else: - return self.response_detail - - def volume_detail_request(self, *cmd, **kwargs): - """Mock function for the volumeDetail function.""" - return self.volume_detail_response - - def test_create_volume_from_snapshot(self): - """Normal case.""" - expected = 1234567 - actual = self.driver.create_volume_from_snapshot(self.volume, - self.snapshot) - self.assertEqual(expected, actual['provider_location']) - - def test_create_volume_from_snapshot_fail(self): - """Create volume from snapshot request fails.""" - self.response = self.FAKE_RESPONSE['standard']['fail'] - self.assertRaises(exception.VolumeBackendAPIException, - self.test_create_volume_from_snapshot) - - def test_create_volume_from_snapshot_fail_not_immediate(self): - """Get restore details request fails.""" - self.response = self.FAKE_RESPONSE['standard']['success'] - self.response_detail = ( - self.FAKE_RESPONSE['restore_detail']['fail']) - self.assertRaises(exception.VolumeBackendAPIException, - self.test_create_volume_from_snapshot) - - def test_create_volume_from_snapshot_fail_detail_response_fail(self): - """Get restore details reports that restore operation fails.""" - self.response = self.FAKE_RESPONSE['standard']['success'] - self.response_detail = ( - self.FAKE_RESPONSE['restore_detail']['request_fail']) - 
self.assertRaises(exception.VolumeBackendAPIException, - self.test_create_volume_from_snapshot) - - def test_create_volume_from_snapshot_fail_not_immediate_resp_fail(self): - """Get restore details reports that the task is pending, then done.""" - self.response = self.FAKE_RESPONSE['standard']['success'] - self.test_pending = True - self.test_create_volume_from_snapshot() - - @mock.patch.object(time, 'time') - def test_create_volume_from_snapshot_timeout(self, mock_time): - """Create volume from snapshot task timeout.""" - timeout = 3 - mock_time.side_effect = utils.generate_timeout_series(timeout) - self.driver.configuration.disco_restore_check_timeout = timeout - self.response = self.FAKE_RESPONSE['standard']['success'] - self.response_detail = ( - self.FAKE_RESPONSE['restore_detail']['pending']) - self.assertRaises(exception.VolumeBackendAPIException, - self.test_create_volume_from_snapshot) - - def test_create_volume_from_snapshot_volume_detail_fail(self): - """Cannot get the newly created volume information.""" - self.volume_detail_response['status'] = 1 - self.assertRaises(exception.VolumeBackendAPIException, - self.test_create_volume_from_snapshot) diff --git a/cinder/tests/unit/volume/drivers/disco/test_delete_snapshot.py b/cinder/tests/unit/volume/drivers/disco/test_delete_snapshot.py deleted file mode 100644 index e102957c6..000000000 --- a/cinder/tests/unit/volume/drivers/disco/test_delete_snapshot.py +++ /dev/null @@ -1,51 +0,0 @@ -# (c) Copyright 2015 Industrial Technology Research Institute. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the -# License for the specific language governing permissions and limitations -# under the License. - -"""Test case for the delete snapshot function.""" -import mock - -from cinder import exception -from cinder.tests.unit import fake_snapshot -from cinder.tests.unit.volume.drivers import disco - - -class DeleteSnapshotTestCase(disco.TestDISCODriver): - """Test cases to delete DISCO volumes.""" - - def setUp(self): - """Initialise variables and mock functions.""" - super(DeleteSnapshotTestCase, self).setUp() - - # Mock snapshotDelete function. - mock.patch.object(self.requester, - 'snapshotDelete', - self.perform_disco_request).start() - - self.response = self.FAKE_RESPONSE['standard']['success'] - self.snapshot = fake_snapshot.fake_snapshot_obj( - self.ctx, **{'volume': self.volume}) - - def perform_disco_request(self, *cmd, **kwargs): - """Mock function to delete a snapshot.""" - return self.response - - def test_delete_snapshot(self): - """Delete a snapshot.""" - self.driver.delete_snapshot(self.snapshot) - - def test_delete_snapshot_fail(self): - """Make the API returns an error while deleting.""" - self.response = self.FAKE_RESPONSE['standard']['fail'] - self.assertRaises(exception.VolumeBackendAPIException, - self.test_delete_snapshot) diff --git a/cinder/tests/unit/volume/drivers/disco/test_delete_volume.py b/cinder/tests/unit/volume/drivers/disco/test_delete_volume.py deleted file mode 100644 index 8bbf16802..000000000 --- a/cinder/tests/unit/volume/drivers/disco/test_delete_volume.py +++ /dev/null @@ -1,49 +0,0 @@ -# (c) Copyright 2015 Industrial Technology Research Institute. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. 
You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -"""Test case for the delete volume function.""" - -import mock - -from cinder import exception -from cinder.tests.unit.volume.drivers import disco - - -class DeleteVolumeTestCase(disco.TestDISCODriver): - """Test cases to delete DISCO volumes.""" - - def setUp(self): - """Initialise variables and mock functions.""" - super(DeleteVolumeTestCase, self).setUp() - - # Mock volumeDelete function. - mock.patch.object(self.requester, - 'volumeDelete', - self.perform_disco_request).start() - - self.response = self.FAKE_RESPONSE['standard']['success'] - - def perform_disco_request(self, *cmd, **kwargs): - """Mock function to delete a volume.""" - return self.response - - def test_delete_volume(self): - """Delete a volume.""" - self.driver.delete_volume(self.volume) - - def test_delete_volume_fail(self): - """Make the API returns an error while deleting.""" - self.response = self.FAKE_RESPONSE['standard']['fail'] - self.assertRaises(exception.VolumeBackendAPIException, - self.test_delete_volume) diff --git a/cinder/tests/unit/volume/drivers/disco/test_extend_volume.py b/cinder/tests/unit/volume/drivers/disco/test_extend_volume.py deleted file mode 100644 index c1924ac3d..000000000 --- a/cinder/tests/unit/volume/drivers/disco/test_extend_volume.py +++ /dev/null @@ -1,50 +0,0 @@ -# (c) Copyright 2015 Industrial Technology Research Institute. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. 
You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -"""Test cases for the extend volume feature.""" - -import mock - -from cinder import exception -from cinder.tests.unit.volume.drivers import disco - - -class VolumeExtendTestCase(disco.TestDISCODriver): - """Test cases for DISCO connector.""" - - def setUp(self): - """Initialise variables and mock functions.""" - super(VolumeExtendTestCase, self).setUp() - - # Mock function to extend a volume. - mock.patch.object(self.requester, - 'volumeExtend', - self.perform_disco_request).start() - - self.response = self.FAKE_RESPONSE['standard']['success'] - self.new_size = 5 - - def perform_disco_request(self, *cmd, **kwargs): - """Mock volumExtend function from suds client.""" - return self.response - - def test_extend_volume(self): - """Extend a volume, normal case.""" - self.driver.extend_volume(self.volume, self.new_size) - - def test_extend_volume_fail(self): - """Request to DISCO failed.""" - self.response = self.FAKE_RESPONSE['standard']['fail'] - self.assertRaises(exception.VolumeBackendAPIException, - self.test_extend_volume) diff --git a/cinder/tests/unit/volume/drivers/disco/test_manage_existing.py b/cinder/tests/unit/volume/drivers/disco/test_manage_existing.py deleted file mode 100644 index c03669780..000000000 --- a/cinder/tests/unit/volume/drivers/disco/test_manage_existing.py +++ /dev/null @@ -1,133 +0,0 @@ -# (c) Copyright 2016 Industrial Technology Research Institute. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. 
You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -"""Test case for the function manage_existing.""" - -import mock - -from cinder import exception -from cinder.tests.unit.volume.drivers import disco - - -class ManageExistingTestCase(disco.TestDISCODriver): - """Test cases for Disco connector.""" - - def setUp(self): - """Initialize variables and mock functions.""" - super(ManageExistingTestCase, self).setUp() - - # Mock function to extract volume information by its ID - mock.patch.object(self.requester, - 'volumeDetail', - self.perform_disco_request).start() - - # Mock function to extract volume information by its Name - mock.patch.object(self.requester, - 'volumeDetailByName', - self.perform_disco_request).start() - - self.response = {'volumeInfoResult': { - 'volumeName': 'abcdefg', - 'volumeId': 1234567, - 'volSizeMb': 2 - }, - 'status': 0 - } - - self.existing_ref_no_identification = {} - self.existing_ref_with_id = {'source-id': 1234567} - self.existing_ref_with_name = {'source-name': 'abcdefg'} - self.existing_ref_no_identification = self.existing_ref_with_id - - def perform_disco_request(self, *args, **kwargs): - """Mock volumeDetail/volumeDetailByName function from rest client.""" - return self.response - - def call_manage_existing(self): - """Manage an existing volume.""" - self.driver.manage_existing( - self.volume, - self.existing_ref_no_identification) - - def test_manage_existing_no_identification(self): - """Manage an existing volume, no id/name.""" - self.existing_ref_no_identification = {} - self.assertRaises(exception.VolumeBackendAPIException, - self.call_manage_existing) - - def 
test_manage_existing_case_id(self): - """Manage an existing volume, by its id.""" - expected = {'display_name': 'abcdefg'} - ret = self.driver.manage_existing(self.volume, - self.existing_ref_with_id) - actual = {'display_name': ret['display_name']} - self.assertEqual(expected, actual) - - def test_manage_existing_case_name(self): - """Manage an existing volume, by its name.""" - expected = {'provider_location': 1234567} - ret = self.driver.manage_existing(self.volume, - self.existing_ref_with_name) - actual = {'provider_location': ret['provider_location']} - self.assertEqual(expected, actual) - - def test_manage_existing_get_size(self): - """Get size of an existing volume.""" - self.driver.manage_existing_get_size( - self.volume, - self.existing_ref_no_identification) - - def test_manage_existing_get_size_no_identification(self): - """Error while getting size of an existing volume, no id/name.""" - self.existing_ref_no_identification = {} - self.assertRaises(exception.VolumeBackendAPIException, - self.test_manage_existing_get_size) - - def test_manage_existing_get_size_case_id(self): - """Get size of an existing volume, by its id.""" - expected = 2 - ret = self.driver.manage_existing_get_size(self.volume, - self.existing_ref_with_id) - self.assertEqual(expected, ret) - - def test_manage_existing_get_size_case_name(self): - """Get size of an existing volume, by its name.""" - expected = 2 - ret = self.driver.manage_existing_get_size(self.volume, - self.existing_ref_with_name) - self.assertEqual(expected, ret) - - def test_manage_existing_case_id_fail(self): - """Request to DISCO failed.""" - self.response['status'] = 1 - self.assertRaises(exception.VolumeBackendAPIException, - self.test_manage_existing_case_id) - - def test_manage_existing_case_name_fail(self): - """Request to DISCO failed.""" - self.response['status'] = 1 - self.assertRaises(exception.VolumeBackendAPIException, - self.test_manage_existing_case_name) - - def 
test_manage_existing_get_size_case_id_fail(self): - """Request to DISCO failed.""" - self.response['status'] = 1 - self.assertRaises(exception.VolumeBackendAPIException, - self.test_manage_existing_get_size_case_id) - - def test_manage_existing_get_size_case_name_fail(self): - """Request to DISCO failed.""" - self.response['status'] = 1 - self.assertRaises(exception.VolumeBackendAPIException, - self.test_manage_existing_get_size_case_name) diff --git a/cinder/tests/unit/volume/drivers/fusionstorage/__init__.py b/cinder/tests/unit/volume/drivers/fusionstorage/__init__.py deleted file mode 100644 index e69de29bb..000000000 diff --git a/cinder/tests/unit/volume/drivers/fusionstorage/test_dsware.py b/cinder/tests/unit/volume/drivers/fusionstorage/test_dsware.py deleted file mode 100644 index b9cc0e8f5..000000000 --- a/cinder/tests/unit/volume/drivers/fusionstorage/test_dsware.py +++ /dev/null @@ -1,785 +0,0 @@ -# Copyright (c) 2013 - 2016 Huawei Technologies Co., Ltd. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -""" -Unit Tests for Huawei FusionStorage drivers. 
-""" - -import mock -from oslo_config import cfg -from oslo_service import loopingcall - -from cinder import context -from cinder import exception -from cinder.image import image_utils -from cinder import test -from cinder.tests.unit import fake_constants -from cinder.tests.unit import fake_snapshot -from cinder.tests.unit import fake_volume -from cinder.volume import configuration as conf -from cinder.volume.drivers.fusionstorage import dsware -from cinder.volume.drivers.fusionstorage import fspythonapi - - -test_volume = {'name': 'test_vol1', - 'size': 4, - 'volume_metadata': '', - 'host': 'host01@dsware', - 'instance_uuid': None, - 'provider_id': '127.0.0.1', - 'id': fake_constants.VOLUME_ID} - -test_src_volume = {'name': 'test_vol2', - 'size': 4, - 'status': 'available'} - -test_snapshot = { - 'name': 'test_snapshot1', - 'volume_id': fake_constants.VOLUME_ID, - 'volume_size': '4'} - - -class FakeDSWAREDriver(dsware.DSWAREDriver): - def __init__(self): - configuration = conf.Configuration( - [ - cfg.StrOpt('fake'), - ], - None - ) - super(FakeDSWAREDriver, self).__init__(configuration=configuration) - self.dsware_client = fspythonapi.FSPythonApi() - self.manage_ip = '127.0.0.1' - self.pool_type = '1' - - -class DSwareDriverTestCase(test.TestCase): - def setUp(self): - super(DSwareDriverTestCase, self).setUp() - self.driver = FakeDSWAREDriver() - self.context = context.get_admin_context() - self.volume = fake_volume.fake_volume_obj(context=self.context, - **test_volume) - self.scr_volume = fake_volume.fake_volume_obj(context=self.context, - **test_src_volume) - self.snapshot = fake_snapshot.fake_snapshot_obj(context=self.context, - **test_snapshot) - - def test_private_get_dsware_manage_ip(self): - retval = self.driver._get_dsware_manage_ip(self.volume) - self.assertEqual('127.0.0.1', retval) - - test_volume_fail_dict = {'name': 'test_vol', - 'size': 4, - 'volume_metadata': '', - 'host': 'host01@dsware', - 'provider_id': None} - test_volume_fail = 
fake_volume.fake_volume_obj(context=self.context, - **test_volume_fail_dict) - self.assertRaises(exception.CinderException, - self.driver._get_dsware_manage_ip, - test_volume_fail) - - def test_private_get_poolid_from_host(self): - retval = self.driver._get_poolid_from_host( - 'abc@fusionstorage_sas2copy#0') - self.assertEqual('0', retval) - - retval = self.driver._get_poolid_from_host( - 'abc@fusionstorage_sas2copy@0') - self.assertEqual(self.driver.pool_type, retval) - - retval = self.driver._get_poolid_from_host(None) - self.assertEqual(self.driver.pool_type, retval) - - @mock.patch.object(fspythonapi.FSPythonApi, 'create_volume') - @mock.patch.object(fspythonapi.FSPythonApi, 'query_dsware_version') - @mock.patch.object(dsware.DSWAREDriver, '_get_poolid_from_host') - def test_private_create_volume_old_version(self, mock_get_poolid, - mock_query_dsware, - mock_create_volume): - # query_dsware_version return 1, old version - mock_query_dsware.return_value = 1 - mock_create_volume.return_value = 0 - self.driver._create_volume(self.volume.name, - self.volume.size, - True, - 'abc@fusionstorage_sas2copy') - mock_create_volume.assert_called_with(self.volume.name, 0, - self.volume.size, 1) - - self.driver._create_volume(self.volume.name, - self.volume.size, - False, - 'abc@fusionstorage_sas2copy') - mock_create_volume.assert_called_with(self.volume.name, 0, - self.volume.size, 0) - - @mock.patch.object(fspythonapi.FSPythonApi, 'create_volume') - @mock.patch.object(fspythonapi.FSPythonApi, 'query_dsware_version') - @mock.patch.object(dsware.DSWAREDriver, '_get_poolid_from_host') - def test_private_create_volume_new_version(self, mock_get_poolid, - mock_query_dsware, - mock_create_volume): - # query_dsware_version return 0, new version - mock_query_dsware.return_value = 0 - mock_get_poolid.return_value = 0 - mock_create_volume.return_value = 0 - self.driver._create_volume(self.volume.name, - self.volume.size, - True, - 'abcE@fusionstorage_sas2copy#0') - 
mock_create_volume.assert_called_with(self.volume.name, 0, - self.volume.size, 1) - - self.driver._create_volume(self.volume.name, - self.volume.size, - False, - 'abc@fusionstorage_sas2copy#0') - mock_create_volume.assert_called_with(self.volume.name, 0, - self.volume.size, 0) - - mock_query_dsware.return_value = 0 - mock_get_poolid.return_value = 1 - mock_create_volume.return_value = 0 - self.driver._create_volume(self.volume.name, - self.volume.size, - True, - 'abc@fusionstorage_sas2copy#1') - mock_create_volume.assert_called_with(self.volume.name, 1, - self.volume.size, 1) - - self.driver._create_volume(self.volume.name, - self.volume.size, - False, - 'abc@fusionstorage_sas2copy#1') - mock_create_volume.assert_called_with(self.volume.name, 1, - self.volume.size, 0) - - @mock.patch.object(fspythonapi.FSPythonApi, 'create_volume') - @mock.patch.object(fspythonapi.FSPythonApi, 'query_dsware_version') - @mock.patch.object(dsware.DSWAREDriver, '_get_poolid_from_host') - def test_private_create_volume_query_version_fail(self, mock_get_poolid, - mock_query_dsware, - mock_create_volume): - # query_dsware_version return 500015, query dsware version failed! 
- mock_query_dsware.return_value = 500015 - self.assertRaises(exception.CinderException, - self.driver._create_volume, - self.volume.name, - self.volume.size, - True, - 'abc@fusionstorage_sas2copy#0') - self.assertRaises(exception.CinderException, - self.driver._create_volume, - self.volume.name, - self.volume.size, - False, - 'abc@fusionstorage_sas2copy#0') - - @mock.patch.object(fspythonapi.FSPythonApi, 'create_volume') - @mock.patch.object(fspythonapi.FSPythonApi, 'query_dsware_version') - @mock.patch.object(dsware.DSWAREDriver, '_get_poolid_from_host') - def test_private_create_volume_fail(self, mock_get_poolid, - mock_query_dsware, - mock_create_volume): - mock_query_dsware.return_value = 1 - # create_volume return 1, create volume failed - mock_create_volume.return_value = 1 - self.assertRaises(exception.CinderException, - self.driver._create_volume, - self.volume.name, - self.volume.size, - True, - 'abc@fusionstorage_sas2copy#0') - self.assertRaises(exception.CinderException, - self.driver._create_volume, - self.volume.name, - self.volume.size, - False, - 'abc@fusionstorage_sas2copy#0') - - @mock.patch.object(dsware.DSWAREDriver, '_create_volume') - @mock.patch.object(fspythonapi.FSPythonApi, 'get_manage_ip') - def test_create_volume(self, mock_get_manage_ip, mock_create_volume): - # success - mock_get_manage_ip.return_value = self.driver.manage_ip - retval = self.driver.create_volume(self.volume) - self.assertEqual({"provider_id": self.driver.manage_ip}, - retval) - - # failure - mock_create_volume.side_effect = exception.CinderException( - 'DSWARE Create Volume failed!') - - self.assertRaises(exception.CinderException, - self.driver.create_volume, - self.volume) - - @mock.patch.object(fspythonapi.FSPythonApi, 'create_volume_from_snap') - def test_private_create_volume_from_snap(self, mock_create_volume): - mock_create_volume.side_effect = [0, 1] - self.driver._create_volume_from_snap(self.volume.name, - self.volume.size, - self.snapshot.name) - # failure - 
self.assertRaises(exception.CinderException, - self.driver._create_volume_from_snap, - self.volume.name, self.volume.size, - self.snapshot.name) - - @mock.patch.object(fspythonapi.FSPythonApi, 'extend_volume') - def test_extend_volume(self, mock_extend_volume): - mock_extend_volume.return_value = 0 - self.driver.extend_volume(self.volume, 5) - - mock_extend_volume.return_value = 0 - self.assertRaises(exception.CinderException, - self.driver.extend_volume, - self.volume, - 3) - - mock_extend_volume.return_value = 1 - self.assertRaises(exception.CinderException, - self.driver.extend_volume, - self.volume, - 5) - - @mock.patch.object(dsware.DSWAREDriver, '_create_volume_from_snap') - @mock.patch.object(fspythonapi.FSPythonApi, 'get_manage_ip') - def test_create_volume_from_snap(self, mock_manage_ip, mock_create_vol): - # success - mock_manage_ip.return_value = self.driver.manage_ip - retval = self.driver.create_volume_from_snapshot(self.volume, - self.snapshot) - self.assertEqual({"provider_id": self.driver.manage_ip}, - retval) - - # failure - mock_create_vol.side_effect = exception.CinderException( - 'DSWARE:create volume from snap failed') - self.assertRaises(exception.CinderException, - self.driver.create_volume_from_snapshot, - self.volume, self.snapshot) - - @mock.patch.object(fspythonapi.FSPythonApi, 'create_volume_from_volume') - @mock.patch.object(fspythonapi.FSPythonApi, 'get_manage_ip') - @mock.patch.object(dsware.DSWAREDriver, - '_wait_for_create_cloned_volume_finish_timer') - def test_create_cloned_volume(self, mock_wait_finish, - mock_get_manage_ip, mock_create_volume): - # success - mock_create_volume.return_value = None - mock_get_manage_ip.return_value = self.driver.manage_ip - mock_wait_finish.return_value = True - retval = self.driver.create_cloned_volume(self.volume, self.scr_volume) - self.assertEqual({"provider_id": "127.0.0.1"}, retval) - - # failure:create exception - mock_create_volume.return_value = 500015 - 
self.assertRaises(exception.CinderException, - self.driver.create_cloned_volume, - self.volume, self.scr_volume) - # failure:wait exception - mock_create_volume.return_value = None - mock_wait_finish.return_value = False - self.assertRaises(exception.CinderException, - self.driver.create_cloned_volume, - self.volume, self.scr_volume) - - @mock.patch.object(fspythonapi.FSPythonApi, 'query_volume') - def test_private_check_create_cloned_volume_finish(self, - mock_query_volume): - query_result_done = {'result': 0, 'vol_name': 'vol1', - 'father_name': 'vol1_father', 'status': '0', - 'vol_size': '1024', 'real_size': '1024', - 'pool_id': 'pool1', 'create_time': '01/01/2015'} - - query_result_doing = {'result': 0, 'vol_name': 'vol1', - 'father_name': 'vol1_father', 'status': '6', - 'vol_size': '1024', 'real_size': '1024', - 'pool_id': 'pool1', 'create_time': '01/01/2015'} - - mock_query_volume.side_effect = [ - query_result_done, query_result_doing, query_result_doing] - - # success - self.assertRaises(loopingcall.LoopingCallDone, - self.driver._check_create_cloned_volume_finish, - self.volume.name) - - # in the process of creating volume - self.driver.count = self.driver.configuration.clone_volume_timeout - 1 - self.driver._check_create_cloned_volume_finish(self.volume.name) - self.assertEqual(self.driver.configuration.clone_volume_timeout, - self.driver.count) - - # timeout - self.driver.count = self.driver.configuration.clone_volume_timeout - self.assertRaises(loopingcall.LoopingCallDone, - self.driver._check_create_cloned_volume_finish, - self.volume.name) - - @mock.patch.object(dsware.DSWAREDriver, - '_check_create_cloned_volume_finish') - def test_private_wait_for_create_cloned_volume_finish_timer(self, - mock_check): - mock_check.side_effect = [loopingcall.LoopingCallDone(retvalue=True), - loopingcall.LoopingCallDone(retvalue=False)] - retval = self.driver._wait_for_create_cloned_volume_finish_timer( - self.volume.name) - self.assertTrue(retval) - - retval = 
self.driver._wait_for_create_cloned_volume_finish_timer( - self.volume.name) - self.assertFalse(retval) - - def test_private_analyse_output(self): - out = 'ret_code=10\nret_desc=test\ndev_addr=/sda\n' - retval = self.driver._analyse_output(out) - self.assertEqual({'dev_addr': '/sda', - 'ret_desc': 'test', 'ret_code': '10'}, - retval) - - out = 'abcdefg' - retval = self.driver._analyse_output(out) - self.assertEqual({}, retval) - - def test_private_attach_volume(self): - success = ['ret_code=0\nret_desc=success\ndev_addr=/dev/sdb\n', ''] - failure = ['ret_code=50510011\nret_desc=failed\ndev_addr=/dev/sdb\n', - ''] - mock_execute = self.mock_object(self.driver, '_execute') - mock_execute.side_effect = [success, failure] - # attached successful - retval = self.driver._attach_volume(self.volume.name, - self.driver.manage_ip) - self.assertEqual({'dev_addr': '/dev/sdb', - 'ret_desc': 'success', 'ret_code': '0'}, - retval) - # attached failure - retval = self.driver._attach_volume(self.volume.name, - self.driver.manage_ip) - self.assertEqual({'dev_addr': '/dev/sdb', - 'ret_desc': 'failed', 'ret_code': '50510011'}, - retval) - - def test_private_detach_volume(self): - success = ['ret_code=0\nret_desc=success\ndev_addr=/dev/sdb\n', ''] - failure = ['ret_code=50510011\nret_desc=failed\ndev_addr=/dev/sdb\n', - ''] - mock_execute = self.mock_object(self.driver, '_execute') - mock_execute.side_effect = [success, failure] - # detached successful - retval = self.driver._detach_volume(self.volume.name, - self.driver.manage_ip) - self.assertEqual({'dev_addr': '/dev/sdb', - 'ret_desc': 'success', 'ret_code': '0'}, - retval) - # detached failure - retval = self.driver._detach_volume(self.volume.name, - self.driver.manage_ip) - self.assertEqual({'dev_addr': '/dev/sdb', - 'ret_desc': 'failed', - 'ret_code': '50510011'}, - retval) - - def test_private_query_volume_attach(self): - success = ['ret_code=0\nret_desc=success\ndev_addr=/dev/sdb\n', ''] - failure = 
['ret_code=50510011\nret_desc=failed\ndev_addr=/dev/sdb\n', - ''] - mock_execute = self.mock_object(self.driver, '_execute') - mock_execute.side_effect = [success, failure] - # query successful - retval = self.driver._query_volume_attach(self.volume.name, - self.driver.manage_ip) - self.assertEqual({'dev_addr': '/dev/sdb', - 'ret_desc': 'success', - 'ret_code': '0'}, - retval) - # query failure - retval = self.driver._query_volume_attach(self.volume.name, - self.driver.manage_ip) - self.assertEqual({'dev_addr': '/dev/sdb', - 'ret_desc': 'failed', - 'ret_code': '50510011'}, - retval) - - @mock.patch.object(dsware.DSWAREDriver, '_get_dsware_manage_ip') - @mock.patch.object(dsware.DSWAREDriver, '_attach_volume') - @mock.patch.object(image_utils, 'fetch_to_raw') - @mock.patch.object(dsware.DSWAREDriver, '_detach_volume') - def test_copy_image_to_volume(self, mock_detach, mock_fetch, - mock_attach, mock_get_manage_ip): - success = {'ret_code': '0', - 'ret_desc': 'success', - 'dev_addr': '/dev/sdb'} - failure = {'ret_code': '50510011', - 'ret_desc': 'failed', - 'dev_addr': '/dev/sdb'} - context = '' - image_service = '' - image_id = '' - mock_get_manage_ip.return_value = '127.0.0.1' - mock_attach.side_effect = [success, failure, success] - mock_detach.side_effect = [success, failure, failure] - - # success - self.driver.copy_image_to_volume(context, self.volume, image_service, - image_id) - - # failure - attach failure - self.assertRaises(exception.CinderException, - self.driver.copy_image_to_volume, - context, self.volume, image_service, image_id) - - # failure - detach failure - self.assertRaises(exception.CinderException, - self.driver.copy_image_to_volume, - context, self.volume, image_service, image_id) - - @mock.patch.object(dsware.DSWAREDriver, '_get_dsware_manage_ip') - @mock.patch.object(dsware.DSWAREDriver, '_attach_volume') - @mock.patch.object(dsware.DSWAREDriver, '_query_volume_attach') - @mock.patch.object(image_utils, 'upload_volume') - 
@mock.patch.object(dsware.DSWAREDriver, '_detach_volume') - def test_copy_volume_to_image_success(self, mock_detach, mock_upload, - mock_query, mock_attach, - mock_get_manage_ip): - success = {'ret_code': '0', - 'ret_desc': 'success', - 'dev_addr': '/dev/sdb'} - already_attached = {'ret_code': '50151401', - 'ret_desc': 'already_attached', - 'dev_addr': '/dev/sdb'} - context = '' - image_service = '' - image_meta = '' - - mock_get_manage_ip.return_value = '127.0.0.1' - mock_attach.return_value = success - mock_detach.return_value = success - self.driver.copy_volume_to_image(context, self.volume, image_service, - image_meta) - mock_upload.assert_called_with('', '', '', '/dev/sdb') - - mock_attach.return_value = already_attached - mock_query.return_value = success - mock_detach.return_value = success - self.driver.copy_volume_to_image(context, self.volume, image_service, - image_meta) - mock_upload.assert_called_with('', '', '', '/dev/sdb') - - @mock.patch.object(dsware.DSWAREDriver, '_get_dsware_manage_ip') - @mock.patch.object(dsware.DSWAREDriver, '_attach_volume') - @mock.patch.object(dsware.DSWAREDriver, '_query_volume_attach') - @mock.patch.object(image_utils, 'upload_volume') - @mock.patch.object(dsware.DSWAREDriver, '_detach_volume') - def test_copy_volume_to_image_attach_fail(self, mock_detach, mock_upload, - mock_query, mock_attach, - mock_get_manage_ip): - failure = {'ret_code': '50510011', - 'ret_desc': 'failed', - 'dev_addr': '/dev/sdb'} - context = '' - image_service = '' - image_meta = '' - - mock_get_manage_ip.return_value = '127.0.0.1' - mock_attach.return_value = failure - self.assertRaises(exception.CinderException, - self.driver.copy_volume_to_image, - context, self.volume, image_service, image_meta) - mock_attach.return_value = None - self.assertRaises(exception.CinderException, - self.driver.copy_volume_to_image, - context, self.volume, image_service, image_meta) - - @mock.patch.object(dsware.DSWAREDriver, '_get_dsware_manage_ip') - 
@mock.patch.object(dsware.DSWAREDriver, '_attach_volume') - @mock.patch.object(dsware.DSWAREDriver, '_query_volume_attach') - @mock.patch.object(image_utils, 'upload_volume') - @mock.patch.object(dsware.DSWAREDriver, '_detach_volume') - def test_copy_volume_to_image_query_attach_fail(self, mock_detach, - mock_upload, mock_query, - mock_attach, - mock_get_manage_ip): - already_attached = {'ret_code': '50151401', - 'ret_desc': 'already_attached', - 'dev_addr': '/dev/sdb'} - failure = {'ret_code': '50510011', - 'ret_desc': 'failed', - 'dev_addr': '/dev/sdb'} - context = '' - image_service = '' - image_meta = '' - - mock_get_manage_ip.return_value = '127.0.0.1' - mock_attach.return_value = already_attached - mock_query.return_value = failure - self.assertRaises(exception.CinderException, - self.driver.copy_volume_to_image, - context, self.volume, image_service, image_meta) - - mock_query.return_value = None - self.assertRaises(exception.CinderException, - self.driver.copy_volume_to_image, - context, self.volume, image_service, image_meta) - - @mock.patch.object(dsware.DSWAREDriver, '_get_dsware_manage_ip') - @mock.patch.object(dsware.DSWAREDriver, '_attach_volume') - @mock.patch.object(dsware.DSWAREDriver, '_query_volume_attach') - @mock.patch.object(image_utils, 'upload_volume') - @mock.patch.object(dsware.DSWAREDriver, '_detach_volume') - def test_copy_volume_to_image_upload_fail(self, mock_detach, mock_upload, - mock_query, mock_attach, - mock_get_manage_ip): - success = {'ret_code': '0', - 'ret_desc': 'success', - 'dev_addr': '/dev/sdb'} - already_attached = {'ret_code': '50151401', - 'ret_desc': 'already_attached', - 'dev_addr': '/dev/sdb'} - context = '' - image_service = '' - image_meta = '' - - mock_get_manage_ip.return_value = '127.0.0.1' - mock_attach.return_value = already_attached - mock_query.return_value = success - mock_upload.side_effect = exception.CinderException( - 'upload_volume error') - self.assertRaises(exception.CinderException, - 
self.driver.copy_volume_to_image, - context, self.volume, image_service, image_meta) - - @mock.patch.object(fspythonapi.FSPythonApi, 'query_volume') - def test_private_get_volume(self, mock_query): - result_success = {'result': 0} - result_not_exist = {'result': "50150005\n"} - result_exception = {'result': "50510006\n"} - - mock_query.side_effect = [ - result_success, result_not_exist, result_exception] - - retval = self.driver._get_volume(self.volume.name) - self.assertTrue(retval) - - retval = self.driver._get_volume(self.volume.name) - self.assertFalse(retval) - - self.assertRaises(exception.CinderException, - self.driver._get_volume, - self.volume.name) - - @mock.patch.object(fspythonapi.FSPythonApi, 'delete_volume') - def test_private_delete_volume(self, mock_delete): - result_success = 0 - result_not_exist = '50150005\n' - result_being_deleted = '50151002\n' - result_exception = '51050006\n' - - mock_delete.side_effect = [result_success, result_not_exist, - result_being_deleted, result_exception] - - retval = self.driver._delete_volume(self.volume.name) - self.assertTrue(retval) - - retval = self.driver._delete_volume(self.volume.name) - self.assertTrue(retval) - - retval = self.driver._delete_volume(self.volume.name) - self.assertTrue(retval) - - self.assertRaises(exception.CinderException, - self.driver._delete_volume, self.volume.name) - - @mock.patch.object(dsware.DSWAREDriver, '_get_volume') - @mock.patch.object(dsware.DSWAREDriver, '_delete_volume') - def test_delete_volume(self, mock_delete, mock_get): - mock_get.return_value = False - retval = self.driver.delete_volume(self.volume) - self.assertTrue(retval) - - mock_get.return_value = True - mock_delete.return_value = True - retval = self.driver.delete_volume(self.volume) - self.assertTrue(retval) - - mock_get.return_value = True - mock_delete.side_effect = exception.CinderException( - 'delete volume exception') - self.assertRaises(exception.CinderException, - self.driver.delete_volume, - 
self.volume) - - mock_get.side_effect = exception.CinderException( - 'get volume exception') - self.assertRaises(exception.CinderException, - self.driver.delete_volume, - self.volume) - - @mock.patch.object(fspythonapi.FSPythonApi, 'query_snap') - def test_private_get_snapshot(self, mock_query): - result_success = {'result': 0} - result_not_found = {'result': "50150006\n"} - result_exception = {'result': "51050007\n"} - mock_query.side_effect = [result_success, result_not_found, - result_exception] - - retval = self.driver._get_snapshot(self.snapshot.name) - self.assertTrue(retval) - - retval = self.driver._get_snapshot(self.snapshot.name) - self.assertFalse(retval) - - self.assertRaises(exception.CinderException, - self.driver._get_snapshot, - self.snapshot.name) - - @mock.patch.object(fspythonapi.FSPythonApi, 'create_snapshot') - def test_private_create_snapshot(self, mock_create): - mock_create.side_effect = [0, 1] - - self.driver._create_snapshot(self.snapshot.name, - self.volume.name) - - self.assertRaises(exception.CinderException, - self.driver._create_snapshot, - self.snapshot.name, self.volume.name) - - @mock.patch.object(fspythonapi.FSPythonApi, 'delete_snapshot') - def test_private_delete_snapshot(self, mock_delete): - mock_delete.side_effect = [0, 1] - - self.driver._delete_snapshot(self.snapshot.name) - - self.assertRaises(exception.CinderException, - self.driver._delete_snapshot, self.snapshot.name) - - @mock.patch.object(dsware.DSWAREDriver, '_get_volume') - @mock.patch.object(dsware.DSWAREDriver, '_create_snapshot') - def test_create_snapshot(self, mock_create, mock_get): - mock_get.return_value = True - self.driver.create_snapshot(self.snapshot) - - mock_create.side_effect = exception.CinderException( - 'create snapshot failed') - self.assertRaises(exception.CinderException, - self.driver.create_snapshot, self.snapshot) - - mock_get.side_effect = [ - False, exception.CinderException('get volume failed')] - 
self.assertRaises(exception.CinderException, - self.driver.create_snapshot, - self.snapshot) - self.assertRaises(exception.CinderException, - self.driver.create_snapshot, - self.snapshot) - - @mock.patch.object(dsware.DSWAREDriver, '_get_snapshot') - @mock.patch.object(dsware.DSWAREDriver, '_delete_snapshot') - def test_delete_snapshot(self, mock_delete, mock_get): - mock_get.side_effect = [True, False, exception.CinderException, True] - self.driver.delete_snapshot(self.snapshot) - self.driver.delete_snapshot(self.snapshot) - - self.assertRaises(exception.CinderException, - self.driver.delete_snapshot, - self.snapshot) - mock_delete.side_effect = exception.CinderException( - 'delete snapshot exception') - self.assertRaises(exception.CinderException, - self.driver.delete_snapshot, - self.snapshot) - - @mock.patch.object(fspythonapi.FSPythonApi, 'query_pool_info') - def test_private_update_single_pool_info_status(self, mock_query): - pool_info = {'result': 0, - 'pool_id': 10, - 'total_capacity': 10240, - 'used_capacity': 5120, - 'alloc_capacity': 7168} - pool_info_none = {'result': 1} - - mock_query.side_effect = [pool_info, pool_info_none] - - self.driver._update_single_pool_info_status() - self.assertEqual({'total_capacity_gb': 10.0, - 'free_capacity_gb': 5.0, - 'volume_backend_name': None, - 'vendor_name': 'Open Source', - 'driver_version': '1.0', - 'storage_protocol': 'dsware', - 'reserved_percentage': 0, - 'QoS_support': False}, - self.driver._stats) - - self.driver._update_single_pool_info_status() - self.assertIsNone(self.driver._stats) - - @mock.patch.object(fspythonapi.FSPythonApi, 'query_pool_type') - def test_private_update_multi_pool_of_same_type_status(self, mock_query): - query_result = (0, [{'result': 0, - 'pool_id': '0', - 'total_capacity': '10240', - 'used_capacity': '5120', - 'alloc_capacity': '7168'}]) - query_result_none = (0, []) - - mock_query.side_effect = [query_result, query_result_none] - - 
self.driver._update_multi_pool_of_same_type_status() - self.assertEqual({'volume_backend_name': None, - 'vendor_name': 'Open Source', - 'driver_version': '1.0', - 'storage_protocol': 'dsware', - 'pools': [{'pool_name': '0', - 'total_capacity_gb': 10.0, - 'allocated_capacity_gb': 5.0, - 'free_capacity_gb': 5.0, - 'QoS_support': False, - 'reserved_percentage': 0}]}, - self.driver._stats) - - self.driver._update_multi_pool_of_same_type_status() - self.assertIsNone(self.driver._stats) - - def test_private_calculate_pool_info(self): - pool_sets = [{'pool_id': 0, - 'total_capacity': 10240, - 'used_capacity': 5120, - 'QoS_support': False, - 'reserved_percentage': 0}] - retval = self.driver._calculate_pool_info(pool_sets) - self.assertEqual([{'pool_name': 0, - 'total_capacity_gb': 10.0, - 'allocated_capacity_gb': 5.0, - 'free_capacity_gb': 5.0, - 'QoS_support': False, - 'reserved_percentage': 0}], - retval) - - @mock.patch.object(dsware.DSWAREDriver, '_update_single_pool_info_status') - @mock.patch.object(dsware.DSWAREDriver, - '_update_multi_pool_of_same_type_status') - @mock.patch.object(fspythonapi.FSPythonApi, 'query_dsware_version') - def test_get_volume_stats(self, mock_query, mock_type, mock_info): - mock_query.return_value = 1 - - self.driver.get_volume_stats(False) - mock_query.assert_not_called() - - self.driver.get_volume_stats(True) - mock_query.assert_called_once_with() diff --git a/cinder/tests/unit/volume/drivers/fusionstorage/test_fspythonapi.py b/cinder/tests/unit/volume/drivers/fusionstorage/test_fspythonapi.py deleted file mode 100644 index 785334e5e..000000000 --- a/cinder/tests/unit/volume/drivers/fusionstorage/test_fspythonapi.py +++ /dev/null @@ -1,447 +0,0 @@ -# Copyright (c) 2013 - 2016 Huawei Technologies Co., Ltd. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. 
You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -""" -Unit Tests for Huawei FusionStorage drivers. -""" - -import mock - -from cinder import test -from cinder import utils -from cinder.volume.drivers.fusionstorage import fspythonapi - - -class FSPythonApiTestCase(test.TestCase): - - def setUp(self): - super(FSPythonApiTestCase, self).setUp() - self.api = fspythonapi.FSPythonApi() - - @mock.patch.object(fspythonapi.FSPythonApi, 'get_ip_port') - @mock.patch.object(fspythonapi.FSPythonApi, 'get_manage_ip') - @mock.patch.object(utils, 'execute') - def test_start_execute_cmd(self, mock_execute, - mock_get_manage_ip, mock_get_ip_port): - result1 = ['result=0\ndesc=success\n', ''] - result2 = ['result=50150007\ndesc=volume does not exist\n', ''] - result3 = ['result=50150008\ndesc=volume is being deleted\n', ''] - result4 = ['result=50\ndesc=exception\n', ''] - cmd = 'abcdef' - - mock_get_ip_port.return_value = ['127.0.0.1', '128.0.0.1'] - mock_get_manage_ip.return_value = '127.0.0.1' - - mock_execute.return_value = result1 - retval = self.api.start_execute_cmd(cmd, 0) - self.assertEqual('result=0', retval) - - mock_execute.return_value = result2 - retval = self.api.start_execute_cmd(cmd, 0) - self.assertEqual('result=0', retval) - - mock_execute.return_value = result3 - retval = self.api.start_execute_cmd(cmd, 0) - self.assertEqual('result=0', retval) - - mock_execute.return_value = result4 - retval = self.api.start_execute_cmd(cmd, 0) - self.assertEqual('result=50', retval) - - mock_execute.return_value = result1 - retval = self.api.start_execute_cmd(cmd, 1) - self.assertEqual(['result=0', 'desc=success', 
''], retval) - - mock_execute.return_value = result2 - retval = self.api.start_execute_cmd(cmd, 1) - self.assertEqual('result=0', retval) - - mock_execute.return_value = result3 - retval = self.api.start_execute_cmd(cmd, 1) - self.assertEqual('result=0', retval) - - mock_execute.return_value = result4 - retval = self.api.start_execute_cmd(cmd, 1) - self.assertEqual(['result=50', 'desc=exception', ''], retval) - - @mock.patch.object(fspythonapi.FSPythonApi, 'start_execute_cmd') - def test_create_volume(self, mock_start_execute): - mock_start_execute.side_effect = ['result=0\n', - 'result=50150007\n', None] - - retval = self.api.create_volume('volume_name', 'pool_id-123', 1024, 0) - self.assertEqual(0, retval) - - retval = self.api.create_volume('volume_name', 'pool_id-123', 1024, 0) - self.assertEqual('50150007\n', retval) - - retval = self.api.create_volume('volume_name', 'pool_id-123', 1024, 0) - self.assertEqual(1, retval) - - @mock.patch.object(fspythonapi.FSPythonApi, 'start_execute_cmd') - def test_extend_volume(self, mock_start_execute): - mock_start_execute.side_effect = ['result=0\n', - 'result=50150007\n', None] - - retval = self.api.extend_volume('volume_name', 1024) - self.assertEqual(0, retval) - - retval = self.api.extend_volume('volume_name', 1024) - self.assertEqual('50150007\n', retval) - - retval = self.api.extend_volume('volume_name', 1024) - self.assertEqual(1, retval) - - @mock.patch.object(fspythonapi.FSPythonApi, 'start_execute_cmd') - def test_create_volume_from_snap(self, mock_start_execute): - mock_start_execute.side_effect = ['result=0\n', - 'result=50150007\n', None] - - retval = self.api.create_volume_from_snap('volume_name', 1024, - 'snap_name') - self.assertEqual(0, retval) - - retval = self.api.create_volume_from_snap('volume_name', 1024, - 'snap_name') - self.assertEqual('50150007\n', retval) - - retval = self.api.create_volume_from_snap('volume_name', 1024, - 'snap_name') - self.assertEqual(1, retval) - - 
@mock.patch.object(fspythonapi.FSPythonApi, 'start_execute_cmd') - def test_create_fullvol_from_snap(self, mock_start_execute): - mock_start_execute.side_effect = ['result=0\n', - 'result=50150007\n', None] - - retval = self.api.create_fullvol_from_snap('volume_name', 'snap_name') - self.assertEqual(0, retval) - - retval = self.api.create_fullvol_from_snap('volume_name', 'snap_name') - self.assertEqual('50150007\n', retval) - - retval = self.api.create_fullvol_from_snap('volume_name', 'snap_name') - self.assertEqual(1, retval) - - @mock.patch.object(fspythonapi.FSPythonApi, 'create_snapshot') - @mock.patch.object(fspythonapi.FSPythonApi, 'create_volume') - @mock.patch.object(fspythonapi.FSPythonApi, 'delete_snapshot') - @mock.patch.object(fspythonapi.FSPythonApi, 'delete_volume') - @mock.patch.object(fspythonapi.FSPythonApi, 'create_fullvol_from_snap') - def test_create_volume_from_volume(self, mock_create_fullvol, - mock_delete_volume, mock_delete_snap, - mock_create_volume, mock_create_snap): - mock_create_snap.return_value = 0 - mock_create_volume.return_value = 0 - mock_create_fullvol.return_value = 0 - - retval = self.api.create_volume_from_volume('vol_name', 1024, - 'src_vol_name') - self.assertEqual(0, retval) - - mock_create_snap.return_value = 1 - retval = self.api.create_volume_from_volume('vol_name', 1024, - 'src_vol_name') - self.assertEqual(1, retval) - - mock_create_snap.return_value = 0 - mock_create_volume.return_value = 1 - retval = self.api.create_volume_from_volume('vol_name', 1024, - 'src_vol_name') - self.assertEqual(1, retval) - - mock_create_volume.return_value = 0 - self.api.create_fullvol_from_snap.return_value = 1 - retval = self.api.create_volume_from_volume('vol_name', 1024, - 'src_vol_name') - self.assertEqual(1, retval) - - @mock.patch.object(fspythonapi.FSPythonApi, 'create_snapshot') - @mock.patch.object(fspythonapi.FSPythonApi, 'create_volume_from_snap') - def test_create_clone_volume_from_volume(self, mock_volume, mock_snap): - 
mock_snap.side_effect = [0, 1] - mock_volume.side_effect = [0, 1] - retval = self.api.create_clone_volume_from_volume('vol_name', 1024, - 'src_vol_name') - self.assertEqual(0, retval) - retval = self.api.create_clone_volume_from_volume('vol_name', 1024, - 'src_vol_name') - self.assertEqual(1, retval) - - def test_volume_info_analyze_success(self): - vol_info = ('vol_name=vol1,father_name=vol1_father,' - 'status=available,vol_size=1024,real_size=1024,' - 'pool_id=pool1,create_time=01/01/2015') - vol_info_res = {'result': 0, 'vol_name': 'vol1', - 'father_name': 'vol1_father', - 'status': 'available', 'vol_size': '1024', - 'real_size': '1024', 'pool_id': 'pool1', - 'create_time': '01/01/2015'} - - retval = self.api.volume_info_analyze(vol_info) - self.assertEqual(vol_info_res, retval) - - def test_volume_info_analyze_fail(self): - vol_info = '' - vol_info_res = {'result': 1, 'vol_name': '', 'father_name': '', - 'status': '', 'vol_size': '', 'real_size': '', - 'pool_id': '', 'create_time': ''} - retval = self.api.volume_info_analyze(vol_info) - self.assertEqual(vol_info_res, retval) - - @mock.patch.object(fspythonapi.FSPythonApi, 'start_execute_cmd') - @mock.patch.object(fspythonapi.FSPythonApi, 'volume_info_analyze') - @mock.patch.object(fspythonapi.FSPythonApi, 'delete_snapshot') - def test_query_volume(self, mock_delete, mock_analyze, mock_execute): - exec_result = ['result=0\n', - 'vol_name=vol1,father_name=vol1_father,status=0,' + - 'vol_size=1024,real_size=1024,pool_id=pool1,' + - 'create_time=01/01/2015'] - query_result = {'result': 0, 'vol_name': 'vol1', - 'father_name': 'vol1_father', 'status': '0', - 'vol_size': '1024', 'real_size': '1024', - 'pool_id': 'pool1', 'create_time': '01/01/2015'} - mock_delete.return_value = 0 - mock_execute.return_value = exec_result - mock_analyze.return_value = query_result - retval = self.api.query_volume('vol1') - self.assertEqual(query_result, retval) - - exec_result = ['result=0\n', - 
'vol_name=vol1,father_name=vol1_father,status=1,' + - 'vol_size=1024,real_size=1024,pool_id=pool1,' + - 'create_time=01/01/2015'] - query_result = {'result': 0, 'vol_name': 'vol1', - 'father_name': 'vol1_father', 'status': '1', - 'vol_size': '1024', 'real_size': '1024', - 'pool_id': 'pool1', 'create_time': '01/01/2015'} - mock_delete.return_value = 0 - mock_execute.return_value = exec_result - mock_analyze.return_value = query_result - retval = self.api.query_volume('vol1') - self.assertEqual(query_result, retval) - - vol_info_failure = 'result=32500000\n' - failure_res = {'result': 1, 'vol_name': '', 'father_name': '', - 'status': '', 'vol_size': '', 'real_size': '', - 'pool_id': '', 'create_time': ''} - mock_execute.return_value = vol_info_failure - retval = self.api.query_volume('vol1') - self.assertEqual(failure_res, retval) - - vol_info_failure = None - failure_res = {'result': 1, 'vol_name': '', 'father_name': '', - 'status': '', 'vol_size': '', 'real_size': '', - 'pool_id': '', 'create_time': ''} - - mock_execute.return_value = vol_info_failure - retval = self.api.query_volume('vol1') - self.assertEqual(failure_res, retval) - - @mock.patch.object(fspythonapi.FSPythonApi, 'start_execute_cmd') - def test_delete_volume(self, mock_execute): - mock_execute.side_effect = ['result=0\n', - 'result=50150007\n', None] - - retval = self.api.delete_volume('volume_name') - self.assertEqual(0, retval) - - retval = self.api.delete_volume('volume_name') - self.assertEqual('50150007\n', retval) - - retval = self.api.delete_volume('volume_name') - self.assertEqual(1, retval) - - @mock.patch.object(fspythonapi.FSPythonApi, 'start_execute_cmd') - def test_create_snapshot(self, mock_execute): - mock_execute.side_effect = ['result=0\n', - 'result=50150007\n', None] - - retval = self.api.create_snapshot('snap_name', 'vol_name', 0) - self.assertEqual(0, retval) - - retval = self.api.create_snapshot('snap_name', 'vol_name', 0) - self.assertEqual('50150007\n', retval) - - retval = 
self.api.create_snapshot('snap_name', 'vol_name', 0) - self.assertEqual(1, retval) - - def test_snap_info_analyze_success(self): - snap_info = ('snap_name=snap1,father_name=snap1_father,status=0,' - 'snap_size=1024,real_size=1024,pool_id=pool1,' - 'delete_priority=1,create_time=01/01/2015') - snap_info_res = {'result': 0, 'snap_name': 'snap1', - 'father_name': 'snap1_father', 'status': '0', - 'snap_size': '1024', 'real_size': '1024', - 'pool_id': 'pool1', 'delete_priority': '1', - 'create_time': '01/01/2015'} - - retval = self.api.snap_info_analyze(snap_info) - self.assertEqual(snap_info_res, retval) - - def test_snap_info_analyze_fail(self): - snap_info = '' - snap_info_res = {'result': 1, 'snap_name': '', 'father_name': '', - 'status': '', 'snap_size': '', 'real_size': '', - 'pool_id': '', 'delete_priority': '', - 'create_time': ''} - retval = self.api.snap_info_analyze(snap_info) - self.assertEqual(snap_info_res, retval) - - @mock.patch.object(fspythonapi.FSPythonApi, 'start_execute_cmd') - def test_query_snap(self, mock_execute): - exec_result = ['result=0\n', - 'snap_name=snap1,father_name=snap1_father,status=0,' + - 'snap_size=1024,real_size=1024,pool_id=pool1,' + - 'delete_priority=1,create_time=01/01/2015'] - query_result = {'result': 0, 'snap_name': 'snap1', - 'father_name': 'snap1_father', 'status': '0', - 'snap_size': '1024', 'real_size': '1024', - 'pool_id': 'pool1', 'delete_priority': '1', - 'create_time': '01/01/2015'} - mock_execute.return_value = exec_result - retval = self.api.query_snap('snap1') - self.assertEqual(query_result, retval) - - exec_result = ['result=50150007\n'] - qurey_result = {'result': '50150007\n', 'snap_name': '', - 'father_name': '', 'status': '', 'snap_size': '', - 'real_size': '', 'pool_id': '', - 'delete_priority': '', 'create_time': ''} - mock_execute.return_value = exec_result - retval = self.api.query_snap('snap1') - self.assertEqual(qurey_result, retval) - - exec_result = '' - query_result = {'result': 1, 'snap_name': 
'', 'father_name': '', - 'status': '', 'snap_size': '', 'real_size': '', - 'pool_id': '', 'delete_priority': '', - 'create_time': ''} - mock_execute.return_value = exec_result - retval = self.api.query_snap('snap1') - self.assertEqual(query_result, retval) - - @mock.patch.object(fspythonapi.FSPythonApi, 'start_execute_cmd') - def test_delete_snapshot(self, mock_execute): - mock_execute.side_effect = ['result=0\n', - 'result=50150007\n', None] - - retval = self.api.delete_snapshot('snap_name') - self.assertEqual(0, retval) - - retval = self.api.delete_snapshot('snap_name') - self.assertEqual('50150007\n', retval) - - retval = self.api.delete_snapshot('snap_name') - self.assertEqual(1, retval) - - def test_pool_info_analyze(self): - pool_info = 'pool_id=pool100,total_capacity=1024,' + \ - 'used_capacity=500,alloc_capacity=500' - analyze_res = {'result': 0, 'pool_id': 'pool100', - 'total_capacity': '1024', 'used_capacity': '500', - 'alloc_capacity': '500'} - - retval = self.api.pool_info_analyze(pool_info) - self.assertEqual(analyze_res, retval) - - pool_info = '' - analyze_res = {'result': 1, 'pool_id': '', 'total_capacity': '', - 'used_capacity': '', 'alloc_capacity': ''} - retval = self.api.pool_info_analyze(pool_info) - self.assertEqual(analyze_res, retval) - - @mock.patch.object(fspythonapi.FSPythonApi, 'start_execute_cmd') - def test_query_pool_info(self, mock_execute): - exec_result = ['result=0\n', - 'pool_id=0,total_capacity=1024,' + - 'used_capacity=500,alloc_capacity=500\n'] - query_result = {'result': 0, 'pool_id': '0', - 'total_capacity': '1024', 'used_capacity': '500', - 'alloc_capacity': '500'} - mock_execute.return_value = exec_result - retval = self.api.query_pool_info('0') - self.assertEqual(query_result, retval) - - exec_result = ['result=51050008\n'] - query_result = {'result': '51050008\n', 'pool_id': '', - 'total_capacity': '', 'used_capacity': '', - 'alloc_capacity': ''} - mock_execute.return_value = exec_result - retval = 
self.api.query_pool_info('0') - self.assertEqual(query_result, retval) - - exec_result = '' - query_result = {'result': 1, 'pool_id': '', 'total_capacity': '', - 'used_capacity': '', 'alloc_capacity': ''} - mock_execute.return_value = exec_result - retval = self.api.query_pool_info('0') - self.assertEqual(query_result, retval) - - @mock.patch.object(fspythonapi.FSPythonApi, 'start_execute_cmd') - def test_query_pool_type(self, mock_execute): - exec_result = ['result=0\n', - 'pool_id=0,total_capacity=1024,' + - 'used_capacity=500,alloc_capacity=500\n'] - query_result = (0, [{'result': 0, - 'pool_id': '0', 'total_capacity': '1024', - 'used_capacity': '500', 'alloc_capacity': '500'}]) - - mock_execute.return_value = exec_result - retval = self.api.query_pool_type('sata2copy') - self.assertEqual(query_result, retval) - - exec_result = ['result=0\n', - 'pool_id=0,total_capacity=1024,' + - 'used_capacity=500,alloc_capacity=500\n', - 'pool_id=1,total_capacity=2048,' + - 'used_capacity=500,alloc_capacity=500\n'] - query_result = (0, [{'result': 0, 'pool_id': '0', - 'total_capacity': '1024', 'used_capacity': '500', - 'alloc_capacity': '500'}, - {'result': 0, 'pool_id': '1', - 'total_capacity': '2048', 'used_capacity': '500', - 'alloc_capacity': '500'}]) - mock_execute.return_value = exec_result - retval = self.api.query_pool_type('sata2copy') - self.assertEqual(query_result, retval) - - exec_result = ['result=51010015\n'] - query_result = (51010015, []) - mock_execute.return_value = exec_result - retval = self.api.query_pool_type('sata2copy') - self.assertEqual(query_result, retval) - - exec_result = '' - query_result = (0, []) - mock_execute.return_value = exec_result - retval = self.api.query_pool_type('sata2copy') - self.assertEqual(query_result, retval) - - @mock.patch.object(fspythonapi.FSPythonApi, 'start_execute_cmd') - def test_query_dsware_version(self, mock_execute): - mock_execute.side_effect = ['result=0\n', 'result=50500001\n', - 'result=50150007\n', None] - - 
retval = self.api.query_dsware_version() - self.assertEqual(0, retval) - - retval = self.api.query_dsware_version() - self.assertEqual(1, retval) - - retval = self.api.query_dsware_version() - self.assertEqual('50150007\n', retval) - - retval = self.api.query_dsware_version() - self.assertEqual(2, retval) diff --git a/cinder/tests/unit/volume/drivers/hitachi/__init__.py b/cinder/tests/unit/volume/drivers/hitachi/__init__.py deleted file mode 100644 index e69de29bb..000000000 diff --git a/cinder/tests/unit/volume/drivers/hitachi/test_hitachi_hbsd_horcm_fc.py b/cinder/tests/unit/volume/drivers/hitachi/test_hitachi_hbsd_horcm_fc.py deleted file mode 100644 index d8117d774..000000000 --- a/cinder/tests/unit/volume/drivers/hitachi/test_hitachi_hbsd_horcm_fc.py +++ /dev/null @@ -1,1034 +0,0 @@ -# Copyright (C) 2014, 2015, Hitachi, Ltd. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
-# - -""" -Self test for Hitachi Block Storage Driver -""" - -import mock - -from cinder import exception -from cinder import test -from cinder import utils -from cinder.volume import configuration as conf -from cinder.volume.drivers.hitachi import hbsd_basiclib -from cinder.volume.drivers.hitachi import hbsd_common -from cinder.volume.drivers.hitachi import hbsd_fc -from cinder.volume.drivers.hitachi import hbsd_horcm - - -def _exec_raidcom(*args, **kargs): - return HBSDHORCMFCDriverTest.horcm_vals.get(args) - - -def _exec_raidcom_get_ldev_no_stdout(*args, **kargs): - return HBSDHORCMFCDriverTest.horcm_get_ldev_no_stdout.get(args) - - -def _exec_raidcom_get_ldev_no_nml(*args, **kargs): - return HBSDHORCMFCDriverTest.horcm_get_ldev_no_nml.get(args) - - -def _exec_raidcom_get_ldev_no_open_v(*args, **kargs): - return HBSDHORCMFCDriverTest.horcm_get_ldev_no_open_v.get(args) - - -def _exec_raidcom_get_ldev_no_hdp(*args, **kargs): - return HBSDHORCMFCDriverTest.horcm_get_ldev_no_hdp.get(args) - - -def _exec_raidcom_get_ldev_pair(*args, **kargs): - return HBSDHORCMFCDriverTest.horcm_get_ldev_pair.get(args) - - -def _exec_raidcom_get_ldev_permit(*args, **kargs): - return HBSDHORCMFCDriverTest.horcm_get_ldev_permit.get(args) - - -def _exec_raidcom_get_ldev_invalid_size(*args, **kargs): - return HBSDHORCMFCDriverTest.horcm_get_ldev_invalid_size.get(args) - - -def _exec_raidcom_get_ldev_num_port(*args, **kargs): - return HBSDHORCMFCDriverTest.horcm_get_ldev_num_port.get(args) - - -class HBSDHORCMFCDriverTest(test.TestCase): - """Test HBSDHORCMFCDriver.""" - - raidqry_result = "DUMMY\n\ -Ver&Rev: 01-31-03/06" - - raidcom_get_host_grp_result = "DUMMY\n\ -CL1-A 0 HBSD-127.0.0.1 None -\n\ -CL1-A 1 - None -" - - raidcom_get_result = "LDEV : 0\n\ -VOL_TYPE : OPEN-V-CVS\n\ -LDEV : 1\n\ -VOL_TYPE : NOT DEFINED" - - raidcom_get_result2 = "DUMMY\n\ -LDEV : 1\n\ -DUMMY\n\ -DUMMY\n\ -VOL_TYPE : OPEN-V-CVS\n\ -VOL_ATTR : CVS : HDP\n\ -VOL_Capacity(BLK) : 2097152\n\ -NUM_PORT : 0\n\ -STS 
: NML" - - raidcom_get_result3 = "Serial# : 210944\n\ -LDEV : 0\n\ -SL : 0\n\ -CL : 0\n\ -VOL_TYPE : NOT DEFINED\n\ -VOL_Capacity(BLK) : 2098560\n\ -NUM_LDEV : 1\n\ -LDEVs : 0\n\ -NUM_PORT : 3\n\ -PORTs : CL3-A-41 42 R7000001 : CL8-B-20 8 R7000000 : CL6-A-10 25 R7000000\n\ -F_POOLID : NONE\n\ -VOL_ATTR : CVS\n\ -RAID_LEVEL : RAID5\n\ -RAID_TYPE : 3D+1P\n\ -NUM_GROUP : 1\n\ -RAID_GROUPs : 01-01\n\ -DRIVE_TYPE : DKR5C-J600SS\n\ -DRIVE_Capa : 1143358736\n\ -LDEV_NAMING : test\n\ -STS : NML\n\ -OPE_TYPE : NONE\n\ -OPE_RATE : 100\n\ -MP# : 0\n\ -SSID : 0004" - - raidcom_get_command_status_result = "HANDLE SSB1 SSB2 ERR_CNT\ - Serial# Description\n\ -00d4 - - 0 210944 -" - - raidcom_get_result4 = "Serial# : 210944\n\ -LDEV : 0\n\ -SL : 0\n\ -CL : 0\n\ -VOL_TYPE : DEFINED\n\ -VOL_Capacity(BLK) : 2098560\n\ -NUM_LDEV : 1\n\ -LDEVs : 0\n\ -NUM_PORT : 3\n\ -PORTs : CL3-A-41 42 R7000001 : CL8-B-20 8 R7000000 : CL6-A-10 25 R7000000\n\ -F_POOLID : NONE\n\ -VOL_ATTR : CVS\n\ -RAID_LEVEL : RAID5\n\ -RAID_TYPE : 3D+1P\n\ -NUM_GROUP : 1\n\ -RAID_GROUPs : 01-01\n\ -DRIVE_TYPE : DKR5C-J600SS\n\ -DRIVE_Capa : 1143358736\n\ -LDEV_NAMING : test\n\ -STS : NML\n\ -OPE_TYPE : NONE\n\ -OPE_RATE : 100\n\ -MP# : 0\n\ -SSID : 0004" - - raidcom_get_copy_grp_result = "DUMMY\n\ -HBSD-127.0.0.1None1A31 HBSD-127.0.0.1None1A31P - - None\n\ -HBSD-127.0.0.1None1A31 HBSD-127.0.0.1None1A31S - - None" - - raidcom_get_device_grp_result1 = "DUMMY\n\ -HBSD-127.0.0.1None1A31P HBSD-ldev-0-2 0 None" - - raidcom_get_device_grp_result2 = "DUMMY\n\ -HBSD-127.0.0.1None1A31S HBSD-ldev-0-2 2 None" - - raidcom_get_snapshot_result = "DUMMY\n\ -HBSD-sanp P-VOL PSUS None 0 3 3 18 100 G--- 53ee291f\n\ -HBSD-sanp P-VOL PSUS None 0 4 4 18 100 G--- 53ee291f" - - raidcom_dp_pool_result = "DUMMY \n\ -030 POLN 0 6006 6006 75 80 1 14860 32 167477" - - raidcom_port_result = "DUMMY\n\ -CL1-A FIBRE TAR AUT 01 Y PtoP Y 0 None 50060E801053C2E0 -" - - raidcom_port_result2 = "DUMMY\n\ -CL1-A 12345678912345aa None -\n\ -CL1-A 
12345678912345bb None -" - - raidcom_host_grp_result = "DUMMY\n\ -CL1-A 0 HBSD-127.0.0.1 None LINUX/IRIX" - - raidcom_hba_wwn_result = "DUMMY\n\ -CL1-A 0 HBSD-127.0.0.1 12345678912345aa None -" - - raidcom_get_lun_result = "DUMMY\n\ -CL1-A 0 LINUX/IRIX 254 1 5 - None" - - pairdisplay_result = "DUMMY\n\ -HBSD-127.0.0.1None1A31 HBSD-ldev-0-2 L CL1-A-0 0 0 0 None 0 P-VOL PSUS None 2\ - -\n\ -HBSD-127.0.0.1None1A31 HBSD-ldev-0-2 R CL1-A-0 0 0 0 None 2 S-VOL SSUS - 0 -" - - pairdisplay_result2 = "DUMMY\n\ -HBSD-127.0.0.1None1A30 HBSD-ldev-1-1 L CL1-A-1 0 0 0 None 1 P-VOL PAIR None 1\ - -\n\ -HBSD-127.0.0.1None1A30 HBSD-ldev-1-1 R CL1-A-1 0 0 0 None 1 S-VOL PAIR - 1 -" - - horcm_vals = { - ('raidqry', u'-h'): - [0, "%s" % raidqry_result, ""], - ('raidcom', '-login user pasword'): - [0, "", ""], - ('raidcom', u'get host_grp -port CL1-A -key host_grp'): - [0, "%s" % raidcom_get_host_grp_result, ""], - ('raidcom', u'add host_grp -port CL1-A-1 -host_grp_name HBSD-pair00'): - [0, "", ""], - ('raidcom', - u'add host_grp -port CL1-A-1 -host_grp_name HBSD-127.0.0.2'): - [0, "", ""], - ('raidcom', u'delete host_grp -port CL1-A-1 HBSD-127.0.0.2'): - [1, "", ""], - ('raidcom', 'get ldev -ldev_id 0 -cnt 2'): - [0, "%s" % raidcom_get_result, ""], - ('raidcom', - 'add ldev -pool 30 -ldev_id 1 -capacity 128G -emulation OPEN-V'): - [0, "", ""], - ('raidcom', - 'add ldev -pool 30 -ldev_id 1 -capacity 256G -emulation OPEN-V'): - [1, "", "SSB=0x2E22,0x0001"], - ('raidcom', 'get command_status'): - [0, "%s" % raidcom_get_command_status_result, ""], - ('raidcom', 'get ldev -ldev_id 1'): - [0, "%s" % raidcom_get_result2, ""], - ('raidcom', 'get ldev -ldev_id 1 -check_status NML -time 120'): - [0, "", ""], - ('raidcom', 'get snapshot -ldev_id 0'): - [0, "", ""], - ('raidcom', 'get snapshot -ldev_id 1'): - [0, "%s" % raidcom_get_snapshot_result, ""], - ('raidcom', 'get snapshot -ldev_id 2'): - [0, "", ""], - ('raidcom', 'get snapshot -ldev_id 3'): - [0, "", ""], - ('raidcom', 'get copy_grp'): - 
[0, "%s" % raidcom_get_copy_grp_result, ""], - ('raidcom', 'delete ldev -ldev_id 0'): - [0, "", ""], - ('raidcom', 'delete ldev -ldev_id 1'): - [0, "", ""], - ('raidcom', 'delete ldev -ldev_id 2'): - [1, "", "error"], - ('raidcom', 'delete ldev -ldev_id 3'): - [1, "", "SSB=0x2E20,0x0000"], - ('raidcom', 'get device_grp -device_grp_name HBSD-127.0.0.1None1A30P'): - [0, "", ""], - ('raidcom', 'get device_grp -device_grp_name HBSD-127.0.0.1None1A30S'): - [0, "", ""], - ('raidcom', 'get device_grp -device_grp_name HBSD-127.0.0.1None1A31P'): - [0, "%s" % raidcom_get_device_grp_result1, ""], - ('raidcom', 'get device_grp -device_grp_name HBSD-127.0.0.1None1A31S'): - [0, "%s" % raidcom_get_device_grp_result2, ""], - ('pairdisplay', '-g HBSD-127.0.0.1None1A30 -CLI'): - [0, "", ""], - ('pairdisplay', '-g HBSD-127.0.0.1None1A30 -d HBSD-ldev-0-1 -CLI'): - [0, "", ""], - ('pairdisplay', '-g HBSD-127.0.0.1None1A31 -CLI'): - [0, "%s" % pairdisplay_result, ""], - ('pairdisplay', '-g HBSD-127.0.0.1None1A31 -d HBSD-ldev-0-2 -CLI'): - [0, "%s" % pairdisplay_result, ""], - ('pairdisplay', '-g HBSD-127.0.0.1None1A30 -d HBSD-ldev-1-1 -CLI'): - [0, "%s" % pairdisplay_result2, ""], - ('raidcom', - 'add device_grp -device_grp_name HBSD-127.0.0.1None1A30P \ -HBSD-ldev-0-1 -ldev_id 0'): - [0, "", ""], - ('raidcom', - 'add device_grp -device_grp_name HBSD-127.0.0.1None1A30S \ -HBSD-ldev-0-1 -ldev_id 1'): - [0, "", ""], - ('raidcom', - 'add device_grp -device_grp_name HBSD-127.0.0.1None1A30P \ -HBSD-ldev-1-1 -ldev_id 1'): - [0, "", ""], - ('raidcom', - 'add device_grp -device_grp_name HBSD-127.0.0.1None1A30S \ -HBSD-ldev-1-1 -ldev_id 1'): - [0, "", ""], - ('raidcom', - 'add copy_grp -copy_grp_name HBSD-127.0.0.1None1A30 \ -HBSD-127.0.0.1None1A30P HBSD-127.0.0.1None1A30S -mirror_id 0'): - [0, "", ""], - ('paircreate', '-g HBSD-127.0.0.1None1A30 -d HBSD-ldev-0-1 \ --split -fq quick -c 3 -vl'): - [0, "", ""], - ('paircreate', '-g HBSD-127.0.0.1None1A30 -d HBSD-ldev-1-1 \ --split -fq quick -c 3 
-vl'): - [0, "", ""], - ('pairevtwait', '-g HBSD-127.0.0.1None1A30 -d HBSD-ldev-0-1 -nowait'): - [4, "", ""], - ('pairevtwait', '-g HBSD-127.0.0.1None1A30 -d HBSD-ldev-0-1 -nowaits'): - [4, "", ""], - ('pairevtwait', '-g HBSD-127.0.0.1None1A31 -d HBSD-ldev-0-2 -nowait'): - [1, "", ""], - ('pairevtwait', '-g HBSD-127.0.0.1None1A31 -d HBSD-ldev-0-2 -nowaits'): - [1, "", ""], - ('pairevtwait', '-g HBSD-127.0.0.1None1A30 -d HBSD-ldev-1-1 -nowait'): - [4, "", ""], - ('pairevtwait', '-g HBSD-127.0.0.1None1A30 -d HBSD-ldev-1-1 -nowaits'): - [200, "", ""], - ('pairsplit', '-g HBSD-127.0.0.1None1A31 -d HBSD-ldev-0-2 -S'): - [0, "", ""], - ('raidcom', 'extend ldev -ldev_id 0 -capacity 128G'): - [0, "", ""], - ('raidcom', 'get dp_pool'): - [0, "%s" % raidcom_dp_pool_result, ""], - ('raidcom', 'get port'): - [0, "%s" % raidcom_port_result, ""], - ('raidcom', 'get port -port CL1-A'): - [0, "%s" % raidcom_port_result2, ""], - ('raidcom', 'get host_grp -port CL1-A'): - [0, "%s" % raidcom_host_grp_result, ""], - ('raidcom', 'get hba_wwn -port CL1-A-0'): - [0, "%s" % raidcom_hba_wwn_result, ""], - ('raidcom', 'get hba_wwn -port CL1-A-1'): - [0, "", ""], - ('raidcom', 'add hba_wwn -port CL1-A-0 -hba_wwn 12345678912345bb'): - [0, "", ""], - ('raidcom', 'add hba_wwn -port CL1-A-1 -hba_wwn 12345678912345bb'): - [1, "", ""], - ('raidcom', u'get lun -port CL1-A-0'): - [0, "%s" % raidcom_get_lun_result, ""], - ('raidcom', u'get lun -port CL1-A-1'): - [0, "", ""], - ('raidcom', u'add lun -port CL1-A-0 -ldev_id 0 -lun_id 0'): - [0, "", ""], - ('raidcom', u'add lun -port CL1-A-0 -ldev_id 1 -lun_id 0'): - [0, "", ""], - ('raidcom', u'add lun -port CL1-A-1 -ldev_id 0 -lun_id 0'): - [0, "", ""], - ('raidcom', u'add lun -port CL1-A-1 -ldev_id 1 -lun_id 0'): - [0, "", ""], - ('raidcom', u'delete lun -port CL1-A-0 -ldev_id 0'): - [0, "", ""], - ('raidcom', u'delete lun -port CL1-A-0 -ldev_id 1'): - [0, "", ""], - ('raidcom', u'delete lun -port CL1-A-1 -ldev_id 0'): - [0, "", ""], - ('raidcom', 
u'delete lun -port CL1-A-1 -ldev_id 2'): - [0, "", ""], - ('raidcom', u'delete lun -port CL1-A-1 -ldev_id 1'): - [1, "", ""]} - - horcm_get_ldev_no_stdout = { - ('raidcom', 'get ldev -ldev_id 1'): - [0, "", ""]} - - raidcom_get_ldev_no_nml = "DUMMY\n\ -LDEV : 1\n\ -DUMMY\n\ -DUMMY\n\ -VOL_TYPE : OPEN-V-CVS\n\ -VOL_ATTR : CVS : HDP\n\ -VOL_Capacity(BLK) : 2097152\n\ -NUM_PORT : 0\n\ -STS :" - - horcm_get_ldev_no_nml = { - ('raidcom', 'get ldev -ldev_id 1'): - [0, "%s" % raidcom_get_ldev_no_nml, ""]} - - raidcom_get_ldev_no_open_v = "DUMMY\n\ -LDEV : 1\n\ -DUMMY\n\ -DUMMY\n\ -VOL_TYPE : CVS\n\ -VOL_ATTR : CVS : HDP\n\ -VOL_Capacity(BLK) : 2097152\n\ -NUM_PORT : 0\n\ -STS : NML" - - horcm_get_ldev_no_open_v = { - ('raidcom', 'get ldev -ldev_id 1'): - [0, "%s" % raidcom_get_ldev_no_open_v, ""]} - - raidcom_get_ldev_no_hdp = "DUMMY\n\ -LDEV : 1\n\ -DUMMY\n\ -DUMMY\n\ -VOL_TYPE : OPEN-V-CVS\n\ -VOL_ATTR : CVS :\n\ -VOL_Capacity(BLK) : 2097152\n\ -NUM_PORT : 0\n\ -STS : NML" - - horcm_get_ldev_no_hdp = { - ('raidcom', 'get ldev -ldev_id 1'): - [0, "%s" % raidcom_get_ldev_no_hdp, ""]} - - raidcom_get_ldev_pair = "DUMMY\n\ -LDEV : 1\n\ -DUMMY\n\ -DUMMY\n\ -VOL_TYPE : OPEN-V-CVS\n\ -VOL_ATTR : HORC : HDP\n\ -VOL_Capacity(BLK) : 2097152\n\ -NUM_PORT : 0\n\ -STS : NML" - - horcm_get_ldev_pair = { - ('raidcom', 'get ldev -ldev_id 1'): - [0, "%s" % raidcom_get_ldev_pair, ""]} - - raidcom_get_ldev_permit = "DUMMY\n\ -LDEV : 1\n\ -DUMMY\n\ -DUMMY\n\ -VOL_TYPE : OPEN-V-CVS\n\ -VOL_ATTR : XXX : HDP\n\ -VOL_Capacity(BLK) : 2097152\n\ -NUM_PORT : 0\n\ -STS : NML" - - horcm_get_ldev_permit = { - ('raidcom', 'get ldev -ldev_id 1'): - [0, "%s" % raidcom_get_ldev_permit, ""]} - - raidcom_get_ldev_invalid_size = "DUMMY\n\ -LDEV : 1\n\ -DUMMY\n\ -DUMMY\n\ -VOL_TYPE : OPEN-V-CVS\n\ -VOL_ATTR : CVS : HDP\n\ -VOL_Capacity(BLK) : 2097151\n\ -NUM_PORT : 0\n\ -STS : NML" - - horcm_get_ldev_invalid_size = { - ('raidcom', 'get ldev -ldev_id 1'): - [0, "%s" % raidcom_get_ldev_invalid_size, ""]} - - 
raidcom_get_ldev_num_port = "DUMMY\n\ -LDEV : 1\n\ -DUMMY\n\ -DUMMY\n\ -VOL_TYPE : OPEN-V-CVS\n\ -VOL_ATTR : CVS : HDP\n\ -VOL_Capacity(BLK) : 2097152\n\ -NUM_PORT : 1\n\ -STS : NML" - - horcm_get_ldev_num_port = { - ('raidcom', 'get ldev -ldev_id 1'): - [0, "%s" % raidcom_get_ldev_num_port, ""]} - -# The following information is passed on to tests, when creating a volume - - _VOLUME = {'size': 128, 'volume_type': None, 'source_volid': '0', - 'provider_location': '0', 'name': 'test', - 'id': 'abcdefg', 'snapshot_id': '0', 'status': 'available'} - - test_volume = {'name': 'test_volume', 'size': 128, - 'id': 'test-volume', - 'provider_location': '1', 'status': 'available'} - - test_volume_larger = {'name': 'test_volume', 'size': 256, - 'id': 'test-volume', - 'provider_location': '1', 'status': 'available'} - - test_volume_error = {'name': 'test_volume', 'size': 256, - 'id': 'test-volume', - 'status': 'creating'} - - test_volume_error2 = {'name': 'test_volume2', 'size': 128, - 'id': 'test-volume2', - 'provider_location': '1', 'status': 'available'} - - test_volume_error3 = {'name': 'test_volume3', 'size': 128, - 'id': 'test-volume3', - 'volume_metadata': [{'key': 'type', - 'value': 'V-VOL'}], - 'provider_location': '1', 'status': 'available'} - - test_volume_error4 = {'name': 'test_volume4', 'size': 128, - 'id': 'test-volume2', - 'provider_location': '3', 'status': 'available'} - - test_volume_error5 = {'name': 'test_volume', 'size': 256, - 'id': 'test-volume', - 'provider_location': '1', 'status': 'available'} - - test_snapshot = {'volume_name': 'test', 'size': 128, - 'volume_size': 128, 'name': 'test-snap', - 'volume_id': 0, 'id': 'test-snap-0', 'volume': _VOLUME, - 'provider_location': '0', 'status': 'available'} - - test_snapshot_error = {'volume_name': 'test', 'size': 128, - 'volume_size': 128, 'name': 'test-snap', - 'volume_id': 0, 'id': 'test-snap-0', - 'volume': _VOLUME, - 'provider_location': '2', 'status': 'available'} - - test_snapshot_error2 = 
{'volume_name': 'test', 'size': 128, - 'volume_size': 128, 'name': 'test-snap', - 'volume_id': 0, 'id': 'test-snap-0', - 'volume': _VOLUME, - 'provider_location': '1', 'status': 'available'} - - SERIAL_NUM = '210944' - test_existing_ref = {'ldev': '1', 'serial_number': SERIAL_NUM} - test_existing_none_ldev_ref = {'ldev': None, - 'serial_number': SERIAL_NUM} - test_existing_invalid_ldev_ref = {'ldev': 'AAA', - 'serial_number': SERIAL_NUM} - test_existing_no_ldev_ref = {'serial_number': SERIAL_NUM} - test_existing_none_serial_ref = {'ldev': '1', 'serial_number': None} - test_existing_invalid_serial_ref = {'ldev': '1', 'serial_number': '999999'} - test_existing_no_serial_ref = {'ldev': '1'} - - def __init__(self, *args, **kwargs): - super(HBSDHORCMFCDriverTest, self).__init__(*args, **kwargs) - - @mock.patch.object(utils, 'brick_get_connector_properties', - return_value={'ip': '127.0.0.1', - 'wwpns': ['12345678912345aa']}) - @mock.patch.object(hbsd_horcm.HBSDHORCM, 'exec_raidcom', - side_effect=_exec_raidcom) - @mock.patch.object(hbsd_basiclib, 'get_process_lock') - @mock.patch.object(utils, 'execute', - return_value=['%s' % raidqry_result, '']) - def setUp(self, arg1, arg2, arg3, arg4): - super(HBSDHORCMFCDriverTest, self).setUp() - self._setup_config() - self._setup_driver() - self.driver.check_param() - self.driver.common.pair_flock = hbsd_basiclib.NopLock() - self.driver.common.command = hbsd_horcm.HBSDHORCM(self.configuration) - self.driver.common.command.horcmgr_flock = hbsd_basiclib.NopLock() - self.driver.common.create_lock_file() - self.driver.common.command.connect_storage() - self.driver.max_hostgroups = \ - self.driver.common.command.get_max_hostgroups() - self.driver.add_hostgroup() - self.driver.output_param_to_log() - self.driver.do_setup_status.set() - - def _setup_config(self): - self.configuration = mock.Mock(conf.Configuration) - self.configuration.hitachi_pool_id = 30 - self.configuration.hitachi_thin_pool_id = 31 - 
self.configuration.hitachi_target_ports = "CL1-A" - self.configuration.hitachi_debug_level = 0 - self.configuration.hitachi_serial_number = "None" - self.configuration.hitachi_unit_name = None - self.configuration.hitachi_group_request = True - self.configuration.hitachi_group_range = None - self.configuration.hitachi_zoning_request = False - self.configuration.config_group = "None" - self.configuration.hitachi_ldev_range = "0-1" - self.configuration.hitachi_default_copy_method = 'FULL' - self.configuration.hitachi_copy_check_interval = 1 - self.configuration.hitachi_async_copy_check_interval = 1 - self.configuration.hitachi_copy_speed = 3 - self.configuration.hitachi_horcm_add_conf = True - self.configuration.hitachi_horcm_numbers = "409,419" - self.configuration.hitachi_horcm_user = "user" - self.configuration.hitachi_horcm_password = "pasword" - self.configuration.hitachi_horcm_resource_lock_timeout = 600 - - def _setup_driver(self): - self.driver = hbsd_fc.HBSDFCDriver( - configuration=self.configuration) - context = None - db = None - self.driver.common = hbsd_common.HBSDCommon( - self.configuration, self.driver, context, db) - -# API test cases - @mock.patch.object(hbsd_basiclib, 'get_process_lock') - @mock.patch.object(hbsd_common.HBSDCommon, 'get_volume_metadata') - @mock.patch.object(hbsd_horcm.HBSDHORCM, 'exec_raidcom', - side_effect=_exec_raidcom) - def test_create_volume(self, arg1, arg2, arg3): - """test create_volume.""" - ret = self.driver.create_volume(self._VOLUME) - vol = self._VOLUME.copy() - vol['provider_location'] = ret['provider_location'] - self.assertEqual('1', vol['provider_location']) - - @mock.patch.object(hbsd_basiclib, 'get_process_lock') - @mock.patch.object(hbsd_common.HBSDCommon, 'get_volume_metadata') - @mock.patch.object(hbsd_horcm.HBSDHORCM, 'exec_raidcom', - side_effect=_exec_raidcom) - def test_create_volume_error(self, arg1, arg2, arg3): - """test create_volume.""" - self.assertRaises(exception.HBSDError, 
self.driver.create_volume, - self.test_volume_error) - - @mock.patch.object(hbsd_basiclib, 'get_process_lock') - @mock.patch.object(hbsd_horcm.HBSDHORCM, 'exec_raidcom', - side_effect=_exec_raidcom) - def test_get_volume_stats(self, arg1, arg2): - """test get_volume_stats.""" - stats = self.driver.get_volume_stats(True) - self.assertEqual('Hitachi', stats['vendor_name']) - - @mock.patch.object(hbsd_basiclib, 'get_process_lock') - @mock.patch.object(hbsd_horcm.HBSDHORCM, 'exec_raidcom', - side_effect=_exec_raidcom) - def test_get_volume_stats_error(self, arg1, arg2): - """test get_volume_stats.""" - self.configuration.hitachi_pool_id = 29 - stats = self.driver.get_volume_stats(True) - self.assertEqual({}, stats) - self.configuration.hitachi_pool_id = 30 - - @mock.patch.object(hbsd_basiclib, 'get_process_lock') - @mock.patch.object(hbsd_horcm.HBSDHORCM, 'exec_raidcom', - side_effect=_exec_raidcom) - @mock.patch.object(hbsd_horcm.HBSDHORCM, 'start_horcm', - return_value=[0, "", ""]) - @mock.patch.object(hbsd_horcm.HBSDHORCM, 'check_horcm', - return_value=[0, "", ""]) - def test_extend_volume(self, arg1, arg2, arg3, arg4): - """test extend_volume.""" - self.driver.extend_volume(self._VOLUME, 256) - - @mock.patch.object(hbsd_basiclib, 'get_process_lock') - @mock.patch.object(hbsd_horcm.HBSDHORCM, 'exec_raidcom', - side_effect=_exec_raidcom) - @mock.patch.object(hbsd_horcm.HBSDHORCM, 'start_horcm', - return_value=[0, "", ""]) - @mock.patch.object(hbsd_horcm.HBSDHORCM, 'check_horcm', - return_value=[0, "", ""]) - def test_extend_volume_error(self, arg1, arg2, arg3, arg4): - """test extend_volume.""" - self.assertRaises(exception.HBSDError, self.driver.extend_volume, - self.test_volume_error3, 256) - - @mock.patch.object(hbsd_basiclib, 'get_process_lock') - @mock.patch.object(hbsd_horcm.HBSDHORCM, 'exec_raidcom', - side_effect=_exec_raidcom) - @mock.patch.object(hbsd_horcm.HBSDHORCM, 'start_horcm', - return_value=[0, "", ""]) - @mock.patch.object(hbsd_horcm.HBSDHORCM, 
'check_horcm', - return_value=[0, "", ""]) - def test_delete_volume(self, arg1, arg2, arg3, arg4): - """test delete_volume.""" - self.driver.delete_volume(self._VOLUME) - - @mock.patch.object(hbsd_basiclib, 'get_process_lock') - @mock.patch.object(hbsd_horcm.HBSDHORCM, 'exec_raidcom', - side_effect=_exec_raidcom) - @mock.patch.object(hbsd_horcm.HBSDHORCM, 'start_horcm', - return_value=[0, "", ""]) - @mock.patch.object(hbsd_horcm.HBSDHORCM, 'check_horcm', - return_value=[0, "", ""]) - def test_delete_volume_error(self, arg1, arg2, arg3, arg4): - """test delete_volume.""" - self.driver.delete_volume(self.test_volume_error4) - - @mock.patch.object(hbsd_basiclib, 'get_process_lock') - @mock.patch.object(hbsd_common.HBSDCommon, 'get_snapshot_metadata', - return_value={'dummy_snapshot_meta': 'snapshot_meta'}) - @mock.patch.object(hbsd_common.HBSDCommon, 'get_volume_metadata', - return_value={'dummy_volume_meta': 'meta'}) - @mock.patch.object(hbsd_common.HBSDCommon, 'get_volume', - return_value=_VOLUME) - @mock.patch.object(hbsd_horcm.HBSDHORCM, 'exec_raidcom', - side_effect=_exec_raidcom) - @mock.patch.object(hbsd_horcm.HBSDHORCM, 'start_horcm', - return_value=[0, "", ""]) - @mock.patch.object(hbsd_horcm.HBSDHORCM, 'check_horcm', - return_value=[0, "", ""]) - def test_create_snapshot(self, arg1, arg2, arg3, arg4, arg5, arg6, arg7): - """test create_snapshot.""" - ret = self.driver.create_volume(self._VOLUME) - ret = self.driver.create_snapshot(self.test_snapshot) - self.assertEqual('1', ret['provider_location']) - - @mock.patch.object(hbsd_basiclib, 'get_process_lock') - @mock.patch.object(hbsd_common.HBSDCommon, 'get_snapshot_metadata', - return_value={'dummy_snapshot_meta': 'snapshot_meta'}) - @mock.patch.object(hbsd_common.HBSDCommon, 'get_volume_metadata', - return_value={'dummy_volume_meta': 'meta'}) - @mock.patch.object(hbsd_common.HBSDCommon, 'get_volume', - return_value=_VOLUME) - @mock.patch.object(hbsd_horcm.HBSDHORCM, 'exec_raidcom', - 
side_effect=_exec_raidcom) - @mock.patch.object(hbsd_horcm.HBSDHORCM, 'start_horcm', - return_value=[0, "", ""]) - @mock.patch.object(hbsd_horcm.HBSDHORCM, 'check_horcm', - return_value=[0, "", ""]) - def test_create_snapshot_error(self, arg1, arg2, arg3, arg4, arg5, arg6, - arg7): - """test create_snapshot.""" - ret = self.driver.create_volume(self.test_volume) - ret = self.driver.create_snapshot(self.test_snapshot_error) - self.assertEqual('1', ret['provider_location']) - - @mock.patch.object(hbsd_basiclib, 'get_process_lock') - @mock.patch.object(hbsd_horcm.HBSDHORCM, 'exec_raidcom', - side_effect=_exec_raidcom) - @mock.patch.object(hbsd_horcm.HBSDHORCM, 'start_horcm', - return_value=[0, "", ""]) - @mock.patch.object(hbsd_horcm.HBSDHORCM, 'check_horcm', - return_value=[0, "", ""]) - def test_delete_snapshot(self, arg1, arg2, arg3, arg4): - """test delete_snapshot.""" - self.driver.delete_snapshot(self.test_snapshot) - return - - @mock.patch.object(hbsd_basiclib, 'get_process_lock') - @mock.patch.object(hbsd_horcm.HBSDHORCM, 'exec_raidcom', - side_effect=_exec_raidcom) - @mock.patch.object(hbsd_horcm.HBSDHORCM, 'start_horcm', - return_value=[0, "", ""]) - @mock.patch.object(hbsd_horcm.HBSDHORCM, 'check_horcm', - return_value=[0, "", ""]) - def test_delete_snapshot_error(self, arg1, arg2, arg3, arg4): - """test delete_snapshot.""" - self.assertRaises(exception.HBSDCmdError, - self.driver.delete_snapshot, - self.test_snapshot_error) - return - - @mock.patch.object(hbsd_basiclib, 'get_process_lock') - @mock.patch.object(hbsd_common.HBSDCommon, 'get_volume_metadata', - return_value={'dummy_volume_meta': 'meta'}) - @mock.patch.object(hbsd_horcm.HBSDHORCM, 'exec_raidcom', - side_effect=_exec_raidcom) - @mock.patch.object(hbsd_horcm.HBSDHORCM, 'start_horcm', - return_value=[0, "", ""]) - @mock.patch.object(hbsd_horcm.HBSDHORCM, 'check_horcm', - return_value=[0, "", ""]) - def test_create_volume_from_snapshot(self, arg1, arg2, arg3, arg4, arg5): - """test 
create_volume_from_snapshot.""" - vol = self.driver.create_volume_from_snapshot(self.test_volume, - self.test_snapshot) - self.assertIsNotNone(vol) - return - - @mock.patch.object(hbsd_basiclib, 'get_process_lock') - @mock.patch.object(hbsd_common.HBSDCommon, 'get_volume_metadata', - return_value={'dummy_volume_meta': 'meta'}) - @mock.patch.object(hbsd_horcm.HBSDHORCM, 'exec_raidcom', - side_effect=_exec_raidcom) - @mock.patch.object(hbsd_horcm.HBSDHORCM, 'start_horcm', - return_value=[0, "", ""]) - @mock.patch.object(hbsd_horcm.HBSDHORCM, 'check_horcm', - return_value=[0, "", ""]) - def test_create_volume_from_snapshot_error(self, arg1, arg2, arg3, arg4, - arg5): - """test create_volume_from_snapshot.""" - self.assertRaises(exception.HBSDError, - self.driver.create_volume_from_snapshot, - self.test_volume_error5, self.test_snapshot_error2) - return - - @mock.patch.object(hbsd_common.HBSDCommon, 'get_volume_metadata', - return_value={'dummy_volume_meta': 'meta'}) - @mock.patch.object(hbsd_common.HBSDCommon, 'get_volume', - return_value=_VOLUME) - @mock.patch.object(hbsd_basiclib, 'get_process_lock') - @mock.patch.object(hbsd_horcm.HBSDHORCM, 'exec_raidcom', - side_effect=_exec_raidcom) - @mock.patch.object(hbsd_horcm.HBSDHORCM, 'start_horcm', - return_value=[0, "", ""]) - @mock.patch.object(hbsd_horcm.HBSDHORCM, 'check_horcm', - return_value=[0, "", ""]) - def test_create_cloned_volume(self, arg1, arg2, arg3, arg4, arg5, arg6): - """test create_cloned_volume.""" - vol = self.driver.create_cloned_volume(self.test_volume, - self._VOLUME) - self.assertEqual('1', vol['provider_location']) - return - - @mock.patch.object(hbsd_common.HBSDCommon, 'get_volume_metadata', - return_value={'dummy_volume_meta': 'meta'}) - @mock.patch.object(hbsd_common.HBSDCommon, 'get_volume', - return_value=_VOLUME) - @mock.patch.object(hbsd_common.HBSDCommon, 'extend_volume') - @mock.patch.object(hbsd_basiclib, 'get_process_lock') - @mock.patch.object(hbsd_horcm.HBSDHORCM, 'exec_raidcom', - 
side_effect=_exec_raidcom) - @mock.patch.object(hbsd_horcm.HBSDHORCM, 'start_horcm', - return_value=[0, "", ""]) - @mock.patch.object(hbsd_horcm.HBSDHORCM, 'check_horcm', - return_value=[0, "", ""]) - def test_create_cloned_volume_larger_size(self, arg1, arg2, arg3, arg4, - arg5, arg6, arg7): - """test create_cloned_volume.""" - vol = self.driver.create_cloned_volume(self.test_volume_larger, - self._VOLUME) - self.assertEqual('1', vol['provider_location']) - arg5.assert_called_once_with(self.test_volume_larger, - self.test_volume_larger['size']) - return - - @mock.patch.object(hbsd_common.HBSDCommon, 'get_volume_metadata', - return_value={'dummy_volume_meta': 'meta'}) - @mock.patch.object(hbsd_common.HBSDCommon, 'get_volume', - return_value=_VOLUME) - @mock.patch.object(hbsd_basiclib, 'get_process_lock') - @mock.patch.object(hbsd_horcm.HBSDHORCM, 'exec_raidcom', - side_effect=_exec_raidcom) - @mock.patch.object(hbsd_horcm.HBSDHORCM, 'start_horcm', - return_value=[0, "", ""]) - @mock.patch.object(hbsd_horcm.HBSDHORCM, 'check_horcm', - return_value=[0, "", ""]) - def test_create_cloned_volume_error(self, arg1, arg2, arg3, arg4, arg5, - arg6): - """test create_cloned_volume.""" - self.assertRaises(exception.HBSDCmdError, - self.driver.create_cloned_volume, - self.test_volume, self.test_volume_error2) - return - - @mock.patch.object(hbsd_basiclib, 'get_process_lock') - @mock.patch.object(hbsd_horcm.HBSDHORCM, 'exec_raidcom', - side_effect=_exec_raidcom) - def test_initialize_connection(self, arg1, arg2): - """test initialize connection.""" - connector = {'wwpns': ['12345678912345aa', '12345678912345bb'], - 'ip': '127.0.0.1'} - rc = self.driver.initialize_connection(self._VOLUME, connector) - self.assertEqual('fibre_channel', rc['driver_volume_type']) - self.assertEqual(['50060E801053C2E0'], rc['data']['target_wwn']) - self.assertEqual(0, rc['data']['target_lun']) - return - - @mock.patch.object(hbsd_basiclib, 'get_process_lock') - 
@mock.patch.object(hbsd_horcm.HBSDHORCM, 'exec_raidcom', - side_effect=_exec_raidcom) - def test_initialize_connection_error(self, arg1, arg2): - """test initialize connection.""" - connector = {'wwpns': ['12345678912345bb'], 'ip': '127.0.0.2'} - self.assertRaises(exception.HBSDError, - self.driver.initialize_connection, - self._VOLUME, connector) - return - - @mock.patch.object(hbsd_basiclib, 'get_process_lock') - @mock.patch.object(hbsd_horcm.HBSDHORCM, 'exec_raidcom', - side_effect=_exec_raidcom) - def test_terminate_connection(self, arg1, arg2): - """test terminate connection.""" - connector = {'wwpns': ['12345678912345aa', '12345678912345bb'], - 'ip': '127.0.0.1'} - rc = self.driver.terminate_connection(self._VOLUME, connector) - self.assertEqual('fibre_channel', rc['driver_volume_type']) - self.assertEqual(['50060E801053C2E0'], rc['data']['target_wwn']) - return - - @mock.patch.object(hbsd_basiclib, 'get_process_lock') - @mock.patch.object(hbsd_horcm.HBSDHORCM, 'exec_raidcom', - side_effect=_exec_raidcom) - def test_terminate_connection_error(self, arg1, arg2): - """test terminate connection.""" - connector = {'ip': '127.0.0.1'} - self.assertRaises(exception.HBSDError, - self.driver.terminate_connection, - self._VOLUME, connector) - return - - @mock.patch.object(hbsd_basiclib, 'get_process_lock') - @mock.patch.object(hbsd_horcm.HBSDHORCM, 'exec_raidcom', - side_effect=_exec_raidcom) - def test_manage_existing(self, arg1, arg2): - self.configuration.hitachi_serial_number = self.SERIAL_NUM - rc = self.driver.manage_existing(self._VOLUME, self.test_existing_ref) - self.assertEqual(1, rc['provider_location']) - - @mock.patch.object(hbsd_basiclib, 'get_process_lock') - @mock.patch.object(hbsd_horcm.HBSDHORCM, 'exec_raidcom', - side_effect=_exec_raidcom) - @mock.patch.object(hbsd_common.HBSDCommon, '_update_volume_metadata') - def test_manage_existing_get_size(self, arg1, arg2, arg3): - self.configuration.hitachi_serial_number = self.SERIAL_NUM - size = 
self.driver.manage_existing_get_size(self._VOLUME, - self.test_existing_ref) - self.assertEqual(1, size) - - @mock.patch.object(hbsd_basiclib, 'get_process_lock') - @mock.patch.object(hbsd_horcm.HBSDHORCM, 'exec_raidcom', - side_effect=_exec_raidcom) - @mock.patch.object(hbsd_common.HBSDCommon, '_update_volume_metadata') - def test_manage_existing_get_size_none_ldev_ref(self, arg1, arg2, arg3): - self.configuration.hitachi_serial_number = self.SERIAL_NUM - self.assertRaises(exception.ManageExistingInvalidReference, - self.driver.manage_existing_get_size, self._VOLUME, - self.test_existing_none_ldev_ref) - - @mock.patch.object(hbsd_basiclib, 'get_process_lock') - @mock.patch.object(hbsd_horcm.HBSDHORCM, 'exec_raidcom', - side_effect=_exec_raidcom) - @mock.patch.object(hbsd_common.HBSDCommon, '_update_volume_metadata') - def test_manage_existing_get_size_invalid_ldev_ref(self, arg1, arg2, arg3): - self.configuration.hitachi_serial_number = self.SERIAL_NUM - self.assertRaises(exception.ManageExistingInvalidReference, - self.driver.manage_existing_get_size, self._VOLUME, - self.test_existing_invalid_ldev_ref) - - @mock.patch.object(hbsd_basiclib, 'get_process_lock') - @mock.patch.object(hbsd_horcm.HBSDHORCM, 'exec_raidcom', - side_effect=_exec_raidcom) - @mock.patch.object(hbsd_common.HBSDCommon, '_update_volume_metadata') - def test_manage_existing_get_size_no_ldev_ref(self, arg1, arg2, arg3): - self.configuration.hitachi_serial_number = self.SERIAL_NUM - self.assertRaises(exception.ManageExistingInvalidReference, - self.driver.manage_existing_get_size, self._VOLUME, - self.test_existing_no_ldev_ref) - - @mock.patch.object(hbsd_basiclib, 'get_process_lock') - @mock.patch.object(hbsd_horcm.HBSDHORCM, 'exec_raidcom', - side_effect=_exec_raidcom) - @mock.patch.object(hbsd_common.HBSDCommon, '_update_volume_metadata') - def test_manage_existing_get_size_none_serial_ref(self, arg1, arg2, - arg3): - self.configuration.hitachi_serial_number = self.SERIAL_NUM - 
self.assertRaises(exception.ManageExistingInvalidReference, - self.driver.manage_existing_get_size, self._VOLUME, - self.test_existing_none_serial_ref) - - @mock.patch.object(hbsd_basiclib, 'get_process_lock') - @mock.patch.object(hbsd_horcm.HBSDHORCM, 'exec_raidcom', - side_effect=_exec_raidcom) - @mock.patch.object(hbsd_common.HBSDCommon, '_update_volume_metadata') - def test_manage_existing_get_size_invalid_serial_ref(self, arg1, arg2, - arg3): - self.configuration.hitachi_serial_number = self.SERIAL_NUM - self.assertRaises(exception.ManageExistingInvalidReference, - self.driver.manage_existing_get_size, self._VOLUME, - self.test_existing_invalid_serial_ref) - - @mock.patch.object(hbsd_basiclib, 'get_process_lock') - @mock.patch.object(hbsd_horcm.HBSDHORCM, 'exec_raidcom', - side_effect=_exec_raidcom) - @mock.patch.object(hbsd_common.HBSDCommon, '_update_volume_metadata') - def test_manage_existing_get_size_no_serial_ref(self, arg1, arg2, arg3): - self.configuration.hitachi_serial_number = self.SERIAL_NUM - self.assertRaises(exception.ManageExistingInvalidReference, - self.driver.manage_existing_get_size, self._VOLUME, - self.test_existing_no_serial_ref) - - @mock.patch.object(hbsd_basiclib, 'get_process_lock') - @mock.patch.object(hbsd_horcm.HBSDHORCM, 'start_horcm', - return_value=[0, "", ""]) - @mock.patch.object(hbsd_horcm.HBSDHORCM, 'check_horcm', - return_value=[0, "", ""]) - @mock.patch.object(hbsd_horcm.HBSDHORCM, 'exec_raidcom', - side_effect=_exec_raidcom) - def test_unmanage(self, arg1, arg2, arg3, arg4): - self.driver.unmanage(self._VOLUME) - - @mock.patch.object(hbsd_basiclib, 'get_process_lock') - @mock.patch.object(hbsd_horcm.HBSDHORCM, 'exec_raidcom', - side_effect=_exec_raidcom) - def test_unmanage_busy(self, arg1, arg2): - self.assertRaises(exception.HBSDVolumeIsBusy, - self.driver.unmanage, self.test_volume_error3) - - @mock.patch.object(hbsd_basiclib, 'get_process_lock') - @mock.patch.object(hbsd_horcm.HBSDHORCM, 'exec_raidcom', - 
side_effect=_exec_raidcom_get_ldev_no_stdout) - @mock.patch.object(hbsd_common.HBSDCommon, '_update_volume_metadata') - def test_manage_existing_get_size_get_ldev_no_stdout(self, arg1, arg2, - arg3): - self.configuration.hitachi_serial_number = self.SERIAL_NUM - self.assertRaises(exception.ManageExistingInvalidReference, - self.driver.manage_existing_get_size, self._VOLUME, - self.test_existing_ref) - - @mock.patch.object(hbsd_basiclib, 'get_process_lock') - @mock.patch.object(hbsd_horcm.HBSDHORCM, 'exec_raidcom', - side_effect=_exec_raidcom_get_ldev_no_nml) - @mock.patch.object(hbsd_common.HBSDCommon, '_update_volume_metadata') - def test_manage_existing_get_size_get_ldev_no_nml(self, arg1, arg2, arg3): - self.configuration.hitachi_serial_number = self.SERIAL_NUM - self.assertRaises(exception.ManageExistingInvalidReference, - self.driver.manage_existing_get_size, self._VOLUME, - self.test_existing_ref) - - @mock.patch.object(hbsd_basiclib, 'get_process_lock') - @mock.patch.object(hbsd_horcm.HBSDHORCM, 'exec_raidcom', - side_effect=_exec_raidcom_get_ldev_no_open_v) - @mock.patch.object(hbsd_common.HBSDCommon, '_update_volume_metadata') - def test_manage_existing_get_size_get_ldev_no_open_v(self, arg1, arg2, - arg3): - self.configuration.hitachi_serial_number = self.SERIAL_NUM - self.assertRaises(exception.ManageExistingInvalidReference, - self.driver.manage_existing_get_size, self._VOLUME, - self.test_existing_ref) - - @mock.patch.object(hbsd_basiclib, 'get_process_lock') - @mock.patch.object(hbsd_horcm.HBSDHORCM, 'exec_raidcom', - side_effect=_exec_raidcom_get_ldev_no_hdp) - @mock.patch.object(hbsd_common.HBSDCommon, '_update_volume_metadata') - def test_manage_existing_get_size_get_ldev_no_hdp(self, arg1, arg2, arg3): - self.configuration.hitachi_serial_number = self.SERIAL_NUM - self.assertRaises(exception.ManageExistingInvalidReference, - self.driver.manage_existing_get_size, self._VOLUME, - self.test_existing_ref) - - @mock.patch.object(hbsd_basiclib, 
'get_process_lock') - @mock.patch.object(hbsd_horcm.HBSDHORCM, 'exec_raidcom', - side_effect=_exec_raidcom_get_ldev_pair) - @mock.patch.object(hbsd_common.HBSDCommon, '_update_volume_metadata') - def test_manage_existing_get_size_get_ldev_pair(self, arg1, arg2, arg3): - self.configuration.hitachi_serial_number = self.SERIAL_NUM - self.assertRaises(exception.ManageExistingInvalidReference, - self.driver.manage_existing_get_size, self._VOLUME, - self.test_existing_ref) - - @mock.patch.object(hbsd_basiclib, 'get_process_lock') - @mock.patch.object(hbsd_horcm.HBSDHORCM, 'exec_raidcom', - side_effect=_exec_raidcom_get_ldev_permit) - @mock.patch.object(hbsd_common.HBSDCommon, '_update_volume_metadata') - def test_manage_existing_get_size_get_ldev_permit(self, arg1, arg2, arg3): - self.configuration.hitachi_serial_number = self.SERIAL_NUM - self.assertRaises(exception.ManageExistingInvalidReference, - self.driver.manage_existing_get_size, self._VOLUME, - self.test_existing_ref) - - @mock.patch.object(hbsd_basiclib, 'get_process_lock') - @mock.patch.object(hbsd_horcm.HBSDHORCM, 'exec_raidcom', - side_effect=_exec_raidcom_get_ldev_invalid_size) - @mock.patch.object(hbsd_common.HBSDCommon, '_update_volume_metadata') - def test_manage_existing_get_size_get_ldev_invalid_size(self, arg1, arg2, - arg3): - self.configuration.hitachi_serial_number = self.SERIAL_NUM - self.assertRaises(exception.ManageExistingInvalidReference, - self.driver.manage_existing_get_size, self._VOLUME, - self.test_existing_ref) - - @mock.patch.object(hbsd_basiclib, 'get_process_lock') - @mock.patch.object(hbsd_horcm.HBSDHORCM, 'exec_raidcom', - side_effect=_exec_raidcom_get_ldev_num_port) - @mock.patch.object(hbsd_common.HBSDCommon, '_update_volume_metadata') - def test_manage_existing_get_size_get_ldev_num_port(self, arg1, arg2, - arg3): - self.configuration.hitachi_serial_number = self.SERIAL_NUM - self.assertRaises(exception.ManageExistingInvalidReference, - self.driver.manage_existing_get_size, 
self._VOLUME, - self.test_existing_ref) - - def test_invalid_resource_lock_timeout_below_limit(self): - self.configuration.hitachi_horcm_resource_lock_timeout = -1 - self.assertRaises(exception.HBSDError, self.driver.check_param) - - def test_invalid_resource_lock_timeout_over_limit(self): - self.configuration.hitachi_horcm_resource_lock_timeout = 7201 - self.assertRaises(exception.HBSDError, self.driver.check_param) diff --git a/cinder/tests/unit/volume/drivers/hitachi/test_hitachi_hbsd_snm2_fc.py b/cinder/tests/unit/volume/drivers/hitachi/test_hitachi_hbsd_snm2_fc.py deleted file mode 100644 index 147d6ba33..000000000 --- a/cinder/tests/unit/volume/drivers/hitachi/test_hitachi_hbsd_snm2_fc.py +++ /dev/null @@ -1,618 +0,0 @@ -# Copyright (C) 2014, Hitachi, Ltd. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
-# - -""" -Self test for Hitachi Block Storage Driver -""" - -import mock - -from cinder import exception -from cinder import test -from cinder.volume import configuration as conf -from cinder.volume.drivers.hitachi import hbsd_basiclib -from cinder.volume.drivers.hitachi import hbsd_common -from cinder.volume.drivers.hitachi import hbsd_fc -from cinder.volume.drivers.hitachi import hbsd_snm2 - - -def _exec_hsnm(*args, **kargs): - return HBSDSNM2FCDriverTest.hsnm_vals.get(args) - - -def _exec_hsnm_get_lu_ret_err(*args, **kargs): - return HBSDSNM2FCDriverTest.hsnm_get_lu_ret_err.get(args) - - -def _exec_hsnm_get_lu_vol_type_err(*args, **kargs): - return HBSDSNM2FCDriverTest.hsnm_get_lu_vol_type_err.get(args) - - -def _exec_hsnm_get_lu_dppool_err(*args, **kargs): - return HBSDSNM2FCDriverTest.hsnm_get_lu_dppool_err.get(args) - - -def _exec_hsnm_get_lu_size_err(*args, **kargs): - return HBSDSNM2FCDriverTest.hsnm_get_lu_size_err.get(args) - - -def _exec_hsnm_get_lu_num_port_err(*args, **kargs): - return HBSDSNM2FCDriverTest.hsnm_get_lu_num_port_err.get(args) - - -class HBSDSNM2FCDriverTest(test.TestCase): - """Test HBSDSNM2FCDriver.""" - - audppool_result = " DP RAID \ - Current Utilization Current Over Replication\ - Available Current Replication Rotational \ - \ - Stripe \ - Needing Preparation\n\ - Pool Tier Mode Level Total Capacity Consumed Capacity \ - Percent Provisioning Percent Capacity \ -Utilization Percent Type Speed Encryption Status \ - \ -Reconstruction Progress Size Capacity\n\ - 30 Disable 1( 1D+1D) 532.0 GB 2.0 GB \ - 1% 24835% 532.0 GB \ - 1% SAS 10000rpm N/A Normal \ - N/A \ - 256KB 0.0 GB" - - aureplicationlocal_result = "Pair Name LUN Pair \ -LUN Status Copy Type Group \ - Point-in-Time MU Number\n\ - 0 10 0 Split( 99%) \ - ShadowImage ---:Ungrouped N/A\ - " - - auluref_result = " Stripe RAID DP Tier \ - RAID Rotational Number\n\ - LU Capacity Size Group Pool Mode Level Type\ - Speed of Paths Status\n\ - 0 2097152 blocks 256KB 0 0 Enable 0 Normal" 
- - auluref_result1 = " Stripe RAID DP Tier \ - RAID Rotational Number\n\ - LU Capacity Size Group Pool Mode Level Type\ - Speed of Paths Status\n\ - 0 2097152 blocks 256KB 0 0 Enable 0 DUMMY" - - auhgwwn_result = "Port 00 Host Group Security ON\n Detected WWN\n \ -Name Port Name Host Group\n\ -HBSD-00 10000000C97BCE7A 001:HBSD-01\n\ - Assigned WWN\n Name Port Name \ -Host Group\n abcdefg 10000000C97BCE7A \ -001:HBSD-01" - - aufibre1_result = "Port Information\n\ - Port Address\n CTL Port\ - Node Name Port Name Setting Current\n 0 0 \ -50060E801053C2E0 50060E801053C2E0 0000EF 272700" - - auhgmap_result = "Mapping Mode = ON\nPort Group \ - H-LUN LUN\n 00 001:HBSD-00 0 1000" - - hsnm_vals = { - ('audppool', '-unit None -refer -g'): [0, "%s" % audppool_result, ""], - ('aureplicationlocal', - '-unit None -create -si -pvol 1 -svol 1 -compsplit -pace normal'): - [0, "", ""], - ('aureplicationlocal', - '-unit None -create -si -pvol 3 -svol 1 -compsplit -pace normal'): - [1, "", ""], - ('aureplicationlocal', '-unit None -refer -pvol 1'): - [0, "%s" % aureplicationlocal_result, ""], - ('aureplicationlocal', '-unit None -refer -pvol 3'): - [1, "", "DMEC002015"], - ('aureplicationlocal', '-unit None -refer -svol 3'): - [1, "", "DMEC002015"], - ('aureplicationlocal', '-unit None -simplex -si -pvol 1 -svol 0'): - [0, "", ""], - ('auluchgsize', '-unit None -lu 1 -size 256g'): - [0, "", ""], - ('auludel', '-unit None -lu 1 -f'): [0, 0, ""], - ('auludel', '-unit None -lu 3 -f'): [1, 0, ""], - ('auluadd', '-unit None -lu 1 -dppoolno 30 -size 128g'): [0, 0, ""], - ('auluadd', '-unit None -lu 1 -dppoolno 30 -size 256g'): [1, "", ""], - ('auluref', '-unit None'): [0, "%s" % auluref_result, ""], - ('auluref', '-unit None -lu 0'): [0, "%s" % auluref_result, ""], - ('auhgmap', '-unit None -add 0 0 1 1 1'): [0, 0, ""], - ('auhgwwn', '-unit None -refer'): [0, "%s" % auhgwwn_result, ""], - ('aufibre1', '-unit None -refer'): [0, "%s" % aufibre1_result, ""], - ('auhgmap', '-unit None 
-refer'): [0, "%s" % auhgmap_result, ""]} - - auluref_ret_err = "Stripe RAID DP Tier \ - RAID Rotational Number\n\ - LU Capacity Size Group Pool Mode Level Type\ - Speed of Paths Status\n\ - 0 2097152 blocks 256KB 0 0 Enable 0 Normal" - - hsnm_get_lu_ret_err = { - ('auluref', '-unit None -lu 0'): [1, "%s" % auluref_ret_err, ""], - } - - auluref_vol_type_err = "Stripe RAID DP Tier \ - RAID Rotational Number\n\ - LU Capacity Size Group Pool Mode Level Type\ - Speed of Paths Status\n\ - 0 2097152 blocks 256KB 0 0 Enable 0 DUMMY" - - hsnm_get_lu_vol_type_err = { - ('auluref', '-unit None -lu 0'): - [0, "%s" % auluref_vol_type_err, ""], - } - - auluref_dppool_err = "Stripe RAID DP Tier \ - RAID Rotational Number\n\ - LU Capacity Size Group Pool Mode Level Type\ - Speed of Paths Status\n\ - 0 2097152 blocks 256KB 0 N/A Enable 0 Normal" - - hsnm_get_lu_dppool_err = { - ('auluref', '-unit None -lu 0'): - [0, "%s" % auluref_dppool_err, ""], - } - - auluref_size_err = "Stripe RAID DP Tier \ - RAID Rotational Number\n\ - LU Capacity Size Group Pool Mode Level Type\ - Speed of Paths Status\n\ - 0 2097151 blocks 256KB N/A 0 Enable 0 Normal" - hsnm_get_lu_size_err = { - ('auluref', '-unit None -lu 0'): [0, "%s" % auluref_size_err, ""], - } - - auluref_num_port_err = "Stripe RAID DP Tier \ - RAID Rotational Number\n\ - LU Capacity Size Group Pool Mode Level Type\ - Speed of Paths Status\n\ - 0 2097152 blocks 256KB 0 0 Enable 1 Normal" - - hsnm_get_lu_num_port_err = { - ('auluref', '-unit None -lu 0'): [0, "%s" % auluref_num_port_err, ""], - } - -# The following information is passed on to tests, when creating a volume - - _VOLUME = {'size': 128, 'volume_type': None, 'source_volid': '0', - 'provider_location': '1', 'name': 'test', - 'id': 'abcdefg', 'snapshot_id': '0', 'status': 'available'} - - test_volume = {'name': 'test_volume', 'size': 128, - 'id': 'test-volume-0', - 'provider_location': '1', 'status': 'available'} - - test_volume_larger = {'name': 'test_volume', 'size': 256, 
- 'id': 'test-volume-0', - 'provider_location': '1', 'status': 'available'} - - test_volume_error = {'name': 'test_volume_error', 'size': 256, - 'id': 'test-volume-error', - 'provider_location': '3', 'status': 'available'} - - test_volume_error1 = {'name': 'test_volume_error', 'size': 128, - 'id': 'test-volume-error', - 'provider_location': None, 'status': 'available'} - - test_volume_error2 = {'name': 'test_volume_error', 'size': 256, - 'id': 'test-volume-error', - 'provider_location': '1', 'status': 'available'} - - test_volume_error3 = {'name': 'test_volume3', 'size': 128, - 'id': 'test-volume3', - 'volume_metadata': [{'key': 'type', - 'value': 'V-VOL'}], - 'provider_location': '1', 'status': 'available'} - - test_volume_error4 = {'name': 'test_volume4', 'size': 128, - 'id': 'test-volume2', - 'provider_location': '3', 'status': 'available'} - - test_snapshot = {'volume_name': 'test', 'size': 128, - 'volume_size': 128, 'name': 'test-snap', - 'volume_id': 0, 'id': 'test-snap-0', 'volume': _VOLUME, - 'provider_location': '1', 'status': 'available'} - - test_snapshot_error2 = {'volume_name': 'test', 'size': 128, - 'volume_size': 128, 'name': 'test-snap', - 'volume_id': 0, 'id': 'test-snap-0', - 'volume': test_volume_error, - 'provider_location': None, 'status': 'available'} - - UNIT_NAME = 'HUS110_91122819' - test_existing_ref = {'ldev': '0', 'unit_name': UNIT_NAME} - test_existing_none_ldev_ref = {'ldev': None, 'unit_name': UNIT_NAME} - test_existing_invalid_ldev_ref = {'ldev': 'AAA', 'unit_name': UNIT_NAME} - test_existing_no_ldev_ref = {'unit_name': UNIT_NAME} - test_existing_none_unit_ref = {'ldev': '0', 'unit_name': None} - test_existing_invalid_unit_ref = {'ldev': '0', 'unit_name': 'Dummy'} - test_existing_no_unit_ref = {'ldev': '0'} - - def __init__(self, *args, **kwargs): - super(HBSDSNM2FCDriverTest, self).__init__(*args, **kwargs) - - def setUp(self): - super(HBSDSNM2FCDriverTest, self).setUp() - self._setup_config() - self._setup_driver() - - def 
_setup_config(self): - self.configuration = mock.Mock(conf.Configuration) - self.configuration.hitachi_pool_id = 30 - self.configuration.hitachi_target_ports = "00" - self.configuration.hitachi_debug_level = 0 - self.configuration.hitachi_serial_number = "None" - self.configuration.hitachi_unit_name = "None" - self.configuration.hitachi_group_request = False - self.configuration.hitachi_zoning_request = False - self.configuration.config_group = "None" - self.configuration.hitachi_ldev_range = [0, 100] - self.configuration.hitachi_default_copy_method = 'SI' - self.configuration.hitachi_copy_check_interval = 1 - self.configuration.hitachi_copy_speed = 3 - - def _setup_driver(self): - self.driver = hbsd_fc.HBSDFCDriver( - configuration=self.configuration) - context = None - db = None - self.driver.common = hbsd_common.HBSDCommon( - self.configuration, self.driver, context, db) - self.driver.common.command = hbsd_snm2.HBSDSNM2(self.configuration) - self.driver.common.pair_flock = \ - self.driver.common.command.set_pair_flock() - self.driver.common.horcmgr_flock = \ - self.driver.common.command.set_horcmgr_flock() - self.driver.do_setup_status.set() - -# API test cases - @mock.patch.object(hbsd_basiclib, 'get_process_lock') - @mock.patch.object(hbsd_common.HBSDCommon, 'get_volume_metadata') - @mock.patch.object(hbsd_snm2.HBSDSNM2, 'exec_hsnm', side_effect=_exec_hsnm) - def test_create_volume(self, arg1, arg2, arg3): - """test create_volume.""" - ret = self.driver.create_volume(self._VOLUME) - vol = self._VOLUME.copy() - vol['provider_location'] = ret['provider_location'] - self.assertEqual('1', vol['provider_location']) - - @mock.patch.object(hbsd_basiclib, 'get_process_lock') - @mock.patch.object(hbsd_common.HBSDCommon, 'get_volume_metadata') - @mock.patch.object(hbsd_snm2.HBSDSNM2, 'exec_hsnm', side_effect=_exec_hsnm) - def test_create_volume_error(self, arg1, arg2, arg3): - """test create_volume.""" - self.assertRaises(exception.HBSDCmdError, - 
self.driver.create_volume, - self.test_volume_error) - - @mock.patch.object(hbsd_basiclib, 'get_process_lock') - @mock.patch.object(hbsd_snm2.HBSDSNM2, 'exec_hsnm', side_effect=_exec_hsnm) - def test_get_volume_stats(self, arg1, arg2): - """test get_volume_stats.""" - stats = self.driver.get_volume_stats(True) - self.assertEqual('Hitachi', stats['vendor_name']) - - @mock.patch.object(hbsd_basiclib, 'get_process_lock') - @mock.patch.object(hbsd_snm2.HBSDSNM2, 'exec_hsnm', side_effect=_exec_hsnm) - def test_get_volume_stats_error(self, arg1, arg2): - """test get_volume_stats.""" - self.configuration.hitachi_pool_id = 29 - stats = self.driver.get_volume_stats(True) - self.assertEqual({}, stats) - self.configuration.hitachi_pool_id = 30 - - @mock.patch.object(hbsd_basiclib, 'get_process_lock') - @mock.patch.object(hbsd_snm2.HBSDSNM2, 'exec_hsnm', side_effect=_exec_hsnm) - def test_extend_volume(self, arg1, arg2): - """test extend_volume.""" - self.driver.extend_volume(self._VOLUME, 256) - - @mock.patch.object(hbsd_basiclib, 'get_process_lock') - @mock.patch.object(hbsd_snm2.HBSDSNM2, 'exec_hsnm', side_effect=_exec_hsnm) - def test_extend_volume_error(self, arg1, arg2): - """test extend_volume.""" - self.assertRaises(exception.HBSDError, self.driver.extend_volume, - self.test_volume_error3, 256) - - @mock.patch.object(hbsd_basiclib, 'get_process_lock') - @mock.patch.object(hbsd_snm2.HBSDSNM2, 'exec_hsnm', side_effect=_exec_hsnm) - def test_delete_volume(self, arg1, arg2): - """test delete_volume.""" - self.driver.delete_volume(self._VOLUME) - - @mock.patch.object(hbsd_basiclib, 'get_process_lock') - @mock.patch.object(hbsd_snm2.HBSDSNM2, 'exec_hsnm', side_effect=_exec_hsnm) - def test_delete_volume_error(self, arg1, arg2): - """test delete_volume.""" - self.assertRaises(exception.HBSDCmdError, - self.driver.delete_volume, - self.test_volume_error4) - - @mock.patch.object(hbsd_basiclib, 'get_process_lock') - @mock.patch.object(hbsd_common.HBSDCommon, 
'get_snapshot_metadata', - return_value={'dummy_snapshot_meta': 'snapshot_meta'}) - @mock.patch.object(hbsd_common.HBSDCommon, 'get_volume_metadata', - return_value={'dummy_volume_meta': 'meta'}) - @mock.patch.object(hbsd_common.HBSDCommon, 'get_volume', - return_value=_VOLUME) - @mock.patch.object(hbsd_snm2.HBSDSNM2, 'exec_hsnm', side_effect=_exec_hsnm) - def test_create_snapshot(self, arg1, arg2, arg3, arg4, arg5): - """test create_snapshot.""" - ret = self.driver.create_volume(self._VOLUME) - ret = self.driver.create_snapshot(self.test_snapshot) - self.assertEqual('1', ret['provider_location']) - - @mock.patch.object(hbsd_basiclib, 'get_process_lock') - @mock.patch.object(hbsd_common.HBSDCommon, 'get_snapshot_metadata', - return_value={'dummy_snapshot_meta': 'snapshot_meta'}) - @mock.patch.object(hbsd_common.HBSDCommon, 'get_volume_metadata', - return_value={'dummy_volume_meta': 'meta'}) - @mock.patch.object(hbsd_common.HBSDCommon, 'get_volume', - return_value=test_volume_error) - @mock.patch.object(hbsd_snm2.HBSDSNM2, 'exec_hsnm', side_effect=_exec_hsnm) - def test_create_snapshot_error(self, arg1, arg2, arg3, arg4, arg5): - """test create_snapshot.""" - self.assertRaises(exception.HBSDCmdError, - self.driver.create_snapshot, - self.test_snapshot_error2) - - @mock.patch.object(hbsd_basiclib, 'get_process_lock') - @mock.patch.object(hbsd_snm2.HBSDSNM2, 'exec_hsnm', side_effect=_exec_hsnm) - def test_delete_snapshot(self, arg1, arg2): - """test delete_snapshot.""" - self.driver.delete_snapshot(self.test_snapshot) - return - - @mock.patch.object(hbsd_basiclib, 'get_process_lock') - @mock.patch.object(hbsd_snm2.HBSDSNM2, 'exec_hsnm', side_effect=_exec_hsnm) - def test_delete_snapshot_error(self, arg1, arg2): - """test delete_snapshot.""" - self.driver.delete_snapshot(self.test_snapshot_error2) - return - - @mock.patch.object(hbsd_basiclib, 'get_process_lock') - @mock.patch.object(hbsd_common.HBSDCommon, 'get_volume_metadata', - return_value={'dummy_volume_meta': 
'meta'}) - @mock.patch.object(hbsd_snm2.HBSDSNM2, 'exec_hsnm', side_effect=_exec_hsnm) - def test_create_volume_from_snapshot(self, arg1, arg2, arg3): - """test create_volume_from_snapshot.""" - vol = self.driver.create_volume_from_snapshot(self._VOLUME, - self.test_snapshot) - self.assertIsNotNone(vol) - return - - @mock.patch.object(hbsd_basiclib, 'get_process_lock') - @mock.patch.object(hbsd_common.HBSDCommon, 'get_volume_metadata', - return_value={'dummy_volume_meta': 'meta'}) - @mock.patch.object(hbsd_snm2.HBSDSNM2, 'exec_hsnm', side_effect=_exec_hsnm) - def test_create_volume_from_snapshot_error(self, arg1, arg2, arg3): - """test create_volume_from_snapshot.""" - self.assertRaises(exception.HBSDError, - self.driver.create_volume_from_snapshot, - self.test_volume_error2, self.test_snapshot) - return - - @mock.patch.object(hbsd_common.HBSDCommon, 'get_volume_metadata', - return_value={'dummy_volume_meta': 'meta'}) - @mock.patch.object(hbsd_common.HBSDCommon, 'get_volume', - return_value=_VOLUME) - @mock.patch.object(hbsd_snm2.HBSDSNM2, 'exec_hsnm', side_effect=_exec_hsnm) - @mock.patch.object(hbsd_basiclib, 'get_process_lock') - def test_create_cloned_volume(self, arg1, arg2, arg3, arg4): - """test create_cloned_volume.""" - vol = self.driver.create_cloned_volume(self._VOLUME, - self.test_volume) - self.assertIsNotNone(vol) - return - - @mock.patch.object(hbsd_common.HBSDCommon, 'get_volume_metadata', - return_value={'dummy_volume_meta': 'meta'}) - @mock.patch.object(hbsd_common.HBSDCommon, 'get_volume', - return_value=_VOLUME) - @mock.patch.object(hbsd_common.HBSDCommon, 'extend_volume') - @mock.patch.object(hbsd_snm2.HBSDSNM2, 'exec_hsnm', side_effect=_exec_hsnm) - @mock.patch.object(hbsd_basiclib, 'get_process_lock') - def test_create_cloned_volume_larger(self, arg1, arg2, arg3, arg4, arg5): - """test create_cloned_volume.""" - vol = self.driver.create_cloned_volume(self.test_volume_larger, - self._VOLUME) - self.assertIsNotNone(vol) - 
arg3.assert_called_once_with(self.test_volume_larger, - self.test_volume_larger['size']) - return - - @mock.patch.object(hbsd_common.HBSDCommon, 'get_volume_metadata', - return_value={'dummy_volume_meta': 'meta'}) - @mock.patch.object(hbsd_common.HBSDCommon, 'get_volume', - return_value=test_volume_error1) - @mock.patch.object(hbsd_snm2.HBSDSNM2, 'exec_hsnm', side_effect=_exec_hsnm) - @mock.patch.object(hbsd_basiclib, 'get_process_lock') - def test_create_cloned_volume_error(self, arg1, arg2, arg3, arg4): - """test create_cloned_volume.""" - self.assertRaises(exception.HBSDError, - self.driver.create_cloned_volume, - self._VOLUME, self.test_volume_error1) - return - - @mock.patch.object(hbsd_basiclib, 'get_process_lock') - @mock.patch.object(hbsd_snm2.HBSDSNM2, 'exec_hsnm', side_effect=_exec_hsnm) - def test_initialize_connection(self, arg1, arg2): - """test initialize connection.""" - connector = {'wwpns': '0x100000', 'ip': '0xc0a80100'} - rc = self.driver.initialize_connection(self._VOLUME, connector) - self.assertEqual('fibre_channel', rc['driver_volume_type']) - self.assertEqual(['50060E801053C2E0'], rc['data']['target_wwn']) - self.assertEqual(1, rc['data']['target_lun']) - return - - @mock.patch.object(hbsd_basiclib, 'get_process_lock') - @mock.patch.object(hbsd_snm2.HBSDSNM2, 'exec_hsnm', side_effect=_exec_hsnm) - def test_initialize_connection_error(self, arg1, arg2): - """test initialize connection.""" - connector = {'wwpns': 'x', 'ip': '0xc0a80100'} - self.assertRaises(exception.HBSDError, - self.driver.initialize_connection, - self._VOLUME, connector) - return - - @mock.patch.object(hbsd_snm2.HBSDSNM2, 'exec_hsnm', side_effect=_exec_hsnm) - def test_terminate_connection(self, arg1): - """test terminate connection.""" - connector = {'wwpns': '0x100000', 'ip': '0xc0a80100'} - rc = self.driver.terminate_connection(self._VOLUME, connector) - self.assertEqual('fibre_channel', rc['driver_volume_type']) - self.assertEqual(['50060E801053C2E0'], 
rc['data']['target_wwn']) - return - - @mock.patch.object(hbsd_snm2.HBSDSNM2, 'exec_hsnm', side_effect=_exec_hsnm) - def test_terminate_connection_error(self, arg1): - """test terminate connection.""" - connector = {'ip': '0xc0a80100'} - self.assertRaises(exception.HBSDError, - self.driver.terminate_connection, - self._VOLUME, connector) - return - - @mock.patch.object(hbsd_basiclib, 'get_process_lock') - @mock.patch.object(hbsd_snm2.HBSDSNM2, 'exec_hsnm', side_effect=_exec_hsnm) - def test_manage_existing(self, arg1, arg2): - rc = self.driver.manage_existing(self._VOLUME, self.test_existing_ref) - self.assertEqual(0, rc['provider_location']) - - @mock.patch.object(hbsd_basiclib, 'get_process_lock') - @mock.patch.object(hbsd_snm2.HBSDSNM2, 'exec_hsnm', side_effect=_exec_hsnm) - @mock.patch.object(hbsd_common.HBSDCommon, '_update_volume_metadata') - def test_manage_existing_get_size(self, arg1, arg2, arg3): - self.configuration.hitachi_unit_name = self.UNIT_NAME - size = self.driver.manage_existing_get_size(self._VOLUME, - self.test_existing_ref) - self.assertEqual(1, size) - - @mock.patch.object(hbsd_basiclib, 'get_process_lock') - @mock.patch.object(hbsd_snm2.HBSDSNM2, 'exec_hsnm', side_effect=_exec_hsnm) - @mock.patch.object(hbsd_common.HBSDCommon, '_update_volume_metadata') - def test_manage_existing_get_size_none_ldev(self, arg1, arg2, arg3): - self.configuration.hitachi_unit_name = self.UNIT_NAME - self.assertRaises(exception.ManageExistingInvalidReference, - self.driver.manage_existing_get_size, self._VOLUME, - self.test_existing_none_ldev_ref) - - @mock.patch.object(hbsd_basiclib, 'get_process_lock') - @mock.patch.object(hbsd_snm2.HBSDSNM2, 'exec_hsnm', side_effect=_exec_hsnm) - @mock.patch.object(hbsd_common.HBSDCommon, '_update_volume_metadata') - def test_manage_existing_get_size_invalid_ldev_ref(self, arg1, arg2, arg3): - self.configuration.hitachi_unit_name = self.UNIT_NAME - self.assertRaises(exception.ManageExistingInvalidReference, - 
self.driver.manage_existing_get_size, self._VOLUME, - self.test_existing_invalid_ldev_ref) - - @mock.patch.object(hbsd_basiclib, 'get_process_lock') - @mock.patch.object(hbsd_snm2.HBSDSNM2, 'exec_hsnm', side_effect=_exec_hsnm) - @mock.patch.object(hbsd_common.HBSDCommon, '_update_volume_metadata') - def test_manage_existing_get_size_no_ldev_ref(self, arg1, arg2, arg3): - self.configuration.hitachi_unit_name = self.UNIT_NAME - self.assertRaises(exception.ManageExistingInvalidReference, - self.driver.manage_existing_get_size, self._VOLUME, - self.test_existing_no_ldev_ref) - - @mock.patch.object(hbsd_basiclib, 'get_process_lock') - @mock.patch.object(hbsd_snm2.HBSDSNM2, 'exec_hsnm', side_effect=_exec_hsnm) - @mock.patch.object(hbsd_common.HBSDCommon, '_update_volume_metadata') - def test_manage_existing_get_size_none_unit_ref(self, arg1, arg2, arg3): - self.configuration.hitachi_unit_name = self.UNIT_NAME - self.assertRaises(exception.ManageExistingInvalidReference, - self.driver.manage_existing_get_size, self._VOLUME, - self.test_existing_none_unit_ref) - - @mock.patch.object(hbsd_basiclib, 'get_process_lock') - @mock.patch.object(hbsd_snm2.HBSDSNM2, 'exec_hsnm', side_effect=_exec_hsnm) - @mock.patch.object(hbsd_common.HBSDCommon, '_update_volume_metadata') - def test_manage_existing_get_size_invalid_unit_ref(self, arg1, arg2, arg3): - self.configuration.hitachi_unit_name = self.UNIT_NAME - self.assertRaises(exception.ManageExistingInvalidReference, - self.driver.manage_existing_get_size, self._VOLUME, - self.test_existing_invalid_unit_ref) - - @mock.patch.object(hbsd_basiclib, 'get_process_lock') - @mock.patch.object(hbsd_snm2.HBSDSNM2, 'exec_hsnm', side_effect=_exec_hsnm) - @mock.patch.object(hbsd_common.HBSDCommon, '_update_volume_metadata') - def test_manage_existing_get_size_no_unit_ref(self, arg1, arg2, arg3): - self.configuration.hitachi_unit_name = self.UNIT_NAME - self.assertRaises(exception.ManageExistingInvalidReference, - 
self.driver.manage_existing_get_size, self._VOLUME, - self.test_existing_no_unit_ref) - - @mock.patch.object(hbsd_basiclib, 'get_process_lock') - @mock.patch.object(hbsd_snm2.HBSDSNM2, 'exec_hsnm', - side_effect=_exec_hsnm_get_lu_ret_err) - @mock.patch.object(hbsd_common.HBSDCommon, '_update_volume_metadata') - def test_manage_existing_get_size_ret_err(self, arg1, arg2, arg3): - self.configuration.hitachi_unit_name = self.UNIT_NAME - self.assertRaises(exception.ManageExistingInvalidReference, - self.driver.manage_existing_get_size, self._VOLUME, - self.test_existing_ref) - - @mock.patch.object(hbsd_basiclib, 'get_process_lock') - @mock.patch.object(hbsd_snm2.HBSDSNM2, 'exec_hsnm', - side_effect=_exec_hsnm_get_lu_vol_type_err) - @mock.patch.object(hbsd_common.HBSDCommon, '_update_volume_metadata') - def test_manage_existing_get_lu_vol_type_err(self, arg1, arg2, arg3): - self.configuration.hitachi_unit_name = self.UNIT_NAME - self.assertRaises(exception.ManageExistingInvalidReference, - self.driver.manage_existing_get_size, self._VOLUME, - self.test_existing_ref) - - @mock.patch.object(hbsd_basiclib, 'get_process_lock') - @mock.patch.object(hbsd_snm2.HBSDSNM2, 'exec_hsnm', - side_effect=_exec_hsnm_get_lu_dppool_err) - @mock.patch.object(hbsd_common.HBSDCommon, '_update_volume_metadata') - def test_manage_existing_get_lu_dppool_err(self, arg1, arg2, arg3): - self.configuration.hitachi_unit_name = self.UNIT_NAME - self.assertRaises(exception.ManageExistingInvalidReference, - self.driver.manage_existing_get_size, self._VOLUME, - self.test_existing_ref) - - @mock.patch.object(hbsd_basiclib, 'get_process_lock') - @mock.patch.object(hbsd_snm2.HBSDSNM2, 'exec_hsnm', - side_effect=_exec_hsnm_get_lu_size_err) - @mock.patch.object(hbsd_common.HBSDCommon, '_update_volume_metadata') - def test_manage_existing_get_lu_size_err(self, arg1, arg2, arg3): - self.configuration.hitachi_unit_name = self.UNIT_NAME - self.assertRaises(exception.ManageExistingInvalidReference, - 
self.driver.manage_existing_get_size, self._VOLUME, - self.test_existing_ref) - - @mock.patch.object(hbsd_basiclib, 'get_process_lock') - @mock.patch.object(hbsd_snm2.HBSDSNM2, 'exec_hsnm', - side_effect=_exec_hsnm_get_lu_num_port_err) - @mock.patch.object(hbsd_common.HBSDCommon, '_update_volume_metadata') - def test_manage_existing_get_lu_num_port_err(self, arg1, arg2, arg3): - self.configuration.hitachi_unit_name = self.UNIT_NAME - self.assertRaises(exception.ManageExistingInvalidReference, - self.driver.manage_existing_get_size, self._VOLUME, - self.test_existing_ref) - - @mock.patch.object(hbsd_basiclib, 'get_process_lock') - @mock.patch.object(hbsd_snm2.HBSDSNM2, 'exec_hsnm', side_effect=_exec_hsnm) - def test_unmanage(self, arg1, arg2): - self.driver.unmanage(self._VOLUME) - - @mock.patch.object(hbsd_basiclib, 'get_process_lock') - @mock.patch.object(hbsd_snm2.HBSDSNM2, 'exec_hsnm', side_effect=_exec_hsnm) - def test_unmanage_busy(self, arg1, arg2): - self.assertRaises(exception.HBSDVolumeIsBusy, - self.driver.unmanage, self.test_volume_error3) diff --git a/cinder/tests/unit/volume/drivers/hitachi/test_hitachi_hbsd_snm2_iscsi.py b/cinder/tests/unit/volume/drivers/hitachi/test_hitachi_hbsd_snm2_iscsi.py deleted file mode 100644 index 28d64e3f9..000000000 --- a/cinder/tests/unit/volume/drivers/hitachi/test_hitachi_hbsd_snm2_iscsi.py +++ /dev/null @@ -1,607 +0,0 @@ -# Copyright (C) 2014, Hitachi, Ltd. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
-# - -""" -Self test for Hitachi Block Storage Driver -""" - -import mock - -from cinder import exception -from cinder import test -from cinder import utils -from cinder.volume import configuration as conf -from cinder.volume.drivers.hitachi import hbsd_basiclib -from cinder.volume.drivers.hitachi import hbsd_common -from cinder.volume.drivers.hitachi import hbsd_iscsi -from cinder.volume.drivers.hitachi import hbsd_snm2 - - -def _exec_hsnm(*args, **kargs): - return HBSDSNM2ISCSIDriverTest.hsnm_vals.get(args) - - -def _exec_hsnm_init(*args, **kargs): - return HBSDSNM2ISCSIDriverTest.hsnm_vals_init.get(args) - - -class HBSDSNM2ISCSIDriverTest(test.TestCase): - """Test HBSDSNM2ISCSIDriver.""" - - audppool_result = " DP RAID \ - Current Utilization Current Over Replication\ - Available Current Replication Rotational \ - \ - Stripe \ - Needing Preparation\n\ - Pool Tier Mode Level Total Capacity Consumed Capacity \ - Percent Provisioning Percent Capacity \ -Utilization Percent Type Speed Encryption Status \ - \ -Reconstruction Progress Size Capacity\n\ - 30 Disable 1( 1D+1D) 532.0 GB 2.0 GB \ - 1% 24835% 532.0 GB \ - 1% SAS 10000rpm N/A Normal \ - N/A \ - 256KB 0.0 GB" - - aureplicationlocal_result = "Pair Name LUN Pair \ -LUN Status Copy Type Group \ - Point-in-Time MU Number\n\ - 0 10 0 Split( 99%) \ - ShadowImage ---:Ungrouped N/A\ - " - - auluref_result = " Stripe RAID DP Tier \ - RAID Rotational Number\n\ - LU Capacity Size Group Pool Mode Level Type\ - Speed of Paths Status\n\ - 0 2097152 blocks 256KB 0 0 Enable 0 Normal" - - auhgwwn_result = "Port 00 Host Group Security ON\n Detected WWN\n \ -Name Port Name Host Group\n\ -HBSD-00 10000000C97BCE7A 001:HBSD-01\n\ - Assigned WWN\n Name Port Name \ -Host Group\n abcdefg 10000000C97BCE7A \ -001:HBSD-01" - - autargetini_result = "Port 00 Target Security ON\n\ - Target Name \ -iSCSI Name\n\ - 001:HBSD-01 \ -iqn" - - autargetini_result2 = "Port 00 Target Security ON\n\ - Target Name \ -iSCSI Name" - - autargetmap_result 
= "Mapping Mode = ON\n\ -Port Target H-LUN LUN\n\ - 00 001:HBSD-01 0 1000" - - auiscsi_result = "Port 00\n\ - Port Number : 3260\n\ - Keep Alive Timer[sec.] : 60\n\ - MTU : 1500\n\ - Transfer Rate : 1Gbps\n\ - Link Status : Link Up\n\ - Ether Address : 00:00:87:33:D1:3E\n\ - IPv4\n\ - IPv4 Address : 192.168.0.1\n\ - IPv4 Subnet Mask : 255.255.252.0\n\ - IPv4 Default Gateway : 0.0.0.0\n\ - IPv6 Status : Disable\n\ - Connecting Hosts : 0\n\ - Result : Normal\n\ - VLAN Status : Disable\n\ - VLAN ID : N/A\n\ - Header Digest : Enable\n\ - Data Digest : Enable\n\ - Window Scale : Disable" - - autargetdef_result = "Port 00\n\ - Authentication Mutual\n\ - Target Method CHAP Algorithm \ -Authentication\n\ - 001:T000 None --- ---\n\ - User Name : ---\n\ - iSCSI Name : iqn-target" - - hsnm_vals = { - ('audppool', '-unit None -refer -g'): [0, "%s" % audppool_result, ""], - ('aureplicationlocal', - '-unit None -create -si -pvol 1 -svol 1 -compsplit -pace normal'): - [0, "", ""], - ('aureplicationlocal', - '-unit None -create -si -pvol 3 -svol 1 -compsplit -pace normal'): - [1, "", ""], - ('aureplicationlocal', '-unit None -refer -pvol 1'): - [0, "%s" % aureplicationlocal_result, ""], - ('aureplicationlocal', '-unit None -refer -pvol 3'): - [1, "", "DMEC002015"], - ('aureplicationlocal', '-unit None -refer -svol 3'): - [1, "", "DMEC002015"], - ('aureplicationlocal', '-unit None -simplex -si -pvol 1 -svol 0'): - [0, "", ""], - ('aureplicationlocal', '-unit None -simplex -si -pvol 1 -svol 1'): - [1, "", ""], - ('auluchgsize', '-unit None -lu 1 -size 256g'): - [0, "", ""], - ('auludel', '-unit None -lu 1 -f'): [0, "", ""], - ('auludel', '-unit None -lu 3 -f'): [1, "", ""], - ('auluadd', '-unit None -lu 1 -dppoolno 30 -size 128g'): [0, "", ""], - ('auluadd', '-unit None -lu 1 -dppoolno 30 -size 256g'): [1, "", ""], - ('auluref', '-unit None'): [0, "%s" % auluref_result, ""], - ('auluref', '-unit None -lu 0'): [0, "%s" % auluref_result, ""], - ('autargetmap', '-unit None -add 0 0 1 1 
1'): [0, "", ""], - ('autargetmap', '-unit None -add 0 0 0 0 1'): [0, "", ""], - ('autargetini', '-unit None -refer'): - [0, "%s" % autargetini_result, ""], - ('autargetini', '-unit None -add 0 0 -tno 0 -iname iqn'): - [0, "", ""], - ('autargetmap', '-unit None -refer'): - [0, "%s" % autargetmap_result, ""], - ('autargetdef', - '-unit None -add 0 0 -tno 0 -talias HBSD-0.0.0.0 -iname iqn.target \ --authmethod None'): - [0, "", ""], - ('autargetdef', '-unit None -add 0 0 -tno 0 -talias HBSD-0.0.0.0 \ --iname iqnX.target -authmethod None'): - [1, "", ""], - ('autargetopt', '-unit None -set 0 0 -talias HBSD-0.0.0.0 \ --ReportFullPortalList enable'): - [0, "", ""], - ('auiscsi', '-unit None -refer'): [0, "%s" % auiscsi_result, ""], - ('autargetdef', '-unit None -refer'): - [0, "%s" % autargetdef_result, ""]} - - hsnm_vals_init = { - ('audppool', '-unit None -refer -g'): [0, "%s" % audppool_result, ""], - ('aureplicationlocal', - '-unit None -create -si -pvol 1 -svol 1 -compsplit -pace normal'): - [0, 0, ""], - ('aureplicationlocal', '-unit None -refer -pvol 1'): - [0, "%s" % aureplicationlocal_result, ""], - ('aureplicationlocal', '-unit None -simplex -si -pvol 1 -svol 0'): - [0, 0, ""], - ('auluchgsize', '-unit None -lu 1 -size 256g'): - [0, 0, ""], - ('auludel', '-unit None -lu 1 -f'): [0, "", ""], - ('auluadd', '-unit None -lu 1 -dppoolno 30 -size 128g'): [0, "", ""], - ('auluref', '-unit None'): [0, "%s" % auluref_result, ""], - ('autargetmap', '-unit None -add 0 0 1 1 1'): [0, "", ""], - ('autargetmap', '-unit None -add 0 0 0 0 1'): [0, "", ""], - ('autargetini', '-unit None -refer'): - [0, "%s" % autargetini_result2, ""], - ('autargetini', '-unit None -add 0 0 -tno 0 -iname iqn'): - [0, "", ""], - ('autargetmap', '-unit None -refer'): - [0, "%s" % autargetmap_result, ""], - ('autargetdef', - '-unit None -add 0 0 -tno 0 -talias HBSD-0.0.0.0 -iname iqn.target \ --authmethod None'): - [0, "", ""], - ('autargetopt', '-unit None -set 0 0 -talias HBSD-0.0.0.0 \ 
--ReportFullPortalList enable'): - [0, "", ""], - ('auiscsi', '-unit None -refer'): [0, "%s" % auiscsi_result, ""], - ('autargetdef', '-unit None -refer'): - [0, "%s" % autargetdef_result, ""], - ('auman', '-help'): - [0, "Version 27.50", ""]} - -# The following information is passed on to tests, when creating a volume - - _VOLUME = {'size': 128, 'volume_type': None, 'source_volid': '0', - 'provider_location': '1', 'name': 'test', - 'id': 'abcdefg', 'snapshot_id': '0', 'status': 'available'} - - test_volume = {'name': 'test_volume', 'size': 128, - 'id': 'test-volume-0', - 'provider_location': '1', 'status': 'available'} - - test_volume_larger = {'name': 'test_volume', 'size': 256, - 'id': 'test-volume-0', - 'provider_location': '1', 'status': 'available'} - - test_volume_error = {'name': 'test_volume_error', 'size': 256, - 'id': 'test-volume-error', - 'provider_location': '3', 'status': 'available'} - - test_volume_error1 = {'name': 'test_volume_error', 'size': 128, - 'id': 'test-volume-error', - 'provider_location': None, 'status': 'available'} - - test_volume_error2 = {'name': 'test_volume_error', 'size': 256, - 'id': 'test-volume-error', - 'provider_location': '1', 'status': 'available'} - - test_volume_error3 = {'name': 'test_volume3', 'size': 128, - 'id': 'test-volume3', - 'volume_metadata': [{'key': 'type', - 'value': 'V-VOL'}], - 'provider_location': '1', 'status': 'available'} - - test_volume_error4 = {'name': 'test_volume4', 'size': 128, - 'id': 'test-volume2', - 'provider_location': '3', 'status': 'available'} - - test_snapshot = {'volume_name': 'test', 'size': 128, - 'volume_size': 128, 'name': 'test-snap', - 'volume_id': 0, 'id': 'test-snap-0', 'volume': _VOLUME, - 'provider_location': '1', 'status': 'available'} - - test_snapshot_error2 = {'volume_name': 'test', 'size': 128, - 'volume_size': 128, 'name': 'test-snap', - 'volume_id': 0, 'id': 'test-snap-0', - 'volume': test_volume_error, - 'provider_location': None, 'status': 'available'} - - UNIT_NAME = 
'HUS110_91122819' - test_existing_ref = {'ldev': '0', 'unit_name': UNIT_NAME} - test_existing_none_ldev_ref = {'ldev': None, 'unit_name': UNIT_NAME} - test_existing_invalid_ldev_ref = {'ldev': 'AAA', 'unit_name': UNIT_NAME} - test_existing_no_ldev_ref = {'unit_name': UNIT_NAME} - test_existing_none_unit_ref = {'ldev': '0', 'unit_name': None} - test_existing_invalid_unit_ref = {'ldev': '0', 'unit_name': 'Dummy'} - test_existing_no_unit_ref = {'ldev': '0'} - - def __init__(self, *args, **kwargs): - super(HBSDSNM2ISCSIDriverTest, self).__init__(*args, **kwargs) - - @mock.patch.object(utils, 'brick_get_connector_properties', - return_value={'ip': '0.0.0.0', - 'initiator': 'iqn'}) - @mock.patch.object(hbsd_basiclib, 'get_process_lock') - @mock.patch.object(hbsd_snm2.HBSDSNM2, 'exec_hsnm', - side_effect=_exec_hsnm_init) - @mock.patch.object(utils, 'execute', - return_value=['', '']) - def setUp(self, args1, arg2, arg3, arg4): - super(HBSDSNM2ISCSIDriverTest, self).setUp() - self._setup_config() - self._setup_driver() - self.driver.check_param() - self.driver.common.create_lock_file() - self.driver.common.command.connect_storage() - self.driver.max_hostgroups = \ - self.driver.common.command.get_max_hostgroups() - self.driver.add_hostgroup() - self.driver.output_param_to_log() - self.driver.do_setup_status.set() - - def _setup_config(self): - self.configuration = mock.Mock(conf.Configuration) - self.configuration.hitachi_pool_id = 30 - self.configuration.hitachi_thin_pool_id = 31 - self.configuration.hitachi_target_ports = "00" - self.configuration.hitachi_debug_level = 0 - self.configuration.hitachi_serial_number = None - self.configuration.hitachi_unit_name = "None" - self.configuration.hitachi_group_request = True - self.configuration.hitachi_group_range = "0-1" - self.configuration.config_group = "None" - self.configuration.hitachi_ldev_range = "0-100" - self.configuration.hitachi_default_copy_method = 'FULL' - self.configuration.hitachi_copy_check_interval = 1 - 
self.configuration.hitachi_async_copy_check_interval = 1 - self.configuration.hitachi_copy_speed = 3 - self.configuration.hitachi_auth_method = None - self.configuration.hitachi_auth_user = "HBSD-CHAP-user" - self.configuration.hitachi_auth_password = "HBSD-CHAP-password" - self.configuration.hitachi_add_chap_user = "False" - - def _setup_driver(self): - self.driver = hbsd_iscsi.HBSDISCSIDriver( - configuration=self.configuration) - context = None - db = None - self.driver.common = hbsd_common.HBSDCommon( - self.configuration, self.driver, context, db) - self.driver.common.command = hbsd_snm2.HBSDSNM2(self.configuration) - self.driver.common.horcmgr_flock = \ - self.driver.common.command.set_horcmgr_flock() - -# API test cases - @mock.patch.object(hbsd_basiclib, 'get_process_lock') - @mock.patch.object(hbsd_common.HBSDCommon, 'get_volume_metadata') - @mock.patch.object(hbsd_snm2.HBSDSNM2, 'exec_hsnm', side_effect=_exec_hsnm) - def test_create_volume(self, arg1, arg2, arg3): - """test create_volume.""" - ret = self.driver.create_volume(self._VOLUME) - vol = self._VOLUME.copy() - vol['provider_location'] = ret['provider_location'] - self.assertEqual('1', vol['provider_location']) - - @mock.patch.object(hbsd_basiclib, 'get_process_lock') - @mock.patch.object(hbsd_common.HBSDCommon, 'get_volume_metadata') - @mock.patch.object(hbsd_snm2.HBSDSNM2, 'exec_hsnm', side_effect=_exec_hsnm) - def test_create_volume_error(self, arg1, arg2, arg3): - """test create_volume.""" - self.assertRaises(exception.HBSDCmdError, - self.driver.create_volume, - self.test_volume_error) - - @mock.patch.object(hbsd_basiclib, 'get_process_lock') - @mock.patch.object(hbsd_snm2.HBSDSNM2, 'exec_hsnm', side_effect=_exec_hsnm) - def test_get_volume_stats(self, arg1, arg2): - """test get_volume_stats.""" - stats = self.driver.get_volume_stats(True) - self.assertEqual('Hitachi', stats['vendor_name']) - - @mock.patch.object(hbsd_basiclib, 'get_process_lock') - @mock.patch.object(hbsd_snm2.HBSDSNM2, 
'exec_hsnm', side_effect=_exec_hsnm) - def test_get_volume_stats_error(self, arg1, arg2): - """test get_volume_stats.""" - self.configuration.hitachi_pool_id = 29 - stats = self.driver.get_volume_stats(True) - self.assertEqual({}, stats) - self.configuration.hitachi_pool_id = 30 - - @mock.patch.object(hbsd_basiclib, 'get_process_lock') - @mock.patch.object(hbsd_snm2.HBSDSNM2, 'exec_hsnm', side_effect=_exec_hsnm) - def test_extend_volume(self, arg1, arg2): - """test extend_volume.""" - self.driver.extend_volume(self._VOLUME, 256) - - @mock.patch.object(hbsd_basiclib, 'get_process_lock') - @mock.patch.object(hbsd_snm2.HBSDSNM2, 'exec_hsnm', side_effect=_exec_hsnm) - def test_extend_volume_error(self, arg1, arg2): - """test extend_volume.""" - self.assertRaises(exception.HBSDError, self.driver.extend_volume, - self.test_volume_error3, 256) - - @mock.patch.object(hbsd_basiclib, 'get_process_lock') - @mock.patch.object(hbsd_snm2.HBSDSNM2, 'exec_hsnm', side_effect=_exec_hsnm) - def test_delete_volume(self, arg1, arg2): - """test delete_volume.""" - self.driver.delete_volume(self._VOLUME) - - @mock.patch.object(hbsd_basiclib, 'get_process_lock') - @mock.patch.object(hbsd_snm2.HBSDSNM2, 'exec_hsnm', side_effect=_exec_hsnm) - def test_delete_volume_error(self, arg1, arg2): - """test delete_volume.""" - self.assertRaises(exception.HBSDCmdError, - self.driver.delete_volume, - self.test_volume_error4) - - @mock.patch.object(hbsd_basiclib, 'get_process_lock') - @mock.patch.object(hbsd_common.HBSDCommon, 'get_snapshot_metadata', - return_value={'dummy_snapshot_meta': 'snapshot_meta'}) - @mock.patch.object(hbsd_common.HBSDCommon, 'get_volume_metadata', - return_value={'dummy_volume_meta': 'meta'}) - @mock.patch.object(hbsd_common.HBSDCommon, 'get_volume', - return_value=_VOLUME) - @mock.patch.object(hbsd_snm2.HBSDSNM2, 'exec_hsnm', side_effect=_exec_hsnm) - def test_create_snapshot(self, arg1, arg2, arg3, arg4, arg5): - """test create_snapshot.""" - ret = 
self.driver.create_volume(self._VOLUME) - ret = self.driver.create_snapshot(self.test_snapshot) - self.assertEqual('1', ret['provider_location']) - - @mock.patch.object(hbsd_basiclib, 'get_process_lock') - @mock.patch.object(hbsd_common.HBSDCommon, 'get_snapshot_metadata', - return_value={'dummy_snapshot_meta': 'snapshot_meta'}) - @mock.patch.object(hbsd_common.HBSDCommon, 'get_volume_metadata', - return_value={'dummy_volume_meta': 'meta'}) - @mock.patch.object(hbsd_common.HBSDCommon, 'get_volume', - return_value=test_volume_error) - @mock.patch.object(hbsd_snm2.HBSDSNM2, 'exec_hsnm', side_effect=_exec_hsnm) - def test_create_snapshot_error(self, arg1, arg2, arg3, arg4, arg5): - """test create_snapshot.""" - self.assertRaises(exception.HBSDCmdError, - self.driver.create_snapshot, - self.test_snapshot_error2) - - @mock.patch.object(hbsd_basiclib, 'get_process_lock') - @mock.patch.object(hbsd_snm2.HBSDSNM2, 'exec_hsnm', side_effect=_exec_hsnm) - def test_delete_snapshot(self, arg1, arg2): - """test delete_snapshot.""" - self.driver.delete_snapshot(self.test_snapshot) - return - - @mock.patch.object(hbsd_basiclib, 'get_process_lock') - @mock.patch.object(hbsd_snm2.HBSDSNM2, 'exec_hsnm', side_effect=_exec_hsnm) - def test_delete_snapshot_error(self, arg1, arg2): - """test delete_snapshot.""" - self.driver.delete_snapshot(self.test_snapshot_error2) - return - - @mock.patch.object(hbsd_basiclib, 'get_process_lock') - @mock.patch.object(hbsd_common.HBSDCommon, 'get_volume_metadata', - return_value={'dummy_volume_meta': 'meta'}) - @mock.patch.object(hbsd_snm2.HBSDSNM2, 'exec_hsnm', side_effect=_exec_hsnm) - def test_create_volume_from_snapshot(self, arg1, arg2, arg3): - """test create_volume_from_snapshot.""" - vol = self.driver.create_volume_from_snapshot(self._VOLUME, - self.test_snapshot) - self.assertIsNotNone(vol) - return - - @mock.patch.object(hbsd_basiclib, 'get_process_lock') - @mock.patch.object(hbsd_common.HBSDCommon, 'get_volume_metadata', - 
return_value={'dummy_volume_meta': 'meta'}) - @mock.patch.object(hbsd_snm2.HBSDSNM2, 'exec_hsnm', side_effect=_exec_hsnm) - def test_create_volume_from_snapshot_error(self, arg1, arg2, arg3): - """test create_volume_from_snapshot.""" - self.assertRaises(exception.HBSDError, - self.driver.create_volume_from_snapshot, - self.test_volume_error2, self.test_snapshot) - return - - @mock.patch.object(hbsd_common.HBSDCommon, 'get_volume_metadata', - return_value={'dummy_volume_meta': 'meta'}) - @mock.patch.object(hbsd_common.HBSDCommon, 'get_volume', - return_value=_VOLUME) - @mock.patch.object(hbsd_snm2.HBSDSNM2, 'exec_hsnm', side_effect=_exec_hsnm) - @mock.patch.object(hbsd_basiclib, 'get_process_lock') - def test_create_cloned_volume(self, arg1, arg2, arg3, arg4): - """test create_cloned_volume.""" - vol = self.driver.create_cloned_volume(self._VOLUME, - self.test_snapshot) - self.assertIsNotNone(vol) - return - - @mock.patch.object(hbsd_common.HBSDCommon, 'get_volume_metadata', - return_value={'dummy_volume_meta': 'meta'}) - @mock.patch.object(hbsd_common.HBSDCommon, 'get_volume', - return_value=_VOLUME) - @mock.patch.object(hbsd_common.HBSDCommon, 'extend_volume') - @mock.patch.object(hbsd_snm2.HBSDSNM2, 'exec_hsnm', side_effect=_exec_hsnm) - @mock.patch.object(hbsd_basiclib, 'get_process_lock') - def test_create_cloned_volume_larger(self, arg1, arg2, arg3, arg4, arg5): - """test create_cloned_volume.""" - vol = self.driver.create_cloned_volume(self.test_volume_larger, - self._VOLUME) - self.assertIsNotNone(vol) - arg3.assert_called_once_with(self.test_volume_larger, - self.test_volume_larger['size']) - return - - @mock.patch.object(hbsd_common.HBSDCommon, 'get_volume_metadata', - return_value={'dummy_volume_meta': 'meta'}) - @mock.patch.object(hbsd_common.HBSDCommon, 'get_volume', - return_value=test_volume_error1) - @mock.patch.object(hbsd_snm2.HBSDSNM2, 'exec_hsnm', side_effect=_exec_hsnm) - @mock.patch.object(hbsd_basiclib, 'get_process_lock') - def 
test_create_cloned_volume_error(self, arg1, arg2, arg3, arg4): - """test create_cloned_volume.""" - self.assertRaises(exception.HBSDError, - self.driver.create_cloned_volume, - self._VOLUME, self.test_volume_error1) - return - - @mock.patch.object(hbsd_basiclib, 'get_process_lock') - @mock.patch.object(hbsd_snm2.HBSDSNM2, 'exec_hsnm', side_effect=_exec_hsnm) - def test_initialize_connection(self, arg1, arg2): - """test initialize connection.""" - connector = { - 'wwpns': '0x100000', 'ip': '0.0.0.0', 'initiator': - 'iqn'} - rc = self.driver.initialize_connection(self._VOLUME, connector) - self.assertEqual('iscsi', rc['driver_volume_type']) - self.assertEqual('iqn-target', rc['data']['target_iqn']) - self.assertEqual(1, rc['data']['target_lun']) - return - - @mock.patch.object(hbsd_basiclib, 'get_process_lock') - @mock.patch.object(hbsd_snm2.HBSDSNM2, 'exec_hsnm', side_effect=_exec_hsnm) - def test_initialize_connection_error(self, arg1, arg2): - """test initialize connection.""" - connector = { - 'wwpns': '0x100000', 'ip': '0.0.0.0', 'initiator': - 'iqnX'} - self.assertRaises(exception.HBSDError, - self.driver.initialize_connection, - self._VOLUME, connector) - return - - @mock.patch.object(hbsd_snm2.HBSDSNM2, 'exec_hsnm', side_effect=_exec_hsnm) - def test_terminate_connection(self, arg1): - """test terminate connection.""" - connector = { - 'wwpns': '0x100000', 'ip': '0.0.0.0', 'initiator': - 'iqn'} - self.driver.terminate_connection(self._VOLUME, connector) - return - - @mock.patch.object(hbsd_snm2.HBSDSNM2, 'exec_hsnm', side_effect=_exec_hsnm) - def test_terminate_connection_error(self, arg1): - """test terminate connection.""" - connector = {'ip': '0.0.0.0'} - self.assertRaises(exception.HBSDError, - self.driver.terminate_connection, - self._VOLUME, connector) - return - - @mock.patch.object(hbsd_basiclib, 'get_process_lock') - @mock.patch.object(hbsd_snm2.HBSDSNM2, 'exec_hsnm', side_effect=_exec_hsnm) - def test_manage_existing(self, arg1, arg2): - rc = 
self.driver.manage_existing(self._VOLUME, self.test_existing_ref) - self.assertEqual(0, rc['provider_location']) - - @mock.patch.object(hbsd_basiclib, 'get_process_lock') - @mock.patch.object(hbsd_snm2.HBSDSNM2, 'exec_hsnm', side_effect=_exec_hsnm) - @mock.patch.object(hbsd_common.HBSDCommon, '_update_volume_metadata') - def test_manage_existing_get_size(self, arg1, arg2, arg3): - self.configuration.hitachi_unit_name = self.UNIT_NAME - size = self.driver.manage_existing_get_size(self._VOLUME, - self.test_existing_ref) - self.assertEqual(1, size) - - @mock.patch.object(hbsd_basiclib, 'get_process_lock') - @mock.patch.object(hbsd_snm2.HBSDSNM2, 'exec_hsnm', side_effect=_exec_hsnm) - @mock.patch.object(hbsd_common.HBSDCommon, '_update_volume_metadata') - def test_manage_existing_get_size_none_ldev(self, arg1, arg2, arg3): - self.configuration.hitachi_unit_name = self.UNIT_NAME - self.assertRaises(exception.ManageExistingInvalidReference, - self.driver.manage_existing_get_size, self._VOLUME, - self.test_existing_none_ldev_ref) - - @mock.patch.object(hbsd_basiclib, 'get_process_lock') - @mock.patch.object(hbsd_snm2.HBSDSNM2, 'exec_hsnm', side_effect=_exec_hsnm) - @mock.patch.object(hbsd_common.HBSDCommon, '_update_volume_metadata') - def test_manage_existing_get_size_invalid_ldev_ref(self, arg1, arg2, arg3): - self.configuration.hitachi_unit_name = self.UNIT_NAME - self.assertRaises(exception.ManageExistingInvalidReference, - self.driver.manage_existing_get_size, self._VOLUME, - self.test_existing_invalid_ldev_ref) - - @mock.patch.object(hbsd_basiclib, 'get_process_lock') - @mock.patch.object(hbsd_snm2.HBSDSNM2, 'exec_hsnm', side_effect=_exec_hsnm) - @mock.patch.object(hbsd_common.HBSDCommon, '_update_volume_metadata') - def test_manage_existing_get_size_no_ldev_ref(self, arg1, arg2, arg3): - self.configuration.hitachi_unit_name = self.UNIT_NAME - self.assertRaises(exception.ManageExistingInvalidReference, - self.driver.manage_existing_get_size, self._VOLUME, - 
self.test_existing_no_ldev_ref) - - @mock.patch.object(hbsd_basiclib, 'get_process_lock') - @mock.patch.object(hbsd_snm2.HBSDSNM2, 'exec_hsnm', side_effect=_exec_hsnm) - @mock.patch.object(hbsd_common.HBSDCommon, '_update_volume_metadata') - def test_manage_existing_get_size_none_unit_ref(self, arg1, arg2, arg3): - self.configuration.hitachi_unit_name = self.UNIT_NAME - self.assertRaises(exception.ManageExistingInvalidReference, - self.driver.manage_existing_get_size, self._VOLUME, - self.test_existing_none_unit_ref) - - @mock.patch.object(hbsd_basiclib, 'get_process_lock') - @mock.patch.object(hbsd_snm2.HBSDSNM2, 'exec_hsnm', side_effect=_exec_hsnm) - @mock.patch.object(hbsd_common.HBSDCommon, '_update_volume_metadata') - def test_manage_existing_get_size_invalid_unit_ref(self, arg1, arg2, arg3): - self.configuration.hitachi_unit_name = self.UNIT_NAME - self.assertRaises(exception.ManageExistingInvalidReference, - self.driver.manage_existing_get_size, self._VOLUME, - self.test_existing_invalid_unit_ref) - - @mock.patch.object(hbsd_basiclib, 'get_process_lock') - @mock.patch.object(hbsd_snm2.HBSDSNM2, 'exec_hsnm', side_effect=_exec_hsnm) - @mock.patch.object(hbsd_common.HBSDCommon, '_update_volume_metadata') - def test_manage_existing_get_size_no_unit_ref(self, arg1, arg2, arg3): - self.configuration.hitachi_unit_name = self.UNIT_NAME - self.assertRaises(exception.ManageExistingInvalidReference, - self.driver.manage_existing_get_size, self._VOLUME, - self.test_existing_no_unit_ref) - - @mock.patch.object(hbsd_basiclib, 'get_process_lock') - @mock.patch.object(hbsd_snm2.HBSDSNM2, 'exec_hsnm', side_effect=_exec_hsnm) - def test_unmanage(self, arg1, arg2): - self.driver.unmanage(self._VOLUME) - - @mock.patch.object(hbsd_basiclib, 'get_process_lock') - @mock.patch.object(hbsd_snm2.HBSDSNM2, 'exec_hsnm', side_effect=_exec_hsnm) - def test_unmanage_busy(self, arg1, arg2): - self.assertRaises(exception.HBSDVolumeIsBusy, - self.driver.unmanage, self.test_volume_error3) 
diff --git a/cinder/tests/unit/volume/drivers/hitachi/test_hitachi_hnas_backend.py b/cinder/tests/unit/volume/drivers/hitachi/test_hitachi_hnas_backend.py deleted file mode 100644 index 3285c2c8d..000000000 --- a/cinder/tests/unit/volume/drivers/hitachi/test_hitachi_hnas_backend.py +++ /dev/null @@ -1,519 +0,0 @@ -# Copyright (c) 2014 Hitachi Data Systems, Inc. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -# - -import mock -import os -import paramiko -import time - -from oslo_concurrency import processutils as putils - -from cinder import exception -from cinder import test -from cinder import utils -from cinder.volume.drivers.hitachi import hnas_backend - - -evsfs_list = "\n\ -FS ID FS Label FS Permanent ID EVS ID EVS Label\n\ ------ ----------- ------------------ ------ ---------\n\ - 1026 gold 0xaadee0e035cfc0b7 1 EVS-Manila\n\ - 1029 test_hdp 0xaadee09634acfcac 1 EVS-Manila\n\ - 1030 fs-cinder 0xaadfcf742fba644e 2 EVS-Cinder\n\ - 1031 cinder2 0xaadfcf7e0769a6bc 3 EVS-Test\n\ - 1024 fs02-husvm 0xaac8715e2e9406cd 3 EVS-Test\n\ -\n" - -cluster_getmac = "cluster MAC: 83-68-96-AA-DA-5D" - -version = "\n\ -Model: HNAS 4040 \n\n\ -Software: 11.2.3319.14 (built 2013-09-19 12:34:24+01:00) \n\n\ -Hardware: NAS Platform (M2SEKW1339109) \n\n\ -board MMB1 \n\ -mmb 11.2.3319.14 release (2013-09-19 12:34:24+01:00)\n\n\ -board MFB1 \n\ -mfb1hw MB v0883 WL v002F TD v002F FD v002F TC v0059 \ - RY v0059 TY v0059 IC v0059 WF v00E2 FS v00E2 OS v00E2 \ - WD 
v00E2 DI v001A FC v0002 \n\ -Serial no B1339745 (Thu Jan 1 00:00:50 2009) \n\n\ -board MCP \n\ -Serial no B1339109 (Thu Jan 1 00:00:49 2009) \n\ -\n" - -evsipaddr = "\n\ -EVS Type Label IP Address Mask Port \n\ ----------- --------------- ------------------ --------------- ------\n\ -admin hnas4040 192.0.2.2 255.255.255.0 eth1 \n\ -admin hnas4040 172.24.44.15 255.255.255.0 eth0 \n\ -evs 1 EVSTest1 172.24.44.20 255.255.255.0 ag1 \n\ -evs 1 EVSTest1 10.0.0.20 255.255.255.0 ag1 \n\ -evs 2 EVSTest2 172.24.44.21 255.255.255.0 ag1 \n\ -\n" - -df_f = "\n\ -ID Label EVS Size Used Snapshots Deduped Avail \ -Thin ThinSize ThinAvail FS Type\n\ ----- ---------- --- ------ ------------ --------- ------- ------------ \ ----- -------- --------- --------------------\n\ -1025 fs-cinder 2 250 GB 21.4 GB (9%) 0 B (0%) NA 228 GB (91%) \ - No 32 KB,WFS-2,128 DSBs\n\ -\n" - -df_f_tb = "\n\ -ID Label EVS Size Used Snapshots Deduped Avail \ -Thin ThinSize ThinAvail FS Type\n\ ----- ---------- --- ------ ------------ --------- ------- ------------ \ ----- -------- --------- --------------------\n\ -1025 fs-cinder 2 250 TB 21.4 TB (9%) 0 B (0%) NA 228 TB (91%) \ - No 32 KB,WFS-2,128 DSBs\n\ -\n" - -nfs_export = "\n\ -Export name: /export01-husvm \n\ -Export path: /export01-husvm \n\ -File system label: fs-cinder \n\ -File system size: 250 GB \n\ -File system free space: 228 GB \n\ -File system state: \n\ -formatted = Yes \n\ -mounted = Yes \n\ -failed = No \n\ -thin provisioned = No \n\ -Access snapshots: Yes \n\ -Display snapshots: Yes \n\ -Read Caching: Disabled \n\ -Disaster recovery setting: \n\ -Recovered = No \n\ -Transfer setting = Use file system default \n\n\ -Export configuration: \n\ -127.0.0.1 \n\ -\n" - -df_f_single_evs = "\n\ -ID Label Size Used Snapshots Deduped Avail \ -Thin ThinSize ThinAvail FS Type\n\ ----- ---------- ------ ------------ --------- ------- ------------ \ ----- -------- --------- --------------------\n\ -1025 fs-cinder 250 GB 21.4 GB (9%) 0 B (0%) NA 228 GB 
(91%) \ - No 32 KB,WFS-2,128 DSBs\n\ -\n" - -nfs_export_tb = "\n\ -Export name: /export01-husvm \n\ -Export path: /export01-husvm \n\ -File system label: fs-cinder \n\ -File system size: 250 TB \n\ -File system free space: 228 TB \n\ -\n" - -nfs_export_not_available = "\n\ -Export name: /export01-husvm \n\ -Export path: /export01-husvm \n\ -File system label: fs-cinder \n\ - *** not available *** \n\ -\n" - -evs_list = "\n\ -Node EVS ID Type Label Enabled Status IP Address Port \n\ ----- ------ ------- --------------- ------- ------ ------------------- ---- \n\ - 1 Cluster hnas4040 Yes Online 192.0.2.200 eth1 \n\ - 1 0 Admin hnas4040 Yes Online 192.0.2.2 eth1 \n\ - 172.24.44.15 eth0 \n\ - 172.24.49.101 ag2 \n\ - 1 1 Service EVS-Manila Yes Online 172.24.49.32 ag2 \n\ - 172.24.48.32 ag4 \n\ - 1 2 Service EVS-Cinder Yes Online 172.24.49.21 ag2 \n\ - 1 3 Service EVS-Test Yes Online 192.168.100.100 ag2 \n\ -\n" - -lu_list = "Name : cinder-lu \n\ -Comment: \n\ -Path : /.cinder/cinder-lu \n\ -Size : 2 GB \n\ -File System : fs-cinder \n\ -File System Mounted : YES \n\ -Logical Unit Mounted: No" - -lu_list_tb = "Name : test-lu \n\ -Comment: \n\ -Path : /.cinder/test-lu \n\ -Size : 2 TB \n\ -File System : fs-cinder \n\ -File System Mounted : YES \n\ -Logical Unit Mounted: No" - -hnas_fs_list = "%(l1)s\n\n%(l2)s\n\n " % {'l1': lu_list, - 'l2': lu_list_tb} - -add_targetsecret = "Target created successfully." - -backend_opts = {'mgmt_ip0': '0.0.0.0', - 'cluster_admin_ip0': None, - 'ssh_port': '22', - 'username': 'supervisor', - 'password': 'supervisor', - 'ssh_private_key': 'test_key'} - -target_chap_disable = "\n\ -Alias : cinder-default \n\ -Globally unique name: iqn.2014-12.10.10.10.10:evstest1.cinder-default \n\ -Comment : \n\ -Secret : \n\ -Authentication : Disabled \n\ -Logical units : No logical units. \n\ -\n\ - LUN Logical Unit \n\ - ---- -------------------------------- \n\ - 0 cinder-lu \n\ - 1 volume-99da7ae7-1e7f-4d57-8bf... 
\n\ -\n\ -Access configuration: \n\ -" - -file_clone_stat = "Clone: /nfs_cinder/cinder-lu \n\ - SnapshotFile: FileHandle[00000000004010000d20116826ffffffffffffff] \n\ -\n\ - SnapshotFile: FileHandle[00000000004029000d81f26826ffffffffffffff] \n\ -" - -file_clone_stat_snap_file1 = "\ -FileHandle[00000000004010000d20116826ffffffffffffff] \n\n\ -References: \n\ - Clone: /nfs_cinder/cinder-lu \n\ - Clone: /nfs_cinder/snapshot-lu-1 \n\ - Clone: /nfs_cinder/snapshot-lu-2 \n\ -" - -file_clone_stat_snap_file2 = "\ -FileHandle[00000000004010000d20116826ffffffffffffff] \n\n\ -References: \n\ - Clone: /nfs_cinder/volume-not-used \n\ - Clone: /nfs_cinder/snapshot-1 \n\ - Clone: /nfs_cinder/snapshot-2 \n\ -" - -not_a_clone = "\ -file-clone-stat: failed to get predecessor snapshot-files: File is not a clone" - -file_relatives =\ - [' /nfs_cinder/snapshot-lu-1 ', - ' /nfs_cinder/snapshot-lu-2 ', - ' /nfs_cinder/volume-not-used ', - ' /nfs_cinder/snapshot-1 ', - ' /nfs_cinder/snapshot-2 '] - - -class HDSHNASBackendTest(test.TestCase): - - def __init__(self, *args, **kwargs): - super(HDSHNASBackendTest, self).__init__(*args, **kwargs) - - def setUp(self): - super(HDSHNASBackendTest, self).setUp() - self.hnas_backend = hnas_backend.HNASSSHBackend(backend_opts) - - def test_run_cmd(self): - self.mock_object(os.path, 'isfile', return_value=True) - self.mock_object(utils, 'execute') - self.mock_object(time, 'sleep') - self.mock_object(paramiko, 'SSHClient') - self.mock_object(paramiko.RSAKey, 'from_private_key_file') - self.mock_object(putils, 'ssh_execute', - return_value=(df_f, '')) - - out, err = self.hnas_backend._run_cmd('ssh', '0.0.0.0', - 'supervisor', 'supervisor', - 'df', '-a') - - self.assertIn('fs-cinder', out) - self.assertIn('WFS-2,128 DSBs', out) - - def test_run_cmd_retry_exception(self): - self.hnas_backend.cluster_admin_ip0 = '172.24.44.11' - - exceptions = [putils.ProcessExecutionError(stderr='Connection reset'), - putils.ProcessExecutionError(stderr='Failed to 
establish' - ' SSC connection'), - putils.ProcessExecutionError(stderr='Connection reset'), - putils.ProcessExecutionError(stderr='Connection reset'), - putils.ProcessExecutionError(stderr='Connection reset')] - - self.mock_object(os.path, 'isfile', - return_value=True) - self.mock_object(utils, 'execute') - self.mock_object(time, 'sleep') - self.mock_object(paramiko, 'SSHClient') - self.mock_object(paramiko.RSAKey, 'from_private_key_file') - self.mock_object(putils, 'ssh_execute', - side_effect=exceptions) - - self.assertRaises(exception.HNASConnError, self.hnas_backend._run_cmd, - 'ssh', '0.0.0.0', 'supervisor', 'supervisor', 'df', - '-a') - - def test_run_cmd_exception_without_retry(self): - self.mock_object(os.path, 'isfile', - return_value=True) - self.mock_object(utils, 'execute') - self.mock_object(time, 'sleep') - self.mock_object(paramiko, 'SSHClient') - self.mock_object(paramiko.RSAKey, 'from_private_key_file') - self.mock_object(putils, 'ssh_execute', - side_effect=putils.ProcessExecutionError( - stderr='Error')) - - self.assertRaises(putils.ProcessExecutionError, - self.hnas_backend._run_cmd, 'ssh', '0.0.0.0', - 'supervisor', 'supervisor', 'df', '-a') - - def test_get_version(self): - expected_out = { - 'hardware': 'NAS Platform (M2SEKW1339109)', - 'mac': '83-68-96-AA-DA-5D', - 'version': '11.2.3319.14', - 'model': 'HNAS 4040', - 'serial': 'B1339745' - } - - self.mock_object(self.hnas_backend, '_run_cmd', - side_effect=[(cluster_getmac, ''), (version, '')]) - - out = self.hnas_backend.get_version() - - self.assertEqual(expected_out, out) - - def test_get_evs(self): - self.mock_object(self.hnas_backend, '_run_cmd', - return_value=(evsfs_list, '')) - - out = self.hnas_backend.get_evs('fs-cinder') - - self.assertEqual('2', out) - - def test_get_export_list(self): - self.mock_object(self.hnas_backend, '_run_cmd', - side_effect=[(nfs_export, ''), - (evsfs_list, ''), - (evs_list, '')]) - - out = self.hnas_backend.get_export_list() - - 
self.assertEqual('fs-cinder', out[0]['fs']) - self.assertEqual(250.0, out[0]['size']) - self.assertEqual(228.0, out[0]['free']) - self.assertEqual('/export01-husvm', out[0]['path']) - - def test_get_export_list_data_not_available(self): - self.mock_object(self.hnas_backend, '_run_cmd', - side_effect=[(nfs_export_not_available, ''), - (evsfs_list, ''), - (evs_list, '')]) - - out = self.hnas_backend.get_export_list() - - self.assertEqual('fs-cinder', out[0]['fs']) - self.assertEqual('/export01-husvm', out[0]['path']) - self.assertEqual(-1, out[0]['size']) - self.assertEqual(-1, out[0]['free']) - - def test_get_export_list_tb(self): - size = float(250 * 1024) - free = float(228 * 1024) - self.mock_object(self.hnas_backend, '_run_cmd', - side_effect=[(nfs_export_tb, ''), - (evsfs_list, ''), - (evs_list, '')]) - - out = self.hnas_backend.get_export_list() - - self.assertEqual('fs-cinder', out[0]['fs']) - self.assertEqual(size, out[0]['size']) - self.assertEqual(free, out[0]['free']) - self.assertEqual('/export01-husvm', out[0]['path']) - - def test_file_clone(self): - path1 = '/.cinder/path1' - path2 = '/.cinder/path2' - - self.mock_object(self.hnas_backend, '_run_cmd', - return_value=(evsfs_list, '')) - - self.hnas_backend.file_clone('fs-cinder', path1, path2) - - calls = [mock.call('evsfs', 'list'), mock.call('console-context', - '--evs', '2', - 'file-clone-create', - '-f', 'fs-cinder', - path1, path2)] - self.hnas_backend._run_cmd.assert_has_calls(calls, any_order=False) - - def test_file_clone_wrong_fs(self): - self.mock_object(self.hnas_backend, '_run_cmd', - return_value=(evsfs_list, '')) - - self.assertRaises(exception.InvalidParameterValue, - self.hnas_backend.file_clone, 'fs-fake', 'src', - 'dst') - - def test_get_evs_info(self): - expected_out = {'evs_number': '1'} - expected_out2 = {'evs_number': '2'} - - self.mock_object(self.hnas_backend, '_run_cmd', - return_value=(evsipaddr, '')) - - out = self.hnas_backend.get_evs_info() - - 
self.hnas_backend._run_cmd.assert_called_with('evsipaddr', '-l') - self.assertEqual(expected_out, out['10.0.0.20']) - self.assertEqual(expected_out, out['172.24.44.20']) - self.assertEqual(expected_out2, out['172.24.44.21']) - - def test_get_fs_info(self): - self.mock_object(self.hnas_backend, '_run_cmd', - side_effect=[(df_f, ''), (evsfs_list, ''), - (hnas_fs_list, '')]) - - out = self.hnas_backend.get_fs_info('fs-cinder') - - self.assertEqual('2', out['evs_id']) - self.assertEqual('fs-cinder', out['label']) - self.assertEqual('228', out['available_size']) - self.assertEqual('250', out['total_size']) - self.assertEqual(0, out['provisioned_capacity']) - - def test_get_fs_empty_return(self): - self.mock_object(self.hnas_backend, '_run_cmd', - return_value=('Not mounted', '')) - - out = self.hnas_backend.get_fs_info('fs-cinder') - self.assertEqual({}, out) - - def test_get_fs_info_single_evs(self): - self.mock_object(self.hnas_backend, '_run_cmd', - side_effect=[(df_f_single_evs, ''), (evsfs_list, ''), - (hnas_fs_list, '')]) - - out = self.hnas_backend.get_fs_info('fs-cinder') - - self.assertEqual('fs-cinder', out['label']) - self.assertEqual('228', out['available_size']) - self.assertEqual('250', out['total_size']) - self.assertEqual(0, out['provisioned_capacity']) - - def test_get_fs_tb(self): - available_size = float(228 * 1024 ** 2) - total_size = float(250 * 1024 ** 2) - - self.mock_object(self.hnas_backend, '_run_cmd', - side_effect=[(df_f_tb, ''), (evsfs_list, ''), - (hnas_fs_list, '')]) - - out = self.hnas_backend.get_fs_info('fs-cinder') - - self.assertEqual('fs-cinder', out['label']) - self.assertEqual(str(available_size), out['available_size']) - self.assertEqual(str(total_size), out['total_size']) - self.assertEqual(0, out['provisioned_capacity']) - - def test_get_fs_single_evs_tb(self): - available_size = float(228 * 1024 ** 2) - total_size = float(250 * 1024 ** 2) - - self.mock_object(self.hnas_backend, '_run_cmd', - side_effect=[(df_f_tb, ''), 
(evsfs_list, ''), - (hnas_fs_list, '')]) - - out = self.hnas_backend.get_fs_info('fs-cinder') - - self.assertEqual('fs-cinder', out['label']) - self.assertEqual(str(available_size), out['available_size']) - self.assertEqual(str(total_size), out['total_size']) - self.assertEqual(0, out['provisioned_capacity']) - - def test_get_cloned_file_relatives(self): - self.mock_object(self.hnas_backend, '_run_cmd', - side_effect=[(evsfs_list, ''), (file_clone_stat, ''), - (file_clone_stat_snap_file1, ''), - (file_clone_stat_snap_file2, '')]) - out = self.hnas_backend.get_cloned_file_relatives('cinder-lu', - 'fs-cinder') - self.assertEqual(file_relatives, out) - self.hnas_backend._run_cmd.assert_called_with('console-context', - '--evs', '2', - 'file-clone-stat-' - 'snapshot-file', - '-f', 'fs-cinder', - '00000000004029000d81' - 'f26826ffffffffffffff]') - - def test_get_cloned_file_relatives_not_clone_except(self): - exc = putils.ProcessExecutionError(stderr='File is not a clone') - self.mock_object(self.hnas_backend, '_run_cmd', - side_effect=[(evsfs_list, ''), exc]) - - self.assertRaises(exception.ManageExistingInvalidReference, - self.hnas_backend.get_cloned_file_relatives, - 'cinder-lu', 'fs-cinder', True) - - def test_get_cloned_file_relatives_not_clone_no_except(self): - exc = putils.ProcessExecutionError(stderr='File is not a clone') - self.mock_object(self.hnas_backend, '_run_cmd', - side_effect=[(evsfs_list, ''), exc]) - - out = self.hnas_backend.get_cloned_file_relatives('cinder-lu', - 'fs-cinder') - - self.assertEqual([], out) - - def test_check_snapshot_parent_true(self): - self.mock_object(self.hnas_backend, '_run_cmd', - side_effect=[(evsfs_list, ''), - (file_clone_stat, ''), - (file_clone_stat_snap_file1, ''), - (file_clone_stat_snap_file2, '')]) - out = self.hnas_backend.check_snapshot_parent('cinder-lu', - 'snapshot-lu-1', - 'fs-cinder') - - self.assertTrue(out) - - def test_check_snapshot_parent_false(self): - self.mock_object(self.hnas_backend, '_run_cmd', - 
side_effect=[(evsfs_list, ''), - (file_clone_stat, ''), - (file_clone_stat_snap_file1, ''), - (file_clone_stat_snap_file2, '')]) - out = self.hnas_backend.check_snapshot_parent('cinder-lu', - 'snapshot-lu-3', - 'fs-cinder') - - self.assertFalse(out) - - def test_get_export_path(self): - export_out = '/export01-husvm' - - self.mock_object(self.hnas_backend, '_run_cmd', - side_effect=[(evsfs_list, ''), (nfs_export, '')]) - - out = self.hnas_backend.get_export_path(export_out, 'fs-cinder') - - self.assertEqual(export_out, out) - self.hnas_backend._run_cmd.assert_called_with('console-context', - '--evs', '2', - 'nfs-export', 'list', - export_out) diff --git a/cinder/tests/unit/volume/drivers/hitachi/test_hitachi_hnas_nfs.py b/cinder/tests/unit/volume/drivers/hitachi/test_hitachi_hnas_nfs.py deleted file mode 100644 index f94aea7f8..000000000 --- a/cinder/tests/unit/volume/drivers/hitachi/test_hitachi_hnas_nfs.py +++ /dev/null @@ -1,834 +0,0 @@ -# Copyright (c) 2014 Hitachi Data Systems, Inc. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
-# - -import mock -import os - -from oslo_concurrency import processutils as putils -import socket - -from cinder import context -from cinder import exception -from cinder.image import image_utils -from cinder import test -from cinder.tests.unit import fake_constants as fake -from cinder.tests.unit import fake_snapshot -from cinder.tests.unit import fake_volume -from cinder import utils -from cinder.volume import configuration as conf -from cinder.volume.drivers.hitachi import hnas_backend as backend -from cinder.volume.drivers.hitachi import hnas_nfs as nfs -from cinder.volume.drivers.hitachi import hnas_utils -from cinder.volume.drivers import nfs as base_nfs -from cinder.volume import utils as vutils - -_VOLUME = {'name': 'cinder-volume', - 'id': fake.VOLUME_ID, - 'size': 128, - 'host': 'host1@hnas-nfs-backend#default', - 'volume_type': 'default', - 'provider_location': 'hnas'} - -_SNAPSHOT = { - 'name': 'snapshot-51dd4-8d8a-4aa9-9176-086c9d89e7fc', - 'id': fake.SNAPSHOT_ID, - 'size': 128, - 'volume_type': None, - 'provider_location': 'hnas', - 'volume_size': 128, - 'volume': _VOLUME, - 'volume_name': _VOLUME['name'], - 'host': 'host1@hnas-iscsi-backend#silver', - 'volume_type_id': fake.VOLUME_TYPE_ID, -} - - -class HNASNFSDriverTest(test.TestCase): - """Test HNAS NFS volume driver.""" - - def __init__(self, *args, **kwargs): - super(HNASNFSDriverTest, self).__init__(*args, **kwargs) - - def instantiate_snapshot(self, snap): - snap = snap.copy() - snap['volume'] = fake_volume.fake_volume_obj( - None, **snap['volume']) - snapshot = fake_snapshot.fake_snapshot_obj( - None, expected_attrs=['volume'], **snap) - return snapshot - - def setUp(self): - super(HNASNFSDriverTest, self).setUp() - self.context = context.get_admin_context() - - self.volume = fake_volume.fake_volume_obj( - self.context, - **_VOLUME) - - self.snapshot = self.instantiate_snapshot(_SNAPSHOT) - - self.volume_type = fake_volume.fake_volume_type_obj( - None, - **{'name': 'silver'} - ) - self.clone 
= fake_volume.fake_volume_obj( - None, - **{'id': fake.VOLUME2_ID, - 'size': 128, - 'host': 'host1@hnas-nfs-backend#default', - 'volume_type': 'default', - 'provider_location': 'hnas'}) - - # xml parsed from utils - self.parsed_xml = { - 'username': 'supervisor', - 'password': 'supervisor', - 'hnas_cmd': 'ssc', - 'ssh_port': '22', - 'services': { - 'default': { - 'hdp': '172.24.49.21:/fs-cinder', - 'pool_name': 'default', - 'label': 'svc_0', - 'ctl': '1', - 'export': { - 'fs': 'fs-cinder', - 'path': '/export-cinder/volume' - } - }, - }, - 'cluster_admin_ip0': None, - 'ssh_private_key': None, - 'chap_enabled': 'True', - 'mgmt_ip0': '172.17.44.15', - 'ssh_enabled': None - } - - self.configuration = mock.Mock(spec=conf.Configuration) - self.configuration.hds_hnas_nfs_config_file = 'fake.xml' - - self.mock_object(hnas_utils, 'read_cinder_conf', - return_value=self.parsed_xml) - - self.configuration = mock.Mock(spec=conf.Configuration) - self.configuration.max_over_subscription_ratio = 20.0 - self.configuration.reserved_percentage = 0 - self.configuration.hds_hnas_nfs_config_file = 'fake_config.xml' - self.configuration.nfs_shares_config = 'fake_nfs_share.xml' - self.configuration.num_shell_tries = 2 - self.configuration.nfs_mount_point_base = '%state_path/mnt' - self.configuration.nfs_mount_options = None - - self.driver = nfs.HNASNFSDriver(configuration=self.configuration) - - def test_check_pool_and_share_no_default_configured(self): - nfs_shares = '172.24.49.21:/fs-cinder' - - self.mock_object(hnas_utils, 'get_pool', return_value='default') - - self.driver.config['services'] = { - 'silver': { - 'hdp': 'fs3', - 'iscsi_ip': '172.17.39.133', - 'iscsi_port': '3260', - 'port': '22', - 'volume_type': 'silver', - 'label': 'svc_1', - 'evs': '2', - 'tgt': { - 'alias': 'iscsi-test', - 'secret': 'itEpgB5gPefGhW2' - } - } - } - - self.assertRaises(exception.ManageExistingVolumeTypeMismatch, - self.driver._check_pool_and_share, self.volume, - nfs_shares) - - def 
test_check_pool_and_share_mismatch_exception(self): - # passing a share that does not exists in config should raise an - # exception - nfs_shares = '172.24.49.21:/nfs_share' - - self.mock_object(hnas_utils, 'get_pool', return_value='default') - - self.assertRaises(exception.ManageExistingVolumeTypeMismatch, - self.driver._check_pool_and_share, self.volume, - nfs_shares) - - def test_check_pool_and_share_type_mismatch_exception(self): - nfs_shares = '172.24.49.21:/fs-cinder' - self.volume.host = 'host1@hnas-nfs-backend#gold' - - # returning a pool different from 'default' should raise an exception - self.mock_object(hnas_utils, 'get_pool', return_value='default') - - self.assertRaises(exception.ManageExistingVolumeTypeMismatch, - self.driver._check_pool_and_share, self.volume, - nfs_shares) - - def test_do_setup(self): - version_info = { - 'mac': '83-68-96-AA-DA-5D', - 'model': 'HNAS 4040', - 'version': '12.4.3924.11', - 'hardware': 'NAS Platform', - 'serial': 'B1339109', - } - export_list = [ - {'fs': 'fs-cinder', - 'name': '/fs-cinder', - 'free': 228.0, - 'path': '/fs-cinder', - 'evs': ['172.24.49.21'], - 'size': 250.0} - ] - - showmount = "Export list for 172.24.49.21: \n\ -/fs-cinder * \n\ -/shares/9bcf0bcc-8cc8-437e38bcbda9 127.0.0.1,10.1.0.5,172.24.44.141 \n\ -" - - self.mock_object(backend.HNASSSHBackend, 'get_version', - return_value=version_info) - self.mock_object(self.driver, '_load_shares_config') - self.mock_object(backend.HNASSSHBackend, 'get_export_list', - return_value=export_list) - self.mock_object(self.driver, '_execute', return_value=(showmount, '')) - - self.driver.do_setup(None) - - self.driver._execute.assert_called_with('showmount', '-e', - '172.24.49.21') - self.assertTrue(backend.HNASSSHBackend.get_export_list.called) - - def test_do_setup_execute_exception(self): - version_info = { - 'mac': '83-68-96-AA-DA-5D', - 'model': 'HNAS 4040', - 'version': '12.4.3924.11', - 'hardware': 'NAS Platform', - 'serial': 'B1339109', - } - - export_list = [ 
- {'fs': 'fs-cinder', - 'name': '/fs-cinder', - 'free': 228.0, - 'path': '/fs-cinder', - 'evs': ['172.24.49.21'], - 'size': 250.0} - ] - - self.mock_object(backend.HNASSSHBackend, 'get_version', - return_value=version_info) - self.mock_object(self.driver, '_load_shares_config') - self.mock_object(backend.HNASSSHBackend, 'get_export_list', - return_value=export_list) - self.mock_object(self.driver, '_execute', - side_effect=putils.ProcessExecutionError) - - self.assertRaises(putils.ProcessExecutionError, self.driver.do_setup, - None) - - def test_do_setup_missing_export(self): - version_info = { - 'mac': '83-68-96-AA-DA-5D', - 'model': 'HNAS 4040', - 'version': '12.4.3924.11', - 'hardware': 'NAS Platform', - 'serial': 'B1339109', - } - export_list = [ - {'fs': 'fs-cinder', - 'name': '/wrong-fs', - 'free': 228.0, - 'path': '/fs-cinder', - 'evs': ['172.24.49.21'], - 'size': 250.0} - ] - - showmount = "Export list for 172.24.49.21: \n\ -/fs-cinder * \n\ -" - - self.mock_object(backend.HNASSSHBackend, 'get_version', - return_value=version_info) - self.mock_object(self.driver, '_load_shares_config') - self.mock_object(backend.HNASSSHBackend, 'get_export_list', - return_value=export_list) - self.mock_object(self.driver, '_execute', return_value=(showmount, '')) - - self.assertRaises(exception.InvalidParameterValue, - self.driver.do_setup, None) - - def test_create_volume(self): - self.mock_object(self.driver, '_ensure_shares_mounted') - self.mock_object(self.driver, '_do_create_volume') - - out = self.driver.create_volume(self.volume) - - self.assertEqual('172.24.49.21:/fs-cinder', out['provider_location']) - self.assertTrue(self.driver._ensure_shares_mounted.called) - - def test_create_volume_exception(self): - # pool 'original' doesnt exists in services - self.volume.host = 'host1@hnas-nfs-backend#original' - - self.mock_object(self.driver, '_ensure_shares_mounted') - - self.assertRaises(exception.ParameterNotFound, - self.driver.create_volume, self.volume) - - def 
test_create_cloned_volume(self): - self.volume.size = 150 - - self.mock_object(self.driver, 'extend_volume') - self.mock_object(backend.HNASSSHBackend, 'file_clone') - - out = self.driver.create_cloned_volume(self.volume, self.clone) - - self.assertEqual('hnas', out['provider_location']) - - def test_create_cloned_volume_invalid_volume_type(self): - self.volume.volume_type_id = fake.VOLUME_TYPE_ID - self.clone.volume_type_id = fake.VOLUME_TYPE2_ID - - self.mock_object(self.driver, 'extend_volume') - self.mock_object(backend.HNASSSHBackend, 'file_clone') - - self.assertRaises(exception.InvalidVolumeType, - self.driver.create_cloned_volume, self.volume, - self.clone) - - def test_get_volume_stats(self): - self.driver.pools = [{'pool_name': 'default', - 'service_label': 'default', - 'fs': '172.24.49.21:/easy-stack'}, - {'pool_name': 'cinder_svc', - 'service_label': 'cinder_svc', - 'fs': '172.24.49.26:/MNT-CinderTest2'}] - - self.mock_object(self.driver, '_update_volume_stats') - self.mock_object(self.driver, '_get_capacity_info', - return_value=(150, 50, 100)) - - out = self.driver.get_volume_stats() - - self.assertEqual('6.0.0', out['driver_version']) - self.assertEqual('Hitachi', out['vendor_name']) - self.assertEqual('NFS', out['storage_protocol']) - - def test_create_volume_from_snapshot(self): - expected_out = {'provider_location': 'hnas'} - - self.mock_object(self.driver, '_file_not_present', - mock.Mock(return_value=False)) - self.mock_object(backend.HNASSSHBackend, 'file_clone') - result = self.driver.create_volume_from_snapshot(self.volume, - self.snapshot) - - self.assertEqual(expected_out, result) - - def test_create_volume_from_snapshot_legacy(self): - expected_out = {'provider_location': 'hnas'} - - self.mock_object(self.driver, '_file_not_present', - mock.Mock(return_value=True)) - self.mock_object(backend.HNASSSHBackend, 'file_clone') - result = self.driver.create_volume_from_snapshot(self.volume, - self.snapshot) - - self.assertEqual(expected_out, 
result) - - def test_create_snapshot(self): - expected_out = {'provider_location': 'hnas'} - self.mock_object(backend.HNASSSHBackend, 'file_clone') - result = self.driver.create_snapshot(self.snapshot) - - self.assertEqual(expected_out, result) - - def test_delete_snapshot(self): - nfs_mount = "/opt/stack/data/cinder/mnt/" - path = nfs_mount + self.driver._get_snapshot_name(self.snapshot) - - self.mock_object(self.driver, '_file_not_present', - mock.Mock(return_value=False)) - - self.mock_object(self.driver, '_get_file_path', - mock.Mock(return_value=path)) - self.mock_object(self.driver, '_execute') - - self.driver.delete_snapshot(self.snapshot) - - self.driver._execute.assert_called_with('rm', path, run_as_root=True) - - def test_delete_snapshot_legacy(self): - nfs_mount = "/opt/stack/data/cinder/mnt/" - legacy_path = nfs_mount + self.snapshot.name - - self.mock_object(self.driver, '_file_not_present', - mock.Mock(return_value=True)) - self.mock_object(self.driver, '_file_not_present', - mock.Mock(return_value=False)) - self.mock_object(self.driver, '_get_file_path', - mock.Mock(return_value=legacy_path)) - self.mock_object(self.driver, '_execute') - - self.driver.delete_snapshot(self.snapshot) - - self.driver._execute.assert_called_with('rm', legacy_path, - run_as_root=True) - - def test_extend_volume(self): - share_mount_point = '/fs-cinder' - data = image_utils.imageutils.QemuImgInfo - data.virtual_size = 200 * 1024 ** 3 - - self.mock_object(self.driver, '_get_mount_point_for_share', - return_value=share_mount_point) - self.mock_object(image_utils, 'qemu_img_info', return_value=data) - - self.driver.extend_volume(self.volume, 200) - - self.driver._get_mount_point_for_share.assert_called_with('hnas') - - def test_extend_volume_resizing_exception(self): - share_mount_point = '/fs-cinder' - data = image_utils.imageutils.QemuImgInfo - data.virtual_size = 2048 ** 3 - - self.mock_object(self.driver, '_get_mount_point_for_share', - return_value=share_mount_point) - 
self.mock_object(image_utils, 'qemu_img_info', return_value=data) - - self.mock_object(image_utils, 'resize_image') - - self.assertRaises(exception.InvalidResults, - self.driver.extend_volume, self.volume, 200) - - def test_manage_existing(self): - self.driver._mounted_shares = ['172.24.49.21:/fs-cinder'] - existing_vol_ref = {'source-name': '172.24.49.21:/fs-cinder'} - - self.mock_object(os.path, 'isfile', return_value=True) - self.mock_object(self.driver, '_get_mount_point_for_share', - return_value='/fs-cinder/cinder-volume') - self.mock_object(utils, 'resolve_hostname', - return_value='172.24.49.21') - self.mock_object(self.driver, '_ensure_shares_mounted') - self.mock_object(self.driver, '_execute') - - out = self.driver.manage_existing(self.volume, existing_vol_ref) - - loc = {'provider_location': '172.24.49.21:/fs-cinder'} - self.assertEqual(loc, out) - - os.path.isfile.assert_called_once_with('/fs-cinder/cinder-volume/') - self.driver._get_mount_point_for_share.assert_called_once_with( - '172.24.49.21:/fs-cinder') - utils.resolve_hostname.assert_called_with('172.24.49.21') - self.driver._ensure_shares_mounted.assert_called_once_with() - - def test_manage_existing_name_matches(self): - self.driver._mounted_shares = ['172.24.49.21:/fs-cinder'] - existing_vol_ref = {'source-name': '172.24.49.21:/fs-cinder'} - - self.mock_object(self.driver, '_get_share_mount_and_vol_from_vol_ref', - return_value=('172.24.49.21:/fs-cinder', - '/mnt/silver', - self.volume.name)) - - out = self.driver.manage_existing(self.volume, existing_vol_ref) - - loc = {'provider_location': '172.24.49.21:/fs-cinder'} - self.assertEqual(loc, out) - - def test_manage_existing_exception(self): - existing_vol_ref = {'source-name': '172.24.49.21:/fs-cinder'} - - self.mock_object(self.driver, '_get_share_mount_and_vol_from_vol_ref', - return_value=('172.24.49.21:/fs-cinder', - '/mnt/silver', - 'cinder-volume')) - self.mock_object(self.driver, '_execute', - side_effect=putils.ProcessExecutionError) 
- - self.assertRaises(exception.VolumeBackendAPIException, - self.driver.manage_existing, self.volume, - existing_vol_ref) - - def test_manage_existing_missing_source_name(self): - # empty source-name should raise an exception - existing_vol_ref = {} - - self.assertRaises(exception.ManageExistingInvalidReference, - self.driver.manage_existing, self.volume, - existing_vol_ref) - - def test_manage_existing_already_managed(self): - self.driver._mounted_shares = ['172.24.49.21:/fs-cinder'] - existing_vol_ref = {'source-name': '172.24.49.21:/fs-cinder'} - expected_size = 1 - - self.mock_object(self.driver, '_ensure_shares_mounted') - self.mock_object(base_nfs.NfsDriver, '_get_mount_point_for_share', - return_value='/mnt/silver') - self.mock_object(os.path, 'isfile', return_value=True) - self.mock_object(utils, 'get_file_size', return_value=expected_size) - - self.mock_object(vutils, 'check_already_managed_volume', - return_value=True) - - self.assertRaises(exception.ManageExistingAlreadyManaged, - self.driver.manage_existing, self.volume, - existing_vol_ref) - - def test_manage_existing_missing_volume_in_backend(self): - self.driver._mounted_shares = ['172.24.49.21:/fs-cinder'] - existing_vol_ref = {'source-name': '172.24.49.21:/fs-cinder'} - - self.mock_object(self.driver, '_ensure_shares_mounted') - self.mock_object(utils, 'resolve_hostname', - side_effect=['172.24.49.21', '172.24.49.22']) - - self.assertRaises(exception.ManageExistingInvalidReference, - self.driver.manage_existing, self.volume, - existing_vol_ref) - - def test_manage_existing_get_size(self): - existing_vol_ref = { - 'source-name': '172.24.49.21:/fs-cinder/cinder-volume', - } - self.driver._mounted_shares = ['172.24.49.21:/fs-cinder'] - expected_size = 1 - - self.mock_object(self.driver, '_ensure_shares_mounted') - self.mock_object(utils, 'resolve_hostname', - return_value='172.24.49.21') - self.mock_object(base_nfs.NfsDriver, '_get_mount_point_for_share', - return_value='/mnt/silver') - 
self.mock_object(os.path, 'isfile', return_value=True) - self.mock_object(utils, 'get_file_size', return_value=expected_size) - - out = self.driver.manage_existing_get_size(self.volume, - existing_vol_ref) - - self.assertEqual(1, out) - utils.get_file_size.assert_called_once_with( - '/mnt/silver/cinder-volume') - utils.resolve_hostname.assert_called_with('172.24.49.21') - - def test_manage_existing_get_size_exception(self): - existing_vol_ref = { - 'source-name': '172.24.49.21:/fs-cinder/cinder-volume', - } - self.driver._mounted_shares = ['172.24.49.21:/fs-cinder'] - - self.mock_object(self.driver, '_get_share_mount_and_vol_from_vol_ref', - return_value=('172.24.49.21:/fs-cinder', - '/mnt/silver', - 'cinder-volume')) - - self.assertRaises(exception.VolumeBackendAPIException, - self.driver.manage_existing_get_size, self.volume, - existing_vol_ref) - - def test_manage_existing_get_size_resolving_hostname_exception(self): - existing_vol_ref = { - 'source-name': '172.24.49.21:/fs-cinder/cinder-volume', - } - - self.driver._mounted_shares = ['172.24.49.21:/fs-cinder'] - - self.mock_object(self.driver, '_ensure_shares_mounted') - self.mock_object(utils, 'resolve_hostname', - side_effect=socket.gaierror) - - self.assertRaises(socket.gaierror, - self.driver.manage_existing_get_size, self.volume, - existing_vol_ref) - - def test_unmanage(self): - path = '/opt/stack/cinder/mnt/826692dfaeaf039b1f4dcc1dacee2c2e' - vol_str = 'volume-' + self.volume.id - vol_path = os.path.join(path, vol_str) - new_path = os.path.join(path, 'unmanage-' + vol_str) - - self.mock_object(self.driver, '_get_mount_point_for_share', - return_value=path) - self.mock_object(self.driver, '_execute') - - self.driver.unmanage(self.volume) - - self.driver._execute.assert_called_with('mv', vol_path, new_path, - run_as_root=False, - check_exit_code=True) - self.driver._get_mount_point_for_share.assert_called_with( - self.volume.provider_location) - - def test_unmanage_volume_exception(self): - path = 
'/opt/stack/cinder/mnt/826692dfaeaf039b1f4dcc1dacee2c2e' - - self.mock_object(self.driver, '_get_mount_point_for_share', - return_value=path) - self.mock_object(self.driver, '_execute', side_effect=ValueError) - - self.driver.unmanage(self.volume) - - def test_manage_existing_snapshot(self): - nfs_share = "172.24.49.21:/fs-cinder" - nfs_mount = "/opt/stack/data/cinder/mnt/" + fake.SNAPSHOT_ID - path = "unmanage-%s.%s" % (self.snapshot.volume.name, self.snapshot.id) - loc = {'provider_location': '172.24.49.21:/fs-cinder'} - existing_ref = {'source-name': '172.24.49.21:/fs-cinder/' - + fake.SNAPSHOT_ID} - - self.mock_object(self.driver, '_get_share_mount_and_vol_from_vol_ref', - return_value=(nfs_share, nfs_mount, path)) - self.mock_object(backend.HNASSSHBackend, 'check_snapshot_parent', - return_value=True) - self.mock_object(self.driver, '_execute') - self.mock_object(backend.HNASSSHBackend, 'get_export_path', - return_value='fs-cinder') - - out = self.driver.manage_existing_snapshot(self.snapshot, - existing_ref) - - self.assertEqual(loc, out) - - def test_manage_existing_snapshot_legacy(self): - nfs_share = "172.24.49.21:/fs-cinder" - nfs_mount = "/opt/stack/data/cinder/mnt/" + fake.SNAPSHOT_ID - path = "unmanage-snapshot-%s" % self.snapshot.id - loc = {'provider_location': '172.24.49.21:/fs-cinder'} - existing_ref = { - 'source-name': '172.24.49.21:/fs-cinder/' + fake.SNAPSHOT_ID} - - self.mock_object(self.driver, '_get_share_mount_and_vol_from_vol_ref', - return_value=(nfs_share, nfs_mount, path)) - self.mock_object(backend.HNASSSHBackend, 'check_snapshot_parent', - return_value=True) - self.mock_object(self.driver, '_execute') - self.mock_object(backend.HNASSSHBackend, 'get_export_path', - return_value='fs-cinder') - - out = self.driver.manage_existing_snapshot(self.snapshot, existing_ref) - - self.assertEqual(loc, out) - - def test_manage_existing_snapshot_not_parent_exception(self): - nfs_share = "172.24.49.21:/fs-cinder" - nfs_mount = 
"/opt/stack/data/cinder/mnt/" + fake.SNAPSHOT_ID - path = "unmanage-%s.%s" % (fake.VOLUME_ID, self.snapshot.id) - - existing_ref = {'source-name': '172.24.49.21:/fs-cinder/' - + fake.SNAPSHOT_ID} - - self.mock_object(self.driver, '_get_share_mount_and_vol_from_vol_ref', - return_value=(nfs_share, nfs_mount, path)) - self.mock_object(backend.HNASSSHBackend, 'check_snapshot_parent', - return_value=False) - self.mock_object(backend.HNASSSHBackend, 'get_export_path', - return_value='fs-cinder') - - self.assertRaises(exception.ManageExistingInvalidReference, - self.driver.manage_existing_snapshot, self.snapshot, - existing_ref) - - def test_manage_existing_snapshot_get_size(self): - existing_ref = { - 'source-name': '172.24.49.21:/fs-cinder/cinder-snapshot', - } - self.driver._mounted_shares = ['172.24.49.21:/fs-cinder'] - expected_size = 1 - - self.mock_object(self.driver, '_ensure_shares_mounted') - self.mock_object(utils, 'resolve_hostname', - return_value='172.24.49.21') - self.mock_object(base_nfs.NfsDriver, '_get_mount_point_for_share', - return_value='/mnt/silver') - self.mock_object(os.path, 'isfile', return_value=True) - self.mock_object(utils, 'get_file_size', return_value=expected_size) - - out = self.driver.manage_existing_snapshot_get_size( - self.snapshot, existing_ref) - - self.assertEqual(1, out) - utils.get_file_size.assert_called_once_with( - '/mnt/silver/cinder-snapshot') - utils.resolve_hostname.assert_called_with('172.24.49.21') - - def test_unmanage_snapshot(self): - path = '/opt/stack/cinder/mnt/826692dfaeaf039b1f4dcc1dacee2c2e' - snapshot_name = "%s.%s" % (self.snapshot.volume.name, self.snapshot.id) - old_path = os.path.join(path, snapshot_name) - new_path = os.path.join(path, 'unmanage-' + snapshot_name) - - self.mock_object(self.driver, '_get_mount_point_for_share', - return_value=path) - self.mock_object(self.driver, '_execute') - - self.driver.unmanage_snapshot(self.snapshot) - - self.driver._execute.assert_called_with('mv', old_path, 
new_path, - run_as_root=False, - check_exit_code=True) - self.driver._get_mount_point_for_share.assert_called_with( - self.snapshot.provider_location) - - def test_get_manageable_volumes_not_safe(self): - manageable_vol = [{'cinder_id': '1e5177e7-95e5-4a0f-b170-e45f4b469f6a', - 'extra_info': None, - 'reason_not_safe': 'already managed', - 'reference': { - 'source-name': - '172.24.49.21:/fs-cinder/volume-1e5177e7-' - '95e5-4a0f-b170-e45f4b469f6a'}, - 'safe_to_manage': False, - 'size': 128}] - - rsrc = [self.volume] - path = '/opt/stack/cinder/mnt/826692dfaeaf039b1f4dcc1dacee2c2e' - self.mock_object(base_nfs.NfsDriver, '_get_mount_point_for_share', - return_value=path) - vols_exp = [self.volume.name] - self.mock_object(self.driver, '_get_volumes_from_export', - return_value=vols_exp) - self.mock_object(self.driver, '_get_file_size', - return_value=self.volume.size) - - out = self.driver._get_manageable_resource_info( - rsrc, "volume", None, 1000, 0, ['reference'], ['desc']) - - self.driver._get_volumes_from_export.assert_called_with( - '172.24.49.21:/fs-cinder') - self.driver._get_file_size.assert_called_with('%s/%s' % ( - path, self.volume.name)) - self.driver._get_mount_point_for_share(self.volume.provider_location) - - self.assertEqual(out, manageable_vol) - - def test_get_manageable_volumes(self): - manageable_vol = [{ - 'cinder_id': '1e5177e7-95e5-4a0f-b170-e45f4b469f6a', - 'extra_info': None, - 'reason_not_safe': 'already managed', - 'reference': { - 'source-name': '172.24.49.21:/fs-cinder/' - 'volume-1e5177e7-95e5-4a0f-b170-e45f4b469f6a'}, - 'safe_to_manage': False, - 'size': 128}] - - rsrc = [self.volume] - path = '/opt/stack/cinder/mnt/826692dfaeaf039b1f4dcc1dacee2c2e' - self.mock_object(base_nfs.NfsDriver, '_get_mount_point_for_share', - return_value=path) - vols_exp = [fake.VOLUME_NAME] - self.mock_object(self.driver, '_get_volumes_from_export', - return_value=vols_exp) - self.mock_object(self.driver, '_get_file_size', - return_value=self.volume.size) - - 
out = self.driver._get_manageable_resource_info(rsrc, "volume", None, - 1000, 0, ['reference'], - ['desc']) - - self.driver._get_volumes_from_export.assert_called_with( - '172.24.49.21:/fs-cinder') - self.driver._get_file_size.assert_called_with( - '%s/%s' % (path, self.volume.name)) - self.driver._get_mount_point_for_share(self.volume.provider_location) - - self.assertEqual(out, manageable_vol) - - def test_get_manageable_snapshots(self): - manageable_snap = [{ - 'cinder_id': '253b2878-ec60-4793-ad19-e65496ec7aab', - 'extra_info': None, - 'reason_not_safe': 'already managed', - 'reference': { - 'source-name': '172.24.49.21:/fs-cinder/' - 'snapshot-253b2878-ec60-4793-' - 'ad19-e65496ec7aab'}, - 'safe_to_manage': False, - 'size': 128, - 'source_reference': {'id': '1'}}] - - rsrc = [self.snapshot] - path = '/opt/stack/cinder/mnt/826692dfaeaf039b1f4dcc1dacee2c2e' - self.mock_object(base_nfs.NfsDriver, '_get_mount_point_for_share', - return_value=path) - vols_exp = [fake.SNAPSHOT_NAME] - self.mock_object(self.driver, '_get_volumes_from_export', - return_value=vols_exp) - self.mock_object(self.driver, '_get_file_size', - return_value=self.volume.size) - self.mock_object(backend.HNASSSHBackend, 'get_cloned_file_relatives', - return_value=[' /nfs_cinder/volume-1', - '/nfs_cinder/snapshot2']) - - out = self.driver._get_manageable_resource_info(rsrc, "snapshot", None, - 1000, 0, ['reference'], - ['desc']) - - self.driver._get_volumes_from_export.assert_called_with( - '172.24.49.21:/fs-cinder') - self.driver._get_file_size.assert_called_with( - '%s/%s' % (path, self.snapshot.name)) - self.driver._get_mount_point_for_share(self.snapshot.provider_location) - - self.assertEqual(out, manageable_snap) - - def test_get_manageable_snapshots_unknown_origin(self): - manageable_snap = [{ - 'cinder_id': '253b2878-ec60-4793-ad19-e65496ec7aab', - 'extra_info': 'Could not determine the volume that owns ' - 'the snapshot', - 'reason_not_safe': 'already managed', - 'reference': { - 
'source-name': '172.24.49.21:/fs-cinder/' - 'snapshot-253b2878-ec60-4793-' - 'ad19-e65496ec7aab'}, - 'safe_to_manage': False, - 'size': 128, - 'source_reference': {'id': 'unknown'}}] - - rsrc = [self.snapshot] - path = '/opt/stack/cinder/mnt/826692dfaeaf039b1f4dcc1dacee2c2e' - self.mock_object(base_nfs.NfsDriver, '_get_mount_point_for_share', - return_value=path) - vols_exp = [fake.SNAPSHOT_NAME] - self.mock_object(self.driver, '_get_volumes_from_export', - return_value=vols_exp) - self.mock_object(self.driver, '_get_file_size', - return_value=self.volume.size) - self.mock_object(backend.HNASSSHBackend, 'get_cloned_file_relatives', - return_value=[' /nfs_cinder/volume-1', - ' /nfs_cinder/volume-2', - '/nfs_cinder/snapshot2']) - - out = self.driver._get_manageable_resource_info(rsrc, "snapshot", None, - 1000, 0, ['reference'], - ['desc']) - - self.driver._get_volumes_from_export.assert_called_with( - '172.24.49.21:/fs-cinder') - self.driver._get_mount_point_for_share(self.snapshot.provider_location) - self.driver._get_file_size.assert_called_with('%s/%s' % ( - path, self.snapshot.name)) - self.assertEqual(out, manageable_snap) diff --git a/cinder/tests/unit/volume/drivers/hitachi/test_hitachi_hnas_utils.py b/cinder/tests/unit/volume/drivers/hitachi/test_hitachi_hnas_utils.py deleted file mode 100644 index 730e01971..000000000 --- a/cinder/tests/unit/volume/drivers/hitachi/test_hitachi_hnas_utils.py +++ /dev/null @@ -1,305 +0,0 @@ -# Copyright (c) 2016 Hitachi Data Systems, Inc. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the -# License for the specific language governing permissions and limitations -# under the License. -# - -import ddt -import os - -from xml.etree import ElementTree as ETree - -from cinder import context -from cinder import exception -from cinder import test -from cinder.tests.unit import fake_constants -from cinder.tests.unit import fake_volume -from cinder.volume import configuration as conf -from cinder.volume.drivers.hitachi import hnas_utils -from cinder.volume import volume_types - -_VOLUME = {'name': 'cinder-volume', - 'id': fake_constants.VOLUME_ID, - 'size': 128, - 'host': 'host1@hnas-nfs-backend#default', - 'volume_type': 'default', - 'provider_location': 'hnas'} - -service_parameters = ['volume_type', 'hdp'] -optional_parameters = ['ssc_cmd', 'cluster_admin_ip0'] - -config_from_cinder_conf = { - 'username': 'supervisor', - 'fs': {'easy-stack': 'easy-stack', - 'silver': 'silver'}, - 'ssh_port': 22, - 'cluster_admin_ip0': None, - 'ssh_private_key': None, - 'mgmt_ip0': '172.24.44.15', - 'ssc_cmd': 'ssc', - 'services': { - 'default': { - 'label': u'svc_0', - 'pool_name': 'default', - 'hdp': 'easy-stack'}, - 'FS-CinderDev1': { - 'label': u'svc_1', - 'pool_name': 'FS-CinderDev1', - 'hdp': 'silver'}}, - 'password': 'supervisor'} - -valid_XML_str = ''' - - 172.24.44.15 - supervisor - supervisor - False - /home/ubuntu/.ssh/id_rsa - - default - easy-stack - - - silver - FS-CinderDev1 - - -''' - -XML_no_authentication = ''' - - 172.24.44.15 - supervisor - False - -''' - -XML_empty_authentication_param = ''' - - 172.24.44.15 - supervisor - - False - - - default - easy-stack - - -''' - -# missing mgmt_ip0 -XML_without_mandatory_params = ''' - - supervisor - supervisor - False - - default - easy-stack - - -''' - -XML_no_services_configured = ''' - - 172.24.44.15 - supervisor - supervisor - 10 - False - /home/ubuntu/.ssh/id_rsa - -''' - -parsed_xml = {'username': 'supervisor', 'password': 'supervisor', - 'ssc_cmd': 'ssc', 'ssh_port': 22, - 'fs': {'easy-stack': 
'easy-stack', - 'FS-CinderDev1': 'FS-CinderDev1'}, - 'cluster_admin_ip0': None, - 'ssh_private_key': '/home/ubuntu/.ssh/id_rsa', - 'services': { - 'default': {'hdp': 'easy-stack', 'pool_name': 'default', - 'label': 'svc_0'}, - 'silver': {'hdp': 'FS-CinderDev1', 'pool_name': 'silver', - 'label': 'svc_1'}}, - 'mgmt_ip0': '172.24.44.15'} - -valid_XML_etree = ETree.XML(valid_XML_str) -invalid_XML_etree_no_authentication = ETree.XML(XML_no_authentication) -invalid_XML_etree_empty_parameter = ETree.XML(XML_empty_authentication_param) -invalid_XML_etree_no_mandatory_params = ETree.XML(XML_without_mandatory_params) -invalid_XML_etree_no_service = ETree.XML(XML_no_services_configured) - - -@ddt.ddt -class HNASUtilsTest(test.TestCase): - - def __init__(self, *args, **kwargs): - super(HNASUtilsTest, self).__init__(*args, **kwargs) - - def setUp(self): - super(HNASUtilsTest, self).setUp() - - self.fake_conf = conf.Configuration(hnas_utils.drivers_common_opts, - conf.SHARED_CONF_GROUP) - - self.override_config('hnas_username', 'supervisor', - conf.SHARED_CONF_GROUP) - self.override_config('hnas_password', 'supervisor', - conf.SHARED_CONF_GROUP) - self.override_config('hnas_mgmt_ip0', '172.24.44.15', - conf.SHARED_CONF_GROUP) - self.override_config('hnas_svc0_pool_name', 'default', - conf.SHARED_CONF_GROUP) - self.override_config('hnas_svc0_hdp', 'easy-stack', - conf.SHARED_CONF_GROUP) - self.override_config('hnas_svc1_pool_name', 'FS-CinderDev1', - conf.SHARED_CONF_GROUP) - self.override_config('hnas_svc1_hdp', 'silver', - conf.SHARED_CONF_GROUP) - - self.context = context.get_admin_context() - self.volume = fake_volume.fake_volume_obj(self.context, **_VOLUME) - self.volume_type = (fake_volume.fake_volume_type_obj(None, **{ - 'id': fake_constants.VOLUME_TYPE_ID, 'name': 'silver'})) - - def test_read_xml_config(self): - self.mock_object(os, 'access', return_value=True) - self.mock_object(ETree, 'parse', return_value=ETree.ElementTree) - self.mock_object(ETree.ElementTree, 
'getroot', - return_value=valid_XML_etree) - - xml_path = 'xml_file_found' - out = hnas_utils.read_xml_config(xml_path, - service_parameters, - optional_parameters) - - self.assertEqual(parsed_xml, out) - - def test_read_xml_config_parser_error(self): - xml_file = 'hnas_nfs.xml' - self.mock_object(os, 'access', return_value=True) - self.mock_object(ETree, 'parse', side_effect=ETree.ParseError) - - self.assertRaises(exception.ConfigNotFound, hnas_utils.read_xml_config, - xml_file, service_parameters, optional_parameters) - - def test_read_xml_config_not_found(self): - self.mock_object(os, 'access', return_value=False) - - xml_path = 'xml_file_not_found' - self.assertRaises(exception.NotFound, hnas_utils.read_xml_config, - xml_path, service_parameters, optional_parameters) - - def test_read_xml_config_without_services_configured(self): - xml_file = 'hnas_nfs.xml' - - self.mock_object(os, 'access', return_value=True) - self.mock_object(ETree, 'parse', return_value=ETree.ElementTree) - self.mock_object(ETree.ElementTree, 'getroot', - return_value=invalid_XML_etree_no_service) - - self.assertRaises(exception.ParameterNotFound, - hnas_utils.read_xml_config, xml_file, - service_parameters, optional_parameters) - - def test_read_xml_config_empty_authentication_parameter(self): - xml_file = 'hnas_nfs.xml' - - self.mock_object(os, 'access', return_value=True) - self.mock_object(ETree, 'parse', return_value=ETree.ElementTree) - self.mock_object(ETree.ElementTree, 'getroot', - return_value=invalid_XML_etree_empty_parameter) - - self.assertRaises(exception.ParameterNotFound, - hnas_utils.read_xml_config, xml_file, - service_parameters, optional_parameters) - - def test_read_xml_config_mandatory_parameters_missing(self): - xml_file = 'hnas_nfs.xml' - - self.mock_object(os, 'access', return_value=True) - self.mock_object(ETree, 'parse', return_value=ETree.ElementTree) - self.mock_object(ETree.ElementTree, 'getroot', - return_value=invalid_XML_etree_no_mandatory_params) - - 
self.assertRaises(exception.ParameterNotFound, - hnas_utils.read_xml_config, xml_file, - service_parameters, optional_parameters) - - def test_read_config_xml_without_authentication_parameter(self): - xml_file = 'hnas_nfs.xml' - - self.mock_object(os, 'access', return_value=True) - self.mock_object(ETree, 'parse', return_value=ETree.ElementTree) - self.mock_object(ETree.ElementTree, 'getroot', - return_value=invalid_XML_etree_no_authentication) - - self.assertRaises(exception.ConfigNotFound, hnas_utils.read_xml_config, - xml_file, service_parameters, optional_parameters) - - def test_get_pool_with_vol_type(self): - self.mock_object(volume_types, 'get_volume_type_extra_specs', - return_value={'service_label': 'silver'}) - - self.volume.volume_type_id = fake_constants.VOLUME_TYPE_ID - self.volume.volume_type = self.volume_type - - out = hnas_utils.get_pool(parsed_xml, self.volume) - - self.assertEqual('silver', out) - - def test_get_pool_with_vol_type_id_none(self): - self.volume.volume_type_id = None - self.volume.volume_type = self.volume_type - - out = hnas_utils.get_pool(parsed_xml, self.volume) - - self.assertEqual('default', out) - - def test_get_pool_with_missing_service_label(self): - self.mock_object(volume_types, 'get_volume_type_extra_specs', - return_value={'service_label': 'gold'}) - - self.volume.volume_type_id = fake_constants.VOLUME_TYPE_ID - self.volume.volume_type = self.volume_type - - out = hnas_utils.get_pool(parsed_xml, self.volume) - - self.assertEqual('default', out) - - def test_get_pool_without_vol_type(self): - out = hnas_utils.get_pool(parsed_xml, self.volume) - self.assertEqual('default', out) - - def test_read_cinder_conf_nfs(self): - out = hnas_utils.read_cinder_conf(self.fake_conf) - - self.assertEqual(config_from_cinder_conf, out) - - def test_read_cinder_conf_break(self): - self.override_config('hnas_username', None, conf.SHARED_CONF_GROUP) - self.override_config('hnas_password', None, conf.SHARED_CONF_GROUP) - 
self.override_config('hnas_mgmt_ip0', None, conf.SHARED_CONF_GROUP) - out = hnas_utils.read_cinder_conf(self.fake_conf) - self.assertIsNone(out) - - @ddt.data('hnas_username', 'hnas_password', - 'hnas_mgmt_ip0', 'hnas_svc0_pool_name', - 'hnas_svc0_hdp', ) - def test_init_invalid_conf_parameters(self, attr_name): - self.override_config(attr_name, None, conf.SHARED_CONF_GROUP) - - self.assertRaises(exception.InvalidParameterValue, - hnas_utils.read_cinder_conf, self.fake_conf) diff --git a/cinder/tests/unit/volume/drivers/hitachi/test_hitachi_vsp_horcm_fc.py b/cinder/tests/unit/volume/drivers/hitachi/test_hitachi_vsp_horcm_fc.py deleted file mode 100644 index 4fef1bfad..000000000 --- a/cinder/tests/unit/volume/drivers/hitachi/test_hitachi_vsp_horcm_fc.py +++ /dev/null @@ -1,1806 +0,0 @@ -# Copyright (C) 2016, Hitachi, Ltd. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
-# -"""Unit tests for Hitachi VSP Driver.""" - -import copy -import os - -import mock -from os_brick.initiator import connector as brick_connector -from oslo_concurrency import processutils -from oslo_config import cfg -from six.moves import range - -from cinder import context as cinder_context -from cinder import db -from cinder.db.sqlalchemy import api as sqlalchemy_api -from cinder import exception -from cinder.objects import snapshot as obj_snap -from cinder import test -from cinder.tests.unit import fake_snapshot -from cinder.tests.unit import fake_volume -from cinder import utils -from cinder.volume import configuration as conf -from cinder.volume import driver -from cinder.volume.drivers.hitachi import vsp_fc -from cinder.volume.drivers.hitachi import vsp_horcm -from cinder.volume.drivers.hitachi import vsp_utils -from cinder.volume import utils as volume_utils - -# Dummy return values -SUCCEED = 0 -STDOUT = "" -STDERR = "" -CMD_SUCCEED = (SUCCEED, STDOUT, STDERR) - -# Configuration parameter values -CONFIG_MAP = { - 'serial': '492015', - 'my_ip': '127.0.0.1', -} - -# CCI instance numbers -INST_NUMS = (200, 201) - -# Shadow Image copy group names -CG_MAP = {'cg%s' % x: vsp_horcm._COPY_GROUP % ( - CONFIG_MAP['my_ip'], CONFIG_MAP['serial'], INST_NUMS[1], x) - for x in range(3) -} - -# Map containing all maps for dummy response creation -DUMMY_RESPONSE_MAP = CONFIG_MAP.copy() -DUMMY_RESPONSE_MAP.update(CG_MAP) - -# Dummy response for FC zoning device mapping -DEVICE_MAP = { - 'fabric_name': { - 'initiator_port_wwn_list': ['123456789abcdee', '123456789abcdef'], - 'target_port_wwn_list': ['111111112345678']}} - -# cmd: raidcom get copy_grp -GET_COPY_GRP_RESULT = ( - "COPY_GROUP LDEV_GROUP MU# JID# Serial#\n" - "%(cg0)s %(cg0)sP 0 - %(serial)s\n" - "%(cg1)s %(cg1)sP 0 - %(serial)s\n" - "%(cg1)s %(cg1)sS - - %(serial)s\n" -) % DUMMY_RESPONSE_MAP - -# cmd: raidcom get copy_grp -GET_COPY_GRP_RESULT2 = "COPY_GROUP LDEV_GROUP MU# JID# Serial#\n" - -# cmd: raidcom get 
copy_grp -GET_COPY_GRP_RESULT3 = ( - "COPY_GROUP LDEV_GROUP MU# JID# Serial#\n" - "%(cg0)s %(cg0)sP 0 - %(serial)s\n" - "%(cg0)s %(cg0)sS 0 - %(serial)s\n" -) % DUMMY_RESPONSE_MAP - -# cmd: raidcom get device_grp -device_grp_name VSP-127.0.0.14920150C91P -GET_DEVICE_GRP_MU1P_RESULT = ( - "LDEV_GROUP LDEV_NAME LDEV# Serial#\n" - "%(cg1)sP VSP-LDEV-0-2 0 %(serial)s\n" -) % DUMMY_RESPONSE_MAP - -# cmd: raidcom get device_grp -device_grp_name VSP-127.0.0.14920150C91S -GET_DEVICE_GRP_MU1S_RESULT = ( - "LDEV_GROUP LDEV_NAME LDEV# Serial#\n" - "%(cg1)sS VSP-LDEV-0-2 2 %(serial)s\n" -) % DUMMY_RESPONSE_MAP - -# cmd: raidcom get hba_wwn -port CL1-A HBSD-0123456789abcdef -GET_HBA_WWN_CL1A_HOSTGRP_RESULT = ( - "PORT GID GROUP_NAME HWWN Serial# NICK_NAME\n" - "CL1-A 0 HBSD-0123456789abcdef 0123456789abcdef %(serial)s -\n" -) % DUMMY_RESPONSE_MAP - -# cmd: raidcom get dp_pool -GET_DP_POOL_RESULT = ( - "PID POLS U(%) AV_CAP(MB) TP_CAP(MB) W(%) H(%) Num LDEV# LCNT " - "TL_CAP(MB) BM TR_CAP(MB) RCNT\n" - "030 POLN 0 6006 6006 75 80 1 14860 32 167477 NB 0 0\n" -) - -# cmd: raidcom get dp_pool -GET_DP_POOL_ERROR_RESULT = ( - "PID POLS U(%) POOL_NAME Seq# Num LDEV# H(%) VCAP(%) TYPE PM PT\n" -) - -# cmd: raidcom get pool -key opt -GET_POOL_KEYOPT_RESULT = ( - "PID POLS U(%%) POOL_NAME Seq# Num LDEV# H(%%) VCAP(%%) TYPE PM PT\n" - "030 POLM 30 VSPPOOL %(serial)s 1 10000 80 - OPEN N HDP\n" -) % DUMMY_RESPONSE_MAP - -# cmd: raidcom get hba_wwn -port CL1-B-0 -GET_HBA_WWN_CL1B0_RESULT = ( - "PORT GID GROUP_NAME HWWN Serial# NICK_NAME\n" - "CL1-B 0 HBSD-0123456789abcdef 0123456789abcdef %(serial)s -\n" -) % DUMMY_RESPONSE_MAP - -# cmd: raidcom get host_grp -port CL1-A -GET_HOST_GRP_CL1A_RESULT = ( - "PORT GID GROUP_NAME Serial# HMD HMO_BITs\n" - "CL1-A 0 HBSD-0123456789abcdef %(serial)s LINUX/IRIX 91\n" -) % DUMMY_RESPONSE_MAP - -# cmd: raidcom get host_grp -port CL1-B -GET_HOST_GRP_CL1B_RESULT = ( - "PORT GID GROUP_NAME Serial# HMD HMO_BITs\n" - "CL1-B 0 HBSD-0123456789abcdef %(serial)s 
LINUX/IRIX 91\n" -) % DUMMY_RESPONSE_MAP - -# raidcom add host_grp -port CLx-y -host_grp_name HBSD-0123456789abcdef -ADD_HOSTGRP_RESULT = "raidcom: Host group ID 0(0x0) will be used for adding.\n" - -# raidcom add host_grp -port CLx-y -host_grp_name HBSD-pair00 -ADD_HOSTGRP_PAIR_RESULT = ( - "raidcom: Host group ID 2(0x2) will be used for adding.\n" -) - -# raidcom add lun -port CL1-A-0 -ldev_id x -ADD_LUN_LUN0_RESULT = "raidcom: LUN 0(0x0) will be used for adding.\n" - -# cmd: raidcom get ldev -ldev_list undefined -cnt 1 -GET_LDEV_LDEV_LIST_UNDEFINED = ( - "LDEV : 1 VIR_LDEV : 65534\n" - "VOL_TYPE : NOT DEFINED\n" -) - -# cmd: raidcom get ldev -ldev_id 0 -cnt 2 -key front_end (LDEV) -GET_LDEV_LDEV0_CNT2_FRONTEND_RESULT2 = ( - " Serial# LDEV# SL CL VOL_TYPE VOL_Cap(BLK) PID ATTRIBUTE" - " Ports PORT_No:LU#:GRPNAME\n" - " %(serial)s 0 0 0 OPEN-V-CVS 2097152 - CVS 0\n" - " %(serial)s 1 - - NOT DEFINED - - - -\n" -) % DUMMY_RESPONSE_MAP - -# cmd: raidcom get ldev -ldev_id 0 -cnt 10 -key front_end (LDEV) -GET_LDEV_LDEV0_CNT10_FRONTEND_RESULT = ( - " Serial# LDEV# SL CL VOL_TYPE VOL_Cap(BLK) PID ATTRIBUTE" - " Ports PORT_No:LU#:GRPNAME\n" - " %(serial)s 0 0 0 OPEN-V-CVS 2097152 - CVS 0\n" - " %(serial)s 1 0 0 OPEN-V-CVS 2097152 - CVS 0\n" - " %(serial)s 2 0 0 OPEN-V-CVS 2097152 - CVS 0\n" - " %(serial)s 3 0 0 OPEN-V-CVS 2097152 - CVS 0\n" - " %(serial)s 4 0 0 OPEN-V-CVS 2097152 - CVS 0\n" - " %(serial)s 5 0 0 OPEN-V-CVS 2097152 - CVS 0\n" - " %(serial)s 6 0 0 OPEN-V-CVS 2097152 - CVS 0\n" - " %(serial)s 7 0 0 OPEN-V-CVS 2097152 - CVS 0\n" - " %(serial)s 8 - - NOT DEFINED - - - -\n" - " %(serial)s 9 - - NOT DEFINED - - - -\n" -) % DUMMY_RESPONSE_MAP - -# cmd: raidcom get ldev -ldev_id x -check_status NOT DEFINED -GET_LDEV_CHECKSTATUS_ERR = ( - "raidcom: testing condition has failed with exit(1).\n" -) - -# cmd: raidcom get ldev -ldev_id 0 -GET_LDEV_LDEV0_RESULT = """ -LDEV : 0 -VOL_TYPE : OPEN-V-CVS -VOL_ATTR : CVS : HDP -VOL_Capacity(BLK) : 2097152 -NUM_PORT : 0 -STS : 
NML -""" - -# cmd: raidcom get ldev -ldev_id 1 -GET_LDEV_LDEV1_RESULT = """ -LDEV : 1 -VOL_TYPE : OPEN-V-CVS -VOL_ATTR : CVS : HDP -VOL_Capacity(BLK) : 268435456 -NUM_PORT : 0 -STS : NML -""" - -# cmd: raidcom get ldev -ldev_id 3 -GET_LDEV_LDEV3_RESULT = """ -LDEV : 3 -VOL_TYPE : OPEN-V-CVS -VOL_ATTR : CVS : HDP -VOL_Capacity(BLK) : 2097152 -NUM_PORT : 0 -STS : -""" - -# cmd: raidcom get ldev -ldev_id 4 -GET_LDEV_LDEV4_RESULT = """ -LDEV : 4 -VOL_TYPE : OPEN-V-CVS -VOL_ATTR : CVS : QS : HDP : HDT -VOL_Capacity(BLK) : 2097152 -NUM_PORT : 0 -STS : NML -""" - -# cmd: raidcom get ldev -ldev_id 5 -GET_LDEV_LDEV5_RESULT = """ -LDEV : 5 -VOL_TYPE : OPEN-V-CVS -VOL_ATTR : CVS : HDP : VVOL -VOL_Capacity(BLK) : 2097152 -NUM_PORT : 0 -STS : NML -""" - -# cmd: raidcom get ldev -ldev_id 6 -GET_LDEV_LDEV6_RESULT = """ -LDEV : 6 -VOL_TYPE : OPEN-V-CVS -PORTs : CL1-A-0 0 HBSD-0123456789abcdef -VOL_ATTR : CVS : HDP -VOL_Capacity(BLK) : 2097152 -NUM_PORT : 1 -STS : NML -""" - -# cmd: raidcom get ldev -ldev_id 7 -GET_LDEV_LDEV7_RESULT = """ -LDEV : 7 -VOL_TYPE : OPEN-V-CVS -VOL_ATTR : CVS : QS : HDP : HDT -VOL_Capacity(BLK) : 2097152 -NUM_PORT : 0 -STS : NML -""" - -# cmd: raidcom get ldev -ldev_id 10 -GET_LDEV_LDEV10_RESULT = """ -LDEV : 10 -VOL_TYPE : OPEN-V-CVS -VOL_ATTR : CVS : MRCF : HDP : HDT -VOL_Capacity(BLK) : 2097152 -NUM_PORT : 1 -STS : NML -""" - -# cmd: raidcom get ldev -ldev_id 11 -GET_LDEV_LDEV11_RESULT = """ -LDEV : 11 -VOL_TYPE : OPEN-V-CVS -VOL_ATTR : CVS : QS : HDP : HDT -VOL_Capacity(BLK) : 2097152 -NUM_PORT : 1 -STS : NML -""" - -# cmd: raidcom get ldev -ldev_id 12 -GET_LDEV_LDEV12_RESULT = """ -LDEV : 12 -VOL_TYPE : OPEN-V-CVS -VOL_ATTR : CVS : MRCF : HDP : HDT -VOL_Capacity(BLK) : 2097152 -NUM_PORT : 1 -STS : NML -""" - -# cmd: raidcom get ldev -ldev_id 13 -GET_LDEV_LDEV13_RESULT = """ -LDEV : 13 -VOL_TYPE : OPEN-V-CVS -VOL_ATTR : CVS : MRCF : HDP : HDT -VOL_Capacity(BLK) : 2097152 -NUM_PORT : 1 -STS : BLK -""" - -# cmd: raidcom get ldev -ldev_id 14 
-GET_LDEV_LDEV14_RESULT = """ -LDEV : 14 -VOL_TYPE : OPEN-V-CVS -VOL_ATTR : CVS : HDP : HDT -VOL_Capacity(BLK) : 9999999 -NUM_PORT : 1 -STS : NML -""" - -# cmd: raidcom get lun -port CL1-A-0 -GET_LUN_CL1A0_RESULT = ( - "PORT GID HMD LUN NUM LDEV CM Serial# HMO_BITs\n" - "CL1-A 0 LINUX/IRIX 4 1 4 - %(serial)s\n" - "CL1-A 0 LINUX/IRIX 254 1 5 - %(serial)s\n" - "CL1-A 0 LINUX/IRIX 255 1 6 - %(serial)s\n" -) % DUMMY_RESPONSE_MAP - -# cmd: raidcom get port -GET_PORT_RESULT = ( - "PORT TYPE ATTR SPD LPID FAB CONN SSW SL Serial# WWN PHY_PORT\n" - "CL1-A FIBRE TAR AUT 01 Y PtoP Y 0 %(serial)s 0123456789abcdef -\n" - "CL1-B FIBRE TAR AUT 01 Y PtoP Y 0 %(serial)s 0123456789abcdef -\n" - "CL3-A FIBRE TAR AUT 01 Y PtoP Y 0 %(serial)s 0123456789abcdef -\n" - "CL3-B FIBRE TAR AUT 01 Y PtoP Y 0 %(serial)s 0123456789abcdef -\n" -) % DUMMY_RESPONSE_MAP - -# cmd: raidcom get snapshot -ldev_id 4 -GET_SNAPSHOT_LDEV4_RESULT = ( - "SnapShot_name P/S STAT Serial# LDEV# MU# P-LDEV# PID %% MODE " - "SPLT-TIME\n" - "VSP-SNAP0 P-VOL PSUS %(serial)s 4 3 8 31 100 ---- 57db5cb0\n" - "VSP-SNAP0 P-VOL PSUS %(serial)s 4 4 9 31 100 ---- 57db5cb0\n" -) % DUMMY_RESPONSE_MAP - -# cmd: raidcom get snapshot -ldev_id 7 -GET_SNAPSHOT_LDEV7_RESULT = ( - "SnapShot_name P/S STAT Serial# LDEV# MU# P-LDEV# PID %% MODE " - "SPLT-TIME\n" - "VSP-SNAP0 P-VOL PSUS %(serial)s 7 3 8 31 100 ---- 57db5cb0\n" - "VSP-SNAP0 P-VOL PSUS %(serial)s 7 4 9 31 100 ---- 57db5cb0\n" -) % DUMMY_RESPONSE_MAP - -# cmd: raidcom get snapshot -ldev_id 8 -GET_SNAPSHOT_LDEV8_RESULT = ( - "SnapShot_name P/S STAT Serial# LDEV# MU# P-LDEV# PID %% MODE " - "SPLT-TIME\n" - "VSP-SNAP0 S-VOL SSUS %(serial)s 8 3 7 31 100 ---- 57db5cb0\n" -) % DUMMY_RESPONSE_MAP - -# cmd: raidcom get snapshot -ldev_id 11 -GET_SNAPSHOT_LDEV11_RESULT = ( - "SnapShot_name P/S STAT Serial# LDEV# MU# P-LDEV# PID %% MODE " - "SPLT-TIME\n" - "VSP-SNAP0 S-VOL SSUS %(serial)s 11 3 7 31 100 ---- 57db5cb0\n" -) % DUMMY_RESPONSE_MAP - -# cmd: pairdisplay -CLI -d 492015 1 0 
-IM201 -PAIRDISPLAY_LDEV0_1_RESULT = ( - "Group PairVol L/R Port# TID LU-M Seq# LDEV# " - "P/S Status Seq# P-LDEV# M\n" - "%(cg0)s VSP-LDEV-0-1 L CL1-A-0 0 0 0 %(serial)s 0 " - "P-VOL PSUS %(serial)s 1 W\n" - "%(cg0)s VSP-LDEV-0-1 R CL1-A-0 0 1 0 %(serial)s 1 " - "S-VOL SSUS - 0 -\n" -) % DUMMY_RESPONSE_MAP - -# cmd: pairdisplay -CLI -d 492015 10 0 -IM201 -PAIRDISPLAY_LDEV7_10_RESULT = ( - "Group PairVol L/R Port# TID LU-M Seq# LDEV# " - "P/S Status Seq# P-LDEV# M\n" - "%(cg0)s VSP-LDEV-7-10 L CL1-A-1 0 0 0 %(serial)s 7 " - "P-VOL PSUS %(serial)s 10 W\n" - "%(cg0)s VSP-LDEV-7-10 R CL1-A-1 0 1 0 %(serial)s 10 " - "S-VOL SSUS - 7 -\n" -) % DUMMY_RESPONSE_MAP - -# cmd: pairdisplay -CLI -d 492015 12 0 -IM201 -PAIRDISPLAY_LDEV7_12_RESULT = ( - "Group PairVol L/R Port# TID LU-M Seq# LDEV# " - "P/S Status Seq# P-LDEV# M\n" - "%(cg0)s VSP-LDEV-7-12 L CL1-A-1 0 0 0 %(serial)s 7 " - "P-VOL PSUS %(serial)s 12 W\n" - "%(cg0)s VSP-LDEV-7-12 R CL1-A-1 0 1 0 %(serial)s 12 " - "S-VOL SSUS - 7 -\n" -) % DUMMY_RESPONSE_MAP - -# cmd: raidqry -h -RAIDQRY_RESULT = ( - "Model : RAID-Manager/Linux/x64\n" - "Ver&Rev: 01-39-03/03\n" - "Usage : raidqry [options] for HORC[200]\n" - " -h Help/Usage\n" - " -I[#] Set to HORCMINST#\n" - " -IH[#] or -ITC[#] Set to HORC mode [and HORCMINST#]\n" - " -IM[#] or -ISI[#] Set to MRCF mode [and HORCMINST#]\n" - " -z Set to the interactive mode\n" - " -zx Set to the interactive mode and HORCM monitoring\n" - " -q Quit(Return to main())\n" - " -g Specify for getting all group name on local\n" - " -l Specify the local query\n" - " -lm Specify the local query with full micro version\n" - " -r Specify the remote query\n" - " -f Specify display for floatable host\n" -) - -EXECUTE_TABLE = { - ('add', 'hba_wwn', '-port', 'CL3-A-0', '-hba_wwn', '0123456789abcdef'): ( - vsp_horcm.EX_INVARG, STDOUT, STDERR), - ('add', 'host_grp', '-port', 'CL1-A', '-host_grp_name', - 'HBSD-pair00'): (SUCCEED, ADD_HOSTGRP_PAIR_RESULT, STDERR), - ('add', 'host_grp', '-port', 'CL1-B', 
'-host_grp_name', - 'HBSD-pair00'): (SUCCEED, ADD_HOSTGRP_PAIR_RESULT, STDERR), - ('add', 'host_grp', '-port', 'CL3-A', '-host_grp_name', - 'HBSD-0123456789abcdef'): (SUCCEED, ADD_HOSTGRP_RESULT, STDERR), - ('add', 'host_grp', '-port', 'CL3-B', '-host_grp_name', - 'HBSD-0123456789abcdef'): (SUCCEED, ADD_HOSTGRP_RESULT, STDERR), - ('add', 'host_grp', '-port', 'CL3-B', '-host_grp_name', - 'HBSD-pair00'): (SUCCEED, ADD_HOSTGRP_PAIR_RESULT, STDERR), - ('add', 'lun', '-port', 'CL1-A-0', '-ldev_id', 0): ( - SUCCEED, ADD_LUN_LUN0_RESULT, STDERR), - ('add', 'lun', '-port', 'CL1-A-0', '-ldev_id', 1): ( - SUCCEED, ADD_LUN_LUN0_RESULT, STDERR), - ('add', 'lun', '-port', 'CL1-A-0', '-ldev_id', 5): ( - SUCCEED, ADD_LUN_LUN0_RESULT, STDERR), - ('add', 'lun', '-port', 'CL1-A-0', '-ldev_id', 6): ( - vsp_horcm.EX_CMDRJE, STDOUT, vsp_horcm._LU_PATH_DEFINED), - ('add', 'lun', '-port', 'CL1-B-0', '-ldev_id', 0, '-lun_id', 0): ( - SUCCEED, ADD_LUN_LUN0_RESULT, STDERR), - ('extend', 'ldev', '-ldev_id', 3, '-capacity', '128G'): ( - vsp_horcm.EX_CMDIOE, STDOUT, - "raidcom: [EX_CMDIOE] Control command I/O error"), - ('get', 'hba_wwn', '-port', 'CL1-A', 'HBSD-0123456789abcdef'): ( - SUCCEED, GET_HBA_WWN_CL1A_HOSTGRP_RESULT, STDERR), - ('get', 'copy_grp'): (SUCCEED, GET_COPY_GRP_RESULT, STDERR), - ('get', 'device_grp', '-device_grp_name', CG_MAP['cg1'] + 'P'): ( - SUCCEED, GET_DEVICE_GRP_MU1P_RESULT, STDERR), - ('get', 'device_grp', '-device_grp_name', CG_MAP['cg1'] + 'S'): ( - SUCCEED, GET_DEVICE_GRP_MU1S_RESULT, STDERR), - ('get', 'dp_pool'): (SUCCEED, GET_DP_POOL_RESULT, STDERR), - ('get', 'pool', '-key', 'opt'): (SUCCEED, GET_POOL_KEYOPT_RESULT, STDERR), - ('get', 'hba_wwn', '-port', 'CL1-B-0'): ( - SUCCEED, GET_HBA_WWN_CL1B0_RESULT, STDERR), - ('get', 'host_grp', '-port', 'CL1-A'): ( - SUCCEED, GET_HOST_GRP_CL1A_RESULT, STDERR), - ('get', 'host_grp', '-port', 'CL1-B'): ( - SUCCEED, GET_HOST_GRP_CL1B_RESULT, STDERR), - ('get', 'ldev', '-ldev_list', 'undefined', '-cnt', '1'): ( - SUCCEED, 
GET_LDEV_LDEV_LIST_UNDEFINED, STDERR), - ('get', 'ldev', '-ldev_id', 0, '-cnt', 2, '-key', 'front_end'): ( - SUCCEED, GET_LDEV_LDEV0_CNT2_FRONTEND_RESULT2, STDERR), - ('get', 'ldev', '-ldev_id', 0, '-cnt', 10, '-key', 'front_end'): ( - SUCCEED, GET_LDEV_LDEV0_CNT10_FRONTEND_RESULT, STDERR), - ('get', 'ldev', '-ldev_id', 0, '-check_status', 'NOT', 'DEFINED'): ( - 1, STDOUT, GET_LDEV_CHECKSTATUS_ERR), - ('get', 'ldev', '-ldev_id', 0): (SUCCEED, GET_LDEV_LDEV0_RESULT, STDERR), - ('get', 'ldev', '-ldev_id', 1): (SUCCEED, GET_LDEV_LDEV1_RESULT, STDERR), - ('get', 'ldev', '-ldev_id', 3): (SUCCEED, GET_LDEV_LDEV3_RESULT, STDERR), - ('get', 'ldev', '-ldev_id', 4): (SUCCEED, GET_LDEV_LDEV4_RESULT, STDERR), - ('get', 'ldev', '-ldev_id', 5): (SUCCEED, GET_LDEV_LDEV5_RESULT, STDERR), - ('get', 'ldev', '-ldev_id', 6): (SUCCEED, GET_LDEV_LDEV6_RESULT, STDERR), - ('get', 'ldev', '-ldev_id', 7): (SUCCEED, GET_LDEV_LDEV7_RESULT, STDERR), - ('get', 'ldev', '-ldev_id', 10): (SUCCEED, GET_LDEV_LDEV10_RESULT, STDERR), - ('get', 'ldev', '-ldev_id', 11): (SUCCEED, GET_LDEV_LDEV11_RESULT, STDERR), - ('get', 'ldev', '-ldev_id', 12): (SUCCEED, GET_LDEV_LDEV12_RESULT, STDERR), - ('get', 'ldev', '-ldev_id', 13): (SUCCEED, GET_LDEV_LDEV13_RESULT, STDERR), - ('get', 'ldev', '-ldev_id', 14): (SUCCEED, GET_LDEV_LDEV14_RESULT, STDERR), - ('get', 'ldev', '-ldev_id', 15): (vsp_horcm.EX_COMERR, "", STDERR), - ('get', 'lun', '-port', 'CL1-A-0'): ( - SUCCEED, GET_LUN_CL1A0_RESULT, STDERR), - ('get', 'port'): (SUCCEED, GET_PORT_RESULT, STDERR), - ('get', 'snapshot', '-ldev_id', 4): ( - SUCCEED, GET_SNAPSHOT_LDEV4_RESULT, STDERR), - ('get', 'snapshot', '-ldev_id', 7): ( - SUCCEED, GET_SNAPSHOT_LDEV7_RESULT, STDERR), - ('get', 'snapshot', '-ldev_id', 8): ( - SUCCEED, GET_SNAPSHOT_LDEV8_RESULT, STDERR), - ('get', 'snapshot', '-ldev_id', 11): ( - SUCCEED, GET_SNAPSHOT_LDEV11_RESULT, STDERR), - ('modify', 'ldev', '-ldev_id', 3, '-status', 'discard_zero_page'): ( - vsp_horcm.EX_CMDIOE, STDOUT, STDERR), - 
('pairdisplay', '-CLI', '-d', '%s' % CONFIG_MAP['serial'], 10, 0, - '-IM%s' % INST_NUMS[1]): ( - SUCCEED, PAIRDISPLAY_LDEV7_10_RESULT, STDERR), - ('pairdisplay', '-CLI', '-d', '%s' % CONFIG_MAP['serial'], 12, 0, - '-IM%s' % INST_NUMS[1]): ( - SUCCEED, PAIRDISPLAY_LDEV7_12_RESULT, STDERR), - ('pairevtwait', '-d', CONFIG_MAP['serial'], 1, '-nowaits', - '-IM%s' % INST_NUMS[1]): (vsp_horcm.COPY, STDOUT, STDERR), - ('pairevtwait', '-d', CONFIG_MAP['serial'], 8, '-nowaits', - '-IM%s' % INST_NUMS[1]): (vsp_horcm.COPY, STDOUT, STDERR), - ('pairevtwait', '-d', CONFIG_MAP['serial'], 10, '-nowaits', - '-IM%s' % INST_NUMS[1]): (vsp_horcm.SMPL, STDOUT, STDERR), - ('pairevtwait', '-d', CONFIG_MAP['serial'], 12, '-nowaits', - '-IM%s' % INST_NUMS[1]): (vsp_horcm.SMPL, STDOUT, STDERR), - ('raidqry', '-h'): (SUCCEED, RAIDQRY_RESULT, STDERR), - ('tee', '/etc/horcm501.conf'): (1, STDOUT, STDERR), - ('-login', 'user', 'pasword'): (SUCCEED, STDOUT, STDERR), - ('-login', 'userX', 'paswordX'): (vsp_horcm.EX_ENAUTH, STDOUT, STDERR), - ('-login', 'userY', 'paswordY'): (vsp_horcm.EX_COMERR, STDOUT, STDERR), -} - -EXECUTE_TABLE2 = EXECUTE_TABLE.copy() -EXECUTE_TABLE2.update({ - ('get', 'copy_grp'): (SUCCEED, GET_COPY_GRP_RESULT2, STDERR), - ('pairevtwait', '-d', CONFIG_MAP['serial'], 1, '-nowaits', - '-IM%s' % INST_NUMS[1]): (vsp_horcm.PSUS, STDOUT, STDERR), -}) - -EXECUTE_TABLE3 = EXECUTE_TABLE2.copy() - -EXECUTE_TABLE4 = EXECUTE_TABLE.copy() -EXECUTE_TABLE4.update({ - ('get', 'copy_grp'): (SUCCEED, GET_COPY_GRP_RESULT3, STDERR), - ('pairevtwait', '-d', CONFIG_MAP['serial'], 1, '-nowaits', - '-IM%s' % INST_NUMS[1]): (vsp_horcm.PSUE, STDOUT, STDERR), -}) - -EXECUTE_TABLE5 = EXECUTE_TABLE.copy() -EXECUTE_TABLE5.update({ - ('get', 'copy_grp'): (SUCCEED, GET_COPY_GRP_RESULT3, STDERR), - ('get', 'ldev', '-ldev_id', 1, '-check_status', 'NOT', 'DEFINED'): ( - 1, STDOUT, GET_LDEV_CHECKSTATUS_ERR), - ('pairdisplay', '-CLI', '-d', '%s' % CONFIG_MAP['serial'], 1, 0, - '-IM%s' % INST_NUMS[1]): ( - 
SUCCEED, PAIRDISPLAY_LDEV0_1_RESULT, STDERR), - ('pairevtwait', '-d', CONFIG_MAP['serial'], 1, '-nowaits', - '-IM%s' % INST_NUMS[1]): (vsp_horcm.SMPL, STDOUT, STDERR), -}) - -ERROR_EXECUTE_TABLE = { - ('get', 'dp_pool'): (SUCCEED, GET_DP_POOL_ERROR_RESULT, STDERR), -} - -DEFAULT_CONNECTOR = { - 'host': 'host', - 'ip': CONFIG_MAP['my_ip'], - 'wwpns': ['0123456789abcdef'], - 'multipath': False, -} - -CTXT = cinder_context.get_admin_context() - -TEST_VOLUME = [] -for i in range(14): - volume = {} - volume['id'] = '00000000-0000-0000-0000-{0:012d}'.format(i) - volume['name'] = 'test-volume{0:d}'.format(i) - volume['provider_location'] = None if i == 2 else '{0:d}'.format(i) - volume['size'] = 256 if i == 1 else 128 - if i == 2: - volume['status'] = 'creating' - elif i == 5: - volume['status'] = 'in-use' - else: - volume['status'] = 'available' - volume = fake_volume.fake_volume_obj(CTXT, **volume) - TEST_VOLUME.append(volume) - - -def _volume_get(context, volume_id): - """Return predefined volume info.""" - return TEST_VOLUME[int(volume_id.replace("-", ""))] - -TEST_SNAPSHOT = [] -for i in range(8): - snapshot = {} - snapshot['id'] = '10000000-0000-0000-0000-{0:012d}'.format(i) - snapshot['name'] = 'TEST_SNAPSHOT{0:d}'.format(i) - snapshot['provider_location'] = None if i == 2 else '{0:d}'.format( - i if i < 5 else i + 5) - snapshot['status'] = 'creating' if i == 2 else 'available' - snapshot['volume_id'] = '00000000-0000-0000-0000-{0:012d}'.format( - i if i < 5 else 7) - snapshot['volume'] = _volume_get(None, snapshot['volume_id']) - snapshot['volume_name'] = 'test-volume{0:d}'.format(i if i < 5 else 7) - snapshot['volume_size'] = 256 if i == 1 else 128 - snapshot = obj_snap.Snapshot._from_db_object( - CTXT, obj_snap.Snapshot(), - fake_snapshot.fake_db_snapshot(**snapshot)) - TEST_SNAPSHOT.append(snapshot) - -# Flags that determine _fake_run_horcmstart() return values -run_horcmstart_returns_error = False -run_horcmstart_returns_error2 = False -run_horcmstart3_cnt = 0 
- - -def _access(*args, **kargs): - """Assume access to the path is allowed.""" - return True - - -def _execute(*args, **kargs): - """Return predefined results for command execution.""" - cmd = args[1:-3] if args[0] == 'raidcom' else args - result = EXECUTE_TABLE.get(cmd, CMD_SUCCEED) - return result - - -def _execute2(*args, **kargs): - """Return predefined results based on EXECUTE_TABLE2.""" - cmd = args[1:-3] if args[0] == 'raidcom' else args - result = EXECUTE_TABLE2.get(cmd, CMD_SUCCEED) - return result - - -def _execute3(*args, **kargs): - """Change pairevtwait's dummy return value after it is called.""" - cmd = args[1:-3] if args[0] == 'raidcom' else args - result = EXECUTE_TABLE3.get(cmd, CMD_SUCCEED) - if cmd == ('pairevtwait', '-d', CONFIG_MAP['serial'], 1, '-nowaits', - '-IM%s' % INST_NUMS[1]): - EXECUTE_TABLE3.update({ - ('pairevtwait', '-d', CONFIG_MAP['serial'], 1, '-nowaits', - '-IM%s' % INST_NUMS[1]): (vsp_horcm.PSUE, STDOUT, STDERR), - }) - return result - - -def _execute4(*args, **kargs): - """Return predefined results based on EXECUTE_TABLE4.""" - cmd = args[1:-3] if args[0] == 'raidcom' else args - result = EXECUTE_TABLE4.get(cmd, CMD_SUCCEED) - return result - - -def _execute5(*args, **kargs): - """Return predefined results based on EXECUTE_TABLE5.""" - cmd = args[1:-3] if args[0] == 'raidcom' else args - result = EXECUTE_TABLE5.get(cmd, CMD_SUCCEED) - return result - - -def _cinder_execute(*args, **kargs): - """Return predefined results or raise an exception.""" - cmd = args[1:-3] if args[0] == 'raidcom' else args - ret, stdout, stderr = EXECUTE_TABLE.get(cmd, CMD_SUCCEED) - if ret == SUCCEED: - return stdout, stderr - else: - pee = processutils.ProcessExecutionError(exit_code=ret, - stdout=stdout, - stderr=stderr) - raise pee - - -def _error_execute(*args, **kargs): - """Return predefined error results.""" - cmd = args[1:-3] if args[0] == 'raidcom' else args - result = _execute(*args, **kargs) - ret = ERROR_EXECUTE_TABLE.get(cmd) - return ret 
if ret else result - - -def _brick_get_connector_properties(multipath=False, enforce_multipath=False): - """Return a predefined connector object.""" - return DEFAULT_CONNECTOR - - -def _brick_get_connector_properties_error(multipath=False, - enforce_multipath=False): - """Return an incomplete connector object.""" - connector = dict(DEFAULT_CONNECTOR) - del connector['wwpns'] - return connector - - -def _connect_volume(*args, **kwargs): - """Return predefined volume info.""" - return {'path': u'/dev/disk/by-path/xxxx', 'type': 'block'} - - -def _disconnect_volume(*args, **kwargs): - """Return without doing anything.""" - pass - - -def _copy_volume(*args, **kwargs): - """Return without doing anything.""" - pass - - -def _volume_admin_metadata_get(context, volume_id): - """Return dummy admin metadata.""" - return {'fake_key': 'fake_value'} - - -def _snapshot_metadata_update(context, snapshot_id, metadata, delete): - """Return without doing anything.""" - pass - - -def _fake_is_smpl(*args): - """Assume the Shadow Image pair status is SMPL.""" - return True - - -def _fake_run_horcmgr(*args): - """Assume CCI is running.""" - return vsp_horcm._HORCM_RUNNING - - -def _fake_run_horcmstart(*args): - """Return a value based on a flag value.""" - return 0 if not run_horcmstart_returns_error else 3 - - -def _fake_run_horcmstart2(*args): - """Return a value based on a flag value.""" - return 0 if not run_horcmstart_returns_error2 else 3 - - -def _fake_run_horcmstart3(*args): - """Update a counter and return a value based on it.""" - global run_horcmstart3_cnt - run_horcmstart3_cnt = run_horcmstart3_cnt + 1 - return 0 if run_horcmstart3_cnt <= 1 else 3 - - -def _fake_check_ldev_status(*args, **kwargs): - """Assume LDEV status has changed as desired.""" - return None - - -def _fake_exists(path): - """Assume the path does not exist.""" - return False - - -class FakeLookupService(object): - """Dummy FC zoning mapping lookup service class.""" - - def 
get_device_mapping_from_network(self, initiator_wwns, target_wwns): - """Return predefined FC zoning mapping.""" - return DEVICE_MAP - - -class VSPHORCMFCDriverTest(test.TestCase): - """Unit test class for VSP HORCM interface fibre channel module.""" - - test_existing_ref = {'source-id': '0'} - test_existing_none_ldev_ref = {'source-id': '2'} - test_existing_invalid_ldev_ref = {'source-id': 'AAA'} - test_existing_value_error_ref = {'source-id': 'XX:XX:XX'} - test_existing_no_ldev_ref = {} - test_existing_invalid_sts_ldev = {'source-id': '13'} - test_existing_invalid_vol_attr = {'source-id': '12'} - test_existing_invalid_size = {'source-id': '14'} - test_existing_invalid_port_cnt = {'source-id': '6'} - test_existing_failed_to_start_horcmgr = {'source-id': '15'} - - def setUp(self): - """Set up the test environment.""" - super(VSPHORCMFCDriverTest, self).setUp() - - self.configuration = mock.Mock(conf.Configuration) - self.ctxt = cinder_context.get_admin_context() - self._setup_config() - self._setup_driver() - - def _setup_config(self): - """Set configuration parameter values.""" - self.configuration.config_group = "HORCM" - - self.configuration.volume_backend_name = "HORCMFC" - self.configuration.volume_driver = ( - "cinder.volume.drivers.hitachi.vsp_fc.VSPFCDriver") - self.configuration.reserved_percentage = "0" - self.configuration.use_multipath_for_image_xfer = False - self.configuration.enforce_multipath_for_image_xfer = False - self.configuration.num_volume_device_scan_tries = 3 - self.configuration.volume_dd_blocksize = "1000" - - self.configuration.vsp_storage_id = CONFIG_MAP['serial'] - self.configuration.vsp_pool = "30" - self.configuration.vsp_thin_pool = None - self.configuration.vsp_ldev_range = "0-1" - self.configuration.vsp_default_copy_method = 'FULL' - self.configuration.vsp_copy_speed = 3 - self.configuration.vsp_copy_check_interval = 1 - self.configuration.vsp_async_copy_check_interval = 1 - self.configuration.vsp_target_ports = "CL1-A" - 
self.configuration.vsp_compute_target_ports = "CL1-A" - self.configuration.vsp_horcm_pair_target_ports = "CL1-A" - self.configuration.vsp_group_request = True - - self.configuration.vsp_zoning_request = False - - self.configuration.vsp_horcm_numbers = INST_NUMS - self.configuration.vsp_horcm_user = "user" - self.configuration.vsp_horcm_password = "pasword" - self.configuration.vsp_horcm_add_conf = False - - self.configuration.safe_get = self._fake_safe_get - - CONF = cfg.CONF - CONF.my_ip = CONFIG_MAP['my_ip'] - - def _fake_safe_get(self, value): - """Retrieve a configuration value avoiding throwing an exception.""" - try: - val = getattr(self.configuration, value) - except AttributeError: - val = None - return val - - @mock.patch.object( - utils, 'brick_get_connector_properties', - side_effect=_brick_get_connector_properties) - @mock.patch.object(vsp_utils, 'execute', side_effect=_execute) - def _setup_driver(self, execute, brick_get_connector_properties): - """Set up the driver environment.""" - self.driver = vsp_fc.VSPFCDriver( - configuration=self.configuration, db=db) - self.driver.do_setup(None) - self.driver.check_for_setup_error() - self.driver.create_export(None, None, None) - self.driver.ensure_export(None, None) - self.driver.remove_export(None, None) - - # API test cases - @mock.patch.object( - utils, 'brick_get_connector_properties', - side_effect=_brick_get_connector_properties) - @mock.patch.object(utils, 'execute', side_effect=_cinder_execute) - def test_do_setup(self, execute, brick_get_connector_properties): - """Normal case: The host group exists beforehand.""" - drv = vsp_fc.VSPFCDriver( - configuration=self.configuration, db=db) - self._setup_config() - - drv.do_setup(None) - self.assertEqual( - {'CL1-A': '0123456789abcdef'}, - drv.common.storage_info['wwns']) - - @mock.patch.object( - utils, 'brick_get_connector_properties', - side_effect=_brick_get_connector_properties) - @mock.patch.object(vsp_utils, 'execute', side_effect=_execute) - def 
test_do_setup_raidqry_h_invalid( - self, execute, brick_get_connector_properties): - """Error case: 'raidqry -h' returns nothing. This error is ignored.""" - drv = vsp_fc.VSPFCDriver( - configuration=self.configuration, db=db) - self._setup_config() - - raidqry_h_original = EXECUTE_TABLE[('raidqry', '-h')] - EXECUTE_TABLE[('raidqry', '-h')] = (SUCCEED, "", STDERR) - drv.do_setup(None) - self.assertEqual( - {'CL1-A': '0123456789abcdef'}, - drv.common.storage_info['wwns']) - EXECUTE_TABLE[('raidqry', '-h')] = raidqry_h_original - - @mock.patch.object( - utils, 'brick_get_connector_properties', - side_effect=_brick_get_connector_properties) - @mock.patch.object(vsp_utils, 'execute', side_effect=_execute) - def test_do_setup_specify_pool_name( - self, execute, brick_get_connector_properties): - """Normal case: Specify pool name rather than pool number.""" - drv = vsp_fc.VSPFCDriver( - configuration=self.configuration, db=db) - self._setup_config() - self.configuration.vsp_pool = "VSPPOOL" - - drv.do_setup(None) - - @mock.patch.object( - utils, 'brick_get_connector_properties', - side_effect=_brick_get_connector_properties) - @mock.patch.object(vsp_utils, 'execute', side_effect=_execute) - def test_do_setup_create_hostgrp( - self, execute, brick_get_connector_properties): - """Normal case: The host groups does not exist beforehand.""" - drv = vsp_fc.VSPFCDriver( - configuration=self.configuration, db=db) - self._setup_config() - self.configuration.vsp_target_ports = "CL3-B" - - drv.do_setup(None) - - @mock.patch.object(vsp_horcm, '_EXEC_MAX_WAITTIME', 5) - @mock.patch.object( - utils, 'brick_get_connector_properties', - side_effect=_brick_get_connector_properties) - @mock.patch.object(vsp_utils, 'execute', side_effect=_execute) - def test_do_setup_create_hostgrp_error( - self, execute, brick_get_connector_properties): - """Error case: 'add hba_wwn' fails(MSGID0614-E).""" - drv = vsp_fc.VSPFCDriver( - configuration=self.configuration, db=db) - self._setup_config() - 
self.configuration.vsp_target_ports = "CL3-A" - - self.assertRaises(exception.VSPError, drv.do_setup, None) - - @mock.patch.object(vsp_utils, 'execute', side_effect=_execute) - def test_do_setup_thin_pool_not_specified(self, execute): - """Error case: Parameter error(vsp_thin_pool).(MSGID0601-E).""" - drv = vsp_fc.VSPFCDriver( - configuration=self.configuration, db=db) - self._setup_config() - self.configuration.vsp_default_copy_method = 'THIN' - - self.assertRaises(exception.VSPError, drv.do_setup, None) - - @mock.patch.object( - utils, 'brick_get_connector_properties', - side_effect=_brick_get_connector_properties) - @mock.patch.object(vsp_utils, 'execute', side_effect=_execute) - def test_do_setup_ldev_range_not_specified( - self, execute, brick_get_connector_properties): - """Normal case: Not specify LDEV range.""" - drv = vsp_fc.VSPFCDriver( - configuration=self.configuration, db=db) - self._setup_config() - self.configuration.vsp_ldev_range = None - - drv.do_setup(None) - - @mock.patch.object(vsp_utils, 'execute', side_effect=_execute) - def test_do_setup_storage_id_not_specified(self, execute): - """Error case: Parameter error(vsp_storage_id).(MSGID0601-E).""" - drv = vsp_fc.VSPFCDriver( - configuration=self.configuration, db=db) - self._setup_config() - self.configuration.vsp_storage_id = None - - self.assertRaises(exception.VSPError, drv.do_setup, None) - - @mock.patch.object(vsp_utils, 'execute', side_effect=_execute) - def test_do_setup_horcm_numbers_invalid(self, execute): - """Error case: Parameter error(vsp_horcm_numbers).(MSGID0601-E).""" - drv = vsp_fc.VSPFCDriver( - configuration=self.configuration, db=db) - self._setup_config() - self.configuration.vsp_horcm_numbers = (200, 200) - - self.assertRaises(exception.VSPError, drv.do_setup, None) - - @mock.patch.object(vsp_utils, 'execute', side_effect=_execute) - def test_do_setup_horcm_user_not_specified(self, execute): - """Error case: Parameter error(vsp_horcm_user).(MSGID0601-E).""" - drv = 
vsp_fc.VSPFCDriver( - configuration=self.configuration, db=db) - self._setup_config() - self.configuration.vsp_horcm_user = None - - self.assertRaises(exception.VSPError, drv.do_setup, None) - - @mock.patch.object( - utils, 'brick_get_connector_properties', - side_effect=_brick_get_connector_properties) - @mock.patch.object(vsp_utils, 'execute', side_effect=_execute) - def test_do_setup_only_target_ports_not_specified( - self, execute, brick_get_connector_properties): - """Normal case: Only target_ports is not specified.""" - drv = vsp_fc.VSPFCDriver( - configuration=self.configuration, db=db) - self._setup_config() - self.configuration.vsp_target_ports = None - - drv.do_setup(None) - - @mock.patch.object( - utils, 'brick_get_connector_properties', - side_effect=_brick_get_connector_properties) - @mock.patch.object(vsp_utils, 'execute', side_effect=_execute) - def test_do_setup_only_compute_target_ports_not_specified( - self, execute, brick_get_connector_properties): - """Normal case: Only compute_target_ports is not specified.""" - drv = vsp_fc.VSPFCDriver( - configuration=self.configuration, db=db) - self._setup_config() - self.configuration.vsp_compute_target_ports = None - - drv.do_setup(None) - - @mock.patch.object( - utils, 'brick_get_connector_properties', - side_effect=_brick_get_connector_properties) - @mock.patch.object(vsp_utils, 'execute', side_effect=_execute) - def test_do_setup_only_pair_target_ports_not_specified( - self, execute, brick_get_connector_properties): - """Normal case: Only pair_target_ports is not specified.""" - drv = vsp_fc.VSPFCDriver( - configuration=self.configuration, db=db) - self._setup_config() - self.configuration.vsp_horcm_pair_target_ports = None - - drv.do_setup(None) - - @mock.patch.object(vsp_utils, 'execute', side_effect=_execute) - def test_do_setup_compute_target_ports_not_specified(self, execute): - """Error case: Parameter error(compute_target_ports).(MSGID0601-E).""" - drv = vsp_fc.VSPFCDriver( - 
configuration=self.configuration, db=db) - self._setup_config() - self.configuration.vsp_target_ports = None - self.configuration.vsp_compute_target_ports = None - - self.assertRaises(exception.VSPError, drv.do_setup, None) - - @mock.patch.object(vsp_utils, 'execute', side_effect=_execute) - def test_do_setup_pair_target_ports_not_specified(self, execute): - """Error case: Parameter error(pair_target_ports).(MSGID0601-E).""" - drv = vsp_fc.VSPFCDriver( - configuration=self.configuration, db=db) - self._setup_config() - self.configuration.vsp_target_ports = None - self.configuration.vsp_horcm_pair_target_ports = None - - self.assertRaises(exception.VSPError, drv.do_setup, None) - - @mock.patch.object(vsp_horcm, '_EXEC_MAX_WAITTIME', 5) - @mock.patch.object( - utils, 'brick_get_connector_properties', - side_effect=_brick_get_connector_properties) - @mock.patch.object(processutils, 'execute', side_effect=_execute) - @mock.patch.object(os.path, 'exists', side_effect=_fake_exists) - @mock.patch.object(os, 'access', side_effect=_access) - @mock.patch.object(vsp_utils, 'execute', side_effect=_execute) - def test_do_setup_failed_to_create_conf( - self, vsp_utils_execute, access, exists, processutils_execute, - brick_get_connector_properties): - """Error case: Writing into horcmxxx.conf fails.(MSGID0632-E).""" - drv = vsp_fc.VSPFCDriver( - configuration=self.configuration, db=db) - self._setup_config() - self.configuration.vsp_horcm_numbers = (500, 501) - self.configuration.vsp_horcm_add_conf = True - - self.assertRaises(exception.VSPError, drv.do_setup, None) - - @mock.patch.object(vsp_horcm, '_EXEC_RETRY_INTERVAL', 1) - @mock.patch.object( - utils, 'brick_get_connector_properties', - side_effect=_brick_get_connector_properties) - @mock.patch.object(vsp_utils, 'execute', side_effect=_execute) - def test_do_setup_failed_to_login( - self, execute, brick_get_connector_properties): - """Error case: 'raidcom -login' fails with EX_ENAUTH(MSGID0600-E).""" - drv = 
vsp_fc.VSPFCDriver( - configuration=self.configuration, db=db) - self._setup_config() - self.configuration.vsp_horcm_user = "userX" - self.configuration.vsp_horcm_password = "paswordX" - - self.assertRaises(exception.VSPError, drv.do_setup, None) - - @mock.patch.object(vsp_horcm, '_EXEC_MAX_WAITTIME', 2) - @mock.patch.object(vsp_horcm, '_EXEC_RETRY_INTERVAL', 1) - @mock.patch.object( - utils, 'brick_get_connector_properties', - side_effect=_brick_get_connector_properties) - @mock.patch.object(vsp_utils, 'execute', side_effect=_execute) - def test_do_setup_failed_to_command( - self, execute, brick_get_connector_properties): - """Error case: 'raidcom -login' fails with EX_COMERR(MSGID0600-E).""" - drv = vsp_fc.VSPFCDriver( - configuration=self.configuration, db=db) - self._setup_config() - self.configuration.vsp_horcm_user = "userY" - self.configuration.vsp_horcm_password = "paswordY" - - self.assertRaises(exception.VSPError, drv.do_setup, None) - - @mock.patch.object(vsp_utils, 'DEFAULT_PROCESS_WAITTIME', 2) - @mock.patch.object( - utils, 'brick_get_connector_properties', - side_effect=_brick_get_connector_properties) - @mock.patch.object(vsp_utils, 'execute', side_effect=_execute) - @mock.patch.object( - vsp_horcm, '_run_horcmgr', side_effect=_fake_run_horcmgr) - def test_do_setup_failed_to_horcmshutdown( - self, _run_horcmgr, execute, brick_get_connector_properties): - """Error case: CCI's status is always RUNNING(MSGID0608-E).""" - drv = vsp_fc.VSPFCDriver( - configuration=self.configuration, db=db) - self._setup_config() - - self.assertRaises(exception.VSPError, drv.do_setup, None) - - @mock.patch.object( - utils, 'brick_get_connector_properties', - side_effect=_brick_get_connector_properties) - @mock.patch.object(vsp_utils, 'execute', side_effect=_execute) - @mock.patch.object( - vsp_horcm, '_run_horcmstart', side_effect=_fake_run_horcmstart) - def test_do_setup_failed_to_horcmstart( - self, _run_horcmstart, execute, brick_get_connector_properties): - """Error 
case: _run_horcmstart() returns an error(MSGID0609-E).""" - drv = vsp_fc.VSPFCDriver( - configuration=self.configuration, db=db) - self._setup_config() - - global run_horcmstart_returns_error - run_horcmstart_returns_error = True - self.assertRaises(exception.VSPError, drv.do_setup, None) - run_horcmstart_returns_error = False - - @mock.patch.object( - utils, 'brick_get_connector_properties', - side_effect=_brick_get_connector_properties_error) - @mock.patch.object(vsp_utils, 'execute', side_effect=_execute) - def test_do_setup_wwn_not_found( - self, execute, brick_get_connector_properties): - """Error case: The connector does not have 'wwpns'(MSGID0650-E).""" - drv = vsp_fc.VSPFCDriver( - configuration=self.configuration, db=db) - self._setup_config() - - self.assertRaises(exception.VSPError, drv.do_setup, None) - - @mock.patch.object(vsp_utils, 'execute', side_effect=_execute) - def test_do_setup_port_not_found(self, execute): - """Error case: The target port does not exist(MSGID0650-E).""" - drv = vsp_fc.VSPFCDriver( - configuration=self.configuration, db=db) - self._setup_config() - self.configuration.vsp_target_ports = ["CL4-A"] - - self.assertRaises(exception.VSPError, drv.do_setup, None) - - @mock.patch.object(vsp_utils, 'execute', side_effect=_execute) - def test_do_setup_compute_target_ports_not_found(self, execute): - """Error case: Compute target port does not exist(MSGID0650-E).""" - drv = vsp_fc.VSPFCDriver( - configuration=self.configuration, db=db) - self._setup_config() - self.configuration.vsp_target_ports = None - self.configuration.vsp_compute_target_ports = ["CL4-A"] - - self.assertRaises(exception.VSPError, drv.do_setup, None) - - @mock.patch.object(vsp_utils, 'execute', side_effect=_execute) - def test_do_setup_pair_target_ports_not_found(self, execute): - """Error case: Pair target port does not exist(MSGID0650-E).""" - drv = vsp_fc.VSPFCDriver( - configuration=self.configuration, db=db) - self._setup_config() - 
self.configuration.vsp_target_ports = None - self.configuration.vsp_horcm_pair_target_ports = ["CL5-A"] - - self.assertRaises(exception.VSPError, drv.do_setup, None) - - @mock.patch.object(vsp_utils, 'execute', side_effect=_execute) - def test_extend_volume(self, execute): - """Normal case: Extend volume succeeds.""" - self.driver.extend_volume(TEST_VOLUME[0], 256) - - @mock.patch.object(vsp_utils, 'execute', side_effect=_execute) - def test_extend_volume_volume_provider_location_is_none(self, execute): - """Error case: The volume's provider_location is None(MSGID0613-E).""" - self.assertRaises( - exception.VSPError, self.driver.extend_volume, TEST_VOLUME[2], 256) - - @mock.patch.object(vsp_utils, 'execute', side_effect=_execute) - def test_extend_volume_volume_ldev_is_vvol(self, execute): - """Error case: The volume is a V-VOL(MSGID0618-E).""" - self.assertRaises( - exception.VSPError, self.driver.extend_volume, TEST_VOLUME[5], 256) - - @mock.patch.object(vsp_utils, 'execute', side_effect=_execute) - def test_extend_volume_volume_is_busy(self, execute): - """Error case: The volume is in a THIN volume pair(MSGID0616-E).""" - self.assertRaises( - exception.VSPError, self.driver.extend_volume, TEST_VOLUME[4], 256) - - @mock.patch.object(utils, 'execute', side_effect=_cinder_execute) - @mock.patch.object(vsp_horcm, '_EXTEND_WAITTIME', 1) - @mock.patch.object(vsp_horcm, '_EXEC_RETRY_INTERVAL', 1) - def test_extend_volume_raidcom_error(self, execute,): - """Error case: 'extend ldev' returns an error(MSGID0600-E).""" - self.assertRaises( - exception.VSPError, self.driver.extend_volume, TEST_VOLUME[3], 256) - - @mock.patch.object(vsp_utils, 'execute', side_effect=_execute) - def test_get_volume_stats(self, execute): - """Normal case: Refreshing data required.""" - stats = self.driver.get_volume_stats(True) - self.assertEqual('Hitachi', stats['vendor_name']) - self.assertFalse(stats['multiattach']) - - @mock.patch.object(vsp_utils, 'execute', side_effect=_execute) - def 
test_get_volume_stats_no_refresh(self, execute): - """Normal case: Refreshing data not required.""" - stats = self.driver.get_volume_stats() - self.assertEqual({}, stats) - - @mock.patch.object(vsp_utils, 'execute', side_effect=_error_execute) - def test_get_volume_stats_failed_to_get_dp_pool(self, execute): - """Error case: The pool does not exist(MSGID0640-E, MSGID0620-E).""" - self.driver.common.storage_info['pool_id'] = 29 - - stats = self.driver.get_volume_stats(True) - self.assertEqual({}, stats) - - @mock.patch.object(vsp_utils, 'execute', side_effect=_execute) - def test_create_volume(self, execute): - """Normal case: Available LDEV range is 0-1.""" - ret = self.driver.create_volume(fake_volume.fake_volume_obj(self.ctxt)) - self.assertEqual('1', ret['provider_location']) - - @mock.patch.object(vsp_utils, 'execute', side_effect=_execute) - def test_create_volume_free_ldev_not_found_on_storage(self, execute): - """Error case: No unused LDEV exists(MSGID0648-E).""" - self.driver.common.storage_info['ldev_range'] = [0, 0] - - self.assertRaises( - exception.VSPError, self.driver.create_volume, TEST_VOLUME[0]) - - @mock.patch.object(vsp_utils, 'execute', side_effect=_execute) - def test_create_volume_no_setting_ldev_range(self, execute): - """Normal case: Available LDEV range is unlimited.""" - self.driver.common.storage_info['ldev_range'] = None - - ret = self.driver.create_volume(fake_volume.fake_volume_obj(self.ctxt)) - self.assertEqual('1', ret['provider_location']) - - @mock.patch.object(vsp_utils, 'execute', side_effect=_execute) - @mock.patch.object( - vsp_horcm.VSPHORCM, - '_check_ldev_status', side_effect=_fake_check_ldev_status) - def test_delete_volume(self, _check_ldev_status, execute): - """Normal case: Delete a volume.""" - self.driver.delete_volume(TEST_VOLUME[0]) - - @mock.patch.object(vsp_utils, 'execute', side_effect=_execute) - def test_delete_volume_provider_location_is_none(self, execute): - """Error case: The volume's provider_location is 
None(MSGID0304-W).""" - self.driver.delete_volume(TEST_VOLUME[2]) - - @mock.patch.object(vsp_utils, 'execute', side_effect=_execute) - def test_delete_volume_ldev_not_found_on_storage(self, execute): - """Unusual case: The volume's LDEV does not exist.(MSGID0319-W).""" - self.driver.delete_volume(TEST_VOLUME[3]) - - @mock.patch.object(vsp_utils, 'execute', side_effect=_execute) - def test_delete_volume_volume_is_busy(self, execute): - """Error case: The volume is a P-VOL of a THIN pair(MSGID0616-E).""" - self.assertRaises( - exception.VolumeIsBusy, self.driver.delete_volume, TEST_VOLUME[4]) - - @mock.patch.object(vsp_horcm, 'PAIR', vsp_horcm.PSUS) - @mock.patch.object(vsp_utils, 'execute', side_effect=_execute) - @mock.patch.object( - db, 'snapshot_metadata_update', side_effect=_snapshot_metadata_update) - @mock.patch.object(sqlalchemy_api, 'volume_get', side_effect=_volume_get) - def test_create_snapshot_full( - self, volume_get, snapshot_metadata_update, execute): - """Normal case: copy_method=FULL.""" - self.driver.common.storage_info['ldev_range'] = [0, 9] - - ret = self.driver.create_snapshot(TEST_SNAPSHOT[7]) - self.assertEqual('8', ret['provider_location']) - - @mock.patch.object(vsp_horcm, 'PAIR', vsp_horcm.PSUS) - @mock.patch.object(vsp_utils, 'execute', side_effect=_execute) - @mock.patch.object( - db, 'snapshot_metadata_update', side_effect=_snapshot_metadata_update) - @mock.patch.object(sqlalchemy_api, 'volume_get', side_effect=_volume_get) - def test_create_snapshot_thin( - self, volume_get, snapshot_metadata_update, execute): - """Normal case: copy_method=THIN.""" - self.driver.common.storage_info['ldev_range'] = [0, 9] - self.configuration.vsp_thin_pool = 31 - self.configuration.vsp_default_copy_method = "THIN" - - ret = self.driver.create_snapshot(TEST_SNAPSHOT[7]) - self.assertEqual('8', ret['provider_location']) - - @mock.patch.object(vsp_utils, 'execute', side_effect=_execute) - @mock.patch.object(sqlalchemy_api, 'volume_get', 
side_effect=_volume_get) - def test_create_snapshot_provider_location_is_none( - self, volume_get, execute): - """Error case: Source vol's provider_location is None(MSGID0624-E).""" - self.assertRaises( - exception.VSPError, self.driver.create_snapshot, TEST_SNAPSHOT[2]) - - @mock.patch.object(vsp_utils, 'execute', side_effect=_execute) - @mock.patch.object(sqlalchemy_api, 'volume_get', side_effect=_volume_get) - def test_create_snapshot_ldev_not_found_on_storage( - self, volume_get, execute): - """Error case: The src-vol's LDEV does not exist.(MSGID0612-E).""" - self.assertRaises( - exception.VSPError, self.driver.create_snapshot, TEST_SNAPSHOT[3]) - - @mock.patch.object(vsp_utils, 'execute', side_effect=_execute) - def test_delete_snapshot_full(self, execute): - """Normal case: Delete a snapshot.""" - self.driver.delete_snapshot(TEST_SNAPSHOT[5]) - - @mock.patch.object(vsp_utils, 'execute', side_effect=_execute) - @mock.patch.object( - vsp_horcm.VSPHORCM, '_is_smpl', side_effect=_fake_is_smpl) - def test_delete_snapshot_full_smpl(self, _is_smpl, execute): - """Normal case: The LDEV in an SI volume pair becomes SMPL.""" - self.driver.delete_snapshot(TEST_SNAPSHOT[7]) - - @mock.patch.object(vsp_utils, 'DEFAULT_PROCESS_WAITTIME', 1) - @mock.patch.object(vsp_utils, 'execute', side_effect=_execute) - def test_delete_snapshot_vvol_timeout(self, execute): - """Error case: V-VOL is not deleted from a snapshot(MSGID0611-E).""" - self.assertRaises( - exception.VSPError, self.driver.delete_snapshot, TEST_SNAPSHOT[6]) - - @mock.patch.object(vsp_utils, 'execute', side_effect=_execute) - def test_delete_snapshot_provider_location_is_none(self, execute): - """Error case: Snapshot's provider_location is None(MSGID0304-W).""" - self.driver.delete_snapshot(TEST_SNAPSHOT[2]) - - @mock.patch.object(vsp_utils, 'execute', side_effect=_execute) - def test_delete_snapshot_ldev_not_found_on_storage(self, execute): - """Unusual case: The snapshot's LDEV does not exist.(MSGID0319-W).""" - 
self.driver.delete_snapshot(TEST_SNAPSHOT[3]) - - @mock.patch.object(vsp_utils, 'execute', side_effect=_execute) - def test_delete_snapshot_snapshot_is_busy(self, execute): - """Error case: The snapshot is a P-VOL of a THIN pair(MSGID0616-E).""" - self.assertRaises( - exception.SnapshotIsBusy, self.driver.delete_snapshot, - TEST_SNAPSHOT[4]) - - @mock.patch.object(volume_utils, 'copy_volume', side_effect=_copy_volume) - @mock.patch.object( - utils, 'brick_get_connector_properties', - side_effect=_brick_get_connector_properties) - @mock.patch.object( - utils, 'brick_get_connector', - side_effect=mock.MagicMock()) - @mock.patch.object(vsp_utils, 'execute', side_effect=_execute) - @mock.patch.object( - brick_connector.FibreChannelConnector, - 'connect_volume', _connect_volume) - @mock.patch.object( - brick_connector.FibreChannelConnector, - 'disconnect_volume', _disconnect_volume) - def test_create_cloned_volume_with_dd_same_size( - self, execute, brick_get_connector, brick_get_connector_properties, - copy_volume): - """Normal case: The source volume is a V-VOL and copied by dd.""" - vol = self.driver.create_cloned_volume(TEST_VOLUME[0], TEST_VOLUME[5]) - self.assertEqual('1', vol['provider_location']) - - @mock.patch.object(volume_utils, 'copy_volume', side_effect=_copy_volume) - @mock.patch.object( - utils, 'brick_get_connector_properties', - side_effect=_brick_get_connector_properties) - @mock.patch.object( - utils, 'brick_get_connector', - side_effect=mock.MagicMock()) - @mock.patch.object(vsp_utils, 'execute', side_effect=_execute) - @mock.patch.object( - brick_connector.FibreChannelConnector, - 'connect_volume', _connect_volume) - @mock.patch.object( - brick_connector.FibreChannelConnector, - 'disconnect_volume', _disconnect_volume) - def test_create_cloned_volume_with_dd_extend_size( - self, execute, brick_get_connector, brick_get_connector_properties, - copy_volume): - """Normal case: Copy with dd and extend the size afterward.""" - vol = 
self.driver.create_cloned_volume(TEST_VOLUME[1], TEST_VOLUME[5]) - self.assertEqual('1', vol['provider_location']) - - @mock.patch.object(vsp_utils, 'execute', side_effect=_execute) - def test_create_cloned_volume_provider_location_is_none(self, execute): - """Error case: Source vol's provider_location is None(MSGID0624-E).""" - self.assertRaises( - exception.VSPError, self.driver.create_cloned_volume, - TEST_VOLUME[0], TEST_VOLUME[2]) - - @mock.patch.object(vsp_utils, 'execute', side_effect=_execute) - def test_create_cloned_volume_invalid_size(self, execute): - """Error case: src-size > clone-size(MSGID0617-E).""" - self.assertRaises( - exception.VSPError, self.driver.create_cloned_volume, - TEST_VOLUME[0], TEST_VOLUME[1]) - - @mock.patch.object(vsp_utils, 'execute', side_effect=_execute) - def test_create_cloned_volume_extend_size_thin(self, execute): - """Error case: clone > src and copy_method=THIN(MSGID0621-E).""" - self.configuration.vsp_thin_pool = 31 - test_vol_obj = copy.copy(TEST_VOLUME[1]) - test_vol_obj.metadata.update({'copy_method': 'THIN'}) - self.assertRaises( - exception.VSPError, self.driver.create_cloned_volume, - test_vol_obj, TEST_VOLUME[0]) - - @mock.patch.object(vsp_utils, 'execute', side_effect=_execute) - def test_create_volume_from_snapshot_same_size(self, execute): - """Normal case: Copy with Shadow Image.""" - vol = self.driver.create_volume_from_snapshot( - TEST_VOLUME[0], TEST_SNAPSHOT[0]) - self.assertEqual('1', vol['provider_location']) - - @mock.patch.object(vsp_utils, 'execute', side_effect=_execute2) - def test_create_volume_from_snapshot_full_extend_normal(self, execute): - """Normal case: Copy with Shadow Image and extend the size.""" - test_vol_obj = copy.copy(TEST_VOLUME[1]) - test_vol_obj.metadata.update({'copy_method': 'FULL'}) - vol = self.driver.create_volume_from_snapshot( - test_vol_obj, TEST_SNAPSHOT[0]) - self.assertEqual('1', vol['provider_location']) - - @mock.patch.object(vsp_utils, 'execute', 
side_effect=_execute3) - def test_create_volume_from_snapshot_full_extend_PSUE(self, execute): - """Error case: SI copy -> pair status: PSUS -> PSUE(MSGID0722-E).""" - test_vol_obj = copy.copy(TEST_VOLUME[1]) - test_vol_obj.metadata.update({'copy_method': 'FULL'}) - self.assertRaises( - exception.VSPError, self.driver.create_volume_from_snapshot, - test_vol_obj, TEST_SNAPSHOT[0]) - - @mock.patch.object(vsp_utils, 'DEFAULT_PROCESS_WAITTIME', 1) - @mock.patch.object(vsp_utils, 'execute', side_effect=_execute4) - def test_create_volume_from_snapshot_full_PSUE(self, execute): - """Error case: SI copy -> pair status becomes PSUE(MSGID0610-E).""" - test_vol_obj = copy.copy(TEST_VOLUME[0]) - test_vol_obj.metadata.update({'copy_method': 'FULL'}) - self.assertRaises( - exception.VSPError, self.driver.create_volume_from_snapshot, - test_vol_obj, TEST_SNAPSHOT[0]) - - @mock.patch.object( - vsp_horcm, '_run_horcmstart', side_effect=_fake_run_horcmstart3) - @mock.patch.object(vsp_horcm, '_LDEV_STATUS_WAITTIME', 1) - @mock.patch.object(vsp_utils, 'DEFAULT_PROCESS_WAITTIME', 1) - @mock.patch.object(vsp_utils, 'execute', side_effect=_execute5) - def test_create_volume_from_snapshot_full_SMPL( - self, execute, _run_horcmstart): - """Error case: SI copy -> pair status becomes SMPL(MSGID0610-E).""" - test_vol_obj = copy.copy(TEST_VOLUME[0]) - test_vol_obj.metadata.update({'copy_method': 'FULL'}) - self.assertRaises( - exception.VSPError, self.driver.create_volume_from_snapshot, - test_vol_obj, TEST_SNAPSHOT[0]) - - @mock.patch.object(vsp_utils, 'execute', side_effect=_execute) - def test_create_volume_from_snapshot_invalid_size(self, execute): - """Error case: volume-size < snapshot-size(MSGID0617-E).""" - self.assertRaises( - exception.VSPError, self.driver.create_volume_from_snapshot, - TEST_VOLUME[0], TEST_SNAPSHOT[1]) - - @mock.patch.object(vsp_utils, 'execute', side_effect=_execute) - def test_create_volume_from_snapshot_thin_extend(self, execute): - """Error case: volume > 
snapshot and copy_method=THIN(MSGID0621-E).""" - self.configuration.vsp_thin_pool = 31 - test_vol_obj = copy.copy(TEST_VOLUME[1]) - test_vol_obj.metadata.update({'copy_method': 'THIN'}) - self.assertRaises( - exception.VSPError, self.driver.create_volume_from_snapshot, - test_vol_obj, TEST_SNAPSHOT[0]) - - @mock.patch.object(vsp_utils, 'execute', side_effect=_execute) - def test_create_volume_from_snapshot_provider_location_is_none( - self, execute): - """Error case: Snapshot's provider_location is None(MSGID0624-E).""" - self.assertRaises( - exception.VSPError, self.driver.create_volume_from_snapshot, - TEST_VOLUME[0], TEST_SNAPSHOT[2]) - - @mock.patch.object(vsp_utils, 'execute', side_effect=_execute) - @mock.patch.object( - db, 'volume_admin_metadata_get', - side_effect=_volume_admin_metadata_get) - def test_initialize_connection(self, volume_admin_metadata_get, execute): - """Normal case: Initialize connection.""" - self.configuration.vsp_zoning_request = True - self.driver.common._lookup_service = FakeLookupService() - - ret = self.driver.initialize_connection( - TEST_VOLUME[0], DEFAULT_CONNECTOR) - self.assertEqual('fibre_channel', ret['driver_volume_type']) - self.assertEqual(['0123456789abcdef'], ret['data']['target_wwn']) - self.assertEqual(0, ret['data']['target_lun']) - - @mock.patch.object( - utils, 'brick_get_connector_properties', - side_effect=_brick_get_connector_properties) - @mock.patch.object(vsp_utils, 'execute', side_effect=_execute) - @mock.patch.object( - db, 'volume_admin_metadata_get', - side_effect=_volume_admin_metadata_get) - def test_initialize_connection_multipath( - self, volume_admin_metadata_get, execute, - brick_get_connector_properties): - """Normal case: Initialize connection in multipath environment.""" - drv = vsp_fc.VSPFCDriver( - configuration=self.configuration, db=db) - self._setup_config() - self.configuration.vsp_target_ports = ["CL1-A", "CL1-B"] - drv.do_setup(None) - multipath_connector = copy.copy(DEFAULT_CONNECTOR) - 
multipath_connector['multipath'] = True - ret = drv.initialize_connection(TEST_VOLUME[0], multipath_connector) - self.assertEqual('fibre_channel', ret['driver_volume_type']) - self.assertEqual(['0123456789abcdef', '0123456789abcdef'], - ret['data']['target_wwn']) - self.assertEqual(0, ret['data']['target_lun']) - - @mock.patch.object(vsp_utils, 'execute', side_effect=_execute) - def test_initialize_connection_provider_location_is_none(self, execute): - """Error case: The volume's provider_location is None(MSGID0619-E).""" - self.assertRaises( - exception.VSPError, self.driver.initialize_connection, - TEST_VOLUME[2], DEFAULT_CONNECTOR) - - @mock.patch.object(vsp_utils, 'execute', side_effect=_execute) - @mock.patch.object( - db, 'volume_admin_metadata_get', - side_effect=_volume_admin_metadata_get) - def test_initialize_connection_already_attached( - self, volume_admin_metadata_get, execute): - """Unusual case: 'add lun' returns 'already defined' error.""" - ret = self.driver.initialize_connection( - TEST_VOLUME[6], DEFAULT_CONNECTOR) - self.assertEqual('fibre_channel', ret['driver_volume_type']) - self.assertEqual(['0123456789abcdef'], ret['data']['target_wwn']) - self.assertEqual(255, ret['data']['target_lun']) - - @mock.patch.object( - utils, 'brick_get_connector_properties', - side_effect=_brick_get_connector_properties) - @mock.patch.object(vsp_utils, 'execute', side_effect=_execute) - @mock.patch.object( - db, 'volume_admin_metadata_get', - side_effect=_volume_admin_metadata_get) - def test_initialize_connection_target_port_not_specified( - self, volume_admin_metadata_get, execute, - brick_get_connector_properties): - """Normal case: target_port is not specified.""" - compute_connector = DEFAULT_CONNECTOR.copy() - compute_connector['ip'] = '127.0.0.2' - drv = vsp_fc.VSPFCDriver( - configuration=self.configuration, db=db) - self._setup_config() - self.configuration.vsp_target_ports = None - drv.do_setup(None) - ret = drv.initialize_connection(TEST_VOLUME[0], 
compute_connector) - self.assertEqual('fibre_channel', ret['driver_volume_type']) - self.assertEqual(['0123456789abcdef'], ret['data']['target_wwn']) - self.assertEqual(0, ret['data']['target_lun']) - - @mock.patch.object( - utils, 'brick_get_connector_properties', - side_effect=_brick_get_connector_properties) - @mock.patch.object(vsp_utils, 'execute', side_effect=_execute) - @mock.patch.object( - db, 'volume_admin_metadata_get', - side_effect=_volume_admin_metadata_get) - def test_initialize_connection_compute_port_not_specified( - self, volume_admin_metadata_get, execute, - brick_get_connector_properties): - """Normal case: compute_target_port is not specified.""" - compute_connector = DEFAULT_CONNECTOR.copy() - compute_connector['ip'] = '127.0.0.2' - drv = vsp_fc.VSPFCDriver( - configuration=self.configuration, db=db) - self._setup_config() - self.configuration.vsp_compute_target_ports = None - drv.do_setup(None) - ret = drv.initialize_connection(TEST_VOLUME[0], compute_connector) - self.assertEqual('fibre_channel', ret['driver_volume_type']) - self.assertEqual(['0123456789abcdef'], ret['data']['target_wwn']) - self.assertEqual(0, ret['data']['target_lun']) - - @mock.patch.object(vsp_utils, 'execute', side_effect=_execute) - def test_terminate_connection(self, execute): - """Normal case: Terminate connection.""" - self.driver.terminate_connection(TEST_VOLUME[6], DEFAULT_CONNECTOR) - - @mock.patch.object(vsp_utils, 'execute', side_effect=_execute) - def test_terminate_connection_provider_location_is_none(self, execute): - """Unusual case: Volume's provider_location is None(MSGID0302-W).""" - self.driver.terminate_connection(TEST_VOLUME[2], DEFAULT_CONNECTOR) - - @mock.patch.object(vsp_utils, 'execute', side_effect=_execute) - def test_terminate_connection_no_port_mapped_to_ldev(self, execute): - """Unusual case: No port is mapped to the LDEV.""" - self.driver.terminate_connection(TEST_VOLUME[3], DEFAULT_CONNECTOR) - - @mock.patch.object(vsp_utils, 'execute', 
side_effect=_execute) - def test_terminate_connection_initiator_iqn_not_found(self, execute): - """Error case: The connector does not have 'wwpns'(MSGID0650-E).""" - connector = dict(DEFAULT_CONNECTOR) - del connector['wwpns'] - - self.assertRaises( - exception.VSPError, self.driver.terminate_connection, - TEST_VOLUME[0], connector) - - @mock.patch.object(vsp_utils, 'execute', side_effect=_execute) - def test_copy_volume_to_image(self, execute): - """Normal case: Copy a volume to an image.""" - image_service = 'fake_image_service' - image_meta = 'fake_image_meta' - - with mock.patch.object(driver.VolumeDriver, 'copy_volume_to_image') \ - as mock_copy_volume_to_image: - self.driver.copy_volume_to_image( - self.ctxt, TEST_VOLUME[0], image_service, image_meta) - - mock_copy_volume_to_image.assert_called_with( - self.ctxt, TEST_VOLUME[0], image_service, image_meta) - - @mock.patch.object(vsp_utils, 'execute', side_effect=_execute) - def test_manage_existing(self, execute): - """Normal case: Bring an existing volume under Cinder's control.""" - ret = self.driver.manage_existing( - TEST_VOLUME[0], self.test_existing_ref) - self.assertEqual('0', ret['provider_location']) - - @mock.patch.object(vsp_utils, 'execute', side_effect=_execute) - def test_manage_existing_get_size_normal(self, execute): - """Normal case: Return an existing LDEV's size.""" - self.driver.manage_existing_get_size( - TEST_VOLUME[0], self.test_existing_ref) - - @mock.patch.object(vsp_utils, 'execute', side_effect=_execute) - def test_manage_existing_get_size_none_ldev_ref(self, execute): - """Error case: Source LDEV's properties do not exist(MSGID0707-E).""" - self.assertRaises( - exception.ManageExistingInvalidReference, - self.driver.manage_existing_get_size, TEST_VOLUME[0], - self.test_existing_none_ldev_ref) - - @mock.patch.object(vsp_utils, 'execute', side_effect=_execute) - def test_manage_existing_get_size_invalid_ldev_ref(self, execute): - """Error case: Source LDEV's ID is an invalid 
decimal(MSGID0707-E).""" - self.assertRaises( - exception.ManageExistingInvalidReference, - self.driver.manage_existing_get_size, TEST_VOLUME[0], - self.test_existing_invalid_ldev_ref) - - @mock.patch.object(vsp_utils, 'execute', side_effect=_execute) - def test_manage_existing_get_size_value_error_ref(self, execute): - """Error case: Source LDEV's ID is an invalid hex(MSGID0707-E).""" - self.assertRaises( - exception.ManageExistingInvalidReference, - self.driver.manage_existing_get_size, TEST_VOLUME[0], - self.test_existing_value_error_ref) - - @mock.patch.object(vsp_utils, 'execute', side_effect=_execute) - def test_manage_existing_get_size_no_ldev_ref(self, execute): - """Error case: Source LDEV's ID is not specified(MSGID0707-E).""" - self.assertRaises( - exception.ManageExistingInvalidReference, - self.driver.manage_existing_get_size, TEST_VOLUME[0], - self.test_existing_no_ldev_ref) - - @mock.patch.object(vsp_utils, 'execute', side_effect=_execute) - def test_manage_existing_get_size_invalid_sts_ldev(self, execute): - """Error case: Source LDEV's STS is invalid(MSGID0707-E).""" - self.assertRaises( - exception.ManageExistingInvalidReference, - self.driver.manage_existing_get_size, TEST_VOLUME[0], - self.test_existing_invalid_sts_ldev) - - @mock.patch.object(vsp_utils, 'execute', side_effect=_execute) - def test_manage_existing_get_size_invalid_vol_attr(self, execute): - """Error case: Source LDEV's VOL_ATTR is invalid(MSGID0702-E).""" - self.assertRaises( - exception.ManageExistingInvalidReference, - self.driver.manage_existing_get_size, TEST_VOLUME[0], - self.test_existing_invalid_vol_attr) - - @mock.patch.object(vsp_utils, 'execute', side_effect=_execute) - def test_manage_existing_get_size_invalid_size_ref(self, execute): - """Error case: Source LDEV's VOL_Capacity is invalid(MSGID0703-E).""" - self.assertRaises( - exception.ManageExistingInvalidReference, - self.driver.manage_existing_get_size, TEST_VOLUME[0], - self.test_existing_invalid_size) - - 
@mock.patch.object(vsp_utils, 'execute', side_effect=_execute) - def test_manage_existing_get_size_invalid_port_cnt(self, execute): - """Error case: Source LDEV's NUM_PORT is invalid(MSGID0704-E).""" - self.assertRaises( - exception.ManageExistingInvalidReference, - self.driver.manage_existing_get_size, TEST_VOLUME[0], - self.test_existing_invalid_port_cnt) - - @mock.patch.object(vsp_utils, 'execute', side_effect=_execute) - @mock.patch.object( - vsp_horcm, '_run_horcmstart', side_effect=_fake_run_horcmstart2) - def test_manage_existing_get_size_failed_to_start_horcmgr( - self, _run_horcmstart, execute): - """Error case: _start_horcmgr() returns an error(MSGID0320-W).""" - global run_horcmstart_returns_error2 - run_horcmstart_returns_error2 = True - self.assertRaises( - exception.ManageExistingInvalidReference, - self.driver.manage_existing_get_size, TEST_VOLUME[0], - self.test_existing_failed_to_start_horcmgr) - run_horcmstart_returns_error2 = False - - @mock.patch.object(vsp_utils, 'execute', side_effect=_execute) - def test_unmanage(self, execute): - """Normal case: Take out a volume from Cinder's control.""" - self.driver.unmanage(TEST_VOLUME[0]) - - @mock.patch.object(vsp_utils, 'execute', side_effect=_execute) - def test_unmanage_provider_location_is_none(self, execute): - """Error case: The volume's provider_location is None(MSGID0304-W).""" - self.driver.unmanage(TEST_VOLUME[2]) - - @mock.patch.object(vsp_utils, 'execute', side_effect=_execute) - def test_unmanage_volume_invalid_sts_ldev(self, execute): - """Unusual case: The volume's STS is BLK.""" - self.driver.unmanage(TEST_VOLUME[13]) - - @mock.patch.object(vsp_utils, 'execute', side_effect=_execute) - def test_unmanage_volume_is_busy(self, execute): - """Error case: The volume is in a THIN volume pair(MSGID0616-E).""" - self.assertRaises( - exception.VolumeIsBusy, self.driver.unmanage, TEST_VOLUME[4]) - - @mock.patch.object(vsp_utils, 'execute', side_effect=_execute) - def 
test_copy_image_to_volume(self, execute): - """Normal case: Copy an image to a volume.""" - image_service = 'fake_image_service' - image_id = 'fake_image_id' - self.configuration.vsp_horcm_numbers = (400, 401) - - with mock.patch.object(driver.VolumeDriver, 'copy_image_to_volume') \ - as mock_copy_image: - self.driver.copy_image_to_volume( - self.ctxt, TEST_VOLUME[0], image_service, image_id) - - mock_copy_image.assert_called_with( - self.ctxt, TEST_VOLUME[0], image_service, image_id) - - @mock.patch.object(utils, 'execute', side_effect=_cinder_execute) - def test_update_migrated_volume_success(self, execute): - """Normal case: 'modify ldev -status discard_zero_page' succeeds.""" - self.assertRaises( - NotImplementedError, - self.driver.update_migrated_volume, - self.ctxt, - TEST_VOLUME[0], - TEST_VOLUME[2], - "available") - - @mock.patch.object(vsp_horcm, '_EXEC_RETRY_INTERVAL', 1) - @mock.patch.object(vsp_horcm, '_EXEC_MAX_WAITTIME', 1) - @mock.patch.object(vsp_utils, 'execute', side_effect=_execute) - def test_update_migrated_volume_error(self, execute): - """Error case: 'modify ldev' fails(MSGID0315-W).""" - self.assertRaises( - NotImplementedError, - self.driver.update_migrated_volume, - self.ctxt, - TEST_VOLUME[0], - TEST_VOLUME[3], - "available") - - def test_get_ldev_volume_is_none(self): - """Error case: The volume is None.""" - self.assertIsNone(vsp_utils.get_ldev(None)) - - def test_check_ignore_error_string(self): - """Normal case: ignore_error is a string.""" - ignore_error = 'SSB=0xB980,0xB902' - stderr = ('raidcom: [EX_CMDRJE] An order to the control/command device' - ' was rejected\nIt was rejected due to SKEY=0x05, ASC=0x26, ' - 'ASCQ=0x00, SSB=0xB980,0xB902 on Serial#(400003)\nCAUSE : ' - 'The specified port can not be operated.') - self.assertTrue(vsp_utils.check_ignore_error(ignore_error, stderr)) - - def test_check_opts_parameter_specified(self): - """Normal case: A valid parameter is specified.""" - cfg.CONF.paramAAA = 'aaa' - 
vsp_utils.check_opts(conf.Configuration(None), - [cfg.StrOpt('paramAAA')]) - - def test_check_opt_value_parameter_not_set(self): - """Error case: A parameter is not set(MSGID0601-E).""" - self.assertRaises(cfg.NoSuchOptError, - vsp_utils.check_opt_value, - conf.Configuration(None), - ['paramCCC']) - - def test_build_initiator_target_map_no_lookup_service(self): - """Normal case: None is specified for lookup_service.""" - connector = {'wwpns': ['0000000000000000', '1111111111111111']} - target_wwns = ['2222222222222222', '3333333333333333'] - init_target_map = vsp_utils.build_initiator_target_map(connector, - target_wwns, - None) - self.assertEqual( - {'0000000000000000': ['2222222222222222', '3333333333333333'], - '1111111111111111': ['2222222222222222', '3333333333333333']}, - init_target_map) - - def test_update_conn_info_not_update_conn_info(self): - """Normal case: Not update connection info.""" - vsp_utils.update_conn_info(dict({'data': dict({'target_wwn': []})}), - dict({'wwpns': []}), - None) diff --git a/cinder/tests/unit/volume/drivers/hitachi/test_hitachi_vsp_horcm_iscsi.py b/cinder/tests/unit/volume/drivers/hitachi/test_hitachi_vsp_horcm_iscsi.py deleted file mode 100644 index ff1ccaa2d..000000000 --- a/cinder/tests/unit/volume/drivers/hitachi/test_hitachi_vsp_horcm_iscsi.py +++ /dev/null @@ -1,1900 +0,0 @@ -# Copyright (C) 2016, Hitachi, Ltd. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
-# -"""Unit tests for Hitachi VSP Driver.""" - -import copy -import os - -import mock -from os_brick.initiator import connector as brick_connector -from oslo_concurrency import processutils -from oslo_config import cfg -from six.moves import range - -from cinder import context as cinder_context -from cinder import db -from cinder.db.sqlalchemy import api as sqlalchemy_api -from cinder import exception -from cinder.objects import snapshot as obj_snap -from cinder import test -from cinder.tests.unit import fake_snapshot -from cinder.tests.unit import fake_volume -from cinder import utils -from cinder.volume import configuration as conf -from cinder.volume import driver -from cinder.volume.drivers.hitachi import vsp_horcm -from cinder.volume.drivers.hitachi import vsp_iscsi -from cinder.volume.drivers.hitachi import vsp_utils -from cinder.volume import utils as volume_utils - -# Dummy return values -SUCCEED = 0 -STDOUT = "" -STDERR = "" -CMD_SUCCEED = (SUCCEED, STDOUT, STDERR) - -# Configuration parameter values -CONFIG_MAP = { - 'serial': '492015', - 'my_ip': '127.0.0.1', -} - -# CCI instance numbers -INST_NUMS = (200, 201) - -# Shadow Image copy group names -CG_MAP = {'cg%s' % x: vsp_horcm._COPY_GROUP % ( - CONFIG_MAP['my_ip'], CONFIG_MAP['serial'], INST_NUMS[1], x) - for x in range(3) -} - -# Map containing all maps for dummy response creation -DUMMY_RESPONSE_MAP = CONFIG_MAP.copy() -DUMMY_RESPONSE_MAP.update(CG_MAP) - -# cmd: raidcom get copy_grp -GET_COPY_GRP_RESULT = ( - "COPY_GROUP LDEV_GROUP MU# JID# Serial#\n" - "%(cg0)s %(cg0)sP 0 - %(serial)s\n" - "%(cg1)s %(cg1)sP 0 - %(serial)s\n" - "%(cg1)s %(cg1)sS - - %(serial)s\n" -) % DUMMY_RESPONSE_MAP - -# cmd: raidcom get copy_grp -GET_COPY_GRP_RESULT2 = "COPY_GROUP LDEV_GROUP MU# JID# Serial#\n" - -# cmd: raidcom get copy_grp -GET_COPY_GRP_RESULT3 = ( - "COPY_GROUP LDEV_GROUP MU# JID# Serial#\n" - "%(cg0)s %(cg0)sP 0 - %(serial)s\n" - "%(cg0)s %(cg0)sS 0 - %(serial)s\n" -) % DUMMY_RESPONSE_MAP - -# cmd: raidcom 
get device_grp -device_grp_name VSP-127.0.0.14920150C91P -GET_DEVICE_GRP_MU1P_RESULT = ( - "LDEV_GROUP LDEV_NAME LDEV# Serial#\n" - "%(cg1)sP VSP-LDEV-0-2 0 %(serial)s\n" -) % DUMMY_RESPONSE_MAP - -# cmd: raidcom get device_grp -device_grp_name VSP-127.0.0.14920150C91S -GET_DEVICE_GRP_MU1S_RESULT = ( - "LDEV_GROUP LDEV_NAME LDEV# Serial#\n" - "%(cg1)sS VSP-LDEV-0-2 2 %(serial)s\n" -) % DUMMY_RESPONSE_MAP - -# cmd: raidcom get hba_iscsi -port CL1-A HBSD-127.0.0.1 -GET_HBA_ISCSI_CL1A_HOSTGRP_RESULT = ( - "PORT GID GROUP_NAME IQN Serial# NICK_NAME\n" - "CL1-A 0 HBSD-127.0.0.1 iqn-initiator %(serial)s -\n" -) % DUMMY_RESPONSE_MAP - -# cmd: raidcom get dp_pool -GET_DP_POOL_RESULT = ( - "PID POLS U(%) AV_CAP(MB) TP_CAP(MB) W(%) H(%) Num LDEV# LCNT " - "TL_CAP(MB) BM TR_CAP(MB) RCNT\n" - "030 POLN 0 6006 6006 75 80 1 14860 32 167477 NB 0 0\n" -) - -# cmd: raidcom get dp_pool -GET_DP_POOL_ERROR_RESULT = ( - "PID POLS U(%) POOL_NAME Seq# Num LDEV# H(%) VCAP(%) TYPE PM PT\n" -) - -# cmd: raidcom get pool -key opt -GET_POOL_KEYOPT_RESULT = ( - "PID POLS U(%%) POOL_NAME Seq# Num LDEV# H(%%) VCAP(%%) TYPE PM PT\n" - "030 POLM 30 VSPPOOL %(serial)s 1 10000 80 - OPEN N HDP\n" -) % DUMMY_RESPONSE_MAP - -# cmd: raidcom get hba_iscsi -port CL1-B-0 -GET_HBA_ISCSI_CL1B0_RESULT = ( - "PORT GID GROUP_NAME IQN Serial# NICK_NAME\n" - "CL1-B 0 HBSD-127.0.0.1 iqn-initiator %(serial)s -\n" -) % DUMMY_RESPONSE_MAP - -# cmd: raidcom get host_grp -port CL1-A -GET_HOST_GRP_CL1A_RESULT = ( - "PORT GID GROUP_NAME IQN AMD D Serial# HMD HMO_BITs\n" - "CL1-A 0 HBSD-127.0.0.1 iqn-initiator.hbsd-target BOTH S " - "%(serial)s LINUX/IRIX 83 91\n" -) % DUMMY_RESPONSE_MAP - -# cmd: raidcom get host_grp -port CL1-B -GET_HOST_GRP_CL1B_RESULT = ( - "PORT GID GROUP_NAME IQN AMD D Serial# HMD HMO_BITs\n" - "CL1-B 0 HBSD-127.0.0.1 iqn-initiator.hbsd-target BOTH S " - "%(serial)s LINUX/IRIX 83 91\n" -) % DUMMY_RESPONSE_MAP - -# raidcom add host_grp -port CLx-y -host_grp_name HBSD-127.0.0.1 -ADD_HOSTGRP_RESULT = 
"raidcom: Host group ID 0(0x0) will be used for adding.\n" - -# raidcom add host_grp -port CLx-y -host_grp_name HBSD-pair00 -ADD_HOSTGRP_PAIR_RESULT = ( - "raidcom: Host group ID 2(0x2) will be used for adding.\n" -) - -# raidcom add lun -port CL1-A-0 -ldev_id x -ADD_LUN_LUN0_RESULT = "raidcom: LUN 0(0x0) will be used for adding.\n" - -# cmd: raidcom get ldev -ldev_list undefined -cnt 1 -GET_LDEV_LDEV_LIST_UNDEFINED = ( - "LDEV : 1 VIR_LDEV : 65534\n" - "VOL_TYPE : NOT DEFINED\n" -) - -# cmd: raidcom get ldev -ldev_id 0 -cnt 2 -key front_end (LDEV) -GET_LDEV_LDEV0_CNT2_FRONTEND_RESULT2 = ( - " Serial# LDEV# SL CL VOL_TYPE VOL_Cap(BLK) PID ATTRIBUTE" - " Ports PORT_No:LU#:GRPNAME\n" - " %(serial)s 0 0 0 OPEN-V-CVS 2097152 - CVS 0\n" - " %(serial)s 1 - - NOT DEFINED - - - -\n" -) % DUMMY_RESPONSE_MAP - -# cmd: raidcom get ldev -ldev_id 0 -cnt 10 -key front_end (LDEV) -GET_LDEV_LDEV0_CNT10_FRONTEND_RESULT = ( - " Serial# LDEV# SL CL VOL_TYPE VOL_Cap(BLK) PID ATTRIBUTE" - " Ports PORT_No:LU#:GRPNAME\n" - " %(serial)s 0 0 0 OPEN-V-CVS 2097152 - CVS 0\n" - " %(serial)s 1 0 0 OPEN-V-CVS 2097152 - CVS 0\n" - " %(serial)s 2 0 0 OPEN-V-CVS 2097152 - CVS 0\n" - " %(serial)s 3 0 0 OPEN-V-CVS 2097152 - CVS 0\n" - " %(serial)s 4 0 0 OPEN-V-CVS 2097152 - CVS 0\n" - " %(serial)s 5 0 0 OPEN-V-CVS 2097152 - CVS 0\n" - " %(serial)s 6 0 0 OPEN-V-CVS 2097152 - CVS 0\n" - " %(serial)s 7 0 0 OPEN-V-CVS 2097152 - CVS 0\n" - " %(serial)s 8 - - NOT DEFINED - - - -\n" - " %(serial)s 9 - - NOT DEFINED - - - -\n" -) % DUMMY_RESPONSE_MAP - -# cmd: raidcom get ldev -ldev_id x -check_status NOT DEFINED -GET_LDEV_CHECKSTATUS_ERR = ( - "raidcom: testing condition has failed with exit(1).\n" -) - -# cmd: raidcom get ldev -ldev_id 0 -GET_LDEV_LDEV0_RESULT = """ -LDEV : 0 -VOL_TYPE : OPEN-V-CVS -VOL_ATTR : CVS : HDP -VOL_Capacity(BLK) : 2097152 -NUM_PORT : 0 -STS : NML -""" - -# cmd: raidcom get ldev -ldev_id 1 -GET_LDEV_LDEV1_RESULT = """ -LDEV : 1 -VOL_TYPE : OPEN-V-CVS -VOL_ATTR : CVS : HDP 
-VOL_Capacity(BLK) : 268435456 -NUM_PORT : 0 -STS : NML -""" - -# cmd: raidcom get ldev -ldev_id 3 -GET_LDEV_LDEV3_RESULT = """ -LDEV : 3 -VOL_TYPE : OPEN-V-CVS -VOL_ATTR : CVS : HDP -VOL_Capacity(BLK) : 2097152 -NUM_PORT : 0 -STS : -""" - -# cmd: raidcom get ldev -ldev_id 4 -GET_LDEV_LDEV4_RESULT = """ -LDEV : 4 -VOL_TYPE : OPEN-V-CVS -VOL_ATTR : CVS : QS : HDP : HDT -VOL_Capacity(BLK) : 2097152 -NUM_PORT : 0 -STS : NML -""" - -# cmd: raidcom get ldev -ldev_id 5 -GET_LDEV_LDEV5_RESULT = """ -LDEV : 5 -VOL_TYPE : OPEN-V-CVS -VOL_ATTR : CVS : HDP : VVOL -VOL_Capacity(BLK) : 2097152 -NUM_PORT : 0 -STS : NML -""" - -# cmd: raidcom get ldev -ldev_id 6 -GET_LDEV_LDEV6_RESULT = """ -LDEV : 6 -VOL_TYPE : OPEN-V-CVS -PORTs : CL1-A-0 0 HBSD-127.0.0.1 -VOL_ATTR : CVS : HDP -VOL_Capacity(BLK) : 2097152 -NUM_PORT : 1 -STS : NML -""" - -# cmd: raidcom get ldev -ldev_id 7 -GET_LDEV_LDEV7_RESULT = """ -LDEV : 7 -VOL_TYPE : OPEN-V-CVS -VOL_ATTR : CVS : QS : HDP : HDT -VOL_Capacity(BLK) : 2097152 -NUM_PORT : 0 -STS : NML -""" - -# cmd: raidcom get ldev -ldev_id 10 -GET_LDEV_LDEV10_RESULT = """ -LDEV : 10 -VOL_TYPE : OPEN-V-CVS -VOL_ATTR : CVS : MRCF : HDP : HDT -VOL_Capacity(BLK) : 2097152 -NUM_PORT : 1 -STS : NML -""" - -# cmd: raidcom get ldev -ldev_id 11 -GET_LDEV_LDEV11_RESULT = """ -LDEV : 11 -VOL_TYPE : OPEN-V-CVS -VOL_ATTR : CVS : QS : HDP : HDT -VOL_Capacity(BLK) : 2097152 -NUM_PORT : 1 -STS : NML -""" - -# cmd: raidcom get ldev -ldev_id 12 -GET_LDEV_LDEV12_RESULT = """ -LDEV : 12 -VOL_TYPE : OPEN-V-CVS -VOL_ATTR : CVS : MRCF : HDP : HDT -VOL_Capacity(BLK) : 2097152 -NUM_PORT : 1 -STS : NML -""" - -# cmd: raidcom get ldev -ldev_id 13 -GET_LDEV_LDEV13_RESULT = """ -LDEV : 13 -VOL_TYPE : OPEN-V-CVS -VOL_ATTR : CVS : MRCF : HDP : HDT -VOL_Capacity(BLK) : 2097152 -NUM_PORT : 1 -STS : BLK -""" - -# cmd: raidcom get ldev -ldev_id 14 -GET_LDEV_LDEV14_RESULT = """ -LDEV : 14 -VOL_TYPE : OPEN-V-CVS -VOL_ATTR : CVS : HDP : HDT -VOL_Capacity(BLK) : 9999999 -NUM_PORT : 1 -STS : NML 
-""" - -# cmd: raidcom get lun -port CL1-A-0 -GET_LUN_CL1A0_RESULT = ( - "PORT GID HMD LUN NUM LDEV CM Serial# HMO_BITs\n" - "CL1-A 0 LINUX/IRIX 4 1 4 - %(serial)s\n" - "CL1-A 0 LINUX/IRIX 254 1 5 - %(serial)s\n" - "CL1-A 0 LINUX/IRIX 255 1 6 - %(serial)s\n" -) % DUMMY_RESPONSE_MAP - -# cmd: raidcom get port -GET_PORT_RESULT = ( - "PORT TYPE ATTR SPD LPID FAB CONN SSW SL Serial# WWN PHY_PORT\n" - "CL1-A ISCSI TAR 10G 01 N UNKN Y 0 %(serial)s - -\n" - "CL1-B ISCSI TAR 10G 01 N UNKN Y 0 %(serial)s - -\n" - "CL3-A ISCSI TAR 10G 01 N UNKN Y 0 %(serial)s - -\n" - "CL3-B ISCSI TAR 10G 01 N UNKN Y 0 %(serial)s - -\n" - "CL4-A ISCSI TAR 10G 01 N UNKN Y 0 %(serial)s - -\n" -) % DUMMY_RESPONSE_MAP - -# cmd: raidcom get port -port CL1-A -key opt -GET_PORT_CL1A_KEY_OPT_RESULT = ( - "PORT : CL1-A\n" - "TCP_OPT : IPV6_D : SACK_E : DACK_E : INS_D : VTAG_D\n" - "TCP_MTU : 1500\n" - "WSZ : 64KB\n" - "KA_TIMER : 60\n" - "TCP_PORT : 3260\n" - "IPV4_ADDR : 11.22.33.44\n" - "IPV4_SMSK : 255.255.0.0\n" - "IPV4_GWAD : 0.0.0.0\n" - "IPV6_ADDR_INF : INV : AM : fe80::\n" - "IPV6_GADR_INF : INV : AM : ::\n" - "IPV6_GWAD_INF : INV : :: : ::\n" - "VLAN_ID : -\n" -) % DUMMY_RESPONSE_MAP - -# cmd: raidcom get port -port CL1-B -key opt -GET_PORT_CL1B_KEY_OPT_RESULT = ( - "PORT : CL1-B\n" - "TCP_OPT : IPV6_D : SACK_E : DACK_E : INS_D : VTAG_D\n" - "TCP_MTU : 1500\n" - "WSZ : 64KB\n" - "KA_TIMER : 60\n" - "TCP_PORT : 3260\n" - "IPV4_ADDR : 11.22.33.44\n" - "IPV4_SMSK : 255.255.0.0\n" - "IPV4_GWAD : 0.0.0.0\n" - "IPV6_ADDR_INF : INV : AM : fe80::\n" - "IPV6_GADR_INF : INV : AM : ::\n" - "IPV6_GWAD_INF : INV : :: : ::\n" - "VLAN_ID : -\n" -) % DUMMY_RESPONSE_MAP - -# cmd: raidcom get port -port CL3-A -key opt -GET_PORT_CL3A_KEY_OPT_RESULT = ( - "PORT : CL3-A\n" - "TCP_OPT : IPV6_D : SACK_E : DACK_E : INS_D : VTAG_D\n" - "TCP_MTU : 1500\n" - "WSZ : 64KB\n" - "KA_TIMER : 60\n" - "TCP_PORT : 3260\n" - "IPV4_ADDR : 11.22.33.44\n" - "IPV4_SMSK : 255.255.0.0\n" - "IPV4_GWAD : 0.0.0.0\n" - "IPV6_ADDR_INF : 
INV : AM : fe80::\n" - "IPV6_GADR_INF : INV : AM : ::\n" - "IPV6_GWAD_INF : INV : :: : ::\n" - "VLAN_ID : -\n" -) % DUMMY_RESPONSE_MAP - -# cmd: raidcom get port -port CL3-A -key opt -GET_PORT_CL3B_KEY_OPT_RESULT = ( - "PORT : CL3-B\n" - "TCP_OPT : IPV6_D : SACK_E : DACK_E : INS_D : VTAG_D\n" - "TCP_MTU : 1500\n" - "WSZ : 64KB\n" - "KA_TIMER : 60\n" - "TCP_PORT : 3260\n" - "IPV4_ADDR : 11.22.33.44\n" - "IPV4_SMSK : 255.255.0.0\n" - "IPV4_GWAD : 0.0.0.0\n" - "IPV6_ADDR_INF : INV : AM : fe80::\n" - "IPV6_GADR_INF : INV : AM : ::\n" - "IPV6_GWAD_INF : INV : :: : ::\n" - "VLAN_ID : -\n" -) % DUMMY_RESPONSE_MAP - -# cmd: raidcom get snapshot -ldev_id 4 -GET_SNAPSHOT_LDEV4_RESULT = ( - "SnapShot_name P/S STAT Serial# LDEV# MU# P-LDEV# PID %% MODE " - "SPLT-TIME\n" - "VSP-SNAP0 P-VOL PSUS %(serial)s 4 3 8 31 100 ---- 57db5cb0\n" - "VSP-SNAP0 P-VOL PSUS %(serial)s 4 4 9 31 100 ---- 57db5cb0\n" -) % DUMMY_RESPONSE_MAP - -# cmd: raidcom get snapshot -ldev_id 7 -GET_SNAPSHOT_LDEV7_RESULT = ( - "SnapShot_name P/S STAT Serial# LDEV# MU# P-LDEV# PID %% MODE " - "SPLT-TIME\n" - "VSP-SNAP0 P-VOL PSUS %(serial)s 7 3 8 31 100 ---- 57db5cb0\n" - "VSP-SNAP0 P-VOL PSUS %(serial)s 7 4 9 31 100 ---- 57db5cb0\n" -) % DUMMY_RESPONSE_MAP - -# cmd: raidcom get snapshot -ldev_id 8 -GET_SNAPSHOT_LDEV8_RESULT = ( - "SnapShot_name P/S STAT Serial# LDEV# MU# P-LDEV# PID %% MODE " - "SPLT-TIME\n" - "VSP-SNAP0 S-VOL SSUS %(serial)s 8 3 7 31 100 ---- 57db5cb0\n" -) % DUMMY_RESPONSE_MAP - -# cmd: raidcom get snapshot -ldev_id 11 -GET_SNAPSHOT_LDEV11_RESULT = ( - "SnapShot_name P/S STAT Serial# LDEV# MU# P-LDEV# PID %% MODE " - "SPLT-TIME\n" - "VSP-SNAP0 S-VOL SSUS %(serial)s 11 3 7 31 100 ---- 57db5cb0\n" -) % DUMMY_RESPONSE_MAP - -# cmd: pairdisplay -CLI -d 492015 1 0 -IM201 -PAIRDISPLAY_LDEV0_1_RESULT = ( - "Group PairVol L/R Port# TID LU-M Seq# LDEV# " - "P/S Status Seq# P-LDEV# M\n" - "%(cg0)s VSP-LDEV-0-1 L CL1-A-0 0 0 0 %(serial)s 0 " - "P-VOL PSUS %(serial)s 1 W\n" - "%(cg0)s VSP-LDEV-0-1 R 
CL1-A-0 0 1 0 %(serial)s 1 " - "S-VOL SSUS - 0 -\n" -) % DUMMY_RESPONSE_MAP - -# cmd: pairdisplay -CLI -d 492015 10 0 -IM201 -PAIRDISPLAY_LDEV7_10_RESULT = ( - "Group PairVol L/R Port# TID LU-M Seq# LDEV# " - "P/S Status Seq# P-LDEV# M\n" - "%(cg0)s VSP-LDEV-7-10 L CL1-A-1 0 0 0 %(serial)s 7 " - "P-VOL PSUS %(serial)s 10 W\n" - "%(cg0)s VSP-LDEV-7-10 R CL1-A-1 0 1 0 %(serial)s 10 " - "S-VOL SSUS - 7 -\n" -) % DUMMY_RESPONSE_MAP - -# cmd: pairdisplay -CLI -d 492015 12 0 -IM201 -PAIRDISPLAY_LDEV7_12_RESULT = ( - "Group PairVol L/R Port# TID LU-M Seq# LDEV# " - "P/S Status Seq# P-LDEV# M\n" - "%(cg0)s VSP-LDEV-7-12 L CL1-A-1 0 0 0 %(serial)s 7 " - "P-VOL PSUS %(serial)s 12 W\n" - "%(cg0)s VSP-LDEV-7-12 R CL1-A-1 0 1 0 %(serial)s 12 " - "S-VOL SSUS - 7 -\n" -) % DUMMY_RESPONSE_MAP - -# cmd: raidqry -h -RAIDQRY_RESULT = ( - "Model : RAID-Manager/Linux/x64\n" - "Ver&Rev: 01-39-03/03\n" - "Usage : raidqry [options] for HORC[200]\n" - " -h Help/Usage\n" - " -I[#] Set to HORCMINST#\n" - " -IH[#] or -ITC[#] Set to HORC mode [and HORCMINST#]\n" - " -IM[#] or -ISI[#] Set to MRCF mode [and HORCMINST#]\n" - " -z Set to the interactive mode\n" - " -zx Set to the interactive mode and HORCM monitoring\n" - " -q Quit(Return to main())\n" - " -g Specify for getting all group name on local\n" - " -l Specify the local query\n" - " -lm Specify the local query with full micro version\n" - " -r Specify the remote query\n" - " -f Specify display for floatable host\n" -) - -EXECUTE_TABLE = { - ('add', 'hba_iscsi', '-port', 'CL3-A-0', '-hba_iscsi_name', - 'iqn-initiator'): (vsp_horcm.EX_INVARG, STDOUT, STDERR), - ('add', 'host_grp', '-port', 'CL1-A', '-host_grp_name', - 'HBSD-pair00'): (SUCCEED, ADD_HOSTGRP_PAIR_RESULT, STDERR), - ('add', 'host_grp', '-port', 'CL1-B', '-host_grp_name', - 'HBSD-pair00'): (SUCCEED, ADD_HOSTGRP_PAIR_RESULT, STDERR), - ('add', 'host_grp', '-port', 'CL3-A', '-host_grp_name', - 'HBSD-127.0.0.1', '-iscsi_name', 'iqn-initiator.hbsd-target'): ( - SUCCEED, 
ADD_HOSTGRP_RESULT, STDERR), - ('add', 'host_grp', '-port', 'CL3-B', '-host_grp_name', - 'HBSD-127.0.0.1', '-iscsi_name', 'iqn-initiator.hbsd-target'): ( - SUCCEED, ADD_HOSTGRP_RESULT, STDERR), - ('add', 'host_grp', '-port', 'CL3-B', '-host_grp_name', - 'HBSD-pair00'): (SUCCEED, ADD_HOSTGRP_PAIR_RESULT, STDERR), - ('add', 'lun', '-port', 'CL1-A-0', '-ldev_id', 0): ( - SUCCEED, ADD_LUN_LUN0_RESULT, STDERR), - ('add', 'lun', '-port', 'CL1-A-0', '-ldev_id', 1): ( - SUCCEED, ADD_LUN_LUN0_RESULT, STDERR), - ('add', 'lun', '-port', 'CL1-A-0', '-ldev_id', 5): ( - SUCCEED, ADD_LUN_LUN0_RESULT, STDERR), - ('add', 'lun', '-port', 'CL1-A-0', '-ldev_id', 6): ( - vsp_horcm.EX_CMDRJE, STDOUT, vsp_horcm._LU_PATH_DEFINED), - ('add', 'lun', '-port', 'CL1-B-0', '-ldev_id', 0, '-lun_id', 0): ( - SUCCEED, ADD_LUN_LUN0_RESULT, STDERR), - ('extend', 'ldev', '-ldev_id', 3, '-capacity', '128G'): ( - vsp_horcm.EX_CMDIOE, STDOUT, - "raidcom: [EX_CMDIOE] Control command I/O error"), - ('get', 'hba_iscsi', '-port', 'CL1-A', 'HBSD-127.0.0.1'): ( - SUCCEED, GET_HBA_ISCSI_CL1A_HOSTGRP_RESULT, STDERR), - ('get', 'hba_iscsi', '-port', 'CL1-A', 'HBSD-127.0.0.2'): ( - SUCCEED, GET_HBA_ISCSI_CL1A_HOSTGRP_RESULT, STDERR), - ('get', 'copy_grp'): (SUCCEED, GET_COPY_GRP_RESULT, STDERR), - ('get', 'device_grp', '-device_grp_name', CG_MAP['cg1'] + 'P'): ( - SUCCEED, GET_DEVICE_GRP_MU1P_RESULT, STDERR), - ('get', 'device_grp', '-device_grp_name', CG_MAP['cg1'] + 'S'): ( - SUCCEED, GET_DEVICE_GRP_MU1S_RESULT, STDERR), - ('get', 'dp_pool'): (SUCCEED, GET_DP_POOL_RESULT, STDERR), - ('get', 'pool', '-key', 'opt'): (SUCCEED, GET_POOL_KEYOPT_RESULT, STDERR), - ('get', 'hba_iscsi', '-port', 'CL1-B-0'): ( - SUCCEED, GET_HBA_ISCSI_CL1B0_RESULT, STDERR), - ('get', 'host_grp', '-port', 'CL1-A'): ( - SUCCEED, GET_HOST_GRP_CL1A_RESULT, STDERR), - ('get', 'host_grp', '-port', 'CL1-B'): ( - SUCCEED, GET_HOST_GRP_CL1B_RESULT, STDERR), - ('get', 'ldev', '-ldev_list', 'undefined', '-cnt', '1'): ( - SUCCEED, 
GET_LDEV_LDEV_LIST_UNDEFINED, STDERR), - ('get', 'ldev', '-ldev_id', 0, '-cnt', 2, '-key', 'front_end'): ( - SUCCEED, GET_LDEV_LDEV0_CNT2_FRONTEND_RESULT2, STDERR), - ('get', 'ldev', '-ldev_id', 0, '-cnt', 10, '-key', 'front_end'): ( - SUCCEED, GET_LDEV_LDEV0_CNT10_FRONTEND_RESULT, STDERR), - ('get', 'ldev', '-ldev_id', 0, '-check_status', 'NOT', 'DEFINED'): ( - 1, STDOUT, GET_LDEV_CHECKSTATUS_ERR), - ('get', 'ldev', '-ldev_id', 0): (SUCCEED, GET_LDEV_LDEV0_RESULT, STDERR), - ('get', 'ldev', '-ldev_id', 1): (SUCCEED, GET_LDEV_LDEV1_RESULT, STDERR), - ('get', 'ldev', '-ldev_id', 3): (SUCCEED, GET_LDEV_LDEV3_RESULT, STDERR), - ('get', 'ldev', '-ldev_id', 4): (SUCCEED, GET_LDEV_LDEV4_RESULT, STDERR), - ('get', 'ldev', '-ldev_id', 5): (SUCCEED, GET_LDEV_LDEV5_RESULT, STDERR), - ('get', 'ldev', '-ldev_id', 6): (SUCCEED, GET_LDEV_LDEV6_RESULT, STDERR), - ('get', 'ldev', '-ldev_id', 7): (SUCCEED, GET_LDEV_LDEV7_RESULT, STDERR), - ('get', 'ldev', '-ldev_id', 10): (SUCCEED, GET_LDEV_LDEV10_RESULT, STDERR), - ('get', 'ldev', '-ldev_id', 11): (SUCCEED, GET_LDEV_LDEV11_RESULT, STDERR), - ('get', 'ldev', '-ldev_id', 12): (SUCCEED, GET_LDEV_LDEV12_RESULT, STDERR), - ('get', 'ldev', '-ldev_id', 13): (SUCCEED, GET_LDEV_LDEV13_RESULT, STDERR), - ('get', 'ldev', '-ldev_id', 14): (SUCCEED, GET_LDEV_LDEV14_RESULT, STDERR), - ('get', 'ldev', '-ldev_id', 15): (vsp_horcm.EX_COMERR, "", STDERR), - ('get', 'lun', '-port', 'CL1-A-0'): ( - SUCCEED, GET_LUN_CL1A0_RESULT, STDERR), - ('get', 'port'): (SUCCEED, GET_PORT_RESULT, STDERR), - ('get', 'port', '-port', 'CL1-A', '-key', 'opt'): ( - SUCCEED, GET_PORT_CL1A_KEY_OPT_RESULT, STDERR), - ('get', 'port', '-port', 'CL1-B', '-key', 'opt'): ( - SUCCEED, GET_PORT_CL1B_KEY_OPT_RESULT, STDERR), - ('get', 'port', '-port', 'CL3-A', '-key', 'opt'): ( - SUCCEED, GET_PORT_CL3A_KEY_OPT_RESULT, STDERR), - ('get', 'port', '-port', 'CL3-B', '-key', 'opt'): ( - SUCCEED, GET_PORT_CL3B_KEY_OPT_RESULT, STDERR), - ('get', 'snapshot', '-ldev_id', 4): ( - SUCCEED, 
GET_SNAPSHOT_LDEV4_RESULT, STDERR), - ('get', 'snapshot', '-ldev_id', 7): ( - SUCCEED, GET_SNAPSHOT_LDEV7_RESULT, STDERR), - ('get', 'snapshot', '-ldev_id', 8): ( - SUCCEED, GET_SNAPSHOT_LDEV8_RESULT, STDERR), - ('get', 'snapshot', '-ldev_id', 11): ( - SUCCEED, GET_SNAPSHOT_LDEV11_RESULT, STDERR), - ('modify', 'ldev', '-ldev_id', 3, '-status', 'discard_zero_page'): ( - vsp_horcm.EX_CMDIOE, STDOUT, STDERR), - ('pairdisplay', '-CLI', '-d', '%s' % CONFIG_MAP['serial'], 10, 0, - '-IM%s' % INST_NUMS[1]): ( - SUCCEED, PAIRDISPLAY_LDEV7_10_RESULT, STDERR), - ('pairdisplay', '-CLI', '-d', '%s' % CONFIG_MAP['serial'], 12, 0, - '-IM%s' % INST_NUMS[1]): ( - SUCCEED, PAIRDISPLAY_LDEV7_12_RESULT, STDERR), - ('pairevtwait', '-d', CONFIG_MAP['serial'], 1, '-nowaits', - '-IM%s' % INST_NUMS[1]): (vsp_horcm.COPY, STDOUT, STDERR), - ('pairevtwait', '-d', CONFIG_MAP['serial'], 8, '-nowaits', - '-IM%s' % INST_NUMS[1]): (vsp_horcm.COPY, STDOUT, STDERR), - ('pairevtwait', '-d', CONFIG_MAP['serial'], 10, '-nowaits', - '-IM%s' % INST_NUMS[1]): (vsp_horcm.SMPL, STDOUT, STDERR), - ('pairevtwait', '-d', CONFIG_MAP['serial'], 12, '-nowaits', - '-IM%s' % INST_NUMS[1]): (vsp_horcm.SMPL, STDOUT, STDERR), - ('raidqry', '-h'): (SUCCEED, RAIDQRY_RESULT, STDERR), - ('tee', '/etc/horcm501.conf'): (1, STDOUT, STDERR), - ('-login', 'user', 'pasword'): (SUCCEED, STDOUT, STDERR), - ('-login', 'userX', 'paswordX'): (vsp_horcm.EX_ENAUTH, STDOUT, STDERR), - ('-login', 'userY', 'paswordY'): (vsp_horcm.EX_COMERR, STDOUT, STDERR), -} - -EXECUTE_TABLE2 = EXECUTE_TABLE.copy() -EXECUTE_TABLE2.update({ - ('get', 'copy_grp'): (SUCCEED, GET_COPY_GRP_RESULT2, STDERR), - ('pairevtwait', '-d', CONFIG_MAP['serial'], 1, '-nowaits', - '-IM%s' % INST_NUMS[1]): (vsp_horcm.PSUS, STDOUT, STDERR), -}) - -EXECUTE_TABLE3 = EXECUTE_TABLE2.copy() - -EXECUTE_TABLE4 = EXECUTE_TABLE.copy() -EXECUTE_TABLE4.update({ - ('get', 'copy_grp'): (SUCCEED, GET_COPY_GRP_RESULT3, STDERR), - ('pairevtwait', '-d', CONFIG_MAP['serial'], 1, 
'-nowaits', - '-IM%s' % INST_NUMS[1]): (vsp_horcm.PSUE, STDOUT, STDERR), -}) - -EXECUTE_TABLE5 = EXECUTE_TABLE.copy() -EXECUTE_TABLE5.update({ - ('get', 'copy_grp'): (SUCCEED, GET_COPY_GRP_RESULT3, STDERR), - ('get', 'ldev', '-ldev_id', 1, '-check_status', 'NOT', 'DEFINED'): ( - 1, STDOUT, GET_LDEV_CHECKSTATUS_ERR), - ('pairdisplay', '-CLI', '-d', '%s' % CONFIG_MAP['serial'], 1, 0, - '-IM%s' % INST_NUMS[1]): ( - SUCCEED, PAIRDISPLAY_LDEV0_1_RESULT, STDERR), - ('pairevtwait', '-d', CONFIG_MAP['serial'], 1, '-nowaits', - '-IM%s' % INST_NUMS[1]): (vsp_horcm.SMPL, STDOUT, STDERR), -}) - -ERROR_EXECUTE_TABLE = { - ('get', 'dp_pool'): (SUCCEED, GET_DP_POOL_ERROR_RESULT, STDERR), -} - -DEFAULT_CONNECTOR = { - 'host': 'host', - 'ip': CONFIG_MAP['my_ip'], - 'initiator': 'iqn-initiator', - 'multipath': False, -} - -CTXT = cinder_context.get_admin_context() - -TEST_VOLUME = [] -for i in range(14): - volume = {} - volume['id'] = '00000000-0000-0000-0000-{0:012d}'.format(i) - volume['name'] = 'test-volume{0:d}'.format(i) - volume['provider_location'] = None if i == 2 else '{0:d}'.format(i) - volume['size'] = 256 if i == 1 else 128 - if i == 2: - volume['status'] = 'creating' - elif i == 5: - volume['status'] = 'in-use' - else: - volume['status'] = 'available' - volume = fake_volume.fake_volume_obj(CTXT, **volume) - TEST_VOLUME.append(volume) - - -def _volume_get(context, volume_id): - """Return predefined volume info.""" - return TEST_VOLUME[int(volume_id.replace("-", ""))] - -TEST_SNAPSHOT = [] -for i in range(8): - snapshot = {} - snapshot['id'] = '10000000-0000-0000-0000-{0:012d}'.format(i) - snapshot['name'] = 'TEST_SNAPSHOT{0:d}'.format(i) - snapshot['provider_location'] = None if i == 2 else '{0:d}'.format( - i if i < 5 else i + 5) - snapshot['status'] = 'creating' if i == 2 else 'available' - snapshot['volume_id'] = '00000000-0000-0000-0000-{0:012d}'.format( - i if i < 5 else 7) - snapshot['volume'] = _volume_get(None, snapshot['volume_id']) - snapshot['volume_name'] = 
'test-volume{0:d}'.format(i if i < 5 else 7) - snapshot['volume_size'] = 256 if i == 1 else 128 - snapshot = obj_snap.Snapshot._from_db_object( - CTXT, obj_snap.Snapshot(), - fake_snapshot.fake_db_snapshot(**snapshot)) - TEST_SNAPSHOT.append(snapshot) - -# Flags that determine _fake_run_horcmstart() return values -run_horcmstart_returns_error = False -run_horcmstart_returns_error2 = False -run_horcmstart3_cnt = 0 - - -def _access(*args, **kargs): - """Assume access to the path is allowed.""" - return True - - -def _execute(*args, **kargs): - """Return predefined results for command execution.""" - cmd = args[1:-3] if args[0] == 'raidcom' else args - result = EXECUTE_TABLE.get(cmd, CMD_SUCCEED) - return result - - -def _execute2(*args, **kargs): - """Return predefined results based on EXECUTE_TABLE2.""" - cmd = args[1:-3] if args[0] == 'raidcom' else args - result = EXECUTE_TABLE2.get(cmd, CMD_SUCCEED) - return result - - -def _execute3(*args, **kargs): - """Change pairevtwait's dummy return value after it is called.""" - cmd = args[1:-3] if args[0] == 'raidcom' else args - result = EXECUTE_TABLE3.get(cmd, CMD_SUCCEED) - if cmd == ('pairevtwait', '-d', CONFIG_MAP['serial'], 1, '-nowaits', - '-IM%s' % INST_NUMS[1]): - EXECUTE_TABLE3.update({ - ('pairevtwait', '-d', CONFIG_MAP['serial'], 1, '-nowaits', - '-IM%s' % INST_NUMS[1]): (vsp_horcm.PSUE, STDOUT, STDERR), - }) - return result - - -def _execute4(*args, **kargs): - """Return predefined results based on EXECUTE_TABLE4.""" - cmd = args[1:-3] if args[0] == 'raidcom' else args - result = EXECUTE_TABLE4.get(cmd, CMD_SUCCEED) - return result - - -def _execute5(*args, **kargs): - """Return predefined results based on EXECUTE_TABLE5.""" - cmd = args[1:-3] if args[0] == 'raidcom' else args - result = EXECUTE_TABLE5.get(cmd, CMD_SUCCEED) - return result - - -def _cinder_execute(*args, **kargs): - """Return predefined results or raise an exception.""" - cmd = args[1:-3] if args[0] == 'raidcom' else args - ret, stdout, 
stderr = EXECUTE_TABLE.get(cmd, CMD_SUCCEED) - if ret == SUCCEED: - return stdout, stderr - else: - pee = processutils.ProcessExecutionError(exit_code=ret, - stdout=stdout, - stderr=stderr) - raise pee - - -def _error_execute(*args, **kargs): - """Return predefined error results.""" - cmd = args[1:-3] if args[0] == 'raidcom' else args - result = _execute(*args, **kargs) - ret = ERROR_EXECUTE_TABLE.get(cmd) - return ret if ret else result - - -def _brick_get_connector_properties(multipath=False, enforce_multipath=False): - """Return a predefined connector object.""" - return DEFAULT_CONNECTOR - - -def _brick_get_connector_properties_error(multipath=False, - enforce_multipath=False): - """Return an incomplete connector object.""" - connector = dict(DEFAULT_CONNECTOR) - del connector['initiator'] - return connector - - -def _connect_volume(*args, **kwargs): - """Return predefined volume info.""" - return {'path': u'/dev/disk/by-path/xxxx', 'type': 'block'} - - -def _disconnect_volume(*args, **kwargs): - """Return without doing anything.""" - pass - - -def _copy_volume(*args, **kwargs): - """Return without doing anything.""" - pass - - -def _volume_admin_metadata_get(context, volume_id): - """Return dummy admin metadata.""" - return {'fake_key': 'fake_value'} - - -def _snapshot_metadata_update(context, snapshot_id, metadata, delete): - """Return without doing anything.""" - pass - - -def _fake_is_smpl(*args): - """Assume the Shadow Image pair status is SMPL.""" - return True - - -def _fake_run_horcmgr(*args): - """Assume CCI is running.""" - return vsp_horcm._HORCM_RUNNING - - -def _fake_run_horcmstart(*args): - """Return a value based on a flag value.""" - return 0 if not run_horcmstart_returns_error else 3 - - -def _fake_run_horcmstart2(*args): - """Return a value based on a flag value.""" - return 0 if not run_horcmstart_returns_error2 else 3 - - -def _fake_run_horcmstart3(*args): - """Update a counter and return a value based on it.""" - global run_horcmstart3_cnt 
- run_horcmstart3_cnt = run_horcmstart3_cnt + 1 - return 0 if run_horcmstart3_cnt <= 1 else 3 - - -def _fake_check_ldev_status(*args, **kwargs): - """Assume LDEV status has changed as desired.""" - return None - - -def _fake_exists(path): - """Assume the path does not exist.""" - return False - - -class VSPHORCMISCSIDriverTest(test.TestCase): - """Unit test class for VSP HORCM interface iSCSI module.""" - - test_existing_ref = {'source-id': '0'} - test_existing_none_ldev_ref = {'source-id': '2'} - test_existing_invalid_ldev_ref = {'source-id': 'AAA'} - test_existing_value_error_ref = {'source-id': 'XX:XX:XX'} - test_existing_no_ldev_ref = {} - test_existing_invalid_sts_ldev = {'source-id': '13'} - test_existing_invalid_vol_attr = {'source-id': '12'} - test_existing_invalid_size = {'source-id': '14'} - test_existing_invalid_port_cnt = {'source-id': '6'} - test_existing_failed_to_start_horcmgr = {'source-id': '15'} - - def setUp(self): - """Set up the test environment.""" - super(VSPHORCMISCSIDriverTest, self).setUp() - - self.configuration = mock.Mock(conf.Configuration) - self.ctxt = cinder_context.get_admin_context() - self._setup_config() - self._setup_driver() - - def _setup_config(self): - """Set configuration parameter values.""" - self.configuration.config_group = "HORCM" - - self.configuration.volume_backend_name = "HORCMISCSI" - self.configuration.volume_driver = ( - "cinder.volume.drivers.hitachi.vsp_iscsi.VSPISCSIDriver") - self.configuration.reserved_percentage = "0" - self.configuration.use_multipath_for_image_xfer = False - self.configuration.enforce_multipath_for_image_xfer = False - self.configuration.num_volume_device_scan_tries = 3 - self.configuration.volume_dd_blocksize = "1000" - - self.configuration.vsp_storage_id = CONFIG_MAP['serial'] - self.configuration.vsp_pool = "30" - self.configuration.vsp_thin_pool = None - self.configuration.vsp_ldev_range = "0-1" - self.configuration.vsp_default_copy_method = 'FULL' - 
self.configuration.vsp_copy_speed = 3 - self.configuration.vsp_copy_check_interval = 1 - self.configuration.vsp_async_copy_check_interval = 1 - self.configuration.vsp_target_ports = "CL1-A" - self.configuration.vsp_compute_target_ports = "CL1-A" - self.configuration.vsp_horcm_pair_target_ports = "CL1-A" - self.configuration.vsp_group_request = True - - self.configuration.vsp_use_chap_auth = True - self.configuration.vsp_auth_user = "auth_user" - self.configuration.vsp_auth_password = "auth_password" - - self.configuration.vsp_horcm_numbers = INST_NUMS - self.configuration.vsp_horcm_user = "user" - self.configuration.vsp_horcm_password = "pasword" - self.configuration.vsp_horcm_add_conf = False - - self.configuration.safe_get = self._fake_safe_get - - CONF = cfg.CONF - CONF.my_ip = CONFIG_MAP['my_ip'] - - def _fake_safe_get(self, value): - """Retrieve a configuration value avoiding throwing an exception.""" - try: - val = getattr(self.configuration, value) - except AttributeError: - val = None - return val - - @mock.patch.object( - utils, 'brick_get_connector_properties', - side_effect=_brick_get_connector_properties) - @mock.patch.object(vsp_utils, 'execute', side_effect=_execute) - def _setup_driver(self, execute, brick_get_connector_properties): - """Set up the driver environment.""" - self.driver = vsp_iscsi.VSPISCSIDriver( - configuration=self.configuration, db=db) - self.driver.do_setup(None) - self.driver.check_for_setup_error() - self.driver.create_export(None, None, None) - self.driver.ensure_export(None, None) - self.driver.remove_export(None, None) - - # API test cases - @mock.patch.object( - utils, 'brick_get_connector_properties', - side_effect=_brick_get_connector_properties) - @mock.patch.object(utils, 'execute', side_effect=_cinder_execute) - def test_do_setup(self, execute, brick_get_connector_properties): - """Normal case: The host group exists beforehand.""" - drv = vsp_iscsi.VSPISCSIDriver( - configuration=self.configuration, db=db) - 
self._setup_config() - - drv.do_setup(None) - self.assertEqual( - {'CL1-A': '11.22.33.44:3260'}, - drv.common.storage_info['portals']) - - @mock.patch.object( - utils, 'brick_get_connector_properties', - side_effect=_brick_get_connector_properties) - @mock.patch.object(vsp_utils, 'execute', side_effect=_execute) - def test_do_setup_raidqry_h_invalid( - self, execute, brick_get_connector_properties): - """Error case: 'raidqry -h' returns nothing. This error is ignored.""" - drv = vsp_iscsi.VSPISCSIDriver( - configuration=self.configuration, db=db) - self._setup_config() - - raidqry_h_original = EXECUTE_TABLE[('raidqry', '-h')] - EXECUTE_TABLE[('raidqry', '-h')] = (SUCCEED, "", STDERR) - drv.do_setup(None) - self.assertEqual( - {'CL1-A': '11.22.33.44:3260'}, - drv.common.storage_info['portals']) - EXECUTE_TABLE[('raidqry', '-h')] = raidqry_h_original - - @mock.patch.object( - utils, 'brick_get_connector_properties', - side_effect=_brick_get_connector_properties) - @mock.patch.object(vsp_utils, 'execute', side_effect=_execute) - def test_do_setup_specify_pool_name( - self, execute, brick_get_connector_properties): - """Normal case: Specify pool name rather than pool number.""" - drv = vsp_iscsi.VSPISCSIDriver( - configuration=self.configuration, db=db) - self._setup_config() - self.configuration.vsp_pool = "VSPPOOL" - - drv.do_setup(None) - - @mock.patch.object( - utils, 'brick_get_connector_properties', - side_effect=_brick_get_connector_properties) - @mock.patch.object(vsp_utils, 'execute', side_effect=_execute) - def test_do_setup_create_hostgrp( - self, execute, brick_get_connector_properties): - """Normal case: The host groups does not exist beforehand.""" - drv = vsp_iscsi.VSPISCSIDriver( - configuration=self.configuration, db=db) - self._setup_config() - self.configuration.vsp_target_ports = "CL3-B" - - drv.do_setup(None) - - @mock.patch.object(vsp_horcm, '_EXEC_MAX_WAITTIME', 5) - @mock.patch.object( - utils, 'brick_get_connector_properties', - 
side_effect=_brick_get_connector_properties) - @mock.patch.object(vsp_utils, 'execute', side_effect=_execute) - def test_do_setup_create_hostgrp_error( - self, execute, brick_get_connector_properties): - """Error case: 'add hba_iscsi' fails(MSGID0309-E).""" - drv = vsp_iscsi.VSPISCSIDriver( - configuration=self.configuration, db=db) - self._setup_config() - self.configuration.vsp_target_ports = "CL3-A" - - self.assertRaises(exception.VSPError, drv.do_setup, None) - - @mock.patch.object(vsp_utils, 'execute', side_effect=_execute) - def test_do_setup_thin_pool_not_specified(self, execute): - """Error case: Parameter error(vsp_thin_pool).(MSGID0601-E).""" - drv = vsp_iscsi.VSPISCSIDriver( - configuration=self.configuration, db=db) - self._setup_config() - self.configuration.vsp_default_copy_method = 'THIN' - - self.assertRaises(exception.VSPError, drv.do_setup, None) - - @mock.patch.object( - utils, 'brick_get_connector_properties', - side_effect=_brick_get_connector_properties) - @mock.patch.object(vsp_utils, 'execute', side_effect=_execute) - def test_do_setup_ldev_range_not_specified( - self, execute, brick_get_connector_properties): - """Normal case: Not specify LDEV range.""" - drv = vsp_iscsi.VSPISCSIDriver( - configuration=self.configuration, db=db) - self._setup_config() - self.configuration.vsp_ldev_range = None - - drv.do_setup(None) - - @mock.patch.object(vsp_utils, 'execute', side_effect=_execute) - def test_do_setup_storage_id_not_specified(self, execute): - """Error case: Parameter error(vsp_storage_id).(MSGID0601-E).""" - drv = vsp_iscsi.VSPISCSIDriver( - configuration=self.configuration, db=db) - self._setup_config() - self.configuration.vsp_storage_id = None - - self.assertRaises(exception.VSPError, drv.do_setup, None) - - @mock.patch.object(vsp_utils, 'execute', side_effect=_execute) - def test_do_setup_horcm_numbers_invalid(self, execute): - """Error case: Parameter error(vsp_horcm_numbers).(MSGID0601-E).""" - drv = vsp_iscsi.VSPISCSIDriver( - 
configuration=self.configuration, db=db) - self._setup_config() - self.configuration.vsp_horcm_numbers = (200, 200) - - self.assertRaises(exception.VSPError, drv.do_setup, None) - - @mock.patch.object(vsp_utils, 'execute', side_effect=_execute) - def test_do_setup_horcm_user_not_specified(self, execute): - """Error case: Parameter error(vsp_horcm_user).(MSGID0601-E).""" - drv = vsp_iscsi.VSPISCSIDriver( - configuration=self.configuration, db=db) - self._setup_config() - self.configuration.vsp_horcm_user = None - - self.assertRaises(exception.VSPError, drv.do_setup, None) - - @mock.patch.object( - utils, 'brick_get_connector_properties', - side_effect=_brick_get_connector_properties) - @mock.patch.object(vsp_utils, 'execute', side_effect=_execute) - def test_do_setup_only_target_ports_not_specified( - self, execute, brick_get_connector_properties): - """Normal case: Only target_ports is not specified.""" - drv = vsp_iscsi.VSPISCSIDriver( - configuration=self.configuration, db=db) - self._setup_config() - self.configuration.vsp_target_ports = None - - drv.do_setup(None) - - @mock.patch.object( - utils, 'brick_get_connector_properties', - side_effect=_brick_get_connector_properties) - @mock.patch.object(vsp_utils, 'execute', side_effect=_execute) - def test_do_setup_only_compute_target_ports_not_specified( - self, execute, brick_get_connector_properties): - """Normal case: Only compute_target_ports is not specified.""" - drv = vsp_iscsi.VSPISCSIDriver( - configuration=self.configuration, db=db) - self._setup_config() - self.configuration.vsp_compute_target_ports = None - - drv.do_setup(None) - - @mock.patch.object( - utils, 'brick_get_connector_properties', - side_effect=_brick_get_connector_properties) - @mock.patch.object(vsp_utils, 'execute', side_effect=_execute) - def test_do_setup_only_pair_target_ports_not_specified( - self, execute, brick_get_connector_properties): - """Normal case: Only pair_target_ports is not specified.""" - drv = vsp_iscsi.VSPISCSIDriver( 
- configuration=self.configuration, db=db) - self._setup_config() - self.configuration.vsp_horcm_pair_target_ports = None - - drv.do_setup(None) - - @mock.patch.object(vsp_utils, 'execute', side_effect=_execute) - def test_do_setup_compute_target_ports_not_specified(self, execute): - """Error case: Parameter error(compute_target_ports).(MSGID0601-E).""" - drv = vsp_iscsi.VSPISCSIDriver( - configuration=self.configuration, db=db) - self._setup_config() - self.configuration.vsp_target_ports = None - self.configuration.vsp_compute_target_ports = None - - self.assertRaises(exception.VSPError, drv.do_setup, None) - - @mock.patch.object(vsp_utils, 'execute', side_effect=_execute) - def test_do_setup_pair_target_ports_not_specified(self, execute): - """Error case: Parameter error(pair_target_ports).(MSGID0601-E).""" - drv = vsp_iscsi.VSPISCSIDriver( - configuration=self.configuration, db=db) - self._setup_config() - self.configuration.vsp_target_ports = None - self.configuration.vsp_horcm_pair_target_ports = None - - self.assertRaises(exception.VSPError, drv.do_setup, None) - - @mock.patch.object(vsp_horcm, '_EXEC_MAX_WAITTIME', 5) - @mock.patch.object( - utils, 'brick_get_connector_properties', - side_effect=_brick_get_connector_properties) - @mock.patch.object(processutils, 'execute', side_effect=_execute) - @mock.patch.object(os.path, 'exists', side_effect=_fake_exists) - @mock.patch.object(os, 'access', side_effect=_access) - @mock.patch.object(vsp_utils, 'execute', side_effect=_execute) - def test_do_setup_failed_to_create_conf( - self, vsp_utils_execute, access, exists, processutils_execute, - brick_get_connector_properties): - """Error case: Writing into horcmxxx.conf fails.(MSGID0632-E).""" - drv = vsp_iscsi.VSPISCSIDriver( - configuration=self.configuration, db=db) - self._setup_config() - self.configuration.vsp_horcm_numbers = (500, 501) - self.configuration.vsp_horcm_add_conf = True - - self.assertRaises(exception.VSPError, drv.do_setup, None) - - 
@mock.patch.object(vsp_horcm, '_EXEC_RETRY_INTERVAL', 1) - @mock.patch.object( - utils, 'brick_get_connector_properties', - side_effect=_brick_get_connector_properties) - @mock.patch.object(vsp_utils, 'execute', side_effect=_execute) - def test_do_setup_failed_to_login( - self, execute, brick_get_connector_properties): - """Error case: 'raidcom -login' fails with EX_ENAUTH(MSGID0600-E).""" - drv = vsp_iscsi.VSPISCSIDriver( - configuration=self.configuration, db=db) - self._setup_config() - self.configuration.vsp_horcm_user = "userX" - self.configuration.vsp_horcm_password = "paswordX" - - self.assertRaises(exception.VSPError, drv.do_setup, None) - - @mock.patch.object(vsp_horcm, '_EXEC_MAX_WAITTIME', 2) - @mock.patch.object(vsp_horcm, '_EXEC_RETRY_INTERVAL', 1) - @mock.patch.object( - utils, 'brick_get_connector_properties', - side_effect=_brick_get_connector_properties) - @mock.patch.object(vsp_utils, 'execute', side_effect=_execute) - def test_do_setup_failed_to_command( - self, execute, brick_get_connector_properties): - """Error case: 'raidcom -login' fails with EX_COMERR(MSGID0600-E).""" - drv = vsp_iscsi.VSPISCSIDriver( - configuration=self.configuration, db=db) - self._setup_config() - self.configuration.vsp_horcm_user = "userY" - self.configuration.vsp_horcm_password = "paswordY" - - self.assertRaises(exception.VSPError, drv.do_setup, None) - - @mock.patch.object(vsp_utils, 'DEFAULT_PROCESS_WAITTIME', 2) - @mock.patch.object( - utils, 'brick_get_connector_properties', - side_effect=_brick_get_connector_properties) - @mock.patch.object(vsp_utils, 'execute', side_effect=_execute) - @mock.patch.object( - vsp_horcm, '_run_horcmgr', side_effect=_fake_run_horcmgr) - def test_do_setup_failed_to_horcmshutdown( - self, _run_horcmgr, execute, brick_get_connector_properties): - """Error case: CCI's status is always RUNNING(MSGID0608-E).""" - drv = vsp_iscsi.VSPISCSIDriver( - configuration=self.configuration, db=db) - self._setup_config() - - 
self.assertRaises(exception.VSPError, drv.do_setup, None) - - @mock.patch.object( - utils, 'brick_get_connector_properties', - side_effect=_brick_get_connector_properties) - @mock.patch.object(vsp_utils, 'execute', side_effect=_execute) - @mock.patch.object( - vsp_horcm, '_run_horcmstart', side_effect=_fake_run_horcmstart) - def test_do_setup_failed_to_horcmstart( - self, _run_horcmstart, execute, brick_get_connector_properties): - """Error case: _run_horcmstart() returns an error(MSGID0609-E).""" - drv = vsp_iscsi.VSPISCSIDriver( - configuration=self.configuration, db=db) - self._setup_config() - - global run_horcmstart_returns_error - run_horcmstart_returns_error = True - self.assertRaises(exception.VSPError, drv.do_setup, None) - run_horcmstart_returns_error = False - - @mock.patch.object( - utils, 'brick_get_connector_properties', - side_effect=_brick_get_connector_properties_error) - @mock.patch.object(vsp_utils, 'execute', side_effect=_execute) - def test_do_setup_initiator_not_found( - self, execute, brick_get_connector_properties): - """Error case: The connector does not have 'initiator'(MSGID0650-E).""" - drv = vsp_iscsi.VSPISCSIDriver( - configuration=self.configuration, db=db) - self._setup_config() - - self.assertRaises(exception.VSPError, drv.do_setup, None) - - @mock.patch.object(vsp_utils, 'execute', side_effect=_execute) - def test_do_setup_port_not_found(self, execute): - """Error case: The target port does not exist(MSGID0650-E).""" - drv = vsp_iscsi.VSPISCSIDriver( - configuration=self.configuration, db=db) - self._setup_config() - self.configuration.vsp_target_ports = ["CL4-A"] - - self.assertRaises(exception.VSPError, drv.do_setup, None) - - @mock.patch.object(vsp_utils, 'execute', side_effect=_execute) - def test_do_setup_compute_target_ports_not_found(self, execute): - """Error case: Compute target port does not exist(MSGID0650-E).""" - drv = vsp_iscsi.VSPISCSIDriver( - configuration=self.configuration, db=db) - self._setup_config() - 
self.configuration.vsp_target_ports = None - self.configuration.vsp_compute_target_ports = ["CL4-A"] - - self.assertRaises(exception.VSPError, drv.do_setup, None) - - @mock.patch.object(vsp_utils, 'execute', side_effect=_execute) - def test_do_setup_pair_target_ports_not_found(self, execute): - """Error case: Pair target port does not exist(MSGID0650-E).""" - drv = vsp_iscsi.VSPISCSIDriver( - configuration=self.configuration, db=db) - self._setup_config() - self.configuration.vsp_target_ports = None - self.configuration.vsp_horcm_pair_target_ports = ["CL5-A"] - - self.assertRaises(exception.VSPError, drv.do_setup, None) - - @mock.patch.object(vsp_utils, 'execute', side_effect=_execute) - def test_extend_volume(self, execute): - """Normal case: Extend volume succeeds.""" - self.driver.extend_volume(TEST_VOLUME[0], 256) - - @mock.patch.object(vsp_utils, 'execute', side_effect=_execute) - def test_extend_volume_volume_provider_location_is_none(self, execute): - """Error case: The volume's provider_location is None(MSGID0613-E).""" - self.assertRaises( - exception.VSPError, self.driver.extend_volume, TEST_VOLUME[2], 256) - - @mock.patch.object(vsp_utils, 'execute', side_effect=_execute) - def test_extend_volume_volume_ldev_is_vvol(self, execute): - """Error case: The volume is a V-VOL(MSGID0618-E).""" - self.assertRaises( - exception.VSPError, self.driver.extend_volume, TEST_VOLUME[5], 256) - - @mock.patch.object(vsp_utils, 'execute', side_effect=_execute) - def test_extend_volume_volume_is_busy(self, execute): - """Error case: The volume is in a THIN volume pair(MSGID0616-E).""" - self.assertRaises( - exception.VSPError, self.driver.extend_volume, TEST_VOLUME[4], 256) - - @mock.patch.object(utils, 'execute', side_effect=_cinder_execute) - @mock.patch.object(vsp_horcm, '_EXTEND_WAITTIME', 1) - @mock.patch.object(vsp_horcm, '_EXEC_RETRY_INTERVAL', 1) - def test_extend_volume_raidcom_error(self, execute,): - """Error case: 'extend ldev' returns an error(MSGID0600-E).""" 
- self.assertRaises( - exception.VSPError, self.driver.extend_volume, TEST_VOLUME[3], 256) - - @mock.patch.object(vsp_utils, 'execute', side_effect=_execute) - def test_get_volume_stats(self, execute): - """Normal case: Refreshing data required.""" - stats = self.driver.get_volume_stats(True) - self.assertEqual('Hitachi', stats['vendor_name']) - self.assertFalse(stats['multiattach']) - - @mock.patch.object(vsp_utils, 'execute', side_effect=_execute) - def test_get_volume_stats_no_refresh(self, execute): - """Normal case: Refreshing data not required.""" - stats = self.driver.get_volume_stats() - self.assertEqual({}, stats) - - @mock.patch.object(vsp_utils, 'execute', side_effect=_error_execute) - def test_get_volume_stats_failed_to_get_dp_pool(self, execute): - """Error case: The pool does not exist(MSGID0640-E, MSGID0620-E).""" - self.driver.common.storage_info['pool_id'] = 29 - - stats = self.driver.get_volume_stats(True) - self.assertEqual({}, stats) - - @mock.patch.object(vsp_utils, 'execute', side_effect=_execute) - def test_create_volume(self, execute): - """Normal case: Available LDEV range is 0-1.""" - ret = self.driver.create_volume(fake_volume.fake_volume_obj(self.ctxt)) - self.assertEqual('1', ret['provider_location']) - - @mock.patch.object(vsp_utils, 'execute', side_effect=_execute) - def test_create_volume_free_ldev_not_found_on_storage(self, execute): - """Error case: No unused LDEV exists(MSGID0648-E).""" - self.driver.common.storage_info['ldev_range'] = [0, 0] - - self.assertRaises( - exception.VSPError, self.driver.create_volume, TEST_VOLUME[0]) - - @mock.patch.object(vsp_utils, 'execute', side_effect=_execute) - def test_create_volume_no_setting_ldev_range(self, execute): - """Normal case: Available LDEV range is unlimited.""" - self.driver.common.storage_info['ldev_range'] = None - - ret = self.driver.create_volume(fake_volume.fake_volume_obj(self.ctxt)) - self.assertEqual('1', ret['provider_location']) - - @mock.patch.object(vsp_utils, 
'execute', side_effect=_execute) - @mock.patch.object( - vsp_horcm.VSPHORCM, - '_check_ldev_status', side_effect=_fake_check_ldev_status) - def test_delete_volume(self, _check_ldev_status, execute): - """Normal case: Delete a volume.""" - self.driver.delete_volume(TEST_VOLUME[0]) - - @mock.patch.object(vsp_utils, 'execute', side_effect=_execute) - def test_delete_volume_provider_location_is_none(self, execute): - """Error case: The volume's provider_location is None(MSGID0304-W).""" - self.driver.delete_volume(TEST_VOLUME[2]) - - @mock.patch.object(vsp_utils, 'execute', side_effect=_execute) - def test_delete_volume_ldev_not_found_on_storage(self, execute): - """Unusual case: The volume's LDEV does not exist.(MSGID0319-W).""" - self.driver.delete_volume(TEST_VOLUME[3]) - - @mock.patch.object(vsp_utils, 'execute', side_effect=_execute) - def test_delete_volume_volume_is_busy(self, execute): - """Error case: The volume is a P-VOL of a THIN pair(MSGID0616-E).""" - self.assertRaises( - exception.VolumeIsBusy, self.driver.delete_volume, TEST_VOLUME[4]) - - @mock.patch.object(vsp_horcm, 'PAIR', vsp_horcm.PSUS) - @mock.patch.object(vsp_utils, 'execute', side_effect=_execute) - @mock.patch.object( - db, 'snapshot_metadata_update', side_effect=_snapshot_metadata_update) - @mock.patch.object(sqlalchemy_api, 'volume_get', side_effect=_volume_get) - def test_create_snapshot_full( - self, volume_get, snapshot_metadata_update, execute): - """Normal case: copy_method=FULL.""" - self.driver.common.storage_info['ldev_range'] = [0, 9] - - ret = self.driver.create_snapshot(TEST_SNAPSHOT[7]) - self.assertEqual('8', ret['provider_location']) - - @mock.patch.object(vsp_horcm, 'PAIR', vsp_horcm.PSUS) - @mock.patch.object(vsp_utils, 'execute', side_effect=_execute) - @mock.patch.object( - db, 'snapshot_metadata_update', side_effect=_snapshot_metadata_update) - @mock.patch.object(sqlalchemy_api, 'volume_get', side_effect=_volume_get) - def test_create_snapshot_thin( - self, volume_get, 
snapshot_metadata_update, execute): - """Normal case: copy_method=THIN.""" - self.driver.common.storage_info['ldev_range'] = [0, 9] - self.configuration.vsp_thin_pool = 31 - self.configuration.vsp_default_copy_method = "THIN" - - ret = self.driver.create_snapshot(TEST_SNAPSHOT[7]) - self.assertEqual('8', ret['provider_location']) - - @mock.patch.object(vsp_utils, 'execute', side_effect=_execute) - @mock.patch.object(sqlalchemy_api, 'volume_get', side_effect=_volume_get) - def test_create_snapshot_provider_location_is_none( - self, volume_get, execute): - """Error case: Source vol's provider_location is None(MSGID0624-E).""" - self.assertRaises( - exception.VSPError, self.driver.create_snapshot, TEST_SNAPSHOT[2]) - - @mock.patch.object(vsp_utils, 'execute', side_effect=_execute) - @mock.patch.object(sqlalchemy_api, 'volume_get', side_effect=_volume_get) - def test_create_snapshot_ldev_not_found_on_storage( - self, volume_get, execute): - """Error case: The src-vol's LDEV does not exist.(MSGID0612-E).""" - self.assertRaises( - exception.VSPError, self.driver.create_snapshot, TEST_SNAPSHOT[3]) - - @mock.patch.object(vsp_utils, 'execute', side_effect=_execute) - def test_delete_snapshot_full(self, execute): - """Normal case: Delete a snapshot.""" - self.driver.delete_snapshot(TEST_SNAPSHOT[5]) - - @mock.patch.object(vsp_utils, 'execute', side_effect=_execute) - @mock.patch.object( - vsp_horcm.VSPHORCM, '_is_smpl', side_effect=_fake_is_smpl) - def test_delete_snapshot_full_smpl(self, _is_smpl, execute): - """Normal case: The LDEV in an SI volume pair becomes SMPL.""" - self.driver.delete_snapshot(TEST_SNAPSHOT[7]) - - @mock.patch.object(vsp_utils, 'DEFAULT_PROCESS_WAITTIME', 1) - @mock.patch.object(vsp_utils, 'execute', side_effect=_execute) - def test_delete_snapshot_vvol_timeout(self, execute): - """Error case: V-VOL is not deleted from a snapshot(MSGID0611-E).""" - self.assertRaises( - exception.VSPError, self.driver.delete_snapshot, TEST_SNAPSHOT[6]) - - 
@mock.patch.object(vsp_utils, 'execute', side_effect=_execute) - def test_delete_snapshot_provider_location_is_none(self, execute): - """Error case: Snapshot's provider_location is None(MSGID0304-W).""" - self.driver.delete_snapshot(TEST_SNAPSHOT[2]) - - @mock.patch.object(vsp_utils, 'execute', side_effect=_execute) - def test_delete_snapshot_ldev_not_found_on_storage(self, execute): - """Unusual case: The snapshot's LDEV does not exist.(MSGID0319-W).""" - self.driver.delete_snapshot(TEST_SNAPSHOT[3]) - - @mock.patch.object(vsp_utils, 'execute', side_effect=_execute) - def test_delete_snapshot_snapshot_is_busy(self, execute): - """Error case: The snapshot is a P-VOL of a THIN pair(MSGID0616-E).""" - self.assertRaises( - exception.SnapshotIsBusy, self.driver.delete_snapshot, - TEST_SNAPSHOT[4]) - - @mock.patch.object(volume_utils, 'copy_volume', side_effect=_copy_volume) - @mock.patch.object( - utils, 'brick_get_connector_properties', - side_effect=_brick_get_connector_properties) - @mock.patch.object( - utils, 'brick_get_connector', - side_effect=mock.MagicMock()) - @mock.patch.object(vsp_utils, 'execute', side_effect=_execute) - @mock.patch.object( - brick_connector.ISCSIConnector, - 'connect_volume', _connect_volume) - @mock.patch.object( - brick_connector.ISCSIConnector, - 'disconnect_volume', _disconnect_volume) - def test_create_cloned_volume_with_dd_same_size( - self, execute, brick_get_connector, brick_get_connector_properties, - copy_volume): - """Normal case: The source volume is a V-VOL and copied by dd.""" - vol = self.driver.create_cloned_volume(TEST_VOLUME[0], TEST_VOLUME[5]) - self.assertEqual('1', vol['provider_location']) - - @mock.patch.object(volume_utils, 'copy_volume', side_effect=_copy_volume) - @mock.patch.object( - utils, 'brick_get_connector_properties', - side_effect=_brick_get_connector_properties) - @mock.patch.object( - utils, 'brick_get_connector', - side_effect=mock.MagicMock()) - @mock.patch.object(vsp_utils, 'execute', 
side_effect=_execute) - @mock.patch.object( - brick_connector.ISCSIConnector, - 'connect_volume', _connect_volume) - @mock.patch.object( - brick_connector.ISCSIConnector, - 'disconnect_volume', _disconnect_volume) - def test_create_cloned_volume_with_dd_extend_size( - self, execute, brick_get_connector, brick_get_connector_properties, - copy_volume): - """Normal case: Copy with dd and extend the size afterward.""" - vol = self.driver.create_cloned_volume(TEST_VOLUME[1], TEST_VOLUME[5]) - self.assertEqual('1', vol['provider_location']) - - @mock.patch.object(vsp_utils, 'execute', side_effect=_execute) - def test_create_cloned_volume_provider_location_is_none(self, execute): - """Error case: Source vol's provider_location is None(MSGID0624-E).""" - self.assertRaises( - exception.VSPError, self.driver.create_cloned_volume, - TEST_VOLUME[0], TEST_VOLUME[2]) - - @mock.patch.object(vsp_utils, 'execute', side_effect=_execute) - def test_create_cloned_volume_invalid_size(self, execute): - """Error case: src-size > clone-size(MSGID0617-E).""" - self.assertRaises( - exception.VSPError, self.driver.create_cloned_volume, - TEST_VOLUME[0], TEST_VOLUME[1]) - - @mock.patch.object(vsp_utils, 'execute', side_effect=_execute) - def test_create_cloned_volume_extend_size_thin(self, execute): - """Error case: clone > src and copy_method=THIN(MSGID0621-E).""" - self.configuration.vsp_thin_pool = 31 - test_vol_obj = copy.copy(TEST_VOLUME[1]) - test_vol_obj.metadata.update({'copy_method': 'THIN'}) - self.assertRaises( - exception.VSPError, self.driver.create_cloned_volume, - test_vol_obj, TEST_VOLUME[0]) - - @mock.patch.object(vsp_utils, 'execute', side_effect=_execute) - def test_create_volume_from_snapshot_same_size(self, execute): - """Normal case: Copy with Shadow Image.""" - vol = self.driver.create_volume_from_snapshot( - TEST_VOLUME[0], TEST_SNAPSHOT[0]) - self.assertEqual('1', vol['provider_location']) - - @mock.patch.object(vsp_utils, 'execute', side_effect=_execute2) - def 
test_create_volume_from_snapshot_full_extend_normal(self, execute): - """Normal case: Copy with Shadow Image and extend the size.""" - test_vol_obj = copy.copy(TEST_VOLUME[1]) - test_vol_obj.metadata.update({'copy_method': 'FULL'}) - vol = self.driver.create_volume_from_snapshot( - test_vol_obj, TEST_SNAPSHOT[0]) - self.assertEqual('1', vol['provider_location']) - - @mock.patch.object(vsp_utils, 'execute', side_effect=_execute3) - def test_create_volume_from_snapshot_full_extend_PSUE(self, execute): - """Error case: SI copy -> pair status: PSUS -> PSUE(MSGID0722-E).""" - test_vol_obj = copy.copy(TEST_VOLUME[1]) - test_vol_obj.metadata.update({'copy_method': 'FULL'}) - self.assertRaises( - exception.VSPError, self.driver.create_volume_from_snapshot, - test_vol_obj, TEST_SNAPSHOT[0]) - - @mock.patch.object(vsp_utils, 'DEFAULT_PROCESS_WAITTIME', 1) - @mock.patch.object(vsp_utils, 'execute', side_effect=_execute4) - def test_create_volume_from_snapshot_full_PSUE(self, execute): - """Error case: SI copy -> pair status becomes PSUE(MSGID0610-E).""" - test_vol_obj = copy.copy(TEST_VOLUME[0]) - test_vol_obj.metadata.update({'copy_method': 'FULL'}) - self.assertRaises( - exception.VSPError, self.driver.create_volume_from_snapshot, - test_vol_obj, TEST_SNAPSHOT[0]) - - @mock.patch.object( - vsp_horcm, '_run_horcmstart', side_effect=_fake_run_horcmstart3) - @mock.patch.object(vsp_horcm, '_LDEV_STATUS_WAITTIME', 1) - @mock.patch.object(vsp_utils, 'DEFAULT_PROCESS_WAITTIME', 1) - @mock.patch.object(vsp_utils, 'execute', side_effect=_execute5) - def test_create_volume_from_snapshot_full_SMPL( - self, execute, _run_horcmstart): - """Error case: SI copy -> pair status becomes SMPL(MSGID0610-E).""" - test_vol_obj = copy.copy(TEST_VOLUME[0]) - test_vol_obj.metadata.update({'copy_method': 'FULL'}) - self.assertRaises( - exception.VSPError, self.driver.create_volume_from_snapshot, - test_vol_obj, TEST_SNAPSHOT[0]) - - @mock.patch.object(vsp_utils, 'execute', side_effect=_execute) - 
def test_create_volume_from_snapshot_invalid_size(self, execute): - """Error case: volume-size < snapshot-size(MSGID0617-E).""" - self.assertRaises( - exception.VSPError, self.driver.create_volume_from_snapshot, - TEST_VOLUME[0], TEST_SNAPSHOT[1]) - - @mock.patch.object(vsp_utils, 'execute', side_effect=_execute) - def test_create_volume_from_snapshot_thin_extend(self, execute): - """Error case: volume > snapshot and copy_method=THIN(MSGID0621-E).""" - self.configuration.vsp_thin_pool = 31 - test_vol_obj = copy.copy(TEST_VOLUME[1]) - test_vol_obj.metadata.update({'copy_method': 'THIN'}) - self.assertRaises( - exception.VSPError, self.driver.create_volume_from_snapshot, - test_vol_obj, TEST_SNAPSHOT[0]) - - @mock.patch.object(vsp_utils, 'execute', side_effect=_execute) - def test_create_volume_from_snapshot_provider_location_is_none( - self, execute): - """Error case: Snapshot's provider_location is None(MSGID0624-E).""" - self.assertRaises( - exception.VSPError, self.driver.create_volume_from_snapshot, - TEST_VOLUME[0], TEST_SNAPSHOT[2]) - - @mock.patch.object(vsp_utils, 'execute', side_effect=_execute) - @mock.patch.object( - db, 'volume_admin_metadata_get', - side_effect=_volume_admin_metadata_get) - def test_initialize_connection(self, volume_admin_metadata_get, execute): - """Normal case: Initialize connection.""" - ret = self.driver.initialize_connection( - TEST_VOLUME[0], DEFAULT_CONNECTOR) - self.assertEqual('iscsi', ret['driver_volume_type']) - self.assertEqual('11.22.33.44:3260', ret['data']['target_portal']) - self.assertEqual('iqn-initiator.hbsd-target', - ret['data']['target_iqn']) - self.assertEqual('CHAP', ret['data']['auth_method']) - self.assertEqual('auth_user', ret['data']['auth_username']) - self.assertEqual('auth_password', ret['data']['auth_password']) - self.assertEqual(0, ret['data']['target_lun']) - - @mock.patch.object( - utils, 'brick_get_connector_properties', - side_effect=_brick_get_connector_properties) - @mock.patch.object(vsp_utils, 
'execute', side_effect=_execute) - @mock.patch.object( - db, 'volume_admin_metadata_get', - side_effect=_volume_admin_metadata_get) - def test_initialize_connection_multipath( - self, volume_admin_metadata_get, execute, - brick_get_connector_properties): - """Normal case: Initialize connection in multipath environment.""" - drv = vsp_iscsi.VSPISCSIDriver( - configuration=self.configuration, db=db) - self._setup_config() - self.configuration.vsp_target_ports = ["CL1-A", "CL1-B"] - drv.do_setup(None) - multipath_connector = copy.copy(DEFAULT_CONNECTOR) - multipath_connector['multipath'] = True - ret = drv.initialize_connection(TEST_VOLUME[0], multipath_connector) - self.assertEqual('iscsi', ret['driver_volume_type']) - self.assertEqual(['11.22.33.44:3260', '11.22.33.44:3260'], - ret['data']['target_portals']) - self.assertEqual(['iqn-initiator.hbsd-target', - 'iqn-initiator.hbsd-target'], - ret['data']['target_iqns']) - self.assertEqual('CHAP', ret['data']['auth_method']) - self.assertEqual('auth_user', ret['data']['auth_username']) - self.assertEqual('auth_password', ret['data']['auth_password']) - self.assertEqual([0, 0], ret['data']['target_luns']) - - @mock.patch.object(vsp_utils, 'execute', side_effect=_execute) - def test_initialize_connection_provider_location_is_none(self, execute): - """Error case: The volume's provider_location is None(MSGID0619-E).""" - self.assertRaises( - exception.VSPError, self.driver.initialize_connection, - TEST_VOLUME[2], DEFAULT_CONNECTOR) - - @mock.patch.object(vsp_utils, 'execute', side_effect=_execute) - @mock.patch.object( - db, 'volume_admin_metadata_get', - side_effect=_volume_admin_metadata_get) - def test_initialize_connection_already_attached( - self, volume_admin_metadata_get, execute): - """Unusual case: 'add lun' returns 'already defined' error.""" - ret = self.driver.initialize_connection( - TEST_VOLUME[6], DEFAULT_CONNECTOR) - self.assertEqual('iscsi', ret['driver_volume_type']) - self.assertEqual('11.22.33.44:3260', 
ret['data']['target_portal']) - self.assertEqual('iqn-initiator.hbsd-target', - ret['data']['target_iqn']) - self.assertEqual('CHAP', ret['data']['auth_method']) - self.assertEqual('auth_user', ret['data']['auth_username']) - self.assertEqual('auth_password', ret['data']['auth_password']) - self.assertEqual(255, ret['data']['target_lun']) - - @mock.patch.object( - utils, 'brick_get_connector_properties', - side_effect=_brick_get_connector_properties) - @mock.patch.object(vsp_utils, 'execute', side_effect=_execute) - @mock.patch.object( - db, 'volume_admin_metadata_get', - side_effect=_volume_admin_metadata_get) - def test_initialize_connection_target_port_not_specified( - self, volume_admin_metadata_get, execute, - brick_get_connector_properties): - """Normal case: target_port is not specified.""" - compute_connector = DEFAULT_CONNECTOR.copy() - compute_connector['ip'] = '127.0.0.2' - drv = vsp_iscsi.VSPISCSIDriver( - configuration=self.configuration, db=db) - self._setup_config() - self.configuration.vsp_target_ports = None - drv.do_setup(None) - ret = drv.initialize_connection(TEST_VOLUME[0], compute_connector) - self.assertEqual('iscsi', ret['driver_volume_type']) - self.assertEqual('11.22.33.44:3260', ret['data']['target_portal']) - self.assertEqual('iqn-initiator.hbsd-target', - ret['data']['target_iqn']) - self.assertEqual('CHAP', ret['data']['auth_method']) - self.assertEqual('auth_user', ret['data']['auth_username']) - self.assertEqual('auth_password', ret['data']['auth_password']) - self.assertEqual(0, ret['data']['target_lun']) - - @mock.patch.object( - utils, 'brick_get_connector_properties', - side_effect=_brick_get_connector_properties) - @mock.patch.object(vsp_utils, 'execute', side_effect=_execute) - @mock.patch.object( - db, 'volume_admin_metadata_get', - side_effect=_volume_admin_metadata_get) - def test_initialize_connection_compute_port_not_specified( - self, volume_admin_metadata_get, execute, - brick_get_connector_properties): - """Normal case: 
compute_target_port is not specified.""" - compute_connector = DEFAULT_CONNECTOR.copy() - compute_connector['ip'] = '127.0.0.2' - drv = vsp_iscsi.VSPISCSIDriver( - configuration=self.configuration, db=db) - self._setup_config() - self.configuration.vsp_compute_target_ports = None - drv.do_setup(None) - ret = drv.initialize_connection(TEST_VOLUME[0], compute_connector) - self.assertEqual('iscsi', ret['driver_volume_type']) - self.assertEqual('11.22.33.44:3260', ret['data']['target_portal']) - self.assertEqual('iqn-initiator.hbsd-target', - ret['data']['target_iqn']) - self.assertEqual('CHAP', ret['data']['auth_method']) - self.assertEqual('auth_user', ret['data']['auth_username']) - self.assertEqual('auth_password', ret['data']['auth_password']) - self.assertEqual(0, ret['data']['target_lun']) - - @mock.patch.object(vsp_utils, 'execute', side_effect=_execute) - def test_terminate_connection(self, execute): - """Normal case: Terminate connection.""" - self.driver.terminate_connection(TEST_VOLUME[6], DEFAULT_CONNECTOR) - - @mock.patch.object(vsp_utils, 'execute', side_effect=_execute) - def test_terminate_connection_provider_location_is_none(self, execute): - """Unusual case: Volume's provider_location is None(MSGID0302-W).""" - self.driver.terminate_connection(TEST_VOLUME[2], DEFAULT_CONNECTOR) - - @mock.patch.object(vsp_utils, 'execute', side_effect=_execute) - def test_terminate_connection_no_port_mapped_to_ldev(self, execute): - """Unusual case: No port is mapped to the LDEV.""" - self.driver.terminate_connection(TEST_VOLUME[3], DEFAULT_CONNECTOR) - - @mock.patch.object(vsp_utils, 'execute', side_effect=_execute) - def test_terminate_connection_initiator_iqn_not_found(self, execute): - """Error case: The connector does not have 'initiator'(MSGID0650-E).""" - connector = dict(DEFAULT_CONNECTOR) - del connector['initiator'] - - self.assertRaises( - exception.VSPError, self.driver.terminate_connection, - TEST_VOLUME[0], connector) - - @mock.patch.object(vsp_utils, 
'execute', side_effect=_execute) - def test_copy_volume_to_image(self, execute): - """Normal case: Copy a volume to an image.""" - image_service = 'fake_image_service' - image_meta = 'fake_image_meta' - - with mock.patch.object(driver.VolumeDriver, 'copy_volume_to_image') \ - as mock_copy_volume_to_image: - self.driver.copy_volume_to_image( - self.ctxt, TEST_VOLUME[0], image_service, image_meta) - - mock_copy_volume_to_image.assert_called_with( - self.ctxt, TEST_VOLUME[0], image_service, image_meta) - - @mock.patch.object(vsp_utils, 'execute', side_effect=_execute) - def test_manage_existing(self, execute): - """Normal case: Bring an existing volume under Cinder's control.""" - ret = self.driver.manage_existing( - TEST_VOLUME[0], self.test_existing_ref) - self.assertEqual('0', ret['provider_location']) - - @mock.patch.object(vsp_utils, 'execute', side_effect=_execute) - def test_manage_existing_get_size_normal(self, execute): - """Normal case: Return an existing LDEV's size.""" - self.driver.manage_existing_get_size( - TEST_VOLUME[0], self.test_existing_ref) - - @mock.patch.object(vsp_utils, 'execute', side_effect=_execute) - def test_manage_existing_get_size_none_ldev_ref(self, execute): - """Error case: Source LDEV's properties do not exist(MSGID0707-E).""" - self.assertRaises( - exception.ManageExistingInvalidReference, - self.driver.manage_existing_get_size, TEST_VOLUME[0], - self.test_existing_none_ldev_ref) - - @mock.patch.object(vsp_utils, 'execute', side_effect=_execute) - def test_manage_existing_get_size_invalid_ldev_ref(self, execute): - """Error case: Source LDEV's ID is an invalid decimal(MSGID0707-E).""" - self.assertRaises( - exception.ManageExistingInvalidReference, - self.driver.manage_existing_get_size, TEST_VOLUME[0], - self.test_existing_invalid_ldev_ref) - - @mock.patch.object(vsp_utils, 'execute', side_effect=_execute) - def test_manage_existing_get_size_value_error_ref(self, execute): - """Error case: Source LDEV's ID is an invalid 
hex(MSGID0707-E).""" - self.assertRaises( - exception.ManageExistingInvalidReference, - self.driver.manage_existing_get_size, TEST_VOLUME[0], - self.test_existing_value_error_ref) - - @mock.patch.object(vsp_utils, 'execute', side_effect=_execute) - def test_manage_existing_get_size_no_ldev_ref(self, execute): - """Error case: Source LDEV's ID is not specified(MSGID0707-E).""" - self.assertRaises( - exception.ManageExistingInvalidReference, - self.driver.manage_existing_get_size, TEST_VOLUME[0], - self.test_existing_no_ldev_ref) - - @mock.patch.object(vsp_utils, 'execute', side_effect=_execute) - def test_manage_existing_get_size_invalid_sts_ldev(self, execute): - """Error case: Source LDEV's STS is invalid(MSGID0707-E).""" - self.assertRaises( - exception.ManageExistingInvalidReference, - self.driver.manage_existing_get_size, TEST_VOLUME[0], - self.test_existing_invalid_sts_ldev) - - @mock.patch.object(vsp_utils, 'execute', side_effect=_execute) - def test_manage_existing_get_size_invalid_vol_attr(self, execute): - """Error case: Source LDEV's VOL_ATTR is invalid(MSGID0702-E).""" - self.assertRaises( - exception.ManageExistingInvalidReference, - self.driver.manage_existing_get_size, TEST_VOLUME[0], - self.test_existing_invalid_vol_attr) - - @mock.patch.object(vsp_utils, 'execute', side_effect=_execute) - def test_manage_existing_get_size_invalid_size_ref(self, execute): - """Error case: Source LDEV's VOL_Capacity is invalid(MSGID0703-E).""" - self.assertRaises( - exception.ManageExistingInvalidReference, - self.driver.manage_existing_get_size, TEST_VOLUME[0], - self.test_existing_invalid_size) - - @mock.patch.object(vsp_utils, 'execute', side_effect=_execute) - def test_manage_existing_get_size_invalid_port_cnt(self, execute): - """Error case: Source LDEV's NUM_PORT is invalid(MSGID0704-E).""" - self.assertRaises( - exception.ManageExistingInvalidReference, - self.driver.manage_existing_get_size, TEST_VOLUME[0], - self.test_existing_invalid_port_cnt) - - 
@mock.patch.object(vsp_utils, 'execute', side_effect=_execute) - @mock.patch.object( - vsp_horcm, '_run_horcmstart', side_effect=_fake_run_horcmstart2) - def test_manage_existing_get_size_failed_to_start_horcmgr( - self, _run_horcmstart, execute): - """Error case: _start_horcmgr() returns an error(MSGID0320-W).""" - global run_horcmstart_returns_error2 - run_horcmstart_returns_error2 = True - self.assertRaises( - exception.ManageExistingInvalidReference, - self.driver.manage_existing_get_size, TEST_VOLUME[0], - self.test_existing_failed_to_start_horcmgr) - run_horcmstart_returns_error2 = False - - @mock.patch.object(vsp_utils, 'execute', side_effect=_execute) - def test_unmanage(self, execute): - """Normal case: Take out a volume from Cinder's control.""" - self.driver.unmanage(TEST_VOLUME[0]) - - @mock.patch.object(vsp_utils, 'execute', side_effect=_execute) - def test_unmanage_provider_location_is_none(self, execute): - """Error case: The volume's provider_location is None(MSGID0304-W).""" - self.driver.unmanage(TEST_VOLUME[2]) - - @mock.patch.object(vsp_utils, 'execute', side_effect=_execute) - def test_unmanage_volume_invalid_sts_ldev(self, execute): - """Unusual case: The volume's STS is BLK.""" - self.driver.unmanage(TEST_VOLUME[13]) - - @mock.patch.object(vsp_utils, 'execute', side_effect=_execute) - def test_unmanage_volume_is_busy(self, execute): - """Error case: The volume is in a THIN volume pair(MSGID0616-E).""" - self.assertRaises( - exception.VolumeIsBusy, self.driver.unmanage, TEST_VOLUME[4]) - - @mock.patch.object(vsp_utils, 'execute', side_effect=_execute) - def test_copy_image_to_volume(self, execute): - """Normal case: Copy an image to a volume.""" - image_service = 'fake_image_service' - image_id = 'fake_image_id' - self.configuration.vsp_horcm_numbers = (400, 401) - - with mock.patch.object(driver.VolumeDriver, 'copy_image_to_volume') \ - as mock_copy_image: - self.driver.copy_image_to_volume( - self.ctxt, TEST_VOLUME[0], image_service, 
image_id) - - mock_copy_image.assert_called_with( - self.ctxt, TEST_VOLUME[0], image_service, image_id) - - @mock.patch.object(utils, 'execute', side_effect=_cinder_execute) - def test_update_migrated_volume_success(self, execute): - """Normal case: 'modify ldev -status discard_zero_page' succeeds.""" - self.assertRaises( - NotImplementedError, - self.driver.update_migrated_volume, - self.ctxt, - TEST_VOLUME[0], - TEST_VOLUME[2], - "available") - - @mock.patch.object(vsp_horcm, '_EXEC_RETRY_INTERVAL', 1) - @mock.patch.object(vsp_horcm, '_EXEC_MAX_WAITTIME', 1) - @mock.patch.object(vsp_utils, 'execute', side_effect=_execute) - def test_update_migrated_volume_error(self, execute): - """Error case: 'modify ldev' fails(MSGID0315-W).""" - self.assertRaises( - NotImplementedError, - self.driver.update_migrated_volume, - self.ctxt, - TEST_VOLUME[0], - TEST_VOLUME[3], - "available") - - def test_get_ldev_volume_is_none(self): - """Error case: The volume is None.""" - self.assertIsNone(vsp_utils.get_ldev(None)) - - def test_check_ignore_error_string(self): - """Normal case: ignore_error is a string.""" - ignore_error = 'SSB=0xB980,0xB902' - stderr = ('raidcom: [EX_CMDRJE] An order to the control/command device' - ' was rejected\nIt was rejected due to SKEY=0x05, ASC=0x26, ' - 'ASCQ=0x00, SSB=0xB980,0xB902 on Serial#(400003)\nCAUSE : ' - 'The specified port can not be operated.') - self.assertTrue(vsp_utils.check_ignore_error(ignore_error, stderr)) - - def test_check_opts_parameter_specified(self): - """Normal case: A valid parameter is specified.""" - cfg.CONF.paramAAA = 'aaa' - vsp_utils.check_opts(conf.Configuration(None), - [cfg.StrOpt('paramAAA')]) - - def test_check_opt_value_parameter_not_set(self): - """Error case: A parameter is not set(MSGID0601-E).""" - self.assertRaises(cfg.NoSuchOptError, - vsp_utils.check_opt_value, - conf.Configuration(None), - ['paramCCC']) - - def test_build_initiator_target_map_no_lookup_service(self): - """Normal case: None is specified 
for lookup_service.""" - connector = {'wwpns': ['0000000000000000', '1111111111111111']} - target_wwns = ['2222222222222222', '3333333333333333'] - init_target_map = vsp_utils.build_initiator_target_map(connector, - target_wwns, - None) - self.assertEqual( - {'0000000000000000': ['2222222222222222', '3333333333333333'], - '1111111111111111': ['2222222222222222', '3333333333333333']}, - init_target_map) - - def test_update_conn_info_not_update_conn_info(self): - """Normal case: Not update connection info.""" - vsp_utils.update_conn_info(dict({'data': dict({'target_wwn': []})}), - dict({'wwpns': []}), - None) diff --git a/cinder/tests/unit/volume/drivers/hpe/__init__.py b/cinder/tests/unit/volume/drivers/hpe/__init__.py deleted file mode 100644 index e69de29bb..000000000 diff --git a/cinder/tests/unit/volume/drivers/hpe/fake_hpe_3par_client.py b/cinder/tests/unit/volume/drivers/hpe/fake_hpe_3par_client.py deleted file mode 100644 index ebe402689..000000000 --- a/cinder/tests/unit/volume/drivers/hpe/fake_hpe_3par_client.py +++ /dev/null @@ -1,29 +0,0 @@ -# (c) Copyright 2014-2015 Hewlett Packard Enterprise Development LP -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
-# -"""Fake HPE client for testing 3PAR without installing the client.""" - -import sys - -import mock - -from cinder.tests.unit.volume.drivers.hpe \ - import fake_hpe_client_exceptions as hpeexceptions - -hpe3par = mock.Mock() -hpe3par.version = "4.2.0" -hpe3par.exceptions = hpeexceptions - -sys.modules['hpe3parclient'] = hpe3par diff --git a/cinder/tests/unit/volume/drivers/hpe/fake_hpe_client_exceptions.py b/cinder/tests/unit/volume/drivers/hpe/fake_hpe_client_exceptions.py deleted file mode 100644 index f753eb3ed..000000000 --- a/cinder/tests/unit/volume/drivers/hpe/fake_hpe_client_exceptions.py +++ /dev/null @@ -1,119 +0,0 @@ -# (c) Copyright 2014-2015 Hewlett Packard Enterprise Development LP -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
-# -"""Fake HPE client exceptions to use when mocking HPE clients.""" - - -class UnsupportedVersion(Exception): - """Unsupported version of the client.""" - pass - - -class ClientException(Exception): - """The base exception class for these fake exceptions.""" - _error_code = None - _error_desc = None - _error_ref = None - - _debug1 = None - _debug2 = None - - def __init__(self, error=None): - if error: - if 'code' in error: - self._error_code = error['code'] - if 'desc' in error: - self._error_desc = error['desc'] - if 'ref' in error: - self._error_ref = error['ref'] - - if 'debug1' in error: - self._debug1 = error['debug1'] - if 'debug2' in error: - self._debug2 = error['debug2'] - - def get_code(self): - return self._error_code - - def get_description(self): - return self._error_desc - - def get_ref(self): - return self._error_ref - - def __str__(self): - formatted_string = self.message - if self.http_status: - formatted_string += " (HTTP %s)" % self.http_status - if self._error_code: - formatted_string += " %s" % self._error_code - if self._error_desc: - formatted_string += " - %s" % self._error_desc - if self._error_ref: - formatted_string += " - %s" % self._error_ref - - if self._debug1: - formatted_string += " (1: '%s')" % self._debug1 - - if self._debug2: - formatted_string += " (2: '%s')" % self._debug2 - - return formatted_string - - -class HTTPConflict(ClientException): - http_status = 409 - message = "Conflict" - - def __init__(self, error=None): - if error: - super(HTTPConflict, self).__init__(error) - if 'message' in error: - self._error_desc = error['message'] - - def get_description(self): - return self._error_desc - - -class HTTPNotFound(ClientException): - http_status = 404 - message = "Not found" - - -class HTTPForbidden(ClientException): - http_status = 403 - message = "Forbidden" - - -class HTTPBadRequest(ClientException): - http_status = 400 - message = "Bad request" - - -class HTTPUnauthorized(ClientException): - http_status = 401 - message = 
"Unauthorized" - - -class HTTPServerError(ClientException): - http_status = 500 - message = "Error" - - def __init__(self, error=None): - if error and 'message' in error: - self._error_desc = error['message'] - - def get_description(self): - return self._error_desc diff --git a/cinder/tests/unit/volume/drivers/hpe/fake_hpe_lefthand_client.py b/cinder/tests/unit/volume/drivers/hpe/fake_hpe_lefthand_client.py deleted file mode 100644 index d4f05ee20..000000000 --- a/cinder/tests/unit/volume/drivers/hpe/fake_hpe_lefthand_client.py +++ /dev/null @@ -1,29 +0,0 @@ -# (c) Copyright 2014-2016 Hewlett Packard Enterprise Development LP -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -# -"""Fake HPE client for testing LeftHand without installing the client.""" - -import sys - -import mock - -from cinder.tests.unit.volume.drivers.hpe \ - import fake_hpe_client_exceptions as hpeexceptions - -hpelefthand = mock.Mock() -hpelefthand.version = "2.1.0" -hpelefthand.exceptions = hpeexceptions - -sys.modules['hpelefthandclient'] = hpelefthand diff --git a/cinder/tests/unit/volume/drivers/hpe/test_hpe3par.py b/cinder/tests/unit/volume/drivers/hpe/test_hpe3par.py deleted file mode 100644 index 3bb3d832d..000000000 --- a/cinder/tests/unit/volume/drivers/hpe/test_hpe3par.py +++ /dev/null @@ -1,8509 +0,0 @@ -# (c) Copyright 2013-2015 Hewlett Packard Enterprise Development LP -# All Rights Reserved. 
-# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -"""Unit tests for OpenStack Cinder volume drivers.""" - -import mock - -import ast - -from oslo_utils import units - -from cinder import context -from cinder import exception -from cinder.objects import fields -from cinder import test -from cinder.tests.unit import fake_volume -from cinder.tests.unit.volume.drivers.hpe \ - import fake_hpe_3par_client as hpe3parclient -from cinder.volume.drivers.hpe import hpe_3par_common as hpecommon -from cinder.volume.drivers.hpe import hpe_3par_fc as hpefcdriver -from cinder.volume.drivers.hpe import hpe_3par_iscsi as hpedriver -from cinder.volume import qos_specs -from cinder.volume import utils as volume_utils -from cinder.volume import volume_types - -hpeexceptions = hpe3parclient.hpeexceptions - - -HPE3PAR_CPG = 'OpenStackCPG' -HPE3PAR_CPG2 = 'fakepool' -HPE3PAR_CPG_QOS = 'qospool' -HPE3PAR_CPG_SNAP = 'OpenStackCPGSnap' -HPE3PAR_USER_NAME = 'testUser' -HPE3PAR_USER_PASS = 'testPassword' -HPE3PAR_SAN_IP = '2.2.2.2' -HPE3PAR_SAN_SSH_PORT = 999 -HPE3PAR_SAN_SSH_CON_TIMEOUT = 44 -HPE3PAR_SAN_SSH_PRIVATE = 'foobar' -GOODNESS_FUNCTION = \ - "stats.capacity_utilization < 0.6? 100:25" -FILTER_FUNCTION = \ - "stats.total_volumes < 400 && stats.capacity_utilization < 0.8" - -CHAP_USER_KEY = "HPQ-cinder-CHAP-name" -CHAP_PASS_KEY = "HPQ-cinder-CHAP-secret" - -FLASH_CACHE_ENABLED = 1 -FLASH_CACHE_DISABLED = 2 - -# Input/output (total read/write) operations per second. 
-THROUGHPUT = 'throughput' -# Data processed (total read/write) per unit time: kilobytes per second. -BANDWIDTH = 'bandwidth' -# Response time (total read/write): microseconds. -LATENCY = 'latency' -# IO size (total read/write): kilobytes. -IO_SIZE = 'io_size' -# Queue length for processing IO requests -QUEUE_LENGTH = 'queue_length' -# Average busy percentage -AVG_BUSY_PERC = 'avg_busy_perc' - -# replication constants -HPE3PAR_CPG_REMOTE = 'DestOpenStackCPG' -HPE3PAR_CPG2_REMOTE = 'destfakepool' -HPE3PAR_CPG_MAP = 'OpenStackCPG:DestOpenStackCPG fakepool:destfakepool' -SYNC_MODE = 1 -PERIODIC_MODE = 2 -SYNC_PERIOD = 900 -# EXISTENT_PATH error code returned from hpe3parclient -EXISTENT_PATH = 73 - - -class Comment(object): - def __init__(self, expected): - self.expected = expected - - def __eq__(self, actual): - return (dict(ast.literal_eval(actual)) == self.expected) - - def __ne__(self, other): - return not self.__eq__(other) - - -class HPE3PARBaseDriver(object): - - VOLUME_ID = 'd03338a9-9115-48a3-8dfc-35cdfcdc15a7' - SRC_CG_VOLUME_ID = 'bd21d11b-c765-4c68-896c-6b07f63cfcb6' - CLONE_ID = 'd03338a9-9115-48a3-8dfc-000000000000' - VOLUME_TYPE_ID_REPLICATED = 'be9181f1-4040-46f2-8298-e7532f2bf9db' - VOLUME_TYPE_ID_DEDUP = 'd03338a9-9115-48a3-8dfc-11111111111' - VOL_TYPE_ID_DEDUP_COMPRESS = 'd03338a9-9115-48a3-8dfc-33333333333' - VOLUME_TYPE_ID_FLASH_CACHE = 'd03338a9-9115-48a3-8dfc-22222222222' - VOLUME_NAME = 'volume-' + VOLUME_ID - SRC_CG_VOLUME_NAME = 'volume-' + SRC_CG_VOLUME_ID - VOLUME_NAME_3PAR = 'osv-0DM4qZEVSKON-DXN-NwVpw' - SNAPSHOT_ID = '2f823bdc-e36e-4dc8-bd15-de1c7a28ff31' - SNAPSHOT_NAME = 'snapshot-2f823bdc-e36e-4dc8-bd15-de1c7a28ff31' - VOLUME_3PAR_NAME = 'osv-0DM4qZEVSKON-DXN-NwVpw' - SNAPSHOT_3PAR_NAME = 'oss-L4I73ONuTci9Fd4ceij-MQ' - RCG_3PAR_NAME = 'rcg-0DM4qZEVSKON-DXN-N' - GROUP_ID = '6044fedf-c889-4752-900f-2039d247a5df' - CONSIS_GROUP_NAME = 'vvs-YET.38iJR1KQDyA50kel3w' - SRC_CONSIS_GROUP_ID = '7d7dfa02-ac6e-48cb-96af-8a0cd3008d47' - 
SRC_CONSIS_GROUP_NAME = 'vvs-fX36AqxuSMuWr4oM0wCNRw' - CGSNAPSHOT_ID = 'e91c5ed5-daee-4e84-8724-1c9e31e7a1f2' - CGSNAPSHOT_BASE_NAME = 'oss-6Rxe1druToSHJByeMeeh8g' - CLIENT_ID = "12345" - REPLICATION_CLIENT_ID = "54321" - REPLICATION_BACKEND_ID = 'target' - # fake host on the 3par - FAKE_HOST = 'fakehost' - FAKE_CINDER_HOST = 'fakehost@foo#' + HPE3PAR_CPG - USER_ID = '2689d9a913974c008b1d859013f23607' - PROJECT_ID = 'fac88235b9d64685a3530f73e490348f' - VOLUME_ID_SNAP = '761fc5e5-5191-4ec7-aeba-33e36de44156' - FAKE_DESC = 'test description name' - FAKE_FC_PORTS = [{'portPos': {'node': 7, 'slot': 1, 'cardPort': 1}, - 'type': 1, - 'portWWN': '0987654321234', - 'protocol': 1, - 'mode': 2, - 'linkState': 4}, - {'portPos': {'node': 6, 'slot': 1, 'cardPort': 1}, - 'type': 1, - 'portWWN': '123456789000987', - 'protocol': 1, - 'mode': 2, - 'linkState': 4}] - QOS = {'qos:maxIOPS': '1000', 'qos:maxBWS': '50', - 'qos:minIOPS': '100', 'qos:minBWS': '25', - 'qos:latency': '25', 'qos:priority': 'low'} - QOS_SPECS = {'maxIOPS': '1000', 'maxBWS': '50', - 'minIOPS': '100', 'minBWS': '25', - 'latency': '25', 'priority': 'low'} - VVS_NAME = "myvvs" - FAKE_ISCSI_PORT = {'portPos': {'node': 8, 'slot': 1, 'cardPort': 1}, - 'protocol': 2, - 'mode': 2, - 'IPAddr': '1.1.1.2', - 'iSCSIName': ('iqn.2000-05.com.3pardata:' - '21810002ac00383d'), - 'linkState': 4} - volume = {'name': VOLUME_NAME, - 'id': VOLUME_ID, - 'display_name': 'Foo Volume', - 'size': 2, - 'host': FAKE_CINDER_HOST, - 'volume_type': None, - 'volume_type_id': None} - - volume_src_cg = {'name': SRC_CG_VOLUME_NAME, - 'id': SRC_CG_VOLUME_ID, - 'display_name': 'Foo Volume', - 'size': 2, - 'host': FAKE_CINDER_HOST, - 'volume_type': None, - 'volume_type_id': None} - - volume_replicated = {'name': VOLUME_NAME, - 'id': VOLUME_ID, - 'display_name': 'Foo Volume', - 'replication_status': 'disabled', - 'provider_location': CLIENT_ID, - 'size': 2, - 'host': FAKE_CINDER_HOST, - 'volume_type': 'replicated', - 'volume_type_id': 
VOLUME_TYPE_ID_REPLICATED} - - replication_targets = [{'backend_id': REPLICATION_BACKEND_ID, - 'cpg_map': HPE3PAR_CPG_MAP, - 'hpe3par_api_url': 'https://1.1.1.1/api/v1', - 'hpe3par_username': HPE3PAR_USER_NAME, - 'hpe3par_password': HPE3PAR_USER_PASS, - 'san_ip': HPE3PAR_SAN_IP, - 'san_login': HPE3PAR_USER_NAME, - 'san_password': HPE3PAR_USER_PASS, - 'san_ssh_port': HPE3PAR_SAN_SSH_PORT, - 'ssh_conn_timeout': HPE3PAR_SAN_SSH_CON_TIMEOUT, - 'san_private_key': HPE3PAR_SAN_SSH_PRIVATE}] - - list_rep_targets = [{'backend_id': 'target'}] - - volume_encrypted = {'name': VOLUME_NAME, - 'id': VOLUME_ID, - 'display_name': 'Foo Volume', - 'size': 2, - 'host': FAKE_CINDER_HOST, - 'volume_type': None, - 'volume_type_id': None, - 'encryption_key_id': 'fake_key'} - - volume_dedup_compression = {'name': VOLUME_NAME, - 'id': VOLUME_ID, - 'display_name': 'Foo Volume', - 'size': 16, - 'host': FAKE_CINDER_HOST, - 'volume_type': 'dedup_compression', - 'volume_type_id': VOL_TYPE_ID_DEDUP_COMPRESS} - - volume_dedup = {'name': VOLUME_NAME, - 'id': VOLUME_ID, - 'display_name': 'Foo Volume', - 'size': 2, - 'host': FAKE_CINDER_HOST, - 'volume_type': 'dedup', - 'volume_type_id': VOLUME_TYPE_ID_DEDUP} - - volume_pool = {'name': VOLUME_NAME, - 'id': VOLUME_ID, - 'display_name': 'Foo Volume', - 'size': 2, - 'host': volume_utils.append_host(FAKE_HOST, HPE3PAR_CPG2), - 'volume_type': None, - 'volume_type_id': None} - - volume_qos = {'name': VOLUME_NAME, - 'id': VOLUME_ID, - 'display_name': 'Foo Volume', - 'size': 2, - 'host': FAKE_CINDER_HOST, - 'volume_type': None, - 'volume_type_id': 'gold'} - - volume_flash_cache = {'name': VOLUME_NAME, - 'id': VOLUME_ID, - 'display_name': 'Foo Volume', - 'size': 2, - 'host': FAKE_CINDER_HOST, - 'volume_type': None, - 'volume_type_id': VOLUME_TYPE_ID_FLASH_CACHE} - - snapshot = {'name': SNAPSHOT_NAME, - 'id': SNAPSHOT_ID, - 'user_id': USER_ID, - 'project_id': PROJECT_ID, - 'volume_id': VOLUME_ID_SNAP, - 'volume_name': VOLUME_NAME, - 'status': 
fields.SnapshotStatus.CREATING, - 'progress': '0%', - 'volume_size': 2, - 'display_name': 'fakesnap', - 'display_description': FAKE_DESC, - 'volume': volume} - - wwn = ["123456789012345", "123456789054321"] - - connector = {'ip': '10.0.0.2', - 'initiator': 'iqn.1993-08.org.debian:01:222', - 'wwpns': [wwn[0], wwn[1]], - 'wwnns': ["223456789012345", "223456789054321"], - 'host': FAKE_HOST, - 'multipath': False} - - connector_multipath_enabled = {'ip': '10.0.0.2', - 'initiator': ('iqn.1993-08.org' - '.debian:01:222'), - 'wwpns': [wwn[0], wwn[1]], - 'wwnns': ["223456789012345", - "223456789054321"], - 'host': FAKE_HOST, - 'multipath': True} - - volume_type = {'name': 'gold', - 'deleted': False, - 'updated_at': None, - 'extra_specs': {'cpg': HPE3PAR_CPG2, - 'qos:maxIOPS': '1000', - 'qos:maxBWS': '50', - 'qos:minIOPS': '100', - 'qos:minBWS': '25', - 'qos:latency': '25', - 'qos:priority': 'low'}, - 'deleted_at': None, - 'id': 'gold'} - - volume_type_replicated = {'name': 'replicated', - 'deleted': False, - 'updated_at': None, - 'extra_specs': - {'replication_enabled': ' True'}, - 'deleted_at': None, - 'id': VOLUME_TYPE_ID_REPLICATED} - - volume_type_dedup_compression = {'name': 'dedup', - 'deleted': False, - 'updated_at': None, - 'extra_specs': {'cpg': HPE3PAR_CPG2, - 'provisioning': 'dedup', - 'compression': 'true'}, - 'deleted_at': None, - 'id': VOL_TYPE_ID_DEDUP_COMPRESS} - - volume_type_dedup = {'name': 'dedup', - 'deleted': False, - 'updated_at': None, - 'extra_specs': {'cpg': HPE3PAR_CPG2, - 'provisioning': 'dedup'}, - 'deleted_at': None, - 'id': VOLUME_TYPE_ID_DEDUP} - - volume_type_flash_cache = {'name': 'flash-cache-on', - 'deleted': False, - 'updated_at': None, - 'extra_specs': {'cpg': HPE3PAR_CPG2, - 'hpe3par:flash_cache': 'true'}, - 'deleted_at': None, - 'id': VOLUME_TYPE_ID_FLASH_CACHE} - - flash_cache_3par_keys = {'flash_cache': 'true'} - - cpgs = [ - {'SAGrowth': {'LDLayout': {'diskPatterns': [{'diskType': 2}]}, - 'incrementMiB': 8192}, - 'SAUsage': 
{'rawTotalMiB': 24576, - 'rawUsedMiB': 768, - 'totalMiB': 8192, - 'usedMiB': 256}, - 'SDGrowth': {'LDLayout': {'RAIDType': 4, - 'diskPatterns': [{'diskType': 2}]}, - 'incrementMiB': 32768}, - 'SDUsage': {'rawTotalMiB': 49152, - 'rawUsedMiB': 1023, - 'totalMiB': 36864, - 'usedMiB': 1024 * 1}, - 'UsrUsage': {'rawTotalMiB': 57344, - 'rawUsedMiB': 43349, - 'totalMiB': 43008, - 'usedMiB': 1024 * 20}, - 'additionalStates': [], - 'degradedStates': [], - 'failedStates': [], - 'id': 5, - 'name': HPE3PAR_CPG, - 'numFPVVs': 2, - 'numTPVVs': 0, - 'numTDVVs': 1, - 'state': 1, - 'uuid': '29c214aa-62b9-41c8-b198-543f6cf24edf'}] - - TASK_DONE = 1 - TASK_ACTIVE = 2 - STATUS_DONE = {'status': 1} - STATUS_ACTIVE = {'status': 2} - - mock_client_conf = { - 'PORT_MODE_TARGET': 2, - 'PORT_STATE_READY': 4, - 'PORT_PROTO_ISCSI': 2, - 'PORT_PROTO_FC': 1, - 'PORT_TYPE_HOST': 1, - 'TASK_DONE': TASK_DONE, - 'TASK_ACTIVE': TASK_ACTIVE, - 'HOST_EDIT_ADD': 1, - 'CHAP_INITIATOR': 1, - 'CHAP_TARGET': 2, - 'getPorts.return_value': { - 'members': FAKE_FC_PORTS + [FAKE_ISCSI_PORT] - } - } - - RETYPE_VVS_NAME = "yourvvs" - - RETYPE_HOST = { - u'host': u'mark-stack1@3parfc', - u'capabilities': { - 'QoS_support': True, - u'location_info': u'HPE3PARDriver:1234567:MARK_TEST_CPG', - u'timestamp': u'2014-06-04T19:03:32.485540', - u'allocated_capacity_gb': 0, - u'volume_backend_name': u'3parfc', - u'free_capacity_gb': u'infinite', - u'driver_version': u'3.0.0', - u'total_capacity_gb': u'infinite', - u'reserved_percentage': 0, - u'vendor_name': u'Hewlett Packard Enterprise', - u'storage_protocol': u'FC' - } - } - - RETYPE_HOST_NOT3PAR = { - u'host': u'mark-stack1@3parfc', - u'capabilities': { - u'location_info': u'XXXDriverXXX:1610771:MARK_TEST_CPG', - } - } - - RETYPE_QOS_SPECS = {'maxIOPS': '1000', 'maxBWS': '50', - 'minIOPS': '100', 'minBWS': '25', - 'latency': '25', 'priority': 'high'} - - RETYPE_VOLUME_TYPE_ID = "FakeVolId" - - RETYPE_VOLUME_TYPE_0 = { - 'name': 'red', - 'id': RETYPE_VOLUME_TYPE_ID, - 
'extra_specs': { - 'cpg': HPE3PAR_CPG, - 'snap_cpg': HPE3PAR_CPG_SNAP, - 'vvs': RETYPE_VVS_NAME, - 'qos': RETYPE_QOS_SPECS, - 'tpvv': True, - 'tdvv': False, - 'volume_type': volume_type - } - } - - RETYPE_VOLUME_TYPE_1 = { - 'name': 'white', - 'id': RETYPE_VOLUME_TYPE_ID, - 'extra_specs': { - 'cpg': HPE3PAR_CPG, - 'snap_cpg': HPE3PAR_CPG_SNAP, - 'vvs': VVS_NAME, - 'qos': QOS, - 'tpvv': True, - 'tdvv': False, - 'volume_type': volume_type - } - } - - RETYPE_VOLUME_TYPE_2 = { - 'name': 'blue', - 'id': RETYPE_VOLUME_TYPE_ID, - 'extra_specs': { - 'cpg': HPE3PAR_CPG_QOS, - 'snap_cpg': HPE3PAR_CPG_SNAP, - 'vvs': RETYPE_VVS_NAME, - 'qos': RETYPE_QOS_SPECS, - 'tpvv': True, - 'tdvv': False, - 'volume_type': volume_type - } - } - - RETYPE_VOLUME_TYPE_3 = { - 'name': 'purple', - 'id': RETYPE_VOLUME_TYPE_ID, - 'extra_specs': { - 'cpg': HPE3PAR_CPG_QOS, - 'snap_cpg': HPE3PAR_CPG_SNAP, - 'vvs': RETYPE_VVS_NAME, - 'qos': RETYPE_QOS_SPECS, - 'tpvv': False, - 'tdvv': True, - 'volume_type': volume_type - } - } - RETYPE_VOLUME_TYPE_BAD_PERSONA = { - 'name': 'bad_persona', - 'id': 'any_id', - 'extra_specs': { - 'hpe3par:persona': '99 - invalid' - } - } - - RETYPE_VOLUME_TYPE_BAD_CPG = { - 'name': 'bad_cpg', - 'id': 'any_id', - 'extra_specs': { - 'cpg': 'bogus', - 'snap_cpg': 'bogus', - 'hpe3par:persona': '2 - Generic-ALUA' - } - } - - MANAGE_VOLUME_INFO = { - 'userCPG': 'testUserCpg0', - 'snapCPG': 'testSnapCpg0', - 'provisioningType': 1, - 'comment': "{'display_name': 'Foo Volume'}" - } - - MV_INFO_WITH_NO_SNAPCPG = { - 'userCPG': 'testUserCpg0', - 'provisioningType': 1, - 'comment': "{'display_name': 'Foo Volume'}" - } - - RETYPE_TEST_COMMENT = "{'retype_test': 'test comment'}" - - RETYPE_VOLUME_INFO_0 = { - 'name': VOLUME_NAME, - 'id': VOLUME_ID, - 'display_name': 'Retype Vol0', - 'size': 1, - 'host': RETYPE_HOST, - 'userCPG': 'testUserCpg0', - 'snapCPG': 'testSnapCpg0', - 'provisioningType': 1, - 'comment': RETYPE_TEST_COMMENT - } - - RETYPE_TEST_COMMENT_1 = "{'retype_test': 'test 
comment 1'}" - - RETYPE_VOLUME_INFO_1 = { - 'name': VOLUME_NAME, - 'id': VOLUME_ID, - 'display_name': 'Retype Vol1', - 'size': 1, - 'host': RETYPE_HOST, - 'userCPG': HPE3PAR_CPG, - 'snapCPG': HPE3PAR_CPG_SNAP, - 'provisioningType': 1, - 'comment': RETYPE_TEST_COMMENT - } - - RETYPE_TEST_COMMENT_2 = "{'retype_test': 'test comment 2'}" - - RETYPE_VOLUME_INFO_2 = { - 'name': VOLUME_NAME, - 'id': VOLUME_ID, - 'display_name': 'Retype Vol2', - 'size': 1, - 'host': RETYPE_HOST, - 'userCPG': HPE3PAR_CPG, - 'snapCPG': HPE3PAR_CPG_SNAP, - 'provisioningType': 3, - 'comment': RETYPE_TEST_COMMENT - } - # Test for when we don't get a snapCPG. - RETYPE_VOLUME_INFO_NO_SNAP = { - 'name': VOLUME_NAME, - 'id': VOLUME_ID, - 'display_name': 'Retype Vol2', - 'size': 1, - 'host': RETYPE_HOST, - 'userCPG': 'testUserCpg2', - 'provisioningType': 1, - 'comment': '{}' - } - - RETYPE_CONF = { - 'TASK_ACTIVE': TASK_ACTIVE, - 'TASK_DONE': TASK_DONE, - 'getTask.return_value': STATUS_DONE, - 'getStorageSystemInfo.return_value': {'id': CLIENT_ID, - 'serialNumber': '1234567'}, - 'getVolume.return_value': RETYPE_VOLUME_INFO_0, - 'modifyVolume.return_value': ("anyResponse", {'taskid': 1}) - } - - # 3PAR retype currently doesn't use the diff. Existing code and fresh info - # from the array work better for the most part. Some use of the diff was - # intentionally removed to make _retype more usable for other use cases. 
- RETYPE_DIFF = None - - wsapi_version_312 = {'major': 1, - 'build': 30102422, - 'minor': 3, - 'revision': 1} - - wsapi_version_for_compression = {'major': 1, - 'build': 30301215, - 'minor': 6, - 'revision': 0} - - wsapi_version_for_dedup = {'major': 1, - 'build': 30201120, - 'minor': 4, - 'revision': 1} - - wsapi_version_for_flash_cache = {'major': 1, - 'build': 30201200, - 'minor': 4, - 'revision': 2} - - wsapi_version_for_remote_copy = {'major': 1, - 'build': 30202290, - 'minor': 5, - 'revision': 0} - - # Use this to point to latest version of wsapi - wsapi_version_latest = wsapi_version_for_compression - - standard_login = [ - mock.call.login(HPE3PAR_USER_NAME, HPE3PAR_USER_PASS), - mock.call.setSSHOptions( - HPE3PAR_SAN_IP, - HPE3PAR_USER_NAME, - HPE3PAR_USER_PASS, - missing_key_policy='AutoAddPolicy', - privatekey=HPE3PAR_SAN_SSH_PRIVATE, - known_hosts_file=mock.ANY, - port=HPE3PAR_SAN_SSH_PORT, - conn_timeout=HPE3PAR_SAN_SSH_CON_TIMEOUT)] - - get_id_login = [ - mock.call.getWsApiVersion(), - mock.call.login(HPE3PAR_USER_NAME, HPE3PAR_USER_PASS), - mock.call.setSSHOptions( - HPE3PAR_SAN_IP, - HPE3PAR_USER_NAME, - HPE3PAR_USER_PASS, - missing_key_policy='AutoAddPolicy', - privatekey=HPE3PAR_SAN_SSH_PRIVATE, - known_hosts_file=mock.ANY, - port=HPE3PAR_SAN_SSH_PORT, - conn_timeout=HPE3PAR_SAN_SSH_CON_TIMEOUT), - mock.call.getStorageSystemInfo()] - - standard_logout = [ - mock.call.logout()] - - class fake_volume_object(object): - def __init__(self, vol_id='d03338a9-9115-48a3-8dfc-35cdfcdc15a7'): - self.id = vol_id - self.name = 'volume-d03338a9-9115-48a3-8dfc-35cdfcdc15a7' - self.display_name = 'Foo Volume' - self.size = 2 - self.host = 'fakehost@foo#OpenStackCPG' - self.volume_type = None - self.volume_type_id = None - - class fake_group_object(object): - def __init__(self, grp_id='6044fedf-c889-4752-900f-2039d247a5df'): - self.id = grp_id - self.volume_type_ids = ['d03338a9-9115-48a3-8dfc-33333333333'] - self.volume_types = 
['d03338a9-9115-48a3-8dfc-33333333333'] - self.name = 'cg_name' - self.group_snapshot_id = None - self.host = 'fakehost@foo#OpenStackCPG' - self.description = 'consistency group' - - class fake_group_snapshot_object(object): - def __init__(self, cgsnap_id='e91c5ed5-daee-4e84-8724-1c9e31e7a1f2'): - self.id = cgsnap_id - self.group_id = '6044fedf-c889-4752-900f-2039d247a5df' - self.description = 'group_snapshot' - self.readOnly = False - - def setup_configuration(self): - configuration = mock.MagicMock() - configuration.hpe3par_debug = False - configuration.hpe3par_username = HPE3PAR_USER_NAME - configuration.hpe3par_password = HPE3PAR_USER_PASS - configuration.hpe3par_api_url = 'https://1.1.1.1/api/v1' - configuration.hpe3par_cpg = [HPE3PAR_CPG, HPE3PAR_CPG2] - configuration.hpe3par_cpg_snap = HPE3PAR_CPG_SNAP - configuration.iscsi_ip_address = '1.1.1.2' - configuration.iscsi_port = '1234' - configuration.san_ip = HPE3PAR_SAN_IP - configuration.san_login = HPE3PAR_USER_NAME - configuration.san_password = HPE3PAR_USER_PASS - configuration.san_ssh_port = HPE3PAR_SAN_SSH_PORT - configuration.ssh_conn_timeout = HPE3PAR_SAN_SSH_CON_TIMEOUT - configuration.san_private_key = HPE3PAR_SAN_SSH_PRIVATE - configuration.hpe3par_snapshot_expiration = "" - configuration.hpe3par_snapshot_retention = "" - configuration.hpe3par_iscsi_ips = [] - configuration.hpe3par_iscsi_chap_enabled = False - configuration.goodness_function = GOODNESS_FUNCTION - configuration.filter_function = FILTER_FUNCTION - configuration.image_volume_cache_enabled = False - configuration.replication_device = None - return configuration - - @mock.patch( - 'hpe3parclient.client.HPE3ParClient', - spec=True, - ) - def setup_mock_client(self, _m_client, driver, conf=None, m_conf=None): - - _m_client = _m_client.return_value - - # Configure the base constants, defaults etc... 
- _m_client.configure_mock(**self.mock_client_conf) - - _m_client.getWsApiVersion.return_value = self.wsapi_version_latest - - # If m_conf, drop those over the top of the base_conf. - if m_conf is not None: - _m_client.configure_mock(**m_conf) - - if conf is None: - conf = self.setup_configuration() - self.driver = driver(configuration=conf) - self.driver.do_setup(None) - return _m_client - - @mock.patch('hpe3parclient.version', "3.0.9") - def test_unsupported_client_version(self): - - self.assertRaises(exception.InvalidInput, - self.setup_driver) - - def test_ssh_options(self): - - expected_hosts_key_file = "test_hosts_key_file" - self.flags(ssh_hosts_key_file=expected_hosts_key_file, - strict_ssh_host_key_policy=False) - - self.ctxt = context.get_admin_context() - mock_client = self.setup_mock_client( - driver=hpefcdriver.HPE3PARFCDriver) - - expected = [ - mock.call.login(HPE3PAR_USER_NAME, HPE3PAR_USER_PASS), - mock.call.setSSHOptions( - HPE3PAR_SAN_IP, - HPE3PAR_USER_NAME, - HPE3PAR_USER_PASS, - privatekey=HPE3PAR_SAN_SSH_PRIVATE, - known_hosts_file=expected_hosts_key_file, - missing_key_policy="AutoAddPolicy", - port=HPE3PAR_SAN_SSH_PORT, - conn_timeout=HPE3PAR_SAN_SSH_CON_TIMEOUT), - mock.call.getCPG(HPE3PAR_CPG), - mock.call.getCPG(HPE3PAR_CPG2)] - mock_client.assert_has_calls( - expected + - self.standard_logout) - - def test_ssh_options_strict(self): - - expected_hosts_key_file = "test_hosts_key_file" - self.flags(ssh_hosts_key_file=expected_hosts_key_file, - strict_ssh_host_key_policy=True) - - self.ctxt = context.get_admin_context() - mock_client = self.setup_mock_client( - driver=hpefcdriver.HPE3PARFCDriver) - - expected = [ - mock.call.login(HPE3PAR_USER_NAME, HPE3PAR_USER_PASS), - mock.call.setSSHOptions( - HPE3PAR_SAN_IP, - HPE3PAR_USER_NAME, - HPE3PAR_USER_PASS, - privatekey=HPE3PAR_SAN_SSH_PRIVATE, - known_hosts_file=expected_hosts_key_file, - missing_key_policy="RejectPolicy", - port=HPE3PAR_SAN_SSH_PORT, - 
conn_timeout=HPE3PAR_SAN_SSH_CON_TIMEOUT), - mock.call.getCPG(HPE3PAR_CPG), - mock.call.getCPG(HPE3PAR_CPG2)] - mock_client.assert_has_calls(expected + self.standard_logout) - - def test_task_waiter(self): - - task_statuses = [self.STATUS_ACTIVE, self.STATUS_ACTIVE] - - def side_effect(*args): - return task_statuses and task_statuses.pop(0) or self.STATUS_DONE - - conf = {'getTask.side_effect': side_effect} - mock_client = self.setup_driver(mock_conf=conf) - - task_id = 1234 - interval = .001 - - with mock.patch.object(hpecommon.HPE3PARCommon, - '_create_client') as mock_create_client: - mock_create_client.return_value = mock_client - common = self.driver._login() - waiter = common.TaskWaiter(mock_client, task_id, interval) - status = waiter.wait_for_task() - - expected = [ - mock.call.getTask(task_id), - mock.call.getTask(task_id), - mock.call.getTask(task_id) - ] - mock_client.assert_has_calls(expected) - self.assertEqual(self.STATUS_DONE, status) - - def test_create_volume(self): - # setup_mock_client drive with default configuration - # and return the mock HTTP 3PAR client - mock_client = self.setup_driver() - with mock.patch.object(hpecommon.HPE3PARCommon, - '_create_client') as mock_create_client: - mock_create_client.return_value = mock_client - self.driver.create_volume(self.volume) - comment = Comment({ - "display_name": "Foo Volume", - "type": "OpenStack", - "name": "volume-d03338a9-9115-48a3-8dfc-35cdfcdc15a7", - "volume_id": "d03338a9-9115-48a3-8dfc-35cdfcdc15a7"}) - expected = [ - mock.call.createVolume( - self.VOLUME_3PAR_NAME, - HPE3PAR_CPG, - 2048, { - 'comment': comment, - 'tpvv': True, - 'tdvv': False, - 'snapCPG': HPE3PAR_CPG_SNAP})] - - mock_client.assert_has_calls( - self.standard_login + - expected + - self.standard_logout) - - def test_create_volume_in_generic_group(self): - # setup_mock_client drive with default configuration - # and return the mock HTTP 3PAR client - mock_client = self.setup_driver() - volume = {'name': self.VOLUME_NAME, - 
'id': self.VOLUME_ID, - 'display_name': 'Foo Volume', - 'size': 2, - 'group_id': '87101633-13e0-41ee-813b-deabc372267b', - 'host': self.FAKE_CINDER_HOST, - 'volume_type': None, - 'volume_type_id': None} - with mock.patch.object(hpecommon.HPE3PARCommon, - '_create_client') as mock_create_client: - mock_create_client.return_value = mock_client - self.driver.create_volume(volume) - comment = Comment({ - "display_name": "Foo Volume", - "type": "OpenStack", - "name": "volume-d03338a9-9115-48a3-8dfc-35cdfcdc15a7", - "volume_id": "d03338a9-9115-48a3-8dfc-35cdfcdc15a7"}) - expected = [ - mock.call.createVolume( - self.VOLUME_3PAR_NAME, - HPE3PAR_CPG, - 2048, { - 'comment': comment, - 'tpvv': True, - 'tdvv': False, - 'snapCPG': HPE3PAR_CPG_SNAP})] - - mock_client.assert_has_calls( - self.standard_login + - expected + - self.standard_logout) - - def test_create_volume_in_pool(self): - - # setup_mock_client drive with default configuration - # and return the mock HTTP 3PAR client - mock_client = self.setup_driver() - with mock.patch.object(hpecommon.HPE3PARCommon, - '_create_client') as mock_create_client: - mock_create_client.return_value = mock_client - - return_model = self.driver.create_volume(self.volume_pool) - comment = Comment({ - "display_name": "Foo Volume", - "type": "OpenStack", - "name": "volume-d03338a9-9115-48a3-8dfc-35cdfcdc15a7", - "volume_id": "d03338a9-9115-48a3-8dfc-35cdfcdc15a7"}) - expected = [ - mock.call.createVolume( - self.VOLUME_3PAR_NAME, - HPE3PAR_CPG2, - 2048, { - 'comment': comment, - 'tpvv': True, - 'tdvv': False, - 'snapCPG': HPE3PAR_CPG_SNAP})] - - mock_client.assert_has_calls( - self.standard_login + - expected + - self.standard_logout) - self.assertIsNone(return_model) - - @mock.patch.object(volume_types, 'get_volume_type') - def test_unsupported_dedup_volume_type(self, _mock_volume_types): - - mock_client = self.setup_driver(wsapi_version=self.wsapi_version_312) - _mock_volume_types.return_value = { - 'name': 'dedup', - 'extra_specs': { - 
'cpg': HPE3PAR_CPG_QOS, - 'snap_cpg': HPE3PAR_CPG_SNAP, - 'vvs_name': self.VVS_NAME, - 'qos': self.QOS, - 'provisioning': 'dedup', - 'volume_type': self.volume_type_dedup}} - - with mock.patch.object(hpecommon.HPE3PARCommon, - '_create_client') as mock_create_client: - mock_create_client.return_value = mock_client - common = self.driver._login() - - self.assertRaises(exception.InvalidInput, - common.get_volume_settings_from_type_id, - self.VOLUME_TYPE_ID_DEDUP, - "mock") - - @mock.patch.object(volume_types, 'get_volume_type') - def test_get_snap_cpg_from_volume_type(self, _mock_volume_types): - - mock_client = self.setup_driver() - expected_type_snap_cpg = "type_snap_cpg" - _mock_volume_types.return_value = { - 'name': 'gold', - 'extra_specs': { - 'cpg': HPE3PAR_CPG, - 'snap_cpg': expected_type_snap_cpg, - 'volume_type': self.volume_type}} - - with mock.patch.object(hpecommon.HPE3PARCommon, - '_create_client') as mock_create_client: - mock_create_client.return_value = mock_client - common = self.driver._login() - - result = common.get_volume_settings_from_type_id( - "mock", self.driver.configuration.hpe3par_cpg) - - self.assertEqual(expected_type_snap_cpg, result['snap_cpg']) - - @mock.patch.object(volume_types, 'get_volume_type') - def test_get_snap_cpg_from_volume_type_cpg(self, _mock_volume_types): - - mock_client = self.setup_driver() - expected_cpg = 'use_extra_specs_cpg' - _mock_volume_types.return_value = { - 'name': 'gold', - 'extra_specs': { - 'cpg': expected_cpg, - 'volume_type': self.volume_type}} - - with mock.patch.object(hpecommon.HPE3PARCommon, - '_create_client') as mock_create_client: - mock_create_client.return_value = mock_client - common = self.driver._login() - - result = common.get_volume_settings_from_type_id( - "mock", self.driver.configuration.hpe3par_cpg) - - self.assertEqual(self.driver.configuration.hpe3par_cpg_snap, - result['snap_cpg']) - - @mock.patch.object(volume_types, 'get_volume_type') - def 
test_get_snap_cpg_from_volume_type_conf_snap_cpg( - self, _mock_volume_types): - _mock_volume_types.return_value = { - 'name': 'gold', - 'extra_specs': { - 'volume_type': self.volume_type}} - - conf = self.setup_configuration() - expected_snap_cpg = conf.hpe3par_cpg_snap - mock_client = self.setup_driver(config=conf) - with mock.patch.object(hpecommon.HPE3PARCommon, - '_create_client') as mock_create_client: - mock_create_client.return_value = mock_client - common = self.driver._login() - - result = common.get_volume_settings_from_type_id( - "mock", self.driver.configuration.hpe3par_cpg) - - self.assertEqual(expected_snap_cpg, result['snap_cpg']) - - @mock.patch.object(volume_types, 'get_volume_type') - def test_get_snap_cpg_from_volume_type_conf_cpg( - self, _mock_volume_types): - _mock_volume_types.return_value = { - 'name': 'gold', - 'extra_specs': { - 'volume_type': self.volume_type}} - - conf = self.setup_configuration() - conf.hpe3par_cpg_snap = None - expected_cpg = conf.hpe3par_cpg - mock_client = self.setup_driver(config=conf) - with mock.patch.object(hpecommon.HPE3PARCommon, - '_create_client') as mock_create_client: - mock_create_client.return_value = mock_client - common = self.driver._login() - - result = common.get_volume_settings_from_type_id( - "mock", self.driver.configuration.hpe3par_cpg) - - self.assertEqual(expected_cpg, result['snap_cpg']) - - @mock.patch.object(volume_types, 'get_volume_type') - def test_create_volume_qos(self, _mock_volume_types): - # setup_mock_client drive with default configuration - # and return the mock HTTP 3PAR client - mock_client = self.setup_driver() - - _mock_volume_types.return_value = { - 'name': 'gold', - 'extra_specs': { - 'cpg': HPE3PAR_CPG_QOS, - 'snap_cpg': HPE3PAR_CPG_SNAP, - 'vvs_name': self.VVS_NAME, - 'qos': self.QOS, - 'tpvv': True, - 'tdvv': False, - 'volume_type': self.volume_type}} - - with mock.patch.object(hpecommon.HPE3PARCommon, - '_create_client') as mock_create_client: - 
mock_create_client.return_value = mock_client - - return_model = self.driver.create_volume(self.volume_qos) - comment = Comment({ - "volume_type_name": "gold", - "display_name": "Foo Volume", - "name": "volume-d03338a9-9115-48a3-8dfc-35cdfcdc15a7", - "volume_type_id": "gold", - "volume_id": "d03338a9-9115-48a3-8dfc-35cdfcdc15a7", - "qos": {}, - "type": "OpenStack"}) - - expected = [ - mock.call.getCPG(HPE3PAR_CPG), - mock.call.createVolume( - self.VOLUME_3PAR_NAME, - HPE3PAR_CPG, - 2048, { - 'comment': comment, - 'tpvv': True, - 'tdvv': False, - 'snapCPG': HPE3PAR_CPG_SNAP})] - - mock_client.assert_has_calls( - self.standard_login + - expected + - self.standard_logout) - self.assertIsNone(return_model) - - @mock.patch.object(volume_types, 'get_volume_type') - def test_create_volume_replicated_periodic(self, _mock_volume_types): - # setup_mock_client drive with default configuration - # and return the mock HTTP 3PAR client - conf = self.setup_configuration() - self.replication_targets[0]['replication_mode'] = 'periodic' - conf.replication_device = self.replication_targets - mock_client = self.setup_driver(config=conf) - mock_client.getStorageSystemInfo.return_value = ( - {'id': self.CLIENT_ID}) - mock_client.getRemoteCopyGroup.side_effect = ( - hpeexceptions.HTTPNotFound) - mock_client.getCPG.return_value = {'domain': None} - mock_replicated_client = self.setup_driver(config=conf) - mock_replicated_client.getStorageSystemInfo.return_value = ( - {'id': self.REPLICATION_CLIENT_ID}) - - _mock_volume_types.return_value = { - 'name': 'replicated', - 'extra_specs': { - 'replication_enabled': ' True', - 'replication:mode': 'periodic', - 'replication:sync_period': '900', - 'volume_type': self.volume_type_replicated}} - - with mock.patch.object( - hpecommon.HPE3PARCommon, - '_create_client') as mock_create_client, \ - mock.patch.object( - hpecommon.HPE3PARCommon, - '_create_replication_client') as mock_replication_client: - mock_create_client.return_value = mock_client - 
mock_replication_client.return_value = mock_replicated_client - - return_model = self.driver.create_volume(self.volume_replicated) - comment = Comment({ - "volume_type_name": "replicated", - "display_name": "Foo Volume", - "name": "volume-d03338a9-9115-48a3-8dfc-35cdfcdc15a7", - "volume_type_id": "be9181f1-4040-46f2-8298-e7532f2bf9db", - "volume_id": "d03338a9-9115-48a3-8dfc-35cdfcdc15a7", - "qos": {}, - "type": "OpenStack"}) - - backend_id = self.replication_targets[0]['backend_id'] - expected = [ - mock.call.createVolume( - self.VOLUME_3PAR_NAME, - HPE3PAR_CPG, - 2048, { - 'comment': comment, - 'tpvv': True, - 'tdvv': False, - 'snapCPG': HPE3PAR_CPG_SNAP}), - mock.call.getRemoteCopyGroup(self.RCG_3PAR_NAME), - mock.call.getCPG(HPE3PAR_CPG), - mock.call.createRemoteCopyGroup( - self.RCG_3PAR_NAME, - [{'userCPG': HPE3PAR_CPG_REMOTE, - 'targetName': backend_id, - 'mode': PERIODIC_MODE, - 'snapCPG': HPE3PAR_CPG_REMOTE}], - {'localUserCPG': HPE3PAR_CPG, - 'localSnapCPG': HPE3PAR_CPG_SNAP}), - mock.call.addVolumeToRemoteCopyGroup( - self.RCG_3PAR_NAME, - self.VOLUME_3PAR_NAME, - [{'secVolumeName': self.VOLUME_3PAR_NAME, - 'targetName': backend_id}], - optional={'volumeAutoCreation': True}), - mock.call.modifyRemoteCopyGroup( - self.RCG_3PAR_NAME, - {'targets': [{'syncPeriod': SYNC_PERIOD, - 'targetName': backend_id}]}), - mock.call.startRemoteCopy(self.RCG_3PAR_NAME)] - mock_client.assert_has_calls( - self.get_id_login + - self.standard_logout + - self.standard_login + - expected + - self.standard_logout) - self.assertEqual({'replication_status': 'enabled', - 'provider_location': self.CLIENT_ID}, - return_model) - - @mock.patch.object(volume_types, 'get_volume_type') - def test_delete_volume_replicated_failedover(self, _mock_volume_types): - # setup_mock_client drive with default configuration - # and return the mock HTTP 3PAR client - conf = self.setup_configuration() - self.replication_targets[0]['replication_mode'] = 'periodic' - conf.replication_device = 
self.replication_targets - mock_client = self.setup_driver(config=conf) - mock_client.getStorageSystemInfo.return_value = ( - {'id': self.CLIENT_ID}) - mock_client.getRemoteCopyGroup.return_value = ( - {'targets': [{'targetName': 'tgt'}]}) - mock_client.getCPG.return_value = {'domain': None} - mock_replicated_client = self.setup_driver(config=conf) - mock_replicated_client.getStorageSystemInfo.return_value = ( - {'id': self.REPLICATION_CLIENT_ID}) - - _mock_volume_types.return_value = { - 'name': 'replicated', - 'extra_specs': { - 'replication_enabled': ' True', - 'replication:mode': 'periodic', - 'replication:sync_period': '900', - 'volume_type': self.volume_type_replicated}} - - with mock.patch.object( - hpecommon.HPE3PARCommon, - '_create_client') as mock_create_client, \ - mock.patch.object( - hpecommon.HPE3PARCommon, - '_create_replication_client') as mock_replication_client: - mock_create_client.return_value = mock_client - mock_replication_client.return_value = mock_replicated_client - - volume = self.volume_replicated.copy() - volume['replication_status'] = 'failed-over' - self.driver.delete_volume(volume) - - rcg_name = self.RCG_3PAR_NAME + ".r" + self.CLIENT_ID - expected = [ - mock.call.getRemoteCopyGroup(rcg_name), - mock.call.toggleRemoteCopyConfigMirror( - 'tgt', - mirror_config=False), - mock.call.stopRemoteCopy(rcg_name), - mock.call.removeVolumeFromRemoteCopyGroup( - rcg_name, - self.VOLUME_3PAR_NAME, - removeFromTarget=True), - mock.call.removeRemoteCopyGroup(rcg_name), - mock.call.deleteVolume(self.VOLUME_3PAR_NAME), - mock.call.toggleRemoteCopyConfigMirror( - 'tgt', - mirror_config=True)] - mock_client.assert_has_calls( - self.get_id_login + - self.standard_logout + - self.standard_login + - expected + - self.standard_logout) - - @mock.patch.object(volume_types, 'get_volume_type') - def test_create_volume_replicated_sync(self, _mock_volume_types): - # setup_mock_client drive with default configuration - # and return the mock HTTP 3PAR client - 
conf = self.setup_configuration() - self.replication_targets[0]['replication_mode'] = 'sync' - conf.replication_device = self.replication_targets - mock_client = self.setup_driver(config=conf) - mock_client.getStorageSystemInfo.return_value = ( - {'id': self.CLIENT_ID}) - mock_client.getRemoteCopyGroup.side_effect = ( - hpeexceptions.HTTPNotFound) - mock_client.getCPG.return_value = {'domain': None} - mock_replicated_client = self.setup_driver(config=conf) - mock_replicated_client.getStorageSystemInfo.return_value = ( - {'id': self.REPLICATION_CLIENT_ID}) - - _mock_volume_types.return_value = { - 'name': 'replicated', - 'extra_specs': { - 'replication_enabled': ' True', - 'replication:mode': 'sync', - 'volume_type': self.volume_type_replicated}} - - with mock.patch.object( - hpecommon.HPE3PARCommon, - '_create_client') as mock_create_client, \ - mock.patch.object( - hpecommon.HPE3PARCommon, - '_create_replication_client') as mock_replication_client: - mock_create_client.return_value = mock_client - mock_replication_client.return_value = mock_replicated_client - - return_model = self.driver.create_volume(self.volume_replicated) - comment = Comment({ - "volume_type_name": "replicated", - "display_name": "Foo Volume", - "name": "volume-d03338a9-9115-48a3-8dfc-35cdfcdc15a7", - "volume_type_id": "be9181f1-4040-46f2-8298-e7532f2bf9db", - "volume_id": "d03338a9-9115-48a3-8dfc-35cdfcdc15a7", - "qos": {}, - "type": "OpenStack"}) - - backend_id = self.replication_targets[0]['backend_id'] - expected = [ - mock.call.createVolume( - self.VOLUME_3PAR_NAME, - HPE3PAR_CPG, - 2048, { - 'comment': comment, - 'tpvv': True, - 'tdvv': False, - 'snapCPG': HPE3PAR_CPG_SNAP}), - mock.call.getRemoteCopyGroup(self.RCG_3PAR_NAME), - mock.call.getCPG(HPE3PAR_CPG), - mock.call.createRemoteCopyGroup( - self.RCG_3PAR_NAME, - [{'userCPG': HPE3PAR_CPG_REMOTE, - 'targetName': backend_id, - 'mode': SYNC_MODE, - 'snapCPG': HPE3PAR_CPG_REMOTE}], - {'localUserCPG': HPE3PAR_CPG, - 'localSnapCPG': 
HPE3PAR_CPG_SNAP}), - mock.call.addVolumeToRemoteCopyGroup( - self.RCG_3PAR_NAME, - self.VOLUME_3PAR_NAME, - [{'secVolumeName': self.VOLUME_3PAR_NAME, - 'targetName': backend_id}], - optional={'volumeAutoCreation': True}), - mock.call.startRemoteCopy(self.RCG_3PAR_NAME)] - mock_client.assert_has_calls( - self.get_id_login + - self.standard_logout + - self.standard_login + - expected + - self.standard_logout) - self.assertEqual({'replication_status': 'enabled', - 'provider_location': self.CLIENT_ID}, - return_model) - - @mock.patch.object(volume_types, 'get_volume_type') - def test_create_volume_dedup_compression(self, _mock_volume_types): - # setup_mock_client drive with default configuration - # and return the mock HTTP 3PAR client - - mock_client = self.setup_driver() - - _mock_volume_types.return_value = { - 'name': 'dedup_compression', - 'extra_specs': { - 'cpg': HPE3PAR_CPG_QOS, - 'snap_cpg': HPE3PAR_CPG_SNAP, - 'vvs_name': self.VVS_NAME, - 'qos': self.QOS, - 'hpe3par:provisioning': 'dedup', - 'hpe3par:compression': 'True', - 'volume_type': self.volume_type_dedup_compression}} - mock_client.getStorageSystemInfo.return_value = { - 'id': self.CLIENT_ID, - 'serialNumber': '1234', - 'licenseInfo': { - 'licenses': [{'name': 'Compression'}, - {'name': 'Thin Provisioning (102400G)'}, - {'name': 'System Reporter'}] - } - } - with mock.patch.object(hpecommon.HPE3PARCommon, - '_create_client') as mock_create_client: - mock_create_client.return_value = mock_client - - return_model = self.driver.create_volume( - self.volume_dedup_compression) - comment = Comment({ - "volume_type_name": "dedup_compression", - "display_name": "Foo Volume", - "name": "volume-d03338a9-9115-48a3-8dfc-35cdfcdc15a7", - "volume_type_id": "d03338a9-9115-48a3-8dfc-33333333333", - "volume_id": "d03338a9-9115-48a3-8dfc-35cdfcdc15a7", - "qos": {}, - "type": "OpenStack"}) - expectedcall = [ - mock.call.getStorageSystemInfo()] - expected = [ - mock.call.getCPG(HPE3PAR_CPG), - 
mock.call.getStorageSystemInfo(), - mock.call.createVolume( - self.VOLUME_3PAR_NAME, - HPE3PAR_CPG, - 16384, { - 'comment': comment, - 'tpvv': False, - 'tdvv': True, - 'compression': True, - 'snapCPG': HPE3PAR_CPG_SNAP})] - mock_client.assert_has_calls( - self.standard_login + - expectedcall + - self.standard_logout + - self.standard_login + - expected + - self.standard_logout) - self.assertIsNone(return_model) - - @mock.patch.object(volume_types, 'get_volume_type') - def test_create_volume_dedup(self, _mock_volume_types): - # setup_mock_client drive with default configuration - # and return the mock HTTP 3PAR client - mock_client = self.setup_driver() - - _mock_volume_types.return_value = { - 'name': 'dedup', - 'extra_specs': { - 'cpg': HPE3PAR_CPG_QOS, - 'snap_cpg': HPE3PAR_CPG_SNAP, - 'vvs_name': self.VVS_NAME, - 'qos': self.QOS, - 'provisioning': 'dedup', - 'volume_type': self.volume_type_dedup}} - - with mock.patch.object(hpecommon.HPE3PARCommon, - '_create_client') as mock_create_client: - mock_create_client.return_value = mock_client - - return_model = self.driver.create_volume(self.volume_dedup) - comment = Comment({ - "volume_type_name": "dedup", - "display_name": "Foo Volume", - "name": "volume-d03338a9-9115-48a3-8dfc-35cdfcdc15a7", - "volume_type_id": "d03338a9-9115-48a3-8dfc-11111111111", - "volume_id": "d03338a9-9115-48a3-8dfc-35cdfcdc15a7", - "qos": {}, - "type": "OpenStack"}) - - expected = [ - mock.call.getCPG(HPE3PAR_CPG), - mock.call.createVolume( - self.VOLUME_3PAR_NAME, - HPE3PAR_CPG, - 2048, { - 'comment': comment, - 'tpvv': False, - 'tdvv': True, - 'snapCPG': HPE3PAR_CPG_SNAP})] - mock_client.assert_has_calls( - self.standard_login + - expected + - self.standard_logout) - self.assertIsNone(return_model) - - @mock.patch.object(volume_types, 'get_volume_type') - def test_create_volume_flash_cache(self, _mock_volume_types): - # Setup_mock_client drive with default configuration - # and return the mock HTTP 3PAR client - mock_client = 
self.setup_driver() - mock_client.getStorageSystemInfo.return_value = {'id': self.CLIENT_ID} - - _mock_volume_types.return_value = { - 'name': 'flash-cache-on', - 'extra_specs': { - 'cpg': HPE3PAR_CPG2, - 'snap_cpg': HPE3PAR_CPG_SNAP, - 'vvs_name': self.VVS_NAME, - 'qos': self.QOS, - 'tpvv': True, - 'tdvv': False, - 'hpe3par:flash_cache': 'true', - 'volume_type': self.volume_type_flash_cache}} - - with mock.patch.object(hpecommon.HPE3PARCommon, - '_create_client') as mock_create_client: - mock_create_client.return_value = mock_client - mock_client.getCPG.return_value = {'domain': None} - mock_client.FLASH_CACHE_ENABLED = FLASH_CACHE_ENABLED - mock_client.FLASH_CACHE_DISABLED = FLASH_CACHE_DISABLED - - return_model = self.driver.create_volume(self.volume_flash_cache) - comment = Comment({ - "volume_type_name": "flash-cache-on", - "display_name": "Foo Volume", - "name": "volume-d03338a9-9115-48a3-8dfc-35cdfcdc15a7", - "volume_type_id": "d03338a9-9115-48a3-8dfc-22222222222", - "volume_id": "d03338a9-9115-48a3-8dfc-35cdfcdc15a7", - "qos": {}, "type": "OpenStack"}) - - expected = [ - mock.call.getCPG(HPE3PAR_CPG), - mock.call.createVolume( - self.VOLUME_3PAR_NAME, - HPE3PAR_CPG, - 2048, { - 'comment': comment, - 'tpvv': True, - 'tdvv': False, - 'snapCPG': HPE3PAR_CPG_SNAP}), - mock.call.getCPG(HPE3PAR_CPG), - mock.call.createVolumeSet('vvs-0DM4qZEVSKON-DXN-NwVpw', None), - mock.call.createQoSRules( - 'vvs-0DM4qZEVSKON-DXN-NwVpw', - {'priority': 2} - ), - mock.call.modifyVolumeSet( - 'vvs-0DM4qZEVSKON-DXN-NwVpw', flashCachePolicy=1), - mock.call.addVolumeToVolumeSet( - 'vvs-0DM4qZEVSKON-DXN-NwVpw', - 'osv-0DM4qZEVSKON-DXN-NwVpw')] - - mock_client.assert_has_calls( - self.get_id_login + - self.standard_logout + - self.standard_login + - expected + - self.standard_logout) - self.assertIsNone(return_model) - - @mock.patch.object(volume_types, 'get_volume_type') - def test_unsupported_flash_cache_volume(self, _mock_volume_types): - - mock_client = 
self.setup_driver(wsapi_version=self.wsapi_version_312) - _mock_volume_types.return_value = { - 'name': 'flash-cache-on', - 'extra_specs': { - 'cpg': HPE3PAR_CPG2, - 'snap_cpg': HPE3PAR_CPG_SNAP, - 'vvs_name': self.VVS_NAME, - 'qos': self.QOS, - 'tpvv': True, - 'tdvv': False, - 'hpe3par:flash_cache': 'true', - 'volume_type': self.volume_type_flash_cache}} - - with mock.patch.object(hpecommon.HPE3PARCommon, - '_create_client') as mock_create_client: - mock_create_client.return_value = mock_client - common = self.driver._login() - - self.assertRaises(exception.InvalidInput, - common.get_flash_cache_policy, - self.flash_cache_3par_keys) - - @mock.patch.object(volume_types, 'get_volume_type') - def test_retype_not_3par(self, _mock_volume_types): - _mock_volume_types.return_value = self.RETYPE_VOLUME_TYPE_1 - mock_client = self.setup_driver(mock_conf=self.RETYPE_CONF) - - with mock.patch.object(hpecommon.HPE3PARCommon, - '_create_client') as mock_create_client: - mock_create_client.return_value = mock_client - self.assertRaises(exception.InvalidHost, - self.driver.retype, - self.ctxt, - self.RETYPE_VOLUME_INFO_0, - self.RETYPE_VOLUME_TYPE_1, - self.RETYPE_DIFF, - self.RETYPE_HOST_NOT3PAR) - - expected = [mock.call.getVolume(self.VOLUME_3PAR_NAME)] - mock_client.assert_has_calls( - self.standard_login + - expected + - self.standard_logout) - - @mock.patch.object(volume_types, 'get_volume_type') - def test_retype_volume_not_found(self, _mock_volume_types): - _mock_volume_types.return_value = self.RETYPE_VOLUME_TYPE_1 - mock_client = self.setup_driver(mock_conf=self.RETYPE_CONF) - mock_client.getVolume.side_effect = hpeexceptions.HTTPNotFound - - with mock.patch.object(hpecommon.HPE3PARCommon, - '_create_client') as mock_create_client: - mock_create_client.return_value = mock_client - self.assertRaises(hpeexceptions.HTTPNotFound, - self.driver.retype, - self.ctxt, - self.RETYPE_VOLUME_INFO_0, - self.RETYPE_VOLUME_TYPE_1, - self.RETYPE_DIFF, - self.RETYPE_HOST) - - expected 
= [mock.call.getVolume(self.VOLUME_3PAR_NAME)] - mock_client.assert_has_calls( - self.standard_login + - expected + - self.standard_logout) - - @mock.patch.object(volume_types, 'get_volume_type') - def test_retype_specs_error_reverts_snap_cpg(self, _mock_volume_types): - _mock_volume_types.side_effect = [ - self.RETYPE_VOLUME_TYPE_1, self.RETYPE_VOLUME_TYPE_0] - mock_client = self.setup_driver(mock_conf=self.RETYPE_CONF) - mock_client.getVolume.return_value = self.RETYPE_VOLUME_INFO_0 - - # Fail the QOS setting to test the revert of the snap CPG rename. - mock_client.addVolumeToVolumeSet.side_effect = \ - hpeexceptions.HTTPForbidden - - with mock.patch.object(hpecommon.HPE3PARCommon, - '_create_client') as mock_create_client: - mock_create_client.return_value = mock_client - self.assertRaises(hpeexceptions.HTTPForbidden, - self.driver.retype, - self.ctxt, - {'id': self.VOLUME_ID}, - self.RETYPE_VOLUME_TYPE_0, - self.RETYPE_DIFF, - self.RETYPE_HOST) - - old_settings = { - 'snapCPG': self.RETYPE_VOLUME_INFO_0['snapCPG'], - 'comment': self.RETYPE_VOLUME_INFO_0['comment']} - new_settings = { - 'snapCPG': ( - self.RETYPE_VOLUME_TYPE_1['extra_specs']['snap_cpg']), - 'comment': mock.ANY} - - expected = [ - mock.call.modifyVolume(self.VOLUME_3PAR_NAME, new_settings) - ] - mock_client.assert_has_calls(expected) - expected = [ - mock.call.modifyVolume(self.VOLUME_3PAR_NAME, old_settings) - ] - mock_client.assert_has_calls(expected + self.standard_logout) - - @mock.patch.object(volume_types, 'get_volume_type') - def test_retype_revert_comment(self, _mock_volume_types): - _mock_volume_types.side_effect = [ - self.RETYPE_VOLUME_TYPE_2, self.RETYPE_VOLUME_TYPE_1] - mock_client = self.setup_driver(mock_conf=self.RETYPE_CONF) - mock_client.getVolume.return_value = self.RETYPE_VOLUME_INFO_1 - - # Fail the QOS setting to test the revert of the snap CPG rename. 
- mock_client.deleteVolumeSet.side_effect = hpeexceptions.HTTPForbidden - - with mock.patch.object(hpecommon.HPE3PARCommon, - '_create_client') as mock_create_client: - mock_create_client.return_value = mock_client - self.assertRaises(hpeexceptions.HTTPForbidden, - self.driver.retype, - self.ctxt, - {'id': self.VOLUME_ID}, - self.RETYPE_VOLUME_TYPE_2, - self.RETYPE_DIFF, - self.RETYPE_HOST) - - original = { - 'snapCPG': self.RETYPE_VOLUME_INFO_1['snapCPG'], - 'comment': self.RETYPE_VOLUME_INFO_1['comment']} - - expected = [ - mock.call.modifyVolume('osv-0DM4qZEVSKON-DXN-NwVpw', original)] - mock_client.assert_has_calls(expected + self.standard_logout) - - @mock.patch.object(volume_types, 'get_volume_type') - def test_retype_different_array(self, _mock_volume_types): - _mock_volume_types.return_value = self.RETYPE_VOLUME_TYPE_1 - mock_client = self.setup_driver(mock_conf=self.RETYPE_CONF) - - mock_client.getStorageSystemInfo.return_value = { - 'id': self.CLIENT_ID, - 'serialNumber': 'XXXXXXX'} - - with mock.patch.object(hpecommon.HPE3PARCommon, - '_create_client') as mock_create_client: - mock_create_client.return_value = mock_client - self.assertRaises(exception.InvalidHost, - self.driver.retype, - self.ctxt, - self.RETYPE_VOLUME_INFO_0, - self.RETYPE_VOLUME_TYPE_1, - self.RETYPE_DIFF, - self.RETYPE_HOST) - - expected = [ - mock.call.getVolume(self.VOLUME_3PAR_NAME), - mock.call.getStorageSystemInfo()] - - mock_client.assert_has_calls( - self.standard_login + - expected + - self.standard_logout) - - @mock.patch.object(volume_types, 'get_volume_type') - def test_retype_across_cpg_domains(self, _mock_volume_types): - _mock_volume_types.return_value = self.RETYPE_VOLUME_TYPE_1 - mock_client = self.setup_driver(mock_conf=self.RETYPE_CONF) - - mock_client.getCPG.side_effect = [ - {'domain': 'domain1'}, - {'domain': 'domain2'}, - ] - - with mock.patch.object(hpecommon.HPE3PARCommon, - '_create_client') as mock_create_client: - mock_create_client.return_value = 
mock_client - self.assertRaises(exception.Invalid3PARDomain, - self.driver.retype, - self.ctxt, - self.RETYPE_VOLUME_INFO_0, - self.RETYPE_VOLUME_TYPE_1, - self.RETYPE_DIFF, - self.RETYPE_HOST) - - expected = [ - mock.call.getVolume(self.VOLUME_3PAR_NAME), - mock.call.getStorageSystemInfo(), - mock.call.getCPG(self.RETYPE_VOLUME_INFO_0['userCPG']), - mock.call.getCPG( - self.RETYPE_VOLUME_TYPE_1['extra_specs']['cpg']) - ] - mock_client.assert_has_calls( - self.standard_login + - expected + - self.standard_logout) - - @mock.patch.object(volume_types, 'get_volume_type') - def test_retype_across_snap_cpg_domains(self, _mock_volume_types): - _mock_volume_types.return_value = self.RETYPE_VOLUME_TYPE_1 - mock_client = self.setup_driver(mock_conf=self.RETYPE_CONF) - - mock_client.getCPG.side_effect = [ - {'domain': 'cpg_domain'}, - {'domain': 'cpg_domain'}, - {'domain': 'snap_cpg_domain_1'}, - ] - - with mock.patch.object(hpecommon.HPE3PARCommon, - '_create_client') as mock_create_client: - mock_create_client.return_value = mock_client - self.assertRaises(exception.Invalid3PARDomain, - self.driver.retype, - self.ctxt, - self.RETYPE_VOLUME_INFO_0, - self.RETYPE_VOLUME_TYPE_1, - self.RETYPE_DIFF, - self.RETYPE_HOST) - - expected = [ - mock.call.getVolume(self.VOLUME_3PAR_NAME), - mock.call.getStorageSystemInfo(), - mock.call.getCPG(self.RETYPE_VOLUME_INFO_0['userCPG']), - mock.call.getCPG( - self.RETYPE_VOLUME_TYPE_1['extra_specs']['cpg']), - mock.call.getCPG( - self.RETYPE_VOLUME_TYPE_1['extra_specs']['snap_cpg']) - ] - mock_client.assert_has_calls( - self.standard_login + - expected + - self.standard_logout) - - @mock.patch.object(volume_types, 'get_volume_type') - def test_retype_to_bad_persona(self, _mock_volume_types): - _mock_volume_types.return_value = self.RETYPE_VOLUME_TYPE_BAD_PERSONA - mock_client = self.setup_driver(mock_conf=self.RETYPE_CONF) - - with mock.patch.object(hpecommon.HPE3PARCommon, - '_create_client') as mock_create_client: - 
mock_create_client.return_value = mock_client - self.assertRaises(exception.InvalidInput, - self.driver.retype, - self.ctxt, - self.RETYPE_VOLUME_INFO_0, - self.RETYPE_VOLUME_TYPE_BAD_PERSONA, - self.RETYPE_DIFF, - self.RETYPE_HOST) - - expected = [mock.call.getVolume(self.VOLUME_3PAR_NAME)] - mock_client.assert_has_calls( - self.standard_login + - expected + - self.standard_logout) - - @mock.patch.object(volume_types, 'get_volume_type') - def test_retype_tune(self, _mock_volume_types): - _mock_volume_types.return_value = self.RETYPE_VOLUME_TYPE_1 - mock_client = self.setup_driver(mock_conf=self.RETYPE_CONF) - - qos_ref = qos_specs.create(self.ctxt, 'qos-specs-1', self.QOS) - type_ref = volume_types.create(self.ctxt, - "type1", {"qos:maxIOPS": "100", - "qos:maxBWS": "50", - "qos:minIOPS": "10", - "qos:minBWS": "20", - "qos:latency": "5", - "qos:priority": "high"}) - qos_specs.associate_qos_with_type(self.ctxt, - qos_ref['id'], - type_ref['id']) - - type_ref = volume_types.get_volume_type(self.ctxt, type_ref['id']) - - volume = {'id': HPE3PARBaseDriver.CLONE_ID} - - with mock.patch.object(hpecommon.HPE3PARCommon, - '_create_client') as mock_create_client: - mock_create_client.return_value = mock_client - - retyped = self.driver.retype( - self.ctxt, volume, type_ref, None, self.RETYPE_HOST) - self.assertTrue(retyped) - - expected = [ - mock.call.modifyVolume('osv-0DM4qZEVSKON-AAAAAAAAA', - {'comment': mock.ANY, - 'snapCPG': 'OpenStackCPGSnap'}), - mock.call.deleteVolumeSet('vvs-0DM4qZEVSKON-AAAAAAAAA'), - mock.call.addVolumeToVolumeSet('myvvs', - 'osv-0DM4qZEVSKON-AAAAAAAAA'), - mock.call.modifyVolume('osv-0DM4qZEVSKON-AAAAAAAAA', - {'action': 6, - 'userCPG': 'OpenStackCPG', - 'conversionOperation': 1, - 'compression': False, - 'tuneOperation': 1}), - mock.call.getTask(1), - ] - mock_client.assert_has_calls(expected + self.standard_logout) - - @mock.patch.object(volume_types, 'get_volume_type') - def test_retype_non_rep_type_to_rep_type(self, _mock_volume_types): - - 
conf = self.setup_configuration() - self.replication_targets[0]['replication_mode'] = 'periodic' - conf.replication_device = self.replication_targets - mock_client = self.setup_driver(config=conf) - mock_client.getStorageSystemInfo.return_value = ( - {'id': self.CLIENT_ID}) - mock_client.getRemoteCopyGroup.side_effect = ( - hpeexceptions.HTTPNotFound) - mock_client.getCPG.return_value = {'domain': None} - mock_replicated_client = self.setup_driver(config=conf) - mock_client.getStorageSystemInfo.return_value = { - 'id': self.REPLICATION_CLIENT_ID, - 'serialNumber': '1234567' - } - mock_client.modifyVolume.return_value = ("anyResponse", {'taskid': 1}) - mock_client.getTask.return_value = self.STATUS_DONE - - _mock_volume_types.return_value = { - 'name': 'replicated', - 'extra_specs': { - 'replication_enabled': ' True', - 'replication:mode': 'periodic', - 'replication:sync_period': '900', - 'volume_type': self.volume_type_replicated}} - - mock_client.getVolume.return_value = { - 'name': mock.ANY, - 'snapCPG': mock.ANY, - 'comment': "{'display_name': 'Foo Volume'}", - 'provisioningType': mock.ANY, - 'userCPG': 'OpenStackCPG', - 'snapCPG': 'OpenStackCPGSnap'} - - with mock.patch.object( - hpecommon.HPE3PARCommon, - '_create_client') as mock_create_client, \ - mock.patch.object( - hpecommon.HPE3PARCommon, - '_create_replication_client') as mock_replication_client: - mock_create_client.return_value = mock_client - mock_replication_client.return_value = mock_replicated_client - - retyped = self.driver.retype( - self.ctxt, - self.volume, - self.volume_type_replicated, - None, - self.RETYPE_HOST) - self.assertTrue(retyped) - backend_id = self.replication_targets[0]['backend_id'] - expected = [ - mock.call.createRemoteCopyGroup( - self.RCG_3PAR_NAME, - [{'userCPG': HPE3PAR_CPG_REMOTE, - 'targetName': backend_id, - 'mode': PERIODIC_MODE, - 'snapCPG': HPE3PAR_CPG_REMOTE}], - {'localUserCPG': HPE3PAR_CPG, - 'localSnapCPG': HPE3PAR_CPG_SNAP}), - 
mock.call.addVolumeToRemoteCopyGroup( - self.RCG_3PAR_NAME, - self.VOLUME_3PAR_NAME, - [{'secVolumeName': self.VOLUME_3PAR_NAME, - 'targetName': backend_id}], - optional={'volumeAutoCreation': True}), - mock.call.modifyRemoteCopyGroup( - self.RCG_3PAR_NAME, - {'targets': [{'syncPeriod': SYNC_PERIOD, - 'targetName': backend_id}]}), - mock.call.startRemoteCopy(self.RCG_3PAR_NAME)] - mock_client.assert_has_calls(expected + self.standard_logout) - - @mock.patch.object(volume_types, 'get_volume_type') - def test_retype_rep_type_to_non_rep_type(self, _mock_volume_types): - - conf = self.setup_configuration() - self.replication_targets[0]['replication_mode'] = 'periodic' - conf.replication_device = self.replication_targets - mock_client = self.setup_driver(config=conf) - mock_client.getStorageSystemInfo.return_value = ( - {'id': self.CLIENT_ID}) - mock_client.getRemoteCopyGroup.side_effect = ( - hpeexceptions.HTTPNotFound) - mock_client.getCPG.return_value = {'domain': None} - mock_replicated_client = self.setup_driver(config=conf) - mock_client.getStorageSystemInfo.return_value = { - 'id': self.REPLICATION_CLIENT_ID, - 'serialNumber': '1234567' - } - mock_client.modifyVolume.return_value = ("anyResponse", {'taskid': 1}) - mock_client.getTask.return_value = self.STATUS_DONE - - volume_1 = {'name': self.VOLUME_NAME, - 'id': self.VOLUME_ID, - 'display_name': 'Foo Volume', - 'replication_status': 'disabled', - 'provider_location': self.CLIENT_ID, - 'size': 2, - 'host': self.FAKE_CINDER_HOST, - 'volume_type': 'replicated', - 'volume_type_id': 'gold'} - - volume_type = {'name': 'replicated', - 'deleted': False, - 'updated_at': None, - 'deleted_at': None, - 'extra_specs': {'replication_enabled': 'False'}, - 'id': 'silver'} - - def get_side_effect(*args): - data = {'value': None} - if args[1] == 'gold': - data['value'] = { - 'name': 'replicated', - 'id': 'gold', - 'extra_specs': { - 'replication_enabled': ' True', - 'replication:mode': 'periodic', - 'replication:sync_period': 
'900', - 'volume_type': self.volume_type_replicated}} - elif args[1] == 'silver': - data['value'] = {'name': 'silver', - 'deleted': False, - 'updated_at': None, - 'extra_specs': { - 'replication_enabled': 'False'}, - 'deleted_at': None, - 'id': 'silver'} - return data['value'] - - _mock_volume_types.side_effect = get_side_effect - - mock_client.getVolume.return_value = { - 'name': mock.ANY, - 'snapCPG': mock.ANY, - 'comment': "{'display_name': 'Foo Volume'}", - 'provisioningType': mock.ANY, - 'userCPG': 'OpenStackCPG', - 'snapCPG': 'OpenStackCPGSnap'} - - with mock.patch.object( - hpecommon.HPE3PARCommon, - '_create_client') as mock_create_client, \ - mock.patch.object( - hpecommon.HPE3PARCommon, - '_create_replication_client') as mock_replication_client: - mock_create_client.return_value = mock_client - mock_replication_client.return_value = mock_replicated_client - - retyped = self.driver.retype( - self.ctxt, volume_1, volume_type, None, self.RETYPE_HOST) - self.assertTrue(retyped) - - expected = [ - mock.call.stopRemoteCopy(self.RCG_3PAR_NAME), - mock.call.removeVolumeFromRemoteCopyGroup( - self.RCG_3PAR_NAME, - self.VOLUME_3PAR_NAME, - removeFromTarget=True), - mock.call.removeRemoteCopyGroup(self.RCG_3PAR_NAME)] - - mock_client.assert_has_calls( - self.get_id_login + - self.standard_logout + - self.standard_login + - expected + - self.standard_logout, any_order =True) - - @mock.patch.object(volume_types, 'get_volume_type') - def test_retype_rep_type_to_rep_type(self, _mock_volume_types): - - conf = self.setup_configuration() - self.replication_targets[0]['replication_mode'] = 'periodic' - conf.replication_device = self.replication_targets - mock_client = self.setup_driver(config=conf) - mock_client.getStorageSystemInfo.return_value = ( - {'id': self.CLIENT_ID}) - mock_client.getRemoteCopyGroup.side_effect = ( - hpeexceptions.HTTPNotFound) - mock_client.getCPG.return_value = {'domain': None} - mock_replicated_client = self.setup_driver(config=conf) - 
mock_client.getStorageSystemInfo.return_value = { - 'id': self.REPLICATION_CLIENT_ID, - 'serialNumber': '1234567' - } - mock_client.modifyVolume.return_value = ("anyResponse", {'taskid': 1}) - mock_client.getTask.return_value = self.STATUS_DONE - - volume_1 = {'name': self.VOLUME_NAME, - 'id': self.VOLUME_ID, - 'display_name': 'Foo Volume', - 'replication_status': 'disabled', - 'provider_location': self.CLIENT_ID, - 'size': 2, - 'host': self.FAKE_CINDER_HOST, - 'volume_type': 'replicated', - 'volume_type_id': 'gold'} - - volume_type = {'name': 'replicated', - 'deleted': False, - 'updated_at': None, - 'deleted_at': None, - 'extra_specs': {'replication_enabled': ' True'}, - 'id': 'silver'} - - def get_side_effect(*args): - data = {'value': None} - if args[1] == 'gold': - data['value'] = { - 'name': 'replicated', - 'id': 'gold', - 'extra_specs': { - 'replication_enabled': ' True', - 'replication:mode': 'periodic', - 'replication:sync_period': '900', - 'volume_type': self.volume_type_replicated}} - elif args[1] == 'silver': - data['value'] = { - 'name': 'silver', - 'deleted': False, - 'updated_at': None, - 'extra_specs': { - 'replication_enabled': ' True', - 'replication:mode': 'periodic', - 'replication:sync_period': '1500', - 'volume_type': self.volume_type_replicated}, - 'deleted_at': None, - 'id': 'silver'} - return data['value'] - - _mock_volume_types.side_effect = get_side_effect - - mock_client.getVolume.return_value = { - 'name': mock.ANY, - 'snapCPG': mock.ANY, - 'comment': "{'display_name': 'Foo Volume'}", - 'provisioningType': mock.ANY, - 'userCPG': 'OpenStackCPG', - 'snapCPG': 'OpenStackCPGSnap'} - - with mock.patch.object( - hpecommon.HPE3PARCommon, - '_create_client') as mock_create_client, \ - mock.patch.object( - hpecommon.HPE3PARCommon, - '_create_replication_client') as mock_replication_client: - mock_create_client.return_value = mock_client - mock_replication_client.return_value = mock_replicated_client - - backend_id = 
self.replication_targets[0]['backend_id'] - retyped = self.driver.retype( - self.ctxt, volume_1, volume_type, None, self.RETYPE_HOST) - self.assertTrue(retyped) - - expected = [ - mock.call.stopRemoteCopy(self.RCG_3PAR_NAME), - mock.call.removeVolumeFromRemoteCopyGroup( - self.RCG_3PAR_NAME, - self.VOLUME_3PAR_NAME, - removeFromTarget=True), - mock.call.removeRemoteCopyGroup(self.RCG_3PAR_NAME), - mock.call.createRemoteCopyGroup( - self.RCG_3PAR_NAME, - [{'userCPG': HPE3PAR_CPG_REMOTE, - 'targetName': backend_id, - 'mode': PERIODIC_MODE, - 'snapCPG': HPE3PAR_CPG_REMOTE}], - {'localUserCPG': HPE3PAR_CPG, - 'localSnapCPG': HPE3PAR_CPG_SNAP}), - mock.call.addVolumeToRemoteCopyGroup( - self.RCG_3PAR_NAME, - self.VOLUME_3PAR_NAME, - [{'secVolumeName': self.VOLUME_3PAR_NAME, - 'targetName': backend_id}], - optional={'volumeAutoCreation': True}), - mock.call.startRemoteCopy(self.RCG_3PAR_NAME)] - - mock_client.assert_has_calls( - self.get_id_login + - self.standard_logout + - self.standard_login + - expected + - self.standard_logout, any_order =True) - - @mock.patch.object(volume_types, 'get_volume_type') - def test_retype_qos_spec(self, _mock_volume_types): - _mock_volume_types.return_value = self.RETYPE_VOLUME_TYPE_1 - mock_client = self.setup_driver(mock_conf=self.RETYPE_CONF) - - cpg = "any_cpg" - snap_cpg = "any_cpg" - - with mock.patch.object(hpecommon.HPE3PARCommon, - '_create_client') as mock_create_client: - mock_create_client.return_value = mock_client - common = self.driver._login() - common._retype(self.volume, - HPE3PARBaseDriver.VOLUME_3PAR_NAME, - "old_type", "old_type_id", - HPE3PARBaseDriver.RETYPE_HOST, - None, cpg, cpg, snap_cpg, snap_cpg, - True, False, False, True, None, None, - self.QOS_SPECS, self.RETYPE_QOS_SPECS, - None, None, - "{}", None) - - expected = [ - mock.call.createVolumeSet('vvs-0DM4qZEVSKON-DXN-NwVpw', None), - mock.call.createQoSRules( - 'vvs-0DM4qZEVSKON-DXN-NwVpw', - {'ioMinGoal': 100, 'ioMaxLimit': 1000, - 'bwMinGoalKB': 25600, 
'bwMaxLimitKB': 51200, - 'priority': 3, - 'latencyGoal': 25} - ), - mock.call.addVolumeToVolumeSet( - 'vvs-0DM4qZEVSKON-DXN-NwVpw', - 'osv-0DM4qZEVSKON-DXN-NwVpw')] - mock_client.assert_has_calls(expected) - - @mock.patch.object(volume_types, 'get_volume_type') - def test_retype_dedup(self, _mock_volume_types): - _mock_volume_types.return_value = self.RETYPE_VOLUME_TYPE_3 - mock_client = self.setup_driver(mock_conf=self.RETYPE_CONF) - - cpg = "any_cpg" - snap_cpg = "any_cpg" - with mock.patch.object(hpecommon.HPE3PARCommon, - '_create_client') as mock_create_client: - mock_create_client.return_value = mock_client - common = self.driver._login() - common._retype(self.volume, - HPE3PARBaseDriver.VOLUME_3PAR_NAME, - "old_type", "old_type_id", - HPE3PARBaseDriver.RETYPE_HOST, - None, cpg, cpg, snap_cpg, snap_cpg, - True, False, False, True, None, None, - self.QOS_SPECS, self.RETYPE_QOS_SPECS, - None, None, - "{}", None) - - expected = [ - mock.call.addVolumeToVolumeSet(u'vvs-0DM4qZEVSKON-DXN-NwVpw', - 'osv-0DM4qZEVSKON-DXN-NwVpw'), - mock.call.modifyVolume('osv-0DM4qZEVSKON-DXN-NwVpw', - {'action': 6, - 'userCPG': 'any_cpg', - 'conversionOperation': 3, - 'compression': False, - 'tuneOperation': 1}), - mock.call.getTask(1)] - mock_client.assert_has_calls(expected) - - def test_delete_volume(self): - # setup_mock_client drive with default configuration - # and return the mock HTTP 3PAR client - mock_client = self.setup_driver() - with mock.patch.object(hpecommon.HPE3PARCommon, - '_create_client') as mock_create_client: - mock_create_client.return_value = mock_client - self.driver.delete_volume(self.volume) - - expected = [mock.call.deleteVolume(self.VOLUME_3PAR_NAME)] - - mock_client.assert_has_calls( - self.standard_login + - expected + - self.standard_logout) - - def test_delete_volume_online_clone_active(self): - # setup_mock_client drive with default configuration - # and return the mock HTTP 3PAR client - mock_client = self.setup_driver() - with 
mock.patch.object(hpecommon.HPE3PARCommon, - '_create_client') as mock_create_client: - mock_create_client.return_value = mock_client - ex = hpeexceptions.HTTPConflict("Online clone is active.") - ex._error_code = 151 - mock_client.deleteVolume = mock.Mock(side_effect=ex) - mock_client.isOnlinePhysicalCopy.return_value = True - self.driver.delete_volume(self.volume) - - expected = [ - mock.call.deleteVolume(self.VOLUME_3PAR_NAME), - mock.call.isOnlinePhysicalCopy(self.VOLUME_3PAR_NAME), - mock.call.stopOnlinePhysicalCopy(self.VOLUME_3PAR_NAME)] - - mock_client.assert_has_calls( - self.standard_login + - expected + - self.standard_logout) - - @mock.patch.object(volume_types, 'get_volume_type') - def test_delete_volume_replicated(self, _mock_volume_types): - # setup_mock_client drive with default configuration - # and return the mock HTTP 3PAR client - mock_client = self.setup_driver() - mock_client.getStorageSystemInfo.return_value = {'id': self.CLIENT_ID} - - _mock_volume_types.return_value = { - 'name': 'replicated', - 'extra_specs': { - 'cpg': HPE3PAR_CPG_QOS, - 'snap_cpg': HPE3PAR_CPG_SNAP, - 'vvs_name': self.VVS_NAME, - 'qos': self.QOS, - 'replication_enabled': ' True', - 'replication:mode': 'periodic', - 'replication:sync_period': '900', - 'volume_type': self.volume_type_replicated}} - with mock.patch.object(hpecommon.HPE3PARCommon, - '_create_client') as mock_create_client: - mock_create_client.return_value = mock_client - self.driver.delete_volume(self.volume_replicated) - - expected = [ - mock.call.stopRemoteCopy(self.RCG_3PAR_NAME), - mock.call.removeVolumeFromRemoteCopyGroup( - self.RCG_3PAR_NAME, - self.VOLUME_3PAR_NAME, - removeFromTarget=True), - mock.call.removeRemoteCopyGroup(self.RCG_3PAR_NAME), - mock.call.deleteVolume(self.VOLUME_3PAR_NAME)] - - mock_client.assert_has_calls( - self.get_id_login + - self.standard_logout + - self.standard_login + - expected + - self.standard_logout) - - def test_get_cpg_with_volume_return_usercpg(self): - # 
setup_mock_client drive with default configuration - # and return the mock HTTP 3PAR client - mock_client = self.setup_driver() - mock_client.getVolume.return_value = {'name': mock.ANY, - 'userCPG': HPE3PAR_CPG2} - with mock.patch.object(hpecommon.HPE3PARCommon, - '_create_client') as mock_create_client: - mock_create_client.return_value = mock_client - volume = {'id': HPE3PARBaseDriver.VOLUME_ID, - 'name': HPE3PARBaseDriver.VOLUME_NAME, - 'display_name': 'Foo Volume', - 'size': 2, - 'host': volume_utils.append_host(self.FAKE_HOST, - HPE3PAR_CPG2)} - common = self.driver._login() - user_cpg = common.get_cpg(volume) - common = hpecommon.HPE3PARCommon(None) - vol_name = common._get_3par_vol_name(volume['id']) - self.assertEqual(HPE3PAR_CPG2, user_cpg) - expected = [mock.call.getVolume(vol_name)] - - mock_client.assert_has_calls( - self.standard_login + - expected) - - def test_get_cpg_with_volume_return_snapcpg(self): - # setup_mock_client drive with default configuration - # and return the mock HTTP 3PAR client - mock_client = self.setup_driver() - mock_client.getVolume.return_value = {'name': mock.ANY, - 'snapCPG': HPE3PAR_CPG2} - with mock.patch.object(hpecommon.HPE3PARCommon, - '_create_client') as mock_create_client: - mock_create_client.return_value = mock_client - volume = {'id': HPE3PARBaseDriver.VOLUME_ID, - 'name': HPE3PARBaseDriver.VOLUME_NAME, - 'display_name': 'Foo Volume', - 'size': 2, - 'host': volume_utils.append_host(self.FAKE_HOST, - HPE3PAR_CPG2)} - common = self.driver._login() - snap_cpg = common.get_cpg(volume, allowSnap=True) - common = hpecommon.HPE3PARCommon(None) - vol_name = common._get_3par_vol_name(volume['id']) - self.assertEqual(HPE3PAR_CPG2, snap_cpg) - expected = [mock.call.getVolume(vol_name)] - - mock_client.assert_has_calls( - self.standard_login + - expected) - - def test_get_cpg_with_volume_return_no_cpg(self): - # setup_mock_client drive with default configuration - # and return the mock HTTP 3PAR client - mock_client = 
self.setup_driver() - mock_client.getVolume.return_value = {'name': mock.ANY} - with mock.patch.object(hpecommon.HPE3PARCommon, - '_create_client') as mock_create_client: - mock_create_client.return_value = mock_client - volume = {'id': HPE3PARBaseDriver.VOLUME_ID, - 'name': HPE3PARBaseDriver.VOLUME_NAME, - 'display_name': 'Foo Volume', - 'size': 2, - 'host': volume_utils.append_host(self.FAKE_HOST, - HPE3PAR_CPG2)} - common = self.driver._login() - cpg_name = common.get_cpg(volume) - common = hpecommon.HPE3PARCommon(None) - vol_name = common._get_3par_vol_name(volume['id']) - self.assertEqual(HPE3PAR_CPG2, cpg_name) - expected = [mock.call.getVolume(vol_name)] - - mock_client.assert_has_calls( - self.standard_login + - expected) - - def test_create_cloned_volume(self): - # setup_mock_client drive with default configuration - # and return the mock HTTP 3PAR client - mock_client = self.setup_driver() - mock_client.getVolume.return_value = {'name': mock.ANY} - mock_client.copyVolume.return_value = {'taskid': 1} - mock_client.getStorageSystemInfo.return_value = { - 'id': self.CLIENT_ID, - 'serialNumber': 'XXXXXXX'} - - with mock.patch.object(hpecommon.HPE3PARCommon, - '_create_client') as mock_create_client: - mock_create_client.return_value = mock_client - - volume = {'name': HPE3PARBaseDriver.VOLUME_NAME, - 'id': HPE3PARBaseDriver.CLONE_ID, - 'display_name': 'Foo Volume', - 'size': 2, - 'host': volume_utils.append_host(self.FAKE_HOST, - HPE3PAR_CPG2), - 'source_volid': HPE3PARBaseDriver.VOLUME_ID} - src_vref = {'id': HPE3PARBaseDriver.VOLUME_ID, - 'name': HPE3PARBaseDriver.VOLUME_NAME, - 'size': 2, 'status': 'available'} - model_update = self.driver.create_cloned_volume(volume, src_vref) - self.assertIsNone(model_update) - expectedcall = [ - mock.call.getStorageSystemInfo()] - - expected = [ - mock.call.copyVolume( - self.VOLUME_NAME_3PAR, - 'osv-0DM4qZEVSKON-AAAAAAAAA', - HPE3PAR_CPG2, - {'snapCPG': 'OpenStackCPGSnap', 'tpvv': True, - 'tdvv': False, 'online': 
True})] - - mock_client.assert_has_calls( - self.standard_login + - expectedcall + - self.standard_logout + - self.standard_login + - expected + - self.standard_logout) - - @mock.patch.object(volume_types, 'get_volume_type') - def test_clone_volume_with_vvs(self, _mock_volume_types): - # Setup_mock_client drive with default configuration - # and return the mock HTTP 3PAR client - mock_client = self.setup_driver() - - _mock_volume_types.return_value = { - 'name': 'gold', - 'id': 'gold-id', - 'extra_specs': {'vvs': self.VVS_NAME}} - - mock_client = self.setup_driver() - mock_client.getVolume.return_value = {'name': mock.ANY} - mock_client.copyVolume.return_value = {'taskid': 1} - - with mock.patch.object(hpecommon.HPE3PARCommon, - '_create_client') as mock_create_client: - mock_create_client.return_value = mock_client - common = self.driver._login() - - volume_vvs = {'id': self.CLONE_ID, - 'name': self.VOLUME_NAME, - 'display_name': 'Foo Volume', - 'size': 2, - 'host': self.FAKE_CINDER_HOST, - 'volume_type': 'gold', - 'volume_type_id': 'gold-id'} - - src_vref = {'id': self.VOLUME_ID, - 'name': self.VOLUME_NAME, - 'size': 2, 'status': 'available', - 'volume_type': 'gold', - 'host': self.FAKE_CINDER_HOST, - 'volume_type_id': 'gold-id'} - - model_update = self.driver.create_cloned_volume(volume_vvs, - src_vref) - self.assertIsNone(model_update) - - clone_vol_vvs = common.get_volume_settings_from_type(volume_vvs) - source_vol_vvs = common.get_volume_settings_from_type(src_vref) - - self.assertEqual(clone_vol_vvs, source_vol_vvs) - - expected = [ - mock.call.copyVolume( - self.VOLUME_NAME_3PAR, - 'osv-0DM4qZEVSKON-AAAAAAAAA', - 'OpenStackCPG', - {'snapCPG': 'OpenStackCPGSnap', 'tpvv': True, - 'tdvv': False, 'online': True}), - mock.call.addVolumeToVolumeSet( - self.VVS_NAME, - 'osv-0DM4qZEVSKON-AAAAAAAAA')] - - mock_client.assert_has_calls( - self.standard_login + - expected + - self.standard_logout) - - def test_backup_iscsi_volume_with_chap_disabled(self): - # 
setup_mock_client drive with default configuration - # and return the mock HTTP 3PAR client - mock_client = self.setup_driver() - mock_client.getVolume.return_value = {'name': mock.ANY} - mock_client.copyVolume.return_value = {'taskid': 1} - mock_client.getVolumeMetaData.side_effect = hpeexceptions.HTTPNotFound - - with mock.patch.object(hpecommon.HPE3PARCommon, - '_create_client') as mock_create_client: - mock_create_client.return_value = mock_client - - volume = {'name': HPE3PARBaseDriver.VOLUME_NAME, - 'id': HPE3PARBaseDriver.CLONE_ID, - 'display_name': 'Foo Volume', - 'size': 2, - 'host': volume_utils.append_host(self.FAKE_HOST, - HPE3PAR_CPG2)} - src_vref = {'id': HPE3PARBaseDriver.VOLUME_ID, - 'name': HPE3PARBaseDriver.VOLUME_NAME, - 'size': 2, 'status': 'backing-up'} - model_update = self.driver.create_cloned_volume(volume, src_vref) - self.assertIsNone(model_update) - - expected = [ - mock.call.copyVolume( - self.VOLUME_NAME_3PAR, - 'osv-0DM4qZEVSKON-AAAAAAAAA', - HPE3PAR_CPG2, - {'snapCPG': 'OpenStackCPGSnap', 'tpvv': True, - 'tdvv': False, 'online': True})] - - mock_client.assert_has_calls( - self.standard_login + - expected + - self.standard_logout) - - def test_create_clone_iscsi_volume_with_chap_disabled(self): - # setup_mock_client drive with default configuration - # and return the mock HTTP 3PAR client - config = self.setup_configuration() - config.hpe3par_iscsi_chap_enabled = True - mock_client = self.setup_driver(config=config) - mock_client.getVolume.return_value = {'name': mock.ANY} - mock_client.copyVolume.return_value = {'taskid': 1} - mock_client.getVolumeMetaData.side_effect = hpeexceptions.HTTPNotFound - - with mock.patch.object(hpecommon.HPE3PARCommon, - '_create_client') as mock_create_client: - mock_create_client.return_value = mock_client - - volume = {'name': HPE3PARBaseDriver.VOLUME_NAME, - 'id': HPE3PARBaseDriver.CLONE_ID, - 'display_name': 'Foo Volume', - 'size': 2, - 'host': volume_utils.append_host(self.FAKE_HOST, - HPE3PAR_CPG2)} 
- src_vref = {'id': HPE3PARBaseDriver.VOLUME_ID, - 'name': HPE3PARBaseDriver.VOLUME_NAME, - 'size': 2, 'status': 'available'} - model_update = self.driver.create_cloned_volume(volume, src_vref) - self.assertIsNone(model_update) - - common = hpecommon.HPE3PARCommon(None) - vol_name = common._get_3par_vol_name(src_vref['id']) - - expected = [ - mock.call.getVolumeMetaData(vol_name, - 'HPQ-cinder-CHAP-name'), - mock.call.copyVolume( - self.VOLUME_NAME_3PAR, - 'osv-0DM4qZEVSKON-AAAAAAAAA', - HPE3PAR_CPG2, - {'snapCPG': 'OpenStackCPGSnap', 'tpvv': True, - 'tdvv': False, 'online': True})] - - mock_client.assert_has_calls( - self.standard_login + - expected + - self.standard_logout) - - def test_backup_iscsi_volume_with_chap_enabled(self): - # setup_mock_client drive with default configuration - # and return the mock HTTP 3PAR client - config = self.setup_configuration() - config.hpe3par_iscsi_chap_enabled = True - mock_client = self.setup_driver(config=config) - mock_client.getVolume.return_value = {'name': mock.ANY} - task_id = 1 - mock_client.copyVolume.return_value = {'taskid': task_id} - mock_client.getVolumeMetaData.return_value = { - 'value': 'random-key'} - mock_client.getTask.return_value = {'status': 1} - with mock.patch.object(hpecommon.HPE3PARCommon, - '_create_client') as mock_create_client: - mock_create_client.return_value = mock_client - - volume = {'name': HPE3PARBaseDriver.VOLUME_NAME, - 'id': HPE3PARBaseDriver.CLONE_ID, - 'display_name': 'Foo Volume', - 'size': 5, - 'host': volume_utils.append_host(self.FAKE_HOST, - HPE3PAR_CPG2), - 'source_volid': HPE3PARBaseDriver.VOLUME_ID} - src_vref = {'id': HPE3PARBaseDriver.VOLUME_ID, - 'name': HPE3PARBaseDriver.VOLUME_NAME, - 'size': 5, 'status': 'backing-up'} - model_update = self.driver.create_cloned_volume(volume, src_vref) - self.assertIsNone(model_update) - - common = hpecommon.HPE3PARCommon(None) - vol_name = common._get_3par_vol_name(volume['id']) - src_vol_name = common._get_3par_vol_name(src_vref['id']) 
- optional = {'priority': 1} - comment = mock.ANY - - expected = [ - mock.call.getVolumeMetaData(src_vol_name, - 'HPQ-cinder-CHAP-name'), - mock.call.createVolume(vol_name, 'fakepool', - 5120, comment), - mock.call.copyVolume( - src_vol_name, - vol_name, - None, - optional=optional), - mock.call.getTask(task_id), - ] - - mock_client.assert_has_calls( - self.standard_login + - expected + - self.standard_logout) - - def test_create_cloned_volume_offline_copy(self): - # setup_mock_client drive with default configuration - # and return the mock HTTP 3PAR client - mock_client = self.setup_driver() - mock_client.getVolume.return_value = {'name': mock.ANY} - task_id = 1 - mock_client.copyVolume.return_value = {'taskid': task_id} - mock_client.getTask.return_value = {'status': 1} - with mock.patch.object(hpecommon.HPE3PARCommon, - '_create_client') as mock_create_client: - mock_create_client.return_value = mock_client - - volume = {'name': HPE3PARBaseDriver.VOLUME_NAME, - 'id': HPE3PARBaseDriver.CLONE_ID, - 'display_name': 'Foo Volume', - 'size': 5, - 'host': volume_utils.append_host(self.FAKE_HOST, - HPE3PAR_CPG2), - 'source_volid': HPE3PARBaseDriver.VOLUME_ID} - src_vref = {'id': HPE3PARBaseDriver.VOLUME_ID, - 'name': HPE3PARBaseDriver.VOLUME_NAME, - 'size': 2, 'status': 'available'} - model_update = self.driver.create_cloned_volume(volume, src_vref) - self.assertIsNone(model_update) - - common = hpecommon.HPE3PARCommon(None) - vol_name = common._get_3par_vol_name(volume['id']) - src_vol_name = common._get_3par_vol_name(src_vref['id']) - optional = {'priority': 1} - comment = mock.ANY - - expected = [ - mock.call.createVolume(vol_name, 'fakepool', - 5120, comment), - mock.call.copyVolume( - src_vol_name, - vol_name, - None, - optional=optional), - mock.call.getTask(task_id), - ] - - mock_client.assert_has_calls( - self.standard_login + - expected + - self.standard_logout) - - @mock.patch.object(volume_types, 'get_volume_type') - def test_create_cloned_qos_volume(self, 
_mock_volume_types): - _mock_volume_types.return_value = self.RETYPE_VOLUME_TYPE_2 - mock_client = self.setup_driver() - mock_client.getVolume.return_value = {'name': mock.ANY} - mock_client.copyVolume.return_value = {'taskid': 1} - with mock.patch.object(hpecommon.HPE3PARCommon, - '_create_client') as mock_create_client: - mock_create_client.return_value = mock_client - - src_vref = {'id': HPE3PARBaseDriver.CLONE_ID, - 'name': HPE3PARBaseDriver.VOLUME_NAME, - 'size': 2, 'status': 'available'} - volume = self.volume_qos.copy() - host = "TEST_HOST" - pool = "TEST_POOL" - volume_host = volume_utils.append_host(host, pool) - expected_cpg = pool - volume['id'] = HPE3PARBaseDriver.VOLUME_ID - volume['host'] = volume_host - volume['source_volid'] = HPE3PARBaseDriver.CLONE_ID - model_update = self.driver.create_cloned_volume(volume, src_vref) - self.assertIsNone(model_update) - - expected = [ - mock.call.getCPG(expected_cpg), - mock.call.copyVolume( - 'osv-0DM4qZEVSKON-AAAAAAAAA', - self.VOLUME_3PAR_NAME, - expected_cpg, - {'snapCPG': 'OpenStackCPGSnap', 'tpvv': True, - 'tdvv': False, 'online': True}), - mock.call.addVolumeToVolumeSet( - 'yourvvs', - 'osv-0DM4qZEVSKON-DXN-NwVpw')] - - mock_client.assert_has_calls( - self.standard_login + - expected + - self.standard_logout) - - def test_migrate_volume(self): - - conf = { - 'getStorageSystemInfo.return_value': { - 'id': self.CLIENT_ID, - 'serialNumber': '1234'}, - 'getTask.return_value': { - 'status': 1}, - 'getCPG.return_value': {}, - 'copyVolume.return_value': {'taskid': 1}, - 'getVolume.return_value': self.RETYPE_VOLUME_INFO_1 - } - - mock_client = self.setup_driver(mock_conf=conf) - - mock_client.getVolume.return_value = self.MANAGE_VOLUME_INFO - mock_client.modifyVolume.return_value = ("anyResponse", {'taskid': 1}) - mock_client.getTask.return_value = self.STATUS_DONE - - volume = {'name': HPE3PARBaseDriver.VOLUME_NAME, - 'id': HPE3PARBaseDriver.CLONE_ID, - 'display_name': 'Foo Volume', - 'volume_type_id': None, - 
'size': 2, - 'status': 'available', - 'host': HPE3PARBaseDriver.FAKE_HOST, - 'source_volid': HPE3PARBaseDriver.VOLUME_ID} - with mock.patch.object(hpecommon.HPE3PARCommon, - '_create_client') as mock_create_client: - mock_create_client.return_value = mock_client - common = self.driver._login() - - volume_name_3par = common._encode_name(volume['id']) - - loc_info = 'HPE3PARDriver:1234:CPG-FC1' - host = {'host': 'stack@3parfc1#CPG-FC1', - 'capabilities': {'location_info': loc_info}} - - result = self.driver.migrate_volume(context.get_admin_context(), - volume, host) - self.assertIsNotNone(result) - self.assertEqual((True, None), result) - - osv_matcher = 'osv-' + volume_name_3par - - comment = Comment({ - "display_name": "Foo Volume", - "qos": {}, - }) - - expected = [ - mock.call.modifyVolume( - osv_matcher, - {'comment': comment, - 'snapCPG': HPE3PAR_CPG_SNAP}), - mock.call.modifyVolume(osv_matcher, - {'action': 6, - 'userCPG': 'CPG-FC1', - 'conversionOperation': 1, - 'compression': False, - 'tuneOperation': 1}), - mock.call.getTask(mock.ANY) - ] - - mock_client.assert_has_calls(expected + self.standard_logout) - - @mock.patch.object(volume_types, 'get_volume_type') - def test_migrate_volume_with_type(self, _mock_volume_types): - _mock_volume_types.return_value = self.RETYPE_VOLUME_TYPE_2 - - conf = { - 'getStorageSystemInfo.return_value': { - 'id': self.CLIENT_ID, - 'serialNumber': '1234'}, - 'getTask.return_value': { - 'status': 1}, - 'getCPG.return_value': {}, - 'copyVolume.return_value': {'taskid': 1}, - 'getVolume.return_value': self.RETYPE_VOLUME_INFO_1 - } - - mock_client = self.setup_driver(mock_conf=conf) - - mock_client.getVolume.return_value = self.MANAGE_VOLUME_INFO - mock_client.modifyVolume.return_value = ("anyResponse", {'taskid': 1}) - mock_client.getTask.return_value = self.STATUS_DONE - - display_name = 'Foo Volume' - volume = {'name': HPE3PARBaseDriver.VOLUME_NAME, - 'id': HPE3PARBaseDriver.CLONE_ID, - 'display_name': display_name, - 
"volume_type_id": self.RETYPE_VOLUME_TYPE_2['id'], - 'size': 2, - 'status': 'available', - 'host': HPE3PARBaseDriver.FAKE_HOST, - 'source_volid': HPE3PARBaseDriver.VOLUME_ID} - - with mock.patch.object(hpecommon.HPE3PARCommon, - '_create_client') as mock_create_client: - mock_create_client.return_value = mock_client - common = self.driver._login() - - volume_name_3par = common._encode_name(volume['id']) - - loc_info = 'HPE3PARDriver:1234:CPG-FC1' - instance_host = 'stack@3parfc1#CPG-FC1' - host = {'host': instance_host, - 'capabilities': {'location_info': loc_info}} - - result = self.driver.migrate_volume(context.get_admin_context(), - volume, host) - self.assertIsNotNone(result) - # when the host and pool are the same we'll get None - self.assertEqual((True, None), result) - - osv_matcher = 'osv-' + volume_name_3par - - expected_comment = Comment({ - "display_name": display_name, - "volume_type_id": self.RETYPE_VOLUME_TYPE_2['id'], - "volume_type_name": self.RETYPE_VOLUME_TYPE_2['name'], - "vvs": self.RETYPE_VOLUME_TYPE_2['extra_specs']['vvs'] - }) - - expected = [ - mock.call.modifyVolume( - osv_matcher, - {'comment': expected_comment, - 'snapCPG': self.RETYPE_VOLUME_TYPE_2 - ['extra_specs']['snap_cpg']}), - mock.call.modifyVolume( - osv_matcher, - {'action': 6, - 'userCPG': 'CPG-FC1', - 'conversionOperation': 1, - 'tuneOperation': 1, - 'compression': False}), - mock.call.getTask(mock.ANY) - ] - - mock_client.assert_has_calls( - expected + - self.standard_logout) - - def test_migrate_volume_diff_host(self): - conf = { - 'getStorageSystemInfo.return_value': { - 'id': self.CLIENT_ID, - 'serialNumber': 'different'}, - } - - mock_client = self.setup_driver(mock_conf=conf) - - volume = {'name': HPE3PARBaseDriver.VOLUME_NAME, - 'id': HPE3PARBaseDriver.CLONE_ID, - 'display_name': 'Foo Volume', - 'volume_type_id': None, - 'size': 2, - 'status': 'available', - 'host': HPE3PARBaseDriver.FAKE_HOST, - 'source_volid': HPE3PARBaseDriver.VOLUME_ID} - - loc_info = 
'HPE3PARDriver:1234:CPG-FC1' - host = {'host': 'stack@3parfc1', - 'capabilities': {'location_info': loc_info}} - - with mock.patch.object(hpecommon.HPE3PARCommon, - '_create_client') as mock_create_client: - mock_create_client.return_value = mock_client - result = self.driver.migrate_volume(context.get_admin_context(), - volume, host) - self.assertIsNotNone(result) - self.assertEqual((False, None), result) - - @mock.patch.object(volume_types, 'get_volume_type') - def test_migrate_volume_diff_domain(self, _mock_volume_types): - _mock_volume_types.return_value = self.volume_type - - conf = { - 'getStorageSystemInfo.return_value': { - 'id': self.CLIENT_ID, - 'serialNumber': '1234'}, - 'getTask.return_value': { - 'status': 1}, - 'getCPG.return_value': {}, - 'copyVolume.return_value': {'taskid': 1}, - 'getVolume.return_value': self.RETYPE_VOLUME_INFO_1 - } - - mock_client = self.setup_driver(mock_conf=conf) - - mock_client.getVolume.return_value = self.MANAGE_VOLUME_INFO - mock_client.modifyVolume.return_value = ("anyResponse", {'taskid': 1}) - mock_client.getTask.return_value = self.STATUS_DONE - - volume = {'name': HPE3PARBaseDriver.VOLUME_NAME, - 'id': HPE3PARBaseDriver.CLONE_ID, - 'display_name': 'Foo Volume', - 'volume_type_id': None, - 'size': 2, - 'status': 'available', - 'host': HPE3PARBaseDriver.FAKE_HOST, - 'source_volid': HPE3PARBaseDriver.VOLUME_ID} - - with mock.patch.object(hpecommon.HPE3PARCommon, - '_create_client') as mock_create_client: - mock_create_client.return_value = mock_client - common = self.driver._login() - - volume_name_3par = common._encode_name(volume['id']) - - loc_info = 'HPE3PARDriver:1234:CPG-FC1' - host = {'host': 'stack@3parfc1#CPG-FC1', - 'capabilities': {'location_info': loc_info}} - - result = self.driver.migrate_volume(context.get_admin_context(), - volume, host) - self.assertIsNotNone(result) - self.assertEqual((True, None), result) - - osv_matcher = 'osv-' + volume_name_3par - - comment = Comment({"qos": {}, "display_name": 
"Foo Volume"}) - - expected = [ - mock.call.modifyVolume( - osv_matcher, - {'comment': comment, - 'snapCPG': HPE3PAR_CPG_SNAP}), - mock.call.modifyVolume(osv_matcher, - {'action': 6, - 'userCPG': 'CPG-FC1', - 'conversionOperation': 1, - 'tuneOperation': 1, - 'compression': False}), - mock.call.getTask(mock.ANY), - ] - - mock_client.assert_has_calls(expected + self.standard_logout) - - @mock.patch.object(volume_types, 'get_volume_type') - def test_migrate_volume_attached(self, _mock_volume_types): - _mock_volume_types.return_value = self.RETYPE_VOLUME_TYPE_1 - mock_client = self.setup_driver(mock_conf=self.RETYPE_CONF) - - volume = {'name': HPE3PARBaseDriver.VOLUME_NAME, - 'volume_type_id': None, - 'id': HPE3PARBaseDriver.CLONE_ID, - 'display_name': 'Foo Volume', - 'size': 2, - 'status': 'in-use', - 'host': HPE3PARBaseDriver.FAKE_HOST, - 'source_volid': HPE3PARBaseDriver.VOLUME_ID} - - with mock.patch.object(hpecommon.HPE3PARCommon, - '_create_client') as mock_create_client: - mock_create_client.return_value = mock_client - common = self.driver._login() - - volume_name_3par = common._encode_name(volume['id']) - osv_matcher = 'osv-' + volume_name_3par - - loc_info = 'HPE3PARDriver:1234567:CPG-FC1' - - protocol = "FC" - if self.properties['driver_volume_type'] == "iscsi": - protocol = "iSCSI" - - host = {'host': 'stack@3parfc1', - 'capabilities': {'location_info': loc_info, - 'storage_protocol': protocol}} - - result = self.driver.migrate_volume(context.get_admin_context(), - volume, host) - - new_comment = Comment({ - "qos": {}, - "retype_test": "test comment", - }) - expected = [ - mock.call.modifyVolume(osv_matcher, - {'comment': new_comment, - 'snapCPG': 'OpenStackCPGSnap'}), - mock.call.modifyVolume(osv_matcher, - {'action': 6, - 'userCPG': 'OpenStackCPG', - 'conversionOperation': 1, - 'tuneOperation': 1, - 'compression': False}), - mock.call.getTask(1), - mock.call.logout() - ] - mock_client.assert_has_calls(expected) - - self.assertIsNotNone(result) - 
self.assertEqual((True, {'host': 'stack@3parfc1#OpenStackCPG'}), - result) - - @mock.patch.object(volume_types, 'get_volume_type') - def test_migrate_volume_attached_diff_protocol(self, _mock_volume_types): - _mock_volume_types.return_value = self.RETYPE_VOLUME_TYPE_1 - mock_client = self.setup_driver(mock_conf=self.RETYPE_CONF) - - protocol = "OTHER" - - volume = {'name': HPE3PARBaseDriver.VOLUME_NAME, - 'volume_type_id': None, - 'id': HPE3PARBaseDriver.CLONE_ID, - 'display_name': 'Foo Volume', - 'size': 2, - 'status': 'in-use', - 'host': HPE3PARBaseDriver.FAKE_HOST, - 'source_volid': HPE3PARBaseDriver.VOLUME_ID} - - loc_info = 'HPE3PARDriver:1234567:CPG-FC1' - host = {'host': 'stack@3parfc1', - 'capabilities': {'location_info': loc_info, - 'storage_protocol': protocol}} - - result = self.driver.migrate_volume(context.get_admin_context(), - volume, host) - - self.assertIsNotNone(result) - self.assertEqual((False, None), result) - expected = [] - mock_client.assert_has_calls(expected) - - def test_update_migrated_volume(self): - mock_client = self.setup_driver() - fake_old_volume = {'id': self.VOLUME_ID} - provider_location = 'foo' - fake_new_volume = {'id': self.CLONE_ID, - '_name_id': self.CLONE_ID, - 'provider_location': provider_location} - original_volume_status = 'available' - with mock.patch.object(hpecommon.HPE3PARCommon, - '_create_client') as mock_create_client: - mock_create_client.return_value = mock_client - actual_update = self.driver.update_migrated_volume( - context.get_admin_context(), fake_old_volume, - fake_new_volume, original_volume_status) - - expected_update = {'_name_id': None, - 'provider_location': None} - self.assertEqual(expected_update, actual_update) - - expected = [ - mock.call.modifyVolume( - 'osv-0DM4qZEVSKON-DXN-NwVpw', - {'newName': u'tsv-0DM4qZEVSKON-DXN-NwVpw'}), - mock.call.modifyVolume( - 'osv-0DM4qZEVSKON-AAAAAAAAA', - {'newName': u'osv-0DM4qZEVSKON-DXN-NwVpw'}), - mock.call.modifyVolume( - 'tsv-0DM4qZEVSKON-DXN-NwVpw', - 
{'newName': u'osv-0DM4qZEVSKON-AAAAAAAAA'}) - ] - - mock_client.assert_has_calls( - self.standard_login + - expected + - self.standard_logout) - - def test_update_migrated_volume_attached(self): - mock_client = self.setup_driver() - fake_old_volume = {'id': self.VOLUME_ID} - provider_location = 'foo' - fake_new_volume = {'id': self.CLONE_ID, - '_name_id': self.CLONE_ID, - 'provider_location': provider_location} - original_volume_status = 'in-use' - - with mock.patch.object(hpecommon.HPE3PARCommon, - '_create_client') as mock_create_client: - mock_create_client.return_value = mock_client - actual_update = self.driver.update_migrated_volume( - context.get_admin_context(), fake_old_volume, - fake_new_volume, original_volume_status) - - expected_update = {'_name_id': fake_new_volume['_name_id'], - 'provider_location': provider_location} - self.assertEqual(expected_update, actual_update) - - def test_create_snapshot(self): - # setup_mock_client drive with default configuration - # and return the mock HTTP 3PAR client - mock_client = self.setup_driver() - with mock.patch.object(hpecommon.HPE3PARCommon, - '_create_client') as mock_create_client: - mock_create_client.return_value = mock_client - self.driver.create_snapshot(self.snapshot) - - comment = Comment({ - "volume_id": "761fc5e5-5191-4ec7-aeba-33e36de44156", - "display_name": "fakesnap", - "description": "test description name", - "volume_name": - "volume-d03338a9-9115-48a3-8dfc-35cdfcdc15a7", - }) - expected = [ - mock.call.createSnapshot( - 'oss-L4I73ONuTci9Fd4ceij-MQ', - 'osv-dh-F5VGRTseuujPjbeRBVg', - { - 'comment': comment, - 'readOnly': True})] - - mock_client.assert_has_calls( - self.standard_login + - expected + - self.standard_logout) - - def test_delete_snapshot(self): - # setup_mock_client drive with default configuration - # and return the mock HTTP 3PAR client - mock_client = self.setup_driver() - with mock.patch.object(hpecommon.HPE3PARCommon, - '_create_client') as mock_create_client: - 
mock_create_client.return_value = mock_client - self.driver.delete_snapshot(self.snapshot) - - expected = [ - mock.call.deleteVolume('oss-L4I73ONuTci9Fd4ceij-MQ')] - - mock_client.assert_has_calls( - self.standard_login + - expected + - self.standard_logout) - - def test_delete_snapshot_in_use(self): - # setup_mock_client drive with default configuration - # and return the mock HTTP 3PAR client - conf = { - 'getTask.return_value': { - 'status': 1}, - 'copyVolume.return_value': {'taskid': 1}, - 'getVolume.return_value': {} - } - mock_client = self.setup_driver(mock_conf=conf) - with mock.patch.object(hpecommon.HPE3PARCommon, - '_create_client') as mock_create_client: - mock_create_client.return_value = mock_client - common = self.driver._login() - volume = self.volume.copy() - model_update = self.driver.create_volume_from_snapshot( - self.volume, - self.snapshot) - self.assertIsNone(model_update) - - comment = Comment({ - "snapshot_id": "2f823bdc-e36e-4dc8-bd15-de1c7a28ff31", - "display_name": "Foo Volume", - "volume_id": "d03338a9-9115-48a3-8dfc-35cdfcdc15a7", - }) - volume_name_3par = common._encode_name(volume['id']) - osv_matcher = 'osv-' + volume_name_3par - omv_matcher = 'omv-' + volume_name_3par - - expected = [ - mock.call.createSnapshot( - self.VOLUME_3PAR_NAME, - 'oss-L4I73ONuTci9Fd4ceij-MQ', - { - 'comment': comment, - 'readOnly': False}), - mock.call.copyVolume( - osv_matcher, omv_matcher, HPE3PAR_CPG, mock.ANY), - mock.call.getTask(mock.ANY), - mock.call.getVolume(osv_matcher), - mock.call.deleteVolume(osv_matcher), - mock.call.modifyVolume(omv_matcher, {'newName': osv_matcher})] - - mock_client.assert_has_calls( - self.standard_login + - expected + - self.standard_logout) - - ex = hpeexceptions.HTTPConflict("In use") - ex._error_code = 32 - mock_client.deleteVolume = mock.Mock(side_effect=ex) - - # Deleting the snapshot that a volume is dependent on should fail - self.assertRaises(exception.SnapshotIsBusy, - self.driver.delete_snapshot, - 
self.snapshot) - - def test_delete_snapshot_not_found(self): - # setup_mock_client drive with default configuration - # and return the mock HTTP 3PAR client - mock_client = self.setup_driver() - - with mock.patch.object(hpecommon.HPE3PARCommon, - '_create_client') as mock_create_client: - mock_create_client.return_value = mock_client - self.driver.create_snapshot(self.snapshot) - - try: - ex = hpeexceptions.HTTPNotFound("not found") - mock_client.deleteVolume = mock.Mock(side_effect=ex) - self.driver.delete_snapshot(self.snapshot) - except Exception: - self.fail("Deleting a snapshot that is missing should act " - "as if it worked.") - - def test_create_volume_from_snapshot(self): - # setup_mock_client drive with default configuration - # and return the mock HTTP 3PAR client - conf = { - 'getTask.return_value': { - 'status': 1}, - 'copyVolume.return_value': {'taskid': 1}, - 'getVolume.return_value': {} - } - mock_client = self.setup_driver(mock_conf=conf) - with mock.patch.object(hpecommon.HPE3PARCommon, - '_create_client') as mock_create_client: - mock_create_client.return_value = mock_client - common = self.driver._login() - volume = self.volume.copy() - model_update = self.driver.create_volume_from_snapshot( - self.volume, - self.snapshot) - self.assertIsNone(model_update) - - comment = Comment({ - "snapshot_id": "2f823bdc-e36e-4dc8-bd15-de1c7a28ff31", - "display_name": "Foo Volume", - "volume_id": "d03338a9-9115-48a3-8dfc-35cdfcdc15a7", - }) - volume_name_3par = common._encode_name(volume['id']) - osv_matcher = 'osv-' + volume_name_3par - omv_matcher = 'omv-' + volume_name_3par - - expected = [ - mock.call.createSnapshot( - self.VOLUME_3PAR_NAME, - 'oss-L4I73ONuTci9Fd4ceij-MQ', - { - 'comment': comment, - 'readOnly': False}), - mock.call.copyVolume( - osv_matcher, omv_matcher, HPE3PAR_CPG, mock.ANY), - mock.call.getTask(mock.ANY), - mock.call.getVolume(osv_matcher), - mock.call.deleteVolume(osv_matcher), - mock.call.modifyVolume(omv_matcher, {'newName': 
osv_matcher})] - - mock_client.assert_has_calls( - self.standard_login + - expected + - self.standard_logout) - - volume = self.volume.copy() - volume['size'] = 1 - self.assertRaises(exception.InvalidInput, - self.driver.create_volume_from_snapshot, - volume, self.snapshot) - - def test_create_volume_from_snapshot_and_extend(self): - # setup_mock_client drive with default configuration - # and return the mock HTTP 3PAR client - conf = { - 'getTask.return_value': { - 'status': 1}, - 'copyVolume.return_value': {'taskid': 1}, - 'getVolume.return_value': {} - } - - mock_client = self.setup_driver(mock_conf=conf) - with mock.patch.object(hpecommon.HPE3PARCommon, - '_create_client') as mock_create_client: - mock_create_client.return_value = mock_client - common = self.driver._login() - - volume = self.volume.copy() - volume['size'] = self.volume['size'] + 10 - model_update = self.driver.create_volume_from_snapshot( - volume, - self.snapshot) - self.assertIsNone(model_update) - - comment = Comment({ - "snapshot_id": "2f823bdc-e36e-4dc8-bd15-de1c7a28ff31", - "display_name": "Foo Volume", - "volume_id": "d03338a9-9115-48a3-8dfc-35cdfcdc15a7", - }) - volume_name_3par = common._encode_name(volume['id']) - osv_matcher = 'osv-' + volume_name_3par - omv_matcher = 'omv-' + volume_name_3par - - expected = [ - mock.call.createSnapshot( - self.VOLUME_3PAR_NAME, - 'oss-L4I73ONuTci9Fd4ceij-MQ', - { - 'comment': comment, - 'readOnly': False}), - mock.call.copyVolume( - osv_matcher, omv_matcher, HPE3PAR_CPG, mock.ANY), - mock.call.getTask(mock.ANY), - mock.call.getVolume(osv_matcher), - mock.call.deleteVolume(osv_matcher), - mock.call.modifyVolume(omv_matcher, {'newName': osv_matcher}), - mock.call.growVolume(osv_matcher, 10 * 1024)] - - mock_client.assert_has_calls( - self.standard_login + - expected + - self.standard_logout) - - @mock.patch.object(volume_types, 'get_volume_type') - def test_create_volume_from_snapshot_and_extend_with_qos( - self, _mock_volume_types): - # 
setup_mock_client drive with default configuration - # and return the mock HTTP 3PAR client - conf = { - 'getTask.return_value': { - 'status': 1}, - 'copyVolume.return_value': {'taskid': 1}, - 'getVolume.return_value': {} - } - - mock_client = self.setup_driver(mock_conf=conf) - _mock_volume_types.return_value = { - 'name': 'gold', - 'extra_specs': { - 'cpg': HPE3PAR_CPG_QOS, - 'snap_cpg': HPE3PAR_CPG_SNAP, - 'vvs_name': self.VVS_NAME, - 'qos': self.QOS, - 'tpvv': True, - 'tdvv': False, - 'volume_type': self.volume_type}} - - with mock.patch.object(hpecommon.HPE3PARCommon, - '_create_client') as mock_create_client: - mock_create_client.return_value = mock_client - common = self.driver._login() - - volume = self.volume_qos.copy() - volume['size'] = self.volume['size'] + 10 - model_update = self.driver.create_volume_from_snapshot( - volume, - self.snapshot) - self.assertIsNone(model_update) - - comment = Comment({ - "snapshot_id": "2f823bdc-e36e-4dc8-bd15-de1c7a28ff31", - "display_name": "Foo Volume", - "volume_id": "d03338a9-9115-48a3-8dfc-35cdfcdc15a7", - }) - - volume_name_3par = common._encode_name(volume['id']) - osv_matcher = 'osv-' + volume_name_3par - omv_matcher = 'omv-' + volume_name_3par - - expected = [ - mock.call.createSnapshot( - self.VOLUME_3PAR_NAME, - 'oss-L4I73ONuTci9Fd4ceij-MQ', - { - 'comment': comment, - 'readOnly': False}), - mock.call.getCPG(HPE3PAR_CPG), - mock.call.copyVolume( - osv_matcher, omv_matcher, HPE3PAR_CPG, mock.ANY), - mock.call.getTask(mock.ANY), - mock.call.getVolume(osv_matcher), - mock.call.deleteVolume(osv_matcher), - mock.call.modifyVolume(omv_matcher, {'newName': osv_matcher}), - mock.call.growVolume(osv_matcher, 10 * 1024)] - - mock_client.assert_has_calls( - self.standard_login + - expected + - self.standard_logout) - - def test_create_volume_from_snapshot_and_extend_copy_fail(self): - # setup_mock_client drive with default configuration - # and return the mock HTTP 3PAR client - conf = { - 'getTask.return_value': { - 
'status': 4, - 'failure message': 'out of disk space'}, - 'copyVolume.return_value': {'taskid': 1}, - 'getVolume.return_value': {} - } - - mock_client = self.setup_driver(mock_conf=conf) - - with mock.patch.object(hpecommon.HPE3PARCommon, - '_create_client') as mock_create_client: - mock_create_client.return_value = mock_client - volume = self.volume.copy() - volume['size'] = self.volume['size'] + 10 - - self.assertRaises(exception.CinderException, - self.driver.create_volume_from_snapshot, - volume, self.snapshot) - - @mock.patch.object(volume_types, 'get_volume_type') - def test_create_volume_from_snapshot_qos(self, _mock_volume_types): - # setup_mock_client drive with default configuration - # and return the mock HTTP 3PAR client - conf = { - 'getTask.return_value': { - 'status': 1}, - 'copyVolume.return_value': {'taskid': 1}, - 'getVolume.return_value': {} - } - mock_client = self.setup_driver(mock_conf=conf) - _mock_volume_types.return_value = { - 'name': 'gold', - 'extra_specs': { - 'cpg': HPE3PAR_CPG, - 'snap_cpg': HPE3PAR_CPG_SNAP, - 'vvs_name': self.VVS_NAME, - 'qos': self.QOS, - 'tpvv': True, - 'tdvv': False, - 'volume_type': self.volume_type}} - with mock.patch.object(hpecommon.HPE3PARCommon, - '_create_client') as mock_create_client: - mock_create_client.return_value = mock_client - common = self.driver._login() - volume = self.volume_qos.copy() - model_update = self.driver.create_volume_from_snapshot( - volume, self.snapshot) - self.assertIsNone(model_update) - - comment = Comment({ - "snapshot_id": "2f823bdc-e36e-4dc8-bd15-de1c7a28ff31", - "display_name": "Foo Volume", - "volume_id": "d03338a9-9115-48a3-8dfc-35cdfcdc15a7", - }) - volume_name_3par = common._encode_name(volume['id']) - osv_matcher = 'osv-' + volume_name_3par - omv_matcher = 'omv-' + volume_name_3par - - expected = [ - mock.call.createSnapshot( - self.VOLUME_3PAR_NAME, - 'oss-L4I73ONuTci9Fd4ceij-MQ', { - 'comment': comment, - 'readOnly': False}), - mock.call.getCPG(HPE3PAR_CPG), - 
mock.call.copyVolume( - osv_matcher, omv_matcher, HPE3PAR_CPG, mock.ANY), - mock.call.getTask(mock.ANY), - mock.call.getVolume(osv_matcher), - mock.call.deleteVolume(osv_matcher), - mock.call.modifyVolume(omv_matcher, {'newName': osv_matcher})] - - mock_client.assert_has_calls( - self.standard_login + - expected + - self.standard_logout) - - volume = self.volume.copy() - volume['size'] = 1 - self.assertRaises(exception.InvalidInput, - self.driver.create_volume_from_snapshot, - volume, self.snapshot) - - def test_terminate_connection(self): - # setup_mock_client drive with default configuration - # and return the mock HTTP 3PAR client - mock_client = self.setup_driver() - mock_client.getHostVLUNs.return_value = [ - {'active': False, - 'volumeName': self.VOLUME_3PAR_NAME, - 'lun': None, 'type': 0}] - - mock_client.queryHost.return_value = { - 'members': [{ - 'name': self.FAKE_HOST - }] - } - - with mock.patch.object(hpecommon.HPE3PARCommon, - '_create_client') as mock_create_client: - mock_create_client.return_value = mock_client - self.driver.terminate_connection( - self.volume, - self.connector, - force=True) - - expected = [ - mock.call.queryHost(iqns=[self.connector['initiator']]), - mock.call.getHostVLUNs(self.FAKE_HOST), - mock.call.deleteVLUN( - self.VOLUME_3PAR_NAME, - None, - hostname=self.FAKE_HOST), - mock.call.getHostVLUNs(self.FAKE_HOST), - mock.call.deleteHost(self.FAKE_HOST), - mock.call.removeVolumeMetaData( - self.VOLUME_3PAR_NAME, CHAP_USER_KEY), - mock.call.removeVolumeMetaData( - self.VOLUME_3PAR_NAME, CHAP_PASS_KEY)] - - mock_client.assert_has_calls( - self.standard_login + - expected + - self.standard_logout) - - def test_terminate_connection_from_primary_when_failed_over(self): - # setup_mock_client drive with default configuration - # and return the mock HTTP 3PAR client - mock_client = self.setup_driver() - mock_client.getHostVLUNs.side_effect = hpeexceptions.HTTPNotFound( - error={'desc': 'The host does not exist.'}) - - with 
mock.patch.object(hpecommon.HPE3PARCommon, - '_create_client') as mock_create_client: - mock_create_client.return_value = mock_client - - self.driver._active_backend_id = 'some_id' - self.driver.terminate_connection( - self.volume, - self.connector, - force=True) - - # When the volume is still attached to the primary array after a - # fail-over, there should be no call to delete the VLUN(s) or the - # host. We can assert these methods were not called to make sure - # the proper exceptions are being raised. - self.assertEqual(0, mock_client.deleteVLUN.call_count) - self.assertEqual(0, mock_client.deleteHost.call_count) - - def test_extend_volume(self): - # setup_mock_client drive with default configuration - # and return the mock HTTP 3PAR client - mock_client = self.setup_driver() - with mock.patch.object(hpecommon.HPE3PARCommon, - '_create_client') as mock_create_client: - mock_create_client.return_value = mock_client - grow_size = 3 - old_size = self.volume['size'] - new_size = old_size + grow_size - self.driver.extend_volume(self.volume, str(new_size)) - growth_size_mib = grow_size * units.Ki - - expected = [ - mock.call.growVolume(self.VOLUME_3PAR_NAME, growth_size_mib)] - - mock_client.assert_has_calls(expected) - - def test_extend_volume_non_base(self): - extend_ex = hpeexceptions.HTTPForbidden(error={'code': 150}) - conf = { - 'getTask.return_value': { - 'status': 1}, - 'getCPG.return_value': {}, - 'copyVolume.return_value': {'taskid': 1}, - 'getVolume.return_value': {}, - # Throw an exception first time only - 'growVolume.side_effect': [extend_ex, - None], - } - - mock_client = self.setup_driver(mock_conf=conf) - with mock.patch.object(hpecommon.HPE3PARCommon, - '_create_client') as mock_create_client: - mock_create_client.return_value = mock_client - grow_size = 3 - old_size = self.volume['size'] - new_size = old_size + grow_size - self.driver.extend_volume(self.volume, str(new_size)) - - self.assertEqual(2, mock_client.growVolume.call_count) - - def 
test_extend_volume_non_base_failure(self): - extend_ex = hpeexceptions.HTTPForbidden(error={'code': 150}) - conf = { - 'getTask.return_value': { - 'status': 1}, - 'getCPG.return_value': {}, - 'copyVolume.return_value': {'taskid': 1}, - 'getVolume.return_value': {}, - # Always fail - 'growVolume.side_effect': extend_ex - } - - mock_client = self.setup_driver(mock_conf=conf) - with mock.patch.object(hpecommon.HPE3PARCommon, - '_create_client') as mock_create_client: - mock_create_client.return_value = mock_client - grow_size = 3 - old_size = self.volume['size'] - new_size = old_size + grow_size - self.assertRaises(hpeexceptions.HTTPForbidden, - self.driver.extend_volume, - self.volume, - str(new_size)) - - @mock.patch.object(volume_types, 'get_volume_type') - def test_extend_volume_replicated(self, _mock_volume_types): - # Managed vs. unmanaged and periodic vs. sync are not relevant when - # extending a replicated volume type. - # We will use managed and periodic as the default. - conf = self.setup_configuration() - self.replication_targets[0]['replication_mode'] = 'periodic' - conf.replication_device = self.replication_targets - mock_client = self.setup_driver(config=conf) - mock_client.getStorageSystemInfo.return_value = ( - {'id': self.CLIENT_ID}) - - _mock_volume_types.return_value = { - 'name': 'replicated', - 'extra_specs': { - 'cpg': HPE3PAR_CPG, - 'snap_cpg': HPE3PAR_CPG_SNAP, - 'replication_enabled': ' True', - 'replication:mode': 'periodic', - 'replication:sync_period': '900', - 'volume_type': self.volume_type_replicated}} - - with mock.patch.object( - hpecommon.HPE3PARCommon, - '_create_client') as mock_create_client: - mock_create_client.return_value = mock_client - - grow_size = 3 - old_size = self.volume_replicated['size'] - new_size = old_size + grow_size - - # Test a successful extend. 
- self.driver.extend_volume( - self.volume_replicated, - new_size) - expected = [ - mock.call.stopRemoteCopy(self.RCG_3PAR_NAME), - mock.call.growVolume(self.VOLUME_3PAR_NAME, grow_size * 1024), - mock.call.startRemoteCopy(self.RCG_3PAR_NAME)] - mock_client.assert_has_calls( - self.get_id_login + - self.standard_logout + - self.standard_login + - expected + - self.standard_logout) - - # Test an unsuccessful extend. growVolume will fail but remote - # copy should still be started again. - mock_client.growVolume.side_effect = ( - hpeexceptions.HTTPForbidden("Error: The volume cannot be " - "extended.")) - self.assertRaises( - hpeexceptions.HTTPForbidden, - self.driver.extend_volume, - self.volume_replicated, - new_size) - expected = [ - mock.call.stopRemoteCopy(self.RCG_3PAR_NAME), - mock.call.growVolume(self.VOLUME_3PAR_NAME, grow_size * 1024), - mock.call.startRemoteCopy(self.RCG_3PAR_NAME)] - mock_client.assert_has_calls( - self.get_id_login + - self.standard_logout + - self.standard_login + - expected + - self.standard_logout) - - def test_get_ports(self): - # setup_mock_client drive with default configuration - # and return the mock HTTP 3PAR client - mock_client = self.setup_driver() - mock_client.getPorts.return_value = { - 'members': [ - {'portPos': {'node': 0, 'slot': 8, 'cardPort': 2}, - 'protocol': 2, - 'IPAddr': '10.10.120.252', - 'linkState': 4, - 'device': [], - 'iSCSIName': 'iqn.2000-05.com.3pardata:21810002ac00383d', - 'mode': 2, - 'HWAddr': '2C27D75375D2', - 'type': 8}, - {'portPos': {'node': 1, 'slot': 8, 'cardPort': 1}, - 'protocol': 2, - 'IPAddr': '10.10.220.253', - 'linkState': 4, - 'device': [], - 'iSCSIName': 'iqn.2000-05.com.3pardata:21810002ac00383d', - 'mode': 2, - 'HWAddr': '2C27D75375D6', - 'type': 8}, - {'portWWN': '20210002AC00383D', - 'protocol': 1, - 'linkState': 4, - 'mode': 2, - 'device': ['cage2'], - 'nodeWWN': '20210002AC00383D', - 'type': 2, - 'portPos': {'node': 0, 'slot': 6, 'cardPort': 3}}]} - - with 
mock.patch.object(hpecommon.HPE3PARCommon, - '_create_client') as mock_create_client: - mock_create_client.return_value = mock_client - common = self.driver._login() - ports = common.get_ports()['members'] - self.assertEqual(3, len(ports)) - - def test_get_by_qos_spec_with_scoping(self): - mock_client = self.setup_driver() - with mock.patch.object(hpecommon.HPE3PARCommon, - '_create_client') as mock_create_client: - mock_create_client.return_value = mock_client - common = self.driver._login() - qos_ref = qos_specs.create(self.ctxt, 'qos-specs-1', self.QOS) - type_ref = volume_types.create(self.ctxt, - "type1", {"qos:maxIOPS": "100", - "qos:maxBWS": "50", - "qos:minIOPS": "10", - "qos:minBWS": "20", - "qos:latency": "5", - "qos:priority": "high"}) - qos_specs.associate_qos_with_type(self.ctxt, - qos_ref['id'], - type_ref['id']) - type_ref = volume_types.get_volume_type(self.ctxt, type_ref['id']) - qos = common._get_qos_by_volume_type(type_ref) - self.assertEqual({'maxIOPS': '1000', 'maxBWS': '50', - 'minIOPS': '100', 'minBWS': '25', - 'latency': '25', 'priority': 'low'}, qos) - - def test_get_by_qos_spec(self): - mock_client = self.setup_driver() - with mock.patch.object(hpecommon.HPE3PARCommon, - '_create_client') as mock_create_client: - mock_create_client.return_value = mock_client - common = self.driver._login() - qos_ref = qos_specs.create( - self.ctxt, - 'qos-specs-1', - self.QOS_SPECS) - type_ref = volume_types.create(self.ctxt, - "type1", {"qos:maxIOPS": "100", - "qos:maxBWS": "50", - "qos:minIOPS": "10", - "qos:minBWS": "20", - "qos:latency": "5", - "qos:priority": "high"}) - qos_specs.associate_qos_with_type(self.ctxt, - qos_ref['id'], - type_ref['id']) - type_ref = volume_types.get_volume_type(self.ctxt, type_ref['id']) - qos = common._get_qos_by_volume_type(type_ref) - self.assertEqual({'maxIOPS': '1000', 'maxBWS': '50', - 'minIOPS': '100', 'minBWS': '25', - 'latency': '25', 'priority': 'low'}, qos) - - def test_get_by_qos_by_type_only(self): - 
mock_client = self.setup_driver() - with mock.patch.object(hpecommon.HPE3PARCommon, - '_create_client') as mock_create_client: - mock_create_client.return_value = mock_client - common = self.driver._login() - type_ref = volume_types.create(self.ctxt, - "type1", {"qos:maxIOPS": "100", - "qos:maxBWS": "50", - "qos:minIOPS": "10", - "qos:minBWS": "20", - "qos:latency": "5", - "qos:priority": "high"}) - type_ref = volume_types.get_volume_type(self.ctxt, type_ref['id']) - qos = common._get_qos_by_volume_type(type_ref) - self.assertEqual({'maxIOPS': '100', 'maxBWS': '50', - 'minIOPS': '10', 'minBWS': '20', - 'latency': '5', 'priority': 'high'}, qos) - - def test_create_vlun(self): - host = 'fake-host' - lun_id = 11 - nsp = '1:2:3' - mock_client = self.setup_driver() - with mock.patch.object(hpecommon.HPE3PARCommon, - '_create_client') as mock_create_client: - mock_create_client.return_value = mock_client - location = ("%(name)s,%(lunid)s,%(host)s,%(nsp)s" % - {'name': self.VOLUME_NAME, - 'lunid': lun_id, - 'host': host, - 'nsp': nsp}) - mock_client.createVLUN.return_value = location - - expected_info = {'volume_name': self.VOLUME_NAME, - 'lun_id': lun_id, - 'host_name': host, - 'nsp': nsp} - common = self.driver._login() - vlun_info = common._create_3par_vlun( - self.VOLUME_NAME, - host, - nsp) - self.assertEqual(expected_info, vlun_info) - - location = ("%(name)s,%(lunid)s,%(host)s" % - {'name': self.VOLUME_NAME, - 'lunid': lun_id, - 'host': host}) - mock_client.createVLUN.return_value = location - expected_info = {'volume_name': self.VOLUME_NAME, - 'lun_id': lun_id, - 'host_name': host} - vlun_info = common._create_3par_vlun( - self.VOLUME_NAME, - host, - None) - self.assertEqual(expected_info, vlun_info) - - def test_create_vlun_vlunid_zero(self): - # This will test "auto" for deactive when Lun ID is 0 - host = 'fake-host' - lun_id = 0 - nsp = '0:1:1' - port = {'node': 0, 'slot': 1, 'cardPort': 1} - - mock_client = self.setup_driver() - with 
mock.patch.object(hpecommon.HPE3PARCommon, - '_create_client') as mock_create_client: - mock_create_client.return_value = mock_client - - # _create_3par_vlun with nsp - location = ("%(name)s,%(lunid)s,%(host)s,%(nsp)s" % - {'name': self.VOLUME_NAME, - 'lunid': lun_id, - 'host': host, - 'nsp': nsp}) - mock_client.createVLUN.return_value = location - expected_info = {'volume_name': self.VOLUME_NAME, - 'lun_id': lun_id, - 'host_name': host, - 'nsp': nsp} - common = self.driver._login() - vlun_info = common._create_3par_vlun( - self.VOLUME_NAME, - host, - nsp, - lun_id=lun_id) - self.assertEqual(expected_info, vlun_info) - mock_client.createVLUN.assert_called_once_with(self.VOLUME_NAME, - hostname=host, - auto=False, - portPos=port, - lun=lun_id) - - # _create_3par_vlun without nsp - mock_client.reset_mock() - location = ("%(name)s,%(lunid)s,%(host)s" % - {'name': self.VOLUME_NAME, - 'lunid': lun_id, - 'host': host}) - mock_client.createVLUN.return_value = location - expected_info = {'volume_name': self.VOLUME_NAME, - 'lun_id': lun_id, - 'host_name': host} - vlun_info = common._create_3par_vlun( - self.VOLUME_NAME, - host, - None, - lun_id=lun_id) - self.assertEqual(expected_info, vlun_info) - mock_client.createVLUN.assert_called_once_with(self.VOLUME_NAME, - hostname=host, - auto=False, - lun=lun_id) - - def test__get_existing_volume_ref_name(self): - mock_client = self.setup_driver() - with mock.patch.object(hpecommon.HPE3PARCommon, - '_create_client') as mock_create_client: - mock_create_client.return_value = mock_client - common = self.driver._login() - - unm_matcher = common._get_3par_unm_name(self.volume['id']) - ums_matcher = common._get_3par_ums_name(self.volume['id']) - - existing_ref = {'source-name': unm_matcher} - result = common._get_existing_volume_ref_name(existing_ref) - self.assertEqual(unm_matcher, result) - - existing_ref = {'source-id': self.volume['id']} - result = common._get_existing_volume_ref_name(existing_ref) - self.assertEqual(unm_matcher, 
result) - - existing_ref = {'source-id': self.volume['id']} - result = common._get_existing_volume_ref_name(existing_ref, True) - self.assertEqual(ums_matcher, result) - - existing_ref = {'bad-key': 'foo'} - self.assertRaises( - exception.ManageExistingInvalidReference, - common._get_existing_volume_ref_name, - existing_ref) - - @mock.patch.object(volume_types, 'get_volume_type') - def test_manage_existing(self, _mock_volume_types): - _mock_volume_types.return_value = self.volume_type - mock_client = self.setup_driver() - - new_comment = Comment({ - "display_name": "Foo Volume", - "name": "volume-007dbfce-7579-40bc-8f90-a20b3902283e", - "volume_id": "007dbfce-7579-40bc-8f90-a20b3902283e", - "type": "OpenStack", - }) - volume = {'display_name': None, - 'host': self.FAKE_CINDER_HOST, - 'volume_type': 'gold', - 'volume_type_id': 'acfa9fa4-54a0-4340-a3d8-bfcf19aea65e', - 'id': '007dbfce-7579-40bc-8f90-a20b3902283e'} - - mock_client.getVolume.return_value = self.MANAGE_VOLUME_INFO - mock_client.modifyVolume.return_value = ("anyResponse", {'taskid': 1}) - mock_client.getTask.return_value = self.STATUS_DONE - - with mock.patch.object(hpecommon.HPE3PARCommon, - '_create_client') as mock_create_client: - mock_create_client.return_value = mock_client - common = self.driver._login() - - unm_matcher = common._get_3par_unm_name(self.volume['id']) - osv_matcher = common._get_3par_vol_name(volume['id']) - vvs_matcher = common._get_3par_vvs_name(volume['id']) - existing_ref = {'source-name': unm_matcher} - - expected_obj = {'display_name': 'Foo Volume'} - - obj = self.driver.manage_existing(volume, existing_ref) - - expected_manage = [ - mock.call.getVolume(existing_ref['source-name']), - mock.call.modifyVolume(existing_ref['source-name'], - {'newName': osv_matcher, - 'comment': new_comment}), - ] - - retype_comment_qos = Comment({ - "display_name": "Foo Volume", - "volume_type_name": self.volume_type['name'], - "volume_type_id": self.volume_type['id'], - "qos": { - 'maxIOPS': 
'1000', - 'maxBWS': '50', - 'minIOPS': '100', - 'minBWS': '25', - 'latency': '25', - 'priority': 'low' - } - }) - - expected_snap_cpg = HPE3PAR_CPG_SNAP - expected_retype_modify = [ - mock.call.modifyVolume(osv_matcher, - {'comment': retype_comment_qos, - 'snapCPG': expected_snap_cpg}), - mock.call.deleteVolumeSet(vvs_matcher), - ] - - expected_retype_specs = [ - mock.call.createVolumeSet(vvs_matcher, None), - mock.call.createQoSRules( - vvs_matcher, - {'ioMinGoal': 100, 'ioMaxLimit': 1000, - 'bwMinGoalKB': 25600, 'priority': 1, 'latencyGoal': 25, - 'bwMaxLimitKB': 51200}), - mock.call.addVolumeToVolumeSet(vvs_matcher, osv_matcher), - mock.call.modifyVolume( - osv_matcher, - {'action': 6, - 'userCPG': HPE3PAR_CPG, - 'conversionOperation': 1, 'tuneOperation': 1, - 'compression': False}), - mock.call.getTask(1) - ] - - mock_client.assert_has_calls(self.standard_login + expected_manage) - mock_client.assert_has_calls(expected_retype_modify) - mock_client.assert_has_calls( - expected_retype_specs + - self.standard_logout) - self.assertEqual(expected_obj, obj) - - @mock.patch.object(volume_types, 'get_volume_type') - def test_manage_existing_with_no_snap_cpg(self, _mock_volume_types): - _mock_volume_types.return_value = self.volume_type - mock_client = self.setup_driver() - - new_comment = Comment({ - "display_name": "Foo Volume", - "name": "volume-007dbfce-7579-40bc-8f90-a20b3902283e", - "volume_id": "007dbfce-7579-40bc-8f90-a20b3902283e", - "type": "OpenStack", - }) - - volume = {'display_name': None, - 'host': 'my-stack1@3parxxx#CPGNOTUSED', - 'volume_type': 'gold', - 'volume_type_id': 'acfa9fa4-54a0-4340-a3d8-bfcf19aea65e', - 'id': '007dbfce-7579-40bc-8f90-a20b3902283e'} - - mock_client.getVolume.return_value = self.MV_INFO_WITH_NO_SNAPCPG - mock_client.modifyVolume.return_value = ("anyResponse", {'taskid': 1}) - mock_client.getTask.return_value = self.STATUS_DONE - - with mock.patch.object(hpecommon.HPE3PARCommon, - '_create_client') as mock_create_client: - 
mock_create_client.return_value = mock_client - common = self.driver._login() - - unm_matcher = common._get_3par_unm_name(self.volume['id']) - osv_matcher = common._get_3par_vol_name(volume['id']) - existing_ref = {'source-name': unm_matcher} - - expected_obj = {'display_name': 'Foo Volume'} - - obj = self.driver.manage_existing(volume, existing_ref) - - expected_manage = [ - mock.call.getVolume(existing_ref['source-name']), - mock.call.modifyVolume( - existing_ref['source-name'], - {'newName': osv_matcher, - 'comment': new_comment, - # manage_existing() should be setting - # blank snapCPG to the userCPG - 'snapCPG': 'testUserCpg0'}) - ] - - mock_client.assert_has_calls(self.standard_login + expected_manage) - self.assertEqual(expected_obj, obj) - - @mock.patch.object(volume_types, 'get_volume_type') - def test_manage_existing_vvs(self, _mock_volume_types): - test_volume_type = self.RETYPE_VOLUME_TYPE_2 - vvs = test_volume_type['extra_specs']['vvs'] - _mock_volume_types.return_value = test_volume_type - mock_client = self.setup_driver() - - mock_client.getVolume.return_value = self.MANAGE_VOLUME_INFO - mock_client.modifyVolume.return_value = ("anyResponse", {'taskid': 1}) - mock_client.getTask.return_value = self.STATUS_DONE - - id = '007abcde-7579-40bc-8f90-a20b3902283e' - new_comment = Comment({ - "display_name": "Test Volume", - "name": ("volume-%s" % id), - "volume_id": id, - "type": "OpenStack", - }) - volume = {'display_name': 'Test Volume', - 'host': 'my-stack1@3parxxx#CPGNOTUSED', - 'volume_type': 'gold', - 'volume_type_id': 'acfa9fa4-54a0-4340-a3d8-bfcf19aea65e', - 'id': id} - - with mock.patch.object(hpecommon.HPE3PARCommon, - '_create_client') as mock_create_client: - mock_create_client.return_value = mock_client - common = self.driver._login() - - unm_matcher = common._get_3par_unm_name(self.volume['id']) - osv_matcher = common._get_3par_vol_name(volume['id']) - vvs_matcher = common._get_3par_vvs_name(volume['id']) - - existing_ref = {'source-name': 
unm_matcher} - - obj = self.driver.manage_existing(volume, existing_ref) - - expected_obj = {'display_name': 'Test Volume'} - expected_manage = [ - mock.call.getVolume(existing_ref['source-name']), - mock.call.modifyVolume(existing_ref['source-name'], - {'newName': osv_matcher, - 'comment': new_comment}) - ] - - retype_comment_vvs = Comment({ - "display_name": "Foo Volume", - "volume_type_name": test_volume_type['name'], - "volume_type_id": test_volume_type['id'], - "vvs": vvs - }) - - expected_retype = [ - mock.call.modifyVolume(osv_matcher, - {'comment': retype_comment_vvs, - 'snapCPG': 'OpenStackCPGSnap'}), - mock.call.deleteVolumeSet(vvs_matcher), - mock.call.addVolumeToVolumeSet(vvs, osv_matcher), - mock.call.modifyVolume(osv_matcher, - {'action': 6, - 'userCPG': 'CPGNOTUSED', - 'conversionOperation': 1, - 'tuneOperation': 1, - 'compression': False}), - mock.call.getTask(1) - ] - - mock_client.assert_has_calls(self.standard_login + expected_manage) - mock_client.assert_has_calls( - expected_retype + - self.standard_logout) - self.assertEqual(expected_obj, obj) - - def test_manage_existing_no_volume_type(self): - mock_client = self.setup_driver() - - comment = repr({"display_name": "Foo Volume"}) - new_comment = Comment({ - "type": "OpenStack", - "display_name": "Foo Volume", - "name": "volume-007dbfce-7579-40bc-8f90-a20b3902283e", - "volume_id": "007dbfce-7579-40bc-8f90-a20b3902283e", - }) - volume = {'display_name': None, - 'volume_type': None, - 'volume_type_id': None, - 'id': '007dbfce-7579-40bc-8f90-a20b3902283e'} - - mock_client.getVolume.return_value = {'comment': comment, - 'userCPG': 'testUserCpg0'} - - with mock.patch.object(hpecommon.HPE3PARCommon, - '_create_client') as mock_create_client: - mock_create_client.return_value = mock_client - common = self.driver._login() - unm_matcher = common._get_3par_unm_name(self.volume['id']) - osv_matcher = common._get_3par_vol_name(volume['id']) - existing_ref = {'source-name': unm_matcher} - - obj = 
self.driver.manage_existing(volume, existing_ref) - - expected_obj = {'display_name': 'Foo Volume'} - expected = [ - mock.call.getVolume(existing_ref['source-name']), - mock.call.modifyVolume(existing_ref['source-name'], - {'newName': osv_matcher, - 'comment': new_comment, - # manage_existing() should be setting - # blank snapCPG to the userCPG - 'snapCPG': 'testUserCpg0'}) - ] - - mock_client.assert_has_calls( - self.standard_login + - expected + - self.standard_logout) - self.assertEqual(expected_obj, obj) - - volume['display_name'] = 'Test Volume' - - obj = self.driver.manage_existing(volume, existing_ref) - - expected_obj = {'display_name': 'Test Volume'} - expected = [ - mock.call.getVolume(existing_ref['source-name']), - mock.call.modifyVolume(existing_ref['source-name'], - {'newName': osv_matcher, - 'comment': new_comment, - # manage_existing() should be setting - # blank snapCPG to the userCPG - 'snapCPG': 'testUserCpg0'}) - ] - - mock_client.assert_has_calls( - self.standard_login + - expected + - self.standard_logout) - self.assertEqual(expected_obj, obj) - - mock_client.getVolume.return_value = {'userCPG': 'testUserCpg0'} - volume['display_name'] = None - common = self.driver._login() - - obj = self.driver.manage_existing(volume, existing_ref) - - expected_obj = {'display_name': None} - expected = [ - mock.call.getVolume(existing_ref['source-name']), - mock.call.modifyVolume(existing_ref['source-name'], - {'newName': osv_matcher, - 'comment': new_comment, - # manage_existing() should be setting - # blank snapCPG to the userCPG - 'snapCPG': 'testUserCpg0'}) - ] - - mock_client.assert_has_calls( - self.standard_login + - expected + - self.standard_logout) - self.assertEqual(expected_obj, obj) - - def test_manage_existing_invalid_input(self): - mock_client = self.setup_driver() - - volume = {'display_name': None, - 'volume_type': None, - 'id': '007dbfce-7579-40bc-8f90-a20b3902283e'} - - mock_client.getVolume.side_effect = hpeexceptions.HTTPNotFound('fake') 
- - with mock.patch.object(hpecommon.HPE3PARCommon, - '_create_client') as mock_create_client: - mock_create_client.return_value = mock_client - common = self.driver._login() - unm_matcher = common._get_3par_unm_name(self.volume['id']) - existing_ref = {'source-name': unm_matcher} - - self.assertRaises(exception.InvalidInput, - self.driver.manage_existing, - volume=volume, - existing_ref=existing_ref) - - expected = [mock.call.getVolume(existing_ref['source-name'])] - - mock_client.assert_has_calls( - self.standard_login + - expected + - self.standard_logout) - - def test_manage_existing_volume_type_exception(self): - mock_client = self.setup_driver() - - comment = repr({"display_name": "Foo Volume"}) - volume = {'display_name': None, - 'volume_type': 'gold', - 'volume_type_id': 'bcfa9fa4-54a0-4340-a3d8-bfcf19aea65e', - 'id': '007dbfce-7579-40bc-8f90-a20b3902283e'} - - mock_client.getVolume.return_value = {'comment': comment} - - with mock.patch.object(hpecommon.HPE3PARCommon, - '_create_client') as mock_create_client: - mock_create_client.return_value = mock_client - common = self.driver._login() - unm_matcher = common._get_3par_unm_name(self.volume['id']) - existing_ref = {'source-name': unm_matcher} - - self.assertRaises(exception.ManageExistingVolumeTypeMismatch, - self.driver.manage_existing, - volume=volume, - existing_ref=existing_ref) - - expected = [mock.call.getVolume(existing_ref['source-name'])] - - mock_client.assert_has_calls( - self.standard_login + - expected + - self.standard_logout) - - @mock.patch.object(volume_types, 'get_volume_type') - def test_manage_existing_retype_exception(self, _mock_volume_types): - mock_client = self.setup_driver() - _mock_volume_types.return_value = { - 'name': 'gold', - 'id': 'gold-id', - 'extra_specs': { - 'cpg': HPE3PAR_CPG, - 'snap_cpg': HPE3PAR_CPG_SNAP, - 'vvs_name': self.VVS_NAME, - 'qos': self.QOS, - 'tpvv': True, - 'tdvv': False, - 'volume_type': self.volume_type}} - - volume = {'display_name': None, - 'host': 
'stack1@3pariscsi#POOL1', - 'volume_type': 'gold', - 'volume_type_id': 'bcfa9fa4-54a0-4340-a3d8-bfcf19aea65e', - 'id': '007dbfce-7579-40bc-8f90-a20b3902283e'} - - mock_client.getVolume.return_value = self.MANAGE_VOLUME_INFO - mock_client.modifyVolume.return_value = ("anyResponse", {'taskid': 1}) - mock_client.getTask.return_value = self.STATUS_DONE - mock_client.getCPG.side_effect = [ - {'domain': 'domain1'}, - {'domain': 'domain2'}, - {'domain': 'domain3'}, - ] - - with mock.patch.object(hpecommon.HPE3PARCommon, - '_create_client') as mock_create_client: - mock_create_client.return_value = mock_client - common = self.driver._login() - - unm_matcher = common._get_3par_unm_name(self.volume['id']) - osv_matcher = common._get_3par_vol_name(volume['id']) - - existing_ref = {'source-name': unm_matcher} - - self.assertRaises(exception.Invalid3PARDomain, - self.driver.manage_existing, - volume=volume, - existing_ref=existing_ref) - - expected = [ - - mock.call.getVolume(unm_matcher), - mock.call.modifyVolume( - unm_matcher, { - 'newName': osv_matcher, - 'comment': mock.ANY}), - mock.call.getCPG('POOL1'), - mock.call.getVolume(osv_matcher), - mock.call.getCPG('testUserCpg0'), - mock.call.getCPG('POOL1'), - mock.call.modifyVolume( - osv_matcher, {'newName': unm_matcher, - 'comment': self.MANAGE_VOLUME_INFO - ['comment']}) - ] - - mock_client.assert_has_calls( - self.standard_login + - expected + - self.standard_logout) - - def test_manage_existing_snapshot(self): - mock_client = self.setup_driver() - - new_comment = Comment({ - "display_name": "snap", - "volume_name": self.VOLUME_NAME, - "volume_id": self.VOLUME_ID, - "description": "", - }) - - volume = {'id': self.VOLUME_ID} - - snapshot = { - 'display_name': None, - 'id': self.SNAPSHOT_ID, - 'volume': volume, - } - - mock_client.getVolume.return_value = { - "comment": "{'display_name': 'snap'}", - 'copyOf': self.VOLUME_NAME_3PAR, - } - - with mock.patch.object(hpecommon.HPE3PARCommon, - '_create_client') as 
mock_create_client: - mock_create_client.return_value = mock_client - common = self.driver._login() - - oss_matcher = common._get_3par_snap_name(snapshot['id']) - ums_matcher = common._get_3par_ums_name(snapshot['id']) - existing_ref = {'source-name': ums_matcher} - expected_obj = {'display_name': 'snap'} - - obj = self.driver.manage_existing_snapshot(snapshot, existing_ref) - - expected = [ - mock.call.getVolume(existing_ref['source-name']), - mock.call.modifyVolume(existing_ref['source-name'], - {'newName': oss_matcher, - 'comment': new_comment}), - ] - mock_client.assert_has_calls( - self.standard_login + - expected + - self.standard_logout) - self.assertEqual(expected_obj, obj) - - def test_manage_existing_snapshot_invalid_parent(self): - mock_client = self.setup_driver() - - volume = {'id': self.VOLUME_ID} - - snapshot = { - 'display_name': None, - 'id': '007dbfce-7579-40bc-8f90-a20b3902283e', - 'volume': volume, - } - - mock_client.getVolume.return_value = { - "comment": "{'display_name': 'snap'}", - 'copyOf': 'fake-invalid', - } - - with mock.patch.object(hpecommon.HPE3PARCommon, - '_create_client') as mock_create_client: - mock_create_client.return_value = mock_client - common = self.driver._login() - - ums_matcher = common._get_3par_ums_name(snapshot['id']) - existing_ref = {'source-name': ums_matcher} - - self.assertRaises(exception.InvalidInput, - self.driver.manage_existing_snapshot, - snapshot=snapshot, - existing_ref=existing_ref) - - expected = [ - mock.call.getVolume(existing_ref['source-name']), - ] - - mock_client.assert_has_calls( - self.standard_login + - expected + - self.standard_logout) - - def test_manage_existing_snapshot_failed_over_volume(self): - mock_client = self.setup_driver() - - volume = { - 'id': self.VOLUME_ID, - 'replication_status': 'failed-over', - } - - snapshot = { - 'display_name': None, - 'id': '007dbfce-7579-40bc-8f90-a20b3902283e', - 'volume': volume, - } - - mock_client.getVolume.return_value = { - "comment": 
"{'display_name': 'snap'}", - 'copyOf': self.VOLUME_NAME_3PAR, - } - - with mock.patch.object(hpecommon.HPE3PARCommon, - '_create_client') as mock_create_client: - mock_create_client.return_value = mock_client - common = self.driver._login() - - ums_matcher = common._get_3par_ums_name(snapshot['id']) - existing_ref = {'source-name': ums_matcher} - - self.assertRaises(exception.InvalidInput, - self.driver.manage_existing_snapshot, - snapshot=snapshot, - existing_ref=existing_ref) - - def test_manage_existing_get_size(self): - mock_client = self.setup_driver() - mock_client.getVolume.return_value = {'sizeMiB': 2048} - - with mock.patch.object(hpecommon.HPE3PARCommon, - '_create_client') as mock_create_client: - mock_create_client.return_value = mock_client - common = self.driver._login() - unm_matcher = common._get_3par_unm_name(self.volume['id']) - volume = {} - existing_ref = {'source-name': unm_matcher} - - size = self.driver.manage_existing_get_size(volume, existing_ref) - - expected_size = 2 - expected = [mock.call.getVolume(existing_ref['source-name'])] - - mock_client.assert_has_calls( - self.standard_login + - expected + - self.standard_logout) - self.assertEqual(expected_size, size) - - def test_manage_existing_get_size_invalid_reference(self): - mock_client = self.setup_driver() - with mock.patch.object(hpecommon.HPE3PARCommon, - '_create_client') as mock_create_client: - mock_create_client.return_value = mock_client - volume = {} - existing_ref = {'source-name': self.VOLUME_3PAR_NAME} - - self.assertRaises(exception.ManageExistingInvalidReference, - self.driver.manage_existing_get_size, - volume=volume, - existing_ref=existing_ref) - - mock_client.assert_has_calls( - self.standard_login + - self.standard_logout) - - existing_ref = {} - - self.assertRaises(exception.ManageExistingInvalidReference, - self.driver.manage_existing_get_size, - volume=volume, - existing_ref=existing_ref) - - mock_client.assert_has_calls( - self.standard_login + - 
self.standard_logout) - - def test_manage_existing_get_size_invalid_input(self): - mock_client = self.setup_driver() - mock_client.getVolume.side_effect = hpeexceptions.HTTPNotFound('fake') - - with mock.patch.object(hpecommon.HPE3PARCommon, - '_create_client') as mock_create_client: - mock_create_client.return_value = mock_client - common = self.driver._login() - unm_matcher = common._get_3par_unm_name(self.volume['id']) - volume = {} - existing_ref = {'source-name': unm_matcher} - - self.assertRaises(exception.InvalidInput, - self.driver.manage_existing_get_size, - volume=volume, - existing_ref=existing_ref) - - expected = [mock.call.getVolume(existing_ref['source-name'])] - - mock_client.assert_has_calls( - self.standard_login + - expected + - self.standard_logout) - - def test_manage_existing_snapshot_get_size(self): - mock_client = self.setup_driver() - mock_client.getVolume.return_value = {'sizeMiB': 2048} - - with mock.patch.object(hpecommon.HPE3PARCommon, - '_create_client') as mock_create_client: - mock_create_client.return_value = mock_client - common = self.driver._login() - - ums_matcher = common._get_3par_ums_name(self.snapshot['id']) - snapshot = {} - existing_ref = {'source-name': ums_matcher} - - size = self.driver.manage_existing_snapshot_get_size(snapshot, - existing_ref) - - expected_size = 2 - expected = [mock.call.getVolume(existing_ref['source-name'])] - - mock_client.assert_has_calls( - self.standard_login + - expected + - self.standard_logout) - self.assertEqual(expected_size, size) - - def test_manage_existing_snapshot_get_size_invalid_reference(self): - mock_client = self.setup_driver() - - with mock.patch.object(hpecommon.HPE3PARCommon, - '_create_client') as mock_create_client: - mock_create_client.return_value = mock_client - - snapshot = {} - existing_ref = {'source-name': self.SNAPSHOT_3PAR_NAME} - - self.assertRaises(exception.ManageExistingInvalidReference, - self.driver.manage_existing_snapshot_get_size, - snapshot=snapshot, - 
existing_ref=existing_ref) - - mock_client.assert_has_calls( - self.standard_login + - self.standard_logout) - - existing_ref = {} - - self.assertRaises(exception.ManageExistingInvalidReference, - self.driver.manage_existing_snapshot_get_size, - snapshot=snapshot, - existing_ref=existing_ref) - - mock_client.assert_has_calls( - self.standard_login + - self.standard_logout) - - def test_manage_existing_snapshot_get_size_invalid_input(self): - mock_client = self.setup_driver() - mock_client.getVolume.side_effect = hpeexceptions.HTTPNotFound('fake') - - with mock.patch.object(hpecommon.HPE3PARCommon, - '_create_client') as mock_create_client: - mock_create_client.return_value = mock_client - common = self.driver._login() - - ums_matcher = common._get_3par_ums_name(self.snapshot['id']) - snapshot = {} - existing_ref = {'source-name': ums_matcher} - - self.assertRaises(exception.InvalidInput, - self.driver.manage_existing_snapshot_get_size, - snapshot=snapshot, - existing_ref=existing_ref) - - expected = [mock.call.getVolume(existing_ref['source-name'])] - - mock_client.assert_has_calls( - self.standard_login + - expected + - self.standard_logout) - - def test_unmanage(self): - mock_client = self.setup_driver() - with mock.patch.object(hpecommon.HPE3PARCommon, - '_create_client') as mock_create_client: - mock_create_client.return_value = mock_client - common = self.driver._login() - self.driver.unmanage(self.volume) - - osv_matcher = common._get_3par_vol_name(self.volume['id']) - unm_matcher = common._get_3par_unm_name(self.volume['id']) - - expected = [ - mock.call.modifyVolume(osv_matcher, {'newName': unm_matcher}) - ] - - mock_client.assert_has_calls( - self.standard_login + - expected + - self.standard_logout) - - def test_unmanage_snapshot(self): - mock_client = self.setup_driver() - with mock.patch.object(hpecommon.HPE3PARCommon, - '_create_client') as mock_create_client: - mock_create_client.return_value = mock_client - common = self.driver._login() - 
self.driver.unmanage_snapshot(self.snapshot) - - oss_matcher = common._get_3par_snap_name(self.snapshot['id']) - ums_matcher = common._get_3par_ums_name(self.snapshot['id']) - - expected = [ - mock.call.modifyVolume(oss_matcher, {'newName': ums_matcher}) - ] - - mock_client.assert_has_calls( - self.standard_login + - expected + - self.standard_logout) - - def test_unmanage_snapshot_failed_over_volume(self): - mock_client = self.setup_driver() - - volume = {'replication_status': 'failed-over', } - snapshot = {'id': self.SNAPSHOT_ID, - 'display_name': 'fake_snap', - 'volume': volume, } - - with mock.patch.object(hpecommon.HPE3PARCommon, - '_create_client') as mock_create_client: - mock_create_client.return_value = mock_client - - self.assertRaises(exception.SnapshotIsBusy, - self.driver.unmanage_snapshot, - snapshot=snapshot) - - def test__safe_hostname(self): - long_hostname = "abc123abc123abc123abc123abc123abc123" - fixed_hostname = "abc123abc123abc123abc123abc123a" - common = hpecommon.HPE3PARCommon(None) - safe_host = common._safe_hostname(long_hostname) - self.assertEqual(fixed_hostname, safe_host) - - @mock.patch('cinder.volume.drivers.hpe.hpe_3par_common.HPE3PARCommon.' 
- 'is_volume_group_snap_type') - @mock.patch('cinder.volume.utils.is_group_a_cg_snapshot_type') - def test_create_group(self, cg_ss_enable, vol_ss_enable): - cg_ss_enable.return_value = True - vol_ss_enable.return_value = True - mock_client = self.setup_driver() - mock_client.getStorageSystemInfo.return_value = {'id': self.CLIENT_ID} - - comment = Comment({ - 'group_id': self.GROUP_ID - }) - - with mock.patch.object(hpecommon.HPE3PARCommon, - '_create_client') as mock_create_client: - mock_create_client.return_value = mock_client - mock_client.getCPG.return_value = {'domain': None} - # create a group - group = self.fake_group_object() - self.driver.create_group(context.get_admin_context(), group) - - expected = [ - mock.call.getCPG(HPE3PAR_CPG), - mock.call.createVolumeSet( - self.CONSIS_GROUP_NAME, - domain=None, - comment=comment)] - - mock_client.assert_has_calls( - self.get_id_login + - self.standard_logout + - self.standard_login + - expected + - self.standard_logout) - - @mock.patch('cinder.volume.drivers.hpe.hpe_3par_common.HPE3PARCommon.' - 'get_volume_settings_from_type') - @mock.patch('cinder.volume.drivers.hpe.hpe_3par_common.HPE3PARCommon.' 
- 'is_volume_group_snap_type') - @mock.patch('cinder.volume.utils.is_group_a_cg_snapshot_type') - def test_create_group_from_src(self, cg_ss_enable, vol_ss_enable, - typ_info): - cg_ss_enable.return_value = True - vol_ss_enable.return_value = True - mock_client = self.setup_driver() - mock_client.getStorageSystemInfo.return_value = {'id': self.CLIENT_ID} - volume = self.fake_volume_object() - type_info = {'cpg': 'OpenStackCPG', - 'tpvv': True, - 'tdvv': False, - 'snap_cpg': 'OpenStackCPG', - 'hpe3par_keys': {}} - - typ_info.return_value = type_info - - group_snap_comment = Comment({ - "group_id": "6044fedf-c889-4752-900f-2039d247a5df", - "description": "group_snapshot", - "group_snapshot_id": "e91c5ed5-daee-4e84-8724-1c9e31e7a1f2", - }) - - group_snap_optional = ( - {'comment': group_snap_comment, - 'readOnly': False}) - - group_comment = Comment({ - 'group_id': self.GROUP_ID - }) - - with mock.patch.object(hpecommon.HPE3PARCommon, - '_create_client') as mock_create_client: - mock_create_client.return_value = mock_client - mock_client.getCPG.return_value = {'domain': None} - - # create a consistency group - group = self.fake_group_object() - self.driver.create_group(context.get_admin_context(), group) - - expected = [ - mock.call.getCPG(HPE3PAR_CPG), - mock.call.createVolumeSet( - self.CONSIS_GROUP_NAME, - domain=None, - comment=group_comment)] - - mock_client.assert_has_calls( - self.get_id_login + - self.standard_logout + - self.standard_login + - expected + - self.standard_logout) - mock_client.reset_mock() - - # add a volume to the consistency group - self.driver.update_group(context.get_admin_context(), group, - add_volumes=[volume], remove_volumes=[]) - - expected = [ - mock.call.addVolumeToVolumeSet( - self.CONSIS_GROUP_NAME, - self.VOLUME_NAME_3PAR)] - - mock_client.assert_has_calls( - self.get_id_login + - self.standard_logout + - self.standard_login + - expected + - self.standard_logout) - mock_client.reset_mock() - - # create a snapshot of the 
consistency group - grp_snapshot = self.fake_group_snapshot_object() - self.driver.create_group_snapshot(context.get_admin_context(), - grp_snapshot, []) - - expected = [ - mock.call.createSnapshotOfVolumeSet( - self.CGSNAPSHOT_BASE_NAME + "-@count@", - self.CONSIS_GROUP_NAME, - optional=group_snap_optional)] - - # create a consistency group from the cgsnapshot - self.driver.create_group_from_src( - context.get_admin_context(), group, - [volume], group_snapshot=grp_snapshot, - snapshots=[self.snapshot]) - - mock_client.assert_has_calls( - self.get_id_login + - self.standard_logout + - self.standard_login + - expected + - self.standard_logout) - - @mock.patch('cinder.volume.drivers.hpe.hpe_3par_common.HPE3PARCommon.' - 'get_volume_settings_from_type') - @mock.patch('cinder.volume.drivers.hpe.hpe_3par_common.HPE3PARCommon.' - 'is_volume_group_snap_type') - @mock.patch('cinder.volume.utils.is_group_a_cg_snapshot_type') - def test_create_group_from_src_group(self, cg_ss_enable, vol_ss_enable, - typ_info): - cg_ss_enable.return_value = True - vol_ss_enable.return_value = True - mock_client = self.setup_driver() - mock_client.getStorageSystemInfo.return_value = {'id': self.CLIENT_ID} - volume = self.fake_volume_object() - type_info = {'cpg': 'OpenStackCPG', - 'tpvv': True, - 'tdvv': False, - 'snap_cpg': 'OpenStackCPG', - 'hpe3par_keys': {}} - - typ_info.return_value = type_info - source_volume = self.volume_src_cg - - group_snap_optional = ( - {'expirationHours': 1}) - - group_comment = Comment({ - 'group_id': self.GROUP_ID - }) - - with mock.patch.object(hpecommon.HPE3PARCommon, - '_create_client') as mock_create_client: - mock_create_client.return_value = mock_client - mock_client.getCPG.return_value = {'domain': None} - group = self.fake_group_object() - source_grp = self.fake_group_object( - grp_id=self.SRC_CONSIS_GROUP_ID) - - expected = [ - mock.call.getCPG(HPE3PAR_CPG), - mock.call.createVolumeSet( - self.CONSIS_GROUP_NAME, - domain=None, - comment=group_comment), 
- mock.call.createSnapshotOfVolumeSet( - mock.ANY, - self.SRC_CONSIS_GROUP_NAME, - optional=group_snap_optional), - mock.call.copyVolume( - mock.ANY, - self.VOLUME_NAME_3PAR, - HPE3PAR_CPG, - {'snapCPG': HPE3PAR_CPG, 'online': True, - 'tpvv': mock.ANY, 'tdvv': mock.ANY}), - mock.call.addVolumeToVolumeSet( - self.CONSIS_GROUP_NAME, - self.VOLUME_NAME_3PAR)] - - # Create a consistency group from a source consistency group. - self.driver.create_group_from_src( - context.get_admin_context(), group, - [volume], source_group=source_grp, - source_vols=[source_volume]) - - mock_client.assert_has_calls( - self.get_id_login + - self.standard_logout + - self.standard_login + - expected + - self.standard_logout) - - @mock.patch('cinder.volume.drivers.hpe.hpe_3par_common.HPE3PARCommon.' - 'is_volume_group_snap_type') - @mock.patch('cinder.volume.utils.is_group_a_cg_snapshot_type') - def test_delete_group(self, cg_ss_enable, vol_ss_enable): - cg_ss_enable.return_value = True - vol_ss_enable.return_value = True - mock_client = self.setup_driver() - mock_client.getStorageSystemInfo.return_value = {'id': self.CLIENT_ID} - - comment = Comment({ - 'group_id': self.GROUP_ID - }) - - with mock.patch.object(hpecommon.HPE3PARCommon, - '_create_client') as mock_create_client: - mock_create_client.return_value = mock_client - mock_client.getCPG.return_value = {'domain': None} - - # create a consistency group - group = self.fake_group_object() - self.driver.create_group(context.get_admin_context(), group) - - expected = [ - mock.call.getCPG(HPE3PAR_CPG), - mock.call.createVolumeSet( - self.CONSIS_GROUP_NAME, - domain=None, - comment=comment)] - - mock_client.assert_has_calls( - self.get_id_login + - self.standard_logout + - self.standard_login + - expected + - self.standard_logout) - mock_client.reset_mock() - - # remove the consistency group - group.status = fields.GroupStatus.DELETING - self.driver.delete_group(context.get_admin_context(), group, []) - - expected = [ - 
mock.call.deleteVolumeSet( - self.CONSIS_GROUP_NAME)] - - mock_client.assert_has_calls( - self.get_id_login + - self.standard_logout + - self.standard_login + - expected + - self.standard_logout) - - @mock.patch('cinder.volume.drivers.hpe.hpe_3par_common.HPE3PARCommon.' - 'is_volume_group_snap_type') - @mock.patch('cinder.volume.utils.is_group_a_cg_snapshot_type') - def test_delete_group_exceptions(self, cg_ss_enable, vol_ss_enable): - cg_ss_enable.return_value = True - vol_ss_enable.return_value = True - mock_client = self.setup_driver() - mock_client.getStorageSystemInfo.return_value = {'id': self.CLIENT_ID} - - with mock.patch.object(hpecommon.HPE3PARCommon, - '_create_client') as mock_create_client: - mock_create_client.return_value = mock_client - mock_client.getCPG.return_value = {'domain': None} - - # create a consistency group - group = self.fake_group_object() - volume = fake_volume.fake_volume_obj(context.get_admin_context()) - self.driver.create_group(context.get_admin_context(), group) - - # remove the consistency group - group.status = fields.GroupStatus.DELETING - - # mock HTTPConflict in delete volume set - mock_client.deleteVolumeSet.side_effect = ( - hpeexceptions.HTTPConflict()) - # no exception should escape method - self.driver.delete_group(context.get_admin_context(), group, []) - - # mock HTTPNotFound in delete volume set - mock_client.deleteVolumeSet.side_effect = ( - hpeexceptions.HTTPNotFound()) - # no exception should escape method - self.driver.delete_group(context.get_admin_context(), group, []) - - # mock HTTPConflict in delete volume - mock_client.deleteVolume.side_effect = ( - hpeexceptions.HTTPConflict()) - # no exception should escape method - self.driver.delete_group(context.get_admin_context(), group, - [volume]) - - @mock.patch('cinder.volume.drivers.hpe.hpe_3par_common.HPE3PARCommon.' 
- 'is_volume_group_snap_type') - @mock.patch('cinder.volume.utils.is_group_a_cg_snapshot_type') - def test_update_group_add_vol(self, cg_ss_enable, vol_ss_enable): - cg_ss_enable.return_value = True - vol_ss_enable.return_value = True - mock_client = self.setup_driver() - mock_client.getStorageSystemInfo.return_value = {'id': self.CLIENT_ID} - volume = self.fake_volume_object() - - comment = Comment({ - 'group_id': self.GROUP_ID - }) - - with mock.patch.object(hpecommon.HPE3PARCommon, - '_create_client') as mock_create_client: - mock_create_client.return_value = mock_client - mock_client.getCPG.return_value = {'domain': None} - - # create a consistency group - group = self.fake_group_object() - self.driver.create_group(context.get_admin_context(), group) - - expected = [ - mock.call.getCPG(HPE3PAR_CPG), - mock.call.createVolumeSet( - self.CONSIS_GROUP_NAME, - domain=None, - comment=comment)] - - mock_client.assert_has_calls( - self.get_id_login + - self.standard_logout + - self.standard_login + - expected + - self.standard_logout) - mock_client.reset_mock() - - # add a volume to the consistency group - self.driver.update_group(context.get_admin_context(), group, - add_volumes=[volume], remove_volumes=[]) - - expected = [ - mock.call.addVolumeToVolumeSet( - self.CONSIS_GROUP_NAME, - self.VOLUME_NAME_3PAR)] - - mock_client.assert_has_calls( - self.get_id_login + - self.standard_logout + - self.standard_login + - expected + - self.standard_logout) - - @mock.patch('cinder.volume.drivers.hpe.hpe_3par_common.HPE3PARCommon.' 
- 'is_volume_group_snap_type') - @mock.patch('cinder.volume.utils.is_group_a_cg_snapshot_type') - def test_update_group_remove_vol(self, cg_ss_enable, vol_ss_enable): - cg_ss_enable.return_value = True - vol_ss_enable.return_value = True - mock_client = self.setup_driver() - mock_client.getStorageSystemInfo.return_value = {'id': self.CLIENT_ID} - volume = self.fake_volume_object() - - comment = Comment({ - 'group_id': self.GROUP_ID - }) - - with mock.patch.object(hpecommon.HPE3PARCommon, - '_create_client') as mock_create_client: - mock_create_client.return_value = mock_client - mock_client.getCPG.return_value = {'domain': None} - - # create a consistency group - group = self.fake_group_object() - self.driver.create_group(context.get_admin_context(), group) - - expected = [ - mock.call.getCPG(HPE3PAR_CPG), - mock.call.createVolumeSet( - self.CONSIS_GROUP_NAME, - domain=None, - comment=comment)] - - mock_client.assert_has_calls( - self.get_id_login + - self.standard_logout + - self.standard_login + - expected + - self.standard_logout) - mock_client.reset_mock() - - # add a volume to the consistency group - self.driver.update_group(context.get_admin_context(), group, - add_volumes=[volume], remove_volumes=[]) - - expected = [ - mock.call.addVolumeToVolumeSet( - self.CONSIS_GROUP_NAME, - self.VOLUME_NAME_3PAR)] - - mock_client.assert_has_calls( - self.get_id_login + - self.standard_logout + - self.standard_login + - expected + - self.standard_logout) - mock_client.reset_mock() - - # remove the volume from the consistency group - self.driver.update_group(context.get_admin_context(), group, - add_volumes=[], remove_volumes=[volume]) - - expected = [ - mock.call.removeVolumeFromVolumeSet( - self.CONSIS_GROUP_NAME, - self.VOLUME_NAME_3PAR)] - - mock_client.assert_has_calls( - self.get_id_login + - self.standard_logout + - self.standard_login + - expected + - self.standard_logout) - - @mock.patch('cinder.volume.drivers.hpe.hpe_3par_common.HPE3PARCommon.' 
- 'is_volume_group_snap_type') - @mock.patch('cinder.volume.utils.is_group_a_cg_snapshot_type') - def test_create_group_snapshot(self, cg_ss_enable, vol_ss_enable): - cg_ss_enable.return_value = True - vol_ss_enable.return_value = True - mock_client = self.setup_driver() - mock_client.getStorageSystemInfo.return_value = {'id': self.CLIENT_ID} - volume = self.fake_volume_object() - - cg_comment = Comment({ - 'group_id': self.GROUP_ID - }) - - group_snap_comment = Comment({ - "group_id": "6044fedf-c889-4752-900f-2039d247a5df", - "description": "group_snapshot", - "group_snapshot_id": "e91c5ed5-daee-4e84-8724-1c9e31e7a1f2"}) - - cgsnap_optional = ( - {'comment': group_snap_comment, - 'readOnly': False}) - - with mock.patch.object(hpecommon.HPE3PARCommon, - '_create_client') as mock_create_client: - mock_create_client.return_value = mock_client - mock_client.getCPG.return_value = {'domain': None} - - # create a consistency group - group = self.fake_group_object() - self.driver.create_group(context.get_admin_context(), group) - - expected = [ - mock.call.getCPG(HPE3PAR_CPG), - mock.call.createVolumeSet( - self.CONSIS_GROUP_NAME, - domain=None, - comment=cg_comment)] - - mock_client.assert_has_calls( - self.get_id_login + - self.standard_logout + - self.standard_login + - expected + - self.standard_logout) - mock_client.reset_mock() - - # add a volume to the consistency group - self.driver.update_group(context.get_admin_context(), group, - add_volumes=[volume], remove_volumes=[]) - - expected = [ - mock.call.addVolumeToVolumeSet( - self.CONSIS_GROUP_NAME, - self.VOLUME_NAME_3PAR)] - - mock_client.assert_has_calls( - self.get_id_login + - self.standard_logout + - self.standard_login + - expected + - self.standard_logout) - mock_client.reset_mock() - - # create a snapshot of the consistency group - group_snapshot = self.fake_group_snapshot_object() - self.driver.create_group_snapshot(context.get_admin_context(), - group_snapshot, []) - - expected = [ - 
mock.call.createSnapshotOfVolumeSet( - self.CGSNAPSHOT_BASE_NAME + "-@count@", - self.CONSIS_GROUP_NAME, - optional=cgsnap_optional)] - - mock_client.assert_has_calls( - self.get_id_login + - self.standard_logout + - self.standard_login + - expected + - self.standard_logout) - - @mock.patch('cinder.volume.drivers.hpe.hpe_3par_common.HPE3PARCommon.' - 'is_volume_group_snap_type') - @mock.patch('cinder.volume.utils.is_group_a_cg_snapshot_type') - def test_delete_group_snapshot(self, cg_ss_enable, vol_ss_enable): - cg_ss_enable.return_value = True - vol_ss_enable.return_value = True - mock_client = self.setup_driver() - mock_client.getStorageSystemInfo.return_value = {'id': self.CLIENT_ID} - volume = self.fake_volume_object() - group_snapshot = self.fake_group_snapshot_object() - - cg_comment = Comment({ - 'group_id': self.GROUP_ID - }) - - group_snap_comment = Comment({ - "group_id": "6044fedf-c889-4752-900f-2039d247a5df", - "description": "group_snapshot", - "group_snapshot_id": "e91c5ed5-daee-4e84-8724-1c9e31e7a1f2"}) - - group_snap_optional = {'comment': group_snap_comment, - 'readOnly': False} - - with mock.patch.object(hpecommon.HPE3PARCommon, - '_create_client') as mock_create_client: - mock_create_client.return_value = mock_client - mock_client.getCPG.return_value = {'domain': None} - - # create a consistency group - group = self.fake_group_object() - self.driver.create_group(context.get_admin_context(), group) - - expected = [ - mock.call.getCPG(HPE3PAR_CPG), - mock.call.createVolumeSet( - self.CONSIS_GROUP_NAME, - domain=None, - comment=cg_comment)] - - mock_client.assert_has_calls( - self.get_id_login + - self.standard_logout + - self.standard_login + - expected + - self.standard_logout) - mock_client.reset_mock() - - # add a volume to the consistency group - self.driver.update_group(context.get_admin_context(), group, - add_volumes=[volume], remove_volumes=[]) - expected = [ - mock.call.addVolumeToVolumeSet( - self.CONSIS_GROUP_NAME, - 
self.VOLUME_NAME_3PAR)] - - mock_client.assert_has_calls( - self.get_id_login + - self.standard_logout + - self.standard_login + - expected + - self.standard_logout) - mock_client.reset_mock() - - # create a snapshot of the consistency group - self.driver.create_group_snapshot(context.get_admin_context(), - group_snapshot, []) - - expected = [ - mock.call.createSnapshotOfVolumeSet( - self.CGSNAPSHOT_BASE_NAME + "-@count@", - self.CONSIS_GROUP_NAME, - optional=group_snap_optional)] - - # delete the snapshot of the consistency group - group_snapshot.status = fields.GroupSnapshotStatus.DELETING - self.driver.delete_group_snapshot(context.get_admin_context(), - group_snapshot, []) - - mock_client.assert_has_calls( - self.get_id_login + - self.standard_logout + - self.standard_login + - expected + - self.standard_logout) - - @mock.patch.object(volume_types, 'get_volume_type') - def test_failover_host(self, _mock_volume_types): - # periodic vs. sync is not relevant when conducting a failover. We - # will just use periodic. 
- conf = self.setup_configuration() - self.replication_targets[0]['replication_mode'] = 'periodic' - conf.replication_device = self.replication_targets - mock_client = self.setup_driver(config=conf) - mock_client.getStorageSystemInfo.return_value = ( - {'id': self.CLIENT_ID}) - mock_replicated_client = self.setup_driver(config=conf) - mock_replicated_client.getStorageSystemInfo.return_value = ( - {'id': self.REPLICATION_CLIENT_ID}) - - _mock_volume_types.return_value = { - 'name': 'replicated', - 'extra_specs': { - 'replication_enabled': ' True', - 'replication:mode': 'periodic', - 'replication:sync_period': '900', - 'volume_type': self.volume_type_replicated}} - - with mock.patch.object( - hpecommon.HPE3PARCommon, - '_create_client') as mock_create_client, \ - mock.patch.object( - hpecommon.HPE3PARCommon, - '_create_replication_client') as mock_replication_client: - mock_create_client.return_value = mock_client - mock_replication_client.return_value = mock_replicated_client - valid_backend_id = ( - self.replication_targets[0]['backend_id']) - invalid_backend_id = 'INVALID' - - volumes = [self.volume_replicated] - # Test invalid secondary target. - self.assertRaises( - exception.InvalidReplicationTarget, - self.driver.failover_host, - context.get_admin_context(), - volumes, - invalid_backend_id) - - # Test no secondary target. - self.assertRaises( - exception.InvalidReplicationTarget, - self.driver.failover_host, - context.get_admin_context(), - volumes, - None) - - # Test a successful failover. 
- expected_model = (self.REPLICATION_BACKEND_ID, - [{'updates': {'replication_status': - 'failed-over'}, - 'volume_id': self.VOLUME_ID}], - []) - return_model = self.driver.failover_host( - context.get_admin_context(), - volumes, - valid_backend_id) - expected = [ - mock.call.stopRemoteCopy(self.RCG_3PAR_NAME)] - mock_client.assert_has_calls( - self.get_id_login + - self.standard_logout + - self.standard_login + - expected + - self.standard_logout) - self.assertEqual(expected_model, return_model) - - @mock.patch.object(volume_types, 'get_volume_type') - def test_replication_failback_ready(self, _mock_volume_types): - # Managed vs. unmanaged and periodic vs. sync are not relevant when - # failing back a volume. - # We will use managed and periodic as the default. - conf = self.setup_configuration() - self.replication_targets[0]['replication_mode'] = 'periodic' - conf.replication_device = self.replication_targets - mock_client = self.setup_driver(config=conf) - mock_client.getStorageSystemInfo.return_value = ( - {'id': self.CLIENT_ID}) - mock_replicated_client = self.setup_driver(config=conf) - mock_replicated_client.getStorageSystemInfo.return_value = ( - {'id': self.REPLICATION_CLIENT_ID}) - - _mock_volume_types.return_value = { - 'name': 'replicated', - 'extra_specs': { - 'replication_enabled': ' True', - 'replication:mode': 'periodic', - 'replication:sync_period': '900', - 'volume_type': self.volume_type_replicated}} - - with mock.patch.object( - hpecommon.HPE3PARCommon, - '_create_client') as mock_create_client, \ - mock.patch.object( - hpecommon.HPE3PARCommon, - '_create_replication_client') as mock_replication_client: - mock_create_client.return_value = mock_client - mock_replication_client.return_value = mock_replicated_client - - # Test a successful fail-back. 
- volume = self.volume_replicated.copy() - volume['replication_status'] = 'failed-over' - return_model = self.driver.failover_host( - context.get_admin_context(), - [volume], - 'default') - expected_model = (None, - [{'updates': {'replication_status': - 'available'}, - 'volume_id': self.VOLUME_ID}], - []) - self.assertEqual(expected_model, return_model) - - @mock.patch.object(volume_types, 'get_volume_type') - def test_replication_failback_not_ready(self, _mock_volume_types): - # Managed vs. unmanaged and periodic vs. sync are not relevant when - # failing back a volume. - # We will use managed and periodic as the default. - conf = self.setup_configuration() - self.replication_targets[0]['replication_mode'] = 'periodic' - conf.replication_device = self.replication_targets - mock_client = self.setup_driver(config=conf) - mock_client.getStorageSystemInfo.return_value = ( - {'id': self.CLIENT_ID}) - mock_replicated_client = self.setup_driver(config=conf) - mock_replicated_client.getStorageSystemInfo.return_value = ( - {'id': self.REPLICATION_CLIENT_ID}) - - _mock_volume_types.return_value = { - 'name': 'replicated', - 'extra_specs': { - 'replication_enabled': ' True', - 'replication:mode': 'periodic', - 'replication:sync_period': '900', - 'volume_type': self.volume_type_replicated}} - - with mock.patch.object( - hpecommon.HPE3PARCommon, - '_create_client') as mock_create_client, \ - mock.patch.object( - hpecommon.HPE3PARCommon, - '_create_replication_client') as mock_replication_client: - mock_create_client.return_value = mock_client - mock_client.getRemoteCopyGroup.side_effect = ( - exception.VolumeBackendAPIException( - "Error: Remote Copy Group not Ready.")) - mock_replication_client.return_value = mock_replicated_client - - # Test an unsuccessful fail-back. 
- volume = self.volume_replicated.copy() - volume['replication_status'] = 'failed-over' - - self.assertRaises( - exception.InvalidReplicationTarget, - self.driver.failover_host, - context.get_admin_context(), - [volume], - 'default') - - def test_get_pool_with_existing_volume(self): - mock_client = self.setup_driver() - mock_client.getVolume.return_value = {'userCPG': HPE3PAR_CPG} - - with mock.patch.object(hpecommon.HPE3PARCommon, - '_create_client') as mock_create_client: - mock_create_client.return_value = mock_client - actual_cpg = self.driver.get_pool(self.volume) - expected_cpg = HPE3PAR_CPG - - expected = [ - mock.call.getVolume(self.VOLUME_3PAR_NAME) - ] - - mock_client.assert_has_calls( - self.standard_login + - expected + - self.standard_logout) - - self.assertEqual(expected_cpg, actual_cpg) - - def test_get_pool_with_non_existing_volume(self): - mock_client = self.setup_driver() - mock_client.getVolume.side_effect = hpeexceptions.HTTPNotFound - - with mock.patch.object(hpecommon.HPE3PARCommon, - '_create_client') as mock_create_client: - mock_create_client.return_value = mock_client - - expected = [ - mock.call.getVolume(self.VOLUME_3PAR_NAME) - ] - - try: - self.assertRaises( - hpeexceptions.HTTPNotFound, - self.driver.get_pool, - self.volume) - - except exception.InvalidVolume: - mock_client.assert_has_calls( - self.standard_login + - expected + - self.standard_logout) - - def test_driver_login_with_wrong_credential_and_replication_disabled(self): - mock_client = self.setup_driver() - mock_client.login.side_effect = hpeexceptions.HTTPUnauthorized - - with mock.patch.object(hpecommon.HPE3PARCommon, - '_create_client') as mock_create_client: - mock_create_client.return_value = mock_client - expected = [ - mock.call.login(HPE3PAR_USER_NAME, HPE3PAR_USER_PASS) - ] - self.assertRaises( - exception.InvalidInput, - self.driver._login) - mock_client.assert_has_calls(expected) - - def test_driver_login_with_wrong_credential_and_replication_enabled(self): - conf 
= self.setup_configuration() - self.replication_targets[0]['replication_mode'] = 'periodic' - conf.replication_device = self.replication_targets - mock_client = self.setup_driver(config=conf) - mock_replicated_client = self.setup_driver(config=conf) - mock_client.login.side_effect = hpeexceptions.HTTPUnauthorized - - with mock.patch.object( - hpecommon.HPE3PARCommon, - '_create_client') as mock_create_client, \ - mock.patch.object( - hpecommon.HPE3PARCommon, - '_create_replication_client') as mock_replication_client: - mock_create_client.return_value = mock_client - mock_replication_client.return_value = mock_replicated_client - expected = [ - mock.call.login(HPE3PAR_USER_NAME, HPE3PAR_USER_PASS) - ] - - common = self.driver._login() - mock_client.assert_has_calls( - expected) - self.assertTrue(common._replication_enabled) - - -class TestHPE3PARFCDriver(HPE3PARBaseDriver, test.TestCase): - - properties = { - 'driver_volume_type': 'fibre_channel', - 'data': { - 'encrypted': False, - 'target_lun': 90, - 'target_wwn': ['0987654321234', '123456789000987'], - 'target_discovered': True, - 'initiator_target_map': {'123456789012345': - ['0987654321234', '123456789000987'], - '123456789054321': - ['0987654321234', '123456789000987'], - }}} - - def setup_driver(self, config=None, mock_conf=None, wsapi_version=None): - self.ctxt = context.get_admin_context() - mock_client = self.setup_mock_client( - conf=config, - m_conf=mock_conf, - driver=hpefcdriver.HPE3PARFCDriver) - - if wsapi_version: - mock_client.getWsApiVersion.return_value = ( - wsapi_version) - else: - mock_client.getWsApiVersion.return_value = ( - self.wsapi_version_latest) - - expected = [ - mock.call.getCPG(HPE3PAR_CPG), - mock.call.getCPG(HPE3PAR_CPG2)] - mock_client.assert_has_calls( - self.standard_login + - expected + - self.standard_logout) - mock_client.reset_mock() - return mock_client - - def test_initialize_connection(self): - # setup_mock_client drive with default configuration - # and return the mock 
HTTP 3PAR client - mock_client = self.setup_driver() - mock_client.getVolume.return_value = {'userCPG': HPE3PAR_CPG} - mock_client.getCPG.return_value = {} - mock_client.getHost.side_effect = [ - hpeexceptions.HTTPNotFound('fake'), - {'name': self.FAKE_HOST, - 'FCPaths': [{'driverVersion': None, - 'firmwareVersion': None, - 'hostSpeed': 0, - 'model': None, - 'portPos': {'cardPort': 1, 'node': 7, - 'slot': 1}, - 'vendor': None, - 'wwn': self.wwn[0]}, - {'driverVersion': None, - 'firmwareVersion': None, - 'hostSpeed': 0, - 'model': None, - 'portPos': {'cardPort': 1, 'node': 6, - 'slot': 1}, - 'vendor': None, - 'wwn': self.wwn[1]}]}] - mock_client.queryHost.return_value = { - 'members': [{ - 'name': self.FAKE_HOST - }] - } - - mock_client.getHostVLUNs.side_effect = [ - hpeexceptions.HTTPNotFound('fake'), - [{'active': True, - 'volumeName': self.VOLUME_3PAR_NAME, - 'portPos': {'node': 7, 'slot': 1, 'cardPort': 1}, - 'remoteName': self.wwn[1], - 'lun': 90, 'type': 0}], - [{'active': True, - 'volumeName': self.VOLUME_3PAR_NAME, - 'portPos': {'node': 6, 'slot': 1, 'cardPort': 1}, - 'remoteName': self.wwn[0], - 'lun': 90, 'type': 0}]] - - location = ("%(volume_name)s,%(lun_id)s,%(host)s,%(nsp)s" % - {'volume_name': self.VOLUME_3PAR_NAME, - 'lun_id': 90, - 'host': self.FAKE_HOST, - 'nsp': 'something'}) - mock_client.createVLUN.return_value = location - expected_properties = { - 'driver_volume_type': 'fibre_channel', - 'data': { - 'encrypted': False, - 'target_lun': 90, - 'target_wwn': ['0987654321234', '123456789000987'], - 'target_discovered': True, - 'initiator_target_map': - {'123456789012345': ['123456789000987'], - '123456789054321': ['0987654321234']}}} - - with mock.patch.object(hpecommon.HPE3PARCommon, - '_create_client') as mock_create_client: - mock_create_client.return_value = mock_client - result = self.driver.initialize_connection( - self.volume, - self.connector) - - expected = [ - mock.call.getVolume(self.VOLUME_3PAR_NAME), - mock.call.getCPG(HPE3PAR_CPG), - 
mock.call.getHost(self.FAKE_HOST), - mock.call.queryHost(wwns=['123456789012345', - '123456789054321']), - mock.call.getHost(self.FAKE_HOST), - mock.call.getPorts(), - mock.call.getHostVLUNs(self.FAKE_HOST), - mock.call.getPorts(), - mock.call.createVLUN( - self.VOLUME_3PAR_NAME, - auto=True, - hostname=self.FAKE_HOST, - lun=None, - portPos={'node': 7, 'slot': 1, 'cardPort': 1}), - mock.call.getHostVLUNs(self.FAKE_HOST), - mock.call.createVLUN( - self.VOLUME_3PAR_NAME, - auto=False, - hostname=self.FAKE_HOST, - lun=90, - portPos={'node': 6, 'slot': 1, 'cardPort': 1}), - mock.call.getHostVLUNs(self.FAKE_HOST)] - - mock_client.assert_has_calls( - self.standard_login + - expected + - self.standard_logout) - - self.assertDictEqual(expected_properties, result) - - @mock.patch('cinder.zonemanager.utils.create_lookup_service') - def test_initialize_connection_with_lookup_single_nsp(self, mock_lookup): - # setup_mock_client drive with default configuration - # and return the mock HTTP 3PAR client - class fake_lookup_object(object): - def get_device_mapping_from_network(self, connector, target_wwns): - fake_map = { - 'FAB_1': { - 'target_port_wwn_list': ['0987654321234'], - 'initiator_port_wwn_list': ['123456789012345'] - } - } - return fake_map - mock_lookup.return_value = fake_lookup_object() - mock_client = self.setup_driver() - mock_client.getVolume.return_value = {'userCPG': HPE3PAR_CPG} - mock_client.getCPG.return_value = {} - mock_client.getHost.side_effect = [ - hpeexceptions.HTTPNotFound('fake'), - {'name': self.FAKE_HOST, - 'FCPaths': [{'driverVersion': None, - 'firmwareVersion': None, - 'hostSpeed': 0, - 'model': None, - 'portPos': {'cardPort': 1, 'node': 1, - 'slot': 2}, - 'vendor': None, - 'wwn': self.wwn[0]}]}] - mock_client.queryHost.return_value = { - 'members': [{ - 'name': self.FAKE_HOST - }] - } - - mock_client.getHostVLUNs.side_effect = [ - hpeexceptions.HTTPNotFound('fake'), - [{'active': True, - 'volumeName': self.VOLUME_3PAR_NAME, - 'lun': 90, 'type': 
0, - 'portPos': {'cardPort': 1, 'node': 7, 'slot': 1}}]] - - location = ("%(volume_name)s,%(lun_id)s,%(host)s,%(nsp)s" % - {'volume_name': self.VOLUME_3PAR_NAME, - 'lun_id': 90, - 'host': self.FAKE_HOST, - 'nsp': 'something'}) - mock_client.createVLUN.return_value = location - - connector = {'ip': '10.0.0.2', - 'initiator': 'iqn.1993-08.org.debian:01:222', - 'wwpns': [self.wwn[0]], - 'wwnns': ["223456789012345"], - 'host': self.FAKE_HOST} - - expected_properties = { - 'driver_volume_type': 'fibre_channel', - 'data': { - 'encrypted': False, - 'target_lun': 90, - 'target_wwn': ['0987654321234'], - 'target_discovered': True, - 'initiator_target_map': {'123456789012345': - ['0987654321234'] - }}} - - with mock.patch.object(hpecommon.HPE3PARCommon, - '_create_client') as mock_create_client: - mock_create_client.return_value = mock_client - result = self.driver.initialize_connection(self.volume, connector) - - expected = [ - mock.call.getVolume(self.VOLUME_3PAR_NAME), - mock.call.getCPG(HPE3PAR_CPG), - mock.call.getHost(self.FAKE_HOST), - mock.ANY, - mock.call.getHost(self.FAKE_HOST), - mock.call.getPorts(), - mock.call.getHostVLUNs(self.FAKE_HOST), - mock.call.getPorts(), - mock.call.createVLUN( - self.VOLUME_3PAR_NAME, - auto=True, - hostname=self.FAKE_HOST, - portPos={'node': 7, 'slot': 1, 'cardPort': 1}, - lun=None), - mock.call.getHostVLUNs(self.FAKE_HOST)] - - mock_client.assert_has_calls( - self.standard_login + - expected + - self.standard_logout) - - self.assertDictEqual(expected_properties, result) - - def test_initialize_connection_encrypted(self): - # setup_mock_client drive with default configuration - # and return the mock HTTP 3PAR client - mock_client = self.setup_driver() - mock_client.getVolume.return_value = {'userCPG': HPE3PAR_CPG} - mock_client.getCPG.return_value = {} - mock_client.getHost.side_effect = [ - hpeexceptions.HTTPNotFound('fake'), - {'name': self.FAKE_HOST, - 'FCPaths': [{'driverVersion': None, - 'firmwareVersion': None, - 'hostSpeed': 
0, - 'model': None, - 'portPos': {'cardPort': 1, 'node': 7, - 'slot': 1}, - 'vendor': None, - 'wwn': self.wwn[0]}, - {'driverVersion': None, - 'firmwareVersion': None, - 'hostSpeed': 0, - 'model': None, - 'portPos': {'cardPort': 1, 'node': 6, - 'slot': 1}, - 'vendor': None, - 'wwn': self.wwn[1]}]}] - mock_client.queryHost.return_value = { - 'members': [{ - 'name': self.FAKE_HOST - }] - } - - mock_client.getHostVLUNs.side_effect = [ - hpeexceptions.HTTPNotFound('fake'), - [{'active': True, - 'volumeName': self.VOLUME_3PAR_NAME, - 'lun': 90, 'type': 0, - 'remoteName': self.wwn[1], - 'portPos': {'cardPort': 1, 'node': 7, 'slot': 1}}], - [{'active': True, - 'volumeName': self.VOLUME_3PAR_NAME, - 'portPos': {'node': 6, 'slot': 1, 'cardPort': 1}, - 'remoteName': self.wwn[0], - 'lun': 90, 'type': 0}]] - - location = ("%(volume_name)s,%(lun_id)s,%(host)s,%(nsp)s" % - {'volume_name': self.VOLUME_3PAR_NAME, - 'lun_id': 90, - 'host': self.FAKE_HOST, - 'nsp': 'something'}) - mock_client.createVLUN.return_value = location - expected_properties = { - 'driver_volume_type': 'fibre_channel', - 'data': { - 'encrypted': True, - 'target_lun': 90, - 'target_wwn': ['0987654321234', '123456789000987'], - 'target_discovered': True, - 'initiator_target_map': - {'123456789012345': ['123456789000987'], - '123456789054321': ['0987654321234']}}} - - with mock.patch.object(hpecommon.HPE3PARCommon, - '_create_client') as mock_create_client: - mock_create_client.return_value = mock_client - result = self.driver.initialize_connection( - self.volume_encrypted, - self.connector) - - expected = [ - mock.call.getVolume(self.VOLUME_3PAR_NAME), - mock.call.getCPG(HPE3PAR_CPG), - mock.call.getHost(self.FAKE_HOST), - mock.call.queryHost(wwns=['123456789012345', - '123456789054321']), - mock.call.getHost(self.FAKE_HOST), - mock.call.getPorts(), - mock.call.getHostVLUNs(self.FAKE_HOST), - mock.call.getPorts(), - mock.call.createVLUN( - self.VOLUME_3PAR_NAME, - auto=True, - hostname=self.FAKE_HOST, - 
lun=None, - portPos={'node': 7, 'slot': 1, 'cardPort': 1}), - mock.call.getHostVLUNs(self.FAKE_HOST), - mock.call.createVLUN( - self.VOLUME_3PAR_NAME, - auto=False, - hostname=self.FAKE_HOST, - lun=90, - portPos={'node': 6, 'slot': 1, 'cardPort': 1}), - mock.call.getHostVLUNs(self.FAKE_HOST)] - - mock_client.assert_has_calls( - self.standard_login + - expected + - self.standard_logout) - - self.assertDictEqual(expected_properties, result) - - def test_terminate_connection(self): - # setup_mock_client drive with default configuration - # and return the mock HTTP 3PAR client - mock_client = self.setup_driver() - - effects = [ - [{'active': False, 'volumeName': self.VOLUME_3PAR_NAME, - 'lun': None, 'type': 0}], - hpeexceptions.HTTPNotFound, - hpeexceptions.HTTPNotFound] - - mock_client.getHostVLUNs.side_effect = effects - - mock_client.queryHost.return_value = { - 'members': [{ - 'name': self.FAKE_HOST - }] - } - - expected = [ - mock.call.queryHost(wwns=['123456789012345', '123456789054321']), - mock.call.getHostVLUNs(self.FAKE_HOST), - mock.call.deleteVLUN( - self.VOLUME_3PAR_NAME, - None, - hostname=self.FAKE_HOST), - mock.call.getHostVLUNs(self.FAKE_HOST), - mock.call.deleteHost(self.FAKE_HOST), - mock.call.getHostVLUNs(self.FAKE_HOST), - mock.call.getPorts()] - - with mock.patch.object(hpecommon.HPE3PARCommon, - '_create_client') as mock_create_client: - mock_create_client.return_value = mock_client - conn_info = self.driver.terminate_connection(self.volume, - self.connector) - mock_client.assert_has_calls( - self.standard_login + - expected + - self.standard_logout) - self.assertIn('data', conn_info) - self.assertIn('initiator_target_map', conn_info['data']) - mock_client.reset_mock() - - mock_client.getHostVLUNs.side_effect = effects - - # mock some deleteHost exceptions that are handled - delete_with_vlun = hpeexceptions.HTTPConflict( - error={'message': "has exported VLUN"}) - delete_with_hostset = hpeexceptions.HTTPConflict( - error={'message': "host is a 
member of a set"}) - mock_client.deleteHost = mock.Mock( - side_effect=[delete_with_vlun, delete_with_hostset]) - - conn_info = self.driver.terminate_connection(self.volume, - self.connector) - mock_client.assert_has_calls( - self.standard_login + - expected + - self.standard_logout) - mock_client.reset_mock() - mock_client.getHostVLUNs.side_effect = effects - - conn_info = self.driver.terminate_connection(self.volume, - self.connector) - mock_client.assert_has_calls( - self.standard_login + - expected + - self.standard_logout) - - @mock.patch('cinder.zonemanager.utils.create_lookup_service') - def test_terminate_connection_with_lookup(self, mock_lookup): - # setup_mock_client drive with default configuration - # and return the mock HTTP 3PAR client - class fake_lookup_object(object): - def get_device_mapping_from_network(self, connector, target_wwns): - fake_map = { - 'FAB_1': { - 'target_port_wwn_list': ['0987654321234'], - 'initiator_port_wwn_list': ['123456789012345'] - } - } - return fake_map - mock_lookup.return_value = fake_lookup_object() - mock_client = self.setup_driver() - - effects = [ - [{'active': False, 'volumeName': self.VOLUME_3PAR_NAME, - 'lun': None, 'type': 0}], - hpeexceptions.HTTPNotFound, - hpeexceptions.HTTPNotFound] - - mock_client.queryHost.return_value = { - 'members': [{ - 'name': self.FAKE_HOST - }] - } - - mock_client.getHostVLUNs.side_effect = effects - - expected = [ - mock.call.queryHost(wwns=['123456789012345', '123456789054321']), - mock.call.getHostVLUNs(self.FAKE_HOST), - mock.call.deleteVLUN( - self.VOLUME_3PAR_NAME, - None, - hostname=self.FAKE_HOST), - mock.call.getHostVLUNs(self.FAKE_HOST), - mock.call.deleteHost(self.FAKE_HOST), - mock.call.getHostVLUNs(self.FAKE_HOST), - mock.call.getPorts()] - - with mock.patch.object(hpecommon.HPE3PARCommon, - '_create_client') as mock_create_client: - mock_create_client.return_value = mock_client - conn_info = self.driver.terminate_connection(self.volume, - self.connector) - 
mock_client.assert_has_calls( - self.standard_login + - expected + - self.standard_logout) - self.assertIn('data', conn_info) - self.assertIn('initiator_target_map', conn_info['data']) - mock_client.reset_mock() - - mock_client.getHostVLUNs.side_effect = effects - - # mock some deleteHost exceptions that are handled - delete_with_vlun = hpeexceptions.HTTPConflict( - error={'message': "has exported VLUN"}) - delete_with_hostset = hpeexceptions.HTTPConflict( - error={'message': "host is a member of a set"}) - mock_client.deleteHost = mock.Mock( - side_effect=[delete_with_vlun, delete_with_hostset]) - - conn_info = self.driver.terminate_connection(self.volume, - self.connector) - mock_client.assert_has_calls( - self.standard_login + - expected + - self.standard_logout) - mock_client.reset_mock() - mock_client.getHostVLUNs.side_effect = effects - - conn_info = self.driver.terminate_connection(self.volume, - self.connector) - mock_client.assert_has_calls( - self.standard_login + - expected + - self.standard_logout) - - def test_terminate_connection_more_vols(self): - mock_client = self.setup_driver() - # mock more than one vlun on the host (don't even try to remove host) - mock_client.getHostVLUNs.return_value = \ - [ - {'active': False, - 'volumeName': self.VOLUME_3PAR_NAME, - 'lun': None, 'type': 0}, - {'active': True, - 'volumeName': 'there-is-another-volume', - 'lun': None, 'type': 0}, - ] - - mock_client.queryHost.return_value = { - 'members': [{ - 'name': self.FAKE_HOST - }] - } - - expect_less = [ - mock.call.queryHost(wwns=['123456789012345', '123456789054321']), - mock.call.getHostVLUNs(self.FAKE_HOST), - mock.call.deleteVLUN( - self.VOLUME_3PAR_NAME, - None, - hostname=self.FAKE_HOST), - mock.call.getHostVLUNs(self.FAKE_HOST), - mock.call.getHostVLUNs(self.FAKE_HOST)] - - with mock.patch.object(hpecommon.HPE3PARCommon, - '_create_client') as mock_create_client: - mock_create_client.return_value = mock_client - conn_info = 
self.driver.terminate_connection(self.volume, - self.connector) - mock_client.assert_has_calls( - self.standard_login + - expect_less + - self.standard_logout) - self.assertNotIn('initiator_target_map', conn_info['data']) - - def test_get_3par_host_from_wwn_iqn(self): - mock_client = self.setup_driver() - mock_client.getHosts.return_value = { - 'name': self.FAKE_HOST, - 'FCPaths': [{'driverVersion': None, - 'firmwareVersion': None, - 'hostSpeed': 0, - 'model': None, - 'portPos': {'cardPort': 1, 'node': 1, - 'slot': 2}, - 'vendor': None, - 'wwn': '123ab6789012345'}]} - with mock.patch.object(hpecommon.HPE3PARCommon, - '_create_client') as mock_create_client: - mock_create_client.return_value = mock_client - hostname = mock_client._get_3par_hostname_from_wwn_iqn( - wwns=['123AB6789012345', '123CD6789054321'], - iqns=None) - self.assertIsNotNone(hostname) - - def test_get_volume_stats1(self): - # setup_mock_client drive with the configuration - # and return the mock HTTP 3PAR client - config = self.setup_configuration() - config.filter_function = FILTER_FUNCTION - config.goodness_function = GOODNESS_FUNCTION - mock_client = self.setup_driver(config=config) - mock_client.getCPG.return_value = self.cpgs[0] - # Purposely left out the Priority Optimization license in - # getStorageSystemInfo to test that QoS_support returns False. 
- mock_client.getStorageSystemInfo.return_value = { - 'id': self.CLIENT_ID, - 'serialNumber': '1234', - 'licenseInfo': { - 'licenses': [{'name': 'Remote Copy'}, - {'name': 'Thin Provisioning (102400G)'}, - {'name': 'System Reporter'}] - } - } - - # cpg has no limit - mock_client.getCPGAvailableSpace.return_value = { - "capacityEfficiency": {u'compaction': 594.4}, - "rawFreeMiB": 1024.0 * 6, - "usableFreeMiB": 1024.0 * 3 - } - stat_capabilities = { - THROUGHPUT: 0, - BANDWIDTH: 0, - LATENCY: 0, - IO_SIZE: 0, - QUEUE_LENGTH: 0, - AVG_BUSY_PERC: 0 - } - - mock_client.getCPGStatData.return_value = stat_capabilities - - with mock.patch.object(hpecommon.HPE3PARCommon, - '_create_client') as mock_create_client: - mock_create_client.return_value = mock_client - common = self.driver._login() - - stats = self.driver.get_volume_stats(True) - const = 0.0009765625 - self.assertEqual('FC', stats['storage_protocol']) - self.assertEqual('12345', stats['array_id']) - self.assertTrue(stats['pools'][0]['thin_provisioning_support']) - self.assertTrue(stats['pools'][0]['thick_provisioning_support']) - self.assertFalse(stats['pools'][0]['QoS_support']) - self.assertEqual(86.0, - stats['pools'][0]['provisioned_capacity_gb']) - self.assertEqual(24.0, stats['pools'][0]['total_capacity_gb']) - self.assertEqual(3.0, stats['pools'][0]['free_capacity_gb']) - self.assertEqual(87.5, stats['pools'][0]['capacity_utilization']) - self.assertEqual(3, stats['pools'][0]['total_volumes']) - self.assertEqual(GOODNESS_FUNCTION, - stats['pools'][0]['goodness_function']) - self.assertEqual(FILTER_FUNCTION, - stats['pools'][0]['filter_function']) - self.assertEqual(stat_capabilities[THROUGHPUT], - stats['pools'][0][THROUGHPUT]) - self.assertEqual(stat_capabilities[BANDWIDTH], - stats['pools'][0][BANDWIDTH]) - self.assertEqual(stat_capabilities[LATENCY], - stats['pools'][0][LATENCY]) - self.assertEqual(stat_capabilities[IO_SIZE], - stats['pools'][0][IO_SIZE]) - 
self.assertEqual(stat_capabilities[QUEUE_LENGTH], - stats['pools'][0][QUEUE_LENGTH]) - self.assertEqual(stat_capabilities[AVG_BUSY_PERC], - stats['pools'][0][AVG_BUSY_PERC]) - - expected = [ - mock.call.getStorageSystemInfo(), - mock.call.getCPG(HPE3PAR_CPG), - mock.call.getCPGStatData(HPE3PAR_CPG, 'daily', '7d'), - mock.call.getCPGAvailableSpace(HPE3PAR_CPG), - mock.call.getCPG(HPE3PAR_CPG2), - mock.call.getCPGStatData(HPE3PAR_CPG2, 'daily', '7d'), - mock.call.getCPGAvailableSpace(HPE3PAR_CPG2)] - - mock_client.assert_has_calls( - self.get_id_login + - self.standard_logout + - self.standard_login + - expected + - self.standard_logout) - stats = self.driver.get_volume_stats(True) - self.assertEqual('FC', stats['storage_protocol']) - self.assertEqual('12345', stats['array_id']) - self.assertTrue(stats['pools'][0]['thin_provisioning_support']) - self.assertTrue(stats['pools'][0]['thick_provisioning_support']) - self.assertFalse(stats['pools'][0]['QoS_support']) - self.assertEqual(86.0, - stats['pools'][0]['provisioned_capacity_gb']) - self.assertEqual(24.0, stats['pools'][0]['total_capacity_gb']) - self.assertEqual(3.0, stats['pools'][0]['free_capacity_gb']) - self.assertEqual(87.5, stats['pools'][0]['capacity_utilization']) - self.assertEqual(3, stats['pools'][0]['total_volumes']) - self.assertEqual(GOODNESS_FUNCTION, - stats['pools'][0]['goodness_function']) - self.assertEqual(FILTER_FUNCTION, - stats['pools'][0]['filter_function']) - self.assertEqual(stat_capabilities[THROUGHPUT], - stats['pools'][0][THROUGHPUT]) - self.assertEqual(stat_capabilities[BANDWIDTH], - stats['pools'][0][BANDWIDTH]) - self.assertEqual(stat_capabilities[LATENCY], - stats['pools'][0][LATENCY]) - self.assertEqual(stat_capabilities[IO_SIZE], - stats['pools'][0][IO_SIZE]) - self.assertEqual(stat_capabilities[QUEUE_LENGTH], - stats['pools'][0][QUEUE_LENGTH]) - self.assertEqual(stat_capabilities[AVG_BUSY_PERC], - stats['pools'][0][AVG_BUSY_PERC]) - - cpg2 = self.cpgs[0].copy() - 
cpg2.update({'SDGrowth': {'limitMiB': 8192}}) - mock_client.getCPG.return_value = cpg2 - - stats = self.driver.get_volume_stats(True) - self.assertEqual('FC', stats['storage_protocol']) - self.assertEqual('12345', stats['array_id']) - self.assertTrue(stats['pools'][0]['thin_provisioning_support']) - self.assertTrue(stats['pools'][0]['thick_provisioning_support']) - self.assertFalse(stats['pools'][0]['QoS_support']) - total_capacity_gb = 8192 * const - self.assertEqual(total_capacity_gb, - stats['pools'][0]['total_capacity_gb']) - free_capacity_gb = int( - (8192 - (self.cpgs[0]['UsrUsage']['usedMiB'] + - self.cpgs[0]['SDUsage']['usedMiB'])) * const) - self.assertEqual(free_capacity_gb, - stats['pools'][0]['free_capacity_gb']) - provisioned_capacity_gb = int( - (self.cpgs[0]['UsrUsage']['totalMiB'] + - self.cpgs[0]['SAUsage']['totalMiB'] + - self.cpgs[0]['SDUsage']['totalMiB']) * const) - self.assertEqual(provisioned_capacity_gb, - stats['pools'][0]['provisioned_capacity_gb']) - cap_util = (float(total_capacity_gb - free_capacity_gb) / - float(total_capacity_gb)) * 100 - self.assertEqual(cap_util, - stats['pools'][0]['capacity_utilization']) - self.assertEqual(3, stats['pools'][0]['total_volumes']) - self.assertEqual(GOODNESS_FUNCTION, - stats['pools'][0]['goodness_function']) - self.assertEqual(FILTER_FUNCTION, - stats['pools'][0]['filter_function']) - self.assertEqual(stat_capabilities[THROUGHPUT], - stats['pools'][0][THROUGHPUT]) - self.assertEqual(stat_capabilities[BANDWIDTH], - stats['pools'][0][BANDWIDTH]) - self.assertEqual(stat_capabilities[LATENCY], - stats['pools'][0][LATENCY]) - self.assertEqual(stat_capabilities[IO_SIZE], - stats['pools'][0][IO_SIZE]) - self.assertEqual(stat_capabilities[QUEUE_LENGTH], - stats['pools'][0][QUEUE_LENGTH]) - self.assertEqual(stat_capabilities[AVG_BUSY_PERC], - stats['pools'][0][AVG_BUSY_PERC]) - common.client.deleteCPG(HPE3PAR_CPG) - common.client.createCPG(HPE3PAR_CPG, {}) - - def test_get_volume_stats2(self): - # Testing 
when the API_VERSION is incompatible with getCPGStatData - srstatld_api_version = 30201200 - pre_srstatld_api_version = srstatld_api_version - 1 - wsapi = {'build': pre_srstatld_api_version} - config = self.setup_configuration() - config.filter_function = FILTER_FUNCTION - config.goodness_function = GOODNESS_FUNCTION - mock_client = self.setup_driver(config=config, wsapi_version=wsapi) - mock_client.getCPG.return_value = self.cpgs[0] - # Purposely left out the Thin Provisioning license in - # getStorageSystemInfo to test that thin_provisioning_support returns - # False. - mock_client.getStorageSystemInfo.return_value = { - 'id': self.CLIENT_ID, - 'serialNumber': '1234', - 'licenseInfo': { - 'licenses': [{'name': 'Remote Copy'}, - {'name': 'Priority Optimization'}] - } - } - - # cpg has no limit - mock_client.getCPGAvailableSpace.return_value = { - "capacityEfficiency": {u'compaction': 594.4}, - "rawFreeMiB": 1024.0 * 6, - "usableFreeMiB": 1024.0 * 3 - } - - with mock.patch.object(hpecommon.HPE3PARCommon, - '_create_client') as mock_create_client: - mock_create_client.return_value = mock_client - self.driver._login() - - stats = self.driver.get_volume_stats(True) - self.assertEqual('FC', stats['storage_protocol']) - self.assertEqual('12345', stats['array_id']) - self.assertFalse(stats['pools'][0]['thin_provisioning_support']) - self.assertTrue(stats['pools'][0]['QoS_support']) - self.assertEqual(24.0, stats['pools'][0]['total_capacity_gb']) - self.assertEqual(3.0, stats['pools'][0]['free_capacity_gb']) - self.assertEqual(87.5, stats['pools'][0]['capacity_utilization']) - self.assertEqual(3, stats['pools'][0]['total_volumes']) - self.assertEqual(GOODNESS_FUNCTION, - stats['pools'][0]['goodness_function']) - self.assertEqual(FILTER_FUNCTION, - stats['pools'][0]['filter_function']) - self.assertIsNone(stats['pools'][0][THROUGHPUT]) - self.assertIsNone(stats['pools'][0][BANDWIDTH]) - self.assertIsNone(stats['pools'][0][LATENCY]) - 
self.assertIsNone(stats['pools'][0][IO_SIZE]) - self.assertIsNone(stats['pools'][0][QUEUE_LENGTH]) - self.assertIsNone(stats['pools'][0][AVG_BUSY_PERC]) - - expected = [ - mock.call.getStorageSystemInfo(), - mock.call.getCPG(HPE3PAR_CPG), - mock.call.getCPGAvailableSpace(HPE3PAR_CPG), - mock.call.getCPG(HPE3PAR_CPG2), - mock.call.getCPGAvailableSpace(HPE3PAR_CPG2)] - - mock_client.assert_has_calls( - self.get_id_login + - self.standard_logout + - self.standard_login + - expected + - self.standard_logout) - - def test_get_volume_stats3(self): - # Testing when the client version is incompatible with getCPGStatData - # setup_mock_client drive with the configuration - # and return the mock HTTP 3PAR client - config = self.setup_configuration() - config.filter_function = FILTER_FUNCTION - config.goodness_function = GOODNESS_FUNCTION - mock_client = self.setup_driver(config=config, - wsapi_version=self.wsapi_version_312) - mock_client.getCPG.return_value = self.cpgs[0] - mock_client.getStorageSystemInfo.return_value = { - 'id': self.CLIENT_ID, - 'serialNumber': '1234' - } - - # cpg has no limit - mock_client.getCPGAvailableSpace.return_value = { - "capacityEfficiency": {u'compaction': 594.4}, - "rawFreeMiB": 1024.0 * 6, - "usableFreeMiB": 1024.0 * 3 - } - - with mock.patch.object(hpecommon.HPE3PARCommon, - '_create_client') as mock_create_client: - mock_create_client.return_value = mock_client - self.driver._login() - - stats = self.driver.get_volume_stats(True) - self.assertEqual('FC', stats['storage_protocol']) - self.assertEqual('12345', stats['array_id']) - self.assertEqual(24.0, stats['pools'][0]['total_capacity_gb']) - self.assertEqual(3.0, stats['pools'][0]['free_capacity_gb']) - self.assertEqual(87.5, stats['pools'][0]['capacity_utilization']) - self.assertEqual(3, stats['pools'][0]['total_volumes']) - self.assertEqual(GOODNESS_FUNCTION, - stats['pools'][0]['goodness_function']) - self.assertEqual(FILTER_FUNCTION, - stats['pools'][0]['filter_function']) - 
self.assertIsNone(stats['pools'][0][THROUGHPUT]) - self.assertIsNone(stats['pools'][0][BANDWIDTH]) - self.assertIsNone(stats['pools'][0][LATENCY]) - self.assertIsNone(stats['pools'][0][IO_SIZE]) - self.assertIsNone(stats['pools'][0][QUEUE_LENGTH]) - self.assertIsNone(stats['pools'][0][AVG_BUSY_PERC]) - - expected = [ - mock.call.getStorageSystemInfo(), - mock.call.getCPG(HPE3PAR_CPG), - mock.call.getCPGAvailableSpace(HPE3PAR_CPG), - mock.call.getCPG(HPE3PAR_CPG2), - mock.call.getCPGAvailableSpace(HPE3PAR_CPG2)] - - mock_client.assert_has_calls( - self.get_id_login + - self.standard_logout + - self.standard_login + - expected + - self.standard_logout) - - def test_get_volume_stats4(self): - # Testing get_volume_stats() when System Reporter license is not active - # setup_mock_client drive with the configuration - # and return the mock HTTP 3PAR client - config = self.setup_configuration() - config.filter_function = FILTER_FUNCTION - config.goodness_function = GOODNESS_FUNCTION - mock_client = self.setup_driver(config=config) - mock_client.getCPG.return_value = self.cpgs[0] - # Purposely left out the System Reporter license in - # getStorageSystemInfo to test sr_support - mock_client.getStorageSystemInfo.return_value = { - 'id': self.CLIENT_ID, - 'serialNumber': '1234', - 'licenseInfo': { - 'licenses': [{'name': 'Remote Copy'}, - {'name': 'Priority Optimization'}, - {'name': 'Thin Provisioning'}] - } - } - - # cpg has no limit - mock_client.getCPGAvailableSpace.return_value = { - "capacityEfficiency": {u'compaction': 594.4}, - "rawFreeMiB": 1024.0 * 6, - "usableFreeMiB": 1024.0 * 3 - } - - with mock.patch.object(hpecommon.HPE3PARCommon, - '_create_client') as mock_create_client: - mock_create_client.return_value = mock_client - - stats = self.driver.get_volume_stats(True) - self.assertEqual('FC', stats['storage_protocol']) - self.assertEqual('12345', stats['array_id']) - self.assertTrue(stats['pools'][0]['thin_provisioning_support']) - 
self.assertTrue(stats['pools'][0]['QoS_support']) - self.assertEqual(24.0, stats['pools'][0]['total_capacity_gb']) - self.assertEqual(3.0, stats['pools'][0]['free_capacity_gb']) - self.assertEqual(87.5, stats['pools'][0]['capacity_utilization']) - self.assertEqual(3, stats['pools'][0]['total_volumes']) - self.assertEqual(GOODNESS_FUNCTION, - stats['pools'][0]['goodness_function']) - self.assertEqual(FILTER_FUNCTION, - stats['pools'][0]['filter_function']) - self.assertIsNone(stats['pools'][0][THROUGHPUT]) - self.assertIsNone(stats['pools'][0][BANDWIDTH]) - self.assertIsNone(stats['pools'][0][LATENCY]) - self.assertIsNone(stats['pools'][0][IO_SIZE]) - self.assertIsNone(stats['pools'][0][QUEUE_LENGTH]) - self.assertIsNone(stats['pools'][0][AVG_BUSY_PERC]) - - expected = [ - mock.call.getStorageSystemInfo(), - mock.call.getCPG(HPE3PAR_CPG), - mock.call.getCPGAvailableSpace(HPE3PAR_CPG), - mock.call.getCPG(HPE3PAR_CPG2), - mock.call.getCPGAvailableSpace(HPE3PAR_CPG2)] - - mock_client.assert_has_calls( - self.get_id_login + - self.standard_logout + - self.standard_login + - expected + - self.standard_logout) - - def test_create_host_with_unmanage_fc_and_manage_iscsi_hosts(self): - # setup_mock_client drive with default configuration - # and return the mock HTTP 3PAR client - mock_client = self.setup_driver() - mock_client.getVolume.return_value = {'userCPG': HPE3PAR_CPG} - mock_client.getCPG.return_value = {} - - def get_side_effect(*args): - host = {'name': None} - if args[0] == 'fake': - host['name'] = 'fake' - elif args[0] == self.FAKE_HOST: - host['name'] = self.FAKE_HOST - return host - - mock_client.getHost.side_effect = get_side_effect - mock_client.queryHost.return_value = { - 'members': [{ - 'name': 'fake' - }] - } - mock_client.getVLUN.return_value = {'lun': 186} - with mock.patch.object(hpecommon.HPE3PARCommon, - '_create_client') as mock_create_client: - mock_create_client.return_value = mock_client - common = self.driver._login() - host = 
self.driver._create_host( - common, - self.volume, - self.connector) - expected = [ - mock.call.getVolume('osv-0DM4qZEVSKON-DXN-NwVpw'), - mock.call.getCPG(HPE3PAR_CPG), - mock.call.getHost(self.FAKE_HOST), - mock.call.queryHost(wwns=['123456789012345', - '123456789054321']), - mock.call.getHost('fake')] - - mock_client.assert_has_calls(expected) - - self.assertEqual('fake', host['name']) - - def test_create_host(self): - # setup_mock_client drive with default configuration - # and return the mock HTTP 3PAR client - mock_client = self.setup_driver() - mock_client.getVolume.return_value = {'userCPG': HPE3PAR_CPG} - mock_client.getCPG.return_value = {} - mock_client.getHost.side_effect = [ - hpeexceptions.HTTPNotFound('fake'), - {'name': self.FAKE_HOST, - 'FCPaths': [{'driverVersion': None, - 'firmwareVersion': None, - 'hostSpeed': 0, - 'model': None, - 'portPos': {'cardPort': 1, 'node': 1, - 'slot': 2}, - 'vendor': None, - 'wwn': self.wwn[0]}, - {'driverVersion': None, - 'firmwareVersion': None, - 'hostSpeed': 0, - 'model': None, - 'portPos': {'cardPort': 1, 'node': 0, - 'slot': 2}, - 'vendor': None, - 'wwn': self.wwn[1]}]}] - mock_client.queryHost.return_value = None - mock_client.getVLUN.return_value = {'lun': 186} - with mock.patch.object(hpecommon.HPE3PARCommon, - '_create_client') as mock_create_client: - mock_create_client.return_value = mock_client - common = self.driver._login() - host = self.driver._create_host( - common, - self.volume, - self.connector) - expected = [ - mock.call.getVolume('osv-0DM4qZEVSKON-DXN-NwVpw'), - mock.call.getCPG(HPE3PAR_CPG), - mock.call.getHost(self.FAKE_HOST), - mock.call.queryHost(wwns=['123456789012345', - '123456789054321']), - mock.call.createHost( - self.FAKE_HOST, - FCWwns=['123456789012345', '123456789054321'], - optional={'domain': None, 'persona': 2}), - mock.call.getHost(self.FAKE_HOST)] - - mock_client.assert_has_calls(expected) - - self.assertEqual(self.FAKE_HOST, host['name']) - - def 
test_create_invalid_host(self): - # setup_mock_client drive with default configuration - # and return the mock HTTP 3PAR client - mock_client = self.setup_driver() - - mock_client.getVolume.return_value = {'userCPG': HPE3PAR_CPG} - mock_client.getCPG.return_value = {} - mock_client.getHost.side_effect = [ - hpeexceptions.HTTPNotFound('Host not found.'), { - 'name': 'fakehost.foo', - 'FCPaths': [{'wwn': '123456789012345'}, { - 'wwn': '123456789054321'}]}] - mock_client.queryHost.return_value = { - 'members': [{ - 'name': 'fakehost.foo' - }] - } - - with mock.patch.object(hpecommon.HPE3PARCommon, - '_create_client') as mock_create_client: - mock_create_client.return_value = mock_client - common = self.driver._login() - host = self.driver._create_host( - common, - self.volume, - self.connector) - - expected = [ - mock.call.getVolume('osv-0DM4qZEVSKON-DXN-NwVpw'), - mock.call.getCPG(HPE3PAR_CPG), - mock.call.getHost('fakehost'), - mock.call.queryHost(wwns=['123456789012345', - '123456789054321']), - mock.call.getHost('fakehost.foo')] - - mock_client.assert_has_calls(expected) - - self.assertEqual('fakehost.foo', host['name']) - - def test_create_host_concurrent(self): - # tests concurrent requests of create host - # setup_mock_client driver with default configuration - # and return the mock HTTP 3PAR client - mock_client = self.setup_driver() - mock_client.getVolume.return_value = {'userCPG': HPE3PAR_CPG} - mock_client.getCPG.return_value = {} - mock_client.queryHost.side_effect = [ - None, - {'members': [{'name': self.FAKE_HOST}] - }] - mock_client.createHost.side_effect = [ - hpeexceptions.HTTPConflict( - {'code': EXISTENT_PATH, - 'desc': 'host WWN/iSCSI name already used by another host'})] - mock_client.getHost.side_effect = [ - hpeexceptions.HTTPNotFound('fake'), - {'name': self.FAKE_HOST}] - - with mock.patch.object(hpecommon.HPE3PARCommon, - '_create_client') as mock_create_client: - mock_create_client.return_value = mock_client - common = self.driver._login() - 
host = self.driver._create_host( - common, - self.volume, - self.connector) - expected = [ - mock.call.getVolume('osv-0DM4qZEVSKON-DXN-NwVpw'), - mock.call.getCPG(HPE3PAR_CPG), - mock.call.getHost(self.FAKE_HOST), - mock.call.queryHost(wwns=['123456789012345', - '123456789054321']), - mock.call.createHost( - self.FAKE_HOST, - FCWwns=['123456789012345', '123456789054321'], - optional={'domain': None, 'persona': 2}), - mock.call.queryHost(wwns=['123456789012345', - '123456789054321']), - mock.call.getHost(self.FAKE_HOST)] - - mock_client.assert_has_calls(expected) - - self.assertEqual(self.FAKE_HOST, host['name']) - - def test_create_modify_host(self): - # setup_mock_client drive with default configuration - # and return the mock HTTP 3PAR client - mock_client = self.setup_driver() - mock_client.getVolume.return_value = {'userCPG': HPE3PAR_CPG} - mock_client.getCPG.return_value = {} - mock_client.getHost.side_effect = [{ - 'name': self.FAKE_HOST, 'FCPaths': []}, - {'name': self.FAKE_HOST, - 'FCPaths': [{'wwn': '123456789012345'}, { - 'wwn': '123456789054321'}]}] - mock_client.queryHost.return_value = None - - with mock.patch.object(hpecommon.HPE3PARCommon, - '_create_client') as mock_create_client: - mock_create_client.return_value = mock_client - common = self.driver._login() - host = self.driver._create_host( - common, - self.volume, - self.connector) - # On Python 3, hash is randomized, and so set() is used to get - # the expected order - fcwwns = list(set(('123456789054321', '123456789012345'))) - expected = [ - mock.call.getVolume('osv-0DM4qZEVSKON-DXN-NwVpw'), - mock.call.getCPG(HPE3PAR_CPG), - mock.call.getHost('fakehost'), - mock.call.queryHost(wwns=['123456789012345', - '123456789054321']), - mock.call.modifyHost('fakehost', - {'FCWWNs': fcwwns, - 'pathOperation': 1}), - mock.call.getHost('fakehost')] - - mock_client.assert_has_calls(expected) - - self.assertEqual(self.FAKE_HOST, host['name']) - self.assertEqual(2, len(host['FCPaths'])) - - def 
test_modify_host_with_new_wwn(self): - # setup_mock_client drive with default configuration - # and return the mock HTTP 3PAR client - mock_client = self.setup_driver() - mock_client.getVolume.return_value = {'userCPG': HPE3PAR_CPG} - mock_client.getCPG.return_value = {} - getHost_ret1 = { - 'name': self.FAKE_HOST, - 'FCPaths': [{'wwn': '123456789054321'}]} - getHost_ret2 = { - 'name': self.FAKE_HOST, - 'FCPaths': [{'wwn': '123456789012345'}, - {'wwn': '123456789054321'}]} - mock_client.getHost.side_effect = [getHost_ret1, getHost_ret2] - mock_client.queryHost.return_value = None - - with mock.patch.object(hpecommon.HPE3PARCommon, - '_create_client') as mock_create_client: - mock_create_client.return_value = mock_client - common = self.driver._login() - host = self.driver._create_host( - common, - self.volume, - self.connector) - - expected = [ - mock.call.getVolume('osv-0DM4qZEVSKON-DXN-NwVpw'), - mock.call.getCPG(HPE3PAR_CPG), - mock.call.getHost('fakehost'), - mock.call.queryHost(wwns=['123456789012345', - '123456789054321']), - mock.call.modifyHost( - 'fakehost', { - 'FCWWNs': ['123456789012345'], 'pathOperation': 1}), - mock.call.getHost('fakehost')] - - mock_client.assert_has_calls(expected) - - self.assertEqual(self.FAKE_HOST, host['name']) - self.assertEqual(2, len(host['FCPaths'])) - - def test_modify_host_with_unknown_wwn_and_new_wwn(self): - # setup_mock_client drive with default configuration - # and return the mock HTTP 3PAR client - mock_client = self.setup_driver() - mock_client.getVolume.return_value = {'userCPG': HPE3PAR_CPG} - mock_client.getCPG.return_value = {} - getHost_ret1 = { - 'name': self.FAKE_HOST, - 'FCPaths': [{'wwn': '123456789054321'}, - {'wwn': 'xxxxxxxxxxxxxxx'}]} - getHost_ret2 = { - 'name': self.FAKE_HOST, - 'FCPaths': [{'wwn': '123456789012345'}, - {'wwn': '123456789054321'}, - {'wwn': 'xxxxxxxxxxxxxxx'}]} - mock_client.getHost.side_effect = [getHost_ret1, getHost_ret2] - mock_client.queryHost.return_value = None - - with 
mock.patch.object(hpecommon.HPE3PARCommon, - '_create_client') as mock_create_client: - mock_create_client.return_value = mock_client - common = self.driver._login() - host = self.driver._create_host( - common, - self.volume, - self.connector) - - expected = [ - mock.call.getVolume('osv-0DM4qZEVSKON-DXN-NwVpw'), - mock.call.getCPG(HPE3PAR_CPG), - mock.call.getHost('fakehost'), - mock.call.queryHost(wwns=['123456789012345', - '123456789054321']), - mock.call.modifyHost( - 'fakehost', { - 'FCWWNs': ['123456789012345'], 'pathOperation': 1}), - mock.call.getHost('fakehost')] - - mock_client.assert_has_calls(expected) - - self.assertEqual(self.FAKE_HOST, host['name']) - self.assertEqual(3, len(host['FCPaths'])) - - -class TestHPE3PARISCSIDriver(HPE3PARBaseDriver, test.TestCase): - - TARGET_IQN = 'iqn.2000-05.com.3pardata:21810002ac00383d' - TARGET_LUN = 186 - - properties = { - 'driver_volume_type': 'iscsi', - 'data': - {'encrypted': False, - 'target_discovered': True, - 'target_iqn': TARGET_IQN, - 'target_lun': TARGET_LUN, - 'target_portal': '1.1.1.2:1234'}} - - multipath_properties = { - 'driver_volume_type': 'iscsi', - 'data': - {'encrypted': False, - 'target_discovered': True, - 'target_iqns': [TARGET_IQN], - 'target_luns': [TARGET_LUN], - 'target_portals': ['1.1.1.2:1234']}} - - def setup_driver(self, config=None, mock_conf=None, wsapi_version=None): - - self.ctxt = context.get_admin_context() - - mock_client = self.setup_mock_client( - conf=config, - m_conf=mock_conf, - driver=hpedriver.HPE3PARISCSIDriver) - - if wsapi_version: - mock_client.getWsApiVersion.return_value = ( - wsapi_version) - else: - mock_client.getWsApiVersion.return_value = ( - self.wsapi_version_latest) - - expected_get_cpgs = [ - mock.call.getCPG(HPE3PAR_CPG), - mock.call.getCPG(HPE3PAR_CPG2)] - expected_get_ports = [mock.call.getPorts()] - mock_client.assert_has_calls( - self.standard_login + - expected_get_cpgs + - self.standard_logout + - self.standard_login + - expected_get_ports + - 
self.standard_logout) - mock_client.reset_mock() - - return mock_client - - def test_initialize_connection(self): - # setup_mock_client drive with default configuration - # and return the mock HTTP 3PAR client - mock_client = self.setup_driver() - mock_client.getVolume.return_value = {'userCPG': HPE3PAR_CPG} - mock_client.getCPG.return_value = {} - mock_client.getHost.side_effect = [ - hpeexceptions.HTTPNotFound('fake'), - {'name': self.FAKE_HOST}] - mock_client.queryHost.return_value = { - 'members': [{ - 'name': self.FAKE_HOST - }] - } - - mock_client.getHostVLUNs.side_effect = [ - [{'hostname': self.FAKE_HOST, - 'volumeName': self.VOLUME_3PAR_NAME, - 'lun': self.TARGET_LUN, - 'portPos': {'node': 8, 'slot': 1, 'cardPort': 1}}], - [{'active': True, - 'volumeName': self.VOLUME_3PAR_NAME, - 'lun': self.TARGET_LUN, 'type': 0}]] - - location = ("%(volume_name)s,%(lun_id)s,%(host)s,%(nsp)s" % - {'volume_name': self.VOLUME_3PAR_NAME, - 'lun_id': self.TARGET_LUN, - 'host': self.FAKE_HOST, - 'nsp': 'something'}) - mock_client.createVLUN.return_value = location - - with mock.patch.object(hpecommon.HPE3PARCommon, - '_create_client') as mock_create_client: - mock_create_client.return_value = mock_client - result = self.driver.initialize_connection( - self.volume, - self.connector) - - expected = [ - mock.call.getVolume(self.VOLUME_3PAR_NAME), - mock.call.getCPG(HPE3PAR_CPG), - mock.call.getHost(self.FAKE_HOST), - mock.call.queryHost(iqns=['iqn.1993-08.org.debian:01:222']), - mock.call.getHost(self.FAKE_HOST), - mock.call.getHostVLUNs(self.FAKE_HOST)] - - mock_client.assert_has_calls( - self.standard_login + - expected + - self.standard_logout) - - self.assertDictEqual(self.properties, result) - - def test_initialize_connection_multipath(self): - # setup_mock_client drive with default configuration - # and return the mock HTTP 3PAR client - mock_client = self.setup_driver() - mock_client.getVolume.return_value = {'userCPG': HPE3PAR_CPG} - mock_client.getCPG.return_value = {} 
- mock_client.getHost.side_effect = [ - hpeexceptions.HTTPNotFound('fake'), - {'name': self.FAKE_HOST}] - mock_client.queryHost.return_value = { - 'members': [{ - 'name': self.FAKE_HOST - }] - } - - mock_client.getHostVLUNs.side_effect = [ - hpeexceptions.HTTPNotFound('fake'), - [{'active': True, - 'volumeName': self.VOLUME_3PAR_NAME, - 'lun': self.TARGET_LUN, 'type': 0, - 'portPos': {'node': 8, 'slot': 1, 'cardPort': 1}}]] - - location = ("%(volume_name)s,%(lun_id)s,%(host)s,%(nsp)s" % - {'volume_name': self.VOLUME_3PAR_NAME, - 'lun_id': self.TARGET_LUN, - 'host': self.FAKE_HOST, - 'nsp': 'something'}) - mock_client.createVLUN.return_value = location - - mock_client.getiSCSIPorts.return_value = [{ - 'IPAddr': '1.1.1.2', - 'iSCSIName': self.TARGET_IQN, - }] - - with mock.patch.object(hpecommon.HPE3PARCommon, - '_create_client') as mock_create_client: - mock_create_client.return_value = mock_client - result = self.driver.initialize_connection( - self.volume, - self.connector_multipath_enabled) - - expected = [ - mock.call.getVolume(self.VOLUME_3PAR_NAME), - mock.call.getCPG(HPE3PAR_CPG), - mock.call.getHost(self.FAKE_HOST), - mock.call.queryHost(iqns=['iqn.1993-08.org.debian:01:222']), - mock.call.getHost(self.FAKE_HOST), - mock.call.getiSCSIPorts( - state=self.mock_client_conf['PORT_STATE_READY']), - mock.call.getHostVLUNs(self.FAKE_HOST), - mock.call.createVLUN( - self.VOLUME_3PAR_NAME, - auto=True, - hostname=self.FAKE_HOST, - portPos=self.FAKE_ISCSI_PORT['portPos'], - lun=None), - mock.call.getHostVLUNs(self.FAKE_HOST)] - - mock_client.assert_has_calls( - self.standard_login + - expected + - self.standard_logout) - - self.assertDictEqual(self.multipath_properties, result) - - def test_initialize_connection_multipath_existing_nsp(self): - # setup_mock_client drive with default configuration - # and return the mock HTTP 3PAR client - mock_client = self.setup_driver() - mock_client.getVolume.return_value = {'userCPG': HPE3PAR_CPG} - mock_client.getCPG.return_value 
= {} - mock_client.getHost.side_effect = [ - hpeexceptions.HTTPNotFound('fake'), - {'name': self.FAKE_HOST}] - mock_client.queryHost.return_value = { - 'members': [{ - 'name': self.FAKE_HOST - }] - } - - mock_client.getHostVLUNs.side_effect = [ - [{'hostname': self.FAKE_HOST, - 'volumeName': self.VOLUME_3PAR_NAME, - 'lun': self.TARGET_LUN, - 'portPos': {'node': 8, 'slot': 1, 'cardPort': 1}}], - [{'active': True, - 'volumeName': self.VOLUME_3PAR_NAME, - 'lun': self.TARGET_LUN, 'type': 0}]] - - mock_client.getiSCSIPorts.return_value = [{ - 'IPAddr': '1.1.1.2', - 'iSCSIName': self.TARGET_IQN, - }] - - with mock.patch.object(hpecommon.HPE3PARCommon, - '_create_client') as mock_create_client: - mock_create_client.return_value = mock_client - result = self.driver.initialize_connection( - self.volume, - self.connector_multipath_enabled) - - expected = [ - mock.call.getVolume(self.VOLUME_3PAR_NAME), - mock.call.getCPG(HPE3PAR_CPG), - mock.call.getHost(self.FAKE_HOST), - mock.call.queryHost(iqns=['iqn.1993-08.org.debian:01:222']), - mock.call.getHost(self.FAKE_HOST), - mock.call.getiSCSIPorts( - state=self.mock_client_conf['PORT_STATE_READY']), - mock.call.getHostVLUNs(self.FAKE_HOST)] - - mock_client.assert_has_calls( - self.standard_login + - expected + - self.standard_logout) - - self.assertDictEqual(self.multipath_properties, result) - - def test_initialize_connection_encrypted(self): - # setup_mock_client drive with default configuration - # and return the mock HTTP 3PAR client - mock_client = self.setup_driver() - mock_client.getVolume.return_value = {'userCPG': HPE3PAR_CPG} - mock_client.getCPG.return_value = {} - mock_client.getHost.side_effect = [ - hpeexceptions.HTTPNotFound('fake'), - {'name': self.FAKE_HOST}] - mock_client.queryHost.return_value = { - 'members': [{ - 'name': self.FAKE_HOST - }] - } - - mock_client.getHostVLUNs.side_effect = [ - [{'hostname': self.FAKE_HOST, - 'volumeName': self.VOLUME_3PAR_NAME, - 'lun': self.TARGET_LUN, - 'portPos': {'node': 8, 
'slot': 1, 'cardPort': 1}}], - [{'active': True, - 'volumeName': self.VOLUME_3PAR_NAME, - 'lun': self.TARGET_LUN, 'type': 0}]] - - location = ("%(volume_name)s,%(lun_id)s,%(host)s,%(nsp)s" % - {'volume_name': self.VOLUME_3PAR_NAME, - 'lun_id': self.TARGET_LUN, - 'host': self.FAKE_HOST, - 'nsp': 'something'}) - mock_client.createVLUN.return_value = location - - with mock.patch.object(hpecommon.HPE3PARCommon, - '_create_client') as mock_create_client: - mock_create_client.return_value = mock_client - result = self.driver.initialize_connection( - self.volume_encrypted, - self.connector) - - expected = [ - mock.call.getVolume(self.VOLUME_3PAR_NAME), - mock.call.getCPG(HPE3PAR_CPG), - mock.call.getHost(self.FAKE_HOST), - mock.call.queryHost(iqns=['iqn.1993-08.org.debian:01:222']), - mock.call.getHost(self.FAKE_HOST), - mock.call.getHostVLUNs(self.FAKE_HOST)] - - mock_client.assert_has_calls( - self.standard_login + - expected + - self.standard_logout) - - expected_properties = self.properties - expected_properties['data']['encrypted'] = True - self.assertDictEqual(self.properties, result) - - def test_terminate_connection_for_clear_chap_creds_not_found(self): - # setup_mock_client drive with default configuration - # and return the mock HTTP 3PAR client - mock_client = self.setup_driver() - mock_client.getHostVLUNs.return_value = [ - {'active': False, - 'volumeName': self.VOLUME_3PAR_NAME, - 'lun': None, 'type': 0}] - - mock_client.queryHost.return_value = { - 'members': [{ - 'name': self.FAKE_HOST - }] - } - mock_client.getStorageSystemInfo.return_value = { - 'id': self.CLIENT_ID - } - # Test for clear CHAP creds fails with HTTPNotFound - mock_client.removeVolumeMetaData.side_effect = [ - hpeexceptions.HTTPNotFound, - hpeexceptions.HTTPNotFound] - - with mock.patch.object(hpecommon.HPE3PARCommon, - '_create_client') as mock_create_client: - mock_create_client.return_value = mock_client - self.driver.terminate_connection( - self.volume, - self.connector, - force=True) - 
- expected = [ - mock.call.queryHost(iqns=[self.connector['initiator']]), - mock.call.getHostVLUNs(self.FAKE_HOST), - mock.call.deleteVLUN( - self.VOLUME_3PAR_NAME, - None, - hostname=self.FAKE_HOST), - mock.call.getHostVLUNs(self.FAKE_HOST), - mock.call.deleteHost(self.FAKE_HOST), - mock.call.removeVolumeMetaData( - self.VOLUME_3PAR_NAME, CHAP_USER_KEY), - mock.call.removeVolumeMetaData( - self.VOLUME_3PAR_NAME, CHAP_PASS_KEY)] - - mock_client.assert_has_calls( - self.get_id_login + - self.standard_logout + - self.standard_login + - expected + - self.standard_logout) - - def test_terminate_connection_for_clear_chap_user_key_bad_request(self): - # setup_mock_client drive with default configuration - # and return the mock HTTP 3PAR client - mock_client = self.setup_driver() - mock_client.getHostVLUNs.return_value = [ - {'active': False, - 'volumeName': self.VOLUME_3PAR_NAME, - 'lun': None, 'type': 0}] - - mock_client.queryHost.return_value = { - 'members': [{ - 'name': self.FAKE_HOST - }] - } - mock_client.getStorageSystemInfo.return_value = { - 'id': self.CLIENT_ID - } - # Test for CHAP USER KEY fails with HTTPBadRequest - mock_client.removeVolumeMetaData.side_effect = [ - hpeexceptions.HTTPBadRequest] - - with mock.patch.object(hpecommon.HPE3PARCommon, - '_create_client') as mock_create_client: - mock_create_client.return_value = mock_client - - self.assertRaises(hpeexceptions.HTTPBadRequest, - self.driver.terminate_connection, - self.volume, - self.connector, - force=True) - - expected = [ - mock.call.queryHost(iqns=[self.connector['initiator']]), - mock.call.getHostVLUNs(self.FAKE_HOST), - mock.call.deleteVLUN( - self.VOLUME_3PAR_NAME, - None, - hostname=self.FAKE_HOST), - mock.call.getHostVLUNs(self.FAKE_HOST), - mock.call.deleteHost(self.FAKE_HOST), - mock.call.removeVolumeMetaData( - self.VOLUME_3PAR_NAME, CHAP_USER_KEY)] - - mock_client.assert_has_calls( - self.get_id_login + - self.standard_logout + - self.standard_login + - expected + - 
self.standard_logout) - - def test_terminate_connection_for_clear_chap_pass_key_bad_request(self): - # setup_mock_client drive with default configuration - # and return the mock HTTP 3PAR client - mock_client = self.setup_driver() - mock_client.getHostVLUNs.return_value = [ - {'active': False, - 'volumeName': self.VOLUME_3PAR_NAME, - 'lun': None, 'type': 0}] - - mock_client.queryHost.return_value = { - 'members': [{ - 'name': self.FAKE_HOST - }] - } - mock_client.getStorageSystemInfo.return_value = { - 'id': self.CLIENT_ID, - } - # Test for CHAP PASS KEY fails with HTTPBadRequest - mock_client.removeVolumeMetaData.side_effect = [ - None, - hpeexceptions.HTTPBadRequest] - - with mock.patch.object(hpecommon.HPE3PARCommon, - '_create_client') as mock_create_client: - mock_create_client.return_value = mock_client - - self.assertRaises(hpeexceptions.HTTPBadRequest, - self.driver.terminate_connection, - self.volume, - self.connector, - force=True) - - expected = [ - mock.call.queryHost(iqns=[self.connector['initiator']]), - mock.call.getHostVLUNs(self.FAKE_HOST), - mock.call.deleteVLUN( - self.VOLUME_3PAR_NAME, - None, - hostname=self.FAKE_HOST), - mock.call.getHostVLUNs(self.FAKE_HOST), - mock.call.deleteHost(self.FAKE_HOST), - mock.call.removeVolumeMetaData( - self.VOLUME_3PAR_NAME, CHAP_USER_KEY), - mock.call.removeVolumeMetaData( - self.VOLUME_3PAR_NAME, CHAP_PASS_KEY)] - - mock_client.assert_has_calls( - self.get_id_login + - self.standard_logout + - self.standard_login + - expected + - self.standard_logout) - - def test_get_volume_stats(self): - # setup_mock_client drive with the configuration - # and return the mock HTTP 3PAR client - config = self.setup_configuration() - config.filter_function = FILTER_FUNCTION - config.goodness_function = GOODNESS_FUNCTION - mock_client = self.setup_driver(config=config) - mock_client.getCPG.return_value = self.cpgs[0] - mock_client.getStorageSystemInfo.return_value = { - 'id': self.CLIENT_ID, - 'serialNumber': '1234' - } - # 
cpg has no limit - mock_client.getCPGAvailableSpace.return_value = { - "capacityEfficiency": {u'compaction': 594.4}, - "rawFreeMiB": 1024.0 * 6, - "usableFreeMiB": 1024.0 * 3 - } - stat_capabilities = { - THROUGHPUT: 0, - BANDWIDTH: 0, - LATENCY: 0, - IO_SIZE: 0, - QUEUE_LENGTH: 0, - AVG_BUSY_PERC: 0 - } - mock_client.getCPGStatData.return_value = stat_capabilities - - with mock.patch.object(hpecommon.HPE3PARCommon, - '_create_client') as mock_create_client: - mock_create_client.return_value = mock_client - - stats = self.driver.get_volume_stats(True) - const = 0.0009765625 - self.assertEqual('iSCSI', stats['storage_protocol']) - self.assertEqual('12345', stats['array_id']) - self.assertTrue(stats['pools'][0]['thin_provisioning_support']) - self.assertTrue(stats['pools'][0]['thick_provisioning_support']) - self.assertEqual(24.0, stats['pools'][0]['total_capacity_gb']) - self.assertEqual(3.0, stats['pools'][0]['free_capacity_gb']) - self.assertEqual(86.0, - stats['pools'][0]['provisioned_capacity_gb']) - self.assertEqual(87.5, stats['pools'][0]['capacity_utilization']) - self.assertEqual(3, stats['pools'][0]['total_volumes']) - self.assertEqual(GOODNESS_FUNCTION, - stats['pools'][0]['goodness_function']) - self.assertEqual(FILTER_FUNCTION, - stats['pools'][0]['filter_function']) - self.assertEqual(stat_capabilities[THROUGHPUT], - stats['pools'][0][THROUGHPUT]) - self.assertEqual(stat_capabilities[BANDWIDTH], - stats['pools'][0][BANDWIDTH]) - self.assertEqual(stat_capabilities[LATENCY], - stats['pools'][0][LATENCY]) - self.assertEqual(stat_capabilities[IO_SIZE], - stats['pools'][0][IO_SIZE]) - self.assertEqual(stat_capabilities[QUEUE_LENGTH], - stats['pools'][0][QUEUE_LENGTH]) - self.assertEqual(stat_capabilities[AVG_BUSY_PERC], - stats['pools'][0][AVG_BUSY_PERC]) - - expected = [ - mock.call.getStorageSystemInfo(), - mock.call.getCPG(HPE3PAR_CPG), - mock.call.getCPGStatData(HPE3PAR_CPG, 'daily', '7d'), - mock.call.getCPGAvailableSpace(HPE3PAR_CPG), - 
mock.call.getCPG(HPE3PAR_CPG2), - mock.call.getCPGStatData(HPE3PAR_CPG2, 'daily', '7d'), - mock.call.getCPGAvailableSpace(HPE3PAR_CPG2)] - - mock_client.assert_has_calls( - self.get_id_login + - self.standard_logout + - self.standard_login + - expected + - self.standard_logout) - - cpg2 = self.cpgs[0].copy() - cpg2.update({'SDGrowth': {'limitMiB': 8192}}) - mock_client.getCPG.return_value = cpg2 - - stats = self.driver.get_volume_stats(True) - self.assertEqual('iSCSI', stats['storage_protocol']) - self.assertEqual('12345', stats['array_id']) - self.assertTrue(stats['pools'][0]['thin_provisioning_support']) - self.assertTrue(stats['pools'][0]['thick_provisioning_support']) - total_capacity_gb = 8192 * const - self.assertEqual(total_capacity_gb, - stats['pools'][0]['total_capacity_gb']) - free_capacity_gb = int( - (8192 - (self.cpgs[0]['UsrUsage']['usedMiB'] + - self.cpgs[0]['SDUsage']['usedMiB'])) * const) - self.assertEqual(free_capacity_gb, - stats['pools'][0]['free_capacity_gb']) - cap_util = (float(total_capacity_gb - free_capacity_gb) / - float(total_capacity_gb)) * 100 - self.assertEqual(cap_util, - stats['pools'][0]['capacity_utilization']) - provisioned_capacity_gb = int( - (self.cpgs[0]['UsrUsage']['totalMiB'] + - self.cpgs[0]['SAUsage']['totalMiB'] + - self.cpgs[0]['SDUsage']['totalMiB']) * const) - self.assertEqual(provisioned_capacity_gb, - stats['pools'][0]['provisioned_capacity_gb']) - self.assertEqual(3, stats['pools'][0]['total_volumes']) - self.assertEqual(GOODNESS_FUNCTION, - stats['pools'][0]['goodness_function']) - self.assertEqual(FILTER_FUNCTION, - stats['pools'][0]['filter_function']) - self.assertEqual(stat_capabilities[THROUGHPUT], - stats['pools'][0][THROUGHPUT]) - self.assertEqual(stat_capabilities[BANDWIDTH], - stats['pools'][0][BANDWIDTH]) - self.assertEqual(stat_capabilities[LATENCY], - stats['pools'][0][LATENCY]) - self.assertEqual(stat_capabilities[IO_SIZE], - stats['pools'][0][IO_SIZE]) - 
self.assertEqual(stat_capabilities[QUEUE_LENGTH], - stats['pools'][0][QUEUE_LENGTH]) - self.assertEqual(stat_capabilities[AVG_BUSY_PERC], - stats['pools'][0][AVG_BUSY_PERC]) - - def test_get_volume_stats2(self): - # Testing when the API_VERSION is incompatible with getCPGStatData - srstatld_api_version = 30201200 - pre_srstatld_api_version = srstatld_api_version - 1 - wsapi = {'build': pre_srstatld_api_version} - config = self.setup_configuration() - config.filter_function = FILTER_FUNCTION - config.goodness_function = GOODNESS_FUNCTION - mock_client = self.setup_driver(config=config, wsapi_version=wsapi) - mock_client.getCPG.return_value = self.cpgs[0] - mock_client.getStorageSystemInfo.return_value = { - 'id': self.CLIENT_ID, - 'serialNumber': '1234' - } - - # cpg has no limit - mock_client.getCPGAvailableSpace.return_value = { - "capacityEfficiency": {u'compaction': 594.4}, - "rawFreeMiB": 1024.0 * 6, - "usableFreeMiB": 1024.0 * 3 - } - - with mock.patch.object(hpecommon.HPE3PARCommon, - '_create_client') as mock_create_client: - mock_create_client.return_value = mock_client - self.driver._login() - - stats = self.driver.get_volume_stats(True) - self.assertEqual('iSCSI', stats['storage_protocol']) - self.assertEqual('12345', stats['array_id']) - self.assertEqual(24.0, stats['pools'][0]['total_capacity_gb']) - self.assertEqual(3.0, stats['pools'][0]['free_capacity_gb']) - self.assertEqual(87.5, stats['pools'][0]['capacity_utilization']) - self.assertEqual(3, stats['pools'][0]['total_volumes']) - self.assertEqual(GOODNESS_FUNCTION, - stats['pools'][0]['goodness_function']) - self.assertEqual(FILTER_FUNCTION, - stats['pools'][0]['filter_function']) - self.assertIsNone(stats['pools'][0][THROUGHPUT]) - self.assertIsNone(stats['pools'][0][BANDWIDTH]) - self.assertIsNone(stats['pools'][0][LATENCY]) - self.assertIsNone(stats['pools'][0][IO_SIZE]) - self.assertIsNone(stats['pools'][0][QUEUE_LENGTH]) - self.assertIsNone(stats['pools'][0][AVG_BUSY_PERC]) - - expected = [ - 
mock.call.getStorageSystemInfo(), - mock.call.getCPG(HPE3PAR_CPG), - mock.call.getCPGAvailableSpace(HPE3PAR_CPG), - mock.call.getCPG(HPE3PAR_CPG2), - mock.call.getCPGAvailableSpace(HPE3PAR_CPG2)] - - mock_client.assert_has_calls( - self.get_id_login + - self.standard_logout + - self.standard_login + - expected + - self.standard_logout) - - def test_get_volume_stats3(self): - # Testing when the client version is incompatible with getCPGStatData - # setup_mock_client drive with the configuration - # and return the mock HTTP 3PAR client - config = self.setup_configuration() - config.filter_function = FILTER_FUNCTION - config.goodness_function = GOODNESS_FUNCTION - mock_client = self.setup_driver(config=config, - wsapi_version=self.wsapi_version_312) - mock_client.getCPG.return_value = self.cpgs[0] - mock_client.getStorageSystemInfo.return_value = { - 'id': self.CLIENT_ID, - 'serialNumber': '1234' - } - - # cpg has no limit - mock_client.getCPGAvailableSpace.return_value = { - "capacityEfficiency": {u'compaction': 594.4}, - "rawFreeMiB": 1024.0 * 6, - "usableFreeMiB": 1024.0 * 3 - } - - with mock.patch.object(hpecommon.HPE3PARCommon, - '_create_client') as mock_create_client: - mock_create_client.return_value = mock_client - self.driver._login() - - stats = self.driver.get_volume_stats(True) - self.assertEqual('iSCSI', stats['storage_protocol']) - self.assertEqual('12345', stats['array_id']) - self.assertEqual(24.0, stats['pools'][0]['total_capacity_gb']) - self.assertEqual(3.0, stats['pools'][0]['free_capacity_gb']) - self.assertEqual(87.5, stats['pools'][0]['capacity_utilization']) - self.assertEqual(3, stats['pools'][0]['total_volumes']) - self.assertEqual(GOODNESS_FUNCTION, - stats['pools'][0]['goodness_function']) - self.assertEqual(FILTER_FUNCTION, - stats['pools'][0]['filter_function']) - self.assertIsNone(stats['pools'][0][THROUGHPUT]) - self.assertIsNone(stats['pools'][0][BANDWIDTH]) - self.assertIsNone(stats['pools'][0][LATENCY]) - 
self.assertIsNone(stats['pools'][0][IO_SIZE]) - self.assertIsNone(stats['pools'][0][QUEUE_LENGTH]) - self.assertIsNone(stats['pools'][0][AVG_BUSY_PERC]) - - expected = [ - mock.call.getStorageSystemInfo(), - mock.call.getCPG(HPE3PAR_CPG), - mock.call.getCPGAvailableSpace(HPE3PAR_CPG), - mock.call.getCPG(HPE3PAR_CPG2), - mock.call.getCPGAvailableSpace(HPE3PAR_CPG2)] - - mock_client.assert_has_calls( - self.get_id_login + - self.standard_logout + - self.standard_login + - expected + - self.standard_logout) - - def test_get_volume_stats4(self): - # Testing get_volume_stats() when System Reporter license is not active - # setup_mock_client drive with the configuration - # and return the mock HTTP 3PAR client - config = self.setup_configuration() - config.filter_function = FILTER_FUNCTION - config.goodness_function = GOODNESS_FUNCTION - mock_client = self.setup_driver(config=config) - mock_client.getCPG.return_value = self.cpgs[0] - # Purposely left out the System Reporter license in - # getStorageSystemInfo to test sr_support - mock_client.getStorageSystemInfo.return_value = { - 'id': self.CLIENT_ID, - 'serialNumber': '1234', - 'licenseInfo': { - 'licenses': [{'name': 'Remote Copy'}, - {'name': 'Priority Optimization'}, - {'name': 'Thin Provisioning'}] - } - } - - # cpg has no limit - mock_client.getCPGAvailableSpace.return_value = { - "capacityEfficiency": {u'compaction': 594.4}, - "rawFreeMiB": 1024.0 * 6, - "usableFreeMiB": 1024.0 * 3 - } - - with mock.patch.object(hpecommon.HPE3PARCommon, - '_create_client') as mock_create_client: - mock_create_client.return_value = mock_client - - stats = self.driver.get_volume_stats(True) - self.assertEqual('iSCSI', stats['storage_protocol']) - self.assertEqual('12345', stats['array_id']) - self.assertTrue(stats['pools'][0]['thin_provisioning_support']) - self.assertTrue(stats['pools'][0]['QoS_support']) - self.assertEqual(24.0, stats['pools'][0]['total_capacity_gb']) - self.assertEqual(3.0, stats['pools'][0]['free_capacity_gb']) 
- self.assertEqual(87.5, stats['pools'][0]['capacity_utilization']) - self.assertEqual(3, stats['pools'][0]['total_volumes']) - self.assertEqual(GOODNESS_FUNCTION, - stats['pools'][0]['goodness_function']) - self.assertEqual(FILTER_FUNCTION, - stats['pools'][0]['filter_function']) - self.assertIsNone(stats['pools'][0][THROUGHPUT]) - self.assertIsNone(stats['pools'][0][BANDWIDTH]) - self.assertIsNone(stats['pools'][0][LATENCY]) - self.assertIsNone(stats['pools'][0][IO_SIZE]) - self.assertIsNone(stats['pools'][0][QUEUE_LENGTH]) - self.assertIsNone(stats['pools'][0][AVG_BUSY_PERC]) - - expected = [ - mock.call.getStorageSystemInfo(), - mock.call.getCPG(HPE3PAR_CPG), - mock.call.getCPGAvailableSpace(HPE3PAR_CPG), - mock.call.getCPG(HPE3PAR_CPG2), - mock.call.getCPGAvailableSpace(HPE3PAR_CPG2)] - - mock_client.assert_has_calls( - self.get_id_login + - self.standard_logout + - self.standard_login + - expected + - self.standard_logout) - - def test_create_host_with_unmanage_iscsi_and_manage_fc_hosts(self): - # setup_mock_client drive with default configuration - # and return the mock HTTP 3PAR client - mock_client = self.setup_driver() - mock_client.getVolume.return_value = {'userCPG': HPE3PAR_CPG} - mock_client.getCPG.return_value = {} - - def get_side_effect(*args): - host = {'name': None} - if args[0] == 'fake': - host['name'] = 'fake' - elif args[0] == self.FAKE_HOST: - host['name'] = self.FAKE_HOST - host['iSCSIPaths'] = [{ - "name": "iqn.1993-08.org.debian:01:222"}] - return host - - mock_client.getHost.side_effect = get_side_effect - mock_client.queryHost.return_value = { - 'members': [{ - 'name': 'fake' - }] - } - mock_client.getVLUN.return_value = {'lun': 186} - with mock.patch.object(hpecommon.HPE3PARCommon, - '_create_client') as mock_create_client: - mock_create_client.return_value = mock_client - common = self.driver._login() - host, auth_username, auth_password = self.driver._create_host( - common, self.volume, self.connector) - expected = [ - 
mock.call.getVolume('osv-0DM4qZEVSKON-DXN-NwVpw'), - mock.call.getCPG(HPE3PAR_CPG), - mock.call.getHost(self.FAKE_HOST), - mock.call.queryHost(iqns=[self.connector['initiator']]), - mock.call.getHost('fake')] - - mock_client.assert_has_calls(expected) - - self.assertEqual('fake', host['name']) - - def test_create_host(self): - # setup_mock_client drive with default configuration - # and return the mock HTTP 3PAR client - mock_client = self.setup_driver() - - mock_client.getVolume.return_value = {'userCPG': HPE3PAR_CPG} - mock_client.getCPG.return_value = {} - mock_client.getHost.side_effect = [ - hpeexceptions.HTTPNotFound('fake'), - {'name': self.FAKE_HOST}] - mock_client.queryHost.return_value = None - mock_client.getVLUN.return_value = {'lun': self.TARGET_LUN} - - with mock.patch.object(hpecommon.HPE3PARCommon, - '_create_client') as mock_create_client: - mock_create_client.return_value = mock_client - common = self.driver._login() - host, auth_username, auth_password = self.driver._create_host( - common, self.volume, self.connector) - expected = [ - mock.call.getVolume('osv-0DM4qZEVSKON-DXN-NwVpw'), - mock.call.getCPG(HPE3PAR_CPG), - mock.call.getHost(self.FAKE_HOST), - mock.call.queryHost(iqns=['iqn.1993-08.org.debian:01:222']), - mock.call.createHost( - self.FAKE_HOST, - optional={'domain': None, 'persona': 2}, - iscsiNames=['iqn.1993-08.org.debian:01:222']), - mock.call.getHost(self.FAKE_HOST)] - - mock_client.assert_has_calls(expected) - - self.assertEqual(self.FAKE_HOST, host['name']) - self.assertIsNone(auth_username) - self.assertIsNone(auth_password) - - def test_create_host_chap_enabled(self): - # setup_mock_client drive with CHAP enabled configuration - # and return the mock HTTP 3PAR client - config = self.setup_configuration() - config.hpe3par_iscsi_chap_enabled = True - mock_client = self.setup_driver(config=config) - - mock_client.getVolume.return_value = {'userCPG': HPE3PAR_CPG} - mock_client.getCPG.return_value = {} - 
mock_client.getHost.side_effect = [ - hpeexceptions.HTTPNotFound('fake'), - {'name': self.FAKE_HOST}] - mock_client.queryHost.return_value = None - mock_client.getVLUN.return_value = {'lun': self.TARGET_LUN} - - expected_mod_request = { - 'chapOperation': mock_client.HOST_EDIT_ADD, - 'chapOperationMode': mock_client.CHAP_INITIATOR, - 'chapName': 'test-user', - 'chapSecret': 'test-pass' - } - - def get_side_effect(*args): - data = {'value': None} - if args[1] == CHAP_USER_KEY: - data['value'] = 'test-user' - elif args[1] == CHAP_PASS_KEY: - data['value'] = 'test-pass' - return data - - mock_client.getVolumeMetaData.side_effect = get_side_effect - - with mock.patch.object(hpecommon.HPE3PARCommon, - '_create_client') as mock_create_client: - mock_create_client.return_value = mock_client - common = self.driver._login() - host, auth_username, auth_password = self.driver._create_host( - common, self.volume, self.connector) - expected = [ - mock.call.getVolume('osv-0DM4qZEVSKON-DXN-NwVpw'), - mock.call.getCPG(HPE3PAR_CPG), - mock.call.getVolumeMetaData( - 'osv-0DM4qZEVSKON-DXN-NwVpw', CHAP_USER_KEY), - mock.call.getVolumeMetaData( - 'osv-0DM4qZEVSKON-DXN-NwVpw', CHAP_PASS_KEY), - mock.call.getHost(self.FAKE_HOST), - mock.call.queryHost(iqns=['iqn.1993-08.org.debian:01:222']), - mock.call.createHost( - self.FAKE_HOST, - optional={'domain': None, 'persona': 2}, - iscsiNames=['iqn.1993-08.org.debian:01:222']), - mock.call.modifyHost( - 'fakehost', - expected_mod_request), - mock.call.getHost(self.FAKE_HOST) - ] - - mock_client.assert_has_calls(expected) - - self.assertEqual(self.FAKE_HOST, host['name']) - self.assertEqual('test-user', auth_username) - self.assertEqual('test-pass', auth_password) - - def test_create_host_chap_enabled_and_host_without_chap_cred(self): - # setup_mock_client driver - # and return the mock HTTP 3PAR client - config = self.setup_configuration() - config.hpe3par_iscsi_chap_enabled = True - mock_client = self.setup_driver(config=config) - 
mock_client.getVolume.return_value = {'userCPG': HPE3PAR_CPG} - mock_client.getCPG.return_value = {} - mock_client.queryHost.return_value = None - - expected_mod_request = { - 'chapOperation': mock_client.HOST_EDIT_ADD, - 'chapOperationMode': mock_client.CHAP_INITIATOR, - 'chapName': 'test-user', - 'chapSecret': 'test-pass' - } - - def get_side_effect(*args): - data = {'value': None} - if args[1] == CHAP_USER_KEY: - data['value'] = 'test-user' - elif args[1] == CHAP_PASS_KEY: - data['value'] = 'test-pass' - return data - - mock_client.getVolumeMetaData.side_effect = get_side_effect - mock_client.getHost.return_value = { - 'name': self.FAKE_HOST, - 'initiatorChapEnabled': False, - 'iSCSIPaths': [{ - "name": "iqn.1993-08.org.debian:01:222" - }] - } - - with mock.patch.object(hpecommon.HPE3PARCommon, - '_create_client') as mock_create_client: - mock_create_client.return_value = mock_client - common = self.driver._login() - host, auth_username, auth_password = self.driver._create_host( - common, self.volume, self.connector) - - expected = [ - mock.call.getVolume('osv-0DM4qZEVSKON-DXN-NwVpw'), - mock.call.getCPG(HPE3PAR_CPG), - mock.call.getVolumeMetaData( - 'osv-0DM4qZEVSKON-DXN-NwVpw', CHAP_USER_KEY), - mock.call.getVolumeMetaData( - 'osv-0DM4qZEVSKON-DXN-NwVpw', CHAP_PASS_KEY), - mock.call.getHost(self.FAKE_HOST), - mock.call.queryHost(iqns=['iqn.1993-08.org.debian:01:222']), - mock.call.modifyHost(self.FAKE_HOST, expected_mod_request)] - - mock_client.assert_has_calls(expected) - - def test_create_invalid_host(self): - # setup_mock_client drive with default configuration - # and return the mock HTTP 3PAR client - mock_client = self.setup_driver() - mock_client.getVolume.return_value = {'userCPG': HPE3PAR_CPG} - mock_client.getCPG.return_value = {} - mock_client.getHost.side_effect = [ - hpeexceptions.HTTPNotFound('Host not found.'), - {'name': 'fakehost.foo'}] - mock_client.queryHost.return_value = { - 'members': [{ - 'name': 'fakehost.foo' - }] - } - - with 
mock.patch.object(hpecommon.HPE3PARCommon, - '_create_client') as mock_create_client: - mock_create_client.return_value = mock_client - common = self.driver._login() - host, auth_username, auth_password = self.driver._create_host( - common, self.volume, self.connector) - - expected = [ - mock.call.getVolume('osv-0DM4qZEVSKON-DXN-NwVpw'), - mock.call.getCPG(HPE3PAR_CPG), - mock.call.getHost(self.FAKE_HOST), - mock.call.queryHost(iqns=['iqn.1993-08.org.debian:01:222']), - mock.call.getHost('fakehost.foo')] - - mock_client.assert_has_calls(expected) - - self.assertEqual('fakehost.foo', host['name']) - self.assertIsNone(auth_username) - self.assertIsNone(auth_password) - - def test_create_invalid_host_chap_enabled(self): - # setup_mock_client drive with CHAP enabled configuration - # and return the mock HTTP 3PAR client - config = self.setup_configuration() - config.hpe3par_iscsi_chap_enabled = True - mock_client = self.setup_driver(config=config) - - mock_client.getVolume.return_value = {'userCPG': HPE3PAR_CPG} - mock_client.getCPG.return_value = {} - mock_client.getHost.side_effect = [ - hpeexceptions.HTTPNotFound('Host not found.'), - {'name': 'fakehost.foo'}] - mock_client.queryHost.return_value = { - 'members': [{ - 'name': 'fakehost.foo' - }] - } - - def get_side_effect(*args): - data = {'value': None} - if args[1] == CHAP_USER_KEY: - data['value'] = 'test-user' - elif args[1] == CHAP_PASS_KEY: - data['value'] = 'test-pass' - return data - - mock_client.getVolumeMetaData.side_effect = get_side_effect - - expected_mod_request = { - 'chapOperation': mock_client.HOST_EDIT_ADD, - 'chapOperationMode': mock_client.CHAP_INITIATOR, - 'chapName': 'test-user', - 'chapSecret': 'test-pass' - } - - with mock.patch.object(hpecommon.HPE3PARCommon, - '_create_client') as mock_create_client: - mock_create_client.return_value = mock_client - common = self.driver._login() - host, auth_username, auth_password = self.driver._create_host( - common, self.volume, self.connector) - - 
expected = [ - mock.call.getVolume('osv-0DM4qZEVSKON-DXN-NwVpw'), - mock.call.getCPG(HPE3PAR_CPG), - mock.call.getVolumeMetaData( - 'osv-0DM4qZEVSKON-DXN-NwVpw', CHAP_USER_KEY), - mock.call.getVolumeMetaData( - 'osv-0DM4qZEVSKON-DXN-NwVpw', CHAP_PASS_KEY), - mock.call.getHost(self.FAKE_HOST), - mock.call.queryHost(iqns=['iqn.1993-08.org.debian:01:222']), - mock.call.modifyHost( - 'fakehost.foo', - expected_mod_request), - mock.call.getHost('fakehost.foo') - ] - - mock_client.assert_has_calls(expected) - - self.assertEqual('fakehost.foo', host['name']) - self.assertEqual('test-user', auth_username) - self.assertEqual('test-pass', auth_password) - - def test_create_host_concurrent(self): - # tests concurrent requests of create host - # setup_mock_client driver with default configuration - # and return the mock HTTP 3PAR client - mock_client = self.setup_driver() - mock_client.getVolume.return_value = {'userCPG': HPE3PAR_CPG} - mock_client.getCPG.return_value = {} - mock_client.queryHost.side_effect = [ - None, {'members': [{'name': self.FAKE_HOST}]}] - mock_client.createHost.side_effect = [ - hpeexceptions.HTTPConflict( - {'code': EXISTENT_PATH, - 'desc': 'host WWN/iSCSI name already used by another host'})] - mock_client.getHost.side_effect = [ - hpeexceptions.HTTPNotFound('fake'), - {'name': self.FAKE_HOST}] - - with mock.patch.object(hpecommon.HPE3PARCommon, - '_create_client') as mock_create_client: - mock_create_client.return_value = mock_client - common = self.driver._login() - host, user, pwd = self.driver._create_host( - common, self.volume, self.connector) - expected = [ - mock.call.getVolume('osv-0DM4qZEVSKON-DXN-NwVpw'), - mock.call.getCPG(HPE3PAR_CPG), - mock.call.getHost(self.FAKE_HOST), - mock.call.queryHost(iqns=['iqn.1993-08.org.debian:01:222']), - mock.call.createHost( - self.FAKE_HOST, - optional={'domain': None, 'persona': 2}, - iscsiNames=['iqn.1993-08.org.debian:01:222']), - mock.call.queryHost(iqns=['iqn.1993-08.org.debian:01:222']), - 
mock.call.getHost(self.FAKE_HOST)] - - mock_client.assert_has_calls(expected) - - self.assertEqual(self.FAKE_HOST, host['name']) - - def test_create_modify_host(self): - # setup_mock_client drive with default configuration - # and return the mock HTTP 3PAR client - mock_client = self.setup_driver() - mock_client.getVolume.return_value = {'userCPG': HPE3PAR_CPG} - mock_client.getCPG.return_value = {} - mock_client.getHost.side_effect = [ - {'name': self.FAKE_HOST, 'FCPaths': []}, - {'name': self.FAKE_HOST, - 'FCPaths': [{'wwn': '123456789012345'}, - {'wwn': '123456789054321'}]}] - mock_client.queryHost.return_value = None - with mock.patch.object(hpecommon.HPE3PARCommon, - '_create_client') as mock_create_client: - mock_create_client.return_value = mock_client - common = self.driver._login() - host, auth_username, auth_password = self.driver._create_host( - common, self.volume, self.connector) - - expected = [ - mock.call.getVolume('osv-0DM4qZEVSKON-DXN-NwVpw'), - mock.call.getCPG(HPE3PAR_CPG), - mock.call.getHost(self.FAKE_HOST), - mock.call.queryHost(iqns=['iqn.1993-08.org.debian:01:222']), - mock.call.modifyHost( - self.FAKE_HOST, - {'pathOperation': 1, - 'iSCSINames': ['iqn.1993-08.org.debian:01:222']}), - mock.call.getHost(self.FAKE_HOST)] - - mock_client.assert_has_calls(expected) - - self.assertEqual(self.FAKE_HOST, host['name']) - self.assertIsNone(auth_username) - self.assertIsNone(auth_password) - self.assertEqual(2, len(host['FCPaths'])) - - def test_create_modify_host_chap_enabled(self): - # setup_mock_client drive with CHAP enabled configuration - # and return the mock HTTP 3PAR client - config = self.setup_configuration() - config.hpe3par_iscsi_chap_enabled = True - mock_client = self.setup_driver(config=config) - - mock_client.getVolume.return_value = {'userCPG': HPE3PAR_CPG} - mock_client.getCPG.return_value = {} - mock_client.getHost.side_effect = [ - {'name': self.FAKE_HOST, 'FCPaths': []}, - {'name': self.FAKE_HOST, - 'FCPaths': [{'wwn': 
'123456789012345'}, - {'wwn': '123456789054321'}]}] - mock_client.queryHost.return_value = None - - def get_side_effect(*args): - data = {'value': None} - if args[1] == CHAP_USER_KEY: - data['value'] = 'test-user' - elif args[1] == CHAP_PASS_KEY: - data['value'] = 'test-pass' - return data - - mock_client.getVolumeMetaData.side_effect = get_side_effect - - expected_mod_request = { - 'chapOperation': mock_client.HOST_EDIT_ADD, - 'chapOperationMode': mock_client.CHAP_INITIATOR, - 'chapName': 'test-user', - 'chapSecret': 'test-pass' - } - - with mock.patch.object(hpecommon.HPE3PARCommon, - '_create_client') as mock_create_client: - mock_create_client.return_value = mock_client - common = self.driver._login() - host, auth_username, auth_password = self.driver._create_host( - common, self.volume, self.connector) - - expected = [ - mock.call.getVolume('osv-0DM4qZEVSKON-DXN-NwVpw'), - mock.call.getCPG(HPE3PAR_CPG), - mock.call.getVolumeMetaData( - 'osv-0DM4qZEVSKON-DXN-NwVpw', CHAP_USER_KEY), - mock.call.getVolumeMetaData( - 'osv-0DM4qZEVSKON-DXN-NwVpw', CHAP_PASS_KEY), - mock.call.getHost(self.FAKE_HOST), - mock.call.queryHost(iqns=['iqn.1993-08.org.debian:01:222']), - mock.call.modifyHost( - self.FAKE_HOST, - {'pathOperation': 1, - 'iSCSINames': ['iqn.1993-08.org.debian:01:222']}), - mock.call.modifyHost( - self.FAKE_HOST, - expected_mod_request - ), - mock.call.getHost(self.FAKE_HOST)] - - mock_client.assert_has_calls(expected) - - self.assertEqual(self.FAKE_HOST, host['name']) - self.assertEqual('test-user', auth_username) - self.assertEqual('test-pass', auth_password) - self.assertEqual(2, len(host['FCPaths'])) - - def test_get_least_used_nsp_for_host_single(self): - # setup_mock_client drive with default configuration - # and return the mock HTTP 3PAR client - - # Setup two ISCSI IPs - conf = self.setup_configuration() - conf.hpe3par_iscsi_ips = ["10.10.220.253"] - mock_client = self.setup_driver(config=conf) - - mock_client.getPorts.return_value = PORTS_RET - 
mock_client.getVLUNs.return_value = VLUNS1_RET - - with mock.patch.object(hpecommon.HPE3PARCommon, - '_create_client') as mock_create_client: - mock_create_client.return_value = mock_client - common = self.driver._login() - - self.driver.initialize_iscsi_ports(common) - - nsp = self.driver._get_least_used_nsp_for_host(common, 'newhost') - self.assertEqual("1:8:1", nsp) - - def test_get_least_used_nsp_for_host_new(self): - # setup_mock_client drive with default configuration - # and return the mock HTTP 3PAR client - - # Setup two ISCSI IPs - conf = self.setup_configuration() - conf.hpe3par_iscsi_ips = ["10.10.220.252", "10.10.220.253"] - mock_client = self.setup_driver(config=conf) - - mock_client.getPorts.return_value = PORTS_RET - mock_client.getVLUNs.return_value = VLUNS1_RET - - with mock.patch.object(hpecommon.HPE3PARCommon, - '_create_client') as mock_create_client: - mock_create_client.return_value = mock_client - common = self.driver._login() - - self.driver.initialize_iscsi_ports(common) - - # Host 'newhost' does not yet have any iscsi paths, - # so the 'least used' is returned - nsp = self.driver._get_least_used_nsp_for_host(common, 'newhost') - self.assertEqual("1:8:2", nsp) - - def test_get_least_used_nsp_for_host_reuse(self): - # setup_mock_client drive with default configuration - # and return the mock HTTP 3PAR client - - # Setup two ISCSI IPs - conf = self.setup_configuration() - conf.hpe3par_iscsi_ips = ["10.10.220.252", "10.10.220.253"] - mock_client = self.setup_driver(config=conf) - - mock_client.getPorts.return_value = PORTS_RET - mock_client.getVLUNs.return_value = VLUNS1_RET - - with mock.patch.object(hpecommon.HPE3PARCommon, - '_create_client') as mock_create_client: - mock_create_client.return_value = mock_client - common = self.driver._login() - - self.driver.initialize_iscsi_ports(common) - - # hosts 'foo' and 'bar' already have active iscsi paths - # the same one should be used - nsp = self.driver._get_least_used_nsp_for_host(common, 
'foo') - self.assertEqual("1:8:2", nsp) - - nsp = self.driver._get_least_used_nsp_for_host(common, 'bar') - self.assertEqual("1:8:1", nsp) - - def test_get_least_used_nps_for_host_fc(self): - # setup_mock_client drive with default configuration - # and return the mock HTTP 3PAR client - mock_client = self.setup_driver() - - mock_client.getPorts.return_value = PORTS1_RET - mock_client.getVLUNs.return_value = VLUNS5_RET - - # Setup two ISCSI IPs - iscsi_ips = ["10.10.220.252", "10.10.220.253"] - self.driver.configuration.hpe3par_iscsi_ips = iscsi_ips - - with mock.patch.object(hpecommon.HPE3PARCommon, - '_create_client') as mock_create_client: - mock_create_client.return_value = mock_client - common = self.driver._login() - self.driver.initialize_iscsi_ports(common) - - nsp = self.driver._get_least_used_nsp_for_host(common, 'newhost') - self.assertNotEqual("0:6:3", nsp) - self.assertEqual("1:8:1", nsp) - - def test_invalid_iscsi_ip(self): - config = self.setup_configuration() - config.hpe3par_iscsi_ips = ['10.10.220.250', '10.10.220.251'] - config.iscsi_ip_address = '10.10.10.10' - mock_conf = { - 'getPorts.return_value': { - 'members': [ - {'portPos': {'node': 1, 'slot': 8, 'cardPort': 2}, - 'protocol': 2, - 'IPAddr': '10.10.220.252', - 'linkState': 4, - 'device': [], - 'iSCSIName': self.TARGET_IQN, - 'mode': 2, - 'HWAddr': '2C27D75375D2', - 'type': 8}, - {'portPos': {'node': 1, 'slot': 8, 'cardPort': 1}, - 'protocol': 2, - 'IPAddr': '10.10.220.253', - 'linkState': 4, - 'device': [], - 'iSCSIName': self.TARGET_IQN, - 'mode': 2, - 'HWAddr': '2C27D75375D6', - 'type': 8}]}} - - # no valid ip addr should be configured. 
- self.assertRaises(exception.InvalidInput, - self.setup_driver, - config=config, - mock_conf=mock_conf) - - def test_get_least_used_nsp(self): - # setup_mock_client drive with default configuration - # and return the mock HTTP 3PAR client - mock_client = self.setup_driver() - ports = [ - {'portPos': {'node': 1, 'slot': 8, 'cardPort': 2}, 'active': True}, - {'portPos': {'node': 1, 'slot': 8, 'cardPort': 1}, 'active': True}, - {'portPos': {'node': 1, 'slot': 8, 'cardPort': 2}, 'active': True}, - {'portPos': {'node': 0, 'slot': 2, 'cardPort': 2}, 'active': True}, - {'portPos': {'node': 0, 'slot': 2, 'cardPort': 1}, 'active': True}, - {'portPos': {'node': 0, 'slot': 2, 'cardPort': 1}, 'active': True}, - {'portPos': {'node': 0, 'slot': 2, 'cardPort': 1}, 'active': True}, - {'portPos': {'node': 0, 'slot': 2, 'cardPort': 1}, 'active': True}] - mock_client.getVLUNs.return_value = {'members': ports} - - with mock.patch.object(hpecommon.HPE3PARCommon, - '_create_client') as mock_create_client: - mock_create_client.return_value = mock_client - common = self.driver._login() - # in use count - vluns = common.client.getVLUNs() - nsp = self.driver._get_least_used_nsp(common, vluns['members'], - ['0:2:1', '1:8:1']) - self.assertEqual('1:8:1', nsp) - - ports = [ - {'portPos': {'node': 1, 'slot': 2, 'cardPort': 1}, - 'active': True}, - {'portPos': {'node': 1, 'slot': 2, 'cardPort': 1}, - 'active': True}, - {'portPos': {'node': 1, 'slot': 2, 'cardPort': 1}, - 'active': True}, - {'portPos': {'node': 1, 'slot': 2, 'cardPort': 1}, - 'active': True}, - {'portPos': {'node': 0, 'slot': 2, 'cardPort': 1}, - 'active': True}, - {'portPos': {'node': 0, 'slot': 2, 'cardPort': 1}, - 'active': True}, - {'portPos': {'node': 0, 'slot': 2, 'cardPort': 1}, - 'active': True}, - {'portPos': {'node': 0, 'slot': 2, 'cardPort': 1}, - 'active': True}, - {'portPos': {'node': 0, 'slot': 2, 'cardPort': 1}, - 'active': True}] - - mock_client.getVLUNs.return_value = {'members': ports} - - # in use count - 
common = self.driver._login() - vluns = common.client.getVLUNs() - nsp = self.driver._get_least_used_nsp(common, vluns['members'], - ['0:2:1', '1:2:1']) - self.assertEqual('1:2:1', nsp) - - ports = [ - {'portPos': {'node': 1, 'slot': 2, 'cardPort': 1}, - 'active': True}, - {'portPos': {'node': 1, 'slot': 2, 'cardPort': 1}, - 'active': True}, - {'portPos': {'node': 1, 'slot': 2, 'cardPort': 1}, - 'active': True}, - {'portPos': {'node': 1, 'slot': 2, 'cardPort': 1}, - 'active': True}, - {'portPos': {'node': 0, 'slot': 2, 'cardPort': 1}, - 'active': True}, - {'portPos': {'node': 0, 'slot': 2, 'cardPort': 1}, - 'active': True}, - {'portPos': {'node': 0, 'slot': 2, 'cardPort': 1}, - 'active': True}, - {'portPos': {'node': 0, 'slot': 2, 'cardPort': 1}, - 'active': True}, - {'portPos': {'node': 0, 'slot': 2, 'cardPort': 1}, - 'active': True}] - - mock_client.getVLUNs.return_value = {'members': ports} - - # in use count - common = self.driver._login() - vluns = common.client.getVLUNs() - nsp = self.driver._get_least_used_nsp(common, vluns['members'], - ['1:1:1', '1:2:1']) - self.assertEqual('1:1:1', nsp) - - def test_set_3par_chaps(self): - # setup_mock_client drive with default configuration - # and return the mock HTTP 3PAR client - mock_client = self.setup_driver() - - with mock.patch.object(hpecommon.HPE3PARCommon, - '_create_client') as mock_create_client: - mock_create_client.return_value = mock_client - common = self.driver._login() - expected = [] - self.driver._set_3par_chaps( - common, 'test-host', 'test-vol', 'test-host', 'pass') - mock_client.assert_has_calls(expected) - - # setup_mock_client drive with CHAP enabled configuration - # and return the mock HTTP 3PAR client - config = self.setup_configuration() - config.hpe3par_iscsi_chap_enabled = True - mock_client = self.setup_driver(config=config) - - with mock.patch.object(hpecommon.HPE3PARCommon, - '_create_client') as mock_create_client: - mock_create_client.return_value = mock_client - common = 
self.driver._login() - expected_mod_request = { - 'chapOperation': mock_client.HOST_EDIT_ADD, - 'chapOperationMode': mock_client.CHAP_INITIATOR, - 'chapName': 'test-host', - 'chapSecret': 'fake' - } - - expected = [ - mock.call.modifyHost('test-host', expected_mod_request) - ] - self.driver._set_3par_chaps( - common, 'test-host', 'test-vol', 'test-host', 'fake') - mock_client.assert_has_calls(expected) - - @mock.patch('cinder.volume.utils.generate_password') - def test_do_export(self, mock_utils): - # setup_mock_client drive with default configuration - # and return the mock HTTP 3PAR client - mock_client = self.setup_driver() - - volume = {'host': 'test-host@3pariscsi', - 'id': 'd03338a9-9115-48a3-8dfc-35cdfcdc15a7'} - connector = {'host': 'test-host'} - mock_utils.return_value = 'random-pass' - mock_client.getHostVLUNs.return_value = [ - {'active': True, - 'volumeName': self.VOLUME_3PAR_NAME, - 'lun': None, 'type': 0, - 'remoteName': 'iqn.1993-08.org.debian:01:222'} - ] - mock_client.getHost.return_value = { - 'name': 'osv-0DM4qZEVSKON-DXN-NwVpw', - 'initiatorChapEnabled': True - } - mock_client.getVolumeMetaData.return_value = { - 'value': 'random-pass' - } - - expected = [] - expected_model = {'provider_auth': None} - - with mock.patch.object(hpecommon.HPE3PARCommon, - '_create_client') as mock_create_client: - mock_create_client.return_value = mock_client - common = self.driver._login() - model = self.driver._do_export(common, volume, connector) - - mock_client.assert_has_calls(expected) - self.assertEqual(expected_model, model) - - mock_client.reset_mock() - - # setup_mock_client drive with CHAP enabled configuration - # and return the mock HTTP 3PAR client - config = self.setup_configuration() - config.hpe3par_iscsi_chap_enabled = True - mock_client = self.setup_driver(config=config) - - volume = {'host': 'test-host@3pariscsi', - 'id': 'd03338a9-9115-48a3-8dfc-35cdfcdc15a7'} - mock_utils.return_value = 'random-pass' - mock_client.getHostVLUNs.return_value = 
[ - {'active': True, - 'volumeName': self.VOLUME_3PAR_NAME, - 'lun': None, 'type': 0, - 'remoteName': 'iqn.1993-08.org.debian:01:222'} - ] - mock_client.getHost.return_value = { - 'name': 'osv-0DM4qZEVSKON-DXN-NwVpw', - 'initiatorChapEnabled': True - } - mock_client.getVolumeMetaData.return_value = { - 'value': 'random-pass' - } - - expected = [ - mock.call.getHostVLUNs('test-host'), - mock.call.getHost('test-host'), - mock.call.getVolumeMetaData( - 'osv-0DM4qZEVSKON-DXN-NwVpw', CHAP_PASS_KEY), - mock.call.setVolumeMetaData( - 'osv-0DM4qZEVSKON-DXN-NwVpw', CHAP_USER_KEY, 'test-host'), - mock.call.setVolumeMetaData( - 'osv-0DM4qZEVSKON-DXN-NwVpw', CHAP_PASS_KEY, 'random-pass') - ] - expected_model = {'provider_auth': 'CHAP test-host random-pass'} - - with mock.patch.object(hpecommon.HPE3PARCommon, - '_create_client') as mock_create_client: - mock_create_client.return_value = mock_client - common = self.driver._login() - model = self.driver._do_export(common, volume, connector) - mock_client.assert_has_calls(expected) - self.assertEqual(expected_model, model) - - @mock.patch('cinder.volume.utils.generate_password') - def test_do_export_host_not_found(self, mock_utils): - # setup_mock_client drive with CHAP enabled configuration - # and return the mock HTTP 3PAR client - config = self.setup_configuration() - config.hpe3par_iscsi_chap_enabled = True - mock_client = self.setup_driver(config=config) - - volume = {'host': 'test-host@3pariscsi', - 'id': 'd03338a9-9115-48a3-8dfc-35cdfcdc15a7'} - connector = {'host': 'test-host'} - mock_utils.return_value = "random-pass" - mock_client.getHostVLUNs.side_effect = hpeexceptions.HTTPNotFound( - 'fake') - - mock_client.getVolumeMetaData.return_value = { - 'value': 'random-pass' - } - - expected = [ - mock.call.getHostVLUNs('test-host'), - mock.call.setVolumeMetaData( - 'osv-0DM4qZEVSKON-DXN-NwVpw', CHAP_USER_KEY, 'test-host'), - mock.call.setVolumeMetaData( - 'osv-0DM4qZEVSKON-DXN-NwVpw', CHAP_PASS_KEY, 'random-pass') - ] - 
expected_model = {'provider_auth': 'CHAP test-host random-pass'} - - with mock.patch.object(hpecommon.HPE3PARCommon, - '_create_client') as mock_create_client: - mock_create_client.return_value = mock_client - common = self.driver._login() - model = self.driver._do_export(common, volume, connector) - mock_client.assert_has_calls(expected) - self.assertEqual(expected_model, model) - - @mock.patch('cinder.volume.utils.generate_password') - def test_do_export_host_chap_disabled(self, mock_utils): - # setup_mock_client drive with CHAP enabled configuration - # and return the mock HTTP 3PAR client - config = self.setup_configuration() - config.hpe3par_iscsi_chap_enabled = True - mock_client = self.setup_driver(config=config) - - volume = {'host': 'test-host@3pariscsi', - 'id': 'd03338a9-9115-48a3-8dfc-35cdfcdc15a7'} - connector = {'host': 'test-host'} - mock_utils.return_value = 'random-pass' - mock_client.getHostVLUNs.return_value = [ - {'active': True, - 'volumeName': self.VOLUME_3PAR_NAME, - 'lun': None, 'type': 0, - 'remoteName': 'iqn.1993-08.org.debian:01:222'} - ] - mock_client.getHost.return_value = { - 'name': 'fake-host', - 'initiatorChapEnabled': False - } - mock_client.getVolumeMetaData.return_value = { - 'value': 'random-pass' - } - - expected = [ - mock.call.getHostVLUNs('test-host'), - mock.call.getHost('test-host'), - mock.call.getVolumeMetaData( - 'osv-0DM4qZEVSKON-DXN-NwVpw', CHAP_PASS_KEY), - mock.call.setVolumeMetaData( - 'osv-0DM4qZEVSKON-DXN-NwVpw', CHAP_USER_KEY, 'test-host'), - mock.call.setVolumeMetaData( - 'osv-0DM4qZEVSKON-DXN-NwVpw', CHAP_PASS_KEY, 'random-pass') - ] - expected_model = {'provider_auth': 'CHAP test-host random-pass'} - - with mock.patch.object(hpecommon.HPE3PARCommon, - '_create_client') as mock_create_client: - mock_create_client.return_value = mock_client - common = self.driver._login() - model = self.driver._do_export(common, volume, connector) - mock_client.assert_has_calls(expected) - self.assertEqual(expected_model, 
model) - - @mock.patch('cinder.volume.utils.generate_password') - def test_do_export_no_active_vluns(self, mock_utils): - # setup_mock_client drive with CHAP enabled configuration - # and return the mock HTTP 3PAR client - config = self.setup_configuration() - config.hpe3par_iscsi_chap_enabled = True - mock_client = self.setup_driver(config=config) - - volume = {'host': 'test-host@3pariscsi', - 'id': 'd03338a9-9115-48a3-8dfc-35cdfcdc15a7'} - connector = {'host': 'test-host'} - mock_utils.return_value = "random-pass" - mock_client.getHostVLUNs.return_value = [ - {'active': False, - 'volumeName': self.VOLUME_3PAR_NAME, - 'lun': None, 'type': 0, - 'remoteName': 'iqn.1993-08.org.debian:01:222'} - ] - mock_client.getHost.return_value = { - 'name': 'fake-host', - 'initiatorChapEnabled': True - } - mock_client.getVolumeMetaData.return_value = { - 'value': 'random-pass' - } - - expected = [ - mock.call.getHostVLUNs('test-host'), - mock.call.getHost('test-host'), - mock.call.setVolumeMetaData( - 'osv-0DM4qZEVSKON-DXN-NwVpw', CHAP_USER_KEY, 'test-host'), - mock.call.setVolumeMetaData( - 'osv-0DM4qZEVSKON-DXN-NwVpw', CHAP_PASS_KEY, 'random-pass') - ] - expected_model = {'provider_auth': 'CHAP test-host random-pass'} - - with mock.patch.object(hpecommon.HPE3PARCommon, - '_create_client') as mock_create_client: - mock_create_client.return_value = mock_client - common = self.driver._login() - model = self.driver._do_export(common, volume, connector) - mock_client.assert_has_calls(expected) - self.assertEqual(expected_model, model) - - @mock.patch('cinder.volume.utils.generate_password') - def test_do_export_vlun_missing_chap_credentials(self, mock_utils): - # setup_mock_client drive with CHAP enabled configuration - # and return the mock HTTP 3PAR client - config = self.setup_configuration() - config.hpe3par_iscsi_chap_enabled = True - mock_client = self.setup_driver(config=config) - - volume = {'host': 'test-host@3pariscsi', - 'id': self.VOLUME_ID} - connector = {'host': 
'test-host'} - mock_utils.return_value = 'random-pass' - - mock_client.getHost.return_value = { - 'name': 'osv-0DM4qZEVSKON-DXN-NwVpw', - 'initiatorChapEnabled': True} - - mock_client.getVolumeMetaData.side_effect = hpeexceptions.HTTPNotFound - - expected = [ - mock.call.getHostVLUNs('test-host'), - mock.call.getHost('test-host'), - mock.call.getVolumeMetaData( - 'osv-0DM4qZEVSKON-DXN-NwVpw', CHAP_PASS_KEY), - mock.call.setVolumeMetaData( - 'osv-0DM4qZEVSKON-DXN-NwVpw', CHAP_USER_KEY, 'test-host'), - mock.call.setVolumeMetaData( - 'osv-0DM4qZEVSKON-DXN-NwVpw', CHAP_PASS_KEY, 'random-pass')] - - expected_model = {'provider_auth': 'CHAP test-host random-pass'} - - with mock.patch.object(hpecommon.HPE3PARCommon, - '_create_client') as mock_create_client: - mock_create_client.return_value = mock_client - common = self.driver._login() - - # vlun has remoteName - mock_client.getHostVLUNs.return_value = [ - {'active': True, 'volumeName': self.VOLUME_3PAR_NAME, - 'lun': 1, 'type': 3, - 'remoteName': 'iqn.1993-08.org.debian:01:222'}] - - model_with_remote_name = self.driver._do_export( - common, volume, connector) - mock_client.assert_has_calls(expected) - self.assertDictEqual(expected_model, model_with_remote_name) - - # vlun does not has remoteName - mock_client.getHostVLUNs.return_value = [ - {'active': False, 'volumeName': self.VOLUME_3PAR_NAME, - 'lun': None, 'type': 1}] - - model_without_remote_name = self.driver._do_export( - common, volume, connector) - mock_client.assert_has_calls(expected) - self.assertDictEqual(expected_model, model_without_remote_name) - - @mock.patch('cinder.volume.utils.generate_password') - def test_create_export(self, mock_utils): - config = self.setup_configuration() - config.hpe3par_iscsi_chap_enabled = True - mock_client = self.setup_driver(config=config) - mock_utils.return_value = 'random-pass' - volume = {'host': 'test-host@3pariscsi', - 'id': self.VOLUME_ID} - connector = {'host': 'test-host'} - mock_client.getHostVLUNs.return_value = 
[ - {'active': True, - 'volumeName': self.VOLUME_3PAR_NAME, - 'lun': None, 'type': 3, - 'remoteName': 'iqn.1993-08.org.debian:01:222'}] - mock_client.getHost.return_value = { - 'name': 'osv-0DM4qZEVSKON-DXN-NwVpw', - 'initiatorChapEnabled': True} - mock_client.getVolumeMetaData.return_value = { - 'value': 'random-pass'} - - expected = [ - mock.call.getHostVLUNs('test-host'), - mock.call.getHost('test-host'), - mock.call.getVolumeMetaData( - 'osv-0DM4qZEVSKON-DXN-NwVpw', CHAP_PASS_KEY), - mock.call.setVolumeMetaData( - 'osv-0DM4qZEVSKON-DXN-NwVpw', CHAP_USER_KEY, 'test-host'), - mock.call.setVolumeMetaData( - 'osv-0DM4qZEVSKON-DXN-NwVpw', CHAP_PASS_KEY, 'random-pass')] - expected_model = {'provider_auth': 'CHAP test-host random-pass'} - mock_create_client = self.mock_object(hpecommon.HPE3PARCommon, - '_create_client', - return_value=mock_client) - mock_create_client.return_value = mock_client - model = self.driver.create_export(None, volume, connector) - mock_client.assert_has_calls(expected) - self.assertDictEqual(expected_model, model) - - def test_initialize_iscsi_ports_with_iscsi_ip_and_port(self): - # setup_mock_client drive with default configuration - # and return the mock HTTP 3PAR client - conf = self.setup_configuration() - conf.hpe3par_iscsi_ips = ["10.10.220.252:1234"] - mock_client = self.setup_driver(config=conf) - - mock_client.getPorts.return_value = PORTS_RET - expected = [mock.call.getPorts()] - - with mock.patch.object(hpecommon.HPE3PARCommon, - '_create_client') as mock_create_client: - mock_create_client.return_value = mock_client - - common = self.driver._login() - self.driver.initialize_iscsi_ports(common) - mock_client.assert_has_calls(expected) - - def test_initialize_iscsi_ports_with_wrong_ip_format_configured(self): - # setup_mock_client drive with default configuration - # and return the mock HTTP 3PAR client - conf = self.setup_configuration() - conf.hpe3par_iscsi_ips = ["10.10.220.252:1234:4567"] - mock_client = 
self.setup_driver(config=conf) - - mock_client.getPorts.return_value = PORTS_RET - - with mock.patch.object(hpecommon.HPE3PARCommon, - '_create_client') as mock_create_client: - mock_create_client.return_value = mock_client - - common = self.driver._login() - self.assertRaises(exception.InvalidInput, - self.driver.initialize_iscsi_ports, - common) - - def test_ensure_export(self): - # setup_mock_client drive with default configuration - # and return the mock HTTP 3PAR client - mock_client = self.setup_driver() - - volume = {'host': 'test-host@3pariscsi', - 'id': 'd03338a9-9115-48a3-8dfc-35cdfcdc15a7'} - - mock_client.getAllVolumeMetaData.return_value = { - 'total': 0, - 'members': [] - } - - with mock.patch.object(hpecommon.HPE3PARCommon, - '_create_client') as mock_create_client: - mock_create_client.return_value = mock_client - model = self.driver.ensure_export(None, volume) - - expected = [ - mock.call.getVolume('osv-0DM4qZEVSKON-DXN-NwVpw'), - mock.call.getAllVolumeMetaData('osv-0DM4qZEVSKON-DXN-NwVpw') - ] - - expected_model = {'provider_auth': None} - - mock_client.assert_has_calls( - self.standard_login + - expected + - self.standard_logout) - self.assertEqual(expected_model, model) - - mock_client.getAllVolumeMetaData.return_value = { - 'total': 2, - 'members': [ - { - 'creationTimeSec': 1406074222, - 'value': 'fake-host', - 'key': CHAP_USER_KEY, - 'creationTime8601': '2014-07-22T17:10:22-07:00' - }, - { - 'creationTimeSec': 1406074222, - 'value': 'random-pass', - 'key': CHAP_PASS_KEY, - 'creationTime8601': '2014-07-22T17:10:22-07:00' - } - ] - } - - model = self.driver.ensure_export(None, volume) - - expected = [ - mock.call.getVolume('osv-0DM4qZEVSKON-DXN-NwVpw'), - mock.call.getAllVolumeMetaData('osv-0DM4qZEVSKON-DXN-NwVpw') - ] - - expected_model = {'provider_auth': "CHAP fake-host random-pass"} - - mock_client.assert_has_calls( - self.standard_login + - expected + - self.standard_logout) - self.assertEqual(expected_model, model) - - def 
test_ensure_export_missing_volume(self): - # setup_mock_client drive with default configuration - # and return the mock HTTP 3PAR client - mock_client = self.setup_driver() - - volume = {'host': 'test-host@3pariscsi', - 'id': 'd03338a9-9115-48a3-8dfc-35cdfcdc15a7'} - - mock_client.getVolume.side_effect = hpeexceptions.HTTPNotFound( - 'fake') - - with mock.patch.object(hpecommon.HPE3PARCommon, - '_create_client') as mock_create_client: - mock_create_client.return_value = mock_client - model = self.driver.ensure_export(None, volume) - - expected = [mock.call.getVolume('osv-0DM4qZEVSKON-DXN-NwVpw')] - - expected_model = None - - mock_client.assert_has_calls( - self.standard_login + - expected + - self.standard_logout) - self.assertEqual(expected_model, model) - - @mock.patch.object(volume_types, 'get_volume_type') - def test_get_volume_settings_default_pool(self, _mock_volume_types): - _mock_volume_types.return_value = { - 'name': 'gold', - 'id': 'gold-id', - 'extra_specs': {}} - mock_client = self.setup_driver() - with mock.patch.object(hpecommon.HPE3PARCommon, - '_create_client') as mock_create_client: - mock_create_client.return_value = mock_client - common = self.driver._login() - - volume = {'host': 'test-host@3pariscsi#pool_foo', - 'id': 'd03338a9-9115-48a3-8dfc-35cdfcdc15a7'} - pool = volume_utils.extract_host(volume['host'], 'pool') - model = common.get_volume_settings_from_type_id('gold-id', pool) - self.assertEqual('pool_foo', model['cpg']) - - def test_get_model_update(self): - mock_client = self.setup_driver() - with mock.patch.object(hpecommon.HPE3PARCommon, - '_create_client') as mock_create_client: - mock_create_client.return_value = mock_client - common = self.driver._login() - - model_update = common._get_model_update('xxx@yyy#zzz', 'CPG') - self.assertEqual({'host': 'xxx@yyy#CPG'}, model_update) - -VLUNS5_RET = ({'members': - [{'portPos': {'node': 0, 'slot': 8, 'cardPort': 2}, - 'active': True}, - {'portPos': {'node': 1, 'slot': 8, 'cardPort': 1}, - 
'active': True}]}) - -PORTS_RET = ({'members': - [{'portPos': {'node': 1, 'slot': 8, 'cardPort': 2}, - 'protocol': 2, - 'IPAddr': '10.10.220.252', - 'linkState': 4, - 'device': [], - 'iSCSIName': 'iqn.2000-05.com.3pardata:21820002ac00383d', - 'mode': 2, - 'HWAddr': '2C27D75375D2', - 'type': 8}, - {'portPos': {'node': 1, 'slot': 8, 'cardPort': 1}, - 'protocol': 2, - 'IPAddr': '10.10.220.253', - 'linkState': 4, - 'device': [], - 'iSCSIName': 'iqn.2000-05.com.3pardata:21810002ac00383d', - 'mode': 2, - 'HWAddr': '2C27D75375D6', - 'type': 8}]}) - -VLUNS1_RET = ({'members': - [{'portPos': {'node': 1, 'slot': 8, 'cardPort': 2}, - 'hostname': 'foo', 'active': True}, - {'portPos': {'node': 1, 'slot': 8, 'cardPort': 1}, - 'hostname': 'bar', 'active': True}, - {'portPos': {'node': 1, 'slot': 8, 'cardPort': 1}, - 'hostname': 'bar', 'active': True}, - {'portPos': {'node': 1, 'slot': 8, 'cardPort': 1}, - 'hostname': 'bar', 'active': True}]}) - -PORTS1_RET = ({'members': - [{'portPos': {'node': 0, 'slot': 8, 'cardPort': 2}, - 'protocol': 2, - 'IPAddr': '10.10.120.252', - 'linkState': 4, - 'device': [], - 'iSCSIName': 'iqn.2000-05.com.3pardata:21820002ac00383d', - 'mode': 2, - 'HWAddr': '2C27D75375D2', - 'type': 8}, - {'portPos': {'node': 1, 'slot': 8, 'cardPort': 1}, - 'protocol': 2, - 'IPAddr': '10.10.220.253', - 'linkState': 4, - 'device': [], - 'iSCSIName': 'iqn.2000-05.com.3pardata:21810002ac00383d', - 'mode': 2, - 'HWAddr': '2C27D75375D6', - 'type': 8}, - {'portWWN': '20210002AC00383D', - 'protocol': 1, - 'linkState': 4, - 'mode': 2, - 'device': ['cage2'], - 'nodeWWN': '20210002AC00383D', - 'type': 2, - 'portPos': {'node': 0, 'slot': 6, 'cardPort': 3}}]}) diff --git a/cinder/tests/unit/volume/drivers/hpe/test_hpelefthand.py b/cinder/tests/unit/volume/drivers/hpe/test_hpelefthand.py deleted file mode 100644 index 8714e7c75..000000000 --- a/cinder/tests/unit/volume/drivers/hpe/test_hpelefthand.py +++ /dev/null @@ -1,3354 +0,0 @@ -# (c) Copyright 2014-2016 Hewlett Packard 
Enterprise Development LP -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -# -"""Unit tests for OpenStack Cinder volume drivers.""" - -import copy -import json -import mock -from oslo_utils import units - -from cinder import context -from cinder import exception -from cinder.objects import fields -from cinder import test -from cinder.tests.unit.volume.drivers.hpe \ - import fake_hpe_lefthand_client as hpelefthandclient -from cinder.volume.drivers.hpe import hpe_lefthand_iscsi -from cinder.volume import volume_types - -hpeexceptions = hpelefthandclient.hpeexceptions - -GOODNESS_FUNCTION = \ - "capabilities.capacity_utilization < 0.6? 
100 : 25" -FILTER_FUNCTION = \ - "capabilities.total_volumes < 400 && capabilities.capacity_utilization" -HPELEFTHAND_SAN_SSH_CON_TIMEOUT = 44 -HPELEFTHAND_SAN_SSH_PRIVATE = 'foobar' -HPELEFTHAND_API_URL = 'http://fake.foo:8080/lhos' -HPELEFTHAND_API_URL2 = 'http://fake2.foo2:8080/lhos' -HPELEFTHAND_SSH_IP = 'fake.foo' -HPELEFTHAND_SSH_IP2 = 'fake2.foo2' -HPELEFTHAND_USERNAME = 'foo1' -HPELEFTHAND_PASSWORD = 'bar2' -HPELEFTHAND_SSH_PORT = 16022 -HPELEFTHAND_CLUSTER_NAME = 'CloudCluster1' -VOLUME_TYPE_ID_REPLICATED = 'be9181f1-4040-46f2-8298-e7532f2bf9db' -FAKE_FAILOVER_HOST = 'fakefailover@foo#destfakepool' -REPLICATION_BACKEND_ID = 'target' - - -class HPELeftHandBaseDriver(object): - - cluster_id = 1 - - volume_name = "fakevolume" - volume_name_repl = "fakevolume_replicated" - volume_id = 1 - volume = { - 'name': volume_name, - 'display_name': 'Foo Volume', - 'provider_location': ('10.0.1.6 iqn.2003-10.com.lefthandnetworks:' - 'group01:25366:fakev 0'), - 'id': volume_id, - 'provider_auth': None, - 'size': 1} - - volume_extend = { - 'name': volume_name, - 'display_name': 'Foo Volume', - 'provider_location': ('10.0.1.6 iqn.2003-10.com.lefthandnetworks:' - 'group01:25366:fakev 0'), - 'id': volume_id, - 'provider_auth': None, - 'size': 5} - - volume_replicated = { - 'name': volume_name_repl, - 'display_name': 'Foo Volume', - 'provider_location': ('10.0.1.6 iqn.2003-10.com.lefthandnetworks:' - 'group01:25366:fakev 0'), - 'id': volume_id, - 'provider_auth': None, - 'size': 1, - 'volume_type': 'replicated', - 'volume_type_id': VOLUME_TYPE_ID_REPLICATED, - 'replication_driver_data': ('{"location": "' + HPELEFTHAND_API_URL + - '"}')} - - repl_targets = [{'backend_id': 'target', - 'managed_backend_name': FAKE_FAILOVER_HOST, - 'hpelefthand_api_url': HPELEFTHAND_API_URL2, - 'hpelefthand_username': HPELEFTHAND_USERNAME, - 'hpelefthand_password': HPELEFTHAND_PASSWORD, - 'hpelefthand_clustername': HPELEFTHAND_CLUSTER_NAME, - 'hpelefthand_ssh_port': HPELEFTHAND_SSH_PORT, - 
'ssh_conn_timeout': HPELEFTHAND_SAN_SSH_CON_TIMEOUT, - 'san_private_key': HPELEFTHAND_SAN_SSH_PRIVATE, - 'cluster_id': 6, - 'cluster_vip': '10.0.1.6'}] - - repl_targets_unmgd = [{'backend_id': 'target', - 'hpelefthand_api_url': HPELEFTHAND_API_URL2, - 'hpelefthand_username': HPELEFTHAND_USERNAME, - 'hpelefthand_password': HPELEFTHAND_PASSWORD, - 'hpelefthand_clustername': HPELEFTHAND_CLUSTER_NAME, - 'hpelefthand_ssh_port': HPELEFTHAND_SSH_PORT, - 'ssh_conn_timeout': HPELEFTHAND_SAN_SSH_CON_TIMEOUT, - 'san_private_key': HPELEFTHAND_SAN_SSH_PRIVATE, - 'cluster_id': 6, - 'cluster_vip': '10.0.1.6'}] - - list_rep_targets = [{'backend_id': REPLICATION_BACKEND_ID}] - - serverName = 'fakehost' - server_id = 0 - server_uri = '/lhos/servers/0' - - snapshot_name = "fakeshapshot" - snapshot_id = 3 - snapshot = { - 'id': snapshot_id, - 'name': snapshot_name, - 'display_name': 'fakesnap', - 'volume_name': volume_name, - 'volume': volume, - 'volume_size': 1} - - cloned_volume_name = "clone_volume" - cloned_volume = {'name': cloned_volume_name, - 'size': 1} - cloned_volume_extend = {'name': cloned_volume_name, - 'size': 5} - - cloned_snapshot_name = "clonedshapshot" - cloned_snapshot_id = 5 - cloned_snapshot = { - 'name': cloned_snapshot_name, - 'volume_name': volume_name} - - volume_type_id = 4 - init_iqn = 'iqn.1993-08.org.debian:01:222' - - volume_type = {'name': 'gold', - 'deleted': False, - 'updated_at': None, - 'extra_specs': {'hpelh:provisioning': 'thin', - 'hpelh:ao': 'true', - 'hpelh:data_pl': 'r-0'}, - 'deleted_at': None, - 'id': 'gold'} - old_volume_type = {'name': 'gold', - 'deleted': False, - 'updated_at': None, - 'extra_specs': {'hplh:provisioning': 'thin', - 'hplh:ao': 'true', - 'hplh:data_pl': 'r-0'}, - 'deleted_at': None, - 'id': 'gold'} - - connector = { - 'ip': '10.0.0.2', - 'initiator': 'iqn.1993-08.org.debian:01:222', - 'host': serverName} - - driver_startup_call_stack = [ - mock.call.login('foo1', 'bar2'), - mock.call.getClusterByName('CloudCluster1'), - 
mock.call.setSSHOptions( - HPELEFTHAND_SSH_IP, - HPELEFTHAND_USERNAME, - HPELEFTHAND_PASSWORD, - missing_key_policy='AutoAddPolicy', - privatekey=HPELEFTHAND_SAN_SSH_PRIVATE, - known_hosts_file=mock.ANY, - port=HPELEFTHAND_SSH_PORT, - conn_timeout=HPELEFTHAND_SAN_SSH_CON_TIMEOUT), - ] - - -class TestHPELeftHandISCSIDriver(HPELeftHandBaseDriver, test.TestCase): - - CONSIS_GROUP_ID = '3470cc4c-63b3-4c7a-8120-8a0693b45838' - GROUPSNAPSHOT_ID = '5351d914-6c90-43e7-9a8e-7e84610927da' - - class fake_group_object(object): - volume_type_ids = '371c64d5-b92a-488c-bc14-1e63cef40e08' - name = 'group_name' - groupsnapshot_id = None - id = '3470cc4c-63b3-4c7a-8120-8a0693b45838' - description = 'group' - - class fake_groupsnapshot_object(object): - group_id = '3470cc4c-63b3-4c7a-8120-8a0693b45838' - description = 'groupsnapshot' - id = '5351d914-6c90-43e7-9a8e-7e84610927da' - readOnly = False - - def default_mock_conf(self): - - mock_conf = mock.MagicMock() - mock_conf.hpelefthand_api_url = HPELEFTHAND_API_URL - mock_conf.hpelefthand_username = HPELEFTHAND_USERNAME - mock_conf.hpelefthand_password = HPELEFTHAND_PASSWORD - mock_conf.hpelefthand_ssh_port = HPELEFTHAND_SSH_PORT - mock_conf.ssh_conn_timeout = HPELEFTHAND_SAN_SSH_CON_TIMEOUT - mock_conf.san_private_key = HPELEFTHAND_SAN_SSH_PRIVATE - mock_conf.hpelefthand_iscsi_chap_enabled = False - mock_conf.hpelefthand_debug = False - mock_conf.hpelefthand_clustername = "CloudCluster1" - mock_conf.goodness_function = GOODNESS_FUNCTION - mock_conf.filter_function = FILTER_FUNCTION - mock_conf.reserved_percentage = 25 - - def safe_get(attr): - try: - return mock_conf.__getattribute__(attr) - except AttributeError: - return None - mock_conf.safe_get = safe_get - - return mock_conf - - @mock.patch('hpelefthandclient.client.HPELeftHandClient', spec=True) - def setup_driver(self, _mock_client, config=None): - if config is None: - config = self.default_mock_conf() - - _mock_client.return_value.getClusterByName.return_value = { - 'id': 1, 
'virtualIPAddresses': [{'ipV4Address': '10.0.1.6'}]} - _mock_client.return_value.getCluster.return_value = { - 'spaceTotal': units.Gi * 500, - 'spaceAvailable': units.Gi * 250} - _mock_client.return_value.getApiVersion.return_value = '1.2' - _mock_client.return_value.getIPFromCluster.return_value = '1.1.1.1' - self.driver = hpe_lefthand_iscsi.HPELeftHandISCSIDriver( - configuration=config) - self.driver.do_setup(None) - self.cluster_name = config.hpelefthand_clustername - return _mock_client.return_value - - @mock.patch('hpelefthandclient.version', "1.0.0") - def test_unsupported_client_version(self): - - self.assertRaises(exception.InvalidInput, - self.setup_driver) - - @mock.patch('hpelefthandclient.version', "3.0.0") - def test_supported_client_version(self): - - self.setup_driver() - - def test__login(self): - - # setup driver with default configuration - # and return the mock HTTP LeftHand client - mock_client = self.setup_driver() - - with mock.patch.object(hpe_lefthand_iscsi.HPELeftHandISCSIDriver, - '_create_client') as mock_do_setup: - mock_do_setup.return_value = mock_client - - # execute driver - self.driver._login() - expected = self.driver_startup_call_stack - mock_client.assert_has_calls(expected) - - # mock HTTPNotFound - mock_client.login.side_effect = ( - hpeexceptions.HTTPNotFound()) - # ensure the raised exception is a cinder exception - self.assertRaises(exception.DriverNotInitialized, - self.driver._login) - - # mock other HTTP exception - mock_client.login.side_effect = ( - hpeexceptions.HTTPServerError()) - # ensure the raised exception is a cinder exception - self.assertRaises(exception.DriverNotInitialized, - self.driver._login) - - def test_get_version_string(self): - - # setup driver with default configuration - # and return the mock HTTP LeftHand client - mock_client = self.setup_driver() - - with mock.patch.object(hpe_lefthand_iscsi.HPELeftHandISCSIDriver, - '_create_client') as mock_do_setup: - mock_do_setup.return_value = mock_client 
- - # execute driver - ver_string = self.driver.get_version_string() - self.assertEqual(self.driver.VERSION, ver_string.split()[1]) - - def test_check_for_setup_error(self): - - # setup driver with default configuration - # and return the mock HTTP LeftHand client - mock_client = self.setup_driver() - - # test for latest version - mock_client.getApiVersion.return_value = ('3.0.0') - - with mock.patch.object(hpe_lefthand_iscsi.HPELeftHandISCSIDriver, - '_create_client') as mock_do_setup: - mock_do_setup.return_value = mock_client - - # execute driver - self.driver.check_for_setup_error() - - expected = self.driver_startup_call_stack + [ - mock.call.getApiVersion(), - mock.call.logout()] - - mock_client.assert_has_calls(expected) - - # test for older version - mock_client.reset_mock() - mock_client.getApiVersion.return_value = ('1.0.0') - - # execute driver - self.driver.check_for_setup_error() - mock_client.assert_has_calls(expected) - - def test_create_volume(self): - - # setup driver with default configuration - # and return the mock HTTP LeftHand client - mock_client = self.setup_driver() - - mock_client.getVolumes.return_value = {'total': 1, 'members': []} - - # mock return value of createVolume - mock_client.createVolume.return_value = { - 'iscsiIqn': self.connector['initiator']} - - with mock.patch.object(hpe_lefthand_iscsi.HPELeftHandISCSIDriver, - '_create_client') as mock_do_setup: - mock_do_setup.return_value = mock_client - - # execute driver - volume_info = self.driver.create_volume(self.volume) - - self.assertEqual('10.0.1.6:3260,1 iqn.1993-08.org.debian:01:222 0', - volume_info['provider_location']) - - expected = self.driver_startup_call_stack + [ - mock.call.createVolume( - 'fakevolume', - 1, - units.Gi, - {'isThinProvisioned': True, - 'clusterName': 'CloudCluster1'}), - mock.call.logout()] - - mock_client.assert_has_calls(expected) - - # mock HTTPServerError - mock_client.createVolume.side_effect =\ - hpeexceptions.HTTPServerError() - # ensure the 
raised exception is a cinder exception - self.assertRaises(exception.VolumeBackendAPIException, - self.driver.create_volume, self.volume) - - @mock.patch.object( - volume_types, - 'get_volume_type', - return_value={'extra_specs': {'hpelh:provisioning': 'full'}}) - def test_create_volume_with_es(self, _mock_volume_type): - - # setup driver with default configuration - # and return the mock HTTP LeftHand client - mock_client = self.setup_driver() - - volume_with_vt = self.volume - volume_with_vt['volume_type_id'] = 1 - - # mock return value of createVolume - mock_client.createVolume.return_value = { - 'iscsiIqn': self.connector['initiator']} - mock_client.getVolumes.return_value = {'total': 1, 'members': []} - - with mock.patch.object(hpe_lefthand_iscsi.HPELeftHandISCSIDriver, - '_create_client') as mock_do_setup: - mock_do_setup.return_value = mock_client - - # execute create_volume - volume_info = self.driver.create_volume(volume_with_vt) - - self.assertEqual('10.0.1.6:3260,1 iqn.1993-08.org.debian:01:222 0', - volume_info['provider_location']) - - expected = self.driver_startup_call_stack + [ - mock.call.createVolume( - 'fakevolume', - 1, - units.Gi, - {'isThinProvisioned': False, - 'clusterName': 'CloudCluster1'}), - mock.call.logout()] - - mock_client.assert_has_calls(expected) - - @mock.patch.object( - volume_types, - 'get_volume_type', - return_value={'extra_specs': (HPELeftHandBaseDriver. 
- old_volume_type['extra_specs'])}) - def test_create_volume_old_volume_type(self, _mock_volume_type): - - # setup driver with default configuration - # and return the mock HTTP LeftHand client - mock_client = self.setup_driver() - - mock_client.getVolumes.return_value = {'total': 1, 'members': []} - - # mock return value of createVolume - mock_client.createVolume.return_value = { - 'iscsiIqn': self.connector['initiator']} - - with mock.patch.object(hpe_lefthand_iscsi.HPELeftHandISCSIDriver, - '_create_client') as mock_do_setup: - mock_do_setup.return_value = mock_client - - # execute driver - volume_info = self.driver.create_volume(self.volume) - - self.assertEqual('10.0.1.6:3260,1 iqn.1993-08.org.debian:01:222 0', - volume_info['provider_location']) - - expected = self.driver_startup_call_stack + [ - mock.call.createVolume( - 'fakevolume', - 1, - units.Gi, - {'isThinProvisioned': True, - 'clusterName': 'CloudCluster1'}), - mock.call.logout()] - - mock_client.assert_has_calls(expected) - - # mock HTTPServerError - mock_client.createVolume.side_effect =\ - hpeexceptions.HTTPServerError() - # ensure the raised exception is a cinder exception - self.assertRaises(exception.VolumeBackendAPIException, - self.driver.create_volume, self.volume) - - def test_delete_volume(self): - - # setup driver with default configuration - # and return the mock HTTP LeftHand client - mock_client = self.setup_driver() - - # mock return value of getVolumeByName - mock_client.getVolumeByName.return_value = {'id': self.volume_id} - mock_client.getVolumes.return_value = {'total': 1, 'members': []} - - with mock.patch.object(hpe_lefthand_iscsi.HPELeftHandISCSIDriver, - '_create_client') as mock_do_setup: - mock_do_setup.return_value = mock_client - - # execute delete_volume - del_volume = self.volume - del_volume['volume_type_id'] = None - self.driver.delete_volume(del_volume) - - expected = self.driver_startup_call_stack + [ - mock.call.getVolumeByName('fakevolume'), - 
mock.call.deleteVolume(self.volume_id), - mock.call.logout()] - - mock_client.assert_has_calls(expected) - - # mock HTTPNotFound (volume not found) - mock_client.getVolumeByName.side_effect =\ - hpeexceptions.HTTPNotFound() - # no exception should escape method - self.driver.delete_volume(del_volume) - - # mock HTTPConflict - mock_client.deleteVolume.side_effect = hpeexceptions.HTTPConflict() - # ensure the raised exception is a cinder exception - self.assertRaises(exception.VolumeBackendAPIException, - self.driver.delete_volume, {}) - - def test_extend_volume(self): - - # setup driver with default configuration - # and return the mock HTTP LeftHand client - mock_client = self.setup_driver() - - # mock return value of getVolumeByName - mock_client.getVolumeByName.return_value = {'id': self.volume_id} - mock_client.getVolumes.return_value = {'total': 1, 'members': []} - - with mock.patch.object(hpe_lefthand_iscsi.HPELeftHandISCSIDriver, - '_create_client') as mock_do_setup: - mock_do_setup.return_value = mock_client - - # execute extend_volume - self.driver.extend_volume(self.volume, 2) - - expected = self.driver_startup_call_stack + [ - mock.call.getVolumeByName('fakevolume'), - mock.call.modifyVolume(1, {'size': 2 * units.Gi}), - mock.call.logout()] - - # validate call chain - mock_client.assert_has_calls(expected) - - # mock HTTPServerError (array failure) - mock_client.modifyVolume.side_effect =\ - hpeexceptions.HTTPServerError() - # ensure the raised exception is a cinder exception - self.assertRaises(exception.VolumeBackendAPIException, - self.driver.extend_volume, self.volume, 2) - - def test_initialize_connection(self): - - # setup driver with default configuration - # and return the mock HTTP LeftHand client - mock_client = self.setup_driver() - - # mock return value of getVolumeByName - mock_client.getServerByName.side_effect = hpeexceptions.HTTPNotFound() - mock_client.createServer.return_value = {'id': self.server_id} - 
mock_client.getVolumeByName.return_value = { - 'id': self.volume_id, - 'iscsiSessions': None - } - mock_client.getVolumes.return_value = {'total': 1, 'members': []} - - with mock.patch.object(hpe_lefthand_iscsi.HPELeftHandISCSIDriver, - '_create_client') as mock_do_setup: - mock_do_setup.return_value = mock_client - - # execute initialize_connection - result = self.driver.initialize_connection( - self.volume, - self.connector) - - # validate - self.assertEqual('iscsi', result['driver_volume_type']) - self.assertFalse(result['data']['target_discovered']) - self.assertEqual(self.volume_id, result['data']['volume_id']) - self.assertNotIn('auth_method', result['data']) - - expected = self.driver_startup_call_stack + [ - mock.call.getServerByName('fakehost'), - mock.call.createServer - ( - 'fakehost', - 'iqn.1993-08.org.debian:01:222', - None - ), - mock.call.getVolumeByName('fakevolume'), - mock.call.addServerAccess(1, 0), - mock.call.logout()] - - # validate call chain - mock_client.assert_has_calls(expected) - - # mock HTTPServerError (array failure) - mock_client.createServer.side_effect =\ - hpeexceptions.HTTPServerError() - # ensure the raised exception is a cinder exception - self.assertRaises( - exception.VolumeBackendAPIException, - self.driver.initialize_connection, self.volume, self.connector) - - def test_initialize_connection_session_exists(self): - - # setup driver with default configuration - # and return the mock HTTP LeftHand client - mock_client = self.setup_driver() - - # mock return value of getVolumeByName - mock_client.getServerByName.side_effect = hpeexceptions.HTTPNotFound() - mock_client.createServer.return_value = {'id': self.server_id} - mock_client.getVolumeByName.return_value = { - 'id': self.volume_id, - 'iscsiSessions': [{'server': {'uri': self.server_uri}}] - } - mock_client.getVolumes.return_value = {'total': 1, 'members': []} - - with mock.patch.object(hpe_lefthand_iscsi.HPELeftHandISCSIDriver, - '_create_client') as mock_do_setup: - 
mock_do_setup.return_value = mock_client - - # execute initialize_connection - result = self.driver.initialize_connection( - self.volume, - self.connector) - - # validate - self.assertEqual('iscsi', result['driver_volume_type']) - self.assertFalse(result['data']['target_discovered']) - self.assertEqual(self.volume_id, result['data']['volume_id']) - self.assertNotIn('auth_method', result['data']) - - expected = self.driver_startup_call_stack + [ - mock.call.getServerByName('fakehost'), - mock.call.createServer - ( - 'fakehost', - 'iqn.1993-08.org.debian:01:222', - None - ), - mock.call.getVolumeByName('fakevolume'), - mock.call.logout()] - - # validate call chain - mock_client.assert_has_calls(expected) - - def test_initialize_connection_with_chaps(self): - - # setup driver with default configuration - # and return the mock HTTP LeftHand client - mock_client = self.setup_driver() - - # mock return value of getVolumeByName - mock_client.getServerByName.side_effect = hpeexceptions.HTTPNotFound() - mock_client.createServer.return_value = { - 'id': self.server_id, - 'chapAuthenticationRequired': True, - 'chapTargetSecret': 'dont_tell'} - mock_client.getVolumeByName.return_value = { - 'id': self.volume_id, - 'iscsiSessions': None - } - mock_client.getVolumes.return_value = {'total': 1, 'members': []} - - with mock.patch.object(hpe_lefthand_iscsi.HPELeftHandISCSIDriver, - '_create_client') as mock_do_setup: - mock_do_setup.return_value = mock_client - - # execute initialize_connection - result = self.driver.initialize_connection( - self.volume, - self.connector) - - # validate - self.assertEqual('iscsi', result['driver_volume_type']) - self.assertFalse(result['data']['target_discovered']) - self.assertEqual(self.volume_id, result['data']['volume_id']) - self.assertEqual('CHAP', result['data']['auth_method']) - - expected = self.driver_startup_call_stack + [ - mock.call.getServerByName('fakehost'), - mock.call.createServer - ( - 'fakehost', - 
'iqn.1993-08.org.debian:01:222', - None - ), - mock.call.getVolumeByName('fakevolume'), - mock.call.addServerAccess(1, 0), - mock.call.logout()] - - # validate call chain - mock_client.assert_has_calls(expected) - - @mock.patch('cinder.volume.utils.generate_password') - def test_initialize_connection_with_chap_disabled(self, mock_utils): - # setup_mock_client drive with CHAP disabled configuration - # and return the mock HTTP LeftHand client - mock_client = self.setup_driver() - mock_utils.return_value = 'random-pass' - mock_client.getServerByName.return_value = { - 'id': self.server_id, - 'name': self.serverName, - 'chapTargetSecret': 'random-pass'} - mock_client.getVolumeByName.return_value = { - 'id': self.volume_id, - 'iscsiSessions': None - } - expected = [ - mock.call.getServerByName('fakehost'), - mock.call.getVolumeByName('fakevolume'), - mock.call.addServerAccess(1, 0), - mock.call.logout()] - with mock.patch.object(hpe_lefthand_iscsi.HPELeftHandISCSIDriver, - '_create_client') as mock_do_setup: - mock_do_setup.return_value = mock_client - - # test initialize_connection with chap disabled - # and chapTargetSecret - self.driver.initialize_connection( - self.volume, - self.connector) - mock_client.assert_has_calls(expected) - - @mock.patch('cinder.volume.utils.generate_password') - def test_initialize_connection_with_chap_enabled(self, mock_utils): - # setup_mock_client drive with CHAP enabled configuration - # and return the mock HTTP LeftHand client - conf = self.default_mock_conf() - conf.hpelefthand_iscsi_chap_enabled = True - mock_client = self.setup_driver(config=conf) - mock_client.getServerByName.return_value = { - 'id': self.server_id, - 'name': self.serverName, - 'chapTargetSecret': None} - - mock_client.getVolumeByName.return_value = { - 'id': self.volume_id, - 'iscsiSessions': None} - expected = [ - mock.call.getServerByName('fakehost'), - mock.call.getVolumeByName('fakevolume'), - mock.call.addServerAccess(1, 0), - mock.call.logout()] - with 
mock.patch.object(hpe_lefthand_iscsi.HPELeftHandISCSIDriver, - '_create_client') as mock_do_setup: - mock_do_setup.return_value = mock_client - - # test initialize_connection with chap enabled - # and chapTargetSecret is None - result = self.driver.initialize_connection( - self.volume, - self.connector) - mock_client.assert_has_calls(expected) - - # test initialize_connection with chapAuthenticationRequired - mock_client.getServerByName.side_effect = ( - hpeexceptions.HTTPNotFound()) - mock_client.createServer.return_value = { - 'id': self.server_id, - 'chapAuthenticationRequired': True, - 'chapTargetSecret': 'random-pass'} - result = self.driver.initialize_connection( - self.volume, - self.connector) - self.assertEqual('random-pass', result['data']['auth_password']) - - def test_terminate_connection(self): - - # setup driver with default configuration - # and return the mock HTTP LeftHand client - mock_client = self.setup_driver() - - mock_client.getVolumeByName.return_value = {'id': self.volume_id} - mock_client.getServerByName.return_value = { - 'id': self.server_id, - 'name': self.serverName} - mock_client.findServerVolumes.return_value = [{'id': self.volume_id}] - mock_client.getVolumes.return_value = {'total': 1, 'members': []} - - with mock.patch.object(hpe_lefthand_iscsi.HPELeftHandISCSIDriver, - '_create_client') as mock_do_setup: - mock_do_setup.return_value = mock_client - - # execute terminate_connection - self.driver.terminate_connection(self.volume, self.connector) - - expected = self.driver_startup_call_stack + [ - mock.call.getVolumeByName('fakevolume'), - mock.call.getServerByName('fakehost'), - mock.call.findServerVolumes('fakehost'), - mock.call.removeServerAccess(1, 0), - mock.call.deleteServer(0)] - - # validate call chain - mock_client.assert_has_calls(expected) - - mock_client.getVolumeByName.side_effect = ( - hpeexceptions.HTTPNotFound()) - # ensure the raised exception is a cinder exception - self.assertRaises( - 
exception.VolumeBackendAPIException, - self.driver.terminate_connection, - self.volume, - self.connector) - - def test_terminate_connection_from_primary_when_failed_over(self): - # setup drive with default configuration - # and return the mock HTTP LeftHand client - mock_client = self.setup_driver() - - mock_client.getServerByName.side_effect = hpeexceptions.HTTPNotFound( - "The host does not exist.") - - with mock.patch.object(hpe_lefthand_iscsi.HPELeftHandISCSIDriver, - '_create_client') as mock_do_setup: - mock_do_setup.return_value = mock_client - - self.driver._active_backend_id = 'some_id' - # execute terminate_connection - self.driver.terminate_connection(self.volume, self.connector) - - # When the volume is still attached to the primary array after a - # fail-over, there should be no call to delete the server. - # We can assert this method is not called to make sure - # the proper exceptions are being raised. - self.assertEqual(0, mock_client.removeServerAccess.call_count) - - def test_terminate_connection_multiple_volumes_on_server(self): - - # setup driver with default configuration - # and return the mock HTTP LeftHand client - mock_client = self.setup_driver() - - mock_client.getVolumeByName.return_value = {'id': self.volume_id} - mock_client.getServerByName.return_value = { - 'id': self.server_id, - 'name': self.serverName} - mock_client.findServerVolumes.return_value = [ - {'id': self.volume_id}, - {'id': 99999}] - mock_client.getVolumes.return_value = {'total': 1, 'members': []} - - with mock.patch.object(hpe_lefthand_iscsi.HPELeftHandISCSIDriver, - '_create_client') as mock_do_setup: - mock_do_setup.return_value = mock_client - - # execute terminate_connection - self.driver.terminate_connection(self.volume, self.connector) - - expected = self.driver_startup_call_stack + [ - mock.call.getVolumeByName('fakevolume'), - mock.call.getServerByName('fakehost'), - mock.call.findServerVolumes('fakehost'), - mock.call.removeServerAccess(1, 0)] - - # validate 
call chain - mock_client.assert_has_calls(expected) - self.assertFalse(mock_client.deleteServer.called) - - mock_client.getVolumeByName.side_effect = ( - hpeexceptions.HTTPNotFound()) - # ensure the raised exception is a cinder exception - self.assertRaises( - exception.VolumeBackendAPIException, - self.driver.terminate_connection, - self.volume, - self.connector) - - def test_create_snapshot(self): - - # setup driver with default configuration - # and return the mock HTTP LeftHand client - mock_client = self.setup_driver() - - mock_client.getVolumeByName.return_value = {'id': self.volume_id} - mock_client.getVolumes.return_value = {'total': 1, 'members': []} - - with mock.patch.object(hpe_lefthand_iscsi.HPELeftHandISCSIDriver, - '_create_client') as mock_do_setup: - mock_do_setup.return_value = mock_client - - # execute create_snapshot - self.driver.create_snapshot(self.snapshot) - mock_client.getVolumes.return_value = {'total': 1, 'members': []} - - expected = self.driver_startup_call_stack + [ - mock.call.getVolumeByName('fakevolume'), - mock.call.createSnapshot( - 'fakeshapshot', - 1, - {'inheritAccess': True}), - mock.call.logout()] - - # validate call chain - mock_client.assert_has_calls(expected) - - # mock HTTPServerError (array failure) - mock_client.getVolumeByName.side_effect =\ - hpeexceptions.HTTPNotFound() - # ensure the raised exception is a cinder exception - self.assertRaises( - exception.VolumeBackendAPIException, - self.driver.create_snapshot, self.snapshot) - - def test_delete_snapshot(self): - - # setup driver with default configuration - # and return the mock HTTP LeftHand client - mock_client = self.setup_driver() - - mock_client.getSnapshotByName.return_value = {'id': self.snapshot_id} - mock_client.getVolumes.return_value = {'total': 1, 'members': []} - - with mock.patch.object(hpe_lefthand_iscsi.HPELeftHandISCSIDriver, - '_create_client') as mock_do_setup: - mock_do_setup.return_value = mock_client - - # execute delete_snapshot - 
self.driver.delete_snapshot(self.snapshot) - - expected = self.driver_startup_call_stack + [ - mock.call.getSnapshotByName('fakeshapshot'), - mock.call.deleteSnapshot(3), - mock.call.logout()] - - # validate call chain - mock_client.assert_has_calls(expected) - - mock_client.getSnapshotByName.side_effect =\ - hpeexceptions.HTTPNotFound() - # no exception is thrown, just error msg is logged - self.driver.delete_snapshot(self.snapshot) - - # mock HTTPServerError (array failure) - ex = hpeexceptions.HTTPServerError({'message': 'Some message.'}) - mock_client.getSnapshotByName.side_effect = ex - # ensure the raised exception is a cinder exception - self.assertRaises( - exception.VolumeBackendAPIException, - self.driver.delete_snapshot, - self.snapshot) - - # mock HTTPServerError because the snap is in use - ex = hpeexceptions.HTTPServerError({ - 'message': - 'Hey, dude cannot be deleted because it is a clone point' - ' duh.'}) - mock_client.getSnapshotByName.side_effect = ex - # ensure the raised exception is a cinder exception - self.assertRaises( - exception.SnapshotIsBusy, - self.driver.delete_snapshot, - self.snapshot) - - # Exception other than HTTPServerError and HTTPNotFound - ex = hpeexceptions.HTTPBadRequest({ - 'message': 'Bad request'}) - mock_client.getSnapshotByName.side_effect = ex - self.assertRaises( - exception.VolumeBackendAPIException, - self.driver.delete_snapshot, - self.snapshot) - - def test_create_volume_from_snapshot(self): - - # setup driver with default configuration - # and return the mock HTTP LeftHand client - mock_client = self.setup_driver() - - mock_client.getSnapshotByName.return_value = {'id': self.snapshot_id} - mock_client.cloneSnapshot.return_value = { - 'iscsiIqn': self.connector['initiator']} - mock_client.getVolumes.return_value = {'total': 1, 'members': []} - - with mock.patch.object(hpe_lefthand_iscsi.HPELeftHandISCSIDriver, - '_create_client') as mock_do_setup: - mock_do_setup.return_value = mock_client - - # execute 
create_volume_from_snapshot - model_update = self.driver.create_volume_from_snapshot( - self.volume, self.snapshot) - - expected_iqn = 'iqn.1993-08.org.debian:01:222 0' - expected_location = "10.0.1.6:3260,1 %s" % expected_iqn - self.assertEqual(expected_location, - model_update['provider_location']) - - expected = self.driver_startup_call_stack + [ - mock.call.getSnapshotByName('fakeshapshot'), - mock.call.cloneSnapshot('fakevolume', 3), - mock.call.logout()] - - # validate call chain - mock_client.assert_has_calls(expected) - - def test_create_volume_from_snapshot_extend(self): - - # setup drive with default configuration - # and return the mock HTTP LeftHand client - mock_client = self.setup_driver() - - mock_client.getSnapshotByName.return_value = {'id': self.snapshot_id} - mock_client.getVolumeByName.return_value = {'id': self.volume_id} - mock_client.cloneSnapshot.return_value = { - 'iscsiIqn': self.connector['initiator']} - mock_client.getVolumes.return_value = {'total': 1, 'members': []} - - with mock.patch.object(hpe_lefthand_iscsi.HPELeftHandISCSIDriver, - '_create_client') as mock_do_setup: - mock_do_setup.return_value = mock_client - - # execute create_volume_from_snapshot - model_update = self.driver.create_volume_from_snapshot( - self.volume_extend, self.snapshot) - - expected_iqn = 'iqn.1993-08.org.debian:01:222 0' - expected_location = "10.0.1.6:3260,1 %s" % expected_iqn - self.assertEqual(expected_location, - model_update['provider_location']) - - expected = self.driver_startup_call_stack + [ - mock.call.getSnapshotByName('fakeshapshot'), - mock.call.cloneSnapshot('fakevolume', 3), - mock.call.login('foo1', 'bar2'), - mock.call.getClusterByName('CloudCluster1'), - mock.call.setSSHOptions( - HPELEFTHAND_SSH_IP, - HPELEFTHAND_USERNAME, - HPELEFTHAND_PASSWORD, - missing_key_policy='AutoAddPolicy', - privatekey=HPELEFTHAND_SAN_SSH_PRIVATE, - known_hosts_file=mock.ANY, - port=HPELEFTHAND_SSH_PORT, - conn_timeout=HPELEFTHAND_SAN_SSH_CON_TIMEOUT), - 
mock.call.getVolumeByName('fakevolume'), - mock.call.modifyVolume(self.volume_id, { - 'size': self.volume_extend['size'] * units.Gi}), - mock.call.logout(), - mock.call.logout()] - - # validate call chain - mock_client.assert_has_calls(expected) - - @mock.patch.object(volume_types, 'get_volume_type') - def test_create_volume_from_snapshot_with_replication( - self, - mock_get_volume_type): - - conf = self.default_mock_conf() - conf.replication_device = self.repl_targets_unmgd - mock_client = self.setup_driver(config=conf) - - mock_client.getSnapshotByName.return_value = {'id': self.snapshot_id} - mock_client.cloneSnapshot.return_value = { - 'iscsiIqn': self.connector['initiator']} - - mock_client.doesRemoteSnapshotScheduleExist.return_value = True - mock_get_volume_type.return_value = { - 'name': 'replicated', - 'extra_specs': { - 'replication_enabled': ' True'}} - - volume = self.volume.copy() - volume['volume_type_id'] = self.volume_type_id - - with mock.patch.object(hpe_lefthand_iscsi.HPELeftHandISCSIDriver, - '_create_client') as mock_do_setup: - mock_do_setup.return_value = mock_client - - # execute create_volume_from_snapshot for volume replication - # enabled, with snapshot scheduled - model_update = self.driver.create_volume_from_snapshot( - volume, self.snapshot) - - expected_iqn = 'iqn.1993-08.org.debian:01:222 0' - expected_location = "10.0.1.6:3260,1 %s" % expected_iqn - self.assertEqual(expected_location, - model_update['provider_location']) - self.assertEqual('enabled', model_update['replication_status']) - # expected calls - expected = self.driver_startup_call_stack + [ - mock.call.getSnapshotByName('fakeshapshot'), - mock.call.cloneSnapshot(self.volume['name'], self.snapshot_id), - mock.call.doesRemoteSnapshotScheduleExist( - 'fakevolume_SCHED_Pri'), - mock.call.startRemoteSnapshotSchedule('fakevolume_SCHED_Pri'), - mock.call.logout()] - mock_client.assert_has_calls(expected) - - def test_create_volume_from_snapshot_exception(self): - # setup driver 
with default configuration - # and return the mock HTTP LeftHand client - mock_client = self.setup_driver() - mock_client.getSnapshotByName.side_effect = ( - hpeexceptions.HTTPNotFound()) - - with mock.patch.object(hpe_lefthand_iscsi.HPELeftHandISCSIDriver, - '_create_client') as mock_do_setup: - mock_do_setup.return_value = mock_client - - # testing when getSnapshotByName returns an exception - self.assertRaises( - exception.VolumeBackendAPIException, - self.driver.create_volume_from_snapshot, - self.volume, self.snapshot) - - # expected calls - expected = self.driver_startup_call_stack + [ - mock.call.getSnapshotByName(self.snapshot_name), - mock.call.logout()] - mock_client.assert_has_calls(expected) - - def test_create_cloned_volume(self): - - # setup driver with default configuration - # and return the mock HTTP LeftHand client - mock_client = self.setup_driver() - - mock_client.getVolumeByName.return_value = {'id': self.volume_id} - mock_client.cloneVolume.return_value = { - 'iscsiIqn': self.connector['initiator']} - mock_client.getVolumes.return_value = {'total': 1, 'members': []} - - with mock.patch.object(hpe_lefthand_iscsi.HPELeftHandISCSIDriver, - '_create_client') as mock_do_setup: - mock_do_setup.return_value = mock_client - - # execute create_cloned_volume - model_update = self.driver.create_cloned_volume( - self.cloned_volume, self.volume) - - expected_iqn = 'iqn.1993-08.org.debian:01:222 0' - expected_location = "10.0.1.6:3260,1 %s" % expected_iqn - self.assertEqual(expected_location, - model_update['provider_location']) - - expected = self.driver_startup_call_stack + [ - mock.call.getVolumeByName('fakevolume'), - mock.call.cloneVolume('clone_volume', 1), - mock.call.logout()] - - # validate call chain - mock_client.assert_has_calls(expected) - - @mock.patch.object(volume_types, 'get_volume_type') - def test_create_cloned_volume_with_replication(self, mock_get_volume_type): - - conf = self.default_mock_conf() - conf.replication_device = 
self.repl_targets_unmgd - mock_client = self.setup_driver(config=conf) - mock_replicated_client = self.setup_driver(config=conf) - - mock_client.getVolumeByName.return_value = {'id': self.volume_id} - mock_client.cloneVolume.return_value = { - 'iscsiIqn': self.connector['initiator']} - mock_client.doesRemoteSnapshotScheduleExist.return_value = False - - mock_get_volume_type.return_value = { - 'name': 'replicated', - 'extra_specs': { - 'replication_enabled': ' True'}} - cloned_volume = self.cloned_volume.copy() - cloned_volume['volume_type_id'] = self.volume_type_id - with mock.patch.object( - hpe_lefthand_iscsi.HPELeftHandISCSIDriver, - '_create_client') as mock_do_setup: - with mock.patch.object( - hpe_lefthand_iscsi.HPELeftHandISCSIDriver, - '_create_replication_client') as mock_replication_client: - mock_do_setup.return_value = mock_client - mock_replication_client.return_value = mock_replicated_client - - # execute create_cloned_volume - model_update = self.driver.create_cloned_volume( - cloned_volume, self.volume) - self.assertEqual('enabled', - model_update['replication_status']) - - expected = self.driver_startup_call_stack + [ - mock.call.getVolumeByName('fakevolume'), - mock.call.cloneVolume('clone_volume', 1), - mock.call.doesRemoteSnapshotScheduleExist( - 'clone_volume_SCHED_Pri'), - mock.call.createRemoteSnapshotSchedule( - 'clone_volume', 'clone_volume_SCHED', 1800, - '1970-01-01T00:00:00Z', 5, 'CloudCluster1', 5, - 'clone_volume', '1.1.1.1', 'foo1', 'bar2'), - mock.call.logout()] - mock_client.assert_has_calls(expected) - - expected_calls_replica_client = [ - mock.call.createVolume('clone_volume', 1, - cloned_volume['size'] * units.Gi, - None), - mock.call.makeVolumeRemote('clone_volume', - 'clone_volume_SS'), - mock.call.getIPFromCluster('CloudCluster1')] - - mock_replicated_client.assert_has_calls( - expected_calls_replica_client) - - def test_create_cloned_volume_exception(self): - - # setup driver with default configuration - # and return the mock 
HTTP LeftHand client - mock_client = self.setup_driver() - mock_client.cloneVolume.side_effect = ( - hpeexceptions.HTTPServerError()) - mock_client.getVolumeByName.return_value = {'id': self.volume_id} - - with mock.patch.object(hpe_lefthand_iscsi.HPELeftHandISCSIDriver, - '_create_client') as mock_do_setup: - mock_do_setup.return_value = mock_client - - self.assertRaises( - exception.VolumeBackendAPIException, - self.driver.create_cloned_volume, - self.cloned_volume, self.volume) - - expected = self.driver_startup_call_stack + [ - mock.call.getVolumeByName('fakevolume'), - mock.call.cloneVolume('clone_volume', 1), - mock.call.logout()] - - # validate call chain - mock_client.assert_has_calls(expected) - - def test_create_cloned_volume_extend(self): - - # setup driver with default configuration - # and return the mock HTTP LeftHand client - mock_client = self.setup_driver() - - mock_client.getVolumeByName.return_value = {'id': self.volume_id} - mock_client.cloneVolume.return_value = { - 'iscsiIqn': self.connector['initiator']} - mock_client.getVolumes.return_value = {'total': 1, 'members': []} - - with mock.patch.object(hpe_lefthand_iscsi.HPELeftHandISCSIDriver, - '_create_client') as mock_do_setup: - mock_do_setup.return_value = mock_client - - # execute create_cloned_volume with extend - model_update = self.driver.create_cloned_volume( - self.cloned_volume_extend, self.volume) - - expected_iqn = 'iqn.1993-08.org.debian:01:222 0' - expected_location = "10.0.1.6:3260,1 %s" % expected_iqn - self.assertEqual(expected_location, - model_update['provider_location']) - - expected = self.driver_startup_call_stack + [ - mock.call.getVolumeByName('fakevolume'), - mock.call.cloneVolume('clone_volume', 1), - mock.call.login('foo1', 'bar2'), - mock.call.getClusterByName('CloudCluster1'), - mock.call.setSSHOptions( - HPELEFTHAND_SSH_IP, - HPELEFTHAND_USERNAME, - HPELEFTHAND_PASSWORD, - missing_key_policy='AutoAddPolicy', - privatekey=HPELEFTHAND_SAN_SSH_PRIVATE, - 
known_hosts_file=mock.ANY, - port=HPELEFTHAND_SSH_PORT, - conn_timeout=HPELEFTHAND_SAN_SSH_CON_TIMEOUT), - mock.call.getVolumeByName('clone_volume'), - mock.call.modifyVolume(self.volume_id, {'size': 5 * units.Gi}), - mock.call.logout(), - mock.call.logout()] - - # validate call chain - mock_client.assert_has_calls(expected) - - @mock.patch.object(volume_types, 'get_volume_type') - def test_extra_spec_mapping(self, _mock_get_volume_type): - - # setup driver with default configuration - self.setup_driver() - - # 2 extra specs we don't care about, and - # 1 that will get mapped - _mock_get_volume_type.return_value = { - 'extra_specs': { - 'foo:bar': 'fake', - 'bar:foo': 1234, - 'hpelh:provisioning': 'full'}} - - volume_with_vt = self.volume - volume_with_vt['volume_type_id'] = self.volume_type_id - - # get the extra specs of interest from this volume's volume type - volume_extra_specs = self.driver._get_volume_extra_specs( - volume_with_vt) - extra_specs = self.driver._get_lh_extra_specs( - volume_extra_specs, - hpe_lefthand_iscsi.extra_specs_key_map.keys()) - - # map the extra specs key/value pairs to key/value pairs - # used as optional configuration values by the LeftHand backend - optional = self.driver._map_extra_specs(extra_specs) - - self.assertDictEqual({'isThinProvisioned': False}, optional) - - @mock.patch.object(volume_types, 'get_volume_type') - def test_extra_spec_mapping_invalid_value(self, _mock_get_volume_type): - - # setup driver with default configuration - self.setup_driver() - - volume_with_vt = self.volume - volume_with_vt['volume_type_id'] = self.volume_type_id - - _mock_get_volume_type.return_value = { - 'extra_specs': { - # r-07 is an invalid value for hpelh:ao - 'hpelh:data_pl': 'r-07', - 'hpelh:ao': 'true'}} - - # get the extra specs of interest from this volume's volume type - volume_extra_specs = self.driver._get_volume_extra_specs( - volume_with_vt) - extra_specs = self.driver._get_lh_extra_specs( - volume_extra_specs, - 
hpe_lefthand_iscsi.extra_specs_key_map.keys()) - - # map the extra specs key/value pairs to key/value pairs - # used as optional configuration values by the LeftHand backend - optional = self.driver._map_extra_specs(extra_specs) - - # {'hpelh:ao': 'true'} should map to - # {'isAdaptiveOptimizationEnabled': True} - # without hpelh:data_pl since r-07 is an invalid value - self.assertDictEqual({'isAdaptiveOptimizationEnabled': True}, optional) - - def test_retype_with_no_LH_extra_specs(self): - # setup driver with default configuration - # and return the mock HTTP LeftHand client - mock_client = self.setup_driver() - mock_client.getVolumes.return_value = {'total': 1, 'members': []} - - ctxt = context.get_admin_context() - - host = {'host': self.serverName} - key_specs_old = {'foo': False, 'bar': 2, 'error': True} - key_specs_new = {'foo': True, 'bar': 5, 'error': False} - old_type_ref = volume_types.create(ctxt, 'old', key_specs_old) - new_type_ref = volume_types.create(ctxt, 'new', key_specs_new) - - diff, equal = volume_types.volume_types_diff(ctxt, old_type_ref['id'], - new_type_ref['id']) - - volume = dict.copy(self.volume) - old_type = volume_types.get_volume_type(ctxt, old_type_ref['id']) - volume['volume_type'] = old_type - volume['host'] = host - new_type = volume_types.get_volume_type(ctxt, new_type_ref['id']) - - with mock.patch.object(hpe_lefthand_iscsi.HPELeftHandISCSIDriver, - '_create_client') as mock_do_setup: - mock_do_setup.return_value = mock_client - - self.driver.retype(ctxt, volume, new_type, diff, host) - - expected = self.driver_startup_call_stack + [ - mock.call.getVolumeByName('fakevolume'), - mock.call.logout()] - - # validate call chain - mock_client.assert_has_calls(expected) - - def test_retype_with_only_LH_extra_specs(self): - # setup driver with default configuration - # and return the mock HTTP LeftHand client - mock_client = self.setup_driver() - mock_client.getVolumeByName.return_value = {'id': self.volume_id} - 
mock_client.getVolumes.return_value = {'total': 1, 'members': []} - - ctxt = context.get_admin_context() - - host = {'host': self.serverName} - key_specs_old = {'hpelh:provisioning': 'thin'} - key_specs_new = {'hpelh:provisioning': 'full', 'hpelh:ao': 'true'} - old_type_ref = volume_types.create(ctxt, 'old', key_specs_old) - new_type_ref = volume_types.create(ctxt, 'new', key_specs_new) - - diff, equal = volume_types.volume_types_diff(ctxt, old_type_ref['id'], - new_type_ref['id']) - - volume = dict.copy(self.volume) - old_type = volume_types.get_volume_type(ctxt, old_type_ref['id']) - volume['volume_type'] = old_type - volume['host'] = host - new_type = volume_types.get_volume_type(ctxt, new_type_ref['id']) - - with mock.patch.object(hpe_lefthand_iscsi.HPELeftHandISCSIDriver, - '_create_client') as mock_do_setup: - mock_do_setup.return_value = mock_client - - self.driver.retype(ctxt, volume, new_type, diff, host) - - expected = self.driver_startup_call_stack + [ - mock.call.getVolumeByName('fakevolume'), - mock.call.modifyVolume( - 1, { - 'isThinProvisioned': False, - 'isAdaptiveOptimizationEnabled': True}), - mock.call.logout()] - - # validate call chain - mock_client.assert_has_calls(expected) - - def test_retype_with_both_extra_specs(self): - # setup driver with default configuration - # and return the mock HTTP LeftHand client - mock_client = self.setup_driver() - mock_client.getVolumeByName.return_value = {'id': self.volume_id} - mock_client.getVolumes.return_value = {'total': 1, 'members': []} - - ctxt = context.get_admin_context() - - host = {'host': self.serverName} - key_specs_old = {'hpelh:provisioning': 'full', 'foo': 'bar'} - key_specs_new = {'hpelh:provisioning': 'thin', 'foo': 'foobar'} - old_type_ref = volume_types.create(ctxt, 'old', key_specs_old) - new_type_ref = volume_types.create(ctxt, 'new', key_specs_new) - - diff, equal = volume_types.volume_types_diff(ctxt, old_type_ref['id'], - new_type_ref['id']) - - volume = dict.copy(self.volume) - 
old_type = volume_types.get_volume_type(ctxt, old_type_ref['id']) - volume['volume_type'] = old_type - volume['host'] = host - new_type = volume_types.get_volume_type(ctxt, new_type_ref['id']) - - with mock.patch.object(hpe_lefthand_iscsi.HPELeftHandISCSIDriver, - '_create_client') as mock_do_setup: - mock_do_setup.return_value = mock_client - - self.driver.retype(ctxt, volume, new_type, diff, host) - - expected = self.driver_startup_call_stack + [ - mock.call.getVolumeByName('fakevolume'), - mock.call.modifyVolume(1, {'isThinProvisioned': True}), - mock.call.logout()] - - # validate call chain - mock_client.assert_has_calls(expected) - - def test_retype_same_extra_specs(self): - # setup driver with default configuration - # and return the mock HTTP LeftHand client - mock_client = self.setup_driver() - mock_client.getVolumeByName.return_value = {'id': self.volume_id} - mock_client.getVolumes.return_value = {'total': 1, 'members': []} - - ctxt = context.get_admin_context() - - host = {'host': self.serverName} - key_specs_old = {'hpelh:provisioning': 'full', 'hpelh:ao': 'true'} - key_specs_new = {'hpelh:provisioning': 'full', 'hpelh:ao': 'false'} - old_type_ref = volume_types.create(ctxt, 'old', key_specs_old) - new_type_ref = volume_types.create(ctxt, 'new', key_specs_new) - - diff, equal = volume_types.volume_types_diff(ctxt, old_type_ref['id'], - new_type_ref['id']) - - volume = dict.copy(self.volume) - old_type = volume_types.get_volume_type(ctxt, old_type_ref['id']) - volume['volume_type'] = old_type - volume['host'] = host - new_type = volume_types.get_volume_type(ctxt, new_type_ref['id']) - - with mock.patch.object(hpe_lefthand_iscsi.HPELeftHandISCSIDriver, - '_create_client') as mock_do_setup: - mock_do_setup.return_value = mock_client - - self.driver.retype(ctxt, volume, new_type, diff, host) - - expected = self.driver_startup_call_stack + [ - mock.call.getVolumeByName('fakevolume'), - mock.call.modifyVolume( - 1, - {'isAdaptiveOptimizationEnabled': False}), 
- mock.call.logout()] - - # validate call chain - mock_client.assert_has_calls(expected) - - def test_migrate_no_location(self): - # setup driver with default configuration - # and return the mock HTTP LeftHand client - mock_client = self.setup_driver() - - host = {'host': self.serverName, 'capabilities': {}} - - with mock.patch.object(hpe_lefthand_iscsi.HPELeftHandISCSIDriver, - '_create_client') as mock_do_setup: - mock_do_setup.return_value = mock_client - - (migrated, update) = self.driver.migrate_volume( - None, - self.volume, - host) - self.assertFalse(migrated) - - mock_client.assert_has_calls([]) - self.assertEqual(0, len(mock_client.method_calls)) - - def test_migrate_incorrect_vip(self): - # setup driver with default configuration - # and return the mock HTTP LeftHand client - mock_client = self.setup_driver() - mock_client.getClusterByName.return_value = { - "virtualIPAddresses": [{ - "ipV4Address": "10.10.10.10", - "ipV4NetMask": "255.255.240.0"}], - "id": self.cluster_id} - - mock_client.getVolumeByName.return_value = {'id': self.volume_id} - mock_client.getVolumes.return_value = {'total': 1, 'members': []} - - location = (self.driver.DRIVER_LOCATION % { - 'cluster': 'New_CloudCluster', - 'vip': '10.10.10.111'}) - - host = { - 'host': self.serverName, - 'capabilities': {'location_info': location}} - - with mock.patch.object(hpe_lefthand_iscsi.HPELeftHandISCSIDriver, - '_create_client') as mock_do_setup: - mock_do_setup.return_value = mock_client - - (migrated, update) = self.driver.migrate_volume( - None, - self.volume, - host) - self.assertFalse(migrated) - - expected = self.driver_startup_call_stack + [ - mock.call.getClusterByName('New_CloudCluster'), - mock.call.logout()] - - mock_client.assert_has_calls(expected) - # and nothing else - self.assertEqual( - len(expected), - len(mock_client.method_calls)) - - def test_migrate_with_location(self): - # setup driver with default configuration - # and return the mock HTTP LeftHand client - mock_client = 
self.setup_driver() - mock_client.getClusterByName.return_value = { - "virtualIPAddresses": [{ - "ipV4Address": "10.10.10.111", - "ipV4NetMask": "255.255.240.0"}], - "id": self.cluster_id} - - mock_client.getVolumeByName.return_value = {'id': self.volume_id, - 'iscsiSessions': None} - mock_client.getVolume.return_value = {'snapshots': { - 'resource': None}} - mock_client.getVolumes.return_value = {'total': 1, 'members': []} - - location = (self.driver.DRIVER_LOCATION % { - 'cluster': 'New_CloudCluster', - 'vip': '10.10.10.111'}) - - host = { - 'host': self.serverName, - 'capabilities': {'location_info': location}} - - with mock.patch.object(hpe_lefthand_iscsi.HPELeftHandISCSIDriver, - '_create_client') as mock_do_setup: - mock_do_setup.return_value = mock_client - - (migrated, update) = self.driver.migrate_volume( - None, - self.volume, - host) - self.assertTrue(migrated) - - expected = self.driver_startup_call_stack + [ - mock.call.getClusterByName('New_CloudCluster'), - mock.call.logout()] + self.driver_startup_call_stack + [ - mock.call.getVolumeByName('fakevolume'), - mock.call.getVolume( - 1, - 'fields=snapshots,snapshots[resource[members[name]]]'), - mock.call.modifyVolume(1, {'clusterName': 'New_CloudCluster'}), - mock.call.logout()] - - mock_client.assert_has_calls(expected) - # and nothing else - self.assertEqual( - len(expected), - len(mock_client.method_calls)) - - def test_migrate_with_Snapshots(self): - # setup driver with default configuration - # and return the mock HTTP LeftHand client - mock_client = self.setup_driver() - mock_client.getClusterByName.return_value = { - "virtualIPAddresses": [{ - "ipV4Address": "10.10.10.111", - "ipV4NetMask": "255.255.240.0"}], - "id": self.cluster_id} - - mock_client.getVolumeByName.return_value = { - 'id': self.volume_id, - 'iscsiSessions': None} - mock_client.getVolume.return_value = {'snapshots': { - 'resource': 'snapfoo'}} - mock_client.getVolumes.return_value = {'total': 1, 'members': []} - - location = 
(self.driver.DRIVER_LOCATION % { - 'cluster': 'New_CloudCluster', - 'vip': '10.10.10.111'}) - - host = { - 'host': self.serverName, - 'capabilities': {'location_info': location}} - - with mock.patch.object(hpe_lefthand_iscsi.HPELeftHandISCSIDriver, - '_create_client') as mock_do_setup: - mock_do_setup.return_value = mock_client - - (migrated, update) = self.driver.migrate_volume( - None, - self.volume, - host) - self.assertFalse(migrated) - - expected = self.driver_startup_call_stack + [ - mock.call.getClusterByName('New_CloudCluster'), - mock.call.logout()] + self.driver_startup_call_stack + [ - mock.call.getVolumeByName('fakevolume'), - mock.call.getVolume( - 1, - 'fields=snapshots,snapshots[resource[members[name]]]'), - mock.call.logout()] - - mock_client.assert_has_calls(expected) - # and nothing else - self.assertEqual( - len(expected), - len(mock_client.method_calls)) - - def test_migrate_volume_error_diff_backends(self): - # setup driver with default configuration - # and return the mock HTTP LeftHand client - mock_client = self.setup_driver() - fake_driver_loc = 'FakeDriver %(cluster)s %(vip)s' - location = (fake_driver_loc % {'cluster': 'New_CloudCluster', - 'vip': '10.10.10.111'}) - host = {'host': self.serverName, - 'capabilities': {'location_info': location}} - - with mock.patch.object(hpe_lefthand_iscsi.HPELeftHandISCSIDriver, - '_create_client') as mock_do_setup: - mock_do_setup.return_value = mock_client - - (migrated, update) = self.driver.migrate_volume(None, - self.volume, - host) - self.assertFalse(migrated) - expected = self.driver_startup_call_stack + [ - mock.call.getClusterByName('New_CloudCluster'), - mock.call.logout()] - mock_client.assert_has_calls(expected) - - def test_migrate_volume_error_diff_management_group(self): - # setup driver with default configuration - # and return the mock HTTP LeftHand client - mock_client = self.setup_driver() - location = (self.driver.DRIVER_LOCATION % {'cluster': 'FakeCluster', - 'vip': '10.10.10.112'}) 
- host = {'host': self.serverName, - 'capabilities': {'location_info': location}} - mock_client.getClusterByName.return_value = { - 'id': self.cluster_id, - 'virtualIPAddresses': [{'ipV4Address': '10.0.1.6', - 'ipV4NetMask': '255.255.240.0'}] - } - - with mock.patch.object(hpe_lefthand_iscsi.HPELeftHandISCSIDriver, - '_create_client') as mock_do_setup: - mock_do_setup.return_value = mock_client - - (migrated, update) = self.driver.migrate_volume(None, - self.volume, - host) - self.assertFalse(migrated) - expected = self.driver_startup_call_stack + [ - mock.call.getClusterByName('FakeCluster'), - mock.call.logout()] - mock_client.assert_has_calls(expected) - - def test_migrate_volume_exception_diff_management_group(self): - # setup driver with default configuration - # and return the mock HTTP LeftHand client - mock_client = self.setup_driver() - location = (self.driver.DRIVER_LOCATION % { - 'cluster': 'New_CloudCluster', - 'vip': '10.10.10.111'}) - host = { - 'host': self.serverName, - 'capabilities': {'location_info': location}} - mock_client.getClusterByName.side_effect = [{ - 'id': self.cluster_id, - 'virtualIPAddresses': [{ - 'ipV4Address': '10.0.1.6', - 'ipV4NetMask': '255.255.240.0'}]}, - hpeexceptions.HTTPNotFound] - - with mock.patch.object(hpe_lefthand_iscsi.HPELeftHandISCSIDriver, - '_create_client') as mock_do_setup: - mock_do_setup.return_value = mock_client - - (migrated, update) = self.driver.migrate_volume(None, - self.volume, - host) - self.assertFalse(migrated) - expected = self.driver_startup_call_stack + [ - mock.call.getClusterByName('New_CloudCluster'), - mock.call.logout()] - mock_client.assert_has_calls(expected) - - def test_migrate_volume_error_exported_volume(self): - # setup driver with default configuration - # and return the mock HTTP LeftHand client - mock_client = self.setup_driver() - mock_client.getClusterByName.return_value = { - "virtualIPAddresses": [{ - "ipV4Address": "10.10.10.111", - "ipV4NetMask": "255.255.240.0"}], - "id": 
self.cluster_id - } - mock_client.getVolumeByName.return_value = { - 'id': self.volume_id, - 'iscsiSessions': [{'server': {'uri': self.server_uri}}] - } - location = (self.driver.DRIVER_LOCATION % { - 'cluster': 'New_CloudCluster', - 'vip': '10.10.10.111'}) - host = { - 'host': self.serverName, - 'capabilities': {'location_info': location}} - - with mock.patch.object(hpe_lefthand_iscsi.HPELeftHandISCSIDriver, - '_create_client') as mock_do_setup: - mock_do_setup.return_value = mock_client - - (migrated, update) = self.driver.migrate_volume(None, - self.volume, - host) - self.assertFalse(migrated) - expected = self.driver_startup_call_stack + [ - mock.call.getClusterByName('New_CloudCluster'), - mock.call.logout()] + self.driver_startup_call_stack + [ - mock.call.getVolumeByName(self.volume['name']), - mock.call.logout()] - mock_client.assert_has_calls(expected) - - def test_migrate_volume_error_volume_not_exist(self): - # setup driver with default configuration - # and return the mock HTTP LeftHand client - mock_client = self.setup_driver() - mock_client.getClusterByName.return_value = { - "virtualIPAddresses": [{ - "ipV4Address": "10.10.10.111", - "ipV4NetMask": "255.255.240.0"}], - "id": self.cluster_id} - - mock_client.getVolumeByName.return_value = { - 'id': self.volume_id, - 'iscsiSessions': None} - mock_client.getVolume.return_value = {'snapshots': { - 'resource': 'snapfoo'}} - - location = (self.driver.DRIVER_LOCATION % { - 'cluster': 'New_CloudCluster', - 'vip': '10.10.10.111'}) - host = { - 'host': self.serverName, - 'capabilities': {'location_info': location}} - - with mock.patch.object(hpe_lefthand_iscsi.HPELeftHandISCSIDriver, - '_create_client') as mock_do_setup: - mock_do_setup.return_value = mock_client - - (migrated, update) = self.driver.migrate_volume( - None, - self.volume, - host) - self.assertFalse(migrated) - - expected = self.driver_startup_call_stack + [ - mock.call.getClusterByName('New_CloudCluster'), - mock.call.logout()] + 
self.driver_startup_call_stack + [ - mock.call.getVolumeByName(self.volume['name']), - mock.call.getVolume( - self.volume['id'], - 'fields=snapshots,snapshots[resource[members[name]]]'), - mock.call.logout()] - - mock_client.assert_has_calls(expected) - - def test_migrate_volume_exception(self): - # setup driver with default configuration - # and return the mock HTTP LeftHand client - mock_client = self.setup_driver() - mock_client.getClusterByName.return_value = { - "virtualIPAddresses": [{ - "ipV4Address": "10.10.10.111", - "ipV4NetMask": "255.255.240.0"}], - "id": self.cluster_id} - - mock_client.getVolumeByName.return_value = { - 'id': self.volume_id, - 'iscsiSessions': None} - mock_client.getVolume.side_effect = hpeexceptions.HTTPServerError() - - location = (self.driver.DRIVER_LOCATION % { - 'cluster': 'New_CloudCluster', - 'vip': '10.10.10.111'}) - host = { - 'host': self.serverName, - 'capabilities': {'location_info': location}} - - with mock.patch.object(hpe_lefthand_iscsi.HPELeftHandISCSIDriver, - '_create_client') as mock_do_setup: - mock_do_setup.return_value = mock_client - # Testing for any other HTTPServerError - (migrated, update) = self.driver.migrate_volume( - None, - self.volume, - host) - self.assertFalse(migrated) - - expected = self.driver_startup_call_stack + [ - mock.call.getClusterByName('New_CloudCluster'), - mock.call.logout()] + self.driver_startup_call_stack + [ - mock.call.getVolumeByName(self.volume['name']), - mock.call.getVolume( - self.volume['id'], - 'fields=snapshots,snapshots[resource[members[name]]]'), - mock.call.logout()] - - mock_client.assert_has_calls(expected) - - def test_update_migrated_volume(self): - mock_client = self.setup_driver() - volume_id = 'fake_vol_id' - clone_id = 'fake_clone_id' - fake_old_volume = {'id': volume_id} - provider_location = 'foo' - fake_new_volume = {'id': clone_id, - '_name_id': clone_id, - 'provider_location': provider_location} - original_volume_status = 'available' - with 
mock.patch.object(hpe_lefthand_iscsi.HPELeftHandISCSIDriver, - '_create_client') as mock_do_setup: - mock_do_setup.return_value = mock_client - actual_update = self.driver.update_migrated_volume( - context.get_admin_context(), fake_old_volume, - fake_new_volume, original_volume_status) - - expected_update = {'_name_id': None, - 'provider_location': None} - self.assertEqual(expected_update, actual_update) - - def test_update_migrated_volume_attached(self): - mock_client = self.setup_driver() - volume_id = 'fake_vol_id' - clone_id = 'fake_clone_id' - fake_old_volume = {'id': volume_id} - provider_location = 'foo' - fake_new_volume = {'id': clone_id, - '_name_id': clone_id, - 'provider_location': provider_location} - original_volume_status = 'in-use' - - with mock.patch.object(hpe_lefthand_iscsi.HPELeftHandISCSIDriver, - '_create_client') as mock_do_setup: - mock_do_setup.return_value = mock_client - actual_update = self.driver.update_migrated_volume( - context.get_admin_context(), fake_old_volume, - fake_new_volume, original_volume_status) - - expected_update = {'_name_id': fake_new_volume['_name_id'], - 'provider_location': provider_location} - self.assertEqual(expected_update, actual_update) - - def test_update_migrated_volume_failed_to_rename(self): - mock_client = self.setup_driver() - volume_id = 'fake_vol_id' - clone_id = 'fake_clone_id' - fake_old_volume = {'id': volume_id} - provider_location = 'foo' - fake_new_volume = {'id': clone_id, - '_name_id': clone_id, - 'provider_location': provider_location} - original_volume_status = 'available' - # mock HTTPServerError (array failure) - mock_client.modifyVolume.side_effect =\ - hpeexceptions.HTTPServerError() - with mock.patch.object(hpe_lefthand_iscsi.HPELeftHandISCSIDriver, - '_create_client') as mock_do_setup: - mock_do_setup.return_value = mock_client - actual_update = self.driver.update_migrated_volume( - context.get_admin_context(), fake_old_volume, - fake_new_volume, original_volume_status) - - 
expected_update = {'_name_id': clone_id, - 'provider_location': provider_location} - self.assertEqual(expected_update, actual_update) - - @mock.patch.object(volume_types, 'get_volume_type', - return_value={'extra_specs': {'hpelh:ao': 'true'}}) - def test_create_volume_with_ao_true(self, _mock_volume_type): - - # setup driver with default configuration - # and return the mock HTTP LeftHand client - mock_client = self.setup_driver() - - volume_with_vt = self.volume - volume_with_vt['volume_type_id'] = 1 - - # mock return value of createVolume - mock_client.createVolume.return_value = { - 'iscsiIqn': self.connector['initiator']} - mock_client.getVolumes.return_value = {'total': 1, 'members': []} - - with mock.patch.object(hpe_lefthand_iscsi.HPELeftHandISCSIDriver, - '_create_client') as mock_do_setup: - mock_do_setup.return_value = mock_client - - volume_info = self.driver.create_volume(volume_with_vt) - - self.assertEqual('10.0.1.6:3260,1 iqn.1993-08.org.debian:01:222 0', - volume_info['provider_location']) - - # make sure createVolume is called without - # isAdaptiveOptimizationEnabled == true - expected = self.driver_startup_call_stack + [ - mock.call.createVolume( - 'fakevolume', - 1, - units.Gi, - {'isThinProvisioned': True, - 'clusterName': 'CloudCluster1'}), - mock.call.logout()] - - mock_client.assert_has_calls(expected) - - @mock.patch.object(volume_types, 'get_volume_type', - return_value={'extra_specs': {'hpelh:ao': 'false'}}) - def test_create_volume_with_ao_false(self, _mock_volume_type): - - # setup driver with default configuration - # and return the mock HTTP LeftHand client - mock_client = self.setup_driver() - - volume_with_vt = self.volume - volume_with_vt['volume_type_id'] = 1 - - # mock return value of createVolume - mock_client.createVolume.return_value = { - 'iscsiIqn': self.connector['initiator']} - mock_client.getVolumes.return_value = {'total': 1, 'members': []} - - with mock.patch.object(hpe_lefthand_iscsi.HPELeftHandISCSIDriver, - 
'_create_client') as mock_do_setup: - mock_do_setup.return_value = mock_client - - volume_info = self.driver.create_volume(volume_with_vt) - - self.assertEqual('10.0.1.6:3260,1 iqn.1993-08.org.debian:01:222 0', - volume_info['provider_location']) - - # make sure createVolume is called with - # isAdaptiveOptimizationEnabled == false - expected = self.driver_startup_call_stack + [ - mock.call.createVolume( - 'fakevolume', - 1, - units.Gi, - {'isThinProvisioned': True, - 'clusterName': 'CloudCluster1', - 'isAdaptiveOptimizationEnabled': False}), - mock.call.logout()] - - mock_client.assert_has_calls(expected) - - def test_get_existing_volume_ref_name(self): - self.setup_driver() - - existing_ref = {'source-name': self.volume_name} - result = self.driver._get_existing_volume_ref_name( - existing_ref) - self.assertEqual(self.volume_name, result) - - existing_ref = {'bad-key': 'foo'} - self.assertRaises( - exception.ManageExistingInvalidReference, - self.driver._get_existing_volume_ref_name, - existing_ref) - - def test_manage_existing(self): - mock_client = self.setup_driver() - - self.driver.api_version = "1.1" - - volume = {'display_name': 'Foo Volume', - 'volume_type': None, - 'volume_type_id': None, - 'id': '12345'} - - with mock.patch.object(hpe_lefthand_iscsi.HPELeftHandISCSIDriver, - '_create_client') as mock_do_setup: - mock_do_setup.return_value = mock_client - mock_client.getVolumeByName.return_value = {'id': self.volume_id} - mock_client.getVolumes.return_value = { - "type": "volume", - "total": 1, - "members": [{ - "id": self.volume_id, - "clusterName": self.cluster_name, - "size": 1 - }] - } - - existing_ref = {'source-name': self.volume_name} - - expected_obj = {'display_name': 'Foo Volume'} - - obj = self.driver.manage_existing(volume, existing_ref) - - mock_client.assert_has_calls( - self.driver_startup_call_stack + [ - mock.call.getVolumeByName(self.volume_name), - mock.call.logout()] + - self.driver_startup_call_stack + [ - 
mock.call.modifyVolume(self.volume_id, - {'name': 'volume-12345'}), - mock.call.logout()]) - self.assertEqual(expected_obj, obj) - - def test_manage_existing_with_non_existing_virtual_volume(self): - mock_client = self.setup_driver() - self.driver.api_version = "1.1" - existing_ref = {'source-name': self.volume_name} - mock_client.getVolumeByName.side_effect = hpeexceptions.HTTPNotFound - with mock.patch.object(hpe_lefthand_iscsi.HPELeftHandISCSIDriver, - '_create_client') as mock_do_setup: - mock_do_setup.return_value = mock_client - self.assertRaises(exception.InvalidInput, - self.driver.manage_existing, - self.volume, - existing_ref) - - mock_client.assert_has_calls( - self.driver_startup_call_stack + [ - mock.call.getVolumeByName(self.volume_name), - mock.call.logout()]) - - @mock.patch.object(volume_types, 'get_volume_type') - def test_manage_existing_retype(self, _mock_volume_types): - mock_client = self.setup_driver() - - _mock_volume_types.return_value = { - 'name': 'gold', - 'id': 'gold-id', - 'extra_specs': { - 'hpelh:provisioning': 'thin', - 'hpelh:ao': 'true', - 'hpelh:data_pl': 'r-0', - 'volume_type': self.volume_type}} - - self.driver.api_version = "1.1" - - volume = {'display_name': 'Foo Volume', - 'host': 'stack@lefthand#lefthand', - 'volume_type': 'gold', - 'volume_type_id': 'bcfa9fa4-54a0-4340-a3d8-bfcf19aea65e', - 'id': '12345'} - - with mock.patch.object(hpe_lefthand_iscsi.HPELeftHandISCSIDriver, - '_create_client') as mock_do_setup: - mock_do_setup.return_value = mock_client - mock_client.getVolumeByName.return_value = {'id': self.volume_id} - mock_client.getVolumes.return_value = { - "type": "volume", - "total": 1, - "members": [{ - "id": self.volume_id, - "clusterName": self.cluster_name, - "size": 1 - }] - } - - existing_ref = {'source-name': self.volume_name} - - expected_obj = {'display_name': 'Foo Volume'} - - obj = self.driver.manage_existing(volume, existing_ref) - - mock_client.assert_has_calls( - self.driver_startup_call_stack + [ - 
mock.call.getVolumeByName(self.volume_name), - mock.call.logout()] + - self.driver_startup_call_stack + [ - mock.call.modifyVolume(self.volume_id, - {'name': 'volume-12345'}), - mock.call.logout()]) - self.assertEqual(expected_obj, obj) - - @mock.patch.object(volume_types, 'get_volume_type') - def test_manage_existing_retype_exception(self, _mock_volume_types): - mock_client = self.setup_driver() - - _mock_volume_types.return_value = { - 'name': 'gold', - 'id': 'gold-id', - 'extra_specs': { - 'hpelh:provisioning': 'thin', - 'hpelh:ao': 'true', - 'hpelh:data_pl': 'r-0', - 'volume_type': self.volume_type}} - - self.driver.retype = mock.Mock( - side_effect=exception.VolumeNotFound(volume_id="fake")) - - self.driver.api_version = "1.1" - - volume = {'display_name': 'Foo Volume', - 'host': 'stack@lefthand#lefthand', - 'volume_type': 'gold', - 'volume_type_id': 'bcfa9fa4-54a0-4340-a3d8-bfcf19aea65e', - 'id': '12345'} - - with mock.patch.object(hpe_lefthand_iscsi.HPELeftHandISCSIDriver, - '_create_client') as mock_do_setup: - mock_do_setup.return_value = mock_client - mock_client.getVolumeByName.return_value = {'id': self.volume_id} - mock_client.getVolumes.return_value = { - "type": "volume", - "total": 1, - "members": [{ - "id": self.volume_id, - "clusterName": self.cluster_name, - "size": 1 - }] - } - - existing_ref = {'source-name': self.volume_name} - - self.assertRaises(exception.VolumeNotFound, - self.driver.manage_existing, - volume, - existing_ref) - - mock_client.assert_has_calls( - self.driver_startup_call_stack + [ - mock.call.getVolumeByName(self.volume_name), - mock.call.logout()] + - self.driver_startup_call_stack + [ - mock.call.modifyVolume(self.volume_id, - {'name': 'volume-12345'}), - mock.call.logout()] + - self.driver_startup_call_stack + [ - mock.call.modifyVolume(self.volume_id, - {'name': 'fakevolume'}), - mock.call.logout()]) - - def test_manage_existing_volume_type_exception(self): - mock_client = self.setup_driver() - - self.driver.api_version = 
"1.1" - - volume = {'display_name': 'Foo Volume', - 'volume_type': 'gold', - 'volume_type_id': 'bcfa9fa4-54a0-4340-a3d8-bfcf19aea65e', - 'id': '12345'} - - with mock.patch.object(hpe_lefthand_iscsi.HPELeftHandISCSIDriver, - '_create_client') as mock_do_setup: - mock_do_setup.return_value = mock_client - mock_client.getVolumeByName.return_value = {'id': self.volume_id} - mock_client.getVolumes.return_value = { - "type": "volume", - "total": 1, - "members": [{ - "id": self.volume_id, - "clusterName": self.cluster_name, - "size": 1 - }] - } - - existing_ref = {'source-name': self.volume_name} - - self.assertRaises(exception.ManageExistingVolumeTypeMismatch, - self.driver.manage_existing, - volume=volume, - existing_ref=existing_ref) - - mock_client.assert_has_calls( - self.driver_startup_call_stack + [ - mock.call.getVolumeByName(self.volume_name), - mock.call.logout()]) - - def test_manage_existing_snapshot(self): - mock_client = self.setup_driver() - - self.driver.api_version = "1.1" - - volume = { - 'id': '111', - } - snapshot = { - 'display_name': 'Foo Snap', - 'id': '12345', - 'volume': volume, - 'volume_id': '111', - } - - with mock.patch.object(hpe_lefthand_iscsi.HPELeftHandISCSIDriver, - '_create_client') as mock_do_setup: - mock_do_setup.return_value = mock_client - mock_client.getSnapshotByName.return_value = { - 'id': self.snapshot_id - } - mock_client.getSnapshotParentVolume.return_value = { - 'name': 'volume-111' - } - - existing_ref = {'source-name': self.snapshot_name} - expected_obj = {'display_name': 'Foo Snap'} - - obj = self.driver.manage_existing_snapshot(snapshot, existing_ref) - - mock_client.assert_has_calls( - self.driver_startup_call_stack + [ - mock.call.getSnapshotByName(self.snapshot_name), - mock.call.getSnapshotParentVolume(self.snapshot_name), - mock.call.modifySnapshot(self.snapshot_id, - {'name': 'snapshot-12345'}), - mock.call.logout()]) - self.assertEqual(expected_obj, obj) - - def 
test_manage_existing_snapshot_failed_over_volume(self): - mock_client = self.setup_driver() - - self.driver.api_version = "1.1" - - volume = { - 'id': self.volume_id, - 'replication_status': 'failed-over', - } - snapshot = { - 'display_name': 'Foo Snap', - 'id': '12345', - 'volume': volume, - } - existing_ref = {'source-name': self.snapshot_name} - - with mock.patch.object(hpe_lefthand_iscsi.HPELeftHandISCSIDriver, - '_create_client') as mock_do_setup: - mock_do_setup.return_value = mock_client - - self.assertRaises(exception.InvalidInput, - self.driver.manage_existing_snapshot, - snapshot=snapshot, - existing_ref=existing_ref) - - def test_manage_existing_get_size(self): - mock_client = self.setup_driver() - mock_client.getVolumeByName.return_value = {'size': 2147483648} - - self.driver.api_version = "1.1" - - with mock.patch.object(hpe_lefthand_iscsi.HPELeftHandISCSIDriver, - '_create_client') as mock_do_setup: - mock_do_setup.return_value = mock_client - mock_client.getVolumes.return_value = { - "type": "volume", - "total": 1, - "members": [{ - "id": self.volume_id, - "clusterName": self.cluster_name, - "size": 1 - }] - } - - volume = {} - existing_ref = {'source-name': self.volume_name} - - size = self.driver.manage_existing_get_size(volume, existing_ref) - - expected_size = 2 - expected = [mock.call.getVolumeByName(existing_ref['source-name']), - mock.call.logout()] - - mock_client.assert_has_calls( - self.driver_startup_call_stack + - expected) - self.assertEqual(expected_size, size) - - def test_manage_existing_get_size_invalid_reference(self): - mock_client = self.setup_driver() - mock_client.getVolumeByName.return_value = {'size': 2147483648} - - self.driver.api_version = "1.1" - - with mock.patch.object(hpe_lefthand_iscsi.HPELeftHandISCSIDriver, - '_create_client') as mock_do_setup: - mock_do_setup.return_value = mock_client - - volume = {} - existing_ref = {'source-name': "volume-12345"} - - self.assertRaises(exception.ManageExistingInvalidReference, - 
self.driver.manage_existing_get_size, - volume=volume, - existing_ref=existing_ref) - - mock_client.assert_has_calls([]) - - existing_ref = {} - - self.assertRaises(exception.ManageExistingInvalidReference, - self.driver.manage_existing_get_size, - volume=volume, - existing_ref=existing_ref) - - mock_client.assert_has_calls([]) - - def test_manage_existing_get_size_invalid_input(self): - mock_client = self.setup_driver() - mock_client.getVolumeByName.side_effect = ( - hpeexceptions.HTTPNotFound('fake')) - - self.driver.api_version = "1.1" - - with mock.patch.object(hpe_lefthand_iscsi.HPELeftHandISCSIDriver, - '_create_client') as mock_do_setup: - mock_do_setup.return_value = mock_client - mock_client.getVolumes.return_value = { - "type": "volume", - "total": 1, - "members": [{ - "id": self.volume_id, - "clusterName": self.cluster_name, - "size": 1 - }] - } - - volume = {} - existing_ref = {'source-name': self.volume_name} - - self.assertRaises(exception.InvalidInput, - self.driver.manage_existing_get_size, - volume=volume, - existing_ref=existing_ref) - - expected = [mock.call.getVolumeByName(existing_ref['source-name'])] - - mock_client.assert_has_calls( - self.driver_startup_call_stack + - expected) - - def test_manage_existing_snapshot_get_size(self): - mock_client = self.setup_driver() - mock_client.getSnapshotByName.return_value = {'size': 2147483648} - - self.driver.api_version = "1.1" - - with mock.patch.object(hpe_lefthand_iscsi.HPELeftHandISCSIDriver, - '_create_client') as mock_do_setup: - mock_do_setup.return_value = mock_client - - snapshot = {} - existing_ref = {'source-name': self.snapshot_name} - - size = self.driver.manage_existing_snapshot_get_size(snapshot, - existing_ref) - - expected_size = 2 - expected = [mock.call.getSnapshotByName( - existing_ref['source-name']), - mock.call.logout()] - - mock_client.assert_has_calls( - self.driver_startup_call_stack + - expected) - self.assertEqual(expected_size, size) - - def 
test_manage_existing_snapshot_get_size_invalid_reference(self): - mock_client = self.setup_driver() - mock_client.getSnapshotByName.return_value = {'size': 2147483648} - - self.driver.api_version = "1.1" - - with mock.patch.object(hpe_lefthand_iscsi.HPELeftHandISCSIDriver, - '_create_client') as mock_do_setup: - mock_do_setup.return_value = mock_client - - snapshot = {} - existing_ref = {'source-name': "snapshot-12345"} - - self.assertRaises(exception.ManageExistingInvalidReference, - self.driver.manage_existing_snapshot_get_size, - snapshot=snapshot, - existing_ref=existing_ref) - - mock_client.assert_has_calls([]) - - existing_ref = {} - - self.assertRaises(exception.ManageExistingInvalidReference, - self.driver.manage_existing_snapshot_get_size, - snapshot=snapshot, - existing_ref=existing_ref) - - mock_client.assert_has_calls([]) - - def test_manage_snapshot(self): - mock_client = self.setup_driver() - self.driver.api_version = "1.1" - existing_ref = {'source-name': self.snapshot_name} - snapshot = {'volume_id': '222'} - snapshot.update(self.snapshot) - snapshot['id'] = '4' - with mock.patch.object(hpe_lefthand_iscsi.HPELeftHandISCSIDriver, - '_create_client') as mock_do_setup: - mock_do_setup.return_value = mock_client - - # Failed to update the existing snapshot with the new name - mock_client.getSnapshotByName.return_value = {'id': '4'} - mock_client.getSnapshotParentVolume.return_value = { - 'name': 'volume-222'} - mock_client.modifySnapshot.side_effect = \ - hpeexceptions.HTTPServerError - expected = [mock.call.getSnapshotByName('fakeshapshot'), - mock.call.getSnapshotParentVolume('fakeshapshot'), - mock.call.modifySnapshot('4', {'name': 'snapshot-4'})] - - self.driver._manage_snapshot( - client=mock_client, - volume=snapshot['volume'], - snapshot=snapshot, - target_snap_name=existing_ref['source-name'], - existing_ref=existing_ref) - - mock_client.assert_has_calls( - expected) - - # The provided snapshot is not a snapshot of the provided volume, - # 
raises InvalidInput exception - mock_client.getSnapshotParentVolume.return_value = { - 'name': 'volume-111'} - self.assertRaises(exception.InvalidInput, - self.driver._manage_snapshot, - client=mock_client, - volume=snapshot['volume'], - snapshot=snapshot, - target_snap_name=existing_ref['source-name'], - existing_ref=existing_ref) - - # Non existence of parent volume of a snapshot - # raises HTTPNotFound exception - mock_client.getSnapshotParentVolume.side_effect =\ - hpeexceptions.HTTPNotFound - self.assertRaises(exception.InvalidInput, - self.driver._manage_snapshot, - client=mock_client, - volume=snapshot['volume'], - snapshot=snapshot, - target_snap_name=existing_ref['source-name'], - existing_ref=existing_ref) - - # Non existence of a snapshot raises HTTPNotFound exception - mock_client.getSnapshotByName.side_effect =\ - hpeexceptions.HTTPNotFound - self.assertRaises(exception.InvalidInput, - self.driver._manage_snapshot, - client=mock_client, - volume=snapshot['volume'], - snapshot=snapshot, - target_snap_name=existing_ref['source-name'], - existing_ref=existing_ref) - - def test_manage_existing_snapshot_get_size_invalid_input(self): - mock_client = self.setup_driver() - mock_client.getSnapshotByName.side_effect = ( - hpeexceptions.HTTPNotFound('fake')) - - self.driver.api_version = "1.1" - - with mock.patch.object(hpe_lefthand_iscsi.HPELeftHandISCSIDriver, - '_create_client') as mock_do_setup: - mock_do_setup.return_value = mock_client - - snapshot = {} - existing_ref = {'source-name': self.snapshot_name} - - self.assertRaises(exception.InvalidInput, - self.driver.manage_existing_snapshot_get_size, - snapshot=snapshot, - existing_ref=existing_ref) - - expected = [mock.call.getSnapshotByName( - existing_ref['source-name'])] - - mock_client.assert_has_calls( - self.driver_startup_call_stack + - expected) - - def test_unmanage(self): - mock_client = self.setup_driver() - mock_client.getVolumeByName.return_value = {'id': self.volume_id} - - # mock return value 
of getVolumes - mock_client.getVolumes.return_value = { - "type": "volume", - "total": 1, - "members": [{ - "id": self.volume_id, - "clusterName": self.cluster_name, - "size": 1 - }] - } - - self.driver.api_version = "1.1" - - with mock.patch.object(hpe_lefthand_iscsi.HPELeftHandISCSIDriver, - '_create_client') as mock_do_setup: - mock_do_setup.return_value = mock_client - self.driver.unmanage(self.volume) - - new_name = 'unm-' + str(self.volume['id']) - - expected = [ - mock.call.getVolumeByName(self.volume['name']), - mock.call.modifyVolume(self.volume['id'], {'name': new_name}), - mock.call.logout() - ] - - mock_client.assert_has_calls( - self.driver_startup_call_stack + - expected) - - def test_unmanage_snapshot(self): - mock_client = self.setup_driver() - volume = { - 'id': self.volume_id, - } - snapshot = { - 'name': self.snapshot_name, - 'display_name': 'Foo Snap', - 'volume': volume, - 'id': self.snapshot_id, - } - mock_client.getSnapshotByName.return_value = {'id': self.snapshot_id, } - - self.driver.api_version = "1.1" - - with mock.patch.object(hpe_lefthand_iscsi.HPELeftHandISCSIDriver, - '_create_client') as mock_do_setup: - mock_do_setup.return_value = mock_client - self.driver.unmanage_snapshot(snapshot) - - new_name = 'ums-' + str(self.snapshot_id) - - expected = [ - mock.call.getSnapshotByName(snapshot['name']), - mock.call.modifySnapshot(self.snapshot_id, {'name': new_name}), - mock.call.logout() - ] - - mock_client.assert_has_calls( - self.driver_startup_call_stack + - expected) - - def test_unmanage_snapshot_failed_over_volume(self): - mock_client = self.setup_driver() - volume = { - 'id': self.volume_id, - 'replication_status': 'failed-over', - } - snapshot = { - 'name': self.snapshot_name, - 'display_name': 'Foo Snap', - 'volume': volume, - 'id': self.snapshot_id, - } - mock_client.getSnapshotByName.return_value = {'id': self.snapshot_id, } - - self.driver.api_version = "1.1" - - with mock.patch.object(hpe_lefthand_iscsi.HPELeftHandISCSIDriver, 
- '_create_client') as mock_do_setup: - mock_do_setup.return_value = mock_client - - self.assertRaises(exception.SnapshotIsBusy, - self.driver.unmanage_snapshot, - snapshot=snapshot) - - def test_api_version(self): - self.setup_driver() - self.driver.api_version = "1.1" - self.driver._check_api_version() - - self.driver.api_version = "1.0" - self.assertRaises(exception.InvalidInput, - self.driver._check_api_version) - - def test_get_volume_stats(self): - - # set up driver with default config - mock_client = self.setup_driver() - - # mock return value of getVolumes - mock_client.getVolumes.return_value = { - "type": "volume", - "total": 1, - "members": [{ - "id": 12345, - "clusterName": self.cluster_name, - "size": 1 * units.Gi - }] - } - - with mock.patch.object(hpe_lefthand_iscsi.HPELeftHandISCSIDriver, - '_create_client') as mock_do_setup: - mock_do_setup.return_value = mock_client - - # execute driver - stats = self.driver.get_volume_stats(True) - - self.assertEqual('iSCSI', stats['storage_protocol']) - self.assertEqual(GOODNESS_FUNCTION, stats['goodness_function']) - self.assertEqual(FILTER_FUNCTION, stats['filter_function']) - self.assertEqual(1, int(stats['total_volumes'])) - self.assertTrue(stats['thin_provisioning_support']) - self.assertTrue(stats['thick_provisioning_support']) - self.assertEqual(1, int(stats['provisioned_capacity_gb'])) - self.assertEqual(25, int(stats['reserved_percentage'])) - - cap_util = ( - float(units.Gi * 500 - units.Gi * 250) / float(units.Gi * 500) - ) * 100 - - self.assertEqual(cap_util, float(stats['capacity_utilization'])) - - expected = self.driver_startup_call_stack + [ - mock.call.getCluster(1), - mock.call.getVolumes(fields=['members[id]', - 'members[clusterName]', - 'members[size]'], - cluster=self.cluster_name), - mock.call.logout()] - - mock_client.assert_has_calls(expected) - - @mock.patch.object(volume_types, 'get_volume_type') - @mock.patch('cinder.volume.utils.is_group_a_cg_snapshot_type') - def 
test_create_group(self, cg_ss_enabled, mock_get_volume_type): - cg_ss_enabled.side_effect = [False, True, True] - ctxt = context.get_admin_context() - mock_get_volume_type.return_value = { - 'name': 'gold', - 'extra_specs': { - 'replication_enabled': ' False'}} - # set up driver with default config - mock_client = self.setup_driver() - - with mock.patch.object(hpe_lefthand_iscsi.HPELeftHandISCSIDriver, - '_create_client') as mock_do_setup: - mock_do_setup.return_value = mock_client - - # create a group object - group = self.fake_group_object() - - # create a group with consistent_group_snapshot_enabled flag to - # False - self.assertRaises(NotImplementedError, - self.driver.create_group, ctxt, group) - - # create a group with consistent_group_snapshot_enabled flag to - # True - model_update = self.driver.create_group(ctxt, group) - - self.assertEqual(fields.GroupStatus.AVAILABLE, - model_update['status']) - - # create a group with replication enabled on volume type - mock_get_volume_type.return_value = { - 'name': 'gold', - 'extra_specs': { - 'replication_enabled': ' True'}} - model_update = self.driver.create_group(ctxt, group) - self.assertEqual(fields.GroupStatus.ERROR, - model_update['status']) - - @mock.patch.object(volume_types, 'get_volume_type') - @mock.patch('cinder.volume.utils.is_group_a_cg_snapshot_type') - def test_delete_group(self, cg_ss_enabled, mock_get_volume_type): - cg_ss_enabled.return_value = True - ctxt = context.get_admin_context() - mock_get_volume_type.return_value = { - 'name': 'gold', - 'extra_specs': { - 'replication_enabled': ' False'}} - # set up driver with default config - mock_client = self.setup_driver() - - mock_volume = mock.MagicMock() - volumes = [mock_volume] - - with mock.patch.object(hpe_lefthand_iscsi.HPELeftHandISCSIDriver, - '_create_client') as mock_do_setup: - mock_do_setup.return_value = mock_client - - # create a group - group = self.fake_group_object() - model_update = self.driver.create_group(ctxt, group) - 
self.assertEqual(fields.GroupStatus.AVAILABLE, - model_update['status']) - - # delete the group - group.status = fields.GroupStatus.DELETING - model_update, vols = self.driver.delete_group(ctxt, group, - volumes) - self.assertEqual(fields.GroupStatus.DELETING, - model_update['status']) - - @mock.patch.object(volume_types, 'get_volume_type') - @mock.patch('cinder.volume.utils.is_group_a_cg_snapshot_type') - def test_update_group_add_vol_delete_group(self, cg_ss_enabled, - mock_get_volume_type): - cg_ss_enabled.return_value = True - ctxt = context.get_admin_context() - mock_get_volume_type.return_value = { - 'name': 'gold', - 'extra_specs': { - 'replication_enabled': ' False'}} - - # set up driver with default config - mock_client = self.setup_driver() - - mock_volume = mock.MagicMock() - volumes = [mock_volume] - - mock_client.getVolumes.return_value = {'total': 1, 'members': []} - - # mock return value of createVolume - mock_client.createVolume.return_value = { - 'iscsiIqn': self.connector['initiator']} - - with mock.patch.object(hpe_lefthand_iscsi.HPELeftHandISCSIDriver, - '_create_client') as mock_do_setup: - mock_do_setup.return_value = mock_client - - # create a group - group = self.fake_group_object() - model_update = self.driver.create_group(ctxt, group) - self.assertEqual(fields.GroupStatus.AVAILABLE, - model_update['status']) - - # add volume to group - model_update = self.driver.update_group( - ctxt, group, add_volumes=[self.volume], remove_volumes=None) - - # delete the group - group.status = fields.GroupStatus.DELETING - model_update, vols = self.driver.delete_group(ctxt, group, - volumes) - self.assertEqual(fields.GroupStatus.DELETING, - model_update['status']) - - @mock.patch.object(volume_types, 'get_volume_type') - @mock.patch('cinder.volume.utils.is_group_a_cg_snapshot_type') - def test_update_group_remove_vol_delete_group(self, cg_ss_enabled, - mock_get_volume_type): - cg_ss_enabled.return_value = True - ctxt = context.get_admin_context() - 
mock_get_volume_type.return_value = { - 'name': 'gold', - 'extra_specs': { - 'replication_enabled': ' False'}} - # set up driver with default config - mock_client = self.setup_driver() - - mock_volume = mock.MagicMock() - volumes = [mock_volume] - - mock_client.getVolumes.return_value = {'total': 1, 'members': []} - - # mock return value of createVolume - mock_client.createVolume.return_value = { - 'iscsiIqn': self.connector['initiator']} - - with mock.patch.object(hpe_lefthand_iscsi.HPELeftHandISCSIDriver, - '_create_client') as mock_do_setup: - mock_do_setup.return_value = mock_client - - # create a group - group = self.fake_group_object() - model_update = self.driver.create_group(ctxt, group) - self.assertEqual(fields.GroupStatus.AVAILABLE, - model_update['status']) - - # add volume to group - model_update = self.driver.update_group( - ctxt, group, add_volumes=[self.volume], remove_volumes=None) - - # remove volume from group - model_update = self.driver.update_group( - ctxt, group, add_volumes=None, remove_volumes=[self.volume]) - - # delete the group - group.status = fields.GroupStatus.DELETING - model_update, vols = self.driver.delete_group(ctxt, group, - volumes) - self.assertEqual(fields.GroupStatus.DELETING, - model_update['status']) - - @mock.patch.object(volume_types, 'get_volume_type') - @mock.patch('cinder.volume.utils.is_group_a_cg_snapshot_type') - def test_create_groupsnapshot(self, cg_ss_enabled, mock_get_volume_type): - cg_ss_enabled.return_value = True - ctxt = context.get_admin_context() - mock_get_volume_type.return_value = { - 'name': 'gold', - 'extra_specs': { - 'replication_enabled': ' False'}} - # set up driver with default config - mock_client = self.setup_driver() - - mock_client.getVolumes.return_value = {'total': 1, 'members': []} - mock_client.getVolumeByName.return_value = {'id': self.volume_id} - - mock_snap = mock.MagicMock() - mock_snap.volumeName = self.volume_name - expected_snaps = [mock_snap] - - with 
mock.patch.object(hpe_lefthand_iscsi.HPELeftHandISCSIDriver, - '_create_client') as mock_do_setup: - mock_do_setup.return_value = mock_client - - # create a group - group = self.fake_group_object() - model_update = self.driver.create_group(ctxt, group) - self.assertEqual(fields.GroupStatus.AVAILABLE, - model_update['status']) - - # create volume and add it to the group - self.driver.update_group( - ctxt, group, add_volumes=[self.volume], remove_volumes=None) - - # create the group snapshot - groupsnapshot = self.fake_groupsnapshot_object() - madel_update, snaps = self.driver.create_group_snapshot( - ctxt, groupsnapshot, expected_snaps) - self.assertEqual('available', madel_update['status']) - - # mock HTTPServerError (array failure) - mock_client.createSnapshotSet.side_effect = ( - hpeexceptions.HTTPServerError()) - # ensure the raised exception is a cinder exception - self.assertRaises( - exception.VolumeBackendAPIException, - self.driver.create_group_snapshot, - ctxt, groupsnapshot, expected_snaps) - - # mock HTTPServerError (array failure) - mock_client.getVolumeByName.side_effect = ( - hpeexceptions.HTTPNotFound()) - # ensure the raised exception is a cinder exception - self.assertRaises( - exception.VolumeBackendAPIException, - self.driver.create_group_snapshot, - ctxt, groupsnapshot, expected_snaps) - - @mock.patch.object(volume_types, 'get_volume_type') - @mock.patch('cinder.volume.utils.is_group_a_cg_snapshot_type') - def test_delete_groupsnapshot(self, cg_ss_enabled, mock_get_volume_type): - cg_ss_enabled.return_value = True - ctxt = context.get_admin_context() - mock_get_volume_type.return_value = { - 'name': 'gold', - 'extra_specs': { - 'replication_enabled': ' False'}} - # set up driver with default config - mock_client = self.setup_driver() - - mock_client.getVolumes.return_value = {'total': 1, 'members': []} - mock_client.getVolumeByName.return_value = {'id': self.volume_id} - - mock_snap = mock.MagicMock() - mock_snap.volumeName = self.volume_name - 
expected_snaps = [mock_snap] - - with mock.patch.object(hpe_lefthand_iscsi.HPELeftHandISCSIDriver, - '_create_client') as mock_do_setup: - mock_do_setup.return_value = mock_client - - # create a group - group = self.fake_group_object() - model_update = self.driver.create_group(ctxt, group) - self.assertEqual(fields.GroupStatus.AVAILABLE, - model_update['status']) - - # create volume and add it to the group - self.driver.update_group( - ctxt, group, add_volumes=[self.volume], remove_volumes=None) - - # delete the group snapshot - groupsnapshot = self.fake_groupsnapshot_object() - groupsnapshot.status = 'deleting' - model_update, snaps = self.driver.delete_group_snapshot( - ctxt, groupsnapshot, expected_snaps) - self.assertEqual('deleting', model_update['status']) - - # mock HTTPServerError - ex = hpeexceptions.HTTPServerError({ - 'message': - 'Hey, dude cannot be deleted because it is a clone point' - ' duh.'}) - mock_client.getSnapshotByName.side_effect = ex - # ensure the raised exception is a cinder exception - cgsnap, snaps = self.driver.delete_group_snapshot( - ctxt, groupsnapshot, expected_snaps) - self.assertEqual('error', snaps[0]['status']) - - # mock HTTP other errors - ex = hpeexceptions.HTTPConflict({'message': 'Some message.'}) - mock_client.getSnapshotByName.side_effect = ex - # ensure the raised exception is a cinder exception - cgsnap, snaps = self.driver.delete_group_snapshot( - ctxt, groupsnapshot, expected_snaps) - self.assertEqual('error', snaps[0]['status']) - - @mock.patch.object(volume_types, 'get_volume_type') - def test_create_volume_replicated(self, _mock_get_volume_type): - # set up driver with default config - conf = self.default_mock_conf() - conf.replication_device = self.repl_targets_unmgd - mock_client = self.setup_driver(config=conf) - mock_client.createVolume.return_value = { - 'iscsiIqn': self.connector['initiator']} - mock_client.doesRemoteSnapshotScheduleExist.return_value = False - mock_replicated_client = 
self.setup_driver(config=conf) - - _mock_get_volume_type.return_value = { - 'name': 'replicated', - 'extra_specs': { - 'replication_enabled': ' True'}} - - with mock.patch.object( - hpe_lefthand_iscsi.HPELeftHandISCSIDriver, - '_create_client') as mock_do_setup, \ - mock.patch.object( - hpe_lefthand_iscsi.HPELeftHandISCSIDriver, - '_create_replication_client') as mock_replication_client: - mock_do_setup.return_value = mock_client - mock_replication_client.return_value = mock_replicated_client - return_model = self.driver.create_volume(self.volume_replicated) - - expected = [ - mock.call.createVolume( - 'fakevolume_replicated', - 1, - units.Gi, - {'isThinProvisioned': True, - 'clusterName': 'CloudCluster1'}), - mock.call.doesRemoteSnapshotScheduleExist( - 'fakevolume_replicated_SCHED_Pri'), - mock.call.createRemoteSnapshotSchedule( - 'fakevolume_replicated', - 'fakevolume_replicated_SCHED', - 1800, - '1970-01-01T00:00:00Z', - 5, - 'CloudCluster1', - 5, - 'fakevolume_replicated', - '1.1.1.1', - 'foo1', - 'bar2'), - mock.call.logout()] - - mock_client.assert_has_calls( - self.driver_startup_call_stack + - expected) - prov_location = '10.0.1.6:3260,1 iqn.1993-08.org.debian:01:222 0' - rep_data = json.dumps({"location": HPELEFTHAND_API_URL}) - self.assertEqual({'replication_status': 'enabled', - 'replication_driver_data': rep_data, - 'provider_location': prov_location}, - return_model) - - @mock.patch.object(volume_types, 'get_volume_type') - def test_delete_volume_replicated(self, _mock_get_volume_type): - # set up driver with default config - conf = self.default_mock_conf() - conf.replication_device = self.repl_targets - mock_client = self.setup_driver(config=conf) - mock_client.getVolumeByName.return_value = {'id': self.volume_id} - mock_client.getVolumes.return_value = {'total': 1, 'members': []} - mock_replicated_client = self.setup_driver(config=conf) - - _mock_get_volume_type.return_value = { - 'name': 'replicated', - 'extra_specs': { - 'replication_enabled': ' 
True'}} - - with mock.patch.object( - hpe_lefthand_iscsi.HPELeftHandISCSIDriver, - '_create_client') as mock_do_setup, \ - mock.patch.object( - hpe_lefthand_iscsi.HPELeftHandISCSIDriver, - '_create_replication_client') as mock_replication_client: - mock_do_setup.return_value = mock_client - mock_replication_client.return_value = mock_replicated_client - self.driver.delete_volume(self.volume_replicated) - - expected = [ - mock.call.deleteRemoteSnapshotSchedule( - 'fakevolume_replicated_SCHED'), - mock.call.getVolumeByName('fakevolume_replicated'), - mock.call.deleteVolume(1)] - mock_client.assert_has_calls( - self.driver_startup_call_stack + - expected) - - # mock HTTPNotFound (volume not found) - mock_client.getVolumeByName.side_effect = ( - hpeexceptions.HTTPNotFound()) - # no exception should escape method - self.driver.delete_volume(self.volume_replicated) - - # mock HTTPNotFound (remote snapshot not found) - mock_client.deleteRemoteSnapshotSchedule.side_effect = ( - hpeexceptions.HTTPNotFound()) - # no exception should escape method - self.driver.delete_volume(self.volume_replicated) - - @mock.patch.object(volume_types, 'get_volume_type') - def test_failover_host(self, _mock_get_volume_type): - ctxt = context.get_admin_context() - # set up driver with default config - conf = self.default_mock_conf() - conf.replication_device = self.repl_targets - mock_client = self.setup_driver(config=conf) - mock_replicated_client = self.setup_driver(config=conf) - mock_replicated_client.getVolumeByName.return_value = { - 'iscsiIqn': self.connector['initiator']} - - _mock_get_volume_type.return_value = { - 'name': 'replicated', - 'extra_specs': { - 'replication_enabled': ' True'}} - - with mock.patch.object( - hpe_lefthand_iscsi.HPELeftHandISCSIDriver, - '_create_client') as mock_do_setup, \ - mock.patch.object( - hpe_lefthand_iscsi.HPELeftHandISCSIDriver, - '_create_replication_client') as mock_replication_client: - mock_do_setup.return_value = mock_client - 
mock_replication_client.return_value = mock_replicated_client - invalid_backend_id = 'INVALID' - - # Test invalid secondary target. - self.assertRaises( - exception.InvalidReplicationTarget, - self.driver.failover_host, - ctxt, - [self.volume_replicated], - invalid_backend_id) - - # Test a successful failover. - return_model = self.driver.failover_host( - context.get_admin_context(), - [self.volume_replicated], - REPLICATION_BACKEND_ID) - prov_location = '10.0.1.6:3260,1 iqn.1993-08.org.debian:01:222 0' - expected_model = (REPLICATION_BACKEND_ID, - [{'updates': {'replication_status': - 'failed-over', - 'provider_location': - prov_location}, - 'volume_id': 1}], - []) - self.assertEqual(expected_model, return_model) - - @mock.patch.object(volume_types, 'get_volume_type') - def test_failover_host_exceptions(self, _mock_get_volume_type): - - # set up driver with default config - conf = self.default_mock_conf() - conf.replication_device = self.repl_targets - mock_client = self.setup_driver(config=conf) - mock_replicated_client = self.setup_driver(config=conf) - - _mock_get_volume_type.return_value = { - 'name': 'replicated', - 'extra_specs': { - 'replication_enabled': ' True'}} - - with mock.patch.object( - hpe_lefthand_iscsi.HPELeftHandISCSIDriver, - '_create_client') as mock_do_setup, \ - mock.patch.object( - hpe_lefthand_iscsi.HPELeftHandISCSIDriver, - '_create_replication_client') as mock_replication_client: - mock_do_setup.return_value = mock_client - mock_replication_client.return_value = mock_replicated_client - - # mock HTTPNotFound - client - mock_client.stopRemoteSnapshotSchedule.side_effect = ( - hpeexceptions.HTTPNotFound()) - self.driver.failover_host( - context.get_admin_context(), - [self.volume_replicated], - REPLICATION_BACKEND_ID) - - # mock HTTPNotFound - replicated client - mock_replicated_client.stopRemoteSnapshotSchedule.side_effect = ( - hpeexceptions.HTTPNotFound()) - self.driver.failover_host( - context.get_admin_context(), - 
[self.volume_replicated], - REPLICATION_BACKEND_ID) - - # mock HTTPNotFound - replicated client - mock_replicated_client.getVolumeByName.side_effect = ( - hpeexceptions.HTTPNotFound()) - self.driver.failover_host( - context.get_admin_context(), - [self.volume_replicated], - REPLICATION_BACKEND_ID) - - @mock.patch.object(volume_types, 'get_volume_type') - def test_replication_failback_host_ready(self, _mock_get_volume_type): - # set up driver with default config - conf = self.default_mock_conf() - conf.replication_device = self.repl_targets_unmgd - mock_client = self.setup_driver(config=conf) - mock_replicated_client = self.setup_driver(config=conf) - mock_replicated_client.getVolumeByName.return_value = { - 'iscsiIqn': self.connector['initiator'], - 'isPrimary': True} - mock_replicated_client.getRemoteSnapshotSchedule.return_value = ( - ['', - 'HP StoreVirtual LeftHand OS Command Line Interface', - '(C) Copyright 2007-2016', - '', - 'RESPONSE', - ' result 0', - ' period 1800', - ' paused false']) - - _mock_get_volume_type.return_value = { - 'name': 'replicated', - 'extra_specs': { - 'replication_enabled': ' True'}} - - with mock.patch.object( - hpe_lefthand_iscsi.HPELeftHandISCSIDriver, - '_create_client') as mock_do_setup, \ - mock.patch.object( - hpe_lefthand_iscsi.HPELeftHandISCSIDriver, - '_create_replication_client') as mock_replication_client: - mock_do_setup.return_value = mock_client - mock_replication_client.return_value = mock_replicated_client - - volume = self.volume_replicated.copy() - rep_data = json.dumps({"primary_config_group": "failover_group"}) - volume['replication_driver_data'] = rep_data - return_model = self.driver.failover_host( - context.get_admin_context(), - [volume], - 'default') - prov_location = '10.0.1.6:3260,1 iqn.1993-08.org.debian:01:222 0' - expected_model = (None, - [{'updates': {'replication_status': - 'available', - 'provider_location': - prov_location}, - 'volume_id': 1}], - []) - self.assertEqual(expected_model, return_model) 
- - @mock.patch.object(volume_types, 'get_volume_type') - def test_replication_failback_host_not_ready(self, - _mock_get_volume_type): - # set up driver with default config - conf = self.default_mock_conf() - conf.replication_device = self.repl_targets_unmgd - mock_client = self.setup_driver(config=conf) - mock_replicated_client = self.setup_driver(config=conf) - mock_replicated_client.getVolumeByName.return_value = { - 'iscsiIqn': self.connector['initiator'], - 'isPrimary': False} - mock_replicated_client.getRemoteSnapshotSchedule.return_value = ( - ['', - 'HP StoreVirtual LeftHand OS Command Line Interface', - '(C) Copyright 2007-2016', - '', - 'RESPONSE', - ' result 0', - ' period 1800', - ' paused true']) - - _mock_get_volume_type.return_value = { - 'name': 'replicated', - 'extra_specs': { - 'replication_enabled': ' True'}} - - with mock.patch.object( - hpe_lefthand_iscsi.HPELeftHandISCSIDriver, - '_create_client') as mock_do_setup, \ - mock.patch.object( - hpe_lefthand_iscsi.HPELeftHandISCSIDriver, - '_create_replication_client') as mock_replication_client: - mock_do_setup.return_value = mock_client - mock_replication_client.return_value = mock_replicated_client - - volume = self.volume_replicated.copy() - self.assertRaises( - exception.InvalidReplicationTarget, - self.driver.failover_host, - context.get_admin_context(), - [volume], - 'default') - - @mock.patch.object(volume_types, 'get_volume_type') - def test_do_volume_replication_setup(self, mock_get_volume_type): - # set up driver with default config - conf = self.default_mock_conf() - conf.replication_device = self.repl_targets_unmgd - mock_client = self.setup_driver(config=conf) - volume = self.volume - volume['volume_type_id'] = 4 - mock_client.createVolume.return_value = { - 'iscsiIqn': self.connector['initiator']} - mock_client.doesRemoteSnapshotScheduleExist.return_value = False - mock_replicated_client = self.setup_driver(config=conf) - - with mock.patch.object( - 
hpe_lefthand_iscsi.HPELeftHandISCSIDriver, - '_create_client') as mock_do_setup, \ - mock.patch.object( - hpe_lefthand_iscsi.HPELeftHandISCSIDriver, - '_create_replication_client') as mock_replication_client: - mock_do_setup.return_value = mock_client - mock_replication_client.return_value = mock_replicated_client - client = self.driver._login() - # failed to create remote snapshot schedule on the primary system - mock_get_volume_type.return_value = { - 'name': 'replicated', - 'extra_specs': {'replication_enabled': ' True'}} - mock_client.createRemoteSnapshotSchedule.side_effect =\ - hpeexceptions.HTTPServerError() - self.assertRaises( - exception.VolumeBackendAPIException, - self.driver._do_volume_replication_setup, - volume, - client) - # remote retention count of a volume greater than max retention - # count raises an exception - mock_get_volume_type.return_value = { - 'name': 'replicated', - 'extra_specs': { - 'replication_enabled': ' True', - 'replication:remote_retention_count': '52'}} - self.assertRaises( - exception.VolumeBackendAPIException, - self.driver._do_volume_replication_setup, - volume, - client) - # retention count of a volume greater than max retention - # count raises an exception - mock_get_volume_type.return_value = { - 'name': 'replicated', - 'extra_specs': { - 'replication_enabled': ' True', - 'replication:retention_count': '52'}} - self.assertRaises( - exception.VolumeBackendAPIException, - self.driver._do_volume_replication_setup, - volume, - client) - # sync period of a volume less than minimum sync period - # raises an exception - mock_get_volume_type.return_value = { - 'name': 'replicated', - 'extra_specs': { - 'replication_enabled': ' True', - 'replication:sync_period': '1500'}} - self.assertRaises( - exception.VolumeBackendAPIException, - self.driver._do_volume_replication_setup, - volume, - client) - - def test_do_setup_with_incorrect_replication_device_information(self): - conf = self.default_mock_conf() - repl_target = 
copy.deepcopy(self.repl_targets) - # delete left hand password so that replication device - # information become incorrect - del repl_target[0]['hpelefthand_password'] - conf.replication_device = repl_target - mock_client = self.setup_driver(config=conf) - mock_replicated_client = self.setup_driver(config=conf) - - with mock.patch.object( - hpe_lefthand_iscsi.HPELeftHandISCSIDriver, - '_create_client') as mock_do_setup, \ - mock.patch.object( - hpe_lefthand_iscsi.HPELeftHandISCSIDriver, - '_create_replication_client') as mock_replication_client: - mock_do_setup.return_value = mock_client - mock_replication_client.return_value = mock_replicated_client - self.driver.do_setup(None) - self.assertFalse(self.driver._replication_targets) - - def test__create_replication_client(self): - # set up driver with default config - self.setup_driver() - - # Ensure creating a replication client works without specifying - # ssh_conn_timeout or san_private_key. - remote_array = { - 'hpelefthand_api_url': 'https://1.1.1.1:8080/lhos', - 'hpelefthand_username': 'user', - 'hpelefthand_password': 'password', - 'hpelefthand_ssh_port': '16022'} - cl = self.driver._create_replication_client(remote_array) - cl.setSSHOptions.assert_called_with( - '1.1.1.1', - 'user', - 'password', - conn_timeout=30, - known_hosts_file=mock.ANY, - missing_key_policy='AutoAddPolicy', - port='16022', - privatekey='') - - # Verify we can create a replication client with custom values for - # ssh_conn_timeout and san_private_key. 
- cl.reset_mock() - remote_array['ssh_conn_timeout'] = 45 - remote_array['san_private_key'] = 'foobarkey' - cl = self.driver._create_replication_client(remote_array) - cl.setSSHOptions.assert_called_with( - '1.1.1.1', - 'user', - 'password', - conn_timeout=45, - known_hosts_file=mock.ANY, - missing_key_policy='AutoAddPolicy', - port='16022', - privatekey='foobarkey') - - # mock HTTPNotFound - cl.login.side_effect = hpeexceptions.HTTPNotFound() - # ensure the raised exception is a cinder exception - self.assertRaises(exception.DriverNotInitialized, - self.driver._create_replication_client, remote_array) - - # mock other HTTP error - cl.login.side_effect = hpeexceptions.HTTPServerError() - # ensure the raised exception is a cinder exception - self.assertRaises(exception.DriverNotInitialized, - self.driver._create_replication_client, remote_array) diff --git a/cinder/tests/unit/volume/drivers/huawei/__init__.py b/cinder/tests/unit/volume/drivers/huawei/__init__.py deleted file mode 100644 index e69de29bb..000000000 diff --git a/cinder/tests/unit/volume/drivers/huawei/test_huawei_drivers.py b/cinder/tests/unit/volume/drivers/huawei/test_huawei_drivers.py deleted file mode 100644 index 861036a34..000000000 --- a/cinder/tests/unit/volume/drivers/huawei/test_huawei_drivers.py +++ /dev/null @@ -1,5563 +0,0 @@ -# Copyright (c) 2016 Huawei Technologies Co., Ltd. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
-"""Tests for huawei drivers.""" -import collections -import copy -import ddt -import json -import mock -import re -import requests -import tempfile -from xml.dom import minidom -from xml.etree import ElementTree - -from cinder import context -from cinder import exception -from cinder.objects import fields -from cinder import test -from cinder.tests.unit import fake_group -from cinder.tests.unit import fake_group_snapshot -from cinder.tests.unit import fake_snapshot -from cinder.tests.unit import fake_volume -from cinder.tests.unit import utils -from cinder.volume import configuration as conf -from cinder.volume.drivers.huawei import constants -from cinder.volume.drivers.huawei import fc_zone_helper -from cinder.volume.drivers.huawei import huawei_conf -from cinder.volume.drivers.huawei import huawei_driver -from cinder.volume.drivers.huawei import huawei_utils -from cinder.volume.drivers.huawei import hypermetro -from cinder.volume.drivers.huawei import replication -from cinder.volume.drivers.huawei import rest_client -from cinder.volume.drivers.huawei import smartx -from cinder.volume import qos_specs -from cinder.volume import utils as volume_utils -from cinder.volume import volume_types - -admin_contex = context.get_admin_context() - -vol_attrs = ('id', 'lun_type', 'provider_location', 'metadata') -Volume = collections.namedtuple('Volume', vol_attrs) - -PROVIDER_LOCATION = ('{"huawei_lun_id": "11", ' - '"huawei_lun_wwn": "6643e8c1004c5f6723e9f454003"}') -PROVIDER_LOCATION_WITH_HYPERMETRO = ( - '{"huawei_lun_id": "11", ' - '"huawei_lun_wwn": "6643e8c1004c5f6723e9f454003", ' - '"hypermetro_id": "11", ' - '"remote_lun_id": "1"}') -SNAP_PROVIDER_LOCATION = '{"huawei_snapshot_id": "11"}' - -HOST = 'ubuntu001@backend001#OpenStack_Pool' -ID = '21ec7341-9256-497b-97d9-ef48edcf0635' -ENCODE_NAME = huawei_utils.encode_name(ID) -METADATA = {} -TEST_PAIR_ID = "3400a30d844d0004" -VOL_METADATA = [{'key': 'hypermetro_id', 'value': '11'}, - {'key': 'remote_lun_id', 'value': 
'1'}] -ADMIN_METADATA = [{'key': 'huawei_lun_wwn', 'value': 'FAKE_WWN'}] -REPLICA_DRIVER_DATA = ('{"pair_id": "%s", "rmt_lun_id": "1", ' - '"rmt_lun_wwn": "FAKE_RMT_LUN_WWN"}') % TEST_PAIR_ID - -hypermetro_devices = """{ - "remote_device": { - "RestURL": "http://192.0.2.69:8082/deviceManager/rest", - "UserName": "admin", - "UserPassword": "Admin@storage1", - "StoragePool": "OpenStack_Pool", - "domain_name": "hypermetro-domain", - "remote_target_ip": "192.0.2.241" - } -} -""" - -fake_smartx_value = {'smarttier': 'true', - 'smartcache': 'true', - 'smartpartition': 'true', - 'thin_provisioning_support': 'true', - 'thick_provisioning_support': False, - 'policy': '2', - 'cachename': 'cache-test', - 'partitionname': 'partition-test', - } - -fake_hypermetro_opts = {'hypermetro': 'true', - 'smarttier': False, - 'smartcache': False, - 'smartpartition': False, - 'thin_provisioning_support': False, - 'thick_provisioning_support': False, - } - -sync_replica_specs = {'replication_enabled': ' True', - 'replication_type': ' sync'} -async_replica_specs = {'replication_enabled': ' True', - 'replication_type': ' async'} - -replica_hypermetro_specs = {'hypermetro': ' True', - 'replication_enabled': ' True'} - -test_host = {'host': 'ubuntu001@backend001#OpenStack_Pool', - 'capabilities': {'smartcache': True, - 'location_info': '210235G7J20000000000', - 'QoS_support': True, - 'pool_name': 'OpenStack_Pool', - 'timestamp': '2015-07-13T11:41:00.513549', - 'smartpartition': True, - 'allocated_capacity_gb': 0, - 'volume_backend_name': 'HuaweiFCDriver', - 'free_capacity_gb': 20.0, - 'driver_version': '1.1.0', - 'total_capacity_gb': 20.0, - 'smarttier': True, - 'hypermetro': True, - 'reserved_percentage': 0, - 'vendor_name': None, - 'thick_provisioning_support': False, - 'thin_provisioning_support': True, - 'storage_protocol': 'FC', - } - } - -test_new_type = { - 'name': u'new_type', - 'qos_specs_id': None, - 'deleted': False, - 'created_at': None, - 'updated_at': None, - 'extra_specs': { - 
'smarttier': ' true', - 'smartcache': ' true', - 'smartpartition': ' true', - 'thin_provisioning_support': ' true', - 'thick_provisioning_support': ' False', - 'policy': '2', - 'smartcache:cachename': 'cache-test', - 'smartpartition:partitionname': 'partition-test', - }, - 'is_public': True, - 'deleted_at': None, - 'id': u'530a56e1-a1a4-49f3-ab6c-779a6e5d999f', - 'description': None, -} - -test_new_replication_type = { - 'name': u'new_type', - 'qos_specs_id': None, - 'deleted': False, - 'created_at': None, - 'updated_at': None, - 'extra_specs': { - 'replication_enabled': ' True', - 'replication_type': ' sync', - }, - 'is_public': True, - 'deleted_at': None, - 'id': u'530a56e1-a1a4-49f3-ab6c-779a6e5d999f', - 'description': None, -} - -test_hypermetro_type = { - 'name': u'new_type', - 'qos_specs_id': None, - 'deleted': False, - 'created_at': None, - 'updated_at': None, - 'extra_specs': { - 'hypermetro': ' True' - }, - 'is_public': True, - 'deleted_at': None, - 'id': u'550c089b-bfdd-4f7f-86e1-3ba88125555c', - 'description': None, -} - -hypermetro_devices = """ -{ - "remote_device": { - "RestURL": "http://192.0.2.69:8082/deviceManager/rest", - "UserName":"admin", - "UserPassword":"Admin@storage2", - "StoragePool":"OpenStack_Pool", - "domain_name":"hypermetro_test"} -} -""" - -FAKE_FIND_POOL_RESPONSE = {'CAPACITY': '985661440', - 'ID': '0', - 'TOTALCAPACITY': '985661440'} - -FAKE_CREATE_VOLUME_RESPONSE = {"ID": "1", - "NAME": "5mFHcBv4RkCcD+JyrWc0SA", - "WWN": '6643e8c1004c5f6723e9f454003'} - -FakeConnector = {'initiator': 'iqn.1993-08.debian:01:ec2bff7ac3a3', - 'multipath': False, - 'wwpns': ['10000090fa0d6754'], - 'wwnns': ['10000090fa0d6755'], - 'host': 'ubuntuc', - } - -smarttier_opts = {'smarttier': 'true', - 'smartpartition': False, - 'smartcache': False, - 'thin_provisioning_support': True, - 'thick_provisioning_support': False, - 'policy': '3', - 'readcachepolicy': '1', - 'writecachepolicy': None, - } - -fake_fabric_mapping = { - 'swd1': { - 
'target_port_wwn_list': ['2000643e8c4c5f66'], - 'initiator_port_wwn_list': ['10000090fa0d6754'] - } -} - -fake_fabric_mapping_no_ports = { - 'swd1': { - 'target_port_wwn_list': [], - 'initiator_port_wwn_list': ['10000090fa0d6754'] - } -} - -fake_fabric_mapping_no_wwn = { - 'swd1': { - 'target_port_wwn_list': ['2000643e8c4c5f66'], - 'initiator_port_wwn_list': [] - } -} - -CHANGE_OPTS = {'policy': ('1', '2'), - 'partitionid': (['1', 'partition001'], ['2', 'partition002']), - 'cacheid': (['1', 'cache001'], ['2', 'cache002']), - 'qos': (['11', {'MAXIOPS': '100', 'IOType': '1'}], - {'MAXIOPS': '100', 'IOType': '2', - 'MIN': 1, 'LATENCY': 1}), - 'host': ('ubuntu@huawei#OpenStack_Pool', - 'ubuntu@huawei#OpenStack_Pool'), - 'LUNType': ('0', '1'), - } - -# A fake response of create a host -FAKE_CREATE_HOST_RESPONSE = """ -{ - "error": { - "code": 0 - }, - "data":{"NAME": "ubuntuc001", - "ID": "1"} -} -""" - -FAKE_GET_HOST_RESPONSE = """ -{ - "error": { - "code": 0 - }, - "data":{"NAME": "ubuntuc001", - "ID": "1", - "ISADD2HOSTGROUP": "true"} -} -""" - -# A fake response of success response storage -FAKE_COMMON_SUCCESS_RESPONSE = """ -{ - "error": { - "code": 0, - "description": "None" - }, - "data":{} -} -""" - -# A fake response of fail response storage -FAKE_COMMON_FAIL_RESPONSE = """ -{ - "error": { - "code": 50331651, - "description": "An error occurs to the parameter." 
- }, - "data":{} -} -""" - -# A fake response of login huawei storage -FAKE_GET_LOGIN_STORAGE_RESPONSE = """ -{ - "error": { - "code": 0 - }, - "data": { - "username": "admin", - "iBaseToken": "2001031430", - "deviceid": "210235G7J20000000000", - "accountstate": 2 - } -} -""" - -# A fake response of login out huawei storage -FAKE_LOGIN_OUT_STORAGE_RESPONSE = """ -{ - "error": { - "code": 0 - }, - "data": { - "ID": 11 - } -} -""" - -# A fake response of mock storage pool info -FAKE_STORAGE_POOL_RESPONSE = """ -{ - "error": { - "code": 0 - }, - "data": [{ - "USERFREECAPACITY": "985661440", - "ID": "0", - "NAME": "OpenStack_Pool", - "USERTOTALCAPACITY": "985661440", - "TIER0CAPACITY": "100", - "TIER1CAPACITY": "0", - "TIER2CAPACITY": "0" - }] -} -""" - -# A fake response of lun or lungroup response -FAKE_LUN_INFO_RESPONSE = """ -{ - "error": { - "code": 0 - }, - "data": { - "ID": "1", - "NAME": "5mFHcBv4RkCcD+JyrWc0SA", - "WWN": "6643e8c1004c5f6723e9f454003", - "DESCRIPTION": "21ec7341-9256-497b-97d9-ef48edcf0635", - "HEALTHSTATUS": "1", - "RUNNINGSTATUS": "27", - "ALLOCTYPE": "1", - "CAPACITY": "2097152" - } -} -""" - -# A fake report of mock storage pool info -FAKE_POOLS_UNSUPPORT_REPORT = { - 'pool_name': 'StoragePool', - 'location_info': '2102350BVB10F2000020', - 'QoS_support': False, - 'smartcache': False, - 'thick_provisioning_support': False, - 'splitmirror': False, - 'allocated_capacity_gb': 7, - 'thin_provisioning_support': True, - 'free_capacity_gb': 400.0, - 'smartpartition': False, - 'total_capacity_gb': 400.0, - 'reserved_percentage': 0, - 'max_over_subscription_ratio': 20.0, - 'luncopy': False -} - -FAKE_POOLS_SUPPORT_REPORT = { - 'pool_name': 'StoragePool', - 'location_info': '2102350BVB10F2000020', - 'QoS_support': True, - 'smartcache': True, - 'thick_provisioning_support': True, - 'splitmirror': True, - 'allocated_capacity_gb': 7, - 'thin_provisioning_support': True, - 'free_capacity_gb': 400.0, - 'smartpartition': True, - 'total_capacity_gb': 400.0, 
- 'reserved_percentage': 0, - 'max_over_subscription_ratio': 20.0, - 'luncopy': True, - 'hypermetro': True, - 'consistent_group_snapshot_enabled': True -} - -FAKE_LUN_GET_SUCCESS_RESPONSE = """ -{ - "error": { - "code": 0 - }, - "data": { - "ID": "11", - "IOCLASSID": "11", - "NAME": "5mFHcBv4RkCcD+JyrWc0SA", - "DESCRIPTION": "21ec7341-9256-497b-97d9-ef48edcf0635", - "RUNNINGSTATUS": "10", - "HEALTHSTATUS": "1", - "RUNNINGSTATUS": "27", - "LUNLIST": "", - "ALLOCTYPE": "1", - "CAPACITY": "2097152", - "WRITEPOLICY": "1", - "MIRRORPOLICY": "0", - "PREFETCHPOLICY": "1", - "PREFETCHVALUE": "20", - "DATATRANSFERPOLICY": "1", - "READCACHEPOLICY": "2", - "WRITECACHEPOLICY": "5", - "OWNINGCONTROLLER": "0B", - "SMARTCACHEPARTITIONID": "", - "CACHEPARTITIONID": "", - "WWN": "6643e8c1004c5f6723e9f454003", - "PARENTNAME": "OpenStack_Pool" - } -} -""" - -FAKE_QUERY_ALL_LUN_RESPONSE = { - "error": { - "code": 0 - }, - "data": [{ - "ID": "1", - "NAME": ENCODE_NAME - }] -} - -FAKE_LUN_ASSOCIATE_RESPONSE = """ -{ - "error":{ - "code":0 - }, - "data":[{ - "ID":"11" - }] -} -""" - -FAKE_QUERY_LUN_GROUP_INFO_RESPONSE = """ -{ - "error": { - "code":0 - }, - "data":[{ - "NAME":"OpenStack_LunGroup_1", - "DESCRIPTION":"5mFHcBv4RkCcD+JyrWc0SA", - "ID":"11", - "TYPE":256 - }] -} -""" - -FAKE_QUERY_LUN_GROUP_RESPONSE = """ -{ - "error": { - "code":0 - }, - "data":{ - "NAME":"5mFHcBv4RkCcD+JyrWc0SA", - "DESCRIPTION":"5mFHcBv4RkCcD+JyrWc0SA", - "ID":"11", - "TYPE":256 - } -} -""" - -FAKE_QUERY_LUN_GROUP_ASSOCIAT_RESPONSE = """ -{ - "error":{ - "code":0 - }, - "data":{ - "NAME":"5mFHcBv4RkCcD+JyrWc0SA", - "DESCRIPTION":"5mFHcBv4RkCcD+JyrWc0SA", - "ID":"11", - "TYPE":256 - } -} -""" - -FAKE_LUN_COUNT_RESPONSE = """ -{ - "data":{ - "COUNT":"0" - }, - "error":{ - "code":0, - "description":"0" - } -} -""" -# A fake response of snapshot list response -FAKE_SNAPSHOT_LIST_INFO_RESPONSE = { - "error": { - "code": 0, - "description": "0" - }, - "data": [{ - "ID": "11", - "NAME": ENCODE_NAME - }, ] -} - - 
-# A fake response of create snapshot response -FAKE_CREATE_SNAPSHOT_INFO_RESPONSE = """ -{ - "error": { - "code": 0 - }, - "data": { - "ID": "11", - "NAME": "YheUoRwbSX2BxN7" - } -} -""" - -# A fake response of get snapshot response -FAKE_GET_SNAPSHOT_INFO_RESPONSE = """ -{ - "error": { - "code": 0, - "description": "0" - }, - "data": { - "ID": "11", - "NAME": "YheUoRwbSX2BxN7" - } -} -""" - -FAKE_SNAPSHOT_COUNT_RESPONSE = """ -{ - "data":{ - "COUNT":"2" - }, - "error":{ - "code":0, - "description":"0" - } -} -""" - -# A fake response of get iscsi response - -FAKE_GET_ISCSI_INFO_RESPONSE = """ -{ - "data": [{ - "ETHPORTID": "139267", - "ID": "0+iqn.oceanstor:21004846fb8ca15f::22004:192.0.2.1,t,0x2005", - "TPGT": "8197", - "TYPE": 249 - }, - { - "ETHPORTID": "139268", - "ID": "1+iqn.oceanstor:21004846fb8ca15f::22003:192.0.2.2,t,0x2004", - "TPGT": "8196", - "TYPE": 249 - } - ], - "error": { - "code": 0, - "description": "0" - } -} -""" - -# A fake response of get eth info response -FAKE_GET_ETH_INFO_RESPONSE = """ -{ - "error": { - "code": 0 - }, - "data": [{ - "PARENTTYPE": 209, - "MACADDRESS": "00:22:a1:0a:79:57", - "ETHNEGOTIATE": "-1", - "ERRORPACKETS": "0", - "IPV4ADDR": "192.0.2.2", - "IPV6GATEWAY": "", - "IPV6MASK": "0", - "OVERFLOWEDPACKETS": "0", - "ISCSINAME": "P0", - "HEALTHSTATUS": "1", - "ETHDUPLEX": "2", - "ID": "16909568", - "LOSTPACKETS": "0", - "TYPE": 213, - "NAME": "P0", - "INIORTGT": "4", - "RUNNINGSTATUS": "10", - "IPV4GATEWAY": "", - "BONDNAME": "", - "STARTTIME": "1371684218", - "SPEED": "1000", - "ISCSITCPPORT": "0", - "IPV4MASK": "255.255.0.0", - "IPV6ADDR": "", - "LOGICTYPE": "0", - "LOCATION": "ENG0.A5.P0", - "MTU": "1500", - "PARENTID": "1.5" - }, - { - "PARENTTYPE": 209, - "MACADDRESS": "00:22:a1:0a:79:57", - "ETHNEGOTIATE": "-1", - "ERRORPACKETS": "0", - "IPV4ADDR": "192.0.2.1", - "IPV6GATEWAY": "", - "IPV6MASK": "0", - "OVERFLOWEDPACKETS": "0", - "ISCSINAME": "P0", - "HEALTHSTATUS": "1", - "ETHDUPLEX": "2", - "ID": "16909568", - 
"LOSTPACKETS": "0", - "TYPE": 213, - "NAME": "P0", - "INIORTGT": "4", - "RUNNINGSTATUS": "10", - "IPV4GATEWAY": "", - "BONDNAME": "", - "STARTTIME": "1371684218", - "SPEED": "1000", - "ISCSITCPPORT": "0", - "IPV4MASK": "255.255.0.0", - "IPV6ADDR": "", - "LOGICTYPE": "0", - "LOCATION": "ENG0.A5.P3", - "MTU": "1500", - "PARENTID": "1.5" - }] -} -""" - -FAKE_GET_ETH_ASSOCIATE_RESPONSE = """ -{ - "error":{ - "code":0 - }, - "data":[{ - "IPV4ADDR": "192.0.2.1", - "HEALTHSTATUS": "1", - "RUNNINGSTATUS": "10" - }, - { - "IPV4ADDR": "192.0.2.2", - "HEALTHSTATUS": "1", - "RUNNINGSTATUS": "10" - } - ] -} -""" -# A fake response of get iscsi device info response -FAKE_GET_ISCSI_DEVICE_RESPONSE = """ -{ - "error": { - "code": 0 - }, - "data": [{ - "CMO_ISCSI_DEVICE_NAME": "iqn.2006-08.com.huawei:oceanstor:21000022a:" - }] -} -""" - -# A fake response of get iscsi device info response -FAKE_GET_ALL_HOST_INFO_RESPONSE = """ -{ - "error": { - "code": 0 - }, - "data": [{ - "PARENTTYPE": 245, - "NAME": "ubuntuc", - "DESCRIPTION": "", - "RUNNINGSTATUS": "1", - "IP": "", - "PARENTNAME": "", - "OPERATIONSYSTEM": "0", - "LOCATION": "", - "HEALTHSTATUS": "1", - "MODEL": "", - "ID": "1", - "PARENTID": "", - "NETWORKNAME": "", - "TYPE": 21 - }, - { - "PARENTTYPE": 245, - "NAME": "ubuntu", - "DESCRIPTION": "", - "RUNNINGSTATUS": "1", - "IP": "", - "PARENTNAME": "", - "OPERATIONSYSTEM": "0", - "LOCATION": "", - "HEALTHSTATUS": "1", - "MODEL": "", - "ID": "2", - "PARENTID": "", - "NETWORKNAME": "", - "TYPE": 21 - }] -} -""" - -# A fake response of get host or hostgroup info response -FAKE_GET_ALL_HOST_GROUP_INFO_RESPONSE = """ -{ - "error": { - "code": 0 - }, - "data": [{ - "NAME":"ubuntuc", - "DESCRIPTION":"", - "ID":"0", - "TYPE":14 - }, - {"NAME":"OpenStack_HostGroup_1", - "DESCRIPTION":"", - "ID":"0", - "TYPE":14 - } - ] -} -""" - -FAKE_GET_HOST_GROUP_INFO_RESPONSE = """ -{ - "error": { - "code": 0 - }, - "data":{ - "NAME":"ubuntuc", - "DESCRIPTION":"", - "ID":"0", - "TYPE":14 - } -} 
-""" - -# A fake response of lun copy info response -FAKE_GET_LUN_COPY_INFO_RESPONSE = """ -{ - "error": { - "code": 0 - }, - "data": { - "COPYSTOPTIME": "-1", - "HEALTHSTATUS": "1", - "NAME": "w1PSNvu6RumcZMmSh4/l+Q==", - "RUNNINGSTATUS": "36", - "DESCRIPTION": "w1PSNvu6RumcZMmSh4/l+Q==", - "ID": "0", - "LUNCOPYTYPE": "1", - "COPYPROGRESS": "0", - "COPYSPEED": "2", - "TYPE": 219, - "COPYSTARTTIME": "-1" - } -} -""" - -# A fake response of lun copy list info response -FAKE_GET_LUN_COPY_LIST_INFO_RESPONSE = """ -{ - "error": { - "code": 0 - }, - "data": [{ - "COPYSTOPTIME": "1372209335", - "HEALTHSTATUS": "1", - "NAME": "w1PSNvu6RumcZMmSh4/l+Q==", - "RUNNINGSTATUS": "40", - "DESCRIPTION": "w1PSNvu6RumcZMmSh4/l+Q==", - "ID": "0", - "LUNCOPYTYPE": "1", - "COPYPROGRESS": "100", - "COPYSPEED": "2", - "TYPE": 219, - "COPYSTARTTIME": "1372209329" - }] -} -""" - -# A fake response of mappingview info response -FAKE_GET_MAPPING_VIEW_INFO_RESPONSE = """ -{ - "error":{ - "code":0 - }, - "data":[{ - "WORKMODE":"255", - "HEALTHSTATUS":"1", - "NAME":"OpenStack_Mapping_View_1", - "RUNNINGSTATUS":"27", - "DESCRIPTION":"", - "ENABLEINBANDCOMMAND":"true", - "ID":"1", - "INBANDLUNWWN":"", - "TYPE":245 - }, - { - "WORKMODE":"255", - "HEALTHSTATUS":"1", - "NAME":"YheUoRwbSX2BxN767nvLSw", - "RUNNINGSTATUS":"27", - "DESCRIPTION":"", - "ENABLEINBANDCOMMAND":"true", - "ID":"2", - "INBANDLUNWWN": "", - "TYPE": 245 - }] -} -""" - -FAKE_GET_MAPPING_VIEW_RESPONSE = """ -{ - "error":{ - "code":0 - }, - "data":[{ - "WORKMODE":"255", - "HEALTHSTATUS":"1", - "NAME":"mOWtSXnaQKi3hpB3tdFRIQ", - "RUNNINGSTATUS":"27", - "DESCRIPTION":"", - "ENABLEINBANDCOMMAND":"true", - "ID":"11", - "INBANDLUNWWN":"", - "TYPE": 245, - "AVAILABLEHOSTLUNIDLIST": "" - }] -} -""" - -FAKE_GET_SPEC_MAPPING_VIEW_RESPONSE = """ -{ - "error":{ - "code":0 - }, - "data":{ - "WORKMODE":"255", - "HEALTHSTATUS":"1", - "NAME":"mOWtSXnaQKi3hpB3tdFRIQ", - "RUNNINGSTATUS":"27", - "DESCRIPTION":"", - "ENABLEINBANDCOMMAND":"true", - 
"ID":"1", - "INBANDLUNWWN":"", - "TYPE":245, - "AVAILABLEHOSTLUNIDLIST": "[1]" - } -} -""" - -FAKE_FC_INFO_RESPONSE = """ -{ - "error":{ - "code":0 - }, - "data":[{ - "HEALTHSTATUS":"1", - "NAME":"", - "MULTIPATHTYPE":"1", - "ISFREE":"true", - "RUNNINGSTATUS":"27", - "ID":"10000090fa0d6754", - "OPERATIONSYSTEM":"255", - "TYPE":223 - }, - { - "HEALTHSTATUS":"1", - "NAME":"", - "MULTIPATHTYPE":"1", - "ISFREE":"true", - "RUNNINGSTATUS":"27", - "ID":"10000090fa0d6755", - "OPERATIONSYSTEM":"255", - "TYPE":223 - }] -} -""" - -FAKE_ISCSI_INITIATOR_RESPONSE = """ -{ - "error":{ - "code":0 - }, - "data":[{ - "CHAPNAME":"mm-user", - "HEALTHSTATUS":"1", - "ID":"iqn.1993-08.org.debian:01:9073aba6c6f", - "ISFREE":"true", - "MULTIPATHTYPE":"1", - "NAME":"", - "OPERATIONSYSTEM":"255", - "RUNNINGSTATUS":"28", - "TYPE":222, - "USECHAP":"true" - }, - { - "ISFREE":"true", - "ID":"ini-1" - }, - { - "ISFREE":"false", - "ID":"ini-2", - "PARENTNAME":"Host2", - "PARENTID":"2" - }] -} -""" - -FAKE_HOST_LINK_RESPONSE = """ -{ - "error":{ - "code":0 - }, - "data":[{ - "PARENTTYPE":21, - "TARGET_ID":"0000000000000000", - "INITIATOR_NODE_WWN":"20000090fa0d6754", - "INITIATOR_TYPE":"223", - "RUNNINGSTATUS":"27", - "PARENTNAME":"ubuntuc", - "INITIATOR_ID":"10000090fa0d6754", - "TARGET_PORT_WWN":"24000022a10a2a39", - "HEALTHSTATUS":"1", - "INITIATOR_PORT_WWN":"10000090fa0d6754", - "ID":"010000090fa0d675-0000000000110400", - "TARGET_NODE_WWN":"21000022a10a2a39", - "PARENTID":"1", - "CTRL_ID":"0", - "TYPE":255, - "TARGET_TYPE":"212" - }] -} -""" - -FAKE_PORT_GROUP_RESPONSE = """ -{ - "error":{ - "code":0 - }, - "data":[{ - "ID":11, - "NAME": "portgroup-test" - }] -} -""" - -FAKE_ERROR_INFO_RESPONSE = """ -{ - "error":{ - "code":31755596 - } -} -""" - -FAKE_ERROR_CONNECT_RESPONSE = """ -{ - "error":{ - "code":-403 - } -} -""" - -FAKE_ERROR_LUN_INFO_RESPONSE = """ -{ - "error":{ - "code":0 - }, - "data":{ - "ID":"11", - "IOCLASSID":"11", - "NAME":"5mFHcBv4RkCcD+JyrWc0SA", - "ALLOCTYPE": "0", - 
"DATATRANSFERPOLICY": "0", - "SMARTCACHEPARTITIONID": "0", - "CACHEPARTITIONID": "0" - } -} -""" -FAKE_GET_FC_INI_RESPONSE = """ -{ - "error":{ - "code":0 - }, - "data":[{ - "ID":"10000090fa0d6754", - "ISFREE":"true" - }] -} -""" - -FAKE_SYSTEM_VERSION_RESPONSE = """ -{ - "error":{ - "code": 0 - }, - "data":{ - "PRODUCTVERSION": "V100R001C10", - "wwn": "21003400a30d844d" - } -} -""" - -FAKE_GET_LUN_MIGRATION_RESPONSE = """ -{ - "data":[{"ENDTIME":"1436816174", - "ID":"9", - "PARENTID":"11", - "PARENTNAME":"xmRBHMlVRruql5vwthpPXQ", - "PROCESS":"-1", - "RUNNINGSTATUS":"76", - "SPEED":"2", - "STARTTIME":"1436816111", - "TARGETLUNID":"1", - "TARGETLUNNAME":"4924891454902893639", - "TYPE":253, - "WORKMODE":"0" - }], - "error":{"code":0, - "description":"0"} -} -""" - -FAKE_HYPERMETRODOMAIN_RESPONSE = """ -{ - "error":{ - "code": 0 - }, - "data":[{ - "PRODUCTVERSION": "V100R001C10", - "ID": "11", - "NAME": "hypermetro_test", - "RUNNINGSTATUS": "1", - "HEALTHSTATUS": "0" - }] -} -""" - -FAKE_HYPERMETRO_RESPONSE = """ -{ - "error":{ - "code": 0 - }, - "data":{ - "PRODUCTVERSION": "V100R001C10", - "ID": "11", - "NAME": "hypermetro_test", - "RUNNINGSTATUS": "1", - "HEALTHSTATUS": "1" - } -} -""" - -FAKE_QOS_INFO_RESPONSE = """ -{ - "error":{ - "code": 0 - }, - "data":{ - "ID": "11" - } -} -""" - -FAKE_GET_FC_PORT_RESPONSE = """ -{ - "error":{ - "code":0 - }, - "data":[{ - "RUNNINGSTATUS":"10", - "WWN":"2000643e8c4c5f66", - "PARENTID":"0A.1", - "ID": "1114368", - "RUNSPEED": "16000" - }, - { - "RUNNINGSTATUS":"10", - "WWN":"2000643e8c4c5f67", - "PARENTID":"0A.1", - "ID": "1114369", - "RUNSPEED": "16000" - }] -} -""" - -FAKE_SMARTCACHEPARTITION_RESPONSE = """ -{ - "error":{ - "code":0 - }, - "data":{ - "ID":"11", - "NAME":"cache-name" - } -} -""" - -FAKE_CONNECT_FC_RESPONSE = { - "driver_volume_type": 'fibre_channel', - "data": { - "target_wwn": ["10000090fa0d6754"], - "target_lun": "1", - "volume_id": ID - } -} - -FAKE_METRO_INFO_RESPONSE = { - "PRODUCTVERSION": 
"V100R001C10", - "ID": "11", - "NAME": "hypermetro_test", - "RUNNINGSTATUS": "42", - "HEALTHSTATUS": "0" -} - -FAKE_METRO_INFO_NEW_RESPONSE = """{ - "error": { - "code": 0 - }, - "data": { - "PRODUCTVERSION": "V100R001C10", - "ID": "11", - "NAME": "hypermetro_test", - "RUNNINGSTATUS": "1", - "HEALTHSTATUS": "1" - } -} -""" - -FAKE_CREATE_METROROUP_RESPONSE = """ -{ - "data": { - "DESCRIPTION": "", - "DOMAINID": "643e8c4c5f670100", - "DOMAINNAME": "hypermetro-domain", - "HEALTHSTATUS": "1", - "ID": "3400a30d844d8002", - "ISEMPTY": "true", - "NAME": "6F7kdHZcQJ2zbzxHmBl4FQ", - "PRIORITYSTATIONTYPE": "0", - "RECOVERYPOLICY": "1", - "RESOURCETYPE": "11", - "RUNNINGSTATUS": "41", - "SPEED": "2", - "SYNCDIRECTION": "1", - "TYPE": 15364 - }, - "error": { - "code": 0, - "description": "0" - } -} -""" - -FAKE_GET_METROROUP_RESPONSE = { - "data": [{ - "DESCRIPTION": "", - "DOMAINID": "643e8c4c5f670100", - "DOMAINNAME": "hypermetro-domain", - "HEALTHSTATUS": "1", - "ID": "11", - "ISEMPTY": "true", - "NAME": huawei_utils.encode_name(ID), - "PRIORITYSTATIONTYPE": "0", - "RECOVERYPOLICY": "1", - "RESOURCETYPE": "11", - "RUNNINGSTATUS": "41", - "SPEED": "2", - "SYNCDIRECTION": "1", - "TYPE": 15364 - }], - "error": { - "code": 0, - "description": "0" - }, -} - - -FAKE_GET_METROROUP_ID_RESPONSE = """ -{ - "data": { - "DESCRIPTION": "", - "DOMAINID": "643e8c4c5f670100", - "DOMAINNAME": "hypermetro-domain", - "HEALTHSTATUS": "1", - "ID": "11", - "ISEMPTY": "false", - "NAME": "IexzQZJWSXuX2e9I7c8GNQ", - "PRIORITYSTATIONTYPE": "0", - "RECOVERYPOLICY": "1", - "RESOURCETYPE": "11", - "RUNNINGSTATUS": "1", - "SPEED": "2", - "SYNCDIRECTION": "1", - "TYPE": 15364 - }, - "error": { - "code": 0, - "description": "0" - } -} -""" - -# mock login info map -MAP_COMMAND_TO_FAKE_RESPONSE = {} - -MAP_COMMAND_TO_FAKE_RESPONSE['/xx/sessions/POST'] = ( - FAKE_GET_LOGIN_STORAGE_RESPONSE) - -MAP_COMMAND_TO_FAKE_RESPONSE['/sessions/DELETE'] = ( - FAKE_LOGIN_OUT_STORAGE_RESPONSE) - 
-MAP_COMMAND_TO_FAKE_RESPONSE['/LUN_MIGRATION/POST'] = ( - FAKE_COMMON_SUCCESS_RESPONSE) - -MAP_COMMAND_TO_FAKE_RESPONSE['/LUN_MIGRATION?range=[0-256]/GET'] = ( - FAKE_GET_LUN_MIGRATION_RESPONSE) - -MAP_COMMAND_TO_FAKE_RESPONSE['/LUN_MIGRATION/11/DELETE'] = ( - FAKE_COMMON_SUCCESS_RESPONSE) - -# mock storage info map -MAP_COMMAND_TO_FAKE_RESPONSE['/storagepool/GET'] = ( - FAKE_STORAGE_POOL_RESPONSE) - -# mock lun info map -MAP_COMMAND_TO_FAKE_RESPONSE['/lun/POST'] = ( - FAKE_LUN_INFO_RESPONSE) - -MAP_COMMAND_TO_FAKE_RESPONSE['/lun/11/GET'] = ( - FAKE_LUN_GET_SUCCESS_RESPONSE) - -MAP_COMMAND_TO_FAKE_RESPONSE['/lun/1/GET'] = ( - FAKE_LUN_GET_SUCCESS_RESPONSE) - -MAP_COMMAND_TO_FAKE_RESPONSE['/lun/11/DELETE'] = ( - FAKE_COMMON_SUCCESS_RESPONSE) - -MAP_COMMAND_TO_FAKE_RESPONSE['/lun/1/DELETE'] = ( - FAKE_COMMON_SUCCESS_RESPONSE) - -MAP_COMMAND_TO_FAKE_RESPONSE['/lun/1/PUT'] = ( - FAKE_COMMON_SUCCESS_RESPONSE) - -MAP_COMMAND_TO_FAKE_RESPONSE['/lun/11/PUT'] = ( - FAKE_COMMON_SUCCESS_RESPONSE) - -MAP_COMMAND_TO_FAKE_RESPONSE['/lun?filter=NAME::%s/GET' % ENCODE_NAME] = ( - json.dumps(FAKE_QUERY_ALL_LUN_RESPONSE)) - -MAP_COMMAND_TO_FAKE_RESPONSE['/lun/associate?TYPE=11&ASSOCIATEOBJTYPE=256' - '&ASSOCIATEOBJID=11/GET'] = ( - FAKE_LUN_ASSOCIATE_RESPONSE) - -MAP_COMMAND_TO_FAKE_RESPONSE['/lun/associate?TYPE=11&ASSOCIATEOBJTYPE=256' - '&ASSOCIATEOBJID=12/GET'] = ( - FAKE_LUN_ASSOCIATE_RESPONSE) - -MAP_COMMAND_TO_FAKE_RESPONSE['/lun/associate?ID=1&TYPE=11&ASSOCIATEOBJTYPE=21' - '&ASSOCIATEOBJID=0/GET'] = ( - FAKE_LUN_ASSOCIATE_RESPONSE) - -MAP_COMMAND_TO_FAKE_RESPONSE['/lun/associate?TYPE=11&ASSOCIATEOBJTYPE=21' - '&ASSOCIATEOBJID=1/GET'] = ( - FAKE_COMMON_SUCCESS_RESPONSE) - -MAP_COMMAND_TO_FAKE_RESPONSE['/lun/associate/cachepartition?ID=1' - '&ASSOCIATEOBJTYPE=11&ASSOCIATEOBJID=11' - '/DELETE'] = ( - FAKE_COMMON_SUCCESS_RESPONSE) - -MAP_COMMAND_TO_FAKE_RESPONSE['/snapshot/associate?TYPE=27&ASSOCIATEOBJTYPE=21' - '&ASSOCIATEOBJID=1/GET'] = ( - FAKE_COMMON_SUCCESS_RESPONSE) - 
-MAP_COMMAND_TO_FAKE_RESPONSE['/snapshot/associate?TYPE=27&ASSOCIATEOBJTYPE=256' - '&ASSOCIATEOBJID=11/GET'] = ( - FAKE_COMMON_SUCCESS_RESPONSE) - -MAP_COMMAND_TO_FAKE_RESPONSE['/lungroup?range=[0-8191]/GET'] = ( - FAKE_QUERY_LUN_GROUP_INFO_RESPONSE) - -MAP_COMMAND_TO_FAKE_RESPONSE['/lungroup/POST'] = ( - FAKE_QUERY_LUN_GROUP_RESPONSE) - -MAP_COMMAND_TO_FAKE_RESPONSE['/lungroup/associate/POST'] = ( - FAKE_QUERY_LUN_GROUP_ASSOCIAT_RESPONSE) - -MAP_COMMAND_TO_FAKE_RESPONSE['/LUNGroup/11/DELETE'] = ( - FAKE_COMMON_SUCCESS_RESPONSE) - -MAP_COMMAND_TO_FAKE_RESPONSE['/lungroup/associate?ID=11&ASSOCIATEOBJTYPE=11' - '&ASSOCIATEOBJID=1/DELETE'] = ( - FAKE_COMMON_SUCCESS_RESPONSE) - -MAP_COMMAND_TO_FAKE_RESPONSE['/lungroup/associate?TYPE=256&ASSOCIATEOBJTYPE=11' - '&ASSOCIATEOBJID=11/GET'] = ( - FAKE_LUN_ASSOCIATE_RESPONSE) - -MAP_COMMAND_TO_FAKE_RESPONSE['/lungroup/associate?TYPE=256&ASSOCIATEOBJTYPE=11' - '&ASSOCIATEOBJID=1/GET'] = ( - FAKE_LUN_ASSOCIATE_RESPONSE) - -MAP_COMMAND_TO_FAKE_RESPONSE['/lungroup/associate?ID=11&ASSOCIATEOBJTYPE=11' - '&ASSOCIATEOBJID=11/DELETE'] = ( - FAKE_COMMON_SUCCESS_RESPONSE) - -MAP_COMMAND_TO_FAKE_RESPONSE['/lungroup/associate?ID=11&ASSOCIATEOBJTYPE=27' - '&ASSOCIATEOBJID=11/DELETE'] = ( - FAKE_COMMON_SUCCESS_RESPONSE) - -MAP_COMMAND_TO_FAKE_RESPONSE['/lun/count?TYPE=11&ASSOCIATEOBJTYPE=256' - '&ASSOCIATEOBJID=11/GET'] = ( - FAKE_LUN_COUNT_RESPONSE) - -MAP_COMMAND_TO_FAKE_RESPONSE['/snapshot/count?TYPE=27&ASSOCIATEOBJTYPE=256' - '&ASSOCIATEOBJID=1/GET'] = ( - FAKE_SNAPSHOT_COUNT_RESPONSE) - -MAP_COMMAND_TO_FAKE_RESPONSE['/snapshot/count?TYPE=27&ASSOCIATEOBJTYPE=256' - '&ASSOCIATEOBJID=11/GET'] = ( - FAKE_SNAPSHOT_COUNT_RESPONSE) - -MAP_COMMAND_TO_FAKE_RESPONSE['/lungroup/associate?TYPE=256&ASSOCIATEOBJTYPE=27' - '&ASSOCIATEOBJID=11/GET'] = ( - FAKE_LUN_ASSOCIATE_RESPONSE) - -MAP_COMMAND_TO_FAKE_RESPONSE['/lun/expand/PUT'] = ( - FAKE_LUN_INFO_RESPONSE) - -MAP_COMMAND_TO_FAKE_RESPONSE['/lungroup/associate?ID=12&ASSOCIATEOBJTYPE=11' - 
'&ASSOCIATEOBJID=12/DELETE'] = ( - FAKE_COMMON_SUCCESS_RESPONSE) - -# mock snapshot info map -MAP_COMMAND_TO_FAKE_RESPONSE['/snapshot/POST'] = ( - FAKE_CREATE_SNAPSHOT_INFO_RESPONSE) - -# mock snapshot info map -MAP_COMMAND_TO_FAKE_RESPONSE['/snapshot/11/GET'] = ( - FAKE_GET_SNAPSHOT_INFO_RESPONSE) - -MAP_COMMAND_TO_FAKE_RESPONSE['/snapshot/activate/POST'] = ( - FAKE_COMMON_SUCCESS_RESPONSE) - -MAP_COMMAND_TO_FAKE_RESPONSE['/snapshot/stop/PUT'] = ( - FAKE_COMMON_SUCCESS_RESPONSE) - -MAP_COMMAND_TO_FAKE_RESPONSE['/snapshot/11/DELETE'] = ( - FAKE_COMMON_SUCCESS_RESPONSE) - -MAP_COMMAND_TO_FAKE_RESPONSE['/snapshot?filter=NAME::%s/GET' % ENCODE_NAME] = ( - json.dumps(FAKE_SNAPSHOT_LIST_INFO_RESPONSE)) - -# mock QoS info map -MAP_COMMAND_TO_FAKE_RESPONSE['/ioclass/11/GET'] = ( - FAKE_LUN_GET_SUCCESS_RESPONSE) - -MAP_COMMAND_TO_FAKE_RESPONSE['/ioclass/11/DELETE'] = ( - FAKE_COMMON_SUCCESS_RESPONSE) - -MAP_COMMAND_TO_FAKE_RESPONSE['/ioclass/11/PUT'] = ( - FAKE_COMMON_SUCCESS_RESPONSE) - -MAP_COMMAND_TO_FAKE_RESPONSE['/ioclass/active/11/PUT'] = ( - FAKE_COMMON_SUCCESS_RESPONSE) - -MAP_COMMAND_TO_FAKE_RESPONSE['/ioclass/POST'] = ( - FAKE_QOS_INFO_RESPONSE) - -MAP_COMMAND_TO_FAKE_RESPONSE['/ioclass/count/GET'] = ( - FAKE_COMMON_FAIL_RESPONSE) - -# mock iscsi info map -MAP_COMMAND_TO_FAKE_RESPONSE['/iscsi_tgt_port/GET'] = ( - FAKE_GET_ISCSI_INFO_RESPONSE) - -MAP_COMMAND_TO_FAKE_RESPONSE['/eth_port/GET'] = ( - FAKE_GET_ETH_INFO_RESPONSE) - -MAP_COMMAND_TO_FAKE_RESPONSE['/eth_port/associate?TYPE=213&ASSOCIATEOBJTYPE' - '=257&ASSOCIATEOBJID=11/GET'] = ( - FAKE_GET_ETH_ASSOCIATE_RESPONSE) - -MAP_COMMAND_TO_FAKE_RESPONSE['/iscsidevicename/GET'] = ( - FAKE_GET_ISCSI_DEVICE_RESPONSE) - -MAP_COMMAND_TO_FAKE_RESPONSE['/iscsi_initiator?range=[0-256]/GET'] = ( - FAKE_ISCSI_INITIATOR_RESPONSE) - -MAP_COMMAND_TO_FAKE_RESPONSE['/iscsi_initiator/GET'] = ( - FAKE_ISCSI_INITIATOR_RESPONSE) - -MAP_COMMAND_TO_FAKE_RESPONSE['/iscsi_initiator/POST'] = ( - FAKE_ISCSI_INITIATOR_RESPONSE) - 
-MAP_COMMAND_TO_FAKE_RESPONSE['/iscsi_initiator/PUT'] = ( - FAKE_ISCSI_INITIATOR_RESPONSE) - -MAP_COMMAND_TO_FAKE_RESPONSE['/iscsi_initiator?PARENTTYPE=21&PARENTID' - '=1/GET'] = ( - FAKE_ISCSI_INITIATOR_RESPONSE) - -MAP_COMMAND_TO_FAKE_RESPONSE['/iscsi_initiator/remove_iscsi_from_host/PUT'] = ( - FAKE_COMMON_SUCCESS_RESPONSE) - -MAP_COMMAND_TO_FAKE_RESPONSE['/iscsi_initiator/' - 'iqn.1993-08.debian:01:ec2bff7ac3a3/PUT'] = ( - FAKE_ISCSI_INITIATOR_RESPONSE) -# mock host info map -MAP_COMMAND_TO_FAKE_RESPONSE['/host?range=[0-65535]/GET'] = ( - FAKE_GET_ALL_HOST_INFO_RESPONSE) - -MAP_COMMAND_TO_FAKE_RESPONSE['/host/1/DELETE'] = ( - FAKE_COMMON_SUCCESS_RESPONSE) - -MAP_COMMAND_TO_FAKE_RESPONSE['/host/1/GET'] = ( - FAKE_GET_HOST_RESPONSE) - -MAP_COMMAND_TO_FAKE_RESPONSE['/host/POST'] = ( - FAKE_CREATE_HOST_RESPONSE) - -MAP_COMMAND_TO_FAKE_RESPONSE['/hostgroup?range=[0-8191]/GET'] = ( - FAKE_GET_ALL_HOST_GROUP_INFO_RESPONSE) - -MAP_COMMAND_TO_FAKE_RESPONSE['/hostgroup/GET'] = ( - FAKE_GET_HOST_GROUP_INFO_RESPONSE) - -MAP_COMMAND_TO_FAKE_RESPONSE['/host/associate?TYPE=14&ID=0' - '&ASSOCIATEOBJTYPE=21&ASSOCIATEOBJID=1' - '/DELETE'] = ( - FAKE_COMMON_SUCCESS_RESPONSE) - -MAP_COMMAND_TO_FAKE_RESPONSE['/host/associate?TYPE=14&ID=0' - '&ASSOCIATEOBJID=0/GET'] = ( - FAKE_COMMON_SUCCESS_RESPONSE) - -MAP_COMMAND_TO_FAKE_RESPONSE['/host/associate?TYPE=21&' - 'ASSOCIATEOBJTYPE=14&ASSOCIATEOBJID=0/GET'] = ( - FAKE_COMMON_SUCCESS_RESPONSE) - -MAP_COMMAND_TO_FAKE_RESPONSE['/hostgroup/0/DELETE'] = ( - FAKE_COMMON_SUCCESS_RESPONSE) - -MAP_COMMAND_TO_FAKE_RESPONSE['/host/associate?TYPE=21&' - 'ASSOCIATEOBJTYPE=14&ASSOCIATEOBJID=0/GET'] = ( - FAKE_COMMON_SUCCESS_RESPONSE) - - -MAP_COMMAND_TO_FAKE_RESPONSE['/hostgroup/associate/POST'] = ( - FAKE_COMMON_SUCCESS_RESPONSE) - -# mock copy info map -MAP_COMMAND_TO_FAKE_RESPONSE['/luncopy/POST'] = ( - FAKE_GET_LUN_COPY_INFO_RESPONSE) - -MAP_COMMAND_TO_FAKE_RESPONSE['/LUNCOPY?range=[0-1023]/GET'] = ( - FAKE_GET_LUN_COPY_LIST_INFO_RESPONSE) - 
-MAP_COMMAND_TO_FAKE_RESPONSE['/LUNCOPY/start/PUT'] = ( - FAKE_COMMON_SUCCESS_RESPONSE) - -MAP_COMMAND_TO_FAKE_RESPONSE['/LUNCOPY/0/DELETE'] = ( - FAKE_COMMON_SUCCESS_RESPONSE) - -# mock mapping view info map -MAP_COMMAND_TO_FAKE_RESPONSE['/mappingview?range=[0-8191]/GET'] = ( - FAKE_GET_MAPPING_VIEW_INFO_RESPONSE) - -MAP_COMMAND_TO_FAKE_RESPONSE['/mappingview/POST'] = ( - FAKE_GET_MAPPING_VIEW_RESPONSE) - -MAP_COMMAND_TO_FAKE_RESPONSE['/mappingview/PUT'] = ( - FAKE_GET_MAPPING_VIEW_RESPONSE) - -MAP_COMMAND_TO_FAKE_RESPONSE['/MAPPINGVIEW/1/GET'] = ( - FAKE_GET_SPEC_MAPPING_VIEW_RESPONSE) - -MAP_COMMAND_TO_FAKE_RESPONSE['/mappingview/1/DELETE'] = ( - FAKE_COMMON_SUCCESS_RESPONSE) - -MAP_COMMAND_TO_FAKE_RESPONSE['/mappingview/REMOVE_ASSOCIATE/PUT'] = ( - FAKE_COMMON_SUCCESS_RESPONSE) - -MAP_COMMAND_TO_FAKE_RESPONSE['/mappingview/associate/lungroup?TYPE=256&' - 'ASSOCIATEOBJTYPE=245&ASSOCIATEOBJID=1/GET'] = ( - FAKE_GET_MAPPING_VIEW_RESPONSE) - -MAP_COMMAND_TO_FAKE_RESPONSE['/mappingview/associate?TYPE=245&' - 'ASSOCIATEOBJTYPE=14&ASSOCIATEOBJID=0/GET'] = ( - FAKE_GET_MAPPING_VIEW_RESPONSE) - -MAP_COMMAND_TO_FAKE_RESPONSE['/mappingview/associate?TYPE=245&' - 'ASSOCIATEOBJTYPE=256&ASSOCIATEOBJID=11/GET'] = ( - FAKE_GET_MAPPING_VIEW_RESPONSE) - -MAP_COMMAND_TO_FAKE_RESPONSE['/mappingview/associate?TYPE=245&' - 'ASSOCIATEOBJTYPE=257&ASSOCIATEOBJID=0/GET'] = ( - FAKE_GET_MAPPING_VIEW_RESPONSE) - -MAP_COMMAND_TO_FAKE_RESPONSE['/mappingview/associate?TYPE=245&' - 'ASSOCIATEOBJTYPE=257&ASSOCIATEOBJID=11/GET'] = ( - FAKE_GET_MAPPING_VIEW_RESPONSE) - -FAKE_GET_ENGINES_RESPONSE = """ -{ - "error":{ - "code": 0 - }, - "data":[{ - "NODELIST": "[]", - "ID": "0" - }] -} -""" - -MAP_COMMAND_TO_FAKE_RESPONSE['/storageengine/GET'] = ( - FAKE_GET_ENGINES_RESPONSE) - -MAP_COMMAND_TO_FAKE_RESPONSE['/portgroup/associate?ASSOCIATEOBJTYPE=245&' - 'ASSOCIATEOBJID=1&range=[0-8191]/GET'] = ( - FAKE_GET_MAPPING_VIEW_RESPONSE) - -MAP_COMMAND_TO_FAKE_RESPONSE['/MAPPINGVIEW/CREATE_ASSOCIATE/PUT'] 
= ( - FAKE_COMMON_SUCCESS_RESPONSE) - -# mock FC info map -MAP_COMMAND_TO_FAKE_RESPONSE['/fc_initiator?ISFREE=true&' - 'range=[0-8191]/GET'] = ( - FAKE_FC_INFO_RESPONSE) - -MAP_COMMAND_TO_FAKE_RESPONSE['/MAPPINGVIEW/CREATE_ASSOCIATE/PUT'] = ( - FAKE_COMMON_SUCCESS_RESPONSE) - -# mock FC info map -MAP_COMMAND_TO_FAKE_RESPONSE['/fc_initiator?ISFREE=true&' - 'range=[0-8191]/GET'] = ( - FAKE_FC_INFO_RESPONSE) - -MAP_COMMAND_TO_FAKE_RESPONSE['/fc_initiator/10000090fa0d6754/GET'] = ( - FAKE_FC_INFO_RESPONSE) - -MAP_COMMAND_TO_FAKE_RESPONSE['/fc_initiator/10000090fa0d6754/PUT'] = ( - FAKE_COMMON_SUCCESS_RESPONSE) - -MAP_COMMAND_TO_FAKE_RESPONSE['/host_link?INITIATOR_TYPE=223' - '&INITIATOR_PORT_WWN=10000090fa0d6754/GET'] = ( - FAKE_HOST_LINK_RESPONSE) - -MAP_COMMAND_TO_FAKE_RESPONSE['/portgroup?range=[0-8191]&TYPE=257/GET'] = ( - FAKE_PORT_GROUP_RESPONSE) - -# mock system info map -MAP_COMMAND_TO_FAKE_RESPONSE['/system//GET'] = ( - FAKE_SYSTEM_VERSION_RESPONSE) - -MAP_COMMAND_TO_FAKE_RESPONSE['/fc_initiator?range=[0-256]/GET'] = ( - FAKE_GET_FC_INI_RESPONSE) - -MAP_COMMAND_TO_FAKE_RESPONSE['/fc_port/GET'] = ( - FAKE_GET_FC_PORT_RESPONSE) - -MAP_COMMAND_TO_FAKE_RESPONSE['/fc_initiator/GET'] = ( - FAKE_GET_FC_PORT_RESPONSE) - -MAP_COMMAND_TO_FAKE_RESPONSE['fc_initiator?range=[0-256]/GET'] = ( - FAKE_GET_FC_PORT_RESPONSE) - -MAP_COMMAND_TO_FAKE_RESPONSE['/fc_initiator?PARENTTYPE=21&PARENTID=1/GET'] = ( - FAKE_GET_FC_PORT_RESPONSE) - -MAP_COMMAND_TO_FAKE_RESPONSE['/lun/associate/cachepartition/POST'] = ( - FAKE_SYSTEM_VERSION_RESPONSE) - -MAP_COMMAND_TO_FAKE_RESPONSE['/fc_initiator?range=[0-256]&PARENTID=1/GET'] = ( - FAKE_GET_FC_PORT_RESPONSE) - -MAP_COMMAND_TO_FAKE_RESPONSE['/fc_initiator?PARENTTYPE=21&PARENTID=1/GET'] = ( - FAKE_GET_FC_PORT_RESPONSE) - -MAP_COMMAND_TO_FAKE_RESPONSE['/SMARTCACHEPARTITION/0/GET'] = ( - FAKE_SMARTCACHEPARTITION_RESPONSE) - -MAP_COMMAND_TO_FAKE_RESPONSE['/SMARTCACHEPARTITION/REMOVE_ASSOCIATE/PUT'] = ( - FAKE_COMMON_SUCCESS_RESPONSE) - 
-MAP_COMMAND_TO_FAKE_RESPONSE['/SMARTCACHEPARTITION/count/GET'] = ( - FAKE_COMMON_FAIL_RESPONSE) - -MAP_COMMAND_TO_FAKE_RESPONSE['/cachepartition/0/GET'] = ( - FAKE_SMARTCACHEPARTITION_RESPONSE) - -MAP_COMMAND_TO_FAKE_RESPONSE['/HyperMetroDomain?range=[0-32]/GET'] = ( - FAKE_HYPERMETRODOMAIN_RESPONSE) - -MAP_COMMAND_TO_FAKE_RESPONSE['/HyperMetroPair/POST'] = ( - FAKE_HYPERMETRO_RESPONSE) - -MAP_COMMAND_TO_FAKE_RESPONSE['/HyperMetroPair/3400a30d844d0007/GET'] = ( - FAKE_METRO_INFO_NEW_RESPONSE) - -MAP_COMMAND_TO_FAKE_RESPONSE['/HyperMetroPair/disable_hcpair/PUT'] = ( - FAKE_COMMON_SUCCESS_RESPONSE) - -MAP_COMMAND_TO_FAKE_RESPONSE['/hyperMetro/associate/pair/POST'] = ( - FAKE_COMMON_SUCCESS_RESPONSE) - -MAP_COMMAND_TO_FAKE_RESPONSE['/hyperMetro/associate/pair/DELETE'] = ( - FAKE_COMMON_SUCCESS_RESPONSE) - -MAP_COMMAND_TO_FAKE_RESPONSE['/HyperMetroPair/11/DELETE'] = ( - FAKE_COMMON_SUCCESS_RESPONSE) - -MAP_COMMAND_TO_FAKE_RESPONSE['/HyperMetroPair/11/GET'] = ( - FAKE_HYPERMETRO_RESPONSE) - -MAP_COMMAND_TO_FAKE_RESPONSE['/HyperMetroPair?range=[0-4095]/GET'] = ( - FAKE_COMMON_SUCCESS_RESPONSE) - -MAP_COMMAND_TO_FAKE_RESPONSE['/HyperMetroPair/synchronize_hcpair/PUT'] = ( - FAKE_COMMON_SUCCESS_RESPONSE) - -MAP_COMMAND_TO_FAKE_RESPONSE['/splitmirror?range=[0-8191]/GET'] = ( - FAKE_COMMON_SUCCESS_RESPONSE) - -MAP_COMMAND_TO_FAKE_RESPONSE['/splitmirror/count/GET'] = ( - FAKE_COMMON_FAIL_RESPONSE) - -MAP_COMMAND_TO_FAKE_RESPONSE['/smartcachepool/count/GET'] = ( - FAKE_COMMON_FAIL_RESPONSE) - -FAKE_GET_PORTG_BY_VIEW = """ -{ - "data": [{ - "DESCRIPTION": "Please do NOT modify this. 
Engine ID: 0", - "ID": "0", - "NAME": "OpenStack_PortGroup_1", - "TYPE": 257 - }], - "error": { - "code": 0 - } -} -""" - -MAP_COMMAND_TO_FAKE_RESPONSE['/portgroup/associate/mappingview?TYPE=257&AS' - 'SOCIATEOBJTYPE=245&ASSOCIATEOBJID=1/GET'] = ( - FAKE_GET_PORTG_BY_VIEW) - -FAKE_GET_PORT_BY_PORTG = """ -{ - "data":[{ - "CONFSPEED":"0","FCCONFMODE":"3", - "FCRUNMODE":"0","HEALTHSTATUS":"1","ID":"2000643e8c4c5f66", - "MAXSUPPORTSPEED":"16000","NAME":"P0","PARENTID":"0B.1", - "PARENTTYPE":209,"RUNNINGSTATUS":"10","RUNSPEED":"8000", - "WWN":"2000643e8c4c5f66" - }], - "error":{ - "code":0,"description":"0" - } -} -""" - -MAP_COMMAND_TO_FAKE_RESPONSE['/fc_port/associate/portgroup?TYPE=212&ASSOCI' - 'ATEOBJTYPE=257&ASSOCIATEOBJID=0/GET'] = ( - FAKE_GET_PORT_BY_PORTG) - -FAKE_GET_PORTG = """ -{ - "data": { - "TYPE": 257, - "NAME": "OpenStack_PortGroup_1", - "DESCRIPTION": "Please DO NOT change thefollowing message: 0", - "ID": "0" - }, - "error": { - "code": 0, - "description": "0" - } -} -""" - -MAP_COMMAND_TO_FAKE_RESPONSE['/portgroup/0/GET'] = FAKE_GET_PORTG - -MAP_COMMAND_TO_FAKE_RESPONSE['/portgroup/0/PUT'] = FAKE_GET_PORTG - -MAP_COMMAND_TO_FAKE_RESPONSE['/port/associate/portgroup/POST'] = ( - FAKE_GET_PORT_BY_PORTG) - -MAP_COMMAND_TO_FAKE_RESPONSE['/port/associate/portgroup?ID=0&TYPE=257&ASSOCIA' - 'TEOBJTYPE=212&ASSOCIATEOBJID=2000643e8c4c5f66/DE' - 'LETE'] = ( - FAKE_COMMON_SUCCESS_RESPONSE) - -FAKE_CREATE_PORTG = """ -{ - "data": { - "DESCRIPTION": "Please DO NOT change the following message: 0", - "ID": "0", - "NAME": "OpenStack_PortGroup_1", - "TYPE": 257 - }, - "error": { - "code": 0, - "description": "0" - } -} -""" - -MAP_COMMAND_TO_FAKE_RESPONSE['/PortGroup/POST'] = FAKE_CREATE_PORTG - -MAP_COMMAND_TO_FAKE_RESPONSE['/PortGroup/1/DELETE'] = ( - FAKE_COMMON_SUCCESS_RESPONSE) - -FAKE_GET_PORTG_FROM_PORT = """ -{ - "data": [{ - "TYPE": 257, - "NAME": "OpenStack_PortGroup_1", - "DESCRIPTION": "PleaseDONOTchangethefollowingmessage: 0", - "ID": "0" - }], - 
"error": { - "code": 0, - "description": "0" - } -} -""" - -MAP_COMMAND_TO_FAKE_RESPONSE['/portgroup/associate/fc_port?TYPE=257&ASSOCIA' - 'TEOBJTYPE=212&ASSOCIATEOBJID=1114368/GET'] = ( - FAKE_GET_PORTG_FROM_PORT) - -FAKE_GET_VIEW_BY_PORTG = """ -{ - "data": [{ - "ASSOCIATEOBJID": "0", - "COUNT": "0", - "ASSOCIATEOBJTYPE": "0", - "INBANDLUNWWN": "", - "FORFILESYSTEM": "false", - "ID": "2", - "ENABLEINBANDCOMMAND": "false", - "NAME": "OpenStack_Mapping_View_1", - "WORKMODE": "0", - "TYPE": 245, - "HOSTLUNID": "0", - "DESCRIPTION": "" - }], - "error": { - "code": 0, - "description": "0" - } -} -""" - -MAP_COMMAND_TO_FAKE_RESPONSE['/mappingview/associate/portgroup?TYPE=245&ASS' - 'OCIATEOBJTYPE=257&ASSOCIATEOBJID=0/GET'] = ( - FAKE_GET_VIEW_BY_PORTG) - -FAKE_GET_LUNG_BY_VIEW = """ -{ - "data": [{ - "TYPE": 256, - "NAME": "OpenStack_LunGroup_1", - "DESCRIPTION": "OpenStack_LunGroup_1", - "ID": "1" - }], - "error": { - "code": 0, - "description": "0" - } -} -""" - -MAP_COMMAND_TO_FAKE_RESPONSE['/lungroup/associate/mappingview?TYPE=256&ASSO' - 'CIATEOBJTYPE=245&ASSOCIATEOBJID=2/GET'] = ( - FAKE_GET_LUNG_BY_VIEW) - -FAKE_LUN_COUNT_RESPONSE_1 = """ -{ - "data":{ - "COUNT":"2" - }, - "error":{ - "code":0, - "description":"0" - } -} -""" - -MAP_COMMAND_TO_FAKE_RESPONSE['/lun/count?TYPE=11&ASSOCIATEOB' - 'JTYPE=256&ASSOCIATEOBJID=1/GET'] = ( - FAKE_LUN_COUNT_RESPONSE_1) - -FAKE_PORTS_IN_PG_RESPONSE = """ -{ - "data": [{ - "ID": "1114114", - "WWN": "2002643e8c4c5f66" - }, - { - "ID": "1114113", - "WWN": "2001643e8c4c5f66" - }], - "error": { - "code": 0, - "description": "0" - } -} -""" - -MAP_COMMAND_TO_FAKE_RESPONSE['/fc_port/associate?TYPE=213&ASSOCIATEOBJTYPE=' - '257&ASSOCIATEOBJID=0/GET'] = ( - FAKE_PORTS_IN_PG_RESPONSE) - -MAP_COMMAND_TO_FAKE_RESPONSE['/HyperMetro_ConsistentGroup/POST'] = ( - FAKE_CREATE_METROROUP_RESPONSE) - -MAP_COMMAND_TO_FAKE_RESPONSE["/HyperMetro_ConsistentGroup?type" - "='15364'/GET"] = ( - json.dumps(FAKE_GET_METROROUP_RESPONSE)) - 
-MAP_COMMAND_TO_FAKE_RESPONSE["/HyperMetro_ConsistentGroup/11/GET"] = ( - FAKE_GET_METROROUP_ID_RESPONSE) - -MAP_COMMAND_TO_FAKE_RESPONSE["/HyperMetro_ConsistentGroup/11/DELETE"] = ( - FAKE_COMMON_SUCCESS_RESPONSE) - -MAP_COMMAND_TO_FAKE_RESPONSE["/HyperMetro_ConsistentGroup/stop/PUT"] = ( - FAKE_COMMON_SUCCESS_RESPONSE) - -MAP_COMMAND_TO_FAKE_RESPONSE["/HyperMetro_ConsistentGroup/sync/PUT"] = ( - FAKE_COMMON_SUCCESS_RESPONSE) - -FAKE_GET_REMOTEDEV_RESPONSE = """ -{ - "data":[{ - "ARRAYTYPE":"1", - "HEALTHSTATUS":"1", - "ID":"0", - "NAME":"Huawei.Storage", - "RUNNINGSTATUS":"1", - "WWN":"21003400a30d844d" - }], - "error":{ - "code":0, - "description":"0" - } -} -""" - -MAP_COMMAND_TO_FAKE_RESPONSE['/remote_device/GET'] = ( - FAKE_GET_REMOTEDEV_RESPONSE) - -FAKE_CREATE_PAIR_RESPONSE = """ -{ - "data":{ - "ID":"%s" - }, - "error":{ - "code":0, - "description":"0" - } -} -""" % TEST_PAIR_ID -MAP_COMMAND_TO_FAKE_RESPONSE['/REPLICATIONPAIR/POST'] = ( - FAKE_CREATE_PAIR_RESPONSE) - -FAKE_DELETE_PAIR_RESPONSE = """ -{ - "data":{}, - "error":{ - "code":0, - "description":"0" - } -} -""" -MAP_COMMAND_TO_FAKE_RESPONSE['/REPLICATIONPAIR/%s/DELETE' % TEST_PAIR_ID] = ( - FAKE_DELETE_PAIR_RESPONSE) - -FAKE_SET_PAIR_ACCESS_RESPONSE = """ -{ - "data":{}, - "error":{ - "code":0, - "description":"0" - } -} -""" -MAP_COMMAND_TO_FAKE_RESPONSE['/REPLICATIONPAIR/%s/PUT' % TEST_PAIR_ID] = ( - FAKE_SET_PAIR_ACCESS_RESPONSE) - -FAKE_GET_PAIR_NORMAL_RESPONSE = """ -{ - "data":{ - "REPLICATIONMODEL": "1", - "RUNNINGSTATUS": "1", - "SECRESACCESS": "2", - "HEALTHSTATUS": "1", - "ISPRIMARY": "true" - }, - "error":{ - "code":0, - "description":"0" - } -} -""" - -FAKE_GET_PAIR_SPLIT_RESPONSE = """ -{ - "data":{ - "REPLICATIONMODEL": "1", - "RUNNINGSTATUS": "26", - "SECRESACCESS": "2", - "ISPRIMARY": "true" - }, - "error":{ - "code":0, - "description":"0" - } -} -""" - -FAKE_GET_PAIR_SYNC_RESPONSE = """ -{ - "data":{ - "REPLICATIONMODEL": "1", - "RUNNINGSTATUS": "23", - "SECRESACCESS": "2" - }, - 
"error":{ - "code":0, - "description":"0" - } -} -""" -MAP_COMMAND_TO_FAKE_RESPONSE['/REPLICATIONPAIR/%s/GET' % TEST_PAIR_ID] = ( - FAKE_GET_PAIR_NORMAL_RESPONSE) - -FAKE_SYNC_PAIR_RESPONSE = """ -{ - "data":{}, - "error":{ - "code":0, - "description":"0" - } -} -""" -MAP_COMMAND_TO_FAKE_RESPONSE['/REPLICATIONPAIR/sync/PUT'] = ( - FAKE_SYNC_PAIR_RESPONSE) - -FAKE_SPLIT_PAIR_RESPONSE = """ -{ - "data":{}, - "error":{ - "code":0, - "description":"0" - } -} -""" -MAP_COMMAND_TO_FAKE_RESPONSE['/REPLICATIONPAIR/split/PUT'] = ( - FAKE_SPLIT_PAIR_RESPONSE) - -FAKE_SWITCH_PAIR_RESPONSE = """ -{ - "data":{}, - "error":{ - "code":0, - "description":"0" - } -} -""" -MAP_COMMAND_TO_FAKE_RESPONSE['/REPLICATIONPAIR/switch/PUT'] = ( - FAKE_SWITCH_PAIR_RESPONSE) - -FAKE_PORTS_IN_PG_RESPONSE = """ -{ - "data": [{ - "ID": "1114114", - "WWN": "2002643e8c4c5f66" - }, - { - "ID": "1114113", - "WWN": "2001643e8c4c5f66" - }], - "error": { - "code": 0, - "description": "0" - } -} -""" - -MAP_COMMAND_TO_FAKE_RESPONSE['/fc_port/associate?TYPE=213&ASSOCIATEOBJTYPE=' - '257&ASSOCIATEOBJID=0/GET'] = ( - FAKE_PORTS_IN_PG_RESPONSE) - -MAP_COMMAND_TO_FAKE_RESPONSE['/portgroup/associate/fc_port?TYPE=257&ASSOCIA' - 'TEOBJTYPE=212&ASSOCIATEOBJID=1114369/GET'] = ( - FAKE_PORTS_IN_PG_RESPONSE) - -MAP_COMMAND_TO_FAKE_RESPONSE['/mappingview/associate/portgroup?TYPE=245&ASSOC' - 'IATEOBJTYPE=257&ASSOCIATEOBJID=1114114/GET'] = ( - FAKE_SWITCH_PAIR_RESPONSE) - -MAP_COMMAND_TO_FAKE_RESPONSE['/mappingview/associate/portgroup?TYPE=245&ASSOC' - 'IATEOBJTYPE=257&ASSOCIATEOBJID=1114113/GET'] = ( - FAKE_COMMON_SUCCESS_RESPONSE) - - -REPLICA_BACKEND_ID = 'huawei-replica-1' - - -def cg_or_cg_snapshot(func): - def wrapper(self, *args, **kwargs): - self.mock_object(volume_utils, - 'is_group_a_cg_snapshot_type', - return_value=True) - return func(self, *args, **kwargs) - return wrapper - - -class FakeHuaweiConf(huawei_conf.HuaweiConf): - def __init__(self, conf, protocol): - self.conf = conf - self.protocol = protocol 
- - def safe_get(self, key): - try: - return getattr(self.conf, key) - except Exception: - return - - def update_config_value(self): - setattr(self.conf, 'volume_backend_name', 'huawei_storage') - setattr(self.conf, 'san_address', - ['http://192.0.2.69:8082/deviceManager/rest/']) - setattr(self.conf, 'san_user', 'admin') - setattr(self.conf, 'san_password', 'Admin@storage') - setattr(self.conf, 'san_product', 'V3') - setattr(self.conf, 'san_protocol', self.protocol) - setattr(self.conf, 'lun_type', constants.THICK_LUNTYPE) - setattr(self.conf, 'lun_ready_wait_interval', 2) - setattr(self.conf, 'lun_copy_wait_interval', 2) - setattr(self.conf, 'lun_timeout', 43200) - setattr(self.conf, 'lun_write_type', '1') - setattr(self.conf, 'lun_mirror_switch', '1') - setattr(self.conf, 'lun_prefetch_type', '1') - setattr(self.conf, 'lun_prefetch_value', '0') - setattr(self.conf, 'lun_policy', '0') - setattr(self.conf, 'lun_read_cache_policy', '2') - setattr(self.conf, 'lun_write_cache_policy', '5') - setattr(self.conf, 'storage_pools', ['OpenStack_Pool']) - setattr(self.conf, 'iscsi_default_target_ip', ['192.0.2.68']) - setattr(self.conf, 'metro_san_address', - ['https://192.0.2.240:8088/deviceManager/rest/']) - setattr(self.conf, 'metro_storage_pools', 'OpenStack_Pool') - setattr(self.conf, 'metro_san_user', 'admin') - setattr(self.conf, 'metro_san_password', 'Admin@storage1') - setattr(self.conf, 'metro_domain_name', 'hypermetro_test') - - iscsi_info = {'Name': 'iqn.1993-08.debian:01:ec2bff7ac3a3', - 'TargetIP': '192.0.2.2', - 'CHAPinfo': 'mm-user;mm-user@storage', - 'ALUA': '1', - 'TargetPortGroup': 'portgroup-test', } - setattr(self.conf, 'iscsi_info', [iscsi_info]) - - rmt_iscsi_info = ('{ Name: iqn.1993-08.debian:01:ec2bff7acxxx;\n' - 'TargetIP:1.1.1.1;CHAPinfo:mm-user#mm-user@storage;' - 'ALUA:1; TargetPortGroup:portgroup-test};\t\n ' - '{ Name: iqn.1993-08.debian:01:ec2bff7acyyy;\n' - 'TargetIP:2.2.2.2;CHAPinfo:nn-user#nn-user@storage;' - 'ALUA:0; 
TargetPortGroup:portgroup-test1}\t\n') - - targets = [{'backend_id': REPLICA_BACKEND_ID, - 'storage_pool': 'OpenStack_Pool', - 'san_address': - 'https://192.0.2.69:8088/deviceManager/rest/', - 'san_user': 'admin', - 'san_password': 'Admin@storage1', - 'iscsi_info': rmt_iscsi_info}] - setattr(self.conf, 'replication_device', targets) - - setattr(self.conf, 'safe_get', self.safe_get) - - -class FakeClient(rest_client.RestClient): - - def __init__(self, configuration): - san_address = configuration.san_address - san_user = configuration.san_user - san_password = configuration.san_password - rest_client.RestClient.__init__(self, configuration, - san_address, - san_user, - san_password) - self.test_fail = False - self.test_multi_url_flag = False - self.cache_not_exist = False - self.partition_not_exist = False - - def _get_snapshotid_by_name(self, snapshot_name): - return "11" - - def _check_snapshot_exist(self, snapshot_id): - return True - - def get_partition_id_by_name(self, name): - if self.partition_not_exist: - return None - return "11" - - def get_cache_id_by_name(self, name): - if self.cache_not_exist: - return None - return "11" - - def add_lun_to_cache(self, lunid, cache_id): - pass - - def do_call(self, url, data, method, calltimeout=4, - log_filter_flag=False): - url = url.replace('http://192.0.2.69:8082/deviceManager/rest', '') - command = url.replace('/210235G7J20000000000/', '') - data = json.dumps(data) if data else None - - if method: - command = command + "/" + method - - for item in MAP_COMMAND_TO_FAKE_RESPONSE.keys(): - if command == item: - data = MAP_COMMAND_TO_FAKE_RESPONSE[item] - if self.test_fail: - data = FAKE_ERROR_INFO_RESPONSE - if command == 'lun/11/GET': - data = FAKE_ERROR_LUN_INFO_RESPONSE - - self.test_fail = False - - if self.test_multi_url_flag: - data = FAKE_ERROR_CONNECT_RESPONSE - self.test_multi_url_flag = False - - return json.loads(data) - - -class FakeReplicaPairManager(replication.ReplicaPairManager): - def 
_init_rmt_client(self): - self.rmt_client = FakeClient(self.conf) - - -class FakeISCSIStorage(huawei_driver.HuaweiISCSIDriver): - """Fake Huawei Storage, Rewrite some methods of HuaweiISCSIDriver.""" - - def __init__(self, configuration): - self.configuration = configuration - self.huawei_conf = FakeHuaweiConf(self.configuration, 'iSCSI') - self.active_backend_id = None - self.replica = None - self.support_func = None - - def do_setup(self): - self.metro_flag = True - self.huawei_conf.update_config_value() - self.get_local_and_remote_dev_conf() - - self.client = FakeClient(configuration=self.configuration) - self.rmt_client = FakeClient(configuration=self.configuration) - self.replica_client = FakeClient(configuration=self.configuration) - self.metro = hypermetro.HuaweiHyperMetro(self.client, - self.rmt_client, - self.configuration) - self.replica = FakeReplicaPairManager(self.client, - self.replica_client, - self.configuration) - - -class FakeFCStorage(huawei_driver.HuaweiFCDriver): - """Fake Huawei Storage, Rewrite some methods of HuaweiISCSIDriver.""" - - def __init__(self, configuration): - self.configuration = configuration - self.fcsan = None - self.huawei_conf = FakeHuaweiConf(self.configuration, 'iSCSI') - self.active_backend_id = None - self.replica = None - self.support_func = None - - def do_setup(self): - self.metro_flag = True - self.huawei_conf.update_config_value() - self.get_local_and_remote_dev_conf() - - self.client = FakeClient(configuration=self.configuration) - self.rmt_client = FakeClient(configuration=self.configuration) - self.replica_client = FakeClient(configuration=self.configuration) - self.metro = hypermetro.HuaweiHyperMetro(self.client, - self.rmt_client, - self.configuration) - self.replica = FakeReplicaPairManager(self.client, - self.replica_client, - self.configuration) - - -@ddt.ddt -class HuaweiTestBase(test.TestCase): - """Base class for Huawei test cases. - - Implement common setup operations or test cases in this class. 
- """ - - def setUp(self): - super(HuaweiTestBase, self).setUp() - self.configuration = mock.Mock(spec=conf.Configuration) - self.driver = FakeISCSIStorage(configuration=self.configuration) - self.driver.do_setup() - - self.volume = fake_volume.fake_volume_obj( - admin_contex, host=HOST, provider_location=PROVIDER_LOCATION, - metadata=METADATA, id=ID) - - self.snapshot = fake_snapshot.fake_snapshot_obj( - admin_contex, provider_location=SNAP_PROVIDER_LOCATION, id=ID) - - self.snapshot.volume = self.volume - - self.replica_volume = fake_volume.fake_volume_obj( - admin_contex, host=HOST, provider_location=PROVIDER_LOCATION, - metadata=METADATA, replication_status='disabled', - replication_driver_data=REPLICA_DRIVER_DATA, id=ID) - - self.hyper_volume = fake_volume.fake_volume_obj( - admin_contex, host=HOST, - provider_location=PROVIDER_LOCATION_WITH_HYPERMETRO, - id=ID) - - self.original_volume = fake_volume.fake_volume_obj(admin_contex, - id=ID) - - self.current_volume = fake_volume.fake_volume_obj( - admin_contex, id=ID, provider_location=PROVIDER_LOCATION, - name_id=ID) - - self.group_snapshot = fake_group_snapshot.fake_group_snapshot_obj( - admin_contex, id=ID, group_id=ID, status='available') - - self.group = fake_group.fake_group_obj( - admin_contex, id=ID, status='available') - - def test_encode_name(self): - lun_name = huawei_utils.encode_name(self.volume.id) - - # The hash value is different between py27 and py34. - # So we use assertIn. 
- self.assertIn(lun_name, ('21ec7341-4687000622165227970', - '21ec7341-7953146827712520106')) - - @mock.patch.object(rest_client, 'RestClient') - def test_create_snapshot_success(self, mock_client): - lun_info = self.driver.create_snapshot(self.snapshot) - self.assertDictEqual({"huawei_snapshot_id": "11"}, - json.loads(lun_info['provider_location'])) - - self.snapshot.volume_id = ID - self.snapshot.volume = self.volume - lun_info = self.driver.create_snapshot(self.snapshot) - self.assertDictEqual({"huawei_snapshot_id": "11"}, - json.loads(lun_info['provider_location'])) - - @ddt.data('1', '', '0') - def test_copy_volume(self, input_speed): - self.driver.configuration.lun_copy_wait_interval = 0 - self.volume.metadata = {'copyspeed': input_speed} - - mocker = self.mock_object( - self.driver.client, 'create_luncopy', - mock.Mock(wraps=self.driver.client.create_luncopy)) - - self.driver._copy_volume(self.volume, - 'fake_copy_name', - 'fake_src_lun', - 'fake_tgt_lun') - - mocker.assert_called_once_with('fake_copy_name', - 'fake_src_lun', - 'fake_tgt_lun', - input_speed) - - @ddt.data({'input_speed': '1', - 'actual_speed': '1'}, - {'input_speed': '', - 'actual_speed': '2'}, - {'input_speed': None, - 'actual_speed': '2'}, - {'input_speed': '5', - 'actual_speed': '2'}) - @ddt.unpack - def test_client_create_luncopy(self, input_speed, actual_speed): - mocker = self.mock_object( - self.driver.client, 'call', - mock.Mock(wraps=self.driver.client.call)) - - self.driver.client.create_luncopy('fake_copy_name', - 'fake_src_lun', - 'fake_tgt_lun', - input_speed) - - mocker.assert_called_once_with( - mock.ANY, - {"TYPE": 219, - "NAME": 'fake_copy_name', - "DESCRIPTION": 'fake_copy_name', - "COPYSPEED": actual_speed, - "LUNCOPYTYPE": "1", - "SOURCELUN": "INVALID;fake_src_lun;INVALID;INVALID;INVALID", - "TARGETLUN": "INVALID;fake_tgt_lun;INVALID;INVALID;INVALID"}, - 'POST' - ) - - @ddt.data( - { - 'volume': fake_volume.fake_volume_obj( - admin_contex, - 
provider_location=PROVIDER_LOCATION), - 'expect': {'huawei_lun_id': '11', - 'huawei_lun_wwn': '6643e8c1004c5f6723e9f454003'} - }, - { - 'volume': fake_volume.fake_volume_obj( - admin_contex, - provider_location=None), - 'expect': {} - }, - { - 'volume': fake_volume.fake_volume_obj( - admin_contex, - provider_location=''), - 'expect': {} - }, - { - 'volume': fake_volume.fake_volume_obj( - admin_contex, - provider_location='11', - volume_admin_metadata=ADMIN_METADATA, - volume_metadata=VOL_METADATA - ), - 'expect': {'huawei_lun_id': '11', - 'huawei_lun_wwn': 'FAKE_WWN', - 'hypermetro_id': '11', - 'remote_lun_id': '1'} - } - ) - @ddt.unpack - def test_get_lun_metadata(self, volume, expect): - metadata = huawei_utils.get_lun_metadata(volume) - self.assertEqual(expect, metadata) - - @ddt.data( - { - 'snapshot': fake_snapshot.fake_snapshot_obj( - admin_contex, - provider_location=SNAP_PROVIDER_LOCATION), - 'expect': {'huawei_snapshot_id': '11'} - }, - { - 'snapshot': fake_snapshot.fake_snapshot_obj( - admin_contex, - provider_location=None), - 'expect': {} - }, - { - 'snapshot': fake_snapshot.fake_snapshot_obj( - admin_contex, - provider_location=''), - 'expect': {} - }, - { - 'snapshot': fake_snapshot.fake_snapshot_obj( - admin_contex, - provider_location='11'), - 'expect': {'huawei_snapshot_id': '11'} - } - ) - @ddt.unpack - def test_get_snapshot_metadata(self, snapshot, expect): - metadata = huawei_utils.get_snapshot_metadata(snapshot) - self.assertEqual(expect, metadata) - - -@ddt.ddt -class HuaweiISCSIDriverTestCase(HuaweiTestBase): - - def setUp(self): - super(HuaweiISCSIDriverTestCase, self).setUp() - self.configuration = mock.Mock(spec=conf.Configuration) - self.configuration.hypermetro_devices = hypermetro_devices - self.flags(rpc_backend='oslo_messaging._drivers.impl_fake') - self.driver = FakeISCSIStorage(configuration=self.configuration) - self.driver.do_setup() - self.portgroup = 'portgroup-test' - self.iscsi_iqns = 
['iqn.2006-08.com.huawei:oceanstor:21000022a:' - ':20503:192.0.2.1', - 'iqn.2006-08.com.huawei:oceanstor:21000022a:' - ':20500:192.0.2.2'] - self.target_ips = ['192.0.2.1', - '192.0.2.2'] - self.portgroup_id = 11 - self.driver.client.login() - - def test_parse_rmt_iscsi_info(self): - rmt_devs = self.driver.huawei_conf.get_replication_devices() - iscsi_info = rmt_devs[0]['iscsi_info'] - expected_iscsi_info = [{'Name': 'iqn.1993-08.debian:01:ec2bff7acxxx', - 'TargetIP': '1.1.1.1', - 'CHAPinfo': 'mm-user;mm-user@storage', - 'ALUA': '1', - 'TargetPortGroup': 'portgroup-test'}, - {'Name': 'iqn.1993-08.debian:01:ec2bff7acyyy', - 'TargetIP': '2.2.2.2', - 'CHAPinfo': 'nn-user;nn-user@storage', - 'ALUA': '0', - 'TargetPortGroup': 'portgroup-test1'}] - self.assertEqual(expected_iscsi_info, iscsi_info) - - def test_parse_rmt_iscsi_info_without_iscsi_configuration(self): - self.configuration.replication_device[0]['iscsi_info'] = '' - rmt_devs = self.driver.huawei_conf.get_replication_devices() - iscsi_info = rmt_devs[0]['iscsi_info'] - self.assertEqual([], iscsi_info) - - def test_login_success(self): - device_id = self.driver.client.login() - self.assertEqual('210235G7J20000000000', device_id) - - @ddt.data(constants.PWD_EXPIRED, constants.PWD_RESET) - def test_login_password_expires_and_reset_fail(self, state): - with mock.patch.object(self.driver.client, 'logout') as mock_logout: - self.mock_object(FakeClient, 'do_call', - return_value={"error": {"code": 0}, - "data": { - "username": "admin", - "iBaseToken": "2001031430", - "deviceid": "210235G7J20000000000", - "accountstate": state}}) - self.assertRaises(exception.VolumeBackendAPIException, - self.driver.client.login) - mock_logout.assert_called_once_with() - - def test_login_logout_fail(self): - login_info = {"error": {"code": 0}, - "data": {"username": "admin", - "iBaseToken": "2001031430", - "deviceid": "210235G7J20000000000", - "accountstate": 3}} - logout_info = {"error": {"code": 1}, "data": {}} - 
self.mock_object(FakeClient, 'do_call', - side_effect=[login_info, logout_info]) - self.assertRaises(exception.VolumeBackendAPIException, - self.driver.client.login) - - def test_check_volume_exist_on_array(self): - self.mock_object(rest_client.RestClient, 'get_lun_id_by_name', - return_value=None) - self.driver._check_volume_exist_on_array( - self.volume, constants.VOLUME_NOT_EXISTS_WARN) - - def test_create_volume_success(self): - # Have pool info in the volume. - self.volume.host = 'ubuntu001@backend001#OpenStack_Pool' - - lun_info = self.driver.create_volume(self.volume) - expect_value = {"huawei_lun_id": "1", - "huawei_lun_wwn": "6643e8c1004c5f6723e9f454003"} - self.assertDictEqual(expect_value, - json.loads(lun_info['provider_location'])) - - # No pool info in the volume. - self.volume.host = 'ubuntu001@backend001' - lun_info = self.driver.create_volume(self.volume) - expect_value = {"huawei_lun_id": "1", - "huawei_lun_wwn": "6643e8c1004c5f6723e9f454003"} - self.assertDictEqual(expect_value, - json.loads(lun_info['provider_location'])) - - @ddt.data(FAKE_POOLS_UNSUPPORT_REPORT, FAKE_POOLS_SUPPORT_REPORT) - def test_delete_replication_fail(self, pool_data): - self.driver.support_func = pool_data - self.mock_object(replication.ReplicaCommonDriver, 'split') - self.mock_object( - huawei_driver.HuaweiBaseDriver, - '_get_volume_type', - return_value={'extra_specs': sync_replica_specs}) - self.mock_object(rest_client.RestClient, - 'delete_lun', - side_effect=exception.VolumeBackendAPIException( - data='err')) - self.assertRaises(exception.VolumeBackendAPIException, - self.driver.delete_volume, self.replica_volume) - - @ddt.data(FAKE_POOLS_UNSUPPORT_REPORT, FAKE_POOLS_SUPPORT_REPORT) - def test_migrate_volume_success_no_data(self, pool_data): - self.driver.support_func = pool_data - task_info = {"data": [{"ENDTIME": "1436816174", - "ID": "9", - "PARENTID": "11", - "PARENTNAME": "xmRBHMlVRruql5vwthpPXQ", - "PROCESS": "-1", - "RUNNINGSTATUS": "76", - "SPEED": "2", - 
"STARTTIME": "1436816111", - "TARGETLUNID": "1", - "TARGETLUNNAME": "4924891454902893639", - "TYPE": 253, - "WORKMODE": "0" - }], - "error": {"code": 0, - "description": "0"} - } - moved = False - empty_dict = {} - self.mock_object(rest_client.RestClient, 'get_lun_migration_task', - side_effect=[{}, task_info]) - moved, model_update = self.driver.migrate_volume(None, - self.volume, - test_host, - None) - self.assertTrue(moved) - self.assertEqual(empty_dict, model_update) - - @ddt.data(FAKE_POOLS_UNSUPPORT_REPORT, FAKE_POOLS_SUPPORT_REPORT) - def test_migrate_volume_success_with_replication(self, pool_data): - self.driver.support_func = pool_data - task_info = {"data": [{"ENDTIME": "1436816174", - "ID": "9", - "PARENTID": "11", - "PARENTNAME": "xmRBHMlVRruql5vwthpPXQ", - "PROCESS": "-1", - "RUNNINGSTATUS": "76", - "SPEED": "2", - "STARTTIME": "1436816111", - "TARGETLUNID": "1", - "TARGETLUNNAME": "4924891454902893639", - "TYPE": 253, - "WORKMODE": "0" - }], - "error": {"code": 0, - "description": "0"} - } - moved = False - empty_dict = {} - self.mock_object(rest_client.RestClient, 'get_lun_migration_task', - return_value=task_info) - moved, model_update = self.driver.migrate_volume(None, - self.replica_volume, - test_host, - None) - self.assertTrue(moved) - self.assertEqual(empty_dict, model_update) - - @ddt.data(FAKE_POOLS_UNSUPPORT_REPORT, FAKE_POOLS_SUPPORT_REPORT) - def test_migrate_volume_fail_migration_fault(self, pool_data): - self.driver.support_func = pool_data - task_info = {"data": [{"ENDTIME": "1436816174", - "ID": "9", - "PARENTID": "11", - "PARENTNAME": "xmRBHMlVRruql5vwthpPXQ", - "PROCESS": "-1", - "RUNNINGSTATUS": "74", - "SPEED": "2", - "STARTTIME": "1436816111", - "TARGETLUNID": "1", - "TARGETLUNNAME": "4924891454902893639", - "TYPE": 253, - "WORKMODE": "0" - }], - "error": {"code": 0, - "description": "0"} - } - self.mock_object(rest_client.RestClient, 'get_lun_migration_task', - return_value=task_info) - 
self.assertRaises(exception.VolumeBackendAPIException, - self.driver.migrate_volume, - None, self.volume, test_host, None) - - @ddt.data(FAKE_POOLS_UNSUPPORT_REPORT, FAKE_POOLS_SUPPORT_REPORT) - def test_migrate_volume_fail_no_migrate_task(self, pool_data): - self.driver.support_func = pool_data - task_info = {"data": [{"ENDTIME": "1436816174", - "ID": "9", - "PARENTID": "12", - "PARENTNAME": "xmRBHMlVRruql5vwthpPXQ", - "PROCESS": "-1", - "RUNNINGSTATUS": "76", - "SPEED": "2", - "STARTTIME": "1436816111", - "TARGETLUNID": "1", - "TARGETLUNNAME": "4924891454902893639", - "TYPE": 253, - "WORKMODE": "0" - }], - "error": {"code": 0, - "description": "0"} - } - self.mock_object(rest_client.RestClient, 'get_lun_migration_task', - return_value=task_info) - self.assertRaises(exception.VolumeBackendAPIException, - self.driver.migrate_volume, - None, self.volume, test_host, None) - - @ddt.data(FAKE_POOLS_UNSUPPORT_REPORT, FAKE_POOLS_SUPPORT_REPORT) - def test_migrate_volume_with_type_id(self, pool_data): - self.driver.support_func = pool_data - self.volume.volume_type_id = '550c089b-bfdd-4f7f-86e1-3ba88125555c' - task_info = {"data": [{"ENDTIME": "1436816174", - "ID": "9", - "PARENTID": "11", - "PARENTNAME": "xmRBHMlVRruql5vwthpPXQ", - "PROCESS": "-1", - "RUNNINGSTATUS": "76", - "SPEED": "2", - "STARTTIME": "1436816111", - "TARGETLUNID": "1", - "TARGETLUNNAME": "4924891454902893639", - "TYPE": 253, - "WORKMODE": "0" - }], - "error": {"code": 0, - "description": "0"} - } - empty_dict = {} - self.mock_object(volume_types, 'get_volume_type', - return_value=test_new_type) - self.mock_object(rest_client.RestClient, 'get_lun_migration_task', - return_value=task_info) - moved, model_update = self.driver.migrate_volume(None, - self.volume, - test_host, - None) - self.assertTrue(moved) - self.assertEqual(empty_dict, model_update) - - @ddt.data(FAKE_POOLS_UNSUPPORT_REPORT, FAKE_POOLS_SUPPORT_REPORT) - def test_manage_existing_fail(self, pool_data): - self.driver.support_func = 
pool_data - self.mock_object(rest_client.RestClient, 'get_lun_info', - return_value={'CAPACITY': 2097152, 'ALLOCTYPE': 1}) - self.mock_object(rest_client.RestClient, 'get_lun_id_by_name', - return_value='ID1') - self.mock_object(rest_client.RestClient, 'rename_lun') - self.mock_object(huawei_driver.HuaweiBaseDriver, - '_get_lun_info_by_ref', - return_value={ - 'PARENTNAME': 'OpenStack_Pool', - 'SNAPSHOTIDS': [], - 'ID': 'ID1', - 'HEALTHSTATUS': constants.STATUS_HEALTH, - 'WWN': '6643e8c1004c5f6723e9f454003'}) - self.mock_object(volume_types, 'get_volume_type', - return_value={'extra_specs': test_new_type}) - self.mock_object(huawei_driver.HuaweiBaseDriver, - '_check_needed_changes', - return_value={}) - external_ref = {'source-name': 'test1', - 'source-id': 'ID1'} - - self.driver.manage_existing(self.volume, external_ref) - - @ddt.data(FAKE_POOLS_UNSUPPORT_REPORT, FAKE_POOLS_SUPPORT_REPORT) - def test_delete_volume_success(self, pool_data): - self.driver.support_func = pool_data - self.driver.delete_volume(self.volume) - - def test_delete_snapshot_success(self): - self.driver.delete_snapshot(self.snapshot) - - def test_create_volume_from_snapsuccess(self): - self.mock_object( - huawei_driver.HuaweiBaseDriver, - '_get_volume_type', - return_value={'extra_specs': sync_replica_specs}) - self.mock_object(replication.ReplicaCommonDriver, 'sync') - model_update = self.driver.create_volume_from_snapshot(self.volume, - self.snapshot) - expect_value = {"huawei_lun_id": "1", - "huawei_lun_wwn": "6643e8c1004c5f6723e9f454003"} - self.assertDictEqual(expect_value, - json.loads(model_update['provider_location'])) - - driver_data = {'pair_id': TEST_PAIR_ID, - 'rmt_lun_id': '1', - 'rmt_lun_wwn': '6643e8c1004c5f6723e9f454003'} - self.assertDictEqual( - driver_data, json.loads(model_update['replication_driver_data'])) - self.assertEqual('available', model_update['replication_status']) - - @mock.patch.object(huawei_driver.HuaweiISCSIDriver, - 'initialize_connection', - 
return_value={"data": {'target_lun': 1}}) - def test_initialize_connection_snapshot_success(self, mock_iscsi_init): - iscsi_properties = self.driver.initialize_connection_snapshot( - self.snapshot, FakeConnector) - volume = Volume(id=self.snapshot.id, - provider_location=self.snapshot.provider_location, - lun_type='27', - metadata=None) - self.assertEqual(1, iscsi_properties['data']['target_lun']) - mock_iscsi_init.assert_called_with(volume, FakeConnector) - - def test_initialize_connection_success_multipath_portgroup(self): - temp_connector = copy.deepcopy(FakeConnector) - temp_connector['multipath'] = True - self.mock_object(rest_client.RestClient, 'get_tgt_port_group', - return_value = '11') - iscsi_properties = self.driver.initialize_connection(self.volume, - temp_connector) - self.assertEqual([1, 1], iscsi_properties['data']['target_luns']) - - def test_initialize_connection_fail_multipath_portgroup(self): - temp_connector = copy.deepcopy(FakeConnector) - temp_connector['multipath'] = True - self.mock_object(rest_client.RestClient, 'get_tgt_port_group', - return_value = '12') - self.mock_object(rest_client.RestClient, '_get_tgt_ip_from_portgroup', - return_value = []) - self.assertRaises(exception.VolumeBackendAPIException, - self.driver.initialize_connection, - self.volume, temp_connector) - - def test_initialize_connection_success_multipath_targetip(self): - iscsi_info = [{'Name': 'iqn.1993-08.debian:01:ec2bff7ac3a3', - 'TargetIP': '192.0.2.2', - 'CHAPinfo': 'mm-user;mm-user@storage', - 'ALUA': '1'}] - - configuration = mock.Mock(spec = conf.Configuration) - configuration.hypermetro_devices = hypermetro_devices - driver = FakeISCSIStorage(configuration = self.configuration) - driver.do_setup() - driver.configuration.iscsi_info = iscsi_info - driver.client.iscsi_info = iscsi_info - temp_connector = copy.deepcopy(FakeConnector) - temp_connector['multipath'] = True - iscsi_properties = driver.initialize_connection(self.volume, - temp_connector) - 
self.assertEqual([1], iscsi_properties['data']['target_luns']) - - def test_initialize_connection_fail_multipath_targetip(self): - iscsi_info = [{'Name': 'iqn.1993-08.debian:01:ec2bff7ac3a3', - 'TargetIP': '192.0.2.6', - 'CHAPinfo': 'mm-user;mm-user@storage', - 'ALUA': '1'}] - - configuration = mock.Mock(spec = conf.Configuration) - configuration.hypermetro_devices = hypermetro_devices - driver = FakeISCSIStorage(configuration = self.configuration) - driver.do_setup() - driver.configuration.iscsi_info = iscsi_info - driver.client.iscsi_info = iscsi_info - temp_connector = copy.deepcopy(FakeConnector) - temp_connector['multipath'] = True - self.assertRaises(exception.VolumeBackendAPIException, - driver.initialize_connection, - self.volume, temp_connector) - - def test_initialize_connection_success_multipath_defaultip(self): - iscsi_info = [{'Name': 'iqn.1993-08.debian:01:ec2bff7ac3a3', - 'CHAPinfo': 'mm-user;mm-user@storage', - 'ALUA': '1'}] - default_target_ip = ['192.0.2.2'] - configuration = mock.Mock(spec = conf.Configuration) - configuration.hypermetro_devices = hypermetro_devices - driver = FakeISCSIStorage(configuration = self.configuration) - driver.do_setup() - driver.configuration.iscsi_info = iscsi_info - driver.client.iscsi_info = iscsi_info - driver.configuration.iscsi_default_target_ip = default_target_ip - driver.client.iscsi_default_target_ip = default_target_ip - temp_connector = copy.deepcopy(FakeConnector) - temp_connector['multipath'] = True - iscsi_properties = driver.initialize_connection(self.volume, - temp_connector) - self.assertEqual([1], iscsi_properties['data']['target_luns']) - - def test_initialize_connection_fail_multipath_defaultip(self): - iscsi_info = [{'Name': 'iqn.1993-08.debian:01:ec2bff7ac3a3', - 'CHAPinfo': 'mm-user;mm-user@storage', - 'ALUA': '1'}] - - default_target_ip = ['192.0.2.6'] - configuration = mock.Mock(spec = conf.Configuration) - configuration.hypermetro_devices = hypermetro_devices - driver = 
FakeISCSIStorage(configuration = self.configuration) - driver.do_setup() - driver.configuration.iscsi_info = iscsi_info - driver.client.iscsi_info = iscsi_info - driver.configuration.iscsi_default_target_ip = default_target_ip - driver.client.iscsi_default_target_ip = default_target_ip - temp_connector = copy.deepcopy(FakeConnector) - temp_connector['multipath'] = True - self.assertRaises(exception.VolumeBackendAPIException, - driver.initialize_connection, - self.volume, temp_connector) - - def test_initialize_connection_fail_no_port_in_portgroup(self): - temp_connector = copy.deepcopy(FakeConnector) - temp_connector['multipath'] = True - self.mock_object(rest_client.RestClient, 'get_tgt_port_group', - return_value='11') - self.mock_object(rest_client.RestClient, '_get_tgt_ip_from_portgroup', - return_value=[]) - self.assertRaises(exception.VolumeBackendAPIException, - self.driver.initialize_connection, - self.volume, temp_connector) - - def test_initialize_connection_fail_multipath_no_ip(self): - iscsi_info = [{'Name': 'iqn.1993-08.debian:01:ec2bff7ac3a3', - 'CHAPinfo': 'mm-user;mm-user@storage', - 'ALUA': '1'}] - configuration = mock.Mock(spec = conf.Configuration) - configuration.hypermetro_devices = hypermetro_devices - driver = FakeISCSIStorage(configuration = self.configuration) - driver.do_setup() - driver.configuration.iscsi_info = iscsi_info - driver.client.iscsi_info = iscsi_info - driver.configuration.iscsi_default_target_ip = None - driver.client.iscsi_default_target_ip = None - temp_connector = copy.deepcopy(FakeConnector) - temp_connector['multipath'] = True - self.assertRaises(exception.VolumeBackendAPIException, - driver.initialize_connection, - self.volume, temp_connector) - - @mock.patch.object(huawei_driver.HuaweiISCSIDriver, - 'terminate_connection') - def test_terminate_connection_snapshot_success(self, mock_iscsi_term): - self.driver.terminate_connection_snapshot(self.snapshot, - FakeConnector) - volume = Volume(id=self.snapshot.id, - 
provider_location=self.snapshot.provider_location, - lun_type='27', - metadata=None) - mock_iscsi_term.assert_called_with(volume, FakeConnector) - - def test_terminate_connection_success(self): - self.driver.terminate_connection(self.volume, FakeConnector) - - def test_get_volume_status(self): - data = self.driver.get_volume_stats() - self.assertEqual(self.driver.VERSION, data['driver_version']) - - @mock.patch.object(rest_client.RestClient, 'get_lun_info', - return_value={"CAPACITY": 6291456}) - @mock.patch.object(rest_client.RestClient, 'extend_lun') - def test_extend_volume_size_equal(self, mock_extend, mock_lun_info): - self.driver.extend_volume(self.volume, 3) - self.assertEqual(0, mock_extend.call_count) - - @mock.patch.object(rest_client.RestClient, 'get_lun_info', - return_value={"CAPACITY": 5291456}) - @mock.patch.object(rest_client.RestClient, 'extend_lun') - def test_extend_volume_success(self, mock_extend, mock_lun_info): - self.driver.extend_volume(self.volume, 3) - self.assertEqual(1, mock_extend.call_count) - - @mock.patch.object(rest_client.RestClient, 'get_lun_info', - return_value={"CAPACITY": 7291456}) - def test_extend_volume_fail(self, mock_lun_info): - self.assertRaises(exception.VolumeBackendAPIException, - self.driver.extend_volume, self.volume, 3) - - def test_extend_nonexistent_volume(self): - self.volume = fake_volume.fake_volume_obj(admin_contex) - self.mock_object(rest_client.RestClient, - 'get_lun_id_by_name', - return_value=None) - self.assertRaises(exception.VolumeBackendAPIException, - self.driver.extend_volume, - self.volume, 3) - - def test_get_volume_metadata(self): - metadata = [{'key': 'huawei_lun_wwn', 'value': '1'}] - tmp_volume = fake_volume.fake_volume_obj( - admin_contex, volume_metadata=metadata) - expected_value = {'huawei_lun_wwn': '1'} - metadata = huawei_utils.get_volume_metadata(tmp_volume) - self.assertEqual(expected_value, metadata) - - expected_value = {'huawei_lun_wwn': '1'} - tmp_volume = 
fake_volume.fake_volume_obj(admin_contex) - tmp_volume.metadata = expected_value - metadata = huawei_utils.get_volume_metadata(tmp_volume) - self.assertEqual(expected_value, metadata) - - def test_login_fail(self): - self.driver.client.test_fail = True - self.assertRaises(exception.VolumeBackendAPIException, - self.driver.client.login) - - def test_create_snapshot_fail(self): - self.driver.client.test_fail = True - self.assertRaises(exception.VolumeBackendAPIException, - self.driver.create_snapshot, self.snapshot) - - def test_create_volume_fail(self): - self.driver.client.test_fail = True - self.assertRaises(exception.VolumeBackendAPIException, - self.driver.create_volume, self.volume) - - def test_delete_volume_fail(self): - self.driver.client.test_fail = True - self.driver.delete_volume(self.volume) - - def test_delete_snapshot_fail(self): - self.driver.client.test_fail = True - self.driver.delete_snapshot(self.snapshot) - - def test_delete_snapshot_with_snapshot_nonexistent(self): - self.snapshot.provider_location = None - self.driver.delete_snapshot(self.snapshot) - - def test_initialize_connection_fail(self): - self.driver.client.test_fail = True - - self.assertRaises(exception.VolumeBackendAPIException, - self.driver.initialize_connection, - self.volume, FakeConnector) - - def test_lun_is_associated_to_lungroup(self): - self.driver.client.associate_lun_to_lungroup('11', '11') - result = self.driver.client._is_lun_associated_to_lungroup('11', - '11') - self.assertTrue(result) - - def test_lun_is_not_associated_to_lun_group(self): - self.driver.client.associate_lun_to_lungroup('12', '12') - self.driver.client.remove_lun_from_lungroup('12', '12') - result = self.driver.client._is_lun_associated_to_lungroup('12', '12') - self.assertFalse(result) - - def test_get_tgtip(self): - portg_id = self.driver.client.get_tgt_port_group(self.portgroup) - target_ip = self.driver.client._get_tgt_ip_from_portgroup(portg_id) - self.assertEqual(self.target_ips, target_ip) - - 
def test_find_chap_info(self): - tmp_dict = {} - tmp_dict['Name'] = 'iqn.1993-08.debian:01:ec2bff7ac3a3' - tmp_dict['CHAPinfo'] = 'mm-user;mm-user@storage' - iscsi_info = [tmp_dict] - initiator_name = FakeConnector['initiator'] - chapinfo = self.driver.client.find_chap_info(iscsi_info, - initiator_name) - chap_username, chap_password = chapinfo.split(';') - self.assertEqual('mm-user', chap_username) - self.assertEqual('mm-user@storage', chap_password) - - def test_find_alua_info(self): - tmp_dict = {} - tmp_dict['Name'] = 'iqn.1993-08.debian:01:ec2bff7ac3a3' - tmp_dict['ALUA'] = '1' - iscsi_info = [tmp_dict] - initiator_name = FakeConnector['initiator'] - type = self.driver.client._find_alua_info(iscsi_info, - initiator_name) - self.assertEqual('1', type) - - def test_get_pool_info(self): - pools = [{"NAME": "test001", - "ID": "0", - "USERFREECAPACITY": "36", - "USERTOTALCAPACITY": "48", - "USAGETYPE": constants.BLOCK_STORAGE_POOL_TYPE, - "TIER0CAPACITY": "48", - "TIER1CAPACITY": "0", - "TIER2CAPACITY": "0"}, - {"NAME": "test002", - "ID": "1", - "USERFREECAPACITY": "37", - "USERTOTALCAPACITY": "49", - "USAGETYPE": constants.FILE_SYSTEM_POOL_TYPE, - "TIER0CAPACITY": "0", - "TIER1CAPACITY": "49", - "TIER2CAPACITY": "0"}, - {"NAME": "test003", - "ID": "0", - "USERFREECAPACITY": "36", - "DATASPACE": "35", - "USERTOTALCAPACITY": "48", - "USAGETYPE": constants.BLOCK_STORAGE_POOL_TYPE, - "TIER0CAPACITY": "0", - "TIER1CAPACITY": "0", - "TIER2CAPACITY": "48"}, - {"NAME": "test004", - "ID": "0", - "USERFREECAPACITY": "36", - "DATASPACE": "35", - "USERTOTALCAPACITY": "48", - "USAGETYPE": constants.BLOCK_STORAGE_POOL_TYPE, - "TIER0CAPACITY": "40"}] - pool_name = 'test001' - test_info = {'CAPACITY': '36', 'ID': '0', 'TOTALCAPACITY': '48', - 'TIER0CAPACITY': '48', 'TIER1CAPACITY': '0', - 'TIER2CAPACITY': '0'} - pool_info = self.driver.client.get_pool_info(pool_name, pools) - self.assertEqual(test_info, pool_info) - - pool_name = 'test002' - test_info = {} - pool_info = 
self.driver.client.get_pool_info(pool_name, pools) - self.assertEqual(test_info, pool_info) - - pool_name = 'test000' - test_info = {} - pool_info = self.driver.client.get_pool_info(pool_name, pools) - self.assertEqual(test_info, pool_info) - - pool_name = 'test003' - test_info = {'CAPACITY': '35', 'ID': '0', 'TOTALCAPACITY': '48', - 'TIER0CAPACITY': '0', 'TIER1CAPACITY': '0', - 'TIER2CAPACITY': '48'} - pool_info = self.driver.client.get_pool_info(pool_name, pools) - self.assertEqual(test_info, pool_info) - - pool_name = 'test004' - test_info = {'CAPACITY': '35', 'ID': '0', 'TOTALCAPACITY': '48', - 'TIER0CAPACITY': '40', 'TIER1CAPACITY': '0', - 'TIER2CAPACITY': '0'} - pool_info = self.driver.client.get_pool_info(pool_name, pools) - self.assertEqual(test_info, pool_info) - - def test_get_smartx_specs_opts(self): - smartx_opts = smartx.SmartX().get_smartx_specs_opts(smarttier_opts) - self.assertEqual('3', smartx_opts['policy']) - - @ddt.data(FAKE_POOLS_UNSUPPORT_REPORT, FAKE_POOLS_SUPPORT_REPORT) - @mock.patch.object(smartx.SmartQos, 'get_qos_by_volume_type', - return_value={'MAXIOPS': '100', - 'IOType': '2'}) - def test_create_smartqos(self, mock_qos_value, pool_data): - self.driver.support_func = pool_data - lun_info = self.driver.create_volume(self.volume) - expect_value = {"huawei_lun_id": "1", - "huawei_lun_wwn": "6643e8c1004c5f6723e9f454003"} - self.assertDictEqual(expect_value, - json.loads(lun_info['provider_location'])) - - @ddt.data('front-end', 'back-end') - @mock.patch.object(huawei_driver.HuaweiBaseDriver, '_get_volume_params', - return_value={'smarttier': 'true', - 'smartcache': 'true', - 'smartpartition': 'true', - 'thin_provisioning_support': 'true', - 'thick_provisioning_support': 'false', - 'policy': '2', - 'cachename': 'cache-test', - 'partitionname': 'partition-test'}) - @mock.patch.object(huawei_driver.HuaweiBaseDriver, '_get_volume_type', - return_value={'qos_specs_id': u'025ce295-15e9-41a7'}) - def test_create_smartqos_success(self, - 
mock_consumer, - mock_qos_specs, - mock_value_type): - self.mock_object(qos_specs, 'get_qos_specs', - return_value={'specs': {'maxBandWidth': '100', - 'IOType': '0'}, - 'consumer': mock_consumer}) - self.driver.support_func = FAKE_POOLS_SUPPORT_REPORT - lun_info = self.driver.create_volume(self.volume) - expect_value = {"huawei_lun_id": "1", - "huawei_lun_wwn": "6643e8c1004c5f6723e9f454003"} - self.assertDictEqual(expect_value, - json.loads(lun_info['provider_location'])) - - @ddt.data([{'specs': {'maxBandWidth': '100', 'IOType': '3'}}, - FAKE_POOLS_UNSUPPORT_REPORT], - [{'specs': {'maxBandWidth': '100', 'IOType': '3'}}, - FAKE_POOLS_SUPPORT_REPORT], - [{'specs': {'minBandWidth': '0', 'IOType': '2'}}, - FAKE_POOLS_UNSUPPORT_REPORT], - [{'specs': {'minBandWidth': '0', 'IOType': '2'}}, - FAKE_POOLS_SUPPORT_REPORT]) - @ddt.unpack - def test_create_smartqos_failed(self, qos_specs_value, pool_data): - self.driver.support_func = pool_data - self.mock_object( - huawei_driver.HuaweiBaseDriver, - '_get_volume_params', - return_value={'smarttier': 'true', - 'smartcache': 'true', - 'smartpartition': 'true', - 'thin_provisioning_support': 'true', - 'thick_provisioning_support': 'false', - 'policy': '2', - 'cachename': 'cache-test', - 'partitionname': 'partition-test'}) - self.mock_object(huawei_driver.HuaweiBaseDriver, '_get_volume_type', - return_value={'qos_specs_id': u'025ce295-15e9-41a7'}) - self.mock_object(qos_specs, 'get_qos_specs', - return_value=qos_specs_value) - self.assertRaises(exception.VolumeBackendAPIException, - self.driver.create_volume, self.volume) - - @ddt.data(FAKE_POOLS_UNSUPPORT_REPORT, FAKE_POOLS_SUPPORT_REPORT) - def test_create_smartqos_without_huawei_type(self, pool_data): - self.driver.support_func = pool_data - self.mock_object( - huawei_driver.HuaweiBaseDriver, - '_get_volume_params', - return_value={'smarttier': 'true', - 'smartcache': 'true', - 'smartpartition': 'true', - 'thin_provisioning_support': 'true', - 'thick_provisioning_support': 
'false', - 'policy': '2', - 'cachename': 'cache-test', - 'partitionname': 'partition-test'}) - self.mock_object(huawei_driver.HuaweiBaseDriver, '_get_volume_type', - return_value={'qos_specs_id': u'025ce295-15e9-41a7'}) - self.mock_object(qos_specs, 'get_qos_specs', - return_value={'specs': {'fake_qos_type': '100', - 'IOType': '2'}}) - self.assertRaises(exception.VolumeBackendAPIException, - self.driver.create_volume, self.volume) - - @mock.patch.object(smartx.SmartQos, 'get_qos_by_volume_type', - return_value={'MAXIOPS': '100', - 'IOType': '2'}) - @mock.patch.object(rest_client.RestClient, 'find_array_version', - return_value='V300R003C00') - @mock.patch.object(rest_client.RestClient, 'find_available_qos', - return_value=(None, [])) - def test_create_smartqos_on_v3r3_with_no_qos(self, - mock_find_available_qos, - mock_qos_value, - mock_array_version): - self.driver.support_func = FAKE_POOLS_SUPPORT_REPORT - lun_info = self.driver.create_volume(self.volume) - expect_value = {"huawei_lun_id": "1", - "huawei_lun_wwn": "6643e8c1004c5f6723e9f454003"} - self.assertDictEqual(expect_value, - json.loads(lun_info['provider_location'])) - - @mock.patch.object(smartx.SmartQos, 'get_qos_by_volume_type', - return_value={'MINIOPS': '100', - 'IOType': '2'}) - @mock.patch.object(rest_client.RestClient, 'find_array_version', - return_value='V300R003C00') - @mock.patch.object(rest_client.RestClient, 'find_available_qos', - return_value=('11', u'["0", "2", "3"]')) - def test_create_smartqos_on_v3r3_with_qos(self, - mock_find_available_qos, - mock_qos_value, - mock_array_version): - self.driver.support_func = FAKE_POOLS_SUPPORT_REPORT - lun_info = self.driver.create_volume(self.volume) - expect_value = {"huawei_lun_id": "1", - "huawei_lun_wwn": "6643e8c1004c5f6723e9f454003"} - self.assertDictEqual(expect_value, - json.loads(lun_info['provider_location'])) - - @mock.patch.object(smartx.SmartQos, 'get_qos_by_volume_type', - return_value={'MINIOPS': '100', - 'IOType': '2'}) - 
@mock.patch.object(rest_client.RestClient, 'find_array_version', - return_value='V300R003C00') - @mock.patch.object(rest_client.RestClient, 'find_available_qos', - return_value=('11', u'["0", "2", "3"]')) - def test_create_smartqos_on_v3r3_with_unsupport_qos( - self, mock_find_available_qos, - mock_qos_value, mock_array_version): - self.driver.support_func = FAKE_POOLS_UNSUPPORT_REPORT - self.assertRaises(exception.VolumeBackendAPIException, - self.driver.create_volume, self.volume) - - @ddt.data(FAKE_POOLS_UNSUPPORT_REPORT, FAKE_POOLS_SUPPORT_REPORT) - @mock.patch.object(smartx.SmartQos, 'get_qos_by_volume_type', - return_value={'MINIOPS': '100', - 'IOType': '2'}) - @mock.patch.object(rest_client.RestClient, 'find_array_version', - return_value='V300R003C00') - @mock.patch.object(rest_client.RestClient, 'find_available_qos', - return_value=(None, [])) - @mock.patch.object(rest_client.RestClient, 'activate_deactivate_qos') - def test_create_smartqos_on_v3r3_active_failed(self, - pool_data, - mock_activate_qos, - mock_find_available_qos, - mock_qos_value, - mock_array_version): - self.driver.support_func = pool_data - mock_activate_qos.side_effect = ( - exception.VolumeBackendAPIException(data='Activate or deactivate ' - 'QoS error. 
')) - self.assertRaises(exception.VolumeBackendAPIException, - self.driver.create_volume, self.volume) - - @ddt.data(FAKE_POOLS_UNSUPPORT_REPORT, FAKE_POOLS_SUPPORT_REPORT) - @mock.patch.object(smartx.SmartQos, 'get_qos_by_volume_type', - return_value={'MINIOPS': '100', - 'IOType': '2'}) - @mock.patch.object(rest_client.RestClient, 'find_array_version', - return_value='V300R003C00') - @mock.patch.object(rest_client.RestClient, 'find_available_qos', - return_value=(None, [])) - @mock.patch.object(rest_client.RestClient, 'create_qos_policy') - def test_create_smartqos_on_v3r3_qos_failed(self, - pool_data, - mock_create_qos, - mock_find_available_qos, - mock_qos_value, - mock_array_version): - self.driver.support_func = pool_data - mock_create_qos.side_effect = ( - exception.VolumeBackendAPIException(data='Create QoS policy ' - 'error.')) - self.assertRaises(exception.VolumeBackendAPIException, - self.driver.create_volume, self.volume) - - @ddt.data(FAKE_POOLS_UNSUPPORT_REPORT, FAKE_POOLS_SUPPORT_REPORT) - @mock.patch.object(rest_client.RestClient, 'get_qos_info', - return_value={"LUNLIST": u'["1", "2", "3"]', - "RUNNINGSTATUS": "2"}) - def test_delete_smartqos_with_lun_left(self, mock_qos_info, pool_data): - self.driver.support_func = pool_data - self.driver.delete_volume(self.volume) - - @ddt.data(FAKE_POOLS_UNSUPPORT_REPORT, FAKE_POOLS_SUPPORT_REPORT) - @mock.patch.object(rest_client.RestClient, 'get_qos_info', - return_value={"LUNLIST": u'["1"]', - "RUNNINGSTATUS": "2"}) - def test_delete_smartqos_with_no_lun_left(self, mock_qos_info, pool_data): - self.driver.support_func = pool_data - self.driver.delete_volume(self.volume) - - @mock.patch.object(rest_client.RestClient, 'add_lun_to_partition') - @mock.patch.object(huawei_driver.HuaweiBaseDriver, '_get_volume_params', - return_value={'smarttier': 'true', - 'smartcache': 'true', - 'smartpartition': 'true', - 'thin_provisioning_support': 'true', - 'thick_provisioning_support': 'false', - 'policy': '2', - 
'cachename': 'cache-test', - 'partitionname': 'partition-test'}) - def test_create_smartx(self, mock_volume_types, mock_add_lun_to_partition): - lun_info = self.driver.create_volume(self.volume) - expect_value = {"huawei_lun_id": "1", - "huawei_lun_wwn": "6643e8c1004c5f6723e9f454003"} - self.assertDictEqual(expect_value, - json.loads(lun_info['provider_location'])) - - @ddt.data([{'smarttier': 'true', 'smartcache': 'true', - 'smartpartition': 'true', - 'thin_provisioning_support': 'true', - 'thick_provisioning_support': 'false', - 'policy': '2', 'cachename': None, - 'partitionname': 'partition-test'}, - FAKE_POOLS_UNSUPPORT_REPORT], - [{'smarttier': 'true', 'smartcache': 'true', - 'smartpartition': 'true', - 'thin_provisioning_support': 'true', - 'thick_provisioning_support': 'false', - 'policy': '2', 'cachename': 'cache-test', - 'partitionname': None}, - FAKE_POOLS_SUPPORT_REPORT], - [{'smarttier': 'true', 'smartcache': 'true', - 'smartpartition': 'true', - 'thin_provisioning_support': 'true', - 'thick_provisioning_support': 'false', - 'policy': '2', 'cachename': None, - 'partitionname': 'partition-test'}, - FAKE_POOLS_SUPPORT_REPORT], - [{'smarttier': 'true', 'smartcache': 'true', - 'smartpartition': 'true', - 'thin_provisioning_support': 'true', - 'thick_provisioning_support': 'false', - 'policy': '2', 'cachename': 'cache-test', - 'partitionname': None}, - FAKE_POOLS_UNSUPPORT_REPORT]) - @ddt.unpack - def test_create_smartCache_failed(self, opts, pool_data): - self.driver.support_func = pool_data - self.mock_object( - huawei_driver.HuaweiBaseDriver, - '_get_volume_params', - return_value=opts) - self.assertRaises(exception.VolumeBackendAPIException, - self.driver.create_volume, self.volume) - - @ddt.data(FAKE_POOLS_UNSUPPORT_REPORT, FAKE_POOLS_SUPPORT_REPORT) - @mock.patch.object(huawei_driver.HuaweiBaseDriver, '_get_volume_params', - return_value={'smarttier': 'true', - 'smartcache': 'true', - 'smartpartition': 'true', - 'thin_provisioning_support': 'true', - 
'thick_provisioning_support': 'false', - 'policy': '2', - 'cachename': 'cache-test', - 'partitionname': 'partition-test'}) - def test_create_smartCache_failed_with_no_cacheid(self, - mock_volume_type, - pool_data): - self.driver.client.cache_not_exist = True - self.driver.support_func = pool_data - self.assertRaises(exception.VolumeBackendAPIException, - self.driver.create_volume, self.volume) - - @ddt.data(FAKE_POOLS_UNSUPPORT_REPORT, FAKE_POOLS_SUPPORT_REPORT) - @mock.patch.object(huawei_driver.HuaweiBaseDriver, '_get_volume_params', - return_value={'smarttier': 'true', - 'smartcache': 'true', - 'smartpartition': 'true', - 'thin_provisioning_support': 'true', - 'thick_provisioning_support': 'false', - 'policy': '2', - 'cachename': 'cache-test', - 'partitionname': 'partition-test'}) - def test_create_smartPartition_failed_with_no_partid(self, - mock_volume_type, - pool_data): - self.driver.client.partition_not_exist = True - self.driver.support_func = pool_data - self.assertRaises(exception.VolumeBackendAPIException, - self.driver.create_volume, self.volume) - - def test_find_available_qos(self): - - qos = {'MAXIOPS': '100', 'IOType': '2'} - fake_qos_info_response_equal = { - "error": { - "code": 0 - }, - "data": [{ - "ID": "11", - "MAXIOPS": "100", - "LATENCY": "0", - "IOType": "2", - "FSLIST": u'[""]', - 'RUNNINGSTATUS': "2", - "NAME": "OpenStack_57_20151225102851", - "LUNLIST": u'["1", "2", "3", "4", "5", "6", "7", "8", "9",\ - "10", ,"11", "12", "13", "14", "15", "16", "17", "18", "19",\ - "20", ,"21", "22", "23", "24", "25", "26", "27", "28", "29",\ - "30", ,"31", "32", "33", "34", "35", "36", "37", "38", "39",\ - "40", ,"41", "42", "43", "44", "45", "46", "47", "48", "49",\ - "50", ,"51", "52", "53", "54", "55", "56", "57", "58", "59",\ - "60", ,"61", "62", "63", "64"]' - }] - } - # Number of LUNs in QoS is equal to 64 - with mock.patch.object(rest_client.RestClient, 'get_qos', - return_value=fake_qos_info_response_equal): - (qos_id, lun_list) = 
self.driver.client.find_available_qos(qos) - self.assertEqual((None, []), (qos_id, lun_list)) - - # Number of LUNs in QoS is less than 64 - fake_qos_info_response_less = { - "error": { - "code": 0 - }, - "data": [{ - "ID": "11", - "MAXIOPS": "100", - "LATENCY": "0", - "IOType": "2", - "FSLIST": u'[""]', - 'RUNNINGSTATUS': "2", - "NAME": "OpenStack_57_20151225102851", - "LUNLIST": u'["0", "1", "2"]' - }] - } - with mock.patch.object(rest_client.RestClient, 'get_qos', - return_value=fake_qos_info_response_less): - (qos_id, lun_list) = self.driver.client.find_available_qos(qos) - self.assertEqual(("11", u'["0", "1", "2"]'), (qos_id, lun_list)) - - @mock.patch.object(huawei_driver.HuaweiBaseDriver, '_get_volume_params', - return_value=fake_hypermetro_opts) - @mock.patch.object(rest_client.RestClient, 'get_all_pools', - return_value=FAKE_STORAGE_POOL_RESPONSE) - @mock.patch.object(rest_client.RestClient, 'get_pool_info', - return_value=FAKE_FIND_POOL_RESPONSE) - @mock.patch.object(rest_client.RestClient, 'get_hyper_domain_id', - return_value='11') - @mock.patch.object(hypermetro.HuaweiHyperMetro, '_wait_volume_ready', - return_value=True) - def test_create_hypermetro_success(self, - mock_volume_ready, - mock_hyper_domain, - mock_pool_info, - mock_all_pool_info, - mock_login_return): - location = {"huawei_lun_id": "1", - "hypermetro_id": "11", - "remote_lun_id": "1", - "huawei_lun_wwn": "6643e8c1004c5f6723e9f454003"} - lun_info = self.driver.create_volume(self.hyper_volume) - self.assertDictEqual(location, - json.loads(lun_info['provider_location'])) - - @ddt.data(FAKE_POOLS_UNSUPPORT_REPORT, FAKE_POOLS_SUPPORT_REPORT) - @mock.patch.object(huawei_driver.HuaweiBaseDriver, '_get_volume_params', - return_value=fake_hypermetro_opts) - @mock.patch.object(rest_client.RestClient, 'get_all_pools', - return_value=FAKE_STORAGE_POOL_RESPONSE) - @mock.patch.object(rest_client.RestClient, 'get_pool_info', - return_value=FAKE_FIND_POOL_RESPONSE) - 
@mock.patch.object(rest_client.RestClient, 'get_hyper_domain_id', - return_value='11') - @mock.patch.object(hypermetro.HuaweiHyperMetro, '_wait_volume_ready', - return_value=True) - @mock.patch.object(hypermetro.HuaweiHyperMetro, - '_create_hypermetro_pair') - @mock.patch.object(rest_client.RestClient, 'delete_lun') - def test_create_hypermetro_fail(self, - pool_data, - mock_delete_lun, - mock_hyper_pair_info, - mock_volume_ready, - mock_hyper_domain, - mock_pool_info, - mock_all_pool_info, - mock_hypermetro_opts - ): - self.driver.client.login() - self.driver.support_func = pool_data - mock_hyper_pair_info.side_effect = exception.VolumeBackendAPIException( - data='Create hypermetro error.') - self.assertRaises(exception.VolumeBackendAPIException, - self.driver.create_volume, self.hyper_volume) - mock_delete_lun.assert_called_with('1') - - @mock.patch.object(rest_client.RestClient, 'get_all_pools', - return_value=FAKE_STORAGE_POOL_RESPONSE) - @mock.patch.object(rest_client.RestClient, 'get_pool_info', - return_value={}) - def test_create_hypermetro_remote_pool_none_fail(self, - mock_pool_info, - mock_all_pool_info): - param = {'TYPE': '11', - 'PARENTID': ''} - self.driver.client.login() - self.assertRaises(exception.VolumeBackendAPIException, - self.driver.metro.create_hypermetro, - '2', param) - - @mock.patch.object(rest_client.RestClient, 'get_all_pools', - return_value=FAKE_STORAGE_POOL_RESPONSE) - @mock.patch.object(rest_client.RestClient, 'get_pool_info', - return_value=FAKE_FIND_POOL_RESPONSE) - @mock.patch.object(rest_client.RestClient, 'create_lun', - return_value={'CAPACITY': '2097152', - 'DESCRIPTION': '2f0635', - 'HEALTHSTATUS': '1', - 'ALLOCTYPE': '1', - 'WWN': '6643e8c1004c5f6723e9f454003', - 'ID': '1', - 'RUNNINGSTATUS': '27', - 'NAME': '5mFHcBv4RkCcD'}) - @mock.patch.object(rest_client.RestClient, 'get_hyper_domain_id', - return_value='11') - @mock.patch.object(hypermetro.HuaweiHyperMetro, '_wait_volume_ready', - return_value=True) - def 
test_create_hypermetro_remote_pool_parentid(self, - mock_volume_ready, - mock_hyper_domain, - mock_create_lun, - mock_pool_info, - mock_all_pool_info): - param = {'TYPE': '11', - 'PARENTID': ''} - self.driver.metro.create_hypermetro('2', param) - lun_PARENTID = mock_create_lun.call_args[0][0]['PARENTID'] - self.assertEqual(FAKE_FIND_POOL_RESPONSE['ID'], lun_PARENTID) - - @ddt.data(FAKE_POOLS_UNSUPPORT_REPORT, FAKE_POOLS_SUPPORT_REPORT) - @mock.patch.object(rest_client.RestClient, 'check_lun_exist', - return_value=True) - @mock.patch.object(rest_client.RestClient, 'check_hypermetro_exist', - return_value=True) - @mock.patch.object(rest_client.RestClient, 'delete_hypermetro', - return_value=FAKE_COMMON_SUCCESS_RESPONSE) - @mock.patch.object(rest_client.RestClient, 'delete_lun', - return_value=None) - def test_delete_hypermetro_success(self, - mock_delete_lun, - mock_delete_hypermetro, - mock_check_hyermetro, - mock_lun_exit, - pool_data): - self.driver.support_func = pool_data - self.driver.delete_volume(self.hyper_volume) - - @ddt.data(FAKE_POOLS_UNSUPPORT_REPORT, FAKE_POOLS_SUPPORT_REPORT) - @mock.patch.object(rest_client.RestClient, 'check_lun_exist', - return_value=True) - @mock.patch.object(rest_client.RestClient, 'check_hypermetro_exist', - return_value=True) - @mock.patch.object(rest_client.RestClient, 'get_hypermetro_by_id', - return_value=FAKE_METRO_INFO_RESPONSE) - @mock.patch.object(rest_client.RestClient, 'delete_hypermetro') - @mock.patch.object(rest_client.RestClient, 'delete_lun', - return_value=None) - def test_delete_hypermetro_fail(self, - pool_data, - mock_delete_lun, - mock_delete_hypermetro, - mock_metro_info, - mock_check_hyermetro, - mock_lun_exit): - self.driver.support_func = pool_data - mock_delete_hypermetro.side_effect = ( - exception.VolumeBackendAPIException(data='Delete hypermetro ' - 'error.')) - self.assertRaises(exception.VolumeBackendAPIException, - self.driver.delete_volume, self.hyper_volume) - 
mock_delete_lun.assert_called_with('11') - - def test_manage_existing_get_size_invalid_reference(self): - # Can't find LUN by source-name. - external_ref = {'source-name': 'LUN1'} - with mock.patch.object(rest_client.RestClient, 'get_lun_id_by_name', - return_value=None): - ex = self.assertRaises(exception.ManageExistingInvalidReference, - self.driver.manage_existing_get_size, - self.volume, external_ref) - self.assertIsNotNone(re.search('please check the source-name ' - 'or source-id', ex.msg)) - - # Can't find LUN by source-id. - external_ref = {'source-id': 'ID1'} - with mock.patch.object(rest_client.RestClient, 'get_lun_info') as m_gt: - m_gt.side_effect = exception.VolumeBackendAPIException( - data='Error') - self.assertRaises(exception.VolumeBackendAPIException, - self.driver.manage_existing_get_size, - self.volume, external_ref) - self.assertIsNotNone(re.search('please check the source-name ' - 'or source-id', ex.msg)) - - @ddt.data({'source-id': 'ID1'}, {'source-name': 'LUN1'}, - {'source-name': 'LUN1', 'source-id': 'ID1'}) - @mock.patch.object(rest_client.RestClient, 'get_lun_info', - return_value={'CAPACITY': 3097152}) - @mock.patch.object(rest_client.RestClient, 'get_lun_id_by_name', - return_value='ID1') - def test_manage_existing_get_size_success(self, mock_get_lun_id_by_name, - mock_get_lun_info, - external_ref): - size = self.driver.manage_existing_get_size(self.volume, - external_ref) - self.assertEqual(2, size) - - @mock.patch.object(rest_client.RestClient, 'get_lun_info', - return_value={'CAPACITY': 2097152, - 'ID': 'ID1', - 'PARENTNAME': 'OpenStack_Pool'}) - @mock.patch.object(rest_client.RestClient, 'get_lun_id_by_name', - return_value='ID1') - def test_manage_existing_pool_mismatch(self, mock_get_by_name, - mock_get_info): - # LUN does not belong to the specified pool. 
- with mock.patch.object(huawei_driver.HuaweiBaseDriver, - '_get_lun_info_by_ref', - return_value={'PARENTNAME': 'StoragePool'}): - external_ref = {'source-name': 'LUN1'} - ex = self.assertRaises(exception.ManageExistingInvalidReference, - self.driver.manage_existing, - self.volume, external_ref) - self.assertIsNotNone(re.search('The specified LUN does not belong' - ' to the given pool', ex.msg)) - - @mock.patch.object(rest_client.RestClient, 'get_lun_info', - return_value={'CAPACITY': 2097152, - 'ID': 'ID1', - 'PARENTNAME': 'OpenStack_Pool'}) - @mock.patch.object(rest_client.RestClient, 'get_lun_id_by_name', - return_value='ID1') - def test_manage_existing_lun_abnormal(self, mock_get_by_name, - mock_get_info): - - # Status is not normal. - ret = {'PARENTNAME': "OpenStack_Pool", - 'HEALTHSTATUS': '2'} - with mock.patch.object(huawei_driver.HuaweiBaseDriver, - '_get_lun_info_by_ref', - return_value=ret): - external_ref = {'source-name': 'LUN1'} - ex = self.assertRaises(exception.ManageExistingInvalidReference, - self.driver.manage_existing, - self.volume, external_ref) - self.assertIsNotNone(re.search('LUN status is not normal', ex.msg)) - - @ddt.data(FAKE_POOLS_UNSUPPORT_REPORT, FAKE_POOLS_SUPPORT_REPORT) - @mock.patch.object(rest_client.RestClient, 'get_hypermetro_pairs', - return_value=[{'LOCALOBJID': 'ID1'}]) - @mock.patch.object(rest_client.RestClient, 'get_lun_info', - return_value={'CAPACITY': 2097152, - 'ID': 'ID1', - 'PARENTNAME': 'OpenStack_Pool', - 'HEALTHSTATUS': constants.STATUS_HEALTH}) - @mock.patch.object(rest_client.RestClient, 'get_lun_id_by_name', - return_value='ID1') - def test_manage_existing_with_hypermetro(self, mock_get_by_name, - mock_get_info, - mock_get_hyper_pairs, - pool_data): - self.driver.support_func = pool_data - # Exists in a HyperMetroPair. 
- with mock.patch.object(rest_client.RestClient, - 'get_hypermetro_pairs', - return_value=[{'LOCALOBJID': 'ID1'}]): - external_ref = {'source-name': 'LUN1'} - ex = self.assertRaises(exception.ManageExistingInvalidReference, - self.driver.manage_existing, - self.volume, external_ref) - self.assertIsNotNone(re.search('HyperMetroPair', ex.msg)) - - @ddt.data(FAKE_POOLS_UNSUPPORT_REPORT, FAKE_POOLS_SUPPORT_REPORT) - @mock.patch.object(rest_client.RestClient, 'get_hypermetro_pairs') - @mock.patch.object(rest_client.RestClient, 'rename_lun') - @mock.patch.object(rest_client.RestClient, 'get_lun_info', - return_value={'CAPACITY': 2097152, - 'ID': 'ID1', - 'PARENTNAME': 'OpenStack_Pool', - 'HEALTHSTATUS': constants.STATUS_HEALTH, - 'WWN': '6643e8c1004c5f6723e9f454003'}) - @mock.patch.object(rest_client.RestClient, 'get_lun_id_by_name', - return_value='ID1') - def test_manage_existing_with_lower_version(self, pool_data, - mock_get_by_name, - mock_get_info, mock_rename, - mock_get_hyper_pairs): - self.driver.support_func = pool_data - mock_get_hyper_pairs.side_effect = ( - exception.VolumeBackendAPIException(data='err')) - external_ref = {'source-name': 'LUN1'} - model_update = self.driver.manage_existing(self.volume, - external_ref) - location = {"huawei_lun_wwn": "6643e8c1004c5f6723e9f454003", - "huawei_lun_id": "ID1"} - self.assertDictEqual(location, - json.loads(model_update['provider_location'])) - - @ddt.data([[{'PRILUNID': 'ID1'}], []], - [[{'PRILUNID': 'ID2'}], ['ID1', 'ID2']]) - @mock.patch.object(rest_client.RestClient, 'get_lun_info', - return_value={'CAPACITY': 2097152, - 'ID': 'ID1', - 'PARENTNAME': 'OpenStack_Pool', - 'HEALTHSTATUS': constants.STATUS_HEALTH}) - @mock.patch.object(rest_client.RestClient, 'get_lun_id_by_name', - return_value='ID1') - def test_manage_existing_with_splitmirror(self, ddt_data, - mock_get_by_name, - mock_get_info): - self.driver.support_func = FAKE_POOLS_SUPPORT_REPORT - # Exists in a SplitMirror. 
- with mock.patch.object(rest_client.RestClient, 'get_split_mirrors', - return_value=ddt_data[0]), \ - mock.patch.object(rest_client.RestClient, 'get_target_luns', - return_value=ddt_data[1]): - external_ref = {'source-name': 'LUN1'} - ex = self.assertRaises(exception.ManageExistingInvalidReference, - self.driver.manage_existing, - self.volume, external_ref) - - self.assertIsNotNone(re.search('SplitMirror', ex.msg)) - - @ddt.data([[{'PARENTID': 'ID1'}], FAKE_POOLS_UNSUPPORT_REPORT], - [[{'TARGETLUNID': 'ID1'}], FAKE_POOLS_UNSUPPORT_REPORT], - [[{'PARENTID': 'ID1'}], FAKE_POOLS_SUPPORT_REPORT], - [[{'TARGETLUNID': 'ID1'}], FAKE_POOLS_SUPPORT_REPORT]) - @mock.patch.object(rest_client.RestClient, 'get_lun_info', - return_value={'CAPACITY': 2097152, - 'ID': 'ID1', - 'PARENTNAME': 'OpenStack_Pool', - 'HEALTHSTATUS': constants.STATUS_HEALTH}) - @mock.patch.object(rest_client.RestClient, 'get_lun_id_by_name', - return_value='ID1') - @ddt.unpack - def test_manage_existing_under_migration(self, ddt_data, pool_data, - mock_get_by_name, - mock_get_info): - self.driver.support_func = pool_data - # Exists in a migration task. - with mock.patch.object(rest_client.RestClient, 'get_migration_task', - return_value=ddt_data): - external_ref = {'source-name': 'LUN1'} - ex = self.assertRaises(exception.ManageExistingInvalidReference, - self.driver.manage_existing, - self.volume, external_ref) - self.assertIsNotNone(re.search('migration', ex.msg)) - - @mock.patch.object(rest_client.RestClient, 'get_lun_info', - return_value={'CAPACITY': 2097152, - 'ID': 'ID1', - 'PARENTNAME': 'OpenStack_Pool', - 'SNAPSHOTIDS': [], - 'ISADD2LUNGROUP': 'true', - 'HEALTHSTATUS': constants.STATUS_HEALTH}) - @mock.patch.object(rest_client.RestClient, 'get_lun_id_by_name', - return_value='ID1') - def test_manage_existing_with_lungroup(self, mock_get_by_name, - mock_get_info): - # Already in LUN group. 
- - external_ref = {'source-name': 'LUN1'} - ex = self.assertRaises(exception.ManageExistingInvalidReference, - self.driver.manage_existing, - self.volume, external_ref) - self.assertIsNotNone(re.search('Already exists in a LUN group', - ex.msg)) - - @ddt.data([{'source-name': 'LUN1'}, FAKE_POOLS_UNSUPPORT_REPORT], - [{'source-name': 'LUN1'}, FAKE_POOLS_SUPPORT_REPORT], - [{'source-id': 'ID1'}, FAKE_POOLS_UNSUPPORT_REPORT], - [{'source-id': 'ID1'}, FAKE_POOLS_SUPPORT_REPORT]) - @mock.patch.object(rest_client.RestClient, 'rename_lun') - @mock.patch.object(huawei_driver.HuaweiBaseDriver, - '_get_lun_info_by_ref', - return_value={'PARENTNAME': 'OpenStack_Pool', - 'SNAPSHOTIDS': [], - 'ID': 'ID1', - 'HEALTHSTATUS': constants.STATUS_HEALTH, - 'WWN': '6643e8c1004c5f6723e9f454003'}) - @mock.patch.object(rest_client.RestClient, 'get_lun_info', - return_value={'CAPACITY': 2097152, - 'ALLOCTYPE': 1}) - @mock.patch.object(rest_client.RestClient, 'get_lun_id_by_name', - return_value='ID1') - @ddt.unpack - def test_manage_existing_success(self, mock_get_by_name, mock_get_info, - mock_check_lun, mock_rename, - external_ref, pool_data): - self.driver.support_func = pool_data - model_update = self.driver.manage_existing(self.volume, - external_ref) - expected = {"huawei_lun_wwn": "6643e8c1004c5f6723e9f454003", - "huawei_lun_id": "ID1"} - self.assertDictEqual(expected, - json.loads(model_update['provider_location'])) - - def test_unmanage(self): - self.driver.unmanage(self.volume) - - def test_manage_existing_snapshot_abnormal(self): - with mock.patch.object(huawei_driver.HuaweiBaseDriver, - '_get_snapshot_info_by_ref', - return_value={'HEALTHSTATUS': '2', - 'PARENTID': '11'}): - external_ref = {'source-name': 'test1'} - ex = self.assertRaises(exception.ManageExistingInvalidReference, - self.driver.manage_existing_snapshot, - self.snapshot, external_ref) - self.assertIsNotNone(re.search('Snapshot status is not normal', - ex.msg)) - - @mock.patch.object(rest_client.RestClient, 
'get_snapshot_info', - return_value={'ID': 'ID1', - 'EXPOSEDTOINITIATOR': 'true', - 'NAME': 'test1', - 'PARENTID': '11', - 'USERCAPACITY': 2097152, - 'HEALTHSTATUS': constants.STATUS_HEALTH}) - @mock.patch.object(rest_client.RestClient, 'get_snapshot_id_by_name', - return_value='ID1') - def test_manage_existing_snapshot_with_lungroup(self, mock_get_by_name, - mock_get_info): - # Already in LUN group. - external_ref = {'source-name': 'test1'} - ex = self.assertRaises(exception.ManageExistingInvalidReference, - self.driver.manage_existing_snapshot, - self.snapshot, external_ref) - self.assertIsNotNone(re.search('Snapshot is exposed to initiator', - ex.msg)) - - @mock.patch.object(rest_client.RestClient, 'rename_snapshot') - @mock.patch.object(huawei_driver.HuaweiBaseDriver, - '_get_snapshot_info_by_ref', - return_value={'ID': 'ID1', - 'EXPOSEDTOINITIATOR': 'false', - 'NAME': 'test1', - 'PARENTID': '11', - 'USERCAPACITY': 2097152, - 'HEALTHSTATUS': constants.STATUS_HEALTH}) - def test_manage_existing_snapshot_success(self, mock_get_info, - mock_rename): - external_ref = {'source-name': 'test1'} - model_update = self.driver.manage_existing_snapshot(self.snapshot, - external_ref) - expect_value = {'provider_location': '{"huawei_snapshot_id": "ID1"}'} - self.assertEqual(expect_value, model_update) - - external_ref = {'source-id': 'ID1'} - model_update = self.driver.manage_existing_snapshot(self.snapshot, - external_ref) - expect_value = {'provider_location': '{"huawei_snapshot_id": "ID1"}'} - self.assertEqual(expect_value, model_update) - - @mock.patch.object(rest_client.RestClient, 'get_snapshot_info', - return_value={'ID': 'ID1', - 'EXPOSEDTOINITIATOR': 'false', - 'NAME': 'test1', - 'USERCAPACITY': 2097152, - 'PARENTID': '12', - 'HEALTHSTATUS': constants.STATUS_HEALTH}) - @mock.patch.object(rest_client.RestClient, 'get_snapshot_id_by_name', - return_value='ID1') - def test_manage_existing_snapshot_mismatch_lun(self, mock_get_by_name, - mock_get_info): - external_ref = 
{'source-name': 'test1'} - ex = self.assertRaises(exception.ManageExistingInvalidReference, - self.driver.manage_existing_snapshot, - self.snapshot, external_ref) - self.assertIsNotNone(re.search("Snapshot doesn't belong to volume", - ex.msg)) - - @mock.patch.object(rest_client.RestClient, 'get_snapshot_info', - return_value={'USERCAPACITY': 3097152}) - @mock.patch.object(rest_client.RestClient, 'get_snapshot_id_by_name', - return_value='ID1') - def test_manage_existing_snapshot_get_size_success(self, - mock_get_id_by_name, - mock_get_info): - external_ref = {'source-name': 'test1', - 'source-id': 'ID1'} - size = self.driver.manage_existing_snapshot_get_size(self.snapshot, - external_ref) - self.assertEqual(2, size) - - external_ref = {'source-name': 'test1'} - size = self.driver.manage_existing_snapshot_get_size(self.snapshot, - external_ref) - self.assertEqual(2, size) - - external_ref = {'source-id': 'ID1'} - size = self.driver.manage_existing_snapshot_get_size(self.snapshot, - external_ref) - self.assertEqual(2, size) - - def test_unmanage_snapshot(self): - self.driver.unmanage_snapshot(self.snapshot) - - @ddt.data(sync_replica_specs, async_replica_specs) - def test_create_replication_success(self, mock_type): - self.mock_object(replication.ReplicaCommonDriver, 'sync') - self.mock_object( - huawei_driver.HuaweiBaseDriver, - '_get_volume_type', - return_value={'extra_specs': mock_type}) - - model_update = self.driver.create_volume(self.replica_volume) - driver_data = {'pair_id': TEST_PAIR_ID, - 'rmt_lun_id': '1', - 'rmt_lun_wwn': '6643e8c1004c5f6723e9f454003'} - self.assertDictEqual( - driver_data, json.loads(model_update['replication_driver_data'])) - self.assertEqual('available', model_update['replication_status']) - - @ddt.data( - [ - rest_client.RestClient, - 'get_array_info', - mock.Mock( - side_effect=exception.VolumeBackendAPIException(data='err')), - FAKE_POOLS_UNSUPPORT_REPORT - ], - [ - rest_client.RestClient, - 'get_remote_devices', - mock.Mock( - 
side_effect=exception.VolumeBackendAPIException(data='err')), - FAKE_POOLS_UNSUPPORT_REPORT - ], - [ - rest_client.RestClient, - 'get_remote_devices', - mock.Mock(return_value={}), - FAKE_POOLS_UNSUPPORT_REPORT - ], - [ - replication.ReplicaPairManager, - 'wait_volume_online', - mock.Mock(side_effect=[ - None, - exception.VolumeBackendAPIException(data='err')]), - FAKE_POOLS_UNSUPPORT_REPORT - ], - [ - rest_client.RestClient, - 'create_pair', - mock.Mock( - side_effect=exception.VolumeBackendAPIException(data='err')), - FAKE_POOLS_UNSUPPORT_REPORT - ], - [ - replication.ReplicaCommonDriver, - 'sync', - mock.Mock( - side_effect=exception.VolumeBackendAPIException(data='err')), - FAKE_POOLS_UNSUPPORT_REPORT - ], - [ - rest_client.RestClient, - 'get_array_info', - mock.Mock( - side_effect=exception.VolumeBackendAPIException(data='err')), - FAKE_POOLS_SUPPORT_REPORT - ], - [ - rest_client.RestClient, - 'get_remote_devices', - mock.Mock( - side_effect=exception.VolumeBackendAPIException(data='err')), - FAKE_POOLS_SUPPORT_REPORT - ], - [ - rest_client.RestClient, - 'get_remote_devices', - mock.Mock(return_value={}), - FAKE_POOLS_SUPPORT_REPORT - ], - [ - replication.ReplicaPairManager, - 'wait_volume_online', - mock.Mock(side_effect=[ - None, - exception.VolumeBackendAPIException(data='err')]), - FAKE_POOLS_SUPPORT_REPORT - ], - [ - rest_client.RestClient, - 'create_pair', - mock.Mock( - side_effect=exception.VolumeBackendAPIException(data='err')), - FAKE_POOLS_SUPPORT_REPORT - ], - [ - replication.ReplicaCommonDriver, - 'sync', - mock.Mock( - side_effect=exception.VolumeBackendAPIException(data='err')), - FAKE_POOLS_SUPPORT_REPORT - ], - ) - @ddt.unpack - def test_create_replication_fail(self, mock_module, mock_func, - mock_value, pool_data): - self.driver.support_func = pool_data - self.mock_object( - huawei_driver.HuaweiBaseDriver, - '_get_volume_type', - return_value={'extra_specs': sync_replica_specs}) - self.mock_object(replication.ReplicaPairManager, 
'_delete_pair') - self.mock_object(mock_module, mock_func, mock_value) - self.assertRaises( - exception.VolumeBackendAPIException, - self.driver.create_volume, self.replica_volume) - - @ddt.data(FAKE_POOLS_UNSUPPORT_REPORT, FAKE_POOLS_SUPPORT_REPORT) - def test_delete_replication_success(self, pool_data): - self.driver.support_func = pool_data - self.mock_object(replication.ReplicaCommonDriver, 'split') - self.mock_object( - huawei_driver.HuaweiBaseDriver, - '_get_volume_type', - return_value={'extra_specs': sync_replica_specs}) - self.driver.delete_volume(self.replica_volume) - - self.mock_object(rest_client.RestClient, 'check_lun_exist', - return_value=False) - self.driver.delete_volume(self.replica_volume) - - def test_wait_volume_online(self): - replica = FakeReplicaPairManager(self.driver.client, - self.driver.replica_client, - self.configuration) - lun_info = {'ID': '11'} - - replica.wait_volume_online(self.driver.client, lun_info) - - offline_status = {'RUNNINGSTATUS': '28'} - replica.wait_volume_online(self.driver.client, lun_info) - - with mock.patch.object(rest_client.RestClient, 'get_lun_info', - offline_status): - self.assertRaises(exception.VolumeBackendAPIException, - replica.wait_volume_online, - self.driver.client, - lun_info) - - def test_wait_second_access(self): - pair_id = '1' - access_ro = constants.REPLICA_SECOND_RO - access_rw = constants.REPLICA_SECOND_RW - op = replication.PairOp(self.driver.client) - common_driver = replication.ReplicaCommonDriver(self.configuration, op) - self.mock_object(replication.PairOp, 'get_replica_info', - return_value={'SECRESACCESS': access_ro}) - self.mock_object(huawei_utils.time, 'time', - side_effect=utils.generate_timeout_series( - constants.DEFAULT_REPLICA_WAIT_TIMEOUT)) - - common_driver.wait_second_access(pair_id, access_ro) - self.assertRaises(exception.VolumeBackendAPIException, - common_driver.wait_second_access, pair_id, access_rw) - - def test_wait_replica_ready(self): - normal_status = { - 
'RUNNINGSTATUS': constants.REPLICA_RUNNING_STATUS_NORMAL, - 'HEALTHSTATUS': constants.REPLICA_HEALTH_STATUS_NORMAL - } - split_status = { - 'RUNNINGSTATUS': constants.REPLICA_RUNNING_STATUS_SPLIT, - 'HEALTHSTATUS': constants.REPLICA_HEALTH_STATUS_NORMAL - } - sync_status = { - 'RUNNINGSTATUS': constants.REPLICA_RUNNING_STATUS_SYNC, - 'HEALTHSTATUS': constants.REPLICA_HEALTH_STATUS_NORMAL - } - pair_id = '1' - op = replication.PairOp(self.driver.client) - common_driver = replication.ReplicaCommonDriver(self.configuration, op) - with mock.patch.object(replication.PairOp, 'get_replica_info', - return_value=normal_status): - common_driver.wait_replica_ready(pair_id) - - with mock.patch.object( - replication.PairOp, - 'get_replica_info', - side_effect=[sync_status, normal_status]): - common_driver.wait_replica_ready(pair_id) - - with mock.patch.object(replication.PairOp, 'get_replica_info', - return_value=split_status): - self.assertRaises(exception.VolumeBackendAPIException, - common_driver.wait_replica_ready, pair_id) - - def test_failover_to_current(self): - driver = FakeISCSIStorage(configuration=self.configuration) - driver.do_setup() - old_client = driver.client - old_replica_client = driver.replica_client - old_replica = driver.replica - secondary_id, volumes_update, __ = driver.failover_host( - None, [self.volume], 'default', []) - self.assertIn(driver.active_backend_id, ('', None)) - self.assertEqual(old_client, driver.client) - self.assertEqual(old_replica_client, driver.replica_client) - self.assertEqual(old_replica, driver.replica) - self.assertEqual('default', secondary_id) - self.assertEqual(0, len(volumes_update)) - - def test_failover_normal_volumes(self): - driver = FakeISCSIStorage(configuration=self.configuration) - driver.do_setup() - old_client = driver.client - old_replica_client = driver.replica_client - old_replica = driver.replica - secondary_id, volumes_update, __ = driver.failover_host( - None, [self.volume], REPLICA_BACKEND_ID, []) - 
self.assertEqual(REPLICA_BACKEND_ID, driver.active_backend_id) - self.assertEqual(old_client, driver.replica_client) - self.assertEqual(old_replica_client, driver.client) - self.assertNotEqual(old_replica, driver.replica) - self.assertEqual(REPLICA_BACKEND_ID, secondary_id) - self.assertEqual(1, len(volumes_update)) - v_id = volumes_update[0]['volume_id'] - v_update = volumes_update[0]['updates'] - self.assertEqual(self.volume.id, v_id) - self.assertEqual('error', v_update['status']) - self.assertEqual(self.volume['status'], - v_update['metadata']['old_status']) - - def test_failback_to_current(self): - driver = FakeISCSIStorage(configuration=self.configuration) - driver.active_backend_id = REPLICA_BACKEND_ID - driver.do_setup() - old_client = driver.client - old_replica_client = driver.replica_client - old_replica = driver.replica - secondary_id, volumes_update, __ = driver.failover_host( - None, [self.volume], REPLICA_BACKEND_ID, []) - self.assertEqual(REPLICA_BACKEND_ID, driver.active_backend_id) - self.assertEqual(old_client, driver.client) - self.assertEqual(old_replica_client, driver.replica_client) - self.assertEqual(old_replica, driver.replica) - self.assertEqual(REPLICA_BACKEND_ID, secondary_id) - self.assertEqual(0, len(volumes_update)) - - def test_failback_normal_volumes(self): - self.volume.status = 'error' - self.volume.metadata = {'old_status': 'available'} - - driver = FakeISCSIStorage(configuration=self.configuration) - driver.active_backend_id = REPLICA_BACKEND_ID - driver.do_setup() - old_client = driver.client - old_replica_client = driver.replica_client - old_replica = driver.replica - secondary_id, volumes_update, __ = driver.failover_host( - None, [self.volume], 'default', []) - self.assertIn(driver.active_backend_id, ('', None)) - self.assertEqual(old_client, driver.replica_client) - self.assertEqual(old_replica_client, driver.client) - self.assertNotEqual(old_replica, driver.replica) - self.assertEqual('default', secondary_id) - 
self.assertEqual(1, len(volumes_update)) - v_id = volumes_update[0]['volume_id'] - v_update = volumes_update[0]['updates'] - self.assertEqual(self.volume.id, v_id) - self.assertEqual('available', v_update['status']) - self.assertNotIn('old_status', v_update['metadata']) - - def test_failover_replica_volumes(self): - driver = FakeISCSIStorage(configuration=self.configuration) - driver.do_setup() - old_client = driver.client - old_replica_client = driver.replica_client - old_replica = driver.replica - self.mock_object(replication.ReplicaCommonDriver, 'failover') - self.mock_object(huawei_driver.HuaweiBaseDriver, '_get_volume_params', - return_value={'replication_enabled': 'true'}) - secondary_id, volumes_update, __ = driver.failover_host( - None, [self.replica_volume], REPLICA_BACKEND_ID, []) - self.assertEqual(REPLICA_BACKEND_ID, driver.active_backend_id) - self.assertEqual(old_client, driver.replica_client) - self.assertEqual(old_replica_client, driver.client) - self.assertNotEqual(old_replica, driver.replica) - self.assertEqual(REPLICA_BACKEND_ID, secondary_id) - self.assertEqual(1, len(volumes_update)) - v_id = volumes_update[0]['volume_id'] - v_update = volumes_update[0]['updates'] - self.assertEqual(self.replica_volume.id, v_id) - - expect_location = {"huawei_lun_wwn": "FAKE_RMT_LUN_WWN", - "huawei_lun_id": "1"} - self.assertDictEqual( - expect_location, json.loads(v_update['provider_location'])) - self.assertEqual('failed-over', v_update['replication_status']) - - metadata = huawei_utils.get_lun_metadata(self.replica_volume) - new_drv_data = {'pair_id': TEST_PAIR_ID, - 'rmt_lun_id': metadata['huawei_lun_id'], - 'rmt_lun_wwn': metadata['huawei_lun_wwn']} - self.assertDictEqual( - new_drv_data, json.loads(v_update['replication_driver_data'])) - - @ddt.data({}, {'pair_id': TEST_PAIR_ID}) - def test_failover_replica_volumes_invalid_drv_data(self, mock_drv_data): - volume = self.replica_volume - volume['replication_driver_data'] = replication.to_string( - 
mock_drv_data) - driver = FakeISCSIStorage(configuration=self.configuration) - driver.do_setup() - old_client = driver.client - old_replica_client = driver.replica_client - old_replica = driver.replica - self.mock_object(huawei_driver.HuaweiBaseDriver, '_get_volume_params', - return_value={'replication_enabled': 'true'}) - secondary_id, volumes_update, __ = driver.failover_host( - None, [volume], REPLICA_BACKEND_ID, []) - self.assertEqual(driver.active_backend_id, REPLICA_BACKEND_ID) - self.assertEqual(old_client, driver.replica_client) - self.assertEqual(old_replica_client, driver.client) - self.assertNotEqual(old_replica, driver.replica) - self.assertEqual(REPLICA_BACKEND_ID, secondary_id) - self.assertEqual(1, len(volumes_update)) - v_id = volumes_update[0]['volume_id'] - v_update = volumes_update[0]['updates'] - self.assertEqual(volume.id, v_id) - self.assertEqual('error', v_update['replication_status']) - - def test_failback_replica_volumes(self): - self.mock_object(replication.ReplicaCommonDriver, 'enable') - self.mock_object(replication.ReplicaCommonDriver, 'wait_replica_ready') - self.mock_object(replication.ReplicaCommonDriver, 'failover') - self.mock_object(huawei_driver.HuaweiBaseDriver, '_get_volume_params', - return_value={'replication_enabled': 'true'}) - - volume = self.replica_volume - - driver = FakeISCSIStorage(configuration=self.configuration) - driver.active_backend_id = REPLICA_BACKEND_ID - driver.do_setup() - old_client = driver.client - old_replica_client = driver.replica_client - old_replica = driver.replica - secondary_id, volumes_update, __ = driver.failover_host( - None, [volume], 'default', []) - self.assertIn(driver.active_backend_id, ('', None)) - self.assertEqual(old_client, driver.replica_client) - self.assertEqual(old_replica_client, driver.client) - self.assertNotEqual(old_replica, driver.replica) - self.assertEqual('default', secondary_id) - self.assertEqual(1, len(volumes_update)) - v_id = volumes_update[0]['volume_id'] - 
v_update = volumes_update[0]['updates'] - self.assertEqual(self.replica_volume.id, v_id) - - expect_location = {"huawei_lun_wwn": "FAKE_RMT_LUN_WWN", - "huawei_lun_id": "1"} - self.assertDictEqual( - expect_location, json.loads(v_update['provider_location'])) - self.assertEqual('available', v_update['replication_status']) - - metadata = huawei_utils.get_lun_metadata(self.replica_volume) - new_drv_data = {'pair_id': TEST_PAIR_ID, - 'rmt_lun_id': metadata['huawei_lun_id'], - 'rmt_lun_wwn': metadata['huawei_lun_wwn']} - self.assertDictEqual( - new_drv_data, json.loads(v_update['replication_driver_data'])) - - @ddt.data({}, {'pair_id': TEST_PAIR_ID}) - def test_failback_replica_volumes_invalid_drv_data(self, mock_drv_data): - self.mock_object(huawei_driver.HuaweiBaseDriver, '_get_volume_params', - return_value={'replication_enabled': 'true'}) - - volume = self.replica_volume - volume['replication_driver_data'] = replication.to_string( - mock_drv_data) - - driver = FakeISCSIStorage(configuration=self.configuration) - driver.active_backend_id = REPLICA_BACKEND_ID - driver.do_setup() - old_client = driver.client - old_replica_client = driver.replica_client - old_replica = driver.replica - secondary_id, volumes_update, __ = driver.failover_host( - None, [volume], 'default', []) - self.assertIn(driver.active_backend_id, ('', None)) - self.assertEqual(old_client, driver.replica_client) - self.assertEqual(old_replica_client, driver.client) - self.assertNotEqual(old_replica, driver.replica) - self.assertEqual('default', secondary_id) - self.assertEqual(1, len(volumes_update)) - v_id = volumes_update[0]['volume_id'] - v_update = volumes_update[0]['updates'] - self.assertEqual(self.replica_volume.id, v_id) - self.assertEqual('error', v_update['replication_status']) - - @mock.patch('oslo_service.loopingcall.FixedIntervalLoopingCall', - new=utils.ZeroIntervalLoopingCall) - @mock.patch.object(replication.PairOp, 'is_primary', - side_effect=[False, True]) - 
@mock.patch.object(replication.ReplicaCommonDriver, 'split') - @mock.patch.object(replication.ReplicaCommonDriver, 'unprotect_second') - def test_replication_driver_enable_success(self, - mock_unprotect, - mock_split, - mock_is_primary): - replica_id = TEST_PAIR_ID - op = replication.PairOp(self.driver.client) - common_driver = replication.ReplicaCommonDriver(self.configuration, op) - common_driver.enable(replica_id) - self.assertTrue(mock_unprotect.called) - self.assertTrue(mock_split.called) - self.assertTrue(mock_is_primary.called) - - @mock.patch.object(replication.PairOp, 'is_primary', return_value=False) - @mock.patch.object(replication.ReplicaCommonDriver, 'split') - def test_replication_driver_failover_success(self, - mock_split, - mock_is_primary): - replica_id = TEST_PAIR_ID - op = replication.PairOp(self.driver.client) - common_driver = replication.ReplicaCommonDriver(self.configuration, op) - common_driver.failover(replica_id) - self.assertTrue(mock_split.called) - self.assertTrue(mock_is_primary.called) - - @mock.patch.object(replication.PairOp, 'is_primary', return_value=True) - def test_replication_driver_failover_fail(self, mock_is_primary): - replica_id = TEST_PAIR_ID - op = replication.PairOp(self.driver.client) - common_driver = replication.ReplicaCommonDriver(self.configuration, op) - self.assertRaises( - exception.VolumeBackendAPIException, - common_driver.failover, - replica_id) - - @ddt.data(constants.REPLICA_SECOND_RW, constants.REPLICA_SECOND_RO) - def test_replication_driver_protect_second(self, mock_access): - replica_id = TEST_PAIR_ID - op = replication.PairOp(self.driver.client) - common_driver = replication.ReplicaCommonDriver(self.configuration, op) - - self.mock_object(replication.ReplicaCommonDriver, 'wait_second_access') - self.mock_object( - replication.PairOp, - 'get_replica_info', - return_value={'SECRESACCESS': mock_access}) - - common_driver.protect_second(replica_id) - common_driver.unprotect_second(replica_id) - - def 
test_replication_driver_sync(self): - replica_id = TEST_PAIR_ID - op = replication.PairOp(self.driver.client) - common_driver = replication.ReplicaCommonDriver(self.configuration, op) - async_normal_status = { - 'REPLICATIONMODEL': constants.REPLICA_ASYNC_MODEL, - 'RUNNINGSTATUS': constants.REPLICA_RUNNING_STATUS_NORMAL, - 'HEALTHSTATUS': constants.REPLICA_HEALTH_STATUS_NORMAL - } - - self.mock_object(replication.ReplicaCommonDriver, 'protect_second') - self.mock_object(replication.PairOp, 'get_replica_info', - return_value=async_normal_status) - common_driver.sync(replica_id, True) - common_driver.sync(replica_id, False) - - def test_replication_driver_split(self): - replica_id = TEST_PAIR_ID - op = replication.PairOp(self.driver.client) - common_driver = replication.ReplicaCommonDriver(self.configuration, op) - - self.mock_object(replication.ReplicaCommonDriver, 'wait_expect_state') - self.mock_object( - replication.PairOp, 'split', - side_effect=exception.VolumeBackendAPIException(data='err')) - common_driver.split(replica_id) - - @mock.patch.object(replication.PairOp, 'split') - @ddt.data(constants.REPLICA_RUNNING_STATUS_SPLIT, - constants.REPLICA_RUNNING_STATUS_INVALID, - constants.REPLICA_RUNNING_STATUS_ERRUPTED) - def test_replication_driver_split_already_disabled(self, mock_status, - mock_op_split): - replica_id = TEST_PAIR_ID - op = replication.PairOp(self.driver.client) - common_driver = replication.ReplicaCommonDriver(self.configuration, op) - - pair_info = json.loads(FAKE_GET_PAIR_NORMAL_RESPONSE)['data'] - pair_info['RUNNINGSTATUS'] = mock_status - self.mock_object(rest_client.RestClient, 'get_pair_by_id', - return_value=pair_info) - common_driver.split(replica_id) - self.assertFalse(mock_op_split.called) - - def test_replication_base_op(self): - replica_id = '1' - op = replication.AbsReplicaOp(None) - op.create() - op.delete(replica_id) - op.protect_second(replica_id) - op.unprotect_second(replica_id) - op.sync(replica_id) - op.split(replica_id) - 
op.switch(replica_id) - op.is_primary({}) - op.get_replica_info(replica_id) - op._is_status(None, {'key': 'volue'}, None) - - @mock.patch.object(rest_client.RestClient, 'call', - return_value={"error": {"code": 0}}) - def test_get_tgt_port_group_no_portg_exist(self, mock_call): - portg = self.driver.client.get_tgt_port_group('test_portg') - self.assertIsNone(portg) - - def test_get_tgt_iqn_from_rest_match(self): - match_res = { - 'data': [{ - 'TYPE': 249, - 'ID': '0+iqn.2006-08.com: 210048cee9d: 111.111.111.19,t,0x01' - }, { - 'TYPE': 249, - 'ID': '0+iqn.2006-08.com: 210048cee9d: 111.111.111.191,t,0x01' - }], - 'error': { - 'code': 0 - } - } - ip = '111.111.111.19' - expected_iqn = 'iqn.2006-08.com: 210048cee9d: 111.111.111.19' - self.mock_object(rest_client.RestClient, 'call', - return_value=match_res) - iqn = self.driver.client._get_tgt_iqn_from_rest(ip) - self.assertEqual(expected_iqn, iqn) - - def test_get_tgt_iqn_from_rest_mismatch(self): - match_res = { - 'data': [{ - 'TYPE': 249, - 'ID': '0+iqn.2006-08.com: 210048cee9d: 192.0.2.191,t,0x01' - }, { - 'TYPE': 249, - 'ID': '0+iqn.2006-08.com: 210048cee9d: 192.0.2.192,t,0x01' - }], - 'error': { - 'code': 0 - } - } - ip = '192.0.2.19' - self.mock_object(rest_client.RestClient, 'call', - return_value=match_res) - iqn = self.driver.client._get_tgt_iqn_from_rest(ip) - self.assertIsNone(iqn) - - @cg_or_cg_snapshot - def test_create_group_snapshot(self): - test_snapshots = [self.snapshot] - ctxt = context.get_admin_context() - model, snapshots = ( - self.driver.create_group_snapshot(ctxt, self.group_snapshot, - test_snapshots)) - - self.assertEqual('21ec7341-9256-497b-97d9-ef48edcf0635', - snapshots[0]['id']) - self.assertEqual('available', snapshots[0]['status']) - self.assertDictEqual({'huawei_snapshot_id': '11'}, - json.loads(snapshots[0]['provider_location'])) - self.assertEqual(fields.GroupSnapshotStatus.AVAILABLE, model['status']) - - @cg_or_cg_snapshot - def 
test_create_group_snapshot_with_create_snapshot_fail(self): - test_snapshots = [self.snapshot] - ctxt = context.get_admin_context() - self.mock_object(rest_client.RestClient, 'create_snapshot', - side_effect=( - exception.VolumeBackendAPIException(data='err'))) - self.assertRaises(exception.VolumeBackendAPIException, - self.driver.create_group_snapshot, - ctxt, - self.group_snapshot, - test_snapshots) - - @cg_or_cg_snapshot - def test_create_group_snapshot_with_active_snapshot_fail(self): - test_snapshots = [self.snapshot] - ctxt = context.get_admin_context() - self.mock_object(rest_client.RestClient, 'activate_snapshot', - side_effect=( - exception.VolumeBackendAPIException(data='err'))) - self.assertRaises(exception.VolumeBackendAPIException, - self.driver.create_group_snapshot, - ctxt, - self.group_snapshot, - test_snapshots) - - @cg_or_cg_snapshot - def test_delete_group_snapshot(self): - test_snapshots = [self.snapshot] - ctxt = context.get_admin_context() - self.driver.delete_group_snapshot(ctxt, self.group_snapshot, - test_snapshots) - - -class FCSanLookupService(object): - - def get_device_mapping_from_network(self, initiator_list, - target_list): - return fake_fabric_mapping - - -@ddt.ddt -class HuaweiFCDriverTestCase(HuaweiTestBase): - - def setUp(self): - super(HuaweiFCDriverTestCase, self).setUp() - self.configuration = mock.Mock(spec=conf.Configuration) - self.flags(rpc_backend='oslo_messaging._drivers.impl_fake') - self.huawei_conf = FakeHuaweiConf(self.configuration, 'FC') - self.configuration.hypermetro_devices = hypermetro_devices - driver = FakeFCStorage(configuration=self.configuration) - self.driver = driver - self.driver.do_setup() - self.driver.client.login() - - def test_login_success(self): - device_id = self.driver.client.login() - self.assertEqual('210235G7J20000000000', device_id) - - def test_create_volume_success(self): - lun_info = self.driver.create_volume(self.volume) - expect_value = {"huawei_lun_id": "1", - "huawei_lun_wwn": 
"6643e8c1004c5f6723e9f454003"} - self.assertDictEqual(expect_value, - json.loads(lun_info['provider_location'])) - - @ddt.data(FAKE_POOLS_UNSUPPORT_REPORT, FAKE_POOLS_SUPPORT_REPORT) - def test_delete_volume_success(self, pool_data): - self.driver.support_func = pool_data - self.driver.delete_volume(self.volume) - - def test_delete_snapshot_success(self): - self.driver.delete_snapshot(self.snapshot) - - def test_create_volume_from_snapsuccess(self): - lun_info = self.driver.create_volume_from_snapshot(self.volume, - self.snapshot) - expect_value = {"huawei_lun_id": "1", - "huawei_lun_wwn": "6643e8c1004c5f6723e9f454003"} - self.assertDictEqual(expect_value, - json.loads(lun_info['provider_location'])) - - @mock.patch.object(huawei_driver.HuaweiFCDriver, - 'initialize_connection', - return_value={"data": {'target_lun': 1}}) - def test_initialize_connection_snapshot_success(self, mock_fc_init): - iscsi_properties = self.driver.initialize_connection_snapshot( - self.snapshot, FakeConnector) - volume = Volume(id=self.snapshot.id, - provider_location=self.snapshot.provider_location, - lun_type='27', - metadata=None) - self.assertEqual(1, iscsi_properties['data']['target_lun']) - mock_fc_init.assert_called_with(volume, FakeConnector) - - def test_initialize_connection_success(self): - do_mapping_mocker = self.mock_object( - self.driver.client, 'do_mapping', - wraps=self.driver.client.do_mapping) - iscsi_properties = self.driver.initialize_connection(self.volume, - FakeConnector) - self.assertEqual(1, iscsi_properties['data']['target_lun']) - do_mapping_mocker.assert_called_once_with( - '11', '0', '1', None, '11', False) - - def test_initialize_connection_fail_no_online_wwns_in_host(self): - self.mock_object(rest_client.RestClient, 'get_online_free_wwns', - return_value=[]) - self.assertRaises(exception.VolumeBackendAPIException, - self.driver.initialize_connection, - self.volume, FakeConnector) - - def test_initialize_connection_no_local_ini_tgt_map(self): - 
self.mock_object(rest_client.RestClient, 'get_init_targ_map', - return_value=('', '')) - self.mock_object(huawei_driver.HuaweiFCDriver, '_get_same_hostid', - return_value='') - self.mock_object(rest_client.RestClient, 'change_hostlun_id', - return_value=None) - self.mock_object(rest_client.RestClient, 'do_mapping', - return_value={'lun_id': '1', - 'view_id': '1', - 'aval_luns': '[1]'}) - - self.driver.initialize_connection(self.hyper_volume, FakeConnector) - - def test_hypermetro_connection_success(self): - self.mock_object(rest_client.RestClient, 'find_array_version', - return_value='V300R003C00') - fc_properties = self.driver.initialize_connection(self.hyper_volume, - FakeConnector) - self.assertEqual(1, fc_properties['data']['target_lun']) - - @mock.patch.object(huawei_driver.HuaweiFCDriver, - 'terminate_connection') - def test_terminate_connection_snapshot_success(self, mock_fc_term): - self.driver.terminate_connection_snapshot(self.snapshot, - FakeConnector) - volume = Volume(id=self.snapshot.id, - provider_location=self.snapshot.provider_location, - lun_type='27', - metadata=None) - mock_fc_term.assert_called_with(volume, FakeConnector) - - def test_terminate_connection_success(self): - self.driver.client.terminateFlag = True - self.driver.terminate_connection(self.volume, FakeConnector) - self.assertTrue(self.driver.client.terminateFlag) - - def test_terminate_connection_portgroup_associated(self): - self.mock_object(rest_client.RestClient, - 'is_portgroup_associated_to_view', - return_value=True) - self.mock_object(huawei_driver.HuaweiFCDriver, - '_delete_zone_and_remove_fc_initiators', - return_value=({}, 1)) - self.driver.terminate_connection(self.volume, FakeConnector) - - def test_terminate_connection_fc_initiators_exist_in_host(self): - self.mock_object(rest_client.RestClient, - 'check_fc_initiators_exist_in_host', - return_value=True) - self.driver.terminate_connection(self.volume, FakeConnector) - - def 
test_terminate_connection_hypermetro_in_metadata(self): - self.driver.terminate_connection(self.hyper_volume, FakeConnector) - - def test_get_volume_status(self): - remote_device_info = {"ARRAYTYPE": "1", - "HEALTHSTATUS": "1", - "RUNNINGSTATUS": "10"} - self.mock_object( - replication.ReplicaPairManager, - 'get_remote_device_by_wwn', - return_value=remote_device_info) - data = self.driver.get_volume_stats() - self.assertEqual(self.driver.VERSION, data['driver_version']) - self.assertTrue(data['pools'][0]['replication_enabled']) - self.assertListEqual(['sync', 'async'], - data['pools'][0]['replication_type']) - - self.mock_object( - replication.ReplicaPairManager, - 'get_remote_device_by_wwn', - return_value={}) - data = self.driver.get_volume_stats() - self.assertNotIn('replication_enabled', data['pools'][0]) - - self.mock_object( - replication.ReplicaPairManager, - 'try_get_remote_wwn', - return_value={}) - data = self.driver.get_volume_stats() - self.assertEqual(self.driver.VERSION, data['driver_version']) - self.assertNotIn('replication_enabled', data['pools'][0]) - - @ddt.data({'TIER0CAPACITY': '100', - 'TIER1CAPACITY': '0', - 'TIER2CAPACITY': '0', - 'disktype': 'ssd'}, - {'TIER0CAPACITY': '0', - 'TIER1CAPACITY': '100', - 'TIER2CAPACITY': '0', - 'disktype': 'sas'}, - {'TIER0CAPACITY': '0', - 'TIER1CAPACITY': '0', - 'TIER2CAPACITY': '100', - 'disktype': 'nl_sas'}, - {'TIER0CAPACITY': '100', - 'TIER1CAPACITY': '100', - 'TIER2CAPACITY': '100', - 'disktype': 'mix'}, - {'TIER0CAPACITY': '0', - 'TIER1CAPACITY': '0', - 'TIER2CAPACITY': '0', - 'disktype': ''}) - def test_get_volume_disk_type(self, disk_type_value): - response_dict = json.loads(FAKE_STORAGE_POOL_RESPONSE) - storage_pool_sas = copy.deepcopy(response_dict) - storage_pool_sas['data'][0]['TIER0CAPACITY'] = ( - disk_type_value['TIER0CAPACITY']) - storage_pool_sas['data'][0]['TIER1CAPACITY'] = ( - disk_type_value['TIER1CAPACITY']) - storage_pool_sas['data'][0]['TIER2CAPACITY'] = ( - 
disk_type_value['TIER2CAPACITY']) - driver = FakeISCSIStorage(configuration=self.configuration) - driver.do_setup() - driver.replica = None - - self.mock_object(rest_client.RestClient, 'get_all_pools', - return_value=storage_pool_sas['data']) - data = driver.get_volume_stats() - if disk_type_value['disktype']: - self.assertEqual(disk_type_value['disktype'], - data['pools'][0]['disk_type']) - else: - self.assertIsNone(data['pools'][0].get('disk_type')) - - def test_get_disk_type_pool_info_none(self): - driver = FakeISCSIStorage(configuration=self.configuration) - driver.do_setup() - driver.replica = None - self.mock_object(rest_client.RestClient, 'get_pool_info', - return_value=None) - data = driver.get_volume_stats() - self.assertIsNone(data['pools'][0].get('disk_type')) - - def test_extend_volume(self): - self.driver.extend_volume(self.volume, 3) - - def test_login_fail(self): - self.driver.client.test_fail = True - self.assertRaises(exception.VolumeBackendAPIException, - self.driver.client.login) - - def test_create_snapshot_fail(self): - self.driver.client.test_fail = True - self.assertRaises(exception.VolumeBackendAPIException, - self.driver.create_snapshot, self.snapshot) - - def test_create_volume_fail(self): - self.driver.client.test_fail = True - self.assertRaises(exception.VolumeBackendAPIException, - self.driver.create_volume, self.volume) - - def test_delete_volume_fail(self): - self.driver.client.test_fail = True - self.driver.delete_volume(self.volume) - - def test_delete_snapshot_fail(self): - self.driver.client.test_fail = True - self.driver.delete_snapshot(self.snapshot) - - def test_initialize_connection_fail(self): - self.driver.client.test_fail = True - self.assertRaises(exception.VolumeBackendAPIException, - self.driver.initialize_connection, - self.volume, FakeConnector) - - def test_lun_is_associated_to_lungroup(self): - self.driver.client.associate_lun_to_lungroup('11', '11') - result = self.driver.client._is_lun_associated_to_lungroup('11', 
- '11') - self.assertTrue(result) - - def test_lun_is_not_associated_to_lun_group(self): - self.driver.client.associate_lun_to_lungroup('12', '12') - self.driver.client.remove_lun_from_lungroup('12', '12') - result = self.driver.client._is_lun_associated_to_lungroup('12', - '12') - self.assertFalse(result) - - @ddt.data(FAKE_POOLS_UNSUPPORT_REPORT, FAKE_POOLS_SUPPORT_REPORT) - @mock.patch.object(rest_client, 'RestClient') - def test_migrate_volume_success(self, mock_add_lun_to_partition, - pool_data): - # Migrate volume without new type. - empty_dict = {} - self.driver.support_func = pool_data - moved, model_update = self.driver.migrate_volume(None, - self.volume, - test_host, - None) - self.assertTrue(moved) - self.assertEqual(empty_dict, model_update) - - # Migrate volume with new type. - empty_dict = {} - new_type = {'extra_specs': - {'smarttier': ' true', - 'smartcache': ' true', - 'smartpartition': ' true', - 'thin_provisioning_support': ' true', - 'thick_provisioning_support': ' False', - 'policy': '2', - 'smartcache:cachename': 'cache-test', - 'smartpartition:partitionname': 'partition-test'}} - moved, model_update = self.driver.migrate_volume(None, - self.volume, - test_host, - new_type) - self.assertTrue(moved) - self.assertEqual(empty_dict, model_update) - - def test_migrate_volume_fail(self): - self.driver.client.test_fail = True - - # Migrate volume without new type. - self.assertRaises(exception.VolumeBackendAPIException, - self.driver.migrate_volume, None, - self.volume, test_host, None) - - # Migrate volume with new type. 
- new_type = {'extra_specs': - {'smarttier': ' true', - 'smartcache': ' true', - 'thin_provisioning_support': ' true', - 'thick_provisioning_support': ' False', - 'policy': '2', - 'smartcache:cachename': 'cache-test', - 'partitionname': 'partition-test'}} - self.driver.client.test_fail = True - self.assertRaises(exception.VolumeBackendAPIException, - self.driver.migrate_volume, None, - self.volume, test_host, new_type) - - def test_check_migration_valid(self): - is_valid = self.driver._check_migration_valid(test_host, - self.volume) - self.assertTrue(is_valid) - # No pool_name in capabilities. - invalid_host1 = {'host': 'ubuntu001@backend002#OpenStack_Pool', - 'capabilities': - {'location_info': '210235G7J20000000000', - 'allocated_capacity_gb': 0, - 'volume_backend_name': 'HuaweiFCDriver', - 'storage_protocol': 'FC'}} - is_valid = self.driver._check_migration_valid(invalid_host1, - self.volume) - self.assertFalse(is_valid) - # location_info in capabilities is not matched. - invalid_host2 = {'host': 'ubuntu001@backend002#OpenStack_Pool', - 'capabilities': - {'location_info': '210235G7J20000000001', - 'allocated_capacity_gb': 0, - 'pool_name': 'OpenStack_Pool', - 'volume_backend_name': 'HuaweiFCDriver', - 'storage_protocol': 'FC'}} - is_valid = self.driver._check_migration_valid(invalid_host2, - self.volume) - self.assertFalse(is_valid) - # storage_protocol is not match current protocol and volume status is - # 'in-use'. 
- location = ('{"huawei_lun_wwn": "6643e8c1004c5f6723e9f454003", ' - '"huawei_lun_id": "11"}') - volume_in_use = {'name': 'volume-21ec7341-9256-497b-97d9-ef48edcf0635', - 'size': 2, - 'volume_name': 'vol1', - 'id': ID, - 'volume_id': '21ec7341-9256-497b-97d9-ef48edcf0635', - 'volume_attachment': 'in-use', - 'provider_location': location} - invalid_host2 = {'host': 'ubuntu001@backend002#OpenStack_Pool', - 'capabilities': - {'location_info': '210235G7J20000000001', - 'allocated_capacity_gb': 0, - 'pool_name': 'OpenStack_Pool', - 'volume_backend_name': 'HuaweiFCDriver', - 'storage_protocol': 'iSCSI'}} - is_valid = self.driver._check_migration_valid(invalid_host2, - volume_in_use) - self.assertFalse(is_valid) - # pool_name is empty. - invalid_host3 = {'host': 'ubuntu001@backend002#OpenStack_Pool', - 'capabilities': - {'location_info': '210235G7J20000000001', - 'allocated_capacity_gb': 0, - 'pool_name': '', - 'volume_backend_name': 'HuaweiFCDriver', - 'storage_protocol': 'iSCSI'}} - is_valid = self.driver._check_migration_valid(invalid_host3, - self.volume) - self.assertFalse(is_valid) - - @mock.patch.object(rest_client.RestClient, 'rename_lun') - def test_update_migrated_volume_success(self, mock_rename_lun): - model_update = self.driver.update_migrated_volume(None, - self.original_volume, - self.current_volume, - 'available') - self.assertEqual({'_name_id': None}, model_update) - - @mock.patch.object(rest_client.RestClient, 'rename_lun') - def test_update_migrated_volume_fail(self, mock_rename_lun): - mock_rename_lun.side_effect = exception.VolumeBackendAPIException( - data='Error occurred.') - model_update = self.driver.update_migrated_volume(None, - self.original_volume, - self.current_volume, - 'available') - self.assertEqual(self.current_volume.name_id, - model_update['_name_id']) - - @mock.patch.object(rest_client.RestClient, 'add_lun_to_partition') - def test_retype_volume_success(self, mock_add_lun_to_partition): - self.driver.support_func = 
FAKE_POOLS_SUPPORT_REPORT - retype = self.driver.retype(None, self.volume, - test_new_type, None, test_host) - self.assertTrue(retype) - - @ddt.data(FAKE_POOLS_UNSUPPORT_REPORT, FAKE_POOLS_SUPPORT_REPORT) - @mock.patch.object(rest_client, 'RestClient') - @mock.patch.object( - huawei_driver.HuaweiBaseDriver, - '_get_volume_type', - return_value={'extra_specs': sync_replica_specs}) - def test_retype_replication_volume_success(self, mock_get_type, - mock_add_lun_to_partition, - pool_data): - self.driver.support_func = pool_data - retype = self.driver.retype(None, self.volume, - test_new_replication_type, None, test_host) - self.assertTrue(retype) - - @ddt.data( - [ - replication.ReplicaPairManager, - 'create_replica', - exception.VolumeBackendAPIException( - data='Can\'t support smarttier on the array.'), - FAKE_POOLS_UNSUPPORT_REPORT - ], - [ - replication.ReplicaPairManager, - 'create_replica', - exception.VolumeBackendAPIException( - data='Can\'t support smarttier on the array.'), - FAKE_POOLS_SUPPORT_REPORT - ], - [ - replication.ReplicaPairManager, - 'delete_replica', - exception.VolumeBackendAPIException( - data='Can\'t support smarttier on the array.'), - FAKE_POOLS_SUPPORT_REPORT - ], - [ - replication.ReplicaPairManager, - 'delete_replica', - exception.VolumeBackendAPIException( - data='Can\'t support smarttier on the array.'), - FAKE_POOLS_UNSUPPORT_REPORT - ], - ) - @ddt.unpack - def test_retype_replication_volume_fail(self, - mock_module, - mock_func, - side_effect, - pool_data): - self.driver.support_func = pool_data - self.mock_object(mock_module, mock_func, side_effect=side_effect) - self.mock_object(rest_client.RestClient, 'add_lun_to_partition') - self.mock_object( - huawei_driver.HuaweiBaseDriver, - '_get_volume_type', - return_value={'extra_specs': sync_replica_specs}) - retype = self.driver.retype(None, self.volume, - test_new_replication_type, None, test_host) - self.assertFalse(retype) - - @ddt.data(FAKE_POOLS_UNSUPPORT_REPORT, 
FAKE_POOLS_SUPPORT_REPORT) - def test_retype_volume_cache_fail(self, pool_data): - self.driver.client.cache_not_exist = True - self.driver.support_func = pool_data - - self.assertRaises(exception.VolumeBackendAPIException, - self.driver.retype, None, - self.volume, test_new_type, None, test_host) - - @ddt.data(FAKE_POOLS_UNSUPPORT_REPORT, FAKE_POOLS_SUPPORT_REPORT) - def test_retype_volume_partition_fail(self, pool_data): - self.driver.support_func = pool_data - self.driver.client.partition_not_exist = True - - self.assertRaises(exception.VolumeBackendAPIException, - self.driver.retype, None, - self.volume, test_new_type, None, test_host) - - @mock.patch.object(rest_client.RestClient, 'add_lun_to_partition') - def test_retype_volume_fail(self, mock_add_lun_to_partition): - - self.driver.support_func = FAKE_POOLS_SUPPORT_REPORT - mock_add_lun_to_partition.side_effect = ( - exception.VolumeBackendAPIException(data='Error occurred.')) - retype = self.driver.retype(None, self.volume, - test_new_type, None, test_host) - self.assertFalse(retype) - - @mock.patch.object(rest_client.RestClient, 'get_all_engines', - return_value=[{'NODELIST': '["0A","0B"]', 'ID': '0'}]) - def test_build_ini_targ_map_engie_recorded(self, mock_engines): - fake_lookup_service = FCSanLookupService() - - zone_helper = fc_zone_helper.FCZoneHelper( - fake_lookup_service, self.driver.client) - (tgt_wwns, portg_id, init_targ_map) = zone_helper.build_ini_targ_map( - ['10000090fa0d6754'], '1', '11') - target_port_wwns = ['2000643e8c4c5f66'] - self.assertEqual(target_port_wwns, tgt_wwns) - self.assertEqual({}, init_targ_map) - - @ddt.data(fake_fabric_mapping_no_ports, fake_fabric_mapping_no_wwn) - def test_filter_by_fabric_fail(self, ddt_map): - self.mock_object( - FCSanLookupService, 'get_device_mapping_from_network', - return_value=ddt_map) - fake_lookup_service = FCSanLookupService() - zone_helper = fc_zone_helper.FCZoneHelper( - fake_lookup_service, self.driver.client) - 
self.assertRaises(exception.VolumeBackendAPIException, - zone_helper._filter_by_fabric, ['10000090fa0d6754'], - None) - - @mock.patch.object(rest_client.RestClient, 'get_all_engines', - return_value=[{'NODELIST': '["0A"]', 'ID': '0'}, - {'NODELIST': '["0B"]', 'ID': '1'}]) - @mock.patch.object(fc_zone_helper.FCZoneHelper, '_build_contr_port_map', - return_value={'0B': ['2000643e8c4c5f67']}) - def test_build_ini_targ_map_engie_not_recorded(self, mock_engines, map): - fake_lookup_service = FCSanLookupService() - - zone_helper = fc_zone_helper.FCZoneHelper( - fake_lookup_service, self.driver.client) - (tgt_wwns, portg_id, init_targ_map) = zone_helper.build_ini_targ_map( - ['10000090fa0d6754'], '1', '11') - expected_wwns = ['2000643e8c4c5f67', '2000643e8c4c5f66'] - expected_map = {'10000090fa0d6754': expected_wwns} - self.assertEqual(expected_wwns, tgt_wwns) - self.assertEqual(expected_map, init_targ_map) - - @mock.patch.object(rest_client.RestClient, 'get_all_engines', - return_value=[{'NODELIST': '["0A", "0B"]', 'ID': '0'}]) - def test_build_ini_targ_map_no_map(self, mock_engines): - fake_lookup_service = FCSanLookupService() - - zone_helper = fc_zone_helper.FCZoneHelper( - fake_lookup_service, self.driver.client) - # Host with id '5' has no map on the array. 
- (tgt_wwns, portg_id, init_targ_map) = zone_helper.build_ini_targ_map( - ['10000090fa0d6754'], '5', '11') - expected_wwns = ['2000643e8c4c5f66'] - expected_map = {'10000090fa0d6754': ['2000643e8c4c5f66']} - self.assertEqual(expected_wwns, tgt_wwns) - self.assertEqual(expected_map, init_targ_map) - - @mock.patch.object(rest_client.RestClient, 'get_all_engines', - return_value=[{'NODELIST': '["0A", "0B"]', 'ID': '0'}]) - @mock.patch.object(rest_client.RestClient, 'get_tgt_port_group', - return_value='0') - @mock.patch.object(rest_client.RestClient, 'delete_portgroup') - def test_build_ini_targ_map_exist_portg(self, delete, engines, portg): - fake_lookup_service = FCSanLookupService() - - zone_helper = fc_zone_helper.FCZoneHelper( - fake_lookup_service, self.driver.client) - # Host with id '5' has no map on the array. - (tgt_wwns, portg_id, init_targ_map) = zone_helper.build_ini_targ_map( - ['10000090fa0d6754'], '5', '11') - expected_wwns = ['2000643e8c4c5f66'] - expected_map = {'10000090fa0d6754': ['2000643e8c4c5f66']} - self.assertEqual(expected_wwns, tgt_wwns) - self.assertEqual(expected_map, init_targ_map) - self.assertEqual(1, delete.call_count) - - def test_get_init_targ_map(self): - fake_lookup_service = FCSanLookupService() - - zone_helper = fc_zone_helper.FCZoneHelper( - fake_lookup_service, self.driver.client) - (tgt_wwns, portg_id, init_targ_map) = zone_helper.get_init_targ_map( - ['10000090fa0d6754'], '1') - expected_wwns = ['2000643e8c4c5f66'] - expected_map = {'10000090fa0d6754': ['2000643e8c4c5f66']} - self.assertEqual(expected_wwns, tgt_wwns) - self.assertEqual(expected_map, init_targ_map) - - def test_get_init_targ_map_no_host(self): - fake_lookup_service = FCSanLookupService() - - zone_helper = fc_zone_helper.FCZoneHelper( - fake_lookup_service, self.driver.client) - ret = zone_helper.get_init_targ_map( - ['10000090fa0d6754'], None) - expected_ret = ([], None, {}) - self.assertEqual(expected_ret, ret) - - def test_multi_resturls_success(self): - 
self.driver.client.test_multi_url_flag = True - lun_info = self.driver.create_volume(self.volume) - expect_value = {"huawei_lun_id": "1", - "huawei_lun_wwn": "6643e8c1004c5f6723e9f454003"} - self.assertDictEqual(expect_value, - json.loads(lun_info['provider_location'])) - - def test_get_id_from_result(self): - result = {} - name = 'test_name' - key = 'NAME' - re = self.driver.client._get_id_from_result(result, name, key) - self.assertIsNone(re) - - result = {'data': {}} - re = self.driver.client._get_id_from_result(result, name, key) - self.assertIsNone(re) - - result = {'data': [{'COUNT': 1, 'ID': '1'}, - {'COUNT': 2, 'ID': '2'}]} - - re = self.driver.client._get_id_from_result(result, name, key) - self.assertIsNone(re) - - result = {'data': [{'NAME': 'test_name1', 'ID': '1'}, - {'NAME': 'test_name2', 'ID': '2'}]} - re = self.driver.client._get_id_from_result(result, name, key) - self.assertIsNone(re) - - result = {'data': [{'NAME': 'test_name', 'ID': '1'}, - {'NAME': 'test_name2', 'ID': '2'}]} - re = self.driver.client._get_id_from_result(result, name, key) - self.assertEqual('1', re) - - @mock.patch.object(rest_client.RestClient, 'get_pool_info', - return_value={'ID': 1, - 'CAPACITY': 110362624, - 'TOTALCAPACITY': 209715200}) - def test_get_capacity(self, mock_get_pool_info): - expected_pool_capacity = {'total_capacity': 100.0, - 'free_capacity': 52.625} - pool_capacity = self.driver.client._get_capacity(None, - None) - self.assertEqual(expected_pool_capacity, pool_capacity) - - @mock.patch.object(huawei_driver.HuaweiBaseDriver, '_get_volume_params', - return_value=fake_hypermetro_opts) - @mock.patch.object(rest_client.RestClient, 'get_all_pools', - return_value=FAKE_STORAGE_POOL_RESPONSE) - @mock.patch.object(rest_client.RestClient, 'get_pool_info', - return_value=FAKE_FIND_POOL_RESPONSE) - @mock.patch.object(rest_client.RestClient, 'get_hyper_domain_id', - return_value='11') - @mock.patch.object(hypermetro.HuaweiHyperMetro, '_wait_volume_ready', - 
return_value=True) - @mock.patch.object(hypermetro.HuaweiHyperMetro, - '_create_hypermetro_pair', - return_value={"ID": '11', - "NAME": 'hypermetro-pair'}) - @mock.patch.object(rest_client.RestClient, 'logout', - return_value=None) - def test_create_hypermetro_success(self, mock_hypermetro_opts, - mock_login_return, - mock_all_pool_info, - mock_pool_info, - mock_hyper_domain, - mock_volume_ready, - mock_logout): - - location = {"huawei_lun_id": "1", - "hypermetro_id": "11", - "remote_lun_id": "1", - "huawei_lun_wwn": "6643e8c1004c5f6723e9f454003"} - lun_info = self.driver.create_volume(self.hyper_volume) - self.assertDictEqual(location, - json.loads(lun_info['provider_location'])) - - @ddt.data(FAKE_POOLS_UNSUPPORT_REPORT, FAKE_POOLS_SUPPORT_REPORT) - @mock.patch.object(huawei_driver.HuaweiBaseDriver, '_get_volume_params', - return_value=fake_hypermetro_opts) - @mock.patch.object(rest_client.RestClient, 'get_all_pools', - return_value=FAKE_STORAGE_POOL_RESPONSE) - @mock.patch.object(rest_client.RestClient, 'get_pool_info', - return_value=FAKE_FIND_POOL_RESPONSE) - @mock.patch.object(rest_client.RestClient, 'get_hyper_domain_id', - return_value='11') - @mock.patch.object(hypermetro.HuaweiHyperMetro, '_wait_volume_ready', - return_value=True) - @mock.patch.object(rest_client.RestClient, 'create_hypermetro') - def test_create_hypermetro_fail(self, - pool_data, - mock_pair_info, - mock_hypermetro_opts, - mock_all_pool_info, - mock_pool_info, - mock_hyper_domain, - mock_volume_ready - ): - self.driver.support_func = pool_data - mock_pair_info.side_effect = ( - exception.VolumeBackendAPIException(data='Error occurred.')) - self.assertRaises(exception.VolumeBackendAPIException, - self.driver.metro.create_hypermetro, "11", {}) - - @mock.patch.object(huawei_driver.huawei_utils, 'get_lun_metadata', - return_value={'hypermetro_id': '3400a30d844d0007', - 'remote_lun_id': '1'}) - @mock.patch.object(rest_client.RestClient, 'do_mapping', - return_value={'lun_id': '1', - 
'view_id': '1', - 'aval_luns': '[1]'}) - def test_hypermetro_connection_success_2(self, mock_map, mock_metadata): - fc_properties = self.driver.metro.connect_volume_fc(self.volume, - FakeConnector) - self.assertEqual(1, fc_properties['data']['target_lun']) - mock_map.assert_called_once_with('1', '0', '1', - hypermetro_lun=True) - - @mock.patch.object(huawei_driver.huawei_utils, 'get_lun_metadata', - return_value={'hypermetro_id': '3400a30d844d0007', - 'remote_lun_id': '1'}) - def test_terminate_hypermetro_connection_success(self, mock_metradata): - self.driver.metro.disconnect_volume_fc(self.volume, FakeConnector) - - @mock.patch.object(huawei_driver.huawei_utils, 'get_lun_metadata', - return_value={'hypermetro_id': '3400a30d844d0007', - 'remote_lun_id': None}) - @mock.patch.object(rest_client.RestClient, 'get_lun_id_by_name', - return_value=None) - def test_hypermetroid_none_fail(self, mock_metadata, moke_metro_name): - self.assertRaises(exception.VolumeBackendAPIException, - self.driver.metro.connect_volume_fc, - self.hyper_volume, - FakeConnector) - - def test_wait_volume_ready_success(self): - flag = self.driver.metro._wait_volume_ready("11") - self.assertIsNone(flag) - - @mock.patch.object(huawei_driver.huawei_utils, 'get_lun_metadata', - return_value={'hypermetro_id': '3400a30d844d0007', - 'remote_lun_id': '1'}) - @mock.patch.object(rest_client.RestClient, 'get_online_free_wwns', - return_value=[]) - @mock.patch.object(rest_client.RestClient, 'get_host_iscsi_initiators', - return_value=[]) - def test_hypermetro_connection_fail(self, mock_metadata, - mock_fc_initiator, - mock_host_initiators): - self.assertRaises(exception.VolumeBackendAPIException, - self.driver.metro.connect_volume_fc, - self.hyper_volume, - FakeConnector) - - def test_create_snapshot_fail_hypermetro(self): - self.mock_object( - huawei_driver.HuaweiBaseDriver, - '_get_volume_type', - return_value={'extra_specs': replica_hypermetro_specs}) - 
self.assertRaises(exception.VolumeBackendAPIException, - self.driver.create_volume_from_snapshot, - self.volume, self.snapshot) - - def test_create_snapshot_fail_no_snapshot_id(self): - self.snapshot.provider_location = None - self.mock_object(rest_client.RestClient, 'get_snapshot_id_by_name', - return_value=None) - self.assertRaises(exception.VolumeBackendAPIException, - self.driver.create_volume_from_snapshot, - self.volume, self.snapshot) - - @mock.patch.object(rest_client.RestClient, 'call', - return_value={"data": [{"RUNNINGSTATUS": "27", - "ID": '1'}, - {"RUNNINGSTATUS": "26", - "ID": '2'}], - "error": {"code": 0}}) - def test_get_online_free_wwns(self, mock_call): - wwns = self.driver.client.get_online_free_wwns() - self.assertEqual(['1'], wwns) - - @mock.patch.object(rest_client.RestClient, 'call', - return_value={"data": {"ID": 1}, "error": {"code": 0}}) - def test_rename_lun(self, mock_call): - des = 'This LUN is renamed.' - new_name = 'test_name' - self.driver.client.rename_lun('1', new_name, des) - self.assertEqual(1, mock_call.call_count) - url = "/lun/1" - data = {"NAME": new_name, "DESCRIPTION": des} - mock_call.assert_called_once_with(url, data, "PUT") - - @mock.patch.object(rest_client.RestClient, 'call', - return_value={"data": {}}) - def test_is_host_associated_to_hostgroup_no_data(self, mock_call): - res = self.driver.client.is_host_associated_to_hostgroup('1') - self.assertFalse(res) - - @mock.patch.object(rest_client.RestClient, 'call', - return_value={"data": {'ISADD2HOSTGROUP': 'true'}}) - def test_is_host_associated_to_hostgroup_true(self, mock_call): - res = self.driver.client.is_host_associated_to_hostgroup('1') - self.assertTrue(res) - - @mock.patch.object(rest_client.RestClient, 'call', - return_value={"data": {'ISADD2HOSTGROUP': 'false'}}) - def test_is_host_associated_to_hostgroup_false(self, mock_call): - res = self.driver.client.is_host_associated_to_hostgroup('1') - self.assertFalse(res) - - 
@mock.patch.object(huawei_driver.HuaweiBaseDriver, - '_get_group_type', - return_value=[{"hypermetro": "true"}]) - @cg_or_cg_snapshot - def test_create_hypermetro_group_success(self, mock_grouptype): - """Test that create_group return successfully.""" - ctxt = context.get_admin_context() - # Create group - model_update = self.driver.create_group(ctxt, self.group) - - self.assertEqual(fields.GroupStatus.AVAILABLE, - model_update['status'], - "Group created failed") - - @mock.patch.object(huawei_driver.HuaweiBaseDriver, - '_get_group_type', - return_value=[{"hypermetro": "false"}]) - @cg_or_cg_snapshot - def test_create_normal_group_success(self, mock_grouptype): - """Test that create_group return successfully.""" - ctxt = context.get_admin_context() - # Create group - model_update = self.driver.create_group(ctxt, self.group) - - self.assertEqual(fields.GroupStatus.AVAILABLE, - model_update['status'], - "Group created failed") - - @mock.patch.object(huawei_driver.HuaweiBaseDriver, - '_get_group_type', - return_value=[{"hypermetro": "true"}]) - @cg_or_cg_snapshot - def test_delete_hypermetro_group_success(self, mock_grouptype): - """Test that delete_group return successfully.""" - test_volumes = [self.volume] - ctxt = context.get_admin_context() - # Delete group - model, volumes = self.driver.delete_group(ctxt, self.group, - test_volumes) - self.assertEqual(fields.GroupStatus.DELETED, - model['status'], - "Group deleted failed") - - @mock.patch.object(huawei_driver.HuaweiBaseDriver, - '_get_group_type', - return_value=[{"hypermetro": "false"}]) - @cg_or_cg_snapshot - def test_delete_normal_group_success(self, mock_grouptype): - """Test that delete_group return successfully.""" - ctxt = context.get_admin_context() - test_volumes = [self.volume] - # Delete group - model, volumes = self.driver.delete_group(ctxt, self.group, - test_volumes) - self.assertEqual(fields.GroupStatus.DELETED, - model['status'], - "Group deleted failed") - - 
@mock.patch.object(huawei_driver.HuaweiBaseDriver, - '_get_group_type', - return_value=[{"hypermetro": "true"}]) - @mock.patch.object(huawei_driver.huawei_utils, 'get_lun_metadata', - return_value={'hypermetro_id': '3400a30d844d0007', - 'remote_lun_id': '59'}) - @cg_or_cg_snapshot - def test_update_group_success(self, mock_grouptype, mock_metadata): - """Test that update_group return successfully.""" - ctxt = context.get_admin_context() - add_volumes = [self.volume] - remove_volumes = [self.volume] - # Update group - model_update = self.driver.update_group(ctxt, self.group, - add_volumes, remove_volumes) - self.assertEqual(fields.GroupStatus.AVAILABLE, - model_update[0]['status'], - "Group update failed") - - def test_is_initiator_associated_to_host_raise(self): - self.assertRaises(exception.VolumeBackendAPIException, - self.driver.client.is_initiator_associated_to_host, - 'ini-2', '1') - - def test_is_initiator_associated_to_host_true(self): - ret = self.driver.client.is_initiator_associated_to_host('ini-1', '1') - self.assertFalse(ret) - ret = self.driver.client.is_initiator_associated_to_host('ini-2', '2') - self.assertTrue(ret) - - -@ddt.ddt -class HuaweiConfTestCase(test.TestCase): - def setUp(self): - super(HuaweiConfTestCase, self).setUp() - - self.tmp_dir = tempfile.mkdtemp() - self.fake_xml_file = self.tmp_dir + '/cinder_huawei_conf.xml' - - self.conf = mock.Mock() - self.conf.cinder_huawei_conf_file = self.fake_xml_file - self.huawei_conf = huawei_conf.HuaweiConf(self.conf) - - def _create_fake_conf_file(self, configs): - """Create a fake Config file. - - Huawei storage customize a XML configuration file, the configuration - file is used to set the Huawei storage custom parameters, therefore, - in the UT test we need to simulate such a configuration file. 
- """ - doc = minidom.Document() - - config = doc.createElement('config') - doc.appendChild(config) - - storage = doc.createElement('Storage') - config.appendChild(storage) - url = doc.createElement('RestURL') - url_text = doc.createTextNode('http://192.0.2.69:8082/' - 'deviceManager/rest/') - url.appendChild(url_text) - storage.appendChild(url) - username = doc.createElement('UserName') - username_text = doc.createTextNode('admin') - username.appendChild(username_text) - storage.appendChild(username) - password = doc.createElement('UserPassword') - password_text = doc.createTextNode('Admin@storage') - password.appendChild(password_text) - storage.appendChild(password) - product = doc.createElement('Product') - product_text = doc.createTextNode(configs.get('Product', 'V3')) - product.appendChild(product_text) - storage.appendChild(product) - protocol = doc.createElement('Protocol') - protocol_text = doc.createTextNode('iSCSI') - protocol.appendChild(protocol_text) - storage.appendChild(protocol) - - lun = doc.createElement('LUN') - config.appendChild(lun) - - if 'LUNType' in configs: - luntype = doc.createElement('LUNType') - luntype_text = doc.createTextNode(configs['LUNType']) - luntype.appendChild(luntype_text) - lun.appendChild(luntype) - - lun_ready_wait_interval = doc.createElement('LUNReadyWaitInterval') - lun_ready_wait_interval_text = doc.createTextNode('2') - lun_ready_wait_interval.appendChild(lun_ready_wait_interval_text) - lun.appendChild(lun_ready_wait_interval) - lun_copy_wait_interval = doc.createElement('LUNcopyWaitInterval') - lun_copy_wait_interval_text = doc.createTextNode('2') - lun_copy_wait_interval.appendChild(lun_copy_wait_interval_text) - lun.appendChild(lun_copy_wait_interval) - timeout = doc.createElement('Timeout') - timeout_text = doc.createTextNode('43200') - timeout.appendChild(timeout_text) - lun.appendChild(timeout) - write_type = doc.createElement('WriteType') - write_type_text = doc.createTextNode('1') - 
write_type.appendChild(write_type_text) - lun.appendChild(write_type) - mirror_switch = doc.createElement('MirrorSwitch') - mirror_switch_text = doc.createTextNode('1') - mirror_switch.appendChild(mirror_switch_text) - lun.appendChild(mirror_switch) - prefetch = doc.createElement('Prefetch') - prefetch.setAttribute('Type', '1') - prefetch.setAttribute('Value', '0') - lun.appendChild(prefetch) - pool = doc.createElement('StoragePool') - pool_text = doc.createTextNode('OpenStack_Pool') - pool.appendChild(pool_text) - lun.appendChild(pool) - - iscsi = doc.createElement('iSCSI') - config.appendChild(iscsi) - defaulttargetip = doc.createElement('DefaultTargetIP') - defaulttargetip_text = doc.createTextNode('192.0.2.68') - defaulttargetip.appendChild(defaulttargetip_text) - iscsi.appendChild(defaulttargetip) - initiator = doc.createElement('Initiator') - initiator.setAttribute('Name', 'iqn.1993-08.debian:01:ec2bff7ac3a3') - initiator.setAttribute('TargetIP', '192.0.2.2') - initiator.setAttribute('CHAPinfo', 'mm-user;mm-user@storage') - initiator.setAttribute('ALUA', '1') - initiator.setAttribute('TargetPortGroup', 'PortGroup001') - iscsi.appendChild(initiator) - - fakefile = open(self.conf.cinder_huawei_conf_file, 'w') - fakefile.write(doc.toprettyxml(indent='')) - fakefile.close() - - @ddt.data( - ( - { - 'Product': 'Dorado', - 'LUNType': 'Thin', - }, - 1, - ), - ( - { - 'Product': 'Dorado', - }, - 1, - ), - ( - { - 'Product': 'Dorado', - 'LUNType': 'Thick', - }, - exception.InvalidInput, - ), - ( - { - 'Product': 'V3', - 'LUNType': 'Thick', - }, - 0, - ), - ( - { - 'Product': 'V3', - 'LUNType': 'invalid', - }, - exception.InvalidInput, - ), - ) - @ddt.unpack - def test_luntype_config(self, custom_configs, expect_result): - self._create_fake_conf_file(custom_configs) - tree = ElementTree.parse(self.conf.cinder_huawei_conf_file) - xml_root = tree.getroot() - self.huawei_conf._san_product(xml_root) - - if isinstance(expect_result, int): - 
self.huawei_conf._lun_type(xml_root) - self.assertEqual(expect_result, self.conf.lun_type) - else: - self.assertRaises(expect_result, - self.huawei_conf._lun_type, xml_root) - - -@ddt.ddt -class HuaweiRestClientTestCase(test.TestCase): - def setUp(self): - super(HuaweiRestClientTestCase, self).setUp() - config = mock.Mock(spec=conf.Configuration) - huawei_conf = FakeHuaweiConf(config, 'iSCSI') - huawei_conf.update_config_value() - self.client = rest_client.RestClient( - config, config.san_address, config.san_user, config.san_password) - - def test_init_http_head(self): - self.client.init_http_head() - self.assertIsNone(self.client.url) - self.assertEqual("keep-alive", - self.client.session.headers["Connection"]) - self.assertEqual("application/json", - self.client.session.headers["Content-Type"]) - self.assertEqual(False, self.client.session.verify) - - @ddt.data('POST', 'PUT', 'GET', 'DELETE') - def test_do_call_method(self, method): - self.client.init_http_head() - - if method: - mock_func = self.mock_object(self.client.session, method.lower()) - else: - mock_func = self.mock_object(self.client.session, 'post') - - self.client.do_call("http://fake-rest-url", None, method) - mock_func.assert_called_once_with("http://fake-rest-url", - timeout=constants.SOCKET_TIMEOUT) - - def test_do_call_method_invalid(self): - self.assertRaises(exception.VolumeBackendAPIException, - self.client.do_call, - "http://fake-rest-url", None, 'fake-method') - - def test_do_call_http_error(self): - self.client.init_http_head() - - fake_res = requests.Response() - fake_res.reason = 'something wrong' - fake_res.status_code = 500 - fake_res.url = "http://fake-rest-url" - - self.mock_object(self.client.session, 'post', return_value=fake_res) - res = self.client.do_call("http://fake-rest-url", None, 'POST') - - expected = {"error": {"code": 500, - "description": - '500 Server Error: something wrong for ' - 'url: http://fake-rest-url'}} - self.assertEqual(expected, res) diff --git 
a/cinder/tests/unit/volume/drivers/ibm/__init__.py b/cinder/tests/unit/volume/drivers/ibm/__init__.py deleted file mode 100644 index e69de29bb..000000000 diff --git a/cinder/tests/unit/volume/drivers/ibm/fake_pyxcli.py b/cinder/tests/unit/volume/drivers/ibm/fake_pyxcli.py deleted file mode 100644 index 18e148750..000000000 --- a/cinder/tests/unit/volume/drivers/ibm/fake_pyxcli.py +++ /dev/null @@ -1,32 +0,0 @@ -# Copyright (c) 2016 IBM Corporation -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -# -""" Fake pyxcli-client for testing the driver without installing pyxcli""" -import mock -import sys - -from cinder.tests.unit.volume.drivers.ibm import fake_pyxcli_exceptions - - -pyxcli_client = mock.Mock() -pyxcli_client.errors = fake_pyxcli_exceptions -pyxcli_client.events = mock.Mock() -pyxcli_client.mirroring = mock.Mock() -pyxcli_client.transports = fake_pyxcli_exceptions -pyxcli_client.mirroring.cg_recovery_manager = mock.Mock() - -sys.modules['pyxcli'] = pyxcli_client -sys.modules['pyxcli.events'] = pyxcli_client.events -sys.modules['pyxcli.mirroring'] = pyxcli_client.mirroring diff --git a/cinder/tests/unit/volume/drivers/ibm/fake_pyxcli_exceptions.py b/cinder/tests/unit/volume/drivers/ibm/fake_pyxcli_exceptions.py deleted file mode 100644 index 424c24e4d..000000000 --- a/cinder/tests/unit/volume/drivers/ibm/fake_pyxcli_exceptions.py +++ /dev/null @@ -1,92 +0,0 @@ -# Copyright (c) 2016 IBM Corporation -# All Rights Reserved. 
-# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -# -""" Fake pyxcli exceptions for testing the driver without installing pyxcli""" - - -class XCLIError(Exception): - pass - - -class VolumeBadNameError(XCLIError): - pass - - -class CredentialsError(XCLIError): - pass - - -class ConnectionError(XCLIError): - pass - - -class CgHasMirrorError(XCLIError): - pass - - -class CgDoesNotExistError(XCLIError): - pass - - -class CgEmptyError(XCLIError): - pass - - -class PoolSnapshotLimitReachedError(XCLIError): - pass - - -class CommandFailedRuntimeError(XCLIError): - pass - - -class PoolOutOfSpaceError(XCLIError): - pass - - -class CgLimitReachedError(XCLIError): - pass - - -class HostBadNameError(XCLIError): - pass - - -class CgNotEmptyError(XCLIError): - pass - - -class SystemOutOfSpaceError(XCLIError): - pass - - -class CgNameExistsError(XCLIError): - pass - - -class CgBadNameError(XCLIError): - pass - - -class SnapshotGroupDoesNotExistError(XCLIError): - pass - - -class ClosedTransportError(XCLIError): - pass - - -class VolumeNotInConsGroup(XCLIError): - pass diff --git a/cinder/tests/unit/volume/drivers/ibm/test_ds8k_proxy.py b/cinder/tests/unit/volume/drivers/ibm/test_ds8k_proxy.py deleted file mode 100644 index a0ceece44..000000000 --- a/cinder/tests/unit/volume/drivers/ibm/test_ds8k_proxy.py +++ /dev/null @@ -1,3272 +0,0 @@ -# Copyright (c) 2016 IBM Corporation -# All Rights Reserved. 
-# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -# -"""Tests for the IBM DS8K family driver.""" -import ast -import copy -import eventlet -import json -import mock -import six - -from cinder import context -from cinder import exception -from cinder.objects import fields -from cinder import test -from cinder.tests.unit import utils as testutils -from cinder.volume import configuration as conf -import cinder.volume.drivers.ibm.ibm_storage as storage -from cinder.volume.drivers.ibm.ibm_storage import proxy -from cinder.volume import group_types -from cinder.volume import volume_types - -# mock decorator logger for all unit test cases. 
-mock_logger = mock.patch.object(proxy, 'logger', lambda x: x) -mock_logger.start() -from cinder.volume.drivers.ibm.ibm_storage import ( - ds8k_replication as replication) -from cinder.volume.drivers.ibm.ibm_storage import ds8k_helper as helper -from cinder.volume.drivers.ibm.ibm_storage import ds8k_proxy as ds8kproxy -from cinder.volume.drivers.ibm.ibm_storage import ds8k_restclient as restclient -mock_logger.stop() - -TEST_VOLUME_ID = '0001' -TEST_HOST_ID = 'H1' -TEST_VOLUME_BACKEND_NAME = 'ds8k_backend' -TEST_GROUP_HOST = 'test_host@' + TEST_VOLUME_BACKEND_NAME + '#fakepool' -TEST_LUN_ID = '00' -TEST_POOLS_STR = 'P0,P1' -TEST_POOL_ID_1 = 'P0' -TEST_POOL_ID_2 = 'P1' -TEST_POOL_NAME_1 = 'OPENSTACK_DEV_0' -TEST_POOL_NAME_2 = 'OPENSTACK_DEV_1' -TEST_SOURCE_DS8K_IP = '1.1.1.1' -TEST_TARGET_DS8K_IP = '2.2.2.2' -TEST_SOURCE_WWNN = '5000000000FFC111' -TEST_TARGET_WWNN = '5000000000FFD222' -TEST_SOURCE_WWPN_1 = '10000090FA3418BC' -TEST_SOURCE_WWPN_2 = '10000090FA3418BD' -TEST_SOURCE_IOPORT = 'I0001' -TEST_TARGET_IOPORT = 'I0002' -TEST_LSS_ID_1 = '00' -TEST_LSS_ID_2 = '01' -TEST_LSS_ID_3 = '02' -TEST_PPRC_PATH_ID_1 = (TEST_SOURCE_WWNN + "_" + TEST_LSS_ID_1 + ":" + - TEST_TARGET_WWNN + "_" + TEST_LSS_ID_1) -TEST_PPRC_PATH_ID_2 = (TEST_TARGET_WWNN + "_" + TEST_LSS_ID_1 + ":" + - TEST_SOURCE_WWNN + "_" + TEST_LSS_ID_1) -TEST_ECKD_VOLUME_ID = '1001' -TEST_ECKD_POOL_ID = 'P10' -TEST_ECKD_POOL_NAME = 'OPENSTACK_DEV_10' -TEST_LCU_ID = '10' -TEST_ECKD_PPRC_PATH_ID = (TEST_SOURCE_WWNN + "_" + TEST_LCU_ID + ":" + - TEST_TARGET_WWNN + "_" + TEST_LCU_ID) -TEST_SOURCE_SYSTEM_UNIT = u'2107-1111111' -TEST_TARGET_SYSTEM_UNIT = u'2107-2222222' -TEST_SOURCE_VOLUME_ID = TEST_VOLUME_ID -TEST_TARGET_VOLUME_ID = TEST_VOLUME_ID -TEST_PPRC_PAIR_ID = (TEST_SOURCE_SYSTEM_UNIT + '_' + - TEST_SOURCE_VOLUME_ID + ':' + - TEST_TARGET_SYSTEM_UNIT + '_' + - TEST_TARGET_VOLUME_ID) -TEST_FLASHCOPY = { - 'sourcevolume': {'id': 'fake_volume_id_1'}, - 'targetvolume': {'id': 'fake_volume_id_2'}, - 
'persistent': 'enabled', - 'recording': 'enabled', - 'backgroundcopy': 'disabled', - 'state': 'valid' -} -TEST_CONNECTOR = { - 'ip': '192.168.1.2', - 'initiator': 'iqn.1993-08.org.debian:01:fdf9fdfd', - 'wwpns': [TEST_SOURCE_WWPN_1, TEST_SOURCE_WWPN_2], - 'platform': 'x86_64', - 'os_type': 'linux2', - 'host': 'fakehost' -} -TEST_REPLICATION_DEVICE = { - 'san_ip': TEST_TARGET_DS8K_IP, - 'san_login': 'fake', - 'san_clustername': TEST_POOL_ID_1, - 'san_password': 'fake', - 'backend_id': TEST_TARGET_DS8K_IP, - 'connection_type': storage.XIV_CONNECTION_TYPE_FC, - 'ds8k_logical_control_unit_range': '' -} -FAKE_GET_LSS_RESPONSE = { - "server": - { - "status": "ok", - "code": "", - "message": "Operation done successfully." - }, - "data": - { - "lss": - [ - { - "id": TEST_LSS_ID_1, - "group": "0", - "addrgrp": "0", - "type": "fb", - "configvols": "10" - }, - { - "id": TEST_LSS_ID_2, - "group": "1", - "addrgrp": "0", - "type": "fb", - "configvols": "20" - }, - { - "id": TEST_LSS_ID_3, - "group": "0", - "addrgrp": "0", - "type": "fb", - "configvols": "30" - }, - { - "id": "10", - "group": "0", - "addrgrp": "1", - "type": "ckd", - "configvols": "12" - } - ] - } -} -FAKE_GET_FB_LSS_RESPONSE_1 = { - "server": - { - "status": "ok", - "code": "", - "message": "Operation done successfully." - }, - "data": - { - "lss": - [ - { - "id": TEST_LSS_ID_1, - "group": "0", - "addrgrp": "0", - "type": "fb", - "configvols": "10", - } - ] - } -} -FAKE_GET_FB_LSS_RESPONSE_2 = { - "server": - { - "status": "ok", - "code": "", - "message": "Operation done successfully." - }, - "data": - { - "lss": - [ - { - "id": TEST_LSS_ID_2, - "group": "1", - "addrgrp": "0", - "type": "fb", - "configvols": "20", - } - ] - } -} -FAKE_GET_FB_LSS_RESPONSE_3 = { - "server": - { - "status": "ok", - "code": "", - "message": "Operation done successfully." 
- }, - "data": - { - "lss": - [ - { - "id": TEST_LSS_ID_3, - "group": "0", - "addrgrp": "0", - "type": "fb", - "configvols": "30", - } - ] - } -} -FAKE_GET_CKD_LSS_RESPONSE = { - "server": - { - "status": "ok", - "code": "", - "message": "Operation done successfully." - }, - "data": - { - "lss": - [ - { - "id": "10", - "group": "0", - "addrgrp": "1", - "type": "ckd", - "configvols": "10", - } - ] - } -} -FAKE_CREATE_VOLUME_RESPONSE = { - "server": - { - "status": "ok", - "code": "", - "message": "Operation done successfully." - }, - "data": - { - "volumes": - [ - { - "id": TEST_VOLUME_ID, - "name": "fake_volume" - } - ] - }, - "link": - { - "rel": "self", - "href": "https://1.1.1.1:8452/api/v1/volumes/" + TEST_VOLUME_ID - } -} -FAKE_GET_PPRC_PATH_RESPONSE = { - "server": - { - "status": "ok", - "code": "", - "message": "Operation done successfully." - }, - "data": - { - "paths": - [ - { - "id": TEST_PPRC_PATH_ID_1, - "source_lss_id": TEST_LSS_ID_1, - "target_lss_id": TEST_LSS_ID_1, - "target_system_wwnn": TEST_TARGET_WWNN, - "port_pairs": - [ - { - "source_port_id": TEST_SOURCE_IOPORT, - "target_port_id": TEST_TARGET_IOPORT, - "state": "success" - } - ] - }, - { - "id": TEST_ECKD_PPRC_PATH_ID, - "source_lss_id": TEST_LCU_ID, - "target_lss_id": TEST_LCU_ID, - "target_system_wwnn": TEST_TARGET_WWNN, - "port_pairs": - [ - { - "source_port_id": TEST_SOURCE_IOPORT, - "target_port_id": TEST_TARGET_IOPORT, - "state": "success" - } - ] - } - ] - } -} -FAKE_GET_PPRC_PATH_RESPONSE_1 = { - "server": - { - "status": "ok", - "code": "", - "message": "Operation done successfully." 
- }, - "data": - { - "paths": - [ - { - "id": TEST_PPRC_PATH_ID_1, - "source_lss_id": TEST_LSS_ID_1, - "target_lss_id": TEST_LSS_ID_1, - "target_system_wwnn": TEST_TARGET_WWNN, - "port_pairs": - [ - { - "source_port_id": TEST_SOURCE_IOPORT, - "target_port_id": TEST_TARGET_IOPORT, - "state": "success" - } - ] - } - ] - } -} -FAKE_GET_PPRC_PATH_RESPONSE_2 = { - "server": - { - "status": "ok", - "code": "", - "message": "Operation done successfully." - }, - "data": - { - "paths": - [ - { - "id": TEST_PPRC_PATH_ID_2, - "source_lss_id": TEST_LSS_ID_1, - "target_lss_id": TEST_LSS_ID_1, - "target_system_wwnn": TEST_SOURCE_WWNN, - "port_pairs": - [ - { - "source_port_id": TEST_TARGET_IOPORT, - "target_port_id": TEST_SOURCE_IOPORT, - "state": "success" - } - ] - } - ] - } -} -FAKE_GET_ECKD_PPRC_PATH_RESPONSE = { - "server": - { - "status": "ok", - "code": "", - "message": "Operation done successfully." - }, - "data": - { - "paths": - [ - { - "id": TEST_ECKD_PPRC_PATH_ID, - "source_lss_id": TEST_LCU_ID, - "target_lss_id": TEST_LCU_ID, - "target_system_wwnn": TEST_TARGET_WWNN, - "port_pairs": - [ - { - "source_port_id": TEST_SOURCE_IOPORT, - "target_port_id": TEST_TARGET_IOPORT, - "state": "success" - } - ] - } - ] - } -} -FAKE_GET_PPRCS_RESPONSE = { - "server": - { - "status": "ok", - "code": "", - "message": "Operation done successfully." - }, - "data": - { - "pprcs": - [ - { - "id": TEST_PPRC_PAIR_ID, - "source_volume": - { - "name": TEST_SOURCE_VOLUME_ID, - }, - "source_system": - { - "id": TEST_SOURCE_SYSTEM_UNIT, - }, - "target_volume": - { - "name": TEST_TARGET_VOLUME_ID, - }, - "target_system": - { - "id": TEST_TARGET_SYSTEM_UNIT, - }, - "type": "metro_mirror", - "state": "full_duplex" - } - ] - } -} -FAKE_GET_POOL_RESPONSE_1 = { - "server": - { - "status": "ok", - "code": "", - "message": "Operation done successfully." 
- }, - "data": - { - "pools": - [ - { - "id": TEST_POOL_ID_1, - "name": TEST_POOL_NAME_1, - "node": "0", - "stgtype": "fb", - "cap": "10737418240", - "capavail": "10737418240" - } - ] - } -} -FAKE_GET_POOL_RESPONSE_2 = { - "server": - { - "status": "ok", - "code": "", - "message": "Operation done successfully." - }, - "data": - { - "pools": - [ - { - "id": TEST_POOL_ID_2, - "name": TEST_POOL_NAME_2, - "node": "1", - "stgtype": "fb", - "cap": "10737418240", - "capavail": "10737418240" - } - ] - } -} -FAKE_GET_ECKD_POOL_RESPONSE = { - "server": - { - "status": "ok", - "code": "", - "message": "Operation done successfully." - }, - "data": - { - "pools": - [ - { - "id": TEST_ECKD_POOL_ID, - "name": TEST_ECKD_POOL_NAME, - "node": "0", - "stgtype": "ckd", - "cap": "10737418240", - "capavail": "10737418240" - } - ] - } -} -FAKE_GET_TOKEN_RESPONSE = { - "server": - { - "status": "ok", - "code": "", - "message": "Operation done successfully." - }, - "token": - { - "token": "8cf01a2771a04035bcffb7f4a62e9df8", - "expired_time": "2016-08-06T06:36:54-0700", - "max_idle_interval": "1800000" - } -} - -FAKE_GET_PHYSICAL_LINKS_RESPONSE = { - "server": - { - "status": "ok", - "code": "", - "message": "Operation done successfully." - }, - "data": - { - "physical_links": - [ - { - "source_port_id": TEST_SOURCE_IOPORT, - "target_port_id": TEST_TARGET_IOPORT - } - ] - } -} -FAKE_GET_SYSTEM_RESPONSE_1 = { - "server": - { - "status": "ok", - "code": "", - "message": "Operation done successfully." - }, - "data": - { - "systems": - [ - { - "id": TEST_SOURCE_SYSTEM_UNIT, - "name": "", - "state": "online", - "release": "7.5.1", - "bundle": "87.51.9.0", - "MTM": "2421-961", - "sn": "1300741", - "wwnn": TEST_SOURCE_WWNN, - "cap": "28019290210304", - "capalloc": "6933150957568", - "capavail": "21086139252736", - "capraw": "40265318400000" - } - ] - } -} -FAKE_GET_SYSTEM_RESPONSE_2 = { - "server": - { - "status": "ok", - "code": "", - "message": "Operation done successfully." 
- }, - "data": - { - "systems": - [ - { - "id": TEST_TARGET_SYSTEM_UNIT, - "name": "", - "state": "online", - "release": "7.5.1", - "bundle": "87.51.9.0", - "MTM": "2421-962", - "sn": "1300742", - "wwnn": TEST_TARGET_WWNN, - "cap": "20019290210304", - "capalloc": "4833150957560", - "capavail": "31086139252736", - "capraw": "20265318400000" - } - ] - } -} -FAKE_GET_REST_VERSION_RESPONSE = { - "server": - { - "status": "ok", - "code": "", - "message": "Operation done successfully." - }, - "data": - { - "api_info": - [ - { - "bundle_version": "5.7.51.1068" - } - ] - } -} -FAKE_GET_HOST_PORTS_RESPONSE = { - "server": - { - "status": "ok", - "code": "", - "message": "Operation done successfully." - }, - "data": - { - "host_ports": - [ - { - "wwpn": TEST_SOURCE_WWPN_1, - "link": {}, - "state": "logged in", - "hosttype": "LinuxRHEL", - "addrdiscovery": "lunpolling", - "lbs": "512", - "wwnn": "", - "login_type": "", - "logical_path_established": "", - "login_ports": [], - "host_id": TEST_HOST_ID, - "host": - { - "name": "OShost:fakehost", - "link": {} - } - } - ] - } -} -FAKE_MAP_VOLUME_RESPONSE = { - "server": - { - "status": "ok", - "code": "", - "message": "Operation done successfully." - }, - "data": - { - "mappings": - [ - { - "lunid": TEST_LUN_ID, - } - ] - }, - "link": - { - "rel": "self", - "href": ("https://1.1.1.1:8452/api/v1/hosts[id=" + - TEST_HOST_ID + "]/mappings/" + TEST_LUN_ID) - } -} -FAKE_GET_IOPORT_RESPONSE = { - "server": - { - "status": "ok", - "code": "", - "message": "Operation done successfully." 
- }, - "data": - { - "ioports": - [ - { - "id": "I0001", - "link": - { - "rel": "self", - "href": "https://1.1.1.1:8452/api/v1/ioports/I0001" - }, - "state": "online", - "protocol": "SCSI-FCP", - "wwpn": TEST_SOURCE_WWPN_1, - "type": "Fibre Channel-SW", - "speed": "8 Gb/s", - "loc": "U1400.1B3.RJ03177-P1-C1-T0", - "io_enclosure": - { - "id": "2", - "link": {} - } - } - ] - } -} -FAKE_CREATE_HOST_RESPONSE = { - "server": - { - "status": "ok", - "code": "", - "message": "Operation done successfully." - }, - "data": - { - "hosts": - [ - { - "id": TEST_HOST_ID - } - ] - }, - "link": - { - "rel": "self", - "href": "https://1.1.1.1:8452/api/v1/hosts/testHost_1" - } -} -FAKE_GET_MAPPINGS_RESPONSE = { - "server": - { - "status": "ok", - "code": "", - "message": "Operation done successfully." - }, - "data": - { - "mappings": - [ - { - "lunid": TEST_LUN_ID, - "link": {}, - "volume": - { - "id": TEST_VOLUME_ID, - "link": {} - } - }, - { - "lunid": "01", - "link": {}, - "volume": - { - "id": "0002", - "link": {} - } - } - ] - } -} -FAKE_GET_VOLUME_RESPONSE = { - "server": - { - "status": "ok", - "code": "", - "message": "Operation done successfully." - }, - "data": - { - "volumes": - [ - { - "id": TEST_VOLUME_ID, - "link": {}, - "name": "OSvol:vol_1001", - "pool": - { - "id": TEST_POOL_ID_1, - "link": {} - } - } - ] - } -} -FAKE_GENERIC_RESPONSE = { - "server": - { - "status": "ok", - "code": "", - "message": "Operation done successfully." - }, - "responses": - [ - { - "server": - { - "status": "ok", - "code": "", - "message": "Operation done successfully." 
- } - } - ] -} -FAKE_DELETE_VOLUME_RESPONSE = FAKE_GENERIC_RESPONSE -FAKE_DELETE_PPRC_PAIR_RESPONSE = FAKE_GENERIC_RESPONSE -FAKE_FAILBACK_RESPONSE = FAKE_GENERIC_RESPONSE -FAKE_FAILOVER_RESPONSE = FAKE_GENERIC_RESPONSE -FAKE_CHANGE_VOLUME_RESPONSE = FAKE_GENERIC_RESPONSE -FAKE_POST_FLASHCOPIES_RESPONSE = FAKE_GENERIC_RESPONSE -FAKE_POST_UNFREEZE_FLASHCOPIES_RESPONSE = FAKE_GENERIC_RESPONSE -FAKE_CREATE_LCU_RESPONSE = FAKE_GENERIC_RESPONSE -FAKE_ASSIGN_HOST_PORT_RESPONSE = FAKE_GENERIC_RESPONSE -FAKE_DELETE_MAPPINGS_RESPONSE = FAKE_GENERIC_RESPONSE -FAKE_DELETE_HOST_PORTS_RESPONSE = FAKE_GENERIC_RESPONSE -FAKE_DELETE_HOSTS_RESPONSE = FAKE_GENERIC_RESPONSE - -FAKE_REST_API_RESPONSES = { - TEST_SOURCE_DS8K_IP + '/get': - FAKE_GET_REST_VERSION_RESPONSE, - TEST_TARGET_DS8K_IP + '/get': - FAKE_GET_REST_VERSION_RESPONSE, - TEST_SOURCE_DS8K_IP + '/systems/get': - FAKE_GET_SYSTEM_RESPONSE_1, - TEST_TARGET_DS8K_IP + '/systems/get': - FAKE_GET_SYSTEM_RESPONSE_2, - TEST_SOURCE_DS8K_IP + '/volumes/post': - FAKE_CREATE_VOLUME_RESPONSE, - TEST_TARGET_DS8K_IP + '/volumes/post': - FAKE_CREATE_VOLUME_RESPONSE, - TEST_SOURCE_DS8K_IP + '/volumes/' + TEST_VOLUME_ID + '/get': - FAKE_GET_VOLUME_RESPONSE, - TEST_TARGET_DS8K_IP + '/volumes/' + TEST_VOLUME_ID + '/get': - FAKE_GET_VOLUME_RESPONSE, - TEST_SOURCE_DS8K_IP + '/volumes/' + TEST_VOLUME_ID + '/put': - FAKE_CHANGE_VOLUME_RESPONSE, - TEST_TARGET_DS8K_IP + '/volumes/' + TEST_VOLUME_ID + '/put': - FAKE_CHANGE_VOLUME_RESPONSE, - TEST_SOURCE_DS8K_IP + '/volumes/delete': - FAKE_DELETE_VOLUME_RESPONSE, - TEST_SOURCE_DS8K_IP + '/volumes/' + TEST_VOLUME_ID + '/delete': - FAKE_DELETE_VOLUME_RESPONSE, - TEST_TARGET_DS8K_IP + '/volumes/' + TEST_VOLUME_ID + '/delete': - FAKE_DELETE_VOLUME_RESPONSE, - TEST_SOURCE_DS8K_IP + '/lss/get': - FAKE_GET_LSS_RESPONSE, - TEST_TARGET_DS8K_IP + '/lss/get': - FAKE_GET_LSS_RESPONSE, - TEST_SOURCE_DS8K_IP + '/lss/' + TEST_LSS_ID_1 + '/get': - FAKE_GET_FB_LSS_RESPONSE_1, - TEST_TARGET_DS8K_IP + '/lss/' + 
TEST_LSS_ID_1 + '/get': - FAKE_GET_FB_LSS_RESPONSE_1, - TEST_SOURCE_DS8K_IP + '/lss/' + TEST_LSS_ID_2 + '/get': - FAKE_GET_FB_LSS_RESPONSE_2, - TEST_TARGET_DS8K_IP + '/lss/' + TEST_LSS_ID_2 + '/get': - FAKE_GET_FB_LSS_RESPONSE_2, - TEST_SOURCE_DS8K_IP + '/lss/' + TEST_LSS_ID_3 + '/get': - FAKE_GET_FB_LSS_RESPONSE_3, - TEST_TARGET_DS8K_IP + '/lss/' + TEST_LSS_ID_3 + '/get': - FAKE_GET_FB_LSS_RESPONSE_3, - TEST_SOURCE_DS8K_IP + '/lss/' + TEST_LCU_ID + '/get': - FAKE_GET_CKD_LSS_RESPONSE, - TEST_TARGET_DS8K_IP + '/lss/' + TEST_LCU_ID + '/get': - FAKE_GET_CKD_LSS_RESPONSE, - TEST_SOURCE_DS8K_IP + '/lss/fb/get': - FAKE_GET_FB_LSS_RESPONSE_1, - TEST_SOURCE_DS8K_IP + '/lss/ckd/get': - FAKE_GET_CKD_LSS_RESPONSE, - TEST_SOURCE_DS8K_IP + '/lss/post': - FAKE_CREATE_LCU_RESPONSE, - TEST_SOURCE_DS8K_IP + '/pools/' + TEST_POOL_ID_1 + '/get': - FAKE_GET_POOL_RESPONSE_1, - TEST_TARGET_DS8K_IP + '/pools/' + TEST_POOL_ID_1 + '/get': - FAKE_GET_POOL_RESPONSE_1, - TEST_SOURCE_DS8K_IP + '/pools/' + TEST_POOL_ID_2 + '/get': - FAKE_GET_POOL_RESPONSE_2, - TEST_TARGET_DS8K_IP + '/pools/' + TEST_POOL_ID_2 + '/get': - FAKE_GET_POOL_RESPONSE_2, - TEST_SOURCE_DS8K_IP + '/pools/' + TEST_ECKD_POOL_ID + '/get': - FAKE_GET_ECKD_POOL_RESPONSE, - TEST_TARGET_DS8K_IP + '/pools/' + TEST_ECKD_POOL_ID + '/get': - FAKE_GET_ECKD_POOL_RESPONSE, - TEST_SOURCE_DS8K_IP + '/tokens/post': - FAKE_GET_TOKEN_RESPONSE, - TEST_TARGET_DS8K_IP + '/tokens/post': - FAKE_GET_TOKEN_RESPONSE, - TEST_SOURCE_DS8K_IP + '/cs/pprcs/paths/' + TEST_PPRC_PATH_ID_1 + '/get': - FAKE_GET_PPRC_PATH_RESPONSE_1, - TEST_TARGET_DS8K_IP + '/cs/pprcs/paths/' + TEST_PPRC_PATH_ID_2 + '/get': - FAKE_GET_PPRC_PATH_RESPONSE_2, - TEST_SOURCE_DS8K_IP + '/cs/pprcs/paths/' + TEST_ECKD_PPRC_PATH_ID + '/get': - FAKE_GET_ECKD_PPRC_PATH_RESPONSE, - TEST_SOURCE_DS8K_IP + '/cs/pprcs/paths/get': - FAKE_GET_PPRC_PATH_RESPONSE, - TEST_SOURCE_DS8K_IP + '/cs/pprcs/get': - FAKE_GET_PPRCS_RESPONSE, - TEST_TARGET_DS8K_IP + '/cs/pprcs/get': - 
FAKE_GET_PPRCS_RESPONSE, - TEST_SOURCE_DS8K_IP + '/cs/pprcs/post': - FAKE_FAILOVER_RESPONSE, - TEST_TARGET_DS8K_IP + '/cs/pprcs/post': - FAKE_FAILOVER_RESPONSE, - TEST_SOURCE_DS8K_IP + '/cs/pprcs/delete/post': - FAKE_DELETE_PPRC_PAIR_RESPONSE, - TEST_TARGET_DS8K_IP + '/cs/pprcs/delete/post': - FAKE_FAILBACK_RESPONSE, - TEST_SOURCE_DS8K_IP + '/cs/pprcs/resume/post': - FAKE_FAILBACK_RESPONSE, - TEST_TARGET_DS8K_IP + '/cs/pprcs/resume/post': - FAKE_FAILBACK_RESPONSE, - TEST_SOURCE_DS8K_IP + '/cs/flashcopies/post': - FAKE_POST_FLASHCOPIES_RESPONSE, - TEST_SOURCE_DS8K_IP + '/cs/flashcopies/unfreeze/post': - FAKE_POST_UNFREEZE_FLASHCOPIES_RESPONSE, - TEST_SOURCE_DS8K_IP + '/cs/pprcs/physical_links/get': - FAKE_GET_PHYSICAL_LINKS_RESPONSE, - TEST_SOURCE_DS8K_IP + '/host_ports/get': - FAKE_GET_HOST_PORTS_RESPONSE, - TEST_SOURCE_DS8K_IP + '/hosts%5Bid=' + TEST_HOST_ID + '%5D/mappings/post': - FAKE_MAP_VOLUME_RESPONSE, - TEST_SOURCE_DS8K_IP + '/ioports/get': - FAKE_GET_IOPORT_RESPONSE, - TEST_SOURCE_DS8K_IP + '/hosts/post': - FAKE_CREATE_HOST_RESPONSE, - TEST_SOURCE_DS8K_IP + '/host_ports/assign/post': - FAKE_ASSIGN_HOST_PORT_RESPONSE, - TEST_SOURCE_DS8K_IP + '/hosts%5Bid=' + TEST_HOST_ID + '%5D/mappings/get': - FAKE_GET_MAPPINGS_RESPONSE, - TEST_SOURCE_DS8K_IP + '/hosts%5Bid=' + TEST_HOST_ID + '%5D/mappings/' + - TEST_LUN_ID + '/delete': - FAKE_DELETE_MAPPINGS_RESPONSE, - TEST_SOURCE_DS8K_IP + '/host_ports/' + TEST_SOURCE_WWPN_2 + '/delete': - FAKE_DELETE_HOST_PORTS_RESPONSE, - TEST_SOURCE_DS8K_IP + '/hosts%5Bid=' + TEST_HOST_ID + '%5D/delete': - FAKE_DELETE_HOSTS_RESPONSE -} - - -class FakeDefaultRESTConnector(restclient.DefaultRESTConnector): - """Fake the Default Connector.""" - - def connect(self): - pass - - def send(self, method='', url='', headers=None, payload='', timeout=900): - host = url.split('https://')[1].split(':8452')[0] - endpoint = url.split('v1')[1].split('?')[0] - start = url.index('type') if 'type=' in url else None - if start: - type_str = 
url[start:].split('&')[0].split('=')[1] - else: - type_str = '' - request = host + endpoint + '/' + type_str + method.lower() - return 200, json.dumps(FAKE_REST_API_RESPONSES[request]) - - -class FakeRESTScheduler(restclient.RESTScheduler): - """Fake REST Scheduler.""" - - def __init__(self, host, user, passw, connector_obj, verify=False): - self.token = '' - self.host = host - self.port = '8452' - self.user = user - self.passw = passw - self.connector = connector_obj or FakeDefaultRESTConnector(verify) - self.connect() - - -class FakeDS8KCommonHelper(helper.DS8KCommonHelper): - """Fake IBM DS8K Helper.""" - - def __init__(self, conf, HTTPConnectorObject=None): - self.conf = conf - self._connector_obj = HTTPConnectorObject - self._connection_type = self._get_value('connection_type') - self._storage_pools = None - self.backend = {} - self.setup() - - def _get_value(self, key): - value = getattr(self.conf, key, None) - if not value and key not in self.OPTIONAL_PARAMS: - value = self.conf.get(key) - return value - - def _create_client(self): - self._client = FakeRESTScheduler(self._get_value('san_ip'), - self._get_value('san_login'), - self._get_value('san_password'), - None, True) - self.backend['rest_version'] = self._get_version()['bundle_version'] - - -class FakeDS8KECKDHelper(FakeDS8KCommonHelper, helper.DS8KECKDHelper): - """Fake IBM DS8K ECKD Helper.""" - - pass - - -class FakeDS8KReplSourceHelper(FakeDS8KCommonHelper, - helper.DS8KReplicationSourceHelper): - """Fake IBM DS8K Replication Target Helper.""" - - pass - - -class FakeDS8KReplTargetHelper(FakeDS8KReplSourceHelper, - helper.DS8KReplicationTargetHelper): - """Fake IBM DS8K Replication Target Helper.""" - - pass - - -class FakeDS8KReplTargetECKDHelper(FakeDS8KECKDHelper, - helper.DS8KReplicationTargetECKDHelper): - """Fake IBM DS8K Replication Target ECKD Helper.""" - - pass - - -class FakeReplication(replication.Replication): - """Fake Replication class.""" - - def __init__(self, source_helper, 
device): - self._source_helper = source_helper - if device.get('connection_type') == storage.XIV_CONNECTION_TYPE_FC: - self._target_helper = FakeDS8KReplTargetHelper(device) - else: - self._target_helper = FakeDS8KReplTargetECKDHelper(device) - self._mm_manager = replication.MetroMirrorManager(self._source_helper, - self._target_helper) - - -class FakeDS8KProxy(ds8kproxy.DS8KProxy): - """Fake IBM DS8K Proxy Driver.""" - - def __init__(self, storage_info, logger, exception, - driver=None, active_backend_id=None, - HTTPConnectorObject=None): - with mock.patch.object(proxy.IBMStorageProxy, - '_get_safely_from_configuration') as get_conf: - get_conf.side_effect = [{}, False] - proxy.IBMStorageProxy.__init__(self, storage_info, logger, - exception, driver, - active_backend_id) - self._helper = None - self._replication = None - self._connector_obj = HTTPConnectorObject - self._replication_enabled = False - self._active_backend_id = active_backend_id - self.configuration = driver.configuration - self.consisgroup_cache = {} - self.setup(None) - - def setup(self, context): - connection_type = self.configuration.connection_type - repl_devices = getattr(self.configuration, 'replication_device', None) - if connection_type == storage.XIV_CONNECTION_TYPE_FC: - if not repl_devices: - self._helper = FakeDS8KCommonHelper(self.configuration, - self._connector_obj) - else: - self._helper = FakeDS8KReplSourceHelper( - self.configuration, self._connector_obj) - else: - self._helper = FakeDS8KECKDHelper(self.configuration, - self._connector_obj) - # set up replication target - if repl_devices: - self._do_replication_setup(repl_devices, self._helper) - - def _do_replication_setup(self, devices, src_helper): - self._replication = FakeReplication(src_helper, devices[0]) - if self._active_backend_id: - self._switch_backend_connection(self._active_backend_id) - else: - self._replication.check_physical_links() - self._replication_enabled = True - - -class DS8KProxyTest(test.TestCase): - 
"""Test proxy for DS8K volume driver.""" - - VERSION = "2.0.0" - - def setUp(self): - """Initialize IBM DS8K Driver.""" - super(DS8KProxyTest, self).setUp() - self.ctxt = context.get_admin_context() - - self.configuration = mock.Mock(conf.Configuration) - self.configuration.connection_type = storage.XIV_CONNECTION_TYPE_FC - self.configuration.chap = 'disabled' - self.configuration.san_ip = TEST_SOURCE_DS8K_IP - self.configuration.management_ips = '' - self.configuration.san_login = 'fake' - self.configuration.san_clustername = TEST_POOL_ID_1 - self.configuration.san_password = 'fake' - self.configuration.volume_backend_name = TEST_VOLUME_BACKEND_NAME - self.configuration.ds8k_host_type = 'auto' - self.configuration.reserved_percentage = 0 - self.storage_info = mock.MagicMock() - self.logger = mock.MagicMock() - self.exception = mock.MagicMock() - - def _create_volume(self, **kwargs): - properties = { - 'host': 'openstack@ds8k_backend#ds8k_pool', - 'size': 1 - } - for p in properties.keys(): - if p not in kwargs: - kwargs[p] = properties[p] - return testutils.create_volume(self.ctxt, **kwargs) - - def _create_snapshot(self, **kwargs): - return testutils.create_snapshot(self.ctxt, **kwargs) - - def _create_group(self, **kwargs): - return testutils.create_group(self.ctxt, **kwargs) - - def _create_group_snapshot(self, group_id, **kwargs): - return testutils.create_group_snapshot(self.ctxt, - group_id=group_id, - **kwargs) - - def test_check_host_type(self): - """host type should be a valid one.""" - self.configuration.ds8k_host_type = 'fake_OS' - self.assertRaises(exception.InvalidParameterValue, - FakeDS8KCommonHelper, self.configuration, None) - - @mock.patch.object(helper.DS8KCommonHelper, 'get_systems') - def test_verify_version_of_8_0_1(self, mock_get_systems): - """8.0.1 should not use this driver.""" - mock_get_systems.return_value = { - "id": TEST_SOURCE_SYSTEM_UNIT, - "release": "8.0.1", - "wwnn": TEST_SOURCE_WWNN, - } - 
self.assertRaises(exception.VolumeDriverException, - FakeDS8KCommonHelper, self.configuration, None) - - @mock.patch.object(helper.DS8KCommonHelper, '_get_version') - def test_verify_rest_version_for_5_7_fb(self, mock_get_version): - """test the min version of REST for fb volume in 7.x.""" - mock_get_version.return_value = { - "bundle_version": "5.7.50.0" - } - self.assertRaises(exception.VolumeDriverException, - FakeDS8KCommonHelper, self.configuration, None) - - @mock.patch.object(helper.DS8KCommonHelper, '_get_version') - def test_verify_rest_version_for_5_8_fb(self, mock_get_version): - """test the min version of REST for fb volume in 8.1.""" - mock_get_version.return_value = { - "bundle_version": "5.8.10.0" - } - FakeDS8KCommonHelper(self.configuration, None) - - @mock.patch.object(helper.DS8KECKDHelper, '_get_version') - def test_verify_rest_version_for_5_7_eckd(self, mock_get_version): - """test the min version of REST for eckd volume in 7.x.""" - self.configuration.connection_type = ( - storage.XIV_CONNECTION_TYPE_FC_ECKD) - self.configuration.ds8k_devadd_unitadd_mapping = 'C4-10' - self.configuration.ds8k_ssid_prefix = 'FF' - self.configuration.san_clustername = TEST_ECKD_POOL_ID - mock_get_version.return_value = { - "bundle_version": "5.7.50.0" - } - self.assertRaises(exception.VolumeDriverException, - FakeDS8KECKDHelper, self.configuration, None) - - @mock.patch.object(helper.DS8KECKDHelper, '_get_version') - def test_verify_rest_version_for_5_8_eckd_1(self, mock_get_version): - """test the min version of REST for eckd volume in 8.1.""" - self.configuration.connection_type = ( - storage.XIV_CONNECTION_TYPE_FC_ECKD) - self.configuration.ds8k_devadd_unitadd_mapping = 'C4-10' - self.configuration.ds8k_ssid_prefix = 'FF' - self.configuration.san_clustername = TEST_ECKD_POOL_ID - mock_get_version.return_value = { - "bundle_version": "5.8.10.0" - } - self.assertRaises(exception.VolumeDriverException, - FakeDS8KECKDHelper, self.configuration, None) - - 
@mock.patch.object(helper.DS8KECKDHelper, '_get_version') - def test_verify_rest_version_for_5_8_eckd_2(self, mock_get_version): - """test the min version of REST for eckd volume in 8.2.""" - self.configuration.connection_type = ( - storage.XIV_CONNECTION_TYPE_FC_ECKD) - self.configuration.ds8k_devadd_unitadd_mapping = 'C4-10' - self.configuration.ds8k_ssid_prefix = 'FF' - self.configuration.san_clustername = TEST_ECKD_POOL_ID - mock_get_version.return_value = { - "bundle_version": "5.8.20.0" - } - self.assertRaises(exception.VolumeDriverException, - FakeDS8KECKDHelper, self.configuration, None) - - def test_verify_pools_with_wrong_type(self): - """pool should be set according to the connection type.""" - self.configuration.san_clustername = TEST_POOLS_STR - self.configuration.connection_type = ( - storage.XIV_CONNECTION_TYPE_FC_ECKD) - self.assertRaises(exception.InvalidParameterValue, - FakeDS8KCommonHelper, self.configuration, None) - - def test_verify_pools_with_wrong_type_2(self): - """set wrong connection type should raise exception.""" - self.configuration.connection_type = 'fake_type' - self.assertRaises(exception.InvalidParameterValue, - FakeDS8KCommonHelper, self.configuration, None) - - def test_get_storage_information(self): - """should get id, wwnn and release fields from system.""" - cmn_helper = FakeDS8KCommonHelper(self.configuration, None) - self.assertIn('storage_unit', cmn_helper.backend.keys()) - self.assertIn('storage_wwnn', cmn_helper.backend.keys()) - self.assertIn('storage_version', cmn_helper.backend.keys()) - - def test_update_stats(self): - """verify the fields returned by _update_stats.""" - self.driver = FakeDS8KProxy(self.storage_info, self.logger, - self.exception, self) - self.driver.setup(self.ctxt) - expected_result = { - "volume_backend_name": TEST_VOLUME_BACKEND_NAME, - "serial_number": TEST_SOURCE_SYSTEM_UNIT, - "extent_pools": TEST_POOL_ID_1, - "vendor_name": 'IBM', - "driver_version": 'IBM Storage (v2.0.0)', - 
"storage_protocol": storage.XIV_CONNECTION_TYPE_FC, - "total_capacity_gb": 10, - "free_capacity_gb": 10, - "reserved_percentage": 0, - "consistent_group_snapshot_enabled": True, - "multiattach": False - } - - self.driver._update_stats() - self.assertDictEqual(expected_result, self.driver.meta['stat']) - - def test_update_stats_when_driver_initialize_failed(self): - """update stats raises exception if driver initialized failed.""" - with mock.patch(__name__ + '.FakeDS8KCommonHelper') as mock_helper: - mock_helper.return_value = None - self.driver = FakeDS8KProxy(self.storage_info, self.logger, - self.exception, self) - self.driver.setup(self.ctxt) - self.assertRaises(exception.CinderException, - self.driver._update_stats) - - def test_update_stats_when_can_not_get_pools(self): - """update stats raises exception if get pools failed.""" - self.driver = FakeDS8KProxy(self.storage_info, self.logger, - self.exception, self) - self.driver.setup(self.ctxt) - with mock.patch.object(helper.DS8KCommonHelper, - 'get_pools') as mock_get_pools: - mock_get_pools.return_value = [] - self.assertRaises(exception.CinderException, - self.driver._update_stats) - - def test_find_pool_should_choose_biggest_pool(self): - """create volume should choose biggest pool.""" - self.configuration.san_clustername = TEST_POOLS_STR - cmn_helper = FakeDS8KCommonHelper(self.configuration, None) - pool_id, lss_id = cmn_helper.find_pool_lss_pair(None, False, set()) - self.assertEqual(TEST_POOL_ID_1, pool_id) - - @mock.patch.object(helper.DS8KCommonHelper, 'get_all_lss') - def test_find_lss_when_lss_in_pprc_path(self, mock_get_all_lss): - """find LSS when existing LSSs are in PPRC path.""" - mock_get_all_lss.return_value = [{ - "id": TEST_LSS_ID_1, - "group": "0", - "addrgrp": "0", - "type": "fb", - "configvols": "0" - }] - cmn_helper = FakeDS8KCommonHelper(self.configuration, None) - pool_id, lss_id = cmn_helper.find_pool_lss_pair(None, False, set()) - self.assertNotEqual(TEST_LSS_ID_1, lss_id) - - 
@mock.patch.object(helper.DS8KCommonHelper, 'get_all_lss') - def test_find_lss_when_existing_lss_available(self, - mock_get_all_lss): - """find LSS when existing LSSs are available.""" - mock_get_all_lss.return_value = [{ - "id": TEST_LSS_ID_2, - "group": "0", - "addrgrp": "0", - "type": "fb", - "configvols": "0" - }] - cmn_helper = FakeDS8KCommonHelper(self.configuration, None) - pool_id, lss_id = cmn_helper.find_pool_lss_pair(None, False, set()) - self.assertEqual(TEST_LSS_ID_2, lss_id) - - @mock.patch.object(helper.DS8KCommonHelper, 'get_all_lss') - def test_find_lss_should_choose_emptiest_one(self, mock_get_all_lss): - """find LSS should choose the emptiest one.""" - mock_get_all_lss.return_value = [ - { - "id": TEST_LSS_ID_1, - "group": "0", - "addrgrp": "0", - "type": "fb", - "configvols": "200" - }, - { - "id": TEST_LSS_ID_2, - "group": "0", - "addrgrp": "0", - "type": "fb", - "configvols": "100" - }, - { - "id": TEST_LSS_ID_3, - "group": "0", - "addrgrp": "0", - "type": "fb", - "configvols": "150" - } - ] - cmn_helper = FakeDS8KCommonHelper(self.configuration, None) - pool_id, lss_id = cmn_helper.find_pool_lss_pair(None, False, set()) - self.assertEqual(TEST_LSS_ID_2, lss_id) - - @mock.patch.object(helper.DS8KCommonHelper, 'get_all_lss') - @mock.patch.object(helper.DS8KCommonHelper, '_find_from_nonexistent_lss') - def test_find_lss_when_no_existing_lss_available(self, mock_find_lss, - mock_get_all_lss): - """find LSS when no existing LSSs are available.""" - mock_get_all_lss.return_value = [{ - "id": TEST_LSS_ID_2, - "group": "0", - "addrgrp": "0", - "type": "fb", - "configvols": "256" - }] - cmn_helper = FakeDS8KCommonHelper(self.configuration, None) - pool_id, lss_id = cmn_helper.find_pool_lss_pair(None, False, set()) - self.assertTrue(mock_find_lss.called) - - @mock.patch.object(helper.DS8KCommonHelper, '_find_lss') - def test_find_lss_when_all_lss_exhausted(self, mock_find_lss): - """when all LSSs are exhausted should raise exception.""" - cmn_helper = 
FakeDS8KCommonHelper(self.configuration, None) - mock_find_lss.return_value = None - self.assertRaises(restclient.LssIDExhaustError, - cmn_helper.find_pool_lss_pair, None, False, None) - - def test_find_lss_for_volume_which_belongs_to_cg(self): - """find lss for volume, which is in empty CG.""" - self.configuration.lss_range_for_cg = '20-23' - self.driver = FakeDS8KProxy(self.storage_info, self.logger, - self.exception, self) - self.driver.setup(self.ctxt) - group_type = group_types.create( - self.ctxt, - 'group', - {'consistent_group_snapshot_enabled': ' True'} - ) - group = self._create_group(host=TEST_GROUP_HOST, - group_type_id=group_type.id) - volume = self._create_volume(group_id=group.id) - lun = ds8kproxy.Lun(volume) - self.driver._create_lun_helper(lun) - pid, lss = lun.pool_lss_pair['source'] - self.assertTrue(lss in ['20', '21', '22', '23']) - - def test_find_lss_for_volume_which_belongs_to_cg2(self): - """find lss for volume, which is in CG having volumes.""" - self.configuration.lss_range_for_cg = '20-23' - self.driver = FakeDS8KProxy(self.storage_info, self.logger, - self.exception, self) - self.driver.setup(self.ctxt) - group_type = group_types.create( - self.ctxt, - 'group', - {'consistent_group_snapshot_enabled': ' True'} - ) - group = self._create_group(host=TEST_GROUP_HOST, - group_type_id=group_type.id) - location = six.text_type({'vol_hex_id': '2000'}) - self._create_volume(group_id=group.id, - provider_location=location) - volume = self._create_volume(group_id=group.id) - lun = ds8kproxy.Lun(volume) - self.driver._create_lun_helper(lun) - pid, lss = lun.pool_lss_pair['source'] - self.assertEqual(lss, '20') - - def test_find_lss_for_volume_which_belongs_to_cg3(self): - """find lss for volume, and other CGs have volumes.""" - self.configuration.lss_range_for_cg = '20-23' - self.driver = FakeDS8KProxy(self.storage_info, self.logger, - self.exception, self) - self.driver.setup(self.ctxt) - group_type = group_types.create( - self.ctxt, - 'group', - 
{'consistent_group_snapshot_enabled': ' True'} - ) - group = self._create_group(host=TEST_GROUP_HOST, - group_type_id=group_type.id) - volume = self._create_volume(group_id=group.id) - - group_type2 = group_types.create( - self.ctxt, - 'group2', - {'consistent_group_snapshot_enabled': ' True'} - ) - group2 = self._create_group(host=TEST_GROUP_HOST, - group_type_id=group_type2.id) - location = six.text_type({'vol_hex_id': '2000'}) - self._create_volume(group_id=group2.id, - provider_location=location) - lun = ds8kproxy.Lun(volume) - self.driver._create_lun_helper(lun) - pid, lss = lun.pool_lss_pair['source'] - self.assertNotEqual(lss, '20') - - def test_find_lss_for_volume_which_belongs_to_cg4(self): - """find lss for volume, and other CGs are in error state.""" - self.configuration.lss_range_for_cg = '20' - self.driver = FakeDS8KProxy(self.storage_info, self.logger, - self.exception, self) - self.driver.setup(self.ctxt) - group_type = group_types.create( - self.ctxt, - 'group', - {'consistent_group_snapshot_enabled': ' True'} - ) - group = self._create_group(host=TEST_GROUP_HOST, - group_type_id=group_type.id) - volume = self._create_volume(group_id=group.id) - - group_type2 = group_types.create( - self.ctxt, - 'group2', - {'consistent_group_snapshot_enabled': ' True'} - ) - group2 = self._create_group(status='error', - host=TEST_GROUP_HOST, - group_type_id=group_type2.id) - location = six.text_type({'vol_hex_id': '2000'}) - self._create_volume(group_id=group2.id, - provider_location=location) - lun = ds8kproxy.Lun(volume) - self.driver._create_lun_helper(lun) - pid, lss = lun.pool_lss_pair['source'] - # error group will be ignored, so LSS 20 can be used. - self.assertEqual(lss, '20') - - def test_create_volume_and_assign_to_group_with_wrong_host(self): - # create volume for group which has wrong format of host. 
- self.configuration.lss_range_for_cg = '20-23' - self.driver = FakeDS8KProxy(self.storage_info, self.logger, - self.exception, self) - self.driver.setup(self.ctxt) - group_type = group_types.create( - self.ctxt, - 'group', - {'consistent_group_snapshot_enabled': ' True'} - ) - group = self._create_group(host="fake_invalid_host", - group_type_id=group_type.id) - volume = self._create_volume(group_id=group.id) - self.assertRaises(exception.VolumeDriverException, - self.driver.create_volume, volume) - - @mock.patch.object(helper.DS8KCommonHelper, '_create_lun') - def test_create_volume_but_lss_full_afterwards(self, mock_create_lun): - """create volume in a LSS which is full afterwards.""" - self.driver = FakeDS8KProxy(self.storage_info, self.logger, - self.exception, self) - self.driver.setup(self.ctxt) - - vol_type = volume_types.create(self.ctxt, 'VOL_TYPE', {}) - volume = self._create_volume(volume_type_id=vol_type.id) - mock_create_lun.side_effect = [ - restclient.LssFullException('LSS is full.'), TEST_VOLUME_ID] - vol = self.driver.create_volume(volume) - self.assertEqual( - TEST_VOLUME_ID, - ast.literal_eval(vol['provider_location'])['vol_hex_id']) - - def test_create_volume_of_FB512(self): - """create volume which type is FB 512.""" - self.driver = FakeDS8KProxy(self.storage_info, self.logger, - self.exception, self) - self.driver.setup(self.ctxt) - - vol_type = volume_types.create(self.ctxt, 'VOL_TYPE', {}) - volume = self._create_volume(volume_type_id=vol_type.id) - vol = self.driver.create_volume(volume) - self.assertEqual('FB 512', vol['metadata']['data_type']) - self.assertEqual(TEST_VOLUME_ID, vol['metadata']['vol_hex_id']) - self.assertEqual( - TEST_VOLUME_ID, - ast.literal_eval(vol['provider_location'])['vol_hex_id']) - - def test_create_volume_of_OS400_050(self): - """create volume which type is OS400 050.""" - self.driver = FakeDS8KProxy(self.storage_info, self.logger, - self.exception, self) - self.driver.setup(self.ctxt) - extra_spec = 
{'drivers:os400': '050'} - vol_type = volume_types.create(self.ctxt, 'VOL_TYPE', extra_spec) - volume = self._create_volume(volume_type_id=vol_type.id) - vol = self.driver.create_volume(volume) - self.assertEqual( - TEST_VOLUME_ID, - ast.literal_eval(vol['provider_location'])['vol_hex_id']) - self.assertEqual('050 FB 520UV', vol['metadata']['data_type']) - - def test_create_volume_when_specify_area(self): - """create volume and put it in specific pool and lss.""" - self.driver = FakeDS8KProxy(self.storage_info, self.logger, - self.exception, self) - self.driver.setup(self.ctxt) - - vol_type = volume_types.create(self.ctxt, 'VOL_TYPE', { - 'drivers:storage_pool_ids': TEST_POOL_ID_1, - 'drivers:storage_lss_ids': TEST_LSS_ID_1 - }) - volume = self._create_volume(volume_type_id=vol_type.id) - lun = ds8kproxy.Lun(volume) - pool, lss = self.driver._find_pool_lss_pair_from_spec(lun, set()) - self.assertEqual(TEST_POOL_ID_1, pool) - self.assertEqual(TEST_LSS_ID_1, lss) - - def test_create_volume_only_specify_lss(self): - """create volume and put it in specific lss.""" - self.driver = FakeDS8KProxy(self.storage_info, self.logger, - self.exception, self) - self.driver.setup(self.ctxt) - - vol_type = volume_types.create(self.ctxt, 'VOL_TYPE', { - 'drivers:storage_lss_ids': TEST_LSS_ID_1 - }) - volume = self._create_volume(volume_type_id=vol_type.id) - lun = ds8kproxy.Lun(volume) - pool, lss = self.driver._find_pool_lss_pair_from_spec(lun, set()) - # if not specify pool, choose pools set in configuration file. 
- self.assertTrue(pool in self.configuration.san_clustername.split(',')) - self.assertEqual(TEST_LSS_ID_1, lss) - - def test_create_volume_only_specify_pool(self): - """create volume and put it in specific pool.""" - self.driver = FakeDS8KProxy(self.storage_info, self.logger, - self.exception, self) - self.driver.setup(self.ctxt) - - vol_type = volume_types.create(self.ctxt, 'VOL_TYPE', { - 'drivers:storage_pool_ids': TEST_POOL_ID_1 - }) - volume = self._create_volume(volume_type_id=vol_type.id) - lun = ds8kproxy.Lun(volume) - pool, lss = self.driver._find_pool_lss_pair_from_spec(lun, set()) - self.assertEqual(TEST_POOL_ID_1, pool) - - def test_create_volume_but_specify_wrong_lss_id(self): - """create volume, but specify a wrong lss id.""" - self.driver = FakeDS8KProxy(self.storage_info, self.logger, - self.exception, self) - self.driver.setup(self.ctxt) - - vol_type = volume_types.create(self.ctxt, 'VOL_TYPE', { - 'drivers:storage_pool_ids': TEST_POOL_ID_1, - 'drivers:storage_lss_ids': '100' - }) - volume = self._create_volume(volume_type_id=vol_type.id) - lun = ds8kproxy.Lun(volume) - self.assertRaises(exception.InvalidParameterValue, - self.driver._find_pool_lss_pair_from_spec, - lun, set()) - - @mock.patch.object(helper.DS8KCommonHelper, '_create_lun') - def test_create_eckd_volume(self, mock_create_lun): - """create volume which type is ECKD.""" - self.configuration.connection_type = ( - storage.XIV_CONNECTION_TYPE_FC_ECKD) - self.configuration.ds8k_devadd_unitadd_mapping = 'C4-10' - self.configuration.ds8k_ssid_prefix = 'FF' - self.configuration.san_clustername = TEST_ECKD_POOL_ID - self.driver = FakeDS8KProxy(self.storage_info, self.logger, - self.exception, self) - self.driver.setup(self.ctxt) - - extra_spec = {'drivers:thin_provision': 'False'} - vol_type = volume_types.create(self.ctxt, 'VOL_TYPE', extra_spec) - volume = self._create_volume(volume_type_id=vol_type.id) - mock_create_lun.return_value = TEST_ECKD_VOLUME_ID - vol = 
self.driver.create_volume(volume) - location = ast.literal_eval(vol['provider_location']) - self.assertEqual('3390', vol['metadata']['data_type']) - self.assertEqual(TEST_ECKD_VOLUME_ID, vol['metadata']['vol_hex_id']) - self.assertEqual(TEST_ECKD_VOLUME_ID, location['vol_hex_id']) - - @mock.patch.object(helper.DS8KCommonHelper, 'get_physical_links') - def test_check_physical_links(self, mock_get_physical_links): - """check physical links when user do not connect DS8K.""" - src_helper = FakeDS8KCommonHelper(self.configuration, None) - repl = FakeReplication(src_helper, TEST_REPLICATION_DEVICE) - mock_get_physical_links.return_value = None - self.assertRaises(exception.CinderException, repl.check_physical_links) - - @mock.patch.object(helper.DS8KCommonHelper, 'get_physical_links') - def test_check_physical_links2(self, mock_get_physical_links): - """check physical links if more than eight physical links.""" - src_helper = FakeDS8KCommonHelper(self.configuration, None) - repl = FakeReplication(src_helper, TEST_REPLICATION_DEVICE) - mock_get_physical_links.return_value = [ - {"source_port_id": 'I0001', "target_port_id": 'I0001'}, - {"source_port_id": 'I0002', "target_port_id": 'I0002'}, - {"source_port_id": 'I0003', "target_port_id": 'I0003'}, - {"source_port_id": 'I0004', "target_port_id": 'I0004'}, - {"source_port_id": 'I0005', "target_port_id": 'I0005'}, - {"source_port_id": 'I0006', "target_port_id": 'I0006'}, - {"source_port_id": 'I0007', "target_port_id": 'I0007'}, - {"source_port_id": 'I0008', "target_port_id": 'I0008'}, - {"source_port_id": 'I0009', "target_port_id": 'I0009'} - ] - repl.check_physical_links() - port_pairs = repl._target_helper.backend['port_pairs'] - self.assertEqual(8, len(port_pairs)) - - def test_check_physical_links3(self): - """check physical links when user set them in configure file.""" - src_helper = FakeDS8KCommonHelper(self.configuration, None) - device = TEST_REPLICATION_DEVICE.copy() - device['port_pairs'] = TEST_SOURCE_IOPORT + '-' 
+ TEST_TARGET_IOPORT - repl = FakeReplication(src_helper, device) - expected_port_pairs = [ - {'source_port_id': TEST_SOURCE_IOPORT, - 'target_port_id': TEST_TARGET_IOPORT} - ] - repl.check_physical_links() - self.assertEqual(expected_port_pairs, - repl._target_helper.backend['port_pairs']) - - @mock.patch.object(proxy.IBMStorageProxy, '__init__') - def test_do_replication_setup(self, mock_init): - """driver supports only one replication target.""" - replication_device = ['fake_device_1', 'fake_device_2'] - ds8k_proxy = ds8kproxy.DS8KProxy(self.storage_info, self.logger, - self.exception, self) - self.assertRaises(exception.InvalidParameterValue, - ds8k_proxy._do_replication_setup, - replication_device, None) - - @mock.patch.object(proxy.IBMStorageProxy, '__init__') - @mock.patch.object(replication, 'Replication') - @mock.patch.object(ds8kproxy.DS8KProxy, '_switch_backend_connection') - def test_switch_backend_connection(self, mock_switch_connection, - mock_replication, mock_proxy_init): - """driver should switch connection if it has been failed over.""" - ds8k_proxy = ds8kproxy.DS8KProxy(self.storage_info, self.logger, - self.exception, self, - TEST_TARGET_DS8K_IP) - src_helper = FakeDS8KCommonHelper(self.configuration, None) - mock_replication.return_value = FakeReplication( - src_helper, TEST_REPLICATION_DEVICE) - ds8k_proxy._do_replication_setup( - [TEST_REPLICATION_DEVICE], src_helper) - self.assertTrue(mock_switch_connection.called) - - def test_find_lcu_for_eckd_replicated_volume(self): - """find LCU for eckd replicated volume when pprc path is availble.""" - self.configuration.connection_type = ( - storage.XIV_CONNECTION_TYPE_FC_ECKD) - self.configuration.ds8k_devadd_unitadd_mapping = 'C4-10' - self.configuration.ds8k_ssid_prefix = 'FF' - self.configuration.san_clustername = TEST_ECKD_POOL_ID - src_helper = FakeDS8KECKDHelper(self.configuration, None) - - device = TEST_REPLICATION_DEVICE.copy() - device['connection_type'] = 
storage.XIV_CONNECTION_TYPE_FC_ECKD - device['ds8k_devadd_unitadd_mapping'] = 'A4-10' - device['ds8k_ssid_prefix'] = 'FF' - device['san_clustername'] = TEST_ECKD_POOL_ID - repl = FakeReplication(src_helper, device) - repl.check_physical_links() - pool_lss_pair = repl.find_pool_lss_pair(None) - - expected_pair = {'source': (TEST_ECKD_POOL_ID, TEST_LCU_ID), - 'target': (TEST_ECKD_POOL_ID, TEST_LCU_ID)} - self.assertDictEqual(expected_pair, pool_lss_pair) - - @mock.patch.object(eventlet, 'sleep') - def test_create_fb_replicated_volume(self, mock_sleep): - """create FB volume when enable replication.""" - self.configuration.replication_device = [TEST_REPLICATION_DEVICE] - self.driver = FakeDS8KProxy(self.storage_info, self.logger, - self.exception, self) - self.driver.setup(self.ctxt) - - extra_spec = {'replication_enabled': ' True'} - vol_type = volume_types.create(self.ctxt, 'VOL_TYPE', extra_spec) - volume = self._create_volume(volume_type_id=vol_type.id) - vol = self.driver.create_volume(volume) - self.assertEqual( - TEST_VOLUME_ID, - ast.literal_eval(vol['provider_location'])['vol_hex_id']) - repl = eval(vol['metadata']['replication']) - self.assertEqual(TEST_VOLUME_ID, - repl[TEST_TARGET_DS8K_IP]['vol_hex_id']) - - @mock.patch.object(helper.DS8KCommonHelper, 'get_pprc_paths') - @mock.patch.object(replication.MetroMirrorManager, 'create_pprc_path') - @mock.patch.object(eventlet, 'sleep') - def test_create_fb_replicated_vol_but_no_path_available(self, mock_sleep, - create_pprc_path, - get_pprc_paths): - """create replicated volume but no pprc paths are available.""" - self.configuration.replication_device = [TEST_REPLICATION_DEVICE] - self.driver = FakeDS8KProxy(self.storage_info, self.logger, - self.exception, self) - self.driver.setup(self.ctxt) - - extra_spec = {'replication_enabled': ' True'} - vol_type = volume_types.create(self.ctxt, 'VOL_TYPE', extra_spec) - volume = self._create_volume(volume_type_id=vol_type.id) - get_pprc_paths.return_value = [ - { - 
'source_lss_id': TEST_LSS_ID_1, - 'target_lss_id': TEST_LSS_ID_1, - 'port_pairs': [ - { - 'source_port_id': TEST_SOURCE_IOPORT, - 'target_port_id': TEST_TARGET_IOPORT, - 'state': 'failed' - } - ], - 'target_system_wwnn': TEST_TARGET_WWNN - } - ] - self.driver.create_volume(volume) - self.assertTrue(create_pprc_path.called) - - @mock.patch.object(helper.DS8KCommonHelper, 'get_pprc_paths') - @mock.patch.object(eventlet, 'sleep') - def test_create_fb_replicated_vol_and_verify_lss_in_path( - self, mock_sleep, get_pprc_paths): - """create replicated volume should verify the LSS in pprc paths.""" - self.configuration.replication_device = [TEST_REPLICATION_DEVICE] - self.driver = FakeDS8KProxy(self.storage_info, self.logger, - self.exception, self) - self.driver.setup(self.ctxt) - - extra_spec = {'replication_enabled': ' True'} - vol_type = volume_types.create(self.ctxt, 'VOL_TYPE', extra_spec) - volume = self._create_volume(volume_type_id=vol_type.id) - get_pprc_paths.return_value = [ - { - 'source_lss_id': TEST_LSS_ID_1, - 'target_lss_id': TEST_LSS_ID_1, - 'port_pairs': [ - { - 'source_port_id': TEST_SOURCE_IOPORT, - 'target_port_id': TEST_TARGET_IOPORT, - 'state': 'success' - } - ], - 'target_system_wwnn': TEST_TARGET_WWNN - }, - { - 'source_lss_id': TEST_LSS_ID_2, - 'target_lss_id': TEST_LSS_ID_2, - 'port_pairs': [ - { - 'source_port_id': TEST_SOURCE_IOPORT, - 'target_port_id': TEST_TARGET_IOPORT, - 'state': 'success' - } - ], - 'target_system_wwnn': TEST_TARGET_WWNN - } - ] - vol = self.driver.create_volume(volume) - # locate the volume in pprc path which LSS matches the pool. 
- self.assertEqual( - TEST_LSS_ID_1, - ast.literal_eval(vol['provider_location'])['vol_hex_id'][:2]) - repl = eval(vol['metadata']['replication']) - self.assertEqual(TEST_LSS_ID_1, - repl[TEST_TARGET_DS8K_IP]['vol_hex_id'][:2]) - - @mock.patch.object(helper.DS8KCommonHelper, 'get_pprc_paths') - @mock.patch.object(eventlet, 'sleep') - def test_create_fb_replicated_vol_when_paths_available( - self, mock_sleep, get_pprc_paths): - """create replicated volume when multiple pprc paths are available.""" - self.configuration.replication_device = [TEST_REPLICATION_DEVICE] - self.driver = FakeDS8KProxy(self.storage_info, self.logger, - self.exception, self) - self.driver.setup(self.ctxt) - - extra_spec = {'replication_enabled': ' True'} - vol_type = volume_types.create(self.ctxt, 'VOL_TYPE', extra_spec) - volume = self._create_volume(volume_type_id=vol_type.id) - get_pprc_paths.return_value = [ - { - 'source_lss_id': TEST_LSS_ID_1, - 'target_lss_id': TEST_LSS_ID_1, - 'port_pairs': [ - { - 'source_port_id': TEST_SOURCE_IOPORT, - 'target_port_id': TEST_TARGET_IOPORT, - 'state': 'success' - } - ], - 'target_system_wwnn': TEST_TARGET_WWNN - }, - { - 'source_lss_id': TEST_LSS_ID_3, - 'target_lss_id': TEST_LSS_ID_3, - 'port_pairs': [ - { - 'source_port_id': TEST_SOURCE_IOPORT, - 'target_port_id': TEST_TARGET_IOPORT, - 'state': 'success' - } - ], - 'target_system_wwnn': TEST_TARGET_WWNN - } - ] - vol = self.driver.create_volume(volume) - # locate the volume in pprc path which has emptest LSS. 
- self.assertEqual( - TEST_LSS_ID_1, - ast.literal_eval(vol['provider_location'])['vol_hex_id'][:2]) - repl = eval(vol['metadata']['replication']) - self.assertEqual(TEST_LSS_ID_1, - repl[TEST_TARGET_DS8K_IP]['vol_hex_id'][:2]) - - @mock.patch.object(helper.DS8KCommonHelper, '_create_lun') - @mock.patch.object(eventlet, 'sleep') - def test_create_replicated_vol_but_lss_full_afterwards( - self, mock_sleep, create_lun): - """create replicated volume but lss is full afterwards.""" - self.configuration.replication_device = [TEST_REPLICATION_DEVICE] - self.driver = FakeDS8KProxy(self.storage_info, self.logger, - self.exception, self) - self.driver.setup(self.ctxt) - - create_lun.side_effect = [ - restclient.LssFullException('LSS is full.'), - TEST_VOLUME_ID, - TEST_VOLUME_ID - ] - extra_spec = {'replication_enabled': ' True'} - vol_type = volume_types.create(self.ctxt, 'VOL_TYPE', extra_spec) - volume = self._create_volume(volume_type_id=vol_type.id) - with mock.patch.object(replication.MetroMirrorManager, - '_is_pprc_paths_healthy') as check_pprc_path: - check_pprc_path.return_value = replication.PPRC_PATH_HEALTHY - vol = self.driver.create_volume(volume) - self.assertEqual( - TEST_VOLUME_ID, - ast.literal_eval(vol['provider_location'])['vol_hex_id']) - repl = eval(vol['metadata']['replication']) - self.assertEqual(TEST_VOLUME_ID, - repl[TEST_TARGET_DS8K_IP]['vol_hex_id']) - - @mock.patch.object(helper.DS8KCommonHelper, '_delete_lun') - def test_delete_volume(self, mock_delete_lun): - """delete volume successfully.""" - self.driver = FakeDS8KProxy(self.storage_info, self.logger, - self.exception, self) - self.driver.setup(self.ctxt) - - vol_type = volume_types.create(self.ctxt, 'VOL_TYPE', {}) - location = six.text_type({'vol_hex_id': TEST_VOLUME_ID}) - volume = self._create_volume(volume_type_id=vol_type.id, - provider_location=location) - self.driver.delete_volume(volume) - self.assertTrue(mock_delete_lun.called) - - @mock.patch.object(helper.DS8KCommonHelper, 
'_delete_lun') - def test_delete_volume_return_if_no_volume_id(self, mock_delete_lun): - """should not try to delete volume if the volume id is None.""" - self.driver = FakeDS8KProxy(self.storage_info, self.logger, - self.exception, self) - self.driver.setup(self.ctxt) - - volume = self._create_volume() - self.driver.delete_volume(volume) - self.assertFalse(mock_delete_lun.called) - - @mock.patch.object(helper.DS8KCommonHelper, 'lun_exists') - @mock.patch.object(helper.DS8KCommonHelper, '_delete_lun') - def test_delete_volume_return_if_volume_not_exist(self, mock_delete_lun, - mock_lun_exists): - """should not delete volume if the volume doesn't exist.""" - self.driver = FakeDS8KProxy(self.storage_info, self.logger, - self.exception, self) - self.driver.setup(self.ctxt) - - vol_type = volume_types.create(self.ctxt, 'VOL_TYPE', {}) - location = six.text_type({'vol_hex_id': TEST_VOLUME_ID}) - volume = self._create_volume(volume_type_id=vol_type.id, - provider_location=location) - mock_lun_exists.return_value = False - self.driver.delete_volume(volume) - self.assertFalse(mock_delete_lun.called) - - @mock.patch.object(helper.DS8KCommonHelper, 'delete_lun_by_id') - @mock.patch.object(helper.DS8KCommonHelper, 'delete_lun') - def test_delete_fb_replicated_volume(self, mock_delete_lun, - mock_delete_lun_by_id): - """Delete volume when enable replication.""" - self.configuration.replication_device = [TEST_REPLICATION_DEVICE] - self.driver = FakeDS8KProxy(self.storage_info, self.logger, - self.exception, self) - self.driver.setup(self.ctxt) - - extra_spec = {'replication_enabled': ' True'} - vol_type = volume_types.create(self.ctxt, 'VOL_TYPE', extra_spec) - location = six.text_type({'vol_hex_id': TEST_VOLUME_ID}) - data = json.dumps( - {TEST_TARGET_DS8K_IP: {'vol_hex_id': TEST_VOLUME_ID}}) - volume = self._create_volume(volume_type_id=vol_type.id, - provider_location=location, - replication_driver_data=data) - self.driver.delete_volume(volume) - 
self.assertTrue(mock_delete_lun_by_id.called) - self.assertTrue(mock_delete_lun.called) - - @mock.patch.object(eventlet, 'sleep') - @mock.patch.object(helper.DS8KCommonHelper, 'get_flashcopy') - def test_create_cloned_volume(self, mock_get_flashcopy, mock_sleep): - """clone the volume successfully.""" - self.driver = FakeDS8KProxy(self.storage_info, self.logger, - self.exception, self) - self.driver.setup(self.ctxt) - - vol_type = volume_types.create(self.ctxt, 'VOL_TYPE', {}) - location = six.text_type({'vol_hex_id': TEST_VOLUME_ID}) - src_vol = self._create_volume(volume_type_id=vol_type.id, - provider_location=location) - location = six.text_type({'vol_hex_id': None}) - tgt_vol = self._create_volume(volume_type_id=vol_type.id, - provider_location=location) - - mock_get_flashcopy.side_effect = [[TEST_FLASHCOPY], {}] - volume_update = self.driver.create_cloned_volume(tgt_vol, src_vol) - self.assertEqual( - TEST_VOLUME_ID, - ast.literal_eval(volume_update['provider_location'])['vol_hex_id']) - - @mock.patch.object(eventlet, 'sleep') - @mock.patch.object(helper.DS8KCommonHelper, 'get_flashcopy') - @mock.patch.object(helper.DS8KCommonHelper, 'change_lun') - def test_create_cloned_volume2(self, mock_change_lun, - mock_get_flashcopy, mock_sleep): - """clone from source volume to a bigger target volume.""" - self.driver = FakeDS8KProxy(self.storage_info, self.logger, - self.exception, self) - self.driver.setup(self.ctxt) - - vol_type = volume_types.create(self.ctxt, 'VOL_TYPE', {}) - location = six.text_type({'vol_hex_id': TEST_VOLUME_ID}) - src_vol = self._create_volume(volume_type_id=vol_type.id, - provider_location=location) - location = six.text_type({'vol_hex_id': None}) - tgt_vol = self._create_volume(volume_type_id=vol_type.id, - provider_location=location, - size=2) - - mock_get_flashcopy.side_effect = [[TEST_FLASHCOPY], {}] - self.driver.create_cloned_volume(tgt_vol, src_vol) - self.assertTrue(mock_change_lun.called) - - def test_create_cloned_volume3(self): - 
"""clone source volume which should be smaller than target volume.""" - self.driver = FakeDS8KProxy(self.storage_info, self.logger, - self.exception, self) - self.driver.setup(self.ctxt) - - vol_type = volume_types.create(self.ctxt, 'VOL_TYPE', {}) - location = six.text_type({'vol_hex_id': TEST_VOLUME_ID}) - src_vol = self._create_volume(volume_type_id=vol_type.id, - provider_location=location, - size=2) - location = six.text_type({'vol_hex_id': None}) - tgt_vol = self._create_volume(volume_type_id=vol_type.id, - provider_location=location) - self.assertRaises(exception.VolumeDriverException, - self.driver.create_cloned_volume, tgt_vol, src_vol) - - @mock.patch.object(helper.DS8KCommonHelper, 'get_flashcopy') - def test_create_cloned_volume4(self, mock_get_flashcopy): - """clone a volume which should not be a target in flashcopy.""" - self.driver = FakeDS8KProxy(self.storage_info, self.logger, - self.exception, self) - self.driver.setup(self.ctxt) - - vol_type = volume_types.create(self.ctxt, 'VOL_TYPE', {}) - location = six.text_type({'vol_hex_id': TEST_VOLUME_ID}) - src_vol = self._create_volume(volume_type_id=vol_type.id, - provider_location=location) - location = six.text_type({'vol_hex_id': None}) - tgt_vol = self._create_volume(volume_type_id=vol_type.id, - provider_location=location) - - flashcopy_relationship = copy.deepcopy(TEST_FLASHCOPY) - flashcopy_relationship['targetvolume']['id'] = TEST_VOLUME_ID - mock_get_flashcopy.return_value = [flashcopy_relationship] - self.assertRaises(restclient.APIException, - self.driver.create_cloned_volume, tgt_vol, src_vol) - - @mock.patch.object(helper.DS8KCommonHelper, 'get_flashcopy') - @mock.patch.object(helper.DS8KCommonHelper, 'lun_exists') - @mock.patch.object(helper.DS8KCommonHelper, 'create_lun') - def test_create_cloned_volume5(self, mock_create_lun, mock_lun_exists, - mock_get_flashcopy): - """clone a volume when target has volume ID but it is nonexistent.""" - self.driver = FakeDS8KProxy(self.storage_info, 
self.logger, - self.exception, self) - self.driver.setup(self.ctxt) - - vol_type = volume_types.create(self.ctxt, 'VOL_TYPE', {}) - location = six.text_type({'vol_hex_id': TEST_VOLUME_ID}) - src_vol = self._create_volume(volume_type_id=vol_type.id, - provider_location=location) - location = six.text_type({'vol_hex_id': '0003'}) - metadata = [{'key': 'data_type', 'value': 'FB 512'}] - tgt_vol = self._create_volume(volume_type_id=vol_type.id, - provider_location=location, - volume_metadata=metadata) - - mock_get_flashcopy.side_effect = [[TEST_FLASHCOPY], {}] - mock_lun_exists.return_value = False - self.driver.create_cloned_volume(tgt_vol, src_vol) - self.assertTrue(mock_create_lun.called) - - @mock.patch.object(eventlet, 'sleep') - @mock.patch.object(helper.DS8KCommonHelper, 'get_flashcopy') - def test_create_volume_from_snapshot(self, mock_get_flashcopy, mock_sleep): - """create volume from snapshot.""" - self.driver = FakeDS8KProxy(self.storage_info, self.logger, - self.exception, self) - self.driver.setup(self.ctxt) - - vol_type = volume_types.create(self.ctxt, 'VOL_TYPE', {}) - volume = self._create_volume(volume_type_id=vol_type.id) - location = six.text_type({'vol_hex_id': '0002'}) - snap = self._create_snapshot(volume_id=volume.id, - volume_type_id=vol_type.id, - provider_location=location) - vol = self._create_volume(volume_type_id=vol_type.id) - - mock_get_flashcopy.side_effect = [[TEST_FLASHCOPY], {}] - volume_update = self.driver.create_volume_from_snapshot(vol, snap) - self.assertEqual( - TEST_VOLUME_ID, - ast.literal_eval(volume_update['provider_location'])['vol_hex_id']) - - def test_extend_volume(self): - """extend unreplicated volume.""" - self.driver = FakeDS8KProxy(self.storage_info, self.logger, - self.exception, self) - self.driver.setup(self.ctxt) - - vol_type = volume_types.create(self.ctxt, 'VOL_TYPE', {}) - location = six.text_type({'vol_hex_id': TEST_VOLUME_ID}) - volume = self._create_volume(volume_type_id=vol_type.id, - 
provider_location=location) - self.driver.extend_volume(volume, 2) - - @mock.patch.object(eventlet, 'sleep') - def test_extend_replicated_volume(self, mock_sleep): - """extend replicated volume.""" - self.configuration.replication_device = [TEST_REPLICATION_DEVICE] - self.driver = FakeDS8KProxy(self.storage_info, self.logger, - self.exception, self) - self.driver.setup(self.ctxt) - - vol_type = volume_types.create(self.ctxt, 'VOL_TYPE', - {'replication_enabled': ' True'}) - location = six.text_type({'vol_hex_id': TEST_VOLUME_ID}) - data = json.dumps( - {TEST_TARGET_DS8K_IP: {'vol_hex_id': TEST_VOLUME_ID}}) - volume = self._create_volume(volume_type_id=vol_type.id, - provider_location=location, - replication_driver_data=data) - self.driver.extend_volume(volume, 2) - - def test_extend_replicated_volume_that_has_been_failed_over(self): - """extend replicated volume which has been failed over should fail.""" - self.configuration.replication_device = [TEST_REPLICATION_DEVICE] - self.driver = FakeDS8KProxy(self.storage_info, self.logger, - self.exception, self, TEST_TARGET_DS8K_IP) - self.driver.setup(self.ctxt) - - vol_type = volume_types.create(self.ctxt, 'VOL_TYPE', - {'replication_enabled': ' True'}) - location = six.text_type({'vol_hex_id': TEST_VOLUME_ID}) - data = json.dumps( - {TEST_TARGET_DS8K_IP: {'vol_hex_id': TEST_VOLUME_ID}}) - volume = self._create_volume(volume_type_id=vol_type.id, - provider_location=location, - replication_driver_data=data) - self.assertRaises(exception.CinderException, - self.driver.extend_volume, volume, 2) - - @mock.patch.object(eventlet, 'sleep') - @mock.patch.object(helper.DS8KCommonHelper, 'get_flashcopy') - def test_create_snapshot(self, mock_get_flashcopy, mock_sleep): - """test a successful creation of snapshot.""" - self.driver = FakeDS8KProxy(self.storage_info, self.logger, - self.exception, self) - self.driver.setup(self.ctxt) - - vol_type = volume_types.create(self.ctxt, 'VOL_TYPE', {}) - location = 
six.text_type({'vol_hex_id': '0002'}) - volume = self._create_volume(volume_type_id=vol_type.id, - provider_location=location) - snapshot = self._create_snapshot(volume_id=volume.id) - - mock_get_flashcopy.side_effect = [[TEST_FLASHCOPY], {}] - snapshot_update = self.driver.create_snapshot(snapshot) - location = ast.literal_eval(snapshot_update['provider_location']) - self.assertEqual(TEST_VOLUME_ID, location['vol_hex_id']) - - @mock.patch.object(eventlet, 'sleep') - @mock.patch.object(helper.DS8KCommonHelper, 'get_flashcopy') - def test_retype_from_thick_to_thin(self, mock_get_flashcopy, mock_sleep): - """retype from thick-provision to thin-provision.""" - self.driver = FakeDS8KProxy(self.storage_info, self.logger, - self.exception, self) - self.driver.setup(self.ctxt) - - new_type = {} - diff = { - 'encryption': {}, - 'qos_specs': {}, - 'extra_specs': {'drivers:thin_provision': ('False', 'True')} - } - host = None - vol_type = volume_types.create(self.ctxt, 'VOL_TYPE', - {'drivers:thin_provision': 'False'}) - location = six.text_type({'vol_hex_id': TEST_VOLUME_ID}) - volume = self._create_volume(volume_type_id=vol_type.id, - provider_location=location) - - mock_get_flashcopy.side_effect = [[TEST_FLASHCOPY], {}] - retyped, retype_model_update = self.driver.retype( - self.ctxt, volume, new_type, diff, host) - self.assertTrue(retyped) - - @mock.patch.object(eventlet, 'sleep') - @mock.patch.object(helper.DS8KCommonHelper, 'get_flashcopy') - def test_retype_from_thin_to_thick(self, mock_get_flashcopy, mock_sleep): - """retype from thin-provision to thick-provision.""" - self.driver = FakeDS8KProxy(self.storage_info, self.logger, - self.exception, self) - self.driver.setup(self.ctxt) - - new_type = {} - diff = { - 'encryption': {}, - 'qos_specs': {}, - 'extra_specs': {'drivers:thin_provision': ('True', 'False')} - } - host = None - vol_type = volume_types.create(self.ctxt, 'VOL_TYPE', - {'drivers:thin_provision': 'True'}) - location = six.text_type({'vol_hex_id': 
TEST_VOLUME_ID}) - volume = self._create_volume(volume_type_id=vol_type.id, - provider_location=location) - - mock_get_flashcopy.side_effect = [[TEST_FLASHCOPY], {}] - retyped, retype_model_update = self.driver.retype( - self.ctxt, volume, new_type, diff, host) - self.assertTrue(retyped) - - @mock.patch.object(eventlet, 'sleep') - def test_retype_from_unreplicated_to_replicated(self, mock_sleep): - """retype from unreplicated to replicated.""" - self.configuration.replication_device = [TEST_REPLICATION_DEVICE] - self.driver = FakeDS8KProxy(self.storage_info, self.logger, - self.exception, self) - self.driver.setup(self.ctxt) - - new_type = {} - diff = { - 'encryption': {}, - 'qos_specs': {}, - 'extra_specs': { - 'replication_enabled': (' False', ' True') - } - } - host = None - vol_type = volume_types.create(self.ctxt, 'VOL_TYPE', - {'replication_enabled': ' False'}) - location = six.text_type({'vol_hex_id': TEST_VOLUME_ID}) - data = json.dumps( - {TEST_TARGET_DS8K_IP: {'vol_hex_id': TEST_VOLUME_ID}}) - metadata = [{'key': 'data_type', 'value': 'FB 512'}] - volume = self._create_volume(volume_type_id=vol_type.id, - provider_location=location, - replication_driver_data=data, - volume_metadata=metadata) - - retyped, retype_model_update = self.driver.retype( - self.ctxt, volume, new_type, diff, host) - self.assertTrue(retyped) - - @mock.patch.object(eventlet, 'sleep') - def test_retype_from_replicated_to_unreplicated(self, mock_sleep): - """retype from replicated to unreplicated.""" - self.configuration.replication_device = [TEST_REPLICATION_DEVICE] - self.driver = FakeDS8KProxy(self.storage_info, self.logger, - self.exception, self) - self.driver.setup(self.ctxt) - - new_type = {} - diff = { - 'encryption': {}, - 'qos_specs': {}, - 'extra_specs': { - 'replication_enabled': (' True', ' False') - } - } - host = None - vol_type = volume_types.create(self.ctxt, 'VOL_TYPE', - {'replication_enabled': ' True'}) - location = six.text_type({'vol_hex_id': TEST_VOLUME_ID}) - 
data = json.dumps( - {TEST_TARGET_DS8K_IP: {'vol_hex_id': TEST_VOLUME_ID}}) - metadata = [{'key': 'data_type', 'value': 'FB 512'}] - volume = self._create_volume(volume_type_id=vol_type.id, - provider_location=location, - replication_driver_data=data, - volume_metadata=metadata) - - retyped, retype_model_update = self.driver.retype( - self.ctxt, volume, new_type, diff, host) - self.assertTrue(retyped) - - @mock.patch.object(eventlet, 'sleep') - @mock.patch.object(helper.DS8KCommonHelper, 'get_flashcopy') - def test_retype_from_thin_to_thick_and_replicated(self, mock_get_flashcopy, - mock_sleep): - """retype from thin-provision to thick-provision and replicated.""" - self.configuration.replication_device = [TEST_REPLICATION_DEVICE] - self.driver = FakeDS8KProxy(self.storage_info, self.logger, - self.exception, self) - self.driver.setup(self.ctxt) - - new_type = {} - diff = { - 'encryption': {}, - 'qos_specs': {}, - 'extra_specs': { - 'drivers:thin_provision': ('True', 'False'), - 'replication_enabled': (' False', ' True') - } - } - host = None - vol_type = volume_types.create(self.ctxt, 'VOL_TYPE', {}) - location = six.text_type({'vol_hex_id': TEST_VOLUME_ID}) - volume = self._create_volume(volume_type_id=vol_type.id, - provider_location=location) - - mock_get_flashcopy.side_effect = [[TEST_FLASHCOPY], {}] - retyped, retype_model_update = self.driver.retype( - self.ctxt, volume, new_type, diff, host) - self.assertTrue(retyped) - - @mock.patch.object(eventlet, 'sleep') - @mock.patch.object(helper.DS8KCommonHelper, 'get_flashcopy') - def test_retype_thin_replicated_vol_to_thick_vol(self, mock_get_flashcopy, - mock_sleep): - """retype from thin-provision and replicated to thick-provision.""" - self.configuration.replication_device = [TEST_REPLICATION_DEVICE] - self.driver = FakeDS8KProxy(self.storage_info, self.logger, - self.exception, self) - self.driver.setup(self.ctxt) - - new_type = {} - diff = { - 'encryption': {}, - 'qos_specs': {}, - 'extra_specs': { - 
'drivers:thin_provision': ('True', 'False'), - 'replication_enabled': (' True', ' False') - } - } - host = None - vol_type = volume_types.create(self.ctxt, 'VOL_TYPE', - {'replication_enabled': ' True'}) - location = six.text_type({'vol_hex_id': TEST_VOLUME_ID}) - data = json.dumps( - {TEST_TARGET_DS8K_IP: {'vol_hex_id': TEST_VOLUME_ID}}) - volume = self._create_volume(volume_type_id=vol_type.id, - provider_location=location, - replication_driver_data=data) - - mock_get_flashcopy.side_effect = [[TEST_FLASHCOPY], {}] - retyped, retype_model_update = self.driver.retype( - self.ctxt, volume, new_type, diff, host) - self.assertTrue(retyped) - - @mock.patch.object(helper.DS8KCommonHelper, 'get_flashcopy') - @mock.patch.object(eventlet, 'sleep') - def test_retype_replicated_volume_from_thin_to_thick(self, mock_sleep, - mock_get_flashcopy): - """retype replicated volume from thin-provision to thick-provision.""" - self.configuration.replication_device = [TEST_REPLICATION_DEVICE] - self.driver = FakeDS8KProxy(self.storage_info, self.logger, - self.exception, self) - self.driver.setup(self.ctxt) - - new_type = {} - diff = { - 'encryption': {}, - 'qos_specs': {}, - 'extra_specs': { - 'drivers:thin_provision': ('True', 'False'), - 'replication_enabled': (' True', ' True') - } - } - host = None - vol_type = volume_types.create(self.ctxt, 'VOL_TYPE', - {'replication_enabled': ' True'}) - location = six.text_type({'vol_hex_id': TEST_VOLUME_ID}) - data = json.dumps( - {TEST_TARGET_DS8K_IP: {'vol_hex_id': TEST_VOLUME_ID}}) - volume = self._create_volume(volume_type_id=vol_type.id, - provider_location=location, - replication_driver_data=data) - - mock_get_flashcopy.side_effect = [[TEST_FLASHCOPY], {}] - retyped, retype_model_update = self.driver.retype( - self.ctxt, volume, new_type, diff, host) - self.assertTrue(retyped) - - @mock.patch.object(helper.DS8KCommonHelper, 'get_flashcopy') - @mock.patch.object(helper.DS8KCommonHelper, 'get_lun_pool') - @mock.patch.object(eventlet, 
'sleep') - def test_retype_thin_vol_to_thick_vol_in_specific_area( - self, mock_sleep, mock_get_lun_pool, mock_get_flashcopy): - """retype thin volume to thick volume located in specific area.""" - self.driver = FakeDS8KProxy(self.storage_info, self.logger, - self.exception, self) - self.driver.setup(self.ctxt) - - new_type = {} - diff = { - 'encryption': {}, - 'qos_specs': {}, - 'extra_specs': { - 'drivers:thin_provision': ('True', 'False'), - 'drivers:storage_pool_ids': (None, TEST_POOL_ID_1), - 'drivers:storage_lss_ids': (None, TEST_LSS_ID_1) - } - } - host = None - vol_type = volume_types.create(self.ctxt, 'VOL_TYPE', - {'drivers:thin_provision': 'False'}) - location = six.text_type({'vol_hex_id': '0400'}) - volume = self._create_volume(volume_type_id=vol_type.id, - provider_location=location) - - mock_get_flashcopy.side_effect = [[TEST_FLASHCOPY], {}] - mock_get_lun_pool.return_value = {'id': TEST_POOL_ID_1} - retyped, retype_model_update = self.driver.retype( - self.ctxt, volume, new_type, diff, host) - location = ast.literal_eval(retype_model_update['provider_location']) - self.assertEqual(TEST_LSS_ID_1, location['vol_hex_id'][:2]) - self.assertTrue(retyped) - - @mock.patch.object(helper.DS8KCommonHelper, 'get_flashcopy') - @mock.patch.object(helper.DS8KCommonHelper, 'get_lun_pool') - @mock.patch.object(eventlet, 'sleep') - def test_retype_replicated_vol_to_vol_in_specific_area( - self, mock_sleep, mock_get_lun_pool, mock_get_flashcopy): - """retype replicated volume to a specific area.""" - self.configuration.replication_device = [TEST_REPLICATION_DEVICE] - self.driver = FakeDS8KProxy(self.storage_info, self.logger, - self.exception, self) - self.driver.setup(self.ctxt) - - new_type = {} - diff = { - 'encryption': {}, - 'qos_specs': {}, - 'extra_specs': { - 'replication_enabled': (' True', ' True'), - 'drivers:storage_pool_ids': (None, TEST_POOL_ID_1), - 'drivers:storage_lss_ids': (None, TEST_LSS_ID_1) - } - } - host = None - vol_type = 
volume_types.create(self.ctxt, 'VOL_TYPE', - {'replication_enabled': ' True'}) - location = six.text_type({'vol_hex_id': '0400'}) - volume = self._create_volume(volume_type_id=vol_type.id, - provider_location=location) - - mock_get_flashcopy.side_effect = [[TEST_FLASHCOPY], {}] - mock_get_lun_pool.return_value = {'id': TEST_POOL_ID_1} - retyped, retype_model_update = self.driver.retype( - self.ctxt, volume, new_type, diff, host) - location = ast.literal_eval(retype_model_update['provider_location']) - self.assertEqual(TEST_LSS_ID_1, location['vol_hex_id'][:2]) - self.assertTrue(retyped) - - def test_retype_vol_in_specific_area_to_another_area(self): - """retype volume from a specific area to another area.""" - self.driver = FakeDS8KProxy(self.storage_info, self.logger, - self.exception, self) - self.driver.setup(self.ctxt) - - new_type = {} - diff = { - 'encryption': {}, - 'qos_specs': {}, - 'extra_specs': { - 'drivers:storage_pool_ids': (TEST_POOL_ID_1, TEST_POOL_ID_2), - 'drivers:storage_lss_ids': (TEST_LSS_ID_1, TEST_LSS_ID_2) - } - } - host = None - vol_type = volume_types.create(self.ctxt, 'VOL_TYPE', { - 'drivers:storage_pool_ids': TEST_POOL_ID_1, - 'drivers:storage_lss_ids': TEST_LSS_ID_1}) - location = six.text_type({'vol_hex_id': TEST_VOLUME_ID}) - volume = self._create_volume(volume_type_id=vol_type.id, - provider_location=location) - - self.assertRaises(exception.VolumeDriverException, - self.driver.retype, - self.ctxt, volume, new_type, diff, host) - - def test_migrate_replicated_volume(self): - """migrate replicated volume should be failed.""" - self.driver = FakeDS8KProxy(self.storage_info, self.logger, - self.exception, self) - self.driver.setup(self.ctxt) - self.driver._update_stats() - vol_type = volume_types.create(self.ctxt, 'VOL_TYPE', - {'replication_enabled': ' True'}) - location = six.text_type({'vol_hex_id': TEST_VOLUME_ID}) - data = json.dumps( - {TEST_TARGET_DS8K_IP: {'vol_hex_id': TEST_VOLUME_ID}}) - volume = 
self._create_volume(volume_type_id=vol_type.id, - provider_location=location, - replication_driver_data=data) - backend = { - 'host': 'host@backend#pool_id', - 'capabilities': { - 'extent_pools': TEST_POOL_ID_1, - 'serial_number': TEST_SOURCE_SYSTEM_UNIT, - 'vendor_name': 'IBM', - 'storage_protocol': 'fibre_channel' - } - } - self.assertRaises(exception.VolumeDriverException, - self.driver.migrate_volume, - self.ctxt, volume, backend) - - def test_migrate_and_try_pools_in_same_rank(self): - """migrate volume and try pool in same rank.""" - self.driver = FakeDS8KProxy(self.storage_info, self.logger, - self.exception, self) - self.driver.setup(self.ctxt) - self.driver._update_stats() - vol_type = volume_types.create(self.ctxt, 'VOL_TYPE', {}) - location = six.text_type({'vol_hex_id': TEST_VOLUME_ID}) - volume = self._create_volume(volume_type_id=vol_type.id, - provider_location=location) - - backend = { - 'host': 'host@backend#pool_id', - 'capabilities': { - 'extent_pools': TEST_POOL_ID_1, - 'serial_number': TEST_SOURCE_SYSTEM_UNIT, - 'vendor_name': 'IBM', - 'storage_protocol': 'fibre_channel' - } - } - moved, model_update = self.driver.migrate_volume( - self.ctxt, volume, backend) - self.assertTrue(moved) - - @mock.patch.object(helper.DS8KCommonHelper, 'get_flashcopy') - @mock.patch.object(eventlet, 'sleep') - def test_migrate_and_try_pools_in_opposite_rank(self, mock_sleep, - mock_get_flashcopy): - """migrate volume and try pool in opposite rank.""" - self.driver = FakeDS8KProxy(self.storage_info, self.logger, - self.exception, self) - self.driver.setup(self.ctxt) - self.driver._update_stats() - vol_type = volume_types.create(self.ctxt, 'VOL_TYPE', {}) - location = six.text_type({'vol_hex_id': TEST_VOLUME_ID}) - volume = self._create_volume(volume_type_id=vol_type.id, - provider_location=location) - - backend = { - 'host': 'host@backend#pool_id', - 'capabilities': { - 'extent_pools': TEST_POOL_ID_2, - 'serial_number': TEST_SOURCE_SYSTEM_UNIT, - 'vendor_name': 
'IBM', - 'storage_protocol': 'fibre_channel' - } - } - mock_get_flashcopy.side_effect = [[TEST_FLASHCOPY], {}] - with mock.patch.object(helper.DS8KCommonHelper, - '_get_pools') as get_pools: - get_pools.return_value = FAKE_GET_POOL_RESPONSE_2['data']['pools'] - moved, model_update = self.driver.migrate_volume( - self.ctxt, volume, backend) - self.assertTrue(moved) - - def test_initialize_connection_of_fb_volume(self): - """attach a FB volume to host.""" - self.driver = FakeDS8KProxy(self.storage_info, self.logger, - self.exception, self) - self.driver.setup(self.ctxt) - vol_type = volume_types.create(self.ctxt, 'VOL_TYPE', {}) - location = six.text_type({'vol_hex_id': TEST_VOLUME_ID}) - volume = self._create_volume(volume_type_id=vol_type.id, - provider_location=location) - - map_data = self.driver.initialize_connection(volume, TEST_CONNECTOR) - self.assertEqual(int(TEST_LUN_ID), map_data['data']['target_lun']) - - def test_initialize_connection_of_eckd_volume(self): - """attach a ECKD volume to host.""" - self.configuration.connection_type = ( - storage.XIV_CONNECTION_TYPE_FC_ECKD) - self.configuration.ds8k_devadd_unitadd_mapping = 'C4-10' - self.configuration.ds8k_ssid_prefix = 'FF' - self.configuration.san_clustername = TEST_ECKD_POOL_ID - self.driver = FakeDS8KProxy(self.storage_info, self.logger, - self.exception, self) - self.driver.setup(self.ctxt) - vol_type = volume_types.create(self.ctxt, 'VOL_TYPE', {}) - location = six.text_type({'vol_hex_id': TEST_ECKD_VOLUME_ID}) - volume = self._create_volume(volume_type_id=vol_type.id, - provider_location=location) - - map_data = self.driver.initialize_connection(volume, {}) - self.assertEqual(int('C4', 16), map_data['data']['cula']) - self.assertEqual(int(TEST_ECKD_VOLUME_ID[2:4], 16), - map_data['data']['unit_address']) - - @mock.patch.object(helper.DS8KCommonHelper, '_get_host_ports') - def test_initialize_connection_when_no_existing_host(self, - mock_get_host_ports): - """attach volume to host which has not been 
created.""" - self.driver = FakeDS8KProxy(self.storage_info, self.logger, - self.exception, self) - self.driver.setup(self.ctxt) - vol_type = volume_types.create(self.ctxt, 'VOL_TYPE', {}) - location = six.text_type({'vol_hex_id': TEST_VOLUME_ID}) - volume = self._create_volume(volume_type_id=vol_type.id, - provider_location=location) - - host_ports = [ - { - "wwpn": TEST_SOURCE_WWPN_1, - "state": "unconfigured", - "hosttype": "LinuxRHEL", - "addrdiscovery": "lunpolling", - "host_id": '' - } - ] - mock_get_host_ports.side_effect = [host_ports] - map_data = self.driver.initialize_connection(volume, TEST_CONNECTOR) - self.assertEqual(int(TEST_LUN_ID), map_data['data']['target_lun']) - - @mock.patch.object(helper.DS8KCommonHelper, '_get_host_ports') - def test_initialize_connection_with_multiple_hosts(self, - mock_get_host_ports): - """attach volume to multiple hosts.""" - self.driver = FakeDS8KProxy(self.storage_info, self.logger, - self.exception, self) - self.driver.setup(self.ctxt) - vol_type = volume_types.create(self.ctxt, 'VOL_TYPE', {}) - location = six.text_type({'vol_hex_id': TEST_VOLUME_ID}) - volume = self._create_volume(volume_type_id=vol_type.id, - provider_location=location) - - host_ports = [ - { - "wwpn": TEST_SOURCE_WWPN_1, - "state": "logged in", - "hosttype": "LinuxRHEL", - "addrdiscovery": "lunpolling", - "host_id": 'H1' - }, - { - "wwpn": TEST_SOURCE_WWPN_1, - "state": "logged in", - "hosttype": "LinuxRHEL", - "addrdiscovery": "lunpolling", - "host_id": 'H2' - } - ] - mock_get_host_ports.side_effect = [host_ports] - self.assertRaises(restclient.APIException, - self.driver.initialize_connection, - volume, TEST_CONNECTOR) - - def test_terminate_connection_of_fb_volume(self): - """detach a FB volume from host.""" - self.driver = FakeDS8KProxy(self.storage_info, self.logger, - self.exception, self) - self.driver.setup(self.ctxt) - vol_type = volume_types.create(self.ctxt, 'VOL_TYPE', {}) - location = six.text_type({'vol_hex_id': TEST_VOLUME_ID}) - 
volume = self._create_volume(volume_type_id=vol_type.id, - provider_location=location) - - self.driver.terminate_connection(volume, TEST_CONNECTOR) - - @mock.patch.object(helper.DS8KCommonHelper, '_get_host_ports') - def test_terminate_connection_with_multiple_hosts(self, - mock_get_host_ports): - """detach volume from multiple hosts.""" - self.driver = FakeDS8KProxy(self.storage_info, self.logger, - self.exception, self) - self.driver.setup(self.ctxt) - vol_type = volume_types.create(self.ctxt, 'VOL_TYPE', {}) - location = six.text_type({'vol_hex_id': TEST_VOLUME_ID}) - volume = self._create_volume(volume_type_id=vol_type.id, - provider_location=location) - - host_ports = [ - { - "wwpn": TEST_SOURCE_WWPN_1, - "state": "logged in", - "hosttype": "LinuxRHEL", - "addrdiscovery": "lunpolling", - "host_id": 'H1' - }, - { - "wwpn": TEST_SOURCE_WWPN_1, - "state": "logged in", - "hosttype": "LinuxRHEL", - "addrdiscovery": "lunpolling", - "host_id": 'H2' - } - ] - mock_get_host_ports.side_effect = [host_ports] - self.assertRaises(restclient.APIException, - self.driver.terminate_connection, - volume, TEST_CONNECTOR) - - @mock.patch.object(helper.DS8KCommonHelper, '_get_host_ports') - def test_terminate_connection_but_can_not_find_host(self, - mock_get_host_ports): - """detach volume but can not find host.""" - self.driver = FakeDS8KProxy(self.storage_info, self.logger, - self.exception, self) - self.driver.setup(self.ctxt) - vol_type = volume_types.create(self.ctxt, 'VOL_TYPE', {}) - location = six.text_type({'vol_hex_id': TEST_VOLUME_ID}) - volume = self._create_volume(volume_type_id=vol_type.id, - provider_location=location) - - host_ports = [ - { - "wwpn": TEST_SOURCE_WWPN_1, - "state": "unconfigured", - "hosttype": "LinuxRHEL", - "addrdiscovery": "lunpolling", - "host_id": '' - } - ] - mock_get_host_ports.side_effect = [host_ports] - unmap_data = self.driver.terminate_connection(volume, TEST_CONNECTOR) - self.assertIsNone(unmap_data) - - 
@mock.patch.object(helper.DS8KCommonHelper, '_get_host_ports') - @mock.patch.object(helper.DS8KCommonHelper, '_get_mappings') - def test_terminate_connection_and_remove_host(self, mock_get_mappings, - mock_get_host_ports): - """detach volume and remove host in DS8K.""" - self.driver = FakeDS8KProxy(self.storage_info, self.logger, - self.exception, self) - self.driver.setup(self.ctxt) - vol_type = volume_types.create(self.ctxt, 'VOL_TYPE', {}) - location = six.text_type({'vol_hex_id': TEST_VOLUME_ID}) - volume = self._create_volume(volume_type_id=vol_type.id, - provider_location=location) - - host_ports = [ - { - "wwpn": TEST_SOURCE_WWPN_1, - "state": "logged in", - "hosttype": "LinuxRHEL", - "addrdiscovery": "lunpolling", - "host_id": TEST_HOST_ID - }, - { - "wwpn": TEST_SOURCE_WWPN_2, - "state": "unconfigured", - "hosttype": "LinuxRHEL", - "addrdiscovery": "lunpolling", - "host_id": '' - } - ] - mappings = [ - { - "lunid": TEST_LUN_ID, - "link": {}, - "volume": {"id": TEST_VOLUME_ID, "link": {}} - } - ] - mock_get_host_ports.side_effect = [host_ports] - mock_get_mappings.side_effect = [mappings] - self.driver.terminate_connection(volume, TEST_CONNECTOR) - - def test_create_consistency_group(self): - """user should reserve LSS for consistency group.""" - self.driver = FakeDS8KProxy(self.storage_info, self.logger, - self.exception, self) - self.driver.setup(self.ctxt) - - group_type = group_types.create( - self.ctxt, - 'group', - {'consistent_group_snapshot_enabled': ' True'} - ) - group = self._create_group(host=TEST_GROUP_HOST, - group_type_id=group_type.id) - self.assertRaises(exception.VolumeDriverException, - self.driver.create_group, - self.ctxt, group) - - def test_delete_consistency_group_sucessfully(self): - """test a successful consistency group deletion.""" - self.driver = FakeDS8KProxy(self.storage_info, self.logger, - self.exception, self) - self.driver.setup(self.ctxt) - group_type = group_types.create( - self.ctxt, - 'group', - 
{'consistent_group_snapshot_enabled': ' True'} - ) - group = self._create_group(host=TEST_GROUP_HOST, - group_type_id=group_type.id) - location = six.text_type({'vol_hex_id': TEST_VOLUME_ID}) - volume = self._create_volume(provider_location=location, - group_id=group.id) - model_update, volumes_model_update = ( - self.driver.delete_group(self.ctxt, group, [volume])) - self.assertEqual('deleted', volumes_model_update[0]['status']) - self.assertEqual(fields.GroupStatus.DELETED, - model_update['status']) - - @mock.patch.object(helper.DS8KCommonHelper, 'delete_lun') - def test_delete_consistency_group_failed(self, mock_delete_lun): - """test a failed consistency group deletion.""" - self.driver = FakeDS8KProxy(self.storage_info, self.logger, - self.exception, self) - self.driver.setup(self.ctxt) - group_type = group_types.create( - self.ctxt, - 'group', - {'consistent_group_snapshot_enabled': ' True'} - ) - group = self._create_group(host=TEST_GROUP_HOST, - group_type_id=group_type.id) - location = six.text_type({'vol_hex_id': TEST_VOLUME_ID}) - volume = self._create_volume(provider_location=location, - group_id=group.id) - mock_delete_lun.side_effect = ( - restclient.APIException('delete volume failed.')) - model_update, volumes_model_update = ( - self.driver.delete_group(self.ctxt, group, [volume])) - self.assertEqual('error_deleting', volumes_model_update[0]['status']) - self.assertEqual(fields.GroupStatus.ERROR_DELETING, - model_update['status']) - - def test_create_consistency_group_without_reserve_lss(self): - """user should reserve LSS for group if it enables cg.""" - self.driver = FakeDS8KProxy(self.storage_info, self.logger, - self.exception, self) - self.driver.setup(self.ctxt) - - group_type = group_types.create( - self.ctxt, - 'group', - {'consistent_group_snapshot_enabled': ' True'} - ) - group = self._create_group(host=TEST_GROUP_HOST, - group_type_id=group_type.id) - self.assertRaises(exception.VolumeDriverException, - self.driver.create_group, 
self.ctxt, group) - - def test_update_generic_group_without_enable_cg(self): - """update group which not enable cg should return None.""" - self.driver = FakeDS8KProxy(self.storage_info, self.logger, - self.exception, self) - self.driver.setup(self.ctxt) - - group_type = group_types.create(self.ctxt, 'group', {}) - group = self._create_group(host=TEST_GROUP_HOST, - group_type_id=group_type.id) - location = six.text_type({'vol_hex_id': TEST_VOLUME_ID}) - volume = self._create_volume(provider_location=location) - model_update, add_volumes_update, remove_volumes_update = ( - self.driver.update_group(self.ctxt, group, [volume], [])) - self.assertIsNone(model_update) - self.assertIsNone(add_volumes_update) - self.assertIsNone(remove_volumes_update) - - @mock.patch.object(eventlet, 'sleep') - @mock.patch.object(helper.DS8KCommonHelper, 'get_flashcopy') - @mock.patch.object(helper.DS8KCommonHelper, '_create_lun') - @mock.patch.object(helper.DS8KCommonHelper, 'lun_exists') - def test_update_generic_group_when_enable_cg(self, mock_lun_exists, - mock_create_lun, - mock_get_flashcopy, - mock_sleep): - """update group, but volume is not in LSS which belongs to group.""" - self.configuration.lss_range_for_cg = '20-23' - self.driver = FakeDS8KProxy(self.storage_info, self.logger, - self.exception, self) - self.driver.setup(self.ctxt) - - group_type = group_types.create( - self.ctxt, - 'group', - {'consistent_group_snapshot_enabled': ' True'} - ) - group = self._create_group(host=TEST_GROUP_HOST, - group_type_id=group_type.id) - location = six.text_type({'vol_hex_id': TEST_VOLUME_ID}) - metadata = [{'key': 'data_type', 'value': 'FB 512'}] - volume = self._create_volume(provider_location=location, - volume_metadata=metadata) - - mock_get_flashcopy.side_effect = [[TEST_FLASHCOPY], {}] - mock_create_lun.return_value = '2200' - mock_lun_exists.return_value = True - model_update, add_volumes_update, remove_volumes_update = ( - self.driver.update_group(self.ctxt, group, [volume], [])) 
- location = ast.literal_eval(add_volumes_update[0]['provider_location']) - self.assertEqual('2200', location['vol_hex_id']) - - @mock.patch.object(eventlet, 'sleep') - @mock.patch.object(helper.DS8KCommonHelper, 'get_flashcopy') - @mock.patch.object(helper.DS8KCommonHelper, '_create_lun') - @mock.patch.object(helper.DS8KCommonHelper, 'lun_exists') - def test_update_generic_group_when_enable_cg2(self, mock_lun_exists, - mock_create_lun, - mock_get_flashcopy, - mock_sleep): - """add replicated volume into group.""" - self.configuration.replication_device = [TEST_REPLICATION_DEVICE] - self.configuration.lss_range_for_cg = '20-23' - self.driver = FakeDS8KProxy(self.storage_info, self.logger, - self.exception, self) - self.driver.setup(self.ctxt) - - group_type = group_types.create( - self.ctxt, - 'group', - {'consistent_group_snapshot_enabled': ' True'} - ) - group = self._create_group(host=TEST_GROUP_HOST, - group_type_id=group_type.id) - - vol_type = volume_types.create( - self.ctxt, 'VOL_TYPE', {'replication_enabled': ' True'}) - location = six.text_type({'vol_hex_id': TEST_VOLUME_ID}) - data = json.dumps( - {TEST_TARGET_DS8K_IP: {'vol_hex_id': TEST_VOLUME_ID}}) - metadata = [{'key': 'data_type', 'value': 'FB 512'}] - volume = self._create_volume(volume_type_id=vol_type.id, - provider_location=location, - replication_driver_data=data, - volume_metadata=metadata) - - mock_get_flashcopy.side_effect = [[TEST_FLASHCOPY], {}] - mock_create_lun.return_value = '2200' - mock_lun_exists.return_value = True - model_update, add_volumes_update, remove_volumes_update = ( - self.driver.update_group(self.ctxt, group, [volume], [])) - location = ast.literal_eval(add_volumes_update[0]['provider_location']) - self.assertEqual('2200', location['vol_hex_id']) - - @mock.patch.object(helper.DS8KCommonHelper, 'delete_lun') - def test_delete_generic_group_failed(self, mock_delete_lun): - """test a failed group deletion.""" - self.driver = FakeDS8KProxy(self.storage_info, self.logger, - 
self.exception, self) - self.driver.setup(self.ctxt) - group_type = group_types.create(self.ctxt, 'group', {}) - group = self._create_group(group_type_id=group_type.id) - location = six.text_type({'vol_hex_id': TEST_VOLUME_ID}) - volume = self._create_volume(group_type_id=group_type.id, - provider_location=location, - group_id=group.id) - mock_delete_lun.side_effect = ( - restclient.APIException('delete volume failed.')) - model_update, volumes_model_update = ( - self.driver.delete_group(self.ctxt, group, [volume])) - self.assertEqual('error_deleting', volumes_model_update[0]['status']) - self.assertEqual(fields.GroupStatus.ERROR_DELETING, - model_update['status']) - - def test_delete_generic_group_sucessfully(self): - """test a successful generic group deletion.""" - self.driver = FakeDS8KProxy(self.storage_info, self.logger, - self.exception, self) - self.driver.setup(self.ctxt) - group_type = group_types.create(self.ctxt, 'CG', {}) - group = self._create_group(group_type_id=group_type.id) - location = six.text_type({'vol_hex_id': TEST_VOLUME_ID}) - volume = self._create_volume(group_type_id=group_type.id, - provider_location=location, - group_id=group.id) - model_update, volumes_model_update = ( - self.driver.delete_group(self.ctxt, group, [volume])) - self.assertEqual('deleted', volumes_model_update[0]['status']) - self.assertEqual(fields.GroupStatus.DELETED, model_update['status']) - - @mock.patch.object(eventlet, 'sleep') - @mock.patch.object(helper.DS8KCommonHelper, 'get_flashcopy') - @mock.patch.object(helper.DS8KCommonHelper, '_create_lun') - def test_create_consistency_group_snapshot_sucessfully( - self, mock_create_lun, mock_get_flashcopy, mock_sleep): - """test a successful consistency group snapshot creation.""" - self.configuration.lss_range_for_cg = '20-23' - self.driver = FakeDS8KProxy(self.storage_info, self.logger, - self.exception, self) - self.driver.setup(self.ctxt) - group_type = group_types.create( - self.ctxt, - 'group', - 
{'consistent_group_snapshot_enabled': ' True'} - ) - group = self._create_group(group_type_id=group_type.id) - location = six.text_type({'vol_hex_id': '2000'}) - volume = self._create_volume(provider_location=location, - group_id=group.id) - group_snapshot = ( - self._create_group_snapshot(group_id=group.id, - group_type_id=group_type.id)) - snapshot = self._create_snapshot(volume_id=volume.id, - group_snapshot_id=group_snapshot.id) - - mock_get_flashcopy.side_effect = [[TEST_FLASHCOPY], {}] - mock_create_lun.return_value = '2200' - model_update, snapshots_model_update = ( - self.driver.create_group_snapshot( - self.ctxt, group_snapshot, [snapshot])) - location = ast.literal_eval( - snapshots_model_update[0]['provider_location']) - self.assertEqual('2200', location['vol_hex_id']) - self.assertEqual('available', snapshots_model_update[0]['status']) - self.assertEqual(fields.GroupStatus.AVAILABLE, model_update['status']) - - def test_delete_consistency_group_snapshot_sucessfully(self): - """test a successful consistency group snapshot deletion.""" - self.driver = FakeDS8KProxy(self.storage_info, self.logger, - self.exception, self) - self.driver.setup(self.ctxt) - group_type = group_types.create( - self.ctxt, - 'group', - {'consistent_group_snapshot_enabled': ' True'} - ) - group = self._create_group(group_type_id=group_type.id) - location = six.text_type({'vol_hex_id': '2000'}) - volume = self._create_volume(provider_location=location, - group_id=group.id) - group_snapshot = ( - self._create_group_snapshot(group_id=group.id, - group_type_id=group_type.id)) - snapshot = self._create_snapshot(volume_id=volume.id, - group_snapshot_id=group_snapshot.id) - - model_update, snapshots_model_update = ( - self.driver.delete_group_snapshot( - self.ctxt, group_snapshot, [snapshot])) - self.assertEqual('deleted', snapshots_model_update[0]['status']) - self.assertEqual(fields.GroupSnapshotStatus.DELETED, - model_update['status']) - - @mock.patch.object(helper.DS8KCommonHelper, 
'delete_lun') - def test_delete_consistency_group_snapshot_failed(self, - mock_delete_lun): - """test a failed consistency group snapshot deletion.""" - self.driver = FakeDS8KProxy(self.storage_info, self.logger, - self.exception, self) - self.driver.setup(self.ctxt) - - group_type = group_types.create( - self.ctxt, - 'group', - {'consistent_group_snapshot_enabled': ' True'} - ) - group = self._create_group(group_type_id=group_type.id) - location = six.text_type({'vol_hex_id': '2000'}) - volume = self._create_volume(provider_location=location, - group_id=group.id) - group_snapshot = ( - self._create_group_snapshot(group_id=group.id, - group_type_id=group_type.id)) - snapshot = self._create_snapshot(volume_id=volume.id, - group_snapshot_id=group_snapshot.id) - - mock_delete_lun.side_effect = ( - restclient.APIException('delete snapshot failed.')) - model_update, snapshots_model_update = ( - self.driver.delete_group_snapshot( - self.ctxt, group_snapshot, [snapshot])) - self.assertEqual('error_deleting', snapshots_model_update[0]['status']) - self.assertEqual(fields.GroupSnapshotStatus.ERROR_DELETING, - model_update['status']) - - @mock.patch.object(eventlet, 'sleep') - @mock.patch.object(helper.DS8KCommonHelper, '_create_lun') - @mock.patch.object(helper.DS8KCommonHelper, 'get_flashcopy') - def test_create_consisgroup_from_consisgroup(self, mock_get_flashcopy, - mock_create_lun, mock_sleep): - """test creation of consistency group from consistency group.""" - self.configuration.lss_range_for_cg = '20-23' - self.driver = FakeDS8KProxy(self.storage_info, self.logger, - self.exception, self) - self.driver.setup(self.ctxt) - - group_type = group_types.create( - self.ctxt, - 'group', - {'consistent_group_snapshot_enabled': ' True'} - ) - src_group = self._create_group(host=TEST_GROUP_HOST, - group_type_id=group_type.id) - location = six.text_type({'vol_hex_id': '2000'}) - src_vol = self._create_volume(provider_location=location, - group_id=src_group.id) - group = 
self._create_group(host=TEST_GROUP_HOST, - group_type_id=group_type.id) - volume = self._create_volume(group_id=group.id) - mock_get_flashcopy.side_effect = [[TEST_FLASHCOPY], {}] - mock_create_lun.return_value = '2200' - model_update, volumes_model_update = ( - self.driver.create_group_from_src( - self.ctxt, group, [volume], None, None, src_group, [src_vol])) - self.assertEqual('2200', - volumes_model_update[0]['metadata']['vol_hex_id']) - self.assertEqual(fields.GroupStatus.AVAILABLE, - model_update['status']) - - @mock.patch.object(eventlet, 'sleep') - @mock.patch.object(helper.DS8KCommonHelper, '_create_lun') - @mock.patch.object(helper.DS8KCommonHelper, 'get_flashcopy') - def test_create_consisgroup_from_cgsnapshot(self, mock_get_flashcopy, - mock_create_lun, mock_sleep): - """test creation of consistency group from cgsnapshot.""" - self.configuration.lss_range_for_cg = '20-23' - self.driver = FakeDS8KProxy(self.storage_info, self.logger, - self.exception, self) - self.driver.setup(self.ctxt) - - group_type = group_types.create( - self.ctxt, - 'group', - {'consistent_group_snapshot_enabled': ' True'} - ) - src_group = self._create_group(host=TEST_GROUP_HOST, - group_type_id=group_type.id) - src_vol = self._create_volume(group_id=src_group.id) - group_snapshot = ( - self._create_group_snapshot(group_id=src_group.id, - group_type_id=group_type.id)) - location = six.text_type({'vol_hex_id': '2000'}) - snapshot = self._create_snapshot(volume_id=src_vol.id, - provider_location=location, - group_snapshot_id=group_snapshot.id) - group = self._create_group(host=TEST_GROUP_HOST, - group_type_id=group_type.id) - volume = self._create_volume(group_id=group.id) - - mock_get_flashcopy.side_effect = [[TEST_FLASHCOPY], {}] - mock_create_lun.return_value = '2200' - model_update, volumes_model_update = ( - self.driver.create_group_from_src( - self.ctxt, group, [volume], group_snapshot, - [snapshot], None, None)) - self.assertEqual( - '2200', 
volumes_model_update[0]['metadata']['vol_hex_id']) - self.assertEqual(fields.GroupStatus.AVAILABLE, - model_update['status']) - - @mock.patch.object(eventlet, 'sleep') - @mock.patch.object(helper.DS8KCommonHelper, 'get_pprc_pairs') - def test_failover_host_successfully(self, mock_get_pprc_pairs, mock_sleep): - """Failover host to valid secondary successfully.""" - self.configuration.replication_device = [TEST_REPLICATION_DEVICE] - self.driver = FakeDS8KProxy(self.storage_info, self.logger, - self.exception, self) - self.driver.setup(self.ctxt) - - vol_type = volume_types.create(self.ctxt, 'VOL_TYPE', - {'replication_enabled': ' True'}) - location = six.text_type({'vol_hex_id': TEST_VOLUME_ID}) - data = json.dumps( - {TEST_TARGET_DS8K_IP: {'vol_hex_id': TEST_VOLUME_ID}}) - metadata = [{'key': 'data_type', 'value': 'FB 512'}] - volume = self._create_volume(volume_type_id=vol_type.id, - provider_location=location, - replication_driver_data=data, - volume_metadata=metadata) - pprc_pairs = copy.deepcopy(FAKE_GET_PPRCS_RESPONSE['data']['pprcs']) - pprc_pairs[0]['state'] = 'suspended' - mock_get_pprc_pairs.side_effect = [pprc_pairs] - secondary_id, volume_update_list, __ = self.driver.failover_host( - self.ctxt, [volume], TEST_TARGET_DS8K_IP, []) - self.assertEqual(TEST_TARGET_DS8K_IP, secondary_id) - - @mock.patch.object(replication.Replication, 'do_pprc_failover') - def test_failover_host_failed(self, mock_do_pprc_failover): - """Failover host should raise exception when failed.""" - self.configuration.replication_device = [TEST_REPLICATION_DEVICE] - self.driver = FakeDS8KProxy(self.storage_info, self.logger, - self.exception, self) - self.driver.setup(self.ctxt) - - vol_type = volume_types.create(self.ctxt, 'VOL_TYPE', - {'replication_enabled': ' True'}) - location = six.text_type({'vol_hex_id': TEST_VOLUME_ID}) - data = json.dumps( - {TEST_TARGET_DS8K_IP: {'vol_hex_id': TEST_VOLUME_ID}}) - metadata = [{'key': 'data_type', 'value': 'FB 512'}] - volume = 
self._create_volume(volume_type_id=vol_type.id, - provider_location=location, - replication_driver_data=data, - volume_metadata=metadata) - - mock_do_pprc_failover.side_effect = ( - restclient.APIException('failed to do failover.')) - self.assertRaises(exception.UnableToFailOver, - self.driver.failover_host, self.ctxt, - [volume], TEST_TARGET_DS8K_IP, []) - - def test_failover_host_to_invalid_target(self): - """Failover host to invalid secondary should fail.""" - self.configuration.replication_device = [TEST_REPLICATION_DEVICE] - self.driver = FakeDS8KProxy(self.storage_info, self.logger, - self.exception, self) - self.driver.setup(self.ctxt) - - vol_type = volume_types.create(self.ctxt, 'VOL_TYPE', - {'replication_enabled': ' True'}) - location = six.text_type({'vol_hex_id': TEST_VOLUME_ID}) - data = json.dumps( - {TEST_TARGET_DS8K_IP: {'vol_hex_id': TEST_VOLUME_ID}}) - volume = self._create_volume(volume_type_id=vol_type.id, - provider_location=location, - replication_driver_data=data) - self.assertRaises(exception.InvalidReplicationTarget, - self.driver.failover_host, self.ctxt, - [volume], 'fake_target', []) - - def test_failover_host_that_has_been_failed_over(self): - """Failover host that has been failed over should just return.""" - self.configuration.replication_device = [TEST_REPLICATION_DEVICE] - self.driver = FakeDS8KProxy(self.storage_info, self.logger, - self.exception, self, TEST_TARGET_DS8K_IP) - self.driver.setup(self.ctxt) - - vol_type = volume_types.create(self.ctxt, 'VOL_TYPE', - {'replication_enabled': ' True'}) - location = six.text_type({'vol_hex_id': TEST_VOLUME_ID}) - data = json.dumps( - {TEST_TARGET_DS8K_IP: {'vol_hex_id': TEST_VOLUME_ID}}) - volume = self._create_volume(volume_type_id=vol_type.id, - provider_location=location, - replication_driver_data=data) - secondary_id, volume_update_list, __ = self.driver.failover_host( - self.ctxt, [volume], TEST_TARGET_DS8K_IP, []) - self.assertEqual(TEST_TARGET_DS8K_IP, secondary_id) - 
self.assertEqual([], volume_update_list) - - def test_failback_host_that_has_been_failed_back(self): - """Failback host that has been failed back should just return.""" - self.configuration.replication_device = [TEST_REPLICATION_DEVICE] - self.driver = FakeDS8KProxy(self.storage_info, self.logger, - self.exception, self) - self.driver.setup(self.ctxt) - - vol_type = volume_types.create(self.ctxt, 'VOL_TYPE', - {'replication_enabled': ' True'}) - location = six.text_type({'vol_hex_id': TEST_VOLUME_ID}) - data = json.dumps( - {TEST_TARGET_DS8K_IP: {'vol_hex_id': TEST_VOLUME_ID}}) - volume = self._create_volume(volume_type_id=vol_type.id, - provider_location=location, - replication_driver_data=data) - secondary_id, volume_update_list, __ = self.driver.failover_host( - self.ctxt, [volume], 'default', []) - self.assertIsNone(secondary_id) - self.assertEqual([], volume_update_list) - - def test_failover_host_which_only_has_unreplicated_volume(self): - """Failover host which only has unreplicated volume.""" - self.configuration.replication_device = [TEST_REPLICATION_DEVICE] - self.driver = FakeDS8KProxy(self.storage_info, self.logger, - self.exception, self) - self.driver.setup(self.ctxt) - - vol_type = volume_types.create(self.ctxt, 'VOL_TYPE', {}) - location = six.text_type({'vol_hex_id': TEST_VOLUME_ID}) - volume = self._create_volume(volume_type_id=vol_type.id, - provider_location=location) - secondary_id, volume_update_list, __ = self.driver.failover_host( - self.ctxt, [volume], TEST_TARGET_DS8K_IP, []) - self.assertEqual(TEST_TARGET_DS8K_IP, secondary_id) - self.assertEqual('error', volume_update_list[0]['updates']['status']) - - def test_failback_should_recover_status_of_unreplicated_volume(self): - """Failback host should recover the status of unreplicated volume.""" - self.configuration.replication_device = [TEST_REPLICATION_DEVICE] - self.driver = FakeDS8KProxy(self.storage_info, self.logger, - self.exception, self, TEST_TARGET_DS8K_IP) - 
self.driver.setup(self.ctxt) - - vol_type = volume_types.create(self.ctxt, 'VOL_TYPE', {}) - location = six.text_type({ - 'vol_hex_id': TEST_VOLUME_ID, - 'old_status': 'available' - }) - volume = self._create_volume(volume_type_id=vol_type.id, - provider_location=location) - secondary_id, volume_update_list, __ = self.driver.failover_host( - self.ctxt, [volume], 'default', []) - self.assertEqual('default', secondary_id) - self.assertEqual('available', - volume_update_list[0]['updates']['status']) - - @mock.patch.object(eventlet, 'sleep') - @mock.patch.object(helper.DS8KCommonHelper, 'get_pprc_pairs') - def test_failback_host_successfully(self, mock_get_pprc_pairs, mock_sleep): - """Failback host to primary successfully.""" - self.configuration.replication_device = [TEST_REPLICATION_DEVICE] - self.driver = FakeDS8KProxy(self.storage_info, self.logger, - self.exception, self, TEST_TARGET_DS8K_IP) - self.driver.setup(self.ctxt) - - vol_type = volume_types.create(self.ctxt, 'VOL_TYPE', - {'replication_enabled': ' True'}) - location = six.text_type({'vol_hex_id': TEST_VOLUME_ID}) - data = json.dumps( - {TEST_TARGET_DS8K_IP: {'vol_hex_id': TEST_VOLUME_ID}}) - metadata = [{'key': 'data_type', 'value': 'FB 512'}] - volume = self._create_volume(volume_type_id=vol_type.id, - provider_location=location, - replication_driver_data=data, - volume_metadata=metadata) - pprc_pairs_full_duplex = FAKE_GET_PPRCS_RESPONSE['data']['pprcs'] - pprc_pairs_suspended = copy.deepcopy(pprc_pairs_full_duplex) - pprc_pairs_suspended[0]['state'] = 'suspended' - mock_get_pprc_pairs.side_effect = [pprc_pairs_full_duplex, - pprc_pairs_suspended, - pprc_pairs_full_duplex] - secondary_id, volume_update_list, __ = self.driver.failover_host( - self.ctxt, [volume], 'default', []) - self.assertEqual('default', secondary_id) - - @mock.patch.object(replication.Replication, 'start_pprc_failback') - def test_failback_host_failed(self, mock_start_pprc_failback): - """Failback host should raise exception when 
failed.""" - self.configuration.replication_device = [TEST_REPLICATION_DEVICE] - self.driver = FakeDS8KProxy(self.storage_info, self.logger, - self.exception, self, TEST_TARGET_DS8K_IP) - self.driver.setup(self.ctxt) - - vol_type = volume_types.create(self.ctxt, 'VOL_TYPE', - {'replication_enabled': ' True'}) - location = six.text_type({'vol_hex_id': TEST_VOLUME_ID}) - data = json.dumps( - {TEST_TARGET_DS8K_IP: {'vol_hex_id': TEST_VOLUME_ID}}) - volume = self._create_volume(volume_type_id=vol_type.id, - provider_location=location, - replication_driver_data=data) - mock_start_pprc_failback.side_effect = ( - restclient.APIException('failed to do failback.')) - self.assertRaises(exception.UnableToFailOver, - self.driver.failover_host, self.ctxt, - [volume], 'default', []) diff --git a/cinder/tests/unit/volume/drivers/ibm/test_ibm_flashsystem.py b/cinder/tests/unit/volume/drivers/ibm/test_ibm_flashsystem.py deleted file mode 100644 index 63bec428f..000000000 --- a/cinder/tests/unit/volume/drivers/ibm/test_ibm_flashsystem.py +++ /dev/null @@ -1,1364 +0,0 @@ -# Copyright 2014 IBM Corp. -# Copyright 2012 OpenStack Foundation -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
-# - -"""Tests for the IBM FlashSystem volume driver.""" - -import mock -from oslo_concurrency import processutils -from oslo_utils import units -import six - -import random -import re - -from cinder import context -from cinder import exception -from cinder import test -from cinder import utils -from cinder.volume import configuration as conf -from cinder.volume.drivers.ibm import flashsystem_fc -from cinder.volume import utils as volume_utils -from cinder.volume import volume_types - - -class FlashSystemManagementSimulator(object): - def __init__(self): - # Default protocol is FC - self._protocol = 'FC' - self._volumes_list = {} - self._hosts_list = {} - self._mappings_list = {} - self._next_cmd_error = { - 'lsnode': '', - 'lssystem': '', - 'lsmdiskgrp': '' - } - self._errors = { - # CMMVC50000 is a fake error which indicates that command has not - # got expected results. This error represents kinds of CLI errors. - 'CMMVC50000': ('', 'CMMVC50000 The command can not be executed ' - 'successfully.') - } - - @staticmethod - def _find_unused_id(d): - ids = [] - for v in d.values(): - ids.append(int(v['id'])) - ids.sort() - for index, n in enumerate(ids): - if n > index: - return six.text_type(index) - return six.text_type(len(ids)) - - @staticmethod - def _is_invalid_name(name): - if re.match(r'^[a-zA-Z_][\w ._-]*$', name): - return False - return True - - @staticmethod - def _cmd_to_dict(arg_list): - no_param_args = [ - 'bytes', - 'force' - ] - one_param_args = [ - 'delim', - 'hbawwpn', - 'host', - 'iogrp', - 'iscsiname', - 'mdiskgrp', - 'name', - 'scsi', - 'size', - 'unit' - ] - - # All commands should begin with svcinfo or svctask - if arg_list[0] not in ('svcinfo', 'svctask') or len(arg_list) < 2: - raise exception.InvalidInput(reason=six.text_type(arg_list)) - ret = {'cmd': arg_list[1]} - arg_list.pop(0) - - skip = False - for i in range(1, len(arg_list)): - if skip: - skip = False - continue - if arg_list[i][0] == '-': - param = arg_list[i][1:] - if param in 
no_param_args: - ret[param] = True - elif param in one_param_args: - ret[param] = arg_list[i + 1] - skip = True - else: - raise exception.InvalidInput( - reason=('unrecognized argument %s') % arg_list[i]) - else: - ret['obj'] = arg_list[i] - return ret - - @staticmethod - def _print_cmd_info(rows, delim=' ', nohdr=False, **kwargs): - """Generic function for printing information.""" - if nohdr: - del rows[0] - for index in range(len(rows)): - rows[index] = delim.join(rows[index]) - return ('%s' % '\n'.join(rows), '') - - @staticmethod - def _convert_units_bytes(num, unit): - unit_array = ['B', 'KB', 'MB', 'GB', 'TB', 'PB'] - unit_index = 0 - - while unit.lower() != unit_array[unit_index].lower(): - num = num * 1024 - unit_index += 1 - - return six.text_type(num) - - def _cmd_lshost(self, **kwargs): - """lshost command. - - svcinfo lshost -delim ! - svcinfo lshost -delim ! - """ - if 'obj' not in kwargs: - rows = [] - rows.append(['id', 'name', 'port_count', 'iogrp_count', 'status']) - for host in self._hosts_list.values(): - rows.append([host['id'], host['host_name'], '1', '1', - 'degraded']) - if len(rows) > 1: - return self._print_cmd_info(rows=rows, **kwargs) - else: - return ('', '') - else: - host_name = kwargs['obj'].strip('\'\"') - if host_name not in self._hosts_list: - return self._errors['CMMVC50000'] - host = self._hosts_list[host_name] - rows = [] - rows.append(['id', host['id']]) - rows.append(['name', host['host_name']]) - rows.append(['port_count', '1']) - rows.append(['type', 'generic']) - rows.append(['mask', '1111']) - rows.append(['iogrp_count', '1']) - rows.append(['status', 'degraded']) - for port in host['iscsi_names']: - rows.append(['iscsi_name', port]) - rows.append(['node_logged_in_count', '0']) - rows.append(['state', 'offline']) - for port in host['wwpns']: - rows.append(['WWPN', port]) - rows.append(['node_logged_in_count', '0']) - rows.append(['state', 'active']) - - if 'delim' in kwargs: - for index in range(len(rows)): - rows[index] = 
kwargs['delim'].join(rows[index]) - - return ('%s' % '\n'.join(rows), '') - - def _cmd_lshostvdiskmap(self, **kwargs): - """svcinfo lshostvdiskmap -delim ! """ - - if 'obj' not in kwargs: - return self._errors['CMMVC50000'] - - host_name = kwargs['obj'].strip('\'\"') - if host_name not in self._hosts_list: - return self._errors['CMMVC50000'] - - rows = [] - rows.append(['id', 'name', 'SCSI_id', 'vdisk_id', 'vdisk_name', - 'vdisk_UID']) - - for mapping in self._mappings_list.values(): - if (host_name == '') or (mapping['host'] == host_name): - volume = self._volumes_list[mapping['vol']] - rows.append([mapping['id'], mapping['host'], - mapping['lun'], volume['id'], - volume['name'], volume['vdisk_UID']]) - - return self._print_cmd_info(rows=rows, **kwargs) - - def _cmd_lsmdiskgrp(self, **kwargs): - """svcinfo lsmdiskgrp -gui -bytes -delim ! """ - - status = 'online' - if self._next_cmd_error['lsmdiskgrp'] == 'error': - self._next_cmd_error['lsmdiskgrp'] = '' - return self._errors['CMMVC50000'] - - if self._next_cmd_error['lsmdiskgrp'] == 'status=offline': - self._next_cmd_error['lsmdiskgrp'] = '' - status = 'offline' - - rows = [None] * 2 - rows[0] = ['id', 'status', 'mdisk_count', 'vdisk_count', 'capacity', - 'free_capacity', 'virtual_capacity', 'used_capacity', - 'real_capacity', 'encrypted', 'type', 'encrypt'] - rows[1] = ['0', status, '1', '0', '3573412790272', - '3529432325160', '1693247906775', '277841182', - '38203734097', 'no', 'parent', 'no'] - - if kwargs['obj'] == 'mdiskgrp0': - row = rows[1] - else: - return self._errors['CMMVC50000'] - - objrows = [] - for idx, val in enumerate(rows[0]): - objrows.append([val, row[idx]]) - - if 'delim' in kwargs: - for index in range(len(objrows)): - objrows[index] = kwargs['delim'].join(objrows[index]) - - return ('%s' % '\n'.join(objrows), '') - - def _cmd_lsnode(self, **kwargs): - """lsnode command. - - svcinfo lsnode -delim ! - svcinfo lsnode -delim ! 
- """ - - if self._protocol == 'FC' or self._protocol == 'both': - port_status = 'active' - else: - port_status = 'unconfigured' - - rows1 = [None] * 7 - rows1[0] = ['name', 'node1'] - rows1[1] = ['port_id', '000000000000001'] - rows1[2] = ['port_status', port_status] - rows1[3] = ['port_speed', '8Gb'] - rows1[4] = ['port_id', '000000000000001'] - rows1[5] = ['port_status', port_status] - rows1[6] = ['port_speed', '8Gb'] - - rows2 = [None] * 7 - rows2[0] = ['name', 'node2'] - rows2[1] = ['port_id', '000000000000002'] - rows2[2] = ['port_status', port_status] - rows2[3] = ['port_speed', '8Gb'] - rows2[4] = ['port_id', '000000000000002'] - rows2[5] = ['port_status', port_status] - rows2[6] = ['port_speed', 'N/A'] - - rows3 = [None] * 3 - rows3[0] = ['id', 'name', 'UPS_serial_number', 'WWNN', 'status', - 'IO_group_id', 'IO_group_name', 'config_node', - 'UPS_unique_id', 'hardware', 'iscsi_name', 'iscsi_alias', - 'panel_name', 'enclosure_id', 'canister_id', - 'enclosure_serial_number'] - rows3[1] = ['1', 'node1', '', '0123456789ABCDEF', 'online', '0', - 'io_grp0', 'yes', '', 'TR1', 'naa.0123456789ABCDEF', '', - '01-1', '1', '1', 'H441028'] - rows3[2] = ['2', 'node2', '', '0123456789ABCDEF', 'online', '0', - 'io_grp0', 'no', '', 'TR1', 'naa.0123456789ABCDEF', '', - '01-2', '1', '2', 'H441028'] - - if self._next_cmd_error['lsnode'] == 'error': - self._next_cmd_error['lsnode'] = '' - return self._errors['CMMVC50000'] - - rows = None - if 'obj' not in kwargs: - rows = rows3 - elif kwargs['obj'] == '1': - rows = rows1 - elif kwargs['obj'] == '2': - rows = rows2 - else: - return self._errors['CMMVC50000'] - - if self._next_cmd_error['lsnode'] == 'header_mismatch': - rows[0].pop(2) - self._next_cmd_error['lsnode'] = '' - - return self._print_cmd_info(rows=rows, delim=kwargs.get('delim', None)) - - def _cmd_lssystem(self, **kwargs): - """svcinfo lssystem -delim !""" - - open_access_enabled = 'off' - - if self._next_cmd_error['lssystem'] == 'error': - 
self._next_cmd_error['lssystem'] = '' - return self._errors['CMMVC50000'] - - if self._next_cmd_error['lssystem'] == 'open_access_enabled=on': - self._next_cmd_error['lssystem'] = '' - open_access_enabled = 'on' - - rows = [None] * 3 - rows[0] = ['id', '0123456789ABCDEF'] - rows[1] = ['name', 'flashsystem_1.2.3.4'] - rows[2] = ['open_access_enabled', open_access_enabled] - - return self._print_cmd_info(rows=rows, **kwargs) - - def _cmd_lsportfc(self, **kwargs): - """svcinfo lsportfc""" - - if self._protocol == 'FC' or self._protocol == 'both': - status = 'active' - else: - status = 'unconfigured' - - rows = [None] * 3 - rows[0] = ['id', 'canister_id', 'adapter_id', 'port_id', 'type', - 'port_speed', 'node_id', 'node_name', 'WWPN', - 'nportid', 'status', 'attachment', 'topology'] - rows[1] = ['0', '1', '1', '1', 'fc', - '8Gb', '1', 'node_1', 'AABBCCDDEEFF0011', - '000000', status, 'host', 'al'] - rows[2] = ['1', '1', '1', '1', 'fc', - '8Gb', '1', 'node_1', 'AABBCCDDEEFF0010', - '000000', status, 'host', 'al'] - return self._print_cmd_info(rows=rows, **kwargs) - - def _cmd_lsportip(self, **kwargs): - """svcinfo lsportip""" - - if self._protocol == 'iSCSI' or self._protocol == 'both': - IP_address1 = '192.168.1.10' - IP_address2 = '192.168.1.11' - state = 'online' - speed = '8G' - else: - IP_address1 = '' - IP_address2 = '' - state = '' - speed = '' - - rows = [None] * 3 - rows[0] = ['id', 'node_id', 'node_name', 'canister_id', 'adapter_id', - 'port_id', 'IP_address', 'mask', 'gateway', 'IP_address_6', - 'prefix_6', 'gateway_6', 'MAC', 'duplex', 'state', 'speed', - 'failover', 'link_state', 'host', 'host_6', 'vlan', - 'vlan_6', 'adapter_location', 'adapter_port_id'] - rows[1] = ['1', '1', 'node1', '0', '0', - '0', IP_address1, '', '', '', - '0', '', '11:22:33:44:55:AA', '', state, speed, - 'no', 'active', '', '', '', '', '0', '0'] - rows[2] = ['2', '2', 'node2', '0', '0', - '0', IP_address2, '', '', '', - '0', '', '11:22:33:44:55:BB', '', state, speed, - 'no', 
'active', '', '', '', '', '0', '0'] - - return self._print_cmd_info(rows=rows, **kwargs) - - def _cmd_lsvdisk(self, **kwargs): - """cmd: svcinfo lsvdisk -gui -bytes -delim ! """ - - if 'obj' not in kwargs or ( - 'delim' not in kwargs) or ( - 'bytes' not in kwargs): - return self._errors['CMMVC50000'] - - if kwargs['obj'] not in self._volumes_list: - return self._errors['CMMVC50000'] - - vol = self._volumes_list[kwargs['obj']] - - rows = [] - rows.append(['id', vol['id']]) - rows.append(['name', vol['name']]) - rows.append(['status', vol['status']]) - rows.append(['capacity', vol['capacity']]) - rows.append(['vdisk_UID', vol['vdisk_UID']]) - rows.append(['udid', '']) - rows.append(['open_access_scsi_id', '1']) - rows.append(['parent_mdisk_grp_id', '0']) - rows.append(['parent_mdisk_grp_name', 'mdiskgrp0']) - - for index in range(len(rows)): - rows[index] = kwargs['delim'].join(rows[index]) - return ('%s' % '\n'.join(rows), '') - - def _cmd_lsvdiskhostmap(self, **kwargs): - """svcinfo lsvdiskhostmap -delim ! 
""" - - if 'obj' not in kwargs or ( - 'delim' not in kwargs): - return self._errors['CMMVC50000'] - - vdisk_name = kwargs['obj'] - if vdisk_name not in self._volumes_list: - return self._errors['CMMVC50000'] - - rows = [] - rows.append(['id', 'name', 'SCSI_id', 'host_id', 'host_name', - 'vdisk_UID', 'IO_group_id', 'IO_group_name']) - - mappings_found = 0 - for mapping in self._mappings_list.values(): - if (mapping['vol'] == vdisk_name): - mappings_found += 1 - volume = self._volumes_list[mapping['vol']] - host = self._hosts_list[mapping['host']] - rows.append([volume['id'], volume['name'], '1', host['id'], - host['host_name'], volume['vdisk_UID'], - '0', 'mdiskgrp0']) - - if mappings_found: - return self._print_cmd_info(rows=rows, **kwargs) - else: - return ('', '') - - def _cmd_expandvdisksize(self, **kwargs): - """svctask expandvdisksize -size -unit gb """ - - if 'obj' not in kwargs: - return self._errors['CMMVC50000'] - vol_name = kwargs['obj'].strip('\'\"') - - if 'size' not in kwargs: - return self._errors['CMMVC50000'] - size = int(kwargs['size']) - - if vol_name not in self._volumes_list: - return self._errors['CMMVC50000'] - - curr_size = int(self._volumes_list[vol_name]['capacity']) - addition = size * units.Gi - self._volumes_list[vol_name]['capacity'] = six.text_type( - curr_size + addition) - return ('', '') - - def _cmd_mkvdisk(self, **kwargs): - """mkvdisk command. 
- - svctask mkvdisk -name -mdiskgrp -iogrp - -size -unit - """ - - if 'name' not in kwargs or ( - 'size' not in kwargs) or ( - 'unit' not in kwargs): - return self._errors['CMMVC50000'] - - vdisk_info = {} - vdisk_info['id'] = self._find_unused_id(self._volumes_list) - vdisk_info['name'] = kwargs['name'].strip('\'\"') - vdisk_info['status'] = 'online' - vdisk_info['capacity'] = self._convert_units_bytes( - int(kwargs['size']), kwargs['unit']) - vdisk_info['vdisk_UID'] = ('60050760') + ('0' * 14) + vdisk_info['id'] - - if vdisk_info['name'] in self._volumes_list: - return self._errors['CMMVC50000'] - else: - self._volumes_list[vdisk_info['name']] = vdisk_info - return ('Virtual Disk, id [%s], successfully created' % - (vdisk_info['id']), '') - - def _cmd_chvdisk(self, **kwargs): - """chvdisk command - - svcask chvdisk -name -udid - -open_access_scsi_id - """ - - if 'obj' not in kwargs: - return self._errors['CMMVC50000'] - - source_name = kwargs['obj'].strip('\'\"') - dest_name = kwargs['name'].strip('\'\"') - vol = self._volumes_list[source_name] - vol['name'] = dest_name - del self._volumes_list[source_name] - self._volumes_list[dest_name] = vol - return ('', '') - - def _cmd_rmvdisk(self, **kwargs): - """svctask rmvdisk -force """ - - if 'obj' not in kwargs: - return self._errors['CMMVC50000'] - - vdisk_name = kwargs['obj'].strip('\'\"') - - if vdisk_name not in self._volumes_list: - return self._errors['CMMVC50000'] - - del self._volumes_list[vdisk_name] - return ('', '') - - def _add_port_to_host(self, host_info, **kwargs): - if 'iscsiname' in kwargs: - added_key = 'iscsi_names' - added_val = kwargs['iscsiname'].strip('\'\"') - elif 'hbawwpn' in kwargs: - added_key = 'wwpns' - added_val = kwargs['hbawwpn'].strip('\'\"') - else: - return self._errors['CMMVC50000'] - - host_info[added_key].append(added_val) - - for v in self._hosts_list.values(): - if v['id'] == host_info['id']: - continue - for port in v[added_key]: - if port == added_val: - return 
self._errors['CMMVC50000'] - return ('', '') - - def _cmd_mkhost(self, **kwargs): - """mkhost command. - - svctask mkhost -force -hbawwpn -name - svctask mkhost -force -iscsiname -name - """ - - if 'name' not in kwargs: - return self._errors['CMMVC50000'] - - host_name = kwargs['name'].strip('\'\"') - if self._is_invalid_name(host_name): - return self._errors['CMMVC50000'] - if host_name in self._hosts_list: - return self._errors['CMMVC50000'] - - host_info = {} - host_info['id'] = self._find_unused_id(self._hosts_list) - host_info['host_name'] = host_name - host_info['iscsi_names'] = [] - host_info['wwpns'] = [] - - out, err = self._add_port_to_host(host_info, **kwargs) - if not len(err): - self._hosts_list[host_name] = host_info - return ('Host, id [%s], successfully created' % - (host_info['id']), '') - else: - return (out, err) - - def _cmd_addhostport(self, **kwargs): - """addhostport command. - - svctask addhostport -force -hbawwpn - svctask addhostport -force -iscsiname - """ - - if 'obj' not in kwargs: - return self._errors['CMMVC50000'] - host_name = kwargs['obj'].strip('\'\"') - - if host_name not in self._hosts_list: - return self._errors['CMMVC50000'] - - host_info = self._hosts_list[host_name] - return self._add_port_to_host(host_info, **kwargs) - - def _cmd_rmhost(self, **kwargs): - """svctask rmhost """ - - if 'obj' not in kwargs: - return self._errors['CMMVC50000'] - - host_name = kwargs['obj'].strip('\'\"') - if host_name not in self._hosts_list: - return self._errors['CMMVC50000'] - - for v in self._mappings_list.values(): - if (v['host'] == host_name): - return self._errors['CMMVC50000'] - - del self._hosts_list[host_name] - return ('', '') - - def _cmd_mkvdiskhostmap(self, **kwargs): - """svctask mkvdiskhostmap -host -scsi """ - - mapping_info = {} - mapping_info['id'] = self._find_unused_id(self._mappings_list) - - if 'host' not in kwargs or ( - 'scsi' not in kwargs) or ( - 'obj' not in kwargs): - return self._errors['CMMVC50000'] - 
mapping_info['host'] = kwargs['host'].strip('\'\"') - mapping_info['lun'] = kwargs['scsi'].strip('\'\"') - mapping_info['vol'] = kwargs['obj'].strip('\'\"') - - if mapping_info['vol'] not in self._volumes_list: - return self._errors['CMMVC50000'] - - if mapping_info['host'] not in self._hosts_list: - return self._errors['CMMVC50000'] - - if mapping_info['vol'] in self._mappings_list: - return self._errors['CMMVC50000'] - - for v in self._mappings_list.values(): - if ((v['host'] == mapping_info['host']) and - (v['lun'] == mapping_info['lun'])): - return self._errors['CMMVC50000'] - - for v in self._mappings_list.values(): - if (v['lun'] == mapping_info['lun']) and ('force' not in kwargs): - return self._errors['CMMVC50000'] - - self._mappings_list[mapping_info['id']] = mapping_info - return ('Virtual Disk to Host map, id [%s], successfully created' - % (mapping_info['id']), '') - - def _cmd_rmvdiskhostmap(self, **kwargs): - """svctask rmvdiskhostmap -host """ - - if 'host' not in kwargs or 'obj' not in kwargs: - return self._errors['CMMVC50000'] - host = kwargs['host'].strip('\'\"') - vdisk = kwargs['obj'].strip('\'\"') - - mapping_ids = [] - for v in self._mappings_list.values(): - if v['vol'] == vdisk: - mapping_ids.append(v['id']) - if not mapping_ids: - return self._errors['CMMVC50000'] - - this_mapping = None - for mapping_id in mapping_ids: - if self._mappings_list[mapping_id]['host'] == host: - this_mapping = mapping_id - if this_mapping is None: - return self._errors['CMMVC50000'] - - del self._mappings_list[this_mapping] - return ('', '') - - def set_protocol(self, protocol): - self._protocol = protocol - - def execute_command(self, cmd, check_exit_code=True): - try: - kwargs = self._cmd_to_dict(cmd) - except exception.InvalidInput: - return self._errors['CMMVC50000'] - - command = kwargs.pop('cmd') - func = getattr(self, '_cmd_' + command) - out, err = func(**kwargs) - - if (check_exit_code) and (len(err) != 0): - raise 
processutils.ProcessExecutionError(exit_code=1, - stdout=out, - stderr=err, - cmd=command) - return (out, err) - - def error_injection(self, cmd, error): - self._next_cmd_error[cmd] = error - - -class FlashSystemFakeDriver(flashsystem_fc.FlashSystemFCDriver): - def __init__(self, *args, **kwargs): - super(FlashSystemFakeDriver, self).__init__(*args, **kwargs) - - def set_fake_storage(self, fake): - self.fake_storage = fake - - def _ssh(self, cmd, check_exit_code=True): - utils.check_ssh_injection(cmd) - ret = self.fake_storage.execute_command(cmd, check_exit_code) - return ret - - -class FlashSystemDriverTestCase(test.TestCase): - - def _set_flag(self, flag, value): - group = self.driver.configuration.config_group - self.driver.configuration.set_override(flag, value, group) - - def _reset_flags(self): - self.driver.configuration.local_conf.reset() - for k, v in self._def_flags.items(): - self._set_flag(k, v) - - def _generate_vol_info(self, - vol_name, - vol_size=10, - vol_status='available'): - rand_id = six.text_type(random.randint(10000, 99999)) - if not vol_name: - vol_name = 'test_volume%s' % rand_id - - return {'name': vol_name, - 'size': vol_size, - 'id': '%s' % rand_id, - 'volume_type_id': None, - 'status': vol_status, - 'mdisk_grp_name': 'mdiskgrp0'} - - def _generate_snap_info(self, - vol_name, - vol_id, - vol_size, - vol_status, - snap_status='available'): - rand_id = six.text_type(random.randint(10000, 99999)) - return {'name': 'test_snap_%s' % rand_id, - 'id': rand_id, - 'volume': {'name': vol_name, - 'id': vol_id, - 'size': vol_size, - 'status': vol_status}, - 'volume_size': vol_size, - 'status': snap_status, - 'mdisk_grp_name': 'mdiskgrp0'} - - def setUp(self): - super(FlashSystemDriverTestCase, self).setUp() - - self._def_flags = {'san_ip': 'hostname', - 'san_login': 'username', - 'san_password': 'password', - 'flashsystem_connection_protocol': 'FC', - 'flashsystem_multihostmap_enabled': True} - - self.connector = { - 'host': 'flashsystem', - 
'wwnns': ['0123456789abcdef', '0123456789abcdeg'], - 'wwpns': ['abcd000000000001', 'abcd000000000002'], - 'initiator': 'iqn.123456'} - - self.sim = FlashSystemManagementSimulator() - self.driver = FlashSystemFakeDriver( - configuration=conf.Configuration(None)) - self.driver.set_fake_storage(self.sim) - - self._reset_flags() - self.ctxt = context.get_admin_context() - self.driver.do_setup(None) - self.driver.check_for_setup_error() - - self.sleeppatch = mock.patch('eventlet.greenthread.sleep') - self.sleeppatch.start() - - def tearDown(self): - self.sleeppatch.stop() - super(FlashSystemDriverTestCase, self).tearDown() - - def test_flashsystem_do_setup(self): - # case 1: cmd lssystem encounters error - self.sim.error_injection('lssystem', 'error') - self.assertRaises(exception.VolumeBackendAPIException, - self.driver.do_setup, None) - - # case 2: open_access_enabled is not off - self.sim.error_injection('lssystem', 'open_access_enabled=on') - self.assertRaises(exception.VolumeBackendAPIException, - self.driver.do_setup, None) - - # case 3: cmd lsmdiskgrp encounters error - self.sim.error_injection('lsmdiskgrp', 'error') - self.assertRaises(exception.InvalidInput, - self.driver.do_setup, None) - - # case 4: status is not online - self.sim.error_injection('lsmdiskgrp', 'status=offline') - self.assertRaises(exception.InvalidInput, - self.driver.do_setup, None) - - # case 5: cmd lsnode encounters error - self.sim.error_injection('lsnode', 'error') - self.assertRaises(processutils.ProcessExecutionError, - self.driver.do_setup, None) - - # case 6: cmd lsnode header does not match - self.sim.error_injection('lsnode', 'header_mismatch') - self.assertRaises(exception.VolumeBackendAPIException, - self.driver.do_setup, None) - - # case 7: set as FC - self.sim.set_protocol('FC') - self.driver.do_setup(None) - self.assertEqual('FC', self.driver._protocol) - - # case 8: no configured nodes available - self.sim.set_protocol('unknown') - 
self.assertRaises(exception.VolumeBackendAPIException, - self.driver.do_setup, None) - - # clear environment - self.sim.set_protocol('FC') - self.driver.do_setup(None) - - def test_flashsystem_check_for_setup_error(self): - self._set_flag('san_ip', '') - self.assertRaises(exception.InvalidInput, - self.driver.check_for_setup_error) - self._reset_flags() - - self._set_flag('san_ssh_port', '') - self.assertRaises(exception.InvalidInput, - self.driver.check_for_setup_error) - self._reset_flags() - - self._set_flag('san_login', '') - self.assertRaises(exception.InvalidInput, - self.driver.check_for_setup_error) - self._reset_flags() - - self._set_flag('san_password', None) - self._set_flag('san_private_key', None) - self.assertRaises(exception.InvalidInput, - self.driver.check_for_setup_error) - self._reset_flags() - - self._set_flag('flashsystem_connection_protocol', 'foo') - self.assertRaises(exception.InvalidInput, - self.driver.check_for_setup_error) - self._reset_flags() - - # clear environment - self.driver.do_setup(None) - - def test_flashsystem_validate_connector(self): - conn_neither = {'host': 'host'} - conn_iscsi = {'host': 'host', 'initiator': 'foo'} - conn_fc = {'host': 'host', 'wwpns': 'bar'} - conn_both = {'host': 'host', 'initiator': 'foo', 'wwpns': 'bar'} - - protocol = self.driver._protocol - - # case 1: when protocol is FC - self.driver._protocol = 'FC' - self.driver.validate_connector(conn_fc) - self.driver.validate_connector(conn_both) - self.assertRaises(exception.InvalidConnectorException, - self.driver.validate_connector, conn_iscsi) - self.assertRaises(exception.InvalidConnectorException, - self.driver.validate_connector, conn_neither) - - # clear environment - self.driver._protocol = protocol - - def test_flashsystem_volumes(self): - # case 1: create volume - vol = self._generate_vol_info(None) - self.driver.create_volume(vol) - - # Check whether volume is created successfully - attributes = self.driver._get_vdisk_attributes(vol['name']) - 
attr_size = float(attributes['capacity']) / units.Gi - self.assertEqual(float(vol['size']), attr_size) - - # case 2: create volume with empty returning value - with mock.patch.object(FlashSystemFakeDriver, - '_ssh') as mock_ssh: - mock_ssh.return_value = ("", "") - vol1 = self._generate_vol_info(None) - self.assertRaises(exception.VolumeBackendAPIException, - self.driver.create_volume, vol1) - - # case 3: create volume with error returning value - with mock.patch.object(FlashSystemFakeDriver, - '_ssh') as mock_ssh: - mock_ssh.return_value = ("CMMVC6070E", - "An invalid or duplicated " - "parameter has been detected.") - vol2 = self._generate_vol_info(None) - self.assertRaises(exception.VolumeBackendAPIException, - self.driver.create_volume, vol2) - - # case 4: delete volume - self.driver.delete_volume(vol) - - # case 5: delete volume that doesn't exist (expected not fail) - vol_no_exist = self._generate_vol_info(None) - self.driver.delete_volume(vol_no_exist) - - def test_flashsystem_extend_volume(self): - vol = self._generate_vol_info(None) - self.driver.create_volume(vol) - self.driver.extend_volume(vol, '200') - attrs = self.driver._get_vdisk_attributes(vol['name']) - vol_size = int(attrs['capacity']) / units.Gi - self.assertAlmostEqual(vol_size, 200) - - # clear environment - self.driver.delete_volume(vol) - - def test_flashsystem_connection(self): - # case 1: initialize_connection/terminate_connection for good path - vol1 = self._generate_vol_info(None) - self.driver.create_volume(vol1) - self.driver.initialize_connection(vol1, self.connector) - self.driver.terminate_connection(vol1, self.connector) - - # case 2: when volume is not existed - vol2 = self._generate_vol_info(None) - self.assertRaises(exception.VolumeBackendAPIException, - self.driver.initialize_connection, - vol2, self.connector) - - # case 3: _get_vdisk_map_properties raises exception - with mock.patch.object(flashsystem_fc.FlashSystemFCDriver, - '_get_vdisk_map_properties') as get_properties: - 
get_properties.side_effect = exception.VolumeBackendAPIException - self.assertRaises(exception.VolumeBackendAPIException, - self.driver.initialize_connection, - vol1, self.connector) - - # case 4: terminate_connection with no host - with mock.patch.object(flashsystem_fc.FlashSystemFCDriver, - '_get_hostvdisk_mappings') as mock_host: - mock_host.return_value = {} - vol3 = self._generate_vol_info(None) - self.driver.create_volume(vol3) - self.driver.initialize_connection(vol3, self.connector) - return_value = self.driver.terminate_connection(vol3, - self.connector) - self.assertNotEqual({}, return_value['data']) - - # case 5: terminate_connection with host - vol4 = self._generate_vol_info(None) - self.driver.create_volume(vol4) - self.driver.initialize_connection(vol4, self.connector) - vol5 = self._generate_vol_info(None) - self.driver.create_volume(vol5) - self.driver.initialize_connection(vol5, self.connector) - return_value = self.driver.terminate_connection(vol4, - self.connector) - self.assertEqual({}, return_value['data']) - - # clear environment - self.driver.delete_volume(vol1) - self.driver.delete_volume(vol2) - self.driver.delete_volume(vol3) - self.driver.delete_volume(vol4) - self.driver.delete_volume(vol5) - - @mock.patch.object(flashsystem_fc.FlashSystemFCDriver, - '_create_and_copy_vdisk_data') - def test_flashsystem_create_snapshot(self, _create_and_copy_vdisk_data): - # case 1: good path - vol1 = self._generate_vol_info(None) - snap1 = self._generate_snap_info(vol1['name'], - vol1['id'], - vol1['size'], - vol1['status']) - self.driver.create_snapshot(snap1) - - # case 2: when volume status is error - vol2 = self._generate_vol_info(None, vol_status='error') - snap2 = self._generate_snap_info(vol2['name'], - vol2['id'], - vol2['size'], - vol2['status']) - self.assertRaises(exception.InvalidVolume, - self.driver.create_snapshot, snap2) - - @mock.patch.object(flashsystem_fc.FlashSystemFCDriver, - '_delete_vdisk') - def 
test_flashsystem_delete_snapshot(self, _delete_vdisk): - vol1 = self._generate_vol_info(None) - snap1 = self._generate_snap_info(vol1['name'], - vol1['id'], - vol1['size'], - vol1['status']) - self.driver.delete_snapshot(snap1) - - @mock.patch.object(flashsystem_fc.FlashSystemFCDriver, - '_create_and_copy_vdisk_data') - def test_flashsystem_create_volume_from_snapshot( - self, _create_and_copy_vdisk_data): - # case 1: good path - vol = self._generate_vol_info(None) - snap = self._generate_snap_info(vol['name'], - vol['id'], - vol['size'], - vol['status']) - self.driver.create_volume_from_snapshot(vol, snap) - - # case 2: when size does not match - vol = self._generate_vol_info(None, vol_size=100) - snap = self._generate_snap_info(vol['name'], - vol['id'], - 200, - vol['status']) - self.assertRaises(exception.VolumeDriverException, - self.driver.create_volume_from_snapshot, - vol, snap) - - # case 3: when snapshot status is not available - vol = self._generate_vol_info(None) - snap = self._generate_snap_info(vol['name'], - vol['id'], - vol['size'], - vol['status'], - snap_status='error') - self.assertRaises(exception.InvalidSnapshot, - self.driver.create_volume_from_snapshot, - vol, snap) - - @mock.patch.object(flashsystem_fc.FlashSystemFCDriver, - '_create_and_copy_vdisk_data') - def test_flashsystem_create_cloned_volume( - self, _create_and_copy_vdisk_data): - # case 1: good path - vol1 = self._generate_vol_info(None) - vol2 = self._generate_vol_info(None) - self.driver.create_cloned_volume(vol2, vol1) - - # case 2: when size does not match - vol1 = self._generate_vol_info(None, vol_size=10) - vol2 = self._generate_vol_info(None, vol_size=20) - self.assertRaises(exception.VolumeDriverException, - self.driver.create_cloned_volume, - vol2, vol1) - - def test_flashsystem_get_volume_stats(self): - # case 1: good path - self._set_flag('reserved_percentage', 25) - self._set_flag('flashsystem_multihostmap_enabled', False) - pool = 'mdiskgrp0' - backend_name = 
'flashsystem_1.2.3.4' + '_' + pool - - stats = self.driver.get_volume_stats() - - self.assertEqual(25, stats['reserved_percentage']) - self.assertEqual('IBM', stats['vendor_name']) - self.assertEqual('FC', stats['storage_protocol']) - self.assertEqual(backend_name, stats['volume_backend_name']) - self.assertEqual(False, stats['multiattach']) - - self._reset_flags() - - # case 2: when lsmdiskgrp returns error - self.sim.error_injection('lsmdiskgrp', 'error') - self.assertRaises(exception.VolumeBackendAPIException, - self.driver.get_volume_stats, refresh=True) - - @mock.patch.object(flashsystem_fc.FlashSystemFCDriver, - '_copy_vdisk_data') - def test_flashsystem_create_and_copy_vdisk_data(self, _copy_vdisk_data): - # case 1: when volume does not exist - vol1 = self._generate_vol_info(None) - vol2 = self._generate_vol_info(None) - self.assertRaises(exception.VolumeBackendAPIException, - self.driver._create_and_copy_vdisk_data, - vol1['name'], vol1['id'], vol2['name'], vol2['id']) - - # case 2: good path - self.driver.create_volume(vol1) - self.driver._create_and_copy_vdisk_data( - vol1['name'], vol1['id'], vol2['name'], vol2['id']) - self.driver.delete_volume(vol1) - self.driver.delete_volume(vol2) - - # case 3: _copy_vdisk_data raises exception - self.driver.create_volume(vol1) - _copy_vdisk_data.side_effect = exception.VolumeBackendAPIException - self.assertRaises( - exception.VolumeBackendAPIException, - self.driver._create_and_copy_vdisk_data, - vol1['name'], vol1['id'], vol2['name'], vol2['id']) - self.assertEqual(set(), self.driver._vdisk_copy_in_progress) - - # clear environment - self.driver.delete_volume(vol1) - self.driver.delete_volume(vol2) - - @mock.patch.object(volume_utils, 'copy_volume') - @mock.patch.object(flashsystem_fc.FlashSystemFCDriver, '_scan_device') - @mock.patch.object(flashsystem_fc.FlashSystemFCDriver, '_remove_device') - @mock.patch.object(utils, 'brick_get_connector_properties') - def test_flashsystem_copy_vdisk_data(self, - _connector, 
- _remove_device, - _scan_device, - copy_volume): - - connector = _connector.return_value = self.connector - vol1 = self._generate_vol_info(None) - vol2 = self._generate_vol_info(None) - self.driver.create_volume(vol1) - self.driver.create_volume(vol2) - - # case 1: no mapped before copy - self.driver._copy_vdisk_data( - vol1['name'], vol1['id'], vol2['name'], vol2['id']) - (v1_mapped, lun) = self.driver._is_vdisk_map(vol1['name'], connector) - (v2_mapped, lun) = self.driver._is_vdisk_map(vol2['name'], connector) - self.assertFalse(v1_mapped) - self.assertFalse(v2_mapped) - - # case 2: mapped before copy - self.driver.initialize_connection(vol1, connector) - self.driver.initialize_connection(vol2, connector) - self.driver._copy_vdisk_data( - vol1['name'], vol1['id'], vol2['name'], vol2['id']) - (v1_mapped, lun) = self.driver._is_vdisk_map(vol1['name'], connector) - (v2_mapped, lun) = self.driver._is_vdisk_map(vol2['name'], connector) - self.assertTrue(v1_mapped) - self.assertTrue(v2_mapped) - self.driver.terminate_connection(vol1, connector) - self.driver.terminate_connection(vol2, connector) - - # case 3: no mapped before copy, raise exception when scan - _scan_device.side_effect = exception.VolumeBackendAPIException - self.assertRaises( - exception.VolumeBackendAPIException, - self.driver._copy_vdisk_data, - vol1['name'], vol1['id'], vol2['name'], vol2['id']) - (v1_mapped, lun) = self.driver._is_vdisk_map(vol1['name'], connector) - (v2_mapped, lun) = self.driver._is_vdisk_map(vol2['name'], connector) - self.assertFalse(v1_mapped) - self.assertFalse(v2_mapped) - - # case 4: no mapped before copy, raise exception when copy - copy_volume.side_effect = exception.VolumeBackendAPIException - self.assertRaises( - exception.VolumeBackendAPIException, - self.driver._copy_vdisk_data, - vol1['name'], vol1['id'], vol2['name'], vol2['id']) - (v1_mapped, lun) = self.driver._is_vdisk_map(vol1['name'], connector) - (v2_mapped, lun) = self.driver._is_vdisk_map(vol2['name'], 
connector) - self.assertFalse(v1_mapped) - self.assertFalse(v2_mapped) - - # clear environment - self.driver.delete_volume(vol1) - self.driver.delete_volume(vol2) - - def test_flashsystem_connector_to_hostname_prefix(self): - # Invalid characters will be translated to '-' - - # case 1: host name is unicode with invalid characters - conn = {'host': u'unicode.test}.abc{.abc'} - self.assertEqual(u'unicode.test-.abc-.abc', - self.driver._connector_to_hostname_prefix(conn)) - - # case 2: host name is string with invalid characters - conn = {'host': 'string.test}.abc{.abc'} - self.assertEqual('string.test-.abc-.abc', - self.driver._connector_to_hostname_prefix(conn)) - - # case 3: host name is neither unicode nor string - conn = {'host': 12345} - self.assertRaises(exception.NoValidBackend, - self.driver._connector_to_hostname_prefix, - conn) - - # case 4: host name started with number will be translated - conn = {'host': '192.168.1.1'} - self.assertEqual('_192.168.1.1', - self.driver._connector_to_hostname_prefix(conn)) - - def test_flashsystem_create_host(self): - # case 1: create host - conn = { - 'host': 'flashsystem', - 'wwnns': ['0123456789abcdef', '0123456789abcdeg'], - 'wwpns': ['abcd000000000001', 'abcd000000000002'], - 'initiator': 'iqn.123456'} - host = self.driver._create_host(conn) - - # case 2: create host that already exists - self.assertRaises(processutils.ProcessExecutionError, - self.driver._create_host, - conn) - - # case 3: delete host - self.driver._delete_host(host) - - # case 4: create host with empty ports - conn = {'host': 'flashsystem', 'wwpns': []} - self.assertRaises(exception.VolumeBackendAPIException, - self.driver._create_host, - conn) - - def test_flashsystem_find_host_exhaustive(self): - # case 1: create host and find it - conn1 = { - 'host': 'flashsystem-01', - 'wwnns': ['1111111111abcdef', '1111111111abcdeg'], - 'wwpns': ['1111111111000001', '1111111111000002'], - 'initiator': 'iqn.111111'} - conn2 = { - 'host': 'flashsystem-02', - 
'wwnns': ['2222222222abcdef', '2222222222abcdeg'], - 'wwpns': ['2222222222000001', '2222222222000002'], - 'initiator': 'iqn.222222'} - conn3 = { - 'host': 'flashsystem-03', - 'wwnns': ['3333333333abcdef', '3333333333abcdeg'], - 'wwpns': ['3333333333000001', '3333333333000002'], - 'initiator': 'iqn.333333'} - host1 = self.driver._create_host(conn1) - host2 = self.driver._create_host(conn2) - self.assertEqual( - host2, - self.driver._find_host_exhaustive(conn2, [host1, host2])) - self.assertIsNone(self.driver._find_host_exhaustive(conn3, - [host1, host2])) - - # case 2: hosts contains non-existent host info - with mock.patch.object(FlashSystemFakeDriver, - '_ssh') as mock_ssh: - mock_ssh.return_value = ("pass", "") - self.driver._find_host_exhaustive(conn1, [host2]) - self.assertFalse(mock_ssh.called) - - # clear environment - self.driver._delete_host(host1) - self.driver._delete_host(host2) - - def test_flashsystem_get_vdisk_params(self): - # case 1: use default params - self.driver._get_vdisk_params(None) - - # case 2: use extra params from type - opts1 = {'storage_protocol': 'FC'} - opts2 = {'capabilities:storage_protocol': 'FC'} - opts3 = {'storage_protocol': 'iSCSI'} - type1 = volume_types.create(self.ctxt, 'opts1', opts1) - type2 = volume_types.create(self.ctxt, 'opts2', opts2) - type3 = volume_types.create(self.ctxt, 'opts3', opts3) - self.assertEqual( - 'FC', - self.driver._get_vdisk_params(type1['id'])['protocol']) - self.assertEqual( - 'FC', - self.driver._get_vdisk_params(type2['id'])['protocol']) - self.assertRaises(exception.InvalidInput, - self.driver._get_vdisk_params, - type3['id']) - - # clear environment - volume_types.destroy(self.ctxt, type1['id']) - volume_types.destroy(self.ctxt, type2['id']) - - def test_flashsystem_map_vdisk_to_host(self): - # case 1: no host found - vol1 = self._generate_vol_info(None) - self.driver.create_volume(vol1) - self.assertEqual( - # lun id shoud begin with 1 - 1, - self.driver._map_vdisk_to_host(vol1['name'], 
self.connector)) - - # case 2: host already exists - vol2 = self._generate_vol_info(None) - self.driver.create_volume(vol2) - self.assertEqual( - # lun id shoud be sequential - 2, - self.driver._map_vdisk_to_host(vol2['name'], self.connector)) - - # case 3: test if already mapped - self.assertEqual( - 1, - self.driver._map_vdisk_to_host(vol1['name'], self.connector)) - - # clean environment - self.driver._unmap_vdisk_from_host(vol1['name'], self.connector) - self.driver._unmap_vdisk_from_host(vol2['name'], self.connector) - self.driver.delete_volume(vol1) - self.driver.delete_volume(vol2) - - # case 4: If there is no vdisk mapped to host, host should be removed - self.assertIsNone(self.driver._get_host_from_connector(self.connector)) - - def test_flashsystem_manage_existing(self): - # case 1: manage a vdisk good path - kwargs = {'name': u'unmanage-vol-01', 'size': u'1', 'unit': 'gb'} - self.sim._cmd_mkvdisk(**kwargs) - vol1 = self._generate_vol_info(None) - existing_ref = {'source-name': u'unmanage-vol-01'} - self.driver.manage_existing(vol1, existing_ref) - self.driver.delete_volume(vol1) - - # case 2: manage a vdisk not exist - vol1 = self._generate_vol_info(None) - existing_ref = {'source-name': u'unmanage-vol-01'} - self.assertRaises(exception.ManageExistingInvalidReference, - self.driver.manage_existing, vol1, existing_ref) - - # case 3: manage a vdisk without name and uid - kwargs = {'name': u'unmanage-vol-01', 'size': u'1', 'unit': 'gb'} - self.sim._cmd_mkvdisk(**kwargs) - vol1 = self._generate_vol_info(None) - existing_ref = {} - self.assertRaises(exception.ManageExistingInvalidReference, - self.driver.manage_existing, vol1, existing_ref) - vdisk1 = {'obj': u'unmanage-vol-01'} - self.sim._cmd_rmvdisk(**vdisk1) - - @mock.patch.object(flashsystem_fc.FlashSystemFCDriver, - '_get_vdiskhost_mappings') - def test_flashsystem_manage_existing_get_size_mapped( - self, - _get_vdiskhost_mappings_mock): - # manage a vdisk with mappings - 
_get_vdiskhost_mappings_mock.return_value = {'mapped': u'yes'} - kwargs = {'name': u'unmanage-vol-01', 'size': u'1', 'unit': 'gb'} - self.sim._cmd_mkvdisk(**kwargs) - vol1 = self._generate_vol_info(None) - existing_ref = {'source-name': u'unmanage-vol-01'} - self.assertRaises(exception.ManageExistingInvalidReference, - self.driver.manage_existing_get_size, - vol1, - existing_ref) - - # clean environment - vdisk1 = {'obj': u'unmanage-vol-01'} - self.sim._cmd_rmvdisk(**vdisk1) - - def test_flashsystem_manage_existing_get_size_bad_ref(self): - # bad existing_ref - vol1 = self._generate_vol_info(None, None) - existing_ref = {} - self.assertRaises(exception.ManageExistingInvalidReference, - self.driver.manage_existing_get_size, vol1, - existing_ref) - - def test_flashsystem_manage_existing_get_size_vdisk_not_exist(self): - # vdisk not exist - vol1 = self._generate_vol_info(None) - existing_ref = {'source-name': u'unmanage-vol-01'} - self.assertRaises(exception.ManageExistingInvalidReference, - self.driver.manage_existing_get_size, - vol1, - existing_ref) - - def test_flashsystem_manage_existing_get_size(self): - # good path - kwargs = {'name': u'unmanage-vol-01', 'size': u'10001', 'unit': 'gb'} - self.sim._cmd_mkvdisk(**kwargs) - vol1 = self._generate_vol_info(None) - existing_ref = {'source-name': u'unmanage-vol-01'} - vdisk_size = self.driver.manage_existing_get_size(vol1, existing_ref) - self.assertEqual(10001, vdisk_size) - - # clean environment - vdisk1 = {'obj': u'unmanage-vol-01'} - self.sim._cmd_rmvdisk(**vdisk1) diff --git a/cinder/tests/unit/volume/drivers/ibm/test_ibm_flashsystem_iscsi.py b/cinder/tests/unit/volume/drivers/ibm/test_ibm_flashsystem_iscsi.py deleted file mode 100644 index 352220b39..000000000 --- a/cinder/tests/unit/volume/drivers/ibm/test_ibm_flashsystem_iscsi.py +++ /dev/null @@ -1,421 +0,0 @@ -# Copyright 2015 IBM Corp. -# All Rights Reserved. 
-# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -# - -""" -Tests for the IBM FlashSystem iSCSI volume driver. -""" - -import mock -import six - -import random - -from cinder import context -from cinder import exception -from cinder import test -from cinder.tests.unit.volume.drivers.ibm \ - import test_ibm_flashsystem as fscommon -from cinder import utils -from cinder.volume import configuration as conf -from cinder.volume.drivers.ibm import flashsystem_iscsi -from cinder.volume import volume_types - - -class FlashSystemManagementSimulator(fscommon.FlashSystemManagementSimulator): - def __init__(self): - # Default protocol is iSCSI - self._protocol = 'iSCSI' - self._volumes_list = {} - self._hosts_list = {} - self._mappings_list = {} - self._next_cmd_error = { - 'lsnode': '', - 'lssystem': '', - 'lsmdiskgrp': '' - } - self._errors = { - # CMMVC50000 is a fake error which indicates that command has not - # got expected results. This error represents kinds of CLI errors. 
- 'CMMVC50000': ('', 'CMMVC50000 The command can not be executed ' - 'successfully.') - } - - -class FlashSystemFakeISCSIDriver(flashsystem_iscsi.FlashSystemISCSIDriver): - def __init__(self, *args, **kwargs): - super(FlashSystemFakeISCSIDriver, self).__init__(*args, **kwargs) - - def set_fake_storage(self, fake): - self.fake_storage = fake - - def _ssh(self, cmd, check_exit_code=True): - utils.check_ssh_injection(cmd) - ret = self.fake_storage.execute_command(cmd, check_exit_code) - return ret - - -class FlashSystemISCSIDriverTestCase(test.TestCase): - - def _set_flag(self, flag, value): - group = self.driver.configuration.config_group - self.driver.configuration.set_override(flag, value, group) - - def _reset_flags(self): - self.driver.configuration.local_conf.reset() - for k, v in self._def_flags.items(): - self._set_flag(k, v) - - def _generate_vol_info(self, - vol_name, - vol_size=10, - vol_status='available'): - rand_id = six.text_type(random.randint(10000, 99999)) - if not vol_name: - vol_name = 'test_volume%s' % rand_id - - return {'name': vol_name, - 'size': vol_size, - 'id': '%s' % rand_id, - 'volume_type_id': None, - 'status': vol_status, - 'mdisk_grp_name': 'mdiskgrp0'} - - def _generate_snap_info(self, - vol_name, - vol_id, - vol_size, - vol_status, - snap_status='available'): - rand_id = six.text_type(random.randint(10000, 99999)) - return {'name': 'test_snap_%s' % rand_id, - 'id': rand_id, - 'volume': {'name': vol_name, - 'id': vol_id, - 'size': vol_size, - 'status': vol_status}, - 'volume_size': vol_size, - 'status': snap_status, - 'mdisk_grp_name': 'mdiskgrp0'} - - def setUp(self): - super(FlashSystemISCSIDriverTestCase, self).setUp() - - self._def_flags = {'san_ip': 'hostname', - 'san_login': 'username', - 'san_password': 'password', - 'flashsystem_connection_protocol': 'iSCSI', - 'flashsystem_multihostmap_enabled': True, - 'iscsi_ip_address': '192.168.1.10', - 'flashsystem_iscsi_portid': 1} - - self.connector = { - 'host': 'flashsystem', - 
'wwnns': ['0123456789abcdef', '0123456789abcdeg'], - 'wwpns': ['abcd000000000001', 'abcd000000000002'], - 'initiator': 'iqn.123456'} - - self.sim = FlashSystemManagementSimulator() - self.driver = FlashSystemFakeISCSIDriver( - configuration=conf.Configuration(None)) - self.driver.set_fake_storage(self.sim) - - self._reset_flags() - self.ctxt = context.get_admin_context() - self.driver.do_setup(None) - self.driver.check_for_setup_error() - - self.sleeppatch = mock.patch('eventlet.greenthread.sleep') - self.sleeppatch.start() - - def tearDown(self): - self.sleeppatch.stop() - super(FlashSystemISCSIDriverTestCase, self).tearDown() - - def test_flashsystem_do_setup(self): - # case 1: set as iSCSI - self.sim.set_protocol('iSCSI') - self._set_flag('flashsystem_connection_protocol', 'iSCSI') - self.driver.do_setup(None) - self.assertEqual('iSCSI', self.driver._protocol) - - # clear environment - self.sim.set_protocol('iSCSI') - self._reset_flags() - - def test_flashsystem_validate_connector(self): - conn_neither = {'host': 'host'} - conn_iscsi = {'host': 'host', 'initiator': 'foo'} - conn_both = {'host': 'host', 'initiator': 'foo', 'wwpns': 'bar'} - - protocol = self.driver._protocol - - # case 1: when protocol is iSCSI - self.driver._protocol = 'iSCSI' - self.driver.validate_connector(conn_iscsi) - self.driver.validate_connector(conn_both) - self.assertRaises(exception.InvalidConnectorException, - self.driver.validate_connector, conn_neither) - - # clear environment - self.driver._protocol = protocol - - def test_flashsystem_connection(self): - # case 1: initialize_connection/terminate_connection with iSCSI - self.sim.set_protocol('iSCSI') - self._set_flag('flashsystem_connection_protocol', 'iSCSI') - self.driver.do_setup(None) - vol1 = self._generate_vol_info(None) - self.driver.create_volume(vol1) - self.driver.initialize_connection(vol1, self.connector) - self.driver.terminate_connection(vol1, self.connector) - - # clear environment - self.driver.delete_volume(vol1) - 
self.sim.set_protocol('iSCSI') - self._reset_flags() - - def test_flashsystem_create_host(self): - # case 1: create host with iqn - self.sim.set_protocol('iSCSI') - self._set_flag('flashsystem_connection_protocol', 'iSCSI') - self.driver.do_setup(None) - conn = { - 'host': 'flashsystem', - 'wwnns': ['0123456789abcdef', '0123456789abcdeg'], - 'wwpns': ['abcd000000000001', 'abcd000000000002'], - 'initiator': 'iqn.123456'} - host = self.driver._create_host(conn) - - # case 2: delete host - self.driver._delete_host(host) - - # clear environment - self.sim.set_protocol('iSCSI') - self._reset_flags() - - def test_flashsystem_get_vdisk_params(self): - # case 1: use default params - self.driver._get_vdisk_params(None) - - # case 2: use extra params from type - opts1 = {'storage_protocol': 'iSCSI'} - opts2 = {'capabilities:storage_protocol': 'iSCSI'} - opts3 = {'storage_protocol': 'FC'} - type1 = volume_types.create(self.ctxt, 'opts1', opts1) - type2 = volume_types.create(self.ctxt, 'opts2', opts2) - type3 = volume_types.create(self.ctxt, 'opts3', opts3) - self.assertEqual( - 'iSCSI', - self.driver._get_vdisk_params(type1['id'])['protocol']) - self.assertEqual( - 'iSCSI', - self.driver._get_vdisk_params(type2['id'])['protocol']) - self.assertRaises(exception.InvalidInput, - self.driver._get_vdisk_params, - type3['id']) - - # clear environment - volume_types.destroy(self.ctxt, type1['id']) - volume_types.destroy(self.ctxt, type2['id']) - volume_types.destroy(self.ctxt, type3['id']) - - def test_flashsystem_map_vdisk_to_host(self): - # case 1: no host found - vol1 = self._generate_vol_info(None) - self.driver.create_volume(vol1) - self.assertEqual( - # lun id shoud begin with 1 - 1, - self.driver._map_vdisk_to_host(vol1['name'], self.connector)) - - # case 2: host already exists - vol2 = self._generate_vol_info(None) - self.driver.create_volume(vol2) - self.assertEqual( - # lun id shoud be sequential - 2, - self.driver._map_vdisk_to_host(vol2['name'], self.connector)) - - # 
case 3: test if already mapped - self.assertEqual( - 1, - self.driver._map_vdisk_to_host(vol1['name'], self.connector)) - - # clean environment - self.driver._unmap_vdisk_from_host(vol1['name'], self.connector) - self.driver._unmap_vdisk_from_host(vol2['name'], self.connector) - self.driver.delete_volume(vol1) - self.driver.delete_volume(vol2) - - # case 4: If there is no vdisk mapped to host, host should be removed - self.assertIsNone(self.driver._get_host_from_connector(self.connector)) - - def test_terminate_connection_with_normal_path(self): - connector = {'host': 'flashsystem-host', - 'wwnns': ['10000090fa17311e', '10000090fa17311f'], - 'wwpns': ['20000090fa17311e', '20000090fa17311f'], - 'initiator': 'iqn.1993-08.org.debian:01:89ad29bbdc43'} - # create test volume - volume_iscsi = self._generate_vol_info(None) - self.driver.create_volume(volume_iscsi) - - # normal connection test - self.driver.initialize_connection(volume_iscsi, connector) - host = self.driver._get_host_from_connector(connector) - self.assertIsNotNone(host) - self.driver.terminate_connection(volume_iscsi, connector) - host = self.driver._get_host_from_connector(connector) - self.assertIsNone(host) - - # clean environment - self.driver.delete_volume(volume_iscsi) - - def test_terminate_connection_with_resource_leak_check(self): - connector = {'host': 'flashsystem-host', - 'wwnns': ['10000090fa17311e', '10000090fa17311f'], - 'wwpns': ['20000090fa17311e', '20000090fa17311f'], - 'initiator': 'iqn.1993-08.org.debian:01:89ad29bbdc43'} - # create test volume - volume_iscsi = self._generate_vol_info(None) - self.driver.create_volume(volume_iscsi) - - # volume mapping removed before terminate connection - self.driver.initialize_connection(volume_iscsi, connector) - host = self.driver._get_host_from_connector(connector) - self.assertIsNotNone(host) - rmmap_cmd = {'host': host, 'obj': volume_iscsi['name']} - self.sim._cmd_rmvdiskhostmap(**rmmap_cmd) - self.driver.terminate_connection(volume_iscsi, 
connector) - host = self.driver._get_host_from_connector(connector) - self.assertIsNone(host) - - # clean environment - self.driver.delete_volume(volume_iscsi) - - def test_flashsystem_find_host_exhaustive(self): - # case 1: create host and find it - self.sim.set_protocol('iSCSI') - self._set_flag('flashsystem_connection_protocol', 'iSCSI') - conn1 = { - 'host': 'flashsystem-01', - 'wwnns': ['1111111111abcdef', '1111111111abcdeg'], - 'wwpns': ['1111111111000001', '1111111111000002'], - 'initiator': 'iqn.111111'} - conn2 = { - 'host': 'flashsystem-02', - 'wwnns': ['2222222222abcdef', '2222222222abcdeg'], - 'wwpns': ['2222222222000001', '2222222222000002'], - 'initiator': 'iqn.222222'} - conn3 = { - 'host': 'flashsystem-03', - 'wwnns': ['3333333333abcdef', '3333333333abcdeg'], - 'wwpns': ['3333333333000001', '3333333333000002'], - 'initiator': 'iqn.333333'} - host1 = self.driver._create_host(conn1) - host2 = self.driver._create_host(conn2) - self.assertEqual( - host2, - self.driver._find_host_exhaustive(conn2, [host1, host2])) - self.assertIsNone(self.driver._find_host_exhaustive(conn3, - [host1, host2])) - - # case 2: hosts contains non-existent host info - with mock.patch.object(FlashSystemFakeISCSIDriver, - '_ssh') as mock_ssh: - mock_ssh.return_value = ("pass", "") - self.driver._find_host_exhaustive(conn1, [host2]) - self.assertFalse(mock_ssh.called) - - # clear environment - self.driver._delete_host(host1) - self.driver._delete_host(host2) - self.sim.set_protocol('iSCSI') - self._reset_flags() - - def test_flashsystem_manage_existing(self): - # case 1: manage a vdisk good path - kwargs = {'name': u'unmanage-vol-01', 'size': u'1', 'unit': 'gb'} - self.sim._cmd_mkvdisk(**kwargs) - vol1 = self._generate_vol_info(None) - existing_ref = {'source-name': u'unmanage-vol-01'} - self.driver.manage_existing(vol1, existing_ref) - self.driver.delete_volume(vol1) - - # case 2: manage a vdisk not exist - vol1 = self._generate_vol_info(None) - existing_ref = {'source-name': 
u'unmanage-vol-01'} - self.assertRaises(exception.ManageExistingInvalidReference, - self.driver.manage_existing, vol1, existing_ref) - - # case 3: manage a vdisk without name and uid - kwargs = {'name': u'unmanage-vol-01', 'size': u'1', 'unit': 'gb'} - self.sim._cmd_mkvdisk(**kwargs) - vol1 = self._generate_vol_info(None) - existing_ref = {} - self.assertRaises(exception.ManageExistingInvalidReference, - self.driver.manage_existing, vol1, existing_ref) - vdisk1 = {'obj': u'unmanage-vol-01'} - self.sim._cmd_rmvdisk(**vdisk1) - - @mock.patch.object(flashsystem_iscsi.FlashSystemISCSIDriver, - '_get_vdiskhost_mappings') - def test_flashsystem_manage_existing_get_size_mapped( - self, - _get_vdiskhost_mappings_mock): - # case 2: manage a vdisk with mappings - _get_vdiskhost_mappings_mock.return_value = {'mapped': u'yes'} - kwargs = {'name': u'unmanage-vol-01', 'size': u'1', 'unit': 'gb'} - self.sim._cmd_mkvdisk(**kwargs) - vol1 = self._generate_vol_info(None) - existing_ref = {'source-name': u'unmanage-vol-01'} - self.assertRaises(exception.ManageExistingInvalidReference, - self.driver.manage_existing_get_size, - vol1, - existing_ref) - - # clean environment - vdisk1 = {'obj': u'unmanage-vol-01'} - self.sim._cmd_rmvdisk(**vdisk1) - - def test_flashsystem_manage_existing_get_size_bad_ref(self): - # bad existing_ref - vol1 = self._generate_vol_info(None, None) - existing_ref = {} - self.assertRaises(exception.ManageExistingInvalidReference, - self.driver.manage_existing_get_size, vol1, - existing_ref) - - def test_flashsystem_manage_existing_get_size_vdisk_not_exist(self): - # vdisk not exist - vol1 = self._generate_vol_info(None) - existing_ref = {'source-name': u'unmanage-vol-01'} - self.assertRaises(exception.ManageExistingInvalidReference, - self.driver.manage_existing_get_size, - vol1, - existing_ref) - - def test_flashsystem_manage_existing_get_size(self): - # good path - kwargs = {'name': u'unmanage-vol-01', 'size': u'10001', 'unit': 'gb'} - 
self.sim._cmd_mkvdisk(**kwargs) - vol1 = self._generate_vol_info(None) - existing_ref = {'source-name': u'unmanage-vol-01'} - vdisk_size = self.driver.manage_existing_get_size(vol1, existing_ref) - self.assertEqual(10001, vdisk_size) - - # clean environment - vdisk1 = {'obj': u'unmanage-vol-01'} - self.sim._cmd_rmvdisk(**vdisk1) diff --git a/cinder/tests/unit/volume/drivers/ibm/test_ibm_storage.py b/cinder/tests/unit/volume/drivers/ibm/test_ibm_storage.py deleted file mode 100644 index 3c4d72f86..000000000 --- a/cinder/tests/unit/volume/drivers/ibm/test_ibm_storage.py +++ /dev/null @@ -1,987 +0,0 @@ -# Copyright 2013 IBM Corp. -# Copyright (c) 2013 OpenStack Foundation -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- - -import copy - -import mock - -from cinder import context -from cinder import exception -from cinder.i18n import _ -from cinder.objects import fields -from cinder import test -from cinder.volume import configuration as conf -from cinder.volume.drivers.ibm.ibm_storage import ibm_storage -from cinder.volume import volume_types - -FAKE = "fake" -FAKE2 = "fake2" -CANNOT_DELETE = "Can not delete" -TOO_BIG_VOLUME_SIZE = 12000 -POOL_SIZE = 100 -GROUP_ID = 1 -VOLUME = {'size': 16, - 'name': FAKE, - 'id': 1, - 'status': 'available'} -VOLUME2 = {'size': 32, - 'name': FAKE2, - 'id': 2, - 'status': 'available'} -GROUP_VOLUME = {'size': 16, - 'name': FAKE, - 'id': 3, - 'group_id': GROUP_ID, - 'status': 'available'} - -MANAGED_FAKE = "managed_fake" -MANAGED_VOLUME = {'size': 16, - 'name': MANAGED_FAKE, - 'id': 2} - -REPLICA_FAKE = "repicated_fake" -REPLICATED_VOLUME = {'size': 64, - 'name': REPLICA_FAKE, - 'id': '2', - 'replication_status': fields.ReplicationStatus.ENABLED} - -REPLICATED_VOLUME_DISABLED = REPLICATED_VOLUME.copy() -REPLICATED_VOLUME_DISABLED['replication_status'] = ( - fields.ReplicationStatus.DISABLED) - -REPLICATION_TARGETS = [{'target_device_id': 'fakedevice'}] -SECONDARY = 'fakedevice' -FAKE_FAILOVER_HOST = 'fakehost@fakebackend#fakepool' -FAKE_PROVIDER_LOCATION = 'fake_provider_location' -FAKE_DRIVER_DATA = 'fake_driver_data' - -CONTEXT = {} - -FAKESNAPSHOT = 'fakesnapshot' -SNAPSHOT = {'name': 'fakesnapshot', - 'id': 3} - -GROUP = {'id': GROUP_ID, } -GROUP_SNAPSHOT_ID = 1 -GROUP_SNAPSHOT = {'id': GROUP_SNAPSHOT_ID, - 'group_id': GROUP_ID} - -CONNECTOR = {'initiator': "iqn.2012-07.org.fake:01:948f189c4695", } - -FAKE_PROXY = 'cinder.tests.unit.volume.drivers.ibm.test_ibm_storage' \ - '.IBMStorageFakeProxyDriver' - - -class IBMStorageFakeProxyDriver(object): - """Fake IBM Storage driver - - Fake IBM Storage driver for IBM XIV, Spectrum Accelerate, - FlashSystem A9000, FlashSystem A9000R and DS8000 storage systems. 
- """ - - def __init__(self, ibm_storage_info, logger, expt, - driver=None, active_backend_id=None): - """Initialize Proxy.""" - - self.ibm_storage_info = ibm_storage_info - self.logger = logger - self.exception = expt - self.storage_portal = \ - self.storage_iqn = FAKE - - self.volumes = {} - self.snapshots = {} - self.driver = driver - - def setup(self, context): - if self.ibm_storage_info['user'] != self.driver\ - .configuration.san_login: - raise self.exception.NotAuthorized() - - if self.ibm_storage_info['address'] != self.driver\ - .configuration.san_ip: - raise self.exception.HostNotFound(host='fake') - - def create_volume(self, volume): - if volume['size'] > POOL_SIZE: - raise self.exception.VolumeBackendAPIException(data='blah') - self.volumes[volume['name']] = volume - - def volume_exists(self, volume): - return self.volumes.get(volume['name'], None) is not None - - def delete_volume(self, volume): - if self.volumes.get(volume['name'], None) is not None: - del self.volumes[volume['name']] - - def manage_volume_get_size(self, volume, existing_ref): - if self.volumes.get(existing_ref['source-name'], None) is None: - raise self.exception.VolumeNotFound(volume_id=volume['id']) - return self.volumes[existing_ref['source-name']]['size'] - - def manage_volume(self, volume, existing_ref): - if self.volumes.get(existing_ref['source-name'], None) is None: - raise self.exception.VolumeNotFound(volume_id=volume['id']) - volume['size'] = MANAGED_VOLUME['size'] - return {} - - def unmanage_volume(self, volume): - pass - - def initialize_connection(self, volume, connector): - if not self.volume_exists(volume): - raise self.exception.VolumeNotFound(volume_id=volume['id']) - lun_id = volume['id'] - - self.volumes[volume['name']]['attached'] = connector - - return {'driver_volume_type': 'iscsi', - 'data': {'target_discovered': True, - 'target_portal': self.storage_portal, - 'target_iqn': self.storage_iqn, - 'target_lun': lun_id, - 'volume_id': volume['id'], - 'multipath': 
True, - 'provider_location': "%s,1 %s %s" % ( - self.storage_portal, - self.storage_iqn, - lun_id), }, - } - - def terminate_connection(self, volume, connector): - if not self.volume_exists(volume): - raise self.exception.VolumeNotFound(volume_id=volume['id']) - if not self.is_volume_attached(volume, connector): - raise self.exception.NotFound(_('Volume not found for ' - 'instance %(instance_id)s.') - % {'instance_id': 'fake'}) - del self.volumes[volume['name']]['attached'] - - def is_volume_attached(self, volume, connector): - if not self.volume_exists(volume): - raise self.exception.VolumeNotFound(volume_id=volume['id']) - - return (self.volumes[volume['name']].get('attached', None) - == connector) - - def get_replication_status(self, context, volume): - if volume['replication_status'] == 'invalid_status_val': - raise exception.CinderException() - return {'replication_status': 'active'} - - def retype(self, ctxt, volume, new_type, diff, host): - volume['easytier'] = new_type['extra_specs']['easytier'] - return True, volume - - def create_group(self, ctxt, group): - - volumes = [volume for k, volume in self.volumes.items() - if volume['group_id'] == group['id']] - - if volumes: - raise exception.CinderException( - message='The group id of volume may be wrong.') - - return {'status': fields.GroupStatus.AVAILABLE} - - def delete_group(self, ctxt, group, volumes): - for volume in self.volumes.values(): - if group.get('id') == volume.get('group_id'): - if volume['name'] == CANNOT_DELETE: - raise exception.VolumeBackendAPIException( - message='Volume can not be deleted') - else: - volume['status'] = 'deleted' - volumes.append(volume) - - # Delete snapshots in group - self.snapshots = {k: snap for k, snap in self.snapshots.items() - if not(snap.get('group_id') == group.get('id'))} - - # Delete volume in group - self.volumes = {k: vol for k, vol in self.volumes.items() - if not(vol.get('group_id') == group.get('id'))} - - return {'status': fields.GroupStatus.DELETED}, 
volumes - - def update_group(self, context, group, add_volumes, remove_volumes): - model_update = {'status': fields.GroupStatus.AVAILABLE} - return model_update, None, None - - def create_group_from_src(self, context, group, volumes, group_snapshot, - snapshots, source_group=None, source_vols=None): - - return None, None - - def create_group_snapshot(self, ctxt, group_snapshot, snapshots): - for volume in self.volumes.values(): - if group_snapshot.get('group_id') == volume.get('group_id'): - if volume['size'] > POOL_SIZE / 2: - raise self.exception.VolumeBackendAPIException(data='blah') - - snapshot = copy.deepcopy(volume) - snapshot['name'] = ( - CANNOT_DELETE if snapshot['name'] == CANNOT_DELETE - else snapshot['name'] + 'Snapshot') - snapshot['status'] = 'available' - snapshot['group_snapshot_id'] = group_snapshot.get('id') - snapshot['group_id'] = group_snapshot.get('group_id') - self.snapshots[snapshot['name']] = snapshot - snapshots.append(snapshot) - - return {'status': fields.GroupSnapshotStatus.AVAILABLE}, snapshots - - def delete_group_snapshot(self, ctxt, group_snapshot, snapshots): - updated_snapshots = [] - for snapshot in snapshots: - if snapshot['name'] == CANNOT_DELETE: - raise exception.VolumeBackendAPIException( - message='Snapshot can not be deleted') - else: - snapshot['status'] = 'deleted' - updated_snapshots.append(snapshot) - - # Delete snapshots in group - self.snapshots = {k: snap for k, snap in self.snapshots.items() - if not(snap.get('group_id') - == group_snapshot.get('group_snapshot_id'))} - - return {'status': 'deleted'}, updated_snapshots - - def freeze_backend(self, context): - return True - - def thaw_backend(self, context): - return True - - def failover_host(self, context, volumes, secondary_id, groups=None): - target_id = 'BLA' - volume_update_list = [] - for volume in volumes: - status = 'failed-over' - if volume['replication_status'] == 'invalid_status_val': - status = 'error' - volume_update_list.append( - {'volume_id': 
volume['id'], - 'updates': {'replication_status': status}}) - - return target_id, volume_update_list, [] - - def enable_replication(self, context, group, volumes): - vol_status = [] - for vol in volumes: - vol_status.append( - {'id': vol['id'], - 'replication_status': fields.ReplicationStatus.ENABLED}) - return ( - {'replication_status': fields.ReplicationStatus.ENABLED}, - vol_status) - - def disable_replication(self, context, group, volumes): - volume_update_list = [] - for volume in volumes: - volume_update_list.append( - {'id': volume['id'], - 'replication_status': fields.ReplicationStatus.DISABLED}) - return ( - {'replication_status': fields.ReplicationStatus.DISABLED}, - volume_update_list) - - def failover_replication(self, context, group, volumes, secondary_id): - volume_update_list = [] - for volume in volumes: - volume_update_list.append( - {'id': volume['id'], - 'replication_status': fields.ReplicationStatus.FAILED_OVER}) - return ({'replication_status': fields.ReplicationStatus.FAILED_OVER}, - volume_update_list) - - def get_replication_error_status(self, context, groups): - return( - [{'group_id': groups[0]['id'], - 'replication_status': fields.ReplicationStatus.ERROR}], - [{'volume_id': VOLUME['id'], - 'replication_status': fields.ReplicationStatus.ERROR}]) - - -class IBMStorageVolumeDriverTest(test.TestCase): - """Test IBM Storage driver - - Test IBM Storage driver for IBM XIV, Spectrum Accelerate, - FlashSystem A9000, FlashSystem A9000R and DS8000 storage Systems. 
- """ - - def setUp(self): - """Initialize IBM Storage Driver.""" - super(IBMStorageVolumeDriverTest, self).setUp() - - configuration = mock.Mock(conf.Configuration) - configuration.san_is_local = False - configuration.proxy = FAKE_PROXY - configuration.connection_type = 'iscsi' - configuration.chap = 'disabled' - configuration.san_ip = FAKE - configuration.management_ips = FAKE - configuration.san_login = FAKE - configuration.san_clustername = FAKE - configuration.san_password = FAKE - configuration.append_config_values(mock.ANY) - - self.driver = ibm_storage.IBMStorageDriver( - configuration=configuration) - - def test_initialized_should_set_ibm_storage_info(self): - """Test that the san flags are passed to the IBM proxy.""" - - self.assertEqual( - self.driver.proxy.ibm_storage_info['user'], - self.driver.configuration.san_login) - self.assertEqual( - self.driver.proxy.ibm_storage_info['password'], - self.driver.configuration.san_password) - self.assertEqual( - self.driver.proxy.ibm_storage_info['address'], - self.driver.configuration.san_ip) - self.assertEqual( - self.driver.proxy.ibm_storage_info['vol_pool'], - self.driver.configuration.san_clustername) - - def test_setup_should_fail_if_credentials_are_invalid(self): - """Test that the proxy validates credentials.""" - - self.driver.proxy.ibm_storage_info['user'] = 'invalid' - self.assertRaises(exception.NotAuthorized, self.driver.do_setup, None) - - def test_setup_should_fail_if_connection_is_invalid(self): - """Test that the proxy validates connection.""" - - self.driver.proxy.ibm_storage_info['address'] = \ - 'invalid' - self.assertRaises(exception.HostNotFound, self.driver.do_setup, None) - - def test_create_volume(self): - """Test creating a volume.""" - - self.driver.do_setup(None) - self.driver.create_volume(VOLUME) - has_volume = self.driver.proxy.volume_exists(VOLUME) - self.assertTrue(has_volume) - self.driver.delete_volume(VOLUME) - - def test_volume_exists(self): - """Test the volume exist method 
with a volume that doesn't exist.""" - - self.driver.do_setup(None) - - self.assertFalse( - self.driver.proxy.volume_exists({'name': FAKE}) - ) - - def test_delete_volume(self): - """Verify that a volume is deleted.""" - - self.driver.do_setup(None) - self.driver.create_volume(VOLUME) - self.driver.delete_volume(VOLUME) - has_volume = self.driver.proxy.volume_exists(VOLUME) - self.assertFalse(has_volume) - - def test_delete_volume_should_fail_for_not_existing_volume(self): - """Verify that deleting a non-existing volume is OK.""" - - self.driver.do_setup(None) - self.driver.delete_volume(VOLUME) - - def test_create_volume_should_fail_if_no_pool_space_left(self): - """Verify that the proxy validates volume pool space.""" - - self.driver.do_setup(None) - self.assertRaises(exception.VolumeBackendAPIException, - self.driver.create_volume, - {'name': FAKE, - 'id': 1, - 'size': TOO_BIG_VOLUME_SIZE}) - - def test_initialize_connection(self): - """Test that inititialize connection attaches volume to host.""" - - self.driver.do_setup(None) - self.driver.create_volume(VOLUME) - self.driver.initialize_connection(VOLUME, CONNECTOR) - - self.assertTrue( - self.driver.proxy.is_volume_attached(VOLUME, CONNECTOR)) - - self.driver.terminate_connection(VOLUME, CONNECTOR) - self.driver.delete_volume(VOLUME) - - def test_initialize_connection_should_fail_for_non_existing_volume(self): - """Verify that initialize won't work for non-existing volume.""" - - self.driver.do_setup(None) - self.assertRaises(exception.VolumeNotFound, - self.driver.initialize_connection, - VOLUME, - CONNECTOR) - - def test_terminate_connection(self): - """Test terminating a connection.""" - - self.driver.do_setup(None) - self.driver.create_volume(VOLUME) - self.driver.initialize_connection(VOLUME, CONNECTOR) - self.driver.terminate_connection(VOLUME, CONNECTOR) - - self.assertFalse(self.driver.proxy.is_volume_attached( - VOLUME, - CONNECTOR)) - - self.driver.delete_volume(VOLUME) - - def 
test_terminate_connection_should_fail_on_non_existing_volume(self): - """Test that terminate won't work for non-existing volumes.""" - - self.driver.do_setup(None) - self.assertRaises(exception.VolumeNotFound, - self.driver.terminate_connection, - VOLUME, - CONNECTOR) - - def test_manage_existing_get_size(self): - """Test that manage_existing_get_size returns the expected size. """ - - self.driver.do_setup(None) - self.driver.create_volume(MANAGED_VOLUME) - existing_ref = {'source-name': MANAGED_VOLUME['name']} - return_size = self.driver.manage_existing_get_size( - VOLUME, - existing_ref) - self.assertEqual(MANAGED_VOLUME['size'], return_size) - - # cover both case, whether driver renames the volume or not - self.driver.delete_volume(VOLUME) - self.driver.delete_volume(MANAGED_VOLUME) - - def test_manage_existing_get_size_should_fail_on_non_existing_volume(self): - """Test that manage_existing_get_size fails on non existing volume. """ - - self.driver.do_setup(None) - # on purpose - do NOT create managed volume - existing_ref = {'source-name': MANAGED_VOLUME['name']} - self.assertRaises(exception.VolumeNotFound, - self.driver.manage_existing_get_size, - VOLUME, - existing_ref) - - def test_manage_existing(self): - """Test that manage_existing returns successfully. """ - - self.driver.do_setup(None) - self.driver.create_volume(MANAGED_VOLUME) - existing_ref = {'source-name': MANAGED_VOLUME['name']} - self.driver.manage_existing(VOLUME, existing_ref) - self.assertEqual(MANAGED_VOLUME['size'], VOLUME['size']) - - # cover both case, whether driver renames the volume or not - self.driver.delete_volume(VOLUME) - self.driver.delete_volume(MANAGED_VOLUME) - - def test_manage_existing_should_fail_on_non_existing_volume(self): - """Test that manage_existing fails on non existing volume. 
""" - - self.driver.do_setup(None) - # on purpose - do NOT create managed volume - existing_ref = {'source-name': MANAGED_VOLUME['name']} - self.assertRaises(exception.VolumeNotFound, - self.driver.manage_existing, - VOLUME, - existing_ref) - - def test_get_replication_status(self): - """Test that get_replication_status return successfully. """ - - self.driver.do_setup(None) - - # assume the replicated volume is inactive - replicated_volume = copy.deepcopy(REPLICATED_VOLUME) - replicated_volume['replication_status'] = 'inactive' - model_update = self.driver.get_replication_status( - CONTEXT, - replicated_volume - ) - self.assertEqual('active', model_update['replication_status']) - - def test_get_replication_status_fail_on_exception(self): - """Test that get_replication_status fails on exception""" - - self.driver.do_setup(None) - - replicated_volume = copy.deepcopy(REPLICATED_VOLUME) - # on purpose - set invalid value to replication_status - # expect an exception. - replicated_volume['replication_status'] = 'invalid_status_val' - self.assertRaises( - exception.CinderException, - self.driver.get_replication_status, - CONTEXT, - replicated_volume - ) - - def test_retype(self): - """Test that retype returns successfully.""" - - self.driver.do_setup(None) - - # prepare parameters - ctxt = context.get_admin_context() - - host = { - 'host': 'foo', - 'capabilities': { - 'location_info': 'ibm_storage_fake_1', - 'extent_size': '1024' - } - } - - key_specs_old = {'easytier': False, 'warning': 2, 'autoexpand': True} - key_specs_new = {'easytier': True, 'warning': 5, 'autoexpand': False} - old_type_ref = volume_types.create(ctxt, 'old', key_specs_old) - new_type_ref = volume_types.create(ctxt, 'new', key_specs_new) - - diff, equal = volume_types.volume_types_diff( - ctxt, - old_type_ref['id'], - new_type_ref['id'], - ) - - volume = copy.deepcopy(VOLUME) - old_type = volume_types.get_volume_type(ctxt, old_type_ref['id']) - volume['volume_type'] = old_type - volume['host'] = 
host - new_type = volume_types.get_volume_type(ctxt, new_type_ref['id']) - - self.driver.create_volume(volume) - ret = self.driver.retype(ctxt, volume, new_type, diff, host) - self.assertTrue(ret) - self.assertEqual('1', volume['easytier']) - - def test_retype_fail_on_exception(self): - """Test that retype fails on exception.""" - - self.driver.do_setup(None) - - # prepare parameters - ctxt = context.get_admin_context() - - host = { - 'host': 'foo', - 'capabilities': { - 'location_info': 'ibm_storage_fake_1', - 'extent_size': '1024' - } - } - - key_specs_old = {'easytier': False, 'warning': 2, 'autoexpand': True} - old_type_ref = volume_types.create(ctxt, 'old', key_specs_old) - new_type_ref = volume_types.create(ctxt, 'new') - - diff, equal = volume_types.volume_types_diff( - ctxt, - old_type_ref['id'], - new_type_ref['id'], - ) - - volume = copy.deepcopy(VOLUME) - old_type = volume_types.get_volume_type(ctxt, old_type_ref['id']) - volume['volume_type'] = old_type - volume['host'] = host - new_type = volume_types.get_volume_type(ctxt, new_type_ref['id']) - - self.driver.create_volume(volume) - self.assertRaises( - KeyError, - self.driver.retype, - ctxt, volume, new_type, diff, host - ) - - def test_create_group(self): - """Test that create_group return successfully.""" - - self.driver.do_setup(None) - - ctxt = context.get_admin_context() - - # Create group - model_update = self.driver.create_group(ctxt, GROUP) - - self.assertEqual(fields.GroupStatus.AVAILABLE, - model_update['status'], - "Group created failed") - - def test_create_group_fail_on_group_not_empty(self): - """Test create_group with empty group.""" - - self.driver.do_setup(None) - - ctxt = context.get_admin_context() - - # Create volumes - # And add the volumes into the group before creating group - self.driver.create_volume(GROUP_VOLUME) - - self.assertRaises(exception.CinderException, - self.driver.create_group, - ctxt, GROUP) - - def test_delete_group(self): - """Test that delete_group return 
successfully.""" - - self.driver.do_setup(None) - - ctxt = context.get_admin_context() - - # Create group - self.driver.create_group(ctxt, GROUP) - - # Create volumes and add them to group - self.driver.create_volume(GROUP_VOLUME) - - # Delete group - model_update, volumes = self.driver.delete_group(ctxt, GROUP, - [GROUP_VOLUME]) - - # Verify the result - self.assertEqual(fields.GroupStatus.DELETED, - model_update['status'], - 'Group deleted failed') - for volume in volumes: - self.assertEqual('deleted', - volume['status'], - 'Group deleted failed') - - def test_delete_group_fail_on_volume_not_delete(self): - """Test delete_group with volume delete failure.""" - - self.driver.do_setup(None) - - ctxt = context.get_admin_context() - - # Create group - self.driver.create_group(ctxt, GROUP) - - # Set the volume not to be deleted - volume = copy.deepcopy(GROUP_VOLUME) - volume['name'] = CANNOT_DELETE - - # Create volumes and add them to group - self.driver.create_volume(volume) - - self.assertRaises(exception.VolumeBackendAPIException, - self.driver.delete_group, - ctxt, GROUP, [volume]) - - def test_create_group_snapshot(self): - """Test that create_group_snapshot return successfully.""" - - self.driver.do_setup(None) - - ctxt = context.get_admin_context() - - # Create group - self.driver.create_group(ctxt, GROUP) - - # Create volumes and add them to group - self.driver.create_volume(VOLUME) - - # Create group snapshot - model_update, snapshots = self.driver.create_group_snapshot( - ctxt, GROUP_SNAPSHOT, [VOLUME]) - - # Verify the result - self.assertEqual(fields.GroupSnapshotStatus.AVAILABLE, - model_update['status'], - 'Group Snapshot created failed') - for snap in snapshots: - self.assertEqual('available', - snap['status']) - - # Clean the environment - self.driver.delete_group_snapshot(ctxt, GROUP_SNAPSHOT, [VOLUME]) - self.driver.delete_group(ctxt, GROUP, [VOLUME]) - - def test_create_group_snapshot_fail_on_no_pool_space_left(self): - """Test create_group_snapshot 
when no pool space left.""" - - self.driver.do_setup(None) - - ctxt = context.get_admin_context() - - # Create group - self.driver.create_group(ctxt, GROUP) - - # Set the volume size - volume = copy.deepcopy(GROUP_VOLUME) - volume['size'] = POOL_SIZE / 2 + 1 - - # Create volumes and add them to group - self.driver.create_volume(volume) - - self.assertRaises(exception.VolumeBackendAPIException, - self.driver.create_group_snapshot, - ctxt, GROUP_SNAPSHOT, [volume]) - - # Clean the environment - self.driver.volumes = None - self.driver.delete_group(ctxt, GROUP, [volume]) - - def test_delete_group_snapshot(self): - """Test that delete_group_snapshot return successfully.""" - - self.driver.do_setup(None) - - ctxt = context.get_admin_context() - - # Create group - self.driver.create_group(ctxt, GROUP) - - # Create volumes and add them to group - self.driver.create_volume(GROUP_VOLUME) - - # Create group snapshot - self.driver.create_group_snapshot(ctxt, GROUP_SNAPSHOT, [GROUP_VOLUME]) - - # Delete group snapshot - model_update, snapshots = self.driver.delete_group_snapshot( - ctxt, GROUP_SNAPSHOT, [GROUP_VOLUME]) - - # Verify the result - self.assertEqual(fields.GroupSnapshotStatus.DELETED, - model_update['status'], - 'Group Snapshot deleted failed') - for snap in snapshots: - self.assertEqual('deleted', - snap['status']) - - # Clean the environment - self.driver.delete_group(ctxt, GROUP, [GROUP_VOLUME]) - - def test_delete_group_snapshot_fail_on_snapshot_not_delete(self): - """Test delete_group_snapshot when the snapshot cannot be deleted.""" - - self.driver.do_setup(None) - - ctxt = context.get_admin_context() - - # Create group - self.driver.create_group(ctxt, GROUP) - - # Set the snapshot not to be deleted - volume = copy.deepcopy(GROUP_VOLUME) - volume['name'] = CANNOT_DELETE - - # Create volumes and add them to group - self.driver.create_volume(volume) - - # Create group snapshot - self.driver.create_group_snapshot(ctxt, GROUP_SNAPSHOT, [volume]) - - 
self.assertRaises(exception.VolumeBackendAPIException, - self.driver.delete_group_snapshot, - ctxt, GROUP_SNAPSHOT, [volume]) - - def test_update_group_without_volumes(self): - """Test update_group when there are no volumes specified.""" - - self.driver.do_setup(None) - - ctxt = context.get_admin_context() - - # Update group - model_update, added, removed = self.driver.update_group( - ctxt, GROUP, [], []) - - self.assertEqual(fields.GroupStatus.AVAILABLE, - model_update['status'], - "Group update failed") - self.assertIsNone(added, - "added volumes list is not empty") - self.assertIsNone(removed, - "removed volumes list is not empty") - - def test_update_group_with_volumes(self): - """Test update_group when there are volumes specified.""" - - self.driver.do_setup(None) - - ctxt = context.get_admin_context() - - # Update group - model_update, added, removed = self.driver.update_group( - ctxt, GROUP, [VOLUME], [VOLUME2]) - - self.assertEqual(fields.GroupStatus.AVAILABLE, - model_update['status'], - "Group update failed") - self.assertIsNone(added, - "added volumes list is not empty") - self.assertIsNone(removed, - "removed volumes list is not empty") - - def test_create_group_from_src_without_volumes(self): - """Test create_group_from_src with no volumes specified.""" - - self.driver.do_setup(None) - - ctxt = context.get_admin_context() - - # Create group from source - model_update, volumes_model_update = ( - self.driver.create_group_from_src( - ctxt, GROUP, [], GROUP_SNAPSHOT, [])) - - # model_update can be None or return available in status - if model_update: - self.assertEqual(fields.GroupStatus.AVAILABLE, - model_update['status'], - "Group create from source failed") - # volumes_model_update can be None or return available in status - if volumes_model_update: - self.assertFalse(volumes_model_update, - "volumes list is not empty") - - def test_create_group_from_src_with_volumes(self): - """Test create_group_from_src with volumes specified.""" - - 
self.driver.do_setup(None) - - ctxt = context.get_admin_context() - - # Create group from source - model_update, volumes_model_update = ( - self.driver.create_group_from_src( - ctxt, GROUP, [VOLUME], GROUP_SNAPSHOT, [SNAPSHOT])) - - # model_update can be None or return available in status - if model_update: - self.assertEqual(fields.GroupStatus.AVAILABLE, - model_update['status'], - "Group create from source failed") - # volumes_model_update can be None or return available in status - if volumes_model_update: - self.assertEqual(fields.GroupStatus.AVAILABLE, - volumes_model_update['status'], - "volumes list status failed") - - def test_freeze_backend(self): - """Test that freeze_backend returns successful""" - - self.driver.do_setup(None) - - # not much we can test here... - self.assertTrue(self.driver.freeze_backend(CONTEXT)) - - def test_thaw_backend(self): - """Test that thaw_backend returns successful""" - - self.driver.do_setup(None) - - # not much we can test here... - self.assertTrue(self.driver.thaw_backend(CONTEXT)) - - def test_failover_host(self): - """Test that failover_host returns expected values""" - - self.driver.do_setup(None) - - replicated_volume = copy.deepcopy(REPLICATED_VOLUME) - # assume the replication_status is active - replicated_volume['replication_status'] = 'active' - - expected_target_id = 'BLA' - expected_volume_update_list = [ - {'volume_id': REPLICATED_VOLUME['id'], - 'updates': {'replication_status': 'failed-over'}}] - - target_id, volume_update_list, __ = self.driver.failover_host( - CONTEXT, - [replicated_volume], - SECONDARY, - [] - ) - - self.assertEqual(expected_target_id, target_id) - self.assertEqual(expected_volume_update_list, volume_update_list) - - def test_failover_host_bad_state(self): - """Test that failover_host returns with error""" - - self.driver.do_setup(None) - - replicated_volume = copy.deepcopy(REPLICATED_VOLUME) - # assume the replication_status is active - replicated_volume['replication_status'] = 
'invalid_status_val' - - expected_target_id = 'BLA' - expected_volume_update_list = [ - {'volume_id': REPLICATED_VOLUME['id'], - 'updates': {'replication_status': 'error'}}] - - target_id, volume_update_list, __ = self.driver.failover_host( - CONTEXT, - [replicated_volume], - SECONDARY, - [] - ) - - self.assertEqual(expected_target_id, target_id) - self.assertEqual(expected_volume_update_list, volume_update_list) - - def test_enable_replication(self): - self.driver.do_setup(None) - model_update, volumes_model_update = self.driver.enable_replication( - CONTEXT, GROUP, [REPLICATED_VOLUME]) - self.assertEqual(fields.ReplicationStatus.ENABLED, - model_update['replication_status']) - for vol in volumes_model_update: - self.assertEqual(fields.ReplicationStatus.ENABLED, - vol['replication_status']) - - def test_disable_replication(self): - self.driver.do_setup(None) - model_update, volumes_model_update = self.driver.disable_replication( - CONTEXT, GROUP, [REPLICATED_VOLUME_DISABLED]) - self.assertEqual(fields.ReplicationStatus.DISABLED, - model_update['replication_status']) - for vol in volumes_model_update: - self.assertEqual(fields.ReplicationStatus.DISABLED, - volumes_model_update[0]['replication_status']) - - def test_failover_replication(self): - self.driver.do_setup(None) - model_update, volumes_model_update = self.driver.failover_replication( - CONTEXT, GROUP, [VOLUME], SECONDARY) - self.assertEqual(fields.ReplicationStatus.FAILED_OVER, - model_update['replication_status']) - - def test_get_replication_error_status(self): - self.driver.do_setup(None) - group_model_updates, volume_model_updates = ( - self.driver.get_replication_error_status(CONTEXT, [GROUP])) - self.assertEqual(fields.ReplicationStatus.ERROR, - group_model_updates[0]['replication_status']) - self.assertEqual(fields.ReplicationStatus.ERROR, - volume_model_updates[0]['replication_status']) diff --git a/cinder/tests/unit/volume/drivers/ibm/test_storwize_svc.py 
b/cinder/tests/unit/volume/drivers/ibm/test_storwize_svc.py deleted file mode 100644 index 6fcd53055..000000000 --- a/cinder/tests/unit/volume/drivers/ibm/test_storwize_svc.py +++ /dev/null @@ -1,7690 +0,0 @@ -# Copyright 2015 IBM Corp. -# Copyright 2012 OpenStack Foundation -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -# -""" -Tests for the IBM Storwize family and SVC volume driver. -""" - -import ddt -import json -import mock -import paramiko -import random -import re -import time - -from oslo_concurrency import processutils -from oslo_config import cfg -from oslo_utils import importutils -from oslo_utils import units -import six - -from cinder import context -from cinder import exception -from cinder.i18n import _ -from cinder import objects -from cinder.objects import fields -from cinder import ssh_utils -from cinder import test -from cinder.tests.unit import fake_constants as fake -from cinder.tests.unit import utils as testutils -from cinder import utils -from cinder.volume import configuration as conf -from cinder.volume.drivers.ibm.storwize_svc import ( - replication as storwize_rep) -from cinder.volume.drivers.ibm.storwize_svc import storwize_const -from cinder.volume.drivers.ibm.storwize_svc import storwize_svc_common -from cinder.volume.drivers.ibm.storwize_svc import storwize_svc_fc -from cinder.volume.drivers.ibm.storwize_svc import storwize_svc_iscsi -from cinder.volume import group_types -from cinder.volume import qos_specs 
-from cinder.volume import utils as volume_utils -from cinder.volume import volume_types - -SVC_POOLS = ['openstack', 'openstack1'] - -CONF = cfg.CONF - - -def _get_test_pool(get_all=False): - if get_all: - return SVC_POOLS - else: - return SVC_POOLS[0] - - -class StorwizeSVCManagementSimulator(object): - def __init__(self, pool_name): - self._flags = {'storwize_svc_volpool_name': pool_name} - self._volumes_list = {} - self._hosts_list = {} - self._mappings_list = {} - self._fcmappings_list = {} - self._fcconsistgrp_list = {} - self._rcrelationship_list = {} - self._partnership_list = {} - self._partnershipcandidate_list = {} - self._system_list = {'storwize-svc-sim': {'id': '0123456789ABCDEF', - 'name': 'storwize-svc-sim'}, - 'aux-svc-sim': {'id': 'ABCDEF0123456789', - 'name': 'aux-svc-sim'}} - self._other_pools = {'openstack2': {}, 'openstack3': {}} - self._next_cmd_error = { - 'lsportip': '', - 'lsfabric': '', - 'lsiscsiauth': '', - 'lsnodecanister': '', - 'mkvdisk': '', - 'lsvdisk': '', - 'lsfcmap': '', - 'prestartfcmap': '', - 'startfcmap': '', - 'rmfcmap': '', - 'lslicense': '', - 'lsguicapabilities': '', - 'lshost': '', - 'lsrcrelationship': '' - } - self._errors = { - 'CMMVC5701E': ('', 'CMMVC5701E No object ID was specified.'), - 'CMMVC6035E': ('', 'CMMVC6035E The action failed as the ' - 'object already exists.'), - 'CMMVC5753E': ('', 'CMMVC5753E The specified object does not ' - 'exist or is not a suitable candidate.'), - 'CMMVC5707E': ('', 'CMMVC5707E Required parameters are missing.'), - 'CMMVC6581E': ('', 'CMMVC6581E The command has failed because ' - 'the maximum number of allowed iSCSI ' - 'qualified names (IQNs) has been reached, ' - 'or the IQN is already assigned or is not ' - 'valid.'), - 'CMMVC5754E': ('', 'CMMVC5754E The specified object does not ' - 'exist, or the name supplied does not meet ' - 'the naming rules.'), - 'CMMVC6071E': ('', 'CMMVC6071E The VDisk-to-host mapping was ' - 'not created because the VDisk is already ' - 'mapped to a 
host.'), - 'CMMVC5879E': ('', 'CMMVC5879E The VDisk-to-host mapping was ' - 'not created because a VDisk is already ' - 'mapped to this host with this SCSI LUN.'), - 'CMMVC5840E': ('', 'CMMVC5840E The virtual disk (VDisk) was ' - 'not deleted because it is mapped to a ' - 'host or because it is part of a FlashCopy ' - 'or Remote Copy mapping, or is involved in ' - 'an image mode migrate.'), - 'CMMVC6527E': ('', 'CMMVC6527E The name that you have entered ' - 'is not valid. The name can contain letters, ' - 'numbers, spaces, periods, dashes, and ' - 'underscores. The name must begin with a ' - 'letter or an underscore. The name must not ' - 'begin or end with a space.'), - 'CMMVC5871E': ('', 'CMMVC5871E The action failed because one or ' - 'more of the configured port names is in a ' - 'mapping.'), - 'CMMVC5924E': ('', 'CMMVC5924E The FlashCopy mapping was not ' - 'created because the source and target ' - 'virtual disks (VDisks) are different sizes.'), - 'CMMVC6303E': ('', 'CMMVC6303E The create failed because the ' - 'source and target VDisks are the same.'), - 'CMMVC7050E': ('', 'CMMVC7050E The command failed because at ' - 'least one node in the I/O group does not ' - 'support compressed VDisks.'), - 'CMMVC6430E': ('', 'CMMVC6430E The command failed because the ' - 'target and source managed disk groups must ' - 'be different.'), - 'CMMVC6353E': ('', 'CMMVC6353E The command failed because the ' - 'copy specified does not exist.'), - 'CMMVC6446E': ('', 'The command failed because the managed disk ' - 'groups have different extent sizes.'), - # Catch-all for invalid state transitions: - 'CMMVC5903E': ('', 'CMMVC5903E The FlashCopy mapping was not ' - 'changed because the mapping or consistency ' - 'group is another state.'), - 'CMMVC5709E': ('', 'CMMVC5709E [-%(VALUE)s] is not a supported ' - 'parameter.'), - 'CMMVC5982E': ('', 'CMMVC5982E The operation was not performed ' - 'because it is not valid given the current ' - 'relationship state.'), - 'CMMVC5963E': ('', 
'CMMVC5963E No direction has been defined.'), - 'CMMVC5713E': ('', 'CMMVC5713E Some parameters are mutually ' - 'exclusive.'), - - } - self._fc_transitions = {'begin': {'make': 'idle_or_copied'}, - 'idle_or_copied': {'prepare': 'preparing', - 'delete': 'end', - 'delete_force': 'end'}, - 'preparing': {'flush_failed': 'stopped', - 'wait': 'prepared'}, - 'end': None, - 'stopped': {'prepare': 'preparing', - 'delete_force': 'end'}, - 'prepared': {'stop': 'stopped', - 'start': 'copying'}, - 'copying': {'wait': 'idle_or_copied', - 'stop': 'stopping'}, - # Assume the worst case where stopping->stopped - # rather than stopping idle_or_copied - 'stopping': {'wait': 'stopped'}, - } - - self._fc_cg_transitions = {'begin': {'make': 'empty'}, - 'empty': {'add': 'idle_or_copied'}, - 'idle_or_copied': {'prepare': 'preparing', - 'delete': 'end', - 'delete_force': 'end'}, - 'preparing': {'flush_failed': 'stopped', - 'wait': 'prepared'}, - 'end': None, - 'stopped': {'prepare': 'preparing', - 'delete_force': 'end'}, - 'prepared': {'stop': 'stopped', - 'start': 'copying', - 'delete_force': 'end', - 'delete': 'end'}, - 'copying': {'wait': 'idle_or_copied', - 'stop': 'stopping', - 'delete_force': 'end', - 'delete': 'end'}, - # Assume the case where stopping->stopped - # rather than stopping idle_or_copied - 'stopping': {'wait': 'stopped'}, - } - self._rc_transitions = {'inconsistent_stopped': - {'start': 'inconsistent_copying', - 'stop': 'inconsistent_stopped', - 'delete': 'end', - 'delete_force': 'end'}, - 'inconsistent_copying': { - 'wait': 'consistent_synchronized', - 'start': 'inconsistent_copying', - 'stop': 'inconsistent_stopped', - 'delete': 'end', - 'delete_force': 'end'}, - 'consistent_synchronized': { - 'start': 'consistent_synchronized', - 'stop': 'consistent_stopped', - 'stop_access': 'idling', - 'delete': 'end', - 'delete_force': 'end'}, - 'consistent_copying': { - 'start': 'consistent_copying', - 'stop': 'consistent_stopped', - 'stop_access': 'idling', - 'delete': 'end', - 
'delete_force': 'end'}, - 'consistent_stopped': - {'start': 'consistent_synchronized', - 'stop': 'consistent_stopped', - 'delete': 'end', - 'delete_force': 'end'}, - 'end': None, - 'idling': { - 'start': 'inconsistent_copying', - 'stop': 'inconsistent_stopped', - 'stop_access': 'idling', - 'delete': 'end', - 'delete_force': 'end'}, - } - - def _state_transition(self, function, fcmap): - if (function == 'wait' and - 'wait' not in self._fc_transitions[fcmap['status']]): - return ('', '') - - if fcmap['status'] == 'copying' and function == 'wait': - if fcmap['copyrate'] != '0': - if fcmap['progress'] == '0': - fcmap['progress'] = '50' - else: - fcmap['progress'] = '100' - fcmap['status'] = 'idle_or_copied' - return ('', '') - else: - try: - curr_state = fcmap['status'] - fcmap['status'] = self._fc_transitions[curr_state][function] - return ('', '') - except Exception: - return self._errors['CMMVC5903E'] - - def _fc_cg_state_transition(self, function, fc_consistgrp): - if (function == 'wait' and - 'wait' not in self._fc_transitions[fc_consistgrp['status']]): - return ('', '') - - try: - curr_state = fc_consistgrp['status'] - fc_consistgrp['status'] \ - = self._fc_cg_transitions[curr_state][function] - return ('', '') - except Exception: - return self._errors['CMMVC5903E'] - - # Find an unused ID - @staticmethod - def _find_unused_id(d): - ids = [] - for v in d.values(): - ids.append(int(v['id'])) - ids.sort() - for index, n in enumerate(ids): - if n > index: - return six.text_type(index) - return six.text_type(len(ids)) - - # Check if name is valid - @staticmethod - def _is_invalid_name(name): - if re.match(r'^[a-zA-Z_][\w._-]*$', name): - return False - return True - - # Convert argument string to dictionary - @staticmethod - def _cmd_to_dict(arg_list): - no_param_args = [ - 'autodelete', - 'bytes', - 'compressed', - 'force', - 'nohdr', - 'nofmtdisk', - 'global', - 'access', - 'start' - ] - one_param_args = [ - 'chapsecret', - 'cleanrate', - 'copy', - 'copyrate', - 
'delim', - 'easytier', - 'filtervalue', - 'grainsize', - 'hbawwpn', - 'host', - 'iogrp', - 'iscsiname', - 'mdiskgrp', - 'name', - 'rsize', - 'scsi', - 'size', - 'source', - 'target', - 'unit', - 'vdisk', - 'warning', - 'wwpn', - 'primary', - 'consistgrp', - 'master', - 'aux', - 'cluster', - 'linkbandwidthmbits', - 'backgroundcopyrate', - 'copies', - 'cyclingmode', - 'cycleperiodseconds', - 'masterchange', - 'auxchange', - ] - no_or_one_param_args = [ - 'autoexpand', - ] - - # Handle the special case of lsnode which is a two-word command - # Use the one word version of the command internally - if arg_list[0] in ('svcinfo', 'svctask'): - if arg_list[1] == 'lsnode': - if len(arg_list) > 4: # e.g. svcinfo lsnode -delim ! - ret = {'cmd': 'lsnode', 'node_id': arg_list[-1]} - else: - ret = {'cmd': 'lsnodecanister'} - else: - ret = {'cmd': arg_list[1]} - arg_list.pop(0) - else: - ret = {'cmd': arg_list[0]} - - skip = False - for i in range(1, len(arg_list)): - if skip: - skip = False - continue - # Check for a quoted command argument for volumes and strip - # quotes so that the simulater can match it later. Just - # match against test naming convensions for now. 
- if arg_list[i][0] == '"' and ('volume' in arg_list[i] or - 'snapshot' in arg_list[i]): - arg_list[i] = arg_list[i][1:-1] - if arg_list[i][0] == '-': - if arg_list[i][1:] in no_param_args: - ret[arg_list[i][1:]] = True - elif arg_list[i][1:] in one_param_args: - ret[arg_list[i][1:]] = arg_list[i + 1] - skip = True - elif arg_list[i][1:] in no_or_one_param_args: - if i == (len(arg_list) - 1) or arg_list[i + 1][0] == '-': - ret[arg_list[i][1:]] = True - else: - ret[arg_list[i][1:]] = arg_list[i + 1] - skip = True - else: - raise exception.InvalidInput( - reason=_('unrecognized argument %s') % arg_list[i]) - else: - ret['obj'] = arg_list[i] - return ret - - @staticmethod - def _print_info_cmd(rows, delim=' ', nohdr=False, **kwargs): - """Generic function for printing information.""" - if nohdr: - del rows[0] - - for index in range(len(rows)): - rows[index] = delim.join(rows[index]) - return ('%s' % '\n'.join(rows), '') - - @staticmethod - def _print_info_obj_cmd(header, row, delim=' ', nohdr=False): - """Generic function for printing information for a specific object.""" - objrows = [] - for idx, val in enumerate(header): - objrows.append([val, row[idx]]) - - if nohdr: - for index in range(len(objrows)): - objrows[index] = ' '.join(objrows[index][1:]) - for index in range(len(objrows)): - objrows[index] = delim.join(objrows[index]) - return ('%s' % '\n'.join(objrows), '') - - @staticmethod - def _convert_bytes_units(bytestr): - num = int(bytestr) - unit_array = ['B', 'KB', 'MB', 'GB', 'TB', 'PB'] - unit_index = 0 - - while num > 1024: - num = num / 1024 - unit_index += 1 - - return '%d%s' % (num, unit_array[unit_index]) - - @staticmethod - def _convert_units_bytes(num, unit): - unit_array = ['B', 'KB', 'MB', 'GB', 'TB', 'PB'] - unit_index = 0 - - while unit.lower() != unit_array[unit_index].lower(): - num = num * 1024 - unit_index += 1 - - return six.text_type(num) - - def _cmd_lslicense(self, **kwargs): - rows = [None] * 3 - rows[0] = ['used_compression_capacity', 
'0.08'] - rows[1] = ['license_compression_capacity', '0'] - if self._next_cmd_error['lslicense'] == 'no_compression': - self._next_cmd_error['lslicense'] = '' - rows[2] = ['license_compression_enclosures', '0'] - else: - rows[2] = ['license_compression_enclosures', '1'] - return self._print_info_cmd(rows=rows, **kwargs) - - def _cmd_lsguicapabilities(self, **kwargs): - rows = [None] * 2 - if self._next_cmd_error['lsguicapabilities'] == 'no_compression': - self._next_cmd_error['lsguicapabilities'] = '' - rows[0] = ['license_scheme', '0'] - else: - rows[0] = ['license_scheme', 'flex'] - rows[1] = ['product_key', storwize_const.DEV_MODEL_SVC] - return self._print_info_cmd(rows=rows, **kwargs) - - # Print mostly made-up stuff in the correct syntax - def _cmd_lssystem(self, **kwargs): - rows = [None] * 3 - rows[0] = ['id', '0123456789ABCDEF'] - rows[1] = ['name', 'storwize-svc-sim'] - rows[2] = ['code_level', '7.2.0.0 (build 87.0.1311291000)'] - return self._print_info_cmd(rows=rows, **kwargs) - - def _cmd_lssystem_aux(self, **kwargs): - rows = [None] * 3 - rows[0] = ['id', 'ABCDEF0123456789'] - rows[1] = ['name', 'aux-svc-sim'] - rows[2] = ['code_level', '7.2.0.0 (build 87.0.1311291000)'] - return self._print_info_cmd(rows=rows, **kwargs) - - # Print mostly made-up stuff in the correct syntax, assume -bytes passed - def _cmd_lsmdiskgrp(self, **kwargs): - pool_num = len(self._flags['storwize_svc_volpool_name']) - rows = [] - rows.append(['id', 'name', 'status', 'mdisk_count', - 'vdisk_count', 'capacity', 'extent_size', - 'free_capacity', 'virtual_capacity', 'used_capacity', - 'real_capacity', 'overallocation', 'warning', - 'easy_tier', 'easy_tier_status']) - for i in range(pool_num): - row_data = [str(i + 1), - self._flags['storwize_svc_volpool_name'][i], 'online', - '1', six.text_type(len(self._volumes_list)), - '3573412790272', '256', '3529926246400', - '1693247906775', - '26843545600', '38203734097', '47', '80', 'auto', - 'inactive'] - rows.append(row_data) - 
rows.append([str(pool_num + 1), 'openstack2', 'online', - '1', '0', '3573412790272', '256', - '3529432325160', '1693247906775', '26843545600', - '38203734097', '47', '80', 'auto', 'inactive']) - rows.append([str(pool_num + 2), 'openstack3', 'online', - '1', '0', '3573412790272', '128', - '3529432325160', '1693247906775', '26843545600', - '38203734097', '47', '80', 'auto', 'inactive']) - if 'obj' not in kwargs: - return self._print_info_cmd(rows=rows, **kwargs) - else: - pool_name = kwargs['obj'].strip('\'\"') - if pool_name == kwargs['obj']: - raise exception.InvalidInput( - reason=_('obj missing quotes %s') % kwargs['obj']) - elif pool_name in self._flags['storwize_svc_volpool_name']: - for each_row in rows: - if pool_name in each_row: - row = each_row - break - elif pool_name == 'openstack2': - row = rows[-2] - elif pool_name == 'openstack3': - row = rows[-1] - else: - return self._errors['CMMVC5754E'] - objrows = [] - for idx, val in enumerate(rows[0]): - objrows.append([val, row[idx]]) - if 'nohdr' in kwargs: - for index in range(len(objrows)): - objrows[index] = ' '.join(objrows[index][1:]) - - if 'delim' in kwargs: - for index in range(len(objrows)): - objrows[index] = kwargs['delim'].join(objrows[index]) - - return ('%s' % '\n'.join(objrows), '') - - def _get_mdiskgrp_id(self, mdiskgrp): - grp_num = len(self._flags['storwize_svc_volpool_name']) - if mdiskgrp in self._flags['storwize_svc_volpool_name']: - for i in range(grp_num): - if mdiskgrp == self._flags['storwize_svc_volpool_name'][i]: - return i + 1 - elif mdiskgrp == 'openstack2': - return grp_num + 1 - elif mdiskgrp == 'openstack3': - return grp_num + 2 - else: - return None - - # Print mostly made-up stuff in the correct syntax - def _cmd_lsnodecanister(self, **kwargs): - rows = [None] * 3 - rows[0] = ['id', 'name', 'UPS_serial_number', 'WWNN', 'status', - 'IO_group_id', 'IO_group_name', 'config_node', - 'UPS_unique_id', 'hardware', 'iscsi_name', 'iscsi_alias', - 'panel_name', 'enclosure_id', 
'canister_id', - 'enclosure_serial_number'] - rows[1] = ['1', 'node1', '', '123456789ABCDEF0', 'online', '0', - 'io_grp0', - 'yes', '123456789ABCDEF0', '100', - 'iqn.1982-01.com.ibm:1234.sim.node1', '', '01-1', '1', '1', - '0123ABC'] - rows[2] = ['2', 'node2', '', '123456789ABCDEF1', 'online', '0', - 'io_grp0', - 'no', '123456789ABCDEF1', '100', - 'iqn.1982-01.com.ibm:1234.sim.node2', '', '01-2', '1', '2', - '0123ABC'] - - if self._next_cmd_error['lsnodecanister'] == 'header_mismatch': - rows[0].pop(2) - self._next_cmd_error['lsnodecanister'] = '' - if self._next_cmd_error['lsnodecanister'] == 'remove_field': - for row in rows: - row.pop(0) - self._next_cmd_error['lsnodecanister'] = '' - - return self._print_info_cmd(rows=rows, **kwargs) - - # Print information of every single node of SVC - def _cmd_lsnode(self, **kwargs): - node_infos = dict() - node_infos['1'] = r'''id!1 -name!node1 -port_id!500507680210C744 -port_status!active -port_speed!8Gb -port_id!500507680220C744 -port_status!active -port_speed!8Gb -''' - node_infos['2'] = r'''id!2 -name!node2 -port_id!500507680220C745 -port_status!active -port_speed!8Gb -port_id!500507680230C745 -port_status!inactive -port_speed!N/A -''' - node_id = kwargs.get('node_id', None) - stdout = node_infos.get(node_id, '') - return stdout, '' - - # Print made up stuff for the ports - def _cmd_lsportfc(self, **kwargs): - node_1 = [None] * 7 - node_1[0] = ['id', 'fc_io_port_id', 'port_id', 'type', - 'port_speed', 'node_id', 'node_name', 'WWPN', - 'nportid', 'status', 'attachment'] - node_1[1] = ['0', '1', '1', 'fc', '8Gb', '1', 'node1', - '5005076802132ADE', '012E00', 'active', 'switch'] - node_1[2] = ['1', '2', '2', 'fc', '8Gb', '1', 'node1', - '5005076802232ADE', '012E00', 'active', 'switch'] - node_1[3] = ['2', '3', '3', 'fc', '8Gb', '1', 'node1', - '5005076802332ADE', '9B0600', 'active', 'switch'] - node_1[4] = ['3', '4', '4', 'fc', '8Gb', '1', 'node1', - '5005076802432ADE', '012A00', 'active', 'switch'] - node_1[5] = ['4', '5', 
'5', 'fc', '8Gb', '1', 'node1', - '5005076802532ADE', '014A00', 'active', 'switch'] - node_1[6] = ['5', '6', '4', 'ethernet', 'N/A', '1', 'node1', - '5005076802632ADE', '000000', - 'inactive_unconfigured', 'none'] - - node_2 = [None] * 7 - node_2[0] = ['id', 'fc_io_port_id', 'port_id', 'type', - 'port_speed', 'node_id', 'node_name', 'WWPN', - 'nportid', 'status', 'attachment'] - node_2[1] = ['6', '7', '7', 'fc', '8Gb', '2', 'node2', - '5005086802132ADE', '012E00', 'active', 'switch'] - node_2[2] = ['7', '8', '8', 'fc', '8Gb', '2', 'node2', - '5005086802232ADE', '012E00', 'active', 'switch'] - node_2[3] = ['8', '9', '9', 'fc', '8Gb', '2', 'node2', - '5005086802332ADE', '9B0600', 'active', 'switch'] - node_2[4] = ['9', '10', '10', 'fc', '8Gb', '2', 'node2', - '5005086802432ADE', '012A00', 'active', 'switch'] - node_2[5] = ['10', '11', '11', 'fc', '8Gb', '2', 'node2', - '5005086802532ADE', '014A00', 'active', 'switch'] - node_2[6] = ['11', '12', '12', 'ethernet', 'N/A', '2', 'node2', - '5005086802632ADE', '000000', - 'inactive_unconfigured', 'none'] - node_infos = [node_1, node_2] - node_id = int(kwargs['filtervalue'].split('=')[1]) - 1 - - return self._print_info_cmd(rows=node_infos[node_id], **kwargs) - - # Print mostly made-up stuff in the correct syntax - def _cmd_lsportip(self, **kwargs): - if self._next_cmd_error['lsportip'] == 'ip_no_config': - self._next_cmd_error['lsportip'] = '' - ip_addr1 = '' - ip_addr2 = '' - gw = '' - else: - ip_addr1 = '1.234.56.78' - ip_addr2 = '1.234.56.79' - ip_addr3 = '1.234.56.80' - ip_addr4 = '1.234.56.81' - gw = '1.234.56.1' - - rows = [None] * 17 - rows[0] = ['id', 'node_id', 'node_name', 'IP_address', 'mask', - 'gateway', 'IP_address_6', 'prefix_6', 'gateway_6', 'MAC', - 'duplex', 'state', 'speed', 'failover', 'link_state'] - rows[1] = ['1', '1', 'node1', ip_addr1, '255.255.255.0', - gw, '', '', '', '01:23:45:67:89:00', 'Full', - 'online', '1Gb/s', 'no', 'active'] - rows[2] = ['1', '1', 'node1', '', '', '', '', '', '', - 
'01:23:45:67:89:00', 'Full', 'online', '1Gb/s', 'yes', ''] - rows[3] = ['2', '1', 'node1', ip_addr3, '255.255.255.0', - gw, '', '', '', '01:23:45:67:89:01', 'Full', - 'configured', '1Gb/s', 'no', 'active'] - rows[4] = ['2', '1', 'node1', '', '', '', '', '', '', - '01:23:45:67:89:01', 'Full', 'unconfigured', '1Gb/s', - 'yes', 'inactive'] - rows[5] = ['3', '1', 'node1', '', '', '', '', '', '', '', '', - 'unconfigured', '', 'no', ''] - rows[6] = ['3', '1', 'node1', '', '', '', '', '', '', '', '', - 'unconfigured', '', 'yes', ''] - rows[7] = ['4', '1', 'node1', '', '', '', '', '', '', '', '', - 'unconfigured', '', 'no', ''] - rows[8] = ['4', '1', 'node1', '', '', '', '', '', '', '', '', - 'unconfigured', '', 'yes', ''] - rows[9] = ['1', '2', 'node2', ip_addr2, '255.255.255.0', - gw, '', '', '', '01:23:45:67:89:02', 'Full', - 'online', '1Gb/s', 'no', ''] - rows[10] = ['1', '2', 'node2', '', '', '', '', '', '', - '01:23:45:67:89:02', 'Full', 'online', '1Gb/s', 'yes', ''] - rows[11] = ['2', '2', 'node2', ip_addr4, '255.255.255.0', - gw, '', '', '', '01:23:45:67:89:03', 'Full', - 'configured', '1Gb/s', 'no', 'inactive'] - rows[12] = ['2', '2', 'node2', '', '', '', '', '', '', - '01:23:45:67:89:03', 'Full', 'unconfigured', '1Gb/s', - 'yes', ''] - rows[13] = ['3', '2', 'node2', '', '', '', '', '', '', '', '', - 'unconfigured', '', 'no', ''] - rows[14] = ['3', '2', 'node2', '', '', '', '', '', '', '', '', - 'unconfigured', '', 'yes', ''] - rows[15] = ['4', '2', 'node2', '', '', '', '', '', '', '', '', - 'unconfigured', '', 'no', ''] - rows[16] = ['4', '2', 'node2', '', '', '', '', '', '', '', '', - 'unconfigured', '', 'yes', ''] - - if self._next_cmd_error['lsportip'] == 'header_mismatch': - rows[0].pop(2) - self._next_cmd_error['lsportip'] = '' - if self._next_cmd_error['lsportip'] == 'remove_field': - for row in rows: - row.pop(1) - self._next_cmd_error['lsportip'] = '' - - return self._print_info_cmd(rows=rows, **kwargs) - - def _cmd_lsfabric(self, **kwargs): - if 
self._next_cmd_error['lsfabric'] == 'no_hosts': - return ('', '') - host_name = kwargs['host'].strip('\'\"') if 'host' in kwargs else None - target_wwpn = kwargs['wwpn'] if 'wwpn' in kwargs else None - host_infos = [] - for hv in self._hosts_list.values(): - if (not host_name) or (hv['host_name'] == host_name): - if not target_wwpn or target_wwpn in hv['wwpns']: - host_infos.append(hv) - break - if not len(host_infos): - return ('', '') - rows = [] - rows.append(['remote_wwpn', 'remote_nportid', 'id', 'node_name', - 'local_wwpn', 'local_port', 'local_nportid', 'state', - 'name', 'cluster_name', 'type']) - for host_info in host_infos: - for wwpn in host_info['wwpns']: - rows.append([wwpn, '123456', host_info['id'], 'nodeN', - 'AABBCCDDEEFF0011', '1', '0123ABC', 'active', - host_info['host_name'], '', 'host']) - if self._next_cmd_error['lsfabric'] == 'header_mismatch': - rows[0].pop(0) - self._next_cmd_error['lsfabric'] = '' - if self._next_cmd_error['lsfabric'] == 'remove_field': - for row in rows: - row.pop(0) - self._next_cmd_error['lsfabric'] = '' - if self._next_cmd_error['lsfabric'] == 'remove_rows': - rows = [] - return self._print_info_cmd(rows=rows, **kwargs) - - # Create a vdisk - def _cmd_mkvdisk(self, **kwargs): - # We only save the id/uid, name, and size - all else will be made up - volume_info = {} - volume_info['id'] = self._find_unused_id(self._volumes_list) - volume_info['uid'] = ('ABCDEF' * 3) + ('0' * 14) + volume_info['id'] - - mdiskgrp = kwargs['mdiskgrp'].strip('\'\"') - sec_pool = None - is_mirror_vol = False - if 'copies' in kwargs: - # it is a mirror volume - pool_split = mdiskgrp.split(':') - if len(pool_split) != 2: - raise exception.InvalidInput( - reason=_('mdiskgrp %s is invalid for mirror ' - 'volume') % kwargs['mdiskgrp']) - else: - is_mirror_vol = True - mdiskgrp = pool_split[0] - sec_pool = pool_split[1] - - if mdiskgrp == kwargs['mdiskgrp']: - raise exception.InvalidInput( - reason=_('mdiskgrp missing quotes %s') % 
kwargs['mdiskgrp']) - mdiskgrp_id = self._get_mdiskgrp_id(mdiskgrp) - sec_pool_id = self._get_mdiskgrp_id(sec_pool) - volume_info['mdisk_grp_name'] = mdiskgrp - volume_info['mdisk_grp_id'] = str(mdiskgrp_id) - - if 'name' in kwargs: - volume_info['name'] = kwargs['name'].strip('\'\"') - else: - volume_info['name'] = 'vdisk' + volume_info['id'] - - # Assume size and unit are given, store it in bytes - capacity = int(kwargs['size']) - unit = kwargs['unit'] - volume_info['capacity'] = self._convert_units_bytes(capacity, unit) - volume_info['IO_group_id'] = kwargs['iogrp'] - volume_info['IO_group_name'] = 'io_grp%s' % kwargs['iogrp'] - volume_info['RC_name'] = '' - volume_info['RC_id'] = '' - - if 'easytier' in kwargs: - if kwargs['easytier'] == 'on': - volume_info['easy_tier'] = 'on' - else: - volume_info['easy_tier'] = 'off' - - if 'rsize' in kwargs: - volume_info['formatted'] = 'no' - # Fake numbers - volume_info['used_capacity'] = '786432' - volume_info['real_capacity'] = '21474816' - volume_info['free_capacity'] = '38219264' - if 'warning' in kwargs: - volume_info['warning'] = kwargs['warning'].rstrip('%') - else: - volume_info['warning'] = '80' - if 'autoexpand' in kwargs: - volume_info['autoexpand'] = 'on' - else: - volume_info['autoexpand'] = 'off' - if 'grainsize' in kwargs: - volume_info['grainsize'] = kwargs['grainsize'] - else: - volume_info['grainsize'] = '32' - if 'compressed' in kwargs: - volume_info['compressed_copy'] = 'yes' - else: - volume_info['compressed_copy'] = 'no' - else: - volume_info['used_capacity'] = volume_info['capacity'] - volume_info['real_capacity'] = volume_info['capacity'] - volume_info['free_capacity'] = '0' - volume_info['warning'] = '' - volume_info['autoexpand'] = '' - volume_info['grainsize'] = '' - volume_info['compressed_copy'] = 'no' - volume_info['formatted'] = 'yes' - if 'nofmtdisk' in kwargs: - if kwargs['nofmtdisk']: - volume_info['formatted'] = 'no' - - vol_cp = {'id': '0', - 'status': 'online', - 'sync': 'yes', - 
'primary': 'yes', - 'mdisk_grp_id': str(mdiskgrp_id), - 'mdisk_grp_name': mdiskgrp, - 'easy_tier': volume_info['easy_tier'], - 'compressed_copy': volume_info['compressed_copy']} - volume_info['copies'] = {'0': vol_cp} - if is_mirror_vol: - vol_cp1 = {'id': '1', - 'status': 'online', - 'sync': 'yes', - 'primary': 'no', - 'mdisk_grp_id': str(sec_pool_id), - 'mdisk_grp_name': sec_pool, - 'easy_tier': volume_info['easy_tier'], - 'compressed_copy': volume_info['compressed_copy']} - volume_info['copies']['1'] = vol_cp1 - - if volume_info['name'] in self._volumes_list: - return self._errors['CMMVC6035E'] - else: - self._volumes_list[volume_info['name']] = volume_info - return ('Virtual Disk, id [%s], successfully created' % - (volume_info['id']), '') - - # Delete a vdisk - def _cmd_rmvdisk(self, **kwargs): - force = True if 'force' in kwargs else False - - if 'obj' not in kwargs: - return self._errors['CMMVC5701E'] - vol_name = kwargs['obj'].strip('\'\"') - - if vol_name not in self._volumes_list: - return self._errors['CMMVC5753E'] - - if not force: - for mapping in self._mappings_list.values(): - if mapping['vol'] == vol_name: - return self._errors['CMMVC5840E'] - for fcmap in self._fcmappings_list.values(): - if ((fcmap['source'] == vol_name) or - (fcmap['target'] == vol_name)): - return self._errors['CMMVC5840E'] - - del self._volumes_list[vol_name] - return ('', '') - - def _cmd_expandvdisksize(self, **kwargs): - if 'obj' not in kwargs: - return self._errors['CMMVC5701E'] - vol_name = kwargs['obj'].strip('\'\"') - - # Assume unit is gb - if 'size' not in kwargs: - return self._errors['CMMVC5707E'] - size = int(kwargs['size']) - - if vol_name not in self._volumes_list: - return self._errors['CMMVC5753E'] - - curr_size = int(self._volumes_list[vol_name]['capacity']) - addition = size * units.Gi - self._volumes_list[vol_name]['capacity'] = ( - six.text_type(curr_size + addition)) - return ('', '') - - def _get_fcmap_info(self, vol_name): - ret_vals = { - 'fc_id': '', - 
'fc_name': '', - 'fc_map_count': '0', - } - for fcmap in self._fcmappings_list.values(): - if ((fcmap['source'] == vol_name) or - (fcmap['target'] == vol_name)): - ret_vals['fc_id'] = fcmap['id'] - ret_vals['fc_name'] = fcmap['name'] - ret_vals['fc_map_count'] = '1' - return ret_vals - - # List information about vdisks - def _cmd_lsvdisk(self, **kwargs): - rows = [] - rows.append(['id', 'name', 'IO_group_id', 'IO_group_name', - 'status', 'mdisk_grp_id', 'mdisk_grp_name', - 'capacity', 'type', 'FC_id', 'FC_name', 'RC_id', - 'RC_name', 'vdisk_UID', 'fc_map_count', 'copy_count', - 'fast_write_state', 'se_copy_count', 'RC_change']) - - for vol in self._volumes_list.values(): - if (('filtervalue' not in kwargs) or - (kwargs['filtervalue'] == 'name=' + vol['name']) or - (kwargs['filtervalue'] == 'vdisk_UID=' + vol['uid'])): - fcmap_info = self._get_fcmap_info(vol['name']) - - if 'bytes' in kwargs: - cap = self._convert_bytes_units(vol['capacity']) - else: - cap = vol['capacity'] - rows.append([six.text_type(vol['id']), vol['name'], - vol['IO_group_id'], - vol['IO_group_name'], 'online', '0', - _get_test_pool(), - cap, 'striped', - fcmap_info['fc_id'], fcmap_info['fc_name'], - '', '', vol['uid'], - fcmap_info['fc_map_count'], '1', 'empty', - '1', 'no']) - if 'obj' not in kwargs: - return self._print_info_cmd(rows=rows, **kwargs) - else: - if kwargs['obj'] not in self._volumes_list: - return self._errors['CMMVC5754E'] - vol = self._volumes_list[kwargs['obj']] - fcmap_info = self._get_fcmap_info(vol['name']) - cap = vol['capacity'] - cap_u = vol['used_capacity'] - cap_r = vol['real_capacity'] - cap_f = vol['free_capacity'] - if 'bytes' not in kwargs: - for item in [cap, cap_u, cap_r, cap_f]: - item = self._convert_bytes_units(item) - rows = [] - - rows.append(['id', six.text_type(vol['id'])]) - rows.append(['name', vol['name']]) - rows.append(['IO_group_id', vol['IO_group_id']]) - rows.append(['IO_group_name', vol['IO_group_name']]) - rows.append(['status', 'online']) - 
rows.append(['capacity', cap]) - rows.append(['formatted', vol['formatted']]) - rows.append(['mdisk_id', '']) - rows.append(['mdisk_name', '']) - rows.append(['FC_id', fcmap_info['fc_id']]) - rows.append(['FC_name', fcmap_info['fc_name']]) - rows.append(['RC_id', vol['RC_id']]) - rows.append(['RC_name', vol['RC_name']]) - rows.append(['vdisk_UID', vol['uid']]) - rows.append(['throttling', '0']) - - if self._next_cmd_error['lsvdisk'] == 'blank_pref_node': - rows.append(['preferred_node_id', '']) - self._next_cmd_error['lsvdisk'] = '' - elif self._next_cmd_error['lsvdisk'] == 'no_pref_node': - self._next_cmd_error['lsvdisk'] = '' - else: - rows.append(['preferred_node_id', '1']) - rows.append(['fast_write_state', 'empty']) - rows.append(['cache', 'readwrite']) - rows.append(['udid', '']) - rows.append(['fc_map_count', fcmap_info['fc_map_count']]) - rows.append(['sync_rate', '50']) - rows.append(['copy_count', '1']) - rows.append(['se_copy_count', '0']) - rows.append(['mirror_write_priority', 'latency']) - rows.append(['RC_change', 'no']) - - for copy in vol['copies'].values(): - rows.append(['copy_id', copy['id']]) - rows.append(['status', copy['status']]) - rows.append(['primary', copy['primary']]) - rows.append(['mdisk_grp_id', copy['mdisk_grp_id']]) - rows.append(['mdisk_grp_name', copy['mdisk_grp_name']]) - rows.append(['type', 'striped']) - rows.append(['used_capacity', cap_u]) - rows.append(['real_capacity', cap_r]) - rows.append(['free_capacity', cap_f]) - rows.append(['easy_tier', copy['easy_tier']]) - rows.append(['compressed_copy', copy['compressed_copy']]) - rows.append(['autoexpand', vol['autoexpand']]) - rows.append(['warning', vol['warning']]) - rows.append(['grainsize', vol['grainsize']]) - - if 'nohdr' in kwargs: - for index in range(len(rows)): - rows[index] = ' '.join(rows[index][1:]) - - if 'delim' in kwargs: - for index in range(len(rows)): - rows[index] = kwargs['delim'].join(rows[index]) - return ('%s' % '\n'.join(rows), '') - - def 
_cmd_lsiogrp(self, **kwargs): - rows = [None] * 6 - rows[0] = ['id', 'name', 'node_count', 'vdisk_count', 'host_count'] - rows[1] = ['0', 'io_grp0', '2', '0', '4'] - rows[2] = ['1', 'io_grp1', '2', '0', '4'] - rows[3] = ['2', 'io_grp2', '0', '0', '4'] - rows[4] = ['3', 'io_grp3', '0', '0', '4'] - rows[5] = ['4', 'recovery_io_grp', '0', '0', '0'] - return self._print_info_cmd(rows=rows, **kwargs) - - def _add_port_to_host(self, host_info, **kwargs): - if 'iscsiname' in kwargs: - added_key = 'iscsi_names' - added_val = kwargs['iscsiname'].strip('\'\"') - elif 'hbawwpn' in kwargs: - added_key = 'wwpns' - added_val = kwargs['hbawwpn'].strip('\'\"') - else: - return self._errors['CMMVC5707E'] - - host_info[added_key].append(added_val) - - for v in self._hosts_list.values(): - if v['id'] == host_info['id']: - continue - for port in v[added_key]: - if port == added_val: - return self._errors['CMMVC6581E'] - return ('', '') - - # Make a host - def _cmd_mkhost(self, **kwargs): - host_info = {} - host_info['id'] = self._find_unused_id(self._hosts_list) - - if 'name' in kwargs: - host_name = kwargs['name'].strip('\'\"') - else: - host_name = 'host' + six.text_type(host_info['id']) - - if self._is_invalid_name(host_name): - return self._errors['CMMVC6527E'] - - if host_name in self._hosts_list: - return self._errors['CMMVC6035E'] - - host_info['host_name'] = host_name - host_info['iscsi_names'] = [] - host_info['wwpns'] = [] - - out, err = self._add_port_to_host(host_info, **kwargs) - if not len(err): - self._hosts_list[host_name] = host_info - return ('Host, id [%s], successfully created' % - (host_info['id']), '') - else: - return (out, err) - - # Add ports to an existing host - def _cmd_addhostport(self, **kwargs): - if 'obj' not in kwargs: - return self._errors['CMMVC5701E'] - host_name = kwargs['obj'].strip('\'\"') - - if host_name not in self._hosts_list: - return self._errors['CMMVC5753E'] - - host_info = self._hosts_list[host_name] - return 
self._add_port_to_host(host_info, **kwargs) - - # Change host properties - def _cmd_chhost(self, **kwargs): - if 'chapsecret' not in kwargs: - return self._errors['CMMVC5707E'] - secret = kwargs['obj'].strip('\'\"') - - if 'obj' not in kwargs: - return self._errors['CMMVC5701E'] - host_name = kwargs['obj'].strip('\'\"') - - if host_name not in self._hosts_list: - return self._errors['CMMVC5753E'] - - self._hosts_list[host_name]['chapsecret'] = secret - return ('', '') - - # Remove a host - def _cmd_rmhost(self, **kwargs): - if 'obj' not in kwargs: - return self._errors['CMMVC5701E'] - - host_name = kwargs['obj'].strip('\'\"') - if host_name not in self._hosts_list: - return self._errors['CMMVC5753E'] - - for v in self._mappings_list.values(): - if (v['host'] == host_name): - return self._errors['CMMVC5871E'] - - del self._hosts_list[host_name] - return ('', '') - - # List information about hosts - def _cmd_lshost(self, **kwargs): - if 'obj' not in kwargs: - rows = [] - rows.append(['id', 'name', 'port_count', 'iogrp_count', 'status']) - - found = False - # Sort hosts by names to give predictable order for tests - # depend on it. 
- for host_name in sorted(self._hosts_list.keys()): - host = self._hosts_list[host_name] - filterstr = 'name=' + host['host_name'] - if (('filtervalue' not in kwargs) or - (kwargs['filtervalue'] == filterstr)): - rows.append([host['id'], host['host_name'], '1', '4', - 'offline']) - found = True - if found: - return self._print_info_cmd(rows=rows, **kwargs) - else: - return ('', '') - else: - if self._next_cmd_error['lshost'] == 'missing_host': - self._next_cmd_error['lshost'] = '' - return self._errors['CMMVC5754E'] - elif self._next_cmd_error['lshost'] == 'bigger_troubles': - return self._errors['CMMVC6527E'] - host_name = kwargs['obj'].strip('\'\"') - if host_name not in self._hosts_list: - return self._errors['CMMVC5754E'] - if (self._next_cmd_error['lshost'] == 'fail_fastpath' and - host_name == 'DifferentHost'): - return self._errors['CMMVC5701E'] - host = self._hosts_list[host_name] - rows = [] - rows.append(['id', host['id']]) - rows.append(['name', host['host_name']]) - rows.append(['port_count', '1']) - rows.append(['type', 'generic']) - rows.append(['mask', '1111']) - rows.append(['iogrp_count', '4']) - rows.append(['status', 'online']) - for port in host['iscsi_names']: - rows.append(['iscsi_name', port]) - rows.append(['node_logged_in_count', '0']) - rows.append(['state', 'offline']) - for port in host['wwpns']: - rows.append(['WWPN', port]) - rows.append(['node_logged_in_count', '0']) - rows.append(['state', 'active']) - - if 'nohdr' in kwargs: - for index in range(len(rows)): - rows[index] = ' '.join(rows[index][1:]) - - if 'delim' in kwargs: - for index in range(len(rows)): - rows[index] = kwargs['delim'].join(rows[index]) - - return ('%s' % '\n'.join(rows), '') - - # List iSCSI authorization information about hosts - def _cmd_lsiscsiauth(self, **kwargs): - if self._next_cmd_error['lsiscsiauth'] == 'no_info': - self._next_cmd_error['lsiscsiauth'] = '' - return ('', '') - rows = [] - rows.append(['type', 'id', 'name', 'iscsi_auth_method', - 
'iscsi_chap_secret']) - - for host in self._hosts_list.values(): - method = 'none' - secret = '' - if 'chapsecret' in host: - method = 'chap' - secret = host['chapsecret'] - rows.append(['host', host['id'], host['host_name'], method, - secret]) - return self._print_info_cmd(rows=rows, **kwargs) - - # Create a vdisk-host mapping - def _cmd_mkvdiskhostmap(self, **kwargs): - mapping_info = {} - mapping_info['id'] = self._find_unused_id(self._mappings_list) - if 'host' not in kwargs: - return self._errors['CMMVC5707E'] - mapping_info['host'] = kwargs['host'].strip('\'\"') - - if 'scsi' in kwargs: - mapping_info['lun'] = kwargs['scsi'].strip('\'\"') - else: - mapping_info['lun'] = mapping_info['id'] - - if 'obj' not in kwargs: - return self._errors['CMMVC5707E'] - mapping_info['vol'] = kwargs['obj'].strip('\'\"') - - if mapping_info['vol'] not in self._volumes_list: - return self._errors['CMMVC5753E'] - - if mapping_info['host'] not in self._hosts_list: - return self._errors['CMMVC5754E'] - - if mapping_info['vol'] in self._mappings_list: - return self._errors['CMMVC6071E'] - - for v in self._mappings_list.values(): - if ((v['host'] == mapping_info['host']) and - (v['lun'] == mapping_info['lun'])): - return self._errors['CMMVC5879E'] - - for v in self._mappings_list.values(): - if (v['vol'] == mapping_info['vol']) and ('force' not in kwargs): - return self._errors['CMMVC6071E'] - - self._mappings_list[mapping_info['id']] = mapping_info - return ('Virtual Disk to Host map, id [%s], successfully created' - % (mapping_info['id']), '') - - # Delete a vdisk-host mapping - def _cmd_rmvdiskhostmap(self, **kwargs): - if 'host' not in kwargs: - return self._errors['CMMVC5707E'] - host = kwargs['host'].strip('\'\"') - - if 'obj' not in kwargs: - return self._errors['CMMVC5701E'] - vol = kwargs['obj'].strip('\'\"') - - mapping_ids = [] - for v in self._mappings_list.values(): - if v['vol'] == vol: - mapping_ids.append(v['id']) - if not mapping_ids: - return 
self._errors['CMMVC5753E'] - - this_mapping = None - for mapping_id in mapping_ids: - if self._mappings_list[mapping_id]['host'] == host: - this_mapping = mapping_id - if this_mapping is None: - return self._errors['CMMVC5753E'] - - del self._mappings_list[this_mapping] - return ('', '') - - # List information about host->vdisk mappings - def _cmd_lshostvdiskmap(self, **kwargs): - host_name = kwargs['obj'].strip('\'\"') - - if host_name not in self._hosts_list: - return self._errors['CMMVC5754E'] - - rows = [] - rows.append(['id', 'name', 'SCSI_id', 'vdisk_id', 'vdisk_name', - 'vdisk_UID']) - - for mapping in self._mappings_list.values(): - if (host_name == '') or (mapping['host'] == host_name): - volume = self._volumes_list[mapping['vol']] - rows.append([mapping['id'], mapping['host'], - mapping['lun'], volume['id'], - volume['name'], volume['uid']]) - - return self._print_info_cmd(rows=rows, **kwargs) - - # List information about vdisk->host mappings - def _cmd_lsvdiskhostmap(self, **kwargs): - mappings_found = 0 - vdisk_name = kwargs['obj'].strip('\'\"') - - if vdisk_name not in self._volumes_list: - return self._errors['CMMVC5753E'] - - rows = [] - rows.append(['id name', 'SCSI_id', 'host_id', 'host_name', 'vdisk_UID', - 'IO_group_id', 'IO_group_name']) - - for mapping in self._mappings_list.values(): - if (mapping['vol'] == vdisk_name): - mappings_found += 1 - volume = self._volumes_list[mapping['vol']] - host = self._hosts_list[mapping['host']] - rows.append([volume['id'], mapping['lun'], host['id'], - host['host_name'], volume['uid'], - volume['IO_group_id'], volume['IO_group_name']]) - - if mappings_found: - return self._print_info_cmd(rows=rows, **kwargs) - else: - return ('', '') - - # Create a FlashCopy mapping - def _cmd_mkfcmap(self, **kwargs): - source = '' - target = '' - copyrate = kwargs['copyrate'] if 'copyrate' in kwargs else '50' - - if 'source' not in kwargs: - return self._errors['CMMVC5707E'] - source = kwargs['source'].strip('\'\"') - if 
source not in self._volumes_list: - return self._errors['CMMVC5754E'] - - if 'target' not in kwargs: - return self._errors['CMMVC5707E'] - target = kwargs['target'].strip('\'\"') - if target not in self._volumes_list: - return self._errors['CMMVC5754E'] - - if source == target: - return self._errors['CMMVC6303E'] - - if (self._volumes_list[source]['capacity'] != - self._volumes_list[target]['capacity']): - return self._errors['CMMVC5754E'] - - fcmap_info = {} - fcmap_info['source'] = source - fcmap_info['target'] = target - fcmap_info['id'] = self._find_unused_id(self._fcmappings_list) - fcmap_info['name'] = 'fcmap' + fcmap_info['id'] - fcmap_info['copyrate'] = copyrate - fcmap_info['progress'] = '0' - fcmap_info['autodelete'] = True if 'autodelete' in kwargs else False - fcmap_info['status'] = 'idle_or_copied' - - # Add fcmap to consistency group - if 'consistgrp' in kwargs: - consistgrp = kwargs['consistgrp'] - - # if is digit, assume is cg id, else is cg name - cg_id = 0 - if not consistgrp.isdigit(): - for consistgrp_key in self._fcconsistgrp_list.keys(): - if (self._fcconsistgrp_list[consistgrp_key]['name'] - == consistgrp): - cg_id = consistgrp_key - fcmap_info['consistgrp'] = consistgrp_key - break - else: - if int(consistgrp) in self._fcconsistgrp_list.keys(): - cg_id = int(consistgrp) - - # If can't find exist consistgrp id, return not exist error - if not cg_id: - return self._errors['CMMVC5754E'] - - fcmap_info['consistgrp'] = cg_id - # Add fcmap to consistgrp - self._fcconsistgrp_list[cg_id]['fcmaps'][fcmap_info['id']] = ( - fcmap_info['name']) - self._fc_cg_state_transition('add', - self._fcconsistgrp_list[cg_id]) - - self._fcmappings_list[fcmap_info['id']] = fcmap_info - - return('FlashCopy Mapping, id [' + fcmap_info['id'] + - '], successfully created', '') - - def _cmd_prestartfcmap(self, **kwargs): - if 'obj' not in kwargs: - return self._errors['CMMVC5701E'] - id_num = kwargs['obj'] - - if self._next_cmd_error['prestartfcmap'] == 'bad_id': - 
id_num = -1 - self._next_cmd_error['prestartfcmap'] = '' - - try: - fcmap = self._fcmappings_list[id_num] - except KeyError: - return self._errors['CMMVC5753E'] - - return self._state_transition('prepare', fcmap) - - def _cmd_startfcmap(self, **kwargs): - if 'obj' not in kwargs: - return self._errors['CMMVC5701E'] - id_num = kwargs['obj'] - - if self._next_cmd_error['startfcmap'] == 'bad_id': - id_num = -1 - self._next_cmd_error['startfcmap'] = '' - - try: - fcmap = self._fcmappings_list[id_num] - except KeyError: - return self._errors['CMMVC5753E'] - - return self._state_transition('start', fcmap) - - def _cmd_stopfcmap(self, **kwargs): - if 'obj' not in kwargs: - return self._errors['CMMVC5701E'] - id_num = kwargs['obj'] - - try: - fcmap = self._fcmappings_list[id_num] - except KeyError: - return self._errors['CMMVC5753E'] - - return self._state_transition('stop', fcmap) - - def _cmd_rmfcmap(self, **kwargs): - if 'obj' not in kwargs: - return self._errors['CMMVC5701E'] - id_num = kwargs['obj'] - force = True if 'force' in kwargs else False - - if self._next_cmd_error['rmfcmap'] == 'bad_id': - id_num = -1 - self._next_cmd_error['rmfcmap'] = '' - - try: - fcmap = self._fcmappings_list[id_num] - except KeyError: - return self._errors['CMMVC5753E'] - - function = 'delete_force' if force else 'delete' - ret = self._state_transition(function, fcmap) - if fcmap['status'] == 'end': - del self._fcmappings_list[id_num] - return ret - - def _cmd_lsvdiskfcmappings(self, **kwargs): - if 'obj' not in kwargs: - return self._errors['CMMVC5707E'] - vdisk = kwargs['obj'] - rows = [] - rows.append(['id', 'name']) - for v in self._fcmappings_list.values(): - if v['source'] == vdisk or v['target'] == vdisk: - rows.append([v['id'], v['name']]) - return self._print_info_cmd(rows=rows, **kwargs) - - def _cmd_chfcmap(self, **kwargs): - if 'obj' not in kwargs: - return self._errors['CMMVC5707E'] - id_num = kwargs['obj'] - - try: - fcmap = self._fcmappings_list[id_num] - except KeyError: - 
return self._errors['CMMVC5753E'] - - for key in ['name', 'copyrate', 'autodelete']: - if key in kwargs: - fcmap[key] = kwargs[key] - return ('', '') - - def _cmd_lsfcmap(self, **kwargs): - rows = [] - rows.append(['id', 'name', 'source_vdisk_id', 'source_vdisk_name', - 'target_vdisk_id', 'target_vdisk_name', 'group_id', - 'group_name', 'status', 'progress', 'copy_rate', - 'clean_progress', 'incremental', 'partner_FC_id', - 'partner_FC_name', 'restoring', 'start_time', - 'rc_controlled']) - - # Assume we always get a filtervalue argument - filter_key = kwargs['filtervalue'].split('=')[0] - filter_value = kwargs['filtervalue'].split('=')[1] - to_delete = [] - for k, v in self._fcmappings_list.items(): - if six.text_type(v[filter_key]) == filter_value: - source = self._volumes_list[v['source']] - target = self._volumes_list[v['target']] - self._state_transition('wait', v) - - if self._next_cmd_error['lsfcmap'] == 'speed_up': - self._next_cmd_error['lsfcmap'] = '' - curr_state = v['status'] - while self._state_transition('wait', v) == ("", ""): - if curr_state == v['status']: - break - curr_state = v['status'] - - if ((v['status'] == 'idle_or_copied' and v['autodelete'] and - v['progress'] == '100') or (v['status'] == 'end')): - to_delete.append(k) - else: - rows.append([v['id'], v['name'], source['id'], - source['name'], target['id'], target['name'], - '', '', v['status'], v['progress'], - v['copyrate'], '100', 'off', '', '', 'no', '', - 'no']) - - for d in to_delete: - del self._fcmappings_list[d] - - return self._print_info_cmd(rows=rows, **kwargs) - - # Create a FlashCopy mapping - def _cmd_mkfcconsistgrp(self, **kwargs): - fcconsistgrp_info = {} - fcconsistgrp_info['id'] = self._find_unused_id(self._fcconsistgrp_list) - - if 'name' in kwargs: - fcconsistgrp_info['name'] = kwargs['name'].strip('\'\"') - else: - fcconsistgrp_info['name'] = 'fccstgrp' + fcconsistgrp_info['id'] - - if 'autodelete' in kwargs: - fcconsistgrp_info['autodelete'] = True - else: - 
fcconsistgrp_info['autodelete'] = False - fcconsistgrp_info['status'] = 'empty' - fcconsistgrp_info['start_time'] = None - fcconsistgrp_info['fcmaps'] = {} - - self._fcconsistgrp_list[fcconsistgrp_info['id']] = fcconsistgrp_info - - return('FlashCopy Consistency Group, id [' + fcconsistgrp_info['id'] + - '], successfully created', '') - - def _cmd_prestartfcconsistgrp(self, **kwargs): - if 'obj' not in kwargs: - return self._errors['CMMVC5701E'] - cg_name = kwargs['obj'] - - cg_id = 0 - for cg_id in self._fcconsistgrp_list.keys(): - if cg_name == self._fcconsistgrp_list[cg_id]['name']: - break - - return self._fc_cg_state_transition('prepare', - self._fcconsistgrp_list[cg_id]) - - def _cmd_startfcconsistgrp(self, **kwargs): - if 'obj' not in kwargs: - return self._errors['CMMVC5701E'] - cg_name = kwargs['obj'] - - cg_id = 0 - for cg_id in self._fcconsistgrp_list.keys(): - if cg_name == self._fcconsistgrp_list[cg_id]['name']: - break - - return self._fc_cg_state_transition('start', - self._fcconsistgrp_list[cg_id]) - - def _cmd_stopfcconsistgrp(self, **kwargs): - if 'obj' not in kwargs: - return self._errors['CMMVC5701E'] - id_num = kwargs['obj'] - - try: - fcconsistgrps = self._fcconsistgrp_list[id_num] - except KeyError: - return self._errors['CMMVC5753E'] - - return self._fc_cg_state_transition('stop', fcconsistgrps) - - def _cmd_rmfcconsistgrp(self, **kwargs): - if 'obj' not in kwargs: - return self._errors['CMMVC5701E'] - cg_name = kwargs['obj'] - force = True if 'force' in kwargs else False - - cg_id = 0 - for cg_id in self._fcconsistgrp_list.keys(): - if cg_name == self._fcconsistgrp_list[cg_id]['name']: - break - if not cg_id: - return self._errors['CMMVC5753E'] - fcconsistgrps = self._fcconsistgrp_list[cg_id] - - function = 'delete_force' if force else 'delete' - ret = self._fc_cg_state_transition(function, fcconsistgrps) - if fcconsistgrps['status'] == 'end': - del self._fcconsistgrp_list[cg_id] - return ret - - def _cmd_lsfcconsistgrp(self, **kwargs): - 
rows = [] - - if 'obj' not in kwargs: - rows.append(['id', 'name', 'status' 'start_time']) - - for fcconsistgrp in self._fcconsistgrp_list.values(): - rows.append([fcconsistgrp['id'], - fcconsistgrp['name'], - fcconsistgrp['status'], - fcconsistgrp['start_time']]) - return self._print_info_cmd(rows=rows, **kwargs) - else: - fcconsistgrp = None - cg_id = 0 - for cg_id in self._fcconsistgrp_list.keys(): - if self._fcconsistgrp_list[cg_id]['name'] == kwargs['obj']: - fcconsistgrp = self._fcconsistgrp_list[cg_id] - rows = [] - rows.append(['id', six.text_type(cg_id)]) - rows.append(['name', fcconsistgrp['name']]) - rows.append(['status', fcconsistgrp['status']]) - rows.append(['autodelete', - six.text_type(fcconsistgrp['autodelete'])]) - rows.append(['start_time', - six.text_type(fcconsistgrp['start_time'])]) - - for fcmap_id in fcconsistgrp['fcmaps'].keys(): - rows.append(['FC_mapping_id', six.text_type(fcmap_id)]) - rows.append(['FC_mapping_name', - fcconsistgrp['fcmaps'][fcmap_id]]) - - if 'delim' in kwargs: - for index in range(len(rows)): - rows[index] = kwargs['delim'].join(rows[index]) - self._fc_cg_state_transition('wait', fcconsistgrp) - return ('%s' % '\n'.join(rows), '') - - def _cmd_migratevdisk(self, **kwargs): - if 'mdiskgrp' not in kwargs or 'vdisk' not in kwargs: - return self._errors['CMMVC5707E'] - mdiskgrp = kwargs['mdiskgrp'].strip('\'\"') - vdisk = kwargs['vdisk'].strip('\'\"') - copy_id = kwargs['copy'] - if vdisk not in self._volumes_list: - return self._errors['CMMVC5753E'] - mdiskgrp_id = str(self._get_mdiskgrp_id(mdiskgrp)) - - self._volumes_list[vdisk]['mdisk_grp_name'] = mdiskgrp - self._volumes_list[vdisk]['mdisk_grp_id'] = mdiskgrp_id - - vol = self._volumes_list[vdisk] - vol['copies'][copy_id]['mdisk_grp_name'] = mdiskgrp - vol['copies'][copy_id]['mdisk_grp_id'] = mdiskgrp_id - return ('', '') - - def _cmd_addvdiskcopy(self, **kwargs): - if 'obj' not in kwargs: - return self._errors['CMMVC5701E'] - vol_name = kwargs['obj'].strip('\'\"') - 
if vol_name not in self._volumes_list: - return self._errors['CMMVC5753E'] - vol = self._volumes_list[vol_name] - if 'mdiskgrp' not in kwargs: - return self._errors['CMMVC5707E'] - mdiskgrp = kwargs['mdiskgrp'].strip('\'\"') - if mdiskgrp == kwargs['mdiskgrp']: - raise exception.InvalidInput( - reason=_('mdiskgrp missing quotes %s') % kwargs['mdiskgrp']) - auto_del = True if 'autodelete' in kwargs else False - - copy_info = {} - copy_info['id'] = self._find_unused_id(vol['copies']) - copy_info['status'] = 'online' - copy_info['sync'] = 'no' - copy_info['primary'] = 'no' - copy_info['mdisk_grp_name'] = mdiskgrp - copy_info['mdisk_grp_id'] = str(self._get_mdiskgrp_id(mdiskgrp)) - - if 'easytier' in kwargs: - if kwargs['easytier'] == 'on': - copy_info['easy_tier'] = 'on' - else: - copy_info['easy_tier'] = 'off' - if 'rsize' in kwargs: - if 'compressed' in kwargs: - copy_info['compressed_copy'] = 'yes' - else: - copy_info['compressed_copy'] = 'no' - vol['copies'][copy_info['id']] = copy_info - if auto_del: - del_copy_id = None - for v in vol['copies'].values(): - if v['id'] != copy_info['id']: - del_copy_id = v['id'] - break - if del_copy_id: - del vol['copies'][del_copy_id] - return ('Vdisk [%(vid)s] copy [%(cid)s] successfully created' % - {'vid': vol['id'], 'cid': copy_info['id']}, '') - - def _cmd_lsvdiskcopy(self, **kwargs): - if 'obj' not in kwargs: - return self._errors['CMMVC5804E'] - name = kwargs['obj'] - vol = self._volumes_list[name] - rows = [] - rows.append(['vdisk_id', 'vdisk_name', 'copy_id', 'status', 'sync', - 'primary', 'mdisk_grp_id', 'mdisk_grp_name', 'capacity', - 'type', 'se_copy', 'easy_tier', 'easy_tier_status', - 'compressed_copy']) - for copy in vol['copies'].values(): - rows.append([vol['id'], vol['name'], copy['id'], - copy['status'], copy['sync'], copy['primary'], - copy['mdisk_grp_id'], copy['mdisk_grp_name'], - vol['capacity'], 'striped', 'yes', copy['easy_tier'], - 'inactive', copy['compressed_copy']]) - if 'copy' not in kwargs: - 
return self._print_info_cmd(rows=rows, **kwargs) - else: - copy_id = kwargs['copy'].strip('\'\"') - if copy_id not in vol['copies']: - return self._errors['CMMVC6353E'] - copy = vol['copies'][copy_id] - rows = [] - rows.append(['vdisk_id', vol['id']]) - rows.append(['vdisk_name', vol['name']]) - rows.append(['capacity', vol['capacity']]) - rows.append(['copy_id', copy['id']]) - rows.append(['status', copy['status']]) - rows.append(['sync', copy['sync']]) - copy['sync'] = 'yes' - rows.append(['primary', copy['primary']]) - rows.append(['mdisk_grp_id', copy['mdisk_grp_id']]) - rows.append(['mdisk_grp_name', copy['mdisk_grp_name']]) - rows.append(['easy_tier', copy['easy_tier']]) - rows.append(['easy_tier_status', 'inactive']) - rows.append(['compressed_copy', copy['compressed_copy']]) - rows.append(['autoexpand', vol['autoexpand']]) - - if 'delim' in kwargs: - for index in range(len(rows)): - rows[index] = kwargs['delim'].join(rows[index]) - - return ('%s' % '\n'.join(rows), '') - - def _cmd_rmvdiskcopy(self, **kwargs): - if 'obj' not in kwargs: - return self._errors['CMMVC5701E'] - vol_name = kwargs['obj'].strip('\'\"') - if 'copy' not in kwargs: - return self._errors['CMMVC5707E'] - copy_id = kwargs['copy'].strip('\'\"') - if vol_name not in self._volumes_list: - return self._errors['CMMVC5753E'] - vol = self._volumes_list[vol_name] - if copy_id not in vol['copies']: - return self._errors['CMMVC6353E'] - del vol['copies'][copy_id] - - return ('', '') - - def _cmd_chvdisk(self, **kwargs): - if 'obj' not in kwargs: - return self._errors['CMMVC5701E'] - vol_name = kwargs['obj'].strip('\'\"') - vol = self._volumes_list[vol_name] - kwargs.pop('obj') - - params = ['name', 'warning', 'udid', - 'autoexpand', 'easytier', 'primary'] - for key, value in kwargs.items(): - if key == 'easytier': - vol['easy_tier'] = value - for copy in vol['copies'].values(): - vol['copies'][copy['id']]['easy_tier'] = value - continue - if key == 'warning': - vol['warning'] = value.rstrip('%') - 
continue - if key == 'name': - vol['name'] = value - del self._volumes_list[vol_name] - self._volumes_list[value] = vol - if key == 'primary': - if value == '0': - self._volumes_list[vol_name]['copies']['0']['primary']\ - = 'yes' - self._volumes_list[vol_name]['copies']['1']['primary']\ - = 'no' - elif value == '1': - self._volumes_list[vol_name]['copies']['0']['primary']\ - = 'no' - self._volumes_list[vol_name]['copies']['1']['primary']\ - = 'yes' - else: - err = self._errors['CMMVC6353E'][1] % {'VALUE': key} - return ('', err) - if key in params: - vol[key] = value - if key == 'autoexpand': - for copy in vol['copies'].values(): - vol['copies'][copy['id']]['autoexpand'] = value - else: - err = self._errors['CMMVC5709E'][1] % {'VALUE': key} - return ('', err) - return ('', '') - - def _cmd_movevdisk(self, **kwargs): - if 'obj' not in kwargs: - return self._errors['CMMVC5701E'] - vol_name = kwargs['obj'].strip('\'\"') - vol = self._volumes_list[vol_name] - - if 'iogrp' not in kwargs: - return self._errors['CMMVC5707E'] - - iogrp = kwargs['iogrp'] - if iogrp.isdigit(): - vol['IO_group_id'] = iogrp - vol['IO_group_name'] = 'io_grp%s' % iogrp - else: - vol['IO_group_id'] = iogrp[6:] - vol['IO_group_name'] = iogrp - return ('', '') - - def _cmd_addvdiskaccess(self, **kwargs): - if 'obj' not in kwargs: - return self._errors['CMMVC5701E'] - return ('', '') - - def _cmd_rmvdiskaccess(self, **kwargs): - if 'obj' not in kwargs: - return self._errors['CMMVC5701E'] - return ('', '') - - # list vdisk sync process - def _cmd_lsvdisksyncprogress(self, **kwargs): - if 'obj' not in kwargs: - return self._errors['CMMVC5804E'] - name = kwargs['obj'] - copy_id = kwargs.get('copy', None) - vol = self._volumes_list[name] - rows = [] - rows.append(['vdisk_id', 'vdisk_name', 'copy_id', 'progress', - 'estimated_completion_time']) - copy_found = False - for copy in vol['copies'].values(): - if not copy_id or copy_id == copy['id']: - copy_found = True - row = [vol['id'], name, copy['id']] - 
if copy['sync'] == 'yes': - row.extend(['100', '']) - else: - row.extend(['50', '140210115226']) - copy['sync'] = 'yes' - rows.append(row) - if not copy_found: - return self._errors['CMMVC5804E'] - return self._print_info_cmd(rows=rows, **kwargs) - - def _add_host_to_list(self, connector): - host_info = {} - host_info['id'] = self._find_unused_id(self._hosts_list) - host_info['host_name'] = connector['host'] - host_info['iscsi_names'] = [] - host_info['wwpns'] = [] - if 'initiator' in connector: - host_info['iscsi_names'].append(connector['initiator']) - if 'wwpns' in connector: - host_info['wwpns'] = host_info['wwpns'] + connector['wwpns'] - self._hosts_list[connector['host']] = host_info - - def _host_in_list(self, host_name): - for k in self._hosts_list: - if k.startswith(host_name): - return k - return None - - # Replication related command - # Create a remote copy - def _cmd_mkrcrelationship(self, **kwargs): - master_vol = '' - aux_vol = '' - aux_cluster = '' - master_sys = self._system_list['storwize-svc-sim'] - aux_sys = self._system_list['aux-svc-sim'] - - if 'master' not in kwargs: - return self._errors['CMMVC5707E'] - master_vol = kwargs['master'].strip('\'\"') - if master_vol not in self._volumes_list: - return self._errors['CMMVC5754E'] - - if 'aux' not in kwargs: - return self._errors['CMMVC5707E'] - aux_vol = kwargs['aux'].strip('\'\"') - if aux_vol not in self._volumes_list: - return self._errors['CMMVC5754E'] - - if 'cluster' not in kwargs: - return self._errors['CMMVC5707E'] - aux_cluster = kwargs['cluster'].strip('\'\"') - if aux_cluster != aux_sys['name']: - return self._errors['CMMVC5754E'] - - if (self._volumes_list[master_vol]['capacity'] != - self._volumes_list[aux_vol]['capacity']): - return self._errors['CMMVC5754E'] - - cyclingmode = None - if 'cyclingmode' in kwargs: - cyclingmode = kwargs['cyclingmode'].strip('\'\"') - - rcrel_info = {} - rcrel_info['id'] = self._find_unused_id(self._rcrelationship_list) - rcrel_info['name'] = 'rcrel' + 
rcrel_info['id'] - rcrel_info['master_cluster_id'] = master_sys['id'] - rcrel_info['master_cluster_name'] = master_sys['name'] - rcrel_info['master_vdisk_id'] = self._volumes_list[master_vol]['id'] - rcrel_info['master_vdisk_name'] = master_vol - rcrel_info['aux_cluster_id'] = aux_sys['id'] - rcrel_info['aux_cluster_name'] = aux_sys['name'] - rcrel_info['aux_vdisk_id'] = self._volumes_list[aux_vol]['id'] - rcrel_info['aux_vdisk_name'] = aux_vol - rcrel_info['primary'] = 'master' - rcrel_info['consistency_group_id'] = '' - rcrel_info['consistency_group_name'] = '' - rcrel_info['state'] = 'inconsistent_stopped' - rcrel_info['bg_copy_priority'] = '50' - rcrel_info['progress'] = '0' - rcrel_info['freeze_time'] = '' - rcrel_info['status'] = 'online' - rcrel_info['sync'] = '' - rcrel_info['copy_type'] = 'global' if 'global' in kwargs else 'metro' - rcrel_info['cycling_mode'] = cyclingmode if cyclingmode else '' - rcrel_info['cycle_period_seconds'] = '300' - rcrel_info['master_change_vdisk_id'] = '' - rcrel_info['master_change_vdisk_name'] = '' - rcrel_info['aux_change_vdisk_id'] = '' - rcrel_info['aux_change_vdisk_name'] = '' - - self._rcrelationship_list[rcrel_info['name']] = rcrel_info - self._volumes_list[master_vol]['RC_name'] = rcrel_info['name'] - self._volumes_list[master_vol]['RC_id'] = rcrel_info['id'] - self._volumes_list[aux_vol]['RC_name'] = rcrel_info['name'] - self._volumes_list[aux_vol]['RC_id'] = rcrel_info['id'] - return('RC Relationship, id [' + rcrel_info['id'] + - '], successfully created', '') - - def _cmd_lsrcrelationship(self, **kwargs): - rows = [] - - if 'obj' in kwargs: - name = kwargs['obj'] - for k, v in self._rcrelationship_list.items(): - if six.text_type(v['name']) == name: - self._rc_state_transition('wait', v) - - if self._next_cmd_error['lsrcrelationship'] == 'speed_up': - self._next_cmd_error['lsrcrelationship'] = '' - curr_state = v['status'] - while self._rc_state_transition('wait', v) == ("", ""): - if curr_state == v['status']: - 
break - curr_state = v['status'] - - rows.append(['id', v['id']]) - rows.append(['name', v['name']]) - rows.append(['master_cluster_id', v['master_cluster_id']]) - rows.append(['master_cluster_name', - v['master_cluster_name']]) - rows.append(['master_vdisk_id', v['master_vdisk_id']]) - rows.append(['master_vdisk_name', v['master_vdisk_name']]) - rows.append(['aux_cluster_id', v['aux_cluster_id']]) - rows.append(['aux_cluster_name', v['aux_cluster_name']]) - rows.append(['aux_vdisk_id', v['aux_vdisk_id']]) - rows.append(['aux_vdisk_name', v['aux_vdisk_name']]) - rows.append(['consistency_group_id', - v['consistency_group_id']]) - rows.append(['primary', v['primary']]) - rows.append(['consistency_group_name', - v['consistency_group_name']]) - rows.append(['state', v['state']]) - rows.append(['bg_copy_priority', v['bg_copy_priority']]) - rows.append(['progress', v['progress']]) - rows.append(['freeze_time', v['freeze_time']]) - rows.append(['status', v['status']]) - rows.append(['sync', v['sync']]) - rows.append(['copy_type', v['copy_type']]) - rows.append(['cycling_mode', v['cycling_mode']]) - rows.append(['cycle_period_seconds', - v['cycle_period_seconds']]) - rows.append(['master_change_vdisk_id', - v['master_change_vdisk_id']]) - rows.append(['master_change_vdisk_name', - v['master_change_vdisk_name']]) - rows.append(['aux_change_vdisk_id', - v['aux_change_vdisk_id']]) - rows.append(['aux_change_vdisk_name', - v['aux_change_vdisk_name']]) - - if 'nohdr' in kwargs: - for index in range(len(rows)): - rows[index] = ' '.join(rows[index][1:]) - if 'delim' in kwargs: - for index in range(len(rows)): - rows[index] = kwargs['delim'].join(rows[index]) - - return ('%s' % '\n'.join(rows), '') - - def _cmd_startrcrelationship(self, **kwargs): - if 'obj' not in kwargs: - return self._errors['CMMVC5701E'] - id_num = kwargs['obj'] - - primary_vol = None - if 'primary' in kwargs: - primary_vol = kwargs['primary'].strip('\'\"') - - try: - rcrel = self._rcrelationship_list[id_num] 
- except KeyError: - return self._errors['CMMVC5753E'] - - if rcrel['state'] == 'idling' and not primary_vol: - return self._errors['CMMVC5963E'] - - self._rc_state_transition('start', rcrel) - if primary_vol: - self._rcrelationship_list[id_num]['primary'] = primary_vol - return ('', '') - - def _cmd_stoprcrelationship(self, **kwargs): - if 'obj' not in kwargs: - return self._errors['CMMVC5701E'] - id_num = kwargs['obj'] - force_access = True if 'access' in kwargs else False - - try: - rcrel = self._rcrelationship_list[id_num] - except KeyError: - return self._errors['CMMVC5753E'] - - function = 'stop_access' if force_access else 'stop' - self._rc_state_transition(function, rcrel) - if force_access: - self._rcrelationship_list[id_num]['primary'] = '' - return ('', '') - - def _cmd_switchrcrelationship(self, **kwargs): - if 'obj' not in kwargs: - return self._errors['CMMVC5707E'] - id_num = kwargs['obj'] - - try: - rcrel = self._rcrelationship_list[id_num] - except KeyError: - return self._errors['CMMVC5753E'] - - if rcrel['state'] == storwize_const.REP_CONSIS_SYNC: - rcrel['primary'] = kwargs['primary'] - return ('', '') - else: - return self._errors['CMMVC5753E'] - - def _cmd_rmrcrelationship(self, **kwargs): - if 'obj' not in kwargs: - return self._errors['CMMVC5701E'] - id_num = kwargs['obj'] - force = True if 'force' in kwargs else False - - try: - rcrel = self._rcrelationship_list[id_num] - except KeyError: - return self._errors['CMMVC5753E'] - - function = 'delete_force' if force else 'delete' - self._rc_state_transition(function, rcrel) - if rcrel['state'] == 'end': - self._volumes_list[rcrel['master_vdisk_name']]['RC_name'] = '' - self._volumes_list[rcrel['master_vdisk_name']]['RC_id'] = '' - self._volumes_list[rcrel['aux_vdisk_name']]['RC_name'] = '' - self._volumes_list[rcrel['aux_vdisk_name']]['RC_id'] = '' - del self._rcrelationship_list[id_num] - - return ('', '') - - def _cmd_chrcrelationship(self, **kwargs): - if 'obj' not in kwargs: - return 
self._errors['CMMVC5707E'] - id_num = kwargs['obj'] - - try: - rcrel = self._rcrelationship_list[id_num] - except KeyError: - return self._errors['CMMVC5753E'] - - nonull_num = 0 - masterchange = None - if 'masterchange' in kwargs: - masterchange = kwargs['masterchange'].strip('\'\"') - nonull_num += 1 - - auxchange = None - if 'auxchange' in kwargs: - auxchange = kwargs['auxchange'].strip('\'\"') - nonull_num += 1 - - cycleperiodseconds = None - if 'cycleperiodseconds' in kwargs: - cycleperiodseconds = kwargs['cycleperiodseconds'].strip('\'\"') - nonull_num += 1 - - if nonull_num > 1: - return self._errors['CMMVC5713E'] - elif masterchange: - rcrel['master_change_vdisk_name'] = masterchange - return ('', '') - elif auxchange: - rcrel['aux_change_vdisk_name'] = auxchange - return ('', '') - elif cycleperiodseconds: - rcrel['cycle_period_seconds'] = cycleperiodseconds - return ('', '') - - def _rc_state_transition(self, function, rcrel): - if (function == 'wait' and - 'wait' not in self._rc_transitions[rcrel['state']]): - return ('', '') - - if rcrel['state'] == 'inconsistent_copying' and function == 'wait': - if rcrel['progress'] == '0': - rcrel['progress'] = '50' - elif (storwize_const.GMCV_MULTI == rcrel['cycling_mode'] - and storwize_const.GLOBAL == rcrel['copy_type']): - rcrel['progress'] = '100' - rcrel['state'] = 'consistent_copying' - else: - rcrel['progress'] = '100' - rcrel['state'] = 'consistent_synchronized' - return ('', '') - else: - try: - curr_state = rcrel['state'] - rcrel['state'] = self._rc_transitions[curr_state][function] - return ('', '') - except Exception: - return self._errors['CMMVC5982E'] - - def _cmd_lspartnershipcandidate(self, **kwargs): - rows = [None] * 4 - master_sys = self._system_list['storwize-svc-sim'] - aux_sys = self._system_list['aux-svc-sim'] - rows[0] = ['id', 'configured', 'name'] - rows[1] = [master_sys['id'], 'no', master_sys['name']] - rows[2] = [aux_sys['id'], 'no', aux_sys['name']] - rows[3] = ['0123456789001234', 
'no', 'fake_svc'] - return self._print_info_cmd(rows=rows, **kwargs) - - def _cmd_lspartnership(self, **kwargs): - rows = [] - rows.append(['id', 'name', 'location', 'partnership', - 'type', 'cluster_ip', 'event_log_sequence']) - - master_sys = self._system_list['storwize-svc-sim'] - if master_sys['name'] not in self._partnership_list: - local_info = {} - local_info['id'] = master_sys['id'] - local_info['name'] = master_sys['name'] - local_info['location'] = 'local' - local_info['type'] = '' - local_info['cluster_ip'] = '' - local_info['event_log_sequence'] = '' - local_info['chap_secret'] = '' - local_info['linkbandwidthmbits'] = '' - local_info['backgroundcopyrate'] = '' - local_info['partnership'] = '' - self._partnership_list[master_sys['id']] = local_info - - # Assume we always get a filtervalue argument - filter_key = kwargs['filtervalue'].split('=')[0] - filter_value = kwargs['filtervalue'].split('=')[1] - for k, v in self._partnership_list.items(): - if six.text_type(v[filter_key]) == filter_value: - rows.append([v['id'], v['name'], v['location'], - v['partnership'], v['type'], v['cluster_ip'], - v['event_log_sequence']]) - return self._print_info_cmd(rows=rows, **kwargs) - - def _cmd_mkippartnership(self, **kwargs): - if 'clusterip' not in kwargs: - return self._errors['CMMVC5707E'] - clusterip = kwargs['master'].strip('\'\"') - - if 'linkbandwidthmbits' not in kwargs: - return self._errors['CMMVC5707E'] - bandwith = kwargs['linkbandwidthmbits'].strip('\'\"') - - if 'backgroundcopyrate' not in kwargs: - return self._errors['CMMVC5707E'] - copyrate = kwargs['backgroundcopyrate'].strip('\'\"') - - if clusterip == '192.168.10.21': - partner_info_id = self._system_list['storwize-svc-sim']['id'] - partner_info_name = self._system_list['storwize-svc-sim']['name'] - else: - partner_info_id = self._system_list['aux-svc-sim']['id'] - partner_info_name = self._system_list['aux-svc-sim']['name'] - - partner_info = {} - partner_info['id'] = partner_info_id - 
partner_info['name'] = partner_info_name - partner_info['location'] = 'remote' - partner_info['type'] = 'ipv4' - partner_info['cluster_ip'] = clusterip - partner_info['event_log_sequence'] = '' - partner_info['chap_secret'] = '' - partner_info['linkbandwidthmbits'] = bandwith - partner_info['backgroundcopyrate'] = copyrate - partner_info['partnership'] = 'fully_configured' - - self._partnership_list[partner_info['id']] = partner_info - return('', '') - - def _cmd_mkfcpartnership(self, **kwargs): - if 'obj' not in kwargs: - return self._errors['CMMVC5701E'] - peer_sys = kwargs['obj'] - - if 'linkbandwidthmbits' not in kwargs: - return self._errors['CMMVC5707E'] - bandwith = kwargs['linkbandwidthmbits'].strip('\'\"') - - if 'backgroundcopyrate' not in kwargs: - return self._errors['CMMVC5707E'] - copyrate = kwargs['backgroundcopyrate'].strip('\'\"') - - partner_info = {} - partner_info['id'] = self._system_list[peer_sys]['id'] - partner_info['name'] = peer_sys - partner_info['location'] = 'remote' - partner_info['type'] = 'fc' - partner_info['cluster_ip'] = '' - partner_info['event_log_sequence'] = '' - partner_info['chap_secret'] = '' - partner_info['linkbandwidthmbits'] = bandwith - partner_info['backgroundcopyrate'] = copyrate - partner_info['partnership'] = 'fully_configured' - self._partnership_list[partner_info['id']] = partner_info - return('', '') - - def _cmd_chpartnership(self, **kwargs): - if 'obj' not in kwargs: - return self._errors['CMMVC5701E'] - peer_sys = kwargs['obj'] - if peer_sys not in self._partnership_list: - return self._errors['CMMVC5753E'] - - partner_state = ('fully_configured' if 'start'in kwargs - else 'fully_configured_stopped') - self._partnership_list[peer_sys]['partnership'] = partner_state - return('', '') - - # The main function to run commands on the management simulator - def execute_command(self, cmd, check_exit_code=True): - try: - kwargs = self._cmd_to_dict(cmd) - except IndexError: - return self._errors['CMMVC5707E'] - - 
command = kwargs.pop('cmd') - func = getattr(self, '_cmd_' + command) - out, err = func(**kwargs) - - if (check_exit_code) and (len(err) != 0): - raise processutils.ProcessExecutionError(exit_code=1, - stdout=out, - stderr=err, - cmd=' '.join(cmd)) - - return (out, err) - - # After calling this function, the next call to the specified command will - # result in in the error specified - def error_injection(self, cmd, error): - self._next_cmd_error[cmd] = error - - def change_vdiskcopy_attr(self, vol_name, key, value, copy="primary"): - if copy == 'primary': - self._volumes_list[vol_name]['copies']['0'][key] = value - elif copy == 'secondary': - self._volumes_list[vol_name]['copies']['1'][key] = value - else: - msg = _("The copy should be primary or secondary") - raise exception.InvalidInput(reason=msg) - - -class StorwizeSVCISCSIFakeDriver(storwize_svc_iscsi.StorwizeSVCISCSIDriver): - def __init__(self, *args, **kwargs): - super(StorwizeSVCISCSIFakeDriver, self).__init__(*args, **kwargs) - - def set_fake_storage(self, fake): - self.fake_storage = fake - - def _run_ssh(self, cmd, check_exit_code=True, attempts=1): - utils.check_ssh_injection(cmd) - ret = self.fake_storage.execute_command(cmd, check_exit_code) - - return ret - - -class StorwizeSVCFcFakeDriver(storwize_svc_fc.StorwizeSVCFCDriver): - def __init__(self, *args, **kwargs): - super(StorwizeSVCFcFakeDriver, self).__init__(*args, **kwargs) - - def set_fake_storage(self, fake): - self.fake_storage = fake - - def _run_ssh(self, cmd, check_exit_code=True, attempts=1): - utils.check_ssh_injection(cmd) - ret = self.fake_storage.execute_command(cmd, check_exit_code) - - return ret - - -class StorwizeSVCISCSIDriverTestCase(test.TestCase): - @mock.patch.object(time, 'sleep') - def setUp(self, mock_sleep): - super(StorwizeSVCISCSIDriverTestCase, self).setUp() - self.USESIM = True - if self.USESIM: - self.iscsi_driver = StorwizeSVCISCSIFakeDriver( - configuration=conf.Configuration([], conf.SHARED_CONF_GROUP)) - 
self._def_flags = {'san_ip': 'hostname', - 'san_login': 'user', - 'san_password': 'pass', - 'storwize_svc_volpool_name': ['openstack'], - 'storwize_svc_flashcopy_timeout': 20, - 'storwize_svc_flashcopy_rate': 49, - 'storwize_svc_multipath_enabled': False, - 'storwize_svc_allow_tenant_qos': True} - wwpns = [ - six.text_type(random.randint(0, 9999999999999999)).zfill(16), - six.text_type(random.randint(0, 9999999999999999)).zfill(16)] - initiator = 'test.initiator.%s' % six.text_type( - random.randint(10000, 99999)) - self._connector = {'ip': '1.234.56.78', - 'host': 'storwize-svc-test', - 'wwpns': wwpns, - 'initiator': initiator} - self.sim = StorwizeSVCManagementSimulator(['openstack']) - - self.iscsi_driver.set_fake_storage(self.sim) - self.ctxt = context.get_admin_context() - - self._reset_flags() - self.ctxt = context.get_admin_context() - db_driver = CONF.db_driver - self.db = importutils.import_module(db_driver) - self.iscsi_driver.db = self.db - self.iscsi_driver.do_setup(None) - self.iscsi_driver.check_for_setup_error() - self.iscsi_driver._helpers.check_fcmapping_interval = 0 - - def _set_flag(self, flag, value): - group = self.iscsi_driver.configuration.config_group - self.override_config(flag, value, group) - - def _reset_flags(self): - CONF.reset() - for k, v in self._def_flags.items(): - self._set_flag(k, v) - - def _create_volume(self, **kwargs): - pool = _get_test_pool() - prop = {'host': 'openstack@svc#%s' % pool, - 'size': 1} - for p in prop.keys(): - if p not in kwargs: - kwargs[p] = prop[p] - vol = testutils.create_volume(self.ctxt, **kwargs) - self.iscsi_driver.create_volume(vol) - return vol - - def _delete_volume(self, volume): - self.iscsi_driver.delete_volume(volume) - self.db.volume_destroy(self.ctxt, volume['id']) - - def _generate_vol_info(self, vol_name, vol_id): - pool = _get_test_pool() - prop = {'mdisk_grp_name': pool} - if vol_name: - prop.update(volume_name=vol_name, - volume_id=vol_id, - volume_size=10) - else: - 
prop.update(size=10, - volume_type_id=None, - mdisk_grp_name=pool, - host='openstack@svc#%s' % pool) - vol = testutils.create_volume(self.ctxt, **prop) - return vol - - def _assert_vol_exists(self, name, exists): - is_vol_defined = self.iscsi_driver._helpers.is_vdisk_defined(name) - self.assertEqual(exists, is_vol_defined) - - def test_storwize_svc_iscsi_validate_connector(self): - conn_neither = {'host': 'host'} - conn_iscsi = {'host': 'host', 'initiator': 'foo'} - conn_fc = {'host': 'host', 'wwpns': 'bar'} - conn_both = {'host': 'host', 'initiator': 'foo', 'wwpns': 'bar'} - - self.iscsi_driver._state['enabled_protocols'] = set(['iSCSI']) - self.iscsi_driver.validate_connector(conn_iscsi) - self.iscsi_driver.validate_connector(conn_both) - self.assertRaises(exception.InvalidConnectorException, - self.iscsi_driver.validate_connector, conn_fc) - self.assertRaises(exception.InvalidConnectorException, - self.iscsi_driver.validate_connector, conn_neither) - - self.iscsi_driver._state['enabled_protocols'] = set(['iSCSI', 'FC']) - self.iscsi_driver.validate_connector(conn_iscsi) - self.iscsi_driver.validate_connector(conn_both) - self.assertRaises(exception.InvalidConnectorException, - self.iscsi_driver.validate_connector, conn_neither) - - def test_storwize_terminate_iscsi_connection(self): - # create a iSCSI volume - volume_iSCSI = self._create_volume() - extra_spec = {'capabilities:storage_protocol': ' iSCSI'} - vol_type_iSCSI = volume_types.create(self.ctxt, 'iSCSI', extra_spec) - volume_iSCSI['volume_type_id'] = vol_type_iSCSI['id'] - - connector = {'host': 'storwize-svc-host', - 'wwnns': ['20000090fa17311e', '20000090fa17311f'], - 'wwpns': ['ff00000000000000', 'ff00000000000001'], - 'initiator': 'iqn.1993-08.org.debian:01:eac5ccc1aaa'} - - self.iscsi_driver.initialize_connection(volume_iSCSI, connector) - self.iscsi_driver.terminate_connection(volume_iSCSI, connector) - - @mock.patch.object(storwize_svc_iscsi.StorwizeSVCISCSIDriver, - '_do_terminate_connection') - 
@mock.patch.object(storwize_svc_iscsi.StorwizeSVCISCSIDriver, - '_do_initialize_connection') - def test_storwize_do_terminate_iscsi_connection(self, init_conn, - term_conn): - # create a iSCSI volume - volume_iSCSI = self._create_volume() - extra_spec = {'capabilities:storage_protocol': ' iSCSI'} - vol_type_iSCSI = volume_types.create(self.ctxt, 'iSCSI', extra_spec) - volume_iSCSI['volume_type_id'] = vol_type_iSCSI['id'] - - connector = {'host': 'storwize-svc-host', - 'wwnns': ['20000090fa17311e', '20000090fa17311f'], - 'wwpns': ['ff00000000000000', 'ff00000000000001'], - 'initiator': 'iqn.1993-08.org.debian:01:eac5ccc1aaa'} - - self.iscsi_driver.initialize_connection(volume_iSCSI, connector) - self.iscsi_driver.terminate_connection(volume_iSCSI, connector) - init_conn.assert_called_once_with(volume_iSCSI, connector) - term_conn.assert_called_once_with(volume_iSCSI, connector) - - @mock.patch.object(storwize_svc_iscsi.StorwizeSVCISCSIDriver, - '_do_terminate_connection') - def test_storwize_initialize_iscsi_connection_failure(self, term_conn): - # create a iSCSI volume - volume_iSCSI = self._create_volume() - extra_spec = {'capabilities:storage_protocol': ' iSCSI'} - vol_type_iSCSI = volume_types.create(self.ctxt, 'iSCSI', extra_spec) - volume_iSCSI['volume_type_id'] = vol_type_iSCSI['id'] - - connector = {'host': 'storwize-svc-host', - 'wwnns': ['20000090fa17311e', '20000090fa17311f'], - 'wwpns': ['ff00000000000000', 'ff00000000000001'], - 'initiator': 'iqn.1993-08.org.debian:01:eac5ccc1aaa'} - - self.iscsi_driver._state['storage_nodes'] = {} - self.assertRaises(exception.VolumeBackendAPIException, - self.iscsi_driver.initialize_connection, - volume_iSCSI, connector) - term_conn.assert_called_once_with(volume_iSCSI, connector) - - def test_storwize_terminate_iscsi_connection_multi_attach(self): - # create a iSCSI volume - volume_iSCSI = self._create_volume() - extra_spec = {'capabilities:storage_protocol': ' iSCSI'} - vol_type_iSCSI = 
volume_types.create(self.ctxt, 'iSCSI', extra_spec) - volume_iSCSI['volume_type_id'] = vol_type_iSCSI['id'] - - connector = {'host': 'storwize-svc-host', - 'wwnns': ['20000090fa17311e', '20000090fa17311f'], - 'wwpns': ['ff00000000000000', 'ff00000000000001'], - 'initiator': 'iqn.1993-08.org.debian:01:eac5ccc1aaa'} - connector2 = {'host': 'STORWIZE-SVC-HOST', - 'wwnns': ['30000090fa17311e', '30000090fa17311f'], - 'wwpns': ['ffff000000000000', 'ffff000000000001'], - 'initiator': 'iqn.1993-08.org.debian:01:eac5ccc1bbb'} - - # map and unmap the volume to two hosts normal case - self.iscsi_driver.initialize_connection(volume_iSCSI, connector) - self.iscsi_driver.initialize_connection(volume_iSCSI, connector2) - for conn in [connector, connector2]: - host = self.iscsi_driver._helpers.get_host_from_connector( - conn, iscsi=True) - self.assertIsNotNone(host) - self.iscsi_driver.terminate_connection(volume_iSCSI, connector) - self.iscsi_driver.terminate_connection(volume_iSCSI, connector2) - # validate that the host entries are deleted - for conn in [connector, connector2]: - host = self.iscsi_driver._helpers.get_host_from_connector(conn) - self.assertIsNone(host) - # map and unmap the volume to two hosts with the mapping removed - self.iscsi_driver.initialize_connection(volume_iSCSI, connector) - self.iscsi_driver.initialize_connection(volume_iSCSI, connector2) - # Test multiple attachments case - host_name = self.iscsi_driver._helpers.get_host_from_connector( - connector2, iscsi=True) - self.iscsi_driver._helpers.unmap_vol_from_host( - volume_iSCSI['name'], host_name) - host_name = self.iscsi_driver._helpers.get_host_from_connector( - connector2, iscsi=True) - self.assertIsNotNone(host_name) - with mock.patch.object(storwize_svc_common.StorwizeSSH, - 'rmvdiskhostmap') as rmmap: - rmmap.side_effect = Exception('boom') - self.iscsi_driver.terminate_connection(volume_iSCSI, - connector2) - host_name = self.iscsi_driver._helpers.get_host_from_connector( - connector2, 
iscsi=True) - self.assertIsNone(host_name) - # Test single attachment case - self.iscsi_driver._helpers.unmap_vol_from_host( - volume_iSCSI['name'], host_name) - with mock.patch.object(storwize_svc_common.StorwizeSSH, - 'rmvdiskhostmap') as rmmap: - rmmap.side_effect = Exception('boom') - self.iscsi_driver.terminate_connection(volume_iSCSI, connector) - # validate that the host entries are deleted - for conn in [connector, connector2]: - host = self.iscsi_driver._helpers.get_host_from_connector( - conn, iscsi=True) - self.assertIsNone(host) - - def test_storwize_initialize_iscsi_connection_single_path(self): - # Test the return value for _get_iscsi_properties - - connector = {'host': 'storwize-svc-host', - 'wwnns': ['20000090fa17311e', '20000090fa17311f'], - 'wwpns': ['ff00000000000000', 'ff00000000000001'], - 'initiator': 'iqn.1993-08.org.debian:01:eac5ccc1aaa'} - # Expected single path host-volume map return value - exp_s_path = {'driver_volume_type': 'iscsi', - 'data': {'target_discovered': False, - 'target_iqn': - 'iqn.1982-01.com.ibm:1234.sim.node1', - 'target_portal': '1.234.56.78:3260', - 'target_lun': 0, - 'auth_method': 'CHAP', - 'discovery_auth_method': 'CHAP'}} - - volume_iSCSI = self._create_volume() - extra_spec = {'capabilities:storage_protocol': ' iSCSI'} - vol_type_iSCSI = volume_types.create(self.ctxt, 'iSCSI', extra_spec) - volume_iSCSI['volume_type_id'] = vol_type_iSCSI['id'] - - # Make sure that the volumes have been created - self._assert_vol_exists(volume_iSCSI['name'], True) - - # Check case where no hosts exist - ret = self.iscsi_driver._helpers.get_host_from_connector( - connector, iscsi=True) - self.assertIsNone(ret) - - # Initialize connection to map volume to a host - ret = self.iscsi_driver.initialize_connection( - volume_iSCSI, connector) - self.assertEqual(exp_s_path['driver_volume_type'], - ret['driver_volume_type']) - - # Check the single path host-volume map return value - for k, v in exp_s_path['data'].items(): - 
self.assertEqual(v, ret['data'][k]) - - ret = self.iscsi_driver._helpers.get_host_from_connector( - connector, iscsi=True) - self.assertIsNotNone(ret) - - def test_storwize_initialize_iscsi_connection_multipath(self): - # Test the return value for _get_iscsi_properties - - connector = {'host': 'storwize-svc-host', - 'wwnns': ['20000090fa17311e', '20000090fa17311f'], - 'wwpns': ['ff00000000000000', 'ff00000000000001'], - 'initiator': 'iqn.1993-08.org.debian:01:eac5ccc1aaa', - 'multipath': True} - - # Expected multipath host-volume map return value - exp_m_path = {'driver_volume_type': 'iscsi', - 'data': {'target_discovered': False, - 'target_iqn': - 'iqn.1982-01.com.ibm:1234.sim.node1', - 'target_portal': '1.234.56.78:3260', - 'target_lun': 0, - 'target_iqns': [ - 'iqn.1982-01.com.ibm:1234.sim.node1', - 'iqn.1982-01.com.ibm:1234.sim.node1', - 'iqn.1982-01.com.ibm:1234.sim.node2'], - 'target_portals': - ['1.234.56.78:3260', - '1.234.56.80:3260', - '1.234.56.79:3260'], - 'target_luns': [0, 0, 0], - 'auth_method': 'CHAP', - 'discovery_auth_method': 'CHAP'}} - - volume_iSCSI = self._create_volume() - extra_spec = {'capabilities:storage_protocol': ' iSCSI'} - vol_type_iSCSI = volume_types.create(self.ctxt, 'iSCSI', extra_spec) - volume_iSCSI['volume_type_id'] = vol_type_iSCSI['id'] - - # Check case where no hosts exist - ret = self.iscsi_driver._helpers.get_host_from_connector( - connector, iscsi=True) - self.assertIsNone(ret) - - # Initialize connection to map volume to a host - ret = self.iscsi_driver.initialize_connection( - volume_iSCSI, connector) - self.assertEqual(exp_m_path['driver_volume_type'], - ret['driver_volume_type']) - - # Check the multipath host-volume map return value - for k, v in exp_m_path['data'].items(): - self.assertEqual(v, ret['data'][k]) - - ret = self.iscsi_driver._helpers.get_host_from_connector( - connector, iscsi=True) - self.assertIsNotNone(ret) - - def test_storwize_svc_iscsi_host_maps(self): - # Create two volumes to be used in mappings 
- - ctxt = context.get_admin_context() - volume1 = self._generate_vol_info(None, None) - self.iscsi_driver.create_volume(volume1) - volume2 = self._generate_vol_info(None, None) - self.iscsi_driver.create_volume(volume2) - - # Create volume types that we created - types = {} - for protocol in ['iSCSI']: - opts = {'storage_protocol': ' ' + protocol} - types[protocol] = volume_types.create(ctxt, protocol, opts) - - expected = {'iSCSI': {'driver_volume_type': 'iscsi', - 'data': {'target_discovered': False, - 'target_iqn': - 'iqn.1982-01.com.ibm:1234.sim.node1', - 'target_portal': '1.234.56.78:3260', - 'target_lun': 0, - 'auth_method': 'CHAP', - 'discovery_auth_method': 'CHAP'}}} - - volume1['volume_type_id'] = types[protocol]['id'] - volume2['volume_type_id'] = types[protocol]['id'] - - # Check case where no hosts exist - if self.USESIM: - ret = self.iscsi_driver._helpers.get_host_from_connector( - self._connector) - self.assertIsNone(ret) - - # Make sure that the volumes have been created - self._assert_vol_exists(volume1['name'], True) - self._assert_vol_exists(volume2['name'], True) - - # Initialize connection from the first volume to a host - ret = self.iscsi_driver.initialize_connection( - volume1, self._connector) - self.assertEqual(expected[protocol]['driver_volume_type'], - ret['driver_volume_type']) - for k, v in expected[protocol]['data'].items(): - self.assertEqual(v, ret['data'][k]) - - # Initialize again, should notice it and do nothing - ret = self.iscsi_driver.initialize_connection( - volume1, self._connector) - self.assertEqual(expected[protocol]['driver_volume_type'], - ret['driver_volume_type']) - for k, v in expected[protocol]['data'].items(): - self.assertEqual(v, ret['data'][k]) - - # Try to delete the 1st volume (should fail because it is mapped) - self.assertRaises(exception.VolumeBackendAPIException, - self.iscsi_driver.delete_volume, - volume1) - - ret = self.iscsi_driver.terminate_connection(volume1, - self._connector) - if self.USESIM: - ret 
= self.iscsi_driver._helpers.get_host_from_connector( - self._connector) - self.assertIsNone(ret) - - # Check cases with no auth set for host - if self.USESIM: - for auth_enabled in [True, False]: - for host_exists in ['yes-auth', 'yes-noauth', 'no']: - self._set_flag('storwize_svc_iscsi_chap_enabled', - auth_enabled) - case = 'en' + six.text_type( - auth_enabled) + 'ex' + six.text_type(host_exists) - conn_na = {'initiator': 'test:init:%s' % - random.randint(10000, 99999), - 'ip': '11.11.11.11', - 'host': 'host-%s' % case} - if host_exists.startswith('yes'): - self.sim._add_host_to_list(conn_na) - if host_exists == 'yes-auth': - kwargs = {'chapsecret': 'foo', - 'obj': conn_na['host']} - self.sim._cmd_chhost(**kwargs) - volume1['volume_type_id'] = types['iSCSI']['id'] - - init_ret = self.iscsi_driver.initialize_connection(volume1, - conn_na) - host_name = self.sim._host_in_list(conn_na['host']) - chap_ret = ( - self.iscsi_driver._helpers.get_chap_secret_for_host( - host_name)) - if auth_enabled or host_exists == 'yes-auth': - self.assertIn('auth_password', init_ret['data']) - self.assertIsNotNone(chap_ret) - else: - self.assertNotIn('auth_password', init_ret['data']) - self.assertIsNone(chap_ret) - self.iscsi_driver.terminate_connection(volume1, conn_na) - self._set_flag('storwize_svc_iscsi_chap_enabled', True) - - # Test no preferred node - if self.USESIM: - self.sim.error_injection('lsvdisk', 'no_pref_node') - self.assertRaises(exception.VolumeBackendAPIException, - self.iscsi_driver.initialize_connection, - volume1, self._connector) - - # Initialize connection from the second volume to the host with no - # preferred node set if in simulation mode, otherwise, just - # another initialize connection. 
- if self.USESIM: - self.sim.error_injection('lsvdisk', 'blank_pref_node') - self.iscsi_driver.initialize_connection(volume2, self._connector) - - # Try to remove connection from host that doesn't exist (should fail) - conn_no_exist = self._connector.copy() - conn_no_exist['initiator'] = 'i_dont_exist' - conn_no_exist['wwpns'] = ['0000000000000000'] - self.assertRaises(exception.VolumeDriverException, - self.iscsi_driver.terminate_connection, - volume1, - conn_no_exist) - - # Try to remove connection from volume that isn't mapped (should print - # message but NOT fail) - unmapped_vol = self._generate_vol_info(None, None) - self.iscsi_driver.create_volume(unmapped_vol) - self.iscsi_driver.terminate_connection(unmapped_vol, self._connector) - self.iscsi_driver.delete_volume(unmapped_vol) - - # Remove the mapping from the 1st volume and delete it - self.iscsi_driver.terminate_connection(volume1, self._connector) - self.iscsi_driver.delete_volume(volume1) - self._assert_vol_exists(volume1['name'], False) - - # Make sure our host still exists - host_name = self.iscsi_driver._helpers.get_host_from_connector( - self._connector, iscsi=True) - self.assertIsNotNone(host_name) - - # Remove the mapping from the 2nd volume. The host should - # be automatically removed because there are no more mappings. 
- self.iscsi_driver.terminate_connection(volume2, self._connector) - - # Check if we successfully terminate connections when the host is not - # specified (see bug #1244257) - fake_conn = {'ip': '127.0.0.1', 'initiator': 'iqn.fake'} - self.iscsi_driver.initialize_connection(volume2, self._connector) - host_name = self.iscsi_driver._helpers.get_host_from_connector( - self._connector, iscsi=True) - self.assertIsNotNone(host_name) - self.iscsi_driver.terminate_connection(volume2, fake_conn) - host_name = self.iscsi_driver._helpers.get_host_from_connector( - self._connector, iscsi=True) - self.assertIsNone(host_name) - self.iscsi_driver.delete_volume(volume2) - self._assert_vol_exists(volume2['name'], False) - - # Delete volume types that we created - for protocol in ['iSCSI']: - volume_types.destroy(ctxt, types[protocol]['id']) - - # Check if our host still exists (it should not) - if self.USESIM: - ret = ( - self.iscsi_driver._helpers.get_host_from_connector( - self._connector, iscsi=True)) - self.assertIsNone(ret) - - def test_storwize_svc_iscsi_multi_host_maps(self): - # We can't test connecting to multiple hosts from a single host when - # using real storage - if not self.USESIM: - return - - # Create a volume to be used in mappings - ctxt = context.get_admin_context() - volume = self._generate_vol_info(None, None) - self.iscsi_driver.create_volume(volume) - - # Create volume types for protocols - types = {} - for protocol in ['iSCSI']: - opts = {'storage_protocol': ' ' + protocol} - types[protocol] = volume_types.create(ctxt, protocol, opts) - - # Create a connector for the second 'host' - wwpns = [six.text_type(random.randint(0, 9999999999999999)).zfill(16), - six.text_type(random.randint(0, 9999999999999999)).zfill(16)] - initiator = 'test.initiator.%s' % six.text_type(random.randint(10000, - 99999)) - conn2 = {'ip': '1.234.56.79', - 'host': 'storwize-svc-test2', - 'wwpns': wwpns, - 'initiator': initiator} - - # Check protocols for iSCSI - 
volume['volume_type_id'] = types[protocol]['id'] - - # Make sure that the volume has been created - self._assert_vol_exists(volume['name'], True) - - self.iscsi_driver.initialize_connection(volume, self._connector) - - self._set_flag('storwize_svc_multihostmap_enabled', False) - self.assertRaises( - exception.CinderException, - self.iscsi_driver.initialize_connection, volume, conn2) - - self._set_flag('storwize_svc_multihostmap_enabled', True) - self.iscsi_driver.initialize_connection(volume, conn2) - - self.iscsi_driver.terminate_connection(volume, conn2) - self.iscsi_driver.terminate_connection(volume, self._connector) - - def test_add_vdisk_copy_iscsi(self): - # Ensure only iSCSI is available - self.iscsi_driver._state['enabled_protocols'] = set(['iSCSI']) - volume = self._generate_vol_info(None, None) - self.iscsi_driver.create_volume(volume) - self.iscsi_driver.add_vdisk_copy(volume['name'], 'fake-pool', None) - - -class StorwizeSVCFcDriverTestCase(test.TestCase): - @mock.patch.object(time, 'sleep') - def setUp(self, mock_sleep): - super(StorwizeSVCFcDriverTestCase, self).setUp() - self.USESIM = True - if self.USESIM: - self.fc_driver = StorwizeSVCFcFakeDriver( - configuration=conf.Configuration(None)) - self._def_flags = {'san_ip': 'hostname', - 'san_login': 'user', - 'san_password': 'pass', - 'storwize_svc_volpool_name': - SVC_POOLS, - 'storwize_svc_flashcopy_timeout': 20, - 'storwize_svc_flashcopy_rate': 49, - 'storwize_svc_multipath_enabled': False, - 'storwize_svc_allow_tenant_qos': True} - wwpns = [ - six.text_type(random.randint(0, 9999999999999999)).zfill(16), - six.text_type(random.randint(0, 9999999999999999)).zfill(16)] - initiator = 'test.initiator.%s' % six.text_type( - random.randint(10000, 99999)) - self._connector = {'ip': '1.234.56.78', - 'host': 'storwize-svc-test', - 'wwpns': wwpns, - 'initiator': initiator} - self.sim = StorwizeSVCManagementSimulator(SVC_POOLS) - - self.fc_driver.set_fake_storage(self.sim) - self.ctxt = 
context.get_admin_context() - - self._reset_flags() - self.ctxt = context.get_admin_context() - db_driver = self.fc_driver.configuration.db_driver - self.db = importutils.import_module(db_driver) - self.fc_driver.db = self.db - self.fc_driver.do_setup(None) - self.fc_driver.check_for_setup_error() - self.fc_driver._helpers.check_fcmapping_interval = 0 - - def _set_flag(self, flag, value): - group = self.fc_driver.configuration.config_group - self.fc_driver.configuration.set_override(flag, value, group) - - def _reset_flags(self): - self.fc_driver.configuration.local_conf.reset() - for k, v in self._def_flags.items(): - self._set_flag(k, v) - - def _create_volume(self, **kwargs): - pool = _get_test_pool() - prop = {'host': 'openstack@svc#%s' % pool, - 'size': 1} - for p in prop.keys(): - if p not in kwargs: - kwargs[p] = prop[p] - vol = testutils.create_volume(self.ctxt, **kwargs) - self.fc_driver.create_volume(vol) - return vol - - def _delete_volume(self, volume): - self.fc_driver.delete_volume(volume) - self.db.volume_destroy(self.ctxt, volume['id']) - - def _generate_vol_info(self, vol_name, vol_id): - pool = _get_test_pool() - prop = {'mdisk_grp_name': pool} - if vol_name: - prop.update(volume_name=vol_name, - volume_id=vol_id, - volume_size=10) - else: - prop.update(size=10, - volume_type_id=None, - mdisk_grp_name=pool, - host='openstack@svc#%s' % pool) - vol = testutils.create_volume(self.ctxt, **prop) - return vol - - def _assert_vol_exists(self, name, exists): - is_vol_defined = self.fc_driver._helpers.is_vdisk_defined(name) - self.assertEqual(exists, is_vol_defined) - - def test_storwize_get_host_with_fc_connection(self): - # Create a FC host - del self._connector['initiator'] - helper = self.fc_driver._helpers - host_name = helper.create_host(self._connector) - - # Remove the first wwpn from connector, and then try get host - wwpns = self._connector['wwpns'] - wwpns.remove(wwpns[0]) - host_name = helper.get_host_from_connector(self._connector) - - 
self.assertIsNotNone(host_name) - - def test_storwize_get_host_with_fc_connection_with_volume(self): - # create a FC volume - volume_fc = self._generate_vol_info(None, None) - self.fc_driver.create_volume(volume_fc) - extra_spec = {'capabilities:storage_protocol': ' FC'} - vol_type_fc = volume_types.create(self.ctxt, 'FC', extra_spec) - volume_fc['volume_type_id'] = vol_type_fc['id'] - - connector = {'host': 'storwize-svc-host', - 'wwnns': ['20000090fa17311e', '20000090fa17311f'], - 'wwpns': ['ff00000000000000', 'ff00000000000001'], - 'initiator': 'iqn.1993-08.org.debian:01:eac5ccc1aaa'} - self.fc_driver.initialize_connection(volume_fc, connector) - # Create a FC host - helper = self.fc_driver._helpers - - host_name = helper.get_host_from_connector( - connector, volume_fc['name']) - self.assertIsNotNone(host_name) - - def test_storwize_get_host_from_connector_with_lshost_failure(self): - self.skipTest('Bug 1640205') - self._connector.pop('initiator') - helper = self.fc_driver._helpers - # Create two hosts. The first is not related to the connector and - # we use the simulator for that. The second is for the connector. - # We will force the missing_host error for the first host, but - # then tolerate and find the second host on the slow path normally. - if self.USESIM: - self.sim._cmd_mkhost(name='storwize-svc-test-9', hbawwpn='123456') - helper.create_host(self._connector) - # tell lshost to fail while calling get_host_from_connector - if self.USESIM: - # tell lshost to fail while called from get_host_from_connector - self.sim.error_injection('lshost', 'missing_host') - # tell lsfabric to skip rows so that we skip past fast path - self.sim.error_injection('lsfabric', 'remove_rows') - # Run test - host_name = helper.get_host_from_connector(self._connector) - - self.assertIsNotNone(host_name) - # Need to assert that lshost was actually called. The way - # we do that is check that the next simulator error for lshost - # has been reset. 
- self.assertEqual(self.sim._next_cmd_error['lshost'], '', - "lshost was not called in the simulator. The " - "queued error still remains.") - - def test_storwize_get_host_from_connector_with_lshost_failure2(self): - self._connector.pop('initiator') - self._connector['wwpns'] = [] # Clearing will skip over fast-path - helper = self.fc_driver._helpers - if self.USESIM: - # Add a host to the simulator. We don't need it to match the - # connector since we will force a bad failure for lshost. - self.sim._cmd_mkhost(name='DifferentHost', hbawwpn='123456') - # tell lshost to fail badly while called from - # get_host_from_connector - self.sim.error_injection('lshost', 'bigger_troubles') - self.assertRaises(exception.VolumeBackendAPIException, - helper.get_host_from_connector, - self._connector) - - def test_storwize_get_host_from_connector_not_found(self): - self._connector.pop('initiator') - helper = self.fc_driver._helpers - # Create some hosts. The first is not related to the connector and - # we use the simulator for that. The second is for the connector. - # We will force the missing_host error for the first host, but - # then tolerate and find the second host on the slow path normally. 
- if self.USESIM: - self.sim._cmd_mkhost(name='storwize-svc-test-3', hbawwpn='1234567') - self.sim._cmd_mkhost(name='storwize-svc-test-2', hbawwpn='2345678') - self.sim._cmd_mkhost(name='storwize-svc-test-1', hbawwpn='3456789') - self.sim._cmd_mkhost(name='A-Different-host', hbawwpn='9345678') - self.sim._cmd_mkhost(name='B-Different-host', hbawwpn='8345678') - self.sim._cmd_mkhost(name='C-Different-host', hbawwpn='7345678') - # tell lshost to fail while calling get_host_from_connector - if self.USESIM: - # tell lsfabric to skip rows so that we skip past fast path - self.sim.error_injection('lsfabric', 'remove_rows') - # Run test - host_name = helper.get_host_from_connector(self._connector) - - self.assertIsNone(host_name) - - def test_storwize_get_host_from_connector_fast_path(self): - self._connector.pop('initiator') - helper = self.fc_driver._helpers - # Create two hosts. Our lshost will return the hosts in sorted - # Order. The extra host will be returned before the target - # host. If we get detailed lshost info on our host without - # gettting detailed info on the other host we used the fast path - if self.USESIM: - self.sim._cmd_mkhost(name='A-DifferentHost', hbawwpn='123456') - helper.create_host(self._connector) - # tell lshost to fail while calling get_host_from_connector - if self.USESIM: - # tell lshost to fail while called from get_host_from_connector - self.sim.error_injection('lshost', 'fail_fastpath') - # tell lsfabric to skip rows so that we skip past fast path - self.sim.error_injection('lsfabric', 'remove_rows') - # Run test - host_name = helper.get_host_from_connector(self._connector) - - self.assertIsNotNone(host_name) - # Need to assert that lshost was actually called. The way - # we do that is check that the next simulator error for lshost - # has not been reset. - self.assertEqual(self.sim._next_cmd_error['lshost'], 'fail_fastpath', - "lshost was not called in the simulator. 
The " - "queued error still remains.") - - def test_storwize_initiator_multiple_wwpns_connected(self): - - # Generate us a test volume - volume = self._create_volume() - - # Fibre Channel volume type - extra_spec = {'capabilities:storage_protocol': ' FC'} - vol_type = volume_types.create(self.ctxt, 'FC', extra_spec) - - volume['volume_type_id'] = vol_type['id'] - - # Make sure that the volumes have been created - self._assert_vol_exists(volume['name'], True) - - # Set up one WWPN that won't match and one that will. - self.fc_driver._state['storage_nodes']['1']['WWPN'] = [ - '123456789ABCDEF0', 'AABBCCDDEEFF0010'] - - wwpns = ['ff00000000000000', 'ff00000000000001'] - connector = {'host': 'storwize-svc-test', 'wwpns': wwpns} - - with mock.patch.object(storwize_svc_common.StorwizeHelpers, - 'get_conn_fc_wwpns') as get_mappings: - mapped_wwpns = ['AABBCCDDEEFF0001', 'AABBCCDDEEFF0002', - 'AABBCCDDEEFF0010', 'AABBCCDDEEFF0012'] - get_mappings.return_value = mapped_wwpns - - # Initialize the connection - init_ret = self.fc_driver.initialize_connection(volume, connector) - - # Make sure we return all wwpns which where mapped as part of the - # connection - self.assertEqual(mapped_wwpns, - init_ret['data']['target_wwn']) - - def test_storwize_svc_fc_validate_connector(self): - conn_neither = {'host': 'host'} - conn_iscsi = {'host': 'host', 'initiator': 'foo'} - conn_fc = {'host': 'host', 'wwpns': 'bar'} - conn_both = {'host': 'host', 'initiator': 'foo', 'wwpns': 'bar'} - - self.fc_driver._state['enabled_protocols'] = set(['FC']) - self.fc_driver.validate_connector(conn_fc) - self.fc_driver.validate_connector(conn_both) - self.assertRaises(exception.InvalidConnectorException, - self.fc_driver.validate_connector, conn_iscsi) - self.assertRaises(exception.InvalidConnectorException, - self.fc_driver.validate_connector, conn_neither) - - self.fc_driver._state['enabled_protocols'] = set(['iSCSI', 'FC']) - self.fc_driver.validate_connector(conn_fc) - 
self.fc_driver.validate_connector(conn_both) - self.assertRaises(exception.InvalidConnectorException, - self.fc_driver.validate_connector, conn_neither) - - def test_storwize_terminate_fc_connection(self): - # create a FC volume - volume_fc = self._create_volume() - extra_spec = {'capabilities:storage_protocol': ' FC'} - vol_type_fc = volume_types.create(self.ctxt, 'FC', extra_spec) - volume_fc['volume_type_id'] = vol_type_fc['id'] - - connector = {'host': 'storwize-svc-host', - 'wwnns': ['20000090fa17311e', '20000090fa17311f'], - 'wwpns': ['ff00000000000000', 'ff00000000000001'], - 'initiator': 'iqn.1993-08.org.debian:01:eac5ccc1aaa'} - - self.fc_driver.initialize_connection(volume_fc, connector) - self.fc_driver.terminate_connection(volume_fc, connector) - - @mock.patch.object(storwize_svc_fc.StorwizeSVCFCDriver, - '_do_terminate_connection') - @mock.patch.object(storwize_svc_fc.StorwizeSVCFCDriver, - '_do_initialize_connection') - def test_storwize_do_terminate_fc_connection(self, init_conn, - term_conn): - # create a FC volume - volume_fc = self._create_volume() - extra_spec = {'capabilities:storage_protocol': ' FC'} - vol_type_fc = volume_types.create(self.ctxt, 'FC', extra_spec) - volume_fc['volume_type_id'] = vol_type_fc['id'] - - connector = {'host': 'storwize-svc-host', - 'wwnns': ['20000090fa17311e', '20000090fa17311f'], - 'wwpns': ['ff00000000000000', 'ff00000000000001'], - 'initiator': 'iqn.1993-08.org.debian:01:eac5ccc1aaa'} - - self.fc_driver.initialize_connection(volume_fc, connector) - self.fc_driver.terminate_connection(volume_fc, connector) - init_conn.assert_called_once_with(volume_fc, connector) - term_conn.assert_called_once_with(volume_fc, connector) - - @mock.patch.object(storwize_svc_fc.StorwizeSVCFCDriver, - '_do_terminate_connection') - def test_storwize_initialize_fc_connection_failure(self, term_conn): - # create a FC volume - volume_fc = self._create_volume() - extra_spec = {'capabilities:storage_protocol': ' FC'} - vol_type_fc = 
volume_types.create(self.ctxt, 'FC', extra_spec) - volume_fc['volume_type_id'] = vol_type_fc['id'] - - connector = {'host': 'storwize-svc-host', - 'wwnns': ['20000090fa17311e', '20000090fa17311f'], - 'wwpns': ['ff00000000000000', 'ff00000000000001'], - 'initiator': 'iqn.1993-08.org.debian:01:eac5ccc1aaa'} - - self.fc_driver._state['storage_nodes'] = {} - self.assertRaises(exception.VolumeBackendAPIException, - self.fc_driver.initialize_connection, - volume_fc, connector) - term_conn.assert_called_once_with(volume_fc, connector) - - def test_storwize_terminate_fc_connection_multi_attach(self): - # create a FC volume - volume_fc = self._create_volume() - extra_spec = {'capabilities:storage_protocol': ' FC'} - vol_type_fc = volume_types.create(self.ctxt, 'FC', extra_spec) - volume_fc['volume_type_id'] = vol_type_fc['id'] - - connector = {'host': 'storwize-svc-host', - 'wwnns': ['20000090fa17311e', '20000090fa17311f'], - 'wwpns': ['ff00000000000000', 'ff00000000000001'], - 'initiator': 'iqn.1993-08.org.debian:01:eac5ccc1aaa'} - connector2 = {'host': 'STORWIZE-SVC-HOST', - 'wwnns': ['30000090fa17311e', '30000090fa17311f'], - 'wwpns': ['ffff000000000000', 'ffff000000000001'], - 'initiator': 'iqn.1993-08.org.debian:01:eac5ccc1bbb'} - - # map and unmap the volume to two hosts normal case - self.fc_driver.initialize_connection(volume_fc, connector) - self.fc_driver.initialize_connection(volume_fc, connector2) - # validate that the host entries are created - for conn in [connector, connector2]: - host = self.fc_driver._helpers.get_host_from_connector(conn) - self.assertIsNotNone(host) - self.fc_driver.terminate_connection(volume_fc, connector) - self.fc_driver.terminate_connection(volume_fc, connector2) - # validate that the host entries are deleted - for conn in [connector, connector2]: - host = self.fc_driver._helpers.get_host_from_connector(conn) - self.assertIsNone(host) - # map and unmap the volume to two hosts with the mapping gone - 
self.fc_driver.initialize_connection(volume_fc, connector) - self.fc_driver.initialize_connection(volume_fc, connector2) - # Test multiple attachments case - host_name = self.fc_driver._helpers.get_host_from_connector(connector2) - self.fc_driver._helpers.unmap_vol_from_host( - volume_fc['name'], host_name) - host_name = self.fc_driver._helpers.get_host_from_connector(connector2) - self.assertIsNotNone(host_name) - with mock.patch.object(storwize_svc_common.StorwizeSSH, - 'rmvdiskhostmap') as rmmap: - rmmap.side_effect = Exception('boom') - self.fc_driver.terminate_connection(volume_fc, connector2) - host_name = self.fc_driver._helpers.get_host_from_connector(connector2) - self.assertIsNone(host_name) - # Test single attachment case - self.fc_driver._helpers.unmap_vol_from_host( - volume_fc['name'], host_name) - with mock.patch.object(storwize_svc_common.StorwizeSSH, - 'rmvdiskhostmap') as rmmap: - rmmap.side_effect = Exception('boom') - self.fc_driver.terminate_connection(volume_fc, connector) - # validate that the host entries are deleted - for conn in [connector, connector2]: - host = self.fc_driver._helpers.get_host_from_connector(conn) - self.assertIsNone(host) - - def test_storwize_initiator_target_map(self): - # Generate us a test volume - volume = self._create_volume() - - # FIbre Channel volume type - extra_spec = {'capabilities:storage_protocol': ' FC'} - vol_type = volume_types.create(self.ctxt, 'FC', extra_spec) - - volume['volume_type_id'] = vol_type['id'] - - # Make sure that the volumes have been created - self._assert_vol_exists(volume['name'], True) - - wwpns = ['ff00000000000000', 'ff00000000000001'] - connector = {'host': 'storwize-svc-test', 'wwpns': wwpns} - - # Initialise the connection - init_ret = self.fc_driver.initialize_connection(volume, connector) - - # Check that the initiator_target_map is as expected - init_data = {'driver_volume_type': 'fibre_channel', - 'data': {'initiator_target_map': - {'ff00000000000000': ['AABBCCDDEEFF0011'], - 
'ff00000000000001': ['AABBCCDDEEFF0011']}, - 'target_discovered': False, - 'target_lun': 0, - 'target_wwn': ['AABBCCDDEEFF0011'], - 'volume_id': volume['id'] - } - } - - self.assertEqual(init_data, init_ret) - - # Terminate connection - term_ret = self.fc_driver.terminate_connection(volume, connector) - - # Check that the initiator_target_map is as expected - term_data = {'driver_volume_type': 'fibre_channel', - 'data': {'initiator_target_map': - {'ff00000000000000': ['5005076802432ADE', - '5005076802332ADE', - '5005076802532ADE', - '5005076802232ADE', - '5005076802132ADE', - '5005086802132ADE', - '5005086802332ADE', - '5005086802532ADE', - '5005086802232ADE', - '5005086802432ADE'], - 'ff00000000000001': ['5005076802432ADE', - '5005076802332ADE', - '5005076802532ADE', - '5005076802232ADE', - '5005076802132ADE', - '5005086802132ADE', - '5005086802332ADE', - '5005086802532ADE', - '5005086802232ADE', - '5005086802432ADE']} - } - } - - self.assertItemsEqual(term_data, term_ret) - - def test_storwize_svc_fc_host_maps(self): - # Create two volumes to be used in mappings - - ctxt = context.get_admin_context() - volume1 = self._generate_vol_info(None, None) - self.fc_driver.create_volume(volume1) - volume2 = self._generate_vol_info(None, None) - self.fc_driver.create_volume(volume2) - - # Create volume types that we created - types = {} - for protocol in ['FC']: - opts = {'storage_protocol': ' ' + protocol} - types[protocol] = volume_types.create(ctxt, protocol, opts) - - expected = {'FC': {'driver_volume_type': 'fibre_channel', - 'data': {'target_lun': 0, - 'target_wwn': ['AABBCCDDEEFF0011'], - 'target_discovered': False}}} - - volume1['volume_type_id'] = types[protocol]['id'] - volume2['volume_type_id'] = types[protocol]['id'] - - # Check case where no hosts exist - if self.USESIM: - ret = self.fc_driver._helpers.get_host_from_connector( - self._connector) - self.assertIsNone(ret) - - # Make sure that the volumes have been created - 
self._assert_vol_exists(volume1['name'], True) - self._assert_vol_exists(volume2['name'], True) - - # Initialize connection from the first volume to a host - ret = self.fc_driver.initialize_connection( - volume1, self._connector) - self.assertEqual(expected[protocol]['driver_volume_type'], - ret['driver_volume_type']) - for k, v in expected[protocol]['data'].items(): - self.assertEqual(v, ret['data'][k]) - - # Initialize again, should notice it and do nothing - ret = self.fc_driver.initialize_connection( - volume1, self._connector) - self.assertEqual(expected[protocol]['driver_volume_type'], - ret['driver_volume_type']) - for k, v in expected[protocol]['data'].items(): - self.assertEqual(v, ret['data'][k]) - - # Try to delete the 1st volume (should fail because it is mapped) - self.assertRaises(exception.VolumeBackendAPIException, - self.fc_driver.delete_volume, - volume1) - - # Check bad output from lsfabric for the 2nd volume - if protocol == 'FC' and self.USESIM: - for error in ['remove_field', 'header_mismatch']: - self.sim.error_injection('lsfabric', error) - self.assertRaises(exception.VolumeBackendAPIException, - self.fc_driver.initialize_connection, - volume2, self._connector) - - with mock.patch.object(storwize_svc_common.StorwizeHelpers, - 'get_conn_fc_wwpns') as conn_fc_wwpns: - conn_fc_wwpns.return_value = [] - ret = self.fc_driver.initialize_connection(volume2, - self._connector) - - ret = self.fc_driver.terminate_connection(volume1, self._connector) - if protocol == 'FC' and self.USESIM: - # For the first volume detach, ret['data'] should be empty - # only ret['driver_volume_type'] returned - self.assertEqual({}, ret['data']) - self.assertEqual('fibre_channel', ret['driver_volume_type']) - ret = self.fc_driver.terminate_connection(volume2, - self._connector) - self.assertEqual('fibre_channel', ret['driver_volume_type']) - # wwpn is randomly created - self.assertNotEqual({}, ret['data']) - if self.USESIM: - ret = 
self.fc_driver._helpers.get_host_from_connector( - self._connector) - self.assertIsNone(ret) - - # Test no preferred node - if self.USESIM: - self.sim.error_injection('lsvdisk', 'no_pref_node') - self.assertRaises(exception.VolumeBackendAPIException, - self.fc_driver.initialize_connection, - volume1, self._connector) - - # Initialize connection from the second volume to the host with no - # preferred node set if in simulation mode, otherwise, just - # another initialize connection. - if self.USESIM: - self.sim.error_injection('lsvdisk', 'blank_pref_node') - self.fc_driver.initialize_connection(volume2, self._connector) - - # Try to remove connection from host that doesn't exist (should fail) - conn_no_exist = self._connector.copy() - conn_no_exist['initiator'] = 'i_dont_exist' - conn_no_exist['wwpns'] = ['0000000000000000'] - self.assertRaises(exception.VolumeDriverException, - self.fc_driver.terminate_connection, - volume1, - conn_no_exist) - - # Try to remove connection from volume that isn't mapped (should print - # message but NOT fail) - unmapped_vol = self._generate_vol_info(None, None) - self.fc_driver.create_volume(unmapped_vol) - self.fc_driver.terminate_connection(unmapped_vol, self._connector) - self.fc_driver.delete_volume(unmapped_vol) - - # Remove the mapping from the 1st volume and delete it - self.fc_driver.terminate_connection(volume1, self._connector) - self.fc_driver.delete_volume(volume1) - self._assert_vol_exists(volume1['name'], False) - - # Make sure our host still exists - host_name = self.fc_driver._helpers.get_host_from_connector( - self._connector) - self.assertIsNotNone(host_name) - - # Remove the mapping from the 2nd volume. The host should - # be automatically removed because there are no more mappings. 
- self.fc_driver.terminate_connection(volume2, self._connector) - - # Check if we successfully terminate connections when the host is not - # specified (see bug #1244257) - fake_conn = {'ip': '127.0.0.1', 'initiator': 'iqn.fake'} - self.fc_driver.initialize_connection(volume2, self._connector) - host_name = self.fc_driver._helpers.get_host_from_connector( - self._connector) - self.assertIsNotNone(host_name) - self.fc_driver.terminate_connection(volume2, fake_conn) - host_name = self.fc_driver._helpers.get_host_from_connector( - self._connector) - self.assertIsNone(host_name) - self.fc_driver.delete_volume(volume2) - self._assert_vol_exists(volume2['name'], False) - - # Delete volume types that we created - for protocol in ['FC']: - volume_types.destroy(ctxt, types[protocol]['id']) - - # Check if our host still exists (it should not) - if self.USESIM: - ret = (self.fc_driver._helpers.get_host_from_connector( - self._connector)) - self.assertIsNone(ret) - - def test_storwize_svc_fc_multi_host_maps(self): - # We can't test connecting to multiple hosts from a single host when - # using real storage - if not self.USESIM: - return - - # Create a volume to be used in mappings - ctxt = context.get_admin_context() - volume = self._generate_vol_info(None, None) - self.fc_driver.create_volume(volume) - - # Create volume types for protocols - types = {} - for protocol in ['FC']: - opts = {'storage_protocol': ' ' + protocol} - types[protocol] = volume_types.create(ctxt, protocol, opts) - - # Create a connector for the second 'host' - wwpns = [six.text_type(random.randint(0, 9999999999999999)).zfill(16), - six.text_type(random.randint(0, 9999999999999999)).zfill(16)] - initiator = 'test.initiator.%s' % six.text_type(random.randint(10000, - 99999)) - conn2 = {'ip': '1.234.56.79', - 'host': 'storwize-svc-test2', - 'wwpns': wwpns, - 'initiator': initiator} - - # Check protocols for FC - - volume['volume_type_id'] = types[protocol]['id'] - - # Make sure that the volume has been 
created - self._assert_vol_exists(volume['name'], True) - - self.fc_driver.initialize_connection(volume, self._connector) - - self._set_flag('storwize_svc_multihostmap_enabled', False) - self.assertRaises( - exception.CinderException, - self.fc_driver.initialize_connection, volume, conn2) - - self._set_flag('storwize_svc_multihostmap_enabled', True) - self.fc_driver.initialize_connection(volume, conn2) - - self.fc_driver.terminate_connection(volume, conn2) - self.fc_driver.terminate_connection(volume, self._connector) - - def test_add_vdisk_copy_fc(self): - # Ensure only FC is available - self.fc_driver._state['enabled_protocols'] = set(['FC']) - volume = self._generate_vol_info(None, None) - self.fc_driver.create_volume(volume) - self.fc_driver.add_vdisk_copy(volume['name'], 'fake-pool', None) - - -@ddt.ddt -class StorwizeSVCCommonDriverTestCase(test.TestCase): - @mock.patch.object(time, 'sleep') - def setUp(self, mock_sleep): - super(StorwizeSVCCommonDriverTestCase, self).setUp() - self.USESIM = True - if self.USESIM: - self._def_flags = {'san_ip': 'hostname', - 'storwize_san_secondary_ip': 'secondaryname', - 'san_login': 'user', - 'san_password': 'pass', - 'storwize_svc_volpool_name': - SVC_POOLS, - 'storwize_svc_flashcopy_timeout': 20, - 'storwize_svc_flashcopy_rate': 49, - 'storwize_svc_allow_tenant_qos': True} - config = conf.Configuration(storwize_svc_common.storwize_svc_opts, - conf.SHARED_CONF_GROUP) - # Override any configs that may get set in __init__ - self._reset_flags(config) - self.driver = StorwizeSVCISCSIFakeDriver( - configuration=config) - self._driver = storwize_svc_iscsi.StorwizeSVCISCSIDriver( - configuration=config) - wwpns = [ - six.text_type(random.randint(0, 9999999999999999)).zfill(16), - six.text_type(random.randint(0, 9999999999999999)).zfill(16)] - initiator = 'test.initiator.%s' % six.text_type( - random.randint(10000, 99999)) - self._connector = {'ip': '1.234.56.78', - 'host': 'storwize-svc-test', - 'wwpns': wwpns, - 'initiator': 
initiator} - self.sim = StorwizeSVCManagementSimulator(SVC_POOLS) - - self.driver.set_fake_storage(self.sim) - self.ctxt = context.get_admin_context() - - else: - self._reset_flags() - self.ctxt = context.get_admin_context() - db_driver = CONF.db_driver - self.db = importutils.import_module(db_driver) - self.driver.db = self.db - self.driver.do_setup(None) - self.driver.check_for_setup_error() - self.driver._helpers.check_fcmapping_interval = 0 - self.mock_object(storwize_svc_iscsi.StorwizeSVCISCSIDriver, - 'DEFAULT_GR_SLEEP', 0) - self._create_test_volume_types() - - def _set_flag(self, flag, value, configuration=None): - if not configuration: - configuration = self.driver.configuration - group = configuration.config_group - self.override_config(flag, value, group) - - def _reset_flags(self, configuration=None): - if not configuration: - configuration = self.driver.configuration - CONF.reset() - for k, v in self._def_flags.items(): - self._set_flag(k, v, configuration) - - def _assert_vol_exists(self, name, exists): - is_vol_defined = self.driver._helpers.is_vdisk_defined(name) - self.assertEqual(exists, is_vol_defined) - - def _create_test_volume_types(self): - spec = {'mirror_pool': 'openstack1'} - self.mirror_vol_type = self._create_volume_type(spec, 'mirror_type') - self.default_vol_type = self._create_volume_type(None, 'default_type') - - def test_storwize_svc_connectivity(self): - # Make sure we detect if the pool doesn't exist - no_exist_pool = 'i-dont-exist-%s' % random.randint(10000, 99999) - self._set_flag('storwize_svc_volpool_name', no_exist_pool) - self.assertRaises(exception.InvalidInput, - self.driver.do_setup, None) - self._reset_flags() - - # Check the case where the user didn't configure IP addresses - # as well as receiving unexpected results from the storage - if self.USESIM: - self.sim.error_injection('lsnodecanister', 'header_mismatch') - self.assertRaises(exception.VolumeBackendAPIException, - self.driver.do_setup, None) - 
self.sim.error_injection('lsnodecanister', 'remove_field') - self.assertRaises(exception.VolumeBackendAPIException, - self.driver.do_setup, None) - self.sim.error_injection('lsportip', 'header_mismatch') - self.assertRaises(exception.VolumeBackendAPIException, - self.driver.do_setup, None) - self.sim.error_injection('lsportip', 'remove_field') - self.assertRaises(exception.VolumeBackendAPIException, - self.driver.do_setup, None) - - # Check with bad parameters - self._set_flag('san_ip', '') - self.assertRaises(exception.InvalidInput, - self.driver.check_for_setup_error) - self._reset_flags() - - self._set_flag('san_password', None) - self._set_flag('san_private_key', None) - self.assertRaises(exception.InvalidInput, - self.driver.check_for_setup_error) - self._reset_flags() - - self._set_flag('storwize_svc_vol_grainsize', 42) - self.assertRaises(exception.InvalidInput, - self.driver.check_for_setup_error) - self._reset_flags() - - self._set_flag('storwize_svc_vol_compression', True) - self._set_flag('storwize_svc_vol_rsize', -1) - self.assertRaises(exception.InvalidInput, - self.driver.check_for_setup_error) - self._reset_flags() - - self._set_flag('storwize_svc_vol_rsize', 2) - self._set_flag('storwize_svc_vol_nofmtdisk', True) - self.assertRaises(exception.InvalidInput, - self.driver.check_for_setup_error) - self._reset_flags() - - self._set_flag('storwize_svc_vol_iogrp', 5) - self.assertRaises(exception.InvalidInput, - self.driver.check_for_setup_error) - self._reset_flags() - - if self.USESIM: - self.sim.error_injection('lslicense', 'no_compression') - self.sim.error_injection('lsguicapabilities', 'no_compression') - self._set_flag('storwize_svc_vol_compression', True) - self.driver.do_setup(None) - self.assertRaises(exception.InvalidInput, - self.driver.check_for_setup_error) - self._reset_flags() - - # Finally, check with good parameters - self.driver.do_setup(None) - - @mock.patch.object(ssh_utils, 'SSHPool') - @mock.patch.object(processutils, 'ssh_execute') 
- def test_run_ssh_set_up_with_san_ip(self, mock_ssh_execute, mock_ssh_pool): - ssh_cmd = ['svcinfo'] - self._driver._run_ssh(ssh_cmd) - - mock_ssh_pool.assert_called_once_with( - self._driver.configuration.san_ip, - self._driver.configuration.san_ssh_port, - self._driver.configuration.ssh_conn_timeout, - self._driver.configuration.san_login, - password=self._driver.configuration.san_password, - privatekey=self._driver.configuration.san_private_key, - min_size=self._driver.configuration.ssh_min_pool_conn, - max_size=self._driver.configuration.ssh_max_pool_conn) - - @mock.patch.object(ssh_utils, 'SSHPool') - @mock.patch.object(processutils, 'ssh_execute') - def test_run_ssh_set_up_with_secondary_ip(self, mock_ssh_execute, - mock_ssh_pool): - mock_ssh_pool.side_effect = [paramiko.SSHException, mock.MagicMock()] - ssh_cmd = ['svcinfo'] - self._driver._run_ssh(ssh_cmd) - - mock_ssh_pool.assert_called_with( - self._driver.configuration.storwize_san_secondary_ip, - self._driver.configuration.san_ssh_port, - self._driver.configuration.ssh_conn_timeout, - self._driver.configuration.san_login, - password=self._driver.configuration.san_password, - privatekey=self._driver.configuration.san_private_key, - min_size=self._driver.configuration.ssh_min_pool_conn, - max_size=self._driver.configuration.ssh_max_pool_conn) - - @mock.patch.object(random, 'randint', mock.Mock(return_value=0)) - @mock.patch.object(ssh_utils, 'SSHPool') - @mock.patch.object(processutils, 'ssh_execute') - def test_run_ssh_fail_to_secondary_ip(self, mock_ssh_execute, - mock_ssh_pool): - mock_ssh_execute.side_effect = [processutils.ProcessExecutionError, - mock.MagicMock()] - ssh_cmd = ['svcinfo'] - self._driver._run_ssh(ssh_cmd) - - mock_ssh_pool.assert_called_with( - self._driver.configuration.storwize_san_secondary_ip, - self._driver.configuration.san_ssh_port, - self._driver.configuration.ssh_conn_timeout, - self._driver.configuration.san_login, - password=self._driver.configuration.san_password, - 
privatekey=self._driver.configuration.san_private_key, - min_size=self._driver.configuration.ssh_min_pool_conn, - max_size=self._driver.configuration.ssh_max_pool_conn) - - @mock.patch.object(ssh_utils, 'SSHPool') - @mock.patch.object(processutils, 'ssh_execute') - def test_run_secondary_ip_ssh_fail_to_san_ip(self, mock_ssh_execute, - mock_ssh_pool): - mock_ssh_pool.side_effect = [ - paramiko.SSHException, - mock.MagicMock( - ip = self._driver.configuration.storwize_san_secondary_ip), - mock.MagicMock()] - mock_ssh_execute.side_effect = [processutils.ProcessExecutionError, - mock.MagicMock()] - ssh_cmd = ['svcinfo'] - self._driver._run_ssh(ssh_cmd) - - mock_ssh_pool.assert_called_with( - self._driver.configuration.san_ip, - self._driver.configuration.san_ssh_port, - self._driver.configuration.ssh_conn_timeout, - self._driver.configuration.san_login, - password=self._driver.configuration.san_password, - privatekey=self._driver.configuration.san_private_key, - min_size=self._driver.configuration.ssh_min_pool_conn, - max_size=self._driver.configuration.ssh_max_pool_conn) - - @mock.patch.object(ssh_utils, 'SSHPool') - @mock.patch.object(processutils, 'ssh_execute') - def test_run_ssh_both_ip_set_failure(self, mock_ssh_execute, - mock_ssh_pool): - mock_ssh_pool.side_effect = [ - paramiko.SSHException, - mock.MagicMock(), - mock.MagicMock()] - mock_ssh_execute.side_effect = [processutils.ProcessExecutionError, - processutils.ProcessExecutionError] - ssh_cmd = ['svcinfo'] - self.assertRaises(processutils.ProcessExecutionError, - self._driver._run_ssh, ssh_cmd) - - @mock.patch.object(ssh_utils, 'SSHPool') - @mock.patch.object(processutils, 'ssh_execute') - def test_run_ssh_second_ip_not_set_failure(self, mock_ssh_execute, - mock_ssh_pool): - mock_ssh_execute.side_effect = [processutils.ProcessExecutionError, - mock.MagicMock()] - self._set_flag('storwize_san_secondary_ip', None) - ssh_cmd = ['svcinfo'] - self.assertRaises(processutils.ProcessExecutionError, - 
self._driver._run_ssh, ssh_cmd) - - @mock.patch.object(random, 'randint', mock.Mock(return_value=0)) - @mock.patch.object(ssh_utils, 'SSHPool') - @mock.patch.object(processutils, 'ssh_execute') - def test_run_ssh_consistent_active_ip(self, mock_ssh_execute, - mock_ssh_pool): - ssh_cmd = ['svcinfo'] - self._driver._run_ssh(ssh_cmd) - self._driver._run_ssh(ssh_cmd) - self._driver._run_ssh(ssh_cmd) - self.assertEqual(self._driver.configuration.san_ip, - self._driver.active_ip) - mock_ssh_execute.side_effect = [paramiko.SSHException, - mock.MagicMock(), mock.MagicMock()] - self._driver._run_ssh(ssh_cmd) - self._driver._run_ssh(ssh_cmd) - self.assertEqual(self._driver.configuration.storwize_san_secondary_ip, - self._driver.active_ip) - - def _create_volume_type(self, opts, type_name): - type_ref = volume_types.create(self.ctxt, type_name, opts) - vol_type = objects.VolumeType.get_by_id(self.ctxt, type_ref['id']) - return vol_type - - def _generate_vol_info(self, vol_type=None, size=10): - pool = _get_test_pool() - prop = {'size': size, - 'host': 'openstack@svc#%s' % pool} - if vol_type: - prop['volume_type_id'] = vol_type.id - vol = testutils.create_volume(self.ctxt, **prop) - return vol - - def _generate_snap_info(self, vol_id, size=10): - prop = {'volume_id': vol_id, - 'volume_size': size} - snap = testutils.create_snapshot(self.ctxt, **prop) - return snap - - def _create_volume(self, **kwargs): - pool = _get_test_pool() - prop = {'host': 'openstack@svc#%s' % pool, - 'size': 1} - for p in prop.keys(): - if p not in kwargs: - kwargs[p] = prop[p] - vol = testutils.create_volume(self.ctxt, **kwargs) - self.driver.create_volume(vol) - return vol - - def _delete_volume(self, volume): - self.driver.delete_volume(volume) - self.db.volume_destroy(self.ctxt, volume['id']) - - def _create_group_in_db(self, **kwargs): - cg = testutils.create_group(self.ctxt, **kwargs) - return cg - - def _create_group(self, **kwargs): - grp = self._create_group_in_db(**kwargs) - - model_update = 
self.driver.create_group(self.ctxt, grp) - self.assertEqual(fields.GroupStatus.AVAILABLE, - model_update['status'], - "CG created failed") - return grp - - def _create_group_snapshot_in_db(self, group_id, **kwargs): - group_snapshot = testutils.create_group_snapshot(self.ctxt, - group_id=group_id, - **kwargs) - snapshots = [] - volumes = self.db.volume_get_all_by_generic_group( - self.ctxt.elevated(), group_id) - - if not volumes: - msg = _("Group is empty. No cgsnapshot will be created.") - raise exception.InvalidGroup(reason=msg) - - for volume in volumes: - snapshots.append(testutils.create_snapshot( - self.ctxt, volume['id'], - group_snapshot.id, - group_snapshot.name, - group_snapshot.id, - fields.SnapshotStatus.CREATING)) - return group_snapshot, snapshots - - def _create_group_snapshot(self, cg_id, **kwargs): - group_snapshot, snapshots = self._create_group_snapshot_in_db( - cg_id, **kwargs) - - model_update, snapshots_model = ( - self.driver.create_group_snapshot(self.ctxt, group_snapshot, - snapshots)) - self.assertEqual(fields.GroupSnapshotStatus.AVAILABLE, - model_update['status'], - "CGSnapshot created failed") - - for snapshot in snapshots_model: - self.assertEqual(fields.SnapshotStatus.AVAILABLE, - snapshot['status']) - return group_snapshot, snapshots - - def _create_test_vol(self, opts): - ctxt = testutils.get_test_admin_context() - type_ref = volume_types.create(ctxt, 'testtype', opts) - volume = self._generate_vol_info() - volume.volume_type_id = type_ref['id'] - volume.volume_typ = objects.VolumeType.get_by_id(ctxt, - type_ref['id']) - self.driver.create_volume(volume) - - attrs = self.driver._helpers.get_vdisk_attributes(volume['name']) - self.driver.delete_volume(volume) - volume_types.destroy(ctxt, type_ref['id']) - return attrs - - def _get_default_opts(self): - opt = {'rsize': 2, - 'warning': 0, - 'autoexpand': True, - 'grainsize': 256, - 'compression': False, - 'easytier': True, - 'iogrp': '0', - 'qos': None, - 'replication': False, - 
'stretched_cluster': None, - 'nofmtdisk': False, - 'mirror_pool': None, - 'cycle_period_seconds': 300, - } - return opt - - @mock.patch.object(storwize_svc_common.StorwizeHelpers, 'add_vdisk_qos') - @mock.patch.object(storwize_svc_common.StorwizeSVCCommonDriver, - '_get_vdisk_params') - def test_storwize_svc_create_volume_with_qos(self, get_vdisk_params, - add_vdisk_qos): - vol = testutils.create_volume(self.ctxt) - fake_opts = self._get_default_opts() - # If the qos is empty, chvdisk should not be called - # for create_volume. - get_vdisk_params.return_value = fake_opts - self.driver.create_volume(vol) - self._assert_vol_exists(vol['name'], True) - self.assertFalse(add_vdisk_qos.called) - self.driver.delete_volume(vol) - - # If the qos is not empty, chvdisk should be called - # for create_volume. - fake_opts['qos'] = {'IOThrottling': 5000} - get_vdisk_params.return_value = fake_opts - self.driver.create_volume(vol) - self._assert_vol_exists(vol['name'], True) - add_vdisk_qos.assert_called_once_with(vol['name'], fake_opts['qos']) - - self.driver.delete_volume(vol) - self._assert_vol_exists(vol['name'], False) - - def test_storwize_svc_snapshots(self): - vol1 = self._create_volume() - snap1 = self._generate_snap_info(vol1.id) - - # Test timeout and volume cleanup - self._set_flag('storwize_svc_flashcopy_timeout', 1) - self.assertRaises(exception.VolumeDriverException, - self.driver.create_snapshot, snap1) - self._assert_vol_exists(snap1['name'], False) - self._reset_flags() - - # Test prestartfcmap failing - with mock.patch.object( - storwize_svc_common.StorwizeSSH, 'prestartfcmap') as prestart: - prestart.side_effect = exception.VolumeBackendAPIException - self.assertRaises(exception.VolumeBackendAPIException, - self.driver.create_snapshot, snap1) - - if self.USESIM: - self.sim.error_injection('lsfcmap', 'speed_up') - self.sim.error_injection('startfcmap', 'bad_id') - self.assertRaises(exception.VolumeBackendAPIException, - self.driver.create_snapshot, snap1) - 
self._assert_vol_exists(snap1['name'], False) - self.sim.error_injection('prestartfcmap', 'bad_id') - self.assertRaises(exception.VolumeBackendAPIException, - self.driver.create_snapshot, snap1) - self._assert_vol_exists(snap1['name'], False) - - # Test successful snapshot - self.driver.create_snapshot(snap1) - self._assert_vol_exists(snap1['name'], True) - - # Try to create a snapshot from an non-existing volume - should fail - vol2 = self._generate_vol_info() - snap_novol = self._generate_snap_info(vol2.id) - self.assertRaises(exception.VolumeDriverException, - self.driver.create_snapshot, - snap_novol) - - # We support deleting a volume that has snapshots, so delete the volume - # first - self.driver.delete_volume(vol1) - self.driver.delete_snapshot(snap1) - - def test_storwize_svc_create_cloned_volume(self): - vol1 = self._create_volume() - vol2 = testutils.create_volume(self.ctxt) - vol3 = testutils.create_volume(self.ctxt) - - # Try to clone where source size > target size - vol1['size'] = vol2['size'] + 1 - self.assertRaises(exception.InvalidInput, - self.driver.create_cloned_volume, - vol2, vol1) - self._assert_vol_exists(vol2['name'], False) - - # Try to clone where source size = target size - vol1['size'] = vol2['size'] - if self.USESIM: - self.sim.error_injection('lsfcmap', 'speed_up') - self.driver.create_cloned_volume(vol2, vol1) - if self.USESIM: - # validate copyrate was set on the flash copy - for i, fcmap in self.sim._fcmappings_list.items(): - if fcmap['target'] == vol1['name']: - self.assertEqual('49', fcmap['copyrate']) - self._assert_vol_exists(vol2['name'], True) - - # Try to clone where source size < target size - vol3['size'] = vol1['size'] + 1 - if self.USESIM: - self.sim.error_injection('lsfcmap', 'speed_up') - self.driver.create_cloned_volume(vol3, vol1) - if self.USESIM: - # Validate copyrate was set on the flash copy - for i, fcmap in self.sim._fcmappings_list.items(): - if fcmap['target'] == vol1['name']: - self.assertEqual('49', 
fcmap['copyrate']) - self._assert_vol_exists(vol3['name'], True) - - # Delete in the 'opposite' order to make sure it works - self.driver.delete_volume(vol3) - self._assert_vol_exists(vol3['name'], False) - self.driver.delete_volume(vol2) - self._assert_vol_exists(vol2['name'], False) - self.driver.delete_volume(vol1) - self._assert_vol_exists(vol1['name'], False) - - def test_storwize_svc_create_volume_from_snapshot(self): - vol1 = self._create_volume() - snap1 = self._generate_snap_info(vol1.id) - self.driver.create_snapshot(snap1) - vol2 = self._generate_vol_info() - vol3 = self._generate_vol_info() - - # Try to create a volume from a non-existing snapshot - vol_novol = self._generate_vol_info() - snap_novol = self._generate_snap_info(vol_novol.id) - self.assertRaises(exception.VolumeDriverException, - self.driver.create_volume_from_snapshot, - vol_novol, - snap_novol) - - # Fail the snapshot - with mock.patch.object( - storwize_svc_common.StorwizeSSH, 'prestartfcmap') as prestart: - prestart.side_effect = exception.VolumeBackendAPIException - self.assertRaises(exception.VolumeBackendAPIException, - self.driver.create_volume_from_snapshot, - vol2, snap1) - self._assert_vol_exists(vol2['name'], False) - - # Try to create where volume size < snapshot size - snap1['volume_size'] += 1 - self.assertRaises(exception.InvalidInput, - self.driver.create_volume_from_snapshot, - vol2, snap1) - self._assert_vol_exists(vol2['name'], False) - snap1['volume_size'] -= 1 - - # Try to create where volume size > snapshot size - vol2['size'] += 1 - if self.USESIM: - self.sim.error_injection('lsfcmap', 'speed_up') - self.driver.create_volume_from_snapshot(vol2, snap1) - self._assert_vol_exists(vol2['name'], True) - vol2['size'] -= 1 - - # Try to create where volume size = snapshot size - if self.USESIM: - self.sim.error_injection('lsfcmap', 'speed_up') - self.driver.create_volume_from_snapshot(vol3, snap1) - self._assert_vol_exists(vol3['name'], True) - - # Delete in the 'opposite' 
order to make sure it works - self.driver.delete_volume(vol3) - self._assert_vol_exists(vol3['name'], False) - self.driver.delete_volume(vol2) - self._assert_vol_exists(vol2['name'], False) - self.driver.delete_snapshot(snap1) - self._assert_vol_exists(snap1['name'], False) - self.driver.delete_volume(vol1) - self._assert_vol_exists(vol1['name'], False) - - @mock.patch.object(storwize_svc_common.StorwizeHelpers, 'add_vdisk_qos') - def test_storwize_svc_create_volfromsnap_clone_with_qos(self, - add_vdisk_qos): - vol1 = self._create_volume() - snap1 = self._generate_snap_info(vol1.id) - self.driver.create_snapshot(snap1) - vol2 = self._generate_vol_info() - vol3 = self._generate_vol_info() - fake_opts = self._get_default_opts() - - # Succeed - if self.USESIM: - self.sim.error_injection('lsfcmap', 'speed_up') - - # If the qos is empty, chvdisk should not be called - # for create_volume_from_snapshot. - with mock.patch.object(storwize_svc_iscsi.StorwizeSVCISCSIDriver, - '_get_vdisk_params') as get_vdisk_params: - get_vdisk_params.return_value = fake_opts - self.driver.create_volume_from_snapshot(vol2, snap1) - self._assert_vol_exists(vol2['name'], True) - self.assertFalse(add_vdisk_qos.called) - self.driver.delete_volume(vol2) - - # If the qos is not empty, chvdisk should be called - # for create_volume_from_snapshot. - fake_opts['qos'] = {'IOThrottling': 5000} - get_vdisk_params.return_value = fake_opts - self.driver.create_volume_from_snapshot(vol2, snap1) - self._assert_vol_exists(vol2['name'], True) - add_vdisk_qos.assert_called_once_with(vol2['name'], - fake_opts['qos']) - - if self.USESIM: - self.sim.error_injection('lsfcmap', 'speed_up') - - # If the qos is empty, chvdisk should not be called - # for create_volume_from_snapshot. 
- add_vdisk_qos.reset_mock() - fake_opts['qos'] = None - get_vdisk_params.return_value = fake_opts - self.driver.create_cloned_volume(vol3, vol2) - self._assert_vol_exists(vol3['name'], True) - self.assertFalse(add_vdisk_qos.called) - self.driver.delete_volume(vol3) - - # If the qos is not empty, chvdisk should be called - # for create_volume_from_snapshot. - fake_opts['qos'] = {'IOThrottling': 5000} - get_vdisk_params.return_value = fake_opts - self.driver.create_cloned_volume(vol3, vol2) - self._assert_vol_exists(vol3['name'], True) - add_vdisk_qos.assert_called_once_with(vol3['name'], - fake_opts['qos']) - - # Delete in the 'opposite' order to make sure it works - self.driver.delete_volume(vol3) - self._assert_vol_exists(vol3['name'], False) - self.driver.delete_volume(vol2) - self._assert_vol_exists(vol2['name'], False) - self.driver.delete_snapshot(snap1) - self._assert_vol_exists(snap1['name'], False) - self.driver.delete_volume(vol1) - self._assert_vol_exists(vol1['name'], False) - - def test_storwize_svc_delete_vol_with_fcmap(self): - vol1 = self._create_volume() - # create two snapshots - snap1 = self._generate_snap_info(vol1.id) - snap2 = self._generate_snap_info(vol1.id) - self.driver.create_snapshot(snap1) - self.driver.create_snapshot(snap2) - vol2 = self._generate_vol_info() - vol3 = self._generate_vol_info() - - # Create vol from the second snapshot - if self.USESIM: - self.sim.error_injection('lsfcmap', 'speed_up') - self.driver.create_volume_from_snapshot(vol2, snap2) - if self.USESIM: - # validate copyrate was set on the flash copy - for i, fcmap in self.sim._fcmappings_list.items(): - if fcmap['target'] == vol2['name']: - self.assertEqual('copying', fcmap['status']) - self._assert_vol_exists(vol2['name'], True) - - if self.USESIM: - self.sim.error_injection('lsfcmap', 'speed_up') - self.driver.create_cloned_volume(vol3, vol2) - - if self.USESIM: - # validate copyrate was set on the flash copy - for i, fcmap in self.sim._fcmappings_list.items(): - 
if fcmap['target'] == vol3['name']: - self.assertEqual('copying', fcmap['status']) - self._assert_vol_exists(vol3['name'], True) - - # Delete in the 'opposite' order to make sure it works - self.driver.delete_volume(vol3) - self._assert_vol_exists(vol3['name'], False) - self.driver.delete_volume(vol2) - self._assert_vol_exists(vol2['name'], False) - self.driver.delete_snapshot(snap2) - self._assert_vol_exists(snap2['name'], False) - self.driver.delete_snapshot(snap1) - self._assert_vol_exists(snap1['name'], False) - self.driver.delete_volume(vol1) - self._assert_vol_exists(vol1['name'], False) - - def test_storwize_svc_volumes(self): - # Create a first volume - volume = self._generate_vol_info() - self.driver.create_volume(volume) - - self.driver.ensure_export(None, volume) - - # Do nothing - self.driver.create_export(None, volume, {}) - self.driver.remove_export(None, volume) - - # Make sure volume attributes are as they should be - attributes = self.driver._helpers.get_vdisk_attributes(volume['name']) - attr_size = float(attributes['capacity']) / units.Gi # bytes to GB - self.assertEqual(attr_size, float(volume['size'])) - pool = _get_test_pool() - self.assertEqual(attributes['mdisk_grp_name'], pool) - - # Try to create the volume again (should fail) - self.assertRaises(exception.VolumeBackendAPIException, - self.driver.create_volume, - volume) - - # Try to delete a volume that doesn't exist (should not fail) - vol_no_exist = self._generate_vol_info() - self.driver.delete_volume(vol_no_exist) - # Ensure export for volume that doesn't exist (should not fail) - self.driver.ensure_export(None, vol_no_exist) - - # Delete the volume - self.driver.delete_volume(volume) - - def test_storwize_svc_volume_name(self): - volume = self._generate_vol_info() - self.driver.create_volume(volume) - self.driver.ensure_export(None, volume) - - # Ensure lsvdisk can find the volume by name - attributes = self.driver._helpers.get_vdisk_attributes(volume['name']) - self.assertIn('name', 
attributes) - self.assertEqual(volume['name'], attributes['name']) - self.driver.delete_volume(volume) - - def test_storwize_svc_volume_params(self): - # Option test matrix - # Option Value Covered by test # - # rsize -1 1 - # rsize 2 2,3 - # warning 0 2 - # warning 80 3 - # autoexpand True 2 - # autoexpand False 3 - # grainsize 32 2 - # grainsize 256 3 - # compression True 4 - # compression False 2,3 - # easytier True 1,3 - # easytier False 2 - # iogrp 0 1 - # iogrp 1 2 - # nofmtdisk False 1 - # nofmtdisk True 1 - - opts_list = [] - chck_list = [] - opts_list.append({'rsize': -1, 'easytier': True, 'iogrp': '0'}) - chck_list.append({'free_capacity': '0', 'easy_tier': 'on', - 'IO_group_id': '0'}) - - opts_list.append({'rsize': -1, 'nofmtdisk': False}) - chck_list.append({'formatted': 'yes'}) - - opts_list.append({'rsize': -1, 'nofmtdisk': True}) - chck_list.append({'formatted': 'no'}) - - test_iogrp = '1' if self.USESIM else '0' - opts_list.append({'rsize': 2, 'compression': False, 'warning': 0, - 'autoexpand': True, 'grainsize': 32, - 'easytier': False, 'iogrp': test_iogrp}) - chck_list.append({'-free_capacity': '0', 'compressed_copy': 'no', - 'warning': '0', 'autoexpand': 'on', - 'grainsize': '32', 'easy_tier': 'off', - 'IO_group_id': (test_iogrp)}) - opts_list.append({'rsize': 2, 'compression': False, 'warning': 80, - 'autoexpand': False, 'grainsize': 256, - 'easytier': True}) - chck_list.append({'-free_capacity': '0', 'compressed_copy': 'no', - 'warning': '80', 'autoexpand': 'off', - 'grainsize': '256', 'easy_tier': 'on'}) - opts_list.append({'rsize': 2, 'compression': True}) - chck_list.append({'-free_capacity': '0', - 'compressed_copy': 'yes'}) - - for idx in range(len(opts_list)): - attrs = self._create_test_vol(opts_list[idx]) - for k, v in chck_list[idx].items(): - try: - if k[0] == '-': - k = k[1:] - self.assertNotEqual(v, attrs[k]) - else: - self.assertEqual(v, attrs[k]) - except processutils.ProcessExecutionError as e: - if 'CMMVC7050E' not in e.stderr: 
- raise - - def test_storwize_svc_unicode_host_and_volume_names(self): - # We'll check with iSCSI only - nothing protocol-dependent here - self.driver.do_setup(None) - - rand_id = random.randint(10000, 99999) - volume1 = self._generate_vol_info() - self.driver.create_volume(volume1) - self._assert_vol_exists(volume1['name'], True) - - self.assertRaises(exception.VolumeDriverException, - self.driver._helpers.create_host, - {'host': 12345}) - - # Add a host first to make life interesting (this host and - # conn['host'] should be translated to the same prefix, and the - # initiator should differentiate - tmpconn1 = {'initiator': u'unicode:initiator1.%s' % rand_id, - 'ip': '10.10.10.10', - 'host': u'unicode.foo}.bar{.baz-%s' % rand_id} - self.driver._helpers.create_host(tmpconn1, iscsi=True) - - # Add a host with a different prefix - tmpconn2 = {'initiator': u'unicode:initiator2.%s' % rand_id, - 'ip': '10.10.10.11', - 'host': u'unicode.hello.world-%s' % rand_id} - self.driver._helpers.create_host(tmpconn2, iscsi=True) - - conn = {'initiator': u'unicode:initiator3.%s' % rand_id, - 'ip': '10.10.10.12', - 'host': u'unicode.foo}.bar}.baz-%s' % rand_id} - self.driver.initialize_connection(volume1, conn) - host_name = self.driver._helpers.get_host_from_connector( - conn, iscsi=True) - self.assertIsNotNone(host_name) - self.driver.terminate_connection(volume1, conn) - host_name = self.driver._helpers.get_host_from_connector(conn) - self.assertIsNone(host_name) - self.driver.delete_volume(volume1) - - # Clean up temporary hosts - for tmpconn in [tmpconn1, tmpconn2]: - host_name = self.driver._helpers.get_host_from_connector( - tmpconn, iscsi=True) - self.assertIsNotNone(host_name) - self.driver._helpers.delete_host(host_name) - - def test_storwize_svc_delete_volume_snapshots(self): - # Create a volume with two snapshots - master = self._create_volume() - - # Fail creating a snapshot - will force delete the snapshot - if self.USESIM and False: - snap = 
self._generate_snap_info(master.id) - self.sim.error_injection('startfcmap', 'bad_id') - self.assertRaises(exception.VolumeBackendAPIException, - self.driver.create_snapshot, snap) - self._assert_vol_exists(snap['name'], False) - - # Delete a snapshot - snap = self._generate_snap_info(master.id) - self.driver.create_snapshot(snap) - self._assert_vol_exists(snap['name'], True) - self.driver.delete_snapshot(snap) - self._assert_vol_exists(snap['name'], False) - - # Delete a volume with snapshots (regular) - snap = self._generate_snap_info(master.id) - self.driver.create_snapshot(snap) - self._assert_vol_exists(snap['name'], True) - self.driver.delete_volume(master) - self._assert_vol_exists(master['name'], False) - - # Fail create volume from snapshot - will force delete the volume - if self.USESIM: - volfs = self._generate_vol_info() - self.sim.error_injection('startfcmap', 'bad_id') - self.sim.error_injection('lsfcmap', 'speed_up') - self.assertRaises(exception.VolumeBackendAPIException, - self.driver.create_volume_from_snapshot, - volfs, snap) - self._assert_vol_exists(volfs['name'], False) - - # Create volume from snapshot and delete it - volfs = self._generate_vol_info() - if self.USESIM: - self.sim.error_injection('lsfcmap', 'speed_up') - self.driver.create_volume_from_snapshot(volfs, snap) - self._assert_vol_exists(volfs['name'], True) - self.driver.delete_volume(volfs) - self._assert_vol_exists(volfs['name'], False) - - # Create volume from snapshot and delete the snapshot - volfs = self._generate_vol_info() - if self.USESIM: - self.sim.error_injection('lsfcmap', 'speed_up') - self.driver.create_volume_from_snapshot(volfs, snap) - self.driver.delete_snapshot(snap) - self._assert_vol_exists(snap['name'], False) - - # Fail create clone - will force delete the target volume - if self.USESIM: - clone = self._generate_vol_info() - self.sim.error_injection('startfcmap', 'bad_id') - self.sim.error_injection('lsfcmap', 'speed_up') - 
self.assertRaises(exception.VolumeBackendAPIException, - self.driver.create_cloned_volume, - clone, volfs) - self._assert_vol_exists(clone['name'], False) - - # Create the clone, delete the source and target - clone = self._generate_vol_info() - if self.USESIM: - self.sim.error_injection('lsfcmap', 'speed_up') - self.driver.create_cloned_volume(clone, volfs) - self._assert_vol_exists(clone['name'], True) - self.driver.delete_volume(volfs) - self._assert_vol_exists(volfs['name'], False) - self.driver.delete_volume(clone) - self._assert_vol_exists(clone['name'], False) - - @ddt.data((True, None), (True, 5), (False, -1), (False, 100)) - @ddt.unpack - def test_storwize_svc_get_volume_stats( - self, is_thin_provisioning_enabled, rsize): - self._set_flag('reserved_percentage', 25) - self._set_flag('storwize_svc_multihostmap_enabled', True) - self._set_flag('storwize_svc_vol_rsize', rsize) - stats = self.driver.get_volume_stats() - for each_pool in stats['pools']: - self.assertIn(each_pool['pool_name'], - self._def_flags['storwize_svc_volpool_name']) - self.assertTrue(each_pool['multiattach']) - self.assertLessEqual(each_pool['free_capacity_gb'], - each_pool['total_capacity_gb']) - self.assertLessEqual(each_pool['allocated_capacity_gb'], - each_pool['total_capacity_gb']) - self.assertEqual(25, each_pool['reserved_percentage']) - self.assertEqual(is_thin_provisioning_enabled, - each_pool['thin_provisioning_support']) - self.assertEqual(not is_thin_provisioning_enabled, - each_pool['thick_provisioning_support']) - self.assertTrue(each_pool['consistent_group_snapshot_enabled']) - if self.USESIM: - expected = 'storwize-svc-sim' - self.assertEqual(expected, stats['volume_backend_name']) - for each_pool in stats['pools']: - self.assertIn(each_pool['pool_name'], - self._def_flags['storwize_svc_volpool_name']) - self.assertAlmostEqual(3328.0, each_pool['total_capacity_gb']) - self.assertAlmostEqual(3287.5, each_pool['free_capacity_gb']) - self.assertAlmostEqual(25.0, - 
each_pool['allocated_capacity_gb']) - if is_thin_provisioning_enabled: - self.assertAlmostEqual( - 1576.96, each_pool['provisioned_capacity_gb']) - - def test_get_pool(self): - ctxt = testutils.get_test_admin_context() - type_ref = volume_types.create(ctxt, 'testtype', None) - volume = self._generate_vol_info() - volume.volume_type_id = type_ref['id'] - volume.volume_type = objects.VolumeType.get_by_id(ctxt, - type_ref['id']) - self.driver.create_volume(volume) - vol = self.driver._helpers.get_vdisk_attributes(volume.name) - self.assertEqual(vol['mdisk_grp_name'], - self.driver.get_pool(volume)) - - self.driver.delete_volume(volume) - volume_types.destroy(ctxt, type_ref['id']) - - def test_storwize_svc_extend_volume(self): - volume = self._create_volume() - self.driver.extend_volume(volume, '13') - attrs = self.driver._helpers.get_vdisk_attributes(volume['name']) - vol_size = int(attrs['capacity']) / units.Gi - - self.assertAlmostEqual(vol_size, 13) - - snap = self._generate_snap_info(volume.id) - self.driver.create_snapshot(snap) - self._assert_vol_exists(snap['name'], True) - self.assertRaises(exception.VolumeDriverException, - self.driver.extend_volume, volume, '16') - - self.driver.delete_snapshot(snap) - self.driver.delete_volume(volume) - - @mock.patch.object(storwize_rep.StorwizeSVCReplicationGlobalMirror, - 'create_relationship') - @mock.patch.object(storwize_rep.StorwizeSVCReplicationGlobalMirror, - 'extend_target_volume') - @mock.patch.object(storwize_svc_common.StorwizeHelpers, - 'delete_relationship') - @mock.patch.object(storwize_svc_common.StorwizeHelpers, - 'get_relationship_info') - def _storwize_svc_extend_volume_replication(self, - get_relationship, - delete_relationship, - extend_target_volume, - create_relationship): - fake_target = mock.Mock() - rep_type = 'global' - self.driver.replications[rep_type] = ( - self.driver.replication_factory(rep_type, fake_target)) - volume = self._create_volume() - volume['replication_status'] = 
fields.ReplicationStatus.ENABLED - fake_target_vol = 'vol-target-id' - get_relationship.return_value = {'aux_vdisk_name': fake_target_vol} - with mock.patch.object( - self.driver, - '_get_volume_replicated_type_mirror') as mirror_type: - mirror_type.return_value = 'global' - self.driver.extend_volume(volume, '13') - attrs = self.driver._helpers.get_vdisk_attributes(volume['name']) - vol_size = int(attrs['capacity']) / units.Gi - self.assertAlmostEqual(vol_size, 13) - delete_relationship.assert_called_once_with(volume['name']) - extend_target_volume.assert_called_once_with(fake_target_vol, - 12) - create_relationship.assert_called_once_with(volume, - fake_target_vol) - - self.driver.delete_volume(volume) - - def _storwize_svc_extend_volume_replication_failover(self): - volume = self._create_volume() - volume['replication_status'] = fields.ReplicationStatus.FAILED_OVER - with mock.patch.object( - self.driver, - '_get_volume_replicated_type_mirror') as mirror_type: - mirror_type.return_value = 'global' - self.driver.extend_volume(volume, '13') - attrs = self.driver._helpers.get_vdisk_attributes(volume['name']) - vol_size = int(attrs['capacity']) / units.Gi - self.assertAlmostEqual(vol_size, 13) - - self.driver.delete_volume(volume) - - def _check_loc_info(self, capabilities, expected): - host = {'host': 'foo', 'capabilities': capabilities} - vol = {'name': 'test', 'id': 1, 'size': 1} - ctxt = context.get_admin_context() - moved, model_update = self.driver.migrate_volume(ctxt, vol, host) - self.assertEqual(expected['moved'], moved) - self.assertEqual(expected['model_update'], model_update) - - def test_storwize_svc_migrate_bad_loc_info(self): - self._check_loc_info({}, {'moved': False, 'model_update': None}) - cap = {'location_info': 'foo'} - self._check_loc_info(cap, {'moved': False, 'model_update': None}) - cap = {'location_info': 'FooDriver:foo:bar'} - self._check_loc_info(cap, {'moved': False, 'model_update': None}) - cap = {'location_info': 
'StorwizeSVCDriver:foo:bar'} - self._check_loc_info(cap, {'moved': False, 'model_update': None}) - - def test_storwize_svc_volume_migrate(self): - # Make sure we don't call migrate_volume_vdiskcopy - self.driver.do_setup(None) - loc = ('StorwizeSVCDriver:' + self.driver._state['system_id'] + - ':openstack2') - cap = {'location_info': loc, 'extent_size': '256'} - host = {'host': 'openstack@svc#openstack2', 'capabilities': cap} - ctxt = context.get_admin_context() - volume = self._create_volume() - volume['volume_type_id'] = None - self.driver.migrate_volume(ctxt, volume, host) - self._delete_volume(volume) - - def test_storwize_svc_get_vdisk_params(self): - self.driver.do_setup(None) - fake_qos = {'qos:IOThrottling': 5000} - expected_qos = {'IOThrottling': 5000} - fake_opts = self._get_default_opts() - # The parameters retured should be the same to the default options, - # if the QoS is empty. - vol_type_empty_qos = self._create_volume_type_qos(True, None) - type_id = vol_type_empty_qos['id'] - params = self.driver._get_vdisk_params(type_id, - volume_type=vol_type_empty_qos, - volume_metadata=None) - self.assertEqual(fake_opts, params) - volume_types.destroy(self.ctxt, type_id) - - # If the QoS is set via the qos association with the volume type, - # qos value should be set in the retured parameters. - vol_type_qos = self._create_volume_type_qos(False, fake_qos) - type_id = vol_type_qos['id'] - # If type_id is not none and volume_type is none, it should work fine. - params = self.driver._get_vdisk_params(type_id, volume_type=None, - volume_metadata=None) - self.assertEqual(expected_qos, params['qos']) - # If type_id is not none and volume_type is not none, it should - # work fine. - params = self.driver._get_vdisk_params(type_id, - volume_type=vol_type_qos, - volume_metadata=None) - self.assertEqual(expected_qos, params['qos']) - # If type_id is none and volume_type is not none, it should work fine. 
- params = self.driver._get_vdisk_params(None, volume_type=vol_type_qos, - volume_metadata=None) - self.assertEqual(expected_qos, params['qos']) - # If both type_id and volume_type are none, no qos will be returned - # in the parameter. - params = self.driver._get_vdisk_params(None, volume_type=None, - volume_metadata=None) - self.assertIsNone(params['qos']) - qos_spec = volume_types.get_volume_type_qos_specs(type_id) - volume_types.destroy(self.ctxt, type_id) - qos_specs.delete(self.ctxt, qos_spec['qos_specs']['id']) - - # If the QoS is set via the extra specs in the volume type, - # qos value should be set in the retured parameters. - vol_type_qos = self._create_volume_type_qos(True, fake_qos) - type_id = vol_type_qos['id'] - # If type_id is not none and volume_type is none, it should work fine. - params = self.driver._get_vdisk_params(type_id, volume_type=None, - volume_metadata=None) - self.assertEqual(expected_qos, params['qos']) - # If type_id is not none and volume_type is not none, - # it should work fine. - params = self.driver._get_vdisk_params(type_id, - volume_type=vol_type_qos, - volume_metadata=None) - self.assertEqual(expected_qos, params['qos']) - # If type_id is none and volume_type is not none, - # it should work fine. - params = self.driver._get_vdisk_params(None, - volume_type=vol_type_qos, - volume_metadata=None) - self.assertEqual(expected_qos, params['qos']) - # If both type_id and volume_type are none, no qos will be returned - # in the parameter. - params = self.driver._get_vdisk_params(None, volume_type=None, - volume_metadata=None) - self.assertIsNone(params['qos']) - volume_types.destroy(self.ctxt, type_id) - - # If the QoS is set in the volume metadata, - # qos value should be set in the retured parameters. 
- metadata = [{'key': 'qos:IOThrottling', 'value': 4000}] - expected_qos_metadata = {'IOThrottling': 4000} - params = self.driver._get_vdisk_params(None, volume_type=None, - volume_metadata=metadata) - self.assertEqual(expected_qos_metadata, params['qos']) - - # If the QoS is set both in the metadata and the volume type, the one - # in the volume type will take effect. - vol_type_qos = self._create_volume_type_qos(True, fake_qos) - type_id = vol_type_qos['id'] - params = self.driver._get_vdisk_params(type_id, volume_type=None, - volume_metadata=metadata) - self.assertEqual(expected_qos, params['qos']) - volume_types.destroy(self.ctxt, type_id) - - # If the QoS is set both via the qos association and the - # extra specs, the one from the qos association will take effect. - fake_qos_associate = {'qos:IOThrottling': 6000} - expected_qos_associate = {'IOThrottling': 6000} - vol_type_qos = self._create_volume_type_qos_both(fake_qos, - fake_qos_associate) - type_id = vol_type_qos['id'] - params = self.driver._get_vdisk_params(type_id, volume_type=None, - volume_metadata=None) - self.assertEqual(expected_qos_associate, params['qos']) - qos_spec = volume_types.get_volume_type_qos_specs(type_id) - volume_types.destroy(self.ctxt, type_id) - qos_specs.delete(self.ctxt, qos_spec['qos_specs']['id']) - - @mock.patch.object(storwize_svc_common.StorwizeHelpers, - 'disable_vdisk_qos') - @mock.patch.object(storwize_svc_common.StorwizeHelpers, - 'update_vdisk_qos') - def test_storwize_svc_retype_no_copy(self, update_vdisk_qos, - disable_vdisk_qos): - self.driver.do_setup(None) - loc = ('StorwizeSVCDriver:' + self.driver._state['system_id'] + - ':openstack') - cap = {'location_info': loc, 'extent_size': '128'} - self.driver._stats = {'location_info': loc} - host = {'host': 'openstack@svc#openstack', 'capabilities': cap} - ctxt = context.get_admin_context() - - key_specs_old = {'easytier': False, 'warning': 2, 'autoexpand': True} - key_specs_new = {'easytier': True, 'warning': 5, 
'autoexpand': False} - old_type_ref = volume_types.create(ctxt, 'old', key_specs_old) - new_type_ref = volume_types.create(ctxt, 'new', key_specs_new) - - diff, _equal = volume_types.volume_types_diff(ctxt, old_type_ref['id'], - new_type_ref['id']) - - old_type = objects.VolumeType.get_by_id(ctxt, - old_type_ref['id']) - volume = self._generate_vol_info(old_type) - volume['host'] = host['host'] - new_type = objects.VolumeType.get_by_id(ctxt, - new_type_ref['id']) - - self.driver.create_volume(volume) - self.driver.retype(ctxt, volume, new_type, diff, host) - attrs = self.driver._helpers.get_vdisk_attributes(volume['name']) - self.assertEqual('on', attrs['easy_tier'], 'Volume retype failed') - self.assertEqual('5', attrs['warning'], 'Volume retype failed') - self.assertEqual('off', attrs['autoexpand'], 'Volume retype failed') - self.driver.delete_volume(volume) - - fake_opts = self._get_default_opts() - fake_opts_old = self._get_default_opts() - fake_opts_old['qos'] = {'IOThrottling': 4000} - fake_opts_qos = self._get_default_opts() - fake_opts_qos['qos'] = {'IOThrottling': 5000} - self.driver.create_volume(volume) - with mock.patch.object(storwize_svc_iscsi.StorwizeSVCISCSIDriver, - '_get_vdisk_params') as get_vdisk_params: - # If qos is empty for both the source and target volumes, - # add_vdisk_qos and disable_vdisk_qos will not be called for - # retype. - get_vdisk_params.side_effect = [fake_opts, fake_opts] - self.driver.retype(ctxt, volume, new_type, diff, host) - self.assertFalse(update_vdisk_qos.called) - self.assertFalse(disable_vdisk_qos.called) - self.driver.delete_volume(volume) - - self.driver.create_volume(volume) - update_vdisk_qos.reset_mock() - with mock.patch.object(storwize_svc_iscsi.StorwizeSVCISCSIDriver, - '_get_vdisk_params') as get_vdisk_params: - # If qos is specified for both source and target volumes, - # add_vdisk_qos will be called for retype, and disable_vdisk_qos - # will not be called. 
- get_vdisk_params.side_effect = [fake_opts_old, fake_opts_qos] - self.driver.retype(ctxt, volume, new_type, diff, host) - update_vdisk_qos.assert_called_with(volume['name'], - fake_opts_qos['qos']) - self.assertFalse(disable_vdisk_qos.called) - self.driver.delete_volume(volume) - - self.driver.create_volume(volume) - update_vdisk_qos.reset_mock() - with mock.patch.object(storwize_svc_iscsi.StorwizeSVCISCSIDriver, - '_get_vdisk_params') as get_vdisk_params: - # If qos is empty for source and speficied for target volume, - # add_vdisk_qos will be called for retype, and disable_vdisk_qos - # will not be called. - get_vdisk_params.side_effect = [fake_opts, fake_opts_qos] - self.driver.retype(ctxt, volume, new_type, diff, host) - update_vdisk_qos.assert_called_with(volume['name'], - fake_opts_qos['qos']) - self.assertFalse(disable_vdisk_qos.called) - self.driver.delete_volume(volume) - - self.driver.create_volume(volume) - update_vdisk_qos.reset_mock() - with mock.patch.object(storwize_svc_iscsi.StorwizeSVCISCSIDriver, - '_get_vdisk_params') as get_vdisk_params: - # If qos is empty for target volume and specified for source - # volume, add_vdisk_qos will not be called for retype, and - # disable_vdisk_qos will be called. 
- get_vdisk_params.side_effect = [fake_opts_qos, fake_opts] - self.driver.retype(ctxt, volume, new_type, diff, host) - self.assertFalse(update_vdisk_qos.called) - disable_vdisk_qos.assert_called_with(volume['name'], - fake_opts_qos['qos']) - self.driver.delete_volume(volume) - - def test_storwize_svc_retype_only_change_iogrp(self): - self.driver.do_setup(None) - loc = ('StorwizeSVCDriver:' + self.driver._state['system_id'] + - ':openstack') - cap = {'location_info': loc, 'extent_size': '128'} - self.driver._stats = {'location_info': loc} - host = {'host': 'openstack@svc#openstack', 'capabilities': cap} - ctxt = context.get_admin_context() - - key_specs_old = {'iogrp': 0} - key_specs_new = {'iogrp': 1} - old_type_ref = volume_types.create(ctxt, 'old', key_specs_old) - new_type_ref = volume_types.create(ctxt, 'new', key_specs_new) - - diff, _equal = volume_types.volume_types_diff(ctxt, old_type_ref['id'], - new_type_ref['id']) - - old_type = objects.VolumeType.get_by_id(ctxt, - old_type_ref['id']) - volume = self._generate_vol_info(old_type) - volume['host'] = host['host'] - new_type = objects.VolumeType.get_by_id(ctxt, - new_type_ref['id']) - - self.driver.create_volume(volume) - self.driver.retype(ctxt, volume, new_type, diff, host) - attrs = self.driver._helpers.get_vdisk_attributes(volume['name']) - self.assertEqual('1', attrs['IO_group_id'], 'Volume retype ' - 'failed') - self.driver.delete_volume(volume) - - @mock.patch.object(storwize_svc_common.StorwizeHelpers, - 'disable_vdisk_qos') - @mock.patch.object(storwize_svc_common.StorwizeHelpers, - 'update_vdisk_qos') - def test_storwize_svc_retype_need_copy(self, update_vdisk_qos, - disable_vdisk_qos): - self.driver.do_setup(None) - loc = ('StorwizeSVCDriver:' + self.driver._state['system_id'] + - ':openstack') - cap = {'location_info': loc, 'extent_size': '128'} - self.driver._stats = {'location_info': loc} - host = {'host': 'openstack@svc#openstack', 'capabilities': cap} - ctxt = context.get_admin_context() - - 
key_specs_old = {'compression': True, 'iogrp': 0} - key_specs_new = {'compression': False, 'iogrp': 1} - old_type_ref = volume_types.create(ctxt, 'old', key_specs_old) - new_type_ref = volume_types.create(ctxt, 'new', key_specs_new) - - diff, _equal = volume_types.volume_types_diff(ctxt, old_type_ref['id'], - new_type_ref['id']) - - old_type = objects.VolumeType.get_by_id(ctxt, - old_type_ref['id']) - volume = self._generate_vol_info(old_type) - volume['host'] = host['host'] - new_type = objects.VolumeType.get_by_id(ctxt, - new_type_ref['id']) - - self.driver.create_volume(volume) - self.driver.retype(ctxt, volume, new_type, diff, host) - attrs = self.driver._helpers.get_vdisk_attributes(volume['name']) - self.assertEqual('no', attrs['compressed_copy']) - self.assertEqual('1', attrs['IO_group_id'], 'Volume retype ' - 'failed') - self.driver.delete_volume(volume) - - fake_opts = self._get_default_opts() - fake_opts_old = self._get_default_opts() - fake_opts_old['qos'] = {'IOThrottling': 4000} - fake_opts_qos = self._get_default_opts() - fake_opts_qos['qos'] = {'IOThrottling': 5000} - self.driver.create_volume(volume) - with mock.patch.object(storwize_svc_iscsi.StorwizeSVCISCSIDriver, - '_get_vdisk_params') as get_vdisk_params: - # If qos is empty for both the source and target volumes, - # add_vdisk_qos and disable_vdisk_qos will not be called for - # retype. - get_vdisk_params.side_effect = [fake_opts, fake_opts] - self.driver.retype(ctxt, volume, new_type, diff, host) - self.assertFalse(update_vdisk_qos.called) - self.assertFalse(disable_vdisk_qos.called) - self.driver.delete_volume(volume) - - self.driver.create_volume(volume) - update_vdisk_qos.reset_mock() - with mock.patch.object(storwize_svc_iscsi.StorwizeSVCISCSIDriver, - '_get_vdisk_params') as get_vdisk_params: - # If qos is specified for both source and target volumes, - # add_vdisk_qos will be called for retype, and disable_vdisk_qos - # will not be called. 
- get_vdisk_params.side_effect = [fake_opts_old, fake_opts_qos] - self.driver.retype(ctxt, volume, new_type, diff, host) - update_vdisk_qos.assert_called_with(volume['name'], - fake_opts_qos['qos']) - self.assertFalse(disable_vdisk_qos.called) - self.driver.delete_volume(volume) - - self.driver.create_volume(volume) - update_vdisk_qos.reset_mock() - with mock.patch.object(storwize_svc_iscsi.StorwizeSVCISCSIDriver, - '_get_vdisk_params') as get_vdisk_params: - # If qos is empty for source and speficied for target volume, - # add_vdisk_qos will be called for retype, and disable_vdisk_qos - # will not be called. - get_vdisk_params.side_effect = [fake_opts, fake_opts_qos] - self.driver.retype(ctxt, volume, new_type, diff, host) - update_vdisk_qos.assert_called_with(volume['name'], - fake_opts_qos['qos']) - self.assertFalse(disable_vdisk_qos.called) - self.driver.delete_volume(volume) - - self.driver.create_volume(volume) - update_vdisk_qos.reset_mock() - with mock.patch.object(storwize_svc_iscsi.StorwizeSVCISCSIDriver, - '_get_vdisk_params') as get_vdisk_params: - # If qos is empty for target volume and specified for source - # volume, add_vdisk_qos will not be called for retype, and - # disable_vdisk_qos will be called. 
- get_vdisk_params.side_effect = [fake_opts_qos, fake_opts] - self.driver.retype(ctxt, volume, new_type, diff, host) - self.assertFalse(update_vdisk_qos.called) - disable_vdisk_qos.assert_called_with(volume['name'], - fake_opts_qos['qos']) - self.driver.delete_volume(volume) - - def test_set_storage_code_level_success(self): - res = self.driver._helpers.get_system_info() - if self.USESIM: - self.assertEqual((7, 2, 0, 0), res['code_level'], - 'Get code level error') - - @mock.patch.object(storwize_svc_common.StorwizeHelpers, 'rename_vdisk') - def test_storwize_update_migrated_volume(self, rename_vdisk): - ctxt = testutils.get_test_admin_context() - backend_volume = self._create_volume() - volume = self._create_volume() - model_update = self.driver.update_migrated_volume(ctxt, volume, - backend_volume, - 'available') - rename_vdisk.assert_called_once_with(backend_volume.name, volume.name) - self.assertEqual({'_name_id': None}, model_update) - - rename_vdisk.reset_mock() - rename_vdisk.side_effect = exception.VolumeBackendAPIException - model_update = self.driver.update_migrated_volume(ctxt, volume, - backend_volume, - 'available') - self.assertEqual({'_name_id': backend_volume.id}, model_update) - - rename_vdisk.reset_mock() - rename_vdisk.side_effect = exception.VolumeBackendAPIException - model_update = self.driver.update_migrated_volume(ctxt, volume, - backend_volume, - 'attached') - self.assertEqual({'_name_id': backend_volume.id}, model_update) - - def test_storwize_vdisk_copy_ops(self): - ctxt = testutils.get_test_admin_context() - volume = self._create_volume() - driver = self.driver - dest_pool = volume_utils.extract_host(volume['host'], 'pool') - new_ops = driver._helpers.add_vdisk_copy(volume['name'], dest_pool, - None, self.driver._state, - self.driver.configuration) - self.driver._add_vdisk_copy_op(ctxt, volume, new_ops) - admin_metadata = self.db.volume_admin_metadata_get(ctxt, volume['id']) - self.assertEqual(":".join(x for x in new_ops), - 
admin_metadata['vdiskcopyops'], - 'Storwize driver add vdisk copy error.') - self.driver._check_volume_copy_ops() - self.driver._rm_vdisk_copy_op(ctxt, volume, new_ops[0], new_ops[1]) - admin_metadata = self.db.volume_admin_metadata_get(ctxt, volume['id']) - self.assertIsNone(admin_metadata.get('vdiskcopyops', None), - 'Storwize driver delete vdisk copy error') - self._delete_volume(volume) - - def test_storwize_delete_with_vdisk_copy_ops(self): - volume = self._create_volume() - self.driver._vdiskcopyops = {volume['id']: [('0', '1')]} - with mock.patch.object(self.driver, '_vdiskcopyops_loop'): - self.assertIn(volume['id'], self.driver._vdiskcopyops) - self.driver.delete_volume(volume) - self.assertNotIn(volume['id'], self.driver._vdiskcopyops) - - # Test groups operation #### - @mock.patch('cinder.volume.utils.is_group_a_cg_snapshot_type') - def test_storwize_group_create_with_replication( - self, is_grp_a_cg_snapshot_type): - """Test group create.""" - is_grp_a_cg_snapshot_type.side_effect = True - spec = {'replication_enabled': ' True', - 'replication_type': ' metro'} - rep_type_ref = volume_types.create(self.ctxt, 'rep_type', spec) - rep_group = testutils.create_group( - self.ctxt, group_type_id=fake.GROUP_TYPE_ID, - volume_type_ids=[rep_type_ref['id']]) - - model_update = self.driver.create_group(self.ctxt, rep_group) - self.assertEqual(fields.GroupStatus.ERROR, - model_update['status']) - - self.assertFalse(is_grp_a_cg_snapshot_type.called) - - @mock.patch('cinder.volume.utils.is_group_a_cg_snapshot_type') - def test_storwize_group_create(self, is_grp_a_cg_snapshot_type): - """Test group create.""" - is_grp_a_cg_snapshot_type.side_effect = [False, True] - group = mock.MagicMock() - - self.assertRaises(NotImplementedError, - self.driver.create_group, self.ctxt, group) - - model_update = self.driver.create_group(self.ctxt, group) - self.assertEqual(fields.GroupStatus.AVAILABLE, - model_update['status']) - - 
@mock.patch('oslo_service.loopingcall.FixedIntervalLoopingCall', - new=testutils.ZeroIntervalLoopingCall) - @mock.patch('cinder.volume.utils.is_group_a_cg_snapshot_type') - def test_storwize_delete_group(self, is_grp_a_cg_snapshot_type): - is_grp_a_cg_snapshot_type.side_effect = [False, True] - type_ref = volume_types.create(self.ctxt, 'testtype', None) - group = testutils.create_group(self.ctxt, - group_type_id=fake.GROUP_TYPE_ID, - volume_type_id=type_ref['id']) - - self._create_volume(volume_type_id=type_ref['id'], group_id=group.id) - self._create_volume(volume_type_id=type_ref['id'], group_id=group.id) - volumes = self.db.volume_get_all_by_generic_group( - self.ctxt.elevated(), group.id) - self.assertRaises(NotImplementedError, - self.driver.delete_group, - self.ctxt, group, volumes) - - model_update = self.driver.delete_group(self.ctxt, group, volumes) - self.assertEqual(fields.GroupStatus.DELETED, - model_update[0]['status']) - for volume in model_update[1]: - self.assertEqual('deleted', volume['status']) - - @mock.patch('cinder.volume.utils.is_group_a_cg_snapshot_type') - def test_storwize_group_update(self, is_grp_a_cg_snapshot_type): - """Test group update.""" - is_grp_a_cg_snapshot_type.side_effect = [False, True] - group = mock.MagicMock() - self.assertRaises(NotImplementedError, self.driver.update_group, - self.ctxt, group, None, None) - - (model_update, add_volumes_update, - remove_volumes_update) = self.driver.update_group(self.ctxt, group) - self.assertIsNone(model_update) - self.assertIsNone(add_volumes_update) - self.assertIsNone(remove_volumes_update) - - @mock.patch('oslo_service.loopingcall.FixedIntervalLoopingCall', - new=testutils.ZeroIntervalLoopingCall) - @mock.patch('cinder.volume.utils.is_group_a_cg_snapshot_type') - def test_storwize_create_group_snapshot(self, is_grp_a_cg_snapshot_type): - is_grp_a_cg_snapshot_type.side_effect = [False, True] - type_ref = volume_types.create(self.ctxt, 'testtype', None) - group = 
testutils.create_group(self.ctxt, - group_type_id=fake.GROUP_TYPE_ID, - volume_type_id=type_ref['id']) - - self._create_volume(volume_type_id=type_ref['id'], group_id=group.id) - self._create_volume(volume_type_id=type_ref['id'], group_id=group.id) - group_snapshot, snapshots = self._create_group_snapshot_in_db( - group.id) - self.assertRaises(NotImplementedError, - self.driver.create_group_snapshot, - self.ctxt, group_snapshot, snapshots) - - (model_update, - snapshots_model_update) = self.driver.create_group_snapshot( - self.ctxt, group_snapshot, snapshots) - self.assertEqual(fields.GroupSnapshotStatus.AVAILABLE, - model_update['status'], - "CGSnapshot created failed") - - for snapshot in snapshots_model_update: - self.assertEqual(fields.SnapshotStatus.AVAILABLE, - snapshot['status']) - - @mock.patch('oslo_service.loopingcall.FixedIntervalLoopingCall', - new=testutils.ZeroIntervalLoopingCall) - @mock.patch('cinder.volume.utils.is_group_a_cg_snapshot_type') - def test_storwize_delete_group_snapshot(self, is_grp_a_cg_snapshot_type): - is_grp_a_cg_snapshot_type.side_effect = [True, False, True] - type_ref = volume_types.create(self.ctxt, 'testtype', None) - group = testutils.create_group(self.ctxt, - group_type_id=fake.GROUP_TYPE_ID, - volume_type_id=type_ref['id']) - - self._create_volume(volume_type_id=type_ref['id'], group_id=group.id) - self._create_volume(volume_type_id=type_ref['id'], group_id=group.id) - - group_snapshot, snapshots = self._create_group_snapshot(group.id) - self.assertRaises(NotImplementedError, - self.driver.delete_group_snapshot, - self.ctxt, group_snapshot, snapshots) - - model_update = self.driver.delete_group_snapshot(self.ctxt, - group_snapshot, - snapshots) - self.assertEqual(fields.GroupSnapshotStatus.DELETED, - model_update[0]['status']) - for volume in model_update[1]: - self.assertEqual(fields.SnapshotStatus.DELETED, volume['status']) - - @mock.patch('oslo_service.loopingcall.FixedIntervalLoopingCall', - 
new=testutils.ZeroIntervalLoopingCall) - def test_storwize_create_group_from_src_invalid(self): - # Invalid input case for create group from src - type_ref = volume_types.create(self.ctxt, 'testtype', None) - spec = {'consistent_group_snapshot_enabled': ' True'} - cg_type_ref = group_types.create(self.ctxt, 'cg_type', spec) - vg_type_ref = group_types.create(self.ctxt, 'vg_type', None) - - # create group in db - group = self._create_group_in_db(volume_type_id=type_ref.id, - group_type_id=vg_type_ref.id) - self.assertRaises(NotImplementedError, - self.driver.create_group_from_src, - self.ctxt, group, None, None, None, - None, None) - - group = self._create_group_in_db(volume_type_id=type_ref.id, - group_type_id=cg_type_ref.id) - - # create volumes in db - vol1 = testutils.create_volume(self.ctxt, volume_type_id=type_ref.id, - group_id=group.id) - vol2 = testutils.create_volume(self.ctxt, volume_type_id=type_ref.id, - group_id=group.id) - volumes = [vol1, vol2] - - source_cg = self._create_group_in_db(volume_type_id=type_ref.id, - group_type_id=cg_type_ref.id) - - # Add volumes to source CG - src_vol1 = self._create_volume(volume_type_id=type_ref.id, - group_id=source_cg['id']) - src_vol2 = self._create_volume(volume_type_id=type_ref.id, - group_id=source_cg['id']) - source_vols = [src_vol1, src_vol2] - - group_snapshot, snapshots = self._create_group_snapshot( - source_cg['id'], group_type_id=cg_type_ref.id) - - # Create group from src with null input - self.assertRaises(exception.InvalidInput, - self.driver.create_group_from_src, - self.ctxt, group, volumes, None, None, - None, None) - - # Create cg from src with source_cg and empty source_vols - self.assertRaises(exception.InvalidInput, - self.driver.create_group_from_src, - self.ctxt, group, volumes, None, None, - source_cg, None) - - # Create cg from src with source_vols and empty source_cg - self.assertRaises(exception.InvalidInput, - self.driver.create_group_from_src, - self.ctxt, group, volumes, None, None, - 
None, source_vols) - - # Create cg from src with cgsnapshot and empty snapshots - self.assertRaises(exception.InvalidInput, - self.driver.create_group_from_src, - self.ctxt, group, volumes, group_snapshot, None, - None, None) - # Create cg from src with snapshots and empty cgsnapshot - self.assertRaises(exception.InvalidInput, - self.driver.create_group_from_src, - self.ctxt, group, volumes, None, snapshots, - None, None) - - model_update = self.driver.delete_group(self.ctxt, group, volumes) - - self.assertEqual(fields.GroupStatus.DELETED, - model_update[0]['status']) - for volume in model_update[1]: - self.assertEqual('deleted', volume['status']) - - model_update = self.driver.delete_group(self.ctxt, - source_cg, source_vols) - - self.assertEqual(fields.GroupStatus.DELETED, - model_update[0]['status']) - for volume in model_update[1]: - self.assertEqual('deleted', volume['status']) - - model_update = self.driver.delete_group(self.ctxt, - group_snapshot, snapshots) - - self.assertEqual(fields.GroupStatus.DELETED, - model_update[0]['status']) - for volume in model_update[1]: - self.assertEqual('deleted', volume['status']) - - @mock.patch('oslo_service.loopingcall.FixedIntervalLoopingCall', - new=testutils.ZeroIntervalLoopingCall) - def test_storwize_group_from_src(self): - # Valid case for create cg from src - type_ref = volume_types.create(self.ctxt, 'testtype', None) - spec = {'consistent_group_snapshot_enabled': ' True'} - cg_type_ref = group_types.create(self.ctxt, 'cg_type', spec) - pool = _get_test_pool() - # Create cg in db - group = self._create_group_in_db(volume_type_id=type_ref.id, - group_type_id=cg_type_ref.id) - # Create volumes in db - testutils.create_volume(self.ctxt, volume_type_id=type_ref.id, - group_id=group.id, - host='openstack@svc#%s' % pool) - testutils.create_volume(self.ctxt, volume_type_id=type_ref.id, - consistencygroup_id=group.id, - host='openstack@svc#%s' % pool) - volumes = self.db.volume_get_all_by_generic_group( - 
self.ctxt.elevated(), group.id) - - # Create source CG - source_cg = self._create_group_in_db(volume_type_id=type_ref.id, - group_type_id=cg_type_ref.id) - # Add volumes to source CG - self._create_volume(volume_type_id=type_ref.id, - group_id=source_cg['id']) - self._create_volume(volume_type_id=type_ref.id, - group_id=source_cg['id']) - source_vols = self.db.volume_get_all_by_generic_group( - self.ctxt.elevated(), source_cg['id']) - - # Create cgsnapshot - group_snapshot, snapshots = self._create_group_snapshot( - source_cg['id'], group_type_id=cg_type_ref.id) - - # Create cg from source cg - model_update, volumes_model_update = ( - self.driver.create_group_from_src(self.ctxt, group, volumes, None, - None, source_cg, source_vols)) - self.assertEqual(fields.GroupStatus.AVAILABLE, - model_update['status'], - "CG create from src created failed") - for each_vol in volumes_model_update: - self.assertEqual('available', each_vol['status']) - - model_update = self.driver.delete_group(self.ctxt, group, volumes) - self.assertEqual(fields.GroupStatus.DELETED, - model_update[0]['status']) - for each_vol in model_update[1]: - self.assertEqual('deleted', each_vol['status']) - - # Create cg from cg snapshot - model_update, volumes_model_update = ( - self.driver.create_group_from_src(self.ctxt, group, volumes, - group_snapshot, snapshots, - None, None)) - self.assertEqual(fields.GroupStatus.AVAILABLE, - model_update['status'], - "CG create from src created failed") - for each_vol in volumes_model_update: - self.assertEqual('available', each_vol['status']) - - model_update = self.driver.delete_group(self.ctxt, group, volumes) - self.assertEqual(fields.GroupStatus.DELETED, - model_update[0]['status']) - for each_vol in model_update[1]: - self.assertEqual('deleted', each_vol['status']) - - model_update = self.driver.delete_group_snapshot(self.ctxt, - group_snapshot, - snapshots) - self.assertEqual(fields.GroupStatus.DELETED, - model_update[0]['status']) - for volume in 
model_update[1]: - self.assertEqual('deleted', volume['status']) - - # mirror/strtch cluster volume test cases - def test_storwize_svc_create_mirror_volume(self): - # create mirror volume in invalid pool - spec = {'mirror_pool': 'invalid_pool'} - mirror_vol_type = self._create_volume_type(spec, 'invalid_mirror_type') - vol = self._generate_vol_info(mirror_vol_type) - self.assertRaises(exception.InvalidInput, - self.driver.create_volume, vol) - - spec = {'mirror_pool': 'openstack1'} - mirror_vol_type = self._create_volume_type(spec, 'test_mirror_type') - vol = self._generate_vol_info(mirror_vol_type) - self.driver.create_volume(vol) - self._assert_vol_exists(vol.name, True) - - copies = self.driver._helpers.get_vdisk_copies(vol.name) - self.assertEqual(copies['primary']['mdisk_grp_name'], 'openstack') - self.assertEqual(copies['secondary']['mdisk_grp_name'], 'openstack1') - self.driver.delete_volume(vol) - self._assert_vol_exists(vol['name'], False) - - def test_storwize_svc_snapshots_mirror_volume(self): - vol1 = self._generate_vol_info(self.mirror_vol_type) - self.driver.create_volume(vol1) - - snap1 = self._generate_snap_info(vol1.id) - self._assert_vol_exists(snap1.name, False) - - self.driver.create_snapshot(snap1) - if self.USESIM: - self.sim.error_injection('lsfcmap', 'speed_up') - self._assert_vol_exists(snap1.name, True) - copies = self.driver._helpers.get_vdisk_copies(snap1.name) - self.assertEqual(copies['primary']['mdisk_grp_name'], 'openstack') - self.assertEqual(copies['secondary']['mdisk_grp_name'], 'openstack1') - - self.driver.delete_snapshot(snap1) - self.driver.delete_volume(vol1) - - def test_storwize_svc_create_cloned_mirror_volume(self): - vol1 = self._generate_vol_info(self.mirror_vol_type) - self.driver.create_volume(vol1) - vol2 = self._generate_vol_info(self.mirror_vol_type) - - if self.USESIM: - self.sim.error_injection('lsfcmap', 'speed_up') - self.driver.create_cloned_volume(vol2, vol1) - self._assert_vol_exists(vol2.name, True) - copies 
= self.driver._helpers.get_vdisk_copies(vol2.name) - self.assertEqual(copies['primary']['mdisk_grp_name'], 'openstack') - self.assertEqual(copies['secondary']['mdisk_grp_name'], 'openstack1') - - self.driver.delete_volume(vol2) - self._assert_vol_exists(vol2.name, False) - self.driver.delete_volume(vol1) - self._assert_vol_exists(vol1.name, False) - - def test_storwize_svc_create_mirror_volume_from_snapshot(self): - vol1 = self._generate_vol_info(self.mirror_vol_type) - self.driver.create_volume(vol1) - snap1 = self._generate_snap_info(vol1.id) - self.driver.create_snapshot(snap1) - - if self.USESIM: - self.sim.error_injection('lsfcmap', 'speed_up') - - vol2 = self._generate_vol_info(self.mirror_vol_type) - self.driver.create_volume_from_snapshot(vol2, snap1) - self._assert_vol_exists(vol2.name, True) - copies = self.driver._helpers.get_vdisk_copies(vol2.name) - self.assertEqual(copies['primary']['mdisk_grp_name'], 'openstack') - self.assertEqual(copies['secondary']['mdisk_grp_name'], 'openstack1') - - self.driver.delete_volume(vol2) - self._assert_vol_exists(vol2['name'], False) - self.driver.delete_snapshot(snap1) - self._assert_vol_exists(snap1['name'], False) - self.driver.delete_volume(vol1) - self._assert_vol_exists(vol1['name'], False) - - @mock.patch.object(storwize_svc_common.StorwizeHelpers, 'add_vdisk_copy') - def test_storwize_svc_mirror_volume_migrate(self, add_vdisk_copy): - # use migratevdisk for mirror volume migration, rather than - # addvdiskcopy - self.driver.do_setup(None) - loc = ('StorwizeSVCDriver:' + self.driver._state['system_id'] + - ':openstack2') - host = {'host': 'openstack@svc#openstack2', - 'capabilities': {'location_info': loc}} - ctxt = context.get_admin_context() - vol1 = self._generate_vol_info(self.mirror_vol_type) - self.driver.create_volume(vol1) - copies = self.driver._helpers.get_vdisk_copies(vol1.name) - self.assertEqual(copies['primary']['mdisk_grp_name'], 'openstack') - 
self.assertEqual(copies['secondary']['mdisk_grp_name'], 'openstack1') - - self.driver.migrate_volume(ctxt, vol1, host) - copies = self.driver._helpers.get_vdisk_copies(vol1.name) - self.assertEqual(copies['primary']['mdisk_grp_name'], 'openstack2') - self.assertEqual(copies['secondary']['mdisk_grp_name'], 'openstack1') - self.assertFalse(add_vdisk_copy.called) - self._delete_volume(vol1) - - @ddt.data(({'mirror_pool': 'openstack1'}, - {'mirror_pool': 'openstack1', 'compression': True}), - ({'compression': False}, - {'mirror_pool': 'openstack1', 'compression': True}), - ({}, {'mirror_pool': 'invalidpool'})) - @ddt.unpack - def test_storwize_svc_retype_mirror_volume_invalid(self, old_opts, - new_opts): - self.driver.do_setup(self.ctxt) - host = {'host': 'openstack@svc#openstack'} - ctxt = context.get_admin_context() - - vol_type1 = self._create_volume_type(old_opts, 'old') - vol_type2 = self._create_volume_type(new_opts, 'new') - diff, _equal = volume_types.volume_types_diff(ctxt, vol_type1.id, - vol_type2.id) - vol1 = self._generate_vol_info(vol_type1) - self.driver.create_volume(vol1) - - self.assertRaises(exception.VolumeDriverException, - self.driver.retype, self.ctxt, vol1, - vol_type2, diff, host) - self.driver.delete_volume(vol1) - - @ddt.data(({'mirror_pool': 'openstack1'}, {}), - ({'mirror_pool': 'openstack1'}, {'mirror_pool': ''})) - @ddt.unpack - def test_storwize_retype_from_mirror_to_none_mirror(self, - old_opts, new_opts): - self.driver.do_setup(self.ctxt) - host = {'host': 'openstack@svc#openstack'} - ctxt = context.get_admin_context() - - vol_type1 = self._create_volume_type(old_opts, 'old') - vol_type2 = self._create_volume_type(new_opts, 'new') - diff, _equal = volume_types.volume_types_diff(ctxt, vol_type1.id, - vol_type2.id) - vol1 = self._generate_vol_info(vol_type1) - self.driver.create_volume(vol1) - - self._assert_vol_exists(vol1.name, True) - copies = self.driver._helpers.lsvdiskcopy(vol1.name) - self.assertEqual(len(copies), 2) - - 
self.driver.retype(self.ctxt, vol1, vol_type2, diff, host) - copies = self.driver._helpers.lsvdiskcopy(vol1.name) - self.assertEqual(len(copies), 1) - copies = self.driver._helpers.get_vdisk_copies(vol1.name) - self.assertEqual(copies['primary']['mdisk_grp_name'], 'openstack') - - self.driver.delete_volume(vol1) - - @ddt.data(({}, {'mirror_pool': 'openstack1'}), - ({'mirror_pool': ''}, {'mirror_pool': 'openstack1'})) - @ddt.unpack - def test_storwize_retype_from_none_to_mirror_volume(self, - old_opts, new_opts): - self.driver.do_setup(self.ctxt) - host = {'host': 'openstack@svc#openstack'} - ctxt = context.get_admin_context() - - old_opts = {} - new_opts = {'mirror_pool': 'openstack1'} - vol_type1 = self._create_volume_type(old_opts, 'old') - vol_type2 = self._create_volume_type(new_opts, 'new') - diff, _equal = volume_types.volume_types_diff(ctxt, vol_type1.id, - vol_type2.id) - vol1 = self._generate_vol_info(vol_type1) - self.driver.create_volume(vol1) - - self._assert_vol_exists(vol1.name, True) - copies = self.driver._helpers.lsvdiskcopy(vol1.name) - self.assertEqual(len(copies), 1) - - self.driver.retype(self.ctxt, vol1, vol_type2, diff, host) - copies = self.driver._helpers.lsvdiskcopy(vol1.name) - self.assertEqual(len(copies), 2) - copies = self.driver._helpers.get_vdisk_copies(vol1.name) - self.assertEqual(copies['primary']['mdisk_grp_name'], 'openstack') - self.assertEqual(copies['secondary']['mdisk_grp_name'], 'openstack1') - - self.driver.delete_volume(vol1) - - @ddt.data(({}, {'mirror_pool': 'openstack1'}), - ({'mirror_pool': ''}, {'mirror_pool': 'openstack1'}), - ({'mirror_pool': 'openstack1'}, {}), - ({'mirror_pool': 'openstack1'}, {'mirror_pool': ''}), - ({'mirror_pool': 'openstack1'}, {'mirror_pool': 'invalidpool'})) - @ddt.unpack - def test_storwize_manage_existing_mismatch_with_mirror_volume( - self, opts1, opts2): - self.driver.do_setup(self.ctxt) - vol_type1 = self._create_volume_type(opts1, 'vol_type1') - vol_type2 = 
self._create_volume_type(opts2, 'vol_type2') - vol1 = self._generate_vol_info(vol_type1) - self.driver.create_volume(vol1) - vol2 = self._generate_vol_info(vol_type2) - - ref = {'source-name': vol1.name} - self.assertRaises(exception.ManageExistingVolumeTypeMismatch, - self.driver.manage_existing, vol2, ref) - - self.driver.delete_volume(vol1) - - def test_storwize_manage_existing_with_mirror_volume(self): - self.driver.do_setup(self.ctxt) - vol1 = self._generate_vol_info(self.mirror_vol_type) - self.driver.create_volume(vol1) - uid_of_vol1 = self._get_vdisk_uid(vol1.name) - - opts1 = {'mirror_pool': 'openstack1'} - new_volume_type = self._create_volume_type(opts1, 'new_mirror_type') - new_volume = self._generate_vol_info(new_volume_type) - ref = {'source-name': vol1.name} - self.driver.manage_existing(new_volume, ref) - - # Check the uid of the volume which has been renamed. - uid_of_new_vol = self._get_vdisk_uid(new_volume.name) - self.assertEqual(uid_of_vol1, uid_of_new_vol) - - self.driver.delete_volume(new_volume) - - def _create_volume_type_qos(self, extra_specs, fake_qos): - # Generate a QoS volume type for volume. 
- if extra_specs: - spec = fake_qos - type_ref = volume_types.create(self.ctxt, "qos_extra_specs", spec) - else: - type_ref = volume_types.create(self.ctxt, "qos_associate", None) - if fake_qos: - qos_ref = qos_specs.create(self.ctxt, 'qos-specs', fake_qos) - qos_specs.associate_qos_with_type(self.ctxt, qos_ref['id'], - type_ref['id']) - - qos_type = volume_types.get_volume_type(self.ctxt, type_ref['id']) - return qos_type - - def _create_volume_type_qos_both(self, fake_qos, fake_qos_associate): - type_ref = volume_types.create(self.ctxt, "qos_extra_specs", fake_qos) - qos_ref = qos_specs.create(self.ctxt, 'qos-specs', fake_qos_associate) - qos_specs.associate_qos_with_type(self.ctxt, qos_ref['id'], - type_ref['id']) - qos_type = volume_types.get_volume_type(self.ctxt, type_ref['id']) - return qos_type - - def _create_replication_volume_type(self, enable): - # Generate a volume type for volume repliation. - if enable: - spec = {'capabilities:replication': ' True'} - type_ref = volume_types.create(self.ctxt, "replication_1", spec) - else: - spec = {'capabilities:replication': ' False'} - type_ref = volume_types.create(self.ctxt, "replication_2", spec) - - replication_type = objects.VolumeType.get_by_id(self.ctxt, - type_ref['id']) - return replication_type - - def _create_consistency_group_volume_type(self): - # Generate a volume type for volume consistencygroup. - spec = {'capabilities:consistencygroup_support': ' True'} - type_ref = volume_types.create(self.ctxt, "cg", spec) - - cg_type = volume_types.get_volume_type(self.ctxt, type_ref['id']) - - return cg_type - - def _get_vdisk_uid(self, vdisk_name): - """Return vdisk_UID for given vdisk. - - Given a vdisk by name, performs an lvdisk command that extracts - the vdisk_UID parameter and returns it. - Returns None if the specified vdisk does not exist. 
- """ - vdisk_properties, _err = self.sim._cmd_lsvdisk(obj=vdisk_name, - delim='!') - - # Iterate through each row until we find the vdisk_UID entry - for row in vdisk_properties.split('\n'): - words = row.split('!') - if words[0] == 'vdisk_UID': - return words[1] - return None - - def _create_volume_and_return_uid(self, volume_name): - """Creates a volume and returns its UID. - - Creates a volume with the specified name, and returns the UID that - the Storwize controller allocated for it. We do this by executing a - create_volume and then calling into the simulator to perform an - lsvdisk directly. - """ - volume = self._generate_vol_info() - self.driver.create_volume(volume) - - return (volume, self._get_vdisk_uid(volume['name'])) - - def test_manage_existing_get_size_bad_ref(self): - """Error on manage with bad reference. - - This test case attempts to manage an existing volume but passes in - a bad reference that the Storwize driver doesn't understand. We - expect an exception to be raised. - """ - volume = self._generate_vol_info() - ref = {} - self.assertRaises(exception.ManageExistingInvalidReference, - self.driver.manage_existing_get_size, volume, ref) - - def test_manage_existing_get_size_bad_uid(self): - """Error when the specified UUID does not exist.""" - volume = self._generate_vol_info() - ref = {'source-id': 'bad_uid'} - self.assertRaises(exception.ManageExistingInvalidReference, - self.driver.manage_existing_get_size, volume, ref) - pass - - def test_manage_existing_get_size_bad_name(self): - """Error when the specified name does not exist.""" - volume = self._generate_vol_info() - ref = {'source-name': 'bad_name'} - self.assertRaises(exception.ManageExistingInvalidReference, - self.driver.manage_existing_get_size, volume, ref) - - def test_manage_existing_bad_ref(self): - """Error on manage with bad reference. - - This test case attempts to manage an existing volume but passes in - a bad reference that the Storwize driver doesn't understand. 
We - expect an exception to be raised. - """ - - # Error when neither UUID nor name are specified. - volume = self._generate_vol_info() - ref = {} - self.assertRaises(exception.ManageExistingInvalidReference, - self.driver.manage_existing, volume, ref) - - # Error when the specified UUID does not exist. - volume = self._generate_vol_info() - ref = {'source-id': 'bad_uid'} - self.assertRaises(exception.ManageExistingInvalidReference, - self.driver.manage_existing, volume, ref) - - # Error when the specified name does not exist. - volume = self._generate_vol_info() - ref = {'source-name': 'bad_name'} - self.assertRaises(exception.ManageExistingInvalidReference, - self.driver.manage_existing, volume, ref) - - @mock.patch.object(storwize_svc_common.StorwizeHelpers, - 'get_vdisk_copy_attrs') - def test_manage_existing_mismatch(self, - get_vdisk_copy_attrs): - ctxt = testutils.get_test_admin_context() - _volume, uid = self._create_volume_and_return_uid('manage_test') - - opts = {'rsize': -1} - type_thick_ref = volume_types.create(ctxt, 'testtype1', opts) - - opts = {'rsize': 2} - type_thin_ref = volume_types.create(ctxt, 'testtype2', opts) - - opts = {'rsize': 2, 'compression': True} - type_comp_ref = volume_types.create(ctxt, 'testtype3', opts) - - opts = {'rsize': -1, 'iogrp': 1} - type_iogrp_ref = volume_types.create(ctxt, 'testtype4', opts) - - new_volume = self._generate_vol_info() - ref = {'source-name': _volume['name']} - - fake_copy_thin = self._get_default_opts() - fake_copy_thin['autoexpand'] = 'on' - - fake_copy_comp = self._get_default_opts() - fake_copy_comp['autoexpand'] = 'on' - fake_copy_comp['compressed_copy'] = 'yes' - - fake_copy_thick = self._get_default_opts() - fake_copy_thick['autoexpand'] = '' - fake_copy_thick['compressed_copy'] = 'no' - - fake_copy_no_comp = self._get_default_opts() - fake_copy_no_comp['compressed_copy'] = 'no' - - valid_iogrp = self.driver._state['available_iogrps'] - self.driver._state['available_iogrps'] = [9999] - 
self.assertRaises(exception.ManageExistingVolumeTypeMismatch, - self.driver.manage_existing, new_volume, ref) - self.driver._state['available_iogrps'] = valid_iogrp - - get_vdisk_copy_attrs.side_effect = [fake_copy_thin, - fake_copy_thick, - fake_copy_no_comp, - fake_copy_comp, - fake_copy_thick, - fake_copy_thick - ] - new_volume['volume_type_id'] = type_thick_ref['id'] - self.assertRaises(exception.ManageExistingVolumeTypeMismatch, - self.driver.manage_existing, new_volume, ref) - - new_volume['volume_type_id'] = type_thin_ref['id'] - self.assertRaises(exception.ManageExistingVolumeTypeMismatch, - self.driver.manage_existing, new_volume, ref) - - new_volume['volume_type_id'] = type_comp_ref['id'] - self.assertRaises(exception.ManageExistingVolumeTypeMismatch, - self.driver.manage_existing, new_volume, ref) - - new_volume['volume_type_id'] = type_thin_ref['id'] - self.assertRaises(exception.ManageExistingVolumeTypeMismatch, - self.driver.manage_existing, new_volume, ref) - - new_volume['volume_type_id'] = type_iogrp_ref['id'] - self.assertRaises(exception.ManageExistingVolumeTypeMismatch, - self.driver.manage_existing, new_volume, ref) - - new_volume['volume_type_id'] = type_thick_ref['id'] - no_exist_pool = 'i-dont-exist-%s' % random.randint(10000, 99999) - new_volume['host'] = 'openstack@svc#%s' % no_exist_pool - self.assertRaises(exception.ManageExistingVolumeTypeMismatch, - self.driver.manage_existing, new_volume, ref) - - self._reset_flags() - volume_types.destroy(ctxt, type_thick_ref['id']) - volume_types.destroy(ctxt, type_comp_ref['id']) - volume_types.destroy(ctxt, type_iogrp_ref['id']) - - def test_manage_existing_good_uid_not_mapped(self): - """Tests managing a volume with no mappings. - - This test case attempts to manage an existing volume by UID, and - we expect it to succeed. We verify that the backend volume was - renamed to have the name of the Cinder volume that we asked for it to - be associated with. 
- """ - - # Create a volume as a way of getting a vdisk created, and find out the - # UID of that vdisk. - _volume, uid = self._create_volume_and_return_uid('manage_test') - - # Descriptor of the Cinder volume that we want to own the vdisk - # referenced by uid. - new_volume = self._generate_vol_info() - - # Submit the request to manage it. - ref = {'source-id': uid} - size = self.driver.manage_existing_get_size(new_volume, ref) - self.assertEqual(10, size) - self.driver.manage_existing(new_volume, ref) - - # Assert that there is a disk named after the new volume that has the - # ID that we passed in, indicating that the disk has been renamed. - uid_of_new_volume = self._get_vdisk_uid(new_volume['name']) - self.assertEqual(uid, uid_of_new_volume) - - def test_manage_existing_good_name_not_mapped(self): - """Tests managing a volume with no mappings. - - This test case attempts to manage an existing volume by name, and - we expect it to succeed. We verify that the backend volume was - renamed to have the name of the Cinder volume that we asked for it to - be associated with. - """ - - # Create a volume as a way of getting a vdisk created, and find out the - # UID of that vdisk. - _volume, uid = self._create_volume_and_return_uid('manage_test') - - # Descriptor of the Cinder volume that we want to own the vdisk - # referenced by uid. - new_volume = self._generate_vol_info() - - # Submit the request to manage it. - ref = {'source-name': _volume['name']} - size = self.driver.manage_existing_get_size(new_volume, ref) - self.assertEqual(10, size) - self.driver.manage_existing(new_volume, ref) - - # Assert that there is a disk named after the new volume that has the - # ID that we passed in, indicating that the disk has been renamed. - uid_of_new_volume = self._get_vdisk_uid(new_volume['name']) - self.assertEqual(uid, uid_of_new_volume) - - def test_manage_existing_mapped(self): - """Tests managing a mapped volume with no override. 
- - This test case attempts to manage an existing volume by UID, but - the volume is mapped to a host, so we expect to see an exception - raised. - """ - # Create a volume as a way of getting a vdisk created, and find out the - # UUID of that vdisk. - # Set replication target. - volume, uid = self._create_volume_and_return_uid('manage_test') - - # Map a host to the disk - conn = {'initiator': u'unicode:initiator3', - 'ip': '10.10.10.12', - 'host': u'unicode.foo}.bar}.baz'} - self.driver.initialize_connection(volume, conn) - - # Descriptor of the Cinder volume that we want to own the vdisk - # referenced by uid. - volume = self._generate_vol_info() - ref = {'source-id': uid} - - # Attempt to manage this disk, and except an exception beause the - # volume is already mapped. - self.assertRaises(exception.ManageExistingInvalidReference, - self.driver.manage_existing_get_size, volume, ref) - - ref = {'source-name': volume['name']} - self.assertRaises(exception.ManageExistingInvalidReference, - self.driver.manage_existing_get_size, volume, ref) - - def test_manage_existing_good_uid_mapped_with_override(self): - """Tests managing a mapped volume with override. - - This test case attempts to manage an existing volume by UID, when it - already mapped to a host, but the ref specifies that this is OK. - We verify that the backend volume was renamed to have the name of the - Cinder volume that we asked for it to be associated with. - """ - # Create a volume as a way of getting a vdisk created, and find out the - # UUID of that vdisk. - volume, uid = self._create_volume_and_return_uid('manage_test') - - # Map a host to the disk - conn = {'initiator': u'unicode:initiator3', - 'ip': '10.10.10.12', - 'host': u'unicode.foo}.bar}.baz'} - self.driver.initialize_connection(volume, conn) - - # Descriptor of the Cinder volume that we want to own the vdisk - # referenced by uid. 
- new_volume = self._generate_vol_info() - - # Submit the request to manage it, specifying that it is OK to - # manage a volume that is already attached. - ref = {'source-id': uid, 'manage_if_in_use': True} - size = self.driver.manage_existing_get_size(new_volume, ref) - self.assertEqual(10, size) - self.driver.manage_existing(new_volume, ref) - - # Assert that there is a disk named after the new volume that has the - # ID that we passed in, indicating that the disk has been renamed. - uid_of_new_volume = self._get_vdisk_uid(new_volume['name']) - self.assertEqual(uid, uid_of_new_volume) - - def test_manage_existing_good_name_mapped_with_override(self): - """Tests managing a mapped volume with override. - - This test case attempts to manage an existing volume by name, when it - already mapped to a host, but the ref specifies that this is OK. - We verify that the backend volume was renamed to have the name of the - Cinder volume that we asked for it to be associated with. - """ - # Create a volume as a way of getting a vdisk created, and find out the - # UUID of that vdisk. - volume, uid = self._create_volume_and_return_uid('manage_test') - - # Map a host to the disk - conn = {'initiator': u'unicode:initiator3', - 'ip': '10.10.10.12', - 'host': u'unicode.foo}.bar}.baz'} - self.driver.initialize_connection(volume, conn) - - # Descriptor of the Cinder volume that we want to own the vdisk - # referenced by uid. - new_volume = self._generate_vol_info() - - # Submit the request to manage it, specifying that it is OK to - # manage a volume that is already attached. - ref = {'source-name': volume['name'], 'manage_if_in_use': True} - size = self.driver.manage_existing_get_size(new_volume, ref) - self.assertEqual(10, size) - self.driver.manage_existing(new_volume, ref) - - # Assert that there is a disk named after the new volume that has the - # ID that we passed in, indicating that the disk has been renamed. 
- uid_of_new_volume = self._get_vdisk_uid(new_volume['name']) - self.assertEqual(uid, uid_of_new_volume) - - -class CLIResponseTestCase(test.TestCase): - def test_empty(self): - self.assertEqual(0, len( - storwize_svc_common.CLIResponse(''))) - self.assertEqual(0, len( - storwize_svc_common.CLIResponse(('', 'stderr')))) - - def test_header(self): - raw = r'''id!name -1!node1 -2!node2 -''' - resp = storwize_svc_common.CLIResponse(raw, with_header=True) - self.assertEqual(2, len(resp)) - self.assertEqual('1', resp[0]['id']) - self.assertEqual('2', resp[1]['id']) - - def test_select(self): - raw = r'''id!123 -name!Bill -name!Bill2 -age!30 -home address!s1 -home address!s2 - -id! 7 -name!John -name!John2 -age!40 -home address!s3 -home address!s4 -''' - resp = storwize_svc_common.CLIResponse(raw, with_header=False) - self.assertEqual([('s1', 'Bill', 's1'), ('s2', 'Bill2', 's2'), - ('s3', 'John', 's3'), ('s4', 'John2', 's4')], - list(resp.select('home address', 'name', - 'home address'))) - - def test_lsnode_all(self): - raw = r'''id!name!UPS_serial_number!WWNN!status -1!node1!!500507680200C744!online -2!node2!!500507680200C745!online -''' - resp = storwize_svc_common.CLIResponse(raw) - self.assertEqual(2, len(resp)) - self.assertEqual('1', resp[0]['id']) - self.assertEqual('500507680200C744', resp[0]['WWNN']) - self.assertEqual('2', resp[1]['id']) - self.assertEqual('500507680200C745', resp[1]['WWNN']) - - def test_lsnode_single(self): - raw = r'''id!1 -port_id!500507680210C744 -port_status!active -port_speed!8Gb -port_id!500507680240C744 -port_status!inactive -port_speed!8Gb -''' - resp = storwize_svc_common.CLIResponse(raw, with_header=False) - self.assertEqual(1, len(resp)) - self.assertEqual('1', resp[0]['id']) - self.assertEqual([('500507680210C744', 'active'), - ('500507680240C744', 'inactive')], - list(resp.select('port_id', 'port_status'))) - - -class StorwizeHelpersTestCase(test.TestCase): - def setUp(self): - super(StorwizeHelpersTestCase, self).setUp() - 
self.storwize_svc_common = storwize_svc_common.StorwizeHelpers(None) - self.mock_wait_time = mock.patch.object( - storwize_svc_common.StorwizeHelpers, "WAIT_TIME", 0) - - @mock.patch.object(storwize_svc_common.StorwizeSSH, 'lslicense') - @mock.patch.object(storwize_svc_common.StorwizeSSH, 'lsguicapabilities') - def test_compression_enabled(self, lsguicapabilities, lslicense): - fake_license_without_keys = {} - fake_license = { - 'license_compression_enclosures': '1', - 'license_compression_capacity': '1' - } - fake_license_scheme = { - 'license_scheme': '9846' - } - fake_license_invalid_scheme = { - 'license_scheme': '0000' - } - - lslicense.side_effect = [fake_license_without_keys, - fake_license_without_keys, - fake_license, - fake_license_without_keys] - lsguicapabilities.side_effect = [fake_license_without_keys, - fake_license_invalid_scheme, - fake_license_scheme] - self.assertFalse(self.storwize_svc_common.compression_enabled()) - - self.assertFalse(self.storwize_svc_common.compression_enabled()) - - self.assertTrue(self.storwize_svc_common.compression_enabled()) - - self.assertTrue(self.storwize_svc_common.compression_enabled()) - - @mock.patch.object(storwize_svc_common.StorwizeSSH, 'lsguicapabilities') - def test_replication_licensed(self, lsguicapabilities): - lsguicapabilities.side_effect = [ - {'product_key': '0000'}, - {'product_key': - storwize_const.DEV_MODEL_STORWIZE_V3500}, - {'product_key': - storwize_const.DEV_MODEL_STORWIZE_V3700}, - {'product_key': - storwize_const.DEV_MODEL_SVC}, - {'product_key': - storwize_const.DEV_MODEL_STORWIZE}, - {'product_key': - storwize_const.DEV_MODEL_STORWIZE_V7000}, - {'product_key': - storwize_const.DEV_MODEL_STORWIZE_V5000}, - {'product_key': - storwize_const.DEV_MODEL_STORWIZE_V5000_1YR}, - {'product_key': - storwize_const.DEV_MODEL_FLASH_V9000}, - {'product_key': - storwize_const.DEV_MODEL_FLEX}] - for i in range(3): - self.assertFalse(self.storwize_svc_common.replication_licensed()) - - for i in range(7): - 
self.assertTrue(self.storwize_svc_common.replication_licensed()) - - @mock.patch.object(storwize_svc_common.StorwizeHelpers, - 'get_vdisk_count_by_io_group') - def test_select_io_group(self, get_vdisk_count_by_io_group): - # given io groups - opts = {} - # system io groups - state = {} - - fake_iog_vdc1 = {0: 100, 1: 50, 2: 50, 3: 300} - fake_iog_vdc2 = {0: 2, 1: 1, 2: 200} - fake_iog_vdc3 = {0: 2, 2: 200} - fake_iog_vdc4 = {0: 100, 1: 100, 2: 100, 3: 100} - fake_iog_vdc5 = {0: 10, 1: 1, 2: 200, 3: 300} - - get_vdisk_count_by_io_group.side_effect = [fake_iog_vdc1, - fake_iog_vdc2, - fake_iog_vdc3, - fake_iog_vdc4, - fake_iog_vdc5] - opts['iogrp'] = '0,2' - state['available_iogrps'] = [0, 1, 2, 3] - - iog = self.storwize_svc_common.select_io_group(state, opts) - self.assertTrue(iog in state['available_iogrps']) - self.assertEqual(2, iog) - - opts['iogrp'] = '0' - state['available_iogrps'] = [0, 1, 2] - - iog = self.storwize_svc_common.select_io_group(state, opts) - self.assertTrue(iog in state['available_iogrps']) - self.assertEqual(0, iog) - - opts['iogrp'] = '1,2' - state['available_iogrps'] = [0, 2] - - iog = self.storwize_svc_common.select_io_group(state, opts) - self.assertTrue(iog in state['available_iogrps']) - self.assertEqual(2, iog) - - opts['iogrp'] = ' 0, 1, 2 ' - state['available_iogrps'] = [0, 1, 2, 3] - - iog = self.storwize_svc_common.select_io_group(state, opts) - self.assertTrue(iog in state['available_iogrps']) - # since vdisk count in all iogroups is same, it will pick the first - self.assertEqual(0, iog) - - opts['iogrp'] = '0,1,2, 3' - state['available_iogrps'] = [0, 1, 2, 3] - - iog = self.storwize_svc_common.select_io_group(state, opts) - self.assertTrue(iog in state['available_iogrps']) - self.assertEqual(1, iog) - - -@ddt.ddt -class StorwizeSSHTestCase(test.TestCase): - def setUp(self): - super(StorwizeSSHTestCase, self).setUp() - self.fake_driver = StorwizeSVCISCSIFakeDriver( - configuration=conf.Configuration(None)) - sim = 
StorwizeSVCManagementSimulator(['openstack']) - self.fake_driver.set_fake_storage(sim) - self.storwize_ssh = storwize_svc_common.StorwizeSSH( - self.fake_driver._run_ssh) - - def test_mkvdiskhostmap(self): - # mkvdiskhostmap should not be returning anything - self.fake_driver.fake_storage._volumes_list['9999'] = { - 'name': ' 9999', 'id': '0', 'uid': '0', - 'IO_group_id': '0', 'IO_group_name': 'fakepool'} - self.fake_driver.fake_storage._hosts_list['HOST1'] = { - 'name': 'HOST1', 'id': '0', 'host_name': 'HOST1'} - self.fake_driver.fake_storage._hosts_list['HOST2'] = { - 'name': 'HOST2', 'id': '1', 'host_name': 'HOST2'} - self.fake_driver.fake_storage._hosts_list['HOST3'] = { - 'name': 'HOST3', 'id': '2', 'host_name': 'HOST3'} - - ret = self.storwize_ssh.mkvdiskhostmap('HOST1', '9999', '511', False) - self.assertEqual('511', ret) - - ret = self.storwize_ssh.mkvdiskhostmap('HOST2', '9999', '512', True) - self.assertEqual('512', ret) - - ret = self.storwize_ssh.mkvdiskhostmap('HOST3', '9999', None, True) - self.assertIsNotNone(ret) - - with mock.patch.object( - storwize_svc_common.StorwizeSSH, - 'run_ssh_check_created') as run_ssh_check_created: - ex = exception.VolumeBackendAPIException(data='CMMVC6071E') - run_ssh_check_created.side_effect = ex - self.assertRaises(exception.VolumeBackendAPIException, - self.storwize_ssh.mkvdiskhostmap, - 'HOST3', '9999', 511, True) - - @ddt.data((exception.VolumeBackendAPIException(data='CMMVC6372W'), None), - (exception.VolumeBackendAPIException(data='CMMVC6372W'), - {'name': 'fakevol', 'id': '0', 'uid': '0', 'IO_group_id': '0', - 'IO_group_name': 'fakepool'}), - (exception.VolumeBackendAPIException(data='error'), None)) - @ddt.unpack - def test_mkvdisk_with_warning(self, run_ssh_check, lsvol): - opt = {'iogrp': 0} - with mock.patch.object(storwize_svc_common.StorwizeSSH, - 'run_ssh_check_created', - side_effect=run_ssh_check),\ - mock.patch.object(storwize_svc_common.StorwizeSSH, 'lsvdisk', - return_value=lsvol): - if lsvol: - ret 
= self.storwize_ssh.mkvdisk('fakevol', '1', 'gb', - 'fakepool', opt, []) - self.assertEqual('0', ret) - else: - self.assertRaises(exception.VolumeBackendAPIException, - self.storwize_ssh.mkvdisk, - 'fakevol', '1', 'gb', 'fakepool', opt, []) - - -@ddt.ddt -class StorwizeSVCReplicationTestCase(test.TestCase): - @mock.patch.object(time, 'sleep') - def setUp(self, mock_sleep): - super(StorwizeSVCReplicationTestCase, self).setUp() - - def _run_ssh_aux(cmd, check_exit_code=True, attempts=1): - utils.check_ssh_injection(cmd) - if len(cmd) > 2 and cmd[1] == 'lssystem': - cmd[1] = 'lssystem_aux' - ret = self.sim.execute_command(cmd, check_exit_code) - return ret - aux_connect_patcher = mock.patch( - 'cinder.volume.drivers.ibm.storwize_svc.' - 'replication.StorwizeSVCReplicationManager._run_ssh') - self.aux_ssh_mock = aux_connect_patcher.start() - self.addCleanup(aux_connect_patcher.stop) - self.aux_ssh_mock.side_effect = _run_ssh_aux - - self.USESIM = True - if self.USESIM: - self.driver = StorwizeSVCFcFakeDriver( - configuration=conf.Configuration(None)) - self.rep_target = {"backend_id": "svc_aux_target_1", - "san_ip": "192.168.10.22", - "san_login": "admin", - "san_password": "admin", - "pool_name": _get_test_pool()} - self.fake_target = {"backend_id": "svc_id_target", - "san_ip": "192.168.10.23", - "san_login": "admin", - "san_password": "admin", - "pool_name": _get_test_pool()} - self._def_flags = {'san_ip': '192.168.10.21', - 'san_login': 'user', - 'san_password': 'pass', - 'storwize_svc_volpool_name': - SVC_POOLS, - 'replication_device': [self.rep_target]} - wwpns = [ - six.text_type(random.randint(0, 9999999999999999)).zfill(16), - six.text_type(random.randint(0, 9999999999999999)).zfill(16)] - initiator = 'test.initiator.%s' % six.text_type( - random.randint(10000, 99999)) - self._connector = {'ip': '1.234.56.78', - 'host': 'storwize-svc-test', - 'wwpns': wwpns, - 'initiator': initiator} - self.sim = StorwizeSVCManagementSimulator(SVC_POOLS) - - 
self.driver.set_fake_storage(self.sim) - self.ctxt = context.get_admin_context() - - self._reset_flags() - self.ctxt = context.get_admin_context() - db_driver = self.driver.configuration.db_driver - self.db = importutils.import_module(db_driver) - self.driver.db = self.db - - self.driver.do_setup(None) - self.driver.check_for_setup_error() - self._create_test_volume_types() - - def _set_flag(self, flag, value): - group = self.driver.configuration.config_group - self.driver.configuration.set_override(flag, value, group) - - def _reset_flags(self): - for k, v in self._def_flags.items(): - self._set_flag(k, v) - self.driver.configuration.set_override('replication_device', - [self.rep_target]) - - def _assert_vol_exists(self, name, exists): - is_vol_defined = self.driver._helpers.is_vdisk_defined(name) - self.assertEqual(exists, is_vol_defined) - - def _generate_vol_info(self, vol_type=None, size=1): - pool = _get_test_pool() - volume_type = vol_type if vol_type else self.non_replica_type - prop = {'size': size, - 'volume_type_id': volume_type.id, - 'host': 'openstack@svc#%s' % pool - } - vol = testutils.create_volume(self.ctxt, **prop) - return vol - - def _generate_snap_info(self, vol_id): - prop = {'volume_id': vol_id} - snap = testutils.create_snapshot(self.ctxt, **prop) - return snap - - def _create_replica_volume_type(self, enable, - rep_type=storwize_const.METRO, - opts=None, vol_type_name=None, - cycle_period_seconds=None): - # Generate a volume type for volume repliation. 
- if enable: - if rep_type == storwize_const.METRO: - spec = {'replication_enabled': ' True', - 'replication_type': ' metro'} - type_name = 'rep_metro' - elif rep_type == storwize_const.GMCV: - if cycle_period_seconds: - spec = {'replication_enabled': ' True', - 'replication_type': ' gmcv', - 'drivers:cycle_period_seconds': - cycle_period_seconds} - type_name = 'rep_gmcv_with_cps' + cycle_period_seconds - else: - spec = {'replication_enabled': ' True', - 'replication_type': ' gmcv'} - type_name = 'rep_gmcv_default' - else: - spec = {'replication_enabled': ' True', - 'replication_type': ' global'} - type_name = 'rep_global' - elif opts: - spec = opts - type_name = vol_type_name - else: - spec = {'replication_enabled': ' False'} - type_name = "non_rep" - - type_ref = volume_types.create(self.ctxt, type_name, spec) - replication_type = objects.VolumeType.get_by_id(self.ctxt, - type_ref['id']) - return replication_type - - def _create_test_volume_types(self): - self.mm_type = self._create_replica_volume_type( - True, rep_type=storwize_const.METRO) - self.gm_type = self._create_replica_volume_type( - True, rep_type=storwize_const.GLOBAL) - self.gmcv_default_type = self._create_replica_volume_type( - True, rep_type=storwize_const.GMCV) - self.gmcv_with_cps600_type = self._create_replica_volume_type( - True, rep_type=storwize_const.GMCV, cycle_period_seconds="600") - self.gmcv_with_cps900_type = self._create_replica_volume_type( - True, rep_type=storwize_const.GMCV, cycle_period_seconds="900") - self.gmcv_with_cps86401_type = self._create_replica_volume_type( - True, rep_type=storwize_const.GMCV, cycle_period_seconds="86401") - self.non_replica_type = self._create_replica_volume_type(False) - - def _create_test_volume(self, rep_type): - volume = self._generate_vol_info(rep_type) - model_update = self.driver.create_volume(volume) - return volume, model_update - - def _get_vdisk_uid(self, vdisk_name): - vdisk_properties, _err = self.sim._cmd_lsvdisk(obj=vdisk_name, - 
delim='!') - for row in vdisk_properties.split('\n'): - words = row.split('!') - if words[0] == 'vdisk_UID': - return words[1] - return None - - def test_storwize_do_replication_setup_error(self): - fake_targets = [self.rep_target, self.rep_target] - self.driver.configuration.set_override('replication_device', - [{"backend_id": - "svc_id_target"}]) - self.assertRaises(exception.InvalidInput, - self.driver._do_replication_setup) - - self.driver.configuration.set_override('replication_device', - fake_targets) - self.assertRaises(exception.InvalidInput, - self.driver._do_replication_setup) - - self.driver._active_backend_id = 'fake_id' - self.driver.configuration.set_override('replication_device', - [self.rep_target]) - self.assertRaises(exception.InvalidInput, - self.driver._do_replication_setup) - - self.driver._active_backend_id = None - - self.driver._do_replication_setup() - self.assertEqual(self.driver._replica_target, self.rep_target) - - @mock.patch.object(storwize_svc_common.StorwizeHelpers, - 'replication_licensed') - def test_storwize_setup_replication(self, - replication_licensed): - self.driver.configuration.set_override('replication_device', - [self.rep_target]) - self.driver._active_backend_id = None - replication_licensed.side_effect = [False, True, True, True] - - self.driver._get_storwize_config() - self.assertEqual(self.driver._helpers, - self.driver._master_backend_helpers) - self.assertEqual(self.driver._replica_enabled, False) - - self.driver._get_storwize_config() - self.assertEqual(self.driver._replica_target, self.rep_target) - self.assertEqual(self.driver._replica_enabled, True) - - self.driver._active_backend_id = self.rep_target['backend_id'] - self.driver._get_storwize_config() - self.assertEqual(self.driver._helpers, - self.driver._aux_backend_helpers) - self.assertEqual(self.driver._replica_enabled, True) - - self.driver._active_backend_id = None - self.driver._get_storwize_config() - - 
@mock.patch.object(storwize_svc_common.StorwizeHelpers, - 'create_vdisk') - def test_storwize_svc_create_stretch_volume_with_replication(self, - create_vdisk): - spec = {'mirror_pool': 'openstack1', - 'replication_enabled': ' True', - 'replication_type': ' global' - } - vol_type = self._create_replica_volume_type( - False, opts=spec, vol_type_name='test_type') - vol = self._generate_vol_info(vol_type) - self.assertRaises(exception.InvalidInput, - self.driver.create_volume, vol) - self.assertFalse(create_vdisk.called) - - def test_storwize_create_volume_with_mirror_replication(self): - # Set replication target. - self.driver.configuration.set_override('replication_device', - [self.rep_target]) - self.driver.do_setup(self.ctxt) - - # Create metro mirror replication. - volume, model_update = self._create_test_volume(self.mm_type) - self.assertEqual(fields.ReplicationStatus.ENABLED, - model_update['replication_status']) - self._validate_replic_vol_creation(volume) - self.driver.delete_volume(volume) - self._validate_replic_vol_deletion(volume) - - # Create global mirror replication. - volume, model_update = self._create_test_volume(self.gm_type) - self.assertEqual(fields.ReplicationStatus.ENABLED, - model_update['replication_status']) - self._validate_replic_vol_creation(volume) - self.driver.delete_volume(volume) - self._validate_replic_vol_deletion(volume) - - # Create global mirror with change volumes replication. 
- volume, model_update = self._create_test_volume( - self.gmcv_default_type) - self.assertEqual(fields.ReplicationStatus.ENABLED, - model_update['replication_status']) - self._validate_replic_vol_creation(volume, True) - self.driver.delete_volume(volume) - self._validate_replic_vol_deletion(volume, True) - # gmcv with specified cycle_period_seconds - volume, model_update = self._create_test_volume( - self.gmcv_with_cps600_type) - self.assertEqual(fields.ReplicationStatus.ENABLED, - model_update['replication_status']) - self._validate_replic_vol_creation(volume, True) - self.driver.delete_volume(volume) - self._validate_replic_vol_deletion(volume, True) - # gmcv with invalid cycle_period_seconds - self.assertRaises(exception.InvalidInput, - self._create_test_volume, - self.gmcv_with_cps86401_type) - - def _validate_replic_vol_creation(self, volume, isGMCV=False): - self._assert_vol_exists(volume['name'], True) - self._assert_vol_exists( - storwize_const.REPLICA_AUX_VOL_PREFIX + volume['name'], True) - if isGMCV: - self._assert_vol_exists( - storwize_const.REPLICA_CHG_VOL_PREFIX + volume['name'], True) - self._assert_vol_exists( - storwize_const.REPLICA_CHG_VOL_PREFIX + - storwize_const.REPLICA_AUX_VOL_PREFIX + volume['name'], True) - - rel_info = self.driver._helpers.get_relationship_info(volume['name']) - self.assertIsNotNone(rel_info) - if isGMCV: - vol_rep_type = rel_info['copy_type'] - cycling_mode = rel_info['cycling_mode'] - cycle_period_seconds = rel_info['cycle_period_seconds'] - rep_type = self.driver._get_volume_replicated_type( - self.ctxt, volume) - src_opts = self.driver._get_vdisk_params(volume['volume_type_id']) - opt_cycle_period_seconds = six.text_type( - src_opts.get('cycle_period_seconds')) - self.assertEqual(opt_cycle_period_seconds, cycle_period_seconds) - self.assertEqual(storwize_const.GMCV_MULTI, cycling_mode) - self.assertEqual(storwize_const.GLOBAL, vol_rep_type) - self.assertEqual(storwize_const.GMCV, rep_type) - self.assertEqual('master', 
rel_info['primary']) - self.assertEqual(volume['name'], rel_info['master_vdisk_name']) - self.assertEqual( - storwize_const.REPLICA_AUX_VOL_PREFIX + volume['name'], - rel_info['aux_vdisk_name']) - self.assertEqual('inconsistent_copying', rel_info['state']) - self.assertEqual( - storwize_const.REPLICA_CHG_VOL_PREFIX + volume['name'], - rel_info['master_change_vdisk_name']) - self.assertEqual( - storwize_const.REPLICA_CHG_VOL_PREFIX + - storwize_const.REPLICA_AUX_VOL_PREFIX + volume['name'], - rel_info['aux_change_vdisk_name']) - self.assertEqual('inconsistent_copying', rel_info['state']) - self.sim._rc_state_transition('wait', rel_info) - self.assertEqual('consistent_copying', rel_info['state']) - else: - vol_rep_type = rel_info['copy_type'] - rep_type = self.driver._get_volume_replicated_type( - self.ctxt, volume) - self.assertEqual(rep_type, vol_rep_type) - - self.assertEqual('master', rel_info['primary']) - self.assertEqual(volume['name'], rel_info['master_vdisk_name']) - self.assertEqual( - storwize_const.REPLICA_AUX_VOL_PREFIX + volume['name'], - rel_info['aux_vdisk_name']) - self.assertEqual('inconsistent_copying', rel_info['state']) - - self.sim._rc_state_transition('wait', rel_info) - self.assertEqual('consistent_synchronized', rel_info['state']) - - def _validate_gmcv_vol_retype(self, volume): - self._assert_vol_exists(volume['name'], True) - self._assert_vol_exists( - storwize_const.REPLICA_AUX_VOL_PREFIX + volume['name'], True) - self._assert_vol_exists(storwize_const.REPLICA_CHG_VOL_PREFIX + - volume['name'], True) - self._assert_vol_exists( - storwize_const.REPLICA_CHG_VOL_PREFIX + - storwize_const.REPLICA_AUX_VOL_PREFIX + volume['name'], True) - - rel_info = self.driver._helpers.get_relationship_info(volume['name']) - self.assertIsNotNone(rel_info) - - src_opts = self.driver._get_vdisk_params(volume['volume_type_id']) - opt_cycle_period_seconds = six.text_type( - src_opts.get('cycle_period_seconds')) - self.assertEqual(opt_cycle_period_seconds, - 
rel_info['cycle_period_seconds']) - self.assertEqual(storwize_const.GMCV_MULTI, rel_info['cycling_mode']) - self.assertEqual(storwize_const.GLOBAL, rel_info['copy_type']) - self.assertEqual(storwize_const.GMCV, - self.driver._get_volume_replicated_type( - self.ctxt, volume)) - self.assertEqual('master', rel_info['primary']) - self.assertEqual(volume['name'], rel_info['master_vdisk_name']) - self.assertEqual((storwize_const.REPLICA_CHG_VOL_PREFIX - + volume['name']), - rel_info['master_change_vdisk_name']) - aux_vdisk_name = (storwize_const.REPLICA_AUX_VOL_PREFIX - + volume['name']) - self.assertEqual(aux_vdisk_name, - rel_info['aux_vdisk_name']) - self.assertEqual((storwize_const.REPLICA_CHG_VOL_PREFIX - + aux_vdisk_name), - rel_info['aux_change_vdisk_name']) - - def _validate_replic_vol_deletion(self, volume, isGMCV=False): - self._assert_vol_exists(volume['name'], False) - self._assert_vol_exists( - storwize_const.REPLICA_AUX_VOL_PREFIX + volume['name'], False) - if isGMCV: - # All change volumes should be deleted - self._assert_vol_exists( - storwize_const.REPLICA_CHG_VOL_PREFIX + volume['name'], False) - self._assert_vol_exists( - storwize_const.REPLICA_CHG_VOL_PREFIX + - storwize_const.REPLICA_AUX_VOL_PREFIX + volume['name'], False) - rel_info = self.driver._helpers.get_relationship_info(volume['name']) - self.assertIsNone(rel_info) - - def test_storwize_create_snapshot_volume_with_mirror_replica(self): - # Set replication target - self.driver.configuration.set_override('replication_device', - [self.rep_target]) - self.driver.do_setup(self.ctxt) - - # Create metro mirror replication volume. 
- vol1, model_update = self._create_test_volume(self.mm_type) - self.assertEqual(fields.ReplicationStatus.ENABLED, - model_update['replication_status']) - - snap = testutils.create_snapshot(self.ctxt, vol1.id) - self.driver.create_snapshot(snap) - - vol2 = self._generate_vol_info(self.mm_type) - model_update = self.driver.create_volume_from_snapshot(vol2, snap) - self.assertEqual(fields.ReplicationStatus.ENABLED, - model_update['replication_status']) - self._validate_replic_vol_creation(vol2) - - self.driver.delete_snapshot(snap) - self.driver.delete_volume(vol1) - self.driver.delete_volume(vol2) - - # Create gmcv replication volume. - vol1, model_update = self._create_test_volume(self.gmcv_default_type) - self.assertEqual(fields.ReplicationStatus.ENABLED, - model_update['replication_status']) - self._validate_replic_vol_creation(vol1, True) - snap = testutils.create_snapshot(self.ctxt, vol1.id) - self.assertRaises(exception.VolumeDriverException, - self.driver.create_snapshot, - snap) - self.driver.delete_volume(vol1) - - # gmcv with specified cycle_period_seconds - vol1, model_update = self._create_test_volume( - self.gmcv_with_cps900_type) - self.assertEqual(fields.ReplicationStatus.ENABLED, - model_update['replication_status']) - self._validate_replic_vol_creation(vol1, True) - snap = testutils.create_snapshot(self.ctxt, vol1.id) - self.assertRaises(exception.VolumeDriverException, - self.driver.create_snapshot, snap) - self.driver.delete_volume(vol1) - - def test_storwize_create_cloned_volume_with_mirror_replica(self): - # Set replication target - self.driver.configuration.set_override('replication_device', - [self.rep_target]) - self.driver.do_setup(self.ctxt) - - # Create a source metro mirror replication volume. 
- src_volume, model_update = self._create_test_volume(self.mm_type) - self.assertEqual(fields.ReplicationStatus.ENABLED, - model_update['replication_status']) - - volume = self._generate_vol_info(self.mm_type) - - # Create a cloned volume from source volume. - model_update = self.driver.create_cloned_volume(volume, src_volume) - self.assertEqual(fields.ReplicationStatus.ENABLED, - model_update['replication_status']) - self._validate_replic_vol_creation(volume) - - self.driver.delete_volume(src_volume) - self.driver.delete_volume(volume) - # Create a source gmcv replication volume. - src_volume, model_update = self._create_test_volume( - self.gmcv_default_type) - self.assertEqual(fields.ReplicationStatus.ENABLED, - model_update['replication_status']) - - volume = self._generate_vol_info(self.gmcv_default_type) - - # Create a cloned volume from source volume. - model_update = self.driver.create_cloned_volume(volume, src_volume) - self.assertEqual(fields.ReplicationStatus.ENABLED, - model_update['replication_status']) - self._validate_replic_vol_creation(volume, True) - - self.driver.delete_volume(src_volume) - self.driver.delete_volume(volume) - - # Create a source gmcv volume with specified cycle_period_seconds - src_volume, model_update = self._create_test_volume( - self.gmcv_with_cps600_type) - self.assertEqual(fields.ReplicationStatus.ENABLED, - model_update['replication_status']) - volume = self._generate_vol_info(self.gmcv_with_cps600_type) - - # Create a cloned volume from source volume. 
- model_update = self.driver.create_cloned_volume(volume, src_volume) - self.assertEqual(fields.ReplicationStatus.ENABLED, - model_update['replication_status']) - self._validate_replic_vol_creation(volume, True) - - self.driver.delete_volume(src_volume) - self.driver.delete_volume(volume) - - @ddt.data(({'replication_enabled': ' True', - 'replication_type': ' global'}, - {'replication_enabled': ' True', - 'replication_type': ' metro'}), - ({'replication_enabled': ' True', - 'replication_type': ' metro'}, - {'replication_enabled': ' True', - 'replication_type': ' global'}), - ({'replication_enabled': ' True', - 'replication_type': ' metro'}, - {'mirror_pool': 'openstack1'}), - ({'mirror_pool': 'openstack1'}, - {'mirror_pool': 'openstack1', - 'replication_enabled': ' True', - 'replication_type': ' metro'}), - ({'replication_enabled': ' False'}, - {'mirror_pool': 'openstack1', - 'replication_enabled': ' True', - 'replication_type': ' metro'})) - @ddt.unpack - def test_storwize_retype_invalid_replication(self, old_opts, new_opts): - # Set replication target - self.driver.configuration.set_override('replication_device', - [self.rep_target]) - self.driver.do_setup(self.ctxt) - host = {'host': 'openstack@svc#openstack'} - old_type = self._create_replica_volume_type( - False, opts=old_opts, vol_type_name='test_old_type') - - volume, model_update = self._create_test_volume(old_type) - new_type = self._create_replica_volume_type( - False, opts=new_opts, vol_type_name='test_new_type') - diff, _equal = volume_types.volume_types_diff( - self.ctxt, new_type['id'], old_type['id']) - self.assertRaises(exception.VolumeDriverException, self.driver.retype, - self.ctxt, volume, new_type, diff, host) - - def test_storwize_retype_from_mirror_to_none_replication(self): - # Set replication target - self.driver.configuration.set_override('replication_device', - [self.rep_target]) - self.driver.do_setup(self.ctxt) - host = {'host': 'openstack@svc#openstack'} - - volume, model_update = 
self._create_test_volume(self.mm_type) - self.assertEqual(fields.ReplicationStatus.ENABLED, - model_update['replication_status']) - - diff, _equal = volume_types.volume_types_diff( - self.ctxt, self.mm_type['id'], self.gm_type['id']) - # Change the mirror type from mm to gm - self.assertRaises(exception.VolumeDriverException, - self.driver.retype, self.ctxt, - volume, self.gm_type, diff, host) - - # Retype from mm to gmcv - diff, _equal = volume_types.volume_types_diff( - self.ctxt, self.mm_type['id'], self.gmcv_with_cps600_type['id']) - self.assertRaises(exception.VolumeDriverException, - self.driver.retype, self.ctxt, - volume, self.gmcv_with_cps600_type, diff, host) - - diff, _equal = volume_types.volume_types_diff( - self.ctxt, self.non_replica_type['id'], self.mm_type['id']) - # Retype from mm to non-replica - retyped, model_update = self.driver.retype( - self.ctxt, volume, self.non_replica_type, diff, host) - self.assertEqual(fields.ReplicationStatus.DISABLED, - model_update['replication_status']) - self._assert_vol_exists( - storwize_const.REPLICA_AUX_VOL_PREFIX + volume['name'], False) - - self.driver.delete_volume(volume) - self._assert_vol_exists(volume['name'], False) - rel_info = self.driver._helpers.get_relationship_info(volume['name']) - self.assertIsNone(rel_info) - - # Create gmcv volume - volume, model_update = self._create_test_volume( - self.gmcv_with_cps900_type) - self.assertEqual(fields.ReplicationStatus.ENABLED, - model_update['replication_status']) - # Retype from gmcv to gm - diff, _equal = volume_types.volume_types_diff( - self.ctxt, self.gmcv_with_cps900_type['id'], self.gm_type['id']) - self.assertRaises(exception.VolumeDriverException, - self.driver.retype, self.ctxt, - volume, self.gm_type, diff, host) - # Retype from gmcv to non-replica - diff, _equal = volume_types.volume_types_diff( - self.ctxt, self.gmcv_with_cps900_type['id'], - self.non_replica_type['id']) - retyped, model_update = self.driver.retype( - self.ctxt, volume, 
self.non_replica_type, diff, host) - self.assertEqual(fields.ReplicationStatus.DISABLED, - model_update['replication_status']) - # All change volumes should be deleted - self._assert_vol_exists( - storwize_const.REPLICA_AUX_VOL_PREFIX + volume['name'], False) - self._assert_vol_exists( - storwize_const.REPLICA_CHG_VOL_PREFIX + volume['name'], False) - self._assert_vol_exists( - storwize_const.REPLICA_CHG_VOL_PREFIX + - storwize_const.REPLICA_AUX_VOL_PREFIX + volume['name'], False) - - self.driver.delete_volume(volume) - self._assert_vol_exists(volume['name'], False) - rel_info = self.driver._helpers.get_relationship_info(volume['name']) - self.assertIsNone(rel_info) - - def test_storwize_retype_from_none_to_mirror_replication(self): - # Set replication target - self.driver.configuration.set_override('replication_device', - [self.rep_target]) - self.driver.do_setup(self.ctxt) - - volume, model_update = self._create_test_volume(self.non_replica_type) - self.assertIsNone(model_update) - - # Retype to mm replica - host = {'host': 'openstack@svc#openstack'} - diff, _equal = volume_types.volume_types_diff( - self.ctxt, self.non_replica_type['id'], self.mm_type['id']) - retyped, model_update = self.driver.retype( - self.ctxt, volume, self.mm_type, diff, host) - volume['volume_type_id'] = self.mm_type['id'] - volume['volume_type'] = self.mm_type - self.assertEqual(fields.ReplicationStatus.ENABLED, - model_update['replication_status']) - self._validate_replic_vol_creation(volume) - - self.driver.delete_volume(volume) - - # Create non-replica volume - volume, model_update = self._create_test_volume(self.non_replica_type) - self.assertIsNone(model_update) - - # Retype to gmcv replica - host = {'host': 'openstack@svc#openstack'} - diff, _equal = volume_types.volume_types_diff( - self.ctxt, self.non_replica_type['id'], - self.gmcv_with_cps900_type['id']) - retyped, model_update = self.driver.retype( - self.ctxt, volume, self.gmcv_with_cps900_type, diff, host) - 
volume['volume_type_id'] = self.gmcv_with_cps900_type['id'] - volume['volume_type'] = self.gmcv_with_cps900_type - self.assertEqual(fields.ReplicationStatus.ENABLED, - model_update['replication_status']) - self._validate_replic_vol_creation(volume, True) - - self.driver.delete_volume(volume) - self._validate_replic_vol_deletion(volume, True) - - def test_storwize_retype_from_gmcv_to_gmcv_replication(self): - # Set replication target - self.driver.configuration.set_override('replication_device', - [self.rep_target]) - self.driver.do_setup(self.ctxt) - - # Create gmcv default volume - volume, model_update = self._create_test_volume(self.gmcv_default_type) - self.assertEqual(fields.ReplicationStatus.ENABLED, - model_update['replication_status']) - self._validate_replic_vol_creation(volume, True) - - # Retype to gmcv with cycle_period_seconds 600 replica - host = {'host': 'openstack@svc#openstack'} - diff, _equal = volume_types.volume_types_diff( - self.ctxt, self.gmcv_default_type['id'], - self.gmcv_with_cps600_type['id']) - self.driver.retype(self.ctxt, volume, - self.gmcv_with_cps600_type, diff, host) - volume['volume_type_id'] = self.gmcv_with_cps600_type['id'] - volume['volume_type'] = self.gmcv_with_cps600_type - self._validate_gmcv_vol_retype(volume) - - # Retype to gmcv with cycle_period_seconds 900 replica - diff, _equal = volume_types.volume_types_diff( - self.ctxt, self.gmcv_with_cps600_type['id'], - self.gmcv_with_cps900_type['id']) - self.driver.retype(self.ctxt, volume, - self.gmcv_with_cps900_type, diff, host) - volume['volume_type_id'] = self.gmcv_with_cps900_type['id'] - volume['volume_type'] = self.gmcv_with_cps900_type - self._validate_gmcv_vol_retype(volume) - - # Retype to gmcv with invalid cycle_period_seconds - diff, _equal = volume_types.volume_types_diff( - self.ctxt, self.gmcv_with_cps600_type['id'], - self.gmcv_with_cps86401_type['id']) - self.assertRaises(exception.InvalidInput, self.driver.retype, - self.ctxt, volume, 
self.gmcv_with_cps86401_type, - diff, host) - - # Retype to gmcv default volume - diff, _equal = volume_types.volume_types_diff( - self.ctxt, - self.gmcv_with_cps900_type['id'], - self.gmcv_default_type['id']) - self.driver.retype(self.ctxt, volume, - self.gmcv_default_type, diff, host) - volume['volume_type_id'] = self.gmcv_default_type['id'] - volume['volume_type'] = self.gmcv_default_type - self._validate_gmcv_vol_retype(volume) - - self.driver.delete_volume(volume) - self._validate_replic_vol_deletion(volume, True) - - def test_storwize_extend_volume_replication(self): - # Set replication target. - self.driver.configuration.set_override('replication_device', - [self.rep_target]) - self.driver.do_setup(self.ctxt) - - # Create metro mirror replication volume. - volume, model_update = self._create_test_volume(self.mm_type) - self.assertEqual(fields.ReplicationStatus.ENABLED, - model_update['replication_status']) - - self.driver.extend_volume(volume, '13') - attrs = self.driver._helpers.get_vdisk_attributes(volume['name']) - vol_size = int(attrs['capacity']) / units.Gi - self.assertAlmostEqual(vol_size, 13) - - attrs = self.driver._aux_backend_helpers.get_vdisk_attributes( - storwize_const.REPLICA_AUX_VOL_PREFIX + volume['name']) - vol_size = int(attrs['capacity']) / units.Gi - self.assertAlmostEqual(vol_size, 13) - - self.driver.delete_volume(volume) - self._validate_replic_vol_deletion(volume) - - # Create gmcv replication volume. 
- volume, model_update = self._create_test_volume( - self.gmcv_with_cps900_type) - self.assertEqual(fields.ReplicationStatus.ENABLED, - model_update['replication_status']) - - self.driver.extend_volume(volume, 15) - attrs = self.driver._helpers.get_vdisk_attributes(volume['name']) - vol_size = int(attrs['capacity']) / units.Gi - self.assertAlmostEqual(vol_size, 15) - - attrs = self.driver._aux_backend_helpers.get_vdisk_attributes( - storwize_const.REPLICA_AUX_VOL_PREFIX + volume['name']) - vol_size = int(attrs['capacity']) / units.Gi - self.assertAlmostEqual(vol_size, 15) - - attrs = self.driver._aux_backend_helpers.get_vdisk_attributes( - storwize_const.REPLICA_CHG_VOL_PREFIX + - storwize_const.REPLICA_AUX_VOL_PREFIX + - volume['name']) - vol_size = int(attrs['capacity']) / units.Gi - self.assertAlmostEqual(vol_size, 15) - - attrs = self.driver._helpers.get_vdisk_attributes( - storwize_const.REPLICA_CHG_VOL_PREFIX + volume['name']) - vol_size = int(attrs['capacity']) / units.Gi - self.assertAlmostEqual(vol_size, 15) - - self.driver.delete_volume(volume) - self._validate_replic_vol_deletion(volume) - - def test_storwize_manage_existing_mismatch_with_volume_replication(self): - # Set replication target. - self.driver.configuration.set_override('replication_device', - [self.rep_target]) - self.driver.do_setup(self.ctxt) - - # Create mm replication volume. - rep_volume, model_update = self._create_test_volume(self.mm_type) - self.assertEqual(fields.ReplicationStatus.ENABLED, - model_update['replication_status']) - - # Create non-replication volume. 
- non_rep_volume, model_update = self._create_test_volume( - self.non_replica_type) - - new_volume = self._generate_vol_info() - - ref = {'source-name': rep_volume['name']} - new_volume['volume_type_id'] = self.non_replica_type['id'] - new_volume['volume_type'] = self.non_replica_type - self.assertRaises(exception.ManageExistingVolumeTypeMismatch, - self.driver.manage_existing, new_volume, ref) - - ref = {'source-name': non_rep_volume['name']} - new_volume['volume_type_id'] = self.mm_type['id'] - new_volume['volume_type'] = self.mm_type - self.assertRaises(exception.ManageExistingVolumeTypeMismatch, - self.driver.manage_existing, new_volume, ref) - - ref = {'source-name': rep_volume['name']} - new_volume['volume_type_id'] = self.gm_type['id'] - new_volume['volume_type'] = self.gm_type - self.assertRaises(exception.ManageExistingVolumeTypeMismatch, - self.driver.manage_existing, new_volume, ref) - - ref = {'source-name': rep_volume['name']} - new_volume['volume_type_id'] = self.gmcv_with_cps900_type['id'] - new_volume['volume_type'] = self.gmcv_with_cps900_type - self.assertRaises(exception.ManageExistingVolumeTypeMismatch, - self.driver.manage_existing, new_volume, ref) - - self.driver.delete_volume(rep_volume) - self.driver.delete_volume(new_volume) - - # Create gmcv default replication volume - rep_volume, model_update = self._create_test_volume( - self.gmcv_default_type) - self.assertEqual(fields.ReplicationStatus.ENABLED, - model_update['replication_status']) - new_volume = self._generate_vol_info() - ref = {'source-name': rep_volume['name']} - new_volume['volume_type_id'] = self.gmcv_with_cps900_type['id'] - new_volume['volume_type'] = self.gmcv_with_cps900_type - # manage existing gmcv volume with different cycle period seconds - self.assertRaises( - exception.ManageExistingVolumeTypeMismatch, - self.driver.manage_existing, - new_volume, - ref) - self.driver.delete_volume(rep_volume) - self.driver.delete_volume(new_volume) - - def 
test_storwize_manage_existing_with_volume_replication(self): - # Set replication target. - self.driver.configuration.set_override('replication_device', - [self.rep_target]) - self.driver.do_setup(self.ctxt) - - # Create mm replication volume. - rep_volume, model_update = self._create_test_volume(self.mm_type) - self.assertEqual(fields.ReplicationStatus.ENABLED, - model_update['replication_status']) - - uid_of_master = self._get_vdisk_uid(rep_volume['name']) - uid_of_aux = self._get_vdisk_uid( - storwize_const.REPLICA_AUX_VOL_PREFIX + rep_volume['name']) - - new_volume = self._generate_vol_info() - ref = {'source-name': rep_volume['name']} - new_volume['volume_type_id'] = self.mm_type['id'] - new_volume['volume_type'] = self.mm_type - self.driver.manage_existing(new_volume, ref) - - # Check the uid of the volume which has been renamed. - uid_of_master_volume = self._get_vdisk_uid(new_volume['name']) - uid_of_aux_volume = self._get_vdisk_uid( - storwize_const.REPLICA_AUX_VOL_PREFIX + new_volume['name']) - self.assertEqual(uid_of_master, uid_of_master_volume) - self.assertEqual(uid_of_aux, uid_of_aux_volume) - - self.driver.delete_volume(rep_volume) - # Create gmcv replication volume. 
- rep_volume, model_update = self._create_test_volume( - self.gmcv_with_cps900_type) - self.assertEqual(fields.ReplicationStatus.ENABLED, - model_update['replication_status']) - - uid_of_master = self._get_vdisk_uid(rep_volume['name']) - uid_of_master_change = self._get_vdisk_uid( - storwize_const.REPLICA_CHG_VOL_PREFIX + - rep_volume['name']) - uid_of_aux = self._get_vdisk_uid( - storwize_const.REPLICA_AUX_VOL_PREFIX + - rep_volume['name']) - uid_of_aux_change = self._get_vdisk_uid( - storwize_const.REPLICA_CHG_VOL_PREFIX + - storwize_const.REPLICA_AUX_VOL_PREFIX + - rep_volume['name']) - - new_volume = self._generate_vol_info() - ref = {'source-name': rep_volume['name']} - new_volume['volume_type_id'] = self.gmcv_with_cps900_type['id'] - new_volume['volume_type'] = self.gmcv_with_cps900_type - self.driver.manage_existing(new_volume, ref) - - # Check the uid of the volume which has been renamed. - uid_of_new_master = self._get_vdisk_uid(new_volume['name']) - uid_of_new_master_change = self._get_vdisk_uid( - storwize_const.REPLICA_CHG_VOL_PREFIX + - new_volume['name']) - uid_of_new_aux = self._get_vdisk_uid( - storwize_const.REPLICA_AUX_VOL_PREFIX + - new_volume['name']) - uid_of_new_aux_change = self._get_vdisk_uid( - storwize_const.REPLICA_CHG_VOL_PREFIX + - storwize_const.REPLICA_AUX_VOL_PREFIX + - new_volume['name']) - - self.assertEqual(uid_of_master, uid_of_new_master) - self.assertEqual(uid_of_aux, uid_of_new_aux) - self.assertEqual(uid_of_master_change, uid_of_new_master_change) - self.assertEqual(uid_of_aux_change, uid_of_new_aux_change) - - self.driver.delete_volume(rep_volume) - - @mock.patch.object(storwize_svc_common.StorwizeHelpers, 'rename_vdisk') - @mock.patch.object(storwize_svc_common.StorwizeHelpers, - 'get_relationship_info') - def test_storwize_update_migrated_replication_volume( - self, get_rp_info, rename_vdisk): - self.driver.configuration.set_override('replication_device', - [self.rep_target]) - self.driver.do_setup(self.ctxt) - - # Create 
replication volume. - backend_volume, model_update = self._create_test_volume(self.mm_type) - volume, model_update = self._create_test_volume(self.mm_type) - get_rp_info.side_effect = [{'aux_vdisk_name': 'aux_test'}] - model_update = self.driver.update_migrated_volume(self.ctxt, volume, - backend_volume, - 'available') - aux_vol = (storwize_const.REPLICA_AUX_VOL_PREFIX + volume.name) - rename_vdisk.assert_called_with('aux_test', aux_vol) - self.assertEqual({'_name_id': None}, model_update) - - rename_vdisk.reset_mock() - rename_vdisk.side_effect = exception.VolumeBackendAPIException( - data='foo') - model_update = self.driver.update_migrated_volume(self.ctxt, volume, - backend_volume, - 'available') - self.assertEqual({'_name_id': backend_volume.id}, model_update) - - def test_storwize_delete_volume_with_mirror_replication(self): - # Set replication target. - self.driver.configuration.set_override('replication_device', - [self.rep_target]) - self.driver.do_setup(self.ctxt) - - # Create metro mirror replication. - volume, model_update = self._create_test_volume(self.mm_type) - self.assertEqual(fields.ReplicationStatus.ENABLED, - model_update['replication_status']) - self._validate_replic_vol_creation(volume) - - # Delete volume in non-failover state - self.driver.delete_volume(volume) - self._validate_replic_vol_deletion(volume) - - # Create gmcv replication. 
- gmcv_volume, model_update = self._create_test_volume( - self.gmcv_with_cps600_type) - self.assertEqual(fields.ReplicationStatus.ENABLED, - model_update['replication_status']) - self._validate_replic_vol_creation(gmcv_volume, True) - - # Delete gmcv volume in non-failover state - self.driver.delete_volume(gmcv_volume) - self._validate_replic_vol_deletion(gmcv_volume, True) - - non_replica_vol, model_update = self._create_test_volume( - self.non_replica_type) - self.assertIsNone(model_update) - - volumes = [volume, non_replica_vol, gmcv_volume] - # Delete volume in failover state - self.driver.failover_host( - self.ctxt, volumes, self.rep_target['backend_id'], []) - # Delete non-replicate volume in a failover state - self.assertRaises(exception.VolumeDriverException, - self.driver.delete_volume, - non_replica_vol) - - # Delete replicate volume in failover state - self.driver.delete_volume(volume) - self._validate_replic_vol_deletion(volume) - - self.driver.delete_volume(gmcv_volume) - self._validate_replic_vol_deletion(gmcv_volume, True) - - self.driver.failover_host( - self.ctxt, volumes, 'default', []) - self.driver.delete_volume(non_replica_vol) - self._assert_vol_exists(non_replica_vol['name'], False) - - @mock.patch.object(storwize_svc_common.StorwizeHelpers, - 'delete_vdisk') - @mock.patch.object(storwize_svc_common.StorwizeHelpers, - 'delete_relationship') - @mock.patch.object(storwize_svc_common.StorwizeHelpers, - 'get_relationship_info') - def test_delete_target_volume(self, get_relationship_info, - delete_relationship, - delete_vdisk): - # Set replication target. 
- self.driver.configuration.set_override('replication_device', - [self.rep_target]) - self.driver.do_setup(self.ctxt) - fake_name = 'volume-%s' % fake.VOLUME_ID - get_relationship_info.return_value = {'aux_vdisk_name': - fake_name} - self.driver._helpers.delete_rc_volume(fake_name) - get_relationship_info.assert_called_once_with(fake_name) - delete_relationship.assert_called_once_with(fake_name) - master_change_fake_name = ( - storwize_const.REPLICA_CHG_VOL_PREFIX + fake_name) - calls = [mock.call(master_change_fake_name, False), - mock.call(fake_name, False)] - delete_vdisk.assert_has_calls(calls, any_order=True) - self.assertEqual(2, delete_vdisk.call_count) - - @mock.patch.object(storwize_svc_common.StorwizeHelpers, - 'delete_vdisk') - @mock.patch.object(storwize_svc_common.StorwizeHelpers, - 'delete_relationship') - @mock.patch.object(storwize_svc_common.StorwizeHelpers, - 'get_relationship_info') - def test_delete_target_volume_no_relationship(self, get_relationship_info, - delete_relationship, - delete_vdisk): - # Set replication target. 
- self.driver.configuration.set_override('replication_device', - [self.rep_target]) - self.driver.do_setup(self.ctxt) - fake_name = 'volume-%s' % fake.VOLUME_ID - get_relationship_info.return_value = None - self.driver._helpers.delete_rc_volume(fake_name) - get_relationship_info.assert_called_once_with(fake_name) - self.assertFalse(delete_relationship.called) - self.assertTrue(delete_vdisk.called) - - @mock.patch.object(storwize_svc_common.StorwizeHelpers, - 'delete_vdisk') - @mock.patch.object(storwize_svc_common.StorwizeHelpers, - 'delete_relationship') - @mock.patch.object(storwize_svc_common.StorwizeHelpers, - 'get_relationship_info') - def test_delete_target_volume_fail(self, get_relationship_info, - delete_relationship, - delete_vdisk): - fake_id = fake.VOLUME_ID - fake_name = 'volume-%s' % fake_id - get_relationship_info.return_value = {'aux_vdisk_name': - fake_name} - delete_vdisk.side_effect = Exception - self.assertRaises(exception.VolumeDriverException, - self.driver._helpers.delete_rc_volume, - fake_name) - get_relationship_info.assert_called_once_with(fake_name) - delete_relationship.assert_called_once_with(fake_name) - - def test_storwize_failover_host_backend_error(self): - self.driver.configuration.set_override('replication_device', - [self.rep_target]) - self.driver.do_setup(self.ctxt) - - # Create metro mirror replication. - mm_vol, model_update = self._create_test_volume(self.mm_type) - self.assertEqual(fields.ReplicationStatus.ENABLED, - model_update['replication_status']) - - # Create gmcv replication. 
- gmcv_vol, model_update = self._create_test_volume( - self.gmcv_with_cps900_type) - self.assertEqual(fields.ReplicationStatus.ENABLED, - model_update['replication_status']) - - volumes = [mm_vol, gmcv_vol] - - self.driver._replica_enabled = False - self.assertRaises(exception.UnableToFailOver, - self.driver.failover_host, - self.ctxt, volumes, self.rep_target['backend_id'], - []) - self.driver._replica_enabled = True - self.assertRaises(exception.InvalidReplicationTarget, - self.driver.failover_host, - self.ctxt, volumes, self.fake_target['backend_id'], - []) - - with mock.patch.object(storwize_svc_common.StorwizeHelpers, - 'get_system_info') as get_sys_info: - get_sys_info.side_effect = [ - exception.VolumeBackendAPIException(data='CMMVC6071E'), - exception.VolumeBackendAPIException(data='CMMVC6071E')] - self.assertRaises(exception.UnableToFailOver, - self.driver.failover_host, - self.ctxt, volumes, - self.rep_target['backend_id'], []) - - self.driver._active_backend_id = self.rep_target['backend_id'] - self.assertRaises(exception.UnableToFailOver, - self.driver.failover_host, - self.ctxt, volumes, 'default', []) - self.driver.delete_volume(mm_vol) - self.driver.delete_volume(gmcv_vol) - - @mock.patch.object(storwize_svc_common.StorwizeHelpers, - 'get_relationship_info') - def test_failover_volume_relationship_error(self, get_relationship_info): - # Create global mirror replication. - gm_vol, model_update = self._create_test_volume(self.gm_type) - self.assertEqual(fields.ReplicationStatus.ENABLED, - model_update['replication_status']) - - # Create gmcv replication. 
- gmcv_vol, model_update = self._create_test_volume( - self.gmcv_default_type) - self.assertEqual(fields.ReplicationStatus.ENABLED, - model_update['replication_status']) - - get_relationship_info.side_effect = [None, - exception.VolumeDriverException, - None, - exception.VolumeDriverException] - expected_list = [{'updates': {'replication_status': - fields.ReplicationStatus.FAILOVER_ERROR, - 'status': 'error'}, - 'volume_id': gm_vol['id']}, - {'updates': {'replication_status': - fields.ReplicationStatus.FAILOVER_ERROR, - 'status': 'error'}, - 'volume_id': gmcv_vol['id']} - ] - volumes_update = self.driver._failover_replica_volumes( - self.ctxt, [gm_vol, gmcv_vol]) - self.assertEqual(expected_list, volumes_update) - - volumes_update = self.driver._failover_replica_volumes( - self.ctxt, [gm_vol, gmcv_vol]) - self.assertEqual(expected_list, volumes_update) - - @mock.patch.object(storwize_svc_common.StorwizeSVCCommonDriver, - '_update_volume_stats') - @mock.patch.object(storwize_svc_common.StorwizeSVCCommonDriver, - '_update_storwize_state') - def test_storwize_failover_host_replica_volumes(self, - update_storwize_state, - update_volume_stats): - self.driver.configuration.set_override('replication_device', - [self.rep_target]) - self.driver.do_setup(self.ctxt) - - # Create metro mirror replication. - mm_vol, model_update = self._create_test_volume(self.mm_type) - self.assertEqual(fields.ReplicationStatus.ENABLED, - model_update['replication_status']) - - # Create global replication volume. - gm_vol, model_update = self._create_test_volume(self.gm_type) - self.assertEqual(fields.ReplicationStatus.ENABLED, - model_update['replication_status']) - - # Create gmcv volume. 
- gmcv_vol, model_update = self._create_test_volume( - self.gmcv_with_cps600_type) - self.assertEqual(fields.ReplicationStatus.ENABLED, - model_update['replication_status']) - - volumes = [mm_vol, gm_vol, gmcv_vol] - expected_list = [ - {'updates': - {'replication_status': fields.ReplicationStatus.FAILED_OVER}, - 'volume_id': mm_vol['id']}, - {'updates': - {'replication_status': fields.ReplicationStatus.FAILED_OVER}, - 'volume_id': gm_vol['id']}, - {'updates': - {'replication_status': fields.ReplicationStatus.FAILED_OVER}, - 'volume_id': gmcv_vol['id']}] - - target_id, volume_list, __ = self.driver.failover_host( - self.ctxt, volumes, self.rep_target['backend_id'], []) - self.assertEqual(self.rep_target['backend_id'], target_id) - self.assertEqual(expected_list, volume_list) - - self.assertEqual(self.driver._active_backend_id, target_id) - self.assertEqual(self.driver._aux_backend_helpers, - self.driver._helpers) - self.assertEqual([self.driver._replica_target['pool_name']], - self.driver._get_backend_pools()) - self.assertTrue(update_storwize_state.called) - self.assertTrue(update_volume_stats.called) - - self.driver.delete_volume(mm_vol) - self.driver.delete_volume(gm_vol) - self.driver.delete_volume(gmcv_vol) - - target_id, volume_list, __ = self.driver.failover_host( - self.ctxt, volumes, None, []) - self.assertEqual(self.rep_target['backend_id'], target_id) - self.assertEqual([], volume_list) - - @mock.patch.object(storwize_svc_common.StorwizeSVCCommonDriver, - '_update_volume_stats') - @mock.patch.object(storwize_svc_common.StorwizeSVCCommonDriver, - '_update_storwize_state') - def test_storwize_failover_host_normal_volumes(self, - update_storwize_state, - update_volume_stats): - self.driver.configuration.set_override('replication_device', - [self.rep_target]) - self.driver.do_setup(self.ctxt) - - # Create metro mirror replication. 
- mm_vol, model_update = self._create_test_volume(self.mm_type) - self.assertEqual(fields.ReplicationStatus.ENABLED, - model_update['replication_status']) - mm_vol['status'] = 'in-use' - - # Create gmcv replication. - gmcv_vol, model_update = self._create_test_volume( - self.gmcv_with_cps600_type) - self.assertEqual(fields.ReplicationStatus.ENABLED, - model_update['replication_status']) - gmcv_vol['status'] = 'in-use' - - # Create non-replication volume. - non_replica_vol, model_update = self._create_test_volume( - self.non_replica_type) - self.assertIsNone(model_update) - non_replica_vol['status'] = 'error' - - volumes = [mm_vol, gmcv_vol, non_replica_vol] - - rep_data1 = json.dumps({'previous_status': mm_vol['status']}) - rep_data2 = json.dumps({'previous_status': gmcv_vol['status']}) - rep_data3 = json.dumps({'previous_status': non_replica_vol['status']}) - - expected_list = [{'updates': {'status': 'error', - 'replication_driver_data': rep_data1}, - 'volume_id': mm_vol['id']}, - {'updates': {'status': 'error', - 'replication_driver_data': rep_data2}, - 'volume_id': gmcv_vol['id']}, - {'updates': {'status': 'error', - 'replication_driver_data': rep_data3}, - 'volume_id': non_replica_vol['id']}, - ] - - target_id, volume_list, __ = self.driver.failover_host( - self.ctxt, volumes, self.rep_target['backend_id'], []) - self.assertEqual(self.rep_target['backend_id'], target_id) - self.assertEqual(expected_list, volume_list) - - self.assertEqual(self.driver._active_backend_id, target_id) - self.assertEqual(self.driver._aux_backend_helpers, - self.driver._helpers) - self.assertEqual([self.driver._replica_target['pool_name']], - self.driver._get_backend_pools()) - self.assertTrue(update_storwize_state.called) - self.assertTrue(update_volume_stats.called) - - target_id, volume_list, __ = self.driver.failover_host( - self.ctxt, volumes, None, []) - self.assertEqual(self.rep_target['backend_id'], target_id) - self.assertEqual([], volume_list) - # Delete non-replicate volume 
in a failover state - self.assertRaises(exception.VolumeDriverException, - self.driver.delete_volume, - non_replica_vol) - self.driver.failover_host(self.ctxt, volumes, 'default', []) - self.driver.delete_volume(mm_vol) - self.driver.delete_volume(gmcv_vol) - self.driver.delete_volume(non_replica_vol) - - @mock.patch.object(storwize_svc_common.StorwizeHelpers, - 'switch_relationship') - @mock.patch.object(storwize_svc_common.StorwizeHelpers, - 'stop_relationship') - @mock.patch.object(storwize_svc_common.StorwizeHelpers, - 'get_relationship_info') - def test_failover_host_by_force_access(self, get_relationship_info, - stop_relationship, - switch_relationship): - replica_obj = self.driver._get_replica_obj(storwize_const.METRO) - fake_vol = {'id': '21345678-1234-5678-1234-567812345683', - 'name': 'fake-volume'} - target_vol = storwize_const.REPLICA_AUX_VOL_PREFIX + fake_vol['name'] - context = mock.Mock - get_relationship_info.side_effect = [{ - 'aux_vdisk_name': 'replica-12345678-1234-5678-1234-567812345678', - 'name': 'RC_name'}] - switch_relationship.side_effect = exception.VolumeDriverException - replica_obj.failover_volume_host(context, fake_vol) - get_relationship_info.assert_called_once_with(target_vol) - switch_relationship.assert_called_once_with('RC_name') - stop_relationship.assert_called_once_with(target_vol, access=True) - - @mock.patch.object(storwize_svc_common.StorwizeSVCCommonDriver, - '_update_volume_stats') - @mock.patch.object(storwize_svc_common.StorwizeSVCCommonDriver, - '_update_storwize_state') - def test_storwize_failback_replica_volumes(self, - update_storwize_state, - update_volume_stats): - self.driver.configuration.set_override('replication_device', - [self.rep_target]) - self.driver.do_setup(self.ctxt) - - # Create metro mirror replication. - mm_vol, model_update = self._create_test_volume(self.mm_type) - self.assertEqual(fields.ReplicationStatus.ENABLED, - model_update['replication_status']) - - # Create global mirror replication. 
- gm_vol, model_update = self._create_test_volume(self.gm_type) - self.assertEqual(fields.ReplicationStatus.ENABLED, - model_update['replication_status']) - - # Create gmcv replication. - gmcv_vol, model_update = self._create_test_volume( - self.gmcv_with_cps900_type) - self.assertEqual(fields.ReplicationStatus.ENABLED, - model_update['replication_status']) - - volumes = [gm_vol, mm_vol, gmcv_vol] - failover_expect = [ - {'updates': - {'replication_status': fields.ReplicationStatus.FAILED_OVER}, - 'volume_id': gm_vol['id']}, - {'updates': - {'replication_status': fields.ReplicationStatus.FAILED_OVER}, - 'volume_id': mm_vol['id']}, - {'updates': - {'replication_status': fields.ReplicationStatus.FAILED_OVER}, - 'volume_id': gmcv_vol['id']}] - - failback_expect = [ - {'updates': - {'replication_status': fields.ReplicationStatus.ENABLED, - 'status': 'available'}, - 'volume_id': gm_vol['id']}, - {'updates': - {'replication_status': fields.ReplicationStatus.ENABLED, - 'status': 'available'}, - 'volume_id': mm_vol['id']}, - {'updates': - {'replication_status': fields.ReplicationStatus.ENABLED, - 'status': 'available'}, - 'volume_id': gmcv_vol['id']}] - # Already failback - target_id, volume_list, __ = self.driver.failover_host( - self.ctxt, volumes, 'default', []) - self.assertIsNone(target_id) - self.assertEqual([], volume_list) - - # fail over operation - target_id, volume_list, __ = self.driver.failover_host( - self.ctxt, volumes, self.rep_target['backend_id'], []) - self.assertEqual(self.rep_target['backend_id'], target_id) - self.assertEqual(failover_expect, volume_list) - self.assertTrue(update_storwize_state.called) - self.assertTrue(update_volume_stats.called) - - # fail back operation - target_id, volume_list, __ = self.driver.failover_host( - self.ctxt, volumes, 'default', []) - self.assertEqual('default', target_id) - self.assertEqual(failback_expect, volume_list) - self.assertIsNone(self.driver._active_backend_id) - self.assertEqual(SVC_POOLS, 
self.driver._get_backend_pools()) - self.assertTrue(update_storwize_state.called) - self.assertTrue(update_volume_stats.called) - self.driver.delete_volume(mm_vol) - self.driver.delete_volume(gm_vol) - self.driver.delete_volume(gmcv_vol) - - @mock.patch.object(storwize_svc_common.StorwizeSVCCommonDriver, - '_update_volume_stats') - @mock.patch.object(storwize_svc_common.StorwizeSVCCommonDriver, - '_update_storwize_state') - def test_storwize_failback_normal_volumes(self, - update_storwize_state, - update_volume_stats): - - self.driver.configuration.set_override('replication_device', - [self.rep_target]) - self.driver.do_setup(self.ctxt) - - # Create replication volume. - mm_vol, model_update = self._create_test_volume(self.mm_type) - self.assertEqual(fields.ReplicationStatus.ENABLED, - model_update['replication_status']) - mm_vol['status'] = 'in-use' - gm_vol, model_update = self._create_test_volume(self.gm_type) - self.assertEqual(fields.ReplicationStatus.ENABLED, - model_update['replication_status']) - gm_vol['status'] = 'available' - - # Create gmcv replication. - gmcv_vol, model_update = self._create_test_volume( - self.gmcv_default_type) - self.assertEqual(fields.ReplicationStatus.ENABLED, - model_update['replication_status']) - gmcv_vol['status'] = 'in-use' - - # Create non-replication volume. 
- non_replica_vol1, model_update = self._create_test_volume( - self.non_replica_type) - self.assertIsNone(model_update) - non_replica_vol2, model_update = self._create_test_volume( - self.non_replica_type) - self.assertIsNone(model_update) - non_replica_vol1['status'] = 'error' - non_replica_vol2['status'] = 'available' - - volumes = [mm_vol, gmcv_vol, non_replica_vol1, - non_replica_vol2, gm_vol] - - rep_data0 = json.dumps({'previous_status': mm_vol['status']}) - rep_data1 = json.dumps({'previous_status': gmcv_vol['status']}) - rep_data2 = json.dumps({'previous_status': non_replica_vol1['status']}) - rep_data3 = json.dumps({'previous_status': non_replica_vol2['status']}) - failover_expect = [ - {'updates': - {'replication_status': fields.ReplicationStatus.FAILED_OVER}, - 'volume_id': gm_vol['id']}, - {'updates': {'status': 'error', - 'replication_driver_data': rep_data0}, - 'volume_id': mm_vol['id']}, - {'updates': {'status': 'error', - 'replication_driver_data': rep_data1}, - 'volume_id': gmcv_vol['id']}, - {'updates': {'status': 'error', - 'replication_driver_data': rep_data2}, - 'volume_id': non_replica_vol1['id']}, - {'updates': {'status': 'error', - 'replication_driver_data': rep_data3}, - 'volume_id': non_replica_vol2['id']}] - - # Already failback - target_id, volume_list, __ = self.driver.failover_host( - self.ctxt, volumes, 'default', []) - self.assertIsNone(target_id) - self.assertEqual([], volume_list) - - # fail over operation - target_id, volume_list, __ = self.driver.failover_host( - self.ctxt, volumes, self.rep_target['backend_id'], []) - self.assertEqual(self.rep_target['backend_id'], target_id) - self.assertEqual(failover_expect, volume_list) - self.assertTrue(update_storwize_state.called) - self.assertTrue(update_volume_stats.called) - - # fail back operation - mm_vol['replication_driver_data'] = json.dumps( - {'previous_status': 'in-use'}) - gmcv_vol['replication_driver_data'] = json.dumps( - {'previous_status': 'in-use'}) - 
non_replica_vol1['replication_driver_data'] = json.dumps( - {'previous_status': 'error'}) - non_replica_vol2['replication_driver_data'] = json.dumps( - {'previous_status': 'available'}) - gm_vol['status'] = 'in-use' - rep_data4 = json.dumps({'previous_status': gm_vol['status']}) - failback_expect = [{'updates': {'status': 'in-use', - 'replication_driver_data': ''}, - 'volume_id': mm_vol['id']}, - {'updates': {'status': 'in-use', - 'replication_driver_data': ''}, - 'volume_id': gmcv_vol['id']}, - {'updates': {'status': 'error', - 'replication_driver_data': ''}, - 'volume_id': non_replica_vol1['id']}, - {'updates': {'status': 'available', - 'replication_driver_data': ''}, - 'volume_id': non_replica_vol2['id']}, - {'updates': {'status': 'error', - 'replication_driver_data': rep_data4}, - 'volume_id': gm_vol['id']}] - target_id, volume_list, __ = self.driver.failover_host( - self.ctxt, volumes, 'default', []) - self.assertEqual('default', target_id) - self.assertEqual(failback_expect, volume_list) - self.assertIsNone(self.driver._active_backend_id) - self.assertEqual(SVC_POOLS, self.driver._get_backend_pools()) - self.assertTrue(update_storwize_state.called) - self.assertTrue(update_volume_stats.called) - self.driver.delete_volume(mm_vol) - self.driver.delete_volume(gmcv_vol) - self.driver.delete_volume(non_replica_vol1) - self.driver.delete_volume(non_replica_vol2) - - @mock.patch.object(storwize_svc_common.StorwizeHelpers, - 'get_system_info') - @mock.patch.object(storwize_rep.StorwizeSVCReplicationManager, - '_partnership_validate_create') - def test_establish_partnership_with_local_sys(self, partnership_create, - get_system_info): - get_system_info.side_effect = [{'system_name': 'storwize-svc-sim'}, - {'system_name': 'storwize-svc-sim'}] - - rep_mgr = self.driver._get_replica_mgr() - rep_mgr.establish_target_partnership() - self.assertFalse(partnership_create.called) - - @mock.patch.object(storwize_svc_common.StorwizeHelpers, - 'get_system_info') - def 
test_establish_target_partnership(self, get_system_info): - source_system_name = 'storwize-svc-sim' - target_system_name = 'aux-svc-sim' - - get_system_info.side_effect = [{'system_name': source_system_name}, - {'system_name': target_system_name}] - - rep_mgr = self.driver._get_replica_mgr() - rep_mgr.establish_target_partnership() - partner_info = self.driver._helpers.get_partnership_info( - source_system_name) - self.assertIsNotNone(partner_info) - self.assertEqual(partner_info['name'], source_system_name) - - partner_info = self.driver._helpers.get_partnership_info( - source_system_name) - self.assertIsNotNone(partner_info) - self.assertEqual(partner_info['name'], source_system_name) - - @mock.patch.object(storwize_svc_common.StorwizeHelpers, - 'get_partnership_info') - @mock.patch.object(storwize_svc_common.StorwizeHelpers, - 'chpartnership') - def test_start_partnership(self, chpartnership, get_partnership_info): - get_partnership_info.side_effect = [ - None, - {'partnership': 'fully_configured', - 'id': '0'}, - {'partnership': 'fully_configured_stopped', - 'id': '0'}] - - rep_mgr = self.driver._get_replica_mgr() - rep_mgr._partnership_start(rep_mgr._master_helpers, - 'storwize-svc-sim') - self.assertFalse(chpartnership.called) - rep_mgr._partnership_start(rep_mgr._master_helpers, - 'storwize-svc-sim') - self.assertFalse(chpartnership.called) - - rep_mgr._partnership_start(rep_mgr._master_helpers, - 'storwize-svc-sim') - chpartnership.assert_called_once_with('0') - - @mock.patch.object(storwize_svc_common.StorwizeHelpers, - 'start_relationship') - def test_sync_replica_volumes_with_aux(self, start_relationship): - # Create metro mirror replication. - mm_vol = self._generate_vol_info(self.mm_type) - tgt_volume = storwize_const.REPLICA_AUX_VOL_PREFIX + mm_vol['name'] - - # Create gmcv replication. 
- gmcv_vol = self._generate_vol_info(self.gmcv_with_cps600_type) - tgt_gmcv_volume = (storwize_const.REPLICA_AUX_VOL_PREFIX + - gmcv_vol['name']) - volumes = [mm_vol, gmcv_vol] - - fake_info = {'volume': 'fake', - 'master_vdisk_name': 'fake', - 'aux_vdisk_name': 'fake'} - sync_state = {'state': storwize_const.REP_CONSIS_SYNC, - 'primary': 'fake'} - sync_state.update(fake_info) - - sync_copying_state = {'state': storwize_const.REP_CONSIS_COPYING, - 'primary': 'fake'} - sync_copying_state.update(fake_info) - - disconn_state = {'state': storwize_const.REP_IDL_DISC, - 'primary': 'master'} - disconn_state.update(fake_info) - stop_state = {'state': storwize_const.REP_CONSIS_STOP, - 'primary': 'aux'} - stop_state.update(fake_info) - with mock.patch.object(storwize_svc_common.StorwizeHelpers, - 'get_relationship_info', - mock.Mock(return_value=None)): - self.driver._sync_with_aux(self.ctxt, volumes) - self.assertFalse(start_relationship.called) - - with mock.patch.object(storwize_svc_common.StorwizeHelpers, - 'get_relationship_info', - mock.Mock(return_value=sync_state)): - self.driver._sync_with_aux(self.ctxt, volumes) - self.assertFalse(start_relationship.called) - - with mock.patch.object(storwize_svc_common.StorwizeHelpers, - 'get_relationship_info', - mock.Mock(return_value=sync_copying_state)): - self.driver._sync_with_aux(self.ctxt, volumes) - self.assertFalse(start_relationship.called) - - with mock.patch.object(storwize_svc_common.StorwizeHelpers, - 'get_relationship_info', - mock.Mock(return_value=disconn_state)): - self.driver._sync_with_aux(self.ctxt, volumes) - calls = [mock.call(tgt_volume), mock.call(tgt_gmcv_volume)] - start_relationship.assert_has_calls(calls, any_order=True) - self.assertEqual(2, start_relationship.call_count) - - start_relationship.reset_mock() - with mock.patch.object(storwize_svc_common.StorwizeHelpers, - 'get_relationship_info', - mock.Mock(return_value=stop_state)): - self.driver._sync_with_aux(self.ctxt, volumes) - calls = 
[mock.call(tgt_volume, primary='aux'), - mock.call(tgt_gmcv_volume, primary='aux')] - start_relationship.assert_has_calls(calls, any_order=True) - self.assertEqual(2, start_relationship.call_count) - self.driver.delete_volume(mm_vol) - self.driver.delete_volume(gmcv_vol) - - @mock.patch.object(storwize_svc_common.StorwizeHelpers, - 'get_relationship_info') - @mock.patch('oslo_service.loopingcall.FixedIntervalLoopingCall', - new=testutils.ZeroIntervalLoopingCall) - def test_wait_replica_vol_ready(self, get_relationship_info): - # Create metro mirror replication. - mm_vol = self._generate_vol_info(self.mm_type) - - # Create gmcv replication. - gmcv_vol = self._generate_vol_info(self.gmcv_with_cps900_type) - - fake_info = {'volume': 'fake', - 'master_vdisk_name': 'fake', - 'aux_vdisk_name': 'fake', - 'primary': 'fake'} - sync_state = {'state': storwize_const.REP_CONSIS_SYNC} - sync_state.update(fake_info) - sync_copying_state = {'state': storwize_const.REP_CONSIS_COPYING} - sync_copying_state.update(fake_info) - disconn_state = {'state': storwize_const.REP_IDL_DISC} - disconn_state.update(fake_info) - with mock.patch.object(storwize_svc_common.StorwizeHelpers, - 'get_relationship_info', - mock.Mock(return_value=None)): - self.assertRaises(exception.VolumeBackendAPIException, - self.driver._wait_replica_vol_ready, - self.ctxt, mm_vol) - - with mock.patch.object(storwize_svc_common.StorwizeHelpers, - 'get_relationship_info', - mock.Mock(return_value=None)): - self.assertRaises(exception.VolumeBackendAPIException, - self.driver._wait_replica_vol_ready, - self.ctxt, gmcv_vol) - - with mock.patch.object(storwize_svc_common.StorwizeHelpers, - 'get_relationship_info', - mock.Mock(return_value=sync_state)): - self.driver._wait_replica_vol_ready(self.ctxt, mm_vol) - - with mock.patch.object(storwize_svc_common.StorwizeHelpers, - 'get_relationship_info', - mock.Mock(return_value=sync_copying_state)): - self.driver._wait_replica_vol_ready(self.ctxt, gmcv_vol) - - with 
mock.patch.object(storwize_svc_common.StorwizeHelpers, - 'get_relationship_info', - mock.Mock(return_value=disconn_state)): - self.assertRaises(exception.VolumeBackendAPIException, - self.driver._wait_replica_vol_ready, - self.ctxt, mm_vol) - - with mock.patch.object(storwize_svc_common.StorwizeHelpers, - 'get_relationship_info', - mock.Mock(return_value=disconn_state)): - self.assertRaises(exception.VolumeBackendAPIException, - self.driver._wait_replica_vol_ready, - self.ctxt, gmcv_vol) diff --git a/cinder/tests/unit/volume/drivers/ibm/test_xiv_proxy.py b/cinder/tests/unit/volume/drivers/ibm/test_xiv_proxy.py deleted file mode 100644 index c280993fd..000000000 --- a/cinder/tests/unit/volume/drivers/ibm/test_xiv_proxy.py +++ /dev/null @@ -1,2297 +0,0 @@ -# Copyright (c) 2016 IBM Corporation -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
-# -import mock -import six -from xml.etree import ElementTree - -from cinder import context -from cinder import exception -from cinder import objects -from cinder.objects import fields -from cinder import test -from cinder.tests.unit import fake_constants as fake -from cinder.tests.unit import utils as testutils -from cinder.tests.unit.volume.drivers.ibm import fake_pyxcli -import cinder.volume.drivers.ibm.ibm_storage as storage -from cinder.volume.drivers.ibm.ibm_storage import cryptish -from cinder.volume.drivers.ibm.ibm_storage.xiv_proxy import XIVProxy -from cinder.volume.drivers.ibm.ibm_storage import xiv_replication -from cinder.volume import group_types - -errors = fake_pyxcli.pyxcli_client.errors -mirroring = fake_pyxcli.pyxcli_client.mirroring - -test_mock = mock.MagicMock() -module_patcher = mock.MagicMock() - -test_mock.cinder.exception = exception - - -TEST_LOG_PREFIX = storage.XIV_LOG_PREFIX -TEST_VOLUME = { - 'name': 'BLA', - 'id': 23, - 'size': 17, - 'group_id': fake.CONSISTENCY_GROUP_ID, -} - -TEST_GROUP_SPECS = { - 'group_replication_enabled': ' True', - 'replication_type': 'sync', -} - -TEST_EXTRA_SPECS = { - 'replication_enabled': ' False', -} -TEST_EXTRA_SPECS_REPL = { - 'replication_enabled': ' True', - 'replication_type': 'sync', -} - -TEST_WWPNS = ["50017380FE020160", "50017380FE020161", "50017380FE020162"] -TEST_INITIATOR = 'c5507606d5680e05' -TEST_CONNECTOR = { - 'ip': '129.123.123.123', - 'initiator': TEST_INITIATOR, - 'wwpns': [TEST_INITIATOR], -} -TEST_TARGET_MAP = {TEST_INITIATOR: TEST_WWPNS} - -TEST_HOST_ID = 11 -TEST_HOST_NAME = 'WTF32' -TEST_CHAP_NAME = 'WTF64' -TEST_CHAP_SECRET = 'V1RGNjRfXw==' - -FC_TARGETS_OPTIMIZED = [ - "50017380FE020160", "50017380FE020190", "50017380FE020192"] -FC_TARGETS_OPTIMIZED_WITH_HOST = [ - "50017380FE020160", "50017380FE020192"] -FC_TARGETS_BEFORE_SORTING = [ - "50017380FE020160", "50017380FE020161", "50017380FE020162", - "50017380FE020190", "50017380FE020191", "50017380FE020192"] 
-FC_TARGETS_AFTER_SORTING = [ - "50017380FE020190", "50017380FE020160", "50017380FE020191", - "50017380FE020161", "50017380FE020162", "50017380FE020192"] - -FC_PORT_LIST_OUTPUT = [ - {'component_id': '1:FC_Port:4:1', 'port_state': 'Online', 'role': 'Target', - 'wwpn': '50017380FE020160'}, - {'component_id': '1:FC_Port:5:1', 'port_state': 'Link Problem', - 'role': 'Target', 'wwpn': '50017380FE020161'}, - {'component_id': '1:FC_Port:6:1', 'port_state': 'Online', - 'role': 'Initiator', 'wwpn': '50017380FE020162'}, - {'component_id': '1:FC_Port:7:1', 'port_state': 'Link Problem', - 'role': 'Initiator', 'wwpn': '50017380FE020163'}, - {'component_id': '1:FC_Port:8:1', 'port_state': 'Online', 'role': 'Target', - 'wwpn': '50017380FE020190'}, - {'component_id': '1:FC_Port:9:1', 'port_state': 'Link Problem', - 'role': 'Target', 'wwpn': '50017380FE020191'}, - {'component_id': '1:FC_Port:4:1', 'port_state': 'Online', 'role': 'Target', - 'wwpn': '50017380FE020192'}, - {'component_id': '1:FC_Port:5:1', 'port_state': 'Link Problem', - 'role': 'Initiator', 'wwpn': '50017380FE020193'}] - -HOST_CONNECTIVITY_LIST = [ - {'host': 'nova-compute-c5507606d5680e05', 'host_port': '10000000C97D26DB', - 'local_fc_port': '1:FC_Port:4:1', 'local_iscsi_port': '', - 'module': '1:Module:4', 'type': 'FC'}] - -REPLICA_ID = 'WTF32' -REPLICA_IP = '1.2.3.4' -REPLICA_USER = 'WTF64' -REPLICA_PASSWORD = 'WTFWTF' -REPLICA_POOL = 'WTF64' -REPLICA_PARAMS = { - 'san_ip': REPLICA_IP, - 'san_login': REPLICA_USER, - 'san_password': cryptish.encrypt(REPLICA_PASSWORD), - 'san_clustername': REPLICA_POOL -} - - -class XIVProxyTest(test.TestCase): - - """Tests the main Proxy driver""" - - def setUp(self): - """import at setup to ensure module patchers are in place""" - super(XIVProxyTest, self).setUp() - - self.proxy = XIVProxy - self.version = "cinder" - self.proxy.configuration = {} - self.ctxt = context.get_admin_context() - - self.default_storage_info = { - 'user': "WTF32", - 'password': 
cryptish.encrypt("WTF32"), - 'address': "WTF32", - 'vol_pool': "WTF32", - 'management_ips': "WTF32", - 'system_id': "WTF32" - } - self.proxy.configuration['replication_device'] = { - 'backend_id': REPLICA_ID, - 'san_ip': REPLICA_IP, - 'san_user': REPLICA_USER, - 'san_password': REPLICA_PASSWORD, - } - - @mock.patch("cinder.volume.drivers.ibm.ibm_storage" - ".xiv_proxy.socket.getfqdn", new=mock.MagicMock( - return_value='test_hostname')) - def test_setup_should_fail_if_password_is_not_encrypted(self): - """Passing an unencrypted password should raise an error""" - - storage_info = self.default_storage_info.copy() - - storage_info['password'] = "WTF32" - - p = self.proxy(storage_info, mock.MagicMock(), - test_mock.cinder.exception) - - self.assertRaises(test_mock.cinder.exception.InvalidParameterValue, - p.setup, {}) - - @mock.patch("cinder.volume.drivers.ibm.ibm_storage.xiv_proxy.client." - "XCLIClient") - @mock.patch("cinder.volume.drivers.ibm.ibm_storage.xiv_proxy.socket." - "getfqdn", new=mock.MagicMock( - return_value='test_hostname')) - def test_setup_should_fail_if_credentials_are_invalid(self, mock_xcli): - """Passing invalid credentials should raise an error""" - driver = mock.MagicMock() - driver.VERSION = "VERSION" - - p = self.proxy( - self.default_storage_info, - mock.MagicMock(), - test_mock.cinder.exception, - driver) - - mock_xcli.connect_multiendpoint_ssl = mock.MagicMock( - side_effect=errors.CredentialsError( - 'bla', 'bla', ElementTree.Element("bla"))) - - self.assertRaises(test_mock.cinder.exception.NotAuthorized, - p.setup, {}) - - @mock.patch("cinder.volume.drivers.ibm.ibm_storage." - "xiv_proxy.client.XCLIClient") - @mock.patch("cinder.volume.drivers.ibm.ibm_storage." 
- "xiv_proxy.socket.getfqdn", new=mock.MagicMock( - return_value='test_hostname')) - def test_setup_should_fail_if_connection_is_invalid(self, mock_xcli): - """Passing an invalid host to the setup should raise an error""" - driver = mock.MagicMock() - driver.VERSION = "VERSION" - - p = self.proxy( - self.default_storage_info, - mock.MagicMock(), - test_mock.cinder.exception, - driver) - - mock_xcli.connect_multiendpoint_ssl = mock.MagicMock( - side_effect=errors.ConnectionError( - 'bla', 'bla', ElementTree.Element("bla"))) - - self.assertRaises(test_mock.cinder.exception.HostNotFound, - p.setup, {}) - - @mock.patch("cinder.volume.drivers.ibm.ibm_storage.xiv_proxy." - "client.XCLIClient") - @mock.patch("cinder.volume.drivers.ibm.ibm_storage." - "xiv_proxy.storage.get_online_iscsi_ports", - mock.MagicMock(return_value=['WTF32'])) - @mock.patch("cinder.volume.drivers.ibm.ibm_storage." - "xiv_proxy.socket.getfqdn", new=mock.MagicMock( - return_value='test_hostname')) - def test_setup_should_set_iqn_and_portal(self, mock_xcli): - """Test setup - - Setup should retrieve values from xcli - and set the IQN and Portal - """ - - p = self.proxy( - self.default_storage_info, - mock.MagicMock(), - test_mock.cinder.exception) - - cmd = mock_xcli.connect_multiendpoint_ssl.return_value.cmd - item = cmd.config_get.return_value.as_dict.return_value.__getitem__ - item.return_value.value = "BLA" - - p.setup({}) - - self.assertEqual("BLA", p.meta.get('ibm_storage_iqn')) - self.assertEqual("WTF32:3260", p.meta.get('ibm_storage_portal')) - - @mock.patch("cinder.volume.drivers.ibm.ibm_storage.xiv_proxy." - "client.XCLIClient") - @mock.patch("cinder.volume.drivers.ibm.ibm_storage." - "xiv_proxy.storage.get_online_iscsi_ports", - mock.MagicMock(return_value=['WTF32'])) - @mock.patch("cinder.volume.drivers.ibm.ibm_storage." - "xiv_proxy.socket.getfqdn", new=mock.MagicMock( - return_value='test_hostname')) - @mock.patch("cinder.volume.drivers.ibm.ibm_storage." 
- "xiv_proxy.XIVProxy._get_target_params", - mock.MagicMock(return_value=REPLICA_PARAMS)) - @mock.patch("cinder.volume.drivers.ibm.ibm_storage." - "xiv_proxy.XIVProxy._get_target", - mock.MagicMock(return_value="BLABLA")) - def test_setup_should_succeed_if_replica_is_set(self, mock_xcli): - """Test setup - - Setup should succeed if replica is set - """ - p = self.proxy( - self.default_storage_info, - mock.MagicMock(), - test_mock.cinder.exception) - - cmd = mock_xcli.connect_multiendpoint_ssl.return_value.cmd - item = cmd.config_get.return_value.as_dict.return_value.__getitem__ - item.return_value.value = "BLA" - - SCHEDULE_LIST_RESPONSE = { - '00:01:00': {'interval': 120}, - '00:02:00': {'interval': 300}, - '00:05:00': {'interval': 600}, - '00:10:00': {'interval': 1200}, - } - cmd = mock_xcli.connect_multiendpoint_ssl.return_value.cmd - cmd.schedule_list.return_value\ - .as_dict.return_value = SCHEDULE_LIST_RESPONSE - - p.setup({}) - - @mock.patch("cinder.volume.drivers.ibm.ibm_storage.xiv_proxy." - "client.XCLIClient") - @mock.patch("cinder.volume.drivers.ibm.ibm_storage." - "xiv_proxy.storage.get_online_iscsi_ports", - mock.MagicMock(return_value=['WTF32'])) - @mock.patch("cinder.volume.drivers.ibm.ibm_storage." - "xiv_proxy.socket.getfqdn", new=mock.MagicMock( - return_value='test_hostname')) - @mock.patch("cinder.volume.drivers.ibm.ibm_storage." - "xiv_proxy.XIVProxy._get_target_params", - mock.MagicMock(return_value=REPLICA_PARAMS)) - @mock.patch("cinder.volume.drivers.ibm.ibm_storage." 
- "xiv_proxy.XIVProxy._get_target", - mock.MagicMock(return_value="BLABLA")) - def test_setup_should_fail_if_schedule_create_fails(self, mock_xcli): - """Test setup - - Setup should fail if replica is set and schedule_create fails - """ - - p = self.proxy( - self.default_storage_info, - mock.MagicMock(), - test_mock.cinder.exception) - - cmd = mock_xcli.connect_multiendpoint_ssl.return_value.cmd - item = cmd.config_get.return_value.as_dict.return_value.__getitem__ - item.return_value.value = "BLA" - cmd.schedule_list.return_value.as_dict.return_value = {} - cmd.schedule_create.side_effect = ( - errors.XCLIError('bla')) - - self.assertRaises(exception.VolumeBackendAPIException, p.setup, {}) - - def test_create_volume_should_call_xcli(self): - """Create volume should call xcli with the correct parameters""" - driver = mock.MagicMock() - driver.VERSION = "VERSION" - - p = self.proxy( - self.default_storage_info, - mock.MagicMock(), - test_mock.cinder.exception, - driver) - - p.ibm_storage_cli = mock.MagicMock() - - volume = testutils.create_volume( - self.ctxt, size=16, display_name='WTF32') - p.create_volume(volume) - - p.ibm_storage_cli.cmd.vol_create.assert_called_once_with( - vol=volume.name, - size_blocks=storage.gigabytes_to_blocks(16), - pool='WTF32') - - def test_create_volume_should_fail_if_no_pool_space(self): - """Test create volume - - Create volume should raise an error - if there's no pool space left - """ - driver = mock.MagicMock() - driver.VERSION = "VERSION" - - p = self.proxy( - self.default_storage_info, - mock.MagicMock(), - test_mock.cinder.exception, - driver) - - p.ibm_storage_cli = mock.MagicMock() - p.ibm_storage_cli.cmd.vol_create.side_effect = ( - errors.PoolOutOfSpaceError( - 'bla', 'bla', ElementTree.Element('bla'))) - - volume = testutils.create_volume( - self.ctxt, size=16, display_name='WTF32', - volume_type_id='b3fcacb5-fbd8-4394-8c00-06853bc13929') - - ex = getattr(p, "_get_exception")() - self.assertRaises(ex, p.create_volume, 
volume) - - @mock.patch("cinder.volume.drivers.ibm.ibm_storage." - "xiv_replication.VolumeReplication.create_replication", - mock.MagicMock()) - @mock.patch("cinder.volume.drivers.ibm.ibm_storage." - "xiv_replication.GroupReplication.create_replication", - mock.MagicMock()) - @mock.patch("cinder.volume.drivers.ibm.ibm_storage." - "xiv_proxy.XIVProxy.get_group_specs_by_group_resource", - mock.MagicMock(return_value=(TEST_GROUP_SPECS, ''))) - @mock.patch("cinder.volume.drivers.ibm.ibm_storage." - "xiv_proxy.XIVProxy._get_target_params", - mock.MagicMock(return_value=REPLICA_PARAMS)) - @mock.patch("cinder.volume.drivers.ibm.ibm_storage." - "xiv_proxy.XIVProxy._get_target", - mock.MagicMock(return_value="BLABLA")) - def test_enable_replication(self): - """Test enable_replication""" - driver = mock.MagicMock() - driver.VERSION = "VERSION" - - p = self.proxy( - self.default_storage_info, - mock.MagicMock(), - test_mock.cinder.exception, - driver) - p.ibm_storage_cli = mock.MagicMock() - p._call_remote_xiv_xcli = mock.MagicMock() - p._update_consistencygroup = mock.MagicMock() - p.targets = {'tgt1': 'info1'} - - group = self._create_test_group('WTF') - vol = testutils.create_volume(self.ctxt) - ret = p.enable_replication(self.ctxt, group, [vol]) - - self.assertEqual(( - {'replication_status': fields.ReplicationStatus.ENABLED}, - [{'id': vol['id'], - 'replication_status': fields.ReplicationStatus.ENABLED}]), ret) - - @mock.patch("cinder.volume.drivers.ibm.ibm_storage." 
- "xiv_replication.VolumeReplication.delete_replication", - mock.MagicMock()) - @mock.patch("cinder.volume.group_types.get_group_type_specs", - mock.MagicMock(return_value=TEST_GROUP_SPECS)) - def test_disable_replication(self): - """Test disable_replication""" - driver = mock.MagicMock() - driver.VERSION = "VERSION" - - p = self.proxy( - self.default_storage_info, - mock.MagicMock(), - test_mock.cinder.exception, - driver) - p.ibm_storage_cli = mock.MagicMock() - p._call_remote_xiv_xcli = mock.MagicMock() - p._update_consistencygroup = mock.MagicMock() - - group = self._create_test_group('WTF') - ret = p.disable_replication(self.ctxt, group, []) - - self.assertEqual(( - {'replication_status': fields.ReplicationStatus.DISABLED}, []), - ret) - - @mock.patch("cinder.volume.drivers.ibm.ibm_storage." - "xiv_proxy.XIVProxy._using_default_backend", - mock.MagicMock(return_value=False)) - @mock.patch("cinder.volume.drivers.ibm.ibm_storage." - "xiv_proxy.XIVProxy._get_target_params", - mock.MagicMock(return_value={'san_clustername': "master"})) - @mock.patch("cinder.volume.drivers.ibm.ibm_storage." - "xiv_proxy.XIVProxy._init_xcli", - mock.MagicMock()) - @mock.patch("cinder.volume.drivers.ibm.ibm_storage." - "xiv_proxy.XIVProxy._init_xcli", - mock.MagicMock()) - @mock.patch("cinder.volume.drivers.ibm.ibm_storage." - "xiv_proxy.XIVProxy.get_group_specs_by_group_resource", - mock.MagicMock(return_value=(TEST_GROUP_SPECS, ''))) - @mock.patch("cinder.volume.drivers.ibm.ibm_storage." 
- "xiv_replication.GroupReplication.failover", - mock.MagicMock(return_value=(True, 'good'))) - def test_failover_replication_with_default(self): - driver = mock.MagicMock() - driver.VERSION = "VERSION" - - p = self.proxy( - self.default_storage_info, - mock.MagicMock(), - test_mock.cinder.exception, - driver) - group = self._create_test_group('WTF') - group.replication_status = fields.ReplicationStatus.FAILED_OVER - vol = testutils.create_volume(self.ctxt) - group_update, vol_update = p.failover_replication(self.ctxt, group, - [vol], 'default') - updates = {'status': 'available'} - self.assertEqual(({'replication_status': 'available'}, - [{'volume_id': vol['id'], - 'updates': updates}]), (group_update, vol_update)) - - def test_failover_resource_no_mirror(self): - driver = mock.MagicMock() - driver.VERSION = "VERSION" - - p = self.proxy( - self.default_storage_info, - mock.MagicMock(), - test_mock.cinder.exception, - driver) - - recovery_mgr = mock.MagicMock() - recovery_mgr.is_mirror_active = mock.MagicMock() - recovery_mgr.is_mirror_active.return_value = False - - group = self._create_test_group('WTF') - ret = xiv_replication.Replication(p)._failover_resource( - group, recovery_mgr, mock.MagicMock, 'cg', True) - msg = ("%(rep_type)s %(res)s: no active mirroring and can not " - "failback" % {'rep_type': 'cg', - 'res': group['name']}) - self.assertEqual((False, msg), ret) - - def test_failover_resource_mirror(self): - driver = mock.MagicMock() - driver.VERSION = "VERSION" - - p = self.proxy( - self.default_storage_info, - mock.MagicMock(), - test_mock.cinder.exception, - driver) - recovery_mgr = mock.MagicMock() - recovery_mgr.is_mirror_active = mock.MagicMock() - recovery_mgr.is_mirror_active.return_value = True - - group = self._create_test_group('WTF') - ret = xiv_replication.Replication(p)._failover_resource( - group, recovery_mgr, mock.MagicMock, 'cg', True) - - self.assertEqual((True, None), ret) - - def test_failover_resource_change_role(self): - driver = 
mock.MagicMock() - driver.VERSION = "VERSION" - - p = self.proxy( - self.default_storage_info, - mock.MagicMock(), - test_mock.cinder.exception, - driver) - recovery_mgr = mock.MagicMock() - recovery_mgr.is_mirror_active = mock.MagicMock() - recovery_mgr.is_mirror_active.return_value = True - recovery_mgr.switch_roles.side_effect = ( - errors.XCLIError('')) - failover_rep_mgr = mock.MagicMock() - failover_rep_mgr.change_role = mock.MagicMock() - group = self._create_test_group('WTF') - - xiv_replication.Replication(p)._failover_resource( - group, recovery_mgr, failover_rep_mgr, 'cg', True) - - failover_rep_mgr.change_role.assert_called_once_with( - resource_id=group['name'], - new_role='Slave') - - @mock.patch("cinder.volume.utils.is_group_a_cg_snapshot_type", - mock.MagicMock(return_value=True)) - def test_create_volume_with_consistency_group(self): - """Test Create volume with consistency_group""" - driver = mock.MagicMock() - driver.VERSION = "VERSION" - - p = self.proxy( - self.default_storage_info, - mock.MagicMock(), - test_mock.cinder.exception, - driver) - - p.ibm_storage_cli = mock.MagicMock() - p._cg_name_from_volume = mock.MagicMock(return_value="cg") - - vol_type = testutils.create_volume_type(self.ctxt, name='WTF') - volume = testutils.create_volume( - self.ctxt, size=16, volume_type_id=vol_type.id) - - grp = self._create_test_group('WTF') - volume.group = grp - p.create_volume(volume) - - p.ibm_storage_cli.cmd.vol_create.assert_called_once_with( - vol=volume['name'], - size_blocks=storage.gigabytes_to_blocks(16), - pool='WTF32') - p.ibm_storage_cli.cmd.cg_add_vol.assert_called_once_with( - vol=volume['name'], - cg='cg') - - @mock.patch("cinder.volume.drivers.ibm.ibm_storage." - "xiv_replication.VolumeReplication.create_replication", - mock.MagicMock()) - @mock.patch("cinder.volume.drivers.ibm.ibm_storage." - "xiv_proxy.XIVProxy._get_qos_specs", - mock.MagicMock(return_value=None)) - @mock.patch("cinder.volume.drivers.ibm.ibm_storage." 
- "xiv_proxy.XIVProxy._get_extra_specs", - mock.MagicMock(return_value=TEST_EXTRA_SPECS_REPL)) - def test_create_volume_with_replication(self): - """Test Create volume with replication""" - driver = mock.MagicMock() - driver.VERSION = "VERSION" - p = self.proxy( - self.default_storage_info, - mock.MagicMock(), - test_mock.cinder.exception, - driver) - - p.ibm_storage_cli = mock.MagicMock() - - volume = testutils.create_volume( - self.ctxt, size=16, display_name='WTF32', - volume_type_id='b3fcacb5-fbd8-4394-8c00-06853bc13929') - volume.group = None - p.create_volume(volume) - - @mock.patch("cinder.volume.drivers.ibm.ibm_storage." - "xiv_replication.VolumeReplication.create_replication", - mock.MagicMock()) - @mock.patch("cinder.volume.drivers.ibm.ibm_storage." - "xiv_proxy.XIVProxy._get_qos_specs", - mock.MagicMock(return_value=None)) - @mock.patch("cinder.volume.drivers.ibm.ibm_storage." - "xiv_proxy.XIVProxy._get_extra_specs", - mock.MagicMock(return_value=TEST_EXTRA_SPECS_REPL)) - def test_create_volume_with_replication_and_cg(self): - """Test Create volume with replication and CG""" - driver = mock.MagicMock() - driver.VERSION = "VERSION" - - p = self.proxy( - self.default_storage_info, - mock.MagicMock(), - test_mock.cinder.exception, - driver) - - p.ibm_storage_cli = mock.MagicMock() - - volume = testutils.create_volume( - self.ctxt, size=16, display_name='WTF32', - volume_type_id='b3fcacb5-fbd8-4394-8c00-06853bc13929') - grp = testutils.create_group(self.ctxt, name='bla', group_type_id='1') - volume.group = grp - ex = getattr(p, "_get_exception")() - self.assertRaises(ex, p.create_volume, volume) - - @mock.patch("cinder.volume.drivers.ibm.ibm_storage." - "xiv_proxy.XIVProxy._get_qos_specs", - mock.MagicMock(return_value=None)) - @mock.patch("cinder.volume.drivers.ibm.ibm_storage." 
- "xiv_proxy.XIVProxy._get_extra_specs", - mock.MagicMock(return_value=TEST_EXTRA_SPECS_REPL)) - def test_create_volume_with_replication_multiple_targets(self): - """Test Create volume with replication and multiple targets""" - driver = mock.MagicMock() - driver.VERSION = "VERSION" - - p = self.proxy( - self.default_storage_info, - mock.MagicMock(), - test_mock.cinder.exception, - driver) - - p.ibm_storage_cli = mock.MagicMock() - volume = testutils.create_volume( - self.ctxt, size=16, display_name='WTF32', - volume_type_id='b3fcacb5-fbd8-4394-8c00-06853bc13929') - volume.group = None - ex = getattr(p, "_get_exception")() - self.assertRaises(ex, p.create_volume, volume) - - def test_delete_volume_should_pass_the_correct_parameters(self): - """Delete volume should call xcli with the correct parameters""" - driver = mock.MagicMock() - driver.VERSION = "VERSION" - - p = self.proxy( - self.default_storage_info, - mock.MagicMock(), - test_mock.cinder.exception, - driver) - - p.ibm_storage_cli = mock.MagicMock() - - p.ibm_storage_cli.cmd.vol_list.return_value.as_list = ['aa'] - - p.delete_volume({'name': 'WTF32'}) - - p.ibm_storage_cli.cmd.vol_delete.assert_called_once_with(vol='WTF32') - - @mock.patch("cinder.volume.drivers.ibm.ibm_storage." - "xiv_replication.VolumeReplication.delete_replication", - mock.MagicMock()) - @mock.patch("cinder.volume.drivers.ibm.ibm_storage." - "xiv_proxy.XIVProxy._get_extra_specs", - mock.MagicMock(return_value=TEST_EXTRA_SPECS_REPL)) - def test_delete_volume_with_replication(self): - """Test Delete volume with replication""" - driver = mock.MagicMock() - driver.VERSION = "VERSION" - - p = self.proxy( - self.default_storage_info, - mock.MagicMock(), - test_mock.cinder.exception, - driver) - - p.ibm_storage_cli = mock.MagicMock() - - volume = {'size': 16, 'name': 'WTF32', 'volume_type_id': 'WTF'} - p.delete_volume(volume) - - @mock.patch("cinder.volume.drivers.ibm.ibm_storage." 
- "xiv_proxy.XIVProxy._get_extra_specs", - mock.MagicMock(return_value=TEST_EXTRA_SPECS_REPL)) - @mock.patch("cinder.volume.drivers.ibm.ibm_storage." - "xiv_proxy.client.XCLIClient") - @mock.patch("cinder.volume.drivers.ibm.ibm_storage." - "xiv_proxy.XIVProxy._get_target_params", - mock.MagicMock(return_value=REPLICA_PARAMS)) - def test_failover_host(self, mock_xcli): - """Test failover_host with valid target""" - driver = mock.MagicMock() - driver.VERSION = "VERSION" - - p = self.proxy( - self.default_storage_info, - mock.MagicMock(), - test_mock.cinder.exception, - driver) - - p.ibm_storage_cli = mock_xcli - p.ibm_storage_cli.connect_multiendpoint_ssl.return_value - mock_xcli.connect_multiendpoint_ssl.return_value = mock_xcli - - volume = {'id': 'WTF64', 'size': 16, - 'name': 'WTF32', 'volume_type_id': 'WTF'} - target = REPLICA_ID - p.failover_host({}, [volume], target, []) - - def test_failover_host_invalid_target(self): - """Test failover_host with invalid target""" - driver = mock.MagicMock() - driver.VERSION = "VERSION" - - p = self.proxy( - self.default_storage_info, - mock.MagicMock(), - test_mock.cinder.exception, - driver) - - volume = {'id': 'WTF64', 'size': 16, - 'name': 'WTF32', 'volume_type_id': 'WTF'} - target = 'Invalid' - ex = getattr(p, "_get_exception")() - self.assertRaises(ex, p.failover_host, {}, [volume], target, []) - - @mock.patch("cinder.volume.drivers.ibm.ibm_storage." - "xiv_proxy.client.XCLIClient") - @mock.patch("cinder.volume.drivers.ibm.ibm_storage." 
- "xiv_proxy.XIVProxy._get_target_params", - mock.MagicMock(return_value=REPLICA_PARAMS)) - def test_failover_host_no_connection_to_target(self, mock_xcli): - """Test failover_host that fails to connect to target""" - driver = mock.MagicMock() - driver.VERSION = "VERSION" - - p = self.proxy( - self.default_storage_info, - mock.MagicMock(), - test_mock.cinder.exception, - driver) - - p.ibm_storage_cli = mock_xcli - p.ibm_storage_cli.connect_multiendpoint_ssl.return_value - mock_xcli.connect_multiendpoint_ssl.side_effect = errors.XCLIError('') - - volume = {'id': 'WTF64', 'size': 16, - 'name': 'WTF32', 'volume_type_id': 'WTF'} - target = REPLICA_ID - ex = getattr(p, "_get_exception")() - self.assertRaises(ex, p.failover_host, {}, [volume], target, []) - - @mock.patch("cinder.volume.drivers.ibm.ibm_storage." - "xiv_proxy.client.XCLIClient") - @mock.patch("cinder.volume.drivers.ibm.ibm_storage." - "xiv_proxy.XIVProxy._get_target_params", - mock.MagicMock(return_value=REPLICA_PARAMS)) - def test_failback_host(self, mock_xcli): - """Test failing back after DR""" - driver = mock.MagicMock() - driver.VERSION = "VERSION" - - p = self.proxy( - self.default_storage_info, - mock.MagicMock(), - test_mock.cinder.exception, - driver) - - volume = {'id': 'WTF64', 'size': 16, - 'name': 'WTF32', 'volume_type_id': 'WTF'} - target = 'default' - p.failover_host(None, [volume], target, []) - - def qos_test_empty_name_if_no_specs(self): - """Test empty name in case no specs are specified""" - driver = mock.MagicMock() - driver.VERSION = "VERSION" - - p = self.proxy( - self.default_storage_info, - mock.MagicMock(), - test_mock.cinder.exception, - driver) - - perf_name = p._check_perf_class_on_backend({}) - self.assertEqual('', perf_name) - - def test_qos_class_name_contains_qos_type(self): - """Test backend naming - - Test if the naming convention is correct - when getting the right specs with qos type - """ - driver = mock.MagicMock() - driver.VERSION = "VERSION" - - p = self.proxy( - 
self.default_storage_info, - mock.MagicMock(), - test_mock.cinder.exception, - driver) - - p.ibm_storage_cli = mock.MagicMock() - p.ibm_storage_cli.cmd.perf_class_list.return_value.as_list = [] - perf_name = p._check_perf_class_on_backend({'bw': '100', - 'type': 'independent'}) - - self.assertEqual('cinder-qos_bw_100_type_independent', perf_name) - - def test_qos_called_with_type_parameter(self): - """Test xcli call for qos creation with type""" - driver = mock.MagicMock() - driver.VERSION = "VERSION" - - p = self.proxy( - self.default_storage_info, - mock.MagicMock(), - test_mock.cinder.exception, - driver) - - p.ibm_storage_cli = mock.MagicMock() - p.ibm_storage_cli.cmd.perf_class_list.return_value.as_list = [] - perf_name = p._check_perf_class_on_backend({'bw': '100', - 'type': 'independent'}) - p.ibm_storage_cli.cmd.perf_class_create.assert_called_once_with( - perf_class=perf_name, - type='independent') - - def test_qos_called_with_wrong_type_parameter(self): - """Test xcli call for qos creation with wrong type""" - driver = mock.MagicMock() - driver.VERSION = "VERSION" - - p = self.proxy( - self.default_storage_info, - mock.MagicMock(), - test_mock.cinder.exception, - driver) - - p.ibm_storage_cli = mock.MagicMock() - p.ibm_storage_cli.cmd.perf_class_list.return_value.as_list = [] - p.ibm_storage_cli.cmd.perf_class_create.side_effect = ( - errors.XCLIError('llegal value')) - - ex = getattr(p, "_get_exception")() - self.assertRaises(ex, p._check_perf_class_on_backend, - {'bw': '100', 'type': 'BAD'}) - - def test_qos_class_on_backend_name_correct(self): - """Test backend naming - - Test if the naming convention is correct - when getting the right specs - """ - driver = mock.MagicMock() - driver.VERSION = "VERSION" - - p = self.proxy( - self.default_storage_info, - mock.MagicMock(), - test_mock.cinder.exception, - driver) - - p.ibm_storage_cli = mock.MagicMock() - p.ibm_storage_cli.cmd.perf_class_list.return_value.as_list = [] - perf_name = 
p._check_perf_class_on_backend({'bw': '100'}) - - self.assertEqual('cinder-qos_bw_100', perf_name) - - def test_qos_xcli_exception(self): - driver = mock.MagicMock() - driver.VERSION = "VERSION" - - p = self.proxy( - self.default_storage_info, - mock.MagicMock(), - test_mock.cinder.exception, - driver) - - p.ibm_storage_cli = mock.MagicMock() - p.ibm_storage_cli.cmd.perf_class_list.side_effect = ( - errors.XCLIError('')) - - ex = getattr(p, "_get_exception")() - self.assertRaises(ex, p._check_perf_class_on_backend, {'bw': '100'}) - - @mock.patch("cinder.volume.drivers.ibm.ibm_storage." - "xiv_proxy.XIVProxy._qos_create_kwargs_for_xcli", - mock.MagicMock(return_value={})) - def test_regex_from_perf_class_name(self): - """Test type extraction from perf_class with Regex""" - driver = mock.MagicMock() - driver.VERSION = "VERSION" - - p = self.proxy( - self.default_storage_info, - mock.MagicMock(), - test_mock.cinder.exception, - driver) - - perf_class_names_list = [ - {'class_name': 'cinder-qos_iops_1000_type_independent_bw_1000', - 'type': 'independent'}, - {'class_name': 'cinder-qos_iops_1000_bw_1000_type_shared', - 'type': 'shared'}, - {'class_name': 'cinder-qos_type_badtype_bw_1000', - 'type': None}] - - for element in perf_class_names_list: - _type = p._get_type_from_perf_class_name( - perf_class_name=element['class_name']) - self.assertEqual(element['type'], _type) - - @mock.patch("cinder.volume.drivers.ibm.ibm_storage." 
- "xiv_proxy.XIVProxy._qos_create_kwargs_for_xcli", - mock.MagicMock(return_value={})) - def test_create_qos_class_with_type(self): - """Test performance class creation with type""" - driver = mock.MagicMock() - driver.VERSION = "VERSION" - - p = self.proxy( - self.default_storage_info, - mock.MagicMock(), - test_mock.cinder.exception, - driver) - - p.ibm_storage_cli = mock.MagicMock() - p.ibm_storage_cli.cmd.perf_class_set_rate.return_value = None - p.ibm_storage_cli.cmd.perf_class_create.return_value = None - - perf_class_name = 'cinder-qos_iops_1000_type_independent_bw_1000' - p_class_name = p._create_qos_class(perf_class_name=perf_class_name, - specs=None) - - p.ibm_storage_cli.cmd.perf_class_create.assert_called_once_with( - perf_class=perf_class_name, - type='independent') - self.assertEqual('cinder-qos_iops_1000_type_independent_bw_1000', - p_class_name) - - @mock.patch("cinder.volume.drivers.ibm.ibm_storage." - "xiv_proxy.XIVProxy._check_storage_version_for_qos_support", - mock.MagicMock(return_value=True)) - @mock.patch("cinder.volume.drivers.ibm.ibm_storage." - "xiv_proxy.XIVProxy._get_qos_specs", - mock.MagicMock(return_value='specs')) - def test_qos_specs_exist_if_type_exists(self): - """Test a case where type was found and qos were found""" - driver = mock.MagicMock() - driver.VERSION = "VERSION" - - p = self.proxy( - self.default_storage_info, - mock.MagicMock(), - test_mock.cinder.exception, - driver) - - volume = {'name': 'bla', 'volume_type_id': '7'} - specs = p._qos_specs_from_volume(volume) - self.assertEqual('specs', specs) - - @mock.patch("cinder.volume.drivers.ibm.ibm_storage." - "xiv_proxy.XIVProxy._check_storage_version_for_qos_support", - mock.MagicMock(return_value=True)) - @mock.patch("cinder.volume.drivers.ibm.ibm_storage." 
- "xiv_proxy.XIVProxy._get_qos_specs", - mock.MagicMock(return_value=None)) - def test_no_qos_but_type_exists(self): - driver = mock.MagicMock() - driver.VERSION = "VERSION" - - p = self.proxy( - self.default_storage_info, - mock.MagicMock(), - test_mock.cinder.exception, - driver) - - volume = {'name': 'bla', 'volume_type_id': '7'} - specs = p._qos_specs_from_volume(volume) - self.assertIsNone(specs) - - @mock.patch("cinder.volume.drivers.ibm.ibm_storage." - "xiv_proxy.XIVProxy._check_storage_version_for_qos_support", - mock.MagicMock(return_value=True)) - @mock.patch("cinder.volume.drivers.ibm.ibm_storage." - "xiv_proxy.XIVProxy._get_qos_specs", - mock.MagicMock(return_value=None)) - def test_qos_specs_doesnt_exist_if_no_type(self): - """Test _qos_specs_from_volume - - Test a case where no type was defined - and therefore no specs exist - """ - driver = mock.MagicMock() - driver.VERSION = "VERSION" - - p = self.proxy( - self.default_storage_info, - mock.MagicMock(), - test_mock.cinder.exception, - driver) - - volume = {'name': 'bla'} - specs = p._qos_specs_from_volume(volume) - self.assertIsNone(specs) - - def test_manage_volume_should_call_xcli(self): - """Manage volume should call xcli with the correct parameters""" - driver = mock.MagicMock() - driver.VERSION = "VERSION" - - p = self.proxy( - self.default_storage_info, - mock.MagicMock(), - test_mock.cinder.exception, - driver) - - p.ibm_storage_cli = mock.MagicMock() - - p.ibm_storage_cli.cmd.vol_list.return_value.as_list = [ - {'name': 'WTF64', 'size': 34}] - p.manage_volume(volume={'name': 'WTF32'}, - reference={'source-name': 'WTF64'}) - - p.ibm_storage_cli.cmd.vol_list.assert_called_once_with( - vol='WTF64') - - def test_manage_volume_should_return_volume_if_exists(self): - """Manage volume should return with no errors""" - driver = mock.MagicMock() - driver.VERSION = "VERSION" - - p = self.proxy( - self.default_storage_info, - mock.MagicMock(), - test_mock.cinder.exception, - driver) - - 
p.ibm_storage_cli = mock.MagicMock() - - p.ibm_storage_cli.cmd.vol_list.return_value.as_list = [ - {'name': 'WTF64', 'size': 34}] - volume = {'name': 'WTF32'} - p.manage_volume(volume=volume, - reference={'source-name': 'WTF64'}) - - self.assertEqual(34, volume['size']) - - def test_manage_volume_should_raise_exception_if_not_exists(self): - """Test manage_volume - - Manage volume should return with exception - if volume does not exist - """ - driver = mock.MagicMock() - driver.VERSION = "VERSION" - - p = self.proxy( - self.default_storage_info, - mock.MagicMock(), - test_mock.cinder.exception, - driver) - - p.ibm_storage_cli = mock.MagicMock() - - p.ibm_storage_cli.cmd.vol_list.return_value.as_list = [] - - ex = getattr(p, "_get_exception")() - self.assertRaises(ex, p.manage_volume, volume={'name': 'WTF32'}, - reference={'source-name': 'WTF64'}) - - def test_manage_volume_get_size_if_volume_exists(self): - """Manage volume get size should return size""" - driver = mock.MagicMock() - driver.VERSION = "VERSION" - - p = self.proxy( - self.default_storage_info, - mock.MagicMock(), - test_mock.cinder.exception, - driver) - - p.ibm_storage_cli = mock.MagicMock() - - p.ibm_storage_cli.cmd.vol_list.return_value.as_list = [ - {'name': 'WTF64', 'size': 34}] - volume = {'name': 'WTF32'} - size = p.manage_volume_get_size(volume=volume, - reference={'source-name': 'WTF64'}) - - self.assertEqual(34, size) - - def test_retype_false_if_no_location(self): - driver = mock.MagicMock() - driver.VERSION = "VERSION" - - p = self.proxy( - self.default_storage_info, - mock.MagicMock(), - test_mock.cinder.exception, - driver) - - volume = {'display_name': 'vol'} - new_type = {} - new_type['name'] = "type1" - host = {'capabilities': ''} - diff = {} - ret = p.retype({}, volume, new_type, diff, host) - self.assertFalse(ret) - - def test_retype_false_if_dest_not_xiv_backend(self): - driver = mock.MagicMock() - driver.VERSION = "VERSION" - - p = self.proxy( - self.default_storage_info, - 
mock.MagicMock(), - test_mock.cinder.exception, - driver) - - host = {'capabilities': {'location_info': "IBM-XIV:host:pool"}} - volume = {'display_name': 'vol', 'host': "origdest_orighost_origpool"} - new_type = {'name': "type1"} - diff = {} - ret = p.retype({}, volume, new_type, diff, host) - self.assertFalse(ret) - - def test_retype_true_if_dest_is_xiv_backend(self): - driver = mock.MagicMock() - driver.VERSION = "VERSION" - - p = self.proxy( - self.default_storage_info, - mock.MagicMock(), - test_mock.cinder.exception, - driver) - - p.migrate_volume = mock.MagicMock() - p.migrate_volume.return_value = (True, None) - p._qos_specs_from_volume = mock.MagicMock() - p._get_qos_specs = mock.MagicMock() - p._qos_specs_from_volume.return_value = {} - p._get_qos_specs.return_value = {} - - host = {'capabilities': {'location_info': "IBM-XIV:host:pool"}} - volume = {'display_name': 'vol', 'host': "IBM-XIV_host_pool"} - new_type = {'name': "type1"} - diff = {} - ret = p.retype({}, volume, new_type, diff, host) - self.assertTrue(ret) - - def test_manage_volume_get_size_should_raise_exception_if_not_exists(self): - """Test manage_volume - - Manage volume get size should raise exception - if volume does not exist - """ - driver = mock.MagicMock() - driver.VERSION = "VERSION" - - p = self.proxy( - self.default_storage_info, - mock.MagicMock(), - test_mock.cinder.exception, - driver) - - p.ibm_storage_cli = mock.MagicMock() - - p.ibm_storage_cli.cmd.vol_list.return_value.as_list = [] - - ex = getattr(p, "_get_exception")() - self.assertRaises(ex, p.manage_volume_get_size, - volume={'name': 'WTF32'}, - reference={'source-name': 'WTF64'}) - - def test_initialize_connection(self): - """Test initialize_connection - - Ensure that initialize connection returns, - all the correct connection values - """ - - p = self.proxy( - self.default_storage_info, - mock.MagicMock(), - test_mock.cinder.exception) - - p.ibm_storage_iqn = "BLAIQN" - p.ibm_storage_portal = "BLAPORTAL" - - 
p.ibm_storage_cli = mock.MagicMock() - p.ibm_storage_cli.cmd.vol_list.return_value.as_list = ['aa'] - host = self._get_test_host() - setattr( - p, '_get_host_and_fc_targets', mock.MagicMock(return_value=( - [], host))) - setattr( - p, '_vol_map_and_get_lun_id', mock.MagicMock(return_value=100)) - p.volume_exists = mock.MagicMock(return_value=True) - - info = p.initialize_connection(TEST_VOLUME, {}) - - self.assertEqual( - p.meta.get('ibm_storage_portal'), - info['data']['target_portal']) - self.assertEqual( - p.meta.get('ibm_storage_iqn'), - info['data']['target_iqn']) - self.assertEqual(100, info['data']['target_lun']) - - def test_initialize_connection_no_initiator(self): - """Initialize connection raises exception on missing initiator""" - driver = mock.MagicMock() - driver.VERSION = "VERSION" - - p = self.proxy( - self.default_storage_info, - mock.MagicMock(), - test_mock.cinder.exception, - driver) - - connector = TEST_CONNECTOR.copy() - connector['initiator'] = None - - ex = getattr(p, "_get_exception")() - self.assertRaises(ex, p.initialize_connection, TEST_VOLUME, - connector) - - def test_initialize_connection_bad_iqn(self): - """Initialize connection raises exception on bad formatted IQN""" - driver = mock.MagicMock() - driver.VERSION = "VERSION" - - p = self.proxy( - self.default_storage_info, - mock.MagicMock(), - test_mock.cinder.exception, - driver) - - connector = TEST_CONNECTOR.copy() - # any string would pass for initiator - connector['initiator'] = 5555 - - ex = getattr(p, "_get_exception")() - self.assertRaises(ex, p.initialize_connection, TEST_VOLUME, - connector) - - def test_get_fc_targets_returns_optimized_wwpns_list(self): - driver = mock.MagicMock() - driver.VERSION = "VERSION" - - p = self.proxy( - self.default_storage_info, - mock.MagicMock(), - test_mock.cinder.exception, - driver) - - p.ibm_storage_cli = mock.MagicMock() - p.ibm_storage_cli.cmd.fc_port_list.return_value = FC_PORT_LIST_OUTPUT - fc_targets = p._get_fc_targets(None) - 
six.assertCountEqual(self, FC_TARGETS_OPTIMIZED, fc_targets) - - def test_get_fc_targets_returns_host_optimized_wwpns_list(self): - driver = mock.MagicMock() - driver.VERSION = "VERSION" - - p = self.proxy( - self.default_storage_info, - mock.MagicMock(), - test_mock.cinder.exception, - driver) - - hostname = storage.get_host_or_create_from_iqn(TEST_CONNECTOR) - host = {'name': hostname} - p.ibm_storage_cli = mock.MagicMock() - p.ibm_storage_cli.cmd.fc_port_list.return_value = FC_PORT_LIST_OUTPUT - p.ibm_storage_cli.cmd.host_connectivity_list.return_value = ( - HOST_CONNECTIVITY_LIST) - fc_targets = p._get_fc_targets(host) - self.assertEqual(FC_TARGETS_OPTIMIZED_WITH_HOST, fc_targets, - "FC targets are different from the expected") - - def test_define_ports_returns_sorted_wwpns_list(self): - driver = mock.MagicMock() - driver.VERSION = "VERSION" - - p = self.proxy( - self.default_storage_info, - mock.MagicMock(), - test_mock.cinder.exception, - driver) - - p._get_connection_type = mock.MagicMock( - return_value=storage.XIV_CONNECTION_TYPE_FC) - p._define_fc = mock.MagicMock(return_value=FC_TARGETS_BEFORE_SORTING) - fc_targets = p._define_ports(self._get_test_host()) - fc_result = list(map(lambda x: x[-1:], fc_targets)) - expected_result = list(map(lambda x: x[-1:], FC_TARGETS_AFTER_SORTING)) - self.assertEqual(expected_result, fc_result, - "FC targets are different from the expected") - - def test_get_host_and_fc_targets_if_host_not_defined(self): - """Test host and FC targets - - Tests that host and fc targets are provided - if the host is not defined - """ - - p = self.proxy( - self.default_storage_info, - mock.MagicMock(), - test_mock.cinder.exception) - - p.meta = mock.MagicMock() - p.meta.ibm_storage_iqn = "BLAIQN" - p.meta.ibm_storage_portal = "BLAPORTAL" - p.meta.openstack_version = "cinder-2013.2" - - pool = {'name': "WTF32", 'domain': 'pool_domain_bla'} - - p.ibm_storage_cli = mock.MagicMock() - p.ibm_storage_cli.cmd.host_list.return_value.as_list = [] - 
p.ibm_storage_cli.cmd.host_list_ports.return_value = [] - p.ibm_storage_cli.cmd.pool_list.return_value.as_list = [pool] - p._get_bunch_from_host = mock.MagicMock() - p._get_bunch_from_host.return_value = { - 'name': "nova-compute-%s" % TEST_INITIATOR, - 'initiator': TEST_INITIATOR, - 'id': 123, 'wwpns': 111, 'chap': 'chap', } - - fc_targets, host = getattr(p, '_get_host_and_fc_targets')( - TEST_VOLUME, TEST_CONNECTOR) - - hostname = storage.get_host_or_create_from_iqn(TEST_CONNECTOR) - p.ibm_storage_cli.cmd.host_define.assert_called_once_with( - host=hostname, domain=pool.get('domain')) - p.ibm_storage_cli.cmd.host_add_port.assert_called_once_with( - host=hostname, iscsi_name=TEST_CONNECTOR['initiator']) - - def test_get_lun_id_if_host_already_mapped(self): - """Test lun id - - Tests that a lun is provided if host is already - mapped to other volumes - """ - driver = mock.MagicMock() - driver.VERSION = "VERSION" - - p = self.proxy( - self.default_storage_info, - mock.MagicMock(), - test_mock.cinder.exception, - driver) - - p.ibm_storage_cli = mock.MagicMock() - vol_mapping_list = p.ibm_storage_cli.cmd.vol_mapping_list - vol_mapping_list.return_value.as_dict.return_value = {} - lun1 = {'lun': 1} - lun2 = {'lun': 2} - p.ibm_storage_cli.cmd.mapping_list.return_value.as_list = [lun1, lun2] - - host = self._get_test_host() - self.assertEqual( - 3, getattr(p, '_vol_map_and_get_lun_id')( - TEST_VOLUME, TEST_CONNECTOR, host)) - - def test_terminate_connection_should_call_unmap_vol(self): - """Terminate connection should call unmap vol""" - driver = mock.MagicMock() - driver.VERSION = "VERSION" - - p = self.proxy( - self.default_storage_info, - mock.MagicMock(), - test_mock.cinder.exception, - driver) - - p._get_connection_type = mock.MagicMock( - return_value=storage.XIV_CONNECTION_TYPE_FC) - p._get_fc_targets = mock.MagicMock(return_value=TEST_WWPNS) - p.ibm_storage_cli = mock.MagicMock() - vol_mapping_ret = p.ibm_storage_cli.cmd.vol_mapping_list.return_value - 
vol_mapping_ret.as_dict.return_value.has_keys.return_value = True - - p.ibm_storage_cli.cmd.vol_list.return_value.as_list = ['aa'] - - hostname = storage.get_host_or_create_from_iqn(TEST_CONNECTOR) - host = { - 'name': hostname, - 'initiator': TEST_CONNECTOR['initiator'], - 'id': 1 - } - TEST_CONNECTOR['wwpns'] = [TEST_INITIATOR] - - setattr(p, "_get_host", mock.MagicMock(return_value=host)) - - meta = p.terminate_connection(TEST_VOLUME, TEST_CONNECTOR) - - self.assertEqual( - TEST_TARGET_MAP, meta['data']['initiator_target_map']) - - p.ibm_storage_cli.cmd.unmap_vol.assert_called_once_with( - vol=TEST_VOLUME['name'], host=hostname) - - def test_terminate_connection_multiple_connections(self): - # Terminate connection should not return meta if host is still - # connected - - p = self.proxy( - self.default_storage_info, - mock.MagicMock(), - test_mock.cinder.exception) - - p.ibm_storage_cli = mock.MagicMock() - vol_dict = p.ibm_storage_cli.cmd.vol_mapping_list.return_value.as_dict - vol_dict.return_value.has_keys.return_value = True - - p.ibm_storage_cli.cmd.vol_list.return_value.as_list = ['aa'] - - hostname = storage.get_host_or_create_from_iqn(TEST_CONNECTOR) - host = { - 'name': hostname, - 'initiator': TEST_CONNECTOR['initiator'], - 'id': 1 - } - TEST_CONNECTOR['wwpns'] = [TEST_INITIATOR] - - map_dict = p.ibm_storage_cli.cmd.mapping_list.return_value.as_dict - map_dict.return_value.has_keys.return_value = host - - setattr(p, "_get_host", mock.MagicMock(return_value=host)) - - meta = p.terminate_connection(TEST_VOLUME, TEST_CONNECTOR) - - self.assertIsNone(meta) - - p.ibm_storage_cli.cmd.unmap_vol.assert_called_once_with( - vol=TEST_VOLUME['name'], host=hostname) - - def test_attach_deleted_volume_should_fail_with_info_to_log(self): - """Test attach deleted volume should fail with info to log""" - driver = mock.MagicMock() - driver.VERSION = "VERSION" - - p = self.proxy( - self.default_storage_info, - mock.MagicMock(), - test_mock.cinder.exception, - driver) - - 
p.ibm_storage_cli = mock.MagicMock() - mock_log = mock.MagicMock() - setattr(p, "_log", mock_log) - - p.ibm_storage_cli = mock.MagicMock() - p.ibm_storage_cli.cmd.vol_mapping_list.side_effect = ( - errors.VolumeBadNameError('bla', 'bla', - ElementTree.Element('Bla'))) - p._define_host_according_to_chap = mock.MagicMock() - p._define_host_according_to_chap.return_value = dict(id=100) - ex = getattr(p, "_get_exception")() - self.assertRaises(ex, p.initialize_connection, TEST_VOLUME, - TEST_CONNECTOR) - - def _get_test_host(self): - host = { - 'name': TEST_HOST_NAME, - 'initiator': TEST_INITIATOR, - 'id': TEST_HOST_ID, - 'wwpns': [TEST_INITIATOR], - 'chap': (TEST_CHAP_NAME, TEST_CHAP_SECRET) - } - return host - - def _create_test_group(self, g_name='group', is_cg=True): - extra_specs = {} - if is_cg: - extra_specs['consistent_group_snapshot_enabled'] = ' True' - - group_type = group_types.create(self.ctxt, g_name, extra_specs) - return testutils.create_group(self.ctxt, - host=self._get_test_host()['name'], - group_type_id=group_type.id, - volume_type_ids=[]) - - def _create_test_cgsnapshot(self, group_id): - group_type = group_types.create( - self.ctxt, 'group_snapshot', - {'consistent_group_snapshot_enabled': ' True'}) - return testutils.create_group_snapshot(self.ctxt, group_id=group_id, - group_type_id=group_type.id) - - def test_create_generic_group(self): - """test create generic group""" - driver = mock.MagicMock() - driver.VERSION = "VERSION" - - p = self.proxy( - self.default_storage_info, - mock.MagicMock(), - test_mock.cinder.exception, - driver) - - p.ibm_storage_cli = mock.MagicMock() - group_obj = self._create_test_group(is_cg=False) - - self.assertRaises(NotImplementedError, - p.create_group, {}, group_obj) - - def test_create_consistencygroup(self): - """test a successful cg create""" - driver = mock.MagicMock() - driver.VERSION = "VERSION" - - p = self.proxy( - self.default_storage_info, - mock.MagicMock(), - test_mock.cinder.exception, - driver) - - 
p.ibm_storage_cli = mock.MagicMock() - group_obj = self._create_test_group() - - model_update = p.create_group({}, group_obj) - - p.ibm_storage_cli.cmd.cg_create.assert_called_once_with( - cg=p._cg_name_from_id(group_obj.id), - pool='WTF32') - - self.assertEqual('available', model_update['status']) - - def test_create_consistencygroup_already_exists(self): - """test create_consistenygroup when cg already exists""" - driver = mock.MagicMock() - driver.VERSION = "VERSION" - - p = self.proxy( - self.default_storage_info, - mock.MagicMock(), - test_mock.cinder.exception, - driver) - - p.ibm_storage_cli = mock.MagicMock() - - p.ibm_storage_cli.cmd.cg_create.side_effect = errors.CgNameExistsError( - 'bla', 'bla', ElementTree.Element('bla')) - - ex = getattr(p, "_get_exception")() - self.assertRaises(ex, p.create_group, {}, self._create_test_group()) - - def test_create_consistencygroup_reached_limit(self): - """test create_consistenygroup when reached maximum CGs""" - driver = mock.MagicMock() - driver.VERSION = "VERSION" - - p = self.proxy( - self.default_storage_info, - mock.MagicMock(), - test_mock.cinder.exception, - driver) - - p.ibm_storage_cli = mock.MagicMock() - - p.ibm_storage_cli.cmd.cg_create.side_effect = ( - errors.CgLimitReachedError( - 'bla', 'bla', ElementTree.Element('bla'))) - - ex = getattr(p, "_get_exception")() - self.assertRaises(ex, p.create_group, {}, self._create_test_group()) - - @mock.patch("cinder.volume.drivers.ibm.ibm_storage.xiv_proxy." 
- "client.XCLIClient") - def test_create_consistencygroup_with_replication(self, mock_xcli): - """test create_consistenygroup when replication is set""" - - p = self.proxy( - self.default_storage_info, - mock.MagicMock(), - test_mock.cinder.exception) - - p.ibm_storage_cli = mock.MagicMock() - - group_obj = self._create_test_group() - - vol_type = objects.VolumeType(context=self.ctxt, - name='volume_type_rep', - extra_specs=( - {'replication_enabled': ' True', - 'replication_type': 'sync'})) - group_obj.volume_types = objects.VolumeTypeList(context=self.ctxt, - objects=[vol_type]) - - model_update = p.create_group({}, group_obj) - self.assertEqual('available', model_update['status']) - - def test_create_consistencygroup_from_src_cgsnapshot(self): - """test a successful cg create from cgsnapshot""" - driver = mock.MagicMock() - driver.VERSION = "VERSION" - - p = self.proxy( - self.default_storage_info, - mock.MagicMock(), - test_mock.cinder.exception, - driver) - - p.ibm_storage_cli = mock.MagicMock() - p.ibm_storage_cli.cmd.create_volume_from_snapshot.return_value = [] - - group_obj = self._create_test_group() - cgsnap_group_obj = self._create_test_cgsnapshot(group_obj.id) - - volume = testutils.create_volume(self.ctxt) - snapshot = testutils.create_snapshot(self.ctxt, volume.id) - - model_update, vols_model_update = p.create_group_from_src( - {}, group_obj, [volume], - cgsnap_group_obj, [snapshot], None, None) - - p.ibm_storage_cli.cmd.cg_create.assert_called_once_with( - cg=p._cg_name_from_id(group_obj.id), pool='WTF32') - - self.assertEqual('available', model_update['status']) - - def test_create_consistencygroup_from_src_cg(self): - """test a successful cg create from consistencygroup""" - driver = mock.MagicMock() - driver.VERSION = "VERSION" - - p = self.proxy( - self.default_storage_info, - mock.MagicMock(), - test_mock.cinder.exception, - driver) - - p.ibm_storage_cli = mock.MagicMock() - p.ibm_storage_cli.cmd.create_volume_from_snapshot.return_value = [] - 
- group_obj = self._create_test_group() - src_group_obj = self._create_test_group(g_name='src_group') - - volume = testutils.create_volume(self.ctxt) - src_volume = testutils.create_volume(self.ctxt) - - model_update, vols_model_update = p.create_group_from_src( - {}, group_obj, [volume], - None, None, src_group_obj, [src_volume]) - - p.ibm_storage_cli.cmd.cg_create.assert_called_once_with(cg=group_obj, - pool='WTF32') - - self.assertEqual('available', model_update['status']) - - def test_create_consistencygroup_from_src_fails_cg_create_from_cgsnapshot( - self): - """test cg create from cgsnapshot fails on cg_create""" - driver = mock.MagicMock() - driver.VERSION = "VERSION" - - p = self.proxy( - self.default_storage_info, - mock.MagicMock(), - test_mock.cinder.exception, - driver) - - p.ibm_storage_cli = mock.MagicMock() - p.ibm_storage_cli.cmd.cg_create.side_effect = errors.XCLIError( - 'bla', 'bla', ElementTree.Element('bla')) - - group_obj = self._create_test_group() - cgsnap_group_obj = self._create_test_cgsnapshot(group_obj.id) - - volume = testutils.create_volume(self.ctxt) - snapshot = testutils.create_snapshot(self.ctxt, volume.id) - - ex = getattr(p, "_get_exception")() - self.assertRaises(ex, p.create_group_from_src, {}, - group_obj, [volume], cgsnap_group_obj, - [snapshot], None, None) - - def test_create_consistencygroup_from_src_fails_cg_create_from_cg(self): - """test cg create from cg fails on cg_create""" - driver = mock.MagicMock() - driver.VERSION = "VERSION" - - p = self.proxy( - self.default_storage_info, - mock.MagicMock(), - test_mock.cinder.exception, - driver) - - p.ibm_storage_cli = mock.MagicMock() - p.ibm_storage_cli.cmd.cg_create.side_effect = errors.XCLIError( - 'bla', 'bla', ElementTree.Element('bla')) - - group_obj = self._create_test_group() - src_group_obj = self._create_test_group(g_name='src_group') - - volume = testutils.create_volume(self.ctxt) - src_volume = testutils.create_volume(self.ctxt) - - ex = getattr(p, 
"_get_exception")() - self.assertRaises(ex, p.create_group_from_src, {}, - group_obj, [volume], None, None, - src_group_obj, [src_volume]) - - def test_create_consistencygroup_from_src_fails_vol_create_from_cgsnapshot( - self): - """test cg create from cgsnapshot fails on vol_create""" - driver = mock.MagicMock() - driver.VERSION = "VERSION" - - p = self.proxy( - self.default_storage_info, - mock.MagicMock(), - test_mock.cinder.exception, - driver) - - p.ibm_storage_cli = mock.MagicMock() - p.ibm_storage_cli.cmd.vol_create.side_effect = errors.XCLIError( - 'bla', 'bla', ElementTree.Element('bla')) - - group_obj = self._create_test_group() - cgsnap_group_obj = self._create_test_cgsnapshot(group_obj.id) - - volume = testutils.create_volume(self.ctxt) - snapshot = testutils.create_snapshot(self.ctxt, volume.id) - - ex = getattr(p, "_get_exception")() - self.assertRaises(ex, p.create_group_from_src, {}, - group_obj, [volume], cgsnap_group_obj, - [snapshot], None, None) - - def test_create_consistencygroup_from_src_fails_vol_create_from_cg(self): - """test cg create from cg fails on vol_create""" - driver = mock.MagicMock() - driver.VERSION = "VERSION" - - p = self.proxy( - self.default_storage_info, - mock.MagicMock(), - test_mock.cinder.exception, - driver) - - p.ibm_storage_cli = mock.MagicMock() - p.ibm_storage_cli.cmd.vol_create.side_effect = errors.XCLIError( - 'bla', 'bla', ElementTree.Element('bla')) - - group_obj = self._create_test_group() - src_group_obj = self._create_test_group(g_name='src_group') - - volume = testutils.create_volume(self.ctxt) - src_volume = testutils.create_volume(self.ctxt) - - ex = getattr(p, "_get_exception")() - self.assertRaises(ex, p.create_group_from_src, {}, - group_obj, [volume], None, None, - src_group_obj, [src_volume]) - - def test_create_consistencygroup_from_src_fails_vol_copy_from_cgsnapshot( - self): - """test cg create from cgsnapshot fails on vol_copy""" - driver = mock.MagicMock() - driver.VERSION = "VERSION" - - p = 
self.proxy( - self.default_storage_info, - mock.MagicMock(), - test_mock.cinder.exception, - driver) - - p.ibm_storage_cli = mock.MagicMock() - p.ibm_storage_cli.cmd.vol_copy.side_effect = errors.XCLIError( - 'bla', 'bla', ElementTree.Element('bla')) - - group_obj = self._create_test_group() - cgsnap_group_obj = self._create_test_cgsnapshot(group_obj.id) - - volume = testutils.create_volume(self.ctxt) - snapshot = testutils.create_snapshot(self.ctxt, volume.id) - - ex = getattr(p, "_get_exception")() - self.assertRaises(ex, p.create_group_from_src, {}, group_obj, - [volume], cgsnap_group_obj, [snapshot], - None, None) - - def test_create_consistencygroup_from_src_fails_vol_copy_from_cg(self): - """test cg create from cg fails on vol_copy""" - driver = mock.MagicMock() - driver.VERSION = "VERSION" - - p = self.proxy( - self.default_storage_info, - mock.MagicMock(), - test_mock.cinder.exception, - driver) - - p.ibm_storage_cli = mock.MagicMock() - p.ibm_storage_cli.cmd.vol_copy.side_effect = errors.XCLIError( - 'bla', 'bla', ElementTree.Element('bla')) - - group_obj = self._create_test_group() - src_group_obj = self._create_test_group(g_name='src_group') - - volume = testutils.create_volume(self.ctxt) - src_volume = testutils.create_volume(self.ctxt) - - ex = getattr(p, "_get_exception")() - self.assertRaises(ex, p.create_group_from_src, {}, - group_obj, [volume], None, None, - src_group_obj, [src_volume]) - - def test_delete_consistencygroup_with_no_volumes(self): - """test a successful cg delete""" - driver = mock.MagicMock() - driver.VERSION = "VERSION" - - p = self.proxy( - self.default_storage_info, - mock.MagicMock(), - test_mock.cinder.exception, - driver) - - p.ibm_storage_cli = mock.MagicMock() - - group_obj = self._create_test_group() - - model_update, volumes = p.delete_group({}, group_obj, []) - - p.ibm_storage_cli.cmd.cg_delete.assert_called_once_with( - cg=p._cg_name_from_id(group_obj.id)) - - self.assertEqual('deleted', model_update['status']) - - def 
test_delete_consistencygroup_not_exists(self): - """test delete_consistenygroup when CG does not exist""" - driver = mock.MagicMock() - driver.VERSION = "VERSION" - - p = self.proxy( - self.default_storage_info, - mock.MagicMock(), - test_mock.cinder.exception, - driver) - - p.ibm_storage_cli = mock.MagicMock() - - p.ibm_storage_cli.cmd.cg_delete.side_effect = ( - errors.CgDoesNotExistError( - 'bla', 'bla', ElementTree.Element('bla'))) - - group_obj = self._create_test_group() - - model_update, volumes = p.delete_group({}, group_obj, []) - - p.ibm_storage_cli.cmd.cg_delete.assert_called_once_with( - cg=p._cg_name_from_id(group_obj.id)) - - self.assertEqual('deleted', model_update['status']) - - def test_delete_consistencygroup_not_exists_2(self): - """test delete_consistenygroup when CG does not exist bad name""" - driver = mock.MagicMock() - driver.VERSION = "VERSION" - - p = self.proxy( - self.default_storage_info, - mock.MagicMock(), - test_mock.cinder.exception, - driver) - - p.ibm_storage_cli = mock.MagicMock() - - p.ibm_storage_cli.cmd.cg_delete.side_effect = ( - errors.CgBadNameError( - 'bla', 'bla', ElementTree.Element('bla'))) - - group_obj = self._create_test_group() - model_update, volumes = p.delete_group({}, group_obj, []) - - p.ibm_storage_cli.cmd.cg_delete.assert_called_once_with( - cg=p._cg_name_from_id(group_obj.id)) - - self.assertEqual('deleted', model_update['status']) - - def test_delete_consistencygroup_not_empty(self): - """test delete_consistenygroup when CG is not empty""" - driver = mock.MagicMock() - driver.VERSION = "VERSION" - - p = self.proxy( - self.default_storage_info, - mock.MagicMock(), - test_mock.cinder.exception, - driver) - - p.ibm_storage_cli = mock.MagicMock() - - p.ibm_storage_cli.cmd.cg_delete.side_effect = errors.CgNotEmptyError( - 'bla', 'bla', ElementTree.Element('bla')) - - group_obj = self._create_test_group() - - ex = getattr(p, "_get_exception")() - self.assertRaises(ex, p.delete_group, {}, group_obj, []) - - def 
test_delete_consistencygroup_is_mirrored(self): - """test delete_consistenygroup when CG is mirroring""" - driver = mock.MagicMock() - driver.VERSION = "VERSION" - - p = self.proxy( - self.default_storage_info, - mock.MagicMock(), - test_mock.cinder.exception, - driver) - - p.ibm_storage_cli = mock.MagicMock() - - p.ibm_storage_cli.cmd.cg_delete.side_effect = errors.CgHasMirrorError( - 'bla', 'bla', ElementTree.Element('bla')) - - group_obj = self._create_test_group() - - ex = getattr(p, "_get_exception")() - self.assertRaises(ex, p.delete_group, {}, group_obj, []) - - def test_update_consistencygroup(self): - """test update_consistencygroup""" - driver = mock.MagicMock() - driver.VERSION = "VERSION" - - p = self.proxy( - self.default_storage_info, - mock.MagicMock(), - test_mock.cinder.exception, - driver) - - p.ibm_storage_cli = mock.MagicMock() - - group_obj = self._create_test_group() - vol_add = testutils.create_volume(self.ctxt, display_name='WTF32') - vol_remove = testutils.create_volume(self.ctxt, display_name='WTF64') - - model_update, add_model_update, remove_model_update = ( - p.update_group({}, group_obj, [vol_add], [vol_remove])) - - p.ibm_storage_cli.cmd.cg_add_vol.assert_called_once_with( - vol=vol_add['name'], cg=p._cg_name_from_id(group_obj.id)) - p.ibm_storage_cli.cmd.cg_remove_vol.assert_called_once_with( - vol=vol_remove['name']) - self.assertEqual('available', model_update['status']) - - def test_update_consistencygroup_exception_in_add_vol(self): - """test update_consistencygroup with exception in cg_add_vol""" - driver = mock.MagicMock() - driver.VERSION = "VERSION" - - p = self.proxy( - self.default_storage_info, - mock.MagicMock(), - test_mock.cinder.exception, - driver) - - p.ibm_storage_cli = mock.MagicMock() - p.ibm_storage_cli.cmd.cg_add_vol.side_effect = errors.XCLIError( - 'bla', 'bla', ElementTree.Element('bla')) - - group_obj = self._create_test_group() - vol_add = testutils.create_volume(self.ctxt, display_name='WTF32') - - ex = 
getattr(p, "_get_exception")() - self.assertRaises(ex, p.update_group, {}, group_obj, [vol_add], []) - - def test_update_consistencygroup_exception_in_remove_vol(self): - """test update_consistencygroup with exception in cg_remove_vol""" - driver = mock.MagicMock() - driver.VERSION = "VERSION" - - p = self.proxy( - self.default_storage_info, - mock.MagicMock(), - test_mock.cinder.exception, - driver) - - p.ibm_storage_cli = mock.MagicMock() - p.ibm_storage_cli.cmd.cg_remove_vol.side_effect = errors.XCLIError( - 'bla', 'bla', ElementTree.Element('bla')) - - group_obj = self._create_test_group() - vol_remove = testutils.create_volume(self.ctxt) - - ex = getattr(p, "_get_exception")() - self.assertRaises(ex, p.update_group, {}, - group_obj, [], [vol_remove]) - - def test_update_consistencygroup_remove_non_exist_vol_(self): - """test update_group with exception in cg_remove_vol""" - driver = mock.MagicMock() - driver.VERSION = "VERSION" - - p = self.proxy( - self.default_storage_info, - mock.MagicMock(), - test_mock.cinder.exception, - driver) - - p.ibm_storage_cli = mock.MagicMock() - p.ibm_storage_cli.cmd.cg_remove_vol.side_effect = ( - errors.VolumeNotInConsGroup( - 'bla', 'bla', ElementTree.Element('bla'))) - - group_obj = self._create_test_group() - vol_remove = testutils.create_volume(self.ctxt) - - model_update, add_model_update, remove_model_update = ( - p.update_group({}, group_obj, [], [vol_remove])) - - p.ibm_storage_cli.cmd.cg_remove_vol.assert_called_once_with( - vol=vol_remove['name']) - self.assertEqual('available', model_update['status']) - - def test_create_cgsnapshot(self): - """test a successful cgsnapshot create""" - driver = mock.MagicMock() - driver.VERSION = "VERSION" - - p = self.proxy( - self.default_storage_info, - mock.MagicMock(), - test_mock.cinder.exception, - driver) - - p.ibm_storage_cli = mock.MagicMock() - group_obj = self._create_test_group() - cgsnap_group_obj = self._create_test_cgsnapshot(group_obj.id) - - model_update, 
snapshots_model_update = ( - p.create_group_snapshot({}, cgsnap_group_obj, [])) - - p.ibm_storage_cli.cmd.cg_snapshots_create.assert_called_once_with( - cg=p._cg_name_from_cgsnapshot(cgsnap_group_obj), - snap_group=p._group_name_from_cgsnapshot_id( - cgsnap_group_obj['id'])) - - self.assertEqual('available', model_update['status']) - - def test_create_cgsnapshot_is_empty(self): - """test create_cgsnapshot when CG is empty""" - driver = mock.MagicMock() - driver.VERSION = "VERSION" - - p = self.proxy( - self.default_storage_info, - mock.MagicMock(), - test_mock.cinder.exception, - driver) - - p.ibm_storage_cli = mock.MagicMock() - group_obj = self._create_test_group() - cgsnap_group_obj = self._create_test_cgsnapshot(group_obj.id) - - p.ibm_storage_cli.cmd.cg_snapshots_create.side_effect = ( - errors.CgEmptyError('bla', 'bla', ElementTree.Element('bla'))) - - ex = getattr(p, "_get_exception")() - self.assertRaises(ex, p.create_group_snapshot, {}, - cgsnap_group_obj, []) - - def test_create_cgsnapshot_cg_not_exist(self): - """test create_cgsnapshot when CG does not exist""" - driver = mock.MagicMock() - driver.VERSION = "VERSION" - - p = self.proxy( - self.default_storage_info, - mock.MagicMock(), - test_mock.cinder.exception, - driver) - - p.ibm_storage_cli = mock.MagicMock() - group_obj = self._create_test_group() - cgsnap_group_obj = self._create_test_cgsnapshot(group_obj.id) - - p.ibm_storage_cli.cmd.cg_snapshots_create.side_effect = ( - errors.CgDoesNotExistError( - 'bla', 'bla', ElementTree.Element('bla'))) - - ex = getattr(p, "_get_exception")() - self.assertRaises(ex, p.create_group_snapshot, {}, - cgsnap_group_obj, []) - - def test_create_cgsnapshot_snapshot_limit(self): - """test create_cgsnapshot when reached snapshot limit""" - driver = mock.MagicMock() - driver.VERSION = "VERSION" - - p = self.proxy( - self.default_storage_info, - mock.MagicMock(), - test_mock.cinder.exception, - driver) - - p.ibm_storage_cli = mock.MagicMock() - group_obj = 
self._create_test_group() - cgsnap_group_obj = self._create_test_cgsnapshot(group_obj.id) - - p.ibm_storage_cli.cmd.cg_snapshots_create.side_effect = ( - errors.PoolSnapshotLimitReachedError( - 'bla', 'bla', ElementTree.Element('bla'))) - - ex = getattr(p, "_get_exception")() - self.assertRaises(ex, p.create_group_snapshot, {}, - cgsnap_group_obj, []) - - def test_delete_cgsnapshot(self): - """test a successful cgsnapshot delete""" - driver = mock.MagicMock() - driver.VERSION = "VERSION" - - p = self.proxy( - self.default_storage_info, - mock.MagicMock(), - test_mock.cinder.exception, - driver) - - p.ibm_storage_cli = mock.MagicMock() - group_obj = self._create_test_group() - cgsnap_group_obj = self._create_test_cgsnapshot(group_obj.id) - - model_update, snapshots_model_update = p.delete_group_snapshot( - {}, cgsnap_group_obj, []) - - p.ibm_storage_cli.cmd.snap_group_delete.assert_called_once_with( - snap_group=p._group_name_from_cgsnapshot_id( - cgsnap_group_obj['id'])) - - self.assertEqual('deleted', model_update['status']) - - def test_delete_cgsnapshot_cg_does_not_exist(self): - """test delete_cgsnapshot with bad CG name""" - driver = mock.MagicMock() - driver.VERSION = "VERSION" - - p = self.proxy( - self.default_storage_info, - mock.MagicMock(), - test_mock.cinder.exception, - driver) - - p.ibm_storage_cli = mock.MagicMock() - group_obj = self._create_test_group() - cgsnap_group_obj = self._create_test_cgsnapshot(group_obj.id) - - p.ibm_storage_cli.cmd.snap_group_delete.side_effect = ( - errors.CgDoesNotExistError( - 'bla', 'bla', ElementTree.Element('bla'))) - - ex = getattr(p, "_get_exception")() - self.assertRaises(ex, p.delete_group_snapshot, {}, - cgsnap_group_obj, []) - - def test_delete_cgsnapshot_no_space_left_for_snapshots(self): - """test delete_cgsnapshot when no space left for snapshots""" - driver = mock.MagicMock() - driver.VERSION = "VERSION" - - p = self.proxy( - self.default_storage_info, - mock.MagicMock(), - test_mock.cinder.exception, - 
driver) - - p.ibm_storage_cli = mock.MagicMock() - group_obj = self._create_test_group() - cgsnap_group_obj = self._create_test_cgsnapshot(group_obj.id) - - p.ibm_storage_cli.cmd.snap_group_delete.side_effect = ( - errors.PoolSnapshotLimitReachedError( - 'bla', 'bla', ElementTree.Element('bla'))) - - ex = getattr(p, "_get_exception")() - self.assertRaises(ex, p.delete_group_snapshot, {}, - cgsnap_group_obj, []) - - def test_delete_cgsnapshot_with_empty_consistency_group(self): - """test delete_cgsnapshot with empty consistency group""" - driver = mock.MagicMock() - driver.VERSION = "VERSION" - - p = self.proxy( - self.default_storage_info, - mock.MagicMock(), - test_mock.cinder.exception, - driver) - - p.ibm_storage_cli = mock.MagicMock() - group_obj = self._create_test_group() - cgsnap_group_obj = self._create_test_cgsnapshot(group_obj.id) - - p.ibm_storage_cli.cmd.snap_group_delete.side_effect = ( - errors.CgEmptyError('bla', 'bla', ElementTree.Element('bla'))) - - ex = getattr(p, "_get_exception")() - self.assertRaises(ex, p.delete_group_snapshot, {}, - cgsnap_group_obj, []) - - def test_silent_delete_volume(self): - """test _silent_delete_volume fails silently without exception""" - driver = mock.MagicMock() - driver.VERSION = "VERSION" - - p = self.proxy( - self.default_storage_info, - mock.MagicMock(), - test_mock.cinder.exception, - driver) - - p.ibm_storage_cli = mock.MagicMock() - - p.ibm_storage_cli.cmd.vol_delete.side_effect = errors.XCLIError( - 'bla', 'bla', ElementTree.Element('bla')) - - # check no assertion occurs - p._silent_delete_volume(TEST_VOLUME) - - @mock.patch("cinder.volume.utils.group_get_by_id", mock.MagicMock()) - @mock.patch("cinder.volume.utils.is_group_a_cg_snapshot_type", - mock.MagicMock(return_value=False)) - def test_create_cloned_volume_calls_vol_create_and_copy(self): - """test create_cloned_volume - - check if calls the appropriate xiv_backend functions - are being called - """ - driver = mock.MagicMock() - driver.VERSION = 
"VERSION" - - p = self.proxy( - self.default_storage_info, - mock.MagicMock(), - test_mock.cinder.exception, - driver) - - vol_src = testutils.create_volume(self.ctxt, display_name='bla', - size=17) - vol_trg = testutils.create_volume(self.ctxt, display_name='bla', - size=17) - - p.ibm_storage_cli = mock.MagicMock() - p._cg_name_from_volume = mock.MagicMock(return_value="cg") - - p.create_cloned_volume(vol_trg, vol_src) - p._create_volume = test_mock.MagicMock() - - p.ibm_storage_cli.cmd.vol_create.assert_called_once_with( - pool='WTF32', - size_blocks=storage.gigabytes_to_blocks(17), - vol=vol_trg['name']) - - p.ibm_storage_cli.cmd.vol_copy.assert_called_once_with( - vol_src=vol_src['name'], - vol_trg=vol_trg['name']) - - @mock.patch("cinder.volume.utils.group_get_by_id", mock.MagicMock()) - @mock.patch("cinder.volume.utils.is_group_a_cg_snapshot_type", - mock.MagicMock(return_value=False)) - def test_handle_created_vol_properties_returns_vol_update(self): - """test handle_created_vol_props - - returns replication enables if replication info is True - """ - driver = mock.MagicMock() - driver.VERSION = "VERSION" - - p = self.proxy( - self.default_storage_info, - mock.MagicMock(), - test_mock.cinder.exception, - driver) - - xiv_replication.VolumeReplication = mock.MagicMock() - grp = testutils.create_group(self.ctxt, name='bla', group_type_id='1') - volume = testutils.create_volume(self.ctxt, display_name='bla') - volume.group = grp - ret_val = p.handle_created_vol_properties({'enabled': True}, volume) - - self.assertEqual(ret_val, {'replication_status': 'enabled'}) diff --git a/cinder/tests/unit/volume/drivers/infortrend/__init__.py b/cinder/tests/unit/volume/drivers/infortrend/__init__.py deleted file mode 100644 index e69de29bb..000000000 diff --git a/cinder/tests/unit/volume/drivers/infortrend/test_infortrend_cli.py b/cinder/tests/unit/volume/drivers/infortrend/test_infortrend_cli.py deleted file mode 100644 index 0a8d6b423..000000000 --- 
a/cinder/tests/unit/volume/drivers/infortrend/test_infortrend_cli.py +++ /dev/null @@ -1,2306 +0,0 @@ -# Copyright (c) 2015 Infortrend Technology, Inc. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import mock - -from cinder import test -from cinder.volume.drivers.infortrend.raidcmd_cli import cli_factory as cli - - -class InfortrendCLITestData(object): - - """CLI Test Data.""" - - # Infortrend entry - fake_lv_id = ['5DE94FF775D81C30', '1234567890'] - - fake_partition_id = ['6A41315B0EDC8EB7', '51B4283E4E159173', - '987654321', '123456789', - '2667FE351FC505AE', '53F3E98141A2E871'] - - fake_pair_id = ['55D790F8350B036B', '095A184B0ED2DB10'] - - fake_snapshot_id = ['2C7A8D211F3B1E36', '60135EE53C14D5EB'] - - fake_data_port_ip = ['172.27.0.1', '172.27.0.2', - '172.27.0.3', '172.27.0.4', - '172.27.0.5', '172.27.0.6'] - - fake_model = ['DS S12F-G2852-6'] - - fake_manage_port_ip = ['172.27.0.10'] - - fake_system_id = ['DEEC'] - - fake_host_ip = ['172.27.0.2'] - - fake_target_wwnns = ['100123D02300DEEC', '100123D02310DEEC'] - - fake_target_wwpns = ['110123D02300DEEC', '120123D02300DEEC', - '110123D02310DEEC', '120123D02310DEEC'] - - fake_initiator_wwnns = ['2234567890123456', '2234567890543216'] - - fake_initiator_wwpns = ['1234567890123456', '1234567890543216'] - - fake_initiator_iqn = ['iqn.1991-05.com.infortrend:pc123', - 'iqn.1991-05.com.infortrend:pc456'] - - fake_lun_map = [0, 1, 2] - - # cinder entry - test_provider_location = [( - 
'system_id^%s@partition_id^%s') % ( - int(fake_system_id[0], 16), fake_partition_id[0]), - ] - - test_volume = { - 'id': '5aa119a8-d25b-45a7-8d1b-88e127885635', - 'size': 1, - 'name': 'Part-1', - 'host': 'infortrend-server1@backend_1#LV-1', - 'name_id': '5aa119a8-d25b-45a7-8d1b-88e127885635', - 'provider_auth': None, - 'project_id': 'project', - 'display_name': None, - 'display_description': 'Part-1', - 'volume_type_id': None, - 'provider_location': test_provider_location[0], - 'volume_attachment': [], - } - - test_dst_volume = { - 'id': '6bb119a8-d25b-45a7-8d1b-88e127885666', - 'size': 1, - 'name': 'Part-1-Copy', - 'host': 'infortrend-server1@backend_1', - 'name_id': '6bb119a8-d25b-45a7-8d1b-88e127885666', - 'provider_auth': None, - 'project_id': 'project', - 'display_name': None, - '_name_id': '6bb119a8-d25b-45a7-8d1b-88e127885666', - 'display_description': 'Part-1-Copy', - 'volume_type_id': None, - 'provider_location': '', - 'volume_attachment': [], - } - - test_ref_volume = { - 'source-id': '6bb119a8-d25b-45a7-8d1b-88e127885666', - 'size': 1, - } - - test_ref_volume_with_import = { - 'source-name': 'import_into_openstack', - 'size': 1, - } - - test_snapshot = { - 'id': 'ffa9bc5e-1172-4021-acaf-cdcd78a9584d', - 'volume_id': test_volume['id'], - 'volume_name': test_volume['name'], - 'volume_size': 2, - 'project_id': 'project', - 'display_name': None, - 'display_description': 'SI-1', - 'volume_type_id': None, - 'provider_location': fake_snapshot_id[0], - } - - test_iqn = [( - 'iqn.2002-10.com.infortrend:raid.uid%s.%s%s%s') % ( - int(fake_system_id[0], 16), 1, 0, 1), ( - 'iqn.2002-10.com.infortrend:raid.uid%s.%s%s%s') % ( - int(fake_system_id[0], 16), 1, 0, 1), - ] - - test_iscsi_properties = { - 'driver_volume_type': 'iscsi', - 'data': { - 'target_discovered': True, - 'target_portal': '%s:3260' % fake_data_port_ip[2], - 'target_iqn': test_iqn[0], - 'target_lun': fake_lun_map[0], - 'volume_id': test_volume['id'], - }, - } - - test_iscsi_properties_with_mcs = { - 
'driver_volume_type': 'iscsi', - 'data': { - 'target_discovered': True, - 'target_portal': '%s:3260' % fake_data_port_ip[0], - 'target_iqn': test_iqn[1], - 'target_lun': fake_lun_map[2], - 'volume_id': test_volume['id'], - }, - } - - test_iqn_empty_map = [( - 'iqn.2002-10.com.infortrend:raid.uid%s.%s%s%s') % ( - int(fake_system_id[0], 16), 0, 0, 1), - ] - - test_iscsi_properties_empty_map = { - 'driver_volume_type': 'iscsi', - 'data': { - 'target_discovered': True, - 'target_portal': '%s:3260' % fake_data_port_ip[0], - 'target_iqn': test_iqn_empty_map[0], - 'target_lun': fake_lun_map[0], - 'volume_id': test_volume['id'], - }, - } - - test_initiator_target_map = { - fake_initiator_wwpns[0]: fake_target_wwpns[0:2], - fake_initiator_wwpns[1]: fake_target_wwpns[0:2], - } - - test_fc_properties = { - 'driver_volume_type': 'fibre_channel', - 'data': { - 'target_discovered': True, - 'target_lun': fake_lun_map[0], - 'target_wwn': fake_target_wwpns[0:2], - 'initiator_target_map': test_initiator_target_map, - }, - } - - test_initiator_target_map_specific_channel = { - fake_initiator_wwpns[0]: [fake_target_wwpns[1]], - fake_initiator_wwpns[1]: [fake_target_wwpns[1]], - } - - test_fc_properties_with_specific_channel = { - 'driver_volume_type': 'fibre_channel', - 'data': { - 'target_discovered': True, - 'target_lun': fake_lun_map[0], - 'target_wwn': [fake_target_wwpns[1]], - 'initiator_target_map': test_initiator_target_map_specific_channel, - }, - } - - test_target_wwpns_map_multipath_r_model = [ - fake_target_wwpns[0], - fake_target_wwpns[2], - fake_target_wwpns[1], - fake_target_wwpns[3], - ] - - test_initiator_target_map_multipath_r_model = { - fake_initiator_wwpns[0]: test_target_wwpns_map_multipath_r_model[:], - fake_initiator_wwpns[1]: test_target_wwpns_map_multipath_r_model[:], - } - - test_fc_properties_multipath_r_model = { - 'driver_volume_type': 'fibre_channel', - 'data': { - 'target_discovered': True, - 'target_lun': fake_lun_map[0], - 'target_wwn': 
test_target_wwpns_map_multipath_r_model[:], - 'initiator_target_map': - test_initiator_target_map_multipath_r_model, - }, - } - - test_initiator_target_map_zoning = { - fake_initiator_wwpns[0].lower(): - [x.lower() for x in fake_target_wwpns[0:2]], - fake_initiator_wwpns[1].lower(): - [x.lower() for x in fake_target_wwpns[0:2]], - } - - test_fc_properties_zoning = { - 'driver_volume_type': 'fibre_channel', - 'data': { - 'target_discovered': True, - 'target_lun': fake_lun_map[0], - 'target_wwn': [x.lower() for x in fake_target_wwpns[0:2]], - 'initiator_target_map': test_initiator_target_map_zoning, - }, - } - - test_initiator_target_map_zoning_r_model = { - fake_initiator_wwpns[0].lower(): - [x.lower() for x in fake_target_wwpns[1:3]], - fake_initiator_wwpns[1].lower(): - [x.lower() for x in fake_target_wwpns[1:3]], - } - - test_fc_properties_zoning_r_model = { - 'driver_volume_type': 'fibre_channel', - 'data': { - 'target_discovered': True, - 'target_lun': fake_lun_map[0], - 'target_wwn': [x.lower() for x in fake_target_wwpns[1:3]], - 'initiator_target_map': test_initiator_target_map_zoning_r_model, - }, - } - - test_fc_terminate_conn_info = { - 'driver_volume_type': 'fibre_channel', - 'data': { - 'initiator_target_map': test_initiator_target_map_zoning, - }, - } - - test_connector_iscsi = { - 'ip': fake_host_ip[0], - 'initiator': fake_initiator_iqn[0], - 'host': 'infortrend-server1@backend_1', - } - - test_connector_fc = { - 'wwpns': fake_initiator_wwpns, - 'wwnns': fake_initiator_wwnns, - 'host': 'infortrend-server1@backend_1', - } - - fake_pool = { - 'pool_name': 'LV-2', - 'pool_id': fake_lv_id[1], - 'total_capacity_gb': 1000, - 'free_capacity_gb': 1000, - 'reserved_percentage': 0, - 'QoS_support': False, - 'thin_provisioning_support': False, - } - - test_pools = [{ - 'pool_name': 'LV-1', - 'pool_id': fake_lv_id[0], - 'total_capacity_gb': round(857982.0 / 1024, 2), - 'free_capacity_gb': round(841978.0 / 1024, 2), - 'reserved_percentage': 0, - 'QoS_support': 
False, - 'max_over_subscription_ratio': 20.0, - 'thin_provisioning_support': False, - 'thick_provisioning_support': True, - 'provisioned_capacity_gb': - round((400) / 1024, 2), - 'infortrend_provisioning': 'full', - }] - - test_volume_states = { - 'volume_backend_name': 'infortrend_backend_1', - 'vendor_name': 'Infortrend', - 'driver_version': '99.99', - 'storage_protocol': 'iSCSI', - 'pools': test_pools, - } - - test_host = { - 'host': 'infortrend-server1@backend_1', - 'capabilities': test_volume_states, - } - - test_migrate_volume_states = { - 'volume_backend_name': 'infortrend_backend_1', - 'vendor_name': 'Infortrend', - 'driver_version': '99.99', - 'storage_protocol': 'iSCSI', - 'pool_name': 'LV-1', - 'pool_id': fake_lv_id[1], - 'total_capacity_gb': round(857982.0 / 1024, 2), - 'free_capacity_gb': round(841978.0 / 1024, 2), - 'reserved_percentage': 0, - 'QoS_support': False, - 'infortrend_provisioning': 'full', - } - - test_migrate_host = { - 'host': 'infortrend-server1@backend_1#LV-2', - 'capabilities': test_migrate_volume_states, - } - - test_migrate_volume_states_2 = { - 'volume_backend_name': 'infortrend_backend_1', - 'vendor_name': 'Infortrend', - 'driver_version': '99.99', - 'storage_protocol': 'iSCSI', - 'pool_name': 'LV-1', - 'pool_id': fake_lv_id[1], - 'total_capacity_gb': round(857982.0 / 1024, 2), - 'free_capacity_gb': round(841978.0 / 1024, 2), - 'reserved_percentage': 0, - 'QoS_support': False, - 'infortrend_provisioning': 'full', - } - - test_migrate_host_2 = { - 'host': 'infortrend-server1@backend_1#LV-1', - 'capabilities': test_migrate_volume_states_2, - } - - fake_host = { - 'host': 'infortrend-server1@backend_1', - 'capabilities': {}, - } - - fake_volume_id = [test_volume['id'], test_dst_volume['id']] - - fake_lookup_map = { - '12345678': { - 'initiator_port_wwn_list': - [x.lower() for x in fake_initiator_wwpns], - 'target_port_wwn_list': - [x.lower() for x in fake_target_wwpns[0:2]], - }, - } - - fake_lookup_map_r_model = { - '12345678': { - 
'initiator_port_wwn_list': - [x.lower() for x in fake_initiator_wwpns[:]], - 'target_port_wwn_list': - [x.lower() for x in fake_target_wwpns[1:3]], - }, - } - - test_new_type = { - 'name': 'type0', - 'qos_specs_id': None, - 'deleted': False, - 'extra_specs': {'infortrend_provisioning': 'thin'}, - 'id': '28c8f82f-416e-148b-b1ae-2556c032d3c0', - } - - test_diff = {'extra_specs': {'infortrend_provisioning': ('full', 'thin')}} - - def get_fake_cli_failed(self): - return """ -CLI: Failed -Return: 0x0001 - -CLI: No selected device -Return: 0x000c -""" - - def get_fake_cli_failed_with_network(self): - return """ -CLI: Failed -Return: 0x0001 - -CLI: No network -Return: 0x000b -""" - - def get_fake_cli_succeed(self): - return """ -CLI: Successful: Device(UID:77124, Name:, Model:DS S12F-G2852-6) selected. -Return: 0x0000 - -CLI: Successful: 0 mapping(s) shown -Return: 0x0000 -""" - - def get_test_show_empty_list(self): - return (0, []) - - def get_test_show_snapshot(self, partition_id=None, snapshot_id=None): - if partition_id and snapshot_id: - return (0, [{ - 'Map': 'No', - 'Partition-ID': partition_id, - 'SI-ID': snapshot_id, - 'Name': '---', - 'Activated-time': 'Thu, Jan 09 01:33:11 2020', - 'Index': '1', - }]) - else: - return (0, [{ - 'Map': 'No', - 'Partition-ID': self.fake_partition_id[0], - 'SI-ID': self.fake_snapshot_id[0], - 'Name': '---', - 'Activated-time': 'Thu, Jan 09 01:33:11 2020', - 'Index': '1', - }, { - 'Map': 'No', - 'Partition-ID': self.fake_partition_id[0], - 'SI-ID': self.fake_snapshot_id[1], - 'Name': '---', - 'Activated-time': 'Thu, Jan 09 01:35:50 2020', - 'Index': '2', - }]) - - def get_fake_show_snapshot(self): - msg = r""" -CLI: Successful: Device(UID:77124, Name:, Model:DS S12F-G2852-6) selected. 
-Return: 0x0000 -\/\/\/- -\ -/ -- - -\ -/ -- -\/-\/- Index SI-ID Name Partition-ID Map Activated-time ---------------------------------------------------------------------------------- - 1 %s --- %s No Thu, Jan 09 01:33:11 2020 - 2 %s --- %s No Thu, Jan 09 01:35:50 2020 - -CLI: Successful: 2 snapshot image(s) shown -Return: 0x0000 -""" - return msg % (self.fake_snapshot_id[0], - self.fake_partition_id[0], - self.fake_snapshot_id[1], - self.fake_partition_id[0]) - - def get_test_show_snapshot_detail_filled_block(self): - return (0, [{ - 'Mapped': 'Yes', - 'Created-time': 'Wed, Jun 10 10:57:16 2015', - 'ID': self.fake_snapshot_id[0], - 'Last-modification-time': 'Wed, Jun 10 10:57:16 2015', - 'Description': '---', - 'Total-filled-block': '1', - 'LV-ID': self.fake_lv_id[0], - 'Activation-schedule-time': 'Not Actived', - 'Mapping': 'CH:0/ID:0/LUN:1', - 'Index': '1', - 'Used': '0', - 'Name': '---', - 'Valid-filled-block': '0', - 'Partition-ID': self.fake_partition_id[0], - }]) - - def get_test_show_snapshot_detail(self): - return (0, [{ - 'Mapped': 'Yes', - 'Created-time': 'Wed, Jun 10 10:57:16 2015', - 'ID': self.fake_snapshot_id[0], - 'Last-modification-time': 'Wed, Jun 10 10:57:16 2015', - 'Description': '---', - 'Total-filled-block': '0', - 'LV-ID': self.fake_lv_id[0], - 'Activation-schedule-time': 'Not Actived', - 'Mapping': 'CH:0/ID:0/LUN:1', - 'Index': '1', - 'Used': '0', - 'Name': '---', - 'Valid-filled-block': '0', - 'Partition-ID': self.fake_partition_id[0], - }]) - - def get_fake_show_snapshot_detail(self): - msg = """ -CLI: Successful: Device(UID:25090, Name:, Model:DS 1016RE) selected. 
-Return: 0x0000 - - ID: %s - Index: 1 - Name: --- - Partition-ID: %s - LV-ID: %s - Created-time: Wed, Jun 10 10:57:16 2015 - Last-modification-time: Wed, Jun 10 10:57:16 2015 - Activation-schedule-time: Not Actived - Used: 0 - Valid-filled-block: 0 - Total-filled-block: 0 - Description: --- - Mapped: Yes - Mapping: CH:0/ID:0/LUN:1 - -CLI: Successful: 1 snapshot image(s) shown -Return: 0x0000 -""" - return msg % (self.fake_snapshot_id[0], - self.fake_partition_id[0], - self.fake_lv_id[0]) - - def get_test_show_net(self): - return (0, [{ - 'Slot': 'slotA', - 'MAC': '10D02380DEEC', - 'ID': '1', - 'IPv4': self.fake_data_port_ip[0], - 'Mode': 'Disabled', - 'IPv6': '---', - }, { - 'Slot': 'slotB', - 'MAC': '10D02390DEEC', - 'ID': '1', - 'IPv4': self.fake_data_port_ip[1], - 'Mode': 'Disabled', - 'IPv6': '---', - }, { - 'Slot': 'slotA', - 'MAC': '10D02340DEEC', - 'ID': '2', - 'IPv4': self.fake_data_port_ip[2], - 'Mode': 'Disabled', - 'IPv6': '---', - }, { - 'Slot': 'slotB', - 'MAC': '10D02350DEEC', - 'ID': '2', - 'IPv4': self.fake_data_port_ip[3], - 'Mode': 'Disabled', - 'IPv6': '---', - }, { - 'Slot': 'slotA', - 'MAC': '10D02310DEEC', - 'ID': '4', - 'IPv4': self.fake_data_port_ip[4], - 'Mode': 'Disabled', - 'IPv6': '---', - }, { - 'Slot': 'slotB', - 'MAC': '10D02320DEEC', - 'ID': '4', - 'IPv4': self.fake_data_port_ip[5], - 'Mode': 'Disabled', - 'IPv6': '---', - }, { - 'Slot': '---', - 'MAC': '10D023077124', - 'ID': '32', - 'IPv4': '172.27.1.1', - 'Mode': 'Disabled', - 'IPv6': '---', - }]) - - def get_fake_show_net(self): - msg = """ -CLI: Successful: Device(UID:77124, Name:, Model:DS S12F-G2852-6) selected. 
-Return: 0x0000 - - ID MAC Mode IPv4 Mode IPv6 Slot ---------------------------------------------------------------- - 1 10D02380DEEC DHCP %s Disabled --- slotA - 1 10D02390DEEC DHCP %s Disabled --- slotB - 2 10D02340DEEC DHCP %s Disabled --- slotA - 2 10D02350DEEC DHCP %s Disabled --- slotB - 4 10D02310DEEC DHCP %s Disabled --- slotA - 4 10D02320DEEC DHCP %s Disabled --- slotB - 32 10D023077124 DHCP 172.27.1.1 Disabled --- --- - -CLI: Successful: 2 record(s) found -Return: 0x0000 -""" - return msg % (self.fake_data_port_ip[0], self.fake_data_port_ip[1], - self.fake_data_port_ip[2], self.fake_data_port_ip[3], - self.fake_data_port_ip[4], self.fake_data_port_ip[5]) - - def get_test_show_net_detail(self): - return (0, [{ - 'Slot': 'slotA', - 'IPv4-mode': 'DHCP', - 'ID': '1', - 'IPv6-address': '---', - 'Net-mask': '---', - 'IPv4-address': '---', - 'Route': '---', - 'Gateway': '---', - 'IPv6-mode': 'Disabled', - 'MAC': '00D023877124', - 'Prefix-length': '---', - }, { - 'Slot': '---', - 'IPv4-mode': 'DHCP', - 'ID': '32', - 'IPv6-address': '---', - 'Net-mask': '255.255.240.0', - 'IPv4-address': '172.27.112.245', - 'Route': '---', - 'Gateway': '172.27.127.254', - 'IPv6-mode': 'Disabled', - 'MAC': '00D023077124', - 'Prefix-length': '---', - }]) - - def get_fake_show_net_detail(self): - msg = """ -CLI: Successful: Device(UID:77124, Name:, Model:DS S12F-G2852-6) selected. 
-Return: 0x0000 - - ID: 1 - MAC: 00D023877124 - IPv4-mode: DHCP - IPv4-address: --- - Net-mask: --- - Gateway: --- - IPv6-mode: Disabled - IPv6-address: --- - Prefix-length: --- - Route: --- - Slot: slotA - - ID: 32 - MAC: 00D023077124 - IPv4-mode: DHCP - IPv4-address: 172.27.112.245 - Net-mask: 255.255.240.0 - Gateway: 172.27.127.254 - IPv6-mode: Disabled - IPv6-address: --- - Prefix-length: --- - Route: --- - Slot: --- - -CLI: Successful: 3 record(s) found -Return: 0x0000 -""" - return msg - - def get_test_show_partition(self, volume_id=None, pool_id=None): - result = [{ - 'ID': self.fake_partition_id[0], - 'Used': '200', - 'Name': self.fake_volume_id[0].replace('-', ''), - 'Size': '200', - 'Min-reserve': '200', - 'LV-ID': self.fake_lv_id[0], - }, { - 'ID': self.fake_partition_id[1], - 'Used': '200', - 'Name': self.fake_volume_id[1].replace('-', ''), - 'Size': '200', - 'Min-reserve': '200', - 'LV-ID': self.fake_lv_id[0], - }] - if volume_id and pool_id: - result.append({ - 'ID': self.fake_partition_id[2], - 'Used': '200', - 'Name': volume_id, - 'Size': '200', - 'Min-reserve': '200', - 'LV-ID': pool_id, - }) - return (0, result) - - def get_fake_show_partition(self): - msg = """ -CLI: Successful: Device(UID:77124, Name:, Model:DS S12F-G2852-6) selected. 
-Return: 0x0000 - - ID Name LV-ID Size Used Min-reserve ---------------------------------------------------- - %s %s %s 200 200 200 - %s %s %s 200 200 200 - -CLI: Successful: 3 partition(s) shown -Return: 0x0000 -""" - return msg % (self.fake_partition_id[0], - self.fake_volume_id[0].replace('-', ''), - self.fake_lv_id[0], - self.fake_partition_id[1], - self.fake_volume_id[1].replace('-', ''), - self.fake_lv_id[0]) - - def get_test_show_partition_detail_for_map( - self, partition_id, mapped='true'): - result = [{ - 'LV-ID': self.fake_lv_id[0], - 'Mapping': 'CH:1/ID:0/LUN:0, CH:1/ID:0/LUN:1', - 'Used': '200', - 'Size': '200', - 'ID': partition_id, - 'Progress': '---', - 'Min-reserve': '200', - 'Last-modification-time': 'Wed, Jan 08 20:23:23 2020', - 'Valid-filled-block': '100', - 'Name': self.fake_volume_id[0].replace('-', ''), - 'Mapped': mapped, - 'Total-filled-block': '100', - 'Creation-time': 'Wed, Jan 08 20:23:23 2020', - }] - return (0, result) - - def get_test_show_partition_detail(self, volume_id=None, pool_id=None): - result = [{ - 'LV-ID': self.fake_lv_id[0], - 'Mapping': 'CH:1/ID:0/LUN:0, CH:1/ID:0/LUN:1, CH:4/ID:0/LUN:0', - 'Used': '200', - 'Size': '200', - 'ID': self.fake_partition_id[0], - 'Progress': '---', - 'Min-reserve': '200', - 'Last-modification-time': 'Wed, Jan 08 20:23:23 2020', - 'Valid-filled-block': '100', - 'Name': self.fake_volume_id[0].replace('-', ''), - 'Mapped': 'true', - 'Total-filled-block': '100', - 'Creation-time': 'Wed, Jan 08 20:23:23 2020', - }, { - 'LV-ID': self.fake_lv_id[0], - 'Mapping': '---', - 'Used': '200', - 'Size': '200', - 'ID': self.fake_partition_id[1], - 'Progress': '---', - 'Min-reserve': '200', - 'Last-modification-time': 'Sat, Jan 11 22:18:40 2020', - 'Valid-filled-block': '100', - 'Name': self.fake_volume_id[1].replace('-', ''), - 'Mapped': 'false', - 'Total-filled-block': '100', - 'Creation-time': 'Sat, Jan 11 22:18:40 2020', - }] - if volume_id and pool_id: - result.extend([{ - 'LV-ID': pool_id, - 'Mapping': 
'---', - 'Used': '200', - 'Size': '200', - 'ID': self.fake_partition_id[2], - 'Progress': '---', - 'Min-reserve': '200', - 'Last-modification-time': 'Sat, Jan 15 22:18:40 2020', - 'Valid-filled-block': '100', - 'Name': volume_id, - 'Mapped': 'false', - 'Total-filled-block': '100', - 'Creation-time': 'Sat, Jan 15 22:18:40 2020', - }, { - 'LV-ID': '987654321', - 'Mapping': '---', - 'Used': '200', - 'Size': '200', - 'ID': '123123123123', - 'Progress': '---', - 'Min-reserve': '200', - 'Last-modification-time': 'Sat, Jan 12 22:18:40 2020', - 'Valid-filled-block': '100', - 'Name': volume_id, - 'Mapped': 'false', - 'Total-filled-block': '100', - 'Creation-time': 'Sat, Jan 15 22:18:40 2020', - }]) - return (0, result) - - def get_fake_show_partition_detail(self): - msg = """ -CLI: Successful: Device(UID:77124, Name:, Model:DS S12F-G2852-6) selected. -Return: 0x0000 - - ID: %s - Name: %s - LV-ID: %s - Size: 200 - Used: 200 - Min-reserve: 200 - Creation-time: Wed, Jan 08 20:23:23 2020 - Last-modification-time: Wed, Jan 08 20:23:23 2020 - Valid-filled-block: 100 - Total-filled-block: 100 - Progress: --- - Mapped: true - Mapping: CH:1/ID:0/LUN:0, CH:1/ID:0/LUN:1, CH:4/ID:0/LUN:0 - - ID: %s - Name: %s - LV-ID: %s - Size: 200 - Used: 200 - Min-reserve: 200 - Creation-time: Sat, Jan 11 22:18:40 2020 - Last-modification-time: Sat, Jan 11 22:18:40 2020 - Valid-filled-block: 100 - Total-filled-block: 100 - Progress: --- - Mapped: false - Mapping: --- - -CLI: Successful: 3 partition(s) shown -Return: 0x0000 -""" - return msg % (self.fake_partition_id[0], - self.fake_volume_id[0].replace('-', ''), - self.fake_lv_id[0], - self.fake_partition_id[1], - self.fake_volume_id[1].replace('-', ''), - self.fake_lv_id[0]) - - def get_test_show_replica_detail_for_migrate( - self, src_part_id, dst_part_id, volume_id, status='Completed'): - result = [{ - 'Pair-ID': self.fake_pair_id[0], - 'Name': 'Cinder-Snapshot', - 'Source-Device': 'DEEC', - 'Source': src_part_id, - 'Source-Type': 'LV-Partition', 
- 'Source-Name': volume_id, - 'Source-LV': '5DE94FF775D81C30', - 'Source-VS': '2C482316298F7A4E', - 'Source-Mapped': 'Yes', - 'Target-Device': 'DEEC', - 'Target': dst_part_id, - 'Target-Type': 'LV-Partition', - 'Target-Name': volume_id, - 'Target-LV': '5DE94FF775D81C30', - 'Target-VS': '033EA1FA4EA193EB', - 'Target-Mapped': 'No', - 'Type': 'Copy', - 'Priority': 'Normal', - 'Timeout': '---', - 'Incremental': '---', - 'Compression': '---', - 'Status': status, - 'Progress': '---', - 'Created-time': '01/11/2020 22:20 PM', - 'Sync-commence-time': '01/11/2020 22:20 PM', - 'Split-time': '01/11/2020 22:20 PM', - 'Completed-time': '01/11/2020 22:21 PM', - 'Description': '---', - }] - return (0, result) - - def get_test_show_replica_detail_for_si_sync_pair(self): - result = [{ - 'Pair-ID': self.fake_pair_id[0], - 'Name': 'Cinder-Snapshot', - 'Source-Device': 'DEEC', - 'Source': self.fake_snapshot_id[0], - 'Source-Type': 'LV-Partition', - 'Source-Name': '', - 'Source-LV': '5DE94FF775D81C30', - 'Source-VS': '2C482316298F7A4E', - 'Source-Mapped': 'Yes', - 'Target-Device': 'DEEC', - 'Target': self.fake_partition_id[1], - 'Target-Type': 'LV-Partition', - 'Target-Name': '', - 'Target-LV': '5DE94FF775D81C30', - 'Target-VS': '033EA1FA4EA193EB', - 'Target-Mapped': 'No', - 'Type': 'Copy', - 'Priority': 'Normal', - 'Timeout': '---', - 'Incremental': '---', - 'Compression': '---', - 'Status': 'Copy', - 'Progress': '---', - 'Created-time': '01/11/2020 22:20 PM', - 'Sync-commence-time': '01/11/2020 22:20 PM', - 'Split-time': '01/11/2020 22:20 PM', - 'Completed-time': '01/11/2020 22:21 PM', - 'Description': '---', - }] - return (0, result) - - def get_test_show_replica_detail_for_sync_pair(self): - result = [{ - 'Pair-ID': self.fake_pair_id[0], - 'Name': 'Cinder-Snapshot', - 'Source-Device': 'DEEC', - 'Source': self.fake_partition_id[0], - 'Source-Type': 'LV-Partition', - 'Source-Name': self.fake_volume_id[0].replace('-', ''), - 'Source-LV': '5DE94FF775D81C30', - 'Source-VS': 
'2C482316298F7A4E', - 'Source-Mapped': 'Yes', - 'Target-Device': 'DEEC', - 'Target': self.fake_partition_id[1], - 'Target-Type': 'LV-Partition', - 'Target-Name': self.fake_volume_id[1].replace('-', ''), - 'Target-LV': '5DE94FF775D81C30', - 'Target-VS': '033EA1FA4EA193EB', - 'Target-Mapped': 'No', - 'Type': 'Copy', - 'Priority': 'Normal', - 'Timeout': '---', - 'Incremental': '---', - 'Compression': '---', - 'Status': 'Copy', - 'Progress': '---', - 'Created-time': '01/11/2020 22:20 PM', - 'Sync-commence-time': '01/11/2020 22:20 PM', - 'Split-time': '01/11/2020 22:20 PM', - 'Completed-time': '01/11/2020 22:21 PM', - 'Description': '---', - }] - return (0, result) - - def get_test_show_replica_detail(self): - result = [{ - 'Pair-ID': '4BF246E26966F015', - 'Name': 'Cinder-Snapshot', - 'Source-Device': 'DEEC', - 'Source': self.fake_partition_id[2], - 'Source-Type': 'LV-Partition', - 'Source-Name': 'Part-2', - 'Source-LV': '5DE94FF775D81C30', - 'Source-VS': '2C482316298F7A4E', - 'Source-Mapped': 'No', - 'Target-Device': 'DEEC', - 'Target': self.fake_partition_id[3], - 'Target-Type': 'LV-Partition', - 'Target-Name': 'Part-1-Copy', - 'Target-LV': '5DE94FF775D81C30', - 'Target-VS': '714B80F0335F6E52', - 'Target-Mapped': 'No', - 'Type': 'Copy', - 'Priority': 'Normal', - 'Timeout': '---', - 'Incremental': '---', - 'Compression': '---', - 'Status': 'Completed', - 'Progress': '---', - 'Created-time': '01/11/2020 22:20 PM', - 'Sync-commence-time': '01/11/2020 22:20 PM', - 'Split-time': '01/11/2020 22:20 PM', - 'Completed-time': '01/11/2020 22:21 PM', - 'Description': '---', - }, { - 'Pair-ID': self.fake_pair_id[0], - 'Name': 'Cinder-Migrate', - 'Source-Device': 'DEEC', - 'Source': self.fake_partition_id[0], - 'Source-Type': 'LV-Partition', - 'Source-Name': self.fake_volume_id[0].replace('-', ''), - 'Source-LV': '5DE94FF775D81C30', - 'Source-VS': '2C482316298F7A4E', - 'Source-Mapped': 'Yes', - 'Target-Device': 'DEEC', - 'Target': self.fake_partition_id[1], - 'Target-Type': 
'LV-Partition', - 'Target-Name': self.fake_volume_id[1].replace('-', ''), - 'Target-LV': '5DE94FF775D81C30', - 'Target-VS': '033EA1FA4EA193EB', - 'Target-Mapped': 'No', - 'Type': 'Mirror', - 'Priority': 'Normal', - 'Timeout': '---', - 'Incremental': '---', - 'Compression': '---', - 'Status': 'Mirror', - 'Progress': '---', - 'Created-time': '01/11/2020 22:20 PM', - 'Sync-commence-time': '01/11/2020 22:20 PM', - 'Split-time': '01/11/2020 22:20 PM', - 'Completed-time': '01/11/2020 22:21 PM', - 'Description': '---', - }, { - 'Pair-ID': self.fake_pair_id[1], - 'Name': 'Cinder-Migrate', - 'Source-Device': 'DEEC', - 'Source': self.fake_partition_id[4], - 'Source-Type': 'LV-Partition', - 'Source-Name': self.fake_volume_id[0].replace('-', ''), - 'Source-LV': '5DE94FF775D81C30', - 'Source-VS': '2C482316298F7A4E', - 'Source-Mapped': 'No', - 'Target-Device': 'DEEC', - 'Target': self.fake_partition_id[5], - 'Target-Type': 'LV-Partition', - 'Target-Name': self.fake_volume_id[1].replace('-', ''), - 'Target-LV': '5DE94FF775D81C30', - 'Target-VS': '714B80F0335F6E52', - 'Target-Mapped': 'Yes', - 'Type': 'Mirror', - 'Priority': 'Normal', - 'Timeout': '---', - 'Incremental': '---', - 'Compression': '---', - 'Status': 'Mirror', - 'Progress': '---', - 'Created-time': '01/11/2020 22:20 PM', - 'Sync-commence-time': '01/11/2020 22:20 PM', - 'Split-time': '01/11/2020 22:20 PM', - 'Completed-time': '01/11/2020 22:21 PM', - 'Description': '---', - }] - return (0, result) - - def get_fake_show_replica_detail(self): - msg = """ - CLI: Successful: Device(UID:deec, Name:, Model:DS S16F-R2852-6) selected. 
-Return: 0x0000 - - Pair-ID: 4BF246E26966F015 - Name: Cinder-Snapshot - Source-Device: DEEC - Source: %s - Source-Type: LV-Partition - Source-Name: Part-2 - Source-LV: 5DE94FF775D81C30 - Source-VS: 2C482316298F7A4E - Source-Mapped: No - Target-Device: DEEC - Target: %s - Target-Type: LV-Partition - Target-Name: Part-1-Copy - Target-LV: 5DE94FF775D81C30 - Target-VS: 714B80F0335F6E52 - Target-Mapped: No - Type: Copy - Priority: Normal - Timeout: --- - Incremental: --- - Compression: --- - Status: Completed - Progress: --- - Created-time: 01/11/2020 22:20 PM - Sync-commence-time: 01/11/2020 22:20 PM - Split-time: 01/11/2020 22:20 PM - Completed-time: 01/11/2020 22:21 PM - Description: --- - - Pair-ID: %s - Name: Cinder-Migrate - Source-Device: DEEC - Source: %s - Source-Type: LV-Partition - Source-Name: %s - Source-LV: 5DE94FF775D81C30 - Source-VS: 2C482316298F7A4E - Source-Mapped: Yes - Target-Device: DEEC - Target: %s - Target-Type: LV-Partition - Target-Name: %s - Target-LV: 5DE94FF775D81C30 - Target-VS: 033EA1FA4EA193EB - Target-Mapped: No - Type: Mirror - Priority: Normal - Timeout: --- - Incremental: --- - Compression: --- - Status: Mirror - Progress: --- - Created-time: 01/11/2020 22:20 PM - Sync-commence-time: 01/11/2020 22:20 PM - Split-time: 01/11/2020 22:20 PM - Completed-time: 01/11/2020 22:21 PM - Description: --- - - Pair-ID: %s - Name: Cinder-Migrate - Source-Device: DEEC - Source: %s - Source-Type: LV-Partition - Source-Name: %s - Source-LV: 5DE94FF775D81C30 - Source-VS: 2C482316298F7A4E - Source-Mapped: No - Target-Device: DEEC - Target: %s - Target-Type: LV-Partition - Target-Name: %s - Target-LV: 5DE94FF775D81C30 - Target-VS: 714B80F0335F6E52 - Target-Mapped: Yes - Type: Mirror - Priority: Normal - Timeout: --- - Incremental: --- - Compression: --- - Status: Mirror - Progress: --- - Created-time: 01/11/2020 22:20 PM - Sync-commence-time: 01/11/2020 22:20 PM - Split-time: 01/11/2020 22:20 PM - Completed-time: 01/11/2020 22:21 PM - Description: --- - 
-CLI: Successful: 3 replication job(s) shown -Return: 0x0000 -""" - return msg % (self.fake_partition_id[2], - self.fake_partition_id[3], - self.fake_pair_id[0], - self.fake_partition_id[0], - self.fake_volume_id[0].replace('-', ''), - self.fake_partition_id[1], - self.fake_volume_id[1].replace('-', ''), - self.fake_pair_id[1], - self.fake_partition_id[4], - self.fake_volume_id[0].replace('-', ''), - self.fake_partition_id[5], - self.fake_volume_id[1].replace('-', '')) - - def get_test_show_lv(self): - return (0, [{ - 'Name': 'LV-1', - 'LD-amount': '1', - 'Available': '841978 MB', - 'ID': self.fake_lv_id[0], - 'Progress': '---', - 'Size': '857982 MB', - 'Status': 'On-line', - }]) - - def get_fake_show_lv(self): - msg = """ -CLI: Successful: Device(UID:77124, Name:, Model:DS S12F-G2852-6) selected. -Return: 0x0000 - - ID Name LD-amount Size Available Progress Status --------------------------------------------------------------- - %s LV-1 1 857982 MB 841978 MB --- On-line - -CLI: Successful: 1 Logical Volumes(s) shown -Return: 0x0000 -""" - return msg % self.fake_lv_id[0] - - def get_test_show_lv_detail(self): - return (0, [{ - 'Policy': 'Default', - 'Status': 'On-line', - 'ID': self.fake_lv_id[0], - 'Available': '841978 MB', - 'Expandable-size': '0 MB', - 'Name': 'LV-1', - 'Size': '857982 MB', - 'LD-amount': '1', - 'Progress': '---', - }]) - - def get_fake_show_lv_detail(self): - msg = """ -CLI: Successful: Device(UID:77124, Name:, Model:DS S12F-G2852-6) selected. 
-Return: 0x0000 - - ID: %s - Name: LV-1 - LD-amount: 1 - Size: 857982 MB - Available: 841978 MB - Expandable-size: 0 MB - Policy: Default - Progress: --- - Status: On-line - -CLI: Successful: 1 Logical Volumes(s) shown -Return: 0x0000 -""" - return msg % self.fake_lv_id[0] - - def get_test_show_lv_tier_for_migration(self): - return (0, [{ - 'LV-Name': 'TierLV', - 'LV-ID': self.fake_lv_id[1], - 'Tier': '0', - 'Size': '418.93 GB', - 'Used': '10 GB(2.4%)', - 'Data Service': '0 MB(0.0%)', - 'Reserved Ratio': '10.0%', - }, { - 'LV-Name': 'TierLV', - 'LV-ID': self.fake_lv_id[1], - 'Tier': '3', - 'Size': '931.02 GB', - 'Used': '0 MB(0.0%)', - 'Data Service': '0 MB(0.0%)', - 'Reserved Ratio': '0.0%', - }]) - - def get_test_show_lv_tier(self): - return (0, [{ - 'LV-Name': 'TierLV', - 'LV-ID': self.fake_lv_id[0], - 'Tier': '0', - 'Size': '418.93 GB', - 'Used': '10 GB(2.4%)', - 'Data Service': '0 MB(0.0%)', - 'Reserved Ratio': '10.0%', - }, { - 'LV-Name': 'TierLV', - 'LV-ID': self.fake_lv_id[0], - 'Tier': '3', - 'Size': '931.02 GB', - 'Used': '0 MB(0.0%)', - 'Data Service': '0 MB(0.0%)', - 'Reserved Ratio': '0.0%', - }]) - - def get_fake_show_lv_tier(self): - msg = """ -CLI: Successful: Device(UID:deec, Name:, Model:DS S16F-R2852-6) selected. 
-Return: 0x0000 - - LV-Name LV-ID Tier Size Used Data Service Reserved Ratio ------------------------------------------------------------------------------- - TierLV %s 0 418.93 GB 10 GB(2.4%%) 0 MB(0.0%%) 10.0%% - TierLV %s 3 931.02 GB 0 MB(0.0%%) 0 MB(0.0%%) 0.0%% - -CLI: Successful: 2 lv tiering(s) shown -Return: 0x0000 -""" - return msg % (self.fake_lv_id[0], - self.fake_lv_id[0]) - - def get_test_show_device(self): - return (0, [{ - 'ID': self.fake_system_id[0], - 'Connected-IP': self.fake_manage_port_ip[0], - 'Name': '---', - 'Index': '0*', - 'JBOD-ID': 'N/A', - 'Capacity': '1.22 TB', - 'Model': self.fake_model[0], - 'Service-ID': '8445676', - }]) - - def get_fake_show_device(self): - msg = """ -CLI: Successful: Device(UID:77124, Name:, Model:DS S12F-G2852-6) selected. -Return: 0x0000 - - Index ID Model Name Connected-IP JBOD-ID Capacity Service-ID ------------------------------------------------------------------------- - 0* %s %s --- %s N/A 1.22 TB 8445676 - -CLI: Successful: 1 device(s) found -Return: 0x0000 -""" - return msg % (self.fake_system_id[0], - self.fake_model[0], - self.fake_manage_port_ip[0]) - - def get_test_show_channel_single(self): - return (0, [{ - 'ID': '112', - 'defClock': 'Auto', - 'Type': 'FIBRE', - 'Mode': 'Host', - 'Width': '---', - 'Ch': '0', - 'MCS': 'N/A', - 'curClock': '---', - }, { - 'ID': '0', - 'defClock': 'Auto', - 'Type': 'NETWORK', - 'Mode': 'Host', - 'Width': 'iSCSI', - 'Ch': '1', - 'MCS': '0', - 'curClock': '---', - }]) - - def get_test_show_channel_with_mcs(self): - return (0, [{ - 'ID': '112', - 'defClock': 'Auto', - 'Type': 'FIBRE', - 'Mode': 'Host', - 'Width': '---', - 'Ch': '0', - 'MCS': 'N/A', - 'curClock': '---', - }, { - 'ID': '0', - 'defClock': 'Auto', - 'Type': 'NETWORK', - 'Mode': 'Host', - 'Width': 'iSCSI', - 'Ch': '1', - 'MCS': '1', - 'curClock': '---', - }, { - 'ID': '0', - 'defClock': 'Auto', - 'Type': 'NETWORK', - 'Mode': 'Host', - 'Width': 'iSCSI', - 'Ch': '2', - 'MCS': '1', - 'curClock': '---', - }, { - 
'ID': '---', - 'defClock': '6.0 Gbps', - 'Type': 'SAS', - 'Mode': 'Drive', - 'Width': 'SAS', - 'Ch': '3', - 'MCS': 'N/A', - 'curClock': '6.0 Gbps', - }, { - 'ID': '0', - 'defClock': 'Auto', - 'Type': 'NETWORK', - 'Mode': 'Host', - 'Width': 'iSCSI', - 'Ch': '4', - 'MCS': '2', - 'curClock': '---', - }, { - 'ID': '112', - 'defClock': 'Auto', - 'Type': 'FIBRE', - 'Mode': 'Host', - 'Width': '---', - 'Ch': '5', - 'MCS': 'N/A', - 'curClock': '---', - }]) - - def get_test_show_channel_without_mcs(self): - return (0, [{ - 'ID': '112', - 'defClock': 'Auto', - 'Type': 'FIBRE', - 'Mode': 'Host', - 'Width': '---', - 'Ch': '0', - 'curClock': '---', - }, { - 'ID': '0', - 'defClock': 'Auto', - 'Type': 'NETWORK', - 'Mode': 'Host', - 'Width': 'iSCSI', - 'Ch': '1', - 'curClock': '---', - }, { - 'ID': '0', - 'defClock': 'Auto', - 'Type': 'NETWORK', - 'Mode': 'Host', - 'Width': 'iSCSI', - 'Ch': '2', - 'curClock': '---', - }, { - 'ID': '---', - 'defClock': '6.0 Gbps', - 'Type': 'SAS', - 'Mode': 'Drive', - 'Width': 'SAS', - 'Ch': '3', - 'curClock': '6.0 Gbps', - }, { - 'ID': '0', - 'defClock': 'Auto', - 'Type': 'NETWORK', - 'Mode': 'Host', - 'Width': 'iSCSI', - 'Ch': '4', - 'curClock': '---', - }, { - 'ID': '112', - 'defClock': 'Auto', - 'Type': 'FIBRE', - 'Mode': 'Host', - 'Width': '---', - 'Ch': '5', - 'curClock': '---', - }]) - - def get_test_show_channel_with_diff_target_id(self): - return (0, [{ - 'ID': '32', - 'defClock': 'Auto', - 'Type': 'FIBRE', - 'Mode': 'Host', - 'Width': '---', - 'Ch': '0', - 'MCS': 'N/A', - 'curClock': '---', - }, { - 'ID': '0', - 'defClock': 'Auto', - 'Type': 'NETWORK', - 'Mode': 'Host', - 'Width': 'iSCSI', - 'Ch': '1', - 'MCS': '0', - 'curClock': '---', - }, { - 'ID': '0', - 'defClock': 'Auto', - 'Type': 'NETWORK', - 'Mode': 'Host', - 'Width': 'iSCSI', - 'Ch': '2', - 'MCS': '1', - 'curClock': '---', - }, { - 'ID': '---', - 'defClock': '6.0 Gbps', - 'Type': 'SAS', - 'Mode': 'Drive', - 'Width': 'SAS', - 'Ch': '3', - 'MCS': 'N/A', - 'curClock': '6.0 Gbps', - 
}, { - 'ID': '0', - 'defClock': 'Auto', - 'Type': 'NETWORK', - 'Mode': 'Host', - 'Width': 'iSCSI', - 'Ch': '4', - 'MCS': '2', - 'curClock': '---', - }, { - 'ID': '48', - 'defClock': 'Auto', - 'Type': 'FIBRE', - 'Mode': 'Host', - 'Width': '---', - 'Ch': '5', - 'MCS': 'N/A', - 'curClock': '---', - }]) - - def get_test_show_channel(self): - return (0, [{ - 'ID': '112', - 'defClock': 'Auto', - 'Type': 'FIBRE', - 'Mode': 'Host', - 'Width': '---', - 'Ch': '0', - 'MCS': 'N/A', - 'curClock': '---', - }, { - 'ID': '0', - 'defClock': 'Auto', - 'Type': 'NETWORK', - 'Mode': 'Host', - 'Width': 'iSCSI', - 'Ch': '1', - 'MCS': '0', - 'curClock': '---', - }, { - 'ID': '0', - 'defClock': 'Auto', - 'Type': 'NETWORK', - 'Mode': 'Host', - 'Width': 'iSCSI', - 'Ch': '2', - 'MCS': '1', - 'curClock': '---', - }, { - 'ID': '---', - 'defClock': '6.0 Gbps', - 'Type': 'SAS', - 'Mode': 'Drive', - 'Width': 'SAS', - 'Ch': '3', - 'MCS': 'N/A', - 'curClock': '6.0 Gbps', - }, { - 'ID': '0', - 'defClock': 'Auto', - 'Type': 'NETWORK', - 'Mode': 'Host', - 'Width': 'iSCSI', - 'Ch': '4', - 'MCS': '2', - 'curClock': '---', - }, { - 'ID': '112', - 'defClock': 'Auto', - 'Type': 'FIBRE', - 'Mode': 'Host', - 'Width': '---', - 'Ch': '5', - 'MCS': 'N/A', - 'curClock': '---', - }]) - - def get_fake_show_channel(self): - msg = """ -CLI: Successful: Device(UID:77124, Name:, Model:DS S12F-G2852-6) selected. 
-Return: 0x0000 - - Ch Mode Type defClock curClock Width ID MCS ---------------------------------------------------------- - 0 Host FIBRE Auto --- --- 112 N/A - 1 Host NETWORK Auto --- iSCSI 0 0 - 2 Host NETWORK Auto --- iSCSI 0 1 - 3 Drive SAS 6.0 Gbps 6.0 Gbps SAS --- N/A - 4 Host NETWORK Auto --- iSCSI 0 2 - 5 Host FIBRE Auto --- --- 112 N/A - -CLI: Successful: : 6 channel(s) shown -Return: 0x0000 -""" - return msg - - def get_test_show_channel_r_model_diff_target_id(self): - return (0, [{ - 'Mode': 'Host', - 'AID': '32', - 'defClock': 'Auto', - 'MCS': 'N/A', - 'Ch': '0', - 'BID': '33', - 'curClock': '---', - 'Width': '---', - 'Type': 'FIBRE', - }, { - 'Mode': 'Host', - 'AID': '0', - 'defClock': 'Auto', - 'MCS': '0', - 'Ch': '1', - 'BID': '1', - 'curClock': '---', - 'Width': 'iSCSI', - 'Type': 'NETWORK', - }, { - 'Mode': 'Host', - 'AID': '0', - 'defClock': 'Auto', - 'MCS': '1', - 'Ch': '2', - 'BID': '1', - 'curClock': '---', - 'Width': 'iSCSI', - 'Type': 'NETWORK', - }, { - 'Mode': 'Drive', - 'AID': '---', - 'defClock': '6.0 Gbps', - 'MCS': 'N/A', - 'Ch': '3', - 'BID': '---', - 'curClock': '6.0 Gbps', - 'Width': 'SAS', - 'Type': 'SAS', - }, { - 'Mode': 'Host', - 'AID': '0', - 'defClock': 'Auto', - 'MCS': '2', - 'Ch': '4', - 'BID': '1', - 'curClock': '---', - 'Width': 'iSCSI', - 'Type': 'NETWORK', - }, { - 'Mode': 'Host', - 'AID': '48', - 'defClock': 'Auto', - 'MCS': 'N/A', - 'Ch': '5', - 'BID': '49', - 'curClock': '---', - 'Width': '---', - 'Type': 'FIBRE', - }]) - - def get_test_show_channel_r_model(self): - return (0, [{ - 'Mode': 'Host', - 'AID': '112', - 'defClock': 'Auto', - 'MCS': 'N/A', - 'Ch': '0', - 'BID': '113', - 'curClock': '---', - 'Width': '---', - 'Type': 'FIBRE', - }, { - 'Mode': 'Host', - 'AID': '0', - 'defClock': 'Auto', - 'MCS': '0', - 'Ch': '1', - 'BID': '1', - 'curClock': '---', - 'Width': 'iSCSI', - 'Type': 'NETWORK', - }, { - 'Mode': 'Host', - 'AID': '0', - 'defClock': 'Auto', - 'MCS': '1', - 'Ch': '2', - 'BID': '1', - 'curClock': '---', - 
'Width': 'iSCSI', - 'Type': 'NETWORK', - }, { - 'Mode': 'Drive', - 'AID': '---', - 'defClock': '6.0 Gbps', - 'MCS': 'N/A', - 'Ch': '3', - 'BID': '---', - 'curClock': '6.0 Gbps', - 'Width': 'SAS', - 'Type': 'SAS', - }, { - 'Mode': 'Host', - 'AID': '0', - 'defClock': 'Auto', - 'MCS': '2', - 'Ch': '4', - 'BID': '1', - 'curClock': '---', - 'Width': 'iSCSI', - 'Type': 'NETWORK', - }, { - 'Mode': 'Host', - 'AID': '112', - 'defClock': 'Auto', - 'MCS': 'N/A', - 'Ch': '5', - 'BID': '113', - 'curClock': '---', - 'Width': '---', - 'Type': 'FIBRE', - }]) - - def get_fake_show_channel_r_model(self): - msg = """ -CLI: Successful: Device(UID:deec, Name:, Model:DS S16F-R2852-6) selected. -Return: 0x0000 - - Ch Mode Type defClock curClock Width AID BID MCS ----------------------------------------------------------------- - 0 Host FIBRE Auto --- --- 112 113 N/A - 1 Host NETWORK Auto --- iSCSI 0 1 0 - 2 Host NETWORK Auto --- iSCSI 0 1 1 - 3 Drive SAS 6.0 Gbps 6.0 Gbps SAS --- --- N/A - 4 Host NETWORK Auto --- iSCSI 0 1 2 - 5 Host FIBRE Auto --- --- 112 113 N/A - -CLI: Successful: : 9 channel(s) shown -Return: 0x0000 -""" - return msg - - def get_show_map_with_lun_map_on_zoning(self): - return (0, [{ - 'Ch': '0', - 'LUN': '0', - 'Media': 'PART', - 'Host-ID': self.fake_initiator_wwpns[0], - 'Target': '112', - 'Name': 'Part-1', - 'ID': self.fake_partition_id[0], - }]) - - def get_test_show_map(self, partition_id=None, channel_id=None): - if partition_id and channel_id: - return (0, [{ - 'Ch': channel_id, - 'LUN': '0', - 'Media': 'PART', - 'Host-ID': '---', - 'Target': '0', - 'Name': 'Part-1', - 'ID': partition_id, - }, { - 'Ch': channel_id, - 'LUN': '1', - 'Media': 'PART', - 'Host-ID': '---', - 'Target': '0', - 'Name': 'Part-1', - 'ID': partition_id, - }]) - else: - return (0, [{ - 'Ch': '1', - 'LUN': '0', - 'Media': 'PART', - 'Host-ID': '---', - 'Target': '0', - 'Name': 'Part-1', - 'ID': self.fake_partition_id[0], - }, { - 'Ch': '1', - 'LUN': '1', - 'Media': 'PART', - 'Host-ID': '---', 
- 'Target': '0', - 'Name': 'Part-1', - 'ID': self.fake_partition_id[0], - }, { - 'Ch': '4', - 'LUN': '0', - 'Media': 'PART', - 'Host-ID': '---', - 'Target': '0', - 'Name': 'Part-1', - 'ID': self.fake_partition_id[0], - }]) - - def get_test_show_map_multimap(self): - return (0, [{ - 'Ch': '1', - 'LUN': '0', - 'Media': 'PART', - 'Host-ID': '---', - 'Target': '0', - 'Name': 'Part-1', - 'ID': self.fake_partition_id[0], - }, { - 'Ch': '1', - 'LUN': '1', - 'Media': 'PART', - 'Host-ID': '---', - 'Target': '0', - 'Name': 'Part-1', - 'ID': self.fake_partition_id[0], - }, { - 'Ch': '4', - 'LUN': '0', - 'Media': 'PART', - 'Host-ID': '210000E08B0AADE1', - 'Target': '0', - 'Name': 'Part-1', - 'ID': self.fake_partition_id[0], - }, { - 'Ch': '4', - 'LUN': '0', - 'Media': 'PART', - 'Host-ID': '210000E08B0AADE2', - 'Target': '0', - 'Name': 'Part-1', - 'ID': self.fake_partition_id[0], - }]) - - def get_fake_show_map(self): - msg = """ -CLI: Successful: Device(UID:77124, Name:, Model:DS S12F-G2852-6) selected. 
-Return: 0x0000 - - Ch Target LUN Media Name ID Host-ID ------------------------------------------------------------ - 1 0 0 PART Part-1 %s --- - 1 0 1 PART Part-1 %s --- - 4 0 0 PART Part-1 %s --- - -CLI: Successful: 3 mapping(s) shown -Return: 0x0000 -""" - return msg % (self.fake_partition_id[0], - self.fake_partition_id[0], - self.fake_partition_id[0]) - - def get_test_show_license(self): - return (0, { - 'Local Volume Copy': { - 'Support': False, - 'Amount': '8/256', - }, - 'Synchronous Remote Mirror': { - 'Support': False, - 'Amount': '8/256', - }, - 'Snapshot': { - 'Support': False, - 'Amount': '1024/16384', - }, - 'Self-Encryption Drives': { - 'Support': False, - 'Amount': '---', - }, - 'Compression': { - 'Support': False, - 'Amount': '---', - }, - 'Local volume Mirror': { - 'Support': False, - 'Amount': '8/256', - }, - 'Storage Tiering': { - 'Support': False, - 'Amount': '---', - }, - 'Asynchronous Remote Mirror': { - 'Support': False, - 'Amount': '8/256', - }, - 'Scale-out': { - 'Support': False, - 'Amount': 'Not Support', - }, - 'Thin Provisioning': { - 'Support': False, - 'Amount': '---', - }, - 'Max JBOD': { - 'Support': False, - 'Amount': '15', - }, - 'EonPath': { - 'Support': False, - 'Amount': '---', - } - }) - - def get_fake_show_license(self): - msg = """ -CLI: Successful: Device(UID:deec, Name:, Model:DS S16F-R2852-6) selected. 
-Return: 0x0000 - - License Amount(Partition/Subsystem) Expired ------------------------------------------------------------------- - EonPath --- Expired - Scale-out Not Support --- - Snapshot 1024/16384 Expired - Local Volume Copy 8/256 Expired - Local volume Mirror 8/256 Expired - Synchronous Remote Mirror 8/256 Expired - Asynchronous Remote Mirror 8/256 Expired - Compression --- Expired - Thin Provisioning --- Expired - Storage Tiering --- Expired - Max JBOD 15 Expired - Self-Encryption Drives --- Expired - -CLI: Successful -Return: 0x0000 -""" - return msg - - def get_test_show_wwn_with_g_model(self): - return (0, [{ - 'ID': 'ID:112', - 'WWPN': self.fake_target_wwpns[0], - 'CH': '0', - 'WWNN': self.fake_target_wwnns[0], - }, { - 'ID': 'ID:112', - 'WWPN': self.fake_target_wwpns[1], - 'CH': '5', - 'WWNN': self.fake_target_wwnns[0], - }]) - - def get_test_show_wwn_with_diff_target_id(self): - return (0, [{ - 'ID': 'AID:32', - 'WWPN': self.fake_target_wwpns[0], - 'CH': '0', - 'WWNN': self.fake_target_wwnns[0], - }, { - 'ID': 'BID:33', - 'WWPN': self.fake_target_wwpns[2], - 'CH': '0', - 'WWNN': self.fake_target_wwnns[1], - }, { - 'ID': 'AID:48', - 'WWPN': self.fake_target_wwpns[1], - 'CH': '5', - 'WWNN': self.fake_target_wwnns[0], - }, { - 'ID': 'BID:49', - 'WWPN': self.fake_target_wwpns[3], - 'CH': '5', - 'WWNN': self.fake_target_wwnns[1], - }]) - - def get_test_show_wwn(self): - return (0, [{ - 'ID': 'AID:112', - 'WWPN': self.fake_target_wwpns[0], - 'CH': '0', - 'WWNN': self.fake_target_wwnns[0], - }, { - 'ID': 'BID:113', - 'WWPN': self.fake_target_wwpns[2], - 'CH': '0', - 'WWNN': self.fake_target_wwnns[1], - }, { - 'ID': 'AID:112', - 'WWPN': self.fake_target_wwpns[1], - 'CH': '5', - 'WWNN': self.fake_target_wwnns[0], - }, { - 'ID': 'BID:113', - 'WWPN': self.fake_target_wwpns[3], - 'CH': '5', - 'WWNN': self.fake_target_wwnns[1], - }]) - - def get_fake_show_wwn(self): - msg = """ -CLI: Successful: Device(UID:deec, Name:, Model:DS S16F-R2852-6) selected. 
-Return: 0x0000 - -WWN entries in controller for host channels: - CH ID WWNN WWPN -------------------------------------------------- - 0 AID:112 %s %s - 0 BID:113 %s %s - 5 AID:112 %s %s - 5 BID:113 %s %s - -CLI: Successful -Return: 0x0000 -""" - return msg % (self.fake_target_wwnns[0], self.fake_target_wwpns[0], - self.fake_target_wwnns[1], self.fake_target_wwpns[2], - self.fake_target_wwnns[0], self.fake_target_wwpns[1], - self.fake_target_wwnns[1], self.fake_target_wwpns[3]) - - def get_test_show_iqn(self): - return (0, [{ - 'Name': self.fake_initiator_iqn[0][-16:], - 'IQN': self.fake_initiator_iqn[0], - 'User': '---', - 'Password': '---', - 'Target': '---', - 'Target-Password': '---', - 'IP': '0.0.0.0', - 'Mask': '0.0.0.0', - }]) - - def get_fake_show_iqn(self): - msg = """ -CLI: Successful: Device(UID:deec, Name:, Model:DS S16F-R2852-6) selected. -Return: 0x0000 - -Detected host IQN: - IQN ----------------------------------------- - %s - - -List of initiator IQN(s): --------------------------- - Name: %s - IQN: %s - User: --- - Password: --- - Target: --- - Target-Password: --- - IP: 0.0.0.0 - Mask: 0.0.0.0 - -CLI: Successful: 1 initiator iqn(s) shown -Return: 0x0000 -""" - return msg % (self.fake_initiator_iqn[0], - self.fake_initiator_iqn[0][-16:], - self.fake_initiator_iqn[0]) - - def get_fake_discovery(self, target_iqns, target_portals): - template = '%s,1 %s' - - if len(target_iqns) == 1: - result = template % (target_portals[0], target_iqns[0]) - return (0, result) - - result = [] - for i in range(len(target_iqns)): - result.append(template % ( - target_portals[i], target_iqns[i])) - return (0, '\n'.join(result)) - - -class InfortrendCLITestCase(test.TestCase): - - CommandList = ['CreateLD', 'CreateLV', - 'CreatePartition', 'DeletePartition', 'SetPartition', - 'CreateMap', 'DeleteMap', - 'CreateSnapshot', 'DeleteSnapshot', - 'CreateReplica', 'DeleteReplica', - 'CreateIQN', 'DeleteIQN', - 'ShowLD', 'ShowLV', - 'ShowPartition', 'ShowSnapshot', - 
'ShowDevice', 'ShowChannel', - 'ShowDisk', 'ShowMap', - 'ShowNet', 'ShowLicense', - 'ShowWWN', 'ShowReplica', - 'ShowIQN'] - - def __init__(self, *args, **kwargs): - super(InfortrendCLITestCase, self).__init__(*args, **kwargs) - self.cli_data = InfortrendCLITestData() - - def _cli_set(self, cli, fake_result): - cli_conf = { - 'path': '', - 'password': '', - 'ip': '', - 'cli_retry_time': 1, - } - cli = cli(cli_conf) - - cli._execute = mock.Mock(return_value=fake_result) - - return cli - - def _cli_multi_set(self, cli, fake_result_list): - cli_conf = { - 'path': '', - 'password': '', - 'ip': '', - 'cli_retry_time': 5, - } - cli = cli(cli_conf) - - cli._execute = mock.Mock(side_effect=fake_result_list) - - return cli - - def _test_command_succeed(self, command): - - fake_cli_succeed = self.cli_data.get_fake_cli_succeed() - test_command = self._cli_set(command, fake_cli_succeed) - - rc, out = test_command.execute() - self.assertEqual(0, rc) - - def _test_command_failed(self, command): - - fake_cli_failed = self.cli_data.get_fake_cli_failed() - test_command = self._cli_set(command, fake_cli_failed) - - rc, out = test_command.execute() - self.assertEqual(int('0x000c', 16), rc) - - def _test_command_failed_retry_succeed(self, log_error, command): - - log_error.reset_mock() - - LOG_ERROR_STR = ( - 'Retry %(retry)s times: %(method)s Failed %(rc)s: %(reason)s' - ) - - fake_result_list = [ - self.cli_data.get_fake_cli_failed(), - self.cli_data.get_fake_cli_failed_with_network(), - self.cli_data.get_fake_cli_succeed(), - ] - test_command = self._cli_multi_set(command, fake_result_list) - - rc, out = test_command.execute() - self.assertEqual(0, rc) - - expect_log_error = [ - mock.call(LOG_ERROR_STR, { - 'retry': 1, - 'method': test_command.__class__.__name__, - 'rc': int('0x000c', 16), - 'reason': 'No selected device', - }), - mock.call(LOG_ERROR_STR, { - 'retry': 2, - 'method': test_command.__class__.__name__, - 'rc': int('0x000b', 16), - 'reason': 'No network', - }) - ] - 
log_error.assert_has_calls(expect_log_error) - - def _test_command_failed_retry_timeout(self, log_error, command): - - log_error.reset_mock() - - LOG_ERROR_STR = ( - 'Retry %(retry)s times: %(method)s Failed %(rc)s: %(reason)s' - ) - - fake_result_list = [ - self.cli_data.get_fake_cli_failed(), - self.cli_data.get_fake_cli_failed_with_network(), - self.cli_data.get_fake_cli_failed_with_network(), - self.cli_data.get_fake_cli_failed(), - self.cli_data.get_fake_cli_failed_with_network(), - ] - test_command = self._cli_multi_set(command, fake_result_list) - - rc, out = test_command.execute() - self.assertEqual(int('0x000b', 16), rc) - self.assertEqual('No network', out) - - expect_log_error = [ - mock.call(LOG_ERROR_STR, { - 'retry': 1, - 'method': test_command.__class__.__name__, - 'rc': int('0x000c', 16), - 'reason': 'No selected device', - }), - mock.call(LOG_ERROR_STR, { - 'retry': 2, - 'method': test_command.__class__.__name__, - 'rc': int('0x000b', 16), - 'reason': 'No network', - }), - mock.call(LOG_ERROR_STR, { - 'retry': 3, - 'method': test_command.__class__.__name__, - 'rc': int('0x000b', 16), - 'reason': 'No network', - }), - mock.call(LOG_ERROR_STR, { - 'retry': 4, - 'method': test_command.__class__.__name__, - 'rc': int('0x000c', 16), - 'reason': 'No selected device', - }), - mock.call(LOG_ERROR_STR, { - 'retry': 5, - 'method': test_command.__class__.__name__, - 'rc': int('0x000b', 16), - 'reason': 'No network', - }) - ] - log_error.assert_has_calls(expect_log_error) - - def _test_show_command(self, fake_data, test_data, command, *params): - - test_command = self._cli_set(command, fake_data) - - rc, out = test_command.execute(*params) - - self.assertEqual(test_data[0], rc) - - if isinstance(out, list): - for i in range(len(test_data[1])): - self.assertDictEqual(test_data[1][i], out[i]) - else: - self.assertDictEqual(test_data[1], out) - - @mock.patch.object(cli.LOG, 'debug', mock.Mock()) - def test_cli_all_command_execute(self): - - for command in 
self.CommandList: - self._test_command_succeed(getattr(cli, command)) - self._test_command_failed(getattr(cli, command)) - - @mock.patch.object(cli.LOG, 'error') - def test_cli_all_command_execute_retry_succeed(self, log_error): - - for command in self.CommandList: - self._test_command_failed_retry_succeed( - log_error, getattr(cli, command)) - - @mock.patch.object(cli.LOG, 'error') - def test_cli_all_command_execute_retry_timeout(self, log_error): - - for command in self.CommandList: - self._test_command_failed_retry_timeout( - log_error, getattr(cli, command)) - - @mock.patch.object(cli.LOG, 'debug', mock.Mock()) - def test_show_snapshot(self): - self._test_show_command( - self.cli_data.get_fake_show_snapshot(), - self.cli_data.get_test_show_snapshot(), - cli.ShowSnapshot) - - @mock.patch.object(cli.LOG, 'debug', mock.Mock()) - def test_show_snapshot_detail(self): - self._test_show_command( - self.cli_data.get_fake_show_snapshot_detail(), - self.cli_data.get_test_show_snapshot_detail(), - cli.ShowSnapshot, '-l') - - @mock.patch.object(cli.LOG, 'debug', mock.Mock()) - def test_show_net(self): - self._test_show_command( - self.cli_data.get_fake_show_net(), - self.cli_data.get_test_show_net(), - cli.ShowNet) - - @mock.patch.object(cli.LOG, 'debug', mock.Mock()) - def test_show_detail_net(self): - self._test_show_command( - self.cli_data.get_fake_show_net_detail(), - self.cli_data.get_test_show_net_detail(), - cli.ShowNet, '-l') - - @mock.patch.object(cli.LOG, 'debug', mock.Mock()) - def test_show_partition(self): - self._test_show_command( - self.cli_data.get_fake_show_partition(), - self.cli_data.get_test_show_partition(), - cli.ShowPartition) - - @mock.patch.object(cli.LOG, 'debug', mock.Mock()) - def test_show_partition_detail(self): - self._test_show_command( - self.cli_data.get_fake_show_partition_detail(), - self.cli_data.get_test_show_partition_detail(), - cli.ShowPartition, '-l') - - @mock.patch.object(cli.LOG, 'debug', mock.Mock()) - def test_show_lv(self): 
- self._test_show_command( - self.cli_data.get_fake_show_lv(), - self.cli_data.get_test_show_lv(), - cli.ShowLV) - - @mock.patch.object(cli.LOG, 'debug', mock.Mock()) - def test_show_lv_detail(self): - self._test_show_command( - self.cli_data.get_fake_show_lv_detail(), - self.cli_data.get_test_show_lv_detail(), - cli.ShowLV, '-l') - - @mock.patch.object(cli.LOG, 'debug', mock.Mock()) - def test_show_lv_tier(self): - self._test_show_command( - self.cli_data.get_fake_show_lv_tier(), - self.cli_data.get_test_show_lv_tier(), - cli.ShowLV, 'tier') - - @mock.patch.object(cli.LOG, 'debug', mock.Mock()) - def test_show_device(self): - self._test_show_command( - self.cli_data.get_fake_show_device(), - self.cli_data.get_test_show_device(), - cli.ShowDevice) - - @mock.patch.object(cli.LOG, 'debug', mock.Mock()) - def test_show_channel(self): - self._test_show_command( - self.cli_data.get_fake_show_channel(), - self.cli_data.get_test_show_channel(), - cli.ShowChannel) - - @mock.patch.object(cli.LOG, 'debug', mock.Mock()) - def test_show_channel_with_r_model(self): - self._test_show_command( - self.cli_data.get_fake_show_channel_r_model(), - self.cli_data.get_test_show_channel_r_model(), - cli.ShowChannel) - - @mock.patch.object(cli.LOG, 'debug', mock.Mock()) - def test_show_map(self): - self._test_show_command( - self.cli_data.get_fake_show_map(), - self.cli_data.get_test_show_map(), - cli.ShowMap) - - @mock.patch.object(cli.LOG, 'debug', mock.Mock()) - def test_show_license(self): - self._test_show_command( - self.cli_data.get_fake_show_license(), - self.cli_data.get_test_show_license(), - cli.ShowLicense) - - @mock.patch.object(cli.LOG, 'debug', mock.Mock()) - def test_show_replica_detail(self): - self._test_show_command( - self.cli_data.get_fake_show_replica_detail(), - self.cli_data.get_test_show_replica_detail(), - cli.ShowReplica, '-l') - - @mock.patch.object(cli.LOG, 'debug', mock.Mock()) - def test_show_wwn(self): - self._test_show_command( - 
self.cli_data.get_fake_show_wwn(), - self.cli_data.get_test_show_wwn(), - cli.ShowWWN) - - @mock.patch.object(cli.LOG, 'debug', mock.Mock()) - def test_show_iqn(self): - self._test_show_command( - self.cli_data.get_fake_show_iqn(), - self.cli_data.get_test_show_iqn(), - cli.ShowIQN) diff --git a/cinder/tests/unit/volume/drivers/infortrend/test_infortrend_common.py b/cinder/tests/unit/volume/drivers/infortrend/test_infortrend_common.py deleted file mode 100644 index c741a0560..000000000 --- a/cinder/tests/unit/volume/drivers/infortrend/test_infortrend_common.py +++ /dev/null @@ -1,2150 +0,0 @@ -# Copyright (c) 2015 Infortrend Technology, Inc. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -import copy - -import mock - -from cinder import exception -from cinder import test -from cinder.tests.unit import utils -from cinder.tests.unit.volume.drivers.infortrend import test_infortrend_cli -from cinder.volume import configuration -from cinder.volume.drivers.infortrend.raidcmd_cli import common_cli - -SUCCEED = (0, '') -FAKE_ERROR_RETURN = (-1, '') - - -class InfortrendTestCass(test.TestCase): - - def __init__(self, *args, **kwargs): - super(InfortrendTestCass, self).__init__(*args, **kwargs) - - def setUp(self): - super(InfortrendTestCass, self).setUp() - self.cli_data = test_infortrend_cli.InfortrendCLITestData() - - self.configuration = configuration.Configuration( - [], config_group=configuration.SHARED_CONF_GROUP) - self.configuration.append_config_values = mock.Mock(return_value=0) - self.configuration.safe_get = self._fake_safe_get - - def _fake_safe_get(self, key): - return getattr(self.configuration, key) - - def _driver_setup(self, mock_commands, configuration=None): - if configuration is None: - configuration = self.configuration - self.driver = self._get_driver(configuration) - - mock_commands_execute = self._mock_command_execute(mock_commands) - mock_cli = mock.Mock(side_effect=mock_commands_execute) - - self.driver._execute_command = mock_cli - - def _get_driver(self, conf): - raise NotImplementedError - - def _mock_command_execute(self, mock_commands): - def fake_execute_command(cli_type, *args, **kwargs): - if cli_type in mock_commands.keys(): - if isinstance(mock_commands[cli_type], list): - ret = mock_commands[cli_type][0] - del mock_commands[cli_type][0] - return ret - elif isinstance(mock_commands[cli_type], tuple): - return mock_commands[cli_type] - else: - return mock_commands[cli_type](*args, **kwargs) - return FAKE_ERROR_RETURN - return fake_execute_command - - def _mock_show_lv_for_migrate(self, *args, **kwargs): - if 'tier' in args: - return self.cli_data.get_test_show_lv_tier_for_migration() - return 
self.cli_data.get_test_show_lv() - - def _mock_show_lv(self, *args, **kwargs): - if 'tier' in args: - return self.cli_data.get_test_show_lv_tier() - return self.cli_data.get_test_show_lv() - - def _assert_cli_has_calls(self, expect_cli_cmd): - self.driver._execute_command.assert_has_calls(expect_cli_cmd) - - -class InfortrendFCCommonTestCase(InfortrendTestCass): - - def __init__(self, *args, **kwargs): - super(InfortrendFCCommonTestCase, self).__init__(*args, **kwargs) - - def setUp(self): - super(InfortrendFCCommonTestCase, self).setUp() - - self.configuration.volume_backend_name = 'infortrend_backend_1' - self.configuration.san_ip = self.cli_data.fake_manage_port_ip[0] - self.configuration.san_password = '111111' - self.configuration.infortrend_provisioning = 'full' - self.configuration.infortrend_tiering = '0' - self.configuration.infortrend_pools_name = 'LV-1, LV-2' - self.configuration.infortrend_slots_a_channels_id = '0,5' - self.configuration.infortrend_slots_b_channels_id = '0,5' - self.configuration.infortrend_cli_timeout = 30 - - def _get_driver(self, conf): - return common_cli.InfortrendCommon('FC', configuration=conf) - - def test_normal_channel(self): - - test_map_dict = { - 'slot_a': {'0': [], '5': []}, - 'slot_b': {}, - } - test_target_dict = { - 'slot_a': {'0': '112', '5': '112'}, - 'slot_b': {}, - } - mock_commands = { - 'ShowChannel': self.cli_data.get_test_show_channel(), - } - self._driver_setup(mock_commands) - - self.driver._init_map_info(True) - - self.assertDictEqual(test_map_dict, self.driver.map_dict) - self.assertDictEqual(test_target_dict, self.driver.target_dict) - - def test_normal_channel_with_r_model(self): - - test_map_dict = { - 'slot_a': {'0': [], '5': []}, - 'slot_b': {'0': [], '5': []}, - } - test_target_dict = { - 'slot_a': {'0': '112', '5': '112'}, - 'slot_b': {'0': '113', '5': '113'}, - } - mock_commands = { - 'ShowChannel': self.cli_data.get_test_show_channel_r_model(), - } - self._driver_setup(mock_commands) - - 
self.driver._init_map_info(True) - - self.assertDictEqual(test_map_dict, self.driver.map_dict) - self.assertDictEqual(test_target_dict, self.driver.target_dict) - - @mock.patch.object(common_cli.LOG, 'info', mock.Mock()) - def test_initialize_connection(self): - - test_volume = self.cli_data.test_volume - test_connector = self.cli_data.test_connector_fc - - mock_commands = { - 'ShowChannel': self.cli_data.get_test_show_channel_without_mcs(), - 'ShowMap': self.cli_data.get_test_show_map(), - 'CreateMap': SUCCEED, - 'ShowWWN': self.cli_data.get_test_show_wwn_with_g_model(), - } - self._driver_setup(mock_commands) - - properties = self.driver.initialize_connection( - test_volume, test_connector) - - self.assertDictEqual(self.cli_data.test_fc_properties, properties) - - @mock.patch.object(common_cli.LOG, 'info', mock.Mock()) - def test_initialize_connection_specific_channel(self): - - test_volume = self.cli_data.test_volume - test_connector = self.cli_data.test_connector_fc - configuration = copy.copy(self.configuration) - configuration.infortrend_slots_a_channels_id = '5' - - mock_commands = { - 'ShowChannel': self.cli_data.get_test_show_channel(), - 'ShowMap': self.cli_data.get_test_show_map(), - 'CreateMap': SUCCEED, - 'ShowWWN': self.cli_data.get_test_show_wwn_with_g_model(), - } - self._driver_setup(mock_commands, configuration) - - properties = self.driver.initialize_connection( - test_volume, test_connector) - - self.assertDictEqual( - self.cli_data.test_fc_properties_with_specific_channel, properties) - - @mock.patch.object(common_cli.LOG, 'info', mock.Mock()) - def test_initialize_connection_with_diff_target_id(self): - - test_volume = self.cli_data.test_volume - test_connector = self.cli_data.test_connector_fc - test_initiator_wwpns = test_connector['wwpns'] - test_partition_id = self.cli_data.fake_partition_id[0] - configuration = copy.copy(self.configuration) - configuration.infortrend_slots_a_channels_id = '5' - - mock_commands = { - 'ShowChannel': - 
self.cli_data.get_test_show_channel_with_diff_target_id(), - 'ShowMap': self.cli_data.get_test_show_map(), - 'CreateMap': SUCCEED, - 'ShowWWN': self.cli_data.get_test_show_wwn_with_g_model(), - } - self._driver_setup(mock_commands, configuration) - - properties = self.driver.initialize_connection( - test_volume, test_connector) - - expect_cli_cmd = [ - mock.call('ShowChannel'), - mock.call('ShowMap'), - mock.call('ShowWWN'), - mock.call('CreateMap', 'part', test_partition_id, '5', '48', '0', - 'wwn=%s' % test_initiator_wwpns[0]), - ] - self._assert_cli_has_calls(expect_cli_cmd) - - self.assertDictEqual( - self.cli_data.test_fc_properties_with_specific_channel, properties) - - @mock.patch.object(common_cli.LOG, 'info', mock.Mock()) - def test_initialize_connection_multipath_with_r_model(self): - - test_volume = self.cli_data.test_volume - test_connector = copy.deepcopy(self.cli_data.test_connector_fc) - - mock_commands = { - 'ShowChannel': self.cli_data.get_test_show_channel_r_model(), - 'ShowMap': self.cli_data.get_test_show_map(), - 'CreateMap': SUCCEED, - 'ShowWWN': self.cli_data.get_test_show_wwn(), - } - self._driver_setup(mock_commands) - - properties = self.driver.initialize_connection( - test_volume, test_connector) - - self.assertDictEqual( - self.cli_data.test_fc_properties_multipath_r_model, properties) - - def test_initialize_connection_with_get_wwn_fail(self): - - test_volume = self.cli_data.test_volume - test_connector = self.cli_data.test_connector_fc - - mock_commands = { - 'ShowChannel': self.cli_data.get_test_show_channel(), - 'ShowMap': self.cli_data.get_test_show_map(), - 'CreateMap': SUCCEED, - 'ShowWWN': FAKE_ERROR_RETURN, - } - self._driver_setup(mock_commands) - - self.assertRaises( - exception.InfortrendCliException, - self.driver.initialize_connection, - test_volume, - test_connector) - - @mock.patch.object(common_cli.LOG, 'info', mock.Mock()) - def test_initialize_connection_with_zoning(self): - - test_volume = self.cli_data.test_volume - 
test_connector = self.cli_data.test_connector_fc - test_initiator_wwpns = test_connector['wwpns'] - test_partition_id = self.cli_data.fake_partition_id[0] - test_all_target_wwpns = self.cli_data.fake_target_wwpns[0:2] - test_lookup_map = self.cli_data.fake_lookup_map - - mock_commands = { - 'ShowChannel': self.cli_data.get_test_show_channel(), - 'ShowMap': self.cli_data.get_test_show_map(), - 'CreateMap': SUCCEED, - 'ShowWWN': self.cli_data.get_test_show_wwn_with_g_model(), - } - self._driver_setup(mock_commands) - self.driver.fc_lookup_service = mock.Mock() - get_device_mapping_from_network = ( - self.driver.fc_lookup_service.get_device_mapping_from_network - ) - get_device_mapping_from_network.return_value = test_lookup_map - - properties = self.driver.initialize_connection( - test_volume, test_connector) - - get_device_mapping_from_network.assert_has_calls( - [mock.call(test_connector['wwpns'], test_all_target_wwpns)]) - - expect_cli_cmd = [ - mock.call('ShowChannel'), - mock.call('ShowMap'), - mock.call('ShowWWN'), - mock.call('CreateMap', 'part', test_partition_id, '0', '112', '0', - 'wwn=%s' % test_initiator_wwpns[0]), - mock.call('CreateMap', 'part', test_partition_id, '5', '112', '0', - 'wwn=%s' % test_initiator_wwpns[0]), - mock.call('CreateMap', 'part', test_partition_id, '0', '112', '0', - 'wwn=%s' % test_initiator_wwpns[1]), - mock.call('CreateMap', 'part', test_partition_id, '5', '112', '0', - 'wwn=%s' % test_initiator_wwpns[1]), - ] - self._assert_cli_has_calls(expect_cli_cmd) - - self.assertDictEqual( - self.cli_data.test_fc_properties_zoning, properties) - - @mock.patch.object(common_cli.LOG, 'info', mock.Mock()) - def test_initialize_connection_with_zoning_r_model(self): - - test_volume = self.cli_data.test_volume - test_connector = self.cli_data.test_connector_fc - test_initiator_wwpns = test_connector['wwpns'] - test_partition_id = self.cli_data.fake_partition_id[0] - test_all_target_wwpns = self.cli_data.fake_target_wwpns[:] - 
test_all_target_wwpns[1] = self.cli_data.fake_target_wwpns[2] - test_all_target_wwpns[2] = self.cli_data.fake_target_wwpns[1] - test_lookup_map = self.cli_data.fake_lookup_map_r_model - - mock_commands = { - 'ShowChannel': self.cli_data.get_test_show_channel_r_model(), - 'ShowMap': self.cli_data.get_test_show_map(), - 'CreateMap': SUCCEED, - 'ShowWWN': self.cli_data.get_test_show_wwn(), - } - self._driver_setup(mock_commands) - self.driver.fc_lookup_service = mock.Mock() - get_device_mapping_from_network = ( - self.driver.fc_lookup_service.get_device_mapping_from_network - ) - get_device_mapping_from_network.return_value = test_lookup_map - - properties = self.driver.initialize_connection( - test_volume, test_connector) - - get_device_mapping_from_network.assert_has_calls( - [mock.call(test_connector['wwpns'], test_all_target_wwpns)]) - - expect_cli_cmd = [ - mock.call('ShowChannel'), - mock.call('ShowMap'), - mock.call('ShowWWN'), - mock.call('CreateMap', 'part', test_partition_id, '5', '112', '0', - 'wwn=%s' % test_initiator_wwpns[0]), - mock.call('CreateMap', 'part', test_partition_id, '0', '113', '0', - 'wwn=%s' % test_initiator_wwpns[0]), - mock.call('CreateMap', 'part', test_partition_id, '5', '112', '0', - 'wwn=%s' % test_initiator_wwpns[1]), - mock.call('CreateMap', 'part', test_partition_id, '0', '113', '0', - 'wwn=%s' % test_initiator_wwpns[1]), - ] - self._assert_cli_has_calls(expect_cli_cmd) - - self.assertDictEqual( - self.cli_data.test_fc_properties_zoning_r_model, properties) - - @mock.patch.object(common_cli.LOG, 'info', mock.Mock()) - def test_initialize_connection_with_zoning_r_model_diff_target_id(self): - - test_volume = self.cli_data.test_volume - test_connector = self.cli_data.test_connector_fc - test_initiator_wwpns = test_connector['wwpns'] - test_partition_id = self.cli_data.fake_partition_id[0] - test_all_target_wwpns = self.cli_data.fake_target_wwpns[:] - test_all_target_wwpns[1] = self.cli_data.fake_target_wwpns[2] - 
test_all_target_wwpns[2] = self.cli_data.fake_target_wwpns[1] - test_lookup_map = self.cli_data.fake_lookup_map_r_model - - mock_commands = { - 'ShowChannel': - self.cli_data.get_test_show_channel_r_model_diff_target_id(), - 'ShowMap': self.cli_data.get_test_show_map(), - 'CreateMap': SUCCEED, - 'ShowWWN': self.cli_data.get_test_show_wwn_with_diff_target_id(), - } - self._driver_setup(mock_commands) - self.driver.fc_lookup_service = mock.Mock() - get_device_mapping_from_network = ( - self.driver.fc_lookup_service.get_device_mapping_from_network - ) - get_device_mapping_from_network.return_value = test_lookup_map - - properties = self.driver.initialize_connection( - test_volume, test_connector) - - get_device_mapping_from_network.assert_has_calls( - [mock.call(test_connector['wwpns'], test_all_target_wwpns)]) - - expect_cli_cmd = [ - mock.call('ShowChannel'), - mock.call('ShowMap'), - mock.call('ShowWWN'), - mock.call('CreateMap', 'part', test_partition_id, '5', '48', '0', - 'wwn=%s' % test_initiator_wwpns[0]), - mock.call('CreateMap', 'part', test_partition_id, '0', '33', '0', - 'wwn=%s' % test_initiator_wwpns[0]), - mock.call('CreateMap', 'part', test_partition_id, '5', '48', '0', - 'wwn=%s' % test_initiator_wwpns[1]), - mock.call('CreateMap', 'part', test_partition_id, '0', '33', '0', - 'wwn=%s' % test_initiator_wwpns[1]), - ] - self._assert_cli_has_calls(expect_cli_cmd) - - self.assertDictEqual( - self.cli_data.test_fc_properties_zoning_r_model, properties) - - @mock.patch.object(common_cli.LOG, 'info', mock.Mock()) - def test_terminate_connection(self): - - test_volume = self.cli_data.test_volume - test_partition_id = self.cli_data.fake_partition_id[0] - test_connector = self.cli_data.test_connector_fc - - mock_commands = { - 'DeleteMap': SUCCEED, - 'ShowMap': self.cli_data.get_test_show_map(), - 'ShowWWN': SUCCEED, - } - self._driver_setup(mock_commands) - - self.driver.terminate_connection(test_volume, test_connector) - - expect_cli_cmd = [ - 
mock.call('DeleteMap', 'part', test_partition_id, '-y'), - mock.call('ShowMap'), - mock.call('ShowWWN'), - ] - self._assert_cli_has_calls(expect_cli_cmd) - - @mock.patch.object(common_cli.LOG, 'info', mock.Mock()) - def test_terminate_connection_with_zoning(self): - - test_volume = self.cli_data.test_volume - test_partition_id = self.cli_data.fake_partition_id[0] - test_connector = self.cli_data.test_connector_fc - test_all_target_wwpns = self.cli_data.fake_target_wwpns[0:2] - test_lookup_map = self.cli_data.fake_lookup_map - - mock_commands = { - 'DeleteMap': SUCCEED, - 'ShowMap': self.cli_data.get_test_show_map(), - 'ShowWWN': self.cli_data.get_test_show_wwn_with_g_model(), - } - self._driver_setup(mock_commands) - self.driver.map_dict = { - 'slot_a': {'0': [], '5': []}, - 'slot_b': {}, - } - self.driver.fc_lookup_service = mock.Mock() - get_device_mapping_from_network = ( - self.driver.fc_lookup_service.get_device_mapping_from_network - ) - get_device_mapping_from_network.return_value = test_lookup_map - - conn_info = self.driver.terminate_connection( - test_volume, test_connector) - - get_device_mapping_from_network.assert_has_calls( - [mock.call(test_connector['wwpns'], test_all_target_wwpns)]) - expect_cli_cmd = [ - mock.call('DeleteMap', 'part', test_partition_id, '-y'), - mock.call('ShowMap'), - mock.call('ShowWWN'), - ] - self._assert_cli_has_calls(expect_cli_cmd) - - self.assertDictEqual( - self.cli_data.test_fc_terminate_conn_info, conn_info) - - @mock.patch.object(common_cli.LOG, 'info', mock.Mock()) - def test_terminate_connection_with_zoning_and_lun_map_exist(self): - - test_volume = self.cli_data.test_volume - test_partition_id = self.cli_data.fake_partition_id[0] - test_connector = self.cli_data.test_connector_fc - - mock_commands = { - 'DeleteMap': SUCCEED, - 'ShowMap': self.cli_data.get_show_map_with_lun_map_on_zoning(), - } - self._driver_setup(mock_commands) - self.driver.map_dict = { - 'slot_a': {'0': [], '5': []}, - 'slot_b': {}, - } - 
self.driver.target_dict = { - 'slot_a': {'0': '112', '5': '112'}, - 'slot_b': {}, - } - self.driver.fc_lookup_service = mock.Mock() - - conn_info = self.driver.terminate_connection( - test_volume, test_connector) - - expect_cli_cmd = [ - mock.call('DeleteMap', 'part', test_partition_id, '-y'), - mock.call('ShowMap'), - ] - expect_conn_info = {'driver_volume_type': 'fibre_channel', - 'data': {}} - self._assert_cli_has_calls(expect_cli_cmd) - - self.assertEqual(expect_conn_info, conn_info) - - -class InfortrendiSCSICommonTestCase(InfortrendTestCass): - - def __init__(self, *args, **kwargs): - super(InfortrendiSCSICommonTestCase, self).__init__(*args, **kwargs) - - def setUp(self): - super(InfortrendiSCSICommonTestCase, self).setUp() - - self.configuration.volume_backend_name = 'infortrend_backend_1' - self.configuration.san_ip = self.cli_data.fake_manage_port_ip[0] - self.configuration.san_password = '111111' - self.configuration.infortrend_provisioning = 'full' - self.configuration.infortrend_tiering = '0' - self.configuration.infortrend_pools_name = 'LV-1, LV-2' - self.configuration.infortrend_slots_a_channels_id = '1,2,4' - self.configuration.infortrend_slots_b_channels_id = '1,2,4' - - def _get_driver(self, conf): - return common_cli.InfortrendCommon('iSCSI', configuration=conf) - - @mock.patch.object(common_cli.LOG, 'warning') - def test_create_map_warning_return_code(self, log_warning): - - FAKE_RETURN_CODE = (20, '') - mock_commands = { - 'CreateMap': FAKE_RETURN_CODE, - } - self._driver_setup(mock_commands) - - self.driver._execute('CreateMap') - self.assertEqual(1, log_warning.call_count) - - @mock.patch.object(common_cli.LOG, 'warning') - def test_delete_map_warning_return_code(self, log_warning): - - FAKE_RETURN_CODE = (11, '') - mock_commands = { - 'DeleteMap': FAKE_RETURN_CODE, - } - self._driver_setup(mock_commands) - - self.driver._execute('DeleteMap') - self.assertEqual(1, log_warning.call_count) - - @mock.patch.object(common_cli.LOG, 'warning') - def 
test_create_iqn_warning_return_code(self, log_warning): - - FAKE_RETURN_CODE = (20, '') - mock_commands = { - 'CreateIQN': FAKE_RETURN_CODE, - } - self._driver_setup(mock_commands) - - self.driver._execute('CreateIQN') - self.assertEqual(1, log_warning.call_count) - - @mock.patch.object(common_cli.LOG, 'warning') - def test_delete_iqn_warning_return_code_has_map(self, log_warning): - - FAKE_RETURN_CODE = (20, '') - mock_commands = { - 'DeleteIQN': FAKE_RETURN_CODE, - } - self._driver_setup(mock_commands) - - self.driver._execute('DeleteIQN') - self.assertEqual(1, log_warning.call_count) - - @mock.patch.object(common_cli.LOG, 'warning') - def test_delete_iqn_warning_return_code_no_such_name(self, log_warning): - - FAKE_RETURN_CODE = (11, '') - mock_commands = { - 'DeleteIQN': FAKE_RETURN_CODE, - } - self._driver_setup(mock_commands) - - self.driver._execute('DeleteIQN') - self.assertEqual(1, log_warning.call_count) - - def test_normal_channel(self): - - test_map_dict = { - 'slot_a': {'1': [], '2': [], '4': []}, - 'slot_b': {}, - } - test_target_dict = { - 'slot_a': {'1': '0', '2': '0', '4': '0'}, - 'slot_b': {}, - } - mock_commands = { - 'ShowChannel': self.cli_data.get_test_show_channel(), - } - self._driver_setup(mock_commands) - - self.driver._init_map_info() - - self.assertDictEqual(test_map_dict, self.driver.map_dict) - self.assertDictEqual(test_target_dict, self.driver.target_dict) - - def test_normal_channel_with_multipath(self): - - test_map_dict = { - 'slot_a': {'1': [], '2': [], '4': []}, - 'slot_b': {'1': [], '2': [], '4': []}, - } - test_target_dict = { - 'slot_a': {'1': '0', '2': '0', '4': '0'}, - 'slot_b': {'1': '1', '2': '1', '4': '1'}, - } - mock_commands = { - 'ShowChannel': self.cli_data.get_test_show_channel_r_model(), - } - self._driver_setup(mock_commands) - - self.driver._init_map_info(multipath=True) - - self.assertDictEqual(test_map_dict, self.driver.map_dict) - self.assertDictEqual(test_target_dict, self.driver.target_dict) - - def 
test_specific_channel(self): - - configuration = copy.copy(self.configuration) - configuration.infortrend_slots_a_channels_id = '2, 4' - - test_map_dict = { - 'slot_a': {'2': [], '4': []}, - 'slot_b': {}, - } - test_target_dict = { - 'slot_a': {'2': '0', '4': '0'}, - 'slot_b': {}, - } - mock_commands = { - 'ShowChannel': self.cli_data.get_test_show_channel(), - } - self._driver_setup(mock_commands, configuration) - - self.driver._init_map_info() - - self.assertDictEqual(test_map_dict, self.driver.map_dict) - self.assertDictEqual(test_target_dict, self.driver.target_dict) - - def test_update_mcs_dict(self): - - configuration = copy.copy(self.configuration) - configuration.use_multipath_for_image_xfer = True - - test_mcs_dict = { - 'slot_a': {'1': ['1', '2'], '2': ['4']}, - 'slot_b': {}, - } - mock_commands = { - 'ShowChannel': self.cli_data.get_test_show_channel_with_mcs(), - } - self._driver_setup(mock_commands, configuration) - - self.driver._init_map_info() - - self.assertDictEqual(test_mcs_dict, self.driver.mcs_dict) - - def test_mapping_info_with_mcs(self): - - configuration = copy.copy(self.configuration) - configuration.use_multipath_for_image_xfer = True - - fake_mcs_dict = { - 'slot_a': {'0': ['1', '2'], '2': ['4']}, - 'slot_b': {}, - } - lun_list = list(range(0, 127)) - fake_map_dict = { - 'slot_a': {'1': lun_list[2:], '2': lun_list[:], '4': lun_list[1:]}, - 'slot_b': {}, - } - - test_map_chl = { - 'slot_a': ['1', '2'], - } - test_map_lun = ['2'] - test_mcs_id = '0' - self.driver = self._get_driver(configuration) - self.driver.mcs_dict = fake_mcs_dict - self.driver.map_dict = fake_map_dict - - map_chl, map_lun, mcs_id = self.driver._get_mapping_info_with_mcs() - - self.assertDictEqual(test_map_chl, map_chl) - self.assertEqual(test_map_lun, map_lun) - self.assertEqual(test_mcs_id, mcs_id) - - def test_mapping_info_with_mcs_multi_group(self): - - configuration = copy.copy(self.configuration) - configuration.use_multipath_for_image_xfer = True - - 
fake_mcs_dict = { - 'slot_a': {'0': ['1', '2'], '1': ['3', '4'], '2': ['5']}, - 'slot_b': {}, - } - lun_list = list(range(0, 127)) - fake_map_dict = { - 'slot_a': { - '1': lun_list[2:], - '2': lun_list[:], - '3': lun_list[:], - '4': lun_list[1:], - '5': lun_list[:], - }, - 'slot_b': {}, - } - - test_map_chl = { - 'slot_a': ['3', '4'], - } - test_map_lun = ['1'] - test_mcs_id = '1' - self.driver = self._get_driver(configuration) - self.driver.mcs_dict = fake_mcs_dict - self.driver.map_dict = fake_map_dict - - map_chl, map_lun, mcs_id = self.driver._get_mapping_info_with_mcs() - - self.assertDictEqual(test_map_chl, map_chl) - self.assertEqual(test_map_lun, map_lun) - self.assertEqual(test_mcs_id, mcs_id) - - def test_specific_channel_with_multipath(self): - - configuration = copy.copy(self.configuration) - configuration.infortrend_slots_a_channels_id = '1,2' - - test_map_dict = { - 'slot_a': {'1': [], '2': []}, - 'slot_b': {}, - } - test_target_dict = { - 'slot_a': {'1': '0', '2': '0'}, - 'slot_b': {}, - } - mock_commands = { - 'ShowChannel': self.cli_data.get_test_show_channel(), - } - self._driver_setup(mock_commands, configuration) - - self.driver._init_map_info(multipath=True) - - self.assertDictEqual(test_map_dict, self.driver.map_dict) - self.assertDictEqual(test_target_dict, self.driver.target_dict) - - def test_specific_channel_with_multipath_r_model(self): - - configuration = copy.copy(self.configuration) - configuration.infortrend_slots_a_channels_id = '1,2' - configuration.infortrend_slots_b_channels_id = '1' - - test_map_dict = { - 'slot_a': {'1': [], '2': []}, - 'slot_b': {'1': []}, - } - test_target_dict = { - 'slot_a': {'1': '0', '2': '0'}, - 'slot_b': {'1': '1'}, - } - mock_commands = { - 'ShowChannel': self.cli_data.get_test_show_channel_r_model(), - } - self._driver_setup(mock_commands, configuration) - - self.driver._init_map_info(multipath=True) - - self.assertDictEqual(test_map_dict, self.driver.map_dict) - self.assertDictEqual(test_target_dict, 
self.driver.target_dict) - - @mock.patch.object(common_cli.LOG, 'info') - def test_create_volume(self, log_info): - - test_volume = self.cli_data.test_volume - test_model_update = { - 'provider_location': 'partition_id^%s@system_id^%s' % ( - self.cli_data.fake_partition_id[0], - int(self.cli_data.fake_system_id[0], 16)) - } - - mock_commands = { - 'CreatePartition': SUCCEED, - 'ShowPartition': self.cli_data.get_test_show_partition(), - 'ShowDevice': self.cli_data.get_test_show_device(), - 'ShowLV': self._mock_show_lv, - } - self._driver_setup(mock_commands) - - model_update = self.driver.create_volume(test_volume) - - self.assertDictEqual(test_model_update, model_update) - self.assertEqual(1, log_info.call_count) - - @mock.patch.object(common_cli.LOG, 'info', mock.Mock()) - def test_create_volume_with_create_fail(self): - test_volume = self.cli_data.test_volume - - mock_commands = { - 'CreatePartition': FAKE_ERROR_RETURN, - 'ShowPartition': self.cli_data.get_test_show_partition(), - 'ShowDevice': self.cli_data.get_test_show_device(), - 'ShowLV': self._mock_show_lv, - } - self._driver_setup(mock_commands) - - self.assertRaises( - exception.InfortrendCliException, - self.driver.create_volume, - test_volume) - - @mock.patch.object(common_cli.LOG, 'info') - def test_delete_volume(self, log_info): - - test_volume = self.cli_data.test_volume - test_partition_id = self.cli_data.fake_partition_id[0] - test_snapshot_id = self.cli_data.fake_snapshot_id - test_pair_id = self.cli_data.fake_pair_id - - mock_commands = { - 'ShowPartition': - self.cli_data.get_test_show_partition_detail_for_map( - test_partition_id), - 'ShowReplica': self.cli_data.get_test_show_replica_detail(), - 'DeleteReplica': SUCCEED, - 'ShowSnapshot': self.cli_data.get_test_show_snapshot(), - 'DeleteSnapshot': SUCCEED, - 'ShowMap': self.cli_data.get_test_show_map(), - 'DeleteMap': SUCCEED, - 'DeletePartition': SUCCEED, - } - self._driver_setup(mock_commands) - - self.driver.delete_volume(test_volume) - - 
expect_cli_cmd = [ - mock.call('ShowPartition', '-l'), - mock.call('ShowReplica', '-l'), - mock.call('DeleteReplica', test_pair_id[0], '-y'), - mock.call('ShowSnapshot', 'part=%s' % test_partition_id), - mock.call('DeleteSnapshot', test_snapshot_id[0], '-y'), - mock.call('DeleteSnapshot', test_snapshot_id[1], '-y'), - mock.call('ShowMap', 'part=%s' % test_partition_id), - mock.call('DeleteMap', 'part', test_partition_id, '-y'), - mock.call('DeletePartition', test_partition_id, '-y'), - ] - self._assert_cli_has_calls(expect_cli_cmd) - self.assertEqual(1, log_info.call_count) - - @mock.patch.object(common_cli.LOG, 'warning', mock.Mock()) - def test_delete_volume_with_sync_pair(self): - - test_volume = self.cli_data.test_volume - test_partition_id = self.cli_data.fake_partition_id[0] - - mock_commands = { - 'ShowPartition': - self.cli_data.get_test_show_partition_detail_for_map( - test_partition_id), - 'ShowReplica': - self.cli_data.get_test_show_replica_detail_for_sync_pair(), - } - self._driver_setup(mock_commands) - - self.assertRaises( - exception.VolumeDriverException, - self.driver.delete_volume, - test_volume) - - def test_delete_volume_with_delete_fail(self): - - test_volume = self.cli_data.test_volume - test_partition_id = self.cli_data.fake_partition_id[0] - - mock_commands = { - 'ShowPartition': - self.cli_data.get_test_show_partition_detail_for_map( - test_partition_id), - 'ShowReplica': self.cli_data.get_test_show_replica_detail(), - 'DeleteReplica': SUCCEED, - 'ShowSnapshot': self.cli_data.get_test_show_snapshot(), - 'DeleteSnapshot': SUCCEED, - 'ShowMap': self.cli_data.get_test_show_map(), - 'DeleteMap': SUCCEED, - 'DeletePartition': FAKE_ERROR_RETURN, - } - self._driver_setup(mock_commands) - - self.assertRaises( - exception.InfortrendCliException, - self.driver.delete_volume, - test_volume) - - @mock.patch.object(common_cli.LOG, 'warning') - def test_delete_volume_with_partiton_not_found(self, log_warning): - - test_volume = self.cli_data.test_volume 
- - mock_commands = { - 'ShowPartition': self.cli_data.get_test_show_empty_list(), - } - self._driver_setup(mock_commands) - - self.driver.delete_volume(test_volume) - - self.assertEqual(1, log_warning.call_count) - - @mock.patch.object(common_cli.LOG, 'info') - def test_delete_volume_without_provider(self, log_info): - - test_system_id = self.cli_data.fake_system_id[0] - test_volume = copy.deepcopy(self.cli_data.test_volume) - test_volume['provider_location'] = 'system_id^%s@partition_id^%s' % ( - int(test_system_id, 16), 'None') - test_partition_id = self.cli_data.fake_partition_id[0] - - mock_commands = { - 'ShowPartition': - self.cli_data.get_test_show_partition_detail_for_map( - test_partition_id), - 'ShowReplica': self.cli_data.get_test_show_replica_detail(), - 'DeleteReplica': SUCCEED, - 'ShowSnapshot': self.cli_data.get_test_show_snapshot(), - 'DeleteSnapshot': SUCCEED, - 'ShowMap': self.cli_data.get_test_show_map(), - 'DeleteMap': SUCCEED, - 'DeletePartition': SUCCEED, - } - self._driver_setup(mock_commands) - - self.driver.delete_volume(test_volume) - - self.assertEqual(1, log_info.call_count) - - @mock.patch('oslo_service.loopingcall.FixedIntervalLoopingCall', - new=utils.ZeroIntervalLoopingCall) - @mock.patch.object(common_cli.LOG, 'info') - def test_create_cloned_volume(self, log_info): - - fake_partition_id = self.cli_data.fake_partition_id[0] - test_dst_volume = self.cli_data.test_dst_volume - test_dst_volume_id = test_dst_volume['id'].replace('-', '') - test_src_volume = self.cli_data.test_volume - test_dst_part_id = self.cli_data.fake_partition_id[1] - test_model_update = { - 'provider_location': 'partition_id^%s@system_id^%s' % ( - self.cli_data.fake_partition_id[1], - int(self.cli_data.fake_system_id[0], 16)) - } - - mock_commands = { - 'CreatePartition': SUCCEED, - 'ShowPartition': self.cli_data.get_test_show_partition(), - 'ShowDevice': self.cli_data.get_test_show_device(), - 'CreateReplica': SUCCEED, - 'ShowLV': self._mock_show_lv, - 
'ShowReplica': - self.cli_data.get_test_show_replica_detail_for_migrate( - fake_partition_id, test_dst_part_id, test_dst_volume_id), - 'DeleteReplica': SUCCEED, - } - self._driver_setup(mock_commands) - - model_update = self.driver.create_cloned_volume( - test_dst_volume, test_src_volume) - - self.assertDictEqual(test_model_update, model_update) - self.assertEqual(1, log_info.call_count) - - @mock.patch('oslo_service.loopingcall.FixedIntervalLoopingCall', - new=utils.ZeroIntervalLoopingCall) - @mock.patch.object(common_cli.LOG, 'info') - def test_create_cloned_volume_different_size(self, log_info): - - fake_partition_id = self.cli_data.fake_partition_id[0] - test_dst_volume = self.cli_data.test_dst_volume - test_dst_volume['size'] = 10 - test_dst_volume_id = test_dst_volume['id'].replace('-', '') - test_src_volume = self.cli_data.test_volume - test_dst_part_id = self.cli_data.fake_partition_id[1] - test_model_update = { - 'provider_location': 'partition_id^%s@system_id^%s' % ( - self.cli_data.fake_partition_id[1], - int(self.cli_data.fake_system_id[0], 16)) - - } - - mock_commands = { - 'CreatePartition': SUCCEED, - 'ShowPartition': self.cli_data.get_test_show_partition(), - 'ShowDevice': self.cli_data.get_test_show_device(), - 'CreateReplica': SUCCEED, - 'ShowLV': self._mock_show_lv, - 'ShowReplica': - self.cli_data.get_test_show_replica_detail_for_migrate( - fake_partition_id, test_dst_part_id, test_dst_volume_id), - 'DeleteReplica': SUCCEED, - } - self._driver_setup(mock_commands) - - model_update = self.driver.create_cloned_volume( - test_dst_volume, test_src_volume) - self.assertDictEqual(test_model_update, model_update) - log_info.assert_called_once() - self.assertEqual(10, test_dst_volume['size']) - - @mock.patch.object(common_cli.LOG, 'info', mock.Mock()) - def test_create_cloned_volume_with_create_replica_fail(self): - - test_dst_volume = self.cli_data.test_dst_volume - test_src_volume = self.cli_data.test_volume - - mock_commands = { - 'CreatePartition': 
SUCCEED, - 'ShowPartition': self.cli_data.get_test_show_partition(), - 'ShowDevice': self.cli_data.get_test_show_device(), - 'CreateReplica': FAKE_ERROR_RETURN, - 'ShowLV': self._mock_show_lv, - } - self._driver_setup(mock_commands) - - self.assertRaises( - exception.InfortrendCliException, - self.driver.create_cloned_volume, - test_dst_volume, - test_src_volume) - - @mock.patch.object(common_cli.LOG, 'info', mock.Mock()) - def test_create_export(self): - - test_volume = self.cli_data.test_volume - test_model_update = { - 'provider_location': test_volume['provider_location'], - } - self.driver = self._get_driver(self.configuration) - - model_update = self.driver.create_export(None, test_volume) - - self.assertDictEqual(test_model_update, model_update) - - @mock.patch.object(common_cli.LOG, 'info', mock.Mock()) - def test_get_volume_stats(self): - - test_volume_states = self.cli_data.test_volume_states - - mock_commands = { - 'ShowLicense': self.cli_data.get_test_show_license(), - 'ShowLV': self.cli_data.get_test_show_lv(), - 'ShowPartition': self.cli_data.get_test_show_partition_detail(), - } - self._driver_setup(mock_commands) - self.driver.VERSION = '99.99' - - volume_states = self.driver.get_volume_stats(True) - - self.assertDictEqual(test_volume_states, volume_states) - - def test_get_volume_stats_fail(self): - - mock_commands = { - 'ShowLicense': self.cli_data.get_test_show_license(), - 'ShowLV': FAKE_ERROR_RETURN, - } - self._driver_setup(mock_commands) - - self.assertRaises( - exception.InfortrendCliException, - self.driver.get_volume_stats) - - @mock.patch.object(common_cli.LOG, 'info', mock.Mock()) - def test_create_snapshot(self): - - fake_partition_id = self.cli_data.fake_partition_id[0] - fake_snapshot_id = self.cli_data.fake_snapshot_id[0] - - mock_commands = { - 'CreateSnapshot': SUCCEED, - 'ShowSnapshot': self.cli_data.get_test_show_snapshot( - partition_id=fake_partition_id, - snapshot_id=fake_snapshot_id), - 'ShowPartition': 
self.cli_data.get_test_show_partition(), - } - self._driver_setup(mock_commands) - - model_update = self.driver.create_snapshot(self.cli_data.test_snapshot) - - self.assertEqual(fake_snapshot_id, model_update['provider_location']) - - @mock.patch.object(common_cli.LOG, 'info', mock.Mock()) - def test_create_snapshot_without_partition_id(self): - - fake_partition_id = self.cli_data.fake_partition_id[0] - fake_snapshot_id = self.cli_data.fake_snapshot_id[0] - test_snapshot = self.cli_data.test_snapshot - - mock_commands = { - 'CreateSnapshot': SUCCEED, - 'ShowSnapshot': self.cli_data.get_test_show_snapshot( - partition_id=fake_partition_id, - snapshot_id=fake_snapshot_id), - 'ShowPartition': FAKE_ERROR_RETURN, - } - self._driver_setup(mock_commands) - - self.assertRaises( - exception.InfortrendCliException, - self.driver.create_snapshot, - test_snapshot) - - def test_create_snapshot_with_create_fail(self): - - fake_partition_id = self.cli_data.fake_partition_id[0] - fake_snapshot_id = self.cli_data.fake_snapshot_id[0] - test_snapshot = self.cli_data.test_snapshot - - mock_commands = { - 'CreateSnapshot': FAKE_ERROR_RETURN, - 'ShowSnapshot': self.cli_data.get_test_show_snapshot( - partition_id=fake_partition_id, - snapshot_id=fake_snapshot_id), - 'ShowPartition': self.cli_data.get_test_show_partition(), - } - self._driver_setup(mock_commands) - - self.assertRaises( - exception.InfortrendCliException, - self.driver.create_snapshot, - test_snapshot) - - def test_create_snapshot_with_show_fail(self): - - test_snapshot = self.cli_data.test_snapshot - - mock_commands = { - 'CreateSnapshot': SUCCEED, - 'ShowSnapshot': FAKE_ERROR_RETURN, - 'ShowPartition': self.cli_data.get_test_show_partition(), - } - self._driver_setup(mock_commands) - - self.assertRaises( - exception.InfortrendCliException, - self.driver.create_snapshot, - test_snapshot) - - @mock.patch.object(common_cli.LOG, 'info') - def test_delete_snapshot(self, log_info): - - test_snapshot = 
self.cli_data.test_snapshot - - mock_commands = { - 'ShowReplica': self.cli_data.get_test_show_replica_detail(), - 'DeleteSnapshot': SUCCEED, - } - self._driver_setup(mock_commands) - - self.driver.delete_snapshot(test_snapshot) - - self.assertEqual(1, log_info.call_count) - - def test_delete_snapshot_without_provider_location(self): - - test_snapshot = self.cli_data.test_snapshot - - self.driver = self._get_driver(self.configuration) - self.driver._get_raid_snapshot_id = mock.Mock(return_value=None) - - self.assertRaises( - exception.VolumeBackendAPIException, - self.driver.delete_snapshot, - test_snapshot) - - def test_delete_snapshot_with_fail(self): - - test_snapshot = self.cli_data.test_snapshot - - mock_commands = { - 'ShowReplica': self.cli_data.get_test_show_replica_detail(), - 'DeleteSnapshot': FAKE_ERROR_RETURN, - } - self._driver_setup(mock_commands) - - self.assertRaises( - exception.InfortrendCliException, - self.driver.delete_snapshot, - test_snapshot) - - @mock.patch.object(common_cli.LOG, 'warning', mock.Mock()) - def test_delete_snapshot_with_sync_pair(self): - - test_snapshot = self.cli_data.test_snapshot - - mock_commands = { - 'ShowReplica': - self.cli_data.get_test_show_replica_detail_for_si_sync_pair(), - 'DeleteSnapshot': FAKE_ERROR_RETURN, - } - self._driver_setup(mock_commands) - - self.assertRaises( - exception.VolumeDriverException, - self.driver.delete_snapshot, - test_snapshot) - - @mock.patch('oslo_service.loopingcall.FixedIntervalLoopingCall', - new=utils.ZeroIntervalLoopingCall) - @mock.patch.object(common_cli.LOG, 'info') - def test_create_volume_from_snapshot(self, log_info): - - test_snapshot = self.cli_data.test_snapshot - test_snapshot_id = self.cli_data.fake_snapshot_id[0] - test_dst_volume = self.cli_data.test_dst_volume - test_dst_volume_id = test_dst_volume['id'].replace('-', '') - test_dst_part_id = self.cli_data.fake_partition_id[1] - test_model_update = { - 'provider_location': 'partition_id^%s@system_id^%s' % ( - 
self.cli_data.fake_partition_id[1], - int(self.cli_data.fake_system_id[0], 16)) - } - mock_commands = { - 'ShowSnapshot': - self.cli_data.get_test_show_snapshot_detail_filled_block(), - 'CreatePartition': SUCCEED, - 'ShowPartition': self.cli_data.get_test_show_partition(), - 'ShowDevice': self.cli_data.get_test_show_device(), - 'CreateReplica': SUCCEED, - 'ShowLV': self._mock_show_lv, - 'ShowReplica': - self.cli_data.get_test_show_replica_detail_for_migrate( - test_snapshot_id, test_dst_part_id, test_dst_volume_id), - 'DeleteReplica': SUCCEED, - } - self._driver_setup(mock_commands) - - model_update = self.driver.create_volume_from_snapshot( - test_dst_volume, test_snapshot) - - self.assertDictEqual(test_model_update, model_update) - self.assertEqual(1, log_info.call_count) - - @mock.patch('oslo_service.loopingcall.FixedIntervalLoopingCall', - new=utils.ZeroIntervalLoopingCall) - @mock.patch.object(common_cli.LOG, 'info') - def test_create_volume_from_snapshot_with_different_size(self, log_info): - - test_snapshot = self.cli_data.test_snapshot - test_snapshot_id = self.cli_data.fake_snapshot_id[0] - test_dst_volume = self.cli_data.test_dst_volume - test_dst_volume['size'] = 10 - test_dst_volume_id = test_dst_volume['id'].replace('-', '') - test_dst_part_id = self.cli_data.fake_partition_id[1] - test_model_update = { - 'provider_location': 'partition_id^%s@system_id^%s' % ( - self.cli_data.fake_partition_id[1], - int(self.cli_data.fake_system_id[0], 16)) - } - mock_commands = { - 'ShowSnapshot': - self.cli_data.get_test_show_snapshot_detail_filled_block(), - 'CreatePartition': SUCCEED, - 'ShowPartition': self.cli_data.get_test_show_partition(), - 'ShowDevice': self.cli_data.get_test_show_device(), - 'CreateReplica': SUCCEED, - 'ShowLV': self._mock_show_lv, - 'ShowReplica': - self.cli_data.get_test_show_replica_detail_for_migrate( - test_snapshot_id, test_dst_part_id, test_dst_volume_id), - 'DeleteReplica': SUCCEED, - } - self._driver_setup(mock_commands) - - 
model_update = self.driver.create_volume_from_snapshot( - test_dst_volume, test_snapshot) - self.assertDictEqual(test_model_update, model_update) - self.assertEqual(1, log_info.call_count) - self.assertEqual(10, test_dst_volume['size']) - - @mock.patch('oslo_service.loopingcall.FixedIntervalLoopingCall', - new=utils.ZeroIntervalLoopingCall) - @mock.patch.object(common_cli.LOG, 'info') - def test_create_volume_from_snapshot_without_filled_block(self, log_info): - - test_snapshot = self.cli_data.test_snapshot - test_snapshot_id = self.cli_data.fake_snapshot_id[0] - test_dst_volume = self.cli_data.test_dst_volume - test_dst_volume_id = test_dst_volume['id'].replace('-', '') - test_dst_part_id = self.cli_data.fake_partition_id[1] - test_src_part_id = self.cli_data.fake_partition_id[0] - test_model_update = { - 'provider_location': 'partition_id^%s@system_id^%s' % ( - self.cli_data.fake_partition_id[1], - int(self.cli_data.fake_system_id[0], 16)) - } - mock_commands = { - 'ShowSnapshot': self.cli_data.get_test_show_snapshot_detail(), - 'CreatePartition': SUCCEED, - 'ShowPartition': self.cli_data.get_test_show_partition(), - 'ShowDevice': self.cli_data.get_test_show_device(), - 'CreateReplica': SUCCEED, - 'ShowLV': self._mock_show_lv, - 'ShowReplica': [ - self.cli_data.get_test_show_replica_detail_for_migrate( - test_src_part_id, test_dst_part_id, test_dst_volume_id), - self.cli_data.get_test_show_replica_detail_for_migrate( - test_snapshot_id, test_dst_part_id, test_dst_volume_id), - ], - 'DeleteReplica': SUCCEED, - } - self._driver_setup(mock_commands) - - model_update = self.driver.create_volume_from_snapshot( - test_dst_volume, test_snapshot) - - self.assertDictEqual(test_model_update, model_update) - self.assertEqual(1, log_info.call_count) - - def test_create_volume_from_snapshot_without_provider_location( - self): - - test_snapshot = self.cli_data.test_snapshot - test_dst_volume = self.cli_data.test_dst_volume - - self.driver = self._get_driver(self.configuration) 
- self.driver._get_raid_snapshot_id = mock.Mock(return_value=None) - - self.assertRaises( - exception.VolumeBackendAPIException, - self.driver.create_volume_from_snapshot, - test_dst_volume, - test_snapshot) - - @mock.patch.object(common_cli.LOG, 'info', mock.Mock()) - def test_initialize_connection(self): - - test_volume = self.cli_data.test_volume - test_partition_id = self.cli_data.fake_partition_id[0] - test_connector = copy.deepcopy(self.cli_data.test_connector_iscsi) - test_iscsi_properties = self.cli_data.test_iscsi_properties - test_target_protal = [test_iscsi_properties['data']['target_portal']] - test_target_iqn = [test_iscsi_properties['data']['target_iqn']] - - test_connector['multipath'] = False - - mock_commands = { - 'ShowChannel': self.cli_data.get_test_show_channel(), - 'ShowMap': self.cli_data.get_test_show_map(), - 'ShowIQN': self.cli_data.get_test_show_iqn(), - 'CreateMap': SUCCEED, - 'ShowNet': self.cli_data.get_test_show_net(), - 'ExecuteCommand': self.cli_data.get_fake_discovery( - test_target_iqn, test_target_protal), - } - self._driver_setup(mock_commands) - - properties = self.driver.initialize_connection( - test_volume, test_connector) - - self.assertDictEqual(test_iscsi_properties, properties) - - expect_cli_cmd = [ - mock.call('CreateMap', 'part', test_partition_id, '2', '0', '0', - 'iqn=%s' % test_connector['initiator']), - ] - self._assert_cli_has_calls(expect_cli_cmd) - - @mock.patch.object(common_cli.LOG, 'info', mock.Mock()) - def test_initialize_connection_with_iqn_not_exist(self): - - test_volume = self.cli_data.test_volume - test_partition_id = self.cli_data.fake_partition_id[0] - test_initiator = copy.deepcopy(self.cli_data.fake_initiator_iqn[1]) - test_connector = copy.deepcopy(self.cli_data.test_connector_iscsi) - test_iscsi_properties = self.cli_data.test_iscsi_properties - test_target_protal = [test_iscsi_properties['data']['target_portal']] - test_target_iqn = [test_iscsi_properties['data']['target_iqn']] - - 
test_connector['multipath'] = False - test_connector['initiator'] = test_initiator - - mock_commands = { - 'ShowChannel': self.cli_data.get_test_show_channel(), - 'ShowMap': self.cli_data.get_test_show_map(), - 'ShowIQN': self.cli_data.get_test_show_iqn(), - 'CreateIQN': SUCCEED, - 'CreateMap': SUCCEED, - 'ShowNet': self.cli_data.get_test_show_net(), - 'ExecuteCommand': self.cli_data.get_fake_discovery( - test_target_iqn, test_target_protal), - } - self._driver_setup(mock_commands) - - properties = self.driver.initialize_connection( - test_volume, test_connector) - - self.assertDictEqual(test_iscsi_properties, properties) - - expect_cli_cmd = [ - mock.call('CreateIQN', test_initiator, test_initiator[-16:]), - mock.call('CreateMap', 'part', test_partition_id, '2', '0', '0', - 'iqn=%s' % test_connector['initiator']), - ] - self._assert_cli_has_calls(expect_cli_cmd) - - @mock.patch.object(common_cli.LOG, 'info', mock.Mock()) - def test_initialize_connection_with_empty_map(self): - - test_volume = self.cli_data.test_volume - test_connector = copy.deepcopy(self.cli_data.test_connector_iscsi) - test_iscsi_properties = self.cli_data.test_iscsi_properties_empty_map - test_target_protal = [test_iscsi_properties['data']['target_portal']] - test_target_iqn = [test_iscsi_properties['data']['target_iqn']] - - test_connector['multipath'] = False - - mock_commands = { - 'ShowChannel': self.cli_data.get_test_show_channel(), - 'ShowMap': self.cli_data.get_test_show_empty_list(), - 'ShowIQN': self.cli_data.get_test_show_iqn(), - 'CreateMap': SUCCEED, - 'ShowNet': self.cli_data.get_test_show_net(), - 'ExecuteCommand': self.cli_data.get_fake_discovery( - test_target_iqn, test_target_protal), - } - self._driver_setup(mock_commands) - - properties = self.driver.initialize_connection( - test_volume, test_connector) - - self.assertDictEqual( - self.cli_data.test_iscsi_properties_empty_map, properties) - - def test_initialize_connection_with_create_map_fail(self): - - test_volume = 
self.cli_data.test_volume - test_connector = self.cli_data.test_connector_iscsi - - mock_commands = { - 'ShowChannel': self.cli_data.get_test_show_channel_r_model(), - 'ShowMap': self.cli_data.get_test_show_map(), - 'ShowIQN': self.cli_data.get_test_show_iqn(), - 'CreateMap': FAKE_ERROR_RETURN, - 'ShowNet': SUCCEED, - } - self._driver_setup(mock_commands) - - self.assertRaises( - exception.InfortrendCliException, - self.driver.initialize_connection, - test_volume, - test_connector) - - def test_initialize_connection_with_get_ip_fail(self): - - test_volume = self.cli_data.test_volume - test_connector = self.cli_data.test_connector_iscsi - - mock_commands = { - 'ShowChannel': self.cli_data.get_test_show_channel(), - 'ShowMap': self.cli_data.get_test_show_map(), - 'ShowIQN': self.cli_data.get_test_show_iqn(), - 'CreateMap': SUCCEED, - 'ShowNet': FAKE_ERROR_RETURN, - } - self._driver_setup(mock_commands) - - self.assertRaises( - exception.InfortrendCliException, - self.driver.initialize_connection, - test_volume, - test_connector) - - @mock.patch.object(common_cli.LOG, 'info', mock.Mock()) - def test_initialize_connection_with_mcs(self): - - configuration = copy.copy(self.configuration) - configuration.use_multipath_for_image_xfer = True - - test_volume = self.cli_data.test_volume - test_partition_id = self.cli_data.fake_partition_id[0] - test_connector = copy.deepcopy(self.cli_data.test_connector_iscsi) - test_iscsi_properties = self.cli_data.test_iscsi_properties_with_mcs - test_target_protal = [test_iscsi_properties['data']['target_portal']] - test_target_iqn = [test_iscsi_properties['data']['target_iqn']] - - test_connector['multipath'] = False - - mock_commands = { - 'ShowChannel': self.cli_data.get_test_show_channel_with_mcs(), - 'ShowMap': self.cli_data.get_test_show_map(), - 'ShowIQN': self.cli_data.get_test_show_iqn(), - 'CreateMap': SUCCEED, - 'ShowNet': self.cli_data.get_test_show_net(), - 'ExecuteCommand': self.cli_data.get_fake_discovery( - 
test_target_iqn, test_target_protal), - } - self._driver_setup(mock_commands, configuration) - - properties = self.driver.initialize_connection( - test_volume, test_connector) - - self.assertDictEqual(test_iscsi_properties, properties) - - expect_cli_cmd = [ - mock.call('CreateMap', 'part', test_partition_id, '1', '0', '2', - 'iqn=%s' % test_connector['initiator']), - ] - self._assert_cli_has_calls(expect_cli_cmd) - - @mock.patch.object(common_cli.LOG, 'info', mock.Mock()) - def test_extend_volume(self): - - test_volume = self.cli_data.test_volume - test_partition_id = self.cli_data.fake_partition_id[0] - test_new_size = 10 - test_expand_size = test_new_size - test_volume['size'] - - mock_commands = { - 'SetPartition': SUCCEED, - } - self._driver_setup(mock_commands) - - self.driver.extend_volume(test_volume, test_new_size) - - expect_cli_cmd = [ - mock.call('SetPartition', 'expand', test_partition_id, - 'size=%sGB' % test_expand_size), - ] - self._assert_cli_has_calls(expect_cli_cmd) - - @mock.patch.object(common_cli.LOG, 'info', mock.Mock()) - def test_extend_volume_mb(self): - - test_volume = self.cli_data.test_volume - test_partition_id = self.cli_data.fake_partition_id[0] - test_new_size = 5.5 - test_expand_size = round((test_new_size - test_volume['size']) * 1024) - - mock_commands = { - 'SetPartition': SUCCEED, - } - self._driver_setup(mock_commands) - - self.driver.extend_volume(test_volume, test_new_size) - - expect_cli_cmd = [ - mock.call('SetPartition', 'expand', test_partition_id, - 'size=%sMB' % test_expand_size), - ] - self._assert_cli_has_calls(expect_cli_cmd) - - def test_extend_volume_fail(self): - - test_volume = self.cli_data.test_volume - test_new_size = 10 - - mock_commands = { - 'SetPartition': FAKE_ERROR_RETURN, - } - self._driver_setup(mock_commands) - - self.assertRaises( - exception.InfortrendCliException, - self.driver.extend_volume, - test_volume, - test_new_size) - - @mock.patch.object(common_cli.LOG, 'info', mock.Mock()) - def 
test_terminate_connection(self): - - test_volume = self.cli_data.test_volume - test_partition_id = self.cli_data.fake_partition_id[0] - test_connector = self.cli_data.test_connector_iscsi - - mock_commands = { - 'DeleteMap': SUCCEED, - 'ShowMap': self.cli_data.get_test_show_map(), - 'DeleteIQN': SUCCEED, - } - self._driver_setup(mock_commands) - - self.driver.terminate_connection(test_volume, test_connector) - - expect_cli_cmd = [ - mock.call('DeleteMap', 'part', test_partition_id, '-y'), - mock.call('ShowMap'), - mock.call('DeleteIQN', test_connector['initiator'][-16:]), - ] - self._assert_cli_has_calls(expect_cli_cmd) - - def test_terminate_connection_fail(self): - - test_volume = self.cli_data.test_volume - test_connector = self.cli_data.test_connector_iscsi - - mock_commands = { - 'DeleteMap': FAKE_ERROR_RETURN, - } - self._driver_setup(mock_commands) - - self.assertRaises( - exception.InfortrendCliException, - self.driver.terminate_connection, - test_volume, - test_connector) - - @mock.patch('oslo_service.loopingcall.FixedIntervalLoopingCall', - new=utils.ZeroIntervalLoopingCall) - def test_migrate_volume(self): - - test_host = copy.deepcopy(self.cli_data.test_migrate_host) - fake_pool = copy.deepcopy(self.cli_data.fake_pool) - test_volume = self.cli_data.test_volume - test_volume_id = test_volume['id'].replace('-', '') - test_src_part_id = self.cli_data.fake_partition_id[0] - test_dst_part_id = self.cli_data.fake_partition_id[2] - test_pair_id = self.cli_data.fake_pair_id[0] - test_model_update = { - 'provider_location': 'partition_id^%s@system_id^%s' % ( - test_dst_part_id, - int(self.cli_data.fake_system_id[0], 16)) - } - - mock_commands = { - 'CreatePartition': SUCCEED, - 'ShowPartition': self.cli_data.get_test_show_partition( - test_volume_id, fake_pool['pool_id']), - 'CreateReplica': SUCCEED, - 'ShowLV': self._mock_show_lv_for_migrate, - 'ShowReplica': - self.cli_data.get_test_show_replica_detail_for_migrate( - test_src_part_id, test_dst_part_id, 
test_volume_id), - 'DeleteReplica': SUCCEED, - 'DeleteMap': SUCCEED, - 'DeletePartition': SUCCEED, - } - self._driver_setup(mock_commands) - - rc, model_update = self.driver.migrate_volume(test_volume, test_host) - - expect_cli_cmd = [ - mock.call('CreatePartition', - fake_pool['pool_id'], - test_volume['id'].replace('-', ''), - 'size=%s' % (test_volume['size'] * 1024), - ''), - mock.call('ShowPartition'), - mock.call('CreateReplica', - 'Cinder-Migrate', - 'part', test_src_part_id, - 'part', test_dst_part_id, - 'type=mirror'), - mock.call('ShowReplica', '-l'), - mock.call('DeleteReplica', test_pair_id, '-y'), - mock.call('DeleteMap', 'part', test_src_part_id, '-y'), - mock.call('DeletePartition', test_src_part_id, '-y'), - ] - self._assert_cli_has_calls(expect_cli_cmd) - self.assertTrue(rc) - self.assertDictEqual(test_model_update, model_update) - - @mock.patch.object(common_cli.LOG, 'warning') - def test_migrate_volume_with_invalid_storage(self, log_warning): - - fake_host = self.cli_data.fake_host - test_volume = self.cli_data.test_volume - - mock_commands = { - 'ShowLV': self._mock_show_lv_for_migrate, - } - self._driver_setup(mock_commands) - - rc, model_update = self.driver.migrate_volume(test_volume, fake_host) - - self.assertFalse(rc) - self.assertIsNone(model_update) - self.assertEqual(1, log_warning.call_count) - - def test_migrate_volume_with_get_part_id_fail(self): - - test_host = copy.deepcopy(self.cli_data.test_migrate_host) - test_volume = self.cli_data.test_volume - - mock_commands = { - 'CreatePartition': SUCCEED, - 'ShowPartition': self.cli_data.get_test_show_partition(), - 'DeleteMap': SUCCEED, - 'CreateReplica': SUCCEED, - 'CreateMap': SUCCEED, - 'ShowLV': self._mock_show_lv_for_migrate, - } - self._driver_setup(mock_commands) - - self.assertRaises( - exception.VolumeDriverException, - self.driver.migrate_volume, - test_volume, - test_host) - - def test_migrate_volume_with_create_replica_fail(self): - - test_host = 
copy.deepcopy(self.cli_data.test_migrate_host) - fake_pool = copy.deepcopy(self.cli_data.fake_pool) - test_volume = self.cli_data.test_volume - - mock_commands = { - 'CreatePartition': SUCCEED, - 'ShowPartition': self.cli_data.get_test_show_partition( - test_volume['id'].replace('-', ''), fake_pool['pool_id']), - 'DeleteMap': SUCCEED, - 'CreateReplica': FAKE_ERROR_RETURN, - 'CreateMap': SUCCEED, - 'ShowLV': self._mock_show_lv_for_migrate, - } - self._driver_setup(mock_commands) - - self.assertRaises( - exception.InfortrendCliException, - self.driver.migrate_volume, - test_volume, - test_host) - - @mock.patch('oslo_service.loopingcall.FixedIntervalLoopingCall', - new=utils.ZeroIntervalLoopingCall) - def test_migrate_volume_timeout(self): - - test_host = copy.deepcopy(self.cli_data.test_migrate_host) - fake_pool = copy.deepcopy(self.cli_data.fake_pool) - test_volume = self.cli_data.test_volume - test_volume_id = test_volume['id'].replace('-', '') - test_src_part_id = self.cli_data.fake_partition_id[0] - test_dst_part_id = self.cli_data.fake_partition_id[2] - - configuration = copy.copy(self.configuration) - configuration.infortrend_cli_timeout = 0 - - mock_commands = { - 'CreatePartition': SUCCEED, - 'ShowPartition': self.cli_data.get_test_show_partition( - test_volume_id, fake_pool['pool_id']), - 'CreateReplica': SUCCEED, - 'ShowLV': self._mock_show_lv_for_migrate, - 'ShowReplica': - self.cli_data.get_test_show_replica_detail_for_migrate( - test_src_part_id, test_dst_part_id, test_volume_id, - 'Copy'), - } - self._driver_setup(mock_commands, configuration) - - self.assertRaises( - exception.VolumeDriverException, - self.driver.migrate_volume, - test_volume, - test_host) - - def test_manage_existing_get_size(self): - - test_volume = self.cli_data.test_volume - test_ref_volume = self.cli_data.test_ref_volume - test_pool = self.cli_data.fake_lv_id[0] - test_partition_id = self.cli_data.fake_partition_id[2] - test_ref_volume_id = 
test_ref_volume['source-id'].replace('-', '') - - mock_commands = { - 'ShowPartition': self.cli_data.get_test_show_partition_detail( - 'cinder-unmanaged-%s' % test_ref_volume_id[:-17], test_pool), - 'ShowMap': SUCCEED, - } - - self._driver_setup(mock_commands) - - size = self.driver.manage_existing_get_size( - test_volume, test_ref_volume) - - expect_cli_cmd = [ - mock.call('ShowMap', 'part=%s' % test_partition_id), - ] - self._assert_cli_has_calls(expect_cli_cmd) - self.assertEqual(1, size) - - def test_manage_existing_get_size_with_import(self): - - test_volume = self.cli_data.test_volume - test_ref_volume = self.cli_data.test_ref_volume_with_import - test_pool = self.cli_data.fake_lv_id[0] - test_partition_id = self.cli_data.fake_partition_id[2] - - mock_commands = { - 'ShowPartition': self.cli_data.get_test_show_partition_detail( - test_ref_volume['source-name'], test_pool), - 'ShowMap': SUCCEED, - } - - self._driver_setup(mock_commands) - - size = self.driver.manage_existing_get_size( - test_volume, test_ref_volume) - - expect_cli_cmd = [ - mock.call('ShowMap', 'part=%s' % test_partition_id), - ] - self._assert_cli_has_calls(expect_cli_cmd) - self.assertEqual(1, size) - - def test_manage_existing_get_size_in_use(self): - - test_volume = self.cli_data.test_volume - test_ref_volume = self.cli_data.test_ref_volume - test_pool = self.cli_data.fake_lv_id[0] - test_ref_volume_id = test_ref_volume['source-id'].replace('-', '') - - mock_commands = { - 'ShowPartition': self.cli_data.get_test_show_partition_detail( - 'cinder-unmanaged-%s' % test_ref_volume_id[:-17], test_pool), - 'ShowMap': self.cli_data.get_test_show_map(), - } - self._driver_setup(mock_commands) - - self.assertRaises( - exception.VolumeBackendAPIException, - self.driver.manage_existing_get_size, - test_volume, - test_ref_volume) - - def test_manage_existing_get_size_no_source_id(self): - - test_volume = self.cli_data.test_volume - test_ref_volume = self.cli_data.test_dst_volume - self.driver = 
self._get_driver(self.configuration) - - self.assertRaises( - exception.ManageExistingInvalidReference, - self.driver.manage_existing_get_size, - test_volume, - test_ref_volume) - - def test_manage_existing_get_size_show_part_fail(self): - - test_volume = self.cli_data.test_volume - test_ref_volume = self.cli_data.test_ref_volume - - mock_commands = { - 'ShowPartition': FAKE_ERROR_RETURN, - 'ShowMap': SUCCEED, - } - self._driver_setup(mock_commands) - - self.assertRaises( - exception.InfortrendCliException, - self.driver.manage_existing_get_size, - test_volume, - test_ref_volume) - - def test_manage_existing_get_size_show_map_fail(self): - - test_volume = self.cli_data.test_volume - test_ref_volume = self.cli_data.test_ref_volume - test_pool = self.cli_data.fake_lv_id[0] - test_ref_volume_id = test_ref_volume['source-id'].replace('-', '') - - mock_commands = { - 'ShowPartition': self.cli_data.get_test_show_partition_detail( - 'cinder-unmanaged-%s' % test_ref_volume_id[:-17], test_pool), - 'ShowMap': FAKE_ERROR_RETURN, - } - self._driver_setup(mock_commands) - - self.assertRaises( - exception.InfortrendCliException, - self.driver.manage_existing_get_size, - test_volume, - test_ref_volume) - - @mock.patch.object(common_cli.LOG, 'info') - def test_manage_existing(self, log_info): - - test_volume = self.cli_data.test_volume - test_ref_volume = self.cli_data.test_ref_volume - test_pool = self.cli_data.fake_lv_id[0] - test_partition_id = self.cli_data.fake_partition_id[2] - test_ref_volume_id = test_ref_volume['source-id'].replace('-', '') - test_model_update = { - 'provider_location': 'partition_id^%s@system_id^%s' % ( - test_partition_id, - int(self.cli_data.fake_system_id[0], 16)) - } - - mock_commands = { - 'ShowPartition': self.cli_data.get_test_show_partition_detail( - 'cinder-unmanaged-%s' % test_ref_volume_id[:-17], test_pool), - 'SetPartition': SUCCEED, - 'ShowDevice': self.cli_data.get_test_show_device(), - } - self._driver_setup(mock_commands) - - model_update 
= self.driver.manage_existing( - test_volume, test_ref_volume) - - expect_cli_cmd = [ - mock.call('SetPartition', test_partition_id, - 'name=%s' % test_volume['id'].replace('-', '')), - ] - self._assert_cli_has_calls(expect_cli_cmd) - self.assertEqual(1, log_info.call_count) - self.assertDictEqual(test_model_update, model_update) - - def test_manage_existing_rename_fail(self): - - test_volume = self.cli_data.test_volume - test_ref_volume = self.cli_data.test_ref_volume - test_pool = self.cli_data.fake_lv_id[0] - test_ref_volume_id = test_ref_volume['source-id'].replace('-', '') - - mock_commands = { - 'ShowPartition': self.cli_data.get_test_show_partition_detail( - 'cinder-unmanaged-%s' % test_ref_volume_id[:-17], test_pool), - 'SetPartition': FAKE_ERROR_RETURN, - } - self._driver_setup(mock_commands) - - self.assertRaises( - exception.InfortrendCliException, - self.driver.manage_existing, - test_volume, - test_ref_volume) - - def test_manage_existing_with_part_not_found(self): - - test_volume = self.cli_data.test_volume - test_ref_volume = self.cli_data.test_ref_volume - - mock_commands = { - 'ShowPartition': - self.cli_data.get_test_show_partition_detail(), - 'SetPartition': SUCCEED, - } - self._driver_setup(mock_commands) - - self.assertRaises( - exception.ManageExistingInvalidReference, - self.driver.manage_existing, - test_volume, - test_ref_volume) - - @mock.patch.object(common_cli.LOG, 'info') - def test_manage_existing_with_import(self, log_info): - - test_volume = self.cli_data.test_volume - test_ref_volume = self.cli_data.test_ref_volume_with_import - test_pool = self.cli_data.fake_lv_id[0] - test_partition_id = self.cli_data.fake_partition_id[2] - test_model_update = { - 'provider_location': 'partition_id^%s@system_id^%s' % ( - test_partition_id, - int(self.cli_data.fake_system_id[0], 16)) - } - - mock_commands = { - 'ShowPartition': self.cli_data.get_test_show_partition_detail( - test_ref_volume['source-name'], test_pool), - 'SetPartition': SUCCEED, - 
'ShowDevice': self.cli_data.get_test_show_device(), - } - self._driver_setup(mock_commands) - - model_update = self.driver.manage_existing( - test_volume, test_ref_volume) - - expect_cli_cmd = [ - mock.call('SetPartition', test_partition_id, - 'name=%s' % test_volume['id'].replace('-', '')), - ] - self._assert_cli_has_calls(expect_cli_cmd) - self.assertEqual(1, log_info.call_count) - self.assertDictEqual(test_model_update, model_update) - - @mock.patch.object(common_cli.LOG, 'info') - def test_unmanage(self, log_info): - - test_volume = self.cli_data.test_volume - test_volume_id = test_volume['id'].replace('-', '') - test_partition_id = self.cli_data.fake_partition_id[0] - - mock_commands = { - 'SetPartition': SUCCEED, - } - self._driver_setup(mock_commands) - - self.driver.unmanage(test_volume) - - expect_cli_cmd = [ - mock.call( - 'SetPartition', - test_partition_id, - 'name=cinder-unmanaged-%s' % test_volume_id[:-17]), - ] - self._assert_cli_has_calls(expect_cli_cmd) - self.assertEqual(1, log_info.call_count) - - @mock.patch.object(common_cli.LOG, 'info') - def test_retype_without_change(self, log_info): - - test_volume = self.cli_data.test_volume - test_new_type = self.cli_data.test_new_type - test_diff = {'extra_specs': {}} - test_host = self.cli_data.test_migrate_host_2 - - self.driver = self._get_driver(self.configuration) - - rc = self.driver.retype( - None, test_volume, test_new_type, test_diff, test_host) - - self.assertTrue(rc) - self.assertEqual(1, log_info.call_count) - - @mock.patch.object(common_cli.LOG, 'warning') - def test_retype_with_change_provision(self, log_warning): - - test_volume = self.cli_data.test_volume - test_new_type = self.cli_data.test_new_type - test_diff = self.cli_data.test_diff - test_host = self.cli_data.test_migrate_host_2 - - self.driver = self._get_driver(self.configuration) - - rc = self.driver.retype( - None, test_volume, test_new_type, test_diff, test_host) - - self.assertFalse(rc) - self.assertEqual(1, 
log_warning.call_count) - - @mock.patch.object(common_cli.LOG, 'info', mock.Mock()) - def test_retype_with_migrate(self): - - fake_pool = copy.deepcopy(self.cli_data.fake_pool) - test_host = copy.deepcopy(self.cli_data.test_migrate_host) - test_volume = self.cli_data.test_volume - test_volume_id = test_volume['id'].replace('-', '') - test_new_type = self.cli_data.test_new_type - test_diff = self.cli_data.test_diff - test_src_part_id = self.cli_data.fake_partition_id[0] - test_dst_part_id = self.cli_data.fake_partition_id[2] - test_pair_id = self.cli_data.fake_pair_id[0] - test_model_update = { - 'provider_location': 'partition_id^%s@system_id^%s' % ( - test_dst_part_id, - int(self.cli_data.fake_system_id[0], 16)) - } - - mock_commands = { - 'ShowSnapshot': SUCCEED, - 'CreatePartition': SUCCEED, - 'ShowPartition': self.cli_data.get_test_show_partition( - test_volume_id, fake_pool['pool_id']), - 'CreateReplica': SUCCEED, - 'ShowLV': self._mock_show_lv_for_migrate, - 'ShowReplica': - self.cli_data.get_test_show_replica_detail_for_migrate( - test_src_part_id, test_dst_part_id, test_volume_id), - 'DeleteReplica': SUCCEED, - 'DeleteMap': SUCCEED, - 'DeletePartition': SUCCEED, - } - self._driver_setup(mock_commands) - - rc, model_update = self.driver.retype( - None, test_volume, test_new_type, test_diff, test_host) - - min_size = int(test_volume['size'] * 1024 * 0.2) - create_params = 'init=disable min=%sMB' % min_size - expect_cli_cmd = [ - mock.call('ShowSnapshot', 'part=%s' % test_src_part_id), - mock.call( - 'CreatePartition', - fake_pool['pool_id'], - test_volume['id'].replace('-', ''), - 'size=%s' % (test_volume['size'] * 1024), - create_params, - ), - mock.call('ShowPartition'), - mock.call( - 'CreateReplica', - 'Cinder-Migrate', - 'part', test_src_part_id, - 'part', test_dst_part_id, - 'type=mirror' - ), - mock.call('ShowReplica', '-l'), - mock.call('DeleteReplica', test_pair_id, '-y'), - mock.call('DeleteMap', 'part', test_src_part_id, '-y'), - 
mock.call('DeletePartition', test_src_part_id, '-y'), - ] - self._assert_cli_has_calls(expect_cli_cmd) - self.assertTrue(rc) - self.assertDictEqual(test_model_update, model_update) - - @mock.patch.object(common_cli.LOG, 'debug', mock.Mock()) - @mock.patch.object(common_cli.LOG, 'info', mock.Mock()) - def test_update_migrated_volume(self): - src_volume = self.cli_data.test_volume - dst_volume = copy.deepcopy(self.cli_data.test_dst_volume) - test_dst_part_id = self.cli_data.fake_partition_id[1] - dst_volume['provider_location'] = 'system_id^%s@partition_id^%s' % ( - int(self.cli_data.fake_system_id[0], 16), test_dst_part_id) - test_model_update = { - '_name_id': None, - 'provider_location': dst_volume['provider_location'], - } - - mock_commands = { - 'SetPartition': SUCCEED, - } - self._driver_setup(mock_commands) - - model_update = self.driver.update_migrated_volume( - None, src_volume, dst_volume, 'available') - - expect_cli_cmd = [ - mock.call('SetPartition', test_dst_part_id, - 'name=%s' % src_volume['id'].replace('-', '')), - ] - self._assert_cli_has_calls(expect_cli_cmd) - self.assertDictEqual(test_model_update, model_update) - - @mock.patch.object(common_cli.LOG, 'debug', mock.Mock()) - def test_update_migrated_volume_rename_fail(self): - src_volume = self.cli_data.test_volume - dst_volume = self.cli_data.test_dst_volume - dst_volume['_name_id'] = 'fake_name_id' - test_dst_part_id = self.cli_data.fake_partition_id[1] - dst_volume['provider_location'] = 'system_id^%s@partition_id^%s' % ( - int(self.cli_data.fake_system_id[0], 16), test_dst_part_id) - - mock_commands = { - 'SetPartition': FAKE_ERROR_RETURN - } - self._driver_setup(mock_commands) - model_update = self.driver.update_migrated_volume( - None, src_volume, dst_volume, 'available') - self.assertEqual({'_name_id': 'fake_name_id'}, model_update) diff --git a/cinder/tests/unit/volume/drivers/nec/__init__.py b/cinder/tests/unit/volume/drivers/nec/__init__.py deleted file mode 100644 index 
e69de29bb..000000000 diff --git a/cinder/tests/unit/volume/drivers/nec/test_volume.py b/cinder/tests/unit/volume/drivers/nec/test_volume.py deleted file mode 100644 index 49bd2b9b4..000000000 --- a/cinder/tests/unit/volume/drivers/nec/test_volume.py +++ /dev/null @@ -1,1177 +0,0 @@ -# -# Copyright (c) 2016 NEC Corporation. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import ddt -import mock -import unittest - -from cinder import exception -from cinder.volume.drivers.nec import cli -from cinder.volume.drivers.nec import volume_helper - - -xml_out = ''' - - - - -
- M310 -
-
-
- - -
- 0000 - LX - 287RbQoP7VdwR1WsPC2fZT - 1073741824 - 0000 - --- - MV -
-
- -
- 0001 - - backup_SDV0001 - 5368709120 - 0001 - (invalid attribute) - IV -
-
- -
- 0003 - LX - 31HxzqBiAFTUxxOlcVn3EA_back - 1073741824 - 0001 - --- - RV -
-
- -
- 0004 - LX - 287RbQoP7VdwR1WsPC2fZT_back - 1073741824 - 0000 - --- - RV -
-
- -
- 0005 - LX - 20000009910200140005 - 10737418240 - 0000 - --- - RV -
-
- -
- 0006 - LX - 287RbQoP7VdwR1WsPC2fZT_l - 10737418240 - 0000 - --- - IV -
-
- -
- 0007 - - 20000009910200140007 - 10737418240 - 0000 - --- - IV -
-
- -
- 0008 - - 20000009910200140008 - 10737418240 - 0000 - --- - IV -
-
- -
- 0009 - - 20000009910200140009 - 10737418240 - 0000 - --- - IV -
-
- -
- 000a - - 2000000991020012000A - 6442450944 - 0000 - --- - IV -
-
- -
- 000b - - 2000000991020012000B - 6442450944 - 0000 - --- - IV -
-
- -
- 000c - - 2000000991020012000C - 6442450944 - 0000 - --- - IV -
-
- -
- 000d - LX - yEUHrXa5AHMjOZZLb93eP - 6442450944 - 0001 - --- - IV -
-
- -
- 000e - LX - 4T7JpyqI3UuPlKeT9D3VQF - 6442450944 - 0001 - RPL - IV -
-
- -
- 0fff - - Pool0000_SYV0FFF - 8589934592 - 0000 - (invalid attribute) - --- -
-
-
- - -
- 0000 - 281320357888 - 84020297728 - 197300060160 -
-
- -
- 0001 - 89657442304 - 6710886400 - 82946555904 -
-
- -
- 0002 - 1950988894208 - 18446744073441116160 - 1951257329664 -
-
- -
- 0003 - 1950988894208 - 18446744073441116160 - 1951257329664 -
-
-
- - -
- 00-00 - 2100000991020012 -
-
- -
- 00-01 - 2200000991020012 -
-
- -
- 00-02 - 192.168.1.90 - Link Down -
-
- -
- 00-03 - 192.168.1.91 - Link Down -
-
- -
- 01-00 - 2900000991020012 -
-
- -
- 01-01 - 2A00000991020012 -
-
- -
- 01-02 - 192.168.2.92 - Link Down -
-
- -
- 01-03 - 192.168.2.93 - Link Up -
-
-
- - -
- LX - OpenStack1 -
-
- 1000-0090-FAA0-786B -
-
- 1000-0090-FAA0-786A -
-
- -
- WN - TNES120250 -
-
- 1000-0090-FA76-9605 -
-
- 1000-0090-FA76-9604 -
-
- -
- WN - TNES140098 -
-
- 1000-0090-FA53-302C -
-
- 1000-0090-FA53-302D -
-
- 0000 - 0005 -
-
- 0001 - 0006 -
-
- -
- LX - OpenStack0 - Multi-Target -
-
- 192.168.1.90:3260 -
-
- 192.168.1.91:3260 -
-
- 192.168.2.92:3260 -
-
- 192.168.2.93:3260 -
-
- iqn.1994-05.com.redhat:d1d8e8f23255 -
-
- iqn.2001-03.target0000 - 0000 - 0000 -
-
-
- Command Completed Successfully!! - 0 -
-
-''' - - -def patch_view_all(self, conf_ismview_path=None, delete_ismview=True, - cmd_lock=True): - return xml_out - - -def patch_get_conf_properties(self, conf=None): - conf = { - 'cli': None, - 'cli_fip': '10.64.169.250', - 'cli_user': 'sysadmin', - 'cli_password': 'sys123', - 'cli_privkey': 'sys123', - 'pool_pools': [0, 1], - 'pool_backup_pools': [2, 3], - 'pool_actual_free_capacity': 50000000000, - 'ldset_name': '', - 'ldset_controller_node_name': 'LX:node0', - 'ld_name_format': 'LX:%s', - 'ld_backupname_format': 'LX:%s_back', - 'ld_backend_max_count': 1024, - 'thread_timeout': 5, - 'ismview_dir': 'view', - 'ismview_alloptimize': '', - 'ssh_pool_port_number': 22, - 'diskarray_name': 'node0', - 'queryconfig_view': '', - 'ismview_path': None, - 'driver_name': 'MStorageISCSIDriver', - 'config_group': '', - 'configuration': '', - 'vendor_name': 'nec', - 'products': '', - 'backend_name': '', - 'portal_number': 2 - } - conf['cli'] = cli.MStorageISMCLI(conf) - return conf - - -def patch_execute(self, command, expected_status=[0], raise_exec=True): - return "success", 0, 0 - - -class DummyVolume(object): - - def __init__(self): - super(DummyVolume, self).__init__() - self.id = '' - self.size = 0 - self.status = '' - self.migration_status = '' - self.volume_id = '' - self.volume_type_id = '' - self.attach_status = '' - self.provider_location = '' - - -@ddt.ddt -class VolumeIDConvertTest(volume_helper.MStorageDSVDriver, unittest.TestCase): - - @mock.patch('cinder.volume.drivers.nec.volume_common.MStorageVolumeCommon.' - 'get_conf_properties', patch_get_conf_properties) - @mock.patch('cinder.volume.drivers.nec.cli.MStorageISMCLI.' 
- 'view_all', patch_view_all) - def setUp(self): - self.do_setup(None) - self.vol = DummyVolume() - self._properties = self.get_conf_properties() - self._cli = self._properties['cli'] - self.xml = self._cli.view_all() - (self.pools, - self.lds, - self.ldsets, - self.used_ldns, - self.hostports, - self.max_ld_count) = self.configs(self.xml) - - def tearDown(self): - pass - - @ddt.data(("AAAAAAAA", "LX:37mA82"), ("BBBBBBBB", "LX:3R9ZwR")) - @ddt.unpack - def test_volumeid_should_change_62scale(self, volid, ldname): - self.vol.id = volid - actual = self._convert_id2name(self.vol) - self.assertEqual(ldname, actual, - "ID:%(volid)s should be change to %(ldname)s" % - {'volid': volid, 'ldname': ldname}) - - @ddt.data(("AAAAAAAA", "LX:37mA82_back"), ("BBBBBBBB", "LX:3R9ZwR_back")) - @ddt.unpack - def test_snap_volumeid_should_change_62scale_andpostfix(self, - volid, - ldname): - self.vol.id = volid - actual = self._convert_id2snapname(self.vol) - self.assertEqual(ldname, actual, - "ID:%(volid)s should be change to %(ldname)s" % - {'volid': volid, 'ldname': ldname}) - - @ddt.data(("AAAAAAAA", "LX:37mA82_m"), ("BBBBBBBB", "LX:3R9ZwR_m")) - @ddt.unpack - def test_ddrsnap_volumeid_should_change_62scale_and_m(self, - volid, - ldname): - self.vol.id = volid - actual = self._convert_id2migratename(self.vol) - self.assertEqual(ldname, actual, - "ID:%(volid)s should be change to %(ldname)s" % - {'volid': volid, 'ldname': ldname}) - - @ddt.data(("AAAAAAAA", "LX:3R9ZwR", "target:BBBBBBBB")) - @ddt.unpack - def test_migrate_volumeid_should_change_62scale_andpostfix(self, - volid, - ldname, - status): - self.vol.id = volid - self.vol.migration_status = status - actual = self._convert_id2name_in_migrate(self.vol) - self.assertEqual(ldname, actual, - "ID:%(volid)s/%(status)s should be " - "change to %(ldname)s" % - {'volid': volid, - 'status': status, - 'ldname': ldname}) - - @ddt.data(("AAAAAAAA", "LX:37mA82", "deleting:BBBBBBBB"), - ("AAAAAAAA", "LX:37mA82", ""), - ("AAAAAAAA", 
"LX:37mA82", "success")) - @ddt.unpack - def test_NOTmigrate_volumeid_should_change_62scale(self, - volid, - ldname, - status): - self.vol.id = volid - self.vol.migration_status = status - actual = self._convert_id2name_in_migrate(self.vol) - self.assertEqual(ldname, actual, - "ID:%(volid)s/%(status)s should be " - "change to %(ldname)s" % - {'volid': volid, - 'status': status, - 'ldname': ldname}) - - -class NominatePoolLDTest(volume_helper.MStorageDSVDriver, unittest.TestCase): - - @mock.patch('cinder.volume.drivers.nec.volume_common.MStorageVolumeCommon.' - 'get_conf_properties', patch_get_conf_properties) - @mock.patch('cinder.volume.drivers.nec.cli.MStorageISMCLI.' - 'view_all', patch_view_all) - def setUp(self): - self.do_setup(None) - self.vol = DummyVolume() - self._properties = self.get_conf_properties() - self._cli = self._properties['cli'] - self.xml = self._cli.view_all() - (self.pools, - self.lds, - self.ldsets, - self.used_ldns, - self.hostports, - self.max_ld_count) = self.configs(self.xml) - self._numofld_per_pool = 1024 - - def tearDown(self): - pass - - def test_getxml(self): - self.assertIsNotNone(self.xml, "iSMview xml should not be None") - - def test_selectldn_for_normalvolume(self): - ldn = self._select_ldnumber(self.used_ldns, self.max_ld_count) - self.assertEqual(2, ldn, "selected ldn should be XXX") - - def test_selectpool_for_normalvolume(self): - self.vol.size = 10 - pool = self._select_leastused_poolnumber(self.vol, - self.pools, - self.xml) - self.assertEqual(1, pool, "selected pool should be 1") - # config:pool_pools=[1] - self.vol.size = 999999999999 - with self.assertRaisesRegexp(exception.VolumeBackendAPIException, - 'No available pools found.'): - pool = self._select_leastused_poolnumber(self.vol, - self.pools, - self.xml) - - @mock.patch('cinder.volume.drivers.nec.cli.MStorageISMCLI._execute', - patch_execute) - @mock.patch('cinder.volume.drivers.nec.volume_common.MStorageVolumeCommon.' 
- 'get_conf_properties', patch_get_conf_properties) - @mock.patch('cinder.volume.drivers.nec.cli.MStorageISMCLI.view_all', - patch_view_all) - def test_selectpool_for_migratevolume(self): - self.vol.id = "46045673-41e7-44a7-9333-02f07feab04b" - self.vol.size = 10 - pool = self._select_migrate_poolnumber(self.vol, - self.pools, - self.xml, - [1]) - self.assertEqual(1, pool, "selected pool should be 1") - self.vol.id = "1febb976-86d0-42ed-9bc0-4aa3e158f27d" - self.vol.size = 10 - pool = self._select_migrate_poolnumber(self.vol, - self.pools, - self.xml, - [1]) - self.assertEqual(-1, pool, "selected pool is the same pool(return -1)") - self.vol.size = 999999999999 - with self.assertRaisesRegexp(exception.VolumeBackendAPIException, - 'No available pools found.'): - pool = self._select_migrate_poolnumber(self.vol, - self.pools, - self.xml, - [1]) - - def test_selectpool_for_snapvolume(self): - self.vol.size = 10 - savePool1 = self.pools[1]['free'] - self.pools[1]['free'] = 0 - pool = self._select_dsv_poolnumber(self.vol, self.pools) - self.assertEqual(2, pool, "selected pool should be 2") - # config:pool_backup_pools=[2] - self.pools[1]['free'] = savePool1 - - if len(self.pools[0]['ld_list']) is 1024: - savePool2 = self.pools[2]['free'] - savePool3 = self.pools[3]['free'] - self.pools[2]['free'] = 0 - self.pools[3]['free'] = 0 - with self.assertRaisesRegexp(exception.VolumeBackendAPIException, - 'No available pools found.'): - pool = self._select_dsv_poolnumber(self.vol, self.pools) - self.pools[2]['free'] = savePool2 - self.pools[3]['free'] = savePool3 - - self.vol.size = 999999999999 - pool = self._select_dsv_poolnumber(self.vol, self.pools) - self.assertEqual(2, pool, "selected pool should be 2") - # config:pool_backup_pools=[2] - - def test_selectpool_for_ddrvolume(self): - self.vol.size = 10 - pool = self._select_ddr_poolnumber(self.vol, - self.pools, - self.xml, - 10) - self.assertEqual(2, pool, "selected pool should be 2") - # config:pool_backup_pools=[2] - - 
savePool2 = self.pools[2]['free'] - savePool3 = self.pools[3]['free'] - self.pools[2]['free'] = 0 - self.pools[3]['free'] = 0 - with self.assertRaisesRegexp(exception.VolumeBackendAPIException, - 'No available pools found.'): - pool = self._select_ddr_poolnumber(self.vol, - self.pools, - self.xml, - 10) - self.pools[2]['free'] = savePool2 - self.pools[3]['free'] = savePool3 - - self.vol.size = 999999999999 - with self.assertRaisesRegexp(exception.VolumeBackendAPIException, - 'No available pools found.'): - pool = self._select_ddr_poolnumber(self.vol, - self.pools, - self.xml, - 999999999999) - - def test_selectpool_for_volddrvolume(self): - self.vol.size = 10 - pool = self._select_volddr_poolnumber(self.vol, - self.pools, - self.xml, - 10) - self.assertEqual(1, pool, "selected pool should be 1") - # config:pool_backup_pools=[2] - - savePool0 = self.pools[0]['free'] - savePool1 = self.pools[1]['free'] - self.pools[0]['free'] = 0 - self.pools[1]['free'] = 0 - with self.assertRaisesRegexp(exception.VolumeBackendAPIException, - 'No available pools found.'): - pool = self._select_volddr_poolnumber(self.vol, - self.pools, - self.xml, - 10) - self.pools[0]['free'] = savePool0 - self.pools[1]['free'] = savePool1 - - self.vol.size = 999999999999 - with self.assertRaisesRegexp(exception.VolumeBackendAPIException, - 'No available pools found.'): - pool = self._select_volddr_poolnumber(self.vol, - self.pools, - self.xml, - 999999999999) - - -class VolumeCreateTest(volume_helper.MStorageDSVDriver, unittest.TestCase): - - @mock.patch('cinder.volume.drivers.nec.volume_common.MStorageVolumeCommon.' - 'get_conf_properties', patch_get_conf_properties) - @mock.patch('cinder.volume.drivers.nec.cli.MStorageISMCLI.' 
- 'view_all', patch_view_all) - def setUp(self): - self.do_setup(None) - self.vol = DummyVolume() - self._properties = self.get_conf_properties() - self._cli = self._properties['cli'] - self.xml = self._cli.view_all() - (self.pools, - self.lds, - self.ldsets, - self.used_ldns, - self.hostports, - self.max_ld_count) = self.configs(self.xml) - - def tearDown(self): - pass - - def test_validate_migrate_volume(self): - self.vol.id = "46045673-41e7-44a7-9333-02f07feab04b" - self.vol.size = 10 - self.vol.status = 'available' - self._validate_migrate_volume(self.vol, self.xml) - - self.vol.id = "46045673-41e7-44a7-9333-02f07feab04b" - self.vol.size = 10 - self.vol.status = 'creating' - with self.assertRaisesRegexp(exception.VolumeBackendAPIException, - 'Specified Logical Disk' - ' LX:287RbQoP7VdwR1WsPC2fZT' - ' is not available.'): - self._validate_migrate_volume(self.vol, self.xml) - - self.vol.id = "AAAAAAAA" - self.vol.size = 10 - self.vol.status = 'available' - with self.assertRaisesRegexp(exception.NotFound, - 'Logical Disk `LX:37mA82`' - ' could not be found.'): - self._validate_migrate_volume(self.vol, self.xml) - - @mock.patch('cinder.volume.drivers.nec.cli.MStorageISMCLI._execute', - patch_execute) - @mock.patch('cinder.volume.drivers.nec.cli.MStorageISMCLI.view_all', - patch_view_all) - def test_extend_volume(self): - self.vol.id = "46045673-41e7-44a7-9333-02f07feab04b" # MV - self.vol.size = 1 - self.vol.status = 'available' - self.extend_volume(self.vol, 10) - - self.vol.id = "00046058-d38e-7f60-67b7-59ed65e54225" # RV - self.vol.size = 1 - self.vol.status = 'available' - with self.assertRaisesRegexp(exception.VolumeBackendAPIException, - 'RPL Attribute Error. ' - 'RPL Attribute = RV.'): - self.extend_volume(self.vol, 10) - - -class BindLDTest(volume_helper.MStorageDSVDriver, unittest.TestCase): - - @mock.patch('cinder.volume.drivers.nec.volume_common.MStorageVolumeCommon.' 
- 'get_conf_properties', patch_get_conf_properties) - @mock.patch('cinder.volume.drivers.nec.cli.MStorageISMCLI.' - 'view_all', patch_view_all) - def setUp(self): - self.do_setup(None) - self.vol = DummyVolume() - self.src = DummyVolume() - self._properties = self.get_conf_properties() - self._cli = self._properties['cli'] - self.xml = self._cli.view_all() - (self.pools, - self.lds, - self.ldsets, - self.used_ldns, - self.hostports, - self.max_ld_count) = self.configs(self.xml) - mock_bindld = mock.Mock() - self._bind_ld = mock_bindld - self._bind_ld.return_value = 0, 0, 0 - - def test_bindld_CreateVolume(self): - self.vol.id = "AAAAAAAA" - self.vol.size = 1 - self.vol.migration_status = "success" - self.vol.volume_type_id = None - self.create_volume(self.vol) - self._bind_ld.assert_called_once_with( - self.vol, self.vol.size, None, - self._convert_id2name_in_migrate, - self._select_leastused_poolnumber) - - @mock.patch('cinder.volume.drivers.nec.cli.MStorageISMCLI._execute', - patch_execute) - @mock.patch('cinder.volume.drivers.nec.cli.MStorageISMCLI.view_all', - patch_view_all) - def test_bindld_CreateCloneVolume(self): - self.vol.id = "AAAAAAAA" - self.vol.size = 1 - self.vol.migration_status = "success" - self.src.id = "46045673-41e7-44a7-9333-02f07feab04b" - self.src.size = 1 - self.vol.volume_type_id = None - mock_query_DSV = mock.Mock() - self._cli.query_BV_SV_status = mock_query_DSV - self._cli.query_BV_SV_status.return_value = 'snap/active' - mock_query_DDR = mock.Mock() - self._cli.query_MV_RV_name = mock_query_DDR - self._cli.query_MV_RV_name.return_value = 'separated' - mock_backup = mock.Mock() - self._cli.backup_restore = mock_backup - self.create_cloned_volume(self.vol, self.src) - self._bind_ld.assert_called_once_with( - self.vol, self.vol.size, None, - self._convert_id2name, - self._select_leastused_poolnumber) - - -class BindLDTest_Snap(volume_helper.MStorageDSVDriver, unittest.TestCase): - - 
@mock.patch('cinder.volume.drivers.nec.volume_common.MStorageVolumeCommon.' - 'get_conf_properties', patch_get_conf_properties) - @mock.patch('cinder.volume.drivers.nec.cli.MStorageISMCLI.' - 'view_all', patch_view_all) - def setUp(self): - self.do_setup(None) - self.vol = DummyVolume() - self.snap = DummyVolume() - self._properties = self.get_conf_properties() - self._cli = self._properties['cli'] - self.xml = self._cli.view_all() - (self.pools, - self.lds, - self.ldsets, - self.used_ldns, - self.hostports, - self.max_ld_count) = self.configs(self.xml) - mock_bindld = mock.Mock() - self._bind_ld = mock_bindld - self._bind_ld.return_value = 0, 0, 0 - mock_bindsnap = mock.Mock() - self._create_snapshot = mock_bindsnap - - def test_bindld_CreateSnapshot(self): - self.snap.id = "AAAAAAAA" - self.snap.volume_id = "1febb976-86d0-42ed-9bc0-4aa3e158f27d" - self.snap.size = 10 - self.create_snapshot(self.snap) - self._create_snapshot.assert_called_once_with( - self.snap, self._properties['diskarray_name']) - - @mock.patch('cinder.volume.drivers.nec.cli.MStorageISMCLI._execute', - patch_execute) - @mock.patch('cinder.volume.drivers.nec.cli.MStorageISMCLI.view_all', - patch_view_all) - def test_bindld_CreateFromSnapshot(self): - self.vol.id = "AAAAAAAA" - self.vol.size = 1 - self.vol.migration_status = "success" - self.vol.volume_type_id = None - self.snap.id = "63410c76-2f12-4473-873d-74a63dfcd3e2" - self.snap.volume_id = "1febb976-86d0-42ed-9bc0-4aa3e158f27d" - mock_query = mock.Mock() - self._cli.query_BV_SV_status = mock_query - self._cli.query_BV_SV_status.return_value = 'snap/active' - mock_backup = mock.Mock() - self._cli.backup_restore = mock_backup - self.create_volume_from_snapshot(self.vol, self.snap) - self._bind_ld.assert_called_once_with( - self.vol, 1, None, - self._convert_id2name, - self._select_volddr_poolnumber, 1) - - -class ExportTest(volume_helper.MStorageDSVDriver, unittest.TestCase): - - 
@mock.patch('cinder.volume.drivers.nec.volume_common.MStorageVolumeCommon.' - 'get_conf_properties', patch_get_conf_properties) - @mock.patch('cinder.volume.drivers.nec.cli.MStorageISMCLI.' - 'view_all', patch_view_all) - def setUp(self): - self.do_setup(None) - self.vol = DummyVolume() - self._properties = self.get_conf_properties() - self._cli = self._properties['cli'] - self.xml = self._cli.view_all() - (self.pools, - self.lds, - self.ldsets, - self.used_ldns, - self.hostports, - self.max_ld_count) = self.configs(self.xml) - mock_getldset = mock.Mock() - self.get_ldset = mock_getldset - self.get_ldset.return_value = self.ldsets["LX:OpenStack0"] - - def tearDown(self): - pass - - @mock.patch('cinder.volume.drivers.nec.cli.MStorageISMCLI._execute', - patch_execute) - @mock.patch('cinder.volume.drivers.nec.cli.MStorageISMCLI.view_all', - patch_view_all) - def test_iscsi_portal(self): - self.vol.id = "46045673-41e7-44a7-9333-02f07feab04b" - self.vol.size = 10 - self.vol.status = None - self.vol.migration_status = None - connector = {'initiator': "iqn.1994-05.com.redhat:d1d8e8f23255"} - self.iscsi_do_export(None, self.vol, connector) - - @mock.patch('cinder.volume.drivers.nec.cli.MStorageISMCLI._execute', - patch_execute) - @mock.patch('cinder.volume.drivers.nec.cli.MStorageISMCLI.view_all', - patch_view_all) - def test_fc_do_export(self): - self.vol.id = "46045673-41e7-44a7-9333-02f07feab04b" - self.vol.size = 10 - self.vol.status = None - self.vol.migration_status = None - connector = {'wwpns': ["10000090FAA0786A", "10000090FAA0786B"]} - self.fc_do_export(None, self.vol, connector) - - @mock.patch('cinder.volume.drivers.nec.cli.MStorageISMCLI._execute', - patch_execute) - @mock.patch('cinder.volume.drivers.nec.cli.MStorageISMCLI.view_all', - patch_view_all) - def test_remove_export(self): - self.vol.id = "46045673-41e7-44a7-9333-02f07feab04b" - self.vol.size = 10 - self.vol.status = 'uploading' - self.vol.attach_status = 'attached' - self.vol.migration_status = 
None - self.vol.volume_type_id = None - context = mock.Mock() - ret = self.remove_export(context, self.vol) - self.assertIsNone(ret) - - self.vol.attach_status = None - - self.vol.status = 'downloading' - with self.assertRaisesRegexp(exception.VolumeBackendAPIException, - r'Failed to unregister Logical Disk from' - r' Logical Disk Set \(iSM31064\)'): - mock_del = mock.Mock() - self._cli.delldsetld = mock_del - self._cli.delldsetld.return_value = False, 'iSM31064' - self.remove_export(context, self.vol) - - self.vol.status = None - migstat = 'target:1febb976-86d0-42ed-9bc0-4aa3e158f27d' - self.vol.migration_status = migstat - ret = self.remove_export(context, self.vol) - self.assertIsNone(ret) - - def test_iscsi_initialize_connection(self): - self.vol.id = "46045673-41e7-44a7-9333-02f07feab04b" - loc = "127.0.0.1:3260:1 iqn.2010-10.org.openstack:volume-00000001 88" - self.vol.provider_location = loc - connector = {'initiator': "iqn.1994-05.com.redhat:d1d8e8f23255", - 'multipath': True} - info = self._iscsi_initialize_connection(self.vol, connector) - self.assertEqual('iscsi', info['driver_volume_type']) - self.assertEqual('iqn.2010-10.org.openstack:volume-00000001', - info['data']['target_iqn']) - self.assertEqual('127.0.0.1:3260', info['data']['target_portal']) - self.assertEqual(88, info['data']['target_lun']) - self.assertEqual('iqn.2010-10.org.openstack:volume-00000001', - info['data']['target_iqns'][0]) - self.assertEqual('127.0.0.1:3260', info['data']['target_portals'][0]) - self.assertEqual(88, info['data']['target_luns'][0]) - - @mock.patch('cinder.volume.drivers.nec.cli.MStorageISMCLI._execute', - patch_execute) - @mock.patch('cinder.volume.drivers.nec.cli.MStorageISMCLI.view_all', - patch_view_all) - def test_fc_initialize_connection(self): - self.vol.id = "46045673-41e7-44a7-9333-02f07feab04b" - self.vol.migration_status = None - connector = {'wwpns': ["10000090FAA0786A", "10000090FAA0786B"]} - info = self._fc_initialize_connection(self.vol, connector) - 
self.assertEqual('fibre_channel', info['driver_volume_type']) - self.assertEqual('2100000991020012', info['data']['target_wwn'][0]) - self.assertEqual('2200000991020012', info['data']['target_wwn'][1]) - self.assertEqual('2900000991020012', info['data']['target_wwn'][2]) - self.assertEqual('2A00000991020012', info['data']['target_wwn'][3]) - self.assertEqual( - '2100000991020012', - info['data']['initiator_target_map']['10000090FAA0786A'][0]) - self.assertEqual( - '2100000991020012', - info['data']['initiator_target_map']['10000090FAA0786B'][0]) - self.assertEqual( - '2200000991020012', - info['data']['initiator_target_map']['10000090FAA0786A'][1]) - self.assertEqual( - '2200000991020012', - info['data']['initiator_target_map']['10000090FAA0786B'][1]) - self.assertEqual( - '2900000991020012', - info['data']['initiator_target_map']['10000090FAA0786A'][2]) - self.assertEqual( - '2900000991020012', - info['data']['initiator_target_map']['10000090FAA0786B'][2]) - self.assertEqual( - '2A00000991020012', - info['data']['initiator_target_map']['10000090FAA0786A'][3]) - self.assertEqual( - '2A00000991020012', - info['data']['initiator_target_map']['10000090FAA0786B'][3]) - - @mock.patch('cinder.volume.drivers.nec.cli.MStorageISMCLI._execute', - patch_execute) - @mock.patch('cinder.volume.drivers.nec.cli.MStorageISMCLI.view_all', - patch_view_all) - def test_fc_terminate_connection(self): - self.vol.id = "46045673-41e7-44a7-9333-02f07feab04b" - connector = {'wwpns': ["10000090FAA0786A", "10000090FAA0786B"]} - info = self._fc_terminate_connection(self.vol, connector) - self.assertEqual('fibre_channel', info['driver_volume_type']) - self.assertEqual('2100000991020012', info['data']['target_wwn'][0]) - self.assertEqual('2200000991020012', info['data']['target_wwn'][1]) - self.assertEqual('2900000991020012', info['data']['target_wwn'][2]) - self.assertEqual('2A00000991020012', info['data']['target_wwn'][3]) - self.assertEqual( - '2100000991020012', - 
info['data']['initiator_target_map']['10000090FAA0786A'][0]) - self.assertEqual( - '2100000991020012', - info['data']['initiator_target_map']['10000090FAA0786B'][0]) - self.assertEqual( - '2200000991020012', - info['data']['initiator_target_map']['10000090FAA0786A'][1]) - self.assertEqual( - '2200000991020012', - info['data']['initiator_target_map']['10000090FAA0786B'][1]) - self.assertEqual( - '2900000991020012', - info['data']['initiator_target_map']['10000090FAA0786A'][2]) - self.assertEqual( - '2900000991020012', - info['data']['initiator_target_map']['10000090FAA0786B'][2]) - self.assertEqual( - '2A00000991020012', - info['data']['initiator_target_map']['10000090FAA0786A'][3]) - self.assertEqual( - '2A00000991020012', - info['data']['initiator_target_map']['10000090FAA0786B'][3]) - - -class DeleteDSVVolume_test(volume_helper.MStorageDSVDriver, - unittest.TestCase): - - @mock.patch('cinder.volume.drivers.nec.volume_common.MStorageVolumeCommon.' - 'get_conf_properties', patch_get_conf_properties) - @mock.patch('cinder.volume.drivers.nec.cli.MStorageISMCLI.' 
- 'view_all', patch_view_all) - def setUp(self): - self.do_setup(None) - self.vol = DummyVolume() - self._properties = self.get_conf_properties() - self._cli = self._properties['cli'] - self.xml = self._cli.view_all() - (self.pools, - self.lds, - self.ldsets, - self.used_ldns, - self.hostports, - self.max_ld_count) = self.configs(self.xml) - - @mock.patch('cinder.volume.drivers.nec.cli.MStorageISMCLI._execute', - patch_execute) - @mock.patch('cinder.volume.drivers.nec.cli.MStorageISMCLI.view_all', - patch_view_all) - def test_delete_snapshot(self): - self.vol.id = "63410c76-2f12-4473-873d-74a63dfcd3e2" - self.vol.volume_id = "1febb976-86d0-42ed-9bc0-4aa3e158f27d" - mock_query = mock.Mock() - self._cli.query_BV_SV_status = mock_query - self._cli.query_BV_SV_status.return_value = 'snap/active' - ret = self.delete_snapshot(self.vol) - self.assertIsNone(ret) - - -class NonDisruptiveBackup_test(volume_helper.MStorageDSVDriver, - unittest.TestCase): - @mock.patch('cinder.volume.drivers.nec.volume_common.MStorageVolumeCommon.' - 'get_conf_properties', patch_get_conf_properties) - @mock.patch('cinder.volume.drivers.nec.cli.MStorageISMCLI.' 
- 'view_all', patch_view_all) - def setUp(self): - self.do_setup(None) - self.vol = DummyVolume() - self.vol.id = "46045673-41e7-44a7-9333-02f07feab04b" - self.volvolume_id = "1febb976-86d0-42ed-9bc0-4aa3e158f27d" - self.volsize = 10 - self.volstatus = None - self.volmigration_status = None - self._properties = self.get_conf_properties() - self._cli = self._properties['cli'] - self.xml = self._cli.view_all() - (self.pools, - self.lds, - self.ldsets, - self.used_ldns, - self.hostports, - self.max_ld_count) = self.configs(self.xml) - - def test_validate_ld_exist(self): - ldname = self._validate_ld_exist( - self.lds, self.vol.id, self._properties['ld_name_format']) - self.assertEqual('LX:287RbQoP7VdwR1WsPC2fZT', ldname) - self.vol.id = "00000000-0000-0000-0000-6b6d96553b4b" - with self.assertRaisesRegexp(exception.NotFound, - 'Logical Disk `LX:XXXXXXXX`' - ' could not be found.'): - self._validate_ld_exist( - self.lds, self.vol.id, self._properties['ld_name_format']) - - def test_validate_iscsildset_exist(self): - connector = {'initiator': "iqn.1994-05.com.redhat:d1d8e8f23255"} - ldset = self._validate_iscsildset_exist(self.ldsets, connector) - self.assertEqual('LX:OpenStack0', ldset['ldsetname']) - connector = {'initiator': "iqn.1994-05.com.redhat:d1d8e8f23255XX"} - with self.assertRaisesRegexp(exception.NotFound, - 'Appropriate Logical Disk Set' - ' could not be found.'): - self._validate_iscsildset_exist(self.ldsets, connector) - - def test_validate_fcldset_exist(self): - connector = {'wwpns': ["10000090FAA0786A", "10000090FAA0786B"]} - ldset = self._validate_fcldset_exist(self.ldsets, connector) - self.assertEqual('LX:OpenStack1', ldset['ldsetname']) - connector = {'wwpns': ["10000090FAA0786X", "10000090FAA0786Y"]} - with self.assertRaisesRegexp(exception.NotFound, - 'Appropriate Logical Disk Set' - ' could not be found.'): - self._validate_fcldset_exist(self.ldsets, connector) - - def test_enumerate_iscsi_portals(self): - connector = {'initiator': 
"iqn.1994-05.com.redhat:d1d8e8f23255"} - ldset = self._validate_iscsildset_exist(self.ldsets, connector) - self.assertEqual('LX:OpenStack0', ldset['ldsetname']) - portal = self._enumerate_iscsi_portals(self.hostports, ldset) - self.assertEqual('192.168.1.90:3260', portal[0]) - self.assertEqual('192.168.1.91:3260', portal[1]) - self.assertEqual('192.168.2.92:3260', portal[2]) - self.assertEqual('192.168.2.93:3260', portal[3]) - - @mock.patch('cinder.volume.drivers.nec.volume_common.MStorageVolumeCommon.' - 'get_conf_properties', patch_get_conf_properties) - @mock.patch('cinder.volume.drivers.nec.cli.MStorageISMCLI.' - 'view_all', patch_view_all) - @mock.patch('cinder.volume.drivers.nec.cli.MStorageISMCLI._execute', - patch_execute) - def test_initialize_connection_snapshot(self): - connector = {'initiator': "iqn.1994-05.com.redhat:d1d8e8f23255"} - loc = "127.0.0.1:3260:1 iqn.2010-10.org.openstack:volume-00000001 88" - self.vol.provider_location = loc - ret = self.iscsi_initialize_connection_snapshot(self.vol, connector) - self.assertIsNotNone(ret) - self.assertEqual('iscsi', ret['driver_volume_type']) - - connector = {'wwpns': ["10000090FAA0786A", "10000090FAA0786B"]} - ret = self.fc_initialize_connection_snapshot(self.vol, connector) - self.assertIsNotNone(ret) - self.assertEqual('fibre_channel', ret['driver_volume_type']) - - @mock.patch('cinder.volume.drivers.nec.volume_common.MStorageVolumeCommon.' - 'get_conf_properties', patch_get_conf_properties) - @mock.patch('cinder.volume.drivers.nec.cli.MStorageISMCLI.' 
- 'view_all', patch_view_all) - @mock.patch('cinder.volume.drivers.nec.cli.MStorageISMCLI._execute', - patch_execute) - def test_terminate_connection_snapshot(self): - connector = {'initiator': "iqn.1994-05.com.redhat:d1d8e8f23255"} - self.iscsi_terminate_connection_snapshot(self.vol, connector) - - connector = {'wwpns': ["10000090FAA0786A", "10000090FAA0786B"]} - ret = self.fc_terminate_connection_snapshot(self.vol, connector) - self.assertEqual('fibre_channel', ret['driver_volume_type']) - - @mock.patch('cinder.volume.drivers.nec.volume_common.MStorageVolumeCommon.' - 'get_conf_properties', patch_get_conf_properties) - @mock.patch('cinder.volume.drivers.nec.cli.MStorageISMCLI.' - 'view_all', patch_view_all) - @mock.patch('cinder.volume.drivers.nec.cli.MStorageISMCLI._execute', - patch_execute) - def test_remove_export_snapshot(self): - self.remove_export_snapshot(None, self.vol) - - def test_backup_use_temp_snapshot(self): - ret = self.backup_use_temp_snapshot() - self.assertTrue(ret) diff --git a/cinder/tests/unit/volume/drivers/netapp/__init__.py b/cinder/tests/unit/volume/drivers/netapp/__init__.py deleted file mode 100644 index e69de29bb..000000000 diff --git a/cinder/tests/unit/volume/drivers/netapp/dataontap/__init__.py b/cinder/tests/unit/volume/drivers/netapp/dataontap/__init__.py deleted file mode 100644 index e69de29bb..000000000 diff --git a/cinder/tests/unit/volume/drivers/netapp/dataontap/client/__init__.py b/cinder/tests/unit/volume/drivers/netapp/dataontap/client/__init__.py deleted file mode 100644 index e69de29bb..000000000 diff --git a/cinder/tests/unit/volume/drivers/netapp/dataontap/client/fakes.py b/cinder/tests/unit/volume/drivers/netapp/dataontap/client/fakes.py deleted file mode 100644 index 40bb4e022..000000000 --- a/cinder/tests/unit/volume/drivers/netapp/dataontap/client/fakes.py +++ /dev/null @@ -1,1413 +0,0 @@ -# Copyright (c) - 2015, Tom Barron. All rights reserved. -# Copyright (c) - 2016 Mike Rooney. All rights reserved. 
-# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - - -from lxml import etree -import mock -from six.moves import urllib - -from cinder.tests.unit.volume.drivers.netapp.dataontap import fakes as fake -import cinder.volume.drivers.netapp.dataontap.client.api as netapp_api - - -FAKE_VOL_XML = b""" - open123 - online - 0 - 0 - 0 - false - false - """ - -FAKE_XML1 = b"""\ -abc\ -abc\ -""" - -FAKE_XML2 = b"""somecontent""" - -FAKE_NA_ELEMENT = netapp_api.NaElement(etree.XML(FAKE_VOL_XML)) - -FAKE_INVOKE_DATA = 'somecontent' - -FAKE_XML_STR = 'abc' - -FAKE_API_NAME = 'volume-get-iter' - -FAKE_API_NAME_ELEMENT = netapp_api.NaElement(FAKE_API_NAME) - -FAKE_NA_SERVER_STR = '127.0.0.1' - -FAKE_NA_SERVER = netapp_api.NaServer(FAKE_NA_SERVER_STR) - -FAKE_NA_SERVER_API_1_5 = netapp_api.NaServer(FAKE_NA_SERVER_STR) -FAKE_NA_SERVER_API_1_5.set_vfiler('filer') -FAKE_NA_SERVER_API_1_5.set_api_version(1, 5) - - -FAKE_NA_SERVER_API_1_14 = netapp_api.NaServer(FAKE_NA_SERVER_STR) -FAKE_NA_SERVER_API_1_14.set_vserver('server') -FAKE_NA_SERVER_API_1_14.set_api_version(1, 14) - - -FAKE_NA_SERVER_API_1_20 = netapp_api.NaServer(FAKE_NA_SERVER_STR) -FAKE_NA_SERVER_API_1_20.set_vfiler('filer') -FAKE_NA_SERVER_API_1_20.set_vserver('server') -FAKE_NA_SERVER_API_1_20.set_api_version(1, 20) - -VOLUME_VSERVER_NAME = 'fake_vserver' -VOLUME_NAMES = ('volume1', 'volume2') -VOLUME_NAME = 'volume1' - - -FAKE_QUERY = {'volume-attributes': None} - -FAKE_DES_ATTR = {'volume-attributes': ['volume-id-attributes', 
- 'volume-space-attributes', - 'volume-state-attributes', - 'volume-qos-attributes']} - -FAKE_CALL_ARGS_LIST = [mock.call(80), mock.call(8088), mock.call(443), - mock.call(8488)] - -FAKE_RESULT_API_ERR_REASON = netapp_api.NaElement('result') -FAKE_RESULT_API_ERR_REASON.add_attr('errno', '000') -FAKE_RESULT_API_ERR_REASON.add_attr('reason', 'fake_reason') - -FAKE_RESULT_API_ERRNO_INVALID = netapp_api.NaElement('result') -FAKE_RESULT_API_ERRNO_INVALID.add_attr('errno', '000') - -FAKE_RESULT_API_ERRNO_VALID = netapp_api.NaElement('result') -FAKE_RESULT_API_ERRNO_VALID.add_attr('errno', '14956') - -FAKE_RESULT_SUCCESS = netapp_api.NaElement('result') -FAKE_RESULT_SUCCESS.add_attr('status', 'passed') - -FAKE_HTTP_OPENER = urllib.request.build_opener() -INITIATOR_IQN = 'iqn.2015-06.com.netapp:fake_iqn' -USER_NAME = 'fake_user' -PASSWORD = 'passw0rd' -ENCRYPTED_PASSWORD = 'B351F145DA527445' - -NO_RECORDS_RESPONSE = etree.XML(""" - - 0 - -""") - -VOLUME_GET_NAME_RESPONSE = etree.XML(""" - - - - - %(volume)s - %(vserver)s - - - - 1 - -""" % {'volume': VOLUME_NAMES[0], 'vserver': VOLUME_VSERVER_NAME}) - -INVALID_GET_ITER_RESPONSE_NO_ATTRIBUTES = etree.XML(""" - - 1 - fake_tag - -""") - -INVALID_GET_ITER_RESPONSE_NO_RECORDS = etree.XML(""" - - - fake_tag - -""") - -INVALID_RESPONSE = etree.XML(""" - - 1 - -""") - -GET_OPERATIONAL_LIF_ADDRESSES_RESPONSE = etree.XML(""" - - 2 - - -
%(address1)s
-
- -
%(address2)s
-
-
-
-""" % {"address1": "1.2.3.4", "address2": "99.98.97.96"}) - -QOS_POLICY_GROUP_GET_ITER_RESPONSE = etree.XML(""" - - - - 30KB/S - 1 - 53 - fake_qos_policy_group_name - user_defined - 12496028-b641-11e5-abbd-123478563412 - cinder-iscsi - - - 1 - -""") - -VOLUME_LIST_INFO_RESPONSE = etree.XML(""" - - - - vol0 - 64_bit - online - 1441193750528 - 3161096192 - 1438032654336 - 0 - vfiler0 - aggr0 - volume - true - false - false - false - - - vol1 - 64_bit - online - 1441193750528 - 3161096192 - 1438032654336 - 0 - vfiler0 - aggr0 - volume - true - false - false - false - - - vol2 - 64_bit - offline - 1441193750528 - 3161096192 - 1438032654336 - 0 - vfiler0 - aggr0 - volume - true - false - false - false - - - vol3 - 64_bit - online - 1441193750528 - 3161096192 - 1438032654336 - 0 - vfiler0 - aggr0 - volume - true - false - false - false - - - -""") - -SNAPSHOT_INFO_FOR_PRESENT_NOT_BUSY_SNAPSHOT_CMODE = etree.XML(""" - - - - %(snapshot_name)s - False - %(vol_name)s - abcd-ef01-2345-6789 - - - 1 - -""" % { - 'snapshot_name': fake.SNAPSHOT['name'], - 'vol_name': fake.SNAPSHOT['volume_id'], -}) - -SNAPSHOT_INFO_FOR_PRESENT_BUSY_SNAPSHOT_CMODE = etree.XML(""" - - - - %(snapshot_name)s - True - %(vol_name)s - - - 1 - -""" % { - 'snapshot_name': fake.SNAPSHOT['name'], - 'vol_name': fake.SNAPSHOT['volume_id'], -}) - -SNAPSHOT_INFO_FOR_PRESENT_NOT_BUSY_SNAPSHOT_7MODE = etree.XML(""" - - - - %(snapshot_name)s - False - %(vol_name)s - abcd-ef01-2345-6789 - - - -""" % { - 'snapshot_name': fake.SNAPSHOT['name'], - 'vol_name': fake.SNAPSHOT['volume_id'], -}) - -SNAPSHOT_INFO_MARKED_FOR_DELETE_SNAPSHOT_7MODE = etree.XML(""" - - - - deleted_cinder_%(snapshot_name)s - False - %(vol_name)s - abcd-ef01-2345-6789 - - - -""" % { - 'snapshot_name': fake.SNAPSHOT['name'], - 'vol_name': fake.SNAPSHOT['volume_id'], -}) - -SNAPSHOT_INFO_MARKED_FOR_DELETE_SNAPSHOT_7MODE_BUSY = etree.XML(""" - - - - deleted_cinder_busy_snapshot - True - %(vol_name)s - abcd-ef01-2345-6789 - - - -""" % { - 
'snapshot_name': fake.SNAPSHOT['name'], - 'vol_name': fake.SNAPSHOT['volume_id'], -}) - -SNAPSHOT_INFO_FOR_PRESENT_BUSY_SNAPSHOT_7MODE = etree.XML(""" - - - - %(snapshot_name)s - True - %(vol_name)s - - - -""" % { - 'snapshot_name': fake.SNAPSHOT['name'], - 'vol_name': fake.SNAPSHOT['volume_id'], -}) - -SNAPSHOT_NOT_PRESENT_7MODE = etree.XML(""" - - - - NOT_THE_RIGHT_SNAPSHOT - false - %(vol_name)s - - - -""" % {'vol_name': fake.SNAPSHOT['volume_id']}) - -NODE_NAME = 'fake_node1' -NODE_NAMES = ('fake_node1', 'fake_node2') -VOLUME_AGGREGATE_NAME = 'fake_aggr1' -VOLUME_AGGREGATE_NAMES = ('fake_aggr1', 'fake_aggr2') - -AGGR_GET_ITER_RESPONSE = etree.XML(""" - - - - - - false - - - - 64_bit - 1758646411 - aggr - - - 512 - 30384 - 96 - 30384 - 30384 - 30384 - 243191 - 96 - 0 - - - 4082368507 - cluster3-01 - 4082368507 - cluster3-01 - - - off - 0 - - - active - block - 3 - cfo - true - false - true - false - false - false - unmirrored - online - 1 - - - true - false - /%(aggr1)s/plex0 - normal,active - - - block - false - false - false - /%(aggr1)s/plex0/rg0 - 0 - 0 - - - 0 - - - on - 16 - raid_dp, normal - raid_dp - online - - - false - - - 0 - 0 - true - true - 0 - 0 - 0 - 0 - 0 - 0 - 0 - 0 - 0 - - - 245760 - 0 - 95 - 45670400 - 943718400 - 898048000 - 0 - 898048000 - 897802240 - - - 1 - 0 - 0 - - %(aggr1)s - 15863632-ea49-49a8-9c88-2bd2d57c6d7a - - cluster3-01 - - unknown - - - - - false - - - - 64_bit - 706602229 - aggr - - - 528 - 31142 - 96 - 31142 - 31142 - 31142 - 1945584 - 96 - 0 - - - 4082368507 - cluster3-01 - 4082368507 - cluster3-01 - - - off - 0 - - - active - block - 10 - sfo - false - false - true - false - false - false - unmirrored - online - 1 - - - true - false - /%(aggr2)s/plex0 - normal,active - - - block - false - false - false - /%(aggr2)s/plex0/rg0 - 0 - 0 - - - block - false - false - false - /%(aggr2)s/plex0/rg1 - 0 - 0 - - - 0 - - - on - 8 - raid4, normal - raid4 - online - - - false - - - 0 - 0 - true - true - 0 - 0 - 0 - 0 - 0 - 0 - 0 - 0 - 
0 - - - 425984 - 0 - 15 - 6448431104 - 7549747200 - 1101316096 - 0 - 1101316096 - 1100890112 - - - 2 - 0 - 0 - - %(aggr2)s - 2a741934-1aaf-42dd-93ca-aaf231be108a - - cluster3-01 - - not_striped - - - 2 - -""" % { - 'aggr1': VOLUME_AGGREGATE_NAMES[0], - 'aggr2': VOLUME_AGGREGATE_NAMES[1], -}) - -AGGR_GET_SPACE_RESPONSE = etree.XML(""" - - - - - - - /%(aggr1)s/plex0 - - - /%(aggr1)s/plex0/rg0 - - - - - - - 45670400 - 943718400 - 898048000 - - %(aggr1)s - - - - - - /%(aggr2)s/plex0 - - - /%(aggr2)s/plex0/rg0 - - - /%(aggr2)s/plex0/rg1 - - - - - - - 4267659264 - 7549747200 - 3282087936 - - %(aggr2)s - - - 2 - -""" % { - 'aggr1': VOLUME_AGGREGATE_NAMES[0], - 'aggr2': VOLUME_AGGREGATE_NAMES[1], -}) - -AGGR_GET_NODE_RESPONSE = etree.XML(""" - - - - - %(node)s - - %(aggr)s - - - 1 - -""" % { - 'aggr': VOLUME_AGGREGATE_NAME, - 'node': NODE_NAME, -}) - -AGGREGATE_RAID_TYPE = 'raid_dp' -AGGR_GET_ITER_SSC_RESPONSE = etree.XML(""" - - - - - - - /%(aggr)s/plex0 - - - /%(aggr)s/plex0/rg0 - - - - - %(raid)s - true - - %(aggr)s - - - 1 - -""" % {'aggr': VOLUME_AGGREGATE_NAME, 'raid': AGGREGATE_RAID_TYPE}) - -AGGR_INFO_SSC = { - 'name': VOLUME_AGGREGATE_NAME, - 'raid-type': AGGREGATE_RAID_TYPE, - 'is-hybrid': True, -} - -AGGR_SIZE_TOTAL = 107374182400 -AGGR_SIZE_AVAILABLE = 59055800320 -AGGR_USED_PERCENT = 45 -AGGR_GET_ITER_CAPACITY_RESPONSE = etree.XML(""" - - - - - %(used)s - %(total_size)s - %(available_size)s - - %(aggr)s - - - 1 - -""" % { - 'aggr': VOLUME_AGGREGATE_NAME, - 'used': AGGR_USED_PERCENT, - 'available_size': AGGR_SIZE_AVAILABLE, - 'total_size': AGGR_SIZE_TOTAL, -}) - -VOLUME_SIZE_TOTAL = 19922944 -VOLUME_SIZE_AVAILABLE = 19791872 -VOLUME_GET_ITER_CAPACITY_RESPONSE = etree.XML(""" - - 1 - - - - %(available_size)s - %(total_size)s - - - - -""" % { - 'available_size': VOLUME_SIZE_AVAILABLE, - 'total_size': VOLUME_SIZE_TOTAL, -}) - -VOLUME_GET_ITER_LIST_RESPONSE = etree.XML(""" - - - - - %(volume1)s - %(vserver)s - - - - - %(volume2)s - %(vserver)s - - - - 2 - -""" % { 
- 'volume1': VOLUME_NAMES[0], - 'volume2': VOLUME_NAMES[1], - 'vserver': VOLUME_VSERVER_NAME, -}) - -VOLUME_GET_ITER_SSC_RESPONSE = etree.XML(""" - - - - - %(aggr)s - /%(volume)s - %(volume)s - %(vserver)s - rw - - - false - false - - - fake_qos_policy_group_name - - - true - none - 5 - 12345 - - - default - - - en_US - - - - 1 - -""" % { - 'aggr': VOLUME_AGGREGATE_NAMES[0], - 'volume': VOLUME_NAMES[0], - 'vserver': VOLUME_VSERVER_NAME, -}) - -VOLUME_INFO_SSC = { - 'name': VOLUME_NAMES[0], - 'vserver': VOLUME_VSERVER_NAME, - 'junction-path': '/%s' % VOLUME_NAMES[0], - 'aggregate': VOLUME_AGGREGATE_NAMES[0], - 'space-guarantee-enabled': True, - 'language': 'en_US', - 'percentage-snapshot-reserve': '5', - 'snapshot-policy': 'default', - 'type': 'rw', - 'size': '12345', - 'space-guarantee': 'none', - 'qos-policy-group': 'fake_qos_policy_group_name', -} - -SIS_GET_ITER_SSC_RESPONSE = etree.XML(""" - - - - false - enabled - 211106232532992 - 703687441776640 - - - 1 - -""") - -VOLUME_DEDUPE_INFO_SSC = { - 'compression': False, - 'dedupe': True, - 'logical-data-size': 211106232532992, - 'logical-data-limit': 703687441776640, -} - -SIS_GET_ITER_SSC_NO_LOGICAL_DATA_RESPONSE = etree.XML(""" - - - - false - disabled - - - 1 - -""") - -VOLUME_DEDUPE_INFO_SSC_NO_LOGICAL_DATA = { - 'compression': False, - 'dedupe': False, - 'logical-data-size': 0, - 'logical-data-limit': 1, -} - -CLONE_SPLIT_STATUS_RESPONSE = etree.XML(""" - - - 1234 - 316659348799488 - - -""") - -VOLUME_CLONE_SPLIT_STATUS = { - 'unsplit-size': 316659348799488, - 'unsplit-clone-count': 1234, -} - -CLONE_SPLIT_STATUS_NO_DATA_RESPONSE = etree.XML(""" - - - - -""") - -VOLUME_GET_ITER_ENCRYPTION_SSC_RESPONSE = etree.XML(""" - - - - true - - %(aggr)s - /%(volume)s - %(volume)s - %(vserver)s - rw - - - false - false - - - fake_qos_policy_group_name - - - true - none - 5 - 12345 - - - default - - - en_US - - - - 1 - -""" % { - 'aggr': VOLUME_AGGREGATE_NAMES[0], - 'volume': VOLUME_NAMES[0], - 'vserver': 
VOLUME_VSERVER_NAME, -}) - -STORAGE_DISK_GET_ITER_RESPONSE_PAGE_1 = etree.XML(""" - - - - cluster3-01:v4.16 - - - cluster3-01:v4.17 - - - cluster3-01:v4.18 - - - cluster3-01:v4.19 - - - cluster3-01:v4.20 - - - cluster3-01:v4.21 - - - cluster3-01:v4.22 - - - cluster3-01:v4.24 - - - cluster3-01:v4.25 - - - cluster3-01:v4.26 - - - next_tag_1 - 10 - -""") - -STORAGE_DISK_GET_ITER_RESPONSE_PAGE_2 = etree.XML(""" - - - - cluster3-01:v4.27 - - - cluster3-01:v4.28 - - - cluster3-01:v4.29 - - - cluster3-01:v4.32 - - - cluster3-01:v5.16 - - - cluster3-01:v5.17 - - - cluster3-01:v5.18 - - - cluster3-01:v5.19 - - - cluster3-01:v5.20 - - - cluster3-01:v5.21 - - - next_tag_2 - 10 - -""") - -STORAGE_DISK_GET_ITER_RESPONSE_PAGE_3 = etree.XML(""" - - - - cluster3-01:v5.22 - - - cluster3-01:v5.24 - - - cluster3-01:v5.25 - - - cluster3-01:v5.26 - - - cluster3-01:v5.27 - - - cluster3-01:v5.28 - - - cluster3-01:v5.29 - - - cluster3-01:v5.32 - - - 8 - -""") - -AGGREGATE_DISK_TYPES = ['SATA', 'SSD'] -STORAGE_DISK_GET_ITER_RESPONSE = etree.XML(""" - - - - cluster3-01:v5.19 - - %(type0)s - - - - cluster3-01:v5.20 - - %(type0)s - - - - cluster3-01:v5.20 - - %(type1)s - - - - cluster3-01:v5.20 - - %(type1)s - - - - 4 - -""" % { - 'type0': AGGREGATE_DISK_TYPES[0], - 'type1': AGGREGATE_DISK_TYPES[1], -}) - -SYSTEM_USER_CAPABILITY_GET_ITER_RESPONSE = etree.XML(""" - - - - object - - - api,api2,api3 - operation - - - - - 1 - -""") - -PERF_OBJECT_COUNTER_TOTAL_CP_MSECS_LABELS = [ - 'SETUP', 'PRE_P0', 'P0_SNAP_DEL', 'P1_CLEAN', 'P1_QUOTA', 'IPU_DISK_ADD', - 'P2V_INOFILE', 'P2V_INO_PUB', 'P2V_INO_PRI', 'P2V_FSINFO', 'P2V_DLOG1', - 'P2V_DLOG2', 'P2V_REFCOUNT', 'P2V_TOPAA', 'P2V_DF_SCORES_SUB', 'P2V_BM', - 'P2V_SNAP', 'P2V_DF_SCORES', 'P2V_VOLINFO', 'P2V_CONT', 'P2A_INOFILE', - 'P2A_INO', 'P2A_DLOG1', 'P2A_HYA', 'P2A_DLOG2', 'P2A_FSINFO', - 'P2A_IPU_BITMAP_GROW', 'P2A_REFCOUNT', 'P2A_TOPAA', 'P2A_HYABC', 'P2A_BM', - 'P2A_SNAP', 'P2A_VOLINFO', 'P2_FLUSH', 'P2_FINISH', 'P3_WAIT', - 'P3V_VOLINFO', 
'P3A_VOLINFO', 'P3_FINISH', 'P4_FINISH', 'P5_FINISH', -] - -PERF_OBJECT_COUNTER_LIST_INFO_WAFL_RESPONSE = etree.XML(""" - - - - No. of times 8.3 names are accessed per second. - access_8_3_names - diag - rate - per_sec - - - Array of counts of different types of CPs - - wafl_timer generated CP - snapshot generated CP - wafl_avail_bufs generated CP - dirty_blk_cnt generated CP - full NV-log generated CP,back-to-back CP - flush generated CP,sync generated CP - deferred back-to-back CP - low mbufs generated CP - low datavecs generated CP - nvlog replay takeover time limit CP - - cp_count - diag - delta - array - none - - - total_cp_msecs - Array of percentage time spent in different phases of CP - - %(labels)s - - cp_phase_times - diag - percent - array - percent - - - -""" % {'labels': ','.join(PERF_OBJECT_COUNTER_TOTAL_CP_MSECS_LABELS)}) - -PERF_OBJECT_GET_INSTANCES_SYSTEM_RESPONSE_CMODE = etree.XML(""" - - - - - - avg_processor_busy - 5674745133134 - - - system - %(node1)s:kernel:system - - - - - avg_processor_busy - 4077649009234 - - - system - %(node2)s:kernel:system - - - 1453412013 - -""" % {'node1': NODE_NAMES[0], 'node2': NODE_NAMES[1]}) - -PERF_OBJECT_GET_INSTANCES_SYSTEM_RESPONSE_7MODE = etree.XML(""" - - 1454146292 - - - system - - - avg_processor_busy - 13215732322 - - - - - """) - -PERF_OBJECT_INSTANCE_LIST_INFO_ITER_RESPONSE = etree.XML(""" - - - - system - %(node)s:kernel:system - - - 1 - -""" % {'node': NODE_NAME}) - -PERF_OBJECT_INSTANCE_LIST_INFO_RESPONSE = etree.XML(""" - - - - processor0 - - - processor1 - - - """) - -SYSTEM_GET_INFO_RESPONSE = etree.XML(""" - - - %(node)s - 4082368508 - SIMBOX - SIMBOX - NetApp - 4082368508 - 2593 - NetApp VSim - 999999 - 2 - 1599 - 0x40661 - 15 - 2199023255552 - 17592186044416 - 500 - true - - -""" % {'node': NODE_NAME}) - -ISCSI_INITIATOR_GET_AUTH_ELEM = etree.XML(""" - - %s -""" % INITIATOR_IQN) - -ISCSI_INITIATOR_AUTH_LIST_INFO_FAILURE = etree.XML(""" -""" % INITIATOR_IQN) - -CLUSTER_NAME = 'fake_cluster' 
-REMOTE_CLUSTER_NAME = 'fake_cluster_2' -CLUSTER_ADDRESS_1 = 'fake_cluster_address' -CLUSTER_ADDRESS_2 = 'fake_cluster_address_2' -VSERVER_NAME = 'fake_vserver' -VSERVER_NAME_2 = 'fake_vserver_2' -ADMIN_VSERVER_NAME = 'fake_admin_vserver' -NODE_VSERVER_NAME = 'fake_node_vserver' -SM_SOURCE_VSERVER = 'fake_source_vserver' -SM_SOURCE_VOLUME = 'fake_source_volume' -SM_DEST_VSERVER = 'fake_destination_vserver' -SM_DEST_VOLUME = 'fake_destination_volume' - -CLUSTER_PEER_GET_ITER_RESPONSE = etree.XML(""" - - - - - %(addr1)s - %(addr2)s - - available - %(cluster)s - fake_uuid - - %(addr1)s - - %(remote_cluster)s - fake_serial_number - 60 - - - 1 - -""" % { - 'addr1': CLUSTER_ADDRESS_1, - 'addr2': CLUSTER_ADDRESS_2, - 'cluster': CLUSTER_NAME, - 'remote_cluster': REMOTE_CLUSTER_NAME, -}) - -CLUSTER_PEER_POLICY_GET_RESPONSE = etree.XML(""" - - - - false - 8 - - - -""") - -VSERVER_PEER_GET_ITER_RESPONSE = etree.XML(""" - - - - - snapmirror - - %(cluster)s - peered - %(vserver2)s - %(vserver1)s - - - 2 - -""" % { - 'cluster': CLUSTER_NAME, - 'vserver1': VSERVER_NAME, - 'vserver2': VSERVER_NAME_2 -}) - -SNAPMIRROR_GET_ITER_RESPONSE = etree.XML(""" - - - - %(vserver)s:%(volume2)s - %(volume2)s - fake_destination_node - %(vserver)s - fake_snapshot - 1442701782 - false - true - 2187 - 109 - 1442701890 - test:manila - 1171456 - initialize - 0 - snapmirrored - fake_snapshot - 1442701782 - DPDefault - v2 - ea8bfcc6-5f1d-11e5-8446-123478563412 - idle - data_protection - daily - %(vserver)s:%(volume1)s - %(volume1)s - %(vserver)s - fake_destination_vserver - - - 1 - -""" % { - 'volume1': VOLUME_NAMES[0], - 'volume2': VOLUME_NAMES[1], - 'vserver': VOLUME_VSERVER_NAME, -}) - -SNAPMIRROR_GET_ITER_FILTERED_RESPONSE = etree.XML(""" - - - - fake_destination_vserver - fake_destination_volume - true - snapmirrored - daily - fake_source_vserver - fake_source_volume - - - 1 - -""") - -SNAPMIRROR_INITIALIZE_RESULT = etree.XML(""" - - succeeded - -""") - -VSERVER_DATA_LIST_RESPONSE = etree.XML(""" 
- - - - %(vserver)s - data - - - 1 - -""" % {'vserver': VSERVER_NAME}) - -SYSTEM_NODE_GET_ITER_RESPONSE = etree.XML(""" - - - - %s - - - 1 - -""" % NODE_NAME) diff --git a/cinder/tests/unit/volume/drivers/netapp/dataontap/client/test_api.py b/cinder/tests/unit/volume/drivers/netapp/dataontap/client/test_api.py deleted file mode 100644 index c1f104716..000000000 --- a/cinder/tests/unit/volume/drivers/netapp/dataontap/client/test_api.py +++ /dev/null @@ -1,547 +0,0 @@ -# Copyright (c) 2014 Ben Swartzlander. All rights reserved. -# Copyright (c) 2014 Navneet Singh. All rights reserved. -# Copyright (c) 2014 Clinton Knight. All rights reserved. -# Copyright (c) 2014 Alex Meade. All rights reserved. -# Copyright (c) 2014 Bob Callaway. All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
-""" -Tests for NetApp API layer -""" -import ddt -from lxml import etree -import mock -import paramiko -import six -from six.moves import urllib - -from cinder import exception -from cinder.i18n import _ -from cinder import test -from cinder.tests.unit.volume.drivers.netapp.dataontap.client import ( - fakes as zapi_fakes) -from cinder.volume.drivers.netapp.dataontap.client import api as netapp_api - - -@ddt.ddt -class NetAppApiServerTests(test.TestCase): - """Test case for NetApp API server methods""" - def setUp(self): - self.root = netapp_api.NaServer('127.0.0.1') - super(NetAppApiServerTests, self).setUp() - - @ddt.data(None, 'ftp') - def test_set_transport_type_value_error(self, transport_type): - """Tests setting an invalid transport type""" - self.assertRaises(ValueError, self.root.set_transport_type, - transport_type) - - @ddt.data({'params': {'transport_type': 'http', - 'server_type_filer': 'filer'}}, - {'params': {'transport_type': 'http', - 'server_type_filer': 'xyz'}}, - {'params': {'transport_type': 'https', - 'server_type_filer': 'filer'}}, - {'params': {'transport_type': 'https', - 'server_type_filer': 'xyz'}}) - @ddt.unpack - def test_set_transport_type_valid(self, params): - """Tests setting a valid transport type""" - self.root._server_type = params['server_type_filer'] - mock_invoke = self.mock_object(self.root, 'set_port') - - self.root.set_transport_type(params['transport_type']) - - expected_call_args = zapi_fakes.FAKE_CALL_ARGS_LIST - - self.assertIn(mock_invoke.call_args, expected_call_args) - - @ddt.data('stor', 'STORE', '') - def test_set_server_type_value_error(self, server_type): - """Tests Value Error on setting the wrong server type""" - self.assertRaises(ValueError, self.root.set_server_type, server_type) - - @ddt.data('!&', '80na', '') - def test_set_port__value_error(self, port): - """Tests Value Error on trying to set port with a non-integer""" - self.assertRaises(ValueError, self.root.set_port, port) - - @ddt.data('!&', '80na', 
'') - def test_set_timeout_value_error(self, timeout): - """Tests Value Error on trying to set port with a non-integer""" - self.assertRaises(ValueError, self.root.set_timeout, timeout) - - @ddt.data({'params': {'major': 1, 'minor': '20a'}}, - {'params': {'major': '20a', 'minor': 1}}, - {'params': {'major': '!*', 'minor': '20a'}}) - @ddt.unpack - def test_set_api_version_value_error(self, params): - """Tests Value Error on setting non-integer version""" - self.assertRaises(ValueError, self.root.set_api_version, **params) - - def test_set_api_version_valid(self): - """Tests Value Error on setting non-integer version""" - args = {'major': '20', 'minor': 1} - - expected_call_args_list = [mock.call('20'), mock.call(1)] - - mock_invoke = self.mock_object(six, 'text_type', return_value='str') - self.root.set_api_version(**args) - - self.assertEqual(expected_call_args_list, mock_invoke.call_args_list) - - @ddt.data({'params': {'result': zapi_fakes.FAKE_RESULT_API_ERR_REASON}}, - {'params': {'result': zapi_fakes.FAKE_RESULT_API_ERRNO_INVALID}}, - {'params': {'result': zapi_fakes.FAKE_RESULT_API_ERRNO_VALID}}) - @ddt.unpack - def test_invoke_successfully_naapi_error(self, params): - """Tests invoke successfully raising NaApiError""" - self.mock_object(self.root, 'send_http_request', - return_value=params['result']) - - self.assertRaises(netapp_api.NaApiError, - self.root.invoke_successfully, - zapi_fakes.FAKE_NA_ELEMENT) - - def test_invoke_successfully_no_error(self): - """Tests invoke successfully with no errors""" - self.mock_object(self.root, 'send_http_request', - return_value=zapi_fakes.FAKE_RESULT_SUCCESS) - - self.assertEqual(zapi_fakes.FAKE_RESULT_SUCCESS.to_string(), - self.root.invoke_successfully( - zapi_fakes.FAKE_NA_ELEMENT).to_string()) - - def test__create_request(self): - """Tests method _create_request""" - self.root._ns = zapi_fakes.FAKE_XML_STR - self.root._api_version = '1.20' - self.mock_object(self.root, '_enable_tunnel_request') - 
self.mock_object(netapp_api.NaElement, 'add_child_elem') - self.mock_object(netapp_api.NaElement, 'to_string', - return_value=zapi_fakes.FAKE_XML_STR) - mock_invoke = self.mock_object(urllib.request, 'Request') - - self.root._create_request(zapi_fakes.FAKE_NA_ELEMENT, True) - - self.assertTrue(mock_invoke.called) - - @ddt.data({'params': {'server': zapi_fakes.FAKE_NA_SERVER_API_1_5}}, - {'params': {'server': zapi_fakes.FAKE_NA_SERVER_API_1_14}}) - @ddt.unpack - def test__enable_tunnel_request__value_error(self, params): - """Tests value errors with creating tunnel request""" - - self.assertRaises(ValueError, params['server']._enable_tunnel_request, - 'test') - - def test__enable_tunnel_request_valid(self): - """Tests creating tunnel request with correct values""" - netapp_elem = zapi_fakes.FAKE_NA_ELEMENT - server = zapi_fakes.FAKE_NA_SERVER_API_1_20 - mock_invoke = self.mock_object(netapp_elem, 'add_attr') - expected_call_args = [mock.call('vfiler', 'filer'), - mock.call('vfiler', 'server')] - - server._enable_tunnel_request(netapp_elem) - - self.assertEqual(expected_call_args, mock_invoke.call_args_list) - - def test__parse_response__naapi_error(self): - """Tests NaApiError on no response""" - self.assertRaises(netapp_api.NaApiError, - self.root._parse_response, None) - - def test__parse_response_no_error(self): - """Tests parse function with appropriate response""" - mock_invoke = self.mock_object(etree, 'XML', return_value='xml') - - self.root._parse_response(zapi_fakes.FAKE_XML_STR) - - mock_invoke.assert_called_with(zapi_fakes.FAKE_XML_STR) - - def test__build_opener_not_implemented_error(self): - """Tests whether certificate style authorization raises Exception""" - self.root._auth_style = 'not_basic_auth' - - self.assertRaises(NotImplementedError, self.root._build_opener) - - def test__build_opener_valid(self): - """Tests whether build opener works with valid parameters""" - self.root._auth_style = 'basic_auth' - mock_invoke = 
self.mock_object(urllib.request, 'build_opener') - - self.root._build_opener() - - self.assertTrue(mock_invoke.called) - - @ddt.data(None, zapi_fakes.FAKE_XML_STR) - def test_send_http_request_value_error(self, na_element): - """Tests whether invalid NaElement parameter causes error""" - - self.assertRaises(ValueError, self.root.send_http_request, na_element) - - def test_send_http_request_http_error(self): - """Tests handling of HTTPError""" - na_element = zapi_fakes.FAKE_NA_ELEMENT - self.mock_object(self.root, '_create_request', - return_value=('abc', zapi_fakes.FAKE_NA_ELEMENT)) - self.mock_object(netapp_api, 'LOG') - self.root._opener = zapi_fakes.FAKE_HTTP_OPENER - self.mock_object(self.root, '_build_opener') - self.mock_object(self.root._opener, 'open', - side_effect=urllib.error.HTTPError(url='', hdrs='', - fp=None, - code='401', - msg='httperror')) - - self.assertRaises(netapp_api.NaApiError, self.root.send_http_request, - na_element) - - def test_send_http_request_unknown_exception(self): - """Tests handling of Unknown Exception""" - na_element = zapi_fakes.FAKE_NA_ELEMENT - self.mock_object(self.root, '_create_request', - return_value=('abc', zapi_fakes.FAKE_NA_ELEMENT)) - mock_log = self.mock_object(netapp_api, 'LOG') - self.root._opener = zapi_fakes.FAKE_HTTP_OPENER - self.mock_object(self.root, '_build_opener') - self.mock_object(self.root._opener, 'open', side_effect=Exception) - - self.assertRaises(netapp_api.NaApiError, self.root.send_http_request, - na_element) - self.assertEqual(1, mock_log.exception.call_count) - - def test_send_http_request_valid(self): - """Tests the method send_http_request with valid parameters""" - na_element = zapi_fakes.FAKE_NA_ELEMENT - self.root._trace = True - self.mock_object(self.root, '_create_request', - return_value=('abc', zapi_fakes.FAKE_NA_ELEMENT)) - self.mock_object(netapp_api, 'LOG') - self.root._opener = zapi_fakes.FAKE_HTTP_OPENER - self.mock_object(self.root, '_build_opener') - self.mock_object(self.root, 
'_get_result', - return_value=zapi_fakes.FAKE_NA_ELEMENT) - opener_mock = self.mock_object(self.root._opener, 'open') - opener_mock.read.side_effect = ['resp1', 'resp2'] - - self.root.send_http_request(na_element) - - -class NetAppApiElementTransTests(test.TestCase): - """Test case for NetApp API element translations.""" - - def test_translate_struct_dict_unique_key(self): - """Tests if dict gets properly converted to NaElements.""" - root = netapp_api.NaElement('root') - child = {'e1': 'v1', 'e2': 'v2', 'e3': 'v3'} - root.translate_struct(child) - self.assertEqual(3, len(root.get_children())) - self.assertEqual('v1', root.get_child_content('e1')) - self.assertEqual('v2', root.get_child_content('e2')) - self.assertEqual('v3', root.get_child_content('e3')) - - def test_translate_struct_dict_nonunique_key(self): - """Tests if list/dict gets properly converted to NaElements.""" - root = netapp_api.NaElement('root') - child = [{'e1': 'v1', 'e2': 'v2'}, {'e1': 'v3'}] - root.translate_struct(child) - self.assertEqual(3, len(root.get_children())) - children = root.get_children() - for c in children: - if c.get_name() == 'e1': - self.assertIn(c.get_content(), ['v1', 'v3']) - else: - self.assertEqual('v2', c.get_content()) - - def test_translate_struct_list(self): - """Tests if list gets properly converted to NaElements.""" - root = netapp_api.NaElement('root') - child = ['e1', 'e2'] - root.translate_struct(child) - self.assertEqual(2, len(root.get_children())) - self.assertIsNone(root.get_child_content('e1')) - self.assertIsNone(root.get_child_content('e2')) - - def test_translate_struct_tuple(self): - """Tests if tuple gets properly converted to NaElements.""" - root = netapp_api.NaElement('root') - child = ('e1', 'e2') - root.translate_struct(child) - self.assertEqual(2, len(root.get_children())) - self.assertIsNone(root.get_child_content('e1')) - self.assertIsNone(root.get_child_content('e2')) - - def test_translate_invalid_struct(self): - """Tests if invalid data 
structure raises exception.""" - root = netapp_api.NaElement('root') - child = 'random child element' - self.assertRaises(ValueError, root.translate_struct, child) - - def test_setter_builtin_types(self): - """Tests str, int, float get converted to NaElement.""" - root = netapp_api.NaElement('root') - root['e1'] = 'v1' - root['e2'] = 1 - root['e3'] = 2.0 - root['e4'] = 8 - self.assertEqual(4, len(root.get_children())) - self.assertEqual('v1', root.get_child_content('e1')) - self.assertEqual('1', root.get_child_content('e2')) - self.assertEqual('2.0', root.get_child_content('e3')) - self.assertEqual('8', root.get_child_content('e4')) - - def test_setter_na_element(self): - """Tests na_element gets appended as child.""" - root = netapp_api.NaElement('root') - root['e1'] = netapp_api.NaElement('nested') - self.assertEqual(1, len(root.get_children())) - e1 = root.get_child_by_name('e1') - self.assertIsInstance(e1, netapp_api.NaElement) - self.assertIsInstance(e1.get_child_by_name('nested'), - netapp_api.NaElement) - - def test_setter_child_dict(self): - """Tests dict is appended as child to root.""" - root = netapp_api.NaElement('root') - root['d'] = {'e1': 'v1', 'e2': 'v2'} - e1 = root.get_child_by_name('d') - self.assertIsInstance(e1, netapp_api.NaElement) - sub_ch = e1.get_children() - self.assertEqual(2, len(sub_ch)) - for c in sub_ch: - self.assertIn(c.get_name(), ['e1', 'e2']) - if c.get_name() == 'e1': - self.assertEqual('v1', c.get_content()) - else: - self.assertEqual('v2', c.get_content()) - - def test_setter_child_list_tuple(self): - """Tests list/tuple are appended as child to root.""" - root = netapp_api.NaElement('root') - root['l'] = ['l1', 'l2'] - root['t'] = ('t1', 't2') - l = root.get_child_by_name('l') - self.assertIsInstance(l, netapp_api.NaElement) - t = root.get_child_by_name('t') - self.assertIsInstance(t, netapp_api.NaElement) - for le in l.get_children(): - self.assertIn(le.get_name(), ['l1', 'l2']) - for te in t.get_children(): - 
self.assertIn(te.get_name(), ['t1', 't2']) - - def test_setter_no_value(self): - """Tests key with None value.""" - root = netapp_api.NaElement('root') - root['k'] = None - self.assertIsNone(root.get_child_content('k')) - - def test_setter_invalid_value(self): - """Tests invalid value raises exception.""" - root = netapp_api.NaElement('root') - try: - root['k'] = netapp_api.NaServer('localhost') - except Exception as e: - if not isinstance(e, TypeError): - self.fail(_('Error not a TypeError.')) - - def test_setter_invalid_key(self): - """Tests invalid value raises exception.""" - root = netapp_api.NaElement('root') - try: - root[None] = 'value' - except Exception as e: - if not isinstance(e, KeyError): - self.fail(_('Error not a KeyError.')) - - def test_getter_key_error(self): - """Tests invalid key raises exception""" - root = netapp_api.NaElement('root') - self.mock_object(root, 'get_child_by_name', return_value=None) - self.mock_object(root, 'has_attr', return_value=None) - - self.assertRaises(KeyError, - netapp_api.NaElement.__getitem__, - root, '123') - - def test_getter_na_element_list(self): - """Tests returning NaElement list""" - root = netapp_api.NaElement('root') - root['key'] = ['val1', 'val2'] - - self.assertEqual(root.get_child_by_name('key').get_name(), - root.__getitem__('key').get_name()) - - def test_getter_child_text(self): - """Tests NaElement having no children""" - root = netapp_api.NaElement('root') - root.set_content('FAKE_CONTENT') - self.mock_object(root, 'get_child_by_name', return_value=root) - - self.assertEqual('FAKE_CONTENT', - root.__getitem__('root')) - - def test_getter_child_attr(self): - """Tests invalid key raises exception""" - root = netapp_api.NaElement('root') - root.add_attr('val', 'FAKE_VALUE') - - self.assertEqual('FAKE_VALUE', - root.__getitem__('val')) - - def test_add_node_with_children(self): - """Tests adding a child node with its own children""" - root = netapp_api.NaElement('root') - 
self.mock_object(netapp_api.NaElement, - 'create_node_with_children', - return_value=zapi_fakes.FAKE_INVOKE_DATA) - mock_invoke = self.mock_object(root, 'add_child_elem') - - root.add_node_with_children('options') - - mock_invoke.assert_called_with(zapi_fakes.FAKE_INVOKE_DATA) - - def test_create_node_with_children(self): - """Tests adding a child node with its own children""" - root = netapp_api.NaElement('root') - self.mock_object(root, 'add_new_child', return_value='abc') - - result_xml = str(root.create_node_with_children( - 'options', test1=zapi_fakes.FAKE_XML_STR, - test2=zapi_fakes.FAKE_XML_STR)) - - # No ordering is guaranteed for elements in this XML. - self.assertTrue(result_xml.startswith(""), result_xml) - self.assertTrue("abc" in result_xml, result_xml) - self.assertTrue("abc" in result_xml, result_xml) - self.assertTrue(result_xml.rstrip().endswith(""), result_xml) - - def test_add_new_child(self): - """Tests adding a child node with its own children""" - root = netapp_api.NaElement('root') - self.mock_object(netapp_api.NaElement, - '_convert_entity_refs', - return_value=zapi_fakes.FAKE_INVOKE_DATA) - - root.add_new_child('options', zapi_fakes.FAKE_INVOKE_DATA) - - self.assertEqual(zapi_fakes.FAKE_XML2, root.to_string()) - - def test_get_attr_names_empty_attr(self): - """Tests _elements.attrib being empty""" - root = netapp_api.NaElement('root') - - self.assertEqual([], root.get_attr_names()) - - def test_get_attr_names(self): - """Tests _elements.attrib being non-empty""" - root = netapp_api.NaElement('root') - root.add_attr('attr1', 'a1') - root.add_attr('attr2', 'a2') - - self.assertEqual(['attr1', 'attr2'], root.get_attr_names()) - - -@ddt.ddt -class SSHUtilTests(test.TestCase): - """Test Cases for SSH API invocation.""" - - def setUp(self): - super(SSHUtilTests, self).setUp() - self.mock_object(netapp_api.SSHUtil, '_init_ssh_pool') - self.sshutil = netapp_api.SSHUtil('127.0.0.1', - 'fake_user', - 'fake_password') - - def 
test_execute_command(self): - ssh = mock.Mock(paramiko.SSHClient) - stdin, stdout, stderr = self._mock_ssh_channel_files( - paramiko.ChannelFile) - self.mock_object(ssh, 'exec_command', - return_value=(stdin, stdout, stderr)) - - wait_on_stdout = self.mock_object(self.sshutil, '_wait_on_stdout') - stdout_read = self.mock_object(stdout, 'read', return_value='') - self.sshutil.execute_command(ssh, 'ls') - - wait_on_stdout.assert_called_once_with(stdout, - netapp_api.SSHUtil.RECV_TIMEOUT) - stdout_read.assert_called_once_with() - - def test_execute_read_exception(self): - ssh = mock.Mock(paramiko.SSHClient) - exec_command = self.mock_object(ssh, 'exec_command') - exec_command.side_effect = paramiko.SSHException('Failure') - wait_on_stdout = self.mock_object(self.sshutil, '_wait_on_stdout') - - self.assertRaises(paramiko.SSHException, - self.sshutil.execute_command, ssh, 'ls') - wait_on_stdout.assert_not_called() - - @ddt.data('Password:', - 'Password: ', - 'Password: \n\n') - def test_execute_command_with_prompt(self, response): - ssh = mock.Mock(paramiko.SSHClient) - stdin, stdout, stderr = self._mock_ssh_channel_files(paramiko.Channel) - stdout_read = self.mock_object(stdout.channel, 'recv', - return_value=response) - stdin_write = self.mock_object(stdin, 'write') - self.mock_object(ssh, 'exec_command', - return_value=(stdin, stdout, stderr)) - - wait_on_stdout = self.mock_object(self.sshutil, '_wait_on_stdout') - self.sshutil.execute_command_with_prompt(ssh, 'sudo ls', - 'Password:', 'easypass') - - wait_on_stdout.assert_called_once_with(stdout, - netapp_api.SSHUtil.RECV_TIMEOUT) - stdout_read.assert_called_once_with(999) - stdin_write.assert_called_once_with('easypass' + '\n') - - def test_execute_command_unexpected_response(self): - ssh = mock.Mock(paramiko.SSHClient) - stdin, stdout, stderr = self._mock_ssh_channel_files(paramiko.Channel) - stdout_read = self.mock_object(stdout.channel, 'recv', - return_value='bad response') - self.mock_object(ssh, 
'exec_command', - return_value=(stdin, stdout, stderr)) - - wait_on_stdout = self.mock_object(self.sshutil, '_wait_on_stdout') - self.assertRaises(exception.VolumeBackendAPIException, - self.sshutil.execute_command_with_prompt, - ssh, 'sudo ls', 'Password:', 'easypass') - - wait_on_stdout.assert_called_once_with(stdout, - netapp_api.SSHUtil.RECV_TIMEOUT) - stdout_read.assert_called_once_with(999) - - def test_wait_on_stdout(self): - stdout = mock.Mock() - stdout.channel = mock.Mock(paramiko.Channel) - - exit_status = self.mock_object(stdout.channel, 'exit_status_ready', - return_value=False) - self.sshutil._wait_on_stdout(stdout, 1) - exit_status.assert_any_call() - self.assertGreater(exit_status.call_count, 2) - - def _mock_ssh_channel_files(self, channel): - stdin = mock.Mock() - stdin.channel = mock.Mock(channel) - stdout = mock.Mock() - stdout.channel = mock.Mock(channel) - stderr = mock.Mock() - stderr.channel = mock.Mock(channel) - return stdin, stdout, stderr diff --git a/cinder/tests/unit/volume/drivers/netapp/dataontap/client/test_client_7mode.py b/cinder/tests/unit/volume/drivers/netapp/dataontap/client/test_client_7mode.py deleted file mode 100644 index c55669e35..000000000 --- a/cinder/tests/unit/volume/drivers/netapp/dataontap/client/test_client_7mode.py +++ /dev/null @@ -1,863 +0,0 @@ -# Copyright (c) 2014 Alex Meade. All rights reserved. -# Copyright (c) 2015 Dustin Schoenbrun. All rights reserved. -# Copyright (c) 2016 Mike Rooney. All rights reserved. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the -# License for the specific language governing permissions and limitations -# under the License. - -import uuid - -import ddt -from lxml import etree -import mock -import paramiko -import six - -from cinder import exception -from cinder import ssh_utils -from cinder import test -from cinder.tests.unit.volume.drivers.netapp.dataontap.client import ( - fakes as fake_client) -from cinder.tests.unit.volume.drivers.netapp.dataontap import fakes as fake -from cinder.volume.drivers.netapp.dataontap.client import api as netapp_api -from cinder.volume.drivers.netapp.dataontap.client import client_7mode -from cinder.volume.drivers.netapp.dataontap.client import client_base -from cinder.volume.drivers.netapp import utils as netapp_utils - -CONNECTION_INFO = {'hostname': 'hostname', - 'transport_type': 'https', - 'port': 443, - 'username': 'admin', - 'password': 'passw0rd'} - - -@ddt.ddt -class NetApp7modeClientTestCase(test.TestCase): - - def setUp(self): - super(NetApp7modeClientTestCase, self).setUp() - - self.fake_volume = six.text_type(uuid.uuid4()) - - self.mock_object(client_7mode.Client, '_init_ssh_client') - with mock.patch.object(client_7mode.Client, - 'get_ontapi_version', - return_value=(1, 20)): - self.client = client_7mode.Client([self.fake_volume], - **CONNECTION_INFO) - - self.client.ssh_client = mock.MagicMock() - self.client.connection = mock.MagicMock() - self.connection = self.client.connection - self.fake_lun = six.text_type(uuid.uuid4()) - - def test_get_iscsi_target_details_no_targets(self): - response = netapp_api.NaElement( - etree.XML(""" - - - """)) - self.connection.invoke_successfully.return_value = response - - target_list = self.client.get_iscsi_target_details() - - self.assertEqual([], target_list) - - def test_get_iscsi_target_details(self): - expected_target = { - "address": "127.0.0.1", - "port": "1337", - "tpgroup-tag": "7777", - } - response = netapp_api.NaElement( - etree.XML(""" - - - %(address)s - %(port)s - %(tpgroup-tag)s - - - 
""" % expected_target)) - self.connection.invoke_successfully.return_value = response - - target_list = self.client.get_iscsi_target_details() - - self.assertEqual([expected_target], target_list) - - def test_get_iscsi_service_details_with_no_iscsi_service(self): - response = netapp_api.NaElement( - etree.XML(""" - """)) - self.connection.invoke_successfully.return_value = response - - iqn = self.client.get_iscsi_service_details() - - self.assertIsNone(iqn) - - def test_get_iscsi_service_details(self): - expected_iqn = 'iqn.1998-01.org.openstack.iscsi:name1' - response = netapp_api.NaElement( - etree.XML(""" - %s - """ % expected_iqn)) - self.connection.invoke_successfully.return_value = response - - iqn = self.client.get_iscsi_service_details() - - self.assertEqual(expected_iqn, iqn) - - def test_get_lun_list(self): - response = netapp_api.NaElement( - etree.XML(""" - - - - - """)) - self.connection.invoke_successfully.return_value = response - - luns = self.client.get_lun_list() - - self.assertEqual(2, len(luns)) - - def test_get_igroup_by_initiators_none_found(self): - initiators = fake.FC_FORMATTED_INITIATORS[0] - - response = netapp_api.NaElement( - etree.XML(""" - - - """)) - self.connection.invoke_successfully.return_value = response - - igroup = self.client.get_igroup_by_initiators(initiators) - - self.assertEqual([], igroup) - - def test_get_igroup_by_initiators(self): - initiators = [fake.FC_FORMATTED_INITIATORS[0]] - response = netapp_api.NaElement( - etree.XML(""" - - - %(initiator-group-name)s - %(initiator-group-type)s - 1477ee47-0e1f-4b35-a82c-dcca0b76fc44 - - linux - 0 - false - - false - true - true - - true - - - 21:00:00:24:ff:40:6c:c3 - - - - - """ % fake.IGROUP1)) - self.connection.invoke_successfully.return_value = response - - igroups = self.client.get_igroup_by_initiators(initiators) - - # make these lists of dicts comparable using hashable dictionaries - igroups = set( - [netapp_utils.hashabledict(igroup) for igroup in igroups]) - expected 
= set([netapp_utils.hashabledict(fake.IGROUP1)]) - - self.assertSetEqual(igroups, expected) - - def test_get_igroup_by_initiators_multiple(self): - initiators = fake.FC_FORMATTED_INITIATORS - response = netapp_api.NaElement( - etree.XML(""" - - - %(initiator-group-name)s - %(initiator-group-type)s - 1477ee47-0e1f-4b35-a82c-dcca0b76fc44 - - linux - - - 21:00:00:24:ff:40:6c:c3 - - - 21:00:00:24:ff:40:6c:c2 - - - - - openstack-igroup2 - fcp - 1477ee47-0e1f-4b35-a82c-dcca0b76fc44 - - linux - - - 21:00:00:24:ff:40:6c:c2 - - - - """ % fake.IGROUP1)) - self.connection.invoke_successfully.return_value = response - - igroups = self.client.get_igroup_by_initiators(initiators) - - # make these lists of dicts comparable using hashable dictionaries - igroups = set( - [netapp_utils.hashabledict(igroup) for igroup in igroups]) - expected = set([netapp_utils.hashabledict(fake.IGROUP1)]) - - self.assertSetEqual(igroups, expected) - - def test_clone_lun(self): - fake_clone_start = netapp_api.NaElement( - etree.XML(""" - - - 1337 - volume-uuid - - - """)) - fake_clone_status = netapp_api.NaElement( - etree.XML(""" - - - completed - - - """)) - - self.connection.invoke_successfully.side_effect = [fake_clone_start, - fake_clone_status] - - self.client.clone_lun('path', 'new_path', 'fakeLUN', 'newFakeLUN') - self.assertEqual(2, self.connection.invoke_successfully.call_count) - - def test_clone_lun_api_error(self): - fake_clone_start = netapp_api.NaElement( - etree.XML(""" - - - 1337 - volume-uuid - - - """)) - fake_clone_status = netapp_api.NaElement( - etree.XML(""" - - - error - - - """)) - - self.connection.invoke_successfully.side_effect = [fake_clone_start, - fake_clone_status] - - self.assertRaises(netapp_api.NaApiError, self.client.clone_lun, - 'path', 'new_path', 'fakeLUN', 'newFakeLUN') - - def test_clone_lun_multiple_zapi_calls(self): - # Max block-ranges per call = 32, max blocks per range = 2^24 - # Force 2 calls - bc = 2 ** 24 * 32 * 2 - fake_clone_start = 
netapp_api.NaElement( - etree.XML(""" - - - 1337 - volume-uuid - - - """)) - fake_clone_status = netapp_api.NaElement( - etree.XML(""" - - - completed - - - """)) - - self.connection.invoke_successfully.side_effect = [fake_clone_start, - fake_clone_status, - fake_clone_start, - fake_clone_status] - - self.client.clone_lun('path', 'new_path', 'fakeLUN', 'newFakeLUN', - block_count=bc) - - self.assertEqual(4, self.connection.invoke_successfully.call_count) - - def test_clone_lun_wait_for_clone_to_finish(self): - # Max block-ranges per call = 32, max blocks per range = 2^24 - # Force 2 calls - bc = 2 ** 24 * 32 * 2 - fake_clone_start = netapp_api.NaElement( - etree.XML(""" - - - 1337 - volume-uuid - - - """)) - fake_clone_status = netapp_api.NaElement( - etree.XML(""" - - - running - - - """)) - fake_clone_status_completed = netapp_api.NaElement( - etree.XML(""" - - - completed - - - """)) - - fake_responses = [fake_clone_start, - fake_clone_status, - fake_clone_status_completed, - fake_clone_start, - fake_clone_status_completed] - self.connection.invoke_successfully.side_effect = fake_responses - - with mock.patch('time.sleep') as mock_sleep: - self.client.clone_lun('path', 'new_path', 'fakeLUN', - 'newFakeLUN', block_count=bc) - - mock_sleep.assert_called_once_with(1) - self.assertEqual(5, self.connection.invoke_successfully.call_count) - - def test_get_lun_by_args(self): - response = netapp_api.NaElement( - etree.XML(""" - - - - """)) - self.connection.invoke_successfully.return_value = response - - luns = self.client.get_lun_by_args() - - self.assertEqual(1, len(luns)) - - def test_get_lun_by_args_no_lun_found(self): - response = netapp_api.NaElement( - etree.XML(""" - - - """)) - self.connection.invoke_successfully.return_value = response - - luns = self.client.get_lun_by_args() - - self.assertEqual(0, len(luns)) - - def test_get_lun_by_args_with_args_specified(self): - path = '/vol/%s/%s' % (self.fake_volume, self.fake_lun) - response = netapp_api.NaElement( - 
etree.XML(""" - - - - """)) - self.connection.invoke_successfully.return_value = response - - lun = self.client.get_lun_by_args(path=path) - - __, _args, __ = self.connection.invoke_successfully.mock_calls[0] - actual_request = _args[0] - lun_info_args = actual_request.get_children() - - # Assert request is made with correct arguments - self.assertEqual('path', lun_info_args[0].get_name()) - self.assertEqual(path, lun_info_args[0].get_content()) - - self.assertEqual(1, len(lun)) - - def test_get_filer_volumes(self): - response = netapp_api.NaElement( - etree.XML(""" - - - - """)) - self.connection.invoke_successfully.return_value = response - - volumes = self.client.get_filer_volumes() - - self.assertEqual(1, len(volumes)) - - def test_get_filer_volumes_no_volumes(self): - response = netapp_api.NaElement( - etree.XML(""" - - - """)) - self.connection.invoke_successfully.return_value = response - - volumes = self.client.get_filer_volumes() - - self.assertEqual([], volumes) - - def test_get_lun_map(self): - path = '/vol/%s/%s' % (self.fake_volume, self.fake_lun) - self.connection.invoke_successfully.return_value = mock.Mock() - - self.client.get_lun_map(path=path) - - __, _args, __ = self.connection.invoke_successfully.mock_calls[0] - actual_request = _args[0] - lun_info_args = actual_request.get_children() - - # Assert request is made with correct arguments - self.assertEqual('path', lun_info_args[0].get_name()) - self.assertEqual(path, lun_info_args[0].get_content()) - - def test_set_space_reserve(self): - path = '/vol/%s/%s' % (self.fake_volume, self.fake_lun) - self.connection.invoke_successfully.return_value = mock.Mock() - - self.client.set_space_reserve(path, 'true') - - __, _args, __ = self.connection.invoke_successfully.mock_calls[0] - actual_request = _args[0] - lun_info_args = actual_request.get_children() - - # The children list is not generated in a stable order, - # so figure out which entry is which. 
- if lun_info_args[0].get_name() == 'path': - path_arg = lun_info_args[0] - enable_arg = lun_info_args[1] - else: - path_arg = lun_info_args[1] - enable_arg = lun_info_args[0] - - # Assert request is made with correct arguments - self.assertEqual('path', path_arg.get_name()) - self.assertEqual(path, path_arg.get_content()) - self.assertEqual('enable', enable_arg.get_name()) - self.assertEqual('true', enable_arg.get_content()) - - def test_get_actual_path_for_export(self): - fake_export_path = 'fake_export_path' - expected_actual_pathname = 'fake_actual_pathname' - response = netapp_api.NaElement( - etree.XML(""" - %(path)s - """ % {'path': expected_actual_pathname})) - self.connection.invoke_successfully.return_value = response - - actual_pathname = self.client.get_actual_path_for_export( - fake_export_path) - - __, __, _kwargs = self.connection.invoke_successfully.mock_calls[0] - enable_tunneling = _kwargs['enable_tunneling'] - - self.assertEqual(expected_actual_pathname, actual_pathname) - self.assertTrue(enable_tunneling) - - def test_clone_file(self): - expected_src_path = "fake_src_path" - expected_dest_path = "fake_dest_path" - fake_volume_id = '0309c748-0d94-41f0-af46-4fbbd76686cf' - fake_clone_op_id = 'c22ad299-ecec-4ec0-8de4-352b887bfce2' - fake_clone_id_response = netapp_api.NaElement( - etree.XML(""" - - - %(volume)s - %(clone_id)s - - - """ % {'volume': fake_volume_id, - 'clone_id': fake_clone_op_id})) - fake_clone_list_response = netapp_api.NaElement( - etree.XML(""" - - - %(volume)s - %(clone_id)s - - %(clone_id)s - - - - completed - - - """ % {'volume': fake_volume_id, - 'clone_id': fake_clone_op_id})) - self.connection.invoke_successfully.side_effect = [ - fake_clone_id_response, fake_clone_list_response] - - self.client.clone_file(expected_src_path, - expected_dest_path, - source_snapshot=fake.CG_SNAPSHOT_ID) - - __, _args, _kwargs = self.connection.invoke_successfully.mock_calls[0] - actual_request = _args[0] - enable_tunneling = 
_kwargs['enable_tunneling'] - actual_src_path = actual_request \ - .get_child_by_name('source-path').get_content() - actual_dest_path = actual_request.get_child_by_name( - 'destination-path').get_content() - - self.assertEqual(expected_src_path, actual_src_path) - self.assertEqual(expected_dest_path, actual_dest_path) - self.assertEqual( - fake.CG_SNAPSHOT_ID, - actual_request.get_child_by_name('snapshot-name').get_content()) - self.assertIsNone(actual_request.get_child_by_name( - 'destination-exists')) - self.assertTrue(enable_tunneling) - - def test_clone_file_when_clone_fails(self): - """Ensure clone is cleaned up on failure.""" - expected_src_path = "fake_src_path" - expected_dest_path = "fake_dest_path" - fake_volume_id = '0309c748-0d94-41f0-af46-4fbbd76686cf' - fake_clone_op_id = 'c22ad299-ecec-4ec0-8de4-352b887bfce2' - fake_clone_id_response = netapp_api.NaElement( - etree.XML(""" - - - %(volume)s - %(clone_id)s - - - """ % {'volume': fake_volume_id, - 'clone_id': fake_clone_op_id})) - fake_clone_list_response = netapp_api.NaElement( - etree.XML(""" - - - %(volume)s - %(clone_id)s - - %(clone_id)s - - - - failed - - - """ % {'volume': fake_volume_id, - 'clone_id': fake_clone_op_id})) - fake_clone_clear_response = mock.Mock() - self.connection.invoke_successfully.side_effect = [ - fake_clone_id_response, fake_clone_list_response, - fake_clone_clear_response] - - self.assertRaises(netapp_api.NaApiError, - self.client.clone_file, - expected_src_path, - expected_dest_path) - - __, _args, _kwargs = self.connection.invoke_successfully.mock_calls[0] - actual_request = _args[0] - enable_tunneling = _kwargs['enable_tunneling'] - actual_src_path = actual_request \ - .get_child_by_name('source-path').get_content() - actual_dest_path = actual_request.get_child_by_name( - 'destination-path').get_content() - - self.assertEqual(expected_src_path, actual_src_path) - self.assertEqual(expected_dest_path, actual_dest_path) - self.assertIsNone(actual_request.get_child_by_name( 
- 'destination-exists')) - self.assertTrue(enable_tunneling) - - __, _args, _kwargs = self.connection.invoke_successfully.mock_calls[1] - actual_request = _args[0] - enable_tunneling = _kwargs['enable_tunneling'] - actual_clone_id = actual_request.get_child_by_name('clone-id') - actual_clone_id_info = actual_clone_id.get_child_by_name( - 'clone-id-info') - actual_clone_op_id = actual_clone_id_info.get_child_by_name( - 'clone-op-id').get_content() - actual_volume_uuid = actual_clone_id_info.get_child_by_name( - 'volume-uuid').get_content() - - self.assertEqual(fake_clone_op_id, actual_clone_op_id) - self.assertEqual(fake_volume_id, actual_volume_uuid) - self.assertTrue(enable_tunneling) - - # Ensure that the clone-clear call is made upon error - __, _args, _kwargs = self.connection.invoke_successfully.mock_calls[2] - actual_request = _args[0] - enable_tunneling = _kwargs['enable_tunneling'] - actual_clone_id = actual_request \ - .get_child_by_name('clone-id').get_content() - - self.assertEqual(fake_clone_op_id, actual_clone_id) - self.assertTrue(enable_tunneling) - - def test_get_file_usage(self): - expected_bytes = "2048" - fake_path = 'fake_path' - response = netapp_api.NaElement( - etree.XML(""" - %(unique-bytes)s - """ % {'unique-bytes': expected_bytes})) - self.connection.invoke_successfully.return_value = response - - actual_bytes = self.client.get_file_usage(fake_path) - - self.assertEqual(expected_bytes, actual_bytes) - - def test_get_ifconfig(self): - expected_response = mock.Mock() - self.connection.invoke_successfully.return_value = expected_response - - actual_response = self.client.get_ifconfig() - - __, _args, __ = self.connection.invoke_successfully.mock_calls[0] - actual_request = _args[0] - self.assertEqual('net-ifconfig-get', actual_request.get_name()) - self.assertEqual(expected_response, actual_response) - - def test_get_fc_target_wwpns(self): - wwpn1 = '50:0a:09:81:90:fe:eb:a5' - wwpn2 = '50:0a:09:82:90:fe:eb:a5' - response = 
netapp_api.NaElement( - etree.XML(""" - - - - %(wwpn1)s - true - 1a - - - %(wwpn2)s - true - 1b - - - """ % {'wwpn1': wwpn1, 'wwpn2': wwpn2})) - self.connection.invoke_successfully.return_value = response - - wwpns = self.client.get_fc_target_wwpns() - - self.assertSetEqual(set(wwpns), set([wwpn1, wwpn2])) - - def test_get_flexvol_capacity(self): - expected_total_bytes = 1000 - expected_available_bytes = 750 - fake_flexvol_path = '/fake/vol' - response = netapp_api.NaElement( - etree.XML(""" - - - - %(total_bytes)s - %(available_bytes)s - - - """ % {'total_bytes': expected_total_bytes, - 'available_bytes': expected_available_bytes})) - self.connection.invoke_successfully.return_value = response - - result = self.client.get_flexvol_capacity(fake_flexvol_path) - - expected = { - 'size-total': expected_total_bytes, - 'size-available': expected_available_bytes, - } - self.assertEqual(expected, result) - - def test_get_performance_instance_names(self): - - mock_send_request = self.mock_object(self.client, 'send_request') - mock_send_request.return_value = netapp_api.NaElement( - fake_client.PERF_OBJECT_INSTANCE_LIST_INFO_RESPONSE) - - result = self.client.get_performance_instance_names('processor') - - expected = ['processor0', 'processor1'] - self.assertEqual(expected, result) - - perf_object_instance_list_info_args = {'objectname': 'processor'} - mock_send_request.assert_called_once_with( - 'perf-object-instance-list-info', - perf_object_instance_list_info_args, enable_tunneling=False) - - def test_get_performance_counters(self): - - mock_send_request = self.mock_object(self.client, 'send_request') - mock_send_request.return_value = netapp_api.NaElement( - fake_client.PERF_OBJECT_GET_INSTANCES_SYSTEM_RESPONSE_7MODE) - - instance_names = ['system'] - counter_names = ['avg_processor_busy'] - result = self.client.get_performance_counters('system', - instance_names, - counter_names) - - expected = [ - { - 'avg_processor_busy': '13215732322', - 'instance-name': 'system', - 
'timestamp': '1454146292', - } - ] - self.assertEqual(expected, result) - - perf_object_get_instances_args = { - 'objectname': 'system', - 'instances': [ - {'instance': instance} for instance in instance_names - ], - 'counters': [ - {'counter': counter} for counter in counter_names - ], - } - mock_send_request.assert_called_once_with( - 'perf-object-get-instances', perf_object_get_instances_args, - enable_tunneling=False) - - def test_get_system_name(self): - - mock_send_request = self.mock_object(self.client, 'send_request') - mock_send_request.return_value = netapp_api.NaElement( - fake_client.SYSTEM_GET_INFO_RESPONSE) - - result = self.client.get_system_name() - - self.assertEqual(fake_client.NODE_NAME, result) - - def test_check_iscsi_initiator_exists_when_no_initiator_exists(self): - self.connection.invoke_successfully = mock.Mock( - side_effect=netapp_api.NaApiError) - initiator = fake_client.INITIATOR_IQN - - initiator_exists = self.client.check_iscsi_initiator_exists(initiator) - - self.assertFalse(initiator_exists) - - def test_check_iscsi_initiator_exists_when_initiator_exists(self): - self.connection.invoke_successfully = mock.Mock() - initiator = fake_client.INITIATOR_IQN - - initiator_exists = self.client.check_iscsi_initiator_exists(initiator) - - self.assertTrue(initiator_exists) - - def test_set_iscsi_chap_authentication(self): - ssh = mock.Mock(paramiko.SSHClient) - sshpool = mock.Mock(ssh_utils.SSHPool) - self.client.ssh_client.ssh_pool = sshpool - self.mock_object(self.client.ssh_client, 'execute_command') - sshpool.item().__enter__ = mock.Mock(return_value=ssh) - sshpool.item().__exit__ = mock.Mock(return_value=False) - - self.client.set_iscsi_chap_authentication(fake_client.INITIATOR_IQN, - fake_client.USER_NAME, - fake_client.PASSWORD) - - command = ('iscsi security add -i iqn.2015-06.com.netapp:fake_iqn ' - '-s CHAP -p passw0rd -n fake_user') - self.client.ssh_client.execute_command.assert_has_calls( - [mock.call(ssh, command)] - ) - - def 
test_get_snapshot_if_snapshot_present_not_busy(self): - expected_vol_name = fake.SNAPSHOT['volume_id'] - expected_snapshot_name = fake.SNAPSHOT['name'] - response = netapp_api.NaElement( - fake_client.SNAPSHOT_INFO_FOR_PRESENT_NOT_BUSY_SNAPSHOT_7MODE) - self.connection.invoke_successfully.return_value = response - - snapshot = self.client.get_snapshot(expected_vol_name, - expected_snapshot_name) - - self.assertEqual(expected_vol_name, snapshot['volume']) - self.assertEqual(expected_snapshot_name, snapshot['name']) - self.assertEqual(set([]), snapshot['owners']) - self.assertFalse(snapshot['busy']) - - def test_get_snapshot_if_snapshot_present_busy(self): - expected_vol_name = fake.SNAPSHOT['volume_id'] - expected_snapshot_name = fake.SNAPSHOT['name'] - response = netapp_api.NaElement( - fake_client.SNAPSHOT_INFO_FOR_PRESENT_BUSY_SNAPSHOT_7MODE) - self.connection.invoke_successfully.return_value = response - - snapshot = self.client.get_snapshot(expected_vol_name, - expected_snapshot_name) - - self.assertEqual(expected_vol_name, snapshot['volume']) - self.assertEqual(expected_snapshot_name, snapshot['name']) - self.assertEqual(set([]), snapshot['owners']) - self.assertTrue(snapshot['busy']) - - def test_get_snapshot_if_snapshot_not_present(self): - expected_vol_name = fake.SNAPSHOT['volume_id'] - expected_snapshot_name = fake.SNAPSHOT['name'] - response = netapp_api.NaElement(fake_client.SNAPSHOT_NOT_PRESENT_7MODE) - self.connection.invoke_successfully.return_value = response - - self.assertRaises(exception.SnapshotNotFound, self.client.get_snapshot, - expected_vol_name, expected_snapshot_name) - - @ddt.data({ - 'mock_return': - fake_client.SNAPSHOT_INFO_MARKED_FOR_DELETE_SNAPSHOT_7MODE, - 'expected': [{ - 'name': client_base.DELETED_PREFIX + fake.SNAPSHOT_NAME, - 'instance_id': 'abcd-ef01-2345-6789', - 'volume_name': fake.SNAPSHOT['volume_id'], - }] - }, { - 'mock_return': fake_client.NO_RECORDS_RESPONSE, - 'expected': [], - }, { - 'mock_return': - 
fake_client.SNAPSHOT_INFO_MARKED_FOR_DELETE_SNAPSHOT_7MODE_BUSY, - 'expected': [], - }) - @ddt.unpack - def test_get_snapshots_marked_for_deletion(self, mock_return, expected): - api_response = netapp_api.NaElement(mock_return) - volume_list = [fake.SNAPSHOT['volume_id']] - self.mock_object(self.client, - 'send_request', - return_value=api_response) - - result = self.client.get_snapshots_marked_for_deletion(volume_list) - - api_args = { - 'target-name': fake.SNAPSHOT['volume_id'], - 'target-type': 'volume', - 'terse': 'true', - } - - self.client.send_request.assert_called_once_with( - 'snapshot-list-info', api_args) - self.assertListEqual(expected, result) diff --git a/cinder/tests/unit/volume/drivers/netapp/dataontap/client/test_client_base.py b/cinder/tests/unit/volume/drivers/netapp/dataontap/client/test_client_base.py deleted file mode 100644 index c46d5375b..000000000 --- a/cinder/tests/unit/volume/drivers/netapp/dataontap/client/test_client_base.py +++ /dev/null @@ -1,604 +0,0 @@ -# Copyright (c) 2014 Alex Meade. All rights reserved. -# Copyright (c) 2015 Tom Barron. All rights reserved. -# Copyright (c) 2016 Mike Rooney. All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -import uuid - -from lxml import etree -import mock -import six -import time - -from cinder import exception -from cinder import test -from cinder.tests.unit.volume.drivers.netapp.dataontap.client import ( - fakes as fake_client) -import cinder.tests.unit.volume.drivers.netapp.dataontap.fakes as fake -from cinder.volume.drivers.netapp.dataontap.client import api as netapp_api -from cinder.volume.drivers.netapp.dataontap.client import client_base - - -CONNECTION_INFO = {'hostname': 'hostname', - 'transport_type': 'https', - 'port': 443, - 'username': 'admin', - 'password': 'passw0rd'} - - -class NetAppBaseClientTestCase(test.TestCase): - - def setUp(self): - super(NetAppBaseClientTestCase, self).setUp() - - self.mock_object(client_base, 'LOG') - self.mock_object(client_base.Client, '_init_ssh_client') - self.client = client_base.Client(**CONNECTION_INFO) - self.client.connection = mock.MagicMock() - self.client.ssh_client = mock.MagicMock() - self.connection = self.client.connection - self.fake_volume = six.text_type(uuid.uuid4()) - self.fake_lun = six.text_type(uuid.uuid4()) - self.fake_size = '1024' - self.fake_metadata = {'OsType': 'linux', 'SpaceReserved': 'true'} - self.mock_send_request = self.mock_object(self.client, 'send_request') - - def test_get_ontapi_version(self): - version_response = netapp_api.NaElement( - etree.XML(""" - 1 - 19 - """)) - self.connection.invoke_successfully.return_value = version_response - - major, minor = self.client.get_ontapi_version(cached=False) - - self.assertEqual('1', major) - self.assertEqual('19', minor) - - def test_get_ontapi_version_cached(self): - - self.connection.get_api_version.return_value = (1, 20) - - major, minor = self.client.get_ontapi_version() - - self.assertEqual(1, self.connection.get_api_version.call_count) - self.assertEqual(1, major) - self.assertEqual(20, minor) - - def test_check_is_naelement(self): - - element = netapp_api.NaElement('name') - - 
self.assertIsNone(self.client.check_is_naelement(element)) - self.assertRaises(ValueError, self.client.check_is_naelement, None) - - def test_create_lun(self): - expected_path = '/vol/%s/%s' % (self.fake_volume, self.fake_lun) - - with mock.patch.object(netapp_api.NaElement, - 'create_node_with_children', - ) as mock_create_node: - self.client.create_lun(self.fake_volume, - self.fake_lun, - self.fake_size, - self.fake_metadata) - - mock_create_node.assert_called_once_with( - 'lun-create-by-size', - **{'path': expected_path, - 'size': self.fake_size, - 'ostype': self.fake_metadata['OsType'], - 'space-reservation-enabled': - self.fake_metadata['SpaceReserved']}) - self.connection.invoke_successfully.assert_called_once_with( - mock.ANY, True) - - def test_create_lun_with_qos_policy_group_name(self): - expected_path = '/vol/%s/%s' % (self.fake_volume, self.fake_lun) - expected_qos_group_name = 'qos_1' - mock_request = mock.Mock() - - with mock.patch.object(netapp_api.NaElement, - 'create_node_with_children', - return_value=mock_request - ) as mock_create_node: - self.client.create_lun( - self.fake_volume, - self.fake_lun, - self.fake_size, - self.fake_metadata, - qos_policy_group_name=expected_qos_group_name) - - mock_create_node.assert_called_once_with( - 'lun-create-by-size', - **{'path': expected_path, 'size': self.fake_size, - 'ostype': self.fake_metadata['OsType'], - 'space-reservation-enabled': - self.fake_metadata['SpaceReserved']}) - mock_request.add_new_child.assert_called_once_with( - 'qos-policy-group', expected_qos_group_name) - self.connection.invoke_successfully.assert_called_once_with( - mock.ANY, True) - - def test_create_lun_raises_on_failure(self): - self.connection.invoke_successfully = mock.Mock( - side_effect=netapp_api.NaApiError) - - self.assertRaises(netapp_api.NaApiError, - self.client.create_lun, - self.fake_volume, - self.fake_lun, - self.fake_size, - self.fake_metadata) - - def test_destroy_lun(self): - path = '/vol/%s/%s' % 
(self.fake_volume, self.fake_lun) - - with mock.patch.object(netapp_api.NaElement, - 'create_node_with_children', - ) as mock_create_node: - self.client.destroy_lun(path) - - mock_create_node.assert_called_once_with( - 'lun-destroy', - **{'path': path}) - self.connection.invoke_successfully.assert_called_once_with( - mock.ANY, True) - - def test_destroy_lun_force(self): - path = '/vol/%s/%s' % (self.fake_volume, self.fake_lun) - mock_request = mock.Mock() - - with mock.patch.object(netapp_api.NaElement, - 'create_node_with_children', - return_value=mock_request - ) as mock_create_node: - self.client.destroy_lun(path) - - mock_create_node.assert_called_once_with('lun-destroy', - **{'path': path}) - mock_request.add_new_child.assert_called_once_with('force', 'true') - self.connection.invoke_successfully.assert_called_once_with( - mock.ANY, True) - - def test_map_lun(self): - path = '/vol/%s/%s' % (self.fake_volume, self.fake_lun) - igroup = 'igroup' - expected_lun_id = 'my_lun' - mock_response = mock.Mock() - self.connection.invoke_successfully.return_value = mock_response - mock_response.get_child_content.return_value = expected_lun_id - - with mock.patch.object(netapp_api.NaElement, - 'create_node_with_children', - ) as mock_create_node: - actual_lun_id = self.client.map_lun(path, igroup) - - mock_create_node.assert_called_once_with( - 'lun-map', - **{'path': path, 'initiator-group': igroup}) - self.connection.invoke_successfully.assert_called_once_with( - mock.ANY, True) - self.assertEqual(expected_lun_id, actual_lun_id) - - def test_map_lun_with_lun_id(self): - path = '/vol/%s/%s' % (self.fake_volume, self.fake_lun) - igroup = 'igroup' - expected_lun_id = 'my_lun' - mock_response = mock.Mock() - self.connection.invoke_successfully.return_value = mock_response - mock_response.get_child_content.return_value = expected_lun_id - - with mock.patch.object(netapp_api.NaElement, - 'create_node_with_children', - ) as mock_create_node: - actual_lun_id = 
self.client.map_lun(path, igroup, - lun_id=expected_lun_id) - - mock_create_node.assert_called_once_with( - 'lun-map', - **{'path': path, 'initiator-group': igroup}) - self.connection.invoke_successfully.assert_called_once_with( - mock.ANY, True) - self.assertEqual(expected_lun_id, actual_lun_id) - - def test_map_lun_with_api_error(self): - path = '/vol/%s/%s' % (self.fake_volume, self.fake_lun) - igroup = 'igroup' - self.connection.invoke_successfully.side_effect =\ - netapp_api.NaApiError() - - with mock.patch.object(netapp_api.NaElement, - 'create_node_with_children', - ) as mock_create_node: - self.assertRaises(netapp_api.NaApiError, self.client.map_lun, - path, igroup) - - mock_create_node.assert_called_once_with( - 'lun-map', - **{'path': path, 'initiator-group': igroup}) - self.connection.invoke_successfully.assert_called_once_with( - mock.ANY, True) - - def test_unmap_lun(self): - path = '/vol/%s/%s' % (self.fake_volume, self.fake_lun) - igroup = 'igroup' - mock_response = mock.Mock() - self.connection.invoke_successfully.return_value = mock_response - - with mock.patch.object(netapp_api.NaElement, - 'create_node_with_children', - ) as mock_create_node: - self.client.unmap_lun(path, igroup) - - mock_create_node.assert_called_once_with( - 'lun-unmap', - **{'path': path, 'initiator-group': igroup}) - self.connection.invoke_successfully.assert_called_once_with( - mock.ANY, True) - - def test_unmap_lun_with_api_error(self): - path = '/vol/%s/%s' % (self.fake_volume, self.fake_lun) - igroup = 'igroup' - self.connection.invoke_successfully.side_effect =\ - netapp_api.NaApiError() - - with mock.patch.object(netapp_api.NaElement, - 'create_node_with_children', - ) as mock_create_node: - self.assertRaises(netapp_api.NaApiError, self.client.unmap_lun, - path, igroup) - - mock_create_node.assert_called_once_with( - 'lun-unmap', - **{'path': path, 'initiator-group': igroup}) - - def test_unmap_lun_already_unmapped(self): - path = '/vol/%s/%s' % (self.fake_volume, 
self.fake_lun) - igroup = 'igroup' - EINVALIDINPUTERROR = '13115' - self.connection.invoke_successfully.side_effect =\ - netapp_api.NaApiError(code=EINVALIDINPUTERROR) - - with mock.patch.object(netapp_api.NaElement, - 'create_node_with_children', - ) as mock_create_node: - self.client.unmap_lun(path, igroup) - - mock_create_node.assert_called_once_with( - 'lun-unmap', - **{'path': path, 'initiator-group': igroup}) - self.connection.invoke_successfully.assert_called_once_with( - mock.ANY, True) - - def test_unmap_lun_lun_not_mapped_in_group(self): - path = '/vol/%s/%s' % (self.fake_volume, self.fake_lun) - igroup = 'igroup' - EVDISK_ERROR_NO_SUCH_LUNMAP = '9016' - self.connection.invoke_successfully.side_effect =\ - netapp_api.NaApiError(code=EVDISK_ERROR_NO_SUCH_LUNMAP) - - with mock.patch.object(netapp_api.NaElement, - 'create_node_with_children', - ) as mock_create_node: - self.client.unmap_lun(path, igroup) - - mock_create_node.assert_called_once_with( - 'lun-unmap', - **{'path': path, 'initiator-group': igroup}) - self.connection.invoke_successfully.assert_called_once_with( - mock.ANY, True) - - def test_create_igroup(self): - igroup = 'igroup' - - with mock.patch.object(netapp_api.NaElement, - 'create_node_with_children', - ) as mock_create_node: - self.client.create_igroup(igroup) - - mock_create_node.assert_called_once_with( - 'igroup-create', - **{'initiator-group-name': igroup, - 'initiator-group-type': 'iscsi', - 'os-type': 'default'}) - self.connection.invoke_successfully.assert_called_once_with( - mock.ANY, True) - - def test_add_igroup_initiator(self): - igroup = 'igroup' - initiator = 'initiator' - - with mock.patch.object(netapp_api.NaElement, - 'create_node_with_children', - ) as mock_create_node: - self.client.add_igroup_initiator(igroup, initiator) - - mock_create_node.assert_called_once_with( - 'igroup-add', - **{'initiator-group-name': igroup, - 'initiator': initiator}) - self.connection.invoke_successfully.assert_called_once_with( - mock.ANY, 
True) - - def test_do_direct_resize(self): - path = '/vol/%s/%s' % (self.fake_volume, self.fake_lun) - new_size = 1024 - mock_request = mock.Mock() - - with mock.patch.object(netapp_api.NaElement, - 'create_node_with_children', - return_value=mock_request - ) as mock_create_node: - self.client.do_direct_resize(path, new_size) - - mock_create_node.assert_called_once_with( - 'lun-resize', - **{'path': path, - 'size': new_size}) - mock_request.add_new_child.assert_called_once_with( - 'force', 'true') - self.connection.invoke_successfully.assert_called_once_with( - mock.ANY, True) - - def test_do_direct_resize_not_forced(self): - path = '/vol/%s/%s' % (self.fake_volume, self.fake_lun) - new_size = 1024 - mock_request = mock.Mock() - - with mock.patch.object(netapp_api.NaElement, - 'create_node_with_children', - return_value=mock_request - ) as mock_create_node: - self.client.do_direct_resize(path, new_size, force=False) - - mock_create_node.assert_called_once_with( - 'lun-resize', - **{'path': path, - 'size': new_size}) - self.assertFalse(mock_request.add_new_child.called) - self.connection.invoke_successfully.assert_called_once_with( - mock.ANY, True) - - def test_get_lun_geometry(self): - expected_keys = set(['size', 'bytes_per_sector', 'sectors_per_track', - 'tracks_per_cylinder', 'cylinders', 'max_resize']) - path = '/vol/%s/%s' % (self.fake_volume, self.fake_lun) - mock_response = mock.Mock() - self.connection.invoke_successfully.return_value = mock_response - - geometry = self.client.get_lun_geometry(path) - - self.assertEqual(expected_keys, set(geometry.keys())) - - def test_get_lun_geometry_with_api_error(self): - path = '/vol/%s/%s' % (self.fake_volume, self.fake_lun) - self.connection.invoke_successfully.side_effect =\ - netapp_api.NaApiError() - geometry = self.client.get_lun_geometry(path) - - self.assertEqual({}, geometry) - - def test_get_volume_options(self): - fake_response = netapp_api.NaElement('volume') - 
fake_response.add_node_with_children('options', test='blah') - self.connection.invoke_successfully.return_value = fake_response - - options = self.client.get_volume_options('volume') - - self.assertEqual(1, len(options)) - - def test_get_volume_options_with_no_options(self): - fake_response = netapp_api.NaElement('options') - self.connection.invoke_successfully.return_value = fake_response - - options = self.client.get_volume_options('volume') - - self.assertEqual([], options) - - def test_move_lun(self): - path = '/vol/%s/%s' % (self.fake_volume, self.fake_lun) - new_path = '/vol/%s/%s' % (self.fake_volume, self.fake_lun) - fake_response = netapp_api.NaElement('options') - self.connection.invoke_successfully.return_value = fake_response - - self.client.move_lun(path, new_path) - - self.connection.invoke_successfully.assert_called_once_with( - mock.ANY, True) - - def test_get_igroup_by_initiators(self): - self.assertRaises(NotImplementedError, - self.client.get_igroup_by_initiators, - fake.FC_FORMATTED_INITIATORS) - - def test_get_fc_target_wwpns(self): - self.assertRaises(NotImplementedError, - self.client.get_fc_target_wwpns) - - def test_has_luns_mapped_to_initiator(self): - initiator = fake.FC_FORMATTED_INITIATORS[0] - version_response = netapp_api.NaElement( - etree.XML(""" - - - - /vol/cinder1/volume-9be956b3-9854-4a5c-a7f5-13a16da52c9c - openstack-4b57a80b-ebca-4d27-bd63-48ac5408d08b - - 0 - - - /vol/cinder1/volume-ac90433c-a560-41b3-9357-7f3f80071eb5 - openstack-4b57a80b-ebca-4d27-bd63-48ac5408d08b - - 1 - - - """)) - - self.connection.invoke_successfully.return_value = version_response - - self.assertTrue(self.client._has_luns_mapped_to_initiator(initiator)) - - def test_has_luns_mapped_to_initiator_not_mapped(self): - initiator = fake.FC_FORMATTED_INITIATORS[0] - version_response = netapp_api.NaElement( - etree.XML(""" - - - """)) - self.connection.invoke_successfully.return_value = version_response - 
self.assertFalse(self.client._has_luns_mapped_to_initiator(initiator)) - - @mock.patch.object(client_base.Client, '_has_luns_mapped_to_initiator') - def test_has_luns_mapped_to_initiators(self, - mock_has_luns_mapped_to_initiator): - initiators = fake.FC_FORMATTED_INITIATORS - mock_has_luns_mapped_to_initiator.return_value = True - self.assertTrue(self.client.has_luns_mapped_to_initiators(initiators)) - - @mock.patch.object(client_base.Client, '_has_luns_mapped_to_initiator') - def test_has_luns_mapped_to_initiators_not_mapped( - self, mock_has_luns_mapped_to_initiator): - initiators = fake.FC_FORMATTED_INITIATORS - mock_has_luns_mapped_to_initiator.return_value = False - self.assertFalse(self.client.has_luns_mapped_to_initiators(initiators)) - - def test_get_performance_counter_info(self): - - self.mock_send_request.return_value = netapp_api.NaElement( - fake_client.PERF_OBJECT_COUNTER_LIST_INFO_WAFL_RESPONSE) - - result = self.client.get_performance_counter_info('wafl', - 'cp_phase_times') - - expected = { - 'name': 'cp_phase_times', - 'base-counter': 'total_cp_msecs', - 'labels': fake_client.PERF_OBJECT_COUNTER_TOTAL_CP_MSECS_LABELS, - } - self.assertEqual(expected, result) - - perf_object_counter_list_info_args = {'objectname': 'wafl'} - self.mock_send_request.assert_called_once_with( - 'perf-object-counter-list-info', - perf_object_counter_list_info_args, enable_tunneling=False) - - def test_get_performance_counter_info_not_found(self): - - self.mock_send_request.return_value = netapp_api.NaElement( - fake_client.PERF_OBJECT_COUNTER_LIST_INFO_WAFL_RESPONSE) - - self.assertRaises(exception.NotFound, - self.client.get_performance_counter_info, - 'wafl', - 'invalid') - - def test_delete_snapshot(self): - api_args = { - 'volume': fake.SNAPSHOT['volume_id'], - 'snapshot': fake.SNAPSHOT['name'], - } - self.mock_object(self.client, 'send_request') - - self.client.delete_snapshot(api_args['volume'], - api_args['snapshot']) - - asserted_api_args = { - 'volume': 
api_args['volume'], - 'snapshot': api_args['snapshot'], - } - self.client.send_request.assert_called_once_with('snapshot-delete', - asserted_api_args) - - def test_create_cg_snapshot(self): - self.mock_object(self.client, '_start_cg_snapshot', - return_value=fake.CONSISTENCY_GROUP_ID) - self.mock_object(self.client, '_commit_cg_snapshot') - - self.client.create_cg_snapshot([fake.CG_VOLUME_NAME], - fake.CG_SNAPSHOT_NAME) - - self.client._commit_cg_snapshot.assert_called_once_with( - fake.CONSISTENCY_GROUP_ID) - - def test_create_cg_snapshot_no_id(self): - self.mock_object(self.client, '_start_cg_snapshot', return_value=None) - - self.assertRaises(exception.VolumeBackendAPIException, - self.client.create_cg_snapshot, - [fake.CG_VOLUME_NAME], - fake.CG_SNAPSHOT_NAME) - - def test_start_cg_snapshot(self): - snapshot_init = { - 'snapshot': fake.CG_SNAPSHOT_NAME, - 'timeout': 'relaxed', - 'volumes': [{'volume-name': fake.CG_VOLUME_NAME}], - } - self.mock_object(self.client, 'send_request') - - self.client._start_cg_snapshot([fake.CG_VOLUME_NAME], - snapshot_init['snapshot']) - - self.client.send_request.assert_called_once_with('cg-start', - snapshot_init) - - def test_commit_cg_snapshot(self): - snapshot_commit = {'cg-id': fake.CG_VOLUME_ID} - self.mock_object(self.client, 'send_request') - - self.client._commit_cg_snapshot(snapshot_commit['cg-id']) - - self.client.send_request.assert_called_once_with( - 'cg-commit', {'cg-id': snapshot_commit['cg-id']}) - - def test_wait_for_busy_snapshot_raise_exception(self): - BUSY_SNAPSHOT = dict(fake.SNAPSHOT) - BUSY_SNAPSHOT['busy'] = True - - # Need to mock sleep as it is called by @utils.retry - self.mock_object(time, 'sleep') - mock_get_snapshot = self.mock_object(self.client, 'get_snapshot', - return_value=BUSY_SNAPSHOT) - - self.assertRaises(exception.SnapshotIsBusy, - self.client.wait_for_busy_snapshot, - fake.FLEXVOL, fake.SNAPSHOT_NAME) - - calls = [ - mock.call(fake.FLEXVOL, fake.SNAPSHOT_NAME), - mock.call(fake.FLEXVOL, 
fake.SNAPSHOT_NAME), - mock.call(fake.FLEXVOL, fake.SNAPSHOT_NAME), - ] - mock_get_snapshot.assert_has_calls(calls) - - def test_rename_snapshot(self): - self.mock_object(self.client, 'send_request') - - self.client.rename_snapshot( - fake.SNAPSHOT['volume_id'], fake.SNAPSHOT_NAME, - client_base.DELETED_PREFIX + fake.SNAPSHOT_NAME) - - api_args = { - 'volume': fake.SNAPSHOT['volume_id'], - 'current-name': fake.SNAPSHOT_NAME, - 'new-name': - client_base.DELETED_PREFIX + fake.SNAPSHOT_NAME, - } - - self.client.send_request.assert_called_once_with( - 'snapshot-rename', api_args) diff --git a/cinder/tests/unit/volume/drivers/netapp/dataontap/client/test_client_cmode.py b/cinder/tests/unit/volume/drivers/netapp/dataontap/client/test_client_cmode.py deleted file mode 100644 index 64ac143ed..000000000 --- a/cinder/tests/unit/volume/drivers/netapp/dataontap/client/test_client_cmode.py +++ /dev/null @@ -1,3603 +0,0 @@ -# Copyright (c) 2014 Alex Meade. All rights reserved. -# Copyright (c) 2015 Dustin Schoenbrun. All rights reserved. -# Copyright (c) 2015 Tom Barron. All rights reserved. -# Copyright (c) 2016 Mike Rooney. All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -import copy -import uuid - -import ddt -from lxml import etree -import mock -import paramiko -import six -import time - -from cinder import exception -from cinder import ssh_utils -from cinder import test -from cinder.tests.unit.volume.drivers.netapp.dataontap.client import ( - fakes as fake_client) -from cinder.tests.unit.volume.drivers.netapp.dataontap import fakes as fake -from cinder.volume.drivers.netapp.dataontap.client import api as netapp_api -from cinder.volume.drivers.netapp.dataontap.client import client_base -from cinder.volume.drivers.netapp.dataontap.client import client_cmode -from cinder.volume.drivers.netapp import utils as netapp_utils - - -CONNECTION_INFO = {'hostname': 'hostname', - 'transport_type': 'https', - 'port': 443, - 'username': 'admin', - 'password': 'passw0rd', - 'vserver': 'fake_vserver'} - - -@ddt.ddt -class NetAppCmodeClientTestCase(test.TestCase): - - def setUp(self): - super(NetAppCmodeClientTestCase, self).setUp() - - self.mock_object(client_cmode.Client, '_init_ssh_client') - with mock.patch.object(client_cmode.Client, - 'get_ontapi_version', - return_value=(1, 20)): - self.client = client_cmode.Client(**CONNECTION_INFO) - - self.client.ssh_client = mock.MagicMock() - self.client.connection = mock.MagicMock() - self.connection = self.client.connection - - self.vserver = CONNECTION_INFO['vserver'] - self.fake_volume = six.text_type(uuid.uuid4()) - self.fake_lun = six.text_type(uuid.uuid4()) - self.mock_send_request = self.mock_object(self.client, 'send_request') - - def _mock_api_error(self, code='fake'): - return mock.Mock(side_effect=netapp_api.NaApiError(code=code)) - - def test_has_records(self): - - result = self.client._has_records(netapp_api.NaElement( - fake_client.QOS_POLICY_GROUP_GET_ITER_RESPONSE)) - - self.assertTrue(result) - - def test_has_records_not_found(self): - - result = self.client._has_records( - netapp_api.NaElement(fake_client.NO_RECORDS_RESPONSE)) - - self.assertFalse(result) - - 
@ddt.data((fake_client.AGGR_GET_ITER_RESPONSE, 2), - (fake_client.NO_RECORDS_RESPONSE, 0)) - @ddt.unpack - def test_get_record_count(self, response, expected): - - api_response = netapp_api.NaElement(response) - - result = self.client._get_record_count(api_response) - - self.assertEqual(expected, result) - - def test_get_records_count_invalid(self): - - api_response = netapp_api.NaElement( - fake_client.INVALID_GET_ITER_RESPONSE_NO_RECORDS) - - self.assertRaises(exception.NetAppDriverException, - self.client._get_record_count, - api_response) - - @ddt.data(True, False) - def test_send_iter_request(self, enable_tunneling): - - api_responses = [ - netapp_api.NaElement( - fake_client.STORAGE_DISK_GET_ITER_RESPONSE_PAGE_1), - netapp_api.NaElement( - fake_client.STORAGE_DISK_GET_ITER_RESPONSE_PAGE_2), - netapp_api.NaElement( - fake_client.STORAGE_DISK_GET_ITER_RESPONSE_PAGE_3), - ] - mock_send_request = self.mock_object( - self.client, 'send_request', - side_effect=copy.deepcopy(api_responses)) - - storage_disk_get_iter_args = { - 'desired-attributes': { - 'storage-disk-info': { - 'disk-name': None, - } - } - } - result = self.client.send_iter_request( - 'storage-disk-get-iter', api_args=storage_disk_get_iter_args, - enable_tunneling=enable_tunneling, max_page_length=10) - - num_records = result.get_child_content('num-records') - self.assertEqual('28', num_records) - next_tag = result.get_child_content('next-tag') - self.assertEqual('', next_tag) - - args1 = copy.deepcopy(storage_disk_get_iter_args) - args1['max-records'] = 10 - args2 = copy.deepcopy(storage_disk_get_iter_args) - args2['max-records'] = 10 - args2['tag'] = 'next_tag_1' - args3 = copy.deepcopy(storage_disk_get_iter_args) - args3['max-records'] = 10 - args3['tag'] = 'next_tag_2' - - mock_send_request.assert_has_calls([ - mock.call('storage-disk-get-iter', args1, - enable_tunneling=enable_tunneling), - mock.call('storage-disk-get-iter', args2, - enable_tunneling=enable_tunneling), - 
mock.call('storage-disk-get-iter', args3, - enable_tunneling=enable_tunneling), - ]) - - def test_send_iter_request_single_page(self): - - api_response = netapp_api.NaElement( - fake_client.STORAGE_DISK_GET_ITER_RESPONSE) - mock_send_request = self.mock_object(self.client, 'send_request', - return_value=api_response) - - storage_disk_get_iter_args = { - 'desired-attributes': { - 'storage-disk-info': { - 'disk-name': None, - } - } - } - result = self.client.send_iter_request( - 'storage-disk-get-iter', api_args=storage_disk_get_iter_args, - max_page_length=10) - - num_records = result.get_child_content('num-records') - self.assertEqual('4', num_records) - - args = copy.deepcopy(storage_disk_get_iter_args) - args['max-records'] = 10 - - mock_send_request.assert_has_calls([ - mock.call('storage-disk-get-iter', args, enable_tunneling=True), - ]) - - def test_send_iter_request_not_found(self): - - api_response = netapp_api.NaElement(fake_client.NO_RECORDS_RESPONSE) - mock_send_request = self.mock_object(self.client, 'send_request', - return_value=api_response) - - result = self.client.send_iter_request('storage-disk-get-iter') - - num_records = result.get_child_content('num-records') - self.assertEqual('0', num_records) - - args = {'max-records': client_cmode.DEFAULT_MAX_PAGE_LENGTH} - - mock_send_request.assert_has_calls([ - mock.call('storage-disk-get-iter', args, enable_tunneling=True), - ]) - - @ddt.data(fake_client.INVALID_GET_ITER_RESPONSE_NO_ATTRIBUTES, - fake_client.INVALID_GET_ITER_RESPONSE_NO_RECORDS) - def test_send_iter_request_invalid(self, fake_response): - - api_response = netapp_api.NaElement(fake_response) - self.mock_object(self.client, - 'send_request', - return_value=api_response) - - self.assertRaises(exception.NetAppDriverException, - self.client.send_iter_request, - 'storage-disk-get-iter') - - def test_list_vservers(self): - - api_response = netapp_api.NaElement( - fake_client.VSERVER_DATA_LIST_RESPONSE) - self.mock_object(self.client, - 
'send_iter_request', - return_value=api_response) - - result = self.client.list_vservers() - - vserver_get_iter_args = { - 'query': { - 'vserver-info': { - 'vserver-type': 'data' - } - }, - 'desired-attributes': { - 'vserver-info': { - 'vserver-name': None - } - } - } - self.client.send_iter_request.assert_has_calls([ - mock.call('vserver-get-iter', vserver_get_iter_args, - enable_tunneling=False)]) - self.assertListEqual([fake_client.VSERVER_NAME], result) - - def test_list_vservers_node_type(self): - - api_response = netapp_api.NaElement( - fake_client.VSERVER_DATA_LIST_RESPONSE) - self.mock_object(self.client, - 'send_iter_request', - return_value=api_response) - - result = self.client.list_vservers(vserver_type='node') - - vserver_get_iter_args = { - 'query': { - 'vserver-info': { - 'vserver-type': 'node' - } - }, - 'desired-attributes': { - 'vserver-info': { - 'vserver-name': None - } - } - } - self.client.send_iter_request.assert_has_calls([ - mock.call('vserver-get-iter', vserver_get_iter_args, - enable_tunneling=False)]) - self.assertListEqual([fake_client.VSERVER_NAME], result) - - def test_list_vservers_not_found(self): - - api_response = netapp_api.NaElement( - fake_client.NO_RECORDS_RESPONSE) - self.mock_object(self.client, - 'send_request', - return_value=api_response) - - result = self.client.list_vservers(vserver_type='data') - - self.assertListEqual([], result) - - @ddt.data((1, 21), (1, 100), (2, 0)) - def test_get_ems_log_destination_vserver(self, ontapi_version): - - self.mock_object(self.client, - 'get_ontapi_version', - return_value=ontapi_version) - mock_list_vservers = self.mock_object( - self.client, - 'list_vservers', - return_value=[fake_client.ADMIN_VSERVER_NAME]) - - result = self.client._get_ems_log_destination_vserver() - - mock_list_vservers.assert_called_once_with(vserver_type='admin') - self.assertEqual(fake_client.ADMIN_VSERVER_NAME, result) - - def test_get_ems_log_destination_vserver_legacy(self): - - 
self.mock_object(self.client, - 'get_ontapi_version', - return_value=(1, 15)) - mock_list_vservers = self.mock_object( - self.client, - 'list_vservers', - return_value=[fake_client.NODE_VSERVER_NAME]) - - result = self.client._get_ems_log_destination_vserver() - - mock_list_vservers.assert_called_once_with(vserver_type='node') - self.assertEqual(fake_client.NODE_VSERVER_NAME, result) - - def test_get_ems_log_destination_no_cluster_creds(self): - - self.mock_object(self.client, - 'get_ontapi_version', - return_value=(1, 21)) - mock_list_vservers = self.mock_object( - self.client, - 'list_vservers', - side_effect=[[], [fake_client.VSERVER_NAME]]) - - result = self.client._get_ems_log_destination_vserver() - - mock_list_vservers.assert_has_calls([ - mock.call(vserver_type='admin'), - mock.call(vserver_type='data')]) - self.assertEqual(fake_client.VSERVER_NAME, result) - - def test_get_ems_log_destination_vserver_not_found(self): - - self.mock_object(self.client, - 'get_ontapi_version', - return_value=(1, 21)) - mock_list_vservers = self.mock_object( - self.client, - 'list_vservers', - return_value=[]) - - self.assertRaises(exception.NotFound, - self.client._get_ems_log_destination_vserver) - - mock_list_vservers.assert_has_calls([ - mock.call(vserver_type='admin'), - mock.call(vserver_type='data'), - mock.call(vserver_type='node')]) - - def test_get_iscsi_target_details_no_targets(self): - response = netapp_api.NaElement( - etree.XML(""" - 1 - - """)) - self.connection.invoke_successfully.return_value = response - target_list = self.client.get_iscsi_target_details() - - self.assertEqual([], target_list) - - def test_get_iscsi_target_details(self): - expected_target = { - "address": "127.0.0.1", - "port": "1337", - "interface-enabled": "true", - "tpgroup-tag": "7777", - } - response = netapp_api.NaElement( - etree.XML(""" - 1 - - - %(address)s - %(port)s - %(interface-enabled)s - %(tpgroup-tag)s - - - """ % expected_target)) - 
self.connection.invoke_successfully.return_value = response - - target_list = self.client.get_iscsi_target_details() - - self.assertEqual([expected_target], target_list) - - def test_get_iscsi_service_details_with_no_iscsi_service(self): - response = netapp_api.NaElement( - etree.XML(""" - 0 - """)) - self.connection.invoke_successfully.return_value = response - - iqn = self.client.get_iscsi_service_details() - - self.assertIsNone(iqn) - - def test_get_iscsi_service_details(self): - expected_iqn = 'iqn.1998-01.org.openstack.iscsi:name1' - response = netapp_api.NaElement( - etree.XML(""" - 1 - - - %s - - - """ % expected_iqn)) - self.connection.invoke_successfully.return_value = response - - iqn = self.client.get_iscsi_service_details() - - self.assertEqual(expected_iqn, iqn) - - def test_get_lun_list(self): - response = netapp_api.NaElement( - etree.XML(""" - 2 - - - - - - - """)) - self.connection.invoke_successfully.return_value = response - - luns = self.client.get_lun_list() - - self.assertEqual(2, len(luns)) - - def test_get_lun_list_with_multiple_pages(self): - response = netapp_api.NaElement( - etree.XML(""" - 2 - - - - - fake-next - """)) - response_2 = netapp_api.NaElement( - etree.XML(""" - 2 - - - - - """)) - self.connection.invoke_successfully.side_effect = [response, - response_2] - - luns = self.client.get_lun_list() - - self.assertEqual(4, len(luns)) - - def test_get_lun_map_no_luns_mapped(self): - path = '/vol/%s/%s' % (self.fake_volume, self.fake_lun) - response = netapp_api.NaElement( - etree.XML(""" - 0 - - """)) - self.connection.invoke_successfully.return_value = response - - lun_map = self.client.get_lun_map(path) - - self.assertEqual([], lun_map) - - def test_get_lun_map(self): - path = '/vol/%s/%s' % (self.fake_volume, self.fake_lun) - expected_lun_map = { - "initiator-group": "igroup", - "lun-id": "1337", - "vserver": "vserver", - } - response = netapp_api.NaElement( - etree.XML(""" - 1 - - - %(lun-id)s - %(initiator-group)s - %(vserver)s - 
- - """ % expected_lun_map)) - self.connection.invoke_successfully.return_value = response - - lun_map = self.client.get_lun_map(path) - - self.assertEqual([expected_lun_map], lun_map) - - def test_get_lun_map_multiple_pages(self): - path = '/vol/%s/%s' % (self.fake_volume, self.fake_lun) - expected_lun_map = { - "initiator-group": "igroup", - "lun-id": "1337", - "vserver": "vserver", - } - response = netapp_api.NaElement( - etree.XML(""" - 1 - - - %(lun-id)s - %(initiator-group)s - %(vserver)s - - - blah - """ % expected_lun_map)) - response_2 = netapp_api.NaElement( - etree.XML(""" - 1 - - - %(lun-id)s - %(initiator-group)s - %(vserver)s - - - """ % expected_lun_map)) - self.connection.invoke_successfully.side_effect = [response, - response_2] - - lun_map = self.client.get_lun_map(path) - - self.assertEqual([expected_lun_map, expected_lun_map], lun_map) - - def test_get_igroup_by_initiator_none_found(self): - initiator = 'initiator' - response = netapp_api.NaElement( - etree.XML(""" - 0 - - """)) - self.connection.invoke_successfully.return_value = response - - igroup = self.client.get_igroup_by_initiators([initiator]) - - self.assertEqual([], igroup) - - def test_get_igroup_by_initiators(self): - initiators = ['11:22:33:44:55:66:77:88'] - expected_igroup = { - 'initiator-group-os-type': 'default', - 'initiator-group-type': 'fcp', - 'initiator-group-name': 'openstack-igroup1', - } - - response = netapp_api.NaElement( - etree.XML(""" - - - true - %(initiator-group-name)s - default - false - 0 - %(initiator-group-type)s - true - f8aa707a-57fa-11e4-ad08-123478563412 - - false - - - 11:22:33:44:55:66:77:88 - - - cinder-iscsi - - - 1 - """ % expected_igroup)) - self.connection.invoke_successfully.return_value = response - - igroups = self.client.get_igroup_by_initiators(initiators) - - # make these lists of dicts comparable using hashable dictionaries - igroups = set( - [netapp_utils.hashabledict(igroup) for igroup in igroups]) - expected = 
set([netapp_utils.hashabledict(expected_igroup)]) - - self.assertSetEqual(igroups, expected) - - def test_get_igroup_by_initiators_multiple(self): - initiators = ['11:22:33:44:55:66:77:88', '88:77:66:55:44:33:22:11'] - expected_igroup = { - 'initiator-group-os-type': 'default', - 'initiator-group-type': 'fcp', - 'initiator-group-name': 'openstack-igroup1', - } - - response = netapp_api.NaElement( - etree.XML(""" - - - true - %(initiator-group-name)s - default - false - 0 - %(initiator-group-type)s - true - f8aa707a-57fa-11e4-ad08-123478563412 - - false - - - 11:22:33:44:55:66:77:88 - - - 88:77:66:55:44:33:22:11 - - - cinder-iscsi - - - 1 - """ % expected_igroup)) - self.connection.invoke_successfully.return_value = response - - igroups = self.client.get_igroup_by_initiators(initiators) - - # make these lists of dicts comparable using hashable dictionaries - igroups = set( - [netapp_utils.hashabledict(igroup) for igroup in igroups]) - expected = set([netapp_utils.hashabledict(expected_igroup)]) - - self.assertSetEqual(igroups, expected) - - def test_get_igroup_by_initiators_multiple_pages(self): - initiator = '11:22:33:44:55:66:77:88' - expected_igroup1 = { - 'initiator-group-os-type': 'default', - 'initiator-group-type': 'fcp', - 'initiator-group-name': 'openstack-igroup1', - } - expected_igroup2 = { - 'initiator-group-os-type': 'default', - 'initiator-group-type': 'fcp', - 'initiator-group-name': 'openstack-igroup2', - } - response_1 = netapp_api.NaElement( - etree.XML(""" - - - true - %(initiator-group-name)s - default - false - 0 - %(initiator-group-type)s - true - f8aa707a-57fa-11e4-ad08-123478563412 - - false - - - 11:22:33:44:55:66:77:88 - - - cinder-iscsi - - - 12345 - 1 - """ % expected_igroup1)) - response_2 = netapp_api.NaElement( - etree.XML(""" - - - true - %(initiator-group-name)s - default - false - 0 - %(initiator-group-type)s - true - f8aa707a-57fa-11e4-ad08-123478563412 - - false - - - 11:22:33:44:55:66:77:88 - - - cinder-iscsi - - - 1 - """ % 
expected_igroup2)) - self.connection.invoke_successfully.side_effect = [response_1, - response_2] - - igroups = self.client.get_igroup_by_initiators([initiator]) - - # make these lists of dicts comparable using hashable dictionaries - igroups = set( - [netapp_utils.hashabledict(igroup) for igroup in igroups]) - expected = set([netapp_utils.hashabledict(expected_igroup1), - netapp_utils.hashabledict(expected_igroup2)]) - - self.assertSetEqual(igroups, expected) - - def test_clone_lun(self): - self.client.clone_lun( - 'volume', 'fakeLUN', 'newFakeLUN', - qos_policy_group_name=fake.QOS_POLICY_GROUP_NAME) - - self.assertEqual(1, self.connection.invoke_successfully.call_count) - - @ddt.data({'supports_is_backup': True, 'is_snapshot': True}, - {'supports_is_backup': True, 'is_snapshot': False}, - {'supports_is_backup': False, 'is_snapshot': True}, - {'supports_is_backup': False, 'is_snapshot': False}) - @ddt.unpack - def test_clone_lun_is_snapshot(self, supports_is_backup, is_snapshot): - - self.client.features.add_feature('BACKUP_CLONE_PARAM', - supported=supports_is_backup) - - self.client.clone_lun( - 'volume', 'fakeLUN', 'newFakeLUN', is_snapshot=is_snapshot) - - clone_create_args = { - 'volume': 'volume', - 'source-path': 'fakeLUN', - 'destination-path': 'newFakeLUN', - 'space-reserve': 'true', - } - if is_snapshot and supports_is_backup: - clone_create_args['is-backup'] = 'true' - self.connection.invoke_successfully.assert_called_once_with( - netapp_api.NaElement.create_node_with_children( - 'clone-create', **clone_create_args), True) - - def test_clone_lun_multiple_zapi_calls(self): - """Test for when lun clone requires more than one zapi call.""" - - # Max block-ranges per call = 32, max blocks per range = 2^24 - # Force 2 calls - bc = 2 ** 24 * 32 * 2 - self.client.clone_lun('volume', 'fakeLUN', 'newFakeLUN', - block_count=bc) - self.assertEqual(2, self.connection.invoke_successfully.call_count) - - def test_get_lun_by_args(self): - response = 
netapp_api.NaElement( - etree.XML(""" - 2 - - - - - """)) - self.connection.invoke_successfully.return_value = response - - lun = self.client.get_lun_by_args() - - self.assertEqual(1, len(lun)) - - def test_get_lun_by_args_no_lun_found(self): - response = netapp_api.NaElement( - etree.XML(""" - 2 - - - """)) - self.connection.invoke_successfully.return_value = response - - lun = self.client.get_lun_by_args() - - self.assertEqual(0, len(lun)) - - def test_get_lun_by_args_with_args_specified(self): - path = '/vol/%s/%s' % (self.fake_volume, self.fake_lun) - response = netapp_api.NaElement( - etree.XML(""" - 2 - - - - - """)) - self.connection.invoke_successfully.return_value = response - - lun = self.client.get_lun_by_args(path=path) - - __, _args, __ = self.connection.invoke_successfully.mock_calls[0] - actual_request = _args[0] - query = actual_request.get_child_by_name('query') - lun_info_args = query.get_child_by_name('lun-info').get_children() - - # Assert request is made with correct arguments - self.assertEqual('path', lun_info_args[0].get_name()) - self.assertEqual(path, lun_info_args[0].get_content()) - - self.assertEqual(1, len(lun)) - - def test_file_assign_qos(self): - - api_args = { - 'volume': fake.FLEXVOL, - 'qos-policy-group-name': fake.QOS_POLICY_GROUP_NAME, - 'file': fake.NFS_FILE_PATH, - 'vserver': self.vserver - } - - self.client.file_assign_qos( - fake.FLEXVOL, fake.QOS_POLICY_GROUP_NAME, fake.NFS_FILE_PATH) - - self.mock_send_request.assert_has_calls([ - mock.call('file-assign-qos', api_args, False)]) - - def test_set_lun_qos_policy_group(self): - - api_args = { - 'path': fake.LUN_PATH, - 'qos-policy-group': fake.QOS_POLICY_GROUP_NAME, - } - - self.client.set_lun_qos_policy_group( - fake.LUN_PATH, fake.QOS_POLICY_GROUP_NAME) - - self.mock_send_request.assert_has_calls([ - mock.call('lun-set-qos-policy-group', api_args)]) - - def test_provision_qos_policy_group_no_qos_policy_group_info(self): - - 
self.client.provision_qos_policy_group(qos_policy_group_info=None) - - self.assertEqual(0, self.connection.qos_policy_group_create.call_count) - - def test_provision_qos_policy_group_legacy_qos_policy_group_info(self): - - self.client.provision_qos_policy_group( - qos_policy_group_info=fake.QOS_POLICY_GROUP_INFO_LEGACY) - - self.assertEqual(0, self.connection.qos_policy_group_create.call_count) - - def test_provision_qos_policy_group_with_qos_spec_create(self): - - self.mock_object(self.client, - 'qos_policy_group_exists', - return_value=False) - self.mock_object(self.client, 'qos_policy_group_create') - self.mock_object(self.client, 'qos_policy_group_modify') - - self.client.provision_qos_policy_group(fake.QOS_POLICY_GROUP_INFO) - - self.client.qos_policy_group_create.assert_has_calls([ - mock.call(fake.QOS_POLICY_GROUP_NAME, fake.MAX_THROUGHPUT)]) - self.assertFalse(self.client.qos_policy_group_modify.called) - - def test_provision_qos_policy_group_with_qos_spec_modify(self): - - self.mock_object(self.client, - 'qos_policy_group_exists', - return_value=True) - self.mock_object(self.client, 'qos_policy_group_create') - self.mock_object(self.client, 'qos_policy_group_modify') - - self.client.provision_qos_policy_group(fake.QOS_POLICY_GROUP_INFO) - - self.assertFalse(self.client.qos_policy_group_create.called) - self.client.qos_policy_group_modify.assert_has_calls([ - mock.call(fake.QOS_POLICY_GROUP_NAME, fake.MAX_THROUGHPUT)]) - - def test_qos_policy_group_exists(self): - - self.mock_send_request.return_value = netapp_api.NaElement( - fake_client.QOS_POLICY_GROUP_GET_ITER_RESPONSE) - - result = self.client.qos_policy_group_exists( - fake.QOS_POLICY_GROUP_NAME) - - api_args = { - 'query': { - 'qos-policy-group-info': { - 'policy-group': fake.QOS_POLICY_GROUP_NAME, - }, - }, - 'desired-attributes': { - 'qos-policy-group-info': { - 'policy-group': None, - }, - }, - } - self.mock_send_request.assert_has_calls([ - mock.call('qos-policy-group-get-iter', api_args, 
False)]) - self.assertTrue(result) - - def test_qos_policy_group_exists_not_found(self): - - self.mock_send_request.return_value = netapp_api.NaElement( - fake_client.NO_RECORDS_RESPONSE) - - result = self.client.qos_policy_group_exists( - fake.QOS_POLICY_GROUP_NAME) - - self.assertFalse(result) - - def test_qos_policy_group_create(self): - - api_args = { - 'policy-group': fake.QOS_POLICY_GROUP_NAME, - 'max-throughput': fake.MAX_THROUGHPUT, - 'vserver': self.vserver, - } - - self.client.qos_policy_group_create( - fake.QOS_POLICY_GROUP_NAME, fake.MAX_THROUGHPUT) - - self.mock_send_request.assert_has_calls([ - mock.call('qos-policy-group-create', api_args, False)]) - - def test_qos_policy_group_modify(self): - - api_args = { - 'policy-group': fake.QOS_POLICY_GROUP_NAME, - 'max-throughput': fake.MAX_THROUGHPUT, - } - - self.client.qos_policy_group_modify( - fake.QOS_POLICY_GROUP_NAME, fake.MAX_THROUGHPUT) - - self.mock_send_request.assert_has_calls([ - mock.call('qos-policy-group-modify', api_args, False)]) - - def test_qos_policy_group_delete(self): - - api_args = { - 'policy-group': fake.QOS_POLICY_GROUP_NAME - } - - self.client.qos_policy_group_delete( - fake.QOS_POLICY_GROUP_NAME) - - self.mock_send_request.assert_has_calls([ - mock.call('qos-policy-group-delete', api_args, False)]) - - def test_qos_policy_group_rename(self): - - new_name = 'new-' + fake.QOS_POLICY_GROUP_NAME - api_args = { - 'policy-group-name': fake.QOS_POLICY_GROUP_NAME, - 'new-name': new_name, - } - - self.client.qos_policy_group_rename( - fake.QOS_POLICY_GROUP_NAME, new_name) - - self.mock_send_request.assert_has_calls([ - mock.call('qos-policy-group-rename', api_args, False)]) - - def test_mark_qos_policy_group_for_deletion_no_qos_policy_group_info(self): - - mock_rename = self.mock_object(self.client, 'qos_policy_group_rename') - mock_remove = self.mock_object(self.client, - 'remove_unused_qos_policy_groups') - - self.client.mark_qos_policy_group_for_deletion( - qos_policy_group_info=None) 
- - self.assertEqual(0, mock_rename.call_count) - self.assertEqual(0, mock_remove.call_count) - - def test_mark_qos_policy_group_for_deletion_legacy_qos_policy(self): - - mock_rename = self.mock_object(self.client, 'qos_policy_group_rename') - mock_remove = self.mock_object(self.client, - 'remove_unused_qos_policy_groups') - - self.client.mark_qos_policy_group_for_deletion( - qos_policy_group_info=fake.QOS_POLICY_GROUP_INFO_LEGACY) - - self.assertEqual(0, mock_rename.call_count) - self.assertEqual(1, mock_remove.call_count) - - def test_mark_qos_policy_group_for_deletion_w_qos_spec(self): - - mock_rename = self.mock_object(self.client, 'qos_policy_group_rename') - mock_remove = self.mock_object(self.client, - 'remove_unused_qos_policy_groups') - mock_log = self.mock_object(client_cmode.LOG, 'warning') - new_name = 'deleted_cinder_%s' % fake.QOS_POLICY_GROUP_NAME - - self.client.mark_qos_policy_group_for_deletion( - qos_policy_group_info=fake.QOS_POLICY_GROUP_INFO) - - mock_rename.assert_has_calls([ - mock.call(fake.QOS_POLICY_GROUP_NAME, new_name)]) - self.assertEqual(0, mock_log.call_count) - self.assertEqual(1, mock_remove.call_count) - - def test_mark_qos_policy_group_for_deletion_exception_path(self): - - mock_rename = self.mock_object(self.client, 'qos_policy_group_rename') - mock_rename.side_effect = netapp_api.NaApiError - mock_remove = self.mock_object(self.client, - 'remove_unused_qos_policy_groups') - mock_log = self.mock_object(client_cmode.LOG, 'warning') - new_name = 'deleted_cinder_%s' % fake.QOS_POLICY_GROUP_NAME - - self.client.mark_qos_policy_group_for_deletion( - qos_policy_group_info=fake.QOS_POLICY_GROUP_INFO) - - mock_rename.assert_has_calls([ - mock.call(fake.QOS_POLICY_GROUP_NAME, new_name)]) - self.assertEqual(1, mock_log.call_count) - self.assertEqual(1, mock_remove.call_count) - - def test_remove_unused_qos_policy_groups(self): - - mock_log = self.mock_object(client_cmode.LOG, 'debug') - api_args = { - 'query': { - 'qos-policy-group-info': 
{ - 'policy-group': 'deleted_cinder_*', - 'vserver': self.vserver, - } - }, - 'max-records': 3500, - 'continue-on-failure': 'true', - 'return-success-list': 'false', - 'return-failure-list': 'false', - } - - self.client.remove_unused_qos_policy_groups() - - self.mock_send_request.assert_has_calls([ - mock.call('qos-policy-group-delete-iter', api_args, False)]) - self.assertEqual(0, mock_log.call_count) - - def test_remove_unused_qos_policy_groups_api_error(self): - - mock_log = self.mock_object(client_cmode.LOG, 'debug') - api_args = { - 'query': { - 'qos-policy-group-info': { - 'policy-group': 'deleted_cinder_*', - 'vserver': self.vserver, - } - }, - 'max-records': 3500, - 'continue-on-failure': 'true', - 'return-success-list': 'false', - 'return-failure-list': 'false', - } - self.mock_send_request.side_effect = netapp_api.NaApiError - - self.client.remove_unused_qos_policy_groups() - - self.mock_send_request.assert_has_calls([ - mock.call('qos-policy-group-delete-iter', api_args, False)]) - self.assertEqual(1, mock_log.call_count) - - @mock.patch('cinder.volume.drivers.netapp.utils.resolve_hostname', - return_value='192.168.1.101') - def test_get_if_info_by_ip_not_found(self, mock_resolve_hostname): - fake_ip = '192.168.1.101' - response = netapp_api.NaElement( - etree.XML(""" - 0 - - - """)) - self.connection.invoke_successfully.return_value = response - - self.assertRaises(exception.NotFound, self.client.get_if_info_by_ip, - fake_ip) - - @mock.patch('cinder.volume.drivers.netapp.utils.resolve_hostname', - return_value='192.168.1.101') - def test_get_if_info_by_ip(self, mock_resolve_hostname): - fake_ip = '192.168.1.101' - response = netapp_api.NaElement( - etree.XML(""" - 1 - - - - - """)) - self.connection.invoke_successfully.return_value = response - - results = self.client.get_if_info_by_ip(fake_ip) - - self.assertEqual(1, len(results)) - - def test_get_vol_by_junc_vserver_not_found(self): - fake_vserver = 'fake_vserver' - fake_junc = 'fake_junction_path' - 
response = netapp_api.NaElement( - etree.XML(""" - 0 - - - """)) - self.connection.invoke_successfully.return_value = response - - self.assertRaises(exception.NotFound, - self.client.get_vol_by_junc_vserver, - fake_vserver, fake_junc) - - def test_get_vol_by_junc_vserver(self): - fake_vserver = 'fake_vserver' - fake_junc = 'fake_junction_path' - expected_flex_vol = 'fake_flex_vol' - response = netapp_api.NaElement( - etree.XML(""" - 1 - - - - %(flex_vol)s - - - - """ % {'flex_vol': expected_flex_vol})) - self.connection.invoke_successfully.return_value = response - - actual_flex_vol = self.client.get_vol_by_junc_vserver(fake_vserver, - fake_junc) - - self.assertEqual(expected_flex_vol, actual_flex_vol) - - def test_clone_file(self): - expected_flex_vol = "fake_flex_vol" - expected_src_path = "fake_src_path" - expected_dest_path = "fake_dest_path" - self.connection.get_api_version.return_value = (1, 20) - - self.client.clone_file(expected_flex_vol, expected_src_path, - expected_dest_path, self.vserver, - source_snapshot=fake.CG_SNAPSHOT_ID) - - __, _args, __ = self.connection.invoke_successfully.mock_calls[0] - actual_request = _args[0] - actual_flex_vol = actual_request.get_child_by_name('volume') \ - .get_content() - actual_src_path = actual_request \ - .get_child_by_name('source-path').get_content() - actual_dest_path = actual_request.get_child_by_name( - 'destination-path').get_content() - - self.assertEqual(expected_flex_vol, actual_flex_vol) - self.assertEqual(expected_src_path, actual_src_path) - self.assertEqual(expected_dest_path, actual_dest_path) - req_snapshot_child = actual_request.get_child_by_name('snapshot-name') - self.assertEqual(fake.CG_SNAPSHOT_ID, req_snapshot_child.get_content()) - - self.assertIsNone(actual_request.get_child_by_name( - 'destination-exists')) - - def test_clone_file_when_destination_exists(self): - expected_flex_vol = "fake_flex_vol" - expected_src_path = "fake_src_path" - expected_dest_path = "fake_dest_path" - 
self.connection.get_api_version.return_value = (1, 20) - - self.client.clone_file(expected_flex_vol, expected_src_path, - expected_dest_path, self.vserver, - dest_exists=True) - - __, _args, __ = self.connection.invoke_successfully.mock_calls[0] - actual_request = _args[0] - actual_flex_vol = actual_request.get_child_by_name('volume') \ - .get_content() - actual_src_path = actual_request \ - .get_child_by_name('source-path').get_content() - actual_dest_path = actual_request.get_child_by_name( - 'destination-path').get_content() - - self.assertEqual(expected_flex_vol, actual_flex_vol) - self.assertEqual(expected_src_path, actual_src_path) - self.assertEqual(expected_dest_path, actual_dest_path) - self.assertEqual('true', - actual_request.get_child_by_name( - 'destination-exists').get_content()) - - def test_clone_file_when_destination_exists_and_version_less_than_1_20( - self): - expected_flex_vol = "fake_flex_vol" - expected_src_path = "fake_src_path" - expected_dest_path = "fake_dest_path" - self.connection.get_api_version.return_value = (1, 19) - - self.client.clone_file(expected_flex_vol, expected_src_path, - expected_dest_path, self.vserver, - dest_exists=True) - - __, _args, __ = self.connection.invoke_successfully.mock_calls[0] - actual_request = _args[0] - actual_flex_vol = actual_request.get_child_by_name('volume') \ - .get_content() - actual_src_path = actual_request \ - .get_child_by_name('source-path').get_content() - actual_dest_path = actual_request.get_child_by_name( - 'destination-path').get_content() - - self.assertEqual(expected_flex_vol, actual_flex_vol) - self.assertEqual(expected_src_path, actual_src_path) - self.assertEqual(expected_dest_path, actual_dest_path) - self.assertIsNone(actual_request.get_child_by_name( - 'destination-exists')) - - @ddt.data({'supports_is_backup': True, 'is_snapshot': True}, - {'supports_is_backup': True, 'is_snapshot': False}, - {'supports_is_backup': False, 'is_snapshot': True}, - {'supports_is_backup': False, 
'is_snapshot': False}) - @ddt.unpack - def test_clone_file_is_snapshot(self, supports_is_backup, is_snapshot): - - self.connection.get_api_version.return_value = (1, 20) - self.client.features.add_feature('BACKUP_CLONE_PARAM', - supported=supports_is_backup) - - self.client.clone_file( - 'volume', 'fake_source', 'fake_destination', 'fake_vserver', - is_snapshot=is_snapshot) - - clone_create_args = { - 'volume': 'volume', - 'source-path': 'fake_source', - 'destination-path': 'fake_destination', - } - if is_snapshot and supports_is_backup: - clone_create_args['is-backup'] = 'true' - self.connection.invoke_successfully.assert_called_once_with( - netapp_api.NaElement.create_node_with_children( - 'clone-create', **clone_create_args), True) - - def test_get_file_usage(self): - expected_bytes = "2048" - fake_vserver = 'fake_vserver' - fake_path = 'fake_path' - response = netapp_api.NaElement( - etree.XML(""" - %(unique-bytes)s - """ % {'unique-bytes': expected_bytes})) - self.connection.invoke_successfully.return_value = response - - actual_bytes = self.client.get_file_usage(fake_vserver, fake_path) - - self.assertEqual(expected_bytes, actual_bytes) - - def test_check_cluster_api(self): - - self.client.features.USER_CAPABILITY_LIST = True - mock_check_cluster_api_legacy = self.mock_object( - self.client, '_check_cluster_api_legacy') - mock_check_cluster_api = self.mock_object( - self.client, '_check_cluster_api', return_value=True) - - result = self.client.check_cluster_api('object', 'operation', 'api') - - self.assertTrue(result) - self.assertFalse(mock_check_cluster_api_legacy.called) - mock_check_cluster_api.assert_called_once_with( - 'object', 'operation', 'api') - - def test_check_cluster_api_legacy(self): - - self.client.features.USER_CAPABILITY_LIST = False - mock_check_cluster_api_legacy = self.mock_object( - self.client, '_check_cluster_api_legacy', return_value=True) - mock_check_cluster_api = self.mock_object( - self.client, '_check_cluster_api') - - result = 
self.client.check_cluster_api('object', 'operation', 'api') - - self.assertTrue(result) - self.assertFalse(mock_check_cluster_api.called) - mock_check_cluster_api_legacy.assert_called_once_with('api') - - def test__check_cluster_api(self): - - api_response = netapp_api.NaElement( - fake_client.SYSTEM_USER_CAPABILITY_GET_ITER_RESPONSE) - self.mock_send_request.return_value = api_response - - result = self.client._check_cluster_api('object', 'operation', 'api') - - system_user_capability_get_iter_args = { - 'query': { - 'capability-info': { - 'object-name': 'object', - 'operation-list': { - 'operation-info': { - 'name': 'operation', - }, - }, - }, - }, - 'desired-attributes': { - 'capability-info': { - 'operation-list': { - 'operation-info': { - 'api-name': None, - }, - }, - }, - }, - } - self.mock_send_request.assert_called_once_with( - 'system-user-capability-get-iter', - system_user_capability_get_iter_args, - False) - - self.assertTrue(result) - - @ddt.data(fake_client.SYSTEM_USER_CAPABILITY_GET_ITER_RESPONSE, - fake_client.NO_RECORDS_RESPONSE) - def test__check_cluster_api_not_found(self, response): - - api_response = netapp_api.NaElement(response) - self.mock_send_request.return_value = api_response - - result = self.client._check_cluster_api('object', 'operation', 'api4') - - self.assertFalse(result) - - @ddt.data('volume-get-iter', 'volume-get', 'aggr-options-list-info') - def test__check_cluster_api_legacy(self, api): - - api_response = netapp_api.NaElement(fake_client.NO_RECORDS_RESPONSE) - self.mock_send_request.return_value = api_response - - result = self.client._check_cluster_api_legacy(api) - - self.assertTrue(result) - self.mock_send_request.assert_called_once_with(api, - enable_tunneling=False) - - @ddt.data(netapp_api.EAPIPRIVILEGE, netapp_api.EAPINOTFOUND) - def test__check_cluster_api_legacy_insufficient_privileges(self, code): - - self.mock_send_request.side_effect = netapp_api.NaApiError(code=code) - - result = 
self.client._check_cluster_api_legacy('volume-get-iter') - - self.assertFalse(result) - self.mock_send_request.assert_called_once_with('volume-get-iter', - enable_tunneling=False) - - def test__check_cluster_api_legacy_api_error(self): - - self.mock_send_request.side_effect = netapp_api.NaApiError() - - result = self.client._check_cluster_api_legacy('volume-get-iter') - - self.assertTrue(result) - self.mock_send_request.assert_called_once_with('volume-get-iter', - enable_tunneling=False) - - def test__check_cluster_api_legacy_invalid_api(self): - - self.assertRaises(ValueError, - self.client._check_cluster_api_legacy, - 'fake_api') - - def test_get_operational_lif_addresses(self): - expected_result = ['1.2.3.4', '99.98.97.96'] - api_response = netapp_api.NaElement( - fake_client.GET_OPERATIONAL_LIF_ADDRESSES_RESPONSE) - self.mock_object(self.client, - 'send_iter_request', - return_value=api_response) - - address_list = self.client.get_operational_lif_addresses() - - net_interface_get_iter_args = { - 'query': { - 'net-interface-info': { - 'operational-status': 'up' - } - }, - 'desired-attributes': { - 'net-interface-info': { - 'address': None, - } - } - } - self.client.send_iter_request.assert_called_once_with( - 'net-interface-get-iter', net_interface_get_iter_args) - - self.assertEqual(expected_result, address_list) - - @ddt.data({'flexvol_path': '/fake/vol'}, - {'flexvol_name': 'fake_volume'}, - {'flexvol_path': '/fake/vol', 'flexvol_name': 'fake_volume'}) - def test_get_flexvol_capacity(self, kwargs): - - api_response = netapp_api.NaElement( - fake_client.VOLUME_GET_ITER_CAPACITY_RESPONSE) - mock_send_iter_request = self.mock_object( - self.client, 'send_iter_request', return_value=api_response) - - capacity = self.client.get_flexvol_capacity(**kwargs) - - volume_id_attributes = {} - if 'flexvol_path' in kwargs: - volume_id_attributes['junction-path'] = kwargs['flexvol_path'] - if 'flexvol_name' in kwargs: - volume_id_attributes['name'] = kwargs['flexvol_name'] 
- - volume_get_iter_args = { - 'query': { - 'volume-attributes': { - 'volume-id-attributes': volume_id_attributes, - } - }, - 'desired-attributes': { - 'volume-attributes': { - 'volume-space-attributes': { - 'size-available': None, - 'size-total': None, - } - } - }, - } - mock_send_iter_request.assert_called_once_with( - 'volume-get-iter', volume_get_iter_args) - - self.assertEqual(fake_client.VOLUME_SIZE_TOTAL, capacity['size-total']) - self.assertEqual(fake_client.VOLUME_SIZE_AVAILABLE, - capacity['size-available']) - - def test_get_flexvol_capacity_not_found(self): - - self.mock_send_request.return_value = netapp_api.NaElement( - fake_client.NO_RECORDS_RESPONSE) - - self.assertRaises(exception.NetAppDriverException, - self.client.get_flexvol_capacity, - flexvol_path='fake_path') - - def test_list_flexvols(self): - - api_response = netapp_api.NaElement( - fake_client.VOLUME_GET_ITER_LIST_RESPONSE) - self.mock_object(self.client, - 'send_iter_request', - return_value=api_response) - - result = self.client.list_flexvols() - - volume_get_iter_args = { - 'query': { - 'volume-attributes': { - 'volume-id-attributes': { - 'type': 'rw', - 'style': 'flex', - }, - 'volume-state-attributes': { - 'is-vserver-root': 'false', - 'is-inconsistent': 'false', - 'is-invalid': 'false', - 'state': 'online', - }, - }, - }, - 'desired-attributes': { - 'volume-attributes': { - 'volume-id-attributes': { - 'name': None, - }, - }, - }, - } - self.client.send_iter_request.assert_called_once_with( - 'volume-get-iter', volume_get_iter_args) - self.assertEqual(list(fake_client.VOLUME_NAMES), result) - - def test_list_flexvols_not_found(self): - - api_response = netapp_api.NaElement( - fake_client.NO_RECORDS_RESPONSE) - self.mock_object(self.client, - 'send_iter_request', - return_value=api_response) - - result = self.client.list_flexvols() - - self.assertEqual([], result) - - def test_get_flexvol(self): - - api_response = netapp_api.NaElement( - fake_client.VOLUME_GET_ITER_SSC_RESPONSE) - 
self.mock_object(self.client, - 'send_iter_request', - return_value=api_response) - - result = self.client.get_flexvol( - flexvol_name=fake_client.VOLUME_NAMES[0], - flexvol_path='/%s' % fake_client.VOLUME_NAMES[0]) - - volume_get_iter_args = { - 'query': { - 'volume-attributes': { - 'volume-id-attributes': { - 'name': fake_client.VOLUME_NAMES[0], - 'junction-path': '/' + fake_client.VOLUME_NAMES[0], - 'type': 'rw', - 'style': 'flex', - }, - 'volume-state-attributes': { - 'is-vserver-root': 'false', - 'is-inconsistent': 'false', - 'is-invalid': 'false', - 'state': 'online', - }, - }, - }, - 'desired-attributes': { - 'volume-attributes': { - 'volume-id-attributes': { - 'name': None, - 'owning-vserver-name': None, - 'junction-path': None, - 'type': None, - 'containing-aggregate-name': None, - }, - 'volume-mirror-attributes': { - 'is-data-protection-mirror': None, - 'is-replica-volume': None, - }, - 'volume-space-attributes': { - 'is-space-guarantee-enabled': None, - 'space-guarantee': None, - 'percentage-snapshot-reserve': None, - 'size': None, - }, - 'volume-qos-attributes': { - 'policy-group-name': None, - }, - 'volume-snapshot-attributes': { - 'snapshot-policy': None, - }, - 'volume-language-attributes': { - 'language-code': None, - } - }, - }, - } - self.client.send_iter_request.assert_called_once_with( - 'volume-get-iter', volume_get_iter_args) - self.assertEqual(fake_client.VOLUME_INFO_SSC, result) - - def test_get_flexvol_not_found(self): - - api_response = netapp_api.NaElement( - fake_client.NO_RECORDS_RESPONSE) - self.mock_object(self.client, - 'send_iter_request', - return_value=api_response) - - self.assertRaises(exception.VolumeBackendAPIException, - self.client.get_flexvol, - flexvol_name=fake_client.VOLUME_NAMES[0]) - - def test_create_flexvol(self): - self.mock_object(self.client, 'send_request') - - self.client.create_flexvol( - fake_client.VOLUME_NAME, fake_client.VOLUME_AGGREGATE_NAME, 100) - - volume_create_args = { - 'containing-aggr-name': 
fake_client.VOLUME_AGGREGATE_NAME, - 'size': '100g', - 'volume': fake_client.VOLUME_NAME, - 'volume-type': 'rw', - 'junction-path': '/%s' % fake_client.VOLUME_NAME, - } - - self.client.send_request.assert_called_once_with('volume-create', - volume_create_args) - - @ddt.data('dp', 'rw', None) - def test_create_volume_with_extra_specs(self, volume_type): - - self.mock_object(self.client, 'enable_flexvol_dedupe') - self.mock_object(self.client, 'enable_flexvol_compression') - self.mock_object(self.client, 'send_request') - - self.client.create_flexvol( - fake_client.VOLUME_NAME, fake_client.VOLUME_AGGREGATE_NAME, 100, - space_guarantee_type='volume', language='en-US', - snapshot_policy='default', dedupe_enabled=True, - compression_enabled=True, snapshot_reserve=15, - volume_type=volume_type) - - volume_create_args = { - 'containing-aggr-name': fake_client.VOLUME_AGGREGATE_NAME, - 'size': '100g', - 'volume': fake_client.VOLUME_NAME, - 'space-reserve': 'volume', - 'language-code': 'en-US', - 'volume-type': volume_type, - 'percentage-snapshot-reserve': '15', - } - - if volume_type != 'dp': - volume_create_args['snapshot-policy'] = 'default' - volume_create_args['junction-path'] = ('/%s' % - fake_client.VOLUME_NAME) - - self.client.send_request.assert_called_with('volume-create', - volume_create_args) - self.client.enable_flexvol_dedupe.assert_called_once_with( - fake_client.VOLUME_NAME) - self.client.enable_flexvol_compression.assert_called_once_with( - fake_client.VOLUME_NAME) - - def test_flexvol_exists(self): - - api_response = netapp_api.NaElement( - fake_client.VOLUME_GET_NAME_RESPONSE) - self.mock_object(self.client, - 'send_iter_request', - return_value=api_response) - - result = self.client.flexvol_exists(fake_client.VOLUME_NAME) - - volume_get_iter_args = { - 'query': { - 'volume-attributes': { - 'volume-id-attributes': { - 'name': fake_client.VOLUME_NAME - } - } - }, - 'desired-attributes': { - 'volume-attributes': { - 'volume-id-attributes': { - 'name': None - 
} - } - } - } - - self.client.send_iter_request.assert_has_calls([ - mock.call('volume-get-iter', volume_get_iter_args)]) - self.assertTrue(result) - - def test_flexvol_exists_not_found(self): - - api_response = netapp_api.NaElement(fake_client.NO_RECORDS_RESPONSE) - self.mock_object(self.client, - 'send_request', - return_value=api_response) - - self.assertFalse(self.client.flexvol_exists(fake_client.VOLUME_NAME)) - - def test_rename_flexvol(self): - - self.mock_object(self.client, 'send_request') - - self.client.rename_flexvol(fake_client.VOLUME_NAME, 'new_name') - - volume_rename_api_args = { - 'volume': fake_client.VOLUME_NAME, - 'new-volume-name': 'new_name', - } - - self.client.send_request.assert_called_once_with( - 'volume-rename', volume_rename_api_args) - - def test_mount_flexvol_default_junction_path(self): - - self.mock_object(self.client, 'send_request') - - self.client.mount_flexvol(fake_client.VOLUME_NAME) - - volume_mount_args = { - 'volume-name': fake_client.VOLUME_NAME, - 'junction-path': '/%s' % fake_client.VOLUME_NAME, - } - - self.client.send_request.assert_has_calls([ - mock.call('volume-mount', volume_mount_args)]) - - def test_mount_flexvol(self): - - self.mock_object(self.client, 'send_request') - fake_path = '/fake_path' - - self.client.mount_flexvol(fake_client.VOLUME_NAME, - junction_path=fake_path) - - volume_mount_args = { - 'volume-name': fake_client.VOLUME_NAME, - 'junction-path': fake_path, - } - - self.client.send_request.assert_has_calls([ - mock.call('volume-mount', volume_mount_args)]) - - def test_enable_flexvol_dedupe(self): - - self.mock_object(self.client, 'send_request') - - self.client.enable_flexvol_dedupe(fake_client.VOLUME_NAME) - - sis_enable_args = {'path': '/vol/%s' % fake_client.VOLUME_NAME} - - self.client.send_request.assert_called_once_with('sis-enable', - sis_enable_args) - - def test_disable_flexvol_dedupe(self): - - self.mock_object(self.client, 'send_request') - - 
self.client.disable_flexvol_dedupe(fake_client.VOLUME_NAME) - - sis_disable_args = {'path': '/vol/%s' % fake_client.VOLUME_NAME} - - self.client.send_request.assert_called_once_with('sis-disable', - sis_disable_args) - - def test_enable_flexvol_compression(self): - - self.mock_object(self.client, 'send_request') - - self.client.enable_flexvol_compression(fake_client.VOLUME_NAME) - - sis_set_config_args = { - 'path': '/vol/%s' % fake_client.VOLUME_NAME, - 'enable-compression': 'true' - } - - self.client.send_request.assert_called_once_with('sis-set-config', - sis_set_config_args) - - def test_disable_flexvol_compression(self): - - self.mock_object(self.client, 'send_request') - - self.client.disable_flexvol_compression(fake_client.VOLUME_NAME) - - sis_set_config_args = { - 'path': '/vol/%s' % fake_client.VOLUME_NAME, - 'enable-compression': 'false' - } - - self.client.send_request.assert_called_once_with('sis-set-config', - sis_set_config_args) - - def test_get_flexvol_dedupe_info(self): - - api_response = netapp_api.NaElement( - fake_client.SIS_GET_ITER_SSC_RESPONSE) - self.mock_object(self.client, - 'send_iter_request', - return_value=api_response) - - result = self.client.get_flexvol_dedupe_info( - fake_client.VOLUME_NAMES[0]) - - sis_get_iter_args = { - 'query': { - 'sis-status-info': { - 'path': '/vol/%s' % fake_client.VOLUME_NAMES[0], - }, - }, - 'desired-attributes': { - 'sis-status-info': { - 'state': None, - 'is-compression-enabled': None, - 'logical-data-size': None, - 'logical-data-limit': None, - }, - }, - } - self.client.send_iter_request.assert_called_once_with( - 'sis-get-iter', sis_get_iter_args) - self.assertEqual(fake_client.VOLUME_DEDUPE_INFO_SSC, result) - - def test_get_flexvol_dedupe_info_no_logical_data_values(self): - - api_response = netapp_api.NaElement( - fake_client.SIS_GET_ITER_SSC_NO_LOGICAL_DATA_RESPONSE) - self.mock_object(self.client, - 'send_iter_request', - return_value=api_response) - - result = 
self.client.get_flexvol_dedupe_info( - fake_client.VOLUME_NAMES[0]) - - self.assertEqual(fake_client.VOLUME_DEDUPE_INFO_SSC_NO_LOGICAL_DATA, - result) - - def test_get_flexvol_dedupe_info_not_found(self): - - api_response = netapp_api.NaElement( - fake_client.NO_RECORDS_RESPONSE) - self.mock_object(self.client, - 'send_iter_request', - return_value=api_response) - - result = self.client.get_flexvol_dedupe_info( - fake_client.VOLUME_NAMES[0]) - - self.assertEqual(fake_client.VOLUME_DEDUPE_INFO_SSC_NO_LOGICAL_DATA, - result) - - def test_get_flexvol_dedupe_info_api_error(self): - - self.mock_object(self.client, - 'send_iter_request', - side_effect=self._mock_api_error()) - - result = self.client.get_flexvol_dedupe_info( - fake_client.VOLUME_NAMES[0]) - - self.assertEqual(fake_client.VOLUME_DEDUPE_INFO_SSC_NO_LOGICAL_DATA, - result) - - def test_get_flexvol_dedupe_info_api_insufficient_privileges(self): - - api_error = netapp_api.NaApiError(code=netapp_api.EAPIPRIVILEGE) - self.mock_object(self.client, - 'send_iter_request', - side_effect=api_error) - - result = self.client.get_flexvol_dedupe_info( - fake_client.VOLUME_NAMES[0]) - - self.assertEqual(fake_client.VOLUME_DEDUPE_INFO_SSC_NO_LOGICAL_DATA, - result) - - def test_get_flexvol_dedupe_used_percent(self): - - self.client.features.add_feature('CLONE_SPLIT_STATUS') - mock_get_flexvol_dedupe_info = self.mock_object( - self.client, 'get_flexvol_dedupe_info', - return_value=fake_client.VOLUME_DEDUPE_INFO_SSC) - mock_get_clone_split_info = self.mock_object( - self.client, 'get_clone_split_info', - return_value=fake_client.VOLUME_CLONE_SPLIT_STATUS) - - result = self.client.get_flexvol_dedupe_used_percent( - fake_client.VOLUME_NAMES[0]) - - self.assertEqual(75.0, result) - mock_get_flexvol_dedupe_info.assert_called_once_with( - fake_client.VOLUME_NAMES[0]) - mock_get_clone_split_info.assert_called_once_with( - fake_client.VOLUME_NAMES[0]) - - def test_get_flexvol_dedupe_used_percent_not_supported(self): - - 
self.client.features.add_feature('CLONE_SPLIT_STATUS', supported=False) - mock_get_flexvol_dedupe_info = self.mock_object( - self.client, 'get_flexvol_dedupe_info', - return_value=fake_client.VOLUME_DEDUPE_INFO_SSC) - mock_get_clone_split_info = self.mock_object( - self.client, 'get_clone_split_info', - return_value=fake_client.VOLUME_CLONE_SPLIT_STATUS) - - result = self.client.get_flexvol_dedupe_used_percent( - fake_client.VOLUME_NAMES[0]) - - self.assertEqual(0.0, result) - self.assertFalse(mock_get_flexvol_dedupe_info.called) - self.assertFalse(mock_get_clone_split_info.called) - - def test_get_clone_split_info(self): - - api_response = netapp_api.NaElement( - fake_client.CLONE_SPLIT_STATUS_RESPONSE) - self.mock_object(self.client, - 'send_request', - return_value=api_response) - - result = self.client.get_clone_split_info(fake_client.VOLUME_NAMES[0]) - - self.assertEqual(fake_client.VOLUME_CLONE_SPLIT_STATUS, result) - self.client.send_request.assert_called_once_with( - 'clone-split-status', {'volume-name': fake_client.VOLUME_NAMES[0]}) - - def test_get_clone_split_info_api_error(self): - - self.mock_object(self.client, - 'send_request', - side_effect=self._mock_api_error()) - - result = self.client.get_clone_split_info(fake_client.VOLUME_NAMES[0]) - - expected = {'unsplit-size': 0, 'unsplit-clone-count': 0} - self.assertEqual(expected, result) - - def test_get_clone_split_info_no_data(self): - - api_response = netapp_api.NaElement( - fake_client.CLONE_SPLIT_STATUS_NO_DATA_RESPONSE) - self.mock_object(self.client, - 'send_request', - return_value=api_response) - - result = self.client.get_clone_split_info(fake_client.VOLUME_NAMES[0]) - - expected = {'unsplit-size': 0, 'unsplit-clone-count': 0} - self.assertEqual(expected, result) - - def test_is_flexvol_mirrored(self): - - api_response = netapp_api.NaElement( - fake_client.SNAPMIRROR_GET_ITER_RESPONSE) - self.mock_object(self.client, - 'send_iter_request', - return_value=api_response) - - result = 
self.client.is_flexvol_mirrored( - fake_client.VOLUME_NAMES[0], fake_client.VOLUME_VSERVER_NAME) - - snapmirror_get_iter_args = { - 'query': { - 'snapmirror-info': { - 'source-vserver': fake_client.VOLUME_VSERVER_NAME, - 'source-volume': fake_client.VOLUME_NAMES[0], - 'mirror-state': 'snapmirrored', - 'relationship-type': 'data_protection', - }, - }, - 'desired-attributes': { - 'snapmirror-info': None, - }, - } - self.client.send_iter_request.assert_called_once_with( - 'snapmirror-get-iter', snapmirror_get_iter_args) - self.assertTrue(result) - - def test_is_flexvol_mirrored_not_mirrored(self): - - api_response = netapp_api.NaElement( - fake_client.NO_RECORDS_RESPONSE) - self.mock_object(self.client, - 'send_request', - return_value=api_response) - - result = self.client.is_flexvol_mirrored( - fake_client.VOLUME_NAMES[0], fake_client.VOLUME_VSERVER_NAME) - - self.assertFalse(result) - - def test_is_flexvol_mirrored_api_error(self): - - self.mock_object(self.client, - 'send_request', - side_effect=self._mock_api_error()) - - result = self.client.is_flexvol_mirrored( - fake_client.VOLUME_NAMES[0], fake_client.VOLUME_VSERVER_NAME) - - self.assertFalse(result) - - def test_is_flexvol_encrypted(self): - - api_response = netapp_api.NaElement( - fake_client.VOLUME_GET_ITER_ENCRYPTION_SSC_RESPONSE) - self.client.features.add_feature('FLEXVOL_ENCRYPTION') - self.mock_object(self.client, - 'send_iter_request', - return_value=api_response) - - result = self.client.is_flexvol_encrypted( - fake_client.VOLUME_NAMES[0], fake_client.VOLUME_VSERVER_NAME) - - volume_get_iter_args = { - 'query': { - 'volume-attributes': { - 'encrypt': 'true', - 'volume-id-attributes': { - 'name': fake_client.VOLUME_NAME, - 'owning-vserver-name': fake_client.VOLUME_VSERVER_NAME, - } - } - }, - 'desired-attributes': { - 'volume-attributes': { - 'encrypt': None, - } - } - } - - self.client.send_iter_request.assert_called_once_with( - 'volume-get-iter', volume_get_iter_args) - - self.assertTrue(result) - 
- def test_is_flexvol_encrypted_unsupported_version(self): - - self.client.features.add_feature('FLEXVOL_ENCRYPTION', supported=False) - result = self.client.is_flexvol_encrypted( - fake_client.VOLUME_NAMES[0], fake_client.VOLUME_VSERVER_NAME) - - self.assertFalse(result) - - def test_is_flexvol_encrypted_no_records_found(self): - - api_response = netapp_api.NaElement( - fake_client.NO_RECORDS_RESPONSE) - self.mock_object(self.client, - 'send_request', - return_value=api_response) - - result = self.client.is_flexvol_encrypted( - fake_client.VOLUME_NAMES[0], fake_client.VOLUME_VSERVER_NAME) - - self.assertFalse(result) - - def test_is_flexvol_encrypted_api_error(self): - - self.mock_object(self.client, - 'send_request', - side_effect=self._mock_api_error()) - - result = self.client.is_flexvol_encrypted( - fake_client.VOLUME_NAMES[0], fake_client.VOLUME_VSERVER_NAME) - - self.assertFalse(result) - - def test_get_aggregates(self): - - api_response = netapp_api.NaElement( - fake_client.AGGR_GET_ITER_RESPONSE) - self.mock_object(self.client, - 'send_request', - return_value=api_response) - - result = self.client._get_aggregates() - - self.client.send_request.assert_has_calls([ - mock.call('aggr-get-iter', {}, enable_tunneling=False)]) - self.assertListEqual( - [aggr.to_string() for aggr in api_response.get_child_by_name( - 'attributes-list').get_children()], - [aggr.to_string() for aggr in result]) - - def test_get_aggregates_with_filters(self): - - api_response = netapp_api.NaElement( - fake_client.AGGR_GET_SPACE_RESPONSE) - self.mock_object(self.client, - 'send_request', - return_value=api_response) - - desired_attributes = { - 'aggr-attributes': { - 'aggregate-name': None, - 'aggr-space-attributes': { - 'size-total': None, - 'size-available': None, - } - } - } - - result = self.client._get_aggregates( - aggregate_names=fake_client.VOLUME_AGGREGATE_NAMES, - desired_attributes=desired_attributes) - - aggr_get_iter_args = { - 'query': { - 'aggr-attributes': { - 
'aggregate-name': '|'.join( - fake_client.VOLUME_AGGREGATE_NAMES), - } - }, - 'desired-attributes': desired_attributes - } - - self.client.send_request.assert_has_calls([ - mock.call('aggr-get-iter', aggr_get_iter_args, - enable_tunneling=False)]) - self.assertListEqual( - [aggr.to_string() for aggr in api_response.get_child_by_name( - 'attributes-list').get_children()], - [aggr.to_string() for aggr in result]) - - def test_get_aggregates_not_found(self): - - api_response = netapp_api.NaElement(fake_client.NO_RECORDS_RESPONSE) - self.mock_object(self.client, - 'send_request', - return_value=api_response) - - result = self.client._get_aggregates() - - self.client.send_request.assert_has_calls([ - mock.call('aggr-get-iter', {}, enable_tunneling=False)]) - self.assertListEqual([], result) - - def test_get_node_for_aggregate(self): - - api_response = netapp_api.NaElement( - fake_client.AGGR_GET_NODE_RESPONSE).get_child_by_name( - 'attributes-list').get_children() - self.mock_object(self.client, - '_get_aggregates', - return_value=api_response) - - result = self.client.get_node_for_aggregate( - fake_client.VOLUME_AGGREGATE_NAME) - - desired_attributes = { - 'aggr-attributes': { - 'aggregate-name': None, - 'aggr-ownership-attributes': { - 'home-name': None, - }, - }, - } - - self.client._get_aggregates.assert_has_calls([ - mock.call( - aggregate_names=[fake_client.VOLUME_AGGREGATE_NAME], - desired_attributes=desired_attributes)]) - - self.assertEqual(fake_client.NODE_NAME, result) - - def test_get_node_for_aggregate_none_requested(self): - - result = self.client.get_node_for_aggregate(None) - - self.assertIsNone(result) - - def test_get_node_for_aggregate_api_not_found(self): - - api_error = self._mock_api_error(netapp_api.EAPINOTFOUND) - self.mock_object(self.client, 'send_request', side_effect=api_error) - - result = self.client.get_node_for_aggregate( - fake_client.VOLUME_AGGREGATE_NAME) - - self.assertIsNone(result) - - def 
test_get_node_for_aggregate_api_error(self): - - self.mock_object(self.client, 'send_request', self._mock_api_error()) - - self.assertRaises(netapp_api.NaApiError, - self.client.get_node_for_aggregate, - fake_client.VOLUME_AGGREGATE_NAME) - - def test_get_node_for_aggregate_not_found(self): - - api_response = netapp_api.NaElement(fake_client.NO_RECORDS_RESPONSE) - self.mock_object(self.client, - 'send_request', - return_value=api_response) - - result = self.client.get_node_for_aggregate( - fake_client.VOLUME_AGGREGATE_NAME) - - self.assertIsNone(result) - - def test_get_aggregate_none_specified(self): - - result = self.client.get_aggregate('') - - self.assertEqual({}, result) - - def test_get_aggregate(self): - - api_response = netapp_api.NaElement( - fake_client.AGGR_GET_ITER_SSC_RESPONSE).get_child_by_name( - 'attributes-list').get_children() - self.mock_object(self.client, - '_get_aggregates', - return_value=api_response) - - result = self.client.get_aggregate(fake_client.VOLUME_AGGREGATE_NAME) - - desired_attributes = { - 'aggr-attributes': { - 'aggregate-name': None, - 'aggr-raid-attributes': { - 'raid-type': None, - 'is-hybrid': None, - }, - }, - } - self.client._get_aggregates.assert_has_calls([ - mock.call( - aggregate_names=[fake_client.VOLUME_AGGREGATE_NAME], - desired_attributes=desired_attributes)]) - - expected = { - 'name': fake_client.VOLUME_AGGREGATE_NAME, - 'raid-type': 'raid_dp', - 'is-hybrid': True, - } - self.assertEqual(expected, result) - - def test_get_aggregate_not_found(self): - - api_response = netapp_api.NaElement(fake_client.NO_RECORDS_RESPONSE) - self.mock_object(self.client, - 'send_request', - return_value=api_response) - - result = self.client.get_aggregate(fake_client.VOLUME_AGGREGATE_NAME) - - self.assertEqual({}, result) - - def test_get_aggregate_api_error(self): - - self.mock_object(self.client, - 'send_request', - side_effect=self._mock_api_error()) - - result = self.client.get_aggregate(fake_client.VOLUME_AGGREGATE_NAME) - - 
self.assertEqual({}, result) - - def test_get_aggregate_api_not_found(self): - - api_error = netapp_api.NaApiError(code=netapp_api.EAPINOTFOUND) - self.mock_object(self.client, - 'send_iter_request', - side_effect=api_error) - - result = self.client.get_aggregate(fake_client.VOLUME_AGGREGATE_NAME) - - self.assertEqual({}, result) - - def test_list_cluster_nodes(self): - - api_response = netapp_api.NaElement( - fake_client.SYSTEM_NODE_GET_ITER_RESPONSE) - self.mock_object(self.client, - 'send_request', - mock.Mock(return_value=api_response)) - - result = self.client.list_cluster_nodes() - - self.assertListEqual([fake_client.NODE_NAME], result) - - def test_list_cluster_nodes_not_found(self): - - api_response = netapp_api.NaElement(fake_client.NO_RECORDS_RESPONSE) - self.mock_object(self.client, - 'send_request', - mock.Mock(return_value=api_response)) - - result = self.client.list_cluster_nodes() - - self.assertListEqual([], result) - - def test_check_for_cluster_credentials(self): - - self.mock_object(self.client, - 'list_cluster_nodes', - mock.Mock(return_value=fake_client.NODE_NAMES)) - - result = self.client.check_for_cluster_credentials() - - self.assertTrue(result) - - def test_check_for_cluster_credentials_not_found(self): - - api_error = netapp_api.NaApiError(code=netapp_api.EAPINOTFOUND) - self.mock_object(self.client, - 'list_cluster_nodes', - side_effect=api_error) - - result = self.client.check_for_cluster_credentials() - - self.assertFalse(result) - - def test_check_for_cluster_credentials_api_error(self): - - self.mock_object(self.client, - 'list_cluster_nodes', - self._mock_api_error()) - - self.assertRaises(netapp_api.NaApiError, - self.client.check_for_cluster_credentials) - - @ddt.data({'types': {'FCAL'}, 'expected': ['FCAL']}, - {'types': {'SATA', 'SSD'}, 'expected': ['SATA', 'SSD']},) - @ddt.unpack - def test_get_aggregate_disk_types(self, types, expected): - - mock_get_aggregate_disk_types = self.mock_object( - self.client, 
'_get_aggregate_disk_types', return_value=types) - - result = self.client.get_aggregate_disk_types( - fake_client.VOLUME_AGGREGATE_NAME) - - self.assertItemsEqual(expected, result) - mock_get_aggregate_disk_types.assert_called_once_with( - fake_client.VOLUME_AGGREGATE_NAME) - - def test_get_aggregate_disk_types_not_found(self): - - mock_get_aggregate_disk_types = self.mock_object( - self.client, '_get_aggregate_disk_types', return_value=set()) - - result = self.client.get_aggregate_disk_types( - fake_client.VOLUME_AGGREGATE_NAME) - - self.assertIsNone(result) - mock_get_aggregate_disk_types.assert_called_once_with( - fake_client.VOLUME_AGGREGATE_NAME) - - def test_get_aggregate_disk_types_api_not_found(self): - - api_error = netapp_api.NaApiError(code=netapp_api.EAPINOTFOUND) - self.mock_object(self.client, - 'send_iter_request', - side_effect=api_error) - - result = self.client.get_aggregate_disk_types( - fake_client.VOLUME_AGGREGATE_NAME) - - self.assertIsNone(result) - - def test_get_aggregate_disk_types_shared(self): - - self.client.features.add_feature('ADVANCED_DISK_PARTITIONING') - mock_get_aggregate_disk_types = self.mock_object( - self.client, '_get_aggregate_disk_types', - side_effect=[set(['SSD']), set(['SATA'])]) - - result = self.client.get_aggregate_disk_types( - fake_client.VOLUME_AGGREGATE_NAME) - - self.assertIsInstance(result, list) - self.assertItemsEqual(['SATA', 'SSD'], result) - mock_get_aggregate_disk_types.assert_has_calls([ - mock.call(fake_client.VOLUME_AGGREGATE_NAME), - mock.call(fake_client.VOLUME_AGGREGATE_NAME, shared=True), - ]) - - def test__get_aggregate_disk_types(self): - - api_response = netapp_api.NaElement( - fake_client.STORAGE_DISK_GET_ITER_RESPONSE) - self.mock_object(self.client, - 'send_iter_request', - return_value=api_response) - - result = self.client._get_aggregate_disk_types( - fake_client.VOLUME_AGGREGATE_NAME) - - storage_disk_get_iter_args = { - 'query': { - 'storage-disk-info': { - 'disk-raid-info': { - 
'disk-aggregate-info': { - 'aggregate-name': - fake_client.VOLUME_AGGREGATE_NAME, - }, - }, - }, - }, - 'desired-attributes': { - 'storage-disk-info': { - 'disk-raid-info': { - 'effective-disk-type': None, - }, - }, - }, - } - self.client.send_iter_request.assert_called_once_with( - 'storage-disk-get-iter', storage_disk_get_iter_args, - enable_tunneling=False) - - expected = set(fake_client.AGGREGATE_DISK_TYPES) - self.assertEqual(expected, result) - - def test__get_aggregate_disk_types_shared(self): - - api_response = netapp_api.NaElement( - fake_client.STORAGE_DISK_GET_ITER_RESPONSE) - self.mock_object(self.client, - 'send_iter_request', - return_value=api_response) - - result = self.client._get_aggregate_disk_types( - fake_client.VOLUME_AGGREGATE_NAME, shared=True) - - storage_disk_get_iter_args = { - 'query': { - 'storage-disk-info': { - 'disk-raid-info': { - 'disk-shared-info': { - 'aggregate-list': { - 'shared-aggregate-info': { - 'aggregate-name': - fake_client.VOLUME_AGGREGATE_NAME, - }, - }, - }, - }, - }, - }, - 'desired-attributes': { - 'storage-disk-info': { - 'disk-raid-info': { - 'effective-disk-type': None, - }, - }, - }, - } - self.client.send_iter_request.assert_called_once_with( - 'storage-disk-get-iter', storage_disk_get_iter_args, - enable_tunneling=False) - - expected = set(fake_client.AGGREGATE_DISK_TYPES) - self.assertEqual(expected, result) - - def test__get_aggregate_disk_types_not_found(self): - - api_response = netapp_api.NaElement(fake_client.NO_RECORDS_RESPONSE) - self.mock_object(self.client, - 'send_iter_request', - return_value=api_response) - - result = self.client._get_aggregate_disk_types( - fake_client.VOLUME_AGGREGATE_NAME) - - self.assertEqual(set(), result) - - def test__get_aggregate_disk_types_api_error(self): - - self.mock_object(self.client, - 'send_iter_request', - side_effect=self._mock_api_error()) - - result = self.client._get_aggregate_disk_types( - fake_client.VOLUME_AGGREGATE_NAME) - - self.assertEqual(set([]), 
result) - - def test_get_aggregate_capacities(self): - - aggr1_capacities = { - 'percent-used': 50, - 'size-available': 100.0, - 'size-total': 200.0, - } - aggr2_capacities = { - 'percent-used': 75, - 'size-available': 125.0, - 'size-total': 500.0, - } - mock_get_aggregate_capacity = self.mock_object( - self.client, 'get_aggregate_capacity', - side_effect=[aggr1_capacities, aggr2_capacities]) - - result = self.client.get_aggregate_capacities(['aggr1', 'aggr2']) - - expected = { - 'aggr1': aggr1_capacities, - 'aggr2': aggr2_capacities, - } - self.assertEqual(expected, result) - mock_get_aggregate_capacity.assert_has_calls([ - mock.call('aggr1'), - mock.call('aggr2'), - ]) - - def test_get_aggregate_capacities_not_found(self): - - mock_get_aggregate_capacity = self.mock_object( - self.client, 'get_aggregate_capacity', side_effect=[{}, {}]) - - result = self.client.get_aggregate_capacities(['aggr1', 'aggr2']) - - expected = { - 'aggr1': {}, - 'aggr2': {}, - } - self.assertEqual(expected, result) - mock_get_aggregate_capacity.assert_has_calls([ - mock.call('aggr1'), - mock.call('aggr2'), - ]) - - def test_get_aggregate_capacities_not_list(self): - - result = self.client.get_aggregate_capacities('aggr1') - - self.assertEqual({}, result) - - def test_get_aggregate_capacity(self): - - api_response = netapp_api.NaElement( - fake_client.AGGR_GET_ITER_CAPACITY_RESPONSE).get_child_by_name( - 'attributes-list').get_children() - self.mock_object(self.client, - '_get_aggregates', - return_value=api_response) - - result = self.client.get_aggregate_capacity( - fake_client.VOLUME_AGGREGATE_NAME) - - desired_attributes = { - 'aggr-attributes': { - 'aggr-space-attributes': { - 'percent-used-capacity': None, - 'size-available': None, - 'size-total': None, - }, - }, - } - self.client._get_aggregates.assert_has_calls([ - mock.call( - aggregate_names=[fake_client.VOLUME_AGGREGATE_NAME], - desired_attributes=desired_attributes)]) - - expected = { - 'percent-used': 
float(fake_client.AGGR_USED_PERCENT), - 'size-available': float(fake_client.AGGR_SIZE_AVAILABLE), - 'size-total': float(fake_client.AGGR_SIZE_TOTAL), - } - self.assertEqual(expected, result) - - def test_get_aggregate_capacity_not_found(self): - - api_response = netapp_api.NaElement(fake_client.NO_RECORDS_RESPONSE) - self.mock_object(self.client, - 'send_request', - return_value=api_response) - - result = self.client.get_aggregate_capacity( - fake_client.VOLUME_AGGREGATE_NAME) - - self.assertEqual({}, result) - - def test_get_aggregate_capacity_api_error(self): - - self.mock_object(self.client, - 'send_request', - side_effect=self._mock_api_error()) - - result = self.client.get_aggregate_capacity( - fake_client.VOLUME_AGGREGATE_NAME) - - self.assertEqual({}, result) - - def test_get_aggregate_capacity_api_not_found(self): - - api_error = netapp_api.NaApiError(code=netapp_api.EAPINOTFOUND) - self.mock_object(self.client, 'send_request', side_effect=api_error) - - result = self.client.get_aggregate_capacity( - fake_client.VOLUME_AGGREGATE_NAME) - - self.assertEqual({}, result) - - def test_get_performance_instance_uuids(self): - - self.mock_send_request.return_value = netapp_api.NaElement( - fake_client.PERF_OBJECT_INSTANCE_LIST_INFO_ITER_RESPONSE) - - result = self.client.get_performance_instance_uuids( - 'system', fake_client.NODE_NAME) - - expected = [fake_client.NODE_NAME + ':kernel:system'] - self.assertEqual(expected, result) - - perf_object_instance_list_info_iter_args = { - 'objectname': 'system', - 'query': { - 'instance-info': { - 'uuid': fake_client.NODE_NAME + ':*', - } - } - } - self.mock_send_request.assert_called_once_with( - 'perf-object-instance-list-info-iter', - perf_object_instance_list_info_iter_args, enable_tunneling=False) - - def test_get_performance_counters(self): - - self.mock_send_request.return_value = netapp_api.NaElement( - fake_client.PERF_OBJECT_GET_INSTANCES_SYSTEM_RESPONSE_CMODE) - - instance_uuids = [ - fake_client.NODE_NAMES[0] + 
':kernel:system', - fake_client.NODE_NAMES[1] + ':kernel:system', - ] - counter_names = ['avg_processor_busy'] - result = self.client.get_performance_counters('system', - instance_uuids, - counter_names) - - expected = [ - { - 'avg_processor_busy': '5674745133134', - 'instance-name': 'system', - 'instance-uuid': instance_uuids[0], - 'node-name': fake_client.NODE_NAMES[0], - 'timestamp': '1453412013', - }, { - 'avg_processor_busy': '4077649009234', - 'instance-name': 'system', - 'instance-uuid': instance_uuids[1], - 'node-name': fake_client.NODE_NAMES[1], - 'timestamp': '1453412013' - }, - ] - self.assertEqual(expected, result) - - perf_object_get_instances_args = { - 'objectname': 'system', - 'instance-uuids': [ - {'instance-uuid': instance_uuid} - for instance_uuid in instance_uuids - ], - 'counters': [ - {'counter': counter} for counter in counter_names - ], - } - self.mock_send_request.assert_called_once_with( - 'perf-object-get-instances', perf_object_get_instances_args, - enable_tunneling=False) - - def test_check_iscsi_initiator_exists_when_no_initiator_exists(self): - self.connection.invoke_successfully = mock.Mock( - side_effect=netapp_api.NaApiError) - initiator = fake_client.INITIATOR_IQN - - initiator_exists = self.client.check_iscsi_initiator_exists(initiator) - - self.assertFalse(initiator_exists) - - def test_check_iscsi_initiator_exists_when_initiator_exists(self): - self.connection.invoke_successfully = mock.Mock() - initiator = fake_client.INITIATOR_IQN - - initiator_exists = self.client.check_iscsi_initiator_exists(initiator) - - self.assertTrue(initiator_exists) - - def test_set_iscsi_chap_authentication_no_previous_initiator(self): - self.connection.invoke_successfully = mock.Mock() - self.mock_object(self.client, 'check_iscsi_initiator_exists', - return_value=False) - - ssh = mock.Mock(paramiko.SSHClient) - sshpool = mock.Mock(ssh_utils.SSHPool) - self.client.ssh_client.ssh_pool = sshpool - self.mock_object(self.client.ssh_client, 
'execute_command_with_prompt') - sshpool.item().__enter__ = mock.Mock(return_value=ssh) - sshpool.item().__exit__ = mock.Mock(return_value=False) - - self.client.set_iscsi_chap_authentication(fake_client.INITIATOR_IQN, - fake_client.USER_NAME, - fake_client.PASSWORD) - - command = ('iscsi security create -vserver fake_vserver ' - '-initiator-name iqn.2015-06.com.netapp:fake_iqn ' - '-auth-type CHAP -user-name fake_user') - self.client.ssh_client.execute_command_with_prompt.assert_has_calls( - [mock.call(ssh, command, 'Password:', fake_client.PASSWORD)] - ) - - def test_set_iscsi_chap_authentication_with_preexisting_initiator(self): - self.connection.invoke_successfully = mock.Mock() - self.mock_object(self.client, 'check_iscsi_initiator_exists', - return_value=True) - - ssh = mock.Mock(paramiko.SSHClient) - sshpool = mock.Mock(ssh_utils.SSHPool) - self.client.ssh_client.ssh_pool = sshpool - self.mock_object(self.client.ssh_client, 'execute_command_with_prompt') - sshpool.item().__enter__ = mock.Mock(return_value=ssh) - sshpool.item().__exit__ = mock.Mock(return_value=False) - - self.client.set_iscsi_chap_authentication(fake_client.INITIATOR_IQN, - fake_client.USER_NAME, - fake_client.PASSWORD) - - command = ('iscsi security modify -vserver fake_vserver ' - '-initiator-name iqn.2015-06.com.netapp:fake_iqn ' - '-auth-type CHAP -user-name fake_user') - self.client.ssh_client.execute_command_with_prompt.assert_has_calls( - [mock.call(ssh, command, 'Password:', fake_client.PASSWORD)] - ) - - def test_set_iscsi_chap_authentication_with_ssh_exception(self): - self.connection.invoke_successfully = mock.Mock() - self.mock_object(self.client, 'check_iscsi_initiator_exists', - return_value=True) - - ssh = mock.Mock(paramiko.SSHClient) - sshpool = mock.Mock(ssh_utils.SSHPool) - self.client.ssh_client.ssh_pool = sshpool - sshpool.item().__enter__ = mock.Mock(return_value=ssh) - sshpool.item().__enter__.side_effect = paramiko.SSHException( - 'Connection Failure') - 
sshpool.item().__exit__ = mock.Mock(return_value=False) - - self.assertRaises(exception.VolumeBackendAPIException, - self.client.set_iscsi_chap_authentication, - fake_client.INITIATOR_IQN, - fake_client.USER_NAME, - fake_client.PASSWORD) - - def test_get_snapshot_if_snapshot_present_not_busy(self): - expected_vol_name = fake.SNAPSHOT['volume_id'] - expected_snapshot_name = fake.SNAPSHOT['name'] - response = netapp_api.NaElement( - fake_client.SNAPSHOT_INFO_FOR_PRESENT_NOT_BUSY_SNAPSHOT_CMODE) - self.mock_send_request.return_value = response - - snapshot = self.client.get_snapshot(expected_vol_name, - expected_snapshot_name) - - self.assertEqual(expected_vol_name, snapshot['volume']) - self.assertEqual(expected_snapshot_name, snapshot['name']) - self.assertEqual(set([]), snapshot['owners']) - self.assertFalse(snapshot['busy']) - - def test_get_snapshot_if_snapshot_present_busy(self): - expected_vol_name = fake.SNAPSHOT['volume_id'] - expected_snapshot_name = fake.SNAPSHOT['name'] - response = netapp_api.NaElement( - fake_client.SNAPSHOT_INFO_FOR_PRESENT_BUSY_SNAPSHOT_CMODE) - self.mock_send_request.return_value = response - - snapshot = self.client.get_snapshot(expected_vol_name, - expected_snapshot_name) - - self.assertEqual(expected_vol_name, snapshot['volume']) - self.assertEqual(expected_snapshot_name, snapshot['name']) - self.assertEqual(set([]), snapshot['owners']) - self.assertTrue(snapshot['busy']) - - def test_get_snapshot_if_snapshot_not_present(self): - expected_vol_name = fake.SNAPSHOT['volume_id'] - expected_snapshot_name = fake.SNAPSHOT['name'] - response = netapp_api.NaElement(fake_client.NO_RECORDS_RESPONSE) - self.mock_send_request.return_value = response - - self.assertRaises(exception.SnapshotNotFound, self.client.get_snapshot, - expected_vol_name, expected_snapshot_name) - - def test_create_cluster_peer(self): - - self.mock_object(self.client, 'send_request') - - self.client.create_cluster_peer(['fake_address_1', 'fake_address_2'], - 'fake_user', 
'fake_password', - 'fake_passphrase') - - cluster_peer_create_args = { - 'peer-addresses': [ - {'remote-inet-address': 'fake_address_1'}, - {'remote-inet-address': 'fake_address_2'}, - ], - 'user-name': 'fake_user', - 'password': 'fake_password', - 'passphrase': 'fake_passphrase', - } - self.client.send_request.assert_has_calls([ - mock.call('cluster-peer-create', cluster_peer_create_args)]) - - def test_get_cluster_peers(self): - - api_response = netapp_api.NaElement( - fake_client.CLUSTER_PEER_GET_ITER_RESPONSE) - self.mock_object(self.client, - 'send_iter_request', - return_value=api_response) - - result = self.client.get_cluster_peers() - - cluster_peer_get_iter_args = {} - self.client.send_iter_request.assert_has_calls([ - mock.call('cluster-peer-get-iter', cluster_peer_get_iter_args)]) - - expected = [{ - 'active-addresses': [ - fake_client.CLUSTER_ADDRESS_1, - fake_client.CLUSTER_ADDRESS_2 - ], - 'availability': 'available', - 'cluster-name': fake_client.CLUSTER_NAME, - 'cluster-uuid': 'fake_uuid', - 'peer-addresses': [fake_client.CLUSTER_ADDRESS_1], - 'remote-cluster-name': fake_client.REMOTE_CLUSTER_NAME, - 'serial-number': 'fake_serial_number', - 'timeout': '60', - }] - - self.assertEqual(expected, result) - - def test_get_cluster_peers_single(self): - - api_response = netapp_api.NaElement( - fake_client.CLUSTER_PEER_GET_ITER_RESPONSE) - self.mock_object(self.client, - 'send_iter_request', - return_value=api_response) - - self.client.get_cluster_peers( - remote_cluster_name=fake_client.CLUSTER_NAME) - - cluster_peer_get_iter_args = { - 'query': { - 'cluster-peer-info': { - 'remote-cluster-name': fake_client.CLUSTER_NAME, - } - }, - } - self.client.send_iter_request.assert_has_calls([ - mock.call('cluster-peer-get-iter', cluster_peer_get_iter_args)]) - - def test_get_cluster_peers_not_found(self): - - api_response = netapp_api.NaElement(fake_client.NO_RECORDS_RESPONSE) - self.mock_object(self.client, - 'send_iter_request', - return_value=api_response) - - 
result = self.client.get_cluster_peers( - remote_cluster_name=fake_client.CLUSTER_NAME) - - self.assertEqual([], result) - self.assertTrue(self.client.send_iter_request.called) - - def test_delete_cluster_peer(self): - - self.mock_object(self.client, 'send_request') - - self.client.delete_cluster_peer(fake_client.CLUSTER_NAME) - - cluster_peer_delete_args = {'cluster-name': fake_client.CLUSTER_NAME} - self.client.send_request.assert_has_calls([ - mock.call('cluster-peer-delete', cluster_peer_delete_args)]) - - def test_get_cluster_peer_policy(self): - - self.client.features.add_feature('CLUSTER_PEER_POLICY') - - api_response = netapp_api.NaElement( - fake_client.CLUSTER_PEER_POLICY_GET_RESPONSE) - self.mock_object(self.client, - 'send_request', - return_value=api_response) - - result = self.client.get_cluster_peer_policy() - - expected = { - 'is-unauthenticated-access-permitted': False, - 'passphrase-minimum-length': 8, - } - self.assertEqual(expected, result) - self.assertTrue(self.client.send_request.called) - - def test_get_cluster_peer_policy_not_supported(self): - - result = self.client.get_cluster_peer_policy() - - self.assertEqual({}, result) - - def test_set_cluster_peer_policy_not_supported(self): - - self.mock_object(self.client, 'send_request') - - self.client.set_cluster_peer_policy() - - self.assertFalse(self.client.send_request.called) - - def test_set_cluster_peer_policy_no_arguments(self): - - self.client.features.add_feature('CLUSTER_PEER_POLICY') - self.mock_object(self.client, 'send_request') - - self.client.set_cluster_peer_policy() - - self.assertFalse(self.client.send_request.called) - - def test_set_cluster_peer_policy(self): - - self.client.features.add_feature('CLUSTER_PEER_POLICY') - self.mock_object(self.client, 'send_request') - - self.client.set_cluster_peer_policy( - is_unauthenticated_access_permitted=True, - passphrase_minimum_length=12) - - cluster_peer_policy_modify_args = { - 'is-unauthenticated-access-permitted': 'true', - 
'passphrase-minlength': '12', - } - self.client.send_request.assert_has_calls([ - mock.call('cluster-peer-policy-modify', - cluster_peer_policy_modify_args)]) - - def test_create_vserver_peer(self): - - self.mock_object(self.client, 'send_request') - - self.client.create_vserver_peer('fake_vserver', 'fake_vserver_peer') - - vserver_peer_create_args = { - 'vserver': 'fake_vserver', - 'peer-vserver': 'fake_vserver_peer', - 'applications': [ - {'vserver-peer-application': 'snapmirror'}, - ], - } - self.client.send_request.assert_has_calls([ - mock.call('vserver-peer-create', vserver_peer_create_args)]) - - def test_delete_vserver_peer(self): - - self.mock_object(self.client, 'send_request') - - self.client.delete_vserver_peer('fake_vserver', 'fake_vserver_peer') - - vserver_peer_delete_args = { - 'vserver': 'fake_vserver', - 'peer-vserver': 'fake_vserver_peer', - } - self.client.send_request.assert_has_calls([ - mock.call('vserver-peer-delete', vserver_peer_delete_args)]) - - def test_accept_vserver_peer(self): - - self.mock_object(self.client, 'send_request') - - self.client.accept_vserver_peer('fake_vserver', 'fake_vserver_peer') - - vserver_peer_accept_args = { - 'vserver': 'fake_vserver', - 'peer-vserver': 'fake_vserver_peer', - } - self.client.send_request.assert_has_calls([ - mock.call('vserver-peer-accept', vserver_peer_accept_args)]) - - def test_get_vserver_peers(self): - - api_response = netapp_api.NaElement( - fake_client.VSERVER_PEER_GET_ITER_RESPONSE) - self.mock_object(self.client, - 'send_iter_request', - return_value=api_response) - - result = self.client.get_vserver_peers( - vserver_name=fake_client.VSERVER_NAME, - peer_vserver_name=fake_client.VSERVER_NAME_2) - - vserver_peer_get_iter_args = { - 'query': { - 'vserver-peer-info': { - 'vserver': fake_client.VSERVER_NAME, - 'peer-vserver': fake_client.VSERVER_NAME_2, - } - }, - } - self.client.send_iter_request.assert_has_calls([ - mock.call('vserver-peer-get-iter', vserver_peer_get_iter_args)]) - - 
expected = [{ - 'vserver': 'fake_vserver', - 'peer-vserver': 'fake_vserver_2', - 'peer-state': 'peered', - 'peer-cluster': 'fake_cluster' - }] - self.assertEqual(expected, result) - - def test_get_vserver_peers_not_found(self): - - api_response = netapp_api.NaElement(fake_client.NO_RECORDS_RESPONSE) - self.mock_object(self.client, - 'send_iter_request', - return_value=api_response) - - result = self.client.get_vserver_peers( - vserver_name=fake_client.VSERVER_NAME, - peer_vserver_name=fake_client.VSERVER_NAME_2) - - self.assertEqual([], result) - self.assertTrue(self.client.send_iter_request.called) - - def test_ensure_snapmirror_v2(self): - - self.assertIsNone(self.client._ensure_snapmirror_v2()) - - def test_ensure_snapmirror_v2_not_supported(self): - - self.client.features.add_feature('SNAPMIRROR_V2', supported=False) - - self.assertRaises(exception.NetAppDriverException, - self.client._ensure_snapmirror_v2) - - @ddt.data({'schedule': 'fake_schedule', 'policy': 'fake_policy'}, - {'schedule': None, 'policy': None}) - @ddt.unpack - def test_create_snapmirror(self, schedule, policy): - self.mock_object(self.client, 'send_request') - - self.client.create_snapmirror( - fake_client.SM_SOURCE_VSERVER, fake_client.SM_SOURCE_VOLUME, - fake_client.SM_DEST_VSERVER, fake_client.SM_DEST_VOLUME, - schedule=schedule, policy=policy) - - snapmirror_create_args = { - 'source-vserver': fake_client.SM_SOURCE_VSERVER, - 'source-volume': fake_client.SM_SOURCE_VOLUME, - 'destination-vserver': fake_client.SM_DEST_VSERVER, - 'destination-volume': fake_client.SM_DEST_VOLUME, - 'relationship-type': 'data_protection', - } - if schedule: - snapmirror_create_args['schedule'] = schedule - if policy: - snapmirror_create_args['policy'] = policy - self.client.send_request.assert_has_calls([ - mock.call('snapmirror-create', snapmirror_create_args)]) - - def test_create_snapmirror_already_exists(self): - api_error = netapp_api.NaApiError(code=netapp_api.ERELATION_EXISTS) - 
self.mock_object(self.client, 'send_request', side_effect=api_error) - - self.client.create_snapmirror( - fake_client.SM_SOURCE_VSERVER, fake_client.SM_SOURCE_VOLUME, - fake_client.SM_DEST_VSERVER, fake_client.SM_DEST_VOLUME) - - snapmirror_create_args = { - 'source-vserver': fake_client.SM_SOURCE_VSERVER, - 'source-volume': fake_client.SM_SOURCE_VOLUME, - 'destination-vserver': fake_client.SM_DEST_VSERVER, - 'destination-volume': fake_client.SM_DEST_VOLUME, - 'relationship-type': 'data_protection', - } - self.client.send_request.assert_has_calls([ - mock.call('snapmirror-create', snapmirror_create_args)]) - - def test_create_snapmirror_error(self): - api_error = netapp_api.NaApiError(code=0) - self.mock_object(self.client, 'send_request', side_effect=api_error) - - self.assertRaises(netapp_api.NaApiError, - self.client.create_snapmirror, - fake_client.SM_SOURCE_VSERVER, - fake_client.SM_SOURCE_VOLUME, - fake_client.SM_DEST_VSERVER, - fake_client.SM_DEST_VOLUME) - self.assertTrue(self.client.send_request.called) - - @ddt.data( - { - 'source_snapshot': 'fake_snapshot', - 'transfer_priority': 'fake_priority' - }, - { - 'source_snapshot': None, - 'transfer_priority': None - } - ) - @ddt.unpack - def test_initialize_snapmirror(self, source_snapshot, transfer_priority): - - api_response = netapp_api.NaElement( - fake_client.SNAPMIRROR_INITIALIZE_RESULT) - self.mock_object(self.client, - 'send_request', - return_value=api_response) - - result = self.client.initialize_snapmirror( - fake_client.SM_SOURCE_VSERVER, fake_client.SM_SOURCE_VOLUME, - fake_client.SM_DEST_VSERVER, fake_client.SM_DEST_VOLUME, - source_snapshot=source_snapshot, - transfer_priority=transfer_priority) - - snapmirror_initialize_args = { - 'source-vserver': fake_client.SM_SOURCE_VSERVER, - 'source-volume': fake_client.SM_SOURCE_VOLUME, - 'destination-vserver': fake_client.SM_DEST_VSERVER, - 'destination-volume': fake_client.SM_DEST_VOLUME, - } - if source_snapshot: - 
snapmirror_initialize_args['source-snapshot'] = source_snapshot - if transfer_priority: - snapmirror_initialize_args['transfer-priority'] = transfer_priority - self.client.send_request.assert_has_calls([ - mock.call('snapmirror-initialize', snapmirror_initialize_args)]) - - expected = { - 'operation-id': None, - 'status': 'succeeded', - 'jobid': None, - 'error-code': None, - 'error-message': None - } - self.assertEqual(expected, result) - - @ddt.data(True, False) - def test_release_snapmirror(self, relationship_info_only): - - self.mock_object(self.client, 'send_request') - - self.client.release_snapmirror( - fake_client.SM_SOURCE_VSERVER, fake_client.SM_SOURCE_VOLUME, - fake_client.SM_DEST_VSERVER, fake_client.SM_DEST_VOLUME, - relationship_info_only=relationship_info_only) - - snapmirror_release_args = { - 'query': { - 'snapmirror-destination-info': { - 'source-vserver': fake_client.SM_SOURCE_VSERVER, - 'source-volume': fake_client.SM_SOURCE_VOLUME, - 'destination-vserver': fake_client.SM_DEST_VSERVER, - 'destination-volume': fake_client.SM_DEST_VOLUME, - 'relationship-info-only': ('true' if relationship_info_only - else 'false'), - } - } - } - self.client.send_request.assert_has_calls([ - mock.call('snapmirror-release-iter', snapmirror_release_args)]) - - def test_quiesce_snapmirror(self): - - self.mock_object(self.client, 'send_request') - - self.client.quiesce_snapmirror( - fake_client.SM_SOURCE_VSERVER, fake_client.SM_SOURCE_VOLUME, - fake_client.SM_DEST_VSERVER, fake_client.SM_DEST_VOLUME) - - snapmirror_quiesce_args = { - 'source-vserver': fake_client.SM_SOURCE_VSERVER, - 'source-volume': fake_client.SM_SOURCE_VOLUME, - 'destination-vserver': fake_client.SM_DEST_VSERVER, - 'destination-volume': fake_client.SM_DEST_VOLUME, - } - self.client.send_request.assert_has_calls([ - mock.call('snapmirror-quiesce', snapmirror_quiesce_args)]) - - @ddt.data(True, False) - def test_abort_snapmirror(self, clear_checkpoint): - - self.mock_object(self.client, 
'send_request') - - self.client.abort_snapmirror( - fake_client.SM_SOURCE_VSERVER, fake_client.SM_SOURCE_VOLUME, - fake_client.SM_DEST_VSERVER, fake_client.SM_DEST_VOLUME, - clear_checkpoint=clear_checkpoint) - - snapmirror_abort_args = { - 'source-vserver': fake_client.SM_SOURCE_VSERVER, - 'source-volume': fake_client.SM_SOURCE_VOLUME, - 'destination-vserver': fake_client.SM_DEST_VSERVER, - 'destination-volume': fake_client.SM_DEST_VOLUME, - 'clear-checkpoint': 'true' if clear_checkpoint else 'false', - } - self.client.send_request.assert_has_calls([ - mock.call('snapmirror-abort', snapmirror_abort_args)]) - - def test_abort_snapmirror_no_transfer_in_progress(self): - api_error = netapp_api.NaApiError( - code=netapp_api.ENOTRANSFER_IN_PROGRESS) - self.mock_object(self.client, 'send_request', side_effect=api_error) - - self.client.abort_snapmirror( - fake_client.SM_SOURCE_VSERVER, fake_client.SM_SOURCE_VOLUME, - fake_client.SM_DEST_VSERVER, fake_client.SM_DEST_VOLUME) - - snapmirror_abort_args = { - 'source-vserver': fake_client.SM_SOURCE_VSERVER, - 'source-volume': fake_client.SM_SOURCE_VOLUME, - 'destination-vserver': fake_client.SM_DEST_VSERVER, - 'destination-volume': fake_client.SM_DEST_VOLUME, - 'clear-checkpoint': 'false', - } - self.client.send_request.assert_has_calls([ - mock.call('snapmirror-abort', snapmirror_abort_args)]) - - def test_abort_snapmirror_error(self): - api_error = netapp_api.NaApiError(code=0) - self.mock_object(self.client, 'send_request', side_effect=api_error) - - self.assertRaises(netapp_api.NaApiError, - self.client.abort_snapmirror, - fake_client.SM_SOURCE_VSERVER, - fake_client.SM_SOURCE_VOLUME, - fake_client.SM_DEST_VSERVER, - fake_client.SM_DEST_VOLUME) - - def test_break_snapmirror(self): - - self.mock_object(self.client, 'send_request') - - self.client.break_snapmirror( - fake_client.SM_SOURCE_VSERVER, fake_client.SM_SOURCE_VOLUME, - fake_client.SM_DEST_VSERVER, fake_client.SM_DEST_VOLUME) - - snapmirror_break_args = { - 
'source-vserver': fake_client.SM_SOURCE_VSERVER, - 'source-volume': fake_client.SM_SOURCE_VOLUME, - 'destination-vserver': fake_client.SM_DEST_VSERVER, - 'destination-volume': fake_client.SM_DEST_VOLUME, - } - self.client.send_request.assert_has_calls([ - mock.call('snapmirror-break', snapmirror_break_args)]) - - @ddt.data( - { - 'schedule': 'fake_schedule', - 'policy': 'fake_policy', - 'tries': 5, - 'max_transfer_rate': 1024, - }, - { - 'schedule': None, - 'policy': None, - 'tries': None, - 'max_transfer_rate': None, - } - ) - @ddt.unpack - def test_modify_snapmirror(self, schedule, policy, tries, - max_transfer_rate): - - self.mock_object(self.client, 'send_request') - - self.client.modify_snapmirror( - fake_client.SM_SOURCE_VSERVER, fake_client.SM_SOURCE_VOLUME, - fake_client.SM_DEST_VSERVER, fake_client.SM_DEST_VOLUME, - schedule=schedule, policy=policy, tries=tries, - max_transfer_rate=max_transfer_rate) - - snapmirror_modify_args = { - 'source-vserver': fake_client.SM_SOURCE_VSERVER, - 'source-volume': fake_client.SM_SOURCE_VOLUME, - 'destination-vserver': fake_client.SM_DEST_VSERVER, - 'destination-volume': fake_client.SM_DEST_VOLUME, - } - if schedule: - snapmirror_modify_args['schedule'] = schedule - if policy: - snapmirror_modify_args['policy'] = policy - if tries: - snapmirror_modify_args['tries'] = tries - if max_transfer_rate: - snapmirror_modify_args['max-transfer-rate'] = max_transfer_rate - self.client.send_request.assert_has_calls([ - mock.call('snapmirror-modify', snapmirror_modify_args)]) - - def test_delete_snapmirror(self): - - self.mock_object(self.client, 'send_request') - - self.client.delete_snapmirror( - fake_client.SM_SOURCE_VSERVER, fake_client.SM_SOURCE_VOLUME, - fake_client.SM_DEST_VSERVER, fake_client.SM_DEST_VOLUME) - - snapmirror_delete_args = { - 'query': { - 'snapmirror-info': { - 'source-vserver': fake_client.SM_SOURCE_VSERVER, - 'source-volume': fake_client.SM_SOURCE_VOLUME, - 'destination-vserver': fake_client.SM_DEST_VSERVER, 
- 'destination-volume': fake_client.SM_DEST_VOLUME, - } - } - } - self.client.send_request.assert_has_calls([ - mock.call('snapmirror-destroy-iter', snapmirror_delete_args)]) - - def test_update_snapmirror(self): - - self.mock_object(self.client, 'send_request') - - self.client.update_snapmirror( - fake_client.SM_SOURCE_VSERVER, fake_client.SM_SOURCE_VOLUME, - fake_client.SM_DEST_VSERVER, fake_client.SM_DEST_VOLUME) - - snapmirror_update_args = { - 'source-vserver': fake_client.SM_SOURCE_VSERVER, - 'source-volume': fake_client.SM_SOURCE_VOLUME, - 'destination-vserver': fake_client.SM_DEST_VSERVER, - 'destination-volume': fake_client.SM_DEST_VOLUME, - } - self.client.send_request.assert_has_calls([ - mock.call('snapmirror-update', snapmirror_update_args)]) - - def test_update_snapmirror_already_transferring(self): - api_error = netapp_api.NaApiError( - code=netapp_api.ETRANSFER_IN_PROGRESS) - self.mock_object(self.client, 'send_request', side_effect=api_error) - - self.client.update_snapmirror( - fake_client.SM_SOURCE_VSERVER, fake_client.SM_SOURCE_VOLUME, - fake_client.SM_DEST_VSERVER, fake_client.SM_DEST_VOLUME) - - snapmirror_update_args = { - 'source-vserver': fake_client.SM_SOURCE_VSERVER, - 'source-volume': fake_client.SM_SOURCE_VOLUME, - 'destination-vserver': fake_client.SM_DEST_VSERVER, - 'destination-volume': fake_client.SM_DEST_VOLUME, - } - self.client.send_request.assert_has_calls([ - mock.call('snapmirror-update', snapmirror_update_args)]) - - def test_update_snapmirror_already_transferring_two(self): - api_error = netapp_api.NaApiError(code=netapp_api.EANOTHER_OP_ACTIVE) - self.mock_object(self.client, 'send_request', side_effect=api_error) - - self.client.update_snapmirror( - fake_client.SM_SOURCE_VSERVER, fake_client.SM_SOURCE_VOLUME, - fake_client.SM_DEST_VSERVER, fake_client.SM_DEST_VOLUME) - - snapmirror_update_args = { - 'source-vserver': fake_client.SM_SOURCE_VSERVER, - 'source-volume': fake_client.SM_SOURCE_VOLUME, - 'destination-vserver': 
fake_client.SM_DEST_VSERVER, - 'destination-volume': fake_client.SM_DEST_VOLUME, - } - self.client.send_request.assert_has_calls([ - mock.call('snapmirror-update', snapmirror_update_args)]) - - def test_update_snapmirror_error(self): - api_error = netapp_api.NaApiError(code=0) - self.mock_object(self.client, 'send_request', side_effect=api_error) - - self.assertRaises(netapp_api.NaApiError, - self.client.update_snapmirror, - fake_client.SM_SOURCE_VSERVER, - fake_client.SM_SOURCE_VOLUME, - fake_client.SM_DEST_VSERVER, - fake_client.SM_DEST_VOLUME) - - def test_resume_snapmirror(self): - self.mock_object(self.client, 'send_request') - - self.client.resume_snapmirror( - fake_client.SM_SOURCE_VSERVER, fake_client.SM_SOURCE_VOLUME, - fake_client.SM_DEST_VSERVER, fake_client.SM_DEST_VOLUME) - - snapmirror_resume_args = { - 'source-vserver': fake_client.SM_SOURCE_VSERVER, - 'source-volume': fake_client.SM_SOURCE_VOLUME, - 'destination-vserver': fake_client.SM_DEST_VSERVER, - 'destination-volume': fake_client.SM_DEST_VOLUME, - } - self.client.send_request.assert_has_calls([ - mock.call('snapmirror-resume', snapmirror_resume_args)]) - - def test_resume_snapmirror_not_quiesed(self): - api_error = netapp_api.NaApiError( - code=netapp_api.ERELATION_NOT_QUIESCED) - self.mock_object(self.client, 'send_request', side_effect=api_error) - - self.client.resume_snapmirror( - fake_client.SM_SOURCE_VSERVER, fake_client.SM_SOURCE_VOLUME, - fake_client.SM_DEST_VSERVER, fake_client.SM_DEST_VOLUME) - - snapmirror_resume_args = { - 'source-vserver': fake_client.SM_SOURCE_VSERVER, - 'source-volume': fake_client.SM_SOURCE_VOLUME, - 'destination-vserver': fake_client.SM_DEST_VSERVER, - 'destination-volume': fake_client.SM_DEST_VOLUME, - } - self.client.send_request.assert_has_calls([ - mock.call('snapmirror-resume', snapmirror_resume_args)]) - - def test_resume_snapmirror_error(self): - api_error = netapp_api.NaApiError(code=0) - self.mock_object(self.client, 'send_request', 
side_effect=api_error) - - self.assertRaises(netapp_api.NaApiError, - self.client.resume_snapmirror, - fake_client.SM_SOURCE_VSERVER, - fake_client.SM_SOURCE_VOLUME, - fake_client.SM_DEST_VSERVER, - fake_client.SM_DEST_VOLUME) - - def test_resync_snapmirror(self): - self.mock_object(self.client, 'send_request') - - self.client.resync_snapmirror( - fake_client.SM_SOURCE_VSERVER, fake_client.SM_SOURCE_VOLUME, - fake_client.SM_DEST_VSERVER, fake_client.SM_DEST_VOLUME) - - snapmirror_resync_args = { - 'source-vserver': fake_client.SM_SOURCE_VSERVER, - 'source-volume': fake_client.SM_SOURCE_VOLUME, - 'destination-vserver': fake_client.SM_DEST_VSERVER, - 'destination-volume': fake_client.SM_DEST_VOLUME, - } - self.client.send_request.assert_has_calls([ - mock.call('snapmirror-resync', snapmirror_resync_args)]) - - def test__get_snapmirrors(self): - - api_response = netapp_api.NaElement( - fake_client.SNAPMIRROR_GET_ITER_RESPONSE) - self.mock_object(self.client, - 'send_iter_request', - return_value=api_response) - - desired_attributes = { - 'snapmirror-info': { - 'source-vserver': None, - 'source-volume': None, - 'destination-vserver': None, - 'destination-volume': None, - 'is-healthy': None, - } - } - - result = self.client._get_snapmirrors( - fake_client.SM_SOURCE_VSERVER, fake_client.SM_SOURCE_VOLUME, - fake_client.SM_DEST_VSERVER, fake_client.SM_DEST_VOLUME, - desired_attributes=desired_attributes) - - snapmirror_get_iter_args = { - 'query': { - 'snapmirror-info': { - 'source-vserver': fake_client.SM_SOURCE_VSERVER, - 'source-volume': fake_client.SM_SOURCE_VOLUME, - 'destination-vserver': fake_client.SM_DEST_VSERVER, - 'destination-volume': fake_client.SM_DEST_VOLUME, - }, - }, - 'desired-attributes': { - 'snapmirror-info': { - 'source-vserver': None, - 'source-volume': None, - 'destination-vserver': None, - 'destination-volume': None, - 'is-healthy': None, - }, - }, - } - self.client.send_iter_request.assert_has_calls([ - mock.call('snapmirror-get-iter', 
snapmirror_get_iter_args)]) - self.assertEqual(1, len(result)) - - def test__get_snapmirrors_not_found(self): - - api_response = netapp_api.NaElement(fake_client.NO_RECORDS_RESPONSE) - self.mock_object(self.client, - 'send_iter_request', - return_value=api_response) - - result = self.client._get_snapmirrors() - - self.client.send_iter_request.assert_has_calls([ - mock.call('snapmirror-get-iter', {})]) - - self.assertEqual([], result) - - def test_get_snapmirrors(self): - - api_response = netapp_api.NaElement( - fake_client.SNAPMIRROR_GET_ITER_FILTERED_RESPONSE) - self.mock_object(self.client, - 'send_iter_request', - return_value=api_response) - - desired_attributes = ['source-vserver', 'source-volume', - 'destination-vserver', 'destination-volume', - 'is-healthy', 'mirror-state', 'schedule'] - - result = self.client.get_snapmirrors( - fake_client.SM_SOURCE_VSERVER, fake_client.SM_SOURCE_VOLUME, - fake_client.SM_DEST_VSERVER, fake_client.SM_DEST_VOLUME, - desired_attributes=desired_attributes) - - snapmirror_get_iter_args = { - 'query': { - 'snapmirror-info': { - 'source-vserver': fake_client.SM_SOURCE_VSERVER, - 'source-volume': fake_client.SM_SOURCE_VOLUME, - 'destination-vserver': fake_client.SM_DEST_VSERVER, - 'destination-volume': fake_client.SM_DEST_VOLUME, - }, - }, - 'desired-attributes': { - 'snapmirror-info': { - 'source-vserver': None, - 'source-volume': None, - 'destination-vserver': None, - 'destination-volume': None, - 'is-healthy': None, - 'mirror-state': None, - 'schedule': None, - }, - }, - } - - expected = [{ - 'source-vserver': fake_client.SM_SOURCE_VSERVER, - 'source-volume': fake_client.SM_SOURCE_VOLUME, - 'destination-vserver': fake_client.SM_DEST_VSERVER, - 'destination-volume': fake_client.SM_DEST_VOLUME, - 'is-healthy': 'true', - 'mirror-state': 'snapmirrored', - 'schedule': 'daily', - }] - - self.client.send_iter_request.assert_has_calls([ - mock.call('snapmirror-get-iter', snapmirror_get_iter_args)]) - self.assertEqual(expected, result) - 
- def test_get_provisioning_options_from_flexvol(self): - - self.mock_object(self.client, 'get_flexvol', - return_value=fake_client.VOLUME_INFO_SSC) - self.mock_object(self.client, 'get_flexvol_dedupe_info', - return_value=fake_client.VOLUME_DEDUPE_INFO_SSC) - - expected_prov_opts = { - 'aggregate': 'fake_aggr1', - 'compression_enabled': False, - 'dedupe_enabled': True, - 'language': 'en_US', - 'size': 1, - 'snapshot_policy': 'default', - 'snapshot_reserve': '5', - 'space_guarantee_type': 'none', - 'volume_type': 'rw' - } - - actual_prov_opts = self.client.get_provisioning_options_from_flexvol( - fake_client.VOLUME_NAME) - - self.assertEqual(expected_prov_opts, actual_prov_opts) - - def test_wait_for_busy_snapshot(self): - # Need to mock sleep as it is called by @utils.retry - self.mock_object(time, 'sleep') - mock_get_snapshot = self.mock_object( - self.client, 'get_snapshot', - return_value=fake.SNAPSHOT - ) - - self.client.wait_for_busy_snapshot(fake.FLEXVOL, fake.SNAPSHOT_NAME) - - mock_get_snapshot.assert_called_once_with(fake.FLEXVOL, - fake.SNAPSHOT_NAME) - - def test_wait_for_busy_snapshot_raise_exception(self): - # Need to mock sleep as it is called by @utils.retry - self.mock_object(time, 'sleep') - BUSY_SNAPSHOT = dict(fake.SNAPSHOT) - BUSY_SNAPSHOT['busy'] = True - mock_get_snapshot = self.mock_object( - self.client, 'get_snapshot', - return_value=BUSY_SNAPSHOT - ) - - self.assertRaises(exception.SnapshotIsBusy, - self.client.wait_for_busy_snapshot, - fake.FLEXVOL, fake.SNAPSHOT_NAME) - - calls = [ - mock.call(fake.FLEXVOL, fake.SNAPSHOT_NAME), - mock.call(fake.FLEXVOL, fake.SNAPSHOT_NAME), - mock.call(fake.FLEXVOL, fake.SNAPSHOT_NAME), - ] - mock_get_snapshot.assert_has_calls(calls) - - @ddt.data({ - 'mock_return': - fake_client.SNAPSHOT_INFO_FOR_PRESENT_NOT_BUSY_SNAPSHOT_CMODE, - 'expected': [{ - 'name': fake.SNAPSHOT_NAME, - 'instance_id': 'abcd-ef01-2345-6789', - 'volume_name': fake.SNAPSHOT['volume_id'], - }] - }, { - 'mock_return': 
fake_client.NO_RECORDS_RESPONSE, - 'expected': [], - }) - @ddt.unpack - def test_get_snapshots_marked_for_deletion(self, mock_return, expected): - api_response = netapp_api.NaElement(mock_return) - self.mock_object(self.client, - 'send_request', - return_value=api_response) - - result = self.client.get_snapshots_marked_for_deletion() - - api_args = { - 'query': { - 'snapshot-info': { - 'name': client_base.DELETED_PREFIX + '*', - 'vserver': self.vserver, - 'busy': 'false' - }, - }, - 'desired-attributes': { - 'snapshot-info': { - 'name': None, - 'volume': None, - 'snapshot-instance-uuid': None, - } - }, - } - - self.client.send_request.assert_called_once_with( - 'snapshot-get-iter', api_args) - self.assertListEqual(expected, result) diff --git a/cinder/tests/unit/volume/drivers/netapp/dataontap/fakes.py b/cinder/tests/unit/volume/drivers/netapp/dataontap/fakes.py deleted file mode 100644 index 3b9be0336..000000000 --- a/cinder/tests/unit/volume/drivers/netapp/dataontap/fakes.py +++ /dev/null @@ -1,514 +0,0 @@ -# Copyright (c) - 2014, Clinton Knight. All rights reserved. -# Copyright (c) - 2015, Tom Barron. All rights reserved. -# Copyright (c) - 2016 Chuck Fouts. All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -from lxml import etree - -from cinder.volume.drivers.netapp.dataontap.client import api as netapp_api - - -VOLUME_ID = 'f10d1a84-9b7b-427e-8fec-63c48b509a56' -LUN_ID = 'ee6b4cc7-477b-4016-aa0c-7127b4e3af86' -LUN_HANDLE = 'fake_lun_handle' -LUN_NAME = 'lun1' -LUN_SIZE = 3 -LUN_TABLE = {LUN_NAME: None} -SIZE = 1024 -HOST_NAME = 'fake.host.name' -BACKEND_NAME = 'fake_backend_name' -POOL_NAME = 'aggr1' -SHARE_IP = '192.168.99.24' -EXPORT_PATH = '/fake/export/path' -NFS_SHARE = '%s:%s' % (SHARE_IP, EXPORT_PATH) -HOST_STRING = '%s@%s#%s' % (HOST_NAME, BACKEND_NAME, POOL_NAME) -NFS_HOST_STRING = '%s@%s#%s' % (HOST_NAME, BACKEND_NAME, NFS_SHARE) -AGGREGATE = 'aggr1' -FLEXVOL = 'openstack-flexvol' -NFS_FILE_PATH = 'nfsvol' -PATH = '/vol/%s/%s' % (POOL_NAME, LUN_NAME) -IMAGE_FILE_ID = 'img-cache-imgid' -PROVIDER_LOCATION = 'fake_provider_location' -NFS_HOST = 'nfs-host1' -NFS_SHARE_PATH = '/export' -NFS_EXPORT_1 = '%s:%s' % (NFS_HOST, NFS_SHARE_PATH) -NFS_EXPORT_2 = 'nfs-host2:/export' -MOUNT_POINT = '/mnt/nfs' -LUN_METADATA = { - 'OsType': None, - 'SpaceReserved': 'true', - 'Path': PATH, - 'Qtree': None, - 'Volume': POOL_NAME, -} -VOLUME = { - 'name': LUN_NAME, - 'size': SIZE, - 'id': VOLUME_ID, - 'host': HOST_STRING, -} -NFS_VOLUME = { - 'name': NFS_FILE_PATH, - 'size': SIZE, - 'id': VOLUME_ID, - 'host': NFS_HOST_STRING, - 'provider_location': PROVIDER_LOCATION, -} - -FAKE_MANAGE_VOLUME = { - 'name': 'volume-new-managed-123', - 'id': 'volume-new-managed-123', -} - -FAKE_IMAGE_LOCATION = ( - None, - [ - # valid metadata - { - 'metadata': { - 'share_location': 'nfs://host/path', - 'mountpoint': '/opt/stack/data/glance', - 'id': 'abc-123', - 'type': 'nfs' - }, - 'url': 'file:///opt/stack/data/glance/image-id-0' - }, - # missing metadata - { - 'metadata': {}, - 'url': 'file:///opt/stack/data/glance/image-id-1' - }, - # missing location_type - { - 'metadata': {'location_type': None}, - 'url': 'file:///opt/stack/data/glance/image-id-2' - }, - # non-nfs location_type - { - 
'metadata': {'location_type': 'not-NFS'}, - 'url': 'file:///opt/stack/data/glance/image-id-3' - }, - # missing share_location - { - 'metadata': {'location_type': 'nfs', 'share_location': None}, - 'url': 'file:///opt/stack/data/glance/image-id-4'}, - # missing mountpoint - { - 'metadata': { - 'location_type': 'nfs', - 'share_location': 'nfs://host/path', - # Pre-kilo we documented "mount_point" - 'mount_point': '/opt/stack/data/glance' - }, - 'url': 'file:///opt/stack/data/glance/image-id-5' - }, - # Valid metadata - { - 'metadata': - { - 'share_location': 'nfs://host/path', - 'mountpoint': '/opt/stack/data/glance', - 'id': 'abc-123', - 'type': 'nfs', - }, - 'url': 'file:///opt/stack/data/glance/image-id-6' - } - ] -) - -NETAPP_VOLUME = 'fake_netapp_volume' - -VFILER = 'fake_netapp_vfiler' - -UUID1 = '12345678-1234-5678-1234-567812345678' -LUN_PATH = '/vol/vol0/%s' % LUN_NAME - -VSERVER_NAME = 'openstack-vserver' - -FC_VOLUME = {'name': 'fake_volume'} - -FC_INITIATORS = ['21000024ff406cc3', '21000024ff406cc2'] -FC_FORMATTED_INITIATORS = ['21:00:00:24:ff:40:6c:c3', - '21:00:00:24:ff:40:6c:c2'] - -FC_TARGET_WWPNS = ['500a098280feeba5', '500a098290feeba5', - '500a098190feeba5', '500a098180feeba5'] - -FC_FORMATTED_TARGET_WWPNS = ['50:0a:09:82:80:fe:eb:a5', - '50:0a:09:82:90:fe:eb:a5', - '50:0a:09:81:90:fe:eb:a5', - '50:0a:09:81:80:fe:eb:a5'] - -FC_CONNECTOR = {'ip': '1.1.1.1', - 'host': 'fake_host', - 'wwnns': ['20000024ff406cc3', '20000024ff406cc2'], - 'wwpns': ['21000024ff406cc3', '21000024ff406cc2']} - -FC_I_T_MAP = {'21000024ff406cc3': ['500a098280feeba5', '500a098290feeba5'], - '21000024ff406cc2': ['500a098190feeba5', '500a098180feeba5']} - -FC_I_T_MAP_COMPLETE = {'21000024ff406cc3': FC_TARGET_WWPNS, - '21000024ff406cc2': FC_TARGET_WWPNS} - -FC_FABRIC_MAP = {'fabricB': - {'target_port_wwn_list': - ['500a098190feeba5', '500a098180feeba5'], - 'initiator_port_wwn_list': ['21000024ff406cc2']}, - 'fabricA': - {'target_port_wwn_list': - ['500a098290feeba5', 
'500a098280feeba5'], - 'initiator_port_wwn_list': ['21000024ff406cc3']}} - -FC_TARGET_INFO = {'driver_volume_type': 'fibre_channel', - 'data': {'target_lun': 1, - 'initiator_target_map': FC_I_T_MAP, - 'target_wwn': FC_TARGET_WWPNS, - 'target_discovered': True}} - -FC_TARGET_INFO_EMPTY = {'driver_volume_type': 'fibre_channel', 'data': {}} - -FC_TARGET_INFO_UNMAP = {'driver_volume_type': 'fibre_channel', - 'data': {'target_wwn': FC_TARGET_WWPNS, - 'initiator_target_map': FC_I_T_MAP}} - -IGROUP1_NAME = 'openstack-igroup1' - -IGROUP1 = { - 'initiator-group-os-type': 'linux', - 'initiator-group-type': 'fcp', - 'initiator-group-name': IGROUP1_NAME, -} - -ISCSI_VOLUME = { - 'name': 'fake_volume', - 'id': 'fake_id', - 'provider_auth': 'fake provider auth', -} - -ISCSI_LUN = {'name': ISCSI_VOLUME, 'lun_id': 42} - -ISCSI_SERVICE_IQN = 'fake_iscsi_service_iqn' - -ISCSI_CONNECTION_PROPERTIES = { - 'data': { - 'auth_method': 'fake_method', - 'auth_password': 'auth', - 'auth_username': 'provider', - 'discovery_auth_method': 'fake_method', - 'discovery_auth_username': 'provider', - 'discovery_auth_password': 'auth', - 'target_discovered': False, - 'target_iqn': ISCSI_SERVICE_IQN, - 'target_lun': 42, - 'target_portal': '1.2.3.4:3260', - 'volume_id': 'fake_id', - }, - 'driver_volume_type': 'iscsi', -} - -ISCSI_CONNECTOR = { - 'ip': '1.1.1.1', - 'host': 'fake_host', - 'initiator': 'fake_initiator_iqn', -} - -ISCSI_TARGET_DETAILS_LIST = [ - {'address': '5.6.7.8', 'port': '3260'}, - {'address': '1.2.3.4', 'port': '3260'}, - {'address': '99.98.97.96', 'port': '3260'}, -] - -IPV4_ADDRESS = '192.168.14.2' -IPV6_ADDRESS = 'fe80::6e40:8ff:fe8a:130' -NFS_SHARE_IPV4 = IPV4_ADDRESS + ':' + EXPORT_PATH -NFS_SHARE_IPV6 = IPV6_ADDRESS + ':' + EXPORT_PATH - -RESERVED_PERCENTAGE = 7 -MAX_OVER_SUBSCRIPTION_RATIO = 19.0 -TOTAL_BYTES = 4797892092432 -AVAILABLE_BYTES = 13479932478 -CAPACITY_VALUES = (TOTAL_BYTES, AVAILABLE_BYTES) -CAPACITIES = {'size-total': TOTAL_BYTES, 'size-available': 
AVAILABLE_BYTES} - -IGROUP1 = {'initiator-group-os-type': 'linux', - 'initiator-group-type': 'fcp', - 'initiator-group-name': IGROUP1_NAME} - -QOS_SPECS = {} -EXTRA_SPECS = {} -MAX_THROUGHPUT = '21734278B/s' -QOS_POLICY_GROUP_NAME = 'fake_qos_policy_group_name' - -QOS_POLICY_GROUP_INFO_LEGACY = { - 'legacy': 'legacy-' + QOS_POLICY_GROUP_NAME, - 'spec': None, -} - -QOS_POLICY_GROUP_SPEC = { - 'max_throughput': MAX_THROUGHPUT, - 'policy_name': QOS_POLICY_GROUP_NAME, -} - -QOS_POLICY_GROUP_INFO = {'legacy': None, 'spec': QOS_POLICY_GROUP_SPEC} - -CLONE_SOURCE_NAME = 'fake_clone_source_name' -CLONE_SOURCE_ID = 'fake_clone_source_id' -CLONE_SOURCE_SIZE = 1024 - -CLONE_SOURCE = { - 'size': CLONE_SOURCE_SIZE, - 'name': CLONE_SOURCE_NAME, - 'id': CLONE_SOURCE_ID, -} - -CLONE_DESTINATION_NAME = 'fake_clone_destination_name' -CLONE_DESTINATION_SIZE = 1041 -CLONE_DESTINATION_ID = 'fake_clone_destination_id' - -CLONE_DESTINATION = { - 'size': CLONE_DESTINATION_SIZE, - 'name': CLONE_DESTINATION_NAME, - 'id': CLONE_DESTINATION_ID, -} - -VOLUME_NAME = 'volume-fake_volume_id' -MOUNT_PATH = '168.10.16.11:/' + VOLUME_ID -SNAPSHOT_NAME = 'fake_snapshot_name' -SNAPSHOT_LUN_HANDLE = 'fake_snapshot_lun_handle' -SNAPSHOT_MOUNT = '/fake/mount/path' - -SNAPSHOT = { - 'name': SNAPSHOT_NAME, - 'volume_size': SIZE, - 'volume_id': VOLUME_ID, - 'volume_name': VOLUME_NAME, - 'busy': False, -} - -VOLUME_REF = {'name': 'fake_vref_name', 'size': 42} - -FAKE_CMODE_VOLUMES = ['open123', 'mixed', 'open321'] -FAKE_CMODE_POOL_MAP = { - 'open123': { - 'pool_name': 'open123', - }, - 'mixed': { - 'pool_name': 'mixed', - }, - 'open321': { - 'pool_name': 'open321', - }, -} - -FAKE_7MODE_VOLUME = { - 'all': [ - netapp_api.NaElement( - etree.XML(""" - open123 - """)), - netapp_api.NaElement( - etree.XML(""" - mixed3 - """)), - netapp_api.NaElement( - etree.XML(""" - open1234 - """)) - ], -} - -FILE_LIST = ['file1', 'file2', 'file3'] - -FAKE_LUN = netapp_api.NaElement.create_node_with_children( - 'lun-info', - 
**{'alignment': 'indeterminate', - 'block-size': '512', - 'comment': '', - 'creation-timestamp': '1354536362', - 'is-space-alloc-enabled': 'false', - 'is-space-reservation-enabled': 'true', - 'mapped': 'false', - 'multiprotocol-type': 'linux', - 'online': 'true', - 'path': '/vol/fakeLUN/fakeLUN', - 'prefix-size': '0', - 'qtree': '', - 'read-only': 'false', - 'serial-number': '2FfGI$APyN68', - 'share-state': 'none', - 'size': '20971520', - 'size-used': '0', - 'staging': 'false', - 'suffix-size': '0', - 'uuid': 'cec1f3d7-3d41-11e2-9cf4-123478563412', - 'volume': 'fakeLUN', - 'vserver': 'fake_vserver'}) - -FAKE_7MODE_VOL1 = [netapp_api.NaElement( - etree.XML(""" - open123 - online - 0 - 0 - 0 - false - false - """))] - -FAKE_7MODE_POOLS = [ - { - 'pool_name': 'open123', - 'consistencygroup_support': True, - 'QoS_support': False, - 'reserved_percentage': 0, - 'total_capacity_gb': 0.0, - 'free_capacity_gb': 0.0, - 'max_over_subscription_ratio': 20.0, - 'multiattach': False, - 'thin_provisioning_support': False, - 'thick_provisioning_support': True, - 'provisioned_capacity_gb': 0.0, - 'utilization': 30.0, - 'filter_function': 'filter', - 'goodness_function': 'goodness', - } -] - -CG_VOLUME_NAME = 'fake_cg_volume' -CG_GROUP_NAME = 'fake_consistency_group' -CG_POOL_NAME = 'cdot' -SOURCE_CG_VOLUME_NAME = 'fake_source_cg_volume' -CG_VOLUME_ID = 'fake_cg_volume_id' -CG_VOLUME_SIZE = 100 -SOURCE_CG_VOLUME_ID = 'fake_source_cg_volume_id' -CONSISTENCY_GROUP_NAME = 'fake_cg' -SOURCE_CONSISTENCY_GROUP_ID = 'fake_source_cg_id' -CONSISTENCY_GROUP_ID = 'fake_cg_id' -CG_SNAPSHOT_ID = 'fake_cg_snapshot_id' -CG_SNAPSHOT_NAME = 'snapshot-' + CG_SNAPSHOT_ID -CG_VOLUME_SNAPSHOT_ID = 'fake_cg_volume_snapshot_id' - -CG_LUN_METADATA = { - 'OsType': None, - 'Path': '/vol/aggr1/fake_cg_volume', - 'SpaceReserved': 'true', - 'Qtree': None, - 'Volume': POOL_NAME, -} - -SOURCE_CG_VOLUME = { - 'name': SOURCE_CG_VOLUME_NAME, - 'size': CG_VOLUME_SIZE, - 'id': SOURCE_CG_VOLUME_ID, - 'host': 
'hostname@backend#cdot', - 'consistencygroup_id': None, - 'status': 'fake_status', -} - -CG_VOLUME = { - 'name': CG_VOLUME_NAME, - 'size': 100, - 'id': CG_VOLUME_ID, - 'host': 'hostname@backend#' + CG_POOL_NAME, - 'consistencygroup_id': CONSISTENCY_GROUP_ID, - 'status': 'fake_status', -} - -SOURCE_CONSISTENCY_GROUP = { - 'id': SOURCE_CONSISTENCY_GROUP_ID, - 'status': 'fake_status', -} - -CONSISTENCY_GROUP = { - 'id': CONSISTENCY_GROUP_ID, - 'status': 'fake_status', - 'name': CG_GROUP_NAME, -} - -CG_CONTEXT = {} - -CG_SNAPSHOT = { - 'id': CG_SNAPSHOT_ID, - 'name': CG_SNAPSHOT_NAME, - 'volume_size': CG_VOLUME_SIZE, - 'consistencygroup_id': CONSISTENCY_GROUP_ID, - 'status': 'fake_status', - 'volume_id': 'fake_source_volume_id', -} - -CG_VOLUME_SNAPSHOT = { - 'name': CG_SNAPSHOT_NAME, - 'volume_size': CG_VOLUME_SIZE, - 'cgsnapshot_id': CG_SNAPSHOT_ID, - 'id': CG_VOLUME_SNAPSHOT_ID, - 'status': 'fake_status', - 'volume_id': CG_VOLUME_ID, -} - - -class test_volume(object): - pass - -test_volume = test_volume() -test_volume.id = {'vserver': 'openstack', 'name': 'vola'} -test_volume.aggr = { - 'disk_type': 'SSD', - 'ha_policy': 'cfo', - 'junction': '/vola', - 'name': 'aggr1', - 'raid_type': 'raiddp', -} -test_volume.export = {'path': NFS_SHARE} -test_volume.sis = {'dedup': False, 'compression': False} -test_volume.state = { - 'status': 'online', - 'vserver_root': False, - 'junction_active': True, -} -test_volume.qos = {'qos_policy_group': None} - - -class test_snapshot(object): - pass - - def __getitem__(self, key): - return getattr(self, key) - -test_snapshot = test_snapshot() -test_snapshot.id = 'fake_snap_id' -test_snapshot.name = 'snapshot-%s' % test_snapshot.id -test_snapshot.volume_id = 'fake_volume_id' -test_snapshot.provider_location = PROVIDER_LOCATION - - -def get_fake_net_interface_get_iter_response(): - return etree.XML(""" - 1 - - -
FAKE_IP
-
-
""") - - -def get_fake_ifs(): - list_of_ifs = [ - etree.XML(""" -
FAKE_IP
"""), - etree.XML(""" -
FAKE_IP2
"""), - etree.XML(""" -
FAKE_IP3
"""), - ] - return [netapp_api.NaElement(el) for el in list_of_ifs] diff --git a/cinder/tests/unit/volume/drivers/netapp/dataontap/performance/__init__.py b/cinder/tests/unit/volume/drivers/netapp/dataontap/performance/__init__.py deleted file mode 100644 index e69de29bb..000000000 diff --git a/cinder/tests/unit/volume/drivers/netapp/dataontap/performance/fakes.py b/cinder/tests/unit/volume/drivers/netapp/dataontap/performance/fakes.py deleted file mode 100644 index a8d2476f0..000000000 --- a/cinder/tests/unit/volume/drivers/netapp/dataontap/performance/fakes.py +++ /dev/null @@ -1,546 +0,0 @@ -# Copyright (c) 2016 Clinton Knight -# All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -NODE = 'cluster1-01' - -COUNTERS_T1 = [ - { - 'node-name': 'cluster1-01', - 'instance-uuid': 'cluster1-01:kernel:system', - 'avg_processor_busy': '29078861388', - 'instance-name': 'system', - 'timestamp': '1453573776', - }, { - 'node-name': 'cluster1-01', - 'instance-uuid': 'cluster1-01:kernel:system', - 'cpu_elapsed_time': '1063283283681', - 'instance-name': 'system', - 'timestamp': '1453573776', - }, { - 'node-name': 'cluster1-01', - 'instance-uuid': 'cluster1-01:kernel:system', - 'cpu_elapsed_time1': '1063283283681', - 'instance-name': 'system', - 'timestamp': '1453573776', - }, { - 'cp_phase_times:p2a_snap': '714', - 'cp_phase_times:p4_finish': '14897', - 'cp_phase_times:setup': '581', - 'cp_phase_times:p2a_dlog1': '6019', - 'cp_phase_times:p2a_dlog2': '2328', - 'cp_phase_times:p2v_cont': '2479', - 'cp_phase_times:p2v_volinfo': '1138', - 'cp_phase_times:p2v_bm': '3484', - 'cp_phase_times:p2v_fsinfo': '2031', - 'cp_phase_times:p2a_inofile': '356', - 'cp_phase_times': '581,5007,1840,9832,498,0,839,799,1336,2031,0,377,' - '427,1058,354,3484,5135,1460,1138,2479,356,1373' - ',6019,9,2328,2257,229,493,1275,0,6059,714,530215,' - '21603833,0,0,3286,11075940,22001,14897,36', - 'cp_phase_times:p2v_dlog2': '377', - 'instance-name': 'wafl', - 'cp_phase_times:p3_wait': '0', - 'cp_phase_times:p2a_bm': '6059', - 'cp_phase_times:p1_quota': '498', - 'cp_phase_times:p2v_inofile': '839', - 'cp_phase_times:p2a_refcount': '493', - 'cp_phase_times:p2a_fsinfo': '2257', - 'cp_phase_times:p2a_hyabc': '0', - 'cp_phase_times:p2a_volinfo': '530215', - 'cp_phase_times:pre_p0': '5007', - 'cp_phase_times:p2a_hya': '9', - 'cp_phase_times:p0_snap_del': '1840', - 'cp_phase_times:p2a_ino': '1373', - 'cp_phase_times:p2v_df_scores_sub': '354', - 'cp_phase_times:p2v_ino_pub': '799', - 'cp_phase_times:p2a_ipu_bitmap_grow': '229', - 'cp_phase_times:p2v_refcount': '427', - 'timestamp': '1453573776', - 'cp_phase_times:p2v_dlog1': '0', - 'cp_phase_times:p2_finish': '0', - 'cp_phase_times:p1_clean': 
'9832', - 'node-name': 'cluster1-01', - 'instance-uuid': 'cluster1-01:kernel:wafl', - 'cp_phase_times:p3a_volinfo': '11075940', - 'cp_phase_times:p2a_topaa': '1275', - 'cp_phase_times:p2_flush': '21603833', - 'cp_phase_times:p2v_df_scores': '1460', - 'cp_phase_times:ipu_disk_add': '0', - 'cp_phase_times:p2v_snap': '5135', - 'cp_phase_times:p5_finish': '36', - 'cp_phase_times:p2v_ino_pri': '1336', - 'cp_phase_times:p3v_volinfo': '3286', - 'cp_phase_times:p2v_topaa': '1058', - 'cp_phase_times:p3_finish': '22001', - }, { - 'node-name': 'cluster1-01', - 'instance-uuid': 'cluster1-01:kernel:wafl', - 'total_cp_msecs': '33309624', - 'instance-name': 'wafl', - 'timestamp': '1453573776', - }, { - 'domain_busy:kahuna': '2712467226', - 'timestamp': '1453573777', - 'domain_busy:cifs': '434036', - 'domain_busy:raid_exempt': '28', - 'node-name': 'cluster1-01', - 'instance-uuid': 'cluster1-01:kernel:processor0', - 'domain_busy:target': '6460782', - 'domain_busy:nwk_exempt': '20', - 'domain_busy:raid': '722094140', - 'domain_busy:storage': '2253156562', - 'instance-name': 'processor0', - 'domain_busy:cluster': '34', - 'domain_busy:wafl_xcleaner': '51275254', - 'domain_busy:wafl_exempt': '1243553699', - 'domain_busy:protocol': '54', - 'domain_busy': '1028851855595,2712467226,2253156562,5688808118,' - '722094140,28,6460782,59,434036,1243553699,51275254,' - '61237441,34,54,11,20,5254181873,13656398235,452215', - 'domain_busy:nwk_legacy': '5254181873', - 'domain_busy:dnscache': '59', - 'domain_busy:exempt': '5688808118', - 'domain_busy:hostos': '13656398235', - 'domain_busy:sm_exempt': '61237441', - 'domain_busy:nwk_exclusive': '11', - 'domain_busy:idle': '1028851855595', - 'domain_busy:ssan_exempt': '452215', - }, { - 'node-name': 'cluster1-01', - 'instance-uuid': 'cluster1-01:kernel:processor0', - 'processor_elapsed_time': '1063283843318', - 'instance-name': 'processor0', - 'timestamp': '1453573777', - }, { - 'domain_busy:kahuna': '1978024846', - 'timestamp': '1453573777', - 
'domain_busy:cifs': '318584', - 'domain_busy:raid_exempt': '0', - 'node-name': 'cluster1-01', - 'instance-uuid': 'cluster1-01:kernel:processor1', - 'domain_busy:target': '3330956', - 'domain_busy:nwk_exempt': '0', - 'domain_busy:raid': '722235930', - 'domain_busy:storage': '1498890708', - 'instance-name': 'processor1', - 'domain_busy:cluster': '0', - 'domain_busy:wafl_xcleaner': '50122685', - 'domain_busy:wafl_exempt': '1265921369', - 'domain_busy:protocol': '0', - 'domain_busy': '1039557880852,1978024846,1498890708,3734060289,' - '722235930,0,3330956,0,318584,1265921369,50122685,' - '36417362,0,0,0,0,2815252976,10274810484,393451', - 'domain_busy:nwk_legacy': '2815252976', - 'domain_busy:dnscache': '0', - 'domain_busy:exempt': '3734060289', - 'domain_busy:hostos': '10274810484', - 'domain_busy:sm_exempt': '36417362', - 'domain_busy:nwk_exclusive': '0', - 'domain_busy:idle': '1039557880852', - 'domain_busy:ssan_exempt': '393451', - }, { - 'node-name': 'cluster1-01', - 'instance-uuid': 'cluster1-01:kernel:processor1', - 'processor_elapsed_time': '1063283843321', - 'instance-name': 'processor1', - 'timestamp': '1453573777', - } -] - -COUNTERS_T2 = [ - { - 'node-name': 'cluster1-01', - 'instance-uuid': 'cluster1-01:kernel:system', - 'avg_processor_busy': '29081228905', - 'instance-name': 'system', - 'timestamp': '1453573834', - }, { - 'node-name': 'cluster1-01', - 'instance-uuid': 'cluster1-01:kernel:system', - 'cpu_elapsed_time': '1063340792148', - 'instance-name': 'system', - 'timestamp': '1453573834', - }, { - 'node-name': 'cluster1-01', - 'instance-uuid': 'cluster1-01:kernel:system', - 'cpu_elapsed_time1': '1063340792148', - 'instance-name': 'system', - 'timestamp': '1453573834', - }, { - 'cp_phase_times:p2a_snap': '714', - 'cp_phase_times:p4_finish': '14897', - 'cp_phase_times:setup': '581', - 'cp_phase_times:p2a_dlog1': '6019', - 'cp_phase_times:p2a_dlog2': '2328', - 'cp_phase_times:p2v_cont': '2479', - 'cp_phase_times:p2v_volinfo': '1138', - 
'cp_phase_times:p2v_bm': '3484', - 'cp_phase_times:p2v_fsinfo': '2031', - 'cp_phase_times:p2a_inofile': '356', - 'cp_phase_times': '581,5007,1840,9832,498,0,839,799,1336,2031,0,377,' - '427,1058,354,3484,5135,1460,1138,2479,356,1373,' - '6019,9,2328,2257,229,493,1275,0,6059,714,530215,' - '21604863,0,0,3286,11076392,22001,14897,36', - 'cp_phase_times:p2v_dlog2': '377', - 'instance-name': 'wafl', - 'cp_phase_times:p3_wait': '0', - 'cp_phase_times:p2a_bm': '6059', - 'cp_phase_times:p1_quota': '498', - 'cp_phase_times:p2v_inofile': '839', - 'cp_phase_times:p2a_refcount': '493', - 'cp_phase_times:p2a_fsinfo': '2257', - 'cp_phase_times:p2a_hyabc': '0', - 'cp_phase_times:p2a_volinfo': '530215', - 'cp_phase_times:pre_p0': '5007', - 'cp_phase_times:p2a_hya': '9', - 'cp_phase_times:p0_snap_del': '1840', - 'cp_phase_times:p2a_ino': '1373', - 'cp_phase_times:p2v_df_scores_sub': '354', - 'cp_phase_times:p2v_ino_pub': '799', - 'cp_phase_times:p2a_ipu_bitmap_grow': '229', - 'cp_phase_times:p2v_refcount': '427', - 'timestamp': '1453573834', - 'cp_phase_times:p2v_dlog1': '0', - 'cp_phase_times:p2_finish': '0', - 'cp_phase_times:p1_clean': '9832', - 'node-name': 'cluster1-01', - 'instance-uuid': 'cluster1-01:kernel:wafl', - 'cp_phase_times:p3a_volinfo': '11076392', - 'cp_phase_times:p2a_topaa': '1275', - 'cp_phase_times:p2_flush': '21604863', - 'cp_phase_times:p2v_df_scores': '1460', - 'cp_phase_times:ipu_disk_add': '0', - 'cp_phase_times:p2v_snap': '5135', - 'cp_phase_times:p5_finish': '36', - 'cp_phase_times:p2v_ino_pri': '1336', - 'cp_phase_times:p3v_volinfo': '3286', - 'cp_phase_times:p2v_topaa': '1058', - 'cp_phase_times:p3_finish': '22001', - }, { - 'node-name': 'cluster1-01', - 'instance-uuid': 'cluster1-01:kernel:wafl', - 'total_cp_msecs': '33311106', - 'instance-name': 'wafl', - 'timestamp': '1453573834', - }, { - 'domain_busy:kahuna': '2712629374', - 'timestamp': '1453573834', - 'domain_busy:cifs': '434036', - 'domain_busy:raid_exempt': '28', - 'node-name': 'cluster1-01', 
- 'instance-uuid': 'cluster1-01:kernel:processor0', - 'domain_busy:target': '6461082', - 'domain_busy:nwk_exempt': '20', - 'domain_busy:raid': '722136824', - 'domain_busy:storage': '2253260824', - 'instance-name': 'processor0', - 'domain_busy:cluster': '34', - 'domain_busy:wafl_xcleaner': '51277506', - 'domain_busy:wafl_exempt': '1243637154', - 'domain_busy:protocol': '54', - 'domain_busy': '1028906640232,2712629374,2253260824,5689093500,' - '722136824,28,6461082,59,434036,1243637154,51277506,' - '61240335,34,54,11,20,5254491236,13657992139,452215', - 'domain_busy:nwk_legacy': '5254491236', - 'domain_busy:dnscache': '59', - 'domain_busy:exempt': '5689093500', - 'domain_busy:hostos': '13657992139', - 'domain_busy:sm_exempt': '61240335', - 'domain_busy:nwk_exclusive': '11', - 'domain_busy:idle': '1028906640232', - 'domain_busy:ssan_exempt': '452215', - }, { - 'node-name': 'cluster1-01', - 'instance-uuid': 'cluster1-01:kernel:processor0', - 'processor_elapsed_time': '1063341351916', - 'instance-name': 'processor0', - 'timestamp': '1453573834', - }, { - 'domain_busy:kahuna': '1978217049', - 'timestamp': '1453573834', - 'domain_busy:cifs': '318584', - 'domain_busy:raid_exempt': '0', - 'node-name': 'cluster1-01', - 'instance-uuid': 'cluster1-01:kernel:processor1', - 'domain_busy:target': '3331147', - 'domain_busy:nwk_exempt': '0', - 'domain_busy:raid': '722276805', - 'domain_busy:storage': '1498984059', - 'instance-name': 'processor1', - 'domain_busy:cluster': '0', - 'domain_busy:wafl_xcleaner': '50126176', - 'domain_busy:wafl_exempt': '1266039846', - 'domain_busy:protocol': '0', - 'domain_busy': '1039613222253,1978217049,1498984059,3734279672,' - '722276805,0,3331147,0,318584,1266039846,50126176,' - '36419297,0,0,0,0,2815435865,10276068104,393451', - 'domain_busy:nwk_legacy': '2815435865', - 'domain_busy:dnscache': '0', - 'domain_busy:exempt': '3734279672', - 'domain_busy:hostos': '10276068104', - 'domain_busy:sm_exempt': '36419297', - 'domain_busy:nwk_exclusive': '0', 
- 'domain_busy:idle': '1039613222253', - 'domain_busy:ssan_exempt': '393451', - }, { - 'node-name': 'cluster1-01', - 'instance-uuid': 'cluster1-01:kernel:processor1', - 'processor_elapsed_time': '1063341351919', - 'instance-name': 'processor1', - 'timestamp': '1453573834', - }, -] - -SYSTEM_INSTANCE_UUIDS = ['cluster1-01:kernel:system'] -SYSTEM_INSTANCE_NAMES = ['system'] - -SYSTEM_COUNTERS = [ - { - 'node-name': 'cluster1-01', - 'instance-uuid': 'cluster1-01:kernel:system', - 'avg_processor_busy': '27877641199', - 'instance-name': 'system', - 'timestamp': '1453524928', - }, { - 'node-name': 'cluster1-01', - 'instance-uuid': 'cluster1-01:kernel:system', - 'cpu_elapsed_time': '1014438541279', - 'instance-name': 'system', - 'timestamp': '1453524928', - }, { - 'node-name': 'cluster1-01', - 'instance-uuid': 'cluster1-01:kernel:system', - 'cpu_elapsed_time1': '1014438541279', - 'instance-name': 'system', - 'timestamp': '1453524928', - }, -] - - -WAFL_INSTANCE_UUIDS = ['cluster1-01:kernel:wafl'] -WAFL_INSTANCE_NAMES = ['wafl'] - -WAFL_COUNTERS = [ - { - 'cp_phase_times': '563,4844,1731,9676,469,0,821,763,1282,1937,0,359,' - '418,1048,344,3344,4867,1397,1101,2380,356,1318,' - '5954,9,2236,2190,228,476,1221,0,5838,696,515588,' - '20542954,0,0,3122,10567367,20696,13982,36', - 'node-name': 'cluster1-01', - 'instance-uuid': 'cluster1-01:kernel:wafl', - 'instance-name': 'wafl', - 'timestamp': '1453523339', - }, { - 'node-name': 'cluster1-01', - 'instance-uuid': 'cluster1-01:kernel:wafl', - 'total_cp_msecs': '31721222', - 'instance-name': 'wafl', - 'timestamp': '1453523339', - }, -] - -WAFL_CP_PHASE_TIMES_COUNTER_INFO = { - 'labels': [ - 'SETUP', 'PRE_P0', 'P0_SNAP_DEL', 'P1_CLEAN', 'P1_QUOTA', - 'IPU_DISK_ADD', 'P2V_INOFILE', 'P2V_INO_PUB', 'P2V_INO_PRI', - 'P2V_FSINFO', 'P2V_DLOG1', 'P2V_DLOG2', 'P2V_REFCOUNT', - 'P2V_TOPAA', 'P2V_DF_SCORES_SUB', 'P2V_BM', 'P2V_SNAP', - 'P2V_DF_SCORES', 'P2V_VOLINFO', 'P2V_CONT', 'P2A_INOFILE', - 'P2A_INO', 'P2A_DLOG1', 'P2A_HYA', 
'P2A_DLOG2', 'P2A_FSINFO', - 'P2A_IPU_BITMAP_GROW', 'P2A_REFCOUNT', 'P2A_TOPAA', - 'P2A_HYABC', 'P2A_BM', 'P2A_SNAP', 'P2A_VOLINFO', 'P2_FLUSH', - 'P2_FINISH', 'P3_WAIT', 'P3V_VOLINFO', 'P3A_VOLINFO', - 'P3_FINISH', 'P4_FINISH', 'P5_FINISH', - ], - 'name': 'cp_phase_times', -} - -EXPANDED_WAFL_COUNTERS = [ - { - 'cp_phase_times:p2a_snap': '696', - 'cp_phase_times:p4_finish': '13982', - 'cp_phase_times:setup': '563', - 'cp_phase_times:p2a_dlog1': '5954', - 'cp_phase_times:p2a_dlog2': '2236', - 'cp_phase_times:p2v_cont': '2380', - 'cp_phase_times:p2v_volinfo': '1101', - 'cp_phase_times:p2v_bm': '3344', - 'cp_phase_times:p2v_fsinfo': '1937', - 'cp_phase_times:p2a_inofile': '356', - 'cp_phase_times': '563,4844,1731,9676,469,0,821,763,1282,1937,0,359,' - '418,1048,344,3344,4867,1397,1101,2380,356,1318,' - '5954,9,2236,2190,228,476,1221,0,5838,696,515588,' - '20542954,0,0,3122,10567367,20696,13982,36', - 'cp_phase_times:p2v_dlog2': '359', - 'instance-name': 'wafl', - 'cp_phase_times:p3_wait': '0', - 'cp_phase_times:p2a_bm': '5838', - 'cp_phase_times:p1_quota': '469', - 'cp_phase_times:p2v_inofile': '821', - 'cp_phase_times:p2a_refcount': '476', - 'cp_phase_times:p2a_fsinfo': '2190', - 'cp_phase_times:p2a_hyabc': '0', - 'cp_phase_times:p2a_volinfo': '515588', - 'cp_phase_times:pre_p0': '4844', - 'cp_phase_times:p2a_hya': '9', - 'cp_phase_times:p0_snap_del': '1731', - 'cp_phase_times:p2a_ino': '1318', - 'cp_phase_times:p2v_df_scores_sub': '344', - 'cp_phase_times:p2v_ino_pub': '763', - 'cp_phase_times:p2a_ipu_bitmap_grow': '228', - 'cp_phase_times:p2v_refcount': '418', - 'timestamp': '1453523339', - 'cp_phase_times:p2v_dlog1': '0', - 'cp_phase_times:p2_finish': '0', - 'cp_phase_times:p1_clean': '9676', - 'node-name': 'cluster1-01', - 'instance-uuid': 'cluster1-01:kernel:wafl', - 'cp_phase_times:p3a_volinfo': '10567367', - 'cp_phase_times:p2a_topaa': '1221', - 'cp_phase_times:p2_flush': '20542954', - 'cp_phase_times:p2v_df_scores': '1397', - 'cp_phase_times:ipu_disk_add': 
'0', - 'cp_phase_times:p2v_snap': '4867', - 'cp_phase_times:p5_finish': '36', - 'cp_phase_times:p2v_ino_pri': '1282', - 'cp_phase_times:p3v_volinfo': '3122', - 'cp_phase_times:p2v_topaa': '1048', - 'cp_phase_times:p3_finish': '20696', - }, { - 'node-name': 'cluster1-01', - 'instance-uuid': 'cluster1-01:kernel:wafl', - 'total_cp_msecs': '31721222', - 'instance-name': 'wafl', - 'timestamp': '1453523339', - }, -] - -PROCESSOR_INSTANCE_UUIDS = [ - 'cluster1-01:kernel:processor0', - 'cluster1-01:kernel:processor1', -] -PROCESSOR_INSTANCE_NAMES = ['processor0', 'processor1'] - -PROCESSOR_COUNTERS = [ - { - 'node-name': 'cluster1-01', - 'instance-uuid': 'cluster1-01:kernel:processor0', - 'domain_busy': '980648687811,2597164534,2155400686,5443901498,' - '690280568,28,6180773,59,413895,1190100947,48989575,' - '58549809,34,54,11,20,5024141791,13136260754,452215', - 'instance-name': 'processor0', - 'timestamp': '1453524150', - }, { - 'node-name': 'cluster1-01', - 'instance-uuid': 'cluster1-01:kernel:processor0', - 'processor_elapsed_time': '1013660714257', - 'instance-name': 'processor0', - 'timestamp': '1453524150', - }, { - 'node-name': 'cluster1-01', - 'instance-uuid': 'cluster1-01:kernel:processor1', - 'domain_busy': '990957980543,1891766637,1433411516,3572427934,' - '691372324,0,3188648,0,305947,1211235777,47954620,' - '34832715,0,0,0,0,2692084482,9834648927,393451', - 'instance-name': 'processor1', - 'timestamp': '1453524150', - }, { - 'node-name': 'cluster1-01', - 'instance-uuid': 'cluster1-01:kernel:processor1', - 'processor_elapsed_time': '1013660714261', - 'instance-name': 'processor1', - 'timestamp': '1453524150', - }, -] - -PROCESSOR_DOMAIN_BUSY_COUNTER_INFO = { - 'labels': [ - 'idle', 'kahuna', 'storage', 'exempt', 'raid', 'raid_exempt', - 'target', 'dnscache', 'cifs', 'wafl_exempt', 'wafl_xcleaner', - 'sm_exempt', 'cluster', 'protocol', 'nwk_exclusive', 'nwk_exempt', - 'nwk_legacy', 'hostOS', 'ssan_exempt', - ], - 'name': 'domain_busy', -} - 
-EXPANDED_PROCESSOR_COUNTERS = [ - { - 'domain_busy:kahuna': '2597164534', - 'timestamp': '1453524150', - 'domain_busy:cifs': '413895', - 'domain_busy:raid_exempt': '28', - 'node-name': 'cluster1-01', - 'instance-uuid': 'cluster1-01:kernel:processor0', - 'domain_busy:target': '6180773', - 'domain_busy:nwk_exempt': '20', - 'domain_busy:raid': '690280568', - 'domain_busy:storage': '2155400686', - 'instance-name': 'processor0', - 'domain_busy:cluster': '34', - 'domain_busy:wafl_xcleaner': '48989575', - 'domain_busy:wafl_exempt': '1190100947', - 'domain_busy:protocol': '54', - 'domain_busy': '980648687811,2597164534,2155400686,5443901498,' - '690280568,28,6180773,59,413895,1190100947,48989575,' - '58549809,34,54,11,20,5024141791,13136260754,452215', - 'domain_busy:nwk_legacy': '5024141791', - 'domain_busy:dnscache': '59', - 'domain_busy:exempt': '5443901498', - 'domain_busy:hostos': '13136260754', - 'domain_busy:sm_exempt': '58549809', - 'domain_busy:nwk_exclusive': '11', - 'domain_busy:idle': '980648687811', - 'domain_busy:ssan_exempt': '452215', - }, { - 'node-name': 'cluster1-01', - 'instance-uuid': 'cluster1-01:kernel:processor0', - 'processor_elapsed_time': '1013660714257', - 'instance-name': 'processor0', - 'timestamp': '1453524150', - }, { - 'domain_busy:kahuna': '1891766637', - 'timestamp': '1453524150', - 'domain_busy:cifs': '305947', - 'domain_busy:raid_exempt': '0', - 'node-name': 'cluster1-01', - 'instance-uuid': 'cluster1-01:kernel:processor1', - 'domain_busy:target': '3188648', - 'domain_busy:nwk_exempt': '0', - 'domain_busy:raid': '691372324', - 'domain_busy:storage': '1433411516', - 'instance-name': 'processor1', - 'domain_busy:cluster': '0', - 'domain_busy:wafl_xcleaner': '47954620', - 'domain_busy:wafl_exempt': '1211235777', - 'domain_busy:protocol': '0', - 'domain_busy': '990957980543,1891766637,1433411516,3572427934,' - '691372324,0,3188648,0,305947,1211235777,47954620,' - '34832715,0,0,0,0,2692084482,9834648927,393451', - 'domain_busy:nwk_legacy': 
'2692084482', - 'domain_busy:dnscache': '0', - 'domain_busy:exempt': '3572427934', - 'domain_busy:hostos': '9834648927', - 'domain_busy:sm_exempt': '34832715', - 'domain_busy:nwk_exclusive': '0', - 'domain_busy:idle': '990957980543', - 'domain_busy:ssan_exempt': '393451', - }, { - 'node-name': 'cluster1-01', - 'instance-uuid': 'cluster1-01:kernel:processor1', - 'processor_elapsed_time': '1013660714261', - 'instance-name': 'processor1', - 'timestamp': '1453524150', - }, -] diff --git a/cinder/tests/unit/volume/drivers/netapp/dataontap/performance/test_perf_7mode.py b/cinder/tests/unit/volume/drivers/netapp/dataontap/performance/test_perf_7mode.py deleted file mode 100644 index 51eefe386..000000000 --- a/cinder/tests/unit/volume/drivers/netapp/dataontap/performance/test_perf_7mode.py +++ /dev/null @@ -1,253 +0,0 @@ -# Copyright (c) 2016 Clinton Knight -# All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -import ddt -import mock - -from cinder import test -from cinder.tests.unit.volume.drivers.netapp.dataontap.performance \ - import fakes as fake -from cinder.volume.drivers.netapp.dataontap.client import api as netapp_api -from cinder.volume.drivers.netapp.dataontap.performance import perf_7mode -from cinder.volume.drivers.netapp.dataontap.performance import perf_base - - -@ddt.ddt -class Performance7modeLibraryTestCase(test.TestCase): - - def setUp(self): - super(Performance7modeLibraryTestCase, self).setUp() - - with mock.patch.object(perf_7mode.Performance7modeLibrary, - '_init_counter_info'): - self.zapi_client = mock.Mock() - self.zapi_client.get_system_name.return_value = fake.NODE - self.perf_library = perf_7mode.Performance7modeLibrary( - self.zapi_client) - self.perf_library.system_object_name = 'system' - self.perf_library.avg_processor_busy_base_counter_name = ( - 'cpu_elapsed_time1') - - def test_init_counter_info_not_supported(self): - - self.zapi_client.features.SYSTEM_METRICS = False - mock_get_base_counter_name = self.mock_object( - self.perf_library, '_get_base_counter_name') - - self.perf_library._init_counter_info() - - self.assertIsNone(self.perf_library.system_object_name) - self.assertIsNone( - self.perf_library.avg_processor_busy_base_counter_name) - self.assertFalse(mock_get_base_counter_name.called) - - def test_init_counter_info_api_error(self): - - self.zapi_client.features.SYSTEM_METRICS = True - mock_get_base_counter_name = self.mock_object( - self.perf_library, '_get_base_counter_name', - side_effect=netapp_api.NaApiError) - - self.perf_library._init_counter_info() - - self.assertEqual('system', self.perf_library.system_object_name) - self.assertEqual( - 'cpu_elapsed_time1', - self.perf_library.avg_processor_busy_base_counter_name) - mock_get_base_counter_name.assert_called_once_with( - 'system', 'avg_processor_busy') - - def test_init_counter_info_system(self): - - self.zapi_client.features.SYSTEM_METRICS = True - 
mock_get_base_counter_name = self.mock_object( - self.perf_library, '_get_base_counter_name', - return_value='cpu_elapsed_time1') - - self.perf_library._init_counter_info() - - self.assertEqual('system', self.perf_library.system_object_name) - self.assertEqual( - 'cpu_elapsed_time1', - self.perf_library.avg_processor_busy_base_counter_name) - mock_get_base_counter_name.assert_called_once_with( - 'system', 'avg_processor_busy') - - def test_update_performance_cache(self): - - self.perf_library.performance_counters = list(range(11, 21)) - - mock_get_node_utilization_counters = self.mock_object( - self.perf_library, '_get_node_utilization_counters', - return_value=21) - mock_get_node_utilization = self.mock_object( - self.perf_library, '_get_node_utilization', - return_value=25) - - self.perf_library.update_performance_cache() - - self.assertEqual(list(range(12, 22)), - self.perf_library.performance_counters) - self.assertEqual(25, self.perf_library.utilization) - mock_get_node_utilization_counters.assert_called_once_with() - mock_get_node_utilization.assert_called_once_with(12, 21, fake.NODE) - - def test_update_performance_cache_first_pass(self): - - mock_get_node_utilization_counters = self.mock_object( - self.perf_library, '_get_node_utilization_counters', - return_value=11) - mock_get_node_utilization = self.mock_object( - self.perf_library, '_get_node_utilization', return_value=25) - - self.perf_library.update_performance_cache() - - self.assertEqual([11], self.perf_library.performance_counters) - mock_get_node_utilization_counters.assert_called_once_with() - self.assertFalse(mock_get_node_utilization.called) - - def test_update_performance_cache_counters_unavailable(self): - - self.perf_library.performance_counters = list(range(11, 21)) - self.perf_library.utilization = 55.0 - - mock_get_node_utilization_counters = self.mock_object( - self.perf_library, '_get_node_utilization_counters', - return_value=None) - mock_get_node_utilization = self.mock_object( - 
self.perf_library, '_get_node_utilization', return_value=25) - - self.perf_library.update_performance_cache() - - self.assertEqual(list(range(11, 21)), - self.perf_library.performance_counters) - self.assertEqual(55.0, self.perf_library.utilization) - mock_get_node_utilization_counters.assert_called_once_with() - self.assertFalse(mock_get_node_utilization.called) - - def test_update_performance_cache_not_supported(self): - - self.zapi_client.features.SYSTEM_METRICS = False - mock_get_node_utilization_counters = self.mock_object( - self.perf_library, '_get_node_utilization_counters') - - self.perf_library.update_performance_cache() - - self.assertEqual([], self.perf_library.performance_counters) - self.assertEqual(perf_base.DEFAULT_UTILIZATION, - self.perf_library.utilization) - self.assertFalse(mock_get_node_utilization_counters.called) - - def test_get_node_utilization(self): - - self.perf_library.utilization = 47.1 - - result = self.perf_library.get_node_utilization() - - self.assertEqual(47.1, result) - - def test_get_node_utilization_counters(self): - - mock_get_node_utilization_system_counters = self.mock_object( - self.perf_library, '_get_node_utilization_system_counters', - return_value=['A', 'B', 'C']) - mock_get_node_utilization_wafl_counters = self.mock_object( - self.perf_library, '_get_node_utilization_wafl_counters', - return_value=['D', 'E', 'F']) - mock_get_node_utilization_processor_counters = self.mock_object( - self.perf_library, '_get_node_utilization_processor_counters', - return_value=['G', 'H', 'I']) - - result = self.perf_library._get_node_utilization_counters() - - expected = ['A', 'B', 'C', 'D', 'E', 'F', 'G', 'H', 'I'] - self.assertEqual(expected, result) - - mock_get_node_utilization_system_counters.assert_called_once_with() - mock_get_node_utilization_wafl_counters.assert_called_once_with() - mock_get_node_utilization_processor_counters.assert_called_once_with() - - def test_get_node_utilization_counters_api_error(self): - - 
self.mock_object(self.perf_library, - '_get_node_utilization_system_counters', - side_effect=netapp_api.NaApiError) - - result = self.perf_library._get_node_utilization_counters() - - self.assertIsNone(result) - - def test_get_node_utilization_system_counters(self): - - mock_get_performance_instance_names = self.mock_object( - self.zapi_client, 'get_performance_instance_names', - return_value=fake.SYSTEM_INSTANCE_NAMES) - mock_get_performance_counters = self.mock_object( - self.zapi_client, 'get_performance_counters', - return_value=fake.SYSTEM_COUNTERS) - - result = self.perf_library._get_node_utilization_system_counters() - - self.assertEqual(fake.SYSTEM_COUNTERS, result) - - mock_get_performance_instance_names.assert_called_once_with('system') - mock_get_performance_counters.assert_called_once_with( - 'system', fake.SYSTEM_INSTANCE_NAMES, - ['avg_processor_busy', 'cpu_elapsed_time1', 'cpu_elapsed_time']) - - def test_get_node_utilization_wafl_counters(self): - - mock_get_performance_instance_names = self.mock_object( - self.zapi_client, 'get_performance_instance_names', - return_value=fake.WAFL_INSTANCE_NAMES) - mock_get_performance_counters = self.mock_object( - self.zapi_client, 'get_performance_counters', - return_value=fake.WAFL_COUNTERS) - mock_get_performance_counter_info = self.mock_object( - self.zapi_client, 'get_performance_counter_info', - return_value=fake.WAFL_CP_PHASE_TIMES_COUNTER_INFO) - - result = self.perf_library._get_node_utilization_wafl_counters() - - self.assertEqual(fake.EXPANDED_WAFL_COUNTERS, result) - - mock_get_performance_instance_names.assert_called_once_with('wafl') - mock_get_performance_counters.assert_called_once_with( - 'wafl', fake.WAFL_INSTANCE_NAMES, - ['total_cp_msecs', 'cp_phase_times']) - mock_get_performance_counter_info.assert_called_once_with( - 'wafl', 'cp_phase_times') - - def test_get_node_utilization_processor_counters(self): - - mock_get_performance_instance_names = self.mock_object( - self.zapi_client, 
'get_performance_instance_names', - return_value=fake.PROCESSOR_INSTANCE_NAMES) - mock_get_performance_counters = self.mock_object( - self.zapi_client, 'get_performance_counters', - return_value=fake.PROCESSOR_COUNTERS) - self.mock_object( - self.zapi_client, 'get_performance_counter_info', - return_value=fake.PROCESSOR_DOMAIN_BUSY_COUNTER_INFO) - - result = self.perf_library._get_node_utilization_processor_counters() - - self.assertEqual(fake.EXPANDED_PROCESSOR_COUNTERS, result) - - mock_get_performance_instance_names.assert_called_once_with( - 'processor') - mock_get_performance_counters.assert_called_once_with( - 'processor', fake.PROCESSOR_INSTANCE_NAMES, - ['domain_busy', 'processor_elapsed_time']) diff --git a/cinder/tests/unit/volume/drivers/netapp/dataontap/performance/test_perf_base.py b/cinder/tests/unit/volume/drivers/netapp/dataontap/performance/test_perf_base.py deleted file mode 100644 index 9834964cb..000000000 --- a/cinder/tests/unit/volume/drivers/netapp/dataontap/performance/test_perf_base.py +++ /dev/null @@ -1,361 +0,0 @@ -# Copyright (c) 2016 Clinton Knight -# All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -import ddt -import mock - -from cinder import exception -from cinder import test -from cinder.tests.unit.volume.drivers.netapp.dataontap.performance \ - import fakes as fake -from cinder.volume.drivers.netapp.dataontap.performance import perf_base - - -@ddt.ddt -class PerformanceLibraryTestCase(test.TestCase): - - def setUp(self): - super(PerformanceLibraryTestCase, self).setUp() - - with mock.patch.object(perf_base.PerformanceLibrary, - '_init_counter_info'): - self.zapi_client = mock.Mock() - self.perf_library = perf_base.PerformanceLibrary(self.zapi_client) - self.perf_library.system_object_name = 'system' - self.perf_library.avg_processor_busy_base_counter_name = ( - 'cpu_elapsed_time1') - - def test_init(self): - - mock_zapi_client = mock.Mock() - mock_init_counter_info = self.mock_object( - perf_base.PerformanceLibrary, '_init_counter_info') - - library = perf_base.PerformanceLibrary(mock_zapi_client) - - self.assertEqual(mock_zapi_client, library.zapi_client) - mock_init_counter_info.assert_called_once_with() - - def test_init_counter_info(self): - - self.perf_library._init_counter_info() - - self.assertIsNone(self.perf_library.system_object_name) - self.assertIsNone( - self.perf_library.avg_processor_busy_base_counter_name) - - def test_get_node_utilization_kahuna_overutilized(self): - - mock_get_kahuna_utilization = self.mock_object( - self.perf_library, '_get_kahuna_utilization', return_value=61.0) - mock_get_average_cpu_utilization = self.mock_object( - self.perf_library, '_get_average_cpu_utilization', - return_value=25.0) - - result = self.perf_library._get_node_utilization('fake1', - 'fake2', - 'fake_node') - - self.assertAlmostEqual(100.0, result) - mock_get_kahuna_utilization.assert_called_once_with('fake1', 'fake2') - self.assertFalse(mock_get_average_cpu_utilization.called) - - @ddt.data({'cpu': -0.01, 'cp_time': 10000, 'poll_time': 0}, - {'cpu': 1.01, 'cp_time': 0, 'poll_time': 1000}, - {'cpu': 0.50, 'cp_time': 0, 'poll_time': 0}) - 
@ddt.unpack - def test_get_node_utilization_zero_time(self, cpu, cp_time, poll_time): - - mock_get_kahuna_utilization = self.mock_object( - self.perf_library, '_get_kahuna_utilization', return_value=59.0) - mock_get_average_cpu_utilization = self.mock_object( - self.perf_library, '_get_average_cpu_utilization', - return_value=cpu) - mock_get_total_consistency_point_time = self.mock_object( - self.perf_library, '_get_total_consistency_point_time', - return_value=cp_time) - mock_get_consistency_point_p2_flush_time = self.mock_object( - self.perf_library, '_get_consistency_point_p2_flush_time', - return_value=cp_time) - mock_get_total_time = self.mock_object( - self.perf_library, '_get_total_time', return_value=poll_time) - mock_get_adjusted_consistency_point_time = self.mock_object( - self.perf_library, '_get_adjusted_consistency_point_time') - - result = self.perf_library._get_node_utilization('fake1', - 'fake2', - 'fake_node') - - expected = max(min(100.0, 100.0 * cpu), 0) - self.assertEqual(expected, result) - - mock_get_kahuna_utilization.assert_called_once_with('fake1', 'fake2') - mock_get_average_cpu_utilization.assert_called_once_with('fake1', - 'fake2') - mock_get_total_consistency_point_time.assert_called_once_with('fake1', - 'fake2') - mock_get_consistency_point_p2_flush_time.assert_called_once_with( - 'fake1', 'fake2') - mock_get_total_time.assert_called_once_with('fake1', - 'fake2', - 'total_cp_msecs') - self.assertFalse(mock_get_adjusted_consistency_point_time.called) - - @ddt.data({'cpu': 0.75, 'adjusted_cp_time': 8000, 'expected': 80}, - {'cpu': 0.80, 'adjusted_cp_time': 7500, 'expected': 80}, - {'cpu': 0.50, 'adjusted_cp_time': 11000, 'expected': 100}) - @ddt.unpack - def test_get_node_utilization(self, cpu, adjusted_cp_time, expected): - - mock_get_kahuna_utilization = self.mock_object( - self.perf_library, '_get_kahuna_utilization', return_value=59.0) - mock_get_average_cpu_utilization = self.mock_object( - self.perf_library, 
'_get_average_cpu_utilization', - return_value=cpu) - mock_get_total_consistency_point_time = self.mock_object( - self.perf_library, '_get_total_consistency_point_time', - return_value=90.0) - mock_get_consistency_point_p2_flush_time = self.mock_object( - self.perf_library, '_get_consistency_point_p2_flush_time', - return_value=50.0) - mock_get_total_time = self.mock_object( - self.perf_library, '_get_total_time', return_value=10000) - mock_get_adjusted_consistency_point_time = self.mock_object( - self.perf_library, '_get_adjusted_consistency_point_time', - return_value=adjusted_cp_time) - - result = self.perf_library._get_node_utilization('fake1', - 'fake2', - 'fake_node') - - self.assertEqual(expected, result) - - mock_get_kahuna_utilization.assert_called_once_with('fake1', 'fake2') - mock_get_average_cpu_utilization.assert_called_once_with('fake1', - 'fake2') - mock_get_total_consistency_point_time.assert_called_once_with('fake1', - 'fake2') - mock_get_consistency_point_p2_flush_time.assert_called_once_with( - 'fake1', 'fake2') - mock_get_total_time.assert_called_once_with('fake1', - 'fake2', - 'total_cp_msecs') - mock_get_adjusted_consistency_point_time.assert_called_once_with( - 90.0, 50.0) - - def test_get_node_utilization_calculation_error(self): - - self.mock_object(self.perf_library, - '_get_kahuna_utilization', - return_value=59.0) - self.mock_object(self.perf_library, - '_get_average_cpu_utilization', - return_value=25.0) - self.mock_object(self.perf_library, - '_get_total_consistency_point_time', - return_value=90.0) - self.mock_object(self.perf_library, - '_get_consistency_point_p2_flush_time', - return_value=50.0) - self.mock_object(self.perf_library, - '_get_total_time', - return_value=10000) - self.mock_object(self.perf_library, - '_get_adjusted_consistency_point_time', - side_effect=ZeroDivisionError) - - result = self.perf_library._get_node_utilization('fake1', - 'fake2', - 'fake_node') - - self.assertEqual(perf_base.DEFAULT_UTILIZATION, result) - 
- def test_get_kahuna_utilization(self): - - mock_get_performance_counter = self.mock_object( - self.perf_library, - '_get_performance_counter_average_multi_instance', - return_value=[0.2, 0.3]) - - result = self.perf_library._get_kahuna_utilization('fake_t1', - 'fake_t2') - - self.assertAlmostEqual(50.0, result) - mock_get_performance_counter.assert_called_once_with( - 'fake_t1', 'fake_t2', 'domain_busy:kahuna', - 'processor_elapsed_time') - - def test_get_average_cpu_utilization(self): - - mock_get_performance_counter_average = self.mock_object( - self.perf_library, '_get_performance_counter_average', - return_value=0.45) - - result = self.perf_library._get_average_cpu_utilization('fake_t1', - 'fake_t2') - - self.assertAlmostEqual(0.45, result) - mock_get_performance_counter_average.assert_called_once_with( - 'fake_t1', 'fake_t2', 'avg_processor_busy', 'cpu_elapsed_time1') - - def test_get_total_consistency_point_time(self): - - mock_get_performance_counter_delta = self.mock_object( - self.perf_library, '_get_performance_counter_delta', - return_value=500) - - result = self.perf_library._get_total_consistency_point_time( - 'fake_t1', 'fake_t2') - - self.assertEqual(500, result) - mock_get_performance_counter_delta.assert_called_once_with( - 'fake_t1', 'fake_t2', 'total_cp_msecs') - - def test_get_consistency_point_p2_flush_time(self): - - mock_get_performance_counter_delta = self.mock_object( - self.perf_library, '_get_performance_counter_delta', - return_value=500) - - result = self.perf_library._get_consistency_point_p2_flush_time( - 'fake_t1', 'fake_t2') - - self.assertEqual(500, result) - mock_get_performance_counter_delta.assert_called_once_with( - 'fake_t1', 'fake_t2', 'cp_phase_times:p2_flush') - - def test_get_total_time(self): - - mock_find_performance_counter_timestamp = self.mock_object( - self.perf_library, '_find_performance_counter_timestamp', - side_effect=[100, 105]) - - result = self.perf_library._get_total_time('fake_t1', - 'fake_t2', - 
'fake_counter') - - self.assertEqual(5000, result) - mock_find_performance_counter_timestamp.assert_has_calls([ - mock.call('fake_t1', 'fake_counter'), - mock.call('fake_t2', 'fake_counter')]) - - def test_get_adjusted_consistency_point_time(self): - - result = self.perf_library._get_adjusted_consistency_point_time( - 500, 200) - - self.assertAlmostEqual(360.0, result) - - def test_get_performance_counter_delta(self): - - result = self.perf_library._get_performance_counter_delta( - fake.COUNTERS_T1, fake.COUNTERS_T2, 'total_cp_msecs') - - self.assertEqual(1482, result) - - def test_get_performance_counter_average(self): - - result = self.perf_library._get_performance_counter_average( - fake.COUNTERS_T1, fake.COUNTERS_T2, 'domain_busy:kahuna', - 'processor_elapsed_time', 'processor0') - - self.assertAlmostEqual(0.00281954360981, result) - - def test_get_performance_counter_average_multi_instance(self): - - result = ( - self.perf_library._get_performance_counter_average_multi_instance( - fake.COUNTERS_T1, fake.COUNTERS_T2, 'domain_busy:kahuna', - 'processor_elapsed_time')) - - expected = [0.002819543609809441, 0.0033421611147606135] - self.assertAlmostEqual(expected, result) - - def test_find_performance_counter_value(self): - - result = self.perf_library._find_performance_counter_value( - fake.COUNTERS_T1, 'domain_busy:kahuna', - instance_name='processor0') - - self.assertEqual('2712467226', result) - - def test_find_performance_counter_value_not_found(self): - - self.assertRaises( - exception.NotFound, - self.perf_library._find_performance_counter_value, - fake.COUNTERS_T1, 'invalid', instance_name='processor0') - - def test_find_performance_counter_timestamp(self): - - result = self.perf_library._find_performance_counter_timestamp( - fake.COUNTERS_T1, 'domain_busy') - - self.assertEqual('1453573777', result) - - def test_find_performance_counter_timestamp_not_found(self): - - self.assertRaises( - exception.NotFound, - 
self.perf_library._find_performance_counter_timestamp, - fake.COUNTERS_T1, 'invalid', instance_name='processor0') - - def test_expand_performance_array(self): - - counter_info = { - 'labels': ['idle', 'kahuna', 'storage', 'exempt'], - 'name': 'domain_busy', - } - self.zapi_client.get_performance_counter_info = mock.Mock( - return_value=counter_info) - - counter = { - 'node-name': 'cluster1-01', - 'instance-uuid': 'cluster1-01:kernel:processor0', - 'domain_busy': '969142314286,2567571412,2131582146,5383861579', - 'instance-name': 'processor0', - 'timestamp': '1453512244', - } - self.perf_library._expand_performance_array('wafl', - 'domain_busy', - counter) - - modified_counter = { - 'node-name': 'cluster1-01', - 'instance-uuid': 'cluster1-01:kernel:processor0', - 'domain_busy': '969142314286,2567571412,2131582146,5383861579', - 'instance-name': 'processor0', - 'timestamp': '1453512244', - 'domain_busy:idle': '969142314286', - 'domain_busy:kahuna': '2567571412', - 'domain_busy:storage': '2131582146', - 'domain_busy:exempt': '5383861579', - } - self.assertEqual(modified_counter, counter) - - def test_get_base_counter_name(self): - - counter_info = { - 'base-counter': 'cpu_elapsed_time', - 'labels': [], - 'name': 'avg_processor_busy', - } - self.zapi_client.get_performance_counter_info = mock.Mock( - return_value=counter_info) - - result = self.perf_library._get_base_counter_name( - 'system:constituent', 'avg_processor_busy') - - self.assertEqual('cpu_elapsed_time', result) diff --git a/cinder/tests/unit/volume/drivers/netapp/dataontap/performance/test_perf_cmode.py b/cinder/tests/unit/volume/drivers/netapp/dataontap/performance/test_perf_cmode.py deleted file mode 100644 index 5fbeabaf1..000000000 --- a/cinder/tests/unit/volume/drivers/netapp/dataontap/performance/test_perf_cmode.py +++ /dev/null @@ -1,466 +0,0 @@ -# Copyright (c) 2016 Clinton Knight -# All rights reserved. 
-# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import ddt -import mock - -from cinder import test -from cinder.tests.unit.volume.drivers.netapp.dataontap.performance \ - import fakes as fake -from cinder.volume.drivers.netapp.dataontap.client import api as netapp_api -from cinder.volume.drivers.netapp.dataontap.performance import perf_base -from cinder.volume.drivers.netapp.dataontap.performance import perf_cmode - - -@ddt.ddt -class PerformanceCmodeLibraryTestCase(test.TestCase): - - def setUp(self): - super(PerformanceCmodeLibraryTestCase, self).setUp() - - with mock.patch.object(perf_cmode.PerformanceCmodeLibrary, - '_init_counter_info'): - self.zapi_client = mock.Mock() - self.perf_library = perf_cmode.PerformanceCmodeLibrary( - self.zapi_client) - self.perf_library.system_object_name = 'system' - self.perf_library.avg_processor_busy_base_counter_name = ( - 'cpu_elapsed_time1') - - self._set_up_fake_pools() - - def _set_up_fake_pools(self): - - self.fake_volumes = { - 'pool1': { - 'netapp_aggregate': 'aggr1', - }, - 'pool2': { - 'netapp_aggregate': 'aggr2', - }, - 'pool3': { - 'netapp_aggregate': 'aggr2', - }, - } - - self.fake_aggrs = set(['aggr1', 'aggr2', 'aggr3']) - self.fake_nodes = set(['node1', 'node2']) - self.fake_aggr_node_map = { - 'aggr1': 'node1', - 'aggr2': 'node2', - 'aggr3': 'node2', - } - - def test_init_counter_info_not_supported(self): - - self.zapi_client.features.SYSTEM_METRICS = False - self.zapi_client.features.SYSTEM_CONSTITUENT_METRICS 
= False - mock_get_base_counter_name = self.mock_object( - self.perf_library, '_get_base_counter_name') - - self.perf_library._init_counter_info() - - self.assertIsNone(self.perf_library.system_object_name) - self.assertIsNone( - self.perf_library.avg_processor_busy_base_counter_name) - self.assertFalse(mock_get_base_counter_name.called) - - @ddt.data({ - 'system_constituent': False, - 'base_counter': 'cpu_elapsed_time1', - }, { - 'system_constituent': True, - 'base_counter': 'cpu_elapsed_time', - }) - @ddt.unpack - def test_init_counter_info_api_error(self, system_constituent, - base_counter): - - self.zapi_client.features.SYSTEM_METRICS = True - self.zapi_client.features.SYSTEM_CONSTITUENT_METRICS = ( - system_constituent) - self.mock_object(self.perf_library, - '_get_base_counter_name', - side_effect=netapp_api.NaApiError) - - self.perf_library._init_counter_info() - - self.assertEqual( - base_counter, - self.perf_library.avg_processor_busy_base_counter_name) - - def test_init_counter_info_system(self): - - self.zapi_client.features.SYSTEM_METRICS = True - self.zapi_client.features.SYSTEM_CONSTITUENT_METRICS = False - mock_get_base_counter_name = self.mock_object( - self.perf_library, '_get_base_counter_name', - return_value='cpu_elapsed_time1') - - self.perf_library._init_counter_info() - - self.assertEqual('system', self.perf_library.system_object_name) - self.assertEqual( - 'cpu_elapsed_time1', - self.perf_library.avg_processor_busy_base_counter_name) - mock_get_base_counter_name.assert_called_once_with( - 'system', 'avg_processor_busy') - - def test_init_counter_info_system_constituent(self): - - self.zapi_client.features.SYSTEM_METRICS = False - self.zapi_client.features.SYSTEM_CONSTITUENT_METRICS = True - mock_get_base_counter_name = self.mock_object( - self.perf_library, '_get_base_counter_name', - return_value='cpu_elapsed_time') - - self.perf_library._init_counter_info() - - self.assertEqual('system:constituent', - self.perf_library.system_object_name) 
- self.assertEqual( - 'cpu_elapsed_time', - self.perf_library.avg_processor_busy_base_counter_name) - mock_get_base_counter_name.assert_called_once_with( - 'system:constituent', 'avg_processor_busy') - - def test_update_performance_cache(self): - - self.perf_library.performance_counters = { - 'node1': list(range(11, 21)), - 'node2': list(range(21, 31)), - } - mock_get_aggregates_for_pools = self.mock_object( - self.perf_library, '_get_aggregates_for_pools', - return_value=self.fake_aggrs) - mock_get_nodes_for_aggregates = self.mock_object( - self.perf_library, '_get_nodes_for_aggregates', - return_value=(self.fake_nodes, self.fake_aggr_node_map)) - mock_get_node_utilization_counters = self.mock_object( - self.perf_library, '_get_node_utilization_counters', - side_effect=[21, 31]) - mock_get_node_utilization = self.mock_object( - self.perf_library, '_get_node_utilization', side_effect=[25, 75]) - - self.perf_library.update_performance_cache(self.fake_volumes) - - expected_performance_counters = { - 'node1': list(range(12, 22)), - 'node2': list(range(22, 32)), - } - self.assertEqual(expected_performance_counters, - self.perf_library.performance_counters) - - expected_pool_utilization = {'pool1': 25, 'pool2': 75, 'pool3': 75} - self.assertEqual(expected_pool_utilization, - self.perf_library.pool_utilization) - - mock_get_aggregates_for_pools.assert_called_once_with( - self.fake_volumes) - mock_get_nodes_for_aggregates.assert_called_once_with(self.fake_aggrs) - mock_get_node_utilization_counters.assert_has_calls([ - mock.call('node1'), mock.call('node2')]) - mock_get_node_utilization.assert_has_calls([ - mock.call(12, 21, 'node1'), mock.call(22, 31, 'node2')]) - - def test_update_performance_cache_first_pass(self): - - mock_get_aggregates_for_pools = self.mock_object( - self.perf_library, '_get_aggregates_for_pools', - return_value=self.fake_aggrs) - mock_get_nodes_for_aggregates = self.mock_object( - self.perf_library, '_get_nodes_for_aggregates', - 
return_value=(self.fake_nodes, self.fake_aggr_node_map)) - mock_get_node_utilization_counters = self.mock_object( - self.perf_library, '_get_node_utilization_counters', - side_effect=[11, 21]) - mock_get_node_utilization = self.mock_object( - self.perf_library, '_get_node_utilization', side_effect=[25, 75]) - - self.perf_library.update_performance_cache(self.fake_volumes) - - expected_performance_counters = {'node1': [11], 'node2': [21]} - self.assertEqual(expected_performance_counters, - self.perf_library.performance_counters) - - expected_pool_utilization = { - 'pool1': perf_base.DEFAULT_UTILIZATION, - 'pool2': perf_base.DEFAULT_UTILIZATION, - 'pool3': perf_base.DEFAULT_UTILIZATION, - } - self.assertEqual(expected_pool_utilization, - self.perf_library.pool_utilization) - - mock_get_aggregates_for_pools.assert_called_once_with( - self.fake_volumes) - mock_get_nodes_for_aggregates.assert_called_once_with(self.fake_aggrs) - mock_get_node_utilization_counters.assert_has_calls([ - mock.call('node1'), mock.call('node2')]) - self.assertFalse(mock_get_node_utilization.called) - - def test_update_performance_cache_unknown_nodes(self): - - self.perf_library.performance_counters = { - 'node1': range(11, 21), - 'node2': range(21, 31), - } - mock_get_aggregates_for_pools = self.mock_object( - self.perf_library, '_get_aggregates_for_pools', - return_value=self.fake_aggrs) - mock_get_nodes_for_aggregates = self.mock_object( - self.perf_library, '_get_nodes_for_aggregates', - return_value=(set(), {})) - mock_get_node_utilization_counters = self.mock_object( - self.perf_library, '_get_node_utilization_counters', - side_effect=[11, 21]) - mock_get_node_utilization = self.mock_object( - self.perf_library, '_get_node_utilization', side_effect=[25, 75]) - - self.perf_library.update_performance_cache(self.fake_volumes) - - expected_performance_counters = { - 'node1': range(11, 21), - 'node2': range(21, 31), - } - self.assertEqual(expected_performance_counters, - 
self.perf_library.performance_counters) - - expected_pool_utilization = { - 'pool1': perf_base.DEFAULT_UTILIZATION, - 'pool2': perf_base.DEFAULT_UTILIZATION, - 'pool3': perf_base.DEFAULT_UTILIZATION, - } - self.assertEqual(expected_pool_utilization, - self.perf_library.pool_utilization) - - mock_get_aggregates_for_pools.assert_called_once_with( - self.fake_volumes) - mock_get_nodes_for_aggregates.assert_called_once_with(self.fake_aggrs) - self.assertFalse(mock_get_node_utilization_counters.called) - self.assertFalse(mock_get_node_utilization.called) - - def test_update_performance_cache_counters_unavailable(self): - - self.perf_library.performance_counters = { - 'node1': range(11, 21), - 'node2': range(21, 31), - } - mock_get_aggregates_for_pools = self.mock_object( - self.perf_library, '_get_aggregates_for_pools', - return_value=self.fake_aggrs) - mock_get_nodes_for_aggregates = self.mock_object( - self.perf_library, '_get_nodes_for_aggregates', - return_value=(self.fake_nodes, self.fake_aggr_node_map)) - mock_get_node_utilization_counters = self.mock_object( - self.perf_library, '_get_node_utilization_counters', - side_effect=[None, None]) - mock_get_node_utilization = self.mock_object( - self.perf_library, '_get_node_utilization', side_effect=[25, 75]) - - self.perf_library.update_performance_cache(self.fake_volumes) - - expected_performance_counters = { - 'node1': range(11, 21), - 'node2': range(21, 31), - } - self.assertEqual(expected_performance_counters, - self.perf_library.performance_counters) - - expected_pool_utilization = { - 'pool1': perf_base.DEFAULT_UTILIZATION, - 'pool2': perf_base.DEFAULT_UTILIZATION, - 'pool3': perf_base.DEFAULT_UTILIZATION, - } - self.assertEqual(expected_pool_utilization, - self.perf_library.pool_utilization) - - mock_get_aggregates_for_pools.assert_called_once_with( - self.fake_volumes) - mock_get_nodes_for_aggregates.assert_called_once_with(self.fake_aggrs) - mock_get_node_utilization_counters.assert_has_calls([ - 
mock.call('node1'), mock.call('node2')], - any_order=True) - self.assertFalse(mock_get_node_utilization.called) - - def test_update_performance_cache_not_supported(self): - - self.zapi_client.features.SYSTEM_METRICS = False - self.zapi_client.features.SYSTEM_CONSTITUENT_METRICS = False - - mock_get_aggregates_for_pools = self.mock_object( - self.perf_library, '_get_aggregates_for_pools') - - self.perf_library.update_performance_cache(self.fake_volumes) - - expected_performance_counters = {} - self.assertEqual(expected_performance_counters, - self.perf_library.performance_counters) - - expected_pool_utilization = {} - self.assertEqual(expected_pool_utilization, - self.perf_library.pool_utilization) - - self.assertFalse(mock_get_aggregates_for_pools.called) - - @ddt.data({'pool': 'pool1', 'expected': 10.0}, - {'pool': 'pool3', 'expected': perf_base.DEFAULT_UTILIZATION}) - @ddt.unpack - def test_get_node_utilization_for_pool(self, pool, expected): - - self.perf_library.pool_utilization = {'pool1': 10.0, 'pool2': 15.0} - - result = self.perf_library.get_node_utilization_for_pool(pool) - - self.assertAlmostEqual(expected, result) - - def test__update_for_failover(self): - self.mock_object(self.perf_library, 'update_performance_cache') - mock_client = mock.Mock(name='FAKE_ZAPI_CLIENT') - - self.perf_library._update_for_failover(mock_client, self.fake_volumes) - - self.assertEqual(mock_client, self.perf_library.zapi_client) - self.perf_library.update_performance_cache.assert_called_once_with( - self.fake_volumes) - - def test_get_aggregates_for_pools(self): - - result = self.perf_library._get_aggregates_for_pools(self.fake_volumes) - - expected_aggregate_names = set(['aggr1', 'aggr2']) - self.assertEqual(expected_aggregate_names, result) - - def test_get_nodes_for_aggregates(self): - - aggregate_names = ['aggr1', 'aggr2', 'aggr3'] - aggregate_nodes = ['node1', 'node2', 'node2'] - - mock_get_node_for_aggregate = self.mock_object( - self.zapi_client, 
'get_node_for_aggregate', - side_effect=aggregate_nodes) - - result = self.perf_library._get_nodes_for_aggregates(aggregate_names) - - self.assertEqual(2, len(result)) - result_node_names, result_aggr_node_map = result - - expected_node_names = set(['node1', 'node2']) - expected_aggr_node_map = dict(zip(aggregate_names, aggregate_nodes)) - self.assertEqual(expected_node_names, result_node_names) - self.assertEqual(expected_aggr_node_map, result_aggr_node_map) - mock_get_node_for_aggregate.assert_has_calls([ - mock.call('aggr1'), mock.call('aggr2'), mock.call('aggr3')]) - - def test_get_node_utilization_counters(self): - - mock_get_node_utilization_system_counters = self.mock_object( - self.perf_library, '_get_node_utilization_system_counters', - return_value=['A', 'B', 'C']) - mock_get_node_utilization_wafl_counters = self.mock_object( - self.perf_library, '_get_node_utilization_wafl_counters', - return_value=['D', 'E', 'F']) - mock_get_node_utilization_processor_counters = self.mock_object( - self.perf_library, '_get_node_utilization_processor_counters', - return_value=['G', 'H', 'I']) - - result = self.perf_library._get_node_utilization_counters(fake.NODE) - - expected = ['A', 'B', 'C', 'D', 'E', 'F', 'G', 'H', 'I'] - self.assertEqual(expected, result) - - mock_get_node_utilization_system_counters.assert_called_once_with( - fake.NODE) - mock_get_node_utilization_wafl_counters.assert_called_once_with( - fake.NODE) - mock_get_node_utilization_processor_counters.assert_called_once_with( - fake.NODE) - - def test_get_node_utilization_counters_api_error(self): - - self.mock_object(self.perf_library, - '_get_node_utilization_system_counters', - side_effect=netapp_api.NaApiError) - - result = self.perf_library._get_node_utilization_counters(fake.NODE) - - self.assertIsNone(result) - - def test_get_node_utilization_system_counters(self): - - mock_get_performance_instance_uuids = self.mock_object( - self.zapi_client, 'get_performance_instance_uuids', - 
return_value=fake.SYSTEM_INSTANCE_UUIDS) - mock_get_performance_counters = self.mock_object( - self.zapi_client, 'get_performance_counters', - return_value=fake.SYSTEM_COUNTERS) - - result = self.perf_library._get_node_utilization_system_counters( - fake.NODE) - - self.assertEqual(fake.SYSTEM_COUNTERS, result) - - mock_get_performance_instance_uuids.assert_called_once_with( - 'system', fake.NODE) - mock_get_performance_counters.assert_called_once_with( - 'system', fake.SYSTEM_INSTANCE_UUIDS, - ['avg_processor_busy', 'cpu_elapsed_time1', 'cpu_elapsed_time']) - - def test_get_node_utilization_wafl_counters(self): - - mock_get_performance_instance_uuids = self.mock_object( - self.zapi_client, 'get_performance_instance_uuids', - return_value=fake.WAFL_INSTANCE_UUIDS) - mock_get_performance_counters = self.mock_object( - self.zapi_client, 'get_performance_counters', - return_value=fake.WAFL_COUNTERS) - mock_get_performance_counter_info = self.mock_object( - self.zapi_client, 'get_performance_counter_info', - return_value=fake.WAFL_CP_PHASE_TIMES_COUNTER_INFO) - - result = self.perf_library._get_node_utilization_wafl_counters( - fake.NODE) - - self.assertEqual(fake.EXPANDED_WAFL_COUNTERS, result) - - mock_get_performance_instance_uuids.assert_called_once_with( - 'wafl', fake.NODE) - mock_get_performance_counters.assert_called_once_with( - 'wafl', fake.WAFL_INSTANCE_UUIDS, - ['total_cp_msecs', 'cp_phase_times']) - mock_get_performance_counter_info.assert_called_once_with( - 'wafl', 'cp_phase_times') - - def test_get_node_utilization_processor_counters(self): - - mock_get_performance_instance_uuids = self.mock_object( - self.zapi_client, 'get_performance_instance_uuids', - return_value=fake.PROCESSOR_INSTANCE_UUIDS) - mock_get_performance_counters = self.mock_object( - self.zapi_client, 'get_performance_counters', - return_value=fake.PROCESSOR_COUNTERS) - self.mock_object( - self.zapi_client, 'get_performance_counter_info', - 
return_value=fake.PROCESSOR_DOMAIN_BUSY_COUNTER_INFO) - - result = self.perf_library._get_node_utilization_processor_counters( - fake.NODE) - - self.assertEqual(fake.EXPANDED_PROCESSOR_COUNTERS, result) - - mock_get_performance_instance_uuids.assert_called_once_with( - 'processor', fake.NODE) - mock_get_performance_counters.assert_called_once_with( - 'processor', fake.PROCESSOR_INSTANCE_UUIDS, - ['domain_busy', 'processor_elapsed_time']) diff --git a/cinder/tests/unit/volume/drivers/netapp/dataontap/test_block_7mode.py b/cinder/tests/unit/volume/drivers/netapp/dataontap/test_block_7mode.py deleted file mode 100644 index c1a1f9311..000000000 --- a/cinder/tests/unit/volume/drivers/netapp/dataontap/test_block_7mode.py +++ /dev/null @@ -1,779 +0,0 @@ -# Copyright (c) 2014 Alex Meade. All rights reserved. -# Copyright (c) 2014 Clinton Knight. All rights reserved. -# Copyright (c) 2015 Tom Barron. All rights reserved. -# Copyright (c) 2015 Goutham Pacha Ravi. All rights reserved. -# Copyright (c) 2016 Mike Rooney. All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
-""" -Mock unit tests for the NetApp block storage 7-mode library -""" - - -import ddt -from lxml import etree -import mock - -from oslo_utils import timeutils - -from cinder import exception -from cinder import test -import cinder.tests.unit.volume.drivers.netapp.dataontap.client.fakes \ - as client_fakes -import cinder.tests.unit.volume.drivers.netapp.dataontap.fakes as fake -import cinder.tests.unit.volume.drivers.netapp.fakes as na_fakes -from cinder.volume.drivers.netapp.dataontap import block_7mode -from cinder.volume.drivers.netapp.dataontap import block_base -from cinder.volume.drivers.netapp.dataontap.client import api as netapp_api -from cinder.volume.drivers.netapp.dataontap.client import client_base -from cinder.volume.drivers.netapp.dataontap.performance import perf_7mode -from cinder.volume.drivers.netapp.dataontap.utils import utils as dot_utils -from cinder.volume.drivers.netapp import utils as na_utils - - -@ddt.ddt -class NetAppBlockStorage7modeLibraryTestCase(test.TestCase): - """Test case for NetApp's 7-Mode iSCSI library.""" - - def setUp(self): - super(NetAppBlockStorage7modeLibraryTestCase, self).setUp() - - kwargs = { - 'configuration': self.get_config_7mode(), - 'host': 'openstack@7modeblock', - } - self.library = block_7mode.NetAppBlockStorage7modeLibrary( - 'driver', 'protocol', **kwargs) - - self.library.zapi_client = mock.Mock() - self.zapi_client = self.library.zapi_client - self.library.perf_library = mock.Mock() - self.library.vfiler = mock.Mock() - # Deprecated option - self.library.configuration.netapp_volume_list = None - - def get_config_7mode(self): - config = na_fakes.create_configuration_7mode() - config.netapp_storage_protocol = 'iscsi' - config.netapp_login = 'admin' - config.netapp_password = 'pass' - config.netapp_server_hostname = '127.0.0.1' - config.netapp_transport_type = 'http' - config.netapp_server_port = '80' - return config - - @mock.patch.object(perf_7mode, 'Performance7modeLibrary', mock.Mock()) - 
@mock.patch.object(client_base.Client, 'get_ontapi_version', - mock.MagicMock(return_value=(1, 20))) - @mock.patch.object(block_7mode.NetAppBlockStorage7modeLibrary, - '_get_root_volume_name') - @mock.patch.object(block_7mode.NetAppBlockStorage7modeLibrary, - '_do_partner_setup') - @mock.patch.object(block_base.NetAppBlockStorageLibrary, 'do_setup') - def test_do_setup(self, super_do_setup, mock_do_partner_setup, - mock_get_root_volume_name): - - self.mock_object(client_base.Client, '_init_ssh_client') - mock_get_root_volume_name.return_value = 'vol0' - context = mock.Mock() - - self.library.do_setup(context) - - super_do_setup.assert_called_once_with(context) - mock_do_partner_setup.assert_called_once_with() - mock_get_root_volume_name.assert_called_once_with() - - @mock.patch.object(client_base.Client, 'get_ontapi_version', - mock.MagicMock(return_value=(1, 20))) - def test_do_partner_setup(self): - self.mock_object(client_base.Client, '_init_ssh_client') - self.library.configuration.netapp_partner_backend_name = 'partner' - - self.library._do_partner_setup() - - self.assertIsNotNone(self.library.partner_zapi_client) - - @mock.patch.object(client_base.Client, 'get_ontapi_version', - mock.MagicMock(return_value=(1, 20))) - def test_do_partner_setup_no_partner(self): - self.mock_object(client_base.Client, '_init_ssh_client') - self.library._do_partner_setup() - - self.assertFalse(hasattr(self.library, 'partner_zapi_client')) - - @mock.patch.object( - block_base.NetAppBlockStorageLibrary, 'check_for_setup_error') - def test_check_for_setup_error(self, super_check_for_setup_error): - self.zapi_client.get_ontapi_version.return_value = (1, 9) - self.mock_object(self.library, '_refresh_volume_info') - self.library.volume_list = ['open1', 'open2'] - mock_add_looping_tasks = self.mock_object( - self.library, '_add_looping_tasks') - - self.library.check_for_setup_error() - - mock_add_looping_tasks.assert_called_once_with() - 
super_check_for_setup_error.assert_called_once_with() - - def test_check_for_setup_error_no_filtered_pools(self): - self.zapi_client.get_ontapi_version.return_value = (1, 9) - self.mock_object(self.library, '_refresh_volume_info') - self.library.volume_list = [] - - self.assertRaises(exception.NetAppDriverException, - self.library.check_for_setup_error) - - @ddt.data(None, (1, 8)) - def test_check_for_setup_error_unsupported_or_no_version(self, version): - self.zapi_client.get_ontapi_version.return_value = version - self.assertRaises(exception.VolumeBackendAPIException, - self.library.check_for_setup_error) - - def test_handle_ems_logging(self): - - self.library.volume_list = ['vol0', 'vol1', 'vol2'] - self.mock_object( - dot_utils, 'build_ems_log_message_0', - return_value='fake_base_ems_log_message') - self.mock_object( - dot_utils, 'build_ems_log_message_1', - return_value='fake_pool_ems_log_message') - mock_send_ems_log_message = self.mock_object( - self.zapi_client, 'send_ems_log_message') - - self.library._handle_ems_logging() - - mock_send_ems_log_message.assert_has_calls([ - mock.call('fake_base_ems_log_message'), - mock.call('fake_pool_ems_log_message'), - ]) - dot_utils.build_ems_log_message_0.assert_called_once_with( - self.library.driver_name, self.library.app_version, - self.library.driver_mode) - dot_utils.build_ems_log_message_1.assert_called_once_with( - self.library.driver_name, self.library.app_version, None, - self.library.volume_list, []) - - def test__get_volume_model_update(self): - """Driver is not expected to return a model update.""" - self.assertIsNone( - self.library._get_volume_model_update(fake.VOLUME_REF)) - - @ddt.data(None, fake.VFILER) - def test__get_owner(self, vfiler): - self.library.configuration.netapp_server_hostname = 'openstack' - self.library.vfiler = vfiler - expected_owner = 'openstack' - - retval = self.library._get_owner() - - if vfiler: - expected_owner += ':' + vfiler - - self.assertEqual(expected_owner, retval) - - 
def test_find_mapped_lun_igroup(self): - response = netapp_api.NaElement(etree.XML(""" - - - - %(initiator-group-name)s - %(initiator-group-type)s - 1477ee47-0e1f-4b35-a82c-dcca0b76fc44 - - linux - 0 - false - - false - true - true - - true - - - 21:00:00:24:ff:40:6c:c3 - - - 21:00:00:24:ff:40:6c:c2 - - Centos - - - - 2 - - - """ % fake.IGROUP1)) - initiators = fake.FC_FORMATTED_INITIATORS - self.zapi_client.get_lun_map.return_value = response - - (igroup, lun_id) = self.library._find_mapped_lun_igroup('path', - initiators) - - self.assertEqual(fake.IGROUP1_NAME, igroup) - self.assertEqual('2', lun_id) - - def test_find_mapped_lun_igroup_initiator_mismatch(self): - response = netapp_api.NaElement(etree.XML(""" - - - - openstack-igroup1 - fcp - 1477ee47-0e1f-4b35-a82c-dcca0b76fc44 - - linux - 0 - false - - false - true - true - - true - - - 21:00:00:24:ff:40:6c:c3 - - - 2 - - - """)) - initiators = fake.FC_FORMATTED_INITIATORS - self.zapi_client.get_lun_map.return_value = response - - (igroup, lun_id) = self.library._find_mapped_lun_igroup('path', - initiators) - - self.assertIsNone(igroup) - self.assertIsNone(lun_id) - - def test_find_mapped_lun_igroup_no_igroups(self): - response = netapp_api.NaElement(etree.XML(""" - - - """)) - initiators = fake.FC_FORMATTED_INITIATORS - self.zapi_client.get_lun_map.return_value = response - - (igroup, lun_id) = self.library._find_mapped_lun_igroup('path', - initiators) - - self.assertIsNone(igroup) - self.assertIsNone(lun_id) - - def test_find_mapped_lun_igroup_raises(self): - self.zapi_client.get_lun_map.side_effect = netapp_api.NaApiError - initiators = fake.FC_FORMATTED_INITIATORS - self.assertRaises(netapp_api.NaApiError, - self.library._find_mapped_lun_igroup, - 'path', - initiators) - - def test_has_luns_mapped_to_initiators_local_map(self): - initiator_list = fake.FC_FORMATTED_INITIATORS - self.zapi_client.has_luns_mapped_to_initiators.return_value = True - self.library.partner_zapi_client = mock.Mock() - - result = 
self.library._has_luns_mapped_to_initiators(initiator_list) - - self.assertTrue(result) - self.zapi_client.has_luns_mapped_to_initiators.assert_called_once_with( - initiator_list) - self.assertEqual(0, self.library.partner_zapi_client. - has_luns_mapped_to_initiators.call_count) - - def test_has_luns_mapped_to_initiators_partner_map(self): - initiator_list = fake.FC_FORMATTED_INITIATORS - self.zapi_client.has_luns_mapped_to_initiators.return_value = False - self.library.partner_zapi_client = mock.Mock() - self.library.partner_zapi_client.has_luns_mapped_to_initiators.\ - return_value = True - - result = self.library._has_luns_mapped_to_initiators(initiator_list) - - self.assertTrue(result) - self.zapi_client.has_luns_mapped_to_initiators.assert_called_once_with( - initiator_list) - self.library.partner_zapi_client.has_luns_mapped_to_initiators.\ - assert_called_with(initiator_list) - - def test_has_luns_mapped_to_initiators_no_maps(self): - initiator_list = fake.FC_FORMATTED_INITIATORS - self.zapi_client.has_luns_mapped_to_initiators.return_value = False - self.library.partner_zapi_client = mock.Mock() - self.library.partner_zapi_client.has_luns_mapped_to_initiators.\ - return_value = False - - result = self.library._has_luns_mapped_to_initiators(initiator_list) - - self.assertFalse(result) - self.zapi_client.has_luns_mapped_to_initiators.assert_called_once_with( - initiator_list) - self.library.partner_zapi_client.has_luns_mapped_to_initiators.\ - assert_called_with(initiator_list) - - def test_has_luns_mapped_to_initiators_no_partner(self): - initiator_list = fake.FC_FORMATTED_INITIATORS - self.zapi_client.has_luns_mapped_to_initiators.return_value = False - self.library.partner_zapi_client = mock.Mock() - self.library.partner_zapi_client.has_luns_mapped_to_initiators.\ - return_value = True - - result = self.library._has_luns_mapped_to_initiators( - initiator_list, include_partner=False) - - self.assertFalse(result) - 
self.zapi_client.has_luns_mapped_to_initiators.assert_called_once_with( - initiator_list) - self.assertEqual(0, self.library.partner_zapi_client. - has_luns_mapped_to_initiators.call_count) - - @ddt.data(True, False) - def test_clone_lun_zero_block_count(self, is_snapshot): - """Test for when clone lun is not passed a block count.""" - self.library._get_lun_attr = mock.Mock(return_value={ - 'Volume': 'fakeLUN', 'Path': '/vol/fake/fakeLUN'}) - self.library.zapi_client = mock.Mock() - self.library.zapi_client.get_lun_by_args.return_value = [fake.FAKE_LUN] - self.library._add_lun_to_table = mock.Mock() - - self.library._clone_lun('fakeLUN', 'newFakeLUN', 'false', - is_snapshot=is_snapshot) - - self.library.zapi_client.clone_lun.assert_called_once_with( - '/vol/fake/fakeLUN', '/vol/fake/newFakeLUN', 'fakeLUN', - 'newFakeLUN', 'false', block_count=0, dest_block=0, - source_snapshot=None, src_block=0) - - def test_clone_lun_blocks(self): - """Test for when clone lun is passed block information.""" - block_count = 10 - src_block = 10 - dest_block = 30 - self.library._get_lun_attr = mock.Mock(return_value={ - 'Volume': 'fakeLUN', 'Path': '/vol/fake/fakeLUN'}) - self.library.zapi_client = mock.Mock() - self.library.zapi_client.get_lun_by_args.return_value = [fake.FAKE_LUN] - self.library._add_lun_to_table = mock.Mock() - - self.library._clone_lun('fakeLUN', 'newFakeLUN', 'false', - block_count=block_count, src_block=src_block, - dest_block=dest_block) - - self.library.zapi_client.clone_lun.assert_called_once_with( - '/vol/fake/fakeLUN', '/vol/fake/newFakeLUN', 'fakeLUN', - 'newFakeLUN', 'false', block_count=block_count, - dest_block=dest_block, src_block=src_block, - source_snapshot=None) - - def test_clone_lun_no_space_reservation(self): - """Test for when space_reservation is not passed.""" - self.library._get_lun_attr = mock.Mock(return_value={ - 'Volume': 'fakeLUN', 'Path': '/vol/fake/fakeLUN'}) - self.library.lun_space_reservation = 'false' - self.library.zapi_client = 
mock.Mock() - self.library.zapi_client.get_lun_by_args.return_value = [fake.FAKE_LUN] - self.library._add_lun_to_table = mock.Mock() - - self.library._clone_lun('fakeLUN', 'newFakeLUN') - - self.library.zapi_client.clone_lun.assert_called_once_with( - '/vol/fake/fakeLUN', '/vol/fake/newFakeLUN', 'fakeLUN', - 'newFakeLUN', 'false', block_count=0, dest_block=0, src_block=0, - source_snapshot=None) - - def test_clone_lun_qos_supplied(self): - """Test for qos supplied in clone lun invocation.""" - self.assertRaises(exception.VolumeDriverException, - self.library._clone_lun, - 'fakeLUN', - 'newFakeLUN', - qos_policy_group_name=fake.QOS_POLICY_GROUP_NAME) - - def test_get_fc_target_wwpns(self): - ports1 = [fake.FC_FORMATTED_TARGET_WWPNS[0], - fake.FC_FORMATTED_TARGET_WWPNS[1]] - ports2 = [fake.FC_FORMATTED_TARGET_WWPNS[2], - fake.FC_FORMATTED_TARGET_WWPNS[3]] - self.zapi_client.get_fc_target_wwpns.return_value = ports1 - self.library.partner_zapi_client = mock.Mock() - self.library.partner_zapi_client.get_fc_target_wwpns.return_value = \ - ports2 - - result = self.library._get_fc_target_wwpns() - - self.assertSetEqual(set(fake.FC_FORMATTED_TARGET_WWPNS), set(result)) - - def test_get_fc_target_wwpns_no_partner(self): - ports1 = [fake.FC_FORMATTED_TARGET_WWPNS[0], - fake.FC_FORMATTED_TARGET_WWPNS[1]] - ports2 = [fake.FC_FORMATTED_TARGET_WWPNS[2], - fake.FC_FORMATTED_TARGET_WWPNS[3]] - self.zapi_client.get_fc_target_wwpns.return_value = ports1 - self.library.partner_zapi_client = mock.Mock() - self.library.partner_zapi_client.get_fc_target_wwpns.return_value = \ - ports2 - - result = self.library._get_fc_target_wwpns(include_partner=False) - - self.assertSetEqual(set(ports1), set(result)) - - def test_create_lun(self): - self.library.vol_refresh_voluntary = False - - self.library._create_lun(fake.VOLUME_ID, fake.LUN_ID, - fake.LUN_SIZE, fake.LUN_METADATA) - - self.library.zapi_client.create_lun.assert_called_once_with( - fake.VOLUME_ID, fake.LUN_ID, fake.LUN_SIZE, 
fake.LUN_METADATA, - None) - self.assertTrue(self.library.vol_refresh_voluntary) - - def test_create_lun_with_qos_policy_group(self): - self.assertRaises(exception.VolumeDriverException, - self.library._create_lun, fake.VOLUME_ID, - fake.LUN_ID, fake.LUN_SIZE, fake.LUN_METADATA, - qos_policy_group_name=fake.QOS_POLICY_GROUP_NAME) - - def test_check_volume_type_for_lun_legacy_qos_not_supported(self): - mock_get_volume_type = self.mock_object(na_utils, - 'get_volume_type_from_volume') - - self.assertRaises(exception.ManageExistingVolumeTypeMismatch, - self.library._check_volume_type_for_lun, - na_fakes.VOLUME, {}, {}, na_fakes.LEGACY_EXTRA_SPECS) - - self.assertEqual(0, mock_get_volume_type.call_count) - - def test_check_volume_type_for_lun_no_volume_type(self): - mock_get_volume_type = self.mock_object(na_utils, - 'get_volume_type_from_volume') - mock_get_volume_type.return_value = None - mock_get_backend_spec = self.mock_object( - na_utils, 'get_backend_qos_spec_from_volume_type') - - self.library._check_volume_type_for_lun(na_fakes.VOLUME, {}, {}, None) - - self.assertEqual(0, mock_get_backend_spec.call_count) - - def test_check_volume_type_for_lun_qos_spec_not_supported(self): - mock_get_volume_type = self.mock_object(na_utils, - 'get_volume_type_from_volume') - mock_get_volume_type.return_value = na_fakes.VOLUME_TYPE - mock_get_backend_spec = self.mock_object( - na_utils, 'get_backend_qos_spec_from_volume_type') - mock_get_backend_spec.return_value = na_fakes.QOS_SPEC - - self.assertRaises(exception.ManageExistingVolumeTypeMismatch, - self.library._check_volume_type_for_lun, - na_fakes.VOLUME, {}, {}, na_fakes.EXTRA_SPECS) - - def test_get_preferred_target_from_list(self): - - result = self.library._get_preferred_target_from_list( - fake.ISCSI_TARGET_DETAILS_LIST) - - self.assertEqual(fake.ISCSI_TARGET_DETAILS_LIST[0], result) - - def test_mark_qos_policy_group_for_deletion(self): - result = self.library._mark_qos_policy_group_for_deletion( - 
fake.QOS_POLICY_GROUP_INFO) - - self.assertIsNone(result) - - def test_setup_qos_for_volume(self): - result = self.library._setup_qos_for_volume(fake.VOLUME, - fake.EXTRA_SPECS) - - self.assertIsNone(result) - - def test_manage_existing_lun_same_name(self): - mock_lun = block_base.NetAppLun('handle', 'name', '1', - {'Path': '/vol/FAKE_CMODE_VOL1/name'}) - self.library._get_existing_vol_with_manage_ref = mock.Mock( - return_value=mock_lun) - self.mock_object(na_utils, 'get_volume_extra_specs') - self.mock_object(na_utils, 'log_extra_spec_warnings') - self.library._check_volume_type_for_lun = mock.Mock() - self.library._add_lun_to_table = mock.Mock() - self.zapi_client.move_lun = mock.Mock() - - self.library.manage_existing({'name': 'name'}, {'ref': 'ref'}) - - self.library._get_existing_vol_with_manage_ref.assert_called_once_with( - {'ref': 'ref'}) - self.assertEqual(1, self.library._check_volume_type_for_lun.call_count) - self.assertEqual(1, self.library._add_lun_to_table.call_count) - self.assertEqual(0, self.zapi_client.move_lun.call_count) - - def test_manage_existing_lun_new_path(self): - mock_lun = block_base.NetAppLun( - 'handle', 'name', '1', {'Path': '/vol/FAKE_CMODE_VOL1/name'}) - self.library._get_existing_vol_with_manage_ref = mock.Mock( - return_value=mock_lun) - self.mock_object(na_utils, 'get_volume_extra_specs') - self.mock_object(na_utils, 'log_extra_spec_warnings') - self.library._check_volume_type_for_lun = mock.Mock() - self.library._add_lun_to_table = mock.Mock() - self.zapi_client.move_lun = mock.Mock() - - self.library.manage_existing({'name': 'volume'}, {'ref': 'ref'}) - - self.assertEqual( - 2, self.library._get_existing_vol_with_manage_ref.call_count) - self.assertEqual(1, self.library._check_volume_type_for_lun.call_count) - self.assertEqual(1, self.library._add_lun_to_table.call_count) - self.zapi_client.move_lun.assert_called_once_with( - '/vol/FAKE_CMODE_VOL1/name', '/vol/FAKE_CMODE_VOL1/volume') - - def 
test_get_pool_stats_no_volumes(self): - - self.library.vols = [] - - result = self.library._get_pool_stats() - - self.assertListEqual([], result) - - @ddt.data({'netapp_lun_space_reservation': 'enabled'}, - {'netapp_lun_space_reservation': 'disabled'}) - @ddt.unpack - def test_get_pool_stats(self, netapp_lun_space_reservation): - - self.library.volume_list = ['vol0', 'vol1', 'vol2'] - self.library.root_volume_name = 'vol0' - self.library.reserved_percentage = 5 - self.library.max_over_subscription_ratio = 10.0 - self.library.configuration.netapp_lun_space_reservation = ( - netapp_lun_space_reservation) - self.library.vols = netapp_api.NaElement( - client_fakes.VOLUME_LIST_INFO_RESPONSE).get_child_by_name( - 'volumes').get_children() - self.library.perf_library.get_node_utilization = ( - mock.Mock(return_value=30.0)) - - thick = netapp_lun_space_reservation == 'enabled' - - result = self.library._get_pool_stats(filter_function='filter', - goodness_function='goodness') - - expected = [{ - 'pool_name': 'vol1', - 'consistencygroup_support': True, - 'QoS_support': False, - 'thin_provisioning_support': not thick, - 'thick_provisioning_support': thick, - 'provisioned_capacity_gb': 2.94, - 'free_capacity_gb': 1339.27, - 'total_capacity_gb': 1342.21, - 'reserved_percentage': 5, - 'max_over_subscription_ratio': 10.0, - 'multiattach': False, - 'utilization': 30.0, - 'filter_function': 'filter', - 'goodness_function': 'goodness', - }] - - self.assertEqual(expected, result) - - def test_get_filtered_pools_invalid_conf(self): - """Verify an exception is raised if the regex pattern is invalid.""" - self.library.configuration.netapp_pool_name_search_pattern = '(.+' - - self.assertRaises(exception.InvalidConfigurationValue, - self.library._get_filtered_pools) - - @ddt.data('.*?3$|mix.+', '(.+?[0-9]+) ', '^.+3$', '^[a-z].*?[^4]$') - def test_get_filtered_pools_match_select_pools(self, patterns): - self.library.vols = fake.FAKE_7MODE_VOLUME['all'] - 
self.library.configuration.netapp_pool_name_search_pattern = patterns - - filtered_pools = self.library._get_filtered_pools() - - self.assertEqual( - fake.FAKE_7MODE_VOLUME['all'][0].get_child_content('name'), - filtered_pools[0] - ) - self.assertEqual( - fake.FAKE_7MODE_VOLUME['all'][1].get_child_content('name'), - filtered_pools[1] - ) - - @ddt.data('', 'mix.+|open.+', '.+', 'open123, mixed3, open1234', '.+') - def test_get_filtered_pools_match_all_pools(self, patterns): - self.library.vols = fake.FAKE_7MODE_VOLUME['all'] - self.library.configuration.netapp_pool_name_search_pattern = patterns - - filtered_pools = self.library._get_filtered_pools() - - self.assertEqual( - fake.FAKE_7MODE_VOLUME['all'][0].get_child_content('name'), - filtered_pools[0] - ) - self.assertEqual( - fake.FAKE_7MODE_VOLUME['all'][1].get_child_content('name'), - filtered_pools[1] - ) - self.assertEqual( - fake.FAKE_7MODE_VOLUME['all'][2].get_child_content('name'), - filtered_pools[2] - ) - - @ddt.data('abc|stackopen|openstack|abc.*', 'abc', - 'stackopen, openstack, open', '^$') - def test_get_filtered_pools_non_matching_patterns(self, patterns): - - self.library.vols = fake.FAKE_7MODE_VOLUME['all'] - self.library.configuration.netapp_pool_name_search_pattern = patterns - - filtered_pools = self.library._get_filtered_pools() - - self.assertListEqual([], filtered_pools) - - def test_get_pool_stats_no_ssc_vols(self): - - self.library.vols = {} - - pools = self.library._get_pool_stats() - - self.assertListEqual([], pools) - - def test_get_pool_stats_with_filtered_pools(self): - - self.library.vols = fake.FAKE_7MODE_VOL1 - self.library.volume_list = [ - fake.FAKE_7MODE_VOL1[0].get_child_content('name') - ] - self.library.root_volume_name = '' - self.library.perf_library.get_node_utilization = ( - mock.Mock(return_value=30.0)) - - pools = self.library._get_pool_stats(filter_function='filter', - goodness_function='goodness') - - self.assertListEqual(fake.FAKE_7MODE_POOLS, pools) - - def 
test_get_pool_stats_no_filtered_pools(self): - - self.library.vols = fake.FAKE_7MODE_VOL1 - self.library.volume_list = ['open1', 'open2'] - self.library.root_volume_name = '' - - pools = self.library._get_pool_stats() - - self.assertListEqual([], pools) - - @ddt.data((None, False, False), - (30, True, False), - (30, False, True)) - @ddt.unpack - def test__refresh_volume_info_already_running(self, - vol_refresh_time, - vol_refresh_voluntary, - is_newer): - mock_warning_log = self.mock_object(block_7mode.LOG, 'warning') - self.library.vol_refresh_time = vol_refresh_time - self.library.vol_refresh_voluntary = vol_refresh_voluntary - self.library.vol_refresh_interval = 30 - self.mock_object(timeutils, 'is_newer_than', return_value=is_newer) - self.mock_object(na_utils, 'set_safe_attr', return_value=False) - - retval = self.library._refresh_volume_info() - - self.assertIsNone(retval) - # Assert no values are unset by the method - self.assertEqual(vol_refresh_voluntary, - self.library.vol_refresh_voluntary) - self.assertEqual(vol_refresh_time, self.library.vol_refresh_time) - if timeutils.is_newer_than.called: - timeutils.is_newer_than.assert_called_once_with( - vol_refresh_time, self.library.vol_refresh_interval) - na_utils.set_safe_attr.assert_has_calls([ - mock.call(self.library, 'vol_refresh_running', True), - mock.call(self.library, 'vol_refresh_running', False)]) - self.assertEqual(1, mock_warning_log.call_count) - - def test__refresh_volume_info(self): - mock_warning_log = self.mock_object(block_7mode.LOG, 'warning') - self.library.vol_refresh_time = None - self.library.vol_refresh_voluntary = True - self.mock_object(timeutils, 'is_newer_than') - self.mock_object(self.library.zapi_client, 'get_filer_volumes') - self.mock_object(self.library, '_get_filtered_pools', - return_value=['vol1', 'vol2']) - self.mock_object(na_utils, 'set_safe_attr', return_value=True) - - retval = self.library._refresh_volume_info() - - self.assertIsNone(retval) - self.assertEqual(False, 
self.library.vol_refresh_voluntary) - self.assertEqual(['vol1', 'vol2'], self.library.volume_list) - self.assertIsNotNone(self.library.vol_refresh_time) - na_utils.set_safe_attr.assert_has_calls([ - mock.call(self.library, 'vol_refresh_running', True), - mock.call(self.library, 'vol_refresh_running', False)]) - self.assertFalse(mock_warning_log.called) - - def test__refresh_volume_info_exception(self): - mock_warning_log = self.mock_object(block_7mode.LOG, 'warning') - self.library.vol_refresh_time = None - self.library.vol_refresh_voluntary = True - self.mock_object(timeutils, 'is_newer_than') - self.mock_object(na_utils, 'set_safe_attr', return_value=True) - self.mock_object(self.library.zapi_client, - 'get_filer_volumes', - side_effect=exception.NetAppDriverException) - self.mock_object(self.library, '_get_filtered_pools') - - retval = self.library._refresh_volume_info() - - self.assertIsNone(retval) - self.assertFalse(self.library._get_filtered_pools.called) - self.assertEqual(1, mock_warning_log.call_count) - - def test_delete_volume(self): - self.library.vol_refresh_voluntary = False - mock_super_delete_volume = self.mock_object( - block_base.NetAppBlockStorageLibrary, 'delete_volume') - - self.library.delete_volume(fake.VOLUME) - - mock_super_delete_volume.assert_called_once_with(fake.VOLUME) - self.assertTrue(self.library.vol_refresh_voluntary) - - def test_delete_snapshot(self): - self.library.vol_refresh_voluntary = False - mock_super_delete_snapshot = self.mock_object( - block_base.NetAppBlockStorageLibrary, 'delete_snapshot') - - self.library.delete_snapshot(fake.SNAPSHOT) - - mock_super_delete_snapshot.assert_called_once_with(fake.SNAPSHOT) - self.assertTrue(self.library.vol_refresh_voluntary) - - def test_add_looping_tasks(self): - mock_super_add_looping_tasks = self.mock_object( - block_base.NetAppBlockStorageLibrary, '_add_looping_tasks') - - self.library._add_looping_tasks() - - mock_super_add_looping_tasks.assert_called_once_with() - - def 
test_get_backing_flexvol_names(self): - self.library.volume_list = ['vol0', 'vol1', 'vol2'] - - result = self.library._get_backing_flexvol_names() - - self.assertEqual('vol2', result[2]) diff --git a/cinder/tests/unit/volume/drivers/netapp/dataontap/test_block_base.py b/cinder/tests/unit/volume/drivers/netapp/dataontap/test_block_base.py deleted file mode 100644 index 7e9d0d4e9..000000000 --- a/cinder/tests/unit/volume/drivers/netapp/dataontap/test_block_base.py +++ /dev/null @@ -1,1622 +0,0 @@ -# Copyright (c) 2014 Alex Meade. All rights reserved. -# Copyright (c) 2014 Clinton Knight. All rights reserved. -# Copyright (c) 2014 Andrew Kerr. All rights reserved. -# Copyright (c) 2015 Tom Barron. All rights reserved. -# Copyright (c) 2015 Goutham Pacha Ravi. All rights reserved. -# Copyright (c) 2015 Dustin Schoenbrun. All rights reserved. -# Copyright (c) 2016 Chuck Fouts. All rights reserved. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
-""" -Mock unit tests for the NetApp block storage library -""" - -import copy -import uuid - -import ddt -import mock -from oslo_log import versionutils -from oslo_utils import units -import six - -from cinder import context -from cinder import exception -from cinder.objects import fields -from cinder import test -from cinder.tests.unit import fake_volume -from cinder.tests.unit.volume.drivers.netapp.dataontap import fakes as fake -import cinder.tests.unit.volume.drivers.netapp.fakes as na_fakes -from cinder.volume.drivers.netapp.dataontap import block_base -from cinder.volume.drivers.netapp.dataontap.client import api as netapp_api -from cinder.volume.drivers.netapp.dataontap.utils import loopingcalls -from cinder.volume.drivers.netapp import utils as na_utils -from cinder.volume import utils as volume_utils - - -@ddt.ddt -class NetAppBlockStorageLibraryTestCase(test.TestCase): - - def setUp(self): - super(NetAppBlockStorageLibraryTestCase, self).setUp() - - kwargs = { - 'configuration': self.get_config_base(), - 'host': 'openstack@netappblock', - } - self.library = block_base.NetAppBlockStorageLibrary( - 'driver', 'protocol', **kwargs) - self.library.zapi_client = mock.Mock() - self.zapi_client = self.library.zapi_client - self.mock_request = mock.Mock() - self.ctxt = context.RequestContext('fake', 'fake', auth_token=True) - - def get_config_base(self): - return na_fakes.create_configuration() - - @mock.patch.object(versionutils, 'report_deprecated_feature') - def test_get_reserved_percentage_default_multipler(self, mock_report): - - default = 1.2 - reserved_percentage = 20.0 - self.library.configuration.netapp_size_multiplier = default - self.library.configuration.reserved_percentage = reserved_percentage - - result = self.library._get_reserved_percentage() - - self.assertEqual(reserved_percentage, result) - self.assertFalse(mock_report.called) - - @mock.patch.object(versionutils, 'report_deprecated_feature') - def test_get_reserved_percentage(self, 
mock_report): - - multiplier = 2.0 - self.library.configuration.netapp_size_multiplier = multiplier - - result = self.library._get_reserved_percentage() - - reserved_ratio = round(1 - (1 / multiplier), 2) - reserved_percentage = 100 * int(reserved_ratio) - - self.assertEqual(reserved_percentage, result) - msg = ('The "netapp_size_multiplier" configuration option is ' - 'deprecated and will be removed in the Mitaka release. ' - 'Please set "reserved_percentage = %d" instead.' % - result) - mock_report.assert_called_once_with(block_base.LOG, msg) - - @mock.patch.object(block_base.NetAppBlockStorageLibrary, - '_get_lun_attr', - mock.Mock(return_value={'Volume': 'FAKE_CMODE_VOL1'})) - def test_get_pool(self): - pool = self.library.get_pool({'name': 'volume-fake-uuid'}) - self.assertEqual('FAKE_CMODE_VOL1', pool) - - @mock.patch.object(block_base.NetAppBlockStorageLibrary, - '_get_lun_attr', - mock.Mock(return_value=None)) - def test_get_pool_no_metadata(self): - pool = self.library.get_pool({'name': 'volume-fake-uuid'}) - self.assertIsNone(pool) - - @mock.patch.object(block_base.NetAppBlockStorageLibrary, - '_get_lun_attr', - mock.Mock(return_value=dict())) - def test_get_pool_volume_unknown(self): - pool = self.library.get_pool({'name': 'volume-fake-uuid'}) - self.assertIsNone(pool) - - def test_create_volume(self): - volume_size_in_bytes = int(fake.SIZE) * units.Gi - self.mock_object(na_utils, 'get_volume_extra_specs') - self.mock_object(na_utils, 'log_extra_spec_warnings') - self.mock_object(block_base, 'LOG') - self.mock_object(volume_utils, 'extract_host', - return_value=fake.POOL_NAME) - self.mock_object(self.library, '_setup_qos_for_volume', - return_value=None) - self.mock_object(self.library, '_create_lun') - self.mock_object(self.library, '_create_lun_handle') - self.mock_object(self.library, '_add_lun_to_table') - self.mock_object(self.library, '_mark_qos_policy_group_for_deletion') - self.mock_object(self.library, '_get_volume_model_update') - - 
self.library.create_volume(fake.VOLUME) - - self.library._create_lun.assert_called_once_with( - fake.POOL_NAME, fake.LUN_NAME, volume_size_in_bytes, - fake.LUN_METADATA, None) - self.library._get_volume_model_update.assert_called_once_with( - fake.VOLUME) - self.assertEqual( - 0, self.library. _mark_qos_policy_group_for_deletion.call_count) - self.assertEqual(0, block_base.LOG.error.call_count) - - def test_create_volume_no_pool(self): - self.mock_object(volume_utils, 'extract_host', return_value=None) - - self.assertRaises(exception.InvalidHost, self.library.create_volume, - fake.VOLUME) - - def test_create_volume_exception_path(self): - self.mock_object(block_base, 'LOG') - self.mock_object(na_utils, 'get_volume_extra_specs') - self.mock_object(self.library, '_setup_qos_for_volume', - return_value=None) - self.mock_object(self.library, '_create_lun', side_effect=Exception) - self.mock_object(self.library, '_mark_qos_policy_group_for_deletion') - - self.assertRaises(exception.VolumeBackendAPIException, - self.library.create_volume, fake.VOLUME) - - self.assertEqual(1, self.library. - _mark_qos_policy_group_for_deletion.call_count) - self.assertEqual(1, block_base.LOG.exception.call_count) - - def test_create_volume_no_pool_provided_by_scheduler(self): - fake_volume = copy.deepcopy(fake.VOLUME) - # Set up fake volume whose 'host' field is missing pool information. 
- fake_volume['host'] = '%s@%s' % (fake.HOST_NAME, fake.BACKEND_NAME) - - self.assertRaises(exception.InvalidHost, self.library.create_volume, - fake_volume) - - @mock.patch.object(block_base.NetAppBlockStorageLibrary, - '_get_lun_attr') - @mock.patch.object(block_base.NetAppBlockStorageLibrary, - '_get_or_create_igroup') - def test_map_lun(self, mock_get_or_create_igroup, mock_get_lun_attr): - os = 'linux' - protocol = 'fcp' - self.library.host_type = 'linux' - mock_get_lun_attr.return_value = {'Path': fake.LUN_PATH, 'OsType': os} - mock_get_or_create_igroup.return_value = (fake.IGROUP1_NAME, os, - 'iscsi') - self.zapi_client.map_lun.return_value = '1' - - lun_id = self.library._map_lun('fake_volume', - fake.FC_FORMATTED_INITIATORS, - protocol, None) - - self.assertEqual('1', lun_id) - mock_get_or_create_igroup.assert_called_once_with( - fake.FC_FORMATTED_INITIATORS, protocol, os) - self.zapi_client.map_lun.assert_called_once_with( - fake.LUN_PATH, fake.IGROUP1_NAME, lun_id=None) - - @mock.patch.object(block_base.NetAppBlockStorageLibrary, '_get_lun_attr') - @mock.patch.object(block_base.NetAppBlockStorageLibrary, - '_get_or_create_igroup') - @mock.patch.object(block_base, 'LOG', mock.Mock()) - def test_map_lun_mismatch_host_os( - self, mock_get_or_create_igroup, mock_get_lun_attr): - os = 'windows' - protocol = 'fcp' - self.library.host_type = 'linux' - mock_get_lun_attr.return_value = {'Path': fake.LUN_PATH, 'OsType': os} - mock_get_or_create_igroup.return_value = (fake.IGROUP1_NAME, os, - 'iscsi') - self.library._map_lun('fake_volume', - fake.FC_FORMATTED_INITIATORS, - protocol, None) - mock_get_or_create_igroup.assert_called_once_with( - fake.FC_FORMATTED_INITIATORS, protocol, - self.library.host_type) - self.zapi_client.map_lun.assert_called_once_with( - fake.LUN_PATH, fake.IGROUP1_NAME, lun_id=None) - self.assertEqual(1, block_base.LOG.warning.call_count) - - @mock.patch.object(block_base.NetAppBlockStorageLibrary, - '_get_lun_attr') - 
@mock.patch.object(block_base.NetAppBlockStorageLibrary, - '_get_or_create_igroup') - @mock.patch.object(block_base.NetAppBlockStorageLibrary, - '_find_mapped_lun_igroup') - def test_map_lun_preexisting(self, mock_find_mapped_lun_igroup, - mock_get_or_create_igroup, mock_get_lun_attr): - os = 'linux' - protocol = 'fcp' - mock_get_lun_attr.return_value = {'Path': fake.LUN_PATH, 'OsType': os} - mock_get_or_create_igroup.return_value = (fake.IGROUP1_NAME, os, - 'iscsi') - mock_find_mapped_lun_igroup.return_value = (fake.IGROUP1_NAME, '2') - self.zapi_client.map_lun.side_effect = netapp_api.NaApiError - - lun_id = self.library._map_lun( - 'fake_volume', fake.FC_FORMATTED_INITIATORS, protocol, None) - - self.assertEqual('2', lun_id) - mock_find_mapped_lun_igroup.assert_called_once_with( - fake.LUN_PATH, fake.FC_FORMATTED_INITIATORS) - - @mock.patch.object(block_base.NetAppBlockStorageLibrary, - '_get_lun_attr') - @mock.patch.object(block_base.NetAppBlockStorageLibrary, - '_get_or_create_igroup') - @mock.patch.object(block_base.NetAppBlockStorageLibrary, - '_find_mapped_lun_igroup') - def test_map_lun_api_error(self, mock_find_mapped_lun_igroup, - mock_get_or_create_igroup, mock_get_lun_attr): - os = 'linux' - protocol = 'fcp' - mock_get_lun_attr.return_value = {'Path': fake.LUN_PATH, 'OsType': os} - mock_get_or_create_igroup.return_value = (fake.IGROUP1_NAME, os, - 'iscsi') - mock_find_mapped_lun_igroup.return_value = (None, None) - self.zapi_client.map_lun.side_effect = netapp_api.NaApiError - - self.assertRaises(netapp_api.NaApiError, self.library._map_lun, - 'fake_volume', fake.FC_FORMATTED_INITIATORS, - protocol, None) - - @mock.patch.object(block_base.NetAppBlockStorageLibrary, - '_find_mapped_lun_igroup') - def test_unmap_lun(self, mock_find_mapped_lun_igroup): - mock_find_mapped_lun_igroup.return_value = (fake.IGROUP1_NAME, 1) - - self.library._unmap_lun(fake.LUN_PATH, fake.FC_FORMATTED_INITIATORS) - - 
self.zapi_client.unmap_lun.assert_called_once_with(fake.LUN_PATH, - fake.IGROUP1_NAME) - - def test_find_mapped_lun_igroup(self): - self.assertRaises(NotImplementedError, - self.library._find_mapped_lun_igroup, - fake.LUN_PATH, - fake.FC_FORMATTED_INITIATORS) - - def test_has_luns_mapped_to_initiators(self): - self.zapi_client.has_luns_mapped_to_initiators.return_value = True - self.assertTrue(self.library._has_luns_mapped_to_initiators( - fake.FC_FORMATTED_INITIATORS)) - self.zapi_client.has_luns_mapped_to_initiators.assert_called_once_with( - fake.FC_FORMATTED_INITIATORS) - - def test_get_or_create_igroup_preexisting(self): - self.zapi_client.get_igroup_by_initiators.return_value = [fake.IGROUP1] - self.library._create_igroup_add_initiators = mock.Mock() - igroup_name, host_os, ig_type = self.library._get_or_create_igroup( - fake.FC_FORMATTED_INITIATORS, 'fcp', 'linux') - - self.assertEqual(fake.IGROUP1_NAME, igroup_name) - self.assertEqual('linux', host_os) - self.assertEqual('fcp', ig_type) - self.zapi_client.get_igroup_by_initiators.assert_called_once_with( - fake.FC_FORMATTED_INITIATORS) - self.assertEqual( - 0, self.library._create_igroup_add_initiators.call_count) - - @mock.patch.object(uuid, 'uuid4', mock.Mock(return_value=fake.UUID1)) - def test_get_or_create_igroup_none_preexisting(self): - """This method also tests _create_igroup_add_initiators.""" - self.zapi_client.get_igroup_by_initiators.return_value = [] - - igroup_name, os, ig_type = self.library._get_or_create_igroup( - fake.FC_FORMATTED_INITIATORS, 'fcp', 'linux') - - self.assertEqual('openstack-' + fake.UUID1, igroup_name) - self.zapi_client.create_igroup.assert_called_once_with( - igroup_name, 'fcp', 'linux') - self.assertEqual(len(fake.FC_FORMATTED_INITIATORS), - self.zapi_client.add_igroup_initiator.call_count) - self.assertEqual('linux', os) - self.assertEqual('fcp', ig_type) - - def test_get_fc_target_wwpns(self): - self.assertRaises(NotImplementedError, - 
self.library._get_fc_target_wwpns) - - @mock.patch.object(block_base.NetAppBlockStorageLibrary, - '_build_initiator_target_map') - @mock.patch.object(block_base.NetAppBlockStorageLibrary, - '_map_lun') - def test_initialize_connection_fc(self, mock_map_lun, - mock_build_initiator_target_map): - self.maxDiff = None - mock_map_lun.return_value = '1' - mock_build_initiator_target_map.return_value = (fake.FC_TARGET_WWPNS, - fake.FC_I_T_MAP, 4) - - target_info = self.library.initialize_connection_fc(fake.FC_VOLUME, - fake.FC_CONNECTOR) - - self.assertDictEqual(target_info, fake.FC_TARGET_INFO) - mock_map_lun.assert_called_once_with( - 'fake_volume', fake.FC_FORMATTED_INITIATORS, 'fcp', None) - - @mock.patch.object(block_base.NetAppBlockStorageLibrary, - '_build_initiator_target_map') - @mock.patch.object(block_base.NetAppBlockStorageLibrary, - '_map_lun') - def test_initialize_connection_fc_no_wwpns( - self, mock_map_lun, mock_build_initiator_target_map): - - mock_map_lun.return_value = '1' - mock_build_initiator_target_map.return_value = (None, None, 0) - self.assertRaises(exception.VolumeBackendAPIException, - self.library.initialize_connection_fc, - fake.FC_VOLUME, - fake.FC_CONNECTOR) - - @mock.patch.object(block_base.NetAppBlockStorageLibrary, - '_has_luns_mapped_to_initiators') - @mock.patch.object(block_base.NetAppBlockStorageLibrary, - '_unmap_lun') - @mock.patch.object(block_base.NetAppBlockStorageLibrary, - '_get_lun_attr') - def test_terminate_connection_fc(self, mock_get_lun_attr, mock_unmap_lun, - mock_has_luns_mapped_to_initiators): - - mock_get_lun_attr.return_value = {'Path': fake.LUN_PATH} - mock_unmap_lun.return_value = None - mock_has_luns_mapped_to_initiators.return_value = True - - target_info = self.library.terminate_connection_fc(fake.FC_VOLUME, - fake.FC_CONNECTOR) - - self.assertDictEqual(target_info, fake.FC_TARGET_INFO_EMPTY) - mock_unmap_lun.assert_called_once_with(fake.LUN_PATH, - fake.FC_FORMATTED_INITIATORS) - - 
@mock.patch.object(block_base.NetAppBlockStorageLibrary, - '_build_initiator_target_map') - @mock.patch.object(block_base.NetAppBlockStorageLibrary, - '_has_luns_mapped_to_initiators') - @mock.patch.object(block_base.NetAppBlockStorageLibrary, - '_unmap_lun') - @mock.patch.object(block_base.NetAppBlockStorageLibrary, - '_get_lun_attr') - def test_terminate_connection_fc_no_more_luns( - self, mock_get_lun_attr, mock_unmap_lun, - mock_has_luns_mapped_to_initiators, - mock_build_initiator_target_map): - - mock_get_lun_attr.return_value = {'Path': fake.LUN_PATH} - mock_unmap_lun.return_value = None - mock_has_luns_mapped_to_initiators.return_value = False - mock_build_initiator_target_map.return_value = (fake.FC_TARGET_WWPNS, - fake.FC_I_T_MAP, 4) - - target_info = self.library.terminate_connection_fc(fake.FC_VOLUME, - fake.FC_CONNECTOR) - - self.assertDictEqual(target_info, fake.FC_TARGET_INFO_UNMAP) - - @mock.patch.object(block_base.NetAppBlockStorageLibrary, - '_get_fc_target_wwpns') - def test_build_initiator_target_map_no_lookup_service( - self, mock_get_fc_target_wwpns): - - self.library.lookup_service = None - mock_get_fc_target_wwpns.return_value = fake.FC_FORMATTED_TARGET_WWPNS - - (target_wwpns, init_targ_map, num_paths) = \ - self.library._build_initiator_target_map(fake.FC_CONNECTOR) - - self.assertSetEqual(set(fake.FC_TARGET_WWPNS), set(target_wwpns)) - self.assertDictEqual(fake.FC_I_T_MAP_COMPLETE, init_targ_map) - self.assertEqual(0, num_paths) - - @mock.patch.object(block_base.NetAppBlockStorageLibrary, - '_get_fc_target_wwpns') - def test_build_initiator_target_map_with_lookup_service( - self, mock_get_fc_target_wwpns): - - self.library.lookup_service = mock.Mock() - self.library.lookup_service.get_device_mapping_from_network.\ - return_value = fake.FC_FABRIC_MAP - mock_get_fc_target_wwpns.return_value = fake.FC_FORMATTED_TARGET_WWPNS - - (target_wwpns, init_targ_map, num_paths) = \ - self.library._build_initiator_target_map(fake.FC_CONNECTOR) - - 
self.assertSetEqual(set(fake.FC_TARGET_WWPNS), set(target_wwpns)) - self.assertDictEqual(fake.FC_I_T_MAP, init_targ_map) - self.assertEqual(4, num_paths) - - @mock.patch.object(na_utils, 'check_flags') - def test_do_setup_san_configured(self, mock_check_flags): - self.library.configuration.netapp_lun_ostype = 'windows' - self.library.configuration.netapp_host_type = 'solaris' - self.library.configuration.netapp_lun_space_reservation = 'disabled' - self.library.do_setup(mock.Mock()) - self.assertTrue(mock_check_flags.called) - self.assertEqual('windows', self.library.lun_ostype) - self.assertEqual('solaris', self.library.host_type) - - @mock.patch.object(na_utils, 'check_flags') - def test_do_setup_san_unconfigured(self, mock_check_flags): - self.library.configuration.netapp_lun_ostype = None - self.library.configuration.netapp_host_type = None - self.library.configuration.netapp_lun_space_reservation = 'enabled' - self.library.do_setup(mock.Mock()) - self.assertTrue(mock_check_flags.called) - self.assertEqual('linux', self.library.lun_ostype) - self.assertEqual('linux', self.library.host_type) - - def test_do_setup_space_reservation_disabled(self): - self.mock_object(na_utils, 'check_flags') - self.library.configuration.netapp_lun_ostype = None - self.library.configuration.netapp_host_type = None - self.library.configuration.netapp_lun_space_reservation = 'disabled' - - self.library.do_setup(mock.Mock()) - - self.assertEqual('false', self.library.lun_space_reservation) - - def test_do_setup_space_reservation_enabled(self): - self.mock_object(na_utils, 'check_flags') - self.library.configuration.netapp_lun_ostype = None - self.library.configuration.netapp_host_type = None - self.library.configuration.netapp_lun_space_reservation = 'enabled' - - self.library.do_setup(mock.Mock()) - - self.assertEqual('true', self.library.lun_space_reservation) - - def test_get_existing_vol_with_manage_ref_no_source_info(self): - - 
self.assertRaises(exception.ManageExistingInvalidReference, - self.library._get_existing_vol_with_manage_ref, - {}) - - def test_get_existing_vol_manage_not_found(self): - - self.zapi_client.get_lun_by_args.return_value = [] - - self.assertRaises(exception.ManageExistingInvalidReference, - self.library._get_existing_vol_with_manage_ref, - {'source-name': 'lun_path'}) - self.assertEqual(1, self.zapi_client.get_lun_by_args.call_count) - - def test_get_existing_vol_manage_lun_by_path(self): - - self.library.vserver = 'fake_vserver' - self.zapi_client.get_lun_by_args.return_value = ['lun0', 'lun1'] - mock_lun = block_base.NetAppLun( - 'lun0', 'lun0', '3', {'UUID': 'fake_uuid'}) - self.mock_object(block_base.NetAppBlockStorageLibrary, - '_extract_lun_info', - return_value=mock_lun) - - existing_ref = {'source-name': 'fake_path'} - lun = self.library._get_existing_vol_with_manage_ref(existing_ref) - - self.zapi_client.get_lun_by_args.assert_called_once_with( - path='fake_path') - self.library._extract_lun_info.assert_called_once_with('lun0') - self.assertEqual('lun0', lun.name) - - def test_get_existing_vol_manage_lun_by_uuid(self): - - self.library.vserver = 'fake_vserver' - self.zapi_client.get_lun_by_args.return_value = ['lun0', 'lun1'] - mock_lun = block_base.NetAppLun( - 'lun0', 'lun0', '3', {'UUID': 'fake_uuid'}) - self.mock_object(block_base.NetAppBlockStorageLibrary, - '_extract_lun_info', - return_value=mock_lun) - - existing_ref = {'source-id': 'fake_uuid'} - lun = self.library._get_existing_vol_with_manage_ref(existing_ref) - - self.zapi_client.get_lun_by_args.assert_called_once_with( - uuid='fake_uuid') - self.library._extract_lun_info.assert_called_once_with('lun0') - self.assertEqual('lun0', lun.name) - - def test_get_existing_vol_manage_lun_invalid_mode(self): - - self.assertRaises(exception.ManageExistingInvalidReference, - self.library._get_existing_vol_with_manage_ref, - {'source-id': 'src_id'}) - - def 
test_get_existing_vol_manage_lun_invalid_lun(self): - - self.zapi_client.get_lun_by_args.return_value = ['lun0', 'lun1'] - self.mock_object(block_base.NetAppBlockStorageLibrary, - '_is_lun_valid_on_storage', - side_effect=[False, True]) - mock_lun0 = block_base.NetAppLun( - 'lun0', 'lun0', '3', {'UUID': 'src_id_0'}) - mock_lun1 = block_base.NetAppLun( - 'lun1', 'lun1', '5', {'UUID': 'src_id_1'}) - self.mock_object(block_base.NetAppBlockStorageLibrary, - '_extract_lun_info', - side_effect=[mock_lun0, mock_lun1]) - - lun = self.library._get_existing_vol_with_manage_ref( - {'source-name': 'lun_path'}) - - self.assertEqual(1, self.zapi_client.get_lun_by_args.call_count) - self.library._extract_lun_info.assert_has_calls([ - mock.call('lun0'), - mock.call('lun1'), - ]) - self.assertEqual('lun1', lun.name) - - @mock.patch.object(block_base.NetAppBlockStorageLibrary, - '_get_existing_vol_with_manage_ref', - mock.Mock(return_value=block_base.NetAppLun( - 'handle', 'name', '1073742824', {}))) - def test_manage_existing_get_size(self): - size = self.library.manage_existing_get_size( - {'id': 'vol_id'}, {'ref': 'ref'}) - self.assertEqual(2, size) - self.library._get_existing_vol_with_manage_ref.assert_called_once_with( - {'ref': 'ref'}) - - @ddt.data(None, - {'replication_status': fields.ReplicationStatus.ENABLED}) - def test_manage_existing_lun_name_matches(self, model_update): - volume = fake_volume.fake_volume_obj(self.ctxt) - existing_ref = {'source-name': 'fake_path'} - mock_lun = block_base.NetAppLun( - volume['name'], volume['name'], '3', - {'UUID': 'fake_uuid', 'Path': 'p'}) - self.mock_object(self.library, '_get_existing_vol_with_manage_ref', - return_value=mock_lun) - - self.mock_object(na_utils, 'get_volume_extra_specs', - return_value=fake.EXTRA_SPECS) - self.mock_object(self.library, '_check_volume_type_for_lun', - return_value=True) - self.mock_object(self.library, '_setup_qos_for_volume') - self.mock_object(na_utils, 'get_qos_policy_group_name_from_info', - 
return_value=None) - self.mock_object(self.library, '_add_lun_to_table') - self.mock_object(self.library, '_get_volume_model_update', - return_value=model_update) - mock_info_log = self.mock_object(block_base.LOG, 'info') - - actual_update = self.library.manage_existing(volume, existing_ref) - - self.assertEqual(model_update, actual_update) - self.assertEqual(2, mock_info_log.call_count) - self.library._add_lun_to_table.assert_called_once_with(mock_lun) - - @ddt.data(None, 'fake_qos_policy_group_name') - def test_manage_existing_rename_lun(self, qos_policy_group_name): - expected_update = ( - {'replication_status': fields.ReplicationStatus.ENABLED}) - volume = fake_volume.fake_volume_obj(self.ctxt) - existing_ref = {'source-name': 'fake_path'} - mock_lun = block_base.NetAppLun( - 'lun0', 'lun0', '3', {'UUID': 'fake_uuid', 'Path': fake.LUN_PATH}) - self.mock_object(self.library, '_get_existing_vol_with_manage_ref', - return_value=mock_lun) - - self.mock_object(na_utils, 'get_volume_extra_specs', - return_value=fake.EXTRA_SPECS) - self.mock_object(self.library, '_check_volume_type_for_lun', - return_value=True) - self.mock_object(self.library, '_setup_qos_for_volume') - self.mock_object(na_utils, 'get_qos_policy_group_name_from_info', - return_value=qos_policy_group_name) - self.mock_object(self.library, '_add_lun_to_table') - self.mock_object(self.library, '_get_volume_model_update', - return_value=expected_update) - self.mock_object(self.zapi_client, 'set_lun_qos_policy_group') - mock_info_log = self.mock_object(block_base.LOG, 'info') - - actual_update = self.library.manage_existing(volume, existing_ref) - - expected_new_path = '/vol/vol0/%s' % volume['name'] - self.assertEqual(expected_update, actual_update) - self.assertEqual(1, mock_info_log.call_count) - self.library._add_lun_to_table.assert_called_once_with(mock_lun) - if qos_policy_group_name: - (self.zapi_client.set_lun_qos_policy_group. 
- assert_called_once_with(expected_new_path, qos_policy_group_name)) - else: - self.assertFalse( - self.zapi_client.set_lun_qos_policy_group.called) - - @mock.patch.object(block_base.LOG, 'info') - def test_unmanage(self, log): - mock_lun = block_base.NetAppLun('handle', 'name', '1', - {'Path': 'p', 'UUID': 'uuid'}) - self.library._get_lun_from_table = mock.Mock(return_value=mock_lun) - self.library.unmanage({'name': 'vol'}) - self.library._get_lun_from_table.assert_called_once_with('vol') - self.assertEqual(1, log.call_count) - - def test_check_vol_type_for_lun(self): - result = self.library._check_volume_type_for_lun( - 'vol', 'lun', 'existing_ref', {}) - self.assertIsNone(result) - - def test_is_lun_valid_on_storage(self): - self.assertTrue(self.library._is_lun_valid_on_storage('lun')) - - def test_initialize_connection_iscsi(self): - target_details_list = fake.ISCSI_TARGET_DETAILS_LIST - volume = fake.ISCSI_VOLUME - connector = fake.ISCSI_CONNECTOR - self.mock_object(block_base.NetAppBlockStorageLibrary, '_map_lun', - return_value=fake.ISCSI_LUN['lun_id']) - self.zapi_client.get_iscsi_target_details.return_value = ( - target_details_list) - self.mock_object(block_base.NetAppBlockStorageLibrary, - '_get_preferred_target_from_list', - return_value=target_details_list[1]) - self.zapi_client.get_iscsi_service_details.return_value = ( - fake.ISCSI_SERVICE_IQN) - self.mock_object(na_utils, - 'get_iscsi_connection_properties', - return_value=fake.ISCSI_CONNECTION_PROPERTIES) - - target_info = self.library.initialize_connection_iscsi(volume, - connector) - - self.assertEqual( - fake.ISCSI_CONNECTION_PROPERTIES['data']['auth_method'], - target_info['data']['auth_method']) - self.assertEqual( - fake.ISCSI_CONNECTION_PROPERTIES['data']['auth_password'], - target_info['data']['auth_password']) - self.assertIn('auth_password', target_info['data']) - - self.assertEqual( - fake.ISCSI_CONNECTION_PROPERTIES['data']['discovery_auth_method'], - 
target_info['data']['discovery_auth_method']) - self.assertEqual( - fake.ISCSI_CONNECTION_PROPERTIES['data'] - ['discovery_auth_password'], - target_info['data']['discovery_auth_password']) - self.assertIn('auth_password', target_info['data']) - self.assertEqual( - fake.ISCSI_CONNECTION_PROPERTIES['data'] - ['discovery_auth_username'], - target_info['data']['discovery_auth_username']) - - self.assertEqual(fake.ISCSI_CONNECTION_PROPERTIES, target_info) - block_base.NetAppBlockStorageLibrary._map_lun.assert_called_once_with( - fake.ISCSI_VOLUME['name'], [fake.ISCSI_CONNECTOR['initiator']], - 'iscsi', None) - self.zapi_client.get_iscsi_target_details.assert_called_once_with() - block_base.NetAppBlockStorageLibrary._get_preferred_target_from_list\ - .assert_called_once_with( - target_details_list) - self.zapi_client.get_iscsi_service_details.assert_called_once_with() - - def test_initialize_connection_iscsi_no_target_list(self): - volume = fake.ISCSI_VOLUME - connector = fake.ISCSI_CONNECTOR - self.mock_object(block_base.NetAppBlockStorageLibrary, '_map_lun', - return_value=fake.ISCSI_LUN['lun_id']) - self.zapi_client.get_iscsi_target_details.return_value = None - self.mock_object(block_base.NetAppBlockStorageLibrary, - '_get_preferred_target_from_list') - self.mock_object(na_utils, - 'get_iscsi_connection_properties', - return_value=fake.ISCSI_CONNECTION_PROPERTIES) - - self.assertRaises(exception.VolumeBackendAPIException, - self.library.initialize_connection_iscsi, - volume, connector) - - self.assertEqual( - 0, block_base.NetAppBlockStorageLibrary - ._get_preferred_target_from_list.call_count) - self.assertEqual( - 0, self.zapi_client.get_iscsi_service_details.call_count) - self.assertEqual( - 0, na_utils.get_iscsi_connection_properties.call_count) - - def test_initialize_connection_iscsi_no_preferred_target(self): - volume = fake.ISCSI_VOLUME - connector = fake.ISCSI_CONNECTOR - self.mock_object(block_base.NetAppBlockStorageLibrary, '_map_lun', - 
return_value=fake.ISCSI_LUN['lun_id']) - self.zapi_client.get_iscsi_target_details.return_value = None - self.mock_object(block_base.NetAppBlockStorageLibrary, - '_get_preferred_target_from_list', - return_value=None) - self.mock_object(na_utils, 'get_iscsi_connection_properties') - - self.assertRaises(exception.VolumeBackendAPIException, - self.library.initialize_connection_iscsi, - volume, connector) - - self.assertEqual(0, self.zapi_client - .get_iscsi_service_details.call_count) - self.assertEqual(0, na_utils.get_iscsi_connection_properties - .call_count) - - def test_initialize_connection_iscsi_no_iscsi_service_details(self): - target_details_list = fake.ISCSI_TARGET_DETAILS_LIST - volume = fake.ISCSI_VOLUME - connector = fake.ISCSI_CONNECTOR - self.mock_object(block_base.NetAppBlockStorageLibrary, '_map_lun', - return_value=fake.ISCSI_LUN['lun_id']) - self.zapi_client.get_iscsi_target_details.return_value = ( - target_details_list) - self.mock_object(block_base.NetAppBlockStorageLibrary, - '_get_preferred_target_from_list', - return_value=target_details_list[1]) - self.zapi_client.get_iscsi_service_details.return_value = None - self.mock_object(na_utils, 'get_iscsi_connection_properties') - - self.assertRaises(exception.VolumeBackendAPIException, - self.library.initialize_connection_iscsi, - volume, - connector) - - block_base.NetAppBlockStorageLibrary._map_lun.assert_called_once_with( - fake.ISCSI_VOLUME['name'], [fake.ISCSI_CONNECTOR['initiator']], - 'iscsi', None) - self.zapi_client.get_iscsi_target_details.assert_called_once_with() - block_base.NetAppBlockStorageLibrary._get_preferred_target_from_list\ - .assert_called_once_with(target_details_list) - - def test_get_target_details_list(self): - target_details_list = fake.ISCSI_TARGET_DETAILS_LIST - - result = self.library._get_preferred_target_from_list( - target_details_list) - - self.assertEqual(target_details_list[0], result) - - def test_get_preferred_target_from_empty_list(self): - 
target_details_list = [] - - result = self.library._get_preferred_target_from_list( - target_details_list) - - self.assertIsNone(result) - - def test_get_preferred_target_from_list_with_one_interface_disabled(self): - target_details_list = copy.deepcopy(fake.ISCSI_TARGET_DETAILS_LIST) - target_details_list[0]['interface-enabled'] = 'false' - - result = self.library._get_preferred_target_from_list( - target_details_list) - - self.assertEqual(target_details_list[1], result) - - def test_get_preferred_target_from_list_with_all_interfaces_disabled(self): - target_details_list = copy.deepcopy(fake.ISCSI_TARGET_DETAILS_LIST) - for target in target_details_list: - target['interface-enabled'] = 'false' - - result = self.library._get_preferred_target_from_list( - target_details_list) - - self.assertEqual(target_details_list[0], result) - - def test_get_preferred_target_from_list_with_filter(self): - target_details_list = fake.ISCSI_TARGET_DETAILS_LIST - filter = [target_detail['address'] - for target_detail in target_details_list[1:]] - - result = self.library._get_preferred_target_from_list( - target_details_list, filter) - - self.assertEqual(target_details_list[1], result) - - @mock.patch.object(na_utils, 'check_flags', mock.Mock()) - @mock.patch.object(block_base, 'LOG', mock.Mock()) - def test_setup_error_invalid_lun_os(self): - self.library.configuration.netapp_lun_ostype = 'unknown' - self.library.do_setup(mock.Mock()) - - self.assertRaises(exception.NetAppDriverException, - self.library.check_for_setup_error) - - block_base.LOG.error.assert_called_once_with(mock.ANY) - - @mock.patch.object(na_utils, 'check_flags', mock.Mock()) - @mock.patch.object(block_base, 'LOG', mock.Mock()) - def test_setup_error_invalid_host_type(self): - self.library.configuration.netapp_lun_ostype = 'linux' - self.library.configuration.netapp_host_type = 'future_os' - self.library.do_setup(mock.Mock()) - - self.assertRaises(exception.NetAppDriverException, - 
self.library.check_for_setup_error) - - block_base.LOG.error.assert_called_once_with(mock.ANY) - - @mock.patch.object(na_utils, 'check_flags', mock.Mock()) - def test_check_for_setup_error_both_config(self): - self.library.configuration.netapp_lun_ostype = 'linux' - self.library.configuration.netapp_host_type = 'linux' - self.library.do_setup(mock.Mock()) - self.zapi_client.get_lun_list.return_value = ['lun1'] - self.library._extract_and_populate_luns = mock.Mock() - mock_looping_start_tasks = self.mock_object( - self.library.loopingcalls, 'start_tasks') - - self.library.check_for_setup_error() - - self.library._extract_and_populate_luns.assert_called_once_with( - ['lun1']) - mock_looping_start_tasks.assert_called_once_with() - - @mock.patch.object(na_utils, 'check_flags', mock.Mock()) - def test_check_for_setup_error_no_os_host(self): - mock_start_tasks = self.mock_object( - self.library.loopingcalls, 'start_tasks') - self.library.configuration.netapp_lun_ostype = None - self.library.configuration.netapp_host_type = None - self.library.do_setup(mock.Mock()) - self.zapi_client.get_lun_list.return_value = ['lun1'] - self.library._extract_and_populate_luns = mock.Mock() - - self.library.check_for_setup_error() - - self.library._extract_and_populate_luns.assert_called_once_with( - ['lun1']) - - mock_start_tasks.assert_called_once_with() - - def test_delete_volume(self): - mock_delete_lun = self.mock_object(self.library, '_delete_lun') - - self.library.delete_volume(fake.VOLUME) - - mock_delete_lun.assert_called_once_with(fake.LUN_NAME) - - def test_delete_lun(self): - mock_get_lun_attr = self.mock_object(self.library, '_get_lun_attr') - mock_get_lun_attr.return_value = fake.LUN_METADATA - self.library.zapi_client = mock.Mock() - self.library.lun_table = fake.LUN_TABLE - - self.library._delete_lun(fake.LUN_NAME) - - mock_get_lun_attr.assert_called_once_with( - fake.LUN_NAME, 'metadata') - self.library.zapi_client.destroy_lun.assert_called_once_with(fake.PATH) - - def 
test_delete_lun_no_metadata(self): - self.mock_object(self.library, '_get_lun_attr', return_value=None) - self.library.zapi_client = mock.Mock() - self.mock_object(self.library, 'zapi_client') - - self.library._delete_lun(fake.LUN_NAME) - - self.library._get_lun_attr.assert_called_once_with( - fake.LUN_NAME, 'metadata') - self.assertEqual(0, self.library.zapi_client.destroy_lun.call_count) - self.assertEqual(0, - self.zapi_client. - mark_qos_policy_group_for_deletion.call_count) - - def test_delete_snapshot(self): - mock_delete_lun = self.mock_object(self.library, '_delete_lun') - - self.library.delete_snapshot(fake.SNAPSHOT) - - mock_delete_lun.assert_called_once_with(fake.SNAPSHOT_NAME) - - def test_clone_source_to_destination(self): - self.mock_object(na_utils, 'get_volume_extra_specs', - return_value=fake.EXTRA_SPECS) - self.mock_object(self.library, '_setup_qos_for_volume', - return_value=fake.QOS_POLICY_GROUP_INFO) - self.mock_object(self.library, '_clone_lun') - self.mock_object(self.library, '_extend_volume') - self.mock_object(self.library, 'delete_volume') - self.mock_object(self.library, '_mark_qos_policy_group_for_deletion') - self.mock_object(self.library, '_get_volume_model_update', - return_value={'key': 'value'}) - self.library.lun_space_reservation = 'false' - - retval = self.library._clone_source_to_destination( - fake.CLONE_SOURCE, fake.CLONE_DESTINATION) - - self.assertEqual({'key': 'value'}, retval) - na_utils.get_volume_extra_specs.assert_called_once_with( - fake.CLONE_DESTINATION) - self.library._setup_qos_for_volume.assert_called_once_with( - fake.CLONE_DESTINATION, fake.EXTRA_SPECS) - self.library._clone_lun.assert_called_once_with( - fake.CLONE_SOURCE_NAME, fake.CLONE_DESTINATION_NAME, - space_reserved='false', - qos_policy_group_name=fake.QOS_POLICY_GROUP_NAME) - self.library._extend_volume.assert_called_once_with( - fake.CLONE_DESTINATION, fake.CLONE_DESTINATION_SIZE, - fake.QOS_POLICY_GROUP_NAME) - self.assertEqual(0, 
self.library.delete_volume.call_count) - self.assertEqual(0, self.library. - _mark_qos_policy_group_for_deletion.call_count) - - def test_clone_source_to_destination_exception_path(self): - self.mock_object(na_utils, 'get_volume_extra_specs', - return_value=fake.EXTRA_SPECS) - self.mock_object(self.library, '_setup_qos_for_volume', - return_value=fake.QOS_POLICY_GROUP_INFO) - self.mock_object(self.library, '_clone_lun') - self.mock_object(self.library, '_extend_volume', side_effect=Exception) - self.mock_object(self.library, 'delete_volume') - self.mock_object(self.library, '_mark_qos_policy_group_for_deletion') - self.library.lun_space_reservation = 'true' - - self.assertRaises(exception.VolumeBackendAPIException, - self.library._clone_source_to_destination, - fake.CLONE_SOURCE, fake.CLONE_DESTINATION) - - na_utils.get_volume_extra_specs.assert_called_once_with( - fake.CLONE_DESTINATION) - self.library._setup_qos_for_volume.assert_called_once_with( - fake.CLONE_DESTINATION, fake.EXTRA_SPECS) - self.library._clone_lun.assert_called_once_with( - fake.CLONE_SOURCE_NAME, fake.CLONE_DESTINATION_NAME, - space_reserved='true', - qos_policy_group_name=fake.QOS_POLICY_GROUP_NAME) - self.library._extend_volume.assert_called_once_with( - fake.CLONE_DESTINATION, fake.CLONE_DESTINATION_SIZE, - fake.QOS_POLICY_GROUP_NAME) - self.assertEqual(1, self.library.delete_volume.call_count) - self.assertEqual(1, self.library. 
- _mark_qos_policy_group_for_deletion.call_count) - - def test_create_lun(self): - self.assertRaises(NotImplementedError, self.library._create_lun, - fake.VOLUME_ID, fake.LUN_ID, fake.SIZE, - fake.LUN_METADATA) - - def test_clone_lun(self): - self.assertRaises(NotImplementedError, self.library._clone_lun, - fake.VOLUME_ID, 'new-' + fake.VOLUME_ID) - - def test_create_snapshot(self): - - fake_lun = block_base.NetAppLun(fake.LUN_HANDLE, fake.LUN_ID, - fake.LUN_SIZE, fake.LUN_METADATA) - mock_clone_lun = self.mock_object(self.library, '_clone_lun') - self.mock_object(self.library, - '_get_lun_from_table', - return_value=fake_lun) - - self.library.create_snapshot(fake.SNAPSHOT) - - mock_clone_lun.assert_called_once_with( - fake_lun.name, fake.SNAPSHOT_NAME, space_reserved='false', - is_snapshot=True) - - def test_create_volume_from_snapshot(self): - mock_do_clone = self.mock_object(self.library, - '_clone_source_to_destination') - source = { - 'name': fake.SNAPSHOT['name'], - 'size': fake.SNAPSHOT['volume_size'] - } - - self.library.create_volume_from_snapshot(fake.VOLUME, fake.SNAPSHOT) - - mock_do_clone.assert_has_calls([ - mock.call(source, fake.VOLUME)]) - - def test_create_cloned_volume(self): - fake_lun = block_base.NetAppLun(fake.LUN_HANDLE, fake.LUN_ID, - fake.LUN_SIZE, fake.LUN_METADATA) - mock_get_lun_from_table = self.mock_object(self.library, - '_get_lun_from_table') - mock_get_lun_from_table.return_value = fake_lun - mock_do_clone = self.mock_object(self.library, - '_clone_source_to_destination') - source = { - 'name': fake_lun.name, - 'size': fake.VOLUME_REF['size'] - } - - self.library.create_cloned_volume(fake.VOLUME, fake.VOLUME_REF) - - mock_do_clone.assert_has_calls([ - mock.call(source, fake.VOLUME)]) - - def test_extend_volume(self): - - new_size = 100 - volume_copy = copy.copy(fake.VOLUME) - volume_copy['size'] = new_size - - mock_get_volume_extra_specs = self.mock_object( - na_utils, 'get_volume_extra_specs', return_value=fake.EXTRA_SPECS) - 
mock_setup_qos_for_volume = self.mock_object( - self.library, '_setup_qos_for_volume', - return_value=fake.QOS_POLICY_GROUP_INFO) - mock_extend_volume = self.mock_object(self.library, '_extend_volume') - - self.library.extend_volume(fake.VOLUME, new_size) - - mock_get_volume_extra_specs.assert_called_once_with(fake.VOLUME) - mock_setup_qos_for_volume.assert_called_once_with(volume_copy, - fake.EXTRA_SPECS) - mock_extend_volume.assert_called_once_with(fake.VOLUME, - new_size, - fake.QOS_POLICY_GROUP_NAME) - - def test_extend_volume_api_error(self): - - new_size = 100 - volume_copy = copy.copy(fake.VOLUME) - volume_copy['size'] = new_size - - mock_get_volume_extra_specs = self.mock_object( - na_utils, 'get_volume_extra_specs', return_value=fake.EXTRA_SPECS) - mock_setup_qos_for_volume = self.mock_object( - self.library, '_setup_qos_for_volume', - return_value=fake.QOS_POLICY_GROUP_INFO) - mock_extend_volume = self.mock_object( - self.library, '_extend_volume', side_effect=netapp_api.NaApiError) - - self.assertRaises(netapp_api.NaApiError, - self.library.extend_volume, - fake.VOLUME, - new_size) - - mock_get_volume_extra_specs.assert_called_once_with(fake.VOLUME) - mock_setup_qos_for_volume.assert_has_calls([ - mock.call(volume_copy, fake.EXTRA_SPECS), - mock.call(fake.VOLUME, fake.EXTRA_SPECS)]) - mock_extend_volume.assert_called_once_with( - fake.VOLUME, new_size, fake.QOS_POLICY_GROUP_NAME) - - def test__extend_volume_direct(self): - - current_size = fake.LUN_SIZE - current_size_bytes = current_size * units.Gi - new_size = fake.LUN_SIZE * 2 - new_size_bytes = new_size * units.Gi - max_size = fake.LUN_SIZE * 10 - max_size_bytes = max_size * units.Gi - fake_volume = copy.copy(fake.VOLUME) - fake_volume['size'] = new_size - - fake_lun = block_base.NetAppLun(fake.LUN_HANDLE, - fake.LUN_ID, - current_size_bytes, - fake.LUN_METADATA) - mock_get_lun_from_table = self.mock_object( - self.library, '_get_lun_from_table', return_value=fake_lun) - fake_lun_geometry = 
{'max_resize': six.text_type(max_size_bytes)} - mock_get_lun_geometry = self.mock_object( - self.library.zapi_client, 'get_lun_geometry', - return_value=fake_lun_geometry) - mock_do_direct_resize = self.mock_object(self.library.zapi_client, - 'do_direct_resize') - mock_do_sub_clone_resize = self.mock_object(self.library, - '_do_sub_clone_resize') - self.library.lun_table = {fake.VOLUME['name']: fake_lun} - - self.library._extend_volume(fake.VOLUME, new_size, 'fake_qos_policy') - - mock_get_lun_from_table.assert_called_once_with(fake.VOLUME['name']) - mock_get_lun_geometry.assert_called_once_with( - fake.LUN_METADATA['Path']) - mock_do_direct_resize.assert_called_once_with( - fake.LUN_METADATA['Path'], six.text_type(new_size_bytes)) - self.assertFalse(mock_do_sub_clone_resize.called) - self.assertEqual(six.text_type(new_size_bytes), - self.library.lun_table[fake.VOLUME['name']].size) - - def test__extend_volume_clone(self): - - current_size = fake.LUN_SIZE - current_size_bytes = current_size * units.Gi - new_size = fake.LUN_SIZE * 20 - new_size_bytes = new_size * units.Gi - max_size = fake.LUN_SIZE * 10 - max_size_bytes = max_size * units.Gi - fake_volume = copy.copy(fake.VOLUME) - fake_volume['size'] = new_size - - fake_lun = block_base.NetAppLun(fake.LUN_HANDLE, - fake.LUN_ID, - current_size_bytes, - fake.LUN_METADATA) - mock_get_lun_from_table = self.mock_object( - self.library, '_get_lun_from_table', return_value=fake_lun) - fake_lun_geometry = {'max_resize': six.text_type(max_size_bytes)} - mock_get_lun_geometry = self.mock_object( - self.library.zapi_client, 'get_lun_geometry', - return_value=fake_lun_geometry) - mock_do_direct_resize = self.mock_object(self.library.zapi_client, - 'do_direct_resize') - mock_do_sub_clone_resize = self.mock_object(self.library, - '_do_sub_clone_resize') - self.library.lun_table = {fake.VOLUME['name']: fake_lun} - - self.library._extend_volume(fake.VOLUME, new_size, 'fake_qos_policy') - - 
mock_get_lun_from_table.assert_called_once_with(fake.VOLUME['name']) - mock_get_lun_geometry.assert_called_once_with( - fake.LUN_METADATA['Path']) - self.assertFalse(mock_do_direct_resize.called) - mock_do_sub_clone_resize.assert_called_once_with( - fake.LUN_METADATA['Path'], six.text_type(new_size_bytes), - qos_policy_group_name='fake_qos_policy') - self.assertEqual(six.text_type(new_size_bytes), - self.library.lun_table[fake.VOLUME['name']].size) - - def test__extend_volume_no_change(self): - - current_size = fake.LUN_SIZE - current_size_bytes = current_size * units.Gi - new_size = fake.LUN_SIZE - max_size = fake.LUN_SIZE * 10 - max_size_bytes = max_size * units.Gi - fake_volume = copy.copy(fake.VOLUME) - fake_volume['size'] = new_size - - fake_lun = block_base.NetAppLun(fake.LUN_HANDLE, - fake.LUN_ID, - current_size_bytes, - fake.LUN_METADATA) - mock_get_lun_from_table = self.mock_object( - self.library, '_get_lun_from_table', return_value=fake_lun) - fake_lun_geometry = {'max_resize': six.text_type(max_size_bytes)} - mock_get_lun_geometry = self.mock_object( - self.library.zapi_client, 'get_lun_geometry', - return_value=fake_lun_geometry) - mock_do_direct_resize = self.mock_object(self.library.zapi_client, - 'do_direct_resize') - mock_do_sub_clone_resize = self.mock_object(self.library, - '_do_sub_clone_resize') - self.library.lun_table = {fake_volume['name']: fake_lun} - - self.library._extend_volume(fake_volume, new_size, 'fake_qos_policy') - - mock_get_lun_from_table.assert_called_once_with(fake_volume['name']) - self.assertFalse(mock_get_lun_geometry.called) - self.assertFalse(mock_do_direct_resize.called) - self.assertFalse(mock_do_sub_clone_resize.called) - - def test_do_sub_clone_resize(self): - - fake_lun = block_base.NetAppLun(fake.LUN_HANDLE, - fake.LUN_ID, - fake.LUN_SIZE, - fake.LUN_METADATA) - new_lun_size = fake.LUN_SIZE * 10 - new_lun_name = 'new-%s' % fake.LUN_NAME - block_count = fake.LUN_SIZE * units.Gi / 512 - - mock_get_lun_from_table = 
self.mock_object( - self.library, '_get_lun_from_table', return_value=fake_lun) - mock_get_vol_option = self.mock_object( - self.library, '_get_vol_option', return_value='off') - mock_get_lun_block_count = self.mock_object( - self.library, '_get_lun_block_count', return_value=block_count) - mock_create_lun = self.mock_object( - self.library.zapi_client, 'create_lun') - mock_clone_lun = self.mock_object(self.library, '_clone_lun') - mock_post_sub_clone_resize = self.mock_object( - self.library, '_post_sub_clone_resize') - mock_destroy_lun = self.mock_object( - self.library.zapi_client, 'destroy_lun') - - self.library._do_sub_clone_resize(fake.LUN_PATH, - new_lun_size, - fake.QOS_POLICY_GROUP_NAME) - - mock_get_lun_from_table.assert_called_once_with(fake.LUN_NAME) - mock_get_vol_option.assert_called_once_with('vol0', 'compression') - mock_get_lun_block_count.assert_called_once_with(fake.LUN_PATH) - mock_create_lun.assert_called_once_with( - 'vol0', new_lun_name, new_lun_size, fake.LUN_METADATA, - qos_policy_group_name=fake.QOS_POLICY_GROUP_NAME) - mock_clone_lun.assert_called_once_with( - fake.LUN_NAME, new_lun_name, block_count=block_count) - mock_post_sub_clone_resize.assert_called_once_with(fake.LUN_PATH) - self.assertFalse(mock_destroy_lun.called) - - def test_do_sub_clone_resize_compression_on(self): - - fake_lun = block_base.NetAppLun(fake.LUN_HANDLE, - fake.LUN_ID, - fake.LUN_SIZE, - fake.LUN_METADATA) - new_lun_size = fake.LUN_SIZE * 10 - block_count = fake.LUN_SIZE * units.Gi / 512 - - mock_get_lun_from_table = self.mock_object( - self.library, '_get_lun_from_table', return_value=fake_lun) - mock_get_vol_option = self.mock_object( - self.library, '_get_vol_option', return_value='on') - mock_get_lun_block_count = self.mock_object( - self.library, '_get_lun_block_count', return_value=block_count) - mock_create_lun = self.mock_object( - self.library.zapi_client, 'create_lun') - mock_clone_lun = self.mock_object(self.library, '_clone_lun') - 
mock_post_sub_clone_resize = self.mock_object( - self.library, '_post_sub_clone_resize') - mock_destroy_lun = self.mock_object( - self.library.zapi_client, 'destroy_lun') - - self.assertRaises(exception.VolumeBackendAPIException, - self.library._do_sub_clone_resize, - fake.LUN_PATH, - new_lun_size, - fake.QOS_POLICY_GROUP_NAME) - - mock_get_lun_from_table.assert_called_once_with(fake.LUN_NAME) - mock_get_vol_option.assert_called_once_with('vol0', 'compression') - self.assertFalse(mock_get_lun_block_count.called) - self.assertFalse(mock_create_lun.called) - self.assertFalse(mock_clone_lun.called) - self.assertFalse(mock_post_sub_clone_resize.called) - self.assertFalse(mock_destroy_lun.called) - - def test_do_sub_clone_resize_no_blocks(self): - - fake_lun = block_base.NetAppLun(fake.LUN_HANDLE, - fake.LUN_ID, - fake.LUN_SIZE, - fake.LUN_METADATA) - new_lun_size = fake.LUN_SIZE * 10 - block_count = 0 - - mock_get_lun_from_table = self.mock_object( - self.library, '_get_lun_from_table', return_value=fake_lun) - mock_get_vol_option = self.mock_object( - self.library, '_get_vol_option', return_value='off') - mock_get_lun_block_count = self.mock_object( - self.library, '_get_lun_block_count', return_value=block_count) - mock_create_lun = self.mock_object( - self.library.zapi_client, 'create_lun') - mock_clone_lun = self.mock_object(self.library, '_clone_lun') - mock_post_sub_clone_resize = self.mock_object( - self.library, '_post_sub_clone_resize') - mock_destroy_lun = self.mock_object( - self.library.zapi_client, 'destroy_lun') - - self.assertRaises(exception.VolumeBackendAPIException, - self.library._do_sub_clone_resize, - fake.LUN_PATH, - new_lun_size, - fake.QOS_POLICY_GROUP_NAME) - - mock_get_lun_from_table.assert_called_once_with(fake.LUN_NAME) - mock_get_vol_option.assert_called_once_with('vol0', 'compression') - mock_get_lun_block_count.assert_called_once_with(fake.LUN_PATH) - self.assertFalse(mock_create_lun.called) - self.assertFalse(mock_clone_lun.called) - 
self.assertFalse(mock_post_sub_clone_resize.called) - self.assertFalse(mock_destroy_lun.called) - - def test_do_sub_clone_resize_create_error(self): - - fake_lun = block_base.NetAppLun(fake.LUN_HANDLE, - fake.LUN_ID, - fake.LUN_SIZE, - fake.LUN_METADATA) - new_lun_size = fake.LUN_SIZE * 10 - new_lun_name = 'new-%s' % fake.LUN_NAME - block_count = fake.LUN_SIZE * units.Gi / 512 - - mock_get_lun_from_table = self.mock_object( - self.library, '_get_lun_from_table', return_value=fake_lun) - mock_get_vol_option = self.mock_object( - self.library, '_get_vol_option', return_value='off') - mock_get_lun_block_count = self.mock_object( - self.library, '_get_lun_block_count', return_value=block_count) - mock_create_lun = self.mock_object( - self.library.zapi_client, 'create_lun', - side_effect=netapp_api.NaApiError) - mock_clone_lun = self.mock_object(self.library, '_clone_lun') - mock_post_sub_clone_resize = self.mock_object( - self.library, '_post_sub_clone_resize') - mock_destroy_lun = self.mock_object( - self.library.zapi_client, 'destroy_lun') - - self.assertRaises(netapp_api.NaApiError, - self.library._do_sub_clone_resize, - fake.LUN_PATH, - new_lun_size, - fake.QOS_POLICY_GROUP_NAME) - - mock_get_lun_from_table.assert_called_once_with(fake.LUN_NAME) - mock_get_vol_option.assert_called_once_with('vol0', 'compression') - mock_get_lun_block_count.assert_called_once_with(fake.LUN_PATH) - mock_create_lun.assert_called_once_with( - 'vol0', new_lun_name, new_lun_size, fake.LUN_METADATA, - qos_policy_group_name=fake.QOS_POLICY_GROUP_NAME) - self.assertFalse(mock_clone_lun.called) - self.assertFalse(mock_post_sub_clone_resize.called) - self.assertFalse(mock_destroy_lun.called) - - def test_do_sub_clone_resize_clone_error(self): - - fake_lun = block_base.NetAppLun(fake.LUN_HANDLE, - fake.LUN_ID, - fake.LUN_SIZE, - fake.LUN_METADATA) - new_lun_size = fake.LUN_SIZE * 10 - new_lun_name = 'new-%s' % fake.LUN_NAME - new_lun_path = '/vol/vol0/%s' % new_lun_name - block_count = 
fake.LUN_SIZE * units.Gi / 512 - - mock_get_lun_from_table = self.mock_object( - self.library, '_get_lun_from_table', return_value=fake_lun) - mock_get_vol_option = self.mock_object( - self.library, '_get_vol_option', return_value='off') - mock_get_lun_block_count = self.mock_object( - self.library, '_get_lun_block_count', return_value=block_count) - mock_create_lun = self.mock_object( - self.library.zapi_client, 'create_lun') - mock_clone_lun = self.mock_object( - self.library, '_clone_lun', side_effect=netapp_api.NaApiError) - mock_post_sub_clone_resize = self.mock_object( - self.library, '_post_sub_clone_resize') - mock_destroy_lun = self.mock_object( - self.library.zapi_client, 'destroy_lun') - - self.assertRaises(netapp_api.NaApiError, - self.library._do_sub_clone_resize, - fake.LUN_PATH, - new_lun_size, - fake.QOS_POLICY_GROUP_NAME) - - mock_get_lun_from_table.assert_called_once_with(fake.LUN_NAME) - mock_get_vol_option.assert_called_once_with('vol0', 'compression') - mock_get_lun_block_count.assert_called_once_with(fake.LUN_PATH) - mock_create_lun.assert_called_once_with( - 'vol0', new_lun_name, new_lun_size, fake.LUN_METADATA, - qos_policy_group_name=fake.QOS_POLICY_GROUP_NAME) - mock_clone_lun.assert_called_once_with( - fake.LUN_NAME, new_lun_name, block_count=block_count) - self.assertFalse(mock_post_sub_clone_resize.called) - mock_destroy_lun.assert_called_once_with(new_lun_path) - - def test_configure_chap_generate_username_and_password(self): - """Ensure that a CHAP username and password are generated.""" - initiator_name = fake.ISCSI_CONNECTOR['initiator'] - - username, password = self.library._configure_chap(initiator_name) - - self.assertEqual(na_utils.DEFAULT_CHAP_USER_NAME, username) - self.assertIsNotNone(password) - self.assertEqual(len(password), na_utils.CHAP_SECRET_LENGTH) - - def test_add_chap_properties(self): - """Ensure that CHAP properties are added to the properties dictionary - - """ - properties = {'data': {}} - 
self.library._add_chap_properties(properties, 'user1', 'pass1') - - data = properties['data'] - self.assertEqual('CHAP', data['auth_method']) - self.assertEqual('user1', data['auth_username']) - self.assertEqual('pass1', data['auth_password']) - self.assertEqual('CHAP', data['discovery_auth_method']) - self.assertEqual('user1', data['discovery_auth_username']) - self.assertEqual('pass1', data['discovery_auth_password']) - - def test_create_cgsnapshot(self): - snapshot = fake.CG_SNAPSHOT - snapshot['volume'] = fake.CG_VOLUME - - mock_extract_host = self.mock_object( - volume_utils, 'extract_host', return_value=fake.POOL_NAME) - - mock_clone_lun = self.mock_object(self.library, '_clone_lun') - mock_busy = self.mock_object( - self.zapi_client, 'wait_for_busy_snapshot') - mock_delete_snapshot = self.mock_object( - self.zapi_client, 'delete_snapshot') - - self.library.create_cgsnapshot(fake.CG_SNAPSHOT, [snapshot]) - - mock_extract_host.assert_called_once_with(fake.CG_VOLUME['host'], - level='pool') - self.zapi_client.create_cg_snapshot.assert_called_once_with( - set([fake.POOL_NAME]), fake.CG_SNAPSHOT_ID) - mock_clone_lun.assert_called_once_with( - fake.CG_VOLUME_NAME, fake.CG_SNAPSHOT_NAME, - source_snapshot=fake.CG_SNAPSHOT_ID) - mock_busy.assert_called_once_with(fake.POOL_NAME, fake.CG_SNAPSHOT_ID) - mock_delete_snapshot.assert_called_once_with( - fake.POOL_NAME, fake.CG_SNAPSHOT_ID) - - def test_create_cgsnapshot_busy_snapshot(self): - snapshot = fake.CG_SNAPSHOT - snapshot['volume'] = fake.CG_VOLUME - - mock_extract_host = self.mock_object( - volume_utils, 'extract_host', - return_value=fake.POOL_NAME) - mock_clone_lun = self.mock_object(self.library, '_clone_lun') - mock_busy = self.mock_object( - self.zapi_client, 'wait_for_busy_snapshot') - mock_busy.side_effect = exception.SnapshotIsBusy(snapshot['name']) - mock_delete_snapshot = self.mock_object( - self.zapi_client, 'delete_snapshot') - mock_mark_snapshot_for_deletion = self.mock_object( - self.zapi_client, 
'mark_snapshot_for_deletion') - - self.library.create_cgsnapshot(fake.CG_SNAPSHOT, [snapshot]) - - mock_extract_host.assert_called_once_with( - fake.CG_VOLUME['host'], level='pool') - self.zapi_client.create_cg_snapshot.assert_called_once_with( - set([fake.POOL_NAME]), fake.CG_SNAPSHOT_ID) - mock_clone_lun.assert_called_once_with( - fake.CG_VOLUME_NAME, fake.CG_SNAPSHOT_NAME, - source_snapshot=fake.CG_SNAPSHOT_ID) - mock_delete_snapshot.assert_not_called() - mock_mark_snapshot_for_deletion.assert_called_once_with( - fake.POOL_NAME, fake.CG_SNAPSHOT_ID) - - def test_delete_cgsnapshot(self): - - mock_delete_snapshot = self.mock_object( - self.library, '_delete_lun') - - self.library.delete_cgsnapshot(fake.CG_SNAPSHOT, [fake.CG_SNAPSHOT]) - - mock_delete_snapshot.assert_called_once_with(fake.CG_SNAPSHOT['name']) - - def test_delete_cgsnapshot_not_found(self): - self.mock_object(block_base, 'LOG') - self.mock_object(self.library, '_get_lun_attr', return_value=None) - - self.library.delete_cgsnapshot(fake.CG_SNAPSHOT, [fake.CG_SNAPSHOT]) - - self.assertEqual(0, block_base.LOG.error.call_count) - self.assertEqual(1, block_base.LOG.warning.call_count) - self.assertEqual(0, block_base.LOG.info.call_count) - - def test_create_volume_with_cg(self): - volume_size_in_bytes = int(fake.CG_VOLUME_SIZE) * units.Gi - self._create_volume_test_helper() - - self.library.create_volume(fake.CG_VOLUME) - - self.library._create_lun.assert_called_once_with( - fake.POOL_NAME, fake.CG_VOLUME_NAME, volume_size_in_bytes, - fake.CG_LUN_METADATA, None) - self.library._get_volume_model_update.assert_called_once_with( - fake.CG_VOLUME) - self.assertEqual(0, self.library. 
- _mark_qos_policy_group_for_deletion.call_count) - self.assertEqual(0, block_base.LOG.error.call_count) - - def _create_volume_test_helper(self): - self.mock_object(na_utils, 'get_volume_extra_specs') - self.mock_object(na_utils, 'log_extra_spec_warnings') - self.mock_object(block_base, 'LOG') - self.mock_object(volume_utils, 'extract_host', - return_value=fake.POOL_NAME) - self.mock_object(self.library, '_setup_qos_for_volume', - return_value=None) - self.mock_object(self.library, '_create_lun') - self.mock_object(self.library, '_create_lun_handle') - self.mock_object(self.library, '_add_lun_to_table') - self.mock_object(self.library, '_mark_qos_policy_group_for_deletion') - self.mock_object(self.library, '_get_volume_model_update') - - def test_create_consistency_group(self): - model_update = self.library.create_consistencygroup( - fake.CONSISTENCY_GROUP) - self.assertEqual('available', model_update['status']) - - def test_delete_consistencygroup_volume_delete_failure(self): - self.mock_object(block_base, 'LOG') - self.mock_object(self.library, '_delete_lun', side_effect=Exception) - - model_update, volumes = self.library.delete_consistencygroup( - fake.CONSISTENCY_GROUP, [fake.CG_VOLUME]) - - self.assertEqual('deleted', model_update['status']) - self.assertEqual('error_deleting', volumes[0]['status']) - self.assertEqual(1, block_base.LOG.exception.call_count) - - def test_delete_consistencygroup_not_found(self): - self.mock_object(block_base, 'LOG') - self.mock_object(self.library, '_get_lun_attr', return_value=None) - - model_update, volumes = self.library.delete_consistencygroup( - fake.CONSISTENCY_GROUP, [fake.CG_VOLUME]) - - self.assertEqual(0, block_base.LOG.error.call_count) - self.assertEqual(1, block_base.LOG.warning.call_count) - self.assertEqual(0, block_base.LOG.info.call_count) - - self.assertEqual('deleted', model_update['status']) - self.assertEqual('deleted', volumes[0]['status']) - - @ddt.data(None, - {'replication_status': 
fields.ReplicationStatus.ENABLED}) - def test_create_consistencygroup_from_src_cg_snapshot(self, - volume_model_update): - mock_clone_source_to_destination = self.mock_object( - self.library, '_clone_source_to_destination', - return_value=volume_model_update) - - actual_return_value = self.library.create_consistencygroup_from_src( - fake.CONSISTENCY_GROUP, [fake.VOLUME], cgsnapshot=fake.CG_SNAPSHOT, - snapshots=[fake.CG_VOLUME_SNAPSHOT]) - - clone_source_to_destination_args = { - 'name': fake.CG_SNAPSHOT['name'], - 'size': fake.CG_SNAPSHOT['volume_size'], - } - mock_clone_source_to_destination.assert_called_once_with( - clone_source_to_destination_args, fake.VOLUME) - if volume_model_update: - volume_model_update['id'] = fake.VOLUME['id'] - expected_return_value = ((None, [volume_model_update]) - if volume_model_update else (None, [])) - self.assertEqual(expected_return_value, actual_return_value) - - @ddt.data(None, - {'replication_status': fields.ReplicationStatus.ENABLED}) - def test_create_consistencygroup_from_src_cg(self, volume_model_update): - lun_name = fake.SOURCE_CG_VOLUME['name'] - mock_lun = block_base.NetAppLun( - lun_name, lun_name, '3', {'UUID': 'fake_uuid'}) - self.mock_object(self.library, '_get_lun_from_table', - return_value=mock_lun) - mock_clone_source_to_destination = self.mock_object( - self.library, '_clone_source_to_destination', - return_value=volume_model_update) - - actual_return_value = self.library.create_consistencygroup_from_src( - fake.CONSISTENCY_GROUP, [fake.VOLUME], - source_cg=fake.SOURCE_CONSISTENCY_GROUP, - source_vols=[fake.SOURCE_CG_VOLUME]) - - clone_source_to_destination_args = { - 'name': fake.SOURCE_CG_VOLUME['name'], - 'size': fake.SOURCE_CG_VOLUME['size'], - } - if volume_model_update: - volume_model_update['id'] = fake.VOLUME['id'] - expected_return_value = ((None, [volume_model_update]) - if volume_model_update else (None, [])) - mock_clone_source_to_destination.assert_called_once_with( - 
clone_source_to_destination_args, fake.VOLUME) - self.assertEqual(expected_return_value, actual_return_value) - - def test_add_looping_tasks(self): - mock_add_task = self.mock_object(self.library.loopingcalls, 'add_task') - mock_call_snap_cleanup = self.mock_object( - self.library, '_delete_snapshots_marked_for_deletion') - mock_call_ems_logging = self.mock_object( - self.library, '_handle_ems_logging') - - self.library._add_looping_tasks() - - mock_add_task.assert_has_calls([ - mock.call(mock_call_snap_cleanup, loopingcalls.ONE_MINUTE, - loopingcalls.ONE_MINUTE), - mock.call(mock_call_ems_logging, loopingcalls.ONE_HOUR)]) - - def test_delete_snapshots_marked_for_deletion(self): - snapshots = [{ - 'name': fake.SNAPSHOT_NAME, - 'volume_name': fake.VOLUME['name'] - }] - mock_get_backing_flexvol_names = self.mock_object( - self.library, '_get_backing_flexvol_names') - mock_get_backing_flexvol_names.return_value = [fake.VOLUME['name']] - mock_get_snapshots_marked = self.mock_object( - self.zapi_client, 'get_snapshots_marked_for_deletion') - mock_get_snapshots_marked.return_value = snapshots - mock_delete_snapshot = self.mock_object( - self.zapi_client, 'delete_snapshot') - - self.library._delete_snapshots_marked_for_deletion() - - mock_get_backing_flexvol_names.assert_called_once_with() - mock_get_snapshots_marked.assert_called_once_with( - [fake.VOLUME['name']]) - mock_delete_snapshot.assert_called_once_with( - fake.VOLUME['name'], fake.SNAPSHOT_NAME) diff --git a/cinder/tests/unit/volume/drivers/netapp/dataontap/test_block_cmode.py b/cinder/tests/unit/volume/drivers/netapp/dataontap/test_block_cmode.py deleted file mode 100644 index 7db17c3f6..000000000 --- a/cinder/tests/unit/volume/drivers/netapp/dataontap/test_block_cmode.py +++ /dev/null @@ -1,751 +0,0 @@ -# Copyright (c) 2014 Alex Meade. All rights reserved. -# Copyright (c) 2014 Clinton Knight. All rights reserved. -# Copyright (c) 2015 Tom Barron. All rights reserved. -# Copyright (c) 2016 Mike Rooney. 
All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -""" -Mock unit tests for the NetApp block storage C-mode library -""" - -import ddt -import mock - -from cinder import exception -from cinder import test -import cinder.tests.unit.volume.drivers.netapp.dataontap.fakes as fake -from cinder.tests.unit.volume.drivers.netapp.dataontap.utils import fakes as\ - fake_utils -import cinder.tests.unit.volume.drivers.netapp.fakes as na_fakes -from cinder.volume.drivers.netapp.dataontap import block_base -from cinder.volume.drivers.netapp.dataontap import block_cmode -from cinder.volume.drivers.netapp.dataontap.client import api as netapp_api -from cinder.volume.drivers.netapp.dataontap.client import client_base -from cinder.volume.drivers.netapp.dataontap.client import client_cmode -from cinder.volume.drivers.netapp.dataontap.performance import perf_cmode -from cinder.volume.drivers.netapp.dataontap.utils import data_motion -from cinder.volume.drivers.netapp.dataontap.utils import loopingcalls -from cinder.volume.drivers.netapp.dataontap.utils import utils as dot_utils -from cinder.volume.drivers.netapp import utils as na_utils - - -@ddt.ddt -class NetAppBlockStorageCmodeLibraryTestCase(test.TestCase): - """Test case for NetApp's C-Mode iSCSI library.""" - - def setUp(self): - super(NetAppBlockStorageCmodeLibraryTestCase, self).setUp() - - kwargs = { - 'configuration': self.get_config_cmode(), - 'host': 'openstack@cdotblock', - } - self.library = 
block_cmode.NetAppBlockStorageCmodeLibrary( - 'driver', 'protocol', **kwargs) - - self.library.zapi_client = mock.Mock() - self.zapi_client = self.library.zapi_client - self.library.perf_library = mock.Mock() - self.library.ssc_library = mock.Mock() - self.library.vserver = mock.Mock() - self.fake_lun = block_base.NetAppLun(fake.LUN_HANDLE, fake.LUN_NAME, - fake.SIZE, None) - self.fake_snapshot_lun = block_base.NetAppLun( - fake.SNAPSHOT_LUN_HANDLE, fake.SNAPSHOT_NAME, fake.SIZE, None) - self.mock_object(self.library, 'lun_table') - self.library.lun_table = { - fake.LUN_NAME: self.fake_lun, - fake.SNAPSHOT_NAME: self.fake_snapshot_lun, - } - self.mock_object(block_base.NetAppBlockStorageLibrary, 'delete_volume') - - def get_config_cmode(self): - config = na_fakes.create_configuration_cmode() - config.netapp_storage_protocol = 'iscsi' - config.netapp_login = 'admin' - config.netapp_password = 'pass' - config.netapp_server_hostname = '127.0.0.1' - config.netapp_transport_type = 'https' - config.netapp_server_port = '443' - config.netapp_vserver = 'openstack' - return config - - @mock.patch.object(client_cmode.Client, 'check_for_cluster_credentials', - mock.MagicMock(return_value=False)) - @mock.patch.object(perf_cmode, 'PerformanceCmodeLibrary', mock.Mock()) - @mock.patch.object(client_base.Client, 'get_ontapi_version', - mock.MagicMock(return_value=(1, 20))) - @mock.patch.object(na_utils, 'check_flags') - @mock.patch.object(block_base.NetAppBlockStorageLibrary, 'do_setup') - def test_do_setup(self, super_do_setup, mock_check_flags): - self.zapi_client.check_for_cluster_credentials = mock.MagicMock( - return_value=True) - self.mock_object(client_base.Client, '_init_ssh_client') - self.mock_object( - dot_utils, 'get_backend_configuration', - return_value=self.get_config_cmode()) - context = mock.Mock() - - self.library.do_setup(context) - - super_do_setup.assert_called_once_with(context) - self.assertEqual(1, mock_check_flags.call_count) - - def 
test_check_for_setup_error(self): - super_check_for_setup_error = self.mock_object( - block_base.NetAppBlockStorageLibrary, 'check_for_setup_error') - mock_check_api_permissions = self.mock_object( - self.library.ssc_library, 'check_api_permissions') - mock_add_looping_tasks = self.mock_object( - self.library, '_add_looping_tasks') - mock_get_pool_map = self.mock_object( - self.library, '_get_flexvol_to_pool_map', - return_value={'fake_map': None}) - mock_add_looping_tasks = self.mock_object( - self.library, '_add_looping_tasks') - - self.library.check_for_setup_error() - - self.assertEqual(1, super_check_for_setup_error.call_count) - mock_check_api_permissions.assert_called_once_with() - self.assertEqual(1, mock_add_looping_tasks.call_count) - mock_get_pool_map.assert_called_once_with() - mock_add_looping_tasks.assert_called_once_with() - - def test_check_for_setup_error_no_filtered_pools(self): - self.mock_object(block_base.NetAppBlockStorageLibrary, - 'check_for_setup_error') - mock_check_api_permissions = self.mock_object( - self.library.ssc_library, 'check_api_permissions') - self.mock_object(self.library, '_add_looping_tasks') - self.mock_object( - self.library, '_get_flexvol_to_pool_map', return_value={}) - - self.assertRaises(exception.NetAppDriverException, - self.library.check_for_setup_error) - - mock_check_api_permissions.assert_called_once_with() - - @ddt.data({'replication_enabled': True, 'failed_over': False}, - {'replication_enabled': True, 'failed_over': True}, - {'replication_enabled': False, 'failed_over': False}) - @ddt.unpack - def test_handle_housekeeping_tasks(self, replication_enabled, failed_over): - ensure_mirrors = self.mock_object(data_motion.DataMotionMixin, - 'ensure_snapmirrors') - self.mock_object(self.library.ssc_library, 'get_ssc_flexvol_names', - return_value=fake_utils.SSC.keys()) - self.library.replication_enabled = replication_enabled - self.library.failed_over = failed_over - - self.library._handle_housekeeping_tasks() - - 
(self.zapi_client.remove_unused_qos_policy_groups. - assert_called_once_with()) - if replication_enabled and not failed_over: - ensure_mirrors.assert_called_once_with( - self.library.configuration, self.library.backend_name, - fake_utils.SSC.keys()) - else: - self.assertFalse(ensure_mirrors.called) - - def test_handle_ems_logging(self): - - volume_list = ['vol0', 'vol1', 'vol2'] - self.mock_object( - self.library.ssc_library, 'get_ssc_flexvol_names', - return_value=volume_list) - self.mock_object( - dot_utils, 'build_ems_log_message_0', - return_value='fake_base_ems_log_message') - self.mock_object( - dot_utils, 'build_ems_log_message_1', - return_value='fake_pool_ems_log_message') - mock_send_ems_log_message = self.mock_object( - self.zapi_client, 'send_ems_log_message') - - self.library._handle_ems_logging() - - mock_send_ems_log_message.assert_has_calls([ - mock.call('fake_base_ems_log_message'), - mock.call('fake_pool_ems_log_message'), - ]) - dot_utils.build_ems_log_message_0.assert_called_once_with( - self.library.driver_name, self.library.app_version, - self.library.driver_mode) - dot_utils.build_ems_log_message_1.assert_called_once_with( - self.library.driver_name, self.library.app_version, - self.library.vserver, volume_list, []) - - def test_find_mapped_lun_igroup(self): - igroups = [fake.IGROUP1] - self.zapi_client.get_igroup_by_initiators.return_value = igroups - - lun_maps = [{'initiator-group': fake.IGROUP1_NAME, - 'lun-id': '1', - 'vserver': fake.VSERVER_NAME}] - self.zapi_client.get_lun_map.return_value = lun_maps - - (igroup, lun_id) = self.library._find_mapped_lun_igroup( - fake.LUN_PATH, fake.FC_FORMATTED_INITIATORS) - - self.assertEqual(fake.IGROUP1_NAME, igroup) - self.assertEqual('1', lun_id) - - def test_find_mapped_lun_igroup_initiator_mismatch(self): - self.zapi_client.get_igroup_by_initiators.return_value = [] - - lun_maps = [{'initiator-group': fake.IGROUP1_NAME, - 'lun-id': '1', - 'vserver': fake.VSERVER_NAME}] - 
self.zapi_client.get_lun_map.return_value = lun_maps - - (igroup, lun_id) = self.library._find_mapped_lun_igroup( - fake.LUN_PATH, fake.FC_FORMATTED_INITIATORS) - - self.assertIsNone(igroup) - self.assertIsNone(lun_id) - - def test_find_mapped_lun_igroup_name_mismatch(self): - igroups = [{'initiator-group-os-type': 'linux', - 'initiator-group-type': 'fcp', - 'initiator-group-name': 'igroup2'}] - self.zapi_client.get_igroup_by_initiators.return_value = igroups - - lun_maps = [{'initiator-group': fake.IGROUP1_NAME, - 'lun-id': '1', - 'vserver': fake.VSERVER_NAME}] - self.zapi_client.get_lun_map.return_value = lun_maps - - (igroup, lun_id) = self.library._find_mapped_lun_igroup( - fake.LUN_PATH, fake.FC_FORMATTED_INITIATORS) - - self.assertIsNone(igroup) - self.assertIsNone(lun_id) - - def test_find_mapped_lun_igroup_no_igroup_prefix(self): - igroups = [{'initiator-group-os-type': 'linux', - 'initiator-group-type': 'fcp', - 'initiator-group-name': 'igroup2'}] - self.zapi_client.get_igroup_by_initiators.return_value = igroups - - lun_maps = [{'initiator-group': 'igroup2', - 'lun-id': '1', - 'vserver': fake.VSERVER_NAME}] - self.zapi_client.get_lun_map.return_value = lun_maps - - (igroup, lun_id) = self.library._find_mapped_lun_igroup( - fake.LUN_PATH, fake.FC_FORMATTED_INITIATORS) - - self.assertIsNone(igroup) - self.assertIsNone(lun_id) - - def test_clone_lun_zero_block_count(self): - """Test for when clone lun is not passed a block count.""" - - self.library._get_lun_attr = mock.Mock(return_value={'Volume': - 'fakeLUN'}) - self.library.zapi_client = mock.Mock() - self.library.zapi_client.get_lun_by_args.return_value = [ - mock.Mock(spec=netapp_api.NaElement)] - lun = fake.FAKE_LUN - self.library._get_lun_by_args = mock.Mock(return_value=[lun]) - self.library._add_lun_to_table = mock.Mock() - - self.library._clone_lun('fakeLUN', 'newFakeLUN', 'false') - - self.library.zapi_client.clone_lun.assert_called_once_with( - 'fakeLUN', 'fakeLUN', 'newFakeLUN', 'false', 
block_count=0, - dest_block=0, src_block=0, qos_policy_group_name=None, - source_snapshot=None, is_snapshot=False) - - def test_clone_lun_blocks(self): - """Test for when clone lun is passed block information.""" - block_count = 10 - src_block = 10 - dest_block = 30 - - self.library._get_lun_attr = mock.Mock(return_value={'Volume': - 'fakeLUN'}) - self.library.zapi_client = mock.Mock() - self.library.zapi_client.get_lun_by_args.return_value = [ - mock.Mock(spec=netapp_api.NaElement)] - lun = fake.FAKE_LUN - self.library._get_lun_by_args = mock.Mock(return_value=[lun]) - self.library._add_lun_to_table = mock.Mock() - - self.library._clone_lun('fakeLUN', 'newFakeLUN', 'false', - block_count=block_count, src_block=src_block, - dest_block=dest_block) - - self.library.zapi_client.clone_lun.assert_called_once_with( - 'fakeLUN', 'fakeLUN', 'newFakeLUN', 'false', - block_count=block_count, dest_block=dest_block, - src_block=src_block, qos_policy_group_name=None, - source_snapshot=None, is_snapshot=False) - - def test_clone_lun_no_space_reservation(self): - """Test for when space_reservation is not passed.""" - - self.library._get_lun_attr = mock.Mock(return_value={'Volume': - 'fakeLUN'}) - self.library.zapi_client = mock.Mock() - self.library.lun_space_reservation = 'false' - self.library.zapi_client.get_lun_by_args.return_value = [ - mock.Mock(spec=netapp_api.NaElement)] - lun = fake.FAKE_LUN - self.library._get_lun_by_args = mock.Mock(return_value=[lun]) - self.library._add_lun_to_table = mock.Mock() - - self.library._clone_lun('fakeLUN', 'newFakeLUN', is_snapshot=True) - - self.library.zapi_client.clone_lun.assert_called_once_with( - 'fakeLUN', 'fakeLUN', 'newFakeLUN', 'false', block_count=0, - dest_block=0, src_block=0, qos_policy_group_name=None, - source_snapshot=None, is_snapshot=True) - - def test_get_fc_target_wwpns(self): - ports = [fake.FC_FORMATTED_TARGET_WWPNS[0], - fake.FC_FORMATTED_TARGET_WWPNS[1]] - self.zapi_client.get_fc_target_wwpns.return_value = ports 
- - result = self.library._get_fc_target_wwpns() - - self.assertSetEqual(set(ports), set(result)) - - def test_create_lun(self): - self.library._create_lun( - fake.VOLUME_ID, fake.LUN_ID, fake.LUN_SIZE, fake.LUN_METADATA) - - self.library.zapi_client.create_lun.assert_called_once_with( - fake.VOLUME_ID, fake.LUN_ID, fake.LUN_SIZE, fake.LUN_METADATA, - None) - - def test_get_preferred_target_from_list(self): - target_details_list = fake.ISCSI_TARGET_DETAILS_LIST - operational_addresses = [ - target['address'] - for target in target_details_list[2:]] - self.zapi_client.get_operational_lif_addresses = ( - mock.Mock(return_value=operational_addresses)) - - result = self.library._get_preferred_target_from_list( - target_details_list) - - self.assertEqual(target_details_list[2], result) - - @ddt.data([], ['target_1', 'target_2']) - def test_get_pool_stats(self, replication_backends): - - ssc = { - 'vola': { - 'pool_name': 'vola', - 'thick_provisioning_support': True, - 'thin_provisioning_support': False, - 'netapp_thin_provisioned': 'false', - 'netapp_compression': 'false', - 'netapp_mirrored': 'false', - 'netapp_dedup': 'true', - 'netapp_aggregate': 'aggr1', - 'netapp_raid_type': 'raid_dp', - 'netapp_disk_type': 'SSD', - }, - } - mock_get_ssc = self.mock_object(self.library.ssc_library, - 'get_ssc', - return_value=ssc) - mock_get_aggrs = self.mock_object(self.library.ssc_library, - 'get_ssc_aggregates', - return_value=['aggr1']) - self.mock_object(self.library, 'get_replication_backend_names', - return_value=replication_backends) - - self.library.using_cluster_credentials = True - self.library.reserved_percentage = 5 - self.library.max_over_subscription_ratio = 10 - self.library.perf_library.get_node_utilization_for_pool = ( - mock.Mock(return_value=30.0)) - mock_capacities = { - 'size-total': 10737418240.0, - 'size-available': 2147483648.0, - } - self.mock_object(self.zapi_client, - 'get_flexvol_capacity', - return_value=mock_capacities) - 
self.mock_object(self.zapi_client, - 'get_flexvol_dedupe_used_percent', - return_value=55.0) - - aggr_capacities = { - 'aggr1': { - 'percent-used': 45, - 'size-available': 59055800320.0, - 'size-total': 107374182400.0, - }, - } - mock_get_aggr_capacities = self.mock_object( - self.zapi_client, 'get_aggregate_capacities', - return_value=aggr_capacities) - - result = self.library._get_pool_stats(filter_function='filter', - goodness_function='goodness') - - expected = [{ - 'pool_name': 'vola', - 'QoS_support': True, - 'consistencygroup_support': True, - 'reserved_percentage': 5, - 'max_over_subscription_ratio': 10.0, - 'multiattach': False, - 'total_capacity_gb': 10.0, - 'free_capacity_gb': 2.0, - 'provisioned_capacity_gb': 8.0, - 'netapp_dedupe_used_percent': 55.0, - 'netapp_aggregate_used_percent': 45, - 'utilization': 30.0, - 'filter_function': 'filter', - 'goodness_function': 'goodness', - 'thick_provisioning_support': True, - 'thin_provisioning_support': False, - 'netapp_thin_provisioned': 'false', - 'netapp_compression': 'false', - 'netapp_mirrored': 'false', - 'netapp_dedup': 'true', - 'netapp_aggregate': 'aggr1', - 'netapp_raid_type': 'raid_dp', - 'netapp_disk_type': 'SSD', - 'replication_enabled': False, - }] - if replication_backends: - expected[0].update({ - 'replication_enabled': True, - 'replication_count': len(replication_backends), - 'replication_targets': replication_backends, - 'replication_type': 'async', - }) - - self.assertEqual(expected, result) - mock_get_ssc.assert_called_once_with() - mock_get_aggrs.assert_called_once_with() - mock_get_aggr_capacities.assert_called_once_with(['aggr1']) - - @ddt.data({}, None) - def test_get_pool_stats_no_ssc_vols(self, ssc): - - mock_get_ssc = self.mock_object(self.library.ssc_library, - 'get_ssc', - return_value=ssc) - - pools = self.library._get_pool_stats() - - self.assertListEqual([], pools) - mock_get_ssc.assert_called_once_with() - - @ddt.data(r'open+|demix+', 'open.+', r'.+\d', '^((?!mix+).)*$', - 
'open123, open321') - def test_get_pool_map_match_selected_pools(self, patterns): - - self.library.configuration.netapp_pool_name_search_pattern = patterns - mock_list_flexvols = self.mock_object( - self.zapi_client, 'list_flexvols', - return_value=fake.FAKE_CMODE_VOLUMES) - - result = self.library._get_flexvol_to_pool_map() - - expected = { - 'open123': { - 'pool_name': 'open123', - }, - 'open321': { - 'pool_name': 'open321', - }, - } - self.assertEqual(expected, result) - mock_list_flexvols.assert_called_once_with() - - @ddt.data('', 'mix.+|open.+', '.+', 'open123, mixed, open321', - '.*?') - def test_get_pool_map_match_all_pools(self, patterns): - - self.library.configuration.netapp_pool_name_search_pattern = patterns - mock_list_flexvols = self.mock_object( - self.zapi_client, 'list_flexvols', - return_value=fake.FAKE_CMODE_VOLUMES) - - result = self.library._get_flexvol_to_pool_map() - - self.assertEqual(fake.FAKE_CMODE_POOL_MAP, result) - mock_list_flexvols.assert_called_once_with() - - def test_get_pool_map_invalid_conf(self): - """Verify an exception is raised if the regex pattern is invalid""" - self.library.configuration.netapp_pool_name_search_pattern = '(.+' - - self.assertRaises(exception.InvalidConfigurationValue, - self.library._get_flexvol_to_pool_map) - - @ddt.data('abc|stackopen|openstack|abc*', 'abc', 'stackopen', 'openstack', - 'abc*', '^$') - def test_get_pool_map_non_matching_patterns(self, patterns): - - self.library.configuration.netapp_pool_name_search_pattern = patterns - mock_list_flexvols = self.mock_object( - self.zapi_client, 'list_flexvols', - return_value=fake.FAKE_CMODE_VOLUMES) - - result = self.library._get_flexvol_to_pool_map() - - self.assertEqual({}, result) - mock_list_flexvols.assert_called_once_with() - - def test_update_ssc(self): - - mock_get_pool_map = self.mock_object( - self.library, '_get_flexvol_to_pool_map', - return_value=fake.FAKE_CMODE_VOLUMES) - - result = self.library._update_ssc() - - self.assertIsNone(result) 
- mock_get_pool_map.assert_called_once_with() - self.library.ssc_library.update_ssc.assert_called_once_with( - fake.FAKE_CMODE_VOLUMES) - - def test_delete_volume(self): - self.mock_object(na_utils, 'get_valid_qos_policy_group_info', - return_value=fake.QOS_POLICY_GROUP_INFO) - self.mock_object(self.library, '_mark_qos_policy_group_for_deletion') - - self.library.delete_volume(fake.VOLUME) - - (block_base.NetAppBlockStorageLibrary.delete_volume. - assert_called_once_with(fake.VOLUME)) - na_utils.get_valid_qos_policy_group_info.assert_called_once_with( - fake.VOLUME) - (self.library._mark_qos_policy_group_for_deletion. - assert_called_once_with(fake.QOS_POLICY_GROUP_INFO)) - - def test_delete_volume_get_valid_qos_policy_group_info_exception(self): - self.mock_object(na_utils, 'get_valid_qos_policy_group_info', - side_effect=exception.Invalid) - self.mock_object(self.library, '_mark_qos_policy_group_for_deletion') - - self.library.delete_volume(fake.VOLUME) - - (block_base.NetAppBlockStorageLibrary.delete_volume. - assert_called_once_with(fake.VOLUME)) - (self.library._mark_qos_policy_group_for_deletion. 
- assert_called_once_with(None)) - - def test_setup_qos_for_volume(self): - self.mock_object(na_utils, 'get_valid_qos_policy_group_info', - return_value=fake.QOS_POLICY_GROUP_INFO) - self.mock_object(self.zapi_client, 'provision_qos_policy_group') - - result = self.library._setup_qos_for_volume(fake.VOLUME, - fake.EXTRA_SPECS) - - self.assertEqual(fake.QOS_POLICY_GROUP_INFO, result) - self.zapi_client.provision_qos_policy_group.\ - assert_called_once_with(fake.QOS_POLICY_GROUP_INFO) - - def test_setup_qos_for_volume_exception_path(self): - self.mock_object(na_utils, 'get_valid_qos_policy_group_info', - side_effect=exception.Invalid) - self.mock_object(self.zapi_client, 'provision_qos_policy_group') - - self.assertRaises(exception.VolumeBackendAPIException, - self.library._setup_qos_for_volume, fake.VOLUME, - fake.EXTRA_SPECS) - - self.assertEqual(0, - self.zapi_client. - provision_qos_policy_group.call_count) - - def test_mark_qos_policy_group_for_deletion(self): - self.mock_object(self.zapi_client, - 'mark_qos_policy_group_for_deletion') - - self.library._mark_qos_policy_group_for_deletion( - fake.QOS_POLICY_GROUP_INFO) - - self.zapi_client.mark_qos_policy_group_for_deletion\ - .assert_called_once_with(fake.QOS_POLICY_GROUP_INFO) - - def test_unmanage(self): - self.mock_object(na_utils, 'get_valid_qos_policy_group_info', - return_value=fake.QOS_POLICY_GROUP_INFO) - self.mock_object(self.library, '_mark_qos_policy_group_for_deletion') - self.mock_object(block_base.NetAppBlockStorageLibrary, 'unmanage') - - self.library.unmanage(fake.VOLUME) - - na_utils.get_valid_qos_policy_group_info.assert_called_once_with( - fake.VOLUME) - self.library._mark_qos_policy_group_for_deletion\ - .assert_called_once_with(fake.QOS_POLICY_GROUP_INFO) - block_base.NetAppBlockStorageLibrary.unmanage.assert_called_once_with( - fake.VOLUME) - - def test_unmanage_w_invalid_qos_policy(self): - self.mock_object(na_utils, 'get_valid_qos_policy_group_info', - side_effect=exception.Invalid) - 
self.mock_object(self.library, '_mark_qos_policy_group_for_deletion') - self.mock_object(block_base.NetAppBlockStorageLibrary, 'unmanage') - - self.library.unmanage(fake.VOLUME) - - na_utils.get_valid_qos_policy_group_info.assert_called_once_with( - fake.VOLUME) - self.library._mark_qos_policy_group_for_deletion\ - .assert_called_once_with(None) - block_base.NetAppBlockStorageLibrary.unmanage.assert_called_once_with( - fake.VOLUME) - - def test_manage_existing_lun_same_name(self): - mock_lun = block_base.NetAppLun('handle', 'name', '1', - {'Path': '/vol/FAKE_CMODE_VOL1/name'}) - self.library._get_existing_vol_with_manage_ref = mock.Mock( - return_value=mock_lun) - self.mock_object(na_utils, 'get_volume_extra_specs') - self.mock_object(na_utils, 'log_extra_spec_warnings') - self.library._check_volume_type_for_lun = mock.Mock() - self.library._setup_qos_for_volume = mock.Mock() - self.mock_object(na_utils, 'get_qos_policy_group_name_from_info', - return_value=fake.QOS_POLICY_GROUP_NAME) - self.library._add_lun_to_table = mock.Mock() - self.zapi_client.move_lun = mock.Mock() - mock_set_lun_qos_policy_group = self.mock_object( - self.zapi_client, 'set_lun_qos_policy_group') - - self.library.manage_existing({'name': 'name'}, {'ref': 'ref'}) - - self.library._get_existing_vol_with_manage_ref.assert_called_once_with( - {'ref': 'ref'}) - self.assertEqual(1, self.library._check_volume_type_for_lun.call_count) - self.assertEqual(1, self.library._add_lun_to_table.call_count) - self.assertEqual(0, self.zapi_client.move_lun.call_count) - self.assertEqual(1, mock_set_lun_qos_policy_group.call_count) - - def test_manage_existing_lun_new_path(self): - mock_lun = block_base.NetAppLun( - 'handle', 'name', '1', {'Path': '/vol/FAKE_CMODE_VOL1/name'}) - self.library._get_existing_vol_with_manage_ref = mock.Mock( - return_value=mock_lun) - self.mock_object(na_utils, 'get_volume_extra_specs') - self.mock_object(na_utils, 'log_extra_spec_warnings') - 
self.library._check_volume_type_for_lun = mock.Mock() - self.library._add_lun_to_table = mock.Mock() - self.zapi_client.move_lun = mock.Mock() - - self.library.manage_existing({'name': 'volume'}, {'ref': 'ref'}) - - self.assertEqual( - 2, self.library._get_existing_vol_with_manage_ref.call_count) - self.assertEqual(1, self.library._check_volume_type_for_lun.call_count) - self.assertEqual(1, self.library._add_lun_to_table.call_count) - self.zapi_client.move_lun.assert_called_once_with( - '/vol/FAKE_CMODE_VOL1/name', '/vol/FAKE_CMODE_VOL1/volume') - - @ddt.data({'secondary_id': 'dev0', 'configured_targets': ['dev1']}, - {'secondary_id': 'dev3', 'configured_targets': ['dev1', 'dev2']}, - {'secondary_id': 'dev1', 'configured_targets': []}, - {'secondary_id': None, 'configured_targets': []}) - @ddt.unpack - def test_failover_host_invalid_replication_target(self, secondary_id, - configured_targets): - """This tests executes a method in the DataMotionMixin.""" - self.library.backend_name = 'dev0' - self.mock_object(data_motion.DataMotionMixin, - 'get_replication_backend_names', - return_value=configured_targets) - complete_failover_call = self.mock_object( - data_motion.DataMotionMixin, '_complete_failover') - - self.assertRaises(exception.InvalidReplicationTarget, - self.library.failover_host, 'fake_context', [], - secondary_id=secondary_id) - self.assertFalse(complete_failover_call.called) - - def test_failover_host_unable_to_failover(self): - """This tests executes a method in the DataMotionMixin.""" - self.library.backend_name = 'dev0' - self.mock_object( - data_motion.DataMotionMixin, '_complete_failover', - side_effect=exception.NetAppDriverException) - self.mock_object(data_motion.DataMotionMixin, - 'get_replication_backend_names', - return_value=['dev1', 'dev2']) - self.mock_object(self.library.ssc_library, 'get_ssc_flexvol_names', - return_value=fake_utils.SSC.keys()) - self.mock_object(self.library, '_update_zapi_client') - - 
self.assertRaises(exception.UnableToFailOver, - self.library.failover_host, 'fake_context', [], - secondary_id='dev1') - data_motion.DataMotionMixin._complete_failover.assert_called_once_with( - 'dev0', ['dev1', 'dev2'], fake_utils.SSC.keys(), [], - failover_target='dev1') - self.assertFalse(self.library._update_zapi_client.called) - - def test_failover_host(self): - """This tests executes a method in the DataMotionMixin.""" - self.library.backend_name = 'dev0' - self.mock_object(data_motion.DataMotionMixin, '_complete_failover', - return_value=('dev1', [])) - self.mock_object(data_motion.DataMotionMixin, - 'get_replication_backend_names', - return_value=['dev1', 'dev2']) - self.mock_object(self.library.ssc_library, 'get_ssc_flexvol_names', - return_value=fake_utils.SSC.keys()) - self.mock_object(self.library, '_update_zapi_client') - - actual_active, vol_updates, __ = self.library.failover_host( - 'fake_context', [], secondary_id='dev1', groups=[]) - - data_motion.DataMotionMixin._complete_failover.assert_called_once_with( - 'dev0', ['dev1', 'dev2'], fake_utils.SSC.keys(), [], - failover_target='dev1') - self.library._update_zapi_client.assert_called_once_with('dev1') - self.assertTrue(self.library.failed_over) - self.assertEqual('dev1', self.library.failed_over_backend_name) - self.assertEqual('dev1', actual_active) - self.assertEqual([], vol_updates) - - def test_add_looping_tasks(self): - mock_update_ssc = self.mock_object(self.library, '_update_ssc') - mock_remove_unused_qos_policy_groups = self.mock_object( - self.zapi_client, 'remove_unused_qos_policy_groups') - mock_add_task = self.mock_object(self.library.loopingcalls, 'add_task') - mock_super_add_looping_tasks = self.mock_object( - block_base.NetAppBlockStorageLibrary, '_add_looping_tasks') - - self.library._add_looping_tasks() - - mock_update_ssc.assert_called_once_with() - mock_add_task.assert_has_calls([ - mock.call(mock_update_ssc, - loopingcalls.ONE_HOUR, - loopingcalls.ONE_HOUR), - 
mock.call(mock_remove_unused_qos_policy_groups, - loopingcalls.ONE_MINUTE, - loopingcalls.ONE_MINUTE)]) - mock_super_add_looping_tasks.assert_called_once_with() - - def test_get_backing_flexvol_names(self): - mock_ssc_library = self.mock_object( - self.library.ssc_library, 'get_ssc') - - self.library._get_backing_flexvol_names() - - mock_ssc_library.assert_called_once_with() diff --git a/cinder/tests/unit/volume/drivers/netapp/dataontap/test_block_driver_interfaces.py b/cinder/tests/unit/volume/drivers/netapp/dataontap/test_block_driver_interfaces.py deleted file mode 100644 index 6a42fde1c..000000000 --- a/cinder/tests/unit/volume/drivers/netapp/dataontap/test_block_driver_interfaces.py +++ /dev/null @@ -1,70 +0,0 @@ -# Copyright (c) 2015 Clinton Knight. All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
-""" -Mock unit tests for the NetApp block storage driver interfaces -""" - - -from cinder import test -from cinder.volume.drivers.netapp.dataontap import block_7mode -from cinder.volume.drivers.netapp.dataontap import block_cmode -from cinder.volume.drivers.netapp.dataontap import fc_7mode -from cinder.volume.drivers.netapp.dataontap import fc_cmode -from cinder.volume.drivers.netapp.dataontap import iscsi_7mode -from cinder.volume.drivers.netapp.dataontap import iscsi_cmode - - -class NetAppBlockStorageDriverInterfaceTestCase(test.TestCase): - - def setUp(self): - super(NetAppBlockStorageDriverInterfaceTestCase, self).setUp() - - self.mock_object(block_cmode.NetAppBlockStorageCmodeLibrary, - '__init__', - return_value=None) - self.mock_object(block_7mode.NetAppBlockStorage7modeLibrary, - '__init__', - return_value=None) - - self.iscsi_7mode_driver = iscsi_7mode.NetApp7modeISCSIDriver() - self.iscsi_cmode_driver = iscsi_cmode.NetAppCmodeISCSIDriver() - self.fc_7mode_driver = fc_7mode.NetApp7modeFibreChannelDriver() - self.fc_cmode_driver = fc_cmode.NetAppCmodeFibreChannelDriver() - - def test_driver_interfaces_match(self): - """Ensure the NetApp block storage driver interfaces match. - - The four block storage Cinder drivers from NetApp (iSCSI/FC, - 7-mode/C-mode) are merely passthrough shim layers atop a common - block storage library. Bugs have been introduced when a Cinder - method was exposed via a subset of those driver shims. This test - ensures they remain in sync and the library features are uniformly - available in the four drivers. 
- """ - - # Get local functions of each driver interface - iscsi_7mode = self._get_local_functions(self.iscsi_7mode_driver) - iscsi_cmode = self._get_local_functions(self.iscsi_cmode_driver) - fc_7mode = self._get_local_functions(self.fc_7mode_driver) - fc_cmode = self._get_local_functions(self.fc_cmode_driver) - - # Ensure NetApp block storage driver shims are identical - self.assertSetEqual(iscsi_7mode, iscsi_cmode) - self.assertSetEqual(iscsi_7mode, fc_7mode) - self.assertSetEqual(iscsi_7mode, fc_cmode) - - def _get_local_functions(self, obj): - """Get function names of an object without superclass functions.""" - return set([key for key, value in type(obj).__dict__.items() - if callable(value)]) diff --git a/cinder/tests/unit/volume/drivers/netapp/dataontap/test_nfs_7mode.py b/cinder/tests/unit/volume/drivers/netapp/dataontap/test_nfs_7mode.py deleted file mode 100644 index ab8d98449..000000000 --- a/cinder/tests/unit/volume/drivers/netapp/dataontap/test_nfs_7mode.py +++ /dev/null @@ -1,267 +0,0 @@ -# Copyright (c) 2015 Tom Barron. All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
-""" -Unit tests for the NetApp 7mode NFS storage driver -""" - -import ddt -import mock -from os_brick.remotefs import remotefs as remotefs_brick -from oslo_utils import units - -from cinder import test -from cinder.tests.unit.volume.drivers.netapp.dataontap import fakes as fake -from cinder.tests.unit.volume.drivers.netapp import fakes as na_fakes -from cinder import utils -from cinder.volume.drivers.netapp.dataontap import nfs_7mode -from cinder.volume.drivers.netapp.dataontap import nfs_base -from cinder.volume.drivers.netapp.dataontap.utils import utils as dot_utils -from cinder.volume.drivers.netapp import utils as na_utils - - -@ddt.ddt -class NetApp7modeNfsDriverTestCase(test.TestCase): - def setUp(self): - super(NetApp7modeNfsDriverTestCase, self).setUp() - - kwargs = { - 'configuration': self.get_config_7mode(), - 'host': 'openstack@7modenfs', - } - - with mock.patch.object(utils, 'get_root_helper', - return_value=mock.Mock()): - with mock.patch.object(remotefs_brick, 'RemoteFsClient', - return_value=mock.Mock()): - self.driver = nfs_7mode.NetApp7modeNfsDriver(**kwargs) - self.driver._mounted_shares = [fake.NFS_SHARE] - self.driver.ssc_vols = True - self.driver.zapi_client = mock.Mock() - self.driver.perf_library = mock.Mock() - - def get_config_7mode(self): - config = na_fakes.create_configuration_cmode() - config.netapp_storage_protocol = 'nfs' - config.netapp_login = 'root' - config.netapp_password = 'pass' - config.netapp_server_hostname = '127.0.0.1' - config.netapp_transport_type = 'http' - config.netapp_server_port = '80' - return config - - @ddt.data({'share': None, 'is_snapshot': False}, - {'share': None, 'is_snapshot': True}, - {'share': 'fake_share', 'is_snapshot': False}, - {'share': 'fake_share', 'is_snapshot': True}) - @ddt.unpack - def test_clone_backing_file_for_volume(self, share, is_snapshot): - - mock_get_export_ip_path = self.mock_object( - self.driver, '_get_export_ip_path', - return_value=(fake.SHARE_IP, fake.EXPORT_PATH)) - 
mock_get_actual_path_for_export = self.mock_object( - self.driver.zapi_client, 'get_actual_path_for_export', - return_value='fake_path') - - self.driver._clone_backing_file_for_volume( - fake.FLEXVOL, 'fake_clone', fake.VOLUME_ID, share=share, - is_snapshot=is_snapshot) - - mock_get_export_ip_path.assert_called_once_with( - fake.VOLUME_ID, share) - mock_get_actual_path_for_export.assert_called_once_with( - fake.EXPORT_PATH) - self.driver.zapi_client.clone_file.assert_called_once_with( - 'fake_path/' + fake.FLEXVOL, 'fake_path/fake_clone', - None) - - @ddt.data({'nfs_sparsed_volumes': True}, - {'nfs_sparsed_volumes': False}) - @ddt.unpack - def test_get_pool_stats(self, nfs_sparsed_volumes): - - self.driver.configuration.nfs_sparsed_volumes = nfs_sparsed_volumes - thick = not nfs_sparsed_volumes - - total_capacity_gb = na_utils.round_down( - fake.TOTAL_BYTES // units.Gi, '0.01') - free_capacity_gb = na_utils.round_down( - fake.AVAILABLE_BYTES // units.Gi, '0.01') - provisioned_capacity_gb = total_capacity_gb - free_capacity_gb - capacity = { - 'reserved_percentage': fake.RESERVED_PERCENTAGE, - 'max_over_subscription_ratio': fake.MAX_OVER_SUBSCRIPTION_RATIO, - 'total_capacity_gb': total_capacity_gb, - 'free_capacity_gb': free_capacity_gb, - 'provisioned_capacity_gb': provisioned_capacity_gb, - } - self.mock_object(self.driver, - '_get_share_capacity_info', - return_value=capacity) - self.mock_object(self.driver.perf_library, - 'get_node_utilization', - return_value=30.0) - - result = self.driver._get_pool_stats(filter_function='filter', - goodness_function='goodness') - - expected = [{'pool_name': '192.168.99.24:/fake/export/path', - 'QoS_support': False, - 'consistencygroup_support': True, - 'thick_provisioning_support': thick, - 'thin_provisioning_support': not thick, - 'free_capacity_gb': 12.0, - 'total_capacity_gb': 4468.0, - 'reserved_percentage': 7, - 'max_over_subscription_ratio': 19.0, - 'multiattach': False, - 'provisioned_capacity_gb': 4456.0, - 
'utilization': 30.0, - 'filter_function': 'filter', - 'goodness_function': 'goodness'}] - - self.assertEqual(expected, result) - - def test_shortlist_del_eligible_files(self): - mock_get_path_for_export = self.mock_object( - self.driver.zapi_client, 'get_actual_path_for_export') - mock_get_path_for_export.return_value = fake.FLEXVOL - - mock_get_file_usage = self.mock_object( - self.driver.zapi_client, 'get_file_usage') - mock_get_file_usage.return_value = fake.CAPACITY_VALUES[0] - - expected = [(old_file, fake.CAPACITY_VALUES[0]) for old_file - in fake.FILE_LIST] - - result = self.driver._shortlist_del_eligible_files( - fake.NFS_SHARE, fake.FILE_LIST) - - self.assertEqual(expected, result) - - def test_shortlist_del_eligible_files_empty_list(self): - mock_get_export_ip_path = self.mock_object( - self.driver, '_get_export_ip_path') - mock_get_export_ip_path.return_value = ('', '/export_path') - - mock_get_path_for_export = self.mock_object( - self.driver.zapi_client, 'get_actual_path_for_export') - mock_get_path_for_export.return_value = fake.FLEXVOL - - result = self.driver._shortlist_del_eligible_files( - fake.NFS_SHARE, []) - - self.assertEqual([], result) - - @ddt.data({'has_space': True, 'expected': True}, - {'has_space': False, 'expected': False}) - @ddt.unpack - def test_is_share_clone_compatible(self, has_space, expected): - mock_share_has_space_for_clone = self.mock_object( - self.driver, '_share_has_space_for_clone') - mock_share_has_space_for_clone.return_value = has_space - - result = self.driver._is_share_clone_compatible(fake.VOLUME, - fake.NFS_SHARE) - - self.assertEqual(expected, result) - - def test__get_volume_model_update(self): - """Driver is not expected to return a model update.""" - self.assertIsNone( - self.driver._get_volume_model_update(fake.VOLUME_REF)) - - def test_delete_cgsnapshot(self): - mock_delete_file = self.mock_object(self.driver, '_delete_file') - - model_update, snapshots_model_update = ( - self.driver.delete_cgsnapshot( - 
fake.CG_CONTEXT, fake.CG_SNAPSHOT, [fake.SNAPSHOT])) - - mock_delete_file.assert_called_once_with( - fake.SNAPSHOT['volume_id'], fake.SNAPSHOT['name']) - self.assertIsNone(model_update) - self.assertIsNone(snapshots_model_update) - - def test_get_snapshot_backing_flexvol_names(self): - snapshots = [ - {'volume': {'host': 'hostA@192.168.99.25#/fake/volume1'}}, - {'volume': {'host': 'hostA@192.168.1.01#/fake/volume2'}}, - {'volume': {'host': 'hostA@192.168.99.25#/fake/volume3'}}, - {'volume': {'host': 'hostA@192.168.99.25#/fake/volume1'}}, - ] - - hosts = [snap['volume']['host'] for snap in snapshots] - flexvols = self.driver._get_flexvol_names_from_hosts(hosts) - - self.assertEqual(3, len(flexvols)) - self.assertIn('volume1', flexvols) - self.assertIn('volume2', flexvols) - self.assertIn('volume3', flexvols) - - def test_check_for_setup_error(self): - mock_get_ontapi_version = self.mock_object( - self.driver.zapi_client, 'get_ontapi_version') - mock_get_ontapi_version.return_value = ['1', '10'] - mock_add_looping_tasks = self.mock_object( - self.driver, '_add_looping_tasks') - mock_super_check_for_setup_error = self.mock_object( - nfs_base.NetAppNfsDriver, 'check_for_setup_error') - - self.driver.check_for_setup_error() - - mock_get_ontapi_version.assert_called_once_with() - mock_add_looping_tasks.assert_called_once_with() - mock_super_check_for_setup_error.assert_called_once_with() - - def test_add_looping_tasks(self): - mock_super_add_looping_tasks = self.mock_object( - nfs_base.NetAppNfsDriver, '_add_looping_tasks') - - self.driver._add_looping_tasks() - mock_super_add_looping_tasks.assert_called_once_with() - - def test_handle_ems_logging(self): - - volume_list = ['vol0', 'vol1', 'vol2'] - self.mock_object( - self.driver, '_get_backing_flexvol_names', - return_value=volume_list) - self.mock_object( - dot_utils, 'build_ems_log_message_0', - return_value='fake_base_ems_log_message') - self.mock_object( - dot_utils, 'build_ems_log_message_1', - 
return_value='fake_pool_ems_log_message') - mock_send_ems_log_message = self.mock_object( - self.driver.zapi_client, 'send_ems_log_message') - - self.driver._handle_ems_logging() - - mock_send_ems_log_message.assert_has_calls([ - mock.call('fake_base_ems_log_message'), - mock.call('fake_pool_ems_log_message'), - ]) - dot_utils.build_ems_log_message_0.assert_called_once_with( - self.driver.driver_name, self.driver.app_version, - self.driver.driver_mode) - dot_utils.build_ems_log_message_1.assert_called_once_with( - self.driver.driver_name, self.driver.app_version, None, - volume_list, []) - - def test_get_backing_flexvol_names(self): - - result = self.driver._get_backing_flexvol_names() - - self.assertEqual('path', result[0]) diff --git a/cinder/tests/unit/volume/drivers/netapp/dataontap/test_nfs_base.py b/cinder/tests/unit/volume/drivers/netapp/dataontap/test_nfs_base.py deleted file mode 100644 index 798393ec6..000000000 --- a/cinder/tests/unit/volume/drivers/netapp/dataontap/test_nfs_base.py +++ /dev/null @@ -1,1168 +0,0 @@ -# Copyright (c) 2014 Andrew Kerr. All rights reserved. -# Copyright (c) 2015 Tom Barron. All rights reserved. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
-""" -Unit tests for the NetApp NFS storage driver -""" -import copy -import os -import threading -import time - -import ddt -import mock -from os_brick.remotefs import remotefs as remotefs_brick -from oslo_concurrency import processutils -from oslo_utils import units -import shutil - -from cinder import context -from cinder import exception -from cinder.objects import fields -from cinder import test -from cinder.tests.unit import fake_snapshot -from cinder.tests.unit import fake_volume -from cinder.tests.unit.volume.drivers.netapp.dataontap import fakes as fake -from cinder import utils -from cinder.volume.drivers.netapp.dataontap.client import api as netapp_api -from cinder.volume.drivers.netapp.dataontap import nfs_base -from cinder.volume.drivers.netapp.dataontap.utils import loopingcalls -from cinder.volume.drivers.netapp import utils as na_utils -from cinder.volume.drivers import nfs -from cinder.volume.drivers import remotefs - - -@ddt.ddt -class NetAppNfsDriverTestCase(test.TestCase): - def setUp(self): - super(NetAppNfsDriverTestCase, self).setUp() - configuration = mock.Mock() - configuration.reserved_percentage = 0 - configuration.nfs_mount_point_base = '/mnt/test' - configuration.reserved_percentage = 0 - configuration.max_over_subscription_ratio = 1.1 - self.fake_nfs_export_1 = fake.NFS_EXPORT_1 - self.fake_nfs_export_2 = fake.NFS_EXPORT_2 - self.fake_mount_point = fake.MOUNT_POINT - self.ctxt = context.RequestContext('fake', 'fake', auth_token=True) - - kwargs = { - 'configuration': configuration, - 'host': 'openstack@netappnfs', - } - - with mock.patch.object(utils, 'get_root_helper', - return_value=mock.Mock()): - with mock.patch.object(remotefs_brick, 'RemoteFsClient', - return_value=mock.Mock()): - self.driver = nfs_base.NetAppNfsDriver(**kwargs) - self.driver.db = mock.Mock() - - self.driver.zapi_client = mock.Mock() - self.zapi_client = self.driver.zapi_client - - @mock.patch.object(nfs.NfsDriver, 'do_setup') - @mock.patch.object(na_utils, 
'check_flags') - def test_do_setup(self, mock_check_flags, mock_super_do_setup): - self.driver.do_setup(mock.Mock()) - - self.assertTrue(mock_check_flags.called) - self.assertTrue(mock_super_do_setup.called) - - def test_get_share_capacity_info(self): - mock_get_capacity = self.mock_object(self.driver, '_get_capacity_info') - mock_get_capacity.return_value = fake.CAPACITY_VALUES - expected_total_capacity_gb = na_utils.round_down( - fake.TOTAL_BYTES / units.Gi, '0.01') - expected_free_capacity_gb = (na_utils.round_down( - fake.AVAILABLE_BYTES / units.Gi, '0.01')) - expected_reserved_percentage = round( - self.driver.configuration.reserved_percentage) - - result = self.driver._get_share_capacity_info(fake.NFS_SHARE) - - self.assertEqual(expected_total_capacity_gb, - result['total_capacity_gb']) - self.assertEqual(expected_free_capacity_gb, - result['free_capacity_gb']) - self.assertEqual(expected_reserved_percentage, - round(result['reserved_percentage'])) - - def test_get_capacity_info_ipv4_share(self): - expected = fake.CAPACITY_VALUES - get_capacity = self.driver.zapi_client.get_flexvol_capacity - get_capacity.return_value = fake.CAPACITIES - - result = self.driver._get_capacity_info(fake.NFS_SHARE_IPV4) - - self.assertEqual(expected, result) - get_capacity.assert_has_calls([ - mock.call(flexvol_path=fake.EXPORT_PATH)]) - - def test_get_capacity_info_ipv6_share(self): - expected = fake.CAPACITY_VALUES - get_capacity = self.driver.zapi_client.get_flexvol_capacity - get_capacity.return_value = fake.CAPACITIES - - result = self.driver._get_capacity_info(fake.NFS_SHARE_IPV6) - - self.assertEqual(expected, result) - get_capacity.assert_has_calls([ - mock.call(flexvol_path=fake.EXPORT_PATH)]) - - def test_get_pool(self): - pool = self.driver.get_pool({'provider_location': 'fake-share'}) - - self.assertEqual('fake-share', pool) - - @ddt.data(None, - {'replication_status': fields.ReplicationStatus.ENABLED}) - def test_create_volume(self, model_update): - 
self.mock_object(self.driver, '_ensure_shares_mounted') - self.mock_object(na_utils, 'get_volume_extra_specs') - self.mock_object(self.driver, '_do_create_volume') - self.mock_object(self.driver, '_do_qos_for_volume') - self.mock_object(self.driver, '_get_volume_model_update', - return_value=model_update) - expected = {'provider_location': fake.NFS_SHARE} - if model_update: - expected.update(model_update) - - actual = self.driver.create_volume(fake.NFS_VOLUME) - - self.assertEqual(expected, actual) - - def test_create_volume_no_pool(self): - volume = copy.deepcopy(fake.NFS_VOLUME) - volume['host'] = '%s@%s' % (fake.HOST_NAME, fake.BACKEND_NAME) - self.mock_object(self.driver, '_ensure_shares_mounted') - - self.assertRaises(exception.InvalidHost, - self.driver.create_volume, - volume) - - def test_create_volume_exception(self): - self.mock_object(self.driver, '_ensure_shares_mounted') - self.mock_object(na_utils, 'get_volume_extra_specs') - mock_create = self.mock_object(self.driver, '_do_create_volume') - mock_create.side_effect = Exception - - self.assertRaises(exception.VolumeBackendAPIException, - self.driver.create_volume, - fake.NFS_VOLUME) - - @ddt.data(None, {'key': 'value'}) - def test_clone_source_to_destination_volume(self, model_update): - self.mock_object(self.driver, '_get_volume_location', - return_value=fake.POOL_NAME) - self.mock_object(na_utils, 'get_volume_extra_specs', - return_value=fake.EXTRA_SPECS) - self.mock_object( - self.driver, - '_clone_with_extension_check') - self.mock_object(self.driver, '_do_qos_for_volume') - self.mock_object(self.driver, '_get_volume_model_update', - return_value=model_update) - expected = {'provider_location': fake.POOL_NAME} - if model_update: - expected.update(model_update) - - result = self.driver._clone_source_to_destination_volume( - fake.CLONE_SOURCE, fake.CLONE_DESTINATION) - - self.assertEqual(expected, result) - - def test_clone_source_to_destination_volume_with_do_qos_exception(self): - 
self.mock_object(self.driver, '_get_volume_location', - return_value=fake.POOL_NAME) - self.mock_object(na_utils, 'get_volume_extra_specs', - return_value=fake.EXTRA_SPECS) - self.mock_object( - self.driver, - '_clone_with_extension_check') - self.mock_object(self.driver, '_do_qos_for_volume', - side_effect=Exception) - - self.assertRaises( - exception.VolumeBackendAPIException, - self.driver._clone_source_to_destination_volume, - fake.CLONE_SOURCE, - fake.CLONE_DESTINATION) - - def test_clone_with_extension_check_equal_sizes(self): - clone_source = copy.deepcopy(fake.CLONE_SOURCE) - clone_source['size'] = fake.VOLUME['size'] - self.mock_object(self.driver, '_clone_backing_file_for_volume') - self.mock_object(self.driver, 'local_path') - mock_discover = self.mock_object(self.driver, - '_discover_file_till_timeout') - mock_discover.return_value = True - self.mock_object(self.driver, '_set_rw_permissions') - mock_extend_volume = self.mock_object(self.driver, 'extend_volume') - - self.driver._clone_with_extension_check(clone_source, fake.NFS_VOLUME) - - self.assertEqual(0, mock_extend_volume.call_count) - - def test_clone_with_extension_check_unequal_sizes(self): - clone_source = copy.deepcopy(fake.CLONE_SOURCE) - clone_source['size'] = fake.VOLUME['size'] + 1 - self.mock_object(self.driver, '_clone_backing_file_for_volume') - self.mock_object(self.driver, 'local_path') - mock_discover = self.mock_object(self.driver, - '_discover_file_till_timeout') - mock_discover.return_value = True - self.mock_object(self.driver, '_set_rw_permissions') - mock_extend_volume = self.mock_object(self.driver, 'extend_volume') - - self.driver._clone_with_extension_check(clone_source, fake.NFS_VOLUME) - - self.assertEqual(1, mock_extend_volume.call_count) - - def test_clone_with_extension_check_extend_exception(self): - clone_source = copy.deepcopy(fake.CLONE_SOURCE) - clone_source['size'] = fake.VOLUME['size'] + 1 - self.mock_object(self.driver, '_clone_backing_file_for_volume') - 
self.mock_object(self.driver, 'local_path') - mock_discover = self.mock_object(self.driver, - '_discover_file_till_timeout') - mock_discover.return_value = True - self.mock_object(self.driver, '_set_rw_permissions') - mock_extend_volume = self.mock_object(self.driver, 'extend_volume') - mock_extend_volume.side_effect = Exception - mock_cleanup = self.mock_object(self.driver, - '_cleanup_volume_on_failure') - - self.assertRaises(exception.CinderException, - self.driver._clone_with_extension_check, - clone_source, - fake.NFS_VOLUME) - - self.assertEqual(1, mock_cleanup.call_count) - - def test_clone_with_extension_check_no_discovery(self): - self.mock_object(self.driver, '_clone_backing_file_for_volume') - self.mock_object(self.driver, 'local_path') - self.mock_object(self.driver, '_set_rw_permissions') - mock_discover = self.mock_object(self.driver, - '_discover_file_till_timeout') - mock_discover.return_value = False - - self.assertRaises(exception.CinderException, - self.driver._clone_with_extension_check, - fake.CLONE_SOURCE, - fake.NFS_VOLUME) - - def test_create_volume_from_snapshot(self): - volume = fake.VOLUME - expected_source = { - 'name': fake.SNAPSHOT_NAME, - 'size': fake.SIZE, - 'id': fake.VOLUME_ID, - } - mock_clone_call = self.mock_object( - self.driver, '_clone_source_to_destination_volume', - return_value='fake') - - retval = self.driver.create_volume_from_snapshot(volume, fake.SNAPSHOT) - - self.assertEqual('fake', retval) - mock_clone_call.assert_called_once_with(expected_source, volume) - - def test_create_cloned_volume(self): - provider_location = fake.POOL_NAME - src_vref = fake.CLONE_SOURCE - self.mock_object(self.driver, '_clone_source_to_destination_volume', - return_value=provider_location) - - result = self.driver.create_cloned_volume(fake.NFS_VOLUME, - src_vref) - self.assertEqual(provider_location, result) - - def test_do_qos_for_volume(self): - self.assertRaises(NotImplementedError, - self.driver._do_qos_for_volume, - fake.NFS_VOLUME, - 
fake.EXTRA_SPECS) - - def test_create_snapshot(self): - - mock_clone_backing_file_for_volume = self.mock_object( - self.driver, '_clone_backing_file_for_volume') - - self.driver.create_snapshot(fake.SNAPSHOT) - - mock_clone_backing_file_for_volume.assert_called_once_with( - fake.SNAPSHOT['volume_name'], fake.SNAPSHOT['name'], - fake.SNAPSHOT['volume_id'], is_snapshot=True) - - def test_delete_snapshot(self): - updates = { - 'name': fake.SNAPSHOT_NAME, - 'volume_size': fake.SIZE, - 'volume_id': fake.VOLUME_ID, - 'volume_name': fake.VOLUME_NAME, - 'busy': False, - } - snapshot = fake_snapshot.fake_snapshot_obj(self.ctxt, **updates) - self.mock_object(self.driver, '_delete_file') - - self.driver.delete_snapshot(snapshot) - - self.driver._delete_file.assert_called_once_with(snapshot.volume_id, - snapshot.name) - - def test__get_volume_location(self): - volume_id = fake.VOLUME_ID - self.mock_object(self.driver, '_get_host_ip', - return_value='168.124.10.12') - self.mock_object(self.driver, '_get_export_path', - return_value='/fake_mount_path') - - retval = self.driver._get_volume_location(volume_id) - - self.assertEqual('168.124.10.12:/fake_mount_path', retval) - self.driver._get_host_ip.assert_called_once_with(volume_id) - self.driver._get_export_path.assert_called_once_with(volume_id) - - def test__clone_backing_file_for_volume(self): - self.assertRaises(NotImplementedError, - self.driver._clone_backing_file_for_volume, - fake.VOLUME_NAME, fake.CLONE_SOURCE_NAME, - fake.VOLUME_ID, share=None) - - def test__get_provider_location(self): - updates = {'provider_location': fake.PROVIDER_LOCATION} - volume = fake_volume.fake_volume_obj(self.ctxt, **updates) - self.mock_object(self.driver.db, 'volume_get', return_value=volume) - - retval = self.driver._get_provider_location(fake.VOLUME_ID) - - self.assertEqual(fake.PROVIDER_LOCATION, retval) - - @ddt.data(None, processutils.ProcessExecutionError) - def test__volume_not_present(self, side_effect): - 
self.mock_object(self.driver, '_get_volume_path') - self.mock_object(self.driver, '_try_execute', side_effect=side_effect) - - retval = self.driver._volume_not_present( - fake.MOUNT_PATH, fake.VOLUME_NAME) - - self.assertEqual(side_effect is not None, retval) - - @mock.patch.object(time, 'sleep') - def test__try_execute_exception(self, patched_sleep): - self.mock_object(self.driver, '_execute', - side_effect=processutils.ProcessExecutionError) - mock_exception_log = self.mock_object(nfs_base.LOG, 'exception') - self.driver.configuration.num_shell_tries = 3 - - self.assertRaises(processutils.ProcessExecutionError, - self.driver._try_execute, - 'fake-command', attr1='val1', attr2='val2') - self.assertEqual(2, mock_exception_log.call_count) - self.driver._execute.assert_has_calls([ - mock.call('fake-command', attr1='val1', attr2='val2'), - mock.call('fake-command', attr1='val1', attr2='val2'), - mock.call('fake-command', attr1='val1', attr2='val2')]) - self.assertEqual(2, time.sleep.call_count) - patched_sleep.assert_has_calls([mock.call(1), mock.call(4)]) - - def test__update_volume_stats(self): - self.assertRaises(NotImplementedError, - self.driver._update_volume_stats) - - def test_copy_image_to_volume_base_exception(self): - mock_info_log = self.mock_object(nfs_base.LOG, 'info') - self.mock_object(remotefs.RemoteFSDriver, 'copy_image_to_volume', - side_effect=exception.NfsException) - - self.assertRaises(exception.NfsException, - self.driver.copy_image_to_volume, - 'fake_context', fake.NFS_VOLUME, - 'fake_img_service', fake.IMAGE_FILE_ID) - mock_info_log.assert_not_called() - - def test_copy_image_to_volume(self): - mock_log = self.mock_object(nfs_base, 'LOG') - mock_copy_image = self.mock_object( - remotefs.RemoteFSDriver, 'copy_image_to_volume') - mock_register_image = self.mock_object( - self.driver, '_register_image_in_cache') - - self.driver.copy_image_to_volume('fake_context', - fake.NFS_VOLUME, - 'fake_img_service', - fake.IMAGE_FILE_ID) - - 
mock_copy_image.assert_called_once_with( - 'fake_context', fake.NFS_VOLUME, 'fake_img_service', - fake.IMAGE_FILE_ID) - self.assertEqual(1, mock_log.info.call_count) - mock_register_image.assert_called_once_with( - fake.NFS_VOLUME, fake.IMAGE_FILE_ID) - - @ddt.data(None, Exception) - def test__register_image_in_cache(self, exc): - mock_log = self.mock_object(nfs_base, 'LOG') - self.mock_object(self.driver, '_do_clone_rel_img_cache', - side_effect=exc) - - retval = self.driver._register_image_in_cache( - fake.NFS_VOLUME, fake.IMAGE_FILE_ID) - - self.assertIsNone(retval) - self.assertEqual(exc is not None, mock_log.warning.called) - self.assertEqual(1, mock_log.info.call_count) - - @ddt.data(True, False) - def test_do_clone_rel_img_cache(self, path_exists): - self.mock_object(nfs_base.LOG, 'info') - self.mock_object(utils, 'synchronized', return_value=lambda f: f) - self.mock_object(self.driver, '_get_mount_point_for_share', - return_value='dir') - self.mock_object(os.path, 'exists', return_value=path_exists) - self.mock_object(self.driver, '_clone_backing_file_for_volume') - self.mock_object(os, 'utime') - - retval = self.driver._do_clone_rel_img_cache( - fake.CLONE_SOURCE_NAME, fake.CLONE_DESTINATION_NAME, - fake.NFS_SHARE, 'fake_cache_file') - - self.assertIsNone(retval) - self.assertTrue(self.driver._get_mount_point_for_share.called) - if not path_exists: - self.driver._clone_backing_file_for_volume.assert_called_once_with( - fake.CLONE_SOURCE_NAME, fake.CLONE_DESTINATION_NAME, - share=fake.NFS_SHARE, volume_id=None) - os.utime.assert_called_once_with( - 'dir/' + fake.CLONE_SOURCE_NAME, None) - else: - self.driver._clone_backing_file_for_volume.assert_not_called() - os.utime.assert_not_called() - - os.path.exists.assert_called_once_with( - 'dir/' + fake.CLONE_DESTINATION_NAME) - - def test__spawn_clean_cache_job_clean_job_setup(self): - self.driver.cleaning = True - mock_debug_log = self.mock_object(nfs_base.LOG, 'debug') - self.mock_object(utils, 'synchronized', 
return_value=lambda f: f) - - retval = self.driver._spawn_clean_cache_job() - - self.assertIsNone(retval) - self.assertEqual(1, mock_debug_log.call_count) - - def test__spawn_clean_cache_job_new_clean_job(self): - - class FakeTimer(object): - def start(self): - pass - - fake_timer = FakeTimer() - self.mock_object(utils, 'synchronized', return_value=lambda f: f) - self.mock_object(fake_timer, 'start') - self.mock_object(nfs_base.LOG, 'debug') - self.mock_object(self.driver, '_clean_image_cache') - self.mock_object(threading, 'Timer', return_value=fake_timer) - - retval = self.driver._spawn_clean_cache_job() - - self.assertIsNone(retval) - threading.Timer.assert_called_once_with( - 0, self.driver._clean_image_cache) - fake_timer.start.assert_called_once_with() - - def test_cleanup_volume_on_failure(self): - path = '%s/%s' % (fake.NFS_SHARE, fake.NFS_VOLUME['name']) - mock_local_path = self.mock_object(self.driver, 'local_path') - mock_local_path.return_value = path - mock_exists_check = self.mock_object(os.path, 'exists') - mock_exists_check.return_value = True - mock_delete = self.mock_object(self.driver, '_delete_file_at_path') - - self.driver._cleanup_volume_on_failure(fake.NFS_VOLUME) - - mock_delete.assert_has_calls([mock.call(path)]) - - def test_cleanup_volume_on_failure_no_path(self): - self.mock_object(self.driver, 'local_path') - mock_exists_check = self.mock_object(os.path, 'exists') - mock_exists_check.return_value = False - mock_delete = self.mock_object(self.driver, '_delete_file_at_path') - - self.driver._cleanup_volume_on_failure(fake.NFS_VOLUME) - - self.assertEqual(0, mock_delete.call_count) - - def test_get_export_ip_path_volume_id_provided(self): - mock_get_host_ip = self.mock_object(self.driver, '_get_host_ip') - mock_get_host_ip.return_value = fake.IPV4_ADDRESS - - mock_get_export_path = self.mock_object( - self.driver, '_get_export_path') - mock_get_export_path.return_value = fake.EXPORT_PATH - - expected = (fake.IPV4_ADDRESS, fake.EXPORT_PATH) 
- - result = self.driver._get_export_ip_path(fake.VOLUME_ID) - - self.assertEqual(expected, result) - - def test_get_export_ip_path_share_provided(self): - expected = (fake.SHARE_IP, fake.EXPORT_PATH) - - result = self.driver._get_export_ip_path(share=fake.NFS_SHARE) - - self.assertEqual(expected, result) - - def test_get_export_ip_path_volume_id_and_share_provided(self): - mock_get_host_ip = self.mock_object(self.driver, '_get_host_ip') - mock_get_host_ip.return_value = fake.IPV4_ADDRESS - - mock_get_export_path = self.mock_object( - self.driver, '_get_export_path') - mock_get_export_path.return_value = fake.EXPORT_PATH - - expected = (fake.IPV4_ADDRESS, fake.EXPORT_PATH) - - result = self.driver._get_export_ip_path( - fake.VOLUME_ID, fake.NFS_SHARE) - - self.assertEqual(expected, result) - - def test_get_export_ip_path_no_args(self): - self.assertRaises(exception.InvalidInput, - self.driver._get_export_ip_path) - - def test_get_host_ip(self): - mock_get_provider_location = self.mock_object( - self.driver, '_get_provider_location') - mock_get_provider_location.return_value = fake.NFS_SHARE - expected = fake.SHARE_IP - - result = self.driver._get_host_ip(fake.VOLUME_ID) - - self.assertEqual(expected, result) - - def test_get_export_path(self): - mock_get_provider_location = self.mock_object( - self.driver, '_get_provider_location') - mock_get_provider_location.return_value = fake.NFS_SHARE - expected = fake.EXPORT_PATH - - result = self.driver._get_export_path(fake.VOLUME_ID) - - self.assertEqual(expected, result) - - def test_construct_image_url_loc(self): - img_loc = fake.FAKE_IMAGE_LOCATION - - locations = self.driver._construct_image_nfs_url(img_loc) - - self.assertIn("nfs://host/path/image-id-0", locations) - self.assertIn("nfs://host/path/image-id-6", locations) - self.assertEqual(2, len(locations)) - - def test_construct_image_url_direct(self): - img_loc = ("nfs://host/path/image-id", None) - - locations = self.driver._construct_image_nfs_url(img_loc) - - 
self.assertIn("nfs://host/path/image-id", locations) - - def test_extend_volume(self): - - new_size = 100 - volume_copy = copy.copy(fake.VOLUME) - volume_copy['size'] = new_size - - path = '%s/%s' % (fake.NFS_SHARE, fake.NFS_VOLUME['name']) - self.mock_object(self.driver, - 'local_path', - return_value=path) - mock_resize_image_file = self.mock_object(self.driver, - '_resize_image_file') - mock_get_volume_extra_specs = self.mock_object( - na_utils, 'get_volume_extra_specs', return_value=fake.EXTRA_SPECS) - mock_do_qos_for_volume = self.mock_object(self.driver, - '_do_qos_for_volume') - - self.driver.extend_volume(fake.VOLUME, new_size) - - mock_resize_image_file.assert_called_once_with(path, new_size) - mock_get_volume_extra_specs.assert_called_once_with(fake.VOLUME) - mock_do_qos_for_volume.assert_called_once_with(volume_copy, - fake.EXTRA_SPECS, - cleanup=False) - - def test_extend_volume_resize_error(self): - - new_size = 100 - volume_copy = copy.copy(fake.VOLUME) - volume_copy['size'] = new_size - - path = '%s/%s' % (fake.NFS_SHARE, fake.NFS_VOLUME['name']) - self.mock_object(self.driver, - 'local_path', - return_value=path) - mock_resize_image_file = self.mock_object( - self.driver, '_resize_image_file', - side_effect=netapp_api.NaApiError) - mock_get_volume_extra_specs = self.mock_object( - na_utils, 'get_volume_extra_specs', return_value=fake.EXTRA_SPECS) - mock_do_qos_for_volume = self.mock_object(self.driver, - '_do_qos_for_volume') - - self.assertRaises(exception.VolumeBackendAPIException, - self.driver.extend_volume, - fake.VOLUME, - new_size) - - mock_resize_image_file.assert_called_once_with(path, new_size) - self.assertFalse(mock_get_volume_extra_specs.called) - self.assertFalse(mock_do_qos_for_volume.called) - - def test_extend_volume_qos_error(self): - - new_size = 100 - volume_copy = copy.copy(fake.VOLUME) - volume_copy['size'] = new_size - - path = '%s/%s' % (fake.NFS_SHARE, fake.NFS_VOLUME['name']) - self.mock_object(self.driver, - 'local_path', 
- return_value=path) - mock_resize_image_file = self.mock_object(self.driver, - '_resize_image_file') - mock_get_volume_extra_specs = self.mock_object( - na_utils, 'get_volume_extra_specs', - return_value=fake.EXTRA_SPECS) - mock_do_qos_for_volume = self.mock_object( - self.driver, '_do_qos_for_volume', - side_effect=netapp_api.NaApiError) - - self.assertRaises(exception.VolumeBackendAPIException, - self.driver.extend_volume, - fake.VOLUME, - new_size) - - mock_resize_image_file.assert_called_once_with(path, new_size) - mock_get_volume_extra_specs.assert_called_once_with(fake.VOLUME) - mock_do_qos_for_volume.assert_called_once_with(volume_copy, - fake.EXTRA_SPECS, - cleanup=False) - - def test_is_share_clone_compatible(self): - self.assertRaises(NotImplementedError, - self.driver._is_share_clone_compatible, - fake.NFS_VOLUME, - fake.NFS_SHARE) - - @ddt.data( - {'size': 12, 'thin': False, 'over': 1.0, 'res': 0, 'expected': True}, - {'size': 12, 'thin': False, 'over': 1.0, 'res': 5, 'expected': False}, - {'size': 12, 'thin': True, 'over': 1.0, 'res': 5, 'expected': False}, - {'size': 12, 'thin': True, 'over': 1.1, 'res': 5, 'expected': True}, - {'size': 240, 'thin': True, 'over': 20.0, 'res': 0, 'expected': True}, - {'size': 241, 'thin': True, 'over': 20.0, 'res': 0, 'expected': False}, - ) - @ddt.unpack - def test_share_has_space_for_clone(self, size, thin, over, res, expected): - total_bytes = 20 * units.Gi - available_bytes = 12 * units.Gi - - with mock.patch.object(self.driver, - '_get_capacity_info', - return_value=( - total_bytes, available_bytes)): - with mock.patch.object(self.driver, - 'max_over_subscription_ratio', - over): - with mock.patch.object(self.driver, - 'reserved_percentage', - res): - result = self.driver._share_has_space_for_clone( - fake.NFS_SHARE, - size, - thin=thin) - self.assertEqual(expected, result) - - @ddt.data( - {'size': 12, 'thin': False, 'over': 1.0, 'res': 0, 'expected': True}, - {'size': 12, 'thin': False, 'over': 1.0, 'res': 5, 
'expected': False}, - {'size': 12, 'thin': True, 'over': 1.0, 'res': 5, 'expected': False}, - {'size': 12, 'thin': True, 'over': 1.1, 'res': 5, 'expected': True}, - {'size': 240, 'thin': True, 'over': 20.0, 'res': 0, 'expected': True}, - {'size': 241, 'thin': True, 'over': 20.0, 'res': 0, 'expected': False}, - ) - @ddt.unpack - @mock.patch.object(nfs_base.NetAppNfsDriver, '_get_capacity_info') - def test_share_has_space_for_clone2(self, - mock_get_capacity, - size, thin, over, res, expected): - total_bytes = 20 * units.Gi - available_bytes = 12 * units.Gi - mock_get_capacity.return_value = (total_bytes, available_bytes) - - with mock.patch.object(self.driver, - 'max_over_subscription_ratio', - over): - with mock.patch.object(self.driver, - 'reserved_percentage', - res): - result = self.driver._share_has_space_for_clone( - fake.NFS_SHARE, - size, - thin=thin) - self.assertEqual(expected, result) - - def test_get_share_mount_and_vol_from_vol_ref(self): - self.mock_object(na_utils, 'resolve_hostname', - return_value='10.12.142.11') - self.mock_object(os.path, 'isfile', return_value=True) - self.driver._mounted_shares = [self.fake_nfs_export_1] - vol_path = "%s/%s" % (self.fake_nfs_export_1, 'test_file_name') - vol_ref = {'source-name': vol_path} - self.driver._ensure_shares_mounted = mock.Mock() - self.driver._get_mount_point_for_share = mock.Mock( - return_value=self.fake_mount_point) - - (share, mount, file_path) = ( - self.driver._get_share_mount_and_vol_from_vol_ref(vol_ref)) - - self.assertEqual(self.fake_nfs_export_1, share) - self.assertEqual(self.fake_mount_point, mount) - self.assertEqual('test_file_name', file_path) - - def test_get_share_mount_and_vol_from_vol_ref_with_bad_ref(self): - self.mock_object(na_utils, 'resolve_hostname', - return_value='10.12.142.11') - self.driver._mounted_shares = [self.fake_nfs_export_1] - vol_ref = {'source-id': '1234546'} - - self.driver._ensure_shares_mounted = mock.Mock() - self.driver._get_mount_point_for_share = 
mock.Mock( - return_value=self.fake_mount_point) - - self.assertRaises(exception.ManageExistingInvalidReference, - self.driver._get_share_mount_and_vol_from_vol_ref, - vol_ref) - - def test_get_share_mount_and_vol_from_vol_ref_where_not_found(self): - self.mock_object(na_utils, 'resolve_hostname', - return_value='10.12.142.11') - self.driver._mounted_shares = [self.fake_nfs_export_1] - vol_path = "%s/%s" % (self.fake_nfs_export_2, 'test_file_name') - vol_ref = {'source-name': vol_path} - - self.driver._ensure_shares_mounted = mock.Mock() - self.driver._get_mount_point_for_share = mock.Mock( - return_value=self.fake_mount_point) - - self.assertRaises(exception.ManageExistingInvalidReference, - self.driver._get_share_mount_and_vol_from_vol_ref, - vol_ref) - - def test_get_share_mount_and_vol_from_vol_ref_where_is_dir(self): - self.mock_object(na_utils, 'resolve_hostname', - return_value='10.12.142.11') - self.driver._mounted_shares = [self.fake_nfs_export_1] - vol_ref = {'source-name': self.fake_nfs_export_2} - - self.driver._ensure_shares_mounted = mock.Mock() - self.driver._get_mount_point_for_share = mock.Mock( - return_value=self.fake_mount_point) - - self.assertRaises(exception.ManageExistingInvalidReference, - self.driver._get_share_mount_and_vol_from_vol_ref, - vol_ref) - - @ddt.data(None, - {'replication_status': fields.ReplicationStatus.ENABLED}) - def test_manage_existing(self, model_update): - self.mock_object(utils, 'get_file_size', - return_value=1074253824) - self.driver._mounted_shares = [self.fake_nfs_export_1] - test_file = 'test_file_name' - volume = fake.FAKE_MANAGE_VOLUME - vol_path = "%s/%s" % (self.fake_nfs_export_1, test_file) - vol_ref = {'source-name': vol_path} - self.driver._check_volume_type = mock.Mock() - shutil.move = mock.Mock() - self.mock_object(self.driver, '_execute') - self.driver._ensure_shares_mounted = mock.Mock() - self.driver._get_mount_point_for_share = mock.Mock( - return_value=self.fake_mount_point) - 
self.driver._get_share_mount_and_vol_from_vol_ref = mock.Mock( - return_value=(self.fake_nfs_export_1, self.fake_mount_point, - test_file)) - mock_get_specs = self.mock_object(na_utils, 'get_volume_extra_specs') - mock_get_specs.return_value = {} - self.mock_object(self.driver, '_do_qos_for_volume') - self.mock_object(self.driver, '_get_volume_model_update', - return_value=model_update) - - actual_model_update = self.driver.manage_existing(volume, vol_ref) - - self.assertEqual( - self.fake_nfs_export_1, actual_model_update['provider_location']) - if model_update: - self.assertEqual(model_update['replication_status'], - actual_model_update['replication_status']) - else: - self.assertFalse('replication_status' in actual_model_update) - self.driver._check_volume_type.assert_called_once_with( - volume, self.fake_nfs_export_1, test_file, {}) - - def test_manage_existing_move_fails(self): - self.mock_object(utils, 'get_file_size', return_value=1074253824) - self.driver._mounted_shares = [self.fake_nfs_export_1] - test_file = 'test_file_name' - volume = fake.FAKE_MANAGE_VOLUME - vol_path = "%s/%s" % (self.fake_nfs_export_1, test_file) - vol_ref = {'source-name': vol_path} - mock_check_volume_type = self.driver._check_volume_type = mock.Mock() - self.driver._ensure_shares_mounted = mock.Mock() - self.driver._get_mount_point_for_share = mock.Mock( - return_value=self.fake_mount_point) - self.driver._get_share_mount_and_vol_from_vol_ref = mock.Mock( - return_value=(self.fake_nfs_export_1, self.fake_mount_point, - test_file)) - self.driver._execute = mock.Mock(side_effect=OSError) - mock_get_specs = self.mock_object(na_utils, 'get_volume_extra_specs') - mock_get_specs.return_value = {} - self.mock_object(self.driver, '_do_qos_for_volume') - - self.assertRaises(exception.VolumeBackendAPIException, - self.driver.manage_existing, volume, vol_ref) - - mock_check_volume_type.assert_called_once_with( - volume, self.fake_nfs_export_1, test_file, {}) - - def test_unmanage(self): - 
mock_log = self.mock_object(nfs_base, 'LOG') - volume = {'id': '123', 'provider_location': '/share'} - - retval = self.driver.unmanage(volume) - - self.assertIsNone(retval) - self.assertEqual(1, mock_log.info.call_count) - - def test_manage_existing_get_size(self): - test_file = 'test_file_name' - self.driver._get_share_mount_and_vol_from_vol_ref = mock.Mock( - return_value=(self.fake_nfs_export_1, self.fake_mount_point, - test_file)) - self.mock_object(utils, 'get_file_size', return_value=1073741824) - self.driver._mounted_shares = [self.fake_nfs_export_1] - volume = fake.FAKE_MANAGE_VOLUME - vol_path = "%s/%s" % (self.fake_nfs_export_1, test_file) - vol_ref = {'source-name': vol_path} - - self.driver._ensure_shares_mounted = mock.Mock() - self.driver._get_mount_point_for_share = mock.Mock( - return_value=self.fake_mount_point) - - vol_size = self.driver.manage_existing_get_size(volume, vol_ref) - - self.assertEqual(1, vol_size) - - def test_manage_existing_get_size_round_up(self): - test_file = 'test_file_name' - self.driver._get_share_mount_and_vol_from_vol_ref = mock.Mock( - return_value=(self.fake_nfs_export_1, self.fake_mount_point, - test_file)) - self.mock_object(utils, 'get_file_size', return_value=1073760270) - self.driver._mounted_shares = [self.fake_nfs_export_1] - volume = fake.FAKE_MANAGE_VOLUME - vol_path = "%s/%s" % (self.fake_nfs_export_1, test_file) - vol_ref = {'source-name': vol_path} - - self.driver._ensure_shares_mounted = mock.Mock() - self.driver._get_mount_point_for_share = mock.Mock( - return_value=self.fake_mount_point) - - vol_size = self.driver.manage_existing_get_size(volume, vol_ref) - - self.assertEqual(2, vol_size) - - def test_manage_existing_get_size_error(self): - test_file = 'test_file_name' - self.driver._get_share_mount_and_vol_from_vol_ref = mock.Mock( - return_value=(self.fake_nfs_export_1, self.fake_mount_point, - test_file)) - self.driver._mounted_shares = [self.fake_nfs_export_1] - volume = fake.FAKE_MANAGE_VOLUME - 
vol_path = "%s/%s" % (self.fake_nfs_export_1, test_file) - vol_ref = {'source-name': vol_path} - - self.driver._ensure_shares_mounted = mock.Mock() - self.driver._get_mount_point_for_share = mock.Mock( - return_value=self.fake_mount_point) - - self.assertRaises(exception.VolumeBackendAPIException, - self.driver.manage_existing_get_size, - volume, - vol_ref) - - def test_create_consistency_group(self): - model_update = self.driver.create_consistencygroup( - fake.CG_CONTEXT, fake.CONSISTENCY_GROUP) - self.assertEqual('available', model_update['status']) - - @ddt.data(True, False) - def test_delete_file(self, volume_not_present): - mock_get_provider_location = self.mock_object( - self.driver, '_get_provider_location') - mock_get_provider_location.return_value = fake.NFS_SHARE - mock_volume_not_present = self.mock_object( - self.driver, '_volume_not_present') - mock_volume_not_present.return_value = volume_not_present - mock_get_volume_path = self.mock_object( - self.driver, '_get_volume_path') - mock_get_volume_path.return_value = fake.PATH - mock_delete = self.mock_object(self.driver, '_delete') - - self.driver._delete_file(fake.CG_VOLUME_ID, fake.CG_VOLUME_NAME) - - mock_get_provider_location.assert_called_once_with(fake.CG_VOLUME_ID) - mock_volume_not_present.assert_called_once_with( - fake.NFS_SHARE, fake.CG_VOLUME_NAME) - if not volume_not_present: - mock_get_volume_path.assert_called_once_with( - fake.NFS_SHARE, fake.CG_VOLUME_NAME) - mock_delete.assert_called_once_with(fake.PATH) - - def test_delete_file_volume_not_present(self): - mock_get_provider_location = self.mock_object( - self.driver, '_get_provider_location') - mock_get_provider_location.return_value = fake.NFS_SHARE - mock_volume_not_present = self.mock_object( - self.driver, '_volume_not_present') - mock_volume_not_present.return_value = True - mock_get_volume_path = self.mock_object( - self.driver, '_get_volume_path') - mock_delete = self.mock_object(self.driver, '_delete') - - 
self.driver._delete_file(fake.CG_VOLUME_ID, fake.CG_VOLUME_NAME) - - mock_get_provider_location.assert_called_once_with(fake.CG_VOLUME_ID) - mock_volume_not_present.assert_called_once_with( - fake.NFS_SHARE, fake.CG_VOLUME_NAME) - mock_get_volume_path.assert_not_called() - mock_delete.assert_not_called() - - def test_update_consistencygroup(self): - model_update, add_volumes_update, remove_volumes_update = ( - self.driver.update_consistencygroup(fake.CG_CONTEXT, "foo")) - self.assertIsNone(add_volumes_update) - self.assertIsNone(remove_volumes_update) - - @ddt.data(None, - {'replication_status': fields.ReplicationStatus.ENABLED}) - def test_create_consistencygroup_from_src(self, volume_model_update): - volume_model_update = volume_model_update or {} - volume_model_update.update( - {'provider_location': fake.PROVIDER_LOCATION}) - mock_create_volume_from_snapshot = self.mock_object( - self.driver, 'create_volume_from_snapshot', - return_value=volume_model_update) - - model_update, volumes_model_update = ( - self.driver.create_consistencygroup_from_src( - fake.CG_CONTEXT, fake.CONSISTENCY_GROUP, [fake.VOLUME], - cgsnapshot=fake.CG_SNAPSHOT, snapshots=[fake.SNAPSHOT])) - - expected_volumes_model_updates = [{'id': fake.VOLUME['id']}] - expected_volumes_model_updates[0].update(volume_model_update) - mock_create_volume_from_snapshot.assert_called_once_with( - fake.VOLUME, fake.SNAPSHOT) - self.assertIsNone(model_update) - self.assertEqual(expected_volumes_model_updates, volumes_model_update) - - @ddt.data(None, - {'replication_status': fields.ReplicationStatus.ENABLED}) - def test_create_consistencygroup_from_src_source_vols( - self, volume_model_update): - mock_get_snapshot_flexvols = self.mock_object( - self.driver, '_get_flexvol_names_from_hosts') - mock_get_snapshot_flexvols.return_value = (set([fake.CG_POOL_NAME])) - mock_clone_backing_file = self.mock_object( - self.driver, '_clone_backing_file_for_volume') - fake_snapshot_name = 'snapshot-temp-' + 
fake.CONSISTENCY_GROUP['id'] - mock_busy = self.mock_object( - self.driver.zapi_client, 'wait_for_busy_snapshot') - self.mock_object(self.driver, '_get_volume_model_update', - return_value=volume_model_update) - - model_update, volumes_model_update = ( - self.driver.create_consistencygroup_from_src( - fake.CG_CONTEXT, fake.CONSISTENCY_GROUP, [fake.VOLUME], - source_cg=fake.CONSISTENCY_GROUP, - source_vols=[fake.NFS_VOLUME])) - - expected_volumes_model_updates = [{ - 'id': fake.NFS_VOLUME['id'], - 'provider_location': fake.PROVIDER_LOCATION, - }] - if volume_model_update: - expected_volumes_model_updates[0].update(volume_model_update) - mock_get_snapshot_flexvols.assert_called_once_with( - [fake.NFS_VOLUME['host']]) - self.driver.zapi_client.create_cg_snapshot.assert_called_once_with( - set([fake.CG_POOL_NAME]), fake_snapshot_name) - mock_clone_backing_file.assert_called_once_with( - fake.NFS_VOLUME['name'], fake.VOLUME['name'], - fake.NFS_VOLUME['id'], source_snapshot=fake_snapshot_name) - mock_busy.assert_called_once_with( - fake.CG_POOL_NAME, fake_snapshot_name) - self.driver.zapi_client.delete_snapshot.assert_called_once_with( - fake.CG_POOL_NAME, fake_snapshot_name) - self.assertIsNone(model_update) - self.assertEqual(expected_volumes_model_updates, volumes_model_update) - - def test_create_consistencygroup_from_src_invalid_parms(self): - - model_update, volumes_model_update = ( - self.driver.create_consistencygroup_from_src( - fake.CG_CONTEXT, fake.CONSISTENCY_GROUP, [fake.VOLUME])) - - self.assertIn('error', model_update['status']) - - def test_create_cgsnapshot(self): - snapshot = fake.CG_SNAPSHOT - snapshot['volume'] = fake.CG_VOLUME - mock_get_snapshot_flexvols = self.mock_object( - self.driver, '_get_flexvol_names_from_hosts') - mock_get_snapshot_flexvols.return_value = (set([fake.CG_POOL_NAME])) - mock_clone_backing_file = self.mock_object( - self.driver, '_clone_backing_file_for_volume') - mock_busy = self.mock_object( - self.driver.zapi_client, 
'wait_for_busy_snapshot') - - self.driver.create_cgsnapshot( - fake.CG_CONTEXT, fake.CG_SNAPSHOT, [snapshot]) - - mock_get_snapshot_flexvols.assert_called_once_with( - [snapshot['volume']['host']]) - self.driver.zapi_client.create_cg_snapshot.assert_called_once_with( - set([fake.CG_POOL_NAME]), fake.CG_SNAPSHOT_ID) - mock_clone_backing_file.assert_called_once_with( - snapshot['volume']['name'], snapshot['name'], - snapshot['volume']['id'], source_snapshot=fake.CG_SNAPSHOT_ID) - mock_busy.assert_called_once_with( - fake.CG_POOL_NAME, fake.CG_SNAPSHOT_ID) - self.driver.zapi_client.delete_snapshot.assert_called_once_with( - fake.CG_POOL_NAME, fake.CG_SNAPSHOT_ID) - - def test_create_cgsnapshot_busy_snapshot(self): - snapshot = fake.CG_SNAPSHOT - snapshot['volume'] = fake.CG_VOLUME - mock_get_snapshot_flexvols = self.mock_object( - self.driver, '_get_flexvol_names_from_hosts') - mock_get_snapshot_flexvols.return_value = (set([fake.CG_POOL_NAME])) - mock_clone_backing_file = self.mock_object( - self.driver, '_clone_backing_file_for_volume') - mock_busy = self.mock_object( - self.driver.zapi_client, 'wait_for_busy_snapshot') - mock_busy.side_effect = exception.SnapshotIsBusy(snapshot['name']) - mock_mark_snapshot_for_deletion = self.mock_object( - self.zapi_client, 'mark_snapshot_for_deletion') - - self.driver.create_cgsnapshot( - fake.CG_CONTEXT, fake.CG_SNAPSHOT, [snapshot]) - - mock_get_snapshot_flexvols.assert_called_once_with( - [snapshot['volume']['host']]) - self.driver.zapi_client.create_cg_snapshot.assert_called_once_with( - set([fake.CG_POOL_NAME]), fake.CG_SNAPSHOT_ID) - mock_clone_backing_file.assert_called_once_with( - snapshot['volume']['name'], snapshot['name'], - snapshot['volume']['id'], source_snapshot=fake.CG_SNAPSHOT_ID) - mock_busy.assert_called_once_with( - fake.CG_POOL_NAME, fake.CG_SNAPSHOT_ID) - self.driver.zapi_client.delete_snapshot.assert_not_called() - mock_mark_snapshot_for_deletion.assert_called_once_with( - fake.CG_POOL_NAME, 
fake.CG_SNAPSHOT_ID) - - def test_delete_consistencygroup_volume_delete_failure(self): - self.mock_object(self.driver, '_delete_file', side_effect=Exception) - - model_update, volumes = self.driver.delete_consistencygroup( - fake.CG_CONTEXT, fake.CONSISTENCY_GROUP, [fake.CG_VOLUME]) - - self.assertEqual('deleted', model_update['status']) - self.assertEqual('error_deleting', volumes[0]['status']) - - def test_delete_consistencygroup(self): - mock_delete_file = self.mock_object( - self.driver, '_delete_file') - - model_update, volumes = self.driver.delete_consistencygroup( - fake.CG_CONTEXT, fake.CONSISTENCY_GROUP, [fake.CG_VOLUME]) - - self.assertEqual('deleted', model_update['status']) - self.assertEqual('deleted', volumes[0]['status']) - mock_delete_file.assert_called_once_with( - fake.CG_VOLUME_ID, fake.CG_VOLUME_NAME) - - def test_check_for_setup_error(self): - super_check_for_setup_error = self.mock_object( - nfs.NfsDriver, 'check_for_setup_error') - mock_start_tasks = self.mock_object( - self.driver.loopingcalls, 'start_tasks') - - self.driver.check_for_setup_error() - - super_check_for_setup_error.assert_called_once_with() - mock_start_tasks.assert_called_once_with() - - def test_add_looping_tasks(self): - mock_add_task = self.mock_object(self.driver.loopingcalls, 'add_task') - mock_call_snap_cleanup = self.mock_object( - self.driver, '_delete_snapshots_marked_for_deletion') - mock_call_ems_logging = self.mock_object( - self.driver, '_handle_ems_logging') - - self.driver._add_looping_tasks() - - mock_add_task.assert_has_calls([ - mock.call(mock_call_snap_cleanup, loopingcalls.ONE_MINUTE, - loopingcalls.ONE_MINUTE), - mock.call(mock_call_ems_logging, loopingcalls.ONE_HOUR)]) - - def test_delete_snapshots_marked_for_deletion(self): - snapshots = [{ - 'name': fake.SNAPSHOT_NAME, - 'volume_name': fake.VOLUME['name'] - }] - mock_get_flexvol_names = self.mock_object( - self.driver, '_get_backing_flexvol_names') - mock_get_flexvol_names.return_value = 
[fake.VOLUME['name']] - mock_get_snapshots_marked = self.mock_object( - self.zapi_client, 'get_snapshots_marked_for_deletion') - mock_get_snapshots_marked.return_value = snapshots - mock_delete_snapshot = self.mock_object( - self.zapi_client, 'delete_snapshot') - - self.driver._delete_snapshots_marked_for_deletion() - - mock_get_flexvol_names.assert_called_once_with() - mock_get_snapshots_marked.assert_called_once_with( - [fake.VOLUME['name']]) - mock_delete_snapshot.assert_called_once_with( - fake.VOLUME['name'], fake.SNAPSHOT_NAME) diff --git a/cinder/tests/unit/volume/drivers/netapp/dataontap/test_nfs_cmode.py b/cinder/tests/unit/volume/drivers/netapp/dataontap/test_nfs_cmode.py deleted file mode 100644 index 0a50a36cf..000000000 --- a/cinder/tests/unit/volume/drivers/netapp/dataontap/test_nfs_cmode.py +++ /dev/null @@ -1,1469 +0,0 @@ -# Copyright (c) 2014 Andrew Kerr. All rights reserved. -# Copyright (c) 2015 Tom Barron. All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
-""" -Mock unit tests for the NetApp cmode nfs storage driver -""" - -import hashlib -import uuid - -import ddt -import mock -from os_brick.remotefs import remotefs as remotefs_brick -from oslo_utils import units - -from cinder import exception -from cinder.image import image_utils -from cinder import test -from cinder.tests.unit.volume.drivers.netapp.dataontap import fakes as fake -from cinder.tests.unit.volume.drivers.netapp.dataontap.utils import fakes as \ - fake_ssc -from cinder.tests.unit.volume.drivers.netapp import fakes as na_fakes -from cinder import utils -from cinder.volume.drivers.netapp.dataontap.client import api as netapp_api -from cinder.volume.drivers.netapp.dataontap.client import client_cmode -from cinder.volume.drivers.netapp.dataontap import nfs_base -from cinder.volume.drivers.netapp.dataontap import nfs_cmode -from cinder.volume.drivers.netapp.dataontap.performance import perf_cmode -from cinder.volume.drivers.netapp.dataontap.utils import data_motion -from cinder.volume.drivers.netapp.dataontap.utils import loopingcalls -from cinder.volume.drivers.netapp.dataontap.utils import utils as dot_utils -from cinder.volume.drivers.netapp import utils as na_utils -from cinder.volume.drivers import nfs -from cinder.volume import utils as volume_utils - - -@ddt.ddt -class NetAppCmodeNfsDriverTestCase(test.TestCase): - def setUp(self): - super(NetAppCmodeNfsDriverTestCase, self).setUp() - - kwargs = { - 'configuration': self.get_config_cmode(), - 'host': 'openstack@nfscmode', - } - - with mock.patch.object(utils, 'get_root_helper', - return_value=mock.Mock()): - with mock.patch.object(remotefs_brick, 'RemoteFsClient', - return_value=mock.Mock()): - self.driver = nfs_cmode.NetAppCmodeNfsDriver(**kwargs) - self.driver._mounted_shares = [fake.NFS_SHARE] - self.driver.ssc_vols = True - self.driver.vserver = fake.VSERVER_NAME - self.driver.ssc_enabled = True - self.driver.perf_library = mock.Mock() - self.driver.ssc_library = mock.Mock() - 
self.driver.zapi_client = mock.Mock() - - def get_config_cmode(self): - config = na_fakes.create_configuration_cmode() - config.netapp_storage_protocol = 'nfs' - config.netapp_login = 'admin' - config.netapp_password = 'pass' - config.netapp_server_hostname = '127.0.0.1' - config.netapp_transport_type = 'http' - config.netapp_server_port = '80' - config.netapp_vserver = fake.VSERVER_NAME - config.netapp_copyoffload_tool_path = 'copyoffload_tool_path' - return config - - @ddt.data({'active_backend_id': None, 'targets': ['dev1', 'dev2']}, - {'active_backend_id': None, 'targets': []}, - {'active_backend_id': 'dev1', 'targets': []}, - {'active_backend_id': 'dev1', 'targets': ['dev1', 'dev2']}) - @ddt.unpack - def test_init_driver_for_replication(self, active_backend_id, - targets): - kwargs = { - 'configuration': self.get_config_cmode(), - 'host': 'openstack@nfscmode', - 'active_backend_id': active_backend_id, - } - self.mock_object(data_motion.DataMotionMixin, - 'get_replication_backend_names', - return_value=targets) - with mock.patch.object(utils, 'get_root_helper', - return_value=mock.Mock()): - with mock.patch.object(remotefs_brick, 'RemoteFsClient', - return_value=mock.Mock()): - nfs_driver = nfs_cmode.NetAppCmodeNfsDriver(**kwargs) - - self.assertEqual(active_backend_id, - nfs_driver.failed_over_backend_name) - self.assertEqual(active_backend_id is not None, - nfs_driver.failed_over) - self.assertEqual(len(targets) > 0, - nfs_driver.replication_enabled) - - @mock.patch.object(perf_cmode, 'PerformanceCmodeLibrary', mock.Mock()) - @mock.patch.object(client_cmode, 'Client', mock.Mock()) - @mock.patch.object(nfs.NfsDriver, 'do_setup') - @mock.patch.object(na_utils, 'check_flags') - def test_do_setup(self, mock_check_flags, mock_super_do_setup): - self.mock_object( - dot_utils, 'get_backend_configuration', - return_value=self.get_config_cmode()) - self.driver.do_setup(mock.Mock()) - - self.assertTrue(mock_check_flags.called) - 
self.assertTrue(mock_super_do_setup.called) - - def test__update_volume_stats(self): - mock_debug_log = self.mock_object(nfs_cmode.LOG, 'debug') - self.mock_object(self.driver, 'get_filter_function') - self.mock_object(self.driver, 'get_goodness_function') - self.mock_object(self.driver, '_spawn_clean_cache_job') - self.driver.zapi_client = mock.Mock() - self.mock_object(self.driver, '_get_pool_stats', return_value={}) - expected_stats = { - 'driver_version': self.driver.VERSION, - 'pools': {}, - 'sparse_copy_volume': True, - 'replication_enabled': False, - 'storage_protocol': 'nfs', - 'vendor_name': 'NetApp', - 'volume_backend_name': 'NetApp_NFS_Cluster_direct', - } - - retval = self.driver._update_volume_stats() - - self.assertIsNone(retval) - self.assertTrue(self.driver._spawn_clean_cache_job.called) - self.assertEqual(1, mock_debug_log.call_count) - self.assertEqual(expected_stats, self.driver._stats) - - @ddt.data([], ['target_1', 'target_2']) - def test_get_pool_stats(self, replication_backends): - - self.driver.zapi_client = mock.Mock() - ssc = { - 'vola': { - 'pool_name': '10.10.10.10:/vola', - 'thick_provisioning_support': True, - 'thin_provisioning_support': False, - 'netapp_thin_provisioned': 'false', - 'netapp_compression': 'false', - 'netapp_mirrored': 'false', - 'netapp_dedup': 'true', - 'netapp_aggregate': 'aggr1', - 'netapp_raid_type': 'raid_dp', - 'netapp_disk_type': 'SSD', - 'consistencygroup_support': True, - }, - } - mock_get_ssc = self.mock_object(self.driver.ssc_library, - 'get_ssc', - return_value=ssc) - mock_get_aggrs = self.mock_object(self.driver.ssc_library, - 'get_ssc_aggregates', - return_value=['aggr1']) - - self.mock_object(self.driver, 'get_replication_backend_names', - return_value=replication_backends) - - total_capacity_gb = na_utils.round_down( - fake.TOTAL_BYTES // units.Gi, '0.01') - free_capacity_gb = na_utils.round_down( - fake.AVAILABLE_BYTES // units.Gi, '0.01') - provisioned_capacity_gb = total_capacity_gb - 
free_capacity_gb - capacity = { - 'reserved_percentage': fake.RESERVED_PERCENTAGE, - 'max_over_subscription_ratio': fake.MAX_OVER_SUBSCRIPTION_RATIO, - 'total_capacity_gb': total_capacity_gb, - 'free_capacity_gb': free_capacity_gb, - 'provisioned_capacity_gb': provisioned_capacity_gb, - } - self.mock_object(self.driver, - '_get_share_capacity_info', - return_value=capacity) - self.mock_object(self.driver.zapi_client, - 'get_flexvol_dedupe_used_percent', - return_value=55.0) - - aggr_capacities = { - 'aggr1': { - 'percent-used': 45, - 'size-available': 59055800320.0, - 'size-total': 107374182400.0, - }, - } - mock_get_aggr_capacities = self.mock_object( - self.driver.zapi_client, 'get_aggregate_capacities', - return_value=aggr_capacities) - - self.driver.perf_library.get_node_utilization_for_pool = ( - mock.Mock(return_value=30.0)) - - result = self.driver._get_pool_stats(filter_function='filter', - goodness_function='goodness') - - expected = [{ - 'pool_name': '10.10.10.10:/vola', - 'QoS_support': True, - 'reserved_percentage': fake.RESERVED_PERCENTAGE, - 'max_over_subscription_ratio': fake.MAX_OVER_SUBSCRIPTION_RATIO, - 'multiattach': False, - 'total_capacity_gb': total_capacity_gb, - 'free_capacity_gb': free_capacity_gb, - 'provisioned_capacity_gb': provisioned_capacity_gb, - 'netapp_dedupe_used_percent': 55.0, - 'netapp_aggregate_used_percent': 45, - 'utilization': 30.0, - 'filter_function': 'filter', - 'goodness_function': 'goodness', - 'thick_provisioning_support': True, - 'thin_provisioning_support': False, - 'netapp_thin_provisioned': 'false', - 'netapp_compression': 'false', - 'netapp_mirrored': 'false', - 'netapp_dedup': 'true', - 'netapp_aggregate': 'aggr1', - 'netapp_raid_type': 'raid_dp', - 'netapp_disk_type': 'SSD', - 'consistencygroup_support': True, - 'replication_enabled': False, - }] - if replication_backends: - expected[0].update({ - 'replication_enabled': True, - 'replication_count': len(replication_backends), - 'replication_targets': 
replication_backends, - 'replication_type': 'async', - }) - - self.assertEqual(expected, result) - mock_get_ssc.assert_called_once_with() - mock_get_aggrs.assert_called_once_with() - mock_get_aggr_capacities.assert_called_once_with(['aggr1']) - - @ddt.data({}, None) - def test_get_pool_stats_no_ssc_vols(self, ssc): - - mock_get_ssc = self.mock_object(self.driver.ssc_library, - 'get_ssc', - return_value=ssc) - - pools = self.driver._get_pool_stats() - - self.assertListEqual([], pools) - mock_get_ssc.assert_called_once_with() - - def test_update_ssc(self): - - mock_ensure_shares_mounted = self.mock_object( - self.driver, '_ensure_shares_mounted') - mock_get_pool_map = self.mock_object( - self.driver, '_get_flexvol_to_pool_map', - return_value='fake_map') - mock_update_ssc = self.mock_object( - self.driver.ssc_library, 'update_ssc') - - result = self.driver._update_ssc() - - self.assertIsNone(result) - mock_ensure_shares_mounted.assert_called_once_with() - mock_get_pool_map.assert_called_once_with() - mock_update_ssc.assert_called_once_with('fake_map') - - def test_get_pool_map(self): - - self.driver.zapi_client = mock.Mock() - mock_get_operational_lif_addresses = self.mock_object( - self.driver.zapi_client, 'get_operational_lif_addresses', - return_value=[fake.SHARE_IP]) - mock_resolve_hostname = self.mock_object( - na_utils, 'resolve_hostname', return_value=fake.SHARE_IP) - mock_get_flexvol = self.mock_object( - self.driver.zapi_client, 'get_flexvol', - return_value={'name': fake.NETAPP_VOLUME}) - - result = self.driver._get_flexvol_to_pool_map() - - expected = { - fake.NETAPP_VOLUME: { - 'pool_name': fake.NFS_SHARE, - }, - } - self.assertEqual(expected, result) - mock_get_operational_lif_addresses.assert_called_once_with() - mock_resolve_hostname.assert_called_once_with(fake.SHARE_IP) - mock_get_flexvol.assert_called_once_with(flexvol_path=fake.EXPORT_PATH) - - def test_get_pool_map_address_not_found(self): - - self.driver.zapi_client = mock.Mock() - 
self.mock_object(self.driver.zapi_client, - 'get_operational_lif_addresses', - return_value=[]) - self.mock_object(na_utils, - 'resolve_hostname', - return_value=fake.SHARE_IP) - - result = self.driver._get_flexvol_to_pool_map() - - self.assertEqual({}, result) - - def test_get_pool_map_flexvol_not_found(self): - - self.driver.zapi_client = mock.Mock() - self.mock_object(self.driver.zapi_client, - 'get_operational_lif_addresses', - return_value=[fake.SHARE_IP]) - self.mock_object(na_utils, - 'resolve_hostname', - return_value=fake.SHARE_IP) - side_effect = exception.VolumeBackendAPIException(data='fake_data') - self.mock_object(self.driver.zapi_client, - 'get_flexvol', - side_effect=side_effect) - - result = self.driver._get_flexvol_to_pool_map() - - self.assertEqual({}, result) - - @ddt.data(['/mnt/img-id1', '/mnt/img-id2'], []) - def test__shortlist_del_eligible_files(self, old_files): - self.driver.zapi_client = mock.Mock() - self.driver.zapi_client.get_file_usage = mock.Mock(return_value='1000') - mock_debug_log = self.mock_object(nfs_cmode.LOG, 'debug') - self.mock_object(self.driver, '_get_vserver_and_exp_vol', - return_value=('openstack', 'fake_share')) - expected_list = [(o, '1000') for o in old_files] - - observed_list = self.driver._shortlist_del_eligible_files( - 'fake_ip:fake_share', old_files) - - self.assertEqual(expected_list, observed_list) - self.assertEqual(1, mock_debug_log.call_count) - - @ddt.data({'ip': None, 'shares': None}, - {'ip': 'fake_ip', 'shares': ['fip:/fsh1']}) - @ddt.unpack - def test__share_match_for_ip_no_match(self, ip, shares): - def side_effect(arg): - if arg == 'fake_ip': - return 'openstack' - return None - - self.mock_object(self.driver, '_get_vserver_for_ip', - side_effect=side_effect) - mock_debug_log = self.mock_object(nfs_cmode.LOG, 'debug') - - retval = self.driver._share_match_for_ip(ip, shares) - - self.assertIsNone(retval) - self.assertEqual(1, mock_debug_log.call_count) - - def test__share_match_for_ip(self): - 
shares = ['fip:/fsh1'] - self.mock_object(self.driver, '_get_vserver_for_ip', - return_value='openstack') - mock_debug_log = self.mock_object(nfs_cmode.LOG, 'debug') - - retval = self.driver._share_match_for_ip('fip', shares) - - self.assertEqual('fip:/fsh1', retval) - self.assertEqual(1, mock_debug_log.call_count) - - def test__get_vserver_for_ip_ignores_zapi_exception(self): - self.driver.zapi_client = mock.Mock() - self.driver.zapi_client.get_if_info_by_ip = mock.Mock( - side_effect=exception.NotFound) - - vserver = self.driver._get_vserver_for_ip('FAKE_IP') - - self.assertIsNone(vserver) - - def test__get_vserver_for_ip(self): - self.driver.zapi_client = mock.Mock() - self.driver.zapi_client.get_if_info_by_ip = mock.Mock( - return_value=fake.get_fake_ifs()) - - vserver = self.driver._get_vserver_for_ip('FAKE_IP') - - self.assertIsNone(vserver) - - def test_check_for_setup_error(self): - super_check_for_setup_error = self.mock_object( - nfs_base.NetAppNfsDriver, 'check_for_setup_error') - mock_check_api_permissions = self.mock_object( - self.driver.ssc_library, 'check_api_permissions') - mock_add_looping_tasks = self.mock_object( - self.driver, '_add_looping_tasks') - - self.driver.check_for_setup_error() - - self.assertEqual(1, super_check_for_setup_error.call_count) - mock_check_api_permissions.assert_called_once_with() - self.assertEqual(1, mock_add_looping_tasks.call_count) - mock_add_looping_tasks.assert_called_once_with() - - @ddt.data({'replication_enabled': True, 'failed_over': False}, - {'replication_enabled': True, 'failed_over': True}, - {'replication_enabled': False, 'failed_over': False}) - @ddt.unpack - def test_handle_housekeeping_tasks(self, replication_enabled, failed_over): - ensure_mirrors = self.mock_object(data_motion.DataMotionMixin, - 'ensure_snapmirrors') - self.mock_object(self.driver.ssc_library, 'get_ssc_flexvol_names', - return_value=fake_ssc.SSC.keys()) - self.driver.replication_enabled = replication_enabled - self.driver.failed_over 
= failed_over - - self.driver._handle_housekeeping_tasks() - - (self.driver.zapi_client.remove_unused_qos_policy_groups. - assert_called_once_with()) - if replication_enabled and not failed_over: - ensure_mirrors.assert_called_once_with( - self.driver.configuration, self.driver.backend_name, - fake_ssc.SSC.keys()) - else: - self.assertFalse(ensure_mirrors.called) - - def test_handle_ems_logging(self): - - volume_list = ['vol0', 'vol1', 'vol2'] - self.mock_object( - self.driver, '_get_backing_flexvol_names', - return_value=volume_list) - self.mock_object( - dot_utils, 'build_ems_log_message_0', - return_value='fake_base_ems_log_message') - self.mock_object( - dot_utils, 'build_ems_log_message_1', - return_value='fake_pool_ems_log_message') - mock_send_ems_log_message = self.mock_object( - self.driver.zapi_client, 'send_ems_log_message') - - self.driver._handle_ems_logging() - - mock_send_ems_log_message.assert_has_calls([ - mock.call('fake_base_ems_log_message'), - mock.call('fake_pool_ems_log_message'), - ]) - dot_utils.build_ems_log_message_0.assert_called_once_with( - self.driver.driver_name, self.driver.app_version, - self.driver.driver_mode) - dot_utils.build_ems_log_message_1.assert_called_once_with( - self.driver.driver_name, self.driver.app_version, - self.driver.vserver, volume_list, []) - - def test_delete_volume(self): - fake_provider_location = 'fake_provider_location' - fake_volume = {'provider_location': fake_provider_location} - self.mock_object(self.driver, '_delete_backing_file_for_volume') - self.mock_object(na_utils, - 'get_valid_qos_policy_group_info', - return_value='fake_qos_policy_group_info') - - self.driver.delete_volume(fake_volume) - - self.driver._delete_backing_file_for_volume.assert_called_once_with( - fake_volume) - na_utils.get_valid_qos_policy_group_info.assert_called_once_with( - fake_volume) - (self.driver.zapi_client.mark_qos_policy_group_for_deletion. 
- assert_called_once_with('fake_qos_policy_group_info')) - - def test_delete_volume_exception_path(self): - fake_provider_location = 'fake_provider_location' - fake_volume = {'provider_location': fake_provider_location} - self.mock_object(self.driver, '_delete_backing_file_for_volume') - self.mock_object(na_utils, - 'get_valid_qos_policy_group_info', - return_value='fake_qos_policy_group_info') - self.mock_object( - self.driver.zapi_client, - 'mark_qos_policy_group_for_deletion', - side_effect=exception.NetAppDriverException) - - self.driver.delete_volume(fake_volume) - - self.driver._delete_backing_file_for_volume.assert_called_once_with( - fake_volume) - na_utils.get_valid_qos_policy_group_info.assert_called_once_with( - fake_volume) - (self.driver.zapi_client.mark_qos_policy_group_for_deletion. - assert_called_once_with('fake_qos_policy_group_info')) - - def test_delete_backing_file_for_volume(self): - mock_filer_delete = self.mock_object(self.driver, '_delete_file') - mock_super_delete = self.mock_object(nfs_base.NetAppNfsDriver, - 'delete_volume') - - self.driver._delete_backing_file_for_volume(fake.NFS_VOLUME) - - mock_filer_delete.assert_called_once_with( - fake.NFS_VOLUME['id'], fake.NFS_VOLUME['name']) - self.assertEqual(0, mock_super_delete.call_count) - - @ddt.data(True, False) - def test_delete_backing_file_for_volume_exception_path(self, super_exc): - mock_exception_log = self.mock_object(nfs_cmode.LOG, 'exception') - exception_call_count = 2 if super_exc else 1 - mock_filer_delete = self.mock_object(self.driver, '_delete_file') - mock_filer_delete.side_effect = [Exception] - mock_super_delete = self.mock_object(nfs_base.NetAppNfsDriver, - 'delete_volume') - if super_exc: - mock_super_delete.side_effect = [Exception] - - self.driver._delete_backing_file_for_volume(fake.NFS_VOLUME) - - mock_filer_delete.assert_called_once_with( - fake.NFS_VOLUME['id'], fake.NFS_VOLUME['name']) - mock_super_delete.assert_called_once_with(fake.NFS_VOLUME) - 
self.assertEqual(exception_call_count, mock_exception_log.call_count) - - def test_delete_snapshot(self): - mock_get_location = self.mock_object(self.driver, - '_get_provider_location') - mock_get_location.return_value = fake.PROVIDER_LOCATION - mock_delete_backing = self.mock_object( - self.driver, '_delete_backing_file_for_snapshot') - - self.driver.delete_snapshot(fake.test_snapshot) - - mock_delete_backing.assert_called_once_with(fake.test_snapshot) - - def test_delete_backing_file_for_snapshot(self): - mock_filer_delete = self.mock_object(self.driver, '_delete_file') - mock_super_delete = self.mock_object(nfs_base.NetAppNfsDriver, - 'delete_snapshot') - - self.driver._delete_backing_file_for_snapshot(fake.test_snapshot) - - mock_filer_delete.assert_called_once_with( - fake.test_snapshot['volume_id'], fake.test_snapshot['name']) - self.assertEqual(0, mock_super_delete.call_count) - - @ddt.data(True, False) - def test_delete_backing_file_for_snapshot_exception_path(self, super_exc): - mock_exception_log = self.mock_object(nfs_cmode.LOG, 'exception') - exception_call_count = 2 if super_exc else 1 - mock_filer_delete = self.mock_object(self.driver, '_delete_file') - mock_filer_delete.side_effect = [Exception] - mock_super_delete = self.mock_object(nfs_base.NetAppNfsDriver, - 'delete_snapshot') - if super_exc: - mock_super_delete.side_effect = [Exception] - - self.driver._delete_backing_file_for_snapshot(fake.test_snapshot) - - mock_filer_delete.assert_called_once_with( - fake.test_snapshot['volume_id'], fake.test_snapshot['name']) - mock_super_delete.assert_called_once_with(fake.test_snapshot) - self.assertEqual(exception_call_count, mock_exception_log.call_count) - - def test_delete_file(self): - mock_get_vs_ip = self.mock_object(self.driver, '_get_export_ip_path') - mock_get_vs_ip.return_value = (fake.VSERVER_NAME, '/%s' % fake.FLEXVOL) - mock_zapi_delete = self.driver.zapi_client.delete_file - - self.driver._delete_file( - fake.test_snapshot['volume_id'], 
fake.test_snapshot['name']) - - mock_zapi_delete.assert_called_once_with( - '/vol/%s/%s' % (fake.FLEXVOL, fake.test_snapshot['name'])) - - def test_do_qos_for_volume_no_exception(self): - - mock_get_info = self.mock_object(na_utils, - 'get_valid_qos_policy_group_info') - mock_get_info.return_value = fake.QOS_POLICY_GROUP_INFO - mock_provision_qos = self.driver.zapi_client.provision_qos_policy_group - mock_set_policy = self.mock_object(self.driver, - '_set_qos_policy_group_on_volume') - mock_error_log = self.mock_object(nfs_cmode.LOG, 'error') - mock_debug_log = self.mock_object(nfs_cmode.LOG, 'debug') - mock_cleanup = self.mock_object(self.driver, - '_cleanup_volume_on_failure') - - self.driver._do_qos_for_volume(fake.NFS_VOLUME, fake.EXTRA_SPECS) - - mock_get_info.assert_has_calls([ - mock.call(fake.NFS_VOLUME, fake.EXTRA_SPECS)]) - mock_provision_qos.assert_has_calls([ - mock.call(fake.QOS_POLICY_GROUP_INFO)]) - mock_set_policy.assert_has_calls([ - mock.call(fake.NFS_VOLUME, fake.QOS_POLICY_GROUP_INFO)]) - self.assertEqual(0, mock_error_log.call_count) - self.assertEqual(0, mock_debug_log.call_count) - self.assertEqual(0, mock_cleanup.call_count) - - def test_do_qos_for_volume_exception_w_cleanup(self): - mock_get_info = self.mock_object(na_utils, - 'get_valid_qos_policy_group_info') - mock_get_info.return_value = fake.QOS_POLICY_GROUP_INFO - mock_provision_qos = self.driver.zapi_client.provision_qos_policy_group - mock_set_policy = self.mock_object(self.driver, - '_set_qos_policy_group_on_volume') - mock_set_policy.side_effect = netapp_api.NaApiError - mock_error_log = self.mock_object(nfs_cmode.LOG, 'error') - mock_debug_log = self.mock_object(nfs_cmode.LOG, 'debug') - mock_cleanup = self.mock_object(self.driver, - '_cleanup_volume_on_failure') - - self.assertRaises(netapp_api.NaApiError, - self.driver._do_qos_for_volume, - fake.NFS_VOLUME, - fake.EXTRA_SPECS) - - mock_get_info.assert_has_calls([ - mock.call(fake.NFS_VOLUME, fake.EXTRA_SPECS)]) - 
mock_provision_qos.assert_has_calls([ - mock.call(fake.QOS_POLICY_GROUP_INFO)]) - mock_set_policy.assert_has_calls([ - mock.call(fake.NFS_VOLUME, fake.QOS_POLICY_GROUP_INFO)]) - self.assertEqual(1, mock_error_log.call_count) - self.assertEqual(1, mock_debug_log.call_count) - mock_cleanup.assert_has_calls([ - mock.call(fake.NFS_VOLUME)]) - - def test_do_qos_for_volume_exception_no_cleanup(self): - - mock_get_info = self.mock_object(na_utils, - 'get_valid_qos_policy_group_info') - mock_get_info.side_effect = exception.Invalid - mock_provision_qos = self.driver.zapi_client.provision_qos_policy_group - mock_set_policy = self.mock_object(self.driver, - '_set_qos_policy_group_on_volume') - mock_error_log = self.mock_object(nfs_cmode.LOG, 'error') - mock_debug_log = self.mock_object(nfs_cmode.LOG, 'debug') - mock_cleanup = self.mock_object(self.driver, - '_cleanup_volume_on_failure') - - self.assertRaises(exception.Invalid, self.driver._do_qos_for_volume, - fake.NFS_VOLUME, fake.EXTRA_SPECS, cleanup=False) - - mock_get_info.assert_has_calls([ - mock.call(fake.NFS_VOLUME, fake.EXTRA_SPECS)]) - self.assertEqual(0, mock_provision_qos.call_count) - self.assertEqual(0, mock_set_policy.call_count) - self.assertEqual(1, mock_error_log.call_count) - self.assertEqual(0, mock_debug_log.call_count) - self.assertEqual(0, mock_cleanup.call_count) - - def test_set_qos_policy_group_on_volume(self): - - mock_get_name_from_info = self.mock_object( - na_utils, 'get_qos_policy_group_name_from_info') - mock_get_name_from_info.return_value = fake.QOS_POLICY_GROUP_NAME - - mock_extract_host = self.mock_object(volume_utils, 'extract_host') - mock_extract_host.return_value = fake.NFS_SHARE - - mock_get_flex_vol_name =\ - self.driver.zapi_client.get_vol_by_junc_vserver - mock_get_flex_vol_name.return_value = fake.FLEXVOL - - mock_file_assign_qos = self.driver.zapi_client.file_assign_qos - - self.driver._set_qos_policy_group_on_volume(fake.NFS_VOLUME, - fake.QOS_POLICY_GROUP_INFO) - - 
mock_get_name_from_info.assert_has_calls([ - mock.call(fake.QOS_POLICY_GROUP_INFO)]) - mock_extract_host.assert_has_calls([ - mock.call(fake.NFS_HOST_STRING, level='pool')]) - mock_get_flex_vol_name.assert_has_calls([ - mock.call(fake.VSERVER_NAME, fake.EXPORT_PATH)]) - mock_file_assign_qos.assert_has_calls([ - mock.call(fake.FLEXVOL, fake.QOS_POLICY_GROUP_NAME, - fake.NFS_VOLUME['name'])]) - - def test_set_qos_policy_group_on_volume_no_info(self): - - mock_get_name_from_info = self.mock_object( - na_utils, 'get_qos_policy_group_name_from_info') - - mock_extract_host = self.mock_object(volume_utils, 'extract_host') - - mock_get_flex_vol_name =\ - self.driver.zapi_client.get_vol_by_junc_vserver - - mock_file_assign_qos = self.driver.zapi_client.file_assign_qos - - self.driver._set_qos_policy_group_on_volume(fake.NFS_VOLUME, - None) - - self.assertEqual(0, mock_get_name_from_info.call_count) - self.assertEqual(0, mock_extract_host.call_count) - self.assertEqual(0, mock_get_flex_vol_name.call_count) - self.assertEqual(0, mock_file_assign_qos.call_count) - - def test_set_qos_policy_group_on_volume_no_name(self): - - mock_get_name_from_info = self.mock_object( - na_utils, 'get_qos_policy_group_name_from_info') - mock_get_name_from_info.return_value = None - - mock_extract_host = self.mock_object(volume_utils, 'extract_host') - - mock_get_flex_vol_name =\ - self.driver.zapi_client.get_vol_by_junc_vserver - - mock_file_assign_qos = self.driver.zapi_client.file_assign_qos - - self.driver._set_qos_policy_group_on_volume(fake.NFS_VOLUME, - fake.QOS_POLICY_GROUP_INFO) - - mock_get_name_from_info.assert_has_calls([ - mock.call(fake.QOS_POLICY_GROUP_INFO)]) - self.assertEqual(0, mock_extract_host.call_count) - self.assertEqual(0, mock_get_flex_vol_name.call_count) - self.assertEqual(0, mock_file_assign_qos.call_count) - - @ddt.data({'share': None, 'is_snapshot': False}, - {'share': None, 'is_snapshot': True}, - {'share': 'fake_share', 'is_snapshot': False}, - {'share': 
'fake_share', 'is_snapshot': True}) - @ddt.unpack - def test_clone_backing_file_for_volume(self, share, is_snapshot): - - mock_get_vserver_and_exp_vol = self.mock_object( - self.driver, '_get_vserver_and_exp_vol', - return_value=(fake.VSERVER_NAME, fake.FLEXVOL)) - - self.driver._clone_backing_file_for_volume( - fake.FLEXVOL, 'fake_clone', fake.VOLUME_ID, share=share, - is_snapshot=is_snapshot) - - mock_get_vserver_and_exp_vol.assert_called_once_with( - fake.VOLUME_ID, share) - self.driver.zapi_client.clone_file.assert_called_once_with( - fake.FLEXVOL, fake.FLEXVOL, 'fake_clone', fake.VSERVER_NAME, - is_snapshot=is_snapshot) - - def test__clone_backing_file_for_volume(self): - body = fake.get_fake_net_interface_get_iter_response() - self.driver.zapi_client.get_if_info_by_ip = mock.Mock( - return_value=[netapp_api.NaElement(body)]) - self.driver.zapi_client.get_vol_by_junc_vserver = mock.Mock( - return_value='nfsvol') - self.mock_object(self.driver, '_get_export_ip_path', - return_value=('127.0.0.1', 'fakepath')) - - retval = self.driver._clone_backing_file_for_volume( - 'vol', 'clone', 'vol_id', share='share', is_snapshot=True) - - self.assertIsNone(retval) - self.driver.zapi_client.clone_file.assert_called_once_with( - 'nfsvol', 'vol', 'clone', None, is_snapshot=True) - - def test__copy_from_img_service_copyoffload_nonexistent_binary_path(self): - self.mock_object(nfs_cmode.LOG, 'debug') - drv = self.driver - context = object() - volume = {'id': 'vol_id', 'name': 'name'} - image_service = mock.Mock() - image_service.get_location.return_value = (mock.Mock(), mock.Mock()) - image_service.show.return_value = {'size': 0} - image_id = 'image_id' - drv._client = mock.Mock() - drv._client.get_api_version = mock.Mock(return_value=(1, 20)) - drv._find_image_in_cache = mock.Mock(return_value=[]) - drv._construct_image_nfs_url = mock.Mock(return_value=["nfs://1"]) - drv._check_get_nfs_path_segs = mock.Mock( - return_value=("test:test", "dr")) - drv._get_ip_verify_on_cluster 
= mock.Mock(return_value="192.128.1.1") - drv._get_mount_point_for_share = mock.Mock(return_value='mnt_point') - drv._get_host_ip = mock.Mock() - drv._get_provider_location = mock.Mock() - drv._get_export_path = mock.Mock(return_value="dr") - drv._check_share_can_hold_size = mock.Mock() - # Raise error as if the copyoffload file can not be found - drv._clone_file_dst_exists = mock.Mock(side_effect=OSError()) - drv._discover_file_till_timeout = mock.Mock() - - # Verify the original error is propagated - self.assertRaises(OSError, drv._copy_from_img_service, - context, volume, image_service, image_id) - - drv._discover_file_till_timeout.assert_not_called() - - @mock.patch.object(image_utils, 'qemu_img_info') - def test__copy_from_img_service_raw_copyoffload_workflow_success( - self, mock_qemu_img_info): - drv = self.driver - volume = {'id': 'vol_id', 'name': 'name', 'size': 1} - image_id = 'image_id' - context = object() - image_service = mock.Mock() - image_service.get_location.return_value = ('nfs://ip1/openstack/img', - None) - image_service.show.return_value = {'size': 1, 'disk_format': 'raw'} - - drv._check_get_nfs_path_segs =\ - mock.Mock(return_value=('ip1', '/openstack')) - drv._get_ip_verify_on_cluster = mock.Mock(return_value='ip1') - drv._get_host_ip = mock.Mock(return_value='ip2') - drv._get_export_path = mock.Mock(return_value='/exp_path') - drv._get_provider_location = mock.Mock(return_value='share') - drv._execute = mock.Mock() - drv._get_mount_point_for_share = mock.Mock(return_value='mnt_point') - drv._discover_file_till_timeout = mock.Mock(return_value=True) - img_inf = mock.Mock() - img_inf.file_format = 'raw' - mock_qemu_img_info.return_value = img_inf - drv._check_share_can_hold_size = mock.Mock() - drv._move_nfs_file = mock.Mock(return_value=True) - drv._delete_file_at_path = mock.Mock() - drv._clone_file_dst_exists = mock.Mock() - drv._post_clone_image = mock.Mock() - - retval = drv._copy_from_img_service( - context, volume, image_service, 
image_id) - - self.assertIsNone(retval) - drv._get_ip_verify_on_cluster.assert_any_call('ip1') - drv._get_export_path.assert_called_with('vol_id') - drv._check_share_can_hold_size.assert_called_with('share', 1) - drv._post_clone_image.assert_called_with(volume) - self.assertEqual(1, drv._execute.call_count) - - @mock.patch.object(image_utils, 'convert_image') - @mock.patch.object(image_utils, 'qemu_img_info') - @mock.patch('os.path.exists') - def test__copy_from_img_service_qcow2_copyoffload_workflow_success( - self, mock_exists, mock_qemu_img_info, mock_cvrt_image): - drv = self.driver - cinder_mount_point_base = '/opt/stack/data/cinder/mnt/' - # To get the cinder mount point directory, we use: - mount_dir = hashlib.md5( - '203.0.113.122:/cinder-flexvol1'.encode('utf-8')).hexdigest() - cinder_mount_point = cinder_mount_point_base + mount_dir - destination_copied_file = ( - '/cinder-flexvol1/a155308c-0290-497b-b278-4cdd01de0253' - ) - volume = {'id': 'vol_id', 'name': 'name', 'size': 1} - image_id = 'image_id' - context = object() - image_service = mock.Mock() - image_service.get_location.return_value = ( - 'nfs://203.0.113.122/glance-flexvol1', None) - image_service.show.return_value = {'size': 1, - 'disk_format': 'qcow2'} - drv._check_get_nfs_path_segs = ( - mock.Mock(return_value=('203.0.113.122', '/openstack')) - ) - - drv._get_ip_verify_on_cluster = mock.Mock(return_value='203.0.113.122') - drv._get_host_ip = mock.Mock(return_value='203.0.113.122') - drv._get_export_path = mock.Mock( - return_value='/cinder-flexvol1') - drv._get_provider_location = mock.Mock(return_value='share') - drv._execute = mock.Mock() - drv._execute_as_root = False - drv._get_mount_point_for_share = mock.Mock( - return_value=cinder_mount_point) - img_inf = mock.Mock() - img_inf.file_format = 'raw' - mock_qemu_img_info.return_value = img_inf - drv._check_share_can_hold_size = mock.Mock() - - drv._move_nfs_file = mock.Mock(return_value=True) - drv._delete_file_at_path = mock.Mock() - 
drv._clone_file_dst_exists = mock.Mock() - drv._post_clone_image = mock.Mock() - self.mock_object(uuid, 'uuid4', mock.Mock( - return_value='a155308c-0290-497b-b278-4cdd01de0253')) - - retval = drv._copy_from_img_service( - context, volume, image_service, image_id) - - self.assertIsNone(retval) - drv._get_ip_verify_on_cluster.assert_any_call('203.0.113.122') - drv._get_export_path.assert_called_with('vol_id') - drv._check_share_can_hold_size.assert_called_with('share', 1) - drv._post_clone_image.assert_called_with(volume) - self.assertEqual(1, mock_cvrt_image.call_count) - - # _execute must be called once for copy-offload and again to touch - # the top directory to refresh cache - drv._execute.assert_has_calls( - [ - mock.call( - 'copyoffload_tool_path', '203.0.113.122', - '203.0.113.122', '/openstack/glance-flexvol1', - destination_copied_file, run_as_root=False, - check_exit_code=0 - ), - mock.call('touch', cinder_mount_point, run_as_root=False) - ] - ) - self.assertEqual(2, drv._execute.call_count) - self.assertEqual(2, drv._delete_file_at_path.call_count) - self.assertEqual(1, drv._clone_file_dst_exists.call_count) - - def test__copy_from_cache_copyoffload_success(self): - drv = self.driver - volume = {'id': 'vol_id', 'name': 'name', 'size': 1} - image_id = 'image_id' - cache_result = [('ip1:/openstack', 'img-cache-imgid')] - drv._get_ip_verify_on_cluster = mock.Mock(return_value='ip1') - drv._get_host_ip = mock.Mock(return_value='ip2') - drv._get_export_path = mock.Mock(return_value='/exp_path') - drv._execute = mock.Mock() - drv._register_image_in_cache = mock.Mock() - drv._get_provider_location = mock.Mock(return_value='/share') - drv._post_clone_image = mock.Mock() - - copied = drv._copy_from_cache(volume, image_id, cache_result) - - self.assertTrue(copied) - drv._get_ip_verify_on_cluster.assert_any_call('ip1') - drv._get_export_path.assert_called_with('vol_id') - drv._execute.assert_called_once_with( - 'copyoffload_tool_path', 'ip1', 'ip1', - 
'/openstack/img-cache-imgid', '/exp_path/name', - run_as_root=False, check_exit_code=0) - drv._post_clone_image.assert_called_with(volume) - drv._get_provider_location.assert_called_with('vol_id') - - def test_unmanage(self): - mock_get_info = self.mock_object(na_utils, - 'get_valid_qos_policy_group_info') - mock_get_info.return_value = fake.QOS_POLICY_GROUP_INFO - - mock_mark_for_deletion =\ - self.driver.zapi_client.mark_qos_policy_group_for_deletion - - super_unmanage = self.mock_object(nfs_base.NetAppNfsDriver, 'unmanage') - - self.driver.unmanage(fake.NFS_VOLUME) - - mock_get_info.assert_has_calls([mock.call(fake.NFS_VOLUME)]) - mock_mark_for_deletion.assert_has_calls([ - mock.call(fake.QOS_POLICY_GROUP_INFO)]) - super_unmanage.assert_has_calls([mock.call(fake.NFS_VOLUME)]) - - def test_unmanage_invalid_qos(self): - mock_get_info = self.mock_object(na_utils, - 'get_valid_qos_policy_group_info') - mock_get_info.side_effect = exception.Invalid - - super_unmanage = self.mock_object(nfs_base.NetAppNfsDriver, 'unmanage') - - self.driver.unmanage(fake.NFS_VOLUME) - - mock_get_info.assert_has_calls([mock.call(fake.NFS_VOLUME)]) - super_unmanage.assert_has_calls([mock.call(fake.NFS_VOLUME)]) - - def test_add_looping_tasks(self): - mock_update_ssc = self.mock_object(self.driver, '_update_ssc') - mock_remove_unused_qos_policy_groups = self.mock_object( - self.driver.zapi_client, 'remove_unused_qos_policy_groups') - mock_add_task = self.mock_object(self.driver.loopingcalls, 'add_task') - mock_super_add_looping_tasks = self.mock_object( - nfs_base.NetAppNfsDriver, '_add_looping_tasks') - - self.driver._add_looping_tasks() - - mock_update_ssc.assert_called_once_with() - mock_add_task.assert_has_calls([ - mock.call(mock_update_ssc, - loopingcalls.ONE_HOUR, - loopingcalls.ONE_HOUR), - mock.call(mock_remove_unused_qos_policy_groups, - loopingcalls.ONE_MINUTE, - loopingcalls.ONE_MINUTE)]) - mock_super_add_looping_tasks.assert_called_once_with() - - @ddt.data({'has_space': 
True, 'type_match': True, 'expected': True}, - {'has_space': True, 'type_match': False, 'expected': False}, - {'has_space': False, 'type_match': True, 'expected': False}, - {'has_space': False, 'type_match': False, 'expected': False}) - @ddt.unpack - def test_is_share_clone_compatible(self, has_space, type_match, expected): - - mock_get_flexvol_name_for_share = self.mock_object( - self.driver, '_get_flexvol_name_for_share', - return_value='fake_flexvol') - mock_is_volume_thin_provisioned = self.mock_object( - self.driver, '_is_volume_thin_provisioned', return_value='thin') - mock_share_has_space_for_clone = self.mock_object( - self.driver, '_share_has_space_for_clone', return_value=has_space) - mock_is_share_vol_type_match = self.mock_object( - self.driver, '_is_share_vol_type_match', return_value=type_match) - - result = self.driver._is_share_clone_compatible(fake.VOLUME, - fake.NFS_SHARE) - - self.assertEqual(expected, result) - mock_get_flexvol_name_for_share.assert_called_once_with(fake.NFS_SHARE) - mock_is_volume_thin_provisioned.assert_called_once_with('fake_flexvol') - mock_share_has_space_for_clone.assert_called_once_with( - fake.NFS_SHARE, fake.SIZE, 'thin') - if has_space: - mock_is_share_vol_type_match.assert_called_once_with( - fake.VOLUME, fake.NFS_SHARE, 'fake_flexvol') - - @ddt.data({'thin': True, 'expected': True}, - {'thin': False, 'expected': False}, - {'thin': None, 'expected': False}) - @ddt.unpack - def test_is_volume_thin_provisioned(self, thin, expected): - - ssc_data = {'thin_provisioning_support': thin} - mock_get_ssc_for_flexvol = self.mock_object( - self.driver.ssc_library, 'get_ssc_for_flexvol', - return_value=ssc_data) - - result = self.driver._is_volume_thin_provisioned('fake_flexvol') - - self.assertEqual(expected, result) - mock_get_ssc_for_flexvol.assert_called_once_with('fake_flexvol') - - @ddt.data({'flexvols': ['volume1', 'volume2'], 'expected': True}, - {'flexvols': ['volume3', 'volume4'], 'expected': False}, - {'flexvols': [], 
'expected': False}) - @ddt.unpack - def test_is_share_vol_type_match(self, flexvols, expected): - - mock_get_volume_extra_specs = self.mock_object( - na_utils, 'get_volume_extra_specs', - return_value='fake_extra_specs') - mock_get_matching_flexvols_for_extra_specs = self.mock_object( - self.driver.ssc_library, 'get_matching_flexvols_for_extra_specs', - return_value=flexvols) - - result = self.driver._is_share_vol_type_match(fake.VOLUME, - fake.NFS_SHARE, - 'volume1') - - self.assertEqual(expected, result) - mock_get_volume_extra_specs.assert_called_once_with(fake.VOLUME) - mock_get_matching_flexvols_for_extra_specs.assert_called_once_with( - 'fake_extra_specs') - - @ddt.data({'share': 'volume1', 'expected': 'volume1'}, - {'share': 'volume3', 'expected': None}) - @ddt.unpack - def test_get_flexvol_name_for_share(self, share, expected): - - mock_get_ssc = self.mock_object( - self.driver.ssc_library, 'get_ssc', return_value=fake_ssc.SSC) - - result = self.driver._get_flexvol_name_for_share(share) - - self.assertEqual(expected, result) - mock_get_ssc.assert_called_once_with() - - def test_get_flexvol_name_for_share_no_ssc_vols(self): - - mock_get_ssc = self.mock_object( - self.driver.ssc_library, 'get_ssc', return_value={}) - - result = self.driver._get_flexvol_name_for_share('fake_share') - - self.assertIsNone(result) - mock_get_ssc.assert_called_once_with() - - def test_find_image_location_with_local_copy(self): - local_share = '/share' - cache_result = [ - ('ip1:/openstack', 'img-cache-imgid'), - ('ip2:/openstack', 'img-cache-imgid'), - (local_share, 'img-cache-imgid'), - ('ip3:/openstack', 'img-cache-imgid'), - ] - self.driver._get_provider_location = mock.Mock( - return_value=local_share) - - cache_copy, found_local_copy = self.driver._find_image_location( - cache_result, fake.VOLUME_ID) - - self.assertEqual(cache_result[2], cache_copy) - self.assertTrue(found_local_copy) - self.driver._get_provider_location.assert_called_once_with( - fake.VOLUME_ID) - - def 
test_find_image_location_with_remote_copy(self): - cache_result = [('ip1:/openstack', 'img-cache-imgid')] - self.driver._get_provider_location = mock.Mock(return_value='/share') - - cache_copy, found_local_copy = self.driver._find_image_location( - cache_result, fake.VOLUME_ID) - - self.assertEqual(cache_result[0], cache_copy) - self.assertFalse(found_local_copy) - self.driver._get_provider_location.assert_called_once_with( - fake.VOLUME_ID) - - def test_find_image_location_without_cache_copy(self): - cache_result = [] - self.driver._get_provider_location = mock.Mock(return_value='/share') - - cache_copy, found_local_copy = self.driver._find_image_location( - cache_result, fake.VOLUME_ID) - - self.assertIsNone(cache_copy) - self.assertFalse(found_local_copy) - self.driver._get_provider_location.assert_called_once_with( - fake.VOLUME_ID) - - def test_clone_file_dest_exists(self): - self.driver._get_vserver_and_exp_vol = mock.Mock( - return_value=(fake.VSERVER_NAME, fake.EXPORT_PATH)) - self.driver.zapi_client.clone_file = mock.Mock() - - self.driver._clone_file_dst_exists( - fake.NFS_SHARE, fake.IMAGE_FILE_ID, fake.VOLUME['name'], - dest_exists=True) - - self.driver._get_vserver_and_exp_vol.assert_called_once_with( - share=fake.NFS_SHARE) - self.driver.zapi_client.clone_file.assert_called_once_with( - fake.EXPORT_PATH, fake.IMAGE_FILE_ID, fake.VOLUME['name'], - fake.VSERVER_NAME, dest_exists=True) - - def test_get_source_ip_and_path(self): - self.driver._get_ip_verify_on_cluster = mock.Mock( - return_value=fake.SHARE_IP) - - src_ip, src_path = self.driver._get_source_ip_and_path( - fake.NFS_SHARE, fake.IMAGE_FILE_ID) - - self.assertEqual(fake.SHARE_IP, src_ip) - assert_path = fake.EXPORT_PATH + '/' + fake.IMAGE_FILE_ID - self.assertEqual(assert_path, src_path) - self.driver._get_ip_verify_on_cluster.assert_called_once_with( - fake.SHARE_IP) - - def test_get_destination_ip_and_path(self): - self.driver._get_ip_verify_on_cluster = mock.Mock( - 
return_value=fake.SHARE_IP) - self.driver._get_host_ip = mock.Mock(return_value='host.ip') - self.driver._get_export_path = mock.Mock(return_value=fake.EXPORT_PATH) - - dest_ip, dest_path = self.driver._get_destination_ip_and_path( - fake.VOLUME) - - self.assertEqual(fake.SHARE_IP, dest_ip) - assert_path = fake.EXPORT_PATH + '/' + fake.LUN_NAME - self.assertEqual(assert_path, dest_path) - self.driver._get_ip_verify_on_cluster.assert_called_once_with( - 'host.ip') - self.driver._get_host_ip.assert_called_once_with(fake.VOLUME_ID) - self.driver._get_export_path.assert_called_once_with(fake.VOLUME_ID) - - def test_copy_image_to_volume_copyoffload_non_cached_ssc_update(self): - mock_log = self.mock_object(nfs_cmode, 'LOG') - drv = self.driver - context = object() - volume = {'id': 'vol_id', 'name': 'name'} - image_service = object() - image_id = 'image_id' - drv.zapi_client = mock.Mock() - drv.zapi_client.get_ontapi_version = mock.Mock(return_value=(1, 20)) - drv._copy_from_img_service = mock.Mock() - drv._get_provider_location = mock.Mock(return_value='share') - drv._get_vol_for_share = mock.Mock(return_value='vol') - - retval = drv.copy_image_to_volume( - context, volume, image_service, image_id) - - self.assertIsNone(retval) - drv._copy_from_img_service.assert_called_once_with( - context, volume, image_service, image_id) - self.assertEqual(1, mock_log.debug.call_count) - self.assertEqual(1, mock_log.info.call_count) - - def test_copy_image_to_volume_copyoffload_from_cache_success(self): - mock_info_log = self.mock_object(nfs_cmode.LOG, 'info') - drv = self.driver - context = object() - volume = {'id': 'vol_id', 'name': 'name'} - image_service = object() - image_id = 'image_id' - drv.zapi_client = mock.Mock() - drv.zapi_client.get_ontapi_version = mock.Mock(return_value=(1, 20)) - nfs_base.NetAppNfsDriver.copy_image_to_volume = mock.Mock() - drv._get_provider_location = mock.Mock(return_value='share') - drv._get_vol_for_share = mock.Mock(return_value='vol') - 
drv._find_image_in_cache = mock.Mock(return_value=[('share', 'img')]) - drv._copy_from_cache = mock.Mock(return_value=True) - - drv.copy_image_to_volume(context, volume, image_service, image_id) - - drv._copy_from_cache.assert_called_once_with( - volume, image_id, [('share', 'img')]) - self.assertEqual(1, mock_info_log.call_count) - - def test_copy_image_to_volume_copyoffload_from_img_service(self): - drv = self.driver - context = object() - volume = {'id': 'vol_id', 'name': 'name'} - image_service = object() - image_id = 'image_id' - drv.zapi_client = mock.Mock() - drv.zapi_client.get_ontapi_version = mock.Mock(return_value=(1, 20)) - nfs_base.NetAppNfsDriver.copy_image_to_volume = mock.Mock() - drv._get_provider_location = mock.Mock(return_value='share') - drv._get_vol_for_share = mock.Mock(return_value='vol') - drv._find_image_in_cache = mock.Mock(return_value=False) - drv._copy_from_img_service = mock.Mock() - - retval = drv.copy_image_to_volume( - context, volume, image_service, image_id) - - self.assertIsNone(retval) - drv._copy_from_img_service.assert_called_once_with( - context, volume, image_service, image_id) - - def test_copy_image_to_volume_copyoffload_failure(self): - mock_log = self.mock_object(nfs_cmode, 'LOG') - drv = self.driver - context = object() - volume = {'id': 'vol_id', 'name': 'name'} - image_service = object() - image_id = 'image_id' - drv.zapi_client = mock.Mock() - drv.zapi_client.get_ontapi_version = mock.Mock(return_value=(1, 20)) - drv._copy_from_img_service = mock.Mock(side_effect=Exception()) - nfs_base.NetAppNfsDriver.copy_image_to_volume = mock.Mock() - drv._get_provider_location = mock.Mock(return_value='share') - drv._get_vol_for_share = mock.Mock(return_value='vol') - - retval = drv.copy_image_to_volume( - context, volume, image_service, image_id) - - self.assertIsNone(retval) - drv._copy_from_img_service.assert_called_once_with( - context, volume, image_service, image_id) - nfs_base.NetAppNfsDriver.copy_image_to_volume. 
\ - assert_called_once_with(context, volume, image_service, image_id) - mock_log.info.assert_not_called() - self.assertEqual(1, mock_log.exception.call_count) - - def test_copy_from_remote_cache(self): - source_ip = '192.0.1.1' - source_path = '/openstack/img-cache-imgid' - cache_copy = ('192.0.1.1:/openstack', fake.IMAGE_FILE_ID) - dest_path = fake.EXPORT_PATH + '/' + fake.VOLUME['name'] - self.driver._execute = mock.Mock() - self.driver._get_source_ip_and_path = mock.Mock( - return_value=(source_ip, source_path)) - self.driver._get_destination_ip_and_path = mock.Mock( - return_value=(fake.SHARE_IP, dest_path)) - self.driver._register_image_in_cache = mock.Mock() - - self.driver._copy_from_remote_cache( - fake.VOLUME, fake.IMAGE_FILE_ID, cache_copy) - - self.driver._execute.assert_called_once_with( - 'copyoffload_tool_path', source_ip, fake.SHARE_IP, - source_path, dest_path, run_as_root=False, check_exit_code=0) - self.driver._get_source_ip_and_path.assert_called_once_with( - cache_copy[0], fake.IMAGE_FILE_ID) - self.driver._get_destination_ip_and_path.assert_called_once_with( - fake.VOLUME) - self.driver._register_image_in_cache.assert_called_once_with( - fake.VOLUME, fake.IMAGE_FILE_ID) - - def test_copy_from_cache_workflow_remote_location(self): - cache_result = [('ip1:/openstack', fake.IMAGE_FILE_ID), - ('ip2:/openstack', fake.IMAGE_FILE_ID), - ('ip3:/openstack', fake.IMAGE_FILE_ID)] - self.driver._find_image_location = mock.Mock(return_value=[ - cache_result[0], False]) - self.driver._copy_from_remote_cache = mock.Mock() - self.driver._post_clone_image = mock.Mock() - - copied = self.driver._copy_from_cache( - fake.VOLUME, fake.IMAGE_FILE_ID, cache_result) - - self.assertTrue(copied) - self.driver._copy_from_remote_cache.assert_called_once_with( - fake.VOLUME, fake.IMAGE_FILE_ID, cache_result[0]) - self.driver._post_clone_image.assert_called_once_with(fake.VOLUME) - - def test_copy_from_cache_workflow_remote_location_no_copyoffload(self): - cache_result = 
[('ip1:/openstack', fake.IMAGE_FILE_ID), - ('ip2:/openstack', fake.IMAGE_FILE_ID), - ('ip3:/openstack', fake.IMAGE_FILE_ID)] - self.driver._find_image_location = mock.Mock(return_value=[ - cache_result[0], False]) - self.driver._copy_from_remote_cache = mock.Mock() - self.driver._post_clone_image = mock.Mock() - self.driver.configuration.netapp_copyoffload_tool_path = None - - copied = self.driver._copy_from_cache( - fake.VOLUME, fake.IMAGE_FILE_ID, cache_result) - - self.assertFalse(copied) - self.driver._copy_from_remote_cache.assert_not_called() - - def test_copy_from_cache_workflow_local_location(self): - local_share = '/share' - cache_result = [ - ('ip1:/openstack', 'img-cache-imgid'), - ('ip2:/openstack', 'img-cache-imgid'), - (local_share, 'img-cache-imgid'), - ('ip3:/openstack', 'img-cache-imgid'), - ] - self.driver._find_image_location = mock.Mock(return_value=[ - cache_result[2], True]) - self.driver._clone_file_dst_exists = mock.Mock() - self.driver._post_clone_image = mock.Mock() - - copied = self.driver._copy_from_cache( - fake.VOLUME, fake.IMAGE_FILE_ID, cache_result) - - self.assertTrue(copied) - self.driver._clone_file_dst_exists.assert_called_once_with( - local_share, fake.IMAGE_FILE_ID, fake.VOLUME['name'], - dest_exists=True) - self.driver._post_clone_image.assert_called_once_with(fake.VOLUME) - - def test_copy_from_cache_workflow_no_location(self): - cache_result = [] - self.driver._find_image_location = mock.Mock( - return_value=(None, False)) - - copied = self.driver._copy_from_cache( - fake.VOLUME, fake.IMAGE_FILE_ID, cache_result) - - self.assertFalse(copied) - - def test_copy_from_cache_workflow_exception(self): - cache_result = [('ip1:/openstack', fake.IMAGE_FILE_ID)] - self.driver._find_image_location = mock.Mock(return_value=[ - cache_result[0], False]) - self.driver._copy_from_remote_cache = mock.Mock( - side_effect=Exception) - self.driver._post_clone_image = mock.Mock() - - copied = self.driver._copy_from_cache( - fake.VOLUME, 
fake.IMAGE_FILE_ID, cache_result) - - self.assertFalse(copied) - self.driver._copy_from_remote_cache.assert_called_once_with( - fake.VOLUME, fake.IMAGE_FILE_ID, cache_result[0]) - self.assertFalse(self.driver._post_clone_image.called) - - @ddt.data({'secondary_id': 'dev0', 'configured_targets': ['dev1']}, - {'secondary_id': 'dev3', 'configured_targets': ['dev1', 'dev2']}, - {'secondary_id': 'dev1', 'configured_targets': []}, - {'secondary_id': None, 'configured_targets': []}) - @ddt.unpack - def test_failover_host_invalid_replication_target(self, secondary_id, - configured_targets): - """This tests executes a method in the DataMotionMixin.""" - self.driver.backend_name = 'dev0' - self.mock_object(data_motion.DataMotionMixin, - 'get_replication_backend_names', - return_value=configured_targets) - complete_failover_call = self.mock_object( - data_motion.DataMotionMixin, '_complete_failover') - - self.assertRaises(exception.InvalidReplicationTarget, - self.driver.failover_host, 'fake_context', [], - secondary_id=secondary_id) - self.assertFalse(complete_failover_call.called) - - def test_failover_host_unable_to_failover(self): - """This tests executes a method in the DataMotionMixin.""" - self.driver.backend_name = 'dev0' - self.mock_object(data_motion.DataMotionMixin, '_complete_failover', - side_effect=exception.NetAppDriverException) - self.mock_object(data_motion.DataMotionMixin, - 'get_replication_backend_names', - return_value=['dev1', 'dev2']) - self.mock_object(self.driver.ssc_library, 'get_ssc_flexvol_names', - return_value=fake_ssc.SSC.keys()) - self.mock_object(self.driver, '_update_zapi_client') - - self.assertRaises(exception.UnableToFailOver, - self.driver.failover_host, 'fake_context', [], - secondary_id='dev1') - data_motion.DataMotionMixin._complete_failover.assert_called_once_with( - 'dev0', ['dev1', 'dev2'], fake_ssc.SSC.keys(), [], - failover_target='dev1') - self.assertFalse(self.driver._update_zapi_client.called) - - def test_failover_host(self): 
- """This tests executes a method in the DataMotionMixin.""" - self.driver.backend_name = 'dev0' - self.mock_object(data_motion.DataMotionMixin, '_complete_failover', - return_value=('dev1', [])) - self.mock_object(data_motion.DataMotionMixin, - 'get_replication_backend_names', - return_value=['dev1', 'dev2']) - self.mock_object(self.driver.ssc_library, 'get_ssc_flexvol_names', - return_value=fake_ssc.SSC.keys()) - self.mock_object(self.driver, '_update_zapi_client') - - actual_active, vol_updates, __ = self.driver.failover_host( - 'fake_context', [], secondary_id='dev1', groups=[]) - - data_motion.DataMotionMixin._complete_failover.assert_called_once_with( - 'dev0', ['dev1', 'dev2'], fake_ssc.SSC.keys(), [], - failover_target='dev1') - self.driver._update_zapi_client.assert_called_once_with('dev1') - self.assertTrue(self.driver.failed_over) - self.assertEqual('dev1', self.driver.failed_over_backend_name) - self.assertEqual('dev1', actual_active) - self.assertEqual([], vol_updates) - - def test_delete_cgsnapshot(self): - mock_delete_backing_file = self.mock_object( - self.driver, '_delete_backing_file_for_snapshot') - snapshots = [fake.CG_SNAPSHOT] - - model_update, snapshots_model_update = ( - self.driver.delete_cgsnapshot( - fake.CG_CONTEXT, fake.CG_SNAPSHOT, snapshots)) - - mock_delete_backing_file.assert_called_once_with(fake.CG_SNAPSHOT) - self.assertIsNone(model_update) - self.assertIsNone(snapshots_model_update) - - def test_get_snapshot_backing_flexvol_names(self): - snapshots = [ - {'volume': {'host': 'hostA@192.168.99.25#/fake/volume1'}}, - {'volume': {'host': 'hostA@192.168.1.01#/fake/volume2'}}, - {'volume': {'host': 'hostA@192.168.99.25#/fake/volume3'}}, - {'volume': {'host': 'hostA@192.168.99.25#/fake/volume1'}}, - ] - - ssc = { - 'volume1': {'pool_name': '/fake/volume1', }, - 'volume2': {'pool_name': '/fake/volume2', }, - 'volume3': {'pool_name': '/fake/volume3', }, - } - - mock_get_ssc = self.mock_object(self.driver.ssc_library, 'get_ssc') - 
mock_get_ssc.return_value = ssc - - hosts = [snap['volume']['host'] for snap in snapshots] - flexvols = self.driver._get_flexvol_names_from_hosts(hosts) - - mock_get_ssc.assert_called_once_with() - self.assertEqual(3, len(flexvols)) - self.assertIn('volume1', flexvols) - self.assertIn('volume2', flexvols) - self.assertIn('volume3', flexvols) - - def test_get_backing_flexvol_names(self): - mock_ssc_library = self.mock_object( - self.driver.ssc_library, 'get_ssc') - - self.driver._get_backing_flexvol_names() - - mock_ssc_library.assert_called_once_with() diff --git a/cinder/tests/unit/volume/drivers/netapp/dataontap/utils/__init__.py b/cinder/tests/unit/volume/drivers/netapp/dataontap/utils/__init__.py deleted file mode 100644 index e69de29bb..000000000 diff --git a/cinder/tests/unit/volume/drivers/netapp/dataontap/utils/fakes.py b/cinder/tests/unit/volume/drivers/netapp/dataontap/utils/fakes.py deleted file mode 100644 index 47ffccaa2..000000000 --- a/cinder/tests/unit/volume/drivers/netapp/dataontap/utils/fakes.py +++ /dev/null @@ -1,159 +0,0 @@ -# Copyright (c) 2016 Clinton Knight -# All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -from cinder.volume import configuration -from cinder.volume import driver -from cinder.volume.drivers.netapp import options as na_opts - -SSC_VSERVER = 'fake_vserver' -SSC_VOLUMES = ('volume1', 'volume2') -SSC_VOLUME_MAP = { - SSC_VOLUMES[0]: { - 'pool_name': SSC_VOLUMES[0], - }, - SSC_VOLUMES[1]: { - 'pool_name': SSC_VOLUMES[1], - }, -} -SSC_AGGREGATES = ('aggr1', 'aggr2') - -SSC = { - 'volume1': { - 'thick_provisioning_support': True, - 'thin_provisioning_support': False, - 'netapp_thin_provisioned': 'false', - 'netapp_aggregate': 'aggr1', - 'netapp_compression': 'false', - 'netapp_dedup': 'true', - 'netapp_mirrored': 'false', - 'netapp_raid_type': 'raid_dp', - 'netapp_disk_type': ['SSD'], - 'netapp_hybrid_aggregate': 'false', - 'netapp_flexvol_encryption': 'true', - 'pool_name': 'volume1', - }, - 'volume2': { - 'thick_provisioning_support': False, - 'thin_provisioning_support': True, - 'netapp_thin_provisioned': 'true', - 'netapp_aggregate': 'aggr2', - 'netapp_compression': 'true', - 'netapp_dedup': 'true', - 'netapp_mirrored': 'true', - 'netapp_raid_type': 'raid_dp', - 'netapp_disk_type': ['FCAL', 'SSD'], - 'netapp_hybrid_aggregate': 'true', - 'netapp_flexvol_encryption': 'false', - 'pool_name': 'volume2', - }, -} - -SSC_FLEXVOL_INFO = { - 'volume1': { - 'thick_provisioning_support': True, - 'thin_provisioning_support': False, - 'netapp_thin_provisioned': 'false', - 'netapp_aggregate': 'aggr1', - }, - 'volume2': { - 'thick_provisioning_support': False, - 'thin_provisioning_support': True, - 'netapp_thin_provisioned': 'true', - 'netapp_aggregate': 'aggr2', - }, -} - -SSC_DEDUPE_INFO = { - 'volume1': { - 'netapp_dedup': 'true', - 'netapp_compression': 'false', - }, - 'volume2': { - 'netapp_dedup': 'true', - 'netapp_compression': 'true', - }, -} - -SSC_ENCRYPTION_INFO = { - 'volume1': { - 'netapp_flexvol_encryption': 'true', - }, - 'volume2': { - 'netapp_flexvol_encryption': 'false', - }, -} - -SSC_MIRROR_INFO = { - 'volume1': { - 'netapp_mirrored': 'false', - 
}, - 'volume2': { - 'netapp_mirrored': 'true', - }, -} - -SSC_AGGREGATE_INFO = { - 'volume1': { - 'netapp_disk_type': ['SSD'], - 'netapp_raid_type': 'raid_dp', - 'netapp_hybrid_aggregate': 'false', - }, - 'volume2': { - 'netapp_disk_type': ['FCAL', 'SSD'], - 'netapp_raid_type': 'raid_dp', - 'netapp_hybrid_aggregate': 'true', - }, -} - -PROVISIONING_OPTS = { - 'aggregate': 'fake_aggregate', - 'thin_provisioned': True, - 'snapshot_policy': None, - 'language': 'en_US', - 'dedupe_enabled': False, - 'compression_enabled': False, - 'snapshot_reserve': '12', - 'volume_type': 'rw', - 'size': 20, -} - -ENCRYPTED_PROVISIONING_OPTS = { - 'aggregate': 'fake_aggregate', - 'thin_provisioned': True, - 'snapshot_policy': None, - 'language': 'en_US', - 'dedupe_enabled': False, - 'compression_enabled': False, - 'snapshot_reserve': '12', - 'volume_type': 'rw', - 'size': 20, - 'encrypt': 'true', -} - - -def get_fake_cmode_config(backend_name): - - config = configuration.Configuration(driver.volume_opts, - config_group=backend_name) - config.append_config_values(na_opts.netapp_proxy_opts) - config.append_config_values(na_opts.netapp_connection_opts) - config.append_config_values(na_opts.netapp_transport_opts) - config.append_config_values(na_opts.netapp_basicauth_opts) - config.append_config_values(na_opts.netapp_provisioning_opts) - config.append_config_values(na_opts.netapp_cluster_opts) - config.append_config_values(na_opts.netapp_san_opts) - config.append_config_values(na_opts.netapp_replication_opts) - - return config diff --git a/cinder/tests/unit/volume/drivers/netapp/dataontap/utils/test_capabilities.py b/cinder/tests/unit/volume/drivers/netapp/dataontap/utils/test_capabilities.py deleted file mode 100644 index cc3e3d442..000000000 --- a/cinder/tests/unit/volume/drivers/netapp/dataontap/utils/test_capabilities.py +++ /dev/null @@ -1,480 +0,0 @@ -# Copyright (c) 2016 Clinton Knight -# All rights reserved. 
-# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import collections -import copy - -import ddt -import mock -import six - -from cinder import exception -from cinder import test -from cinder.tests.unit.volume.drivers.netapp.dataontap.client import ( - fakes as fake_client) -import cinder.tests.unit.volume.drivers.netapp.dataontap.utils.fakes as fake -import cinder.tests.unit.volume.drivers.netapp.fakes as na_fakes -from cinder.volume.drivers.netapp.dataontap.utils import capabilities - - -@ddt.ddt -class CapabilitiesLibraryTestCase(test.TestCase): - - def setUp(self): - super(CapabilitiesLibraryTestCase, self).setUp() - - self.zapi_client = mock.Mock() - self.configuration = self.get_config_cmode() - self.ssc_library = capabilities.CapabilitiesLibrary( - 'iSCSI', fake.SSC_VSERVER, self.zapi_client, self.configuration) - self.ssc_library.ssc = fake.SSC - - def get_config_cmode(self): - config = na_fakes.create_configuration_cmode() - config.volume_backend_name = 'fake_backend' - return config - - def test_check_api_permissions(self): - - mock_log = self.mock_object(capabilities.LOG, 'warning') - - self.ssc_library.check_api_permissions() - - self.zapi_client.check_cluster_api.assert_has_calls( - [mock.call(*key) for key in capabilities.SSC_API_MAP.keys()]) - self.assertEqual(0, mock_log.call_count) - - def test_check_api_permissions_failed_ssc_apis(self): - - def check_cluster_api(object_name, operation_name, api): - if api != 'volume-get-iter': - return False - return 
True - - self.zapi_client.check_cluster_api.side_effect = check_cluster_api - mock_log = self.mock_object(capabilities.LOG, 'warning') - - self.ssc_library.check_api_permissions() - - self.assertEqual(1, mock_log.call_count) - - def test_check_api_permissions_failed_volume_api(self): - - def check_cluster_api(object_name, operation_name, api): - if api == 'volume-get-iter': - return False - return True - - self.zapi_client.check_cluster_api.side_effect = check_cluster_api - mock_log = self.mock_object(capabilities.LOG, 'warning') - - self.assertRaises(exception.VolumeBackendAPIException, - self.ssc_library.check_api_permissions) - - self.assertEqual(0, mock_log.call_count) - - def test_get_ssc(self): - - result = self.ssc_library.get_ssc() - - self.assertEqual(fake.SSC, result) - self.assertIsNot(fake.SSC, result) - - def test_get_ssc_flexvol_names(self): - - result = self.ssc_library.get_ssc_flexvol_names() - - self.assertItemsEqual(fake.SSC_VOLUMES, result) - - def test_get_ssc_for_flexvol(self): - - result = self.ssc_library.get_ssc_for_flexvol(fake.SSC_VOLUMES[0]) - - self.assertEqual(fake.SSC.get(fake.SSC_VOLUMES[0]), result) - self.assertIsNot(fake.SSC.get(fake.SSC_VOLUMES[0]), result) - - def test_get_ssc_for_flexvol_not_found(self): - - result = self.ssc_library.get_ssc_for_flexvol('invalid') - - self.assertEqual({}, result) - - def test_get_ssc_aggregates(self): - - result = self.ssc_library.get_ssc_aggregates() - - six.assertCountEqual(self, list(fake.SSC_AGGREGATES), result) - - def test_update_ssc(self): - - mock_get_ssc_flexvol_info = self.mock_object( - self.ssc_library, '_get_ssc_flexvol_info', - side_effect=[fake.SSC_FLEXVOL_INFO['volume1'], - fake.SSC_FLEXVOL_INFO['volume2']]) - mock_get_ssc_dedupe_info = self.mock_object( - self.ssc_library, '_get_ssc_dedupe_info', - side_effect=[fake.SSC_DEDUPE_INFO['volume1'], - fake.SSC_DEDUPE_INFO['volume2']]) - mock_get_ssc_mirror_info = self.mock_object( - self.ssc_library, '_get_ssc_mirror_info', - 
side_effect=[fake.SSC_MIRROR_INFO['volume1'], - fake.SSC_MIRROR_INFO['volume2']]) - mock_get_ssc_aggregate_info = self.mock_object( - self.ssc_library, '_get_ssc_aggregate_info', - side_effect=[fake.SSC_AGGREGATE_INFO['volume1'], - fake.SSC_AGGREGATE_INFO['volume2']]) - mock_get_ssc_encryption_info = self.mock_object( - self.ssc_library, '_get_ssc_encryption_info', - side_effect=[fake.SSC_ENCRYPTION_INFO['volume1'], - fake.SSC_ENCRYPTION_INFO['volume2']]) - ordered_ssc = collections.OrderedDict() - ordered_ssc['volume1'] = fake.SSC_VOLUME_MAP['volume1'] - ordered_ssc['volume2'] = fake.SSC_VOLUME_MAP['volume2'] - - result = self.ssc_library.update_ssc(ordered_ssc) - - self.assertIsNone(result) - self.assertEqual(fake.SSC, self.ssc_library.ssc) - mock_get_ssc_flexvol_info.assert_has_calls([ - mock.call('volume1'), mock.call('volume2')]) - mock_get_ssc_dedupe_info.assert_has_calls([ - mock.call('volume1'), mock.call('volume2')]) - mock_get_ssc_mirror_info.assert_has_calls([ - mock.call('volume1'), mock.call('volume2')]) - mock_get_ssc_aggregate_info.assert_has_calls([ - mock.call('aggr1'), mock.call('aggr2')]) - mock_get_ssc_encryption_info.assert_has_calls([ - mock.call('volume1'), mock.call('volume2')]) - - def test__update_for_failover(self): - self.mock_object(self.ssc_library, 'update_ssc') - flexvol_map = {'volume1': fake.SSC_VOLUME_MAP['volume1']} - mock_client = mock.Mock(name='FAKE_ZAPI_CLIENT') - - self.ssc_library._update_for_failover(mock_client, flexvol_map) - - self.assertEqual(mock_client, self.ssc_library.zapi_client) - self.ssc_library.update_ssc.assert_called_once_with(flexvol_map) - - @ddt.data({'lun_space_guarantee': True}, - {'lun_space_guarantee': False}) - @ddt.unpack - def test_get_ssc_flexvol_info_thin_block(self, lun_space_guarantee): - - self.ssc_library.configuration.netapp_lun_space_reservation = \ - 'enabled' if lun_space_guarantee else 'disabled' - self.mock_object(self.ssc_library.zapi_client, - 'get_flexvol', - 
return_value=fake_client.VOLUME_INFO_SSC) - - result = self.ssc_library._get_ssc_flexvol_info( - fake_client.VOLUME_NAMES[0]) - - expected = { - 'netapp_thin_provisioned': 'true', - 'thick_provisioning_support': False, - 'thin_provisioning_support': True, - 'netapp_aggregate': 'fake_aggr1', - } - self.assertEqual(expected, result) - self.zapi_client.get_flexvol.assert_called_once_with( - flexvol_name=fake_client.VOLUME_NAMES[0]) - - @ddt.data({'vol_space_guarantee': 'file', 'lun_space_guarantee': True}, - {'vol_space_guarantee': 'volume', 'lun_space_guarantee': True}) - @ddt.unpack - def test_get_ssc_flexvol_info_thick_block(self, vol_space_guarantee, - lun_space_guarantee): - - self.ssc_library.configuration.netapp_lun_space_reservation = \ - 'enabled' if lun_space_guarantee else 'disabled' - fake_volume_info_ssc = copy.deepcopy(fake_client.VOLUME_INFO_SSC) - fake_volume_info_ssc['space-guarantee'] = vol_space_guarantee - self.mock_object(self.ssc_library.zapi_client, - 'get_flexvol', - return_value=fake_volume_info_ssc) - - result = self.ssc_library._get_ssc_flexvol_info( - fake_client.VOLUME_NAMES[0]) - - expected = { - 'netapp_thin_provisioned': 'false', - 'thick_provisioning_support': lun_space_guarantee, - 'thin_provisioning_support': not lun_space_guarantee, - 'netapp_aggregate': 'fake_aggr1', - } - self.assertEqual(expected, result) - self.zapi_client.get_flexvol.assert_called_once_with( - flexvol_name=fake_client.VOLUME_NAMES[0]) - - @ddt.data({'nfs_sparsed_volumes': True}, - {'nfs_sparsed_volumes': False}) - @ddt.unpack - def test_get_ssc_flexvol_info_thin_file(self, nfs_sparsed_volumes): - - self.ssc_library.protocol = 'nfs' - self.ssc_library.configuration.nfs_sparsed_volumes = \ - nfs_sparsed_volumes - self.mock_object(self.ssc_library.zapi_client, - 'get_flexvol', - return_value=fake_client.VOLUME_INFO_SSC) - - result = self.ssc_library._get_ssc_flexvol_info( - fake_client.VOLUME_NAMES[0]) - - expected = { - 'netapp_thin_provisioned': 'true', - 
'thick_provisioning_support': False, - 'thin_provisioning_support': True, - 'netapp_aggregate': 'fake_aggr1', - } - self.assertEqual(expected, result) - self.zapi_client.get_flexvol.assert_called_once_with( - flexvol_name=fake_client.VOLUME_NAMES[0]) - - @ddt.data({'vol_space_guarantee': 'file', 'nfs_sparsed_volumes': True}, - {'vol_space_guarantee': 'volume', 'nfs_sparsed_volumes': False}) - @ddt.unpack - def test_get_ssc_flexvol_info_thick_file(self, vol_space_guarantee, - nfs_sparsed_volumes): - - self.ssc_library.protocol = 'nfs' - self.ssc_library.configuration.nfs_sparsed_volumes = \ - nfs_sparsed_volumes - fake_volume_info_ssc = copy.deepcopy(fake_client.VOLUME_INFO_SSC) - fake_volume_info_ssc['space-guarantee'] = vol_space_guarantee - self.mock_object(self.ssc_library.zapi_client, - 'get_flexvol', - return_value=fake_volume_info_ssc) - - result = self.ssc_library._get_ssc_flexvol_info( - fake_client.VOLUME_NAMES[0]) - - expected = { - 'netapp_thin_provisioned': 'false', - 'thick_provisioning_support': not nfs_sparsed_volumes, - 'thin_provisioning_support': nfs_sparsed_volumes, - 'netapp_aggregate': 'fake_aggr1', - } - self.assertEqual(expected, result) - self.zapi_client.get_flexvol.assert_called_once_with( - flexvol_name=fake_client.VOLUME_NAMES[0]) - - def test_get_ssc_dedupe_info(self): - - self.mock_object( - self.ssc_library.zapi_client, 'get_flexvol_dedupe_info', - return_value=fake_client.VOLUME_DEDUPE_INFO_SSC) - - result = self.ssc_library._get_ssc_dedupe_info( - fake_client.VOLUME_NAMES[0]) - - expected = { - 'netapp_dedup': 'true', - 'netapp_compression': 'false', - } - self.assertEqual(expected, result) - self.zapi_client.get_flexvol_dedupe_info.assert_called_once_with( - fake_client.VOLUME_NAMES[0]) - - def test_get_ssc_encryption_info(self): - - self.mock_object( - self.ssc_library.zapi_client, 'is_flexvol_encrypted', - return_value=True) - - result = self.ssc_library._get_ssc_encryption_info( - fake_client.VOLUME_NAMES[0]) - - expected = { - 
'netapp_flexvol_encryption': 'true', - } - self.assertEqual(expected, result) - self.zapi_client.is_flexvol_encrypted.assert_called_once_with( - fake_client.VOLUME_NAMES[0], fake_client.VOLUME_VSERVER_NAME) - - @ddt.data(True, False) - def test_get_ssc_mirror_info(self, mirrored): - - self.mock_object( - self.ssc_library.zapi_client, 'is_flexvol_mirrored', - return_value=mirrored) - - result = self.ssc_library._get_ssc_mirror_info( - fake_client.VOLUME_NAMES[0]) - - expected = {'netapp_mirrored': 'true' if mirrored else 'false'} - self.assertEqual(expected, result) - self.zapi_client.is_flexvol_mirrored.assert_called_once_with( - fake_client.VOLUME_NAMES[0], fake.SSC_VSERVER) - - def test_get_ssc_aggregate_info(self): - - self.mock_object( - self.ssc_library.zapi_client, 'get_aggregate', - return_value=fake_client.AGGR_INFO_SSC) - self.mock_object( - self.ssc_library.zapi_client, 'get_aggregate_disk_types', - return_value=fake_client.AGGREGATE_DISK_TYPES) - - result = self.ssc_library._get_ssc_aggregate_info( - fake_client.VOLUME_AGGREGATE_NAME) - - expected = { - 'netapp_disk_type': fake_client.AGGREGATE_DISK_TYPES, - 'netapp_raid_type': fake_client.AGGREGATE_RAID_TYPE, - 'netapp_hybrid_aggregate': 'true', - } - self.assertEqual(expected, result) - self.zapi_client.get_aggregate.assert_called_once_with( - fake_client.VOLUME_AGGREGATE_NAME) - self.zapi_client.get_aggregate_disk_types.assert_called_once_with( - fake_client.VOLUME_AGGREGATE_NAME) - - def test_get_ssc_aggregate_info_not_found(self): - - self.mock_object( - self.ssc_library.zapi_client, 'get_aggregate', return_value={}) - self.mock_object( - self.ssc_library.zapi_client, 'get_aggregate_disk_types', - return_value=None) - - result = self.ssc_library._get_ssc_aggregate_info( - fake_client.VOLUME_AGGREGATE_NAME) - - expected = { - 'netapp_disk_type': None, - 'netapp_raid_type': None, - 'netapp_hybrid_aggregate': None, - } - self.assertEqual(expected, result) - - def 
test_get_matching_flexvols_for_extra_specs(self): - - specs = { - 'thick_provisioning_support': ' False', - 'netapp_compression': 'true', - 'netapp_dedup': 'true', - 'netapp_mirrored': 'true', - 'netapp_raid_type': 'raid_dp', - 'netapp_disk_type': 'FCAL', - 'non_ssc_key': 'fake_value', - } - - result = self.ssc_library.get_matching_flexvols_for_extra_specs(specs) - - self.assertEqual(['volume2'], result) - - @ddt.data( - { - 'flexvol_info': { - 'netapp_dedup': 'true', - }, - 'extra_specs': { - 'netapp_dedup': 'true', - 'non_ssc_key': 'fake_value', - } - }, - { - 'flexvol_info': fake.SSC['volume1'], - 'extra_specs': { - 'netapp_disk_type': 'SSD', - 'pool_name': 'volume1', - } - }, - { - 'flexvol_info': fake.SSC['volume2'], - 'extra_specs': { - 'netapp_disk_type': 'SSD', - 'netapp_hybrid_aggregate': 'true', - } - } - ) - @ddt.unpack - def test_flexvol_matches_extra_specs(self, flexvol_info, extra_specs): - - result = self.ssc_library._flexvol_matches_extra_specs(flexvol_info, - extra_specs) - - self.assertTrue(result) - - @ddt.data( - { - 'flexvol_info': { - 'netapp_dedup': 'true', - }, - 'extra_specs': { - 'netapp_dedup': 'false', - 'non_ssc_key': 'fake_value', - } - }, - { - 'flexvol_info': fake.SSC['volume2'], - 'extra_specs': { - 'netapp_disk_type': 'SSD', - 'pool_name': 'volume1', - } - }, - { - 'flexvol_info': fake.SSC['volume2'], - 'extra_specs': { - 'netapp_disk_type': 'SATA', - } - } - ) - @ddt.unpack - def test_flexvol_matches_extra_specs_no_match(self, flexvol_info, - extra_specs): - - result = self.ssc_library._flexvol_matches_extra_specs(flexvol_info, - extra_specs) - - self.assertFalse(result) - - @ddt.data(('SSD', 'SSD'), ('SSD', ['SSD', 'FCAL'])) - @ddt.unpack - def test_extra_spec_matches(self, extra_spec_value, ssc_flexvol_value): - - result = self.ssc_library._extra_spec_matches(extra_spec_value, - ssc_flexvol_value) - - self.assertTrue(result) - - @ddt.data(('SSD', 'FCAL'), ('SSD', ['FCAL'])) - @ddt.unpack - def 
test_extra_spec_matches_no_match(self, extra_spec_value, - ssc_flexvol_value): - - result = self.ssc_library._extra_spec_matches(extra_spec_value, - ssc_flexvol_value) - - self.assertFalse(result) - - def test_modify_extra_specs_for_comparison(self): - - specs = { - 'thick_provisioning_support': ' False', - 'thin_provisioning_support': ' true', - 'netapp_compression': 'true', - } - - result = self.ssc_library._modify_extra_specs_for_comparison(specs) - - expected = { - 'thick_provisioning_support': False, - 'thin_provisioning_support': True, - 'netapp_compression': 'true', - } - self.assertEqual(expected, result) diff --git a/cinder/tests/unit/volume/drivers/netapp/dataontap/utils/test_data_motion.py b/cinder/tests/unit/volume/drivers/netapp/dataontap/utils/test_data_motion.py deleted file mode 100644 index 1165d776a..000000000 --- a/cinder/tests/unit/volume/drivers/netapp/dataontap/utils/test_data_motion.py +++ /dev/null @@ -1,784 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -import time - -import copy -import ddt -import mock -from oslo_config import cfg - -from cinder import exception -from cinder import test -from cinder.tests.unit.volume.drivers.netapp.dataontap.utils import fakes -from cinder.volume import configuration -from cinder.volume import driver -from cinder.volume.drivers.netapp.dataontap.client import api as netapp_api -from cinder.volume.drivers.netapp.dataontap.client import client_cmode -from cinder.volume.drivers.netapp.dataontap.utils import data_motion -from cinder.volume.drivers.netapp.dataontap.utils import utils -from cinder.volume.drivers.netapp import options as na_opts - - -CONF = cfg.CONF - - -@ddt.ddt -class NetAppCDOTDataMotionMixinTestCase(test.TestCase): - - def setUp(self): - super(NetAppCDOTDataMotionMixinTestCase, self).setUp() - self.dm_mixin = data_motion.DataMotionMixin() - self.src_backend = 'backend1' - self.dest_backend = 'backend2' - self.src_vserver = 'source_vserver' - self.dest_vserver = 'dest_vserver' - self._setup_mock_config() - self.mock_cmode_client = self.mock_object(client_cmode, 'Client') - self.src_flexvol_name = 'volume_c02d497a_236c_4852_812a_0d39373e312a' - self.dest_flexvol_name = self.src_flexvol_name - self.mock_src_client = mock.Mock() - self.mock_dest_client = mock.Mock() - self.config = fakes.get_fake_cmode_config(self.src_backend) - self.mock_object(utils, 'get_backend_configuration', - side_effect=[self.mock_dest_config, - self.mock_src_config]) - self.mock_object(utils, 'get_client_for_backend', - side_effect=[self.mock_dest_client, - self.mock_src_client]) - - def _setup_mock_config(self): - self.mock_src_config = configuration.Configuration( - driver.volume_opts, config_group=self.src_backend) - self.mock_dest_config = configuration.Configuration( - driver.volume_opts, config_group=self.dest_backend) - - for config in (self.mock_src_config, self.mock_dest_config): - config.append_config_values(na_opts.netapp_proxy_opts) - 
config.append_config_values(na_opts.netapp_connection_opts) - config.append_config_values(na_opts.netapp_transport_opts) - config.append_config_values(na_opts.netapp_basicauth_opts) - config.append_config_values(na_opts.netapp_provisioning_opts) - config.append_config_values(na_opts.netapp_cluster_opts) - config.append_config_values(na_opts.netapp_san_opts) - config.append_config_values(na_opts.netapp_replication_opts) - config.netapp_snapmirror_quiesce_timeout = 10 - - CONF.set_override('netapp_vserver', self.src_vserver, - group=self.src_backend) - CONF.set_override('netapp_vserver', self.dest_vserver, - group=self.dest_backend) - - @ddt.data(None, [], [{'some_key': 'some_value'}]) - def test_get_replication_backend_names_none(self, replication_device): - CONF.set_override('replication_device', replication_device, - group=self.src_backend) - - devices = self.dm_mixin.get_replication_backend_names(self.config) - - self.assertEqual(0, len(devices)) - - @ddt.data([{'backend_id': 'xyzzy'}, {'backend_id': 'spoon!'}], - [{'backend_id': 'foobar'}]) - def test_get_replication_backend_names_valid(self, replication_device): - CONF.set_override('replication_device', replication_device, - group=self.src_backend) - - devices = self.dm_mixin.get_replication_backend_names(self.config) - - self.assertEqual(len(replication_device), len(devices)) - - def test_get_snapmirrors(self): - self.mock_object(self.mock_dest_client, 'get_snapmirrors') - - self.dm_mixin.get_snapmirrors(self.src_backend, - self.dest_backend, - self.src_flexvol_name, - self.dest_flexvol_name) - - self.mock_dest_client.get_snapmirrors.assert_called_with( - self.src_vserver, self.src_flexvol_name, - self.dest_vserver, self.dest_flexvol_name, - desired_attributes=['relationship-status', - 'mirror-state', - 'source-vserver', - 'source-volume', - 'destination-vserver', - 'destination-volume', - 'last-transfer-end-timestamp', - 'lag-time']) - self.assertEqual(1, self.mock_dest_client.get_snapmirrors.call_count) - - 
@ddt.data([], ['backend1'], ['backend1', 'backend2']) - def test_get_replication_backend_stats(self, replication_backend_names): - self.mock_object(self.dm_mixin, 'get_replication_backend_names', - return_value=replication_backend_names) - enabled_stats = { - 'replication_count': len(replication_backend_names), - 'replication_targets': replication_backend_names, - 'replication_type': 'async', - } - expected_stats = { - 'replication_enabled': len(replication_backend_names) > 0, - } - if len(replication_backend_names) > 0: - expected_stats.update(enabled_stats) - - actual_stats = self.dm_mixin.get_replication_backend_stats(self.config) - - self.assertDictEqual(expected_stats, actual_stats) - - @ddt.data(None, [], - [{'backend_id': 'replication_backend_2', 'aggr2': 'aggr20'}]) - def test_get_replication_aggregate_map_none(self, replication_aggr_map): - - self.mock_object(utils, 'get_backend_configuration', - return_value=self.config) - CONF.set_override('netapp_replication_aggregate_map', - replication_aggr_map, - group=self.src_backend) - - aggr_map = self.dm_mixin._get_replication_aggregate_map( - self.src_backend, 'replication_backend_1') - - self.assertEqual(0, len(aggr_map)) - - @ddt.data([{'backend_id': 'replication_backend_1', 'aggr1': 'aggr10'}], - [{'backend_id': 'replication_backend_1', 'aggr1': 'aggr10'}, - {'backend_id': 'replication_backend_2', 'aggr2': 'aggr20'}]) - def test_get_replication_aggregate_map_valid(self, replication_aggr_map): - self.mock_object(utils, 'get_backend_configuration', - return_value=self.config) - CONF.set_override('netapp_replication_aggregate_map', - replication_aggr_map, group=self.src_backend) - - aggr_map = self.dm_mixin._get_replication_aggregate_map( - self.src_backend, 'replication_backend_1') - - self.assertDictEqual({'aggr1': 'aggr10'}, aggr_map) - - @ddt.data(True, False) - def test_create_snapmirror_dest_flexvol_exists(self, dest_exists): - mock_dest_client = mock.Mock() - self.mock_object(mock_dest_client, 
'flexvol_exists', - return_value=dest_exists) - self.mock_object(mock_dest_client, 'get_snapmirrors', - return_value=None) - create_destination_flexvol = self.mock_object( - self.dm_mixin, 'create_destination_flexvol') - self.mock_object(utils, 'get_client_for_backend', - return_value=mock_dest_client) - - self.dm_mixin.create_snapmirror(self.src_backend, - self.dest_backend, - self.src_flexvol_name, - self.dest_flexvol_name) - - if not dest_exists: - create_destination_flexvol.assert_called_once_with( - self.src_backend, self.dest_backend, self.src_flexvol_name, - self.dest_flexvol_name) - else: - self.assertFalse(create_destination_flexvol.called) - mock_dest_client.create_snapmirror.assert_called_once_with( - self.src_vserver, self.src_flexvol_name, self.dest_vserver, - self.dest_flexvol_name, schedule='hourly') - mock_dest_client.initialize_snapmirror.assert_called_once_with( - self.src_vserver, self.src_flexvol_name, self.dest_vserver, - self.dest_flexvol_name) - - @ddt.data('uninitialized', 'broken-off', 'snapmirrored') - def test_create_snapmirror_snapmirror_exists_state(self, mirror_state): - mock_dest_client = mock.Mock() - existing_snapmirrors = [{'mirror-state': mirror_state}] - self.mock_object(self.dm_mixin, 'create_destination_flexvol') - self.mock_object(utils, 'get_client_for_backend', - return_value=mock_dest_client) - self.mock_object(mock_dest_client, 'flexvol_exists', - return_value=True) - self.mock_object(mock_dest_client, 'get_snapmirrors', - return_value=existing_snapmirrors) - - self.dm_mixin.create_snapmirror(self.src_backend, - self.dest_backend, - self.src_flexvol_name, - self.dest_flexvol_name) - - self.assertFalse(mock_dest_client.create_snapmirror.called) - self.assertFalse(mock_dest_client.initialize_snapmirror.called) - self.assertFalse(self.dm_mixin.create_destination_flexvol.called) - if mirror_state == 'snapmirrored': - self.assertFalse(mock_dest_client.resume_snapmirror.called) - 
self.assertFalse(mock_dest_client.resync_snapmirror.called) - else: - mock_dest_client.resume_snapmirror.assert_called_once_with( - self.src_vserver, self.src_flexvol_name, - self.dest_vserver, self.dest_flexvol_name) - mock_dest_client.resume_snapmirror.assert_called_once_with( - self.src_vserver, self.src_flexvol_name, - self.dest_vserver, self.dest_flexvol_name) - - @ddt.data('resume_snapmirror', 'resync_snapmirror') - def test_create_snapmirror_snapmirror_exists_repair_exception(self, - failed_call): - mock_dest_client = mock.Mock() - mock_exception_log = self.mock_object(data_motion.LOG, 'exception') - existing_snapmirrors = [{'mirror-state': 'broken-off'}] - self.mock_object(self.dm_mixin, 'create_destination_flexvol') - self.mock_object(utils, 'get_client_for_backend', - return_value=mock_dest_client) - self.mock_object(mock_dest_client, 'flexvol_exists', - return_value=True) - self.mock_object(mock_dest_client, 'get_snapmirrors', - return_value=existing_snapmirrors) - self.mock_object(mock_dest_client, failed_call, - side_effect=netapp_api.NaApiError) - - self.dm_mixin.create_snapmirror(self.src_backend, - self.dest_backend, - self.src_flexvol_name, - self.dest_flexvol_name) - - self.assertFalse(mock_dest_client.create_snapmirror.called) - self.assertFalse(mock_dest_client.initialize_snapmirror.called) - self.assertFalse(self.dm_mixin.create_destination_flexvol.called) - mock_dest_client.resume_snapmirror.assert_called_once_with( - self.src_vserver, self.src_flexvol_name, - self.dest_vserver, self.dest_flexvol_name) - mock_dest_client.resume_snapmirror.assert_called_once_with( - self.src_vserver, self.src_flexvol_name, - self.dest_vserver, self.dest_flexvol_name) - self.assertEqual(1, mock_exception_log.call_count) - - def test_delete_snapmirror(self): - mock_src_client = mock.Mock() - mock_dest_client = mock.Mock() - self.mock_object(utils, 'get_client_for_backend', - side_effect=[mock_dest_client, mock_src_client]) - - 
self.dm_mixin.delete_snapmirror(self.src_backend, - self.dest_backend, - self.src_flexvol_name, - self.dest_flexvol_name) - - mock_dest_client.abort_snapmirror.assert_called_once_with( - self.src_vserver, self.src_flexvol_name, self.dest_vserver, - self.dest_flexvol_name, clear_checkpoint=False) - mock_dest_client.delete_snapmirror.assert_called_once_with( - self.src_vserver, self.src_flexvol_name, self.dest_vserver, - self.dest_flexvol_name) - mock_src_client.release_snapmirror.assert_called_once_with( - self.src_vserver, self.src_flexvol_name, self.dest_vserver, - self.dest_flexvol_name) - - def test_delete_snapmirror_does_not_exist(self): - """Ensure delete succeeds when the snapmirror does not exist.""" - mock_src_client = mock.Mock() - mock_dest_client = mock.Mock() - mock_dest_client.abort_snapmirror.side_effect = netapp_api.NaApiError( - code=netapp_api.EAPIERROR) - self.mock_object(utils, 'get_client_for_backend', - side_effect=[mock_dest_client, mock_src_client]) - - self.dm_mixin.delete_snapmirror(self.src_backend, - self.dest_backend, - self.src_flexvol_name, - self.dest_flexvol_name) - - mock_dest_client.abort_snapmirror.assert_called_once_with( - self.src_vserver, self.src_flexvol_name, self.dest_vserver, - self.dest_flexvol_name, clear_checkpoint=False) - mock_dest_client.delete_snapmirror.assert_called_once_with( - self.src_vserver, self.src_flexvol_name, self.dest_vserver, - self.dest_flexvol_name) - mock_src_client.release_snapmirror.assert_called_once_with( - self.src_vserver, self.src_flexvol_name, self.dest_vserver, - self.dest_flexvol_name) - - def test_delete_snapmirror_error_deleting(self): - """Ensure delete succeeds when the snapmirror does not exist.""" - mock_src_client = mock.Mock() - mock_dest_client = mock.Mock() - mock_dest_client.delete_snapmirror.side_effect = netapp_api.NaApiError( - code=netapp_api.ESOURCE_IS_DIFFERENT - ) - self.mock_object(utils, 'get_client_for_backend', - side_effect=[mock_dest_client, mock_src_client]) - - 
self.dm_mixin.delete_snapmirror(self.src_backend, - self.dest_backend, - self.src_flexvol_name, - self.dest_flexvol_name) - - mock_dest_client.abort_snapmirror.assert_called_once_with( - self.src_vserver, self.src_flexvol_name, self.dest_vserver, - self.dest_flexvol_name, clear_checkpoint=False) - mock_dest_client.delete_snapmirror.assert_called_once_with( - self.src_vserver, self.src_flexvol_name, self.dest_vserver, - self.dest_flexvol_name) - mock_src_client.release_snapmirror.assert_called_once_with( - self.src_vserver, self.src_flexvol_name, self.dest_vserver, - self.dest_flexvol_name) - - def test_delete_snapmirror_error_releasing(self): - """Ensure delete succeeds when the snapmirror does not exist.""" - mock_src_client = mock.Mock() - mock_dest_client = mock.Mock() - mock_src_client.release_snapmirror.side_effect = ( - netapp_api.NaApiError(code=netapp_api.EOBJECTNOTFOUND)) - self.mock_object(utils, 'get_client_for_backend', - side_effect=[mock_dest_client, mock_src_client]) - - self.dm_mixin.delete_snapmirror(self.src_backend, - self.dest_backend, - self.src_flexvol_name, - self.dest_flexvol_name) - - mock_dest_client.abort_snapmirror.assert_called_once_with( - self.src_vserver, self.src_flexvol_name, self.dest_vserver, - self.dest_flexvol_name, clear_checkpoint=False) - mock_dest_client.delete_snapmirror.assert_called_once_with( - self.src_vserver, self.src_flexvol_name, self.dest_vserver, - self.dest_flexvol_name) - mock_src_client.release_snapmirror.assert_called_once_with( - self.src_vserver, self.src_flexvol_name, self.dest_vserver, - self.dest_flexvol_name) - - def test_delete_snapmirror_without_release(self): - mock_src_client = mock.Mock() - mock_dest_client = mock.Mock() - self.mock_object(utils, 'get_client_for_backend', - side_effect=[mock_dest_client, mock_src_client]) - - self.dm_mixin.delete_snapmirror(self.src_backend, - self.dest_backend, - self.src_flexvol_name, - self.dest_flexvol_name, - release=False) - - 
mock_dest_client.abort_snapmirror.assert_called_once_with( - self.src_vserver, self.src_flexvol_name, self.dest_vserver, - self.dest_flexvol_name, clear_checkpoint=False) - mock_dest_client.delete_snapmirror.assert_called_once_with( - self.src_vserver, self.src_flexvol_name, self.dest_vserver, - self.dest_flexvol_name) - self.assertFalse(mock_src_client.release_snapmirror.called) - - def test_delete_snapmirror_source_unreachable(self): - mock_src_client = mock.Mock() - mock_dest_client = mock.Mock() - self.mock_object(utils, 'get_client_for_backend', - side_effect=[mock_dest_client, Exception]) - - self.dm_mixin.delete_snapmirror(self.src_backend, - self.dest_backend, - self.src_flexvol_name, - self.dest_flexvol_name) - - mock_dest_client.abort_snapmirror.assert_called_once_with( - self.src_vserver, self.src_flexvol_name, self.dest_vserver, - self.dest_flexvol_name, clear_checkpoint=False) - mock_dest_client.delete_snapmirror.assert_called_once_with( - self.src_vserver, self.src_flexvol_name, self.dest_vserver, - self.dest_flexvol_name) - - self.assertFalse(mock_src_client.release_snapmirror.called) - - def test_quiesce_then_abort_timeout(self): - self.mock_object(time, 'sleep') - mock_get_snapmirrors = mock.Mock( - return_value=[{'relationship-status': 'transferring'}]) - self.mock_object(self.mock_dest_client, 'get_snapmirrors', - mock_get_snapmirrors) - - self.dm_mixin.quiesce_then_abort(self.src_backend, - self.dest_backend, - self.src_flexvol_name, - self.dest_flexvol_name) - - self.mock_dest_client.get_snapmirrors.assert_called_with( - self.src_vserver, self.src_flexvol_name, - self.dest_vserver, self.dest_flexvol_name, - desired_attributes=['relationship-status', 'mirror-state']) - self.assertEqual(2, self.mock_dest_client.get_snapmirrors.call_count) - self.mock_dest_client.quiesce_snapmirror.assert_called_with( - self.src_vserver, self.src_flexvol_name, - self.dest_vserver, self.dest_flexvol_name) - 
self.mock_dest_client.abort_snapmirror.assert_called_once_with( - self.src_vserver, self.src_flexvol_name, self.dest_vserver, - self.dest_flexvol_name, clear_checkpoint=False) - - def test_update_snapmirror(self): - self.mock_object(self.mock_dest_client, 'get_snapmirrors') - - self.dm_mixin.update_snapmirror(self.src_backend, - self.dest_backend, - self.src_flexvol_name, - self.dest_flexvol_name) - - self.mock_dest_client.update_snapmirror.assert_called_once_with( - self.src_vserver, self.src_flexvol_name, - self.dest_vserver, self.dest_flexvol_name) - - def test_quiesce_then_abort_wait_for_quiesced(self): - self.mock_object(time, 'sleep') - self.mock_object(self.mock_dest_client, 'get_snapmirrors', - side_effect=[ - [{'relationship-status': 'transferring'}], - [{'relationship-status': 'quiesced'}]]) - - self.dm_mixin.quiesce_then_abort(self.src_backend, - self.dest_backend, - self.src_flexvol_name, - self.dest_flexvol_name) - - self.mock_dest_client.get_snapmirrors.assert_called_with( - self.src_vserver, self.src_flexvol_name, - self.dest_vserver, self.dest_flexvol_name, - desired_attributes=['relationship-status', 'mirror-state']) - self.assertEqual(2, self.mock_dest_client.get_snapmirrors.call_count) - self.mock_dest_client.quiesce_snapmirror.assert_called_once_with( - self.src_vserver, self.src_flexvol_name, - self.dest_vserver, self.dest_flexvol_name) - - def test_break_snapmirror(self): - self.mock_object(self.dm_mixin, 'quiesce_then_abort') - - self.dm_mixin.break_snapmirror(self.src_backend, - self.dest_backend, - self.src_flexvol_name, - self.dest_flexvol_name) - - self.dm_mixin.quiesce_then_abort.assert_called_once_with( - self.src_backend, self.dest_backend, - self.src_flexvol_name, self.dest_flexvol_name) - self.mock_dest_client.break_snapmirror.assert_called_once_with( - self.src_vserver, self.src_flexvol_name, - self.dest_vserver, self.dest_flexvol_name) - self.mock_dest_client.mount_flexvol.assert_called_once_with( - self.dest_flexvol_name) - - def 
test_break_snapmirror_wait_for_quiesced(self): - self.mock_object(self.dm_mixin, 'quiesce_then_abort') - - self.dm_mixin.break_snapmirror(self.src_backend, - self.dest_backend, - self.src_flexvol_name, - self.dest_flexvol_name) - - self.dm_mixin.quiesce_then_abort.assert_called_once_with( - self.src_backend, self.dest_backend, - self.src_flexvol_name, self.dest_flexvol_name,) - self.mock_dest_client.break_snapmirror.assert_called_once_with( - self.src_vserver, self.src_flexvol_name, - self.dest_vserver, self.dest_flexvol_name) - self.mock_dest_client.mount_flexvol.assert_called_once_with( - self.dest_flexvol_name) - - def test_resync_snapmirror(self): - self.dm_mixin.resync_snapmirror(self.src_backend, - self.dest_backend, - self.src_flexvol_name, - self.dest_flexvol_name) - - self.mock_dest_client.resync_snapmirror.assert_called_once_with( - self.src_vserver, self.src_flexvol_name, - self.dest_vserver, self.dest_flexvol_name) - - def test_resume_snapmirror(self): - self.dm_mixin.resume_snapmirror(self.src_backend, - self.dest_backend, - self.src_flexvol_name, - self.dest_flexvol_name) - - self.mock_dest_client.resume_snapmirror.assert_called_once_with( - self.src_vserver, self.src_flexvol_name, self.dest_vserver, - self.dest_flexvol_name) - - @ddt.data({'size': 1, 'aggr_map': {}}, - {'size': 1, 'aggr_map': {'aggr02': 'aggr20'}}, - {'size': None, 'aggr_map': {'aggr01': 'aggr10'}}) - @ddt.unpack - def test_create_destination_flexvol_exception(self, size, aggr_map): - self.mock_object( - self.mock_src_client, 'get_provisioning_options_from_flexvol', - return_value={'size': size, 'aggregate': 'aggr01'}) - self.mock_object(self.dm_mixin, '_get_replication_aggregate_map', - return_value=aggr_map) - mock_client_call = self.mock_object( - self.mock_dest_client, 'create_flexvol') - - self.assertRaises(exception.NetAppDriverException, - self.dm_mixin.create_destination_flexvol, - self.src_backend, self.dest_backend, - self.src_flexvol_name, self.dest_flexvol_name) - if 
size: - self.dm_mixin._get_replication_aggregate_map.\ - assert_called_once_with(self.src_backend, self.dest_backend) - else: - self.assertFalse( - self.dm_mixin._get_replication_aggregate_map.called) - self.assertFalse(mock_client_call.called) - - def test_create_destination_flexvol(self): - aggr_map = { - fakes.PROVISIONING_OPTS['aggregate']: 'aggr01', - 'aggr20': 'aggr02', - } - provisioning_opts = copy.deepcopy(fakes.PROVISIONING_OPTS) - expected_prov_opts = copy.deepcopy(fakes.PROVISIONING_OPTS) - expected_prov_opts.pop('volume_type', None) - expected_prov_opts.pop('size', None) - expected_prov_opts.pop('aggregate', None) - mock_get_provisioning_opts_call = self.mock_object( - self.mock_src_client, 'get_provisioning_options_from_flexvol', - return_value=provisioning_opts) - mock_is_flexvol_encrypted = self.mock_object( - self.mock_src_client, 'is_flexvol_encrypted', - return_value=False) - self.mock_object(self.dm_mixin, '_get_replication_aggregate_map', - return_value=aggr_map) - mock_client_call = self.mock_object( - self.mock_dest_client, 'create_flexvol') - - retval = self.dm_mixin.create_destination_flexvol( - self.src_backend, self.dest_backend, - self.src_flexvol_name, self.dest_flexvol_name) - - self.assertIsNone(retval) - mock_get_provisioning_opts_call.assert_called_once_with( - self.src_flexvol_name) - self.dm_mixin._get_replication_aggregate_map.assert_called_once_with( - self.src_backend, self.dest_backend) - mock_client_call.assert_called_once_with( - self.dest_flexvol_name, 'aggr01', fakes.PROVISIONING_OPTS['size'], - volume_type='dp', **expected_prov_opts) - mock_is_flexvol_encrypted.assert_called_once_with( - self.src_flexvol_name, self.src_vserver) - - def test_create_encrypted_destination_flexvol(self): - aggr_map = { - fakes.ENCRYPTED_PROVISIONING_OPTS['aggregate']: 'aggr01', - 'aggr20': 'aggr02', - } - provisioning_opts = copy.deepcopy(fakes.ENCRYPTED_PROVISIONING_OPTS) - expected_prov_opts = 
copy.deepcopy(fakes.ENCRYPTED_PROVISIONING_OPTS) - expected_prov_opts.pop('volume_type', None) - expected_prov_opts.pop('size', None) - expected_prov_opts.pop('aggregate', None) - mock_get_provisioning_opts_call = self.mock_object( - self.mock_src_client, 'get_provisioning_options_from_flexvol', - return_value=provisioning_opts) - mock_is_flexvol_encrypted = self.mock_object( - self.mock_src_client, 'is_flexvol_encrypted', - return_value=True) - self.mock_object(self.dm_mixin, '_get_replication_aggregate_map', - return_value=aggr_map) - mock_client_call = self.mock_object( - self.mock_dest_client, 'create_flexvol') - - retval = self.dm_mixin.create_destination_flexvol( - self.src_backend, self.dest_backend, - self.src_flexvol_name, self.dest_flexvol_name) - - self.assertIsNone(retval) - mock_get_provisioning_opts_call.assert_called_once_with( - self.src_flexvol_name) - self.dm_mixin._get_replication_aggregate_map.assert_called_once_with( - self.src_backend, self.dest_backend) - mock_client_call.assert_called_once_with( - self.dest_flexvol_name, 'aggr01', - fakes.ENCRYPTED_PROVISIONING_OPTS['size'], - volume_type='dp', **expected_prov_opts) - mock_is_flexvol_encrypted.assert_called_once_with( - self.src_flexvol_name, self.src_vserver) - - def test_ensure_snapmirrors(self): - flexvols = ['nvol1', 'nvol2'] - replication_backends = ['fallback1', 'fallback2'] - self.mock_object(self.dm_mixin, 'get_replication_backend_names', - return_value=replication_backends) - self.mock_object(self.dm_mixin, 'create_snapmirror') - expected_calls = [ - mock.call(self.src_backend, replication_backends[0], - flexvols[0], flexvols[0]), - mock.call(self.src_backend, replication_backends[0], - flexvols[1], flexvols[1]), - mock.call(self.src_backend, replication_backends[1], - flexvols[0], flexvols[0]), - mock.call(self.src_backend, replication_backends[1], - flexvols[1], flexvols[1]), - ] - - retval = self.dm_mixin.ensure_snapmirrors(self.mock_src_config, - self.src_backend, - flexvols) - 
- self.assertIsNone(retval) - self.dm_mixin.get_replication_backend_names.assert_called_once_with( - self.mock_src_config) - self.dm_mixin.create_snapmirror.assert_has_calls(expected_calls) - - def test_break_snapmirrors(self): - flexvols = ['nvol1', 'nvol2'] - replication_backends = ['fallback1', 'fallback2'] - side_effects = [None, netapp_api.NaApiError, None, None] - self.mock_object(self.dm_mixin, 'get_replication_backend_names', - return_value=replication_backends) - self.mock_object(self.dm_mixin, 'break_snapmirror', - side_effect=side_effects) - mock_exc_log = self.mock_object(data_motion.LOG, 'exception') - expected_calls = [ - mock.call(self.src_backend, replication_backends[0], - flexvols[0], flexvols[0]), - mock.call(self.src_backend, replication_backends[0], - flexvols[1], flexvols[1]), - mock.call(self.src_backend, replication_backends[1], - flexvols[0], flexvols[0]), - mock.call(self.src_backend, replication_backends[1], - flexvols[1], flexvols[1]), - ] - - failed_to_break = self.dm_mixin.break_snapmirrors( - self.mock_src_config, self.src_backend, flexvols, 'fallback1') - - self.assertEqual(1, len(failed_to_break)) - self.assertEqual(1, mock_exc_log.call_count) - self.dm_mixin.get_replication_backend_names.assert_called_once_with( - self.mock_src_config) - self.dm_mixin.break_snapmirror.assert_has_calls(expected_calls) - - def test_update_snapmirrors(self): - flexvols = ['nvol1', 'nvol2'] - replication_backends = ['fallback1', 'fallback2'] - self.mock_object(self.dm_mixin, 'get_replication_backend_names', - return_value=replication_backends) - side_effects = [None, netapp_api.NaApiError, None, None] - self.mock_object(self.dm_mixin, 'update_snapmirror', - side_effect=side_effects) - expected_calls = [ - mock.call(self.src_backend, replication_backends[0], - flexvols[0], flexvols[0]), - mock.call(self.src_backend, replication_backends[0], - flexvols[1], flexvols[1]), - mock.call(self.src_backend, replication_backends[1], - flexvols[0], flexvols[0]), - 
mock.call(self.src_backend, replication_backends[1], - flexvols[1], flexvols[1]), - ] - - retval = self.dm_mixin.update_snapmirrors(self.mock_src_config, - self.src_backend, - flexvols) - - self.assertIsNone(retval) - self.dm_mixin.get_replication_backend_names.assert_called_once_with( - self.mock_src_config) - self.dm_mixin.update_snapmirror.assert_has_calls(expected_calls) - - @ddt.data([{'destination-volume': 'nvol3', 'lag-time': '3223'}, - {'destination-volume': 'nvol5', 'lag-time': '32'}], - []) - def test__choose_failover_target_no_failover_targets(self, snapmirrors): - flexvols = ['nvol1', 'nvol2'] - replication_backends = ['fallback1', 'fallback2'] - mock_debug_log = self.mock_object(data_motion.LOG, 'debug') - self.mock_object(self.dm_mixin, 'get_snapmirrors', - return_value=snapmirrors) - - target = self.dm_mixin._choose_failover_target( - self.src_backend, flexvols, replication_backends) - - self.assertIsNone(target) - self.assertEqual(2, mock_debug_log.call_count) - - def test__choose_failover_target(self): - flexvols = ['nvol1', 'nvol2'] - replication_backends = ['fallback1', 'fallback2'] - target_1_snapmirrors = [ - {'destination-volume': 'nvol3', 'lag-time': '12'}, - {'destination-volume': 'nvol1', 'lag-time': '1541'}, - {'destination-volume': 'nvol2', 'lag-time': '16'}, - ] - target_2_snapmirrors = [ - {'destination-volume': 'nvol2', 'lag-time': '717'}, - {'destination-volume': 'nvol1', 'lag-time': '323'}, - {'destination-volume': 'nvol3', 'lag-time': '720'}, - ] - mock_debug_log = self.mock_object(data_motion.LOG, 'debug') - self.mock_object(self.dm_mixin, 'get_snapmirrors', - side_effect=[target_1_snapmirrors, - target_2_snapmirrors]) - - target = self.dm_mixin._choose_failover_target( - self.src_backend, flexvols, replication_backends) - - self.assertEqual('fallback2', target) - self.assertFalse(mock_debug_log.called) - - def test__failover_host_no_suitable_target(self): - flexvols = ['nvol1', 'nvol2'] - replication_backends = ['fallback1', 
'fallback2'] - self.mock_object(self.dm_mixin, '_choose_failover_target', - return_value=None) - self.mock_object(utils, 'get_backend_configuration') - self.mock_object(self.dm_mixin, 'update_snapmirrors') - self.mock_object(self.dm_mixin, 'break_snapmirrors') - - self.assertRaises(exception.NetAppDriverException, - self.dm_mixin._complete_failover, - self.src_backend, replication_backends, flexvols, - [], failover_target=None) - self.assertFalse(utils.get_backend_configuration.called) - self.assertFalse(self.dm_mixin.update_snapmirrors.called) - self.assertFalse(self.dm_mixin.break_snapmirrors.called) - - @ddt.data('fallback1', None) - def test__failover_host(self, failover_target): - flexvols = ['nvol1', 'nvol2', 'nvol3'] - replication_backends = ['fallback1', 'fallback2'] - volumes = [ - {'id': 'xyzzy', 'host': 'openstack@backend1#nvol1'}, - {'id': 'foobar', 'host': 'openstack@backend1#nvol2'}, - {'id': 'waldofred', 'host': 'openstack@backend1#nvol3'}, - ] - expected_volume_updates = [ - { - 'volume_id': 'xyzzy', - 'updates': {'replication_status': 'failed-over'}, - }, - { - 'volume_id': 'foobar', - 'updates': {'replication_status': 'failed-over'}, - }, - { - 'volume_id': 'waldofred', - 'updates': {'replication_status': 'error'}, - }, - ] - expected_active_backend_name = failover_target or 'fallback2' - self.mock_object(self.dm_mixin, '_choose_failover_target', - return_value='fallback2') - self.mock_object(utils, 'get_backend_configuration') - self.mock_object(self.dm_mixin, 'update_snapmirrors') - self.mock_object(self.dm_mixin, 'break_snapmirrors', - return_value=['nvol3']) - - actual_active_backend_name, actual_volume_updates = ( - self.dm_mixin._complete_failover( - self.src_backend, replication_backends, flexvols, - volumes, failover_target=failover_target) - ) - - self.assertEqual(expected_active_backend_name, - actual_active_backend_name) - self.assertEqual(expected_volume_updates, actual_volume_updates) diff --git 
a/cinder/tests/unit/volume/drivers/netapp/dataontap/utils/test_loopingcalls.py b/cinder/tests/unit/volume/drivers/netapp/dataontap/utils/test_loopingcalls.py deleted file mode 100644 index 201474d9d..000000000 --- a/cinder/tests/unit/volume/drivers/netapp/dataontap/utils/test_loopingcalls.py +++ /dev/null @@ -1,63 +0,0 @@ -# Copyright (c) 2016 Chuck Fouts. All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import mock -from oslo_service import loopingcall - -from cinder import test -from cinder.volume.drivers.netapp.dataontap.utils import loopingcalls - - -class LoopingCallsTestCase(test.TestCase): - - def setUp(self): - super(LoopingCallsTestCase, self).setUp() - self.mock_first_looping_task = mock.Mock() - self.mock_second_looping_task = mock.Mock() - - self.mock_loopingcall = self.mock_object( - loopingcall, - 'FixedIntervalLoopingCall', - side_effect=[self.mock_first_looping_task, - self.mock_second_looping_task] - ) - self.loopingcalls = loopingcalls.LoopingCalls() - - def test_add_task(self): - interval = 3600 - initial_delay = 5 - - self.loopingcalls.add_task(self.mock_first_looping_task, interval) - self.loopingcalls.add_task( - self.mock_second_looping_task, interval, initial_delay) - - self.assertEqual(2, len(self.loopingcalls.tasks)) - self.assertEqual(interval, self.loopingcalls.tasks[0].interval) - self.assertEqual(initial_delay, - self.loopingcalls.tasks[1].initial_delay) - - def test_start_tasks(self): - interval = 3600 - 
initial_delay = 5 - - self.loopingcalls.add_task(self.mock_first_looping_task, interval) - self.loopingcalls.add_task( - self.mock_second_looping_task, interval, initial_delay) - - self.loopingcalls.start_tasks() - - self.mock_first_looping_task.start.assert_called_once_with( - interval, 0) - self.mock_second_looping_task.start.assert_called_once_with( - interval, initial_delay) diff --git a/cinder/tests/unit/volume/drivers/netapp/dataontap/utils/test_utils.py b/cinder/tests/unit/volume/drivers/netapp/dataontap/utils/test_utils.py deleted file mode 100644 index 55206b002..000000000 --- a/cinder/tests/unit/volume/drivers/netapp/dataontap/utils/test_utils.py +++ /dev/null @@ -1,166 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -import json -import socket - -import ddt -import mock -from oslo_config import cfg - -from cinder import exception -from cinder import test -from cinder.tests.unit.volume.drivers.netapp.dataontap.utils import fakes -from cinder.volume.drivers.netapp.dataontap.client import client_cmode -from cinder.volume.drivers.netapp.dataontap.utils import utils - -CONF = cfg.CONF - - -@ddt.ddt -class NetAppCDOTDataMotionTestCase(test.TestCase): - - def setUp(self): - super(NetAppCDOTDataMotionTestCase, self).setUp() - self.backend = 'backend1' - self.mock_cmode_client = self.mock_object(client_cmode, 'Client') - self.config = fakes.get_fake_cmode_config(self.backend) - CONF.set_override('volume_backend_name', self.backend, - group=self.backend) - CONF.set_override('netapp_transport_type', 'https', - group=self.backend) - CONF.set_override('netapp_login', 'fake_user', - group=self.backend) - CONF.set_override('netapp_password', 'fake_password', - group=self.backend) - CONF.set_override('netapp_server_hostname', 'fake_hostname', - group=self.backend) - CONF.set_override('netapp_server_port', 8866, - group=self.backend) - - def test_get_backend_configuration(self): - self.mock_object(utils, 'CONF') - CONF.set_override('netapp_vserver', 'fake_vserver', - group=self.backend) - utils.CONF.list_all_sections.return_value = [self.backend] - - config = utils.get_backend_configuration(self.backend) - - self.assertEqual('fake_vserver', config.netapp_vserver) - - def test_get_backend_configuration_different_backend_name(self): - self.mock_object(utils, 'CONF') - CONF.set_override('netapp_vserver', 'fake_vserver', - group=self.backend) - CONF.set_override('volume_backend_name', 'fake_backend_name', - group=self.backend) - utils.CONF.list_all_sections.return_value = [self.backend] - - config = utils.get_backend_configuration(self.backend) - - self.assertEqual('fake_vserver', config.netapp_vserver) - self.assertEqual('fake_backend_name', config.volume_backend_name) - - @ddt.data([], 
['fake_backend1', 'fake_backend2']) - def test_get_backend_configuration_not_configured(self, conf_sections): - self.mock_object(utils, 'CONF') - utils.CONF.list_all_sections.return_value = conf_sections - - self.assertRaises(exception.ConfigNotFound, - utils.get_backend_configuration, - self.backend) - - def test_get_client_for_backend(self): - self.mock_object(utils, 'get_backend_configuration', - return_value=self.config) - - utils.get_client_for_backend(self.backend) - - self.mock_cmode_client.assert_called_once_with( - hostname='fake_hostname', password='fake_password', - username='fake_user', transport_type='https', port=8866, - trace=mock.ANY, vserver=None) - - def test_get_client_for_backend_with_vserver(self): - self.mock_object(utils, 'get_backend_configuration', - return_value=self.config) - - CONF.set_override('netapp_vserver', 'fake_vserver', - group=self.backend) - - utils.get_client_for_backend(self.backend) - - self.mock_cmode_client.assert_called_once_with( - hostname='fake_hostname', password='fake_password', - username='fake_user', transport_type='https', port=8866, - trace=mock.ANY, vserver='fake_vserver') - - -@ddt.ddt -class NetAppDataOntapUtilsTestCase(test.TestCase): - - @ddt.data('cluster', '7mode') - def test_build_ems_log_message_0(self, driver_mode): - - self.mock_object( - socket, 'gethostname', return_value='fake_hostname') - - result = utils.build_ems_log_message_0( - 'fake_driver_name', 'fake_app_version', driver_mode) - - dest = ('cluster node' if driver_mode == 'cluster' - else '7 mode controller') - expected = { - 'computer-name': 'fake_hostname', - 'event-source': 'Cinder driver fake_driver_name', - 'app-version': 'fake_app_version', - 'category': 'provisioning', - 'log-level': '5', - 'auto-support': 'false', - 'event-id': '0', - 'event-description': 'OpenStack Cinder connected to %s' % dest, - } - self.assertEqual(expected, result) - - def test_build_ems_log_message_1(self): - - self.mock_object( - socket, 'gethostname', 
return_value='fake_hostname') - aggregate_pools = ['aggr1', 'aggr2'] - flexvol_pools = ['vol1', 'vol2'] - - result = utils.build_ems_log_message_1( - 'fake_driver_name', 'fake_app_version', 'fake_vserver', - flexvol_pools, aggregate_pools) - - pool_info = { - 'pools': { - 'vserver': 'fake_vserver', - 'aggregates': aggregate_pools, - 'flexvols': flexvol_pools, - }, - } - self.assertDictEqual(pool_info, - json.loads(result['event-description'])) - - result['event-description'] = '' - expected = { - 'computer-name': 'fake_hostname', - 'event-source': 'Cinder driver fake_driver_name', - 'app-version': 'fake_app_version', - 'category': 'provisioning', - 'log-level': '5', - 'auto-support': 'false', - 'event-id': '1', - 'event-description': '', - } - self.assertEqual(expected, result) diff --git a/cinder/tests/unit/volume/drivers/netapp/eseries/__init__.py b/cinder/tests/unit/volume/drivers/netapp/eseries/__init__.py deleted file mode 100644 index e69de29bb..000000000 diff --git a/cinder/tests/unit/volume/drivers/netapp/eseries/fakes.py b/cinder/tests/unit/volume/drivers/netapp/eseries/fakes.py deleted file mode 100644 index d3eb51f42..000000000 --- a/cinder/tests/unit/volume/drivers/netapp/eseries/fakes.py +++ /dev/null @@ -1,1469 +0,0 @@ -# Copyright (c) - 2015, Alex Meade -# Copyright (c) - 2015, Yogesh Kshirsagar -# Copyright (c) - 2015, Michael Price -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- - -import copy -import json - -import mock - -from cinder.objects import fields -from cinder.tests.unit import fake_constants as fake -from cinder.volume import configuration as conf -from cinder.volume.drivers.netapp.eseries import utils -import cinder.volume.drivers.netapp.options as na_opts -import cinder.volume.drivers.netapp.utils as na_utils - -FAKE_CINDER_VOLUME = { - 'id': fake.VOLUME_ID, - 'size': 1, - 'volume_name': 'lun1', - 'host': 'hostname@backend#DDP', - 'os_type': 'linux', - 'provider_location': 'lun1', - 'name_id': fake.VOLUME2_ID, - 'provider_auth': 'provider a b', - 'project_id': fake.PROJECT_ID, - 'display_name': None, - 'display_description': 'lun1', - 'volume_type_id': None, - 'migration_status': None, - 'attach_status': fields.VolumeAttachStatus.DETACHED -} - -FAKE_CINDER_SNAPSHOT = { - 'id': fake.SNAPSHOT_ID, - 'volume': FAKE_CINDER_VOLUME, - 'provider_id': '3400000060080E500023BB3400631F335294A5A8', -} - -FAKE_CINDER_CG = { - 'id': fake.CONSISTENCY_GROUP_ID, -} - -FAKE_CINDER_CG_SNAPSHOT = { - 'id': fake.CGSNAPSHOT_ID, - 'consistencygroup_id': FAKE_CINDER_CG['id'], -} - -MULTIATTACH_HOST_GROUP = { - 'clusterRef': '8500000060080E500023C7340036035F515B78FC', - 'label': utils.MULTI_ATTACH_HOST_GROUP_NAME, -} - -FOREIGN_HOST_GROUP = { - 'clusterRef': '8500000060080E500023C7340036035F515B78FD', - 'label': 'FOREIGN HOST GROUP', -} - -HOST_GROUPS = [MULTIATTACH_HOST_GROUP, FOREIGN_HOST_GROUP] - -SSC_POOLS = [ - { - "poolId": "0400000060080E5000290D8000009C9955828DD2", - "name": "DDP", - "pool": { - "sequenceNum": 2, - "offline": False, - "raidLevel": "raidDiskPool", - "worldWideName": "60080E5000290D8000009C9955828DD2", - "volumeGroupRef": "0400000060080E5000290D8000009C9955828DD2", - "reserved1": "000000000000000000000000", - "reserved2": "", - "trayLossProtection": False, - "label": "DDP", - "state": "complete", - "spindleSpeedMatch": True, - "spindleSpeed": 7200, - "isInaccessible": False, - "securityType": "none", - "drawerLossProtection": 
True, - "protectionInformationCapable": False, - "protectionInformationCapabilities": { - "protectionInformationCapable": True, - "protectionType": "type2Protection" - }, - "volumeGroupData": { - "type": "diskPool", - "diskPoolData": { - "reconstructionReservedDriveCount": 1, - "reconstructionReservedAmt": "2992518463488", - "reconstructionReservedDriveCountCurrent": 1, - "poolUtilizationWarningThreshold": 100, - "poolUtilizationCriticalThreshold": 100, - "poolUtilizationState": "utilizationOptimal", - "unusableCapacity": "0", - "degradedReconstructPriority": "high", - "criticalReconstructPriority": "highest", - "backgroundOperationPriority": "low", - "allocGranularity": "4294967296" - } - }, - "usage": "standard", - "driveBlockFormat": "allNative", - "reservedSpaceAllocated": True, - "usedSpace": "13653701033984", - "totalRaidedSpace": "23459111370752", - "extents": [ - { - "sectorOffset": "0", - "rawCapacity": "9805410336768", - "raidLevel": "raidDiskPool", - "volumeGroupRef": - "0400000060080E5000290D8000009C9955828DD2", - "freeExtentRef": - "0301000060080E5000290D8000009C9955828DD2", - "reserved1": "000000000000000000000000", - "reserved2": "" - } - ], - "largestFreeExtentSize": "9805410336768", - "raidStatus": "optimal", - "freeSpace": "9805410336768", - "drivePhysicalType": "sas", - "driveMediaType": "hdd", - "normalizedSpindleSpeed": "spindleSpeed7200", - "id": "0400000060080E5000290D8000009C9955828DD2", - "diskPool": True, - "name": "DDP" - }, - "flashCacheCapable": True, - "dataAssuranceCapable": True, - "encrypted": False, - "thinProvisioningCapable": True, - "spindleSpeed": "spindleSpeed7200", - "raidLevel": "raidDiskPool", - "availableFreeExtentCapacities": [ - "9805410336768" - ] - }, - { - "poolId": "0400000060080E5000290D8000009CBA55828E96", - "name": "pool_raid1", - "pool": { - "sequenceNum": 6, - "offline": False, - "raidLevel": "raid1", - "worldWideName": "60080E5000290D8000009CBA55828E96", - "volumeGroupRef": 
"0400000060080E5000290D8000009CBA55828E96", - "reserved1": "000000000000000000000000", - "reserved2": "", - "trayLossProtection": False, - "label": "pool_raid1", - "state": "complete", - "spindleSpeedMatch": True, - "spindleSpeed": 10000, - "isInaccessible": False, - "securityType": "none", - "drawerLossProtection": True, - "protectionInformationCapable": False, - "protectionInformationCapabilities": { - "protectionInformationCapable": True, - "protectionType": "type2Protection" - }, - "volumeGroupData": { - "type": "unknown", - "diskPoolData": None - }, - "usage": "standard", - "driveBlockFormat": "allNative", - "reservedSpaceAllocated": True, - "usedSpace": "2978559819776", - "totalRaidedSpace": "6662444097536", - "extents": [ - { - "sectorOffset": "387891200", - "rawCapacity": "3683884277760", - "raidLevel": "raid1", - "volumeGroupRef": - "0400000060080E5000290D8000009CBA55828E96", - "freeExtentRef": - "030000B360080E5000290D8000009CBA55828E96", - "reserved1": "000000000000000000000000", - "reserved2": "" - } - ], - "largestFreeExtentSize": "3683884277760", - "raidStatus": "optimal", - "freeSpace": "3683884277760", - "drivePhysicalType": "sas", - "driveMediaType": "hdd", - "normalizedSpindleSpeed": "spindleSpeed10k", - "id": "0400000060080E5000290D8000009CBA55828E96", - "diskPool": False, - "name": "pool_raid1" - }, - "flashCacheCapable": False, - "dataAssuranceCapable": True, - "encrypted": False, - "thinProvisioningCapable": False, - "spindleSpeed": "spindleSpeed10k", - "raidLevel": "raid1", - "availableFreeExtentCapacities": [ - "3683884277760" - ] - }, - { - "poolId": "0400000060080E5000290D8000009CAB55828E51", - "name": "pool_raid6", - "pool": { - "sequenceNum": 3, - "offline": False, - "raidLevel": "raid6", - "worldWideName": "60080E5000290D8000009CAB55828E51", - "volumeGroupRef": "0400000060080E5000290D8000009CAB55828E51", - "reserved1": "000000000000000000000000", - "reserved2": "", - "trayLossProtection": False, - "label": "pool_raid6", - "state": 
"complete", - "spindleSpeedMatch": True, - "spindleSpeed": 15000, - "isInaccessible": False, - "securityType": "enabled", - "drawerLossProtection": False, - "protectionInformationCapable": False, - "protectionInformationCapabilities": { - "protectionInformationCapable": True, - "protectionType": "type2Protection" - }, - "volumeGroupData": { - "type": "unknown", - "diskPoolData": None - }, - "usage": "standard", - "driveBlockFormat": "allNative", - "reservedSpaceAllocated": True, - "usedSpace": "16413217521664", - "totalRaidedSpace": "16637410312192", - "extents": [ - { - "sectorOffset": "1144950784", - "rawCapacity": "224192790528", - "raidLevel": "raid6", - "volumeGroupRef": - "0400000060080E5000290D8000009CAB55828E51", - "freeExtentRef": - "0300005960080E5000290D8000009CAB55828E51", - "reserved1": "000000000000000000000000", - "reserved2": "" - } - ], - "largestFreeExtentSize": "224192790528", - "raidStatus": "optimal", - "freeSpace": "224192790528", - "drivePhysicalType": "sas", - "driveMediaType": "hdd", - "normalizedSpindleSpeed": "spindleSpeed15k", - "id": "0400000060080E5000290D8000009CAB55828E51", - "diskPool": False, - "name": "pool_raid6" - }, - "flashCacheCapable": False, - "dataAssuranceCapable": True, - "encrypted": True, - "thinProvisioningCapable": False, - "spindleSpeed": "spindleSpeed15k", - "raidLevel": "raid6", - "availableFreeExtentCapacities": [ - "224192790528" - ] - } -] - -STORAGE_POOLS = [ssc_pool['pool'] for ssc_pool in SSC_POOLS] - -VOLUMES = [ - { - "offline": False, - "extremeProtection": False, - "volumeHandle": 2, - "raidLevel": "raid0", - "sectorOffset": "0", - "worldWideName": "60080E50002998A00000945355C37C19", - "label": "1", - "blkSize": 512, - "capacity": "10737418240", - "reconPriority": 1, - "segmentSize": 131072, - "action": "initializing", - "cache": { - "cwob": False, - "enterpriseCacheDump": False, - "mirrorActive": True, - "mirrorEnable": True, - "readCacheActive": True, - "readCacheEnable": True, - "writeCacheActive": 
True, - "writeCacheEnable": True, - "cacheFlushModifier": "flush10Sec", - "readAheadMultiplier": 1 - }, - "mediaScan": { - "enable": False, - "parityValidationEnable": False - }, - "volumeRef": "0200000060080E50002998A00000945355C37C19", - "status": "optimal", - "volumeGroupRef": "0400000060080E50002998A00000945255C37C14", - "currentManager": "070000000000000000000001", - "preferredManager": "070000000000000000000001", - "perms": { - "mapToLUN": True, - "snapShot": True, - "format": True, - "reconfigure": True, - "mirrorPrimary": True, - "mirrorSecondary": True, - "copySource": True, - "copyTarget": True, - "readable": True, - "writable": True, - "rollback": True, - "mirrorSync": True, - "newImage": True, - "allowDVE": True, - "allowDSS": True, - "concatVolumeMember": True, - "flashReadCache": True, - "asyncMirrorPrimary": True, - "asyncMirrorSecondary": True, - "pitGroup": True, - "cacheParametersChangeable": True, - "allowThinManualExpansion": False, - "allowThinGrowthParametersChange": False, - "allowVaulting": False, - "allowRestore": False - }, - "mgmtClientAttribute": 0, - "dssPreallocEnabled": True, - "dssMaxSegmentSize": 2097152, - "preReadRedundancyCheckEnabled": False, - "protectionInformationCapable": False, - "protectionType": "type1Protection", - "applicationTagOwned": False, - "untrustworthy": 0, - "volumeUse": "standardVolume", - "volumeFull": False, - "volumeCopyTarget": False, - "volumeCopySource": False, - "pitBaseVolume": False, - "asyncMirrorTarget": False, - "asyncMirrorSource": False, - "remoteMirrorSource": False, - "remoteMirrorTarget": False, - "diskPool": False, - "flashCached": False, - "increasingBy": "0", - "metadata": [], - "dataAssurance": True, - "name": "1", - "id": "0200000060080E50002998A00000945355C37C19", - "wwn": "60080E50002998A00000945355C37C19", - "objectType": "volume", - "mapped": False, - "preferredControllerId": "070000000000000000000001", - "totalSizeInBytes": "10737418240", - "onlineVolumeCopy": False, - 
"listOfMappings": [], - "currentControllerId": "070000000000000000000001", - "cacheSettings": { - "cwob": False, - "enterpriseCacheDump": False, - "mirrorActive": True, - "mirrorEnable": True, - "readCacheActive": True, - "readCacheEnable": True, - "writeCacheActive": True, - "writeCacheEnable": True, - "cacheFlushModifier": "flush10Sec", - "readAheadMultiplier": 1 - }, - "thinProvisioned": False - }, - { - "volumeHandle": 16385, - "worldWideName": "60080E500029347000001D7B55C3791E", - "label": "2", - "allocationGranularity": 128, - "capacity": "53687091200", - "reconPriority": 1, - "volumeRef": "3A00000060080E500029347000001D7B55C3791E", - "status": "optimal", - "repositoryRef": "3600000060080E500029347000001D7955C3791D", - "currentManager": "070000000000000000000002", - "preferredManager": "070000000000000000000002", - "perms": { - "mapToLUN": True, - "snapShot": False, - "format": True, - "reconfigure": False, - "mirrorPrimary": False, - "mirrorSecondary": False, - "copySource": True, - "copyTarget": False, - "readable": True, - "writable": True, - "rollback": True, - "mirrorSync": True, - "newImage": True, - "allowDVE": True, - "allowDSS": True, - "concatVolumeMember": False, - "flashReadCache": True, - "asyncMirrorPrimary": True, - "asyncMirrorSecondary": True, - "pitGroup": True, - "cacheParametersChangeable": True, - "allowThinManualExpansion": False, - "allowThinGrowthParametersChange": False, - "allowVaulting": False, - "allowRestore": False - }, - "mgmtClientAttribute": 0, - "preReadRedundancyCheckEnabled": False, - "protectionType": "type0Protection", - "applicationTagOwned": True, - "maxVirtualCapacity": "69269232549888", - "initialProvisionedCapacity": "4294967296", - "currentProvisionedCapacity": "4294967296", - "provisionedCapacityQuota": "55834574848", - "growthAlertThreshold": 85, - "expansionPolicy": "automatic", - "volumeCache": { - "cwob": False, - "enterpriseCacheDump": False, - "mirrorActive": True, - "mirrorEnable": True, - "readCacheActive": 
True, - "readCacheEnable": True, - "writeCacheActive": True, - "writeCacheEnable": True, - "cacheFlushModifier": "flush10Sec", - "readAheadMultiplier": 0 - }, - "offline": False, - "volumeFull": False, - "volumeGroupRef": "0400000060080E50002998A00000945155C37C08", - "blkSize": 512, - "storageVolumeRef": "0200000060080E500029347000001D7855C3791D", - "volumeCopyTarget": False, - "volumeCopySource": False, - "pitBaseVolume": False, - "asyncMirrorTarget": False, - "asyncMirrorSource": False, - "remoteMirrorSource": False, - "remoteMirrorTarget": False, - "flashCached": False, - "mediaScan": { - "enable": False, - "parityValidationEnable": False - }, - "metadata": [], - "dataAssurance": False, - "name": "2", - "id": "3A00000060080E500029347000001D7B55C3791E", - "wwn": "60080E500029347000001D7B55C3791E", - "objectType": "thinVolume", - "mapped": False, - "diskPool": True, - "preferredControllerId": "070000000000000000000002", - "totalSizeInBytes": "53687091200", - "onlineVolumeCopy": False, - "listOfMappings": [], - "currentControllerId": "070000000000000000000002", - "segmentSize": 131072, - "cacheSettings": { - "cwob": False, - "enterpriseCacheDump": False, - "mirrorActive": True, - "mirrorEnable": True, - "readCacheActive": True, - "readCacheEnable": True, - "writeCacheActive": True, - "writeCacheEnable": True, - "cacheFlushModifier": "flush10Sec", - "readAheadMultiplier": 0 - }, - "thinProvisioned": True - } -] - -VOLUME = VOLUMES[0] - -STORAGE_POOL = { - 'label': 'DDP', - 'id': 'fakevolgroupref', - 'volumeGroupRef': 'fakevolgroupref', - 'raidLevel': 'raidDiskPool', - 'usedSpace': '16413217521664', - 'totalRaidedSpace': '16637410312192', -} - -INITIATOR_NAME = 'iqn.1998-01.com.vmware:localhost-28a58148' -INITIATOR_NAME_2 = 'iqn.1998-01.com.vmware:localhost-28a58149' -INITIATOR_NAME_3 = 'iqn.1998-01.com.vmware:localhost-28a58150' -WWPN = '20130080E5322230' -WWPN_2 = '20230080E5322230' - -FC_TARGET_WWPNS = [ - '500a098280feeba5', - '500a098290feeba5', - 
'500a098190feeba5', - '500a098180feeba5' -] - -FC_I_T_MAP = { - '20230080E5322230': [ - '500a098280feeba5', - '500a098290feeba5' - ], - '20130080E5322230': [ - '500a098190feeba5', - '500a098180feeba5' - ] -} - -FC_FABRIC_MAP = { - 'fabricB': { - 'target_port_wwn_list': [ - '500a098190feeba5', - '500a098180feeba5' - ], - 'initiator_port_wwn_list': [ - '20130080E5322230' - ] - }, - 'fabricA': { - 'target_port_wwn_list': [ - '500a098290feeba5', - '500a098280feeba5' - ], - 'initiator_port_wwn_list': [ - '20230080E5322230' - ] - } -} - -HOST = { - 'isSAControlled': False, - 'confirmLUNMappingCreation': False, - 'label': 'stlrx300s7-55', - 'isLargeBlockFormatHost': False, - 'clusterRef': '8500000060080E500023C7340036035F515B78FC', - 'protectionInformationCapableAccessMethod': False, - 'ports': [], - 'hostRef': '8400000060080E500023C73400300381515BFBA3', - 'hostTypeIndex': 6, - 'hostSidePorts': [{ - 'label': 'NewStore', - 'type': 'iscsi', - 'address': INITIATOR_NAME}] -} -HOST_2 = { - 'isSAControlled': False, - 'confirmLUNMappingCreation': False, - 'label': 'stlrx300s7-55', - 'isLargeBlockFormatHost': False, - 'clusterRef': utils.NULL_REF, - 'protectionInformationCapableAccessMethod': False, - 'ports': [], - 'hostRef': '8400000060080E500023C73400300381515BFBA5', - 'hostTypeIndex': 6, - 'hostSidePorts': [{ - 'label': 'NewStore', 'type': 'iscsi', - 'address': INITIATOR_NAME_2}] -} -# HOST_3 has all lun_ids in use. 
-HOST_3 = { - 'isSAControlled': False, - 'confirmLUNMappingCreation': False, - 'label': 'stlrx300s7-55', - 'isLargeBlockFormatHost': False, - 'clusterRef': '8500000060080E500023C73400360351515B78FC', - 'protectionInformationCapableAccessMethod': False, - 'ports': [], - 'hostRef': '8400000060080E501023C73400800381515BFBA5', - 'hostTypeIndex': 6, - 'hostSidePorts': [{ - 'label': 'NewStore', 'type': 'iscsi', - 'address': INITIATOR_NAME_3}], -} - - -VOLUME_MAPPING = { - 'lunMappingRef': '8800000000000000000000000000000000000000', - 'lun': 1, - 'ssid': 16384, - 'perms': 15, - 'volumeRef': VOLUME['volumeRef'], - 'type': 'all', - 'mapRef': HOST['hostRef'] -} -# VOLUME_MAPPING_3 corresponding to HOST_3 has all lun_ids in use. -VOLUME_MAPPING_3 = { - 'lunMappingRef': '8800000000000000000000000000000000000000', - 'lun': range(255), - 'ssid': 16384, - 'perms': 15, - 'volumeRef': VOLUME['volumeRef'], - 'type': 'all', - 'mapRef': HOST_3['hostRef'], -} - -VOLUME_MAPPING_TO_MULTIATTACH_GROUP = copy.deepcopy(VOLUME_MAPPING) -VOLUME_MAPPING_TO_MULTIATTACH_GROUP.update( - {'mapRef': MULTIATTACH_HOST_GROUP['clusterRef']} -) - -STORAGE_SYSTEM = { - 'chassisSerialNumber': 1, - 'fwVersion': '08.10.15.00', - 'freePoolSpace': 11142431623168, - 'driveCount': 24, - 'hostSparesUsed': 0, 'id': - '1fa6efb5-f07b-4de4-9f0e-52e5f7ff5d1b', - 'hotSpareSizeAsString': '0', 'wwn': - '60080E500023C73400000000515AF323', - 'passwordStatus': 'valid', - 'parameters': { - 'minVolSize': 1048576, 'maxSnapshotsPerBase': 16, - 'maxDrives': 192, - 'maxVolumes': 512, - 'maxVolumesPerGroup': 256, - 'maxMirrors': 0, - 'maxMappingsPerVolume': 1, - 'maxMappableLuns': 256, - 'maxVolCopys': 511, - 'maxSnapshots': 256 - }, 'hotSpareCount': 0, - 'hostSpareCountInStandby': 0, - 'status': 'needsattn', - 'trayCount': 1, - 'usedPoolSpaceAsString': '5313000380416', - 'ip2': '10.63.165.216', - 'ip1': '10.63.165.215', - 'freePoolSpaceAsString': '11142431623168', - 'types': 'SAS', - 'name': 'stle2600-7_8', - 'hotSpareSize': 0, - 
'usedPoolSpace': 5313000380416, - 'driveTypes': ['sas'], - 'unconfiguredSpaceByDriveType': {}, - 'unconfiguredSpaceAsStrings': '0', - 'model': '2650', - 'unconfiguredSpace': 0 -} - -SNAPSHOT_GROUP = { - 'id': '3300000060080E500023C7340000098D5294AC9A', - 'status': 'optimal', - 'autoDeleteLimit': 0, - 'maxRepositoryCapacity': '-65536', - 'rollbackStatus': 'none', - 'unusableRepositoryCapacity': '0', - 'pitGroupRef': '3300000060080E500023C7340000098D5294AC9A', - 'clusterSize': 65536, - 'label': 'C6JICISVHNG2TFZX4XB5ZWL7F', - 'maxBaseCapacity': '476187142128128', - 'repositoryVolume': '3600000060080E500023BB3400001FA952CEF12C', - 'fullWarnThreshold': 99, - 'repFullPolicy': 'purgepit', - 'action': 'none', - 'rollbackPriority': 'medium', - 'creationPendingStatus': 'none', - 'consistencyGroupRef': '0000000000000000000000000000000000000000', - 'volumeHandle': 49153, - 'consistencyGroup': False, - 'baseVolume': '0200000060080E500023C734000009825294A534', - 'snapshotCount': 32 -} - -SNAPSHOT_IMAGE = { - 'id': fake.SNAPSHOT_ID, - 'baseVol': '0200000060080E500023C734000009825294A534', - 'status': 'optimal', - 'pitCapacity': '2147483648', - 'pitTimestamp': '1389315375', - 'pitGroupRef': '3300000060080E500023C7340000098D5294AC9A', - 'creationMethod': 'user', - 'repositoryCapacityUtilization': '2818048', - 'activeCOW': True, - 'isRollbackSource': False, - 'pitRef': '3400000060080E500023BB3400631F335294A5A8', - 'pitSequenceNumber': '19', - 'consistencyGroupId': '0000000000000000000000000000000000000000', -} - -SNAPSHOT_VOLUME = { - 'id': '35000000600A0980006077F80000F8BF566581AA', - 'viewRef': '35000000600A0980006077F80000F8BF566581AA', - 'worldWideName': '600A0980006077F80000F8BF566581AA', - 'baseVol': '02000000600A0980006077F80000F89B56657E26', - 'basePIT': '0000000000000000000000000000000000000000', - 'boundToPIT': False, - 'accessMode': 'readOnly', - 'label': 'UZJ45SLUKNGWRF3QZHBTOG4C4E_DEL', - 'status': 'stopped', - 'currentManager': '070000000000000000000001', - 
'preferredManager': '070000000000000000000001', - 'repositoryVolume': '0000000000000000000000000000000000000000', - 'fullWarnThreshold': 0, - 'viewTime': '1449453419', - 'viewSequenceNumber': '2104', - 'volumeHandle': 16510, - 'clusterSize': 0, - 'maxRepositoryCapacity': '0', - 'unusableRepositoryCapacity': '0', - 'membership': { - 'viewType': 'individual', - 'cgViewRef': None - }, - 'mgmtClientAttribute': 0, - 'offline': False, - 'volumeFull': False, - 'repositoryCapacity': '0', - 'baseVolumeCapacity': '1073741824', - 'totalSizeInBytes': '0', - 'consistencyGroupId': None, - 'volumeCopyTarget': False, - 'cloneCopy': False, - 'volumeCopySource': False, - 'pitBaseVolume': False, - 'asyncMirrorTarget': False, - 'asyncMirrorSource': False, - 'protectionType': 'type0Protection', - 'remoteMirrorSource': False, - 'remoteMirrorTarget': False, - 'wwn': '600A0980006077F80000F8BF566581AA', - 'listOfMappings': [], - 'mapped': False, - 'currentControllerId': '070000000000000000000001', - 'preferredControllerId': '070000000000000000000001', - 'onlineVolumeCopy': False, - 'objectType': 'pitView', - 'name': 'UZJ45SLUKNGWRF3QZHBTOG4C4E', -} - -FAKE_BACKEND_STORE = { - 'key': 'cinder-snapshots', - 'value': '{"3300000060080E50003416400000E90D56B047E5":"2"}' -} - -HARDWARE_INVENTORY_SINGLE_CONTROLLER = { - 'controllers': [ - { - 'modelName': '2752', - 'serialNumber': '021436001321' - } - ] -} - -HARDWARE_INVENTORY = { - 'controllers': [ - { - 'modelName': '2752', - 'serialNumber': '021436000943' - }, - { - 'modelName': '2752', - 'serialNumber': '021436001321' - } - ], - 'iscsiPorts': [ - { - 'controllerId': - '070000000000000000000002', - 'ipv4Enabled': True, - 'ipv4Data': { - 'ipv4Address': '0.0.0.0', - 'ipv4AddressConfigMethod': - 'configStatic', - 'ipv4VlanId': { - 'isEnabled': False, - 'value': 0 - }, - 'ipv4AddressData': { - 'ipv4Address': '172.20.123.66', - 'ipv4SubnetMask': '255.255.255.0', - 'configState': 'configured', - 'ipv4GatewayAddress': '0.0.0.0' - } - }, - 
'tcpListenPort': 3260, - 'interfaceRef': '2202040000000000000000000000000000000000', - 'iqn': 'iqn.1992-01.com.lsi:2365.60080e500023c73400000000515af323' - } - ], - 'fibrePorts': [ - { - "channel": 1, - "loopID": 126, - "speed": 800, - "hardAddress": 6, - "nodeName": "20020080E5322230", - "portName": "20130080E5322230", - "portId": "011700", - "topology": "fabric", - "part": "PM8032 ", - "revision": 8, - "chanMiswire": False, - "esmMiswire": False, - "linkStatus": "up", - "isDegraded": False, - "speedControl": "auto", - "maxSpeed": 800, - "speedNegError": False, - "reserved1": "000000000000000000000000", - "reserved2": "", - "ddsChannelState": 0, - "ddsStateReason": 0, - "ddsStateWho": 0, - "isLocal": True, - "channelPorts": [], - "currentInterfaceSpeed": "speed8gig", - "maximumInterfaceSpeed": "speed8gig", - "interfaceRef": "2202020000000000000000000000000000000000", - "physicalLocation": { - "trayRef": "0000000000000000000000000000000000000000", - "slot": 0, - "locationParent": { - "refType": "generic", - "controllerRef": None, - "symbolRef": "0000000000000000000000000000000000000000", - "typedReference": None - }, - "locationPosition": 0 - }, - "isTrunkCapable": False, - "trunkMiswire": False, - "protectionInformationCapable": True, - "controllerId": "070000000000000000000002", - "interfaceId": "2202020000000000000000000000000000000000", - "addressId": "20130080E5322230", - "niceAddressId": "20:13:00:80:E5:32:22:30" - }, - { - "channel": 2, - "loopID": 126, - "speed": 800, - "hardAddress": 7, - "nodeName": "20020080E5322230", - "portName": "20230080E5322230", - "portId": "011700", - "topology": "fabric", - "part": "PM8032 ", - "revision": 8, - "chanMiswire": False, - "esmMiswire": False, - "linkStatus": "up", - "isDegraded": False, - "speedControl": "auto", - "maxSpeed": 800, - "speedNegError": False, - "reserved1": "000000000000000000000000", - "reserved2": "", - "ddsChannelState": 0, - "ddsStateReason": 0, - "ddsStateWho": 0, - "isLocal": True, - 
"channelPorts": [], - "currentInterfaceSpeed": "speed8gig", - "maximumInterfaceSpeed": "speed8gig", - "interfaceRef": "2202030000000000000000000000000000000000", - "physicalLocation": { - "trayRef": "0000000000000000000000000000000000000000", - "slot": 0, - "locationParent": { - "refType": "generic", - "controllerRef": None, - "symbolRef": "0000000000000000000000000000000000000000", - "typedReference": None - }, - "locationPosition": 0 - }, - "isTrunkCapable": False, - "trunkMiswire": False, - "protectionInformationCapable": True, - "controllerId": "070000000000000000000002", - "interfaceId": "2202030000000000000000000000000000000000", - "addressId": "20230080E5322230", - "niceAddressId": "20:23:00:80:E5:32:22:30" - }, - ] -} - -FAKE_POOL_ACTION_PROGRESS = [ - { - "volumeRef": "0200000060080E50002998A00000945355C37C19", - "progressPercentage": 55, - "estimatedTimeToCompletion": 1, - "currentAction": "initializing" - }, - { - "volumeRef": "0200000060080E50002998A00000945355C37C18", - "progressPercentage": 0, - "estimatedTimeToCompletion": 0, - "currentAction": "progressDve" - }, -] - -FAKE_CHAP_SECRET = 'password123' -FAKE_RESOURCE_URL = '/devmgr/v2/devmgr/utils/about' -FAKE_APP_VERSION = '2015.2|2015.2.dev59|vendor|Linux-3.13.0-24-generic' -FAKE_BACKEND = 'eseriesiSCSI' -FAKE_CINDER_HOST = 'ubuntu-1404' -FAKE_SERIAL_NUMBERS = ['021436000943', '021436001321'] -FAKE_SERIAL_NUMBER = ['021436001321'] -FAKE_DEFAULT_SERIAL_NUMBER = ['unknown', 'unknown'] -FAKE_DEFAULT_MODEL = 'unknown' -FAKE_ABOUT_RESPONSE = { - 'runningAsProxy': True, - 'version': '01.53.9010.0005', - 'systemId': 'a89355ab-692c-4d4a-9383-e249095c3c0', -} - -FAKE_TARGET_IQN = 'iqn.1992-01.com.lsi:2365.60080e500023c73400000000515af323' - -FAKE_CHAP_USERNAME = 'eserieschapuser' - -FAKE_CHAP_PARAMETERS = { - 'ChapAuthentication': True, - 'iqn': FAKE_TARGET_IQN, - 'chapSecret': FAKE_CHAP_SECRET, - 'authMethod': 'CHAP', -} - -FAKE_CLIENT_CHAP_PARAMETERS = ( - FAKE_TARGET_IQN, - FAKE_CHAP_USERNAME, - 
FAKE_CHAP_SECRET, -) - -FAKE_TARGET_DICT = { - 'data': { - 'auth_method': 'CHAP', - 'auth_password': FAKE_CHAP_SECRET, - 'auth_username': FAKE_CHAP_USERNAME, - 'discovery_auth_method': 'CHAP', - 'discovery_auth_password': FAKE_CHAP_SECRET, - 'discovery_auth_username': FAKE_CHAP_USERNAME, - 'target_discovered': False, - 'target_iqn': FAKE_TARGET_IQN, - 'target_lun': 1, - 'target_portal': '172.20.123.66:3260', - 'volume_id': '114774fb-e15a-4fae-8ee2-c9723e3645ef', - }, - 'driver_volume_type': 'iscsi', -} - -FAKE_CHAP_POST_DATA = { - 'enableChapAuthentication': True, - 'alias': FAKE_CHAP_USERNAME, - 'iqn': FAKE_TARGET_IQN, - 'chapSecret': FAKE_CHAP_SECRET, - 'authMethod': 'CHAP', -} - - -FAKE_CONTROLLERS = [ - {'serialNumber': FAKE_SERIAL_NUMBERS[0], 'modelName': '2752'}, - {'serialNumber': FAKE_SERIAL_NUMBERS[1], 'modelName': '2752'}] - -FAKE_SINGLE_CONTROLLER = [{'serialNumber': FAKE_SERIAL_NUMBERS[1]}] - -FAKE_KEY = ('openstack-%s-%s-%s' % (FAKE_CINDER_HOST, FAKE_SERIAL_NUMBERS[0], - FAKE_SERIAL_NUMBERS[1])) - -FAKE_ASUP_DATA = { - 'category': 'provisioning', - 'app-version': FAKE_APP_VERSION, - 'event-source': 'Cinder driver NetApp_iSCSI_ESeries', - 'event-description': 'OpenStack Cinder connected to E-Series proxy', - 'system-version': '08.10.15.00', - 'computer-name': FAKE_CINDER_HOST, - 'model': FAKE_CONTROLLERS[0]['modelName'], - 'controller2-serial': FAKE_CONTROLLERS[1]['serialNumber'], - 'controller1-serial': FAKE_CONTROLLERS[0]['serialNumber'], - 'chassis-serial-number': FAKE_SERIAL_NUMBER[0], - 'operating-mode': 'proxy', -} - -GET_ASUP_RETURN = { - 'model': FAKE_CONTROLLERS[0]['modelName'], - 'serial_numbers': FAKE_SERIAL_NUMBERS, - 'firmware_version': FAKE_ASUP_DATA['system-version'], - 'chassis_sn': FAKE_ASUP_DATA['chassis-serial-number'], -} - -FAKE_POST_INVOKE_DATA = ('POST', '/key-values/%s' % FAKE_KEY, - json.dumps(FAKE_ASUP_DATA)) - -VOLUME_COPY_JOB = { - "status": "complete", - "cloneCopy": True, - "pgRef": 
"3300000060080E500023C73400000ACA52D29454", - "volcopyHandle": 49160, - "idleTargetWriteProt": True, - "copyPriority": "priority2", - "volcopyRef": "1800000060080E500023C73400000ACF52D29466", - "worldWideName": "60080E500023C73400000ACF52D29466", - "copyCompleteTime": "0", - "sourceVolume": "3500000060080E500023C73400000ACE52D29462", - "currentManager": "070000000000000000000002", - "copyStartTime": "1389551671", - "reserved1": "00000000", - "targetVolume": "0200000060080E500023C73400000A8C52D10675", -} - -FAKE_ENDPOINT_HTTP = 'http://host:80/endpoint' - -FAKE_ENDPOINT_HTTPS = 'https://host:8443/endpoint' - -FAKE_INVOC_MSG = 'success' - -FAKE_CLIENT_PARAMS = { - 'scheme': 'http', - 'host': '127.0.0.1', - 'port': 8080, - 'service_path': '/devmgr/vn', - 'username': 'rw', - 'password': 'rw', -} - -FAKE_CONSISTENCY_GROUP = { - 'cgRef': '2A000000600A0980006077F8008702F45480F41A', - 'label': '5BO5GPO4PFGRPMQWEXGTILSAUI', - 'repFullPolicy': 'failbasewrites', - 'fullWarnThreshold': 75, - 'autoDeleteLimit': 0, - 'rollbackPriority': 'medium', - 'uniqueSequenceNumber': [8940, 8941, 8942], - 'creationPendingStatus': 'none', - 'name': '5BO5GPO4PFGRPMQWEXGTILSAUI', - 'id': '2A000000600A0980006077F8008702F45480F41A' -} - -FAKE_CONSISTENCY_GROUP_MEMBER = { - 'consistencyGroupId': '2A000000600A0980006077F8008702F45480F41A', - 'volumeId': '02000000600A0980006077F8000002F55480F421', - 'volumeWwn': '600A0980006077F8000002F55480F421', - 'baseVolumeName': 'I5BHHNILUJGZHEUD4S36GCOQYA', - 'clusterSize': 65536, - 'totalRepositoryVolumes': 1, - 'totalRepositoryCapacity': '4294967296', - 'usedRepositoryCapacity': '5636096', - 'fullWarnThreshold': 75, - 'totalSnapshotImages': 3, - 'totalSnapshotVolumes': 2, - 'autoDeleteSnapshots': False, - 'autoDeleteLimit': 0, - 'pitGroupId': '33000000600A0980006077F8000002F85480F435', - 'repositoryVolume': '36000000600A0980006077F8000002F75480F435' -} -FAKE_CONSISTENCY_GROUP_SNAPSHOT_VOLUME = { - 'id': '2C00000060080E500034194F002C96A256BD50F9', - 'name': 
'6TRZHKDG75DVLBC2JU5J647RME', - 'cgViewRef': '2C00000060080E500034194F002C96A256BD50F9', - 'groupRef': '2A00000060080E500034194F0087969856BD2D67', - 'label': '6TRZHKDG75DVLBC2JU5J647RME', - 'viewTime': '1455221060', - 'viewSequenceNumber': '10', -} - - -def list_snapshot_groups(numGroups): - snapshots = [] - for n in range(0, numGroups): - s = copy.deepcopy(SNAPSHOT_GROUP) - s['label'] = s['label'][:-1] + str(n) - snapshots.append(s) - return snapshots - - -def create_configuration_eseries(): - config = conf.Configuration(None) - config.append_config_values(na_opts.netapp_connection_opts) - config.append_config_values(na_opts.netapp_transport_opts) - config.append_config_values(na_opts.netapp_basicauth_opts) - config.append_config_values(na_opts.netapp_provisioning_opts) - config.append_config_values(na_opts.netapp_eseries_opts) - config.netapp_storage_protocol = 'iscsi' - config.netapp_login = 'rw' - config.netapp_password = 'rw' - config.netapp_server_hostname = '127.0.0.1' - config.netapp_transport_type = 'http' - config.netapp_server_port = '8080' - config.netapp_storage_pools = 'DDP' - config.netapp_storage_family = 'eseries' - config.netapp_sa_password = 'saPass' - config.netapp_controller_ips = '10.11.12.13,10.11.12.14' - config.netapp_webservice_path = '/devmgr/v2' - config.netapp_enable_multiattach = False - config.use_chap_auth = False - return config - - -def deepcopy_return_value_method_decorator(fn): - """Returns a deepcopy of the returned value of the wrapped function.""" - def decorator(*args, **kwargs): - return copy.deepcopy(fn(*args, **kwargs)) - - return decorator - - -def deepcopy_return_value_class_decorator(cls): - """Wraps 'non-protected' methods of a class with decorator. - - Wraps all 'non-protected' methods of a class with the - deepcopy_return_value_method_decorator decorator. 
- """ - class NewClass(cls): - def __getattribute__(self, attr_name): - obj = super(NewClass, self).__getattribute__(attr_name) - if (hasattr(obj, '__call__') and not attr_name.startswith('_') - and not isinstance(obj, mock.Mock)): - return deepcopy_return_value_method_decorator(obj) - return obj - - return NewClass - - -@deepcopy_return_value_class_decorator -class FakeEseriesClient(object): - features = na_utils.Features() - - def __init__(self, *args, **kwargs): - self.features.add_feature('AUTOSUPPORT') - self.features.add_feature('SSC_API_V2') - self.features.add_feature('REST_1_3_RELEASE') - self.features.add_feature('REST_1_4_RELEASE') - - def list_storage_pools(self): - return STORAGE_POOLS - - def register_storage_system(self, *args, **kwargs): - return { - 'freePoolSpace': '17055871480319', - 'driveCount': 24, - 'wwn': '60080E500023C73400000000515AF323', - 'id': '1', - 'hotSpareSizeAsString': '0', - 'hostSparesUsed': 0, - 'types': '', - 'hostSpareCountInStandby': 0, - 'status': 'optimal', - 'trayCount': 1, - 'usedPoolSpaceAsString': '37452115456', - 'ip2': '10.63.165.216', - 'ip1': '10.63.165.215', - 'freePoolSpaceAsString': '17055871480319', - 'hotSpareCount': 0, - 'hotSpareSize': '0', - 'name': 'stle2600-7_8', - 'usedPoolSpace': '37452115456', - 'driveTypes': ['sas'], - 'unconfiguredSpaceByDriveType': {}, - 'unconfiguredSpaceAsStrings': '0', - 'model': '2650', - 'unconfiguredSpace': '0' - } - - def list_volume(self, volume_id): - return VOLUME - - def list_volumes(self): - return [VOLUME] - - def delete_volume(self, vol): - pass - - def create_host_group(self, name): - return MULTIATTACH_HOST_GROUP - - def get_host_group(self, ref): - return MULTIATTACH_HOST_GROUP - - def list_host_groups(self): - return [MULTIATTACH_HOST_GROUP, FOREIGN_HOST_GROUP] - - def get_host_group_by_name(self, name, *args, **kwargs): - host_groups = self.list_host_groups() - return [host_group for host_group in host_groups - if host_group['label'] == name][0] - - def 
set_host_group_for_host(self, *args, **kwargs): - pass - - def create_host_with_ports(self, *args, **kwargs): - return HOST - - def list_hosts(self): - return [HOST, HOST_2] - - def get_host(self, *args, **kwargs): - return HOST - - def create_volume(self, *args, **kwargs): - return VOLUME - - def create_volume_mapping(self, *args, **kwargs): - return VOLUME_MAPPING - - def get_volume_mappings(self): - return [VOLUME_MAPPING] - - def get_volume_mappings_for_volume(self, volume): - return [VOLUME_MAPPING] - - def get_volume_mappings_for_host(self, host_ref): - return [VOLUME_MAPPING] - - def get_volume_mappings_for_host_group(self, hg_ref): - return [VOLUME_MAPPING] - - def delete_volume_mapping(self): - return - - def move_volume_mapping_via_symbol(self, map_ref, to_ref, lun_id): - return {'lun': lun_id} - - def list_storage_system(self): - return STORAGE_SYSTEM - - def list_storage_systems(self): - return [STORAGE_SYSTEM] - - def list_snapshot_groups(self): - return [SNAPSHOT_GROUP] - - def list_snapshot_images(self): - return [SNAPSHOT_IMAGE] - - def list_snapshot_image(self, *args, **kwargs): - return SNAPSHOT_IMAGE - - def create_cg_snapshot_view(self, *args, **kwargs): - return SNAPSHOT_VOLUME - - def list_host_types(self): - return [ - { - 'name': 'FactoryDefault', - 'index': 0, - 'code': 'FactoryDefault', - }, - { - 'name': 'Windows 2000/Server 2003/Server 2008 Non-Clustered', - 'index': 1, - 'code': 'W2KNETNCL', - }, - { - 'name': 'Solaris', - 'index': 2, - 'code': 'SOL', - }, - { - 'name': 'ONTAP_RDAC', - 'index': 4, - 'code': 'ONTAP_RDAC', - }, - { - 'name': 'AVT_4M', - 'index': 5, - 'code': 'AVT_4M', - }, - { - 'name': 'Linux', - 'index': 6, - 'code': 'LNX', - }, - { - 'name': 'LnxALUA', - 'index': 7, - 'code': 'LnxALUA', - }, - { - 'name': 'Windows 2000/Server 2003/Server 2008 Clustered', - 'index': 8, - 'code': 'W2KNETCL', - }, - { - 'name': 'AIX MPIO', - 'index': 9, - 'code': 'AIX MPIO', - }, - { - 'name': 'VmwTPGSALUA', - 'index': 10, - 'code': 
'VmwTPGSALUA', - }, - { - 'name': 'HP-UX TPGS', - 'index': 15, - 'code': 'HPXTPGS', - }, - { - 'name': 'SolTPGSALUA', - 'index': 17, - 'code': 'SolTPGSALUA', - }, - { - 'name': 'SVC', - 'index': 18, - 'code': 'SVC', - }, - { - 'name': 'MacTPGSALUA', - 'index': 22, - 'code': 'MacTPGSALUA', - }, - { - 'name': 'WinTPGSALUA', - 'index': 23, - 'code': 'WinTPGSALUA', - }, - { - 'name': 'LnxTPGSALUA', - 'index': 24, - 'code': 'LnxTPGSALUA', - }, - { - 'name': 'LnxTPGSALUA_PM', - 'index': 25, - 'code': 'LnxTPGSALUA_PM', - }, - { - 'name': 'ONTAP_ALUA', - 'index': 26, - 'code': 'ONTAP_ALUA', - }, - { - 'name': 'LnxTPGSALUA_SF', - 'index': 27, - 'code': 'LnxTPGSALUA_SF', - } - ] - - def update_host_type(self, *args, **kwargs): - pass - - def list_hardware_inventory(self): - return HARDWARE_INVENTORY - - def get_eseries_api_info(self, verify=False): - return 'Proxy', '1.53.9010.0005' - - def set_counter(self, key, value): - pass - - def add_autosupport_data(self, *args): - pass - - def set_chap_authentication(self, *args, **kwargs): - return FAKE_CHAP_PARAMETERS - - def get_serial_numbers(self): - return FAKE_ASUP_DATA.get('controller1-serial'), FAKE_ASUP_DATA.get( - 'controller2-serial') - - def get_model_name(self): - pass - - def api_operating_mode(self): - pass - - def get_firmware_version(self): - return FAKE_ASUP_DATA['system-version'] - - def create_volume_copy_job(self, *args, **kwargs): - return VOLUME_COPY_JOB - - def list_vol_copy_job(self, *args, **kwargs): - return VOLUME_COPY_JOB - - def delete_vol_copy_job(self, *args, **kwargs): - pass - - def create_snapshot_image(self, *args, **kwargs): - return SNAPSHOT_IMAGE - - def create_snapshot_volume(self, *args, **kwargs): - return SNAPSHOT_VOLUME - - def list_snapshot_volumes(self, *args, **kwargs): - return [SNAPSHOT_VOLUME] - - def list_snapshot_volume(self, *args, **kwargs): - return SNAPSHOT_IMAGE - - def create_snapshot_group(self, *args, **kwargs): - return SNAPSHOT_GROUP - - def list_snapshot_group(self, 
*args, **kwargs): - return SNAPSHOT_GROUP - - def delete_snapshot_volume(self, *args, **kwargs): - pass - - def list_target_wwpns(self, *args, **kwargs): - return [WWPN_2] - - def update_stored_system_password(self, *args, **kwargs): - pass - - def update_snapshot_volume(self, *args, **kwargs): - return SNAPSHOT_VOLUME - - def delete_snapshot_image(self, *args, **kwargs): - pass - - def delete_snapshot_group(self, *args, **kwargs): - pass - - def restart_snapshot_volume(self, *args, **kwargs): - pass - - def create_consistency_group(self, *args, **kwargs): - return FAKE_CONSISTENCY_GROUP - - def delete_consistency_group(self, *args, **kwargs): - pass - - def list_consistency_groups(self, *args, **kwargs): - return [FAKE_CONSISTENCY_GROUP] - - def remove_consistency_group_member(self, *args, **kwargs): - pass - - def add_consistency_group_member(self, *args, **kwargs): - pass - - def list_backend_store(self, key): - return {} - - def save_backend_store(self, key, val): - pass - - def create_consistency_group_snapshot(self, *args, **kwargs): - return [SNAPSHOT_IMAGE] - - def get_consistency_group_snapshots(self, *args, **kwargs): - return [SNAPSHOT_IMAGE] - - def delete_consistency_group_snapshot(self, *args, **kwargs): - pass diff --git a/cinder/tests/unit/volume/drivers/netapp/eseries/test_client.py b/cinder/tests/unit/volume/drivers/netapp/eseries/test_client.py deleted file mode 100644 index 0cbcfcae2..000000000 --- a/cinder/tests/unit/volume/drivers/netapp/eseries/test_client.py +++ /dev/null @@ -1,1218 +0,0 @@ -# Copyright (c) 2014 Alex Meade -# Copyright (c) 2015 Yogesh Kshirsagar -# Copyright (c) 2015 Michael Price -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. 
You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import copy - -import ddt -import json -import mock -from simplejson import scanner -from six.moves import http_client - -from cinder import exception -from cinder import test -from cinder.tests.unit.volume.drivers.netapp.eseries import fakes as \ - eseries_fake -from cinder.volume.drivers.netapp.eseries import exception as es_exception - -from cinder.volume.drivers.netapp.eseries import client -from cinder.volume.drivers.netapp import utils as na_utils - - -@ddt.ddt -class NetAppEseriesClientDriverTestCase(test.TestCase): - """Test case for NetApp e-series client.""" - - def setUp(self): - super(NetAppEseriesClientDriverTestCase, self).setUp() - self.mock_log = mock.Mock() - self.mock_object(client, 'LOG', self.mock_log) - self.fake_password = 'mysecret' - - self.my_client = client.RestClient('http', 'host', '80', '/test', - 'user', self.fake_password, - system_id='fake_sys_id') - self.my_client._endpoint = eseries_fake.FAKE_ENDPOINT_HTTP - - fake_response = mock.Mock() - fake_response.status_code = http_client.OK - self.my_client.invoke_service = mock.Mock(return_value=fake_response) - self.my_client.api_version = '01.52.9000.1' - - @ddt.data(http_client.OK, http_client.CREATED, - http_client.NON_AUTHORITATIVE_INFORMATION, - http_client.NO_CONTENT) - def test_eval_response_success(self, status_code): - fake_resp = mock.Mock() - fake_resp.status_code = status_code - - self.assertIsNone(self.my_client._eval_response(fake_resp)) - - @ddt.data(300, 400, 404, 500) - def test_eval_response_failure(self, status_code): - fake_resp = mock.Mock() - 
fake_resp.status_code = status_code - expected_msg = "Response error code - %s." % status_code - - with self.assertRaisesRegex(es_exception.WebServiceException, - expected_msg) as exc: - self.my_client._eval_response(fake_resp) - - self.assertEqual(status_code, exc.status_code) - - @ddt.data(('30', 'storage array password.*?incorrect'), - ('authFailPassword', 'storage array password.*?incorrect'), - ('unknown', None)) - @ddt.unpack - def test_eval_response_422(self, ret_code, exc_regex): - status_code = http_client.UNPROCESSABLE_ENTITY - fake_resp = mock.Mock() - fake_resp.text = "fakeError" - fake_resp.json = mock.Mock(return_value={'retcode': ret_code}) - fake_resp.status_code = status_code - exc_regex = exc_regex if exc_regex is not None else fake_resp.text - - with self.assertRaisesRegexp(es_exception.WebServiceException, - exc_regex) as exc: - self.my_client._eval_response(fake_resp) - self.assertEqual(status_code, exc.status_code) - - def test_eval_response_424(self): - status_code = http_client.FAILED_DEPENDENCY - fake_resp = mock.Mock() - fake_resp.status_code = status_code - fake_resp.text = "Fake Error Message" - - with self.assertRaisesRegex(es_exception.WebServiceException, - "The storage-system is offline") as exc: - self.my_client._eval_response(fake_resp) - - self.assertEqual(status_code, exc.status_code) - - def test_register_storage_system_does_not_log_password(self): - self.my_client._eval_response = mock.Mock() - self.my_client.register_storage_system([], password=self.fake_password) - for call in self.mock_log.debug.mock_calls: - __, args, __ = call - self.assertNotIn(self.fake_password, args[0]) - - def test_update_stored_system_password_does_not_log_password(self): - self.my_client._eval_response = mock.Mock() - self.my_client.update_stored_system_password( - password=self.fake_password) - for call in self.mock_log.debug.mock_calls: - __, args, __ = call - self.assertNotIn(self.fake_password, args[0]) - - def test_list_target_wwpns(self): - 
fake_hardware_inventory = copy.deepcopy( - eseries_fake.HARDWARE_INVENTORY) - - mock_hardware_inventory = mock.Mock( - return_value=fake_hardware_inventory) - self.mock_object(self.my_client, 'list_hardware_inventory', - mock_hardware_inventory) - expected_wwpns = [eseries_fake.WWPN, eseries_fake.WWPN_2] - - actual_wwpns = self.my_client.list_target_wwpns() - - self.assertEqual(expected_wwpns, actual_wwpns) - - def test_list_target_wwpns_single_wwpn(self): - fake_hardware_inventory = copy.deepcopy( - eseries_fake.HARDWARE_INVENTORY) - - fake_hardware_inventory['fibrePorts'] = [ - fake_hardware_inventory['fibrePorts'][0] - ] - mock_hardware_inventory = mock.Mock( - return_value=fake_hardware_inventory) - self.mock_object(self.my_client, 'list_hardware_inventory', - mock_hardware_inventory) - expected_wwpns = [eseries_fake.WWPN] - - actual_wwpns = self.my_client.list_target_wwpns() - - self.assertEqual(expected_wwpns, actual_wwpns) - - def test_list_target_wwpns_no_wwpn(self): - fake_hardware_inventory = copy.deepcopy( - eseries_fake.HARDWARE_INVENTORY) - - fake_hardware_inventory['fibrePorts'] = [] - mock_hardware_inventory = mock.Mock( - return_value=fake_hardware_inventory) - self.mock_object(self.my_client, 'list_hardware_inventory', - mock_hardware_inventory) - expected_wwpns = [] - - actual_wwpns = self.my_client.list_target_wwpns() - - self.assertEqual(expected_wwpns, actual_wwpns) - - def test_get_host_group_by_name(self): - groups = copy.deepcopy(eseries_fake.HOST_GROUPS) - group = groups[0] - self.mock_object(self.my_client, 'list_host_groups', - return_value=groups) - - result = self.my_client.get_host_group_by_name(group['label']) - - self.assertEqual(group, result) - - def test_move_volume_mapping_via_symbol(self): - invoke = self.mock_object(self.my_client, '_invoke', return_value='ok') - host_ref = 'host' - cluster_ref = 'cluster' - lun_id = 10 - expected_data = {'lunMappingRef': host_ref, 'lun': lun_id, - 'mapRef': cluster_ref} - - result = 
self.my_client.move_volume_mapping_via_symbol(host_ref, - cluster_ref, - lun_id) - - invoke.assert_called_once_with('POST', '/storage-systems/{system-id}/' - 'symbol/moveLUNMapping', - expected_data) - - self.assertEqual({'lun': lun_id}, result) - - def test_move_volume_mapping_via_symbol_fail(self): - self.mock_object(self.my_client, '_invoke', return_value='failure') - - self.assertRaises( - exception.NetAppDriverException, - self.my_client.move_volume_mapping_via_symbol, '1', '2', 10) - - def test_create_host_from_ports_fc(self): - label = 'fake_host' - host_type = 'linux' - port_type = 'fc' - port_ids = [eseries_fake.WWPN, eseries_fake.WWPN_2] - expected_ports = [ - {'type': port_type, 'port': eseries_fake.WWPN, 'label': mock.ANY}, - {'type': port_type, 'port': eseries_fake.WWPN_2, - 'label': mock.ANY}] - mock_create_host = self.mock_object(self.my_client, 'create_host') - - self.my_client.create_host_with_ports(label, host_type, port_ids, - port_type) - - mock_create_host.assert_called_once_with(label, host_type, - expected_ports, None) - - def test_host_from_ports_with_no_ports_provided_fc(self): - label = 'fake_host' - host_type = 'linux' - port_type = 'fc' - port_ids = [] - expected_ports = [] - mock_create_host = self.mock_object(self.my_client, 'create_host') - - self.my_client.create_host_with_ports(label, host_type, port_ids, - port_type) - - mock_create_host.assert_called_once_with(label, host_type, - expected_ports, None) - - def test_create_host_from_ports_iscsi(self): - label = 'fake_host' - host_type = 'linux' - port_type = 'iscsi' - port_ids = [eseries_fake.INITIATOR_NAME, - eseries_fake.INITIATOR_NAME_2] - expected_ports = [ - {'type': port_type, 'port': eseries_fake.INITIATOR_NAME, - 'label': mock.ANY}, - {'type': port_type, 'port': eseries_fake.INITIATOR_NAME_2, - 'label': mock.ANY}] - mock_create_host = self.mock_object(self.my_client, 'create_host') - - self.my_client.create_host_with_ports(label, host_type, port_ids, - port_type) - - 
mock_create_host.assert_called_once_with(label, host_type, - expected_ports, None) - - def test_get_volume_mappings_for_volume(self): - volume_mapping_1 = copy.deepcopy(eseries_fake.VOLUME_MAPPING) - volume_mapping_2 = copy.deepcopy(eseries_fake.VOLUME_MAPPING) - volume_mapping_2['volumeRef'] = '2' - self.mock_object(self.my_client, 'get_volume_mappings', - return_value=[volume_mapping_1, volume_mapping_2]) - - mappings = self.my_client.get_volume_mappings_for_volume( - eseries_fake.VOLUME) - - self.assertEqual([volume_mapping_1], mappings) - - def test_get_volume_mappings_for_host(self): - volume_mapping_1 = copy.deepcopy( - eseries_fake.VOLUME_MAPPING) - volume_mapping_2 = copy.deepcopy(eseries_fake.VOLUME_MAPPING) - volume_mapping_2['volumeRef'] = '2' - volume_mapping_2['mapRef'] = 'hostRef' - self.mock_object(self.my_client, 'get_volume_mappings', - return_value=[volume_mapping_1, volume_mapping_2]) - - mappings = self.my_client.get_volume_mappings_for_host( - 'hostRef') - - self.assertEqual([volume_mapping_2], mappings) - - def test_get_volume_mappings_for_hostgroup(self): - volume_mapping_1 = copy.deepcopy( - eseries_fake.VOLUME_MAPPING) - volume_mapping_2 = copy.deepcopy(eseries_fake.VOLUME_MAPPING) - volume_mapping_2['volumeRef'] = '2' - volume_mapping_2['mapRef'] = 'hostGroupRef' - self.mock_object(self.my_client, 'get_volume_mappings', - return_value=[volume_mapping_1, volume_mapping_2]) - - mappings = self.my_client.get_volume_mappings_for_host_group( - 'hostGroupRef') - - self.assertEqual([volume_mapping_2], mappings) - - def test_to_pretty_dict_string(self): - dict = { - 'foo': 'bar', - 'fu': { - 'nested': 'boo' - } - } - expected_dict_string = ("""{ - "foo": "bar", - "fu": { - "nested": "boo" - } -}""") - - dict_string = self.my_client._to_pretty_dict_string(dict) - - self.assertEqual(expected_dict_string, dict_string) - - def test_log_http_request(self): - mock_log = self.mock_object(client, 'LOG') - verb = "POST" - url = "/v2/test/me" - headers = 
{"Content-Type": "application/json"} - headers_string = """{ - "Content-Type": "application/json" -}""" - body = {} - body_string = "{}" - - self.my_client._log_http_request(verb, url, headers, body) - - args = mock_log.debug.call_args - log_message, log_params = args[0] - final_msg = log_message % log_params - self.assertIn(verb, final_msg) - self.assertIn(url, final_msg) - self.assertIn(headers_string, final_msg) - self.assertIn(body_string, final_msg) - - def test_log_http_request_no_body(self): - mock_log = self.mock_object(client, 'LOG') - verb = "POST" - url = "/v2/test/me" - headers = {"Content-Type": "application/json"} - headers_string = """{ - "Content-Type": "application/json" -}""" - body = None - body_string = "" - - self.my_client._log_http_request(verb, url, headers, body) - - args = mock_log.debug.call_args - log_message, log_params = args[0] - final_msg = log_message % log_params - self.assertIn(verb, final_msg) - self.assertIn(url, final_msg) - self.assertIn(headers_string, final_msg) - self.assertIn(body_string, final_msg) - - def test_log_http_response(self): - mock_log = self.mock_object(client, 'LOG') - status = "200" - headers = {"Content-Type": "application/json"} - headers_string = """{ - "Content-Type": "application/json" -}""" - body = {} - body_string = "{}" - - self.my_client._log_http_response(status, headers, body) - - args = mock_log.debug.call_args - log_message, log_params = args[0] - final_msg = log_message % log_params - self.assertIn(status, final_msg) - self.assertIn(headers_string, final_msg) - self.assertIn(body_string, final_msg) - - def test_log_http_response_no_body(self): - mock_log = self.mock_object(client, 'LOG') - status = "200" - headers = {"Content-Type": "application/json"} - headers_string = """{ - "Content-Type": "application/json" -}""" - body = None - body_string = "" - - self.my_client._log_http_response(status, headers, body) - - args = mock_log.debug.call_args - log_message, log_params = args[0] - final_msg 
= log_message % log_params - self.assertIn(status, final_msg) - self.assertIn(headers_string, final_msg) - self.assertIn(body_string, final_msg) - - def test_add_autosupport_data(self): - self.mock_object( - client.RestClient, 'get_eseries_api_info', - return_value=(eseries_fake.FAKE_ASUP_DATA['operating-mode'], - eseries_fake.FAKE_ABOUT_RESPONSE['version'])) - self.mock_object( - self.my_client, 'get_asup_info', - return_value=eseries_fake.GET_ASUP_RETURN) - self.mock_object( - self.my_client, 'set_counter', return_value={'value': 1}) - mock_invoke = self.mock_object( - self.my_client, '_invoke', - return_value=eseries_fake.FAKE_ASUP_DATA) - - client.RestClient.add_autosupport_data( - self.my_client, - eseries_fake.FAKE_KEY, - eseries_fake.FAKE_ASUP_DATA - ) - - mock_invoke.assert_called_with(*eseries_fake.FAKE_POST_INVOKE_DATA) - - @ddt.data((eseries_fake.FAKE_SERIAL_NUMBERS, - eseries_fake.HARDWARE_INVENTORY), - (eseries_fake.FAKE_DEFAULT_SERIAL_NUMBER, {}), - (eseries_fake.FAKE_SERIAL_NUMBER, - eseries_fake.HARDWARE_INVENTORY_SINGLE_CONTROLLER)) - @ddt.unpack - def test_get_asup_info_serial_numbers(self, expected_serial_numbers, - controllers): - self.mock_object( - client.RestClient, 'list_hardware_inventory', - return_value=controllers) - self.mock_object( - client.RestClient, 'list_storage_system', return_value={}) - - sn = client.RestClient.get_asup_info(self.my_client)['serial_numbers'] - - self.assertEqual(expected_serial_numbers, sn) - - def test_get_asup_info_model_name(self): - self.mock_object( - client.RestClient, 'list_hardware_inventory', - return_value=eseries_fake.HARDWARE_INVENTORY) - self.mock_object( - client.RestClient, 'list_storage_system', - return_value=eseries_fake.STORAGE_SYSTEM) - - model_name = client.RestClient.get_asup_info(self.my_client)['model'] - - self.assertEqual(eseries_fake.HARDWARE_INVENTORY['controllers'][0] - ['modelName'], model_name) - - def test_get_asup_info_model_name_empty_controllers_list(self): - self.mock_object( 
- client.RestClient, 'list_hardware_inventory', return_value={}) - self.mock_object( - client.RestClient, 'list_storage_system', return_value={}) - - model_name = client.RestClient.get_asup_info(self.my_client)['model'] - - self.assertEqual(eseries_fake.FAKE_DEFAULT_MODEL, model_name) - - def test_get_eseries_api_info(self): - fake_invoke_service = mock.Mock() - fake_invoke_service.json = mock.Mock( - return_value=eseries_fake.FAKE_ABOUT_RESPONSE) - self.mock_object( - client.RestClient, '_get_resource_url', - return_value=eseries_fake.FAKE_RESOURCE_URL) - self.mock_object( - self.my_client, 'invoke_service', return_value=fake_invoke_service) - - eseries_info = client.RestClient.get_eseries_api_info( - self.my_client, verify=False) - - self.assertEqual((eseries_fake.FAKE_ASUP_DATA['operating-mode'], - eseries_fake.FAKE_ABOUT_RESPONSE['version']), - eseries_info) - - def test_list_ssc_storage_pools(self): - self.my_client.features = mock.Mock() - self.my_client._invoke = mock.Mock( - return_value=eseries_fake.SSC_POOLS) - - pools = client.RestClient.list_ssc_storage_pools(self.my_client) - - self.assertEqual(eseries_fake.SSC_POOLS, pools) - - def test_get_ssc_storage_pool(self): - fake_pool = eseries_fake.SSC_POOLS[0] - self.my_client.features = mock.Mock() - self.my_client._invoke = mock.Mock( - return_value=fake_pool) - - pool = client.RestClient.get_ssc_storage_pool(self.my_client, - fake_pool['poolId']) - - self.assertEqual(fake_pool, pool) - - @ddt.data(('volumes', True), ('volumes', False), - ('volume', True), ('volume', False)) - @ddt.unpack - def test_get_volume_api_path(self, path_key, ssc_available): - self.my_client.features = mock.Mock() - self.my_client.features.SSC_API_V2 = na_utils.FeatureState( - supported=ssc_available) - expected_key = 'ssc_' + path_key if ssc_available else path_key - expected = self.my_client.RESOURCE_PATHS.get(expected_key) - - actual = self.my_client._get_volume_api_path(path_key) - - self.assertEqual(expected, actual) - - 
@ddt.data(True, False) - def test_get_volume_api_path_invalid(self, ssc_available): - key = 'invalidKey' - self.my_client.features = mock.Mock() - self.my_client.features.SSC_API_V2 = na_utils.FeatureState( - supported=ssc_available) - - self.assertRaises(KeyError, self.my_client._get_volume_api_path, key) - - def test_list_volumes(self): - url = client.RestClient.RESOURCE_PATHS['ssc_volumes'] - self.my_client.features = mock.Mock() - self.my_client.features.SSC_API_V2 = na_utils.FeatureState( - supported=True) - self.my_client._invoke = mock.Mock( - return_value=eseries_fake.VOLUMES) - - volumes = client.RestClient.list_volumes(self.my_client) - - self.assertEqual(eseries_fake.VOLUMES, volumes) - self.my_client._invoke.assert_called_once_with('GET', url) - - @ddt.data(client.RestClient.ID, client.RestClient.WWN, - client.RestClient.NAME) - def test_list_volume_v1(self, uid_field_name): - url = client.RestClient.RESOURCE_PATHS['volumes'] - self.my_client.features = mock.Mock() - self.my_client.features.SSC_API_V2 = na_utils.FeatureState( - supported=False) - fake_volume = copy.deepcopy(eseries_fake.VOLUME) - self.my_client._invoke = mock.Mock( - return_value=eseries_fake.VOLUMES) - - volume = client.RestClient.list_volume(self.my_client, - fake_volume[uid_field_name]) - - self.my_client._invoke.assert_called_once_with('GET', url) - self.assertEqual(fake_volume, volume) - - def test_list_volume_v1_not_found(self): - url = client.RestClient.RESOURCE_PATHS['volumes'] - self.my_client.features = mock.Mock() - self.my_client.features.SSC_API_V2 = na_utils.FeatureState( - supported=False) - self.my_client._invoke = mock.Mock( - return_value=eseries_fake.VOLUMES) - - self.assertRaises(exception.VolumeNotFound, - client.RestClient.list_volume, - self.my_client, 'fakeId') - self.my_client._invoke.assert_called_once_with('GET', url) - - def test_list_volume_v2(self): - url = client.RestClient.RESOURCE_PATHS['ssc_volume'] - self.my_client.features = mock.Mock() - 
self.my_client.features.SSC_API_V2 = na_utils.FeatureState( - supported=True) - fake_volume = copy.deepcopy(eseries_fake.VOLUME) - self.my_client._invoke = mock.Mock(return_value=fake_volume) - - volume = client.RestClient.list_volume(self.my_client, - fake_volume['id']) - - self.my_client._invoke.assert_called_once_with('GET', url, - **{'object-id': - mock.ANY}) - self.assertEqual(fake_volume, volume) - - def test_list_volume_v2_not_found(self): - status_code = http_client.NOT_FOUND - url = client.RestClient.RESOURCE_PATHS['ssc_volume'] - self.my_client.features = mock.Mock() - self.my_client.features.SSC_API_V2 = na_utils.FeatureState( - supported=True) - msg = "Response error code - %s." % status_code - self.my_client._invoke = mock.Mock( - side_effect=es_exception.WebServiceException(message=msg, - status_code= - status_code)) - - self.assertRaises(exception.VolumeNotFound, - client.RestClient.list_volume, - self.my_client, 'fakeId') - self.my_client._invoke.assert_called_once_with('GET', url, - **{'object-id': - mock.ANY}) - - def test_list_volume_v2_failure(self): - status_code = http_client.UNPROCESSABLE_ENTITY - url = client.RestClient.RESOURCE_PATHS['ssc_volume'] - self.my_client.features = mock.Mock() - self.my_client.features.SSC_API_V2 = na_utils.FeatureState( - supported=True) - msg = "Response error code - %s." 
% status_code - self.my_client._invoke = mock.Mock( - side_effect=es_exception.WebServiceException(message=msg, - status_code= - status_code)) - - self.assertRaises(es_exception.WebServiceException, - client.RestClient.list_volume, self.my_client, - 'fakeId') - self.my_client._invoke.assert_called_once_with('GET', url, - **{'object-id': - mock.ANY}) - - def test_create_volume_V1(self): - self.my_client.features = mock.Mock() - self.my_client.features.SSC_API_V2 = na_utils.FeatureState( - supported=False) - create_volume = self.my_client._invoke = mock.Mock( - return_value=eseries_fake.VOLUME) - - volume = client.RestClient.create_volume(self.my_client, - 'fakePool', '1', 1) - - args, kwargs = create_volume.call_args - verb, url, body = args - # Ensure the correct API was used - self.assertEqual('/storage-systems/{system-id}/volumes', url) - self.assertEqual(eseries_fake.VOLUME, volume) - - def test_create_volume_V2(self): - self.my_client.features = mock.Mock() - self.my_client.features.SSC_API_V2 = na_utils.FeatureState( - supported=True) - create_volume = self.my_client._invoke = mock.Mock( - return_value=eseries_fake.VOLUME) - - volume = client.RestClient.create_volume(self.my_client, - 'fakePool', '1', 1) - - args, kwargs = create_volume.call_args - verb, url, body = args - # Ensure the correct API was used - self.assertIn('/storage-systems/{system-id}/ssc/volumes', url, - 'The legacy API was used!') - self.assertEqual(eseries_fake.VOLUME, volume) - - def test_create_volume_unsupported_specs(self): - self.my_client.features = mock.Mock() - self.my_client.features.SSC_API_V2 = na_utils.FeatureState( - supported=False) - self.my_client.api_version = '01.52.9000.1' - - self.assertRaises(exception.NetAppDriverException, - client.RestClient.create_volume, self.my_client, - '1', 'label', 1, read_cache=True) - - @ddt.data(True, False) - def test_update_volume(self, ssc_api_enabled): - label = 'updatedName' - fake_volume = copy.deepcopy(eseries_fake.VOLUME) - 
expected_volume = copy.deepcopy(fake_volume) - expected_volume['name'] = label - self.my_client.features = mock.Mock() - self.my_client.features.SSC_API_V2 = na_utils.FeatureState( - supported=ssc_api_enabled) - self.my_client._invoke = mock.Mock(return_value=expected_volume) - - updated_volume = self.my_client.update_volume(fake_volume['id'], - label) - - if ssc_api_enabled: - url = self.my_client.RESOURCE_PATHS.get('ssc_volume') - else: - url = self.my_client.RESOURCE_PATHS.get('volume') - - self.my_client._invoke.assert_called_once_with('POST', url, - {'name': label}, - **{'object-id': - fake_volume['id']} - ) - self.assertDictEqual(expected_volume, updated_volume) - - def test_get_pool_operation_progress(self): - fake_pool = copy.deepcopy(eseries_fake.STORAGE_POOL) - fake_response = copy.deepcopy(eseries_fake.FAKE_POOL_ACTION_PROGRESS) - self.my_client._invoke = mock.Mock(return_value=fake_response) - - response = self.my_client.get_pool_operation_progress(fake_pool['id']) - - url = self.my_client.RESOURCE_PATHS.get('pool_operation_progress') - self.my_client._invoke.assert_called_once_with('GET', url, - **{'object-id': - fake_pool['id']}) - self.assertEqual(fake_response, response) - - def test_extend_volume(self): - new_capacity = 10 - fake_volume = copy.deepcopy(eseries_fake.VOLUME) - self.my_client.features = mock.Mock() - self.my_client.features.SSC_API_V2 = na_utils.FeatureState( - supported=True) - self.my_client._invoke = mock.Mock(return_value=fake_volume) - - expanded_volume = self.my_client.expand_volume(fake_volume['id'], - new_capacity, False) - - url = self.my_client.RESOURCE_PATHS.get('volume_expand') - body = {'expansionSize': new_capacity, 'sizeUnit': 'gb'} - self.my_client._invoke.assert_called_once_with('POST', url, body, - **{'object-id': - fake_volume['id']}) - self.assertEqual(fake_volume, expanded_volume) - - def test_extend_volume_thin(self): - new_capacity = 10 - fake_volume = copy.deepcopy(eseries_fake.VOLUME) - self.my_client.features 
= mock.Mock() - self.my_client.features.SSC_API_V2 = na_utils.FeatureState( - supported=True) - self.my_client._invoke = mock.Mock(return_value=fake_volume) - - expanded_volume = self.my_client.expand_volume(fake_volume['id'], - new_capacity, True) - - url = self.my_client.RESOURCE_PATHS.get('thin_volume_expand') - body = {'newVirtualSize': new_capacity, 'sizeUnit': 'gb', - 'newRepositorySize': new_capacity} - self.my_client._invoke.assert_called_once_with('POST', url, body, - **{'object-id': - fake_volume['id']}) - self.assertEqual(fake_volume, expanded_volume) - - @ddt.data(True, False) - def test_delete_volume(self, ssc_api_enabled): - fake_volume = copy.deepcopy(eseries_fake.VOLUME) - self.my_client.features = mock.Mock() - self.my_client.features.SSC_API_V2 = na_utils.FeatureState( - supported=ssc_api_enabled) - self.my_client._invoke = mock.Mock() - - self.my_client.delete_volume(fake_volume['id']) - - if ssc_api_enabled: - url = self.my_client.RESOURCE_PATHS.get('ssc_volume') - else: - url = self.my_client.RESOURCE_PATHS.get('volume') - - self.my_client._invoke.assert_called_once_with('DELETE', url, - **{'object-id': - fake_volume['id']}) - - def test_list_snapshot_group(self): - grp = copy.deepcopy(eseries_fake.SNAPSHOT_GROUP) - invoke = self.mock_object(self.my_client, '_invoke', return_value=grp) - fake_ref = 'fake' - - result = self.my_client.list_snapshot_group(fake_ref) - - self.assertEqual(grp, result) - invoke.assert_called_once_with( - 'GET', self.my_client.RESOURCE_PATHS['snapshot_group'], - **{'object-id': fake_ref}) - - def test_list_snapshot_groups(self): - grps = [copy.deepcopy(eseries_fake.SNAPSHOT_GROUP)] - invoke = self.mock_object(self.my_client, '_invoke', return_value=grps) - - result = self.my_client.list_snapshot_groups() - - self.assertEqual(grps, result) - invoke.assert_called_once_with( - 'GET', self.my_client.RESOURCE_PATHS['snapshot_groups']) - - def test_delete_snapshot_group(self): - invoke = self.mock_object(self.my_client, 
'_invoke') - fake_ref = 'fake' - - self.my_client.delete_snapshot_group(fake_ref) - - invoke.assert_called_once_with( - 'DELETE', self.my_client.RESOURCE_PATHS['snapshot_group'], - **{'object-id': fake_ref}) - - @ddt.data((None, None, None, None, None), ('1', 50, 75, 32, 'purgepit')) - @ddt.unpack - def test_create_snapshot_group(self, pool_id, repo, warn, limit, policy): - vol = copy.deepcopy(eseries_fake.SNAPSHOT_VOLUME) - invoke = self.mock_object(self.my_client, '_invoke', return_value=vol) - snap_grp = copy.deepcopy(eseries_fake.SNAPSHOT_GROUP) - - result = self.my_client.create_snapshot_group( - snap_grp['label'], snap_grp['id'], pool_id, repo, warn, limit, - policy) - - self.assertEqual(vol, result) - invoke.assert_called_once_with( - 'POST', self.my_client.RESOURCE_PATHS['snapshot_groups'], - {'baseMappableObjectId': snap_grp['id'], 'name': snap_grp['label'], - 'storagePoolId': pool_id, 'repositoryPercentage': repo, - 'warningThreshold': warn, 'autoDeleteLimit': limit, - 'fullPolicy': policy}) - - def test_list_snapshot_volumes(self): - vols = [copy.deepcopy(eseries_fake.SNAPSHOT_VOLUME)] - invoke = self.mock_object(self.my_client, '_invoke', return_value=vols) - - result = self.my_client.list_snapshot_volumes() - - self.assertEqual(vols, result) - invoke.assert_called_once_with( - 'GET', self.my_client.RESOURCE_PATHS['snapshot_volumes']) - - def test_delete_snapshot_volume(self): - invoke = self.mock_object(self.my_client, '_invoke') - fake_ref = 'fake' - - self.my_client.delete_snapshot_volume(fake_ref) - - invoke.assert_called_once_with( - 'DELETE', self.my_client.RESOURCE_PATHS['snapshot_volume'], - **{'object-id': fake_ref}) - - @ddt.data((None, None, None, None), ('1', 50, 75, 'readWrite')) - @ddt.unpack - def test_create_snapshot_volume(self, pool_id, repo, warn, mode): - vol = copy.deepcopy(eseries_fake.SNAPSHOT_VOLUME) - invoke = self.mock_object(self.my_client, '_invoke', return_value=vol) - - result = self.my_client.create_snapshot_volume( - 
vol['basePIT'], vol['label'], vol['id'], pool_id, - repo, warn, mode) - - self.assertEqual(vol, result) - invoke.assert_called_once_with( - 'POST', self.my_client.RESOURCE_PATHS['snapshot_volumes'], - mock.ANY) - - def test_update_snapshot_volume(self): - snap_id = '1' - label = 'name' - pct = 99 - vol = copy.deepcopy(eseries_fake.SNAPSHOT_VOLUME) - invoke = self.mock_object(self.my_client, '_invoke', return_value=vol) - - result = self.my_client.update_snapshot_volume(snap_id, label, pct) - - self.assertEqual(vol, result) - invoke.assert_called_once_with( - 'POST', self.my_client.RESOURCE_PATHS['snapshot_volume'], - {'name': label, 'fullThreshold': pct}, **{'object-id': snap_id}) - - def test_create_snapshot_image(self): - img = copy.deepcopy(eseries_fake.SNAPSHOT_IMAGE) - invoke = self.mock_object(self.my_client, '_invoke', return_value=img) - grp_id = '1' - - result = self.my_client.create_snapshot_image(grp_id) - - self.assertEqual(img, result) - invoke.assert_called_once_with( - 'POST', self.my_client.RESOURCE_PATHS['snapshot_images'], - {'groupId': grp_id}) - - def test_list_snapshot_image(self): - img = copy.deepcopy(eseries_fake.SNAPSHOT_IMAGE) - invoke = self.mock_object(self.my_client, '_invoke', return_value=img) - fake_ref = 'fake' - - result = self.my_client.list_snapshot_image(fake_ref) - - self.assertEqual(img, result) - invoke.assert_called_once_with( - 'GET', self.my_client.RESOURCE_PATHS['snapshot_image'], - **{'object-id': fake_ref}) - - def test_list_snapshot_images(self): - imgs = [copy.deepcopy(eseries_fake.SNAPSHOT_IMAGE)] - invoke = self.mock_object(self.my_client, '_invoke', return_value=imgs) - - result = self.my_client.list_snapshot_images() - - self.assertEqual(imgs, result) - invoke.assert_called_once_with( - 'GET', self.my_client.RESOURCE_PATHS['snapshot_images']) - - def test_delete_snapshot_image(self): - invoke = self.mock_object(self.my_client, '_invoke') - fake_ref = 'fake' - - self.my_client.delete_snapshot_image(fake_ref) - - 
invoke.assert_called_once_with( - 'DELETE', self.my_client.RESOURCE_PATHS['snapshot_image'], - **{'object-id': fake_ref}) - - def test_create_consistency_group(self): - invoke = self.mock_object(self.my_client, '_invoke') - name = 'fake' - - self.my_client.create_consistency_group(name) - - invoke.assert_called_once_with( - 'POST', self.my_client.RESOURCE_PATHS['cgroups'], mock.ANY) - - def test_list_consistency_group(self): - invoke = self.mock_object(self.my_client, '_invoke') - ref = 'fake' - - self.my_client.get_consistency_group(ref) - - invoke.assert_called_once_with( - 'GET', self.my_client.RESOURCE_PATHS['cgroup'], - **{'object-id': ref}) - - def test_list_consistency_groups(self): - invoke = self.mock_object(self.my_client, '_invoke') - - self.my_client.list_consistency_groups() - - invoke.assert_called_once_with( - 'GET', self.my_client.RESOURCE_PATHS['cgroups']) - - def test_delete_consistency_group(self): - invoke = self.mock_object(self.my_client, '_invoke') - ref = 'fake' - - self.my_client.delete_consistency_group(ref) - - invoke.assert_called_once_with( - 'DELETE', self.my_client.RESOURCE_PATHS['cgroup'], - **{'object-id': ref}) - - def test_add_consistency_group_member(self): - invoke = self.mock_object(self.my_client, '_invoke') - vol_id = eseries_fake.VOLUME['id'] - cg_id = eseries_fake.FAKE_CONSISTENCY_GROUP['id'] - - self.my_client.add_consistency_group_member(vol_id, cg_id) - - invoke.assert_called_once_with( - 'POST', self.my_client.RESOURCE_PATHS['cgroup_members'], - mock.ANY, **{'object-id': cg_id}) - - def test_remove_consistency_group_member(self): - invoke = self.mock_object(self.my_client, '_invoke') - vol_id = eseries_fake.VOLUME['id'] - cg_id = eseries_fake.FAKE_CONSISTENCY_GROUP['id'] - - self.my_client.remove_consistency_group_member(vol_id, cg_id) - - invoke.assert_called_once_with( - 'DELETE', self.my_client.RESOURCE_PATHS['cgroup_member'], - **{'object-id': cg_id, 'vol-id': vol_id}) - - def 
test_create_consistency_group_snapshot(self): - invoke = self.mock_object(self.my_client, '_invoke') - path = self.my_client.RESOURCE_PATHS.get('cgroup_snapshots') - cg_id = eseries_fake.FAKE_CONSISTENCY_GROUP['id'] - - self.my_client.create_consistency_group_snapshot(cg_id) - - invoke.assert_called_once_with('POST', path, **{'object-id': cg_id}) - - @ddt.data(0, 32) - def test_delete_consistency_group_snapshot(self, seq_num): - invoke = self.mock_object(self.my_client, '_invoke') - path = self.my_client.RESOURCE_PATHS.get('cgroup_snapshot') - cg_id = eseries_fake.FAKE_CONSISTENCY_GROUP['id'] - - self.my_client.delete_consistency_group_snapshot(cg_id, seq_num) - - invoke.assert_called_once_with( - 'DELETE', path, **{'object-id': cg_id, 'seq-num': seq_num}) - - def test_get_consistency_group_snapshots(self): - invoke = self.mock_object(self.my_client, '_invoke') - path = self.my_client.RESOURCE_PATHS.get('cgroup_snapshots') - cg_id = eseries_fake.FAKE_CONSISTENCY_GROUP['id'] - - self.my_client.get_consistency_group_snapshots(cg_id) - - invoke.assert_called_once_with( - 'GET', path, **{'object-id': cg_id}) - - def test_create_cg_snapshot_view(self): - cg_snap_view = copy.deepcopy( - eseries_fake.FAKE_CONSISTENCY_GROUP_SNAPSHOT_VOLUME) - view = copy.deepcopy(eseries_fake.SNAPSHOT_VOLUME) - invoke = self.mock_object(self.my_client, '_invoke', - return_value=cg_snap_view) - list_views = self.mock_object( - self.my_client, 'list_cg_snapshot_views', return_value=[view]) - name = view['name'] - snap_id = view['basePIT'] - path = self.my_client.RESOURCE_PATHS.get('cgroup_cgsnap_views') - cg_id = eseries_fake.FAKE_CONSISTENCY_GROUP['id'] - - self.my_client.create_cg_snapshot_view(cg_id, name, snap_id) - - invoke.assert_called_once_with( - 'POST', path, mock.ANY, **{'object-id': cg_id}) - list_views.assert_called_once_with(cg_id, cg_snap_view['cgViewRef']) - - def test_create_cg_snapshot_view_not_found(self): - cg_snap_view = copy.deepcopy( - 
eseries_fake.FAKE_CONSISTENCY_GROUP_SNAPSHOT_VOLUME) - view = copy.deepcopy(eseries_fake.SNAPSHOT_VOLUME) - invoke = self.mock_object(self.my_client, '_invoke', - return_value=cg_snap_view) - list_views = self.mock_object( - self.my_client, 'list_cg_snapshot_views', return_value=[view]) - del_view = self.mock_object(self.my_client, 'delete_cg_snapshot_view') - name = view['name'] - # Ensure we don't get a match on the retrieved views - snap_id = None - path = self.my_client.RESOURCE_PATHS.get('cgroup_cgsnap_views') - cg_id = eseries_fake.FAKE_CONSISTENCY_GROUP['id'] - - self.assertRaises( - exception.NetAppDriverException, - self.my_client.create_cg_snapshot_view, cg_id, name, snap_id) - - invoke.assert_called_once_with( - 'POST', path, mock.ANY, **{'object-id': cg_id}) - list_views.assert_called_once_with(cg_id, cg_snap_view['cgViewRef']) - del_view.assert_called_once_with(cg_id, cg_snap_view['id']) - - def test_list_cg_snapshot_views(self): - invoke = self.mock_object(self.my_client, '_invoke') - path = self.my_client.RESOURCE_PATHS.get('cgroup_snapshot_views') - cg_id = eseries_fake.FAKE_CONSISTENCY_GROUP['id'] - view_id = 'id' - - self.my_client.list_cg_snapshot_views(cg_id, view_id) - - invoke.assert_called_once_with( - 'GET', path, **{'object-id': cg_id, 'view-id': view_id}) - - def test_delete_cg_snapshot_view(self): - invoke = self.mock_object(self.my_client, '_invoke') - path = self.my_client.RESOURCE_PATHS.get('cgroup_snap_view') - cg_id = eseries_fake.FAKE_CONSISTENCY_GROUP['id'] - view_id = 'id' - - self.my_client.delete_cg_snapshot_view(cg_id, view_id) - - invoke.assert_called_once_with( - 'DELETE', path, **{'object-id': cg_id, 'view-id': view_id}) - - @ddt.data('00.00.00.00', '01.52.9000.2', '01.52.9001.2', '01.51.9000.3', - '01.51.9001.3', '01.51.9010.5', '0.53.9000.3', '0.53.9001.4') - def test_api_version_not_support_asup(self, api_version): - - self.mock_object(client.RestClient, - 'get_eseries_api_info', - return_value=('proxy', api_version)) - - 
client.RestClient._init_features(self.my_client) - - self.assertFalse(self.my_client.features.AUTOSUPPORT.supported) - - @ddt.data('01.52.9000.3', '01.52.9000.4', '01.52.8999.2', - '01.52.8999.3', '01.53.8999.3', '01.53.9000.2', - '02.51.9000.3', '02.52.8999.3', '02.51.8999.2') - def test_api_version_supports_asup(self, api_version): - - self.mock_object(client.RestClient, - 'get_eseries_api_info', - return_value=('proxy', api_version)) - - client.RestClient._init_features(self.my_client) - - self.assertTrue(self.my_client.features.AUTOSUPPORT) - - @ddt.data('00.00.00.00', '01.52.9000.2', '01.52.9001.2', '01.51.9000.3', - '01.51.9001.3', '01.51.9010.5', '0.53.9000.3', '0.53.9001.4') - def test_api_version_not_support_chap(self, api_version): - - self.mock_object(client.RestClient, - 'get_eseries_api_info', - return_value=('proxy', api_version)) - - client.RestClient._init_features(self.my_client) - - self.assertFalse(self.my_client.features.CHAP_AUTHENTICATION) - - @ddt.data('01.53.9000.15', '01.53.9000.16', '01.53.8999.15', - '01.54.8999.16', '01.54.9010.15', '01.54.9090.15', - '02.52.9000.15', '02.53.8999.15', '02.54.8999.14') - def test_api_version_supports_chap(self, api_version): - - self.mock_object(client.RestClient, - 'get_eseries_api_info', - return_value=('proxy', api_version)) - - client.RestClient._init_features(self.my_client) - - self.assertTrue(self.my_client.features.CHAP_AUTHENTICATION) - - @ddt.data('00.00.00.00', '01.52.9000.1', '01.52.9001.2', '00.53.9001.3', - '01.53.9090.1', '1.53.9010.14', '0.53.9011.15') - def test_api_version_not_support_ssc_api(self, api_version): - - self.mock_object(client.RestClient, - 'get_eseries_api_info', - return_value=('proxy', api_version)) - - client.RestClient._init_features(self.my_client) - - self.assertFalse(self.my_client.features.SSC_API_V2.supported) - - @ddt.data('01.53.9000.1', '01.53.9000.5', '01.53.8999.1', - '01.53.9010.20', '01.53.9010.17', '01.54.9000.1', - '02.51.9000.3', '02.52.8999.3', 
'02.51.8999.2') - def test_api_version_supports_ssc_api(self, api_version): - - self.mock_object(client.RestClient, - 'get_eseries_api_info', - return_value=('proxy', api_version)) - - client.RestClient._init_features(self.my_client) - - self.assertTrue(self.my_client.features.SSC_API_V2.supported) - - @ddt.data('00.00.00.00', '01.52.9000.5', '01.52.9001.2', '00.53.9001.3', - '01.52.9090.1', '1.52.9010.7', '0.53.9011.7') - def test_api_version_not_support_1_3(self, api_version): - - self.mock_object(client.RestClient, - 'get_eseries_api_info', - return_value=('proxy', api_version)) - - client.RestClient._init_features(self.my_client) - - self.assertFalse(self.my_client.features.REST_1_3_RELEASE.supported) - - @ddt.data('01.53.9000.1', '01.53.9000.5', '01.53.8999.1', - '01.54.9010.20', '01.54.9000.1', '02.51.9000.3', - '02.52.8999.3', '02.51.8999.2') - def test_api_version_1_3(self, api_version): - - self.mock_object(client.RestClient, - 'get_eseries_api_info', - return_value=('proxy', api_version)) - - client.RestClient._init_features(self.my_client) - - self.assertTrue(self.my_client.features.REST_1_3_RELEASE.supported) - - def test_invoke_bad_content_type(self): - """Tests the invoke behavior with a non-JSON response""" - fake_response = mock.Mock() - fake_response.json = mock.Mock(side_effect=scanner.JSONDecodeError( - '', '{}', 1)) - fake_response.status_code = http_client.FAILED_DEPENDENCY - fake_response.text = "Fake Response" - self.mock_object(self.my_client, 'invoke_service', - return_value=fake_response) - - self.assertRaises(es_exception.WebServiceException, - self.my_client._invoke, 'GET', - eseries_fake.FAKE_ENDPOINT_HTTP) - - def test_list_backend_store(self): - path = self.my_client.RESOURCE_PATHS.get('persistent-store') - fake_store = copy.deepcopy(eseries_fake.FAKE_BACKEND_STORE) - invoke = self.mock_object( - self.my_client, '_invoke', return_value=fake_store) - expected = json.loads(fake_store.get('value')) - - result = 
self.my_client.list_backend_store('key') - - self.assertEqual(expected, result) - invoke.assert_called_once_with('GET', path, key='key') - - def test_save_backend_store(self): - path = self.my_client.RESOURCE_PATHS.get('persistent-stores') - fake_store = copy.deepcopy(eseries_fake.FAKE_BACKEND_STORE) - key = 'key' - invoke = self.mock_object(self.my_client, '_invoke') - - self.my_client.save_backend_store(key, fake_store) - - invoke.assert_called_once_with('POST', path, mock.ANY) - - -@ddt.ddt -class TestWebserviceClientTestCase(test.TestCase): - def setUp(self): - """sets up the mock tests""" - super(TestWebserviceClientTestCase, self).setUp() - self.mock_log = mock.Mock() - self.mock_object(client, 'LOG', self.mock_log) - self.webclient = client.WebserviceClient('http', 'host', '80', - '/test', 'user', '****') - - @ddt.data({'params': {'host': None, 'scheme': 'https', 'port': '80'}}, - {'params': {'host': 'host', 'scheme': None, 'port': '80'}}, - {'params': {'host': 'host', 'scheme': 'http', 'port': None}}) - @ddt.unpack - def test__validate_params_value_error(self, params): - """Tests various scenarios for ValueError in validate method""" - self.assertRaises(exception.InvalidInput, - self.webclient._validate_params, **params) - - def test_invoke_service_no_endpoint_error(self): - """Tests Exception and Log error if no endpoint is provided""" - self.webclient._endpoint = None - log_error = 'Unexpected error while invoking web service' - - self.assertRaises(exception.NetAppDriverException, - self.webclient.invoke_service) - self.assertTrue(self.mock_log.exception.find(log_error)) - - def test_invoke_service(self): - """Tests if invoke_service evaluates the right response""" - self.webclient._endpoint = eseries_fake.FAKE_ENDPOINT_HTTP - self.mock_object(self.webclient.conn, 'request', - return_value=eseries_fake.FAKE_INVOC_MSG) - result = self.webclient.invoke_service() - - self.assertIsNotNone(result) diff --git 
a/cinder/tests/unit/volume/drivers/netapp/eseries/test_driver.py b/cinder/tests/unit/volume/drivers/netapp/eseries/test_driver.py deleted file mode 100644 index b9b0f281b..000000000 --- a/cinder/tests/unit/volume/drivers/netapp/eseries/test_driver.py +++ /dev/null @@ -1,542 +0,0 @@ -# Copyright (c) 2015 Alex Meade. All rights reserved. -# Copyright (c) 2015 Michael Price. All rights reserved. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import abc -import copy -import ddt -import mock -import socket - -from cinder import exception -from cinder.volume import configuration as conf - -from cinder.tests.unit.volume.drivers.netapp.eseries import fakes as \ - fakes -from cinder.volume.drivers.netapp import common -from cinder.volume.drivers.netapp.eseries import client -from cinder.volume.drivers.netapp.eseries import library -from cinder.volume.drivers.netapp.eseries import utils -from cinder.volume.drivers.netapp import options -import cinder.volume.drivers.netapp.utils as na_utils - - -@ddt.ddt -class NetAppESeriesDriverTestCase(object): - """Test case for NetApp e-series iscsi driver.""" - - volume = {'id': '114774fb-e15a-4fae-8ee2-c9723e3645ef', 'size': 1, - 'volume_name': 'lun1', 'host': 'hostname@backend#DDP', - 'os_type': 'linux', 'provider_location': 'lun1', - 'name_id': '114774fb-e15a-4fae-8ee2-c9723e3645ef', - 'provider_auth': 'provider a b', 'project_id': 'project', - 'display_name': None, 'display_description': 'lun1', - 
'volume_type_id': None} - snapshot = {'id': '17928122-553b-4da9-9737-e5c3dcd97f75', - 'volume_id': '114774fb-e15a-4fae-8ee2-c9723e3645ef', - 'size': 2, 'volume_name': 'lun1', - 'volume_size': 2, 'project_id': 'project', - 'display_name': None, 'display_description': 'lun1', - 'volume_type_id': None} - volume_sec = {'id': 'b6c01641-8955-4917-a5e3-077147478575', - 'size': 2, 'volume_name': 'lun1', - 'os_type': 'linux', 'provider_location': 'lun1', - 'name_id': 'b6c01641-8955-4917-a5e3-077147478575', - 'provider_auth': None, 'project_id': 'project', - 'display_name': None, 'display_description': 'lun1', - 'volume_type_id': None} - volume_clone = {'id': 'b4b24b27-c716-4647-b66d-8b93ead770a5', 'size': 3, - 'volume_name': 'lun1', - 'os_type': 'linux', 'provider_location': 'cl_sm', - 'name_id': 'b4b24b27-c716-4647-b66d-8b93ead770a5', - 'provider_auth': None, - 'project_id': 'project', 'display_name': None, - 'display_description': 'lun1', - 'volume_type_id': None} - volume_clone_large = {'id': 'f6ef5bf5-e24f-4cbb-b4c4-11d631d6e553', - 'size': 6, 'volume_name': 'lun1', - 'os_type': 'linux', 'provider_location': 'cl_lg', - 'name_id': 'f6ef5bf5-e24f-4cbb-b4c4-11d631d6e553', - 'provider_auth': None, - 'project_id': 'project', 'display_name': None, - 'display_description': 'lun1', - 'volume_type_id': None} - fake_eseries_volume_label = utils.convert_uuid_to_es_fmt(volume['id']) - fake_size_gb = volume['size'] - fake_eseries_pool_label = 'DDP' - fake_ref = {'source-name': 'CFDGJSLS'} - fake_ret_vol = {'id': 'vol_id', 'label': 'label', - 'worldWideName': 'wwn', 'capacity': '2147583648'} - PROTOCOL = 'iscsi' - - def setUp(self): - super(NetAppESeriesDriverTestCase, self).setUp() - self._custom_setup() - - def _custom_setup(self): - self.mock_object(na_utils, 'OpenStackInfo') - - configuration = self._set_config(self.create_configuration()) - self.driver = common.NetAppDriver(configuration=configuration) - self.library = self.driver.library - self.mock_object(self.library, - 
'_check_mode_get_or_register_storage_system') - self.mock_object(self.library, '_version_check') - self.mock_object(self.driver.library, '_check_storage_system') - self.driver.do_setup(context='context') - self.driver.library._client._endpoint = fakes.FAKE_ENDPOINT_HTTP - self.driver.library._client.features = mock.Mock() - self.driver.library._client.features.REST_1_4_RELEASE = True - - def _set_config(self, configuration): - configuration.netapp_storage_family = 'eseries' - configuration.netapp_storage_protocol = self.PROTOCOL - configuration.netapp_transport_type = 'http' - configuration.netapp_server_hostname = '127.0.0.1' - configuration.netapp_server_port = None - configuration.netapp_webservice_path = '/devmgr/vn' - configuration.netapp_controller_ips = '127.0.0.2,127.0.0.3' - configuration.netapp_sa_password = 'pass1234' - configuration.netapp_login = 'rw' - configuration.netapp_password = 'rw' - configuration.netapp_storage_pools = 'DDP' - configuration.netapp_enable_multiattach = False - return configuration - - @staticmethod - def create_configuration(): - configuration = conf.Configuration(None) - configuration.append_config_values(options.netapp_basicauth_opts) - configuration.append_config_values(options.netapp_eseries_opts) - configuration.append_config_values(options.netapp_san_opts) - return configuration - - @abc.abstractmethod - @mock.patch.object(na_utils, 'validate_instantiation') - def test_instantiation(self, mock_validate_instantiation): - pass - - def test_embedded_mode(self): - self.mock_object(client.RestClient, '_init_features') - configuration = self._set_config(self.create_configuration()) - configuration.netapp_controller_ips = '127.0.0.1,127.0.0.3' - driver = common.NetAppDriver(configuration=configuration) - self.mock_object(driver.library, '_version_check') - self.mock_object(client.RestClient, 'list_storage_systems', - return_value=[fakes.STORAGE_SYSTEM]) - driver.do_setup(context='context') - - 
self.assertEqual('1fa6efb5-f07b-4de4-9f0e-52e5f7ff5d1b', - driver.library._client.get_system_id()) - - def test_check_system_pwd_not_sync(self): - def list_system(): - if getattr(self, 'test_count', None): - self.test_count = 1 - return {'status': 'passwordoutofsync'} - return {'status': 'needsAttention'} - - self.library._client.list_storage_system = mock.Mock(wraps=list_system) - result = self.library._check_storage_system() - self.assertTrue(result) - - def test_create_destroy(self): - self.mock_object(client.RestClient, 'delete_volume', - return_value='None') - self.mock_object(self.driver.library, 'create_volume', - return_value=self.volume) - self.mock_object(self.library._client, 'list_volume', - return_value=fakes.VOLUME) - - self.driver.create_volume(self.volume) - self.driver.delete_volume(self.volume) - - def test_vol_stats(self): - self.driver.get_volume_stats(refresh=False) - - def test_get_pool(self): - self.mock_object(self.library, '_get_volume', - return_value={'volumeGroupRef': 'fake_ref'}) - self.mock_object(self.library._client, "get_storage_pool", - return_value={'volumeGroupRef': 'fake_ref', - 'label': 'ddp1'}) - - pool = self.driver.get_pool({'name_id': 'fake-uuid'}) - - self.assertEqual('ddp1', pool) - - def test_get_pool_no_pools(self): - self.mock_object(self.library, '_get_volume', - return_value={'volumeGroupRef': 'fake_ref'}) - self.mock_object(self.library._client, "get_storage_pool", - return_value=None) - - pool = self.driver.get_pool({'name_id': 'fake-uuid'}) - - self.assertIsNone(pool) - - @mock.patch.object(library.NetAppESeriesLibrary, '_create_volume', - mock.Mock()) - def test_create_volume(self): - - self.driver.create_volume(self.volume) - - self.library._create_volume.assert_called_with( - 'DDP', self.fake_eseries_volume_label, self.volume['size'], {}) - - def test_create_volume_no_pool_provided_by_scheduler(self): - volume = copy.deepcopy(self.volume) - volume['host'] = "host@backend" # missing pool - 
self.assertRaises(exception.InvalidHost, self.driver.create_volume, - volume) - - @mock.patch.object(client.RestClient, 'list_storage_pools') - def test_helper_create_volume_fail(self, fake_list_pools): - fake_pool = {} - fake_pool['label'] = self.fake_eseries_pool_label - fake_pool['volumeGroupRef'] = 'foo' - fake_pool['raidLevel'] = 'raidDiskPool' - fake_pools = [fake_pool] - fake_list_pools.return_value = fake_pools - wrong_eseries_pool_label = 'hostname@backend' - self.assertRaises(exception.NetAppDriverException, - self.library._create_volume, - wrong_eseries_pool_label, - self.fake_eseries_volume_label, - self.fake_size_gb) - - @mock.patch.object(library.LOG, 'info') - @mock.patch.object(client.RestClient, 'list_storage_pools') - @mock.patch.object(client.RestClient, 'create_volume', - mock.MagicMock(return_value='CorrectVolume')) - def test_helper_create_volume(self, storage_pools, log_info): - fake_pool = {} - fake_pool['label'] = self.fake_eseries_pool_label - fake_pool['volumeGroupRef'] = 'foo' - fake_pool['raidLevel'] = 'raidDiskPool' - fake_pools = [fake_pool] - storage_pools.return_value = fake_pools - storage_vol = self.library._create_volume( - self.fake_eseries_pool_label, - self.fake_eseries_volume_label, - self.fake_size_gb) - log_info.assert_called_once_with("Created volume with label %s.", - self.fake_eseries_volume_label) - self.assertEqual('CorrectVolume', storage_vol) - - @mock.patch.object(client.RestClient, 'list_storage_pools') - @mock.patch.object(client.RestClient, 'create_volume', - mock.MagicMock( - side_effect=exception.NetAppDriverException)) - @mock.patch.object(library.LOG, 'info', mock.Mock()) - def test_create_volume_check_exception(self, fake_list_pools): - fake_pool = {} - fake_pool['label'] = self.fake_eseries_pool_label - fake_pool['volumeGroupRef'] = 'foo' - fake_pool['raidLevel'] = 'raidDiskPool' - fake_pools = [fake_pool] - fake_list_pools.return_value = fake_pools - self.assertRaises(exception.NetAppDriverException, - 
self.library._create_volume, - self.fake_eseries_pool_label, - self.fake_eseries_volume_label, self.fake_size_gb) - - def test_portal_for_vol_controller(self): - volume = {'id': 'vol_id', 'currentManager': 'ctrl1'} - vol_nomatch = {'id': 'vol_id', 'currentManager': 'ctrl3'} - portals = [{'controller': 'ctrl2', 'iqn': 'iqn2'}, - {'controller': 'ctrl1', 'iqn': 'iqn1'}] - portal = self.library._get_iscsi_portal_for_vol(volume, portals) - self.assertEqual({'controller': 'ctrl1', 'iqn': 'iqn1'}, portal) - portal = self.library._get_iscsi_portal_for_vol(vol_nomatch, portals) - self.assertEqual({'controller': 'ctrl2', 'iqn': 'iqn2'}, portal) - - def test_portal_for_vol_any_false(self): - vol_nomatch = {'id': 'vol_id', 'currentManager': 'ctrl3'} - portals = [{'controller': 'ctrl2', 'iqn': 'iqn2'}, - {'controller': 'ctrl1', 'iqn': 'iqn1'}] - self.assertRaises(exception.NetAppDriverException, - self.library._get_iscsi_portal_for_vol, - vol_nomatch, portals, False) - - def test_do_setup_all_default(self): - configuration = self._set_config(self.create_configuration()) - driver = common.NetAppDriver(configuration=configuration) - driver.library._check_mode_get_or_register_storage_system = mock.Mock() - mock_invoke = self.mock_object(client, 'RestClient') - driver.do_setup(context='context') - mock_invoke.assert_called_with(**fakes.FAKE_CLIENT_PARAMS) - - def test_do_setup_http_default_port(self): - configuration = self._set_config(self.create_configuration()) - configuration.netapp_transport_type = 'http' - driver = common.NetAppDriver(configuration=configuration) - driver.library._check_mode_get_or_register_storage_system = mock.Mock() - mock_invoke = self.mock_object(client, 'RestClient') - driver.do_setup(context='context') - mock_invoke.assert_called_with(**fakes.FAKE_CLIENT_PARAMS) - - def test_do_setup_https_default_port(self): - configuration = self._set_config(self.create_configuration()) - configuration.netapp_transport_type = 'https' - driver = 
common.NetAppDriver(configuration=configuration) - driver.library._check_mode_get_or_register_storage_system = mock.Mock() - mock_invoke = self.mock_object(client, 'RestClient') - driver.do_setup(context='context') - FAKE_EXPECTED_PARAMS = dict(fakes.FAKE_CLIENT_PARAMS, port=8443, - scheme='https') - mock_invoke.assert_called_with(**FAKE_EXPECTED_PARAMS) - - def test_do_setup_http_non_default_port(self): - configuration = self._set_config(self.create_configuration()) - configuration.netapp_server_port = 81 - driver = common.NetAppDriver(configuration=configuration) - driver.library._check_mode_get_or_register_storage_system = mock.Mock() - mock_invoke = self.mock_object(client, 'RestClient') - driver.do_setup(context='context') - FAKE_EXPECTED_PARAMS = dict(fakes.FAKE_CLIENT_PARAMS, port=81) - mock_invoke.assert_called_with(**FAKE_EXPECTED_PARAMS) - - def test_do_setup_https_non_default_port(self): - configuration = self._set_config(self.create_configuration()) - configuration.netapp_transport_type = 'https' - configuration.netapp_server_port = 446 - driver = common.NetAppDriver(configuration=configuration) - driver.library._check_mode_get_or_register_storage_system = mock.Mock() - mock_invoke = self.mock_object(client, 'RestClient') - driver.do_setup(context='context') - FAKE_EXPECTED_PARAMS = dict(fakes.FAKE_CLIENT_PARAMS, port=446, - scheme='https') - mock_invoke.assert_called_with(**FAKE_EXPECTED_PARAMS) - - def test_setup_good_controller_ip(self): - configuration = self._set_config(self.create_configuration()) - configuration.netapp_controller_ips = '127.0.0.1' - driver = common.NetAppDriver(configuration=configuration) - driver.library._check_mode_get_or_register_storage_system - - def test_setup_good_controller_ips(self): - configuration = self._set_config(self.create_configuration()) - configuration.netapp_controller_ips = '127.0.0.2,127.0.0.1' - driver = common.NetAppDriver(configuration=configuration) - 
driver.library._check_mode_get_or_register_storage_system - - def test_setup_missing_controller_ip(self): - configuration = self._set_config(self.create_configuration()) - configuration.netapp_controller_ips = None - driver = common.NetAppDriver(configuration=configuration) - self.assertRaises(exception.InvalidInput, - driver.do_setup, context='context') - - def test_setup_error_invalid_controller_ip(self): - configuration = self._set_config(self.create_configuration()) - configuration.netapp_controller_ips = '987.65.43.21' - driver = common.NetAppDriver(configuration=configuration) - self.mock_object(na_utils, 'resolve_hostname', - side_effect=socket.gaierror) - - self.assertRaises( - exception.NoValidBackend, - driver.library._check_mode_get_or_register_storage_system) - - def test_setup_error_invalid_first_controller_ip(self): - configuration = self._set_config(self.create_configuration()) - configuration.netapp_controller_ips = '987.65.43.21,127.0.0.1' - driver = common.NetAppDriver(configuration=configuration) - self.mock_object(na_utils, 'resolve_hostname', - side_effect=socket.gaierror) - - self.assertRaises( - exception.NoValidBackend, - driver.library._check_mode_get_or_register_storage_system) - - def test_setup_error_invalid_second_controller_ip(self): - configuration = self._set_config(self.create_configuration()) - configuration.netapp_controller_ips = '127.0.0.1,987.65.43.21' - driver = common.NetAppDriver(configuration=configuration) - self.mock_object(na_utils, 'resolve_hostname', - side_effect=socket.gaierror) - - self.assertRaises( - exception.NoValidBackend, - driver.library._check_mode_get_or_register_storage_system) - - def test_setup_error_invalid_both_controller_ips(self): - configuration = self._set_config(self.create_configuration()) - configuration.netapp_controller_ips = '564.124.1231.1,987.65.43.21' - driver = common.NetAppDriver(configuration=configuration) - self.mock_object(na_utils, 'resolve_hostname', - side_effect=socket.gaierror) 
- - self.assertRaises( - exception.NoValidBackend, - driver.library._check_mode_get_or_register_storage_system) - - def test_manage_existing_get_size(self): - self.library._get_existing_vol_with_manage_ref = mock.Mock( - return_value=self.fake_ret_vol) - size = self.driver.manage_existing_get_size(self.volume, self.fake_ref) - self.assertEqual(3, size) - self.library._get_existing_vol_with_manage_ref.assert_called_once_with( - self.fake_ref) - - def test_get_exist_vol_source_name_missing(self): - self.library._client.list_volume = mock.Mock( - side_effect=exception.InvalidInput) - self.assertRaises(exception.ManageExistingInvalidReference, - self.library._get_existing_vol_with_manage_ref, - {'id': '1234'}) - - @ddt.data('source-id', 'source-name') - def test_get_exist_vol_source_not_found(self, attr_name): - def _get_volume(v_id): - d = {'id': '1', 'name': 'volume1', 'worldWideName': '0'} - if v_id in d: - return d[v_id] - else: - raise exception.VolumeNotFound(message=v_id) - - self.library._client.list_volume = mock.Mock(wraps=_get_volume) - self.assertRaises(exception.ManageExistingInvalidReference, - self.library._get_existing_vol_with_manage_ref, - {attr_name: 'name2'}) - - self.library._client.list_volume.assert_called_once_with( - 'name2') - - def test_get_exist_vol_with_manage_ref(self): - fake_ret_vol = {'id': 'right'} - self.library._client.list_volume = mock.Mock(return_value=fake_ret_vol) - - actual_vol = self.library._get_existing_vol_with_manage_ref( - {'source-name': 'name2'}) - - self.library._client.list_volume.assert_called_once_with('name2') - self.assertEqual(fake_ret_vol, actual_vol) - - @mock.patch.object(utils, 'convert_uuid_to_es_fmt') - def test_manage_existing_same_label(self, mock_convert_es_fmt): - self.library._get_existing_vol_with_manage_ref = mock.Mock( - return_value=self.fake_ret_vol) - mock_convert_es_fmt.return_value = 'label' - self.driver.manage_existing(self.volume, self.fake_ref) - 
self.library._get_existing_vol_with_manage_ref.assert_called_once_with( - self.fake_ref) - mock_convert_es_fmt.assert_called_once_with( - '114774fb-e15a-4fae-8ee2-c9723e3645ef') - - @mock.patch.object(utils, 'convert_uuid_to_es_fmt') - def test_manage_existing_new(self, mock_convert_es_fmt): - self.library._get_existing_vol_with_manage_ref = mock.Mock( - return_value=self.fake_ret_vol) - mock_convert_es_fmt.return_value = 'vol_label' - self.library._client.update_volume = mock.Mock( - return_value={'id': 'update', 'worldWideName': 'wwn'}) - self.driver.manage_existing(self.volume, self.fake_ref) - self.library._get_existing_vol_with_manage_ref.assert_called_once_with( - self.fake_ref) - mock_convert_es_fmt.assert_called_once_with( - '114774fb-e15a-4fae-8ee2-c9723e3645ef') - self.library._client.update_volume.assert_called_once_with( - 'vol_id', 'vol_label') - - @mock.patch.object(library.LOG, 'info') - def test_unmanage(self, log_info): - self.library._get_volume = mock.Mock(return_value=self.fake_ret_vol) - self.driver.unmanage(self.volume) - self.library._get_volume.assert_called_once_with( - '114774fb-e15a-4fae-8ee2-c9723e3645ef') - self.assertEqual(1, log_info.call_count) - - @mock.patch.object(library.NetAppESeriesLibrary, 'ensure_export', - mock.Mock()) - def test_ensure_export(self): - self.driver.ensure_export('context', self.fake_ret_vol) - self.assertTrue(self.library.ensure_export.called) - - @mock.patch.object(library.NetAppESeriesLibrary, 'extend_volume', - mock.Mock()) - def test_extend_volume(self): - capacity = 10 - self.driver.extend_volume(self.fake_ret_vol, capacity) - self.library.extend_volume.assert_called_with(self.fake_ret_vol, - capacity) - - @mock.patch.object(library.NetAppESeriesLibrary, - 'create_cgsnapshot', mock.Mock()) - def test_create_cgsnapshot(self): - cgsnapshot = copy.deepcopy(fakes.FAKE_CINDER_CG_SNAPSHOT) - snapshots = copy.deepcopy([fakes.SNAPSHOT_IMAGE]) - - self.driver.create_cgsnapshot('ctx', cgsnapshot, snapshots) - - 
self.library.create_cgsnapshot.assert_called_with(cgsnapshot, - snapshots) - - @mock.patch.object(library.NetAppESeriesLibrary, - 'delete_cgsnapshot', mock.Mock()) - def test_delete_cgsnapshot(self): - cgsnapshot = copy.deepcopy(fakes.FAKE_CINDER_CG_SNAPSHOT) - snapshots = copy.deepcopy([fakes.SNAPSHOT_IMAGE]) - - self.driver.delete_cgsnapshot('ctx', cgsnapshot, snapshots) - - self.library.delete_cgsnapshot.assert_called_with(cgsnapshot, - snapshots) - - @mock.patch.object(library.NetAppESeriesLibrary, - 'create_consistencygroup', mock.Mock()) - def test_create_consistencygroup(self): - cg = copy.deepcopy(fakes.FAKE_CINDER_CG) - - self.driver.create_consistencygroup('ctx', cg) - - self.library.create_consistencygroup.assert_called_with(cg) - - @mock.patch.object(library.NetAppESeriesLibrary, - 'delete_consistencygroup', mock.Mock()) - def test_delete_consistencygroup(self): - cg = copy.deepcopy(fakes.FAKE_CINDER_CG) - volumes = copy.deepcopy([fakes.VOLUME]) - - self.driver.delete_consistencygroup('ctx', cg, volumes) - - self.library.delete_consistencygroup.assert_called_with(cg, volumes) - - @mock.patch.object(library.NetAppESeriesLibrary, - 'update_consistencygroup', mock.Mock()) - def test_update_consistencygroup(self): - group = copy.deepcopy(fakes.FAKE_CINDER_CG) - - self.driver.update_consistencygroup('ctx', group, {}, {}) - - self.library.update_consistencygroup.assert_called_with(group, {}, {}) - - @mock.patch.object(library.NetAppESeriesLibrary, - 'create_consistencygroup_from_src', mock.Mock()) - def test_create_consistencygroup_from_src(self): - cg = copy.deepcopy(fakes.FAKE_CINDER_CG) - volumes = copy.deepcopy([fakes.VOLUME]) - source_vols = copy.deepcopy([fakes.VOLUME]) - cgsnapshot = copy.deepcopy(fakes.FAKE_CINDER_CG_SNAPSHOT) - source_cg = copy.deepcopy(fakes.FAKE_CINDER_CG_SNAPSHOT) - snapshots = copy.deepcopy([fakes.SNAPSHOT_IMAGE]) - - self.driver.create_consistencygroup_from_src( - 'ctx', cg, volumes, cgsnapshot, snapshots, source_cg, - 
source_vols) - - self.library.create_consistencygroup_from_src.assert_called_with( - cg, volumes, cgsnapshot, snapshots, source_cg, source_vols) diff --git a/cinder/tests/unit/volume/drivers/netapp/eseries/test_fc_driver.py b/cinder/tests/unit/volume/drivers/netapp/eseries/test_fc_driver.py deleted file mode 100644 index 7a8c8a2c3..000000000 --- a/cinder/tests/unit/volume/drivers/netapp/eseries/test_fc_driver.py +++ /dev/null @@ -1,35 +0,0 @@ -# Copyright (c) 2015 Alex Meade -# Copyright (c) 2015 Yogesh Kshirsagar -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -import mock - -from cinder import test -from cinder.tests.unit.volume.drivers.netapp.eseries import test_driver -import cinder.volume.drivers.netapp.eseries.fc_driver as fc -from cinder.volume.drivers.netapp import utils as na_utils - - -class NetAppESeriesFibreChannelDriverTestCase(test_driver - .NetAppESeriesDriverTestCase, - test.TestCase): - - PROTOCOL = 'fc' - - @mock.patch.object(na_utils, 'validate_instantiation') - def test_instantiation(self, mock_validate_instantiation): - fc.NetAppEseriesFibreChannelDriver(configuration=mock.Mock()) - - self.assertTrue(mock_validate_instantiation.called) diff --git a/cinder/tests/unit/volume/drivers/netapp/eseries/test_host_mapper.py b/cinder/tests/unit/volume/drivers/netapp/eseries/test_host_mapper.py deleted file mode 100644 index 096df6a51..000000000 --- a/cinder/tests/unit/volume/drivers/netapp/eseries/test_host_mapper.py +++ /dev/null @@ -1,662 +0,0 @@ -# Copyright (c) 2015 Alex Meade. All rights reserved. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
-"""Mock unit tests for the NetApp E-series iscsi driver.""" - -import copy - -import mock -import six - -from cinder import exception -from cinder.objects import fields -from cinder import test -from cinder.tests.unit.volume.drivers.netapp.eseries \ - import fakes as eseries_fakes -from cinder.volume.drivers.netapp.eseries import host_mapper -from cinder.volume.drivers.netapp.eseries import utils - - -def get_fake_volume(): - return { - 'id': '114774fb-e15a-4fae-8ee2-c9723e3645ef', 'size': 1, - 'volume_name': 'lun1', 'host': 'hostname@backend#DDP', - 'os_type': 'linux', 'provider_location': 'lun1', - 'name_id': '114774fb-e15a-4fae-8ee2-c9723e3645ef', - 'provider_auth': 'provider a b', 'project_id': 'project', - 'display_name': None, 'display_description': 'lun1', - 'volume_type_id': None, 'migration_status': None, 'attach_status': - fields.VolumeAttachStatus.DETACHED, "status": "available" - } - -FAKE_MAPPINGS = [{u'lun': 1}] - -FAKE_USED_UP_MAPPINGS = [{u'lun': n} for n in range(256)] - -FAKE_USED_UP_LUN_ID_DICT = {n: 1 for n in range(256)} - -FAKE_UNUSED_LUN_ID = set([]) - -FAKE_USED_LUN_ID_DICT = ({0: 1, 1: 1}) - -FAKE_USED_LUN_IDS = [1, 2] - -FAKE_SINGLE_USED_LUN_ID = 1 - -FAKE_USED_UP_LUN_IDS = range(256) - - -class NetAppEseriesHostMapperTestCase(test.TestCase): - def setUp(self): - super(NetAppEseriesHostMapperTestCase, self).setUp() - - self.client = eseries_fakes.FakeEseriesClient() - - def test_unmap_volume_from_host_volume_mapped_to_host(self): - fake_eseries_volume = copy.deepcopy(eseries_fakes.VOLUME) - fake_eseries_volume['listOfMappings'] = [ - eseries_fakes.VOLUME_MAPPING - ] - self.mock_object(self.client, 'list_volumes', - return_value=[fake_eseries_volume]) - self.mock_object(self.client, 'delete_volume_mapping') - - host_mapper.unmap_volume_from_host(self.client, get_fake_volume(), - eseries_fakes.HOST, - eseries_fakes.VOLUME_MAPPING) - - self.assertTrue(self.client.delete_volume_mapping.called) - - def 
test_unmap_volume_from_host_volume_mapped_to_different_host(self): - fake_eseries_volume = copy.deepcopy(eseries_fakes.VOLUME) - # Mapped to host 1 - fake_eseries_volume['listOfMappings'] = [ - eseries_fakes.VOLUME_MAPPING - ] - self.mock_object(self.client, 'list_volumes', - return_value=[fake_eseries_volume]) - self.mock_object(self.client, 'delete_volume_mapping') - self.mock_object(self.client, 'get_host_group', - side_effect=exception.NotFound) - - err = self.assertRaises(exception.NetAppDriverException, - host_mapper.unmap_volume_from_host, - self.client, get_fake_volume(), - eseries_fakes.HOST_2, - eseries_fakes.VOLUME_MAPPING) - self.assertIn("not currently mapped to host", six.text_type(err)) - - def test_unmap_volume_from_host_volume_mapped_to_host_group_but_not_host( - self): - """Test volume mapped to host not in specified host group. - - Ensure an error is raised if the specified host is not in the - host group the volume is mapped to. - """ - fake_eseries_volume = copy.deepcopy(eseries_fakes.VOLUME) - fake_volume_mapping = copy.deepcopy(eseries_fakes.VOLUME_MAPPING) - fake_volume_mapping['mapRef'] = eseries_fakes.MULTIATTACH_HOST_GROUP[ - 'clusterRef'] - fake_eseries_volume['listOfMappings'] = [fake_volume_mapping] - self.mock_object(self.client, 'list_volumes', - return_value=[fake_eseries_volume]) - fake_host = copy.deepcopy(eseries_fakes.HOST) - fake_host['clusterRef'] = utils.NULL_REF - self.mock_object(self.client, 'list_hosts', - return_value=[fake_host]) - - err = self.assertRaises(exception.NetAppDriverException, - host_mapper.unmap_volume_from_host, - self.client, get_fake_volume(), - fake_host, - fake_volume_mapping) - self.assertIn("not currently mapped to host", six.text_type(err)) - - def test_unmap_volume_from_host_volume_mapped_to_multiattach_host_group( - self): - fake_eseries_volume = copy.deepcopy(eseries_fakes.VOLUME) - fake_volume_mapping = copy.deepcopy(eseries_fakes.VOLUME_MAPPING) - fake_volume_mapping['mapRef'] = 
eseries_fakes.MULTIATTACH_HOST_GROUP[ - 'clusterRef'] - fake_eseries_volume['listOfMappings'] = [fake_volume_mapping] - self.mock_object(self.client, 'delete_volume_mapping') - self.mock_object(self.client, 'list_volumes', - return_value=[fake_eseries_volume]) - fake_volume = get_fake_volume() - fake_volume['status'] = 'detaching' - - host_mapper.unmap_volume_from_host(self.client, fake_volume, - eseries_fakes.HOST, - fake_volume_mapping) - - self.assertTrue(self.client.delete_volume_mapping.called) - - def test_unmap_volume_from_host_volume_mapped_to_multiattach_host_group_and_migrating( # noqa - self): - fake_eseries_volume = copy.deepcopy(eseries_fakes.VOLUME) - fake_volume_mapping = copy.deepcopy(eseries_fakes.VOLUME_MAPPING) - fake_volume_mapping['mapRef'] = eseries_fakes.MULTIATTACH_HOST_GROUP[ - 'clusterRef'] - fake_eseries_volume['listOfMappings'] = [fake_volume_mapping] - self.mock_object(self.client, 'delete_volume_mapping') - self.mock_object(self.client, 'list_volumes', - return_value=[fake_eseries_volume]) - fake_volume = get_fake_volume() - fake_volume['status'] = 'in-use' - - host_mapper.unmap_volume_from_host(self.client, fake_volume, - eseries_fakes.HOST, - fake_volume_mapping) - - self.assertFalse(self.client.delete_volume_mapping.called) - - def test_unmap_volume_from_host_volume_mapped_to_outside_host_group(self): - """Test volume mapped to host group without host. - - Ensure we raise error when we find a volume is mapped to an unknown - host group that does not have the host. 
- """ - fake_eseries_volume = copy.deepcopy(eseries_fakes.VOLUME) - fake_volume_mapping = copy.deepcopy(eseries_fakes.VOLUME_MAPPING) - fake_ref = "8500000060080E500023C7340036035F515B78FD" - fake_volume_mapping['mapRef'] = fake_ref - fake_eseries_volume['listOfMappings'] = [fake_volume_mapping] - self.mock_object(self.client, 'list_volumes', - return_value=[fake_eseries_volume]) - fake_host = copy.deepcopy(eseries_fakes.HOST) - fake_host['clusterRef'] = utils.NULL_REF - self.mock_object(self.client, 'list_hosts', - return_value=[fake_host]) - self.mock_object(self.client, 'get_host_group', - return_value=eseries_fakes.FOREIGN_HOST_GROUP) - - err = self.assertRaises(exception.NetAppDriverException, - host_mapper.unmap_volume_from_host, - self.client, get_fake_volume(), - eseries_fakes.HOST, - fake_volume_mapping) - self.assertIn("unsupported host group", six.text_type(err)) - - def test_unmap_volume_from_host_volume_mapped_to_outside_host_group_w_host( - self): - """Test volume mapped to host in unknown host group. - - Ensure we raise error when we find a volume is mapped to an unknown - host group that has the host. 
- """ - fake_eseries_volume = copy.deepcopy(eseries_fakes.VOLUME) - fake_volume_mapping = copy.deepcopy(eseries_fakes.VOLUME_MAPPING) - fake_ref = "8500000060080E500023C7340036035F515B78FD" - fake_volume_mapping['mapRef'] = fake_ref - fake_eseries_volume['clusterRef'] = fake_ref - fake_eseries_volume['listOfMappings'] = [fake_volume_mapping] - self.mock_object(self.client, 'list_volumes', - return_value=[fake_eseries_volume]) - fake_host = copy.deepcopy(eseries_fakes.HOST) - fake_host['clusterRef'] = utils.NULL_REF - self.mock_object(self.client, 'list_hosts', return_value=[fake_host]) - self.mock_object(self.client, 'get_host_group', - return_value=eseries_fakes.FOREIGN_HOST_GROUP) - - err = self.assertRaises(exception.NetAppDriverException, - host_mapper.unmap_volume_from_host, - self.client, get_fake_volume(), - eseries_fakes.HOST, - fake_volume_mapping) - - self.assertIn("unsupported host group", six.text_type(err)) - - def test_map_volume_to_single_host_volume_not_mapped(self): - self.mock_object(self.client, 'create_volume_mapping', - return_value=eseries_fakes.VOLUME_MAPPING) - - host_mapper.map_volume_to_single_host(self.client, get_fake_volume(), - eseries_fakes.VOLUME, - eseries_fakes.HOST, - None, - False) - - self.assertTrue(self.client.create_volume_mapping.called) - - def test_map_volume_to_single_host_volume_already_mapped_to_target_host( - self): - """Should be a no-op""" - self.mock_object(self.client, 'create_volume_mapping') - - host_mapper.map_volume_to_single_host(self.client, - get_fake_volume(), - eseries_fakes.VOLUME, - eseries_fakes.HOST, - eseries_fakes.VOLUME_MAPPING, - False) - - self.assertFalse(self.client.create_volume_mapping.called) - - def test_map_volume_to_single_host_volume_mapped_to_multiattach_host_group( - self): - """Test map volume to a single host. - - Should move mapping to target host if volume is not migrating or - attached(in-use). 
If volume is not in use then it should not require a - mapping making it ok to sever the mapping to the host group. - """ - fake_mapping_to_other_host = copy.deepcopy( - eseries_fakes.VOLUME_MAPPING) - fake_mapping_to_other_host['mapRef'] = \ - eseries_fakes.MULTIATTACH_HOST_GROUP['clusterRef'] - self.mock_object(self.client, 'move_volume_mapping_via_symbol', - return_value={'lun': 5}) - - host_mapper.map_volume_to_single_host(self.client, - get_fake_volume(), - eseries_fakes.VOLUME, - eseries_fakes.HOST, - fake_mapping_to_other_host, - False) - - self.assertTrue(self.client.move_volume_mapping_via_symbol.called) - - def test_map_volume_to_single_host_volume_mapped_to_multiattach_host_group_and_migrating( # noqa - self): - """Should raise error saying multiattach not enabled""" - fake_mapping_to_other_host = copy.deepcopy( - eseries_fakes.VOLUME_MAPPING) - fake_mapping_to_other_host['mapRef'] = \ - eseries_fakes.MULTIATTACH_HOST_GROUP['clusterRef'] - fake_volume = get_fake_volume() - fake_volume['attach_status'] = fields.VolumeAttachStatus.ATTACHED - - err = self.assertRaises(exception.NetAppDriverException, - host_mapper.map_volume_to_single_host, - self.client, fake_volume, - eseries_fakes.VOLUME, - eseries_fakes.HOST, - fake_mapping_to_other_host, - False) - - self.assertIn('multiattach is disabled', six.text_type(err)) - - def test_map_volume_to_single_host_volume_mapped_to_multiattach_host_group_and_attached( # noqa - self): - """Should raise error saying multiattach not enabled""" - fake_mapping_to_other_host = copy.deepcopy( - eseries_fakes.VOLUME_MAPPING) - fake_mapping_to_other_host['mapRef'] = \ - eseries_fakes.MULTIATTACH_HOST_GROUP['clusterRef'] - fake_volume = get_fake_volume() - fake_volume['attach_status'] = fields.VolumeAttachStatus.ATTACHED - - err = self.assertRaises(exception.NetAppDriverException, - host_mapper.map_volume_to_single_host, - self.client, fake_volume, - eseries_fakes.VOLUME, - eseries_fakes.HOST, - fake_mapping_to_other_host, - 
False) - - self.assertIn('multiattach is disabled', six.text_type(err)) - - def test_map_volume_to_single_host_volume_mapped_to_another_host(self): - """Should raise error saying multiattach not enabled""" - fake_mapping_to_other_host = copy.deepcopy( - eseries_fakes.VOLUME_MAPPING) - fake_mapping_to_other_host['mapRef'] = eseries_fakes.HOST_2[ - 'hostRef'] - - err = self.assertRaises(exception.NetAppDriverException, - host_mapper.map_volume_to_single_host, - self.client, get_fake_volume(), - eseries_fakes.VOLUME, - eseries_fakes.HOST, - fake_mapping_to_other_host, - False) - - self.assertIn('multiattach is disabled', six.text_type(err)) - - def test_map_volume_to_multiple_hosts_volume_already_mapped_to_target_host( - self): - """Should be a no-op.""" - self.mock_object(self.client, 'create_volume_mapping') - - host_mapper.map_volume_to_multiple_hosts(self.client, - get_fake_volume(), - eseries_fakes.VOLUME, - eseries_fakes.HOST, - eseries_fakes.VOLUME_MAPPING) - - self.assertFalse(self.client.create_volume_mapping.called) - - def test_map_volume_to_multiple_hosts_volume_mapped_to_multiattach_host_group( # noqa - self): - """Should ensure target host is in the multiattach host group.""" - fake_host = copy.deepcopy(eseries_fakes.HOST_2) - fake_host['clusterRef'] = utils.NULL_REF - - fake_mapping_to_host_group = copy.deepcopy( - eseries_fakes.VOLUME_MAPPING) - fake_mapping_to_host_group['mapRef'] = \ - eseries_fakes.MULTIATTACH_HOST_GROUP['clusterRef'] - - self.mock_object(self.client, 'set_host_group_for_host') - self.mock_object(self.client, 'get_host_group', - return_value=eseries_fakes.MULTIATTACH_HOST_GROUP) - - host_mapper.map_volume_to_multiple_hosts(self.client, - get_fake_volume(), - eseries_fakes.VOLUME, - fake_host, - fake_mapping_to_host_group) - - self.assertEqual( - 1, self.client.set_host_group_for_host.call_count) - - def test_map_volume_to_multiple_hosts_volume_mapped_to_multiattach_host_group_with_lun_collision( # noqa - self): - """Should ensure 
target host is in the multiattach host group.""" - fake_host = copy.deepcopy(eseries_fakes.HOST_2) - fake_host['clusterRef'] = utils.NULL_REF - fake_mapping_to_host_group = copy.deepcopy( - eseries_fakes.VOLUME_MAPPING) - fake_mapping_to_host_group['mapRef'] = \ - eseries_fakes.MULTIATTACH_HOST_GROUP['clusterRef'] - self.mock_object(self.client, 'set_host_group_for_host', - side_effect=exception.NetAppDriverException) - - self.assertRaises(exception.NetAppDriverException, - host_mapper.map_volume_to_multiple_hosts, - self.client, - get_fake_volume(), - eseries_fakes.VOLUME, - fake_host, - fake_mapping_to_host_group) - - def test_map_volume_to_multiple_hosts_volume_mapped_to_another_host(self): - """Test that mapping moves to another host group. - - Should ensure both existing host and destination host are in - multiattach host group and move the mapping to the host group. - """ - - existing_host = copy.deepcopy(eseries_fakes.HOST) - existing_host['clusterRef'] = utils.NULL_REF - target_host = copy.deepcopy(eseries_fakes.HOST_2) - target_host['clusterRef'] = utils.NULL_REF - self.mock_object(self.client, 'get_host', return_value=existing_host) - self.mock_object(self.client, 'set_host_group_for_host') - self.mock_object(self.client, 'get_host_group', - side_effect=exception.NotFound) - mock_move_mapping = mock.Mock( - return_value=eseries_fakes.VOLUME_MAPPING_TO_MULTIATTACH_GROUP) - self.mock_object(self.client, - 'move_volume_mapping_via_symbol', - mock_move_mapping) - - host_mapper.map_volume_to_multiple_hosts(self.client, - get_fake_volume(), - eseries_fakes.VOLUME, - target_host, - eseries_fakes.VOLUME_MAPPING) - - self.assertEqual( - 2, self.client.set_host_group_for_host.call_count) - - self.assertTrue(self.client.move_volume_mapping_via_symbol - .called) - - def test_map_volume_to_multiple_hosts_volume_mapped_to_another_host_with_lun_collision_with_source_host( # noqa - self): - """Test moving source host to multiattach host group. 
- - Should fail attempting to move source host to multiattach host - group and raise an error. - """ - - existing_host = copy.deepcopy(eseries_fakes.HOST) - existing_host['clusterRef'] = utils.NULL_REF - target_host = copy.deepcopy(eseries_fakes.HOST_2) - target_host['clusterRef'] = utils.NULL_REF - self.mock_object(self.client, 'get_host', return_value=existing_host) - self.mock_object(self.client, 'set_host_group_for_host', - side_effect=[None, exception.NetAppDriverException]) - self.mock_object(self.client, 'get_host_group', - side_effect=exception.NotFound) - mock_move_mapping = mock.Mock( - return_value=eseries_fakes.VOLUME_MAPPING_TO_MULTIATTACH_GROUP) - self.mock_object(self.client, - 'move_volume_mapping_via_symbol', - mock_move_mapping) - - self.assertRaises(exception.NetAppDriverException, - host_mapper.map_volume_to_multiple_hosts, - self.client, - get_fake_volume(), - eseries_fakes.VOLUME, - target_host, - eseries_fakes.VOLUME_MAPPING) - - def test_map_volume_to_multiple_hosts_volume_mapped_to_another_host_with_lun_collision_with_dest_host( # noqa - self): - """Test moving destination host to multiattach host group. - - Should fail attempting to move destination host to multiattach host - group and raise an error. 
- """ - - existing_host = copy.deepcopy(eseries_fakes.HOST) - existing_host['clusterRef'] = utils.NULL_REF - target_host = copy.deepcopy(eseries_fakes.HOST_2) - target_host['clusterRef'] = utils.NULL_REF - self.mock_object(self.client, 'get_host', return_value=existing_host) - self.mock_object(self.client, 'set_host_group_for_host', - side_effect=[exception.NetAppDriverException, None]) - self.mock_object(self.client, 'get_host_group', - side_effect=exception.NotFound) - mock_move_mapping = mock.Mock( - return_value=eseries_fakes.VOLUME_MAPPING_TO_MULTIATTACH_GROUP) - self.mock_object(self.client, - 'move_volume_mapping_via_symbol', - mock_move_mapping) - - self.assertRaises(exception.NetAppDriverException, - host_mapper.map_volume_to_multiple_hosts, - self.client, - get_fake_volume(), - eseries_fakes.VOLUME, - target_host, - eseries_fakes.VOLUME_MAPPING) - - def test_map_volume_to_multiple_hosts_volume_mapped_to_foreign_host_group( - self): - """Test a target when the host is in a foreign host group. - - Should raise an error stating the volume is mapped to an - unsupported host group. 
- """ - fake_eseries_volume = copy.deepcopy(eseries_fakes.VOLUME) - fake_volume_mapping = copy.deepcopy(eseries_fakes.VOLUME_MAPPING) - fake_ref = "8500000060080E500023C7340036035F515B78FD" - fake_volume_mapping['mapRef'] = fake_ref - self.mock_object(self.client, 'list_volumes', - return_value=[fake_eseries_volume]) - fake_host = copy.deepcopy(eseries_fakes.HOST) - fake_host['clusterRef'] = utils.NULL_REF - self.mock_object(self.client, 'get_host_group', - return_value=eseries_fakes.FOREIGN_HOST_GROUP) - - err = self.assertRaises(exception.NetAppDriverException, - host_mapper.map_volume_to_multiple_hosts, - self.client, - get_fake_volume(), - eseries_fakes.VOLUME, - fake_host, - fake_volume_mapping) - self.assertIn("unsupported host group", six.text_type(err)) - - def test_map_volume_to_multiple_hosts_volume_mapped_to_host_in_foreign_host_group( # noqa - self): - """Test a target when the host is in a foreign host group. - - Should raise an error stating the volume is mapped to a - host that is in an unsupported host group. 
- """ - fake_eseries_volume = copy.deepcopy(eseries_fakes.VOLUME) - fake_volume_mapping = copy.deepcopy(eseries_fakes.VOLUME_MAPPING) - fake_host = copy.deepcopy(eseries_fakes.HOST_2) - fake_host['clusterRef'] = eseries_fakes.FOREIGN_HOST_GROUP[ - 'clusterRef'] - fake_volume_mapping['mapRef'] = fake_host['hostRef'] - fake_eseries_volume['listOfMappings'] = [fake_volume_mapping] - self.mock_object(self.client, 'list_volumes', - return_value=[fake_eseries_volume]) - self.mock_object(self.client, 'get_host', return_value=fake_host) - self.mock_object(self.client, 'get_host_group', - side_effect=[eseries_fakes.FOREIGN_HOST_GROUP]) - - err = self.assertRaises(exception.NetAppDriverException, - host_mapper.map_volume_to_multiple_hosts, - self.client, - get_fake_volume(), - eseries_fakes.VOLUME, - eseries_fakes.HOST, - fake_volume_mapping) - - self.assertIn("unsupported host group", six.text_type(err)) - - def test_map_volume_to_multiple_hosts_volume_target_host_in_foreign_host_group( # noqa - self): - """Test a target when the host is in a foreign host group. - - Should raise an error stating the target host is in an - unsupported host group. 
- """ - fake_eseries_volume = copy.deepcopy(eseries_fakes.VOLUME) - fake_volume_mapping = copy.deepcopy(eseries_fakes.VOLUME_MAPPING) - fake_host = copy.deepcopy(eseries_fakes.HOST_2) - fake_host['clusterRef'] = eseries_fakes.FOREIGN_HOST_GROUP[ - 'clusterRef'] - self.mock_object(self.client, 'list_volumes', - return_value=[fake_eseries_volume]) - self.mock_object(self.client, 'get_host', - return_value=eseries_fakes.HOST) - self.mock_object(self.client, 'get_host_group', - side_effect=[eseries_fakes.FOREIGN_HOST_GROUP]) - - err = self.assertRaises(exception.NetAppDriverException, - host_mapper.map_volume_to_multiple_hosts, - self.client, - get_fake_volume(), - eseries_fakes.VOLUME, - fake_host, - fake_volume_mapping) - - self.assertIn("unsupported host group", six.text_type(err)) - - def test_get_unused_lun_ids(self): - unused_lun_ids = host_mapper._get_unused_lun_ids(FAKE_MAPPINGS) - self.assertEqual(set(range(2, 256)), unused_lun_ids) - - def test_get_unused_lun_id_counter(self): - used_lun_id_count = host_mapper._get_used_lun_id_counter( - FAKE_MAPPINGS) - self.assertEqual(FAKE_USED_LUN_ID_DICT, used_lun_id_count) - - def test_get_unused_lun_ids_used_up_luns(self): - unused_lun_ids = host_mapper._get_unused_lun_ids( - FAKE_USED_UP_MAPPINGS) - self.assertEqual(FAKE_UNUSED_LUN_ID, unused_lun_ids) - - def test_get_lun_id_counter_used_up_luns(self): - used_lun_ids = host_mapper._get_used_lun_id_counter( - FAKE_USED_UP_MAPPINGS) - self.assertEqual(FAKE_USED_UP_LUN_ID_DICT, used_lun_ids) - - def test_host_not_full(self): - fake_host = copy.deepcopy(eseries_fakes.HOST) - self.assertFalse(host_mapper._is_host_full(self.client, fake_host)) - - def test_host_full(self): - fake_host = copy.deepcopy(eseries_fakes.HOST) - self.mock_object(self.client, 'get_volume_mappings_for_host', - return_value=FAKE_USED_UP_MAPPINGS) - self.assertTrue(host_mapper._is_host_full(self.client, fake_host)) - - def test_get_free_lun(self): - fake_host = copy.deepcopy(eseries_fakes.HOST) - with 
mock.patch('random.sample') as mock_random: - mock_random.return_value = [3] - lun = host_mapper._get_free_lun(self.client, fake_host, False, - []) - self.assertEqual(3, lun) - - def test_get_free_lun_host_full(self): - fake_host = copy.deepcopy(eseries_fakes.HOST) - self.mock_object(host_mapper, '_is_host_full', return_value=True) - self.assertRaises( - exception.NetAppDriverException, - host_mapper._get_free_lun, - self.client, fake_host, False, FAKE_USED_UP_MAPPINGS) - - def test_get_free_lun_no_unused_luns(self): - fake_host = copy.deepcopy(eseries_fakes.HOST) - lun = host_mapper._get_free_lun(self.client, fake_host, False, - FAKE_USED_UP_MAPPINGS) - self.assertEqual(255, lun) - - def test_get_free_lun_no_unused_luns_host_not_full(self): - fake_host = copy.deepcopy(eseries_fakes.HOST) - self.mock_object(host_mapper, '_is_host_full', return_value=False) - lun = host_mapper._get_free_lun(self.client, fake_host, False, - FAKE_USED_UP_MAPPINGS) - self.assertEqual(255, lun) - - def test_get_free_lun_no_lun_available(self): - fake_host = copy.deepcopy(eseries_fakes.HOST_3) - self.mock_object(self.client, 'get_volume_mappings_for_host', - return_value=FAKE_USED_UP_MAPPINGS) - - self.assertRaises(exception.NetAppDriverException, - host_mapper._get_free_lun, - self.client, fake_host, False, - FAKE_USED_UP_MAPPINGS) - - def test_get_free_lun_multiattach_enabled_no_unused_ids(self): - fake_host = copy.deepcopy(eseries_fakes.HOST_3) - self.mock_object(self.client, 'get_volume_mappings', - return_value=FAKE_USED_UP_MAPPINGS) - - self.assertRaises(exception.NetAppDriverException, - host_mapper._get_free_lun, - self.client, fake_host, True, - FAKE_USED_UP_MAPPINGS) - - def test_get_lun_by_mapping(self): - used_luns = host_mapper._get_used_lun_ids_for_mappings(FAKE_MAPPINGS) - self.assertEqual(set([0, 1]), used_luns) - - def test_get_lun_by_mapping_no_mapping(self): - used_luns = host_mapper._get_used_lun_ids_for_mappings([]) - self.assertEqual(set([0]), used_luns) - - def 
test_lun_id_available_on_host(self): - fake_host = copy.deepcopy(eseries_fakes.HOST) - self.assertTrue(host_mapper._is_lun_id_available_on_host( - self.client, fake_host, FAKE_UNUSED_LUN_ID)) - - def test_no_lun_id_available_on_host(self): - fake_host = copy.deepcopy(eseries_fakes.HOST_3) - self.mock_object(self.client, 'get_volume_mappings_for_host', - return_value=FAKE_USED_UP_MAPPINGS) - - self.assertFalse(host_mapper._is_lun_id_available_on_host( - self.client, fake_host, FAKE_SINGLE_USED_LUN_ID)) diff --git a/cinder/tests/unit/volume/drivers/netapp/eseries/test_iscsi_driver.py b/cinder/tests/unit/volume/drivers/netapp/eseries/test_iscsi_driver.py deleted file mode 100644 index 213de9603..000000000 --- a/cinder/tests/unit/volume/drivers/netapp/eseries/test_iscsi_driver.py +++ /dev/null @@ -1,33 +0,0 @@ -# Copyright (c) 2015 Alex Meade. All rights reserved. -# Copyright (c) 2015 Michael Price. All rights reserved. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -import ddt -import mock - -from cinder import test - -from cinder.tests.unit.volume.drivers.netapp.eseries import test_driver -from cinder.volume.drivers.netapp.eseries import iscsi_driver as iscsi -import cinder.volume.drivers.netapp.utils as na_utils - - -@ddt.ddt -class NetAppESeriesIscsiDriverTestCase(test_driver.NetAppESeriesDriverTestCase, - test.TestCase): - - @mock.patch.object(na_utils, 'validate_instantiation') - def test_instantiation(self, mock_validate_instantiation): - iscsi.NetAppEseriesISCSIDriver(configuration=mock.Mock()) diff --git a/cinder/tests/unit/volume/drivers/netapp/eseries/test_library.py b/cinder/tests/unit/volume/drivers/netapp/eseries/test_library.py deleted file mode 100644 index 52789c9e2..000000000 --- a/cinder/tests/unit/volume/drivers/netapp/eseries/test_library.py +++ /dev/null @@ -1,2559 +0,0 @@ -# Copyright (c) 2014 Andrew Kerr -# Copyright (c) 2015 Alex Meade -# Copyright (c) 2015 Rushil Chugh -# Copyright (c) 2015 Yogesh Kshirsagar -# Copyright (c) 2015 Michael Price -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -import copy -import ddt -import time -import uuid - -import mock -from oslo_utils import units -import six -from six.moves import range -from six.moves import reduce - -from cinder import context -from cinder import exception -from cinder.objects import fields -from cinder import test - -from cinder.tests.unit import fake_snapshot -from cinder.tests.unit import utils as cinder_utils -from cinder.tests.unit.volume.drivers.netapp.eseries import fakes as \ - eseries_fake -from cinder.volume.drivers.netapp.eseries import client as es_client -from cinder.volume.drivers.netapp.eseries import exception as eseries_exc -from cinder.volume.drivers.netapp.eseries import host_mapper -from cinder.volume.drivers.netapp.eseries import library -from cinder.volume.drivers.netapp.eseries import utils -from cinder.volume.drivers.netapp import utils as na_utils -from cinder.volume import utils as volume_utils -from cinder.zonemanager import utils as fczm_utils - - -def get_fake_volume(): - """Return a fake Cinder Volume that can be used as a parameter""" - return { - 'id': '114774fb-e15a-4fae-8ee2-c9723e3645ef', 'size': 1, - 'volume_name': 'lun1', 'host': 'hostname@backend#DDP', - 'os_type': 'linux', 'provider_location': 'lun1', - 'name_id': '114774fb-e15a-4fae-8ee2-c9723e3645ef', - 'provider_auth': 'provider a b', 'project_id': 'project', - 'display_name': None, 'display_description': 'lun1', - 'volume_type_id': None, 'migration_status': None, 'attach_status': - fields.VolumeAttachStatus.DETACHED - } - - -@ddt.ddt -class NetAppEseriesLibraryTestCase(test.TestCase): - def setUp(self): - super(NetAppEseriesLibraryTestCase, self).setUp() - - kwargs = {'configuration': - eseries_fake.create_configuration_eseries()} - - self.library = library.NetAppESeriesLibrary('FAKE', **kwargs) - - # We don't want the looping calls to run - self.mock_object(self.library, '_start_periodic_tasks') - # Deprecated Option - self.library.configuration.netapp_storage_pools = None - self.library._client = 
eseries_fake.FakeEseriesClient() - - self.mock_object(self.library, '_start_periodic_tasks') - - self.mock_object(library.cinder_utils, 'synchronized', - return_value=lambda f: f) - - with mock.patch('oslo_service.loopingcall.FixedIntervalLoopingCall', - new = cinder_utils.ZeroIntervalLoopingCall): - self.library.check_for_setup_error() - - self.ctxt = context.get_admin_context() - - def test_do_setup(self): - self.mock_object(self.library, - '_check_mode_get_or_register_storage_system') - self.mock_object(es_client, 'RestClient', - eseries_fake.FakeEseriesClient) - mock_check_flags = self.mock_object(na_utils, 'check_flags') - self.library.do_setup(mock.Mock()) - - self.assertTrue(mock_check_flags.called) - - @ddt.data('linux_dm_mp', 'linux_atto', 'linux_mpp_rdac', - 'linux_pathmanager', 'linux_sf', 'ontap', 'ontap_rdac', - 'vmware', 'windows_atto', 'windows_clustered', - 'factoryDefault', 'windows', None) - def test_check_host_type(self, host_type): - config = mock.Mock() - default_host_type = self.library.host_type - config.netapp_host_type = host_type - self.mock_object(self.library, 'configuration', config) - - result = self.library._check_host_type() - - self.assertIsNone(result) - if host_type: - self.assertEqual(self.library.HOST_TYPES.get(host_type), - self.library.host_type) - else: - self.assertEqual(default_host_type, self.library.host_type) - - def test_check_host_type_invalid(self): - config = mock.Mock() - config.netapp_host_type = 'invalid' - self.mock_object(self.library, 'configuration', config) - - self.assertRaises(exception.NetAppDriverException, - self.library._check_host_type) - - def test_check_host_type_new(self): - config = mock.Mock() - config.netapp_host_type = 'new_host_type' - expected = 'host_type' - self.mock_object(self.library, 'configuration', config) - host_types = [{ - 'name': 'new_host_type', - 'index': 0, - 'code': expected, - }] - self.mock_object(self.library._client, 'list_host_types', - return_value=host_types) - - result 
= self.library._check_host_type() - - self.assertIsNone(result) - self.assertEqual(expected, self.library.host_type) - - @ddt.data(('optimal', True), ('offline', False), ('needsAttn', True), - ('neverContacted', False), ('newKey', True), (None, True)) - @ddt.unpack - @mock.patch('oslo_service.loopingcall.FixedIntervalLoopingCall', new= - cinder_utils.ZeroIntervalLoopingCall) - def test_check_storage_system_status(self, status, status_valid): - system = copy.deepcopy(eseries_fake.STORAGE_SYSTEM) - system['status'] = status - status = status.lower() if status is not None else '' - - actual_status, actual_valid = ( - self.library._check_storage_system_status(system)) - - self.assertEqual(status, actual_status) - self.assertEqual(status_valid, actual_valid) - - @ddt.data(('valid', True), ('invalid', False), ('unknown', False), - ('newKey', True), (None, True)) - @ddt.unpack - @mock.patch('oslo_service.loopingcall.FixedIntervalLoopingCall', new= - cinder_utils.ZeroIntervalLoopingCall) - def test_check_password_status(self, status, status_valid): - system = copy.deepcopy(eseries_fake.STORAGE_SYSTEM) - system['passwordStatus'] = status - status = status.lower() if status is not None else '' - - actual_status, actual_valid = ( - self.library._check_password_status(system)) - - self.assertEqual(status, actual_status) - self.assertEqual(status_valid, actual_valid) - - def test_check_storage_system_bad_system(self): - exc_str = "bad_system" - controller_ips = self.library.configuration.netapp_controller_ips - self.library._client.list_storage_system = mock.Mock( - side_effect=exception.NetAppDriverException(message=exc_str)) - info_log = self.mock_object(library.LOG, 'info') - - self.assertRaisesRegexp(exception.NetAppDriverException, exc_str, - self.library._check_storage_system) - - info_log.assert_called_once_with(mock.ANY, controller_ips) - - @mock.patch('oslo_service.loopingcall.FixedIntervalLoopingCall', new= - cinder_utils.ZeroIntervalLoopingCall) - def 
    def test_check_storage_system(self):
        # Happy path: a healthy system triggers a stored-password update
        # and an info log, and no exception is raised.
        system = copy.deepcopy(eseries_fake.STORAGE_SYSTEM)
        self.mock_object(self.library._client, 'list_storage_system',
                         return_value=system)
        update_password = self.mock_object(self.library._client,
                                           'update_stored_system_password')
        info_log = self.mock_object(library.LOG, 'info')

        self.library._check_storage_system()

        self.assertTrue(update_password.called)
        self.assertTrue(info_log.called)

    @ddt.data({'status': 'optimal', 'passwordStatus': 'invalid'},
              {'status': 'offline', 'passwordStatus': 'valid'})
    @mock.patch('oslo_service.loopingcall.FixedIntervalLoopingCall', new=
                cinder_utils.ZeroIntervalLoopingCall)
    def test_check_storage_system_bad_status(self, system):
        # A system stuck with a bad status or a bad password status must
        # eventually raise instead of retrying forever.
        self.mock_object(self.library._client, 'list_storage_system',
                         return_value=system)
        self.mock_object(self.library._client, 'update_stored_system_password')
        # Simulated clock: each time.time() call advances 5 seconds so the
        # retry loop reaches its timeout quickly.
        self.mock_object(time, 'time', side_effect=range(0, 60, 5))

        # NOTE(review): assertRaisesRegexp is a deprecated alias of
        # assertRaisesRegex on Python 3; kept here for Python 2 support.
        self.assertRaisesRegexp(exception.NetAppDriverException,
                                'bad.*?status',
                                self.library._check_storage_system)

    @mock.patch('oslo_service.loopingcall.FixedIntervalLoopingCall', new=
                cinder_utils.ZeroIntervalLoopingCall)
    def test_check_storage_system_update_password(self):
        self.library.configuration.netapp_sa_password = 'password'

        def get_system_iter():
            # Yield an 'invalid' password status twice before turning
            # 'valid', exercising the retry-until-valid path.
            key = 'passwordStatus'
            system = copy.deepcopy(eseries_fake.STORAGE_SYSTEM)
            system[key] = 'invalid'
            yield system
            yield system

            system[key] = 'valid'
            yield system

        self.mock_object(self.library._client, 'list_storage_system',
                         side_effect=get_system_iter())
        update_password = self.mock_object(self.library._client,
                                           'update_stored_system_password')
        info_log = self.mock_object(library.LOG, 'info')

        self.library._check_storage_system()

        # The configured password is pushed to the array exactly once.
        update_password.assert_called_once_with(
            self.library.configuration.netapp_sa_password)
        self.assertTrue(info_log.called)

    def test_get_storage_pools_empty_result(self):
        """Verify an exception is raised if no pools are returned."""
        # '$' cannot match any non-empty pool label.
        self.library.configuration.netapp_pool_name_search_pattern = '$'
        # NOTE(review): this test performs setup but asserts nothing; per
        # its docstring it presumably should assert that
        # self.library._get_storage_pools raises — confirm and complete.

    def test_get_storage_pools_invalid_conf(self):
        """Verify an exception is raised if the regex pattern is invalid."""
        # '(.*' is an unterminated group and cannot be compiled.
        self.library.configuration.netapp_pool_name_search_pattern = '(.*'

        self.assertRaises(exception.InvalidConfigurationValue,
                          self.library._get_storage_pools)

    def test_get_storage_pools_default(self):
        """Verify that all pools are returned if the search option is empty."""
        filtered_pools = self.library._get_storage_pools()

        self.assertEqual(eseries_fake.STORAGE_POOLS, filtered_pools)

    @ddt.data((r'[\d]+,a', ['1', '2', 'a', 'b'], ['1', '2', 'a']),
              ('1 , 3', ['1', '2', '3'], ['1', '3']),
              ('$,3', ['1', '2', '3'], ['3']),
              ('[a-zA-Z]+', ['1', 'a', 'B'], ['a', 'B']),
              ('', ['1', '2'], ['1', '2'])
              )
    @ddt.unpack
    def test_get_storage_pools(self, pool_filter, pool_labels,
                               expected_pool_labels):
        """Verify that pool filtering via the search_pattern works correctly

        :param pool_filter: A regular expression to be used for filtering via
                            pool labels
        :param pool_labels: A list of pool labels
        :param expected_pool_labels: The labels from 'pool_labels' that
                                     should be matched by 'pool_filter'
        """
        self.library.configuration.netapp_pool_name_search_pattern = (
            pool_filter)
        pools = [{'label': label} for label in pool_labels]

        self.library._client.list_storage_pools = mock.Mock(
            return_value=pools)

        filtered_pools = self.library._get_storage_pools()

        filtered_pool_labels = [pool['label'] for pool in filtered_pools]
        self.assertEqual(expected_pool_labels, filtered_pool_labels)

    def test_get_volume(self):
        # _get_volume resolves a cinder volume id to the backend volume
        # with a single client lookup.
        fake_volume = copy.deepcopy(get_fake_volume())
        volume = copy.deepcopy(eseries_fake.VOLUME)
        self.library._client.list_volume = mock.Mock(return_value=volume)

        result = self.library._get_volume(fake_volume['id'])

        self.assertEqual(1, self.library._client.list_volume.call_count)
        self.assertDictEqual(volume, result)
self.assertDictEqual(volume, result) - - def test_get_volume_bad_input(self): - volume = copy.deepcopy(eseries_fake.VOLUME) - self.library._client.list_volume = mock.Mock(return_value=volume) - - self.assertRaises(exception.InvalidInput, self.library._get_volume, - None) - - def test_get_volume_bad_uuid(self): - volume = copy.deepcopy(eseries_fake.VOLUME) - self.library._client.list_volume = mock.Mock(return_value=volume) - - self.assertRaises(ValueError, self.library._get_volume, '1') - - def test_update_ssc_info_no_ssc(self): - drives = [{'currentVolumeGroupRef': 'test_vg1', - 'driveMediaType': 'ssd'}] - pools = [{'volumeGroupRef': 'test_vg1', 'label': 'test_vg1', - 'raidLevel': 'raid6', 'securityType': 'enabled'}] - self.library._client = mock.Mock() - self.library._client.features.SSC_API_V2 = na_utils.FeatureState( - False, minimum_version="1.53.9000.1") - self.library._client.SSC_VALID_VERSIONS = [(1, 53, 9000, 1), - (1, 53, 9010, 15)] - self.library.configuration.netapp_pool_name_search_pattern = "test_vg1" - self.library._client.list_storage_pools = mock.Mock(return_value=pools) - self.library._client.list_drives = mock.Mock(return_value=drives) - - self.library._update_ssc_info() - - self.assertEqual( - {'test_vg1': {'netapp_disk_encryption': 'true', - 'netapp_disk_type': 'SSD', - 'netapp_raid_type': 'raid6'}}, - self.library._ssc_stats) - - @ddt.data(True, False) - def test_update_ssc_info(self, data_assurance_supported): - self.library._client = mock.Mock() - self.library._client.features.SSC_API_V2 = na_utils.FeatureState( - True, minimum_version="1.53.9000.1") - self.library._client.list_ssc_storage_pools = mock.Mock( - return_value=eseries_fake.SSC_POOLS) - self.library._get_storage_pools = mock.Mock( - return_value=eseries_fake.STORAGE_POOLS) - # Data Assurance is not supported on some storage backends - self.library._is_data_assurance_supported = mock.Mock( - return_value=data_assurance_supported) - - self.library._update_ssc_info() - - for pool in 
eseries_fake.SSC_POOLS: - poolId = pool['poolId'] - - raid_lvl = self.library.SSC_RAID_TYPE_MAPPING.get( - pool['raidLevel'], 'unknown') - - if pool['pool']["driveMediaType"] == 'ssd': - disk_type = 'SSD' - else: - disk_type = pool['pool']['drivePhysicalType'] - disk_type = ( - self.library.SSC_DISK_TYPE_MAPPING.get( - disk_type, 'unknown')) - - da_enabled = pool['dataAssuranceCapable'] and ( - data_assurance_supported) - - thin_provisioned = pool['thinProvisioningCapable'] - - expected = { - 'consistencygroup_support': True, - 'netapp_disk_encryption': - six.text_type(pool['encrypted']).lower(), - 'netapp_eseries_flash_read_cache': - six.text_type(pool['flashCacheCapable']).lower(), - 'netapp_thin_provisioned': - six.text_type(thin_provisioned).lower(), - 'netapp_eseries_data_assurance': - six.text_type(da_enabled).lower(), - 'netapp_eseries_disk_spindle_speed': pool['spindleSpeed'], - 'netapp_raid_type': raid_lvl, - 'netapp_disk_type': disk_type - } - actual = self.library._ssc_stats[poolId] - self.assertDictEqual(expected, actual) - - @ddt.data(('FC', True), ('iSCSI', False)) - @ddt.unpack - def test_is_data_assurance_supported(self, backend_storage_protocol, - enabled): - self.mock_object(self.library, 'driver_protocol', - backend_storage_protocol) - - actual = self.library._is_data_assurance_supported() - - self.assertEqual(enabled, actual) - - @ddt.data('scsi', 'fibre', 'sas', 'sata', 'garbage') - def test_update_ssc_disk_types(self, disk_type): - drives = [{'currentVolumeGroupRef': 'test_vg1', - 'interfaceType': {'driveType': disk_type}}] - pools = [{'volumeGroupRef': 'test_vg1'}] - - self.library._client.list_drives = mock.Mock(return_value=drives) - self.library._client.get_storage_pool = mock.Mock(return_value=pools) - - ssc_stats = self.library._update_ssc_disk_types(pools) - - expected = self.library.SSC_DISK_TYPE_MAPPING.get(disk_type, 'unknown') - self.assertEqual({'test_vg1': {'netapp_disk_type': expected}}, - ssc_stats) - - @ddt.data('scsi', 
'fibre', 'sas', 'sata', 'garbage') - def test_update_ssc_disk_types_ssd(self, disk_type): - drives = [{'currentVolumeGroupRef': 'test_vg1', - 'driveMediaType': 'ssd', 'driveType': disk_type}] - pools = [{'volumeGroupRef': 'test_vg1'}] - - self.library._client.list_drives = mock.Mock(return_value=drives) - self.library._client.get_storage_pool = mock.Mock(return_value=pools) - - ssc_stats = self.library._update_ssc_disk_types(pools) - - self.assertEqual({'test_vg1': {'netapp_disk_type': 'SSD'}}, - ssc_stats) - - @ddt.data('enabled', 'none', 'capable', 'unknown', '__UNDEFINED', - 'garbage') - def test_update_ssc_disk_encryption(self, securityType): - pools = [{'volumeGroupRef': 'test_vg1', 'securityType': securityType}] - self.library._client.list_storage_pools = mock.Mock(return_value=pools) - - ssc_stats = self.library._update_ssc_disk_encryption(pools) - - # Convert the boolean value to a lower-case string value - expected = 'true' if securityType == "enabled" else 'false' - self.assertEqual({'test_vg1': {'netapp_disk_encryption': expected}}, - ssc_stats) - - def test_update_ssc_disk_encryption_multiple(self): - pools = [{'volumeGroupRef': 'test_vg1', 'securityType': 'none'}, - {'volumeGroupRef': 'test_vg2', 'securityType': 'enabled'}] - self.library._client.list_storage_pools = mock.Mock(return_value=pools) - - ssc_stats = self.library._update_ssc_disk_encryption(pools) - - self.assertEqual({'test_vg1': {'netapp_disk_encryption': 'false'}, - 'test_vg2': {'netapp_disk_encryption': 'true'}}, - ssc_stats) - - @ddt.data(True, False) - def test_get_volume_stats(self, refresh): - fake_stats = {'key': 'val'} - - def populate_stats(): - self.library._stats = fake_stats - - self.library._update_volume_stats = mock.Mock( - side_effect=populate_stats) - self.library._update_ssc_info = mock.Mock() - self.library._ssc_stats = {self.library.THIN_UQ_SPEC: True} - - actual = self.library.get_volume_stats(refresh = refresh) - - if(refresh): - 
self.library._update_volume_stats.assert_called_once_with() - self.assertEqual(fake_stats, actual) - else: - self.assertEqual(0, self.library._update_volume_stats.call_count) - self.assertEqual(0, self.library._update_ssc_info.call_count) - - def test_get_volume_stats_no_ssc(self): - """Validate that SSC data is collected if not yet populated""" - fake_stats = {'key': 'val'} - - def populate_stats(): - self.library._stats = fake_stats - - self.library._update_volume_stats = mock.Mock( - side_effect=populate_stats) - self.library._update_ssc_info = mock.Mock() - self.library._ssc_stats = None - - actual = self.library.get_volume_stats(refresh = True) - - self.library._update_volume_stats.assert_called_once_with() - self.library._update_ssc_info.assert_called_once_with() - self.assertEqual(fake_stats, actual) - - def test_update_volume_stats_provisioning(self): - """Validate pool capacity calculations""" - fake_pool = copy.deepcopy(eseries_fake.STORAGE_POOL) - self.library._get_storage_pools = mock.Mock(return_value=[fake_pool]) - self.mock_object(self.library, '_ssc_stats', - {fake_pool["volumeGroupRef"]: { - self.library.THIN_UQ_SPEC: True}}) - self.library.configuration = mock.Mock() - reserved_pct = 5 - over_subscription_ratio = 1.0 - self.library.configuration.max_over_subscription_ratio = ( - over_subscription_ratio) - self.library.configuration.reserved_percentage = reserved_pct - total_gb = int(fake_pool['totalRaidedSpace']) / units.Gi - used_gb = int(fake_pool['usedSpace']) / units.Gi - free_gb = total_gb - used_gb - - self.library._update_volume_stats() - - self.assertEqual(1, len(self.library._stats['pools'])) - pool_stats = self.library._stats['pools'][0] - self.assertEqual(fake_pool['label'], pool_stats.get('pool_name')) - self.assertEqual(reserved_pct, pool_stats['reserved_percentage']) - self.assertEqual(over_subscription_ratio, - pool_stats['max_over_subscription_ratio']) - self.assertEqual(total_gb, pool_stats.get('total_capacity_gb')) - 
self.assertEqual(used_gb, pool_stats.get('provisioned_capacity_gb')) - self.assertEqual(free_gb, pool_stats.get('free_capacity_gb')) - - @ddt.data(False, True) - def test_update_volume_stats_thin_provisioning(self, thin_provisioning): - """Validate that thin provisioning support is correctly reported""" - fake_pool = copy.deepcopy(eseries_fake.STORAGE_POOL) - self.library._get_storage_pools = mock.Mock(return_value=[fake_pool]) - self.mock_object(self.library, '_ssc_stats', - {fake_pool["volumeGroupRef"]: { - self.library.THIN_UQ_SPEC: thin_provisioning}}) - - self.library._update_volume_stats() - - self.assertEqual(1, len(self.library._stats['pools'])) - pool_stats = self.library._stats['pools'][0] - self.assertEqual(thin_provisioning, pool_stats.get( - 'thin_provisioning_support')) - # Should always be True - self.assertTrue(pool_stats.get('thick_provisioning_support')) - - def test_update_volume_stats_ssc(self): - """Ensure that the SSC data is correctly reported in the pool stats""" - ssc = {self.library.THIN_UQ_SPEC: True, 'key': 'val'} - fake_pool = copy.deepcopy(eseries_fake.STORAGE_POOL) - self.library._get_storage_pools = mock.Mock(return_value=[fake_pool]) - self.mock_object(self.library, '_ssc_stats', - {fake_pool["volumeGroupRef"]: ssc}) - - self.library._update_volume_stats() - - self.assertEqual(1, len(self.library._stats['pools'])) - pool_stats = self.library._stats['pools'][0] - for key in ssc: - self.assertIn(key, pool_stats) - self.assertEqual(ssc[key], pool_stats[key]) - - def test_update_volume_stats_no_ssc(self): - """Ensure that pool stats are correctly reported without SSC""" - fake_pool = copy.deepcopy(eseries_fake.STORAGE_POOL) - self.library._get_storage_pools = mock.Mock(return_value=[fake_pool]) - self.library._update_volume_stats() - - self.assertEqual(1, len(self.library._stats['pools'])) - pool_stats = self.library._stats['pools'][0] - self.assertFalse(pool_stats.get('thin_provisioning_support')) - # Should always be True - 
self.assertTrue(pool_stats.get('thick_provisioning_support')) - - def test_terminate_connection_iscsi_no_hosts(self): - connector = {'initiator': eseries_fake.INITIATOR_NAME} - - self.mock_object(self.library._client, 'list_hosts', return_value=[]) - - self.assertRaises(exception.NotFound, - self.library.terminate_connection_iscsi, - get_fake_volume(), - connector) - - def test_terminate_connection_iscsi_volume_not_mapped(self): - connector = {'initiator': eseries_fake.INITIATOR_NAME} - volume = copy.deepcopy(eseries_fake.VOLUME) - volume['listOfMappings'] = [] - self.library._get_volume = mock.Mock(return_value=volume) - self.assertRaises(eseries_exc.VolumeNotMapped, - self.library.terminate_connection_iscsi, - get_fake_volume(), - connector) - - def test_terminate_connection_iscsi_volume_mapped(self): - connector = {'initiator': eseries_fake.INITIATOR_NAME} - fake_eseries_volume = copy.deepcopy(eseries_fake.VOLUME) - fake_eseries_volume['listOfMappings'] = [ - eseries_fake.VOLUME_MAPPING - ] - self.mock_object(self.library._client, 'list_volume', - return_value=fake_eseries_volume) - self.mock_object(host_mapper, 'unmap_volume_from_host') - - self.library.terminate_connection_iscsi(get_fake_volume(), connector) - - self.assertTrue(host_mapper.unmap_volume_from_host.called) - - def test_terminate_connection_iscsi_not_mapped_initiator_does_not_exist( - self): - connector = {'initiator': eseries_fake.INITIATOR_NAME} - self.mock_object(self.library._client, 'list_hosts', - return_value=[eseries_fake.HOST_2]) - self.assertRaises(exception.NotFound, - self.library.terminate_connection_iscsi, - get_fake_volume(), - connector) - - def test_initialize_connection_iscsi_volume_not_mapped(self): - connector = {'initiator': eseries_fake.INITIATOR_NAME} - self.mock_object(self.library._client, - 'get_volume_mappings_for_volume', - return_value=[]) - self.mock_object(host_mapper, 'map_volume_to_single_host', - return_value=eseries_fake.VOLUME_MAPPING) - fake_eseries_volume = 
copy.deepcopy(eseries_fake.VOLUME) - fake_eseries_volume['listOfMappings'] = [ - eseries_fake.VOLUME_MAPPING - ] - self.mock_object(self.library._client, 'list_volume', - return_value=fake_eseries_volume) - - self.library.initialize_connection_iscsi(get_fake_volume(), connector) - - self.assertTrue( - self.library._client.get_volume_mappings_for_volume.called) - self.assertTrue(host_mapper.map_volume_to_single_host.called) - - def test_initialize_connection_iscsi_without_chap(self): - connector = {'initiator': eseries_fake.INITIATOR_NAME} - self.mock_object(self.library._client, - 'get_volume_mappings_for_volume', - return_value=[]) - self.mock_object(host_mapper, - 'map_volume_to_single_host', - return_value=eseries_fake.VOLUME_MAPPING) - mock_configure_chap = self.mock_object(self.library, '_configure_chap') - - self.library.initialize_connection_iscsi(get_fake_volume(), connector) - - self.assertTrue( - self.library._client.get_volume_mappings_for_volume.called) - self.assertTrue(host_mapper.map_volume_to_single_host.called) - self.assertFalse(mock_configure_chap.called) - - def test_initialize_connection_iscsi_volume_not_mapped_host_does_not_exist( - self): - connector = {'initiator': eseries_fake.INITIATOR_NAME} - self.mock_object(self.library._client, - 'get_volume_mappings_for_volume', - return_value=[]) - self.mock_object(self.library._client, 'list_hosts', return_value=[]) - self.mock_object(self.library._client, 'create_host_with_ports', - return_value=eseries_fake.HOST) - self.mock_object(host_mapper, 'map_volume_to_single_host', - return_value=eseries_fake.VOLUME_MAPPING) - fake_eseries_volume = copy.deepcopy(eseries_fake.VOLUME) - fake_eseries_volume['listOfMappings'] = [ - eseries_fake.VOLUME_MAPPING - ] - self.mock_object(self.library._client, 'list_volume', - return_value=fake_eseries_volume) - - self.library.initialize_connection_iscsi(get_fake_volume(), connector) - - self.assertTrue( - self.library._client.get_volume_mappings_for_volume.called) - 
self.assertTrue(self.library._client.list_hosts.called) - self.assertTrue(self.library._client.create_host_with_ports.called) - self.assertTrue(host_mapper.map_volume_to_single_host.called) - - def test_initialize_connection_iscsi_volume_already_mapped_to_target_host( - self): - """Should be a no-op""" - connector = {'initiator': eseries_fake.INITIATOR_NAME} - self.mock_object(host_mapper, 'map_volume_to_single_host', - return_value=eseries_fake.VOLUME_MAPPING) - fake_eseries_volume = copy.deepcopy(eseries_fake.VOLUME) - self.mock_object(self.library._client, 'list_volume', - return_value=fake_eseries_volume) - - self.library.initialize_connection_iscsi(get_fake_volume(), connector) - - self.assertTrue(host_mapper.map_volume_to_single_host.called) - - def test_initialize_connection_iscsi_volume_mapped_to_another_host(self): - """Should raise error saying multiattach not enabled""" - connector = {'initiator': eseries_fake.INITIATOR_NAME} - fake_mapping_to_other_host = copy.deepcopy( - eseries_fake.VOLUME_MAPPING) - fake_mapping_to_other_host['mapRef'] = eseries_fake.HOST_2[ - 'hostRef'] - self.mock_object(host_mapper, 'map_volume_to_single_host', - side_effect=exception.NetAppDriverException) - - self.assertRaises(exception.NetAppDriverException, - self.library.initialize_connection_iscsi, - get_fake_volume(), connector) - - self.assertTrue(host_mapper.map_volume_to_single_host.called) - - @ddt.data(eseries_fake.WWPN, - fczm_utils.get_formatted_wwn(eseries_fake.WWPN)) - def test_get_host_with_matching_port_wwpn(self, port_id): - port_ids = [port_id] - host = copy.deepcopy(eseries_fake.HOST) - host.update( - { - 'hostSidePorts': [{'label': 'NewStore', 'type': 'fc', - 'address': eseries_fake.WWPN}] - } - ) - host_2 = copy.deepcopy(eseries_fake.HOST_2) - host_2.update( - { - 'hostSidePorts': [{'label': 'NewStore', 'type': 'fc', - 'address': eseries_fake.WWPN_2}] - } - ) - host_list = [host, host_2] - self.mock_object(self.library._client, - 'list_hosts', - 
return_value=host_list) - - actual_host = self.library._get_host_with_matching_port( - port_ids) - - self.assertEqual(host, actual_host) - - def test_get_host_with_matching_port_iqn(self): - port_ids = [eseries_fake.INITIATOR_NAME] - host = copy.deepcopy(eseries_fake.HOST) - host.update( - { - 'hostSidePorts': [{'label': 'NewStore', 'type': 'iscsi', - 'address': eseries_fake.INITIATOR_NAME}] - } - ) - host_2 = copy.deepcopy(eseries_fake.HOST_2) - host_2.update( - { - 'hostSidePorts': [{'label': 'NewStore', 'type': 'iscsi', - 'address': eseries_fake.INITIATOR_NAME_2}] - } - ) - host_list = [host, host_2] - self.mock_object(self.library._client, - 'list_hosts', - return_value=host_list) - - actual_host = self.library._get_host_with_matching_port( - port_ids) - - self.assertEqual(host, actual_host) - - def test_terminate_connection_fc_no_hosts(self): - connector = {'wwpns': [eseries_fake.WWPN]} - - self.mock_object(self.library._client, 'list_hosts', - return_value=[]) - - self.assertRaises(exception.NotFound, - self.library.terminate_connection_fc, - get_fake_volume(), - connector) - - def test_terminate_connection_fc_volume_not_mapped(self): - connector = {'wwpns': [eseries_fake.WWPN]} - fake_host = copy.deepcopy(eseries_fake.HOST) - fake_host['hostSidePorts'] = [{ - 'label': 'NewStore', - 'type': 'fc', - 'address': eseries_fake.WWPN - }] - volume = copy.deepcopy(eseries_fake.VOLUME) - volume['listOfMappings'] = [] - self.mock_object(self.library, '_get_volume', return_value=volume) - - self.mock_object(self.library._client, 'list_hosts', - return_value=[fake_host]) - - self.assertRaises(eseries_exc.VolumeNotMapped, - self.library.terminate_connection_fc, - get_fake_volume(), - connector) - - def test_terminate_connection_fc_volume_mapped(self): - connector = {'wwpns': [eseries_fake.WWPN]} - fake_host = copy.deepcopy(eseries_fake.HOST) - fake_host['hostSidePorts'] = [{ - 'label': 'NewStore', - 'type': 'fc', - 'address': eseries_fake.WWPN - }] - fake_eseries_volume = 
copy.deepcopy(eseries_fake.VOLUME) - fake_eseries_volume['listOfMappings'] = [ - copy.deepcopy(eseries_fake.VOLUME_MAPPING) - ] - self.mock_object(self.library._client, 'list_hosts', - return_value=[fake_host]) - self.mock_object(self.library._client, 'list_volume', - return_value=fake_eseries_volume) - self.mock_object(host_mapper, 'unmap_volume_from_host') - - self.library.terminate_connection_fc(get_fake_volume(), connector) - - self.assertTrue(host_mapper.unmap_volume_from_host.called) - - def test_terminate_connection_fc_volume_mapped_no_cleanup_zone(self): - connector = {'wwpns': [eseries_fake.WWPN]} - fake_host = copy.deepcopy(eseries_fake.HOST) - fake_host['hostSidePorts'] = [{ - 'label': 'NewStore', - 'type': 'fc', - 'address': eseries_fake.WWPN - }] - expected_target_info = { - 'driver_volume_type': 'fibre_channel', - 'data': {}, - } - fake_eseries_volume = copy.deepcopy(eseries_fake.VOLUME) - fake_eseries_volume['listOfMappings'] = [ - copy.deepcopy(eseries_fake.VOLUME_MAPPING) - ] - self.mock_object(self.library._client, 'list_hosts', - return_value=[fake_host]) - self.mock_object(self.library._client, 'list_volume', - return_value=fake_eseries_volume) - self.mock_object(host_mapper, 'unmap_volume_from_host') - self.mock_object(self.library._client, 'get_volume_mappings_for_host', - return_value=[ - copy.deepcopy(eseries_fake.VOLUME_MAPPING)]) - - target_info = self.library.terminate_connection_fc(get_fake_volume(), - connector) - self.assertDictEqual(expected_target_info, target_info) - - self.assertTrue(host_mapper.unmap_volume_from_host.called) - - def test_terminate_connection_fc_volume_mapped_cleanup_zone(self): - connector = {'wwpns': [eseries_fake.WWPN]} - fake_host = copy.deepcopy(eseries_fake.HOST) - fake_host['hostSidePorts'] = [{ - 'label': 'NewStore', - 'type': 'fc', - 'address': eseries_fake.WWPN - }] - expected_target_info = { - 'driver_volume_type': 'fibre_channel', - 'data': { - 'target_wwn': [eseries_fake.WWPN_2], - 
'initiator_target_map': { - eseries_fake.WWPN: [eseries_fake.WWPN_2] - }, - }, - } - fake_eseries_volume = copy.deepcopy(eseries_fake.VOLUME) - fake_eseries_volume['listOfMappings'] = [ - copy.deepcopy(eseries_fake.VOLUME_MAPPING) - ] - self.mock_object(self.library._client, 'list_hosts', - return_value=[fake_host]) - self.mock_object(self.library._client, 'list_volume', - return_value=fake_eseries_volume) - self.mock_object(host_mapper, 'unmap_volume_from_host') - self.mock_object(self.library._client, 'get_volume_mappings_for_host', - return_value=[]) - - target_info = self.library.terminate_connection_fc(get_fake_volume(), - connector) - self.assertDictEqual(expected_target_info, target_info) - - self.assertTrue(host_mapper.unmap_volume_from_host.called) - - def test_terminate_connection_fc_not_mapped_host_with_wwpn_does_not_exist( - self): - connector = {'wwpns': [eseries_fake.WWPN]} - self.mock_object(self.library._client, 'list_hosts', - return_value=[eseries_fake.HOST_2]) - self.assertRaises(exception.NotFound, - self.library.terminate_connection_fc, - get_fake_volume(), - connector) - - def test_initialize_connection_fc_volume_not_mapped(self): - connector = {'wwpns': [eseries_fake.WWPN]} - self.mock_object(self.library._client, - 'get_volume_mappings_for_volume', - return_value=[]) - self.mock_object(host_mapper, 'map_volume_to_single_host', - return_value=eseries_fake.VOLUME_MAPPING) - expected_target_info = { - 'driver_volume_type': 'fibre_channel', - 'data': { - 'target_discovered': True, - 'target_lun': 1, - 'target_wwn': [eseries_fake.WWPN_2], - 'initiator_target_map': { - eseries_fake.WWPN: [eseries_fake.WWPN_2] - }, - }, - } - - target_info = self.library.initialize_connection_fc(get_fake_volume(), - connector) - - self.assertTrue( - self.library._client.get_volume_mappings_for_volume.called) - self.assertTrue(host_mapper.map_volume_to_single_host.called) - self.assertDictEqual(expected_target_info, target_info) - - def 
test_initialize_connection_fc_volume_not_mapped_host_does_not_exist( - self): - connector = {'wwpns': [eseries_fake.WWPN]} - self.library.driver_protocol = 'FC' - self.mock_object(self.library._client, - 'get_volume_mappings_for_volume', - return_value=[]) - self.mock_object(self.library._client, 'list_hosts', - return_value=[]) - self.mock_object(self.library._client, 'create_host_with_ports', - return_value=eseries_fake.HOST) - self.mock_object(host_mapper, 'map_volume_to_single_host', - return_value=eseries_fake.VOLUME_MAPPING) - - self.library.initialize_connection_fc(get_fake_volume(), connector) - - self.library._client.create_host_with_ports.assert_called_once_with( - mock.ANY, mock.ANY, - [fczm_utils.get_formatted_wwn(eseries_fake.WWPN)], - port_type='fc', group_id=None - ) - - def test_initialize_connection_fc_volume_already_mapped_to_target_host( - self): - """Should be a no-op""" - connector = {'wwpns': [eseries_fake.WWPN]} - self.mock_object(host_mapper, 'map_volume_to_single_host', - return_value=eseries_fake.VOLUME_MAPPING) - - self.library.initialize_connection_fc(get_fake_volume(), connector) - - self.assertTrue(host_mapper.map_volume_to_single_host.called) - - def test_initialize_connection_fc_volume_mapped_to_another_host(self): - """Should raise error saying multiattach not enabled""" - connector = {'wwpns': [eseries_fake.WWPN]} - fake_mapping_to_other_host = copy.deepcopy( - eseries_fake.VOLUME_MAPPING) - fake_mapping_to_other_host['mapRef'] = eseries_fake.HOST_2[ - 'hostRef'] - self.mock_object(host_mapper, 'map_volume_to_single_host', - side_effect=exception.NetAppDriverException) - - self.assertRaises(exception.NetAppDriverException, - self.library.initialize_connection_fc, - get_fake_volume(), connector) - - self.assertTrue(host_mapper.map_volume_to_single_host.called) - - def test_initialize_connection_fc_no_target_wwpns(self): - """Should be a no-op""" - connector = {'wwpns': [eseries_fake.WWPN]} - self.mock_object(host_mapper, 
'map_volume_to_single_host', - return_value=eseries_fake.VOLUME_MAPPING) - self.mock_object(self.library._client, 'list_target_wwpns', - return_value=[]) - - self.assertRaises(exception.VolumeBackendAPIException, - self.library.initialize_connection_fc, - get_fake_volume(), connector) - self.assertTrue(host_mapper.map_volume_to_single_host.called) - - def test_build_initiator_target_map_fc_with_lookup_service( - self): - connector = {'wwpns': [eseries_fake.WWPN, eseries_fake.WWPN_2]} - self.library.lookup_service = mock.Mock() - self.library.lookup_service.get_device_mapping_from_network = ( - mock.Mock(return_value=eseries_fake.FC_FABRIC_MAP)) - - (target_wwpns, initiator_target_map, num_paths) = ( - self.library._build_initiator_target_map_fc(connector)) - - self.assertSetEqual(set(eseries_fake.FC_TARGET_WWPNS), - set(target_wwpns)) - self.assertDictEqual(eseries_fake.FC_I_T_MAP, initiator_target_map) - self.assertEqual(4, num_paths) - - @ddt.data(('raid0', 'raid0'), ('raid1', 'raid1'), ('raid3', 'raid5'), - ('raid5', 'raid5'), ('raid6', 'raid6'), ('raidDiskPool', 'DDP')) - @ddt.unpack - def test_update_ssc_raid_type(self, raid_lvl, raid_lvl_mapping): - pools = [{'volumeGroupRef': 'test_vg1', 'raidLevel': raid_lvl}] - self.library._client.list_storage_pools = mock.Mock(return_value=pools) - - ssc_stats = self.library._update_ssc_raid_type(pools) - - self.assertEqual({'test_vg1': {'netapp_raid_type': raid_lvl_mapping}}, - ssc_stats) - - @ddt.data('raidAll', '__UNDEFINED', 'unknown', - 'raidUnsupported', 'garbage') - def test_update_ssc_raid_type_invalid(self, raid_lvl): - pools = [{'volumeGroupRef': 'test_vg1', 'raidLevel': raid_lvl}] - self.library._client.list_storage_pools = mock.Mock(return_value=pools) - - ssc_stats = self.library._update_ssc_raid_type(pools) - - self.assertEqual({'test_vg1': {'netapp_raid_type': 'unknown'}}, - ssc_stats) - - def test_create_asup(self): - self.library._client = mock.Mock() - self.library._client.features.AUTOSUPPORT = 
na_utils.FeatureState() - self.library._client.api_operating_mode = ( - eseries_fake.FAKE_ASUP_DATA['operating-mode']) - self.library._app_version = eseries_fake.FAKE_APP_VERSION - self.mock_object( - self.library._client, 'get_asup_info', - return_value=eseries_fake.GET_ASUP_RETURN) - self.mock_object( - self.library._client, 'set_counter', return_value={'value': 1}) - mock_invoke = self.mock_object( - self.library._client, 'add_autosupport_data') - - self.library._create_asup(eseries_fake.FAKE_CINDER_HOST) - - mock_invoke.assert_called_with(eseries_fake.FAKE_KEY, - eseries_fake.FAKE_ASUP_DATA) - - def test_create_asup_not_supported(self): - self.library._client = mock.Mock() - self.library._client.features.AUTOSUPPORT = na_utils.FeatureState( - supported=False) - mock_invoke = self.mock_object( - self.library._client, 'add_autosupport_data') - - self.library._create_asup(eseries_fake.FAKE_CINDER_HOST) - - mock_invoke.assert_not_called() - - @mock.patch.object(library, 'LOG', mock.Mock()) - def test_create_volume_fail_clean(self): - """Test volume creation fail w/o a partial volume being created. - - Test the failed creation of a volume where a partial volume with - the name has not been created, thus no cleanup is required. - """ - self.library._get_volume = mock.Mock( - side_effect = exception.VolumeNotFound(message='')) - self.library._client.create_volume = mock.Mock( - side_effect = exception.NetAppDriverException) - self.library._client.delete_volume = mock.Mock() - fake_volume = copy.deepcopy(get_fake_volume()) - - self.assertRaises(exception.NetAppDriverException, - self.library.create_volume, fake_volume) - - self.assertTrue(self.library._get_volume.called) - self.assertFalse(self.library._client.delete_volume.called) - self.assertEqual(1, library.LOG.error.call_count) - - @mock.patch.object(library, 'LOG', mock.Mock()) - def test_create_volume_fail_dirty(self): - """Test volume creation fail where a partial volume has been created. 
- - Test scenario where the creation of a volume fails and a partial - volume is created with the name/id that was supplied by to the - original creation call. In this situation the partial volume should - be detected and removed. - """ - fake_volume = copy.deepcopy(get_fake_volume()) - self.library._get_volume = mock.Mock(return_value=fake_volume) - self.library._client.list_volume = mock.Mock(return_value=fake_volume) - self.library._client.create_volume = mock.Mock( - side_effect = exception.NetAppDriverException) - self.library._client.delete_volume = mock.Mock() - - self.assertRaises(exception.NetAppDriverException, - self.library.create_volume, fake_volume) - - self.assertTrue(self.library._get_volume.called) - self.assertTrue(self.library._client.delete_volume.called) - self.library._client.delete_volume.assert_called_once_with( - fake_volume["id"]) - self.assertEqual(1, library.LOG.error.call_count) - - @mock.patch.object(library, 'LOG', mock.Mock()) - def test_create_volume_fail_dirty_fail_delete(self): - """Volume creation fail with partial volume deletion fails - - Test scenario where the creation of a volume fails and a partial - volume is created with the name/id that was supplied by to the - original creation call. The partial volume is detected but when - the cleanup deletetion of that fragment volume is attempted it fails. 
- """ - fake_volume = copy.deepcopy(get_fake_volume()) - self.library._get_volume = mock.Mock(return_value=fake_volume) - self.library._client.list_volume = mock.Mock(return_value=fake_volume) - self.library._client.create_volume = mock.Mock( - side_effect = exception.NetAppDriverException) - self.library._client.delete_volume = mock.Mock( - side_effect = exception.NetAppDriverException) - - self.assertRaises(exception.NetAppDriverException, - self.library.create_volume, fake_volume) - - self.assertTrue(self.library._get_volume.called) - self.assertTrue(self.library._client.delete_volume.called) - self.library._client.delete_volume.assert_called_once_with( - fake_volume["id"]) - self.assertEqual(2, library.LOG.error.call_count) - - def test_create_consistencygroup(self): - fake_cg = copy.deepcopy(eseries_fake.FAKE_CINDER_CG) - expected = {'status': 'available'} - create_cg = self.mock_object(self.library, - '_create_consistency_group', - return_value=expected) - - actual = self.library.create_consistencygroup(fake_cg) - - create_cg.assert_called_once_with(fake_cg) - self.assertEqual(expected, actual) - - def test_create_consistency_group(self): - fake_cg = copy.deepcopy(eseries_fake.FAKE_CINDER_CG) - expected = copy.deepcopy(eseries_fake.FAKE_CONSISTENCY_GROUP) - create_cg = self.mock_object(self.library._client, - 'create_consistency_group', - return_value=expected) - - result = self.library._create_consistency_group(fake_cg) - - name = utils.convert_uuid_to_es_fmt(fake_cg['id']) - create_cg.assert_called_once_with(name) - self.assertEqual(expected, result) - - def test_delete_consistencygroup(self): - cg = copy.deepcopy(eseries_fake.FAKE_CONSISTENCY_GROUP) - fake_cg = copy.deepcopy(eseries_fake.FAKE_CINDER_CG) - volumes = [get_fake_volume()] * 3 - model_update = {'status': 'deleted'} - volume_update = [{'status': 'deleted', 'id': vol['id']} for vol in - volumes] - delete_cg = self.mock_object(self.library._client, - 'delete_consistency_group') - updt_index = 
self.mock_object( - self.library, '_merge_soft_delete_changes') - delete_vol = self.mock_object(self.library, 'delete_volume') - self.mock_object(self.library, '_get_consistencygroup', - return_value=cg) - - result = self.library.delete_consistencygroup(fake_cg, volumes) - - self.assertEqual(len(volumes), delete_vol.call_count) - delete_cg.assert_called_once_with(cg['id']) - self.assertEqual((model_update, volume_update), result) - updt_index.assert_called_once_with(None, [cg['id']]) - - def test_delete_consistencygroup_index_update_failure(self): - cg = copy.deepcopy(eseries_fake.FAKE_CONSISTENCY_GROUP) - fake_cg = copy.deepcopy(eseries_fake.FAKE_CINDER_CG) - volumes = [get_fake_volume()] * 3 - model_update = {'status': 'deleted'} - volume_update = [{'status': 'deleted', 'id': vol['id']} for vol in - volumes] - delete_cg = self.mock_object(self.library._client, - 'delete_consistency_group') - delete_vol = self.mock_object(self.library, 'delete_volume') - self.mock_object(self.library, '_get_consistencygroup', - return_value=cg) - - result = self.library.delete_consistencygroup(fake_cg, volumes) - - self.assertEqual(len(volumes), delete_vol.call_count) - delete_cg.assert_called_once_with(cg['id']) - self.assertEqual((model_update, volume_update), result) - - def test_delete_consistencygroup_not_found(self): - fake_cg = copy.deepcopy(eseries_fake.FAKE_CINDER_CG) - delete_cg = self.mock_object(self.library._client, - 'delete_consistency_group') - updt_index = self.mock_object( - self.library, '_merge_soft_delete_changes') - delete_vol = self.mock_object(self.library, 'delete_volume') - exc = exception.ConsistencyGroupNotFound(consistencygroup_id='') - self.mock_object(self.library, '_get_consistencygroup', - side_effect=exc) - - self.library.delete_consistencygroup(fake_cg, []) - - delete_cg.assert_not_called() - delete_vol.assert_not_called() - updt_index.assert_not_called() - - def test_get_consistencygroup(self): - fake_cg = 
copy.deepcopy(eseries_fake.FAKE_CINDER_CG) - cg = copy.deepcopy(eseries_fake.FAKE_CONSISTENCY_GROUP) - name = utils.convert_uuid_to_es_fmt(fake_cg['id']) - cg['name'] = name - list_cgs = self.mock_object(self.library._client, - 'list_consistency_groups', - return_value=[cg]) - - result = self.library._get_consistencygroup(fake_cg) - - self.assertEqual(cg, result) - list_cgs.assert_called_once_with() - - def test_get_consistencygroup_not_found(self): - cg = copy.deepcopy(eseries_fake.FAKE_CONSISTENCY_GROUP) - list_cgs = self.mock_object(self.library._client, - 'list_consistency_groups', - return_value=[cg]) - - self.assertRaises(exception.ConsistencyGroupNotFound, - self.library._get_consistencygroup, - copy.deepcopy(eseries_fake.FAKE_CINDER_CG)) - - list_cgs.assert_called_once_with() - - def test_update_consistencygroup(self): - cg = copy.deepcopy(eseries_fake.FAKE_CONSISTENCY_GROUP) - fake_cg = copy.deepcopy(eseries_fake.FAKE_CINDER_CG) - vol = copy.deepcopy(eseries_fake.VOLUME) - volumes = [get_fake_volume()] * 3 - self.mock_object( - self.library, '_get_volume', return_value=vol) - self.mock_object(self.library, '_get_consistencygroup', - return_value=cg) - - self.library.update_consistencygroup(fake_cg, volumes, volumes) - - def test_create_consistencygroup_from_src(self): - cg = copy.deepcopy(eseries_fake.FAKE_CONSISTENCY_GROUP) - snap = copy.deepcopy(eseries_fake.SNAPSHOT_IMAGE) - fake_cg = copy.deepcopy(eseries_fake.FAKE_CINDER_CG) - volumes = [cinder_utils.create_volume(self.ctxt) for i in range(3)] - src_volumes = [cinder_utils.create_volume(self.ctxt) for v in volumes] - update_cg = self.mock_object( - self.library, '_update_consistency_group_members') - create_cg = self.mock_object( - self.library, '_create_consistency_group', return_value=cg) - self.mock_object( - self.library, '_create_volume_from_snapshot') - - self.mock_object(self.library, '_get_snapshot', return_value=snap) - - self.library.create_consistencygroup_from_src( - fake_cg, volumes, 
None, None, None, src_volumes) - - create_cg.assert_called_once_with(fake_cg) - update_cg.assert_called_once_with(cg, volumes, []) - - def test_create_consistencygroup_from_src_cgsnapshot(self): - cg = copy.deepcopy(eseries_fake.FAKE_CONSISTENCY_GROUP) - fake_cg = copy.deepcopy(eseries_fake.FAKE_CINDER_CG) - fake_vol = cinder_utils.create_volume(self.ctxt) - cgsnap = copy.deepcopy(eseries_fake.FAKE_CINDER_CG_SNAPSHOT) - volumes = [fake_vol] - snapshots = [cinder_utils.create_snapshot(self.ctxt, v['id']) for v - in volumes] - update_cg = self.mock_object( - self.library, '_update_consistency_group_members') - create_cg = self.mock_object( - self.library, '_create_consistency_group', return_value=cg) - clone_vol = self.mock_object( - self.library, '_create_volume_from_snapshot') - - self.library.create_consistencygroup_from_src( - fake_cg, volumes, cgsnap, snapshots, None, None) - - create_cg.assert_called_once_with(fake_cg) - update_cg.assert_called_once_with(cg, volumes, []) - self.assertEqual(clone_vol.call_count, len(volumes)) - - @ddt.data({'consistencyGroupId': utils.NULL_REF}, - {'consistencyGroupId': None}, {'consistencyGroupId': '1'}, {}) - def test_is_cgsnapshot(self, snapshot_image): - if snapshot_image.get('consistencyGroupId'): - result = not (utils.NULL_REF == snapshot_image[ - 'consistencyGroupId']) - else: - result = False - - actual = self.library._is_cgsnapshot(snapshot_image) - - self.assertEqual(result, actual) - - def test_add_volume_to_consistencygroup(self): - fake_volume = cinder_utils.create_volume(self.ctxt) - fake_volume['consistencygroup'] = ( - cinder_utils.create_consistencygroup(self.ctxt)) - fake_volume['consistencygroup_id'] = fake_volume[ - 'consistencygroup']['id'] - cg = copy.deepcopy(eseries_fake.FAKE_CONSISTENCY_GROUP) - self.mock_object(self.library, '_get_consistencygroup', - return_value=cg) - update_members = self.mock_object(self.library, - '_update_consistency_group_members') - - 
self.library._add_volume_to_consistencygroup(fake_volume) - - update_members.assert_called_once_with(cg, [fake_volume], []) - - @mock.patch('oslo_service.loopingcall.FixedIntervalLoopingCall', new= - cinder_utils.ZeroIntervalLoopingCall) - def test_copy_volume_high_priority_readonly(self): - src_vol = copy.deepcopy(eseries_fake.VOLUME) - dst_vol = copy.deepcopy(eseries_fake.VOLUME) - vc = copy.deepcopy(eseries_fake.VOLUME_COPY_JOB) - self.mock_object(self.library._client, 'create_volume_copy_job', - return_value=vc) - self.mock_object(self.library._client, 'list_vol_copy_job', - return_value=vc) - delete_copy = self.mock_object(self.library._client, - 'delete_vol_copy_job') - - result = self.library._copy_volume_high_priority_readonly( - src_vol, dst_vol) - - self.assertIsNone(result) - delete_copy.assert_called_once_with(vc['volcopyRef']) - - @mock.patch('oslo_service.loopingcall.FixedIntervalLoopingCall', new= - cinder_utils.ZeroIntervalLoopingCall) - def test_copy_volume_high_priority_readonly_job_create_failure(self): - src_vol = copy.deepcopy(eseries_fake.VOLUME) - dst_vol = copy.deepcopy(eseries_fake.VOLUME) - self.mock_object(self.library._client, 'create_volume_copy_job', - side_effect=exception.NetAppDriverException) - - self.assertRaises( - exception.NetAppDriverException, - self.library._copy_volume_high_priority_readonly, src_vol, - dst_vol) - - -@ddt.ddt -class NetAppEseriesLibraryMultiAttachTestCase(test.TestCase): - """Test driver when netapp_enable_multiattach is enabled. - - Test driver behavior when the netapp_enable_multiattach configuration - option is True. 
- """ - - def setUp(self): - super(NetAppEseriesLibraryMultiAttachTestCase, self).setUp() - config = eseries_fake.create_configuration_eseries() - config.netapp_enable_multiattach = True - - kwargs = {'configuration': config} - - self.library = library.NetAppESeriesLibrary("FAKE", **kwargs) - self.library._client = eseries_fake.FakeEseriesClient() - - self.mock_object(library.cinder_utils, 'synchronized', - return_value=lambda f: f) - self.mock_object(self.library, '_start_periodic_tasks') - - self.ctxt = context.get_admin_context() - - with mock.patch('oslo_service.loopingcall.FixedIntervalLoopingCall', - new = cinder_utils.ZeroIntervalLoopingCall): - self.library.check_for_setup_error() - - def test_do_setup_host_group_already_exists(self): - mock_check_flags = self.mock_object(na_utils, 'check_flags') - self.mock_object(self.library, - '_check_mode_get_or_register_storage_system') - fake_rest_client = eseries_fake.FakeEseriesClient() - self.mock_object(self.library, '_create_rest_client', - return_value=fake_rest_client) - mock_create = self.mock_object(fake_rest_client, 'create_host_group') - - self.library.do_setup(mock.Mock()) - - self.assertTrue(mock_check_flags.called) - self.assertFalse(mock_create.call_count) - - def test_do_setup_host_group_does_not_exist(self): - mock_check_flags = self.mock_object(na_utils, 'check_flags') - fake_rest_client = eseries_fake.FakeEseriesClient() - self.mock_object(self.library, '_create_rest_client', - return_value=fake_rest_client) - mock_get_host_group = self.mock_object( - fake_rest_client, "get_host_group_by_name", - side_effect=exception.NotFound) - self.mock_object(self.library, - '_check_mode_get_or_register_storage_system') - - self.library.do_setup(mock.Mock()) - - self.assertTrue(mock_check_flags.called) - self.assertTrue(mock_get_host_group.call_count) - - def test_create_volume(self): - self.library._client.create_volume = mock.Mock( - return_value=eseries_fake.VOLUME) - update_members = 
self.mock_object(self.library, - '_update_consistency_group_members') - - self.library.create_volume(get_fake_volume()) - self.assertTrue(self.library._client.create_volume.call_count) - - update_members.assert_not_called() - - @ddt.data(('netapp_eseries_flash_read_cache', 'flash_cache', 'true'), - ('netapp_eseries_flash_read_cache', 'flash_cache', 'false'), - ('netapp_eseries_flash_read_cache', 'flash_cache', None), - ('netapp_thin_provisioned', 'thin_provision', 'true'), - ('netapp_thin_provisioned', 'thin_provision', 'false'), - ('netapp_thin_provisioned', 'thin_provision', None), - ('netapp_eseries_data_assurance', 'data_assurance', 'true'), - ('netapp_eseries_data_assurance', 'data_assurance', 'false'), - ('netapp_eseries_data_assurance', 'data_assurance', None), - ('netapp:write_cache', 'write_cache', 'true'), - ('netapp:write_cache', 'write_cache', 'false'), - ('netapp:write_cache', 'write_cache', None), - ('netapp:read_cache', 'read_cache', 'true'), - ('netapp:read_cache', 'read_cache', 'false'), - ('netapp:read_cache', 'read_cache', None), - ('netapp_eseries_flash_read_cache', 'flash_cache', 'True'), - ('netapp_eseries_flash_read_cache', 'flash_cache', '1'), - ('netapp_eseries_data_assurance', 'data_assurance', '')) - @ddt.unpack - def test_create_volume_with_extra_spec(self, spec, key, value): - fake_volume = get_fake_volume() - extra_specs = {spec: value} - volume = copy.deepcopy(eseries_fake.VOLUME) - - self.library._client.create_volume = mock.Mock( - return_value=volume) - # Make this utility method return our extra spec - mocked_spec_method = self.mock_object(na_utils, - 'get_volume_extra_specs') - mocked_spec_method.return_value = extra_specs - - self.library.create_volume(fake_volume) - - self.assertEqual(1, self.library._client.create_volume.call_count) - # Ensure create_volume is called with the correct argument - args, kwargs = self.library._client.create_volume.call_args - self.assertIn(key, kwargs) - if(value is not None): - expected = 
na_utils.to_bool(value) - else: - expected = value - self.assertEqual(expected, kwargs[key]) - - def test_create_volume_too_many_volumes(self): - self.library._client.list_volumes = mock.Mock( - return_value=[eseries_fake.VOLUME for __ in - range(utils.MAX_LUNS_PER_HOST_GROUP + 1)]) - self.library._client.create_volume = mock.Mock( - return_value=eseries_fake.VOLUME) - - self.assertRaises(exception.NetAppDriverException, - self.library.create_volume, - get_fake_volume()) - self.assertFalse(self.library._client.create_volume.call_count) - - @ddt.data(0, 1, 2) - def test_create_snapshot(self, group_count): - """Successful Snapshot creation test""" - fake_eseries_volume = copy.deepcopy(eseries_fake.VOLUME) - self.library._get_volume = mock.Mock(return_value=fake_eseries_volume) - fake_pool = copy.deepcopy(eseries_fake.STORAGE_POOL) - self.library._get_storage_pools = mock.Mock(return_value=[fake_pool]) - fake_cinder_snapshot = copy.deepcopy( - eseries_fake.FAKE_CINDER_SNAPSHOT) - fake_snapshot_group_list = eseries_fake.list_snapshot_groups( - group_count) - fake_snapshot_group = copy.deepcopy(eseries_fake.SNAPSHOT_GROUP) - fake_snapshot_image = copy.deepcopy(eseries_fake.SNAPSHOT_IMAGE) - self.library._client.create_snapshot_group = mock.Mock( - return_value=fake_snapshot_group) - self.library._client.list_snapshot_groups = mock.Mock( - return_value=fake_snapshot_group_list) - self.library._client.create_snapshot_image = mock.Mock( - return_value=fake_snapshot_image) - - self.library.create_snapshot(fake_cinder_snapshot) - - @ddt.data(0, 1, 3) - def test_create_cloned_volume(self, snapshot_group_count): - """Test creating cloned volume with different exist group counts. 
""" - fake_eseries_volume = copy.deepcopy(eseries_fake.VOLUME) - self.library._get_volume = mock.Mock(return_value=fake_eseries_volume) - fake_pool = copy.deepcopy(eseries_fake.STORAGE_POOL) - self.library._get_storage_pools = mock.Mock(return_value=[fake_pool]) - fake_snapshot_group_list = eseries_fake.list_snapshot_groups( - snapshot_group_count) - self.library._client.list_snapshot_groups = mock.Mock( - return_value=fake_snapshot_group_list) - fake_snapshot_group = copy.deepcopy(eseries_fake.SNAPSHOT_GROUP) - self.library._client.create_snapshot_group = mock.Mock( - return_value=fake_snapshot_group) - fake_snapshot_image = copy.deepcopy(eseries_fake.SNAPSHOT_IMAGE) - self.library._client.create_snapshot_image = mock.Mock( - return_value=fake_snapshot_image) - self.library._get_snapshot_group_for_snapshot = mock.Mock( - return_value=copy.deepcopy(eseries_fake.SNAPSHOT_GROUP)) - fake_created_volume = copy.deepcopy(eseries_fake.VOLUMES[1]) - self.library.create_volume_from_snapshot = mock.Mock( - return_value = fake_created_volume) - fake_cinder_volume = copy.deepcopy(eseries_fake.FAKE_CINDER_VOLUME) - extend_vol = {'id': uuid.uuid4(), 'size': 10} - self.mock_object(self.library, '_create_volume_from_snapshot') - - self.library.create_cloned_volume(extend_vol, fake_cinder_volume) - - @mock.patch('oslo_service.loopingcall.FixedIntervalLoopingCall', - new = cinder_utils.ZeroIntervalLoopingCall) - def test_create_volume_from_snapshot(self): - fake_eseries_volume = copy.deepcopy(eseries_fake.VOLUME) - fake_snap = copy.deepcopy(eseries_fake.FAKE_CINDER_SNAPSHOT) - self.mock_object(self.library, "_schedule_and_create_volume", - return_value=fake_eseries_volume) - self.mock_object(self.library, "_get_snapshot", - return_value=copy.deepcopy( - eseries_fake.SNAPSHOT_IMAGE)) - - self.library.create_volume_from_snapshot( - get_fake_volume(), fake_snap) - - self.assertEqual( - 1, self.library._schedule_and_create_volume.call_count) - - def 
test_create_volume_from_snapshot_create_fails(self): - fake_dest_eseries_volume = copy.deepcopy(eseries_fake.VOLUME) - self.mock_object(self.library, "_schedule_and_create_volume", - return_value=fake_dest_eseries_volume) - self.mock_object(self.library._client, "delete_volume") - self.mock_object(self.library._client, "delete_snapshot_volume") - self.mock_object(self.library, "_get_snapshot", - return_value=copy.deepcopy( - eseries_fake.SNAPSHOT_IMAGE)) - self.mock_object(self.library._client, "create_snapshot_volume", - side_effect=exception.NetAppDriverException) - - self.assertRaises(exception.NetAppDriverException, - self.library.create_volume_from_snapshot, - get_fake_volume(), - fake_snapshot.fake_snapshot_obj(None)) - - self.assertEqual( - 1, self.library._schedule_and_create_volume.call_count) - # Ensure the volume we were going to copy to is cleaned up - self.library._client.delete_volume.assert_called_once_with( - fake_dest_eseries_volume['volumeRef']) - - @mock.patch('oslo_service.loopingcall.FixedIntervalLoopingCall', - new = cinder_utils.ZeroIntervalLoopingCall) - def test_create_volume_from_snapshot_copy_job_fails(self): - fake_dest_eseries_volume = copy.deepcopy(eseries_fake.VOLUME) - self.mock_object(self.library, "_schedule_and_create_volume", - return_value=fake_dest_eseries_volume) - self.mock_object(self.library, "_create_snapshot_volume", - return_value=fake_dest_eseries_volume) - self.mock_object(self.library._client, "delete_volume") - self.mock_object(self.library, "_get_snapshot", - return_value=copy.deepcopy( - eseries_fake.SNAPSHOT_IMAGE)) - - fake_failed_volume_copy_job = copy.deepcopy( - eseries_fake.VOLUME_COPY_JOB) - fake_failed_volume_copy_job['status'] = 'failed' - self.mock_object(self.library._client, - "create_volume_copy_job", - return_value=fake_failed_volume_copy_job) - self.mock_object(self.library._client, - "list_vol_copy_job", - return_value=fake_failed_volume_copy_job) - - 
self.assertRaises(exception.NetAppDriverException, - self.library.create_volume_from_snapshot, - get_fake_volume(), - fake_snapshot.fake_snapshot_obj(None)) - - self.assertEqual( - 1, self.library._schedule_and_create_volume.call_count) - # Ensure the volume we were going to copy to is cleaned up - self.library._client.delete_volume.assert_called_once_with( - fake_dest_eseries_volume['volumeRef']) - - @mock.patch('oslo_service.loopingcall.FixedIntervalLoopingCall', - new = cinder_utils.ZeroIntervalLoopingCall) - def test_create_volume_from_snapshot_fail_to_delete_snapshot_volume(self): - fake_dest_eseries_volume = copy.deepcopy(eseries_fake.VOLUME) - fake_dest_eseries_volume['volumeRef'] = 'fake_volume_ref' - self.mock_object(self.library, "_schedule_and_create_volume", - return_value=fake_dest_eseries_volume) - self.mock_object(self.library, "_get_snapshot", - return_value=copy.deepcopy( - eseries_fake.SNAPSHOT_IMAGE)) - self.mock_object(self.library, '_create_snapshot_volume', - return_value=copy.deepcopy( - eseries_fake.SNAPSHOT_VOLUME)) - self.mock_object(self.library, "_create_snapshot_volume", - return_value=copy.deepcopy( - eseries_fake.VOLUME)) - self.mock_object(self.library._client, "delete_snapshot_volume", - side_effect=exception.NetAppDriverException) - self.mock_object(self.library._client, "delete_volume") - - self.library.create_volume_from_snapshot( - get_fake_volume(), fake_snapshot.fake_snapshot_obj(None)) - - self.assertEqual( - 1, self.library._schedule_and_create_volume.call_count) - self.assertEqual( - 1, self.library._client.delete_snapshot_volume.call_count) - # Ensure the volume we created is not cleaned up - self.assertEqual(0, self.library._client.delete_volume.call_count) - - def test_create_snapshot_volume_cgsnap(self): - image = copy.deepcopy(eseries_fake.SNAPSHOT_IMAGE) - grp = copy.deepcopy(eseries_fake.SNAPSHOT_GROUP) - self.mock_object(self.library, '_get_snapshot_group', return_value=grp) - expected = 
copy.deepcopy(eseries_fake.SNAPSHOT_VOLUME) - self.mock_object(self.library, '_is_cgsnapshot', return_value=True) - create_view = self.mock_object( - self.library._client, 'create_cg_snapshot_view', - return_value=expected) - - result = self.library._create_snapshot_volume(image) - - self.assertEqual(expected, result) - create_view.assert_called_once_with(image['consistencyGroupId'], - mock.ANY, image['id']) - - def test_create_snapshot_volume(self): - image = copy.deepcopy(eseries_fake.SNAPSHOT_IMAGE) - grp = copy.deepcopy(eseries_fake.SNAPSHOT_GROUP) - self.mock_object(self.library, '_get_snapshot_group', return_value=grp) - expected = copy.deepcopy(eseries_fake.SNAPSHOT_VOLUME) - self.mock_object(self.library, '_is_cgsnapshot', return_value=False) - create_view = self.mock_object( - self.library._client, 'create_snapshot_volume', - return_value=expected) - - result = self.library._create_snapshot_volume(image) - - self.assertEqual(expected, result) - create_view.assert_called_once_with( - image['pitRef'], mock.ANY, image['baseVol']) - - def test_create_snapshot_group(self): - label = 'label' - - vol = copy.deepcopy(eseries_fake.VOLUME) - snapshot_group = copy.deepcopy(eseries_fake.SNAPSHOT_GROUP) - snapshot_group['baseVolume'] = vol['id'] - get_call = self.mock_object( - self.library, '_get_storage_pools', return_value=None) - create_call = self.mock_object( - self.library._client, 'create_snapshot_group', - return_value=snapshot_group) - - actual = self.library._create_snapshot_group(label, vol) - - get_call.assert_not_called() - create_call.assert_called_once_with(label, vol['id'], repo_percent=20) - self.assertEqual(snapshot_group, actual) - - def test_create_snapshot_group_legacy_ddp(self): - self.library._client.features.REST_1_3_RELEASE = False - vol = copy.deepcopy(eseries_fake.VOLUME) - pools = copy.deepcopy(eseries_fake.STORAGE_POOLS) - pool = pools[-1] - snapshot_group = copy.deepcopy(eseries_fake.SNAPSHOT_GROUP) - snapshot_group['baseVolume'] = 
vol['id'] - vol['volumeGroupRef'] = pool['id'] - pool['raidLevel'] = 'raidDiskPool' - get_call = self.mock_object( - self.library, '_get_storage_pools', return_value=pools) - create_call = self.mock_object( - self.library._client, 'create_snapshot_group', - return_value=snapshot_group) - - actual = self.library._create_snapshot_group('label', vol) - - create_call.assert_called_with('label', vol['id'], - vol['volumeGroupRef'], - repo_percent=mock.ANY) - get_call.assert_called_once_with() - self.assertEqual(snapshot_group, actual) - - def test_create_snapshot_group_legacy_vg(self): - self.library._client.features.REST_1_3_RELEASE = False - vol = copy.deepcopy(eseries_fake.VOLUME) - vol_size_gb = int(vol['totalSizeInBytes']) / units.Gi - pools = copy.deepcopy(eseries_fake.STORAGE_POOLS) - pool = pools[0] - pool['raidLevel'] = 'raid6' - snapshot_group = copy.deepcopy(eseries_fake.SNAPSHOT_GROUP) - snapshot_group['baseVolume'] = vol['id'] - vol['volumeGroupRef'] = pool['id'] - - get_call = self.mock_object( - self.library, '_get_sorted_available_storage_pools', - return_value=pools) - self.mock_object(self.library._client, 'create_snapshot_group', - return_value=snapshot_group) - actual = self.library._create_snapshot_group('label', vol) - - get_call.assert_called_once_with(vol_size_gb) - self.assertEqual(snapshot_group, actual) - - def test_get_snapshot(self): - fake_snap = copy.deepcopy(eseries_fake.FAKE_CINDER_SNAPSHOT) - snap = copy.deepcopy(eseries_fake.SNAPSHOT_IMAGE) - get_snap = self.mock_object( - self.library._client, 'list_snapshot_image', return_value=snap) - - result = self.library._get_snapshot(fake_snap) - - self.assertEqual(snap, result) - get_snap.assert_called_once_with(fake_snap['provider_id']) - - def test_get_snapshot_fail(self): - fake_snap = copy.deepcopy(eseries_fake.FAKE_CINDER_SNAPSHOT) - get_snap = self.mock_object( - self.library._client, 'list_snapshot_image', - side_effect=exception.NotFound) - - self.assertRaises(exception.NotFound, 
self.library._get_snapshot, - fake_snap) - - get_snap.assert_called_once_with(fake_snap['provider_id']) - - def test_get_snapshot_group_for_snapshot(self): - fake_id = 'id' - snap = copy.deepcopy(eseries_fake.SNAPSHOT_IMAGE) - grp = copy.deepcopy(eseries_fake.SNAPSHOT_GROUP) - get_snap = self.mock_object( - self.library, '_get_snapshot', return_value=snap) - get_grp = self.mock_object(self.library._client, 'list_snapshot_group', - return_value=grp) - - result = self.library._get_snapshot_group_for_snapshot(fake_id) - - self.assertEqual(grp, result) - get_grp.assert_called_once_with(snap['pitGroupRef']) - get_snap.assert_called_once_with(fake_id) - - def test_get_snapshot_group_for_snapshot_fail(self): - fake_id = 'id' - snap = copy.deepcopy(eseries_fake.SNAPSHOT_IMAGE) - get_snap = self.mock_object( - self.library, '_get_snapshot', return_value=snap) - get_grp = self.mock_object(self.library._client, 'list_snapshot_group', - side_effect=exception.NotFound) - - self.assertRaises(exception.NotFound, - self.library._get_snapshot_group_for_snapshot, - fake_id) - - get_grp.assert_called_once_with(snap['pitGroupRef']) - get_snap.assert_called_once_with(fake_id) - - def test_get_snapshot_groups_for_volume(self): - vol = copy.deepcopy(eseries_fake.VOLUME) - snapshot_group = copy.deepcopy(eseries_fake.SNAPSHOT_GROUP) - snapshot_group['baseVolume'] = vol['id'] - # Generate some snapshot groups that will not match - snapshot_groups = [copy.deepcopy(snapshot_group) for i in range( - self.library.MAX_SNAPSHOT_GROUP_COUNT)] - for i, group in enumerate(snapshot_groups): - group['baseVolume'] = str(i) - snapshot_groups.append(snapshot_group) - get_call = self.mock_object( - self.library._client, 'list_snapshot_groups', - return_value=snapshot_groups) - - groups = self.library._get_snapshot_groups_for_volume(vol) - - get_call.assert_called_once_with() - self.assertEqual([snapshot_group], groups) - - def test_get_available_snapshot_group(self): - vol = 
copy.deepcopy(eseries_fake.VOLUME) - snapshot_group = copy.deepcopy(eseries_fake.SNAPSHOT_GROUP) - snapshot_group['baseVolume'] = vol['id'] - snapshot_group['snapshotCount'] = 0 - # Generate some snapshot groups that will not match - - reserved_group = copy.deepcopy(snapshot_group) - reserved_group['label'] += self.library.SNAPSHOT_VOL_COPY_SUFFIX - - full_group = copy.deepcopy(snapshot_group) - full_group['snapshotCount'] = self.library.MAX_SNAPSHOT_COUNT - - cgroup = copy.deepcopy(snapshot_group) - cgroup['consistencyGroup'] = True - - snapshot_groups = [snapshot_group, reserved_group, full_group, cgroup] - get_call = self.mock_object( - self.library, '_get_snapshot_groups_for_volume', - return_value=snapshot_groups) - - group = self.library._get_available_snapshot_group(vol) - - get_call.assert_called_once_with(vol) - self.assertEqual(snapshot_group, group) - - def test_get_snapshot_groups_for_volume_not_found(self): - vol = copy.deepcopy(eseries_fake.VOLUME) - snapshot_group = copy.deepcopy(eseries_fake.SNAPSHOT_GROUP) - snapshot_group['baseVolume'] = vol['id'] - snapshot_group['snapshotCount'] = self.library.MAX_SNAPSHOT_COUNT - # Generate some snapshot groups that will not match - - get_call = self.mock_object( - self.library, '_get_snapshot_groups_for_volume', - return_value=[snapshot_group]) - - group = self.library._get_available_snapshot_group(vol) - - get_call.assert_called_once_with(vol) - self.assertIsNone(group) - - def test_create_snapshot_available_snap_group(self): - expected_snap = copy.deepcopy(eseries_fake.SNAPSHOT_IMAGE) - expected = {'provider_id': expected_snap['id']} - vol = copy.deepcopy(eseries_fake.VOLUME) - snapshot_group = copy.deepcopy(eseries_fake.SNAPSHOT_GROUP) - fake_label = 'fakeName' - self.mock_object(self.library, '_get_volume', return_value=vol) - create_call = self.mock_object( - self.library._client, 'create_snapshot_image', - return_value=expected_snap) - self.mock_object(self.library, '_get_available_snapshot_group', - 
return_value=snapshot_group) - self.mock_object(utils, 'convert_uuid_to_es_fmt', - return_value=fake_label) - fake_snapshot = copy.deepcopy(eseries_fake.FAKE_CINDER_SNAPSHOT) - - model_update = self.library.create_snapshot(fake_snapshot) - - self.assertEqual(expected, model_update) - create_call.assert_called_once_with(snapshot_group['id']) - - @ddt.data(False, True) - def test_create_snapshot_failure(self, cleanup_failure): - """Validate the behavior for a failure during snapshot creation""" - - vol = copy.deepcopy(eseries_fake.VOLUME) - snapshot_group = copy.deepcopy(eseries_fake.SNAPSHOT_GROUP) - snap_vol = copy.deepcopy(eseries_fake.SNAPSHOT_VOLUME) - fake_label = 'fakeName' - create_fail_exc = exception.NetAppDriverException('fail_create') - cleanup_fail_exc = exception.NetAppDriverException('volume_deletion') - if cleanup_failure: - exc_msg = cleanup_fail_exc.msg - delete_snap_grp = self.mock_object( - self.library, '_delete_snapshot_group', - side_effect=cleanup_fail_exc) - else: - exc_msg = create_fail_exc.msg - delete_snap_grp = self.mock_object( - self.library, '_delete_snapshot_group') - self.mock_object(self.library, '_get_volume', return_value=vol) - self.mock_object(self.library._client, 'create_snapshot_image', - side_effect=create_fail_exc) - self.mock_object(self.library._client, 'create_snapshot_volume', - return_value=snap_vol) - self.mock_object(self.library, '_get_available_snapshot_group', - return_value=snapshot_group) - self.mock_object(utils, 'convert_uuid_to_es_fmt', - return_value=fake_label) - fake_snapshot = copy.deepcopy(eseries_fake.FAKE_CINDER_SNAPSHOT) - - self.assertRaisesRegexp(exception.NetAppDriverException, - exc_msg, - self.library.create_snapshot, - fake_snapshot) - self.assertTrue(delete_snap_grp.called) - - def test_create_snapshot_no_snap_group(self): - self.library._client.features = mock.Mock() - expected_snap = copy.deepcopy(eseries_fake.SNAPSHOT_IMAGE) - vol = copy.deepcopy(eseries_fake.VOLUME) - snapshot_group = 
copy.deepcopy(eseries_fake.SNAPSHOT_GROUP) - fake_label = 'fakeName' - self.mock_object(self.library, '_get_volume', return_value=vol) - create_call = self.mock_object( - self.library._client, 'create_snapshot_image', - return_value=expected_snap) - self.mock_object(self.library, '_get_snapshot_groups_for_volume', - return_value=[snapshot_group]) - self.mock_object(self.library, '_get_available_snapshot_group', - return_value=None) - self.mock_object(utils, 'convert_uuid_to_es_fmt', - return_value=fake_label) - fake_snapshot = copy.deepcopy(eseries_fake.FAKE_CINDER_SNAPSHOT) - - snapshot = self.library.create_snapshot(fake_snapshot) - - expected = {'provider_id': expected_snap['id']} - self.assertEqual(expected, snapshot) - create_call.assert_called_once_with(snapshot_group['id']) - - def test_create_snapshot_no_snapshot_groups_remaining(self): - """Test the failure condition where all snap groups are allocated""" - - self.library._client.features = mock.Mock() - expected_snap = copy.deepcopy(eseries_fake.SNAPSHOT_IMAGE) - vol = copy.deepcopy(eseries_fake.VOLUME) - snapshot_group = copy.deepcopy(eseries_fake.SNAPSHOT_GROUP) - snap_vol = copy.deepcopy(eseries_fake.SNAPSHOT_VOLUME) - grp_count = (self.library.MAX_SNAPSHOT_GROUP_COUNT - - self.library.RESERVED_SNAPSHOT_GROUP_COUNT) - fake_label = 'fakeName' - self.mock_object(self.library, '_get_volume', return_value=vol) - self.mock_object(self.library._client, 'create_snapshot_image', - return_value=expected_snap) - self.mock_object(self.library._client, 'create_snapshot_volume', - return_value=snap_vol) - self.mock_object(self.library, '_get_available_snapshot_group', - return_value=None) - self.mock_object(self.library, '_get_snapshot_groups_for_volume', - return_value=[snapshot_group] * grp_count) - self.mock_object(utils, 'convert_uuid_to_es_fmt', - return_value=fake_label) - fake_snapshot = copy.deepcopy(eseries_fake.FAKE_CINDER_SNAPSHOT) - - # Error message should contain the maximum number of supported - # 
snapshots - self.assertRaisesRegexp(exception.SnapshotLimitExceeded, - str(self.library.MAX_SNAPSHOT_COUNT * - grp_count), - self.library.create_snapshot, fake_snapshot) - - def test_delete_snapshot(self): - fake_vol = cinder_utils.create_volume(self.ctxt) - fake_snap = cinder_utils.create_snapshot(self.ctxt, fake_vol['id']) - snap = copy.deepcopy(eseries_fake.SNAPSHOT_IMAGE) - vol = copy.deepcopy(eseries_fake.VOLUME) - self.mock_object(self.library, '_get_volume', return_value=vol) - self.mock_object(self.library, '_get_snapshot', return_value=snap) - - del_snap = self.mock_object(self.library, '_delete_es_snapshot') - - self.library.delete_snapshot(fake_snap) - - del_snap.assert_called_once_with(snap) - - def test_delete_es_snapshot(self): - vol = copy.deepcopy(eseries_fake.VOLUME) - snap_count = 30 - # Ensure that it's the oldest PIT - snap = copy.deepcopy(eseries_fake.SNAPSHOT_IMAGE) - snapshot_group = copy.deepcopy(eseries_fake.SNAPSHOT_GROUP) - fake_volume_refs = ['1', '2', snap['baseVol']] - fake_snapshot_group_refs = ['3', '4', snapshot_group['id']] - snapshots = [copy.deepcopy(snap) for i in range(snap_count)] - bitset = na_utils.BitSet(0) - for i, snapshot in enumerate(snapshots): - volume_ref = fake_volume_refs[i % len(fake_volume_refs)] - group_ref = fake_snapshot_group_refs[i % - len(fake_snapshot_group_refs)] - snapshot['pitGroupRef'] = group_ref - snapshot['baseVol'] = volume_ref - snapshot['pitSequenceNumber'] = str(i) - snapshot['id'] = i - bitset.set(i) - snapshots.append(snap) - - filtered_snaps = [x for x in snapshots - if x['pitGroupRef'] == snap['pitGroupRef']] - - self.mock_object(self.library, '_get_volume', return_value=vol) - self.mock_object(self.library, '_get_snapshot', return_value=snap) - self.mock_object(self.library, '_get_soft_delete_map', - return_value={snap['pitGroupRef']: repr(bitset)}) - self.mock_object(self.library._client, 'list_snapshot_images', - return_value=snapshots) - delete_image = self.mock_object( - self.library, 
'_cleanup_snapshot_images', - return_value=({snap['pitGroupRef']: repr(bitset)}, None)) - - self.library._delete_es_snapshot(snap) - - delete_image.assert_called_once_with(filtered_snaps, bitset) - - def test_delete_snapshot_oldest(self): - vol = copy.deepcopy(eseries_fake.VOLUME) - snap = copy.deepcopy(eseries_fake.SNAPSHOT_IMAGE) - snapshots = [snap] - self.mock_object(self.library, '_get_volume', return_value=vol) - self.mock_object(self.library, '_get_snapshot', return_value=snap) - self.mock_object(self.library, '_get_soft_delete_map', return_value={}) - self.mock_object(self.library._client, 'list_snapshot_images', - return_value=snapshots) - delete_image = self.mock_object( - self.library, '_cleanup_snapshot_images', - return_value=(None, [snap['pitGroupRef']])) - - self.library._delete_es_snapshot(snap) - - delete_image.assert_called_once_with(snapshots, - na_utils.BitSet(1)) - - def test_get_soft_delete_map(self): - fake_val = 'fake' - self.mock_object(self.library._client, 'list_backend_store', - return_value=fake_val) - - actual = self.library._get_soft_delete_map() - - self.assertEqual(fake_val, actual) - - def test_cleanup_snapshot_images_delete_all(self): - image = copy.deepcopy(eseries_fake.SNAPSHOT_IMAGE) - images = [image] * 32 - bitset = na_utils.BitSet() - for i, image in enumerate(images): - image['pitSequenceNumber'] = i - bitset.set(i) - delete_grp = self.mock_object(self.library._client, - 'delete_snapshot_group') - - updt, keys = self.library._cleanup_snapshot_images( - images, bitset) - - delete_grp.assert_called_once_with(image['pitGroupRef']) - self.assertIsNone(updt) - self.assertEqual([image['pitGroupRef']], keys) - - def test_cleanup_snapshot_images_delete_all_fail(self): - image = copy.deepcopy(eseries_fake.SNAPSHOT_IMAGE) - bitset = na_utils.BitSet(2 ** 32 - 1) - delete_grp = self.mock_object( - self.library._client, 'delete_snapshot_group', - side_effect=exception.NetAppDriverException) - - updt, keys = 
self.library._cleanup_snapshot_images( - [image], bitset) - - delete_grp.assert_called_once_with(image['pitGroupRef']) - self.assertIsNone(updt) - self.assertEqual([image['pitGroupRef']], keys) - - def test_cleanup_snapshot_images(self): - image = copy.deepcopy(eseries_fake.SNAPSHOT_IMAGE) - images = [image] * 32 - del_count = 16 - bitset = na_utils.BitSet() - for i, image in enumerate(images): - image['pitSequenceNumber'] = i - if i < del_count: - bitset.set(i) - exp_bitset = copy.deepcopy(bitset) - exp_bitset >>= 16 - delete_img = self.mock_object( - self.library, '_delete_snapshot_image') - - updt, keys = self.library._cleanup_snapshot_images( - images, bitset) - - self.assertEqual(del_count, delete_img.call_count) - self.assertIsNone(keys) - self.assertEqual({image['pitGroupRef']: exp_bitset}, updt) - - def test_delete_snapshot_image(self): - snap_group = copy.deepcopy(eseries_fake.SNAPSHOT_GROUP) - snap = copy.deepcopy(eseries_fake.SNAPSHOT_IMAGE) - - self.mock_object(self.library._client, 'list_snapshot_group', - return_value=snap_group) - - self.library._delete_snapshot_image(snap) - - def test_delete_snapshot_image_fail_cleanup(self): - snap_group = copy.deepcopy(eseries_fake.SNAPSHOT_GROUP) - snap_group['snapshotCount'] = 0 - snap = copy.deepcopy(eseries_fake.SNAPSHOT_IMAGE) - - self.mock_object(self.library._client, 'list_snapshot_group', - return_value=snap_group) - - self.library._delete_snapshot_image(snap) - - def test_delete_snapshot_not_found(self): - fake_snapshot = copy.deepcopy(eseries_fake.FAKE_CINDER_SNAPSHOT) - get_snap = self.mock_object(self.library, '_get_snapshot', - side_effect=exception.NotFound) - - with mock.patch.object(library, 'LOG', mock.Mock()): - self.library.delete_snapshot(fake_snapshot) - get_snap.assert_called_once_with(fake_snapshot) - self.assertTrue(library.LOG.warning.called) - - @ddt.data(['key1', 'key2'], [], None) - def test_merge_soft_delete_changes_keys(self, keys_to_del): - count = len(keys_to_del) if keys_to_del is 
not None else 0 - save_store = self.mock_object( - self.library._client, 'save_backend_store') - index = {'key1': 'val'} - get_store = self.mock_object(self.library, '_get_soft_delete_map', - return_value=index) - - self.library._merge_soft_delete_changes(None, keys_to_del) - - if count: - expected = copy.deepcopy(index) - for key in keys_to_del: - expected.pop(key, None) - get_store.assert_called_once_with() - save_store.assert_called_once_with( - self.library.SNAPSHOT_PERSISTENT_STORE_KEY, expected) - else: - get_store.assert_not_called() - save_store.assert_not_called() - - def test_create_cgsnapshot(self): - fake_cgsnapshot = copy.deepcopy(eseries_fake.FAKE_CINDER_CG_SNAPSHOT) - fake_vol = cinder_utils.create_volume(self.ctxt) - fake_snapshots = [cinder_utils.create_snapshot(self.ctxt, - fake_vol['id'])] - vol = copy.deepcopy(eseries_fake.VOLUME) - image = copy.deepcopy(eseries_fake.SNAPSHOT_IMAGE) - image['baseVol'] = vol['id'] - cg_snaps = [image] - cg = copy.deepcopy(eseries_fake.FAKE_CONSISTENCY_GROUP) - - for snap in cg_snaps: - snap['baseVol'] = vol['id'] - get_cg = self.mock_object( - self.library, '_get_consistencygroup_by_name', return_value=cg) - get_vol = self.mock_object( - self.library, '_get_volume', return_value=vol) - mk_snap = self.mock_object( - self.library._client, 'create_consistency_group_snapshot', - return_value=cg_snaps) - - model_update, snap_updt = self.library.create_cgsnapshot( - fake_cgsnapshot, fake_snapshots) - - self.assertIsNone(model_update) - for snap in cg_snaps: - self.assertIn({'id': fake_snapshots[0]['id'], - 'provider_id': snap['id'], - 'status': 'available'}, snap_updt) - self.assertEqual(len(cg_snaps), len(snap_updt)) - - get_cg.assert_called_once_with(utils.convert_uuid_to_es_fmt( - fake_cgsnapshot['consistencygroup_id'])) - self.assertEqual(get_vol.call_count, len(fake_snapshots)) - mk_snap.assert_called_once_with(cg['id']) - - def test_create_cgsnapshot_cg_fail(self): - fake_cgsnapshot = 
copy.deepcopy(eseries_fake.FAKE_CINDER_CG_SNAPSHOT) - fake_snapshots = [copy.deepcopy(eseries_fake.FAKE_CINDER_SNAPSHOT)] - self.mock_object( - self.library, '_get_consistencygroup_by_name', - side_effect=exception.NetAppDriverException) - - self.assertRaises( - exception.NetAppDriverException, - self.library.create_cgsnapshot, fake_cgsnapshot, fake_snapshots) - - def test_delete_cgsnapshot(self): - """Test the deletion of a cgsnapshot when a soft delete is required""" - fake_cgsnapshot = copy.deepcopy(eseries_fake.FAKE_CINDER_CG_SNAPSHOT) - fake_vol = cinder_utils.create_volume(self.ctxt) - fake_snapshots = [cinder_utils.create_snapshot( - self.ctxt, fake_vol['id'])] - cg = copy.deepcopy(eseries_fake.FAKE_CONSISTENCY_GROUP) - cg_snap = copy.deepcopy(eseries_fake.SNAPSHOT_IMAGE) - # Ensure that the snapshot to be deleted is not the oldest - cg_snap['pitSequenceNumber'] = str(max(cg['uniqueSequenceNumber'])) - cg_snaps = [cg_snap] - for snap in fake_snapshots: - snap['provider_id'] = cg_snap['id'] - vol = copy.deepcopy(eseries_fake.VOLUME) - for snap in cg_snaps: - snap['baseVol'] = vol['id'] - get_cg = self.mock_object( - self.library, '_get_consistencygroup_by_name', return_value=cg) - self.mock_object( - self.library._client, 'delete_consistency_group_snapshot') - self.mock_object( - self.library._client, 'get_consistency_group_snapshots', - return_value=cg_snaps) - soft_del = self.mock_object( - self.library, '_soft_delete_cgsnapshot', return_value=(None, None)) - - # Mock the locking mechanism - model_update, snap_updt = self.library.delete_cgsnapshot( - fake_cgsnapshot, fake_snapshots) - - self.assertIsNone(model_update) - self.assertIsNone(snap_updt) - get_cg.assert_called_once_with(utils.convert_uuid_to_es_fmt( - fake_cgsnapshot['consistencygroup_id'])) - soft_del.assert_called_once_with( - cg, cg_snap['pitSequenceNumber']) - - @ddt.data(True, False) - def test_soft_delete_cgsnapshot(self, bitset_exists): - """Test the soft deletion of a cgsnapshot""" - cg = 
copy.deepcopy(eseries_fake.FAKE_CONSISTENCY_GROUP) - cg_snap = copy.deepcopy(eseries_fake.SNAPSHOT_IMAGE) - seq_num = 10 - cg_snap['pitSequenceNumber'] = seq_num - cg_snaps = [cg_snap] - self.mock_object( - self.library._client, 'delete_consistency_group_snapshot') - self.mock_object( - self.library._client, 'get_consistency_group_snapshots', - return_value=cg_snaps) - bitset = na_utils.BitSet(1) - index = {cg['id']: repr(bitset)} if bitset_exists else {} - bitset >>= len(cg_snaps) - updt = {cg['id']: repr(bitset)} - self.mock_object(self.library, '_get_soft_delete_map', - return_value=index) - save_map = self.mock_object(self.library, '_merge_soft_delete_changes') - - model_update, snap_updt = self.library._soft_delete_cgsnapshot( - cg, seq_num) - - self.assertIsNone(model_update) - self.assertIsNone(snap_updt) - save_map.assert_called_once_with(updt, None) - - def test_delete_cgsnapshot_single(self): - """Test the backend deletion of the oldest cgsnapshot""" - fake_cgsnapshot = copy.deepcopy(eseries_fake.FAKE_CINDER_CG_SNAPSHOT) - fake_vol = cinder_utils.create_volume(self.ctxt) - fake_snapshots = [cinder_utils.create_snapshot(self.ctxt, - fake_vol['id'])] - cg_snap = copy.deepcopy(eseries_fake.SNAPSHOT_IMAGE) - cg_snaps = [cg_snap] - for snap in fake_snapshots: - snap['provider_id'] = cg_snap['id'] - cg = copy.deepcopy(eseries_fake.FAKE_CONSISTENCY_GROUP) - cg['uniqueSequenceNumber'] = [cg_snap['pitSequenceNumber']] - vol = copy.deepcopy(eseries_fake.VOLUME) - for snap in cg_snaps: - snap['baseVol'] = vol['id'] - get_cg = self.mock_object( - self.library, '_get_consistencygroup_by_name', return_value=cg) - del_snap = self.mock_object( - self.library._client, 'delete_consistency_group_snapshot', - return_value=cg_snaps) - - model_update, snap_updt = self.library.delete_cgsnapshot( - fake_cgsnapshot, fake_snapshots) - - self.assertIsNone(model_update) - self.assertIsNone(snap_updt) - get_cg.assert_called_once_with(utils.convert_uuid_to_es_fmt( - 
fake_cgsnapshot['consistencygroup_id'])) - del_snap.assert_called_once_with(cg['id'], cg_snap[ - 'pitSequenceNumber']) - - def test_delete_cgsnapshot_snap_not_found(self): - fake_cgsnapshot = copy.deepcopy(eseries_fake.FAKE_CINDER_CG_SNAPSHOT) - fake_vol = cinder_utils.create_volume(self.ctxt) - fake_snapshots = [cinder_utils.create_snapshot( - self.ctxt, fake_vol['id'])] - cg_snap = copy.deepcopy(eseries_fake.SNAPSHOT_IMAGE) - cg_snaps = [cg_snap] - cg = copy.deepcopy(eseries_fake.FAKE_CONSISTENCY_GROUP) - self.mock_object(self.library, '_get_consistencygroup_by_name', - return_value=cg) - self.mock_object( - self.library._client, 'delete_consistency_group_snapshot', - return_value=cg_snaps) - - self.assertRaises( - exception.CgSnapshotNotFound, - self.library.delete_cgsnapshot, fake_cgsnapshot, fake_snapshots) - - @ddt.data(0, 1, 10, 32) - def test_cleanup_cg_snapshots(self, count): - # Set the soft delete bit for 'count' snapshot images - bitset = na_utils.BitSet() - for i in range(count): - bitset.set(i) - cg = copy.deepcopy(eseries_fake.FAKE_CONSISTENCY_GROUP) - # Define 32 snapshots for the CG - cg['uniqueSequenceNumber'] = list(range(32)) - cg_id = cg['id'] - del_snap = self.mock_object( - self.library._client, 'delete_consistency_group_snapshot') - expected_bitset = copy.deepcopy(bitset) >> count - expected_updt = {cg_id: repr(expected_bitset)} - - updt = self.library._cleanup_cg_snapshots( - cg_id, cg['uniqueSequenceNumber'], bitset) - - self.assertEqual(count, del_snap.call_count) - self.assertEqual(expected_updt, updt) - - @ddt.data(False, True) - def test_get_pool_operation_progress(self, expect_complete): - """Validate the operation progress is interpreted correctly""" - - pool = copy.deepcopy(eseries_fake.STORAGE_POOL) - if expect_complete: - pool_progress = [] - else: - pool_progress = copy.deepcopy( - eseries_fake.FAKE_POOL_ACTION_PROGRESS) - - expected_actions = set(action['currentAction'] for action in - pool_progress) - expected_eta = 
reduce(lambda x, y: x + y['estimatedTimeToCompletion'], - pool_progress, 0) - - self.library._client.get_pool_operation_progress = mock.Mock( - return_value=pool_progress) - - complete, actions, eta = self.library._get_pool_operation_progress( - pool['id']) - self.assertEqual(expect_complete, complete) - self.assertEqual(expected_actions, actions) - self.assertEqual(expected_eta, eta) - - @ddt.data(False, True) - def test_get_pool_operation_progress_with_action(self, expect_complete): - """Validate the operation progress is interpreted correctly""" - - expected_action = 'fakeAction' - pool = copy.deepcopy(eseries_fake.STORAGE_POOL) - if expect_complete: - pool_progress = copy.deepcopy( - eseries_fake.FAKE_POOL_ACTION_PROGRESS) - for progress in pool_progress: - progress['currentAction'] = 'none' - else: - pool_progress = copy.deepcopy( - eseries_fake.FAKE_POOL_ACTION_PROGRESS) - pool_progress[0]['currentAction'] = expected_action - - expected_actions = set(action['currentAction'] for action in - pool_progress) - expected_eta = reduce(lambda x, y: x + y['estimatedTimeToCompletion'], - pool_progress, 0) - - self.library._client.get_pool_operation_progress = mock.Mock( - return_value=pool_progress) - - complete, actions, eta = self.library._get_pool_operation_progress( - pool['id'], expected_action) - self.assertEqual(expect_complete, complete) - self.assertEqual(expected_actions, actions) - self.assertEqual(expected_eta, eta) - - @mock.patch('eventlet.greenthread.sleep') - def test_extend_volume(self, _mock_sleep): - """Test volume extend with a thick-provisioned volume""" - - def get_copy_progress(): - for eta in range(5, -1, -1): - action_status = 'none' if eta == 0 else 'remappingDve' - complete = action_status == 'none' - yield complete, action_status, eta - - fake_volume = copy.deepcopy(get_fake_volume()) - volume = copy.deepcopy(eseries_fake.VOLUME) - new_capacity = 10 - volume['objectType'] = 'volume' - self.library._client.expand_volume = mock.Mock() - 
self.library._get_pool_operation_progress = mock.Mock( - side_effect=get_copy_progress()) - self.library._get_volume = mock.Mock(return_value=volume) - - self.library.extend_volume(fake_volume, new_capacity) - - # Ensure that the extend method waits until the expansion is completed - self.assertEqual(6, - self.library._get_pool_operation_progress.call_count - ) - self.library._client.expand_volume.assert_called_with(volume['id'], - new_capacity, - False) - - def test_extend_volume_thin(self): - """Test volume extend with a thin-provisioned volume""" - - fake_volume = copy.deepcopy(get_fake_volume()) - volume = copy.deepcopy(eseries_fake.VOLUME) - new_capacity = 10 - volume['objectType'] = 'thinVolume' - self.library._client.expand_volume = mock.Mock(return_value=volume) - self.library._get_volume_operation_progress = mock.Mock() - self.library._get_volume = mock.Mock(return_value=volume) - - self.library.extend_volume(fake_volume, new_capacity) - - self.assertFalse(self.library._get_volume_operation_progress.called) - self.library._client.expand_volume.assert_called_with(volume['id'], - new_capacity, - True) - - def test_delete_non_existing_volume(self): - volume2 = get_fake_volume() - # Change to a nonexistent id. 
- volume2['name_id'] = '88888888-4444-4444-4444-cccccccccccc' - self.assertIsNone(self.library.delete_volume(volume2)) - - def test_map_volume_to_host_volume_not_mapped(self): - """Map the volume directly to destination host.""" - self.mock_object(self.library._client, - 'get_volume_mappings_for_volume', - return_value=[]) - self.mock_object(host_mapper, 'map_volume_to_single_host', - return_value=eseries_fake.VOLUME_MAPPING) - - self.library.map_volume_to_host(get_fake_volume(), - eseries_fake.VOLUME, - eseries_fake.INITIATOR_NAME_2) - - self.assertTrue( - self.library._client.get_volume_mappings_for_volume.called) - self.assertTrue(host_mapper.map_volume_to_single_host.called) - - def test_map_volume_to_host_volume_not_mapped_host_does_not_exist(self): - """Should create the host map directly to the host.""" - self.mock_object(self.library._client, 'list_hosts', - return_value=[]) - self.mock_object(self.library._client, 'create_host_with_ports', - return_value=eseries_fake.HOST_2) - self.mock_object(self.library._client, - 'get_volume_mappings_for_volume', - return_value=[]) - self.mock_object(host_mapper, 'map_volume_to_single_host', - return_value=eseries_fake.VOLUME_MAPPING) - - self.library.map_volume_to_host(get_fake_volume(), - eseries_fake.VOLUME, - eseries_fake.INITIATOR_NAME_2) - - self.assertTrue(self.library._client.create_host_with_ports.called) - self.assertTrue( - self.library._client.get_volume_mappings_for_volume.called) - self.assertTrue(host_mapper.map_volume_to_single_host.called) - - def test_map_volume_to_host_volume_already_mapped(self): - """Should be a no-op.""" - self.mock_object(host_mapper, 'map_volume_to_multiple_hosts', - return_value=eseries_fake.VOLUME_MAPPING) - - self.library.map_volume_to_host(get_fake_volume(), - eseries_fake.VOLUME, - eseries_fake.INITIATOR_NAME) - - self.assertTrue(host_mapper.map_volume_to_multiple_hosts.called) - - -class NetAppEseriesISCSICHAPAuthenticationTestCase(test.TestCase): - """Test behavior when 
the use_chap_auth configuration option is True.""" - - def setUp(self): - super(NetAppEseriesISCSICHAPAuthenticationTestCase, self).setUp() - config = eseries_fake.create_configuration_eseries() - config.use_chap_auth = True - config.chap_password = None - config.chap_username = None - - kwargs = {'configuration': config} - - self.library = library.NetAppESeriesLibrary("FAKE", **kwargs) - self.library._client = eseries_fake.FakeEseriesClient() - self.library._client.features = mock.Mock() - self.library._client.features = na_utils.Features() - self.library._client.features.add_feature('CHAP_AUTHENTICATION', - supported=True, - min_version="1.53.9010.15") - self.mock_object(self.library, - '_check_storage_system') - self.library.check_for_setup_error() - - def test_initialize_connection_with_chap(self): - connector = {'initiator': eseries_fake.INITIATOR_NAME} - self.mock_object(self.library._client, 'get_volume_mappings', - return_value=[]) - self.mock_object(self.library._client, 'list_hosts', - return_value=[]) - self.mock_object(self.library._client, 'create_host_with_ports', - return_value=[eseries_fake.HOST]) - self.mock_object(host_mapper, 'map_volume_to_single_host', - return_value=eseries_fake.VOLUME_MAPPING) - mock_configure_chap = ( - self.mock_object(self.library, - '_configure_chap', - return_value=(eseries_fake.FAKE_CHAP_USERNAME, - eseries_fake.FAKE_CHAP_SECRET))) - - properties = self.library.initialize_connection_iscsi( - get_fake_volume(), connector) - - mock_configure_chap.assert_called_with(eseries_fake.FAKE_TARGET_IQN) - self.assertDictEqual(eseries_fake.FAKE_TARGET_DICT, properties) - - def test_configure_chap_with_no_chap_secret_specified(self): - mock_invoke_generate_random_secret = self.mock_object( - volume_utils, - 'generate_password', - return_value=eseries_fake.FAKE_CHAP_SECRET) - mock_invoke_set_chap_authentication = self.mock_object( - self.library._client, - 'set_chap_authentication', - return_value=eseries_fake.FAKE_CHAP_POST_DATA) - 
- username, password = self.library._configure_chap( - eseries_fake.FAKE_TARGET_IQN) - - self.assertTrue(mock_invoke_generate_random_secret.called) - mock_invoke_set_chap_authentication.assert_called_with( - *eseries_fake.FAKE_CLIENT_CHAP_PARAMETERS) - self.assertEqual(eseries_fake.FAKE_CHAP_USERNAME, username) - self.assertEqual(eseries_fake.FAKE_CHAP_SECRET, password) - - def test_configure_chap_with_no_chap_username_specified(self): - mock_invoke_generate_random_secret = self.mock_object( - volume_utils, - 'generate_password', - return_value=eseries_fake.FAKE_CHAP_SECRET) - mock_invoke_set_chap_authentication = self.mock_object( - self.library._client, - 'set_chap_authentication', - return_value=eseries_fake.FAKE_CHAP_POST_DATA) - mock_log = self.mock_object(library, 'LOG') - warn_msg = 'No CHAP username found for CHAP user' - - username, password = self.library._configure_chap( - eseries_fake.FAKE_TARGET_IQN) - - self.assertTrue(mock_invoke_generate_random_secret.called) - self.assertTrue(mock_log.warning.find(warn_msg)) - mock_invoke_set_chap_authentication.assert_called_with( - *eseries_fake.FAKE_CLIENT_CHAP_PARAMETERS) - self.assertEqual(eseries_fake.FAKE_CHAP_USERNAME, username) - self.assertEqual(eseries_fake.FAKE_CHAP_SECRET, password) - - def test_configure_chap_with_invalid_version(self): - connector = {'initiator': eseries_fake.INITIATOR_NAME} - self.mock_object(self.library._client, - 'get_volume_mappings_for_volume', - return_value=[]) - self.mock_object(host_mapper, - 'map_volume_to_single_host', - return_value=eseries_fake.VOLUME_MAPPING) - self.library._client.features.CHAP_AUTHENTICATION.supported = False - self.library._client.api_version = "1.52.9010.01" - - self.assertRaises(exception.NetAppDriverException, - self.library.initialize_connection_iscsi, - get_fake_volume(), - connector) diff --git a/cinder/tests/unit/volume/drivers/netapp/eseries/test_utils.py b/cinder/tests/unit/volume/drivers/netapp/eseries/test_utils.py deleted file mode 
100644 index 8afe41e7d..000000000 --- a/cinder/tests/unit/volume/drivers/netapp/eseries/test_utils.py +++ /dev/null @@ -1,35 +0,0 @@ -# Copyright (c) 2014 Clinton Knight. All rights reserved. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -""" -Mock unit tests for the NetApp E-series driver utility module -""" - -import six - -from cinder import test -from cinder.volume.drivers.netapp.eseries import utils - - -class NetAppEseriesDriverUtilsTestCase(test.TestCase): - - def test_convert_uuid_to_es_fmt(self): - value = 'e67e931a-b2ed-4890-938b-3acc6a517fac' - result = utils.convert_uuid_to_es_fmt(value) - self.assertEqual('4Z7JGGVS5VEJBE4LHLGGUUL7VQ', result) - - def test_convert_es_fmt_to_uuid(self): - value = '4Z7JGGVS5VEJBE4LHLGGUUL7VQ' - result = six.text_type(utils.convert_es_fmt_to_uuid(value)) - self.assertEqual('e67e931a-b2ed-4890-938b-3acc6a517fac', result) diff --git a/cinder/tests/unit/volume/drivers/netapp/fakes.py b/cinder/tests/unit/volume/drivers/netapp/fakes.py deleted file mode 100644 index 6a6830598..000000000 --- a/cinder/tests/unit/volume/drivers/netapp/fakes.py +++ /dev/null @@ -1,139 +0,0 @@ -# Copyright (c) - 2014, Clinton Knight All rights reserved. -# Copyright (c) - 2015, Alex Meade. All Rights Reserved. -# Copyright (c) - 2015, Rushil Chugh. All Rights Reserved. -# Copyright (c) - 2015, Tom Barron. All Rights Reserved. 
-# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - - -from cinder.volume import configuration as conf -import cinder.volume.drivers.netapp.options as na_opts - - -ISCSI_FAKE_LUN_ID = 1 - -ISCSI_FAKE_IQN = 'iqn.1993-08.org.debian:01:10' - -ISCSI_FAKE_ADDRESS = '10.63.165.216' - -ISCSI_FAKE_PORT = '2232' - -ISCSI_FAKE_VOLUME = {'id': 'fake_id'} - -ISCSI_FAKE_TARGET = {} -ISCSI_FAKE_TARGET['address'] = ISCSI_FAKE_ADDRESS -ISCSI_FAKE_TARGET['port'] = ISCSI_FAKE_PORT - -ISCSI_FAKE_VOLUME = {'id': 'fake_id', 'provider_auth': 'None stack password'} - -FC_ISCSI_TARGET_INFO_DICT = {'target_discovered': False, - 'target_portal': '10.63.165.216:2232', - 'target_iqn': ISCSI_FAKE_IQN, - 'target_lun': ISCSI_FAKE_LUN_ID, - 'volume_id': ISCSI_FAKE_VOLUME['id'], - 'auth_method': 'None', 'auth_username': 'stack', - 'auth_password': 'password'} - -VOLUME_NAME = 'fake_volume_name' -VOLUME_ID = 'fake_volume_id' -VOLUME_TYPE_ID = 'fake_volume_type_id' - -VOLUME = { - 'name': VOLUME_NAME, - 'size': 42, - 'id': VOLUME_ID, - 'host': 'fake_host@fake_backend#fake_pool', - 'volume_type_id': VOLUME_TYPE_ID, -} - -SNAPSHOT_NAME = 'fake_snapshot_name' -SNAPSHOT_ID = 'fake_snapshot_id' - -SNAPSHOT = { - 'name': SNAPSHOT_NAME, - 'id': SNAPSHOT_ID, - 'volume_id': VOLUME_ID, - 'volume_name': VOLUME_NAME, - 'volume_size': 42, -} - -QOS_SPECS = {} - -EXTRA_SPECS = {} - -MAX_THROUGHPUT = '21734278B/s' -QOS_POLICY_GROUP_NAME = 'fake_qos_policy_group_name' -LEGACY_EXTRA_SPECS = {'netapp:qos_policy_group': 
QOS_POLICY_GROUP_NAME} - -LEGACY_QOS = { - 'policy_name': QOS_POLICY_GROUP_NAME, -} - -QOS_POLICY_GROUP_SPEC = { - 'max_throughput': MAX_THROUGHPUT, - 'policy_name': 'openstack-%s' % VOLUME_ID, -} - -QOS_POLICY_GROUP_INFO_NONE = {'legacy': None, 'spec': None} - -QOS_POLICY_GROUP_INFO = {'legacy': None, 'spec': QOS_POLICY_GROUP_SPEC} - -LEGACY_QOS_POLICY_GROUP_INFO = { - 'legacy': LEGACY_QOS, - 'spec': None, -} - -INVALID_QOS_POLICY_GROUP_INFO = { - 'legacy': LEGACY_QOS, - 'spec': QOS_POLICY_GROUP_SPEC, -} - -QOS_SPECS_ID = 'fake_qos_specs_id' -QOS_SPEC = {'maxBPS': 21734278} -OUTER_BACKEND_QOS_SPEC = { - 'id': QOS_SPECS_ID, - 'specs': QOS_SPEC, - 'consumer': 'back-end', -} -OUTER_FRONTEND_QOS_SPEC = { - 'id': QOS_SPECS_ID, - 'specs': QOS_SPEC, - 'consumer': 'front-end', -} -OUTER_BOTH_QOS_SPEC = { - 'id': QOS_SPECS_ID, - 'specs': QOS_SPEC, - 'consumer': 'both', -} -VOLUME_TYPE = {'id': VOLUME_TYPE_ID, 'qos_specs_id': QOS_SPECS_ID} - - -def create_configuration(): - config = conf.Configuration(None) - config.append_config_values(na_opts.netapp_connection_opts) - config.append_config_values(na_opts.netapp_transport_opts) - config.append_config_values(na_opts.netapp_basicauth_opts) - config.append_config_values(na_opts.netapp_provisioning_opts) - return config - - -def create_configuration_7mode(): - config = create_configuration() - config.append_config_values(na_opts.netapp_7mode_opts) - return config - - -def create_configuration_cmode(): - config = create_configuration() - config.append_config_values(na_opts.netapp_cluster_opts) - return config diff --git a/cinder/tests/unit/volume/drivers/netapp/test_common.py b/cinder/tests/unit/volume/drivers/netapp/test_common.py deleted file mode 100644 index 1a3ce7feb..000000000 --- a/cinder/tests/unit/volume/drivers/netapp/test_common.py +++ /dev/null @@ -1,135 +0,0 @@ -# Copyright (c) 2014 Clinton Knight. All rights reserved. 
-# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from cinder import exception -from cinder import test -import cinder.tests.unit.volume.drivers.netapp.fakes as na_fakes -import cinder.volume.drivers.netapp.common as na_common -import cinder.volume.drivers.netapp.dataontap.fc_cmode as fc_cmode -import cinder.volume.drivers.netapp.utils as na_utils - - -class NetAppDriverFactoryTestCase(test.TestCase): - - def setUp(self): - super(NetAppDriverFactoryTestCase, self).setUp() - self.mock_object(na_common, 'LOG') - - def test_new(self): - - self.mock_object(na_utils.OpenStackInfo, 'info', - return_value='fake_info') - mock_create_driver = self.mock_object(na_common.NetAppDriver, - 'create_driver') - - config = na_fakes.create_configuration() - config.netapp_storage_family = 'fake_family' - config.netapp_storage_protocol = 'fake_protocol' - - kwargs = {'configuration': config} - na_common.NetAppDriver(**kwargs) - - kwargs['app_version'] = 'fake_info' - mock_create_driver.assert_called_with('fake_family', 'fake_protocol', - *(), **kwargs) - - def test_new_missing_config(self): - - self.mock_object(na_utils.OpenStackInfo, 'info') - self.mock_object(na_common.NetAppDriver, 'create_driver') - - self.assertRaises(exception.InvalidInput, na_common.NetAppDriver, **{}) - - def test_new_missing_family(self): - - self.mock_object(na_utils.OpenStackInfo, 'info') - self.mock_object(na_common.NetAppDriver, 'create_driver') - - config = na_fakes.create_configuration() - 
config.netapp_storage_protocol = 'fake_protocol' - config.netapp_storage_family = None - - kwargs = {'configuration': config} - self.assertRaises(exception.InvalidInput, - na_common.NetAppDriver, - **kwargs) - - def test_new_missing_protocol(self): - - self.mock_object(na_utils.OpenStackInfo, 'info') - self.mock_object(na_common.NetAppDriver, 'create_driver') - - config = na_fakes.create_configuration() - config.netapp_storage_family = 'fake_family' - - kwargs = {'configuration': config} - self.assertRaises(exception.InvalidInput, - na_common.NetAppDriver, - **kwargs) - - def test_create_driver(self): - - def get_full_class_name(obj): - return obj.__module__ + '.' + obj.__class__.__name__ - - kwargs = { - 'configuration': na_fakes.create_configuration(), - 'app_version': 'fake_info', - 'host': 'fakehost@fakebackend', - } - - registry = na_common.NETAPP_UNIFIED_DRIVER_REGISTRY - - for family in registry: - for protocol, full_class_name in registry[family].items(): - driver = na_common.NetAppDriver.create_driver( - family, protocol, **kwargs) - self.assertEqual(full_class_name, get_full_class_name(driver)) - - def test_create_driver_case_insensitive(self): - - kwargs = { - 'configuration': na_fakes.create_configuration(), - 'app_version': 'fake_info', - 'host': 'fakehost@fakebackend', - } - - driver = na_common.NetAppDriver.create_driver('ONTAP_CLUSTER', 'FC', - **kwargs) - - self.assertIsInstance(driver, fc_cmode.NetAppCmodeFibreChannelDriver) - - def test_create_driver_invalid_family(self): - - kwargs = { - 'configuration': na_fakes.create_configuration(), - 'app_version': 'fake_info', - 'host': 'fakehost@fakebackend', - } - - self.assertRaises(exception.InvalidInput, - na_common.NetAppDriver.create_driver, - 'kardashian', 'iscsi', **kwargs) - - def test_create_driver_invalid_protocol(self): - - kwargs = { - 'configuration': na_fakes.create_configuration(), - 'app_version': 'fake_info', - 'host': 'fakehost@fakebackend', - } - - 
self.assertRaises(exception.InvalidInput, - na_common.NetAppDriver.create_driver, - 'ontap_7mode', 'carrier_pigeon', **kwargs) diff --git a/cinder/tests/unit/volume/drivers/netapp/test_utils.py b/cinder/tests/unit/volume/drivers/netapp/test_utils.py deleted file mode 100644 index 634fa2198..000000000 --- a/cinder/tests/unit/volume/drivers/netapp/test_utils.py +++ /dev/null @@ -1,897 +0,0 @@ -# Copyright (c) 2014 Clinton Knight. All rights reserved. -# Copyright (c) 2015 Tom Barron. All rights reserved. -# Copyright (c) 2016 Michael Price. All rights reserved. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
-""" -Mock unit tests for the NetApp driver utility module -""" - -import copy -import ddt -import platform - -import mock -from oslo_concurrency import processutils as putils - -from cinder import context -from cinder import exception -from cinder import test -import cinder.tests.unit.volume.drivers.netapp.fakes as fake -from cinder import version -from cinder.volume.drivers.netapp import utils as na_utils -from cinder.volume import qos_specs -from cinder.volume import volume_types - - -class NetAppDriverUtilsTestCase(test.TestCase): - - @mock.patch.object(na_utils, 'LOG', mock.Mock()) - def test_validate_instantiation_proxy(self): - kwargs = {'netapp_mode': 'proxy'} - na_utils.validate_instantiation(**kwargs) - self.assertEqual(0, na_utils.LOG.warning.call_count) - - @mock.patch.object(na_utils, 'LOG', mock.Mock()) - def test_validate_instantiation_no_proxy(self): - kwargs = {'netapp_mode': 'asdf'} - na_utils.validate_instantiation(**kwargs) - self.assertEqual(1, na_utils.LOG.warning.call_count) - - def test_check_flags(self): - - class TestClass(object): - pass - - required_flags = ['flag1', 'flag2'] - configuration = TestClass() - setattr(configuration, 'flag1', 'value1') - setattr(configuration, 'flag3', 'value3') - self.assertRaises(exception.InvalidInput, na_utils.check_flags, - required_flags, configuration) - - setattr(configuration, 'flag2', 'value2') - self.assertIsNone(na_utils.check_flags(required_flags, configuration)) - - def test_to_bool(self): - self.assertTrue(na_utils.to_bool(True)) - self.assertTrue(na_utils.to_bool('true')) - self.assertTrue(na_utils.to_bool('yes')) - self.assertTrue(na_utils.to_bool('y')) - self.assertTrue(na_utils.to_bool(1)) - self.assertTrue(na_utils.to_bool('1')) - self.assertFalse(na_utils.to_bool(False)) - self.assertFalse(na_utils.to_bool('false')) - self.assertFalse(na_utils.to_bool('asdf')) - self.assertFalse(na_utils.to_bool('no')) - self.assertFalse(na_utils.to_bool('n')) - self.assertFalse(na_utils.to_bool(0)) - 
self.assertFalse(na_utils.to_bool('0')) - self.assertFalse(na_utils.to_bool(2)) - self.assertFalse(na_utils.to_bool('2')) - - def test_set_safe_attr(self): - - fake_object = mock.Mock() - fake_object.fake_attr = None - - # test initial checks - self.assertFalse(na_utils.set_safe_attr(None, fake_object, None)) - self.assertFalse(na_utils.set_safe_attr(fake_object, None, None)) - self.assertFalse(na_utils.set_safe_attr(fake_object, 'fake_attr', - None)) - - # test value isn't changed if it shouldn't be and retval is False - fake_object.fake_attr = 'fake_value' - self.assertFalse(na_utils.set_safe_attr(fake_object, 'fake_attr', - 'fake_value')) - self.assertEqual('fake_value', fake_object.fake_attr) - - # test value is changed if it should be and retval is True - self.assertTrue(na_utils.set_safe_attr(fake_object, 'fake_attr', - 'new_fake_value')) - self.assertEqual('new_fake_value', fake_object.fake_attr) - - def test_round_down(self): - self.assertAlmostEqual(na_utils.round_down(5.567), 5.56) - self.assertAlmostEqual(na_utils.round_down(5.567, '0.00'), 5.56) - self.assertAlmostEqual(na_utils.round_down(5.567, '0.0'), 5.5) - self.assertAlmostEqual(na_utils.round_down(5.567, '0'), 5) - self.assertAlmostEqual(na_utils.round_down(0, '0.00'), 0) - self.assertAlmostEqual(na_utils.round_down(-5.567), -5.56) - self.assertAlmostEqual(na_utils.round_down(-5.567, '0.00'), -5.56) - self.assertAlmostEqual(na_utils.round_down(-5.567, '0.0'), -5.5) - self.assertAlmostEqual(na_utils.round_down(-5.567, '0'), -5) - - def test_iscsi_connection_properties(self): - - actual_properties = na_utils.get_iscsi_connection_properties( - fake.ISCSI_FAKE_LUN_ID, fake.ISCSI_FAKE_VOLUME, - fake.ISCSI_FAKE_IQN, fake.ISCSI_FAKE_ADDRESS, - fake.ISCSI_FAKE_PORT) - - actual_properties_mapped = actual_properties['data'] - - self.assertDictEqual(actual_properties_mapped, - fake.FC_ISCSI_TARGET_INFO_DICT) - - def test_iscsi_connection_lun_id_type_str(self): - FAKE_LUN_ID = '1' - - actual_properties = 
na_utils.get_iscsi_connection_properties( - FAKE_LUN_ID, fake.ISCSI_FAKE_VOLUME, fake.ISCSI_FAKE_IQN, - fake.ISCSI_FAKE_ADDRESS, fake.ISCSI_FAKE_PORT) - - actual_properties_mapped = actual_properties['data'] - - self.assertIs(int, type(actual_properties_mapped['target_lun'])) - - def test_iscsi_connection_lun_id_type_dict(self): - FAKE_LUN_ID = {'id': 'fake_id'} - - self.assertRaises(TypeError, na_utils.get_iscsi_connection_properties, - FAKE_LUN_ID, fake.ISCSI_FAKE_VOLUME, - fake.ISCSI_FAKE_IQN, fake.ISCSI_FAKE_ADDRESS, - fake.ISCSI_FAKE_PORT) - - def test_get_volume_extra_specs(self): - fake_extra_specs = {'fake_key': 'fake_value'} - fake_volume_type = {'extra_specs': fake_extra_specs} - fake_volume = {'volume_type_id': 'fake_volume_type_id'} - self.mock_object(context, 'get_admin_context') - self.mock_object(volume_types, 'get_volume_type', - return_value=fake_volume_type) - self.mock_object(na_utils, 'log_extra_spec_warnings') - - result = na_utils.get_volume_extra_specs(fake_volume) - - self.assertEqual(fake_extra_specs, result) - - def test_get_volume_extra_specs_no_type_id(self): - fake_volume = {} - self.mock_object(context, 'get_admin_context') - self.mock_object(volume_types, 'get_volume_type') - self.mock_object(na_utils, 'log_extra_spec_warnings') - - result = na_utils.get_volume_extra_specs(fake_volume) - - self.assertEqual({}, result) - - def test_get_volume_extra_specs_no_volume_type(self): - fake_volume = {'volume_type_id': 'fake_volume_type_id'} - self.mock_object(context, 'get_admin_context') - self.mock_object(volume_types, 'get_volume_type', return_value=None) - self.mock_object(na_utils, 'log_extra_spec_warnings') - - result = na_utils.get_volume_extra_specs(fake_volume) - - self.assertEqual({}, result) - - def test_log_extra_spec_warnings_obsolete_specs(self): - - mock_log = self.mock_object(na_utils.LOG, 'warning') - - na_utils.log_extra_spec_warnings({'netapp:raid_type': 'raid4'}) - - self.assertEqual(1, mock_log.call_count) - - def 
test_log_extra_spec_warnings_deprecated_specs(self): - - mock_log = self.mock_object(na_utils.LOG, 'warning') - - na_utils.log_extra_spec_warnings({'netapp_thick_provisioned': 'true'}) - - self.assertEqual(1, mock_log.call_count) - - def test_validate_qos_spec_none(self): - qos_spec = None - - # Just return without raising an exception. - na_utils.validate_qos_spec(qos_spec) - - def test_validate_qos_spec_keys_weirdly_cased(self): - qos_spec = {'mAxIopS': 33000} - - # Just return without raising an exception. - na_utils.validate_qos_spec(qos_spec) - - def test_validate_qos_spec_bad_key(self): - qos_spec = {'maxFlops': 33000} - - self.assertRaises(exception.Invalid, - na_utils.validate_qos_spec, - qos_spec) - - def test_validate_qos_spec_bad_key_combination(self): - qos_spec = {'maxIOPS': 33000, 'maxBPS': 10000000} - - self.assertRaises(exception.Invalid, - na_utils.validate_qos_spec, - qos_spec) - - def test_map_qos_spec_none(self): - qos_spec = None - - result = na_utils.map_qos_spec(qos_spec, fake.VOLUME) - - self.assertIsNone(result) - - def test_map_qos_spec_maxiops(self): - qos_spec = {'maxIOPs': 33000} - mock_get_name = self.mock_object(na_utils, 'get_qos_policy_group_name') - mock_get_name.return_value = 'fake_qos_policy' - expected = { - 'policy_name': 'fake_qos_policy', - 'max_throughput': '33000iops', - } - - result = na_utils.map_qos_spec(qos_spec, fake.VOLUME) - - self.assertEqual(expected, result) - - def test_map_qos_spec_maxiopspergib(self): - qos_spec = {'maxIOPSperGiB': 1000} - mock_get_name = self.mock_object(na_utils, 'get_qos_policy_group_name') - mock_get_name.return_value = 'fake_qos_policy' - expected = { - 'policy_name': 'fake_qos_policy', - 'max_throughput': '42000iops', - } - - result = na_utils.map_qos_spec(qos_spec, fake.VOLUME) - - self.assertEqual(expected, result) - - def test_map_qos_spec_maxbps(self): - qos_spec = {'maxBPS': 1000000} - mock_get_name = self.mock_object(na_utils, 'get_qos_policy_group_name') - 
mock_get_name.return_value = 'fake_qos_policy' - expected = { - 'policy_name': 'fake_qos_policy', - 'max_throughput': '1000000B/s', - } - - result = na_utils.map_qos_spec(qos_spec, fake.VOLUME) - - self.assertEqual(expected, result) - - def test_map_qos_spec_maxbpspergib(self): - qos_spec = {'maxBPSperGiB': 100000} - mock_get_name = self.mock_object(na_utils, 'get_qos_policy_group_name') - mock_get_name.return_value = 'fake_qos_policy' - expected = { - 'policy_name': 'fake_qos_policy', - 'max_throughput': '4200000B/s', - } - - result = na_utils.map_qos_spec(qos_spec, fake.VOLUME) - - self.assertEqual(expected, result) - - def test_map_qos_spec_no_key_present(self): - qos_spec = {} - mock_get_name = self.mock_object(na_utils, 'get_qos_policy_group_name') - mock_get_name.return_value = 'fake_qos_policy' - expected = { - 'policy_name': 'fake_qos_policy', - 'max_throughput': None, - } - - result = na_utils.map_qos_spec(qos_spec, fake.VOLUME) - - self.assertEqual(expected, result) - - def test_map_dict_to_lower(self): - original = {'UPperKey': 'Value'} - expected = {'upperkey': 'Value'} - - result = na_utils.map_dict_to_lower(original) - - self.assertEqual(expected, result) - - def test_get_qos_policy_group_name(self): - expected = 'openstack-%s' % fake.VOLUME_ID - - result = na_utils.get_qos_policy_group_name(fake.VOLUME) - - self.assertEqual(expected, result) - - def test_get_qos_policy_group_name_no_id(self): - volume = copy.deepcopy(fake.VOLUME) - del(volume['id']) - - result = na_utils.get_qos_policy_group_name(volume) - - self.assertIsNone(result) - - def test_get_qos_policy_group_name_from_info(self): - expected = 'openstack-%s' % fake.VOLUME_ID - result = na_utils.get_qos_policy_group_name_from_info( - fake.QOS_POLICY_GROUP_INFO) - - self.assertEqual(expected, result) - - def test_get_qos_policy_group_name_from_info_no_info(self): - - result = na_utils.get_qos_policy_group_name_from_info(None) - - self.assertIsNone(result) - - def 
test_get_qos_policy_group_name_from_legacy_info(self): - expected = fake.QOS_POLICY_GROUP_NAME - - result = na_utils.get_qos_policy_group_name_from_info( - fake.LEGACY_QOS_POLICY_GROUP_INFO) - - self.assertEqual(expected, result) - - def test_get_qos_policy_group_name_from_spec_info(self): - expected = 'openstack-%s' % fake.VOLUME_ID - - result = na_utils.get_qos_policy_group_name_from_info( - fake.QOS_POLICY_GROUP_INFO) - - self.assertEqual(expected, result) - - def test_get_qos_policy_group_name_from_none_qos_info(self): - expected = None - - result = na_utils.get_qos_policy_group_name_from_info( - fake.QOS_POLICY_GROUP_INFO_NONE) - - self.assertEqual(expected, result) - - def test_get_valid_qos_policy_group_info_exception_path(self): - mock_get_volume_type = self.mock_object(na_utils, - 'get_volume_type_from_volume') - mock_get_volume_type.side_effect = exception.VolumeTypeNotFound - expected = fake.QOS_POLICY_GROUP_INFO_NONE - - result = na_utils.get_valid_qos_policy_group_info(fake.VOLUME) - - self.assertEqual(expected, result) - - def test_get_valid_qos_policy_group_info_volume_type_none(self): - mock_get_volume_type = self.mock_object(na_utils, - 'get_volume_type_from_volume') - mock_get_volume_type.return_value = None - expected = fake.QOS_POLICY_GROUP_INFO_NONE - - result = na_utils.get_valid_qos_policy_group_info(fake.VOLUME) - - self.assertEqual(expected, result) - - def test_get_valid_qos_policy_group_info_no_info(self): - mock_get_volume_type = self.mock_object(na_utils, - 'get_volume_type_from_volume') - mock_get_volume_type.return_value = fake.VOLUME_TYPE - mock_get_legacy_qos_policy = self.mock_object(na_utils, - 'get_legacy_qos_policy') - mock_get_legacy_qos_policy.return_value = None - mock_get_valid_qos_spec_from_volume_type = self.mock_object( - na_utils, 'get_valid_backend_qos_spec_from_volume_type') - mock_get_valid_qos_spec_from_volume_type.return_value = None - self.mock_object(na_utils, 'check_for_invalid_qos_spec_combination') - expected = 
fake.QOS_POLICY_GROUP_INFO_NONE - - result = na_utils.get_valid_qos_policy_group_info(fake.VOLUME) - - self.assertEqual(expected, result) - - def test_get_valid_legacy_qos_policy_group_info(self): - mock_get_volume_type = self.mock_object(na_utils, - 'get_volume_type_from_volume') - mock_get_volume_type.return_value = fake.VOLUME_TYPE - mock_get_legacy_qos_policy = self.mock_object(na_utils, - 'get_legacy_qos_policy') - - mock_get_legacy_qos_policy.return_value = fake.LEGACY_QOS - mock_get_valid_qos_spec_from_volume_type = self.mock_object( - na_utils, 'get_valid_backend_qos_spec_from_volume_type') - mock_get_valid_qos_spec_from_volume_type.return_value = None - self.mock_object(na_utils, 'check_for_invalid_qos_spec_combination') - - result = na_utils.get_valid_qos_policy_group_info(fake.VOLUME) - - self.assertEqual(fake.LEGACY_QOS_POLICY_GROUP_INFO, result) - - def test_get_valid_spec_qos_policy_group_info(self): - mock_get_volume_type = self.mock_object(na_utils, - 'get_volume_type_from_volume') - mock_get_volume_type.return_value = fake.VOLUME_TYPE - mock_get_legacy_qos_policy = self.mock_object(na_utils, - 'get_legacy_qos_policy') - mock_get_legacy_qos_policy.return_value = None - mock_get_valid_qos_spec_from_volume_type = self.mock_object( - na_utils, 'get_valid_backend_qos_spec_from_volume_type') - mock_get_valid_qos_spec_from_volume_type.return_value =\ - fake.QOS_POLICY_GROUP_SPEC - self.mock_object(na_utils, 'check_for_invalid_qos_spec_combination') - - result = na_utils.get_valid_qos_policy_group_info(fake.VOLUME) - - self.assertEqual(fake.QOS_POLICY_GROUP_INFO, result) - - def test_get_valid_backend_qos_spec_from_volume_type_no_spec(self): - mock_get_spec = self.mock_object( - na_utils, 'get_backend_qos_spec_from_volume_type') - mock_get_spec.return_value = None - mock_validate = self.mock_object(na_utils, 'validate_qos_spec') - - result = na_utils.get_valid_backend_qos_spec_from_volume_type( - fake.VOLUME, fake.VOLUME_TYPE) - - self.assertIsNone(result) 
- self.assertEqual(0, mock_validate.call_count) - - def test_get_valid_backend_qos_spec_from_volume_type(self): - mock_get_spec = self.mock_object( - na_utils, 'get_backend_qos_spec_from_volume_type') - mock_get_spec.return_value = fake.QOS_SPEC - mock_validate = self.mock_object(na_utils, 'validate_qos_spec') - - result = na_utils.get_valid_backend_qos_spec_from_volume_type( - fake.VOLUME, fake.VOLUME_TYPE) - - self.assertEqual(fake.QOS_POLICY_GROUP_SPEC, result) - self.assertEqual(1, mock_validate.call_count) - - def test_get_backend_qos_spec_from_volume_type_no_qos_specs_id(self): - volume_type = copy.deepcopy(fake.VOLUME_TYPE) - del(volume_type['qos_specs_id']) - mock_get_context = self.mock_object(context, 'get_admin_context') - - result = na_utils.get_backend_qos_spec_from_volume_type(volume_type) - - self.assertIsNone(result) - self.assertEqual(0, mock_get_context.call_count) - - def test_get_backend_qos_spec_from_volume_type_no_qos_spec(self): - volume_type = fake.VOLUME_TYPE - self.mock_object(context, 'get_admin_context') - mock_get_specs = self.mock_object(qos_specs, 'get_qos_specs') - mock_get_specs.return_value = None - - result = na_utils.get_backend_qos_spec_from_volume_type(volume_type) - - self.assertIsNone(result) - - def test_get_backend_qos_spec_from_volume_type_with_frontend_spec(self): - volume_type = fake.VOLUME_TYPE - self.mock_object(context, 'get_admin_context') - mock_get_specs = self.mock_object(qos_specs, 'get_qos_specs') - mock_get_specs.return_value = fake.OUTER_FRONTEND_QOS_SPEC - - result = na_utils.get_backend_qos_spec_from_volume_type(volume_type) - - self.assertIsNone(result) - - def test_get_backend_qos_spec_from_volume_type_with_backend_spec(self): - volume_type = fake.VOLUME_TYPE - self.mock_object(context, 'get_admin_context') - mock_get_specs = self.mock_object(qos_specs, 'get_qos_specs') - mock_get_specs.return_value = fake.OUTER_BACKEND_QOS_SPEC - - result = na_utils.get_backend_qos_spec_from_volume_type(volume_type) - - 
self.assertEqual(fake.QOS_SPEC, result) - - def test_get_backend_qos_spec_from_volume_type_with_both_spec(self): - volume_type = fake.VOLUME_TYPE - self.mock_object(context, 'get_admin_context') - mock_get_specs = self.mock_object(qos_specs, 'get_qos_specs') - mock_get_specs.return_value = fake.OUTER_BOTH_QOS_SPEC - - result = na_utils.get_backend_qos_spec_from_volume_type(volume_type) - - self.assertEqual(fake.QOS_SPEC, result) - - def test_check_for_invalid_qos_spec_combination(self): - - self.assertRaises(exception.Invalid, - na_utils.check_for_invalid_qos_spec_combination, - fake.INVALID_QOS_POLICY_GROUP_INFO, - fake.VOLUME_TYPE) - - def test_get_legacy_qos_policy(self): - extra_specs = fake.LEGACY_EXTRA_SPECS - expected = {'policy_name': fake.QOS_POLICY_GROUP_NAME} - - result = na_utils.get_legacy_qos_policy(extra_specs) - - self.assertEqual(expected, result) - - def test_get_legacy_qos_policy_no_policy_name(self): - extra_specs = fake.EXTRA_SPECS - - result = na_utils.get_legacy_qos_policy(extra_specs) - - self.assertIsNone(result) - - -class OpenStackInfoTestCase(test.TestCase): - - UNKNOWN_VERSION = 'unknown version' - UNKNOWN_RELEASE = 'unknown release' - UNKNOWN_VENDOR = 'unknown vendor' - UNKNOWN_PLATFORM = 'unknown platform' - VERSION_STRING_RET_VAL = 'fake_version_1' - RELEASE_STRING_RET_VAL = 'fake_release_1' - PLATFORM_RET_VAL = 'fake_platform_1' - VERSION_INFO_VERSION = 'fake_version_2' - VERSION_INFO_RELEASE = 'fake_release_2' - RPM_INFO_VERSION = 'fake_version_3' - RPM_INFO_RELEASE = 'fake_release_3' - RPM_INFO_VENDOR = 'fake vendor 3' - PUTILS_RPM_RET_VAL = ('fake_version_3 fake_release_3 fake vendor 3', '') - NO_PKG_FOUND = ('', 'whatever') - PUTILS_DPKG_RET_VAL = ('epoch:upstream_version-debian_revision', '') - DEB_RLS = 'upstream_version-debian_revision' - DEB_VENDOR = 'debian_revision' - - def test_openstack_info_init(self): - info = na_utils.OpenStackInfo() - - self.assertEqual(self.UNKNOWN_VERSION, info._version) - 
self.assertEqual(self.UNKNOWN_RELEASE, info._release) - self.assertEqual(self.UNKNOWN_VENDOR, info._vendor) - self.assertEqual(self.UNKNOWN_PLATFORM, info._platform) - - @mock.patch.object(version.version_info, 'version_string', - mock.Mock(return_value=VERSION_STRING_RET_VAL)) - def test_update_version_from_version_string(self): - info = na_utils.OpenStackInfo() - info._update_version_from_version_string() - - self.assertEqual(self.VERSION_STRING_RET_VAL, info._version) - self.assertEqual(self.UNKNOWN_RELEASE, info._release) - self.assertEqual(self.UNKNOWN_VENDOR, info._vendor) - self.assertEqual(self.UNKNOWN_PLATFORM, info._platform) - - @mock.patch.object(version.version_info, 'version_string', - mock.Mock(side_effect=Exception)) - def test_xcption_in_update_version_from_version_string(self): - info = na_utils.OpenStackInfo() - info._update_version_from_version_string() - - self.assertEqual(self.UNKNOWN_VERSION, info._version) - self.assertEqual(self.UNKNOWN_RELEASE, info._release) - self.assertEqual(self.UNKNOWN_VENDOR, info._vendor) - self.assertEqual(self.UNKNOWN_PLATFORM, info._platform) - - @mock.patch.object(version.version_info, 'release_string', - mock.Mock(return_value=RELEASE_STRING_RET_VAL)) - def test_update_release_from_release_string(self): - info = na_utils.OpenStackInfo() - info._update_release_from_release_string() - - self.assertEqual(self.UNKNOWN_VERSION, info._version) - self.assertEqual(self.RELEASE_STRING_RET_VAL, info._release) - self.assertEqual(self.UNKNOWN_VENDOR, info._vendor) - self.assertEqual(self.UNKNOWN_PLATFORM, info._platform) - - @mock.patch.object(version.version_info, 'release_string', - mock.Mock(side_effect=Exception)) - def test_xcption_in_update_release_from_release_string(self): - info = na_utils.OpenStackInfo() - info._update_release_from_release_string() - - self.assertEqual(self.UNKNOWN_VERSION, info._version) - self.assertEqual(self.UNKNOWN_RELEASE, info._release) - self.assertEqual(self.UNKNOWN_VENDOR, info._vendor) 
- self.assertEqual(self.UNKNOWN_PLATFORM, info._platform) - - @mock.patch.object(platform, 'platform', - mock.Mock(return_value=PLATFORM_RET_VAL)) - def test_update_platform(self): - info = na_utils.OpenStackInfo() - info._update_platform() - - self.assertEqual(self.UNKNOWN_VERSION, info._version) - self.assertEqual(self.UNKNOWN_RELEASE, info._release) - self.assertEqual(self.UNKNOWN_VENDOR, info._vendor) - self.assertEqual(self.PLATFORM_RET_VAL, info._platform) - - @mock.patch.object(platform, 'platform', - mock.Mock(side_effect=Exception)) - def test_xcption_in_update_platform(self): - info = na_utils.OpenStackInfo() - info._update_platform() - - self.assertEqual(self.UNKNOWN_VERSION, info._version) - self.assertEqual(self.UNKNOWN_RELEASE, info._release) - self.assertEqual(self.UNKNOWN_VENDOR, info._vendor) - self.assertEqual(self.UNKNOWN_PLATFORM, info._platform) - - @mock.patch.object(na_utils.OpenStackInfo, '_get_version_info_version', - mock.Mock(return_value=VERSION_INFO_VERSION)) - @mock.patch.object(na_utils.OpenStackInfo, '_get_version_info_release', - mock.Mock(return_value=VERSION_INFO_RELEASE)) - def test_update_info_from_version_info(self): - info = na_utils.OpenStackInfo() - info._update_info_from_version_info() - - self.assertEqual(self.VERSION_INFO_VERSION, info._version) - self.assertEqual(self.VERSION_INFO_RELEASE, info._release) - self.assertEqual(self.UNKNOWN_VENDOR, info._vendor) - self.assertEqual(self.UNKNOWN_PLATFORM, info._platform) - - @mock.patch.object(na_utils.OpenStackInfo, '_get_version_info_version', - mock.Mock(return_value='')) - @mock.patch.object(na_utils.OpenStackInfo, '_get_version_info_release', - mock.Mock(return_value=None)) - def test_no_info_from_version_info(self): - info = na_utils.OpenStackInfo() - info._update_info_from_version_info() - - self.assertEqual(self.UNKNOWN_VERSION, info._version) - self.assertEqual(self.UNKNOWN_RELEASE, info._release) - self.assertEqual(self.UNKNOWN_VENDOR, info._vendor) - 
self.assertEqual(self.UNKNOWN_PLATFORM, info._platform) - - @mock.patch.object(na_utils.OpenStackInfo, '_get_version_info_version', - mock.Mock(return_value=VERSION_INFO_VERSION)) - @mock.patch.object(na_utils.OpenStackInfo, '_get_version_info_release', - mock.Mock(side_effect=Exception)) - def test_xcption_in_info_from_version_info(self): - info = na_utils.OpenStackInfo() - info._update_info_from_version_info() - - self.assertEqual(self.VERSION_INFO_VERSION, info._version) - self.assertEqual(self.UNKNOWN_RELEASE, info._release) - self.assertEqual(self.UNKNOWN_VENDOR, info._vendor) - self.assertEqual(self.UNKNOWN_PLATFORM, info._platform) - - @mock.patch.object(putils, 'execute', - mock.Mock(return_value=PUTILS_RPM_RET_VAL)) - def test_update_info_from_rpm(self): - info = na_utils.OpenStackInfo() - found_package = info._update_info_from_rpm() - - self.assertEqual(self.RPM_INFO_VERSION, info._version) - self.assertEqual(self.RPM_INFO_RELEASE, info._release) - self.assertEqual(self.RPM_INFO_VENDOR, info._vendor) - self.assertEqual(self.UNKNOWN_PLATFORM, info._platform) - self.assertTrue(found_package) - - @mock.patch.object(putils, 'execute', - mock.Mock(return_value=NO_PKG_FOUND)) - def test_update_info_from_rpm_no_pkg_found(self): - info = na_utils.OpenStackInfo() - found_package = info._update_info_from_rpm() - - self.assertEqual(self.UNKNOWN_VERSION, info._version) - self.assertEqual(self.UNKNOWN_RELEASE, info._release) - self.assertEqual(self.UNKNOWN_VENDOR, info._vendor) - self.assertEqual(self.UNKNOWN_PLATFORM, info._platform) - self.assertFalse(found_package) - - @mock.patch.object(putils, 'execute', - mock.Mock(side_effect=Exception)) - def test_xcption_in_update_info_from_rpm(self): - info = na_utils.OpenStackInfo() - found_package = info._update_info_from_rpm() - - self.assertEqual(self.UNKNOWN_VERSION, info._version) - self.assertEqual(self.UNKNOWN_RELEASE, info._release) - self.assertEqual(self.UNKNOWN_VENDOR, info._vendor) - 
self.assertEqual(self.UNKNOWN_PLATFORM, info._platform) - self.assertFalse(found_package) - - @mock.patch.object(putils, 'execute', - mock.Mock(return_value=PUTILS_DPKG_RET_VAL)) - def test_update_info_from_dpkg(self): - info = na_utils.OpenStackInfo() - found_package = info._update_info_from_dpkg() - - self.assertEqual(self.UNKNOWN_VERSION, info._version) - self.assertEqual(self.DEB_RLS, info._release) - self.assertEqual(self.DEB_VENDOR, info._vendor) - self.assertEqual(self.UNKNOWN_PLATFORM, info._platform) - self.assertTrue(found_package) - - @mock.patch.object(putils, 'execute', - mock.Mock(return_value=NO_PKG_FOUND)) - def test_update_info_from_dpkg_no_pkg_found(self): - info = na_utils.OpenStackInfo() - found_package = info._update_info_from_dpkg() - - self.assertEqual(self.UNKNOWN_VERSION, info._version) - self.assertEqual(self.UNKNOWN_RELEASE, info._release) - self.assertEqual(self.UNKNOWN_VENDOR, info._vendor) - self.assertEqual(self.UNKNOWN_PLATFORM, info._platform) - self.assertFalse(found_package) - - @mock.patch.object(putils, 'execute', - mock.Mock(side_effect=Exception)) - def test_xcption_in_update_info_from_dpkg(self): - info = na_utils.OpenStackInfo() - found_package = info._update_info_from_dpkg() - - self.assertEqual(self.UNKNOWN_VERSION, info._version) - self.assertEqual(self.UNKNOWN_RELEASE, info._release) - self.assertEqual(self.UNKNOWN_VENDOR, info._vendor) - self.assertEqual(self.UNKNOWN_PLATFORM, info._platform) - self.assertFalse(found_package) - - @mock.patch.object(na_utils.OpenStackInfo, - '_update_version_from_version_string', mock.Mock()) - @mock.patch.object(na_utils.OpenStackInfo, - '_update_release_from_release_string', mock.Mock()) - @mock.patch.object(na_utils.OpenStackInfo, - '_update_platform', mock.Mock()) - @mock.patch.object(na_utils.OpenStackInfo, - '_update_info_from_version_info', mock.Mock()) - @mock.patch.object(na_utils.OpenStackInfo, - '_update_info_from_rpm', mock.Mock(return_value=True)) - 
@mock.patch.object(na_utils.OpenStackInfo, - '_update_info_from_dpkg') - def test_update_openstack_info_rpm_pkg_found(self, mock_updt_from_dpkg): - info = na_utils.OpenStackInfo() - info._update_openstack_info() - - self.assertFalse(mock_updt_from_dpkg.called) - - @mock.patch.object(na_utils.OpenStackInfo, - '_update_version_from_version_string', mock.Mock()) - @mock.patch.object(na_utils.OpenStackInfo, - '_update_release_from_release_string', mock.Mock()) - @mock.patch.object(na_utils.OpenStackInfo, - '_update_platform', mock.Mock()) - @mock.patch.object(na_utils.OpenStackInfo, - '_update_info_from_version_info', mock.Mock()) - @mock.patch.object(na_utils.OpenStackInfo, - '_update_info_from_rpm', mock.Mock(return_value=False)) - @mock.patch.object(na_utils.OpenStackInfo, - '_update_info_from_dpkg') - def test_update_openstack_info_rpm_pkg_not_found(self, - mock_updt_from_dpkg): - info = na_utils.OpenStackInfo() - info._update_openstack_info() - - self.assertTrue(mock_updt_from_dpkg.called) - - -@ddt.ddt -class FeaturesTestCase(test.TestCase): - - def setUp(self): - super(FeaturesTestCase, self).setUp() - self.features = na_utils.Features() - - def test_init(self): - self.assertSetEqual(set(), self.features.defined_features) - - def test_add_feature_default(self): - self.features.add_feature('FEATURE_1') - - self.assertTrue(self.features.FEATURE_1.supported) - self.assertIn('FEATURE_1', self.features.defined_features) - - @ddt.data(True, False) - def test_add_feature(self, value): - self.features.add_feature('FEATURE_2', value) - - self.assertEqual(value, bool(self.features.FEATURE_2)) - self.assertEqual(value, self.features.FEATURE_2.supported) - self.assertIsNone(self.features.FEATURE_2.minimum_version) - self.assertIn('FEATURE_2', self.features.defined_features) - - @ddt.data((True, '1'), (False, 2), (False, None), (True, None)) - @ddt.unpack - def test_add_feature_min_version(self, enabled, min_version): - self.features.add_feature('FEATURE_2', enabled, - 
min_version=min_version) - - self.assertEqual(enabled, bool(self.features.FEATURE_2)) - self.assertEqual(enabled, self.features.FEATURE_2.supported) - self.assertEqual(min_version, self.features.FEATURE_2.minimum_version) - self.assertIn('FEATURE_2', self.features.defined_features) - - @ddt.data('True', 'False', 0, 1, 1.0, None, [], {}, (True,)) - def test_add_feature_type_error(self, value): - self.assertRaises(TypeError, - self.features.add_feature, - 'FEATURE_3', - value) - self.assertNotIn('FEATURE_3', self.features.defined_features) - - def test_get_attr_missing(self): - self.assertRaises(AttributeError, getattr, self.features, 'FEATURE_4') - - -@ddt.ddt -class BitSetTestCase(test.TestCase): - - def test_default(self): - self.assertEqual(na_utils.BitSet(0), na_utils.BitSet()) - - def test_set(self): - bitset = na_utils.BitSet(0) - bitset.set(16) - - self.assertEqual(na_utils.BitSet(1 << 16), bitset) - - def test_unset(self): - bitset = na_utils.BitSet(1 << 16) - bitset.unset(16) - - self.assertEqual(na_utils.BitSet(0), bitset) - - def test_is_set(self): - bitset = na_utils.BitSet(1 << 16) - - self.assertTrue(bitset.is_set(16)) - - def test_not_equal(self): - set1 = na_utils.BitSet(1 << 15) - set2 = na_utils.BitSet(1 << 16) - - self.assertNotEqual(set1, set2) - - def test_repr(self): - raw_val = 1 << 16 - actual = repr(na_utils.BitSet(raw_val)) - expected = str(raw_val) - - self.assertEqual(actual, expected) - - def test_str(self): - raw_val = 1 << 16 - actual = str(na_utils.BitSet(raw_val)) - expected = bin(raw_val) - - self.assertEqual(actual, expected) - - def test_int(self): - val = 1 << 16 - actual = int(int(na_utils.BitSet(val))) - - self.assertEqual(val, actual) - - def test_and(self): - actual = na_utils.BitSet(1 << 16 | 1 << 15) - actual &= 1 << 16 - - self.assertEqual(na_utils.BitSet(1 << 16), actual) - - def test_or(self): - actual = na_utils.BitSet() - actual |= 1 << 16 - - self.assertEqual(na_utils.BitSet(1 << 16), actual) - - def 
test_invert(self): - actual = na_utils.BitSet(1 << 16) - actual = ~actual - - self.assertEqual(~(1 << 16), actual) - - def test_xor(self): - actual = na_utils.BitSet(1 << 16) - actual ^= 1 << 16 - - self.assertEqual(na_utils.BitSet(), actual) - - def test_lshift(self): - actual = na_utils.BitSet(1) - actual <<= 16 - - self.assertEqual(na_utils.BitSet(1 << 16), actual) - - def test_rshift(self): - actual = na_utils.BitSet(1 << 16) - actual >>= 16 - - self.assertEqual(na_utils.BitSet(1), actual) diff --git a/cinder/tests/unit/volume/drivers/nexenta/__init__.py b/cinder/tests/unit/volume/drivers/nexenta/__init__.py deleted file mode 100644 index e69de29bb..000000000 diff --git a/cinder/tests/unit/volume/drivers/nexenta/test_nexenta.py b/cinder/tests/unit/volume/drivers/nexenta/test_nexenta.py deleted file mode 100644 index 3b805d412..000000000 --- a/cinder/tests/unit/volume/drivers/nexenta/test_nexenta.py +++ /dev/null @@ -1,693 +0,0 @@ -# Copyright 2016 Nexenta Systems, Inc. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
-""" -Unit tests for OpenStack Cinder volume driver -""" - -import mock -from mock import patch -from oslo_utils import units - -from cinder import context -from cinder import db -from cinder import exception -from cinder import test -from cinder.volume import configuration as conf -from cinder.volume.drivers.nexenta import iscsi -from cinder.volume.drivers.nexenta import jsonrpc -from cinder.volume.drivers.nexenta import nfs -from cinder.volume.drivers.nexenta import utils - - -class TestNexentaISCSIDriver(test.TestCase): - TEST_VOLUME_NAME = 'volume1' - TEST_VOLUME_NAME2 = 'volume2' - TEST_VOLUME_NAME3 = 'volume3' - TEST_SNAPSHOT_NAME = 'snapshot1' - TEST_VOLUME_REF = { - 'name': TEST_VOLUME_NAME, - 'size': 1, - 'id': '1', - 'status': 'available' - } - TEST_VOLUME_REF2 = { - 'name': TEST_VOLUME_NAME2, - 'size': 1, - 'id': '2', - 'status': 'in-use' - } - TEST_VOLUME_REF3 = { - 'name': TEST_VOLUME_NAME3, - 'size': 3, - 'id': '3', - 'status': 'in-use' - } - TEST_SNAPSHOT_REF = { - 'name': TEST_SNAPSHOT_NAME, - 'volume_name': TEST_VOLUME_NAME, - 'volume_size': 1, - } - - def __init__(self, method): - super(TestNexentaISCSIDriver, self).__init__(method) - - def setUp(self): - super(TestNexentaISCSIDriver, self).setUp() - self.cfg = mock.Mock(spec=conf.Configuration) - self.ctxt = context.get_admin_context() - self.cfg.nexenta_dataset_description = '' - self.cfg.nexenta_host = '1.1.1.1' - self.cfg.nexenta_user = 'admin' - self.cfg.nexenta_password = 'nexenta' - self.cfg.nexenta_volume = 'cinder' - self.cfg.nexenta_rest_port = 2000 - self.cfg.nexenta_rest_protocol = 'http' - self.cfg.nexenta_iscsi_target_portal_port = 3260 - self.cfg.nexenta_target_prefix = 'iqn:' - self.cfg.nexenta_target_group_prefix = 'cinder/' - self.cfg.nexenta_blocksize = '8K' - self.cfg.nexenta_sparse = True - self.cfg.nexenta_dataset_compression = 'on' - self.cfg.nexenta_dataset_dedup = 'off' - self.cfg.nexenta_rrmgr_compression = 1 - self.cfg.nexenta_rrmgr_tcp_buf_size = 1024 - 
self.cfg.nexenta_rrmgr_connections = 2 - self.cfg.reserved_percentage = 20 - self.nms_mock = mock.Mock() - for mod in ['volume', 'zvol', 'iscsitarget', 'appliance', - 'stmf', 'scsidisk', 'snapshot']: - setattr(self.nms_mock, mod, mock.Mock()) - self.mock_object(jsonrpc, 'NexentaJSONProxy', - return_value=self.nms_mock) - self.drv = iscsi.NexentaISCSIDriver( - configuration=self.cfg) - self.drv.db = db - self.drv.do_setup(self.ctxt) - - def test_check_do_setup(self): - self.assertEqual('http', self.drv.nms_protocol) - - def test_check_for_setup_error(self): - self.nms_mock.volume.object_exists.return_value = False - self.assertRaises(LookupError, self.drv.check_for_setup_error) - - def test_local_path(self): - self.assertRaises(NotImplementedError, self.drv.local_path, '') - - def test_create_volume(self): - self.drv.create_volume(self.TEST_VOLUME_REF) - self.nms_mock.zvol.create.assert_called_with( - 'cinder/%s' % self.TEST_VOLUME_REF['name'], '1G', - self.cfg.nexenta_blocksize, self.cfg.nexenta_sparse) - - def test_delete_volume(self): - self.drv._collect_garbage = lambda vol: vol - self.nms_mock.zvol.get_child_props.return_value = ( - {'origin': 'cinder/volume0@snapshot'}) - self.drv.delete_volume(self.TEST_VOLUME_REF) - self.nms_mock.zvol.get_child_props.assert_called_with( - 'cinder/volume1', 'origin') - self.nms_mock.zvol.destroy.assert_called_with( - 'cinder/volume1', '') - - self.nms_mock.zvol.get_child_props.assert_called_with( - 'cinder/volume1', 'origin') - self.nms_mock.zvol.destroy.assert_called_with('cinder/volume1', '') - self.drv.delete_volume(self.TEST_VOLUME_REF) - - self.nms_mock.zvol.get_child_props.assert_called_with( - 'cinder/volume1', 'origin') - - def test_create_cloned_volume(self): - vol = self.TEST_VOLUME_REF2 - src_vref = self.TEST_VOLUME_REF - snapshot = { - 'volume_name': src_vref['name'], - 'name': 'cinder-clone-snapshot-%s' % vol['id'], - } - self.drv.create_cloned_volume(vol, src_vref) - 
self.nms_mock.zvol.create_snapshot.assert_called_with( - 'cinder/%s' % src_vref['name'], snapshot['name'], '') - self.nms_mock.zvol.clone.assert_called_with( - 'cinder/%s@%s' % (src_vref['name'], snapshot['name']), - 'cinder/%s' % vol['name']) - - def test_migrate_volume(self): - self.drv._collect_garbage = lambda vol: vol - volume = self.TEST_VOLUME_REF - host = { - 'capabilities': { - 'vendor_name': 'Nexenta', - 'location_info': 'NexentaISCSIDriver:1.1.1.1:cinder', - 'free_capacity_gb': 1, - 'iscsi_target_portal_port': 3260, - 'nms_url': 'http://admin:password@1.1.1.1:2000' - } - } - snapshot = { - 'volume_name': volume['name'], - 'name': 'cinder-migrate-snapshot-%s' % volume['id'], - } - volume_name = 'cinder/%s' % volume['name'] - - self.nms_mock.appliance.ssh_list_bindings.return_value = ( - {'0': [True, True, True, '1.1.1.1']}) - self.nms_mock.zvol.get_child_props.return_value = None - - self.drv.migrate_volume(None, volume, host) - self.nms_mock.zvol.create_snapshot.assert_called_with( - 'cinder/%s' % volume['name'], snapshot['name'], '') - - src = '%(volume)s/%(zvol)s@%(snapshot)s' % { - 'volume': 'cinder', - 'zvol': volume['name'], - 'snapshot': snapshot['name'] - } - dst = '1.1.1.1:cinder' - cmd = ' '.join(['rrmgr -s zfs -c 1 -q -e -w 1024 -n 2', src, dst]) - - self.nms_mock.appliance.execute.assert_called_with(cmd) - - snapshot_name = 'cinder/%(volume)s@%(snapshot)s' % { - 'volume': volume['name'], - 'snapshot': snapshot['name'] - } - self.nms_mock.snapshot.destroy.assert_called_with(snapshot_name, '') - self.nms_mock.zvol.destroy.assert_called_with(volume_name, '') - self.nms_mock.snapshot.destroy.assert_called_with( - 'cinder/%(volume)s@%(snapshot)s' % { - 'volume': volume['name'], - 'snapshot': snapshot['name'] - }, '') - - def test_create_snapshot(self): - self.drv.create_snapshot(self.TEST_SNAPSHOT_REF) - self.nms_mock.zvol.create_snapshot.assert_called_with( - 'cinder/volume1', 'snapshot1', '') - - def test_create_volume_from_snapshot(self): - 
self._create_volume_db_entry() - self.drv.create_volume_from_snapshot(self.TEST_VOLUME_REF3, - self.TEST_SNAPSHOT_REF) - self.nms_mock.zvol.clone.assert_called_with( - 'cinder/volume1@snapshot1', 'cinder/volume3') - self.nms_mock.zvol.set_child_prop.assert_called_with( - 'cinder/volume3', 'volsize', '3G') - - def test_delete_snapshot(self): - self._create_volume_db_entry() - self.drv._collect_garbage = lambda vol: vol - self.drv.delete_snapshot(self.TEST_SNAPSHOT_REF) - self.nms_mock.snapshot.destroy.assert_called_with( - 'cinder/volume1@snapshot1', '') - - # Check that exception not raised if snapshot does not exist - self.drv.delete_snapshot(self.TEST_SNAPSHOT_REF) - self.nms_mock.snapshot.destroy.side_effect = ( - exception.NexentaException('does not exist')) - self.nms_mock.snapshot.destroy.assert_called_with( - 'cinder/volume1@snapshot1', '') - - def _mock_all_export_methods(self, fail=False): - self.assertTrue(self.nms_mock.stmf.list_targets.called) - self.nms_mock.iscsitarget.create_target.assert_called_with( - {'target_name': 'iqn:1.1.1.1-0'}) - self.nms_mock.stmf.list_targetgroups() - zvol_name = 'cinder/volume1' - self.nms_mock.stmf.create_targetgroup.assert_called_with( - 'cinder/1.1.1.1-0') - self.nms_mock.stmf.list_targetgroup_members.assert_called_with( - 'cinder/1.1.1.1-0') - self.nms_mock.scsidisk.lu_exists.assert_called_with(zvol_name) - self.nms_mock.scsidisk.create_lu.assert_called_with(zvol_name, {}) - - def _stub_all_export_methods(self): - self.nms_mock.scsidisk.lu_exists.return_value = False - self.nms_mock.scsidisk.lu_shared.side_effect = ( - exception.NexentaException(['does not exist for zvol'])) - self.nms_mock.scsidisk.create_lu.return_value = {'lun': 0} - self.nms_mock.stmf.list_targets.return_value = [] - self.nms_mock.stmf.list_targetgroups.return_value = [] - self.nms_mock.stmf.list_targetgroup_members.return_value = [] - self.nms_mock._get_target_name.return_value = ['iqn:1.1.1.1-0'] - 
self.nms_mock.iscsitarget.create_targetgroup.return_value = ({ - 'target_name': 'cinder/1.1.1.1-0'}) - self.nms_mock.scsidisk.add_lun_mapping_entry.return_value = {'lun': 0} - - def test_create_export(self): - self._stub_all_export_methods() - retval = self.drv.create_export({}, self.TEST_VOLUME_REF, None) - self._mock_all_export_methods() - location = '%(host)s:%(port)s,1 %(name)s %(lun)s' % { - 'host': self.cfg.nexenta_host, - 'port': self.cfg.nexenta_iscsi_target_portal_port, - 'name': 'iqn:1.1.1.1-0', - 'lun': '0' - } - self.assertEqual({'provider_location': location}, retval) - - def test_ensure_export(self): - self._stub_all_export_methods() - self.drv.ensure_export({}, self.TEST_VOLUME_REF) - self._mock_all_export_methods() - - def test_remove_export(self): - self.nms_mock.stmf.list_targets.return_value = ['iqn:1.1.1.1-0'] - self.nms_mock.stmf.list_targetgroups.return_value = ( - ['cinder/1.1.1.1-0']) - self.nms_mock.stmf.list_targetgroup_members.return_value = ( - ['iqn:1.1.1.1-0']) - self.drv.remove_export({}, self.TEST_VOLUME_REF) - self.assertTrue(self.nms_mock.stmf.list_targets.called) - self.assertTrue(self.nms_mock.stmf.list_targetgroups.called) - self.nms_mock.scsidisk.delete_lu.assert_called_with('cinder/volume1') - - def test_get_volume_stats(self): - stats = {'size': '5368709120G', - 'used': '5368709120G', - 'available': '5368709120G', - 'health': 'ONLINE'} - self.nms_mock.volume.get_child_props.return_value = stats - stats = self.drv.get_volume_stats(True) - self.assertEqual('iSCSI', stats['storage_protocol']) - self.assertEqual(5368709120.0, stats['total_capacity_gb']) - self.assertEqual(5368709120.0, stats['free_capacity_gb']) - self.assertEqual(20, stats['reserved_percentage']) - self.assertFalse(stats['QoS_support']) - - def test_collect_garbage__snapshot(self): - name = 'cinder/v1@s1' - self.drv._mark_as_garbage(name) - self.nms_mock.zvol.get_child_props.return_value = None - self.drv._collect_garbage(name) - 
self.nms_mock.snapshot.destroy.assert_called_with(name, '') - self.assertNotIn(name, self.drv._needless_objects) - - def test_collect_garbage__volume(self): - name = 'cinder/v1' - self.drv._mark_as_garbage(name) - self.nms_mock.zvol.get_child_props.return_value = None - self.drv._collect_garbage(name) - self.nms_mock.zvol.destroy.assert_called_with(name, '') - self.assertNotIn(name, self.drv._needless_objects) - - def _create_volume_db_entry(self): - vol = { - 'id': '1', - 'size': 1, - 'status': 'available', - 'provider_location': self.TEST_VOLUME_NAME - } - return db.volume_create(self.ctxt, vol)['id'] - - -class TestNexentaNfsDriver(test.TestCase): - TEST_VOLUME_NAME = 'volume1' - TEST_VOLUME_NAME2 = 'volume2' - TEST_VOLUME_NAME3 = 'volume3' - TEST_SNAPSHOT_NAME = 'snapshot1' - TEST_VOLUME_REF = { - 'name': TEST_VOLUME_NAME, - 'size': 1, - 'id': '1', - 'status': 'available' - } - TEST_VOLUME_REF2 = { - 'name': TEST_VOLUME_NAME2, - 'size': 2, - 'id': '2', - 'status': 'in-use' - } - TEST_VOLUME_REF3 = { - 'name': TEST_VOLUME_NAME2, - 'id': '2', - 'status': 'in-use' - } - TEST_SNAPSHOT_REF = { - 'name': TEST_SNAPSHOT_NAME, - 'volume_name': TEST_VOLUME_NAME, - 'volume_size': 1, - 'volume_id': 1 - } - - TEST_EXPORT1 = 'host1:/volumes/stack/share' - TEST_NMS1 = 'http://admin:nexenta@host1:2000' - - TEST_EXPORT2 = 'host2:/volumes/stack/share' - TEST_NMS2 = 'http://admin:nexenta@host2:2000' - - TEST_EXPORT2_OPTIONS = '-o intr' - - TEST_FILE_NAME = 'test.txt' - TEST_SHARES_CONFIG_FILE = '/etc/cinder/nexenta-shares.conf' - - TEST_SHARE_SVC = 'svc:/network/nfs/server:default' - - TEST_SHARE_OPTS = { - 'read_only': '', - 'read_write': '*', - 'recursive': 'true', - 'anonymous_rw': 'true', - 'extra_options': 'anon=0', - 'root': 'nobody' - } - - def _create_volume_db_entry(self): - vol = { - 'id': '1', - 'size': 1, - 'status': 'available', - 'provider_location': self.TEST_EXPORT1 - } - self.drv.share2nms = {self.TEST_EXPORT1: self.nms_mock} - return db.volume_create(self.ctxt, 
vol)['id'] - - def setUp(self): - super(TestNexentaNfsDriver, self).setUp() - self.ctxt = context.get_admin_context() - self.cfg = mock.Mock(spec=conf.Configuration) - self.cfg.nexenta_dataset_description = '' - self.cfg.nexenta_shares_config = None - self.cfg.nexenta_mount_point_base = '$state_path/mnt' - self.cfg.nexenta_sparsed_volumes = True - self.cfg.nexenta_dataset_compression = 'on' - self.cfg.nexenta_dataset_dedup = 'off' - self.cfg.nexenta_rrmgr_compression = 1 - self.cfg.nexenta_rrmgr_tcp_buf_size = 1024 - self.cfg.nexenta_rrmgr_connections = 2 - self.cfg.nfs_mount_point_base = '/mnt/test' - self.cfg.nfs_mount_options = None - self.cfg.nas_mount_options = None - self.cfg.nexenta_nms_cache_volroot = False - self.cfg.nfs_mount_attempts = 3 - self.cfg.reserved_percentage = 20 - self.cfg.max_over_subscription_ratio = 20.0 - self.nms_mock = mock.Mock() - for mod in ('appliance', 'folder', 'server', 'volume', 'netstorsvc', - 'snapshot', 'netsvc'): - setattr(self.nms_mock, mod, mock.Mock()) - self.nms_mock.__hash__ = lambda *_, **__: 1 - self.mock_object(jsonrpc, 'NexentaJSONProxy', - return_value=self.nms_mock) - self.drv = nfs.NexentaNfsDriver(configuration=self.cfg) - self.drv.shares = {} - self.drv.share2nms = {} - - def test_check_for_setup_error(self): - self.drv.share2nms = { - 'host1:/volumes/stack/share': self.nms_mock - } - - self.nms_mock.server.get_prop.return_value = '/volumes' - self.nms_mock.volume.object_exists.return_value = True - self.nms_mock.folder.object_exists.return_value = True - share_opts = { - 'read_write': '*', - 'read_only': '', - 'root': 'nobody', - 'extra_options': 'anon=0', - 'recursive': 'true', - 'anonymous_rw': 'true', - } - self.nms_mock.netstorsvc.get_shared_folders.return_value = '' - self.nms_mock.folder.get_child_props.return_value = { - 'available': 1, 'used': 1} - self.drv.check_for_setup_error() - self.nms_mock.netstorsvc.share_folder.assert_called_with( - 'svc:/network/nfs/server:default', 'stack/share', share_opts) 
- - self.nms_mock.server.get_prop.return_value = '/volumes' - self.nms_mock.volume.object_exists.return_value = False - - self.assertRaises(LookupError, self.drv.check_for_setup_error) - - self.nms_mock.server.get_prop.return_value = '/volumes' - self.nms_mock.volume.object_exists.return_value = True - self.nms_mock.folder.object_exists.return_value = False - - self.assertRaises(LookupError, self.drv.check_for_setup_error) - - def test_initialize_connection(self): - self.drv.shares = { - self.TEST_EXPORT1: None - } - volume = { - 'provider_location': self.TEST_EXPORT1, - 'name': 'volume' - } - result = self.drv.initialize_connection(volume, None) - self.assertEqual('%s/volume' % self.TEST_EXPORT1, - result['data']['export']) - - def test_do_create_volume(self): - volume = { - 'provider_location': self.TEST_EXPORT1, - 'size': 1, - 'name': 'volume-1' - } - self.drv.shares = {self.TEST_EXPORT1: None} - self.drv.share2nms = {self.TEST_EXPORT1: self.nms_mock} - - compression = self.cfg.nexenta_dataset_compression - self.nms_mock.folder.get_child_props.return_value = { - 'available': 1, 'used': 1} - self.nms_mock.server.get_prop.return_value = '/volumes' - self.nms_mock.netsvc.get_confopts('svc:/network/nfs/server:default', - 'configure').AndReturn({ - 'nfs_server_versmax': { - 'current': u'3'}}) - self.nms_mock.netsvc.get_confopts.return_value = { - 'nfs_server_versmax': {'current': 4}} - self.nms_mock._ensure_share_mounted.return_value = True - self.drv._do_create_volume(volume) - self.nms_mock.folder.create_with_props.assert_called_with( - 'stack', 'share/volume-1', {'compression': compression}) - self.nms_mock.netstorsvc.share_folder.assert_called_with( - self.TEST_SHARE_SVC, 'stack/share/volume-1', self.TEST_SHARE_OPTS) - mock_chmod = self.nms_mock.appliance.execute - mock_chmod.assert_called_with( - 'chmod ugo+rw /volumes/stack/share/volume-1/volume') - mock_truncate = self.nms_mock.appliance.execute - mock_truncate.side_effect = exception.NexentaException() - 
self.nms_mock.server.get_prop.return_value = '/volumes' - self.nms_mock.folder.get_child_props.return_value = { - 'available': 1, 'used': 1} - self.assertRaises(exception.NexentaException, - self.drv._do_create_volume, volume) - - def test_create_sparsed_file(self): - self.drv._create_sparsed_file(self.nms_mock, '/tmp/path', 1) - self.nms_mock.appliance.execute.assert_called_with( - 'truncate --size 1G /tmp/path') - - def test_create_regular_file(self): - self.drv._create_regular_file(self.nms_mock, '/tmp/path', 1) - self.nms_mock.appliance.execute.assert_called_with( - 'dd if=/dev/zero of=/tmp/path bs=1M count=1024') - - @patch('cinder.volume.drivers.remotefs.' - 'RemoteFSDriver._ensure_shares_mounted') - @patch('cinder.volume.drivers.nexenta.nfs.' - 'NexentaNfsDriver._get_volroot') - @patch('cinder.volume.drivers.nexenta.nfs.' - 'NexentaNfsDriver._get_nfs_server_version') - def test_create_larger_volume_from_snap(self, version, volroot, ensure): - version.return_value = 4 - volroot.return_value = 'volroot' - self._create_volume_db_entry() - self.drv.create_volume_from_snapshot(self.TEST_VOLUME_REF2, - self.TEST_SNAPSHOT_REF) - self.nms_mock.appliance.execute.assert_called_with( - 'truncate --size 2G /volumes/stack/share/volume2/volume') - - @patch('cinder.volume.drivers.remotefs.' - 'RemoteFSDriver._ensure_shares_mounted') - @patch('cinder.volume.drivers.nexenta.nfs.' - 'NexentaNfsDriver._get_volroot') - @patch('cinder.volume.drivers.nexenta.nfs.' 
- 'NexentaNfsDriver._get_nfs_server_version') - def test_create_volume_from_snapshot(self, version, volroot, ensure): - version.return_value = 4 - volroot.return_value = 'volroot' - self._create_volume_db_entry() - self.drv.create_volume_from_snapshot(self.TEST_VOLUME_REF, - self.TEST_SNAPSHOT_REF) - self.nms_mock.appliance.execute.assert_not_called() - - self.drv.create_volume_from_snapshot(self.TEST_VOLUME_REF3, - self.TEST_SNAPSHOT_REF) - self.nms_mock.appliance.execute.assert_not_called() - - def test_set_rw_permissions_for_all(self): - path = '/tmp/path' - self.drv._set_rw_permissions_for_all(self.nms_mock, path) - self.nms_mock.appliance.execute.assert_called_with( - 'chmod ugo+rw %s' % path) - - def test_local_path(self): - volume = {'provider_location': self.TEST_EXPORT1, 'name': 'volume-1'} - path = self.drv.local_path(volume) - self.assertEqual( - '$state_path/mnt/b3f660847a52b29ac330d8555e4ad669/volume-1/volume', - path - ) - - def test_remote_path(self): - volume = {'provider_location': self.TEST_EXPORT1, 'name': 'volume-1'} - path = self.drv.remote_path(volume) - self.assertEqual('/volumes/stack/share/volume-1/volume', path) - - def test_share_folder(self): - self.drv._share_folder(self.nms_mock, 'stack', 'share/folder') - path = 'stack/share/folder' - self.nms_mock.netstorsvc.share_folder.assert_called_with( - self.TEST_SHARE_SVC, path, self.TEST_SHARE_OPTS) - - def test_load_shares_config(self): - self.drv.configuration.nfs_shares_config = ( - self.TEST_SHARES_CONFIG_FILE) - - config_data = [ - '%s %s' % (self.TEST_EXPORT1, self.TEST_NMS1), - '# %s %s' % (self.TEST_EXPORT2, self.TEST_NMS2), - '', - '%s %s %s' % (self.TEST_EXPORT2, self.TEST_NMS2, - self.TEST_EXPORT2_OPTIONS) - ] - - with mock.patch.object(self.drv, '_read_config_file') as \ - mock_read_config_file: - mock_read_config_file.return_value = config_data - self.drv._load_shares_config( - self.drv.configuration.nfs_shares_config) - - self.assertIn(self.TEST_EXPORT1, self.drv.shares) - 
self.assertIn(self.TEST_EXPORT2, self.drv.shares) - self.assertEqual(2, len(self.drv.shares)) - - self.assertIn(self.TEST_EXPORT1, self.drv.share2nms) - self.assertIn(self.TEST_EXPORT2, self.drv.share2nms) - self.assertEqual(2, len(self.drv.share2nms.keys())) - - self.assertEqual(self.TEST_EXPORT2_OPTIONS, - self.drv.shares[self.TEST_EXPORT2]) - - def test_get_capacity_info(self): - self.drv.share2nms = {self.TEST_EXPORT1: self.nms_mock} - self.nms_mock.server.get_prop.return_value = '/volumes' - self.nms_mock.folder.get_child_props.return_value = { - 'available': '1G', - 'used': '2G' - } - total, free, allocated = self.drv._get_capacity_info(self.TEST_EXPORT1) - - self.assertEqual(3 * units.Gi, total) - self.assertEqual(units.Gi, free) - self.assertEqual(2 * units.Gi, allocated) - - def test_get_share_datasets(self): - self.drv.share2nms = {self.TEST_EXPORT1: self.nms_mock} - self.nms_mock.server.get_prop.return_value = '/volumes' - volume_name, folder_name = ( - self.drv._get_share_datasets(self.TEST_EXPORT1)) - - self.assertEqual('stack', volume_name) - self.assertEqual('share', folder_name) - - def test_delete_snapshot(self): - self.drv.share2nms = {self.TEST_EXPORT1: self.nms_mock} - self._create_volume_db_entry() - - self.nms_mock.server.get_prop.return_value = '/volumes' - self.drv.delete_snapshot({'volume_id': '1', 'name': 'snapshot1'}) - self.nms_mock.snapshot.destroy.assert_called_with( - 'stack/share/volume-1@snapshot1', '') - - def test_delete_volume(self): - self.drv.share2nms = {self.TEST_EXPORT1: self.nms_mock} - self._create_volume_db_entry() - - self.drv._ensure_share_mounted = lambda *_, **__: 0 - self.drv._execute = lambda *_, **__: 0 - - self.nms_mock.server.get_prop.return_value = '/volumes' - self.nms_mock.folder.get_child_props.return_value = { - 'available': 1, 'used': 1} - self.drv.delete_volume({ - 'id': '1', - 'name': 'volume-1', - 'provider_location': self.TEST_EXPORT1 - }) - self.nms_mock.folder.destroy.assert_called_with( - 
'stack/share/volume-1', '-r') - - # Check that exception not raised if folder does not exist on - # NexentaStor appliance. - mock = self.nms_mock.folder.destroy - mock.side_effect = exception.NexentaException('Folder does not exist') - self.drv.delete_volume({ - 'id': '1', - 'name': 'volume-1', - 'provider_location': self.TEST_EXPORT1 - }) - - -class TestNexentaUtils(test.TestCase): - - def test_str2size(self): - values_to_test = ( - # Test empty value - (None, 0), - ('', 0), - ('0', 0), - ('12', 12), - # Test int values - (10, 10), - # Test bytes string - ('1b', 1), - ('1B', 1), - ('1023b', 1023), - ('0B', 0), - # Test other units - ('1M', units.Mi), - ('1.0M', units.Mi), - ) - - for value, result in values_to_test: - self.assertEqual(result, utils.str2size(value)) - - # Invalid format value - self.assertRaises(ValueError, utils.str2size, 'A') - - def test_str2gib_size(self): - self.assertEqual(1, utils.str2gib_size('1024M')) - self.assertEqual(300 * units.Mi // units.Gi, - utils.str2gib_size('300M')) - self.assertEqual(1.2 * units.Ti // units.Gi, - utils.str2gib_size('1.2T')) - self.assertRaises(ValueError, utils.str2gib_size, 'A') - - def test_parse_nms_url(self): - urls = ( - ('http://192.168.1.1/', (False, 'http', 'admin', 'nexenta', - '192.168.1.1', '2000', '/rest/nms/')), - ('http://192.168.1.1:8080', (False, 'http', 'admin', 'nexenta', - '192.168.1.1', '8080', '/rest/nms/')), - ('https://root:password@192.168.1.1:8080', - (False, 'https', 'root', 'password', '192.168.1.1', '8080', - '/rest/nms/')), - ) - for url, result in urls: - self.assertEqual(result, utils.parse_nms_url(url)) diff --git a/cinder/tests/unit/volume/drivers/nexenta/test_nexenta5_iscsi.py b/cinder/tests/unit/volume/drivers/nexenta/test_nexenta5_iscsi.py deleted file mode 100644 index 30745544e..000000000 --- a/cinder/tests/unit/volume/drivers/nexenta/test_nexenta5_iscsi.py +++ /dev/null @@ -1,271 +0,0 @@ -# Copyright 2016 Nexenta Systems, Inc. -# All Rights Reserved. 
-# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -""" -Unit tests for OpenStack Cinder volume driver -""" - -import mock -from mock import patch -from oslo_utils import units - -from cinder import context -from cinder import db -from cinder import exception -from cinder import test -from cinder.volume import configuration as conf -from cinder.volume.drivers.nexenta.ns5 import iscsi -from cinder.volume.drivers.nexenta.ns5 import jsonrpc - - -class TestNexentaISCSIDriver(test.TestCase): - TEST_VOLUME_NAME = 'volume1' - TEST_VOLUME_NAME2 = 'volume2' - TEST_VOLUME_NAME3 = 'volume3' - TEST_SNAPSHOT_NAME = 'snapshot1' - TEST_VOLUME_REF = { - 'name': TEST_VOLUME_NAME, - 'size': 1, - 'id': '1', - 'status': 'available' - } - TEST_VOLUME_REF2 = { - 'name': TEST_VOLUME_NAME2, - 'size': 1, - 'id': '2', - 'status': 'in-use' - } - TEST_VOLUME_REF3 = { - 'name': TEST_VOLUME_NAME3, - 'size': 2, - 'id': '2', - 'status': 'in-use' - } - TEST_SNAPSHOT_REF = { - 'name': TEST_SNAPSHOT_NAME, - 'volume_name': TEST_VOLUME_NAME, - 'volume_id': '1', - 'volume_size': 1 - } - - def __init__(self, method): - super(TestNexentaISCSIDriver, self).__init__(method) - - def setUp(self): - super(TestNexentaISCSIDriver, self).setUp() - self.cfg = mock.Mock(spec=conf.Configuration) - self.ctxt = context.get_admin_context() - self.cfg.nexenta_dataset_description = '' - self.cfg.nexenta_host = '1.1.1.1' - self.cfg.nexenta_user = 'admin' - self.cfg.nexenta_password = 'nexenta' - self.cfg.nexenta_volume = 
'cinder' - self.cfg.nexenta_rest_port = 2000 - self.cfg.nexenta_use_https = False - self.cfg.nexenta_iscsi_target_portal_port = 8080 - self.cfg.nexenta_target_prefix = 'iqn:' - self.cfg.nexenta_target_group_prefix = 'cinder/' - self.cfg.nexenta_ns5_blocksize = 32 - self.cfg.nexenta_sparse = True - self.cfg.nexenta_dataset_compression = 'on' - self.cfg.nexenta_dataset_dedup = 'off' - self.cfg.reserved_percentage = 20 - self.cfg.nexenta_volume = 'pool' - self.cfg.nexenta_volume_group = 'dsg' - self.nef_mock = mock.Mock() - self.mock_object(jsonrpc, 'NexentaJSONProxy', - return_value=self.nef_mock) - self.drv = iscsi.NexentaISCSIDriver( - configuration=self.cfg) - self.drv.db = db - self.drv._fetch_volumes = lambda: None - self.drv.do_setup(self.ctxt) - - def _create_volume_db_entry(self): - vol = { - 'id': '1', - 'size': 1, - 'status': 'available', - 'provider_location': self.TEST_VOLUME_NAME - } - return db.volume_create(self.ctxt, vol)['id'] - - def test_do_setup(self): - self.nef_mock.post.side_effect = exception.NexentaException( - 'Could not create volume group') - self.assertRaises( - exception.NexentaException, - self.drv.do_setup, self.ctxt) - - self.nef_mock.post.side_effect = exception.NexentaException( - '{"code": "EEXIST"}') - self.assertIsNone(self.drv.do_setup(self.ctxt)) - - def test_check_for_setup_error(self): - self.nef_mock.get.return_value = { - 'data': [{'name': 'iscsit', 'state': 'offline'}]} - self.assertRaises( - exception.NexentaException, self.drv.check_for_setup_error) - - self.nef_mock.get.side_effect = exception.NexentaException() - self.assertRaises(LookupError, self.drv.check_for_setup_error) - - def test_create_volume(self): - self.drv.create_volume(self.TEST_VOLUME_REF) - url = 'storage/pools/pool/volumeGroups/dsg/volumes' - self.nef_mock.post.assert_called_with(url, { - 'name': self.TEST_VOLUME_REF['name'], - 'volumeSize': 1 * units.Gi, - 'volumeBlockSize': 32768, - 'sparseVolume': self.cfg.nexenta_sparse}) - - def 
test_delete_volume(self): - self.drv.collect_zfs_garbage = lambda x: None - self.nef_mock.delete.side_effect = exception.NexentaException( - 'Failed to destroy snapshot') - self.assertIsNone(self.drv.delete_volume(self.TEST_VOLUME_REF)) - url = 'storage/pools/pool/volumeGroups' - data = {'name': 'dsg', 'volumeBlockSize': 32768} - self.nef_mock.post.assert_called_with(url, data) - - def test_extend_volume(self): - self.drv.extend_volume(self.TEST_VOLUME_REF, 2) - url = ('storage/pools/pool/volumeGroups/dsg/volumes/%(name)s') % { - 'name': self.TEST_VOLUME_REF['name']} - self.nef_mock.put.assert_called_with(url, { - 'volumeSize': 2 * units.Gi}) - - def test_delete_snapshot(self): - self._create_volume_db_entry() - url = ('storage/pools/pool/volumeGroups/dsg/' - 'volumes/volume-1/snapshots/snapshot1') - - self.nef_mock.delete.side_effect = exception.NexentaException('EBUSY') - self.drv.delete_snapshot(self.TEST_SNAPSHOT_REF) - self.nef_mock.delete.assert_called_with(url) - - self.nef_mock.delete.side_effect = exception.NexentaException('Error') - self.drv.delete_snapshot(self.TEST_SNAPSHOT_REF) - self.nef_mock.delete.assert_called_with(url) - - @patch('cinder.volume.drivers.nexenta.ns5.iscsi.' - 'NexentaISCSIDriver.create_snapshot') - @patch('cinder.volume.drivers.nexenta.ns5.iscsi.' - 'NexentaISCSIDriver.delete_snapshot') - @patch('cinder.volume.drivers.nexenta.ns5.iscsi.' 
- 'NexentaISCSIDriver.create_volume_from_snapshot') - def test_create_cloned_volume(self, crt_vol, dlt_snap, crt_snap): - self._create_volume_db_entry() - vol = self.TEST_VOLUME_REF2 - src_vref = self.TEST_VOLUME_REF - crt_vol.side_effect = exception.NexentaException() - dlt_snap.side_effect = exception.NexentaException() - self.assertRaises( - exception.NexentaException, - self.drv.create_cloned_volume, vol, src_vref) - - def test_create_snapshot(self): - self._create_volume_db_entry() - self.drv.create_snapshot(self.TEST_SNAPSHOT_REF) - url = 'storage/pools/pool/volumeGroups/dsg/volumes/volume-1/snapshots' - self.nef_mock.post.assert_called_with( - url, {'name': 'snapshot1'}) - - def test_create_larger_volume_from_snapshot(self): - self._create_volume_db_entry() - vol = self.TEST_VOLUME_REF3 - src_vref = self.TEST_SNAPSHOT_REF - - self.drv.create_volume_from_snapshot(vol, src_vref) - - # make sure the volume get extended! - url = ('storage/pools/pool/volumeGroups/dsg/volumes/%(name)s') % { - 'name': self.TEST_VOLUME_REF3['name']} - self.nef_mock.put.assert_called_with(url, { - 'volumeSize': 2 * units.Gi}) - - def test_do_export(self): - target_name = 'new_target' - lun = 0 - - class GetSideEffect(object): - def __init__(self): - self.lm_counter = -1 - - def __call__(self, *args, **kwargs): - # Find out whether the volume is exported - if 'san/lunMappings?volume=' in args[0]: - self.lm_counter += 1 - # a value for the first call - if self.lm_counter == 0: - return {'data': []} - else: - return {'data': [{'lun': lun}]} - # Get the name of just created target - elif 'san/iscsi/targets' in args[0]: - return {'data': [{'name': target_name}]} - - def post_side_effect(*args, **kwargs): - if 'san/iscsi/targets' in args[0]: - return {'data': [{'name': target_name}]} - - self.nef_mock.get.side_effect = GetSideEffect() - self.nef_mock.post.side_effect = post_side_effect - res = self.drv._do_export(self.ctxt, self.TEST_VOLUME_REF) - provider_location = '%(host)s:%(port)s,1 
%(name)s %(lun)s' % { - 'host': self.cfg.nexenta_host, - 'port': self.cfg.nexenta_iscsi_target_portal_port, - 'name': target_name, - 'lun': lun, - } - expected = {'provider_location': provider_location} - self.assertEqual(expected, res) - - def test_remove_export(self): - mapping_id = '1234567890' - self.nef_mock.get.return_value = {'data': [{'id': mapping_id}]} - self.drv.remove_export(self.ctxt, self.TEST_VOLUME_REF) - url = 'san/lunMappings/%s' % mapping_id - self.nef_mock.delete.assert_called_with(url) - - def test_update_volume_stats(self): - self.nef_mock.get.return_value = { - 'bytesAvailable': 10 * units.Gi, - 'bytesUsed': 2 * units.Gi - } - location_info = '%(driver)s:%(host)s:%(pool)s/%(group)s' % { - 'driver': self.drv.__class__.__name__, - 'host': self.cfg.nexenta_host, - 'pool': self.cfg.nexenta_volume, - 'group': self.cfg.nexenta_volume_group, - } - stats = { - 'vendor_name': 'Nexenta', - 'dedup': self.cfg.nexenta_dataset_dedup, - 'compression': self.cfg.nexenta_dataset_compression, - 'description': self.cfg.nexenta_dataset_description, - 'driver_version': self.drv.VERSION, - 'storage_protocol': 'iSCSI', - 'total_capacity_gb': 10, - 'free_capacity_gb': 8, - 'reserved_percentage': self.cfg.reserved_percentage, - 'QoS_support': False, - 'volume_backend_name': self.drv.backend_name, - 'location_info': location_info, - 'iscsi_target_portal_port': ( - self.cfg.nexenta_iscsi_target_portal_port), - 'nef_url': self.drv.nef.url - } - self.drv._update_volume_stats() - self.assertEqual(stats, self.drv._stats) diff --git a/cinder/tests/unit/volume/drivers/nexenta/test_nexenta5_jsonrpc.py b/cinder/tests/unit/volume/drivers/nexenta/test_nexenta5_jsonrpc.py deleted file mode 100644 index 82fbfc69c..000000000 --- a/cinder/tests/unit/volume/drivers/nexenta/test_nexenta5_jsonrpc.py +++ /dev/null @@ -1,258 +0,0 @@ -# Copyright 2016 Nexenta Systems, Inc. -# All Rights Reserved. 
-# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -""" -Unit tests for NexentaStor 5 REST API helper -""" - -import uuid - -import mock -from mock import patch -from oslo_serialization import jsonutils -import requests -from requests import adapters -from six.moves import http_client - -from cinder import exception -from cinder import test -from cinder.volume.drivers.nexenta.ns5 import jsonrpc - -HOST = '1.1.1.1' -USERNAME = 'user' -PASSWORD = 'pass' - - -def gen_response(code=http_client.OK, json=None): - r = requests.Response() - r.headers['Content-Type'] = 'application/json' - r.encoding = 'utf8' - r.status_code = code - r.reason = 'FAKE REASON' - r.raw = mock.Mock() - r._content = '' - if json: - r._content = jsonutils.dumps(json) - return r - - -class TestNexentaJSONProxyAuth(test.TestCase): - - @patch('cinder.volume.drivers.nexenta.ns5.jsonrpc.requests.post') - def test_https_auth(self, post): - use_https = True - port = 8443 - auth_uri = 'auth/login' - rnd_url = 'some/random/url' - - class PostSideEffect(object): - def __call__(self, *args, **kwargs): - r = gen_response() - if args[0] == '%(scheme)s://%(host)s:%(port)s/%(uri)s' % { - 'scheme': 'https', - 'host': HOST, - 'port': port, - 'uri': auth_uri}: - token = uuid.uuid4().hex - content = {'token': token} - r._content = jsonutils.dumps(content) - return r - post_side_effect = PostSideEffect() - post.side_effect = post_side_effect - - class TestAdapter(adapters.HTTPAdapter): - - def __init__(self): - 
super(TestAdapter, self).__init__() - self.counter = 0 - - def send(self, request, *args, **kwargs): - # an url is being requested for the second time - if self.counter == 1: - # make the fake backend respond 401 - r = gen_response(http_client.UNAUTHORIZED) - r._content = '' - r.connection = mock.Mock() - r_ = gen_response(json={'data': []}) - r.connection.send = lambda prep, **kwargs_: r_ - else: - r = gen_response(json={'data': []}) - r.request = request - self.counter += 1 - return r - - nef = jsonrpc.NexentaJSONProxy(HOST, port, USERNAME, PASSWORD, - use_https) - adapter = TestAdapter() - nef.session.mount( - '%(scheme)s://%(host)s:%(port)s/%(uri)s' % { - 'scheme': 'https', - 'host': HOST, - 'port': port, - 'uri': rnd_url}, - adapter) - - # successful authorization - self.assertEqual({'data': []}, nef.get(rnd_url)) - - # session timeout simulation. Client must authenticate newly - self.assertEqual({'data': []}, nef.get(rnd_url)) - # auth URL must be requested two times at this moment - self.assertEqual(2, post.call_count) - - # continue with the last (second) token - self.assertEqual(nef.get(rnd_url), {'data': []}) - # auth URL must be requested two times - self.assertEqual(2, post.call_count) - - -class TestNexentaJSONProxy(test.TestCase): - - def setUp(self): - super(TestNexentaJSONProxy, self).setUp() - self.nef = jsonrpc.NexentaJSONProxy(HOST, 0, USERNAME, PASSWORD, False) - - def gen_adapter(self, code, json=None): - class TestAdapter(adapters.HTTPAdapter): - - def __init__(self): - super(TestAdapter, self).__init__() - - def send(self, request, *args, **kwargs): - r = gen_response(code, json) - r.request = request - return r - - return TestAdapter() - - def _mount_adapter(self, url, adapter): - self.nef.session.mount( - '%(scheme)s://%(host)s:%(port)s/%(uri)s' % { - 'scheme': 'http', - 'host': HOST, - 'port': 8080, - 'uri': url}, - adapter) - - def test_post(self): - random_dict = {'data': uuid.uuid4().hex} - rnd_url = 'some/random/url' - 
self._mount_adapter(rnd_url, self.gen_adapter(http_client.CREATED, - random_dict)) - self.assertEqual(random_dict, self.nef.post(rnd_url)) - - def test_delete(self): - random_dict = {'data': uuid.uuid4().hex} - rnd_url = 'some/random/url' - self._mount_adapter(rnd_url, self.gen_adapter(http_client.CREATED, - random_dict)) - self.assertEqual(random_dict, self.nef.delete(rnd_url)) - - def test_put(self): - random_dict = {'data': uuid.uuid4().hex} - rnd_url = 'some/random/url' - self._mount_adapter(rnd_url, self.gen_adapter(http_client.CREATED, - random_dict)) - self.assertEqual(random_dict, self.nef.put(rnd_url)) - - def test_get_200(self): - random_dict = {'data': uuid.uuid4().hex} - rnd_url = 'some/random/url' - self._mount_adapter(rnd_url, self.gen_adapter(http_client.OK, - random_dict)) - self.assertEqual(random_dict, self.nef.get(rnd_url)) - - def test_get_201(self): - random_dict = {'data': uuid.uuid4().hex} - rnd_url = 'some/random/url' - self._mount_adapter(rnd_url, self.gen_adapter(http_client.CREATED, - random_dict)) - self.assertEqual(random_dict, self.nef.get(rnd_url)) - - def test_get_500(self): - class TestAdapter(adapters.HTTPAdapter): - - def __init__(self): - super(TestAdapter, self).__init__() - - def send(self, request, *args, **kwargs): - json = { - 'code': 'NEF_ERROR', - 'message': 'Some error' - } - r = gen_response(http_client.INTERNAL_SERVER_ERROR, json) - r.request = request - return r - - adapter = TestAdapter() - rnd_url = 'some/random/url' - self._mount_adapter(rnd_url, adapter) - self.assertRaises(exception.NexentaException, self.nef.get, rnd_url) - - def test_get__not_nef_error(self): - class TestAdapter(adapters.HTTPAdapter): - - def __init__(self): - super(TestAdapter, self).__init__() - - def send(self, request, *args, **kwargs): - r = gen_response(http_client.NOT_FOUND) - r._content = 'Page Not Found' - r.request = request - return r - - adapter = TestAdapter() - rnd_url = 'some/random/url' - self._mount_adapter(rnd_url, adapter) - 
self.assertRaises(exception.VolumeBackendAPIException, self.nef.get, - rnd_url) - - def test_get__not_nef_error_empty_body(self): - class TestAdapter(adapters.HTTPAdapter): - - def __init__(self): - super(TestAdapter, self).__init__() - - def send(self, request, *args, **kwargs): - r = gen_response(http_client.NOT_FOUND) - r.request = request - return r - - adapter = TestAdapter() - rnd_url = 'some/random/url' - self._mount_adapter(rnd_url, adapter) - self.assertRaises(exception.VolumeBackendAPIException, self.nef.get, - rnd_url) - - def test_202(self): - redirect_url = 'redirect/url' - - class RedirectTestAdapter(adapters.HTTPAdapter): - - def __init__(self): - super(RedirectTestAdapter, self).__init__() - - def send(self, request, *args, **kwargs): - json = { - 'links': [{'href': redirect_url}] - } - r = gen_response(http_client.ACCEPTED, json) - r.request = request - return r - - rnd_url = 'some/random/url' - self._mount_adapter(rnd_url, RedirectTestAdapter()) - self._mount_adapter(redirect_url, self.gen_adapter( - http_client.CREATED)) - self.assertIsNone(self.nef.get(rnd_url)) diff --git a/cinder/tests/unit/volume/drivers/nexenta/test_nexenta5_nfs.py b/cinder/tests/unit/volume/drivers/nexenta/test_nexenta5_nfs.py deleted file mode 100644 index 1475adc04..000000000 --- a/cinder/tests/unit/volume/drivers/nexenta/test_nexenta5_nfs.py +++ /dev/null @@ -1,225 +0,0 @@ -# Copyright 2016 Nexenta Systems, Inc. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the -# License for the specific language governing permissions and limitations -# under the License. -""" -Unit tests for OpenStack Cinder volume driver -""" - -import mock -from mock import patch - -from cinder import context -from cinder import db -from cinder import test -from cinder.tests.unit import fake_constants as fake -from cinder.tests.unit.fake_volume import fake_volume_obj -from cinder.volume import configuration as conf -from cinder.volume.drivers.nexenta.ns5 import jsonrpc -from cinder.volume.drivers.nexenta.ns5 import nfs - - -class TestNexentaNfsDriver(test.TestCase): - TEST_SHARE = 'host1:/pool/share' - TEST_SHARE2_OPTIONS = '-o intr' - TEST_FILE_NAME = 'test.txt' - TEST_SHARES_CONFIG_FILE = '/etc/cinder/nexenta-shares.conf' - TEST_SNAPSHOT_NAME = 'snapshot1' - TEST_VOLUME_NAME = 'volume1' - TEST_VOLUME_NAME2 = 'volume2' - - TEST_VOLUME = fake_volume_obj(None, **{ - 'name': TEST_VOLUME_NAME, - 'id': fake.VOLUME_ID, - 'size': 1, - 'status': 'available', - 'provider_location': TEST_SHARE - }) - - TEST_VOLUME2 = fake_volume_obj(None, **{ - 'name': TEST_VOLUME_NAME2, - 'size': 2, - 'id': fake.VOLUME2_ID, - 'status': 'in-use' - }) - - TEST_SNAPSHOT = { - 'name': TEST_SNAPSHOT_NAME, - 'volume_name': TEST_VOLUME_NAME, - 'volume_size': 1, - 'volume_id': fake.VOLUME_ID - } - - TEST_SHARE_SVC = 'svc:/network/nfs/server:default' - - def setUp(self): - super(TestNexentaNfsDriver, self).setUp() - self.ctxt = context.get_admin_context() - self.cfg = mock.Mock(spec=conf.Configuration) - self.cfg.nexenta_dataset_description = '' - self.cfg.nexenta_mount_point_base = '$state_path/mnt' - self.cfg.nexenta_sparsed_volumes = True - self.cfg.nexenta_dataset_compression = 'on' - self.cfg.nexenta_dataset_dedup = 'off' - self.cfg.nfs_mount_point_base = '/mnt/test' - self.cfg.nfs_mount_attempts = 3 - self.cfg.nfs_mount_options = None - self.cfg.nas_mount_options = 'vers=4' - self.cfg.reserved_percentage = 20 - self.cfg.nexenta_use_https = False - 
self.cfg.nexenta_rest_port = 0 - self.cfg.nexenta_user = 'user' - self.cfg.nexenta_password = 'pass' - self.cfg.max_over_subscription_ratio = 20.0 - self.cfg.nas_host = '1.1.1.1' - self.cfg.nas_share_path = 'pool/share' - self.nef_mock = mock.Mock() - self.stubs.Set(jsonrpc, 'NexentaJSONProxy', - lambda *_, **__: self.nef_mock) - self.drv = nfs.NexentaNfsDriver(configuration=self.cfg) - self.drv.db = db - self.drv.do_setup(self.ctxt) - - def _create_volume_db_entry(self): - vol = { - 'id': fake.VOLUME_ID, - 'size': 1, - 'status': 'available', - 'provider_location': self.TEST_SHARE - } - return db.volume_create(self.ctxt, vol)['id'] - - def test_check_for_setup_error(self): - self.nef_mock.get.return_value = {'data': []} - self.assertRaises( - LookupError, lambda: self.drv.check_for_setup_error()) - - def test_initialize_connection(self): - data = { - 'export': self.TEST_VOLUME['provider_location'], 'name': 'volume'} - self.assertEqual({ - 'driver_volume_type': self.drv.driver_volume_type, - 'data': data - }, self.drv.initialize_connection(self.TEST_VOLUME, None)) - - @patch('cinder.volume.drivers.nexenta.ns5.nfs.' - 'NexentaNfsDriver._create_regular_file') - @patch('cinder.volume.drivers.nexenta.ns5.nfs.' - 'NexentaNfsDriver._create_sparsed_file') - @patch('cinder.volume.drivers.nexenta.ns5.nfs.' - 'NexentaNfsDriver._ensure_share_mounted') - @patch('cinder.volume.drivers.nexenta.ns5.nfs.' - 'NexentaNfsDriver._share_folder') - def test_do_create_volume(self, share, ensure, sparsed, regular): - ensure.return_value = True - share.return_value = True - self.nef_mock.get.return_value = 'on' - self.drv._do_create_volume(self.TEST_VOLUME) - - url = 'storage/pools/pool/filesystems' - data = { - 'name': 'share/volume-' + fake.VOLUME_ID, - 'compressionMode': 'on', - 'dedupMode': 'off', - } - self.nef_mock.post.assert_called_with(url, data) - - @patch('cinder.volume.drivers.nexenta.ns5.nfs.' 
- 'NexentaNfsDriver._ensure_share_mounted') - def test_delete_volume(self, ensure): - self._create_volume_db_entry() - self.nef_mock.get.return_value = {} - self.drv.delete_volume(self.TEST_VOLUME) - self.nef_mock.delete.assert_called_with( - 'storage/pools/pool/filesystems/share%2Fvolume-' + - fake.VOLUME_ID + '?snapshots=true') - - def test_create_snapshot(self): - self._create_volume_db_entry() - self.drv.create_snapshot(self.TEST_SNAPSHOT) - url = ('storage/pools/pool/filesystems/share%2Fvolume-' + - fake.VOLUME_ID + '/snapshots') - data = {'name': self.TEST_SNAPSHOT['name']} - self.nef_mock.post.assert_called_with(url, data) - - def test_delete_snapshot(self): - self._create_volume_db_entry() - self.drv.delete_snapshot(self.TEST_SNAPSHOT) - url = ('storage/pools/pool/filesystems/share%2Fvolume-' + - fake.VOLUME_ID + '/snapshots/snapshot1') - self.drv.delete_snapshot(self.TEST_SNAPSHOT) - self.nef_mock.delete.assert_called_with(url) - - @patch('cinder.volume.drivers.nexenta.ns5.nfs.' - 'NexentaNfsDriver.extend_volume') - @patch('cinder.volume.drivers.nexenta.ns5.nfs.' - 'NexentaNfsDriver.local_path') - @patch('cinder.volume.drivers.nexenta.ns5.nfs.' - 'NexentaNfsDriver._share_folder') - def test_create_volume_from_snapshot(self, share, path, extend): - self._create_volume_db_entry() - url = ('storage/pools/%(pool)s/' - 'filesystems/%(fs)s/snapshots/%(snap)s/clone') % { - 'pool': 'pool', - 'fs': '%2F'.join(['share', 'volume-' + fake.VOLUME_ID]), - 'snap': self.TEST_SNAPSHOT['name'] - } - path = '/'.join(['pool/share', self.TEST_VOLUME2['name']]) - data = {'targetPath': path} - self.drv.create_volume_from_snapshot( - self.TEST_VOLUME2, self.TEST_SNAPSHOT) - self.nef_mock.post.assert_called_with(url, data) - - # make sure the volume get extended! - extend.assert_called_once_with(self.TEST_VOLUME2, 2) - - @patch('cinder.volume.drivers.nexenta.ns5.nfs.' 
- 'NexentaNfsDriver.local_path') - @patch('oslo_concurrency.processutils.execute') - def test_extend_volume_sparsed(self, _execute, path): - self._create_volume_db_entry() - path.return_value = 'path' - - self.drv.extend_volume(self.TEST_VOLUME, 2) - - _execute.assert_called_with( - 'truncate', '-s', '2G', - 'path', - root_helper='sudo cinder-rootwrap /etc/cinder/rootwrap.conf', - run_as_root=True) - - @patch('cinder.volume.drivers.nexenta.ns5.nfs.' - 'NexentaNfsDriver.local_path') - @patch('oslo_concurrency.processutils.execute') - def test_extend_volume_nonsparsed(self, _execute, path): - self._create_volume_db_entry() - path.return_value = 'path' - with mock.patch.object(self.drv, - 'sparsed_volumes', - False): - - self.drv.extend_volume(self.TEST_VOLUME, 2) - - _execute.assert_called_with( - 'dd', 'if=/dev/zero', 'seek=1073741824', - 'of=path', - 'bs=1M', 'count=1024', - root_helper='sudo cinder-rootwrap /etc/cinder/rootwrap.conf', - run_as_root=True) - - def test_get_capacity_info(self): - self.nef_mock.get.return_value = { - 'bytesAvailable': 1000, - 'bytesUsed': 100} - self.assertEqual( - (1000, 900, 100), self.drv._get_capacity_info('pool/share')) diff --git a/cinder/tests/unit/volume/drivers/solidfire/__init__.py b/cinder/tests/unit/volume/drivers/solidfire/__init__.py deleted file mode 100644 index e69de29bb..000000000 diff --git a/cinder/tests/unit/volume/drivers/solidfire/scaled_iops_invalid_data.json b/cinder/tests/unit/volume/drivers/solidfire/scaled_iops_invalid_data.json deleted file mode 100644 index 5ede513a7..000000000 --- a/cinder/tests/unit/volume/drivers/solidfire/scaled_iops_invalid_data.json +++ /dev/null @@ -1,22 +0,0 @@ -{ - "test_max_greater_than_burst": [ - { - "burstIOPS": 2, - "maxIOPS": 3, - "minIOPS": "100", - "scaleMin": "2", - "scaledIOPS": "True", - "size": 2 - } - ], - "test_min_greater_than_max_burst": [ - { - "burstIOPS": 2, - "maxIOPS": 2, - "minIOPS": "100", - "scaleMin": "3", - "scaledIOPS": "True", - "size": 2 - } - ] -} 
diff --git a/cinder/tests/unit/volume/drivers/solidfire/scaled_iops_test_data.json b/cinder/tests/unit/volume/drivers/solidfire/scaled_iops_test_data.json deleted file mode 100644 index e93e37a4c..000000000 --- a/cinder/tests/unit/volume/drivers/solidfire/scaled_iops_test_data.json +++ /dev/null @@ -1,134 +0,0 @@ -{ - "test_capping_the_maximum_of_minIOPS": [ - { - "burstIOPS": "200000", - "maxIOPS": "200000", - "minIOPS": "14950", - "scaleMin": "100", - "scaledIOPS": "True", - "size": 2 - }, - { - "burstIOPS": 200000, - "maxIOPS": 200000, - "minIOPS": 15000 - } - ], - "test_capping_the_maximums": [ - { - "burstIOPS": "190000", - "maxIOPS": "190000", - "minIOPS": "100", - "scaleBurst": "10003", - "scaleMax": "10002", - "scaleMin": "2", - "scaledIOPS": "True", - "size": 2 - }, - { - "burstIOPS": 200000, - "maxIOPS": 200000, - "minIOPS": 102 - } - ], - "test_capping_the_minimum": [ - { - "burstIOPS": "300", - "maxIOPS": "200", - "minIOPS": "50", - "scaleBurst": "2", - "scaleMax": "2", - "scaleMin": "2", - "scaledIOPS": "True", - "size": 2 - }, - { - "burstIOPS": 302, - "maxIOPS": 202, - "minIOPS": 100 - } - ], - "test_regular_QoS": [ - { - "burstIOPS": "200", - "maxIOPS": "200", - "minIOPS": "100", - "size": 1 - }, - { - "burstIOPS": 200, - "maxIOPS": 200, - "minIOPS": 100 - } - ], - "test_scaled_QoS_with_size_1": [ - { - "burstIOPS": "300", - "maxIOPS": "200", - "minIOPS": "100", - "scaleBurst": "2", - "scaleMax": "2", - "scaleMin": "2", - "scaledIOPS": "True", - "size": 1 - }, - { - "burstIOPS": 300, - "maxIOPS": 200, - "minIOPS": 100 - } - ], - "test_scaled_QoS_with_size_2": [ - { - "burstIOPS": "300", - "maxIOPS": "200", - "minIOPS": "100", - "scaleBurst": "2", - "scaleMax": "2", - "scaleMin": "2", - "scaledIOPS": "True", - "size": 2 - }, - { - "burstIOPS": 302, - "maxIOPS": 202, - "minIOPS": 102 - } - ], - "test_scoped_regular_QoS": [ - { - "qos:burstIOPS": "200", - "qos:maxIOPS": "200", - "qos:minIOPS": "100", - "size": 1 - }, - { - "burstIOPS": 200, - 
"maxIOPS": 200, - "minIOPS": 100 - } - ], - "test_when_no_valid_QoS_values_present": [ - { - "key": "value", - "size": 2 - }, - {} - ], - "test_without_presence_of_the_scaled_flag": [ - { - "burstIOPS": "300", - "maxIOPS": "200", - "minIOPS": "100", - "scaleBurst": "2", - "scaleMax": "2", - "scaleMin": "2", - "size": 2 - }, - { - "burstIOPS": 300, - "maxIOPS": 200, - "minIOPS": 100 - } - ] -} diff --git a/cinder/tests/unit/volume/drivers/solidfire/test_solidfire.py b/cinder/tests/unit/volume/drivers/solidfire/test_solidfire.py deleted file mode 100644 index 30e1c9763..000000000 --- a/cinder/tests/unit/volume/drivers/solidfire/test_solidfire.py +++ /dev/null @@ -1,2039 +0,0 @@ - -# Copyright 2012 OpenStack Foundation -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -import datetime - -import ddt -import mock -from oslo_utils import timeutils -from oslo_utils import units - -from cinder import context -from cinder import exception -from cinder.objects import fields -from cinder import test -from cinder.tests.unit.image import fake as fake_image -from cinder.volume import configuration as conf -from cinder.volume.drivers import solidfire -from cinder.volume import qos_specs -from cinder.volume import volume_types - - -@ddt.ddt -class SolidFireVolumeTestCase(test.TestCase): - - def setUp(self): - self.ctxt = context.get_admin_context() - self.configuration = conf.Configuration(None) - self.configuration.sf_allow_tenant_qos = True - self.configuration.san_is_local = True - self.configuration.sf_emulate_512 = True - self.configuration.sf_account_prefix = 'cinder' - self.configuration.reserved_percentage = 25 - self.configuration.iscsi_helper = None - self.configuration.sf_template_account_name = 'openstack-vtemplate' - self.configuration.sf_allow_template_caching = False - self.configuration.sf_svip = None - self.configuration.sf_enable_volume_mapping = True - self.configuration.sf_volume_prefix = 'UUID-' - self.configuration.sf_enable_vag = False - self.configuration.replication_device = [] - - super(SolidFireVolumeTestCase, self).setUp() - self.mock_object(solidfire.SolidFireDriver, - '_issue_api_request', - self.fake_issue_api_request) - self.mock_object(solidfire.SolidFireDriver, - '_build_endpoint_info', - self.fake_build_endpoint_info) - - self.expected_qos_results = {'minIOPS': 1000, - 'maxIOPS': 10000, - 'burstIOPS': 20000} - self.mock_stats_data =\ - {'result': - {'clusterCapacity': {'maxProvisionedSpace': 107374182400, - 'usedSpace': 1073741824, - 'compressionPercent': 100, - 'deDuplicationPercent': 100, - 'thinProvisioningPercent': 100}}} - self.mock_volume = {'project_id': 'testprjid', - 'name': 'testvol', - 'size': 1, - 'id': 'a720b3c0-d1f0-11e1-9b23-0800200c9a66', - 'volume_type_id': 'fast', - 'created_at': 
timeutils.utcnow()} - self.fake_image_meta = {'id': '17c550bb-a411-44c0-9aaf-0d96dd47f501', - 'updated_at': datetime.datetime(2013, 9, - 28, 15, - 27, 36, - 325355), - 'is_public': True, - 'owner': 'testprjid'} - self.fake_image_service = fake_image.FakeImageService() - - def fake_init_cluster_pairs(*args, **kwargs): - return None - - def fake_build_endpoint_info(obj, **kwargs): - endpoint = {} - endpoint['mvip'] = '1.1.1.1' - endpoint['login'] = 'admin' - endpoint['passwd'] = 'admin' - endpoint['port'] = '443' - endpoint['url'] = '{scheme}://{mvip}'.format(mvip='%s:%s' % - (endpoint['mvip'], - endpoint['port']), - scheme='https') - - return endpoint - - def fake_issue_api_request(obj, method, params, version='1.0', - endpoint=None): - if method is 'GetClusterCapacity' and version == '1.0': - data = {'result': - {'clusterCapacity': {'maxProvisionedSpace': 107374182400, - 'usedSpace': 1073741824, - 'compressionPercent': 100, - 'deDuplicationPercent': 100, - 'thinProvisioningPercent': 100}}} - return data - - elif method is 'GetClusterInfo': - results = { - 'result': - {'clusterInfo': - {'name': 'fake-cluster', - 'mvip': '1.1.1.1', - 'svip': '1.1.1.1', - 'uniqueID': 'unqid', - 'repCount': 2, - 'uuid': '53c8be1e-89e2-4f7f-a2e3-7cb84c47e0ec', - 'attributes': {}}}} - return results - - elif method is 'GetClusterVersionInfo': - return {'id': None, 'result': {'softwareVersionInfo': - {'pendingVersion': '8.2.1.4', - 'packageName': '', - 'currentVersion': '8.2.1.4', - 'nodeID': 0, 'startTime': ''}, - 'clusterVersion': '8.2.1.4', - 'clusterAPIVersion': '8.2'}} - - elif method is 'AddAccount' and version == '1.0': - return {'result': {'accountID': 25}, 'id': 1} - - elif method is 'GetAccountByName' and version == '1.0': - results = {'result': {'account': - {'accountID': 25, - 'username': params['username'], - 'status': 'active', - 'initiatorSecret': '123456789012', - 'targetSecret': '123456789012', - 'attributes': {}, - 'volumes': [6, 7, 20]}}, - "id": 1} - return results - - 
elif method is 'CreateVolume' and version == '1.0': - return {'result': {'volumeID': 5}, 'id': 1} - - elif method is 'CreateSnapshot' and version == '6.0': - return {'result': {'snapshotID': 5}, 'id': 1} - - elif method is 'DeleteVolume' and version == '1.0': - return {'result': {}, 'id': 1} - - elif method is 'ModifyVolume' and version == '5.0': - return {'result': {}, 'id': 1} - - elif method is 'CloneVolume': - return {'result': {'volumeID': 6}, 'id': 2} - - elif method is 'ModifyVolume': - return - - elif method is 'ListVolumesForAccount' and version == '1.0': - test_name = 'OS-VOLID-a720b3c0-d1f0-11e1-9b23-0800200c9a66' - result = {'result': { - 'volumes': [{'volumeID': 5, - 'name': test_name, - 'accountID': 25, - 'sliceCount': 1, - 'totalSize': 1 * units.Gi, - 'enable512e': True, - 'access': "readWrite", - 'status': "active", - 'attributes': {}, - 'qos': None, - 'iqn': test_name}]}} - return result - - elif method is 'ListActiveVolumes': - test_name = "existing_volume" - result = {'result': { - 'volumes': [{'volumeID': 5, - 'name': test_name, - 'accountID': 8, - 'sliceCount': 1, - 'totalSize': int(1.75 * units.Gi), - 'enable512e': True, - 'access': "readWrite", - 'status': "active", - 'attributes': {}, - 'qos': None, - 'iqn': test_name}]}} - return result - elif method is 'DeleteSnapshot': - return {'result': {}} - elif method is 'GetClusterVersionInfo': - return {'result': {'clusterAPIVersion': '8.0'}} - elif method is 'StartVolumePairing': - return {'result': {'volumePairingKey': 'fake-pairing-key'}} - else: - # Crap, unimplemented API call in Fake - return None - - def fake_issue_api_request_fails(obj, method, - params, version='1.0', - endpoint=None): - response = {'error': {'code': 000, - 'name': 'DummyError', - 'message': 'This is a fake error response'}, - 'id': 1} - msg = ('Error (%s) encountered during ' - 'SolidFire API call.' 
% response['error']['name']) - raise exception.SolidFireAPIException(message=msg) - - def fake_set_qos_by_volume_type(self, type_id, ctxt): - return {'minIOPS': 500, - 'maxIOPS': 1000, - 'burstIOPS': 1000} - - def fake_volume_get(obj, key, default=None): - return {'qos': 'fast'} - - def fake_update_cluster_status(self): - return - - def fake_get_cluster_version_info(self): - return - - def fake_get_model_info(self, account, vid, endpoint=None): - return {'fake': 'fake-model'} - - @mock.patch.object(solidfire.SolidFireDriver, '_issue_api_request') - @mock.patch.object(solidfire.SolidFireDriver, '_create_template_account') - def test_create_volume_with_qos_type(self, - _mock_create_template_account, - _mock_issue_api_request): - _mock_issue_api_request.side_effect = self.fake_issue_api_request - _mock_create_template_account.return_value = 1 - testvol = {'project_id': 'testprjid', - 'name': 'testvol', - 'size': 1, - 'id': 'a720b3c0-d1f0-11e1-9b23-0800200c9a66', - 'volume_type_id': 'fast', - 'created_at': timeutils.utcnow()} - - fake_sfaccounts = [{'accountID': 5, - 'name': 'testprjid', - 'targetSecret': 'shhhh', - 'username': 'john-wayne'}] - - test_type = {'name': 'sf-1', - 'qos_specs_id': 'fb0576d7-b4b5-4cad-85dc-ca92e6a497d1', - 'deleted': False, - 'created_at': '2014-02-06 04:58:11', - 'updated_at': None, - 'extra_specs': {}, - 'deleted_at': None, - 'id': 'e730e97b-bc7d-4af3-934a-32e59b218e81'} - - test_qos_spec = {'id': 'asdfafdasdf', - 'specs': {'minIOPS': '1000', - 'maxIOPS': '2000', - 'burstIOPS': '3000'}} - - def _fake_get_volume_type(ctxt, type_id): - return test_type - - def _fake_get_qos_spec(ctxt, spec_id): - return test_qos_spec - - def _fake_do_volume_create(account, params): - params['provider_location'] = '1.1.1.1 iqn 0' - return params - - sfv = solidfire.SolidFireDriver(configuration=self.configuration) - with mock.patch.object(sfv, - '_get_sfaccounts_for_tenant', - return_value=fake_sfaccounts), \ - mock.patch.object(sfv, - 
'_get_account_create_availability', - return_value=fake_sfaccounts[0]), \ - mock.patch.object(sfv, - '_do_volume_create', - side_effect=_fake_do_volume_create), \ - mock.patch.object(volume_types, - 'get_volume_type', - side_effect=_fake_get_volume_type), \ - mock.patch.object(qos_specs, - 'get_qos_specs', - side_effect=_fake_get_qos_spec): - - self.assertEqual({'burstIOPS': 3000, - 'minIOPS': 1000, - 'maxIOPS': 2000}, - sfv.create_volume(testvol)['qos']) - - @mock.patch.object(solidfire.SolidFireDriver, '_issue_api_request') - @mock.patch.object(solidfire.SolidFireDriver, '_create_template_account') - def test_create_volume(self, - _mock_create_template_account, - _mock_issue_api_request): - _mock_issue_api_request.side_effect = self.fake_issue_api_request - _mock_create_template_account.return_value = 1 - testvol = {'project_id': 'testprjid', - 'name': 'testvol', - 'size': 1, - 'id': 'a720b3c0-d1f0-11e1-9b23-0800200c9a66', - 'volume_type_id': None, - 'created_at': timeutils.utcnow()} - fake_sfaccounts = [{'accountID': 5, - 'name': 'testprjid', - 'targetSecret': 'shhhh', - 'username': 'john-wayne'}] - - sfv = solidfire.SolidFireDriver(configuration=self.configuration) - with mock.patch.object(sfv, - '_get_sfaccounts_for_tenant', - return_value=fake_sfaccounts), \ - mock.patch.object(sfv, - '_get_account_create_availability', - return_value=fake_sfaccounts[0]): - - model_update = sfv.create_volume(testvol) - self.assertIsNotNone(model_update) - self.assertIsNone(model_update.get('provider_geometry', None)) - - @mock.patch.object(solidfire.SolidFireDriver, '_issue_api_request') - @mock.patch.object(solidfire.SolidFireDriver, '_create_template_account') - def test_create_volume_non_512e(self, - _mock_create_template_account, - _mock_issue_api_request): - _mock_issue_api_request.side_effect = self.fake_issue_api_request - _mock_create_template_account.return_value = 1 - testvol = {'project_id': 'testprjid', - 'name': 'testvol', - 'size': 1, - 'id': 
'a720b3c0-d1f0-11e1-9b23-0800200c9a66', - 'volume_type_id': None, - 'created_at': timeutils.utcnow()} - fake_sfaccounts = [{'accountID': 5, - 'name': 'testprjid', - 'targetSecret': 'shhhh', - 'username': 'john-wayne'}] - - sfv = solidfire.SolidFireDriver(configuration=self.configuration) - with mock.patch.object(sfv, - '_get_sfaccounts_for_tenant', - return_value=fake_sfaccounts), \ - mock.patch.object(sfv, - '_issue_api_request', - side_effect=self.fake_issue_api_request), \ - mock.patch.object(sfv, - '_get_account_create_availability', - return_value=fake_sfaccounts[0]): - - self.configuration.sf_emulate_512 = False - model_update = sfv.create_volume(testvol) - self.configuration.sf_emulate_512 = True - self.assertEqual('4096 4096', - model_update.get('provider_geometry', None)) - - def test_create_delete_snapshot(self): - testsnap = {'project_id': 'testprjid', - 'name': 'testvol', - 'volume_size': 1, - 'id': 'b831c4d1-d1f0-11e1-9b23-0800200c9a66', - 'volume_id': 'a720b3c0-d1f0-11e1-9b23-0800200c9a66', - 'volume_type_id': None, - 'created_at': timeutils.utcnow(), - 'provider_id': '8 99 None'} - - sfv = solidfire.SolidFireDriver(configuration=self.configuration) - fake_uuid = 'UUID-b831c4d1-d1f0-11e1-9b23-0800200c9a66' - with mock.patch.object( - solidfire.SolidFireDriver, - '_get_sf_snapshots', - return_value=[{'snapshotID': '5', - 'name': fake_uuid, - 'volumeID': 5}]), \ - mock.patch.object(sfv, - '_get_sfaccounts_for_tenant', - return_value=[{'accountID': 5, - 'name': 'testprjid'}]): - sfv.create_snapshot(testsnap) - sfv.delete_snapshot(testsnap) - - @mock.patch.object(solidfire.SolidFireDriver, '_issue_api_request') - @mock.patch.object(solidfire.SolidFireDriver, '_create_template_account') - def test_create_clone(self, - _mock_create_template_account, - _mock_issue_api_request): - _mock_issue_api_request.side_effect = self.fake_issue_api_request - _mock_create_template_account.return_value = 1 - _fake_get_snaps = [{'snapshotID': 5, 'name': 'testvol'}] - 
_fake_get_volume = ( - {'volumeID': 99, - 'name': 'UUID-a720b3c0-d1f0-11e1-9b23-0800200c9a66', - 'attributes': {}}) - - testvol = {'project_id': 'testprjid', - 'name': 'testvol', - 'size': 1, - 'id': 'a720b3c0-d1f0-11e1-9b23-0800200c9a66', - 'volume_type_id': None, - 'created_at': timeutils.utcnow()} - - testvol_b = {'project_id': 'testprjid', - 'name': 'testvol', - 'size': 1, - 'id': 'b831c4d1-d1f0-11e1-9b23-0800200c9a66', - 'volume_type_id': None, - 'created_at': timeutils.utcnow()} - - sfv = solidfire.SolidFireDriver(configuration=self.configuration) - with mock.patch.object(sfv, - '_get_sf_snapshots', - return_value=_fake_get_snaps), \ - mock.patch.object(sfv, - '_get_sf_volume', - return_value=_fake_get_volume), \ - mock.patch.object(sfv, - '_issue_api_request', - side_effect=self.fake_issue_api_request), \ - mock.patch.object(sfv, - '_get_sfaccounts_for_tenant', - return_value=[]), \ - mock.patch.object(sfv, - '_get_model_info', - return_value={}): - sfv.create_cloned_volume(testvol_b, testvol) - - def test_initialize_connector_with_blocksizes(self): - connector = {'initiator': 'iqn.2012-07.org.fake:01'} - testvol = {'project_id': 'testprjid', - 'name': 'testvol', - 'size': 1, - 'id': 'a720b3c0-d1f0-11e1-9b23-0800200c9a66', - 'volume_type_id': None, - 'provider_location': '10.10.7.1:3260 iqn.2010-01.com.' 
- 'solidfire:87hg.uuid-2cc06226-cc' - '74-4cb7-bd55-14aed659a0cc.4060 0', - 'provider_auth': 'CHAP stack-1-a60e2611875f40199931f2' - 'c76370d66b 2FE0CQ8J196R', - 'provider_geometry': '4096 4096', - 'created_at': timeutils.utcnow(), - } - - sfv = solidfire.SolidFireDriver(configuration=self.configuration) - properties = sfv.initialize_connection(testvol, connector) - self.assertEqual('4096', properties['data']['physical_block_size']) - self.assertEqual('4096', properties['data']['logical_block_size']) - self.assertTrue(properties['data']['discard']) - - def test_create_volume_fails(self): - # NOTE(JDG) This test just fakes update_cluster_status - # this is inentional for this test - self.mock_object(solidfire.SolidFireDriver, - '_update_cluster_status', - self.fake_update_cluster_status) - self.mock_object(solidfire.SolidFireDriver, - '_issue_api_request', - self.fake_issue_api_request) - testvol = {'project_id': 'testprjid', - 'name': 'testvol', - 'size': 1, - 'id': 'a720b3c0-d1f0-11e1-9b23-0800200c9a66', - 'created_at': timeutils.utcnow()} - sfv = solidfire.SolidFireDriver(configuration=self.configuration) - self.mock_object(solidfire.SolidFireDriver, - '_issue_api_request', - self.fake_issue_api_request_fails) - try: - sfv.create_volume(testvol) - self.fail("Should have thrown Error") - except Exception: - pass - - def test_create_sfaccount(self): - sfv = solidfire.SolidFireDriver(configuration=self.configuration) - self.mock_object(solidfire.SolidFireDriver, - '_issue_api_request', - self.fake_issue_api_request) - account = sfv._create_sfaccount('project-id') - self.assertIsNotNone(account) - - def test_create_sfaccount_fails(self): - sfv = solidfire.SolidFireDriver(configuration=self.configuration) - self.mock_object(solidfire.SolidFireDriver, - '_issue_api_request', - self.fake_issue_api_request_fails) - self.assertRaises(exception.SolidFireAPIException, - sfv._create_sfaccount, 'project-id') - - def test_get_sfaccount_by_name(self): - sfv = 
solidfire.SolidFireDriver(configuration=self.configuration) - self.mock_object(solidfire.SolidFireDriver, - '_issue_api_request', - self.fake_issue_api_request) - account = sfv._get_sfaccount_by_name('some-name') - self.assertIsNotNone(account) - - def test_get_sfaccount_by_name_fails(self): - sfv = solidfire.SolidFireDriver(configuration=self.configuration) - self.mock_object(solidfire.SolidFireDriver, - '_issue_api_request', - self.fake_issue_api_request_fails) - self.assertRaises(exception.SolidFireAPIException, - sfv._get_sfaccount_by_name, 'some-name') - - def test_delete_volume(self): - vol_id = 'a720b3c0-d1f0-11e1-9b23-0800200c9a66' - testvol = {'project_id': 'testprjid', - 'name': 'test_volume', - 'size': 1, - 'id': vol_id, - 'name_id': vol_id, - 'created_at': timeutils.utcnow(), - 'provider_id': '1 5 None', - 'multiattach': True - } - fake_sfaccounts = [{'accountID': 5, - 'name': 'testprjid', - 'targetSecret': 'shhhh', - 'username': 'john-wayne'}] - - get_vol_result = [{'volumeID': 5, - 'name': 'test_volume', - 'accountID': 25, - 'sliceCount': 1, - 'totalSize': 1 * units.Gi, - 'enable512e': True, - 'access': "readWrite", - 'status': "active", - 'attributes': {}, - 'qos': None, - 'iqn': 'super_fake_iqn'}] - - mod_conf = self.configuration - mod_conf.sf_enable_vag = True - sfv = solidfire.SolidFireDriver(configuration=mod_conf) - with mock.patch.object(sfv, - '_get_sfaccounts_for_tenant', - return_value=fake_sfaccounts), \ - mock.patch.object(sfv, - '_get_volumes_for_account', - return_value=get_vol_result), \ - mock.patch.object(sfv, - '_issue_api_request'), \ - mock.patch.object(sfv, - '_remove_volume_from_vags') as rem_vol: - - sfv.delete_volume(testvol) - rem_vol.assert_called_with(get_vol_result[0]['volumeID']) - - def test_delete_volume_no_volume_on_backend(self): - fake_sfaccounts = [{'accountID': 5, - 'name': 'testprjid', - 'targetSecret': 'shhhh', - 'username': 'john-wayne'}] - fake_no_volumes = [] - vol_id = 'a720b3c0-d1f0-11e1-9b23-0800200c9a66' - 
testvol = {'project_id': 'testprjid', - 'name': 'no-name', - 'size': 1, - 'id': vol_id, - 'name_id': vol_id, - 'created_at': timeutils.utcnow()} - - sfv = solidfire.SolidFireDriver(configuration=self.configuration) - with mock.patch.object(sfv, - '_get_sfaccounts_for_tenant', - return_value=fake_sfaccounts), \ - mock.patch.object(sfv, - '_get_volumes_for_account', - return_value=fake_no_volumes): - sfv.delete_volume(testvol) - - def test_delete_snapshot_no_snapshot_on_backend(self): - fake_sfaccounts = [{'accountID': 5, - 'name': 'testprjid', - 'targetSecret': 'shhhh', - 'username': 'john-wayne'}] - fake_no_volumes = [] - snap_id = 'a720b3c0-d1f0-11e1-9b23-0800200c9a66' - testsnap = {'project_id': 'testprjid', - 'name': 'no-name', - 'size': 1, - 'id': snap_id, - 'name_id': snap_id, - 'volume_id': 'b831c4d1-d1f0-11e1-9b23-0800200c9a66', - 'created_at': timeutils.utcnow()} - - sfv = solidfire.SolidFireDriver(configuration=self.configuration) - with mock.patch.object(sfv, - '_get_sfaccounts_for_tenant', - return_value=fake_sfaccounts), \ - mock.patch.object(sfv, - '_get_volumes_for_account', - return_value=fake_no_volumes): - sfv.delete_snapshot(testsnap) - - def test_extend_volume(self): - self.mock_object(solidfire.SolidFireDriver, - '_issue_api_request', - self.fake_issue_api_request) - testvol = {'project_id': 'testprjid', - 'name': 'test_volume', - 'size': 1, - 'id': 'a720b3c0-d1f0-11e1-9b23-0800200c9a66', - 'created_at': timeutils.utcnow()} - - sfv = solidfire.SolidFireDriver(configuration=self.configuration) - sfv.extend_volume(testvol, 2) - - def test_extend_volume_fails_no_volume(self): - self.mock_object(solidfire.SolidFireDriver, - '_issue_api_request', - self.fake_issue_api_request) - testvol = {'project_id': 'testprjid', - 'name': 'no-name', - 'size': 1, - 'id': 'not-found'} - sfv = solidfire.SolidFireDriver(configuration=self.configuration) - self.assertRaises(exception.VolumeNotFound, - sfv.extend_volume, - testvol, 2) - - def 
test_extend_volume_fails_account_lookup(self): - # NOTE(JDG) This test just fakes update_cluster_status - # this is intentional for this test - self.mock_object(solidfire.SolidFireDriver, - '_update_cluster_status', - self.fake_update_cluster_status) - self.mock_object(solidfire.SolidFireDriver, - '_issue_api_request', - self.fake_issue_api_request) - testvol = {'project_id': 'testprjid', - 'name': 'no-name', - 'size': 1, - 'id': 'a720b3c0-d1f0-11e1-9b23-0800200c9a66', - 'created_at': timeutils.utcnow()} - - sfv = solidfire.SolidFireDriver(configuration=self.configuration) - self.mock_object(solidfire.SolidFireDriver, - '_issue_api_request', - self.fake_issue_api_request_fails) - self.assertRaises(exception.SolidFireAPIException, - sfv.extend_volume, - testvol, 2) - - def test_set_by_qos_spec_with_scoping(self): - size = 1 - sfv = solidfire.SolidFireDriver(configuration=self.configuration) - qos_ref = qos_specs.create(self.ctxt, - 'qos-specs-1', {'qos:minIOPS': '1000', - 'qos:maxIOPS': '10000', - 'qos:burstIOPS': '20000'}) - type_ref = volume_types.create(self.ctxt, - "type1", {"qos:minIOPS": "100", - "qos:burstIOPS": "300", - "qos:maxIOPS": "200"}) - qos_specs.associate_qos_with_type(self.ctxt, - qos_ref['id'], - type_ref['id']) - qos = sfv._set_qos_by_volume_type(self.ctxt, type_ref['id'], size) - self.assertEqual(self.expected_qos_results, qos) - - def test_set_by_qos_spec(self): - size = 1 - sfv = solidfire.SolidFireDriver(configuration=self.configuration) - qos_ref = qos_specs.create(self.ctxt, - 'qos-specs-1', {'minIOPS': '1000', - 'maxIOPS': '10000', - 'burstIOPS': '20000'}) - type_ref = volume_types.create(self.ctxt, - "type1", {"qos:minIOPS": "100", - "qos:burstIOPS": "300", - "qos:maxIOPS": "200"}) - qos_specs.associate_qos_with_type(self.ctxt, - qos_ref['id'], - type_ref['id']) - qos = sfv._set_qos_by_volume_type(self.ctxt, type_ref['id'], size) - self.assertEqual(self.expected_qos_results, qos) - - @ddt.file_data("scaled_iops_test_data.json") - 
@ddt.unpack - def test_scaled_qos_spec_by_type(self, argument): - sfv = solidfire.SolidFireDriver(configuration=self.configuration) - size = argument[0].pop('size') - type_ref = volume_types.create(self.ctxt, "type1", argument[0]) - qos = sfv._set_qos_by_volume_type(self.ctxt, type_ref['id'], size) - self.assertEqual(argument[1], qos) - - @ddt.file_data("scaled_iops_invalid_data.json") - @ddt.unpack - def test_set_scaled_qos_by_type_invalid(self, inputs): - sfv = solidfire.SolidFireDriver(configuration=self.configuration) - size = inputs[0].pop('size') - type_ref = volume_types.create(self.ctxt, "type1", inputs[0]) - self.assertRaises(exception.InvalidQoSSpecs, - sfv._set_qos_by_volume_type, - self.ctxt, - type_ref['id'], - size) - - def test_accept_transfer(self): - sfv = solidfire.SolidFireDriver(configuration=self.configuration) - self.mock_object(solidfire.SolidFireDriver, - '_issue_api_request', - self.fake_issue_api_request) - testvol = {'project_id': 'testprjid', - 'name': 'test_volume', - 'size': 1, - 'id': 'a720b3c0-d1f0-11e1-9b23-0800200c9a66', - 'created_at': timeutils.utcnow()} - expected = {'provider_auth': 'CHAP cinder-new_project 123456789012'} - self.assertEqual(expected, - sfv.accept_transfer(self.ctxt, - testvol, - 'new_user', 'new_project')) - - def test_accept_transfer_volume_not_found_raises(self): - sfv = solidfire.SolidFireDriver(configuration=self.configuration) - self.mock_object(solidfire.SolidFireDriver, - '_issue_api_request', - self.fake_issue_api_request) - testvol = {'project_id': 'testprjid', - 'name': 'test_volume', - 'size': 1, - 'id': 'aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa', - 'created_at': timeutils.utcnow()} - self.assertRaises(exception.VolumeNotFound, - sfv.accept_transfer, - self.ctxt, - testvol, - 'new_user', - 'new_project') - - def test_retype(self): - sfv = solidfire.SolidFireDriver(configuration=self.configuration) - self.mock_object(solidfire.SolidFireDriver, - '_issue_api_request', - self.fake_issue_api_request) - 
type_ref = volume_types.create(self.ctxt, - "type1", {"qos:minIOPS": "500", - "qos:burstIOPS": "2000", - "qos:maxIOPS": "1000"}) - diff = {'encryption': {}, 'qos_specs': {}, - 'extra_specs': {'qos:burstIOPS': ('10000', u'2000'), - 'qos:minIOPS': ('1000', u'500'), - 'qos:maxIOPS': ('10000', u'1000')}} - host = None - testvol = {'project_id': 'testprjid', - 'name': 'test_volume', - 'size': 1, - 'id': 'a720b3c0-d1f0-11e1-9b23-0800200c9a66', - 'created_at': timeutils.utcnow()} - - self.assertTrue(sfv.retype(self.ctxt, - testvol, - type_ref, diff, host)) - - def test_retype_with_qos_spec(self): - test_type = {'name': 'sf-1', - 'qos_specs_id': 'fb0576d7-b4b5-4cad-85dc-ca92e6a497d1', - 'deleted': False, - 'created_at': '2014-02-06 04:58:11', - 'updated_at': None, - 'extra_specs': {}, - 'deleted_at': None, - 'id': 'e730e97b-bc7d-4af3-934a-32e59b218e81'} - - test_qos_spec = {'id': 'asdfafdasdf', - 'specs': {'minIOPS': '1000', - 'maxIOPS': '2000', - 'burstIOPS': '3000'}} - - def _fake_get_volume_type(ctxt, type_id): - return test_type - - def _fake_get_qos_spec(ctxt, spec_id): - return test_qos_spec - - self.mock_object(solidfire.SolidFireDriver, - '_issue_api_request', - self.fake_issue_api_request) - self.mock_object(volume_types, 'get_volume_type', - _fake_get_volume_type) - self.mock_object(qos_specs, 'get_qos_specs', - _fake_get_qos_spec) - - sfv = solidfire.SolidFireDriver(configuration=self.configuration) - - diff = {'encryption': {}, 'extra_specs': {}, - 'qos_specs': {'burstIOPS': ('10000', '2000'), - 'minIOPS': ('1000', '500'), - 'maxIOPS': ('10000', '1000')}} - host = None - testvol = {'project_id': 'testprjid', - 'name': 'test_volume', - 'size': 1, - 'id': 'a720b3c0-d1f0-11e1-9b23-0800200c9a66', - 'created_at': timeutils.utcnow()} - - sfv = solidfire.SolidFireDriver(configuration=self.configuration) - self.assertTrue(sfv.retype(self.ctxt, - testvol, - test_type, diff, host)) - - def test_update_cluster_status(self): - self.mock_object(solidfire.SolidFireDriver, - 
'_issue_api_request', - self.fake_issue_api_request) - sfv = solidfire.SolidFireDriver(configuration=self.configuration) - sfv._update_cluster_status() - self.assertEqual(99.0, sfv.cluster_stats['free_capacity_gb']) - self.assertEqual(100.0, sfv.cluster_stats['total_capacity_gb']) - - def test_update_cluster_status_mvip_unreachable(self): - self.mock_object(solidfire.SolidFireDriver, - '_issue_api_request', - self.fake_issue_api_request) - sfv = solidfire.SolidFireDriver(configuration=self.configuration) - with mock.patch.object(sfv, - '_issue_api_request', - side_effect=self.fake_issue_api_request_fails): - sfv._update_cluster_status() - self.assertEqual(0, sfv.cluster_stats['free_capacity_gb']) - self.assertEqual(0, sfv.cluster_stats['total_capacity_gb']) - - def test_manage_existing_volume(self): - external_ref = {'name': 'existing volume', 'source-id': 5} - testvol = {'project_id': 'testprjid', - 'name': 'testvol', - 'size': 1, - 'id': 'a720b3c0-d1f0-11e1-9b23-0800200c9a66', - 'created_at': timeutils.utcnow()} - self.mock_object(solidfire.SolidFireDriver, - '_issue_api_request', - self.fake_issue_api_request) - sfv = solidfire.SolidFireDriver(configuration=self.configuration) - model_update = sfv.manage_existing(testvol, external_ref) - self.assertIsNotNone(model_update) - self.assertIsNone(model_update.get('provider_geometry', None)) - - def test_manage_existing_get_size(self): - external_ref = {'name': 'existing volume', 'source-id': 5} - testvol = {'project_id': 'testprjid', - 'name': 'testvol', - 'size': 1, - 'id': 'a720b3c0-d1f0-11e1-9b23-0800200c9a66', - 'created_at': timeutils.utcnow()} - mock_issue_api_request = self.mock_object(solidfire.SolidFireDriver, - '_issue_api_request') - mock_issue_api_request.side_effect = self.fake_issue_api_request - sfv = solidfire.SolidFireDriver(configuration=self.configuration) - size = sfv.manage_existing_get_size(testvol, external_ref) - self.assertEqual(2, size) - - @mock.patch.object(solidfire.SolidFireDriver, 
'_issue_api_request') - @mock.patch.object(solidfire.SolidFireDriver, '_create_template_account') - def test_create_volume_for_migration(self, - _mock_create_template_account, - _mock_issue_api_request): - _mock_issue_api_request.side_effect = self.fake_issue_api_request - _mock_create_template_account.return_value = 1 - testvol = {'project_id': 'testprjid', - 'name': 'testvol', - 'size': 1, - 'id': 'b830b3c0-d1f0-11e1-9b23-1900200c9a77', - 'volume_type_id': None, - 'created_at': timeutils.utcnow(), - 'migration_status': 'target:' - 'a720b3c0-d1f0-11e1-9b23-0800200c9a66'} - fake_sfaccounts = [{'accountID': 5, - 'name': 'testprjid', - 'targetSecret': 'shhhh', - 'username': 'john-wayne'}] - - def _fake_do_v_create(project_id, params): - return project_id, params - - sfv = solidfire.SolidFireDriver(configuration=self.configuration) - with mock.patch.object(sfv, - '_get_sfaccounts_for_tenant', - return_value=fake_sfaccounts), \ - mock.patch.object(sfv, - '_get_account_create_availability', - return_value=fake_sfaccounts[0]), \ - mock.patch.object(sfv, - '_do_volume_create', - side_effect=_fake_do_v_create): - - proj_id, sf_vol_object = sfv.create_volume(testvol) - self.assertEqual('a720b3c0-d1f0-11e1-9b23-0800200c9a66', - sf_vol_object['attributes']['uuid']) - self.assertEqual('b830b3c0-d1f0-11e1-9b23-1900200c9a77', - sf_vol_object['attributes']['migration_uuid']) - self.assertEqual('UUID-a720b3c0-d1f0-11e1-9b23-0800200c9a66', - sf_vol_object['name']) - - @mock.patch.object(solidfire.SolidFireDriver, '_update_cluster_status') - @mock.patch.object(solidfire.SolidFireDriver, '_issue_api_request') - @mock.patch.object(solidfire.SolidFireDriver, '_get_sfaccount') - @mock.patch.object(solidfire.SolidFireDriver, '_get_sf_volume') - @mock.patch.object(solidfire.SolidFireDriver, '_create_image_volume') - def test_verify_image_volume_out_of_date(self, - _mock_create_image_volume, - _mock_get_sf_volume, - _mock_get_sfaccount, - _mock_issue_api_request, - 
_mock_update_cluster_status): - fake_sf_vref = { - 'status': 'active', 'volumeID': 1, - 'attributes': { - 'image_info': - {'image_updated_at': '2014-12-17T00:16:23+00:00', - 'image_id': '17c550bb-a411-44c0-9aaf-0d96dd47f501', - 'image_name': 'fake-image', - 'image_created_at': '2014-12-17T00:16:23+00:00'}}} - - _mock_update_cluster_status.return_value = None - _mock_issue_api_request.side_effect = ( - self.fake_issue_api_request) - _mock_get_sfaccount.return_value = {'username': 'openstack-vtemplate', - 'accountID': 7777} - _mock_get_sf_volume.return_value = fake_sf_vref - _mock_create_image_volume.return_value = fake_sf_vref - - image_meta = {'id': '17c550bb-a411-44c0-9aaf-0d96dd47f501', - 'updated_at': datetime.datetime(2013, 9, 28, - 15, 27, 36, - 325355)} - image_service = 'null' - - sfv = solidfire.SolidFireDriver(configuration=self.configuration) - sfv._verify_image_volume(self.ctxt, image_meta, image_service) - self.assertTrue(_mock_create_image_volume.called) - - @mock.patch.object(solidfire.SolidFireDriver, '_update_cluster_status') - @mock.patch.object(solidfire.SolidFireDriver, '_issue_api_request') - @mock.patch.object(solidfire.SolidFireDriver, '_get_sfaccount') - @mock.patch.object(solidfire.SolidFireDriver, '_get_sf_volume') - @mock.patch.object(solidfire.SolidFireDriver, '_create_image_volume') - def test_verify_image_volume_ok(self, - _mock_create_image_volume, - _mock_get_sf_volume, - _mock_get_sfaccount, - _mock_issue_api_request, - _mock_update_cluster_status): - - _mock_issue_api_request.side_effect = self.fake_issue_api_request - _mock_update_cluster_status.return_value = None - _mock_get_sfaccount.return_value = {'username': 'openstack-vtemplate', - 'accountID': 7777} - _mock_get_sf_volume.return_value =\ - {'status': 'active', 'volumeID': 1, - 'attributes': { - 'image_info': - {'image_updated_at': '2013-09-28T15:27:36.325355', - 'image_id': '17c550bb-a411-44c0-9aaf-0d96dd47f501', - 'image_name': 'fake-image', - 'image_created_at': 
'2014-12-17T00:16:23+00:00'}}} - _mock_create_image_volume.return_value = None - - image_meta = {'id': '17c550bb-a411-44c0-9aaf-0d96dd47f501', - 'updated_at': datetime.datetime(2013, 9, 28, - 15, 27, 36, - 325355)} - image_service = 'null' - - sfv = solidfire.SolidFireDriver(configuration=self.configuration) - - sfv._verify_image_volume(self.ctxt, image_meta, image_service) - self.assertFalse(_mock_create_image_volume.called) - - @mock.patch.object(solidfire.SolidFireDriver, '_issue_api_request') - def test_clone_image_not_configured(self, _mock_issue_api_request): - _mock_issue_api_request.side_effect = self.fake_issue_api_request - - sfv = solidfire.SolidFireDriver(configuration=self.configuration) - self.assertEqual((None, False), - sfv.clone_image(self.ctxt, - self.mock_volume, - 'fake', - self.fake_image_meta, - 'fake')) - - @mock.patch.object(solidfire.SolidFireDriver, '_create_template_account') - @mock.patch.object(solidfire.SolidFireDriver, '_create_image_volume') - def test_clone_image_authorization(self, - _mock_create_image_volume, - _mock_create_template_account): - fake_sf_vref = { - 'status': 'active', 'volumeID': 1, - 'attributes': { - 'image_info': - {'image_updated_at': '2014-12-17T00:16:23+00:00', - 'image_id': '155d900f-4e14-4e4c-a73d-069cbf4541e6', - 'image_name': 'fake-image', - 'image_created_at': '2014-12-17T00:16:23+00:00'}}} - _mock_create_image_volume.return_value = fake_sf_vref - _mock_create_template_account.return_value = 1 - - self.configuration.sf_allow_template_caching = True - sfv = solidfire.SolidFireDriver(configuration=self.configuration) - - # Make sure if it's NOT public and we're NOT the owner it - # doesn't try and cache - timestamp = datetime.datetime(2011, 1, 1, 1, 2, 3) - _fake_image_meta = { - 'id': '155d900f-4e14-4e4c-a73d-069cbf4541e6', - 'name': 'fakeimage123456', - 'created_at': timestamp, - 'updated_at': timestamp, - 'deleted_at': None, - 'deleted': False, - 'status': 'active', - 'visibility': 'private', - 
'protected': False, - 'container_format': 'raw', - 'disk_format': 'raw', - 'owner': 'wrong-owner', - 'properties': {'kernel_id': 'nokernel', - 'ramdisk_id': 'nokernel', - 'architecture': 'x86_64'}} - - with mock.patch.object(sfv, '_do_clone_volume', - return_value=('fe', 'fi', 'fo')): - self.assertEqual((None, False), - sfv.clone_image(self.ctxt, - self.mock_volume, - 'fake', - _fake_image_meta, - self.fake_image_service)) - - # And is_public False, but the correct owner does work - _fake_image_meta['owner'] = 'testprjid' - self.assertEqual( - ('fo', True), - sfv.clone_image( - self.ctxt, - self.mock_volume, - 'fake', - _fake_image_meta, - self.fake_image_service)) - - # And is_public True, even if not the correct owner - _fake_image_meta['is_public'] = True - _fake_image_meta['owner'] = 'wrong-owner' - self.assertEqual( - ('fo', True), - sfv.clone_image(self.ctxt, - self.mock_volume, - 'fake', - _fake_image_meta, - self.fake_image_service)) - # And using the new V2 visibility tag - _fake_image_meta['visibility'] = 'public' - _fake_image_meta['owner'] = 'wrong-owner' - self.assertEqual( - ('fo', True), - sfv.clone_image(self.ctxt, - self.mock_volume, - 'fake', - _fake_image_meta, - self.fake_image_service)) - - def test_create_template_no_account(self): - sfv = solidfire.SolidFireDriver(configuration=self.configuration) - - def _fake_issue_api_req(method, params, version=0): - if 'GetAccountByName' in method: - raise exception.SolidFireAPIException - return {'result': {'accountID': 1}} - - with mock.patch.object(sfv, - '_issue_api_request', - side_effect=_fake_issue_api_req): - self.assertEqual(1, - sfv._create_template_account('foo')) - - def test_configured_svip(self): - sfv = solidfire.SolidFireDriver(configuration=self.configuration) - - def _fake_get_volumes(account_id, endpoint=None): - return [{'volumeID': 1, - 'iqn': ''}] - - def _fake_get_cluster_info(): - return {'clusterInfo': {'svip': '10.10.10.10', - 'mvip': '1.1.1.1'}} - - with mock.patch.object(sfv, 
- '_get_volumes_by_sfaccount', - side_effect=_fake_get_volumes),\ - mock.patch.object(sfv, - '_issue_api_request', - side_effect=self.fake_issue_api_request): - - sfaccount = {'targetSecret': 'yakitiyak', - 'accountID': 5, - 'username': 'bobthebuilder'} - v = sfv._get_model_info(sfaccount, 1) - self.assertEqual('1.1.1.1:3260 0', v['provider_location']) - - configured_svip = '9.9.9.9:6500' - sfv.active_cluster_info['svip'] = configured_svip - v = sfv._get_model_info(sfaccount, 1) - self.assertEqual('%s 0' % configured_svip, v['provider_location']) - - def test_init_volume_mappings(self): - sfv = solidfire.SolidFireDriver(configuration=self.configuration) - - vid_1 = 'c9125d6d-22ff-4cc3-974d-d4e350df9c91' - vid_2 = '79883868-6933-47a1-a362-edfbf8d55a18' - sid_1 = 'e3caa4fa-485e-45ca-970e-1d3e693a2520' - project_1 = 'e6fb073c-11f0-4f4c-897c-90e7c7c4bcf8' - project_2 = '4ff32607-305c-4a6b-a51a-0dd33124eecf' - - vrefs = [{'id': vid_1, - 'project_id': project_1, - 'provider_id': None}, - {'id': vid_2, - 'project_id': project_2, - 'provider_id': 22}] - snaprefs = [{'id': sid_1, - 'project_id': project_1, - 'provider_id': None, - 'volume_id': vid_1}] - sf_vols = [{'volumeID': 99, - 'name': 'UUID-' + vid_1, - 'accountID': 100}, - {'volumeID': 22, - 'name': 'UUID-' + vid_2, - 'accountID': 200}] - sf_snaps = [{'snapshotID': 1, - 'name': 'UUID-' + sid_1, - 'volumeID': 99}] - - def _fake_issue_api_req(method, params, version=0): - if 'ListActiveVolumes' in method: - return {'result': {'volumes': sf_vols}} - if 'ListSnapshots'in method: - return {'result': {'snapshots': sf_snaps}} - - with mock.patch.object( - sfv, '_issue_api_request', side_effect=_fake_issue_api_req): - volume_updates, snapshot_updates = sfv.update_provider_info( - vrefs, snaprefs) - self.assertEqual('99 100 53c8be1e-89e2-4f7f-a2e3-7cb84c47e0ec', - volume_updates[0]['provider_id']) - self.assertEqual(1, len(volume_updates)) - - self.assertEqual('1 99 53c8be1e-89e2-4f7f-a2e3-7cb84c47e0ec', - 
snapshot_updates[0]['provider_id']) - self.assertEqual(1, len(snapshot_updates)) - - def test_get_sf_volume_missing_attributes(self): - sfv = solidfire.SolidFireDriver(configuration=self.configuration) - test_name = "existing_volume" - fake_response = {'result': { - 'volumes': [{'volumeID': 5, - 'name': test_name, - 'accountID': 8, - 'sliceCount': 1, - 'totalSize': 1 * units.Gi, - 'enable512e': True, - 'access': "readWrite", - 'status': "active", - 'qos': None, - 'iqn': test_name}]}} - - def _fake_issue_api_req(method, params, version=0): - return fake_response - - with mock.patch.object( - sfv, '_issue_api_request', side_effect=_fake_issue_api_req): - self.assertEqual(5, sfv._get_sf_volume(test_name, 8)['volumeID']) - - def test_sf_init_conn_with_vag(self): - # Verify with the _enable_vag conf set that we correctly create a VAG. - mod_conf = self.configuration - mod_conf.sf_enable_vag = True - sfv = solidfire.SolidFireDriver(configuration=mod_conf) - testvol = {'project_id': 'testprjid', - 'name': 'testvol', - 'size': 1, - 'id': 'a720b3c0-d1f0-11e1-9b23-0800200c9a66', - 'volume_type_id': None, - 'provider_location': '10.10.7.1:3260 iqn.2010-01.com.' 
- 'solidfire:87hg.uuid-2cc06226-cc' - '74-4cb7-bd55-14aed659a0cc.4060 0', - 'provider_auth': 'CHAP stack-1-a60e2611875f40199931f2' - 'c76370d66b 2FE0CQ8J196R', - 'provider_geometry': '4096 4096', - 'created_at': timeutils.utcnow(), - 'provider_id': "1 1 1" - } - connector = {'initiator': 'iqn.2012-07.org.fake:01'} - provider_id = testvol['provider_id'] - vol_id = int(provider_id.split()[0]) - vag_id = 1 - - with mock.patch.object(sfv, - '_safe_create_vag', - return_value=vag_id) as create_vag, \ - mock.patch.object(sfv, - '_add_volume_to_vag') as add_vol: - sfv._sf_initialize_connection(testvol, connector) - create_vag.assert_called_with(connector['initiator'], - vol_id) - add_vol.assert_called_with(vol_id, - connector['initiator'], - vag_id) - - def test_sf_term_conn_with_vag_rem_vag(self): - # Verify we correctly remove an empty VAG on detach. - mod_conf = self.configuration - mod_conf.sf_enable_vag = True - sfv = solidfire.SolidFireDriver(configuration=mod_conf) - testvol = {'project_id': 'testprjid', - 'name': 'testvol', - 'size': 1, - 'id': 'a720b3c0-d1f0-11e1-9b23-0800200c9a66', - 'volume_type_id': None, - 'provider_location': '10.10.7.1:3260 iqn.2010-01.com.' 
- 'solidfire:87hg.uuid-2cc06226-cc' - '74-4cb7-bd55-14aed659a0cc.4060 0', - 'provider_auth': 'CHAP stack-1-a60e2611875f40199931f2' - 'c76370d66b 2FE0CQ8J196R', - 'provider_geometry': '4096 4096', - 'created_at': timeutils.utcnow(), - 'provider_id': "1 1 1", - 'multiattach': False - } - connector = {'initiator': 'iqn.2012-07.org.fake:01'} - vag_id = 1 - vags = [{'attributes': {}, - 'deletedVolumes': [], - 'initiators': [connector['initiator']], - 'name': 'fakeiqn', - 'volumeAccessGroupID': vag_id, - 'volumes': [1], - 'virtualNetworkIDs': []}] - - with mock.patch.object(sfv, - '_get_vags_by_name', - return_value=vags), \ - mock.patch.object(sfv, - '_remove_vag') as rem_vag: - sfv._sf_terminate_connection(testvol, connector, False) - rem_vag.assert_called_with(vag_id) - - def test_sf_term_conn_with_vag_rem_vol(self): - # Verify we correctly remove a the volume from a non-empty VAG. - mod_conf = self.configuration - mod_conf.sf_enable_vag = True - sfv = solidfire.SolidFireDriver(configuration=mod_conf) - testvol = {'project_id': 'testprjid', - 'name': 'testvol', - 'size': 1, - 'id': 'a720b3c0-d1f0-11e1-9b23-0800200c9a66', - 'volume_type_id': None, - 'provider_location': '10.10.7.1:3260 iqn.2010-01.com.' 
- 'solidfire:87hg.uuid-2cc06226-cc' - '74-4cb7-bd55-14aed659a0cc.4060 0', - 'provider_auth': 'CHAP stack-1-a60e2611875f40199931f2' - 'c76370d66b 2FE0CQ8J196R', - 'provider_geometry': '4096 4096', - 'created_at': timeutils.utcnow(), - 'provider_id': "1 1 1", - 'multiattach': False - } - provider_id = testvol['provider_id'] - vol_id = int(provider_id.split()[0]) - connector = {'initiator': 'iqn.2012-07.org.fake:01'} - vag_id = 1 - vags = [{'attributes': {}, - 'deletedVolumes': [], - 'initiators': [connector['initiator']], - 'name': 'fakeiqn', - 'volumeAccessGroupID': vag_id, - 'volumes': [1, 2], - 'virtualNetworkIDs': []}] - - with mock.patch.object(sfv, - '_get_vags_by_name', - return_value=vags), \ - mock.patch.object(sfv, - '_remove_volume_from_vag') as rem_vag: - sfv._sf_terminate_connection(testvol, connector, False) - rem_vag.assert_called_with(vol_id, vag_id) - - def test_safe_create_vag_simple(self): - # Test the sunny day call straight into _create_vag. - sfv = solidfire.SolidFireDriver(configuration=self.configuration) - iqn = 'fake_iqn' - vol_id = 1 - - with mock.patch.object(sfv, - '_get_vags_by_name', - return_value=[]), \ - mock.patch.object(sfv, - '_create_vag') as mock_create_vag: - sfv._safe_create_vag(iqn, vol_id) - mock_create_vag.assert_called_with(iqn, vol_id) - - def test_safe_create_vag_matching_vag(self): - # Vag exists, resuse. 
- sfv = solidfire.SolidFireDriver(configuration=self.configuration) - iqn = 'TESTIQN' - vags = [{'attributes': {}, - 'deletedVolumes': [], - 'initiators': [iqn], - 'name': iqn, - 'volumeAccessGroupID': 1, - 'volumes': [1, 2], - 'virtualNetworkIDs': []}] - - with mock.patch.object(sfv, - '_get_vags_by_name', - return_value=vags), \ - mock.patch.object(sfv, - '_create_vag') as create_vag, \ - mock.patch.object(sfv, - '_add_initiator_to_vag') as add_iqn: - vag_id = sfv._safe_create_vag(iqn, None) - self.assertEqual(vag_id, vags[0]['volumeAccessGroupID']) - create_vag.assert_not_called() - add_iqn.assert_not_called() - - def test_safe_create_vag_reuse_vag(self): - # Reuse a matching vag. - sfv = solidfire.SolidFireDriver(configuration=self.configuration) - iqn = 'TESTIQN' - vags = [{'attributes': {}, - 'deletedVolumes': [], - 'initiators': [], - 'name': iqn, - 'volumeAccessGroupID': 1, - 'volumes': [1, 2], - 'virtualNetworkIDs': []}] - vag_id = vags[0]['volumeAccessGroupID'] - - with mock.patch.object(sfv, - '_get_vags_by_name', - return_value=vags), \ - mock.patch.object(sfv, - '_add_initiator_to_vag', - return_value=vag_id) as add_init: - res_vag_id = sfv._safe_create_vag(iqn, None) - self.assertEqual(res_vag_id, vag_id) - add_init.assert_called_with(iqn, vag_id) - - def test_create_vag_iqn_fail(self): - # Attempt to create a VAG with an already in-use initiator. 
- sfv = solidfire.SolidFireDriver(configuration=self.configuration) - iqn = 'TESTIQN' - vag_id = 1 - vol_id = 42 - - def throw_request(method, params, version): - msg = 'xExceededLimit: {}'.format(params['initiators'][0]) - raise exception.SolidFireAPIException(message=msg) - - with mock.patch.object(sfv, - '_issue_api_request', - side_effect=throw_request), \ - mock.patch.object(sfv, - '_safe_create_vag', - return_value=vag_id) as create_vag, \ - mock.patch.object(sfv, - '_purge_vags') as purge_vags: - res_vag_id = sfv._create_vag(iqn, vol_id) - self.assertEqual(res_vag_id, vag_id) - create_vag.assert_called_with(iqn, vol_id) - purge_vags.assert_not_called() - - def test_create_vag_limit_fail(self): - # Attempt to create a VAG with VAG limit reached. - sfv = solidfire.SolidFireDriver(configuration=self.configuration) - iqn = 'TESTIQN' - vag_id = 1 - vol_id = 42 - - def throw_request(method, params, version): - msg = 'xExceededLimit' - raise exception.SolidFireAPIException(message=msg) - - with mock.patch.object(sfv, - '_issue_api_request', - side_effect=throw_request), \ - mock.patch.object(sfv, - '_safe_create_vag', - return_value=vag_id) as create_vag, \ - mock.patch.object(sfv, - '_purge_vags') as purge_vags: - res_vag_id = sfv._create_vag(iqn, vol_id) - self.assertEqual(res_vag_id, vag_id) - create_vag.assert_called_with(iqn, vol_id) - purge_vags.assert_called_with() - - def test_add_initiator_duplicate(self): - # Thrown exception should yield vag_id. - sfv = solidfire.SolidFireDriver(configuration=self.configuration) - iqn = 'TESTIQN' - vag_id = 1 - - def throw_request(method, params, version): - msg = 'xAlreadyInVolumeAccessGroup' - raise exception.SolidFireAPIException(message=msg) - - with mock.patch.object(sfv, - '_issue_api_request', - side_effect=throw_request): - res_vag_id = sfv._add_initiator_to_vag(iqn, vag_id) - self.assertEqual(vag_id, res_vag_id) - - def test_add_initiator_missing_vag(self): - # Thrown exception should result in create_vag call. 
- sfv = solidfire.SolidFireDriver(configuration=self.configuration) - iqn = 'TESTIQN' - vag_id = 1 - - def throw_request(method, params, version): - msg = 'xVolumeAccessGroupIDDoesNotExist' - raise exception.SolidFireAPIException(message=msg) - - with mock.patch.object(sfv, - '_issue_api_request', - side_effect=throw_request), \ - mock.patch.object(sfv, - '_safe_create_vag', - return_value=vag_id) as mock_create_vag: - res_vag_id = sfv._add_initiator_to_vag(iqn, vag_id) - self.assertEqual(vag_id, res_vag_id) - mock_create_vag.assert_called_with(iqn) - - def test_add_volume_to_vag_duplicate(self): - # Thrown exception should yield vag_id - sfv = solidfire.SolidFireDriver(configuration=self.configuration) - iqn = 'TESTIQN' - vag_id = 1 - vol_id = 42 - - def throw_request(method, params, version): - msg = 'xAlreadyInVolumeAccessGroup' - raise exception.SolidFireAPIException(message=msg) - - with mock.patch.object(sfv, - '_issue_api_request', - side_effect=throw_request): - res_vag_id = sfv._add_volume_to_vag(vol_id, iqn, vag_id) - self.assertEqual(res_vag_id, vag_id) - - def test_add_volume_to_vag_missing_vag(self): - # Thrown exception should yield vag_id - sfv = solidfire.SolidFireDriver(configuration=self.configuration) - iqn = 'TESTIQN' - vag_id = 1 - vol_id = 42 - - def throw_request(method, params, version): - msg = 'xVolumeAccessGroupIDDoesNotExist' - raise exception.SolidFireAPIException(message=msg) - - with mock.patch.object(sfv, - '_issue_api_request', - side_effect=throw_request), \ - mock.patch.object(sfv, - '_safe_create_vag', - return_value=vag_id) as mock_create_vag: - res_vag_id = sfv._add_volume_to_vag(vol_id, iqn, vag_id) - self.assertEqual(res_vag_id, vag_id) - mock_create_vag.assert_called_with(iqn, vol_id) - - def test_remove_volume_from_vag_missing_volume(self): - # Volume not in VAG, throws. 
- sfv = solidfire.SolidFireDriver(configuration=self.configuration) - vag_id = 1 - vol_id = 42 - - def throw_request(method, params, version): - msg = 'xNotInVolumeAccessGroup' - raise exception.SolidFireAPIException(message=msg) - - with mock.patch.object(sfv, - '_issue_api_request', - side_effect=throw_request): - sfv._remove_volume_from_vag(vol_id, vag_id) - - def test_remove_volume_from_vag_missing_vag(self): - # Volume not in VAG, throws. - sfv = solidfire.SolidFireDriver(configuration=self.configuration) - vag_id = 1 - vol_id = 42 - - def throw_request(method, params, version): - msg = 'xVolumeAccessGroupIDDoesNotExist' - raise exception.SolidFireAPIException(message=msg) - - with mock.patch.object(sfv, - '_issue_api_request', - side_effect=throw_request): - sfv._remove_volume_from_vag(vol_id, vag_id) - - def test_remove_volume_from_vag_unknown_exception(self): - # Volume not in VAG, throws. - sfv = solidfire.SolidFireDriver(configuration=self.configuration) - vag_id = 1 - vol_id = 42 - - def throw_request(method, params, version): - msg = 'xUnknownException' - raise exception.SolidFireAPIException(message=msg) - - with mock.patch.object(sfv, - '_issue_api_request', - side_effect=throw_request): - self.assertRaises(exception.SolidFireAPIException, - sfv._remove_volume_from_vag, - vol_id, - vag_id) - - def test_remove_volume_from_vags(self): - # Remove volume from several VAGs. - sfv = solidfire.SolidFireDriver(configuration=self.configuration) - vol_id = 42 - vags = [{'volumeAccessGroupID': 1, - 'volumes': [vol_id]}, - {'volumeAccessGroupID': 2, - 'volumes': [vol_id, 43]}] - - with mock.patch.object(sfv, - '_base_get_vags', - return_value=vags), \ - mock.patch.object(sfv, - '_remove_volume_from_vag') as rem_vol: - sfv._remove_volume_from_vags(vol_id) - self.assertEqual(len(vags), rem_vol.call_count) - - def test_purge_vags(self): - # Remove subset of VAGs. 
- sfv = solidfire.SolidFireDriver(configuration=self.configuration) - vags = [{'initiators': [], - 'volumeAccessGroupID': 1, - 'deletedVolumes': [], - 'volumes': [], - 'attributes': {'openstack': True}}, - {'initiators': [], - 'volumeAccessGroupID': 2, - 'deletedVolumes': [], - 'volumes': [], - 'attributes': {'openstack': False}}, - {'initiators': [], - 'volumeAccessGroupID': 3, - 'deletedVolumes': [1], - 'volumes': [], - 'attributes': {'openstack': True}}, - {'initiators': [], - 'volumeAccessGroupID': 4, - 'deletedVolumes': [], - 'volumes': [1], - 'attributes': {'openstack': True}}, - {'initiators': ['fakeiqn'], - 'volumeAccessGroupID': 5, - 'deletedVolumes': [], - 'volumes': [], - 'attributes': {'openstack': True}}] - with mock.patch.object(sfv, - '_base_get_vags', - return_value=vags), \ - mock.patch.object(sfv, - '_remove_vag') as rem_vag: - sfv._purge_vags() - # Of the vags provided there is only one that is valid for purge - # based on the limits of no initiators, volumes, deleted volumes, - # and features the openstack attribute. - self.assertEqual(1, rem_vag.call_count) - rem_vag.assert_called_with(1) - - def test_sf_create_group_snapshot(self): - # Sunny day group snapshot creation. 
- sfv = solidfire.SolidFireDriver(configuration=self.configuration) - name = 'great_gsnap_name' - sf_volumes = [{'volumeID': 1}, {'volumeID': 42}] - expected_params = {'name': name, - 'volumes': [1, 42]} - fake_result = {'result': 'contrived_test'} - with mock.patch.object(sfv, - '_issue_api_request', - return_value=fake_result) as fake_api: - res = sfv._sf_create_group_snapshot(name, sf_volumes) - self.assertEqual('contrived_test', res) - fake_api.assert_called_with('CreateGroupSnapshot', - expected_params, - version='7.0') - - def test_group_snapshot_creator_sunny(self): - sfv = solidfire.SolidFireDriver(configuration=self.configuration) - gsnap_name = 'great_gsnap_name' - prefix = sfv.configuration.sf_volume_prefix - vol_uuids = ['one', 'two', 'three'] - active_vols = [{'name': prefix + 'one'}, - {'name': prefix + 'two'}, - {'name': prefix + 'three'}] - with mock.patch.object(sfv, - '_get_all_active_volumes', - return_value=active_vols),\ - mock.patch.object(sfv, - '_sf_create_group_snapshot', - return_value=None) as create: - sfv._group_snapshot_creator(gsnap_name, vol_uuids) - create.assert_called_with(gsnap_name, - active_vols) - - def test_group_snapshot_creator_rainy(self): - sfv = solidfire.SolidFireDriver(configuration=self.configuration) - gsnap_name = 'great_gsnap_name' - prefix = sfv.configuration.sf_volume_prefix - vol_uuids = ['one', 'two', 'three'] - active_vols = [{'name': prefix + 'one'}, - {'name': prefix + 'two'}] - with mock.patch.object(sfv, - '_get_all_active_volumes', - return_value=active_vols): - self.assertRaises(exception.SolidFireDriverException, - sfv._group_snapshot_creator, - gsnap_name, - vol_uuids) - - def test_create_temp_group_snapshot(self): - sfv = solidfire.SolidFireDriver(configuration=self.configuration) - cg = {'id': 'great_gsnap_name'} - prefix = sfv.configuration.sf_volume_prefix - tmp_name = prefix + cg['id'] + '-tmp' - vols = [{'id': 'one'}, - {'id': 'two'}, - {'id': 'three'}] - with mock.patch.object(sfv, - 
'_group_snapshot_creator', - return_value=None) as create: - sfv._create_temp_group_snapshot(cg, vols) - create.assert_called_with(tmp_name, ['one', 'two', 'three']) - - def test_list_group_snapshots(self): - sfv = solidfire.SolidFireDriver(configuration=self.configuration) - res = {'result': {'groupSnapshots': 'a_thing'}} - with mock.patch.object(sfv, - '_issue_api_request', - return_value=res): - result = sfv._list_group_snapshots() - self.assertEqual('a_thing', result) - - def test_get_group_snapshot_by_name(self): - sfv = solidfire.SolidFireDriver(configuration=self.configuration) - fake_snaps = [{'name': 'a_fantastic_name'}] - with mock.patch.object(sfv, - '_list_group_snapshots', - return_value=fake_snaps): - result = sfv._get_group_snapshot_by_name('a_fantastic_name') - self.assertEqual(fake_snaps[0], result) - - def test_delete_group_snapshot(self): - sfv = solidfire.SolidFireDriver(configuration=self.configuration) - gsnap_id = 1 - with mock.patch.object(sfv, - '_issue_api_request') as api_req: - sfv._delete_group_snapshot(gsnap_id) - api_req.assert_called_with('DeleteGroupSnapshot', - {'groupSnapshotID': gsnap_id}, - version='7.0') - - def test_delete_cgsnapshot_by_name(self): - sfv = solidfire.SolidFireDriver(configuration=self.configuration) - fake_gsnap = {'groupSnapshotID': 42} - with mock.patch.object(sfv, - '_get_group_snapshot_by_name', - return_value=fake_gsnap),\ - mock.patch.object(sfv, - '_delete_group_snapshot') as del_stuff: - sfv._delete_cgsnapshot_by_name('does not matter') - del_stuff.assert_called_with(fake_gsnap['groupSnapshotID']) - - def test_delete_cgsnapshot_by_name_rainy(self): - sfv = solidfire.SolidFireDriver(configuration=self.configuration) - with mock.patch.object(sfv, - '_get_group_snapshot_by_name', - return_value=None): - self.assertRaises(exception.SolidFireDriverException, - sfv._delete_cgsnapshot_by_name, - 'does not matter') - - def test_find_linked_snapshot(self): - sfv = 
solidfire.SolidFireDriver(configuration=self.configuration) - group_snap = {'members': [{'volumeID': 1}, {'volumeID': 2}]} - source_vol = {'volumeID': 1} - with mock.patch.object(sfv, - '_get_sf_volume', - return_value=source_vol) as get_vol: - res = sfv._find_linked_snapshot('fake_uuid', group_snap) - self.assertEqual(source_vol, res) - get_vol.assert_called_with('fake_uuid') - - def test_create_consisgroup_from_src_cgsnapshot(self): - sfv = solidfire.SolidFireDriver(configuration=self.configuration) - ctxt = None - group = {} - volumes = [{'id': 'one'}, {'id': 'two'}, {'id': 'three'}] - cgsnapshot = {'id': 'great_uuid'} - snapshots = [{'id': 'snap_id_1', 'volume_id': 'one'}, - {'id': 'snap_id_2', 'volume_id': 'two'}, - {'id': 'snap_id_3', 'volume_id': 'three'}] - source_cg = None - source_vols = None - group_snap = {} - name = sfv.configuration.sf_volume_prefix + cgsnapshot['id'] - kek = (None, None, {}) - with mock.patch.object(sfv, - '_get_group_snapshot_by_name', - return_value=group_snap) as get_snap,\ - mock.patch.object(sfv, - '_find_linked_snapshot'),\ - mock.patch.object(sfv, - '_do_clone_volume', - return_value=kek): - model, vol_models = sfv._create_consistencygroup_from_src( - ctxt, group, volumes, - cgsnapshot, snapshots, - source_cg, source_vols) - get_snap.assert_called_with(name) - self.assertEqual( - {'status': fields.GroupStatus.AVAILABLE}, model) - - def test_create_consisgroup_from_src_source_cg(self): - sfv = solidfire.SolidFireDriver(configuration=self.configuration) - ctxt = None - group = {} - volumes = [{'id': 'one', 'source_volid': 'source_one'}, - {'id': 'two', 'source_volid': 'source_two'}, - {'id': 'three', 'source_volid': 'source_three'}] - cgsnapshot = {'id': 'great_uuid'} - snapshots = None - source_cg = {'id': 'fantastic_cg'} - source_vols = [1, 2, 3] - source_snap = None - group_snap = {} - kek = (None, None, {}) - with mock.patch.object(sfv, - '_create_temp_group_snapshot', - return_value=source_cg['id']),\ - 
mock.patch.object(sfv, - '_get_group_snapshot_by_name', - return_value=group_snap) as get_snap,\ - mock.patch.object(sfv, - '_find_linked_snapshot', - return_value=source_snap),\ - mock.patch.object(sfv, - '_do_clone_volume', - return_value=kek),\ - mock.patch.object(sfv, - '_delete_cgsnapshot_by_name'): - model, vol_models = sfv._create_consistencygroup_from_src( - ctxt, group, volumes, - cgsnapshot, snapshots, - source_cg, - source_vols) - get_snap.assert_called_with(source_cg['id']) - self.assertEqual( - {'status': fields.GroupStatus.AVAILABLE}, model) - - def test_create_cgsnapshot(self): - sfv = solidfire.SolidFireDriver(configuration=self.configuration) - ctxt = None - cgsnapshot = {'id': 'acceptable_cgsnap_id'} - snapshots = [{'volume_id': 'one'}, - {'volume_id': 'two'}] - pfx = sfv.configuration.sf_volume_prefix - active_vols = [{'name': pfx + 'one'}, - {'name': pfx + 'two'}] - with mock.patch.object(sfv, - '_get_all_active_volumes', - return_value=active_vols),\ - mock.patch.object(sfv, - '_sf_create_group_snapshot') as create_gsnap: - sfv._create_cgsnapshot(ctxt, cgsnapshot, snapshots) - create_gsnap.assert_called_with(pfx + cgsnapshot['id'], - active_vols) - - def test_create_cgsnapshot_rainy(self): - sfv = solidfire.SolidFireDriver(configuration=self.configuration) - ctxt = None - cgsnapshot = {'id': 'acceptable_cgsnap_id'} - snapshots = [{'volume_id': 'one'}, - {'volume_id': 'two'}] - pfx = sfv.configuration.sf_volume_prefix - active_vols = [{'name': pfx + 'one'}] - with mock.patch.object(sfv, - '_get_all_active_volumes', - return_value=active_vols),\ - mock.patch.object(sfv, - '_sf_create_group_snapshot'): - self.assertRaises(exception.SolidFireDriverException, - sfv._create_cgsnapshot, - ctxt, - cgsnapshot, - snapshots) - - def test_create_vol_from_cgsnap(self): - # cgsnaps on the backend yield numerous identically named snapshots. - # create_volume_from_snapshot now searches for the correct snapshot. 
- sfv = solidfire.SolidFireDriver(configuration=self.configuration) - source = {'group_snapshot_id': 'typical_cgsnap_id', - 'volume_id': 'typical_vol_id', - 'id': 'no_id_4_u'} - name = (self.configuration.sf_volume_prefix - + source.get('group_snapshot_id')) - with mock.patch.object(sfv, - '_get_group_snapshot_by_name', - return_value={}) as get,\ - mock.patch.object(sfv, - '_create_clone_from_sf_snapshot', - return_value='model'): - result = sfv.create_volume_from_snapshot({}, source) - get.assert_called_once_with(name) - self.assertEqual('model', result) - - @mock.patch('cinder.volume.utils.is_group_a_cg_snapshot_type') - def test_create_group_cg(self, group_cg_test): - sfv = solidfire.SolidFireDriver(configuration=self.configuration) - group_cg_test.return_value = True - group = mock.MagicMock() - result = sfv.create_group(self.ctxt, group) - self.assertEqual(result, - {'status': fields.GroupStatus.AVAILABLE}) - group_cg_test.assert_called_once_with(group) - - @mock.patch('cinder.volume.utils.is_group_a_cg_snapshot_type') - def test_create_group_rainy(self, group_cg_test): - sfv = solidfire.SolidFireDriver(configuration=self.configuration) - group_cg_test.return_value = False - group = mock.MagicMock() - self.assertRaises(NotImplementedError, - sfv.create_group, - self.ctxt, group) - group_cg_test.assert_called_once_with(group) - - @mock.patch('cinder.volume.utils.is_group_a_cg_snapshot_type') - def test_create_group_from_src_rainy(self, group_cg_test): - sfv = solidfire.SolidFireDriver(configuration=self.configuration) - group_cg_test.return_value = False - group = mock.MagicMock() - volumes = [mock.MagicMock()] - self.assertRaises(NotImplementedError, - sfv.create_group_from_src, - self.ctxt, group, volumes) - group_cg_test.assert_called_once_with(group) - - @mock.patch('cinder.volume.utils.is_group_a_cg_snapshot_type') - def test_create_group_from_src_cg(self, group_cg_test): - sfv = solidfire.SolidFireDriver(configuration=self.configuration) - 
group_cg_test.return_value = True - group = mock.MagicMock() - volumes = [mock.MagicMock()] - ret = 'things' - with mock.patch.object(sfv, - '_create_consistencygroup_from_src', - return_value=ret): - result = sfv.create_group_from_src(self.ctxt, - group, - volumes) - self.assertEqual(ret, result) - group_cg_test.assert_called_once_with(group) - - @mock.patch('cinder.volume.utils.is_group_a_cg_snapshot_type') - def test_create_group_snapshot_rainy(self, group_cg_test): - sfv = solidfire.SolidFireDriver(configuration=self.configuration) - group_cg_test.return_value = False - group_snapshot = mock.MagicMock() - snapshots = [mock.MagicMock()] - self.assertRaises(NotImplementedError, - sfv.create_group_snapshot, - self.ctxt, - group_snapshot, - snapshots) - group_cg_test.assert_called_once_with(group_snapshot) - - @mock.patch('cinder.volume.utils.is_group_a_cg_snapshot_type') - def test_create_group_snapshot(self, group_cg_test): - sfv = solidfire.SolidFireDriver(configuration=self.configuration) - group_cg_test.return_value = True - group_snapshot = mock.MagicMock() - snapshots = [mock.MagicMock()] - ret = 'things' - with mock.patch.object(sfv, - '_create_cgsnapshot', - return_value=ret): - result = sfv.create_group_snapshot(self.ctxt, - group_snapshot, - snapshots) - self.assertEqual(ret, result) - group_cg_test.assert_called_once_with(group_snapshot) - - @mock.patch('cinder.volume.utils.is_group_a_cg_snapshot_type') - def test_delete_group_rainy(self, group_cg_test): - sfv = solidfire.SolidFireDriver(configuration=self.configuration) - group_cg_test.return_value = False - group = mock.MagicMock() - volumes = [mock.MagicMock()] - self.assertRaises(NotImplementedError, - sfv.delete_group, - self.ctxt, - group, - volumes) - group_cg_test.assert_called_once_with(group) - - @mock.patch('cinder.volume.utils.is_group_a_cg_snapshot_type') - def test_delete_group(self, group_cg_test): - sfv = solidfire.SolidFireDriver(configuration=self.configuration) - 
group_cg_test.return_value = True - group = mock.MagicMock() - volumes = [mock.MagicMock()] - ret = 'things' - with mock.patch.object(sfv, - '_delete_consistencygroup', - return_value=ret): - result = sfv.delete_group(self.ctxt, - group, - volumes) - self.assertEqual(ret, result) - group_cg_test.assert_called_once_with(group) - - @mock.patch('cinder.volume.utils.is_group_a_cg_snapshot_type') - def test_update_group_rainy(self, group_cg_test): - sfv = solidfire.SolidFireDriver(configuration=self.configuration) - group_cg_test.return_value = False - group = mock.MagicMock() - self.assertRaises(NotImplementedError, - sfv.update_group, - self.ctxt, - group) - group_cg_test.assert_called_once_with(group) - - @mock.patch('cinder.volume.utils.is_group_a_cg_snapshot_type') - def test_update_group(self, group_cg_test): - sfv = solidfire.SolidFireDriver(configuration=self.configuration) - group_cg_test.return_value = True - group = mock.MagicMock() - ret = 'things' - with mock.patch.object(sfv, - '_update_consistencygroup', - return_value=ret): - result = sfv.update_group(self.ctxt, - group) - self.assertEqual(ret, result) - group_cg_test.assert_called_once_with(group) - - def test_getattr_failure(self): - sfv = solidfire.SolidFireDriver(configuration=self.configuration) - try: - sfv.foo() - self.fail("Should have thrown Error") - except Exception: - pass - - def test_set_rep_by_volume_type(self): - sfv = solidfire.SolidFireDriver(configuration=self.configuration) - sfv.cluster_pairs = [{'cluster_id': 'fake-id', 'cluster_mvip': - 'fake-mvip'}] - ctxt = None - type_id = '290edb2a-f5ea-11e5-9ce9-5e5517507c66' - fake_type = {'extra_specs': {'replication': 'enabled'}} - with mock.patch.object(volume_types, - 'get_volume_type', - return_value=fake_type): - self.assertEqual('fake-id', sfv._set_rep_by_volume_type( - ctxt, - type_id)['targets']['cluster_id']) - - def test_replicate_volume(self): - sfv = solidfire.SolidFireDriver(configuration=self.configuration) - sfv.cluster_pairs 
= ( - [{'uniqueID': 'lu9f', 'endpoint': {'passwd': 'admin', 'port': - 443, 'url': - 'https://192.168.139.102:443', - 'svip': '10.10.8.134', 'mvip': - '192.168.139.102', 'login': - 'admin'}, 'name': - 'AutoTest2-6AjG-FOR-TEST-ONLY', 'clusterPairID': 33, 'uuid': - '9c499d4b-8fff-48b4-b875-27601d5d9889', 'svip': '10.10.23.2', - 'mvipNodeID': 1, 'repCount': 1, 'encryptionAtRestState': - 'disabled', 'attributes': {}, 'mvip': '192.168.139.102', - 'ensemble': ['10.10.5.130'], 'svipNodeID': 1}]) - - with mock.patch.object(sfv, - '_issue_api_request', - self.fake_issue_api_request),\ - mock.patch.object(sfv, - '_get_sfaccount_by_name', - return_value={'accountID': 1}),\ - mock.patch.object(sfv, - '_do_volume_create', - return_value={'provider_id': '1 2 xxxx'}): - self.assertEqual({'provider_id': '1 2 xxxx'}, - sfv._replicate_volume( - {'project_id': 1, 'volumeID': 1}, - {'attributes': {}}, - {'initiatorSecret': 'shhh', - 'targetSecret': 'dont-tell'}, - {})) - - def test_pythons_try_except(self): - def _fake_retrieve_rep(vol): - raise exception.SolidFireAPIException - - sfv = solidfire.SolidFireDriver(configuration=self.configuration) - with mock.patch.object(sfv, - '_get_create_account', - return_value={'accountID': 5}),\ - mock.patch.object(sfv, - '_retrieve_qos_setting', - return_value=None),\ - mock.patch.object(sfv, - '_do_volume_create', - return_value={'provider_id': '1 2 xxxx'}),\ - mock.patch.object(sfv, - '_retrieve_replication_settings', - side_effect=_fake_retrieve_rep): - self.assertRaises(exception.SolidFireAPIException, - sfv.create_volume, - self.mock_volume) diff --git a/cinder/tests/unit/volume/drivers/synology/__init__.py b/cinder/tests/unit/volume/drivers/synology/__init__.py deleted file mode 100644 index e69de29bb..000000000 diff --git a/cinder/tests/unit/volume/drivers/synology/test_synology_common.py b/cinder/tests/unit/volume/drivers/synology/test_synology_common.py deleted file mode 100644 index e81703b89..000000000 --- 
a/cinder/tests/unit/volume/drivers/synology/test_synology_common.py +++ /dev/null @@ -1,1665 +0,0 @@ -# Copyright (c) 2016 Synology Co., Ltd. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -"""Tests for the Synology iSCSI volume driver.""" - -import copy -import math - -import mock -from oslo_utils import units -import requests -from six.moves import http_client -from six import string_types - -from cinder import context -from cinder import exception -from cinder import test -from cinder.tests.unit import fake_constants as fake -from cinder.tests.unit import fake_snapshot -from cinder.tests.unit import fake_volume -from cinder.volume import configuration as conf -from cinder.volume.drivers.synology import synology_common as common - -VOLUME_ID = fake.VOLUME_ID -TARGET_NAME_PREFIX = 'Cinder-Target-' -IP = '10.0.0.1' -IQN = 'iqn.2000-01.com.synology:' + TARGET_NAME_PREFIX + VOLUME_ID -TRG_ID = 1 -CHAP_AUTH_USERNAME = 'username' -CHAP_AUTH_PASSWORD = 'password' -VOLUME = { - '_name_id': '', - 'name': fake.VOLUME_NAME, - 'id': VOLUME_ID, - 'display_name': 'fake_volume', - 'size': 10, - 'provider_location': '%s:3260,%d %s 1' % (IP, TRG_ID, IQN), - 'provider_auth': 'CHAP %(user)s %(pass)s' % { - 'user': CHAP_AUTH_USERNAME, - 'pass': CHAP_AUTH_PASSWORD}, -} -NEW_VOLUME_ID = fake.VOLUME2_ID -IQN2 = 'iqn.2000-01.com.synology:' + TARGET_NAME_PREFIX + NEW_VOLUME_ID -NEW_TRG_ID = 2 -NEW_VOLUME = { - 'name': fake.VOLUME2_NAME, - 'id': NEW_VOLUME_ID, - 
'display_name': 'new_fake_volume', - 'size': 10, - 'provider_location': '%s:3260,%d %s 1' % (IP, NEW_TRG_ID, IQN2), -} -SNAPSHOT_ID = fake.SNAPSHOT_ID -DS_SNAPSHOT_UUID = 'ca86a56a-40d8-4210-974c-ef15dbf01cba' -SNAPSHOT_METADATA = { - 'snap-meta1': 'value1', - 'snap-meta2': 'value2', - 'snap-meta3': 'value3', -} -SNAPSHOT = { - 'name': fake.SNAPSHOT_NAME, - 'id': SNAPSHOT_ID, - 'volume_id': VOLUME_ID, - 'volume_name': VOLUME['name'], - 'volume_size': 10, - 'display_name': 'fake_snapshot', - 'volume': VOLUME, - 'metadata': SNAPSHOT_METADATA, -} -SNAPSHOT_INFO = { - 'is_action_locked': False, - 'snapshot_id': 1, - 'status': 'Healthy', - 'uuid': DS_SNAPSHOT_UUID, -} -INITIATOR_IQN = 'iqn.1993-08.org.debian:01:604af6a341' -CONNECTOR = { - 'initiator': INITIATOR_IQN, -} -CONTEXT = { -} -LOCAL_PATH = '/dev/isda' -IMAGE_SERVICE = 'image_service' -IMAGE_ID = 1 -IMAGE_META = { - 'id': IMAGE_ID -} -POOL_NAME = 'volume1' -NODE_UUID = '72003c93-2db2-4f00-a169-67c5eae86bb1' -NODE_UUID2 = '8e1e8b82-1ef9-4157-a4bf-e069355386c2' -HOST = { - 'capabilities': { - 'pool_name': 'volume2', - 'backend_info': 'Synology:iscsi:' + NODE_UUID, - }, -} -POOL_INFO = { - 'display_name': 'Volume 1', - 'raid_type': 'raid_1', - 'readonly': False, - 'fs_type': 'ext4', - 'location': 'internal', - 'eppool_used_byte': '139177984', - 'size_total_byte': '487262806016', - 'volume_id': 1, - 'size_free_byte': '486521139200', - 'container': 'internal', - 'volume_path': '/volume1', - 'single_volume': True -} -LUN_UUID = 'e1315f33-ba35-42c3-a3e7-5a06958eca30' -LUN_INFO = { - 'status': '', - 'is_action_locked': False, - 'name': VOLUME['name'], - 'extent_size': 0, - 'allocated_size': 0, - 'uuid': LUN_UUID, - 'is_mapped': True, - 'lun_id': 3, - 'location': '/volume2', - 'restored_time': 0, - 'type': 143, - 'size': 1073741824 -} -FAKE_API = 'SYNO.Fake.API' -FAKE_METHOD = 'fake' -FAKE_PATH = 'fake.cgi' - - -class MockResponse(object): - def __init__(self, json_data, status_code): - self.json_data = json_data - 
self.status_code = status_code - - def json(self): - return self.json_data - - -class SynoSessionTestCase(test.TestCase): - @mock.patch('requests.post', return_value=MockResponse( - {'data': {'sid': 'sid'}, 'success': True}, http_client.OK)) - def setUp(self, _mock_post): - super(SynoSessionTestCase, self).setUp() - - self.host = '127.0.0.1' - self.port = 5001 - self.username = 'admin' - self.password = 'admin' - self.https = True - self.ssl_verify = False - self.one_time_pass = None - self.device_id = None - self.session = common.Session(self.host, - self.port, - self.username, - self.password, - self.https, - self.ssl_verify, - self.one_time_pass, - self.device_id) - self.session.__class__.__del__ = lambda x: x - - def test_query(self): - out = { - 'maxVersion': 3, - 'minVersion': 1, - 'path': FAKE_PATH, - 'requestFormat': 'JSON' - } - data = { - 'api': 'SYNO.API.Info', - 'version': 1, - 'method': 'query', - 'query': FAKE_API - } - requests.post = mock.Mock(side_effect=[ - MockResponse({ - 'data': { - FAKE_API: out - }, - 'success': True - }, http_client.OK), - MockResponse({ - 'data': { - FAKE_API: out - } - }, http_client.OK), - ]) - - result = self.session.query(FAKE_API) - requests.post.assert_called_once_with( - 'https://127.0.0.1:5001/webapi/query.cgi', - data=data, - verify=self.ssl_verify) - self.assertDictEqual(out, result) - - result = self.session.query(FAKE_API) - self.assertIsNone(result) - - -class SynoAPIRequestTestCase(test.TestCase): - @mock.patch('requests.post') - def setUp(self, _mock_post): - super(SynoAPIRequestTestCase, self).setUp() - - self.host = '127.0.0.1' - self.port = 5001 - self.username = 'admin' - self.password = 'admin' - self.https = True - self.ssl_verify = False - self.one_time_pass = None - self.device_id = None - self.request = common.APIRequest(self.host, - self.port, - self.username, - self.password, - self.https, - self.ssl_verify, - self.one_time_pass, - self.device_id) - self.request._APIRequest__session._sid = 'sid' - 
self.request._APIRequest__session.__class__.__del__ = lambda x: x - - def tearDown(self): - super(SynoAPIRequestTestCase, self).tearDown() - - @mock.patch.object(common, 'Session') - def test_new_session(self, _mock_session): - self.device_id = 'did' - self.request = common.APIRequest(self.host, - self.port, - self.username, - self.password, - self.https, - self.ssl_verify, - self.one_time_pass, - self.device_id) - - result = self.request.new_session() - self.assertIsNone(result) - - def test__start(self): - out = { - 'maxVersion': 3, - 'minVersion': 1, - 'path': FAKE_PATH, - 'requestFormat': 'JSON' - } - self.request._APIRequest__session.query = mock.Mock(return_value=out) - - result = self.request._start(FAKE_API, 3) - (self.request._APIRequest__session.query. - assert_called_once_with(FAKE_API)) - self.assertEqual(FAKE_PATH, result) - - out.update(maxVersion=2) - self.assertRaises(exception.APIException, - self.request._start, - FAKE_API, - 3) - - def test__encode_param(self): - param = { - 'api': FAKE_API, - 'method': FAKE_METHOD, - 'version': 1, - '_sid': 'sid' - } - self.request._jsonFormat = True - result = self.request._encode_param(param) - self.assertIsInstance(result, string_types) - - def test_request(self): - version = 1 - - self.request._start = mock.Mock(return_value='fake.cgi') - self.request._encode_param = mock.Mock(side_effect=lambda x: x) - self.request.new_session = mock.Mock() - requests.post = mock.Mock(side_effect=[ - MockResponse({'success': True}, http_client.OK), - MockResponse({'error': {'code': http_client.SWITCHING_PROTOCOLS}, - 'success': False}, http_client.OK), - MockResponse({'error': {'code': http_client.SWITCHING_PROTOCOLS}}, - http_client.OK), - MockResponse({}, http_client.INTERNAL_SERVER_ERROR) - ]) - - result = self.request.request(FAKE_API, FAKE_METHOD, version) - self.assertDictEqual({'success': True}, result) - - result = self.request.request(FAKE_API, FAKE_METHOD, version) - self.assertDictEqual( - {'error': {'code': 
http_client.SWITCHING_PROTOCOLS}, - 'success': False}, result) - - self.assertRaises(exception.MalformedResponse, - self.request.request, - FAKE_API, - FAKE_METHOD, - version) - - result = self.request.request(FAKE_API, FAKE_METHOD, version) - self.assertDictEqual( - {'http_status': http_client.INTERNAL_SERVER_ERROR}, result) - - @mock.patch.object(common.LOG, 'debug') - def test_request_auth_error(self, _log): - version = 1 - - self.request._start = mock.Mock(return_value='fake.cgi') - self.request._encode_param = mock.Mock(side_effect=lambda x: x) - self.request.new_session = mock.Mock() - requests.post = mock.Mock(return_value= - MockResponse({ - 'error': {'code': 105}, - 'success': False - }, http_client.OK)) - - self.assertRaises(exception.SynoAuthError, - self.request.request, - FAKE_API, - FAKE_METHOD, - version) - - -class SynoCommonTestCase(test.TestCase): - - @mock.patch.object(common.SynoCommon, - '_get_node_uuid', - return_value=NODE_UUID) - @mock.patch.object(common, 'APIRequest') - def setUp(self, _request, _get_node_uuid): - super(SynoCommonTestCase, self).setUp() - - self.conf = self.setup_configuration() - self.common = common.SynoCommon(self.conf, 'iscsi') - self.common.vendor_name = 'Synology' - self.common.driver_type = 'iscsi' - self.common.volume_backend_name = 'DiskStation' - self.common.iscsi_port = 3260 - - def setup_configuration(self): - config = mock.Mock(spec=conf.Configuration) - config.use_chap_auth = False - config.iscsi_protocol = 'iscsi' - config.iscsi_ip_address = IP - config.iscsi_port = 3260 - config.synology_admin_port = 5000 - config.synology_username = 'admin' - config.synology_password = 'admin' - config.synology_ssl_verify = True - config.synology_one_time_pass = '123456' - config.synology_pool_name = POOL_NAME - config.volume_dd_blocksize = 1 - config.iscsi_target_prefix = 'iqn.2000-01.com.synology:' - config.chap_username = 'abcd' - config.chap_password = 'qwerty' - config.reserved_percentage = 0 - 
config.max_over_subscription_ratio = 20 - - return config - - @mock.patch.object(common.SynoCommon, - '_get_node_uuid', - return_value=NODE_UUID) - @mock.patch.object(common, 'APIRequest') - def test___init__(self, _request, _get_node_uuid): - self.conf.safe_get = (mock.Mock(side_effect=[ - self.conf.iscsi_ip_address, - '', - ''])) - - self.assertRaises(exception.InvalidConfigurationValue, - self.common.__init__, - self.conf, - 'iscsi') - - self.assertRaises(exception.InvalidConfigurationValue, - self.common.__init__, - self.conf, - 'iscsi') - - def test__get_node_uuid(self): - out = { - 'data': { - 'nodes': [{ - 'uuid': NODE_UUID - }] - }, - 'success': True - } - self.common.exec_webapi = ( - mock.Mock(side_effect=[ - out, - out, - exception.SynoAuthError(message='dont care')])) - - result = self.common._get_node_uuid() - (self.common.exec_webapi. - assert_called_with('SYNO.Core.ISCSI.Node', - 'list', - mock.ANY)) - self.assertEqual(NODE_UUID, result) - - del out['data']['nodes'] - self.assertRaises(exception.VolumeDriverException, - self.common._get_node_uuid) - - self.assertRaises(exception.SynoAuthError, - self.common._get_node_uuid) - - def test__get_pool_info(self): - out = { - 'data': { - 'volume': POOL_INFO - }, - 'success': True - } - self.common.exec_webapi = ( - mock.Mock(side_effect=[ - out, - out, - exception.SynoAuthError(message='dont care')])) - result = self.common._get_pool_info() - (self.common.exec_webapi. 
- assert_called_with('SYNO.Core.Storage.Volume', - 'get', - mock.ANY, - volume_path='/' + POOL_NAME)) - self.assertDictEqual(POOL_INFO, result) - - del out['data']['volume'] - self.assertRaises(exception.MalformedResponse, - self.common._get_pool_info) - - self.assertRaises(exception.SynoAuthError, - self.common._get_pool_info) - - self.conf.synology_pool_name = '' - self.assertRaises(exception.InvalidConfigurationValue, - self.common._get_pool_info) - - def test__get_pool_size(self): - pool_info = copy.deepcopy(POOL_INFO) - self.common._get_pool_info = mock.Mock(return_value=pool_info) - - result = self.common._get_pool_size() - - self.assertEqual((int(int(POOL_INFO['size_free_byte']) / units.Gi), - int(int(POOL_INFO['size_total_byte']) / units.Gi), - math.ceil((float(POOL_INFO['size_total_byte']) - - float(POOL_INFO['size_free_byte']) - - float(POOL_INFO['eppool_used_byte'])) / - units.Gi)), - result) - - del pool_info['size_free_byte'] - self.assertRaises(exception.MalformedResponse, - self.common._get_pool_size) - - def test__get_pool_lun_provisioned_size(self): - out = { - 'data': { - 'luns': [{ - 'lun_id': 1, - 'location': '/' + POOL_NAME, - 'size': 5368709120 - }, { - 'lun_id': 2, - 'location': '/' + POOL_NAME, - 'size': 3221225472 - }] - }, - 'success': True - } - self.common.exec_webapi = mock.Mock(return_value=out) - - result = self.common._get_pool_lun_provisioned_size() - (self.common.exec_webapi. 
- assert_called_with('SYNO.Core.ISCSI.LUN', - 'list', - mock.ANY, - location='/' + POOL_NAME)) - self.assertEqual(int(math.ceil(float(5368709120 + 3221225472) / - units.Gi)), - result) - - def test__get_pool_lun_provisioned_size_error(self): - out = { - 'data': {}, - 'success': True - } - self.common.exec_webapi = mock.Mock(return_value=out) - - self.assertRaises(exception.MalformedResponse, - self.common._get_pool_lun_provisioned_size) - - self.conf.synology_pool_name = '' - self.assertRaises(exception.InvalidConfigurationValue, - self.common._get_pool_lun_provisioned_size) - - def test__get_lun_info(self): - out = { - 'data': { - 'lun': LUN_INFO - }, - 'success': True - } - self.common.exec_webapi = ( - mock.Mock(side_effect=[ - out, - out, - exception.SynoAuthError(message='dont care')])) - result = self.common._get_lun_info(VOLUME['name'], - ['is_mapped']) - (self.common.exec_webapi. - assert_called_with('SYNO.Core.ISCSI.LUN', - 'get', - mock.ANY, - uuid=VOLUME['name'], - additional=['is_mapped'])) - self.assertDictEqual(LUN_INFO, result) - - del out['data']['lun'] - self.assertRaises(exception.MalformedResponse, - self.common._get_lun_info, - VOLUME['name']) - - self.assertRaises(exception.SynoAuthError, - self.common._get_lun_info, - VOLUME['name']) - - self.assertRaises(exception.InvalidParameterValue, - self.common._get_lun_info, - '') - - def test__get_lun_uuid(self): - lun_info = copy.deepcopy(LUN_INFO) - self.common._get_lun_info = ( - mock.Mock(side_effect=[ - lun_info, - lun_info, - exception.SynoAuthError(message='dont care')])) - - result = self.common._get_lun_uuid(VOLUME['name']) - self.assertEqual(LUN_UUID, result) - - del lun_info['uuid'] - self.assertRaises(exception.MalformedResponse, - self.common._get_lun_uuid, - VOLUME['name']) - - self.assertRaises(exception.SynoAuthError, - self.common._get_lun_uuid, - VOLUME['name']) - - self.assertRaises(exception.InvalidParameterValue, - self.common._get_lun_uuid, - '') - - def 
test__get_lun_status(self): - lun_info = copy.deepcopy(LUN_INFO) - self.common._get_lun_info = ( - mock.Mock(side_effect=[ - lun_info, - lun_info, - lun_info, - exception.SynoAuthError(message='dont care')])) - - result = self.common._get_lun_status(VOLUME['name']) - self.assertEqual((lun_info['status'], lun_info['is_action_locked']), - result) - - del lun_info['is_action_locked'] - self.assertRaises(exception.MalformedResponse, - self.common._get_lun_status, - VOLUME['name']) - - del lun_info['status'] - self.assertRaises(exception.MalformedResponse, - self.common._get_lun_status, - VOLUME['name']) - - self.assertRaises(exception.SynoAuthError, - self.common._get_lun_status, - VOLUME['name']) - - self.assertRaises(exception.InvalidParameterValue, - self.common._get_lun_status, - '') - - def test__get_snapshot_info(self): - out = { - 'data': { - 'snapshot': SNAPSHOT_INFO - }, - 'success': True - } - self.common.exec_webapi = ( - mock.Mock(side_effect=[ - out, - out, - exception.SynoAuthError(message='dont care')])) - result = self.common._get_snapshot_info(DS_SNAPSHOT_UUID, - additional=['status']) - (self.common.exec_webapi. 
- assert_called_with('SYNO.Core.ISCSI.LUN', - 'get_snapshot', - mock.ANY, - snapshot_uuid=DS_SNAPSHOT_UUID, - additional=['status'])) - self.assertDictEqual(SNAPSHOT_INFO, result) - - del out['data']['snapshot'] - self.assertRaises(exception.MalformedResponse, - self.common._get_snapshot_info, - DS_SNAPSHOT_UUID) - - self.assertRaises(exception.SynoAuthError, - self.common._get_snapshot_info, - DS_SNAPSHOT_UUID) - - self.assertRaises(exception.InvalidParameterValue, - self.common._get_snapshot_info, - '') - - def test__get_snapshot_status(self): - snapshot_info = copy.deepcopy(SNAPSHOT_INFO) - self.common._get_snapshot_info = ( - mock.Mock(side_effect=[ - snapshot_info, - snapshot_info, - snapshot_info, - exception.SynoAuthError(message='dont care')])) - - result = self.common._get_snapshot_status(DS_SNAPSHOT_UUID) - self.assertEqual((snapshot_info['status'], - snapshot_info['is_action_locked']), - result) - - del snapshot_info['is_action_locked'] - self.assertRaises(exception.MalformedResponse, - self.common._get_snapshot_status, - DS_SNAPSHOT_UUID) - - del snapshot_info['status'] - self.assertRaises(exception.MalformedResponse, - self.common._get_snapshot_status, - DS_SNAPSHOT_UUID) - - self.assertRaises(exception.SynoAuthError, - self.common._get_snapshot_status, - DS_SNAPSHOT_UUID) - - self.assertRaises(exception.InvalidParameterValue, - self.common._get_snapshot_status, - '') - - def test__get_metadata_value(self): - ctxt = context.get_admin_context() - fake_vol_obj = fake_volume.fake_volume_obj(ctxt) - self.assertRaises(exception.VolumeMetadataNotFound, - self.common._get_metadata_value, - fake_vol_obj, - 'no_such_key') - - fake_snap_obj = (fake_snapshot. 
- fake_snapshot_obj(ctxt, - expected_attrs=['metadata'])) - self.assertRaises(exception.SnapshotMetadataNotFound, - self.common._get_metadata_value, - fake_snap_obj, - 'no_such_key') - - meta = {'snapshot_metadata': [{'key': 'ds_snapshot_UUID', - 'value': DS_SNAPSHOT_UUID}], - 'expected_attrs': ['metadata']} - - fake_snap_obj = fake_snapshot.fake_snapshot_obj(ctxt, - **meta) - result = self.common._get_metadata_value(fake_snap_obj, - 'ds_snapshot_UUID') - self.assertEqual(DS_SNAPSHOT_UUID, result) - - self.assertRaises(exception.MetadataAbsent, - self.common._get_metadata_value, - SNAPSHOT, - 'no_such_key') - - def test__target_create_with_chap_auth(self): - out = { - 'data': { - 'target_id': TRG_ID - }, - 'success': True - } - trg_name = self.common.TARGET_NAME_PREFIX + VOLUME['id'] - iqn = self.conf.iscsi_target_prefix + trg_name - self.conf.use_chap_auth = True - self.common.exec_webapi = mock.Mock(return_value=out) - self.conf.safe_get = ( - mock.Mock(side_effect=[ - self.conf.use_chap_auth, - 'abcd', - 'qwerty', - self.conf.iscsi_target_prefix])) - result = self.common._target_create(VOLUME['id']) - (self.common.exec_webapi. - assert_called_with('SYNO.Core.ISCSI.Target', - 'create', - mock.ANY, - name=trg_name, - iqn=iqn, - auth_type=1, - user='abcd', - password='qwerty', - max_sessions=0)) - self.assertEqual((IQN, TRG_ID, 'CHAP abcd qwerty'), result) - - def test__target_create_without_chap_auth(self): - out = { - 'data': { - 'target_id': TRG_ID - }, - 'success': True - } - trg_name = self.common.TARGET_NAME_PREFIX + VOLUME['id'] - iqn = self.conf.iscsi_target_prefix + trg_name - self.common.exec_webapi = mock.Mock(return_value=out) - self.conf.safe_get = ( - mock.Mock(side_effect=[ - self.conf.use_chap_auth, - self.conf.iscsi_target_prefix])) - result = self.common._target_create(VOLUME['id']) - (self.common.exec_webapi. 
- assert_called_with('SYNO.Core.ISCSI.Target', - 'create', - mock.ANY, - name=trg_name, - iqn=iqn, - auth_type=0, - user='', - password='', - max_sessions=0)) - self.assertEqual((IQN, TRG_ID, ''), result) - - def test__target_create_error(self): - out = { - 'data': { - }, - 'success': True - } - self.common.exec_webapi = ( - mock.Mock(side_effect=[ - out, - exception.SynoAuthError(message='dont care')])) - self.conf.safe_get = ( - mock.Mock(side_effect=[ - self.conf.use_chap_auth, - self.conf.iscsi_target_prefix, - self.conf.use_chap_auth, - self.conf.iscsi_target_prefix])) - - self.assertRaises(exception.VolumeDriverException, - self.common._target_create, - VOLUME['id']) - - self.assertRaises(exception.SynoAuthError, - self.common._target_create, - VOLUME['id']) - - self.assertRaises(exception.InvalidParameterValue, - self.common._target_create, - '') - - def test__target_delete(self): - out = { - 'success': True - } - self.common.exec_webapi = ( - mock.Mock(side_effect=[ - out, - exception.SynoAuthError(message='dont care')])) - - result = self.common._target_delete(TRG_ID) - (self.common.exec_webapi. - assert_called_with('SYNO.Core.ISCSI.Target', - 'delete', - mock.ANY, - target_id=str(TRG_ID))) - self.assertIsNone(result) - - self.assertRaises(exception.SynoAuthError, - self.common._target_delete, - TRG_ID) - - self.assertRaises(exception.InvalidParameterValue, - self.common._target_delete, - -1) - - def test__lun_map_unmap_target(self): - out = { - 'success': True - } - self.common.exec_webapi = ( - mock.Mock(side_effect=[ - out, - out, - exception.SynoAuthError(message='dont care')])) - self.common._get_lun_uuid = mock.Mock(return_value=LUN_UUID) - - result = self.common._lun_map_unmap_target(VOLUME['name'], - True, - TRG_ID) - self.common._get_lun_uuid.assert_called_with(VOLUME['name']) - (self.common.exec_webapi. 
- assert_called_with('SYNO.Core.ISCSI.LUN', - 'map_target', - mock.ANY, - uuid=LUN_UUID, - target_ids=[str(TRG_ID)])) - self.assertIsNone(result) - - result = self.common._lun_map_unmap_target(VOLUME['name'], - False, - TRG_ID) - (self.common.exec_webapi. - assert_called_with('SYNO.Core.ISCSI.LUN', - 'unmap_target', - mock.ANY, - uuid=LUN_UUID, - target_ids=[str(TRG_ID)])) - self.assertIsNone(result) - - self.assertRaises(exception.SynoAuthError, - self.common._lun_map_unmap_target, - VOLUME['name'], - True, - TRG_ID) - - self.assertRaises(exception.InvalidParameterValue, - self.common._lun_map_unmap_target, - mock.ANY, - mock.ANY, - -1) - - def test__lun_map_target(self): - self.common._lun_map_unmap_target = mock.Mock() - - result = self.common._lun_map_target(VOLUME, TRG_ID) - - self.common._lun_map_unmap_target.assert_called_with(VOLUME, - True, - TRG_ID) - self.assertIsNone(result) - - def test__lun_ummap_target(self): - self.common._lun_map_unmap_target = mock.Mock() - - result = self.common._lun_unmap_target(VOLUME, TRG_ID) - - self.common._lun_map_unmap_target.assert_called_with(VOLUME, - False, - TRG_ID) - self.assertIsNone(result) - - def test__modify_lun_name(self): - out = { - 'success': True - } - self.common.exec_webapi = ( - mock.Mock(side_effect=[ - out, - exception.SynoAuthError(message='dont care')])) - - result = self.common._modify_lun_name(VOLUME['name'], - NEW_VOLUME['name']) - self.assertIsNone(result) - - self.assertRaises(exception.SynoAuthError, - self.common._modify_lun_name, - VOLUME['name'], - NEW_VOLUME['name']) - - @mock.patch('eventlet.sleep') - def test__check_lun_status_normal(self, _patched_sleep): - self.common._get_lun_status = ( - mock.Mock(side_effect=[ - ('normal', True), - ('normal', False), - ('cloning', False), - exception.SynoLUNNotExist(message='dont care')])) - - result = self.common._check_lun_status_normal(VOLUME['name']) - self.assertEqual(1, _patched_sleep.call_count) - self.assertEqual([mock.call(2)], 
_patched_sleep.call_args_list) - self.common._get_lun_status.assert_called_with(VOLUME['name']) - self.assertTrue(result) - - result = self.common._check_lun_status_normal(VOLUME['name']) - self.assertFalse(result) - - self.assertRaises(exception.SynoLUNNotExist, - self.common._check_lun_status_normal, - VOLUME['name']) - - @mock.patch('eventlet.sleep') - def test__check_snapshot_status_healthy(self, _patched_sleep): - self.common._get_snapshot_status = ( - mock.Mock(side_effect=[ - ('Healthy', True), - ('Healthy', False), - ('Unhealthy', False), - exception.SynoLUNNotExist(message='dont care')])) - - result = self.common._check_snapshot_status_healthy(DS_SNAPSHOT_UUID) - self.assertEqual(1, _patched_sleep.call_count) - self.assertEqual([mock.call(2)], _patched_sleep.call_args_list) - self.common._get_snapshot_status.assert_called_with(DS_SNAPSHOT_UUID) - self.assertTrue(result) - - result = self.common._check_snapshot_status_healthy(DS_SNAPSHOT_UUID) - self.assertFalse(result) - - self.assertRaises(exception.SynoLUNNotExist, - self.common._check_snapshot_status_healthy, - DS_SNAPSHOT_UUID) - - def test__check_storage_response(self): - out = { - 'success': False - } - result = self.common._check_storage_response(out) - self.assertEqual('Internal error', result[0]) - self.assertIsInstance(result[1], - (exception.VolumeBackendAPIException)) - - def test__check_iscsi_response(self): - out = { - 'success': False, - 'error': { - } - } - self.assertRaises(exception.MalformedResponse, - self.common._check_iscsi_response, - out) - - out['error'].update(code=18990505) - result = self.common._check_iscsi_response(out, uuid=LUN_UUID) - self.assertEqual('Bad LUN UUID [18990505]', result[0]) - self.assertIsInstance(result[1], - (exception.SynoLUNNotExist)) - - out['error'].update(code=18990532) - result = self.common._check_iscsi_response(out, - snapshot_id=SNAPSHOT_ID) - self.assertEqual('No such snapshot [18990532]', result[0]) - self.assertIsInstance(result[1], - 
(exception.SnapshotNotFound)) - - out['error'].update(code=12345678) - result = self.common._check_iscsi_response(out, uuid=LUN_UUID) - self.assertEqual('Internal error [12345678]', result[0]) - self.assertIsInstance(result[1], - (exception.VolumeBackendAPIException)) - - def test__check_ds_pool_status(self): - info = copy.deepcopy(POOL_INFO) - self.common._get_pool_info = mock.Mock(return_value=info) - - result = self.common._check_ds_pool_status() - self.assertIsNone(result) - - info['readonly'] = True - self.assertRaises(exception.VolumeDriverException, - self.common._check_ds_pool_status) - - del info['readonly'] - self.assertRaises(exception.MalformedResponse, - self.common._check_ds_pool_status) - - def test__check_ds_version(self): - ver1 = 'DSM 6.1-9999' - ver2 = 'DSM 6.0.2-9999' - ver3 = 'DSM 6.0.1-9999 Update 2' - ver4 = 'DSM 6.0-9999 Update 2' - ver5 = 'DSM 5.2-9999 ' - out = { - 'data': { - }, - 'success': True - } - self.common.exec_webapi = mock.Mock(return_value=out) - self.assertRaises(exception.MalformedResponse, - self.common._check_ds_version) - (self.common.exec_webapi. 
- assert_called_with('SYNO.Core.System', - 'info', - mock.ANY, - type='firmware')) - - out['data'].update(firmware_ver=ver1) - result = self.common._check_ds_version() - self.assertIsNone(result) - - out['data'].update(firmware_ver=ver2) - result = self.common._check_ds_version() - self.assertIsNone(result) - - out['data'].update(firmware_ver=ver3) - self.assertRaises(exception.VolumeDriverException, - self.common._check_ds_version) - - out['data'].update(firmware_ver=ver4) - self.assertRaises(exception.VolumeDriverException, - self.common._check_ds_version) - - out['data'].update(firmware_ver=ver5) - self.assertRaises(exception.VolumeDriverException, - self.common._check_ds_version) - - self.common.exec_webapi = ( - mock.Mock(side_effect= - exception.SynoAuthError(message='dont care'))) - self.assertRaises(exception.SynoAuthError, - self.common._check_ds_version) - - def test__check_ds_ability(self): - out = { - 'data': { - 'support_storage_mgr': 'yes', - 'support_iscsi_target': 'yes', - 'support_vaai': 'yes', - 'supportsnapshot': 'yes', - }, - 'success': True - } - self.common.exec_webapi = mock.Mock(return_value=out) - result = self.common._check_ds_ability() - self.assertIsNone(result) - (self.common.exec_webapi. 
- assert_called_with('SYNO.Core.System', - 'info', - mock.ANY, - type='define')) - - out['data'].update(supportsnapshot='no') - self.assertRaises(exception.VolumeDriverException, - self.common._check_ds_ability) - - out['data'].update(support_vaai='no') - self.assertRaises(exception.VolumeDriverException, - self.common._check_ds_ability) - - out['data'].update(support_iscsi_target='no') - self.assertRaises(exception.VolumeDriverException, - self.common._check_ds_ability) - - out['data'].update(support_storage_mgr='no') - self.assertRaises(exception.VolumeDriverException, - self.common._check_ds_ability) - - out['data'].update(usbstation='yes') - self.assertRaises(exception.VolumeDriverException, - self.common._check_ds_ability) - - del out['data'] - self.assertRaises(exception.MalformedResponse, - self.common._check_ds_ability) - - self.common.exec_webapi = ( - mock.Mock(side_effect= - exception.SynoAuthError(message='dont care'))) - self.assertRaises(exception.SynoAuthError, - self.common._check_ds_ability) - - @mock.patch.object(common.LOG, 'exception') - def test_check_response(self, _logexc): - out = { - 'success': True - } - bad_out1 = { - 'api_info': { - 'api': 'SYNO.Core.ISCSI.LUN', - 'method': 'create', - 'version': 1 - }, - 'success': False - } - bad_out2 = { - 'api_info': { - 'api': 'SYNO.Core.Storage.Volume', - 'method': 'get', - 'version': 1 - }, - 'success': False - } - bad_out3 = { - 'api_info': { - 'api': 'SYNO.Core.System', - 'method': 'info', - 'version': 1 - }, - 'success': False - } - self.common._check_iscsi_response = ( - mock.Mock(return_value= - ('Bad LUN UUID', - exception.SynoLUNNotExist(message='dont care')))) - self.common._check_storage_response = ( - mock.Mock(return_value= - ('Internal error', - exception. 
- VolumeBackendAPIException(message='dont care')))) - - result = self.common.check_response(out) - self.assertEqual(0, _logexc.call_count) - self.assertIsNone(result) - - self.assertRaises(exception.SynoLUNNotExist, - self.common.check_response, - bad_out1) - self.assertRaises(exception.VolumeBackendAPIException, - self.common.check_response, - bad_out2) - self.assertRaises(exception.VolumeBackendAPIException, - self.common.check_response, - bad_out3) - - def test_exec_webapi(self): - api = 'SYNO.Fake.WebAPI' - method = 'fake' - version = 1 - resp = {} - bad_resp = { - 'http_status': http_client.INTERNAL_SERVER_ERROR - } - expected = copy.deepcopy(resp) - expected.update(api_info={'api': api, - 'method': method, - 'version': version}) - self.common.synoexec = mock.Mock(side_effect=[resp, bad_resp]) - - result = self.common.exec_webapi(api, - method, - version, - param1='value1', - param2='value2') - - self.common.synoexec.assert_called_once_with(api, - method, - version, - param1='value1', - param2='value2') - self.assertDictEqual(expected, result) - - self.assertRaises(exception.SynoAPIHTTPError, - self.common.exec_webapi, - api, - method, - version, - param1='value1', - param2='value2') - - def test_get_ip(self): - result = self.common.get_ip() - self.assertEqual(self.conf.iscsi_ip_address, result) - - def test_get_provider_location(self): - self.common.get_ip = ( - mock.Mock(return_value=self.conf.iscsi_ip_address)) - self.conf.safe_get = ( - mock.Mock(return_value=['10.0.0.2', '10.0.0.3'])) - expected = ('10.0.0.1:3260;10.0.0.2:3260;10.0.0.3:3260' + - ',%(tid)d %(iqn)s 0') % {'tid': TRG_ID, 'iqn': IQN} - - result = self.common.get_provider_location(IQN, TRG_ID) - - self.assertEqual(expected, result) - - def test_is_lun_mapped(self): - bad_lun_info = copy.deepcopy(LUN_INFO) - del bad_lun_info['is_mapped'] - self.common._get_lun_info = ( - mock.Mock(side_effect=[ - LUN_INFO, - exception.SynoAuthError(message='dont care'), - bad_lun_info])) - - result = 
self.common.is_lun_mapped(VOLUME['name']) - self.assertEqual(LUN_INFO['is_mapped'], result) - - self.assertRaises(exception.SynoAuthError, - self.common.is_lun_mapped, - VOLUME['name']) - - self.assertRaises(exception.MalformedResponse, - self.common.is_lun_mapped, - VOLUME['name']) - - self.assertRaises(exception.InvalidParameterValue, - self.common.is_lun_mapped, - '') - - def test_check_for_setup_error(self): - self.common._check_ds_pool_status = mock.Mock() - self.common._check_ds_version = mock.Mock() - self.common._check_ds_ability = mock.Mock() - - result = self.common.check_for_setup_error() - - self.common._check_ds_pool_status.assert_called_once_with() - self.common._check_ds_version.assert_called_once_with() - self.common._check_ds_ability.assert_called_once_with() - self.assertIsNone(result) - - def test_update_volume_stats(self): - self.common._get_pool_size = mock.Mock(return_value=(10, 100, 50)) - self.common._get_pool_lun_provisioned_size = ( - mock.Mock(return_value=300)) - - data = { - 'volume_backend_name': 'DiskStation', - 'vendor_name': 'Synology', - 'storage_protocol': 'iscsi', - 'consistencygroup_support': False, - 'QoS_support': False, - 'thin_provisioning_support': True, - 'thick_provisioning_support': False, - 'reserved_percentage': 0, - 'free_capacity_gb': 10, - 'total_capacity_gb': 100, - 'provisioned_capacity_gb': 350, - 'max_over_subscription_ratio': 20, - 'iscsi_ip_address': '10.0.0.1', - 'pool_name': 'volume1', - 'backend_info': - 'Synology:iscsi:72003c93-2db2-4f00-a169-67c5eae86bb1' - } - - result = self.common.update_volume_stats() - - self.assertDictEqual(data, result) - - def test_create_volume(self): - out = { - 'success': True - } - self.common.exec_webapi = ( - mock.Mock(side_effect=[ - out, - out, - exception.SynoAuthError(message='dont care')])) - self.common._check_lun_status_normal = ( - mock.Mock(side_effect=[True, False, True])) - - result = self.common.create_volume(VOLUME) - (self.common.exec_webapi. 
- assert_called_with('SYNO.Core.ISCSI.LUN', - 'create', - mock.ANY, - name=VOLUME['name'], - type=self.common.CINDER_LUN, - location='/' + self.conf.synology_pool_name, - size=VOLUME['size'] * units.Gi)) - self.assertIsNone(result) - - self.assertRaises(exception.VolumeDriverException, - self.common.create_volume, - VOLUME) - - self.assertRaises(exception.SynoAuthError, - self.common.create_volume, - VOLUME) - - def test_delete_volume(self): - out = { - 'success': True - } - self.common._get_lun_uuid = mock.Mock(return_value=LUN_UUID) - self.common.exec_webapi = ( - mock.Mock(side_effect=[ - out, - exception.SynoLUNNotExist(message='dont care'), - exception.SynoAuthError(message='dont care')])) - - result = self.common.delete_volume(VOLUME) - self.common._get_lun_uuid.assert_called_with(VOLUME['name']) - (self.common.exec_webapi. - assert_called_with('SYNO.Core.ISCSI.LUN', - 'delete', - mock.ANY, - uuid=LUN_UUID)) - self.assertIsNone(result) - - result = self.common.delete_volume(VOLUME) - self.assertIsNone(result) - - self.assertRaises(exception.SynoAuthError, - self.common.delete_volume, - VOLUME) - - def test_create_cloned_volume(self): - out = { - 'success': True - } - new_volume = copy.deepcopy(NEW_VOLUME) - new_volume['size'] = 20 - self.common.exec_webapi = mock.Mock(return_value=out) - self.common._get_lun_uuid = ( - mock.Mock(side_effect=[ - LUN_UUID, - LUN_UUID, - LUN_UUID, - exception.InvalidParameterValue('dont care')])) - self.common.extend_volume = mock.Mock() - self.common._check_lun_status_normal = ( - mock.Mock(side_effect=[True, True, False, False])) - result = self.common.create_cloned_volume(new_volume, VOLUME) - self.common._get_lun_uuid.assert_called_with(VOLUME['name']) - (self.common.exec_webapi. - assert_called_with('SYNO.Core.ISCSI.LUN', - 'clone', - mock.ANY, - src_lun_uuid=LUN_UUID, - dst_lun_name=new_volume['name'], - is_same_pool=True, - clone_type='CINDER')) - (self.common._check_lun_status_normal. 
- assert_called_with(new_volume['name'])) - self.common.extend_volume.assert_called_once_with(new_volume, - new_volume['size']) - self.assertIsNone(result) - - new_volume['size'] = 10 - result = self.common.create_cloned_volume(new_volume, VOLUME) - self.assertIsNone(result) - - self.assertRaises(exception.VolumeDriverException, - self.common.create_cloned_volume, - new_volume, - VOLUME) - - self.assertRaises(exception.InvalidParameterValue, - self.common.create_cloned_volume, - new_volume, - VOLUME) - - def test_extend_volume(self): - new_size = 20 - out = { - 'success': True - } - self.common.exec_webapi = mock.Mock(return_value=out) - self.common._get_lun_uuid = ( - mock.Mock(side_effect=[ - LUN_UUID, - exception.InvalidParameterValue('dont care')])) - - result = self.common.extend_volume(VOLUME, new_size) - - (self.common.exec_webapi. - assert_called_with('SYNO.Core.ISCSI.LUN', - 'set', - mock.ANY, - uuid=LUN_UUID, - new_size=new_size * units.Gi)) - self.assertIsNone(result) - self.assertRaises(exception.ExtendVolumeError, - self.common.extend_volume, - VOLUME, - new_size) - - def test_update_migrated_volume(self): - expected = { - '_name_id': None - } - self.common._modify_lun_name = mock.Mock(side_effect=[None, Exception]) - - result = self.common.update_migrated_volume(VOLUME, - NEW_VOLUME) - - self.common._modify_lun_name.assert_called_with(NEW_VOLUME['name'], - VOLUME['name']) - self.assertDictEqual(expected, result) - - self.assertRaises(exception.VolumeMigrationFailed, - self.common.update_migrated_volume, - VOLUME, - NEW_VOLUME) - - def test_create_snapshot(self): - expected_result = { - 'metadata': { - self.common.METADATA_DS_SNAPSHOT_UUID: DS_SNAPSHOT_UUID - } - } - expected_result['metadata'].update(SNAPSHOT['metadata']) - - out = { - 'data': { - 'snapshot_uuid': DS_SNAPSHOT_UUID, - 'snapshot_id': SNAPSHOT_ID - }, - 'success': True - } - self.common.exec_webapi = mock.Mock(return_value=out) - self.common._check_snapshot_status_healthy = ( - 
mock.Mock(side_effect=[True, False])) - - result = self.common.create_snapshot(SNAPSHOT) - - (self.common.exec_webapi. - assert_called_with('SYNO.Core.ISCSI.LUN', - 'take_snapshot', - mock.ANY, - src_lun_uuid=SNAPSHOT['volume']['name'], - is_app_consistent=False, - is_locked=False, - taken_by='Cinder', - description='(Cinder) ' + - SNAPSHOT['id'])) - self.assertDictEqual(expected_result, result) - - self.assertRaises(exception.VolumeDriverException, - self.common.create_snapshot, - SNAPSHOT) - - def test_create_snapshot_error(self): - out = { - 'data': { - 'snapshot_uuid': 1, - 'snapshot_id': SNAPSHOT_ID - }, - 'success': True - } - self.common.exec_webapi = mock.Mock(return_value=out) - - self.assertRaises(exception.MalformedResponse, - self.common.create_snapshot, - SNAPSHOT) - - self.common.exec_webapi = ( - mock.Mock(side_effect=exception.SynoAuthError(reason='dont care'))) - - self.assertRaises(exception.SynoAuthError, - self.common.create_snapshot, - SNAPSHOT) - - def test_delete_snapshot(self): - out = { - 'success': True - } - self.common.exec_webapi = mock.Mock(return_value=out) - self.common._get_metadata_value = ( - mock.Mock(side_effect=[ - DS_SNAPSHOT_UUID, - exception.SnapshotMetadataNotFound(message='dont care'), - exception.MetadataAbsent])) - - result = self.common.delete_snapshot(SNAPSHOT) - (self.common._get_metadata_value. - assert_called_with(SNAPSHOT, - self.common.METADATA_DS_SNAPSHOT_UUID)) - (self.common.exec_webapi. 
- assert_called_with('SYNO.Core.ISCSI.LUN', - 'delete_snapshot', - mock.ANY, - snapshot_uuid=DS_SNAPSHOT_UUID, - deleted_by='Cinder')) - self.assertIsNone(result) - - result = self.common.delete_snapshot(SNAPSHOT) - self.assertIsNone(result) - - self.assertRaises(exception.MetadataAbsent, - self.common.delete_snapshot, - SNAPSHOT) - - def test_create_volume_from_snapshot(self): - out = { - 'success': True - } - new_volume = copy.deepcopy(NEW_VOLUME) - new_volume['size'] = 20 - self.common.exec_webapi = mock.Mock(return_value=out) - self.common._get_metadata_value = ( - mock.Mock(side_effect=[ - DS_SNAPSHOT_UUID, - DS_SNAPSHOT_UUID, - exception.SnapshotMetadataNotFound(message='dont care'), - exception.SynoAuthError(message='dont care')])) - self.common._check_lun_status_normal = ( - mock.Mock(side_effect=[True, False, True, True])) - self.common.extend_volume = mock.Mock() - - result = self.common.create_volume_from_snapshot(new_volume, SNAPSHOT) - - (self.common._get_metadata_value. - assert_called_with(SNAPSHOT, - self.common.METADATA_DS_SNAPSHOT_UUID)) - (self.common.exec_webapi. 
- assert_called_with('SYNO.Core.ISCSI.LUN', - 'clone_snapshot', - mock.ANY, - src_lun_uuid=SNAPSHOT['volume']['name'], - snapshot_uuid=DS_SNAPSHOT_UUID, - cloned_lun_name=new_volume['name'], - clone_type='CINDER')) - self.common.extend_volume.assert_called_once_with(new_volume, - new_volume['size']) - self.assertIsNone(result) - - self.assertRaises(exception.VolumeDriverException, - self.common.create_volume_from_snapshot, - new_volume, - SNAPSHOT) - - self.assertRaises(exception.SnapshotMetadataNotFound, - self.common.create_volume_from_snapshot, - new_volume, - SNAPSHOT) - - self.assertRaises(exception.SynoAuthError, - self.common.create_volume_from_snapshot, - new_volume, - SNAPSHOT) - - def test_get_iqn_and_trgid(self): - location = '%s:3260,%d %s 1' % (IP, 1, IQN) - - result = self.common.get_iqn_and_trgid(location) - - self.assertEqual((IQN, 1), result) - - location = '' - self.assertRaises(exception.InvalidParameterValue, - self.common.get_iqn_and_trgid, - location) - - location = 'BADINPUT' - self.assertRaises(exception.InvalidInput, - self.common.get_iqn_and_trgid, - location) - - location = '%s:3260 %s 1' % (IP, IQN) - self.assertRaises(exception.InvalidInput, - self.common.get_iqn_and_trgid, - location) - - def test_get_iscsi_properties(self): - volume = copy.deepcopy(VOLUME) - iscsi_properties = { - 'target_discovered': False, - 'target_iqn': IQN, - 'target_portal': '%s:3260' % IP, - 'volume_id': VOLUME['id'], - 'access_mode': 'rw', - 'discard': False, - 'auth_method': 'CHAP', - 'auth_username': CHAP_AUTH_USERNAME, - 'auth_password': CHAP_AUTH_PASSWORD - } - self.common.get_ip = mock.Mock(return_value=IP) - self.conf.safe_get = mock.Mock(return_value=[]) - - result = self.common.get_iscsi_properties(volume) - self.assertDictEqual(iscsi_properties, result) - - volume['provider_location'] = '' - self.assertRaises(exception.InvalidParameterValue, - self.common.get_iscsi_properties, - volume) - - def test_get_iscsi_properties_multipath(self): - volume = 
copy.deepcopy(VOLUME) - iscsi_properties = { - 'target_discovered': False, - 'target_iqn': IQN, - 'target_iqns': [IQN] * 3, - 'target_lun': 0, - 'target_luns': [0] * 3, - 'target_portal': '%s:3260' % IP, - 'target_portals': - ['%s:3260' % IP, '10.0.0.2:3260', '10.0.0.3:3260'], - 'volume_id': VOLUME['id'], - 'access_mode': 'rw', - 'discard': False, - 'auth_method': 'CHAP', - 'auth_username': CHAP_AUTH_USERNAME, - 'auth_password': CHAP_AUTH_PASSWORD - } - self.common.get_ip = mock.Mock(return_value=IP) - self.conf.safe_get = mock.Mock(return_value=['10.0.0.2', '10.0.0.3']) - - result = self.common.get_iscsi_properties(volume) - self.assertDictEqual(iscsi_properties, result) - - volume['provider_location'] = '' - self.assertRaises(exception.InvalidParameterValue, - self.common.get_iscsi_properties, - volume) - - def test_get_iscsi_properties_without_chap(self): - volume = copy.deepcopy(VOLUME) - iscsi_properties = { - 'target_discovered': False, - 'target_iqn': IQN, - 'target_portal': '%s:3260' % IP, - 'volume_id': VOLUME['id'], - 'access_mode': 'rw', - 'discard': False - } - self.common.get_ip = mock.Mock(return_value=IP) - self.conf.safe_get = mock.Mock(return_value=[]) - - volume['provider_auth'] = 'abcde' - result = self.common.get_iscsi_properties(volume) - self.assertDictEqual(iscsi_properties, result) - - volume['provider_auth'] = '' - result = self.common.get_iscsi_properties(volume) - self.assertDictEqual(iscsi_properties, result) - - del volume['provider_auth'] - result = self.common.get_iscsi_properties(volume) - self.assertDictEqual(iscsi_properties, result) - - def test_create_iscsi_export(self): - self.common._target_create = ( - mock.Mock(return_value=(IQN, TRG_ID, VOLUME['provider_auth']))) - self.common._lun_map_target = mock.Mock() - - iqn, trg_id, provider_auth = ( - self.common.create_iscsi_export(VOLUME['name'], VOLUME['id'])) - - self.common._target_create.assert_called_with(VOLUME['id']) - 
self.common._lun_map_target.assert_called_with(VOLUME['name'], trg_id) - self.assertEqual((IQN, TRG_ID, VOLUME['provider_auth']), - (iqn, trg_id, provider_auth)) - - def test_remove_iscsi_export(self): - trg_id = TRG_ID - self.common._lun_unmap_target = mock.Mock() - self.common._target_delete = mock.Mock() - - result = self.common.remove_iscsi_export(VOLUME['name'], trg_id) - - self.assertIsNone(result) - self.common._lun_unmap_target.assert_called_with(VOLUME['name'], - TRG_ID) - self.common._target_delete.assert_called_with(TRG_ID) diff --git a/cinder/tests/unit/volume/drivers/synology/test_synology_iscsi.py b/cinder/tests/unit/volume/drivers/synology/test_synology_iscsi.py deleted file mode 100644 index 7b88fcfc4..000000000 --- a/cinder/tests/unit/volume/drivers/synology/test_synology_iscsi.py +++ /dev/null @@ -1,355 +0,0 @@ -# Copyright (c) 2016 Synology Co., Ltd. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -"""Tests for the Synology iSCSI volume driver.""" - -import mock - -from cinder import exception -from cinder import test -from cinder.tests.unit import fake_constants as fake -from cinder.volume import configuration as conf -from cinder.volume.drivers.synology import synology_common as common -from cinder.volume.drivers.synology import synology_iscsi - -VOLUME_ID = fake.VOLUME_ID -TARGET_NAME_PREFIX = 'Cinder-Target-' -IP = '10.10.10.10' -IQN = 'iqn.2000-01.com.synology:' + TARGET_NAME_PREFIX + VOLUME_ID -TRG_ID = 1 -VOLUME = { - 'name': fake.VOLUME_NAME, - 'id': VOLUME_ID, - 'display_name': 'fake_volume', - 'size': 10, - 'provider_location': '%s:3260,%d %s 1' % (IP, TRG_ID, IQN), -} -NEW_VOLUME_ID = fake.VOLUME2_ID -IQN2 = 'iqn.2000-01.com.synology:' + TARGET_NAME_PREFIX + NEW_VOLUME_ID -NEW_TRG_ID = 2 -NEW_VOLUME = { - 'name': fake.VOLUME2_NAME, - 'id': NEW_VOLUME_ID, - 'display_name': 'new_fake_volume', - 'size': 10, - 'provider_location': '%s:3260,%d %s 1' % (IP, NEW_TRG_ID, IQN2), -} -SNAPSHOT_ID = fake.SNAPSHOT_ID -SNAPSHOT = { - 'name': fake.SNAPSHOT_NAME, - 'id': SNAPSHOT_ID, - 'volume_id': VOLUME_ID, - 'volume_name': VOLUME['name'], - 'volume_size': 10, - 'display_name': 'fake_snapshot', -} -DS_SNAPSHOT_UUID = 'ca86a56a-40d8-4210-974c-ef15dbf01cba' -SNAPSHOT_METADATA = { - 'metadata': { - 'ds_snapshot_UUID': DS_SNAPSHOT_UUID - } -} -INITIATOR_IQN = 'iqn.1993-08.org.debian:01:604af6a341' -CONNECTOR = { - 'initiator': INITIATOR_IQN, -} -CONTEXT = { -} -LOCAL_PATH = '/dev/isda' -IMAGE_SERVICE = 'image_service' -IMAGE_ID = 1 -IMAGE_META = { - 'id': IMAGE_ID -} -NODE_UUID = '72003c93-2db2-4f00-a169-67c5eae86bb1' -HOST = { -} - - -class SynoISCSIDriverTestCase(test.TestCase): - - @mock.patch.object(common.SynoCommon, - '_get_node_uuid', - return_value=NODE_UUID) - @mock.patch.object(common, 'APIRequest') - def setUp(self, _request, _get_node_uuid): - super(SynoISCSIDriverTestCase, self).setUp() - - self.conf = self.setup_configuration() - self.driver = 
synology_iscsi.SynoISCSIDriver(configuration=self.conf) - self.driver.common = common.SynoCommon(self.conf, 'iscsi') - - def setup_configuration(self): - config = mock.Mock(spec=conf.Configuration) - config.use_chap_auth = False - config.iscsi_protocol = 'iscsi' - config.iscsi_ip_address = IP - config.synology_admin_port = 5000 - config.synology_username = 'admin' - config.synology_password = 'admin' - config.synology_ssl_verify = True - config.synology_one_time_pass = '123456' - config.volume_dd_blocksize = 1 - - return config - - def test_check_for_setup_error(self): - self.driver.common.check_for_setup_error = mock.Mock() - - result = self.driver.check_for_setup_error() - - self.driver.common.check_for_setup_error.assert_called_with() - self.assertIsNone(result) - - def test_create_volume(self): - self.driver.common.create_volume = mock.Mock() - - result = self.driver.create_volume(VOLUME) - - self.driver.common.create_volume.assert_called_with(VOLUME) - self.assertIsNone(result) - - def test_delete_volume(self): - self.driver.common.delete_volume = mock.Mock() - - result = self.driver.delete_volume(VOLUME) - - self.driver.common.delete_volume.assert_called_with(VOLUME) - self.assertIsNone(result) - - def test_create_cloned_volume(self): - self.driver.common.create_cloned_volume = mock.Mock() - - result = self.driver.create_cloned_volume(VOLUME, NEW_VOLUME) - - self.driver.common.create_cloned_volume.assert_called_with( - VOLUME, NEW_VOLUME) - self.assertIsNone(result) - - def test_extend_volume(self): - new_size = 20 - - self.driver.common.extend_volume = mock.Mock() - - result = self.driver.extend_volume(VOLUME, new_size) - - self.driver.common.extend_volume.assert_called_with( - VOLUME, new_size) - self.assertIsNone(result) - - def test_extend_volume_wrong_size(self): - wrong_new_size = 1 - - self.driver.common.extend_volume = mock.Mock() - - result = self.driver.extend_volume(VOLUME, wrong_new_size) - - self.driver.common.extend_volume.assert_not_called() - 
self.assertIsNone(result) - - def test_create_volume_from_snapshot(self): - self.driver.common.create_volume_from_snapshot = mock.Mock() - - result = self.driver.create_volume_from_snapshot(VOLUME, SNAPSHOT) - - (self.driver.common. - create_volume_from_snapshot.assert_called_with(VOLUME, SNAPSHOT)) - self.assertIsNone(result) - - def test_update_migrated_volume(self): - fake_ret = {'_name_id': VOLUME['id']} - status = '' - self.driver.common.update_migrated_volume = ( - mock.Mock(return_value=fake_ret)) - - result = self.driver.update_migrated_volume(CONTEXT, - VOLUME, - NEW_VOLUME, - status) - - (self.driver.common.update_migrated_volume. - assert_called_with(VOLUME, NEW_VOLUME)) - self.assertEqual(fake_ret, result) - - def test_create_snapshot(self): - self.driver.common.create_snapshot = ( - mock.Mock(return_value=SNAPSHOT_METADATA)) - - result = self.driver.create_snapshot(SNAPSHOT) - - self.driver.common.create_snapshot.assert_called_with(SNAPSHOT) - self.assertDictEqual(SNAPSHOT_METADATA, result) - - def test_delete_snapshot(self): - self.driver.common.delete_snapshot = mock.Mock() - - result = self.driver.delete_snapshot(SNAPSHOT) - - self.driver.common.delete_snapshot.assert_called_with(SNAPSHOT) - self.assertIsNone(result) - - def test_get_volume_stats(self): - self.driver.common.update_volume_stats = mock.MagicMock() - - result = self.driver.get_volume_stats(True) - - self.driver.common.update_volume_stats.assert_called_with() - self.assertEqual(self.driver.stats, result) - - result = self.driver.get_volume_stats(False) - - self.driver.common.update_volume_stats.assert_called_with() - self.assertEqual(self.driver.stats, result) - - def test_get_volume_stats_error(self): - self.driver.common.update_volume_stats = ( - mock.MagicMock(side_effect=exception.VolumeDriverException( - message='dont care'))) - - self.assertRaises(exception.VolumeDriverException, - self.driver.get_volume_stats, - True) - - def test_create_export(self): - provider_auth = 'CHAP 
username password' - provider_location = '%s:3260,%d %s 1' % (IP, TRG_ID, IQN) - - self.driver.common.is_lun_mapped = mock.Mock(return_value=False) - self.driver.common.create_iscsi_export = ( - mock.Mock(return_value=(IQN, TRG_ID, provider_auth))) - self.driver.common.get_provider_location = ( - mock.Mock(return_value=provider_location)) - - result = self.driver.create_export(CONTEXT, VOLUME, CONNECTOR) - - self.driver.common.is_lun_mapped.assert_called_with(VOLUME['name']) - (self.driver.common.create_iscsi_export. - assert_called_with(VOLUME['name'], VOLUME['id'])) - self.driver.common.get_provider_location.assert_called_with(IQN, - TRG_ID) - self.assertEqual(provider_location, result['provider_location']) - self.assertEqual(provider_auth, result['provider_auth']) - - def test_create_export_is_mapped(self): - self.driver.common.is_lun_mapped = mock.Mock(return_value=True) - self.driver.common.create_iscsi_export = mock.Mock() - self.driver.common.get_provider_location = mock.Mock() - - result = self.driver.create_export(CONTEXT, VOLUME, CONNECTOR) - - self.driver.common.is_lun_mapped.assert_called_with(VOLUME['name']) - self.driver.common.create_iscsi_export.assert_not_called() - self.driver.common.get_provider_location.assert_not_called() - self.assertEqual({}, result) - - def test_create_export_error(self): - provider_location = '%s:3260,%d %s 1' % (IP, TRG_ID, IQN) - - self.driver.common.is_lun_mapped = mock.Mock(return_value=False) - self.driver.common.create_iscsi_export = ( - mock.Mock(side_effect=exception.InvalidInput(reason='dont care'))) - self.driver.common.get_provider_location = ( - mock.Mock(return_value=provider_location)) - - self.assertRaises(exception.ExportFailure, - self.driver.create_export, - CONTEXT, - VOLUME, - CONNECTOR) - self.driver.common.is_lun_mapped.assert_called_with(VOLUME['name']) - self.driver.common.get_provider_location.assert_not_called() - - def test_remove_export(self): - self.driver.common.is_lun_mapped = 
mock.Mock(return_value=True) - self.driver.common.remove_iscsi_export = mock.Mock() - self.driver.common.get_iqn_and_trgid = ( - mock.Mock(return_value=('', TRG_ID))) - - _, trg_id = (self.driver.common. - get_iqn_and_trgid(VOLUME['provider_location'])) - result = self.driver.remove_export(CONTEXT, VOLUME) - - self.driver.common.is_lun_mapped.assert_called_with(VOLUME['name']) - (self.driver.common.get_iqn_and_trgid. - assert_called_with(VOLUME['provider_location'])) - (self.driver.common.remove_iscsi_export. - assert_called_with(VOLUME['name'], trg_id)) - self.assertIsNone(result) - - def test_remove_export_not_mapped(self): - self.driver.common.is_lun_mapped = mock.Mock(return_value=False) - self.driver.common.remove_iscsi_export = mock.Mock() - self.driver.common.get_iqn_and_trgid = mock.Mock() - - result = self.driver.remove_export(CONTEXT, VOLUME) - - self.driver.common.is_lun_mapped.assert_called_with(VOLUME['name']) - self.driver.common.get_iqn_and_trgid.assert_not_called() - self.driver.common.remove_iscsi_export.assert_not_called() - self.assertIsNone(result) - - def test_remove_export_error(self): - self.driver.common.is_lun_mapped = mock.Mock(return_value=True) - self.driver.common.remove_iscsi_export = ( - mock.Mock(side_effect= exception.RemoveExportException( - volume=VOLUME, reason='dont care'))) - - self.assertRaises(exception.RemoveExportException, - self.driver.remove_export, - CONTEXT, - VOLUME) - - def test_remove_export_error_get_lun_mapped(self): - self.driver.common.remove_iscsi_export = mock.Mock() - self.driver.common.get_iqn_and_trgid = mock.Mock() - self.driver.common.is_lun_mapped = ( - mock.Mock(side_effect=exception.SynoLUNNotExist( - message='dont care'))) - - result = self.driver.remove_export(CONTEXT, VOLUME) - - self.assertIsNone(result) - self.driver.common.get_iqn_and_trgid.assert_not_called() - self.driver.common.remove_iscsi_export.assert_not_called() - - def test_initialize_connection(self): - iscsi_properties = { - 
'target_discovered': False, - 'target_iqn': IQN, - 'target_portal': '%s:3260' % self.conf.iscsi_ip_address, - 'volume_id': VOLUME['id'], - 'access_mode': 'rw', - 'discard': False - } - - self.driver.common.get_iscsi_properties = ( - mock.Mock(return_value=iscsi_properties)) - self.conf.safe_get = mock.Mock(return_value='iscsi') - - result = self.driver.initialize_connection(VOLUME, CONNECTOR) - - self.driver.common.get_iscsi_properties.assert_called_with(VOLUME) - self.conf.safe_get.assert_called_with('iscsi_protocol') - self.assertEqual('iscsi', result['driver_volume_type']) - self.assertDictEqual(iscsi_properties, result['data']) - - def test_initialize_connection_error(self): - self.driver.common.get_iscsi_properties = ( - mock.Mock(side_effect=exception.InvalidInput(reason='dont care'))) - - self.assertRaises(exception.InvalidInput, - self.driver.initialize_connection, - VOLUME, - CONNECTOR) diff --git a/cinder/tests/unit/volume/drivers/test_block_device.py b/cinder/tests/unit/volume/drivers/test_block_device.py deleted file mode 100644 index c501c9fbe..000000000 --- a/cinder/tests/unit/volume/drivers/test_block_device.py +++ /dev/null @@ -1,421 +0,0 @@ -# Copyright (c) 2013 Mirantis, Inc. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -import mock -from oslo_config import cfg - -from cinder import context -from cinder import db -import cinder.exception -from cinder.objects import fields -from cinder.objects import snapshot as obj_snap -from cinder.objects import volume as obj_volume -import cinder.test -from cinder.tests.unit import fake_constants as fake -from cinder.volume import configuration as conf -from cinder.volume.drivers import block_device -from cinder.volume import utils as volutils - - -class TestBlockDeviceDriver(cinder.test.TestCase): - def setUp(self): - fake_opt = [cfg.StrOpt('fake_opt', default='fake', help='fake option')] - super(TestBlockDeviceDriver, self).setUp() - self.configuration = conf.Configuration(fake_opt, 'fake_group') - self.configuration.available_devices = ['/dev/loop1', '/dev/loop2'] - self.configuration.iscsi_helper = 'tgtadm' - self.host = 'localhost' - self.configuration.iscsi_port = 3260 - self.configuration.volume_dd_blocksize = 1234 - self.drv = block_device.BlockDeviceDriver( - configuration=self.configuration, - host='localhost', db=db) - - def test_initialize_connection(self): - TEST_VOLUME1 = obj_volume.Volume(host='localhost1', - provider_location='1 2 3 /dev/loop1', - provider_auth='a b c', - attached_mode='rw', - id=fake.VOLUME_ID) - TEST_CONNECTOR = {'host': 'localhost1'} - - data = self.drv.initialize_connection(TEST_VOLUME1, TEST_CONNECTOR) - expected_data = {'data': {'device_path': '/dev/loop1'}, - 'driver_volume_type': 'local'} - - self.assertEqual(expected_data, data) - - @mock.patch('cinder.volume.driver.ISCSIDriver.initialize_connection') - def test_initialize_connection_different_hosts(self, _init_conn): - TEST_CONNECTOR = {'host': 'localhost1'} - TEST_VOLUME2 = obj_volume.Volume(host='localhost2', - provider_location='1 2 3 /dev/loop2', - provider_auth='d e f', - attached_mode='rw', - id=fake.VOLUME2_ID) - _init_conn.return_value = 'data' - - data = self.drv.initialize_connection(TEST_VOLUME2, TEST_CONNECTOR) - expected_data = {'data': 
{'auth_method': 'd', - 'auth_password': 'f', - 'auth_username': 'e', - 'encrypted': False, - 'target_discovered': False, - 'target_iqn': '2', - 'target_lun': 3, - 'target_portal': '1', - 'volume_id': fake.VOLUME2_ID}} - - self.assertEqual(expected_data['data'], data['data']) - - @mock.patch('cinder.volume.drivers.block_device.BlockDeviceDriver.' - 'local_path', return_value=None) - @mock.patch('cinder.volume.utils.clear_volume') - def test_delete_not_volume_provider_location(self, _clear_volume, - _local_path): - TEST_VOLUME2 = obj_volume.Volume(provider_location=None) - self.drv.delete_volume(TEST_VOLUME2) - _local_path.assert_called_once_with(TEST_VOLUME2) - - @mock.patch('os.path.exists', return_value=True) - @mock.patch('cinder.volume.utils.clear_volume') - def test_delete_volume_path_exist(self, _clear_volume, _exists): - TEST_VOLUME = obj_volume.Volume(name_id=fake.VOLUME_NAME_ID, - size=1, - provider_location='/dev/loop1', - display_name='vol1', - status='available') - - with mock.patch.object(self.drv, 'local_path', - return_value='/dev/loop1') as lp_mocked: - with mock.patch.object(self.drv, '_get_devices_sizes', - return_value={'/dev/loop1': 1}) as \ - gds_mocked: - volutils.clear_volume(gds_mocked, lp_mocked) - - self.drv.delete_volume(TEST_VOLUME) - - lp_mocked.assert_called_once_with(TEST_VOLUME) - gds_mocked.assert_called_once_with(['/dev/loop1']) - - self.assertTrue(_exists.called) - self.assertTrue(_clear_volume.called) - - def test_delete_path_is_not_in_list_of_available_devices(self): - TEST_VOLUME2 = obj_volume.Volume(provider_location='/dev/loop0') - with mock.patch.object(self.drv, 'local_path', - return_value='/dev/loop0') as lp_mocked: - self.drv.delete_volume(TEST_VOLUME2) - lp_mocked.assert_called_once_with(TEST_VOLUME2) - - def test__update_provider_location(self): - TEST_VOLUME = obj_volume.Volume(name_id=fake.VOLUME_NAME_ID, - size=1, - display_name='vol1') - with mock.patch.object(obj_volume.Volume, 'update') as update_mocked, \ - 
mock.patch.object(obj_volume.Volume, 'save') as save_mocked: - self.drv._update_provider_location(TEST_VOLUME, 'dev_path') - self.assertEqual(1, update_mocked.call_count) - save_mocked.assert_called_once_with() - - def test_create_volume(self): - TEST_VOLUME = obj_volume.Volume(name_id=fake.VOLUME_NAME_ID, - size=1, - display_name='vol1') - - with mock.patch.object(self.drv, 'find_appropriate_size_device', - return_value='dev_path') as fasd_mocked: - with mock.patch.object(self.drv, '_update_provider_location') as \ - upl_mocked: - self.drv.create_volume(TEST_VOLUME) - fasd_mocked.assert_called_once_with(TEST_VOLUME.size) - upl_mocked.assert_called_once_with(TEST_VOLUME, 'dev_path') - - def test_update_volume_stats(self): - - with mock.patch.object(self.drv, '_devices_sizes', - return_value={'/dev/loop1': 1024, - '/dev/loop2': 1024}) as \ - ds_mocked: - with mock.patch.object(self.drv, '_get_used_devices') as \ - gud_mocked: - self.drv._update_volume_stats() - - reserved_percentage = self.configuration.reserved_percentage - self.assertEqual({ - 'vendor_name': "Open Source", - 'driver_version': self.drv.VERSION, - 'volume_backend_name': 'BlockDev', - 'storage_protocol': 'unknown', - 'pools': [{ - 'QoS_support': False, - 'total_capacity_gb': 2, - 'free_capacity_gb': 2, - 'reserved_percentage': reserved_percentage, - 'pool_name': 'BlockDev'}]}, self.drv._stats) - gud_mocked.assert_called_once_with() - ds_mocked.assert_called_once_with() - - @mock.patch('cinder.volume.utils.copy_volume') - def test_create_cloned_volume(self, _copy_volume): - TEST_SRC = obj_volume.Volume(id=fake.VOLUME_ID, - name_id=fake.VOLUME_NAME_ID, - size=1, - provider_location='/dev/loop1') - TEST_VOLUME = obj_volume.Volume(name_id=fake.VOLUME2_NAME_ID, - size=1, - display_name='vol1') - - with mock.patch.object(self.drv, 'find_appropriate_size_device', - return_value='/dev/loop2') as fasd_mocked: - with mock.patch.object(self.drv, '_get_devices_sizes', - return_value={'/dev/loop2': 2}) as \ - 
gds_mocked: - with mock.patch.object(self.drv, 'local_path', - return_value='/dev/loop1') as \ - lp_mocked: - with mock.patch.object(self.drv, - '_update_provider_location') as \ - upl_mocked: - volutils.copy_volume('/dev/loop1', fasd_mocked, 2, - mock.sentinel, - execute=self.drv._execute) - self.drv.create_cloned_volume(TEST_VOLUME, TEST_SRC) - fasd_mocked.assert_called_once_with(TEST_SRC.size) - lp_mocked.assert_called_once_with(TEST_SRC) - gds_mocked.assert_called_once_with(['/dev/loop2']) - upl_mocked.assert_called_once_with( - TEST_VOLUME, '/dev/loop2') - - @mock.patch.object(cinder.image.image_utils, 'fetch_to_raw') - def test_copy_image_to_volume(self, _fetch_to_raw): - TEST_VOLUME = obj_volume.Volume(name_id=fake.VOLUME_NAME_ID, - size=1, - provider_location='/dev/loop1') - TEST_IMAGE_SERVICE = "image_service" - TEST_IMAGE_ID = "image_id" - - with mock.patch.object(self.drv, 'local_path', - return_value='/dev/loop1') as lp_mocked: - self.drv.copy_image_to_volume(context, TEST_VOLUME, - TEST_IMAGE_SERVICE, TEST_IMAGE_ID) - lp_mocked.assert_called_once_with(TEST_VOLUME) - - _fetch_to_raw.assert_called_once_with(context, TEST_IMAGE_SERVICE, - TEST_IMAGE_ID, '/dev/loop1', - 1234, size=1) - - def test_copy_volume_to_image(self): - TEST_VOLUME = {'provider_location': '/dev/loop1'} - TEST_IMAGE_SERVICE = "image_service" - TEST_IMAGE_META = "image_meta" - - with mock.patch.object(cinder.image.image_utils, 'upload_volume') as \ - _upload_volume: - with mock.patch.object(self.drv, 'local_path') as _local_path: - _local_path.return_value = '/dev/loop1' - self.drv.copy_volume_to_image(context, TEST_VOLUME, - TEST_IMAGE_SERVICE, - TEST_IMAGE_META) - - self.assertTrue(_local_path.called) - _upload_volume.assert_called_once_with(context, - TEST_IMAGE_SERVICE, - TEST_IMAGE_META, - '/dev/loop1') - - def test_get_used_devices(self): - TEST_VOLUME1 = {'host': 'localhost', - 'provider_location': '/dev/loop1'} - TEST_VOLUME2 = {'host': 'localhost', - 'provider_location': 
'/dev/loop2'} - - def fake_local_path(vol): - return vol['provider_location'].split()[-1] - - with mock.patch.object(obj_volume.VolumeList, 'get_all_by_host', - return_value=[TEST_VOLUME1, TEST_VOLUME2]),\ - mock.patch.object(obj_snap.SnapshotList, 'get_by_host', - return_value=[]): - with mock.patch.object(context, 'get_admin_context'): - with mock.patch.object(self.drv, 'local_path', - return_value=fake_local_path): - path1 = self.drv.local_path(TEST_VOLUME1) - path2 = self.drv.local_path(TEST_VOLUME2) - self.assertEqual(set([path1, path2]), - self.drv._get_used_devices()) - - def test_get_devices_sizes(self): - dev_paths = ['/dev/loop1', '/dev/loop2', '/dev/loop3'] - out = '4294967296\n2147483648\n3221225472\nn' - with mock.patch.object(self.drv, - '_execute', - return_value=(out, None)) as _execute: - actual = self.drv._get_devices_sizes(dev_paths) - self.assertEqual(3, len(actual)) - self.assertEqual({'/dev/loop1': 4096, '/dev/loop2': 2048, - '/dev/loop3': 3072}, actual) - _execute.assert_called_once_with('blockdev', '--getsize64', - *dev_paths, run_as_root=True) - - def test_devices_sizes(self): - with mock.patch.object(self.drv, '_get_devices_sizes') as \ - _get_dvc_size: - _get_dvc_size.return_value = {'/dev/loop1': 1, '/dev/loop2': 1} - self.assertEqual(2, len(self.drv._devices_sizes())) - self.assertEqual({'/dev/loop1': 1, '/dev/loop2': 1}, - self.drv._devices_sizes()) - - def test_find_appropriate_size_device_no_free_disks(self): - size = 1 - with mock.patch.object(self.drv, '_devices_sizes') as _dvc_sizes: - with mock.patch.object(self.drv, '_get_used_devices') as \ - _get_used_dvc: - _dvc_sizes.return_value = {'/dev/loop1': 1, - '/dev/loop2': 1} - _get_used_dvc.return_value = set(['/dev/loop1', '/dev/loop2']) - self.assertRaises(cinder.exception.CinderException, - self.drv.find_appropriate_size_device, size) - - def test_find_appropriate_size_device_not_big_enough_disk(self): - size = 2948 - with mock.patch.object(self.drv, '_devices_sizes') as 
_dvc_sizes: - with mock.patch.object(self.drv, '_get_used_devices') as \ - _get_used_dvc: - _dvc_sizes.return_value = {'/dev/loop1': 1024, - '/dev/loop2': 1924} - _get_used_dvc.return_value = set(['/dev/loop1']) - self.assertRaises(cinder.exception.CinderException, - self.drv.find_appropriate_size_device, size) - - def test_find_appropriate_size_device(self): - size = 1 - with mock.patch.object(self.drv, '_devices_sizes') as _dvc_sizes: - with mock.patch.object(self.drv, '_get_used_devices') as \ - _get_used_dvc: - _dvc_sizes.return_value = {'/dev/loop1': 2048, - '/dev/loop2': 1024} - _get_used_dvc.return_value = set() - self.assertEqual('/dev/loop2', - self.drv.find_appropriate_size_device(size)) - - def test_extend_volume_exists(self): - TEST_VOLUME = {'name': 'vol1', 'id': 123} - with mock.patch.object(self.drv, '_get_devices_sizes', - return_value={'/dev/loop1': 1024}) as \ - mock_get_size: - with mock.patch.object(self.drv, 'local_path', - return_value='/dev/loop1') as lp_mocked: - self.assertRaises(cinder.exception.CinderException, - self.drv.extend_volume, TEST_VOLUME, 2) - lp_mocked.assert_called_once_with(TEST_VOLUME) - mock_get_size.assert_called_once_with(['/dev/loop1']) - - @mock.patch('cinder.volume.utils.copy_volume') - def test_create_snapshot(self, _copy_volume): - TEST_VOLUME = obj_volume.Volume(id=fake.VOLUME_ID, - name_id=fake.VOLUME_NAME_ID, - size=1, - display_name='vol1', - status='available', - provider_location='/dev/loop1') - TEST_SNAP = obj_snap.Snapshot(id=fake.SNAPSHOT_ID, - volume_id=fake.VOLUME_ID, - volume_size=1024, - provider_location='/dev/loop2', - volume=TEST_VOLUME) - - with mock.patch.object(self.drv, 'find_appropriate_size_device', - return_value='/dev/loop2') as fasd_mocked: - with mock.patch.object(self.drv, '_get_devices_sizes', - return_value={'/dev/loop2': 1024}) as \ - gds_mocked: - with mock.patch.object(self.drv, - '_update_provider_location') as \ - upl_mocked: - volutils.copy_volume('/dev/loop1', fasd_mocked, 1024, - 
mock.sentinel, - execute=self.drv._execute) - self.drv.create_snapshot(TEST_SNAP) - fasd_mocked.assert_called_once_with(TEST_SNAP.volume_size) - gds_mocked.assert_called_once_with(['/dev/loop2']) - upl_mocked.assert_called_once_with( - TEST_SNAP, '/dev/loop2') - - def test_create_snapshot_with_not_available_volume(self): - TEST_VOLUME = obj_volume.Volume(id=fake.VOLUME_ID, - name_id=fake.VOLUME_NAME_ID, - size=1, - display_name='vol1', - status='in use', - provider_location='/dev/loop1') - TEST_SNAP = obj_snap.Snapshot(id=fake.SNAPSHOT_ID, - volume_id=fake.VOLUME_ID, - volume_size=1024, - provider_location='/dev/loop2', - volume=TEST_VOLUME) - - self.assertRaises(cinder.exception.CinderException, - self.drv.create_snapshot, TEST_SNAP) - - @mock.patch('cinder.volume.utils.copy_volume') - def test_create_volume_from_snapshot(self, _copy_volume): - TEST_SNAP = obj_snap.Snapshot(volume_id=fake.VOLUME_ID, - volume_size=1024, - provider_location='/dev/loop1') - TEST_VOLUME = obj_volume.Volume(id=fake.VOLUME_ID, - name_id=fake.VOLUME_NAME_ID, - size=1, - display_name='vol1', - provider_location='/dev/loop2') - - with mock.patch.object(self.drv, 'find_appropriate_size_device', - return_value='/dev/loop2') as fasd_mocked: - with mock.patch.object(self.drv, '_get_devices_sizes', - return_value={'/dev/loop2': 1024}) as \ - gds_mocked: - with mock.patch.object(self.drv, - '_update_provider_location') as \ - upl_mocked: - volutils.copy_volume('/dev/loop1', fasd_mocked, 1024, - mock.sentinel, - execute=self.drv._execute) - self.drv.create_volume_from_snapshot( - TEST_VOLUME, TEST_SNAP) - fasd_mocked.assert_called_once_with( - TEST_SNAP.volume_size) - gds_mocked.assert_called_once_with(['/dev/loop2']) - upl_mocked.assert_called_once_with( - TEST_VOLUME, '/dev/loop2') - - @mock.patch('os.path.exists', return_value=True) - @mock.patch('cinder.volume.utils.clear_volume') - def test_delete_snapshot(self, _clear_volume, _exists): - TEST_SNAP = 
obj_snap.Snapshot(volume_id=fake.VOLUME_ID, - provider_location='/dev/loop1', - status=fields.SnapshotStatus.AVAILABLE) - - with mock.patch.object(self.drv, 'local_path', - return_value='/dev/loop1') as lp_mocked: - with mock.patch.object(self.drv, '_get_devices_sizes', - return_value={'/dev/loop1': 1}) as \ - gds_mocked: - volutils.clear_volume(gds_mocked, lp_mocked) - self.drv.delete_snapshot(TEST_SNAP) - lp_mocked.assert_called_once_with(TEST_SNAP) - gds_mocked.assert_called_once_with(['/dev/loop1']) - - self.assertTrue(_exists.called) - self.assertTrue(_clear_volume.called) diff --git a/cinder/tests/unit/volume/drivers/test_blockbridge.py b/cinder/tests/unit/volume/drivers/test_blockbridge.py deleted file mode 100644 index 86660a238..000000000 --- a/cinder/tests/unit/volume/drivers/test_blockbridge.py +++ /dev/null @@ -1,582 +0,0 @@ -# Copyright 2015 Blockbridge Networks, LLC. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
-""" -Blockbridge EPS iSCSI Volume Driver Tests -""" - -import base64 - -import mock -from oslo_serialization import jsonutils -from oslo_utils import units -import six -from six.moves import http_client -from six.moves import urllib - -from cinder import context -from cinder import exception -from cinder import test -from cinder.volume import configuration as conf -import cinder.volume.drivers.blockbridge as bb - - -DEFAULT_POOL_NAME = "OpenStack" -DEFAULT_POOL_QUERY = "+openstack" - -FIXTURE_VOL_EXPORT_OK = """{ - "target_ip":"127.0.0.1", - "target_port":3260, - "target_iqn":"iqn.2009-12.com.blockbridge:t-pjxczxh-t001", - "target_lun":0, - "initiator_login":"mock-user-abcdef123456" -} -""" - -POOL_STATS_WITHOUT_USAGE = { - 'driver_version': '1.3.0', - 'pools': [{ - 'filter_function': None, - 'free_capacity_gb': 'unknown', - 'goodness_function': None, - 'location_info': 'BlockbridgeDriver:unknown:OpenStack', - 'max_over_subscription_ratio': None, - 'pool_name': 'OpenStack', - 'thin_provisioning_support': True, - 'reserved_percentage': 0, - 'total_capacity_gb': 'unknown'}, - ], - 'storage_protocol': 'iSCSI', - 'vendor_name': 'Blockbridge', - 'volume_backend_name': 'BlockbridgeISCSIDriver', -} - - -def common_mocks(f): - """Decorator to set mocks common to all tests. - - The point of doing these mocks here is so that we don't accidentally set - mocks that can't/don't get unset. 
- """ - def _common_inner_inner1(inst, *args, **kwargs): - @mock.patch("six.moves.http_client.HTTPSConnection", autospec=True) - def _common_inner_inner2(mock_conn): - inst.mock_httplib = mock_conn - inst.mock_conn = mock_conn.return_value - inst.mock_response = mock.Mock() - - inst.mock_response.read.return_value = '{}' - inst.mock_response.status = http_client.OK - - inst.mock_conn.request.return_value = True - inst.mock_conn.getresponse.return_value = inst.mock_response - - return f(inst, *args, **kwargs) - - return _common_inner_inner2() - - return _common_inner_inner1 - - -class BlockbridgeISCSIDriverTestCase(test.TestCase): - - def setUp(self): - super(BlockbridgeISCSIDriverTestCase, self).setUp() - - self.cfg = mock.Mock(spec=conf.Configuration) - self.cfg.blockbridge_api_host = 'ut-api.blockbridge.com' - self.cfg.blockbridge_api_port = None - self.cfg.blockbridge_auth_scheme = 'token' - self.cfg.blockbridge_auth_token = '0//kPIw7Ck7PUkPSKY...' - self.cfg.blockbridge_pools = {DEFAULT_POOL_NAME: DEFAULT_POOL_QUERY} - self.cfg.blockbridge_default_pool = None - self.cfg.filter_function = None - self.cfg.goodness_function = None - - def _cfg_safe_get(arg): - return getattr(self.cfg, arg, None) - - self.cfg.safe_get.side_effect = _cfg_safe_get - - mock_exec = mock.Mock() - mock_exec.return_value = ('', '') - - self.real_client = bb.BlockbridgeAPIClient(configuration=self.cfg) - self.mock_client = mock.Mock(wraps=self.real_client) - - self.driver = bb.BlockbridgeISCSIDriver(execute=mock_exec, - client=self.mock_client, - configuration=self.cfg) - - self.user_id = '2c13bc8ef717015fda1e12e70dab24654cb6a6da' - self.project_id = '62110b9d37f1ff3ea1f51e75812cb92ed9a08b28' - - self.volume_name = u'testvol-1' - self.volume_id = '6546b9e9-1980-4241-a4e9-0ad9d382c032' - self.volume_size = 1 - self.volume = dict( - name=self.volume_name, - size=self.volume_size, - id=self.volume_id, - user_id=self.user_id, - project_id=self.project_id, - host='fake-host') - - 
self.snapshot_name = u'testsnap-1' - self.snapshot_id = '207c12af-85a7-4da6-8d39-a7457548f965' - self.snapshot = dict( - volume_name=self.volume_name, - name=self.snapshot_name, - id=self.snapshot_id, - volume_id='55ff8a46-c35f-4ca3-9991-74e1697b220e', - user_id=self.user_id, - project_id=self.project_id) - - self.connector = dict( - initiator='iqn.1994-05.com.redhat:6a528422b61') - - self.driver.do_setup(context.get_admin_context()) - - @common_mocks - def test_http_mock_success(self): - self.mock_response.read.return_value = '{}' - self.mock_response.status = http_client.OK - - conn = http_client.HTTPSConnection('whatever', None) - conn.request('GET', '/blah', '{}', {}) - rsp = conn.getresponse() - - self.assertEqual('{}', rsp.read()) - self.assertEqual(http_client.OK, rsp.status) - - @common_mocks - def test_http_mock_failure(self): - mock_body = '{"error": "no results matching query", "status": 413}' - - self.mock_response.read.return_value = mock_body - self.mock_response.status = http_client.REQUEST_ENTITY_TOO_LARGE - - conn = http_client.HTTPSConnection('whatever', None) - conn.request('GET', '/blah', '{}', {}) - rsp = conn.getresponse() - - self.assertEqual(mock_body, rsp.read()) - self.assertEqual(http_client.REQUEST_ENTITY_TOO_LARGE, rsp.status) - - @common_mocks - def test_cfg_api_host(self): - with mock.patch.object(self.cfg, 'blockbridge_api_host', 'test.host'): - self.driver.get_volume_stats(True) - self.mock_httplib.assert_called_once_with('test.host', None) - - @common_mocks - def test_cfg_api_port(self): - with mock.patch.object(self.cfg, 'blockbridge_api_port', 1234): - self.driver.get_volume_stats(True) - self.mock_httplib.assert_called_once_with( - self.cfg.blockbridge_api_host, 1234) - - @common_mocks - def test_cfg_api_auth_scheme_password(self): - self.cfg.blockbridge_auth_scheme = 'password' - self.cfg.blockbridge_auth_user = 'mock-user' - self.cfg.blockbridge_auth_password = 'mock-password' - with mock.patch.object(self.driver, 'hostname', 
'mock-hostname'): - self.driver.get_volume_stats(True) - - creds = "%s:%s" % (self.cfg.blockbridge_auth_user, - self.cfg.blockbridge_auth_password) - if six.PY3: - creds = creds.encode('utf-8') - b64_creds = base64.encodestring(creds).decode('ascii') - else: - b64_creds = base64.encodestring(creds) - - params = dict( - hostname='mock-hostname', - version=self.driver.VERSION, - backend_name='BlockbridgeISCSIDriver', - pool='OpenStack', - query='+openstack') - - headers = { - 'Accept': 'application/vnd.blockbridge-3+json', - 'Authorization': "Basic %s" % b64_creds.replace("\n", ""), - 'User-Agent': "cinder-volume/%s" % self.driver.VERSION, - } - - self.mock_conn.request.assert_called_once_with( - 'GET', mock.ANY, None, headers) - # Parse the URL instead of comparing directly both URLs. - # On Python 3, parameters are formatted in a random order because - # of the hash randomization. - conn_url = self.mock_conn.request.call_args[0][1] - conn_params = dict(urllib.parse.parse_qsl(conn_url.split("?", 1)[1])) - self.assertTrue(conn_url.startswith("/api/cinder/status?"), - repr(conn_url)) - self.assertEqual(params, conn_params) - - @common_mocks - def test_create_volume(self): - self.driver.create_volume(self.volume) - - url = "/volumes/%s" % self.volume_id - create_params = dict( - name=self.volume_name, - query=DEFAULT_POOL_QUERY, - capacity=self.volume_size * units.Gi) - - kwargs = dict( - method='PUT', - params=create_params, - user_id=self.user_id, - project_id=self.project_id) - - self.mock_client.submit.assert_called_once_with(url, **kwargs) - - full_url = "/api/cinder" + url - tsk_header = "ext_auth=keystone/%(project_id)s/%(user_id)s" % kwargs - authz_header = "Bearer %s" % self.cfg.blockbridge_auth_token - headers = { - 'X-Blockbridge-Task': tsk_header, - 'Accept': 'application/vnd.blockbridge-3+json', - 'Content-Type': 'application/json', - 'Authorization': authz_header, - 'User-Agent': "cinder-volume/%s" % self.driver.VERSION, - } - - # This is split up because 
assert_called_once_with won't handle - # randomly ordered dictionaries. - args, kwargs = self.mock_conn.request.call_args - self.assertEqual(args[0], 'PUT') - self.assertEqual(args[1], full_url) - self.assertDictEqual(jsonutils.loads(args[2]), create_params) - self.assertDictEqual(args[3], headers) - - @common_mocks - def test_create_volume_no_results(self): - mock_body = '{"message": "no results matching query", "status": 413}' - - self.mock_response.read.return_value = mock_body - self.mock_response.status = http_client.REQUEST_ENTITY_TOO_LARGE - - self.assertRaisesRegex(exception.VolumeBackendAPIException, - "no results matching query", - self.driver.create_volume, - self.volume) - - create_params = dict( - name=self.volume_name, - query=DEFAULT_POOL_QUERY, - capacity=self.volume_size * units.Gi) - - kwargs = dict( - method='PUT', - params=create_params, - user_id=self.user_id, - project_id=self.project_id) - - self.mock_client.submit.assert_called_once_with( - "/volumes/%s" % self.volume_id, **kwargs) - - @common_mocks - def test_create_volume_from_snapshot(self): - self.driver.create_volume_from_snapshot(self.volume, self.snapshot) - - vol_src = dict( - snapshot_id=self.snapshot_id, - volume_id=self.snapshot['volume_id']) - create_params = dict( - name=self.volume_name, - capacity=self.volume_size * units.Gi, - src=vol_src) - kwargs = dict( - method='PUT', - params=create_params, - user_id=self.user_id, - project_id=self.project_id) - - self.mock_client.submit.assert_called_once_with( - "/volumes/%s" % self.volume_id, **kwargs) - - @common_mocks - def test_create_volume_from_snapshot_overquota(self): - mock_body = '{"message": "over quota", "status": 413}' - - self.mock_response.read.return_value = mock_body - self.mock_response.status = http_client.REQUEST_ENTITY_TOO_LARGE - - self.assertRaisesRegex(exception.VolumeBackendAPIException, - "over quota", - self.driver.create_volume_from_snapshot, - self.volume, - self.snapshot) - - vol_src = dict( - 
snapshot_id=self.snapshot_id, - volume_id=self.snapshot['volume_id']) - create_params = dict( - name=self.volume_name, - capacity=self.volume_size * units.Gi, - src=vol_src) - kwargs = dict( - method='PUT', - params=create_params, - user_id=self.user_id, - project_id=self.project_id) - - self.mock_client.submit.assert_called_once_with( - "/volumes/%s" % self.volume_id, **kwargs) - - @common_mocks - def test_create_cloned_volume(self): - src_vref = dict( - name='cloned_volume_source', - size=self.volume_size, - id='5d734467-5d77-461c-b5ac-5009dbeaa5d5', - user_id=self.user_id, - project_id=self.project_id) - - self.driver.create_cloned_volume(self.volume, src_vref) - - create_params = dict( - name=self.volume_name, - capacity=self.volume_size * units.Gi, - src=dict(volume_id=src_vref['id'])) - kwargs = dict( - method='PUT', - params=create_params, - user_id=self.user_id, - project_id=self.project_id) - - self.mock_client.submit.assert_called_once_with( - "/volumes/%s" % self.volume_id, **kwargs) - - @common_mocks - def test_create_cloned_volume_overquota(self): - mock_body = '{"message": "over quota", "status": 413}' - - self.mock_response.read.return_value = mock_body - self.mock_response.status = http_client.REQUEST_ENTITY_TOO_LARGE - - src_vref = dict( - name='cloned_volume_source', - size=self.volume_size, - id='5d734467-5d77-461c-b5ac-5009dbeaa5d5', - user_id=self.user_id, - project_id=self.project_id) - - self.assertRaisesRegex(exception.VolumeBackendAPIException, - "over quota", - self.driver.create_cloned_volume, - self.volume, - src_vref) - - create_params = dict( - name=self.volume_name, - capacity=self.volume_size * units.Gi, - src=dict(volume_id=src_vref['id'])) - kwargs = dict( - method='PUT', - params=create_params, - user_id=self.user_id, - project_id=self.project_id) - - self.mock_client.submit.assert_called_once_with( - "/volumes/%s" % self.volume_id, **kwargs) - - @common_mocks - def test_extend_volume(self): - 
self.driver.extend_volume(self.volume, 2) - - url = "/volumes/%s" % self.volume_id - kwargs = dict( - action='grow', - method='POST', - params=dict(capacity=(2 * units.Gi)), - user_id=self.user_id, - project_id=self.project_id) - - self.mock_client.submit.assert_called_once_with(url, **kwargs) - - @common_mocks - def test_extend_volume_overquota(self): - mock_body = '{"message": "over quota", "status": 413}' - self.mock_response.read.return_value = mock_body - self.mock_response.status = http_client.REQUEST_ENTITY_TOO_LARGE - - self.assertRaisesRegex(exception.VolumeBackendAPIException, - "over quota", - self.driver.extend_volume, - self.volume, - 2) - - url = "/volumes/%s" % self.volume_id - kwargs = dict( - action='grow', - method='POST', - params=dict(capacity=(2 * units.Gi)), - user_id=self.user_id, - project_id=self.project_id) - - self.mock_client.submit.assert_called_once_with(url, **kwargs) - - @common_mocks - def test_delete_volume(self): - self.driver.delete_volume(self.volume) - - url = "/volumes/%s" % self.volume_id - kwargs = dict( - method='DELETE', - user_id=self.user_id, - project_id=self.project_id) - - self.mock_client.submit.assert_called_once_with(url, **kwargs) - - @common_mocks - def test_create_snapshot(self): - self.driver.create_snapshot(self.snapshot) - - url = "/volumes/%s/snapshots/%s" % (self.snapshot['volume_id'], - self.snapshot['id']) - create_params = dict( - name=self.snapshot_name) - kwargs = dict( - method='PUT', - params=create_params, - user_id=self.user_id, - project_id=self.project_id) - - self.mock_client.submit.assert_called_once_with(url, **kwargs) - - @common_mocks - def test_create_snapshot_overquota(self): - mock_body = '{"message": "over quota", "status": 413}' - self.mock_response.read.return_value = mock_body - self.mock_response.status = http_client.REQUEST_ENTITY_TOO_LARGE - - self.assertRaisesRegex(exception.VolumeBackendAPIException, - "over quota", - self.driver.create_snapshot, - self.snapshot) - - url = 
"/volumes/%s/snapshots/%s" % (self.snapshot['volume_id'], - self.snapshot['id']) - create_params = dict( - name=self.snapshot_name) - kwargs = dict( - method='PUT', - params=create_params, - user_id=self.user_id, - project_id=self.project_id) - - self.mock_client.submit.assert_called_once_with(url, **kwargs) - - @common_mocks - def test_delete_snapshot(self): - self.driver.delete_snapshot(self.snapshot) - - url = "/volumes/%s/snapshots/%s" % (self.snapshot['volume_id'], - self.snapshot['id']) - kwargs = dict( - method='DELETE', - user_id=self.user_id, - project_id=self.project_id) - - self.mock_client.submit.assert_called_once_with(url, **kwargs) - - @common_mocks - @mock.patch('cinder.volume.utils.generate_username') - @mock.patch('cinder.volume.utils.generate_password') - def test_initialize_connection(self, - mock_generate_password, - mock_generate_username): - mock_generate_username.return_value = 'mock-user-abcdef123456' - mock_generate_password.return_value = 'mock-password-abcdef123456' - - self.mock_response.read.return_value = FIXTURE_VOL_EXPORT_OK - self.mock_response.status = http_client.OK - - props = self.driver.initialize_connection(self.volume, self.connector) - - expected_props = dict( - driver_volume_type="iscsi", - data=dict( - auth_method="CHAP", - auth_username='mock-user-abcdef123456', - auth_password='mock-password-abcdef123456', - target_discovered=False, - target_iqn="iqn.2009-12.com.blockbridge:t-pjxczxh-t001", - target_lun=0, - target_portal="127.0.0.1:3260", - volume_id=self.volume_id)) - - self.assertEqual(expected_props, props) - - ini_name = urllib.parse.quote(self.connector["initiator"], "") - url = "/volumes/%s/exports/%s" % (self.volume_id, ini_name) - params = dict( - chap_user="mock-user-abcdef123456", - chap_secret="mock-password-abcdef123456") - kwargs = dict( - method='PUT', - params=params, - user_id=self.user_id, - project_id=self.project_id) - - self.mock_client.submit.assert_called_once_with(url, **kwargs) - - @common_mocks 
- def test_terminate_connection(self): - self.driver.terminate_connection(self.volume, self.connector) - - ini_name = urllib.parse.quote(self.connector["initiator"], "") - url = "/volumes/%s/exports/%s" % (self.volume_id, ini_name) - kwargs = dict( - method='DELETE', - user_id=self.user_id, - project_id=self.project_id) - - self.mock_client.submit.assert_called_once_with(url, **kwargs) - - @common_mocks - def test_get_volume_stats_without_usage(self): - with mock.patch.object(self.driver, 'hostname', 'mock-hostname'): - self.driver.get_volume_stats(True) - - p = { - 'query': '+openstack', - 'pool': 'OpenStack', - 'hostname': 'mock-hostname', - 'version': '1.3.0', - 'backend_name': 'BlockbridgeISCSIDriver', - } - - self.mock_client.submit.assert_called_once_with('/status', params=p) - self.assertEqual(POOL_STATS_WITHOUT_USAGE, self.driver._stats) - - @common_mocks - def test_get_volume_stats_forbidden(self): - self.mock_response.status = http_client.FORBIDDEN - self.assertRaisesRegex(exception.NotAuthorized, - "Insufficient privileges", - self.driver.get_volume_stats, - True) - - @common_mocks - def test_get_volume_stats_unauthorized(self): - self.mock_response.status = http_client.UNAUTHORIZED - self.assertRaisesRegex(exception.NotAuthorized, - "Invalid credentials", - self.driver.get_volume_stats, - True) diff --git a/cinder/tests/unit/volume/drivers/test_coho.py b/cinder/tests/unit/volume/drivers/test_coho.py deleted file mode 100644 index 4945f2b74..000000000 --- a/cinder/tests/unit/volume/drivers/test_coho.py +++ /dev/null @@ -1,560 +0,0 @@ -# Copyright (c) 2015 Coho Data, Inc. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. 
You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -# - -import binascii -import errno -import mock -import os -import six -import socket -import xdrlib - -from cinder import context -from cinder import exception -from cinder import test -from cinder.tests.unit import fake_volume -from cinder.volume import configuration as conf -from cinder.volume.drivers import coho -from cinder.volume.drivers import nfs -from cinder.volume.drivers import remotefs -from cinder.volume import qos_specs -from cinder.volume import volume_types - -ADDR = 'coho-datastream-addr' -PATH = '/test/path' -RPC_PORT = 2049 -LOCAL_PATH = '/opt/cinder/mnt/test/path' - -VOLUME = { - 'name': 'volume-bcc48c61-9691-4e5f-897c-793686093190', - 'volume_id': 'bcc48c61-9691-4e5f-897c-793686093190', - 'size': 128, - 'volume_type': 'silver', - 'volume_type_id': 'deadbeef-aaaa-bbbb-cccc-deadbeefbeef', - 'metadata': [{'key': 'type', - 'service_label': 'silver'}], - 'provider_location': 'coho-datastream-addr:/test/path', - 'id': 'bcc48c61-9691-4e5f-897c-793686093190', - 'status': 'available', -} - -CLONE_VOL = VOLUME.copy() -CLONE_VOL['size'] = 256 - -SNAPSHOT = { - 'name': 'snapshot-51dd4-8d8a-4aa9-9176-086c9d89e7fc', - 'id': '51dd4-8d8a-4aa9-9176-086c9d89e7fc', - 'size': 128, - 'volume_type': None, - 'provider_location': None, - 'volume_size': 128, - 'volume_name': 'volume-bcc48c61-9691-4e5f-897c-793686093190', - 'volume_id': 'bcc48c61-9691-4e5f-897c-793686093191', -} - -VOLUME_TYPE = { - 'name': 'sf-1', - 'qos_specs_id': 'qos-spec-id', - 'deleted': False, - 'created_at': '2016-06-06 04:58:11', - 'updated_at': None, - 'extra_specs': {}, - 
'deleted_at': None, - 'id': 'deadbeef-aaaa-bbbb-cccc-deadbeefbeef' -} - -QOS_SPEC = { - 'id': 'qos-spec-id', - 'specs': { - 'maxIOPS': '2000', - 'maxMBS': '500' - } -} - -QOS = { - 'uuid': 'qos-spec-id', - 'maxIOPS': 2000, - 'maxMBS': 500 -} - -INVALID_SNAPSHOT = SNAPSHOT.copy() -INVALID_SNAPSHOT['name'] = '' - -INVALID_HEADER_BIN = binascii.unhexlify('800000') -NO_REPLY_BIN = binascii.unhexlify( - 'aaaaa01000000010000000000000000000000003') -MSG_DENIED_BIN = binascii.unhexlify( - '00000a010000000110000000000000000000000000000003') -PROC_UNAVAIL_BIN = binascii.unhexlify( - '00000a010000000100000000000000000000000000000003') -PROG_UNAVAIL_BIN = binascii.unhexlify( - '000003c70000000100000000000000000000000000000001') -PROG_MISMATCH_BIN = binascii.unhexlify( - '00000f7700000001000000000000000000000000000000020000000100000001') -GARBAGE_ARGS_BIN = binascii.unhexlify( - '00000d6e0000000100000000000000000000000000000004') - - -class CohoDriverTest(test.TestCase): - """Test Coho Data's NFS volume driver.""" - - def __init__(self, *args, **kwargs): - super(CohoDriverTest, self).__init__(*args, **kwargs) - - def setUp(self): - super(CohoDriverTest, self).setUp() - - self.context = mock.Mock() - self.configuration = mock.Mock(spec=conf.Configuration) - self.configuration.max_over_subscription_ratio = 20.0 - self.configuration.reserved_percentage = 0 - self.configuration.volume_backend_name = 'coho-1' - self.configuration.coho_rpc_port = 2049 - self.configuration.nfs_shares_config = '/etc/cinder/coho_shares' - self.configuration.nfs_sparsed_volumes = True - self.configuration.nfs_mount_point_base = '/opt/stack/cinder/mnt' - self.configuration.nfs_mount_options = None - self.configuration.nas_host = None - self.configuration.nas_share_path = None - self.configuration.nas_mount_options = None - - def test_setup_failure_when_rpc_port_unconfigured(self): - self.configuration.coho_rpc_port = None - drv = coho.CohoDriver(configuration=self.configuration) - - self.mock_object(coho, 
'LOG') - self.mock_object(nfs.NfsDriver, 'do_setup') - - with self.assertRaisesRegex(exception.CohoException, - ".*Coho rpc port is not configured.*"): - drv.do_setup(self.context) - - self.assertTrue(coho.LOG.warning.called) - self.assertTrue(nfs.NfsDriver.do_setup.called) - - def test_setup_failure_when_coho_rpc_port_is_invalid(self): - self.configuration.coho_rpc_port = 99999 - drv = coho.CohoDriver(configuration=self.configuration) - - self.mock_object(coho, 'LOG') - self.mock_object(nfs.NfsDriver, 'do_setup') - - with self.assertRaisesRegex(exception.CohoException, - "Invalid port number.*"): - drv.do_setup(self.context) - - self.assertTrue(coho.LOG.warning.called) - self.assertTrue(nfs.NfsDriver.do_setup.called) - - def test_create_volume_with_qos(self): - drv = coho.CohoDriver(configuration=self.configuration) - - volume = fake_volume.fake_volume_obj(self.context, - **{'volume_type_id': - VOLUME['volume_type_id'], - 'provider_location': - VOLUME['provider_location']}) - mock_remotefs_create = self.mock_object(remotefs.RemoteFSDriver, - 'create_volume') - mock_rpc_client = self.mock_object(coho, 'CohoRPCClient') - mock_get_volume_type = self.mock_object(volume_types, - 'get_volume_type') - mock_get_volume_type.return_value = VOLUME_TYPE - mock_get_qos_specs = self.mock_object(qos_specs, 'get_qos_specs') - mock_get_qos_specs.return_value = QOS_SPEC - mock_get_admin_context = self.mock_object(context, 'get_admin_context') - mock_get_admin_context.return_value = 'test' - - drv.create_volume(volume) - - self.assertTrue(mock_remotefs_create.called) - self.assertTrue(mock_get_admin_context.called) - mock_remotefs_create.assert_has_calls([mock.call(volume)]) - mock_get_volume_type.assert_has_calls( - [mock.call('test', volume.volume_type_id)]) - mock_get_qos_specs.assert_has_calls( - [mock.call('test', QOS_SPEC['id'])]) - mock_rpc_client.assert_has_calls( - [mock.call(ADDR, self.configuration.coho_rpc_port), - mock.call().set_qos_policy(os.path.join(PATH, 
volume.name), - QOS)]) - - def test_create_snapshot(self): - drv = coho.CohoDriver(configuration=self.configuration) - - mock_rpc_client = self.mock_object(coho, 'CohoRPCClient') - mock_get_volume_location = self.mock_object(coho.CohoDriver, - '_get_volume_location') - mock_get_volume_location.return_value = ADDR, PATH - - drv.create_snapshot(SNAPSHOT) - - mock_get_volume_location.assert_has_calls( - [mock.call(SNAPSHOT['volume_id'])]) - mock_rpc_client.assert_has_calls( - [mock.call(ADDR, self.configuration.coho_rpc_port), - mock.call().create_snapshot( - os.path.join(PATH, SNAPSHOT['volume_name']), - SNAPSHOT['name'], 0)]) - - def test_delete_snapshot(self): - drv = coho.CohoDriver(configuration=self.configuration) - - mock_rpc_client = self.mock_object(coho, 'CohoRPCClient') - mock_get_volume_location = self.mock_object(coho.CohoDriver, - '_get_volume_location') - mock_get_volume_location.return_value = ADDR, PATH - - drv.delete_snapshot(SNAPSHOT) - - mock_get_volume_location.assert_has_calls( - [mock.call(SNAPSHOT['volume_id'])]) - mock_rpc_client.assert_has_calls( - [mock.call(ADDR, self.configuration.coho_rpc_port), - mock.call().delete_snapshot(SNAPSHOT['name'])]) - - def test_create_volume_from_snapshot(self): - drv = coho.CohoDriver(configuration=self.configuration) - - mock_rpc_client = self.mock_object(coho, 'CohoRPCClient') - mock_find_share = self.mock_object(drv, '_find_share') - mock_find_share.return_value = ADDR + ':' + PATH - mock_get_volume_type = self.mock_object(volume_types, - 'get_volume_type') - mock_get_volume_type.return_value = VOLUME_TYPE - mock_get_qos_specs = self.mock_object(qos_specs, 'get_qos_specs') - mock_get_qos_specs.return_value = QOS_SPEC - mock_get_admin_context = self.mock_object(context, 'get_admin_context') - mock_get_admin_context.return_value = 'test' - - drv.create_volume_from_snapshot(VOLUME, SNAPSHOT) - - mock_find_share.assert_has_calls( - [mock.call(VOLUME)]) - self.assertTrue(mock_get_admin_context.called) - 
mock_get_volume_type.assert_has_calls( - [mock.call('test', VOLUME_TYPE['id'])]) - mock_get_qos_specs.assert_has_calls( - [mock.call('test', QOS_SPEC['id'])]) - mock_rpc_client.assert_has_calls( - [mock.call(ADDR, self.configuration.coho_rpc_port), - mock.call().create_volume_from_snapshot( - SNAPSHOT['name'], os.path.join(PATH, VOLUME['name'])), - mock.call(ADDR, self.configuration.coho_rpc_port), - mock.call().set_qos_policy(os.path.join(PATH, VOLUME['name']), - QOS)]) - - def test_create_cloned_volume(self): - drv = coho.CohoDriver(configuration=self.configuration) - - mock_rpc_client = self.mock_object(coho, 'CohoRPCClient') - mock_find_share = self.mock_object(drv, '_find_share') - mock_find_share.return_value = ADDR + ':' + PATH - mock_execute = self.mock_object(drv, '_execute') - mock_local_path = self.mock_object(drv, 'local_path') - mock_local_path.return_value = LOCAL_PATH - mock_get_volume_type = self.mock_object(volume_types, - 'get_volume_type') - mock_get_volume_type.return_value = VOLUME_TYPE - mock_get_qos_specs = self.mock_object(qos_specs, 'get_qos_specs') - mock_get_qos_specs.return_value = QOS_SPEC - mock_get_admin_context = self.mock_object(context, 'get_admin_context') - mock_get_admin_context.return_value = 'test' - - drv.create_cloned_volume(VOLUME, CLONE_VOL) - - mock_find_share.assert_has_calls( - [mock.call(VOLUME)]) - mock_local_path.assert_has_calls( - [mock.call(VOLUME), mock.call(CLONE_VOL)]) - mock_execute.assert_has_calls( - [mock.call('cp', LOCAL_PATH, LOCAL_PATH, run_as_root=True)]) - self.assertTrue(mock_get_admin_context.called) - mock_get_volume_type.assert_has_calls( - [mock.call('test', VOLUME_TYPE['id'])]) - mock_get_qos_specs.assert_has_calls( - [mock.call('test', QOS_SPEC['id'])]) - mock_rpc_client.assert_has_calls( - [mock.call(ADDR, self.configuration.coho_rpc_port), - mock.call().set_qos_policy(os.path.join(PATH, VOLUME['name']), - QOS)]) - - def test_retype(self): - drv = 
coho.CohoDriver(configuration=self.configuration) - - mock_rpc_client = self.mock_object(coho, 'CohoRPCClient') - mock_get_volume_type = self.mock_object(volume_types, - 'get_volume_type') - mock_get_volume_type.return_value = VOLUME_TYPE - mock_get_qos_specs = self.mock_object(qos_specs, 'get_qos_specs') - mock_get_qos_specs.return_value = QOS_SPEC - - drv.retype('test', VOLUME, VOLUME_TYPE, None, None) - - mock_get_volume_type.assert_has_calls( - [mock.call('test', VOLUME_TYPE['id'])]) - mock_get_qos_specs.assert_has_calls( - [mock.call('test', QOS_SPEC['id'])]) - mock_rpc_client.assert_has_calls( - [mock.call(ADDR, self.configuration.coho_rpc_port), - mock.call().set_qos_policy(os.path.join(PATH, VOLUME['name']), - QOS)]) - - def test_create_cloned_volume_larger(self): - drv = coho.CohoDriver(configuration=self.configuration) - - mock_rpc_client = self.mock_object(coho, 'CohoRPCClient') - mock_find_share = self.mock_object(drv, '_find_share') - mock_find_share.return_value = ADDR + ':' + PATH - mock_execute = self.mock_object(drv, '_execute') - mock_local_path = self.mock_object(drv, 'local_path') - mock_local_path.return_value = LOCAL_PATH - mock_get_volume_type = self.mock_object(volume_types, - 'get_volume_type') - mock_get_volume_type.return_value = VOLUME_TYPE - mock_get_qos_specs = self.mock_object(qos_specs, 'get_qos_specs') - mock_get_qos_specs.return_value = QOS_SPEC - mock_get_admin_context = self.mock_object(context, 'get_admin_context') - mock_get_admin_context.return_value = 'test' - - drv.create_cloned_volume(CLONE_VOL, VOLUME) - - mock_find_share.assert_has_calls( - [mock.call(CLONE_VOL)]) - mock_local_path.assert_has_calls( - [mock.call(CLONE_VOL), mock.call(VOLUME)]) - mock_execute.assert_has_calls( - [mock.call('cp', LOCAL_PATH, LOCAL_PATH, run_as_root=True)]) - self.assertTrue(mock_get_admin_context.called) - mock_get_volume_type.assert_has_calls( - [mock.call('test', VOLUME_TYPE['id'])]) - mock_get_qos_specs.assert_has_calls( - 
[mock.call('test', QOS_SPEC['id'])]) - mock_rpc_client.assert_has_calls( - [mock.call(ADDR, self.configuration.coho_rpc_port), - mock.call().set_qos_policy(os.path.join(PATH, VOLUME['name']), - QOS)]) - mock_local_path.assert_has_calls( - [mock.call(CLONE_VOL)]) - mock_execute.assert_has_calls( - [mock.call('truncate', '-s', '256G', - LOCAL_PATH, run_as_root=True)]) - - def test_extend_volume(self): - drv = coho.CohoDriver(configuration=self.configuration) - - mock_execute = self.mock_object(drv, '_execute') - mock_local_path = self.mock_object(drv, 'local_path') - mock_local_path.return_value = LOCAL_PATH - - drv.extend_volume(VOLUME, 512) - - mock_local_path.assert_has_calls( - [mock.call(VOLUME)]) - mock_execute.assert_has_calls( - [mock.call('truncate', '-s', '512G', - LOCAL_PATH, run_as_root=True)]) - - def test_snapshot_failure_when_source_does_not_exist(self): - drv = coho.CohoDriver(configuration=self.configuration) - - self.mock_object(coho.Client, '_make_call') - mock_init_socket = self.mock_object(coho.Client, 'init_socket') - mock_unpack_uint = self.mock_object(xdrlib.Unpacker, 'unpack_uint') - mock_unpack_uint.return_value = errno.ENOENT - mock_get_volume_location = self.mock_object(coho.CohoDriver, - '_get_volume_location') - mock_get_volume_location.return_value = ADDR, PATH - - with self.assertRaisesRegex(exception.CohoException, - "No such file or directory.*"): - drv.create_snapshot(SNAPSHOT) - - self.assertTrue(mock_init_socket.called) - self.assertTrue(mock_unpack_uint.called) - mock_get_volume_location.assert_has_calls( - [mock.call(SNAPSHOT['volume_id'])]) - - def test_snapshot_failure_with_invalid_input(self): - drv = coho.CohoDriver(configuration=self.configuration) - - self.mock_object(coho.Client, '_make_call') - mock_init_socket = self.mock_object(coho.Client, 'init_socket') - mock_unpack_uint = self.mock_object(xdrlib.Unpacker, 'unpack_uint') - mock_unpack_uint.return_value = errno.EINVAL - mock_get_volume_location = 
self.mock_object(coho.CohoDriver, - '_get_volume_location') - mock_get_volume_location.return_value = ADDR, PATH - - with self.assertRaisesRegex(exception.CohoException, - "Invalid argument"): - drv.delete_snapshot(INVALID_SNAPSHOT) - - self.assertTrue(mock_init_socket.called) - self.assertTrue(mock_unpack_uint.called) - mock_get_volume_location.assert_has_calls( - [mock.call(INVALID_SNAPSHOT['volume_id'])]) - - @mock.patch('cinder.volume.drivers.coho.Client.init_socket', - side_effect=exception.CohoException( - "Failed to establish connection.")) - def test_snapshot_failure_when_remote_is_unreachable(self, - mock_init_socket): - drv = coho.CohoDriver(configuration=self.configuration) - - mock_get_volume_location = self.mock_object(coho.CohoDriver, - '_get_volume_location') - mock_get_volume_location.return_value = 'uknown-address', PATH - - with self.assertRaisesRegex(exception.CohoException, - "Failed to establish connection.*"): - drv.create_snapshot(SNAPSHOT) - - mock_get_volume_location.assert_has_calls( - [mock.call(INVALID_SNAPSHOT['volume_id'])]) - - def test_rpc_client_make_call_proper_order(self): - """This test ensures that the RPC client logic is correct. - - When the RPC client's make_call function is called it creates - a packet and sends it to the Coho cluster RPC server. This test - ensures that the functions needed to complete the process are - called in the proper order with valid arguments. 
- """ - - mock_packer = self.mock_object(xdrlib, 'Packer') - mock_unpacker = self.mock_object(xdrlib, 'Unpacker') - mock_unpacker.return_value.unpack_uint.return_value = 0 - mock_socket = self.mock_object(socket, 'socket') - mock_init_call = self.mock_object(coho.Client, 'init_call') - mock_init_call.return_value = (1, 2) - mock_sendrecord = self.mock_object(coho.Client, '_sendrecord') - mock_recvrecord = self.mock_object(coho.Client, '_recvrecord') - mock_recvrecord.return_value = 'test_reply' - mock_unpack_replyheader = self.mock_object(coho.Client, - 'unpack_replyheader') - mock_unpack_replyheader.return_value = (123, 1) - - rpc_client = coho.CohoRPCClient(ADDR, RPC_PORT) - rpc_client.create_volume_from_snapshot('src', 'dest') - - self.assertTrue(mock_sendrecord.called) - self.assertTrue(mock_unpack_replyheader.called) - mock_packer.assert_has_calls([mock.call().reset()]) - mock_unpacker.assert_has_calls( - [mock.call().reset('test_reply'), - mock.call().unpack_uint()]) - mock_socket.assert_has_calls( - [mock.call(socket.AF_INET, socket.SOCK_STREAM), - mock.call().connect((ADDR, RPC_PORT))]) - mock_init_call.assert_has_calls( - [mock.call(coho.COHO1_CREATE_VOLUME_FROM_SNAPSHOT, - [(six.b('src'), mock_packer().pack_string), - (six.b('dest'), mock_packer().pack_string)])]) - - def test_rpc_client_error_in_reply_header(self): - """Ensure excpetions in reply header are raised by the RPC client. - - Coho cluster's RPC server packs errors into the reply header. - This test ensures that the RPC client parses the reply header - correctly and raises exceptions on various errors that can be - included in the reply header. 
- """ - mock_socket = self.mock_object(socket, 'socket') - mock_recvrecord = self.mock_object(coho.Client, '_recvrecord') - rpc_client = coho.CohoRPCClient(ADDR, RPC_PORT) - - mock_recvrecord.return_value = NO_REPLY_BIN - with self.assertRaisesRegex(exception.CohoException, - "no REPLY.*"): - rpc_client.create_snapshot('src', 'dest', 0) - - mock_recvrecord.return_value = MSG_DENIED_BIN - with self.assertRaisesRegex(exception.CohoException, - ".*MSG_DENIED.*"): - rpc_client.delete_snapshot('snapshot') - - mock_recvrecord.return_value = PROG_UNAVAIL_BIN - with self.assertRaisesRegex(exception.CohoException, - ".*PROG_UNAVAIL"): - rpc_client.delete_snapshot('snapshot') - - mock_recvrecord.return_value = PROG_MISMATCH_BIN - with self.assertRaisesRegex(exception.CohoException, - ".*PROG_MISMATCH.*"): - rpc_client.delete_snapshot('snapshot') - - mock_recvrecord.return_value = GARBAGE_ARGS_BIN - with self.assertRaisesRegex(exception.CohoException, - ".*GARBAGE_ARGS"): - rpc_client.delete_snapshot('snapshot') - - mock_recvrecord.return_value = PROC_UNAVAIL_BIN - with self.assertRaisesRegex(exception.CohoException, - ".*PROC_UNAVAIL"): - rpc_client.delete_snapshot('snapshot') - - self.assertTrue(mock_recvrecord.called) - mock_socket.assert_has_calls( - [mock.call(socket.AF_INET, socket.SOCK_STREAM), - mock.call().connect((ADDR, RPC_PORT))]) - - def test_rpc_client_error_in_receive_fragment(self): - """Ensure exception is raised when malformed packet is received.""" - mock_sendrcd = self.mock_object(coho.Client, '_sendrecord') - mock_socket = self.mock_object(socket, 'socket') - mock_socket.return_value.recv.return_value = INVALID_HEADER_BIN - rpc_client = coho.CohoRPCClient(ADDR, RPC_PORT) - - with self.assertRaisesRegex(exception.CohoException, - "Invalid response header.*"): - rpc_client.create_snapshot('src', 'dest', 0) - - self.assertTrue(mock_sendrcd.called) - mock_socket.assert_has_calls( - [mock.call(socket.AF_INET, socket.SOCK_STREAM), - mock.call().connect((ADDR, 
RPC_PORT)), - mock.call().recv(4)]) - - def test_rpc_client_recovery_on_broken_pipe(self): - """Ensure RPC retry on broken pipe error. - - When the cluster closes the TCP socket, try reconnecting - and retrying the command before returing error for the operation. - """ - mock_socket = self.mock_object(socket, 'socket') - mock_make_call = self.mock_object(coho.Client, '_make_call') - socket_error = socket.error('[Errno 32] Broken pipe') - socket_error.errno = errno.EPIPE - mock_make_call.side_effect = socket_error - rpc_client = coho.CohoRPCClient(ADDR, RPC_PORT) - - with self.assertRaisesRegex(exception.CohoException, - "Failed to establish.*"): - rpc_client.create_snapshot('src', 'dest', 0) - - self.assertEqual(coho.COHO_MAX_RETRIES, mock_make_call.call_count) - self.assertEqual(coho.COHO_MAX_RETRIES + 1, mock_socket.call_count) - - # assert that on a none EPIPE error it only tries once - socket_error.errno = errno.EINVAL - mock_make_call.side_effect = socket_error - with self.assertRaisesRegex(exception.CohoException, - "Unable to send request.*"): - rpc_client.delete_snapshot('src') - - self.assertEqual(coho.COHO_MAX_RETRIES + 1, mock_make_call.call_count) - self.assertEqual(coho.COHO_MAX_RETRIES + 1, mock_socket.call_count) diff --git a/cinder/tests/unit/volume/drivers/test_coprhd.py b/cinder/tests/unit/volume/drivers/test_coprhd.py deleted file mode 100644 index 83df077a9..000000000 --- a/cinder/tests/unit/volume/drivers/test_coprhd.py +++ /dev/null @@ -1,981 +0,0 @@ -# Copyright (c) 2012 - 2016 EMC Corporation, Inc. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. 
You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import mock - -from cinder import context -from cinder.objects import fields -from cinder import test -from cinder.tests.unit import fake_constants as fake -from cinder.volume.drivers.coprhd import common as coprhd_common -from cinder.volume.drivers.coprhd import fc as coprhd_fc -from cinder.volume.drivers.coprhd import iscsi as coprhd_iscsi -from cinder.volume.drivers.coprhd import scaleio as coprhd_scaleio -from cinder.volume import volume_types - -""" -Test Data required for mocking -""" -export_group_details_data = { - "inactive": False, - "initiators": [{"creation_time": 1392194176020, - "host": {"id": "urn:storageos:Host:3e21edff-8662-4e60-ab5", - "link": {"href": "/compute/hosts/urn:storageos:H", - "rel": "self"}}, - "hostname": "lglw7134", - "id": "urn:storageos:Initiator:13945431-06b7-44a0-838c-50", - "inactive": False, - "initiator_node": "20:00:00:90:FA:13:81:8D", - "initiator_port": "iqn.1993-08.org.deb:01:222", - "link": {"href": "/compute/initiators/urn:storageos:Initi", - "rel": "self"}, - "protocol": "iSCSI", - "registration_status": "REGISTERED", - "tags": []}], - "name": "ccgroup", - "project": 'project', - "tags": [], - "tenant": 'tenant', - "type": "Host", - "varray": {"id": "urn:storageos:VirtualArray:5af376e9-ce2f-493d-9079-a872", - "link": {"href": "/vdc/varrays/urn:storageos:VirtualArray:5af3", - "rel": "self"} - }, - "volumes": [{"id": "urn:storageos:Volume:6dc64865-bb25-431c-b321-ac268f16" - "a7ae:vdc1", - "lun": 1 - }] -} - -varray_detail_data = {"name": "varray"} - -export_group_list = 
["urn:storageos:ExportGroup:2dbce233-7da0-47cb-8ff3-68f48"] - -iscsi_itl_list = {"itl": [{"hlu": 3, - "initiator": {"id": "urn:storageos:Initiator:13945", - "link": {"rel": "self", - "href": "/comput"}, - "port": "iqn.1993-08.org.deb:01:222"}, - "export": {"id": "urn:storageos:ExportGroup:2dbce2", - "name": "ccgroup", - "link": {"rel": "self", - "href": "/block/expo"}}, - "device": {"id": "urn:storageos:Volume:aa1fc84a-af", - "link": {"rel": "self", - "href": "/block/volumes/urn:s"}, - "wwn": "600009700001957015735330303535"}, - "target": {"id": "urn:storageos:StoragePort:d7e42", - "link": {"rel": "self", - "href": "/vdc/stor:"}, - "port": "50:00:09:73:00:18:95:19", - 'ip_address': "10.10.10.10", - 'tcp_port': '22'}}, - {"hlu": 3, - "initiator": {"id": "urn:storageos:Initiator:13945", - "link": {"rel": "self", - "href": "/comput"}, - "port": "iqn.1993-08.org.deb:01:222"}, - "export": {"id": "urn:storageos:ExportGroup:2dbce2", - "name": "ccgroup", - "link": {"rel": "self", - "href": "/block/expo"}}, - "device": {"id": "urn:storageos:Volume:aa1fc84a-af", - "link": {"rel": "self", - "href": "/block/volumes/urn:s"}, - "wwn": "600009700001957015735330303535"}, - "target": {"id": "urn:storageos:StoragePort:d7e42", - "link": {"rel": "self", - "href": "/vdc/stor:"}, - "port": "50:00:09:73:00:18:95:19", - 'ip_address': "10.10.10.10", - 'tcp_port': '22'}}]} - -fcitl_itl_list = {"itl": [{"hlu": 3, - "initiator": {"id": "urn:storageos:Initiator:13945", - "link": {"rel": "self", - "href": "/comput"}, - "port": "12:34:56:78:90:12:34:56"}, - "export": {"id": "urn:storageos:ExportGroup:2dbce2", - "name": "ccgroup", - "link": {"rel": "self", - "href": "/block/expo"}}, - "device": {"id": "urn:storageos:Volume:aa1fc84a-af", - "link": {"rel": "self", - "href": "/block/volumes/urn:s"}, - "wwn": "600009700001957015735330303535"}, - "target": {"id": "urn:storageos:StoragePort:d7e42", - "link": {"rel": "self", - "href": "/vdc/stor:"}, - "port": "12:34:56:78:90:12:34:56", - 'ip_address': 
"10.10.10.10", - 'tcp_port': '22'}}, - {"hlu": 3, - "initiator": {"id": "urn:storageos:Initiator:13945", - "link": {"rel": "self", - "href": "/comput"}, - "port": "12:34:56:78:90:12:34:56"}, - "export": {"id": "urn:storageos:ExportGroup:2dbce2", - "name": "ccgroup", - "link": {"rel": "self", - "href": "/block/expo"}}, - "device": {"id": "urn:storageos:Volume:aa1fc84a-af", - "link": {"rel": "self", - "href": "/block/volumes/urn:s"}, - "wwn": "600009700001957015735330303535"}, - "target": {"id": "urn:storageos:StoragePort:d7e42", - "link": {"rel": "self", - "href": "/vdc/stor:"}, - "port": "12:34:56:78:90:12:34:56", - 'ip_address': "10.10.10.10", - 'tcp_port': '22'}}]} - -scaleio_itl_list = {"itl": [{"hlu": -1, - "initiator": {"id": - "urn:storageos:Initiator:920aee", - "link": {"rel": "self", - "href": - "/compute/initiators"}, - "port": "bfdf432500000004"}, - "export": {"id": - "urn:storageos:ExportGroup:5449235", - "name": "10.108.225.109", - "link": {"rel": "self", - "href": - "/block/exports/urn:stor"}}, - "device": {"id": - "urn:storageos:Volume:b3624a83-3eb", - "link": {"rel": "self", - "href": "/block/volume"}, - "wwn": - "4F48CC4C27A43248092128B400000004"}, - "target": {}}, - {"hlu": -1, - "initiator": {"id": - "urn:storageos:Initiator:920aee", - "link": {"rel": "self", - "href": - "/compute/initiators/"}, - "port": "bfdf432500000004"}, - "export": {"id": - "urn:storageos:ExportGroup:5449235", - "name": "10.108.225.109", - "link": {"rel": "self", - "href": - "/block/exports/urn:stor"}}, - "device": {"id": - "urn:storageos:Volume:c014e96a-557", - "link": {"rel": "self", - "href": - "/block/volumes/urn:stor"}, - "wwn": - "4F48CC4C27A43248092129320000000E"}, - "target": {}}]} - - -class test_volume_data(object): - name = 'test-vol1' - size = 1 - volume_name = 'test-vol1' - id = fake.VOLUME_ID - group_id = None - provider_auth = None - project_id = fake.PROJECT_ID - display_name = 'test-vol1' - display_description = 'test volume', - volume_type_id = None - 
provider_id = fake.PROVIDER_ID - - def __init__(self, volume_type_id): - self.volume_type_id = volume_type_id - - -class source_test_volume_data(object): - name = 'source_test-vol1' - size = 1 - volume_name = 'source_test-vol1' - id = fake.VOLUME2_ID - group_id = None - provider_auth = None - project_id = fake.PROJECT_ID - display_name = 'source_test-vol1' - display_description = 'test volume' - volume_type_id = None - - def __init__(self, volume_type_id): - self.volume_type_id = volume_type_id - - -class test_clone_volume_data(object): - name = 'clone-test-vol1' - size = 1 - volume_name = 'clone-test-vol1' - id = fake.VOLUME3_ID - provider_auth = None - project_id = fake.PROJECT_ID - display_name = 'clone-test-vol1' - display_description = 'clone test volume' - volume_type_id = None - - def __init__(self, volume_type_id): - self.volume_type_id = volume_type_id - - -class test_snapshot_data(object): - name = 'snapshot1' - display_name = 'snapshot1' - size = 1 - id = fake.SNAPSHOT_ID - volume_name = 'test-vol1' - volume_id = fake.VOLUME_ID - volume = None - volume_size = 1 - project_id = fake.PROJECT_ID - status = fields.SnapshotStatus.AVAILABLE - - def __init__(self, src_volume): - self.volume = src_volume - - -def get_connector_data(): - connector = {'ip': '10.0.0.2', - 'initiator': 'iqn.1993-08.org.deb:01:222', - 'wwpns': ["1234567890123456", "1234567890543211"], - 'wwnns': ["223456789012345", "223456789054321"], - 'host': 'fakehost'} - return connector - - -class test_group_data(object): - name = 'group_name' - display_name = 'group_name' - id = fake.GROUP_ID - volume_type_ids = None - volume_types = None - group_type_id = None - status = fields.GroupStatus.AVAILABLE - - def __init__(self, volume_types, group_type_id): - self.group_type_id = group_type_id - self.volume_types = volume_types - - -class test_group_type_data(object): - name = 'group_name' - display_name = 'group_name' - groupsnapshot_id = None - id = fake.GROUP_TYPE_ID - description = 'group' - - 
-class test_group_snap_data(object): - name = 'cg_snap_name' - display_name = 'cg_snap_name' - id = fake.GROUP_SNAPSHOT_ID - group_id = fake.GROUP_ID - status = fields.GroupStatus.AVAILABLE - snapshots = [] - group = None - group_type_id = None - - def __init__(self, volume_types, group_type_id): - self.group_type_id = group_type_id - self.group = test_group_data(volume_types, group_type_id) - - -class MockedEMCCoprHDDriverCommon(coprhd_common.EMCCoprHDDriverCommon): - - def __init__(self, protocol, default_backend_name, - configuration=None): - - super(MockedEMCCoprHDDriverCommon, self).__init__( - protocol, default_backend_name, configuration) - - def authenticate_user(self): - pass - - def get_exports_count_by_initiators(self, initiator_ports): - return 0 - - def _get_coprhd_volume_name(self, vol, verbose=False): - if verbose is True: - return {'volume_name': "coprhd_vol_name", - 'volume_uri': "coprhd_vol_uri"} - else: - return "coprhd_vol_name" - - def _get_coprhd_snapshot_name(self, snapshot, resUri): - return "coprhd_snapshot_name" - - def _get_coprhd_cgid(self, cgid): - return "cg_uri" - - def init_volume_api(self): - self.volume_api = mock.Mock() - self.volume_api.get.return_value = { - 'name': 'source_test-vol1', - 'size': 1, - 'volume_name': 'source_test-vol1', - 'id': fake.VOLUME_ID, - 'group_id': fake.GROUP_ID, - 'provider_auth': None, - 'project_id': fake.PROJECT_ID, - 'display_name': 'source_test-vol1', - 'display_description': 'test volume', - 'volume_type_id': fake.VOLUME_TYPE_ID} - - def init_coprhd_api_components(self): - self.volume_obj = mock.Mock() - self.volume_obj.create.return_value = "volume_created" - self.volume_obj.volume_query.return_value = "volume_uri" - self.volume_obj.get_storageAttributes.return_value = ( - 'block', 'volume_name') - self.volume_obj.storage_resource_query.return_value = "volume_uri" - self.volume_obj.is_volume_detachable.return_value = False - self.volume_obj.volume_clone_detach.return_value = 'detached' - 
self.volume_obj.getTags.return_value = ( - ["Openstack-vol", "Openstack-vol1"]) - self.volume_obj.tag.return_value = "tagged" - self.volume_obj.clone.return_value = "volume-cloned" - - if(self.protocol == "iSCSI"): - self.volume_obj.get_exports_by_uri.return_value = ( - iscsi_itl_list) - elif(self.protocol == "FC"): - self.volume_obj.get_exports_by_uri.return_value = ( - fcitl_itl_list) - else: - self.volume_obj.get_exports_by_uri.return_value = ( - scaleio_itl_list) - - self.volume_obj.list_volumes.return_value = [] - self.volume_obj.show.return_value = {"id": "vol_id"} - self.volume_obj.expand.return_value = "expanded" - - self.tag_obj = mock.Mock() - self.tag_obj.list_tags.return_value = [ - "Openstack-vol", "Openstack-vol1"] - self.tag_obj.tag_resource.return_value = "Tagged" - - self.exportgroup_obj = mock.Mock() - self.exportgroup_obj.exportgroup_list.return_value = ( - export_group_list) - self.exportgroup_obj.exportgroup_show.return_value = ( - export_group_details_data) - - self.exportgroup_obj.exportgroup_add_volumes.return_value = ( - "volume-added") - - self.host_obj = mock.Mock() - self.host_obj.list_by_tenant.return_value = [] - self.host_obj.list_all.return_value = [{'id': "host1_id", - 'name': "host1"}] - self.host_obj.list_initiators.return_value = [ - {'name': "12:34:56:78:90:12:34:56"}, - {'name': "12:34:56:78:90:54:32:11"}, - {'name': "bfdf432500000004"}] - - self.hostinitiator_obj = mock.Mock() - self.varray_obj = mock.Mock() - self.varray_obj.varray_show.return_value = varray_detail_data - - self.snapshot_obj = mock.Mock() - mocked_snap_obj = self.snapshot_obj.return_value - mocked_snap_obj.storageResource_query.return_value = ( - "resourceUri") - mocked_snap_obj.snapshot_create.return_value = ( - "snapshot_created") - mocked_snap_obj.snapshot_query.return_value = "snapshot_uri" - - self.consistencygroup_obj = mock.Mock() - mocked_group_object = self.consistencygroup_obj.return_value - mocked_group_object.create.return_value = "CG-Created" - 
mocked_group_object.consistencygroup_query.return_value = "CG-uri" - - -class EMCCoprHDISCSIDriverTest(test.TestCase): - - def setUp(self): - super(EMCCoprHDISCSIDriverTest, self).setUp() - self.create_coprhd_setup() - - def create_coprhd_setup(self): - - self.configuration = mock.Mock() - self.configuration.coprhd_hostname = "10.10.10.10" - self.configuration.coprhd_port = "4443" - self.configuration.volume_backend_name = "EMCCoprHDISCSIDriver" - self.configuration.coprhd_username = "user-name" - self.configuration.coprhd_password = "password" - self.configuration.coprhd_tenant = "tenant" - self.configuration.coprhd_project = "project" - self.configuration.coprhd_varray = "varray" - self.configuration.coprhd_emulate_snapshot = False - - self.volume_type = self.create_coprhd_volume_type() - self.volume_type_id = self.volume_type.id - self.group_type = test_group_type_data() - self.group_type_id = self.group_type.id - - self.mock_object(coprhd_iscsi.EMCCoprHDISCSIDriver, - '_get_common_driver', - self._get_mocked_common_driver) - self.driver = coprhd_iscsi.EMCCoprHDISCSIDriver( - configuration=self.configuration) - - def tearDown(self): - self._cleanUp() - super(EMCCoprHDISCSIDriverTest, self).tearDown() - - def _cleanUp(self): - self.delete_vipr_volume_type() - - def create_coprhd_volume_type(self): - ctx = context.get_admin_context() - vipr_volume_type = volume_types.create(ctx, - "coprhd-volume-type", - {'CoprHD:VPOOL': - 'vpool_coprhd'}) - return vipr_volume_type - - def _get_mocked_common_driver(self): - return MockedEMCCoprHDDriverCommon( - protocol="iSCSI", - default_backend_name="EMCViPRISCSIDriver", - configuration=self.configuration) - - def delete_vipr_volume_type(self): - ctx = context.get_admin_context() - volume_types.destroy(ctx, self.volume_type_id) - - def test_create_destroy(self): - volume = test_volume_data(self.volume_type_id) - - self.driver.create_volume(volume) - self.driver.delete_volume(volume) - - def test_get_volume_stats(self): - 
vol_stats = self.driver.get_volume_stats(True) - self.assertEqual('unknown', vol_stats['free_capacity_gb']) - - def test_create_volume_clone(self): - src_volume_data = test_volume_data(self.volume_type_id) - clone_volume_data = test_clone_volume_data(self.volume_type_id) - self.driver.create_volume(src_volume_data) - self.driver.create_cloned_volume(clone_volume_data, src_volume_data) - self.driver.delete_volume(src_volume_data) - self.driver.delete_volume(clone_volume_data) - - def test_create_destroy_snapshot(self): - volume_data = test_volume_data(self.volume_type_id) - snapshot_data = test_snapshot_data( - source_test_volume_data(self.volume_type_id)) - - self.driver.create_volume(volume_data) - self.driver.create_snapshot(snapshot_data) - self.driver.delete_snapshot(snapshot_data) - self.driver.delete_volume(volume_data) - - def test_create_volume_from_snapshot(self): - - src_vol_data = source_test_volume_data(self.volume_type_id) - self.driver.create_volume(src_vol_data) - - volume_data = test_volume_data(self.volume_type_id) - snapshot_data = test_snapshot_data(src_vol_data) - - self.driver.create_snapshot(snapshot_data) - self.driver.create_volume_from_snapshot(volume_data, snapshot_data) - - self.driver.delete_snapshot(snapshot_data) - self.driver.delete_volume(src_vol_data) - self.driver.delete_volume(volume_data) - - def test_extend_volume(self): - volume_data = test_volume_data(self.volume_type_id) - self.driver.create_volume(volume_data) - self.driver.extend_volume(volume_data, 2) - self.driver.delete_volume(volume_data) - - def test_initialize_and_terminate_connection(self): - connector_data = get_connector_data() - volume_data = test_volume_data(self.volume_type_id) - - self.driver.create_volume(volume_data) - res_initialize = self.driver.initialize_connection( - volume_data, connector_data) - expected_initialize = {'driver_volume_type': 'iscsi', - 'data': {'target_lun': 3, - 'target_portal': '10.10.10.10:22', - 'target_iqn': - 
'50:00:09:73:00:18:95:19', - 'target_discovered': False, - 'volume_id': fake.VOLUME_ID}} - self.assertEqual( - expected_initialize, res_initialize, 'Unexpected return data') - - self.driver.terminate_connection(volume_data, connector_data) - self.driver.delete_volume(volume_data) - - @mock.patch('cinder.volume.utils.is_group_a_cg_snapshot_type') - def test_create_delete_empty_group(self, cg_ss_enabled): - cg_ss_enabled.side_effect = [True, True] - group_data = test_group_data([self.volume_type], - self.group_type_id) - ctx = context.get_admin_context() - self.driver.create_group(ctx, group_data) - model_update, volumes_model_update = ( - self.driver.delete_group(ctx, group_data, [])) - self.assertEqual([], volumes_model_update, 'Unexpected return data') - - @mock.patch('cinder.volume.utils.is_group_a_cg_snapshot_type') - def test_create_update_delete_group(self, cg_ss_enabled): - cg_ss_enabled.side_effect = [True, True, True, True] - group_data = test_group_data([self.volume_type], - self.group_type_id) - ctx = context.get_admin_context() - self.driver.create_group(ctx, group_data) - - volume = test_volume_data(self.volume_type_id) - self.driver.create_volume(volume) - - model_update, ret1, ret2 = ( - self.driver.update_group(ctx, group_data, [volume], [])) - - self.assertEqual({'status': fields.GroupStatus.AVAILABLE}, - model_update) - - model_update, volumes_model_update = ( - self.driver.delete_group(ctx, group_data, [volume])) - self.assertEqual({'status': fields.GroupStatus.AVAILABLE}, - model_update) - self.assertEqual([{'status': 'deleted', 'id': fake.VOLUME_ID}], - volumes_model_update) - - @mock.patch('cinder.volume.utils.is_group_a_cg_snapshot_type') - def test_create_delete_group_snap(self, cg_ss_enabled): - cg_ss_enabled.side_effect = [True, True] - group_snap_data = test_group_snap_data([self.volume_type], - self.group_type_id) - ctx = context.get_admin_context() - - model_update, snapshots_model_update = ( - self.driver.create_group_snapshot(ctx, 
group_snap_data, [])) - self.assertEqual({'status': fields.GroupStatus.AVAILABLE}, - model_update) - self.assertEqual([], snapshots_model_update, 'Unexpected return data') - - model_update, snapshots_model_update = ( - self.driver.delete_group_snapshot(ctx, group_snap_data, [])) - self.assertEqual({}, model_update, 'Unexpected return data') - self.assertEqual([], snapshots_model_update, 'Unexpected return data') - - -class EMCCoprHDFCDriverTest(test.TestCase): - - def setUp(self): - super(EMCCoprHDFCDriverTest, self).setUp() - self.create_coprhd_setup() - - def create_coprhd_setup(self): - - self.configuration = mock.Mock() - self.configuration.coprhd_hostname = "10.10.10.10" - self.configuration.coprhd_port = "4443" - self.configuration.volume_backend_name = "EMCCoprHDFCDriver" - self.configuration.coprhd_username = "user-name" - self.configuration.coprhd_password = "password" - self.configuration.coprhd_tenant = "tenant" - self.configuration.coprhd_project = "project" - self.configuration.coprhd_varray = "varray" - self.configuration.coprhd_emulate_snapshot = False - - self.volume_type = self.create_coprhd_volume_type() - self.volume_type_id = self.volume_type.id - self.group_type = test_group_type_data() - self.group_type_id = self.group_type.id - - self.mock_object(coprhd_fc.EMCCoprHDFCDriver, - '_get_common_driver', - self._get_mocked_common_driver) - self.driver = coprhd_fc.EMCCoprHDFCDriver( - configuration=self.configuration) - - def tearDown(self): - self._cleanUp() - super(EMCCoprHDFCDriverTest, self).tearDown() - - def _cleanUp(self): - self.delete_vipr_volume_type() - - def create_coprhd_volume_type(self): - ctx = context.get_admin_context() - vipr_volume_type = volume_types.create(ctx, - "coprhd-volume-type", - {'CoprHD:VPOOL': 'vpool_vipr'}) - return vipr_volume_type - - def _get_mocked_common_driver(self): - return MockedEMCCoprHDDriverCommon( - protocol="FC", - default_backend_name="EMCViPRFCDriver", - configuration=self.configuration) - - def 
delete_vipr_volume_type(self): - ctx = context.get_admin_context() - volume_types.destroy(ctx, self.volume_type_id) - - def test_create_destroy(self): - volume = test_volume_data(self.volume_type_id) - - self.driver.create_volume(volume) - self.driver.delete_volume(volume) - - def test_get_volume_stats(self): - vol_stats = self.driver.get_volume_stats(True) - self.assertEqual('unknown', vol_stats['free_capacity_gb']) - - def test_create_volume_clone(self): - - src_volume_data = test_volume_data(self.volume_type_id) - clone_volume_data = test_clone_volume_data(self.volume_type_id) - self.driver.create_volume(src_volume_data) - self.driver.create_cloned_volume(clone_volume_data, src_volume_data) - self.driver.delete_volume(src_volume_data) - self.driver.delete_volume(clone_volume_data) - - def test_create_destroy_snapshot(self): - - volume_data = test_volume_data(self.volume_type_id) - snapshot_data = test_snapshot_data( - source_test_volume_data(self.volume_type_id)) - - self.driver.create_volume(volume_data) - self.driver.create_snapshot(snapshot_data) - self.driver.delete_snapshot(snapshot_data) - self.driver.delete_volume(volume_data) - - def test_create_volume_from_snapshot(self): - src_vol_data = source_test_volume_data(self.volume_type_id) - self.driver.create_volume(src_vol_data) - - volume_data = test_volume_data(self.volume_type_id) - snapshot_data = test_snapshot_data(src_vol_data) - - self.driver.create_snapshot(snapshot_data) - self.driver.create_volume_from_snapshot(volume_data, snapshot_data) - - self.driver.delete_snapshot(snapshot_data) - self.driver.delete_volume(src_vol_data) - self.driver.delete_volume(volume_data) - - def test_extend_volume(self): - volume_data = test_volume_data(self.volume_type_id) - self.driver.create_volume(volume_data) - self.driver.extend_volume(volume_data, 2) - self.driver.delete_volume(volume_data) - - def test_initialize_and_terminate_connection(self): - - connector_data = get_connector_data() - volume_data = 
test_volume_data(self.volume_type_id) - - self.driver.create_volume(volume_data) - res_initiatlize = self.driver.initialize_connection( - volume_data, connector_data) - expected_initialize = {'driver_volume_type': 'fibre_channel', - 'data': {'target_lun': 3, - 'initiator_target_map': - {'1234567890543211': - ['1234567890123456', - '1234567890123456'], - '1234567890123456': - ['1234567890123456', - '1234567890123456']}, - 'target_wwn': ['1234567890123456', - '1234567890123456'], - 'target_discovered': False, - 'volume_id': fake.VOLUME_ID}} - self.assertEqual( - expected_initialize, res_initiatlize, 'Unexpected return data') - - res_terminate = self.driver.terminate_connection( - volume_data, connector_data) - expected_terminate = {'driver_volume_type': 'fibre_channel', - 'data': {'initiator_target_map': - {'1234567890543211': - ['1234567890123456', - '1234567890123456'], - '1234567890123456': - ['1234567890123456', - '1234567890123456']}, - 'target_wwn': ['1234567890123456', - '1234567890123456']}} - self.assertEqual( - expected_terminate, res_terminate, 'Unexpected return data') - - self.driver.delete_volume(volume_data) - - @mock.patch('cinder.volume.utils.is_group_a_cg_snapshot_type') - def test_create_delete_empty_group(self, cg_ss_enabled): - cg_ss_enabled.side_effect = [True, True] - group_data = test_group_data([self.volume_type], - self.group_type_id) - ctx = context.get_admin_context() - self.driver.create_group(ctx, group_data) - model_update, volumes_model_update = ( - self.driver.delete_group(ctx, group_data, [])) - self.assertEqual([], volumes_model_update, 'Unexpected return data') - - @mock.patch('cinder.volume.utils.is_group_a_cg_snapshot_type') - def test_create_update_delete_group(self, cg_ss_enabled): - cg_ss_enabled.side_effect = [True, True, True] - group_data = test_group_data([self.volume_type], - self.group_type_id) - ctx = context.get_admin_context() - self.driver.create_group(ctx, group_data) - - volume = 
test_volume_data(self.volume_type_id) - self.driver.create_volume(volume) - - model_update, ret1, ret2 = ( - self.driver.update_group(ctx, group_data, [volume], [])) - - self.assertEqual({'status': fields.GroupStatus.AVAILABLE}, - model_update) - - model_update, volumes_model_update = ( - self.driver.delete_group(ctx, group_data, [volume])) - self.assertEqual({'status': fields.GroupStatus.AVAILABLE}, - model_update) - self.assertEqual([{'status': 'deleted', 'id': fake.VOLUME_ID}], - volumes_model_update) - - @mock.patch('cinder.volume.utils.is_group_a_cg_snapshot_type') - def test_create_delete_group_snap(self, cg_ss_enabled): - cg_ss_enabled.side_effect = [True, True] - group_snap_data = test_group_snap_data([self.volume_type], - self.group_type_id) - ctx = context.get_admin_context() - - model_update, snapshots_model_update = ( - self.driver.create_group_snapshot(ctx, group_snap_data, [])) - self.assertEqual({'status': fields.GroupStatus.AVAILABLE}, - model_update) - self.assertEqual([], snapshots_model_update, 'Unexpected return data') - - model_update, snapshots_model_update = ( - self.driver.delete_group_snapshot(ctx, group_snap_data, [])) - self.assertEqual({}, model_update, 'Unexpected return data') - self.assertEqual([], snapshots_model_update, 'Unexpected return data') - - -class EMCCoprHDScaleIODriverTest(test.TestCase): - - def setUp(self): - super(EMCCoprHDScaleIODriverTest, self).setUp() - self.create_coprhd_setup() - - def create_coprhd_setup(self): - - self.configuration = mock.Mock() - self.configuration.coprhd_hostname = "10.10.10.10" - self.configuration.coprhd_port = "4443" - self.configuration.volume_backend_name = "EMCCoprHDFCDriver" - self.configuration.coprhd_username = "user-name" - self.configuration.coprhd_password = "password" - self.configuration.coprhd_tenant = "tenant" - self.configuration.coprhd_project = "project" - self.configuration.coprhd_varray = "varray" - self.configuration.coprhd_scaleio_rest_gateway_host = "10.10.10.11" - 
self.configuration.coprhd_scaleio_rest_gateway_port = 443 - self.configuration.coprhd_scaleio_rest_server_username = ( - "scaleio_username") - self.configuration.coprhd_scaleio_rest_server_password = ( - "scaleio_password") - self.configuration.scaleio_verify_server_certificate = False - self.configuration.scaleio_server_certificate_path = ( - "/etc/scaleio/certs") - - self.volume_type = self.create_coprhd_volume_type() - self.volume_type_id = self.volume_type.id - self.group_type = test_group_type_data() - self.group_type_id = self.group_type.id - - self.mock_object(coprhd_scaleio.EMCCoprHDScaleIODriver, - '_get_common_driver', - self._get_mocked_common_driver) - self.mock_object(coprhd_scaleio.EMCCoprHDScaleIODriver, - '_get_client_id', - self._get_client_id) - self.driver = coprhd_scaleio.EMCCoprHDScaleIODriver( - configuration=self.configuration) - - def tearDown(self): - self._cleanUp() - super(EMCCoprHDScaleIODriverTest, self).tearDown() - - def _cleanUp(self): - self.delete_vipr_volume_type() - - def create_coprhd_volume_type(self): - ctx = context.get_admin_context() - vipr_volume_type = volume_types.create(ctx, - "coprhd-volume-type", - {'CoprHD:VPOOL': 'vpool_vipr'}) - return vipr_volume_type - - def _get_mocked_common_driver(self): - return MockedEMCCoprHDDriverCommon( - protocol="scaleio", - default_backend_name="EMCCoprHDScaleIODriver", - configuration=self.configuration) - - def _get_client_id(self, server_ip, server_port, server_username, - server_password, sdc_ip): - return "bfdf432500000004" - - def delete_vipr_volume_type(self): - ctx = context.get_admin_context() - volume_types.destroy(ctx, self.volume_type_id) - - def test_create_destroy(self): - volume = test_volume_data(self.volume_type_id) - - self.driver.create_volume(volume) - self.driver.delete_volume(volume) - - def test_get_volume_stats(self): - vol_stats = self.driver.get_volume_stats(True) - self.assertEqual('unknown', vol_stats['free_capacity_gb']) - - def 
test_create_volume_clone(self): - - src_volume_data = test_volume_data(self.volume_type_id) - clone_volume_data = test_clone_volume_data(self.volume_type_id) - self.driver.create_volume(src_volume_data) - self.driver.create_cloned_volume(clone_volume_data, src_volume_data) - self.driver.delete_volume(src_volume_data) - self.driver.delete_volume(clone_volume_data) - - def test_create_destroy_snapshot(self): - - volume_data = test_volume_data(self.volume_type_id) - snapshot_data = test_snapshot_data( - source_test_volume_data(self.volume_type_id)) - - self.driver.create_volume(volume_data) - self.driver.create_snapshot(snapshot_data) - self.driver.delete_snapshot(snapshot_data) - self.driver.delete_volume(volume_data) - - def test_create_volume_from_snapshot(self): - src_vol_data = source_test_volume_data(self.volume_type_id) - self.driver.create_volume(src_vol_data) - - volume_data = test_volume_data(self.volume_type_id) - snapshot_data = test_snapshot_data(src_vol_data) - - self.driver.create_snapshot(snapshot_data) - self.driver.create_volume_from_snapshot(volume_data, snapshot_data) - - self.driver.delete_snapshot(snapshot_data) - self.driver.delete_volume(src_vol_data) - self.driver.delete_volume(volume_data) - - def test_extend_volume(self): - volume_data = test_volume_data(self.volume_type_id) - self.driver.create_volume(volume_data) - self.driver.extend_volume(volume_data, 2) - self.driver.delete_volume(volume_data) - - def test_initialize_and_terminate_connection(self): - - connector_data = get_connector_data() - volume_data = test_volume_data(self.volume_type_id) - - self.driver.create_volume(volume_data) - res_initiatlize = self.driver.initialize_connection( - volume_data, connector_data) - exp_name = res_initiatlize['data']['scaleIO_volname'] - expected_initialize = {'data': {'bandwidthLimit': None, - 'hostIP': '10.0.0.2', - 'iopsLimit': None, - 'scaleIO_volname': exp_name, - 'scaleIO_volume_id': fake.PROVIDER_ID, - 'serverIP': '10.10.10.11', - 
'serverPassword': 'scaleio_password', - 'serverPort': 443, - 'serverToken': None, - 'serverUsername': 'scaleio_username'}, - 'driver_volume_type': 'scaleio'} - self.assertEqual( - expected_initialize, res_initiatlize, 'Unexpected return data') - - self.driver.terminate_connection( - volume_data, connector_data) - self.driver.delete_volume(volume_data) - - @mock.patch('cinder.volume.utils.is_group_a_cg_snapshot_type') - def test_create_delete_empty_group(self, cg_ss_enabled): - cg_ss_enabled.side_effect = [True, True] - group_data = test_group_data([self.volume_type], - self.group_type_id) - ctx = context.get_admin_context() - self.driver.create_group(ctx, group_data) - model_update, volumes_model_update = ( - self.driver.delete_group(ctx, group_data, [])) - self.assertEqual([], volumes_model_update, 'Unexpected return data') - - @mock.patch('cinder.volume.utils.is_group_a_cg_snapshot_type') - def test_create_update_delete_group(self, cg_ss_enabled): - cg_ss_enabled.side_effect = [True, True, True, True] - group_data = test_group_data([self.volume_type], - self.group_type_id) - ctx = context.get_admin_context() - self.driver.create_group(ctx, group_data) - - volume = test_volume_data(self.volume_type_id) - self.driver.create_volume(volume) - - model_update, ret1, ret2 = ( - self.driver.update_group(ctx, group_data, [volume], [])) - - self.assertEqual({'status': fields.GroupStatus.AVAILABLE}, - model_update) - - model_update, volumes_model_update = ( - self.driver.delete_group(ctx, group_data, [volume])) - self.assertEqual({'status': fields.GroupStatus.AVAILABLE}, - model_update) - self.assertEqual([{'status': 'deleted', 'id': fake.VOLUME_ID}], - volumes_model_update) - - @mock.patch('cinder.volume.utils.is_group_a_cg_snapshot_type') - def test_create_delete_group_snap(self, cg_ss_enabled): - cg_ss_enabled.side_effect = [True, True] - group_snap_data = test_group_snap_data([self.volume_type], - self.group_type_id) - ctx = context.get_admin_context() - - model_update, 
snapshots_model_update = ( - self.driver.create_group_snapshot(ctx, group_snap_data, [])) - self.assertEqual({'status': fields.GroupStatus.AVAILABLE}, - model_update) - self.assertEqual([], snapshots_model_update, 'Unexpected return data') - - model_update, snapshots_model_update = ( - self.driver.delete_group_snapshot(ctx, group_snap_data, [])) - self.assertEqual({}, model_update, 'Unexpected return data') - self.assertEqual([], snapshots_model_update, 'Unexpected return data') diff --git a/cinder/tests/unit/volume/drivers/test_datera.py b/cinder/tests/unit/volume/drivers/test_datera.py deleted file mode 100644 index 355f3e116..000000000 --- a/cinder/tests/unit/volume/drivers/test_datera.py +++ /dev/null @@ -1,1235 +0,0 @@ -# Copyright 2017 Datera -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -import mock -import six - -from cinder import context -from cinder import exception -from cinder import test -from cinder.volume import configuration as conf -from cinder.volume.drivers.datera import datera_common as datc -from cinder.volume.drivers.datera import datera_iscsi as datera -from cinder.volume import volume_types - - -datc.DEFAULT_SI_SLEEP = 0 -datc.DEFAULT_SI_SLEEP_API_2 = 0 -datc.DEFAULT_SNAP_SLEEP = 0 -URL_TEMPLATES = datera.datc.URL_TEMPLATES -OS_PREFIX = datera.datc.OS_PREFIX -UNMANAGE_PREFIX = datera.datc.UNMANAGE_PREFIX - - -class DateraVolumeTestCasev2(test.TestCase): - - def setUp(self): - super(DateraVolumeTestCasev2, self).setUp() - - self.cfg = mock.Mock(spec=conf.Configuration) - self.cfg.san_ip = '127.0.0.1' - self.cfg.san_is_local = True - self.cfg.datera_api_token = 'secret' - self.cfg.datera_api_port = '7717' - self.cfg.datera_num_replicas = '2' - self.cfg.datera_503_timeout = 0.01 - self.cfg.datera_503_interval = 0.001 - self.cfg.datera_acl_allow_all = False - self.cfg.datera_debug = False - self.cfg.san_login = 'user' - self.cfg.san_password = 'pass' - self.cfg.datera_tenant_id = 'test-tenant' - self.cfg.driver_client_cert = None - self.cfg.driver_client_cert_key = None - self.cfg.datera_disable_profiler = False - self.cfg.driver_use_ssl = False - - mock_exec = mock.Mock() - mock_exec.return_value = ('', '') - - self.driver = datera.DateraDriver(execute=mock_exec, - configuration=self.cfg) - self.driver.set_initialized() - self.driver.configuration.get = _config_getter - self.volume = _stub_volume() - self.driver._request = mock.Mock() - m = mock.Mock() - m.json.return_value = {'api_versions': ['v2']} - self.driver._request.return_value = m - self.mock_api = mock.Mock() - self.driver._issue_api_request = self.mock_api - self._apiv = "2" - self._tenant = None - - # self.addCleanup(self.api_patcher.stop) - - def test_volume_create_success(self): - self.mock_api.return_value = stub_single_ai - 
self.assertIsNone(self.driver.create_volume(self.volume)) - - def test_volume_create_fails(self): - self.mock_api.side_effect = exception.DateraAPIException - self.assertRaises(exception.DateraAPIException, - self.driver.create_volume, self.volume) - - def test_volume_create_delay(self): - """Verify after 1st retry volume becoming available is a success.""" - - def _progress_api_return(mock_api): - if mock_api.retry_count == 1: - _bad_vol_ai = stub_single_ai.copy() - _bad_vol_ai['storage_instances']['storage-1'][ - 'volumes']['volume-1']['op_status'] = 'unavailable' - return _bad_vol_ai - else: - self.mock_api.retry_count += 1 - return stub_single_ai - self.mock_api.retry_count = 0 - self.mock_api.return_value = _progress_api_return(self.mock_api) - self.assertEqual(1, self.mock_api.retry_count) - self.assertIsNone(self.driver.create_volume(self.volume)) - - @mock.patch.object(volume_types, 'get_volume_type') - def test_create_volume_with_extra_specs(self, mock_get_type): - self.mock_api.return_value = stub_single_ai - mock_get_type.return_value = { - 'name': u'The Best', - 'qos_specs_id': None, - 'deleted': False, - 'created_at': '2015-08-14 04:18:11', - 'updated_at': None, - 'extra_specs': { - u'volume_backend_name': u'datera', - u'qos:max_iops_read': u'2000', - u'qos:max_iops_write': u'4000', - u'qos:max_iops_total': u'4000' - }, - 'is_public': True, - 'deleted_at': None, - 'id': u'dffb4a83-b8fb-4c19-9f8c-713bb75db3b1', - 'description': None - } - - mock_volume = _stub_volume( - volume_type_id='dffb4a83-b8fb-4c19-9f8c-713bb75db3b1' - ) - - self.assertIsNone(self.driver.create_volume(mock_volume)) - self.assertTrue(mock_get_type.called) - - def test_create_cloned_volume_success(self): - source_volume = _stub_volume( - id='7f91abfa-7964-41ed-88fc-207c3a290b4f', - display_name='foo' - ) - self.assertIsNone(self.driver.create_cloned_volume(self.volume, - source_volume)) - - def test_create_cloned_volume_success_larger(self): - cloned_volume = _stub_volume( - 
id='7f91abfa-7964-41ed-88fc-207c3a290b4f', - display_name='foo', - size=2 - ) - - mock_extend = mock.Mock() - if self._apiv == '2': - self.driver._extend_volume_2 = mock_extend - self.driver.create_cloned_volume(cloned_volume, self.volume) - mock_extend.assert_called_with( - cloned_volume, cloned_volume['size']) - else: - self.driver._extend_volume_2_1 = mock_extend - self.driver.create_cloned_volume(cloned_volume, self.volume) - mock_extend.assert_called_with( - cloned_volume, cloned_volume['size']) - - def test_create_cloned_volume_fails(self): - self.mock_api.side_effect = exception.DateraAPIException - source_volume = _stub_volume( - id='7f91abfa-7964-41ed-88fc-207c3a290b4f', - display_name='foo' - ) - self.assertRaises(exception.DateraAPIException, - self.driver.create_cloned_volume, self.volume, - source_volume) - - def test_delete_volume_success(self): - if self._apiv == '2': - self.mock_api.side_effect = [ - {}, - self._generate_fake_api_request()( - "acl_policy", api_version=self._apiv, tenant=self._tenant), - self._generate_fake_api_request()( - "ig_group", api_version=self._apiv, tenant=self._tenant), - {}, - {}, - {}, - {}, - {}] - else: - self.mock_api.side_effect = [ - {}, - {}, - self._generate_fake_api_request()( - "acl_policy", api_version=self._apiv, tenant=self._tenant), - self._generate_fake_api_request()( - "ig_group", api_version=self._apiv, tenant=self._tenant), - {}, - {}, - {}, - {}, - {}] - self.assertIsNone(self.driver.delete_volume(self.volume)) - - def test_delete_volume_not_found(self): - if self._apiv == '2': - self.mock_api.side_effect = exception.NotFound - else: - self.mock_api.side_effect = [ - self._generate_fake_api_request("tenant"), - {}, - exception.NotFound, - self._generate_fake_api_request()( - "acl_policy", api_version=self._apiv, tenant=self._tenant), - self._generate_fake_api_request()( - "ig_group", api_version=self._apiv, tenant=self._tenant), - {}, - {}, - {}, - {}, - ] - 
self.assertIsNone(self.driver.delete_volume(self.volume)) - - def test_delete_volume_fails(self): - self.mock_api.side_effect = exception.DateraAPIException - self.assertRaises(exception.DateraAPIException, - self.driver.delete_volume, self.volume) - - def test_ensure_export_success(self): - self.mock_api.side_effect = self._generate_fake_api_request() - ctxt = context.get_admin_context() - self.assertIsNone(self.driver.ensure_export(ctxt, - self.volume, - None)) - - def test_ensure_export_fails(self): - self.mock_api.side_effect = exception.DateraAPIException - ctxt = context.get_admin_context() - self.assertRaises(exception.DateraAPIException, - self.driver.ensure_export, ctxt, self.volume, None) - - def test_create_export_target_does_not_exist_success(self): - self.mock_api.side_effect = self._generate_fake_api_request( - targets_exist=False) - ctxt = context.get_admin_context() - self.assertIsNone(self.driver.create_export(ctxt, - self.volume, - None)) - - def test_create_export_fails(self): - self.mock_api.side_effect = exception.DateraAPIException - ctxt = context.get_admin_context() - self.assertRaises(exception.DateraAPIException, - self.driver.create_export, - ctxt, - self.volume, - None) - - def test_initialize_connection_success(self): - self.mock_api.side_effect = self._generate_fake_api_request() - connector = {} - - expected = { - 'driver_volume_type': 'iscsi', - 'data': { - 'target_discovered': False, - 'volume_id': self.volume['id'], - 'target_iqn': ('iqn.2013-05.com.daterainc:tc:01:sn:' - '3bbb080aab7d9abc'), - 'target_portal': '172.28.41.63:3260', - 'target_lun': 0, - 'discard': False}} - self.assertEqual(expected, - self.driver.initialize_connection(self.volume, - connector)) - - def test_initialize_connection_fails(self): - self.mock_api.side_effect = exception.DateraAPIException - connector = {} - self.assertRaises(exception.DateraAPIException, - self.driver.initialize_connection, - self.volume, - connector) - - def 
test_detach_volume_success(self): - if self._apiv == '2': - self.mock_api.side_effect = [ - {}, - self._generate_fake_api_request()( - "acl_policy", api_version=self._apiv, tenant=self._tenant), - self._generate_fake_api_request()( - "ig_group", api_version=self._apiv, tenant=self._tenant), - {}, - {}, - {}, - {}] - else: - self.mock_api.side_effect = [ - {}, - {}, - self._generate_fake_api_request()( - "acl_policy", api_version=self._apiv, tenant=self._tenant), - self._generate_fake_api_request()( - "ig_group", api_version=self._apiv, tenant=self._tenant), - {}, - {}, - {}, - {}] - ctxt = context.get_admin_context() - volume = _stub_volume(status='in-use') - self.assertIsNone(self.driver.detach_volume(ctxt, volume)) - - def test_detach_volume_fails(self): - self.mock_api.side_effect = exception.DateraAPIException - ctxt = context.get_admin_context() - volume = _stub_volume(status='in-use') - self.assertRaises(exception.DateraAPIException, - self.driver.detach_volume, ctxt, volume) - - def test_detach_volume_not_found(self): - if self._apiv == '2': - self.mock_api.side_effect = exception.NotFound - else: - self.mock_api.side_effect = [ - self._generate_fake_api_request("tenant"), - exception.NotFound, - self._generate_fake_api_request()( - "acl_policy", api_version=self._apiv, tenant=self._tenant), - self._generate_fake_api_request()( - "ig_group", api_version=self._apiv, tenant=self._tenant), - {}, - {}, - {}, - {}] - ctxt = context.get_admin_context() - volume = _stub_volume(status='in-use') - self.assertIsNone(self.driver.detach_volume(ctxt, volume)) - - def test_create_snapshot_success(self): - snapshot = _stub_snapshot(volume_id=self.volume['id']) - self.mock_api.side_effect = self._generate_fake_api_request() - self.assertIsNone(self.driver.create_snapshot(snapshot)) - - def test_create_snapshot_fails(self): - self.mock_api.side_effect = exception.DateraAPIException - snapshot = _stub_snapshot(volume_id=self.volume['id']) - 
self.assertRaises(exception.DateraAPIException, - self.driver.create_snapshot, snapshot) - - def test_delete_snapshot_success(self): - if self._apiv == '2': - self.mock_api.return_value = stub_return_snapshots - else: - self.mock_api.return_value = stub_return_snapshots_21 - snapshot = _stub_snapshot(volume_id=self.volume['id']) - self.assertIsNone(self.driver.delete_snapshot(snapshot)) - - def test_delete_snapshot_not_found(self): - if self._apiv == '2': - self.mock_api.side_effect = [ - stub_return_snapshots, - exception.NotFound] - else: - self.mock_api.side_effect = [ - self._generate_fake_api_request("tenant"), - stub_return_snapshots_21, - exception.NotFound] - snapshot = _stub_snapshot(self.volume['id'], volume_id="test") - self.assertIsNone(self.driver.delete_snapshot(snapshot)) - - def test_delete_snapshot_fails(self): - self.mock_api.side_effect = exception.DateraAPIException - snapshot = _stub_snapshot(volume_id=self.volume['id']) - self.assertRaises(exception.DateraAPIException, - self.driver.delete_snapshot, snapshot) - - def test_create_volume_from_snapshot_success(self): - snapshot = _stub_snapshot(volume_id=self.volume['id']) - if self._apiv == '2': - self.mock_api.side_effect = [ - stub_return_snapshots, - list(stub_return_snapshots.values())[0], - None] - else: - self.mock_api.side_effect = [ - self._generate_fake_api_request("tenant"), - stub_return_snapshots_21, - {'data': stub_return_snapshots_21['data'][0]}, - None] - self.assertIsNone( - self.driver.create_volume_from_snapshot(self.volume, snapshot)) - - @mock.patch.object(datera.DateraDriver, 'extend_volume') - def test_create_volume_from_snapshot_success_larger(self, mock_extend): - snapshot = _stub_snapshot(volume_id=self.volume['id']) - extend_volume = _stub_volume(size=2) - - mock_extend = mock.Mock() - if self._apiv == '2': - self.driver._extend_volume_2 = mock_extend - self.mock_api.side_effect = [ - stub_return_snapshots, - list(stub_return_snapshots.values())[0], - None] - 
self.driver.create_volume_from_snapshot(extend_volume, snapshot) - mock_extend.assert_called_once_with(extend_volume, - extend_volume['size']) - else: - self.driver._extend_volume_2_1 = mock_extend - self.mock_api.side_effect = [ - self._generate_fake_api_request("tenant"), - stub_return_snapshots_21, - {'data': stub_return_snapshots_21['data'][0]}, - None] - self.driver.create_volume_from_snapshot(extend_volume, snapshot) - mock_extend.assert_called_once_with(extend_volume, - extend_volume['size']) - - def test_create_volume_from_snapshot_fails(self): - self.mock_api.side_effect = exception.DateraAPIException - snapshot = _stub_snapshot(volume_id=self.volume['id']) - self.assertRaises(exception.DateraAPIException, - self.driver.create_volume_from_snapshot, self.volume, - snapshot) - - def test_extend_volume_success(self): - volume = _stub_volume(size=1) - self.mock_api.side_effect = [ - stub_get_export, - {'data': stub_get_export}, - self._generate_fake_api_request()( - "acl_policy", api_version=self._apiv, tenant=self._tenant), - self._generate_fake_api_request()( - "ig_group", api_version=self._apiv, tenant=self._tenant), - self._generate_fake_api_request()( - "acl_policy", api_version=self._apiv, tenant=self._tenant), - {}, {}, {}, {}, {}, {}, stub_get_export, - {'data': stub_get_export}] - self.assertIsNone(self.driver.extend_volume(volume, 2)) - - def test_extend_volume_fails(self): - self.mock_api.side_effect = exception.DateraAPIException - volume = _stub_volume(size=1) - self.assertRaises(exception.DateraAPIException, - self.driver.extend_volume, volume, 2) - - def test_login_successful(self): - self.mock_api.return_value = { - 'key': 'dd2469de081346c28ac100e071709403' - } - self.assertIsNone(self.driver.login()) - self.assertEqual(1, self.mock_api.call_count) - - def test_login_unsuccessful(self): - self.mock_api.side_effect = exception.NotAuthorized - self.assertRaises(exception.NotAuthorized, self.driver.login) - self.assertEqual(1, 
self.mock_api.call_count) - - def test_manage_existing(self): - self.mock_api.return_value = {} - if self._apiv == '2': - test_name = {"source-name": "test-app:test-si:test-vol"} - self.assertIsNone( - self.driver.manage_existing( - _stub_volume(), - test_name)) - self.mock_api.assert_called_with( - URL_TEMPLATES['ai_inst']().format( - test_name["source-name"].split(":")[0]), - method='put', - body={'name': OS_PREFIX + _stub_volume()['id']}, - api_version=self._apiv) - else: - tenant = 'tenant' - test_name = {"source-name": "{}:test-app:test-si:test-vol".format( - tenant)} - self.assertIsNone( - self.driver.manage_existing( - _stub_volume(), - test_name)) - self.mock_api.assert_called_with( - URL_TEMPLATES['ai_inst']().format( - test_name["source-name"].split(":")[1]), - method='put', - body={'name': OS_PREFIX + _stub_volume()['id']}, - api_version=self._apiv, - tenant='tenant') - - def test_manage_existing_wrong_ref(self): - TEST_NAME = {"source-name": "incorrect-reference"} - self.assertRaises( - exception.ManageExistingInvalidReference, - self.driver.manage_existing, - _stub_volume(), - TEST_NAME) - - def test_manage_existing_get_size(self): - TEST_NAME = {"source-name": "test-app:storage-1:volume-1"} - self.mock_api.side_effect = self._generate_fake_api_request() - self.assertEqual( - self.driver.manage_existing_get_size( - _stub_volume(), - TEST_NAME), 500) - if self._apiv == '2': - self.mock_api.assert_called_with( - URL_TEMPLATES['ai_inst']().format( - TEST_NAME["source-name"].split(":")[0]), - api_version=self._apiv) - else: - self.mock_api.assert_called_with( - URL_TEMPLATES['ai_inst']().format( - TEST_NAME["source-name"].split(":")[0]), - api_version=self._apiv, - tenant=self._tenant) - - def test_manage_existing_get_size_wrong_ref(self): - TEST_NAME = {"source-name": "incorrect-reference"} - self.assertRaises( - exception.ManageExistingInvalidReference, - self.driver.manage_existing_get_size, - _stub_volume(), - TEST_NAME) - - def 
test_get_manageable_volumes(self): - if self._apiv == '2': - self.mock_api.return_value = non_cinder_ais - six.assertCountEqual( - self, - self.driver.get_manageable_volumes( - {}, "", 10, 0, "", ""), - [{'cinder_id': None, - 'extra_info': None, - 'reason_not_safe': None, - 'reference': { - "source-name": 'test-app-inst:storage-1:volume-1'}, - 'safe_to_manage': True, - 'size': 50}, - {'cinder_id': 'c20aba21-6ef6-446b-b374-45733b4883ba', - 'extra_info': None, - 'reason_not_safe': None, - 'reference': None, - 'safe_to_manage': False, - 'size': None}]) - else: - self.mock_api.return_value = non_cinder_ais_21 - self.assertEqual( - self.driver.get_manageable_volumes( - {}, "", 10, 0, "", ""), - [{'cinder_id': None, - 'extra_info': None, - 'reason_not_safe': '', - 'reference': { - "source-name": 'test-app-inst:storage-1:volume-1'}, - 'safe_to_manage': True, - 'size': 50}, - {'cinder_id': 'c20aba21-6ef6-446b-b374-45733b4883ba', - 'extra_info': None, - 'reason_not_safe': '', - 'reference': None, - 'safe_to_manage': False, - 'size': None}]) - - def test_unmanage(self): - self.mock_api.return_value = {} - self.assertIsNone(self.driver.unmanage(_stub_volume())) - if self._apiv == '2': - self.mock_api.assert_called_with( - URL_TEMPLATES['ai_inst']().format( - OS_PREFIX + _stub_volume()['id']), - method='put', - body={'name': UNMANAGE_PREFIX + _stub_volume()['id']}, - api_version=self._apiv) - else: - self.mock_api.assert_called_with( - URL_TEMPLATES['ai_inst']().format( - OS_PREFIX + _stub_volume()['id']), - method='put', - body={'name': UNMANAGE_PREFIX + _stub_volume()['id']}, - api_version=self._apiv, - tenant=self._tenant) - - def _generate_fake_api_request(self, targets_exist=True): - def _fake_api_request(resource_type, *args, **kwargs): - if 'api_version' not in kwargs: - raise ValueError("Fix me dummy") - result = None - if resource_type.split('/')[-1] == 'storage-1': - result = stub_get_export - elif (resource_type.split('/')[-1] == - 
'c20aba21-6ef6-446b-b374-45733b4883ba'): - result = stub_app_instance[ - 'c20aba21-6ef6-446b-b374-45733b4883ba'] - elif resource_type == 'acl_policy': - result = stub_acl if self._apiv == '2' else stub_acl_21 - elif resource_type == 'ig_group': - result = stub_ig if self._apiv == '2' else stub_ig_21 - elif resource_type.split('/')[-1] == 'snapshots': - result = {'timestamp': 'test_ts'} - elif resource_type.split('/')[-1] == 'test_ts': - result = {'op_state': 'available'} - elif resource_type == 'tenant': - return {} - else: - if kwargs.get('api_version') == '2': - result = list(stub_app_instance.values())[0] - else: - result = stub_app_instance_21['data'] - - if kwargs.get('api_version') == '2': - return result - else: - return {'data': result} - return _fake_api_request - - -class DateraVolumeTestCasev21(DateraVolumeTestCasev2): - - def setUp(self): - super(DateraVolumeTestCasev21, self).setUp() - - m = mock.Mock() - m.json.return_value = {'api_versions': ['v2.1']} - self.driver._request.return_value = m - self.driver._store_metadata = mock.Mock() - self._apiv = '2.1' - self._tenant = self.cfg.datera_tenant_id - -stub_acl = { - 'initiator_groups': [ - '/initiator_groups/IG-8739f309-dae9-4534-aa02-5b8e9e96eefd'], - 'initiators': [], - 'path': ('/app_instances/8739f309-dae9-4534-aa02-5b8e9e96eefd/' - 'storage_instances/storage-1/acl_policy')} - -stub_acl_21 = { - 'initiator_groups': [ - {'path': '/initiator_groups/IG-8739f309-dae9-4534-aa02-5b8e9e96eefd'}], - 'initiators': [], - 'path': ('/app_instances/8739f309-dae9-4534-aa02-5b8e9e96eefd/' - 'storage_instances/storage-1/acl_policy')} - -stub_ig = { - 'members': ['/initiators/iqn.1993-08.org.debian:01:ed22de8d75c0'], - 'name': 'IG-21e08155-8b95-4108-b148-089f64623963', - 'path': '/initiator_groups/IG-21e08155-8b95-4108-b148-089f64623963'} - -stub_ig_21 = { - 'members': [ - {'path': '/initiators/iqn.1993-08.org.debian:01:ed22de8d75c0'}], - 'name': 'IG-21e08155-8b95-4108-b148-089f64623963', - 'path': 
'/initiator_groups/IG-21e08155-8b95-4108-b148-089f64623963'} - -stub_create_export = { - "_ipColl": ["172.28.121.10", "172.28.120.10"], - "acls": {}, - "activeServers": {"4594953e-f97f-e111-ad85-001e6738c0f0": "1"}, - "ctype": "TC_BLOCK_ISCSI", - "endpointsExt1": { - "4594953e-f97f-e111-ad85-001e6738c0f0": { - "ipHigh": 0, - "ipLow": "192421036", - "ipStr": "172.28.120.11", - "ipV": 4, - "name": "", - "network": 24 - } - }, - "endpointsExt2": { - "4594953e-f97f-e111-ad85-001e6738c0f0": { - "ipHigh": 0, - "ipLow": "192486572", - "ipStr": "172.28.121.11", - "ipV": 4, - "name": "", - "network": 24 - } - }, - "inodes": {"c20aba21-6ef6-446b-b374-45733b4883ba": "1"}, - "name": "", - "networkPort": 0, - "serverAllocation": "TS_ALLOC_COMPLETED", - "servers": {"4594953e-f97f-e111-ad85-001e6738c0f0": "1"}, - "targetAllocation": "TS_ALLOC_COMPLETED", - "targetIds": { - "4594953e-f97f-e111-ad85-001e6738c0f0": { - "ids": [{ - "dev": None, - "id": "iqn.2013-05.com.daterainc::01:sn:fc372bc0490b2dbe" - }] - } - }, - "typeName": "TargetIscsiConfig", - "uuid": "7071efd7-9f22-4996-8f68-47e9ab19d0fd" -} - - -stub_app_instance = { - "c20aba21-6ef6-446b-b374-45733b4883ba": { - "admin_state": "online", - "clone_src": {}, - "create_mode": "openstack", - "descr": "", - "health": "ok", - "name": "c20aba21-6ef6-446b-b374-45733b4883ba", - "path": "/app_instances/c20aba21-6ef6-446b-b374-45733b4883ba", - "storage_instances": { - "storage-1": { - "access": { - "ips": [ - "172.28.41.63" - ], - "iqn": "iqn.2013-05.com.daterainc:tc:01:sn:" - "3bbb080aab7d9abc", - "path": "/app_instances/c20aba21-6ef6-446b-b374" - "-45733b4883ba/storage_instances/storage-1/access" - }, - "access_control": { - "initiator_groups": [], - "initiators": [], - "path": "/app_instances/c20aba21-6ef6-446b-b374-" - "45733b4883ba/storage_instances/storage-1" - "/access_control" - }, - "access_control_mode": "allow_all", - "active_initiators": [], - "active_storage_nodes": [ - 
"/storage_nodes/1c4feac4-17c7-478b-8928-c76e8ec80b72" - ], - "admin_state": "online", - "auth": { - "initiator_pswd": "", - "initiator_user_name": "", - "path": "/app_instances/c20aba21-6ef6-446b-b374-" - "45733b4883ba/storage_instances/storage-1/auth", - "target_pswd": "", - "target_user_name": "", - "type": "none" - }, - "creation_type": "user", - "descr": "c20aba21-6ef6-446b-b374-45733b4883ba__ST__storage-1", - "op_state": "available", - "name": "storage-1", - "path": "/app_instances/c20aba21-6ef6-446b-b374-" - "45733b4883ba/storage_instances/storage-1", - "uuid": "b9897b84-149f-43c7-b19c-27d6af8fa815", - "volumes": { - "volume-1": { - "capacity_in_use": 0, - "name": "volume-1", - "op_state": "available", - "path": "/app_instances/c20aba21-6ef6-446b-b374-" - "45733b4883ba/storage_instances/storage-1" - "/volumes/volume-1", - "replica_count": 3, - "size": 500, - "snapshot_policies": {}, - "snapshots": { - "1445384931.322468627": { - "op_state": "available", - "path": "/app_instances/c20aba21-6ef6-446b" - "-b374-45733b4883ba/storage_instances" - "/storage-1/volumes/volume-1/snapshots" - "/1445384931.322468627", - "uuid": "0bb34f0c-fea4-48e0-bf96-591120ac7e3c" - } - }, - "uuid": "c20aba21-6ef6-446b-b374-45733b4883ba" - } - } - } - }, - "uuid": "c20aba21-6ef6-446b-b374-45733b4883ba" - } -} -stub_app_instance_21 = { - "tenant": "/root", - "path": "/app_instances/1e52946a-5c77-45ed-8b4e-b46e7236a8eb", - "version": "v2.1", - "data": { - "tenant": "/root", - "path": "/app_instances/1e52946a-5c77-45ed-8b4e-b46e7236a8eb", - "name": "OS-9b0216bc-8aab-47f2-b746-843f497cb7a6", - "id": "1e52946a-5c77-45ed-8b4e-b46e7236a8eb", - "health": "ok", - "app_template": { - "path": "", - "resolved_path": "", - "resolved_tenant": "" - }, - "descr": "", - "admin_state": "online", - "storage_instances": [ - { - "health": "ok", - "path": "/app_instances/1e52946a-5c77-45ed-8b4e-b46e7236a8eb/" - "storage_instances/storage-1", - "name": "storage-1", - "admin_state": "online", - "op_state": 
"available", - "volumes": [ - { - "path": "/app_instances/1e52946a-5c77-45ed-8b4e-" - "b46e7236a8eb/" - "storage_instances/storage-1/volumes/volume-1", - "name": "volume-1", - "replica_count": 1, - "uuid": "9b0216bc-8aab-47f2-b746-843f497cb7a6", - "size": 500, - "capacity_in_use": 0, - "snapshot_policies": [], - "snapshots": [], - "placement_mode": "hybrid", - "op_state": "available", - "active_storage_nodes": [ - { - "path": "/storage_nodes/75f2cae4-68fb-4236-" - "a90c-b6c480b68816" - } - ], - "health": "ok" - } - ], - "access_control_mode": "deny_all", - "acl_policy": { - "path": "/app_instances/1e52946a-5c77-45ed-8b4e-" - "b46e7236a8eb/" - "storage_instances/storage-1/acl_policy", - "initiators": [], - "initiator_groups": [] - }, - "ip_pool": { - "path": "/access_network_ip_pools/default", - "resolved_path": "/access_network_ip_pools/default", - "resolved_tenant": "/root" - }, - "access": { - "path": "/app_instances/1e52946a-5c77-45ed-8b4e-" - "b46e7236a8eb/" - "storage_instances/storage-1/access", - "ips": [ - "172.28.41.63", - "172.29.41.29" - ], - "iqn": "iqn.2013-05.com.daterainc:tc:01:sn:" - "3bbb080aab7d9abc" - }, - "auth": { - "path": "/app_instances/1e52946a-5c77-45ed-8b4e-" - "b46e7236a8eb/" - "storage_instances/storage-1/auth", - "type": "none", - "initiator_user_name": "", - "initiator_pswd": "(hidden)", - "target_user_name": "", - "target_pswd": "(hidden)" - }, - "active_initiators": [], - "active_storage_nodes": [ - { - "path": "/storage_nodes/75f2cae4-68fb-4236-a90c-" - "b6c480b68816" - } - ], - "uuid": "eb3d7b07-b520-4cc2-b365-90135b84c356" - } - ], - "create_mode": "openstack", - "uuid": "9b0216bc-8aab-47f2-b746-843f497cb7a6", - "snapshots": [], - "snapshot_policies": [] - } -} - -stub_get_export = stub_app_instance[ - 'c20aba21-6ef6-446b-b374-45733b4883ba']['storage_instances']['storage-1'] - -stub_single_ai = stub_app_instance['c20aba21-6ef6-446b-b374-45733b4883ba'] - -stub_return_snapshots = \ - { - "1446076293.118600738": { - "op_state": 
"available", - "path": "/app_instances/c20aba21-6ef6-446b-b374-45733b4883ba" - "/storage_instances/storage-1/volumes/volume-1/snapshots/" - "1446076293.118600738", - "uuid": "0bb34f0c-fea4-48e0-bf96-591120ac7e3c" - }, - "1446076384.00607846": { - "op_state": "available", - "path": "/app_instances/c20aba21-6ef6-446b-b374-45733b4883ba" - "/storage_instances/storage-1/volumes/volume-1/snapshots/" - "1446076384.00607846", - "uuid": "25b4b959-c30a-45f2-a90c-84a40f34f0a1" - } - } - -stub_return_snapshots_21 = { - 'data': [ - { - "op_state": "available", - "path": "/app_instances/c20aba21-6ef6-446b-b374-45733b4883ba" - "/storage_instances/storage-1/volumes/volume-1/snapshots/", - "timestamp": "1446076293.118600738", - "utc_ts": "1446076293.118600738", - "uuid": "0bb34f0c-fea4-48e0-bf96-591120ac7e3c" - }, - { - "op_state": "available", - "path": "/app_instances/c20aba21-6ef6-446b-b374-45733b4883ba" - "/storage_instances/storage-1/volumes/volume-1/snapshots/", - "timestamp": "1446076384.00607846", - "utc_ts": "1446076384.00607846", - "uuid": "25b4b959-c30a-45f2-a90c-84a40f34f0a1" - }] -} - -non_cinder_ais = { - "75bc1c69-a399-4acb-aade-3514caf13c5e": { - "admin_state": "online", - "create_mode": "normal", - "descr": "", - "health": "ok", - "id": "75bc1c69-a399-4acb-aade-3514caf13c5e", - "name": "test-app-inst", - "path": "/app_instances/75bc1c69-a399-4acb-aade-3514caf13c5e", - "snapshot_policies": {}, - "snapshots": {}, - "storage_instances": { - "storage-1": { - "access": { - "ips": [ - "172.28.41.93" - ], - "iqn": "iqn.2013-05.com.daterainc:tc:01:sn:" - "29036682e2d37b98", - "path": "/app_instances/75bc1c69-a399-4acb-aade-" - "3514caf13c5e/storage_instances/storage-1/access" - }, - "access_control_mode": "deny_all", - "acl_policy": { - "initiator_groups": [], - "initiators": [], - "path": "/app_instances/75bc1c69-a399-4acb-aade-" - "3514caf13c5e/storage_instances/storage-" - "1/acl_policy" - }, - "active_initiators": [], - "active_storage_nodes": [ - 
"/storage_nodes/78b350a8-43f2-453f-a257-8df76d7406b9" - ], - "admin_state": "online", - "auth": { - "initiator_pswd": "(hidden)", - "initiator_user_name": "", - "path": "/app_instances/75bc1c69-a399-4acb-aade-" - "3514caf13c5e/storage_instances/storage-1/auth", - "target_pswd": "(hidden)", - "target_user_name": "", - "type": "none" - }, - "creation_type": "user", - "ip_pool": "/access_network_ip_pools/default", - "name": "storage-1", - "op_state": "available", - "path": "/app_instances/75bc1c69-a399-4acb-aade-" - "3514caf13c5e/storage_instances/storage-1", - "uuid": "6421237d-e4fc-433a-b535-148d5b6d8586", - "volumes": { - "volume-1": { - "capacity_in_use": 0, - "name": "volume-1", - "op_state": "available", - "path": "/app_instances/75bc1c69-a399-4acb-aade-" - "3514caf13c5e/storage_instances/storage-" - "1/volumes/volume-1", - "replica_count": 1, - "size": 50, - "snapshot_policies": {}, - "snapshots": {}, - "uuid": "e674d29c-a672-40d1-9577-abe3a504ffe9" - } - } - } - }, - "uuid": "00000000-0000-0000-0000-000000000000" - }, - "dfdaf8d1-8976-4c13-a829-3345e03cf810": { - "admin_state": "offline", - "create_mode": "openstack", - "descr": "", - "health": "ok", - "id": "dfdaf8d1-8976-4c13-a829-3345e03cf810", - "name": "OS-c20aba21-6ef6-446b-b374-45733b4883ba", - "path": "/app_instances/dfdaf8d1-8976-4c13-a829-3345e03cf810", - "snapshot_policies": {}, - "snapshots": {}, - "storage_instances": { - "storage-1": { - "access": { - "ips": [ - "172.28.41.57" - ], - "iqn": "iqn.2013-05.com.daterainc:tc:01:sn:" - "56cd59e754ad02b6", - "path": "/app_instances/dfdaf8d1-8976-4c13-a829-" - "3345e03cf810/storage_instances/storage-1/access" - }, - "access_control_mode": "deny_all", - "acl_policy": { - "initiator_groups": [], - "initiators": [], - "path": "/app_instances/dfdaf8d1-8976-4c13-a829-" - "3345e03cf810/storage_instances/storage-" - "1/acl_policy" - }, - "active_initiators": [], - "active_storage_nodes": [ - "/storage_nodes/78b350a8-43f2-453f-a257-8df76d7406b9" - ], - 
"admin_state": "offline", - "auth": { - "initiator_pswd": "(hidden)", - "initiator_user_name": "", - "path": "/app_instances/dfdaf8d1-8976-4c13-a829-" - "3345e03cf810/storage_instances/storage-1/auth", - "target_pswd": "(hidden)", - "target_user_name": "", - "type": "none" - }, - "creation_type": "user", - "ip_pool": "/access_network_ip_pools/default", - "name": "storage-1", - "op_state": "unavailable", - "path": "/app_instances/dfdaf8d1-8976-4c13-a829-3345e03cf810" - "/storage_instances/storage-1", - "uuid": "5620a673-9985-464e-9616-e325a50eac60", - "volumes": { - "volume-1": { - "capacity_in_use": 0, - "name": "volume-1", - "op_state": "available", - "path": "/app_instances/dfdaf8d1-8976-4c13-a829-" - "3345e03cf810/storage_instances/storage-" - "1/volumes/volume-1", - "replica_count": 1, - "size": 5, - "snapshot_policies": {}, - "snapshots": {}, - "uuid": "c20aba21-6ef6-446b-b374-45733b4883ba" - } - } - } - }, - "uuid": "c20aba21-6ef6-446b-b374-45733b4883ba" - } -} - -non_cinder_ais_21 = { - 'data': [{ - "admin_state": "online", - "create_mode": "normal", - "descr": "", - "health": "ok", - "id": "75bc1c69-a399-4acb-aade-3514caf13c5e", - "name": "test-app-inst", - "path": "/app_instances/75bc1c69-a399-4acb-aade-3514caf13c5e", - "snapshot_policies": {}, - "snapshots": {}, - "storage_instances": { - "storage-1": { - "access": { - "ips": [ - "172.28.41.93" - ], - "iqn": "iqn.2013-05.com.daterainc:tc:01:sn:" - "29036682e2d37b98", - "path": "/app_instances/75bc1c69-a399-4acb-aade-" - "3514caf13c5e/storage_instances/storage-1/access" - }, - "access_control_mode": "deny_all", - "acl_policy": { - "initiator_groups": [], - "initiators": [], - "path": "/app_instances/75bc1c69-a399-4acb-aade-" - "3514caf13c5e/storage_instances/storage-" - "1/acl_policy" - }, - "active_initiators": [], - "active_storage_nodes": [ - "/storage_nodes/78b350a8-43f2-453f-a257-8df76d7406b9" - ], - "admin_state": "online", - "auth": { - "initiator_pswd": "(hidden)", - "initiator_user_name": "", - 
"path": "/app_instances/75bc1c69-a399-4acb-aade-" - "3514caf13c5e/storage_instances/storage-1/auth", - "target_pswd": "(hidden)", - "target_user_name": "", - "type": "none" - }, - "creation_type": "user", - "ip_pool": "/access_network_ip_pools/default", - "name": "storage-1", - "op_state": "available", - "path": "/app_instances/75bc1c69-a399-4acb-aade-" - "3514caf13c5e/storage_instances/storage-1", - "uuid": "6421237d-e4fc-433a-b535-148d5b6d8586", - "volumes": { - "volume-1": { - "capacity_in_use": 0, - "name": "volume-1", - "op_state": "available", - "path": "/app_instances/75bc1c69-a399-4acb-aade-" - "3514caf13c5e/storage_instances/storage-" - "1/volumes/volume-1", - "replica_count": 1, - "size": 50, - "snapshot_policies": {}, - "snapshots": {}, - "uuid": "e674d29c-a672-40d1-9577-abe3a504ffe9" - } - } - } - }, - "uuid": "00000000-0000-0000-0000-000000000000" - }, - { - "admin_state": "offline", - "create_mode": "openstack", - "descr": "", - "health": "ok", - "id": "dfdaf8d1-8976-4c13-a829-3345e03cf810", - "name": "OS-c20aba21-6ef6-446b-b374-45733b4883ba", - "path": "/app_instances/dfdaf8d1-8976-4c13-a829-3345e03cf810", - "snapshot_policies": {}, - "snapshots": {}, - "storage_instances": { - "storage-1": { - "access": { - "ips": [ - "172.28.41.57" - ], - "iqn": "iqn.2013-05.com.daterainc:tc:01:sn:" - "56cd59e754ad02b6", - "path": "/app_instances/dfdaf8d1-8976-4c13-a829-" - "3345e03cf810/storage_instances/storage-1/access" - }, - "access_control_mode": "deny_all", - "acl_policy": { - "initiator_groups": [], - "initiators": [], - "path": "/app_instances/dfdaf8d1-8976-4c13-a829-" - "3345e03cf810/storage_instances/storage-" - "1/acl_policy" - }, - "active_initiators": [], - "active_storage_nodes": [ - "/storage_nodes/78b350a8-43f2-453f-a257-8df76d7406b9" - ], - "admin_state": "offline", - "auth": { - "initiator_pswd": "(hidden)", - "initiator_user_name": "", - "path": "/app_instances/dfdaf8d1-8976-4c13-a829-" - "3345e03cf810/storage_instances/storage-1/auth", - 
"target_pswd": "(hidden)", - "target_user_name": "", - "type": "none" - }, - "creation_type": "user", - "ip_pool": "/access_network_ip_pools/default", - "name": "storage-1", - "op_state": "unavailable", - "path": "/app_instances/dfdaf8d1-8976-4c13-a829-3345e03cf810" - "/storage_instances/storage-1", - "uuid": "5620a673-9985-464e-9616-e325a50eac60", - "volumes": { - "volume-1": { - "capacity_in_use": 0, - "name": "volume-1", - "op_state": "available", - "path": "/app_instances/dfdaf8d1-8976-4c13-a829-" - "3345e03cf810/storage_instances/storage-" - "1/volumes/volume-1", - "replica_count": 1, - "size": 5, - "snapshot_policies": {}, - "snapshots": {}, - "uuid": "c20aba21-6ef6-446b-b374-45733b4883ba" - } - } - } - }, - "uuid": "c20aba21-6ef6-446b-b374-45733b4883ba" - }] -} - - -def _stub_datera_volume(*args, **kwargs): - return { - "status": "available", - "name": "test", - "num_replicas": "2", - "parent": "00000000-0000-0000-0000-000000000000", - "size": "1024", - "sub_type": "IS_ORIGINAL", - "uuid": "10305aa4-1343-4363-86fe-f49eb421a48c", - "snapshots": [], - "snapshot_configs": [], - "targets": [ - kwargs.get('targets', "744e1bd8-d741-4919-86cd-806037d98c8a"), - ] - } - - -def _stub_volume(*args, **kwargs): - uuid = u'c20aba21-6ef6-446b-b374-45733b4883ba' - name = u'volume-00000001' - size = 1 - volume = {} - volume['id'] = kwargs.get('id', uuid) - volume['display_name'] = kwargs.get('display_name', name) - volume['size'] = kwargs.get('size', size) - volume['provider_location'] = kwargs.get('provider_location', None) - volume['volume_type_id'] = kwargs.get('volume_type_id', None) - return volume - - -def _stub_snapshot(*args, **kwargs): - uuid = u'0bb34f0c-fea4-48e0-bf96-591120ac7e3c' - name = u'snapshot-00000001' - size = 1 - volume = {} - volume['id'] = kwargs.get('id', uuid) - volume['display_name'] = kwargs.get('display_name', name) - volume['volume_size'] = kwargs.get('size', size) - volume['volume_id'] = kwargs.get('volume_id', None) - return volume - - -def 
_config_getter(*args, **kwargs): - return {} diff --git a/cinder/tests/unit/volume/drivers/test_dothill.py b/cinder/tests/unit/volume/drivers/test_dothill.py deleted file mode 100644 index 80f6c6e49..000000000 --- a/cinder/tests/unit/volume/drivers/test_dothill.py +++ /dev/null @@ -1,783 +0,0 @@ -# Copyright 2014 Objectif Libre -# Copyright 2015 DotHill Systems -# Copyright 2016 Seagate Technology or one of its affiliates -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -# -"""Unit tests for OpenStack Cinder DotHill driver.""" - - -from lxml import etree -import mock -import requests - -from cinder import exception -from cinder.objects import fields -from cinder import test -from cinder.volume.drivers.dothill import dothill_client as dothill -from cinder.volume.drivers.dothill import dothill_common -from cinder.volume.drivers.dothill import dothill_fc -from cinder.volume.drivers.dothill import dothill_iscsi -from cinder.zonemanager import utils as fczm_utils - -session_key = '12a1626754554a21d85040760c81b' -resp_login = ''' - success - 0 - 12a1626754554a21d85040760c81b - 1''' - -resp_fw = '''GLS220R001 - 0''' - -resp_badlogin = ''' - error - 1 - Authentication failure - 1''' -response_ok = ''' - some data - 0 - ''' -response_not_ok = ''' - Error Message - 1 - ''' -response_stats_linear = ''' - 3863830528 - 3863830528 - ''' -response_stats_virtual = ''' - 3863830528 - 3863830528 - ''' -response_no_lun = '''''' -response_lun = ''' - 1 - - 4''' -response_ports = ''' - - FC 
- id1 - Disconnected - - FC - id2 - Up - - iSCSI - id3 - 10.0.0.10 - Disconnected - - iSCSI - id4 - 10.0.0.11 - Up - - iSCSI - id5 - 10.0.0.12 - Up - ''' - -response_ports_linear = response_ports % {'ip': 'primary-ip-address'} -response_ports_virtual = response_ports % {'ip': 'ip-address'} - - -invalid_xml = '''''' -malformed_xml = '''''' -fake_xml = '''''' - -stats_low_space = {'free_capacity_gb': 10, 'total_capacity_gb': 100} -stats_large_space = {'free_capacity_gb': 90, 'total_capacity_gb': 100} - -vol_id = 'fceec30e-98bc-4ce5-85ff-d7309cc17cc2' -test_volume = {'id': vol_id, 'name_id': None, - 'display_name': 'test volume', 'name': 'volume', 'size': 10} -test_retype_volume = {'attach_status': fields.VolumeAttachStatus.DETACHED, - 'id': vol_id, 'name_id': None, - 'display_name': 'test volume', 'name': 'volume', - 'size': 10} -test_host = {'capabilities': {'location_info': - 'DotHillVolumeDriver:xxxxx:dg02:A'}} -test_snap = {'id': 'aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa', - 'volume': {'name_id': None}, - 'volume_id': vol_id, 'display_name': 'test volume', - 'name': 'volume', 'volume_size': 10} -encoded_volid = 'v_O7DDpi8TOWF_9cwnMF' -encoded_snapid = 's_O7DDpi8TOWF_9cwnMF' -dest_volume = {'id': 'aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa', - 'source_volid': vol_id, - 'display_name': 'test volume', 'name': 'volume', 'size': 10} -dest_volume_larger = {'id': 'aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa', - 'name_id': None, - 'source_volid': vol_id, - 'display_name': 'test volume', - 'name': 'volume', 'size': 20} -attached_volume = {'id': vol_id, - 'display_name': 'test volume', 'name': 'volume', - 'size': 10, 'status': 'in-use', - 'attach_status': fields.VolumeAttachStatus.ATTACHED} -attaching_volume = {'id': vol_id, - 'display_name': 'test volume', 'name': 'volume', - 'size': 10, 'status': 'attaching', - 'attach_status': fields.VolumeAttachStatus.ATTACHED} -detached_volume = {'id': vol_id, 'name_id': None, - 'display_name': 'test volume', 'name': 'volume', - 'size': 10, 'status': 
'available', - 'attach_status': 'detached'} - -connector = {'ip': '10.0.0.2', - 'initiator': 'iqn.1993-08.org.debian:01:222', - 'wwpns': ["111111111111111", "111111111111112"], - 'wwnns': ["211111111111111", "211111111111112"], - 'host': 'fakehost'} -invalid_connector = {'ip': '10.0.0.2', - 'initiator': '', - 'wwpns': [], - 'wwnns': [], - 'host': 'fakehost'} - - -class TestDotHillClient(test.TestCase): - def setUp(self): - super(TestDotHillClient, self).setUp() - self.login = 'manage' - self.passwd = '!manage' - self.ip = '10.0.0.1' - self.protocol = 'http' - self.ssl_verify = False - self.client = dothill.DotHillClient(self.ip, self.login, self.passwd, - self.protocol, self.ssl_verify) - - @mock.patch('requests.get') - def test_login(self, mock_requests_get): - m = mock.Mock() - mock_requests_get.return_value = m - - m.text.encode.side_effect = [resp_badlogin, resp_badlogin] - self.assertRaises(exception.DotHillAuthenticationError, - self.client.login) - - m.text.encode.side_effect = [resp_login, resp_fw] - self.client.login() - self.assertEqual(session_key, self.client._session_key) - - def test_build_request_url(self): - url = self.client._build_request_url('/path') - self.assertEqual('http://10.0.0.1/api/path', url) - url = self.client._build_request_url('/path', arg1='val1') - self.assertEqual('http://10.0.0.1/api/path/arg1/val1', url) - url = self.client._build_request_url('/path', arg_1='val1') - self.assertEqual('http://10.0.0.1/api/path/arg-1/val1', url) - url = self.client._build_request_url('/path', 'arg1') - self.assertEqual('http://10.0.0.1/api/path/arg1', url) - url = self.client._build_request_url('/path', 'arg1', arg2='val2') - self.assertEqual('http://10.0.0.1/api/path/arg2/val2/arg1', url) - url = self.client._build_request_url('/path', 'arg1', 'arg3', - arg2='val2') - self.assertEqual('http://10.0.0.1/api/path/arg2/val2/arg1/arg3', url) - - @mock.patch('requests.get') - def test_request(self, mock_requests_get): - self.client._session_key = 
session_key - - m = mock.Mock() - m.text.encode.side_effect = [response_ok, malformed_xml, - requests.exceptions. - RequestException("error")] - mock_requests_get.return_value = m - ret = self.client._api_request('/path') - self.assertTrue(type(ret) == etree._Element) - self.assertRaises(exception.DotHillConnectionError, - self.client._api_request, - '/path') - self.assertRaises(exception.DotHillConnectionError, - self.client._api_request, - '/path') - - def test_assert_response_ok(self): - ok_tree = etree.XML(response_ok) - not_ok_tree = etree.XML(response_not_ok) - invalid_tree = etree.XML(invalid_xml) - ret = self.client._assert_response_ok(ok_tree) - self.assertIsNone(ret) - self.assertRaises(exception.DotHillRequestError, - self.client._assert_response_ok, - not_ok_tree) - self.assertRaises(exception.DotHillRequestError, - self.client._assert_response_ok, invalid_tree) - - @mock.patch.object(dothill.DotHillClient, '_request') - def test_backend_exists(self, mock_request): - mock_request.side_effect = [exception.DotHillRequestError, - fake_xml] - self.assertFalse(self.client.backend_exists('backend_name', - 'linear')) - self.assertTrue(self.client.backend_exists('backend_name', - 'linear')) - - @mock.patch.object(dothill.DotHillClient, '_request') - def test_backend_stats(self, mock_request): - stats = {'free_capacity_gb': 1979, - 'total_capacity_gb': 1979} - linear = etree.XML(response_stats_linear) - virtual = etree.XML(response_stats_virtual) - mock_request.side_effect = [linear, virtual] - - self.assertEqual(stats, self.client.backend_stats('OpenStack', - 'linear')) - self.assertEqual(stats, self.client.backend_stats('A', - 'virtual')) - - @mock.patch.object(dothill.DotHillClient, '_request') - def test_get_lun(self, mock_request): - mock_request.side_effect = [etree.XML(response_no_lun), - etree.XML(response_lun)] - ret = self.client._get_first_available_lun_for_host("fakehost") - self.assertEqual(1, ret) - ret = 
self.client._get_first_available_lun_for_host("fakehost") - self.assertEqual(2, ret) - - @mock.patch.object(dothill.DotHillClient, '_request') - def test_get_ports(self, mock_request): - mock_request.side_effect = [etree.XML(response_ports)] - ret = self.client.get_active_target_ports() - self.assertEqual([{'port-type': 'FC', - 'target-id': 'id2', - 'status': 'Up'}, - {'port-type': 'iSCSI', - 'target-id': 'id4', - 'status': 'Up'}, - {'port-type': 'iSCSI', - 'target-id': 'id5', - 'status': 'Up'}], ret) - - @mock.patch.object(dothill.DotHillClient, '_request') - def test_get_fc_ports(self, mock_request): - mock_request.side_effect = [etree.XML(response_ports)] - ret = self.client.get_active_fc_target_ports() - self.assertEqual(['id2'], ret) - - @mock.patch.object(dothill.DotHillClient, '_request') - def test_get_iscsi_iqns(self, mock_request): - mock_request.side_effect = [etree.XML(response_ports)] - ret = self.client.get_active_iscsi_target_iqns() - self.assertEqual(['id4', 'id5'], ret) - - @mock.patch.object(dothill.DotHillClient, '_request') - def test_get_iscsi_portals(self, mock_request): - portals = {'10.0.0.12': 'Up', '10.0.0.11': 'Up'} - mock_request.side_effect = [etree.XML(response_ports_linear), - etree.XML(response_ports_virtual)] - ret = self.client.get_active_iscsi_target_portals() - self.assertEqual(portals, ret) - ret = self.client.get_active_iscsi_target_portals() - self.assertEqual(portals, ret) - - -class FakeConfiguration1(object): - dothill_backend_name = 'OpenStack' - dothill_backend_type = 'linear' - san_ip = '10.0.0.1' - san_login = 'manage' - san_password = '!manage' - dothill_api_protocol = 'http' - - def safe_get(self, key): - return 'fakevalue' - - -class FakeConfiguration2(FakeConfiguration1): - dothill_iscsi_ips = ['10.0.0.11'] - use_chap_auth = None - - -class TestFCDotHillCommon(test.TestCase): - def setUp(self): - super(TestFCDotHillCommon, self).setUp() - self.config = FakeConfiguration1() - self.common = 
dothill_common.DotHillCommon(self.config) - self.common.client_login = mock.MagicMock() - self.common.client_logout = mock.MagicMock() - self.common.serialNumber = "xxxxx" - self.common.owner = "A" - self.connector_element = "wwpns" - - @mock.patch.object(dothill.DotHillClient, 'get_serial_number') - @mock.patch.object(dothill.DotHillClient, 'get_owner_info') - @mock.patch.object(dothill.DotHillClient, 'backend_exists') - def test_do_setup(self, mock_backend_exists, - mock_owner_info, mock_serial_number): - mock_backend_exists.side_effect = [False, True] - mock_owner_info.return_value = "A" - mock_serial_number.return_value = "xxxxx" - self.assertRaises(exception.DotHillInvalidBackend, - self.common.do_setup, None) - self.assertIsNone(self.common.do_setup(None)) - mock_backend_exists.assert_called_with(self.common.backend_name, - self.common.backend_type) - mock_owner_info.assert_called_with(self.common.backend_name, - self.common.backend_type) - - def test_vol_name(self): - self.assertEqual(encoded_volid, self.common._get_vol_name(vol_id)) - self.assertEqual(encoded_snapid, self.common._get_snap_name(vol_id)) - - def test_check_flags(self): - class FakeOptions(object): - def __init__(self, d): - for k, v in d.items(): - self.__dict__[k] = v - - options = FakeOptions({'opt1': 'val1', 'opt2': 'val2'}) - required_flags = ['opt1', 'opt2'] - ret = self.common.check_flags(options, required_flags) - self.assertIsNone(ret) - - options = FakeOptions({'opt1': 'val1', 'opt2': 'val2'}) - required_flags = ['opt1', 'opt2', 'opt3'] - self.assertRaises(exception.Invalid, self.common.check_flags, - options, required_flags) - - def test_assert_connector_ok(self): - self.assertRaises(exception.InvalidInput, - self.common._assert_connector_ok, invalid_connector, - self.connector_element) - self.assertIsNone(self.common._assert_connector_ok( - connector, - self.connector_element)) - - @mock.patch.object(dothill.DotHillClient, 'backend_stats') - def test_update_volume_stats(self, 
mock_stats): - mock_stats.side_effect = [exception.DotHillRequestError, - stats_large_space] - - self.assertRaises(exception.Invalid, self.common._update_volume_stats) - mock_stats.assert_called_with(self.common.backend_name, - self.common.backend_type) - ret = self.common._update_volume_stats() - - self.assertIsNone(ret) - self.assertEqual({'driver_version': self.common.VERSION, - 'pools': [{'QoS_support': False, - 'free_capacity_gb': 90, - 'location_info': - 'DotHillVolumeDriver:xxxxx:OpenStack:A', - 'pool_name': 'OpenStack', - 'total_capacity_gb': 100}], - 'storage_protocol': None, - 'vendor_name': 'DotHill', - 'volume_backend_name': None}, self.common.stats) - - @mock.patch.object(dothill.DotHillClient, 'create_volume') - def test_create_volume(self, mock_create): - mock_create.side_effect = [exception.DotHillRequestError, None] - - self.assertRaises(exception.Invalid, self.common.create_volume, - test_volume) - ret = self.common.create_volume(test_volume) - self.assertIsNone(ret) - mock_create.assert_called_with(encoded_volid, - "%sGiB" % test_volume['size'], - self.common.backend_name, - self.common.backend_type) - - @mock.patch.object(dothill.DotHillClient, 'delete_volume') - def test_delete_volume(self, mock_delete): - not_found_e = exception.DotHillRequestError( - 'The volume was not found on this system.') - mock_delete.side_effect = [not_found_e, exception.DotHillRequestError, - None] - self.assertIsNone(self.common.delete_volume(test_volume)) - self.assertRaises(exception.Invalid, self.common.delete_volume, - test_volume) - self.assertIsNone(self.common.delete_volume(test_volume)) - mock_delete.assert_called_with(encoded_volid) - - @mock.patch.object(dothill.DotHillClient, 'copy_volume') - @mock.patch.object(dothill.DotHillClient, 'backend_stats') - def test_create_cloned_volume(self, mock_stats, mock_copy): - mock_stats.side_effect = [stats_low_space, stats_large_space, - stats_large_space] - - self.assertRaises(exception.DotHillNotEnoughSpace, - 
self.common.create_cloned_volume, - dest_volume, detached_volume) - self.assertFalse(mock_copy.called) - - mock_copy.side_effect = [exception.DotHillRequestError, None] - self.assertRaises(exception.Invalid, - self.common.create_cloned_volume, - dest_volume, detached_volume) - - ret = self.common.create_cloned_volume(dest_volume, detached_volume) - self.assertIsNone(ret) - - mock_copy.assert_called_with(encoded_volid, - 'vqqqqqqqqqqqqqqqqqqq', - self.common.backend_name, - self.common.backend_type) - - @mock.patch.object(dothill.DotHillClient, 'copy_volume') - @mock.patch.object(dothill.DotHillClient, 'backend_stats') - @mock.patch.object(dothill_common.DotHillCommon, 'extend_volume') - def test_create_cloned_volume_larger(self, mock_extend, mock_stats, - mock_copy): - mock_stats.side_effect = [stats_low_space, stats_large_space, - stats_large_space] - - self.assertRaises(exception.DotHillNotEnoughSpace, - self.common.create_cloned_volume, - dest_volume_larger, detached_volume) - self.assertFalse(mock_copy.called) - - mock_copy.side_effect = [exception.DotHillRequestError, None] - self.assertRaises(exception.Invalid, - self.common.create_cloned_volume, - dest_volume_larger, detached_volume) - - ret = self.common.create_cloned_volume(dest_volume_larger, - detached_volume) - self.assertIsNone(ret) - mock_copy.assert_called_with(encoded_volid, - 'vqqqqqqqqqqqqqqqqqqq', - self.common.backend_name, - self.common.backend_type) - mock_extend.assert_called_once_with(dest_volume_larger, - dest_volume_larger['size']) - - @mock.patch.object(dothill.DotHillClient, 'get_volume_size') - @mock.patch.object(dothill.DotHillClient, 'extend_volume') - @mock.patch.object(dothill.DotHillClient, 'copy_volume') - @mock.patch.object(dothill.DotHillClient, 'backend_stats') - def test_create_volume_from_snapshot(self, mock_stats, mock_copy, - mock_extend, mock_get_size): - mock_stats.side_effect = [stats_low_space, stats_large_space, - stats_large_space] - - 
self.assertRaises(exception.DotHillNotEnoughSpace, - self.common.create_volume_from_snapshot, - dest_volume, test_snap) - - mock_copy.side_effect = [exception.DotHillRequestError, None] - mock_get_size.return_value = test_snap['volume_size'] - self.assertRaises(exception.Invalid, - self.common.create_volume_from_snapshot, - dest_volume, test_snap) - - ret = self.common.create_volume_from_snapshot(dest_volume_larger, - test_snap) - self.assertIsNone(ret) - mock_copy.assert_called_with('sqqqqqqqqqqqqqqqqqqq', - 'vqqqqqqqqqqqqqqqqqqq', - self.common.backend_name, - self.common.backend_type) - mock_extend.assert_called_with('vqqqqqqqqqqqqqqqqqqq', '10GiB') - - @mock.patch.object(dothill.DotHillClient, 'get_volume_size') - @mock.patch.object(dothill.DotHillClient, 'extend_volume') - def test_extend_volume(self, mock_extend, mock_size): - mock_extend.side_effect = [exception.DotHillRequestError, None] - mock_size.side_effect = [10, 10] - self.assertRaises(exception.Invalid, self.common.extend_volume, - test_volume, 20) - ret = self.common.extend_volume(test_volume, 20) - self.assertIsNone(ret) - mock_extend.assert_called_with(encoded_volid, '10GiB') - - @mock.patch.object(dothill.DotHillClient, 'create_snapshot') - def test_create_snapshot(self, mock_create): - mock_create.side_effect = [exception.DotHillRequestError, None] - - self.assertRaises(exception.Invalid, self.common.create_snapshot, - test_snap) - ret = self.common.create_snapshot(test_snap) - self.assertIsNone(ret) - mock_create.assert_called_with(encoded_volid, 'sqqqqqqqqqqqqqqqqqqq') - - @mock.patch.object(dothill.DotHillClient, 'delete_snapshot') - def test_delete_snapshot(self, mock_delete): - not_found_e = exception.DotHillRequestError( - 'The volume was not found on this system.') - mock_delete.side_effect = [not_found_e, exception.DotHillRequestError, - None] - - self.assertIsNone(self.common.delete_snapshot(test_snap)) - self.assertRaises(exception.Invalid, self.common.delete_snapshot, - test_snap) - 
self.assertIsNone(self.common.delete_snapshot(test_snap)) - mock_delete.assert_called_with('sqqqqqqqqqqqqqqqqqqq') - - @mock.patch.object(dothill.DotHillClient, 'map_volume') - def test_map_volume(self, mock_map): - mock_map.side_effect = [exception.DotHillRequestError, 10] - - self.assertRaises(exception.Invalid, self.common.map_volume, - test_volume, connector, self.connector_element) - lun = self.common.map_volume(test_volume, connector, - self.connector_element) - self.assertEqual(10, lun) - mock_map.assert_called_with(encoded_volid, - connector, self.connector_element) - - @mock.patch.object(dothill.DotHillClient, 'unmap_volume') - def test_unmap_volume(self, mock_unmap): - mock_unmap.side_effect = [exception.DotHillRequestError, None] - - self.assertRaises(exception.Invalid, self.common.unmap_volume, - test_volume, connector, self.connector_element) - ret = self.common.unmap_volume(test_volume, connector, - self.connector_element) - self.assertIsNone(ret) - mock_unmap.assert_called_with(encoded_volid, connector, - self.connector_element) - - @mock.patch.object(dothill.DotHillClient, 'copy_volume') - @mock.patch.object(dothill.DotHillClient, 'delete_volume') - @mock.patch.object(dothill.DotHillClient, 'modify_volume_name') - def test_retype(self, mock_modify, mock_delete, mock_copy): - mock_copy.side_effect = [exception.DotHillRequestError, None] - self.assertRaises(exception.Invalid, self.common.migrate_volume, - test_retype_volume, test_host) - ret = self.common.migrate_volume(test_retype_volume, test_host) - self.assertEqual((True, None), ret) - ret = self.common.migrate_volume(test_retype_volume, - {'capabilities': {}}) - self.assertEqual((False, None), ret) - - @mock.patch.object(dothill_common.DotHillCommon, '_get_vol_name') - @mock.patch.object(dothill.DotHillClient, 'modify_volume_name') - def test_manage_existing(self, mock_modify, mock_volume): - existing_ref = {'source-name': 'xxxx'} - mock_modify.side_effect = [exception.DotHillRequestError, None] 
- self.assertRaises(exception.Invalid, self.common.manage_existing, - test_volume, existing_ref) - ret = self.common.manage_existing(test_volume, existing_ref) - self.assertIsNone(ret) - - @mock.patch.object(dothill.DotHillClient, 'get_volume_size') - def test_manage_existing_get_size(self, mock_volume): - existing_ref = {'source-name': 'xxxx'} - mock_volume.side_effect = [exception.DotHillRequestError, 1] - self.assertRaises(exception.Invalid, - self.common.manage_existing_get_size, - None, existing_ref) - ret = self.common.manage_existing_get_size(None, existing_ref) - self.assertEqual(1, ret) - - -class TestISCSIDotHillCommon(TestFCDotHillCommon): - def setUp(self): - super(TestISCSIDotHillCommon, self).setUp() - self.connector_element = 'initiator' - - -class TestDotHillFC(test.TestCase): - @mock.patch.object(dothill_common.DotHillCommon, 'do_setup') - def setUp(self, mock_setup): - super(TestDotHillFC, self).setUp() - self.vendor_name = 'DotHill' - - mock_setup.return_value = True - - def fake_init(self, *args, **kwargs): - super(dothill_fc.DotHillFCDriver, self).__init__() - self.common = None - self.configuration = FakeConfiguration1() - self.lookup_service = fczm_utils.create_lookup_service() - - dothill_fc.DotHillFCDriver.__init__ = fake_init - self.driver = dothill_fc.DotHillFCDriver() - self.driver.do_setup(None) - - def _test_with_mock(self, mock, method, args, expected=None): - func = getattr(self.driver, method) - mock.side_effect = [exception.Invalid(), None] - self.assertRaises(exception.Invalid, func, *args) - self.assertEqual(expected, func(*args)) - - @mock.patch.object(dothill_common.DotHillCommon, 'create_volume') - def test_create_volume(self, mock_create): - self._test_with_mock(mock_create, 'create_volume', [None]) - - @mock.patch.object(dothill_common.DotHillCommon, - 'create_cloned_volume') - def test_create_cloned_volume(self, mock_create): - self._test_with_mock(mock_create, 'create_cloned_volume', [None, None]) - - 
@mock.patch.object(dothill_common.DotHillCommon, - 'create_volume_from_snapshot') - def test_create_volume_from_snapshot(self, mock_create): - self._test_with_mock(mock_create, 'create_volume_from_snapshot', - [None, None]) - - @mock.patch.object(dothill_common.DotHillCommon, 'delete_volume') - def test_delete_volume(self, mock_delete): - self._test_with_mock(mock_delete, 'delete_volume', [None]) - - @mock.patch.object(dothill_common.DotHillCommon, 'create_snapshot') - def test_create_snapshot(self, mock_create): - self._test_with_mock(mock_create, 'create_snapshot', [None]) - - @mock.patch.object(dothill_common.DotHillCommon, 'delete_snapshot') - def test_delete_snapshot(self, mock_delete): - self._test_with_mock(mock_delete, 'delete_snapshot', [None]) - - @mock.patch.object(dothill_common.DotHillCommon, 'extend_volume') - def test_extend_volume(self, mock_extend): - self._test_with_mock(mock_extend, 'extend_volume', [None, 10]) - - @mock.patch.object(dothill_common.DotHillCommon, 'client_logout') - @mock.patch.object(dothill_common.DotHillCommon, - 'get_active_fc_target_ports') - @mock.patch.object(dothill_common.DotHillCommon, 'map_volume') - @mock.patch.object(dothill_common.DotHillCommon, 'client_login') - def test_initialize_connection(self, mock_login, mock_map, mock_ports, - mock_logout): - mock_login.return_value = None - mock_logout.return_value = None - mock_map.side_effect = [exception.Invalid, 1] - mock_ports.side_effect = [['id1']] - - self.assertRaises(exception.Invalid, - self.driver.initialize_connection, test_volume, - connector) - mock_map.assert_called_with(test_volume, connector, 'wwpns') - - ret = self.driver.initialize_connection(test_volume, connector) - self.assertEqual({'driver_volume_type': 'fibre_channel', - 'data': {'initiator_target_map': { - '111111111111111': ['id1'], - '111111111111112': ['id1']}, - 'target_wwn': ['id1'], - 'target_lun': 1, - 'target_discovered': True}}, ret) - - @mock.patch.object(dothill_common.DotHillCommon, 
'unmap_volume') - @mock.patch.object(dothill.DotHillClient, 'list_luns_for_host') - def test_terminate_connection(self, mock_list, mock_unmap): - mock_unmap.side_effect = [1] - mock_list.side_effect = ['yes'] - actual = {'driver_volume_type': 'fibre_channel', 'data': {}} - ret = self.driver.terminate_connection(test_volume, connector) - self.assertEqual(actual, ret) - mock_unmap.assert_called_with(test_volume, connector, 'wwpns') - ret = self.driver.terminate_connection(test_volume, connector) - self.assertEqual(actual, ret) - - @mock.patch.object(dothill_common.DotHillCommon, 'get_volume_stats') - def test_get_volume_stats(self, mock_stats): - stats = {'storage_protocol': None, - 'driver_version': self.driver.VERSION, - 'volume_backend_name': None, - 'vendor_name': self.vendor_name, - 'pools': [{'free_capacity_gb': 90, - 'reserved_percentage': 0, - 'total_capacity_gb': 100, - 'QoS_support': False, - 'location_info': 'xx:xx:xx:xx', - 'pool_name': 'x'}]} - mock_stats.side_effect = [exception.Invalid, stats, stats] - - self.assertRaises(exception.Invalid, self.driver.get_volume_stats, - False) - ret = self.driver.get_volume_stats(False) - self.assertEqual(stats, ret) - - ret = self.driver.get_volume_stats(True) - self.assertEqual(stats, ret) - mock_stats.assert_called_with(True) - - @mock.patch.object(dothill_common.DotHillCommon, 'retype') - def test_retype(self, mock_retype): - mock_retype.side_effect = [exception.Invalid, True, False] - args = [None, None, None, None, None] - self.assertRaises(exception.Invalid, self.driver.retype, *args) - self.assertTrue(self.driver.retype(*args)) - self.assertFalse(self.driver.retype(*args)) - - @mock.patch.object(dothill_common.DotHillCommon, 'manage_existing') - def test_manage_existing(self, mock_manage_existing): - self._test_with_mock(mock_manage_existing, 'manage_existing', - [None, None]) - - @mock.patch.object(dothill_common.DotHillCommon, - 'manage_existing_get_size') - def test_manage_size(self, mock_manage_size): - 
mock_manage_size.side_effect = [exception.Invalid, 1] - self.assertRaises(exception.Invalid, - self.driver.manage_existing_get_size, - None, None) - self.assertEqual(1, self.driver.manage_existing_get_size(None, None)) - - -class TestDotHillISCSI(TestDotHillFC): - @mock.patch.object(dothill_common.DotHillCommon, 'do_setup') - def setUp(self, mock_setup): - super(TestDotHillISCSI, self).setUp() - self.vendor_name = 'DotHill' - mock_setup.return_value = True - - def fake_init(self, *args, **kwargs): - super(dothill_iscsi.DotHillISCSIDriver, self).__init__() - self.common = None - self.configuration = FakeConfiguration2() - self.iscsi_ips = ['10.0.0.11'] - - dothill_iscsi.DotHillISCSIDriver.__init__ = fake_init - self.driver = dothill_iscsi.DotHillISCSIDriver() - self.driver.do_setup(None) - - @mock.patch.object(dothill_common.DotHillCommon, 'client_logout') - @mock.patch.object(dothill_common.DotHillCommon, - 'get_active_iscsi_target_portals') - @mock.patch.object(dothill_common.DotHillCommon, - 'get_active_iscsi_target_iqns') - @mock.patch.object(dothill_common.DotHillCommon, 'map_volume') - @mock.patch.object(dothill_common.DotHillCommon, 'client_login') - def test_initialize_connection(self, mock_login, mock_map, mock_iqns, - mock_portals, mock_logout): - mock_login.return_value = None - mock_logout.return_value = None - mock_map.side_effect = [exception.Invalid, 1] - self.driver.iscsi_ips = ['10.0.0.11'] - self.driver.initialize_iscsi_ports() - mock_iqns.side_effect = [['id2']] - mock_portals.return_value = {'10.0.0.11': 'Up', '10.0.0.12': 'Up'} - - self.assertRaises(exception.Invalid, - self.driver.initialize_connection, test_volume, - connector) - mock_map.assert_called_with(test_volume, connector, 'initiator') - - ret = self.driver.initialize_connection(test_volume, connector) - self.assertEqual({'driver_volume_type': 'iscsi', - 'data': {'target_iqn': 'id2', - 'target_lun': 1, - 'target_discovered': True, - 'target_portal': '10.0.0.11:3260'}}, ret) - - 
@mock.patch.object(dothill_common.DotHillCommon, 'unmap_volume') - def test_terminate_connection(self, mock_unmap): - mock_unmap.side_effect = [exception.Invalid, 1] - - self.assertRaises(exception.Invalid, - self.driver.terminate_connection, test_volume, - connector) - mock_unmap.assert_called_with(test_volume, connector, 'initiator') - - ret = self.driver.terminate_connection(test_volume, connector) - self.assertIsNone(ret) diff --git a/cinder/tests/unit/volume/drivers/test_drbdmanagedrv.py b/cinder/tests/unit/volume/drivers/test_drbdmanagedrv.py deleted file mode 100644 index 623902a89..000000000 --- a/cinder/tests/unit/volume/drivers/test_drbdmanagedrv.py +++ /dev/null @@ -1,704 +0,0 @@ -# Copyright (c) 2014 LINBIT HA Solutions GmbH -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -import collections -import eventlet -import six -import sys -import time - -import mock -from oslo_utils import importutils -from oslo_utils import timeutils - -from cinder import context -from cinder import test -from cinder.volume import configuration as conf - - -class mock_dbus(object): - def __init__(self): - pass - - @staticmethod - def Array(defaults, signature=None): - return defaults - - -class mock_dm_consts(object): - - TQ_GET_PATH = "get_path" - - NODE_ADDR = "addr" - - CSTATE_PREFIX = "cstate:" - TSTATE_PREFIX = "tstate:" - - FLAG_UPD_POOL = "upd_pool" - FLAG_UPDATE = "update" - FLAG_DRBDCTRL = "drbdctrl" - FLAG_STORAGE = "storage" - FLAG_EXTERNAL = "external" - FLAG_DEPLOY = "deploy" - - FLAG_DISKLESS = "diskless" - FLAG_CONNECT = "connect" - FLAG_UPD_CON = "upd_con" - FLAG_RECONNECT = "reconnect" - FLAG_OVERWRITE = "overwrite" - FLAG_DISCARD = "discard" - FLAG_UPD_CONFIG = "upd_config" - FLAG_STANDBY = "standby" - FLAG_QIGNORE = "qignore" - FLAG_REMOVE = "remove" - - AUX_PROP_PREFIX = "aux:" - - BOOL_TRUE = "true" - BOOL_FALSE = "false" - - VOL_ID = "vol_id" - - -class mock_dm_exc(object): - - DM_SUCCESS = 0 - DM_INFO = 1 - DM_EEXIST = 101 - DM_ENOENT = 102 - DM_ERROR = 1000 - - -class mock_dm_utils(object): - - @staticmethod - def _aux_prop_name(key): - if six.text_type(key).startswith(mock_dm_consts.AUX_PROP_PREFIX): - return key[len(mock_dm_consts.AUX_PROP_PREFIX):] - else: - return None - - @staticmethod - def aux_props_to_dict(props): - aux_props = {} - for (key, val) in props.items(): - aux_key = mock_dm_utils._aux_prop_name(key) - if aux_key is not None: - aux_props[aux_key] = val - return aux_props - - @staticmethod - def dict_to_aux_props(props): - aux_props = {} - for (key, val) in props.items(): - aux_key = mock_dm_consts.AUX_PROP_PREFIX + six.text_type(key) - aux_props[aux_key] = six.text_type(val) - return aux_props - - -def public_keys(c): - return [n for n in c.__dict__.keys() if not n.startswith("_")] - - -sys.modules['dbus'] = 
mock_dbus -sys.modules['drbdmanage'] = collections.namedtuple( - 'module', ['consts', 'exceptions', 'utils']) -sys.modules['drbdmanage.utils'] = collections.namedtuple( - 'module', public_keys(mock_dm_utils)) -sys.modules['drbdmanage.consts'] = collections.namedtuple( - 'module', public_keys(mock_dm_consts)) -sys.modules['drbdmanage.exceptions'] = collections.namedtuple( - 'module', public_keys(mock_dm_exc)) - -import cinder.volume.drivers.drbdmanagedrv as drv - -drv.dbus = mock_dbus -drv.dm_const = mock_dm_consts -drv.dm_utils = mock_dm_utils -drv.dm_exc = mock_dm_exc - - -def create_configuration(object): - configuration = mock.MockObject(conf.Configuration) - configuration.san_is_local = False - configuration.append_config_values(mock.IgnoreArg()) - return configuration - - -class DrbdManageFakeDriver(object): - - resources = {} - - def __init__(self): - self.calls = [] - self.cur = -1 - - def call_count(self): - return len(self.calls) - - def next_call(self): - self.cur += 1 - return self.calls[self.cur][0] - - def call_parm(self, arg_idx): - return self.calls[self.cur][arg_idx] - - def run_external_plugin(self, name, props): - self.calls.append(["run_external_plugin", name, props]) - - call_okay = [[mock_dm_exc.DM_SUCCESS, "ACK", []]] - not_done_yet = (call_okay, - dict(timeout=mock_dm_consts.BOOL_FALSE, - result=mock_dm_consts.BOOL_FALSE)) - success = (call_okay, - dict(timeout=mock_dm_consts.BOOL_FALSE, - result=mock_dm_consts.BOOL_TRUE)) - got_timeout = (call_okay, - dict(timeout=mock_dm_consts.BOOL_TRUE, - result=mock_dm_consts.BOOL_FALSE)) - - if "retry" not in props: - # Fake success, to not slow tests down - return success - - if props["retry"] > 1: - props["retry"] -= 1 - return not_done_yet - - if props.get("run-into-timeout"): - return got_timeout - - return success - - def list_resources(self, res, serial, prop, req): - self.calls.append(["list_resources", res, prop, req]) - if ('aux:cinder-id' in prop and - 
prop['aux:cinder-id'].startswith("deadbeef")): - return ([[mock_dm_exc.DM_ENOENT, "none", []]], - []) - else: - return ([[mock_dm_exc.DM_SUCCESS, "ACK", []]], - [("res", dict(prop))]) - - def create_resource(self, res, props): - self.calls.append(["create_resource", res, props]) - return [[mock_dm_exc.DM_SUCCESS, "ack", []]] - - def create_volume(self, res, size, props): - self.calls.append(["create_volume", res, size, props]) - return [[mock_dm_exc.DM_SUCCESS, "ack", []], - [mock_dm_exc.DM_INFO, - "create_volume", - [(mock_dm_consts.VOL_ID, '2')]]] - - def auto_deploy(self, res, red, delta, site_clients): - self.calls.append(["auto_deploy", res, red, delta, site_clients]) - return [[mock_dm_exc.DM_SUCCESS, "ack", []] * red] - - def list_volumes(self, res, ser, prop, req): - self.calls.append(["list_volumes", res, ser, prop, req]) - if ('aux:cinder-id' in prop and - prop['aux:cinder-id'].startswith("deadbeef")): - return ([[mock_dm_exc.DM_SUCCESS, "none", []]], - []) - else: - return ([[mock_dm_exc.DM_SUCCESS, "ACK", []]], - [("res", dict(), [(2, dict(prop))]) - ]) - - def remove_volume(self, res, nr, force): - self.calls.append(["remove_volume", res, nr, force]) - return [[mock_dm_exc.DM_SUCCESS, "ack", []]] - - def text_query(self, cmd): - self.calls.append(["text_query", cmd]) - if cmd[0] == mock_dm_consts.TQ_GET_PATH: - return ([(mock_dm_exc.DM_SUCCESS, "ack", [])], ['/dev/drbd0']) - return ([(mock_dm_exc.DM_ERROR, 'unknown command', [])], []) - - def list_assignments(self, nodes, res, ser, prop, req): - self.calls.append(["list_assignments", nodes, res, ser, prop, req]) - if ('aux:cinder-id' in prop and - prop['aux:cinder-id'].startswith("deadbeef")): - return ([[mock_dm_exc.DM_SUCCESS, "none", []]], - []) - else: - return ([[mock_dm_exc.DM_SUCCESS, "ACK", []]], - [("node", "res", dict(), [(2, dict(prop))]) - ]) - - def create_snapshot(self, res, snap, nodes, props): - self.calls.append(["create_snapshot", res, snap, nodes, props]) - return 
[[mock_dm_exc.DM_SUCCESS, "ack", []]] - - def list_snapshots(self, res, sn, serial, prop, req): - self.calls.append(["list_snapshots", res, sn, serial, prop, req]) - if ('aux:cinder-id' in prop and - prop['aux:cinder-id'].startswith("deadbeef")): - return ([[mock_dm_exc.DM_SUCCESS, "none", []]], - []) - else: - return ([[mock_dm_exc.DM_SUCCESS, "ACK", []]], - [("res", [("snap", dict(prop))]) - ]) - - def remove_snapshot(self, res, snap, force): - self.calls.append(["remove_snapshot", res, snap, force]) - return [[mock_dm_exc.DM_SUCCESS, "ack", []]] - - def resize_volume(self, res, vol, ser, size, delta): - self.calls.append(["resize_volume", res, vol, ser, size, delta]) - return [[mock_dm_exc.DM_SUCCESS, "ack", []]] - - def restore_snapshot(self, res, snap, new, rprop, vprops): - self.calls.append(["restore_snapshot", res, snap, new, rprop, vprops]) - return [[mock_dm_exc.DM_SUCCESS, "ack", []]] - - def assign(self, host, resource, props): - self.calls.append(["assign", host, resource, props]) - return [[mock_dm_exc.DM_SUCCESS, "ack", []]] - - def create_node(self, name, prop): - self.calls.append(["create_node", name, prop]) - if name.startswith('EXIST'): - return [(mock_dm_exc.DM_EEXIST, "none", [])] - else: - return [(mock_dm_exc.DM_SUCCESS, "ack", [])] - - def set_drbdsetup_props(self, options): - self.calls.append(["set_drbdsetup_props", options]) - return [[mock_dm_exc.DM_SUCCESS, "ack", []]] - - def modify_resource(self, res, ser, props): - self.calls.append(["modify_resource", res, ser, props]) - return [[mock_dm_exc.DM_SUCCESS, "ack", []]] - - -class DrbdManageIscsiTestCase(test.TestCase): - - def _fake_safe_get(self, key): - if key == 'iscsi_helper': - return 'fake' - - if key.endswith('_policy'): - return '{}' - - if key.endswith('_options'): - return '{}' - - return None - - def _fake_safe_get_with_options(self, key): - if key == 'drbdmanage_net_options': - return('{"connect-int": "4", "allow-two-primaries": "yes", ' - '"ko-count": "30"}') - if key == 
'drbdmanage_resource_options': - return '{"auto-promote-timeout": "300"}' - if key == 'drbdmanage_disk_options': - return '{"c-min-rate": "4M"}' - - return self._fake_safe_get(key) - - @staticmethod - def _fake_sleep(amount): - pass - - def setUp(self): - self.ctxt = context.get_admin_context() - self._mock = mock.Mock() - self.configuration = mock.Mock(conf.Configuration) - self.configuration.san_is_local = True - self.configuration.reserved_percentage = 1 - - super(DrbdManageIscsiTestCase, self).setUp() - - self.mock_object(importutils, 'import_object', - self.fake_import_object) - self.mock_object(drv.DrbdManageBaseDriver, - 'call_or_reconnect', - self.fake_issue_dbus_call) - self.mock_object(drv.DrbdManageBaseDriver, - 'dbus_connect', - self.fake_issue_dbus_connect) - self.mock_object(drv.DrbdManageBaseDriver, - '_wait_for_node_assignment', - self.fake_wait_node_assignment) - - self.configuration.safe_get = self._fake_safe_get - - self.mock_object(eventlet, 'sleep', self._fake_sleep) - - # Infrastructure - def fake_import_object(self, what, configuration, db, executor): - return None - - def fake_issue_dbus_call(self, fn, *args): - return fn(*args) - - def fake_wait_node_assignment(self, *args, **kwargs): - return True - - def fake_issue_dbus_connect(self): - self.odm = DrbdManageFakeDriver() - - def call_or_reconnect(self, method, *params): - return method(*params) - - def fake_is_external_node(self, name): - return False - - # Tests per se - - def test_create_volume(self): - testvol = {'project_id': 'testprjid', - 'name': 'testvol', - 'size': 1, - 'id': 'deadbeef-8068-11e4-98c0-5254008ea111', - 'volume_type_id': 'drbdmanage', - 'created_at': timeutils.utcnow()} - - dmd = drv.DrbdManageIscsiDriver(configuration=self.configuration) - dmd.drbdmanage_devs_on_controller = False - dmd.odm = DrbdManageFakeDriver() - dmd.create_volume(testvol) - self.assertEqual(8, dmd.odm.call_count()) - self.assertEqual("create_resource", dmd.odm.next_call()) - 
self.assertEqual("set_drbdsetup_props", dmd.odm.next_call()) - self.assertEqual("set_drbdsetup_props", dmd.odm.next_call()) - self.assertEqual("set_drbdsetup_props", dmd.odm.next_call()) - self.assertEqual("list_volumes", dmd.odm.next_call()) - self.assertEqual("create_volume", dmd.odm.next_call()) - self.assertEqual(1048576, dmd.odm.call_parm(2)) - self.assertEqual("auto_deploy", dmd.odm.next_call()) - - def test_create_volume_with_options(self): - testvol = {'project_id': 'testprjid', - 'name': 'testvol', - 'size': 1, - 'id': 'deadbeef-8068-11e4-98c0-5254008ea111', - 'volume_type_id': 'drbdmanage', - 'created_at': timeutils.utcnow()} - - self.configuration.safe_get = self._fake_safe_get_with_options - dmd = drv.DrbdManageIscsiDriver(configuration=self.configuration) - dmd.drbdmanage_devs_on_controller = False - dmd.odm = DrbdManageFakeDriver() - dmd.create_volume(testvol) - - self.assertEqual(8, dmd.odm.call_count()) - - self.assertEqual("create_resource", dmd.odm.next_call()) - - self.assertEqual("set_drbdsetup_props", dmd.odm.next_call()) - self.assertEqual("reso", dmd.odm.call_parm(1)["type"]) - self.assertEqual("300", dmd.odm.call_parm(1)["auto-promote-timeout"]) - - self.assertEqual("set_drbdsetup_props", dmd.odm.next_call()) - self.assertEqual("neto", dmd.odm.call_parm(1)["type"]) - self.assertEqual("30", dmd.odm.call_parm(1)["ko-count"]) - - self.assertEqual("set_drbdsetup_props", dmd.odm.next_call()) - self.assertEqual("disko", dmd.odm.call_parm(1)["type"]) - self.assertEqual("4M", dmd.odm.call_parm(1)["c-min-rate"]) - - self.assertEqual("list_volumes", dmd.odm.next_call()) - - self.assertEqual("create_volume", dmd.odm.next_call()) - self.assertEqual(1048576, dmd.odm.call_parm(2)) - - self.assertEqual("auto_deploy", dmd.odm.next_call()) - - def test_create_volume_controller_all_vols(self): - testvol = {'project_id': 'testprjid', - 'name': 'testvol', - 'size': 1, - 'id': 'deadbeef-8068-11e4-98c0-5254008ea111', - 'volume_type_id': 'drbdmanage', - 
'created_at': timeutils.utcnow()} - - dmd = drv.DrbdManageIscsiDriver(configuration=self.configuration) - dmd.drbdmanage_devs_on_controller = True - dmd.odm = DrbdManageFakeDriver() - dmd.create_volume(testvol) - self.assertEqual("create_resource", dmd.odm.next_call()) - self.assertEqual("set_drbdsetup_props", dmd.odm.next_call()) - self.assertEqual("set_drbdsetup_props", dmd.odm.next_call()) - self.assertEqual("set_drbdsetup_props", dmd.odm.next_call()) - self.assertEqual("list_volumes", dmd.odm.next_call()) - self.assertEqual("create_volume", dmd.odm.next_call()) - self.assertEqual(1048576, dmd.odm.call_parm(2)) - self.assertEqual("auto_deploy", dmd.odm.next_call()) - self.assertEqual("run_external_plugin", dmd.odm.next_call()) - self.assertEqual("assign", dmd.odm.next_call()) - self.assertEqual(9, dmd.odm.call_count()) - - def test_delete_volume(self): - testvol = {'project_id': 'testprjid', - 'name': 'testvol', - 'size': 1, - 'id': 'ba253fd0-8068-11e4-98c0-5254008ea111', - 'volume_type_id': 'drbdmanage', - 'created_at': timeutils.utcnow()} - - dmd = drv.DrbdManageIscsiDriver(configuration=self.configuration) - dmd.odm = DrbdManageFakeDriver() - dmd.delete_volume(testvol) - self.assertEqual("list_volumes", dmd.odm.next_call()) - self.assertEqual(testvol['id'], dmd.odm.call_parm(3)["aux:cinder-id"]) - self.assertEqual("remove_volume", dmd.odm.next_call()) - - def test_local_path(self): - testvol = {'project_id': 'testprjid', - 'name': 'testvol', - 'size': 1, - 'id': 'ba253fd0-8068-11e4-98c0-5254008ea111', - 'volume_type_id': 'drbdmanage', - 'created_at': timeutils.utcnow()} - - dmd = drv.DrbdManageIscsiDriver(configuration=self.configuration) - dmd.odm = DrbdManageFakeDriver() - data = dmd.local_path(testvol) - self.assertTrue(data.startswith("/dev/drbd")) - - def test_create_snapshot(self): - testsnap = {'id': 'ca253fd0-8068-11e4-98c0-5254008ea111', - 'volume_id': 'ba253fd0-8068-11e4-98c0-5254008ea111'} - - dmd = 
drv.DrbdManageIscsiDriver(configuration=self.configuration) - dmd.odm = DrbdManageFakeDriver() - dmd.create_snapshot(testsnap) - self.assertEqual("list_volumes", dmd.odm.next_call()) - self.assertEqual("list_assignments", dmd.odm.next_call()) - self.assertEqual("create_snapshot", dmd.odm.next_call()) - self.assertIn('node', dmd.odm.call_parm(3)) - - def test_delete_snapshot(self): - testsnap = {'id': 'ca253fd0-8068-11e4-98c0-5254008ea111'} - - dmd = drv.DrbdManageIscsiDriver(configuration=self.configuration) - dmd.odm = DrbdManageFakeDriver() - dmd.delete_snapshot(testsnap) - self.assertEqual("list_snapshots", dmd.odm.next_call()) - self.assertEqual("remove_snapshot", dmd.odm.next_call()) - - def test_extend_volume(self): - testvol = {'project_id': 'testprjid', - 'name': 'testvol', - 'size': 1, - 'id': 'ba253fd0-8068-11e4-98c0-5254008ea111', - 'volume_type_id': 'drbdmanage', - 'created_at': timeutils.utcnow()} - - dmd = drv.DrbdManageIscsiDriver(configuration=self.configuration) - dmd.odm = DrbdManageFakeDriver() - dmd.extend_volume(testvol, 5) - self.assertEqual("list_volumes", dmd.odm.next_call()) - self.assertEqual(testvol['id'], dmd.odm.call_parm(3)["aux:cinder-id"]) - self.assertEqual("resize_volume", dmd.odm.next_call()) - self.assertEqual("res", dmd.odm.call_parm(1)) - self.assertEqual(2, dmd.odm.call_parm(2)) - self.assertEqual(-1, dmd.odm.call_parm(3)) - self.assertEqual(5242880, dmd.odm.call_parm(4)) - - def test_create_cloned_volume(self): - srcvol = {'project_id': 'testprjid', - 'name': 'testvol', - 'size': 1, - 'id': 'ba253fd0-8068-11e4-98c0-5254008ea111', - 'volume_type_id': 'drbdmanage', - 'created_at': timeutils.utcnow()} - - newvol = {'id': 'ca253fd0-8068-11e4-98c0-5254008ea111'} - - dmd = drv.DrbdManageIscsiDriver(configuration=self.configuration) - dmd.odm = DrbdManageFakeDriver() - dmd.create_cloned_volume(newvol, srcvol) - self.assertEqual("list_volumes", dmd.odm.next_call()) - self.assertEqual("list_assignments", dmd.odm.next_call()) - 
self.assertEqual("create_snapshot", dmd.odm.next_call()) - self.assertEqual("run_external_plugin", dmd.odm.next_call()) - self.assertEqual("list_snapshots", dmd.odm.next_call()) - self.assertEqual("restore_snapshot", dmd.odm.next_call()) - self.assertEqual("set_drbdsetup_props", dmd.odm.next_call()) - self.assertEqual("set_drbdsetup_props", dmd.odm.next_call()) - self.assertEqual("set_drbdsetup_props", dmd.odm.next_call()) - self.assertEqual("run_external_plugin", dmd.odm.next_call()) - self.assertEqual("list_snapshots", dmd.odm.next_call()) - self.assertEqual("remove_snapshot", dmd.odm.next_call()) - - def test_create_cloned_volume_larger_size(self): - srcvol = {'project_id': 'testprjid', - 'name': 'testvol', - 'size': 1, - 'id': 'ba253fd0-8068-11e4-98c0-5254008ea111', - 'volume_type_id': 'drbdmanage', - 'created_at': timeutils.utcnow()} - - newvol = {'size': 5, - 'id': 'ca253fd0-8068-11e4-98c0-5254008ea111'} - - dmd = drv.DrbdManageIscsiDriver(configuration=self.configuration) - dmd.odm = DrbdManageFakeDriver() - dmd.create_cloned_volume(newvol, srcvol) - self.assertEqual("list_volumes", dmd.odm.next_call()) - self.assertEqual("list_assignments", dmd.odm.next_call()) - self.assertEqual("create_snapshot", dmd.odm.next_call()) - self.assertEqual("run_external_plugin", dmd.odm.next_call()) - self.assertEqual("list_snapshots", dmd.odm.next_call()) - self.assertEqual("restore_snapshot", dmd.odm.next_call()) - self.assertEqual("set_drbdsetup_props", dmd.odm.next_call()) - self.assertEqual("set_drbdsetup_props", dmd.odm.next_call()) - self.assertEqual("set_drbdsetup_props", dmd.odm.next_call()) - self.assertEqual("run_external_plugin", dmd.odm.next_call()) - - # resize image checks - self.assertEqual("list_volumes", dmd.odm.next_call()) - self.assertEqual(newvol['id'], dmd.odm.call_parm(3)["aux:cinder-id"]) - self.assertEqual("resize_volume", dmd.odm.next_call()) - self.assertEqual("res", dmd.odm.call_parm(1)) - self.assertEqual(2, dmd.odm.call_parm(2)) - 
self.assertEqual(-1, dmd.odm.call_parm(3)) - self.assertEqual(5242880, dmd.odm.call_parm(4)) - - self.assertEqual("run_external_plugin", dmd.odm.next_call()) - - self.assertEqual("list_snapshots", dmd.odm.next_call()) - self.assertEqual("remove_snapshot", dmd.odm.next_call()) - - def test_create_volume_from_snapshot(self): - snap = {'project_id': 'testprjid', - 'name': 'testvol', - 'volume_size': 1, - 'id': 'ba253fd0-8068-11e4-98c0-5254008ea111', - 'volume_type_id': 'drbdmanage', - 'created_at': timeutils.utcnow()} - - newvol = {'id': 'ca253fd0-8068-11e4-98c0-5254008ea111'} - - dmd = drv.DrbdManageIscsiDriver(configuration=self.configuration) - dmd.odm = DrbdManageFakeDriver() - dmd.create_volume_from_snapshot(newvol, snap) - self.assertEqual("list_snapshots", dmd.odm.next_call()) - self.assertEqual("restore_snapshot", dmd.odm.next_call()) - self.assertEqual("set_drbdsetup_props", dmd.odm.next_call()) - self.assertEqual("set_drbdsetup_props", dmd.odm.next_call()) - self.assertEqual("set_drbdsetup_props", dmd.odm.next_call()) - self.assertEqual("run_external_plugin", dmd.odm.next_call()) - - def test_create_volume_from_snapshot_larger_size(self): - snap = {'project_id': 'testprjid', - 'name': 'testvol', - 'volume_size': 1, - 'id': 'ba253fd0-8068-11e4-98c0-5254008ea111', - 'volume_type_id': 'drbdmanage', - 'created_at': timeutils.utcnow()} - - newvol = {'size': 5, - 'id': 'ca253fd0-8068-11e4-98c0-5254008ea111'} - - dmd = drv.DrbdManageIscsiDriver(configuration=self.configuration) - dmd.odm = DrbdManageFakeDriver() - dmd.create_volume_from_snapshot(newvol, snap) - self.assertEqual("list_snapshots", dmd.odm.next_call()) - self.assertEqual("restore_snapshot", dmd.odm.next_call()) - self.assertEqual("set_drbdsetup_props", dmd.odm.next_call()) - self.assertEqual("set_drbdsetup_props", dmd.odm.next_call()) - self.assertEqual("set_drbdsetup_props", dmd.odm.next_call()) - self.assertEqual("run_external_plugin", dmd.odm.next_call()) - - -class 
DrbdManageDrbdTestCase(DrbdManageIscsiTestCase): - - def setUp(self): - super(DrbdManageDrbdTestCase, self).setUp() - - self.mock_object(drv.DrbdManageDrbdDriver, - '_is_external_node', - self.fake_is_external_node) - - def test_drbd_create_export(self): - volume = {'project_id': 'testprjid', - 'name': 'testvol', - 'size': 1, - 'id': 'ba253fd0-8068-11e4-98c0-5254008ea111', - 'volume_type_id': 'drbdmanage', - 'created_at': timeutils.utcnow()} - - connector = {'host': 'node99', - 'ip': '127.0.0.99'} - - dmd = drv.DrbdManageDrbdDriver(configuration=self.configuration) - dmd.odm = DrbdManageFakeDriver() - - x = dmd.create_export({}, volume, connector) - self.assertEqual("list_volumes", dmd.odm.next_call()) - self.assertEqual("create_node", dmd.odm.next_call()) - self.assertEqual("assign", dmd.odm.next_call()) - # local_path - self.assertEqual("list_volumes", dmd.odm.next_call()) - self.assertEqual("text_query", dmd.odm.next_call()) - self.assertEqual("local", x["driver_volume_type"]) - - -class DrbdManageCommonTestCase(DrbdManageIscsiTestCase): - def test_drbd_policy_loop_timeout(self): - dmd = drv.DrbdManageDrbdDriver(configuration=self.configuration) - dmd.odm = DrbdManageFakeDriver() - - res = dmd._call_policy_plugin('void', {}, - {'retry': 4, - 'run-into-timeout': True}) - self.assertFalse(res) - self.assertEqual(4, dmd.odm.call_count()) - self.assertEqual("run_external_plugin", dmd.odm.next_call()) - self.assertEqual("run_external_plugin", dmd.odm.next_call()) - self.assertEqual("run_external_plugin", dmd.odm.next_call()) - self.assertEqual("run_external_plugin", dmd.odm.next_call()) - - def test_drbd_policy_loop_success(self): - dmd = drv.DrbdManageDrbdDriver(configuration=self.configuration) - dmd.odm = DrbdManageFakeDriver() - - res = dmd._call_policy_plugin('void', - {'base': 'data', - 'retry': 4}, - {'override': 'xyz'}) - self.assertTrue(res) - self.assertEqual(4, dmd.odm.call_count()) - self.assertEqual("run_external_plugin", dmd.odm.next_call()) - 
self.assertEqual("run_external_plugin", dmd.odm.next_call()) - self.assertEqual("run_external_plugin", dmd.odm.next_call()) - self.assertEqual("run_external_plugin", dmd.odm.next_call()) - - def test_drbd_policy_loop_simple(self): - dmd = drv.DrbdManageDrbdDriver(configuration=self.configuration) - dmd.odm = DrbdManageFakeDriver() - - res = dmd._call_policy_plugin('policy-name', - {'base': "value", - 'over': "ignore"}, - {'over': "ride", - 'starttime': 0}) - self.assertTrue(res) - - self.assertEqual(1, dmd.odm.call_count()) - self.assertEqual("run_external_plugin", dmd.odm.next_call()) - self.assertEqual('policy-name', dmd.odm.call_parm(1)) - incoming = dmd.odm.call_parm(2) - - self.assertGreaterEqual(4, abs(float(incoming['starttime']) - - time.time())) - self.assertEqual('value', incoming['base']) - self.assertEqual('ride', incoming['over']) diff --git a/cinder/tests/unit/volume/drivers/test_falconstor_fss.py b/cinder/tests/unit/volume/drivers/test_falconstor_fss.py deleted file mode 100644 index cb1832e08..000000000 --- a/cinder/tests/unit/volume/drivers/test_falconstor_fss.py +++ /dev/null @@ -1,910 +0,0 @@ -# Copyright (c) 2016 FalconStor, Inc. -# All Rights Reserved. -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -from copy import deepcopy -import mock -import time - -from cinder import context -from cinder import exception -from cinder import test -from cinder.volume import configuration as conf -from cinder.volume.drivers.falconstor import fc -from cinder.volume.drivers.falconstor import iscsi -from cinder.volume.drivers.falconstor import rest_proxy as proxy - - -DRIVER_PATH = "cinder.volume.drivers.falconstor" -BASE_DRIVER = DRIVER_PATH + ".fss_common.FalconstorBaseDriver" -ISCSI_DRIVER = DRIVER_PATH + ".iscsi.FSSISCSIDriver" - -PRIMARY_IP = '10.0.0.1' -SECONDARY_IP = '10.0.0.2' -FAKE_ID = 123 -FAKE_SINGLE_POOLS = {'A': 1} -FAKE_MULTIPLE_POOLS = {'P': 1, 'O': 2} -FAKE = 'fake' -FAKE_HOST = 'fakehost' -API_RESPONSE = {'rc': 0} -ISCSI_VOLUME_BACKEND_NAME = "FSSISCSIDriver" -SESSION_ID = "a76d506c-abcd-1234-efgh-710e1fd90527" -VOLUME_ID = '6068ea6d-f221-4213-bde9-f1b50aecdf36' -ADD_VOLUME_ID = '6068ed7f-f231-4283-bge9-f1b51aecdf36' -GROUP_ID = 'd03338a9-9115-48a3-8dfc-35cdfcdc15a7' - -PORTAL_RESPONSE = {'rc': 0, 'ipaddress': FAKE} -VOLUME_METADATA = {'metadata': {'FSS-vid': 1}} -EXTENT_NEW_SIZE = 3 -DATA_SERVER_INFO = 0, {'metadata': {'vendor': 'FalconStor', 'version': '1.5'}} - -FSS_SINGLE_TYPE = 'single' -RAWTIMESTAMP = '1324975390' - -VOLUME = {'id': VOLUME_ID, - 'name': "volume-" + VOLUME_ID, - 'display_name': 'fake_volume', - 'display_description': '', - 'size': 1, - 'host': "hostname@backend#%s" % FAKE_ID, - 'volume_type': None, - 'volume_type_id': None, - 'consistencygroup_id': None, - 'volume_metadata': [], - 'metadata': {"Type": "work"}} - -SRC_VOL_ID = "abcdabcd-1234-abcd-1234-abcdeffedcbc" -SRC_VOL = { - "name": "volume-" + SRC_VOL_ID, - "id": SRC_VOL_ID, - "display_name": "fake_src_vol", - "size": 1, - "host": "hostname@backend#%s" % FAKE_ID, - "volume_type": None, - "volume_type_id": None, - "volume_size": 1 -} - -VOLUME_NAME = 'cinder-' + VOLUME['id'] -SRC_VOL_NAME = 'cinder-' + SRC_VOL['id'] -DATA_OUTPUT = VOLUME_NAME, VOLUME_METADATA -SNAPSHOT_METADATA = 
{'fss-tm-comment': None} - -ADD_VOLUME_IN_CG = { - 'id': ADD_VOLUME_ID, - 'display_name': 'abc123', - 'display_description': '', - 'size': 1, - 'consistencygroup_id': GROUP_ID, - 'status': 'available', - 'host': "hostname@backend#%s" % FAKE_ID} - -REMOVE_VOLUME_IN_CG = { - 'id': 'fe2dbc515810451dab2f8c8a48d15bee', - 'display_name': 'fe2dbc515810451dab2f8c8a48d15bee', - 'display_description': '', - 'size': 1, - 'consistencygroup_id': GROUP_ID, - 'status': 'available', - 'host': "hostname@backend#%s" % FAKE_ID} - -CONSISTGROUP = {'id': GROUP_ID, - 'name': 'fake_group', - 'description': 'fake_group_des', - 'status': ''} -CG_SNAPSHOT = { - 'consistencygroup_id': GROUP_ID, - 'id': '3c61b0f9-842e-46bf-b061-5e0031d8083f', - 'name': 'cgsnapshot1', - 'description': 'cgsnapshot1', - 'status': ''} - -SNAPSHOT_ID = "abcdabcd-1234-abcd-1234-abcdeffedcbb" -ENCODED_SNAPSHOT_ID = "cinder-8W45SsgKTG2dnSUHoiQeuA" -SNAPSHOT = {'name': "snapshot-" + SNAPSHOT_ID, - 'id': SNAPSHOT_ID, - 'volume_id': VOLUME_ID, - 'volume_name': "volume-" + VOLUME_ID, - 'volume_size': 2, - 'display_name': "fake_snapshot", - 'display_description': '', - 'volume': VOLUME, - 'metadata': SNAPSHOT_METADATA, - 'status': ''} -SNAPSHOT_LONG_NAME = { - 'name': "SnapshotsActionsV1Test-Snapshot-" + SNAPSHOT_ID, - 'id': SNAPSHOT_ID, - 'volume_id': VOLUME_ID, - 'volume_size': 2, - 'display_name': 'SnapshotsActionsV1Test-Snapshot-901108447', - 'volume': VOLUME, - 'metadata': SNAPSHOT_METADATA, - 'status': ''} - -INITIATOR_IQN = 'iqn.2015-08.org.falconstor:01:fss' -TARGET_IQN = "iqn.2015-06.com.falconstor:freestor.fss-12345abc" -TARGET_PORT = "3260" -ISCSI_PORT_NAMES = ["ct0.eth2", "ct0.eth3", "ct1.eth2", "ct1.eth3"] -ISCSI_IPS = ["10.0.0." 
+ str(i + 1) for i in range(len(ISCSI_PORT_NAMES))] - -ISCSI_PORTS = {"iqn": TARGET_IQN, "lun": 1} -ISCSI_CONNECTOR = {'initiator': INITIATOR_IQN, - 'host': "hostname@backend#%s" % FAKE_ID} -ISCSI_INFO = { - 'driver_volume_type': 'iscsi', - 'data': { - 'target_discovered': True, - 'discard': True, - 'encrypted': False, - 'qos_specs': None, - 'access_mode': 'rw', - 'volume_id': VOLUME_ID, - 'target_iqn': ISCSI_PORTS['iqn'], - 'target_portal': ISCSI_IPS[0] + ':' + TARGET_PORT, - 'target_lun': 1 - }, -} - -ISCSI_MULTIPATH_INFO = { - 'driver_volume_type': 'iscsi', - 'data''data': { - 'target_discovered': False, - 'discard': True, - 'encrypted': False, - 'qos_specs': None, - 'access_mode': 'rw', - 'volume_id': VOLUME_ID, - 'target_iqns': [ISCSI_PORTS['iqn']], - 'target_portals': [ISCSI_IPS[0] + ':' + TARGET_PORT], - 'target_luns': [1] - }, -} - -FC_INITIATOR_WWPNS = ['2100000d778301c3', '2101000d77a301c3'] -FC_TARGET_WWPNS = ['11000024ff2d2ca4', '11000024ff2d2ca5', - '11000024ff2d2c23', '11000024ff2d2c24'] -FC_WWNS = ['20000024ff2d2ca4', '20000024ff2d2ca5', - '20000024ff2d2c23', '20000024ff2d2c24'] -FC_CONNECTOR = {'ip': '10.10.0.1', - 'initiator': 'iqn.1988-08.org.oracle:568eb4ccbbcc', - 'wwpns': FC_INITIATOR_WWPNS, - 'wwnns': FC_WWNS, - 'host': FAKE_HOST, - 'multipath': False} -FC_INITIATOR_TARGET_MAP = { - FC_INITIATOR_WWPNS[0]: [FC_TARGET_WWPNS[0], FC_TARGET_WWPNS[1]], - FC_INITIATOR_WWPNS[1]: [FC_TARGET_WWPNS[2], FC_TARGET_WWPNS[3]] -} -FC_DEVICE_MAPPING = { - "fabric": { - 'initiator_port_wwn_list': FC_INITIATOR_WWPNS, - 'target_port_wwn_list': FC_WWNS - } -} - -FC_INFO = { - 'driver_volume_type': 'fibre_channel', - 'data': { - 'target_discovered': True, - 'volume_id': VOLUME_ID, - 'target_lun': 1, - 'target_wwn': FC_TARGET_WWPNS, - 'initiator_target_map': FC_INITIATOR_TARGET_MAP - } -} - - -def Fake_sleep(time): - pass - - -class FSSDriverTestCase(test.TestCase): - - def setUp(self): - super(FSSDriverTestCase, self).setUp() - self.mock_config = mock.Mock() - 
self.mock_config.san_ip = PRIMARY_IP - self.mock_config.san_login = FAKE - self.mock_config.san_password = FAKE - self.mock_config.fss_pools = FAKE_SINGLE_POOLS - self.mock_config.san_is_local = False - self.mock_config.fss_debug = False - self.mock_config.additional_retry_list = False - self.mock_object(time, 'sleep', Fake_sleep) - - -class TestFSSISCSIDriver(FSSDriverTestCase): - def __init__(self, method): - super(TestFSSISCSIDriver, self).__init__(method) - - def setUp(self): - super(TestFSSISCSIDriver, self).setUp() - self.mock_config.use_chap_auth = False - self.mock_config.use_multipath_for_image_xfer = False - self.mock_config.volume_backend_name = ISCSI_VOLUME_BACKEND_NAME - self.driver = iscsi.FSSISCSIDriver(configuration=self.mock_config) - self.mock_utils = mock.Mock() - self.driver.driver_utils = self.mock_utils - - def test_initialized_should_set_fss_info(self): - self.assertEqual(self.driver.proxy.fss_host, - self.driver.configuration.san_ip) - self.assertEqual(self.driver.proxy.fss_defined_pools, - self.driver.configuration.fss_pools) - - def test_check_for_setup_error(self): - self.assertRaises(exception.VolumeBackendAPIException, - self.driver.check_for_setup_error) - - @mock.patch.object(proxy.RESTProxy, 'create_vdev', - return_value=DATA_OUTPUT) - def test_create_volume(self, mock_create_vdev): - self.driver.create_volume(VOLUME) - mock_create_vdev.assert_called_once_with(VOLUME) - - @mock.patch.object(proxy.RESTProxy, '_get_fss_volume_name', - return_value=VOLUME_NAME) - def test_extend_volume(self, mock__get_fss_volume_name): - """Volume extended_volume successfully.""" - self.driver.proxy.extend_vdev = mock.Mock() - result = self.driver.extend_volume(VOLUME, EXTENT_NEW_SIZE) - mock__get_fss_volume_name.assert_called_once_with(VOLUME) - self.driver.proxy.extend_vdev.assert_called_once_with(VOLUME_NAME, - VOLUME["size"], - EXTENT_NEW_SIZE) - self.assertIsNone(result) - - @mock.patch.object(proxy.RESTProxy, '_get_fss_volume_name') - def 
test_clone_volume(self, mock__get_fss_volume_name): - mock__get_fss_volume_name.side_effect = [VOLUME_NAME, SRC_VOL_NAME] - self.driver.proxy.clone_volume = mock.Mock( - return_value=VOLUME_METADATA) - self.driver.proxy.extend_vdev = mock.Mock() - - self.driver.create_cloned_volume(VOLUME, SRC_VOL) - self.driver.proxy.clone_volume.assert_called_with(VOLUME_NAME, - SRC_VOL_NAME) - - mock__get_fss_volume_name.assert_any_call(VOLUME) - mock__get_fss_volume_name.assert_any_call(SRC_VOL) - self.assertEqual(2, mock__get_fss_volume_name.call_count) - - self.driver.proxy.extend_vdev(VOLUME_NAME, VOLUME["size"], - SRC_VOL["size"]) - self.driver.proxy.extend_vdev.assert_called_with(VOLUME_NAME, - VOLUME["size"], - SRC_VOL["size"]) - - @mock.patch.object(proxy.RESTProxy, 'delete_vdev') - def test_delete_volume(self, mock_delete_vdev): - result = self.driver.delete_volume(VOLUME) - mock_delete_vdev.assert_called_once_with(VOLUME) - self.assertIsNone(result) - - @mock.patch.object(proxy.RESTProxy, 'create_snapshot', - return_value=API_RESPONSE) - def test_create_snapshot(self, mock_create_snapshot): - snap_name = SNAPSHOT.get('display_name') - SNAPSHOT_METADATA["fss-tm-comment"] = snap_name - result = self.driver.create_snapshot(SNAPSHOT) - mock_create_snapshot.assert_called_once_with(SNAPSHOT) - self.assertEqual(result, {'metadata': SNAPSHOT_METADATA}) - - @mock.patch.object(proxy.RESTProxy, 'create_snapshot', - return_value=API_RESPONSE) - def test_create_snapshot_exceed_characters_len(self, mock_create_snapshot): - SNAPSHOT_METADATA["fss-tm-comment"] = ENCODED_SNAPSHOT_ID - result = self.driver.create_snapshot(SNAPSHOT_LONG_NAME) - mock_create_snapshot.assert_called_once_with(SNAPSHOT_LONG_NAME) - self.assertEqual(result, {'metadata': SNAPSHOT_METADATA}) - - @mock.patch.object(proxy.RESTProxy, 'delete_snapshot', - return_value=API_RESPONSE) - def test_delete_snapshot(self, mock_delete_snapshot): - result = self.driver.delete_snapshot(SNAPSHOT) - 
mock_delete_snapshot.assert_called_once_with(SNAPSHOT) - self.assertIsNone(result) - - @mock.patch.object(proxy.RESTProxy, 'create_volume_from_snapshot', - return_value=(VOLUME_NAME, VOLUME_METADATA)) - @mock.patch.object(proxy.RESTProxy, '_get_fss_volume_name', - return_value=VOLUME_NAME) - def test_create_volume_from_snapshot(self, mock__get_fss_volume_name, - mock_create_volume_from_snapshot): - vol_size = VOLUME['size'] - snap_size = SNAPSHOT['volume_size'] - self.driver.proxy.extend_vdev = mock.Mock() - - self.assertEqual( - self.driver.create_volume_from_snapshot(VOLUME, SNAPSHOT), - dict(metadata=VOLUME_METADATA)) - mock_create_volume_from_snapshot.assert_called_once_with(VOLUME, - SNAPSHOT) - - if vol_size != snap_size: - mock__get_fss_volume_name.assert_called_once_with(VOLUME) - self.driver.proxy.extend_vdev(VOLUME_NAME, snap_size, vol_size) - self.driver.proxy.extend_vdev.assert_called_with(VOLUME_NAME, - snap_size, - vol_size) - - @mock.patch.object(proxy.RESTProxy, 'create_group') - def test_create_consistency_group(self, mock_create_group): - ctxt = context.get_admin_context() - model_update = self.driver.create_consistencygroup(ctxt, CONSISTGROUP) - mock_create_group.assert_called_once_with(CONSISTGROUP) - self.assertDictEqual({'status': 'available'}, model_update) - - @mock.patch.object(proxy.RESTProxy, 'destroy_group') - @mock.patch(BASE_DRIVER + ".delete_volume", autospec=True) - def test_delete_consistency_group(self, mock_delete_vdev, - mock_destroy_group): - mock_cgroup = mock.MagicMock() - mock_cgroup.id = FAKE_ID - mock_cgroup['status'] = "deleted" - mock_context = mock.Mock() - mock_volume = mock.MagicMock() - expected_volume_updates = [{ - 'id': mock_volume.id, - 'status': 'deleted' - }] - model_update, volumes = self.driver.delete_consistencygroup( - mock_context, mock_cgroup, [mock_volume]) - - mock_destroy_group.assert_called_with(mock_cgroup) - self.assertEqual(expected_volume_updates, volumes) - self.assertEqual(mock_cgroup['status'], 
model_update['status']) - mock_delete_vdev.assert_called_with(self.driver, mock_volume) - - @mock.patch.object(proxy.RESTProxy, 'set_group') - def test_update_consistency_group(self, mock_set_group): - ctxt = context.get_admin_context() - add_vols = [ - {'name': 'vol1', 'id': 'vol1', 'display_name': ''}, - {'name': 'vol2', 'id': 'vol2', 'display_name': ''} - ] - remove_vols = [ - {'name': 'vol3', 'id': 'vol3', 'display_name': ''}, - {'name': 'vol4', 'id': 'vol4', 'display_name': ''} - ] - - expected_addvollist = ["cinder-%s" % volume['id'] for volume in - add_vols] - expected_remvollist = ["cinder-%s" % vol['id'] for vol in remove_vols] - - self.driver.update_consistencygroup(ctxt, CONSISTGROUP, - add_volumes=add_vols, - remove_volumes=remove_vols) - mock_set_group.assert_called_with(GROUP_ID, - addvollist=expected_addvollist, - remvollist=expected_remvollist) - - @mock.patch.object(proxy.RESTProxy, 'create_cgsnapshot') - def test_create_cgsnapshot(self, mock_create_cgsnapshot): - mock_cgsnap = CG_SNAPSHOT - mock_context = mock.Mock() - mock_snap = mock.MagicMock() - model_update, snapshots = self.driver.create_cgsnapshot(mock_context, - mock_cgsnap, - [mock_snap]) - mock_create_cgsnapshot.assert_called_once_with(mock_cgsnap) - self.assertEqual({'status': 'available'}, model_update) - expected_snapshot_update = [{ - 'id': mock_snap.id, - 'status': 'available' - }] - self.assertEqual(expected_snapshot_update, snapshots) - - @mock.patch.object(proxy.RESTProxy, 'delete_cgsnapshot') - def test_delete_cgsnapshot(self, mock_delete_cgsnapshot): - mock_cgsnap = mock.Mock() - mock_cgsnap.id = FAKE_ID - mock_cgsnap.status = 'deleted' - mock_context = mock.Mock() - mock_snap = mock.MagicMock() - - model_update, snapshots = self.driver.delete_cgsnapshot(mock_context, - mock_cgsnap, - [mock_snap]) - mock_delete_cgsnapshot.assert_called_once_with(mock_cgsnap) - self.assertEqual({'status': mock_cgsnap.status}, model_update) - - expected_snapshot_update = [dict(id=mock_snap.id, 
status='deleted')] - self.assertEqual(expected_snapshot_update, snapshots) - - @mock.patch.object(proxy.RESTProxy, 'initialize_connection_iscsi', - return_value=ISCSI_PORTS) - def test_initialize_connection(self, mock_initialize_connection_iscsi): - FSS_HOSTS = [] - FSS_HOSTS.append(PRIMARY_IP) - ret = self.driver.initialize_connection(VOLUME, ISCSI_CONNECTOR) - mock_initialize_connection_iscsi.assert_called_once_with( - VOLUME, - ISCSI_CONNECTOR, - FSS_HOSTS) - result = deepcopy(ISCSI_INFO) - self.assertDictEqual(result, ret) - - @mock.patch.object(proxy.RESTProxy, 'initialize_connection_iscsi') - @mock.patch(ISCSI_DRIVER + "._check_multipath", autospec=True) - def test_initialize_connection_multipath(self, mock__check_multipath, - mock_initialize_connection_iscsi): - fss_hosts = [] - fss_hosts.append(self.mock_config.san_ip) - mock_initialize_connection_iscsi.return_value = ISCSI_PORTS - mock__check_multipath.retuen_value = True - - self.mock_config.use_multipath_for_image_xfer = True - self.mock_config.fss_san_secondary_ip = SECONDARY_IP - multipath_connector = deepcopy(ISCSI_CONNECTOR) - multipath_connector["multipath"] = True - fss_hosts.append(SECONDARY_IP) - - self.driver.initialize_connection(VOLUME, multipath_connector) - mock_initialize_connection_iscsi.assert_called_once_with( - VOLUME, - multipath_connector, - fss_hosts) - - @mock.patch.object(proxy.RESTProxy, 'terminate_connection_iscsi') - def test_terminate_connection(self, mock_terminate_connection_iscsi): - self.driver.terminate_connection(VOLUME, ISCSI_CONNECTOR) - mock_terminate_connection_iscsi.assert_called_once_with( - VOLUME, - ISCSI_CONNECTOR) - - @mock.patch.object(proxy.RESTProxy, '_manage_existing_volume') - @mock.patch.object(proxy.RESTProxy, '_get_existing_volume_ref_vid') - def test_manage_existing(self, mock__get_existing_volume_ref_vid, - mock__manage_existing_volume): - ref_vid = 1 - volume_ref = {'source-id': ref_vid} - self.driver.manage_existing(VOLUME, volume_ref) - 
mock__get_existing_volume_ref_vid.assert_called_once_with(volume_ref) - mock__manage_existing_volume.assert_called_once_with( - volume_ref['source-id'], VOLUME) - - @mock.patch.object(proxy.RESTProxy, '_get_existing_volume_ref_vid', - return_value=5120) - def test_manage_existing_get_size(self, mock__get_existing_volume_ref_vid): - ref_vid = 1 - volume_ref = {'source-id': ref_vid} - expected_size = 5 - size = self.driver.manage_existing_get_size(VOLUME, volume_ref) - mock__get_existing_volume_ref_vid.assert_called_once_with(volume_ref) - self.assertEqual(expected_size, size) - - @mock.patch.object(proxy.RESTProxy, 'unmanage') - def test_unmanage(self, mock_unmanage): - self.driver.unmanage(VOLUME) - mock_unmanage.assert_called_once_with(VOLUME) - - -class TestFSSFCDriver(FSSDriverTestCase): - - def setUp(self): - super(TestFSSFCDriver, self).setUp() - self.driver = fc.FSSFCDriver(configuration=self.mock_config) - self.driver._lookup_service = mock.Mock() - - @mock.patch.object(proxy.RESTProxy, 'fc_initialize_connection') - def test_initialize_connection(self, mock_fc_initialize_connection): - fss_hosts = [] - fss_hosts.append(PRIMARY_IP) - self.driver.initialize_connection(VOLUME, FC_CONNECTOR) - mock_fc_initialize_connection.assert_called_once_with( - VOLUME, - FC_CONNECTOR, - fss_hosts) - - @mock.patch.object(proxy.RESTProxy, '_check_fc_host_devices_empty', - return_value=False) - @mock.patch.object(proxy.RESTProxy, 'fc_terminate_connection', - return_value=FAKE_ID) - def test_terminate_connection(self, mock_fc_terminate_connection, - mock__check_fc_host_devices_empty): - self.driver.terminate_connection(VOLUME, FC_CONNECTOR) - mock_fc_terminate_connection.assert_called_once_with( - VOLUME, - FC_CONNECTOR) - mock__check_fc_host_devices_empty.assert_called_once_with(FAKE_ID) - - -class TestRESTProxy(test.TestCase): - """Test REST Proxy Driver.""" - - def setUp(self): - super(TestRESTProxy, self).setUp() - configuration = mock.Mock(conf.Configuration) - 
configuration.san_ip = FAKE - configuration.san_login = FAKE - configuration.san_password = FAKE - configuration.fss_pools = FAKE_SINGLE_POOLS - configuration.fss_debug = False - configuration.additional_retry_list = None - - self.proxy = proxy.RESTProxy(configuration) - self.FSS_MOCK = mock.MagicMock() - self.proxy.FSS = self.FSS_MOCK - self.FSS_MOCK._fss_request.return_value = API_RESPONSE - self.mock_object(time, 'sleep', Fake_sleep) - - def test_do_setup(self): - self.proxy.do_setup() - self.FSS_MOCK.fss_login.assert_called_once_with() - self.assertNotEqual(self.proxy.session_id, SESSION_ID) - - def test_create_volume(self): - sizemb = self.proxy._convert_size_to_mb(VOLUME['size']) - volume_name = self.proxy._get_fss_volume_name(VOLUME) - _pool_id = self.proxy._selected_pool_id(FAKE_SINGLE_POOLS, "P") - - params = dict(storagepoolid=_pool_id, - sizemb=sizemb, - category="virtual", - name=volume_name) - self.proxy.create_vdev(VOLUME) - self.FSS_MOCK.create_vdev.assert_called_once_with(params) - - @mock.patch.object(proxy.RESTProxy, '_get_fss_vid_from_name', - return_value=FAKE_ID) - def test_extend_volume(self, mock__get_fss_vid_from_name): - size = self.proxy._convert_size_to_mb(EXTENT_NEW_SIZE - VOLUME['size']) - params = dict( - action='expand', - sizemb=size - ) - volume_name = self.proxy._get_fss_volume_name(VOLUME) - self.proxy.extend_vdev(volume_name, VOLUME["size"], EXTENT_NEW_SIZE) - - mock__get_fss_vid_from_name.assert_called_once_with(volume_name, - FSS_SINGLE_TYPE) - self.FSS_MOCK.extend_vdev.assert_called_once_with(FAKE_ID, params) - - @mock.patch.object(proxy.RESTProxy, '_get_fss_vid_from_name', - return_value=FAKE_ID) - def test_delete_volume(self, mock__get_fss_vid_from_name): - volume_name = self.proxy._get_fss_volume_name(VOLUME) - self.proxy.delete_vdev(VOLUME) - mock__get_fss_vid_from_name.assert_called_once_with(volume_name, - FSS_SINGLE_TYPE) - self.FSS_MOCK.delete_vdev.assert_called_once_with(FAKE_ID) - - 
@mock.patch.object(proxy.RESTProxy, '_get_fss_vid_from_name', - return_value=FAKE_ID) - def test_clone_volume(self, mock__get_fss_vid_from_name): - self.FSS_MOCK.create_mirror.return_value = API_RESPONSE - self.FSS_MOCK.sync_mirror.return_value = API_RESPONSE - _pool_id = self.proxy._selected_pool_id(FAKE_SINGLE_POOLS, "O") - mirror_params = dict( - category='virtual', - selectioncriteria='anydrive', - mirrortarget="virtual", - storagepoolid=_pool_id - ) - ret = self.proxy.clone_volume(VOLUME_NAME, SRC_VOL_NAME) - - self.FSS_MOCK.create_mirror.assert_called_once_with(FAKE_ID, - mirror_params) - self.FSS_MOCK.sync_mirror.assert_called_once_with(FAKE_ID) - self.FSS_MOCK.promote_mirror.assert_called_once_with(FAKE_ID, - VOLUME_NAME) - self.assertNotEqual(ret, VOLUME_METADATA) - - @mock.patch.object(proxy.RESTProxy, 'create_vdev_snapshot') - @mock.patch.object(proxy.RESTProxy, '_get_fss_vid_from_name', - return_value=FAKE_ID) - @mock.patch.object(proxy.RESTProxy, '_get_vol_name_from_snap', - return_value=VOLUME_NAME) - def test_create_snapshot(self, mock__get_vol_name_from_snap, - mock__get_fss_vid_from_name, - mock_create_vdev_snapshot): - self.FSS_MOCK._check_if_snapshot_tm_exist.return_value = [ - False, False, SNAPSHOT['volume_size']] - - self.proxy.create_snapshot(SNAPSHOT) - self.FSS_MOCK._check_if_snapshot_tm_exist.assert_called_once_with( - FAKE_ID) - sizemb = self.proxy._convert_size_to_mb(SNAPSHOT['volume_size']) - mock_create_vdev_snapshot.assert_called_once_with(FAKE_ID, sizemb) - _pool_id = self.proxy._selected_pool_id(FAKE_SINGLE_POOLS, "O") - self.FSS_MOCK.create_timemark_policy.assert_called_once_with( - FAKE_ID, - storagepoolid=_pool_id) - self.FSS_MOCK.create_timemark.assert_called_once_with( - FAKE_ID, - SNAPSHOT["display_name"]) - - @mock.patch.object(proxy.RESTProxy, '_get_timestamp', - return_value=RAWTIMESTAMP) - @mock.patch.object(proxy.RESTProxy, '_get_fss_vid_from_name', - return_value=FAKE_ID) - @mock.patch.object(proxy.RESTProxy, 
'_get_vol_name_from_snap', - return_value=VOLUME_NAME) - def test_delete_snapshot(self, mock__get_vol_name_from_snap, - mock__get_fss_vid_from_name, - mock__get_timestamp): - timestamp = '%s_%s' % (FAKE_ID, RAWTIMESTAMP) - - self.proxy.delete_snapshot(SNAPSHOT) - mock__get_vol_name_from_snap.assert_called_once_with(SNAPSHOT) - self.FSS_MOCK.delete_timemark.assert_called_once_with(timestamp) - self.FSS_MOCK.get_timemark.assert_any_call(FAKE_ID) - self.assertEqual(2, self.FSS_MOCK.get_timemark.call_count) - - @mock.patch.object(proxy.RESTProxy, '_get_timestamp') - @mock.patch.object(proxy.RESTProxy, '_get_fss_vid_from_name') - @mock.patch.object(proxy.RESTProxy, '_get_vol_name_from_snap') - def test_create_volume_from_snapshot(self, mock__get_vol_name_from_snap, - mock__get_fss_vid_from_name, - mock__get_timestamp): - tm_info = {"rc": 0, - "data": - { - "guid": "497bad5e-e589-bb0a-e0e7-00004eeac169", - "name": "SANDisk-001", - "total": "1", - "timemark": [ - { - "size": 131072, - "comment": "123test456", - "hastimeview": False, - "priority": "low", - "quiescent": "yes", - "timeviewdata": "notkept", - "rawtimestamp": "1324975390", - "timestamp": "2015-10-11 16:43:10" - }] - } - } - mock__get_vol_name_from_snap.return_value = VOLUME_NAME - new_vol_name = self.proxy._get_fss_volume_name(VOLUME) - mock__get_fss_vid_from_name.return_value = FAKE_ID - - self.FSS_MOCK.get_timemark.return_value = tm_info - mock__get_timestamp.return_value = RAWTIMESTAMP - timestamp = '%s_%s' % (FAKE_ID, RAWTIMESTAMP) - _pool_id = self.proxy._selected_pool_id(FAKE_SINGLE_POOLS, "O") - - self.proxy.create_volume_from_snapshot(VOLUME, SNAPSHOT) - self.FSS_MOCK.get_timemark.assert_called_once_with(FAKE_ID) - mock__get_timestamp.assert_called_once_with(tm_info, - SNAPSHOT['display_name']) - self.FSS_MOCK.copy_timemark.assert_called_once_with( - timestamp, - storagepoolid=_pool_id, - name=new_vol_name) - - @mock.patch.object(proxy.RESTProxy, '_get_group_name_from_id') - def 
test_create_consistency_group(self, mock__get_group_name_from_id): - - mock__get_group_name_from_id.return_value = CONSISTGROUP['name'] - params = dict(name=CONSISTGROUP['name']) - self.proxy.create_group(CONSISTGROUP) - self.FSS_MOCK.create_group.assert_called_once_with(params) - - @mock.patch.object(proxy.RESTProxy, '_get_fss_gid_from_name') - @mock.patch.object(proxy.RESTProxy, '_get_group_name_from_id') - def test_delete_consistency_group(self, mock__get_group_name_from_id, - mock__get_fss_gid_from_name): - mock__get_group_name_from_id.return_value = CONSISTGROUP['name'] - mock__get_fss_gid_from_name.return_value = FAKE_ID - - self.proxy.destroy_group(CONSISTGROUP) - mock__get_group_name_from_id.assert_called_once_with( - CONSISTGROUP['id']) - mock__get_fss_gid_from_name.assert_called_once_with( - CONSISTGROUP['name']) - self.FSS_MOCK.destroy_group.assert_called_once_with(FAKE_ID) - - @mock.patch.object(proxy.RESTProxy, '_get_fss_vid_from_name') - @mock.patch.object(proxy.RESTProxy, '_get_fss_gid_from_name') - @mock.patch.object(proxy.RESTProxy, '_get_group_name_from_id') - def test_update_consistency_group(self, mock__get_group_name_from_id, - mock__get_fss_gid_from_name, - mock__get_fss_vid_from_name): - join_vid_list = [1, 2] - leave_vid_list = [3, 4] - mock__get_group_name_from_id.return_value = CONSISTGROUP['name'] - mock__get_fss_gid_from_name.return_value = FAKE_ID - mock__get_fss_vid_from_name.side_effect = [join_vid_list, - leave_vid_list] - add_vols = [ - {'name': 'vol1', 'id': 'vol1'}, - {'name': 'vol2', 'id': 'vol2'} - ] - remove_vols = [ - {'name': 'vol3', 'id': 'vol3'}, - {'name': 'vol4', 'id': 'vol4'} - ] - expected_addvollist = ["cinder-%s" % volume['id'] for volume in - add_vols] - expected_remvollist = ["cinder-%s" % vol['id'] for vol in remove_vols] - - self.proxy.set_group(CONSISTGROUP, addvollist=expected_addvollist, - remvollist=expected_remvollist) - - if expected_addvollist: - 
mock__get_fss_vid_from_name.assert_any_call(expected_addvollist) - - if expected_remvollist: - mock__get_fss_vid_from_name.assert_any_call(expected_remvollist) - self.assertEqual(2, mock__get_fss_vid_from_name.call_count) - - join_params = dict() - leave_params = dict() - - join_params.update( - action='join', - virtualdevices=join_vid_list - ) - leave_params.update( - action='leave', - virtualdevices=leave_vid_list - ) - self.FSS_MOCK.set_group.assert_called_once_with(FAKE_ID, join_params, - leave_params) - - @mock.patch.object(proxy.RESTProxy, 'create_vdev_snapshot') - @mock.patch.object(proxy.RESTProxy, 'create_group_timemark') - @mock.patch.object(proxy.RESTProxy, '_get_vdev_id_from_group_id') - @mock.patch.object(proxy.RESTProxy, '_get_fss_gid_from_name') - @mock.patch.object(proxy.RESTProxy, '_get_group_name_from_id') - def test_create_cgsnapshot(self, mock__get_group_name_from_id, - mock__get_fss_gid_from_name, - mock__get_vdev_id_from_group_id, - mock_create_group_timemark, - mock_create_vdev_snapshot - ): - vid_list = [1] - - group_name = "cinder-consisgroup-%s" % CG_SNAPSHOT[ - 'consistencygroup_id'] - mock__get_group_name_from_id.return_value = group_name - mock__get_fss_gid_from_name.return_value = FAKE_ID - mock__get_vdev_id_from_group_id.return_value = vid_list - gsnap_name = self.proxy._encode_name(CG_SNAPSHOT['id']) - self.FSS_MOCK._check_if_snapshot_tm_exist.return_value = ( - False, - False, - 1024) - - self.proxy.create_cgsnapshot(CG_SNAPSHOT) - mock__get_group_name_from_id.assert_called_once_with( - CG_SNAPSHOT['consistencygroup_id']) - mock__get_fss_gid_from_name.assert_called_once_with(group_name) - mock__get_vdev_id_from_group_id.assert_called_once_with(FAKE_ID) - _pool_id = self.proxy._selected_pool_id(FAKE_SINGLE_POOLS, "O") - - for vid in vid_list: - self.FSS_MOCK._check_if_snapshot_tm_exist.assert_called_with(vid) - mock_create_vdev_snapshot.assert_called_once_with(vid, 1024) - self.FSS_MOCK.create_timemark_policy.assert_called_once_with( 
- vid, - storagepoolid=_pool_id) - - mock_create_group_timemark.assert_called_once_with(FAKE_ID, gsnap_name) - - @mock.patch.object(proxy.RESTProxy, 'delete_group_timemark') - @mock.patch.object(proxy.RESTProxy, '_get_fss_group_membercount') - @mock.patch.object(proxy.RESTProxy, '_get_fss_gid_from_name') - @mock.patch.object(proxy.RESTProxy, '_get_group_name_from_id') - def test_delete_cgsnapshot(self, mock__get_group_name_from_id, - mock__get_fss_gid_from_name, - mock__get_fss_group_membercount, - mock_delete_group_timemark): - tm_info = { - "rc": 0, - "data": - { - "name": "GroupTestABC", - "total": 1, - "timemark": [{ - "size": 65536, - "comment": "cinder-PGGwaaaaaaaar+wYV4AMdgIPw", - "priority": "low", - "quiescent": "yes", - "hastimeview": "false", - "timeviewdata": "notkept", - "rawtimestamp": "1324974940", - "timestamp": "2015-10-15 16:35:40"}] - } - } - final_tm_data = { - "rc": 0, - "data": - {"name": "GroupTestABC", - "total": 1, - "timemark": [] - }} - - mock__get_group_name_from_id.return_value = CG_SNAPSHOT[ - 'consistencygroup_id'] - mock__get_fss_gid_from_name.return_value = FAKE_ID - self.FSS_MOCK.get_group_timemark.side_effect = [tm_info, final_tm_data] - encode_snap_name = self.proxy._encode_name(CG_SNAPSHOT['id']) - self.proxy.delete_cgsnapshot(CG_SNAPSHOT) - mock__get_fss_group_membercount.assert_called_once_with(FAKE_ID) - - self.assertEqual(2, self.FSS_MOCK.get_group_timemark.call_count) - self.FSS_MOCK.get_group_timemark.assert_any_call(FAKE_ID) - rawtimestamp = self.proxy._get_timestamp(tm_info, encode_snap_name) - timestamp = '%s_%s' % (FAKE_ID, rawtimestamp) - mock_delete_group_timemark.assert_called_once_with(timestamp) - self.FSS_MOCK.delete_group_timemark_policy.assert_called_once_with( - FAKE_ID) - - @mock.patch.object(proxy.RESTProxy, 'initialize_connection_iscsi') - def test_iscsi_initialize_connection(self, - mock_initialize_connection_iscsi): - fss_hosts = [] - fss_hosts.append(PRIMARY_IP) - 
self.proxy.initialize_connection_iscsi(VOLUME, ISCSI_CONNECTOR, - fss_hosts) - mock_initialize_connection_iscsi.assert_called_once_with( - VOLUME, - ISCSI_CONNECTOR, - fss_hosts) - - @mock.patch.object(proxy.RESTProxy, 'terminate_connection_iscsi') - def test_iscsi_terminate_connection(self, mock_terminate_connection_iscsi): - self.FSS_MOCK._get_target_info.return_value = (FAKE_ID, INITIATOR_IQN) - - self.proxy.terminate_connection_iscsi(VOLUME, ISCSI_CONNECTOR) - mock_terminate_connection_iscsi.assert_called_once_with( - VOLUME, - ISCSI_CONNECTOR) - - @mock.patch.object(proxy.RESTProxy, 'rename_vdev') - @mock.patch.object(proxy.RESTProxy, '_get_fss_volume_name') - def test_manage_existing(self, mock__get_fss_volume_name, - mock_rename_vdev): - new_vol_name = 'rename-vol' - mock__get_fss_volume_name.return_value = new_vol_name - - self.proxy._manage_existing_volume(FAKE_ID, VOLUME) - mock__get_fss_volume_name.assert_called_once_with(VOLUME) - mock_rename_vdev.assert_called_once_with(FAKE_ID, new_vol_name) - - @mock.patch.object(proxy.RESTProxy, 'list_volume_info') - def test_manage_existing_get_size(self, mock_list_volume_info): - volume_ref = {'source-id': FAKE_ID} - vdev_info = { - "rc": 0, - "data": { - "name": "cinder-2ab1f70a-6c89-432c-84e3-5fa6c187fb92", - "type": "san", - "category": "virtual", - "sizemb": 1020 - }} - - mock_list_volume_info.return_value = vdev_info - self.proxy._get_existing_volume_ref_vid(volume_ref) - mock_list_volume_info.assert_called_once_with(FAKE_ID) - - @mock.patch.object(proxy.RESTProxy, 'rename_vdev') - @mock.patch.object(proxy.RESTProxy, '_get_fss_vid_from_name') - @mock.patch.object(proxy.RESTProxy, '_get_fss_volume_name') - def test_unmanage(self, mock__get_fss_volume_name, - mock__get_fss_vid_from_name, - mock_rename_vdev): - - mock__get_fss_volume_name.return_value = VOLUME_NAME - mock__get_fss_vid_from_name.return_value = FAKE_ID - unmanaged_vol_name = VOLUME_NAME + "-unmanaged" - - self.proxy.unmanage(VOLUME) - 
mock__get_fss_volume_name.assert_called_once_with(VOLUME) - mock__get_fss_vid_from_name.assert_called_once_with(VOLUME_NAME, - FSS_SINGLE_TYPE) - mock_rename_vdev.assert_called_once_with(FAKE_ID, unmanaged_vol_name) diff --git a/cinder/tests/unit/volume/drivers/test_fujitsu.py b/cinder/tests/unit/volume/drivers/test_fujitsu.py deleted file mode 100644 index 535cc14b6..000000000 --- a/cinder/tests/unit/volume/drivers/test_fujitsu.py +++ /dev/null @@ -1,925 +0,0 @@ -# Copyright (c) 2015 FUJITSU LIMITED -# Copyright (c) 2012 EMC Corporation, Inc. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -import mock -import six -import tempfile - -from oslo_utils import units - -from cinder import exception -from cinder import test -from cinder.volume import configuration as conf - -with mock.patch.dict('sys.modules', pywbem=mock.Mock()): - from cinder.volume.drivers.fujitsu import eternus_dx_common as dx_common - from cinder.volume.drivers.fujitsu import eternus_dx_fc as dx_fc - from cinder.volume.drivers.fujitsu import eternus_dx_iscsi as dx_iscsi - -CONFIG_FILE_NAME = 'cinder_fujitsu_eternus_dx.xml' -STORAGE_SYSTEM = '172.16.0.2' - -CONF = """ - -172.16.0.2 -5988 -testuser -testpass -10.0.0.3 -abcd1234_TPP -abcd1234_OSVD -""" - -TEST_VOLUME = { - 'id': '3d6eeb5d-109b-4435-b891-d01415178490', - 'name': 'volume1', - 'display_name': 'volume1', - 'provider_location': None, - 'volume_metadata': [], - 'size': 1, -} - -TEST_SNAP = { - 'id': 'f47a8da3-d9e2-46aa-831f-0ef04158d5a1', - 'volume_name': 'volume-3d6eeb5d-109b-4435-b891-d01415178490', - 'name': 'snap1', - 'display_name': 'test_snapshot', - 'volume': TEST_VOLUME, - 'volume_id': '3d6eeb5d-109b-4435-b891-d01415178490', -} - -TEST_CLONE = { - 'name': 'clone1', - 'size': 1, - 'volume_name': 'vol1', - 'id': '391fb914-8a55-4384-a747-588641db3b15', - 'project_id': 'project', - 'display_name': 'clone1', - 'display_description': 'volume created from snapshot', - 'volume_metadata': [], -} - -ISCSI_INITIATOR = 'iqn.1993-08.org.debian:01:8261afe17e4c' -ISCSI_TARGET_IP = '10.0.0.3' -ISCSI_TARGET_IQN = 'iqn.2000-09.com.fujitsu:storage-system.eternus-dxl:0' -FC_TARGET_WWN = ['500000E0DA000001', '500000E0DA000002'] -TEST_WWPN = ['0123456789111111', '0123456789222222'] -TEST_CONNECTOR = {'initiator': ISCSI_INITIATOR, 'wwpns': TEST_WWPN} - - -STOR_CONF_SVC = 'FUJITSU_StorageConfigurationService' -CTRL_CONF_SVC = 'FUJITSU_ControllerConfigurationService' -REPL_SVC = 'FUJITSU_ReplicationService' -STOR_VOL = 'FUJITSU_StorageVolume' -SCSI_PROT_CTR = 'FUJITSU_AffinityGroupController' -STOR_HWID = 'FUJITSU_StorageHardwareID' 
-STOR_HWID_MNG_SVC = 'FUJITSU_StorageHardwareIDManagementService' -STOR_POOL = 'FUJITSU_RAIDStoragePool' -STOR_POOLS = ['FUJITSU_ThinProvisioningPool', 'FUJITSU_RAIDStoragePool'] -AUTH_PRIV = 'FUJITSU_AuthorizedPrivilege' -STOR_SYNC = 'FUJITSU_StorageSynchronized' -PROT_CTRL_UNIT = 'CIM_ProtocolControllerForUnit' -STORAGE_TYPE = 'abcd1234_TPP' -LUNMASKCTRL_IDS = ['AFG0010_CM00CA00P00', 'AFG0011_CM01CA00P00'] - -MAP_STAT = '0' -VOL_STAT = '0' - -FAKE_CAPACITY = 1170368102400 -FAKE_LUN_ID1 = '600000E00D2A0000002A011500140000' -FAKE_LUN_NO1 = '0x0014' -FAKE_LUN_ID2 = '600000E00D2A0000002A0115001E0000' -FAKE_LUN_NO2 = '0x001E' -FAKE_SYSTEM_NAME = 'ET603SA4621302115' - -FAKE_STATS = { - 'vendor_name': 'FUJITSU', - 'total_capacity_gb': FAKE_CAPACITY / units.Gi, - 'free_capacity_gb': FAKE_CAPACITY / units.Gi, -} - -FAKE_KEYBIND1 = { - 'CreationClassName': 'FUJITSU_StorageVolume', - 'SystemName': STORAGE_SYSTEM, - 'DeviceID': FAKE_LUN_ID1, - 'SystemCreationClassName': 'FUJITSU_StorageComputerSystem', -} - -FAKE_LOCATION1 = { - 'classname': 'FUJITSU_StorageVolume', - 'keybindings': FAKE_KEYBIND1, -} - -FAKE_LUN_META1 = { - 'FJ_Pool_Type': 'Thinporvisioning_POOL', - 'FJ_Volume_No': FAKE_LUN_NO1, - 'FJ_Volume_Name': u'FJosv_0qJ4rpOHgFE8ipcJOMfBmg==', - 'FJ_Pool_Name': STORAGE_TYPE, - 'FJ_Backend': FAKE_SYSTEM_NAME, -} - -FAKE_MODEL_INFO1 = { - 'provider_location': six.text_type(FAKE_LOCATION1), - 'metadata': FAKE_LUN_META1, -} - -FAKE_KEYBIND2 = { - 'CreationClassName': 'FUJITSU_StorageVolume', - 'SystemName': STORAGE_SYSTEM, - 'DeviceID': FAKE_LUN_ID2, - 'SystemCreationClassName': 'FUJITSU_StorageComputerSystem', -} - -FAKE_LOCATION2 = { - 'classname': 'FUJITSU_StorageVolume', - 'keybindings': FAKE_KEYBIND2, -} - -FAKE_SNAP_INFO = {'provider_location': six.text_type(FAKE_LOCATION2)} - -FAKE_LUN_META2 = { - 'FJ_Pool_Type': 'Thinporvisioning_POOL', - 'FJ_Volume_No': FAKE_LUN_NO1, - 'FJ_Volume_Name': u'FJosv_UkCZqMFZW3SU_JzxjHiKfg==', - 'FJ_Pool_Name': STORAGE_TYPE, - 
'FJ_Backend': FAKE_SYSTEM_NAME, -} - -FAKE_MODEL_INFO2 = { - 'provider_location': six.text_type(FAKE_LOCATION1), - 'metadata': FAKE_LUN_META2, -} - - -class FJ_StorageVolume(dict): - pass - - -class FJ_StoragePool(dict): - pass - - -class FJ_AffinityGroupController(dict): - pass - - -class FakeCIMInstanceName(dict): - - def fake_create_eternus_instance_name(self, classname, bindings): - instancename = FakeCIMInstanceName() - for key in bindings: - instancename[key] = bindings[key] - instancename.classname = classname - instancename.namespace = 'root/eternus' - return instancename - - -class FakeEternusConnection(object): - def InvokeMethod(self, MethodName, Service, ElementName=None, InPool=None, - ElementType=None, TheElement=None, LUNames=None, - Size=None, Type=None, Mode=None, Locality=None, - InitiatorPortIDs=None, TargetPortIDs=None, - DeviceAccesses=None, SyncType=None, - SourceElement=None, TargetElement=None, - Operation=None, CopyType=None, - Synchronization=None, ProtocolControllers=None, - TargetPool=None): - global MAP_STAT, VOL_STAT - if MethodName == 'CreateOrModifyElementFromStoragePool': - VOL_STAT = '1' - rc = 0 - vol = self._enum_volumes() - job = {'TheElement': vol[0].path} - elif MethodName == 'ReturnToStoragePool': - VOL_STAT = '0' - rc = 0 - job = {} - elif MethodName == 'GetReplicationRelationships': - rc = 0 - job = {'Synchronizations': []} - elif MethodName == 'ExposePaths': - MAP_STAT = '1' - rc = 0 - job = {} - elif MethodName == 'HidePaths': - MAP_STAT = '0' - rc = 0 - job = {} - elif MethodName == 'CreateElementReplica': - rc = 0 - snap = self._enum_snapshots() - job = {'TargetElement': snap[0].path} - elif MethodName == 'CreateReplica': - rc = 0 - snap = self._enum_snapshots() - job = {'TargetElement': snap[0].path} - elif MethodName == 'ModifyReplicaSynchronization': - rc = 0 - job = {} - else: - raise exception.VolumeBackendAPIException(data="invoke method") - - return (rc, job) - - def EnumerateInstanceNames(self, name): - result = 
[] - if name == 'FUJITSU_StorageVolume': - result = self._enum_volumes() - elif name == 'FUJITSU_StorageConfigurationService': - result = self._enum_confservice() - elif name == 'FUJITSU_ReplicationService': - result = self._enum_repservice() - elif name == 'FUJITSU_ControllerConfigurationService': - result = self._enum_ctrlservice() - elif name == 'FUJITSU_AffinityGroupController': - result = self._enum_afntyservice() - elif name == 'FUJITSU_StorageHardwareIDManagementService': - result = self._enum_sthwidmngsvc() - elif name == 'CIM_ProtocolControllerForUnit': - result = self._ref_unitnames() - elif name == 'CIM_StoragePool': - result = self._enum_pools() - elif name == 'FUJITSU_SCSIProtocolEndpoint': - result = self._enum_scsiport_endpoint() - elif name == 'FUJITSU_IPProtocolEndpoint': - result = self._enum_ipproto_endpoint() - - return result - - def EnumerateInstances(self, name): - result = None - if name == 'FUJITSU_StorageProduct': - result = self._enum_sysnames() - elif name == 'FUJITSU_RAIDStoragePool': - result = self._enum_pool_details('RAID') - elif name == 'FUJITSU_ThinProvisioningPool': - result = self._enum_pool_details('TPP') - elif name == 'FUJITSU_SCSIProtocolEndpoint': - result = self._enum_scsiport_endpoint() - elif name == 'FUJITSU_iSCSIProtocolEndpoint': - result = self._enum_iscsiprot_endpoint() - elif name == 'FUJITSU_StorageHardwareID': - result = self._enum_sthwid() - elif name == 'CIM_SCSIProtocolEndpoint': - result = self._enum_scsiport_endpoint() - elif name == 'FUJITSU_StorageHardwareID': - result = None - else: - result = None - - return result - - def GetInstance(self, objectpath, LocalOnly=False): - try: - name = objectpath['CreationClassName'] - except KeyError: - name = objectpath.classname - - result = None - - if name == 'FUJITSU_StorageVolume': - result = self._getinstance_storagevolume(objectpath) - elif name == 'FUJITSU_IPProtocolEndpoint': - result = self._getinstance_ipprotocolendpoint(objectpath) - elif name == 
'CIM_ProtocolControllerForUnit': - result = self._getinstance_unit(objectpath) - elif name == 'FUJITSU_AffinityGroupController': - result = self._getinstance_unit(objectpath) - - return result - - def Associators(self, objectpath, AssocClass=None, - ResultClass='FUJITSU_StorageHardwareID'): - result = None - if ResultClass == 'FUJITSU_StorageHardwareID': - result = self._assoc_hdwid() - elif ResultClass == 'FUJITSU_iSCSIProtocolEndpoint': - result = self._assoc_endpoint(objectpath) - elif ResultClass == 'FUJITSU_StorageVolume': - result = self._assoc_storagevolume(objectpath) - elif ResultClass == 'FUJITSU_AuthorizedPrivilege': - result = self._assoc_authpriv() - else: - result = self._default_assoc(objectpath) - - return result - - def AssociatorNames(self, objectpath, AssocClass=None, - ResultClass=SCSI_PROT_CTR): - result = None - if ResultClass == SCSI_PROT_CTR: - result = self._assocnames_lunmaskctrl() - elif ResultClass == 'FUJITSU_TCPProtocolEndpoint': - result = self._assocnames_tcp_endpoint() - elif ResultClass == 'FUJITSU_AffinityGroupController': - result = self._assocnames_afngroup() - else: - result = self._default_assocnames(objectpath) - - return result - - def ReferenceNames(self, objectpath, - ResultClass='CIM_ProtocolControllerForUnit'): - result = [] - if ResultClass == 'CIM_ProtocolControllerForUnit': - if MAP_STAT == '1': - result = self._ref_unitnames() - else: - result = [] - elif ResultClass == 'FUJITSU_StorageSynchronized': - result = self._ref_storage_sync() - else: - result = self._default_ref(objectpath) - - return result - - def _ref_unitnames(self): - unitnames = [] - - unitname = FJ_AffinityGroupController() - dependent = {} - dependent['CreationClassName'] = STOR_VOL - dependent['DeviceID'] = FAKE_LUN_ID1 - dependent['SystemName'] = STORAGE_SYSTEM - - antecedent = {} - antecedent['CreationClassName'] = SCSI_PROT_CTR - antecedent['DeviceID'] = LUNMASKCTRL_IDS[0] - antecedent['SystemName'] = STORAGE_SYSTEM - - unitname['Dependent'] = 
dependent - unitname['Antecedent'] = antecedent - unitname['CreationClassName'] = PROT_CTRL_UNIT - unitname.path = unitname - unitnames.append(unitname) - - unitname2 = FJ_AffinityGroupController() - dependent2 = {} - dependent2['CreationClassName'] = STOR_VOL - dependent2['DeviceID'] = FAKE_LUN_ID1 - dependent2['SystemName'] = STORAGE_SYSTEM - - antecedent2 = {} - antecedent2['CreationClassName'] = SCSI_PROT_CTR - antecedent2['DeviceID'] = LUNMASKCTRL_IDS[1] - antecedent2['SystemName'] = STORAGE_SYSTEM - - unitname2['Dependent'] = dependent2 - unitname2['Antecedent'] = antecedent2 - unitname2['CreationClassName'] = PROT_CTRL_UNIT - unitname2.path = unitname2 - unitnames.append(unitname2) - - return unitnames - - def _ref_storage_sync(self): - syncnames = [] - return syncnames - - def _default_ref(self, objectpath): - return objectpath - - def _default_assoc(self, objectpath): - return objectpath - - def _assocnames_lunmaskctrl(self): - return self._enum_lunmaskctrls() - - def _assocnames_tcp_endpoint(self): - return self._enum_tcp_endpoint() - - def _assocnames_afngroup(self): - return self._enum_afntyservice() - - def _default_assocnames(self, objectpath): - return objectpath - - def _assoc_authpriv(self): - authprivs = [] - iscsi = {} - iscsi['InstanceID'] = ISCSI_INITIATOR - authprivs.append(iscsi) - - fc = {} - fc['InstanceID'] = TEST_WWPN[0] - authprivs.append(fc) - - fc1 = {} - fc1['InstanceID'] = TEST_WWPN[1] - authprivs.append(fc1) - - return authprivs - - def _assoc_endpoint(self, objectpath): - targetlist = [] - tgtport1 = {} - tgtport1['CreationClassName'] = 'FUJITSU_IPProtocolEndpoint' - tgtport1['Name'] = ('iqn.2000-09.com.fujitsu:storage-system.' 
- 'eternus-dxl:0123456789,t,0x0009') - targetlist.append(tgtport1) - - return targetlist - - def _getinstance_unit(self, objectpath): - unit = FJ_AffinityGroupController() - unit.path = None - - if MAP_STAT == '0': - return unit - dependent = {} - dependent['CreationClassName'] = STOR_VOL - dependent['DeviceID'] = FAKE_LUN_ID1 - dependent['ElementName'] = TEST_VOLUME['name'] - dependent['SystemName'] = STORAGE_SYSTEM - - antecedent = {} - antecedent['CreationClassName'] = SCSI_PROT_CTR - antecedent['DeviceID'] = LUNMASKCTRL_IDS[0] - antecedent['SystemName'] = STORAGE_SYSTEM - - unit['Dependent'] = dependent - unit['Antecedent'] = antecedent - unit['CreationClassName'] = PROT_CTRL_UNIT - unit['DeviceNumber'] = '0' - unit.path = unit - - return unit - - def _enum_sysnames(self): - sysnamelist = [] - sysname = {} - sysname['IdentifyingNumber'] = FAKE_SYSTEM_NAME - sysnamelist.append(sysname) - return sysnamelist - - def _enum_confservice(self): - services = [] - service = {} - service['Name'] = 'FUJITSU:ETERNUS SMI-S Agent' - service['SystemCreationClassName'] = 'FUJITSU_StorageComputerSystem' - service['SystemName'] = STORAGE_SYSTEM - service['CreationClassName'] = 'FUJITSU_StorageConfigurationService' - services.append(service) - return services - - def _enum_ctrlservice(self): - services = [] - service = {} - service['SystemName'] = STORAGE_SYSTEM - service['CreationClassName'] = 'FUJITSU_ControllerConfigurationService' - services.append(service) - return services - - def _enum_afntyservice(self): - services = [] - service = {} - service['SystemName'] = STORAGE_SYSTEM - service['CreationClassName'] = 'FUJITSU_AffinityGroupController' - services.append(service) - return services - - def _enum_repservice(self): - services = [] - service = {} - service['Name'] = 'FUJITSU:ETERNUS SMI-S Agent' - service['SystemCreationClassName'] = 'FUJITSU_StorageComputerSystem' - service['SystemName'] = STORAGE_SYSTEM - service['CreationClassName'] = 'FUJITSU_ReplicationService' - 
services.append(service) - return services - - def _enum_pools(self): - pools = [] - pool = {} - pool['InstanceID'] = 'FUJITSU:RSP0004' - pool['CreationClassName'] = 'FUJITSU_RAIDStoragePool' - pools.append(pool) - - pool2 = {} - pool2['InstanceID'] = 'FUJITSU:TPP0004' - pool2['CreationClassName'] = 'FUJITSU_ThinProvisioningPool' - pools.append(pool2) - return pools - - def _enum_pool_details(self, pooltype): - pools = [] - pool = FJ_StoragePool() - - if pooltype == 'RAID': - pool['InstanceID'] = 'FUJITSU:RSP0004' - pool['CreationClassName'] = 'FUJITSU_RAIDStoragePool' - pool['ElementName'] = 'abcd1234_OSVD' - pool['TotalManagedSpace'] = 1170368102400 - pool['RemainingManagedSpace'] = 1170368102400 - pool.path = pool - pool.path.classname = 'FUJITSU_RAIDStoragePool' - else: - pool = FJ_StoragePool() - pool['InstanceID'] = 'FUJITSU:TPP0004' - pool['CreationClassName'] = 'FUJITSU_ThinProvisioningPool' - pool['ElementName'] = 'abcd1234_TPP' - pool['TotalManagedSpace'] = 1170368102400 - pool['RemainingManagedSpace'] = 1170368102400 - pool.path = pool - pool.path.classname = 'FUJITSU_ThinProvisioningPool' - - pools.append(pool) - return pools - - def _enum_volumes(self): - volumes = [] - if VOL_STAT == '0': - return volumes - volume = FJ_StorageVolume() - volume['name'] = TEST_VOLUME['name'] - volume['CreationClassName'] = 'FUJITSU_StorageVolume' - volume['Name'] = FAKE_LUN_ID1 - volume['DeviceID'] = FAKE_LUN_ID1 - volume['SystemCreationClassName'] = 'FUJITSU_StorageComputerSystem' - volume['SystemName'] = STORAGE_SYSTEM - volume['ElementName'] = 'FJosv_0qJ4rpOHgFE8ipcJOMfBmg==' - volume['volume_type_id'] = None - volume.path = volume - volume.path.classname = volume['CreationClassName'] - - name = {} - name['classname'] = 'FUJITSU_StorageVolume' - keys = {} - keys['CreationClassName'] = 'FUJITSU_StorageVolume' - keys['SystemName'] = STORAGE_SYSTEM - keys['DeviceID'] = volume['DeviceID'] - keys['SystemCreationClassName'] = 'FUJITSU_StorageComputerSystem' - 
name['keybindings'] = keys - volume['provider_location'] = str(name) - - volumes.append(volume) - - snap_vol = FJ_StorageVolume() - snap_vol['name'] = TEST_SNAP['name'] - snap_vol['CreationClassName'] = 'FUJITSU_StorageVolume' - snap_vol['Name'] = FAKE_LUN_ID2 - snap_vol['DeviceID'] = FAKE_LUN_ID2 - snap_vol['SystemCreationClassName'] = 'FUJITSU_StorageComputerSystem' - snap_vol['SystemName'] = STORAGE_SYSTEM - snap_vol['ElementName'] = 'FJosv_OgEZj1mSvKRvIKOExKktlg==' - snap_vol.path = snap_vol - snap_vol.path.classname = snap_vol['CreationClassName'] - - name2 = {} - name2['classname'] = 'FUJITSU_StorageVolume' - keys2 = {} - keys2['CreationClassName'] = 'FUJITSU_StorageVolume' - keys2['SystemName'] = STORAGE_SYSTEM - keys2['DeviceID'] = snap_vol['DeviceID'] - keys2['SystemCreationClassName'] = 'FUJITSU_StorageComputerSystem' - name2['keybindings'] = keys2 - snap_vol['provider_location'] = str(name2) - - volumes.append(snap_vol) - - clone_vol = FJ_StorageVolume() - clone_vol['name'] = TEST_CLONE['name'] - clone_vol['CreationClassName'] = 'FUJITSU_StorageVolume' - clone_vol['ElementName'] = TEST_CLONE['name'] - clone_vol['DeviceID'] = FAKE_LUN_ID2 - clone_vol['SystemName'] = STORAGE_SYSTEM - clone_vol['SystemCreationClassName'] = 'FUJITSU_StorageComputerSystem' - clone_vol.path = clone_vol - clone_vol.path.classname = clone_vol['CreationClassName'] - volumes.append(clone_vol) - - return volumes - - def _enum_snapshots(self): - snapshots = [] - snap = FJ_StorageVolume() - snap['CreationClassName'] = 'FUJITSU_StorageVolume' - snap['SystemName'] = STORAGE_SYSTEM - snap['DeviceID'] = FAKE_LUN_ID2 - snap['SystemCreationClassName'] = 'FUJITSU_StorageComputerSystem' - snap.path = snap - snap.path.classname = snap['CreationClassName'] - - snapshots.append(snap) - - return snapshots - - def _enum_lunmaskctrls(self): - ctrls = [] - ctrl = {} - ctrl2 = {} - if MAP_STAT == '1': - ctrl['CreationClassName'] = SCSI_PROT_CTR - ctrl['SystemName'] = STORAGE_SYSTEM - 
ctrl['DeviceID'] = LUNMASKCTRL_IDS[0] - ctrls.append(ctrl) - - ctrl2['CreationClassName'] = SCSI_PROT_CTR - ctrl2['SystemName'] = STORAGE_SYSTEM - ctrl2['DeviceID'] = LUNMASKCTRL_IDS[1] - ctrls.append(ctrl2) - - return ctrls - - def _enum_scsiport_endpoint(self): - targetlist = [] - tgtport1 = {} - tgtport1['Name'] = '1234567890000021' - tgtport1['CreationClassName'] = 'FUJITSU_SCSIProtocolEndpoint' - tgtport1['ConnectionType'] = 2 - tgtport1['RAMode'] = 0 - targetlist.append(tgtport1) - - return targetlist - - def _enum_ipproto_endpoint(self): - targetlist = [] - tgtport1 = {} - tgtport1['CreationClassName'] = 'FUJITSU_IPProtocolEndpoint' - tgtport1['NAME'] = 'IP_CM01CA00P00_00' - targetlist.append(tgtport1) - - return targetlist - - def _enum_tcp_endpoint(self): - targetlist = [] - tgtport1 = {} - tgtport1['CreationClassName'] = 'FUJITSU_TCPProtocolEndpoint' - tgtport1['NAME'] = 'TCP_CM01CA00P00_00' - targetlist.append(tgtport1) - - return targetlist - - def _enum_iscsiprot_endpoint(self): - targetlist = [] - tgtport1 = {} - tgtport1['Name'] = ('iqn.2000-09.com.fujitsu:storage-system.' - 'eternus-dxl:0123456789,t,0x0009') - tgtport1['ConnectionType'] = 7 - tgtport1['RAMode'] = 0 - targetlist.append(tgtport1) - - return targetlist - - def _getinstance_storagevolume(self, objpath): - foundinstance = None - instance = FJ_StorageVolume() - volumes = self._enum_volumes() - for volume in volumes: - if volume['DeviceID'] == objpath['DeviceID']: - instance = volume - break - if not instance: - foundinstance = None - else: - foundinstance = instance - return foundinstance - - def _getinstance_ipprotocolendpoint(self, objpath): - instance = {} - instance['IPv4Address'] = '10.0.0.3' - return instance - - -class FJFCDriverTestCase(test.TestCase): - def __init__(self, *args, **kwargs): - super(FJFCDriverTestCase, self).__init__(*args, **kwargs) - - def setUp(self): - super(FJFCDriverTestCase, self).setUp() - - # Make fake xml-configuration file. 
- self.config_file = tempfile.NamedTemporaryFile("w+", suffix='.xml') - self.addCleanup(self.config_file.close) - self.config_file.write(CONF) - self.config_file.flush() - - # Make fake Object by using mock as configuration object. - self.configuration = mock.Mock(spec=conf.Configuration) - self.configuration.cinder_eternus_config_file = self.config_file.name - - self.mock_object(dx_common.FJDXCommon, '_get_eternus_connection', - self.fake_eternus_connection) - - instancename = FakeCIMInstanceName() - self.mock_object(dx_common.FJDXCommon, '_create_eternus_instance_name', - instancename.fake_create_eternus_instance_name) - - # Set iscsi driver to self.driver. - driver = dx_fc.FJDXFCDriver(configuration=self.configuration) - self.driver = driver - - def fake_eternus_connection(self): - conn = FakeEternusConnection() - return conn - - def test_get_volume_stats(self): - ret = self.driver.get_volume_stats(True) - stats = {'vendor_name': ret['vendor_name'], - 'total_capacity_gb': ret['total_capacity_gb'], - 'free_capacity_gb': ret['free_capacity_gb']} - self.assertEqual(FAKE_STATS, stats) - - def test_create_and_delete_volume(self): - model_info = self.driver.create_volume(TEST_VOLUME) - self.assertEqual(FAKE_MODEL_INFO1, model_info) - - self.driver.delete_volume(TEST_VOLUME) - - @mock.patch.object(dx_common.FJDXCommon, '_get_mapdata') - def test_map_unmap(self, mock_mapdata): - fake_data = {'target_wwn': FC_TARGET_WWN, - 'target_lun': 0} - - mock_mapdata.return_value = fake_data - fake_mapdata = dict(fake_data) - fake_mapdata['initiator_target_map'] = { - initiator: FC_TARGET_WWN for initiator in TEST_WWPN - } - - fake_mapdata['volume_id'] = TEST_VOLUME['id'] - fake_mapdata['target_discovered'] = True - fake_info = {'driver_volume_type': 'fibre_channel', - 'data': fake_mapdata} - - model_info = self.driver.create_volume(TEST_VOLUME) - self.assertEqual(FAKE_MODEL_INFO1, model_info) - - info = self.driver.initialize_connection(TEST_VOLUME, - TEST_CONNECTOR) - 
self.assertEqual(fake_info, info) - self.driver.terminate_connection(TEST_VOLUME, - TEST_CONNECTOR) - self.driver.delete_volume(TEST_VOLUME) - - def test_create_and_delete_snapshot(self): - model_info = self.driver.create_volume(TEST_VOLUME) - self.assertEqual(FAKE_MODEL_INFO1, model_info) - - snap_info = self.driver.create_snapshot(TEST_SNAP) - self.assertEqual(FAKE_SNAP_INFO, snap_info) - - self.driver.delete_snapshot(TEST_SNAP) - self.driver.delete_volume(TEST_VOLUME) - - def test_create_volume_from_snapshot(self): - model_info = self.driver.create_volume(TEST_VOLUME) - self.assertEqual(FAKE_MODEL_INFO1, model_info) - - snap_info = self.driver.create_snapshot(TEST_SNAP) - self.assertEqual(FAKE_SNAP_INFO, snap_info) - - model_info = self.driver.create_volume_from_snapshot(TEST_CLONE, - TEST_SNAP) - self.assertEqual(FAKE_MODEL_INFO2, model_info) - - self.driver.delete_snapshot(TEST_SNAP) - self.driver.delete_volume(TEST_CLONE) - self.driver.delete_volume(TEST_VOLUME) - - def test_create_cloned_volume(self): - model_info = self.driver.create_volume(TEST_VOLUME) - self.assertEqual(FAKE_MODEL_INFO1, model_info) - - model_info = self.driver.create_cloned_volume(TEST_CLONE, TEST_VOLUME) - self.assertEqual(FAKE_MODEL_INFO2, model_info) - - self.driver.delete_volume(TEST_CLONE) - self.driver.delete_volume(TEST_VOLUME) - - def test_extend_volume(self): - model_info = self.driver.create_volume(TEST_VOLUME) - self.assertEqual(FAKE_MODEL_INFO1, model_info) - - self.driver.extend_volume(TEST_VOLUME, 10) - - -class FJISCSIDriverTestCase(test.TestCase): - def __init__(self, *args, **kwargs): - super(FJISCSIDriverTestCase, self).__init__(*args, **kwargs) - - def setUp(self): - super(FJISCSIDriverTestCase, self).setUp() - - # Make fake xml-configuration file. 
- self.config_file = tempfile.NamedTemporaryFile("w+", suffix='.xml') - self.addCleanup(self.config_file.close) - self.config_file.write(CONF) - self.config_file.flush() - - # Make fake Object by using mock as configuration object. - self.configuration = mock.Mock(spec=conf.Configuration) - self.configuration.cinder_eternus_config_file = self.config_file.name - - self.mock_object(dx_common.FJDXCommon, '_get_eternus_connection', - self.fake_eternus_connection) - - instancename = FakeCIMInstanceName() - self.mock_object(dx_common.FJDXCommon, '_create_eternus_instance_name', - instancename.fake_create_eternus_instance_name) - - self.mock_object(dx_common.FJDXCommon, '_get_mapdata_iscsi', - self.fake_get_mapdata) - - # Set iscsi driver to self.driver. - driver = dx_iscsi.FJDXISCSIDriver(configuration=self.configuration) - self.driver = driver - - def fake_eternus_connection(self): - conn = FakeEternusConnection() - return conn - - def fake_get_mapdata(self, vol_instance, connector, target_portlist): - multipath = connector.get('multipath', False) - if multipath: - return {'target_portals': [ISCSI_TARGET_IP], - 'target_iqns': [ISCSI_TARGET_IQN], - 'target_luns': [0]} - else: - return {'target_portal': ISCSI_TARGET_IP, - 'target_iqns': ISCSI_TARGET_IQN, - 'target_lun': 0} - - def test_get_volume_stats(self): - ret = self.driver.get_volume_stats(True) - stats = {'vendor_name': ret['vendor_name'], - 'total_capacity_gb': ret['total_capacity_gb'], - 'free_capacity_gb': ret['free_capacity_gb']} - self.assertEqual(FAKE_STATS, stats) - - def test_create_and_delete_volume(self): - model_info = self.driver.create_volume(TEST_VOLUME) - self.assertEqual(FAKE_MODEL_INFO1, model_info) - - self.driver.delete_volume(TEST_VOLUME) - - def test_map_unmap(self): - fake_mapdata = self.fake_get_mapdata(None, {}, None) - fake_mapdata['volume_id'] = TEST_VOLUME['id'] - fake_mapdata['target_discovered'] = True - fake_info = {'driver_volume_type': 'iscsi', - 'data': fake_mapdata} - - model_info 
= self.driver.create_volume(TEST_VOLUME) - self.assertEqual(FAKE_MODEL_INFO1, model_info) - - info = self.driver.initialize_connection(TEST_VOLUME, - TEST_CONNECTOR) - self.assertEqual(fake_info, info) - self.driver.terminate_connection(TEST_VOLUME, - TEST_CONNECTOR) - self.driver.delete_volume(TEST_VOLUME) - - def test_create_and_delete_snapshot(self): - model_info = self.driver.create_volume(TEST_VOLUME) - self.assertEqual(FAKE_MODEL_INFO1, model_info) - - snap_info = self.driver.create_snapshot(TEST_SNAP) - self.assertEqual(FAKE_SNAP_INFO, snap_info) - - self.driver.delete_snapshot(TEST_SNAP) - self.driver.delete_volume(TEST_VOLUME) - - def test_create_volume_from_snapshot(self): - model_info = self.driver.create_volume(TEST_VOLUME) - self.assertEqual(FAKE_MODEL_INFO1, model_info) - - snap_info = self.driver.create_snapshot(TEST_SNAP) - self.assertEqual(FAKE_SNAP_INFO, snap_info) - - model_info = self.driver.create_volume_from_snapshot(TEST_CLONE, - TEST_SNAP) - self.assertEqual(FAKE_MODEL_INFO2, model_info) - - self.driver.delete_snapshot(TEST_SNAP) - self.driver.delete_volume(TEST_CLONE) - self.driver.delete_volume(TEST_VOLUME) - - def test_create_cloned_volume(self): - model_info = self.driver.create_volume(TEST_VOLUME) - self.assertEqual(FAKE_MODEL_INFO1, model_info) - - model_info = self.driver.create_cloned_volume(TEST_CLONE, TEST_VOLUME) - self.assertEqual(FAKE_MODEL_INFO2, model_info) - - self.driver.delete_volume(TEST_CLONE) - self.driver.delete_volume(TEST_VOLUME) - - def test_extend_volume(self): - model_info = self.driver.create_volume(TEST_VOLUME) - self.assertEqual(FAKE_MODEL_INFO1, model_info) - - self.driver.extend_volume(TEST_VOLUME, 10) diff --git a/cinder/tests/unit/volume/drivers/test_gpfs.py b/cinder/tests/unit/volume/drivers/test_gpfs.py deleted file mode 100644 index 006b25152..000000000 --- a/cinder/tests/unit/volume/drivers/test_gpfs.py +++ /dev/null @@ -1,2345 +0,0 @@ - -# Copyright IBM Corp. 
2013 All Rights Reserved -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import os -import tempfile - -import mock -from oslo_concurrency import processutils -from oslo_config import cfg -from oslo_utils import units - -from cinder import context -from cinder import exception -from cinder import objects -from cinder.objects import fields -from cinder import test -from cinder.tests.unit import fake_constants as fake -from cinder import utils -from cinder.volume import configuration as conf -from cinder.volume.drivers.ibm import gpfs -from cinder.volume import volume_types - - -CONF = cfg.CONF - - -class FakeQemuImgInfo(object): - def __init__(self): - self.file_format = None - self.backing_file = None - - -class GPFSDriverTestCase(test.TestCase): - driver_name = "cinder.volume.drivers.gpfs.GPFSDriver" - context = context.get_admin_context() - - def _execute_wrapper(self, cmd, *args, **kwargs): - try: - kwargs.pop('run_as_root') - except KeyError: - pass - - return utils.execute(cmd, *args, **kwargs) - - def setUp(self): - super(GPFSDriverTestCase, self).setUp() - self.volumes_path = tempfile.mkdtemp(prefix="gpfs_") - self.images_dir = '%s/images' % self.volumes_path - self.addCleanup(self._cleanup, self.images_dir, self.volumes_path) - - if not os.path.exists(self.volumes_path): - os.mkdir(self.volumes_path) - if not os.path.exists(self.images_dir): - os.mkdir(self.images_dir) - self.image_id = '70a599e0-31e7-49b7-b260-868f441e862b' - - self.driver = gpfs.GPFSDriver( 
- configuration=conf.Configuration([], conf.SHARED_CONF_GROUP)) - self.driver.gpfs_execute = self._execute_wrapper - exec_patcher = mock.patch.object(self.driver, '_execute', - self._execute_wrapper) - exec_patcher.start() - self.addCleanup(exec_patcher.stop) - self.driver._cluster_id = '123456' - self.driver._gpfs_device = '/dev/gpfs' - self.driver._storage_pool = 'system' - self.driver._encryption_state = 'yes' - - self.override_config('volume_driver', self.driver_name, - conf.SHARED_CONF_GROUP) - self.override_config('gpfs_mount_point_base', self.volumes_path, - conf.SHARED_CONF_GROUP) - - self.context = context.get_admin_context() - self.context.user_id = 'fake' - self.context.project_id = 'fake' - CONF.gpfs_images_dir = self.images_dir - - def _cleanup(self, images_dir, volumes_path): - try: - os.rmdir(images_dir) - os.rmdir(volumes_path) - except OSError: - pass - - def test_different(self): - self.assertTrue(gpfs._different((True, False))) - self.assertFalse(gpfs._different((True, True))) - self.assertFalse(gpfs._different(None)) - - def test_sizestr(self): - self.assertEqual('10G', gpfs._sizestr('10')) - - @mock.patch('cinder.utils.execute') - def test_gpfs_local_execute(self, mock_exec): - mock_exec.return_value = 'test' - self.driver._gpfs_local_execute('test') - expected = [mock.call('test', run_as_root=True)] - self.assertEqual(expected, mock_exec.mock_calls) - - @mock.patch('cinder.utils.execute') - def test_get_gpfs_state_ok(self, mock_exec): - mock_exec.return_value = ('mmgetstate::HEADER:version:reserved:' - 'reserved:nodeName:nodeNumber:state:quorum:' - 'nodesUp:totalNodes:remarks:cnfsState:\n' - 'mmgetstate::0:1:::devstack:3:active:2:3:3:' - 'quorum node:(undefined):', '') - self.assertTrue(self.driver._get_gpfs_state().splitlines()[1]. 
- startswith('mmgetstate::0:1:::devstack')) - - @mock.patch('cinder.utils.execute') - def test_get_gpfs_state_fail_mmgetstate(self, mock_exec): - mock_exec.side_effect = processutils.ProcessExecutionError( - stdout='test', stderr='test') - self.assertRaises(exception.VolumeBackendAPIException, - self.driver._get_gpfs_state) - - @mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver._get_gpfs_state') - def test_check_gpfs_state_ok(self, mock_get_gpfs_state): - mock_get_gpfs_state.return_value = ('mmgetstate::HEADER:version:' - 'reserved:reserved:nodeName:' - 'nodeNumber:state:quorum:nodesUp:' - 'totalNodes:remarks:cnfsState:\n' - 'mmgetstate::0:1:::devstack:3:' - 'active:2:3:3:' - 'quorum node:(undefined):') - self.driver._check_gpfs_state() - - @mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver._get_gpfs_state') - def test_check_gpfs_state_fail_not_active(self, mock_get_gpfs_state): - mock_get_gpfs_state.return_value = ('mmgetstate::HEADER:version:' - 'reserved:reserved:nodeName:' - 'nodeNumber:state:quorum:nodesUp:' - 'totalNodes:remarks:cnfsState:\n' - 'mmgetstate::0:1:::devstack:3:' - 'arbitrating:2:3:3:' - 'quorum node:(undefined):') - self.assertRaises(exception.VolumeBackendAPIException, - self.driver._check_gpfs_state) - - @mock.patch('cinder.utils.execute') - def test_get_fs_from_path_ok(self, mock_exec): - mock_exec.return_value = ('Filesystem 1K-blocks ' - 'Used Available Use%% Mounted on\n' - '%s 10485760 531968 9953792' - ' 6%% /gpfs0' % self.driver._gpfs_device, - '') - self.assertEqual(self.driver._gpfs_device, - self.driver._get_filesystem_from_path('/gpfs0')) - - @mock.patch('cinder.utils.execute') - def test_get_fs_from_path_fail_path(self, mock_exec): - mock_exec.return_value = ('Filesystem 1K-blocks ' - 'Used Available Use% Mounted on\n' - 'test 10485760 531968 ' - '9953792 6% /gpfs0', '') - self.assertNotEqual(self.driver._gpfs_device, - self.driver._get_filesystem_from_path('/gpfs0')) - - @mock.patch('cinder.utils.execute') - def 
test_get_fs_from_path_fail_raise(self, mock_exec): - mock_exec.side_effect = processutils.ProcessExecutionError( - stdout='test', stderr='test') - self.assertRaises(exception.VolumeBackendAPIException, - self.driver._get_filesystem_from_path, '/gpfs0') - - @mock.patch('cinder.utils.execute') - def test_get_gpfs_cluster_id_ok(self, mock_exec): - mock_exec.return_value = ('mmlsconfig::HEADER:version:reserved:' - 'reserved:configParameter:value:nodeList:\n' - 'mmlsconfig::0:1:::clusterId:%s::' - % self.driver._cluster_id, '') - self.assertEqual(self.driver._cluster_id, - self.driver._get_gpfs_cluster_id()) - - @mock.patch('cinder.utils.execute') - def test_get_gpfs_cluster_id_fail_id(self, mock_exec): - mock_exec.return_value = ('mmlsconfig::HEADER.:version:reserved:' - 'reserved:configParameter:value:nodeList:\n' - 'mmlsconfig::0:1:::clusterId:test::', '') - self.assertNotEqual(self.driver._cluster_id, - self.driver._get_gpfs_cluster_id()) - - @mock.patch('cinder.utils.execute') - def test_get_gpfs_cluster_id_fail_raise(self, mock_exec): - mock_exec.side_effect = processutils.ProcessExecutionError( - stdout='test', stderr='test') - self.assertRaises(exception.VolumeBackendAPIException, - self.driver._get_gpfs_cluster_id) - - @mock.patch('cinder.utils.execute') - def test_get_fileset_from_path_ok(self, mock_exec): - mock_exec.return_value = ('file name: /gpfs0\n' - 'metadata replication: 1 max 2\n' - 'data replication: 1 max 2\n' - 'immutable: no\n' - 'appendOnly: no\n' - 'flags:\n' - 'storage pool name: system\n' - 'fileset name: root\n' - 'snapshot name:\n' - 'Windows attributes: DIRECTORY', '') - self.driver._get_fileset_from_path('') - - @mock.patch('cinder.utils.execute') - def test_get_fileset_from_path_fail_mmlsattr(self, mock_exec): - mock_exec.side_effect = processutils.ProcessExecutionError( - stdout='test', stderr='test') - self.assertRaises(exception.VolumeBackendAPIException, - self.driver._get_fileset_from_path, '') - - 
@mock.patch('cinder.utils.execute') - def test_get_fileset_from_path_fail_find_fileset(self, mock_exec): - mock_exec.return_value = ('file name: /gpfs0\n' - 'metadata replication: 1 max 2\n' - 'data replication: 1 max 2\n' - 'immutable: no\n' - 'appendOnly: no\n' - 'flags:\n' - 'storage pool name: system\n' - '*** name: root\n' - 'snapshot name:\n' - 'Windows attributes: DIRECTORY', '') - self.assertRaises(exception.VolumeBackendAPIException, - self.driver._get_fileset_from_path, '') - - @mock.patch('cinder.utils.execute') - def test_verify_gpfs_pool_ok(self, mock_exec): - mock_exec.return_value = ('Storage pools in file system at \'/gpfs0\':' - '\n' - 'Name Id BlkSize Data ' - 'Meta ' - 'Total Data in (KB) Free Data in (KB) ' - 'Total Meta in (KB) Free Meta in (KB)\n' - 'system 0 256 KB yes ' - 'yes ' - ' 10485760 9953792 ( 95%) ' - '10485760 9954560 ( 95%)', '') - self.assertEqual('/dev/gpfs', self.driver._gpfs_device) - self.assertTrue(self.driver._verify_gpfs_pool('/dev/gpfs')) - - @mock.patch('cinder.utils.execute') - def test_verify_gpfs_pool_fail_pool(self, mock_exec): - mock_exec.return_value = ('Storage pools in file system at \'/gpfs0\':' - '\n' - 'Name Id BlkSize Data ' - 'Meta ' - 'Total Data in (KB) Free Data in (KB) ' - 'Total Meta in (KB) Free Meta in (KB)\n' - 'test 0 256 KB yes ' - 'yes' - ' 10485760 9953792 ( 95%)' - ' 10485760 9954560 ( 95%)', '') - self.assertEqual('/dev/gpfs', self.driver._gpfs_device) - self.assertTrue(self.driver._verify_gpfs_pool('/dev/gpfs')) - - @mock.patch('cinder.utils.execute') - def test_verify_gpfs_pool_fail_raise(self, mock_exec): - mock_exec.side_effect = processutils.ProcessExecutionError( - stdout='test', stderr='test') - self.assertFalse(self.driver._verify_gpfs_pool('/dev/gpfs')) - - @mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver._verify_gpfs_pool') - @mock.patch('cinder.utils.execute') - def test_update_volume_storage_pool_ok(self, mock_exec, mock_verify_pool): - mock_verify_pool.return_value = True - 
self.assertTrue(self.driver._update_volume_storage_pool('', 'system')) - - @mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver._verify_gpfs_pool') - @mock.patch('cinder.utils.execute') - def test_update_volume_storage_pool_ok_pool_none(self, - mock_exec, - mock_verify_pool): - mock_verify_pool.return_value = True - self.assertTrue(self.driver._update_volume_storage_pool('', None)) - - @mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver._verify_gpfs_pool') - @mock.patch('cinder.utils.execute') - def test_update_volume_storage_pool_fail_pool(self, - mock_exec, - mock_verify_pool): - mock_verify_pool.return_value = False - self.assertRaises(exception.VolumeBackendAPIException, - self.driver._update_volume_storage_pool, - '', - 'system') - - @mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver._verify_gpfs_pool') - @mock.patch('cinder.utils.execute') - def test_update_volume_storage_pool_fail_mmchattr(self, - mock_exec, - mock_verify_pool): - mock_exec.side_effect = processutils.ProcessExecutionError( - stdout='test', stderr='test') - mock_verify_pool.return_value = True - self.assertFalse(self.driver._update_volume_storage_pool('', 'system')) - - @mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver.' 
- '_get_filesystem_from_path') - @mock.patch('cinder.utils.execute') - def test_get_gpfs_fs_release_level_ok(self, - mock_exec, - mock_fs_from_path): - mock_exec.return_value = ('mmlsfs::HEADER:version:reserved:reserved:' - 'deviceName:fieldName:data:remarks:\n' - 'mmlsfs::0:1:::gpfs:filesystemVersion:14.03 ' - '(4.1.0.0):\n' - 'mmlsfs::0:1:::gpfs:filesystemVersionLocal:' - '14.03 (4.1.0.0):\n' - 'mmlsfs::0:1:::gpfs:filesystemVersionManager' - ':14.03 (4.1.0.0):\n' - 'mmlsfs::0:1:::gpfs:filesystemVersion' - 'Original:14.03 (4.1.0.0):\n' - 'mmlsfs::0:1:::gpfs:filesystemHighest' - 'Supported:14.03 (4.1.0.0):', '') - mock_fs_from_path.return_value = '/dev/gpfs' - self.assertEqual(('/dev/gpfs', 1403), - self.driver._get_gpfs_fs_release_level('')) - - @mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver.' - '_get_filesystem_from_path') - @mock.patch('cinder.utils.execute') - def test_get_gpfs_fs_release_level_fail_mmlsfs(self, - mock_exec, - mock_fs_from_path): - mock_exec.side_effect = processutils.ProcessExecutionError( - stdout='test', stderr='test') - mock_fs_from_path.return_value = '/dev/gpfs' - self.assertRaises(exception.VolumeBackendAPIException, - self.driver._get_gpfs_fs_release_level, '') - - @mock.patch('cinder.utils.execute') - def test_get_gpfs_cluster_release_level_ok(self, mock_exec): - mock_exec.return_value = ('mmlsconfig::HEADER:version:reserved:' - 'reserved:configParameter:value:nodeList:\n' - 'mmlsconfig::0:1:::minReleaseLevel:1403::', - '') - self.assertEqual(1403, self.driver._get_gpfs_cluster_release_level()) - - @mock.patch('cinder.utils.execute') - def test_get_gpfs_cluster_release_level_fail_mmlsconfig(self, mock_exec): - mock_exec.side_effect = processutils.ProcessExecutionError( - stdout='test', stderr='test') - self.assertRaises(exception.VolumeBackendAPIException, - self.driver._get_gpfs_cluster_release_level) - - @mock.patch('cinder.utils.execute') - def test_is_gpfs_path_fail_mmlsattr(self, mock_exec): - mock_exec.side_effect = 
processutils.ProcessExecutionError( - stdout='test', stderr='test') - self.assertRaises(exception.VolumeBackendAPIException, - self.driver._is_gpfs_path, '/dummy/path') - - @mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver.' - '_get_fileset_from_path') - @mock.patch('cinder.utils.execute') - def test_is_same_fileset_ok(self, - mock_exec, - mock_get_fileset_from_path): - mock_get_fileset_from_path.return_value = True - self.assertTrue(self.driver._is_same_fileset('', '')) - mock_get_fileset_from_path.side_effect = [True, False] - self.assertFalse(self.driver._is_same_fileset('', '')) - - @mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver.' - '_get_available_capacity') - @mock.patch('cinder.utils.execute') - def test_same_cluster_ok(self, mock_exec, mock_avail_capacity): - mock_avail_capacity.return_value = (10192683008, 10737418240) - stats = self.driver.get_volume_stats() - loc = stats['location_info'] - cap = {'location_info': loc} - host = {'host': 'foo', 'capabilities': cap} - self.assertTrue(self.driver._same_cluster(host)) - - locinfo = stats['location_info'] + '_' - loc = locinfo - cap = {'location_info': loc} - host = {'host': 'foo', 'capabilities': cap} - self.assertFalse(self.driver._same_cluster(host)) - - @mock.patch('cinder.utils.execute') - def test_set_rw_permission(self, mock_exec): - self.driver._set_rw_permission('') - - @mock.patch('cinder.utils.execute') - def test_can_migrate_locally(self, mock_exec): - host = {'host': 'foo', 'capabilities': ''} - self.assertIsNone(self.driver._can_migrate_locally(host)) - - loc = 'GPFSDriver:%s' % self.driver._cluster_id - cap = {'location_info': loc} - host = {'host': 'foo', 'capabilities': cap} - self.assertIsNone(self.driver._can_migrate_locally(host)) - - loc = 'GPFSDriver_:%s:testpath' % self.driver._cluster_id - cap = {'location_info': loc} - host = {'host': 'foo', 'capabilities': cap} - self.assertIsNone(self.driver._can_migrate_locally(host)) - - loc = 'GPFSDriver:%s:testpath' % 
(self.driver._cluster_id + '_') - cap = {'location_info': loc} - host = {'host': 'foo', 'capabilities': cap} - self.assertIsNone(self.driver._can_migrate_locally(host)) - - loc = 'GPFSDriver:%s:testpath' % self.driver._cluster_id - cap = {'location_info': loc} - host = {'host': 'foo', 'capabilities': cap} - self.assertEqual('testpath', self.driver._can_migrate_locally(host)) - - @mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver.' - '_get_gpfs_encryption_status') - @mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver.' - '_get_gpfs_cluster_release_level') - @mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver._verify_gpfs_pool') - @mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver.' - '_get_filesystem_from_path') - @mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver.' - '_get_gpfs_cluster_id') - @mock.patch('cinder.utils.execute') - def test_do_setup_ok(self, - mock_exec, - mock_get_gpfs_cluster_id, - mock_get_filesystem_from_path, - mock_verify_gpfs_pool, - mock_get_gpfs_fs_rel_lev, - mock_verify_encryption_state): - ctxt = self.context - mock_get_gpfs_cluster_id.return_value = self.driver._cluster_id - mock_get_filesystem_from_path.return_value = '/dev/gpfs' - mock_verify_gpfs_pool.return_value = True - mock_get_gpfs_fs_rel_lev.return_value = 1405 - mock_verify_encryption_state.return_value = 'Yes' - self.driver.do_setup(ctxt) - - @mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver.' - '_get_gpfs_cluster_release_level') - @mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver._verify_gpfs_pool') - @mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver.' - '_get_filesystem_from_path') - @mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver.' 
- '_get_gpfs_cluster_id') - @mock.patch('cinder.utils.execute') - def test_do_setup_no_encryption(self, - mock_exec, - mock_get_gpfs_cluster_id, - mock_get_filesystem_from_path, - mock_verify_gpfs_pool, - mock_get_gpfs_fs_rel_lev): - ctxt = self.context - mock_get_gpfs_cluster_id.return_value = self.driver._cluster_id - mock_get_filesystem_from_path.return_value = '/dev/gpfs' - mock_verify_gpfs_pool.return_value = True - mock_get_gpfs_fs_rel_lev.return_value = 1403 - self.driver.do_setup(ctxt) - - @mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver._verify_gpfs_pool') - @mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver.' - '_get_filesystem_from_path') - @mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver.' - '_get_gpfs_cluster_id') - @mock.patch('cinder.utils.execute') - def test_do_setup_fail_get_cluster_id(self, - mock_exec, - mock_get_gpfs_cluster_id, - mock_get_filesystem_from_path, - mock_verify_gpfs_pool): - ctxt = self.context - mock_get_gpfs_cluster_id.side_effect = ( - processutils.ProcessExecutionError(stdout='test', stderr='test')) - mock_get_filesystem_from_path.return_value = '/dev/gpfs' - mock_verify_gpfs_pool.return_value = True - self.assertRaises(exception.VolumeBackendAPIException, - self.driver.do_setup, ctxt) - - @mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver._verify_gpfs_pool') - @mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver.' - '_get_filesystem_from_path') - @mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver.' 
- '_get_gpfs_cluster_id') - @mock.patch('cinder.utils.execute') - def test_do_setup_fail_get_fs_from_path(self, - mock_exec, - mock_get_gpfs_cluster_id, - mock_get_fs_from_path, - mock_verify_gpfs_pool): - ctxt = self.context - mock_get_gpfs_cluster_id.return_value = self.driver._cluster_id - mock_get_fs_from_path.side_effect = ( - processutils.ProcessExecutionError(stdout='test', stderr='test')) - mock_verify_gpfs_pool.return_value = True - self.assertRaises(exception.VolumeBackendAPIException, - self.driver.do_setup, ctxt) - - @mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver._verify_gpfs_pool') - @mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver.' - '_get_filesystem_from_path') - @mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver.' - '_get_gpfs_cluster_id') - @mock.patch('cinder.utils.execute') - def test_do_setup_fail_volume(self, - mock_exec, - mock_get_gpfs_cluster_id, - mock_get_filesystem_from_path, - mock_verify_gpfs_pool): - ctxt = self.context - mock_get_gpfs_cluster_id. return_value = self.driver._cluster_id - mock_get_filesystem_from_path.return_value = '/dev/gpfs' - mock_verify_gpfs_pool.return_value = False - self.assertRaises(exception.VolumeBackendAPIException, - self.driver.do_setup, ctxt) - - @mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver._check_gpfs_state') - @mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver._is_gpfs_path') - @mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver.' 
- '_get_gpfs_fs_release_level') - def test_check_for_setup_error_fail_conf(self, - mock_get_gpfs_fs_rel_lev, - mock_is_gpfs_path, - mock_check_gpfs_state): - fake_fs = '/dev/gpfs' - fake_fs_release = 1400 - fake_cluster_release = 1201 - - # fail configuration.gpfs_mount_point_base is None - org_value = self.driver.configuration.gpfs_mount_point_base - self.override_config('gpfs_mount_point_base', None, - conf.SHARED_CONF_GROUP) - mock_get_gpfs_fs_rel_lev.return_value = (fake_fs, fake_fs_release) - self.assertRaises(exception.VolumeBackendAPIException, - self.driver.check_for_setup_error) - self.override_config('gpfs_mount_point_base', org_value, - conf.SHARED_CONF_GROUP) - - # fail configuration.gpfs_images_share_mode and - # configuration.gpfs_images_dir is None - self.override_config('gpfs_images_share_mode', 'copy', - conf.SHARED_CONF_GROUP) - self.override_config('gpfs_images_dir', None, conf.SHARED_CONF_GROUP) - org_value_dir = self.driver.configuration.gpfs_images_dir - self.assertRaises(exception.VolumeBackendAPIException, - self.driver.check_for_setup_error) - self.override_config('gpfs_images_dir', org_value_dir, - conf.SHARED_CONF_GROUP) - - # fail configuration.gpfs_images_share_mode == 'copy_on_write' and not - # _same_filesystem(configuration.gpfs_mount_point_base, - # configuration.gpfs_images_dir) - self.override_config('gpfs_images_share_mode', 'copy_on_write', - conf.SHARED_CONF_GROUP) - with mock.patch('cinder.volume.drivers.ibm.gpfs._same_filesystem', - return_value=False): - self.assertRaises(exception.VolumeBackendAPIException, - self.driver.check_for_setup_error) - - # fail self.configuration.gpfs_images_share_mode == 'copy_on_write' and - # not self._is_same_fileset(self.configuration.gpfs_mount_point_base, - # self.configuration.gpfs_images_dir) - with mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver.' 
- '_is_same_fileset', return_value=False): - self.assertRaises(exception.VolumeBackendAPIException, - self.driver.check_for_setup_error) - - # fail directory is None - self.override_config('gpfs_images_share_mode', None, - conf.SHARED_CONF_GROUP) - org_value_dir = self.driver.configuration.gpfs_images_dir - self.override_config('gpfs_images_dir', None, conf.SHARED_CONF_GROUP) - with mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver.' - '_get_gpfs_cluster_release_level', - return_value=fake_cluster_release): - self.driver.check_for_setup_error() - self.override_config('gpfs_images_dir', org_value_dir, - conf.SHARED_CONF_GROUP) - - # fail directory.startswith('/') - org_value_mount = self.driver.configuration.gpfs_mount_point_base - self.override_config('gpfs_mount_point_base', '_' + self.volumes_path, - conf.SHARED_CONF_GROUP) - self.override_config('gpfs_images_share_mode', None, - conf.SHARED_CONF_GROUP) - with mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver.' - '_get_gpfs_cluster_release_level', - return_value=fake_cluster_release): - self.assertRaises(exception.VolumeBackendAPIException, - self.driver.check_for_setup_error) - self.override_config('gpfs_mount_point_base', org_value_mount, - conf.SHARED_CONF_GROUP) - - # fail os.path.isdir(directory) - org_value_mount = self.driver.configuration.gpfs_mount_point_base - self.override_config('gpfs_mount_point_base', self.volumes_path + '_', - conf.SHARED_CONF_GROUP) - org_value_dir = self.driver.configuration.gpfs_images_dir - self.override_config('gpfs_images_dir', None, conf.SHARED_CONF_GROUP) - with mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver.' 
- '_get_gpfs_cluster_release_level', - return_value=fake_cluster_release): - self.assertRaises(exception.VolumeBackendAPIException, - self.driver.check_for_setup_error) - self.override_config('gpfs_mount_point_base', org_value_mount, - conf.SHARED_CONF_GROUP) - self.override_config('gpfs_images_dir', org_value_dir, - conf.SHARED_CONF_GROUP) - - # fail not cluster release level >= GPFS_CLONE_MIN_RELEASE - org_fake_cluster_release = fake_cluster_release - fake_cluster_release = 1105 - self.override_config('gpfs_mount_point_base', self.volumes_path, - conf.SHARED_CONF_GROUP) - self.override_config('gpfs_images_dir', None, conf.SHARED_CONF_GROUP) - with mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver.' - '_get_gpfs_cluster_release_level', - return_value=fake_cluster_release): - with mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver.' - '_get_gpfs_fs_release_level', - return_value=(fake_fs, fake_fs_release)): - self.assertRaises(exception.VolumeBackendAPIException, - self.driver.check_for_setup_error) - fake_cluster_release = org_fake_cluster_release - - # fail not fs release level >= GPFS_CLONE_MIN_RELEASE - org_fake_fs_release = fake_fs_release - fake_fs_release = 1105 - self.override_config('gpfs_mount_point_base', self.volumes_path, - conf.SHARED_CONF_GROUP) - self.override_config('gpfs_images_dir', None, conf.SHARED_CONF_GROUP) - with mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver.' - '_get_gpfs_cluster_release_level', - return_value=fake_cluster_release): - with mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver.' 
- '_get_gpfs_fs_release_level', - return_value=(fake_fs, fake_fs_release)): - self.assertRaises(exception.VolumeBackendAPIException, - self.driver.check_for_setup_error) - fake_fs_release = org_fake_fs_release - - @mock.patch('cinder.utils.execute') - def test_create_sparse_file(self, mock_exec): - self.driver._create_sparse_file('', 100) - - @mock.patch('cinder.utils.execute') - def test_allocate_file_blocks(self, mock_exec): - self.driver._allocate_file_blocks(os.path.join(self.images_dir, - 'test'), 1) - - @mock.patch('cinder.utils.execute') - def test_gpfs_change_attributes(self, mock_exec): - options = [] - options.extend(['-T', 'test']) - self.driver._gpfs_change_attributes(options, self.images_dir) - - @mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver._mkfs') - @mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver.' - '_gpfs_change_attributes') - def test_set_volume_attributes(self, mock_change_attributes, mock_mkfs): - metadata = {'data_pool_name': 'test', - 'replicas': 'test', - 'dio': 'test', - 'write_affinity_depth': 'test', - 'block_group_factor': 'test', - 'write_affinity_failure_group': 'test', - 'fstype': 'test', - 'fslabel': 'test', - 'test': 'test'} - - self.driver._set_volume_attributes('', '', metadata) - - @mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver.' - '_gpfs_change_attributes') - def test_set_volume_attributes_no_attributes(self, mock_change_attributes): - metadata = {} - org_value = self.driver.configuration.gpfs_storage_pool - self.override_config('gpfs_storage_pool', 'system', - conf.SHARED_CONF_GROUP) - self.driver._set_volume_attributes('', '', metadata) - self.override_config('gpfs_storage_pool', org_value, - conf.SHARED_CONF_GROUP) - - @mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver.' 
- '_gpfs_change_attributes') - def test_set_volume_attributes_no_options(self, mock_change_attributes): - metadata = {} - org_value = self.driver.configuration.gpfs_storage_pool - self.override_config('gpfs_storage_pool', '', conf.SHARED_CONF_GROUP) - self.driver._set_volume_attributes('', '', metadata) - self.override_config('gpfs_storage_pool', org_value, - conf.SHARED_CONF_GROUP) - - @mock.patch('cinder.utils.execute') - @mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver.' - '_allocate_file_blocks') - @mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver.' - '_set_volume_attributes') - @mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver.' - '_set_rw_permission') - @mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver.' - '_create_sparse_file') - @mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver.local_path') - @mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver.' - '_verify_gpfs_path_state') - def test_create_volume(self, - mock_gpfs_path_state, - mock_local_path, - mock_sparse_file, - mock_rw_permission, - mock_set_volume_attributes, - mock_allocate_file_blocks, - mock_exec): - mock_local_path.return_value = 'test' - volume = self._fake_volume() - value = {} - value['value'] = 'test' - - org_value = self.driver.configuration.gpfs_sparse_volumes - self.override_config('gpfs_sparse_volumes', False, - conf.SHARED_CONF_GROUP) - self.driver.create_volume(volume) - self.override_config('gpfs_sparse_volumes', org_value, - conf.SHARED_CONF_GROUP) - - @mock.patch('cinder.utils.execute') - @mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver.' - '_allocate_file_blocks') - @mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver.' - '_set_volume_attributes') - @mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver.' - '_set_rw_permission') - @mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver.' - '_create_sparse_file') - @mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver.local_path') - @mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver.' 
- '_verify_gpfs_path_state') - def test_create_volume_no_sparse_volume(self, - mock_gpfs_path_state, - mock_local_path, - mock_sparse_file, - mock_rw_permission, - mock_set_volume_attributes, - mock_allocate_file_blocks, - mock_exec): - mock_local_path.return_value = 'test' - volume = self._fake_volume() - value = {} - value['value'] = 'test' - - org_value = self.driver.configuration.gpfs_sparse_volumes - self.override_config('gpfs_sparse_volumes', True, - conf.SHARED_CONF_GROUP) - self.driver.create_volume(volume) - self.override_config('gpfs_sparse_volumes', org_value, - conf.SHARED_CONF_GROUP) - - @mock.patch('cinder.utils.execute') - @mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver.' - '_allocate_file_blocks') - @mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver.' - '_set_volume_attributes') - @mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver.' - '_set_rw_permission') - @mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver.' - '_create_sparse_file') - @mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver.local_path') - @mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver.' - '_verify_gpfs_path_state') - def test_create_volume_with_metadata(self, - mock_gpfs_path_state, - mock_local_path, - mock_sparse_file, - mock_rw_permission, - mock_set_volume_attributes, - mock_allocate_file_blocks, - mock_exec): - mock_local_path.return_value = 'test' - volume = self._fake_volume() - value = {} - value['value'] = 'test' - mock_set_volume_attributes.return_value = True - metadata = {'fake_key': 'fake_value'} - - org_value = self.driver.configuration.gpfs_sparse_volumes - self.override_config('gpfs_sparse_volumes', True, - conf.SHARED_CONF_GROUP) - self.driver.create_volume(volume) - self.assertTrue(self.driver._set_volume_attributes(volume, 'test', - metadata)) - self.override_config('gpfs_sparse_volumes', org_value, - conf.SHARED_CONF_GROUP) - - @mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver.' 
- '_resize_volume_file') - @mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver.' - '_set_volume_attributes') - @mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver._gpfs_redirect') - @mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver.' - '_set_rw_permission') - @mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver._create_gpfs_copy') - @mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver._gpfs_full_copy') - @mock.patch('cinder.volume.drivers.ibm.gpfs.' - 'GPFSDriver._get_snapshot_path') - @mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver.local_path') - def test_create_volume_from_snapshot(self, - mock_local_path, - mock_snapshot_path, - mock_gpfs_full_copy, - mock_create_gpfs_copy, - mock_rw_permission, - mock_gpfs_redirect, - mock_set_volume_attributes, - mock_resize_volume_file): - mock_resize_volume_file.return_value = 5 * units.Gi - volume = self._fake_volume() - volume['group_id'] = None - self.driver.db = mock.Mock() - self.driver.db.volume_get = mock.Mock() - self.driver.db.volume_get.return_value = volume - snapshot = self._fake_snapshot() - mock_snapshot_path.return_value = "/tmp/fakepath" - self.assertEqual({'size': 5.0}, - self.driver.create_volume_from_snapshot(volume, - snapshot)) - - @mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver.' - '_resize_volume_file') - @mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver.' - '_set_volume_attributes') - @mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver._gpfs_redirect') - @mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver.' - '_set_rw_permission') - @mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver._create_gpfs_copy') - @mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver._gpfs_full_copy') - @mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver.' 
- '_get_snapshot_path') - @mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver.local_path') - def test_create_volume_from_snapshot_metadata(self, - mock_local_path, - mock_snapshot_path, - mock_gpfs_full_copy, - mock_create_gpfs_copy, - mock_rw_permission, - mock_gpfs_redirect, - mock_set_volume_attributes, - mock_resize_volume_file): - mock_resize_volume_file.return_value = 5 * units.Gi - volume = self._fake_volume() - volume['group_id'] = None - self.driver.db = mock.Mock() - self.driver.db.volume_get = mock.Mock() - self.driver.db.volume_get.return_value = volume - snapshot = self._fake_snapshot() - mock_snapshot_path.return_value = "/tmp/fakepath" - mock_set_volume_attributes.return_value = True - metadata = {'fake_key': 'fake_value'} - - self.assertTrue(self.driver._set_volume_attributes(volume, 'test', - metadata)) - self.assertEqual({'size': 5.0}, - self.driver.create_volume_from_snapshot(volume, - snapshot)) - - @mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver.' - '_resize_volume_file') - @mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver.' - '_set_volume_attributes') - @mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver.' - '_set_rw_permission') - @mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver.' - '_create_gpfs_clone') - @mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver._gpfs_full_copy') - @mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver.local_path') - def test_create_cloned_volume(self, - mock_local_path, - mock_gpfs_full_copy, - mock_create_gpfs_clone, - mock_rw_permission, - mock_set_volume_attributes, - mock_resize_volume_file): - mock_resize_volume_file.return_value = 5 * units.Gi - volume = self._fake_volume() - src_volume = self._fake_volume() - self.assertEqual({'size': 5.0}, - self.driver.create_cloned_volume(volume, src_volume)) - - @mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver.' - '_resize_volume_file') - @mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver.' 
- '_set_volume_attributes') - @mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver.' - '_set_rw_permission') - @mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver.' - '_create_gpfs_clone') - @mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver._gpfs_full_copy') - @mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver.local_path') - def test_create_cloned_volume_with_metadata(self, - mock_local_path, - mock_gpfs_full_copy, - mock_create_gpfs_clone, - mock_rw_permission, - mock_set_volume_attributes, - mock_resize_volume_file): - mock_resize_volume_file.return_value = 5 * units.Gi - volume = self._fake_volume() - src_volume = self._fake_volume() - mock_set_volume_attributes.return_value = True - metadata = {'fake_key': 'fake_value'} - - self.assertTrue(self.driver._set_volume_attributes(volume, 'test', - metadata)) - self.assertEqual({'size': 5.0}, - self.driver.create_cloned_volume(volume, src_volume)) - - @mock.patch('cinder.utils.execute') - def test_delete_gpfs_file_ok(self, mock_exec): - mock_exec.side_effect = [('Parent Depth Parent inode File name\n' - '------ ----- -------------- ---------\n' - ' no 2 148488 ' - '/gpfs0/test.txt', ''), - ('', ''), - ('', '')] - self.driver._delete_gpfs_file(self.images_dir) - self.driver._delete_gpfs_file(self.images_dir + '_') - - mock_exec.side_effect = [('Parent Depth Parent inode File name\n' - '------ ----- -------------- ---------\n' - ' ' - '/gpfs0/test.txt', ''), - ('', '')] - self.driver._delete_gpfs_file(self.images_dir) - - @mock.patch('os.path.exists') - @mock.patch('cinder.utils.execute') - def test_delete_gpfs_file_ok_parent(self, mock_exec, mock_path_exists): - mock_path_exists.side_effect = [True, False, False, - True, False, False, - True, False, False] - mock_exec.side_effect = [('Parent Depth Parent inode File name\n' - '------ ----- -------------- ---------\n' - ' no 2 148488 ' - '/gpfs0/test.txt', ''), - ('/gpfs0/test.snap\ntest', ''), - ('', '')] - self.driver._delete_gpfs_file(self.images_dir) - 
mock_exec.side_effect = [('Parent Depth Parent inode File name\n' - '------ ----- -------------- ---------\n' - ' no 2 148488 ' - '/gpfs0/test.txt', ''), - ('/gpfs0/test.ts\ntest', ''), - ('', '')] - self.driver._delete_gpfs_file(self.images_dir) - mock_exec.side_effect = [('Parent Depth Parent inode File name\n' - '------ ----- -------------- ---------\n' - ' no 2 148488 ' - '/gpfs0/test.txt', ''), - ('/gpfs0/test.txt\ntest', ''), - ('', '')] - self.driver._delete_gpfs_file(self.images_dir) - - @mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver._delete_gpfs_file') - @mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver.local_path') - @mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver.' - '_verify_gpfs_path_state') - def test_delete_volume(self, - mock_verify_gpfs_path_state, - mock_local_path, - mock_delete_gpfs_file): - self.driver.delete_volume('') - - @mock.patch('cinder.utils.execute') - def test_gpfs_redirect_ok(self, mock_exec): - org_value = self.driver.configuration.gpfs_max_clone_depth - self.override_config('gpfs_max_clone_depth', 1, conf.SHARED_CONF_GROUP) - mock_exec.side_effect = [('Parent Depth Parent inode File name\n' - '------ ----- -------------- ---------\n' - ' no 2 148488 ' - '/gpfs0/test.txt', ''), - ('', '')] - self.assertTrue(self.driver._gpfs_redirect('')) - self.override_config('gpfs_max_clone_depth', 1, conf.SHARED_CONF_GROUP) - mock_exec.side_effect = [('Parent Depth Parent inode File name\n' - '------ ----- -------------- ---------\n' - ' no 1 148488 ' - '/gpfs0/test.txt', ''), - ('', '')] - self.assertFalse(self.driver._gpfs_redirect('')) - self.override_config('gpfs_max_clone_depth', org_value, - conf.SHARED_CONF_GROUP) - - @mock.patch('cinder.utils.execute') - def test_gpfs_redirect_fail_depth(self, mock_exec): - org_value = self.driver.configuration.gpfs_max_clone_depth - self.override_config('gpfs_max_clone_depth', 0, conf.SHARED_CONF_GROUP) - mock_exec.side_effect = [('Parent Depth Parent inode File name\n' - '------ 
----- -------------- ---------\n' - ' no 2 148488 ' - '/gpfs0/test.txt', ''), - ('', '')] - self.assertFalse(self.driver._gpfs_redirect('')) - self.override_config('gpfs_max_clone_depth', org_value, - conf.SHARED_CONF_GROUP) - - @mock.patch('cinder.utils.execute') - def test_gpfs_redirect_fail_match(self, mock_exec): - org_value = self.driver.configuration.gpfs_max_clone_depth - self.override_config('gpfs_max_clone_depth', 1, conf.SHARED_CONF_GROUP) - mock_exec.side_effect = [('Parent Depth Parent inode File name\n' - '------ ----- -------------- ---------\n' - ' 148488 ' - '/gpfs0/test.txt', ''), - ('', '')] - self.assertFalse(self.driver._gpfs_redirect('')) - self.override_config('gpfs_max_clone_depth', org_value, - conf.SHARED_CONF_GROUP) - - @mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver._create_gpfs_snap') - @mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver._create_gpfs_copy') - @mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver._gpfs_redirect') - @mock.patch('cinder.utils.execute') - def test_create_gpfs_clone(self, - mock_exec, - mock_redirect, - mock_cr_gpfs_cp, - mock_cr_gpfs_snap): - mock_redirect.return_value = True - self.driver._create_gpfs_clone('', '') - mock_redirect.side_effect = [True, False] - self.driver._create_gpfs_clone('', '') - - @mock.patch('cinder.utils.execute') - def test_create_gpfs_copy(self, mock_exec): - self.driver._create_gpfs_copy('', '') - - @mock.patch('cinder.utils.execute') - def test_create_gpfs_snap(self, mock_exec): - self.driver._create_gpfs_snap('') - self.driver._create_gpfs_snap('', '') - - @mock.patch('cinder.utils.execute') - def test_is_gpfs_parent_file_ok(self, mock_exec): - mock_exec.side_effect = [('Parent Depth Parent inode File name\n' - '------ ----- -------------- ---------\n' - ' yes 2 148488 ' - '/gpfs0/test.txt', ''), - ('Parent Depth Parent inode File name\n' - '------ ----- -------------- ---------\n' - ' no 2 148488 ' - '/gpfs0/test.txt', '')] - 
self.assertTrue(self.driver._is_gpfs_parent_file('')) - self.assertFalse(self.driver._is_gpfs_parent_file('')) - - @mock.patch('cinder.objects.volume.Volume.get_by_id') - @mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver._gpfs_redirect') - @mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver.' - '_set_rw_permission') - @mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver._create_gpfs_snap') - @mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver.local_path') - @mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver.' - '_get_snapshot_path') - def test_create_snapshot(self, - mock_get_snapshot_path, - mock_local_path, - mock_create_gpfs_snap, - mock_set_rw_permission, - mock_gpfs_redirect, - mock_vol_get_by_id): - mock_get_snapshot_path.return_value = "/tmp/fakepath" - - vol = self._fake_volume() - mock_vol_get_by_id.return_value = vol - self.driver.create_snapshot(self._fake_snapshot()) - - @mock.patch('cinder.utils.execute') - @mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver.' 
- '_get_snapshot_path') - def test_delete_snapshot(self, - mock_snapshot_path, - mock_exec): - snapshot = self._fake_snapshot() - snapshot_path = "/tmp/fakepath" - mock_snapshot_path.return_value = snapshot_path - snapshot_ts_path = '%s.ts' % snapshot_path - self.driver.delete_snapshot(snapshot) - mock_exec.assert_any_call('mv', snapshot_path, - snapshot_ts_path) - mock_exec.assert_any_call('rm', '-f', snapshot_ts_path, - check_exit_code=False) - - def test_ensure_export(self): - self.assertIsNone(self.driver.ensure_export('', '')) - - def test_create_export(self): - self.assertIsNone(self.driver.create_export('', '', {})) - - def test_remove_export(self): - self.assertIsNone(self.driver.remove_export('', '')) - - @mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver.local_path') - def test_initialize_connection(self, mock_local_path): - volume = self._fake_volume() - mock_local_path.return_value = "/tmp/fakepath" - data = self.driver.initialize_connection(volume, '') - self.assertEqual(volume.name, data['data']['name']) - self.assertEqual("/tmp/fakepath", data['data']['device_path']) - self.assertEqual('gpfs', data['driver_volume_type']) - - def test_terminate_connection(self): - self.assertIsNone(self.driver.terminate_connection('', '')) - - def test_get_volume_stats(self): - fake_avail = 80 * units.Gi - fake_size = 2 * fake_avail - with mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver.' 
- '_get_available_capacity', - return_value=(fake_avail, fake_size)): - stats = self.driver.get_volume_stats() - self.assertEqual('GPFS', stats['volume_backend_name']) - self.assertEqual('file', stats['storage_protocol']) - self.assertEqual('True', stats['gpfs_encryption_rest']) - stats = self.driver.get_volume_stats(True) - self.assertEqual('GPFS', stats['volume_backend_name']) - self.assertEqual('file', stats['storage_protocol']) - self.assertEqual('True', stats['gpfs_encryption_rest']) - - @mock.patch('cinder.utils.execute') - def test_get_gpfs_encryption_status_true(self, mock_exec): - mock_exec.return_value = ('mmlsfs::HEADER:version:reserved:reserved:' - 'deviceName:fieldName:data:remarks:\n' - 'mmlsfs::0:1:::gpfs:encryption:Yes:', '') - self.assertEqual('Yes', self.driver._get_gpfs_encryption_status()) - - @mock.patch('cinder.utils.execute') - def test_get_gpfs_encryption_status_false(self, mock_exec): - mock_exec.return_value = ('mmlsfs::HEADER:version:reserved:reserved:' - 'deviceName:fieldName:data:remarks:\n' - 'mmlsfs::0:1:::gpfs:encryption:No:', '') - self.assertEqual('No', self.driver._get_gpfs_encryption_status()) - - @mock.patch('cinder.utils.execute') - def test_get_gpfs_encryption_status_fail(self, mock_exec): - mock_exec.side_effect = ( - processutils.ProcessExecutionError(stdout='test', stderr='test')) - self.assertRaises(exception.VolumeBackendAPIException, - self.driver._get_gpfs_encryption_status) - - @mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver.' 
- '_update_volume_stats') - def test_get_volume_stats_none_stats(self, mock_upd_vol_stats): - _stats_org = self.driver._stats - self.driver._stats = mock.Mock() - self.driver._stats.return_value = None - self.driver.get_volume_stats() - self.driver._stats = _stats_org - - @mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver._clone_image') - def test_clone_image_pub(self, mock_exec): - self.driver.clone_image('', '', '', {'id': 1}, '') - - @mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver._is_gpfs_path') - def test_is_cloneable_ok(self, mock_is_gpfs_path): - self.override_config('gpfs_images_share_mode', 'copy', - conf.SHARED_CONF_GROUP) - self.override_config('gpfs_images_dir', self.images_dir, - conf.SHARED_CONF_GROUP) - CONF.gpfs_images_dir = self.images_dir - mock_is_gpfs_path.return_value = None - self.assertEqual((True, None, os.path.join(CONF.gpfs_images_dir, - '12345')), - self.driver._is_cloneable('12345')) - - @mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver._is_gpfs_path') - def test_is_cloneable_fail_path(self, mock_is_gpfs_path): - self.override_config('gpfs_images_share_mode', 'copy', - conf.SHARED_CONF_GROUP) - CONF.gpfs_images_dir = self.images_dir - mock_is_gpfs_path.side_effect = ( - processutils.ProcessExecutionError(stdout='test', stderr='test')) - self.assertNotEqual((True, None, os.path.join(CONF.gpfs_images_dir, - '12345')), - self.driver._is_cloneable('12345')) - - @mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver.' - '_resize_volume_file') - @mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver.' - '_set_rw_permission') - @mock.patch('cinder.image.image_utils.convert_image') - @mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver._create_gpfs_copy') - @mock.patch('cinder.image.image_utils.qemu_img_info') - @mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver._create_gpfs_snap') - @mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver.' 
- '_is_gpfs_parent_file') - @mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver.local_path') - @mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver._is_cloneable') - @mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver.' - '_verify_gpfs_path_state') - def test_clone_image_clonable(self, - mock_verify_gpfs_path_state, - mock_is_cloneable, - mock_local_path, - mock_is_gpfs_parent_file, - mock_create_gpfs_snap, - mock_qemu_img_info, - mock_create_gpfs_copy, - mock_conv_image, - mock_set_rw_permission, - mock_resize_volume_file): - mock_is_cloneable.return_value = (True, 'test', self.images_dir) - mock_is_gpfs_parent_file.return_value = False - mock_qemu_img_info.return_value = self._fake_qemu_qcow2_image_info('') - volume = self._fake_volume() - self.assertEqual(({'provider_location': None}, True), - self.driver._clone_image(volume, '', 1)) - - @mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver._is_cloneable') - @mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver' - '._verify_gpfs_path_state') - def test_clone_image_not_cloneable(self, - mock_verify_gpfs_path_state, - mock_is_cloneable): - mock_is_cloneable.return_value = (False, 'test', self.images_dir) - volume = self._fake_volume() - self.assertEqual((None, False), - self.driver._clone_image(volume, '', 1)) - - @mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver.' - '_resize_volume_file') - @mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver.' - '_set_rw_permission') - @mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver._create_gpfs_copy') - @mock.patch('cinder.image.image_utils.qemu_img_info') - @mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver._create_gpfs_snap') - @mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver.' - '_is_gpfs_parent_file') - @mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver.local_path') - @mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver._is_cloneable') - @mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver.' 
- '_verify_gpfs_path_state') - def test_clone_image_format_raw_copy_on_write(self, - mock_verify_gpfs_path_state, - mock_is_cloneable, - mock_local_path, - mock_is_gpfs_parent_file, - mock_create_gpfs_snap, - mock_qemu_img_info, - mock_create_gpfs_copy, - mock_set_rw_permission, - mock_resize_volume_file): - mock_is_cloneable.return_value = (True, 'test', self.images_dir) - mock_local_path.return_value = self.volumes_path - mock_is_gpfs_parent_file.return_value = False - mock_qemu_img_info.return_value = self._fake_qemu_raw_image_info('') - volume = self._fake_volume() - org_value = self.driver.configuration.gpfs_images_share_mode - self.override_config('gpfs_images_share_mode', 'copy_on_write', - conf.SHARED_CONF_GROUP) - self.assertEqual(({'provider_location': None}, True), - self.driver._clone_image(volume, '', 1)) - mock_create_gpfs_snap.assert_called_once_with(self.images_dir) - - self.override_config('gpfs_images_share_mode', org_value, - conf.SHARED_CONF_GROUP) - - @mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver.' - '_resize_volume_file') - @mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver.' - '_set_rw_permission') - @mock.patch('shutil.copyfile') - @mock.patch('cinder.image.image_utils.qemu_img_info') - @mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver.' - '_is_gpfs_parent_file') - @mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver.local_path') - @mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver._is_cloneable') - @mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver.' 
- '_verify_gpfs_path_state') - def test_clone_image_format_raw_copy(self, - mock_verify_gpfs_path_state, - mock_is_cloneable, - mock_local_path, - mock_is_gpfs_parent_file, - mock_qemu_img_info, - mock_copyfile, - mock_set_rw_permission, - mock_resize_volume_file): - mock_is_cloneable.return_value = (True, 'test', self.images_dir) - mock_local_path.return_value = self.volumes_path - mock_qemu_img_info.return_value = self._fake_qemu_raw_image_info('') - volume = self._fake_volume() - org_value = self.driver.configuration.gpfs_images_share_mode - - self.override_config('gpfs_images_share_mode', 'copy', - conf.SHARED_CONF_GROUP) - self.assertEqual(({'provider_location': None}, True), - self.driver._clone_image(volume, '', 1)) - mock_copyfile.assert_called_once_with(self.images_dir, - self.volumes_path) - - self.override_config('gpfs_images_share_mode', org_value, - conf.SHARED_CONF_GROUP) - - @mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver.' - '_resize_volume_file') - @mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver.' - '_set_rw_permission') - @mock.patch('cinder.image.image_utils.convert_image') - @mock.patch('cinder.image.image_utils.qemu_img_info') - @mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver.local_path') - @mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver._is_cloneable') - @mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver.' 
- '_verify_gpfs_path_state') - def test_clone_image_format_qcow2(self, - mock_verify_gpfs_path_state, - mock_is_cloneable, - mock_local_path, - mock_qemu_img_info, - mock_conv_image, - mock_set_rw_permission, - mock_resize_volume_file): - mock_is_cloneable.return_value = (True, 'test', self.images_dir) - mock_local_path.return_value = self.volumes_path - mock_qemu_img_info.return_value = self._fake_qemu_qcow2_image_info('') - volume = self._fake_volume() - self.assertEqual(({'provider_location': None}, True), - self.driver._clone_image(volume, '', 1)) - mock_conv_image.assert_called_once_with(self.images_dir, - self.volumes_path, - 'raw') - - @mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver.' - '_resize_volume_file') - @mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver.local_path') - @mock.patch('cinder.image.image_utils.fetch_to_raw') - @mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver.' - '_verify_gpfs_path_state') - def test_copy_image_to_volume(self, - mock_verify_gpfs_path_state, - mock_fetch_to_raw, - mock_local_path, - mock_resize_volume_file): - volume = self._fake_volume() - self.driver.copy_image_to_volume('', volume, '', 1) - - @mock.patch('cinder.image.image_utils.qemu_img_info') - @mock.patch('cinder.image.image_utils.resize_image') - @mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver.local_path') - def test_resize_volume_file_ok(self, - mock_local_path, - mock_resize_image, - mock_qemu_img_info): - volume = self._fake_volume() - mock_qemu_img_info.return_value = self._fake_qemu_qcow2_image_info('') - self.assertEqual(self._fake_qemu_qcow2_image_info('').virtual_size, - self.driver._resize_volume_file(volume, 2000)) - - @mock.patch('cinder.image.image_utils.qemu_img_info') - @mock.patch('cinder.image.image_utils.resize_image') - @mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver.local_path') - def test_resize_volume_file_fail(self, - mock_local_path, - mock_resize_image, - mock_qemu_img_info): - volume = self._fake_volume() - 
mock_resize_image.side_effect = ( - processutils.ProcessExecutionError(stdout='test', stderr='test')) - mock_qemu_img_info.return_value = self._fake_qemu_qcow2_image_info('') - self.assertRaises(exception.VolumeBackendAPIException, - self.driver._resize_volume_file, volume, 2000) - - @mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver.' - '_resize_volume_file') - def test_extend_volume(self, mock_resize_volume_file): - volume = self._fake_volume() - self.driver.extend_volume(volume, 2000) - - @mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver.local_path') - @mock.patch('cinder.image.image_utils.upload_volume') - def test_copy_volume_to_image(self, mock_upload_volume, mock_local_path): - volume = self._fake_volume() - self.driver.copy_volume_to_image('', volume, '', '') - - @mock.patch('cinder.utils.execute') - @mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver.' - '_get_volume_path') - @mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver.' - '_can_migrate_locally') - def test_migrate_volume_ok(self, mock_local, volume_path, mock_exec): - volume = self._fake_volume() - host = {} - host = {'host': 'foo', 'capabilities': {}} - mock_local.return_value = (self.driver.configuration. - gpfs_mount_point_base + '_') - self.assertEqual((True, None), - self.driver._migrate_volume(volume, host)) - - @mock.patch('cinder.utils.execute') - @mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver.' - '_can_migrate_locally') - def test_migrate_volume_fail_dest_path(self, mock_local, mock_exec): - volume = self._fake_volume() - host = {} - host = {'host': 'foo', 'capabilities': {}} - mock_local.return_value = None - self.assertEqual((False, None), - self.driver._migrate_volume(volume, host)) - - @mock.patch('cinder.utils.execute') - @mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver.' 
- '_can_migrate_locally') - def test_migrate_volume_fail_mpb(self, mock_local, mock_exec): - volume = self._fake_volume() - host = {} - host = {'host': 'foo', 'capabilities': {}} - mock_local.return_value = (self.driver.configuration. - gpfs_mount_point_base) - mock_exec.side_effect = ( - processutils.ProcessExecutionError(stdout='test', stderr='test')) - self.assertEqual((True, None), - self.driver._migrate_volume(volume, host)) - - @mock.patch('cinder.utils.execute') - @mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver.' - '_get_volume_path') - @mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver.' - '_can_migrate_locally') - def test_migrate_volume_fail_mv(self, mock_local, mock_path, mock_exec): - volume = self._fake_volume() - host = {} - host = {'host': 'foo', 'capabilities': {}} - mock_local.return_value = ( - self.driver.configuration.gpfs_mount_point_base + '_') - mock_exec.side_effect = ( - processutils.ProcessExecutionError(stdout='test', stderr='test')) - self.assertEqual((False, None), - self.driver._migrate_volume(volume, host)) - - @mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver._migrate_volume') - def test_migrate_volume_ok_pub(self, mock_migrate_volume): - self.driver.migrate_volume('', '', '') - - @mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver._migrate_volume') - @mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver.' 
- '_update_volume_storage_pool') - @mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver.local_path') - @mock.patch('cinder.volume.drivers.ibm.gpfs._different') - def test_retype_ok(self, mock_different, local_path, - mock_strg_pool, mock_migrate_vol): - ctxt = self.context - (volume, new_type, diff, host) = self._fake_retype_arguments() - self.driver.db = mock.Mock() - mock_different.side_effect = [False, True, True] - mock_strg_pool.return_value = True - mock_migrate_vol.return_value = (True, True) - self.assertTrue(self.driver.retype(ctxt, volume, new_type, diff, host)) - - @mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver._migrate_volume') - @mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver.' - '_update_volume_storage_pool') - @mock.patch('cinder.volume.drivers.ibm.gpfs._different') - def test_retype_diff_backend(self, - mock_different, - mock_strg_pool, - mock_migrate_vol): - ctxt = self.context - (volume, new_type, diff, host) = self._fake_retype_arguments() - mock_different.side_effect = [True, True, True] - self.assertFalse(self.driver.retype(ctxt, - volume, - new_type, - diff, host)) - - @mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver._migrate_volume') - @mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver.' - '_update_volume_storage_pool') - @mock.patch('cinder.volume.drivers.ibm.gpfs._different') - def test_retype_diff_pools_migrated(self, - mock_different, - mock_strg_pool, - mock_migrate_vol): - ctxt = self.context - (volume, new_type, diff, host) = self._fake_retype_arguments() - self.driver.db = mock.Mock() - mock_different.side_effect = [False, False, True] - mock_strg_pool.return_value = True - mock_migrate_vol.return_value = (True, True) - self.assertTrue(self.driver.retype(ctxt, volume, new_type, diff, host)) - - @mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver._migrate_volume') - @mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver.' 
- '_update_volume_storage_pool') - @mock.patch('cinder.volume.drivers.ibm.gpfs._different') - def test_retype_diff_pools(self, - mock_different, - mock_strg_pool, - mock_migrate_vol): - ctxt = self.context - (volume, new_type, diff, host) = self._fake_retype_arguments() - mock_different.side_effect = [False, False, True] - mock_strg_pool.return_value = True - mock_migrate_vol.return_value = (False, False) - self.assertFalse(self.driver.retype(ctxt, - volume, - new_type, - diff, - host)) - - @mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver._migrate_volume') - @mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver.' - '_update_volume_storage_pool') - @mock.patch('cinder.volume.drivers.ibm.gpfs._different') - def test_retype_no_diff_hit(self, - mock_different, - mock_strg_pool, - mock_migrate_vol): - ctxt = self.context - (volume, new_type, diff, host) = self._fake_retype_arguments() - mock_different.side_effect = [False, False, False] - self.assertFalse(self.driver.retype(ctxt, - volume, - new_type, - diff, - host)) - - @mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver.local_path') - @mock.patch('cinder.utils.execute') - def test_mkfs_ok(self, mock_exec, local_path): - volume = self._fake_volume() - self.driver._mkfs(volume, 'swap') - self.driver._mkfs(volume, 'swap', 'test') - self.driver._mkfs(volume, 'ext3', 'test') - self.driver._mkfs(volume, 'vfat', 'test') - - @mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver.local_path') - @mock.patch('cinder.utils.execute') - def test_mkfs_fail_mk(self, mock_exec, local_path): - volume = self._fake_volume() - mock_exec.side_effect = ( - processutils.ProcessExecutionError(stdout='test', stderr='test')) - self.assertRaises(exception.VolumeBackendAPIException, - self.driver._mkfs, volume, 'swap', 'test') - - @mock.patch('cinder.utils.execute') - def test_get_available_capacity_ok(self, mock_exec): - mock_exec.return_value = ('Filesystem 1-blocks Used ' - 'Available Capacity Mounted on\n' - '/dev/gpfs 10737418240 
544735232 ' - '10192683008 6%% /gpfs0', '') - self.assertEqual((10192683008, 10737418240), - self.driver._get_available_capacity('/gpfs0')) - - @mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver.' - '_verify_gpfs_path_state') - @mock.patch('cinder.utils.execute') - def test_get_available_capacity_fail_mounted(self, - mock_exec, - mock_path_state): - mock_path_state.side_effect = ( - exception.VolumeBackendAPIException('test')) - mock_exec.return_value = ('Filesystem 1-blocks Used ' - 'Available Capacity Mounted on\n' - '/dev/gpfs 10737418240 544735232 ' - '10192683008 6%% /gpfs0', '') - self.assertEqual((0, 0), self.driver._get_available_capacity('/gpfs0')) - - @mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver._is_gpfs_path') - def test_verify_gpfs_path_state_ok(self, mock_is_gpfs_path): - self.driver._verify_gpfs_path_state(self.images_dir) - - @mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver._is_gpfs_path') - def test_verify_gpfs_path_state_fail_path(self, mock_is_gpfs_path): - mock_is_gpfs_path.side_effect = ( - processutils.ProcessExecutionError(stdout='test', stderr='test')) - self.assertRaises(exception.VolumeBackendAPIException, - self.driver._verify_gpfs_path_state, self.images_dir) - - @mock.patch('cinder.utils.execute') - def test_create_consistencygroup(self, mock_exec): - ctxt = self.context - group = self._fake_group() - self.driver._create_consistencygroup(ctxt, group) - fsdev = self.driver._gpfs_device - cgname = "consisgroup-%s" % group['id'] - cgpath = os.path.join(self.driver.configuration.gpfs_mount_point_base, - cgname) - cmd = ['mmcrfileset', fsdev, cgname, '--inode-space', 'new'] - mock_exec.assert_any_call(*cmd) - cmd = ['mmlinkfileset', fsdev, cgname, '-J', cgpath] - mock_exec.assert_any_call(*cmd) - cmd = ['chmod', '770', cgpath] - mock_exec.assert_any_call(*cmd) - - @mock.patch('cinder.utils.execute') - def test_create_consistencygroup_fail(self, mock_exec): - ctxt = self.context - group = self._fake_group() - 
mock_exec.side_effect = ( - processutils.ProcessExecutionError(stdout='test', stderr='test')) - self.assertRaises(exception.VolumeBackendAPIException, - self.driver._create_consistencygroup, ctxt, group) - - @mock.patch('cinder.utils.execute') - def test_delete_consistencygroup(self, mock_exec): - ctxt = self.context - group = self._fake_group() - group['status'] = fields.ConsistencyGroupStatus.AVAILABLE - volume = self._fake_volume() - volume['status'] = 'available' - volumes = [] - volumes.append(volume) - self.driver.db = mock.Mock() - self.driver.db.volume_get_all_by_group = mock.Mock() - self.driver.db.volume_get_all_by_group.return_value = volumes - - self.driver._delete_consistencygroup(ctxt, group, []) - fsdev = self.driver._gpfs_device - cgname = "consisgroup-%s" % group['id'] - cmd = ['mmlsfileset', fsdev, cgname] - mock_exec.assert_any_call(*cmd) - cmd = ['mmunlinkfileset', fsdev, cgname, '-f'] - mock_exec.assert_any_call(*cmd) - cmd = ['mmdelfileset', fsdev, cgname, '-f'] - mock_exec.assert_any_call(*cmd) - - @mock.patch('cinder.utils.execute') - def test_delete_consistencygroup_no_fileset(self, mock_exec): - ctxt = self.context - group = self._fake_group() - group['status'] = fields.ConsistencyGroupStatus.AVAILABLE - volume = self._fake_volume() - volume['status'] = 'available' - volumes = [] - volumes.append(volume) - self.driver.db = mock.Mock() - self.driver.db.volume_get_all_by_group = mock.Mock() - self.driver.db.volume_get_all_by_group.return_value = volumes - mock_exec.side_effect = ( - processutils.ProcessExecutionError(exit_code=2)) - - self.driver._delete_consistencygroup(ctxt, group, []) - fsdev = self.driver._gpfs_device - cgname = "consisgroup-%s" % group['id'] - cmd = ['mmlsfileset', fsdev, cgname] - mock_exec.assert_called_once_with(*cmd) - - @mock.patch('cinder.utils.execute') - def test_delete_consistencygroup_fail(self, mock_exec): - ctxt = self.context - group = self._fake_group() - group['status'] = 
fields.ConsistencyGroupStatus.AVAILABLE - self.driver.db = mock.Mock() - self.driver.db.volume_get_all_by_group = mock.Mock() - self.driver.db.volume_get_all_by_group.return_value = [] - - mock_exec.side_effect = ( - processutils.ProcessExecutionError(stdout='test', stderr='test')) - self.assertRaises(exception.VolumeBackendAPIException, - self.driver._delete_consistencygroup, - ctxt, group, []) - - def test_update_consistencygroup(self): - ctxt = self.context - group = self._fake_group() - self.assertRaises(exception.GPFSDriverUnsupportedOperation, - self.driver._update_consistencygroup, ctxt, group) - - def test_create_consisgroup_from_src(self): - ctxt = self.context - group = self._fake_group() - self.assertRaises(exception.GPFSDriverUnsupportedOperation, - self.driver._create_consistencygroup_from_src, - ctxt, group, []) - - @mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver.create_snapshot') - def test_create_cgsnapshot(self, mock_create_snap): - ctxt = self.context - cgsnap = self._fake_cgsnapshot() - snapshot1 = self._fake_snapshot() - model_update, snapshots = self.driver._create_cgsnapshot(ctxt, cgsnap, - [snapshot1]) - self.driver.create_snapshot.assert_called_once_with(snapshot1) - self.assertEqual({'status': fields.ConsistencyGroupStatus.AVAILABLE}, - model_update) - self.assertEqual({'id': snapshot1.id, - 'status': fields.SnapshotStatus.AVAILABLE}, - snapshots[0]) - - @mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver.create_snapshot') - def test_create_cgsnapshot_empty(self, mock_create_snap): - ctxt = self.context - cgsnap = self._fake_cgsnapshot() - model_update, snapshots = self.driver._create_cgsnapshot(ctxt, cgsnap, - []) - self.assertFalse(self.driver.create_snapshot.called) - self.assertEqual({'status': fields.ConsistencyGroupStatus.AVAILABLE}, - model_update) - - @mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver.delete_snapshot') - def test_delete_cgsnapshot(self, mock_delete_snap): - ctxt = self.context - cgsnap = 
self._fake_cgsnapshot() - snapshot1 = self._fake_snapshot() - model_update, snapshots = self.driver._delete_cgsnapshot(ctxt, cgsnap, - [snapshot1]) - self.driver.delete_snapshot.assert_called_once_with(snapshot1) - self.assertEqual({'status': fields.ConsistencyGroupStatus.DELETED}, - model_update) - self.assertEqual({'id': snapshot1.id, - 'status': fields.SnapshotStatus.DELETED}, - snapshots[0]) - - @mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver.delete_snapshot') - def test_delete_cgsnapshot_empty(self, mock_delete_snap): - ctxt = self.context - cgsnap = self._fake_cgsnapshot() - model_update, snapshots = self.driver._delete_cgsnapshot(ctxt, cgsnap, - []) - self.assertFalse(self.driver.delete_snapshot.called) - self.assertEqual({'status': fields.ConsistencyGroupStatus.DELETED}, - model_update) - - def test_local_path_volume_not_in_cg(self): - volume = self._fake_volume() - volume['group_id'] = None - volume_path = os.path.join( - self.driver.configuration.gpfs_mount_point_base, - volume['name'] - ) - ret = self.driver.local_path(volume) - self.assertEqual(volume_path, ret) - - @mock.patch('cinder.db.get_by_id') - @mock.patch('cinder.volume.utils.is_group_a_cg_snapshot_type') - def test_local_path_volume_in_cg(self, mock_group_cg_snapshot_type, - mock_group_obj): - mock_group_cg_snapshot_type.return_value = True - volume = self._fake_volume() - group = self._fake_group() - mock_group_obj.return_value = group - cgname = "consisgroup-%s" % volume['group_id'] - volume_path = os.path.join( - self.driver.configuration.gpfs_mount_point_base, - cgname, - volume['name'] - ) - ret = self.driver.local_path(volume) - self.assertEqual(volume_path, ret) - - @mock.patch('cinder.context.get_admin_context') - @mock.patch('cinder.objects.volume.Volume.get_by_id') - @mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver.local_path') - def test_get_snapshot_path(self, mock_local_path, mock_vol_get_by_id, - mock_admin_context): - volume = self._fake_volume() - 
mock_vol_get_by_id.return_value = volume - volume_path = self.volumes_path - mock_local_path.return_value = volume_path - snapshot = self._fake_snapshot() - ret = self.driver._get_snapshot_path(snapshot) - self.assertEqual( - os.path.join(os.path.dirname(volume_path), snapshot.name), ret - ) - - @mock.patch('cinder.utils.execute') - def test_gpfs_full_copy(self, mock_exec): - src = "/tmp/vol1" - dest = "/tmp/vol2" - self.driver._gpfs_full_copy(src, dest) - mock_exec.assert_called_once_with('cp', src, dest, - check_exit_code=True) - - def _fake_volume(self): - volume = {} - volume['id'] = fake.VOLUME_ID - volume['display_name'] = 'test' - volume['metadata'] = {'key1': 'val1'} - volume['_name_id'] = None - volume['size'] = 1000 - volume['group_id'] = fake.CONSISTENCY_GROUP_ID - - return objects.Volume(self.context, **volume) - - def _fake_snapshot(self): - snapshot = {} - snapshot['id'] = fake.SNAPSHOT_ID - snapshot['display_name'] = 'test-snap' - snapshot['volume_size'] = 1000 - snapshot['volume_id'] = fake.VOLUME_ID - snapshot['status'] = 'available' - snapshot['snapshot_metadata'] = [] - - return objects.Snapshot(context=self.context, **snapshot) - - def _fake_volume_in_cg(self): - volume = self._fake_volume() - volume.group_id = fake.CONSISTENCY_GROUP_ID - return volume - - def _fake_group(self): - group = {} - group['name'] = 'test_group' - group['id'] = fake.CONSISTENCY_GROUP_ID - group['user_id'] = fake.USER_ID - group['group_type_id'] = fake.GROUP_TYPE_ID - group['project_id'] = fake.PROJECT_ID - - return objects.Group(self.context, **group) - - def _fake_cgsnapshot(self): - snapshot = self._fake_snapshot() - snapshot.group_id = fake.CONSISTENCY_GROUP_ID - return snapshot - - def _fake_qemu_qcow2_image_info(self, path): - data = FakeQemuImgInfo() - data.file_format = 'qcow2' - data.backing_file = None - data.virtual_size = 1 * units.Gi - return data - - def _fake_qemu_raw_image_info(self, path): - data = FakeQemuImgInfo() - data.file_format = 'raw' - 
data.backing_file = None - data.virtual_size = 1 * units.Gi - return data - - def _fake_retype_arguments(self): - ctxt = self.context - loc = 'GPFSDriver:%s:testpath' % self.driver._cluster_id - cap = {'location_info': loc} - host = {'host': 'foo', 'capabilities': cap} - key_specs_old = {'capabilities:storage_pool': 'bronze', - 'volume_backend_name': 'backend1'} - key_specs_new = {'capabilities:storage_pool': 'gold', - 'volume_backend_name': 'backend1'} - old_type_ref = volume_types.create(ctxt, 'old', key_specs_old) - new_type_ref = volume_types.create(ctxt, 'new', key_specs_new) - - volume_types.get_volume_type(ctxt, old_type_ref['id']) - new_type = volume_types.get_volume_type(ctxt, new_type_ref['id']) - - diff, _equal = volume_types.volume_types_diff(ctxt, - old_type_ref['id'], - new_type_ref['id']) - - volume = self._fake_volume() - volume['host'] = 'foo' - - return (volume, new_type, diff, host) - - @mock.patch('cinder.volume.utils.is_group_a_cg_snapshot_type') - def test_create_group(self, mock_cg_snapshot_type): - mock_cg_snapshot_type.return_value = False - ctxt = self.context - group = self._fake_group() - self.assertRaises( - NotImplementedError, - self.driver.create_group, - ctxt, group - ) - - @mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver.' 
- '_create_consistencygroup') - @mock.patch('cinder.volume.utils.is_group_a_cg_snapshot_type') - def test_create_group_cg(self, mock_cg_snapshot_type, - mock_consisgroup_create): - mock_cg_snapshot_type.return_value = True - ctxt = self.context - group = self._fake_group() - self.driver.create_group(ctxt, group) - mock_consisgroup_create.assert_called_once_with(ctxt, group) - - @mock.patch('cinder.volume.utils.is_group_a_cg_snapshot_type') - def test_delete_group(self, mock_cg_snapshot_type): - mock_cg_snapshot_type.return_value = False - ctxt = self.context - group = self._fake_group() - volumes = [] - self.assertRaises( - NotImplementedError, - self.driver.delete_group, - ctxt, group, volumes - ) - - @mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver.' - '_delete_consistencygroup') - @mock.patch('cinder.volume.utils.is_group_a_cg_snapshot_type') - def test_delete_group_cg(self, mock_cg_snapshot_type, - mock_consisgroup_delete): - mock_cg_snapshot_type.return_value = True - ctxt = self.context - group = self._fake_group() - volumes = [] - self.driver.delete_group(ctxt, group, volumes) - mock_consisgroup_delete.assert_called_once_with(ctxt, group, volumes) - - @mock.patch('cinder.volume.utils.is_group_a_cg_snapshot_type') - def test_update_group(self, mock_cg_snapshot_type): - mock_cg_snapshot_type.return_value = False - ctxt = self.context - group = self._fake_group() - self.assertRaises( - NotImplementedError, - self.driver.update_group, - ctxt, group - ) - - @mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver.' 
- '_update_consistencygroup') - @mock.patch('cinder.volume.utils.is_group_a_cg_snapshot_type') - def test_update_group_cg(self, mock_cg_snapshot_type, - mock_consisgroup_update): - mock_cg_snapshot_type.return_value = True - ctxt = self.context - group = self._fake_group() - self.driver.update_group(ctxt, group) - mock_consisgroup_update.assert_called_once_with(ctxt, group, - None, None) - - @mock.patch('cinder.volume.utils.is_group_a_cg_snapshot_type') - def test_create_group_snapshot(self, mock_cg_snapshot_type): - mock_cg_snapshot_type.return_value = False - ctxt = self.context - group_snapshot = mock.MagicMock() - snapshots = [mock.Mock()] - self.assertRaises( - NotImplementedError, - self.driver.create_group_snapshot, - ctxt, group_snapshot, snapshots - ) - - @mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver.' - '_create_cgsnapshot') - @mock.patch('cinder.volume.utils.is_group_a_cg_snapshot_type') - def test_create_group_snapshot_cg(self, mock_cg_snapshot_type, - mock_cgsnapshot_create): - mock_cg_snapshot_type.return_value = True - ctxt = self.context - group_snapshot = mock.MagicMock() - snapshots = [mock.Mock()] - self.driver.create_group_snapshot(ctxt, group_snapshot, snapshots) - mock_cgsnapshot_create.assert_called_once_with(ctxt, group_snapshot, - snapshots) - - @mock.patch('cinder.volume.utils.is_group_a_cg_snapshot_type') - def test_delete_group_snapshot(self, mock_cg_snapshot_type): - mock_cg_snapshot_type.return_value = False - ctxt = self.context - group_snapshot = mock.MagicMock() - snapshots = [mock.Mock()] - self.assertRaises( - NotImplementedError, - self.driver.delete_group_snapshot, - ctxt, group_snapshot, snapshots - ) - - @mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver.' 
- '_delete_cgsnapshot') - @mock.patch('cinder.volume.utils.is_group_a_cg_snapshot_type') - def test_delete_group_snapshot_cg(self, mock_cg_snapshot_type, - mock_cgsnapshot_delete): - mock_cg_snapshot_type.return_value = True - ctxt = self.context - group_snapshot = mock.MagicMock() - snapshots = [mock.Mock()] - self.driver.delete_group_snapshot(ctxt, group_snapshot, snapshots) - mock_cgsnapshot_delete.assert_called_once_with(ctxt, group_snapshot, - snapshots) - - @mock.patch('cinder.volume.utils.is_group_a_cg_snapshot_type') - def test_create_group_from_src(self, mock_cg_snapshot_type): - mock_cg_snapshot_type.return_value = False - ctxt = self.context - group = self._fake_group() - volumes = [] - self.assertRaises( - NotImplementedError, - self.driver.create_group_from_src, - ctxt, group, volumes - ) - - @mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver.' - '_create_consistencygroup_from_src') - @mock.patch('cinder.volume.utils.is_group_a_cg_snapshot_type') - def test_create_group_from_src_cg(self, mock_cg_snapshot_type, - mock_cg_clone_create): - mock_cg_snapshot_type.return_value = True - ctxt = self.context - group = self._fake_group() - volumes = [] - self.driver.create_group_from_src(ctxt, group, volumes) - mock_cg_clone_create.assert_called_once_with(ctxt, group, volumes, - None, None, None, None) - - -class GPFSRemoteDriverTestCase(test.TestCase): - """Unit tests for GPFSRemoteDriver class""" - @mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSRemoteDriver.' - '_get_active_gpfs_node_ip') - @mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSRemoteDriver.' 
- '_run_ssh') - def test_gpfs_remote_execute(self, - mock_run_ssh, - mock_active_gpfs_ip): - configuration = conf.Configuration(None) - self.driver = gpfs.GPFSRemoteDriver(configuration=configuration) - self.driver._gpfs_remote_execute('test', check_exit_code=True) - expected = [mock.call(('test',), True)] - self.assertEqual(expected, mock_run_ssh.mock_calls) - - @mock.patch('paramiko.SSHClient', new=mock.MagicMock()) - @mock.patch('os.path.isfile', return_value=True) - @mock.patch('six.moves.builtins.open') - @mock.patch('os.path.expanduser') - @mock.patch('paramiko.RSAKey.from_private_key_file') - @mock.patch('oslo_concurrency.processutils.ssh_execute') - def test_get_active_gpfs_node_ip(self, mock_ssh_execute, - mock_pkey_file, mock_path, - mock_open, mock_isfile): - configuration = conf.Configuration(None) - configuration.gpfs_hosts = ['10.0.0.1', '10.0.0.2'] - configuration.gpfs_mount_point_base = '/gpfs' - configuration.gpfs_private_key = '/test/fake_private_key' - mmgetstate_fake_out = "mmgetstate::state:\nmmgetstate::active:" - mock_ssh_execute.side_effect = [(mmgetstate_fake_out, ''), ('', '')] - self.driver = gpfs.GPFSRemoteDriver(configuration=configuration) - san_ip = self.driver._get_active_gpfs_node_ip() - self.assertEqual('10.0.0.1', san_ip) - - @mock.patch('paramiko.SSHClient', new=mock.MagicMock()) - @mock.patch('os.path.isfile', return_value=True) - @mock.patch('six.moves.builtins.open') - @mock.patch('os.path.expanduser') - @mock.patch('paramiko.RSAKey.from_private_key_file') - @mock.patch('oslo_concurrency.processutils.ssh_execute') - def test_get_active_gpfs_node_ip_with_password(self, mock_ssh_execute, - mock_pkey_file, mock_path, - mock_open, mock_isfile): - configuration = conf.Configuration(None) - configuration.gpfs_hosts = ['10.0.0.1', '10.0.0.2'] - configuration.gpfs_mount_point_base = '/gpfs' - configuration.gpfs_user_password = 'FakePassword' - mmgetstate_fake_out = "mmgetstate::state:\nmmgetstate::active:" - 
mock_ssh_execute.side_effect = [(mmgetstate_fake_out, ''), ('', '')] - self.driver = gpfs.GPFSRemoteDriver(configuration=configuration) - san_ip = self.driver._get_active_gpfs_node_ip() - self.assertEqual('10.0.0.1', san_ip) - - @mock.patch('paramiko.SSHClient', new=mock.MagicMock()) - @mock.patch('os.path.isfile', return_value=True) - @mock.patch('six.moves.builtins.open') - def test_get_active_gpfs_node_ip_missing_key_and_password(self, mock_open, - mock_isfile): - configuration = conf.Configuration(None) - configuration.gpfs_hosts = ['10.0.0.1', '10.0.0.2'] - configuration.gpfs_mount_point_base = '/gpfs' - self.driver = gpfs.GPFSRemoteDriver(configuration=configuration) - self.assertRaises(exception.VolumeDriverException, - self.driver._get_active_gpfs_node_ip) - - @mock.patch('paramiko.SSHClient', new=mock.MagicMock()) - @mock.patch('os.path.isfile', return_value=True) - @mock.patch('six.moves.builtins.open') - @mock.patch('os.path.expanduser') - @mock.patch('paramiko.RSAKey.from_private_key_file') - @mock.patch('oslo_concurrency.processutils.ssh_execute') - def test_get_active_gpfs_node_ip_second(self, mock_ssh_execute, - mock_pkey_file, mock_path, - mock_open, mock_isfile): - configuration = conf.Configuration(None) - configuration.gpfs_hosts = ['10.0.0.1', '10.0.0.2'] - configuration.gpfs_mount_point_base = '/gpfs' - configuration.gpfs_private_key = '/test/fake_private_key' - mmgetstate_active_fake_out = "mmgetstate::state:\nmmgetstate::active:" - mmgetstate_down_fake_out = "mmgetstate::state:\nmmgetstate::down:" - mock_ssh_execute.side_effect = [(mmgetstate_down_fake_out, ''), - (mmgetstate_active_fake_out, ''), - ('', '')] - self.driver = gpfs.GPFSRemoteDriver(configuration=configuration) - san_ip = self.driver._get_active_gpfs_node_ip() - self.assertEqual('10.0.0.2', san_ip) - - @mock.patch('paramiko.SSHClient', new=mock.MagicMock()) - def test_missing_ssh_host_key_config(self): - configuration = conf.Configuration(None) - configuration.gpfs_hosts = 
['10.0.0.1', '10.0.0.2'] - configuration.gpfs_hosts_key_file = None - self.driver = gpfs.GPFSRemoteDriver(configuration=configuration) - self.assertRaises(exception.ParameterNotFound, - self.driver._get_active_gpfs_node_ip) - - @mock.patch('paramiko.SSHClient', new=mock.MagicMock()) - @mock.patch('os.path.isfile', return_value=False) - def test_init_missing_ssh_host_key_file(self, - mock_is_file): - configuration = conf.Configuration(None) - configuration.gpfs_hosts = ['10.0.0.1', '10.0.0.2'] - configuration.gpfs_hosts_key_file = '/test' - self.flags(state_path='/var/lib/cinder') - self.driver = gpfs.GPFSRemoteDriver(configuration=configuration) - self.assertRaises(exception.InvalidInput, - self.driver._get_active_gpfs_node_ip) - - @mock.patch('paramiko.SSHClient', new=mock.MagicMock()) - @mock.patch('os.path.isfile', return_value=True) - @mock.patch('six.moves.builtins.open') - @mock.patch('os.path.expanduser') - @mock.patch('paramiko.RSAKey.from_private_key_file') - @mock.patch('oslo_concurrency.processutils.ssh_execute') - def test_get_active_gpfs_node_ip_exception(self, mock_ssh_execute, - mock_pkey_file, mock_path, - mock_open, mock_isfile): - configuration = conf.Configuration(None) - configuration.gpfs_hosts = ['10.0.0.1', '10.0.0.2'] - configuration.gpfs_mount_point_base = '/gpfs' - configuration.gpfs_private_key = "/test/fake_private_key" - mmgetstate_down_fake_out = "mmgetstate::state:\nmmgetstate::down:" - mock_ssh_execute.side_effect = [(mmgetstate_down_fake_out, ''), - processutils.ProcessExecutionError( - stderr='test')] - self.driver = gpfs.GPFSRemoteDriver(configuration=configuration) - self.assertRaises(exception.VolumeBackendAPIException, - self.driver._get_active_gpfs_node_ip) - - -class GPFSNFSDriverTestCase(test.TestCase): - driver_name = "cinder.volume.drivers.gpfs.GPFSNFSDriver" - TEST_NFS_EXPORT = 'nfs-host1:/export' - TEST_SIZE_IN_GB = 1 - TEST_EXTEND_SIZE_IN_GB = 2 - TEST_MNT_POINT = '/mnt/nfs' - TEST_MNT_POINT_BASE = '/mnt' - 
TEST_GPFS_MNT_POINT_BASE = '/export' - TEST_LOCAL_PATH = '/mnt/nfs/volume-123' - TEST_VOLUME_PATH = '/export/volume-123' - TEST_SNAP_PATH = '/export/snapshot-123' - - def _execute_wrapper(self, cmd, *args, **kwargs): - try: - kwargs.pop('run_as_root') - except KeyError: - pass - - return utils.execute(cmd, *args, **kwargs) - - def _fake_volume(self): - volume = {} - volume['id'] = fake.VOLUME_ID - volume['display_name'] = 'test' - volume['metadata'] = {'key1': 'val1'} - volume['_name_id'] = None - volume['size'] = 1000 - volume['group_id'] = fake.CONSISTENCY_GROUP_ID - - return objects.Volume(self.context, **volume) - - def _fake_group(self): - group = {} - group['name'] = 'test_group' - group['id'] = fake.CONSISTENCY_GROUP_ID - group['user_id'] = fake.USER_ID - group['group_type_id'] = fake.GROUP_TYPE_ID - group['project_id'] = fake.PROJECT_ID - - return objects.Group(self.context, **group) - - def _fake_snapshot(self): - snapshot = {} - snapshot['id'] = '12345' - snapshot['name'] = 'test-snap' - snapshot['volume_size'] = 1000 - snapshot['volume_id'] = '123456' - snapshot['status'] = 'available' - return snapshot - - def setUp(self): - super(GPFSNFSDriverTestCase, self).setUp() - self.driver = gpfs.GPFSNFSDriver(configuration=conf. - Configuration(None)) - self.driver.gpfs_execute = self._execute_wrapper - self.context = context.get_admin_context() - self.context.user_id = 'fake' - self.context.project_id = 'fake' - - @mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSNFSDriver.' - '_run_ssh') - def test_gpfs_remote_execute(self, mock_run_ssh): - mock_run_ssh.return_value = 'test' - self.driver._gpfs_remote_execute('test', check_exit_code=True) - expected = [mock.call(('test',), True)] - self.assertEqual(expected, mock_run_ssh.mock_calls) - - @mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSNFSDriver.' 
- '_ensure_shares_mounted') - def test_update_volume_stats(self, mock_ensure): - """Check update volume stats.""" - - mock_ensure.return_value = True - fake_avail = 80 * units.Gi - fake_size = 2 * fake_avail - fake_used = 10 * units.Gi - - with mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSNFSDriver.' - '_get_capacity_info', - return_value=(fake_avail, fake_size, fake_used)): - stats = self.driver.get_volume_stats() - self.assertEqual('GPFSNFS', stats['volume_backend_name']) - self.assertEqual('file', stats['storage_protocol']) - stats = self.driver.get_volume_stats(True) - self.assertEqual('GPFSNFS', stats['volume_backend_name']) - self.assertEqual('file', stats['storage_protocol']) - - @mock.patch('cinder.db.get_by_id') - @mock.patch('cinder.volume.utils.is_group_a_cg_snapshot_type') - def test_get_volume_path(self, mock_group_cg_snapshot_type, mock_group): - mock_group_cg_snapshot_type.return_value = True - self.driver.configuration.gpfs_mount_point_base = ( - self.TEST_GPFS_MNT_POINT_BASE) - volume = self._fake_volume() - group = self._fake_group() - mock_group.return_value = group - volume_path_in_cg = os.path.join(self.TEST_GPFS_MNT_POINT_BASE, - 'consisgroup-' + - fake.CONSISTENCY_GROUP_ID, - 'volume-' + fake.VOLUME_ID) - self.assertEqual(volume_path_in_cg, - self.driver._get_volume_path(volume)) - volume.group_id = None - volume_path = os.path.join(self.TEST_GPFS_MNT_POINT_BASE, - 'volume-' + fake.VOLUME_ID) - self.assertEqual(volume_path, - self.driver._get_volume_path(volume)) - - @mock.patch('cinder.db.get_by_id') - @mock.patch('cinder.volume.utils.is_group_a_cg_snapshot_type') - @mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSNFSDriver.' 
- '_get_mount_point_for_share') - def test_local_path(self, mock_mount_point, - mock_group_cg_snapshot_type, - mock_group): - mock_mount_point.return_value = self.TEST_MNT_POINT_BASE - mock_group_cg_snapshot_type.return_value = True - volume = self._fake_volume() - group = self._fake_group() - mock_group.return_value = group - volume['provider_location'] = self.TEST_MNT_POINT_BASE - local_volume_path_in_cg = os.path.join(self.TEST_MNT_POINT_BASE, - 'consisgroup-' + - fake.CONSISTENCY_GROUP_ID, - 'volume-' + fake.VOLUME_ID) - self.assertEqual(local_volume_path_in_cg, - self.driver.local_path(volume)) - volume.group_id = None - local_volume_path = os.path.join(self.TEST_MNT_POINT_BASE, - 'volume-' + fake.VOLUME_ID) - self.assertEqual(local_volume_path, - self.driver.local_path(volume)) - - @mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSNFSDriver.' - '_get_volume_path') - def test_get_snapshot_path(self, mock_volume_path): - volume = self._fake_volume() - self.driver.db = mock.Mock() - self.driver.db.volume_get = mock.Mock() - self.driver.db.volume_get.return_value = volume - mock_volume_path.return_value = os.path.join(self. - TEST_GPFS_MNT_POINT_BASE, - volume['name']) - snapshot = self._fake_snapshot() - self.assertEqual('/export/test-snap', - self.driver._get_snapshot_path(snapshot)) - - @mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSNFSDriver.' - '_find_share') - @mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver.' - 'create_volume') - def test_create_volume(self, - mock_create_volume, - mock_find_share): - volume = self._fake_volume() - mock_find_share.return_value = self.TEST_VOLUME_PATH - self.assertEqual({'provider_location': self.TEST_VOLUME_PATH}, - self.driver.create_volume(volume)) - - @mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver.' - '_delete_gpfs_file') - @mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSNFSDriver.' - 'local_path') - @mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSNFSDriver.' 
- '_get_volume_path') - @mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver.' - '_verify_gpfs_path_state') - def test_delete_volume(self, - mock_verify_gpfs_path_state, - mock_volume_path, - mock_local_path, - mock_delete_gpfs_file): - self.driver.delete_volume('') - - @mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver.' - 'delete_snapshot') - def test_delete_snapshot(self, - mock_delete_snapshot): - self.driver.delete_snapshot('') - - @mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver.' - '_resize_volume_file') - @mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSNFSDriver.' - '_find_share') - @mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver.' - '_create_volume_from_snapshot') - def test_create_volume_from_snapshot(self, - mock_create_volume_from_snapshot, - mock_find_share, - mock_resize_volume_file): - volume = self._fake_volume() - snapshot = self._fake_snapshot() - mock_find_share.return_value = self.TEST_VOLUME_PATH - self.assertEqual({'provider_location': self.TEST_VOLUME_PATH}, - self.driver.create_volume_from_snapshot(volume, - snapshot)) - - @mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver.' - '_resize_volume_file') - @mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSNFSDriver.' - '_find_share') - @mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver.' - '_create_cloned_volume') - def test_create_cloned_volume(self, - mock_create_cloned_volume, - mock_find_share, - mock_resize_volume_file): - volume = self._fake_volume() - src_vref = self._fake_volume() - mock_find_share.return_value = self.TEST_VOLUME_PATH - self.assertEqual({'provider_location': self.TEST_VOLUME_PATH}, - self.driver.create_cloned_volume(volume, src_vref)) diff --git a/cinder/tests/unit/volume/drivers/test_hgst.py b/cinder/tests/unit/volume/drivers/test_hgst.py deleted file mode 100644 index 573038d47..000000000 --- a/cinder/tests/unit/volume/drivers/test_hgst.py +++ /dev/null @@ -1,941 +0,0 @@ -# Copyright (c) 2015 HGST Inc -# All Rights Reserved. 
-# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - - -import mock - -from oslo_concurrency import processutils -from oslo_utils import units - -from cinder import context -from cinder import exception -from cinder import test -from cinder.volume import configuration as conf -from cinder.volume.drivers.hgst import HGSTDriver -from cinder.volume import volume_types - - -class HGSTTestCase(test.TestCase): - - # Need to mock these since we use them on driver creation - @mock.patch('pwd.getpwnam', return_value=1) - @mock.patch('grp.getgrnam', return_value=1) - @mock.patch('socket.gethostbyname', return_value='123.123.123.123') - def setUp(self, mock_ghn, mock_grnam, mock_pwnam): - """Set up UUT and all the flags required for later fake_executes.""" - super(HGSTTestCase, self).setUp() - self.mock_object(processutils, 'execute', self._fake_execute) - self._fail_vgc_cluster = False - self._fail_ip = False - self._fail_network_list = False - self._fail_domain_list = False - self._empty_domain_list = False - self._fail_host_storage = False - self._fail_space_list = False - self._fail_space_delete = False - self._fail_set_apphosts = False - self._fail_extend = False - self._request_cancel = False - self._return_blocked = 0 - self.configuration = mock.Mock(spec=conf.Configuration) - self.configuration.safe_get = self._fake_safe_get - self._reset_configuration() - self.driver = HGSTDriver(configuration=self.configuration, - execute=self._fake_execute) - - def _fake_safe_get(self, 
value): - """Don't throw exception on missing parameters, return None.""" - try: - val = getattr(self.configuration, value) - except AttributeError: - val = None - return val - - def _reset_configuration(self): - """Set safe and sane values for config params.""" - self.configuration.num_volume_device_scan_tries = 1 - self.configuration.volume_dd_blocksize = '1M' - self.configuration.volume_backend_name = 'hgst-1' - self.configuration.hgst_storage_servers = 'stor1:gbd0,stor2:gbd0' - self.configuration.hgst_net = 'net1' - self.configuration.hgst_redundancy = '0' - self.configuration.hgst_space_user = 'kane' - self.configuration.hgst_space_group = 'xanadu' - self.configuration.hgst_space_mode = '0777' - - def _parse_space_create(self, *cmd): - """Eats a vgc-cluster space-create command line to a dict.""" - self.created = {'storageserver': ''} - cmd = list(*cmd) - while cmd: - param = cmd.pop(0) - if param == "-n": - self.created['name'] = cmd.pop(0) - elif param == "-N": - self.created['net'] = cmd.pop(0) - elif param == "-s": - self.created['size'] = cmd.pop(0) - elif param == "--redundancy": - self.created['redundancy'] = cmd.pop(0) - elif param == "--user": - self.created['user'] = cmd.pop(0) - elif param == "--user": - self.created['user'] = cmd.pop(0) - elif param == "--group": - self.created['group'] = cmd.pop(0) - elif param == "--mode": - self.created['mode'] = cmd.pop(0) - elif param == "-S": - self.created['storageserver'] += cmd.pop(0) + "," - else: - pass - - def _parse_space_extend(self, *cmd): - """Eats a vgc-cluster space-extend commandline to a dict.""" - self.extended = {'storageserver': ''} - cmd = list(*cmd) - while cmd: - param = cmd.pop(0) - if param == "-n": - self.extended['name'] = cmd.pop(0) - elif param == "-s": - self.extended['size'] = cmd.pop(0) - elif param == "-S": - self.extended['storageserver'] += cmd.pop(0) + "," - else: - pass - if self._fail_extend: - raise processutils.ProcessExecutionError(exit_code=1) - else: - return '', '' - - 
def _parse_space_delete(self, *cmd): - """Eats a vgc-cluster space-delete commandline to a dict.""" - self.deleted = {} - cmd = list(*cmd) - while cmd: - param = cmd.pop(0) - if param == "-n": - self.deleted['name'] = cmd.pop(0) - else: - pass - if self._fail_space_delete: - raise processutils.ProcessExecutionError(exit_code=1) - else: - return '', '' - - def _parse_space_list(self, *cmd): - """Eats a vgc-cluster space-list commandline to a dict.""" - json = False - nameOnly = False - cmd = list(*cmd) - while cmd: - param = cmd.pop(0) - if param == "--json": - json = True - elif param == "--name-only": - nameOnly = True - elif param == "-n": - pass # Don't use the name here... - else: - pass - if self._fail_space_list: - raise processutils.ProcessExecutionError(exit_code=1) - elif nameOnly: - return "space1\nspace2\nvolume1\n", '' - elif json: - return HGST_SPACE_JSON, '' - else: - return '', '' - - def _parse_network_list(self, *cmd): - """Eat a network-list command and return error or results.""" - if self._fail_network_list: - raise processutils.ProcessExecutionError(exit_code=1) - else: - return NETWORK_LIST, '' - - def _parse_domain_list(self, *cmd): - """Eat a domain-list command and return error, empty, or results.""" - if self._fail_domain_list: - raise processutils.ProcessExecutionError(exit_code=1) - elif self._empty_domain_list: - return '', '' - else: - return "thisserver\nthatserver\nanotherserver\n", '' - - def _fake_execute(self, *cmd, **kwargs): - """Sudo hook to catch commands to allow running on all hosts.""" - cmdlist = list(cmd) - exe = cmdlist.pop(0) - if exe == 'vgc-cluster': - exe = cmdlist.pop(0) - if exe == "request-cancel": - self._request_cancel = True - if self._return_blocked > 0: - return 'Request cancelled', '' - else: - raise processutils.ProcessExecutionError(exit_code=1) - elif self._fail_vgc_cluster: - raise processutils.ProcessExecutionError(exit_code=1) - elif exe == "--version": - return "HGST Solutions V2.5.0.0.x.x.x.x.x", '' 
- elif exe == "space-list": - return self._parse_space_list(cmdlist) - elif exe == "space-create": - self._parse_space_create(cmdlist) - if self._return_blocked > 0: - self._return_blocked = self._return_blocked - 1 - out = "VGC_CREATE_000002\nBLOCKED\n" - raise processutils.ProcessExecutionError(stdout=out, - exit_code=1) - return '', '' - elif exe == "space-delete": - return self._parse_space_delete(cmdlist) - elif exe == "space-extend": - return self._parse_space_extend(cmdlist) - elif exe == "host-storage": - if self._fail_host_storage: - raise processutils.ProcessExecutionError(exit_code=1) - return HGST_HOST_STORAGE, '' - elif exe == "domain-list": - return self._parse_domain_list() - elif exe == "network-list": - return self._parse_network_list() - elif exe == "space-set-apphosts": - if self._fail_set_apphosts: - raise processutils.ProcessExecutionError(exit_code=1) - return '', '' - else: - raise NotImplementedError - elif exe == 'ip': - if self._fail_ip: - raise processutils.ProcessExecutionError(exit_code=1) - else: - return IP_OUTPUT, '' - elif exe == 'dd': - self.dd_count = -1 - for p in cmdlist: - if 'count=' in p: - self.dd_count = int(p[6:]) - elif 'bs=' in p: - self.bs = p[3:] - return DD_OUTPUT, '' - else: - return '', '' - - @mock.patch('pwd.getpwnam', return_value=1) - @mock.patch('grp.getgrnam', return_value=1) - @mock.patch('socket.gethostbyname', return_value='123.123.123.123') - def test_vgc_cluster_not_present(self, mock_ghn, mock_grnam, mock_pwnam): - """Test exception when vgc-cluster returns an error.""" - # Should pass - self._fail_vgc_cluster = False - self.driver.check_for_setup_error() - # Should throw exception - self._fail_vgc_cluster = True - self.assertRaises(exception.VolumeDriverException, - self.driver.check_for_setup_error) - - @mock.patch('pwd.getpwnam', return_value=1) - @mock.patch('grp.getgrnam', return_value=1) - @mock.patch('socket.gethostbyname', return_value='123.123.123.123') - def 
test_parameter_redundancy_invalid(self, mock_ghn, mock_grnam, - mock_pwnam): - """Test when hgst_redundancy config parameter not 0 or 1.""" - # Should pass - self.driver.check_for_setup_error() - # Should throw exceptions - self.configuration.hgst_redundancy = '' - self.assertRaises(exception.VolumeDriverException, - self.driver.check_for_setup_error) - self.configuration.hgst_redundancy = 'Fred' - self.assertRaises(exception.VolumeDriverException, - self.driver.check_for_setup_error) - - @mock.patch('pwd.getpwnam', return_value=1) - @mock.patch('grp.getgrnam', return_value=1) - @mock.patch('socket.gethostbyname', return_value='123.123.123.123') - def test_parameter_user_invalid(self, mock_ghn, mock_grnam, mock_pwnam): - """Test exception when hgst_space_user doesn't map to UNIX user.""" - # Should pass - self.driver.check_for_setup_error() - # Should throw exceptions - mock_pwnam.side_effect = KeyError() - self.configuration.hgst_space_user = '' - self.assertRaises(exception.VolumeDriverException, - self.driver.check_for_setup_error) - self.configuration.hgst_space_user = 'Fred!`' - self.assertRaises(exception.VolumeDriverException, - self.driver.check_for_setup_error) - - @mock.patch('pwd.getpwnam', return_value=1) - @mock.patch('grp.getgrnam', return_value=1) - @mock.patch('socket.gethostbyname', return_value='123.123.123.123') - def test_parameter_group_invalid(self, mock_ghn, mock_grnam, mock_pwnam): - """Test exception when hgst_space_group doesn't map to UNIX group.""" - # Should pass - self.driver.check_for_setup_error() - # Should throw exceptions - mock_grnam.side_effect = KeyError() - self.configuration.hgst_space_group = '' - self.assertRaises(exception.VolumeDriverException, - self.driver.check_for_setup_error) - self.configuration.hgst_space_group = 'Fred!`' - self.assertRaises(exception.VolumeDriverException, - self.driver.check_for_setup_error) - - @mock.patch('pwd.getpwnam', return_value=1) - @mock.patch('grp.getgrnam', return_value=1) - 
@mock.patch('socket.gethostbyname', return_value='123.123.123.123') - def test_parameter_mode_invalid(self, mock_ghn, mock_grnam, mock_pwnam): - """Test exception when mode for created spaces isn't proper format.""" - # Should pass - self.driver.check_for_setup_error() - # Should throw exceptions - self.configuration.hgst_space_mode = '' - self.assertRaises(exception.VolumeDriverException, - self.driver.check_for_setup_error) - self.configuration.hgst_space_mode = 'Fred' - self.assertRaises(exception.VolumeDriverException, - self.driver.check_for_setup_error) - - @mock.patch('pwd.getpwnam', return_value=1) - @mock.patch('grp.getgrnam', return_value=1) - @mock.patch('socket.gethostbyname', return_value='123.123.123.123') - def test_parameter_net_invalid(self, mock_ghn, mock_grnam, mock_pwnam): - """Test exception when hgst_net not in the domain.""" - # Should pass - self.driver.check_for_setup_error() - # Should throw exceptions - self._fail_network_list = True - self.configuration.hgst_net = 'Fred' - self.assertRaises(exception.VolumeDriverException, - self.driver.check_for_setup_error) - self._fail_network_list = False - - @mock.patch('pwd.getpwnam', return_value=1) - @mock.patch('grp.getgrnam', return_value=1) - @mock.patch('socket.gethostbyname', return_value='123.123.123.123') - def test_ip_addr_fails(self, mock_ghn, mock_grnam, mock_pwnam): - """Test exception when IP ADDR command fails.""" - # Should pass - self.driver.check_for_setup_error() - # Throw exception, need to clear internal cached host in driver - self._fail_ip = True - self.driver._vgc_host = None - self.assertRaises(exception.VolumeDriverException, - self.driver.check_for_setup_error) - - @mock.patch('pwd.getpwnam', return_value=1) - @mock.patch('grp.getgrnam', return_value=1) - @mock.patch('socket.gethostbyname', return_value='123.123.123.123') - def test_domain_list_fails(self, mock_ghn, mock_grnam, mock_pwnam): - """Test exception when domain-list fails for the domain.""" - # Should pass - 
self.driver.check_for_setup_error() - # Throw exception, need to clear internal cached host in driver - self._fail_domain_list = True - self.driver._vgc_host = None - self.assertRaises(exception.VolumeDriverException, - self.driver.check_for_setup_error) - - @mock.patch('pwd.getpwnam', return_value=1) - @mock.patch('grp.getgrnam', return_value=1) - @mock.patch('socket.gethostbyname', return_value='123.123.123.123') - def test_not_in_domain(self, mock_ghn, mock_grnam, mock_pwnam): - """Test exception when Cinder host not domain member.""" - # Should pass - self.driver.check_for_setup_error() - # Throw exception, need to clear internal cached host in driver - self._empty_domain_list = True - self.driver._vgc_host = None - self.assertRaises(exception.VolumeDriverException, - self.driver.check_for_setup_error) - - @mock.patch('pwd.getpwnam', return_value=1) - @mock.patch('grp.getgrnam', return_value=1) - @mock.patch('socket.gethostbyname', return_value='123.123.123.123') - def test_parameter_storageservers_invalid(self, mock_ghn, mock_grnam, - mock_pwnam): - """Test exception when the storage servers are invalid/missing.""" - # Should pass - self.driver.check_for_setup_error() - # Storage_hosts missing - self.configuration.hgst_storage_servers = '' - self.assertRaises(exception.VolumeDriverException, - self.driver.check_for_setup_error) - # missing a : between host and devnode - self.configuration.hgst_storage_servers = 'stor1,stor2' - self.assertRaises(exception.VolumeDriverException, - self.driver.check_for_setup_error) - # missing a : between host and devnode - self.configuration.hgst_storage_servers = 'stor1:gbd0,stor2' - self.assertRaises(exception.VolumeDriverException, - self.driver.check_for_setup_error) - # Host not in cluster - self.configuration.hgst_storage_servers = 'stor1:gbd0' - self._fail_host_storage = True - self.assertRaises(exception.VolumeDriverException, - self.driver.check_for_setup_error) - - def test_update_volume_stats(self): - """Get cluster 
space available, should pass.""" - actual = self.driver.get_volume_stats(True) - self.assertEqual('HGST', actual['vendor_name']) - self.assertEqual('hgst', actual['storage_protocol']) - self.assertEqual(90, actual['total_capacity_gb']) - self.assertEqual(87, actual['free_capacity_gb']) - self.assertEqual(0, actual['reserved_percentage']) - - def test_update_volume_stats_redundancy(self): - """Get cluster space available, half-sized - 1 for mirrors.""" - self.configuration.hgst_redundancy = '1' - actual = self.driver.get_volume_stats(True) - self.assertEqual('HGST', actual['vendor_name']) - self.assertEqual('hgst', actual['storage_protocol']) - self.assertEqual(44, actual['total_capacity_gb']) - self.assertEqual(43, actual['free_capacity_gb']) - self.assertEqual(0, actual['reserved_percentage']) - - def test_update_volume_stats_cached(self): - """Get cached cluster space, should not call executable.""" - self._fail_host_storage = True - actual = self.driver.get_volume_stats(False) - self.assertEqual('HGST', actual['vendor_name']) - self.assertEqual('hgst', actual['storage_protocol']) - self.assertEqual(90, actual['total_capacity_gb']) - self.assertEqual(87, actual['free_capacity_gb']) - self.assertEqual(0, actual['reserved_percentage']) - - def test_update_volume_stats_error(self): - """Test that when host-storage gives an error, return unknown.""" - self._fail_host_storage = True - actual = self.driver.get_volume_stats(True) - self.assertEqual('HGST', actual['vendor_name']) - self.assertEqual('hgst', actual['storage_protocol']) - self.assertEqual('unknown', actual['total_capacity_gb']) - self.assertEqual('unknown', actual['free_capacity_gb']) - self.assertEqual(0, actual['reserved_percentage']) - - @mock.patch('socket.gethostbyname', return_value='123.123.123.123') - def test_create_volume(self, mock_ghn): - """Test volume creation, ensure appropriate size expansion/name.""" - ctxt = context.get_admin_context() - extra_specs = {} - type_ref = 
volume_types.create(ctxt, 'hgst-1', extra_specs) - volume = {'id': '1', 'name': 'volume1', - 'display_name': '', - 'volume_type_id': type_ref['id'], - 'size': 10} - ret = self.driver.create_volume(volume) - expected = {'redundancy': '0', 'group': 'xanadu', - 'name': 'volume10', 'mode': '0777', - 'user': 'kane', 'net': 'net1', - 'storageserver': 'stor1:gbd0,stor2:gbd0,', - 'size': '12'} - self.assertDictEqual(expected, self.created) - # Check the returned provider, note that provider_id is hashed - expected_pid = {'provider_id': 'volume10'} - self.assertDictEqual(expected_pid, ret) - - @mock.patch('socket.gethostbyname', return_value='123.123.123.123') - def test_create_volume_name_creation_fail(self, mock_ghn): - """Test volume creation exception when can't make a hashed name.""" - ctxt = context.get_admin_context() - extra_specs = {} - type_ref = volume_types.create(ctxt, 'hgst-1', extra_specs) - volume = {'id': '1', 'name': 'volume1', - 'display_name': '', - 'volume_type_id': type_ref['id'], - 'size': 10} - self._fail_space_list = True - self.assertRaises(exception.VolumeDriverException, - self.driver.create_volume, volume) - - @mock.patch('socket.gethostbyname', return_value='123.123.123.123') - def test_create_snapshot(self, mock_ghn): - """Test creating a snapshot, ensure full data of original copied.""" - # Now snapshot the volume and check commands - snapshot = {'volume_name': 'volume10', - 'volume_id': 'xxx', 'display_name': 'snap10', - 'name': '123abc', 'volume_size': 10, 'id': '123abc', - 'volume': {'provider_id': 'space10'}} - ret = self.driver.create_snapshot(snapshot) - # We must copy entier underlying storage, ~12GB, not just 10GB - self.assertEqual(11444 * units.Mi, self.dd_count) - self.assertEqual('1M', self.bs) - # Check space-create command - expected = {'redundancy': '0', 'group': 'xanadu', - 'name': snapshot['display_name'], 'mode': '0777', - 'user': 'kane', 'net': 'net1', - 'storageserver': 'stor1:gbd0,stor2:gbd0,', - 'size': '12'} - 
self.assertDictEqual(expected, self.created) - # Check the returned provider - expected_pid = {'provider_id': 'snap10'} - self.assertDictEqual(expected_pid, ret) - - @mock.patch('socket.gethostbyname', return_value='123.123.123.123') - def test_create_cloned_volume(self, mock_ghn): - """Test creating a clone, ensure full size is copied from original.""" - ctxt = context.get_admin_context() - extra_specs = {} - type_ref = volume_types.create(ctxt, 'hgst-1', extra_specs) - orig = {'id': '1', 'name': 'volume1', 'display_name': '', - 'volume_type_id': type_ref['id'], 'size': 10, - 'provider_id': 'space_orig'} - clone = {'id': '2', 'name': 'clone1', 'display_name': '', - 'volume_type_id': type_ref['id'], 'size': 10} - pid = self.driver.create_cloned_volume(clone, orig) - # We must copy entier underlying storage, ~12GB, not just 10GB - self.assertEqual(11444 * units.Mi, self.dd_count) - self.assertEqual('1M', self.bs) - # Check space-create command - expected = {'redundancy': '0', 'group': 'xanadu', - 'name': 'clone1', 'mode': '0777', - 'user': 'kane', 'net': 'net1', - 'storageserver': 'stor1:gbd0,stor2:gbd0,', - 'size': '12'} - self.assertDictEqual(expected, self.created) - # Check the returned provider - expected_pid = {'provider_id': 'clone1'} - self.assertDictEqual(expected_pid, pid) - - @mock.patch('socket.gethostbyname', return_value='123.123.123.123') - def test_add_cinder_apphosts_fails(self, mock_ghn): - """Test exception when set-apphost can't connect volume to host.""" - ctxt = context.get_admin_context() - extra_specs = {} - type_ref = volume_types.create(ctxt, 'hgst-1', extra_specs) - orig = {'id': '1', 'name': 'volume1', 'display_name': '', - 'volume_type_id': type_ref['id'], 'size': 10, - 'provider_id': 'space_orig'} - clone = {'id': '2', 'name': 'clone1', 'display_name': '', - 'volume_type_id': type_ref['id'], 'size': 10} - self._fail_set_apphosts = True - self.assertRaises(exception.VolumeDriverException, - self.driver.create_cloned_volume, clone, orig) 
- - @mock.patch('socket.gethostbyname', return_value='123.123.123.123') - def test_create_volume_from_snapshot(self, mock_ghn): - """Test creating volume from snapshot, ensure full space copy.""" - ctxt = context.get_admin_context() - extra_specs = {} - type_ref = volume_types.create(ctxt, 'hgst-1', extra_specs) - snap = {'id': '1', 'name': 'volume1', 'display_name': '', - 'volume_type_id': type_ref['id'], 'size': 10, - 'provider_id': 'space_orig'} - volume = {'id': '2', 'name': 'volume2', 'display_name': '', - 'volume_type_id': type_ref['id'], 'size': 10} - pid = self.driver.create_volume_from_snapshot(volume, snap) - # We must copy entier underlying storage, ~12GB, not just 10GB - self.assertEqual(11444 * units.Mi, self.dd_count) - self.assertEqual('1M', self.bs) - # Check space-create command - expected = {'redundancy': '0', 'group': 'xanadu', - 'name': 'volume2', 'mode': '0777', - 'user': 'kane', 'net': 'net1', - 'storageserver': 'stor1:gbd0,stor2:gbd0,', - 'size': '12'} - self.assertDictEqual(expected, self.created) - # Check the returned provider - expected_pid = {'provider_id': 'volume2'} - self.assertDictEqual(expected_pid, pid) - - @mock.patch('socket.gethostbyname', return_value='123.123.123.123') - def test_create_volume_blocked(self, mock_ghn): - """Test volume creation where only initial space-create is blocked. - - This should actually pass because we are blocked byt return an error - in request-cancel, meaning that it got unblocked before we could kill - the space request. 
- """ - ctxt = context.get_admin_context() - extra_specs = {} - type_ref = volume_types.create(ctxt, 'hgst-1', extra_specs) - volume = {'id': '1', 'name': 'volume1', - 'display_name': '', - 'volume_type_id': type_ref['id'], - 'size': 10} - self._return_blocked = 1 # Block & fail cancel => create succeeded - ret = self.driver.create_volume(volume) - expected = {'redundancy': '0', 'group': 'xanadu', - 'name': 'volume10', 'mode': '0777', - 'user': 'kane', 'net': 'net1', - 'storageserver': 'stor1:gbd0,stor2:gbd0,', - 'size': '12'} - self.assertDictEqual(expected, self.created) - # Check the returned provider - expected_pid = {'provider_id': 'volume10'} - self.assertDictEqual(expected_pid, ret) - self.assertTrue(self._request_cancel) - - @mock.patch('socket.gethostbyname', return_value='123.123.123.123') - def test_create_volume_blocked_and_fail(self, mock_ghn): - """Test volume creation where space-create blocked permanently. - - This should fail because the initial create was blocked and the - request-cancel succeeded, meaning the create operation never - completed. - """ - ctxt = context.get_admin_context() - extra_specs = {} - type_ref = volume_types.create(ctxt, 'hgst-1', extra_specs) - volume = {'id': '1', 'name': 'volume1', - 'display_name': '', - 'volume_type_id': type_ref['id'], - 'size': 10} - self._return_blocked = 2 # Block & pass cancel => create failed. 
:( - self.assertRaises(exception.VolumeDriverException, - self.driver.create_volume, volume) - self.assertTrue(self._request_cancel) - - def test_delete_volume(self): - """Test deleting existing volume, ensure proper name used.""" - ctxt = context.get_admin_context() - extra_specs = {} - type_ref = volume_types.create(ctxt, 'hgst-1', extra_specs) - volume = {'id': '1', 'name': 'volume1', - 'display_name': '', - 'volume_type_id': type_ref['id'], - 'size': 10, - 'provider_id': 'volume10'} - self.driver.delete_volume(volume) - expected = {'name': 'volume10'} - self.assertDictEqual(expected, self.deleted) - - def test_delete_volume_failure_modes(self): - """Test cases where space-delete fails, but OS delete is still OK.""" - ctxt = context.get_admin_context() - extra_specs = {} - type_ref = volume_types.create(ctxt, 'hgst-1', extra_specs) - volume = {'id': '1', 'name': 'volume1', - 'display_name': '', - 'volume_type_id': type_ref['id'], - 'size': 10, - 'provider_id': 'volume10'} - self._fail_space_delete = True - # This should not throw an exception, space-delete failure not problem - self.driver.delete_volume(volume) - self._fail_space_delete = False - volume['provider_id'] = None - # This should also not throw an exception - self.driver.delete_volume(volume) - - def test_delete_snapshot(self): - """Test deleting a snapshot, ensure proper name is removed.""" - ctxt = context.get_admin_context() - extra_specs = {} - type_ref = volume_types.create(ctxt, 'hgst-1', extra_specs) - snapshot = {'id': '1', 'name': 'volume1', - 'display_name': '', - 'volume_type_id': type_ref['id'], - 'size': 10, - 'provider_id': 'snap10'} - self.driver.delete_snapshot(snapshot) - expected = {'name': 'snap10'} - self.assertDictEqual(expected, self.deleted) - - def test_extend_volume(self): - """Test extending a volume, check the size in GB vs. 
GiB.""" - ctxt = context.get_admin_context() - extra_specs = {} - type_ref = volume_types.create(ctxt, 'hgst-1', extra_specs) - volume = {'id': '1', 'name': 'volume1', - 'display_name': '', - 'volume_type_id': type_ref['id'], - 'size': 10, - 'provider_id': 'volume10'} - self.extended = {'name': '', 'size': '0', - 'storageserver': ''} - self.driver.extend_volume(volume, 12) - expected = {'name': 'volume10', 'size': '2', - 'storageserver': 'stor1:gbd0,stor2:gbd0,'} - self.assertDictEqual(expected, self.extended) - - def test_extend_volume_noextend(self): - """Test extending a volume where Space does not need to be enlarged. - - Because Spaces are generated somewhat larger than the requested size - from OpenStack due to the base10(HGST)/base2(OS) mismatch, they can - sometimes be larger than requested from OS. In that case a - volume_extend may actually be a noop since the volume is already large - enough to satisfy OS's request. - """ - ctxt = context.get_admin_context() - extra_specs = {} - type_ref = volume_types.create(ctxt, 'hgst-1', extra_specs) - volume = {'id': '1', 'name': 'volume1', - 'display_name': '', - 'volume_type_id': type_ref['id'], - 'size': 10, - 'provider_id': 'volume10'} - self.extended = {'name': '', 'size': '0', - 'storageserver': ''} - self.driver.extend_volume(volume, 10) - expected = {'name': '', 'size': '0', - 'storageserver': ''} - self.assertDictEqual(expected, self.extended) - - def test_space_list_fails(self): - """Test exception is thrown when we can't call space-list.""" - ctxt = context.get_admin_context() - extra_specs = {} - type_ref = volume_types.create(ctxt, 'hgst-1', extra_specs) - volume = {'id': '1', 'name': 'volume1', - 'display_name': '', - 'volume_type_id': type_ref['id'], - 'size': 10, - 'provider_id': 'volume10'} - self.extended = {'name': '', 'size': '0', - 'storageserver': ''} - self._fail_space_list = True - self.assertRaises(exception.VolumeDriverException, - self.driver.extend_volume, volume, 12) - - def 
test_cli_error_not_blocked(self): - """Test the _blocked handler's handlinf of a non-blocked error. - - The _handle_blocked handler is called on any process errors in the - code. If the error was not caused by a blocked command condition - (syntax error, out of space, etc.) then it should just throw the - exception and not try and retry the command. - """ - ctxt = context.get_admin_context() - extra_specs = {} - type_ref = volume_types.create(ctxt, 'hgst-1', extra_specs) - volume = {'id': '1', 'name': 'volume1', - 'display_name': '', - 'volume_type_id': type_ref['id'], - 'size': 10, - 'provider_id': 'volume10'} - self.extended = {'name': '', 'size': '0', - 'storageserver': ''} - self._fail_extend = True - self.assertRaises(exception.VolumeDriverException, - self.driver.extend_volume, volume, 12) - self.assertFalse(self._request_cancel) - - @mock.patch('socket.gethostbyname', return_value='123.123.123.123') - def test_initialize_connection(self, moch_ghn): - """Test that the connection_info for Nova makes sense.""" - volume = {'name': '123', 'provider_id': 'spacey'} - conn = self.driver.initialize_connection(volume, None) - expected = {'name': 'spacey', 'noremovehost': 'thisserver'} - self.assertDictEqual(expected, conn['data']) - -# Below are some command outputs we emulate -IP_OUTPUT = """ -3: em2: mtu 1500 qdisc mq state - link/ether 00:25:90:d9:18:09 brd ff:ff:ff:ff:ff:ff - inet 192.168.0.23/24 brd 192.168.0.255 scope global em2 - valid_lft forever preferred_lft forever - inet6 fe80::225:90ff:fed9:1809/64 scope link - valid_lft forever preferred_lft forever -1: lo: mtu 65536 qdisc noqueue state UNKNOWN - link/loopback 00:00:00:00:00:00 brd 00:00:00:00:00:00 - inet 123.123.123.123/8 scope host lo - valid_lft forever preferred_lft forever - inet 169.254.169.254/32 scope link lo - valid_lft forever preferred_lft forever - inet6 ::1/128 scope host - valid_lft forever preferred_lft forever -2: em1: mtu 1500 qdisc mq master - link/ether 00:25:90:d9:18:08 brd 
ff:ff:ff:ff:ff:ff - inet6 fe80::225:90ff:fed9:1808/64 scope link - valid_lft forever preferred_lft forever -""" - -HGST_HOST_STORAGE = """ -{ - "hostStatus": [ - { - "node": "tm33.virident.info", - "up": true, - "isManager": true, - "cardStatus": [ - { - "cardName": "/dev/sda3", - "cardSerialNumber": "002f09b4037a9d521c007ee4esda3", - "cardStatus": "Good", - "cardStateDetails": "Normal", - "cardActionRequired": "", - "cardTemperatureC": 0, - "deviceType": "Generic", - "cardTemperatureState": "Safe", - "partitionStatus": [ - { - "partName": "/dev/gbd0", - "partitionState": "READY", - "usableCapacityBytes": 98213822464, - "totalReadBytes": 0, - "totalWriteBytes": 0, - "remainingLifePCT": 100, - "flashReservesLeftPCT": 100, - "fmc": true, - "vspaceCapacityAvailable": 94947041280, - "vspaceReducedCapacityAvailable": 87194279936, - "_partitionID": "002f09b4037a9d521c007ee4esda3:0", - "_usedSpaceBytes": 3266781184, - "_enabledSpaceBytes": 3266781184, - "_disabledSpaceBytes": 0 - } - ] - } - ], - "driverStatus": { - "vgcdriveDriverLoaded": true, - "vhaDriverLoaded": true, - "vcacheDriverLoaded": true, - "vlvmDriverLoaded": true, - "ipDataProviderLoaded": true, - "ibDataProviderLoaded": false, - "driverUptimeSecs": 4800, - "rVersion": "20368.d55ec22.master" - }, - "totalCapacityBytes": 98213822464, - "totalUsedBytes": 3266781184, - "totalEnabledBytes": 3266781184, - "totalDisabledBytes": 0 - }, - { - "node": "tm32.virident.info", - "up": true, - "isManager": false, - "cardStatus": [], - "driverStatus": { - "vgcdriveDriverLoaded": true, - "vhaDriverLoaded": true, - "vcacheDriverLoaded": true, - "vlvmDriverLoaded": true, - "ipDataProviderLoaded": true, - "ibDataProviderLoaded": false, - "driverUptimeSecs": 0, - "rVersion": "20368.d55ec22.master" - }, - "totalCapacityBytes": 0, - "totalUsedBytes": 0, - "totalEnabledBytes": 0, - "totalDisabledBytes": 0 - } - ], - "totalCapacityBytes": 98213822464, - "totalUsedBytes": 3266781184, - "totalEnabledBytes": 3266781184, - 
"totalDisabledBytes": 0 -} -""" - -HGST_SPACE_JSON = """ -{ - "resources": [ - { - "resourceType": "vLVM-L", - "resourceID": "vLVM-L:698cdb43-54da-863e-1699-294a080ce4db", - "state": "OFFLINE", - "instanceStates": {}, - "redundancy": 0, - "sizeBytes": 12000000000, - "name": "volume10", - "nodes": [], - "networks": [ - "net1" - ], - "components": [ - { - "resourceType": "vLVM-S", - "resourceID": "vLVM-S:698cdb43-54da-863e-eb10-6275f47b8ed2", - "redundancy": 0, - "order": 0, - "sizeBytes": 12000000000, - "numStripes": 1, - "stripeSizeBytes": null, - "name": "volume10s00", - "state": "OFFLINE", - "instanceStates": {}, - "components": [ - { - "name": "volume10h00", - "resourceType": "vHA", - "resourceID": "vHA:3e86da54-40db-8c69-0300-0000ac10476e", - "redundancy": 0, - "sizeBytes": 12000000000, - "state": "GOOD", - "components": [ - { - "name": "volume10h00", - "vspaceType": "vHA", - "vspaceRole": "primary", - "storageObjectID": "vHA:3e86da54-40db-8c69--18130019e486", - "state": "Disconnected (DCS)", - "node": "tm33.virident.info", - "partName": "/dev/gbd0" - } - ], - "crState": "GOOD" - }, - { - "name": "volume10v00", - "resourceType": "vShare", - "resourceID": "vShare:3f86da54-41db-8c69-0300-ecf4bbcc14cc", - "redundancy": 0, - "order": 0, - "sizeBytes": 12000000000, - "state": "GOOD", - "components": [ - { - "name": "volume10v00", - "vspaceType": "vShare", - "vspaceRole": "target", - "storageObjectID": "vShare:3f86da54-41db-8c64bbcc14cc:T", - "state": "Started", - "node": "tm33.virident.info", - "partName": "/dev/gbd0_volume10h00" - } - ] - } - ] - } - ], - "_size": "12GB", - "_state": "OFFLINE", - "_ugm": "", - "_nets": "net1", - "_hosts": "tm33.virident.info(12GB,NC)", - "_ahosts": "", - "_shosts": "tm33.virident.info(12GB)", - "_name": "volume10", - "_node": "", - "_type": "vLVM-L", - "_detail": "vLVM-L:698cdb43-54da-863e-1699-294a080ce4db", - "_device": "" - } - ] -} -""" - -NETWORK_LIST = """ -Network Name Type Flags Description ------------- ---- ---------- 
------------------------ -net1 IPv4 autoConfig 192.168.0.0/24 1Gb/s -net2 IPv4 autoConfig 192.168.10.0/24 10Gb/s -""" - -DD_OUTPUT = """ -1+0 records in -1+0 records out -1024 bytes (1.0 kB) copied, 0.000427529 s, 2.4 MB/s -""" diff --git a/cinder/tests/unit/volume/drivers/test_infinidat.py b/cinder/tests/unit/volume/drivers/test_infinidat.py deleted file mode 100644 index 76cd068fb..000000000 --- a/cinder/tests/unit/volume/drivers/test_infinidat.py +++ /dev/null @@ -1,630 +0,0 @@ -# Copyright 2016 Infinidat Ltd. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
-"""Unit tests for INFINIDAT InfiniBox volume driver.""" - -import mock -from oslo_utils import units - -from cinder import exception -from cinder import test -from cinder.volume import configuration -from cinder.volume.drivers import infinidat - - -TEST_WWN_1 = '00:11:22:33:44:55:66:77' -TEST_WWN_2 = '11:11:22:33:44:55:66:77' - -test_volume = mock.Mock(id=1, size=1, volume_type_id=1) -test_snapshot = mock.Mock(id=2, volume=test_volume, volume_id='1') -test_clone = mock.Mock(id=3, size=1) -test_group = mock.Mock(id=4) -test_snapgroup = mock.Mock(id=5, group=test_group) -test_connector = dict(wwpns=[TEST_WWN_1], - initiator='iqn.2012-07.org.fake:01') - - -class FakeInfinisdkException(Exception): - pass - - -class InfiniboxDriverTestCaseBase(test.TestCase): - def setUp(self): - super(InfiniboxDriverTestCaseBase, self).setUp() - - # create mock configuration - self.configuration = mock.Mock(spec=configuration.Configuration) - self.configuration.infinidat_storage_protocol = 'fc' - self.configuration.san_ip = 'mockbox' - self.configuration.infinidat_pool_name = 'mockpool' - self.configuration.san_thin_provision = 'thin' - self.configuration.san_login = 'user' - self.configuration.san_password = 'pass' - self.configuration.volume_backend_name = 'mock' - self.configuration.volume_dd_blocksize = '1M' - self.configuration.use_multipath_for_image_xfer = False - self.configuration.enforce_multipath_for_image_xfer = False - self.configuration.num_volume_device_scan_tries = 1 - self.configuration.san_is_local = False - self.configuration.chap_username = None - self.configuration.chap_password = None - self.configuration.infinidat_use_compression = None - - self.driver = infinidat.InfiniboxVolumeDriver( - configuration=self.configuration) - self._system = self._infinibox_mock() - # mock external library dependencies - infinisdk = self.patch("cinder.volume.drivers.infinidat.infinisdk") - capacity = self.patch("cinder.volume.drivers.infinidat.capacity") - 
self.patch("cinder.volume.drivers.infinidat.iqn") - self.patch("cinder.volume.drivers.infinidat.wwn") - capacity.byte = 1 - capacity.GiB = units.Gi - infinisdk.core.exceptions.InfiniSDKException = FakeInfinisdkException - infinisdk.InfiniBox.return_value = self._system - self.driver.do_setup(None) - - def _infinibox_mock(self): - result = mock.Mock() - self._mock_volume = mock.Mock() - self._mock_volume.has_children.return_value = False - self._mock_volume.get_logical_units.return_value = [] - self._mock_volume.create_child.return_value = self._mock_volume - self._mock_host = mock.Mock() - self._mock_host.get_luns.return_value = [] - self._mock_host.map_volume().get_lun.return_value = 1 - self._mock_pool = mock.Mock() - self._mock_pool.get_free_physical_capacity.return_value = units.Gi - self._mock_pool.get_physical_capacity.return_value = units.Gi - self._mock_ns = mock.Mock() - self._mock_ns.get_ips.return_value = [mock.Mock(ip_address='1.1.1.1')] - self._mock_group = mock.Mock() - self._mock_qos_policy = mock.Mock() - result.volumes.safe_get.return_value = self._mock_volume - result.volumes.create.return_value = self._mock_volume - result.pools.safe_get.return_value = self._mock_pool - result.hosts.safe_get.return_value = self._mock_host - result.cons_groups.safe_get.return_value = self._mock_group - result.cons_groups.create.return_value = self._mock_group - result.hosts.create.return_value = self._mock_host - result.network_spaces.safe_get.return_value = self._mock_ns - result.components.nodes.get_all.return_value = [] - result.qos_policies.create.return_value = self._mock_qos_policy - result.qos_policies.safe_get.return_value = None - return result - - def _raise_infinisdk(self, *args, **kwargs): - raise FakeInfinisdkException() - - -class InfiniboxDriverTestCase(InfiniboxDriverTestCaseBase): - def test_initialize_connection(self): - self._system.hosts.safe_get.return_value = None - result = self.driver.initialize_connection(test_volume, test_connector) - 
self.assertEqual(1, result["data"]["target_lun"]) - - def test_initialize_connection_host_exists(self): - result = self.driver.initialize_connection(test_volume, test_connector) - self.assertEqual(1, result["data"]["target_lun"]) - - def test_initialize_connection_mapping_exists(self): - mock_mapping = mock.Mock() - mock_mapping.get_volume.return_value = self._mock_volume - mock_mapping.get_lun.return_value = 888 - self._mock_host.get_luns.return_value = [mock_mapping] - result = self.driver.initialize_connection(test_volume, test_connector) - self.assertEqual(888, result["data"]["target_lun"]) - - def test_initialize_connection_volume_doesnt_exist(self): - self._system.volumes.safe_get.return_value = None - self.assertRaises(exception.InvalidVolume, - self.driver.initialize_connection, - test_volume, test_connector) - - def test_initialize_connection_create_fails(self): - self._system.hosts.safe_get.return_value = None - self._system.hosts.create.side_effect = self._raise_infinisdk - self.assertRaises(exception.VolumeBackendAPIException, - self.driver.initialize_connection, - test_volume, test_connector) - - def test_initialize_connection_map_fails(self): - self._mock_host.map_volume.side_effect = self._raise_infinisdk - self.assertRaises(exception.VolumeBackendAPIException, - self.driver.initialize_connection, - test_volume, test_connector) - - def test_initialize_connection_metadata(self): - self._system.hosts.safe_get.return_value = None - self.driver.initialize_connection(test_volume, test_connector) - self._mock_host.set_metadata_from_dict.assert_called_once() - - def test_terminate_connection(self): - self.driver.terminate_connection(test_volume, test_connector) - - def test_terminate_connection_delete_host(self): - self._mock_host.get_luns.return_value = [object()] - self.driver.terminate_connection(test_volume, test_connector) - self.assertEqual(0, self._mock_host.safe_delete.call_count) - self._mock_host.get_luns.return_value = [] - 
self.driver.terminate_connection(test_volume, test_connector) - self.assertEqual(1, self._mock_host.safe_delete.call_count) - - def test_terminate_connection_volume_doesnt_exist(self): - self._system.volumes.safe_get.return_value = None - self.assertRaises(exception.InvalidVolume, - self.driver.terminate_connection, - test_volume, test_connector) - - def test_terminate_connection_api_fail(self): - self._mock_host.unmap_volume.side_effect = self._raise_infinisdk - self.assertRaises(exception.VolumeBackendAPIException, - self.driver.terminate_connection, - test_volume, test_connector) - - def test_get_volume_stats_refreshes(self): - result = self.driver.get_volume_stats() - self.assertEqual(1, result["free_capacity_gb"]) - # change the "free space" in the pool - self._mock_pool.get_free_physical_capacity.return_value = 0 - # no refresh - free capacity should stay the same - result = self.driver.get_volume_stats(refresh=False) - self.assertEqual(1, result["free_capacity_gb"]) - # refresh - free capacity should change to 0 - result = self.driver.get_volume_stats(refresh=True) - self.assertEqual(0, result["free_capacity_gb"]) - - def test_get_volume_stats_pool_not_found(self): - self._system.pools.safe_get.return_value = None - self.assertRaises(exception.VolumeDriverException, - self.driver.get_volume_stats) - - @mock.patch("cinder.volume.volume_types.get_volume_type_qos_specs") - def test_create_volume(self, *mocks): - self.driver.create_volume(test_volume) - - def test_create_volume_pool_not_found(self): - self._system.pools.safe_get.return_value = None - self.assertRaises(exception.VolumeDriverException, - self.driver.create_volume, test_volume) - - def test_create_volume_api_fail(self): - self._system.pools.safe_get.side_effect = self._raise_infinisdk - self.assertRaises(exception.VolumeBackendAPIException, - self.driver.create_volume, test_volume) - - @mock.patch("cinder.volume.volume_types.get_volume_type_qos_specs") - def test_create_volume_metadata(self, 
*mocks): - self.driver.create_volume(test_volume) - self._mock_volume.set_metadata_from_dict.assert_called_once() - - @mock.patch("cinder.volume.volume_types.get_volume_type_qos_specs") - def test_create_volume_compression_enabled(self, *mocks): - self.configuration.infinidat_use_compression = True - self.driver.create_volume(test_volume) - self.assertTrue( - self._system.volumes.create.call_args[1]["compression_enabled"] - ) - - @mock.patch("cinder.volume.volume_types.get_volume_type_qos_specs") - def test_create_volume_compression_not_enabled(self, *mocks): - self.configuration.infinidat_use_compression = False - self.driver.create_volume(test_volume) - self.assertFalse( - self._system.volumes.create.call_args[1]["compression_enabled"] - ) - - @mock.patch("cinder.volume.volume_types.get_volume_type_qos_specs") - def test_create_volume_compression_not_available(self, *mocks): - self._system.compat.has_compression.return_value = False - self.driver.create_volume(test_volume) - self.assertNotIn( - "compression_enabled", - self._system.volumes.create.call_args[1] - ) - - def test_delete_volume(self): - self.driver.delete_volume(test_volume) - - def test_delete_volume_doesnt_exist(self): - self._system.volumes.safe_get.return_value = None - # should not raise an exception - self.driver.delete_volume(test_volume) - - def test_delete_volume_with_children(self): - self._mock_volume.has_children.return_value = True - self.assertRaises(exception.VolumeIsBusy, - self.driver.delete_volume, test_volume) - - def test_extend_volume(self): - self.driver.extend_volume(test_volume, 2) - - def test_extend_volume_api_fail(self): - self._mock_volume.resize.side_effect = self._raise_infinisdk - self.assertRaises(exception.VolumeBackendAPIException, - self.driver.extend_volume, test_volume, 2) - - def test_create_snapshot(self): - self.driver.create_snapshot(test_snapshot) - - def test_create_snapshot_metadata(self): - self._mock_volume.create_snapshot.return_value = self._mock_volume 
- self.driver.create_snapshot(test_snapshot) - self._mock_volume.set_metadata_from_dict.assert_called_once() - - def test_create_snapshot_volume_doesnt_exist(self): - self._system.volumes.safe_get.return_value = None - self.assertRaises(exception.InvalidVolume, - self.driver.create_snapshot, test_snapshot) - - def test_create_snapshot_api_fail(self): - self._mock_volume.create_snapshot.side_effect = self._raise_infinisdk - self.assertRaises(exception.VolumeBackendAPIException, - self.driver.create_snapshot, test_snapshot) - - @mock.patch("cinder.volume.utils.copy_volume") - @mock.patch("cinder.utils.brick_get_connector") - @mock.patch("cinder.utils.brick_get_connector_properties", - return_value=test_connector) - @mock.patch("cinder.volume.volume_types.get_volume_type_qos_specs") - def test_create_volume_from_snapshot(self, *mocks): - self.driver.create_volume_from_snapshot(test_clone, test_snapshot) - - def test_create_volume_from_snapshot_doesnt_exist(self): - self._system.volumes.safe_get.return_value = None - self.assertRaises(exception.InvalidSnapshot, - self.driver.create_volume_from_snapshot, - test_clone, test_snapshot) - - def test_create_volume_from_snapshot_create_fails(self): - self._mock_volume.create_child.side_effect = self._raise_infinisdk - self.assertRaises(exception.VolumeBackendAPIException, - self.driver.create_volume_from_snapshot, - test_clone, test_snapshot) - - @mock.patch("cinder.utils.brick_get_connector_properties", - return_value=test_connector) - @mock.patch("cinder.volume.volume_types.get_volume_type_qos_specs") - def test_create_volume_from_snapshot_map_fails(self, *mocks): - self._mock_host.map_volume.side_effect = self._raise_infinisdk - self.assertRaises(exception.VolumeBackendAPIException, - self.driver.create_volume_from_snapshot, - test_clone, test_snapshot) - - @mock.patch("cinder.volume.utils.copy_volume") - @mock.patch("cinder.utils.brick_get_connector") - @mock.patch("cinder.utils.brick_get_connector_properties", - 
return_value=test_connector) - def test_create_volume_from_snapshot_delete_clone_fails(self, *mocks): - self._mock_volume.delete.side_effect = self._raise_infinisdk - self.assertRaises(exception.VolumeBackendAPIException, - self.driver.create_volume_from_snapshot, - test_clone, test_snapshot) - - def test_delete_snapshot(self): - self.driver.delete_snapshot(test_snapshot) - - def test_delete_snapshot_doesnt_exist(self): - self._system.volumes.safe_get.return_value = None - # should not raise an exception - self.driver.delete_snapshot(test_snapshot) - - def test_delete_snapshot_api_fail(self): - self._mock_volume.safe_delete.side_effect = self._raise_infinisdk - self.assertRaises(exception.VolumeBackendAPIException, - self.driver.delete_snapshot, test_snapshot) - - @mock.patch("cinder.volume.utils.copy_volume") - @mock.patch("cinder.utils.brick_get_connector") - @mock.patch("cinder.utils.brick_get_connector_properties", - return_value=test_connector) - @mock.patch("cinder.volume.volume_types.get_volume_type_qos_specs") - def test_create_cloned_volume(self, *mocks): - self.driver.create_cloned_volume(test_clone, test_volume) - - def test_create_cloned_volume_volume_already_mapped(self): - mock_mapping = mock.Mock() - mock_mapping.get_volume.return_value = self._mock_volume - self._mock_volume.get_logical_units.return_value = [mock_mapping] - self.assertRaises(exception.VolumeBackendAPIException, - self.driver.create_cloned_volume, - test_clone, test_volume) - - def test_create_cloned_volume_create_fails(self): - self._system.volumes.create.side_effect = self._raise_infinisdk - self.assertRaises(exception.VolumeBackendAPIException, - self.driver.create_cloned_volume, - test_clone, test_volume) - - @mock.patch("cinder.utils.brick_get_connector_properties", - return_value=test_connector) - @mock.patch("cinder.volume.volume_types.get_volume_type_qos_specs") - def test_create_cloned_volume_map_fails(self, *mocks): - self._mock_host.map_volume.side_effect = 
self._raise_infinisdk - self.assertRaises(exception.VolumeBackendAPIException, - self.driver.create_cloned_volume, - test_clone, test_volume) - - @mock.patch('cinder.volume.utils.is_group_a_cg_snapshot_type', - return_value=True) - def test_create_group(self, *mocks): - self.driver.create_group(None, test_group) - - @mock.patch('cinder.volume.utils.is_group_a_cg_snapshot_type', - return_value=True) - def test_create_group_metadata(self, *mocks): - self.driver.create_group(None, test_group) - self._mock_group.set_metadata_from_dict.assert_called_once() - - @mock.patch('cinder.volume.utils.is_group_a_cg_snapshot_type', - return_value=True) - def test_create_group_twice(self, *mocks): - self.driver.create_group(None, test_group) - self.driver.create_group(None, test_group) - - @mock.patch('cinder.volume.utils.is_group_a_cg_snapshot_type', - return_value=True) - def test_create_group_api_fail(self, *mocks): - self._system.cons_groups.create.side_effect = self._raise_infinisdk - self.assertRaises(exception.VolumeBackendAPIException, - self.driver.create_group, - None, test_group) - - @mock.patch('cinder.volume.utils.is_group_a_cg_snapshot_type', - return_value=True) - def test_delete_group(self, *mocks): - self.driver.delete_group(None, test_group, [test_volume]) - - @mock.patch('cinder.volume.utils.is_group_a_cg_snapshot_type', - return_value=True) - def test_delete_group_doesnt_exist(self, *mocks): - self._system.cons_groups.safe_get.return_value = None - self.driver.delete_group(None, test_group, [test_volume]) - - @mock.patch('cinder.volume.utils.is_group_a_cg_snapshot_type', - return_value=True) - def test_delete_group_api_fail(self, *mocks): - self._mock_group.safe_delete.side_effect = self._raise_infinisdk - self.assertRaises(exception.VolumeBackendAPIException, - self.driver.delete_group, - None, test_group, [test_volume]) - - @mock.patch('cinder.volume.utils.is_group_a_cg_snapshot_type', - return_value=True) - def test_update_group_add_and_remove(self, *mocks): 
- self.driver.update_group(None, test_group, - [test_volume], [test_volume]) - - @mock.patch('cinder.volume.utils.is_group_a_cg_snapshot_type', - return_value=True) - def test_update_group_api_fail(self, *mocks): - self._mock_group.add_member.side_effect = self._raise_infinisdk - self.assertRaises(exception.VolumeBackendAPIException, - self.driver.update_group, - None, test_group, - [test_volume], [test_volume]) - - @mock.patch("cinder.volume.utils.copy_volume") - @mock.patch("cinder.utils.brick_get_connector") - @mock.patch("cinder.utils.brick_get_connector_properties", - return_value=test_connector) - @mock.patch('cinder.volume.utils.is_group_a_cg_snapshot_type', - return_value=True) - @mock.patch("cinder.volume.volume_types.get_volume_type_qos_specs") - def test_create_group_from_src_snaps(self, *mocks): - self.driver.create_group_from_src(None, test_group, [test_volume], - test_snapgroup, [test_snapshot], - None, None) - - @mock.patch("cinder.volume.utils.copy_volume") - @mock.patch("cinder.utils.brick_get_connector") - @mock.patch("cinder.utils.brick_get_connector_properties", - return_value=test_connector) - @mock.patch('cinder.volume.utils.is_group_a_cg_snapshot_type', - return_value=True) - @mock.patch("cinder.volume.volume_types.get_volume_type_qos_specs") - def test_create_group_from_src_vols(self, *mocks): - self.driver.create_group_from_src(None, test_group, [test_volume], - None, None, - test_group, [test_volume]) - - @mock.patch('cinder.volume.utils.is_group_a_cg_snapshot_type', - return_value=True) - def test_create_group_snap(self, *mocks): - mock_snapgroup = mock.Mock() - mock_snapgroup.get_members.return_value = [self._mock_volume] - self._mock_volume.get_parent.return_value = self._mock_volume - self._mock_volume.get_name.return_value = '' - self._mock_group.create_snapshot.return_value = mock_snapgroup - self.driver.create_group_snapshot(None, - test_snapgroup, - [test_snapshot]) - - @mock.patch('cinder.volume.utils.is_group_a_cg_snapshot_type', 
- return_value=True) - def test_create_group_snap_api_fail(self, *mocks): - self._mock_group.create_snapshot.side_effect = self._raise_infinisdk - self.assertRaises(exception.VolumeBackendAPIException, - self.driver.create_group_snapshot, None, - test_snapgroup, [test_snapshot]) - - @mock.patch('cinder.volume.utils.is_group_a_cg_snapshot_type', - return_value=True) - def test_delete_group_snap(self, *mocks): - self.driver.delete_group_snapshot(None, - test_snapgroup, - [test_snapshot]) - - @mock.patch('cinder.volume.utils.is_group_a_cg_snapshot_type', - return_value=True) - def test_delete_group_snap_does_not_exist(self, *mocks): - self._system.cons_groups.safe_get.return_value = None - self.driver.delete_group_snapshot(None, - test_snapgroup, - [test_snapshot]) - - @mock.patch('cinder.volume.utils.is_group_a_cg_snapshot_type', - return_value=True) - def test_delete_group_snap_invalid_group(self, *mocks): - self._mock_group.is_snapgroup.return_value = False - self.assertRaises(exception.InvalidGroupSnapshot, - self.driver.delete_group_snapshot, - None, test_snapgroup, [test_snapshot]) - - @mock.patch('cinder.volume.utils.is_group_a_cg_snapshot_type', - return_value=True) - def test_delete_group_snap_api_fail(self, *mocks): - self._mock_group.safe_delete.side_effect = self._raise_infinisdk - self.assertRaises(exception.VolumeBackendAPIException, - self.driver.delete_group_snapshot, - None, test_snapgroup, [test_snapshot]) - - -class InfiniboxDriverTestCaseFC(InfiniboxDriverTestCaseBase): - def test_initialize_connection_multiple_wwpns(self): - connector = {'wwpns': [TEST_WWN_1, TEST_WWN_2]} - result = self.driver.initialize_connection(test_volume, connector) - self.assertEqual(1, result["data"]["target_lun"]) - - -class InfiniboxDriverTestCaseISCSI(InfiniboxDriverTestCaseBase): - def setUp(self): - super(InfiniboxDriverTestCaseISCSI, self).setUp() - self.configuration.infinidat_storage_protocol = 'iscsi' - self.configuration.infinidat_iscsi_netspaces = ['netspace1'] 
- self.configuration.use_chap_auth = False - self.driver.do_setup(None) - - def test_setup_without_netspaces_configured(self): - self.configuration.infinidat_iscsi_netspaces = [] - self.assertRaises(exception.VolumeDriverException, - self.driver.do_setup, None) - - def test_initialize_connection(self): - result = self.driver.initialize_connection(test_volume, test_connector) - self.assertEqual(1, result['data']['target_lun']) - - def test_initialize_netspace_does_not_exist(self): - self._system.network_spaces.safe_get.return_value = None - self.assertRaises(exception.VolumeDriverException, - self.driver.initialize_connection, - test_volume, test_connector) - - def test_initialize_netspace_has_no_ips(self): - self._mock_ns.get_ips.return_value = [] - self.assertRaises(exception.VolumeDriverException, - self.driver.initialize_connection, - test_volume, test_connector) - - def test_initialize_connection_with_chap(self): - self.configuration.use_chap_auth = True - result = self.driver.initialize_connection(test_volume, test_connector) - self.assertEqual(1, result['data']['target_lun']) - self.assertEqual('CHAP', result['data']['auth_method']) - self.assertIn('auth_username', result['data']) - self.assertIn('auth_password', result['data']) - - def test_initialize_connection_multiple_netspaces(self): - self.configuration.infinidat_iscsi_netspaces = ['netspace1', - 'netspace2'] - result = self.driver.initialize_connection(test_volume, test_connector) - self.assertEqual(1, result['data']['target_lun']) - self.assertEqual(2, len(result['data']['target_luns'])) - self.assertEqual(2, len(result['data']['target_iqns'])) - self.assertEqual(2, len(result['data']['target_portals'])) - - def test_terminate_connection(self): - self.driver.terminate_connection(test_volume, test_connector) - - -class InfiniboxDriverTestCaseQoS(InfiniboxDriverTestCaseBase): - @mock.patch("cinder.volume.volume_types.get_volume_type_qos_specs") - def test_qos_max_ipos(self, qos_specs): - 
qos_specs.return_value = {'qos_specs': {'id': 'qos_name', - 'consumer': 'back-end', - 'specs': {'maxIOPS': 1000, - 'maxBWS': None}}} - self.driver.create_volume(test_volume) - self._system.qos_policies.create.assert_called_once() - self._mock_qos_policy.assign_entity.assert_called_once() - - @mock.patch("cinder.volume.volume_types.get_volume_type_qos_specs") - def test_qos_max_bws(self, qos_specs): - qos_specs.return_value = {'qos_specs': {'id': 'qos_name', - 'consumer': 'back-end', - 'specs': {'maxIOPS': None, - 'maxBWS': 10000}}} - self.driver.create_volume(test_volume) - self._system.qos_policies.create.assert_called_once() - self._mock_qos_policy.assign_entity.assert_called_once() - - @mock.patch("cinder.volume.volume_types.get_volume_type_qos_specs") - def test_qos_no_compat(self, qos_specs): - qos_specs.return_value = {'qos_specs': {'id': 'qos_name', - 'consumer': 'back-end', - 'specs': {'maxIOPS': 1000, - 'maxBWS': 10000}}} - self._system.compat.has_qos.return_value = False - self.driver.create_volume(test_volume) - self._system.qos_policies.create.assert_not_called() - self._mock_qos_policy.assign_entity.assert_not_called() - - @mock.patch("cinder.volume.volume_types.get_volume_type_qos_specs") - def test_qos_volume_type_id_none(self, qos_specs): - qos_specs.return_value = {'qos_specs': {'id': 'qos_name', - 'consumer': 'back-end', - 'specs': {'maxIOPS': 1000, - 'maxBWS': 10000}}} - test_volume = mock.Mock(id=1, size=1, volume_type_id=None) - self.driver.create_volume(test_volume) - self._system.qos_policies.create.assert_not_called() - self._mock_qos_policy.assign_entity.assert_not_called() - - @mock.patch("cinder.volume.volume_types.get_volume_type_qos_specs") - def test_qos_no_specs(self, qos_specs): - qos_specs.return_value = {'qos_specs': None} - self.driver.create_volume(test_volume) - self._system.qos_policies.create.assert_not_called() - self._mock_qos_policy.assign_entity.assert_not_called() - - 
@mock.patch("cinder.volume.volume_types.get_volume_type_qos_specs") - def test_qos_front_end(self, qos_specs): - qos_specs.return_value = {'qos_specs': {'id': 'qos_name', - 'consumer': 'front-end', - 'specs': {'maxIOPS': 1000, - 'maxBWS': 10000}}} - self.driver.create_volume(test_volume) - self._system.qos_policies.create.assert_not_called() - self._mock_qos_policy.assign_entity.assert_not_called() - - @mock.patch("cinder.volume.volume_types.get_volume_type_qos_specs") - def test_qos_specs_empty(self, qos_specs): - qos_specs.return_value = {'qos_specs': {'id': 'qos_name', - 'consumer': 'back-end', - 'specs': {'maxIOPS': None, - 'maxBWS': None}}} - self.driver.create_volume(test_volume) - self._system.qos_policies.create.assert_not_called() - self._mock_qos_policy.assign_entity.assert_not_called() - - @mock.patch("cinder.volume.volume_types.get_volume_type_qos_specs") - def test_qos_policy_exists(self, qos_specs): - qos_specs.return_value = {'qos_specs': {'id': 'qos_name', - 'consumer': 'back-end', - 'specs': {'maxIOPS': 1000, - 'maxBWS': 10000}}} - self._system.qos_policies.safe_get.return_value = self._mock_qos_policy - self.driver.create_volume(test_volume) - self._system.qos_policies.create.assert_not_called() - self._mock_qos_policy.assign_entity.assert_called() diff --git a/cinder/tests/unit/volume/drivers/test_kaminario.py b/cinder/tests/unit/volume/drivers/test_kaminario.py deleted file mode 100644 index c20c98f73..000000000 --- a/cinder/tests/unit/volume/drivers/test_kaminario.py +++ /dev/null @@ -1,557 +0,0 @@ -# Copyright (c) 2016 by Kaminario Technologies, Ltd. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. 
You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -"""Unit tests for kaminario driver.""" -import mock -from oslo_utils import units -import time - -from cinder import context -from cinder import exception -from cinder import objects -from cinder.objects import fields -from cinder import test -from cinder.tests.unit import fake_snapshot -from cinder.tests.unit import fake_volume -from cinder import utils -from cinder.volume import configuration -from cinder.volume.drivers.kaminario import kaminario_common -from cinder.volume.drivers.kaminario import kaminario_fc -from cinder.volume.drivers.kaminario import kaminario_iscsi -from cinder.volume import utils as vol_utils - -CONNECTOR = {'initiator': 'iqn.1993-08.org.debian:01:12aa12aa12aa', - 'ip': '192.168.2.5', 'platform': 'x86_64', 'host': 'test-k2', - 'wwpns': ['12341a2a00001234', '12341a2a00001235'], - 'wwnns': ['12351a2a00001234', '12361a2a00001234'], - 'os_type': 'linux2', 'multipath': False} - - -class FakeK2Obj(object): - id = 548 - lun = 548 - - -class FakeSaveObject(FakeK2Obj): - def __init__(self, *args, **kwargs): - self.ntype = kwargs.get('ntype') - self.ip_address = '10.0.0.1' - self.iscsi_qualified_target_name = "xyztlnxyz" - self.snapshot = FakeK2Obj() - self.name = 'test' - self.pwwn = '50024f4053300300' - self.volume_group = self - self.is_dedup = True - self.size = units.Mi - self.replication_status = None - self.state = 'in_sync' - self.generation_number = 548 - self.current_role = 'target' - self.current_snapshot_progress = 100 - self.current_snapshot_id = None - self.wan_port = None - - def refresh(self): - return - - def save(self): - 
return FakeSaveObject() - - def delete(self): - return None - - -class FakeSaveObjectExp(FakeSaveObject): - def save(self): - raise exception.KaminarioCinderDriverException("test") - - def delete(self): - raise exception.KaminarioCinderDriverException("test") - - -class FakeSearchObject(object): - hits = [FakeSaveObject()] - total = 1 - - def __init__(self, *args): - if args and "mappings" in args[0]: - self.total = 0 - - -class FakeSearchObjectExp(object): - hits = [FakeSaveObjectExp()] - total = 1 - - -class FakeKrest(object): - def search(self, *args, **argv): - return FakeSearchObject(*args) - - def new(self, *args, **argv): - return FakeSaveObject() - - -class FakeKrestException(object): - def search(self, *args, **argv): - return FakeSearchObjectExp() - - def new(self, *args, **argv): - return FakeSaveObjectExp() - - -class Replication(object): - backend_id = '10.0.0.1' - login = 'login' - password = 'password' - rpo = 500 - - -class TestKaminarioISCSI(test.TestCase): - driver = None - conf = None - - def setUp(self): - self._setup_config() - self._setup_driver() - super(TestKaminarioISCSI, self).setUp() - self.context = context.get_admin_context() - self.vol = fake_volume.fake_volume_obj(self.context) - self.vol.volume_type = fake_volume.fake_volume_type_obj(self.context) - self.vol.volume_type.extra_specs = {'foo': None} - self.snap = fake_snapshot.fake_snapshot_obj(self.context) - self.snap.volume = self.vol - self.patch('eventlet.sleep') - - def _setup_config(self): - self.conf = mock.Mock(spec=configuration.Configuration) - self.conf.kaminario_dedup_type_name = "dedup" - self.conf.volume_dd_blocksize = 2 - - def _setup_driver(self): - self.driver = (kaminario_iscsi. 
- KaminarioISCSIDriver(configuration=self.conf)) - device = mock.Mock(return_value={'device': {'path': '/dev'}}) - self.driver._connect_device = device - self.driver.client = FakeKrest() - - def test_create_volume(self): - """Test create_volume.""" - result = self.driver.create_volume(self.vol) - self.assertIsNone(result) - - def test_create_volume_with_exception(self): - """Test create_volume_with_exception.""" - self.driver.client = FakeKrestException() - self.assertRaises(exception.KaminarioCinderDriverException, - self.driver.create_volume, self.vol) - - def test_delete_volume(self): - """Test delete_volume.""" - result = self.driver.delete_volume(self.vol) - self.assertIsNone(result) - - def test_delete_volume_with_exception(self): - """Test delete_volume_with_exception.""" - self.driver.client = FakeKrestException() - self.assertRaises(exception.KaminarioCinderDriverException, - self.driver.delete_volume, self.vol) - - def test_create_snapshot(self): - """Test create_snapshot.""" - self.snap.id = "253b2878-ec60-4793-ad19-e65496ec7aab" - self.driver.client.new = mock.Mock() - result = self.driver.create_snapshot(self.snap) - self.assertIsNone(result) - fake_object = self.driver.client.search().hits[0] - self.driver.client.new.assert_called_once_with( - "snapshots", - short_name='cs-253b2878-ec60-4793-ad19-e65496ec7aab', - source=fake_object, retention_policy=fake_object, - is_auto_deleteable=False) - - def test_create_snapshot_with_exception(self): - """Test create_snapshot_with_exception.""" - self.driver.client = FakeKrestException() - self.assertRaises(exception.KaminarioCinderDriverException, - self.driver.create_snapshot, self.snap) - - def test_delete_snapshot(self): - """Test delete_snapshot.""" - result = self.driver.delete_snapshot(self.snap) - self.assertIsNone(result) - - def test_delete_snapshot_with_exception(self): - """Test delete_snapshot_with_exception.""" - self.driver.client = FakeKrestException() - 
self.assertRaises(exception.KaminarioCinderDriverException, - self.driver.delete_snapshot, self.snap) - - @mock.patch.object(utils, 'brick_get_connector_properties') - @mock.patch.object(vol_utils, 'copy_volume') - def test_create_volume_from_snapshot(self, mock_copy_volume, - mock_brick_get): - """Test create_volume_from_snapshot.""" - mock_brick_get.return_value = CONNECTOR - mock_copy_volume.return_value = None - self.driver._kaminario_disconnect_volume = mock.Mock() - result = self.driver.create_volume_from_snapshot(self.vol, self.snap) - self.assertIsNone(result) - - @mock.patch.object(utils, 'brick_get_connector_properties') - @mock.patch.object(vol_utils, 'copy_volume') - def test_create_volume_from_snapshot_with_exception(self, mock_copy_volume, - mock_brick_get): - """Test create_volume_from_snapshot_with_exception.""" - mock_brick_get.return_value = CONNECTOR - mock_copy_volume.return_value = None - self.driver.client = FakeKrestException() - self.assertRaises(exception.KaminarioCinderDriverException, - self.driver.create_volume_from_snapshot, self.vol, - self.snap) - - @mock.patch.object(utils, 'brick_get_connector_properties') - @mock.patch.object(vol_utils, 'copy_volume') - def test_create_cloned_volume(self, mock_copy_volume, mock_brick_get): - """Test create_cloned_volume.""" - mock_brick_get.return_value = CONNECTOR - mock_copy_volume.return_value = None - self.driver._kaminario_disconnect_volume = mock.Mock() - result = self.driver.create_cloned_volume(self.vol, self.vol) - self.assertIsNone(result) - - @mock.patch.object(utils, 'brick_get_connector_properties') - @mock.patch.object(vol_utils, 'copy_volume') - def test_create_cloned_volume_with_exception(self, mock_copy_volume, - mock_brick_get): - """Test create_cloned_volume_with_exception.""" - mock_brick_get.return_value = CONNECTOR - mock_copy_volume.return_value = None - self.driver.terminate_connection = mock.Mock() - self.driver.client = FakeKrestException() - 
self.assertRaises(exception.KaminarioCinderDriverException, - self.driver.create_cloned_volume, self.vol, self.vol) - - def test_extend_volume(self): - """Test extend_volume.""" - new_size = 256 - result = self.driver.extend_volume(self.vol, new_size) - self.assertIsNone(result) - - def test_extend_volume_with_exception(self): - """Test extend_volume_with_exception.""" - self.driver.client = FakeKrestException() - new_size = 256 - self.assertRaises(exception.KaminarioCinderDriverException, - self.driver.extend_volume, self.vol, new_size) - - def test_initialize_connection(self): - """Test initialize_connection.""" - conn_info = self.driver.initialize_connection(self.vol, CONNECTOR) - self.assertIn('data', conn_info) - self.assertIn('target_iqn', conn_info['data']) - - def test_initialize_connection_with_exception(self): - """Test initialize_connection_with_exception.""" - self.driver.client = FakeKrestException() - self.assertRaises(exception.KaminarioCinderDriverException, - self.driver.initialize_connection, self.vol, - CONNECTOR) - - def test_terminate_connection(self): - """Test terminate_connection.""" - result = self.driver.terminate_connection(self.vol, CONNECTOR) - self.assertIsNone(result) - - def test_get_lun_number(self): - """Test _get_lun_number.""" - host, host_rs, host_name = self.driver._get_host_object(CONNECTOR) - result = self.driver._get_lun_number(self.vol, host) - self.assertEqual(548, result) - - def test_get_volume_object(self): - """Test _get_volume_object.""" - result = self.driver._get_volume_object(self.vol) - self.assertEqual(548, result.id) - - def test_get_host_object(self): - """Test _get_host_object.""" - host, host_rs, host_name = self.driver._get_host_object(CONNECTOR) - self.assertEqual(548, host.id) - self.assertEqual(1, host_rs.total) - self.assertEqual('test-k2', host_name) - - def test_get_target_info(self): - """Test get_target_info.""" - iscsi_portal, target_iqn = self.driver.get_target_info(self.vol) - 
self.assertEqual('10.0.0.1:3260', iscsi_portal) - self.assertEqual('xyztlnxyz', target_iqn) - - def test_k2_initialize_connection(self): - """Test k2_initialize_connection.""" - result = self.driver.k2_initialize_connection(self.vol, CONNECTOR) - self.assertEqual(548, result) - - def test_manage_existing(self): - """Test manage_existing.""" - self.driver._get_replica_status = mock.Mock(return_value=False) - result = self.driver.manage_existing(self.vol, {'source-name': 'test'}) - self.assertIsNone(result) - - def test_manage_existing_exp(self): - self.driver._get_replica_status = mock.Mock(return_value=True) - self.assertRaises(exception.ManageExistingInvalidReference, - self.driver.manage_existing, self.vol, - {'source-name': 'test'}) - - def test_manage_vg_volumes(self): - self.driver.nvol = 2 - self.assertRaises(exception.ManageExistingInvalidReference, - self.driver.manage_existing, self.vol, - {'source-name': 'test'}) - - def test_manage_existing_get_size(self): - """Test manage_existing_get_size.""" - self.driver.client.search().hits[0].size = units.Mi - result = self.driver.manage_existing_get_size(self.vol, - {'source-name': 'test'}) - self.assertEqual(1, result) - - def test_get_is_dedup(self): - """Test _get_is_dedup.""" - result = self.driver._get_is_dedup(self.vol.volume_type) - self.assertTrue(result) - - def test_get_is_dedup_false(self): - """Test _get_is_dedup_false.""" - specs = {'kaminario:thin_prov_type': 'nodedup'} - self.vol.volume_type.extra_specs = specs - result = self.driver._get_is_dedup(self.vol.volume_type) - self.assertFalse(result) - - def test_get_replica_status(self): - """Test _get_replica_status.""" - result = self.driver._get_replica_status(self.vol) - self.assertTrue(result) - - def test_create_volume_replica(self): - """Test _create_volume_replica.""" - vg = FakeSaveObject() - rep = Replication() - self.driver.replica = rep - session_name = self.driver.get_session_name('1234567890987654321') - 
self.assertEqual('ssn-1234567890987654321', session_name) - rsession_name = self.driver.get_rep_name(session_name) - self.assertEqual('rssn-1234567890987654321', rsession_name) - src_ssn = self.driver.client.new("replication/sessions").save() - self.assertEqual('in_sync', src_ssn.state) - result = self.driver._create_volume_replica(self.vol, vg, vg, rep.rpo) - self.assertIsNone(result) - - def test_create_volume_replica_exp(self): - """Test _create_volume_replica_exp.""" - vg = FakeSaveObject() - rep = Replication() - self.driver.replica = rep - self.driver.client = FakeKrestException() - self.assertRaises(exception.KaminarioCinderDriverException, - self.driver._create_volume_replica, self.vol, - vg, vg, rep.rpo) - - def test_delete_by_ref(self): - """Test _delete_by_ref.""" - result = self.driver._delete_by_ref(self.driver.client, 'volume', - 'name', 'message') - self.assertIsNone(result) - - def test_failover_volume(self): - """Test _failover_volume.""" - self.driver.target = FakeKrest() - session_name = self.driver.get_session_name('1234567890987654321') - self.assertEqual('ssn-1234567890987654321', session_name) - rsession_name = self.driver.get_rep_name(session_name) - self.assertEqual('rssn-1234567890987654321', rsession_name) - result = self.driver._failover_volume(self.vol) - self.assertIsNone(result) - - @mock.patch.object(kaminario_common.KaminarioCinderDriver, - '_check_for_status') - @mock.patch.object(objects.service.Service, 'get_by_args') - def test_failover_host(self, get_by_args, check_stauts): - """Test failover_host.""" - mock_args = mock.Mock() - mock_args.active_backend_id = '10.0.0.1' - self.vol.replication_status = 'failed-over' - self.driver.configuration.san_ip = '10.0.0.1' - get_by_args.side_effect = [mock_args, mock_args] - self.driver.host = 'host' - volumes = [self.vol, self.vol] - self.driver.replica = Replication() - self.driver.target = FakeKrest() - self.driver.target.search().total = 1 - self.driver.client.search().total = 1 - 
backend_ip, res_volumes, __ = self.driver.failover_host( - None, volumes, []) - self.assertEqual('10.0.0.1', backend_ip) - status = res_volumes[0]['updates']['replication_status'] - self.assertEqual(fields.ReplicationStatus.FAILED_OVER, status) - # different backend ip - self.driver.configuration.san_ip = '10.0.0.2' - self.driver.client.search().hits[0].state = 'in_sync' - backend_ip, res_volumes, __ = self.driver.failover_host( - None, volumes, []) - self.assertEqual('10.0.0.2', backend_ip) - status = res_volumes[0]['updates']['replication_status'] - self.assertEqual(fields.ReplicationStatus.DISABLED, status) - - def test_delete_volume_replica(self): - """Test _delete_volume_replica.""" - self.driver.replica = Replication() - self.driver.target = FakeKrest() - session_name = self.driver.get_session_name('1234567890987654321') - self.assertEqual('ssn-1234567890987654321', session_name) - rsession_name = self.driver.get_rep_name(session_name) - self.assertEqual('rssn-1234567890987654321', rsession_name) - res = self.driver._delete_by_ref(self.driver.client, 'volumes', - 'test', 'test') - self.assertIsNone(res) - result = self.driver._delete_volume_replica(self.vol, 'test', 'test') - self.assertIsNone(result) - src_ssn = self.driver.client.search("replication/sessions").hits[0] - self.assertEqual('idle', src_ssn.state) - - def test_delete_volume_replica_exp(self): - """Test _delete_volume_replica_exp.""" - self.driver.replica = Replication() - self.driver.target = FakeKrestException() - self.driver._check_for_status = mock.Mock() - self.assertRaises(exception.KaminarioCinderDriverException, - self.driver._delete_volume_replica, self.vol, - 'test', 'test') - - def test_get_is_replica(self): - """Test get_is_replica.""" - result = self.driver._get_is_replica(self.vol.volume_type) - self.assertFalse(result) - - def test_get_is_replica_true(self): - """Test get_is_replica_true.""" - self.driver.replica = Replication() - self.vol.volume_type.extra_specs = 
{'kaminario:replication': 'enabled'} - result = self.driver._get_is_replica(self.vol.volume_type) - self.assertTrue(result) - - def test_after_volume_copy(self): - """Test after_volume_copy.""" - result = self.driver.after_volume_copy(None, self.vol, - self.vol.volume_type) - self.assertIsNone(result) - - def test_retype(self): - """Test retype.""" - replica_status = self.driver._get_replica_status('test') - self.assertTrue(replica_status) - replica = self.driver._get_is_replica(self.vol.volume_type) - self.assertFalse(replica) - self.driver.replica = Replication() - result = self.driver._add_replication(self.vol) - self.assertIsNone(result) - self.driver.target = FakeKrest() - self.driver._check_for_status = mock.Mock() - result = self.driver._delete_replication(self.vol) - self.assertIsNone(result) - self.driver._delete_volume_replica = mock.Mock() - result = self.driver.retype(None, self.vol, - self.vol.volume_type, None, None) - self.assertTrue(result) - new_vol_type = fake_volume.fake_volume_type_obj(self.context) - new_vol_type.extra_specs = {'kaminario:thin_prov_type': 'nodedup'} - result2 = self.driver.retype(None, self.vol, - new_vol_type, None, None) - self.assertFalse(result2) - - def test_add_replication(self): - """"Test _add_replication.""" - self.driver.replica = Replication() - result = self.driver._add_replication(self.vol) - self.assertIsNone(result) - - def test_delete_replication(self): - """Test _delete_replication.""" - self.driver.replica = Replication() - self.driver.target = FakeKrest() - self.driver._check_for_status = mock.Mock() - result = self.driver._delete_replication(self.vol) - self.assertIsNone(result) - - def test_create_failover_volume_replica(self): - """Test _create_failover_volume_replica.""" - self.driver.replica = Replication() - self.driver.target = FakeKrest() - self.driver.configuration.san_ip = '10.0.0.1' - result = self.driver._create_failover_volume_replica(self.vol, - 'test', 'test') - self.assertIsNone(result) - - 
def test_create_volume_replica_user_snap(self): - """Test create_volume_replica_user_snap.""" - result = self.driver._create_volume_replica_user_snap(FakeKrest(), - 'sess') - self.assertEqual(548, result) - - def test_is_user_snap_sync_finished(self): - """Test _is_user_snap_sync_finished.""" - sess_mock = mock.Mock() - sess_mock.refresh = mock.Mock() - sess_mock.generation_number = 548 - sess_mock.current_snapshot_id = None - sess_mock.current_snapshot_progress = 100 - sess_mock.current_snapshot_id = None - self.driver.snap_updates = [{'tgt_ssn': sess_mock, 'gno': 548, - 'stime': time.time()}] - result = self.driver._is_user_snap_sync_finished() - self.assertIsNone(result) - - def test_delete_failover_volume_replica(self): - """Test _delete_failover_volume_replica.""" - self.driver.target = FakeKrest() - result = self.driver._delete_failover_volume_replica(self.vol, 'test', - 'test') - self.assertIsNone(result) - - -class TestKaminarioFC(TestKaminarioISCSI): - - def _setup_driver(self): - self.driver = (kaminario_fc. 
- KaminarioFCDriver(configuration=self.conf)) - device = mock.Mock(return_value={'device': {'path': '/dev'}}) - self.driver._connect_device = device - self.driver.client = FakeKrest() - self.driver._lookup_service = mock.Mock() - - def test_initialize_connection(self): - """Test initialize_connection.""" - conn_info = self.driver.initialize_connection(self.vol, CONNECTOR) - self.assertIn('data', conn_info) - self.assertIn('target_wwn', conn_info['data']) - - def test_get_target_info(self): - """Test get_target_info.""" - target_wwpn = self.driver.get_target_info(self.vol) - self.assertEqual(['50024f4053300300'], target_wwpn) - - def test_terminate_connection(self): - """Test terminate_connection.""" - result = self.driver.terminate_connection(self.vol, CONNECTOR) - self.assertIn('data', result) diff --git a/cinder/tests/unit/volume/drivers/test_lvm_driver.py b/cinder/tests/unit/volume/drivers/test_lvm_driver.py deleted file mode 100644 index 96ceff4f6..000000000 --- a/cinder/tests/unit/volume/drivers/test_lvm_driver.py +++ /dev/null @@ -1,1000 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -import ddt -import os -import socket - -import mock -from oslo_concurrency import processutils -from oslo_config import cfg - -from cinder.brick.local_dev import lvm as brick_lvm -from cinder import db -from cinder import exception -from cinder.objects import fields -from cinder.tests import fake_driver -from cinder.tests.unit.brick import fake_lvm -from cinder.tests.unit import fake_constants as fake -from cinder.tests.unit import utils as tests_utils -from cinder.tests.unit.volume import test_driver -from cinder.volume import configuration as conf -from cinder.volume.drivers import lvm -import cinder.volume.utils -from cinder.volume import utils as volutils - -CONF = cfg.CONF - -fake_opt = [ - cfg.StrOpt('fake_opt1', default='fake', help='fake opts') -] - - -@ddt.ddt -class LVMVolumeDriverTestCase(test_driver.BaseDriverTestCase): - """Test case for VolumeDriver""" - driver_name = "cinder.volume.drivers.lvm.LVMVolumeDriver" - FAKE_VOLUME = {'name': 'test1', - 'id': 'test1'} - - @mock.patch.object(os.path, 'exists', return_value=True) - @mock.patch.object(fake_driver.FakeLoggingVolumeDriver, 'create_export') - def test_delete_volume_invalid_parameter(self, _mock_create_export, - mock_exists): - self.configuration.volume_clear = 'zero' - self.configuration.volume_clear_size = 0 - lvm_driver = lvm.LVMVolumeDriver(configuration=self.configuration, - db=db) - # Test volume without 'size' field and 'volume_size' field - self.assertRaises(exception.InvalidParameterValue, - lvm_driver._delete_volume, - self.FAKE_VOLUME) - - @mock.patch.object(os.path, 'exists', return_value=False) - @mock.patch.object(fake_driver.FakeLoggingVolumeDriver, 'create_export') - def test_delete_volume_bad_path(self, _mock_create_export, mock_exists): - self.configuration.volume_clear = 'zero' - self.configuration.volume_clear_size = 0 - self.configuration.volume_type = 'default' - - volume = dict(self.FAKE_VOLUME, size=1) - lvm_driver = lvm.LVMVolumeDriver(configuration=self.configuration, - 
db=db) - - self.assertRaises(exception.VolumeBackendAPIException, - lvm_driver._delete_volume, volume) - - @mock.patch.object(volutils, 'clear_volume') - @mock.patch.object(volutils, 'copy_volume') - @mock.patch.object(fake_driver.FakeLoggingVolumeDriver, 'create_export') - def test_delete_volume_thinlvm_snap(self, _mock_create_export, - mock_copy, mock_clear): - vg_obj = fake_lvm.FakeBrickLVM('cinder-volumes', - False, - None, - 'default') - self.configuration.volume_clear = 'zero' - self.configuration.volume_clear_size = 0 - self.configuration.lvm_type = 'thin' - self.configuration.iscsi_helper = 'tgtadm' - lvm_driver = lvm.LVMVolumeDriver(configuration=self.configuration, - vg_obj=vg_obj, db=db) - - uuid = '00000000-0000-0000-0000-c3aa7ee01536' - - fake_snapshot = {'name': 'volume-' + uuid, - 'id': uuid, - 'size': 123} - lvm_driver._delete_volume(fake_snapshot, is_snapshot=True) - - @mock.patch.object(volutils, 'get_all_volume_groups', - return_value=[{'name': 'cinder-volumes'}]) - @mock.patch('cinder.brick.local_dev.lvm.LVM.get_lvm_version', - return_value=(2, 2, 100)) - def test_check_for_setup_error(self, _mock_get_version, vgs): - vg_obj = fake_lvm.FakeBrickLVM('cinder-volumes', - False, - None, - 'auto') - - configuration = conf.Configuration(fake_opt, 'fake_group') - lvm_driver = lvm.LVMVolumeDriver(configuration=configuration, - vg_obj=vg_obj, db=db) - - lvm_driver.delete_snapshot = mock.Mock() - - volume = tests_utils.create_volume(self.context, - host=socket.gethostname()) - volume_id = volume['id'] - - backup = {} - backup['volume_id'] = volume_id - backup['user_id'] = fake.USER_ID - backup['project_id'] = fake.PROJECT_ID - backup['host'] = socket.gethostname() - backup['availability_zone'] = '1' - backup['display_name'] = 'test_check_for_setup_error' - backup['display_description'] = 'test_check_for_setup_error' - backup['container'] = 'fake' - backup['status'] = fields.BackupStatus.CREATING - backup['fail_reason'] = '' - backup['service'] = 'fake' - 
backup['parent_id'] = None - backup['size'] = 5 * 1024 * 1024 - backup['object_count'] = 22 - db.backup_create(self.context, backup) - - lvm_driver.check_for_setup_error() - - def test_retype_volume(self): - vol = tests_utils.create_volume(self.context) - new_type = fake.VOLUME_TYPE_ID - diff = {} - host = 'fake_host' - retyped = self.volume.driver.retype(self.context, vol, new_type, - diff, host) - self.assertTrue(retyped) - - def test_update_migrated_volume(self): - fake_volume_id = fake.VOLUME_ID - fake_new_volume_id = fake.VOLUME2_ID - fake_provider = 'fake_provider' - original_volume_name = CONF.volume_name_template % fake_volume_id - current_name = CONF.volume_name_template % fake_new_volume_id - fake_volume = tests_utils.create_volume(self.context) - fake_volume['id'] = fake_volume_id - fake_new_volume = tests_utils.create_volume(self.context) - fake_new_volume['id'] = fake_new_volume_id - fake_new_volume['provider_location'] = fake_provider - fake_vg = fake_lvm.FakeBrickLVM('cinder-volumes', False, - None, 'default') - with mock.patch.object(self.volume.driver, 'vg') as vg: - vg.return_value = fake_vg - vg.rename_volume.return_value = None - update = self.volume.driver.update_migrated_volume(self.context, - fake_volume, - fake_new_volume, - 'available') - vg.rename_volume.assert_called_once_with(current_name, - original_volume_name) - self.assertEqual({'_name_id': None, - 'provider_location': None}, update) - - vg.rename_volume.reset_mock() - vg.rename_volume.side_effect = processutils.ProcessExecutionError - update = self.volume.driver.update_migrated_volume(self.context, - fake_volume, - fake_new_volume, - 'available') - vg.rename_volume.assert_called_once_with(current_name, - original_volume_name) - self.assertEqual({'_name_id': fake_new_volume_id, - 'provider_location': fake_provider}, - update) - - def test_create_volume_from_snapshot_none_sparse(self): - - with mock.patch.object(self.volume.driver, 'vg'), \ - mock.patch.object(self.volume.driver, 
'_create_volume'), \ - mock.patch.object(volutils, 'copy_volume') as mock_copy: - - # Test case for thick LVM - src_volume = tests_utils.create_volume(self.context) - snapshot_ref = tests_utils.create_snapshot(self.context, - src_volume['id']) - dst_volume = tests_utils.create_volume(self.context) - self.volume.driver.create_volume_from_snapshot(dst_volume, - snapshot_ref) - - volume_path = self.volume.driver.local_path(dst_volume) - snapshot_path = self.volume.driver.local_path(snapshot_ref) - volume_size = 1024 - block_size = '1M' - mock_copy.assert_called_with(snapshot_path, - volume_path, - volume_size, - block_size, - execute=self.volume.driver._execute, - sparse=False) - - def test_create_volume_from_snapshot_sparse(self): - - self.configuration.lvm_type = 'thin' - lvm_driver = lvm.LVMVolumeDriver(configuration=self.configuration, - db=db) - - with mock.patch.object(lvm_driver, 'vg'), \ - mock.patch.object(lvm_driver, '_create_volume'), \ - mock.patch.object(volutils, 'copy_volume') as mock_copy: - - # Test case for thin LVM - lvm_driver._sparse_copy_volume = True - src_volume = tests_utils.create_volume(self.context) - snapshot_ref = tests_utils.create_snapshot(self.context, - src_volume['id']) - dst_volume = tests_utils.create_volume(self.context) - lvm_driver.create_volume_from_snapshot(dst_volume, - snapshot_ref) - - volume_path = lvm_driver.local_path(dst_volume) - snapshot_path = lvm_driver.local_path(snapshot_ref) - volume_size = 1024 - block_size = '1M' - mock_copy.assert_called_with(snapshot_path, - volume_path, - volume_size, - block_size, - execute=lvm_driver._execute, - sparse=True) - - @mock.patch.object(cinder.volume.utils, 'get_all_volume_groups', - return_value=[{'name': 'cinder-volumes'}]) - @mock.patch('cinder.brick.local_dev.lvm.LVM.update_volume_group_info') - @mock.patch('cinder.brick.local_dev.lvm.LVM.get_all_physical_volumes') - @mock.patch('cinder.brick.local_dev.lvm.LVM.supports_thin_provisioning', - return_value=True) - def 
test_lvm_type_auto_thin_pool_exists(self, *_unused_mocks): - configuration = conf.Configuration(fake_opt, 'fake_group') - configuration.lvm_type = 'auto' - - vg_obj = fake_lvm.FakeBrickLVM('cinder-volumes', - False, - None, - 'default') - - lvm_driver = lvm.LVMVolumeDriver(configuration=configuration, - vg_obj=vg_obj) - - lvm_driver.check_for_setup_error() - - self.assertEqual('thin', lvm_driver.configuration.lvm_type) - - @mock.patch.object(cinder.volume.utils, 'get_all_volume_groups', - return_value=[{'name': 'cinder-volumes'}]) - @mock.patch.object(cinder.brick.local_dev.lvm.LVM, 'get_volumes', - return_value=[]) - @mock.patch('cinder.brick.local_dev.lvm.LVM.update_volume_group_info') - @mock.patch('cinder.brick.local_dev.lvm.LVM.get_all_physical_volumes') - @mock.patch('cinder.brick.local_dev.lvm.LVM.supports_thin_provisioning', - return_value=True) - def test_lvm_type_auto_no_lvs(self, *_unused_mocks): - configuration = conf.Configuration(fake_opt, 'fake_group') - configuration.lvm_type = 'auto' - - vg_obj = fake_lvm.FakeBrickLVM('cinder-volumes', - False, - None, - 'default') - - lvm_driver = lvm.LVMVolumeDriver(configuration=configuration, - vg_obj=vg_obj) - - lvm_driver.check_for_setup_error() - - self.assertEqual('thin', lvm_driver.configuration.lvm_type) - - @mock.patch.object(cinder.volume.utils, 'get_all_volume_groups', - return_value=[{'name': 'cinder-volumes'}]) - @mock.patch('cinder.brick.local_dev.lvm.LVM.get_lv_info') - @mock.patch('cinder.brick.local_dev.lvm.LVM.activate_lv') - @mock.patch('cinder.brick.local_dev.lvm.LVM.' 
- 'supports_lvchange_ignoreskipactivation') - @mock.patch('cinder.brick.local_dev.lvm.LVM.update_volume_group_info') - @mock.patch('cinder.brick.local_dev.lvm.LVM.get_all_physical_volumes') - @mock.patch('cinder.brick.local_dev.lvm.LVM.supports_thin_provisioning', - return_value=False) - def test_lvm_type_auto_no_thin_support(self, *_unused_mocks): - configuration = conf.Configuration(fake_opt, 'fake_group') - configuration.lvm_type = 'auto' - - lvm_driver = lvm.LVMVolumeDriver(configuration=configuration) - - lvm_driver.check_for_setup_error() - - self.assertEqual('default', lvm_driver.configuration.lvm_type) - - @mock.patch.object(cinder.volume.utils, 'get_all_volume_groups', - return_value=[{'name': 'cinder-volumes'}]) - @mock.patch('cinder.brick.local_dev.lvm.LVM.get_lv_info') - @mock.patch('cinder.brick.local_dev.lvm.LVM.activate_lv') - @mock.patch('cinder.brick.local_dev.lvm.LVM.' - 'supports_lvchange_ignoreskipactivation') - @mock.patch('cinder.brick.local_dev.lvm.LVM.update_volume_group_info') - @mock.patch('cinder.brick.local_dev.lvm.LVM.get_all_physical_volumes') - @mock.patch('cinder.brick.local_dev.lvm.LVM.get_volume') - @mock.patch('cinder.brick.local_dev.lvm.LVM.supports_thin_provisioning', - return_value=False) - def test_lvm_type_auto_no_thin_pool(self, *_unused_mocks): - configuration = conf.Configuration(fake_opt, 'fake_group') - configuration.lvm_type = 'auto' - - lvm_driver = lvm.LVMVolumeDriver(configuration=configuration) - - lvm_driver.check_for_setup_error() - - self.assertEqual('default', lvm_driver.configuration.lvm_type) - - @mock.patch.object(lvm.LVMVolumeDriver, 'extend_volume') - def test_create_cloned_volume_by_thin_snapshot(self, mock_extend): - self.configuration.lvm_type = 'thin' - fake_vg = mock.Mock(fake_lvm.FakeBrickLVM('cinder-volumes', False, - None, 'default')) - lvm_driver = lvm.LVMVolumeDriver(configuration=self.configuration, - vg_obj=fake_vg, - db=db) - fake_volume = tests_utils.create_volume(self.context, size=1) - 
fake_new_volume = tests_utils.create_volume(self.context, size=2) - - lvm_driver.create_cloned_volume(fake_new_volume, fake_volume) - fake_vg.create_lv_snapshot.assert_called_once_with( - fake_new_volume['name'], fake_volume['name'], 'thin') - mock_extend.assert_called_once_with(fake_new_volume, 2) - fake_vg.activate_lv.assert_called_once_with( - fake_new_volume['name'], is_snapshot=True, permanent=True) - - def test_lvm_migrate_volume_no_loc_info(self): - host = {'capabilities': {}} - vol = {'name': 'test', 'id': 1, 'size': 1, 'status': 'available'} - moved, model_update = self.volume.driver.migrate_volume(self.context, - vol, host) - self.assertFalse(moved) - self.assertIsNone(model_update) - - def test_lvm_migrate_volume_bad_loc_info(self): - capabilities = {'location_info': 'foo'} - host = {'capabilities': capabilities} - vol = {'name': 'test', 'id': 1, 'size': 1, 'status': 'available'} - moved, model_update = self.volume.driver.migrate_volume(self.context, - vol, host) - self.assertFalse(moved) - self.assertIsNone(model_update) - - def test_lvm_migrate_volume_diff_driver(self): - capabilities = {'location_info': 'FooDriver:foo:bar:default:0'} - host = {'capabilities': capabilities} - vol = {'name': 'test', 'id': 1, 'size': 1, 'status': 'available'} - moved, model_update = self.volume.driver.migrate_volume(self.context, - vol, host) - self.assertFalse(moved) - self.assertIsNone(model_update) - - def test_lvm_migrate_volume_diff_host(self): - capabilities = {'location_info': 'LVMVolumeDriver:foo:bar:default:0'} - host = {'capabilities': capabilities} - vol = {'name': 'test', 'id': 1, 'size': 1, 'status': 'available'} - moved, model_update = self.volume.driver.migrate_volume(self.context, - vol, host) - self.assertFalse(moved) - self.assertIsNone(model_update) - - def test_lvm_migrate_volume_in_use(self): - hostname = socket.gethostname() - capabilities = {'location_info': 'LVMVolumeDriver:%s:bar' % hostname} - host = {'capabilities': capabilities} - vol = 
{'name': 'test', 'id': 1, 'size': 1, 'status': 'in-use'} - moved, model_update = self.volume.driver.migrate_volume(self.context, - vol, host) - self.assertFalse(moved) - self.assertIsNone(model_update) - - @mock.patch.object(volutils, 'get_all_volume_groups', - return_value=[{'name': 'cinder-volumes'}]) - def test_lvm_migrate_volume_same_volume_group(self, vgs): - hostname = socket.gethostname() - capabilities = {'location_info': 'LVMVolumeDriver:%s:' - 'cinder-volumes:default:0' % hostname} - host = {'capabilities': capabilities} - vol = {'name': 'test', 'id': 1, 'size': 1, 'status': 'available'} - self.volume.driver.vg = fake_lvm.FakeBrickLVM('cinder-volumes', - False, - None, - 'default') - - self.assertRaises(exception.VolumeBackendAPIException, - self.volume.driver.migrate_volume, self.context, - vol, host) - - @mock.patch.object(lvm.LVMVolumeDriver, '_create_volume') - @mock.patch.object(brick_lvm.LVM, 'get_all_physical_volumes') - @mock.patch.object(brick_lvm.LVM, 'delete') - @mock.patch.object(volutils, 'copy_volume', - side_effect=processutils.ProcessExecutionError) - @mock.patch.object(volutils, 'get_all_volume_groups', - return_value=[{'name': 'cinder-volumes'}]) - def test_lvm_migrate_volume_volume_copy_error(self, vgs, copy_volume, - mock_delete, mock_pvs, - mock_create): - - hostname = socket.gethostname() - capabilities = {'location_info': 'LVMVolumeDriver:%s:' - 'cinder-volumes:default:0' % hostname} - host = {'capabilities': capabilities} - vol = {'name': 'test', 'id': 1, 'size': 1, 'status': 'available'} - self.volume.driver.vg = fake_lvm.FakeBrickLVM('cinder-volumes-old', - False, None, 'default') - self.assertRaises(processutils.ProcessExecutionError, - self.volume.driver.migrate_volume, self.context, - vol, host) - mock_delete.assert_called_once_with(vol) - - @mock.patch.object(volutils, 'get_all_volume_groups', - return_value=[{'name': 'cinder-volumes-2'}]) - def test_lvm_volume_group_missing(self, vgs): - hostname = socket.gethostname() - 
capabilities = {'location_info': 'LVMVolumeDriver:%s:' - 'cinder-volumes-3:default:0' % hostname} - host = {'capabilities': capabilities} - vol = {'name': 'test', 'id': 1, 'size': 1, 'status': 'available'} - - self.volume.driver.vg = fake_lvm.FakeBrickLVM('cinder-volumes', - False, - None, - 'default') - - moved, model_update = self.volume.driver.migrate_volume(self.context, - vol, host) - self.assertFalse(moved) - self.assertIsNone(model_update) - - def test_lvm_migrate_volume_proceed(self): - hostname = socket.gethostname() - capabilities = {'location_info': 'LVMVolumeDriver:%s:' - 'cinder-volumes-2:default:0' % hostname} - host = {'capabilities': capabilities} - vol = {'name': 'testvol', 'id': 1, 'size': 2, 'status': 'available'} - - def fake_execute(*args, **kwargs): - pass - - def get_all_volume_groups(): - # NOTE(flaper87) Return just the destination - # host to test the check of dest VG existence. - return [{'name': 'cinder-volumes-2'}] - - def _fake_get_all_physical_volumes(obj, root_helper, vg_name): - return [{}] - - with mock.patch.object(brick_lvm.LVM, 'get_all_physical_volumes', - return_value = [{}]), \ - mock.patch.object(self.volume.driver, '_execute') \ - as mock_execute, \ - mock.patch.object(volutils, 'copy_volume') as mock_copy, \ - mock.patch.object(volutils, 'get_all_volume_groups', - side_effect = get_all_volume_groups), \ - mock.patch.object(self.volume.driver, '_delete_volume'): - - self.volume.driver.vg = fake_lvm.FakeBrickLVM('cinder-volumes', - False, - None, - 'default') - mock_execute.return_value = ("mock_outs", "mock_errs") - moved, model_update = \ - self.volume.driver.migrate_volume(self.context, vol, host) - self.assertTrue(moved) - self.assertIsNone(model_update) - mock_copy.assert_called_once_with( - '/dev/mapper/cinder--volumes-testvol', - '/dev/mapper/cinder--volumes--2-testvol', - 2048, - '1M', - execute=mock_execute, - sparse=False) - - def test_lvm_migrate_volume_proceed_with_thin(self): - hostname = socket.gethostname() - 
capabilities = {'location_info': 'LVMVolumeDriver:%s:' - 'cinder-volumes-2:default:0' % hostname} - host = {'capabilities': capabilities} - vol = {'name': 'testvol', 'id': 1, 'size': 2, 'status': 'available'} - - def fake_execute(*args, **kwargs): - pass - - def get_all_volume_groups(): - # NOTE(flaper87) Return just the destination - # host to test the check of dest VG existence. - return [{'name': 'cinder-volumes-2'}] - - def _fake_get_all_physical_volumes(obj, root_helper, vg_name): - return [{}] - - self.configuration.lvm_type = 'thin' - lvm_driver = lvm.LVMVolumeDriver(configuration=self.configuration, - db=db) - - with mock.patch.object(brick_lvm.LVM, 'get_all_physical_volumes', - return_value = [{}]), \ - mock.patch.object(lvm_driver, '_execute') \ - as mock_execute, \ - mock.patch.object(volutils, 'copy_volume') as mock_copy, \ - mock.patch.object(volutils, 'get_all_volume_groups', - side_effect = get_all_volume_groups), \ - mock.patch.object(lvm_driver, '_delete_volume'): - - lvm_driver.vg = fake_lvm.FakeBrickLVM('cinder-volumes', - False, - None, - 'default') - lvm_driver._sparse_copy_volume = True - mock_execute.return_value = ("mock_outs", "mock_errs") - moved, model_update = \ - lvm_driver.migrate_volume(self.context, vol, host) - self.assertTrue(moved) - self.assertIsNone(model_update) - mock_copy.assert_called_once_with( - '/dev/mapper/cinder--volumes-testvol', - '/dev/mapper/cinder--volumes--2-testvol', - 2048, - '1M', - execute=mock_execute, - sparse=True) - - @staticmethod - def _get_manage_existing_lvs(name): - """Helper method used by the manage_existing tests below.""" - lvs = [{'name': 'fake_lv', 'size': '1.75'}, - {'name': 'fake_lv_bad_size', 'size': 'Not a float'}] - for lv in lvs: - if lv['name'] == name: - return lv - - def _setup_stubs_for_manage_existing(self): - """Helper to set up common stubs for the manage_existing tests.""" - self.volume.driver.vg = fake_lvm.FakeBrickLVM('cinder-volumes', - False, - None, - 'default') - - 
@mock.patch.object(db.sqlalchemy.api, 'volume_get', - side_effect=exception.VolumeNotFound( - volume_id='d8cd1feb-2dcc-404d-9b15-b86fe3bec0a1')) - def test_lvm_manage_existing_not_found(self, mock_vol_get): - self._setup_stubs_for_manage_existing() - - vol_name = 'volume-d8cd1feb-2dcc-404d-9b15-b86fe3bec0a1' - ref = {'source-name': 'fake_lv'} - vol = {'name': vol_name, 'id': fake.VOLUME_ID, 'size': 0} - - with mock.patch.object(self.volume.driver.vg, 'rename_volume'): - model_update = self.volume.driver.manage_existing(vol, ref) - self.assertIsNone(model_update) - - @mock.patch('cinder.db.sqlalchemy.api.resource_exists', return_value=True) - def test_lvm_manage_existing_already_managed(self, exists_mock): - self._setup_stubs_for_manage_existing() - - vol_name = 'volume-d8cd1feb-2dcc-404d-9b15-b86fe3bec0a1' - ref = {'source-name': vol_name} - vol = {'name': 'test', 'id': 1, 'size': 0} - - with mock.patch.object(self.volume.driver.vg, 'rename_volume'): - self.assertRaises(exception.ManageExistingAlreadyManaged, - self.volume.driver.manage_existing, - vol, ref) - - def test_lvm_manage_existing(self): - """Good pass on managing an LVM volume. - - This test case ensures that, when a logical volume with the - specified name exists, and the size is as expected, no error is - returned from driver.manage_existing, and that the rename_volume - function is called in the Brick LVM code with the correct arguments. 
- """ - self._setup_stubs_for_manage_existing() - - ref = {'source-name': 'fake_lv'} - vol = {'name': 'test', 'id': fake.VOLUME_ID, 'size': 0} - - def _rename_volume(old_name, new_name): - self.assertEqual(ref['source-name'], old_name) - self.assertEqual(vol['name'], new_name) - - with mock.patch.object(self.volume.driver.vg, - 'rename_volume') as mock_rename_volume, \ - mock.patch.object(self.volume.driver.vg, 'get_volume', - self._get_manage_existing_lvs): - mock_rename_volume.return_value = _rename_volume - size = self.volume.driver.manage_existing_get_size(vol, ref) - self.assertEqual(2, size) - model_update = self.volume.driver.manage_existing(vol, ref) - self.assertIsNone(model_update) - - def test_lvm_manage_existing_bad_size(self): - """Make sure correct exception on bad size returned from LVM. - - This test case ensures that the correct exception is raised when - the information returned for the existing LVs is not in the format - that the manage_existing code expects. - """ - self._setup_stubs_for_manage_existing() - - ref = {'source-name': 'fake_lv_bad_size'} - vol = {'name': 'test', 'id': fake.VOLUME_ID, 'size': 2} - - with mock.patch.object(self.volume.driver.vg, 'get_volume', - self._get_manage_existing_lvs): - self.assertRaises(exception.VolumeBackendAPIException, - self.volume.driver.manage_existing_get_size, - vol, ref) - - def test_lvm_manage_existing_bad_ref(self): - """Error case where specified LV doesn't exist. - - This test case ensures that the correct exception is raised when - the caller attempts to manage a volume that does not exist. 
- """ - self._setup_stubs_for_manage_existing() - - ref = {'source-name': 'fake_nonexistent_lv'} - vol = {'name': 'test', 'id': 1, 'size': 0, 'status': 'available'} - - with mock.patch.object(self.volume.driver.vg, 'get_volume', - self._get_manage_existing_lvs): - self.assertRaises(exception.ManageExistingInvalidReference, - self.volume.driver.manage_existing_get_size, - vol, ref) - - def test_lvm_manage_existing_snapshot(self): - """Good pass on managing an LVM snapshot. - - This test case ensures that, when a logical volume's snapshot with the - specified name exists, and the size is as expected, no error is - returned from driver.manage_existing_snapshot, and that the - rename_volume function is called in the Brick LVM code with the correct - arguments. - """ - self._setup_stubs_for_manage_existing() - - ref = {'source-name': 'fake_lv'} - snp = {'name': 'test', 'id': fake.SNAPSHOT_ID, 'size': 0} - - def _rename_volume(old_name, new_name): - self.assertEqual(ref['source-name'], old_name) - self.assertEqual(snp['name'], new_name) - - with mock.patch.object(self.volume.driver.vg, - 'rename_volume') as mock_rename_volume, \ - mock.patch.object(self.volume.driver.vg, 'get_volume', - self._get_manage_existing_lvs): - mock_rename_volume.return_value = _rename_volume - size = self.volume.driver.manage_existing_snapshot_get_size( - snp, ref) - self.assertEqual(2, size) - model_update = self.volume.driver.manage_existing_snapshot( - snp, ref) - self.assertIsNone(model_update) - - def test_lvm_manage_existing_snapshot_bad_ref(self): - """Error case where specified LV snapshot doesn't exist. - - This test case ensures that the correct exception is raised when - the caller attempts to manage a snapshot that does not exist. 
- """ - self._setup_stubs_for_manage_existing() - - ref = {'source-name': 'fake_nonexistent_lv'} - snp = { - 'name': 'test', - 'id': fake.SNAPSHOT_ID, - 'size': 0, - 'status': 'available', - } - with mock.patch.object(self.volume.driver.vg, 'get_volume', - self._get_manage_existing_lvs): - self.assertRaises( - exception.ManageExistingInvalidReference, - self.volume.driver.manage_existing_snapshot_get_size, - snp, ref) - - def test_revert_snapshot(self): - self._setup_stubs_for_manage_existing() - self.configuration.lvm_type = 'auto' - fake_volume = tests_utils.create_volume(self.context, - display_name='fake_volume') - fake_snapshot = tests_utils.create_snapshot( - self.context, fake_volume.id) - - with mock.patch.object(self.volume.driver.vg, - 'revert') as mock_revert,\ - mock.patch.object(self.volume.driver.vg, - 'create_lv_snapshot') as mock_create,\ - mock.patch.object(self.volume.driver.vg, - 'deactivate_lv') as mock_deactive,\ - mock.patch.object(self.volume.driver.vg, - 'activate_lv') as mock_active: - self.volume.driver.revert_to_snapshot(self.context, - fake_volume, - fake_snapshot) - mock_revert.assert_called_once_with( - self.volume.driver._escape_snapshot(fake_snapshot.name)) - mock_deactive.assert_called_once_with(fake_volume.name) - mock_active.assert_called_once_with(fake_volume.name) - mock_create.assert_called_once_with( - self.volume.driver._escape_snapshot(fake_snapshot.name), - fake_volume.name, self.configuration.lvm_type) - - def test_revert_thin_snapshot(self): - - configuration = conf.Configuration(fake_opt, 'fake_group') - configuration.lvm_type = 'thin' - lvm_driver = lvm.LVMVolumeDriver(configuration=configuration, - db=db) - fake_volume = tests_utils.create_volume(self.context, - display_name='fake_volume') - fake_snapshot = tests_utils.create_snapshot( - self.context, fake_volume.id) - - self.assertRaises(NotImplementedError, - lvm_driver.revert_to_snapshot, - self.context, fake_volume, - fake_snapshot) - - def 
test_lvm_manage_existing_snapshot_bad_size(self): - """Make sure correct exception on bad size returned from LVM. - - This test case ensures that the correct exception is raised when - the information returned for the existing LVs is not in the format - that the manage_existing_snapshot code expects. - """ - self._setup_stubs_for_manage_existing() - - ref = {'source-name': 'fake_lv_bad_size'} - snp = {'name': 'test', 'id': fake.SNAPSHOT_ID, 'size': 2} - - with mock.patch.object(self.volume.driver.vg, 'get_volume', - self._get_manage_existing_lvs): - self.assertRaises( - exception.VolumeBackendAPIException, - self.volume.driver.manage_existing_snapshot_get_size, - snp, ref) - - def test_lvm_unmanage(self): - volume = tests_utils.create_volume(self.context, status='available', - size=1, host=CONF.host) - ret = self.volume.driver.unmanage(volume) - self.assertIsNone(ret) - - def test_lvm_get_manageable_volumes(self): - cinder_vols = [{'id': '00000000-0000-0000-0000-000000000000'}] - lvs = [{'name': 'volume-00000000-0000-0000-0000-000000000000', - 'size': '1.75'}, - {'name': 'volume-00000000-0000-0000-0000-000000000001', - 'size': '3.0'}, - {'name': 'snapshot-00000000-0000-0000-0000-000000000002', - 'size': '2.2'}, - {'name': 'myvol', 'size': '4.0'}] - self.volume.driver.vg = mock.Mock() - self.volume.driver.vg.get_volumes.return_value = lvs - self.volume.driver.vg.lv_is_snapshot.side_effect = [False, False, - True, False] - self.volume.driver.vg.lv_is_open.side_effect = [True, False] - res = self.volume.driver.get_manageable_volumes(cinder_vols, None, - 1000, 0, - ['size'], ['asc']) - exp = [{'size': 2, 'reason_not_safe': 'already managed', - 'extra_info': None, - 'reference': {'source-name': - 'volume-00000000-0000-0000-0000-000000000000'}, - 'cinder_id': '00000000-0000-0000-0000-000000000000', - 'safe_to_manage': False}, - {'size': 3, 'reason_not_safe': 'volume in use', - 'reference': {'source-name': - 'volume-00000000-0000-0000-0000-000000000001'}, - 
'safe_to_manage': False, 'cinder_id': None, - 'extra_info': None}, - {'size': 4, 'reason_not_safe': None, - 'safe_to_manage': True, 'reference': {'source-name': 'myvol'}, - 'cinder_id': None, 'extra_info': None}] - self.assertEqual(exp, res) - - def test_lvm_get_manageable_snapshots(self): - cinder_snaps = [{'id': '00000000-0000-0000-0000-000000000000'}] - lvs = [{'name': 'snapshot-00000000-0000-0000-0000-000000000000', - 'size': '1.75'}, - {'name': 'volume-00000000-0000-0000-0000-000000000001', - 'size': '3.0'}, - {'name': 'snapshot-00000000-0000-0000-0000-000000000002', - 'size': '2.2'}, - {'name': 'mysnap', 'size': '4.0'}] - self.volume.driver.vg = mock.Mock() - self.volume.driver.vg.get_volumes.return_value = lvs - self.volume.driver.vg.lv_is_snapshot.side_effect = [True, False, True, - True] - self.volume.driver.vg.lv_is_open.side_effect = [True, False] - self.volume.driver.vg.lv_get_origin.side_effect = [ - 'volume-00000000-0000-0000-0000-000000000000', - 'volume-00000000-0000-0000-0000-000000000002', - 'myvol'] - res = self.volume.driver.get_manageable_snapshots(cinder_snaps, None, - 1000, 0, - ['size'], ['asc']) - exp = [{'size': 2, 'reason_not_safe': 'already managed', - 'reference': - {'source-name': - 'snapshot-00000000-0000-0000-0000-000000000000'}, - 'safe_to_manage': False, 'extra_info': None, - 'cinder_id': '00000000-0000-0000-0000-000000000000', - 'source_reference': - {'source-name': - 'volume-00000000-0000-0000-0000-000000000000'}}, - {'size': 3, 'reason_not_safe': 'snapshot in use', - 'reference': - {'source-name': - 'snapshot-00000000-0000-0000-0000-000000000002'}, - 'safe_to_manage': False, 'extra_info': None, - 'cinder_id': None, - 'source_reference': - {'source-name': - 'volume-00000000-0000-0000-0000-000000000002'}}, - {'size': 4, 'reason_not_safe': None, - 'reference': {'source-name': 'mysnap'}, - 'safe_to_manage': True, 'cinder_id': None, - 'source_reference': {'source-name': 'myvol'}, - 'extra_info': None}] - self.assertEqual(exp, res) - 
- # Global setting, LVM setting, expected outcome - @ddt.data((10.0, 2.0, 2.0)) - @ddt.data((10.0, None, 10.0)) - @ddt.unpack - def test_lvm_max_over_subscription_ratio(self, - global_value, - lvm_value, - expected_value): - configuration = conf.Configuration(fake_opt, 'fake_group') - configuration.max_over_subscription_ratio = global_value - configuration.lvm_max_over_subscription_ratio = lvm_value - - fake_vg = mock.Mock(fake_lvm.FakeBrickLVM('cinder-volumes', False, - None, 'default')) - lvm_driver = lvm.LVMVolumeDriver(configuration=configuration, - vg_obj=fake_vg, db=db) - - self.assertEqual(expected_value, - lvm_driver.configuration.max_over_subscription_ratio) - - -class LVMISCSITestCase(test_driver.BaseDriverTestCase): - """Test Case for LVMISCSIDriver""" - driver_name = "cinder.volume.drivers.lvm.LVMVolumeDriver" - - def setUp(self): - super(LVMISCSITestCase, self).setUp() - self.configuration = mock.Mock(conf.Configuration) - self.configuration.iscsi_target_prefix = 'iqn.2010-10.org.openstack:' - self.configuration.iscsi_ip_address = '0.0.0.0' - self.configuration.iscsi_port = 3260 - - def _attach_volume(self): - """Attach volumes to an instance.""" - volume_id_list = [] - for index in range(3): - vol = {} - vol['size'] = 0 - vol_ref = db.volume_create(self.context, vol) - self.volume.create_volume(self.context, vol_ref) - vol_ref = db.volume_get(self.context, vol_ref['id']) - - # each volume has a different mountpoint - mountpoint = "/dev/sd" + chr((ord('b') + index)) - instance_uuid = '12345678-1234-5678-1234-567812345678' - db.volume_attached(self.context, vol_ref['id'], instance_uuid, - mountpoint) - volume_id_list.append(vol_ref['id']) - - return volume_id_list - - def test_do_iscsi_discovery(self): - self.configuration = conf.Configuration(None) - iscsi_driver = \ - cinder.volume.targets.tgt.TgtAdm( - configuration=self.configuration) - - ret = ("%s dummy" % CONF.iscsi_ip_address, '') - with mock.patch('cinder.utils.execute', - return_value=ret): - 
volume = {"name": "dummy", - "host": "0.0.0.0", - "id": "12345678-1234-5678-1234-567812345678"} - iscsi_driver._do_iscsi_discovery(volume) - - def test_get_iscsi_properties(self): - volume = {"provider_location": '', - "id": "0", - "provider_auth": "a b c", - "attached_mode": "rw"} - iscsi_driver = \ - cinder.volume.targets.tgt.TgtAdm(configuration=self.configuration) - iscsi_driver._do_iscsi_discovery = lambda v: "0.0.0.0:0000,0 iqn:iqn 0" - result = iscsi_driver._get_iscsi_properties(volume) - self.assertEqual("0.0.0.0:0000", result["target_portal"]) - self.assertEqual("iqn:iqn", result["target_iqn"]) - self.assertEqual(0, result["target_lun"]) - - def test_get_iscsi_properties_multiple_portals(self): - volume = {"provider_location": '1.1.1.1:3260;2.2.2.2:3261,1 iqn:iqn 0', - "id": "0", - "provider_auth": "a b c", - "attached_mode": "rw"} - iscsi_driver = \ - cinder.volume.targets.tgt.TgtAdm(configuration=self.configuration) - result = iscsi_driver._get_iscsi_properties(volume) - self.assertEqual("1.1.1.1:3260", result["target_portal"]) - self.assertEqual("iqn:iqn", result["target_iqn"]) - self.assertEqual(0, result["target_lun"]) - self.assertEqual(["1.1.1.1:3260", "2.2.2.2:3261"], - result["target_portals"]) - self.assertEqual(["iqn:iqn", "iqn:iqn"], result["target_iqns"]) - self.assertEqual([0, 0], result["target_luns"]) - - @mock.patch.object(brick_lvm.LVM, 'get_volumes', - return_value=[{'vg': 'fake_vg', 'name': 'fake_vol', - 'size': '1000'}]) - @mock.patch.object(brick_lvm.LVM, 'get_all_physical_volumes') - @mock.patch.object(brick_lvm.LVM, 'get_all_volume_groups', - return_value=[{'name': 'cinder-volumes', - 'size': '5.52', - 'available': '0.52', - 'lv_count': '2', - 'uuid': 'vR1JU3-FAKE-C4A9-PQFh-Mctm'}]) - @mock.patch('cinder.brick.local_dev.lvm.LVM.get_lvm_version', - return_value=(2, 2, 100)) - def test_get_volume_stats(self, _mock_get_version, mock_vgs, mock_pvs, - mock_get_volumes): - self.volume.driver.vg = brick_lvm.LVM('cinder-volumes', 'sudo') - 
- self.volume.driver._update_volume_stats() - - stats = self.volume.driver._stats - - self.assertEqual( - float('5.52'), stats['pools'][0]['total_capacity_gb']) - self.assertEqual( - float('0.52'), stats['pools'][0]['free_capacity_gb']) - self.assertEqual( - float('5.0'), stats['pools'][0]['provisioned_capacity_gb']) - self.assertEqual( - int('1'), stats['pools'][0]['total_volumes']) - self.assertFalse(stats['sparse_copy_volume']) - - # Check value of sparse_copy_volume for thin enabled case. - # This value is set in check_for_setup_error. - self.configuration = conf.Configuration(None) - self.configuration.lvm_type = 'thin' - vg_obj = fake_lvm.FakeBrickLVM('cinder-volumes', - False, - None, - 'default') - lvm_driver = lvm.LVMVolumeDriver(configuration=self.configuration, - db=db, - vg_obj=vg_obj) - lvm_driver.check_for_setup_error() - lvm_driver.vg = brick_lvm.LVM('cinder-volumes', 'sudo') - lvm_driver._update_volume_stats() - stats = lvm_driver._stats - self.assertTrue(stats['sparse_copy_volume']) - - def test_validate_connector(self): - iscsi_driver =\ - cinder.volume.targets.tgt.TgtAdm( - configuration=self.configuration) - - # Validate a valid connector - connector = {'ip': '10.0.0.2', - 'host': 'fakehost', - 'initiator': 'iqn.2012-07.org.fake:01'} - iscsi_driver.validate_connector(connector) - - # Validate a connector without the initiator - connector = {'ip': '10.0.0.2', 'host': 'fakehost'} - self.assertRaises(exception.InvalidConnectorException, - iscsi_driver.validate_connector, connector) diff --git a/cinder/tests/unit/volume/drivers/test_nfs.py b/cinder/tests/unit/volume/drivers/test_nfs.py deleted file mode 100644 index 4a060cce9..000000000 --- a/cinder/tests/unit/volume/drivers/test_nfs.py +++ /dev/null @@ -1,1601 +0,0 @@ -# Copyright (c) 2012 NetApp, Inc. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. 
You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -"""Unit tests for the NFS driver module.""" - -import ddt -import errno -import os -import six -import uuid - -import mock -from oslo_utils import imageutils -from oslo_utils import units - -from cinder import context -from cinder import exception -from cinder.image import image_utils -from cinder import test -from cinder.tests.unit import fake_snapshot -from cinder.tests.unit import fake_volume -from cinder.volume import configuration as conf -from cinder.volume.drivers import nfs -from cinder.volume.drivers import remotefs - - -class RemoteFsDriverTestCase(test.TestCase): - TEST_FILE_NAME = 'test.txt' - TEST_EXPORT = 'nas-host1:/export' - TEST_MNT_POINT = '/mnt/nas' - - def setUp(self): - super(RemoteFsDriverTestCase, self).setUp() - self._driver = remotefs.RemoteFSDriver() - self.configuration = mock.Mock(conf.Configuration) - self.configuration.append_config_values(mock.ANY) - self.configuration.nas_secure_file_permissions = 'false' - self.configuration.nas_secure_file_operations = 'false' - self.configuration.nfs_snapshot_support = True - self.configuration.max_over_subscription_ratio = 1.0 - self.configuration.reserved_percentage = 5 - self._driver = remotefs.RemoteFSDriver( - configuration=self.configuration) - mock_exc = mock.patch.object(self._driver, '_execute') - self._execute = mock_exc.start() - self.addCleanup(mock_exc.stop) - - def test_create_sparsed_file(self): - self._driver._create_sparsed_file('/path', 1) - self._execute.assert_called_once_with('truncate', '-s', '1G', - '/path', run_as_root=True) - - def test_create_regular_file(self): 
- self._driver._create_regular_file('/path', 1) - self._execute.assert_called_once_with('dd', 'if=/dev/zero', - 'of=/path', 'bs=1M', - 'count=1024', run_as_root=True) - - def test_create_qcow2_file(self): - file_size = 1 - self._driver._create_qcow2_file('/path', file_size) - self._execute.assert_called_once_with('qemu-img', 'create', '-f', - 'qcow2', '-o', - 'preallocation=metadata', - '/path', '%s' % - str(file_size * units.Gi), - run_as_root=True) - - def test_set_rw_permissions_for_all(self): - self._driver._set_rw_permissions_for_all('/path') - self._execute.assert_called_once_with('chmod', 'ugo+rw', '/path', - run_as_root=True) - - @mock.patch.object(remotefs, 'LOG') - def test_set_rw_permissions_with_secure_file_permissions(self, LOG): - self._driver._mounted_shares = [self.TEST_EXPORT] - self.configuration.nas_secure_file_permissions = 'true' - self._driver._set_rw_permissions(self.TEST_FILE_NAME) - - self.assertFalse(LOG.warning.called) - - @mock.patch.object(remotefs, 'LOG') - def test_set_rw_permissions_without_secure_file_permissions(self, LOG): - self.configuration.nas_secure_file_permissions = 'false' - self._driver._set_rw_permissions(self.TEST_FILE_NAME) - - self.assertTrue(LOG.warning.called) - warn_msg = "%(path)s is being set with open permissions: %(perm)s" - LOG.warning.assert_called_once_with( - warn_msg, {'path': self.TEST_FILE_NAME, 'perm': 'ugo+rw'}) - - @mock.patch('os.path.join') - @mock.patch('os.path.isfile', return_value=False) - def test_determine_nas_security_options_when_auto_and_new_install( - self, - mock_isfile, - mock_join): - """Test the setting of the NAS Security Option - - In this test case, we will create the marker file. No pre-exxisting - Cinder volumes found during bootup. 
- """ - self._driver._mounted_shares = [self.TEST_EXPORT] - file_path = '%s/.cinderSecureEnvIndicator' % self.TEST_MNT_POINT - is_new_install = True - - self._driver._ensure_shares_mounted = mock.Mock() - nas_mount = self._driver._get_mount_point_for_share = mock.Mock( - return_value=self.TEST_MNT_POINT) - mock_join.return_value = file_path - - secure_file_permissions = 'auto' - nas_option = self._driver._determine_nas_security_option_setting( - secure_file_permissions, - nas_mount, is_new_install) - - self.assertEqual('true', nas_option) - - secure_file_operations = 'auto' - nas_option = self._driver._determine_nas_security_option_setting( - secure_file_operations, - nas_mount, is_new_install) - - self.assertEqual('true', nas_option) - - @mock.patch('os.path.join') - @mock.patch('os.path.isfile') - def test_determine_nas_security_options_when_auto_and_new_install_exists( - self, - isfile, - join): - """Test the setting of the NAS Security Option - - In this test case, the marker file already exists. Cinder volumes - found during bootup. 
- """ - drv = self._driver - drv._mounted_shares = [self.TEST_EXPORT] - file_path = '%s/.cinderSecureEnvIndicator' % self.TEST_MNT_POINT - is_new_install = False - - drv._ensure_shares_mounted = mock.Mock() - nas_mount = drv._get_mount_point_for_share = mock.Mock( - return_value=self.TEST_MNT_POINT) - join.return_value = file_path - isfile.return_value = True - - secure_file_permissions = 'auto' - nas_option = drv._determine_nas_security_option_setting( - secure_file_permissions, - nas_mount, is_new_install) - - self.assertEqual('true', nas_option) - - secure_file_operations = 'auto' - nas_option = drv._determine_nas_security_option_setting( - secure_file_operations, - nas_mount, is_new_install) - - self.assertEqual('true', nas_option) - - @mock.patch('os.path.join') - @mock.patch('os.path.isfile') - def test_determine_nas_security_options_when_auto_and_old_install(self, - isfile, - join): - """Test the setting of the NAS Security Option - - In this test case, the marker file does not exist. There are also - pre-existing Cinder volumes. - """ - drv = self._driver - drv._mounted_shares = [self.TEST_EXPORT] - file_path = '%s/.cinderSecureEnvIndicator' % self.TEST_MNT_POINT - is_new_install = False - - drv._ensure_shares_mounted = mock.Mock() - nas_mount = drv._get_mount_point_for_share = mock.Mock( - return_value=self.TEST_MNT_POINT) - join.return_value = file_path - isfile.return_value = False - - secure_file_permissions = 'auto' - nas_option = drv._determine_nas_security_option_setting( - secure_file_permissions, - nas_mount, is_new_install) - - self.assertEqual('false', nas_option) - - secure_file_operations = 'auto' - nas_option = drv._determine_nas_security_option_setting( - secure_file_operations, - nas_mount, is_new_install) - - self.assertEqual('false', nas_option) - - def test_determine_nas_security_options_when_admin_set_true(self): - """Test the setting of the NAS Security Option - - In this test case, the Admin set the flag to 'true'. 
- """ - drv = self._driver - drv._mounted_shares = [self.TEST_EXPORT] - is_new_install = False - - drv._ensure_shares_mounted = mock.Mock() - nas_mount = drv._get_mount_point_for_share = mock.Mock( - return_value=self.TEST_MNT_POINT) - - secure_file_permissions = 'true' - nas_option = drv._determine_nas_security_option_setting( - secure_file_permissions, - nas_mount, is_new_install) - - self.assertEqual('true', nas_option) - - secure_file_operations = 'true' - nas_option = drv._determine_nas_security_option_setting( - secure_file_operations, - nas_mount, is_new_install) - - self.assertEqual('true', nas_option) - - def test_determine_nas_security_options_when_admin_set_false(self): - """Test the setting of the NAS Security Option - - In this test case, the Admin set the flag to 'false'. - """ - drv = self._driver - drv._mounted_shares = [self.TEST_EXPORT] - is_new_install = False - - drv._ensure_shares_mounted = mock.Mock() - nas_mount = drv._get_mount_point_for_share = mock.Mock( - return_value=self.TEST_MNT_POINT) - - secure_file_permissions = 'false' - nas_option = drv._determine_nas_security_option_setting( - secure_file_permissions, - nas_mount, is_new_install) - - self.assertEqual('false', nas_option) - - secure_file_operations = 'false' - nas_option = drv._determine_nas_security_option_setting( - secure_file_operations, - nas_mount, is_new_install) - - self.assertEqual('false', nas_option) - - @mock.patch.object(remotefs, 'LOG') - def test_set_nas_security_options(self, LOG): - """Test setting of NAS Security options. - - The RemoteFS driver will force set options to false. The derived - objects will provide an inherited interface to properly set options. 
- """ - drv = self._driver - is_new_install = False - - drv.set_nas_security_options(is_new_install) - - self.assertEqual('false', drv.configuration.nas_secure_file_operations) - self.assertEqual('false', - drv.configuration.nas_secure_file_permissions) - self.assertTrue(LOG.warning.called) - - def test_secure_file_operations_enabled_true(self): - """Test nas_secure_file_operations = 'true' - - Networked file system based drivers may support secure file - operations. This test verifies the settings when secure. - """ - drv = self._driver - self.configuration.nas_secure_file_operations = 'true' - ret_flag = drv.secure_file_operations_enabled() - self.assertTrue(ret_flag) - - def test_secure_file_operations_enabled_false(self): - """Test nas_secure_file_operations = 'false' - - Networked file system based drivers may support secure file - operations. This test verifies the settings when not secure. - """ - drv = self._driver - self.configuration.nas_secure_file_operations = 'false' - ret_flag = drv.secure_file_operations_enabled() - self.assertFalse(ret_flag) - -# NFS configuration scenarios -NFS_CONFIG1 = {'max_over_subscription_ratio': 1.0, - 'reserved_percentage': 0, - 'nfs_sparsed_volumes': True, - 'nfs_qcow2_volumes': False, - 'nas_secure_file_permissions': 'false', - 'nas_secure_file_operations': 'false'} - -NFS_CONFIG2 = {'max_over_subscription_ratio': 10.0, - 'reserved_percentage': 5, - 'nfs_sparsed_volumes': False, - 'nfs_qcow2_volumes': True, - 'nas_secure_file_permissions': 'true', - 'nas_secure_file_operations': 'true'} - -NFS_CONFIG3 = {'max_over_subscription_ratio': 15.0, - 'reserved_percentage': 10, - 'nfs_sparsed_volumes': False, - 'nfs_qcow2_volumes': False, - 'nas_secure_file_permissions': 'auto', - 'nas_secure_file_operations': 'auto'} - -NFS_CONFIG4 = {'max_over_subscription_ratio': 20.0, - 'reserved_percentage': 60, - 'nfs_sparsed_volumes': True, - 'nfs_qcow2_volumes': True, - 'nas_secure_file_permissions': 'false', - 
'nas_secure_file_operations': 'true'} - -QEMU_IMG_INFO_OUT1 = """image: %(volid)s - file format: raw - virtual size: %(size_gb)sG (%(size_b)s bytes) - disk size: 173K - """ - -QEMU_IMG_INFO_OUT2 = """image: %(volid)s -file format: qcow2 -virtual size: %(size_gb)sG (%(size_b)s bytes) -disk size: 196K -cluster_size: 65536 -Format specific information: - compat: 1.1 - lazy refcounts: false - refcount bits: 16 - corrupt: false - """ - -QEMU_IMG_INFO_OUT3 = """image: volume-%(volid)s.%(snapid)s -file format: qcow2 -virtual size: %(size_gb)sG (%(size_b)s bytes) -disk size: 196K -cluster_size: 65536 -backing file: volume-%(volid)s -backing file format: qcow2 -Format specific information: - compat: 1.1 - lazy refcounts: false - refcount bits: 16 - corrupt: false - """ - -QEMU_IMG_INFO_OUT4 = """image: volume-%(volid)s.%(snapid)s -file format: raw -virtual size: %(size_gb)sG (%(size_b)s bytes) -disk size: 196K -cluster_size: 65536 -backing file: volume-%(volid)s -backing file format: raw -Format specific information: - compat: 1.1 - lazy refcounts: false - refcount bits: 16 - corrupt: false - """ - - -@ddt.ddt -class NfsDriverTestCase(test.TestCase): - """Test case for NFS driver.""" - - TEST_NFS_HOST = 'nfs-host1' - TEST_NFS_SHARE_PATH = '/export' - TEST_NFS_EXPORT1 = '%s:%s' % (TEST_NFS_HOST, TEST_NFS_SHARE_PATH) - TEST_NFS_EXPORT2 = 'nfs-host2:/export' - TEST_NFS_EXPORT2_OPTIONS = '-o intr' - TEST_SIZE_IN_GB = 1 - TEST_MNT_POINT = '/mnt/nfs' - TEST_MNT_POINT_BASE_EXTRA_SLASH = '/opt/stack/data/cinder//mnt' - TEST_MNT_POINT_BASE = '/mnt/test' - TEST_LOCAL_PATH = '/mnt/nfs/volume-123' - TEST_FILE_NAME = 'test.txt' - TEST_SHARES_CONFIG_FILE = '/etc/cinder/test-shares.conf' - TEST_NFS_EXPORT_SPACES = 'nfs-host3:/export this' - TEST_MNT_POINT_SPACES = '/ 0 0 0 /foo' - VOLUME_UUID = '69ad4ff6-b892-4215-aaaa-aaaaaaaaaaaa' - - def setUp(self): - super(NfsDriverTestCase, self).setUp() - self.configuration = mock.Mock(conf.Configuration) - 
self.configuration.append_config_values(mock.ANY) - self.configuration.max_over_subscription_ratio = 1.0 - self.configuration.reserved_percentage = 5 - self.configuration.nfs_shares_config = None - self.configuration.nfs_sparsed_volumes = True - self.configuration.nfs_reserved_percentage = 5.0 - self.configuration.nfs_mount_point_base = self.TEST_MNT_POINT_BASE - self.configuration.nfs_mount_options = None - self.configuration.nfs_mount_attempts = 3 - self.configuration.nfs_qcow2_volumes = False - self.configuration.nas_secure_file_permissions = 'false' - self.configuration.nas_secure_file_operations = 'false' - self.configuration.nas_host = None - self.configuration.nas_share_path = None - self.configuration.nas_mount_options = None - self.configuration.volume_dd_blocksize = '1M' - - self.context = context.get_admin_context() - - def _set_driver(self, extra_confs=None): - - # Overide the default configs - if extra_confs: - for config_name, config_value in extra_confs.items(): - setattr(self.configuration, config_name, config_value) - - self._driver = nfs.NfsDriver(configuration=self.configuration) - self._driver.shares = {} - self.mock_object(self._driver, '_execute') - - @ddt.data(NFS_CONFIG1, NFS_CONFIG2, NFS_CONFIG3, NFS_CONFIG4) - def test_local_path(self, nfs_config): - """local_path common use case.""" - self.configuration.nfs_mount_point_base = self.TEST_MNT_POINT_BASE - self._set_driver(extra_confs=nfs_config) - drv = self._driver - - volume = fake_volume.fake_volume_obj( - self.context, - provider_location=self.TEST_NFS_EXPORT1) - - self.assertEqual( - '/mnt/test/2f4f60214cf43c595666dd815f0360a4/%s' % volume.name, - drv.local_path(volume)) - - @ddt.data(NFS_CONFIG1, NFS_CONFIG2, NFS_CONFIG3, NFS_CONFIG4) - def test_copy_image_to_volume(self, nfs_config): - """resize_image common case usage.""" - - mock_resize = self.mock_object(image_utils, 'resize_image') - mock_fetch = self.mock_object(image_utils, 'fetch_to_raw') - - self._set_driver() - drv = 
self._driver - volume = fake_volume.fake_volume_obj(self.context, - size=self.TEST_SIZE_IN_GB) - test_img_source = 'volume-%s' % volume.id - - self.mock_object(drv, 'local_path', return_value=test_img_source) - - data = mock.Mock() - data.virtual_size = 1 * units.Gi - self.mock_object(image_utils, 'qemu_img_info', return_value=data) - drv.copy_image_to_volume(None, volume, None, None) - - mock_fetch.assert_called_once_with( - None, None, None, test_img_source, mock.ANY, run_as_root=True, - size=self.TEST_SIZE_IN_GB) - mock_resize.assert_called_once_with(test_img_source, - self.TEST_SIZE_IN_GB, - run_as_root=True) - - def test_get_mount_point_for_share(self): - """_get_mount_point_for_share should calculate correct value.""" - self._set_driver() - drv = self._driver - - self.configuration.nfs_mount_point_base = self.TEST_MNT_POINT_BASE - - self.assertEqual('/mnt/test/2f4f60214cf43c595666dd815f0360a4', - drv._get_mount_point_for_share(self.TEST_NFS_EXPORT1)) - - def test_get_mount_point_for_share_given_extra_slash_in_state_path(self): - """_get_mount_point_for_share should calculate correct value.""" - # This test gets called with the extra slash - self.configuration.nfs_mount_point_base = ( - self.TEST_MNT_POINT_BASE_EXTRA_SLASH) - - # The driver gets called with the correct configuration and removes - # the extra slash - drv = nfs.NfsDriver(configuration=self.configuration) - - self.assertEqual('/opt/stack/data/cinder/mnt', drv.base) - - self.assertEqual( - '/opt/stack/data/cinder/mnt/2f4f60214cf43c595666dd815f0360a4', - drv._get_mount_point_for_share(self.TEST_NFS_EXPORT1)) - - def test_get_capacity_info(self): - """_get_capacity_info should calculate correct value.""" - self._set_driver() - drv = self._driver - stat_total_size = 2620544 - stat_avail = 2129984 - stat_output = '1 %d %d' % (stat_total_size, stat_avail) - - du_used = 490560 - du_output = '%d /mnt' % du_used - - with mock.patch.object( - drv, '_get_mount_point_for_share') as mock_get_mount: - 
mock_get_mount.return_value = self.TEST_MNT_POINT - drv._execute.side_effect = [(stat_output, None), - (du_output, None)] - - self.assertEqual((stat_total_size, stat_avail, du_used), - drv._get_capacity_info(self.TEST_NFS_EXPORT1)) - - mock_get_mount.assert_called_once_with(self.TEST_NFS_EXPORT1) - - calls = [mock.call('stat', '-f', '-c', '%S %b %a', - self.TEST_MNT_POINT, run_as_root=True), - mock.call('du', '-sb', '--apparent-size', - '--exclude', '*snapshot*', - self.TEST_MNT_POINT, run_as_root=True)] - - drv._execute.assert_has_calls(calls) - - def test_get_capacity_info_for_share_and_mount_point_with_spaces(self): - """_get_capacity_info should calculate correct value.""" - self._set_driver() - drv = self._driver - stat_total_size = 2620544 - stat_avail = 2129984 - stat_output = '1 %d %d' % (stat_total_size, stat_avail) - - du_used = 490560 - du_output = '%d /mnt' % du_used - - with mock.patch.object( - drv, '_get_mount_point_for_share') as mock_get_mount: - mock_get_mount.return_value = self.TEST_MNT_POINT_SPACES - drv._execute.side_effect = [(stat_output, None), - (du_output, None)] - - self.assertEqual((stat_total_size, stat_avail, du_used), - drv._get_capacity_info( - self.TEST_NFS_EXPORT_SPACES)) - - mock_get_mount.assert_called_once_with( - self.TEST_NFS_EXPORT_SPACES) - - calls = [mock.call('stat', '-f', '-c', '%S %b %a', - self.TEST_MNT_POINT_SPACES, run_as_root=True), - mock.call('du', '-sb', '--apparent-size', - '--exclude', '*snapshot*', - self.TEST_MNT_POINT_SPACES, run_as_root=True)] - - drv._execute.assert_has_calls(calls) - - def test_load_shares_config(self): - self._set_driver() - drv = self._driver - - drv.configuration.nfs_shares_config = self.TEST_SHARES_CONFIG_FILE - - with mock.patch.object( - drv, '_read_config_file') as mock_read_config: - config_data = [] - config_data.append(self.TEST_NFS_EXPORT1) - config_data.append('#' + self.TEST_NFS_EXPORT2) - config_data.append('') - config_data.append(self.TEST_NFS_EXPORT2 + ' ' + - 
self.TEST_NFS_EXPORT2_OPTIONS) - config_data.append('broken:share_format') - mock_read_config.return_value = config_data - - drv._load_shares_config(drv.configuration.nfs_shares_config) - - mock_read_config.assert_called_once_with( - self.TEST_SHARES_CONFIG_FILE) - self.assertIn(self.TEST_NFS_EXPORT1, drv.shares) - self.assertIn(self.TEST_NFS_EXPORT2, drv.shares) - self.assertEqual(2, len(drv.shares)) - - self.assertEqual(self.TEST_NFS_EXPORT2_OPTIONS, - drv.shares[self.TEST_NFS_EXPORT2]) - - def test_load_shares_config_nas_opts(self): - self._set_driver() - drv = self._driver - drv.configuration.nas_host = self.TEST_NFS_HOST - drv.configuration.nas_share_path = self.TEST_NFS_SHARE_PATH - drv.configuration.nfs_shares_config = self.TEST_SHARES_CONFIG_FILE - - drv._load_shares_config(drv.configuration.nfs_shares_config) - - self.assertIn(self.TEST_NFS_EXPORT1, drv.shares) - self.assertEqual(1, len(drv.shares)) - - def test_ensure_shares_mounted_should_save_mounting_successfully(self): - """_ensure_shares_mounted should save share if mounted with success.""" - self._set_driver() - drv = self._driver - config_data = [] - config_data.append(self.TEST_NFS_EXPORT1) - drv.configuration.nfs_shares_config = self.TEST_SHARES_CONFIG_FILE - - with mock.patch.object( - drv, '_read_config_file') as mock_read_config: - with mock.patch.object( - drv, '_ensure_share_mounted') as mock_ensure: - mock_read_config.return_value = config_data - drv._ensure_share_mounted(self.TEST_NFS_EXPORT1) - mock_ensure.assert_called_once_with(self.TEST_NFS_EXPORT1) - - @mock.patch.object(remotefs, 'LOG') - def test_ensure_shares_mounted_should_not_save_mounting_with_error(self, - LOG): - """_ensure_shares_mounted should not save share if failed to mount.""" - self._set_driver() - drv = self._driver - config_data = [] - config_data.append(self.TEST_NFS_EXPORT1) - drv.configuration.nfs_shares_config = self.TEST_SHARES_CONFIG_FILE - with mock.patch.object( - drv, '_read_config_file') as mock_read_config: 
- with mock.patch.object( - drv, '_ensure_share_mounted') as mock_ensure: - mock_read_config.return_value = config_data - drv._ensure_share_mounted() - self.assertEqual(0, len(drv._mounted_shares)) - mock_ensure.assert_called_once_with() - - def test_find_share_should_throw_error_if_there_is_no_mounted_share(self): - """_find_share should throw error if there is no mounted shares.""" - self._set_driver() - drv = self._driver - - drv._mounted_shares = [] - - self.assertRaises(exception.NfsNoSharesMounted, drv._find_share, - self._simple_volume()) - - def test_find_share(self): - """_find_share simple use case.""" - self._set_driver() - drv = self._driver - drv._mounted_shares = [self.TEST_NFS_EXPORT1, self.TEST_NFS_EXPORT2] - - volume = fake_volume.fake_volume_obj(self.context, - size=self.TEST_SIZE_IN_GB) - - with mock.patch.object( - drv, '_get_capacity_info') as mock_get_capacity_info: - mock_get_capacity_info.side_effect = [ - (5 * units.Gi, 2 * units.Gi, 2 * units.Gi), - (10 * units.Gi, 3 * units.Gi, 1 * units.Gi)] - self.assertEqual(self.TEST_NFS_EXPORT2, - drv._find_share(volume)) - calls = [mock.call(self.TEST_NFS_EXPORT1), - mock.call(self.TEST_NFS_EXPORT2)] - mock_get_capacity_info.assert_has_calls(calls) - self.assertEqual(2, mock_get_capacity_info.call_count) - - def test_find_share_should_throw_error_if_there_is_not_enough_space(self): - """_find_share should throw error if there is no share to host vol.""" - self._set_driver() - drv = self._driver - drv._mounted_shares = [self.TEST_NFS_EXPORT1, self.TEST_NFS_EXPORT2] - - with mock.patch.object( - drv, '_get_capacity_info') as mock_get_capacity_info: - mock_get_capacity_info.side_effect = [ - (5 * units.Gi, 0, 5 * units.Gi), - (10 * units.Gi, 0, 10 * units.Gi)] - - self.assertRaises(exception.NfsNoSuitableShareFound, - drv._find_share, self._simple_volume()) - calls = [mock.call(self.TEST_NFS_EXPORT1), - mock.call(self.TEST_NFS_EXPORT2)] - mock_get_capacity_info.assert_has_calls(calls) - 
self.assertEqual(2, mock_get_capacity_info.call_count) - - def _simple_volume(self, size=10): - loc = self.TEST_NFS_EXPORT1 - return fake_volume.fake_volume_obj(self.context, - display_name='volume_name', - provider_location=loc, - size=size) - - def test_create_sparsed_volume(self): - self._set_driver() - drv = self._driver - volume = self._simple_volume() - - self.override_config('nfs_sparsed_volumes', True) - - with mock.patch.object( - drv, '_create_sparsed_file') as mock_create_sparsed_file: - with mock.patch.object( - drv, '_set_rw_permissions') as mock_set_rw_permissions: - drv._do_create_volume(volume) - - mock_create_sparsed_file.assert_called_once_with(mock.ANY, - mock.ANY) - mock_set_rw_permissions.assert_called_once_with(mock.ANY) - - def test_create_nonsparsed_volume(self): - self._set_driver() - drv = self._driver - self.configuration.nfs_sparsed_volumes = False - volume = self._simple_volume() - - self.override_config('nfs_sparsed_volumes', False) - - with mock.patch.object( - drv, '_create_regular_file') as mock_create_regular_file: - with mock.patch.object( - drv, '_set_rw_permissions') as mock_set_rw_permissions: - drv._do_create_volume(volume) - - mock_create_regular_file.assert_called_once_with(mock.ANY, - mock.ANY) - mock_set_rw_permissions.assert_called_once_with(mock.ANY) - - @mock.patch.object(nfs, 'LOG') - def test_create_volume_should_ensure_nfs_mounted(self, mock_log): - """create_volume ensures shares provided in config are mounted.""" - self._set_driver() - drv = self._driver - drv._find_share = mock.Mock() - drv._find_share.return_value = self.TEST_NFS_EXPORT1 - drv._do_create_volume = mock.Mock() - - with mock.patch.object( - drv, '_ensure_share_mounted') as mock_ensure_share: - drv._ensure_share_mounted() - volume = fake_volume.fake_volume_obj(self.context, - size=self.TEST_SIZE_IN_GB) - drv.create_volume(volume) - - mock_ensure_share.assert_called_once_with() - - @mock.patch.object(nfs, 'LOG') - def 
test_create_volume_should_return_provider_location(self, mock_log): - """create_volume should return provider_location with found share.""" - self._set_driver() - drv = self._driver - drv._ensure_shares_mounted = mock.Mock() - drv._do_create_volume = mock.Mock() - - with mock.patch.object(drv, '_find_share') as mock_find_share: - mock_find_share.return_value = self.TEST_NFS_EXPORT1 - volume = fake_volume.fake_volume_obj(self.context, - size=self.TEST_SIZE_IN_GB) - result = drv.create_volume(volume) - self.assertEqual(self.TEST_NFS_EXPORT1, - result['provider_location']) - mock_find_share.assert_called_once_with(volume) - - def test_delete_volume(self): - """delete_volume simple test case.""" - self._set_driver() - drv = self._driver - drv._ensure_share_mounted = mock.Mock() - - volume = fake_volume.fake_volume_obj( - self.context, - display_name='volume-123', - provider_location=self.TEST_NFS_EXPORT1) - - with mock.patch.object(drv, 'local_path') as mock_local_path: - mock_local_path.return_value = self.TEST_LOCAL_PATH - drv.delete_volume(volume) - mock_local_path.assert_called_with(volume) - drv._execute.assert_called_once() - - def test_delete_should_ensure_share_mounted(self): - """delete_volume should ensure that corresponding share is mounted.""" - self._set_driver() - drv = self._driver - volume = fake_volume.fake_volume_obj( - self.context, - display_name='volume-123', - provider_location=self.TEST_NFS_EXPORT1) - - with mock.patch.object(drv, '_ensure_share_mounted'): - drv.delete_volume(volume) - - def test_delete_should_not_delete_if_provider_location_not_provided(self): - """delete_volume shouldn't delete if provider_location missed.""" - self._set_driver() - drv = self._driver - volume = fake_volume.fake_volume_obj(self.context, - name='volume-123', - provider_location=None) - - with mock.patch.object(drv, '_ensure_share_mounted'): - drv.delete_volume(volume) - self.assertFalse(drv._execute.called) - - def test_get_volume_stats(self): - 
"""get_volume_stats must fill the correct values.""" - self._set_driver() - drv = self._driver - drv._mounted_shares = [self.TEST_NFS_EXPORT1, self.TEST_NFS_EXPORT2] - - with mock.patch.object( - drv, '_ensure_shares_mounted') as mock_ensure_share: - with mock.patch.object( - drv, '_get_capacity_info') as mock_get_capacity_info: - mock_get_capacity_info.side_effect = [ - (10 * units.Gi, 2 * units.Gi, 2 * units.Gi), - (20 * units.Gi, 3 * units.Gi, 3 * units.Gi)] - - drv._ensure_shares_mounted() - drv.get_volume_stats() - - calls = [mock.call(self.TEST_NFS_EXPORT1), - mock.call(self.TEST_NFS_EXPORT2)] - mock_get_capacity_info.assert_has_calls(calls) - - self.assertTrue(mock_ensure_share.called) - self.assertEqual(30.0, drv._stats['total_capacity_gb']) - self.assertEqual(5.0, drv._stats['free_capacity_gb']) - self.assertEqual(5, drv._stats['reserved_percentage']) - self.assertTrue(drv._stats['sparse_copy_volume']) - - def test_get_volume_stats_with_non_zero_reserved_percentage(self): - """get_volume_stats must fill the correct values.""" - self.configuration.reserved_percentage = 10.0 - drv = nfs.NfsDriver(configuration=self.configuration) - - drv._mounted_shares = [self.TEST_NFS_EXPORT1, self.TEST_NFS_EXPORT2] - - with mock.patch.object( - drv, '_ensure_shares_mounted') as mock_ensure_share: - with mock.patch.object( - drv, '_get_capacity_info') as mock_get_capacity_info: - mock_get_capacity_info.side_effect = [ - (10 * units.Gi, 2 * units.Gi, 2 * units.Gi), - (20 * units.Gi, 3 * units.Gi, 3 * units.Gi)] - - drv._ensure_shares_mounted() - drv.get_volume_stats() - - calls = [mock.call(self.TEST_NFS_EXPORT1), - mock.call(self.TEST_NFS_EXPORT2)] - mock_get_capacity_info.assert_has_calls(calls) - - self.assertTrue(mock_ensure_share.called) - self.assertEqual(30.0, drv._stats['total_capacity_gb']) - self.assertEqual(5.0, drv._stats['free_capacity_gb']) - self.assertEqual(10.0, drv._stats['reserved_percentage']) - - @ddt.data(True, False) - def 
test_update_volume_stats(self, thin): - self._set_driver() - self._driver.configuration.max_over_subscription_ratio = 20.0 - self._driver.configuration.reserved_percentage = 5.0 - self._driver.configuration.nfs_sparsed_volumes = thin - - remotefs_volume_stats = { - 'volume_backend_name': 'fake_backend_name', - 'vendor_name': 'fake_vendor', - 'driver_version': 'fake_version', - 'storage_protocol': 'NFS', - 'total_capacity_gb': 100.0, - 'free_capacity_gb': 20.0, - 'reserved_percentage': 5.0, - 'QoS_support': False, - } - self.mock_object(remotefs.RemoteFSDriver, '_update_volume_stats') - self._driver._stats = remotefs_volume_stats - - mock_get_provisioned_capacity = self.mock_object( - self._driver, '_get_provisioned_capacity', return_value=25.0) - - self._driver._update_volume_stats() - - nfs_added_volume_stats = { - 'provisioned_capacity_gb': 25.0 if thin else 80.0, - 'max_over_subscription_ratio': 20.0, - 'reserved_percentage': 5.0, - 'thin_provisioning_support': thin, - 'thick_provisioning_support': not thin, - } - expected = remotefs_volume_stats - expected.update(nfs_added_volume_stats) - - self.assertEqual(expected, self._driver._stats) - self.assertEqual(thin, mock_get_provisioned_capacity.called) - - def _check_is_share_eligible(self, total_size, total_available, - total_allocated, requested_volume_size): - self._set_driver() - with mock.patch.object(self._driver, '_get_capacity_info')\ - as mock_get_capacity_info: - mock_get_capacity_info.return_value = (total_size, - total_available, - total_allocated) - return self._driver._is_share_eligible('fake_share', - requested_volume_size) - - def test_is_share_eligible(self): - self._set_driver() - total_size = 100.0 * units.Gi - total_available = 90.0 * units.Gi - total_allocated = 10.0 * units.Gi - requested_volume_size = 1 # GiB - - self.assertTrue(self._check_is_share_eligible(total_size, - total_available, - total_allocated, - requested_volume_size)) - - def 
test_share_eligibility_with_reserved_percentage(self): - self._set_driver() - total_size = 100.0 * units.Gi - total_available = 4.0 * units.Gi - total_allocated = 96.0 * units.Gi - requested_volume_size = 1 # GiB - - # Check used > used_ratio statement entered - self.assertFalse(self._check_is_share_eligible(total_size, - total_available, - total_allocated, - requested_volume_size)) - - def test_is_share_eligible_above_oversub_ratio(self): - self._set_driver() - total_size = 100.0 * units.Gi - total_available = 10.0 * units.Gi - total_allocated = 90.0 * units.Gi - requested_volume_size = 10 # GiB - - # Check apparent_available <= requested_volume_size statement entered - self.assertFalse(self._check_is_share_eligible(total_size, - total_available, - total_allocated, - requested_volume_size)) - - def test_is_share_eligible_reserved_space_above_oversub_ratio(self): - self._set_driver() - total_size = 100.0 * units.Gi - total_available = 10.0 * units.Gi - total_allocated = 100.0 * units.Gi - requested_volume_size = 1 # GiB - - # Check total_allocated / total_size >= oversub_ratio - # statement entered - self.assertFalse(self._check_is_share_eligible(total_size, - total_available, - total_allocated, - requested_volume_size)) - - def test_extend_volume(self): - """Extend a volume by 1.""" - self._set_driver() - drv = self._driver - volume = fake_volume.fake_volume_obj( - self.context, - id='80ee16b6-75d2-4d54-9539-ffc1b4b0fb10', - size=1, - provider_location='nfs_share') - path = 'path' - newSize = volume['size'] + 1 - - with mock.patch.object(image_utils, 'resize_image') as resize: - with mock.patch.object(drv, 'local_path', return_value=path): - with mock.patch.object(drv, '_is_share_eligible', - return_value=True): - with mock.patch.object(drv, '_is_file_size_equal', - return_value=True): - drv.extend_volume(volume, newSize) - - resize.assert_called_once_with(path, newSize, - run_as_root=True) - - def test_extend_volume_failure(self): - """Error during extend 
operation.""" - self._set_driver() - drv = self._driver - volume = fake_volume.fake_volume_obj( - self.context, - id='80ee16b6-75d2-4d54-9539-ffc1b4b0fb10', - size=1, - provider_location='nfs_share') - - with mock.patch.object(image_utils, 'resize_image'): - with mock.patch.object(drv, 'local_path', return_value='path'): - with mock.patch.object(drv, '_is_share_eligible', - return_value=True): - with mock.patch.object(drv, '_is_file_size_equal', - return_value=False): - self.assertRaises(exception.ExtendVolumeError, - drv.extend_volume, volume, 2) - - def test_extend_volume_insufficient_space(self): - """Insufficient space on nfs_share during extend operation.""" - self._set_driver() - drv = self._driver - volume = fake_volume.fake_volume_obj( - self.context, - id='80ee16b6-75d2-4d54-9539-ffc1b4b0fb10', - size=1, - provider_location='nfs_share') - - with mock.patch.object(image_utils, 'resize_image'): - with mock.patch.object(drv, 'local_path', return_value='path'): - with mock.patch.object(drv, '_is_share_eligible', - return_value=False): - with mock.patch.object(drv, '_is_file_size_equal', - return_value=False): - self.assertRaises(exception.ExtendVolumeError, - drv.extend_volume, volume, 2) - - def test_is_file_size_equal(self): - """File sizes are equal.""" - self._set_driver() - drv = self._driver - path = 'fake/path' - size = 2 - data = mock.MagicMock() - data.virtual_size = size * units.Gi - - with mock.patch.object(image_utils, 'qemu_img_info', - return_value=data): - self.assertTrue(drv._is_file_size_equal(path, size)) - - def test_is_file_size_equal_false(self): - """File sizes are not equal.""" - self._set_driver() - drv = self._driver - path = 'fake/path' - size = 2 - data = mock.MagicMock() - data.virtual_size = (size + 1) * units.Gi - - with mock.patch.object(image_utils, 'qemu_img_info', - return_value=data): - self.assertFalse(drv._is_file_size_equal(path, size)) - - @mock.patch.object(nfs, 'LOG') - def test_set_nas_security_options_when_true(self, 
LOG): - """Test higher level setting of NAS Security options. - - The NFS driver overrides the base method with a driver specific - version. - """ - self._set_driver() - drv = self._driver - drv._mounted_shares = [self.TEST_NFS_EXPORT1] - is_new_install = True - - drv._ensure_shares_mounted = mock.Mock() - drv._get_mount_point_for_share = mock.Mock( - return_value=self.TEST_MNT_POINT) - drv._determine_nas_security_option_setting = mock.Mock( - return_value='true') - - drv.set_nas_security_options(is_new_install) - - self.assertEqual('true', drv.configuration.nas_secure_file_operations) - self.assertEqual('true', drv.configuration.nas_secure_file_permissions) - self.assertFalse(LOG.warning.called) - - @mock.patch.object(nfs, 'LOG') - def test_set_nas_security_options_when_false(self, LOG): - """Test higher level setting of NAS Security options. - - The NFS driver overrides the base method with a driver specific - version. - """ - self._set_driver() - drv = self._driver - drv._mounted_shares = [self.TEST_NFS_EXPORT1] - is_new_install = False - - drv._ensure_shares_mounted = mock.Mock() - drv._get_mount_point_for_share = mock.Mock( - return_value=self.TEST_MNT_POINT) - drv._determine_nas_security_option_setting = mock.Mock( - return_value='false') - - drv.set_nas_security_options(is_new_install) - - self.assertEqual('false', drv.configuration.nas_secure_file_operations) - self.assertEqual('false', - drv.configuration.nas_secure_file_permissions) - self.assertTrue(LOG.warning.called) - - def test_set_nas_security_options_exception_if_no_mounted_shares(self): - """Ensure proper exception is raised if there are no mounted shares.""" - - self._set_driver() - drv = self._driver - drv._ensure_shares_mounted = mock.Mock() - drv._mounted_shares = [] - is_new_cinder_install = 'does not matter' - - self.assertRaises(exception.NfsNoSharesMounted, - drv.set_nas_security_options, - is_new_cinder_install) - - def test_ensure_share_mounted(self): - """Case where the mount works the 
first time.""" - - self._set_driver() - self.mock_object(self._driver._remotefsclient, 'mount') - drv = self._driver - drv.configuration.nfs_mount_attempts = 3 - drv.shares = {self.TEST_NFS_EXPORT1: ''} - - drv._ensure_share_mounted(self.TEST_NFS_EXPORT1) - - drv._remotefsclient.mount.called_once() - - @mock.patch('time.sleep') - def test_ensure_share_mounted_exception(self, _mock_sleep): - """Make the configured number of attempts when mounts fail.""" - - num_attempts = 3 - - self._set_driver() - self.mock_object(self._driver._remotefsclient, 'mount', - side_effect=Exception) - drv = self._driver - drv.configuration.nfs_mount_attempts = num_attempts - drv.shares = {self.TEST_NFS_EXPORT1: ''} - - self.assertRaises(exception.NfsException, drv._ensure_share_mounted, - self.TEST_NFS_EXPORT1) - - self.assertEqual(num_attempts, drv._remotefsclient.mount.call_count) - - def test_ensure_share_mounted_at_least_one_attempt(self): - """Make at least one mount attempt even if configured for less.""" - - min_num_attempts = 1 - num_attempts = 0 - self._set_driver() - self.mock_object(self._driver._remotefsclient, 'mount', - side_effect=Exception) - drv = self._driver - drv.configuration.nfs_mount_attempts = num_attempts - drv.shares = {self.TEST_NFS_EXPORT1: ''} - - self.assertRaises(exception.NfsException, drv._ensure_share_mounted, - self.TEST_NFS_EXPORT1) - - self.assertEqual(min_num_attempts, - drv._remotefsclient.mount.call_count) - - @ddt.data([NFS_CONFIG1, QEMU_IMG_INFO_OUT3], - [NFS_CONFIG2, QEMU_IMG_INFO_OUT4], - [NFS_CONFIG3, QEMU_IMG_INFO_OUT3], - [NFS_CONFIG4, QEMU_IMG_INFO_OUT4]) - @ddt.unpack - def test_copy_volume_from_snapshot(self, nfs_conf, qemu_img_info): - self._set_driver(extra_confs=nfs_conf) - drv = self._driver - dest_volume = self._simple_volume() - src_volume = self._simple_volume() - - fake_snap = fake_snapshot.fake_snapshot_obj(self.context) - fake_snap.volume = src_volume - - img_out = qemu_img_info % {'volid': src_volume.id, - 'snapid': 
fake_snap.id, - 'size_gb': src_volume.size, - 'size_b': src_volume.size * units.Gi} - - img_info = imageutils.QemuImgInfo(img_out) - mock_img_info = self.mock_object(image_utils, 'qemu_img_info') - mock_img_info.return_value = img_info - mock_convert_image = self.mock_object(image_utils, 'convert_image') - - vol_dir = os.path.join(self.TEST_MNT_POINT_BASE, - drv._get_hash_str(src_volume.provider_location)) - src_vol_path = os.path.join(vol_dir, img_info.backing_file) - dest_vol_path = os.path.join(vol_dir, dest_volume.name) - info_path = os.path.join(vol_dir, src_volume.name) + '.info' - - snap_file = dest_volume.name + '.' + fake_snap.id - snap_path = os.path.join(vol_dir, snap_file) - size = dest_volume.size - - mock_read_info_file = self.mock_object(drv, '_read_info_file') - mock_read_info_file.return_value = {'active': snap_file, - fake_snap.id: snap_file} - - mock_permission = self.mock_object(drv, '_set_rw_permissions_for_all') - - drv._copy_volume_from_snapshot(fake_snap, dest_volume, size) - - mock_read_info_file.assert_called_once_with(info_path) - mock_img_info.assert_called_once_with(snap_path, run_as_root=True) - used_qcow = nfs_conf['nfs_qcow2_volumes'] - mock_convert_image.assert_called_once_with( - src_vol_path, dest_vol_path, 'qcow2' if used_qcow else 'raw', - run_as_root=True) - mock_permission.assert_called_once_with(dest_vol_path) - - @ddt.data([NFS_CONFIG1, QEMU_IMG_INFO_OUT3], - [NFS_CONFIG2, QEMU_IMG_INFO_OUT4], - [NFS_CONFIG3, QEMU_IMG_INFO_OUT3], - [NFS_CONFIG4, QEMU_IMG_INFO_OUT4]) - @ddt.unpack - def test_create_volume_from_snapshot(self, nfs_conf, qemu_img_info): - self._set_driver(extra_confs=nfs_conf) - drv = self._driver - - # Volume source of the snapshot we are trying to clone from. We need it - # to have a different id than the default provided. 
- src_volume = self._simple_volume(size=10) - src_volume.id = six.text_type(uuid.uuid4()) - src_volume_dir = os.path.join(self.TEST_MNT_POINT_BASE, - drv._get_hash_str( - src_volume.provider_location)) - src_volume_path = os.path.join(src_volume_dir, src_volume.name) - fake_snap = fake_snapshot.fake_snapshot_obj(self.context) - - # Fake snapshot based in the previous created volume - snap_file = src_volume.name + '.' + fake_snap.id - fake_snap.volume = src_volume - fake_snap.status = 'available' - fake_snap.size = 10 - - # New fake volume where the snap will be copied - new_volume = self._simple_volume(size=10) - new_volume_dir = os.path.join(self.TEST_MNT_POINT_BASE, - drv._get_hash_str( - src_volume.provider_location)) - new_volume_path = os.path.join(new_volume_dir, new_volume.name) - - # Mocks - img_out = qemu_img_info % {'volid': src_volume.id, - 'snapid': fake_snap.id, - 'size_gb': src_volume.size, - 'size_b': src_volume.size * units.Gi} - img_info = imageutils.QemuImgInfo(img_out) - mock_img_info = self.mock_object(image_utils, 'qemu_img_info') - mock_img_info.return_value = img_info - - mock_ensure = self.mock_object(drv, '_ensure_shares_mounted') - mock_find_share = self.mock_object(drv, '_find_share', - return_value=self.TEST_NFS_EXPORT1) - mock_read_info_file = self.mock_object(drv, '_read_info_file') - mock_read_info_file.return_value = {'active': snap_file, - fake_snap.id: snap_file} - mock_convert_image = self.mock_object(image_utils, 'convert_image') - self.mock_object(drv, '_create_qcow2_file') - self.mock_object(drv, '_create_regular_file') - self.mock_object(drv, '_create_regular_file') - self.mock_object(drv, '_set_rw_permissions') - self.mock_object(drv, '_read_file') - - ret = drv.create_volume_from_snapshot(new_volume, fake_snap) - - # Test asserts - self.assertEqual(self.TEST_NFS_EXPORT1, ret['provider_location']) - used_qcow = nfs_conf['nfs_qcow2_volumes'] - mock_convert_image.assert_called_once_with( - src_volume_path, new_volume_path, 
'qcow2' if used_qcow else 'raw', - run_as_root=True) - mock_ensure.assert_called_once() - mock_find_share.assert_called_once_with(new_volume) - - def test_create_volume_from_snapshot_status_not_available(self): - """Expect an error when the snapshot's status is not 'available'.""" - self._set_driver() - drv = self._driver - - src_volume = self._simple_volume() - - fake_snap = fake_snapshot.fake_snapshot_obj(self.context) - fake_snap.volume = src_volume - - new_volume = self._simple_volume() - new_volume['size'] = fake_snap['volume_size'] - - self.assertRaises(exception.InvalidSnapshot, - drv.create_volume_from_snapshot, - new_volume, - fake_snap) - - @ddt.data([NFS_CONFIG1, QEMU_IMG_INFO_OUT1], - [NFS_CONFIG2, QEMU_IMG_INFO_OUT2], - [NFS_CONFIG3, QEMU_IMG_INFO_OUT1], - [NFS_CONFIG4, QEMU_IMG_INFO_OUT2]) - @ddt.unpack - def test_initialize_connection(self, nfs_confs, qemu_img_info): - self._set_driver(extra_confs=nfs_confs) - drv = self._driver - - volume = self._simple_volume() - vol_dir = os.path.join(self.TEST_MNT_POINT_BASE, - drv._get_hash_str(volume.provider_location)) - vol_path = os.path.join(vol_dir, volume.name) - - mock_img_utils = self.mock_object(image_utils, 'qemu_img_info') - img_out = qemu_img_info % {'volid': volume.id, 'size_gb': volume.size, - 'size_b': volume.size * units.Gi} - mock_img_utils.return_value = imageutils.QemuImgInfo(img_out) - self.mock_object(drv, '_read_info_file', - return_value={'active': "volume-%s" % volume.id}) - - conn_info = drv.initialize_connection(volume, None) - - mock_img_utils.assert_called_once_with(vol_path, run_as_root=True) - self.assertEqual('nfs', conn_info['driver_volume_type']) - self.assertEqual(volume.name, conn_info['data']['name']) - self.assertEqual(self.TEST_MNT_POINT_BASE, - conn_info['mount_point_base']) - - @mock.patch.object(image_utils, 'qemu_img_info') - def test_initialize_connection_raise_exception(self, mock_img_info): - self._set_driver() - drv = self._driver - volume = self._simple_volume() - 
- qemu_img_output = """image: %s - file format: iso - virtual size: 1.0G (1073741824 bytes) - disk size: 173K - """ % volume['name'] - mock_img_info.return_value = imageutils.QemuImgInfo(qemu_img_output) - - self.assertRaises(exception.InvalidVolume, - drv.initialize_connection, - volume, - None) - - def test_create_snapshot(self): - self._set_driver() - drv = self._driver - volume = self._simple_volume() - self.configuration.nfs_snapshot_support = True - fake_snap = fake_snapshot.fake_snapshot_obj(self.context) - fake_snap.volume = volume - vol_dir = os.path.join(self.TEST_MNT_POINT_BASE, - drv._get_hash_str(self.TEST_NFS_EXPORT1)) - snap_file = volume['name'] + '.' + fake_snap.id - snap_path = os.path.join(vol_dir, snap_file) - info_path = os.path.join(vol_dir, volume['name']) + '.info' - - with mock.patch.object(drv, '_local_path_volume_info', - return_value=info_path), \ - mock.patch.object(drv, '_read_info_file', return_value={}), \ - mock.patch.object(drv, '_do_create_snapshot') \ - as mock_do_create_snapshot, \ - mock.patch.object(drv, '_write_info_file') \ - as mock_write_info_file, \ - mock.patch.object(drv, 'get_active_image_from_info', - return_value=volume['name']), \ - mock.patch.object(drv, '_get_new_snap_path', - return_value=snap_path): - self._driver.create_snapshot(fake_snap) - - mock_do_create_snapshot.assert_called_with(fake_snap, volume['name'], - snap_path) - mock_write_info_file.assert_called_with( - info_path, {'active': snap_file, fake_snap.id: snap_file}) - - -class NfsDriverDoSetupTestCase(test.TestCase): - - def setUp(self): - super(NfsDriverDoSetupTestCase, self).setUp() - self.context = mock.Mock() - self.create_configuration() - - def create_configuration(self): - config = conf.Configuration(None) - config.append_config_values(nfs.nfs_opts) - self.configuration = config - - def test_setup_should_throw_error_if_shares_config_not_configured(self): - """do_setup should throw error if shares config is not configured.""" - - 
self.override_config('nfs_shares_config', None) - drv = nfs.NfsDriver(configuration=self.configuration) - - mock_os_path_exists = self.mock_object(os.path, 'exists') - - with self.assertRaisesRegex(exception.NfsException, - ".*no NFS config file configured.*"): - drv.do_setup(self.context) - - self.assertEqual(0, mock_os_path_exists.call_count) - - def test_setup_should_throw_error_if_shares_file_does_not_exist(self): - """do_setup should throw error if shares file does not exist.""" - - drv = nfs.NfsDriver(configuration=self.configuration) - - mock_os_path_exists = self.mock_object(os.path, 'exists') - mock_os_path_exists.return_value = False - - with self.assertRaisesRegex(exception.NfsException, - "NFS config file.*doesn't exist"): - drv.do_setup(self.context) - - mock_os_path_exists.assert_has_calls( - [mock.call(self.configuration.nfs_shares_config)]) - - def test_setup_should_not_throw_error_if_host_and_share_set(self): - """do_setup shouldn't throw shares file error if host and share set.""" - - drv = nfs.NfsDriver(configuration=self.configuration) - - self.override_config('nas_host', 'nfs-host1') - self.override_config('nas_share_path', '/export') - mock_os_path_exists = self.mock_object(os.path, 'exists') - mock_os_path_exists.return_value = False - mock_set_nas_sec_options = self.mock_object(nfs.NfsDriver, - 'set_nas_security_options') - mock_set_nas_sec_options.return_value = True - mock_execute = self.mock_object(drv, '_execute') - mock_execute.return_value = True - - drv.do_setup(self.context) - - mock_os_path_exists.assert_not_called() - - def test_setup_throw_error_if_shares_file_does_not_exist_no_host(self): - """do_setup should throw error if no shares file and no host set.""" - - drv = nfs.NfsDriver(configuration=self.configuration) - - self.override_config('nas_share_path', '/export') - mock_os_path_exists = self.mock_object(os.path, 'exists') - mock_os_path_exists.return_value = False - - with self.assertRaisesRegex(exception.NfsException, - 
"NFS config file.*doesn't exist"): - drv.do_setup(self.context) - - mock_os_path_exists.assert_has_calls( - [mock.call(self.configuration.nfs_shares_config)]) - - def test_setup_throw_error_if_shares_file_does_not_exist_no_share(self): - """do_setup should throw error if no shares file and no share set.""" - - drv = nfs.NfsDriver(configuration=self.configuration) - - self.override_config('nas_host', 'nfs-host1') - mock_os_path_exists = self.mock_object(os.path, 'exists') - mock_os_path_exists.return_value = False - - with self.assertRaisesRegex(exception.NfsException, - "NFS config file.*doesn't exist"): - drv.do_setup(self.context) - - mock_os_path_exists.assert_has_calls( - [mock.call(self.configuration.nfs_shares_config)]) - - def test_setup_throw_error_if_shares_file_doesnt_exist_no_share_host(self): - """do_setup should throw error if no shares file and no host/share.""" - - drv = nfs.NfsDriver(configuration=self.configuration) - - mock_os_path_exists = self.mock_object(os.path, 'exists') - mock_os_path_exists.return_value = False - - with self.assertRaisesRegex(exception.NfsException, - "NFS config file.*doesn't exist"): - drv.do_setup(self.context) - - mock_os_path_exists.assert_has_calls( - [mock.call(self.configuration.nfs_shares_config)]) - - def test_setup_should_throw_exception_if_nfs_client_is_not_installed(self): - """do_setup should throw error if nfs client is not installed.""" - - drv = nfs.NfsDriver(configuration=self.configuration) - - mock_os_path_exists = self.mock_object(os.path, 'exists') - mock_os_path_exists.return_value = True - mock_execute = self.mock_object(drv, '_execute') - mock_execute.side_effect = OSError( - errno.ENOENT, 'No such file or directory.') - - with self.assertRaisesRegex(exception.NfsException, - 'mount.nfs is not installed'): - drv.do_setup(self.context) - - mock_os_path_exists.assert_has_calls( - [mock.call(self.configuration.nfs_shares_config)]) - mock_execute.assert_has_calls( - [mock.call('mount.nfs', - 
check_exit_code=False, - run_as_root=True)]) - - def test_setup_should_throw_exception_if_mount_nfs_command_fails(self): - """do_setup should throw error if mount.nfs fails with OSError - - This test covers the OSError path when mount.nfs is installed. - """ - - drv = nfs.NfsDriver(configuration=self.configuration) - - mock_os_path_exists = self.mock_object(os.path, 'exists') - mock_os_path_exists.return_value = True - mock_execute = self.mock_object(drv, '_execute') - mock_execute.side_effect = OSError( - errno.EPERM, 'Operation... BROKEN') - - with self.assertRaisesRegex(OSError, '.*Operation... BROKEN'): - drv.do_setup(self.context) - - mock_os_path_exists.assert_has_calls( - [mock.call(self.configuration.nfs_shares_config)]) - mock_execute.assert_has_calls( - [mock.call('mount.nfs', - check_exit_code=False, - run_as_root=True)]) - - @mock.patch.object(os, 'rename') - def test_update_migrated_available_volume(self, rename_volume): - self._test_update_migrated_volume('available', rename_volume) - - @mock.patch.object(os, 'rename') - def test_update_migrated_available_volume_rename_fail(self, rename_volume): - self._test_update_migrated_volume('available', rename_volume, - rename_exception=True) - - @mock.patch.object(os, 'rename') - def test_update_migrated_in_use_volume(self, rename_volume): - self._test_update_migrated_volume('in-use', rename_volume) - - def _test_update_migrated_volume(self, volume_status, rename_volume, - rename_exception=False): - drv = nfs.NfsDriver(configuration=self.configuration) - fake_volume_id = 'f51b5730-13b7-11e6-a238-fa163e67a298' - fake_new_volume_id = '12341234-13b7-11e6-a238-fa163e67a298' - fake_provider_source = 'fake_provider_source' - fake_provider = 'fake_provider' - base_dir = '/dir_base/' - volume_name_template = 'volume-%s' - original_volume_name = volume_name_template % fake_volume_id - current_name = volume_name_template % fake_new_volume_id - original_volume_path = base_dir + original_volume_name - current_path = 
base_dir + current_name - volume = fake_volume.fake_volume_obj( - self.context, - id=fake_volume_id, - size=1, - provider_location=fake_provider_source, - _name_id=None) - - new_volume = fake_volume.fake_volume_obj( - self.context, - id=fake_new_volume_id, - size=1, - provider_location=fake_provider, - _name_id=None) - - with mock.patch.object(drv, 'local_path') as local_path: - local_path.return_value = base_dir + current_name - if volume_status == 'in-use': - update = drv.update_migrated_volume(self.context, - volume, - new_volume, - volume_status) - self.assertEqual({'_name_id': fake_new_volume_id, - 'provider_location': fake_provider}, update) - elif rename_exception: - rename_volume.side_effect = OSError - update = drv.update_migrated_volume(self.context, - volume, - new_volume, - volume_status) - rename_volume.assert_called_once_with(current_path, - original_volume_path) - self.assertEqual({'_name_id': fake_new_volume_id, - 'provider_location': fake_provider}, update) - else: - update = drv.update_migrated_volume(self.context, - volume, - new_volume, - volume_status) - rename_volume.assert_called_once_with(current_path, - original_volume_path) - self.assertEqual({'_name_id': None, - 'provider_location': fake_provider}, update) - - def test_retype_is_there(self): - "Ensure that driver.retype() is there.""" - - drv = nfs.NfsDriver(configuration=self.configuration) - v1 = fake_volume.fake_volume_obj(self.context) - - ret = drv.retype(self.context, - v1, - mock.sentinel.new_type, - mock.sentinel.diff, - mock.sentinel.host) - - self.assertEqual((False, None), ret) diff --git a/cinder/tests/unit/volume/drivers/test_nimble.py b/cinder/tests/unit/volume/drivers/test_nimble.py deleted file mode 100644 index 2ad1eafc9..000000000 --- a/cinder/tests/unit/volume/drivers/test_nimble.py +++ /dev/null @@ -1,1285 +0,0 @@ -# Nimble Storage, Inc. (c) 2013-2014 -# All Rights Reserved. 
-# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - - -import mock -from six.moves import http_client -import sys - -from cinder import context -from cinder import exception -from cinder.objects import volume as obj_volume -from cinder import test -from cinder.tests.unit import fake_constants as fake -from cinder.volume.drivers import nimble -from cinder.volume import volume_types - -NIMBLE_CLIENT = 'cinder.volume.drivers.nimble.NimbleRestAPIExecutor' -NIMBLE_URLLIB2 = 'cinder.volume.drivers.nimble.requests' -NIMBLE_RANDOM = 'cinder.volume.drivers.nimble.random' -NIMBLE_ISCSI_DRIVER = 'cinder.volume.drivers.nimble.NimbleISCSIDriver' -NIMBLE_FC_DRIVER = 'cinder.volume.drivers.nimble.NimbleFCDriver' -DRIVER_VERSION = '4.0.1' -nimble.DEFAULT_SLEEP = 0 - -FAKE_POSITIVE_LOGIN_RESPONSE_1 = '2c20aad78a220ed1dae21dcd6f9446f5' - -FAKE_POSITIVE_LOGIN_RESPONSE_2 = '2c20aad78a220ed1dae21dcd6f9446ff' - -FAKE_POSITIVE_HEADERS = {'X-Auth-Token': FAKE_POSITIVE_LOGIN_RESPONSE_1} - -FAKE_POSITIVE_NETCONFIG_RESPONSE = { - 'role': 'active', - 'subnet_list': [{'network': '172.18.212.0', - 'discovery_ip': '172.18.108.21', - 'type': 'data', - 'allow_iscsi': True, - 'label': 'data1', - 'allow_group': True, - 'vlan_id': 0}], - 'array_list': [{'nic_list': [{'subnet_label': 'data1', - 'tagged': False, - 'data_ip': '172.18.212.82', - 'name': 'eth3'}]}], - 'name': 'test-array'} - -FAKE_NEGATIVE_NETCONFIG_RESPONSE = exception.VolumeDriverException( - "Session expired") - 
-FAKE_CREATE_VOLUME_POSITIVE_RESPONSE = { - 'clone': False, - 'name': "testvolume"} - -FAKE_CREATE_VOLUME_POSITIVE_RESPONSE_ENCRYPTION = { - 'clone': False, - 'name': "testvolume-encryption"} - -FAKE_CREATE_VOLUME_POSITIVE_RESPONSE_PERF_POLICY = { - 'clone': False, - 'name': "testvolume-perf-policy"} - -FAKE_CREATE_VOLUME_POSITIVE_RESPONSE_MULTI_INITIATOR = { - 'clone': False, - 'name': "testvolume-multi-initiator"} - -FAKE_CREATE_VOLUME_POSITIVE_RESPONSE_DEDUPE = { - 'clone': False, - 'name': "testvolume-dedupe"} - -FAKE_CREATE_VOLUME_POSITIVE_RESPONSE_QOS = { - 'clone': False, - 'name': "testvolume-qos"} - -FAKE_GET_VOL_INFO_RESPONSE = {'name': 'testvolume', - 'clone': False, - 'target_name': 'iqn.test', - 'online': True, - 'agent_type': 'openstack'} - -FAKE_GET_VOL_INFO_RESPONSE_MANAGE = {'name': 'testvolume', - 'agent_type': 'none', - 'online': False, - 'target_name': 'iqn.test'} - -FAKE_GET_VOL_INFO_ONLINE = {'name': 'testvolume', - 'size': 2048, - 'online': True, - 'agent_type': 'none'} - -FAKE_GET_VOL_INFO_BACKUP_RESPONSE = {'name': 'testvolume', - 'clone': True, - 'target_name': 'iqn.test', - 'online': False, - 'agent_type': 'openstack', - 'parent_vol_id': 'volume-' + - fake.VOLUME2_ID, - 'base_snap_id': 'test-backup-snap'} - -FAKE_GET_SNAP_INFO_BACKUP_RESPONSE = { - 'description': "backup-vol-" + fake.VOLUME2_ID, - 'name': 'test-backup-snap', - 'id': fake.SNAPSHOT_ID, - 'vol_id': fake.VOLUME_ID, - 'volume_name': 'volume-' + fake.VOLUME_ID} - -FAKE_POSITIVE_GROUP_CONFIG_RESPONSE = { - 'name': 'group-test', - 'version_current': '0.0.0.0', - 'access_protocol_list': ['iscsi']} - -FAKE_LOGIN_POST_RESPONSE = { - 'data': {'session_token': FAKE_POSITIVE_LOGIN_RESPONSE_1}} - -FAKE_EXTEND_VOLUME_PARAMS = {'data': {'size': 5120, - 'reserve': 0, - 'warn_level': 80, - 'limit': 100, - 'snap_limit': sys.maxsize}} - -FAKE_IGROUP_LIST_RESPONSE = [ - {'iscsi_initiators': [{'iqn': 'test-initiator1'}], - 'name': 'test-igrp1'}, - {'iscsi_initiators': [{'iqn': 
'test-initiator2'}], - 'name': 'test-igrp2'}] - -FAKE_IGROUP_LIST_RESPONSE_FC = [ - {'fc_initiators': [{'wwpn': '10:00:00:00:00:00:00:00'}], - 'name': 'test-igrp1'}, - {'fc_initiators': [{'wwpn': '10:00:00:00:00:00:00:00'}, - {'wwpn': '10:00:00:00:00:00:00:01'}], - 'name': 'test-igrp2'}] - - -FAKE_CREATE_VOLUME_NEGATIVE_RESPONSE = exception.VolumeBackendAPIException( - "Volume testvolume not found") - -FAKE_VOLUME_INFO_NEGATIVE_RESPONSE = exception.VolumeBackendAPIException( - "Volume testvolume not found") - -FAKE_CREATE_VOLUME_NEGATIVE_ENCRYPTION = exception.VolumeBackendAPIException( - "Volume testvolume-encryption not found") - -FAKE_CREATE_VOLUME_NEGATIVE_PERFPOLICY = exception.VolumeBackendAPIException( - "Volume testvolume-perfpolicy not found") - -FAKE_CREATE_VOLUME_NEGATIVE_DEDUPE = exception.VolumeBackendAPIException( - "The specified pool is not capable of hosting deduplicated volumes") - -FAKE_CREATE_VOLUME_NEGATIVE_QOS = exception.VolumeBackendAPIException( - "Please set valid IOPS limitin the range [256, 4294967294]") - -FAKE_POSITIVE_GROUP_INFO_RESPONSE = { - 'version_current': '3.0.0.0', - 'group_target_enabled': False, - 'name': 'group-nimble', - 'usage_valid': True, - 'usable_capacity_bytes': 8016883089408, - 'compressed_vol_usage_bytes': 2938311843, - 'compressed_snap_usage_bytes': 36189, - 'unused_reserve_bytes': 0} - -FAKE_GENERIC_POSITIVE_RESPONSE = "" - -FAKE_TYPE_ID = fake.VOLUME_TYPE_ID -FAKE_POOL_ID = fake.GROUP_ID -FAKE_PERFORMANCE_POLICY_ID = fake.OBJECT_ID -NIMBLE_MANAGEMENT_IP = "10.18.108.55" -NIMBLE_SAN_LOGIN = "nimble" -NIMBLE_SAN_PASS = "nimble_pass" - - -def create_configuration(username, password, ip_address, - pool_name=None, subnet_label=None, - thin_provision=True): - configuration = mock.Mock() - configuration.san_login = username - configuration.san_password = password - configuration.san_ip = ip_address - configuration.san_thin_provision = thin_provision - configuration.nimble_pool_name = pool_name - 
configuration.nimble_subnet_label = subnet_label - configuration.safe_get.return_value = 'NIMBLE' - return configuration - - -class NimbleDriverBaseTestCase(test.TestCase): - - """Base Class for the NimbleDriver Tests.""" - - def setUp(self): - super(NimbleDriverBaseTestCase, self).setUp() - self.mock_client_service = None - self.mock_client_class = None - self.driver = None - - @staticmethod - def client_mock_decorator(configuration): - def client_mock_wrapper(func): - def inner_client_mock( - self, mock_client_class, mock_urllib2, *args, **kwargs): - self.mock_client_class = mock_client_class - self.mock_client_service = mock.MagicMock(name='Client') - self.mock_client_class.return_value = self.mock_client_service - self.driver = nimble.NimbleISCSIDriver( - configuration=configuration) - mock_login_response = mock_urllib2.post.return_value - mock_login_response = mock.MagicMock() - mock_login_response.status_code.return_value = http_client.OK - mock_login_response.json.return_value = ( - FAKE_LOGIN_POST_RESPONSE) - self.driver.do_setup(context.get_admin_context()) - self.driver.APIExecutor.login() - func(self, *args, **kwargs) - return inner_client_mock - return client_mock_wrapper - - @staticmethod - def client_mock_decorator_fc(configuration): - def client_mock_wrapper(func): - def inner_clent_mock( - self, mock_client_class, mock_urllib2, *args, **kwargs): - self.mock_client_class = mock_client_class - self.mock_client_service = mock.MagicMock(name='Client') - self.mock_client_class.return_value = ( - self.mock_client_service) - self.driver = nimble.NimbleFCDriver( - configuration=configuration) - mock_login_response = mock_urllib2.post.return_value - mock_login_response = mock.MagicMock() - mock_login_response.status_code.return_value = http_client.OK - mock_login_response.json.return_value = ( - FAKE_LOGIN_POST_RESPONSE) - self.driver.do_setup(context.get_admin_context()) - self.driver.APIExecutor.login() - func(self, *args, **kwargs) - return 
inner_clent_mock - return client_mock_wrapper - - def tearDown(self): - super(NimbleDriverBaseTestCase, self).tearDown() - - -class NimbleDriverLoginTestCase(NimbleDriverBaseTestCase): - - """Tests do_setup api.""" - - @mock.patch(NIMBLE_URLLIB2) - @mock.patch(NIMBLE_CLIENT) - @mock.patch.object(obj_volume.VolumeList, 'get_all_by_host', - mock.Mock(return_value=[])) - @NimbleDriverBaseTestCase.client_mock_decorator(create_configuration( - "nimble", "nimble_pass", "10.18.108.55", 'default', '*')) - def test_do_setup_positive(self): - expected_call_list = [mock.call.login()] - self.mock_client_service.assert_has_calls(expected_call_list) - - @mock.patch(NIMBLE_URLLIB2) - @mock.patch(NIMBLE_CLIENT) - @mock.patch.object(obj_volume.VolumeList, 'get_all_by_host', - mock.Mock(return_value=[])) - @NimbleDriverBaseTestCase.client_mock_decorator(create_configuration( - 'nimble', 'nimble_pass', '10.18.108.55', 'default', '*')) - def test_expire_session_id(self): - expected_call_list = [mock.call.login()] - self.mock_client_service.assert_has_calls(expected_call_list) - - self.driver.APIExecutor.get("groups") - expected_call_list = [mock.call.get_group_info(), - mock.call.login(), - mock.call.get("groups")] - - self.assertEqual( - self.mock_client_service.method_calls, - expected_call_list) - - -class NimbleDriverVolumeTestCase(NimbleDriverBaseTestCase): - - """Tests volume related api's.""" - - @mock.patch(NIMBLE_URLLIB2) - @mock.patch(NIMBLE_CLIENT) - @mock.patch.object(obj_volume.VolumeList, 'get_all_by_host', - mock.Mock(return_value=[])) - @mock.patch.object(volume_types, 'get_volume_type_extra_specs', - mock.Mock(type_id=FAKE_TYPE_ID, return_value={ - 'nimble:perfpol-name': 'default', - 'nimble:encryption': 'yes'})) - @NimbleDriverBaseTestCase.client_mock_decorator(create_configuration( - NIMBLE_SAN_LOGIN, NIMBLE_SAN_PASS, NIMBLE_MANAGEMENT_IP, - 'default', '*')) - def test_create_volume_positive(self): - self.mock_client_service.get_vol_info.return_value = ( - 
FAKE_GET_VOL_INFO_RESPONSE) - self.mock_client_service.get_netconfig.return_value = ( - FAKE_POSITIVE_NETCONFIG_RESPONSE) - - self.assertEqual({ - 'provider_location': '172.18.108.21:3260 iqn.test', - 'provider_auth': None}, - self.driver.create_volume({'name': 'testvolume', - 'size': 1, - 'volume_type_id': None, - 'display_name': '', - 'display_description': ''})) - - self.mock_client_service.create_vol.assert_called_once_with( - {'name': 'testvolume', - 'size': 1, - 'volume_type_id': None, - 'display_name': '', - 'display_description': ''}, - 'default', - False, - 'iSCSI', - False) - - @mock.patch(NIMBLE_URLLIB2) - @mock.patch(NIMBLE_CLIENT) - @mock.patch.object(obj_volume.VolumeList, 'get_all_by_host', - mock.Mock(return_value=[])) - @mock.patch.object(volume_types, 'get_volume_type_extra_specs', - mock.Mock(type_id=FAKE_TYPE_ID, return_value={ - 'nimble:perfpol-name': 'default', - 'nimble:encryption': 'yes', - 'nimble:multi-initiator': 'false'})) - @NimbleDriverBaseTestCase.client_mock_decorator(create_configuration( - 'nimble', 'nimble_pass', '10.18.108.55', 'default', '*')) - def test_create_volume_encryption_positive(self): - self.mock_client_service._execute_create_vol.return_value = ( - FAKE_CREATE_VOLUME_POSITIVE_RESPONSE_ENCRYPTION) - self.mock_client_service.get_vol_info.return_value = ( - FAKE_GET_VOL_INFO_RESPONSE) - self.mock_client_service.get_netconfig.return_value = ( - FAKE_POSITIVE_NETCONFIG_RESPONSE) - - volume = {'name': 'testvolume-encryption', - 'size': 1, - 'volume_type_id': FAKE_TYPE_ID, - 'display_name': '', - 'display_description': ''} - self.assertEqual({ - 'provider_location': '172.18.108.21:3260 iqn.test', - 'provider_auth': None}, - self.driver.create_volume(volume)) - - self.mock_client_service.create_vol.assert_called_once_with( - {'name': 'testvolume-encryption', - 'size': 1, - 'volume_type_id': FAKE_TYPE_ID, - 'display_name': '', - 'display_description': '', - }, - 'default', - False, - 'iSCSI', - False) - - 
@mock.patch(NIMBLE_URLLIB2) - @mock.patch(NIMBLE_CLIENT) - @mock.patch.object(obj_volume.VolumeList, 'get_all_by_host', - mock.Mock(return_value=[])) - @mock.patch.object(volume_types, 'get_volume_type_extra_specs', - mock.Mock(type_id=FAKE_TYPE_ID, return_value={ - 'nimble:perfpol-name': 'VMware ESX', - 'nimble:encryption': 'no', - 'nimble:multi-initiator': 'false'})) - @NimbleDriverBaseTestCase.client_mock_decorator(create_configuration( - 'nimble', 'nimble_pass', '10.18.108.55', 'default', '*')) - def test_create_volume_perfpolicy_positive(self): - self.mock_client_service._execute_create_vol.return_value = ( - FAKE_CREATE_VOLUME_POSITIVE_RESPONSE_PERF_POLICY) - self.mock_client_service.get_vol_info.return_value = ( - FAKE_GET_VOL_INFO_RESPONSE) - self.mock_client_service.get_netconfig.return_value = ( - FAKE_POSITIVE_NETCONFIG_RESPONSE) - - self.assertEqual( - {'provider_location': '172.18.108.21:3260 iqn.test', - 'provider_auth': None}, - self.driver.create_volume({'name': 'testvolume-perfpolicy', - 'size': 1, - 'volume_type_id': FAKE_TYPE_ID, - 'display_name': '', - 'display_description': ''})) - - self.mock_client_service.create_vol.assert_called_once_with( - {'name': 'testvolume-perfpolicy', - 'size': 1, - 'volume_type_id': FAKE_TYPE_ID, - 'display_name': '', - 'display_description': '', - }, - 'default', - False, - 'iSCSI', - False) - - @mock.patch(NIMBLE_URLLIB2) - @mock.patch(NIMBLE_CLIENT) - @mock.patch.object(obj_volume.VolumeList, 'get_all_by_host', - mock.Mock(return_value=[])) - @mock.patch.object(volume_types, 'get_volume_type_extra_specs', - mock.Mock(type_id=FAKE_TYPE_ID, return_value={ - 'nimble:perfpol-name': 'default', - 'nimble:encryption': 'no', - 'nimble:multi-initiator': 'true'})) - @NimbleDriverBaseTestCase.client_mock_decorator(create_configuration( - 'nimble', 'nimble_pass', '10.18.108.55', 'default', '*')) - def test_create_volume_multi_initiator_positive(self): - self.mock_client_service._execute_create_vol.return_value = ( - 
FAKE_CREATE_VOLUME_POSITIVE_RESPONSE_MULTI_INITIATOR) - self.mock_client_service.get_vol_info.return_value = ( - FAKE_GET_VOL_INFO_RESPONSE) - self.mock_client_service.get_netconfig.return_value = ( - FAKE_POSITIVE_NETCONFIG_RESPONSE) - - self.assertEqual( - {'provider_location': '172.18.108.21:3260 iqn.test', - 'provider_auth': None}, - self.driver.create_volume({'name': 'testvolume-multi-initiator', - 'size': 1, - 'volume_type_id': FAKE_TYPE_ID, - 'display_name': '', - 'display_description': ''})) - - self.mock_client_service.create_vol.assert_called_once_with( - {'name': 'testvolume-multi-initiator', - 'size': 1, - 'volume_type_id': FAKE_TYPE_ID, - 'display_name': '', - 'display_description': '', - }, - 'default', - False, - 'iSCSI', - False) - - @mock.patch(NIMBLE_URLLIB2) - @mock.patch(NIMBLE_CLIENT) - @mock.patch.object(obj_volume.VolumeList, 'get_all_by_host', - mock.Mock(return_value=[])) - @mock.patch.object(volume_types, 'get_volume_type_extra_specs', - mock.Mock(type_id=FAKE_TYPE_ID, return_value={ - 'nimble:perfpol-name': 'default', - 'nimble:encryption': 'no', - 'nimble:dedupe': 'true'})) - @NimbleDriverBaseTestCase.client_mock_decorator(create_configuration( - 'nimble', 'nimble_pass', '10.18.108.55', 'default', '*')) - def test_create_volume_dedupe_positive(self): - self.mock_client_service._execute_create_vol.return_value = ( - FAKE_CREATE_VOLUME_POSITIVE_RESPONSE_DEDUPE) - self.mock_client_service.get_vol_info.return_value = ( - FAKE_GET_VOL_INFO_RESPONSE) - self.mock_client_service.get_netconfig.return_value = ( - FAKE_POSITIVE_NETCONFIG_RESPONSE) - - self.assertEqual( - {'provider_location': '172.18.108.21:3260 iqn.test', - 'provider_auth': None}, - self.driver.create_volume({'name': 'testvolume-dedupe', - 'size': 1, - 'volume_type_id': FAKE_TYPE_ID, - 'display_name': '', - 'display_description': ''})) - - self.mock_client_service.create_vol.assert_called_once_with( - {'name': 'testvolume-dedupe', - 'size': 1, - 'volume_type_id': FAKE_TYPE_ID, - 
'display_name': '', - 'display_description': '', - }, - 'default', - False, - 'iSCSI', - False) - - @mock.patch(NIMBLE_URLLIB2) - @mock.patch(NIMBLE_CLIENT) - @mock.patch.object(obj_volume.VolumeList, 'get_all_by_host', - mock.Mock(return_value=[])) - @mock.patch.object(volume_types, 'get_volume_type_extra_specs', - mock.Mock(type_id=FAKE_TYPE_ID, return_value={ - 'nimble:perfpol-name': 'default', - 'nimble:iops-limit': '1024'})) - @NimbleDriverBaseTestCase.client_mock_decorator(create_configuration( - 'nimble', 'nimble_pass', '10.18.108.55', 'default', '*')) - def test_create_volume_qos_positive(self): - self.mock_client_service._execute_create_vol.return_value = ( - FAKE_CREATE_VOLUME_POSITIVE_RESPONSE_QOS) - self.mock_client_service.get_vol_info.return_value = ( - FAKE_GET_VOL_INFO_RESPONSE) - self.mock_client_service.get_netconfig.return_value = ( - FAKE_POSITIVE_NETCONFIG_RESPONSE) - - self.assertEqual( - {'provider_location': '172.18.108.21:3260 iqn.test', - 'provider_auth': None}, - self.driver.create_volume({'name': 'testvolume-qos', - 'size': 1, - 'volume_type_id': FAKE_TYPE_ID, - 'display_name': '', - 'display_description': ''})) - - self.mock_client_service.create_vol.assert_called_once_with( - {'name': 'testvolume-qos', - 'size': 1, - 'volume_type_id': FAKE_TYPE_ID, - 'display_name': '', - 'display_description': '', - }, - 'default', - False, - 'iSCSI', - False) - - @mock.patch(NIMBLE_URLLIB2) - @mock.patch(NIMBLE_CLIENT) - @mock.patch.object(obj_volume.VolumeList, 'get_all_by_host', - mock.Mock(return_value=[])) - @NimbleDriverBaseTestCase.client_mock_decorator(create_configuration( - 'nimble', 'nimble_pass', '10.18.108.55', 'default', '*')) - @mock.patch.object(volume_types, 'get_volume_type_extra_specs', - mock.Mock(type_id=FAKE_TYPE_ID, return_value={ - 'nimble:perfpol-name': 'default', - 'nimble:encryption': 'no', - 'nimble:multi-initiator': 'true'})) - def test_create_volume_negative(self): - self.mock_client_service.get_vol_info.side_effect = ( - 
FAKE_CREATE_VOLUME_NEGATIVE_RESPONSE) - - self.assertRaises( - exception.VolumeBackendAPIException, - self.driver.create_volume, - {'name': 'testvolume', - 'size': 1, - 'volume_type_id': FAKE_TYPE_ID, - 'display_name': '', - 'display_description': ''}) - - @mock.patch(NIMBLE_URLLIB2) - @mock.patch(NIMBLE_CLIENT) - @mock.patch.object(obj_volume.VolumeList, 'get_all_by_host', - mock.Mock(return_value=[])) - @NimbleDriverBaseTestCase.client_mock_decorator(create_configuration( - 'nimble', 'nimble_pass', '10.18.108.55', 'default', '*')) - def test_create_volume_encryption_negative(self): - self.mock_client_service.get_vol_info.side_effect = ( - FAKE_CREATE_VOLUME_NEGATIVE_ENCRYPTION) - self.assertRaises( - exception.VolumeBackendAPIException, - self.driver.create_volume, - {'name': 'testvolume-encryption', - 'size': 1, - 'volume_type_id': None, - 'display_name': '', - 'display_description': ''}) - - @mock.patch(NIMBLE_URLLIB2) - @mock.patch(NIMBLE_CLIENT) - @mock.patch.object(obj_volume.VolumeList, 'get_all_by_host', - mock.Mock(return_value=[])) - @NimbleDriverBaseTestCase.client_mock_decorator(create_configuration( - 'nimble', 'nimble_pass', '10.18.108.55', 'default', '*')) - def test_create_volume_perfpolicy_negative(self): - self.mock_client_service.get_vol_info.side_effect = ( - FAKE_CREATE_VOLUME_NEGATIVE_PERFPOLICY) - self.assertRaises( - exception.VolumeBackendAPIException, - self.driver.create_volume, - {'name': 'testvolume-perfpolicy', - 'size': 1, - 'volume_type_id': None, - 'display_name': '', - 'display_description': ''}) - - @mock.patch(NIMBLE_URLLIB2) - @mock.patch(NIMBLE_CLIENT) - @mock.patch.object(obj_volume.VolumeList, 'get_all_by_host', - mock.Mock(return_value=[])) - @NimbleDriverBaseTestCase.client_mock_decorator(create_configuration( - 'nimble', 'nimble_pass', '10.18.108.55', 'default', '*')) - def test_create_volume_dedupe_negative(self): - self.mock_client_service.get_vol_info.side_effect = ( - FAKE_CREATE_VOLUME_NEGATIVE_DEDUPE) - 
self.assertRaises( - exception.VolumeBackendAPIException, - self.driver.create_volume, - {'name': 'testvolume-dedupe', - 'size': 1, - 'volume_type_id': None, - 'display_name': '', - 'display_description': ''}) - - @mock.patch(NIMBLE_URLLIB2) - @mock.patch(NIMBLE_CLIENT) - @mock.patch.object(obj_volume.VolumeList, 'get_all_by_host', - mock.Mock(return_value=[])) - @NimbleDriverBaseTestCase.client_mock_decorator(create_configuration( - 'nimble', 'nimble_pass', '10.18.108.55', 'default', '*')) - @mock.patch.object(volume_types, 'get_volume_type_extra_specs', - mock.Mock(type_id=FAKE_TYPE_ID, return_value={ - 'nimble:perfpol-name': 'default', - 'nimble:iops-limit': '200'})) - def test_create_volume_qos_negative(self): - self.mock_client_service.get_vol_info.side_effect = ( - FAKE_CREATE_VOLUME_NEGATIVE_QOS) - self.assertRaises( - exception.VolumeBackendAPIException, - self.driver.create_volume, - {'name': 'testvolume-qos', - 'size': 1, - 'volume_type_id': None, - 'display_name': '', - 'display_description': ''}) - - @mock.patch(NIMBLE_URLLIB2) - @mock.patch(NIMBLE_CLIENT) - @mock.patch.object(obj_volume.VolumeList, 'get_all_by_host', - mock.Mock(return_value=[])) - @NimbleDriverBaseTestCase.client_mock_decorator(create_configuration( - 'nimble', 'nimble_pass', '10.18.108.55', 'default', '*')) - @mock.patch(NIMBLE_ISCSI_DRIVER + ".is_volume_backup_clone", mock.Mock( - return_value = ['', ''])) - def test_delete_volume(self): - self.mock_client_service.online_vol.return_value = ( - FAKE_GENERIC_POSITIVE_RESPONSE) - self.mock_client_service.delete_vol.return_value = ( - FAKE_GENERIC_POSITIVE_RESPONSE) - self.driver.delete_volume({'name': 'testvolume'}) - expected_calls = [mock.call.online_vol( - 'testvolume', False), - mock.call.delete_vol('testvolume')] - - self.mock_client_service.assert_has_calls(expected_calls) - - @mock.patch(NIMBLE_URLLIB2) - @mock.patch(NIMBLE_CLIENT) - @NimbleDriverBaseTestCase.client_mock_decorator(create_configuration( - 'nimble', 'nimble_pass', 
'10.18.108.55', 'default', '*')) - @mock.patch(NIMBLE_ISCSI_DRIVER + ".is_volume_backup_clone", mock.Mock( - return_value=['test-backup-snap', 'volume-' + fake.VOLUME_ID])) - @mock.patch.object(obj_volume.VolumeList, 'get_all_by_host') - def test_delete_volume_with_backup(self, mock_volume_list): - mock_volume_list.return_value = [] - self.mock_client_service.online_vol.return_value = ( - FAKE_GENERIC_POSITIVE_RESPONSE) - self.mock_client_service.delete_vol.return_value = ( - FAKE_GENERIC_POSITIVE_RESPONSE) - self.mock_client_service.online_snap.return_value = ( - FAKE_GENERIC_POSITIVE_RESPONSE) - self.mock_client_service.delete_snap.return_value = ( - FAKE_GENERIC_POSITIVE_RESPONSE) - - self.driver.delete_volume({'name': 'testvolume'}) - expected_calls = [mock.call.online_vol( - 'testvolume', False), - mock.call.delete_vol('testvolume'), - mock.call.online_snap('volume-' + fake.VOLUME_ID, - False, - 'test-backup-snap'), - mock.call.delete_snap('volume-' + fake.VOLUME_ID, - 'test-backup-snap')] - - self.mock_client_service.assert_has_calls(expected_calls) - - @mock.patch(NIMBLE_URLLIB2) - @mock.patch(NIMBLE_CLIENT) - @mock.patch.object(obj_volume.VolumeList, 'get_all_by_host', - mock.Mock(return_value=[])) - @NimbleDriverBaseTestCase.client_mock_decorator(create_configuration( - 'nimble', 'nimble_pass', '10.18.108.55', 'default', '*')) - def test_extend_volume(self): - self.mock_client_service.edit_vol.return_value = ( - FAKE_CREATE_VOLUME_POSITIVE_RESPONSE) - self.driver.extend_volume({'name': 'testvolume'}, 5) - - self.mock_client_service.edit_vol.assert_called_once_with( - 'testvolume', FAKE_EXTEND_VOLUME_PARAMS) - - @mock.patch(NIMBLE_URLLIB2) - @mock.patch(NIMBLE_CLIENT) - @mock.patch.object(volume_types, 'get_volume_type_extra_specs', - mock.Mock(type_id=FAKE_TYPE_ID, - return_value= - {'nimble:perfpol-name': 'default', - 'nimble:encryption': 'yes', - 'nimble:multi-initiator': 'false', - 'nimble:iops-limit': '1024'})) - 
@NimbleDriverBaseTestCase.client_mock_decorator(create_configuration( - 'nimble', 'nimble_pass', '10.18.108.55', 'default', '*', False)) - @mock.patch.object(obj_volume.VolumeList, 'get_all_by_host') - @mock.patch(NIMBLE_RANDOM) - def test_create_cloned_volume(self, mock_random, mock_volume_list): - mock_random.sample.return_value = fake.VOLUME_ID - mock_volume_list.return_value = [] - self.mock_client_service.snap_vol.return_value = ( - FAKE_GENERIC_POSITIVE_RESPONSE) - self.mock_client_service.clone_vol.return_value = ( - FAKE_GENERIC_POSITIVE_RESPONSE) - self.mock_client_service.get_vol_info.return_value = ( - FAKE_GET_VOL_INFO_RESPONSE) - self.mock_client_service.get_netconfig.return_value = ( - FAKE_POSITIVE_NETCONFIG_RESPONSE) - - volume = obj_volume.Volume(context.get_admin_context(), - id=fake.VOLUME_ID, - size=5.0, - _name_id=None, - display_name='', - volume_type_id=FAKE_TYPE_ID - ) - src_volume = obj_volume.Volume(context.get_admin_context(), - id=fake.VOLUME2_ID, - _name_id=None, - size=5.0) - self.assertEqual({ - 'provider_location': '172.18.108.21:3260 iqn.test', - 'provider_auth': None}, - self.driver.create_cloned_volume(volume, src_volume)) - - expected_calls = [mock.call.snap_vol( - {'volume_name': "volume-" + fake.VOLUME2_ID, - 'name': 'openstack-clone-volume-' + fake.VOLUME_ID + "-" + - fake.VOLUME_ID, - 'volume_size': src_volume['size'], - 'display_name': volume['display_name'], - 'display_description': ''}), - mock.call.clone_vol(volume, - {'volume_name': "volume-" + fake.VOLUME2_ID, - 'name': 'openstack-clone-volume-' + - fake.VOLUME_ID + "-" + - fake.VOLUME_ID, - 'volume_size': src_volume['size'], - 'display_name': volume['display_name'], - 'display_description': ''}, - True, False, 'iSCSI', 'default')] - - self.mock_client_service.assert_has_calls(expected_calls) - - @mock.patch(NIMBLE_URLLIB2) - @mock.patch(NIMBLE_CLIENT) - @mock.patch.object(obj_volume.VolumeList, 'get_all_by_host', - mock.Mock(return_value=[])) - 
@NimbleDriverBaseTestCase.client_mock_decorator(create_configuration( - 'nimble', 'nimble_pass', '10.18.108.55', 'default', '*')) - def test_manage_volume_positive(self): - self.mock_client_service.get_netconfig.return_value = ( - FAKE_POSITIVE_NETCONFIG_RESPONSE) - self.mock_client_service.get_vol_info.return_value = ( - FAKE_GET_VOL_INFO_RESPONSE_MANAGE) - self.mock_client_service.online_vol.return_value = ( - FAKE_GENERIC_POSITIVE_RESPONSE) - self.mock_client_service.edit_vol.return_value = ( - FAKE_CREATE_VOLUME_POSITIVE_RESPONSE) - self.assertEqual({ - 'provider_location': '172.18.108.21:3260 iqn.test', - 'provider_auth': None}, - self.driver.manage_existing({'name': 'volume-abcdef', - 'id': fake.VOLUME_ID, - 'agent_type': None}, - {'source-name': 'test-vol'})) - expected_calls = [mock.call.edit_vol( - 'test-vol', {'data': {'agent_type': 'openstack', - 'name': 'volume-abcdef'}}), - mock.call.online_vol('volume-abcdef', True)] - self.mock_client_service.assert_has_calls(expected_calls) - - @mock.patch(NIMBLE_URLLIB2) - @mock.patch(NIMBLE_CLIENT) - @mock.patch.object(obj_volume.VolumeList, 'get_all_by_host', - mock.Mock(return_value=[])) - @NimbleDriverBaseTestCase.client_mock_decorator(create_configuration( - 'nimble', 'nimble_pass', '10.18.108.55', 'default', '*')) - def test_manage_volume_which_is_online(self): - self.mock_client_service.get_netconfig.return_value = ( - FAKE_POSITIVE_NETCONFIG_RESPONSE) - self.mock_client_service.get_vol_info.return_value = ( - FAKE_GET_VOL_INFO_ONLINE) - self.assertRaises( - exception.InvalidVolume, - self.driver.manage_existing, - {'name': 'volume-abcdef'}, - {'source-name': 'test-vol'}) - - @mock.patch(NIMBLE_URLLIB2) - @mock.patch(NIMBLE_CLIENT) - @mock.patch.object(obj_volume.VolumeList, 'get_all_by_host', - mock.Mock(return_value=[])) - @NimbleDriverBaseTestCase.client_mock_decorator(create_configuration( - 'nimble', 'nimble_pass', '10.18.108.55', 'default', '*')) - def test_manage_volume_get_size(self): - 
self.mock_client_service.get_netconfig.return_value = ( - FAKE_POSITIVE_NETCONFIG_RESPONSE) - self.mock_client_service.get_vol_info.return_value = ( - FAKE_GET_VOL_INFO_ONLINE) - size = self.driver.manage_existing_get_size( - {'name': 'volume-abcdef'}, {'source-name': 'test-vol'}) - self.assertEqual(2, size) - - @mock.patch(NIMBLE_URLLIB2) - @mock.patch(NIMBLE_CLIENT) - @mock.patch.object(obj_volume.VolumeList, 'get_all', - mock.Mock(return_value=[])) - @NimbleDriverBaseTestCase.client_mock_decorator(create_configuration( - 'nimble', 'nimble_pass', '10.18.108.55', 'default', '*')) - def test_manage_volume_with_improper_ref(self): - self.assertRaises( - exception.ManageExistingInvalidReference, - self.driver.manage_existing, - {'name': 'volume-abcdef'}, - {'source-id': 'test-vol'}) - - @mock.patch(NIMBLE_URLLIB2) - @mock.patch(NIMBLE_CLIENT) - @mock.patch.object(obj_volume.VolumeList, 'get_all_by_host', - mock.Mock(return_value=[])) - @NimbleDriverBaseTestCase.client_mock_decorator(create_configuration( - 'nimble', 'nimble_pass', '10.18.108.55', 'default', '*')) - def test_manage_volume_with_nonexistant_volume(self): - self.mock_client_service.get_vol_info.side_effect = ( - FAKE_VOLUME_INFO_NEGATIVE_RESPONSE) - self.assertRaises( - exception.VolumeBackendAPIException, - self.driver.manage_existing, - {'name': 'volume-abcdef'}, - {'source-name': 'test-vol'}) - - @mock.patch(NIMBLE_URLLIB2) - @mock.patch(NIMBLE_CLIENT) - @mock.patch.object(obj_volume.VolumeList, 'get_all_by_host', - mock.Mock(return_value=[])) - @NimbleDriverBaseTestCase.client_mock_decorator(create_configuration( - 'nimble', 'nimble_pass', '10.18.108.55', 'default', '*')) - def test_manage_volume_with_wrong_agent_type(self): - self.mock_client_service.get_vol_info.return_value = ( - FAKE_GET_VOL_INFO_RESPONSE) - self.assertRaises( - exception.ManageExistingAlreadyManaged, - self.driver.manage_existing, - {'id': 'abcdef', 'name': 'volume-abcdef'}, - {'source-name': 'test-vol'}) - - 
@mock.patch(NIMBLE_URLLIB2) - @mock.patch(NIMBLE_CLIENT) - @mock.patch.object(obj_volume.VolumeList, 'get_all_by_host', - mock.Mock(return_value=[])) - @NimbleDriverBaseTestCase.client_mock_decorator(create_configuration( - 'nimble', 'nimble_pass', '10.18.108.55', 'default', '*')) - def test_unmanage_volume_positive(self): - self.mock_client_service.get_vol_info.return_value = ( - FAKE_GET_VOL_INFO_RESPONSE) - self.mock_client_service.edit_vol.return_value = ( - FAKE_CREATE_VOLUME_POSITIVE_RESPONSE) - self.driver.unmanage({'name': 'volume-abcdef'}) - expected_calls = [ - mock.call.edit_vol( - 'volume-abcdef', - {'data': {'agent_type': 'none'}}), - - mock.call.online_vol('volume-abcdef', False)] - - self.mock_client_service.assert_has_calls(expected_calls) - - @mock.patch(NIMBLE_URLLIB2) - @mock.patch(NIMBLE_CLIENT) - @mock.patch.object(obj_volume.VolumeList, 'get_all_by_host', - mock.Mock(return_value=[])) - @NimbleDriverBaseTestCase.client_mock_decorator(create_configuration( - 'nimble', 'nimble_pass', '10.18.108.55', 'default', '*')) - def test_unmanage_with_invalid_volume(self): - self.mock_client_service.get_vol_info.side_effect = ( - FAKE_VOLUME_INFO_NEGATIVE_RESPONSE) - self.assertRaises( - exception.VolumeBackendAPIException, - self.driver.unmanage, - {'name': 'volume-abcdef'} - ) - - @mock.patch(NIMBLE_URLLIB2) - @mock.patch(NIMBLE_CLIENT) - @mock.patch.object(obj_volume.VolumeList, 'get_all_by_host', - mock.Mock(return_value=[])) - @NimbleDriverBaseTestCase.client_mock_decorator(create_configuration( - 'nimble', 'nimble_pass', '10.18.108.55', 'default', '*')) - def test_unmanage_with_invalid_agent_type(self): - self.mock_client_service.get_vol_info.return_value = ( - FAKE_GET_VOL_INFO_ONLINE) - self.assertRaises( - exception.InvalidVolume, - self.driver.unmanage, - {'name': 'volume-abcdef'} - ) - - @mock.patch(NIMBLE_URLLIB2) - @mock.patch(NIMBLE_CLIENT) - @mock.patch.object(obj_volume.VolumeList, 'get_all_by_host', - mock.Mock(return_value=[])) - 
@NimbleDriverBaseTestCase.client_mock_decorator(create_configuration( - 'nimble', 'nimble_pass', '10.18.108.55', 'default', '*')) - def test_get_volume_stats(self): - self.mock_client_service.get_group_info.return_value = ( - FAKE_POSITIVE_GROUP_INFO_RESPONSE) - expected_res = {'driver_version': DRIVER_VERSION, - 'vendor_name': 'Nimble', - 'volume_backend_name': 'NIMBLE', - 'storage_protocol': 'iSCSI', - 'pools': [{'pool_name': 'NIMBLE', - 'total_capacity_gb': 7466.30419921875, - 'free_capacity_gb': 7463.567649364471, - 'reserved_percentage': 0, - 'QoS_support': False}]} - self.assertEqual( - expected_res, - self.driver.get_volume_stats(refresh=True)) - - @mock.patch(NIMBLE_URLLIB2) - @mock.patch(NIMBLE_CLIENT) - @mock.patch.object(obj_volume.VolumeList, 'get_all_by_host', - mock.Mock(return_value=[])) - @NimbleDriverBaseTestCase.client_mock_decorator(create_configuration( - 'nimble', 'nimble_pass', '10.18.108.55', 'default', '*')) - def test_is_volume_backup_clone(self): - self.mock_client_service.get_vol_info.return_value = ( - FAKE_GET_VOL_INFO_BACKUP_RESPONSE) - self.mock_client_service.get_snap_info_by_id.return_value = ( - FAKE_GET_SNAP_INFO_BACKUP_RESPONSE) - self.mock_client_service.get_snap_info_detail.return_value = ( - FAKE_GET_SNAP_INFO_BACKUP_RESPONSE) - self.mock_client_service.get_volume_name.return_value = ( - 'volume-' + fake.VOLUME2_ID) - - volume = obj_volume.Volume(context.get_admin_context(), - id=fake.VOLUME_ID, - _name_id=None) - self.assertEqual(("test-backup-snap", "volume-" + fake.VOLUME2_ID), - self.driver.is_volume_backup_clone(volume)) - expected_calls = [ - mock.call.get_vol_info('volume-' + fake.VOLUME_ID), - mock.call.get_snap_info_by_id('test-backup-snap', - 'volume-' + fake.VOLUME2_ID) - ] - self.mock_client_service.assert_has_calls(expected_calls) - - -class NimbleDriverSnapshotTestCase(NimbleDriverBaseTestCase): - - """Tests snapshot related api's.""" - - @mock.patch(NIMBLE_URLLIB2) - @mock.patch(NIMBLE_CLIENT) - 
@mock.patch.object(obj_volume.VolumeList, 'get_all_by_host', - mock.Mock(return_value=[])) - @NimbleDriverBaseTestCase.client_mock_decorator(create_configuration( - 'nimble', 'nimble_pass', '10.18.108.55', 'default', '*')) - def test_create_snapshot(self): - self.mock_client_service.snap_vol.return_value = ( - FAKE_GENERIC_POSITIVE_RESPONSE) - self.driver.create_snapshot( - {'volume_name': 'testvolume', - 'name': 'testvolume-snap1', - 'display_name': ''}) - self.mock_client_service.snap_vol.assert_called_once_with( - {'volume_name': 'testvolume', - 'name': 'testvolume-snap1', - 'display_name': ''}) - - @mock.patch(NIMBLE_URLLIB2) - @mock.patch(NIMBLE_CLIENT) - @mock.patch.object(obj_volume.VolumeList, 'get_all_by_host', - mock.Mock(return_value=[])) - @NimbleDriverBaseTestCase.client_mock_decorator(create_configuration( - 'nimble', 'nimble_pass', '10.18.108.55', 'default', '*')) - def test_delete_snapshot(self): - self.mock_client_service.online_snap.return_value = ( - FAKE_GENERIC_POSITIVE_RESPONSE) - self.mock_client_service.delete_snap.return_value = ( - FAKE_GENERIC_POSITIVE_RESPONSE) - self.driver.delete_snapshot( - {'volume_name': 'testvolume', - 'name': 'testvolume-snap1'}) - expected_calls = [mock.call.online_snap( - 'testvolume', False, 'testvolume-snap1'), - mock.call.delete_snap('testvolume', - 'testvolume-snap1')] - self.mock_client_service.assert_has_calls(expected_calls) - - @mock.patch(NIMBLE_URLLIB2) - @mock.patch(NIMBLE_CLIENT) - @mock.patch.object(obj_volume.VolumeList, 'get_all_by_host', - mock.Mock(return_value=[])) - @mock.patch.object(volume_types, 'get_volume_type_extra_specs', - mock.Mock(type_id=FAKE_TYPE_ID, return_value={ - 'nimble:perfpol-name': 'default', - 'nimble:encryption': 'yes', - 'nimble:multi-initiator': 'false'})) - @NimbleDriverBaseTestCase.client_mock_decorator(create_configuration( - 'nimble', 'nimble_pass', '10.18.108.55', 'default', '*')) - def test_create_volume_from_snapshot(self): - 
self.mock_client_service.clone_vol.return_value = ( - FAKE_GENERIC_POSITIVE_RESPONSE) - self.mock_client_service.get_vol_info.return_value = ( - FAKE_GET_VOL_INFO_RESPONSE) - self.mock_client_service.get_netconfig.return_value = ( - FAKE_POSITIVE_NETCONFIG_RESPONSE) - self.assertEqual({ - 'provider_location': '172.18.108.21:3260 iqn.test', - 'provider_auth': None}, - self.driver.create_volume_from_snapshot( - {'name': 'clone-testvolume', - 'size': 2, - 'volume_type_id': FAKE_TYPE_ID}, - {'volume_name': 'testvolume', - 'name': 'testvolume-snap1', - 'volume_size': 1})) - expected_calls = [ - mock.call.clone_vol( - {'name': 'clone-testvolume', - 'volume_type_id': FAKE_TYPE_ID, - 'size': 2}, - {'volume_name': 'testvolume', - 'name': 'testvolume-snap1', - 'volume_size': 1}, - False, - False, - 'iSCSI', - 'default'), - mock.call.edit_vol('clone-testvolume', - {'data': {'size': 2048, - 'snap_limit': sys.maxsize, - 'warn_level': 80, - 'reserve': 0, - 'limit': 100}})] - self.mock_client_service.assert_has_calls(expected_calls) - - -class NimbleDriverConnectionTestCase(NimbleDriverBaseTestCase): - - """Tests Connection related api's.""" - - @mock.patch(NIMBLE_URLLIB2) - @mock.patch(NIMBLE_CLIENT) - @mock.patch.object(obj_volume.VolumeList, 'get_all_by_host', - mock.Mock(return_value=[])) - @NimbleDriverBaseTestCase.client_mock_decorator(create_configuration( - 'nimble', 'nimble_pass', '10.18.108.55', 'default', '*')) - def test_initialize_connection_igroup_exist(self): - self.mock_client_service.get_initiator_grp_list.return_value = ( - FAKE_IGROUP_LIST_RESPONSE) - expected_res = { - 'driver_volume_type': 'iscsi', - 'data': { - 'volume_id': 12, - 'target_iqn': '13', - 'target_lun': 0, - 'target_portal': '12'}} - self.assertEqual( - expected_res, - self.driver.initialize_connection( - {'name': 'test-volume', - 'provider_location': '12 13', - 'id': 12}, - {'initiator': 'test-initiator1'})) - - @mock.patch(NIMBLE_URLLIB2) - @mock.patch(NIMBLE_CLIENT) - 
@mock.patch.object(obj_volume.VolumeList, 'get_all_by_host', - mock.Mock(return_value=[])) - @NimbleDriverBaseTestCase.client_mock_decorator(create_configuration( - 'nimble', 'nimble_pass', '10.18.108.55', 'default', '*')) - def test_initialize_connection_live_migration(self): - self.mock_client_service.get_initiator_grp_list.return_value = ( - FAKE_IGROUP_LIST_RESPONSE) - expected_res = { - 'driver_volume_type': 'iscsi', - 'data': { - 'volume_id': 12, - 'target_iqn': '13', - 'target_lun': 0, - 'target_portal': '12'}} - - self.assertEqual( - expected_res, - self.driver.initialize_connection( - {'name': 'test-volume', - 'provider_location': '12 13', - 'id': 12}, - {'initiator': 'test-initiator1'})) - - self.driver.initialize_connection( - {'name': 'test-volume', - 'provider_location': '12 13', - 'id': 12}, - {'initiator': 'test-initiator1'}) - - # 2 or more calls to initialize connection and add_acl for live - # migration to work - expected_calls = [ - mock.call.get_initiator_grp_list(), - mock.call.add_acl({'name': 'test-volume', - 'provider_location': '12 13', - 'id': 12}, - 'test-igrp1'), - mock.call.get_initiator_grp_list(), - mock.call.add_acl({'name': 'test-volume', - 'provider_location': '12 13', - 'id': 12}, - 'test-igrp1')] - self.mock_client_service.assert_has_calls(expected_calls) - - @mock.patch(NIMBLE_URLLIB2) - @mock.patch(NIMBLE_CLIENT) - @mock.patch.object(obj_volume.VolumeList, 'get_all_by_host', - mock.Mock(return_value=[])) - @NimbleDriverBaseTestCase.client_mock_decorator_fc(create_configuration( - 'nimble', 'nimble_pass', '10.18.108.55', 'default', '*')) - @mock.patch(NIMBLE_FC_DRIVER + ".get_lun_number") - @mock.patch(NIMBLE_FC_DRIVER + ".get_wwpns_from_array") - def test_initialize_connection_fc_igroup_exist(self, mock_wwpns, - mock_lun_number): - mock_lun_number.return_value = 13 - mock_wwpns.return_value = ["1111111111111101"] - self.mock_client_service.get_initiator_grp_list.return_value = ( - FAKE_IGROUP_LIST_RESPONSE_FC) - expected_res = 
{ - 'driver_volume_type': 'fibre_channel', - 'data': { - 'target_lun': 13, - 'target_discovered': True, - 'target_wwn': ["1111111111111101"], - 'initiator_target_map': {'1000000000000000': - ['1111111111111101']}}} - self.assertEqual( - expected_res, - self.driver.initialize_connection( - {'name': 'test-volume', - 'provider_location': 'array1', - 'id': 12}, - {'initiator': 'test-initiator1', - 'wwpns': ['1000000000000000']})) - - @mock.patch(NIMBLE_URLLIB2) - @mock.patch(NIMBLE_CLIENT) - @mock.patch.object(obj_volume.VolumeList, 'get_all_by_host', - mock.Mock(return_value=[])) - @NimbleDriverBaseTestCase.client_mock_decorator(create_configuration( - 'nimble', 'nimble_pass', '10.18.108.55', 'default', '*')) - @mock.patch(NIMBLE_RANDOM) - def test_initialize_connection_igroup_not_exist(self, mock_random): - mock_random.sample.return_value = 'abcdefghijkl' - self.mock_client_service.get_initiator_grp_list.return_value = ( - FAKE_IGROUP_LIST_RESPONSE) - expected_res = { - 'driver_volume_type': 'iscsi', - 'data': { - 'target_lun': 0, - 'volume_id': 12, - 'target_iqn': '13', - 'target_portal': '12'}} - self.assertEqual( - expected_res, - self.driver.initialize_connection( - {'name': 'test-volume', - 'provider_location': '12 13', - 'id': 12}, - {'initiator': 'test-initiator3'})) - - @mock.patch(NIMBLE_URLLIB2) - @mock.patch(NIMBLE_CLIENT) - @mock.patch.object(obj_volume.VolumeList, 'get_all_by_host', - mock.Mock(return_value=[])) - @NimbleDriverBaseTestCase.client_mock_decorator_fc(create_configuration( - 'nimble', 'nimble_pass', '10.18.108.55', 'default', '*')) - @mock.patch(NIMBLE_FC_DRIVER + ".get_wwpns_from_array") - @mock.patch(NIMBLE_FC_DRIVER + ".get_lun_number") - @mock.patch(NIMBLE_RANDOM) - def test_initialize_connection_fc_igroup_not_exist(self, mock_random, - mock_lun_number, - mock_wwpns): - mock_random.sample.return_value = 'abcdefghijkl' - mock_lun_number.return_value = 13 - mock_wwpns.return_value = ["1111111111111101"] - 
self.mock_client_service.get_initiator_grp_list.return_value = ( - FAKE_IGROUP_LIST_RESPONSE_FC) - expected_res = { - 'driver_volume_type': 'fibre_channel', - 'data': { - 'target_lun': 13, - 'target_discovered': True, - 'target_wwn': ["1111111111111101"], - 'initiator_target_map': {'1000000000000000': - ['1111111111111101']}}} - - self.driver._create_igroup_for_initiator("test-initiator3", - [1111111111111101]) - self.assertEqual( - expected_res, - self.driver.initialize_connection( - {'name': 'test-volume', - 'provider_location': 'array1', - 'id': 12}, - {'initiator': 'test-initiator3', - 'wwpns': ['1000000000000000']})) - - expected_calls = [mock.call.create_initiator_group_fc( - 'openstack-abcdefghijkl'), - mock.call.add_initiator_to_igroup_fc('openstack-abcdefghijkl', - 1111111111111101)] - self.mock_client_service.assert_has_calls(expected_calls) - - @mock.patch(NIMBLE_URLLIB2) - @mock.patch(NIMBLE_CLIENT) - @mock.patch.object(obj_volume.VolumeList, 'get_all_by_host', - mock.Mock(return_value=[])) - @NimbleDriverBaseTestCase.client_mock_decorator(create_configuration( - 'nimble', 'nimble_pass', '10.18.108.55', 'default', '*')) - def test_terminate_connection_positive(self): - self.mock_client_service.get_initiator_grp_list.return_value = ( - FAKE_IGROUP_LIST_RESPONSE) - self.driver.terminate_connection( - {'name': 'test-volume', - 'provider_location': '12 13', - 'id': 12}, - {'initiator': 'test-initiator1'}) - expected_calls = [mock.call._get_igroupname_for_initiator( - 'test-initiator1'), - mock.call.remove_acl({'name': 'test-volume'}, - 'test-igrp1')] - self.mock_client_service.assert_has_calls( - self.mock_client_service.method_calls, - expected_calls) - - @mock.patch(NIMBLE_URLLIB2) - @mock.patch(NIMBLE_CLIENT) - @mock.patch.object(obj_volume.VolumeList, 'get_all_by_host', - mock.Mock(return_value=[])) - @NimbleDriverBaseTestCase.client_mock_decorator_fc(create_configuration( - 'nimble', 'nimble_pass', '10.18.108.55', 'default', '*')) - 
@mock.patch(NIMBLE_FC_DRIVER + ".get_wwpns_from_array") - def test_terminate_connection_positive_fc(self, mock_wwpns): - mock_wwpns.return_value = ["1111111111111101"] - self.mock_client_service.get_initiator_grp_list.return_value = ( - FAKE_IGROUP_LIST_RESPONSE_FC) - self.driver.terminate_connection( - {'name': 'test-volume', - 'provider_location': 'array1', - 'id': 12}, - {'initiator': 'test-initiator1', - 'wwpns': ['1000000000000000']}) - expected_calls = [ - mock.call.get_igroupname_for_initiator_fc( - "10:00:00:00:00:00:00:00"), - mock.call.remove_acl({'name': 'test-volume'}, - 'test-igrp1')] - self.mock_client_service.assert_has_calls( - self.mock_client_service.method_calls, - expected_calls) - - @mock.patch(NIMBLE_URLLIB2) - @mock.patch(NIMBLE_CLIENT) - @mock.patch.object(obj_volume.VolumeList, 'get_all_by_host', - mock.Mock(return_value=[])) - @NimbleDriverBaseTestCase.client_mock_decorator(create_configuration( - 'nimble', 'nimble_pass', '10.18.108.55', 'default', '*')) - def test_terminate_connection_negative(self): - self.mock_client_service.get_initiator_grp_list.return_value = ( - FAKE_IGROUP_LIST_RESPONSE) - self.assertRaises( - exception.VolumeDriverException, - self.driver.terminate_connection, - {'name': 'test-volume', - 'provider_location': '12 13', 'id': 12}, - {'initiator': 'test-initiator3'}) diff --git a/cinder/tests/unit/volume/drivers/test_prophetstor_dpl.py b/cinder/tests/unit/volume/drivers/test_prophetstor_dpl.py deleted file mode 100644 index f452d6d3e..000000000 --- a/cinder/tests/unit/volume/drivers/test_prophetstor_dpl.py +++ /dev/null @@ -1,931 +0,0 @@ -# Copyright (c) 2014 ProphetStor, Inc. -# All Rights Reserved. -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. 
You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import errno -import re - -import mock -from oslo_utils import units -from six.moves import http_client - -from cinder import context -from cinder import exception -from cinder.objects import fields -from cinder import test -from cinder.tests.unit import fake_constants -from cinder.tests.unit import fake_snapshot -from cinder.tests.unit import utils as test_utils -from cinder.volume import configuration as conf -from cinder.volume.drivers.prophetstor import dpl_iscsi as DPLDRIVER -from cinder.volume.drivers.prophetstor import dplcommon as DPLCOMMON -from cinder.volume import group_types - -POOLUUID = 'ac33fc6e417440d5a1ef27d7231e1cc4' -VOLUMEUUID = 'a000000000000000000000000000001' -INITIATOR = 'iqn.2013-08.org.debian:01:aaaaaaaa' -DATA_IN_CONNECTOR = {'initiator': INITIATOR} -DATA_SERVER_INFO = 0, { - 'metadata': {'vendor': 'ProphetStor', - 'version': '1.5'}} - -DATA_POOLS = 0, { - 'children': [POOLUUID] -} - -DATA_POOLINFO = 0, { - 'capabilitiesURI': '', - 'children': [], - 'childrenrange': '', - 'completionStatus': 'Complete', - 'metadata': {'available_capacity': 4294967296, - 'ctime': 1390551362349, - 'vendor': 'prophetstor', - 'version': '1.5', - 'display_description': 'Default Pool', - 'display_name': 'default_pool', - 'event_uuid': '4f7c4d679a664857afa4d51f282a516a', - 'physical_device': {'cache': [], - 'data': ['disk_uuid_0', - 'disk_uuid_1', - 'disk_uuid_2'], - 'log': [], - 'spare': []}, - 'pool_uuid': POOLUUID, - 'properties': {'raid_level': 'raid0'}, - 'state': 'Online', - 'used_capacity': 0, - 'total_capacity': 4294967296, - 'zpool_guid': 
'8173612007304181810'}, - 'objectType': 'application/cdmi-container', - 'percentComplete': 100} - -DATA_ASSIGNVDEV = 0, { - 'children': [], - 'childrenrange': '', - 'completionStatus': 'Complete', - 'domainURI': '', - 'exports': {'Network/iSCSI': [ - {'logical_unit_name': '', - 'logical_unit_number': '101', - 'permissions': [INITIATOR], - 'portals': ['172.31.1.210:3260'], - 'target_identifier': - 'iqn.2013-09.com.prophetstor:hypervisor.886423051816' - }]}, - 'metadata': {'ctime': 0, - 'event_uuid': 'c11e90287e9348d0b4889695f1ec4be5', - 'type': 'volume'}, - 'objectID': '', - 'objectName': 'd827e23d403f4f12bb208a6fec208fd8', - 'objectType': 'application/cdmi-container', - 'parentID': '8daa374670af447e8efea27e16bf84cd', - 'parentURI': '/dpl_volume', - 'snapshots': [] -} - -DATA_OUTPUT = 0, None - -MOD_OUTPUT = {'status': 'available'} - -DATA_IN_GROUP = {'id': 'fe2dbc51-5810-451d-ab2f-8c8a48d15bee', - 'name': 'group123', - 'description': 'des123', - 'status': ''} - -DATA_IN_VOLUME = {'id': 'c11e902-87e9-348d-0b48-89695f1ec4be5', - 'display_name': 'abc123', - 'display_description': '', - 'size': 10, - 'host': "hostname@backend#%s" % POOLUUID} - -DATA_IN_VOLUME_VG = {'id': 'fe2dbc5-1581-0451-dab2-f8c8a48d15bee', - 'display_name': 'abc123', - 'display_description': '', - 'size': 10, - 'group_id': - 'fe2dbc51-5810-451d-ab2f-8c8a48d15bee', - 'status': 'available', - 'host': "hostname@backend#%s" % POOLUUID} - -DATA_IN_REMOVE_VOLUME_VG = { - 'id': 'fe2dbc515810451dab2f8c8a48d15bee', - 'display_name': 'fe2dbc515810451dab2f8c8a48d15bee', - 'display_description': '', - 'size': 10, - 'group_id': 'fe2dbc51-5810-451d-ab2f-8c8a48d15bee', - 'status': 'available', - 'host': "hostname@backend#%s" % POOLUUID} - -DATA_IN_VOLUME1 = {'id': 'c11e902-87e9-348d-0b48-89695f1ec4bef', - 'display_name': 'abc456', - 'display_description': '', - 'size': 10, - 'host': "hostname@backend#%s" % POOLUUID} - -DATA_IN_CG_SNAPSHOT = { - 'group_id': 'fe2dbc51-5810-451d-ab2f-8c8a48d15bee', - 'id': 
'cgsnapshot1', - 'name': 'cgsnapshot1', - 'description': 'cgsnapshot1', - 'status': ''} - -DATA_IN_SNAPSHOT = {'id': 'fe2dbc5-1581-0451-dab2-f8c8a48d15bee', - 'volume_id': 'c11e902-87e9-348d-0b48-89695f1ec4be5', - 'display_name': 'snapshot1', - 'display_description': '', - 'volume_size': 5} - -DATA_OUT_SNAPSHOT_CG = { - 'id': 'snapshot1', - 'volume_id': 'c11e902-87e9-348d-0b48-89695f1ec4be5', - 'display_name': 'snapshot1', - 'display_description': '', - 'group_snapshot_id': 'fe2dbc51-5810-451d-ab2f-8c8a48d15bee'} - -DATA_OUT_CG = { - "objectType": "application/cdmi-container", - "objectID": "fe2dbc515810451dab2f8c8a48d15bee", - "objectName": "", - "parentURI": "/dpl_volgroup", - "parentID": "fe2dbc515810451dab2f8c8a48d15bee", - "domainURI": "", - "capabilitiesURI": "", - "completionStatus": "Complete", - "percentComplete": 100, - "metadata": - { - "type": "volume|snapshot|replica", - "volume_group_uuid": "", - "origin_uuid": "", - "snapshot_uuid": "", - "display_name": "", - "display_description": "", - "ctime": 12345678, - "total_capacity": 1024, - "snapshot_used_capacity": 0, - "maximum_snapshot": 1024, - "snapshot_quota": 0, - "state": "", - "properties": - { - "snapshot_rotation": True, - } - }, - "childrenrange": "", - "children": - [ - 'fe2dbc515810451dab2f8c8a48d15bee', - ], -} - - -class TestProphetStorDPLVolume(test.TestCase): - - def _gen_snapshot_url(self, vdevid, snapshotid): - snapshot_url = '/%s/%s/%s' % (vdevid, DPLCOMMON.DPL_OBJ_SNAPSHOT, - snapshotid) - return snapshot_url - - def setUp(self): - super(TestProphetStorDPLVolume, self).setUp() - self.dplcmd = DPLCOMMON.DPLVolume('1.1.1.1', 8356, 'admin', 'password') - self.DPL_MOCK = mock.MagicMock() - self.dplcmd.objCmd = self.DPL_MOCK - self.DPL_MOCK.send_cmd.return_value = DATA_OUTPUT - - def test_getserverinfo(self): - self.dplcmd.get_server_info() - self.DPL_MOCK.send_cmd.assert_called_once_with( - 'GET', - '/%s/%s/' % (DPLCOMMON.DPL_VER_V1, DPLCOMMON.DPL_OBJ_SYSTEM), - None, - [http_client.OK, 
http_client.ACCEPTED]) - - def test_createvdev(self): - self.dplcmd.create_vdev(DATA_IN_VOLUME['id'], - DATA_IN_VOLUME['display_name'], - DATA_IN_VOLUME['display_description'], - POOLUUID, - int(DATA_IN_VOLUME['size']) * units.Gi) - - metadata = {} - metadata['display_name'] = DATA_IN_VOLUME['display_name'] - metadata['display_description'] = DATA_IN_VOLUME['display_description'] - metadata['pool_uuid'] = POOLUUID - metadata['total_capacity'] = int(DATA_IN_VOLUME['size']) * units.Gi - metadata['maximum_snapshot'] = 1024 - metadata['properties'] = dict(thin_provision=True) - params = {} - params['metadata'] = metadata - self.DPL_MOCK.send_cmd.assert_called_once_with( - 'PUT', - '/%s/%s/%s/' % (DPLCOMMON.DPL_VER_V1, DPLCOMMON.DPL_OBJ_VOLUME, - DATA_IN_VOLUME['id']), - params, - [http_client.OK, http_client.ACCEPTED, http_client.CREATED]) - - def test_extendvdev(self): - self.dplcmd.extend_vdev(DATA_IN_VOLUME['id'], - DATA_IN_VOLUME['display_name'], - DATA_IN_VOLUME['display_description'], - int(DATA_IN_VOLUME['size']) * units.Gi) - metadata = {} - metadata['display_name'] = DATA_IN_VOLUME['display_name'] - metadata['display_description'] = DATA_IN_VOLUME['display_description'] - metadata['total_capacity'] = int(DATA_IN_VOLUME['size']) * units.Gi - metadata['maximum_snapshot'] = 1024 - params = {} - params['metadata'] = metadata - self.DPL_MOCK.send_cmd.assert_called_once_with( - 'PUT', - '/%s/%s/%s/' % (DPLCOMMON.DPL_VER_V1, DPLCOMMON.DPL_OBJ_VOLUME, - DATA_IN_VOLUME['id']), - params, - [http_client.OK, http_client.ACCEPTED, http_client.CREATED]) - - def test_deletevdev(self): - self.dplcmd.delete_vdev(DATA_IN_VOLUME['id'], True) - metadata = {} - params = {} - metadata['force'] = True - params['metadata'] = metadata - self.DPL_MOCK.send_cmd.assert_called_once_with( - 'DELETE', - '/%s/%s/%s/' % (DPLCOMMON.DPL_VER_V1, DPLCOMMON.DPL_OBJ_VOLUME, - DATA_IN_VOLUME['id']), - params, - [http_client.OK, http_client.ACCEPTED, http_client.NOT_FOUND, - http_client.NO_CONTENT]) 
- - def test_createvdevfromsnapshot(self): - self.dplcmd.create_vdev_from_snapshot( - DATA_IN_VOLUME['id'], - DATA_IN_VOLUME['display_name'], - DATA_IN_VOLUME['display_description'], - DATA_IN_SNAPSHOT['id'], - POOLUUID) - metadata = {} - params = {} - metadata['snapshot_operation'] = 'copy' - metadata['display_name'] = DATA_IN_VOLUME['display_name'] - metadata['display_description'] = DATA_IN_VOLUME['display_description'] - metadata['pool_uuid'] = POOLUUID - metadata['maximum_snapshot'] = 1024 - metadata['properties'] = dict(thin_provision=True) - params['metadata'] = metadata - params['copy'] = self._gen_snapshot_url(DATA_IN_VOLUME['id'], - DATA_IN_SNAPSHOT['id']) - self.DPL_MOCK.send_cmd.assert_called_once_with( - 'PUT', - '/%s/%s/%s/' % (DPLCOMMON.DPL_VER_V1, DPLCOMMON.DPL_OBJ_VOLUME, - DATA_IN_VOLUME['id']), - params, - [http_client.OK, http_client.ACCEPTED, http_client.CREATED]) - - def test_getpool(self): - self.dplcmd.get_pool(POOLUUID) - self.DPL_MOCK.send_cmd.assert_called_once_with( - 'GET', - '/%s/%s/%s/' % (DPLCOMMON.DPL_VER_V1, DPLCOMMON.DPL_OBJ_POOL, - POOLUUID), - None, - [http_client.OK, http_client.ACCEPTED]) - - def test_clonevdev(self): - self.dplcmd.clone_vdev( - DATA_IN_VOLUME['id'], - DATA_IN_VOLUME1['id'], - POOLUUID, - DATA_IN_VOLUME['display_name'], - DATA_IN_VOLUME['display_description'], - int(DATA_IN_VOLUME['size']) * units.Gi - ) - metadata = {} - params = {} - metadata["snapshot_operation"] = "clone" - metadata["display_name"] = DATA_IN_VOLUME['display_name'] - metadata["display_description"] = DATA_IN_VOLUME['display_description'] - metadata["pool_uuid"] = POOLUUID - metadata["total_capacity"] = int(DATA_IN_VOLUME['size']) * units.Gi - metadata['maximum_snapshot'] = 1024 - metadata['properties'] = dict(thin_provision=True) - params["metadata"] = metadata - params["copy"] = DATA_IN_VOLUME['id'] - - self.DPL_MOCK.send_cmd.assert_called_once_with( - 'PUT', - '/%s/%s/%s/' % (DPLCOMMON.DPL_VER_V1, DPLCOMMON.DPL_OBJ_VOLUME, - 
DATA_IN_VOLUME1['id']), - params, - [http_client.OK, http_client.CREATED, http_client.ACCEPTED]) - - def test_createvdevsnapshot(self): - self.dplcmd.create_vdev_snapshot( - DATA_IN_VOLUME['id'], - DATA_IN_SNAPSHOT['id'], - DATA_IN_SNAPSHOT['display_name'], - DATA_IN_SNAPSHOT['display_description'] - ) - metadata = {} - params = {} - metadata['display_name'] = DATA_IN_SNAPSHOT['display_name'] - metadata['display_description'] = ( - DATA_IN_SNAPSHOT['display_description']) - params['metadata'] = metadata - params['snapshot'] = DATA_IN_SNAPSHOT['id'] - - self.DPL_MOCK.send_cmd.assert_called_once_with( - 'PUT', - '/%s/%s/%s/' % (DPLCOMMON.DPL_VER_V1, DPLCOMMON.DPL_OBJ_VOLUME, - DATA_IN_VOLUME['id']), - params, - [http_client.OK, http_client.CREATED, http_client.ACCEPTED]) - - def test_getvdev(self): - self.dplcmd.get_vdev(DATA_IN_VOLUME['id']) - self.DPL_MOCK.send_cmd.assert_called_once_with( - 'GET', - '/%s/%s/%s/' % (DPLCOMMON.DPL_VER_V1, DPLCOMMON.DPL_OBJ_VOLUME, - DATA_IN_VOLUME['id']), - None, - [http_client.OK, http_client.ACCEPTED, http_client.NOT_FOUND]) - - def test_getvdevstatus(self): - self.dplcmd.get_vdev_status(DATA_IN_VOLUME['id'], '123456') - self.DPL_MOCK.send_cmd.assert_called_once_with( - 'GET', - '/%s/%s/%s/?event_uuid=%s' % (DPLCOMMON.DPL_VER_V1, - DPLCOMMON.DPL_OBJ_VOLUME, - DATA_IN_VOLUME['id'], - '123456'), - None, - [http_client.OK, http_client.NOT_FOUND]) - - def test_getpoolstatus(self): - self.dplcmd.get_pool_status(POOLUUID, '123456') - self.DPL_MOCK.send_cmd.assert_called_once_with( - 'GET', - '/%s/%s/%s/?event_uuid=%s' % (DPLCOMMON.DPL_VER_V1, - DPLCOMMON.DPL_OBJ_POOL, - POOLUUID, - '123456'), - None, - [http_client.OK, http_client.NOT_FOUND]) - - def test_assignvdev(self): - self.dplcmd.assign_vdev( - DATA_IN_VOLUME['id'], - 'iqn.1993-08.org.debian:01:test1', - '', - '1.1.1.1:3260', - 0 - ) - params = {} - metadata = {} - exports = {} - metadata['export_operation'] = 'assign' - exports['Network/iSCSI'] = {} - target_info = {} - 
target_info['logical_unit_number'] = 0 - target_info['logical_unit_name'] = '' - permissions = [] - portals = [] - portals.append('1.1.1.1:3260') - permissions.append('iqn.1993-08.org.debian:01:test1') - target_info['permissions'] = permissions - target_info['portals'] = portals - exports['Network/iSCSI'] = target_info - - params['metadata'] = metadata - params['exports'] = exports - self.DPL_MOCK.send_cmd.assert_called_once_with( - 'PUT', - '/%s/%s/%s/' % (DPLCOMMON.DPL_VER_V1, - DPLCOMMON.DPL_OBJ_VOLUME, - DATA_IN_VOLUME['id']), - params, - [http_client.OK, http_client.ACCEPTED, http_client.CREATED]) - - def test_unassignvdev(self): - self.dplcmd.unassign_vdev(DATA_IN_VOLUME['id'], - 'iqn.1993-08.org.debian:01:test1', - '') - params = {} - metadata = {} - exports = {} - metadata['export_operation'] = 'unassign' - params['metadata'] = metadata - - exports['Network/iSCSI'] = {} - exports['Network/iSCSI']['target_identifier'] = '' - permissions = [] - permissions.append('iqn.1993-08.org.debian:01:test1') - exports['Network/iSCSI']['permissions'] = permissions - - params['exports'] = exports - self.DPL_MOCK.send_cmd.assert_called_once_with( - 'PUT', - '/%s/%s/%s/' % (DPLCOMMON.DPL_VER_V1, - DPLCOMMON.DPL_OBJ_VOLUME, - DATA_IN_VOLUME['id']), - params, - [http_client.OK, http_client.ACCEPTED, - http_client.NO_CONTENT, http_client.NOT_FOUND]) - - def test_deletevdevsnapshot(self): - self.dplcmd.delete_vdev_snapshot(DATA_IN_VOLUME['id'], - DATA_IN_SNAPSHOT['id']) - params = {} - params['copy'] = self._gen_snapshot_url(DATA_IN_VOLUME['id'], - DATA_IN_SNAPSHOT['id']) - self.DPL_MOCK.send_cmd.assert_called_once_with( - 'DELETE', - '/%s/%s/%s/%s/%s/' % (DPLCOMMON.DPL_VER_V1, - DPLCOMMON.DPL_OBJ_VOLUME, - DATA_IN_VOLUME['id'], - DPLCOMMON.DPL_OBJ_SNAPSHOT, - DATA_IN_SNAPSHOT['id']), - None, - [http_client.OK, http_client.ACCEPTED, http_client.NO_CONTENT, - http_client.NOT_FOUND]) - - def test_listvdevsnapshots(self): - self.dplcmd.list_vdev_snapshots(DATA_IN_VOLUME['id']) - 
self.DPL_MOCK.send_cmd.assert_called_once_with( - 'GET', - '/%s/%s/%s/%s/' % (DPLCOMMON.DPL_VER_V1, - DPLCOMMON.DPL_OBJ_VOLUME, - DATA_IN_VOLUME['id'], - DPLCOMMON.DPL_OBJ_SNAPSHOT), - None, - [http_client.OK]) - - -class TestProphetStorDPLDriver(test.TestCase): - - def __init__(self, method): - super(TestProphetStorDPLDriver, self).__init__(method) - - def _conver_uuid2hex(self, strID): - return strID.replace('-', '') - - def setUp(self): - super(TestProphetStorDPLDriver, self).setUp() - self.configuration = mock.Mock(conf.Configuration) - self.configuration.san_ip = '1.1.1.1' - self.configuration.dpl_port = 8356 - self.configuration.san_login = 'admin' - self.configuration.san_password = 'password' - self.configuration.dpl_pool = POOLUUID - self.configuration.iscsi_port = 3260 - self.configuration.san_is_local = False - self.configuration.san_thin_provision = True - self.configuration.driver_ssl_cert_verify = False - self.configuration.driver_ssl_cert_path = None - self.context = context.get_admin_context() - self.DPL_MOCK = mock.MagicMock() - self.DB_MOCK = mock.MagicMock() - self.dpldriver = DPLDRIVER.DPLISCSIDriver( - configuration=self.configuration) - self.dpldriver.dpl = self.DPL_MOCK - self.dpldriver.db = self.DB_MOCK - self.dpldriver.do_setup(self.context) - - def test_get_volume_stats(self): - self.DPL_MOCK.get_pool.return_value = DATA_POOLINFO - self.DPL_MOCK.get_server_info.return_value = DATA_SERVER_INFO - res = self.dpldriver.get_volume_stats(True) - self.assertEqual('ProphetStor', res['vendor_name']) - self.assertEqual('1.5', res['driver_version']) - pool = res["pools"][0] - self.assertEqual(4, pool['total_capacity_gb']) - self.assertEqual(4, pool['free_capacity_gb']) - self.assertEqual(0, pool['reserved_percentage']) - self.assertFalse(pool['QoS_support']) - - def test_create_volume(self): - volume = test_utils.create_volume( - self.context, - id=DATA_IN_VOLUME['id'], - display_name=DATA_IN_VOLUME['display_name'], - size=DATA_IN_VOLUME['size'], - 
host=DATA_IN_VOLUME['host']) - self.DPL_MOCK.create_vdev.return_value = DATA_OUTPUT - self.dpldriver.create_volume(volume) - self.DPL_MOCK.create_vdev.assert_called_once_with( - self._conver_uuid2hex(volume.id), - volume.display_name, - volume.display_description, - self.configuration.dpl_pool, - int(volume.size) * units.Gi, - True) - - def test_create_volume_without_pool(self): - volume = test_utils.create_volume( - self.context, - id=DATA_IN_VOLUME['id'], - display_name=DATA_IN_VOLUME['display_name'], - size=DATA_IN_VOLUME['size'], - host=DATA_IN_VOLUME['host']) - self.DPL_MOCK.create_vdev.return_value = DATA_OUTPUT - self.configuration.dpl_pool = "" - volume.host = "host@backend" # missing pool - self.assertRaises(exception.InvalidHost, self.dpldriver.create_volume, - volume=volume) - - def test_create_volume_with_configuration_pool(self): - volume = test_utils.create_volume( - self.context, - id=DATA_IN_VOLUME['id'], - display_name=DATA_IN_VOLUME['display_name'], - size=DATA_IN_VOLUME['size'], - host="host@backend") - self.DPL_MOCK.create_vdev.return_value = DATA_OUTPUT - self.dpldriver.create_volume(volume) - self.DPL_MOCK.create_vdev.assert_called_once_with( - self._conver_uuid2hex(volume.id), - volume.display_name, volume.display_description, - self.configuration.dpl_pool, int(volume.size) * units.Gi, True) - - def test_create_volume_of_group(self): - group_type = group_types.create( - self.context, - 'group', - {'consistent_group_snapshot_enabled': ' True'} - ) - group = test_utils.create_group( - self.context, - id=fake_constants.CONSISTENCY_GROUP_ID, - host='host@backend#unit_test_pool', - group_type_id=group_type.id) - self.DPL_MOCK.create_vdev.return_value = DATA_OUTPUT - self.DPL_MOCK.join_vg.return_value = DATA_OUTPUT - volume = test_utils.create_volume( - self.context, - id=DATA_IN_VOLUME_VG['id'], - display_name=DATA_IN_VOLUME_VG['display_name'], - size=DATA_IN_VOLUME_VG['size'], - group_id=group.id, - host=DATA_IN_VOLUME_VG['host']) - 
self.dpldriver.create_volume(volume) - self.DPL_MOCK.create_vdev.assert_called_once_with( - self._conver_uuid2hex(volume.id), - volume.display_name, - volume.display_description, - self.configuration.dpl_pool, - int(volume.size) * units.Gi, - True) - self.DPL_MOCK.join_vg.assert_called_once_with( - self._conver_uuid2hex(volume.id), - self._conver_uuid2hex(volume.group_id)) - - def test_delete_volume(self): - volume = test_utils.create_volume( - self.context, - id=DATA_IN_VOLUME['id'], - display_name=DATA_IN_VOLUME['display_name'], - size=DATA_IN_VOLUME['size'], - host=DATA_IN_VOLUME['host']) - self.DPL_MOCK.delete_vdev.return_value = DATA_OUTPUT - self.dpldriver.delete_volume(volume) - self.DPL_MOCK.delete_vdev.assert_called_once_with( - self._conver_uuid2hex(volume.id)) - - def test_delete_volume_of_group(self): - group_type = group_types.create( - self.context, - 'group', - {'consistent_group_snapshot_enabled': ' True'} - ) - group = test_utils.create_group( - self.context, - id=fake_constants.CONSISTENCY_GROUP_ID, - host='host@backend#unit_test_pool', - group_type_id=group_type.id) - volume = test_utils.create_volume( - self.context, - id=DATA_IN_VOLUME_VG['id'], - display_name=DATA_IN_VOLUME_VG['display_name'], - size=DATA_IN_VOLUME_VG['size'], - group_id=group.id, - host=DATA_IN_VOLUME_VG['host']) - self.DPL_MOCK.delete_vdev.return_value = DATA_OUTPUT - self.DPL_MOCK.leave_vg.return_volume = DATA_OUTPUT - self.dpldriver.delete_volume(volume) - self.DPL_MOCK.leave_vg.assert_called_once_with( - self._conver_uuid2hex(volume.id), - self._conver_uuid2hex(volume.group_id) - ) - self.DPL_MOCK.delete_vdev.assert_called_once_with( - self._conver_uuid2hex(volume.id)) - - def test_create_volume_from_snapshot(self): - self.DPL_MOCK.create_vdev_from_snapshot.return_value = DATA_OUTPUT - self.DPL_MOCK.extend_vdev.return_value = DATA_OUTPUT - volume = test_utils.create_volume( - self.context, - id=DATA_IN_VOLUME_VG['id'], - display_name=DATA_IN_VOLUME_VG['display_name'], - 
size=DATA_IN_VOLUME_VG['size'], - host=DATA_IN_VOLUME_VG['host']) - self.dpldriver.create_volume_from_snapshot( - volume, DATA_IN_SNAPSHOT) - self.DPL_MOCK.create_vdev_from_snapshot.assert_called_once_with( - self._conver_uuid2hex(volume.id), - volume.display_name, - volume.display_description, - self._conver_uuid2hex(volume.id), - self.configuration.dpl_pool, - True) - self.DPL_MOCK.extend_vdev.assert_called_once_with( - self._conver_uuid2hex(volume.id), - volume.display_name, - volume.display_description, - volume.size * units.Gi) - - def test_create_cloned_volume(self): - new_volume = test_utils.create_volume( - self.context, - id=DATA_IN_VOLUME1['id'], - display_name=DATA_IN_VOLUME1['display_name'], - size=DATA_IN_VOLUME1['size'], - host=DATA_IN_VOLUME1['host']) - src_volume = test_utils.create_volume( - self.context, - id=DATA_IN_VOLUME['id']) - self.DPL_MOCK.clone_vdev.return_value = DATA_OUTPUT - self.dpldriver.create_cloned_volume(new_volume, src_volume) - self.DPL_MOCK.clone_vdev.assert_called_once_with( - self._conver_uuid2hex(src_volume.id), - self._conver_uuid2hex(new_volume.id), - self.configuration.dpl_pool, - new_volume.display_name, - new_volume.display_description, - int(new_volume.size) * - units.Gi, - True) - - def test_create_snapshot(self): - self.DPL_MOCK.create_vdev_snapshot.return_value = DATA_OUTPUT - self.dpldriver.create_snapshot(DATA_IN_SNAPSHOT) - self.DPL_MOCK.create_vdev_snapshot.assert_called_once_with( - self._conver_uuid2hex(DATA_IN_SNAPSHOT['volume_id']), - self._conver_uuid2hex(DATA_IN_SNAPSHOT['id']), - DATA_IN_SNAPSHOT['display_name'], - DATA_IN_SNAPSHOT['display_description']) - - def test_delete_snapshot(self): - self.DPL_MOCK.delete_vdev_snapshot.return_value = DATA_OUTPUT - self.dpldriver.delete_snapshot(DATA_IN_SNAPSHOT) - self.DPL_MOCK.delete_vdev_snapshot.assert_called_once_with( - self._conver_uuid2hex(DATA_IN_SNAPSHOT['volume_id']), - self._conver_uuid2hex(DATA_IN_SNAPSHOT['id'])) - - def 
test_initialize_connection(self): - self.DPL_MOCK.assign_vdev.return_value = DATA_ASSIGNVDEV - self.DPL_MOCK.get_vdev.return_value = DATA_ASSIGNVDEV - res = self.dpldriver.initialize_connection(DATA_IN_VOLUME, - DATA_IN_CONNECTOR) - self.assertEqual('iscsi', res['driver_volume_type']) - self.assertEqual(101, res['data']['target_lun']) - self.assertTrue(res['data']['target_discovered']) - self.assertEqual('172.31.1.210:3260', res['data']['target_portal']) - self.assertEqual( - 'iqn.2013-09.com.prophetstor:hypervisor.886423051816', - res['data']['target_iqn']) - - def test_terminate_connection(self): - self.DPL_MOCK.unassign_vdev.return_value = DATA_OUTPUT - self.dpldriver.terminate_connection(DATA_IN_VOLUME, DATA_IN_CONNECTOR) - self.DPL_MOCK.unassign_vdev.assert_called_once_with( - self._conver_uuid2hex(DATA_IN_VOLUME['id']), - DATA_IN_CONNECTOR['initiator']) - - def test_terminate_connection_volume_detached(self): - self.DPL_MOCK.unassign_vdev.return_value = errno.ENODATA, None - self.dpldriver.terminate_connection(DATA_IN_VOLUME, DATA_IN_CONNECTOR) - self.DPL_MOCK.unassign_vdev.assert_called_once_with( - self._conver_uuid2hex(DATA_IN_VOLUME['id']), - DATA_IN_CONNECTOR['initiator']) - - def test_terminate_connection_failed(self): - self.DPL_MOCK.unassign_vdev.return_value = errno.EFAULT, None - ex = self.assertRaises( - exception.VolumeBackendAPIException, - self.dpldriver.terminate_connection, - volume=DATA_IN_VOLUME, connector=DATA_IN_CONNECTOR) - self.assertTrue( - re.match(r".*Flexvisor failed", ex.msg)) - - def test_get_pool_info(self): - self.DPL_MOCK.get_pool.return_value = DATA_POOLINFO - _, res = self.dpldriver._get_pool_info(POOLUUID) - self.assertEqual(4294967296, res['metadata']['available_capacity']) - self.assertEqual(1390551362349, res['metadata']['ctime']) - self.assertEqual('Default Pool', - res['metadata']['display_description']) - self.assertEqual('default_pool', - res['metadata']['display_name']) - 
self.assertEqual('4f7c4d679a664857afa4d51f282a516a', - res['metadata']['event_uuid']) - self.assertEqual( - {'cache': [], - 'data': ['disk_uuid_0', 'disk_uuid_1', 'disk_uuid_2'], - 'log': [], - 'spare': []}, - res['metadata']['physical_device']) - self.assertEqual(POOLUUID, res['metadata']['pool_uuid']) - self.assertEqual( - {'raid_level': 'raid0'}, - res['metadata']['properties']) - self.assertEqual('Online', res['metadata']['state']) - self.assertEqual(4294967296, res['metadata']['total_capacity']) - self.assertEqual('8173612007304181810', res['metadata']['zpool_guid']) - - def test_create_group(self): - group_type = group_types.create( - self.context, - 'group', - {'consistent_group_snapshot_enabled': ' True'} - ) - group = test_utils.create_group( - self.context, - id=fake_constants.CONSISTENCY_GROUP_ID, - host='host@backend#unit_test_pool', - group_type_id=group_type.id) - self.DPL_MOCK.create_vg.return_value = DATA_OUTPUT - model_update = self.dpldriver.create_group(self.context, group) - self.DPL_MOCK.create_vg.assert_called_once_with( - self._conver_uuid2hex(fake_constants.CONSISTENCY_GROUP_ID), - 'test_group', - 'this is a test group') - self.assertDictEqual({'status': ( - fields.ConsistencyGroupStatus.AVAILABLE)}, model_update) - - def test_delete_group(self): - group_type = group_types.create( - self.context, - 'group', - {'consistent_group_snapshot_enabled': ' True'} - ) - group = test_utils.create_group( - self.context, - id=fake_constants.CONSISTENCY_GROUP_ID, - host='host@backend#unit_test_pool', - group_type_id=group_type.id) - self.DB_MOCK.volume_get_all_by_group.return_value = ( - [DATA_IN_VOLUME_VG]) - self.DPL_MOCK.delete_vdev.return_value = DATA_OUTPUT - self.DPL_MOCK.delete_cg.return_value = DATA_OUTPUT - model_update, volumes = self.dpldriver.delete_group( - self.context, group, []) - self.DPL_MOCK.delete_vg.assert_called_once_with( - self._conver_uuid2hex(fake_constants.CONSISTENCY_GROUP_ID)) - 
self.DPL_MOCK.delete_vdev.assert_called_once_with( - self._conver_uuid2hex((DATA_IN_VOLUME_VG['id']))) - self.assertDictEqual({'status': ( - fields.ConsistencyGroupStatus.DELETED)}, model_update) - - def test_update_group(self): - group_type = group_types.create( - self.context, - 'group', - {'consistent_group_snapshot_enabled': ' True'} - ) - self.DPL_MOCK.get_vg.return_value = (0, DATA_OUT_CG) - self.DPL_MOCK.join_vg.return_value = DATA_OUTPUT - self.DPL_MOCK.leave_vg.return_value = DATA_OUTPUT - group = test_utils.create_group( - self.context, - id='fe2dbc51-5810-451d-ab2f-8c8a48d15bee', - host='host@backend#unit_test_pool', - group_type_id=group_type.id) - vol_add = test_utils.create_volume( - self.context, - id=fake_constants.VOLUME2_ID, - display_name=DATA_IN_VOLUME_VG['display_name'], - size=DATA_IN_VOLUME_VG['size'], - group_id='fe2dbc51-5810-451d-ab2f-8c8a48d15bee', - host=DATA_IN_VOLUME_VG['host']) - vol_del = test_utils.create_volume( - self.context, - id=DATA_IN_REMOVE_VOLUME_VG['id'], - display_name=DATA_IN_REMOVE_VOLUME_VG['display_name'], - size=DATA_IN_REMOVE_VOLUME_VG['size'], - group_id='fe2dbc51-5810-451d-ab2f-8c8a48d15bee', - host=DATA_IN_REMOVE_VOLUME_VG['host']) - (model_update, add_vols, remove_vols) = ( - self.dpldriver.update_group( - self.context, group, [vol_add], [vol_del])) - self.DPL_MOCK.join_vg.assert_called_once_with( - self._conver_uuid2hex(vol_add.id), - self._conver_uuid2hex(group.id)) - self.DPL_MOCK.leave_vg.assert_called_once_with( - self._conver_uuid2hex(vol_del.id), - self._conver_uuid2hex(group.id)) - self.assertDictEqual({'status': ( - fields.ConsistencyGroupStatus.AVAILABLE)}, model_update) - - def test_update_group_exception_join(self): - group_type = group_types.create( - self.context, - 'group', - {'consistent_group_snapshot_enabled': ' True'} - ) - self.DPL_MOCK.get_vg.return_value = (0, DATA_OUT_CG) - self.DPL_MOCK.join_vg.return_value = -1, None - self.DPL_MOCK.leave_vg.return_value = DATA_OUTPUT - volume = 
test_utils.create_volume( - self.context, - id=fake_constants.VOLUME2_ID, - display_name=DATA_IN_VOLUME_VG['display_name'], - size=DATA_IN_VOLUME_VG['size'], - host=DATA_IN_VOLUME_VG['host']) - group = test_utils.create_group( - self.context, - id=fake_constants.CONSISTENCY_GROUP_ID, - host='host@backend#unit_test_pool', - group_type_id=group_type.id) - self.assertRaises(exception.VolumeBackendAPIException, - self.dpldriver.update_group, - context=None, - group=group, - add_volumes=[volume], - remove_volumes=None) - - def test_update_group_exception_leave(self): - group_type = group_types.create( - self.context, - 'group', - {'consistent_group_snapshot_enabled': ' True'} - ) - self.DPL_MOCK.get_vg.return_value = (0, DATA_OUT_CG) - self.DPL_MOCK.leave_vg.return_value = -1, None - volume = test_utils.create_volume( - self.context, - id='fe2dbc51-5810-451d-ab2f-8c8a48d15bee', - display_name=DATA_IN_VOLUME_VG['display_name'], - size=DATA_IN_VOLUME_VG['size'], - host=DATA_IN_VOLUME_VG['host']) - group = test_utils.create_group( - self.context, - id=fake_constants.CONSISTENCY_GROUP_ID, - host='host@backend#unit_test_pool', - group_type_id=group_type.id) - self.assertRaises(exception.VolumeBackendAPIException, - self.dpldriver.update_group, - context=None, - group=group, - add_volumes=None, - remove_volumes=[volume]) - - @mock.patch( - 'cinder.objects.snapshot.SnapshotList.get_all_for_group_snapshot') - def test_create_group_snapshot(self, get_all_for_group_snapshot): - group_type = group_types.create( - self.context, - 'group', - {'consistent_group_snapshot_enabled': ' True'} - ) - snapshot_obj = fake_snapshot.fake_snapshot_obj(self.context) - snapshot_obj.group_id = \ - DATA_IN_CG_SNAPSHOT['group_id'] - snapshot_obj.group_type_id = group_type.id - get_all_for_group_snapshot.return_value = [snapshot_obj] - self.DPL_MOCK.create_vdev_snapshot.return_value = DATA_OUTPUT - model_update, snapshots = self.dpldriver.create_group_snapshot( - self.context, snapshot_obj, []) - 
self.assertDictEqual({'status': 'available'}, model_update) - - @mock.patch( - 'cinder.objects.snapshot.SnapshotList.get_all_for_group_snapshot') - def test_delete_group_snapshot(self, get_all_for_group_snapshot): - group_type = group_types.create( - self.context, - 'group', - {'consistent_group_snapshot_enabled': ' True'} - ) - snapshot_obj = fake_snapshot.fake_snapshot_obj(self.context) - snapshot_obj.group_id = \ - DATA_IN_CG_SNAPSHOT['group_id'] - snapshot_obj.group_type_id = group_type.id - get_all_for_group_snapshot.return_value = [snapshot_obj] - self.DPL_MOCK.delete_group_snapshot.return_value = DATA_OUTPUT - model_update, snapshots = self.dpldriver.delete_group_snapshot( - self.context, snapshot_obj, []) - self.DPL_MOCK.delete_vdev_snapshot.assert_called_once_with( - self._conver_uuid2hex(snapshot_obj.group_id), - self._conver_uuid2hex(snapshot_obj.id), - True) - self.assertDictEqual({'status': 'deleted'}, model_update) diff --git a/cinder/tests/unit/volume/drivers/test_pure.py b/cinder/tests/unit/volume/drivers/test_pure.py deleted file mode 100644 index bc8941581..000000000 --- a/cinder/tests/unit/volume/drivers/test_pure.py +++ /dev/null @@ -1,2990 +0,0 @@ -# Copyright (c) 2014 Pure Storage, Inc. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -from copy import deepcopy -import sys - -import ddt -import mock -from oslo_utils import units -from six.moves import http_client - -from cinder import exception -from cinder import test -from cinder.tests.unit import fake_constants as fake -from cinder.tests.unit import fake_group -from cinder.tests.unit import fake_snapshot -from cinder.tests.unit import fake_volume - - -def fake_retry(exceptions, interval=1, retries=3, backoff_rate=2): - def _decorator(f): - return f - return _decorator - -patch_retry = mock.patch('cinder.utils.retry', fake_retry) -patch_retry.start() -sys.modules['purestorage'] = mock.Mock() -from cinder.volume.drivers import pure - -# Only mock utils.retry for cinder.volume.drivers.pure import -patch_retry.stop() - -DRIVER_PATH = "cinder.volume.drivers.pure" -BASE_DRIVER_OBJ = DRIVER_PATH + ".PureBaseVolumeDriver" -ISCSI_DRIVER_OBJ = DRIVER_PATH + ".PureISCSIDriver" -FC_DRIVER_OBJ = DRIVER_PATH + ".PureFCDriver" -ARRAY_OBJ = DRIVER_PATH + ".FlashArray" - -GET_ARRAY_PRIMARY = {"version": "99.9.9", - "revision": "201411230504+8a400f7", - "array_name": "pure_target1", - "id": "primary_array_id"} - -GET_ARRAY_SECONDARY = {"version": "99.9.9", - "revision": "201411230504+8a400f7", - "array_name": "pure_target2", - "id": "secondary_array_id"} - -REPLICATION_TARGET_TOKEN = "12345678-abcd-1234-abcd-1234567890ab" -REPLICATION_PROTECTION_GROUP = "cinder-group" -REPLICATION_INTERVAL_IN_SEC = 900 -REPLICATION_RETENTION_SHORT_TERM = 14400 -REPLICATION_RETENTION_LONG_TERM = 6 -REPLICATION_RETENTION_LONG_TERM_PER_DAY = 3 - -PRIMARY_MANAGEMENT_IP = GET_ARRAY_PRIMARY["array_name"] -API_TOKEN = "12345678-abcd-1234-abcd-1234567890ab" -VOLUME_BACKEND_NAME = "Pure_iSCSI" -ISCSI_PORT_NAMES = ["ct0.eth2", "ct0.eth3", "ct1.eth2", "ct1.eth3"] -FC_PORT_NAMES = ["ct0.fc2", "ct0.fc3", "ct1.fc2", "ct1.fc3"] -ISCSI_IPS = ["10.0.0." 
+ str(i + 1) for i in range(len(ISCSI_PORT_NAMES))] -FC_WWNS = ["21000024ff59fe9" + str(i + 1) for i in range(len(FC_PORT_NAMES))] -HOSTNAME = "computenode1" -PURE_HOST_NAME = pure.PureBaseVolumeDriver._generate_purity_host_name(HOSTNAME) -PURE_HOST = { - "name": PURE_HOST_NAME, - "hgroup": None, - "iqn": [], - "wwn": [], -} -REST_VERSION = "1.2" -VOLUME_ID = "abcdabcd-1234-abcd-1234-abcdeffedcba" -VOLUME_TYPE_ID = "357aa1f1-4f9c-4f10-acec-626af66425ba" -VOLUME = { - "name": "volume-" + VOLUME_ID, - "id": VOLUME_ID, - "display_name": "fake_volume", - "size": 2, - "host": "irrelevant", - "volume_type": None, - "volume_type_id": VOLUME_TYPE_ID, - "replication_status": None, - "consistencygroup_id": None, - "provider_location": GET_ARRAY_PRIMARY["id"], - "group_id": None, -} -VOLUME_PURITY_NAME = VOLUME['name'] + '-cinder' -VOLUME_WITH_CGROUP = VOLUME.copy() -VOLUME_WITH_CGROUP['group_id'] = "4a2f7e3a-312a-40c5-96a8-536b8a0fe074" -VOLUME_WITH_CGROUP['consistencygroup_id'] = \ - "4a2f7e3a-312a-40c5-96a8-536b8a0fe074" -SRC_VOL_ID = "dc7a294d-5964-4379-a15f-ce5554734efc" -SRC_VOL = { - "name": "volume-" + SRC_VOL_ID, - "id": SRC_VOL_ID, - "display_name": 'fake_src', - "size": 2, - "host": "irrelevant", - "volume_type": None, - "volume_type_id": None, - "consistencygroup_id": None, - "group_id": None, -} -SNAPSHOT_ID = "04fe2f9a-d0c4-4564-a30d-693cc3657b47" -SNAPSHOT = { - "name": "snapshot-" + SNAPSHOT_ID, - "id": SNAPSHOT_ID, - "volume_id": SRC_VOL_ID, - "volume_name": "volume-" + SRC_VOL_ID, - "volume_size": 2, - "display_name": "fake_snapshot", - "cgsnapshot_id": None, - "cgsnapshot": None, - "group_snapshot_id": None, - "group_snapshot": None, -} -SNAPSHOT_PURITY_NAME = SRC_VOL["name"] + '-cinder.' 
+ SNAPSHOT["name"] -SNAPSHOT_WITH_CGROUP = SNAPSHOT.copy() -SNAPSHOT_WITH_CGROUP['group_snapshot'] = { - "group_id": "4a2f7e3a-312a-40c5-96a8-536b8a0fe044", -} -INITIATOR_IQN = "iqn.1993-08.org.debian:01:222" -INITIATOR_WWN = "5001500150015081abc" -ISCSI_CONNECTOR = {"initiator": INITIATOR_IQN, "host": HOSTNAME} -FC_CONNECTOR = {"wwpns": {INITIATOR_WWN}, "host": HOSTNAME} -TARGET_IQN = "iqn.2010-06.com.purestorage:flasharray.12345abc" -TARGET_WWN = "21000024ff59fe94" -TARGET_PORT = "3260" -INITIATOR_TARGET_MAP =\ - { - # _build_initiator_target_map() calls list(set()) on the list, - # we must also call list(set()) to get the exact same order - '5001500150015081abc': list(set(FC_WWNS)), - } -DEVICE_MAPPING =\ - { - "fabric": {'initiator_port_wwn_list': {INITIATOR_WWN}, - 'target_port_wwn_list': FC_WWNS - }, - } - -ISCSI_PORTS = [{"name": name, - "iqn": TARGET_IQN, - "portal": ip + ":" + TARGET_PORT, - "wwn": None, - } for name, ip in zip(ISCSI_PORT_NAMES, ISCSI_IPS)] -FC_PORTS = [{"name": name, - "iqn": None, - "portal": None, - "wwn": wwn, - } for name, wwn in zip(FC_PORT_NAMES, FC_WWNS)] -NON_ISCSI_PORT = { - "name": "ct0.fc1", - "iqn": None, - "portal": None, - "wwn": "5001500150015081", -} -PORTS_WITH = ISCSI_PORTS + [NON_ISCSI_PORT] -PORTS_WITHOUT = [NON_ISCSI_PORT] -VOLUME_CONNECTIONS = [ - {"host": "h1", "name": VOLUME["name"] + "-cinder"}, - {"host": "h2", "name": VOLUME["name"] + "-cinder"}, -] -TOTAL_CAPACITY = 50.0 -USED_SPACE = 32.1 -PROVISIONED_CAPACITY = 70.0 -DEFAULT_OVER_SUBSCRIPTION = 20 -SPACE_INFO = { - "capacity": TOTAL_CAPACITY * units.Gi, - "total": USED_SPACE * units.Gi, -} -SPACE_INFO_EMPTY = { - "capacity": TOTAL_CAPACITY * units.Gi, - "total": 0, -} - -PERF_INFO = { - 'writes_per_sec': 318, - 'usec_per_write_op': 255, - 'output_per_sec': 234240, - 'reads_per_sec': 15, - 'input_per_sec': 2827943, - 'time': '2015-12-17T21:50:55Z', - 'usec_per_read_op': 192, - 'queue_depth': 4, -} -PERF_INFO_RAW = [PERF_INFO] - -ISCSI_CONNECTION_INFO = { - 
"driver_volume_type": "iscsi", - "data": { - "target_discovered": False, - "discard": True, - "target_luns": [1, 1, 1, 1], - "target_iqns": [TARGET_IQN, TARGET_IQN, TARGET_IQN, TARGET_IQN], - "target_portals": [ISCSI_IPS[0] + ":" + TARGET_PORT, - ISCSI_IPS[1] + ":" + TARGET_PORT, - ISCSI_IPS[2] + ":" + TARGET_PORT, - ISCSI_IPS[3] + ":" + TARGET_PORT], - }, -} -FC_CONNECTION_INFO = { - "driver_volume_type": "fibre_channel", - "data": { - "target_wwn": FC_WWNS, - "target_lun": 1, - "target_discovered": True, - "initiator_target_map": INITIATOR_TARGET_MAP, - "discard": True, - }, -} -PURE_SNAPSHOT = { - "created": "2015-05-27T17:34:33Z", - "name": "vol1.snap1", - "serial": "8343DFDE2DAFBE40000115E4", - "size": 3221225472, - "source": "vol1" -} -PURE_PGROUP = { - "hgroups": None, - "hosts": None, - "name": "pg1", - "source": "pure01", - "targets": None, - "volumes": ["v1"] -} - -PGROUP_ON_TARGET_NOT_ALLOWED = { - "name": "array1:replicated_pgroup", - "hgroups": None, - "source": "array1", - "hosts": None, - "volumes": ["array1:replicated_volume"], - "time_remaining": None, - "targets": [{"name": "array2", - "allowed": False}]} -PGROUP_ON_TARGET_ALLOWED = { - "name": "array1:replicated_pgroup", - "hgroups": None, - "source": "array1", - "hosts": None, - "volumes": ["array1:replicated_volume"], - "time_remaining": None, - "targets": [{"name": "array2", - "allowed": True}]} -CONNECTED_ARRAY = { - "id": "6b1a7ce3-da61-0d86-65a7-9772cd259fef", - "version": "99.9.9", - "connected": True, - "management_address": "10.42.10.229", - "replication_address": "192.168.10.229", - "type": ["replication"], - "array_name": "3rd-pure-generic2"} -REPLICATED_PGSNAPS = [ - { - "name": "array1:cinder-repl-pg.3", - "created": "2014-12-04T22:59:38Z", - "started": "2014-12-04T22:59:38Z", - "completed": "2014-12-04T22:59:39Z", - "source": "array1:cinder-repl-pg", - "logical_data_transferred": 0, - "progress": 1.0, - "data_transferred": 318 - }, - { - "name": "array1:cinder-repl-pg.2", - 
"created": "2014-12-04T21:59:38Z", - "started": "2014-12-04T21:59:38Z", - "completed": "2014-12-04T21:59:39Z", - "source": "array1:cinder-repl-pg", - "logical_data_transferred": 0, - "progress": 1.0, - "data_transferred": 318 - }, - { - "name": "array1:cinder-repl-pg.1", - "created": "2014-12-04T20:59:38Z", - "started": "2014-12-04T20:59:38Z", - "completed": "2014-12-04T20:59:39Z", - "source": "array1:cinder-repl-pg", - "logical_data_transferred": 0, - "progress": 1.0, - "data_transferred": 318 - }] -REPLICATED_VOLUME_OBJS = [ - fake_volume.fake_volume_obj(None, id=fake.VOLUME_ID), - fake_volume.fake_volume_obj(None, id=fake.VOLUME2_ID), - fake_volume.fake_volume_obj(None, id=fake.VOLUME3_ID), -] -REPLICATED_VOLUME_SNAPS = [ - { - "source": "array1:volume-%s-cinder" % fake.VOLUME_ID, - "serial": "BBA481C01639104E0001D5F7", - "created": "2014-12-04T22:59:38Z", - "name": "array1:cinder-repl-pg.2.volume-%s-cinder" % fake.VOLUME_ID, - "size": 1048576 - }, - { - "source": "array1:volume-%s-cinder" % fake.VOLUME2_ID, - "serial": "BBA481C01639104E0001D5F8", - "created": "2014-12-04T22:59:38Z", - "name": "array1:cinder-repl-pg.2.volume-%s-cinder" % fake.VOLUME2_ID, - "size": 1048576 - }, - { - "source": "array1:volume-%s-cinder" % fake.VOLUME3_ID, - "serial": "BBA481C01639104E0001D5F9", - "created": "2014-12-04T22:59:38Z", - "name": "array1:cinder-repl-pg.2.volume-%s-cinder" % fake.VOLUME3_ID, - "size": 1048576 - } -] - -NON_REPLICATED_VOL_TYPE = {"is_public": True, - "extra_specs": {}, - "name": "volume_type_1", - "id": VOLUME_TYPE_ID} -REPLICATED_VOL_TYPE = {"is_public": True, - "extra_specs": - {pure.EXTRA_SPECS_REPL_ENABLED: - " True"}, - "name": "volume_type_2", - "id": VOLUME_TYPE_ID} -MANAGEABLE_PURE_VOLS = [ - { - 'name': 'myVol1', - 'serial': '8E9C7E588B16C1EA00048CCA', - 'size': 3221225472, - 'created': '2016-08-05T17:26:34Z', - 'source': None, - }, - { - 'name': 'myVol2', - 'serial': '8E9C7E588B16C1EA00048CCB', - 'size': 3221225472, - 'created': 
'2016-08-05T17:26:34Z', - 'source': None, - }, - { - 'name': 'myVol3', - 'serial': '8E9C7E588B16C1EA00048CCD', - 'size': 3221225472, - 'created': '2016-08-05T17:26:34Z', - 'source': None, - } -] -MANAGEABLE_PURE_VOL_REFS = [ - { - 'reference': {'name': 'myVol1'}, - 'size': 3, - 'safe_to_manage': True, - 'reason_not_safe': None, - 'cinder_id': None, - 'extra_info': None, - }, - { - 'reference': {'name': 'myVol2'}, - 'size': 3, - 'safe_to_manage': True, - 'reason_not_safe': None, - 'cinder_id': None, - 'extra_info': None, - }, - { - 'reference': {'name': 'myVol3'}, - 'size': 3, - 'safe_to_manage': True, - 'reason_not_safe': None, - 'cinder_id': None, - 'extra_info': None, - } -] - -MANAGEABLE_PURE_SNAPS = [ - { - 'name': 'volume-fd33de6e-56f6-452d-a7b6-451c11089a9f-cinder.snap1', - 'serial': '8E9C7E588B16C1EA00048CCA', - 'size': 3221225472, - 'created': '2016-08-05T17:26:34Z', - 'source': 'volume-fd33de6e-56f6-452d-a7b6-451c11089a9f-cinder', - }, - { - 'name': 'volume-fd33de6e-56f6-452d-a7b6-451c11089a9f-cinder.snap2', - 'serial': '8E9C7E588B16C1EA00048CCB', - 'size': 4221225472, - 'created': '2016-08-05T17:26:34Z', - 'source': 'volume-fd33de6e-56f6-452d-a7b6-451c11089a9f-cinder', - }, - { - 'name': 'volume-fd33de6e-56f6-452d-a7b6-451c11089a9f-cinder.snap3', - 'serial': '8E9C7E588B16C1EA00048CCD', - 'size': 5221225472, - 'created': '2016-08-05T17:26:34Z', - 'source': 'volume-fd33de6e-56f6-452d-a7b6-451c11089a9f-cinder', - } -] -MANAGEABLE_PURE_SNAP_REFS = [ - { - 'reference': {'name': MANAGEABLE_PURE_SNAPS[0]['name']}, - 'size': 3, - 'safe_to_manage': True, - 'reason_not_safe': None, - 'cinder_id': None, - 'extra_info': None, - 'source_reference': {'name': MANAGEABLE_PURE_SNAPS[0]['source']}, - }, - { - 'reference': {'name': MANAGEABLE_PURE_SNAPS[1]['name']}, - 'size': 4, - 'safe_to_manage': True, - 'reason_not_safe': None, - 'cinder_id': None, - 'extra_info': None, - 'source_reference': {'name': MANAGEABLE_PURE_SNAPS[1]['source']}, - }, - { - 'reference': {'name': 
MANAGEABLE_PURE_SNAPS[2]['name']}, - 'size': 5, - 'safe_to_manage': True, - 'reason_not_safe': None, - 'cinder_id': None, - 'extra_info': None, - 'source_reference': {'name': MANAGEABLE_PURE_SNAPS[2]['source']}, - } -] - - -class FakePureStorageHTTPError(Exception): - def __init__(self, target=None, rest_version=None, code=None, - headers=None, text=None): - self.target = target - self.rest_version = rest_version - self.code = code - self.headers = headers - self.text = text - - -class PureDriverTestCase(test.TestCase): - def setUp(self): - super(PureDriverTestCase, self).setUp() - self.mock_config = mock.Mock() - self.mock_config.san_ip = PRIMARY_MANAGEMENT_IP - self.mock_config.pure_api_token = API_TOKEN - self.mock_config.volume_backend_name = VOLUME_BACKEND_NAME - self.mock_config.safe_get.return_value = None - self.mock_config.pure_eradicate_on_delete = False - self.mock_config.driver_ssl_cert_verify = False - self.mock_config.driver_ssl_cert_path = None - self.array = mock.Mock() - self.array.get.return_value = GET_ARRAY_PRIMARY - self.array.array_name = GET_ARRAY_PRIMARY["array_name"] - self.array.array_id = GET_ARRAY_PRIMARY["id"] - self.array2 = mock.Mock() - self.array2.array_name = GET_ARRAY_SECONDARY["array_name"] - self.array2.array_id = GET_ARRAY_SECONDARY["id"] - self.array2.get.return_value = GET_ARRAY_SECONDARY - self.purestorage_module = pure.purestorage - self.purestorage_module.VERSION = '1.4.0' - self.purestorage_module.PureHTTPError = FakePureStorageHTTPError - - def fake_get_array(*args, **kwargs): - if 'action' in kwargs and kwargs['action'] is 'monitor': - return PERF_INFO_RAW - - if 'space' in kwargs and kwargs['space'] is True: - return SPACE_INFO - - def assert_error_propagates(self, mocks, func, *args, **kwargs): - """Assert that errors from mocks propagate to func. - - Fail if exceptions raised by mocks are not seen when calling - func(*args, **kwargs). 
Ensure that we are really seeing exceptions - from the mocks by failing if just running func(*args, **kargs) raises - an exception itself. - """ - func(*args, **kwargs) - for mock_func in mocks: - original_side_effect = mock_func.side_effect - mock_func.side_effect = [exception.PureDriverException( - reason='reason')] - self.assertRaises(exception.PureDriverException, - func, *args, **kwargs) - mock_func.side_effect = original_side_effect - - @mock.patch('platform.platform') - def test_for_user_agent(self, mock_platform): - mock_platform.return_value = 'MyFavoritePlatform' - driver = pure.PureBaseVolumeDriver(configuration=self.mock_config) - expected_agent = "OpenStack Cinder %s/%s (MyFavoritePlatform)" % ( - driver.__class__.__name__, - driver.VERSION - ) - self.assertEqual(expected_agent, driver._user_agent) - - -class PureBaseSharedDriverTestCase(PureDriverTestCase): - def setUp(self): - super(PureBaseSharedDriverTestCase, self).setUp() - self.driver = pure.PureBaseVolumeDriver(configuration=self.mock_config) - self.driver._array = self.array - self.array.get_rest_version.return_value = '1.4' - self.purestorage_module.FlashArray.side_effect = None - self.array2.get_rest_version.return_value = '1.4' - - def tearDown(self): - super(PureBaseSharedDriverTestCase, self).tearDown() - - -@ddt.ddt -class PureBaseVolumeDriverTestCase(PureBaseSharedDriverTestCase): - def _setup_mocks_for_replication(self): - # Mock config values - self.mock_config.pure_replica_interval_default = ( - REPLICATION_INTERVAL_IN_SEC) - self.mock_config.pure_replica_retention_short_term_default = ( - REPLICATION_RETENTION_SHORT_TERM) - self.mock_config.pure_replica_retention_long_term_default = ( - REPLICATION_RETENTION_LONG_TERM) - self.mock_config.pure_replica_retention_long_term_default = ( - REPLICATION_RETENTION_LONG_TERM_PER_DAY) - self.mock_config.safe_get.return_value = [ - {"backend_id": self.driver._array.array_id, - "managed_backend_name": None, - "san_ip": "1.2.3.4", - "api_token": 
"abc123"}] - - @mock.patch(BASE_DRIVER_OBJ + '._generate_replication_retention') - @mock.patch(BASE_DRIVER_OBJ + '._setup_replicated_pgroups') - def test_parse_replication_configs_single_target( - self, - mock_setup_repl_pgroups, - mock_generate_replication_retention): - retention = mock.MagicMock() - mock_generate_replication_retention.return_value = retention - mock_setup_repl_pgroups.return_value = None - - # Test single array configured - self.mock_config.safe_get.return_value = [ - {"backend_id": self.driver._array.id, - "managed_backend_name": None, - "san_ip": "1.2.3.4", - "api_token": "abc123"}] - self.purestorage_module.FlashArray.return_value = self.array - self.driver.parse_replication_configs() - self.assertEqual(1, len(self.driver._replication_target_arrays)) - self.assertEqual(self.array, self.driver._replication_target_arrays[0]) - only_target_array = self.driver._replication_target_arrays[0] - self.assertEqual(self.driver._array.id, - only_target_array._backend_id) - - @mock.patch(BASE_DRIVER_OBJ + '._generate_replication_retention') - @mock.patch(BASE_DRIVER_OBJ + '._setup_replicated_pgroups') - def test_parse_replication_configs_multiple_target( - self, - mock_setup_repl_pgroups, - mock_generate_replication_retention): - - retention = mock.MagicMock() - mock_generate_replication_retention.return_value = retention - mock_setup_repl_pgroups.return_value = None - - # Test multiple arrays configured - self.mock_config.safe_get.return_value = [ - {"backend_id": GET_ARRAY_PRIMARY["id"], - "managed_backend_name": None, - "san_ip": "1.2.3.4", - "api_token": "abc123"}, - {"backend_id": GET_ARRAY_SECONDARY["id"], - "managed_backend_name": None, - "san_ip": "1.2.3.5", - "api_token": "abc124"}] - self.purestorage_module.FlashArray.side_effect = \ - [self.array, self.array2] - self.driver.parse_replication_configs() - self.assertEqual(2, len(self.driver._replication_target_arrays)) - self.assertEqual(self.array, self.driver._replication_target_arrays[0]) - 
first_target_array = self.driver._replication_target_arrays[0] - self.assertEqual(GET_ARRAY_PRIMARY["id"], - first_target_array._backend_id) - self.assertEqual( - self.array2, self.driver._replication_target_arrays[1]) - second_target_array = self.driver._replication_target_arrays[1] - self.assertEqual(GET_ARRAY_SECONDARY["id"], - second_target_array._backend_id) - - @mock.patch(BASE_DRIVER_OBJ + '._generate_replication_retention') - @mock.patch(BASE_DRIVER_OBJ + '._setup_replicated_pgroups') - @mock.patch('cinder.volume.volume_types.get_volume_type') - def test_do_setup_replicated(self, mock_get_volume_type, - mock_setup_repl_pgroups, - mock_generate_replication_retention): - retention = mock.MagicMock() - mock_generate_replication_retention.return_value = retention - mock_get_volume_type.return_value = REPLICATED_VOL_TYPE - self._setup_mocks_for_replication() - self.array2.get.return_value = GET_ARRAY_SECONDARY - self.array.get.return_value = GET_ARRAY_PRIMARY - self.purestorage_module.FlashArray.side_effect = [self.array, - self.array2] - self.driver.do_setup(None) - self.assertEqual(self.array, self.driver._array) - self.assertEqual(1, len(self.driver._replication_target_arrays)) - self.assertEqual(self.array2, - self.driver._replication_target_arrays[0]) - calls = [ - mock.call(self.array, [self.array2], 'cinder-group', - REPLICATION_INTERVAL_IN_SEC, retention) - ] - mock_setup_repl_pgroups.assert_has_calls(calls) - - def test_generate_purity_host_name(self): - result = self.driver._generate_purity_host_name( - "really-long-string-thats-a-bit-too-long") - self.assertTrue(result.startswith("really-long-string-that-")) - self.assertTrue(result.endswith("-cinder")) - self.assertEqual(63, len(result)) - self.assertTrue(pure.GENERATED_NAME.match(result)) - result = self.driver._generate_purity_host_name("!@#$%^-invalid&*") - self.assertTrue(result.startswith("invalid---")) - self.assertTrue(result.endswith("-cinder")) - self.assertEqual(49, len(result)) - 
self.assertTrue(pure.GENERATED_NAME.match(result)) - - @mock.patch(BASE_DRIVER_OBJ + "._add_to_group_if_needed") - @mock.patch(BASE_DRIVER_OBJ + "._is_volume_replicated_type", autospec=True) - def test_create_volume(self, mock_is_replicated_type, mock_add_to_group): - mock_is_replicated_type.return_value = False - self.driver.create_volume(VOLUME) - vol_name = VOLUME["name"] + "-cinder" - self.array.create_volume.assert_called_with( - vol_name, 2 * units.Gi) - mock_add_to_group.assert_called_once_with(VOLUME, - vol_name) - self.assert_error_propagates([self.array.create_volume], - self.driver.create_volume, VOLUME) - - @mock.patch(BASE_DRIVER_OBJ + "._add_to_group_if_needed") - @mock.patch(BASE_DRIVER_OBJ + "._is_volume_replicated_type", autospec=True) - def test_create_volume_from_snapshot(self, mock_is_replicated_type, - mock_add_to_group): - vol_name = VOLUME["name"] + "-cinder" - snap_name = SNAPSHOT["volume_name"] + "-cinder." + SNAPSHOT["name"] - mock_is_replicated_type.return_value = False - - # Branch where extend unneeded - self.driver.create_volume_from_snapshot(VOLUME, SNAPSHOT) - self.array.copy_volume.assert_called_with(snap_name, vol_name) - self.assertFalse(self.array.extend_volume.called) - mock_add_to_group.assert_called_once_with(VOLUME, - vol_name) - self.assert_error_propagates( - [self.array.copy_volume], - self.driver.create_volume_from_snapshot, VOLUME, SNAPSHOT) - self.assertFalse(self.array.extend_volume.called) - - @mock.patch(BASE_DRIVER_OBJ + "._add_to_group_if_needed") - @mock.patch(BASE_DRIVER_OBJ + "._is_volume_replicated_type", - autospec=True) - def test_create_volume_from_snapshot_with_extend(self, - mock_is_replicated_type, - mock_add_to_group): - vol_name = VOLUME["name"] + "-cinder" - snap_name = SNAPSHOT["volume_name"] + "-cinder." 
+ SNAPSHOT["name"] - mock_is_replicated_type.return_value = False - - # Branch where extend needed - src = deepcopy(SNAPSHOT) - src["volume_size"] = 1 # resize so smaller than VOLUME - self.driver.create_volume_from_snapshot(VOLUME, src) - expected = [mock.call.copy_volume(snap_name, vol_name), - mock.call.extend_volume(vol_name, 2 * units.Gi)] - self.array.assert_has_calls(expected) - mock_add_to_group.assert_called_once_with(VOLUME, - vol_name) - - @mock.patch(BASE_DRIVER_OBJ + "._get_snap_name") - def test_create_volume_from_snapshot_cant_get_name(self, mock_get_name): - mock_get_name.return_value = None - self.assertRaises(exception.PureDriverException, - self.driver.create_volume_from_snapshot, - VOLUME, SNAPSHOT) - - @mock.patch(BASE_DRIVER_OBJ + "._get_pgroup_snap_name_from_snapshot") - def test_create_volume_from_cgsnapshot_cant_get_name(self, mock_get_name): - mock_get_name.return_value = None - self.assertRaises(exception.PureDriverException, - self.driver.create_volume_from_snapshot, - VOLUME, SNAPSHOT_WITH_CGROUP) - - @mock.patch(BASE_DRIVER_OBJ + "._add_to_group_if_needed") - @mock.patch(BASE_DRIVER_OBJ + "._extend_if_needed", autospec=True) - @mock.patch(BASE_DRIVER_OBJ + "._get_pgroup_snap_name_from_snapshot") - @mock.patch(BASE_DRIVER_OBJ + "._is_volume_replicated_type", autospec=True) - def test_create_volume_from_cgsnapshot(self, mock_is_replicated_type, - mock_get_snap_name, - mock_extend_if_needed, - mock_add_to_group): - vol_name = VOLUME_WITH_CGROUP["name"] + "-cinder" - snap_name = "consisgroup-4a2f7e3a-312a-40c5-96a8-536b8a0f" \ - "e074-cinder.4a2f7e3a-312a-40c5-96a8-536b8a0fe075."\ - + vol_name - mock_get_snap_name.return_value = snap_name - mock_is_replicated_type.return_value = False - - self.driver.create_volume_from_snapshot(VOLUME_WITH_CGROUP, - SNAPSHOT_WITH_CGROUP) - - self.array.copy_volume.assert_called_with(snap_name, vol_name) - self.assertTrue(mock_get_snap_name.called) - self.assertTrue(mock_extend_if_needed.called) - - 
self.driver.create_volume_from_snapshot(VOLUME_WITH_CGROUP, - SNAPSHOT_WITH_CGROUP) - mock_add_to_group\ - .assert_called_with(VOLUME_WITH_CGROUP, - vol_name) - - # Tests cloning a volume that is not replicated type - @mock.patch(BASE_DRIVER_OBJ + "._add_to_group_if_needed") - @mock.patch(BASE_DRIVER_OBJ + "._is_volume_replicated_type", autospec=True) - def test_create_cloned_volume(self, mock_is_replicated_type, - mock_add_to_group): - vol_name = VOLUME["name"] + "-cinder" - src_name = SRC_VOL["name"] + "-cinder" - mock_is_replicated_type.return_value = False - # Branch where extend unneeded - self.driver.create_cloned_volume(VOLUME, SRC_VOL) - self.array.copy_volume.assert_called_with(src_name, vol_name) - self.assertFalse(self.array.extend_volume.called) - mock_add_to_group.assert_called_once_with(VOLUME, - vol_name) - self.assert_error_propagates( - [self.array.copy_volume], - self.driver.create_cloned_volume, VOLUME, SRC_VOL) - self.assertFalse(self.array.extend_volume.called) - - @mock.patch(BASE_DRIVER_OBJ + "._add_to_group_if_needed") - @mock.patch(BASE_DRIVER_OBJ + "._is_volume_replicated_type", - autospec=True) - def test_create_cloned_volume_and_extend(self, mock_is_replicated_type, - mock_add_to_group): - vol_name = VOLUME["name"] + "-cinder" - src_name = SRC_VOL["name"] + "-cinder" - src = deepcopy(SRC_VOL) - src["size"] = 1 # resize so smaller than VOLUME - self.driver.create_cloned_volume(VOLUME, src) - expected = [mock.call.copy_volume(src_name, vol_name), - mock.call.extend_volume(vol_name, 2 * units.Gi)] - self.array.assert_has_calls(expected) - mock_add_to_group.assert_called_once_with(VOLUME, - vol_name) - - # Tests cloning a volume that is part of a consistency group - @mock.patch(BASE_DRIVER_OBJ + "._add_to_group_if_needed") - @mock.patch(BASE_DRIVER_OBJ + "._is_volume_replicated_type", autospec=True) - def test_create_cloned_volume_with_cgroup(self, mock_is_replicated_type, - mock_add_to_group): - vol_name = VOLUME_WITH_CGROUP["name"] + 
"-cinder" - mock_is_replicated_type.return_value = False - - self.driver.create_cloned_volume(VOLUME_WITH_CGROUP, SRC_VOL) - - mock_add_to_group.assert_called_with(VOLUME_WITH_CGROUP, - vol_name) - - def test_delete_volume_already_deleted(self): - self.array.list_volume_private_connections.side_effect = \ - self.purestorage_module.PureHTTPError( - code=400, - text="Volume does not exist" - ) - self.driver.delete_volume(VOLUME) - self.assertFalse(self.array.destroy_volume.called) - self.assertFalse(self.array.eradicate_volume.called) - - # Testing case where array.destroy_volume returns an exception - # because volume has already been deleted - self.array.list_volume_private_connections.side_effect = None - self.array.list_volume_private_connections.return_value = {} - self.array.destroy_volume.side_effect = \ - self.purestorage_module.PureHTTPError( - code=http_client.BAD_REQUEST, - text="Volume does not exist" - ) - self.driver.delete_volume(VOLUME) - self.assertTrue(self.array.destroy_volume.called) - self.assertFalse(self.array.eradicate_volume.called) - - def test_delete_volume(self): - vol_name = VOLUME["name"] + "-cinder" - self.array.list_volume_private_connections.return_value = {} - self.driver.delete_volume(VOLUME) - expected = [mock.call.destroy_volume(vol_name)] - self.array.assert_has_calls(expected) - self.assertFalse(self.array.eradicate_volume.called) - self.array.destroy_volume.side_effect = ( - self.purestorage_module.PureHTTPError(code=http_client.BAD_REQUEST, - text="does not exist")) - self.driver.delete_volume(VOLUME) - self.array.destroy_volume.side_effect = None - self.assert_error_propagates([self.array.destroy_volume], - self.driver.delete_volume, VOLUME) - - def test_delete_volume_eradicate_now(self): - vol_name = VOLUME["name"] + "-cinder" - self.array.list_volume_private_connections.return_value = {} - self.mock_config.pure_eradicate_on_delete = True - self.driver.delete_volume(VOLUME) - expected = [mock.call.destroy_volume(vol_name), - 
mock.call.eradicate_volume(vol_name)] - self.array.assert_has_calls(expected) - - def test_delete_connected_volume(self): - vol_name = VOLUME["name"] + "-cinder" - host_name_a = "ha" - host_name_b = "hb" - self.array.list_volume_private_connections.return_value = [{ - "host": host_name_a, - "lun": 7, - "name": vol_name, - "size": 3221225472, - }, { - "host": host_name_b, - "lun": 2, - "name": vol_name, - "size": 3221225472, - }] - - self.driver.delete_volume(VOLUME) - expected = [mock.call.list_volume_private_connections(vol_name), - mock.call.disconnect_host(host_name_a, vol_name), - mock.call.list_host_connections(host_name_a, private=True), - mock.call.disconnect_host(host_name_b, vol_name), - mock.call.list_host_connections(host_name_b, private=True), - mock.call.destroy_volume(vol_name)] - self.array.assert_has_calls(expected) - - def test_create_snapshot(self): - vol_name = SRC_VOL["name"] + "-cinder" - self.driver.create_snapshot(SNAPSHOT) - self.array.create_snapshot.assert_called_with( - vol_name, - suffix=SNAPSHOT["name"] - ) - self.assert_error_propagates([self.array.create_snapshot], - self.driver.create_snapshot, SNAPSHOT) - - @ddt.data("does not exist", "has been destroyed") - def test_delete_snapshot(self, error_text): - snap_name = SNAPSHOT["volume_name"] + "-cinder." + SNAPSHOT["name"] - self.driver.delete_snapshot(SNAPSHOT) - expected = [mock.call.destroy_volume(snap_name)] - self.array.assert_has_calls(expected) - self.assertFalse(self.array.eradicate_volume.called) - self.array.destroy_volume.side_effect = ( - self.purestorage_module.PureHTTPError(code=http_client.BAD_REQUEST, - text=error_text)) - self.driver.delete_snapshot(SNAPSHOT) - self.array.destroy_volume.side_effect = None - self.assert_error_propagates([self.array.destroy_volume], - self.driver.delete_snapshot, SNAPSHOT) - - def test_delete_snapshot_eradicate_now(self): - snap_name = SNAPSHOT["volume_name"] + "-cinder." 
+ SNAPSHOT["name"] - self.mock_config.pure_eradicate_on_delete = True - self.driver.delete_snapshot(SNAPSHOT) - expected = [mock.call.destroy_volume(snap_name), - mock.call.eradicate_volume(snap_name)] - self.array.assert_has_calls(expected) - - @mock.patch(BASE_DRIVER_OBJ + "._get_host", autospec=True) - def test_terminate_connection(self, mock_host): - vol_name = VOLUME["name"] + "-cinder" - mock_host.return_value = {"name": "some-host"} - # Branch with manually created host - self.driver.terminate_connection(VOLUME, ISCSI_CONNECTOR) - self.array.disconnect_host.assert_called_with("some-host", vol_name) - self.assertTrue(self.array.list_host_connections.called) - self.assertFalse(self.array.delete_host.called) - # Branch with host added to host group - self.array.reset_mock() - self.array.list_host_connections.return_value = [] - mock_host.return_value = PURE_HOST.copy() - mock_host.return_value.update(hgroup="some-group") - self.driver.terminate_connection(VOLUME, ISCSI_CONNECTOR) - self.array.disconnect_host.assert_called_with(PURE_HOST_NAME, vol_name) - self.assertTrue(self.array.list_host_connections.called) - self.assertTrue(self.array.delete_host.called) - # Branch with host still having connected volumes - self.array.reset_mock() - self.array.list_host_connections.return_value = [ - {"lun": 2, "name": PURE_HOST_NAME, "vol": "some-vol"}] - mock_host.return_value = PURE_HOST - self.driver.terminate_connection(VOLUME, ISCSI_CONNECTOR) - self.array.disconnect_host.assert_called_with(PURE_HOST_NAME, vol_name) - self.array.list_host_connections.assert_called_with(PURE_HOST_NAME, - private=True) - self.assertFalse(self.array.delete_host.called) - # Branch where host gets deleted - self.array.reset_mock() - self.array.list_host_connections.return_value = [] - self.driver.terminate_connection(VOLUME, ISCSI_CONNECTOR) - self.array.disconnect_host.assert_called_with(PURE_HOST_NAME, vol_name) - self.array.list_host_connections.assert_called_with(PURE_HOST_NAME, - 
private=True) - self.array.delete_host.assert_called_with(PURE_HOST_NAME) - # Branch where connection is missing and the host is still deleted - self.array.reset_mock() - self.array.disconnect_host.side_effect = \ - self.purestorage_module.PureHTTPError(code=http_client.BAD_REQUEST, - text="is not connected") - self.driver.terminate_connection(VOLUME, ISCSI_CONNECTOR) - self.array.disconnect_host.assert_called_with(PURE_HOST_NAME, vol_name) - self.array.list_host_connections.assert_called_with(PURE_HOST_NAME, - private=True) - self.array.delete_host.assert_called_with(PURE_HOST_NAME) - # Branch where an unexpected exception occurs - self.array.reset_mock() - self.array.disconnect_host.side_effect = \ - self.purestorage_module.PureHTTPError( - code=http_client.INTERNAL_SERVER_ERROR, - text="Some other error" - ) - self.assertRaises(self.purestorage_module.PureHTTPError, - self.driver.terminate_connection, - VOLUME, - ISCSI_CONNECTOR) - self.array.disconnect_host.assert_called_with(PURE_HOST_NAME, vol_name) - self.assertFalse(self.array.list_host_connections.called) - self.assertFalse(self.array.delete_host.called) - - def _test_terminate_connection_with_error(self, mock_host, error): - vol_name = VOLUME["name"] + "-cinder" - mock_host.return_value = PURE_HOST.copy() - self.array.reset_mock() - self.array.list_host_connections.return_value = [] - self.array.delete_host.side_effect = \ - self.purestorage_module.PureHTTPError(code=http_client.BAD_REQUEST, - text=error) - self.driver.terminate_connection(VOLUME, ISCSI_CONNECTOR) - self.array.disconnect_host.assert_called_with(PURE_HOST_NAME, vol_name) - self.array.list_host_connections.assert_called_with(PURE_HOST_NAME, - private=True) - self.array.delete_host.assert_called_once_with(PURE_HOST_NAME) - - @mock.patch(BASE_DRIVER_OBJ + "._get_host", autospec=True) - def test_terminate_connection_host_deleted(self, mock_host): - self._test_terminate_connection_with_error(mock_host, - 'Host does not exist.') - - 
@mock.patch(BASE_DRIVER_OBJ + "._get_host", autospec=True) - def test_terminate_connection_host_got_new_connections(self, mock_host): - self._test_terminate_connection_with_error( - mock_host, - 'Host cannot be deleted due to existing connections.' - ) - - def test_terminate_connection_no_connector_with_host(self): - # Show the volume having a connection - self.array.list_volume_private_connections.return_value = \ - [VOLUME_CONNECTIONS[0]] - - self.driver.terminate_connection(VOLUME, None) - self.array.disconnect_host.assert_called_with( - VOLUME_CONNECTIONS[0]["host"], - VOLUME_CONNECTIONS[0]["name"] - ) - - def test_terminate_connection_no_connector_no_host(self): - vol = fake_volume.fake_volume_obj(None, name=VOLUME["name"]) - - # Show the volume having a connection - self.array.list_volume_private_connections.return_value = [] - - # Make sure - self.driver.terminate_connection(vol, None) - self.array.disconnect_host.assert_not_called() - - def test_extend_volume(self): - vol_name = VOLUME["name"] + "-cinder" - self.driver.extend_volume(VOLUME, 3) - self.array.extend_volume.assert_called_with(vol_name, 3 * units.Gi) - self.assert_error_propagates([self.array.extend_volume], - self.driver.extend_volume, VOLUME, 3) - - def test_get_pgroup_name_from_id(self): - id = "4a2f7e3a-312a-40c5-96a8-536b8a0fe074" - expected_name = "consisgroup-%s-cinder" % id - actual_name = self.driver._get_pgroup_name_from_id(id) - self.assertEqual(expected_name, actual_name) - - def test_get_pgroup_snap_suffix(self): - cgsnap = { - 'id': "4a2f7e3a-312a-40c5-96a8-536b8a0fe074" - } - expected_suffix = "cgsnapshot-%s-cinder" % cgsnap['id'] - actual_suffix = self.driver._get_pgroup_snap_suffix(cgsnap) - self.assertEqual(expected_suffix, actual_suffix) - - def test_get_pgroup_snap_name(self): - cg_id = "4a2f7e3a-312a-40c5-96a8-536b8a0fe074" - cgsnap_id = "4a2f7e3a-312a-40c5-96a8-536b8a0fe075" - - cgsnap = { - 'id': cgsnap_id, - 'group_id': cg_id - } - expected_name = 
"consisgroup-%(cg)s-cinder.cgsnapshot-%(snap)s-cinder"\ - % {"cg": cg_id, "snap": cgsnap_id} - - actual_name = self.driver._get_pgroup_snap_name(cgsnap) - - self.assertEqual(expected_name, actual_name) - - def test_get_pgroup_snap_name_from_snapshot(self): - - groupsnapshot_id = 'b919b266-23b4-4b83-9a92-e66031b9a921' - volume_name = 'volume-a3b8b294-8494-4a72-bec7-9aadec561332' - cg_id = '0cfc0e4e-5029-4839-af20-184fbc42a9ed' - pgsnap_name_base = ( - 'consisgroup-%s-cinder.cgsnapshot-%s-cinder.%s-cinder') - pgsnap_name = pgsnap_name_base % (cg_id, groupsnapshot_id, volume_name) - - self.driver.db = mock.MagicMock() - cgsnap = { - 'id': groupsnapshot_id, - 'group_id': cg_id - } - self.driver.db.group_snapshot_get.return_value = cgsnap - - mock_snap = mock.MagicMock() - mock_snap.group_snapshot = cgsnap - mock_snap.volume_name = volume_name - - actual_name = self.driver._get_pgroup_snap_name_from_snapshot( - mock_snap - ) - self.assertEqual(pgsnap_name, actual_name) - - def test_create_consistencygroup(self): - mock_cgroup = mock.Mock() - mock_cgroup.id = "4a2f7e3a-312a-40c5-96a8-536b8a0fe074" - - model_update = self.driver.create_consistencygroup(None, mock_cgroup) - - expected_name = self.driver._get_pgroup_name_from_id(mock_cgroup.id) - self.array.create_pgroup.assert_called_with(expected_name) - self.assertEqual({'status': 'available'}, model_update) - - self.assert_error_propagates( - [self.array.create_pgroup], - self.driver.create_consistencygroup, None, mock_cgroup) - - @mock.patch(BASE_DRIVER_OBJ + ".create_volume_from_snapshot") - @mock.patch(BASE_DRIVER_OBJ + ".create_consistencygroup") - def test_create_consistencygroup_from_cgsnapshot(self, mock_create_cg, - mock_create_vol): - mock_context = mock.Mock() - mock_group = mock.Mock() - mock_cgsnapshot = mock.Mock() - mock_snapshots = [mock.Mock() for i in range(5)] - mock_volumes = [mock.Mock() for i in range(5)] - result = self.driver.create_consistencygroup_from_src( - mock_context, - mock_group, - 
mock_volumes, - cgsnapshot=mock_cgsnapshot, - snapshots=mock_snapshots, - source_cg=None, - source_vols=None - ) - self.assertEqual((None, None), result) - mock_create_cg.assert_called_with(mock_context, mock_group) - expected_calls = [mock.call(vol, snap) - for vol, snap in zip(mock_volumes, mock_snapshots)] - mock_create_vol.assert_has_calls(expected_calls, - any_order=True) - - self.assert_error_propagates( - [mock_create_vol, mock_create_cg], - self.driver.create_consistencygroup_from_src, - mock_context, - mock_group, - mock_volumes, - cgsnapshot=mock_cgsnapshot, - snapshots=mock_snapshots, - source_cg=None, - source_vols=None - ) - - @mock.patch(BASE_DRIVER_OBJ + ".create_consistencygroup") - def test_create_consistencygroup_from_cg(self, mock_create_cg): - num_volumes = 5 - mock_context = mock.MagicMock() - mock_group = mock.MagicMock() - mock_source_cg = mock.MagicMock() - mock_volumes = [mock.MagicMock() for i in range(num_volumes)] - mock_source_vols = [mock.MagicMock() for i in range(num_volumes)] - result = self.driver.create_consistencygroup_from_src( - mock_context, - mock_group, - mock_volumes, - source_cg=mock_source_cg, - source_vols=mock_source_vols - ) - self.assertEqual((None, None), result) - mock_create_cg.assert_called_with(mock_context, mock_group) - self.assertTrue(self.array.create_pgroup_snapshot.called) - self.assertEqual(num_volumes, self.array.copy_volume.call_count) - self.assertEqual(num_volumes, self.array.set_pgroup.call_count) - self.assertTrue(self.array.destroy_pgroup.called) - - @mock.patch(BASE_DRIVER_OBJ + ".create_consistencygroup") - def test_create_consistencygroup_from_cg_with_error(self, mock_create_cg): - num_volumes = 5 - mock_context = mock.MagicMock() - mock_group = mock.MagicMock() - mock_source_cg = mock.MagicMock() - mock_volumes = [mock.MagicMock() for i in range(num_volumes)] - mock_source_vols = [mock.MagicMock() for i in range(num_volumes)] - - self.array.copy_volume.side_effect = FakePureStorageHTTPError() - 
- self.assertRaises( - FakePureStorageHTTPError, - self.driver.create_consistencygroup_from_src, - mock_context, - mock_group, - mock_volumes, - source_cg=mock_source_cg, - source_vols=mock_source_vols - ) - mock_create_cg.assert_called_with(mock_context, mock_group) - self.assertTrue(self.array.create_pgroup_snapshot.called) - # Make sure that the temp snapshot is cleaned up even when copying - # the volume fails! - self.assertTrue(self.array.destroy_pgroup.called) - - @mock.patch(BASE_DRIVER_OBJ + ".delete_volume", autospec=True) - def test_delete_consistencygroup(self, mock_delete_volume): - mock_cgroup = mock.MagicMock() - mock_cgroup.id = "4a2f7e3a-312a-40c5-96a8-536b8a0fe074" - mock_cgroup['status'] = "deleted" - mock_context = mock.Mock() - mock_volume = mock.MagicMock() - - model_update, volumes = self.driver.delete_consistencygroup( - mock_context, mock_cgroup, [mock_volume]) - - expected_name = self.driver._get_pgroup_name_from_id(mock_cgroup.id) - self.array.destroy_pgroup.assert_called_with(expected_name) - self.assertFalse(self.array.eradicate_pgroup.called) - self.assertIsNone(volumes) - self.assertIsNone(model_update) - mock_delete_volume.assert_called_with(self.driver, mock_volume) - - self.array.destroy_pgroup.side_effect = \ - self.purestorage_module.PureHTTPError( - code=http_client.BAD_REQUEST, - text="Protection group has been destroyed." 
- ) - self.driver.delete_consistencygroup(mock_context, - mock_cgroup, - [mock_volume]) - self.array.destroy_pgroup.assert_called_with(expected_name) - self.assertFalse(self.array.eradicate_pgroup.called) - mock_delete_volume.assert_called_with(self.driver, mock_volume) - - self.array.destroy_pgroup.side_effect = \ - self.purestorage_module.PureHTTPError( - code=http_client.BAD_REQUEST, - text="Protection group does not exist" - ) - self.driver.delete_consistencygroup(mock_context, - mock_cgroup, - [mock_volume]) - self.array.destroy_pgroup.assert_called_with(expected_name) - self.assertFalse(self.array.eradicate_pgroup.called) - mock_delete_volume.assert_called_with(self.driver, mock_volume) - - self.array.destroy_pgroup.side_effect = \ - self.purestorage_module.PureHTTPError( - code=http_client.BAD_REQUEST, - text="Some other error" - ) - self.assertRaises(self.purestorage_module.PureHTTPError, - self.driver.delete_consistencygroup, - mock_context, - mock_volume, - [mock_volume]) - - self.array.destroy_pgroup.side_effect = \ - self.purestorage_module.PureHTTPError( - code=http_client.INTERNAL_SERVER_ERROR, - text="Another different error" - ) - self.assertRaises(self.purestorage_module.PureHTTPError, - self.driver.delete_consistencygroup, - mock_context, - mock_volume, - [mock_volume]) - - self.array.destroy_pgroup.side_effect = None - self.assert_error_propagates( - [self.array.destroy_pgroup], - self.driver.delete_consistencygroup, - mock_context, - mock_cgroup, - [mock_volume] - ) - - def _create_mock_cg(self): - mock_group = mock.MagicMock() - mock_group.id = "4a2f7e3a-312a-40c5-96a8-536b8a0fe074" - mock_group.status = "Available" - mock_group.cg_name = "consisgroup-" + mock_group.id + "-cinder" - return mock_group - - def test_update_consistencygroup(self): - mock_group = self._create_mock_cg() - add_vols = [ - {'name': 'vol1'}, - {'name': 'vol2'}, - {'name': 'vol3'}, - ] - expected_addvollist = [vol['name'] + '-cinder' for vol in add_vols] - remove_vols = [ 
- {'name': 'vol4'}, - {'name': 'vol5'}, - ] - expected_remvollist = [vol['name'] + '-cinder' for vol in remove_vols] - self.driver.update_consistencygroup(mock.Mock(), mock_group, - add_vols, remove_vols) - self.array.set_pgroup.assert_called_with( - mock_group.cg_name, - addvollist=expected_addvollist, - remvollist=expected_remvollist - ) - - def test_update_consistencygroup_no_add_vols(self): - mock_group = self._create_mock_cg() - expected_addvollist = [] - remove_vols = [ - {'name': 'vol4'}, - {'name': 'vol5'}, - ] - expected_remvollist = [vol['name'] + '-cinder' for vol in remove_vols] - self.driver.update_consistencygroup(mock.Mock(), mock_group, - None, remove_vols) - self.array.set_pgroup.assert_called_with( - mock_group.cg_name, - addvollist=expected_addvollist, - remvollist=expected_remvollist - ) - - def test_update_consistencygroup_no_remove_vols(self): - mock_group = self._create_mock_cg() - add_vols = [ - {'name': 'vol1'}, - {'name': 'vol2'}, - {'name': 'vol3'}, - ] - expected_addvollist = [vol['name'] + '-cinder' for vol in add_vols] - expected_remvollist = [] - self.driver.update_consistencygroup(mock.Mock(), mock_group, - add_vols, None) - self.array.set_pgroup.assert_called_with( - mock_group.cg_name, - addvollist=expected_addvollist, - remvollist=expected_remvollist - ) - - def test_update_consistencygroup_no_vols(self): - mock_group = self._create_mock_cg() - self.driver.update_consistencygroup(mock.Mock(), mock_group, - None, None) - self.array.set_pgroup.assert_called_with( - mock_group.cg_name, - addvollist=[], - remvollist=[] - ) - - def test_create_cgsnapshot(self): - mock_cgsnap = { - 'id': "4a2f7e3a-312a-40c5-96a8-536b8a0fe074", - 'group_id': "4a2f7e3a-312a-40c5-96a8-536b8a0fe075", - } - mock_context = mock.Mock() - mock_snap = mock.MagicMock() - - model_update, snapshots = self.driver.create_cgsnapshot(mock_context, - mock_cgsnap, - [mock_snap]) - cg_id = mock_cgsnap["group_id"] - expected_pgroup_name = 
self.driver._get_pgroup_name_from_id(cg_id) - expected_snap_suffix = self.driver._get_pgroup_snap_suffix(mock_cgsnap) - self.array.create_pgroup_snapshot\ - .assert_called_with(expected_pgroup_name, - suffix=expected_snap_suffix) - self.assertIsNone(model_update) - self.assertIsNone(snapshots) - - self.assert_error_propagates( - [self.array.create_pgroup_snapshot], - self.driver.create_cgsnapshot, mock_context, mock_cgsnap, []) - - @mock.patch(BASE_DRIVER_OBJ + "._get_pgroup_snap_name", - spec=pure.PureBaseVolumeDriver._get_pgroup_snap_name) - def test_delete_cgsnapshot(self, mock_get_snap_name): - snap_name = "consisgroup-4a2f7e3a-312a-40c5-96a8-536b8a0f" \ - "e074-cinder.4a2f7e3a-312a-40c5-96a8-536b8a0fe075" - mock_get_snap_name.return_value = snap_name - mock_cgsnap = mock.Mock() - mock_cgsnap.status = 'deleted' - mock_context = mock.Mock() - mock_snap = mock.Mock() - - model_update, snapshots = self.driver.delete_cgsnapshot(mock_context, - mock_cgsnap, - [mock_snap]) - - self.array.destroy_pgroup.assert_called_with(snap_name) - self.assertFalse(self.array.eradicate_pgroup.called) - self.assertIsNone(model_update) - self.assertIsNone(snapshots) - - self.array.destroy_pgroup.side_effect = \ - self.purestorage_module.PureHTTPError( - code=http_client.BAD_REQUEST, - text="Protection group snapshot has been destroyed." 
- ) - self.driver.delete_cgsnapshot(mock_context, mock_cgsnap, [mock_snap]) - self.array.destroy_pgroup.assert_called_with(snap_name) - self.assertFalse(self.array.eradicate_pgroup.called) - - self.array.destroy_pgroup.side_effect = \ - self.purestorage_module.PureHTTPError( - code=http_client.BAD_REQUEST, - text="Protection group snapshot does not exist" - ) - self.driver.delete_cgsnapshot(mock_context, mock_cgsnap, [mock_snap]) - self.array.destroy_pgroup.assert_called_with(snap_name) - self.assertFalse(self.array.eradicate_pgroup.called) - - self.array.destroy_pgroup.side_effect = \ - self.purestorage_module.PureHTTPError( - code=http_client.BAD_REQUEST, - text="Some other error" - ) - self.assertRaises(self.purestorage_module.PureHTTPError, - self.driver.delete_cgsnapshot, - mock_context, - mock_cgsnap, - [mock_snap]) - - self.array.destroy_pgroup.side_effect = \ - self.purestorage_module.PureHTTPError( - code=http_client.INTERNAL_SERVER_ERROR, - text="Another different error" - ) - self.assertRaises(self.purestorage_module.PureHTTPError, - self.driver.delete_cgsnapshot, - mock_context, - mock_cgsnap, - [mock_snap]) - - self.array.destroy_pgroup.side_effect = None - - self.assert_error_propagates( - [self.array.destroy_pgroup], - self.driver.delete_cgsnapshot, - mock_context, - mock_cgsnap, - [mock_snap] - ) - - @mock.patch(BASE_DRIVER_OBJ + "._get_pgroup_snap_name", - spec=pure.PureBaseVolumeDriver._get_pgroup_snap_name) - def test_delete_cgsnapshot_eradicate_now(self, mock_get_snap_name): - snap_name = "consisgroup-4a2f7e3a-312a-40c5-96a8-536b8a0f" \ - "e074-cinder.4a2f7e3a-312a-40c5-96a8-536b8a0fe075" - mock_get_snap_name.return_value = snap_name - self.mock_config.pure_eradicate_on_delete = True - model_update, snapshots = self.driver.delete_cgsnapshot(mock.Mock(), - mock.Mock(), - [mock.Mock()]) - - self.array.destroy_pgroup.assert_called_once_with(snap_name) - self.array.eradicate_pgroup.assert_called_once_with(snap_name) - - def 
test_manage_existing(self): - ref_name = 'vol1' - volume_ref = {'name': ref_name} - self.array.list_volume_private_connections.return_value = [] - vol_name = VOLUME['name'] + '-cinder' - self.driver.manage_existing(VOLUME, volume_ref) - self.array.list_volume_private_connections.assert_called_with(ref_name) - self.array.rename_volume.assert_called_with(ref_name, vol_name) - - def test_manage_existing_error_propagates(self): - self.array.list_volume_private_connections.return_value = [] - self.assert_error_propagates( - [self.array.list_volume_private_connections, - self.array.rename_volume], - self.driver.manage_existing, - VOLUME, {'name': 'vol1'} - ) - - def test_manage_existing_bad_ref(self): - self.assertRaises(exception.ManageExistingInvalidReference, - self.driver.manage_existing, - VOLUME, {'bad_key': 'bad_value'}) - - self.assertRaises(exception.ManageExistingInvalidReference, - self.driver.manage_existing, - VOLUME, {'name': ''}) - - self.assertRaises(exception.ManageExistingInvalidReference, - self.driver.manage_existing, - VOLUME, {'name': None}) - - self.array.get_volume.side_effect = \ - self.purestorage_module.PureHTTPError( - text="Volume does not exist.", - code=http_client.BAD_REQUEST - ) - self.assertRaises(exception.ManageExistingInvalidReference, - self.driver.manage_existing, - VOLUME, {'name': 'non-existing-volume'}) - - def test_manage_existing_with_connected_hosts(self): - ref_name = 'vol1' - self.array.list_volume_private_connections.return_value = \ - ["host1", "host2"] - - self.assertRaises(exception.ManageExistingInvalidReference, - self.driver.manage_existing, - VOLUME, {'name': ref_name}) - - self.array.list_volume_private_connections.assert_called_with(ref_name) - self.assertFalse(self.array.rename_volume.called) - - def test_manage_existing_get_size(self): - ref_name = 'vol1' - volume_ref = {'name': ref_name} - expected_size = 5 - self.array.get_volume.return_value = {"size": 5368709120} - - size = 
self.driver.manage_existing_get_size(VOLUME, volume_ref) - - self.assertEqual(expected_size, size) - self.array.get_volume.assert_called_with(ref_name, snap=False) - - def test_manage_existing_get_size_error_propagates(self): - self.array.get_volume.return_value = mock.MagicMock() - self.assert_error_propagates([self.array.get_volume], - self.driver.manage_existing_get_size, - VOLUME, {'name': 'vol1'}) - - def test_manage_existing_get_size_bad_ref(self): - self.assertRaises(exception.ManageExistingInvalidReference, - self.driver.manage_existing_get_size, - VOLUME, {'bad_key': 'bad_value'}) - - self.assertRaises(exception.ManageExistingInvalidReference, - self.driver.manage_existing_get_size, - VOLUME, {'name': ''}) - - self.assertRaises(exception.ManageExistingInvalidReference, - self.driver.manage_existing_get_size, - VOLUME, {'name': None}) - - def test_unmanage(self): - vol_name = VOLUME['name'] + "-cinder" - unmanaged_vol_name = vol_name + "-unmanaged" - - self.driver.unmanage(VOLUME) - - self.array.rename_volume.assert_called_with(vol_name, - unmanaged_vol_name) - - def test_unmanage_error_propagates(self): - self.assert_error_propagates([self.array.rename_volume], - self.driver.unmanage, - VOLUME) - - def test_unmanage_with_deleted_volume(self): - vol_name = VOLUME['name'] + "-cinder" - unmanaged_vol_name = vol_name + "-unmanaged" - self.array.rename_volume.side_effect = \ - self.purestorage_module.PureHTTPError( - text="Volume does not exist.", - code=http_client.BAD_REQUEST - ) - - self.driver.unmanage(VOLUME) - - self.array.rename_volume.assert_called_with(vol_name, - unmanaged_vol_name) - - def test_manage_existing_snapshot(self): - ref_name = PURE_SNAPSHOT['name'] - snap_ref = {'name': ref_name} - self.array.get_volume.return_value = [PURE_SNAPSHOT] - self.driver.manage_existing_snapshot(SNAPSHOT, snap_ref) - self.array.rename_volume.assert_called_once_with(ref_name, - SNAPSHOT_PURITY_NAME) - 
self.array.get_volume.assert_called_with(PURE_SNAPSHOT['source'], - snap=True) - - def test_manage_existing_snapshot_multiple_snaps_on_volume(self): - ref_name = PURE_SNAPSHOT['name'] - snap_ref = {'name': ref_name} - pure_snaps = [PURE_SNAPSHOT] - for i in range(5): - snap = PURE_SNAPSHOT.copy() - snap['name'] += str(i) - pure_snaps.append(snap) - self.array.get_volume.return_value = pure_snaps - self.driver.manage_existing_snapshot(SNAPSHOT, snap_ref) - self.array.rename_volume.assert_called_once_with(ref_name, - SNAPSHOT_PURITY_NAME) - - def test_manage_existing_snapshot_error_propagates(self): - self.array.get_volume.return_value = [PURE_SNAPSHOT] - self.assert_error_propagates( - [self.array.rename_volume], - self.driver.manage_existing_snapshot, - SNAPSHOT, {'name': PURE_SNAPSHOT['name']} - ) - - def test_manage_existing_snapshot_bad_ref(self): - self.assertRaises(exception.ManageExistingInvalidReference, - self.driver.manage_existing_snapshot, - SNAPSHOT, {'bad_key': 'bad_value'}) - - def test_manage_existing_snapshot_empty_ref(self): - self.assertRaises(exception.ManageExistingInvalidReference, - self.driver.manage_existing_snapshot, - SNAPSHOT, {'name': ''}) - - def test_manage_existing_snapshot_none_ref(self): - self.assertRaises(exception.ManageExistingInvalidReference, - self.driver.manage_existing_snapshot, - SNAPSHOT, {'name': None}) - - def test_manage_existing_snapshot_volume_ref_not_exist(self): - self.array.get_volume.side_effect = \ - self.purestorage_module.PureHTTPError( - text="Volume does not exist.", - code=http_client.BAD_REQUEST - ) - self.assertRaises(exception.ManageExistingInvalidReference, - self.driver.manage_existing_snapshot, - SNAPSHOT, {'name': 'non-existing-volume.snap1'}) - - def test_manage_existing_snapshot_ref_not_exist(self): - ref_name = PURE_SNAPSHOT['name'] + '-fake' - snap_ref = {'name': ref_name} - self.array.get_volume.return_value = [PURE_SNAPSHOT] - self.assertRaises(exception.ManageExistingInvalidReference, - 
self.driver.manage_existing_snapshot, - SNAPSHOT, snap_ref) - - def test_manage_existing_snapshot_bad_api_version(self): - self.array.get_rest_version.return_value = '1.3' - self.assertRaises(exception.PureDriverException, - self.driver.manage_existing_snapshot, - SNAPSHOT, {'name': PURE_SNAPSHOT['name']}) - - def test_manage_existing_snapshot_get_size(self): - ref_name = PURE_SNAPSHOT['name'] - snap_ref = {'name': ref_name} - self.array.get_volume.return_value = [PURE_SNAPSHOT] - - size = self.driver.manage_existing_snapshot_get_size(SNAPSHOT, - snap_ref) - expected_size = 3.0 - self.assertEqual(expected_size, size) - self.array.get_volume.assert_called_with(PURE_SNAPSHOT['source'], - snap=True) - - def test_manage_existing_snapshot_get_size_error_propagates(self): - self.array.get_volume.return_value = [PURE_SNAPSHOT] - self.assert_error_propagates( - [self.array.get_volume], - self.driver.manage_existing_snapshot_get_size, - SNAPSHOT, {'name': PURE_SNAPSHOT['name']} - ) - - def test_manage_existing_snapshot_get_size_bad_ref(self): - self.assertRaises(exception.ManageExistingInvalidReference, - self.driver.manage_existing_snapshot_get_size, - SNAPSHOT, {'bad_key': 'bad_value'}) - - def test_manage_existing_snapshot_get_size_empty_ref(self): - self.assertRaises(exception.ManageExistingInvalidReference, - self.driver.manage_existing_snapshot_get_size, - SNAPSHOT, {'name': ''}) - - def test_manage_existing_snapshot_get_size_none_ref(self): - self.assertRaises(exception.ManageExistingInvalidReference, - self.driver.manage_existing_snapshot_get_size, - SNAPSHOT, {'name': None}) - - def test_manage_existing_snapshot_get_size_volume_ref_not_exist(self): - self.array.get_volume.side_effect = \ - self.purestorage_module.PureHTTPError( - text="Volume does not exist.", - code=http_client.BAD_REQUEST - ) - self.assertRaises(exception.ManageExistingInvalidReference, - self.driver.manage_existing_snapshot_get_size, - SNAPSHOT, {'name': 'non-existing-volume.snap1'}) - - def 
test_manage_existing_snapshot_get_size_bad_api_version(self): - self.array.get_rest_version.return_value = '1.3' - self.assertRaises(exception.PureDriverException, - self.driver.manage_existing_snapshot_get_size, - SNAPSHOT, {'name': PURE_SNAPSHOT['name']}) - - def test_unmanage_snapshot(self): - unmanaged_snap_name = SNAPSHOT_PURITY_NAME + "-unmanaged" - self.driver.unmanage_snapshot(SNAPSHOT) - self.array.rename_volume.assert_called_with(SNAPSHOT_PURITY_NAME, - unmanaged_snap_name) - - def test_unmanage_snapshot_error_propagates(self): - self.assert_error_propagates([self.array.rename_volume], - self.driver.unmanage_snapshot, - SNAPSHOT) - - def test_unmanage_snapshot_with_deleted_snapshot(self): - unmanaged_snap_name = SNAPSHOT_PURITY_NAME + "-unmanaged" - self.array.rename_volume.side_effect = \ - self.purestorage_module.PureHTTPError( - text="Snapshot does not exist.", - code=http_client.BAD_REQUEST - ) - - self.driver.unmanage_snapshot(SNAPSHOT) - - self.array.rename_volume.assert_called_with(SNAPSHOT_PURITY_NAME, - unmanaged_snap_name) - - def test_unmanage_snapshot_bad_api_version(self): - self.array.get_rest_version.return_value = '1.3' - self.assertRaises(exception.PureDriverException, - self.driver.unmanage_snapshot, - SNAPSHOT) - - def _test_retype_repl(self, mock_is_repl, is_vol_repl, - repl_cabability, volume_id=None): - mock_is_repl.return_value = is_vol_repl - context = mock.MagicMock() - volume = fake_volume.fake_volume_obj(context) - if volume_id: - volume.id = volume_id - new_type = { - 'extra_specs': { - pure.EXTRA_SPECS_REPL_ENABLED: - ' ' + str(repl_cabability) - } - } - - actual = self.driver.retype(context, volume, new_type, None, None) - expected = (True, None) - self.assertEqual(expected, actual) - return context, volume - - def _test_get_manageable_things(self, - pure_objs=MANAGEABLE_PURE_VOLS, - expected_refs=MANAGEABLE_PURE_VOL_REFS, - pure_hosts=list(), - cinder_objs=list(), - is_snapshot=False): - self.array.list_volumes.return_value 
= pure_objs - self.array.list_hosts.return_value = pure_hosts - marker = mock.Mock() - limit = mock.Mock() - offset = mock.Mock() - sort_keys = mock.Mock() - sort_dirs = mock.Mock() - - with mock.patch('cinder.volume.utils.paginate_entries_list') as mpage: - if is_snapshot: - test_func = self.driver.get_manageable_snapshots - else: - test_func = self.driver.get_manageable_volumes - test_func(cinder_objs, marker, limit, offset, sort_keys, sort_dirs) - mpage.assert_called_once_with( - expected_refs, - marker, - limit, - offset, - sort_keys, - sort_dirs - ) - - def test_get_manageable_volumes(self,): - """Default success case. - - Given a list of pure volumes from the REST API, give back a list - of volume references. - """ - self._test_get_manageable_things(pure_hosts=[PURE_HOST]) - - def test_get_manageable_volumes_connected_vol(self): - """Make sure volumes connected to hosts are flagged as unsafe.""" - connected_host = deepcopy(PURE_HOST) - connected_host['name'] = 'host2' - connected_host['vol'] = MANAGEABLE_PURE_VOLS[0]['name'] - pure_hosts = [PURE_HOST, connected_host] - - expected_refs = deepcopy(MANAGEABLE_PURE_VOL_REFS) - expected_refs[0]['safe_to_manage'] = False - expected_refs[0]['reason_not_safe'] = 'Volume connected to host host2.' 
- - self._test_get_manageable_things(expected_refs=expected_refs, - pure_hosts=pure_hosts) - - def test_get_manageable_volumes_already_managed(self): - """Make sure volumes already owned by cinder are flagged as unsafe.""" - cinder_vol = fake_volume.fake_volume_obj(mock.MagicMock()) - cinder_vol.id = VOLUME_ID - cinders_vols = [cinder_vol] - - # Have one of our vol names match up with the existing cinder volume - purity_vols = deepcopy(MANAGEABLE_PURE_VOLS) - purity_vols[0]['name'] = 'volume-' + VOLUME_ID + '-cinder' - - expected_refs = deepcopy(MANAGEABLE_PURE_VOL_REFS) - expected_refs[0]['reference'] = {'name': purity_vols[0]['name']} - expected_refs[0]['safe_to_manage'] = False - expected_refs[0]['reason_not_safe'] = 'Volume already managed.' - expected_refs[0]['cinder_id'] = VOLUME_ID - - self._test_get_manageable_things(pure_objs=purity_vols, - expected_refs=expected_refs, - pure_hosts=[PURE_HOST], - cinder_objs=cinders_vols) - - def test_get_manageable_volumes_no_pure_volumes(self): - """Expect no refs to be found if no volumes are on Purity.""" - self._test_get_manageable_things(pure_objs=[], - expected_refs=[], - pure_hosts=[PURE_HOST]) - - def test_get_manageable_volumes_no_hosts(self): - """Success case with no hosts on Purity.""" - self._test_get_manageable_things(pure_hosts=[]) - - def test_get_manageable_snapshots(self): - """Default success case. - - Given a list of pure snapshots from the REST API, give back a list - of snapshot references. 
- """ - self._test_get_manageable_things( - pure_objs=MANAGEABLE_PURE_SNAPS, - expected_refs=MANAGEABLE_PURE_SNAP_REFS, - pure_hosts=[PURE_HOST], - is_snapshot=True - ) - - def test_get_manageable_snapshots_already_managed(self): - """Make sure snaps already owned by cinder are flagged as unsafe.""" - cinder_vol = fake_volume.fake_volume_obj(mock.MagicMock()) - cinder_vol.id = VOLUME_ID - cinder_snap = fake_snapshot.fake_snapshot_obj(mock.MagicMock()) - cinder_snap.id = SNAPSHOT_ID - cinder_snap.volume = cinder_vol - cinder_snaps = [cinder_snap] - - purity_snaps = deepcopy(MANAGEABLE_PURE_SNAPS) - purity_snaps[0]['name'] = 'volume-%s-cinder.snapshot-%s' % ( - VOLUME_ID, SNAPSHOT_ID - ) - - expected_refs = deepcopy(MANAGEABLE_PURE_SNAP_REFS) - expected_refs[0]['reference'] = {'name': purity_snaps[0]['name']} - expected_refs[0]['safe_to_manage'] = False - expected_refs[0]['reason_not_safe'] = 'Snapshot already managed.' - expected_refs[0]['cinder_id'] = SNAPSHOT_ID - - self._test_get_manageable_things( - pure_objs=purity_snaps, - expected_refs=expected_refs, - cinder_objs=cinder_snaps, - pure_hosts=[PURE_HOST], - is_snapshot=True - ) - - def test_get_manageable_snapshots_no_pure_snapshots(self): - """Expect no refs to be found if no snapshots are on Purity.""" - self._test_get_manageable_things(pure_objs=[], - expected_refs=[], - pure_hosts=[PURE_HOST], - is_snapshot=True) - - @mock.patch(BASE_DRIVER_OBJ + '._is_volume_replicated_type', autospec=True) - def test_retype_repl_to_repl(self, mock_is_replicated_type): - self._test_retype_repl(mock_is_replicated_type, True, True) - - @mock.patch(BASE_DRIVER_OBJ + '._is_volume_replicated_type', autospec=True) - def test_retype_non_repl_to_non_repl(self, mock_is_replicated_type): - self._test_retype_repl(mock_is_replicated_type, False, False) - - @mock.patch(BASE_DRIVER_OBJ + '._is_volume_replicated_type', autospec=True) - def test_retype_non_repl_to_repl(self, mock_is_replicated_type): - - context, volume = 
self._test_retype_repl(mock_is_replicated_type, - False, - True, - volume_id=VOLUME_ID) - self.array.set_pgroup.assert_called_once_with( - pure.REPLICATION_CG_NAME, - addvollist=[VOLUME_PURITY_NAME] - ) - - @mock.patch(BASE_DRIVER_OBJ + '._is_volume_replicated_type', autospec=True) - def test_retype_repl_to_non_repl(self, mock_is_replicated_type,): - context, volume = self._test_retype_repl(mock_is_replicated_type, - True, - False, - volume_id=VOLUME_ID) - self.array.set_pgroup.assert_called_once_with( - pure.REPLICATION_CG_NAME, - remvollist=[VOLUME_PURITY_NAME] - ) - - @mock.patch('cinder.volume.volume_types.get_volume_type') - def test_is_vol_replicated_no_extra_specs(self, mock_get_vol_type): - mock_get_vol_type.return_value = NON_REPLICATED_VOL_TYPE - volume = fake_volume.fake_volume_obj(mock.MagicMock()) - actual = self.driver._is_volume_replicated_type(volume) - self.assertFalse(actual) - - @mock.patch('cinder.volume.volume_types.get_volume_type') - def test_is_vol_replicated_has_repl_extra_specs(self, mock_get_vol_type): - mock_get_vol_type.return_value = REPLICATED_VOL_TYPE - volume = fake_volume.fake_volume_obj(mock.MagicMock()) - volume.volume_type_id = REPLICATED_VOL_TYPE['id'] - actual = self.driver._is_volume_replicated_type(volume) - self.assertTrue(actual) - - @mock.patch('cinder.volume.volume_types.get_volume_type') - def test_is_vol_replicated_none_type(self, mock_get_vol_type): - mock_get_vol_type.side_effect = exception.InvalidVolumeType(reason='') - volume = fake_volume.fake_volume_obj(mock.MagicMock()) - volume.volume_type = None - volume.volume_type_id = None - actual = self.driver._is_volume_replicated_type(volume) - self.assertFalse(actual) - - @mock.patch('cinder.volume.volume_types.get_volume_type') - def test_is_vol_replicated_has_other_extra_specs(self, mock_get_vol_type): - vtype_test = deepcopy(NON_REPLICATED_VOL_TYPE) - vtype_test["extra_specs"] = {"some_key": "some_value"} - mock_get_vol_type.return_value = vtype_test - volume = 
fake_volume.fake_volume_obj(mock.MagicMock()) - actual = self.driver._is_volume_replicated_type(volume) - self.assertFalse(actual) - - def test_does_pgroup_exist_not_exists(self): - self.array.get_pgroup.side_effect = ( - self.purestorage_module.PureHTTPError(code=http_client.BAD_REQUEST, - text="does not exist")) - exists = self.driver._does_pgroup_exist(self.array, "some_pgroup") - self.assertFalse(exists) - - def test_does_pgroup_exist_exists(self): - self.array.get_pgroup.side_effect = None - self.array.get_pgroup.return_value = PGROUP_ON_TARGET_NOT_ALLOWED - exists = self.driver._does_pgroup_exist(self.array, "some_pgroup") - self.assertTrue(exists) - - def test_does_pgroup_exist_error_propagates(self): - self.assert_error_propagates([self.array.get_pgroup], - self.driver._does_pgroup_exist, - self.array, - "some_pgroup") - - @mock.patch(BASE_DRIVER_OBJ + "._does_pgroup_exist") - def test_wait_until_target_group_setting_propagates_ready(self, - mock_exists): - mock_exists.return_value = True - self.driver._wait_until_target_group_setting_propagates( - self.array, - "some_pgroup" - ) - - @mock.patch(BASE_DRIVER_OBJ + "._does_pgroup_exist") - def test_wait_until_target_group_setting_propagates_not_ready(self, - mock_exists): - mock_exists.return_value = False - self.assertRaises( - exception.PureDriverException, - self.driver._wait_until_target_group_setting_propagates, - self.array, - "some_pgroup" - ) - - def test_wait_until_source_array_allowed_ready(self): - self.array.get_pgroup.return_value = PGROUP_ON_TARGET_ALLOWED - self.driver._wait_until_source_array_allowed( - self.array, - "some_pgroup",) - - def test_wait_until_source_array_allowed_not_ready(self): - self.array.get_pgroup.return_value = PGROUP_ON_TARGET_NOT_ALLOWED - self.assertRaises( - exception.PureDriverException, - self.driver._wait_until_source_array_allowed, - self.array, - "some_pgroup", - ) - - @mock.patch('cinder.volume.volume_types.get_volume_type') - def 
test_create_volume_replicated(self, mock_get_volume_type): - mock_get_volume_type.return_value = REPLICATED_VOL_TYPE - self._setup_mocks_for_replication() - self.driver._array = self.array - self.driver._array.array_name = GET_ARRAY_PRIMARY["array_name"] - self.driver._array.array_id = GET_ARRAY_PRIMARY["id"] - self.driver._replication_target_arrays = [mock.Mock()] - self.driver._replication_target_arrays[0].array_name = ( - GET_ARRAY_SECONDARY["array_name"]) - self.driver.create_volume(VOLUME) - self.array.create_volume.assert_called_with( - VOLUME["name"] + "-cinder", 2 * units.Gi) - self.array.set_pgroup.assert_called_with( - REPLICATION_PROTECTION_GROUP, - addvollist=[VOLUME["name"] + "-cinder"]) - - def test_find_failover_target_no_repl_targets(self): - self.driver._replication_target_arrays = [] - self.assertRaises(exception.PureDriverException, - self.driver._find_failover_target, - None) - - @mock.patch(BASE_DRIVER_OBJ + '._get_latest_replicated_pg_snap') - def test_find_failover_target_secondary_specified(self, mock_get_snap): - mock_backend_1 = mock.Mock() - mock_backend_2 = mock.Mock() - secondary_id = 'foo' - mock_backend_2._backend_id = secondary_id - self.driver._replication_target_arrays = [mock_backend_1, - mock_backend_2] - mock_get_snap.return_value = REPLICATED_PGSNAPS[0] - - array, pg_snap = self.driver._find_failover_target(secondary_id) - self.assertEqual(mock_backend_2, array) - self.assertEqual(REPLICATED_PGSNAPS[0], pg_snap) - - def test_find_failover_target_secondary_specified_not_found(self): - mock_backend = mock.Mock() - mock_backend._backend_id = 'not_foo' - self.driver._replication_target_arrays = [mock_backend] - self.assertRaises(exception.InvalidReplicationTarget, - self.driver._find_failover_target, - 'foo') - - @mock.patch(BASE_DRIVER_OBJ + '._get_latest_replicated_pg_snap') - def test_find_failover_target_secondary_specified_no_pgsnap(self, - mock_get_snap): - mock_backend = mock.Mock() - secondary_id = 'foo' - 
mock_backend._backend_id = secondary_id - self.driver._replication_target_arrays = [mock_backend] - mock_get_snap.return_value = None - - self.assertRaises(exception.PureDriverException, - self.driver._find_failover_target, - secondary_id) - - @mock.patch(BASE_DRIVER_OBJ + '._get_latest_replicated_pg_snap') - def test_find_failover_target_no_secondary_specified(self, - mock_get_snap): - mock_backend_1 = mock.Mock() - mock_backend_2 = mock.Mock() - self.driver._replication_target_arrays = [mock_backend_1, - mock_backend_2] - mock_get_snap.return_value = REPLICATED_PGSNAPS[0] - - array, pg_snap = self.driver._find_failover_target(None) - self.assertEqual(mock_backend_1, array) - self.assertEqual(REPLICATED_PGSNAPS[0], pg_snap) - - @mock.patch(BASE_DRIVER_OBJ + '._get_latest_replicated_pg_snap') - def test_find_failover_target_no_secondary_specified_missing_pgsnap( - self, mock_get_snap): - mock_backend_1 = mock.Mock() - mock_backend_2 = mock.Mock() - self.driver._replication_target_arrays = [mock_backend_1, - mock_backend_2] - mock_get_snap.side_effect = [None, REPLICATED_PGSNAPS[0]] - - array, pg_snap = self.driver._find_failover_target(None) - self.assertEqual(mock_backend_2, array) - self.assertEqual(REPLICATED_PGSNAPS[0], pg_snap) - - @mock.patch(BASE_DRIVER_OBJ + '._get_latest_replicated_pg_snap') - def test_find_failover_target_no_secondary_specified_no_pgsnap( - self, mock_get_snap): - mock_backend = mock.Mock() - self.driver._replication_target_arrays = [mock_backend] - mock_get_snap.return_value = None - - self.assertRaises(exception.PureDriverException, - self.driver._find_failover_target, - None) - - @mock.patch(BASE_DRIVER_OBJ + '._get_latest_replicated_pg_snap') - def test_find_failover_target_error_propagates_secondary_specified( - self, mock_get_snap): - mock_backend = mock.Mock() - mock_backend._backend_id = 'foo' - self.driver._replication_target_arrays = [mock_backend] - self.assert_error_propagates( - [mock_get_snap], - 
self.driver._find_failover_target, - 'foo' - ) - - @mock.patch(BASE_DRIVER_OBJ + '._get_latest_replicated_pg_snap') - def test_find_failover_target_error_propagates_no_secondary( - self, mock_get_snap): - self.driver._replication_target_arrays = [mock.Mock()] - self.assert_error_propagates( - [mock_get_snap], - self.driver._find_failover_target, - None - ) - - @mock.patch('cinder.volume.volume_types.get_volume_type') - def test_enable_replication_if_needed_success( - self, mock_get_volume_type): - mock_get_volume_type.return_value = REPLICATED_VOL_TYPE - self.driver._enable_replication_if_needed(self.array, VOLUME) - - self.array.set_pgroup.assert_called_with( - self.driver._replication_pg_name, - addvollist=[VOLUME_PURITY_NAME] - ) - - @mock.patch('cinder.volume.volume_types.get_volume_type') - def test_enable_replication_if_needed_not_repl_type( - self, mock_get_volume_type): - mock_get_volume_type.return_value = NON_REPLICATED_VOL_TYPE - self.driver._enable_replication_if_needed(self.array, VOLUME) - self.assertFalse(self.array.set_pgroup.called) - - @mock.patch('cinder.volume.volume_types.get_volume_type') - def test_enable_replication_if_needed_already_repl( - self, mock_get_volume_type): - mock_get_volume_type.return_value = REPLICATED_VOL_TYPE - self.array.set_pgroup.side_effect = FakePureStorageHTTPError( - code=http_client.BAD_REQUEST, text='already belongs to') - self.driver._enable_replication_if_needed(self.array, VOLUME) - self.array.set_pgroup.assert_called_with( - self.driver._replication_pg_name, - addvollist=[VOLUME_PURITY_NAME] - ) - - @mock.patch('cinder.volume.volume_types.get_volume_type') - def test_enable_replication_if_needed_error_propagates( - self, mock_get_volume_type): - mock_get_volume_type.return_value = REPLICATED_VOL_TYPE - self.driver._enable_replication_if_needed(self.array, VOLUME) - self.assert_error_propagates( - [self.array.set_pgroup], - self.driver._enable_replication, - self.array, VOLUME - ) - - @mock.patch(BASE_DRIVER_OBJ 
+ '._get_flasharray') - @mock.patch(BASE_DRIVER_OBJ + '._find_failover_target') - def test_failover(self, mock_find_failover_target, mock_get_array): - secondary_device_id = 'foo' - self.array2._backend_id = secondary_device_id - self.driver._replication_target_arrays = [self.array2] - - array2_v1_3 = mock.Mock() - array2_v1_3._backend_id = secondary_device_id - array2_v1_3.array_name = GET_ARRAY_SECONDARY['array_name'] - array2_v1_3.array_id = GET_ARRAY_SECONDARY['id'] - array2_v1_3.version = '1.3' - mock_get_array.return_value = array2_v1_3 - - target_array = self.array2 - target_array.copy_volume = mock.Mock() - - mock_find_failover_target.return_value = ( - target_array, - REPLICATED_PGSNAPS[1] - ) - - array2_v1_3.get_volume.return_value = REPLICATED_VOLUME_SNAPS - - context = mock.MagicMock() - new_active_id, volume_updates, __ = self.driver.failover_host( - context, - REPLICATED_VOLUME_OBJS, - None, - [] - ) - - self.assertEqual(secondary_device_id, new_active_id) - self.assertEqual([], volume_updates) - - calls = [] - for snap in REPLICATED_VOLUME_SNAPS: - vol_name = snap['name'].split('.')[-1] - calls.append(mock.call( - snap['name'], - vol_name, - overwrite=True - )) - target_array.copy_volume.assert_has_calls(calls, any_order=True) - - @mock.patch(BASE_DRIVER_OBJ + '._get_flasharray') - @mock.patch(BASE_DRIVER_OBJ + '._find_failover_target') - def test_failover_error_propagates(self, mock_find_failover_target, - mock_get_array): - mock_find_failover_target.return_value = ( - self.array2, - REPLICATED_PGSNAPS[1] - ) - - array2_v1_3 = mock.Mock() - array2_v1_3.array_name = GET_ARRAY_SECONDARY['array_name'] - array2_v1_3.array_id = GET_ARRAY_SECONDARY['id'] - array2_v1_3.version = '1.3' - mock_get_array.return_value = array2_v1_3 - - array2_v1_3.get_volume.return_value = REPLICATED_VOLUME_SNAPS - self.assert_error_propagates( - [mock_find_failover_target, - mock_get_array, - array2_v1_3.get_volume, - self.array2.copy_volume], - self.driver.failover_host, - 
mock.Mock(), REPLICATED_VOLUME_OBJS, None - ) - - def test_disable_replication_success(self): - self.driver._disable_replication(VOLUME) - self.array.set_pgroup.assert_called_with( - self.driver._replication_pg_name, - remvollist=[VOLUME_PURITY_NAME] - ) - - def test_disable_replication_error_propagates(self): - self.assert_error_propagates( - [self.array.set_pgroup], - self.driver._disable_replication, - VOLUME - ) - - def test_disable_replication_already_disabled(self): - self.array.set_pgroup.side_effect = FakePureStorageHTTPError( - code=http_client.BAD_REQUEST, text='could not be found') - self.driver._disable_replication(VOLUME) - self.array.set_pgroup.assert_called_with( - self.driver._replication_pg_name, - remvollist=[VOLUME_PURITY_NAME] - ) - - def test_get_flasharray_verify_https(self): - san_ip = '1.2.3.4' - api_token = 'abcdef' - cert_path = '/my/ssl/certs' - self.purestorage_module.FlashArray.return_value = mock.MagicMock() - - self.driver._get_flasharray(san_ip, - api_token, - verify_https=True, - ssl_cert_path=cert_path) - self.purestorage_module.FlashArray.assert_called_with( - san_ip, - api_token=api_token, - rest_version=None, - verify_https=True, - ssl_cert=cert_path, - user_agent=self.driver._user_agent, - ) - - -class PureISCSIDriverTestCase(PureDriverTestCase): - - def setUp(self): - super(PureISCSIDriverTestCase, self).setUp() - self.mock_config.use_chap_auth = False - self.driver = pure.PureISCSIDriver(configuration=self.mock_config) - self.driver._array = self.array - self.mock_utils = mock.Mock() - self.driver.driver_utils = self.mock_utils - - def test_get_host(self): - good_host = PURE_HOST.copy() - good_host.update(iqn=["another-wrong-iqn", INITIATOR_IQN]) - bad_host = {"name": "bad-host", "iqn": ["wrong-iqn"]} - self.array.list_hosts.return_value = [bad_host] - real_result = self.driver._get_host(self.array, ISCSI_CONNECTOR) - self.assertIsNone(real_result) - self.array.list_hosts.return_value.append(good_host) - real_result = 
self.driver._get_host(self.array, ISCSI_CONNECTOR) - self.assertEqual(good_host, real_result) - self.assert_error_propagates([self.array.list_hosts], - self.driver._get_host, - self.array, - ISCSI_CONNECTOR) - - @mock.patch(ISCSI_DRIVER_OBJ + "._connect") - @mock.patch(ISCSI_DRIVER_OBJ + "._get_target_iscsi_ports") - def test_initialize_connection(self, mock_get_iscsi_ports, - mock_connection): - mock_get_iscsi_ports.return_value = ISCSI_PORTS - lun = 1 - connection = { - "vol": VOLUME["name"] + "-cinder", - "lun": lun, - } - mock_connection.return_value = connection - result = deepcopy(ISCSI_CONNECTION_INFO) - - real_result = self.driver.initialize_connection(VOLUME, - ISCSI_CONNECTOR) - self.assertDictEqual(result, real_result) - mock_get_iscsi_ports.assert_called_with() - mock_connection.assert_called_with(VOLUME, ISCSI_CONNECTOR) - self.assert_error_propagates([mock_get_iscsi_ports, mock_connection], - self.driver.initialize_connection, - VOLUME, ISCSI_CONNECTOR) - - @mock.patch(ISCSI_DRIVER_OBJ + "._connect") - @mock.patch(ISCSI_DRIVER_OBJ + "._get_target_iscsi_ports") - def test_initialize_connection_with_auth(self, mock_get_iscsi_ports, - mock_connection): - auth_type = "CHAP" - chap_username = ISCSI_CONNECTOR["host"] - chap_password = "password" - mock_get_iscsi_ports.return_value = ISCSI_PORTS - mock_connection.return_value = { - "vol": VOLUME["name"] + "-cinder", - "lun": 1, - "auth_username": chap_username, - "auth_password": chap_password, - } - result = deepcopy(ISCSI_CONNECTION_INFO) - result["data"]["auth_method"] = auth_type - result["data"]["auth_username"] = chap_username - result["data"]["auth_password"] = chap_password - - self.mock_config.use_chap_auth = True - - # Branch where no credentials were generated - real_result = self.driver.initialize_connection(VOLUME, - ISCSI_CONNECTOR) - mock_connection.assert_called_with(VOLUME, ISCSI_CONNECTOR) - self.assertDictEqual(result, real_result) - - self.assert_error_propagates([mock_get_iscsi_ports, 
mock_connection], - self.driver.initialize_connection, - VOLUME, ISCSI_CONNECTOR) - - @mock.patch(ISCSI_DRIVER_OBJ + "._connect") - @mock.patch(ISCSI_DRIVER_OBJ + "._get_target_iscsi_ports") - def test_initialize_connection_multipath(self, - mock_get_iscsi_ports, - mock_connection): - mock_get_iscsi_ports.return_value = ISCSI_PORTS - lun = 1 - connection = { - "vol": VOLUME["name"] + "-cinder", - "lun": lun, - } - mock_connection.return_value = connection - multipath_connector = deepcopy(ISCSI_CONNECTOR) - multipath_connector["multipath"] = True - result = deepcopy(ISCSI_CONNECTION_INFO) - - real_result = self.driver.initialize_connection(VOLUME, - multipath_connector) - self.assertDictEqual(result, real_result) - mock_get_iscsi_ports.assert_called_with() - mock_connection.assert_called_with(VOLUME, multipath_connector) - - multipath_connector["multipath"] = False - self.driver.initialize_connection(VOLUME, multipath_connector) - - def test_get_target_iscsi_ports(self): - self.array.list_ports.return_value = ISCSI_PORTS - ret = self.driver._get_target_iscsi_ports() - self.assertEqual(ISCSI_PORTS, ret) - - def test_get_target_iscsi_ports_with_iscsi_and_fc(self): - self.array.list_ports.return_value = PORTS_WITH - ret = self.driver._get_target_iscsi_ports() - self.assertEqual(ISCSI_PORTS, ret) - - def test_get_target_iscsi_ports_with_no_ports(self): - # Should raise an exception if there are no ports - self.array.list_ports.return_value = [] - self.assertRaises(exception.PureDriverException, - self.driver._get_target_iscsi_ports) - - def test_get_target_iscsi_ports_with_only_fc_ports(self): - # Should raise an exception of there are no iscsi ports - self.array.list_ports.return_value = PORTS_WITHOUT - self.assertRaises(exception.PureDriverException, - self.driver._get_target_iscsi_ports) - - @mock.patch("cinder.volume.utils.generate_password", autospec=True) - @mock.patch(ISCSI_DRIVER_OBJ + "._get_host", autospec=True) - @mock.patch(ISCSI_DRIVER_OBJ + 
"._generate_purity_host_name", spec=True) - def test_connect(self, mock_generate, mock_host, mock_gen_secret): - vol_name = VOLUME["name"] + "-cinder" - result = {"vol": vol_name, "lun": 1} - - # Branch where host already exists - mock_host.return_value = PURE_HOST - self.array.connect_host.return_value = {"vol": vol_name, "lun": 1} - real_result = self.driver._connect(VOLUME, ISCSI_CONNECTOR) - self.assertEqual(result, real_result) - mock_host.assert_called_with(self.driver, self.array, ISCSI_CONNECTOR) - self.assertFalse(mock_generate.called) - self.assertFalse(self.array.create_host.called) - self.array.connect_host.assert_called_with(PURE_HOST_NAME, vol_name) - - # Branch where new host is created - mock_host.return_value = None - mock_generate.return_value = PURE_HOST_NAME - real_result = self.driver._connect(VOLUME, ISCSI_CONNECTOR) - mock_host.assert_called_with(self.driver, self.array, ISCSI_CONNECTOR) - mock_generate.assert_called_with(HOSTNAME) - self.array.create_host.assert_called_with(PURE_HOST_NAME, - iqnlist=[INITIATOR_IQN]) - self.assertEqual(result, real_result) - - mock_generate.reset_mock() - self.array.reset_mock() - self.assert_error_propagates( - [mock_host, mock_generate, self.array.connect_host, - self.array.create_host], self.driver._connect, VOLUME, - ISCSI_CONNECTOR) - - self.mock_config.use_chap_auth = True - chap_user = ISCSI_CONNECTOR["host"] - chap_password = "sOmEseCr3t" - - # Branch where chap is used and credentials already exist - initiator_data = [{"key": pure.CHAP_SECRET_KEY, - "value": chap_password}] - self.mock_utils.get_driver_initiator_data.return_value = initiator_data - self.driver._connect(VOLUME, ISCSI_CONNECTOR) - result["auth_username"] = chap_user - result["auth_password"] = chap_password - self.assertDictEqual(result, real_result) - self.array.set_host.assert_called_with(PURE_HOST_NAME, - host_user=chap_user, - host_password=chap_password) - - # Branch where chap is used and credentials are generated - 
mock_gen_secret.return_value = chap_password - self.mock_utils.get_driver_initiator_data.return_value = None - self.driver._connect(VOLUME, ISCSI_CONNECTOR) - result["auth_username"] = chap_user - result["auth_password"] = chap_password - - self.assertDictEqual(result, real_result) - self.array.set_host.assert_called_with(PURE_HOST_NAME, - host_user=chap_user, - host_password=chap_password) - self.mock_utils.insert_driver_initiator_data.assert_called_with( - ISCSI_CONNECTOR['initiator'], - pure.CHAP_SECRET_KEY, - chap_password - ) - - @mock.patch(ISCSI_DRIVER_OBJ + "._get_host", autospec=True) - def test_connect_already_connected(self, mock_host): - mock_host.return_value = PURE_HOST - expected = {"host": PURE_HOST_NAME, "lun": 1} - self.array.list_volume_private_connections.return_value = \ - [expected, {"host": "extra", "lun": 2}] - self.array.connect_host.side_effect = \ - self.purestorage_module.PureHTTPError( - code=http_client.BAD_REQUEST, - text="Connection already exists" - ) - actual = self.driver._connect(VOLUME, ISCSI_CONNECTOR) - self.assertEqual(expected, actual) - self.assertTrue(self.array.connect_host.called) - self.assertTrue(self.array.list_volume_private_connections) - - @mock.patch(ISCSI_DRIVER_OBJ + "._get_host", autospec=True) - def test_connect_already_connected_list_hosts_empty(self, mock_host): - mock_host.return_value = PURE_HOST - self.array.list_volume_private_connections.return_value = {} - self.array.connect_host.side_effect = \ - self.purestorage_module.PureHTTPError( - code=http_client.BAD_REQUEST, - text="Connection already exists" - ) - self.assertRaises(exception.PureDriverException, self.driver._connect, - VOLUME, ISCSI_CONNECTOR) - self.assertTrue(self.array.connect_host.called) - self.assertTrue(self.array.list_volume_private_connections) - - @mock.patch(ISCSI_DRIVER_OBJ + "._get_host", autospec=True) - def test_connect_already_connected_list_hosts_exception(self, mock_host): - mock_host.return_value = PURE_HOST - 
self.array.list_volume_private_connections.side_effect = \ - self.purestorage_module.PureHTTPError(code=http_client.BAD_REQUEST, - text="") - self.array.connect_host.side_effect = \ - self.purestorage_module.PureHTTPError( - code=http_client.BAD_REQUEST, - text="Connection already exists" - ) - self.assertRaises(self.purestorage_module.PureHTTPError, - self.driver._connect, VOLUME, - ISCSI_CONNECTOR) - self.assertTrue(self.array.connect_host.called) - self.assertTrue(self.array.list_volume_private_connections) - - @mock.patch(ISCSI_DRIVER_OBJ + "._get_chap_secret_from_init_data") - @mock.patch(ISCSI_DRIVER_OBJ + "._get_host", autospec=True) - def test_connect_host_deleted(self, mock_host, mock_get_secret): - mock_host.return_value = None - self.mock_config.use_chap_auth = True - mock_get_secret.return_value = 'abcdef' - - self.array.set_host.side_effect = ( - self.purestorage_module.PureHTTPError( - code=http_client.BAD_REQUEST, text='Host does not exist.')) - - # Because we mocked out retry make sure we are raising the right - # exception to allow for retries to happen. - self.assertRaises(exception.PureRetryableException, - self.driver._connect, - VOLUME, ISCSI_CONNECTOR) - - @mock.patch(ISCSI_DRIVER_OBJ + "._get_host", autospec=True) - def test_connect_iqn_already_in_use(self, mock_host): - mock_host.return_value = None - - self.array.create_host.side_effect = ( - self.purestorage_module.PureHTTPError( - code=http_client.BAD_REQUEST, - text='The specified IQN is already in use.')) - - # Because we mocked out retry make sure we are raising the right - # exception to allow for retries to happen. 
- self.assertRaises(exception.PureRetryableException, - self.driver._connect, - VOLUME, ISCSI_CONNECTOR) - - @mock.patch(ISCSI_DRIVER_OBJ + "._get_host", autospec=True) - def test_connect_create_host_already_exists(self, mock_host): - mock_host.return_value = None - - self.array.create_host.side_effect = ( - self.purestorage_module.PureHTTPError( - code=http_client.BAD_REQUEST, text='Host already exists.')) - - # Because we mocked out retry make sure we are raising the right - # exception to allow for retries to happen. - self.assertRaises(exception.PureRetryableException, - self.driver._connect, - VOLUME, ISCSI_CONNECTOR) - - @mock.patch(ISCSI_DRIVER_OBJ + "._generate_chap_secret") - def test_get_chap_credentials_create_new(self, mock_generate_secret): - self.mock_utils.get_driver_initiator_data.return_value = [] - host = 'host1' - expected_password = 'foo123' - mock_generate_secret.return_value = expected_password - self.mock_utils.insert_driver_initiator_data.return_value = True - username, password = self.driver._get_chap_credentials(host, - INITIATOR_IQN) - self.assertEqual(host, username) - self.assertEqual(expected_password, password) - self.mock_utils.insert_driver_initiator_data.assert_called_once_with( - INITIATOR_IQN, pure.CHAP_SECRET_KEY, expected_password - ) - - @mock.patch(ISCSI_DRIVER_OBJ + "._generate_chap_secret") - def test_get_chap_credentials_create_new_fail_to_set(self, - mock_generate_secret): - host = 'host1' - expected_password = 'foo123' - mock_generate_secret.return_value = 'badpassw0rd' - self.mock_utils.insert_driver_initiator_data.return_value = False - self.mock_utils.get_driver_initiator_data.side_effect = [ - [], - [{'key': pure.CHAP_SECRET_KEY, 'value': expected_password}], - exception.PureDriverException(reason='this should never be hit'), - ] - - username, password = self.driver._get_chap_credentials(host, - INITIATOR_IQN) - self.assertEqual(host, username) - self.assertEqual(expected_password, password) - - -class 
PureFCDriverTestCase(PureDriverTestCase): - - def setUp(self): - super(PureFCDriverTestCase, self).setUp() - self.driver = pure.PureFCDriver(configuration=self.mock_config) - self.driver._array = self.array - self.driver._lookup_service = mock.Mock() - - def test_get_host(self): - good_host = PURE_HOST.copy() - good_host.update(wwn=["another-wrong-wwn", INITIATOR_WWN]) - bad_host = {"name": "bad-host", "wwn": ["wrong-wwn"]} - self.array.list_hosts.return_value = [bad_host] - actual_result = self.driver._get_host(self.array, FC_CONNECTOR) - self.assertIsNone(actual_result) - self.array.list_hosts.return_value.append(good_host) - actual_result = self.driver._get_host(self.array, FC_CONNECTOR) - self.assertEqual(good_host, actual_result) - self.assert_error_propagates([self.array.list_hosts], - self.driver._get_host, - self.array, - FC_CONNECTOR) - - def test_get_host_uppercase_wwpn(self): - expected_host = PURE_HOST.copy() - expected_host['wwn'] = [INITIATOR_WWN] - self.array.list_hosts.return_value = [expected_host] - connector = FC_CONNECTOR.copy() - connector['wwpns'] = [wwpn.upper() for wwpn in FC_CONNECTOR['wwpns']] - - actual_result = self.driver._get_host(self.array, connector) - self.assertEqual(expected_host, actual_result) - - @mock.patch(FC_DRIVER_OBJ + "._connect") - def test_initialize_connection(self, mock_connection): - lookup_service = self.driver._lookup_service - (lookup_service.get_device_mapping_from_network. 
- return_value) = DEVICE_MAPPING - mock_connection.return_value = {"vol": VOLUME["name"] + "-cinder", - "lun": 1, - } - self.array.list_ports.return_value = FC_PORTS - actual_result = self.driver.initialize_connection(VOLUME, FC_CONNECTOR) - self.assertDictEqual(FC_CONNECTION_INFO, actual_result) - - @mock.patch(FC_DRIVER_OBJ + "._get_host", autospec=True) - @mock.patch(FC_DRIVER_OBJ + "._generate_purity_host_name", spec=True) - def test_connect(self, mock_generate, mock_host): - vol_name = VOLUME["name"] + "-cinder" - result = {"vol": vol_name, "lun": 1} - - # Branch where host already exists - mock_host.return_value = PURE_HOST - self.array.connect_host.return_value = {"vol": vol_name, "lun": 1} - real_result = self.driver._connect(VOLUME, FC_CONNECTOR) - self.assertEqual(result, real_result) - mock_host.assert_called_with(self.driver, self.array, FC_CONNECTOR) - self.assertFalse(mock_generate.called) - self.assertFalse(self.array.create_host.called) - self.array.connect_host.assert_called_with(PURE_HOST_NAME, vol_name) - - # Branch where new host is created - mock_host.return_value = None - mock_generate.return_value = PURE_HOST_NAME - real_result = self.driver._connect(VOLUME, FC_CONNECTOR) - mock_host.assert_called_with(self.driver, self.array, FC_CONNECTOR) - mock_generate.assert_called_with(HOSTNAME) - self.array.create_host.assert_called_with(PURE_HOST_NAME, - wwnlist={INITIATOR_WWN}) - self.assertEqual(result, real_result) - - mock_generate.reset_mock() - self.array.reset_mock() - self.assert_error_propagates( - [mock_host, mock_generate, self.array.connect_host, - self.array.create_host], - self.driver._connect, VOLUME, FC_CONNECTOR) - - @mock.patch(FC_DRIVER_OBJ + "._get_host", autospec=True) - def test_connect_already_connected(self, mock_host): - mock_host.return_value = PURE_HOST - expected = {"host": PURE_HOST_NAME, "lun": 1} - self.array.list_volume_private_connections.return_value = \ - [expected, {"host": "extra", "lun": 2}] - 
self.array.connect_host.side_effect = \ - self.purestorage_module.PureHTTPError( - code=http_client.BAD_REQUEST, - text="Connection already exists" - ) - actual = self.driver._connect(VOLUME, FC_CONNECTOR) - self.assertEqual(expected, actual) - self.assertTrue(self.array.connect_host.called) - self.assertTrue(self.array.list_volume_private_connections) - - @mock.patch(FC_DRIVER_OBJ + "._get_host", autospec=True) - def test_connect_already_connected_list_hosts_empty(self, mock_host): - mock_host.return_value = PURE_HOST - self.array.list_volume_private_connections.return_value = {} - self.array.connect_host.side_effect = \ - self.purestorage_module.PureHTTPError( - code=http_client.BAD_REQUEST, - text="Connection already exists" - ) - self.assertRaises(exception.PureDriverException, self.driver._connect, - VOLUME, FC_CONNECTOR) - self.assertTrue(self.array.connect_host.called) - self.assertTrue(self.array.list_volume_private_connections) - - @mock.patch(FC_DRIVER_OBJ + "._get_host", autospec=True) - def test_connect_already_connected_list_hosts_exception(self, mock_host): - mock_host.return_value = PURE_HOST - self.array.list_volume_private_connections.side_effect = \ - self.purestorage_module.PureHTTPError(code=http_client.BAD_REQUEST, - text="") - self.array.connect_host.side_effect = \ - self.purestorage_module.PureHTTPError( - code=http_client.BAD_REQUEST, - text="Connection already exists" - ) - self.assertRaises(self.purestorage_module.PureHTTPError, - self.driver._connect, VOLUME, FC_CONNECTOR) - self.assertTrue(self.array.connect_host.called) - self.assertTrue(self.array.list_volume_private_connections) - - @mock.patch(FC_DRIVER_OBJ + "._get_host", autospec=True) - def test_connect_wwn_already_in_use(self, mock_host): - mock_host.return_value = None - - self.array.create_host.side_effect = ( - self.purestorage_module.PureHTTPError( - code=http_client.BAD_REQUEST, - text='The specified WWN is already in use.')) - - # Because we mocked out retry make sure we 
are raising the right - # exception to allow for retries to happen. - self.assertRaises(exception.PureRetryableException, - self.driver._connect, - VOLUME, FC_CONNECTOR) - - -@ddt.ddt -class PureVolumeUpdateStatsTestCase(PureBaseSharedDriverTestCase): - def setUp(self): - super(PureVolumeUpdateStatsTestCase, self).setUp() - self.array.get.side_effect = self.fake_get_array - - @ddt.data(dict(used=10, - provisioned=100, - config_ratio=5, - expected_ratio=5, - auto=False), - dict(used=10, - provisioned=100, - config_ratio=5, - expected_ratio=10, - auto=True), - dict(used=0, - provisioned=100, - config_ratio=5, - expected_ratio=5, - auto=True), - dict(used=10, - provisioned=0, - config_ratio=5, - expected_ratio=5, - auto=True)) - @ddt.unpack - def test_get_thin_provisioning(self, - used, - provisioned, - config_ratio, - expected_ratio, - auto): - self.mock_config.pure_automatic_max_oversubscription_ratio = auto - self.mock_config.max_over_subscription_ratio = config_ratio - actual_ratio = self.driver._get_thin_provisioning(provisioned, used) - self.assertEqual(expected_ratio, actual_ratio) - - @mock.patch(BASE_DRIVER_OBJ + '.get_goodness_function') - @mock.patch(BASE_DRIVER_OBJ + '.get_filter_function') - @mock.patch(BASE_DRIVER_OBJ + '._get_provisioned_space') - @mock.patch(BASE_DRIVER_OBJ + '._get_thin_provisioning') - def test_get_volume_stats(self, mock_get_thin_provisioning, mock_get_space, - mock_get_filter, mock_get_goodness): - filter_function = 'capabilities.total_volumes < 10' - goodness_function = '90' - num_hosts = 20 - num_snaps = 175 - num_pgroups = 15 - reserved_percentage = 12 - - self.array.list_hosts.return_value = [PURE_HOST] * num_hosts - self.array.list_volumes.return_value = [PURE_SNAPSHOT] * num_snaps - self.array.list_pgroups.return_value = [PURE_PGROUP] * num_pgroups - self.mock_config.reserved_percentage = reserved_percentage - mock_get_space.return_value = (PROVISIONED_CAPACITY * units.Gi, 100) - mock_get_filter.return_value = filter_function 
- mock_get_goodness.return_value = goodness_function - mock_get_thin_provisioning.return_value = (PROVISIONED_CAPACITY / - USED_SPACE) - - expected_result = { - 'volume_backend_name': VOLUME_BACKEND_NAME, - 'vendor_name': 'Pure Storage', - 'driver_version': self.driver.VERSION, - 'storage_protocol': None, - 'consistencygroup_support': True, - 'thin_provisioning_support': True, - 'multiattach': False, - 'QoS_support': False, - 'total_capacity_gb': TOTAL_CAPACITY, - 'free_capacity_gb': TOTAL_CAPACITY - USED_SPACE, - 'reserved_percentage': reserved_percentage, - 'provisioned_capacity': PROVISIONED_CAPACITY, - 'max_over_subscription_ratio': (PROVISIONED_CAPACITY / - USED_SPACE), - 'filter_function': filter_function, - 'goodness_function': goodness_function, - 'total_volumes': 100, - 'total_snapshots': num_snaps, - 'total_hosts': num_hosts, - 'total_pgroups': num_pgroups, - 'writes_per_sec': PERF_INFO['writes_per_sec'], - 'reads_per_sec': PERF_INFO['reads_per_sec'], - 'input_per_sec': PERF_INFO['input_per_sec'], - 'output_per_sec': PERF_INFO['output_per_sec'], - 'usec_per_read_op': PERF_INFO['usec_per_read_op'], - 'usec_per_write_op': PERF_INFO['usec_per_write_op'], - 'queue_depth': PERF_INFO['queue_depth'], - 'replication_enabled': False, - 'replication_type': ['async'], - 'replication_count': 0, - 'replication_targets': [], - } - - real_result = self.driver.get_volume_stats(refresh=True) - self.assertDictEqual(expected_result, real_result) - - # Make sure when refresh=False we are using cached values and not - # sending additional requests to the array. 
- self.array.reset_mock() - real_result = self.driver.get_volume_stats(refresh=False) - self.assertDictEqual(expected_result, real_result) - self.assertFalse(self.array.get.called) - self.assertFalse(self.array.list_volumes.called) - self.assertFalse(self.array.list_hosts.called) - self.assertFalse(self.array.list_pgroups.called) - - -class PureVolumeGroupsTestCase(PureBaseSharedDriverTestCase): - def setUp(self): - super(PureVolumeGroupsTestCase, self).setUp() - self.array.get.side_effect = self.fake_get_array - self.mock_context = mock.Mock() - self.driver.db = mock.Mock() - self.driver.db.group_get = mock.Mock() - - @mock.patch('cinder.db.group_get') - @mock.patch(BASE_DRIVER_OBJ + '._add_volume_to_consistency_group') - @mock.patch('cinder.volume.utils.is_group_a_cg_snapshot_type') - def test_add_to_group_if_needed(self, mock_is_cg, mock_add_to_cg, - mock_db_group_get): - mock_is_cg.return_value = False - vol_name = 'foo' - group_id = fake.GROUP_ID - volume = fake_volume.fake_volume_obj(None, group_id=group_id) - group = mock.MagicMock() - mock_db_group_get.return_value = group - - self.driver._add_to_group_if_needed(volume, vol_name) - - mock_is_cg.assert_called_once_with(group) - mock_add_to_cg.assert_not_called() - - @mock.patch('cinder.db.group_get') - @mock.patch(BASE_DRIVER_OBJ + '._add_volume_to_consistency_group') - @mock.patch('cinder.volume.utils.is_group_a_cg_snapshot_type') - def test_add_to_group_if_needed_with_cg(self, mock_is_cg, mock_add_to_cg, - mock_db_group_get): - mock_is_cg.return_value = True - vol_name = 'foo' - group_id = fake.GROUP_ID - volume = fake_volume.fake_volume_obj(None, group_id=group_id) - group = mock.MagicMock() - mock_db_group_get.return_value = group - - self.driver._add_to_group_if_needed(volume, vol_name) - - mock_is_cg.assert_called_once_with(group) - mock_add_to_cg.assert_called_once_with( - group_id, - vol_name - ) - - @mock.patch('cinder.volume.utils.is_group_a_cg_snapshot_type') - def test_create_group(self, 
mock_is_cg): - mock_is_cg.return_value = False - group = fake_group.fake_group_type_obj(None) - self.assertRaises( - NotImplementedError, - self.driver.create_group, - self.mock_context, group - ) - mock_is_cg.assert_called_once_with(group) - - @mock.patch('cinder.volume.utils.is_group_a_cg_snapshot_type') - def test_delete_group(self, mock_is_cg): - mock_is_cg.return_value = False - group = mock.MagicMock() - volumes = [fake_volume.fake_volume_obj(None)] - self.assertRaises( - NotImplementedError, - self.driver.delete_group, - self.mock_context, group, volumes - ) - mock_is_cg.assert_called_once_with(group) - - @mock.patch('cinder.volume.utils.is_group_a_cg_snapshot_type') - def test_update_group(self, mock_is_cg): - mock_is_cg.return_value = False - group = mock.MagicMock() - self.assertRaises( - NotImplementedError, - self.driver.update_group, - self.mock_context, group - ) - mock_is_cg.assert_called_once_with(group) - - @mock.patch('cinder.volume.utils.is_group_a_cg_snapshot_type') - def test_create_group_from_src(self, mock_is_cg): - mock_is_cg.return_value = False - group = mock.MagicMock() - volumes = [fake_volume.fake_volume_obj(None)] - self.assertRaises( - NotImplementedError, - self.driver.create_group_from_src, - self.mock_context, group, volumes - ) - mock_is_cg.assert_called_once_with(group) - - @mock.patch('cinder.volume.utils.is_group_a_cg_snapshot_type') - def test_create_group_snapshot(self, mock_is_cg): - mock_is_cg.return_value = False - group_snapshot = mock.MagicMock() - snapshots = [fake_snapshot.fake_snapshot_obj(None)] - self.assertRaises( - NotImplementedError, - self.driver.create_group_snapshot, - self.mock_context, group_snapshot, snapshots - ) - mock_is_cg.assert_called_once_with(group_snapshot) - - @mock.patch('cinder.volume.utils.is_group_a_cg_snapshot_type') - def test_delete_group_snapshot(self, mock_is_cg): - mock_is_cg.return_value = False - group_snapshot = mock.MagicMock() - snapshots = [fake_snapshot.fake_snapshot_obj(None)] - 
self.assertRaises( - NotImplementedError, - self.driver.create_group_snapshot, - self.mock_context, group_snapshot, snapshots - ) - mock_is_cg.assert_called_once_with(group_snapshot) - - @mock.patch(BASE_DRIVER_OBJ + '.create_consistencygroup') - @mock.patch('cinder.volume.group_types.get_group_type_specs') - def test_create_group_with_cg(self, mock_get_specs, mock_create_cg): - mock_get_specs.return_value = ' True' - group = mock.MagicMock() - self.driver.create_group(self.mock_context, group) - mock_create_cg.assert_called_once_with(self.mock_context, group) - - @mock.patch(BASE_DRIVER_OBJ + '.delete_consistencygroup') - @mock.patch('cinder.volume.group_types.get_group_type_specs') - def test_delete_group_with_cg(self, mock_get_specs, mock_delete_cg): - mock_get_specs.return_value = ' True' - group = mock.MagicMock() - volumes = [fake_volume.fake_volume_obj(None)] - self.driver.delete_group(self.mock_context, group, volumes) - mock_delete_cg.assert_called_once_with(self.mock_context, - group, - volumes) - - @mock.patch(BASE_DRIVER_OBJ + '.update_consistencygroup') - @mock.patch('cinder.volume.group_types.get_group_type_specs') - def test_update_group_with_cg(self, mock_get_specs, mock_update_cg): - mock_get_specs.return_value = ' True' - group = mock.MagicMock() - addvollist = [mock.Mock()] - remvollist = [mock.Mock()] - self.driver.update_group( - self.mock_context, - group, - addvollist, - remvollist - ) - mock_update_cg.assert_called_once_with( - self.mock_context, - group, - addvollist, - remvollist - ) - - @mock.patch(BASE_DRIVER_OBJ + '.create_consistencygroup_from_src') - @mock.patch('cinder.volume.group_types.get_group_type_specs') - def test_create_group_from_src_with_cg(self, mock_get_specs, mock_create): - mock_get_specs.return_value = ' True' - group = mock.MagicMock() - volumes = [mock.Mock()] - group_snapshot = mock.Mock() - snapshots = [mock.Mock()] - source_group = mock.MagicMock() - source_vols = [mock.Mock()] - - 
self.driver.create_group_from_src( - self.mock_context, - group, - volumes, - group_snapshot, - snapshots, - source_group, - source_vols - ) - mock_create.assert_called_once_with( - self.mock_context, - group, - volumes, - group_snapshot, - snapshots, - source_group, - source_vols - ) - - @mock.patch(BASE_DRIVER_OBJ + '.create_cgsnapshot') - @mock.patch('cinder.volume.group_types.get_group_type_specs') - def test_create_group_snapshot_with_cg(self, mock_get_specs, - mock_create_cgsnap): - mock_get_specs.return_value = ' True' - group_snapshot = mock.MagicMock() - snapshots = [mock.Mock()] - - self.driver.create_group_snapshot( - self.mock_context, - group_snapshot, - snapshots - ) - mock_create_cgsnap.assert_called_once_with( - self.mock_context, - group_snapshot, - snapshots - ) - - @mock.patch(BASE_DRIVER_OBJ + '.delete_cgsnapshot') - @mock.patch('cinder.volume.group_types.get_group_type_specs') - def test_delete_group_snapshot_with_cg(self, mock_get_specs, - mock_delete_cg): - mock_get_specs.return_value = ' True' - group_snapshot = mock.MagicMock() - snapshots = [mock.Mock()] - - self.driver.delete_group_snapshot( - self.mock_context, - group_snapshot, - snapshots - ) - mock_delete_cg.assert_called_once_with( - self.mock_context, - group_snapshot, - snapshots - ) diff --git a/cinder/tests/unit/volume/drivers/test_qnap.py b/cinder/tests/unit/volume/drivers/test_qnap.py deleted file mode 100644 index 31b0e6bd3..000000000 --- a/cinder/tests/unit/volume/drivers/test_qnap.py +++ /dev/null @@ -1,2183 +0,0 @@ -# Copyright (c) 2016 QNAP Systems, Inc. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. 
You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import base64 -try: - import xml.etree.cElementTree as ET -except ImportError: - import xml.etree.ElementTree as ET - -import mock -from oslo_config import cfg -from oslo_utils import units -import six -from six.moves import urllib - -from cinder import test -from cinder.volume.drivers import qnap - -CONF = cfg.CONF - -FAKE_LUNNAA = {'LUNNAA': 'fakeLunNaa'} -FAKE_SNAPSHOT = {'snapshot_id': 'fakeSnapshotId'} - -FAKE_PASSWORD = 'qnapadmin' -FAKE_PARMS = {} -FAKE_PARMS['pwd'] = base64.b64encode(FAKE_PASSWORD.encode("utf-8")) -FAKE_PARMS['serviceKey'] = 1 -FAKE_PARMS['user'] = 'admin' -sanitized_params = {} - -for key in FAKE_PARMS: - value = FAKE_PARMS[key] - if value is not None: - sanitized_params[key] = six.text_type(value) -global_sanitized_params = urllib.parse.urlencode(sanitized_params) -header = { - 'charset': 'utf-8', 'Content-Type': 'application/x-www-form-urlencoded'} -login_url = ('/cgi-bin/authLogin.cgi?') - -get_basic_info_url = ('/cgi-bin/authLogin.cgi') - -FAKE_RES_DETAIL_DATA_LOGIN = """ - - - - """ - -FAKE_RES_DETAIL_DATA_GETBASIC_INFO_TS = """ - - - - - - - - - """ - -FAKE_RES_DETAIL_DATA_GETBASIC_INFO = """ - - - - - - - - - """ - -FAKE_RES_DETAIL_DATA_GETBASIC_INFO_TES = """ - - - - - - - - - """ - -FAKE_RES_DETAIL_DATA_LUN_INFO = """ - - - - - - - - - - - - - - - - - 1 - - - - """ - -FAKE_RES_DETAIL_DATA_MAPPED_LUN_INFO = """ - - - - - - - - - - - - - - - - - 2 - - - - """ - -FAKE_RES_DETAIL_DATA_SNAPSHOT = """ - - - - - - - - - """ - -FAKE_RES_DETAIL_DATA_SPECIFIC_POOL_INFO = """ - - - - - - - - - - - - - - - - - - - - - - - - - - - 
""" - -FAKE_RES_DETAIL_DATA_ISCSI_PORTAL_INFO = """ - - - - - - - - """ - -FAKE_RES_DETAIL_DATA_ETHERNET_IP = """ - - - - - - - - - - - - - """ - -FAKE_RES_DETAIL_DATA_CREATE_LUN = """ - - - - """ - -FAKE_RES_DETAIL_DATA_CREATE_TARGET = """ - - - - """ - -FAKE_RES_DETAIL_DATA_GETHOSTIDLISTBYINITIQN = """ - - - - - - - - - - - - - - - """ - -FAKE_RES_DETAIL_DATA_GET_ALL_ISCSI_PORTAL_SETTING = """ - - - - - - - - - - - - - - - - - - - - - fakeTargetIqn - - - - - """ - -FAKE_RES_DETAIL_DATA_TARGET_INFO = """ - - - - - - - fakeTargetIqn - - - - - """ - -FAKE_RES_DETAIL_GET_ALL_ISCSI_PORTAL_SETTING = { - 'data': FAKE_RES_DETAIL_DATA_GET_ALL_ISCSI_PORTAL_SETTING, - 'error': None, - 'http_status': 'fackStatus' -} - -FAKE_RES_DETAIL_ISCSI_PORTAL_INFO = { - 'data': FAKE_RES_DETAIL_DATA_ISCSI_PORTAL_INFO, - 'error': None, - 'http_status': 'fackStatus' -} - - -def create_configuration( - username, - password, - management_url, - san_iscsi_ip, - poolname, - thin_provision=True): - """Create configuration.""" - configuration = mock.Mock() - configuration.san_login = username - configuration.san_password = password - configuration.qnap_management_url = management_url - configuration.san_thin_provision = thin_provision - configuration.san_iscsi_ip = san_iscsi_ip - configuration.qnap_poolname = poolname - configuration.safe_get.return_value = 'QNAP' - configuration.iscsi_ip_address = '1.2.3.4' - configuration.qnap_storage_protocol = 'iscsi' - configuration.reserved_percentage = 0 - return configuration - - -class QnapDriverBaseTestCase(test.TestCase): - """Base Class for the QnapDriver Tests.""" - - def setUp(self): - """Setup the Qnap Driver Base TestCase.""" - super(QnapDriverBaseTestCase, self).setUp() - self.driver = None - self.mock_HTTPConnection = None - self.mock_object(qnap.QnapISCSIDriver, 'TIME_INTERVAL', 0) - - @staticmethod - def driver_mock_decorator(configuration): - """Driver mock decorator.""" - def driver_mock_wrapper(func): - def inner_driver_mock( - self, - 
mock_http_connection, - *args, - **kwargs): - """Inner driver mock.""" - self.mock_HTTPConnection = mock_http_connection - - self.driver = qnap.QnapISCSIDriver(configuration=configuration) - self.driver.do_setup('context') - func(self, *args, **kwargs) - return inner_driver_mock - return driver_mock_wrapper - - def tearDown(self): - """Tear down.""" - super(QnapDriverBaseTestCase, self).tearDown() - - -class SnapshotClass(object): - """Snapshot Class.""" - - volume = {} - name = '' - volume_name = '' - volume_size = 0 - metadata = {'snapshot_id': 'fakeSnapshotId'} - - def __init__(self, volume, volume_size): - """Init.""" - self.volume = volume - self.volume_size = volume_size - - def __getitem__(self, arg): - """Getitem.""" - return { - 'display_name': 'fakeSnapshotDisplayName', - 'id': 'fakeSnapshotId', - 'volume_size': self.volume_size, - 'metadata': self.metadata - }[arg] - - -class VolumeClass(object): - """Volume Class.""" - - display_name = '' - id = '' - size = 0 - name = '' - volume_metadata = {} - - def __init__(self, display_name, id, size, name): - """Init.""" - self.display_name = display_name - self.id = id - self.size = size - self.name = name - self.volume_metadata = {'LUNNAA': 'fakeLunNaa'} - - def __getitem__(self, arg): - """Getitem.""" - return { - 'display_name': self.display_name, - 'size': self.size, - 'id': self.id, - 'name': self.name, - 'provider_location': None, - 'volume_metadata': self.volume_metadata, - 'metadata': self.volume_metadata - }[arg] - - def __setitem__(self, key, value): - """Setitem.""" - if key == 'display_name': - self.display_name = value - - -class HostClass(object): - """Host Class.""" - - def __init__(self, host): - """Init.""" - self.host = host - - def __getitem__(self, arg): - """Getitem.""" - return { - 'host': 'fakeHost', - }[arg] - - -class FakeLoginResponse(object): - """Fake login response.""" - - status = 'fackStatus' - - def read(self): - """Mock response.read.""" - return FAKE_RES_DETAIL_DATA_LOGIN - - 
-class FakeGetBasicInfoResponse(object): - """Fake GetBasicInfo response.""" - - status = 'fackStatus' - - def read(self): - """Mock response.read.""" - return FAKE_RES_DETAIL_DATA_GETBASIC_INFO - - -class FakeGetBasicInfoTsResponse(object): - """Fake GetBasicInfoTs response.""" - - status = 'fackStatus' - - def read(self): - """Mock response.read.""" - return FAKE_RES_DETAIL_DATA_GETBASIC_INFO_TS - - -class FakeGetBasicInfoTesResponse(object): - """Fake GetBasicInfoTs response.""" - - status = 'fackStatus' - - def read(self): - """Mock response.read.""" - return FAKE_RES_DETAIL_DATA_GETBASIC_INFO_TES - - -class FakeLunInfoResponse(object): - """Fake lun info response.""" - - status = 'fackStatus' - - def read(self): - """Mock response.read.""" - return FAKE_RES_DETAIL_DATA_LUN_INFO - - -class FakePoolInfoResponse(object): - """Fake pool info response.""" - - status = 'fackStatus' - - def read(self): - """Mock response.read.""" - return FAKE_RES_DETAIL_DATA_SPECIFIC_POOL_INFO - - -class FakeCreateLunResponse(object): - """Fake create lun response.""" - - status = 'fackStatus' - - def read(self): - """Mock response.read.""" - return FAKE_RES_DETAIL_DATA_CREATE_LUN - - -class FakeCreatTargetResponse(object): - """Fake create target response.""" - - status = 'fackStatus' - - def read(self): - """Mock response.read.""" - return FAKE_RES_DETAIL_DATA_CREATE_TARGET - - -class FakeGetIscsiPortalInfoResponse(object): - """Fake get iscsi portal inforesponse.""" - - status = 'fackStatus' - - def read(self): - """Mock response.read.""" - return FAKE_RES_DETAIL_DATA_ISCSI_PORTAL_INFO - - def __repr__(self): - """Repr.""" - return six.StringIO(FAKE_RES_DETAIL_DATA_ISCSI_PORTAL_INFO) - - -class FakeCreateSnapshotResponse(object): - """Fake Create snapshot inforesponse.""" - - status = 'fackStatus' - - def read(self): - """Mock response.read.""" - return FAKE_RES_DETAIL_DATA_SNAPSHOT - - -class FakeGetAllIscsiPortalSetting(object): - """Fake get all iSCSI portal setting.""" - - 
status = 'fackStatus' - - def read(self): - """Mock response.read.""" - return FAKE_RES_DETAIL_DATA_GET_ALL_ISCSI_PORTAL_SETTING - - -class FakeGetAllEthernetIp(object): - """Fake get all ethernet ip setting.""" - - status = 'fackStatus' - - def read(self): - """Mock response.read.""" - return FAKE_RES_DETAIL_DATA_ETHERNET_IP - - -class FakeTargetInfo(object): - """Fake target info setting.""" - - status = 'fackStatus' - - def read(self): - """Mock response.read.""" - return FAKE_RES_DETAIL_DATA_TARGET_INFO - - -class QnapDriverLoginTestCase(QnapDriverBaseTestCase): - """Tests do_setup api.""" - - @mock.patch('six.moves.http_client.HTTPConnection') - def test_do_setup_positive( - self, - mock_http_connection): - """Test do_setup with http://1.2.3.4:8080.""" - fake_login_response = FakeLoginResponse() - fake_get_basic_info_response = FakeGetBasicInfoResponse() - mock_http_connection.return_value.getresponse.side_effect = ([ - fake_login_response, - fake_get_basic_info_response, - fake_login_response]) - - self.driver = qnap.QnapISCSIDriver( - configuration=create_configuration( - 'admin', - 'qnapadmin', - 'http://1.2.3.4:8080', - '1.2.3.4', - 'Storage Pool 1', - True)) - self.driver.do_setup('context') - - self.assertEqual('fakeSid', self.driver.api_executor.sid) - self.assertEqual('admin', self.driver.api_executor.username) - self.assertEqual('qnapadmin', self.driver.api_executor.password) - self.assertEqual('1.2.3.4', self.driver.api_executor.ip) - self.assertEqual('8080', self.driver.api_executor.port) - self.assertFalse(self.driver.api_executor.ssl) - - @mock.patch('six.moves.http_client.HTTPSConnection') - def test_do_setup_positive_with_ssl( - self, - mock_http_connection): - """Test do_setup with https://1.2.3.4:443.""" - fake_login_response = FakeLoginResponse() - fake_get_basic_info_response = FakeGetBasicInfoResponse() - mock_http_connection.return_value.getresponse.side_effect = ([ - fake_login_response, - fake_get_basic_info_response, - 
fake_login_response]) - - self.driver = qnap.QnapISCSIDriver( - configuration=create_configuration( - 'admin', - 'qnapadmin', - 'https://1.2.3.4:443', - '1.2.3.4', - 'Storage Pool 1', - True)) - self.driver.do_setup('context') - - self.assertEqual('fakeSid', self.driver.api_executor.sid) - self.assertEqual('admin', self.driver.api_executor.username) - self.assertEqual('qnapadmin', self.driver.api_executor.password) - self.assertEqual('1.2.3.4', self.driver.api_executor.ip) - self.assertEqual('443', self.driver.api_executor.port) - self.assertTrue(self.driver.api_executor.ssl) - - -class QnapDriverVolumeTestCase(QnapDriverBaseTestCase): - """Tests volume related api's.""" - - def get_lun_info_return_value(self): - """Return the lun form get_lun_info method.""" - root = ET.fromstring(FAKE_RES_DETAIL_DATA_LUN_INFO) - - lun_list = root.find('iSCSILUNList') - lun_info_tree = lun_list.findall('LUNInfo') - for lun in lun_info_tree: - return lun - - def get_mapped_lun_info_return_value(self): - """Return the lun form get_lun_info method.""" - root = ET.fromstring(FAKE_RES_DETAIL_DATA_MAPPED_LUN_INFO) - - lun_list = root.find('iSCSILUNList') - lun_info_tree = lun_list.findall('LUNInfo') - for lun in lun_info_tree: - return lun - - def get_snapshot_info_return_value(self): - """Return the lun form get_lun_info method.""" - root = ET.fromstring(FAKE_RES_DETAIL_DATA_SNAPSHOT) - - snapshot_list = root.find('SnapshotList') - snapshot_info_tree = snapshot_list.findall('row') - for snapshot in snapshot_info_tree: - return snapshot - - @mock.patch.object(qnap.QnapISCSIDriver, '_get_volume_metadata') - @mock.patch.object(qnap.QnapISCSIDriver, '_gen_random_name') - @mock.patch('cinder.volume.drivers.qnap.QnapAPIExecutor') - def test_create_volume_positive( - self, - mock_api_executor, - mock_gen_random_name, - mock_get_volume_metadata): - """Test create_volume with fake_volume.""" - fake_volume = VolumeClass( - 'fakeDisplayName', 'fakeId', 100, 'fakeLunName') - - 
mock_api_executor.return_value.get_basic_info.return_value = ( - 'ES1640dc ', 'ES1640dc ', '1.1.3') - mock_api_executor.return_value.get_lun_info.side_effect = [ - None, - self.get_lun_info_return_value()] - mock_gen_random_name.return_value = 'fakeLun' - mock_api_executor.return_value.create_lun.return_value = 'fakeIndex' - mock_get_volume_metadata.return_value = {} - - self.driver = qnap.QnapISCSIDriver( - configuration=create_configuration( - 'admin', - 'qnapadmin', - 'http://1.2.3.4:8080', - '1.2.3.4', - 'Pool1', - True)) - self.driver.do_setup('context') - self.driver.create_volume(fake_volume) - - mock_api_executor.return_value.create_lun.assert_called_once_with( - fake_volume, - self.driver.configuration.qnap_poolname, - 'fakeLun', - True) - - expected_call_list = [ - mock.call(LUNName='fakeLun'), - mock.call(LUNIndex='fakeIndex')] - self.assertEqual( - expected_call_list, - mock_api_executor.return_value.get_lun_info.call_args_list) - - @mock.patch.object( - qnap.QnapISCSIDriver, '_get_lun_naa_from_volume_metadata') - @mock.patch('cinder.volume.drivers.qnap.QnapAPIExecutor') - def test_delete_volume_positive( - self, - mock_api_executor, - mock_get_lun_naa_from_volume_metadata): - """Test delete_volume with fake_volume.""" - fake_volume = VolumeClass( - 'fakeDisplayName', 'fakeId', 100, 'fakeLunName') - - mock_api_executor.return_value.get_basic_info.return_value = ( - 'ES1640dc ', 'ES1640dc ', '1.1.3') - mock_get_lun_naa_from_volume_metadata.return_value = FAKE_LUNNAA - mock_api_executor.return_value.get_lun_info.return_value = ( - self.get_lun_info_return_value()) - mock_api_executor.return_value.delete_lun.return_value = None - - self.driver = qnap.QnapISCSIDriver( - configuration=create_configuration( - 'admin', - 'qnapadmin', - 'http://1.2.3.4:8080', - '1.2.3.4', - 'Pool1', - True)) - self.driver.do_setup('context') - self.driver.delete_volume(fake_volume) - - mock_api_executor.return_value.delete_lun.assert_called_once_with( - 'fakeLunIndex') - - 
@mock.patch.object( - qnap.QnapISCSIDriver, '_get_lun_naa_from_volume_metadata') - @mock.patch.object(qnap.QnapISCSIDriver, '_gen_random_name') - @mock.patch.object(qnap.QnapISCSIDriver, '_get_volume_metadata') - @mock.patch('cinder.volume.drivers.qnap.QnapAPIExecutor') - def test_create_cloned_volume_volume_size_less_src_verf( - self, - mock_api_executor, - mock_get_volume_metadata, - mock_gen_random_name, - mock_get_lun_naa_from_volume_metadata): - """Test create cloned volume.""" - fake_volume = VolumeClass( - 'fakeDisplayName', 'fakeId', 90, 'fakeLunName') - fake_src_vref = VolumeClass( - 'fakeSrcVrefName', 'fakeId', 100, 'fakeSrcVref') - - mock_get_lun_naa_from_volume_metadata.return_value = 'fakeLunNaa' - mock_api_executor.return_value.get_basic_info.return_value = ( - 'ES1640dc ', 'ES1640dc ', '1.1.3') - mock_get_volume_metadata.return_value = {} - mock_api_executor.return_value.get_lun_info.side_effect = [ - self.get_lun_info_return_value(), - None, - self.get_lun_info_return_value()] - mock_gen_random_name.side_effect = ['fakeSnapshot', 'fakeLun'] - mock_api_executor.return_value.get_snapshot_info.side_effect = [ - None, self.get_snapshot_info_return_value()] - mock_api_executor.return_value.create_snapshot_api.return_value = ( - 'fakeSnapshotId') - mock_api_executor.return_value.clone_snapshot.return_value = None - - self.driver = qnap.QnapISCSIDriver( - configuration=create_configuration( - 'admin', - 'qnapadmin', - 'http://1.2.3.4:8080', - 'Pool1', - True)) - self.driver.do_setup('context') - self.driver.create_cloned_volume(fake_volume, fake_src_vref) - - expected_call_list = [ - mock.call(LUNNAA='fakeLunNaa'), - mock.call(LUNName='fakeLun'), - mock.call(LUNName='fakeLun')] - self.assertEqual( - expected_call_list, - mock_api_executor.return_value.get_lun_info.call_args_list) - expected_call_list = [ - mock.call(lun_index='fakeLunIndex', snapshot_name='fakeSnapshot'), - mock.call(lun_index='fakeLunIndex', snapshot_name='fakeSnapshot')] - 
self.assertEqual( - expected_call_list, - mock_api_executor.return_value.get_snapshot_info.call_args_list) - mock_api_return = mock_api_executor.return_value - mock_api_return.create_snapshot_api.assert_called_once_with( - 'fakeLunIndex', 'fakeSnapshot') - mock_api_return.clone_snapshot.assert_called_once_with( - 'fakeSnapshotId', 'fakeLun') - - @mock.patch.object( - qnap.QnapISCSIDriver, '_get_lun_naa_from_volume_metadata') - @mock.patch.object(qnap.QnapISCSIDriver, '_extend_lun') - @mock.patch.object(qnap.QnapISCSIDriver, '_gen_random_name') - @mock.patch.object(qnap.QnapISCSIDriver, '_get_volume_metadata') - @mock.patch('cinder.volume.drivers.qnap.QnapAPIExecutor') - def test_create_cloned_volume_volume_size_morethan_src_verf( - self, - mock_api_executor, - mock_get_volume_metadata, - mock_gen_random_name, - mock_extend_lun, - mock_get_lun_naa_from_volume_metadata): - """Test create cloned volume.""" - fake_volume = VolumeClass( - 'fakeDisplayName', 'fakeId', 100, 'fakeLunName') - fake_src_vref = VolumeClass( - 'fakeSrcVrefName', 'fakeId', 90, 'fakeSrcVref') - - mock_get_lun_naa_from_volume_metadata.return_value = 'fakeLunNaa' - mock_api_executor.return_value.get_basic_info.return_value = ( - 'ES1640dc ', 'ES1640dc ', '1.1.3') - mock_get_volume_metadata.return_value = FAKE_LUNNAA - mock_api_executor.return_value.get_lun_info.side_effect = [ - self.get_lun_info_return_value(), - None, - self.get_lun_info_return_value()] - mock_gen_random_name.side_effect = ['fakeSnapshot', 'fakeLun'] - mock_api_executor.return_value.get_snapshot_info.side_effect = [ - None, self.get_snapshot_info_return_value()] - mock_api_executor.return_value.create_snapshot_api.return_value = ( - 'fakeSnapshotId') - mock_api_executor.return_value.clone_snapshot.return_value = None - mock_extend_lun.return_value = None - - self.driver = qnap.QnapISCSIDriver( - configuration=create_configuration( - 'admin', - 'qnapadmin', - 'http://1.2.3.4:8080', - '1.2.3.4', - 'Pool1', - True)) - 
self.driver.do_setup('context') - self.driver.create_cloned_volume(fake_volume, fake_src_vref) - - mock_extend_lun.assert_called_once_with(fake_volume, 'fakeLunNaa') - - @mock.patch.object(qnap.QnapISCSIDriver, '_gen_random_name') - @mock.patch('cinder.volume.drivers.qnap.QnapAPIExecutor') - def test_create_snapshot_positive( - self, - mock_api_executor, - mock_gen_random_name): - """Test create snapshot.""" - fake_volume = VolumeClass( - 'fakeDisplayName', 'fakeId', 100, 'fakeLunName') - snapshot = SnapshotClass(fake_volume, 100) - - mock_api_executor.return_value.get_basic_info.return_value = ( - 'ES1640dc ', 'ES1640dc ', '1.1.3') - mock_api_executor.return_value.get_lun_info.return_value = ( - self.get_lun_info_return_value()) - mock_gen_random_name.return_value = 'fakeSnapshot' - mock_api_executor.return_value.get_snapshot_info.side_effect = [ - None, self.get_snapshot_info_return_value()] - mock_api_executor.return_value.create_snapshot_api.return_value = ( - 'fakeSnapshotId') - - self.driver = qnap.QnapISCSIDriver( - configuration=create_configuration( - 'admin', - 'qnapadmin', - 'http://1.2.3.4:8080', - '1.2.3.4', - 'Pool1', - True)) - self.driver.do_setup('context') - self.driver.create_snapshot(snapshot) - - mock_api_return = mock_api_executor.return_value - mock_api_return.get_lun_info.assert_called_once_with( - LUNNAA='fakeLunNaa') - expected_call_list = [ - mock.call(lun_index='fakeLunIndex', snapshot_name='fakeSnapshot'), - mock.call(lun_index='fakeLunIndex', snapshot_name='fakeSnapshot')] - self.assertEqual( - expected_call_list, - mock_api_executor.return_value.get_snapshot_info.call_args_list) - mock_api_return.create_snapshot_api.assert_called_once_with( - 'fakeLunIndex', 'fakeSnapshot') - - @mock.patch('cinder.volume.drivers.qnap.QnapAPIExecutor') - def test_delete_snapshot_positive( - self, - mock_api_executor): - """Test delete snapshot.""" - fake_volume = VolumeClass( - 'fakeDisplayName', 'fakeId', 100, 'fakeLunName') - fake_snapshot = 
SnapshotClass(fake_volume, 100) - - mock_api_executor.return_value.get_basic_info.return_value = ( - 'ES1640dc ', 'ES1640dc ', '1.1.3') - mock_api_executor.return_value.api_delete_snapshot.return_value = None - - self.driver = qnap.QnapISCSIDriver( - configuration=create_configuration( - 'admin', - 'qnapadmin', - 'http://1.2.3.4:8080', - '1.2.3.4', - 'Pool1', - True)) - self.driver.do_setup('context') - self.driver.delete_snapshot(fake_snapshot) - - mock_api_return = mock_api_executor.return_value - mock_api_return.api_delete_snapshot.assert_called_once_with( - 'fakeSnapshotId') - - @mock.patch.object(qnap.QnapISCSIDriver, '_get_volume_metadata') - @mock.patch.object(qnap.QnapISCSIDriver, '_extend_lun') - @mock.patch.object(qnap.QnapISCSIDriver, '_gen_random_name') - @mock.patch('cinder.volume.drivers.qnap.QnapAPIExecutor') - def test_create_volume_from_snapshot_positive_volsize_more_snapshotvolsize( - self, - mock_api_executor, - mock_gen_random_name, - mock_extend_lun, - mock_get_volume_metadata): - """Test create volume from snapshot positive.""" - fake_volume = VolumeClass( - 'fakeDisplayName', 'fakeId', 100, 'fakeLunName') - fake_snapshot = SnapshotClass(fake_volume, 90) - - mock_api_executor.return_value.get_basic_info.return_value = ( - 'ES1640dc ', 'ES1640dc ', '1.1.3') - mock_gen_random_name.return_value = 'fakeLun' - mock_api_executor.return_value.get_lun_info.side_effect = [ - None, - self.get_lun_info_return_value()] - mock_api_executor.return_value.clone_snapshot.return_value = None - - mock_api_executor.return_value.create_snapshot_api.return_value = ( - 'fakeSnapshotId') - mock_extend_lun.return_value = None - - self.driver = qnap.QnapISCSIDriver( - configuration=create_configuration( - 'admin', - 'qnapadmin', - 'http://1.2.3.4:8080', - '1.2.3.4', - 'Pool1', - True)) - self.driver.do_setup('context') - self.driver.create_volume_from_snapshot(fake_volume, fake_snapshot) - - expected_call_list = [ - mock.call(LUNName='fakeLun'), - 
mock.call(LUNName='fakeLun')] - self.assertEqual( - expected_call_list, - mock_api_executor.return_value.get_lun_info.call_args_list) - mock_api_return = mock_api_executor.return_value - mock_api_return.clone_snapshot.assert_called_once_with( - 'fakeSnapshotId', 'fakeLun') - mock_extend_lun.assert_called_once_with(fake_volume, 'fakeLunNaa') - - def get_specific_poolinfo_return_value(self): - """Get specific pool info.""" - root = ET.fromstring(FAKE_RES_DETAIL_DATA_SPECIFIC_POOL_INFO) - pool_list = root.find('Pool_Index') - pool_info_tree = pool_list.findall('row') - for pool in pool_info_tree: - return pool - - @mock.patch('cinder.volume.drivers.qnap.QnapAPIExecutor') - def test_get_volume_stats( - self, - mock_api_executor): - """Get volume stats.""" - mock_api_return = mock_api_executor.return_value - mock_api_return.get_basic_info.return_value = ( - 'ES1640dc ', 'ES1640dc ', '1.1.3') - mock_api_return.get_specific_poolinfo.return_value = ( - self.get_specific_poolinfo_return_value()) - - self.driver = qnap.QnapISCSIDriver( - configuration=create_configuration( - 'admin', - 'qnapadmin', - 'http://1.2.3.4:8080', - '1.2.3.4', - 'Pool1', - True)) - self.driver.do_setup('context') - - expected_res = {'volume_backend_name': 'QNAP', - 'vendor_name': 'QNAP', - 'driver_version': '1.0.0', - 'storage_protocol': 'iscsi'} - single_pool = dict( - pool_name=self.driver.configuration.qnap_poolname, - total_capacity_gb=930213412209 / units.Gi, - free_capacity_gb=928732941681 / units.Gi, - provisioned_capacity_gb=1480470528 / units.Gi, - reserved_percentage=self.driver.configuration.reserved_percentage, - QoS_support=False) - expected_res['pools'] = [single_pool] - - self.assertEqual( - expected_res, - self.driver.get_volume_stats(refresh=True)) - mock_api_return.get_specific_poolinfo.assert_called_once_with( - self.driver.configuration.qnap_poolname) - - @mock.patch.object(qnap.QnapISCSIDriver, '_extend_lun') - @mock.patch('cinder.volume.drivers.qnap.QnapAPIExecutor') - def 
test_extend_volume( - self, - mock_api_executor, - mock_extend_lun): - """Test extend volume.""" - fake_volume = VolumeClass( - 'fakeDisplayName', 'fakeId', 100, 'fakeLunName') - - mock_api_executor.return_value.get_basic_info.return_value = ( - 'ES1640dc ', 'ES1640dc ', '1.1.3') - - self.driver = qnap.QnapISCSIDriver( - configuration=create_configuration( - 'admin', - 'qnapadmin', - 'http://1.2.3.4:8080', - '1.2.3.4', - 'Pool1', - True)) - self.driver.do_setup('context') - self.driver.extend_volume(fake_volume, 'fakeSize') - - mock_extend_lun.assert_called_once_with(fake_volume, '') - - @mock.patch.object( - qnap.QnapISCSIDriver, '_get_lun_naa_from_volume_metadata') - @mock.patch('cinder.volume.drivers.qnap.QnapAPIExecutor') - def test_extend_lun( - self, - mock_api_executor, - mock_get_lun_naa_from_volume_metadata): - """Test _extend_lun method.""" - fake_volume = VolumeClass( - 'fakeDisplayName', 'fakeId', 100, 'fakeLunName') - - mock_api_executor.return_value.get_basic_info.return_value = ( - 'ES1640dc ', 'ES1640dc ', '1.1.3') - mock_get_lun_naa_from_volume_metadata.return_value = 'fakeLunNaa' - mock_api_executor.return_value.get_lun_info.return_value = ( - self.get_lun_info_return_value()) - mock_api_executor.return_value.edit_lun.return_value = None - - self.driver = qnap.QnapISCSIDriver( - configuration=create_configuration( - 'admin', - 'qnapadmin', - 'http://1.2.3.4:8080', - '1.2.3.4', - 'Pool1', - True)) - self.driver.do_setup('context') - self.driver._extend_lun(fake_volume, '') - - mock_api_return = mock_api_executor.return_value - mock_api_return.get_lun_info.assert_called_once_with( - LUNNAA='fakeLunNaa') - expect_lun = { - 'LUNName': 'fakeLunName', - 'LUNCapacity': fake_volume['size'], - 'LUNIndex': 'fakeLunIndex', - 'LUNThinAllocate': 'fakeLunThinAllocate', - 'LUNPath': 'fakeLunPath', - 'LUNStatus': '1'} - mock_api_return.edit_lun.assert_called_once_with(expect_lun) - - @mock.patch.object(qnap.QnapISCSIDriver, - '_get_lun_naa_from_volume_metadata') 
- @mock.patch('cinder.volume.drivers.qnap.QnapAPIExecutor') - def test_initialize_connection_with_target_exist( - self, - mock_api_executor, - mock_get_lun_naa_from_volume_metadata): - """Test initialize connection.""" - fake_volume = VolumeClass( - 'fakeDisplayName', 'fakeId', 100, 'fakeLunName') - fake_connector = {'initiator': 'fakeInitiatorIqn'} - - mock_api_return = mock_api_executor.return_value - mock_api_return.get_basic_info.return_value = ( - 'ES1640dc ', 'ES1640dc ', '1.1.3') - mock_api_return.get_iscsi_portal_info.return_value = ( - FAKE_RES_DETAIL_ISCSI_PORTAL_INFO) - mock_get_lun_naa_from_volume_metadata.return_value = 'fakeLunNaa' - mock_api_executor.return_value.get_lun_info.side_effect = [ - self.get_lun_info_return_value(), - self.get_lun_info_return_value()] - mock_api_return.get_all_iscsi_portal_setting.return_value = ( - FAKE_RES_DETAIL_GET_ALL_ISCSI_PORTAL_SETTING) - mock_api_executor.return_value.map_lun.return_value = None - mock_api_return.get_ethernet_ip.return_value = ['1.2.3.4'] - - self.driver = qnap.QnapISCSIDriver( - configuration=create_configuration( - 'admin', - 'qnapadmin', - 'http://1.2.3.4:8080', - '1.2.3.4', - 'Pool1', - True)) - self.driver.do_setup('context') - - expected_properties = { - 'target_discovered': True, - 'target_portal': '1.2.3.4:fakeServicePort', - 'target_iqn': 'fakeTargetIqn', - 'target_lun': 1, - 'volume_id': fake_volume['id'], - 'target_portals': ['1.2.3.4:fakeServicePort'], - 'target_iqns': ['fakeTargetIqn'], - 'target_luns': [1]} - expected_return = { - 'driver_volume_type': 'iscsi', 'data': expected_properties} - - self.assertEqual(expected_return, self.driver.initialize_connection( - fake_volume, fake_connector)) - - mock_api_return = mock_api_executor.return_value - mock_api_return.get_iscsi_portal_info.assert_called_once_with() - expected_call_list = [ - mock.call(LUNNAA='fakeLunNaa'), - mock.call(LUNNAA='fakeLunNaa')] - self.assertEqual( - expected_call_list, - 
mock_api_executor.return_value.get_lun_info.call_args_list) - mock_api_return = mock_api_executor.return_value - mock_api_return.get_all_iscsi_portal_setting.assert_called_once_with() - mock_api_return.map_lun.assert_called_once_with( - 'fakeLunIndex', 'fakeTargeIndex') - mock_api_return.get_ethernet_ip.assert_called_once_with(type='data') - - @mock.patch.object( - qnap.QnapISCSIDriver, '_get_lun_naa_from_volume_metadata') - @mock.patch('cinder.volume.drivers.qnap.QnapAPIExecutor') - def test_terminate_connection( - self, - mock_api_executor, - mock_get_lun_naa_from_volume_metadata): - """Test terminate connection.""" - fake_volume = VolumeClass( - 'fakeDisplayName', 'fakeId', 100, 'fakeLunName') - fake_connector = {'initiator': 'fakeInitiator'} - - mock_get_lun_naa_from_volume_metadata.return_value = 'fakeLunNaa' - mock_api_executor.return_value.get_basic_info.return_value = ( - 'ES1640dc ', 'ES1640dc ', '1.1.3') - mock_api_executor.return_value.get_lun_info.return_value = ( - self.get_mapped_lun_info_return_value()) - mock_api_executor.return_value.unmap_lun.return_value = None - - self.driver = qnap.QnapISCSIDriver( - configuration=create_configuration( - 'admin', - 'qnapadmin', - 'http://1.2.3.4:8080', - '1.2.3.4', - 'Pool1', - True)) - self.driver.do_setup('context') - self.driver.terminate_connection(fake_volume, fake_connector) - - mock_api_return = mock_api_executor.return_value - mock_api_return.get_lun_info.assert_called_once_with( - LUNNAA='fakeLunNaa') - mock_api_return.unmap_lun.assert_called_once_with( - 'fakeLunIndex', '9') - - -class QnapAPIExecutorTestCase(QnapDriverBaseTestCase): - """Tests QnapAPIExecutor.""" - - @mock.patch('six.moves.http_client.HTTPConnection') - def test_create_lun( - self, - mock_http_connection): - """Test create lun.""" - fake_volume = VolumeClass( - 'fakeDisplayName', 'fakeId', 100, 'fakeLunName') - - mock_http_connection.return_value.getresponse.side_effect = ([ - FakeLoginResponse(), - FakeGetBasicInfoResponse(), - 
FakeLoginResponse(), - FakeCreateLunResponse()]) - - self.driver = qnap.QnapISCSIDriver( - configuration=create_configuration( - 'admin', - 'qnapadmin', - 'http://1.2.3.4:8080', - '1.2.3.4', - 'Pool1', - True)) - self.driver.do_setup('context') - - self.assertEqual( - 'fakeLunIndex', - self.driver.api_executor.create_lun( - fake_volume, 'fakepool', 'fakeLun', 'False')) - - fake_params = {} - fake_params['func'] = 'add_lun' - fake_params['FileIO'] = 'no' - fake_params['LUNThinAllocate'] = '1' - fake_params['LUNName'] = 'fakeLun' - fake_params['LUNPath'] = 'fakeLun' - fake_params['poolID'] = 'fakepool' - fake_params['lv_ifssd'] = 'no' - fake_params['LUNCapacity'] = 100 - fake_params['lv_threshold'] = '80' - fake_params['sid'] = 'fakeSid' - sanitized_params = {} - for key in fake_params: - value = fake_params[key] - if value is not None: - sanitized_params[key] = six.text_type(value) - - sanitized_params = urllib.parse.urlencode(sanitized_params) - create_lun_url = ( - '/cgi-bin/disk/iscsi_lun_setting.cgi?%s' % sanitized_params) - - expected_call_list = [ - mock.call('POST', login_url, global_sanitized_params, header), - mock.call('GET', get_basic_info_url), - mock.call('POST', login_url, global_sanitized_params, header), - mock.call('GET', create_lun_url)] - self.assertEqual( - expected_call_list, - mock_http_connection.return_value.request.call_args_list) - - @mock.patch('six.moves.http_client.HTTPConnection') - def test_delete_lun( - self, - mock_http_connection): - """Test delete lun.""" - mock_http_connection.return_value.getresponse.side_effect = ([ - FakeLoginResponse(), - FakeGetBasicInfoResponse(), - FakeLoginResponse(), - FakeCreateLunResponse()]) - - self.driver = qnap.QnapISCSIDriver( - configuration=create_configuration( - 'admin', - 'qnapadmin', - 'http://1.2.3.4:8080', - '1.2.3.4', - 'Pool1', - True)) - self.driver.do_setup('context') - self.driver.api_executor.delete_lun('fakeLunIndex') - - fake_params = {} - fake_params['func'] = 'remove_lun' - 
fake_params['run_background'] = '1' - fake_params['ha_sync'] = '1' - fake_params['LUNIndex'] = 'fakeLunIndex' - fake_params['sid'] = 'fakeSid' - sanitized_params = {} - - for key in fake_params: - value = fake_params[key] - if value is not None: - sanitized_params[key] = six.text_type(value) - - sanitized_params = urllib.parse.urlencode(sanitized_params) - delete_lun_url = ( - '/cgi-bin/disk/iscsi_lun_setting.cgi?%s' % sanitized_params) - - expected_call_list = [ - mock.call('POST', login_url, global_sanitized_params, header), - mock.call('GET', get_basic_info_url), - mock.call('POST', login_url, global_sanitized_params, header), - mock.call('GET', delete_lun_url)] - self.assertEqual( - expected_call_list, - mock_http_connection.return_value.request.call_args_list) - - @mock.patch('six.moves.http_client.HTTPConnection') - def test_get_specific_poolinfo( - self, - mock_http_connection): - """Test get specific pool info.""" - mock_http_connection.return_value.getresponse.side_effect = ([ - FakeLoginResponse(), - FakeGetBasicInfoResponse(), - FakeLoginResponse(), - FakePoolInfoResponse()]) - - self.driver = qnap.QnapISCSIDriver( - configuration=create_configuration( - 'admin', - 'qnapadmin', - 'http://1.2.3.4:8080', - '1.2.3.4', - 'Pool1', - True)) - self.driver.do_setup('context') - self.driver.api_executor.get_specific_poolinfo('Pool1') - - fake_params = {} - fake_params['store'] = 'poolInfo' - fake_params['func'] = 'extra_get' - fake_params['poolID'] = 'Pool1' - fake_params['Pool_Info'] = '1' - fake_params['sid'] = 'fakeSid' - - sanitized_params = {} - for key in fake_params: - value = fake_params[key] - if value is not None: - sanitized_params[key] = six.text_type(value) - - sanitized_params = urllib.parse.urlencode(sanitized_params) - get_specific_poolinfo_url = ( - '/cgi-bin/disk/disk_manage.cgi?%s' % sanitized_params) - - expected_call_list = [ - mock.call('POST', login_url, global_sanitized_params, header), - mock.call('GET', get_basic_info_url), - 
mock.call('POST', login_url, global_sanitized_params, header), - mock.call('GET', get_specific_poolinfo_url)] - self.assertEqual( - expected_call_list, - mock_http_connection.return_value.request.call_args_list) - - @mock.patch('six.moves.http_client.HTTPConnection') - def test_create_target( - self, - mock_http_connection): - """Test create target.""" - mock_http_connection.return_value.getresponse.side_effect = ([ - FakeLoginResponse(), - FakeGetBasicInfoResponse(), - FakeLoginResponse(), - FakeCreatTargetResponse()]) - - self.driver = qnap.QnapISCSIDriver( - configuration=create_configuration( - 'admin', - 'qnapadmin', - 'http://1.2.3.4:8080', - '1.2.3.4', - 'Pool1', - True)) - self.driver.do_setup('context') - self.driver.api_executor.create_target('fakeTargetName', 'sca') - fake_params = {} - fake_params['func'] = 'add_target' - fake_params['targetName'] = 'fakeTargetName' - fake_params['targetAlias'] = 'fakeTargetName' - fake_params['bTargetDataDigest'] = '0' - fake_params['bTargetHeaderDigest'] = '0' - fake_params['bTargetClusterEnable'] = '1' - fake_params['controller_name'] = 'sca' - fake_params['sid'] = 'fakeSid' - - sanitized_params = {} - - for key in fake_params: - value = fake_params[key] - if value is not None: - sanitized_params[key] = six.text_type(value) - - sanitized_params = urllib.parse.urlencode(sanitized_params) - create_target_url = ( - '/cgi-bin/disk/iscsi_target_setting.cgi?%s' % sanitized_params) - - expected_call_list = [ - mock.call('POST', login_url, global_sanitized_params, header), - mock.call('GET', get_basic_info_url), - mock.call('POST', login_url, global_sanitized_params, header), - mock.call('GET', create_target_url)] - self.assertEqual( - expected_call_list, - mock_http_connection.return_value.request.call_args_list) - - @mock.patch('six.moves.http_client.HTTPConnection') - def test_add_target_init( - self, - mock_http_connection): - """Test add target init.""" - mock_http_connection.return_value.getresponse.side_effect = ([ - 
FakeLoginResponse(), - FakeGetBasicInfoResponse(), - FakeLoginResponse(), - FakeCreateLunResponse()]) - - self.driver = qnap.QnapISCSIDriver( - configuration=create_configuration( - 'admin', - 'qnapadmin', - 'http://1.2.3.4:8080', - '1.2.3.4', - 'Pool1', - True)) - self.driver.do_setup('context') - self.driver.api_executor.add_target_init( - 'fakeTargetIqn', 'fakeInitiatorIqn') - - fake_params = {} - fake_params['func'] = 'add_init' - fake_params['targetIQN'] = 'fakeTargetIqn' - fake_params['initiatorIQN'] = 'fakeInitiatorIqn' - fake_params['initiatorAlias'] = 'fakeInitiatorIqn' - fake_params['bCHAPEnable'] = '0' - fake_params['CHAPUserName'] = '' - fake_params['CHAPPasswd'] = '' - fake_params['bMutualCHAPEnable'] = '0' - fake_params['mutualCHAPUserName'] = '' - fake_params['mutualCHAPPasswd'] = '' - fake_params['ha_sync'] = '1' - fake_params['sid'] = 'fakeSid' - - sanitized_params = {} - - for key in fake_params: - value = fake_params[key] - if value is not None: - sanitized_params[key] = six.text_type(value) - - sanitized_params = urllib.parse.urlencode(sanitized_params) - add_target_init_url = ( - '/cgi-bin/disk/iscsi_target_setting.cgi?%s' % sanitized_params) - - expected_call_list = [ - mock.call('POST', login_url, global_sanitized_params, header), - mock.call('GET', get_basic_info_url), - mock.call('POST', login_url, global_sanitized_params, header), - mock.call('GET', add_target_init_url)] - self.assertEqual( - expected_call_list, - mock_http_connection.return_value.request.call_args_list) - - @mock.patch('six.moves.http_client.HTTPConnection') - def test_map_lun( - self, - mock_http_connection): - """Test map lun.""" - mock_http_connection.return_value.getresponse.side_effect = ([ - FakeLoginResponse(), - FakeGetBasicInfoResponse(), - FakeLoginResponse(), - FakeCreateLunResponse()]) - - self.driver = qnap.QnapISCSIDriver( - configuration=create_configuration( - 'admin', - 'qnapadmin', - 'http://1.2.3.4:8080', - '1.2.3.4', - 'Pool1', - True)) - 
self.driver.do_setup('context') - self.driver.api_executor.map_lun( - 'fakeLunIndex', 'fakeTargetIndex') - - fake_params = {} - fake_params['func'] = 'add_lun' - fake_params['LUNIndex'] = 'fakeLunIndex' - fake_params['targetIndex'] = 'fakeTargetIndex' - fake_params['sid'] = 'fakeSid' - - sanitized_params = {} - - for key in fake_params: - value = fake_params[key] - if value is not None: - sanitized_params[key] = six.text_type(value) - - sanitized_params = urllib.parse.urlencode(sanitized_params) - map_lun_url = ( - '/cgi-bin/disk/iscsi_target_setting.cgi?%s' % sanitized_params) - - expected_call_list = [ - mock.call('POST', login_url, global_sanitized_params, header), - mock.call('GET', get_basic_info_url), - mock.call('POST', login_url, global_sanitized_params, header), - mock.call('GET', map_lun_url)] - self.assertEqual( - expected_call_list, - mock_http_connection.return_value.request.call_args_list) - - @mock.patch('six.moves.http_client.HTTPConnection') - def test_unmap_lun( - self, - mock_http_connection): - """Test unmap lun.""" - mock_http_connection.return_value.getresponse.side_effect = ([ - FakeLoginResponse(), - FakeGetBasicInfoResponse(), - FakeLoginResponse(), - FakeCreateLunResponse()]) - - self.driver = qnap.QnapISCSIDriver( - configuration=create_configuration( - 'admin', - 'qnapadmin', - 'http://1.2.3.4:8080', - '1.2.3.4', - 'Pool1', - True)) - self.driver.do_setup('context') - self.driver.api_executor.unmap_lun( - 'fakeLunIndex', 'fakeTargetIndex') - - fake_params = {} - fake_params['func'] = 'remove_lun' - fake_params['LUNIndex'] = 'fakeLunIndex' - fake_params['targetIndex'] = 'fakeTargetIndex' - fake_params['sid'] = 'fakeSid' - - sanitized_params = {} - for key in fake_params: - value = fake_params[key] - if value is not None: - sanitized_params[key] = six.text_type(value) - - sanitized_params = urllib.parse.urlencode(sanitized_params) - unmap_lun_url = ( - '/cgi-bin/disk/iscsi_target_setting.cgi?%s' % sanitized_params) - - expected_call_list = 
[ - mock.call('POST', login_url, global_sanitized_params, header), - mock.call('GET', get_basic_info_url), - mock.call('POST', login_url, global_sanitized_params, header), - mock.call('GET', unmap_lun_url)] - self.assertEqual( - expected_call_list, - mock_http_connection.return_value.request.call_args_list) - - @mock.patch('six.moves.http_client.HTTPConnection') - def test_get_iscsi_portal_info( - self, - mock_http_connection): - """Test get iscsi portal info.""" - mock_http_connection.return_value.getresponse.side_effect = ([ - FakeLoginResponse(), - FakeGetBasicInfoResponse(), - FakeLoginResponse(), - FakeCreateLunResponse()]) - - self.driver = qnap.QnapISCSIDriver( - configuration=create_configuration( - 'admin', - 'qnapadmin', - 'http://1.2.3.4:8080', - '1.2.3.4', - 'Pool1', - True)) - self.driver.do_setup('context') - self.driver.api_executor.get_iscsi_portal_info() - - fake_params = {} - fake_params['func'] = 'extra_get' - fake_params['iSCSI_portal'] = '1' - fake_params['sid'] = 'fakeSid' - - sanitized_params = {} - for key in fake_params: - value = fake_params[key] - if value is not None: - sanitized_params[key] = six.text_type(value) - - sanitized_params = urllib.parse.urlencode(sanitized_params) - get_iscsi_portal_info_url = ( - '/cgi-bin/disk/iscsi_portal_setting.cgi?%s' % sanitized_params) - - expected_call_list = [ - mock.call('POST', login_url, global_sanitized_params, header), - mock.call('GET', get_basic_info_url), - mock.call('POST', login_url, global_sanitized_params, header), - mock.call('GET', get_iscsi_portal_info_url)] - self.assertEqual( - expected_call_list, - mock_http_connection.return_value.request.call_args_list) - - @mock.patch('six.moves.http_client.HTTPConnection') - def test_get_lun_info( - self, - mock_http_connection): - """Test get lun info.""" - mock_http_connection.return_value.getresponse.side_effect = ([ - FakeLoginResponse(), - FakeGetBasicInfoResponse(), - FakeLoginResponse(), - FakeLunInfoResponse()]) - - self.driver = 
qnap.QnapISCSIDriver( - configuration=create_configuration( - 'admin', - 'qnapadmin', - 'http://1.2.3.4:8080', - '1.2.3.4', - 'Pool1', - True)) - self.driver.do_setup('context') - self.driver.api_executor.get_lun_info() - - fake_params = {} - fake_params['func'] = 'extra_get' - fake_params['lunList'] = '1' - fake_params['sid'] = 'fakeSid' - sanitized_params = {} - - for key in fake_params: - value = fake_params[key] - if value is not None: - sanitized_params[key] = six.text_type(value) - sanitized_params = urllib.parse.urlencode(sanitized_params) - - get_lun_info_url = ( - '/cgi-bin/disk/iscsi_portal_setting.cgi?%s' % sanitized_params) - - expected_call_list = [ - mock.call('POST', login_url, global_sanitized_params, header), - mock.call('GET', get_basic_info_url), - mock.call('POST', login_url, global_sanitized_params, header), - mock.call('GET', get_lun_info_url)] - self.assertEqual( - expected_call_list, - mock_http_connection.return_value.request.call_args_list) - - @mock.patch('six.moves.http_client.HTTPConnection') - def test_get_snapshot_info( - self, - mock_http_connection): - """Test get snapshot info.""" - mock_http_connection.return_value.getresponse.side_effect = ([ - FakeLoginResponse(), - FakeGetBasicInfoResponse(), - FakeLoginResponse(), - FakeLunInfoResponse()]) - - self.driver = qnap.QnapISCSIDriver( - configuration=create_configuration( - 'admin', - 'qnapadmin', - 'http://1.2.3.4:8080', - '1.2.3.4', - 'Pool1', - True)) - self.driver.do_setup('context') - self.driver.api_executor.get_snapshot_info( - lun_index='fakeLunIndex', snapshot_name='fakeSnapshotName') - - fake_params = {} - fake_params['func'] = 'extra_get' - fake_params['LUNIndex'] = 'fakeLunIndex' - fake_params['snapshot_list'] = '1' - fake_params['snap_start'] = '0' - fake_params['snap_count'] = '100' - fake_params['sid'] = 'fakeSid' - sanitized_params = {} - - for key in fake_params: - value = fake_params[key] - if value is not None: - sanitized_params[key] = six.text_type(value) - - 
sanitized_params = urllib.parse.urlencode(sanitized_params) - get_snapshot_info_url = ( - '/cgi-bin/disk/snapshot.cgi?%s' % sanitized_params) - - expected_call_list = [ - mock.call('POST', login_url, global_sanitized_params, header), - mock.call('GET', get_basic_info_url), - mock.call('POST', login_url, global_sanitized_params, header), - mock.call('GET', get_snapshot_info_url)] - self.assertEqual( - expected_call_list, - mock_http_connection.return_value.request.call_args_list) - - @mock.patch('six.moves.http_client.HTTPConnection') - def test_create_snapshot_api( - self, - mock_http_connection): - """Test create snapshot api.""" - mock_http_connection.return_value.getresponse.side_effect = ([ - FakeLoginResponse(), - FakeGetBasicInfoResponse(), - FakeLoginResponse(), - FakeCreateSnapshotResponse()]) - - self.driver = qnap.QnapISCSIDriver( - configuration=create_configuration( - 'admin', - 'qnapadmin', - 'http://1.2.3.4:8080', - '1.2.3.4', - 'Pool1', - True)) - self.driver.do_setup('context') - self.driver.api_executor.create_snapshot_api( - 'fakeLunIndex', 'fakeSnapshotName') - - fake_params = {} - fake_params['func'] = 'create_snapshot' - fake_params['lunID'] = 'fakeLunIndex' - fake_params['snapshot_name'] = 'fakeSnapshotName' - fake_params['expire_min'] = '0' - fake_params['vital'] = '1' - fake_params['snapshot_type'] = '0' - fake_params['sid'] = 'fakeSid' - sanitized_params = {} - - for key in fake_params: - value = fake_params[key] - if value is not None: - sanitized_params[key] = six.text_type(value) - - sanitized_params = urllib.parse.urlencode(sanitized_params) - create_snapshot_api_url = ( - '/cgi-bin/disk/snapshot.cgi?%s' % sanitized_params) - - expected_call_list = [ - mock.call('POST', login_url, global_sanitized_params, header), - mock.call('GET', get_basic_info_url), - mock.call('POST', login_url, global_sanitized_params, header), - mock.call('GET', create_snapshot_api_url)] - self.assertEqual( - expected_call_list, - 
mock_http_connection.return_value.request.call_args_list) - - @mock.patch('six.moves.http_client.HTTPConnection') - def test_api_delete_snapshot( - self, - mock_http_connection): - """Test api de;ete snapshot.""" - mock_http_connection.return_value.getresponse.side_effect = ([ - FakeLoginResponse(), - FakeGetBasicInfoResponse(), - FakeLoginResponse(), - FakeCreateSnapshotResponse()]) - - self.driver = qnap.QnapISCSIDriver( - configuration=create_configuration( - 'admin', - 'qnapadmin', - 'http://1.2.3.4:8080', - '1.2.3.4', - 'Pool1', - True)) - self.driver.do_setup('context') - self.driver.api_executor.api_delete_snapshot( - 'fakeSnapshotId') - fake_params = {} - fake_params['func'] = 'del_snapshots' - fake_params['snapshotID'] = 'fakeSnapshotId' - fake_params['sid'] = 'fakeSid' - sanitized_params = {} - - for key in fake_params: - value = fake_params[key] - if value is not None: - sanitized_params[key] = six.text_type(value) - - sanitized_params = urllib.parse.urlencode(sanitized_params) - api_delete_snapshot_url = ( - '/cgi-bin/disk/snapshot.cgi?%s' % sanitized_params) - - expected_call_list = [ - mock.call('POST', login_url, global_sanitized_params, header), - mock.call('GET', get_basic_info_url), - mock.call('POST', login_url, global_sanitized_params, header), - mock.call('GET', api_delete_snapshot_url)] - self.assertEqual( - expected_call_list, - mock_http_connection.return_value.request.call_args_list) - - @mock.patch('six.moves.http_client.HTTPConnection') - def test_clone_snapshot( - self, - mock_http_connection): - """Test clone snapshot.""" - mock_http_connection.return_value.getresponse.side_effect = ([ - FakeLoginResponse(), - FakeGetBasicInfoResponse(), - FakeLoginResponse(), - FakeCreateSnapshotResponse()]) - - self.driver = qnap.QnapISCSIDriver( - configuration=create_configuration( - 'admin', - 'qnapadmin', - 'http://1.2.3.4:8080', - '1.2.3.4', - 'Pool1', - True)) - self.driver.do_setup('context') - self.driver.api_executor.clone_snapshot( - 
'fakeSnapshotId', 'fakeLunName') - - fake_params = {} - fake_params['func'] = 'clone_qsnapshot' - fake_params['by_lun'] = '1' - fake_params['snapshotID'] = 'fakeSnapshotId' - fake_params['new_name'] = 'fakeLunName' - fake_params['sid'] = 'fakeSid' - sanitized_params = {} - - for key in fake_params: - value = fake_params[key] - if value is not None: - sanitized_params[key] = six.text_type(value) - - sanitized_params = urllib.parse.urlencode(sanitized_params) - clone_snapshot_url = ( - '/cgi-bin/disk/snapshot.cgi?%s' % sanitized_params) - - expected_call_list = [ - mock.call('POST', login_url, global_sanitized_params, header), - mock.call('GET', get_basic_info_url), - mock.call('POST', login_url, global_sanitized_params, header), - mock.call('GET', clone_snapshot_url)] - self.assertEqual( - expected_call_list, - mock_http_connection.return_value.request.call_args_list) - - @mock.patch('six.moves.http_client.HTTPConnection') - def test_edit_lun( - self, - mock_http_connection): - """Test edit lun.""" - mock_http_connection.return_value.getresponse.side_effect = ([ - FakeLoginResponse(), - FakeGetBasicInfoResponse(), - FakeLoginResponse(), - FakeLunInfoResponse()]) - - self.driver = qnap.QnapISCSIDriver( - configuration=create_configuration( - 'admin', - 'qnapadmin', - 'http://1.2.3.4:8080', - '1.2.3.4', - 'Pool1', - True)) - self.driver.do_setup('context') - fake_lun = {'LUNName': 'fakeLunName', - 'LUNCapacity': 100, - 'LUNIndex': 'fakeLunIndex', - 'LUNThinAllocate': False, - 'LUNPath': 'fakeLunPath', - 'LUNStatus': 'fakeLunStatus'} - self.driver.api_executor.edit_lun(fake_lun) - - fake_params = {} - fake_params['func'] = 'edit_lun' - fake_params['LUNName'] = 'fakeLunName' - fake_params['LUNCapacity'] = 100 - fake_params['LUNIndex'] = 'fakeLunIndex' - fake_params['LUNThinAllocate'] = False - fake_params['LUNPath'] = 'fakeLunPath' - fake_params['LUNStatus'] = 'fakeLunStatus' - fake_params['sid'] = 'fakeSid' - - sanitized_params = {} - - for key in fake_params: - value 
= fake_params[key] - if value is not None: - sanitized_params[key] = six.text_type(value) - - sanitized_params = urllib.parse.urlencode(sanitized_params) - edit_lun_url = ( - '/cgi-bin/disk/iscsi_lun_setting.cgi?%s' % sanitized_params) - - expected_call_list = [ - mock.call('POST', login_url, global_sanitized_params, header), - mock.call('GET', get_basic_info_url), - mock.call('POST', login_url, global_sanitized_params, header), - mock.call('GET', edit_lun_url)] - self.assertEqual( - expected_call_list, - mock_http_connection.return_value.request.call_args_list) - - @mock.patch('six.moves.http_client.HTTPConnection') - def test_get_all_iscsi_portal_setting( - self, - mock_http_connection): - """Test get all iscsi portal setting.""" - mock_http_connection.return_value.getresponse.side_effect = ([ - FakeLoginResponse(), - FakeGetBasicInfoResponse(), - FakeLoginResponse(), - FakeLunInfoResponse()]) - - self.driver = qnap.QnapISCSIDriver( - configuration=create_configuration( - 'admin', - 'qnapadmin', - 'http://1.2.3.4:8080', - '1.2.3.4', - 'Pool1', - True)) - self.driver.do_setup('context') - self.driver.api_executor.get_all_iscsi_portal_setting() - - fake_params = {} - fake_params['func'] = 'get_all' - fake_params['sid'] = 'fakeSid' - sanitized_params = {} - - for key in fake_params: - value = fake_params[key] - if value is not None: - sanitized_params[key] = six.text_type(value) - - sanitized_params = urllib.parse.urlencode(sanitized_params) - get_all_iscsi_portal_setting_url = ( - '/cgi-bin/disk/iscsi_portal_setting.cgi?%s' % sanitized_params) - - expected_call_list = [ - mock.call('POST', login_url, global_sanitized_params, header), - mock.call('GET', get_basic_info_url), - mock.call('POST', login_url, global_sanitized_params, header), - mock.call('GET', get_all_iscsi_portal_setting_url)] - self.assertEqual( - expected_call_list, - mock_http_connection.return_value.request.call_args_list) - - @mock.patch('six.moves.http_client.HTTPConnection') - def 
test_get_ethernet_ip( - self, - mock_http_connection): - """Test get ethernet ip.""" - mock_http_connection.return_value.getresponse.side_effect = ([ - FakeLoginResponse(), - FakeGetBasicInfoResponse(), - FakeLoginResponse(), - FakeGetAllEthernetIp()]) - - self.driver = qnap.QnapISCSIDriver( - configuration=create_configuration( - 'admin', - 'qnapadmin', - 'http://1.2.3.4:8080', - '1.2.3.4', - 'Pool1', - True)) - self.driver.do_setup('context') - self.driver.api_executor.get_ethernet_ip(type='data') - - fake_params = {} - fake_params['subfunc'] = 'net_setting' - fake_params['sid'] = 'fakeSid' - sanitized_params = {} - - for key in fake_params: - value = fake_params[key] - if value is not None: - sanitized_params[key] = six.text_type(value) - - sanitized_params = urllib.parse.urlencode(sanitized_params) - get_ethernet_ip_url = ( - '/cgi-bin/sys/sysRequest.cgi?%s' % sanitized_params) - - expected_call_list = [ - mock.call('POST', login_url, global_sanitized_params, header), - mock.call('GET', get_basic_info_url), - mock.call('POST', login_url, global_sanitized_params, header), - mock.call('GET', get_ethernet_ip_url)] - self.assertEqual( - expected_call_list, - mock_http_connection.return_value.request.call_args_list) - - @mock.patch('six.moves.http_client.HTTPConnection') - def test_get_target_info( - self, - mock_http_connection): - """Test get target info.""" - mock_http_connection.return_value.getresponse.side_effect = ([ - FakeLoginResponse(), - FakeGetBasicInfoResponse(), - FakeLoginResponse(), - FakeTargetInfo()]) - - self.driver = qnap.QnapISCSIDriver( - configuration=create_configuration( - 'admin', - 'qnapadmin', - 'http://1.2.3.4:8080', - '1.2.3.4', - 'Pool1', - True)) - self.driver.do_setup('context') - self.driver.api_executor.get_target_info('fakeTargetIndex') - - fake_params = {} - fake_params['func'] = 'extra_get' - fake_params['targetInfo'] = 1 - fake_params['targetIndex'] = 'fakeTargetIndex' - fake_params['sid'] = 'fakeSid' - - sanitized_params = {} 
- for key in fake_params: - value = fake_params[key] - if value is not None: - sanitized_params[key] = six.text_type(value) - - sanitized_params = urllib.parse.urlencode(sanitized_params) - get_target_info_url = ( - '/cgi-bin/disk/iscsi_portal_setting.cgi?%s' % sanitized_params) - - expected_call_list = [ - mock.call('POST', login_url, global_sanitized_params, header), - mock.call('GET', get_basic_info_url), - mock.call('POST', login_url, global_sanitized_params, header), - mock.call('GET', get_target_info_url)] - self.assertEqual( - expected_call_list, - mock_http_connection.return_value.request.call_args_list) - - -class QnapAPIExecutorTsTestCase(QnapDriverBaseTestCase): - """Tests QnapAPIExecutorTS.""" - - @mock.patch('six.moves.http_client.HTTPConnection') - def test_remove_target_init( - self, - mock_http_connection): - """Test remove target init.""" - mock_http_connection.return_value.getresponse.side_effect = ([ - FakeLoginResponse(), - FakeGetBasicInfoTsResponse(), - FakeLoginResponse(), - FakeTargetInfo()]) - - self.driver = qnap.QnapISCSIDriver( - configuration=create_configuration( - 'admin', - 'qnapadmin', - 'http://1.2.3.4:8080', - '1.2.3.4', - 'Storage Pool 1', - True)) - self.driver.do_setup('context') - self.driver.api_executor.remove_target_init( - 'fakeTargetIqn', 'fakeDefaultAcl') - - fake_params = {} - fake_params['func'] = 'remove_init' - fake_params['targetIQN'] = 'fakeTargetIqn' - fake_params['initiatorIQN'] = 'fakeDefaultAcl' - fake_params['ha_sync'] = '1' - fake_params['sid'] = 'fakeSid' - - sanitized_params = {} - for key in fake_params: - value = fake_params[key] - if value is not None: - sanitized_params[key] = six.text_type(value) - - sanitized_params = urllib.parse.urlencode(sanitized_params) - remove_target_init_url = ( - '/cgi-bin/disk/iscsi_target_setting.cgi?%s' % sanitized_params) - - expected_call_list = [ - mock.call('POST', login_url, global_sanitized_params, header), - mock.call('GET', get_basic_info_url), - mock.call('POST', 
login_url, global_sanitized_params, header), - mock.call('GET', remove_target_init_url)] - self.assertEqual( - expected_call_list, - mock_http_connection.return_value.request.call_args_list) - - @mock.patch('six.moves.http_client.HTTPConnection') - def test_get_target_info( - self, - mock_http_connection): - mock_http_connection.return_value.getresponse.side_effect = ([ - FakeLoginResponse(), - FakeGetBasicInfoTsResponse(), - FakeLoginResponse(), - FakeTargetInfo()]) - - self.driver = qnap.QnapISCSIDriver( - configuration=create_configuration( - 'admin', - 'qnapadmin', - 'http://1.2.3.4:8080', - '1.2.3.4', - 'Storage Pool 1', - True)) - self.driver.do_setup('context') - self.driver.api_executor.get_target_info( - 'fakeTargetIndex') - - fake_params = {} - fake_params['func'] = 'extra_get' - fake_params['targetInfo'] = 1 - fake_params['targetIndex'] = 'fakeTargetIndex' - fake_params['ha_sync'] = '1' - fake_params['sid'] = 'fakeSid' - - sanitized_params = {} - for key in fake_params: - value = fake_params[key] - if value is not None: - sanitized_params[key] = six.text_type(value) - - sanitized_params = urllib.parse.urlencode(sanitized_params) - get_target_info_url = ( - '/cgi-bin/disk/iscsi_portal_setting.cgi?%s' % sanitized_params) - - expected_call_list = [ - mock.call('POST', login_url, global_sanitized_params, header), - mock.call('GET', get_basic_info_url), - mock.call('POST', login_url, global_sanitized_params, header), - mock.call('GET', get_target_info_url)] - self.assertEqual( - expected_call_list, - mock_http_connection.return_value.request.call_args_list) - - @mock.patch('six.moves.http_client.HTTPConnection') - def test_get_ethernet_ip( - self, - mock_http_connection): - """Test get ethernet ip.""" - mock_http_connection.return_value.getresponse.side_effect = ([ - FakeLoginResponse(), - FakeGetBasicInfoTsResponse(), - FakeLoginResponse(), - FakeGetAllEthernetIp()]) - - self.driver = qnap.QnapISCSIDriver( - configuration=create_configuration( - 'admin', - 
'qnapadmin', - 'http://1.2.3.4:8080', - '1.2.3.4', - 'Storage Pool 1', - True)) - self.driver.do_setup('context') - self.driver.api_executor.get_ethernet_ip( - type='data') - - get_ethernet_ip_url = ( - '/cgi-bin/sys/sysRequest.cgi?subfunc=net_setting&sid=fakeSid') - expected_call_list = [ - mock.call('POST', login_url, global_sanitized_params, header), - mock.call('GET', get_basic_info_url), - mock.call('POST', login_url, global_sanitized_params, header), - mock.call('GET', get_ethernet_ip_url)] - self.assertEqual( - expected_call_list, - mock_http_connection.return_value.request.call_args_list) - - -class QnapAPIExecutorTesTestCase(QnapDriverBaseTestCase): - """Tests QnapAPIExecutorTES.""" - - @mock.patch('six.moves.http_client.HTTPConnection') - def test_get_ethernet_ip( - self, - mock_http_connection): - """Test get ehternet ip.""" - mock_http_connection.return_value.getresponse.side_effect = ([ - FakeLoginResponse(), - FakeGetBasicInfoTesResponse(), - FakeLoginResponse(), - FakeGetAllEthernetIp()]) - - self.driver = qnap.QnapISCSIDriver( - configuration=create_configuration( - 'admin', - 'qnapadmin', - 'http://1.2.3.4:8080', - '1.2.3.4', - 'Pool1', - True)) - self.driver.do_setup('context') - self.driver.api_executor.get_ethernet_ip( - type='data') - - get_ethernet_ip_url = ( - '/cgi-bin/sys/sysRequest.cgi?subfunc=net_setting&sid=fakeSid') - expected_call_list = [ - mock.call('POST', login_url, global_sanitized_params, header), - mock.call('GET', get_basic_info_url), - mock.call('POST', login_url, global_sanitized_params, header), - mock.call('GET', get_ethernet_ip_url)] - self.assertEqual( - expected_call_list, - mock_http_connection.return_value.request.call_args_list) diff --git a/cinder/tests/unit/volume/drivers/test_quobyte.py b/cinder/tests/unit/volume/drivers/test_quobyte.py deleted file mode 100644 index ccd61524a..000000000 --- a/cinder/tests/unit/volume/drivers/test_quobyte.py +++ /dev/null @@ -1,1013 +0,0 @@ -# Copyright (c) 2014 Quobyte Inc. 
-# Copyright (c) 2013 Red Hat, Inc. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -"""Unit tests for the Quobyte driver module.""" - -import errno -import os -import psutil -import six -import traceback - -import mock -from oslo_concurrency import processutils as putils -from oslo_utils import imageutils -from oslo_utils import units - -from cinder import context -from cinder import exception -from cinder.image import image_utils -from cinder import test -from cinder.tests.unit import fake_snapshot -from cinder.tests.unit import fake_volume -from cinder.volume import configuration as conf -from cinder.volume.drivers import quobyte - - -class FakeDb(object): - msg = "Tests are broken: mock this out." 
- - def volume_get(self, *a, **kw): - raise Exception(self.msg) - - def snapshot_get_all_for_volume(self, *a, **kw): - """Mock this if you want results from it.""" - return [] - - -class QuobyteDriverTestCase(test.TestCase): - """Test case for Quobyte driver.""" - - TEST_QUOBYTE_VOLUME = 'quobyte://quobyte-host/openstack-volumes' - TEST_QUOBYTE_VOLUME_WITHOUT_PROTOCOL = 'quobyte-host/openstack-volumes' - TEST_SIZE_IN_GB = 1 - TEST_MNT_POINT = '/mnt/quobyte' - TEST_MNT_POINT_BASE = '/mnt' - TEST_FILE_NAME = 'test.txt' - TEST_SHARES_CONFIG_FILE = '/etc/cinder/test-shares.conf' - TEST_TMP_FILE = '/tmp/tempfile' - VOLUME_UUID = 'abcdefab-cdef-abcd-efab-cdefabcdefab' - SNAP_UUID = 'bacadaca-baca-daca-baca-dacadacadaca' - SNAP_UUID_2 = 'bebedede-bebe-dede-bebe-dedebebedede' - - def setUp(self): - super(QuobyteDriverTestCase, self).setUp() - - self._configuration = mock.Mock(conf.Configuration) - self._configuration.append_config_values(mock.ANY) - self._configuration.quobyte_volume_url = \ - self.TEST_QUOBYTE_VOLUME - self._configuration.quobyte_client_cfg = None - self._configuration.quobyte_sparsed_volumes = True - self._configuration.quobyte_qcow2_volumes = False - self._configuration.quobyte_mount_point_base = \ - self.TEST_MNT_POINT_BASE - self._configuration.nas_secure_file_operations = "auto" - self._configuration.nas_secure_file_permissions = "auto" - - self._driver =\ - quobyte.QuobyteDriver(configuration=self._configuration, - db=FakeDb()) - self._driver.shares = {} - self._driver.set_nas_security_options(is_new_cinder_install=False) - self.context = context.get_admin_context() - - def assertRaisesAndMessageMatches( - self, excClass, msg, callableObj, *args, **kwargs): - """Ensure that the specified exception was raised. 
""" - - caught = False - try: - callableObj(*args, **kwargs) - except Exception as exc: - caught = True - self.assertIsInstance(exc, excClass, - 'Wrong exception caught: %s Stacktrace: %s' % - (exc, traceback.format_exc())) - self.assertIn(msg, six.text_type(exc)) - - if not caught: - self.fail('Expected raised exception but nothing caught.') - - def get_mock_partitions(self): - mypart = mock.Mock() - mypart.device = "quobyte@" - mypart.mountpoint = self.TEST_MNT_POINT - return [mypart] - - def test_local_path(self): - """local_path common use case.""" - drv = self._driver - vol_id = self.VOLUME_UUID - volume = self._simple_volume(_name_id=vol_id) - - self.assertEqual( - '/mnt/1331538734b757ed52d0e18c0a7210cd/volume-%s' % vol_id, - drv.local_path(volume)) - - def test_mount_quobyte_should_mount_correctly(self): - with mock.patch.object(self._driver, '_execute') as mock_execute, \ - mock.patch('cinder.volume.drivers.quobyte.QuobyteDriver' - '.read_proc_mount') as mock_open, \ - mock.patch('cinder.volume.drivers.quobyte.QuobyteDriver' - '._validate_volume') as mock_validate: - # Content of /proc/mount (not mounted yet). - mock_open.return_value = six.StringIO( - "/dev/sda5 / ext4 rw,relatime,data=ordered 0 0") - - self._driver._mount_quobyte(self.TEST_QUOBYTE_VOLUME, - self.TEST_MNT_POINT) - - mkdir_call = mock.call('mkdir', '-p', self.TEST_MNT_POINT) - - mount_call = mock.call( - 'mount.quobyte', '--disable-xattrs', self.TEST_QUOBYTE_VOLUME, - self.TEST_MNT_POINT, run_as_root=False) - - mock_execute.assert_has_calls( - [mkdir_call, mount_call], any_order=False) - mock_validate.called_once_with(self.TEST_MNT_POINT) - - def test_mount_quobyte_already_mounted_detected_seen_in_proc_mount(self): - with mock.patch('cinder.volume.drivers.quobyte.QuobyteDriver' - '.read_proc_mount') as mock_open, \ - mock.patch('cinder.volume.drivers.quobyte.QuobyteDriver' - '._validate_volume') as mock_validate: - # Content of /proc/mount (already mounted). 
- mock_open.return_value = six.StringIO( - "quobyte@%s %s fuse rw,nosuid,nodev,noatime,user_id=1000" - ",group_id=100,default_permissions,allow_other 0 0" - % (self.TEST_QUOBYTE_VOLUME, self.TEST_MNT_POINT)) - - self._driver._mount_quobyte(self.TEST_QUOBYTE_VOLUME, - self.TEST_MNT_POINT) - mock_validate.assert_called_once_with(self.TEST_MNT_POINT) - - def test_mount_quobyte_should_suppress_and_log_already_mounted_error(self): - """test_mount_quobyte_should_suppress_and_log_already_mounted_error - - Based on /proc/mount, the file system is not mounted yet. However, - mount.quobyte returns with an 'already mounted' error. This is - a last-resort safe-guard in case /proc/mount parsing was not - successful. - - Because _mount_quobyte gets called with ensure=True, the error will - be suppressed and logged instead. - """ - with mock.patch.object(self._driver, '_execute') as mock_execute, \ - mock.patch('cinder.volume.drivers.quobyte.QuobyteDriver' - '.read_proc_mount') as mock_open, \ - mock.patch('cinder.volume.drivers.quobyte.LOG') as mock_LOG: - # Content of /proc/mount (empty). - mock_open.return_value = six.StringIO() - mock_execute.side_effect = [None, putils.ProcessExecutionError( - stderr='is busy or already mounted')] - - self._driver._mount_quobyte(self.TEST_QUOBYTE_VOLUME, - self.TEST_MNT_POINT, - ensure=True) - - mkdir_call = mock.call('mkdir', '-p', self.TEST_MNT_POINT) - mount_call = mock.call( - 'mount.quobyte', '--disable-xattrs', self.TEST_QUOBYTE_VOLUME, - self.TEST_MNT_POINT, run_as_root=False) - mock_execute.assert_has_calls([mkdir_call, mount_call], - any_order=False) - - mock_LOG.warning.assert_called_once_with('%s is already mounted', - self.TEST_QUOBYTE_VOLUME) - - def test_mount_quobyte_should_reraise_already_mounted_error(self): - """test_mount_quobyte_should_reraise_already_mounted_error - - Like test_mount_quobyte_should_suppress_and_log_already_mounted_error - but with ensure=False. 
- """ - with mock.patch.object(self._driver, '_execute') as mock_execute, \ - mock.patch('cinder.volume.drivers.quobyte.QuobyteDriver' - '.read_proc_mount') as mock_open: - mock_open.return_value = six.StringIO() - mock_execute.side_effect = [ - None, # mkdir - putils.ProcessExecutionError( # mount - stderr='is busy or already mounted')] - - self.assertRaises(putils.ProcessExecutionError, - self._driver._mount_quobyte, - self.TEST_QUOBYTE_VOLUME, - self.TEST_MNT_POINT, - ensure=False) - - mkdir_call = mock.call('mkdir', '-p', self.TEST_MNT_POINT) - mount_call = mock.call( - 'mount.quobyte', '--disable-xattrs', self.TEST_QUOBYTE_VOLUME, - self.TEST_MNT_POINT, run_as_root=False) - mock_execute.assert_has_calls([mkdir_call, mount_call], - any_order=False) - - def test_get_hash_str(self): - """_get_hash_str should calculation correct value.""" - drv = self._driver - - self.assertEqual('1331538734b757ed52d0e18c0a7210cd', - drv._get_hash_str(self.TEST_QUOBYTE_VOLUME)) - - def test_get_available_capacity_with_df(self): - """_get_available_capacity should calculate correct value.""" - drv = self._driver - - df_total_size = 2620544 - df_avail = 1490560 - df_head = 'Filesystem 1K-blocks Used Available Use% Mounted on\n' - df_data = 'quobyte@%s %d 996864 %d 41%% %s' % \ - (self.TEST_QUOBYTE_VOLUME, df_total_size, df_avail, - self.TEST_MNT_POINT) - df_output = df_head + df_data - - drv._get_mount_point_for_share = mock.Mock(return_value=self. - TEST_MNT_POINT) - - drv._execute = mock.Mock(return_value=(df_output, None)) - - self.assertEqual((df_avail, df_total_size), - drv._get_available_capacity(self.TEST_QUOBYTE_VOLUME)) - (drv._get_mount_point_for_share. - assert_called_once_with(self.TEST_QUOBYTE_VOLUME)) - (drv._execute. 
- assert_called_once_with('df', - '--portability', - '--block-size', - '1', - self.TEST_MNT_POINT, - run_as_root=self._driver._execute_as_root)) - - def test_get_capacity_info(self): - with mock.patch.object(self._driver, '_get_available_capacity') \ - as mock_get_available_capacity: - drv = self._driver - - df_size = 2620544 - df_avail = 1490560 - - mock_get_available_capacity.return_value = (df_avail, df_size) - - size, available, used = drv._get_capacity_info(mock.ANY) - - mock_get_available_capacity.assert_called_once_with(mock.ANY) - - self.assertEqual(df_size, size) - self.assertEqual(df_avail, available) - self.assertEqual(size - available, used) - - def test_load_shares_config(self): - """_load_shares_config takes the Volume URL and strips quobyte://.""" - drv = self._driver - - drv._load_shares_config() - - self.assertIn(self.TEST_QUOBYTE_VOLUME_WITHOUT_PROTOCOL, drv.shares) - - def test_load_shares_config_without_protocol(self): - """Same as test_load_shares_config, but URL is without quobyte://.""" - drv = self._driver - - drv.configuration.quobyte_volume_url = \ - self.TEST_QUOBYTE_VOLUME_WITHOUT_PROTOCOL - - drv._load_shares_config() - - self.assertIn(self.TEST_QUOBYTE_VOLUME_WITHOUT_PROTOCOL, drv.shares) - - def test_ensure_share_mounted(self): - """_ensure_share_mounted simple use case.""" - with mock.patch.object(self._driver, '_get_mount_point_for_share') as \ - mock_get_mount_point, \ - mock.patch.object(self._driver, '_mount_quobyte') as \ - mock_mount: - drv = self._driver - drv._ensure_share_mounted(self.TEST_QUOBYTE_VOLUME) - - mock_get_mount_point.assert_called_once_with( - self.TEST_QUOBYTE_VOLUME) - mock_mount.assert_called_once_with( - self.TEST_QUOBYTE_VOLUME, - mock_get_mount_point.return_value, - ensure=True) - - def test_ensure_shares_mounted_should_save_mounting_successfully(self): - """_ensure_shares_mounted should save share if mounted with success.""" - with mock.patch.object(self._driver, '_ensure_share_mounted') \ - as 
mock_ensure_share_mounted: - drv = self._driver - - drv._ensure_shares_mounted() - - mock_ensure_share_mounted.assert_called_once_with( - self.TEST_QUOBYTE_VOLUME_WITHOUT_PROTOCOL) - self.assertIn(self.TEST_QUOBYTE_VOLUME_WITHOUT_PROTOCOL, - drv._mounted_shares) - - def test_ensure_shares_mounted_should_not_save_mounting_with_error(self): - """_ensure_shares_mounted should not save if mount raised an error.""" - with mock.patch.object(self._driver, '_ensure_share_mounted') \ - as mock_ensure_share_mounted: - drv = self._driver - - mock_ensure_share_mounted.side_effect = Exception() - - drv._ensure_shares_mounted() - - mock_ensure_share_mounted.assert_called_once_with( - self.TEST_QUOBYTE_VOLUME_WITHOUT_PROTOCOL) - self.assertEqual(1, len(drv.shares)) - self.assertEqual(0, len(drv._mounted_shares)) - - @mock.patch.object(quobyte.QuobyteDriver, "set_nas_security_options") - def test_do_setup(self, qb_snso_mock): - """do_setup runs successfully.""" - drv = self._driver - - drv.do_setup(mock.create_autospec(context.RequestContext)) - - qb_snso_mock.assert_called_once_with(is_new_cinder_install=mock.ANY) - - def test_check_for_setup_error_throws_quobyte_volume_url_not_set(self): - """check_for_setup_error throws if 'quobyte_volume_url' is not set.""" - drv = self._driver - - drv.configuration.quobyte_volume_url = None - - self.assertRaisesAndMessageMatches(exception.VolumeDriverException, - 'no Quobyte volume configured', - drv.check_for_setup_error) - - def test_check_for_setup_error_throws_client_not_installed(self): - """check_for_setup_error throws if client is not installed.""" - drv = self._driver - drv._execute = mock.Mock(side_effect=OSError - (errno.ENOENT, 'No such file or directory')) - - self.assertRaisesAndMessageMatches(exception.VolumeDriverException, - 'mount.quobyte is not installed', - drv.check_for_setup_error) - drv._execute.assert_called_once_with('mount.quobyte', - check_exit_code=False, - run_as_root=False) - - def 
test_check_for_setup_error_throws_client_not_executable(self): - """check_for_setup_error throws if client cannot be executed.""" - drv = self._driver - - drv._execute = mock.Mock(side_effect=OSError - (errno.EPERM, 'Operation not permitted')) - - self.assertRaisesAndMessageMatches(OSError, - 'Operation not permitted', - drv.check_for_setup_error) - drv._execute.assert_called_once_with('mount.quobyte', - check_exit_code=False, - run_as_root=False) - - def test_find_share_should_throw_error_if_there_is_no_mounted_shares(self): - """_find_share should throw error if there is no mounted share.""" - drv = self._driver - - drv._mounted_shares = [] - - self.assertRaises(exception.NotFound, - drv._find_share, - self._simple_volume()) - - def test_find_share(self): - """_find_share simple use case.""" - drv = self._driver - - drv._mounted_shares = [self.TEST_QUOBYTE_VOLUME] - - self.assertEqual(self.TEST_QUOBYTE_VOLUME, - drv._find_share(self._simple_volume())) - - def test_find_share_does_not_throw_error_if_there_isnt_enough_space(self): - """_find_share intentionally does not throw when no space is left.""" - with mock.patch.object(self._driver, '_get_available_capacity') \ - as mock_get_available_capacity: - drv = self._driver - - df_size = 2620544 - df_avail = 0 - mock_get_available_capacity.return_value = (df_avail, df_size) - - drv._mounted_shares = [self.TEST_QUOBYTE_VOLUME] - - self.assertEqual(self.TEST_QUOBYTE_VOLUME, - drv._find_share(self._simple_volume())) - - # The current implementation does not call _get_available_capacity. - # Future ones might do and therefore we mocked it. 
- self.assertGreaterEqual(mock_get_available_capacity.call_count, 0) - - def _simple_volume(self, **kwargs): - updates = {'id': self.VOLUME_UUID, - 'provider_location': self.TEST_QUOBYTE_VOLUME, - 'display_name': 'volume-%s' % self.VOLUME_UUID, - 'size': 10, - 'status': 'available'} - - updates.update(kwargs) - if 'display_name' not in updates: - updates['display_name'] = 'volume-%s' % updates['id'] - - return fake_volume.fake_volume_obj(self.context, **updates) - - def test_create_sparsed_volume(self): - drv = self._driver - volume = self._simple_volume() - - drv._create_sparsed_file = mock.Mock() - drv._set_rw_permissions_for_all = mock.Mock() - - drv._do_create_volume(volume) - drv._create_sparsed_file.assert_called_once_with(mock.ANY, mock.ANY) - drv._set_rw_permissions_for_all.assert_called_once_with(mock.ANY) - - def test_create_nonsparsed_volume(self): - drv = self._driver - volume = self._simple_volume() - - old_value = self._configuration.quobyte_sparsed_volumes - self._configuration.quobyte_sparsed_volumes = False - - drv._create_regular_file = mock.Mock() - drv._set_rw_permissions_for_all = mock.Mock() - - drv._do_create_volume(volume) - drv._create_regular_file.assert_called_once_with(mock.ANY, mock.ANY) - drv._set_rw_permissions_for_all.assert_called_once_with(mock.ANY) - - self._configuration.quobyte_sparsed_volumes = old_value - - def test_create_qcow2_volume(self): - drv = self._driver - - volume = self._simple_volume() - old_value = self._configuration.quobyte_qcow2_volumes - self._configuration.quobyte_qcow2_volumes = True - - drv._execute = mock.Mock() - - hashed = drv._get_hash_str(volume['provider_location']) - path = '%s/%s/volume-%s' % (self.TEST_MNT_POINT_BASE, - hashed, - self.VOLUME_UUID) - - drv._do_create_volume(volume) - - assert_calls = [mock.call('qemu-img', 'create', '-f', 'qcow2', - '-o', 'preallocation=metadata', path, - str(volume['size'] * units.Gi), - run_as_root=self._driver._execute_as_root), - mock.call('chmod', 'ugo+rw', 
path, - run_as_root=self._driver._execute_as_root)] - drv._execute.assert_has_calls(assert_calls) - - self._configuration.quobyte_qcow2_volumes = old_value - - def test_create_volume_should_ensure_quobyte_mounted(self): - """create_volume ensures shares provided in config are mounted.""" - drv = self._driver - - drv.LOG = mock.Mock() - drv._find_share = mock.Mock() - drv._find_share.return_value = self.TEST_QUOBYTE_VOLUME - drv._do_create_volume = mock.Mock() - drv._ensure_shares_mounted = mock.Mock() - - volume = self._simple_volume(size=self.TEST_SIZE_IN_GB) - drv.create_volume(volume) - - drv._find_share.assert_called_once_with(mock.ANY) - drv._do_create_volume.assert_called_once_with(volume) - drv._ensure_shares_mounted.assert_called_once_with() - - def test_create_volume_should_return_provider_location(self): - """create_volume should return provider_location with found share.""" - drv = self._driver - - drv.LOG = mock.Mock() - drv._ensure_shares_mounted = mock.Mock() - drv._do_create_volume = mock.Mock() - drv._find_share = mock.Mock(return_value=self.TEST_QUOBYTE_VOLUME) - - volume = self._simple_volume(size=self.TEST_SIZE_IN_GB) - result = drv.create_volume(volume) - self.assertEqual(self.TEST_QUOBYTE_VOLUME, result['provider_location']) - - drv._do_create_volume.assert_called_once_with(volume) - drv._ensure_shares_mounted.assert_called_once_with() - drv._find_share.assert_called_once_with(volume) - - @mock.patch('oslo_utils.fileutils.delete_if_exists') - def test_delete_volume(self, mock_delete_if_exists): - volume = self._simple_volume() - volume_filename = 'volume-%s' % self.VOLUME_UUID - volume_path = '%s/%s' % (self.TEST_MNT_POINT, volume_filename) - info_file = volume_path + '.info' - - with mock.patch.object(self._driver, '_ensure_share_mounted') as \ - mock_ensure_share_mounted, \ - mock.patch.object(self._driver, '_local_volume_dir') as \ - mock_local_volume_dir, \ - mock.patch.object(self._driver, - 'get_active_image_from_info') as \ - 
mock_active_image_from_info, \ - mock.patch.object(self._driver, '_execute') as \ - mock_execute, \ - mock.patch.object(self._driver, '_local_path_volume') as \ - mock_local_path_volume, \ - mock.patch.object(self._driver, '_local_path_volume_info') as \ - mock_local_path_volume_info: - mock_local_volume_dir.return_value = self.TEST_MNT_POINT - mock_active_image_from_info.return_value = volume_filename - mock_local_path_volume.return_value = volume_path - mock_local_path_volume_info.return_value = info_file - - self._driver.delete_volume(volume) - - mock_ensure_share_mounted.assert_called_once_with( - volume['provider_location']) - mock_local_volume_dir.assert_called_once_with(volume) - mock_active_image_from_info.assert_called_once_with(volume) - mock_execute.assert_called_once_with('rm', '-f', volume_path, - run_as_root= - self._driver._execute_as_root) - mock_local_path_volume_info.assert_called_once_with(volume) - mock_local_path_volume.assert_called_once_with(volume) - mock_delete_if_exists.assert_any_call(volume_path) - mock_delete_if_exists.assert_any_call(info_file) - - def test_delete_should_ensure_share_mounted(self): - """delete_volume should ensure that corresponding share is mounted.""" - drv = self._driver - - drv._execute = mock.Mock() - - volume = self._simple_volume(display_name='volume-123') - - drv._ensure_share_mounted = mock.Mock() - - drv.delete_volume(volume) - - (drv._ensure_share_mounted. 
- assert_called_once_with(self.TEST_QUOBYTE_VOLUME)) - drv._execute.assert_called_once_with('rm', '-f', - mock.ANY, - run_as_root=False) - - def test_delete_should_not_delete_if_provider_location_not_provided(self): - """delete_volume shouldn't delete if provider_location missed.""" - drv = self._driver - - drv._ensure_share_mounted = mock.Mock() - drv._execute = mock.Mock() - - volume = self._simple_volume(display_name='volume-123', - provider_location=None) - - drv.delete_volume(volume) - - drv._ensure_share_mounted.assert_not_called() - drv._execute.assert_not_called() - - def test_extend_volume(self): - drv = self._driver - - volume = self._simple_volume() - - volume_path = '%s/%s/volume-%s' % (self.TEST_MNT_POINT_BASE, - drv._get_hash_str( - self.TEST_QUOBYTE_VOLUME), - self.VOLUME_UUID) - - qemu_img_info_output = """image: volume-%s - file format: qcow2 - virtual size: 1.0G (1073741824 bytes) - disk size: 473K - """ % self.VOLUME_UUID - - img_info = imageutils.QemuImgInfo(qemu_img_info_output) - - image_utils.qemu_img_info = mock.Mock(return_value=img_info) - image_utils.resize_image = mock.Mock() - - drv.extend_volume(volume, 3) - - image_utils.qemu_img_info.assert_called_once_with(volume_path, - run_as_root=False) - image_utils.resize_image.assert_called_once_with(volume_path, 3) - - def test_copy_volume_from_snapshot(self): - drv = self._driver - - # lots of test vars to be prepared at first - dest_volume = self._simple_volume( - id='c1073000-0000-0000-0000-0000000c1073') - src_volume = self._simple_volume() - - vol_dir = os.path.join(self.TEST_MNT_POINT_BASE, - drv._get_hash_str(self.TEST_QUOBYTE_VOLUME)) - src_vol_path = os.path.join(vol_dir, src_volume['name']) - dest_vol_path = os.path.join(vol_dir, dest_volume['name']) - info_path = os.path.join(vol_dir, src_volume['name']) + '.info' - - snapshot = fake_snapshot.fake_snapshot_obj( - self.context, - volume_name=src_volume.name, - display_name='clone-snap-%s' % src_volume.id, - size=src_volume.size, - 
volume_size=src_volume.size, - volume_id=src_volume.id, - id=self.SNAP_UUID) - snapshot.volume = src_volume - - snap_file = dest_volume['name'] + '.' + snapshot['id'] - snap_path = os.path.join(vol_dir, snap_file) - - size = dest_volume['size'] - - qemu_img_output = """image: %s - file format: raw - virtual size: 1.0G (1073741824 bytes) - disk size: 173K - backing file: %s - """ % (snap_file, src_volume['name']) - img_info = imageutils.QemuImgInfo(qemu_img_output) - - # mocking and testing starts here - image_utils.convert_image = mock.Mock() - drv._read_info_file = mock.Mock(return_value= - {'active': snap_file, - snapshot['id']: snap_file}) - image_utils.qemu_img_info = mock.Mock(return_value=img_info) - drv._set_rw_permissions_for_all = mock.Mock() - - drv._copy_volume_from_snapshot(snapshot, dest_volume, size) - - drv._read_info_file.assert_called_once_with(info_path) - image_utils.qemu_img_info.assert_called_once_with(snap_path, - run_as_root=False) - (image_utils.convert_image. - assert_called_once_with(src_vol_path, - dest_vol_path, - 'raw', - run_as_root=self._driver._execute_as_root)) - drv._set_rw_permissions_for_all.assert_called_once_with(dest_vol_path) - - def test_create_volume_from_snapshot_status_not_available(self): - """Expect an error when the snapshot's status is not 'available'.""" - drv = self._driver - - src_volume = self._simple_volume() - - snap_ref = fake_snapshot.fake_snapshot_obj( - self.context, - volume_name=src_volume.name, - display_name='clone-snap-%s' % src_volume.id, - volume_size=src_volume.size, - volume_id=src_volume.id, - id=self.SNAP_UUID, - status='error') - snap_ref.volume = src_volume - - new_volume = self._simple_volume(size=snap_ref.volume_size) - - self.assertRaises(exception.InvalidSnapshot, - drv.create_volume_from_snapshot, - new_volume, - snap_ref) - - def test_create_volume_from_snapshot(self): - drv = self._driver - - src_volume = self._simple_volume() - - snap_ref = fake_snapshot.fake_snapshot_obj( - 
self.context, - volume_name=src_volume.name, - display_name='clone-snap-%s' % src_volume.id, - volume_size=src_volume.size, - volume_id=src_volume.id, - id=self.SNAP_UUID, - status='available') - snap_ref.volume = src_volume - - new_volume = self._simple_volume(size=snap_ref.volume_size) - - drv._ensure_shares_mounted = mock.Mock() - drv._find_share = mock.Mock(return_value=self.TEST_QUOBYTE_VOLUME) - drv._do_create_volume = mock.Mock() - drv._copy_volume_from_snapshot = mock.Mock() - - drv.create_volume_from_snapshot(new_volume, snap_ref) - - drv._ensure_shares_mounted.assert_called_once_with() - drv._find_share.assert_called_once_with(new_volume) - drv._do_create_volume.assert_called_once_with(new_volume) - (drv._copy_volume_from_snapshot. - assert_called_once_with(snap_ref, new_volume, new_volume['size'])) - - def test_initialize_connection(self): - drv = self._driver - - volume = self._simple_volume() - vol_dir = os.path.join(self.TEST_MNT_POINT_BASE, - drv._get_hash_str(self.TEST_QUOBYTE_VOLUME)) - vol_path = os.path.join(vol_dir, volume['name']) - - qemu_img_output = """image: %s - file format: raw - virtual size: 1.0G (1073741824 bytes) - disk size: 173K - """ % volume['name'] - img_info = imageutils.QemuImgInfo(qemu_img_output) - - drv.get_active_image_from_info = mock.Mock(return_value=volume['name']) - image_utils.qemu_img_info = mock.Mock(return_value=img_info) - - conn_info = drv.initialize_connection(volume, None) - - drv.get_active_image_from_info.assert_called_once_with(volume) - image_utils.qemu_img_info.assert_called_once_with(vol_path, - run_as_root=False) - - self.assertEqual('raw', conn_info['data']['format']) - self.assertEqual('quobyte', conn_info['driver_volume_type']) - self.assertEqual(volume['name'], conn_info['data']['name']) - self.assertEqual(self.TEST_MNT_POINT_BASE, - conn_info['mount_point_base']) - - def test_copy_volume_to_image_raw_image(self): - drv = self._driver - - volume = self._simple_volume() - volume_path = '%s/%s' % 
(self.TEST_MNT_POINT, volume['name']) - image_meta = {'id': '10958016-e196-42e3-9e7f-5d8927ae3099'} - - with mock.patch.object(drv, 'get_active_image_from_info') as \ - mock_get_active_image_from_info, \ - mock.patch.object(drv, '_local_volume_dir') as \ - mock_local_volume_dir, \ - mock.patch.object(image_utils, 'qemu_img_info') as \ - mock_qemu_img_info, \ - mock.patch.object(image_utils, 'upload_volume') as \ - mock_upload_volume, \ - mock.patch.object(image_utils, 'create_temporary_file') as \ - mock_create_temporary_file: - mock_get_active_image_from_info.return_value = volume['name'] - - mock_local_volume_dir.return_value = self.TEST_MNT_POINT - - mock_create_temporary_file.return_value = self.TEST_TMP_FILE - - qemu_img_output = """image: %s - file format: raw - virtual size: 1.0G (1073741824 bytes) - disk size: 173K - """ % volume['name'] - img_info = imageutils.QemuImgInfo(qemu_img_output) - mock_qemu_img_info.return_value = img_info - - upload_path = volume_path - - drv.copy_volume_to_image(mock.ANY, volume, mock.ANY, image_meta) - - mock_get_active_image_from_info.assert_called_once_with(volume) - mock_local_volume_dir.assert_called_once_with(volume) - mock_qemu_img_info.assert_called_once_with(volume_path, - run_as_root=False) - mock_upload_volume.assert_called_once_with( - mock.ANY, mock.ANY, mock.ANY, upload_path, run_as_root=False) - self.assertTrue(mock_create_temporary_file.called) - - def test_copy_volume_to_image_qcow2_image(self): - """Upload a qcow2 image file which has to be converted to raw first.""" - drv = self._driver - - volume = self._simple_volume() - volume_path = '%s/%s' % (self.TEST_MNT_POINT, volume['name']) - image_meta = {'id': '10958016-e196-42e3-9e7f-5d8927ae3099'} - - with mock.patch.object(drv, 'get_active_image_from_info') as \ - mock_get_active_image_from_info, \ - mock.patch.object(drv, '_local_volume_dir') as \ - mock_local_volume_dir, \ - mock.patch.object(image_utils, 'qemu_img_info') as \ - mock_qemu_img_info, \ - 
mock.patch.object(image_utils, 'convert_image') as \ - mock_convert_image, \ - mock.patch.object(image_utils, 'upload_volume') as \ - mock_upload_volume, \ - mock.patch.object(image_utils, 'create_temporary_file') as \ - mock_create_temporary_file: - mock_get_active_image_from_info.return_value = volume['name'] - - mock_local_volume_dir.return_value = self.TEST_MNT_POINT - - mock_create_temporary_file.return_value = self.TEST_TMP_FILE - - qemu_img_output = """image: %s - file format: qcow2 - virtual size: 1.0G (1073741824 bytes) - disk size: 173K - """ % volume['name'] - img_info = imageutils.QemuImgInfo(qemu_img_output) - mock_qemu_img_info.return_value = img_info - - upload_path = self.TEST_TMP_FILE - - drv.copy_volume_to_image(mock.ANY, volume, mock.ANY, image_meta) - - mock_get_active_image_from_info.assert_called_once_with(volume) - mock_local_volume_dir.assert_called_with(volume) - mock_qemu_img_info.assert_called_once_with(volume_path, - run_as_root=False) - mock_convert_image.assert_called_once_with( - volume_path, upload_path, 'raw', run_as_root=False) - mock_upload_volume.assert_called_once_with( - mock.ANY, mock.ANY, mock.ANY, upload_path, run_as_root=False) - self.assertTrue(mock_create_temporary_file.called) - - def test_copy_volume_to_image_snapshot_exists(self): - """Upload an active snapshot which has to be converted to raw first.""" - drv = self._driver - - volume = self._simple_volume() - volume_path = '%s/volume-%s' % (self.TEST_MNT_POINT, self.VOLUME_UUID) - volume_filename = 'volume-%s' % self.VOLUME_UUID - image_meta = {'id': '10958016-e196-42e3-9e7f-5d8927ae3099'} - - with mock.patch.object(drv, 'get_active_image_from_info') as \ - mock_get_active_image_from_info, \ - mock.patch.object(drv, '_local_volume_dir') as \ - mock_local_volume_dir, \ - mock.patch.object(image_utils, 'qemu_img_info') as \ - mock_qemu_img_info, \ - mock.patch.object(image_utils, 'convert_image') as \ - mock_convert_image, \ - mock.patch.object(image_utils, 
'upload_volume') as \ - mock_upload_volume, \ - mock.patch.object(image_utils, 'create_temporary_file') as \ - mock_create_temporary_file: - mock_get_active_image_from_info.return_value = volume['name'] - - mock_local_volume_dir.return_value = self.TEST_MNT_POINT - - mock_create_temporary_file.return_value = self.TEST_TMP_FILE - - qemu_img_output = """image: volume-%s.%s - file format: qcow2 - virtual size: 1.0G (1073741824 bytes) - disk size: 173K - backing file: %s - """ % (self.VOLUME_UUID, self.SNAP_UUID, volume_filename) - img_info = imageutils.QemuImgInfo(qemu_img_output) - mock_qemu_img_info.return_value = img_info - - upload_path = self.TEST_TMP_FILE - - drv.copy_volume_to_image(mock.ANY, volume, mock.ANY, image_meta) - - mock_get_active_image_from_info.assert_called_once_with(volume) - mock_local_volume_dir.assert_called_with(volume) - mock_qemu_img_info.assert_called_once_with(volume_path, - run_as_root=False) - mock_convert_image.assert_called_once_with( - volume_path, upload_path, 'raw', run_as_root=False) - mock_upload_volume.assert_called_once_with( - mock.ANY, mock.ANY, mock.ANY, upload_path, run_as_root=False) - self.assertTrue(mock_create_temporary_file.called) - - def test_set_nas_security_options_default(self): - drv = self._driver - self.assertEqual("true", drv.configuration.nas_secure_file_operations) - self.assertEqual("true", - drv.configuration.nas_secure_file_permissions) - self.assertFalse(drv._execute_as_root) - - def test_set_nas_security_options_insecure(self): - drv = self._driver - drv.configuration.nas_secure_file_operations = "false" - drv.configuration.nas_secure_file_permissions = "false" - - drv.set_nas_security_options(is_new_cinder_install=True) - - self.assertEqual("false", - drv.configuration.nas_secure_file_operations) - self.assertEqual("false", - drv.configuration.nas_secure_file_permissions) - self.assertTrue(drv._execute_as_root) - - def test_set_nas_security_options_explicitly_secure(self): - drv = self._driver - 
drv.configuration.nas_secure_file_operations = "true" - drv.configuration.nas_secure_file_permissions = "true" - - drv.set_nas_security_options(is_new_cinder_install=True) - - self.assertEqual("true", - drv.configuration.nas_secure_file_operations) - self.assertEqual("true", - drv.configuration.nas_secure_file_permissions) - self.assertFalse(drv._execute_as_root) - - @mock.patch.object(psutil, "disk_partitions") - @mock.patch.object(os, "stat") - def test_validate_volume_all_good(self, stat_mock, part_mock): - part_mock.return_value = self.get_mock_partitions() - drv = self._driver - - def statMockCall(*args): - if args[0] == self.TEST_MNT_POINT: - stat_result = mock.Mock() - stat_result.st_size = 0 - return stat_result - return os.stat(args) - stat_mock.side_effect = statMockCall - - drv._validate_volume(self.TEST_MNT_POINT) - - stat_mock.assert_called_once_with(self.TEST_MNT_POINT) - part_mock.assert_called_once_with(all=True) - - @mock.patch.object(psutil, "disk_partitions") - @mock.patch.object(os, "stat") - def test_validate_volume_mount_not_working(self, stat_mock, part_mock): - part_mock.return_value = self.get_mock_partitions() - drv = self._driver - - def statMockCall(*args): - if args[0] == self.TEST_MNT_POINT: - raise exception.VolumeDriverException() - stat_mock.side_effect = [statMockCall, os.stat] - - self.assertRaises( - exception.VolumeDriverException, - drv._validate_volume, - self.TEST_MNT_POINT) - stat_mock.assert_called_once_with(self.TEST_MNT_POINT) - part_mock.assert_called_once_with(all=True) - - @mock.patch.object(psutil, "disk_partitions") - def test_validate_volume_no_mtab_entry(self, part_mock): - part_mock.return_value = [] # no quobyte@ devices - msg = ("Volume driver reported an error: " - "No matching Quobyte mount entry for %(mpt)s" - " could be found for validation in partition list." 
- % {'mpt': self.TEST_MNT_POINT}) - - self.assertRaisesAndMessageMatches( - exception.VolumeDriverException, - msg, - self._driver._validate_volume, - self.TEST_MNT_POINT) - - @mock.patch.object(psutil, "disk_partitions") - def test_validate_volume_wrong_mount_type(self, part_mock): - mypart = mock.Mock() - mypart.device = "not-quobyte" - mypart.mountpoint = self.TEST_MNT_POINT - part_mock.return_value = [mypart] - msg = ("Volume driver reported an error: " - "The mount %(mpt)s is not a valid" - " Quobyte volume according to partition list." - % {'mpt': self.TEST_MNT_POINT}) - drv = self._driver - - self.assertRaisesAndMessageMatches( - exception.VolumeDriverException, - msg, - drv._validate_volume, - self.TEST_MNT_POINT) - part_mock.assert_called_once_with(all=True) - - @mock.patch.object(psutil, "disk_partitions") - def test_validate_volume_stale_mount(self, part_mock): - part_mock.return_value = self.get_mock_partitions() - drv = self._driver - - # As this uses a local fs the dir size is >0, raising an exception - self.assertRaises( - exception.VolumeDriverException, - drv._validate_volume, - self.TEST_MNT_POINT) diff --git a/cinder/tests/unit/volume/drivers/test_rbd.py b/cinder/tests/unit/volume/drivers/test_rbd.py deleted file mode 100644 index b7481bdc3..000000000 --- a/cinder/tests/unit/volume/drivers/test_rbd.py +++ /dev/null @@ -1,1993 +0,0 @@ - -# Copyright 2012 Josh Durgin -# Copyright 2013 Canonical Ltd. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the -# License for the specific language governing permissions and limitations -# under the License. - -import ddt -import math -import os -import tempfile - -import mock -from oslo_utils import imageutils -from oslo_utils import units - -from cinder import context -from cinder import exception -import cinder.image.glance -from cinder.image import image_utils -from cinder import objects -from cinder.objects import fields -from cinder import test -from cinder.tests.unit import fake_constants as fake -from cinder.tests.unit import fake_snapshot -from cinder.tests.unit import fake_volume -from cinder.tests.unit import utils -from cinder.tests.unit.volume import test_driver -from cinder.volume import configuration as conf -import cinder.volume.drivers.rbd as driver -from cinder.volume.flows.manager import create_volume - - -# This is used to collect raised exceptions so that tests may check what was -# raised. -# NOTE: this must be initialised in test setUp(). -RAISED_EXCEPTIONS = [] - - -class MockException(Exception): - - def __init__(self, *args, **kwargs): - RAISED_EXCEPTIONS.append(self.__class__) - - -class MockImageNotFoundException(MockException): - """Used as mock for rbd.ImageNotFound.""" - - -class MockImageBusyException(MockException): - """Used as mock for rbd.ImageBusy.""" - - -class MockImageExistsException(MockException): - """Used as mock for rbd.ImageExists.""" - - -def common_mocks(f): - """Decorator to set mocks common to all tests. - - The point of doing these mocks here is so that we don't accidentally set - mocks that can't/don't get unset. 
- """ - def _FakeRetrying(wait_func=None, - original_retrying = driver.utils.retrying.Retrying, - *args, **kwargs): - return original_retrying(wait_func=lambda *a, **k: 0, - *args, **kwargs) - - def _common_inner_inner1(inst, *args, **kwargs): - @mock.patch('retrying.Retrying', _FakeRetrying) - @mock.patch.object(driver.RBDDriver, '_get_usage_info') - @mock.patch('cinder.volume.drivers.rbd.RBDVolumeProxy') - @mock.patch('cinder.volume.drivers.rbd.RADOSClient') - @mock.patch('cinder.backup.drivers.ceph.rbd') - @mock.patch('cinder.backup.drivers.ceph.rados') - def _common_inner_inner2(mock_rados, mock_rbd, mock_client, - mock_proxy, mock_usage_info): - inst.mock_rbd = mock_rbd - inst.mock_rados = mock_rados - inst.mock_client = mock_client - inst.mock_proxy = mock_proxy - inst.mock_rbd.RBD.Error = Exception - inst.mock_rados.Error = Exception - inst.mock_rbd.ImageBusy = MockImageBusyException - inst.mock_rbd.ImageNotFound = MockImageNotFoundException - inst.mock_rbd.ImageExists = MockImageExistsException - inst.mock_rbd.InvalidArgument = MockImageNotFoundException - - inst.driver.rbd = inst.mock_rbd - inst.driver.rados = inst.mock_rados - return f(inst, *args, **kwargs) - - return _common_inner_inner2() - - return _common_inner_inner1 - - -CEPH_MON_DUMP = r"""dumped monmap epoch 1 -{ "epoch": 1, - "fsid": "33630410-6d93-4d66-8e42-3b953cf194aa", - "modified": "2013-05-22 17:44:56.343618", - "created": "2013-05-22 17:44:56.343618", - "mons": [ - { "rank": 0, - "name": "a", - "addr": "[::1]:6789\/0"}, - { "rank": 1, - "name": "b", - "addr": "[::1]:6790\/0"}, - { "rank": 2, - "name": "c", - "addr": "[::1]:6791\/0"}, - { "rank": 3, - "name": "d", - "addr": "127.0.0.1:6792\/0"}, - { "rank": 4, - "name": "e", - "addr": "example.com:6791\/0"}], - "quorum": [ - 0, - 1, - 2]} -""" - - -def mock_driver_configuration(value): - if value == 'max_over_subscription_ratio': - return 1.0 - if value == 'reserved_percentage': - return 0 - return 'RBD' - - -@ddt.ddt -class 
RBDTestCase(test.TestCase): - - def setUp(self): - global RAISED_EXCEPTIONS - RAISED_EXCEPTIONS = [] - super(RBDTestCase, self).setUp() - - self.cfg = mock.Mock(spec=conf.Configuration) - self.cfg.image_conversion_dir = None - self.cfg.rbd_cluster_name = 'nondefault' - self.cfg.rbd_pool = 'rbd' - self.cfg.rbd_ceph_conf = '/etc/ceph/my_ceph.conf' - self.cfg.rbd_keyring_conf = '/etc/ceph/my_ceph.client.keyring' - self.cfg.rbd_secret_uuid = None - self.cfg.rbd_user = 'cinder' - self.cfg.volume_backend_name = None - self.cfg.volume_dd_blocksize = '1M' - self.cfg.rbd_store_chunk_size = 4 - self.cfg.rados_connection_retries = 3 - self.cfg.rados_connection_interval = 5 - - mock_exec = mock.Mock() - mock_exec.return_value = ('', '') - - self.driver = driver.RBDDriver(execute=mock_exec, - configuration=self.cfg) - self.driver.set_initialized() - - self.context = context.get_admin_context() - - self.volume_a = fake_volume.fake_volume_obj( - self.context, - **{'name': u'volume-0000000a', - 'id': '4c39c3c7-168f-4b32-b585-77f1b3bf0a38', - 'size': 10}) - - self.volume_b = fake_volume.fake_volume_obj( - self.context, - **{'name': u'volume-0000000b', - 'id': '0c7d1f44-5a06-403f-bb82-ae7ad0d693a6', - 'size': 10}) - - self.snapshot = fake_snapshot.fake_snapshot_obj( - self.context, name='snapshot-0000000a') - - self.snapshot_b = fake_snapshot.fake_snapshot_obj( - self.context, - **{'name': u'snapshot-0000000n', - 'expected_attrs': ['volume'], - 'volume': {'id': fake.VOLUME_ID, - 'name': 'cinder-volume', - 'size': 128, - 'host': 'host@fakebackend#fakepool'} - }) - - @ddt.data({'cluster_name': None, 'pool_name': 'rbd'}, - {'cluster_name': 'volumes', 'pool_name': None}) - @ddt.unpack - def test_min_config(self, cluster_name, pool_name): - self.cfg.rbd_cluster_name = cluster_name - self.cfg.rbd_pool = pool_name - - with mock.patch('cinder.volume.drivers.rbd.rados'): - self.assertRaises(exception.InvalidConfigurationValue, - self.driver.check_for_setup_error) - - def 
test_parse_replication_config_empty(self): - self.driver._parse_replication_configs([]) - self.assertEqual([], self.driver._replication_targets) - - def test_parse_replication_config_missing(self): - """Parsing replication_device without required backend_id.""" - cfg = [{'conf': '/etc/ceph/secondary.conf'}] - self.assertRaises(exception.InvalidConfigurationValue, - self.driver._parse_replication_configs, - cfg) - - def test_parse_replication_config_defaults(self): - """Parsing replication_device with default conf and user.""" - cfg = [{'backend_id': 'secondary-backend'}] - expected = [{'name': 'secondary-backend', - 'conf': '/etc/ceph/secondary-backend.conf', - 'user': 'cinder'}] - self.driver._parse_replication_configs(cfg) - self.assertEqual(expected, self.driver._replication_targets) - - @ddt.data(1, 2) - def test_parse_replication_config(self, num_targets): - cfg = [{'backend_id': 'secondary-backend', - 'conf': 'foo', - 'user': 'bar'}, - {'backend_id': 'tertiary-backend'}] - expected = [{'name': 'secondary-backend', - 'conf': 'foo', - 'user': 'bar'}, - {'name': 'tertiary-backend', - 'conf': '/etc/ceph/tertiary-backend.conf', - 'user': 'cinder'}] - self.driver._parse_replication_configs(cfg[:num_targets]) - self.assertEqual(expected[:num_targets], - self.driver._replication_targets) - - def test_do_setup_replication_disabled(self): - with mock.patch.object(self.driver.configuration, 'safe_get', - return_value=None): - self.driver.do_setup(self.context) - self.assertFalse(self.driver._is_replication_enabled) - self.assertEqual([], self.driver._replication_targets) - self.assertEqual([], self.driver._target_names) - self.assertEqual({'name': self.cfg.rbd_cluster_name, - 'conf': self.cfg.rbd_ceph_conf, - 'user': self.cfg.rbd_user}, - self.driver._active_config) - - def test_do_setup_replication(self): - cfg = [{'backend_id': 'secondary-backend', - 'conf': 'foo', - 'user': 'bar'}] - expected = [{'name': 'secondary-backend', - 'conf': 'foo', - 'user': 'bar'}] - - 
with mock.patch.object(self.driver.configuration, 'safe_get', - return_value=cfg): - self.driver.do_setup(self.context) - self.assertTrue(self.driver._is_replication_enabled) - self.assertEqual(expected, self.driver._replication_targets) - self.assertEqual({'name': self.cfg.rbd_cluster_name, - 'conf': self.cfg.rbd_ceph_conf, - 'user': self.cfg.rbd_user}, - self.driver._active_config) - - def test_do_setup_replication_failed_over(self): - cfg = [{'backend_id': 'secondary-backend', - 'conf': 'foo', - 'user': 'bar'}] - expected = [{'name': 'secondary-backend', - 'conf': 'foo', - 'user': 'bar'}] - self.driver._active_backend_id = 'secondary-backend' - - with mock.patch.object(self.driver.configuration, 'safe_get', - return_value=cfg): - self.driver.do_setup(self.context) - self.assertTrue(self.driver._is_replication_enabled) - self.assertEqual(expected, self.driver._replication_targets) - self.assertEqual(expected[0], self.driver._active_config) - - def test_do_setup_replication_failed_over_unknown(self): - cfg = [{'backend_id': 'secondary-backend', - 'conf': 'foo', - 'user': 'bar'}] - self.driver._active_backend_id = 'unknown-backend' - - with mock.patch.object(self.driver.configuration, 'safe_get', - return_value=cfg): - self.assertRaises(exception.InvalidReplicationTarget, - self.driver.do_setup, - self.context) - - @mock.patch.object(driver.RBDDriver, '_enable_replication', - return_value=mock.sentinel.volume_update) - def test_enable_replication_if_needed_replicated_volume(self, mock_enable): - self.volume_a.volume_type = fake_volume.fake_volume_type_obj( - self.context, - id=fake.VOLUME_TYPE_ID, - extra_specs={'replication_enabled': ' True'}) - res = self.driver._enable_replication_if_needed(self.volume_a) - self.assertEqual(mock.sentinel.volume_update, res) - mock_enable.assert_called_once_with(self.volume_a) - - @ddt.data(False, True) - @mock.patch.object(driver.RBDDriver, '_enable_replication') - def test_enable_replication_if_needed_non_replicated(self, 
enabled, - mock_enable): - self.driver._is_replication_enabled = enabled - res = self.driver._enable_replication_if_needed(self.volume_a) - if enabled: - expect = {'replication_status': fields.ReplicationStatus.DISABLED} - else: - expect = None - self.assertEqual(expect, res) - mock_enable.assert_not_called() - - @ddt.data(True, False) - @common_mocks - def test_enable_replication(self, journaling_enabled): - """Test _enable_replication method. - - We want to confirm that if the Ceph backend has globally enabled - journaling we don't try to enable it again and we properly indicate - with our return value that it was already enabled. - """ - journaling_feat = 1 - self.driver.rbd.RBD_FEATURE_JOURNALING = journaling_feat - image = self.mock_proxy.return_value.__enter__.return_value - if journaling_enabled: - image.features.return_value = journaling_feat - else: - image.features.return_value = 0 - - enabled = str(journaling_enabled).lower() - expected = { - 'replication_driver_data': '{"had_journaling":%s}' % enabled, - 'replication_status': 'enabled', - } - - res = self.driver._enable_replication(self.volume_a) - self.assertEqual(expected, res) - - if journaling_enabled: - image.update_features.assert_not_called() - else: - image.update_features.assert_called_once_with(journaling_feat, - True) - image.mirror_image_enable.assert_called_once_with() - - @ddt.data('true', 'false') - @common_mocks - def test_disable_replication(self, had_journaling): - driver_data = '{"had_journaling": %s}' % had_journaling - self.volume_a.replication_driver_data = driver_data - image = self.mock_proxy.return_value.__enter__.return_value - - res = self.driver._disable_replication(self.volume_a) - expected = {'replication_status': fields.ReplicationStatus.DISABLED, - 'replication_driver_data': None} - self.assertEqual(expected, res) - image.mirror_image_disable.assert_called_once_with(False) - - if had_journaling == 'true': - image.update_features.assert_not_called() - else: - 
image.update_features.assert_called_once_with( - self.driver.rbd.RBD_FEATURE_JOURNALING, False) - - @common_mocks - @mock.patch.object(driver.RBDDriver, '_enable_replication') - def test_create_volume(self, mock_enable_repl): - client = self.mock_client.return_value - client.__enter__.return_value = client - - res = self.driver.create_volume(self.volume_a) - - self.assertIsNone(res) - chunk_size = self.cfg.rbd_store_chunk_size * units.Mi - order = int(math.log(chunk_size, 2)) - args = [client.ioctx, str(self.volume_a.name), - self.volume_a.size * units.Gi, order] - kwargs = {'old_format': False, - 'features': client.features} - self.mock_rbd.RBD.return_value.create.assert_called_once_with( - *args, **kwargs) - client.__enter__.assert_called_once_with() - client.__exit__.assert_called_once_with(None, None, None) - mock_enable_repl.assert_not_called() - - @common_mocks - @mock.patch.object(driver.RBDDriver, '_enable_replication') - def test_create_volume_replicated(self, mock_enable_repl): - self.volume_a.volume_type = fake_volume.fake_volume_type_obj( - self.context, - id=fake.VOLUME_TYPE_ID, - extra_specs={'replication_enabled': ' True'}) - - client = self.mock_client.return_value - client.__enter__.return_value = client - - expected_update = { - 'replication_status': 'enabled', - 'replication_driver_data': '{"had_journaling": false}' - } - mock_enable_repl.return_value = expected_update - - res = self.driver.create_volume(self.volume_a) - self.assertEqual(expected_update, res) - mock_enable_repl.assert_called_once_with(self.volume_a) - - chunk_size = self.cfg.rbd_store_chunk_size * units.Mi - order = int(math.log(chunk_size, 2)) - self.mock_rbd.RBD.return_value.create.assert_called_once_with( - client.ioctx, self.volume_a.name, self.volume_a.size * units.Gi, - order, old_format=False, features=client.features) - - client.__enter__.assert_called_once_with() - client.__exit__.assert_called_once_with(None, None, None) - - @common_mocks - def 
test_create_encrypted_volume(self): - self.volume_a.encryption_key_id = \ - '00000000-0000-0000-0000-000000000000' - self.assertRaises(exception.VolumeDriverException, - self.driver.create_volume, - self.volume_a) - - @common_mocks - def test_manage_existing_get_size(self): - with mock.patch.object(self.driver.rbd.Image(), 'size') as \ - mock_rbd_image_size: - with mock.patch.object(self.driver.rbd.Image(), 'close') \ - as mock_rbd_image_close: - mock_rbd_image_size.return_value = 2 * units.Gi - existing_ref = {'source-name': self.volume_a.name} - return_size = self.driver.manage_existing_get_size( - self.volume_a, - existing_ref) - self.assertEqual(2, return_size) - mock_rbd_image_size.assert_called_once_with() - mock_rbd_image_close.assert_called_once_with() - - @common_mocks - def test_manage_existing_get_non_integer_size(self): - rbd_image = self.driver.rbd.Image.return_value - rbd_image.size.return_value = int(1.75 * units.Gi) - existing_ref = {'source-name': self.volume_a.name} - return_size = self.driver.manage_existing_get_size(self.volume_a, - existing_ref) - self.assertEqual(2, return_size) - rbd_image.size.assert_called_once_with() - rbd_image.close.assert_called_once_with() - - @common_mocks - def test_manage_existing_get_invalid_size(self): - - with mock.patch.object(self.driver.rbd.Image(), 'size') as \ - mock_rbd_image_size: - with mock.patch.object(self.driver.rbd.Image(), 'close') \ - as mock_rbd_image_close: - mock_rbd_image_size.return_value = 'abcd' - existing_ref = {'source-name': self.volume_a.name} - self.assertRaises(exception.VolumeBackendAPIException, - self.driver.manage_existing_get_size, - self.volume_a, existing_ref) - - mock_rbd_image_size.assert_called_once_with() - mock_rbd_image_close.assert_called_once_with() - - @common_mocks - def test_manage_existing(self): - client = self.mock_client.return_value - client.__enter__.return_value = client - - with mock.patch.object(self.driver.rbd.RBD(), 'rename') as \ - mock_rbd_image_rename: - 
exist_volume = 'vol-exist' - existing_ref = {'source-name': exist_volume} - mock_rbd_image_rename.return_value = 0 - self.driver.manage_existing(self.volume_a, existing_ref) - mock_rbd_image_rename.assert_called_with( - client.ioctx, - exist_volume, - self.volume_a.name) - - @common_mocks - def test_manage_existing_with_exist_rbd_image(self): - client = self.mock_client.return_value - client.__enter__.return_value = client - - self.mock_rbd.RBD.return_value.rename.side_effect = ( - MockImageExistsException) - - exist_volume = 'vol-exist' - existing_ref = {'source-name': exist_volume} - self.assertRaises(self.mock_rbd.ImageExists, - self.driver.manage_existing, - self.volume_a, existing_ref) - - # Make sure the exception was raised - self.assertEqual(RAISED_EXCEPTIONS, - [self.mock_rbd.ImageExists]) - - @common_mocks - def test_manage_existing_with_invalid_rbd_image(self): - self.mock_rbd.Image.side_effect = self.mock_rbd.ImageNotFound - - invalid_volume = 'vol-invalid' - invalid_ref = {'source-name': invalid_volume} - - self.assertRaises(exception.ManageExistingInvalidReference, - self.driver.manage_existing_get_size, - self.volume_a, invalid_ref) - # Make sure the exception was raised - self.assertEqual([self.mock_rbd.ImageNotFound], - RAISED_EXCEPTIONS) - - @common_mocks - def test_delete_backup_snaps(self): - self.driver.rbd.Image.remove_snap = mock.Mock() - with mock.patch.object(self.driver, '_get_backup_snaps') as \ - mock_get_backup_snaps: - mock_get_backup_snaps.return_value = [{'name': 'snap1'}] - rbd_image = self.driver.rbd.Image() - self.driver._delete_backup_snaps(rbd_image) - mock_get_backup_snaps.assert_called_once_with(rbd_image) - self.assertTrue( - self.driver.rbd.Image.return_value.remove_snap.called) - - @common_mocks - def test_delete_volume(self): - client = self.mock_client.return_value - - self.driver.rbd.Image.return_value.list_snaps.return_value = [] - - with mock.patch.object(self.driver, '_get_clone_info') as \ - mock_get_clone_info: - 
with mock.patch.object(self.driver, '_delete_backup_snaps') as \ - mock_delete_backup_snaps: - mock_get_clone_info.return_value = (None, None, None) - - self.driver.delete_volume(self.volume_a) - - mock_get_clone_info.assert_called_once_with( - self.mock_rbd.Image.return_value, - self.volume_a.name, - None) - (self.driver.rbd.Image.return_value - .list_snaps.assert_called_once_with()) - client.__enter__.assert_called_once_with() - client.__exit__.assert_called_once_with(None, None, None) - mock_delete_backup_snaps.assert_called_once_with( - self.mock_rbd.Image.return_value) - self.assertFalse( - self.driver.rbd.Image.return_value.unprotect_snap.called) - self.assertEqual( - 1, self.driver.rbd.RBD.return_value.remove.call_count) - - @common_mocks - def delete_volume_not_found(self): - self.mock_rbd.Image.side_effect = self.mock_rbd.ImageNotFound - self.assertIsNone(self.driver.delete_volume(self.volume_a)) - self.mock_rbd.Image.assert_called_once_with() - # Make sure the exception was raised - self.assertEqual(RAISED_EXCEPTIONS, [self.mock_rbd.ImageNotFound]) - - @common_mocks - def test_delete_busy_volume(self): - self.mock_rbd.Image.return_value.list_snaps.return_value = [] - - self.mock_rbd.RBD.return_value.remove.side_effect = ( - self.mock_rbd.ImageBusy) - - with mock.patch.object(self.driver, '_get_clone_info') as \ - mock_get_clone_info: - mock_get_clone_info.return_value = (None, None, None) - with mock.patch.object(self.driver, '_delete_backup_snaps') as \ - mock_delete_backup_snaps: - with mock.patch.object(driver, 'RADOSClient') as \ - mock_rados_client: - self.assertRaises(exception.VolumeIsBusy, - self.driver.delete_volume, self.volume_a) - - mock_get_clone_info.assert_called_once_with( - self.mock_rbd.Image.return_value, - self.volume_a.name, - None) - (self.mock_rbd.Image.return_value.list_snaps - .assert_called_once_with()) - mock_rados_client.assert_called_once_with(self.driver) - mock_delete_backup_snaps.assert_called_once_with( - 
self.mock_rbd.Image.return_value) - self.assertFalse( - self.mock_rbd.Image.return_value.unprotect_snap.called) - self.assertEqual( - 3, self.mock_rbd.RBD.return_value.remove.call_count) - self.assertEqual(3, len(RAISED_EXCEPTIONS)) - # Make sure the exception was raised - self.assertIn(self.mock_rbd.ImageBusy, RAISED_EXCEPTIONS) - - @common_mocks - def test_delete_volume_not_found(self): - self.mock_rbd.Image.return_value.list_snaps.return_value = [] - - self.mock_rbd.RBD.return_value.remove.side_effect = ( - self.mock_rbd.ImageNotFound) - - with mock.patch.object(self.driver, '_get_clone_info') as \ - mock_get_clone_info: - mock_get_clone_info.return_value = (None, None, None) - with mock.patch.object(self.driver, '_delete_backup_snaps') as \ - mock_delete_backup_snaps: - with mock.patch.object(driver, 'RADOSClient') as \ - mock_rados_client: - self.assertIsNone(self.driver.delete_volume(self.volume_a)) - mock_get_clone_info.assert_called_once_with( - self.mock_rbd.Image.return_value, - self.volume_a.name, - None) - (self.mock_rbd.Image.return_value.list_snaps - .assert_called_once_with()) - mock_rados_client.assert_called_once_with(self.driver) - mock_delete_backup_snaps.assert_called_once_with( - self.mock_rbd.Image.return_value) - self.assertFalse( - self.mock_rbd.Image.return_value.unprotect_snap.called) - self.assertEqual( - 1, self.mock_rbd.RBD.return_value.remove.call_count) - # Make sure the exception was raised - self.assertEqual(RAISED_EXCEPTIONS, - [self.mock_rbd.ImageNotFound]) - - @common_mocks - @mock.patch('cinder.objects.Volume.get_by_id') - def test_create_snapshot(self, volume_get_by_id): - volume_get_by_id.return_value = self.volume_a - proxy = self.mock_proxy.return_value - proxy.__enter__.return_value = proxy - - self.driver.create_snapshot(self.snapshot) - - args = [str(self.snapshot.name)] - proxy.create_snap.assert_called_with(*args) - proxy.protect_snap.assert_called_with(*args) - - @common_mocks - 
@mock.patch('cinder.objects.Volume.get_by_id') - def test_delete_snapshot(self, volume_get_by_id): - volume_get_by_id.return_value = self.volume_a - proxy = self.mock_proxy.return_value - proxy.__enter__.return_value = proxy - - self.driver.delete_snapshot(self.snapshot) - - proxy.remove_snap.assert_called_with(self.snapshot.name) - proxy.unprotect_snap.assert_called_with(self.snapshot.name) - - @common_mocks - @mock.patch('cinder.objects.Volume.get_by_id') - def test_delete_notfound_snapshot(self, volume_get_by_id): - volume_get_by_id.return_value = self.volume_a - proxy = self.mock_proxy.return_value - proxy.__enter__.return_value = proxy - - proxy.unprotect_snap.side_effect = ( - self.mock_rbd.ImageNotFound) - - self.driver.delete_snapshot(self.snapshot) - - proxy.remove_snap.assert_called_with(self.snapshot.name) - proxy.unprotect_snap.assert_called_with(self.snapshot.name) - - @common_mocks - @mock.patch('cinder.objects.Volume.get_by_id') - def test_delete_notfound_on_remove_snapshot(self, volume_get_by_id): - volume_get_by_id.return_value = self.volume_a - proxy = self.mock_proxy.return_value - proxy.__enter__.return_value = proxy - - proxy.remove_snap.side_effect = ( - self.mock_rbd.ImageNotFound) - - self.driver.delete_snapshot(self.snapshot) - - proxy.remove_snap.assert_called_with(self.snapshot.name) - proxy.unprotect_snap.assert_called_with(self.snapshot.name) - - @common_mocks - @mock.patch('cinder.objects.Volume.get_by_id') - def test_delete_unprotected_snapshot(self, volume_get_by_id): - volume_get_by_id.return_value = self.volume_a - proxy = self.mock_proxy.return_value - proxy.__enter__.return_value = proxy - proxy.unprotect_snap.side_effect = self.mock_rbd.InvalidArgument - - self.driver.delete_snapshot(self.snapshot) - self.assertTrue(proxy.unprotect_snap.called) - self.assertTrue(proxy.remove_snap.called) - - @common_mocks - @mock.patch('cinder.objects.Volume.get_by_id') - def test_delete_busy_snapshot(self, volume_get_by_id): - 
volume_get_by_id.return_value = self.volume_a - proxy = self.mock_proxy.return_value - proxy.__enter__.return_value = proxy - - proxy.unprotect_snap.side_effect = ( - self.mock_rbd.ImageBusy) - - with mock.patch.object(self.driver, '_get_children_info') as \ - mock_get_children_info: - mock_get_children_info.return_value = [('pool', 'volume2')] - - with mock.patch.object(driver, 'LOG') as \ - mock_log: - - self.assertRaises(exception.SnapshotIsBusy, - self.driver.delete_snapshot, - self.snapshot) - - mock_get_children_info.assert_called_once_with( - proxy, - self.snapshot.name) - - self.assertTrue(mock_log.info.called) - self.assertTrue(proxy.unprotect_snap.called) - self.assertFalse(proxy.remove_snap.called) - - @common_mocks - def test_get_children_info(self): - volume = self.mock_proxy - volume.set_snap = mock.Mock() - volume.list_children = mock.Mock() - list_children = [('pool', 'volume2')] - volume.list_children.return_value = list_children - - info = self.driver._get_children_info(volume, - self.snapshot['name']) - - self.assertEqual(list_children, info) - - @common_mocks - def test_get_clone_info(self): - volume = self.mock_rbd.Image() - volume.set_snap = mock.Mock() - volume.parent_info = mock.Mock() - parent_info = ('a', 'b', '%s.clone_snap' % (self.volume_a.name)) - volume.parent_info.return_value = parent_info - - info = self.driver._get_clone_info(volume, self.volume_a.name) - - self.assertEqual(parent_info, info) - - self.assertFalse(volume.set_snap.called) - volume.parent_info.assert_called_once_with() - - @common_mocks - def test_get_clone_info_w_snap(self): - volume = self.mock_rbd.Image() - volume.set_snap = mock.Mock() - volume.parent_info = mock.Mock() - parent_info = ('a', 'b', '%s.clone_snap' % (self.volume_a.name)) - volume.parent_info.return_value = parent_info - - snapshot = self.mock_rbd.ImageSnapshot() - - info = self.driver._get_clone_info(volume, self.volume_a.name, - snap=snapshot) - - self.assertEqual(parent_info, info) - - 
self.assertEqual(2, volume.set_snap.call_count) - volume.parent_info.assert_called_once_with() - - @common_mocks - def test_get_clone_info_w_exception(self): - volume = self.mock_rbd.Image() - volume.set_snap = mock.Mock() - volume.parent_info = mock.Mock() - volume.parent_info.side_effect = self.mock_rbd.ImageNotFound - - snapshot = self.mock_rbd.ImageSnapshot() - - info = self.driver._get_clone_info(volume, self.volume_a.name, - snap=snapshot) - - self.assertEqual((None, None, None), info) - - self.assertEqual(2, volume.set_snap.call_count) - volume.parent_info.assert_called_once_with() - # Make sure the exception was raised - self.assertEqual(RAISED_EXCEPTIONS, [self.mock_rbd.ImageNotFound]) - - @common_mocks - def test_get_clone_info_deleted_volume(self): - volume = self.mock_rbd.Image() - volume.set_snap = mock.Mock() - volume.parent_info = mock.Mock() - parent_info = ('a', 'b', '%s.clone_snap' % (self.volume_a.name)) - volume.parent_info.return_value = parent_info - - info = self.driver._get_clone_info(volume, - "%s.deleted" % (self.volume_a.name)) - - self.assertEqual(parent_info, info) - - self.assertFalse(volume.set_snap.called) - volume.parent_info.assert_called_once_with() - - @common_mocks - @mock.patch.object(driver.RBDDriver, '_enable_replication') - def test_create_cloned_volume_same_size(self, mock_enable_repl): - self.cfg.rbd_max_clone_depth = 2 - - with mock.patch.object(self.driver, '_get_clone_depth') as \ - mock_get_clone_depth: - # Try with no flatten required - with mock.patch.object(self.driver, '_resize') as mock_resize: - mock_get_clone_depth.return_value = 1 - - res = self.driver.create_cloned_volume(self.volume_b, - self.volume_a) - - self.assertIsNone(res) - (self.mock_rbd.Image.return_value.create_snap - .assert_called_once_with('.'.join( - (self.volume_b.name, 'clone_snap')))) - (self.mock_rbd.Image.return_value.protect_snap - .assert_called_once_with('.'.join( - (self.volume_b.name, 'clone_snap')))) - self.assertEqual( - 1, 
self.mock_rbd.RBD.return_value.clone.call_count) - self.mock_rbd.Image.return_value.close \ - .assert_called_once_with() - self.assertTrue(mock_get_clone_depth.called) - mock_resize.assert_not_called() - mock_enable_repl.assert_not_called() - - @common_mocks - @mock.patch.object(driver.RBDDriver, '_get_clone_depth', return_value=1) - @mock.patch.object(driver.RBDDriver, '_resize') - @mock.patch.object(driver.RBDDriver, '_enable_replication') - def test_create_cloned_volume_replicated(self, - mock_enable_repl, - mock_resize, - mock_get_clone_depth): - self.cfg.rbd_max_clone_depth = 2 - self.volume_b.volume_type = fake_volume.fake_volume_type_obj( - self.context, - id=fake.VOLUME_TYPE_ID, - extra_specs={'replication_enabled': ' True'}) - - expected_update = { - 'replication_status': 'enabled', - 'replication_driver_data': '{"had_journaling": false}' - } - mock_enable_repl.return_value = expected_update - - res = self.driver.create_cloned_volume(self.volume_b, self.volume_a) - self.assertEqual(expected_update, res) - mock_enable_repl.assert_called_once_with(self.volume_b) - - name = self.volume_b.name - image = self.mock_rbd.Image.return_value - - image.create_snap.assert_called_once_with(name + '.clone_snap') - image.protect_snap.assert_called_once_with(name + '.clone_snap') - self.assertEqual(1, self.mock_rbd.RBD.return_value.clone.call_count) - self.mock_rbd.Image.return_value.close.assert_called_once_with() - mock_get_clone_depth.assert_called_once_with( - self.mock_client().__enter__(), self.volume_a.name) - mock_resize.assert_not_called() - - @common_mocks - @mock.patch.object(driver.RBDDriver, '_enable_replication') - def test_create_cloned_volume_different_size(self, mock_enable_repl): - self.cfg.rbd_max_clone_depth = 2 - - with mock.patch.object(self.driver, '_get_clone_depth') as \ - mock_get_clone_depth: - # Try with no flatten required - with mock.patch.object(self.driver, '_resize') as mock_resize: - mock_get_clone_depth.return_value = 1 - - 
self.volume_b.size = 20 - res = self.driver.create_cloned_volume(self.volume_b, - self.volume_a) - - self.assertIsNone(res) - (self.mock_rbd.Image.return_value.create_snap - .assert_called_once_with('.'.join( - (self.volume_b.name, 'clone_snap')))) - (self.mock_rbd.Image.return_value.protect_snap - .assert_called_once_with('.'.join( - (self.volume_b.name, 'clone_snap')))) - self.assertEqual( - 1, self.mock_rbd.RBD.return_value.clone.call_count) - self.mock_rbd.Image.return_value.close \ - .assert_called_once_with() - self.assertTrue(mock_get_clone_depth.called) - self.assertEqual( - 1, mock_resize.call_count) - mock_enable_repl.assert_not_called() - - @common_mocks - def test_create_cloned_volume_different_size_copy_only(self): - self.cfg.rbd_max_clone_depth = 0 - - with mock.patch.object(self.driver, '_get_clone_depth') as \ - mock_get_clone_depth: - # Try with no flatten required - with mock.patch.object(self.driver, '_resize') as mock_resize: - mock_get_clone_depth.return_value = 1 - - self.volume_b.size = 20 - self.driver.create_cloned_volume(self.volume_b, self.volume_a) - - self.assertEqual(1, mock_resize.call_count) - - @common_mocks - @mock.patch.object(driver.RBDDriver, '_enable_replication') - def test_create_cloned_volume_w_flatten(self, mock_enable_repl): - self.cfg.rbd_max_clone_depth = 1 - - with mock.patch.object(self.driver, '_get_clone_info') as \ - mock_get_clone_info: - mock_get_clone_info.return_value = ( - ('fake_pool', self.volume_b.name, - '.'.join((self.volume_b.name, 'clone_snap')))) - with mock.patch.object(self.driver, '_get_clone_depth') as \ - mock_get_clone_depth: - # Try with no flatten required - mock_get_clone_depth.return_value = 1 - - res = self.driver.create_cloned_volume(self.volume_b, - self.volume_a) - - self.assertIsNone(res) - (self.mock_rbd.Image.return_value.create_snap - .assert_called_once_with('.'.join( - (self.volume_b.name, 'clone_snap')))) - (self.mock_rbd.Image.return_value.protect_snap - 
.assert_called_once_with('.'.join( - (self.volume_b.name, 'clone_snap')))) - self.assertEqual( - 1, self.mock_rbd.RBD.return_value.clone.call_count) - (self.mock_rbd.Image.return_value.unprotect_snap - .assert_called_once_with('.'.join( - (self.volume_b.name, 'clone_snap')))) - (self.mock_rbd.Image.return_value.remove_snap - .assert_called_once_with('.'.join( - (self.volume_b.name, 'clone_snap')))) - - # We expect the driver to close both volumes, so 2 is expected - self.assertEqual( - 2, self.mock_rbd.Image.return_value.close.call_count) - self.assertTrue(mock_get_clone_depth.called) - mock_enable_repl.assert_not_called() - - @common_mocks - @mock.patch.object(driver.RBDDriver, '_enable_replication') - def test_create_cloned_volume_w_clone_exception(self, mock_enable_repl): - self.cfg.rbd_max_clone_depth = 2 - self.mock_rbd.RBD.return_value.clone.side_effect = ( - self.mock_rbd.RBD.Error) - with mock.patch.object(self.driver, '_get_clone_depth') as \ - mock_get_clone_depth: - # Try with no flatten required - mock_get_clone_depth.return_value = 1 - - self.assertRaises(self.mock_rbd.RBD.Error, - self.driver.create_cloned_volume, - self.volume_b, self.volume_a) - - (self.mock_rbd.Image.return_value.create_snap - .assert_called_once_with('.'.join( - (self.volume_b.name, 'clone_snap')))) - (self.mock_rbd.Image.return_value.protect_snap - .assert_called_once_with('.'.join( - (self.volume_b.name, 'clone_snap')))) - self.assertEqual( - 1, self.mock_rbd.RBD.return_value.clone.call_count) - (self.mock_rbd.Image.return_value.unprotect_snap - .assert_called_once_with('.'.join( - (self.volume_b.name, 'clone_snap')))) - (self.mock_rbd.Image.return_value.remove_snap - .assert_called_once_with('.'.join( - (self.volume_b.name, 'clone_snap')))) - self.mock_rbd.Image.return_value.close.assert_called_once_with() - mock_enable_repl.assert_not_called() - - @common_mocks - def test_good_locations(self): - locations = ['rbd://fsid/pool/image/snap', - 'rbd://%2F/%2F/%2F/%2F', ] - 
map(self.driver._parse_location, locations) - - @common_mocks - def test_bad_locations(self): - locations = ['rbd://image', - 'http://path/to/somewhere/else', - 'rbd://image/extra', - 'rbd://image/', - 'rbd://fsid/pool/image/', - 'rbd://fsid/pool/image/snap/', - 'rbd://///', ] - for loc in locations: - self.assertRaises(exception.ImageUnacceptable, - self.driver._parse_location, - loc) - self.assertFalse( - self.driver._is_cloneable(loc, {'disk_format': 'raw'})) - - @common_mocks - def test_cloneable(self): - with mock.patch.object(self.driver, '_get_fsid') as mock_get_fsid: - mock_get_fsid.return_value = 'abc' - location = 'rbd://abc/pool/image/snap' - info = {'disk_format': 'raw'} - self.assertTrue(self.driver._is_cloneable(location, info)) - self.assertTrue(mock_get_fsid.called) - - @common_mocks - def test_uncloneable_different_fsid(self): - with mock.patch.object(self.driver, '_get_fsid') as mock_get_fsid: - mock_get_fsid.return_value = 'abc' - location = 'rbd://def/pool/image/snap' - self.assertFalse( - self.driver._is_cloneable(location, {'disk_format': 'raw'})) - self.assertTrue(mock_get_fsid.called) - - @common_mocks - def test_uncloneable_unreadable(self): - with mock.patch.object(self.driver, '_get_fsid') as mock_get_fsid: - mock_get_fsid.return_value = 'abc' - location = 'rbd://abc/pool/image/snap' - - self.driver.rbd.Error = Exception - self.mock_proxy.side_effect = Exception - - args = [location, {'disk_format': 'raw'}] - self.assertFalse(self.driver._is_cloneable(*args)) - self.assertEqual(1, self.mock_proxy.call_count) - self.assertTrue(mock_get_fsid.called) - - @common_mocks - def test_uncloneable_bad_format(self): - with mock.patch.object(self.driver, '_get_fsid') as mock_get_fsid: - mock_get_fsid.return_value = 'abc' - location = 'rbd://abc/pool/image/snap' - formats = ['qcow2', 'vmdk', 'vdi'] - for f in formats: - self.assertFalse( - self.driver._is_cloneable(location, {'disk_format': f})) - self.assertTrue(mock_get_fsid.called) - - def 
_copy_image(self): - with mock.patch.object(tempfile, 'NamedTemporaryFile'): - with mock.patch.object(os.path, 'exists') as mock_exists: - mock_exists.return_value = True - with mock.patch.object(image_utils, 'fetch_to_raw'): - with mock.patch.object(self.driver, 'delete_volume'): - with mock.patch.object(self.driver, '_resize'): - mock_image_service = mock.MagicMock() - args = [None, self.volume_a, - mock_image_service, None] - self.driver.copy_image_to_volume(*args) - - @common_mocks - def test_copy_image_no_volume_tmp(self): - self.cfg.image_conversion_dir = None - self._copy_image() - - @common_mocks - def test_copy_image_volume_tmp(self): - self.cfg.image_conversion_dir = '/var/run/cinder/tmp' - self._copy_image() - - @ddt.data(True, False) - @common_mocks - def test_update_volume_stats(self, replication_enabled): - client = self.mock_client.return_value - client.__enter__.return_value = client - - client.cluster = mock.Mock() - client.cluster.mon_command = mock.Mock() - client.cluster.mon_command.return_value = ( - 0, '{"stats":{"total_bytes":64385286144,' - '"total_used_bytes":3289628672,"total_avail_bytes":61095657472},' - '"pools":[{"name":"rbd","id":2,"stats":{"kb_used":1510197,' - '"bytes_used":1546440971,"max_avail":28987613184,"objects":412}},' - '{"name":"volumes","id":3,"stats":{"kb_used":0,"bytes_used":0,' - '"max_avail":28987613184,"objects":0}}]}\n', '') - - expected = dict( - volume_backend_name='RBD', - replication_enabled=replication_enabled, - vendor_name='Open Source', - driver_version=self.driver.VERSION, - storage_protocol='ceph', - total_capacity_gb=28.44, - free_capacity_gb=27.0, - reserved_percentage=0, - thin_provisioning_support=True, - provisioned_capacity_gb=0.0, - max_over_subscription_ratio=1.0, - multiattach=False) - - if replication_enabled: - targets = [{'backend_id': 'secondary-backend'}, - {'backend_id': 'tertiary-backend'}] - with mock.patch.object(self.driver.configuration, 'safe_get', - return_value=targets): - 
self.driver._do_setup_replication() - expected['replication_targets'] = [t['backend_id']for t in targets] - expected['replication_targets'].append('default') - - self.mock_object(self.driver.configuration, 'safe_get', - mock_driver_configuration) - - actual = self.driver.get_volume_stats(True) - client.cluster.mon_command.assert_called_once_with( - '{"prefix":"df", "format":"json"}', '') - self.assertDictEqual(expected, actual) - - @common_mocks - def test_update_volume_stats_error(self): - client = self.mock_client.return_value - client.__enter__.return_value = client - - client.cluster = mock.Mock() - client.cluster.mon_command = mock.Mock() - client.cluster.mon_command.return_value = (22, '', '') - - self.mock_object(self.driver.configuration, 'safe_get', - mock_driver_configuration) - - expected = dict(volume_backend_name='RBD', - replication_enabled=False, - vendor_name='Open Source', - driver_version=self.driver.VERSION, - storage_protocol='ceph', - total_capacity_gb='unknown', - free_capacity_gb='unknown', - reserved_percentage=0, - multiattach=False, - provisioned_capacity_gb=0.0, - max_over_subscription_ratio=1.0, - thin_provisioning_support=True) - - actual = self.driver.get_volume_stats(True) - client.cluster.mon_command.assert_called_once_with( - '{"prefix":"df", "format":"json"}', '') - self.assertDictEqual(expected, actual) - - @common_mocks - def test_get_mon_addrs(self): - with mock.patch.object(self.driver, '_execute') as mock_execute: - mock_execute.return_value = (CEPH_MON_DUMP, '') - hosts = ['::1', '::1', '::1', '127.0.0.1', 'example.com'] - ports = ['6789', '6790', '6791', '6792', '6791'] - self.assertEqual((hosts, ports), self.driver._get_mon_addrs()) - - @common_mocks - def _initialize_connection_helper(self, expected, hosts, ports): - - with mock.patch.object(self.driver, '_get_mon_addrs') as \ - mock_get_mon_addrs: - mock_get_mon_addrs.return_value = (hosts, ports) - actual = self.driver.initialize_connection(self.volume_a, None) - 
self.assertDictEqual(expected, actual) - self.assertTrue(mock_get_mon_addrs.called) - - @mock.patch.object(cinder.volume.drivers.rbd.RBDDriver, - '_get_keyring_contents') - def test_initialize_connection(self, mock_keyring): - hosts = ['::1', '::1', '::1', '127.0.0.1', 'example.com'] - ports = ['6789', '6790', '6791', '6792', '6791'] - - keyring_data = "[client.cinder]\n key = test\n" - mock_keyring.return_value = keyring_data - - expected = { - 'driver_volume_type': 'rbd', - 'data': { - 'name': '%s/%s' % (self.cfg.rbd_pool, - self.volume_a.name), - 'hosts': hosts, - 'ports': ports, - 'cluster_name': self.cfg.rbd_cluster_name, - 'auth_enabled': True, - 'auth_username': self.cfg.rbd_user, - 'secret_type': 'ceph', - 'secret_uuid': None, - 'volume_id': self.volume_a.id, - 'discard': True, - 'keyring': keyring_data, - } - } - self._initialize_connection_helper(expected, hosts, ports) - - # Check how it will work with empty keyring path - mock_keyring.return_value = None - expected['data']['keyring'] = None - self._initialize_connection_helper(expected, hosts, ports) - - def test__get_keyring_contents_no_config_file(self): - self.cfg.rbd_keyring_conf = '' - self.assertIsNone(self.driver._get_keyring_contents()) - - @mock.patch('os.path.isfile') - def test__get_keyring_contents_read_file(self, mock_isfile): - mock_isfile.return_value = True - keyring_data = "[client.cinder]\n key = test\n" - mockopen = mock.mock_open(read_data=keyring_data) - mockopen.return_value.__exit__ = mock.Mock() - with mock.patch('cinder.volume.drivers.rbd.open', mockopen, - create=True): - self.assertEqual(self.driver._get_keyring_contents(), keyring_data) - - @mock.patch('os.path.isfile') - def test__get_keyring_contents_raise_error(self, mock_isfile): - mock_isfile.return_value = True - mockopen = mock.mock_open() - mockopen.return_value.__exit__ = mock.Mock() - with mock.patch('cinder.volume.drivers.rbd.open', mockopen, - create=True) as mock_keyring_file: - mock_keyring_file.side_effect = 
IOError - self.assertIsNone(self.driver._get_keyring_contents()) - - @ddt.data({'rbd_chunk_size': 1, 'order': 20}, - {'rbd_chunk_size': 8, 'order': 23}, - {'rbd_chunk_size': 32, 'order': 25}) - @ddt.unpack - @common_mocks - @mock.patch.object(driver.RBDDriver, '_enable_replication') - def test_clone(self, mock_enable_repl, rbd_chunk_size, order): - self.cfg.rbd_store_chunk_size = rbd_chunk_size - src_pool = u'images' - src_image = u'image-name' - src_snap = u'snapshot-name' - - client_stack = [] - - def mock__enter__(inst): - def _inner(): - client_stack.append(inst) - return inst - return _inner - - client = self.mock_client.return_value - # capture both rados client used to perform the clone - client.__enter__.side_effect = mock__enter__(client) - - res = self.driver._clone(self.volume_a, src_pool, src_image, src_snap) - - self.assertEqual({}, res) - - args = [client_stack[0].ioctx, str(src_image), str(src_snap), - client_stack[1].ioctx, str(self.volume_a.name)] - kwargs = {'features': client.features, - 'order': order} - self.mock_rbd.RBD.return_value.clone.assert_called_once_with( - *args, **kwargs) - self.assertEqual(2, client.__enter__.call_count) - mock_enable_repl.assert_not_called() - - @common_mocks - @mock.patch.object(driver.RBDDriver, '_enable_replication') - def test_clone_replicated(self, mock_enable_repl): - rbd_chunk_size = 1 - order = 20 - self.volume_a.volume_type = fake_volume.fake_volume_type_obj( - self.context, - id=fake.VOLUME_TYPE_ID, - extra_specs={'replication_enabled': ' True'}) - - expected_update = { - 'replication_status': 'enabled', - 'replication_driver_data': '{"had_journaling": false}' - } - mock_enable_repl.return_value = expected_update - - self.cfg.rbd_store_chunk_size = rbd_chunk_size - src_pool = u'images' - src_image = u'image-name' - src_snap = u'snapshot-name' - - client_stack = [] - - def mock__enter__(inst): - def _inner(): - client_stack.append(inst) - return inst - return _inner - - client = 
self.mock_client.return_value - # capture both rados client used to perform the clone - client.__enter__.side_effect = mock__enter__(client) - - res = self.driver._clone(self.volume_a, src_pool, src_image, src_snap) - - self.assertEqual(expected_update, res) - mock_enable_repl.assert_called_once_with(self.volume_a) - - args = [client_stack[0].ioctx, str(src_image), str(src_snap), - client_stack[1].ioctx, str(self.volume_a.name)] - kwargs = {'features': client.features, - 'order': order} - self.mock_rbd.RBD.return_value.clone.assert_called_once_with( - *args, **kwargs) - self.assertEqual(2, client.__enter__.call_count) - - @ddt.data({}, - {'replication_status': 'enabled', - 'replication_driver_data': '{"had_journaling": false}'}) - @common_mocks - @mock.patch.object(driver.RBDDriver, '_is_cloneable', return_value=True) - def test_clone_image_replication(self, return_value, mock_cloneable): - mock_clone = self.mock_object(self.driver, '_clone', - return_value=return_value) - image_loc = ('rbd://fee/fi/fo/fum', None) - image_meta = {'disk_format': 'raw', 'id': 'id.foo'} - - res = self.driver.clone_image(self.context, - self.volume_a, - image_loc, - image_meta, - mock.Mock()) - - expected = return_value.copy() - expected['provider_location'] = None - self.assertEqual((expected, True), res) - - mock_clone.assert_called_once_with(self.volume_a, 'fi', 'fo', 'fum') - - @common_mocks - @mock.patch.object(driver.RBDDriver, '_clone', - return_value=mock.sentinel.volume_update) - @mock.patch.object(driver.RBDDriver, '_resize', mock.Mock()) - def test_create_vol_from_snap_replication(self, mock_clone): - self.cfg.rbd_flatten_volume_from_snapshot = False - snapshot = mock.Mock() - - res = self.driver.create_volume_from_snapshot(self.volume_a, snapshot) - - self.assertEqual(mock.sentinel.volume_update, res) - mock_clone.assert_called_once_with(self.volume_a, - self.cfg.rbd_pool, - snapshot.volume_name, - snapshot.name) - - @common_mocks - def test_extend_volume(self): - fake_size 
= '20' - size = int(fake_size) * units.Gi - with mock.patch.object(self.driver, '_resize') as mock_resize: - self.driver.extend_volume(self.volume_a, fake_size) - mock_resize.assert_called_once_with(self.volume_a, size=size) - - @ddt.data(False, True) - @common_mocks - def test_retype(self, enabled): - """Test retyping a non replicated volume. - - We will test on a system that doesn't have replication enabled and on - one that hast it enabled. - """ - self.driver._is_replication_enabled = enabled - if enabled: - expect = {'replication_status': fields.ReplicationStatus.DISABLED} - else: - expect = None - context = {} - diff = {'encryption': {}, - 'extra_specs': {}} - updates = {'name': 'testvolume', - 'host': 'currenthost', - 'id': fake.VOLUME_ID} - fake_type = fake_volume.fake_volume_type_obj(context) - volume = fake_volume.fake_volume_obj(context, **updates) - volume.volume_type = None - - # The hosts have been checked same before rbd.retype - # is called. - # RBD doesn't support multiple pools in a driver. - host = {'host': 'currenthost'} - self.assertEqual((True, expect), - self.driver.retype(context, volume, - fake_type, diff, host)) - - # The encryptions have been checked as same before rbd.retype - # is called. - diff['encryption'] = {} - self.assertEqual((True, expect), - self.driver.retype(context, volume, - fake_type, diff, host)) - - # extra_specs changes are supported. 
- diff['extra_specs'] = {'non-empty': 'non-empty'} - self.assertEqual((True, expect), - self.driver.retype(context, volume, - fake_type, diff, host)) - diff['extra_specs'] = {} - - self.assertEqual((True, expect), - self.driver.retype(context, volume, - fake_type, diff, host)) - - @ddt.data({'old_replicated': False, 'new_replicated': False}, - {'old_replicated': False, 'new_replicated': True}, - {'old_replicated': True, 'new_replicated': False}, - {'old_replicated': True, 'new_replicated': True}) - @ddt.unpack - @common_mocks - @mock.patch.object(driver.RBDDriver, '_disable_replication', - return_value=mock.sentinel.disable_replication) - @mock.patch.object(driver.RBDDriver, '_enable_replication', - return_value=mock.sentinel.enable_replication) - def test_retype_replicated(self, mock_disable, mock_enable, old_replicated, - new_replicated): - """Test retyping a non replicated volume. - - We will test on a system that doesn't have replication enabled and on - one that hast it enabled. - """ - self.driver._is_replication_enabled = True - replicated_type = fake_volume.fake_volume_type_obj( - self.context, - id=fake.VOLUME_TYPE_ID, - extra_specs={'replication_enabled': ' True'}) - - self.volume_a.volume_type = replicated_type if old_replicated else None - - if new_replicated: - new_type = replicated_type - if old_replicated: - update = None - else: - update = mock.sentinel.enable_replication - else: - new_type = fake_volume.fake_volume_type_obj( - self.context, - id=fake.VOLUME_TYPE2_ID), - if old_replicated: - update = mock.sentinel.disable_replication - else: - update = {'replication_status': - fields.ReplicationStatus.DISABLED} - - res = self.driver.retype(self.context, self.volume_a, new_type, None, - None) - self.assertEqual((True, update), res) - - @common_mocks - def test_update_migrated_volume(self): - client = self.mock_client.return_value - client.__enter__.return_value = client - - with mock.patch.object(self.driver.rbd.RBD(), 'rename') as mock_rename: - 
context = {} - mock_rename.return_value = 0 - model_update = self.driver.update_migrated_volume(context, - self.volume_a, - self.volume_b, - 'available') - mock_rename.assert_called_with(client.ioctx, - 'volume-%s' % self.volume_b.id, - 'volume-%s' % self.volume_a.id) - self.assertEqual({'_name_id': None, - 'provider_location': None}, model_update) - - def test_rbd_volume_proxy_init(self): - mock_driver = mock.Mock(name='driver') - mock_driver._connect_to_rados.return_value = (None, None) - with driver.RBDVolumeProxy(mock_driver, self.volume_a.name): - self.assertEqual(1, mock_driver._connect_to_rados.call_count) - self.assertFalse(mock_driver._disconnect_from_rados.called) - - self.assertEqual(1, mock_driver._disconnect_from_rados.call_count) - - mock_driver.reset_mock() - - snap = u'snapshot-name' - with driver.RBDVolumeProxy(mock_driver, self.volume_a.name, - snapshot=snap): - self.assertEqual(1, mock_driver._connect_to_rados.call_count) - self.assertFalse(mock_driver._disconnect_from_rados.called) - - self.assertEqual(1, mock_driver._disconnect_from_rados.call_count) - - @common_mocks - def test_connect_to_rados(self): - # Default - self.cfg.rados_connect_timeout = -1 - - self.mock_rados.Rados.return_value.open_ioctx.return_value = \ - self.mock_rados.Rados.return_value.ioctx - - # default configured pool - ret = self.driver._connect_to_rados() - self.assertTrue(self.mock_rados.Rados.return_value.connect.called) - # Expect no timeout if default is used - self.mock_rados.Rados.return_value.connect.assert_called_once_with() - self.assertTrue(self.mock_rados.Rados.return_value.open_ioctx.called) - self.assertEqual(self.mock_rados.Rados.return_value.ioctx, ret[1]) - self.mock_rados.Rados.return_value.open_ioctx.assert_called_with( - self.cfg.rbd_pool) - conf_set = self.mock_rados.Rados.return_value.conf_set - conf_set.assert_not_called() - - # different pool - ret = self.driver._connect_to_rados('alt_pool') - 
self.assertTrue(self.mock_rados.Rados.return_value.connect.called) - self.assertTrue(self.mock_rados.Rados.return_value.open_ioctx.called) - self.assertEqual(self.mock_rados.Rados.return_value.ioctx, ret[1]) - self.mock_rados.Rados.return_value.open_ioctx.assert_called_with( - 'alt_pool') - - # With timeout - self.cfg.rados_connect_timeout = 1 - self.mock_rados.Rados.return_value.connect.reset_mock() - self.driver._connect_to_rados() - conf_set.assert_has_calls((mock.call('rados_osd_op_timeout', '1'), - mock.call('rados_mon_op_timeout', '1'), - mock.call('client_mount_timeout', '1'))) - self.mock_rados.Rados.return_value.connect.assert_called_once_with() - - # error - self.mock_rados.Rados.return_value.open_ioctx.reset_mock() - self.mock_rados.Rados.return_value.shutdown.reset_mock() - self.mock_rados.Rados.return_value.open_ioctx.side_effect = ( - self.mock_rados.Error) - self.assertRaises(exception.VolumeBackendAPIException, - self.driver._connect_to_rados) - self.assertTrue(self.mock_rados.Rados.return_value.open_ioctx.called) - self.assertEqual( - 3, self.mock_rados.Rados.return_value.shutdown.call_count) - - @common_mocks - def test_failover_host_no_replication(self): - self.driver._is_replication_enabled = False - self.assertRaises(exception.UnableToFailOver, - self.driver.failover_host, - self.context, [self.volume_a], []) - - @ddt.data(None, 'tertiary-backend') - @common_mocks - @mock.patch.object(driver.RBDDriver, '_get_failover_target_config') - @mock.patch.object(driver.RBDDriver, '_failover_volume', autospec=True) - def test_failover_host(self, secondary_id, mock_failover_vol, - mock_get_cfg): - mock_failover_vol.side_effect = lambda self, v, r, d, s: v - self.mock_object(self.driver.configuration, 'safe_get', - return_value=[{'backend_id': 'secondary-backend'}, - {'backend_id': 'tertiary-backend'}]) - self.driver._do_setup_replication() - volumes = [self.volume_a, self.volume_b] - remote = self.driver._replication_targets[1 if secondary_id else 0] - 
mock_get_cfg.return_value = (remote['name'], remote) - - res = self.driver.failover_host(self.context, volumes, secondary_id, - []) - - self.assertEqual((remote['name'], volumes, []), res) - self.assertEqual(remote, self.driver._active_config) - mock_failover_vol.assert_has_calls( - [mock.call(mock.ANY, v, remote, False, - fields.ReplicationStatus.FAILED_OVER) - for v in volumes]) - mock_get_cfg.assert_called_once_with(secondary_id) - - @mock.patch.object(driver.RBDDriver, '_failover_volume', autospec=True) - def test_failover_host_failback(self, mock_failover_vol): - mock_failover_vol.side_effect = lambda self, v, r, d, s: v - self.driver._active_backend_id = 'secondary-backend' - self.mock_object(self.driver.configuration, 'safe_get', - return_value=[{'backend_id': 'secondary-backend'}, - {'backend_id': 'tertiary-backend'}]) - self.driver._do_setup_replication() - - remote = self.driver._get_target_config('default') - volumes = [self.volume_a, self.volume_b] - res = self.driver.failover_host(self.context, volumes, 'default', []) - - self.assertEqual(('default', volumes, []), res) - self.assertEqual(remote, self.driver._active_config) - mock_failover_vol.assert_has_calls( - [mock.call(mock.ANY, v, remote, False, - fields.ReplicationStatus.ENABLED) - for v in volumes]) - - @mock.patch.object(driver.RBDDriver, '_failover_volume') - def test_failover_host_no_more_replica_targets(self, mock_failover_vol): - mock_failover_vol.side_effect = lambda w, x, y, z: w - self.driver._active_backend_id = 'secondary-backend' - self.mock_object(self.driver.configuration, 'safe_get', - return_value=[{'backend_id': 'secondary-backend'}]) - self.driver._do_setup_replication() - - volumes = [self.volume_a, self.volume_b] - self.assertRaises(exception.InvalidReplicationTarget, - self.driver.failover_host, - self.context, volumes, None, []) - - def test_failover_volume_non_replicated(self): - self.volume_a.replication_status = fields.ReplicationStatus.DISABLED - remote = {'name': 
'name', 'user': 'user', 'conf': 'conf', - 'pool': 'pool'} - expected = { - 'volume_id': self.volume_a.id, - 'updates': { - 'status': 'error', - 'previous_status': self.volume_a.status, - 'replication_status': fields.ReplicationStatus.NOT_CAPABLE, - } - } - res = self.driver._failover_volume( - self.volume_a, remote, False, fields.ReplicationStatus.FAILED_OVER) - self.assertEqual(expected, res) - - @ddt.data(True, False) - @mock.patch.object(driver.RBDDriver, '_exec_on_volume', - side_effect=Exception) - def test_failover_volume_error(self, is_demoted, mock_exec): - self.volume_a.replication_driver_data = '{"had_journaling": false}' - self.volume_a.volume_type = fake_volume.fake_volume_type_obj( - self.context, - id=fake.VOLUME_TYPE_ID, - extra_specs={'replication_enabled': ' True'}) - remote = {'name': 'name', 'user': 'user', 'conf': 'conf', - 'pool': 'pool'} - repl_status = fields.ReplicationStatus.FAILOVER_ERROR - expected = {'volume_id': self.volume_a.id, - 'updates': {'status': 'error', - 'previous_status': self.volume_a.status, - 'replication_status': repl_status}} - res = self.driver._failover_volume( - self.volume_a, remote, is_demoted, - fields.ReplicationStatus.FAILED_OVER) - self.assertEqual(expected, res) - mock_exec.assert_called_once_with(self.volume_a.name, remote, - 'mirror_image_promote', - not is_demoted) - - @mock.patch.object(driver.RBDDriver, '_exec_on_volume') - def test_failover_volume(self, mock_exec): - self.volume_a.replication_driver_data = '{"had_journaling": false}' - self.volume_a.volume_type = fake_volume.fake_volume_type_obj( - self.context, - id=fake.VOLUME_TYPE_ID, - extra_specs={'replication_enabled': ' True'}) - remote = {'name': 'name', 'user': 'user', 'conf': 'conf', - 'pool': 'pool'} - repl_status = fields.ReplicationStatus.FAILED_OVER - expected = {'volume_id': self.volume_a.id, - 'updates': {'replication_status': repl_status}} - res = self.driver._failover_volume(self.volume_a, remote, True, - repl_status) - 
self.assertEqual(expected, res) - mock_exec.assert_called_once_with(self.volume_a.name, remote, - 'mirror_image_promote', False) - - @common_mocks - def test_manage_existing_snapshot_get_size(self): - with mock.patch.object(self.driver.rbd.Image(), 'size') as \ - mock_rbd_image_size: - with mock.patch.object(self.driver.rbd.Image(), 'close') \ - as mock_rbd_image_close: - mock_rbd_image_size.return_value = 2 * units.Gi - existing_ref = {'source-name': self.snapshot_b.name} - return_size = self.driver.manage_existing_snapshot_get_size( - self.snapshot_b, - existing_ref) - self.assertEqual(2, return_size) - mock_rbd_image_size.assert_called_once_with() - mock_rbd_image_close.assert_called_once_with() - - @common_mocks - def test_manage_existing_snapshot_get_non_integer_size(self): - rbd_snapshot = self.driver.rbd.Image.return_value - rbd_snapshot.size.return_value = int(1.75 * units.Gi) - existing_ref = {'source-name': self.snapshot_b.name} - return_size = self.driver.manage_existing_snapshot_get_size( - self.snapshot_b, existing_ref) - self.assertEqual(2, return_size) - rbd_snapshot.size.assert_called_once_with() - rbd_snapshot.close.assert_called_once_with() - - @common_mocks - def test_manage_existing_snapshot_get_invalid_size(self): - - with mock.patch.object(self.driver.rbd.Image(), 'size') as \ - mock_rbd_image_size: - with mock.patch.object(self.driver.rbd.Image(), 'close') \ - as mock_rbd_image_close: - mock_rbd_image_size.return_value = 'abcd' - existing_ref = {'source-name': self.snapshot_b.name} - self.assertRaises( - exception.VolumeBackendAPIException, - self.driver.manage_existing_snapshot_get_size, - self.snapshot_b, existing_ref) - - mock_rbd_image_size.assert_called_once_with() - mock_rbd_image_close.assert_called_once_with() - - @common_mocks - def test_manage_existing_snapshot_with_invalid_rbd_image(self): - self.mock_rbd.Image.side_effect = self.mock_rbd.ImageNotFound - - invalid_snapshot = 'snapshot-invalid' - invalid_ref = {'source-name': 
invalid_snapshot} - - self.assertRaises(exception.ManageExistingInvalidReference, - self.driver.manage_existing_snapshot_get_size, - self.snapshot_b, invalid_ref) - # Make sure the exception was raised - self.assertEqual([self.mock_rbd.ImageNotFound], - RAISED_EXCEPTIONS) - - @common_mocks - def test_manage_existing_snapshot(self): - proxy = self.mock_proxy.return_value - proxy.__enter__.return_value = proxy - exist_snapshot = 'snapshot-exist' - existing_ref = {'source-name': exist_snapshot} - proxy.rename_snap.return_value = 0 - self.driver.manage_existing_snapshot(self.snapshot_b, existing_ref) - proxy.rename_snap.assert_called_with(exist_snapshot, - self.snapshot_b.name) - - @common_mocks - def test_manage_existing_snapshot_with_exist_rbd_image(self): - proxy = self.mock_proxy.return_value - proxy.__enter__.return_value = proxy - proxy.rename_snap.side_effect = MockImageExistsException - - exist_snapshot = 'snapshot-exist' - existing_ref = {'source-name': exist_snapshot} - self.assertRaises(self.mock_rbd.ImageExists, - self.driver.manage_existing_snapshot, - self.snapshot_b, existing_ref) - - # Make sure the exception was raised - self.assertEqual(RAISED_EXCEPTIONS, - [self.mock_rbd.ImageExists]) - - -class ManagedRBDTestCase(test_driver.BaseDriverTestCase): - driver_name = "cinder.volume.drivers.rbd.RBDDriver" - - def setUp(self): - super(ManagedRBDTestCase, self).setUp() - self.volume.driver.set_initialized() - self.volume.stats = {'allocated_capacity_gb': 0, - 'pools': {}} - self.called = [] - - def _create_volume_from_image(self, expected_status, raw=False, - clone_error=False): - """Try to clone a volume from an image, and check status afterwards. - - NOTE: if clone_error is True we force the image type to raw otherwise - clone_image is not called - """ - - # See tests.image.fake for image types. 
- if raw: - image_id = '155d900f-4e14-4e4c-a73d-069cbf4541e6' - else: - image_id = 'c905cedb-7281-47e4-8a62-f26bc5fc4c77' - - # creating volume testdata - db_volume = {'display_description': 'Test Desc', - 'size': 20, - 'status': 'creating', - 'availability_zone': 'fake_zone', - 'attach_status': fields.VolumeAttachStatus.DETACHED, - 'host': 'dummy'} - volume = objects.Volume(context=self.context, **db_volume) - volume.create() - - try: - if not clone_error: - self.volume.create_volume(self.context, volume, - request_spec={'image_id': image_id}) - else: - self.assertRaises(exception.CinderException, - self.volume.create_volume, - self.context, - volume, - request_spec={'image_id': image_id}) - - volume = objects.Volume.get_by_id(self.context, volume.id) - self.assertEqual(expected_status, volume.status) - finally: - # cleanup - volume.destroy() - - @mock.patch('cinder.image.image_utils.check_available_space') - @mock.patch.object(cinder.image.glance, 'get_default_image_service') - def test_create_vol_from_image_status_available(self, mock_gdis, - mock_check_space): - """Clone raw image then verify volume is in available state.""" - - def _mock_clone_image(context, volume, image_location, - image_meta, image_service): - return {'provider_location': None}, True - - with mock.patch.object(self.volume.driver, 'clone_image') as \ - mock_clone_image: - mock_clone_image.side_effect = _mock_clone_image - with mock.patch.object(self.volume.driver, 'create_volume') as \ - mock_create: - with mock.patch.object(create_volume.CreateVolumeFromSpecTask, - '_copy_image_to_volume') as mock_copy: - self._create_volume_from_image('available', raw=True) - self.assertFalse(mock_copy.called) - - self.assertTrue(mock_clone_image.called) - self.assertFalse(mock_create.called) - self.assertTrue(mock_gdis.called) - - @mock.patch('cinder.image.image_utils.check_available_space') - @mock.patch.object(cinder.image.glance, 'get_default_image_service') - 
@mock.patch('cinder.image.image_utils.TemporaryImages.fetch') - @mock.patch('cinder.image.image_utils.qemu_img_info') - def test_create_vol_from_non_raw_image_status_available( - self, mock_qemu_info, mock_fetch, mock_gdis, mock_check_space): - """Clone non-raw image then verify volume is in available state.""" - - def _mock_clone_image(context, volume, image_location, - image_meta, image_service): - return {'provider_location': None}, False - - image_info = imageutils.QemuImgInfo() - image_info.virtual_size = '1073741824' - mock_qemu_info.return_value = image_info - - mock_fetch.return_value = mock.MagicMock(spec=utils.get_file_spec()) - with mock.patch.object(self.volume.driver, 'clone_image') as \ - mock_clone_image: - mock_clone_image.side_effect = _mock_clone_image - with mock.patch.object(self.volume.driver, 'create_volume') as \ - mock_create: - with mock.patch.object(create_volume.CreateVolumeFromSpecTask, - '_copy_image_to_volume') as mock_copy: - self._create_volume_from_image('available', raw=False) - self.assertTrue(mock_copy.called) - - self.assertTrue(mock_clone_image.called) - self.assertTrue(mock_create.called) - self.assertTrue(mock_gdis.called) - - @mock.patch('cinder.image.image_utils.check_available_space') - @mock.patch.object(cinder.image.glance, 'get_default_image_service') - def test_create_vol_from_image_status_error(self, mock_gdis, - mock_check_space): - """Fail to clone raw image then verify volume is in error state.""" - with mock.patch.object(self.volume.driver, 'clone_image') as \ - mock_clone_image: - mock_clone_image.side_effect = exception.CinderException - with mock.patch.object(self.volume.driver, 'create_volume'): - with mock.patch.object(create_volume.CreateVolumeFromSpecTask, - '_copy_image_to_volume') as mock_copy: - self._create_volume_from_image('error', raw=True, - clone_error=True) - self.assertFalse(mock_copy.called) - - self.assertTrue(mock_clone_image.called) - self.assertFalse(self.volume.driver.create_volume.called) 
- self.assertTrue(mock_gdis.called) - - def test_clone_failure(self): - driver = self.volume.driver - - with mock.patch.object(driver, '_is_cloneable', lambda *args: False): - image_loc = (mock.Mock(), None) - actual = driver.clone_image(mock.Mock(), - mock.Mock(), - image_loc, - {}, - mock.Mock()) - self.assertEqual(({}, False), actual) - - self.assertEqual(({}, False), - driver.clone_image('', object(), None, {}, '')) - - def test_clone_success(self): - expected = ({'provider_location': None}, True) - driver = self.volume.driver - - with mock.patch.object(self.volume.driver, '_is_cloneable') as \ - mock_is_cloneable: - mock_is_cloneable.return_value = True - with mock.patch.object(self.volume.driver, '_clone') as \ - mock_clone: - with mock.patch.object(self.volume.driver, '_resize') as \ - mock_resize: - mock_clone.return_value = {} - image_loc = ('rbd://fee/fi/fo/fum', None) - - volume = {'name': 'vol1'} - actual = driver.clone_image(mock.Mock(), - volume, - image_loc, - {'disk_format': 'raw', - 'id': 'id.foo'}, - mock.Mock()) - - self.assertEqual(expected, actual) - mock_clone.assert_called_once_with(volume, - 'fi', 'fo', 'fum') - mock_resize.assert_called_once_with(volume) - - def test_clone_multilocation_success(self): - expected = ({'provider_location': None}, True) - driver = self.volume.driver - - def cloneable_side_effect(url_location, image_meta): - return url_location == 'rbd://fee/fi/fo/fum' - - with mock.patch.object(self.volume.driver, '_is_cloneable') \ - as mock_is_cloneable, \ - mock.patch.object(self.volume.driver, '_clone') as mock_clone, \ - mock.patch.object(self.volume.driver, '_resize') \ - as mock_resize: - mock_is_cloneable.side_effect = cloneable_side_effect - mock_clone.return_value = {} - image_loc = ('rbd://bee/bi/bo/bum', - [{'url': 'rbd://bee/bi/bo/bum'}, - {'url': 'rbd://fee/fi/fo/fum'}]) - volume = {'name': 'vol1'} - image_meta = mock.sentinel.image_meta - image_service = mock.sentinel.image_service - - actual = 
driver.clone_image(self.context, - volume, - image_loc, - image_meta, - image_service) - - self.assertEqual(expected, actual) - self.assertEqual(2, mock_is_cloneable.call_count) - mock_clone.assert_called_once_with(volume, - 'fi', 'fo', 'fum') - mock_is_cloneable.assert_called_with('rbd://fee/fi/fo/fum', - image_meta) - mock_resize.assert_called_once_with(volume) - - def test_clone_multilocation_failure(self): - expected = ({}, False) - driver = self.volume.driver - - with mock.patch.object(driver, '_is_cloneable', return_value=False) \ - as mock_is_cloneable, \ - mock.patch.object(self.volume.driver, '_clone') as mock_clone, \ - mock.patch.object(self.volume.driver, '_resize') \ - as mock_resize: - image_loc = ('rbd://bee/bi/bo/bum', - [{'url': 'rbd://bee/bi/bo/bum'}, - {'url': 'rbd://fee/fi/fo/fum'}]) - - volume = {'name': 'vol1'} - image_meta = mock.sentinel.image_meta - image_service = mock.sentinel.image_service - actual = driver.clone_image(self.context, - volume, - image_loc, - image_meta, - image_service) - - self.assertEqual(expected, actual) - self.assertEqual(2, mock_is_cloneable.call_count) - mock_is_cloneable.assert_any_call('rbd://bee/bi/bo/bum', - image_meta) - mock_is_cloneable.assert_any_call('rbd://fee/fi/fo/fum', - image_meta) - self.assertFalse(mock_clone.called) - self.assertFalse(mock_resize.called) diff --git a/cinder/tests/unit/volume/drivers/test_reduxio.py b/cinder/tests/unit/volume/drivers/test_reduxio.py deleted file mode 100644 index f7acc2ce8..000000000 --- a/cinder/tests/unit/volume/drivers/test_reduxio.py +++ /dev/null @@ -1,649 +0,0 @@ -# Copyright (c) 2016 Reduxio Systems -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. 
You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -import copy -import random -import string - -import mock -from oslo_utils import units - -from cinder import exception -from cinder import test -from cinder.volume.drivers.reduxio import rdx_cli_api -from cinder.volume.drivers.reduxio import rdx_iscsi_driver - -DRIVER_PATH = ("cinder.volume.drivers." - "reduxio.rdx_iscsi_driver.ReduxioISCSIDriver") -API_PATH = "cinder.volume.drivers.reduxio.rdx_cli_api.ReduxioAPI" - -TARGET = "mock_target" -TARGET_USER = "rdxadmin" -TARGET_PASSWORD = "mock_password" -VOLUME_BACKEND_NAME = "REDUXIO_VOLUME_TYPE" -CINDER_ID_LENGTH = 36 -VOLUME_ID = "abcdabcd-1234-abcd-1234-abcdeffedcba" -VOLUME = { - "name": "volume-" + VOLUME_ID, - "id": VOLUME_ID, - "display_name": "fake_volume", - "size": 2, - "host": "irrelevant", - "volume_type": None, - "volume_type_id": None, - "consistencygroup_id": None, - 'metadata': {} -} - -VOLUME_RDX_NAME = "abcdabcd1234abcd1234abcdeffedcb" -VOLUME_RDX_DESC = "openstack_" + VOLUME["name"] - -SRC_VOL_ID = "4c7a294d-5964-4379-a15f-ce5554734efc" -SRC_VOL_RDX_NAME = "ac7a294d59644379a15fce5554734ef" - -SRC_VOL = { - "name": "volume-" + SRC_VOL_ID, - "id": SRC_VOL_ID, - "display_name": 'fake_src', - "size": 2, - "host": "irrelevant", - "volume_type": None, - "volume_type_id": None, - "consistencygroup_id": None, -} - -SNAPSHOT_ID = "04fe2f9a-d0c4-4564-a30d-693cc3657b47" -SNAPSHOT_RDX_NAME = "a4fe2f9ad0c44564a30d693cc3657b4" - -SNAPSHOT = { - "name": "snapshot-" + SNAPSHOT_ID, - "id": SNAPSHOT_ID, - "volume_id": SRC_VOL_ID, - "volume_name": "volume-" + SRC_VOL_ID, - "volume_size": 2, - "display_name": 
"fake_snapshot", - "cgsnapshot_id": None, - "metadata": {} -} - -CONNECTOR = { - "initiator": "iqn.2013-12.com.stub:af4032f00014000e", -} - -LS_SETTINGS = { - "system_configuration": [ - { - "name": "host_name", - "value": "reduxio" - }, - { - "name": "serial_number", - "value": "af4032f00014000e" - }, - { - "name": "primary_ntp", - "value": "mickey.il.reduxio" - }, - { - "name": "secondary_ntp", - "value": "minnie.il.reduxio" - }, - { - "name": "timezone", - "value": "Asia/Jerusalem" - }, - { - "name": "capacity_threshold", - "value": "93%" - }, - { - "name": "storsense_enabled", - "value": True - } - ], - "network_configuration": [ - { - "name": "iscsi_target_iqn", - "value": "iqn.2013-12.com.reduxio:af4032f00014000e" - }, - { - "name": "iscsi_target_tcp_port", - "value": "3260" - }, - { - "name": "mtu", - "value": "9000" - } - ], - "iscsi_network1": [ - { - "name": "controller_1_port_1", - "value": "10.46.93.11" - }, - { - "name": "controller_2_port_1", - "value": "10.46.93.22" - }, - { - "name": "subnet_mask", - "value": "255.0.0.0" - }, - { - "name": "default_gateway", - "value": None - }, - { - "name": "vlan_tag", - "value": None - } - ], - "iscsi_network2": [ - { - "name": "controller_1_port_2", - "value": "10.64.93.11" - }, - { - "name": "controller_2_port_2", - "value": "10.64.93.22" - }, - { - "name": "subnet_mask", - "value": "255.0.0.0" - }, - { - "name": "default_gateway", - "value": None - }, - { - "name": "vlan_tag", - "value": None - } - ], - "management_settings": [ - { - "name": "floating_ip", - "value": "172.17.46.93" - }, - { - "name": "management_ip1", - "value": "172.17.46.91" - }, - { - "name": "management_ip2", - "value": "172.17.46.92" - }, - { - "name": "subnet_mask", - "value": "255.255.254.0" - }, - { - "name": "default_gateway", - "value": "172.17.47.254" - }, - { - "name": "primary_dns", - "value": "172.17.32.11" - }, - { - "name": "secondary_dns", - "value": "8.8.8.8" - }, - { - "name": "domain_name", - "value": "il.reduxio" - } - ], 
- "snmp": [ - { - "Name": "trap_destination", - "value": None - }, - { - "Name": "udp_port", - "value": "162" - }, - { - "Name": "community", - "value": "public" - } - ], - "email_notification": [ - { - "Name": "smtp_server", - "value": None - }, - { - "Name": "tcp_port", - "value": None - }, - { - "Name": "smtp_authentication", - "value": "None" - }, - { - "Name": "user_name", - "value": None - }, - { - "Name": "sender_address", - "value": None - } - ], - "email_recipient_list": [ - { - "email": None - } - ], - "directories": [ - { - "name": "history_policies/" - } - ] -} - -TEST_ASSIGN_LUN_NUM = 7 - -ISCSI_CONNECTION_INFO_NO_MULTIPATH = { - "driver_volume_type": "iscsi", - "data": { - "target_discovered": False, - "discard": False, - "volume_id": VOLUME["id"], - "target_lun": TEST_ASSIGN_LUN_NUM, - "target_iqn": "iqn.2013-12.com.reduxio:af4032f00014000e", - "target_portal": "10.46.93.11:3260", - - } -} - -connection_copied = copy.deepcopy( - ISCSI_CONNECTION_INFO_NO_MULTIPATH["data"] -) -connection_copied.update({ - "target_luns": [TEST_ASSIGN_LUN_NUM] * 4, - "target_iqns": ["iqn.2013-12.com.reduxio:af4032f00014000e", - "iqn.2013-12.com.reduxio:af4032f00014000e", - "iqn.2013-12.com.reduxio:af4032f00014000e", - "iqn.2013-12.com.reduxio:af4032f00014000e"], - "target_portals": ["10.46.93.11:3260", "10.46.93.22:3260", - "10.64.93.11:3260", "10.64.93.22:3260"] -}) - -ISCSI_CONNECTION_INFO = { - "driver_volume_type": "iscsi", - "data": connection_copied -} - - -def mock_api(to_mock=False): - def client_mock_wrapper(func): - - def inner_client_mock(self, *args, **kwargs): - rdx_cli_api.ReduxioAPI._connect = mock.Mock() - if to_mock: - self.driver = rdx_iscsi_driver.ReduxioISCSIDriver( - configuration=self.mock_config) - self.mock_api = mock.Mock(spec=rdx_cli_api.ReduxioAPI) - self.driver.rdxApi = self.mock_api - else: - self.driver = rdx_iscsi_driver.ReduxioISCSIDriver( - configuration=self.mock_config) - self.driver.do_setup(None) - func(self, *args) - - return 
inner_client_mock - - return client_mock_wrapper - - -class ReduxioISCSIDriverTestCase(test.TestCase): - def setUp(self): - super(ReduxioISCSIDriverTestCase, self).setUp() - self.mock_config = mock.Mock() - self.mock_config.san_ip = TARGET - self.mock_config.san_login = TARGET_USER - self.mock_config.san_password = TARGET_PASSWORD - self.mock_config.volume_backend_name = VOLUME_BACKEND_NAME - self.driver = None # type: ReduxioISCSIDriver - - @staticmethod - def generate_random_uuid(): - return ''.join( - random.choice(string.ascii_uppercase + string.digits) for _ in - range(rdx_iscsi_driver.RDX_CLI_MAX_VOL_LENGTH)) - - @mock_api(False) - def test_cinder_id_to_rdx(self): - random_uuid1 = self.generate_random_uuid() - random_uuid2 = self.generate_random_uuid() - result1 = self.driver._cinder_id_to_rdx(random_uuid1) - result2 = self.driver._cinder_id_to_rdx(random_uuid2) - self.assertEqual(rdx_iscsi_driver.RDX_CLI_MAX_VOL_LENGTH, len(result1)) - self.assertEqual(rdx_iscsi_driver.RDX_CLI_MAX_VOL_LENGTH, len(result2)) - self.assertNotEqual(result1, result2) - - @mock.patch.object(rdx_cli_api.ReduxioAPI, "_run_cmd") - @mock_api(False) - def test_create_volume(self, mock_run_cmd): - self.driver.create_volume(VOLUME) - expected_cmd = rdx_cli_api.RdxApiCmd("volumes new", - argument=VOLUME_RDX_NAME, - flags=[ - ["size", VOLUME["size"]], - ["description", - VOLUME_RDX_DESC] - ]) - mock_run_cmd.assert_called_with(expected_cmd) - - @mock.patch.object(rdx_cli_api.ReduxioAPI, "_run_cmd") - @mock_api(False) - def test_manage_existing(self, mock_run_cmd): - source_name = 'test-source' - self.driver.rdxApi.find_volume_by_name = mock.Mock() - self.driver.rdxApi.find_volume_by_name.return_value = { - 'name': source_name, - 'description': None - - } - self.driver.manage_existing(VOLUME, {'source-name': source_name}) - - expected_cmd = rdx_cli_api.RdxApiCmd("volumes update", - argument=source_name, - flags=[ - ["new-name", VOLUME_RDX_NAME], - ["description", - VOLUME_RDX_DESC] - ]) - 
mock_run_cmd.assert_called_with(expected_cmd) - - self.driver.rdxApi.find_volume_by_name.return_value = { - 'name': source_name, - 'description': "openstack_1234" - } - - self.assertRaises( - exception.ManageExistingAlreadyManaged, - self.driver.manage_existing, - VOLUME, {'source-name': source_name} - ) - - @mock.patch.object(rdx_cli_api.ReduxioAPI, "_run_cmd") - @mock_api(False) - def test_manage_existing_get_size(self, mock_run_cmd): - source_name = 'test-source' - self.driver.rdxApi.find_volume_by_name = mock.Mock() - - vol_cli_ret = { - 'name': source_name, - 'description': None, - "size": units.Gi * 10 - } - source_vol = {'source-name': source_name} - - self.driver.rdxApi.find_volume_by_name.return_value = vol_cli_ret - ret = self.driver.manage_existing_get_size(VOLUME, source_vol) - self.assertEqual(10, ret) - - vol_cli_ret["size"] = units.Gi * 9 - self.driver.rdxApi.find_volume_by_name.return_value = vol_cli_ret - ret = self.driver.manage_existing_get_size(VOLUME, source_vol) - self.assertNotEqual(10, ret) - - @mock.patch.object(rdx_cli_api.ReduxioAPI, "_run_cmd") - @mock_api(False) - def test_unmanage(self, mock_run_cmd): - source_name = 'test-source' - self.driver.rdxApi.find_volume_by_name = mock.Mock() - self.driver.rdxApi.find_volume_by_name.return_value = { - 'name': source_name, - 'description': "openstack_1234" - - } - self.driver.unmanage(VOLUME) - - expected_cmd = rdx_cli_api.RdxApiCmd( - "volumes update", - argument=VOLUME_RDX_NAME, - flags=[["description", ""]]) - mock_run_cmd.assert_called_with(expected_cmd) - - @mock.patch.object(rdx_cli_api.ReduxioAPI, "_run_cmd") - @mock_api(False) - def test_delete_volume(self, mock_run_cmd): - self.driver.delete_volume(VOLUME) - expected_cmd = rdx_cli_api.RdxApiCmd( - "volumes delete {} -force".format(VOLUME_RDX_NAME)) - mock_run_cmd.assert_called_with(expected_cmd) - - @mock.patch.object(rdx_cli_api.ReduxioAPI, "_run_cmd") - @mock_api(False) - def test_create_volume_from_snapshot(self, mock_run_cmd): - 
self.driver.create_volume_from_snapshot(VOLUME, SNAPSHOT) - - expected_cmd = rdx_cli_api.RdxApiCmd( - "volumes clone", - argument=SRC_VOL_RDX_NAME, - flags={ - "name": VOLUME_RDX_NAME, - "bookmark": SNAPSHOT_RDX_NAME, - "description": VOLUME_RDX_DESC} - ) - - mock_run_cmd.assert_called_with(expected_cmd) - - # Test resize - bigger_vol = copy.deepcopy(VOLUME) - bigger_size = SNAPSHOT['volume_size'] + 10 - bigger_vol['size'] = bigger_size - - self.driver.create_volume_from_snapshot(bigger_vol, SNAPSHOT) - - expected_cmd = rdx_cli_api.RdxApiCmd("volumes update", - argument=VOLUME_RDX_NAME, - flags={"size": bigger_size}) - - mock_run_cmd.assert_called_with(expected_cmd) - - @mock.patch.object(rdx_cli_api.ReduxioAPI, "_run_cmd") - @mock_api(False) - def test_create_cloned_volume(self, mock_run_cmd): - self.driver.create_cloned_volume(VOLUME, SRC_VOL) - - expected_cmd = rdx_cli_api.RdxApiCmd( - "volumes clone", - argument=SRC_VOL_RDX_NAME, - flags={"name": VOLUME_RDX_NAME, "description": VOLUME_RDX_DESC}) - - mock_run_cmd.assert_called_with(expected_cmd) - - # Test clone from date - backdated_clone = copy.deepcopy(VOLUME) - clone_date = "02/17/2015-11:39:00" - backdated_clone["metadata"]["backdate"] = clone_date - - self.driver.create_cloned_volume(backdated_clone, SRC_VOL) - expected_cmd.add_flag("timestamp", clone_date) - mock_run_cmd.assert_called_with(expected_cmd) - - @mock.patch.object(rdx_cli_api.ReduxioAPI, "_run_cmd") - @mock_api(False) - def test_create_snapshot(self, mock_run_cmd): - self.driver.create_snapshot(SNAPSHOT) - - expected_cmd = rdx_cli_api.RdxApiCmd( - "volumes bookmark", - argument=SRC_VOL_RDX_NAME, - flags={"name": SNAPSHOT_RDX_NAME, "type": "manual"}) - - mock_run_cmd.assert_called_with(expected_cmd) - - backdated_snap = copy.deepcopy(SNAPSHOT) - clone_date = "02/17/2015-11:39:00" - backdated_snap["metadata"]["backdate"] = clone_date - - self.driver.create_snapshot(backdated_snap) - - expected_cmd = rdx_cli_api.RdxApiCmd( - "volumes bookmark", - 
argument=SRC_VOL_RDX_NAME, - flags={ - "name": SNAPSHOT_RDX_NAME, - "type": "manual", - "timestamp": clone_date} - ) - - mock_run_cmd.assert_called_with(expected_cmd) - - @mock.patch.object(rdx_cli_api.ReduxioAPI, "_run_cmd") - @mock_api(False) - def test_delete_snapshot(self, mock_run_cmd): - self.driver.delete_snapshot(SNAPSHOT) - - expected_cmd = rdx_cli_api.RdxApiCmd("volumes delete-bookmark", - argument=SRC_VOL_RDX_NAME, - flags={"name": SNAPSHOT_RDX_NAME}) - - mock_run_cmd.assert_called_with(expected_cmd) - - @mock.patch.object(rdx_cli_api.ReduxioAPI, "_run_cmd") - @mock_api(False) - def test_get_volume_stats(self, mock_run_cmd): - pass - - @mock.patch.object(rdx_cli_api.ReduxioAPI, "_run_cmd") - @mock_api(False) - def test_extend_volume(self, mock_run_cmd): - new_size = VOLUME["size"] + 1 - self.driver.extend_volume(VOLUME, new_size) - - expected_cmd = rdx_cli_api.RdxApiCmd("volumes update", - argument=VOLUME_RDX_NAME, - flags={"size": new_size}) - - mock_run_cmd.assert_called_with(expected_cmd) - - def settings_side_effect(*args): - if args[0].cmd == "settings ls": - return LS_SETTINGS - else: - return mock.Mock() - - def get_single_assignment_side_effect(*args, **kwargs): - if "raise_on_non_exists" in kwargs: - raise_given = kwargs["raise_on_non_exists"] - else: - raise_given = True - if (raise_given is True) or (raise_given is None): - return { - "host": kwargs["host"], - "vol": kwargs["vol"], - "lun": TEST_ASSIGN_LUN_NUM - } - else: - return None - - @mock.patch.object(rdx_cli_api.ReduxioAPI, "_run_cmd", - side_effect=settings_side_effect) - @mock.patch.object(rdx_cli_api.ReduxioAPI, "get_single_assignment", - side_effect=get_single_assignment_side_effect) - @mock_api(False) - def test_initialize_connection(self, mock_list_assignmnet, mock_run_cmd): - generated_host_name = "openstack-123456789012" - self.driver.rdxApi.list_hosts = mock.Mock() - self.driver.rdxApi.list_hosts.return_value = [] - self.driver._generate_initiator_name = mock.Mock() - 
self.driver._generate_initiator_name.return_value = generated_host_name - - ret_connection_info = self.driver.initialize_connection(VOLUME, - CONNECTOR) - - create_host_cmd = rdx_cli_api.RdxApiCmd( - "hosts new", - argument=generated_host_name, - flags={"iscsi-name": CONNECTOR["initiator"]} - ) - assign_cmd = rdx_cli_api.RdxApiCmd( - "volumes assign", - argument=VOLUME_RDX_NAME, - flags={"host": generated_host_name} - ) - - calls = [ - mock.call.driver._run_cmd(create_host_cmd), - mock.call.driver._run_cmd(assign_cmd) - ] - - mock_run_cmd.assert_has_calls(calls) - self.assertDictEqual( - ret_connection_info, - ISCSI_CONNECTION_INFO_NO_MULTIPATH - ) - - connector = copy.deepcopy(CONNECTOR) - connector["multipath"] = True - - ret_connection_info = self.driver.initialize_connection(VOLUME, - connector) - - create_host_cmd = rdx_cli_api.RdxApiCmd( - "hosts new", - argument=generated_host_name, - flags={"iscsi-name": CONNECTOR["initiator"]}) - - assign_cmd = rdx_cli_api.RdxApiCmd( - "volumes assign", - argument=VOLUME_RDX_NAME, - flags={"host": generated_host_name} - ) - - calls = [ - mock.call.driver._run_cmd(create_host_cmd), - mock.call.driver._run_cmd(assign_cmd) - ] - - mock_run_cmd.assert_has_calls(calls) - self.assertDictEqual(ret_connection_info, ISCSI_CONNECTION_INFO) - - self.driver.rdxApi.list_hosts.return_value = [{ - "iscsi_name": CONNECTOR["initiator"], - "name": generated_host_name - }] - - ret_connection_info = self.driver.initialize_connection(VOLUME, - connector) - - mock_run_cmd.assert_has_calls([mock.call.driver._run_cmd(assign_cmd)]) - - self.assertDictEqual(ISCSI_CONNECTION_INFO, ret_connection_info) - - @mock.patch.object(rdx_cli_api.ReduxioAPI, "_run_cmd") - @mock_api(False) - def test_terminate_connection(self, mock_run_cmd): - generated_host_name = "openstack-123456789012" - self.driver.rdxApi.list_hosts = mock.Mock() - self.driver.rdxApi.list_hosts.return_value = [{ - "iscsi_name": CONNECTOR["initiator"], - "name": generated_host_name - }] - - 
self.driver.terminate_connection(VOLUME, CONNECTOR) - - unassign_cmd = rdx_cli_api.RdxApiCmd( - "volumes unassign", - argument=VOLUME_RDX_NAME, - flags={"host": generated_host_name} - ) - - mock_run_cmd.assert_has_calls( - [mock.call.driver._run_cmd(unassign_cmd)]) diff --git a/cinder/tests/unit/volume/drivers/test_remotefs.py b/cinder/tests/unit/volume/drivers/test_remotefs.py deleted file mode 100644 index c36e8a70d..000000000 --- a/cinder/tests/unit/volume/drivers/test_remotefs.py +++ /dev/null @@ -1,704 +0,0 @@ -# Copyright 2014 Cloudbase Solutions Srl -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -import collections -import copy -import os - -import ddt -import mock - -from cinder import context -from cinder import exception -from cinder.image import image_utils -from cinder import test -from cinder.tests.unit import fake_snapshot -from cinder.tests.unit import fake_volume -from cinder import utils -from cinder.volume.drivers import remotefs - - -@ddt.ddt -class RemoteFsSnapDriverTestCase(test.TestCase): - - _FAKE_MNT_POINT = '/mnt/fake_hash' - - def setUp(self): - super(RemoteFsSnapDriverTestCase, self).setUp() - self._driver = remotefs.RemoteFSSnapDriver() - self._driver._remotefsclient = mock.Mock() - self._driver._execute = mock.Mock() - self._driver._delete = mock.Mock() - - self.context = context.get_admin_context() - - self._fake_volume = fake_volume.fake_volume_obj( - self.context, provider_location='fake_share') - self._fake_volume_path = os.path.join(self._FAKE_MNT_POINT, - self._fake_volume.name) - self._fake_snapshot = fake_snapshot.fake_snapshot_obj(self.context) - self._fake_snapshot_path = (self._fake_volume_path + '.' + - self._fake_snapshot.id) - self._fake_snapshot.volume = self._fake_volume - - @ddt.data({'current_state': 'in-use', - 'acceptable_states': ['available', 'in-use']}, - {'current_state': 'in-use', - 'acceptable_states': ['available'], - 'expected_exception': exception.InvalidVolume}) - @ddt.unpack - def test_validate_state(self, current_state, acceptable_states, - expected_exception=None): - if expected_exception: - self.assertRaises(expected_exception, - self._driver._validate_state, - current_state, - acceptable_states) - else: - self._driver._validate_state(current_state, acceptable_states) - - def _test_delete_snapshot(self, volume_in_use=False, - stale_snapshot=False, - is_active_image=True, - is_tmp_snap=False): - # If the snapshot is not the active image, it is guaranteed that - # another snapshot exists having it as backing file. 
- - fake_snapshot_name = os.path.basename(self._fake_snapshot_path) - fake_info = {'active': fake_snapshot_name, - self._fake_snapshot.id: fake_snapshot_name} - fake_snap_img_info = mock.Mock() - fake_base_img_info = mock.Mock() - if stale_snapshot: - fake_snap_img_info.backing_file = None - else: - fake_snap_img_info.backing_file = self._fake_volume.name - fake_snap_img_info.file_format = 'qcow2' - fake_base_img_info.backing_file = None - fake_base_img_info.file_format = 'raw' - - self._driver._local_path_volume_info = mock.Mock( - return_value=mock.sentinel.fake_info_path) - self._driver._qemu_img_info = mock.Mock( - side_effect=[fake_snap_img_info, fake_base_img_info]) - self._driver._local_volume_dir = mock.Mock( - return_value=self._FAKE_MNT_POINT) - - self._driver._validate_state = mock.Mock() - self._driver._read_info_file = mock.Mock() - self._driver._write_info_file = mock.Mock() - self._driver._img_commit = mock.Mock() - self._driver._rebase_img = mock.Mock() - self._driver._ensure_share_writable = mock.Mock() - self._driver._delete_stale_snapshot = mock.Mock() - self._driver._delete_snapshot_online = mock.Mock() - - expected_info = { - 'active': fake_snapshot_name, - self._fake_snapshot.id: fake_snapshot_name - } - - exp_acceptable_states = ['available', 'in-use', 'backing-up', - 'deleting', 'downloading'] - - if volume_in_use: - self._fake_snapshot.volume.status = 'in-use' - - self._driver._read_info_file.return_value = fake_info - - self._driver._delete_snapshot(self._fake_snapshot) - self._driver._validate_state.assert_called_once_with( - self._fake_snapshot.volume.status, - exp_acceptable_states) - if stale_snapshot: - self._driver._delete_stale_snapshot.assert_called_once_with( - self._fake_snapshot) - else: - expected_online_delete_info = { - 'active_file': fake_snapshot_name, - 'snapshot_file': fake_snapshot_name, - 'base_file': self._fake_volume.name, - 'base_id': None, - 'new_base_file': None - } - 
self._driver._delete_snapshot_online.assert_called_once_with( - self.context, self._fake_snapshot, - expected_online_delete_info) - - elif is_active_image: - self._driver._read_info_file.return_value = fake_info - - self._driver._delete_snapshot(self._fake_snapshot) - - self._driver._img_commit.assert_called_once_with( - self._fake_snapshot_path) - self.assertNotIn(self._fake_snapshot.id, fake_info) - self._driver._write_info_file.assert_called_once_with( - mock.sentinel.fake_info_path, fake_info) - else: - fake_upper_snap_id = 'fake_upper_snap_id' - fake_upper_snap_path = ( - self._fake_volume_path + '-snapshot' + fake_upper_snap_id) - fake_upper_snap_name = os.path.basename(fake_upper_snap_path) - - fake_backing_chain = [ - {'filename': fake_upper_snap_name, - 'backing-filename': fake_snapshot_name}, - {'filename': fake_snapshot_name, - 'backing-filename': self._fake_volume.name}, - {'filename': self._fake_volume.name, - 'backing-filename': None}] - - fake_info[fake_upper_snap_id] = fake_upper_snap_name - fake_info[self._fake_snapshot.id] = fake_snapshot_name - fake_info['active'] = fake_upper_snap_name - - expected_info = copy.deepcopy(fake_info) - del expected_info[self._fake_snapshot.id] - - self._driver._read_info_file.return_value = fake_info - self._driver._get_backing_chain_for_path = mock.Mock( - return_value=fake_backing_chain) - - self._driver._delete_snapshot(self._fake_snapshot) - - self._driver._img_commit.assert_called_once_with( - self._fake_snapshot_path) - self._driver._rebase_img.assert_called_once_with( - fake_upper_snap_path, self._fake_volume.name, - fake_base_img_info.file_format) - self._driver._write_info_file.assert_called_once_with( - mock.sentinel.fake_info_path, expected_info) - - def test_delete_snapshot_when_active_file(self): - self._test_delete_snapshot() - - def test_delete_snapshot_in_use(self): - self._test_delete_snapshot(volume_in_use=True) - - def test_delete_snapshot_in_use_stale_snapshot(self): - 
self._test_delete_snapshot(volume_in_use=True, - stale_snapshot=True) - - def test_delete_snapshot_with_one_upper_file(self): - self._test_delete_snapshot(is_active_image=False) - - def test_delete_stale_snapshot(self): - fake_snapshot_name = os.path.basename(self._fake_snapshot_path) - fake_snap_info = { - 'active': self._fake_volume.name, - self._fake_snapshot.id: fake_snapshot_name - } - expected_info = {'active': self._fake_volume.name} - - self._driver._local_path_volume_info = mock.Mock( - return_value=mock.sentinel.fake_info_path) - self._driver._read_info_file = mock.Mock( - return_value=fake_snap_info) - self._driver._local_volume_dir = mock.Mock( - return_value=self._FAKE_MNT_POINT) - self._driver._write_info_file = mock.Mock() - - self._driver._delete_stale_snapshot(self._fake_snapshot) - - self._driver._delete.assert_called_once_with(self._fake_snapshot_path) - self._driver._write_info_file.assert_called_once_with( - mock.sentinel.fake_info_path, expected_info) - - @mock.patch.object(remotefs.RemoteFSDriver, - 'secure_file_operations_enabled', - return_value=True) - @mock.patch.object(os, 'stat') - def test_do_create_snapshot(self, _mock_stat, _mock_sec_enabled): - self._driver._local_volume_dir = mock.Mock( - return_value=self._fake_volume_path) - fake_backing_path = os.path.join( - self._driver._local_volume_dir(), - self._fake_volume.name) - - self._driver._execute = mock.Mock() - self._driver._set_rw_permissions = mock.Mock() - self._driver._qemu_img_info = mock.Mock( - return_value=mock.Mock(file_format=mock.sentinel.backing_fmt)) - - self._driver._do_create_snapshot(self._fake_snapshot, - self._fake_volume.name, - self._fake_snapshot_path) - command1 = ['qemu-img', 'create', '-f', 'qcow2', '-o', - 'backing_file=%s,backing_fmt=%s' % - (fake_backing_path, - mock.sentinel.backing_fmt), - self._fake_snapshot_path, - "%dG" % self._fake_volume.size] - command2 = ['qemu-img', 'rebase', '-u', - '-b', self._fake_volume.name, - '-F', 
mock.sentinel.backing_fmt, - self._fake_snapshot_path] - command3 = ['chown', '--reference=%s' % fake_backing_path, - self._fake_snapshot_path] - calls = [mock.call(*command1, run_as_root=True), - mock.call(*command2, run_as_root=True), - mock.call(*command3, run_as_root=True)] - self._driver._execute.assert_has_calls(calls) - - def _test_create_snapshot(self, volume_in_use=False, tmp_snap=False): - fake_snapshot_info = {} - fake_snapshot_file_name = os.path.basename(self._fake_snapshot_path) - - self._driver._local_path_volume_info = mock.Mock( - return_value=mock.sentinel.fake_info_path) - self._driver._read_info_file = mock.Mock( - return_value=fake_snapshot_info) - self._driver._do_create_snapshot = mock.Mock() - self._driver._create_snapshot_online = mock.Mock() - self._driver._write_info_file = mock.Mock() - self._driver.get_active_image_from_info = mock.Mock( - return_value=self._fake_volume.name) - self._driver._get_new_snap_path = mock.Mock( - return_value=self._fake_snapshot_path) - self._driver._validate_state = mock.Mock() - - expected_snapshot_info = { - 'active': fake_snapshot_file_name, - self._fake_snapshot.id: fake_snapshot_file_name - } - exp_acceptable_states = ['available', 'in-use', 'backing-up'] - if tmp_snap: - exp_acceptable_states.append('downloading') - self._fake_snapshot.id = 'tmp-snap-%s' % self._fake_snapshot.id - - if volume_in_use: - self._fake_snapshot.volume.status = 'in-use' - expected_method_called = '_create_snapshot_online' - else: - self._fake_snapshot.volume.status = 'available' - expected_method_called = '_do_create_snapshot' - - self._driver._create_snapshot(self._fake_snapshot) - - self._driver._validate_state.assert_called_once_with( - self._fake_snapshot.volume.status, - exp_acceptable_states) - fake_method = getattr(self._driver, expected_method_called) - fake_method.assert_called_with( - self._fake_snapshot, self._fake_volume.name, - self._fake_snapshot_path) - self._driver._write_info_file.assert_called_with( - 
mock.sentinel.fake_info_path, - expected_snapshot_info) - - def test_create_snapshot_volume_available(self): - self._test_create_snapshot() - - def test_create_snapshot_volume_in_use(self): - self._test_create_snapshot(volume_in_use=True) - - def test_create_snapshot_invalid_volume(self): - self._fake_snapshot.volume.status = 'error' - self.assertRaises(exception.InvalidVolume, - self._driver._create_snapshot, - self._fake_snapshot) - - @mock.patch('cinder.db.snapshot_get') - @mock.patch('time.sleep') - def test_create_snapshot_online_with_concurrent_delete( - self, mock_sleep, mock_snapshot_get): - self._driver._nova = mock.Mock() - - # Test what happens when progress is so slow that someone - # decides to delete the snapshot while the last known status is - # "creating". - mock_snapshot_get.side_effect = [ - {'status': 'creating', 'progress': '42%'}, - {'status': 'creating', 'progress': '45%'}, - {'status': 'deleting'}, - ] - - with mock.patch.object(self._driver, '_do_create_snapshot') as \ - mock_do_create_snapshot: - self.assertRaises(exception.RemoteFSConcurrentRequest, - self._driver._create_snapshot_online, - self._fake_snapshot, - self._fake_volume.name, - self._fake_snapshot_path) - - mock_do_create_snapshot.assert_called_once_with( - self._fake_snapshot, self._fake_volume.name, - self._fake_snapshot_path) - self.assertEqual([mock.call(1), mock.call(1)], - mock_sleep.call_args_list) - self.assertEqual(3, mock_snapshot_get.call_count) - mock_snapshot_get.assert_called_with(self._fake_snapshot._context, - self._fake_snapshot.id) - - @mock.patch.object(utils, 'synchronized') - def _locked_volume_operation_test_helper(self, mock_synchronized, func, - expected_exception=False, - *args, **kwargs): - def mock_decorator(*args, **kwargs): - def mock_inner(f): - return f - return mock_inner - - mock_synchronized.side_effect = mock_decorator - expected_lock = '%s-%s' % (self._driver.driver_prefix, - self._fake_volume.id) - - if expected_exception: - 
self.assertRaises(expected_exception, func, - self._driver, - *args, **kwargs) - else: - ret_val = func(self._driver, *args, **kwargs) - - mock_synchronized.assert_called_with(expected_lock, - external=False) - self.assertEqual(mock.sentinel.ret_val, ret_val) - - def test_locked_volume_id_operation(self): - mock_volume = mock.Mock() - mock_volume.id = self._fake_volume.id - - @remotefs.locked_volume_id_operation - def synchronized_func(inst, volume): - return mock.sentinel.ret_val - - self._locked_volume_operation_test_helper(func=synchronized_func, - volume=mock_volume) - - def test_locked_volume_id_snapshot_operation(self): - mock_snapshot = mock.Mock() - mock_snapshot.volume.id = self._fake_volume.id - - @remotefs.locked_volume_id_operation - def synchronized_func(inst, snapshot): - return mock.sentinel.ret_val - - self._locked_volume_operation_test_helper(func=synchronized_func, - snapshot=mock_snapshot) - - def test_locked_volume_id_operation_exception(self): - @remotefs.locked_volume_id_operation - def synchronized_func(inst): - return mock.sentinel.ret_val - - self._locked_volume_operation_test_helper( - func=synchronized_func, - expected_exception=exception.VolumeBackendAPIException) - - @mock.patch.object(image_utils, 'qemu_img_info') - @mock.patch('os.path.basename') - def _test_qemu_img_info(self, mock_basename, - mock_qemu_img_info, backing_file, basedir, - valid_backing_file=True): - fake_vol_name = 'fake_vol_name' - mock_info = mock_qemu_img_info.return_value - mock_info.image = mock.sentinel.image_path - mock_info.backing_file = backing_file - - self._driver._VALID_IMAGE_EXTENSIONS = ['vhd', 'vhdx', 'raw', 'qcow2'] - - mock_basename.side_effect = [mock.sentinel.image_basename, - mock.sentinel.backing_file_basename] - - if valid_backing_file: - img_info = self._driver._qemu_img_info_base( - mock.sentinel.image_path, fake_vol_name, basedir) - self.assertEqual(mock_info, img_info) - self.assertEqual(mock.sentinel.image_basename, - mock_info.image) - 
expected_basename_calls = [mock.call(mock.sentinel.image_path)] - if backing_file: - self.assertEqual(mock.sentinel.backing_file_basename, - mock_info.backing_file) - expected_basename_calls.append(mock.call(backing_file)) - mock_basename.assert_has_calls(expected_basename_calls) - else: - self.assertRaises(exception.RemoteFSException, - self._driver._qemu_img_info_base, - mock.sentinel.image_path, - fake_vol_name, basedir) - - mock_qemu_img_info.assert_called_with(mock.sentinel.image_path, - run_as_root=True) - - @ddt.data([None, '/fake_basedir'], - ['/fake_basedir/cb2016/fake_vol_name', '/fake_basedir'], - ['/fake_basedir/cb2016/fake_vol_name.VHD', '/fake_basedir'], - ['/fake_basedir/cb2016/fake_vol_name.404f-404', - '/fake_basedir'], - ['/fake_basedir/cb2016/fake_vol_name.tmp-snap-404f-404', - '/fake_basedir']) - @ddt.unpack - def test_qemu_img_info_valid_backing_file(self, backing_file, basedir): - self._test_qemu_img_info(backing_file=backing_file, - basedir=basedir) - - @ddt.data(['/other_random_path', '/fake_basedir'], - ['/other_basedir/cb2016/fake_vol_name', '/fake_basedir'], - ['/fake_basedir/invalid_hash/fake_vol_name', '/fake_basedir'], - ['/fake_basedir/cb2016/invalid_vol_name', '/fake_basedir'], - ['/fake_basedir/cb2016/fake_vol_name.info', '/fake_basedir'], - ['/fake_basedir/cb2016/fake_vol_name-random-suffix', - '/fake_basedir'], - ['/fake_basedir/cb2016/fake_vol_name.invalidext', - '/fake_basedir']) - @ddt.unpack - def test_qemu_img_info_invalid_backing_file(self, backing_file, basedir): - self._test_qemu_img_info(backing_file=backing_file, - basedir=basedir, - valid_backing_file=False) - - @mock.patch.object(remotefs.RemoteFSSnapDriver, '_local_volume_dir') - @mock.patch.object(remotefs.RemoteFSSnapDriver, - 'get_active_image_from_info') - def test_local_path_active_image(self, mock_get_active_img, - mock_local_vol_dir): - fake_vol_dir = 'fake_vol_dir' - fake_active_img = 'fake_active_img_fname' - - mock_get_active_img.return_value = 
fake_active_img - mock_local_vol_dir.return_value = fake_vol_dir - - active_img_path = self._driver._local_path_active_image( - mock.sentinel.volume) - exp_act_img_path = os.path.join(fake_vol_dir, fake_active_img) - - self.assertEqual(exp_act_img_path, active_img_path) - mock_get_active_img.assert_called_once_with(mock.sentinel.volume) - mock_local_vol_dir.assert_called_once_with(mock.sentinel.volume) - - @ddt.data({}, - {'provider_location': None}, - {'active_fpath': 'last_snap_img', - 'expect_snaps': True}) - @ddt.unpack - @mock.patch.object(remotefs.RemoteFSSnapDriver, - '_local_path_active_image') - @mock.patch.object(remotefs.RemoteFSSnapDriver, - 'local_path') - def test_snapshots_exist(self, mock_local_path, - mock_local_path_active_img, - provider_location='fake_share', - active_fpath='base_img_path', - base_vol_path='base_img_path', - expect_snaps=False): - self._fake_volume.provider_location = provider_location - - mock_local_path.return_value = base_vol_path - mock_local_path_active_img.return_value = active_fpath - - snaps_exist = self._driver._snapshots_exist(self._fake_volume) - - self.assertEqual(expect_snaps, snaps_exist) - - if provider_location: - mock_local_path.assert_called_once_with(self._fake_volume) - mock_local_path_active_img.assert_called_once_with( - self._fake_volume) - else: - self.assertFalse(mock_local_path.called) - - @ddt.data({}, - {'snapshots_exist': True}, - {'force_temp_snap': True}) - @ddt.unpack - @mock.patch.object(remotefs.RemoteFSSnapDriver, 'local_path') - @mock.patch.object(remotefs.RemoteFSSnapDriver, '_snapshots_exist') - @mock.patch.object(remotefs.RemoteFSSnapDriver, '_copy_volume_image') - @mock.patch.object(remotefs.RemoteFSSnapDriver, '_extend_volume') - @mock.patch.object(remotefs.RemoteFSSnapDriver, '_validate_state') - @mock.patch.object(remotefs.RemoteFSSnapDriver, '_create_snapshot') - @mock.patch.object(remotefs.RemoteFSSnapDriver, '_delete_snapshot') - @mock.patch.object(remotefs.RemoteFSSnapDriver, - 
'_copy_volume_from_snapshot') - def test_create_cloned_volume(self, mock_copy_volume_from_snapshot, - mock_delete_snapshot, - mock_create_snapshot, - mock_validate_state, - mock_extend_volme, - mock_copy_volume_image, - mock_snapshots_exist, - mock_local_path, - snapshots_exist=False, - force_temp_snap=False): - drv = self._driver - - volume = fake_volume.fake_volume_obj(self.context) - src_vref_id = '375e32b2-804a-49f2-b282-85d1d5a5b9e1' - src_vref = fake_volume.fake_volume_obj( - self.context, - id=src_vref_id, - name='volume-%s' % src_vref_id) - - mock_snapshots_exist.return_value = snapshots_exist - drv._always_use_temp_snap_when_cloning = force_temp_snap - - vol_attrs = ['provider_location', 'size', 'id', 'name', 'status', - 'volume_type', 'metadata'] - Volume = collections.namedtuple('Volume', vol_attrs) - - snap_attrs = ['volume_name', 'volume_size', 'name', - 'volume_id', 'id', 'volume'] - Snapshot = collections.namedtuple('Snapshot', snap_attrs) - - volume_ref = Volume(id=volume.id, - name=volume.name, - status=volume.status, - provider_location=volume.provider_location, - size=volume.size, - volume_type=volume.volume_type, - metadata=volume.metadata) - - snap_ref = Snapshot(volume_name=volume.name, - name='clone-snap-%s' % src_vref.id, - volume_size=src_vref.size, - volume_id=src_vref.id, - id='tmp-snap-%s' % src_vref.id, - volume=src_vref) - - drv.create_cloned_volume(volume, src_vref) - - exp_acceptable_states = ['available', 'backing-up', 'downloading'] - mock_validate_state.assert_called_once_with( - src_vref.status, - exp_acceptable_states, - obj_description='source volume') - - if snapshots_exist or force_temp_snap: - mock_create_snapshot.assert_called_once_with(snap_ref) - mock_copy_volume_from_snapshot.assert_called_once_with( - snap_ref, volume_ref, volume['size']) - self.assertTrue(mock_delete_snapshot.called) - else: - self.assertFalse(mock_create_snapshot.called) - - mock_snapshots_exist.assert_called_once_with(src_vref) - - 
mock_copy_volume_image.assert_called_once_with( - mock_local_path.return_value, - mock_local_path.return_value) - mock_local_path.assert_has_calls( - [mock.call(src_vref), mock.call(volume_ref)]) - mock_extend_volme.assert_called_once_with(volume_ref, - volume.size) - - @mock.patch('shutil.copyfile') - @mock.patch.object(remotefs.RemoteFSSnapDriver, '_set_rw_permissions') - def test_copy_volume_image(self, mock_set_perm, mock_copyfile): - self._driver._copy_volume_image(mock.sentinel.src, mock.sentinel.dest) - - mock_copyfile.assert_called_once_with(mock.sentinel.src, - mock.sentinel.dest) - mock_set_perm.assert_called_once_with(mock.sentinel.dest) - - def test_create_regular_file(self): - self._driver._create_regular_file('/path', 1) - self._driver._execute.assert_called_once_with('dd', 'if=/dev/zero', - 'of=/path', 'bs=1M', - 'count=1024', - run_as_root=True) - - -class RemoteFSPoolMixinTestCase(test.TestCase): - def setUp(self): - super(RemoteFSPoolMixinTestCase, self).setUp() - # We'll instantiate this directly for now. 
- self._driver = remotefs.RemoteFSPoolMixin() - - self.context = context.get_admin_context() - - @mock.patch.object(remotefs.RemoteFSPoolMixin, - '_get_pool_name_from_volume') - @mock.patch.object(remotefs.RemoteFSPoolMixin, - '_get_share_from_pool_name') - def test_find_share(self, mock_get_share_from_pool, - mock_get_pool_from_volume): - share = self._driver._find_share(mock.sentinel.volume) - - self.assertEqual(mock_get_share_from_pool.return_value, share) - mock_get_pool_from_volume.assert_called_once_with( - mock.sentinel.volume) - mock_get_share_from_pool.assert_called_once_with( - mock_get_pool_from_volume.return_value) - - def test_get_pool_name_from_volume(self): - fake_pool = 'fake_pool' - fake_host = 'fake_host@fake_backend#%s' % fake_pool - fake_vol = fake_volume.fake_volume_obj( - self.context, provider_location='fake_share', - host=fake_host) - - pool_name = self._driver._get_pool_name_from_volume(fake_vol) - self.assertEqual(fake_pool, pool_name) - - def test_update_volume_stats(self): - share_total_gb = 3 - share_free_gb = 2 - share_used_gb = 4 # provisioned space - expected_allocated_gb = share_total_gb - share_free_gb - - self._driver._mounted_shares = [mock.sentinel.share] - - self._driver.configuration = mock.Mock() - self._driver.configuration.safe_get.return_value = ( - mock.sentinel.backend_name) - self._driver.vendor_name = mock.sentinel.vendor_name - self._driver.driver_volume_type = mock.sentinel.driver_volume_type - self._driver._thin_provisioning_support = ( - mock.sentinel.thin_prov_support) - - self._driver.get_version = mock.Mock( - return_value=mock.sentinel.driver_version) - self._driver._ensure_shares_mounted = mock.Mock() - self._driver._get_capacity_info = mock.Mock( - return_value=(share_total_gb << 30, - share_free_gb << 30, - share_used_gb << 30)) - self._driver._get_pool_name_from_share = mock.Mock( - return_value=mock.sentinel.pool_name) - - expected_pool = { - 'pool_name': mock.sentinel.pool_name, - 'total_capacity_gb': 
float(share_total_gb), - 'free_capacity_gb': float(share_free_gb), - 'provisioned_capacity_gb': float(share_used_gb), - 'allocated_capacity_gb': float(expected_allocated_gb), - 'reserved_percentage': ( - self._driver.configuration.reserved_percentage), - 'max_over_subscription_ratio': ( - self._driver.configuration.max_over_subscription_ratio), - 'thin_provisioning_support': ( - mock.sentinel.thin_prov_support), - 'QoS_support': False, - } - - expected_stats = { - 'volume_backend_name': mock.sentinel.backend_name, - 'vendor_name': mock.sentinel.vendor_name, - 'driver_version': mock.sentinel.driver_version, - 'storage_protocol': mock.sentinel.driver_volume_type, - 'total_capacity_gb': 0, - 'free_capacity_gb': 0, - 'pools': [expected_pool], - } - - self._driver._update_volume_stats() - - self.assertDictEqual(expected_stats, self._driver._stats) - - self._driver._get_capacity_info.assert_called_once_with( - mock.sentinel.share) - self._driver.configuration.safe_get.assert_called_once_with( - 'volume_backend_name') diff --git a/cinder/tests/unit/volume/drivers/test_san.py b/cinder/tests/unit/volume/drivers/test_san.py deleted file mode 100644 index 447865555..000000000 --- a/cinder/tests/unit/volume/drivers/test_san.py +++ /dev/null @@ -1,68 +0,0 @@ -# Copyright 2015 OpenStack Foundation -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
-# - -import mock - -from cinder import test -from cinder.volume import configuration -from cinder.volume.drivers.san import san - - -class SanDriverTestCase(test.TestCase): - """Tests for SAN driver""" - - def __init__(self, *args, **kwargs): - super(SanDriverTestCase, self).__init__(*args, **kwargs) - - def setUp(self): - super(SanDriverTestCase, self).setUp() - self.configuration = mock.Mock(spec=configuration.Configuration) - self.configuration.san_is_local = False - self.configuration.san_ip = "10.0.0.1" - self.configuration.san_login = "admin" - self.configuration.san_password = "password" - self.configuration.san_ssh_port = 22 - self.configuration.san_thin_provision = True - self.configuration.san_private_key = 'private_key' - self.configuration.ssh_min_pool_conn = 1 - self.configuration.ssh_max_pool_conn = 5 - self.configuration.ssh_conn_timeout = 30 - - class fake_san_driver(san.SanDriver): - def initialize_connection(): - pass - - def create_volume(): - pass - - def delete_volume(): - pass - - def terminate_connection(): - pass - - @mock.patch.object(san.processutils, 'ssh_execute') - @mock.patch.object(san.ssh_utils, 'SSHPool') - @mock.patch.object(san.utils, 'check_ssh_injection') - def test_ssh_formatted_command(self, mock_check_ssh_injection, - mock_ssh_pool, mock_ssh_execute): - driver = self.fake_san_driver(configuration=self.configuration) - cmd_list = ['uname', '-s'] - expected_cmd = 'uname -s' - driver.san_execute(*cmd_list) - # get the same used mocked item from the pool - with driver.sshpool.item() as ssh_item: - mock_ssh_execute.assert_called_with(ssh_item, expected_cmd, - check_exit_code=None) diff --git a/cinder/tests/unit/volume/drivers/test_sheepdog.py b/cinder/tests/unit/volume/drivers/test_sheepdog.py deleted file mode 100644 index 4b5d7ca73..000000000 --- a/cinder/tests/unit/volume/drivers/test_sheepdog.py +++ /dev/null @@ -1,1396 +0,0 @@ - -# Copyright (c) 2013 Zelin.io -# Copyright (C) 2015 Nippon Telegraph and Telephone Corporation. 
-# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - - -import contextlib -import errno - -import mock -from oslo_concurrency import processutils -from oslo_utils import importutils -from oslo_utils import units - -from cinder import context -from cinder import exception -from cinder.i18n import _ -from cinder.image import image_utils -from cinder import test -from cinder.tests.unit import fake_constants as fake -from cinder.tests.unit import fake_snapshot -from cinder.tests.unit import fake_volume -from cinder import utils -from cinder.volume import configuration as conf -from cinder.volume.drivers import sheepdog - -SHEEP_ADDR = '127.0.0.1' -SHEEP_PORT = 7000 - - -class SheepdogDriverTestDataGenerator(object): - def __init__(self): - self.TEST_VOLUME = self._make_fake_volume(self.TEST_VOL_DATA) - self.TEST_CLONED_VOLUME = self._make_fake_volume( - self.TEST_CLONED_VOL_DATA) - self.TEST_SNAPSHOT = self._make_fake_snapshot( - self.TEST_SNAPSHOT_DATA, self.TEST_VOLUME) - - def sheepdog_cmd_error(self, cmd, exit_code, stdout, stderr): - return (('(Command: %(cmd)s) ' - '(Return Code: %(exit_code)s) ' - '(Stdout: %(stdout)s) ' - '(Stderr: %(stderr)s)') % - {'cmd': cmd, - 'exit_code': exit_code, - 'stdout': stdout.replace('\n', '\\n'), - 'stderr': stderr.replace('\n', '\\n')}) - - def _make_fake_volume(self, volume_data): - return fake_volume.fake_volume_obj(context.get_admin_context(), - **volume_data) - - def _make_fake_snapshot(self, snapshot_data, 
src_volume): - snapshot_obj = fake_snapshot.fake_snapshot_obj( - context.get_admin_context(), **snapshot_data) - snapshot_obj.volume = src_volume - return snapshot_obj - - def cmd_dog_vdi_create(self, name, size): - return ('env', 'LC_ALL=C', 'LANG=C', 'dog', 'vdi', 'create', name, - '%sG' % size, '-a', SHEEP_ADDR, '-p', SHEEP_PORT) - - def cmd_dog_vdi_delete(self, name): - return ('env', 'LC_ALL=C', 'LANG=C', 'dog', 'vdi', 'delete', name, - '-a', SHEEP_ADDR, '-p', SHEEP_PORT) - - def cmd_dog_vdi_create_snapshot(self, vdiname, snapname): - return ('env', 'LC_ALL=C', 'LANG=C', 'dog', 'vdi', 'snapshot', '-s', - snapname, '-a', SHEEP_ADDR, '-p', SHEEP_PORT, vdiname) - - def cmd_dog_vdi_delete_snapshot(self, vdiname, snapname): - return ('env', 'LC_ALL=C', 'LANG=C', 'dog', 'vdi', 'delete', '-s', - snapname, '-a', SHEEP_ADDR, '-p', SHEEP_PORT, vdiname) - - def cmd_qemuimg_vdi_clone(self, src_vdiname, src_snapname, dst_vdiname, - size): - return ('env', 'LC_ALL=C', 'LANG=C', 'qemu-img', 'create', '-b', - 'sheepdog:%(addr)s:%(port)s:%(src_vdiname)s:%(src_snapname)s' % - {'addr': SHEEP_ADDR, 'port': SHEEP_PORT, - 'src_vdiname': src_vdiname, 'src_snapname': src_snapname}, - 'sheepdog:%(addr)s:%(port)s:%(dst_vdiname)s' % - {'addr': SHEEP_ADDR, 'port': SHEEP_PORT, - 'dst_vdiname': dst_vdiname}, '%sG' % size) - - def cmd_dog_vdi_resize(self, name, size): - return ('env', 'LC_ALL=C', 'LANG=C', 'dog', 'vdi', 'resize', name, - size, '-a', SHEEP_ADDR, '-p', SHEEP_PORT) - - def cmd_dog_vdi_list(self, name): - return ('env', 'LC_ALL=C', 'LANG=C', 'dog', 'vdi', 'list', name, - '-r', '-a', SHEEP_ADDR, '-p', SHEEP_PORT) - - def cmd_dog_node_info(self): - return ('env', 'LC_ALL=C', 'LANG=C', 'dog', 'node', 'info', - '-a', SHEEP_ADDR, '-p', SHEEP_PORT, '-r') - - def cmd_dog_node_list(self): - return ('env', 'LC_ALL=C', 'LANG=C', 'dog', 'node', 'list', - '-a', SHEEP_ADDR, '-p', SHEEP_PORT, '-r') - - CMD_DOG_CLUSTER_INFO = ('env', 'LC_ALL=C', 'LANG=C', 'dog', 'cluster', - 'info', '-a', 
SHEEP_ADDR, '-p', SHEEP_PORT) - - TEST_VOL_DATA = { - 'size': 1, - 'id': fake.VOLUME_ID, - 'provider_auth': None, - 'host': 'host@backendsec#unit_test_pool', - 'project_id': fake.PROJECT_ID, - 'provider_location': 'location', - 'display_name': 'vol1', - 'display_description': 'unit test volume', - 'volume_type_id': None, - 'consistencygroup_id': None, - } - - TEST_CLONED_VOL_DATA = { - 'size': 2, - 'id': fake.VOLUME2_ID, - 'provider_auth': None, - 'host': 'host@backendsec#unit_test_pool', - 'project_id': fake.PROJECT_ID, - 'provider_location': 'location', - 'display_name': 'vol3', - 'display_description': 'unit test cloned volume', - 'volume_type_id': None, - 'consistencygroup_id': None, - } - - TEST_SNAPSHOT_DATA = { - 'id': fake.SNAPSHOT_ID, - } - - COLLIE_NODE_INFO = """ -0 107287605248 3623897354 3% -Total 107287605248 3623897354 3% 54760833024 -""" - - COLLIE_NODE_LIST = """ -0 127.0.0.1:7000 128 1 -""" - - COLLIE_VDI_LIST = """ -= testvolume 0 0 0 0 1467037106 fd32fc 3 -""" - - COLLIE_CLUSTER_INFO_0_5 = """\ -Cluster status: running - -Cluster created at Tue Jun 25 19:51:41 2013 - -Epoch Time Version -2013-06-25 19:51:41 1 [127.0.0.1:7000, 127.0.0.1:7001, 127.0.0.1:7002] -""" - - COLLIE_CLUSTER_INFO_0_6 = """\ -Cluster status: running, auto-recovery enabled - -Cluster created at Tue Jun 25 19:51:41 2013 - -Epoch Time Version -2013-06-25 19:51:41 1 [127.0.0.1:7000, 127.0.0.1:7001, 127.0.0.1:7002] -""" - - DOG_CLUSTER_RUNNING = """\ -Cluster status: running, auto-recovery enabled - -Cluster created at Thu Jun 18 17:24:56 2015 - -Epoch Time Version [Host:Port:V-Nodes,,,] -2015-06-18 17:24:56 1 [127.0.0.1:7000:128, 127.0.0.1:7001:128,\ - 127.0.0.1:7002:128] -""" - - DOG_CLUSTER_INFO_TO_BE_FORMATTED = """\ -Cluster status: Waiting for cluster to be formatted -""" - - DOG_CLUSTER_INFO_WAITING_OTHER_NODES = """\ -Cluster status: Waiting for other nodes to join cluster - -Cluster created at Thu Jun 18 17:24:56 2015 - -Epoch Time Version [Host:Port:V-Nodes,,,] 
-2015-06-18 17:24:56 1 [127.0.0.1:7000:128, 127.0.0.1:7001:128] -""" - - DOG_CLUSTER_INFO_SHUTTING_DOWN = """\ -Cluster status: System is shutting down -""" - - DOG_VDI_CREATE_VDI_ALREADY_EXISTS = """\ -Failed to create VDI %(vdiname)s: VDI exists already -""" - - DOG_VDI_SNAPSHOT_VDI_NOT_FOUND = """\ -Failed to create snapshot for volume-00000000-0000-0000-0000-000000000001: \ -No VDI found -""" - - DOG_VDI_SNAPSHOT_ALREADY_EXISTED = """\ -Failed to create snapshot for volume-00000000-0000-0000-0000-000000000001, \ -maybe snapshot id (0) or tag (snapshot-00000000-0000-0000-0000-000000000002) \ -is existed -""" - - DOG_VDI_SNAPSHOT_TAG_NOT_FOUND = """\ -Failed to open VDI volume-00000000-0000-0000-0000-000000000001 \ -(snapshot id: 0 snapshot tag: snapshot-00000000-0000-0000-0000-000000000002): \ -Failed to find requested tag -""" - - DOG_VDI_SNAPSHOT_VOLUME_NOT_FOUND = """\ -Failed to open VDI volume-00000000-0000-0000-0000-000000000001 \ -(snapshot id: 0 snapshot tag: snapshot-00000000-0000-0000-0000-000000000002): \ -No VDI found -""" - - DOG_VDI_RESIZE_SIZE_SHRINK = """\ -Shrinking VDIs is not implemented -""" - - DOG_VDI_RESIZE_TOO_LARGE = """\ -New VDI size is too large. 
This volume's max size is 4398046511104 -""" - - DOG_COMMAND_ERROR_VDI_NOT_EXISTS = """\ -Failed to open VDI %(vdiname)s (snapshot id: 0 snapshot tag: ): No VDI found -""" - - DOG_COMMAND_ERROR_FAIL_TO_CONNECT = """\ -failed to connect to 127.0.0.1:7000: Connection refused -failed to connect to 127.0.0.1:7000: Connection refused -Failed to get node list -""" - - QEMU_IMG_VDI_ALREADY_EXISTS = """\ -qemu-img: sheepdog:volume-00000000-0000-0000-0000-000000000001: \ -VDI exists already, -""" - - QEMU_IMG_VDI_NOT_FOUND = """\ -qemu-img: sheepdog:volume-00000000-0000-0000-0000-000000000003: \ -cannot get vdi info, No vdi found, \ -volume-00000000-0000-0000-0000-000000000001 \ -snapshot-00000000-0000-0000-0000-000000000002 -""" - - QEMU_IMG_SNAPSHOT_NOT_FOUND = """\ -qemu-img: sheepdog:volume-00000000-0000-0000-0000-000000000003: \ -cannot get vdi info, Failed to find the requested tag, \ -volume-00000000-0000-0000-0000-000000000001 \ -snapshot-00000000-0000-0000-0000-000000000002 -""" - - QEMU_IMG_SIZE_TOO_LARGE = """\ -qemu-img: sheepdog:volume-00000000-0000-0000-0000-000000000001: \ -An image is too large. 
The maximum image size is 4096GB -""" - - QEMU_IMG_FAILED_TO_CONNECT = """\ -qemu-img: sheepdog::volume-00000000-0000-0000-0000-000000000001: \ -Failed to connect socket: Connection refused -""" - - -class FakeImageService(object): - def download(self, context, image_id, path): - pass - - -class SheepdogIOWrapperTestCase(test.TestCase): - def setUp(self): - super(SheepdogIOWrapperTestCase, self).setUp() - self.volume = {'name': 'volume-2f9b2ff5-987b-4412-a91c-23caaf0d5aff'} - self.snapshot_name = 'snapshot-bf452d80-068a-43d7-ba9f-196cf47bd0be' - - self.vdi_wrapper = sheepdog.SheepdogIOWrapper( - SHEEP_ADDR, SHEEP_PORT, self.volume) - self.snapshot_wrapper = sheepdog.SheepdogIOWrapper( - SHEEP_ADDR, SHEEP_PORT, self.volume, self.snapshot_name) - - self.execute = mock.MagicMock() - self.mock_object(processutils, 'execute', self.execute) - - def test_init(self): - self.assertEqual(self.volume['name'], self.vdi_wrapper._vdiname) - self.assertIsNone(self.vdi_wrapper._snapshot_name) - self.assertEqual(0, self.vdi_wrapper._offset) - - self.assertEqual(self.snapshot_name, - self.snapshot_wrapper._snapshot_name) - - def test_execute(self): - cmd = ('cmd1', 'arg1') - data = 'data1' - - self.vdi_wrapper._execute(cmd, data) - - self.execute.assert_called_once_with(*cmd, process_input=data) - - def test_execute_error(self): - cmd = ('cmd1', 'arg1') - data = 'data1' - self.mock_object(processutils, 'execute', - mock.MagicMock(side_effect=OSError)) - - args = (cmd, data) - self.assertRaises(exception.VolumeDriverException, - self.vdi_wrapper._execute, - *args) - - def test_read_vdi(self): - self.vdi_wrapper.read() - self.execute.assert_called_once_with( - 'dog', 'vdi', 'read', '-a', SHEEP_ADDR, '-p', SHEEP_PORT, - self.volume['name'], 0, process_input=None) - - def test_read_vdi_invalid(self): - self.vdi_wrapper._valid = False - self.assertRaises(exception.VolumeDriverException, - self.vdi_wrapper.read) - - def test_write_vdi(self): - data = 'data1' - - 
self.vdi_wrapper.write(data) - - self.execute.assert_called_once_with( - 'dog', 'vdi', 'write', '-a', SHEEP_ADDR, '-p', SHEEP_PORT, - self.volume['name'], 0, len(data), - process_input=data) - self.assertEqual(len(data), self.vdi_wrapper.tell()) - - def test_write_vdi_invalid(self): - self.vdi_wrapper._valid = False - self.assertRaises(exception.VolumeDriverException, - self.vdi_wrapper.write, 'dummy_data') - - def test_read_snapshot(self): - self.snapshot_wrapper.read() - self.execute.assert_called_once_with( - 'dog', 'vdi', 'read', '-a', SHEEP_ADDR, '-p', SHEEP_PORT, - '-s', self.snapshot_name, self.volume['name'], 0, - process_input=None) - - def test_seek(self): - self.vdi_wrapper.seek(12345) - self.assertEqual(12345, self.vdi_wrapper.tell()) - - self.vdi_wrapper.seek(-2345, whence=1) - self.assertEqual(10000, self.vdi_wrapper.tell()) - - # This results in negative offset. - self.assertRaises(IOError, self.vdi_wrapper.seek, -20000, whence=1) - - def test_seek_invalid(self): - seek_num = 12345 - self.vdi_wrapper._valid = False - self.assertRaises(exception.VolumeDriverException, - self.vdi_wrapper.seek, seek_num) - - def test_flush(self): - # flush does nothing. 
- self.vdi_wrapper.flush() - self.assertFalse(self.execute.called) - - def test_fileno(self): - self.assertRaises(IOError, self.vdi_wrapper.fileno) - - -class SheepdogClientTestCase(test.TestCase): - def setUp(self): - super(SheepdogClientTestCase, self).setUp() - self._cfg = conf.Configuration(None) - self._cfg.sheepdog_store_address = SHEEP_ADDR - self._cfg.sheepdog_store_port = SHEEP_PORT - self.driver = sheepdog.SheepdogDriver(configuration=self._cfg) - db_driver = self.driver.configuration.db_driver - self.db = importutils.import_module(db_driver) - self.driver.db = self.db - self.driver.do_setup(None) - self.test_data = SheepdogDriverTestDataGenerator() - node_list = [SHEEP_ADDR] - self.client = sheepdog.SheepdogClient(node_list, SHEEP_PORT) - self._addr = SHEEP_ADDR - self._port = SHEEP_PORT - self._vdiname = self.test_data.TEST_VOLUME.name - self._vdisize = self.test_data.TEST_VOLUME.size - self._src_vdiname = self.test_data.TEST_SNAPSHOT.volume_name - self._snapname = self.test_data.TEST_SNAPSHOT.name - self._dst_vdiname = self.test_data.TEST_CLONED_VOLUME.name - self._dst_vdisize = self.test_data.TEST_CLONED_VOLUME.size - - @mock.patch.object(utils, 'execute') - def test_run_dog_success(self, fake_execute): - args = ('cluster', 'info') - expected_cmd = self.test_data.CMD_DOG_CLUSTER_INFO - fake_execute.return_value = ('', '') - self.client._run_dog(*args) - fake_execute.assert_called_once_with(*expected_cmd) - - @mock.patch.object(utils, 'execute') - @mock.patch.object(sheepdog, 'LOG') - def test_run_dog_command_not_found(self, fake_logger, fake_execute): - args = ('cluster', 'info') - expected_msg = 'No such file or directory' - expected_errno = errno.ENOENT - fake_execute.side_effect = OSError(expected_errno, expected_msg) - self.assertRaises(OSError, self.client._run_dog, *args) - self.assertTrue(fake_logger.error.called) - - @mock.patch.object(utils, 'execute') - @mock.patch.object(sheepdog, 'LOG') - def test_run_dog_operation_not_permitted(self, 
fake_logger, fake_execute): - args = ('cluster', 'info') - expected_msg = 'Operation not permitted' - expected_errno = errno.EPERM - fake_execute.side_effect = OSError(expected_errno, expected_msg) - self.assertRaises(OSError, self.client._run_dog, *args) - self.assertTrue(fake_logger.error.called) - - @mock.patch.object(utils, 'execute') - @mock.patch.object(sheepdog, 'LOG') - def test_run_dog_fail_to_connect(self, fake_logger, fake_execute): - args = ('cluster', 'info') - cmd = self.test_data.CMD_DOG_CLUSTER_INFO - exit_code = 2 - stdout = 'stdout dummy' - stderr = self.test_data.DOG_COMMAND_ERROR_FAIL_TO_CONNECT - expected_reason = (_('Failed to connect to sheep daemon. ' - 'addr: %(addr)s, port: %(port)s'), - {'addr': SHEEP_ADDR, 'port': SHEEP_PORT}) - fake_execute.side_effect = processutils.ProcessExecutionError( - cmd=cmd, exit_code=exit_code, stdout=stdout, stderr=stderr) - ex = self.assertRaises(exception.SheepdogError, - self.client._run_dog, *args) - self.assertEqual(expected_reason, ex.kwargs['reason']) - - @mock.patch.object(utils, 'execute') - @mock.patch.object(sheepdog, 'LOG') - def test_run_dog_fail_to_connect_bugcase(self, fake_logger, fake_execute): - # NOTE(zhangsong): Sheepdog's bug case. - # details are written to Sheepdog driver code. - args = ('node', 'list') - stdout = '' - stderr = self.test_data.DOG_COMMAND_ERROR_FAIL_TO_CONNECT - expected_reason = (_('Failed to connect to sheep daemon. 
' - 'addr: %(addr)s, port: %(port)s'), - {'addr': SHEEP_ADDR, 'port': SHEEP_PORT}) - fake_execute.return_value = (stdout, stderr) - ex = self.assertRaises(exception.SheepdogError, - self.client._run_dog, *args) - self.assertEqual(expected_reason, ex.kwargs['reason']) - - @mock.patch.object(utils, 'execute') - @mock.patch.object(sheepdog, 'LOG') - def test_run_dog_unknown_error(self, fake_logger, fake_execute): - args = ('cluster', 'info') - cmd = self.test_data.CMD_DOG_CLUSTER_INFO - exit_code = 1 - stdout = 'stdout dummy' - stderr = 'stderr dummy' - expected_msg = self.test_data.sheepdog_cmd_error( - cmd=cmd, exit_code=exit_code, stdout=stdout, stderr=stderr) - fake_execute.side_effect = processutils.ProcessExecutionError( - cmd=cmd, exit_code=exit_code, stdout=stdout, stderr=stderr) - ex = self.assertRaises(exception.SheepdogCmdError, - self.client._run_dog, *args) - self.assertEqual(expected_msg, ex.msg) - - @mock.patch.object(utils, 'execute') - def test_run_qemu_img_success(self, fake_execute): - # multiple part of args match the prefix and - # volume name is matched the prefix unfortunately - expected_cmd = ('env', 'LC_ALL=C', 'LANG=C', - 'qemu-img', 'create', '-b', - 'sheepdog:%(addr)s:%(port)s:sheepdog:snap' % - {'addr': SHEEP_ADDR, 'port': SHEEP_PORT}, - 'sheepdog:%(addr)s:%(port)s:clone' % - {'addr': SHEEP_ADDR, 'port': SHEEP_PORT}, '10G') - fake_execute.return_value = ('', '') - self.client._run_qemu_img('create', '-b', 'sheepdog:sheepdog:snap', - 'sheepdog:clone', '10G') - fake_execute.assert_called_once_with(*expected_cmd) - - @mock.patch.object(utils, 'execute') - @mock.patch.object(sheepdog, 'LOG') - def test_run_qemu_img_command_not_found(self, fake_logger, fake_execute): - args = ('create', 'dummy') - expected_msg = 'No such file or directory' - expected_errno = errno.ENOENT - fake_execute.side_effect = OSError(expected_errno, expected_msg) - self.assertRaises(OSError, self.client._run_qemu_img, *args) - self.assertTrue(fake_logger.error.called) - 
- @mock.patch.object(utils, 'execute') - @mock.patch.object(sheepdog, 'LOG') - def test_run_qemu_img_unknown_os_error(self, fake_logger, fake_execute): - args = ('create', 'dummy') - expected_msg = 'unknown' - expected_errno = errno.EPERM - fake_execute.side_effect = OSError(expected_errno, expected_msg) - self.assertRaises(OSError, self.client._run_qemu_img, *args) - self.assertTrue(fake_logger.error.called) - - @mock.patch.object(utils, 'execute') - @mock.patch.object(sheepdog, 'LOG') - def test_run_qemu_img_fail_to_connect(self, fake_logger, fake_execute): - args = ('create', 'dummy') - cmd = ('qemu-img', 'create', 'dummy') - exit_code = 1 - stdout = 'stdout dummy' - stderr = self.test_data.QEMU_IMG_FAILED_TO_CONNECT - expected_reason = (_('Failed to connect to sheep daemon. ' - 'addr: %(addr)s, port: %(port)s'), - {'addr': SHEEP_ADDR, 'port': SHEEP_PORT}) - fake_execute.side_effect = processutils.ProcessExecutionError( - cmd=cmd, exit_code=exit_code, stdout=stdout, stderr=stderr) - ex = self.assertRaises(exception.SheepdogError, - self.client._run_qemu_img, *args) - self.assertEqual(expected_reason, ex.kwargs['reason']) - - @mock.patch.object(utils, 'execute') - @mock.patch.object(sheepdog, 'LOG') - def test_run_qemu_img_unknown_execution_error(self, fake_logger, - fake_execute): - args = ('create', 'dummy') - cmd = ('qemu-img', 'create', 'dummy') - exit_code = 1 - stdout = 'stdout dummy' - stderr = 'stderr dummy' - expected_msg = self.test_data.sheepdog_cmd_error( - cmd=cmd, exit_code=exit_code, stdout=stdout, stderr=stderr) - fake_execute.side_effect = processutils.ProcessExecutionError( - cmd=cmd, exit_code=exit_code, stdout=stdout, stderr=stderr) - ex = self.assertRaises(exception.SheepdogCmdError, - self.client._run_qemu_img, *args) - self.assertEqual(expected_msg, ex.msg) - - @mock.patch.object(sheepdog.SheepdogClient, '_run_dog') - @mock.patch.object(sheepdog, 'LOG') - def test_check_cluster_status_success(self, fake_logger, fake_execute): - stdout = 
self.test_data.DOG_CLUSTER_RUNNING - stderr = '' - expected_cmd = ('cluster', 'info') - fake_execute.return_value = (stdout, stderr) - self.client.check_cluster_status() - fake_execute.assert_called_once_with(*expected_cmd) - self.assertTrue(fake_logger.debug.called) - - @mock.patch.object(sheepdog.SheepdogClient, '_run_dog') - def test_check_cluster_status_v0_5(self, fake_execute): - stdout = self.test_data.COLLIE_CLUSTER_INFO_0_5 - stderr = '' - fake_execute.return_value = (stdout, stderr) - self.client.check_cluster_status() - - @mock.patch.object(sheepdog.SheepdogClient, '_run_dog') - def test_check_cluster_status_v0_6(self, fake_execute): - stdout = self.test_data.COLLIE_CLUSTER_INFO_0_6 - stderr = '' - fake_execute.return_value = (stdout, stderr) - self.client.check_cluster_status() - - @mock.patch.object(sheepdog.SheepdogClient, '_run_dog') - @mock.patch.object(sheepdog, 'LOG') - def test_check_cluster_status_not_formatted(self, fake_logger, - fake_execute): - stdout = self.test_data.DOG_CLUSTER_INFO_TO_BE_FORMATTED - stderr = '' - expected_reason = _('Cluster is not formatted. ' - 'You should probably perform ' - '"dog cluster format".') - fake_execute.return_value = (stdout, stderr) - ex = self.assertRaises(exception.SheepdogError, - self.client.check_cluster_status) - self.assertEqual(expected_reason, ex.kwargs['reason']) - - @mock.patch.object(sheepdog.SheepdogClient, '_run_dog') - @mock.patch.object(sheepdog, 'LOG') - def test_check_cluster_status_waiting_to_join_cluster(self, fake_logger, - fake_execute): - stdout = self.test_data.DOG_CLUSTER_INFO_WAITING_OTHER_NODES - stderr = '' - expected_reason = _('Waiting for all nodes to join cluster. 
' - 'Ensure all sheep daemons are running.') - fake_execute.return_value = (stdout, stderr) - ex = self.assertRaises(exception.SheepdogError, - self.client.check_cluster_status) - self.assertEqual(expected_reason, ex.kwargs['reason']) - - @mock.patch.object(sheepdog.SheepdogClient, '_run_dog') - @mock.patch.object(sheepdog, 'LOG') - def test_check_cluster_status_shutting_down(self, fake_logger, - fake_execute): - stdout = self.test_data.DOG_CLUSTER_INFO_SHUTTING_DOWN - stderr = '' - expected_reason = _('Invalid sheepdog cluster status.') - fake_execute.return_value = (stdout, stderr) - ex = self.assertRaises(exception.SheepdogError, - self.client.check_cluster_status) - self.assertEqual(expected_reason, ex.kwargs['reason']) - - @mock.patch.object(sheepdog.SheepdogClient, '_run_dog') - @mock.patch.object(sheepdog, 'LOG') - def test_check_cluster_status_unknown_error(self, fake_logger, - fake_execute): - cmd = self.test_data.CMD_DOG_CLUSTER_INFO - exit_code = 2 - stdout = 'stdout_dummy' - stderr = 'stdout_dummy' - expected_msg = self.test_data.sheepdog_cmd_error(cmd=cmd, - exit_code=exit_code, - stdout=stdout, - stderr=stderr) - fake_execute.side_effect = exception.SheepdogCmdError( - cmd=cmd, exit_code=exit_code, stdout=stdout, stderr=stderr) - ex = self.assertRaises(exception.SheepdogCmdError, - self.client.check_cluster_status) - self.assertEqual(expected_msg, ex.msg) - - @mock.patch.object(sheepdog.SheepdogClient, '_run_dog') - def test_create_success(self, fake_execute): - expected_cmd = ('vdi', 'create', self._vdiname, '%sG' % self._vdisize) - fake_execute.return_value = ('', '') - self.client.create(self._vdiname, self._vdisize) - fake_execute.assert_called_once_with(*expected_cmd) - - @mock.patch.object(sheepdog.SheepdogClient, '_run_dog') - @mock.patch.object(sheepdog, 'LOG') - def test_create_vdi_already_exists(self, fake_logger, fake_execute): - cmd = self.test_data.cmd_dog_vdi_create(self._vdiname, self._vdisize) - exit_code = 1 - stdout = '' - stderr = 
(self.test_data.DOG_VDI_CREATE_VDI_ALREADY_EXISTS % - {'vdiname': self._vdiname}) - expected_msg = self.test_data.sheepdog_cmd_error( - cmd=cmd, exit_code=exit_code, stdout=stdout, stderr=stderr) - fake_execute.side_effect = exception.SheepdogCmdError( - cmd=cmd, exit_code=exit_code, stdout=stdout.replace('\n', '\\n'), - stderr=stderr.replace('\n', '\\n')) - ex = self.assertRaises(exception.SheepdogCmdError, self.client.create, - self._vdiname, self._vdisize) - self.assertTrue(fake_logger.error.called) - self.assertEqual(expected_msg, ex.msg) - - @mock.patch.object(sheepdog.SheepdogClient, '_run_dog') - @mock.patch.object(sheepdog, 'LOG') - def test_create_unknown_error(self, fake_logger, fake_execute): - cmd = self.test_data.cmd_dog_vdi_create(self._vdiname, self._vdisize) - exit_code = 1 - stdout = 'stdout_dummy' - stderr = 'stderr_dummy' - expected_msg = self.test_data.sheepdog_cmd_error( - cmd=cmd, exit_code=exit_code, stdout=stdout, stderr=stderr) - fake_execute.side_effect = exception.SheepdogCmdError( - cmd=cmd, exit_code=exit_code, stdout=stdout.replace('\n', '\\n'), - stderr=stderr.replace('\n', '\\n')) - ex = self.assertRaises(exception.SheepdogCmdError, self.client.create, - self._vdiname, self._vdisize) - self.assertTrue(fake_logger.error.called) - self.assertEqual(expected_msg, ex.msg) - - @mock.patch.object(sheepdog.SheepdogClient, '_run_dog') - def test_delete_success(self, fake_execute): - expected_cmd = ('vdi', 'delete', self._vdiname) - fake_execute.return_value = ('', '') - self.client.delete(self._vdiname) - fake_execute.assert_called_once_with(*expected_cmd) - - @mock.patch.object(sheepdog.SheepdogClient, '_run_dog') - @mock.patch.object(sheepdog, 'LOG') - def test_delete_vdi_not_found(self, fake_logger, fake_execute): - stdout = '' - stderr = (self.test_data.DOG_COMMAND_ERROR_VDI_NOT_EXISTS % - {'vdiname': self._vdiname}) - fake_execute.return_value = (stdout, stderr) - self.client.delete(self._vdiname) - 
self.assertTrue(fake_logger.warning.called) - - @mock.patch.object(sheepdog.SheepdogClient, '_run_dog') - @mock.patch.object(sheepdog, 'LOG') - def test_delete_unknown_error(self, fake_logger, fake_execute): - cmd = self.test_data.cmd_dog_vdi_delete(self._vdiname) - exit_code = 2 - stdout = 'stdout_dummy' - stderr = 'stderr_dummy' - expected_msg = self.test_data.sheepdog_cmd_error(cmd=cmd, - exit_code=exit_code, - stdout=stdout, - stderr=stderr) - fake_execute.side_effect = exception.SheepdogCmdError( - cmd=cmd, exit_code=exit_code, stdout=stdout.replace('\n', '\\n'), - stderr=stderr.replace('\n', '\\n')) - ex = self.assertRaises(exception.SheepdogCmdError, - self.client.delete, self._vdiname) - self.assertTrue(fake_logger.error.called) - self.assertEqual(expected_msg, ex.msg) - - @mock.patch.object(sheepdog.SheepdogClient, '_run_dog') - def test_create_snapshot_success(self, fake_execute): - args = (self._src_vdiname, self._snapname) - expected_cmd = ('vdi', 'snapshot', '-s', self._snapname, - self._src_vdiname) - fake_execute.return_value = ('', '') - self.client.create_snapshot(*args) - fake_execute.assert_called_once_with(*expected_cmd) - - @mock.patch.object(sheepdog.SheepdogClient, '_run_dog') - @mock.patch.object(sheepdog, 'LOG') - def test_create_snapshot_vdi_not_found(self, fake_logger, fake_execute): - args = (self._src_vdiname, self._snapname) - cmd = self.test_data.cmd_dog_vdi_create_snapshot(*args) - exit_code = 1 - stdout = '' - stderr = self.test_data.DOG_VDI_SNAPSHOT_VDI_NOT_FOUND - expected_msg = self.test_data.sheepdog_cmd_error(cmd=cmd, - exit_code=exit_code, - stdout=stdout, - stderr=stderr) - fake_execute.side_effect = exception.SheepdogCmdError( - cmd=cmd, exit_code=exit_code, stdout=stdout.replace('\n', '\\n'), - stderr=stderr.replace('\n', '\\n')) - ex = self.assertRaises(exception.SheepdogCmdError, - self.client.create_snapshot, *args) - self.assertTrue(fake_logger.error.called) - self.assertEqual(expected_msg, ex.msg) - - 
@mock.patch.object(sheepdog.SheepdogClient, '_run_dog') - @mock.patch.object(sheepdog, 'LOG') - def test_create_snapshot_snap_name_already_used(self, fake_logger, - fake_execute): - args = (self._src_vdiname, self._snapname) - cmd = self.test_data.cmd_dog_vdi_create_snapshot(*args) - exit_code = 1 - stdout = 'stdout_dummy' - stderr = self.test_data.DOG_VDI_SNAPSHOT_ALREADY_EXISTED - expected_msg = self.test_data.sheepdog_cmd_error(cmd=cmd, - exit_code=exit_code, - stdout=stdout, - stderr=stderr) - fake_execute.side_effect = exception.SheepdogCmdError( - cmd=cmd, exit_code=exit_code, stdout=stdout.replace('\n', '\\n'), - stderr=stderr.replace('\n', '\\n')) - ex = self.assertRaises(exception.SheepdogCmdError, - self.client.create_snapshot, *args) - self.assertTrue(fake_logger.error.called) - self.assertEqual(expected_msg, ex.msg) - - @mock.patch.object(sheepdog.SheepdogClient, '_run_dog') - @mock.patch.object(sheepdog, 'LOG') - def test_create_snapshot_unknown_error(self, fake_logger, fake_execute): - args = (self._src_vdiname, self._snapname) - cmd = self.test_data.cmd_dog_vdi_create_snapshot(*args) - exit_code = 1 - stdout = 'stdout_dummy' - stderr = 'unknown_error' - expected_msg = self.test_data.sheepdog_cmd_error(cmd=cmd, - exit_code=exit_code, - stdout=stdout, - stderr=stderr) - fake_execute.side_effect = exception.SheepdogCmdError( - cmd=cmd, exit_code=exit_code, stdout=stdout.replace('\n', '\\n'), - stderr=stderr.replace('\n', '\\n')) - ex = self.assertRaises(exception.SheepdogCmdError, - self.client.create_snapshot, *args) - self.assertTrue(fake_logger.error.called) - self.assertEqual(expected_msg, ex.msg) - - @mock.patch.object(sheepdog.SheepdogClient, '_run_dog') - @mock.patch.object(sheepdog, 'LOG') - def test_delete_snapshot_success(self, fake_logger, fake_execute): - args = (self._src_vdiname, self._snapname) - expected_cmd = ('vdi', 'delete', '-s', self._snapname, - self._src_vdiname) - fake_execute.return_value = ('', '') - 
self.client.delete_snapshot(*args) - fake_execute.assert_called_once_with(*expected_cmd) - - @mock.patch.object(sheepdog.SheepdogClient, '_run_dog') - @mock.patch.object(sheepdog, 'LOG') - def test_delete_snapshot_not_found(self, fake_logger, fake_execute): - args = (self._src_vdiname, self._snapname) - stdout = '' - stderr = self.test_data.DOG_VDI_SNAPSHOT_TAG_NOT_FOUND - fake_execute.return_value = (stdout, stderr) - self.client.delete_snapshot(*args) - self.assertTrue(fake_logger.warning.called) - - @mock.patch.object(sheepdog.SheepdogClient, '_run_dog') - @mock.patch.object(sheepdog, 'LOG') - def test_delete_snapshot_vdi_not_found(self, fake_logger, fake_execute): - args = (self._src_vdiname, self._snapname) - stdout = '' - stderr = self.test_data.DOG_VDI_SNAPSHOT_VOLUME_NOT_FOUND - fake_execute.return_value = (stdout, stderr) - self.client.delete_snapshot(*args) - self.assertTrue(fake_logger.warning.called) - - @mock.patch.object(sheepdog.SheepdogClient, '_run_dog') - @mock.patch.object(sheepdog, 'LOG') - def test_delete_snapshot_unknown_error(self, fake_logger, fake_execute): - args = (self._src_vdiname, self._snapname) - cmd = self.test_data.cmd_dog_vdi_delete_snapshot(*args) - exit_code = 2 - stdout = 'stdout_dummy' - stderr = 'unknown_error' - expected_msg = self.test_data.sheepdog_cmd_error(cmd=cmd, - exit_code=exit_code, - stdout=stdout, - stderr=stderr) - fake_execute.side_effect = exception.SheepdogCmdError( - cmd=cmd, exit_code=exit_code, stdout=stdout.replace('\n', '\\n'), - stderr=stderr.replace('\n', '\\n')) - ex = self.assertRaises(exception.SheepdogCmdError, - self.client.delete_snapshot, *args) - self.assertTrue(fake_logger.error.called) - self.assertEqual(expected_msg, ex.msg) - - @mock.patch.object(sheepdog.SheepdogClient, '_run_qemu_img') - def test_clone_success(self, fake_execute): - args = (self._src_vdiname, self._snapname, - self._dst_vdiname, self._dst_vdisize) - src_volume = 'sheepdog:%(src_vdiname)s:%(snapname)s' % { - 'src_vdiname': 
self._src_vdiname, 'snapname': self._snapname} - dst_volume = 'sheepdog:%s' % self._dst_vdiname - expected_cmd = ('create', '-b', src_volume, dst_volume, - '%sG' % self._dst_vdisize) - fake_execute.return_code = ("", "") - self.client.clone(*args) - fake_execute.assert_called_once_with(*expected_cmd) - - @mock.patch.object(sheepdog.SheepdogClient, '_run_qemu_img') - @mock.patch.object(sheepdog, 'LOG') - def test_clone_dst_vdi_already_exists(self, fake_logger, fake_execute): - args = (self._src_vdiname, self._snapname, - self._dst_vdiname, self._dst_vdisize) - cmd = self.test_data.cmd_qemuimg_vdi_clone(*args) - exit_code = 2 - stdout = 'stdout_dummy' - stderr = self.test_data.QEMU_IMG_VDI_ALREADY_EXISTS - expected_msg = self.test_data.sheepdog_cmd_error(cmd=cmd, - exit_code=exit_code, - stdout=stdout, - stderr=stderr) - fake_execute.side_effect = exception.SheepdogCmdError( - cmd=cmd, exit_code=exit_code, stdout=stdout.replace('\n', '\\n'), - stderr=stderr.replace('\n', '\\n')) - ex = self.assertRaises(exception.SheepdogCmdError, self.client.clone, - *args) - self.assertTrue(fake_logger.error.called) - self.assertEqual(expected_msg, ex.msg) - - @mock.patch.object(sheepdog.SheepdogClient, '_run_qemu_img') - @mock.patch.object(sheepdog, 'LOG') - def test_clone_src_vdi_not_found(self, fake_logger, fake_execute): - args = (self._src_vdiname, self._snapname, - self._dst_vdiname, self._dst_vdisize) - cmd = self.test_data.cmd_qemuimg_vdi_clone(*args) - exit_code = 2 - stdout = 'stdout_dummy' - stderr = self.test_data.QEMU_IMG_VDI_NOT_FOUND - expected_msg = self.test_data.sheepdog_cmd_error(cmd=cmd, - exit_code=exit_code, - stdout=stdout, - stderr=stderr) - fake_execute.side_effect = exception.SheepdogCmdError( - cmd=cmd, exit_code=exit_code, stdout=stdout.replace('\n', '\\n'), - stderr=stderr.replace('\n', '\\n')) - ex = self.assertRaises(exception.SheepdogCmdError, self.client.clone, - *args) - self.assertTrue(fake_logger.error.called) - self.assertEqual(expected_msg, 
ex.msg) - - @mock.patch.object(sheepdog.SheepdogClient, '_run_qemu_img') - @mock.patch.object(sheepdog, 'LOG') - def test_clone_src_snapshot_not_found(self, fake_logger, fake_execute): - args = (self._src_vdiname, self._snapname, - self._dst_vdiname, self._dst_vdisize) - cmd = self.test_data.cmd_qemuimg_vdi_clone(*args) - exit_code = 2 - stdout = 'stdout_dummy' - stderr = self.test_data.QEMU_IMG_SNAPSHOT_NOT_FOUND - expected_msg = self.test_data.sheepdog_cmd_error(cmd=cmd, - exit_code=exit_code, - stdout=stdout, - stderr=stderr) - fake_execute.side_effect = exception.SheepdogCmdError( - cmd=cmd, exit_code=exit_code, stdout=stdout.replace('\n', '\\n'), - stderr=stderr.replace('\n', '\\n')) - ex = self.assertRaises(exception.SheepdogCmdError, self.client.clone, - *args) - self.assertTrue(fake_logger.error.called) - self.assertEqual(expected_msg, ex.msg) - - @mock.patch.object(sheepdog.SheepdogClient, '_run_qemu_img') - @mock.patch.object(sheepdog, 'LOG') - def test_clone_too_large_volume_size(self, fake_logger, fake_execute): - args = (self._src_vdiname, self._snapname, - self._dst_vdiname, self._dst_vdisize) - cmd = self.test_data.cmd_qemuimg_vdi_clone(*args) - exit_code = 2 - stdout = 'stdout_dummy' - stderr = self.test_data.QEMU_IMG_SIZE_TOO_LARGE - expected_msg = self.test_data.sheepdog_cmd_error(cmd=cmd, - exit_code=exit_code, - stdout=stdout, - stderr=stderr) - fake_execute.side_effect = exception.SheepdogCmdError( - cmd=cmd, exit_code=exit_code, stdout=stdout.replace('\n', '\\n'), - stderr=stderr.replace('\n', '\\n')) - ex = self.assertRaises(exception.SheepdogCmdError, self.client.clone, - *args) - self.assertTrue(fake_logger.error.called) - self.assertEqual(expected_msg, ex.msg) - - @mock.patch.object(sheepdog.SheepdogClient, '_run_qemu_img') - @mock.patch.object(sheepdog, 'LOG') - def test_clone_unknown_error(self, fake_logger, fake_execute): - args = (self._src_vdiname, self._snapname, - self._dst_vdiname, self._dst_vdisize) - cmd = 
self.test_data.cmd_qemuimg_vdi_clone(*args) - exit_code = 2 - stdout = 'stdout_dummy' - stderr = 'stderr_dummy' - expected_msg = self.test_data.sheepdog_cmd_error(cmd=cmd, - exit_code=exit_code, - stdout=stdout, - stderr=stderr) - fake_execute.side_effect = exception.SheepdogCmdError( - cmd=cmd, exit_code=exit_code, stdout=stdout.replace('\n', '\\n'), - stderr=stderr.replace('\n', '\\n')) - ex = self.assertRaises(exception.SheepdogCmdError, self.client.clone, - *args) - self.assertTrue(fake_logger.error.called) - self.assertEqual(expected_msg, ex.msg) - - @mock.patch.object(sheepdog.SheepdogClient, '_run_dog') - def test_resize_success(self, fake_execute): - expected_cmd = ('vdi', 'resize', self._vdiname, 10 * 1024 ** 3) - fake_execute.return_value = ('', '') - self.client.resize(self._vdiname, 10) - fake_execute.assert_called_once_with(*expected_cmd) - - @mock.patch.object(sheepdog.SheepdogClient, '_run_dog') - @mock.patch.object(sheepdog, 'LOG') - def test_resize_vdi_not_found(self, fake_logger, fake_execute): - cmd = self.test_data.cmd_dog_vdi_resize(self._vdiname, 10 * 1024 ** 3) - exit_code = 1 - stdout = 'stdout_dummy' - stderr = (self.test_data.DOG_COMMAND_ERROR_VDI_NOT_EXISTS % - {'vdiname': self._vdiname}) - expected_msg = self.test_data.sheepdog_cmd_error(cmd=cmd, - exit_code=exit_code, - stdout=stdout, - stderr=stderr) - fake_execute.side_effect = exception.SheepdogCmdError( - cmd=cmd, exit_code=exit_code, stdout=stdout.replace('\n', '\\n'), - stderr=stderr.replace('\n', '\\n')) - ex = self.assertRaises(exception.SheepdogCmdError, - self.client.resize, self._vdiname, 1) - self.assertTrue(fake_logger.error.called) - self.assertEqual(expected_msg, ex.msg) - - @mock.patch.object(sheepdog.SheepdogClient, '_run_dog') - @mock.patch.object(sheepdog, 'LOG') - def test_resize_shrinking_not_supported(self, fake_logger, fake_execute): - cmd = self.test_data.cmd_dog_vdi_resize(self._vdiname, 1 * 1024 ** 3) - exit_code = 1 - stdout = 'stdout_dummy' - stderr = 
self.test_data.DOG_VDI_RESIZE_SIZE_SHRINK - expected_msg = self.test_data.sheepdog_cmd_error(cmd=cmd, - exit_code=exit_code, - stdout=stdout, - stderr=stderr) - fake_execute.side_effect = exception.SheepdogCmdError( - cmd=cmd, exit_code=exit_code, stdout=stdout.replace('\n', '\\n'), - stderr=stderr.replace('\n', '\\n')) - ex = self.assertRaises(exception.SheepdogCmdError, - self.client.resize, self._vdiname, 1) - self.assertTrue(fake_logger.error.called) - self.assertEqual(expected_msg, ex.msg) - - @mock.patch.object(sheepdog.SheepdogClient, '_run_dog') - @mock.patch.object(sheepdog, 'LOG') - def test_resize_too_large_size(self, fake_logger, fake_execute): - cmd = self.test_data.cmd_dog_vdi_resize(self._vdiname, 5 * 1024 ** 4) - exit_code = 64 - stdout = 'stdout_dummy' - stderr = self.test_data.DOG_VDI_RESIZE_TOO_LARGE - expected_msg = self.test_data.sheepdog_cmd_error(cmd=cmd, - exit_code=exit_code, - stdout=stdout, - stderr=stderr) - fake_execute.side_effect = exception.SheepdogCmdError( - cmd=cmd, exit_code=exit_code, stdout=stdout.replace('\n', '\\n'), - stderr=stderr.replace('\n', '\\n')) - ex = self.assertRaises(exception.SheepdogCmdError, - self.client.resize, self._vdiname, 5120) - self.assertTrue(fake_logger.error.called) - self.assertEqual(expected_msg, ex.msg) - - @mock.patch.object(sheepdog.SheepdogClient, '_run_dog') - @mock.patch.object(sheepdog, 'LOG') - def test_resize_unknown_error(self, fake_logger, fake_execute): - cmd = self.test_data.cmd_dog_vdi_resize(self._vdiname, 10 * 1024 ** 3) - exit_code = 2 - stdout = 'stdout_dummy' - stderr = 'stderr_dummy' - expected_msg = self.test_data.sheepdog_cmd_error(cmd=cmd, - exit_code=exit_code, - stdout=stdout, - stderr=stderr) - fake_execute.side_effect = exception.SheepdogCmdError( - cmd=cmd, exit_code=exit_code, stdout=stdout.replace('\n', '\\n'), - stderr=stderr.replace('\n', '\\n')) - ex = self.assertRaises(exception.SheepdogCmdError, - self.client.resize, self._vdiname, 10) - 
self.assertTrue(fake_logger.error.called) - self.assertEqual(expected_msg, ex.msg) - - @mock.patch.object(sheepdog.SheepdogClient, '_run_dog') - def test_get_volume_stats_success(self, fake_execute): - expected_cmd = ('node', 'info', '-r') - fake_execute.return_value = (self.test_data.COLLIE_NODE_INFO, '') - self.client.get_volume_stats() - fake_execute.assert_called_once_with(*expected_cmd) - - @mock.patch.object(sheepdog.SheepdogClient, '_run_dog') - @mock.patch.object(sheepdog, 'LOG') - def test_get_volume_stats_unknown_error(self, fake_logger, fake_execute): - cmd = self.test_data.cmd_dog_node_info() - exit_code = 2 - stdout = 'stdout_dummy' - stderr = 'stderr_dummy' - expected_msg = self.test_data.sheepdog_cmd_error(cmd=cmd, - exit_code=exit_code, - stdout=stdout, - stderr=stderr) - fake_execute.side_effect = exception.SheepdogCmdError( - cmd=cmd, exit_code=exit_code, stdout=stdout.replace('\n', '\\n'), - stderr=stderr.replace('\n', '\\n')) - ex = self.assertRaises(exception.SheepdogCmdError, - self.client.get_volume_stats) - self.assertTrue(fake_logger.error.called) - self.assertEqual(expected_msg, ex.msg) - - @mock.patch.object(sheepdog.SheepdogClient, '_run_dog') - def test_get_vdi_info_success(self, fake_execute): - - expected_cmd = ('vdi', 'list', self._vdiname, '-r') - fake_execute.return_value = (self.test_data.COLLIE_VDI_LIST, '') - self.client.get_vdi_info(self._vdiname) - fake_execute.assert_called_once_with(*expected_cmd) - - @mock.patch.object(sheepdog.SheepdogClient, '_run_dog') - @mock.patch.object(sheepdog, 'LOG') - def test_get_vdi_info_unknown_error(self, fake_logger, fake_execute): - cmd = self.test_data.cmd_dog_vdi_list(self._vdiname) - exit_code = 2 - stdout = 'stdout_dummy' - stderr = 'stderr_dummy' - expected_msg = self.test_data.sheepdog_cmd_error(cmd=cmd, - exit_code=exit_code, - stdout=stdout, - stderr=stderr) - fake_execute.side_effect = exception.SheepdogCmdError( - cmd=cmd, exit_code=exit_code, stdout=stdout.replace('\n', '\\n'), - 
stderr=stderr.replace('\n', '\\n')) - ex = self.assertRaises(exception.SheepdogCmdError, - self.client.get_vdi_info, self._vdiname) - self.assertTrue(fake_logger.error.called) - self.assertEqual(expected_msg, ex.msg) - - @mock.patch.object(sheepdog.SheepdogClient, '_run_dog') - def test_update_node_list_success(self, fake_execute): - expected_cmd = ('node', 'list', '-r') - fake_execute.return_value = (self.test_data.COLLIE_NODE_LIST, '') - self.client.update_node_list() - fake_execute.assert_called_once_with(*expected_cmd) - - @mock.patch.object(sheepdog.SheepdogClient, '_run_dog') - @mock.patch.object(sheepdog, 'LOG') - def test_update_node_list_unknown_error(self, fake_logger, fake_execute): - cmd = self.test_data.cmd_dog_node_list() - exit_code = 2 - stdout = 'stdout_dummy' - stderr = 'stderr_dummy' - expected_msg = self.test_data.sheepdog_cmd_error(cmd=cmd, - exit_code=exit_code, - stdout=stdout, - stderr=stderr) - fake_execute.side_effect = exception.SheepdogCmdError( - cmd=cmd, exit_code=exit_code, stdout=stdout.replace('\n', '\\n'), - stderr=stderr.replace('\n', '\\n')) - ex = self.assertRaises(exception.SheepdogCmdError, - self.client.update_node_list) - self.assertTrue(fake_logger.error.called) - self.assertEqual(expected_msg, ex.msg) - - -class SheepdogDriverTestCase(test.TestCase): - def setUp(self): - super(SheepdogDriverTestCase, self).setUp() - self._cfg = conf.Configuration(None) - self._cfg.sheepdog_store_address = SHEEP_ADDR - self._cfg.sheepdog_store_port = SHEEP_PORT - self.driver = sheepdog.SheepdogDriver(configuration=self._cfg) - db_driver = self.driver.configuration.db_driver - self.db = importutils.import_module(db_driver) - self.driver.db = self.db - self.driver.do_setup(None) - self.test_data = SheepdogDriverTestDataGenerator() - self.client = self.driver.client - self._addr = SHEEP_ADDR - self._port = SHEEP_PORT - self._vdiname = self.test_data.TEST_VOLUME.name - self._vdisize = self.test_data.TEST_VOLUME.size - self._src_vdiname = 
self.test_data.TEST_SNAPSHOT.volume_name - self._snapname = self.test_data.TEST_SNAPSHOT.name - self._dst_vdiname = self.test_data.TEST_CLONED_VOLUME.name - self._dst_vdisize = self.test_data.TEST_CLONED_VOLUME.size - - @mock.patch.object(sheepdog.SheepdogClient, 'update_node_list') - @mock.patch.object(sheepdog.SheepdogClient, 'check_cluster_status') - def test_check_for_setup_error(self, fake_check, fake_update): - self.driver.check_for_setup_error() - fake_check.assert_called_once_with() - fake_update.assert_called_once_with() - - @mock.patch.object(sheepdog.SheepdogClient, 'create') - def test_create_volume(self, fake_execute): - self.driver.create_volume(self.test_data.TEST_VOLUME) - fake_execute.assert_called_once_with(self._vdiname, self._vdisize) - - @mock.patch.object(sheepdog.SheepdogClient, 'delete') - def test_delete_volume(self, fake_execute): - self.driver.delete_volume(self.test_data.TEST_VOLUME) - fake_execute.assert_called_once_with(self._vdiname) - - @mock.patch.object(sheepdog.SheepdogClient, 'get_volume_stats') - def test_update_volume_stats(self, fake_execute): - fake_execute.return_value = self.test_data.COLLIE_NODE_INFO - expected = dict( - volume_backend_name='sheepdog', - vendor_name='Open Source', - driver_version=self.driver.VERSION, - storage_protocol='sheepdog', - total_capacity_gb=float(107287605248) / units.Gi, - free_capacity_gb=float(107287605248 - 3623897354) / units.Gi, - reserved_percentage=0, - QoS_support=False) - actual = self.driver.get_volume_stats(True) - self.assertDictEqual(expected, actual) - - @mock.patch.object(sheepdog.SheepdogClient, '_run_dog') - def test_copy_image_to_volume(self, fake_run_dog): - @contextlib.contextmanager - def fake_temp_file(): - class FakeTmp(object): - def __init__(self, name): - self.name = name - yield FakeTmp('test').name - - def fake_try_execute(obj, *command, **kwargs): - return True - - self.mock_object(image_utils, 'temporary_file', fake_temp_file) - self.mock_object(image_utils, 
'fetch_verify_image', - return_value=None) - self.mock_object(image_utils, 'convert_image', - return_value=None) - self.mock_object(sheepdog.SheepdogDriver, '_try_execute', - fake_try_execute) - fake_run_dog.return_value = ('fake_stdout', 'fake_stderr') - self.driver.copy_image_to_volume(None, self.test_data.TEST_VOLUME, - FakeImageService(), None) - - @mock.patch('six.moves.builtins.open') - @mock.patch('cinder.image.image_utils.temporary_file') - def test_copy_volume_to_image(self, mock_temp, mock_open): - fake_context = {} - fake_volume = {'name': 'volume-00000001'} - fake_image_service = mock.Mock() - fake_image_meta = {'id': '10958016-e196-42e3-9e7f-5d8927ae3099'} - temp_file = mock_temp.return_value.__enter__.return_value - - patch = mock.patch.object - with patch(self.driver, '_try_execute') as fake_try_execute: - self.driver.copy_volume_to_image(fake_context, - fake_volume, - fake_image_service, - fake_image_meta) - - expected_cmd = ('qemu-img', - 'convert', - '-f', 'raw', - '-t', 'none', - '-O', 'raw', - 'sheepdog:%s:%s:%s' % ( - self._addr, - self._port, - fake_volume['name']), - mock.ANY) - mock_open.assert_called_once_with(temp_file, 'rb') - fake_try_execute.assert_called_once_with(*expected_cmd) - fake_image_service.update.assert_called_once_with( - fake_context, fake_image_meta['id'], mock.ANY, mock.ANY) - - @mock.patch('six.moves.builtins.open') - @mock.patch('cinder.image.image_utils.temporary_file') - def test_copy_volume_to_image_nonexistent_volume(self, mock_temp, - mock_open): - fake_context = {} - fake_volume = { - 'name': 'nonexistent-volume-82c4539e-c2a5-11e4-a293-0aa186c60fe0'} - fake_image_service = mock.Mock() - fake_image_meta = {'id': '10958016-e196-42e3-9e7f-5d8927ae3099'} - - patch = mock.patch.object - with patch(self.driver, '_try_execute') as fake_try_execute: - fake_try_execute.side_effect = ( - processutils.ProcessExecutionError) - args = (fake_context, fake_volume, fake_image_service, - fake_image_meta) - expected_cmd = 
('qemu-img', - 'convert', - '-f', 'raw', - '-t', 'none', - '-O', 'raw', - 'sheepdog:%s:%s:%s' % ( - self._addr, - self._port, - fake_volume['name']), - mock.ANY) - - self.assertRaises(processutils.ProcessExecutionError, - self.driver.copy_volume_to_image, - *args) - - fake_try_execute.assert_called_once_with(*expected_cmd) - mock_open.assert_not_called() - fake_image_service.update.assert_not_called() - - @mock.patch.object(sheepdog.SheepdogClient, 'create_snapshot') - @mock.patch.object(sheepdog.SheepdogClient, 'clone') - @mock.patch.object(sheepdog.SheepdogClient, 'delete_snapshot') - def test_create_cloned_volume(self, fake_delete_snapshot, - fake_clone, fake_create_snapshot): - src_vol = self.test_data.TEST_VOLUME - cloned_vol = self.test_data.TEST_CLONED_VOLUME - - self.driver.create_cloned_volume(cloned_vol, src_vol) - snapshot_name = 'tmp-snap-%s-%s' % (src_vol.name, cloned_vol.id) - fake_create_snapshot.assert_called_once_with(src_vol.name, - snapshot_name) - fake_clone.assert_called_once_with(src_vol.name, snapshot_name, - cloned_vol.name, cloned_vol.size) - fake_delete_snapshot.assert_called_once_with(src_vol.name, - snapshot_name) - - @mock.patch.object(sheepdog.SheepdogClient, 'create_snapshot') - @mock.patch.object(sheepdog.SheepdogClient, 'clone') - @mock.patch.object(sheepdog.SheepdogClient, 'delete_snapshot') - @mock.patch.object(sheepdog, 'LOG') - def test_create_cloned_volume_failure(self, fake_logger, - fake_delete_snapshot, - fake_clone, fake_create_snapshot): - src_vol = self.test_data.TEST_VOLUME - cloned_vol = self.test_data.TEST_CLONED_VOLUME - snapshot_name = 'tmp-snap-%s-%s' % (src_vol.name, cloned_vol.id) - - fake_clone.side_effect = exception.SheepdogCmdError( - cmd='dummy', exit_code=1, stdout='dummy', stderr='dummy') - self.assertRaises(exception.SheepdogCmdError, - self.driver.create_cloned_volume, - cloned_vol, src_vol) - fake_delete_snapshot.assert_called_once_with(src_vol.name, - snapshot_name) - 
self.assertTrue(fake_logger.error.called) - - @mock.patch.object(sheepdog.SheepdogClient, 'create_snapshot') - def test_create_snapshot(self, fake_create_snapshot): - snapshot = self.test_data.TEST_SNAPSHOT - self.driver.create_snapshot(snapshot) - fake_create_snapshot.assert_called_once_with(snapshot.volume_name, - snapshot.name) - - @mock.patch.object(sheepdog.SheepdogClient, 'delete_snapshot') - def test_delete_snapshot(self, fake_delete_snapshot): - snapshot = self.test_data.TEST_SNAPSHOT - self.driver.delete_snapshot(snapshot) - fake_delete_snapshot.assert_called_once_with(snapshot.volume_name, - snapshot.name) - - def test_clone_image_success(self): - context = {} - image_id = "caa4ffd0-fake-fake-fake-f8631a807f5a" - image_location = ('sheepdog://192.168.1.111:7000:%s' % image_id, None) - image_meta = {'id': image_id, 'size': 1, 'disk_format': 'raw'} - image_service = '' - - patch = mock.patch.object - with patch(self.driver, '_is_cloneable', return_value=True): - with patch(self.driver, 'create_cloned_volume'): - with patch(self.client, 'resize'): - model_updated, cloned = self.driver.clone_image( - context, self.test_data.TEST_CLONED_VOLUME, - image_location, image_meta, image_service) - - self.assertTrue(cloned) - self.assertEqual("sheepdog:%s:%s:%s" % ( - self._addr, - self._port, - self.test_data.TEST_CLONED_VOLUME.name), - model_updated['provider_location']) - - def test_clone_image_failure(self): - context = {} - fake_vol = {} - image_location = ('image_location', None) - image_meta = {} - image_service = '' - - with mock.patch.object(self.driver, '_is_cloneable', - lambda *args: False): - result = self.driver.clone_image( - context, fake_vol, image_location, image_meta, image_service) - self.assertEqual(({}, False), result) - - def test_is_cloneable(self): - uuid = '87f1b01c-f46c-4537-bd5d-23962f5f4316' - location = 'sheepdog://127.0.0.1:7000:%s' % uuid - image_meta = {'id': uuid, 'size': 1, 'disk_format': 'raw'} - invalid_image_meta = {'id': uuid, 
'size': 1, 'disk_format': 'iso'} - - with mock.patch.object(self.client, 'get_vdi_info') as fake_execute: - fake_execute.return_value = self.test_data.COLLIE_VDI_LIST - self.assertTrue( - self.driver._is_cloneable(location, image_meta)) - - # Test for invalid location - self.assertFalse( - self.driver._is_cloneable('invalid-location', image_meta)) - - # Test for image not exist in sheepdog cluster - fake_execute.return_value = '' - self.assertFalse( - self.driver._is_cloneable(location, image_meta)) - - # Test for invalid image meta - self.assertFalse( - self.driver._is_cloneable(location, invalid_image_meta)) - - def test_create_volume_from_snapshot(self): - dst_volume = self.test_data.TEST_CLONED_VOLUME - snapshot = self.test_data.TEST_SNAPSHOT - with mock.patch.object(self.client, 'clone') as fake_execute: - self.driver.create_volume_from_snapshot(dst_volume, snapshot) - fake_execute.assert_called_once_with(self._src_vdiname, - self._snapname, - self._dst_vdiname, - self._dst_vdisize) - - def test_initialize_connection(self): - fake_volume = self.test_data.TEST_VOLUME - expected = { - 'driver_volume_type': 'sheepdog', - 'data': { - 'name': fake_volume.name, - 'hosts': ["127.0.0.1"], - 'ports': ["7000"], - } - } - actual = self.driver.initialize_connection(fake_volume, None) - self.assertDictEqual(expected, actual) - - @mock.patch.object(sheepdog.SheepdogClient, 'resize') - @mock.patch.object(sheepdog, 'LOG') - def test_extend_volume(self, fake_logger, fake_execute): - self.driver.extend_volume(self.test_data.TEST_VOLUME, 10) - fake_execute.assert_called_once_with(self._vdiname, 10) - self.assertTrue(fake_logger.debug.called) diff --git a/cinder/tests/unit/volume/drivers/test_tegile.py b/cinder/tests/unit/volume/drivers/test_tegile.py deleted file mode 100644 index 6a92fba3c..000000000 --- a/cinder/tests/unit/volume/drivers/test_tegile.py +++ /dev/null @@ -1,408 +0,0 @@ -# Copyright (c) 2015 by Tegile Systems, Inc. -# All Rights Reserved. 
-# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -""" -Volume driver Test for Tegile storage. -""" - -import mock - -from cinder import context -from cinder.exception import TegileAPIException -from cinder import test -from cinder.volume.drivers import tegile - -BASE_DRIVER = tegile.TegileIntelliFlashVolumeDriver -ISCSI_DRIVER = tegile.TegileISCSIDriver -FC_DRIVER = tegile.TegileFCDriver - -test_config = mock.Mock() -test_config.san_ip = 'some-ip' -test_config.san_login = 'some-user' -test_config.san_password = 'some-password' -test_config.san_is_local = True -test_config.tegile_default_pool = 'random-pool' -test_config.tegile_default_project = 'random-project' -test_config.volume_backend_name = "unittest" - -test_volume = {'host': 'node#testPool', - 'name': 'testvol', - 'id': 'a24c2ee8-525a-4406-8ccd-8d38688f8e9e', - '_name_id': 'testvol', - 'metadata': {'project': 'testProj'}, - 'provider_location': None, - 'size': 10} - -test_snapshot = {'name': 'testSnap', - 'id': '07ae9978-5445-405e-8881-28f2adfee732', - 'volume': {'host': 'node#testPool', - 'size': 1, - '_name_id': 'testvol' - } - } - -array_stats = {'total_capacity_gb': 4569.199686084874, - 'free_capacity_gb': 4565.381390112452, - 'pools': [{'total_capacity_gb': 913.5, - 'QoS_support': False, - 'free_capacity_gb': 911.812650680542, - 'reserved_percentage': 0, - 'pool_name': 'pyramid' - }, - {'total_capacity_gb': 2742.1996604874, - 'QoS_support': False, - 'free_capacity_gb': 2740.148867149747, - 
'reserved_percentage': 0, - 'pool_name': 'cobalt' - }, - {'total_capacity_gb': 913.5, - 'QoS_support': False, - 'free_capacity_gb': 913.4198722839355, - 'reserved_percentage': 0, - 'pool_name': 'test' - }] - } - - -class FakeTegileService(object): - @staticmethod - def send_api_request(method, params=None, - request_type='post', - api_service='v2', - fine_logging=False): - if method is 'createVolume': - return '' - elif method is 'deleteVolume': - return '' - elif method is 'createVolumeSnapshot': - return '' - elif method is 'deleteVolumeSnapshot': - return '' - elif method is 'cloneVolumeSnapshot': - return '' - elif method is 'listPools': - return '' - elif method is 'resizeVolume': - return '' - elif method is 'getVolumeSizeinGB': - return 25 - elif method is 'getISCSIMappingForVolume': - return {'target_lun': 27, - 'target_iqn': 'iqn.2012-02.com.tegile:openstack-cobalt', - 'target_portal': '10.68.103.106:3260' - } - elif method is 'getFCPortsForVolume': - return {'target_lun': 12, - 'initiator_target_map': - '{"21000024ff59bb6e":["21000024ff578701",],' - '"21000024ff59bb6f":["21000024ff578700",],}', - 'target_wwn': '["21000024ff578700","21000024ff578701",]'} - elif method is 'getArrayStats': - return array_stats - - -fake_tegile_backend = FakeTegileService() - - -class FakeTegileServiceFail(object): - @staticmethod - def send_api_request(method, params=None, - request_type='post', - api_service='v2', - fine_logging=False): - raise TegileAPIException - - -fake_tegile_backend_fail = FakeTegileServiceFail() - - -class TegileIntelliFlashVolumeDriverTestCase(test.TestCase): - def setUp(self): - self.ctxt = context.get_admin_context() - self.configuration = test_config - super(TegileIntelliFlashVolumeDriverTestCase, self).setUp() - - def test_create_volume(self): - tegile_driver = self.get_object(self.configuration) - with mock.patch.object(tegile_driver, - '_api_executor', - fake_tegile_backend): - self.assertEqual({ - 'metadata': {'pool': 'testPool', - 'project': 
test_config.tegile_default_project - } - }, tegile_driver.create_volume(test_volume)) - - def test_create_volume_fail(self): - tegile_driver = self.get_object(self.configuration) - with mock.patch.object(tegile_driver, - '_api_executor', - fake_tegile_backend_fail): - self.assertRaises(TegileAPIException, - tegile_driver.create_volume, - test_volume) - - def test_delete_volume(self): - tegile_driver = self.get_object(self.configuration) - with mock.patch.object(tegile_driver, - '_api_executor', - fake_tegile_backend): - tegile_driver.delete_volume(test_volume) - - def test_delete_volume_fail(self): - tegile_driver = self.get_object(self.configuration) - with mock.patch.object(tegile_driver, - '_api_executor', - fake_tegile_backend_fail): - self.assertRaises(TegileAPIException, - tegile_driver.delete_volume, - test_volume) - - def test_create_snapshot(self): - tegile_driver = self.get_object(self.configuration) - with mock.patch.object(tegile_driver, - '_api_executor', - fake_tegile_backend): - tegile_driver.create_snapshot(test_snapshot) - - def test_create_snapshot_fail(self): - tegile_driver = self.get_object(self.configuration) - with mock.patch.object(tegile_driver, - '_api_executor', - fake_tegile_backend_fail): - self.assertRaises(TegileAPIException, - tegile_driver.create_snapshot, - test_snapshot) - - def test_delete_snapshot(self): - tegile_driver = self.get_object(self.configuration) - with mock.patch.object(tegile_driver, - '_api_executor', - fake_tegile_backend): - tegile_driver.delete_snapshot(test_snapshot) - - def test_delete_snapshot_fail(self): - tegile_driver = self.get_object(self.configuration) - with mock.patch.object(tegile_driver, - '_api_executor', - fake_tegile_backend_fail): - self.assertRaises(TegileAPIException, - tegile_driver.delete_snapshot, - test_snapshot) - - def test_create_volume_from_snapshot(self): - tegile_driver = self.get_object(self.configuration) - with mock.patch.object(tegile_driver, - '_api_executor', - 
fake_tegile_backend): - self.assertEqual({ - 'metadata': {'pool': 'testPool', - 'project': test_config.tegile_default_project - } - }, tegile_driver.create_volume_from_snapshot(test_volume, - test_snapshot)) - - def test_create_volume_from_snapshot_fail(self): - tegile_driver = self.get_object(self.configuration) - with mock.patch.object(tegile_driver, - '_api_executor', - fake_tegile_backend_fail): - self.assertRaises(TegileAPIException, - tegile_driver.create_volume_from_snapshot, - test_volume, test_snapshot) - - def test_create_cloned_volume(self): - tegile_driver = self.get_object(self.configuration) - with mock.patch.object(tegile_driver, - '_api_executor', - fake_tegile_backend): - self.assertEqual({'metadata': {'project': 'testProj', - 'pool': 'testPool'}}, - tegile_driver.create_cloned_volume(test_volume, - test_volume)) - - def test_create_cloned_volume_fail(self): - tegile_driver = self.get_object(self.configuration) - with mock.patch.object(tegile_driver, - '_api_executor', - fake_tegile_backend_fail): - self.assertRaises(TegileAPIException, - tegile_driver.create_cloned_volume, - test_volume, test_volume) - - def test_get_volume_stats(self): - tegile_driver = self.get_object(self.configuration) - with mock.patch.object(tegile_driver, - '_api_executor', - fake_tegile_backend): - self.assertEqual({'driver_version': '1.0.0', - 'free_capacity_gb': 4565.381390112452, - 'pools': [{'QoS_support': False, - 'allocated_capacity_gb': 0.0, - 'free_capacity_gb': 911.812650680542, - 'pool_name': 'pyramid', - 'reserved_percentage': 0, - 'total_capacity_gb': 913.5}, - {'QoS_support': False, - 'allocated_capacity_gb': 0.0, - 'free_capacity_gb': 2740.148867149747, - 'pool_name': 'cobalt', - 'reserved_percentage': 0, - 'total_capacity_gb': 2742.1996604874}, - {'QoS_support': False, - 'allocated_capacity_gb': 0.0, - 'free_capacity_gb': 913.4198722839355, - 'pool_name': 'test', - 'reserved_percentage': 0, - 'total_capacity_gb': 913.5}], - 'storage_protocol': 'iSCSI', - 
'total_capacity_gb': 4569.199686084874, - 'vendor_name': 'Tegile Systems Inc.', - 'volume_backend_name': 'unittest'}, - tegile_driver.get_volume_stats(True)) - - def test_get_pool(self): - tegile_driver = self.get_object(self.configuration) - with mock.patch.object(tegile_driver, - '_api_executor', - fake_tegile_backend): - self.assertEqual('testPool', tegile_driver.get_pool(test_volume)) - - def test_extend_volume(self): - tegile_driver = self.get_object(self.configuration) - with mock.patch.object(tegile_driver, - '_api_executor', - fake_tegile_backend): - tegile_driver.extend_volume(test_volume, 12) - - def test_extend_volume_fail(self): - tegile_driver = self.get_object(self.configuration) - with mock.patch.object(tegile_driver, - '_api_executor', - fake_tegile_backend_fail): - self.assertRaises(TegileAPIException, - tegile_driver.extend_volume, - test_volume, 30) - - def test_manage_existing(self): - tegile_driver = self.get_object(self.configuration) - existing_ref = {'name': 'existingvol'} - with mock.patch.object(tegile_driver, - '_api_executor', - fake_tegile_backend): - self.assertEqual({'metadata': {'pool': 'testPool', - 'project': 'testProj' - }, - '_name_id': ('existingvol',) - }, tegile_driver.manage_existing(test_volume, - existing_ref)) - - def test_manage_existing_get_size(self): - tegile_driver = self.get_object(self.configuration) - existing_ref = {'name': 'existingvol'} - with mock.patch.object(tegile_driver, - '_api_executor', - fake_tegile_backend): - self.assertEqual(25, - tegile_driver.manage_existing_get_size( - test_volume, - existing_ref)) - - def test_manage_existing_get_size_fail(self): - tegile_driver = self.get_object(self.configuration) - existing_ref = {'name': 'existingvol'} - with mock.patch.object(tegile_driver, - '_api_executor', - fake_tegile_backend_fail): - self.assertRaises(TegileAPIException, - tegile_driver.manage_existing_get_size, - test_volume, existing_ref) - - def get_object(self, configuration): - class 
TegileBaseDriver(BASE_DRIVER): - def initialize_connection(self, volume, connector, **kwargs): - pass - - def terminate_connection(self, volume, connector, - force=False, **kwargs): - pass - - return TegileBaseDriver(configuration=self.configuration) - - -class TegileISCSIDriverTestCase(test.TestCase): - def setUp(self): - super(TegileISCSIDriverTestCase, self).setUp() - self.ctxt = context.get_admin_context() - self.configuration = test_config - self.configuration.chap_username = 'fake' - self.configuration.chap_password = "test" - - def test_initialize_connection(self): - tegile_driver = self.get_object(self.configuration) - connector = {'initiator': 'iqn.1993-08.org.debian:01:d0bb9a834f8'} - with mock.patch.object(tegile_driver, - '_api_executor', - fake_tegile_backend): - self.assertEqual( - {'data': {'auth_method': 'CHAP', - 'discard': False, - 'target_discovered': (False,), - 'auth_password': 'test', - 'auth_username': 'fake', - 'target_iqn': 'iqn.2012-02.' - 'com.tegile:openstack-cobalt', - 'target_lun': 27, - 'target_portal': '10.68.103.106:3260', - 'volume_id': ( - 'a24c2ee8-525a-4406-8ccd-8d38688f8e9e',)}, - 'driver_volume_type': 'iscsi'}, - tegile_driver.initialize_connection(test_volume, - connector)) - - def get_object(self, configuration): - return ISCSI_DRIVER(configuration=configuration) - - -class TegileFCDriverTestCase(test.TestCase): - def setUp(self): - super(TegileFCDriverTestCase, self).setUp() - self.ctxt = context.get_admin_context() - self.configuration = test_config - - def test_initialize_connection(self): - tegile_driver = self.get_object(self.configuration) - connector = {'wwpns': ['500110a0001a3990']} - with mock.patch.object(tegile_driver, - '_api_executor', - fake_tegile_backend): - self.assertEqual({'data': {'encrypted': False, - 'initiator_target_map': { - '21000024ff59bb6e': - ['21000024ff578701'], - '21000024ff59bb6f': - ['21000024ff578700'] - }, - 'target_discovered': False, - 'target_lun': 12, - 'target_wwn': - 
['21000024ff578700', - '21000024ff578701']}, - 'driver_volume_type': 'fibre_channel'}, - tegile_driver.initialize_connection( - test_volume, - connector)) - - def get_object(self, configuration): - return FC_DRIVER(configuration=configuration) diff --git a/cinder/tests/unit/volume/drivers/test_tintri.py b/cinder/tests/unit/volume/drivers/test_tintri.py deleted file mode 100644 index 6d5533827..000000000 --- a/cinder/tests/unit/volume/drivers/test_tintri.py +++ /dev/null @@ -1,287 +0,0 @@ -# Copyright (c) 2015 Tintri. All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -""" -Volume driver test for Tintri storage. 
-""" - -import ddt -import mock - -from oslo_utils import units - -from cinder import context -from cinder import exception -from cinder import test -from cinder.tests.unit import fake_constants as fake -from cinder.tests.unit import fake_snapshot -from cinder.tests.unit import fake_volume -from cinder.tests.unit import utils as cinder_utils -from cinder.volume.drivers.tintri import TClient -from cinder.volume.drivers.tintri import TintriDriver - - -class FakeImage(object): - def __init__(self): - self.id = 'image-id' - self.name = 'image-name' - self.properties = {'provider_location': 'nfs://share'} - - def __getitem__(self, key): - return self.__dict__[key] - - -@ddt.ddt -class TintriDriverTestCase(test.TestCase): - def setUp(self): - super(TintriDriverTestCase, self).setUp() - self.context = context.get_admin_context() - kwargs = {'configuration': self.create_configuration()} - self._driver = TintriDriver(**kwargs) - self._driver._hostname = 'host' - self._driver._username = 'user' - self._driver._password = 'password' - self._driver._api_version = 'v310' - self._driver._image_cache_expiry = 30 - self._provider_location = 'localhost:/share' - self._driver._mounted_shares = [self._provider_location] - self.fake_stubs() - - def create_configuration(self): - configuration = mock.Mock() - configuration.nfs_mount_point_base = '/mnt/test' - configuration.nfs_mount_options = None - configuration.nas_mount_options = None - return configuration - - def fake_stubs(self): - self.mock_object(TClient, 'login', self.fake_login) - self.mock_object(TClient, 'logout', self.fake_logout) - self.mock_object(TClient, 'get_snapshot', self.fake_get_snapshot) - self.mock_object(TClient, 'get_image_snapshots_to_date', - self.fake_get_image_snapshots_to_date) - self.mock_object(TintriDriver, '_move_cloned_volume', - self.fake_move_cloned_volume) - self.mock_object(TintriDriver, '_get_provider_location', - self.fake_get_provider_location) - self.mock_object(TintriDriver, 
'_set_rw_permissions', - self.fake_set_rw_permissions) - self.mock_object(TintriDriver, '_is_volume_present', - self.fake_is_volume_present) - self.mock_object(TintriDriver, '_is_share_vol_compatible', - self.fake_is_share_vol_compatible) - self.mock_object(TintriDriver, '_is_file_size_equal', - self.fake_is_file_size_equal) - - def fake_login(self, user_name, password): - return 'session-id' - - def fake_logout(self): - pass - - def fake_get_snapshot(self, volume_id): - return fake.SNAPSHOT_ID - - def fake_get_image_snapshots_to_date(self, date): - return [{'uuid': {'uuid': 'image_snapshot-id'}}] - - def fake_move_cloned_volume(self, clone_name, volume_id, share=None): - pass - - def fake_get_provider_location(self, volume_path): - return self._provider_location - - def fake_set_rw_permissions(self, path): - pass - - def fake_is_volume_present(self, volume_path): - return True - - def fake_is_share_vol_compatible(self, volume, share): - return True - - def fake_is_file_size_equal(self, path, size): - return True - - @mock.patch.object(TClient, 'create_snapshot', - mock.Mock(return_value=fake.PROVIDER_ID)) - def test_create_snapshot(self): - snapshot = fake_snapshot.fake_snapshot_obj(self.context) - volume = fake_volume.fake_volume_obj(self.context) - provider_id = fake.PROVIDER_ID - snapshot.volume = volume - with mock.patch('cinder.objects.snapshot.Snapshot.save'): - self.assertEqual({'provider_id': fake.PROVIDER_ID}, - self._driver.create_snapshot(snapshot)) - self.assertEqual(provider_id, snapshot.provider_id) - - @mock.patch.object(TClient, 'create_snapshot', mock.Mock( - side_effect=exception.VolumeDriverException)) - def test_create_snapshot_failure(self): - snapshot = fake_snapshot.fake_snapshot_obj(self.context) - volume = fake_volume.fake_volume_obj(self.context) - snapshot.volume = volume - self.assertRaises(exception.VolumeDriverException, - self._driver.create_snapshot, snapshot) - - @mock.patch.object(TClient, 'delete_snapshot', mock.Mock()) - 
@mock.patch('oslo_service.loopingcall.FixedIntervalLoopingCall', new= - cinder_utils.ZeroIntervalLoopingCall) - def test_cleanup_cache(self): - self.assertFalse(self._driver.cache_cleanup) - timer = self._driver._initiate_image_cache_cleanup() - # wait for cache cleanup to complete - timer.wait() - self.assertFalse(self._driver.cache_cleanup) - - @mock.patch.object(TClient, 'delete_snapshot', mock.Mock( - side_effect=exception.VolumeDriverException)) - @mock.patch('oslo_service.loopingcall.FixedIntervalLoopingCall', new= - cinder_utils.ZeroIntervalLoopingCall) - def test_cleanup_cache_delete_fail(self): - self.assertFalse(self._driver.cache_cleanup) - timer = self._driver._initiate_image_cache_cleanup() - # wait for cache cleanup to complete - timer.wait() - self.assertFalse(self._driver.cache_cleanup) - - @mock.patch.object(TClient, 'delete_snapshot', mock.Mock()) - def test_delete_snapshot(self): - snapshot = fake_snapshot.fake_snapshot_obj(self.context) - snapshot.provider_id = fake.PROVIDER_ID - self.assertIsNone(self._driver.delete_snapshot(snapshot)) - - @mock.patch.object(TClient, 'delete_snapshot', mock.Mock( - side_effect=exception.VolumeDriverException)) - def test_delete_snapshot_failure(self): - snapshot = fake_snapshot.fake_snapshot_obj(self.context) - snapshot.provider_id = fake.PROVIDER_ID - self.assertRaises(exception.VolumeDriverException, - self._driver.delete_snapshot, snapshot) - - @mock.patch.object(TClient, 'clone_volume', mock.Mock()) - def test_create_volume_from_snapshot(self): - snapshot = fake_snapshot.fake_snapshot_obj(self.context) - volume = fake_volume.fake_volume_obj(self.context) - self.assertEqual({'provider_location': self._provider_location}, - self._driver.create_volume_from_snapshot( - volume, snapshot)) - - @mock.patch.object(TClient, 'clone_volume', mock.Mock( - side_effect=exception.VolumeDriverException)) - def test_create_volume_from_snapshot_failure(self): - snapshot = fake_snapshot.fake_snapshot_obj(self.context) - 
volume = fake_volume.fake_volume_obj(self.context) - self.assertRaises(exception.VolumeDriverException, - self._driver.create_volume_from_snapshot, - volume, snapshot) - - @mock.patch.object(TClient, 'clone_volume', mock.Mock()) - @mock.patch.object(TClient, 'create_snapshot', mock.Mock()) - def test_create_cloned_volume(self): - volume = fake_volume.fake_volume_obj(self.context) - self.assertEqual({'provider_location': self._provider_location}, - self._driver.create_cloned_volume(volume, volume)) - - @mock.patch.object(TClient, 'clone_volume', mock.Mock( - side_effect=exception.VolumeDriverException)) - @mock.patch.object(TClient, 'create_snapshot', mock.Mock()) - def test_create_cloned_volume_failure(self): - volume = fake_volume.fake_volume_obj(self.context) - self.assertRaises(exception.VolumeDriverException, - self._driver.create_cloned_volume, volume, volume) - - @mock.patch.object(TClient, 'clone_volume', mock.Mock()) - def test_clone_image(self): - volume = fake_volume.fake_volume_obj(self.context) - self.assertEqual(({'provider_location': self._provider_location, - 'bootable': True}, True), - self._driver.clone_image( - None, volume, 'image-name', FakeImage().__dict__, - None)) - - @mock.patch.object(TClient, 'clone_volume', mock.Mock( - side_effect=exception.VolumeDriverException)) - def test_clone_image_failure(self): - volume = fake_volume.fake_volume_obj(self.context) - self.assertEqual(({'provider_location': None, - 'bootable': False}, False), - self._driver.clone_image( - None, volume, 'image-name', FakeImage().__dict__, - None)) - - def test_manage_existing(self): - volume = fake_volume.fake_volume_obj(self.context) - existing = {'source-name': self._provider_location + '/' + - volume.name} - with mock.patch('os.path.isfile', return_value=True): - self.assertEqual({'provider_location': self._provider_location}, - self._driver.manage_existing(volume, existing)) - - def test_manage_existing_invalid_ref(self): - existing = 
fake_volume.fake_volume_obj(self.context) - volume = fake_volume.fake_volume_obj(self.context) - self.assertRaises(exception.ManageExistingInvalidReference, - self._driver.manage_existing, volume, existing) - - def test_manage_existing_not_found(self): - volume = fake_volume.fake_volume_obj(self.context) - existing = {'source-name': self._provider_location + '/' + - volume.name} - with mock.patch('os.path.isfile', return_value=False): - self.assertRaises(exception.ManageExistingInvalidReference, - self._driver.manage_existing, volume, existing) - - @mock.patch.object(TintriDriver, '_move_file', mock.Mock( - return_value=False)) - def test_manage_existing_move_failure(self): - volume = fake_volume.fake_volume_obj(self.context) - existing = {'source-name': self._provider_location + '/source-volume'} - with mock.patch('os.path.isfile', return_value=True): - self.assertRaises(exception.VolumeDriverException, - self._driver.manage_existing, - volume, existing) - - @ddt.data((123, 123), (123.5, 124)) - @ddt.unpack - def test_manage_existing_get_size(self, st_size, exp_size): - volume = fake_volume.fake_volume_obj(self.context) - existing = {'source-name': self._provider_location + '/' + - volume.name} - file = mock.Mock(st_size=int(st_size * units.Gi)) - with mock.patch('os.path.isfile', return_value=True): - with mock.patch('os.stat', return_value=file): - self.assertEqual(exp_size, - self._driver.manage_existing_get_size( - volume, existing)) - - def test_manage_existing_get_size_failure(self): - volume = fake_volume.fake_volume_obj(self.context) - existing = {'source-name': self._provider_location + '/' + - volume.name} - with mock.patch('os.path.isfile', return_value=True): - with mock.patch('os.stat', side_effect=OSError): - self.assertRaises(exception.VolumeDriverException, - self._driver.manage_existing_get_size, - volume, existing) - - def test_unmanage(self): - volume = fake_volume.fake_volume_obj(self.context) - volume.provider_location = 
self._provider_location - self._driver.unmanage(volume) - - def test_retype(self): - volume = fake_volume.fake_volume_obj(self.context) - retype, update = self._driver.retype(None, volume, None, None, None) - self.assertTrue(retype) - self.assertIsNone(update) diff --git a/cinder/tests/unit/volume/drivers/test_v7000_iscsi.py b/cinder/tests/unit/volume/drivers/test_v7000_iscsi.py deleted file mode 100644 index e01bf29cc..000000000 --- a/cinder/tests/unit/volume/drivers/test_v7000_iscsi.py +++ /dev/null @@ -1,366 +0,0 @@ -# Copyright 2016 Violin Memory, Inc. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -""" -Tests for Violin Memory 7000 Series All-Flash Array ISCSI Driver -""" - -import mock - -from cinder import exception -from cinder import test -from cinder.tests.unit.volume.drivers.violin import \ - fake_vmem_client as vmemclient -from cinder.volume import configuration as conf -from cinder.volume.drivers.violin import v7000_common -from cinder.volume.drivers.violin import v7000_iscsi - -VOLUME_ID = "abcdabcd-1234-abcd-1234-abcdeffedcba" -VOLUME = { - "name": "volume-" + VOLUME_ID, - "id": VOLUME_ID, - "display_name": "fake_volume", - "size": 2, - "host": "myhost", - "volume_type": None, - "volume_type_id": None, -} -SNAPSHOT_ID = "abcdabcd-1234-abcd-1234-abcdeffedcbb" -SNAPSHOT = { - "name": "snapshot-" + SNAPSHOT_ID, - "id": SNAPSHOT_ID, - "volume_id": VOLUME_ID, - "volume_name": "volume-" + VOLUME_ID, - "volume_size": 2, - "display_name": "fake_snapshot", - "volume": VOLUME, -} -SRC_VOL_ID = "abcdabcd-1234-abcd-1234-abcdeffedcbc" -SRC_VOL = { - "name": "volume-" + SRC_VOL_ID, - "id": SRC_VOL_ID, - "display_name": "fake_src_vol", - "size": 2, - "host": "myhost", - "volume_type": None, - "volume_type_id": None, -} -SRC_VOL_ID = "abcdabcd-1234-abcd-1234-abcdeffedcbc" -SRC_VOL = { - "name": "volume-" + SRC_VOL_ID, - "id": SRC_VOL_ID, - "display_name": "fake_src_vol", - "size": 2, - "host": "myhost", - "volume_type": None, - "volume_type_id": None, -} -INITIATOR_IQN = "iqn.1111-22.org.debian:11:222" -CONNECTOR = { - "initiator": INITIATOR_IQN, - "host": "irrelevant", - "ip": "1.2.3.4", -} -TARGET = "iqn.2004-02.com.vmem:%s" % VOLUME['id'] - -GET_VOLUME_STATS_RESPONSE = { - 'vendor_name': 'Violin Memory, Inc.', - 'reserved_percentage': 0, - 'QoS_support': False, - 'free_capacity_gb': 4094, - 'total_capacity_gb': 2558, -} - -CLIENT_INFO = { - 'issanip_enabled': False, - 'sanclient_id': 7, - 'ISCSIDevices': - [{'category': 'Virtual Device', - 'sizeMB': VOLUME['size'] * 1024, - 'name': VOLUME['id'], - 'object_id': 'v0000058', - 'access': 'ReadWrite', - 
'ISCSITarget': - {'name': TARGET, - 'startingLun': '0', - 'ipAddr': '192.168.91.1 192.168.92.1 192.168.93.1 192.168.94.1', - 'object_id': '2c68c1a4-67bb-59b3-93df-58bcdf422a66', - 'access': 'ReadWrite', - 'isInfiniBand': 'false', - 'iscsiurl': ''}, - 'type': 'SAN', - 'lun': '8', - 'size': VOLUME['size'] * 1024 * 1024}], - 'name': 'lab-srv3377', - 'isiscsi_enabled': True, - 'clusterName': '', - 'ipAddress': '', - 'isclustered': False, - 'username': '', - 'isbmr_enabled': False, - 'useracl': None, - 'isfibrechannel_enabled': False, - 'iSCSIPolicy': - {'initiators': ['iqn.1993-08.org.debian:01:1ebcd244a059'], - 'authentication': - {'mutualCHAP': - {'enabled': False, - 'user': ''}, - 'enabled': False, - 'defaultUser': ''}, - 'accessType': 'stationary'}, - 'ISCSITargetList': - [{'name': 'iqn.2004-02.com.vmem:lab-fsp-mga.openstack', - 'startingLun': '0', - 'ipAddr': '192.168.91.1 192.168.92.1 192.168.93.1 192.168.94.1', - 'object_id': '716cc60a-576a-55f1-bfe3-af4a21ca5554', - 'access': 'ReadWrite', - 'isInfiniBand': 'false', - 'iscsiurl': ''}], - 'type': 'Windows', - 'persistent_reservation': True, - 'isxboot_enabled': False} - - -class V7000ISCSIDriverTestCase(test.TestCase): - """Test cases for VMEM ISCSI driver.""" - def setUp(self): - super(V7000ISCSIDriverTestCase, self).setUp() - self.conf = self.setup_configuration() - self.driver = v7000_iscsi.V7000ISCSIDriver(configuration=self.conf) - self.driver.gateway_iscsi_ip_addresses = [ - '192.168.91.1', '192.168.92.1', '192.168.93.1', '192.168.94.1'] - self.stats = {} - self.driver.set_initialized() - - def setup_configuration(self): - config = mock.Mock(spec=conf.Configuration) - config.volume_backend_name = 'v7000_iscsi' - config.san_ip = '8.8.8.8' - config.san_login = 'admin' - config.san_password = '' - config.san_thin_provision = False - config.san_is_local = False - config.use_igroups = False - config.request_timeout = 300 - return config - - def setup_mock_concerto(self, m_conf=None): - """Create a fake Concerto 
communication object.""" - _m_concerto = mock.Mock(name='Concerto', - version='1.1.1', - spec=vmemclient.mock_client_conf) - - if m_conf: - _m_concerto.configure_mock(**m_conf) - - return _m_concerto - - @mock.patch.object(v7000_common.V7000Common, 'check_for_setup_error') - def test_check_for_setup_error(self, m_setup_func): - """No setup errors are found.""" - result = self.driver.check_for_setup_error() - m_setup_func.assert_called_with() - self.assertIsNone(result) - - def test_create_volume(self): - """Volume created successfully.""" - self.driver.common._create_lun = mock.Mock() - - result = self.driver.create_volume(VOLUME) - - self.driver.common._create_lun.assert_called_with(VOLUME) - self.assertIsNone(result) - - def test_create_volume_from_snapshot(self): - self.driver.common._create_volume_from_snapshot = mock.Mock() - - result = self.driver.create_volume_from_snapshot(VOLUME, SNAPSHOT) - - self.driver.common._create_volume_from_snapshot.assert_called_with( - SNAPSHOT, VOLUME) - - self.assertIsNone(result) - - def test_create_cloned_volume(self): - self.driver.common._create_lun_from_lun = mock.Mock() - - result = self.driver.create_cloned_volume(VOLUME, SRC_VOL) - - self.driver.common._create_lun_from_lun.assert_called_with( - SRC_VOL, VOLUME) - self.assertIsNone(result) - - def test_delete_volume(self): - """Volume deleted successfully.""" - self.driver.common._delete_lun = mock.Mock() - - result = self.driver.delete_volume(VOLUME) - - self.driver.common._delete_lun.assert_called_with(VOLUME) - self.assertIsNone(result) - - def test_extend_volume(self): - """Volume extended successfully.""" - new_size = 10 - self.driver.common._extend_lun = mock.Mock() - - result = self.driver.extend_volume(VOLUME, new_size) - - self.driver.common._extend_lun.assert_called_with(VOLUME, new_size) - self.assertIsNone(result) - - def test_create_snapshot(self): - self.driver.common._create_lun_snapshot = mock.Mock() - - result = self.driver.create_snapshot(SNAPSHOT) - 
self.driver.common._create_lun_snapshot.assert_called_with(SNAPSHOT) - self.assertIsNone(result) - - def test_delete_snapshot(self): - self.driver.common._delete_lun_snapshot = mock.Mock() - - result = self.driver.delete_snapshot(SNAPSHOT) - self.driver.common._delete_lun_snapshot.assert_called_with(SNAPSHOT) - self.assertIsNone(result) - - def test_get_volume_stats(self): - self.driver._update_volume_stats = mock.Mock() - self.driver._update_volume_stats() - - result = self.driver.get_volume_stats(True) - - self.driver._update_volume_stats.assert_called_with() - self.assertEqual(self.driver.stats, result) - - def test_update_volume_stats(self): - """Mock query to the backend collects stats on all physical devices.""" - backend_name = self.conf.volume_backend_name - - self.driver.common._get_volume_stats = mock.Mock( - return_value=GET_VOLUME_STATS_RESPONSE, - ) - - result = self.driver._update_volume_stats() - - self.driver.common._get_volume_stats.assert_called_with( - self.conf.san_ip) - self.assertEqual(backend_name, - self.driver.stats['volume_backend_name']) - self.assertEqual('iSCSI', - self.driver.stats['storage_protocol']) - self.assertIsNone(result) - - def test_initialize_connection(self): - lun_id = 1 - response = {'success': True, 'msg': 'None'} - - conf = { - 'client.create_client.return_value': response, - 'client.create_iscsi_target.return_value': response, - } - self.driver.common.vmem_mg = self.setup_mock_concerto(m_conf=conf) - self.driver._get_iqn = mock.Mock(return_value=TARGET) - self.driver._export_lun = mock.Mock(return_value=lun_id) - - props = self.driver.initialize_connection(VOLUME, CONNECTOR) - - self.driver._export_lun.assert_called_with(VOLUME, TARGET, CONNECTOR) - self.assertEqual("iscsi", props['driver_volume_type']) - self.assertFalse(props['data']['target_discovered']) - self.assertEqual(TARGET, props['data']['target_iqn']) - self.assertEqual(lun_id, props['data']['target_lun']) - self.assertEqual(VOLUME['id'], 
props['data']['volume_id']) - - def test_terminate_connection(self): - self.driver.common.vmem_mg = self.setup_mock_concerto() - self.driver._get_iqn = mock.Mock(return_value=TARGET) - self.driver._unexport_lun = mock.Mock() - - result = self.driver.terminate_connection(VOLUME, CONNECTOR) - - self.driver._unexport_lun.assert_called_with(VOLUME, TARGET, CONNECTOR) - self.assertIsNone(result) - - def test_export_lun(self): - lun_id = '1' - response = {'success': True, 'msg': 'Assign device successfully'} - - self.driver.common.vmem_mg = self.setup_mock_concerto() - - self.driver.common._send_cmd_and_verify = mock.Mock( - return_value=response) - self.driver._get_lun_id = mock.Mock(return_value=lun_id) - - result = self.driver._export_lun(VOLUME, TARGET, CONNECTOR) - - self.driver.common._send_cmd_and_verify.assert_called_with( - self.driver.common.vmem_mg.lun.assign_lun_to_iscsi_target, - self.driver._is_lun_id_ready, - 'Assign device successfully', - [VOLUME['id'], TARGET], - [VOLUME['id'], CONNECTOR['host']]) - self.driver._get_lun_id.assert_called_with( - VOLUME['id'], CONNECTOR['host']) - self.assertEqual(lun_id, result) - - def test_export_lun_fails_with_exception(self): - lun_id = '1' - response = {'success': False, 'msg': 'Generic error'} - - self.driver.common.vmem_mg = self.setup_mock_concerto() - self.driver.common._send_cmd_and_verify = mock.Mock( - side_effect=exception.ViolinBackendErr(response['msg'])) - self.driver._get_lun_id = mock.Mock(return_value=lun_id) - - self.assertRaises(exception.ViolinBackendErr, - self.driver._export_lun, - VOLUME, TARGET, CONNECTOR) - - def test_unexport_lun(self): - response = {'success': True, 'msg': 'Unassign device successfully'} - - self.driver.common.vmem_mg = self.setup_mock_concerto() - self.driver.common._send_cmd = mock.Mock( - return_value=response) - - result = self.driver._unexport_lun(VOLUME, TARGET, CONNECTOR) - - self.driver.common._send_cmd.assert_called_with( - 
self.driver.common.vmem_mg.lun.unassign_lun_from_iscsi_target, - "Unassign device successfully", - VOLUME['id'], TARGET, True) - self.assertIsNone(result) - - def test_is_lun_id_ready(self): - lun_id = '1' - self.driver.common.vmem_mg = self.setup_mock_concerto() - - self.driver._get_lun_id = mock.Mock(return_value=lun_id) - - result = self.driver._is_lun_id_ready( - VOLUME['id'], CONNECTOR['host']) - self.assertTrue(result) - - def test_get_lun_id(self): - - conf = { - 'client.get_client_info.return_value': CLIENT_INFO, - } - self.driver.common.vmem_mg = self.setup_mock_concerto(m_conf=conf) - - result = self.driver._get_lun_id(VOLUME['id'], CONNECTOR['host']) - - self.assertEqual(8, result) diff --git a/cinder/tests/unit/volume/drivers/test_veritas_cnfs.py b/cinder/tests/unit/volume/drivers/test_veritas_cnfs.py deleted file mode 100644 index 2d5dc0ebc..000000000 --- a/cinder/tests/unit/volume/drivers/test_veritas_cnfs.py +++ /dev/null @@ -1,177 +0,0 @@ -# Copyright (c) 2017 Veritas Technologies LLC -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- - -import os - -import mock - -from cinder import context -from cinder import exception -from cinder import test -from cinder.tests.unit import fake_snapshot -from cinder.tests.unit import fake_volume -from cinder.volume import configuration as conf -from cinder.volume.drivers import veritas_cnfs as cnfs - - -class VeritasCNFSDriverTestCase(test.TestCase): - - """Test case for VeritasCNFS driver.""" - TEST_CNFS_SHARE = 'cnfs-host1:/share' - TEST_VOL_NM = 'volume-a6707cd3-348c-45cd-9524-255be0939b60' - TEST_SNAP_NM = 'snapshot-73368c68-1c0b-4027-ba8a-14629918945e' - TEST_VOL_SIZE = 1 - TEST_MNT_BASE = '/cnfs/share' - TEST_LOCAL_PATH = '/cnfs/share/mnt' - TEST_VOL_LOCAL_PATH = TEST_LOCAL_PATH + '/' + TEST_VOL_NM - TEST_SNAP_LOCAL_PATH = TEST_LOCAL_PATH + '/' + TEST_SNAP_NM - TEST_SPL_SNAP_LOCAL_PATH = TEST_SNAP_LOCAL_PATH + "::snap:vxfs:" - TEST_NFS_SHARES_CONFIG = '/etc/cinder/access_nfs_share' - TEST_NFS_MOUNT_OPTIONS_FAIL_NONE = '' - TEST_NFS_MOUNT_OPTIONS_FAIL_V4 = 'nfsvers=4' - TEST_NFS_MOUNT_OPTIONS_FAIL_V2 = 'nfsvers=2' - TEST_NFS_MOUNT_OPTIONS_PASS_V3 = 'nfsvers=3' - TEST_VOL_ID = 'a6707cd3-348c-45cd-9524-255be0939b60' - SNAPSHOT_ID = '73368c68-1c0b-4027-ba8a-14629918945e' - - def setUp(self): - super(VeritasCNFSDriverTestCase, self).setUp() - self.configuration = mock.Mock(conf.Configuration) - self.configuration.nfs_shares_config = self.TEST_NFS_SHARES_CONFIG - self.configuration.nfs_sparsed_volumes = True - self.configuration.nfs_mount_point_base = self.TEST_MNT_BASE - self.configuration.nfs_mount_options = (self. 
- TEST_NFS_MOUNT_OPTIONS_PASS_V3) - self.configuration.nfs_oversub_ratio = 1.0 - self.configuration.nfs_used_ratio = 0.95 - self.configuration.nfs_disk_util = 'df' - self.configuration.reserved_percentage = 0 - self.configuration.max_over_subscription_ratio = 20.0 - self.configuration.nas_secure_file_permissions = 'false' - self.configuration.nas_secure_file_operations = 'false' - self._loc = 'localhost:/share' - self.context = context.get_admin_context() - self.driver = cnfs.VeritasCNFSDriver(configuration=self.configuration) - - def test_throw_error_if_nfs_mount_options_not_configured(self): - """Fail if no nfs mount options are configured""" - drv = self.driver - none_opts = self.TEST_NFS_MOUNT_OPTIONS_FAIL_NONE - self.configuration.nfs_mount_options = none_opts - self.assertRaises( - exception.NfsException, drv.do_setup, context.RequestContext) - - def test_throw_error_if_nfs_mount_options_configured_with_NFSV2(self): - """Fail if nfs mount options is not nfsv4 """ - drv = self.driver - nfs_v2_opts = self.TEST_NFS_MOUNT_OPTIONS_FAIL_V2 - self.configuration.nfs_mount_options = nfs_v2_opts - self.assertRaises( - exception.NfsException, drv.do_setup, context.RequestContext) - - def test_throw_error_if_nfs_mount_options_configured_with_NFSV4(self): - """Fail if nfs mount options is not nfsv4 """ - drv = self.driver - nfs_v4_opts = self.TEST_NFS_MOUNT_OPTIONS_FAIL_V4 - self.configuration.nfs_mount_options = nfs_v4_opts - self.assertRaises( - exception.NfsException, drv.do_setup, context.RequestContext) - - @mock.patch.object(cnfs.VeritasCNFSDriver, '_get_local_volume_path') - @mock.patch.object(os.path, 'exists') - def test_do_clone_volume_success(self, m_exists, m_get_local_volume_path): - """test _do_clone_volume() when filesnap over nfs is supported""" - drv = self.driver - volume = fake_volume.fake_volume_obj(self.context, - provider_location=self._loc) - snapshot = fake_volume.fake_volume_obj(self.context) - with mock.patch.object(drv, '_execute'): - 
m_exists.return_value = True - drv._do_clone_volume(volume, volume.name, snapshot) - - @mock.patch.object(cnfs.VeritasCNFSDriver, '_get_local_volume_path') - @mock.patch.object(os.path, 'exists') - def test_do_clone_volume_fail(self, m_exists, m_get_local_volume_path): - """test _do_clone_volume() when filesnap over nfs is supported""" - drv = self.driver - volume = fake_volume.fake_volume_obj(self.context) - snapshot = fake_volume.fake_volume_obj(self.context) - with mock.patch.object(drv, '_execute'): - m_exists.return_value = False - self.assertRaises(exception.NfsException, drv._do_clone_volume, - volume, volume.name, snapshot) - - def assign_provider_loc(self, src_vol, tgt_vol): - tgt_vol.provider_location = src_vol.provider_location - - @mock.patch.object(cnfs.VeritasCNFSDriver, '_do_clone_volume') - def test_create_volume_from_snapshot(self, m_do_clone_volume): - """test create volume from snapshot""" - drv = self.driver - volume = fake_volume.fake_volume_obj(self.context) - snapshot = fake_volume.fake_volume_obj(self.context, - provider_location=self._loc) - volume.size = 10 - snapshot.volume_size = 10 - m_do_clone_volume(snapshot, snapshot.name, - volume).return_value = True - drv.create_volume_from_snapshot(volume, snapshot) - self.assertEqual(volume.provider_location, snapshot.provider_location) - - @mock.patch.object(cnfs.VeritasCNFSDriver, '_get_vol_by_id') - @mock.patch.object(cnfs.VeritasCNFSDriver, '_do_clone_volume') - def test_create_snapshot(self, m_do_clone_volume, m_get_vol_by_id): - """test create snapshot""" - drv = self.driver - volume = fake_volume.fake_volume_obj(context.get_admin_context(), - provider_location=self._loc) - snapshot = fake_snapshot.fake_snapshot_obj(context.get_admin_context()) - snapshot.volume = volume - m_get_vol_by_id.return_value = volume - m_do_clone_volume(snapshot, snapshot.name, - volume).return_value = True - drv.create_snapshot(snapshot) - self.assertEqual(volume.provider_location, snapshot.provider_location) - 
- @mock.patch.object(cnfs.VeritasCNFSDriver, '_ensure_share_mounted') - @mock.patch.object(cnfs.VeritasCNFSDriver, 'local_path') - def test_delete_snapshot(self, m_local_path, m_ensure_share_mounted): - """test delete snapshot""" - drv = self.driver - snapshot = fake_snapshot.fake_snapshot_obj(context.get_admin_context(), - provider_location=self._loc) - m_ensure_share_mounted(self._loc).AndReturn(None) - m_local_path(snapshot).AndReturn(self.TEST_SNAP_LOCAL_PATH) - with mock.patch.object(drv, '_execute'): - drv.delete_snapshot(snapshot) - - @mock.patch.object(cnfs.VeritasCNFSDriver, '_do_clone_volume') - @mock.patch.object(cnfs.VeritasCNFSDriver, 'local_path') - def test_create_volume_from_snapshot_greater_size(self, m_local_path, - m_do_clone_volume): - """test create volume from snapshot with greater volume size""" - drv = self.driver - volume = fake_volume.fake_volume_obj(self.context) - snapshot = fake_volume.fake_volume_obj(self.context, - provider_location=self._loc) - volume.size = 20 - snapshot.volume_size = 10 - m_do_clone_volume(snapshot, snapshot.name, - volume).return_value = True - m_local_path(volume).AndReturn(self.TEST_VOL_LOCAL_PATH) - with mock.patch.object(drv, '_execute'): - drv.create_volume_from_snapshot(volume, snapshot) - self.assertEqual(volume.provider_location, snapshot.provider_location) diff --git a/cinder/tests/unit/volume/drivers/test_vrtshyperscale.py b/cinder/tests/unit/volume/drivers/test_vrtshyperscale.py deleted file mode 100644 index 1ed917b60..000000000 --- a/cinder/tests/unit/volume/drivers/test_vrtshyperscale.py +++ /dev/null @@ -1,521 +0,0 @@ -# Copyright (c) 2017 Veritas Technologies LLC. All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. 
You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import mock - -from cinder import context -from cinder import exception -from cinder import test -from cinder.tests.unit import fake_snapshot -from cinder.tests.unit import fake_volume -from cinder.volume import configuration as conf -from cinder.volume.drivers.veritas import vrtshyperscale as vrts - - -class FakeDb(object): - def volume_metadata_get(self, *a, **kw): - return {} - - def volume_metadata_update(self, *a, **kw): - return None - - -def _stub_volume(*args, **kwargs): - updates = {'provider_location': 'hyperscale-sv:/hyperscale'} - return fake_volume.fake_db_volume(**updates) - - -def _stub_snapshot(*args, **kwargs): - updates = {'volume': _stub_volume(), 'name': 'vrts'} - return fake_snapshot.fake_db_snapshot(**updates) - - -def _stub_stats(): - data = {} - data["volume_backend_name"] = 'Veritas_HyperScale' - data["vendor_name"] = 'Veritas Technologies LLC' - data["driver_version"] = '1.0' - data["storage_protocol"] = 'nfs' - data['total_capacity_gb'] = 0.0 - data['free_capacity_gb'] = 0.0 - data['reserved_percentage'] = 0 - data['QoS_support'] = False - return data - - -class VRTSHyperScaleDriverTestCase(test.TestCase): - """Test case for Veritas HyperScale VolumeDriver.""" - - driver_name = "cinder.volume.drivers.veritas.vrtshyperscale" - - @staticmethod - def gvmv_side_effect(arg1, arg2): - """Mock side effect for _get_volume_metadata_value.""" - # mock the return of get_volume_metadata_value - # for different arguments - if arg2 == 'Secondary_datanode_key': - return '{9876}' - elif arg2 == 'Secondary_datanode_ip': - return '192.0.2.2' - 
elif arg2 == 'current_dn_ip': - return '192.0.2.1' - elif arg2 == 'vsa_ip': - return '192.0.2.1' - - @mock.patch('cinder.volume.drivers.veritas.vrtshyperscale.HyperScaleDriver' - '._fetch_config_for_compute') - @mock.patch('cinder.volume.drivers.veritas.vrtshyperscale.HyperScaleDriver' - '._fetch_config_for_datanode') - @mock.patch('cinder.volume.drivers.veritas.vrtshyperscale.HyperScaleDriver' - '._fetch_config_for_controller') - def setUp(self, mock_fcfcntr, mock_fcfd, mock_fcfc): - mock_fcfcntr.return_value = None - mock_fcfd.return_value = None - mock_fcfc.return_value = None - - # Initialise a test seup - super(VRTSHyperScaleDriverTestCase, self).setUp() - - self.configuration = mock.Mock(conf.Configuration(None)) - self.configuration.reserved_percentage = 0 - self.context = context.get_admin_context() - self.driver = vrts.HyperScaleDriver( - db=FakeDb(), configuration=self.configuration) - self.driver.dn_routing_key = '{1234}' - self.driver.datanode_ip = '192.0.2.1' - self.volume = _stub_volume() - self.snapshot = _stub_snapshot() - - @mock.patch('cinder.volume.drivers.veritas.vrtshyperscale.HyperScaleDriver' - '._get_volume_metadata') - @mock.patch('cinder.volume.drivers.veritas.vrtshyperscale.HyperScaleDriver' - '._get_replicas') - @mock.patch('cinder.volume.drivers.veritas.vrtshyperscale.HyperScaleDriver' - '._get_volume_details_for_create_volume') - @mock.patch('cinder.volume.drivers.veritas.utils' - '.message_data_plane') - @mock.patch('cinder.volume.api.API.update_volume_metadata') - def test_create_volume_single_replicas(self, mock_uvm, mock_mdp, - mock_gvdfcv, mock_get_replicas, - mock_gvm): - """Test single volume replica. 
Happy path test case.""" - # Mock volume meatadata - mock_gvm.return_value = _stub_volume() - - # Mock number of replicas to 1 - mock_get_replicas.return_value = 1 - # assume volume details are populated correctly - mock_gvdfcv.return_value = _stub_volume() - - # assume volume message is sent to data node successfully - mock_mdp.return_value = ("", None) - # assume that the volume metadata gets updated correctly - mock_uvm.return_value = {} - - # declare the expected result - expected_result = { - 'provider_location': 'hyperscale-sv:/hyperscale', - 'metadata': mock_gvm.return_value - } - - # call create volume and get the result - actual_result = self.driver.create_volume(self.volume) - - # Test if the return value matched the expected results - self.assertDictEqual(actual_result, expected_result) - - @mock.patch('cinder.volume.drivers.veritas.utils' - '.get_hyperscale_version') - def test_check_for_setup_error(self, mock_ghv): - """Test check for setup errors in Veritas HyperScale driver. - - The test case checks happy path execution when driver version 1.0.0 - is installed. - """ - mock_ghv.return_value = "1.0.0" - - # check the driver for setup errors - self.driver.check_for_setup_error() - - @mock.patch('cinder.volume.drivers.veritas.utils' - '.get_hyperscale_version') - def test_check_for_setup_error_unsupported_version(self, mock_ghv): - """Test check for setup errors in Veritas HyperScale driver. - - The test case checks happy path execution when driver version 1.0.0 - is installed. - """ - mock_ghv.return_value = "1.0.0.1" - - # check the driver for setup errors - self.assertRaises(exception.VolumeBackendAPIException, - self.driver.check_for_setup_error) - - @mock.patch('cinder.volume.drivers.veritas.utils' - '.get_hyperscale_version') - def test_check_for_setup_error_exception(self, mock_ghv): - """Test check for setup errors in Veritas HyperScale driver. - - The test case checks happy path execution when driver version 1.0.0 - is installed. 
- """ - mock_ghv.side_effect = exception.ErrorInHyperScaleVersion( - cmd_error="mock error") - - # check the driver for setup errors - self.assertRaises(exception.VolumeBackendAPIException, - self.driver.check_for_setup_error) - - @mock.patch('cinder.volume.drivers.veritas.vrtshyperscale.HyperScaleDriver' - '._get_volume_metadata_value') - @mock.patch('cinder.volume.drivers.veritas.utils' - '.message_data_plane') - def test_delete_volume_no_replica(self, mock_mdp, mock_gvmv): - """Test happy path for delete_volume one data nodes.""" - mock_gvmv.return_value = None - self.driver.delete_volume(self.volume) - - message_body = {'display_name': self.volume['name']} - - mock_mdp.assert_called_with(self.driver.dn_routing_key, - 'hyperscale.storage.dm.volume.delete', - **message_body) - - @mock.patch('cinder.volume.drivers.veritas.vrtshyperscale.HyperScaleDriver' - '._get_volume_metadata_value') - @mock.patch('cinder.volume.drivers.veritas.utils' - '.message_data_plane') - def test_delete_volume_more_than_one_replica(self, mock_mdp, mock_gvmv): - """Test happy path for delete_volume with more than one data nodes.""" - mock_gvmv.side_effect = VRTSHyperScaleDriverTestCase.gvmv_side_effect - - message_body = {'display_name': self.volume['name']} - - # make the delete call - self.driver.delete_volume(self.volume) - - # check if delete volume sent to reflection target on data node - # check if mq message sent with 'Secondary_datanode_key' - mock_mdp.assert_any_call( - '{9876}', 'hyperscale.storage.dm.volume.delete', **message_body) - - # check if the delete is sent to primary data node as well - mock_mdp.assert_any_call(self.driver.dn_routing_key, - 'hyperscale.storage.dm.volume.delete', - **message_body) - - @mock.patch('cinder.volume.drivers.veritas.vrtshyperscale.HyperScaleDriver' - '._get_volume_metadata_value') - @mock.patch('cinder.volume.drivers.veritas.utils' - '.message_data_plane') - def test_delete_volume_no_replica_failure(self, mock_mdp, mock_gvmv): - """Failure 
case for delete_volume one node in data plane.""" - mock_gvmv.side_effect = None - self.driver.delete_volume(self.volume) - mock_mdp.side_effect = exception.UnableToProcessHyperScaleCmdOutput( - cmd_out='mock error') - self.assertRaises(exception.VolumeIsBusy, self.driver.delete_volume, - self.volume) - - @mock.patch('cinder.volume.drivers.veritas.vrtshyperscale.HyperScaleDriver' - '._get_volume_metadata_value') - @mock.patch('cinder.volume.drivers.veritas.utils' - '.message_data_plane') - def test_delete_volume_more_than_one_replica_failure(self, mock_mdp, - mock_gvmv): - """failure case for delete_volume with more than one data nodes.""" - mock_gvmv.side_effect = VRTSHyperScaleDriverTestCase.gvmv_side_effect - - mock_mdp.side_effect = exception.UnableToProcessHyperScaleCmdOutput( - cmd_out='mock error') - - self.assertRaises(exception.VolumeIsBusy, self.driver.delete_volume, - self.volume) - - @mock.patch('cinder.volume.drivers.veritas.utils' - '.get_guid_with_curly_brackets') - def test_delete_snapshot_force_flag(self, mock_ggwcb): - """Test snapshot deletion does not happen if force flag is set.""" - # get a mock snapshot object - snapshot = fake_snapshot.fake_db_snapshot() - # set the force in metadata of snapshot - snapshot['metadata'] = {"force": "force"} - - # call the delete volume - self.driver.delete_snapshot(snapshot) - - # if snapshot has force set in metadata then - # get_guid_with_curly_brackets() will not be called because we - # return as soon as we see force - mock_ggwcb.assert_not_called() - - def test_delete_snapshot_isbusy_flag(self): - """Test snapshot deletion throws exception if snapshot is busy.""" - # get a mock snapshot object - snapshot = fake_snapshot.fake_db_snapshot() - # set the force in metadata of snapshot - snapshot['metadata'] = {"is_busy": "is_busy"} - - # call the delete volume to check if it raises Busy Exception - self.assertRaises(exception.SnapshotIsBusy, - self.driver.delete_snapshot, snapshot) - - 
@mock.patch('cinder.volume.drivers.veritas.vrtshyperscale.HyperScaleDriver' - '._get_volume_metadata') - @mock.patch('cinder.volume.api.API.get_volume') - @mock.patch('cinder.volume.drivers.veritas.utils' - '.message_data_plane') - def test_delete_snapshot_from_primary_dn(self, mock_mdp, mock_gv, - mock_gvm): - """Test snapshot deletion from primary DN.""" - # get mock volume - mock_gv.return_value = None - mock_gvm.return_value = {'current_dn_ip': self.driver.datanode_ip} - - message_body = {} - message_body['volume_guid'] = '{' + self.volume['id'] + '}' - message_body['snapshot_id'] = '{' + self.snapshot['id'] + '}' - - # call delete snapshot - self.driver.delete_snapshot(self.snapshot) - - # assert msg is sent over mq with primary DN routing key - mock_mdp.assert_called_with(self.driver.dn_routing_key, - 'hyperscale.storage.dm.version.delete', - **message_body) - - @mock.patch('cinder.volume.drivers.veritas.vrtshyperscale.HyperScaleDriver' - '._get_volume_metadata') - @mock.patch('cinder.volume.api.API.get_volume') - @mock.patch('cinder.volume.drivers.veritas.utils' - '.message_data_plane') - @mock.patch('cinder.volume.drivers.veritas.vrtshyperscale.HyperScaleDriver' - '._get_volume_metadata_value') - def test_delete_snapshot_from_current_dn(self, mock_gvmv, mock_mdp, - mock_gv, mock_gvm): - """Test snapshot deletion DN value from volume.""" - # get a mock volume - mock_gv.return_value = _stub_volume() - - # get a mock value of DN from volume - mock_gvmv.return_value = '{9876}' - - message_body = {} - message_body['volume_guid'] = '{' + self.volume['id'] + '}' - message_body['snapshot_id'] = '{' + self.snapshot['id'] + '}' - - # call delete snapshot - self.driver.delete_snapshot(self.snapshot) - - # assert msg is sent over mq with key from volume's current_dn_owner - mock_mdp.assert_called_with( - '{9876}', 'hyperscale.storage.dm.version.delete', **message_body) - - @mock.patch('cinder.volume.drivers.veritas.utils' - '.message_data_plane') - def 
test_fetch_volume_stats_failure(self, mock_mdp): - """Test case checking failure of pool for fetching stats.""" - # since we have initialised the pool to None in setup() - # the function will return only the stub without populating - # any free and used stats - mock_obj = {'payload': {}} - - mock_mdp.return_value = (mock_obj, None) - self.assertDictEqual(_stub_stats(), self.driver._fetch_volume_status()) - - @mock.patch('cinder.volume.drivers.veritas.utils' - '.message_data_plane') - def test_create_cloned_volume_with_exception(self, mock_mdp): - """Test case throws exception when command failed to execute.""" - vol_a = _stub_volume() - vol_b = _stub_volume() - mock_mdp.side_effect = exception.UnableToExecuteHyperScaleCmd( - cmd_out='mock error') - self.assertRaises(exception.UnableToExecuteHyperScaleCmd, - self.driver.create_cloned_volume, vol_b, vol_a) - - @mock.patch('cinder.volume.drivers.veritas.utils' - '.message_data_plane') - @mock.patch('cinder.volume.drivers.veritas.vrtshyperscale' - '.HyperScaleDriver._select_rt') - def test_create_cloned_volume_with_no_replica(self, mock_srt, mock_mdp): - """Test case clone volume when there is no replica.""" - mock_obj = {'payload': {}} - mock_mdp.return_value = (mock_obj, None) - mock_srt.return_value = (None, None) - vol_a = _stub_volume() - vol_b = _stub_volume() - self.assertDictContainsSubset({ - 'provider_location': 'hyperscale-sv:/hyperscale' - }, self.driver.create_cloned_volume(vol_b, vol_a)) - - @mock.patch('cinder.volume.drivers.veritas.utils' - '.message_data_plane') - @mock.patch('cinder.volume.drivers.veritas.vrtshyperscale' - '.HyperScaleDriver._select_rt') - @mock.patch('cinder.volume.drivers.veritas.vrtshyperscale.HyperScaleDriver' - '._get_volume_metadata_value') - def test_create_cloned_volume_with_replica(self, mock_gvmv, mock_srt, - mock_mdp): - """Test case clone volume when there is replica.""" - mock_gvmv.side_effect = VRTSHyperScaleDriverTestCase.gvmv_side_effect - mock_obj = {'payload': {}} - 
mock_mdp.return_value = (mock_obj, None) - mock_srt.return_value = ('{1234}', '192.0.2.2') - vol_a = _stub_volume() - vol_b = _stub_volume() - metadata = { - 'current_dn_owner': '{1234}', - 'Potential_secondary_key': '{1234}', - 'Primary_datanode_ip': '192.0.2.1', - 'Potential_secondary_ip': '192.0.2.2', - 'current_dn_ip': '192.0.2.1', - 'source_volid': vol_a['id'], - 'size': vol_a['size'] - } - self.assertDictContainsSubset({ - 'provider_location': 'hyperscale-sv:/hyperscale', - 'metadata': metadata - }, self.driver.create_cloned_volume(vol_b, vol_a)) - - @mock.patch('cinder.volume.drivers.veritas.utils' - '.message_data_plane') - def test_extend_volume_with_exception(self, mock_mdp): - """Test case extend volume to the given size in GB.""" - mock_mdp.side_effect = exception.UnableToProcessHyperScaleCmdOutput( - cmd_out='mock error') - self.assertRaises(exception.VolumeDriverException, - self.driver.extend_volume, _stub_volume(), 256) - - @mock.patch('cinder.volume.drivers.veritas.utils' - '.message_data_plane') - def test_extend_volume_no_exception(self, mock_mdp): - """Test case extend volume thorws exception.""" - mock_mdp.return_value = (None, None) - self.driver.extend_volume(_stub_volume(), 256) - - @mock.patch('cinder.volume.drivers.veritas.utils' - '.message_data_plane') - def test_create_volume_from_snapshot_with_exception(self, mock_mdp): - """Test case create volume from snapshot thorws exception.""" - fake_volume, fake_snapshot = _stub_volume(), _stub_snapshot() - mock_mdp.side_effect = exception.UnableToExecuteHyperScaleCmd( - cmd_out='mock error') - self.assertRaises(exception.UnableToExecuteHyperScaleCmd, - self.driver.create_volume_from_snapshot, fake_volume, - fake_snapshot) - - @mock.patch('cinder.volume.drivers.veritas.utils' - '.message_data_plane') - @mock.patch('cinder.volume.drivers.veritas.vrtshyperscale' - '.HyperScaleDriver._select_rt') - def test_create_volume_from_snapshot_with_no_replica(self, mock_srt, - mock_mdp): - """Test case 
create volume from snapshot when there is no replica.""" - mock_obj = {'payload': {}} - mock_mdp.return_value = (mock_obj, None) - mock_srt.return_value = (None, None) - fake_volume, fake_snapshot = _stub_volume(), _stub_snapshot() - self.assertDictContainsSubset({ - 'provider_location': 'hyperscale-sv:/hyperscale' - }, self.driver.create_volume_from_snapshot(fake_volume, fake_snapshot)) - - @mock.patch('cinder.volume.drivers.veritas.utils' - '.message_data_plane') - @mock.patch('cinder.volume.drivers.veritas.vrtshyperscale' - '.HyperScaleDriver._select_rt') - @mock.patch('cinder.volume.drivers.veritas.vrtshyperscale.HyperScaleDriver' - '._get_volume_metadata_value') - def test_create_volume_from_snapshot_with_replica(self, mock_gvmv, - mock_srt, mock_mdp): - """Test case create volume from snapshot when there is replica.""" - mock_gvmv.side_effect = VRTSHyperScaleDriverTestCase.gvmv_side_effect - mock_obj = {'payload': {}} - mock_mdp.return_value = (mock_obj, None) - mock_srt.return_value = ('{1234}', '192.0.2.2') - fake_volume, fake_snapshot = _stub_volume(), _stub_snapshot() - metadata = { - 'current_dn_owner': '{1234}', - 'Potential_secondary_key': '{1234}', - 'Primary_datanode_ip': '192.0.2.1', - 'Potential_secondary_ip': '192.0.2.2', - 'current_dn_ip': '192.0.2.1', - 'snapshot_id': fake_snapshot['id'], - 'parent_volume_guid': '{' + fake_snapshot['volume']['id'] + '}' - } - self.assertDictContainsSubset({ - 'provider_location': 'hyperscale-sv:/hyperscale', - 'metadata': metadata - }, self.driver.create_volume_from_snapshot(fake_volume, fake_snapshot)) - - def test_initialize_connection(self): - """Test case intialize_connection.""" - fake_volume = _stub_volume() - expected_data = { - 'driver_volume_type': 'veritas_hyperscale', - 'data': { - 'export': fake_volume['provider_location'], - 'name': fake_volume['name'] - } - } - self.assertEqual(expected_data, - self.driver.initialize_connection(fake_volume, None)) - - 
@mock.patch('cinder.volume.drivers.veritas.utils' - '.message_compute_plane') - @mock.patch('cinder.volume.drivers.veritas.utils' - '.episodic_snap') - @mock.patch('cinder.volume.drivers.veritas.vrtshyperscale.HyperScaleDriver' - '._get_volume_metadata_value') - def test_create_snapshot_with_exception( - self, mock_gvmv, mock_es, mock_mcp): - """Test case create snapshot throws exception.""" - mock_gvmv.side_effect = VRTSHyperScaleDriverTestCase.gvmv_side_effect - mock_es_obj = {'payload': {'update': False}} - mock_es.return_value = mock_es_obj - mock_mcp.side_effect = exception.UnableToExecuteHyperScaleCmd( - cmd_out='mock error') - fake_snapshot = _stub_snapshot() - self.assertRaises(exception.UnableToExecuteHyperScaleCmd, - self.driver.create_snapshot, fake_snapshot) - - @mock.patch('cinder.volume.drivers.veritas.utils' - '.message_controller') - @mock.patch('cinder.volume.drivers.veritas.utils' - '.message_data_plane') - @mock.patch('cinder.volume.drivers.veritas.utils' - '.episodic_snap') - @mock.patch('cinder.volume.drivers.veritas.vrtshyperscale.HyperScaleDriver' - '._get_volume_metadata_value') - @mock.patch('cinder.volume.drivers.veritas.utils' - '.message_compute_plane') - def test_create_snapshot_user( - self, mock_cdp, mock_gvmv, mock_es, mock_mdp, mock_mc): - """Test case user snapshot.""" - mock_gvmv.side_effect = VRTSHyperScaleDriverTestCase.gvmv_side_effect - mock_es_obj = {'payload': {'update': False}} - mock_es.return_value = mock_es_obj - mock_obj = {'payload': {}} - mock_mdp.return_value = ("", None) - mock_mc.return_value = ("", None) - mock_cdp.return_value = (mock_obj, None) - fake_snapshot = _stub_snapshot() - expected = { - 'metadata': { - 'status': 'creating', - 'datanode_ip': '192.0.2.1', - 'TYPE': vrts.TYPE_USER_SNAP - } - } - self.assertEqual(expected, self.driver.create_snapshot(fake_snapshot)) diff --git a/cinder/tests/unit/volume/drivers/test_vzstorage.py b/cinder/tests/unit/volume/drivers/test_vzstorage.py deleted file mode 100644 
index d7d2a5531..000000000 --- a/cinder/tests/unit/volume/drivers/test_vzstorage.py +++ /dev/null @@ -1,368 +0,0 @@ -# Copyright 2015 Odin -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import copy -import errno -import os - -import mock - -from os_brick.remotefs import remotefs -from oslo_utils import units - -from cinder import context -from cinder import exception -from cinder.image import image_utils -from cinder import test -from cinder.tests.unit import fake_snapshot -from cinder.tests.unit import fake_volume -from cinder.volume.drivers import vzstorage - - -_orig_path_exists = os.path.exists - - -class VZStorageTestCase(test.TestCase): - - _FAKE_SHARE = "10.0.0.1,10.0.0.2:/cluster123:123123" - _FAKE_MNT_BASE = '/mnt' - _FAKE_MNT_POINT = os.path.join(_FAKE_MNT_BASE, 'fake_hash') - _FAKE_VOLUME_NAME = 'volume-4f711859-4928-4cb7-801a-a50c37ceaccc' - _FAKE_VOLUME_PATH = os.path.join(_FAKE_MNT_POINT, _FAKE_VOLUME_NAME) - _FAKE_SNAPSHOT_ID = '50811859-4928-4cb7-801a-a50c37ceacba' - _FAKE_SNAPSHOT_PATH = ( - _FAKE_VOLUME_PATH + '-snapshot' + _FAKE_SNAPSHOT_ID) - - _FAKE_VZ_CONFIG = mock.MagicMock() - _FAKE_VZ_CONFIG.vzstorage_shares_config = '/fake/config/path' - _FAKE_VZ_CONFIG.vzstorage_sparsed_volumes = False - _FAKE_VZ_CONFIG.vzstorage_used_ratio = 0.7 - _FAKE_VZ_CONFIG.vzstorage_mount_point_base = _FAKE_MNT_BASE - _FAKE_VZ_CONFIG.vzstorage_default_volume_format = 'raw' - _FAKE_VZ_CONFIG.nas_secure_file_operations = 'auto' - 
_FAKE_VZ_CONFIG.nas_secure_file_permissions = 'auto' - - def setUp(self): - super(VZStorageTestCase, self).setUp() - - self._remotefsclient = mock.patch.object(remotefs, - 'RemoteFsClient').start() - get_mount_point = mock.Mock(return_value=self._FAKE_MNT_POINT) - self._remotefsclient.get_mount_point = get_mount_point - cfg = copy.copy(self._FAKE_VZ_CONFIG) - self._vz_driver = vzstorage.VZStorageDriver(configuration=cfg) - self._vz_driver._local_volume_dir = mock.Mock( - return_value=self._FAKE_MNT_POINT) - self._vz_driver._execute = mock.Mock() - self._vz_driver.base = self._FAKE_MNT_BASE - - self.context = context.get_admin_context() - vol_type = fake_volume.fake_volume_type_obj(self.context) - vol_type.extra_specs = {} - _FAKE_VOLUME = {'id': '4f711859-4928-4cb7-801a-a50c37ceaccc', - 'size': 1, - 'provider_location': self._FAKE_SHARE, - 'name': self._FAKE_VOLUME_NAME, - 'status': 'available'} - self.vol = fake_volume.fake_volume_obj(self.context, - volume_type_id=vol_type.id, - **_FAKE_VOLUME) - self.vol.volume_type = vol_type - - _FAKE_SNAPSHOT = {'id': self._FAKE_SNAPSHOT_ID, - 'status': 'available', - 'volume_size': 1} - self.snap = fake_snapshot.fake_snapshot_obj(self.context, - **_FAKE_SNAPSHOT) - self.snap.volume = self.vol - - def _path_exists(self, path): - if path.startswith(self._FAKE_VZ_CONFIG.vzstorage_shares_config): - return True - return _orig_path_exists(path) - - def _path_dont_exists(self, path): - if path.startswith('/fake'): - return False - return _orig_path_exists(path) - - @mock.patch('os.path.exists') - def test_setup_ok(self, mock_exists): - mock_exists.side_effect = self._path_exists - self._vz_driver.do_setup(mock.sentinel.context) - - @mock.patch('os.path.exists') - def test_setup_missing_shares_conf(self, mock_exists): - mock_exists.side_effect = self._path_dont_exists - self.assertRaises(exception.VzStorageException, - self._vz_driver.do_setup, - mock.sentinel.context) - - @mock.patch('os.path.exists') - def 
test_setup_invalid_usage_ratio(self, mock_exists): - mock_exists.side_effect = self._path_exists - self._vz_driver.configuration.vzstorage_used_ratio = 1.2 - self.assertRaises(exception.VzStorageException, - self._vz_driver.do_setup, - mock.sentinel.context) - - @mock.patch('os.path.exists') - def test_setup_invalid_usage_ratio2(self, mock_exists): - mock_exists.side_effect = self._path_exists - self._vz_driver.configuration.vzstorage_used_ratio = 0 - self.assertRaises(exception.VzStorageException, - self._vz_driver.do_setup, - mock.sentinel.context) - - @mock.patch('os.path.exists') - def test_setup_invalid_mount_point_base(self, mock_exists): - mock_exists.side_effect = self._path_exists - conf = copy.copy(self._FAKE_VZ_CONFIG) - conf.vzstorage_mount_point_base = './tmp' - vz_driver = vzstorage.VZStorageDriver(configuration=conf) - self.assertRaises(exception.VzStorageException, - vz_driver.do_setup, - mock.sentinel.context) - - @mock.patch('os.path.exists') - def test_setup_no_vzstorage(self, mock_exists): - mock_exists.side_effect = self._path_exists - exc = OSError() - exc.errno = errno.ENOENT - self._vz_driver._execute.side_effect = exc - self.assertRaises(exception.VzStorageException, - self._vz_driver.do_setup, - mock.sentinel.context) - - def test_initialize_connection(self): - drv = self._vz_driver - file_format = 'raw' - info = mock.Mock() - info.file_format = file_format - snap_info = """{"volume_format": "raw", - "active": "%s"}""" % self.vol.id - with mock.patch.object(drv, '_qemu_img_info', return_value=info): - with mock.patch.object(drv, '_read_file', - return_value=snap_info): - ret = drv.initialize_connection(self.vol, None) - name = drv.get_active_image_from_info(self.vol) - expected = {'driver_volume_type': 'vzstorage', - 'data': {'export': self._FAKE_SHARE, - 'format': file_format, - 'name': name}, - 'mount_point_base': self._FAKE_MNT_BASE} - self.assertEqual(expected, ret) - - def test_ensure_share_mounted_invalid_share(self): - 
self.assertRaises(exception.VzStorageException, - self._vz_driver._ensure_share_mounted, ':') - - def test_ensure_share_mounted(self): - drv = self._vz_driver - share = self._FAKE_SHARE - drv.shares = {'1': '["1", "2", "3"]', share: '["some", "options"]'} - drv._ensure_share_mounted(share) - - def test_find_share(self): - drv = self._vz_driver - drv._mounted_shares = [self._FAKE_SHARE] - with mock.patch.object(drv, '_is_share_eligible', return_value=True): - ret = drv._find_share(self.vol) - self.assertEqual(self._FAKE_SHARE, ret) - - def test_find_share_no_shares_mounted(self): - drv = self._vz_driver - with mock.patch.object(drv, '_is_share_eligible', return_value=True): - self.assertRaises(exception.VzStorageNoSharesMounted, - drv._find_share, self.vol) - - def test_find_share_no_shares_suitable(self): - drv = self._vz_driver - drv._mounted_shares = [self._FAKE_SHARE] - with mock.patch.object(drv, '_is_share_eligible', return_value=False): - self.assertRaises(exception.VzStorageNoSuitableShareFound, - drv._find_share, self.vol) - - def test_is_share_eligible_false(self): - drv = self._vz_driver - cap_info = (100 * units.Gi, 40 * units.Gi, 60 * units.Gi) - with mock.patch.object(drv, '_get_capacity_info', - return_value=cap_info): - ret = drv._is_share_eligible(self._FAKE_SHARE, 50) - self.assertFalse(ret) - - def test_is_share_eligible_true(self): - drv = self._vz_driver - cap_info = (100 * units.Gi, 40 * units.Gi, 60 * units.Gi) - with mock.patch.object(drv, '_get_capacity_info', - return_value=cap_info): - ret = drv._is_share_eligible(self._FAKE_SHARE, 30) - self.assertTrue(ret) - - @mock.patch.object(image_utils, 'resize_image') - def test_extend_volume(self, mock_resize_image): - drv = self._vz_driver - drv._check_extend_volume_support = mock.Mock(return_value=True) - drv._is_file_size_equal = mock.Mock(return_value=True) - - snap_info = '{"active": "%s"}' % self.vol.id - with mock.patch.object(drv, 'get_volume_format', - return_value="raw"): - with 
mock.patch.object(drv, 'local_path', - return_value=self._FAKE_VOLUME_PATH): - with mock.patch.object(drv, '_read_file', - return_value=snap_info): - drv.extend_volume(self.vol, 10) - - mock_resize_image.assert_called_once_with(self._FAKE_VOLUME_PATH, 10) - - def _test_check_extend_support(self, has_snapshots=False, - is_eligible=True): - drv = self._vz_driver - drv.local_path = mock.Mock(return_value=self._FAKE_VOLUME_PATH) - drv._is_share_eligible = mock.Mock(return_value=is_eligible) - - if has_snapshots: - active = self._FAKE_SNAPSHOT_PATH - else: - active = self._FAKE_VOLUME_PATH - - drv.get_active_image_from_info = mock.Mock(return_value=active) - if has_snapshots: - self.assertRaises(exception.InvalidVolume, - drv._check_extend_volume_support, - self.vol, 2) - elif not is_eligible: - self.assertRaises(exception.ExtendVolumeError, - drv._check_extend_volume_support, - self.vol, 2) - else: - drv._check_extend_volume_support(self.vol, 2) - drv._is_share_eligible.assert_called_once_with(self._FAKE_SHARE, 1) - - def test_check_extend_support(self): - self._test_check_extend_support() - - def test_check_extend_volume_with_snapshots(self): - self._test_check_extend_support(has_snapshots=True) - - def test_check_extend_volume_uneligible_share(self): - self._test_check_extend_support(is_eligible=False) - - @mock.patch.object(image_utils, 'convert_image') - def test_copy_volume_from_snapshot(self, mock_convert_image): - drv = self._vz_driver - - fake_volume_info = {self._FAKE_SNAPSHOT_ID: 'fake_snapshot_file_name', - 'backing-files': - {self._FAKE_SNAPSHOT_ID: - self._FAKE_VOLUME_NAME}} - fake_img_info = mock.MagicMock() - fake_img_info.backing_file = self._FAKE_VOLUME_NAME - - drv.get_volume_format = mock.Mock(return_value='raw') - drv._local_path_volume_info = mock.Mock( - return_value=self._FAKE_VOLUME_PATH + '.info') - drv._local_volume_dir = mock.Mock( - return_value=self._FAKE_MNT_POINT) - drv._read_info_file = mock.Mock( - return_value=fake_volume_info) - 
drv._qemu_img_info = mock.Mock( - return_value=fake_img_info) - drv.local_path = mock.Mock( - return_value=self._FAKE_VOLUME_PATH[:-1]) - drv._extend_volume = mock.Mock() - - drv._copy_volume_from_snapshot( - self.snap, self.vol, - self.vol['size']) - drv._extend_volume.assert_called_once_with( - self.vol, self.vol['size'], 'raw') - mock_convert_image.assert_called_once_with( - self._FAKE_VOLUME_PATH, self._FAKE_VOLUME_PATH[:-1], 'raw') - - def test_delete_volume(self): - drv = self._vz_driver - fake_vol_info = self._FAKE_VOLUME_PATH + '.info' - - drv._ensure_share_mounted = mock.MagicMock() - fake_ensure_mounted = drv._ensure_share_mounted - - drv._local_volume_dir = mock.Mock( - return_value=self._FAKE_MNT_POINT) - drv.get_active_image_from_info = mock.Mock( - return_value=self._FAKE_VOLUME_NAME) - drv._delete = mock.Mock() - drv._local_path_volume_info = mock.Mock( - return_value=fake_vol_info) - - with mock.patch('os.path.exists', lambda x: True): - drv.delete_volume(self.vol) - - fake_ensure_mounted.assert_called_once_with(self._FAKE_SHARE) - drv._delete.assert_any_call( - self._FAKE_VOLUME_PATH) - drv._delete.assert_any_call(fake_vol_info) - - @mock.patch('cinder.volume.drivers.remotefs.RemoteFSSnapDriverBase.' 
- '_write_info_file') - def test_delete_snapshot_ploop(self, _mock_write_info_file): - fake_snap_info = { - 'active': self._FAKE_VOLUME_NAME, - self._FAKE_SNAPSHOT_ID: self._FAKE_SNAPSHOT_PATH, - } - self._vz_driver.get_volume_format = mock.Mock( - return_value=vzstorage.DISK_FORMAT_PLOOP) - self._vz_driver._read_info_file = mock.Mock( - return_value=fake_snap_info - ) - self._vz_driver._get_desc_path = mock.Mock( - return_value='%s/DiskDescriptor.xml' % self._FAKE_VOLUME_PATH - ) - self._vz_driver.delete_snapshot(self.snap) - self._vz_driver._execute.assert_called_once_with( - 'ploop', 'snapshot-delete', '-u', - '{%s}' % self._FAKE_SNAPSHOT_ID, - '%s/DiskDescriptor.xml' % self._FAKE_VOLUME_PATH, - run_as_root=True - ) - - @mock.patch('cinder.volume.drivers.remotefs.RemoteFSSnapDriverBase.' - '_delete_snapshot') - def test_delete_snapshot_qcow2_invalid_snap_info(self, - mock_delete_snapshot): - fake_snap_info = { - 'active': self._FAKE_VOLUME_NAME, - } - self._vz_driver.get_volume_format = mock.Mock( - return_value=vzstorage.DISK_FORMAT_QCOW2) - self._vz_driver._read_info_file = mock.Mock( - return_value=fake_snap_info - ) - self._vz_driver.delete_snapshot(self.snap) - self.assertFalse(mock_delete_snapshot.called) - - def test_extend_volume_ploop(self): - drv = self._vz_driver - drv.local_path = mock.Mock( - return_value=self._FAKE_VOLUME_PATH) - drv.get_volume_format = mock.Mock( - return_value=vzstorage.DISK_FORMAT_PLOOP) - drv._is_share_eligible = mock.Mock( - return_value=True) - drv.extend_volume(self.vol, 100) - drv._execute.assert_called_once_with( - 'ploop', 'resize', '-s', '100G', - '%s/DiskDescriptor.xml' % self._FAKE_VOLUME_PATH, - run_as_root=True) diff --git a/cinder/tests/unit/volume/drivers/test_xio.py b/cinder/tests/unit/volume/drivers/test_xio.py deleted file mode 100644 index 7a7ebb3b9..000000000 --- a/cinder/tests/unit/volume/drivers/test_xio.py +++ /dev/null @@ -1,1478 +0,0 @@ -# Copyright (c) 2014 X-IO Technologies. -# All Rights Reserved. 
-# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import mock -from six.moves import http_client - -from cinder import context -from cinder import exception -from cinder import test -from cinder.tests.unit import utils -from cinder.volume.drivers import xio -from cinder.volume import qos_specs -from cinder.volume import volume_types - - -ISE_IP1 = '10.12.12.1' -ISE_IP2 = '10.11.12.2' -ISE_ISCSI_IP1 = '1.2.3.4' -ISE_ISCSI_IP2 = '1.2.3.5' - -ISE_GID = 'isegid' -ISE_IQN = ISE_GID -ISE_WWN1 = ISE_GID + '1' -ISE_WWN2 = ISE_GID + '2' -ISE_WWN3 = ISE_GID + '3' -ISE_WWN4 = ISE_GID + '4' -ISE_TARGETS = [ISE_WWN1, ISE_WWN2, ISE_WWN3, ISE_WWN4] -ISE_INIT_TARGET_MAP = {'init_wwn1': ISE_TARGETS, - 'init_wwn2': ISE_TARGETS} - -VOLUME_SIZE = 10 -NEW_VOLUME_SIZE = 20 - -VOLUME1 = {'id': '1', 'name': 'volume1', - 'size': VOLUME_SIZE, 'volume_type_id': 'type1'} - -VOLUME2 = {'id': '2', 'name': 'volume2', - 'size': VOLUME_SIZE, 'volume_type_id': 'type2', - 'provider_auth': 'CHAP abc abc'} - -VOLUME3 = {'id': '3', 'name': 'volume3', - 'size': VOLUME_SIZE, 'volume_type_id': None} - -SNAPSHOT1 = {'name': 'snapshot1', - 'volume_name': VOLUME1['name'], - 'volume_type_id': 'type3'} - -CLONE1 = {'id': '3', 'name': 'clone1', - 'size': VOLUME_SIZE, 'volume_type_id': 'type4'} - -HOST1 = 'host1' - -HOST2 = 'host2' - -ISCSI_CONN1 = {'initiator': 'init_iqn1', - 'host': HOST1} - -ISCSI_CONN2 = {'initiator': 'init_iqn2', - 'host': HOST2} - -FC_CONN1 = {'wwpns': ['init_wwn1', 'init_wwn2'], - 'host': 
HOST1} - -FC_CONN2 = {'wwpns': ['init_wwn3', 'init_wwn4'], - 'host': HOST2} - -ISE_HTTP_IP = 'http://' + ISE_IP1 - -ISE_HOST_LOCATION = '/storage/hosts/1' -ISE_HOST_LOCATION_URL = ISE_HTTP_IP + ISE_HOST_LOCATION - -ISE_VOLUME1_LOCATION = '/storage/volumes/volume1' -ISE_VOLUME1_LOCATION_URL = ISE_HTTP_IP + ISE_VOLUME1_LOCATION -ISE_VOLUME2_LOCATION = '/storage/volumes/volume2' -ISE_VOLUME2_LOCATION_URL = ISE_HTTP_IP + ISE_VOLUME2_LOCATION -ISE_VOLUME3_LOCATION = '/storage/volumes/volume3' -ISE_VOLUME3_LOCATION_URL = ISE_HTTP_IP + ISE_VOLUME3_LOCATION - -ISE_SNAPSHOT_LOCATION = '/storage/volumes/snapshot1' -ISE_SNAPSHOT_LOCATION_URL = ISE_HTTP_IP + ISE_SNAPSHOT_LOCATION - -ISE_CLONE_LOCATION = '/storage/volumes/clone1' -ISE_CLONE_LOCATION_URL = ISE_HTTP_IP + ISE_CLONE_LOCATION - -ISE_ALLOCATION_LOCATION = '/storage/allocations/a1' -ISE_ALLOCATION_LOCATION_URL = ISE_HTTP_IP + ISE_ALLOCATION_LOCATION - -ISE_GET_QUERY_XML =\ - """ - ABC12345 - - - - - - - - - - %s - - - - %s - - - - """ % (ISE_IP1, ISE_IP2) - -ISE_GET_QUERY_RESP =\ - {'status': http_client.OK, - 'location': '', - 'content': " ".join(ISE_GET_QUERY_XML.split())} - -ISE_GET_QUERY_NO_CAP_XML =\ - """ - ABC12345 - - - %s - - - - %s - - - - """ % (ISE_IP1, ISE_IP2) - -ISE_GET_QUERY_NO_CAP_RESP =\ - {'status': http_client.OK, - 'location': '', - 'content': " ".join(ISE_GET_QUERY_NO_CAP_XML.split())} - -ISE_GET_QUERY_NO_CTRL_XML =\ - """ - ABC12345 - - - - - - - - - - """ - -ISE_GET_QUERY_NO_CTRL_RESP =\ - {'status': http_client.OK, - 'location': '', - 'content': " ".join(ISE_GET_QUERY_NO_CTRL_XML.split())} - -ISE_GET_QUERY_NO_IP_XML =\ - """ - ABC12345 - - - - - - - - - - - - - - - - - - - - """ - -ISE_GET_QUERY_NO_IP_RESP =\ - {'status': http_client.OK, - 'location': '', - 'content': " ".join(ISE_GET_QUERY_NO_IP_XML.split())} - -ISE_GET_QUERY_NO_GID_XML =\ - """ - - - - - - - - - - %s - - - - %s - - - - """ % (ISE_IP1, ISE_IP2) - -ISE_GET_QUERY_NO_GID_RESP =\ - {'status': http_client.OK, - 'location': '', - 
'content': " ".join(ISE_GET_QUERY_NO_GID_XML.split())} - -ISE_GET_QUERY_NO_CLONE_XML =\ - """ - ABC12345 - - - - - - - - - %s - - - - %s - - - - """ % (ISE_IP1, ISE_IP2) - -ISE_GET_QUERY_NO_CLONE_RESP =\ - {'status': http_client.OK, - 'location': '', - 'content': " ".join(ISE_GET_QUERY_NO_CLONE_XML.split())} - -ISE_GET_STORAGE_POOLS_XML =\ - """ - - - Pool 1 - 1 - -
- None -
-
- - - 60 - 30 - 45 - - - - - 0 - 40 - 0 - - - - - 100 - - - - - - volgid - - - volgid2 - - -
-
- """ - -ISE_GET_STORAGE_POOLS_RESP =\ - {'status': http_client.OK, - 'location': 'Pool location', - 'content': " ".join(ISE_GET_STORAGE_POOLS_XML.split())} - -ISE_GET_VOL_STATUS_NO_VOL_NODE_XML =\ - """""" - -ISE_GET_VOL_STATUS_NO_VOL_NODE_RESP =\ - {'status': http_client.OK, - 'location': 'u%s' % ISE_VOLUME1_LOCATION_URL, - 'content': " ".join(ISE_GET_VOL_STATUS_NO_VOL_NODE_XML.split())} - -ISE_GET_VOL_STATUS_NO_STATUS_XML =\ - """ - - - """ % (ISE_VOLUME1_LOCATION_URL) - -ISE_GET_VOL_STATUS_NO_STATUS_RESP =\ - {'status': http_client.OK, - 'location': 'u%s' % ISE_VOLUME1_LOCATION_URL, - 'content': " ".join(ISE_GET_VOL_STATUS_NO_STATUS_XML.split())} - -ISE_GET_VOL1_STATUS_XML =\ - """ - - -
- Prepared -
-
- 10 -
-
""" % (ISE_VOLUME1_LOCATION_URL) - -ISE_GET_VOL1_STATUS_RESP =\ - {'status': http_client.OK, - 'location': 'u%s' % ISE_VOLUME1_LOCATION_URL, - 'content': " ".join(ISE_GET_VOL1_STATUS_XML.split())} - -ISE_GET_VOL2_STATUS_XML =\ - """ - - -
- Prepared -
-
-
-
""" % (ISE_VOLUME2_LOCATION_URL) - -ISE_GET_VOL2_STATUS_RESP =\ - {'status': http_client.OK, - 'location': 'u%s' % ISE_VOLUME2_LOCATION_URL, - 'content': " ".join(ISE_GET_VOL2_STATUS_XML.split())} - -ISE_GET_VOL3_STATUS_XML =\ - """ - - -
- Prepared -
-
-
-
""" % (ISE_VOLUME3_LOCATION_URL) - -ISE_GET_VOL3_STATUS_RESP =\ - {'status': http_client.OK, - 'location': 'u%s' % ISE_VOLUME3_LOCATION_URL, - 'content': " ".join(ISE_GET_VOL3_STATUS_XML.split())} - -ISE_GET_SNAP1_STATUS_XML =\ - """ - - -
- Prepared -
-
-
-
""" % (ISE_SNAPSHOT_LOCATION_URL) - -ISE_GET_SNAP1_STATUS_RESP =\ - {'status': http_client.OK, - 'location': 'u%s' % ISE_SNAPSHOT_LOCATION_URL, - 'content': " ".join(ISE_GET_SNAP1_STATUS_XML.split())} - -ISE_GET_CLONE1_STATUS_XML =\ - """ - - -
- Prepared -
-
-
-
""" % (ISE_CLONE_LOCATION_URL) - -ISE_GET_CLONE1_STATUS_RESP =\ - {'status': http_client.OK, - 'location': 'u%s' % ISE_CLONE_LOCATION_URL, - 'content': " ".join(ISE_GET_CLONE1_STATUS_XML.split())} - -ISE_CREATE_VOLUME_XML = """""" - -ISE_CREATE_VOLUME_RESP =\ - {'status': http_client.CREATED, - 'location': ISE_VOLUME1_LOCATION_URL, - 'content': " ".join(ISE_CREATE_VOLUME_XML.split())} - -ISE_GET_IONETWORKS_XML =\ - """ - - - - - - - - - """ - -ISE_GET_IONETWORKS_RESP =\ - {'status': http_client.OK, - 'location': '', - 'content': " ".join(ISE_GET_IONETWORKS_XML.split())} - -ISE_GET_IONETWORKS_CHAP_XML =\ - """ - - abc - abc - - - - - - """ - -ISE_GET_IONETWORKS_CHAP_RESP =\ - {'status': http_client.OK, - 'location': '', - 'content': " ".join(ISE_GET_IONETWORKS_CHAP_XML.split())} - -ISE_DELETE_VOLUME_XML = """""" - -ISE_DELETE_VOLUME_RESP =\ - {'status': http_client.NO_CONTENT, - 'location': '', - 'content': " ".join(ISE_DELETE_VOLUME_XML.split())} - -ISE_GET_ALLOC_WITH_EP_XML =\ - """ - - - %s - - - %s - - 1 - - """ %\ - (ISE_ALLOCATION_LOCATION_URL, VOLUME1['name'], HOST1) - -ISE_GET_ALLOC_WITH_EP_RESP =\ - {'status': http_client.OK, - 'location': ISE_ALLOCATION_LOCATION_URL, - 'content': " ".join(ISE_GET_ALLOC_WITH_EP_XML.split())} - -ISE_GET_ALLOC_WITH_NO_ALLOC_XML =\ - """""" % ISE_ALLOCATION_LOCATION_URL - -ISE_GET_ALLOC_WITH_NO_ALLOC_RESP =\ - {'status': http_client.OK, - 'location': ISE_ALLOCATION_LOCATION_URL, - 'content': " ".join(ISE_GET_ALLOC_WITH_NO_ALLOC_XML.split())} - -ISE_DELETE_ALLOC_XML = """""" - -ISE_DELETE_ALLOC_RESP =\ - {'status': http_client.NO_CONTENT, - 'location': '', - 'content': " ".join(ISE_DELETE_ALLOC_XML.split())} - -ISE_GET_HOSTS_NOHOST_XML =\ - """""" - -ISE_GET_HOSTS_NOHOST_RESP =\ - {'status': http_client.OK, - 'location': '', - 'content': " ".join(ISE_GET_HOSTS_NOHOST_XML.split())} - -ISE_GET_HOSTS_HOST1_XML =\ - """ - - "OPENSTACK" - %s - 1 - - - init_wwn1 - - - init_wwn2 - - - init_iqn1 - - - - """ % HOST1 - 
-ISE_GET_HOSTS_HOST1_RESP =\ - {'status': http_client.OK, - 'location': '', - 'content': " ".join(ISE_GET_HOSTS_HOST1_XML.split())} - -ISE_GET_HOSTS_HOST1_HOST_TYPE_XML =\ - """ - - "WINDOWS" - %s - 1 - - - init_wwn1 - - - init_wwn2 - - - init_iqn1 - - - - """ % HOST1 - -ISE_GET_HOSTS_HOST1_HOST_TYPE_RESP =\ - {'status': http_client.OK, - 'location': '', - 'content': " ".join(ISE_GET_HOSTS_HOST1_HOST_TYPE_XML.split())} - -ISE_GET_HOSTS_HOST2_XML =\ - """ - - %s - 2 - - - init_wwn3 - - - init_wwn4 - - - init_iqn2 - - - - """ % HOST2 - -ISE_GET_HOSTS_HOST2_RESP =\ - {'status': http_client.OK, - 'location': '', - 'content': " ".join(ISE_GET_HOSTS_HOST2_XML.split())} - -ISE_CREATE_HOST_XML =\ - """""" - -ISE_CREATE_HOST_RESP =\ - {'status': http_client.CREATED, - 'location': 'http://ip/storage/hosts/host1', - 'content': " ".join(ISE_CREATE_HOST_XML.split())} - -ISE_CREATE_ALLOC_XML =\ - """""" - -ISE_CREATE_ALLOC_RESP =\ - {'status': http_client.CREATED, - 'location': ISE_ALLOCATION_LOCATION_URL, - 'content': " ".join(ISE_CREATE_ALLOC_XML.split())} - -ISE_GET_ENDPOINTS_XML =\ - """ - - isegid - iSCSI - - ise1 - - - - - - a1 - - - - - - isegid - Fibre Channel - - ise1 - - - - - - a1 - - - - - """ % (ISE_ALLOCATION_LOCATION_URL, - ISE_ALLOCATION_LOCATION_URL) - -ISE_GET_ENDPOINTS_RESP =\ - {'status': http_client.OK, - 'location': '', - 'content': " ".join(ISE_GET_ENDPOINTS_XML.split())} - -ISE_GET_CONTROLLERS_XML =\ - """ - - - - - - %s - - - isegid - - - - - - %s - - - %s - - - - - - - - - %s - - - isegid - - - - - - %s - - - %s - - - - """ % (ISE_ISCSI_IP1, ISE_WWN1, ISE_WWN2, - ISE_ISCSI_IP2, ISE_WWN3, ISE_WWN4) - -ISE_GET_CONTROLLERS_RESP =\ - {'status': http_client.OK, - 'location': '', - 'content': " ".join(ISE_GET_CONTROLLERS_XML.split())} - -ISE_CREATE_SNAPSHOT_XML = """""" - -ISE_CREATE_SNAPSHOT_RESP =\ - {'status': http_client.CREATED, - 'location': ISE_SNAPSHOT_LOCATION_URL, - 'content': " ".join(ISE_CREATE_SNAPSHOT_XML.split())} - -ISE_PREP_SNAPSHOT_XML = 
"""""" - -ISE_PREP_SNAPSHOT_RESP =\ - {'status': http_client.ACCEPTED, - 'location': ISE_SNAPSHOT_LOCATION_URL, - 'content': " ".join(ISE_PREP_SNAPSHOT_XML.split())} - -ISE_MODIFY_VOLUME_XML = """""" - -ISE_MODIFY_VOLUME_RESP =\ - {'status': http_client.CREATED, - 'location': ISE_VOLUME1_LOCATION_URL, - 'content': " ".join(ISE_MODIFY_VOLUME_XML.split())} - -ISE_MODIFY_HOST_XML = """""" - -ISE_MODIFY_HOST_RESP =\ - {'status': http_client.CREATED, - 'location': ISE_HOST_LOCATION_URL, - 'content': " ".join(ISE_MODIFY_HOST_XML.split())} - -ISE_BAD_CONNECTION_RESP =\ - {'status': 0, - 'location': '', - 'content': " "} - -ISE_400_RESP =\ - {'status': http_client.BAD_REQUEST, - 'location': '', - 'content': ""} - -ISE_GET_VOL_STATUS_404_XML = \ - """VOLUME not found.""" - -ISE_GET_VOL_STATUS_404_RESP =\ - {'status': http_client.NOT_FOUND, - 'location': '', - 'content': " ".join(ISE_GET_VOL_STATUS_404_XML.split())} - -ISE_400_INVALID_STATE_XML = \ - """Not in a valid state.""" - -ISE_400_INVALID_STATE_RESP =\ - {'status': http_client.BAD_REQUEST, - 'location': '', - 'content': " ".join(ISE_400_INVALID_STATE_XML.split())} - -ISE_409_CONFLICT_XML = \ - """Conflict""" - -ISE_409_CONFLICT_RESP =\ - {'status': http_client.CONFLICT, - 'location': '', - 'content': " ".join(ISE_409_CONFLICT_XML.split())} - - -DRIVER = "cinder.volume.drivers.xio.XIOISEDriver" - - -@mock.patch(DRIVER + "._opener", autospec=True) -class XIOISEDriverTestCase(object): - - # Test cases for X-IO volume driver - - def setUp(self): - super(XIOISEDriverTestCase, self).setUp() - - # set good default values - self.configuration = mock.Mock() - self.configuration.san_ip = ISE_IP1 - self.configuration.san_user = 'fakeuser' - self.configuration.san_password = 'fakepass' - self.configuration.iscsi_ip_address = ISE_ISCSI_IP1 - self.configuration.driver_use_ssl = False - self.configuration.ise_completion_retries = 30 - self.configuration.ise_connection_retries = 5 - self.configuration.ise_retry_interval = 1 - 
self.configuration.volume_backend_name = 'ise1' - self.driver = None - self.protocol = '' - self.connector = None - self.connection_failures = 0 - self.hostgid = '' - self.use_response_table = 1 - - def setup_test(self, protocol): - self.protocol = protocol - - # set good default values - if self.protocol == 'iscsi': - self.configuration.ise_protocol = protocol - self.connector = ISCSI_CONN1 - self.hostgid = self.connector['initiator'] - elif self.protocol == 'fibre_channel': - self.configuration.ise_protocol = protocol - self.connector = FC_CONN1 - self.hostgid = self.connector['wwpns'][0] - - def setup_driver(self): - # this setups up driver object with previously set configuration values - if self.configuration.ise_protocol == 'iscsi': - self.driver =\ - xio.XIOISEISCSIDriver(configuration=self.configuration) - elif self.configuration.ise_protocol == 'fibre_channel': - self.driver =\ - xio.XIOISEFCDriver(configuration=self.configuration) - elif self.configuration.ise_protocol == 'test_prot': - # if test_prot specified override with correct protocol - # used to bypass protocol specific driver - self.configuration.ise_protocol = self.protocol - self.driver = xio.XIOISEDriver(configuration=self.configuration) - else: - # Invalid protocol type - raise exception.Invalid() - -################################# -# UNIT TESTS # -################################# - def test_do_setup(self, mock_req): - self.setup_driver() - mock_req.side_effect = iter([ISE_GET_QUERY_RESP]) - self.driver.do_setup(None) - - def test_negative_do_setup_no_clone_support(self, mock_req): - self.setup_driver() - mock_req.side_effect = iter([ISE_GET_QUERY_NO_CLONE_RESP]) - self.assertRaises(exception.XIODriverException, - self.driver.do_setup, None) - - def test_negative_do_setup_no_capabilities(self, mock_req): - self.setup_driver() - mock_req.side_effect = iter([ISE_GET_QUERY_NO_CAP_RESP]) - self.assertRaises(exception.XIODriverException, - self.driver.do_setup, None) - - def 
test_negative_do_setup_no_ctrl(self, mock_req): - self.setup_driver() - mock_req.side_effect = iter([ISE_GET_QUERY_NO_CTRL_RESP]) - self.assertRaises(exception.XIODriverException, - self.driver.do_setup, None) - - def test_negative_do_setup_no_ipaddress(self, mock_req): - self.setup_driver() - mock_req.side_effect = iter([ISE_GET_QUERY_NO_IP_RESP]) - self.driver.do_setup(None) - - def test_negative_do_setup_bad_globalid_none(self, mock_req): - self.setup_driver() - mock_req.side_effect = iter([ISE_GET_QUERY_NO_GID_RESP]) - self.assertRaises(exception.XIODriverException, - self.driver.do_setup, None) - - def test_check_for_setup_error(self, mock_req): - mock_req.side_effect = iter([ISE_GET_QUERY_RESP]) - self.setup_driver() - self.driver.check_for_setup_error() - - def test_negative_do_setup_bad_ip(self, mock_req): - # set san_ip to bad value - self.configuration.san_ip = '' - mock_req.side_effect = iter([ISE_GET_QUERY_RESP]) - self.setup_driver() - self.assertRaises(exception.XIODriverException, - self.driver.check_for_setup_error) - - def test_negative_do_setup_bad_user_blank(self, mock_req): - # set san_user to bad value - self.configuration.san_login = '' - mock_req.side_effect = iter([ISE_GET_QUERY_RESP]) - self.setup_driver() - self.assertRaises(exception.XIODriverException, - self.driver.check_for_setup_error) - - def test_negative_do_setup_bad_password_blank(self, mock_req): - # set san_password to bad value - self.configuration.san_password = '' - mock_req.side_effect = iter([ISE_GET_QUERY_RESP]) - self.setup_driver() - self.assertRaises(exception.XIODriverException, - self.driver.check_for_setup_error) - - def test_get_volume_stats(self, mock_req): - self.setup_driver() - mock_req.side_effect = iter([ISE_GET_QUERY_RESP, - ISE_GET_STORAGE_POOLS_RESP]) - - backend_name = self.configuration.volume_backend_name - if self.configuration.ise_protocol == 'iscsi': - protocol = 'iSCSI' - else: - protocol = 'fibre_channel' - exp_result = {'vendor_name': "X-IO", - 
'driver_version': "1.1.4", - 'volume_backend_name': backend_name, - 'reserved_percentage': 0, - 'total_capacity_gb': 100, - 'free_capacity_gb': 60, - 'QoS_support': True, - 'affinity': True, - 'thin': False, - 'pools': [{'pool_ise_name': "Pool 1", - 'pool_name': "1", - 'status': "Operational", - 'status_details': "None", - 'free_capacity_gb': 60, - 'free_capacity_gb_raid_0': 60, - 'free_capacity_gb_raid_1': 30, - 'free_capacity_gb_raid_5': 45, - 'allocated_capacity_gb': 40, - 'allocated_capacity_gb_raid_0': 0, - 'allocated_capacity_gb_raid_1': 40, - 'allocated_capacity_gb_raid_5': 0, - 'health': 100, - 'media': "Hybrid", - 'total_capacity_gb': 100, - 'QoS_support': True, - 'reserved_percentage': 0}], - 'active_volumes': 2, - 'storage_protocol': protocol} - - act_result = self.driver.get_volume_stats(True) - self.assertDictEqual(exp_result, act_result) - - def test_get_volume_stats_ssl(self, mock_req): - self.configuration.driver_use_ssl = True - self.setup_driver() - mock_req.side_effect = iter([ISE_GET_QUERY_RESP, - ISE_GET_STORAGE_POOLS_RESP]) - self.driver.get_volume_stats(True) - - def test_negative_get_volume_stats_bad_primary(self, mock_req): - self.configuration.ise_connection_retries = 1 - self.setup_driver() - mock_req.side_effect = iter([ISE_GET_QUERY_RESP, - ISE_BAD_CONNECTION_RESP, - ISE_GET_STORAGE_POOLS_RESP]) - self.driver.get_volume_stats(True) - - def test_create_volume(self, mock_req): - ctxt = context.get_admin_context() - extra_specs = {"Feature:Pool": "1", - "Feature:Raid": "1", - "Affinity:Type": "flash", - "Alloc:Type": "thick"} - type_ref = volume_types.create(ctxt, 'VT1', extra_specs) - specs = {'qos:minIOPS': '20', - 'qos:maxIOPS': '2000', - 'qos:burstIOPS': '5000'} - qos = qos_specs.create(ctxt, 'fake-qos', specs) - qos_specs.associate_qos_with_type(ctxt, qos['id'], type_ref['id']) - VOLUME1['volume_type_id'] = type_ref['id'] - self.setup_driver() - if self.configuration.ise_protocol == 'iscsi': - mock_req.side_effect = 
iter([ISE_GET_QUERY_RESP, - ISE_CREATE_VOLUME_RESP, - ISE_GET_VOL1_STATUS_RESP, - ISE_GET_IONETWORKS_RESP]) - exp_result = {} - exp_result = {"provider_auth": ""} - act_result = self.driver.create_volume(VOLUME1) - self.assertDictEqual(exp_result, act_result) - elif self.configuration.ise_protocol == 'fibre_channel': - mock_req.side_effect = iter([ISE_GET_QUERY_RESP, - ISE_CREATE_VOLUME_RESP, - ISE_GET_VOL1_STATUS_RESP]) - self.driver.create_volume(VOLUME1) - - def test_create_volume_chap(self, mock_req): - ctxt = context.get_admin_context() - extra_specs = {"Feature:Pool": "1", - "Feature:Raid": "1", - "Affinity:Type": "flash", - "Alloc:Type": "thick"} - type_ref = volume_types.create(ctxt, 'VT1', extra_specs) - specs = {'qos:minIOPS': '20', - 'qos:maxIOPS': '2000', - 'qos:burstIOPS': '5000'} - qos = qos_specs.create(ctxt, 'fake-qos', specs) - qos_specs.associate_qos_with_type(ctxt, qos['id'], type_ref['id']) - VOLUME1['volume_type_id'] = type_ref['id'] - self.setup_driver() - if self.configuration.ise_protocol == 'iscsi': - mock_req.side_effect = iter([ISE_GET_QUERY_RESP, - ISE_CREATE_VOLUME_RESP, - ISE_GET_VOL1_STATUS_RESP, - ISE_GET_IONETWORKS_CHAP_RESP]) - exp_result = {} - exp_result = {"provider_auth": "CHAP abc abc"} - act_result = self.driver.create_volume(VOLUME1) - self.assertDictEqual(exp_result, act_result) - elif self.configuration.ise_protocol == 'fibre_channel': - mock_req.side_effect = iter([ISE_GET_QUERY_RESP, - ISE_CREATE_VOLUME_RESP, - ISE_GET_VOL1_STATUS_RESP]) - self.driver.create_volume(VOLUME1) - - def test_create_volume_type_none(self, mock_req): - self.setup_driver() - if self.configuration.ise_protocol == 'iscsi': - mock_req.side_effect = iter([ISE_GET_QUERY_RESP, - ISE_CREATE_VOLUME_RESP, - ISE_GET_VOL1_STATUS_RESP, - ISE_GET_IONETWORKS_RESP]) - elif self.configuration.ise_protocol == 'fibre_channel': - mock_req.side_effect = iter([ISE_GET_QUERY_RESP, - ISE_CREATE_VOLUME_RESP, - ISE_GET_VOL1_STATUS_RESP]) - 
self.driver.create_volume(VOLUME3) - - def test_delete_volume(self, mock_req): - mock_req.side_effect = iter([ISE_GET_QUERY_RESP, - ISE_GET_ALLOC_WITH_EP_RESP, - ISE_DELETE_ALLOC_RESP, - ISE_GET_VOL1_STATUS_RESP, - ISE_DELETE_VOLUME_RESP, - ISE_GET_VOL_STATUS_404_RESP]) - self.setup_driver() - self.driver.delete_volume(VOLUME1) - - def test_delete_volume_delayed(self, mock_req): - mock_req.side_effect = iter([ISE_GET_QUERY_RESP, - ISE_GET_ALLOC_WITH_EP_RESP, - ISE_DELETE_ALLOC_RESP, - ISE_GET_VOL1_STATUS_RESP, - ISE_DELETE_VOLUME_RESP, - ISE_GET_VOL1_STATUS_RESP, - ISE_GET_VOL1_STATUS_RESP, - ISE_GET_VOL_STATUS_404_RESP]) - self.setup_driver() - self.driver.delete_volume(VOLUME1) - - def test_delete_volume_timeout(self, mock_req): - mock_req.side_effect = iter([ISE_GET_QUERY_RESP, - ISE_GET_ALLOC_WITH_EP_RESP, - ISE_DELETE_ALLOC_RESP, - ISE_GET_VOL1_STATUS_RESP, - ISE_DELETE_VOLUME_RESP, - ISE_GET_VOL1_STATUS_RESP, - ISE_GET_VOL1_STATUS_RESP, - ISE_GET_VOL1_STATUS_RESP]) - - self.configuration.ise_completion_retries = 3 - self.setup_driver() - self.driver.delete_volume(VOLUME1) - - def test_delete_volume_none_existing(self, mock_req): - mock_req.side_effect = iter([ISE_GET_QUERY_RESP, - ISE_GET_ALLOC_WITH_EP_RESP, - ISE_DELETE_ALLOC_RESP, - ISE_GET_VOL1_STATUS_RESP]) - self.setup_driver() - self.driver.delete_volume(VOLUME2) - - def test_initialize_connection_positive(self, mock_req): - mock_req.side_effect = iter([ISE_GET_QUERY_RESP, - ISE_GET_HOSTS_HOST2_RESP, - ISE_CREATE_HOST_RESP, - ISE_GET_HOSTS_HOST1_RESP, - ISE_CREATE_ALLOC_RESP, - ISE_GET_ALLOC_WITH_EP_RESP, - ISE_GET_CONTROLLERS_RESP]) - self.setup_driver() - - exp_result = {} - if self.configuration.ise_protocol == 'iscsi': - exp_result = {"driver_volume_type": "iscsi", - "data": {"target_lun": 1, - "volume_id": '1', - "target_discovered": False, - "target_iqn": ISE_IQN, - "target_portal": ISE_ISCSI_IP1 + ":3260"}} - elif self.configuration.ise_protocol == 'fibre_channel': - exp_result = 
{"driver_volume_type": "fibre_channel", - "data": {"target_lun": 1, - "volume_id": '1', - "target_discovered": True, - "initiator_target_map": ISE_INIT_TARGET_MAP, - "target_wwn": ISE_TARGETS}} - - act_result =\ - self.driver.initialize_connection(VOLUME1, self.connector) - self.assertDictEqual(exp_result, act_result) - - def test_initialize_connection_positive_host_type(self, mock_req): - mock_req.side_effect = iter([ISE_GET_QUERY_RESP, - ISE_GET_HOSTS_HOST1_HOST_TYPE_RESP, - ISE_MODIFY_HOST_RESP, - ISE_CREATE_ALLOC_RESP, - ISE_GET_ALLOC_WITH_EP_RESP, - ISE_GET_CONTROLLERS_RESP]) - self.setup_driver() - - exp_result = {} - if self.configuration.ise_protocol == 'iscsi': - exp_result = {"driver_volume_type": "iscsi", - "data": {"target_lun": 1, - "volume_id": '1', - "target_discovered": False, - "target_iqn": ISE_IQN, - "target_portal": ISE_ISCSI_IP1 + ":3260"}} - elif self.configuration.ise_protocol == 'fibre_channel': - exp_result = {"driver_volume_type": "fibre_channel", - "data": {"target_lun": 1, - "volume_id": '1', - "target_discovered": True, - "initiator_target_map": ISE_INIT_TARGET_MAP, - "target_wwn": ISE_TARGETS}} - - act_result =\ - self.driver.initialize_connection(VOLUME1, self.connector) - self.assertDictEqual(exp_result, act_result) - - def test_initialize_connection_positive_chap(self, mock_req): - mock_req.side_effect = iter([ISE_GET_QUERY_RESP, - ISE_GET_HOSTS_HOST2_RESP, - ISE_CREATE_HOST_RESP, - ISE_GET_HOSTS_HOST1_RESP, - ISE_CREATE_ALLOC_RESP, - ISE_GET_ALLOC_WITH_EP_RESP, - ISE_GET_CONTROLLERS_RESP]) - self.setup_driver() - exp_result = {} - if self.configuration.ise_protocol == 'iscsi': - exp_result = {"driver_volume_type": "iscsi", - "data": {"target_lun": 1, - "volume_id": '2', - "target_discovered": False, - "target_iqn": ISE_IQN, - "target_portal": ISE_ISCSI_IP1 + ":3260", - 'auth_method': 'CHAP', - 'auth_username': 'abc', - 'auth_password': 'abc'}} - elif self.configuration.ise_protocol == 'fibre_channel': - exp_result = 
{"driver_volume_type": "fibre_channel", - "data": {"target_lun": 1, - "volume_id": '2', - "target_discovered": True, - "initiator_target_map": ISE_INIT_TARGET_MAP, - "target_wwn": ISE_TARGETS}} - - act_result =\ - self.driver.initialize_connection(VOLUME2, self.connector) - self.assertDictEqual(exp_result, act_result) - - def test_initialize_connection_negative_no_host(self, mock_req): - mock_req.side_effect = iter([ISE_GET_QUERY_RESP, - ISE_GET_HOSTS_HOST2_RESP, - ISE_CREATE_HOST_RESP, - ISE_GET_HOSTS_HOST2_RESP]) - self.setup_driver() - self.assertRaises(exception.XIODriverException, - self.driver.initialize_connection, - VOLUME2, self.connector) - - def test_initialize_connection_negative_host_type(self, mock_req): - mock_req.side_effect = iter([ISE_GET_QUERY_RESP, - ISE_GET_HOSTS_HOST1_HOST_TYPE_RESP, - ISE_400_RESP]) - self.setup_driver() - self.assertRaises(exception.XIODriverException, - self.driver.initialize_connection, - VOLUME2, self.connector) - - def test_terminate_connection_positive(self, mock_req): - self.setup_driver() - if self.configuration.ise_protocol == 'iscsi': - mock_req.side_effect = iter([ISE_GET_QUERY_RESP, - ISE_GET_HOSTS_HOST1_RESP, - ISE_GET_ALLOC_WITH_EP_RESP, - ISE_DELETE_ALLOC_RESP, - ISE_GET_ALLOC_WITH_EP_RESP, - ISE_GET_HOSTS_HOST1_RESP, - ISE_DELETE_ALLOC_RESP]) - elif self.configuration.ise_protocol == 'fibre_channel': - mock_req.side_effect = iter([ISE_GET_QUERY_RESP, - ISE_GET_HOSTS_HOST1_RESP, - ISE_GET_ALLOC_WITH_EP_RESP, - ISE_DELETE_ALLOC_RESP, - ISE_GET_ALLOC_WITH_EP_RESP, - ISE_GET_CONTROLLERS_RESP, - ISE_GET_HOSTS_HOST1_RESP, - ISE_DELETE_ALLOC_RESP]) - self.driver.terminate_connection(VOLUME1, self.connector) - - def test_terminate_connection_positive_noalloc(self, mock_req): - self.setup_driver() - if self.configuration.ise_protocol == 'iscsi': - mock_req.side_effect = iter([ISE_GET_QUERY_RESP, - ISE_GET_HOSTS_HOST1_RESP, - ISE_GET_ALLOC_WITH_NO_ALLOC_RESP, - ISE_GET_ALLOC_WITH_NO_ALLOC_RESP, - 
ISE_GET_HOSTS_HOST1_RESP, - ISE_DELETE_ALLOC_RESP]) - elif self.configuration.ise_protocol == 'fibre_channel': - mock_req.side_effect = iter([ISE_GET_QUERY_RESP, - ISE_GET_HOSTS_HOST1_RESP, - ISE_GET_ALLOC_WITH_NO_ALLOC_RESP, - ISE_GET_ALLOC_WITH_NO_ALLOC_RESP, - ISE_GET_CONTROLLERS_RESP, - ISE_GET_HOSTS_HOST1_RESP, - ISE_DELETE_ALLOC_RESP]) - self.driver.terminate_connection(VOLUME1, self.connector) - - def test_negative_terminate_connection_bad_host(self, mock_req): - self.setup_driver() - test_connector = {} - if self.configuration.ise_protocol == 'iscsi': - test_connector['initiator'] = 'bad_iqn' - test_connector['host'] = '' - mock_req.side_effect = iter([ISE_GET_QUERY_RESP, - ISE_GET_HOSTS_HOST1_RESP]) - elif self.configuration.ise_protocol == 'fibre_channel': - test_connector['wwpns'] = 'bad_wwn' - test_connector['host'] = '' - mock_req.side_effect = iter([ISE_GET_QUERY_RESP, - ISE_GET_HOSTS_HOST1_RESP, - ISE_GET_CONTROLLERS_RESP]) - - self.driver.terminate_connection(VOLUME1, test_connector) - - def test_create_snapshot(self, mock_req): - ctxt = context.get_admin_context() - extra_specs = {"Feature:Pool": "1", - "Feature:Raid": "1", - "Affinity:Type": "flash", - "Alloc:Type": "thick"} - type_ref = volume_types.create(ctxt, 'VT1', extra_specs) - specs = {'qos:minIOPS': '20', - 'qos:maxIOPS': '2000', - 'qos:burstIOPS': '5000'} - qos = qos_specs.create(ctxt, 'fake-qos', specs) - qos_specs.associate_qos_with_type(ctxt, qos['id'], type_ref['id']) - SNAPSHOT1['volume_type_id'] = type_ref['id'] - - mock_req.side_effect = iter([ISE_GET_QUERY_RESP, - ISE_GET_VOL1_STATUS_RESP, - ISE_PREP_SNAPSHOT_RESP, - ISE_GET_SNAP1_STATUS_RESP, - ISE_CREATE_SNAPSHOT_RESP, - ISE_GET_SNAP1_STATUS_RESP]) - self.setup_driver() - self.driver.create_snapshot(SNAPSHOT1) - - @mock.patch('oslo_service.loopingcall.FixedIntervalLoopingCall', - new=utils.ZeroIntervalLoopingCall) - def test_negative_create_snapshot_invalid_state_recover(self, mock_req): - ctxt = context.get_admin_context() - 
extra_specs = {"Feature:Pool": "1", - "Feature:Raid": "1", - "Affinity:Type": "flash", - "Alloc:Type": "thick"} - type_ref = volume_types.create(ctxt, 'VT1', extra_specs) - specs = {'qos:minIOPS': '20', - 'qos:maxIOPS': '2000', - 'qos:burstIOPS': '5000'} - qos = qos_specs.create(ctxt, 'fake-qos', specs) - qos_specs.associate_qos_with_type(ctxt, qos['id'], type_ref['id']) - SNAPSHOT1['volume_type_id'] = type_ref['id'] - - mock_req.side_effect = iter([ISE_GET_QUERY_RESP, - ISE_GET_VOL1_STATUS_RESP, - ISE_400_INVALID_STATE_RESP, - ISE_PREP_SNAPSHOT_RESP, - ISE_GET_SNAP1_STATUS_RESP, - ISE_CREATE_SNAPSHOT_RESP, - ISE_GET_SNAP1_STATUS_RESP]) - self.setup_driver() - self.driver.create_snapshot(SNAPSHOT1) - - @mock.patch('oslo_service.loopingcall.FixedIntervalLoopingCall', - new=utils.ZeroIntervalLoopingCall) - def test_negative_create_snapshot_invalid_state_norecover(self, mock_req): - ctxt = context.get_admin_context() - extra_specs = {"Feature:Pool": "1", - "Feature:Raid": "1", - "Affinity:Type": "flash", - "Alloc:Type": "thick"} - type_ref = volume_types.create(ctxt, 'VT1', extra_specs) - specs = {'qos:minIOPS': '20', - 'qos:maxIOPS': '2000', - 'qos:burstIOPS': '5000'} - qos = qos_specs.create(ctxt, 'fake-qos', specs) - qos_specs.associate_qos_with_type(ctxt, qos['id'], type_ref['id']) - SNAPSHOT1['volume_type_id'] = type_ref['id'] - - mock_req.side_effect = iter([ISE_GET_QUERY_RESP, - ISE_GET_VOL1_STATUS_RESP, - ISE_400_INVALID_STATE_RESP, - ISE_400_INVALID_STATE_RESP, - ISE_400_INVALID_STATE_RESP, - ISE_400_INVALID_STATE_RESP, - ISE_400_INVALID_STATE_RESP]) - self.configuration.ise_completion_retries = 5 - self.setup_driver() - self.assertRaises(exception.XIODriverException, - self.driver.create_snapshot, SNAPSHOT1) - - def test_negative_create_snapshot_conflict(self, mock_req): - ctxt = context.get_admin_context() - extra_specs = {"Feature:Pool": "1", - "Feature:Raid": "1", - "Affinity:Type": "flash", - "Alloc:Type": "thick"} - type_ref = volume_types.create(ctxt, 
'VT1', extra_specs) - specs = {'qos:minIOPS': '20', - 'qos:maxIOPS': '2000', - 'qos:burstIOPS': '5000'} - qos = qos_specs.create(ctxt, 'fake-qos', specs) - qos_specs.associate_qos_with_type(ctxt, qos['id'], type_ref['id']) - SNAPSHOT1['volume_type_id'] = type_ref['id'] - - mock_req.side_effect = iter([ISE_GET_QUERY_RESP, - ISE_GET_VOL1_STATUS_RESP, - ISE_409_CONFLICT_RESP]) - self.configuration.ise_completion_retries = 1 - self.setup_driver() - self.assertRaises(exception.XIODriverException, - self.driver.create_snapshot, SNAPSHOT1) - - def test_delete_snapshot(self, mock_req): - mock_req.side_effect = iter([ISE_GET_QUERY_RESP, - ISE_GET_ALLOC_WITH_EP_RESP, - ISE_DELETE_ALLOC_RESP, - ISE_GET_SNAP1_STATUS_RESP, - ISE_DELETE_VOLUME_RESP]) - self.setup_driver() - self.driver.delete_snapshot(SNAPSHOT1) - - def test_clone_volume(self, mock_req): - ctxt = context.get_admin_context() - extra_specs = {"Feature:Pool": "1", - "Feature:Raid": "1", - "Affinity:Type": "flash", - "Alloc:Type": "thick"} - type_ref = volume_types.create(ctxt, 'VT1', extra_specs) - specs = {'qos:minIOPS': '20', - 'qos:maxIOPS': '2000', - 'qos:burstIOPS': '5000'} - qos = qos_specs.create(ctxt, 'fake-qos', specs) - qos_specs.associate_qos_with_type(ctxt, qos['id'], type_ref['id']) - VOLUME1['volume_type_id'] = type_ref['id'] - mock_req.side_effect = iter([ISE_GET_QUERY_RESP, - ISE_GET_VOL1_STATUS_RESP, - ISE_PREP_SNAPSHOT_RESP, - ISE_GET_SNAP1_STATUS_RESP, - ISE_CREATE_SNAPSHOT_RESP, - ISE_GET_SNAP1_STATUS_RESP]) - self.setup_driver() - self.driver.create_cloned_volume(CLONE1, VOLUME1) - - def test_extend_volume(self, mock_req): - mock_req.side_effect = iter([ISE_GET_QUERY_RESP, - ISE_GET_VOL1_STATUS_RESP, - ISE_MODIFY_VOLUME_RESP]) - self.setup_driver() - self.driver.extend_volume(VOLUME1, NEW_VOLUME_SIZE) - - def test_retype_volume(self, mock_req): - ctxt = context.get_admin_context() - extra_specs = {"Feature:Pool": "1", - "Feature:Raid": "1", - "Affinity:Type": "flash", - "Alloc:Type": "thick"} - 
type_ref = volume_types.create(ctxt, 'VT1', extra_specs) - specs = {'qos:minIOPS': '20', - 'qos:maxIOPS': '2000', - 'qos:burstIOPS': '5000'} - qos = qos_specs.create(ctxt, 'fake-qos', specs) - qos_specs.associate_qos_with_type(ctxt, qos['id'], type_ref['id']) - VOLUME1['volume_type_id'] = type_ref['id'] - # New volume type - extra_specs = {"Feature:Pool": "1", - "Feature:Raid": "5", - "Affinity:Type": "flash", - "Alloc:Type": "thick"} - type_ref = volume_types.create(ctxt, 'VT2', extra_specs) - specs = {'qos:minIOPS': '30', - 'qos:maxIOPS': '3000', - 'qos:burstIOPS': '10000'} - qos = qos_specs.create(ctxt, 'fake-qos2', specs) - qos_specs.associate_qos_with_type(ctxt, qos['id'], type_ref['id']) - - mock_req.side_effect = iter([ISE_GET_QUERY_RESP, - ISE_GET_VOL1_STATUS_RESP, - ISE_MODIFY_VOLUME_RESP]) - self.setup_driver() - self.driver.retype(ctxt, VOLUME1, type_ref, 0, 0) - - def test_create_volume_from_snapshot(self, mock_req): - ctxt = context.get_admin_context() - extra_specs = {"Feature:Pool": "1", - "Feature:Raid": "1", - "Affinity:Type": "flash", - "Alloc:Type": "thick"} - type_ref = volume_types.create(ctxt, 'VT1', extra_specs) - specs = {'qos:minIOPS': '20', - 'qos:maxIOPS': '2000', - 'qos:burstIOPS': '5000'} - qos = qos_specs.create(ctxt, 'fake-qos', specs) - qos_specs.associate_qos_with_type(ctxt, qos['id'], type_ref['id']) - SNAPSHOT1['volume_type_id'] = type_ref['id'] - mock_req.side_effect = iter([ISE_GET_QUERY_RESP, - ISE_GET_SNAP1_STATUS_RESP, - ISE_PREP_SNAPSHOT_RESP, - ISE_GET_VOL1_STATUS_RESP, - ISE_CREATE_SNAPSHOT_RESP, - ISE_GET_VOL1_STATUS_RESP]) - self.setup_driver() - self.driver.create_volume_from_snapshot(VOLUME1, SNAPSHOT1) - - def test_manage_existing(self, mock_req): - ctxt = context.get_admin_context() - extra_specs = {"Feature:Pool": "1", - "Feature:Raid": "1", - "Affinity:Type": "flash", - "Alloc:Type": "thick"} - type_ref = volume_types.create(ctxt, 'VT1', extra_specs) - specs = {'qos:minIOPS': '20', - 'qos:maxIOPS': '2000', - 
'qos:burstIOPS': '5000'} - qos = qos_specs.create(ctxt, 'fake-qos', specs) - qos_specs.associate_qos_with_type(ctxt, qos['id'], type_ref['id']) - VOLUME1['volume_type_id'] = type_ref['id'] - self.setup_driver() - if self.configuration.ise_protocol == 'iscsi': - mock_req.side_effect = iter([ISE_GET_QUERY_RESP, - ISE_GET_VOL1_STATUS_RESP, - ISE_MODIFY_VOLUME_RESP, - ISE_GET_IONETWORKS_RESP]) - elif self.configuration.ise_protocol == 'fibre_channel': - mock_req.side_effect = iter([ISE_GET_QUERY_RESP, - ISE_GET_VOL1_STATUS_RESP, - ISE_MODIFY_VOLUME_RESP]) - self.driver.manage_existing(VOLUME1, {'source-name': 'testvol'}) - - def test_manage_existing_no_source_name(self, mock_req): - self.setup_driver() - if self.configuration.ise_protocol == 'iscsi': - mock_req.side_effect = iter([ISE_GET_QUERY_RESP, - ISE_GET_VOL1_STATUS_RESP, - ISE_MODIFY_VOLUME_RESP, - ISE_GET_IONETWORKS_RESP]) - elif self.configuration.ise_protocol == 'fibre_channel': - mock_req.side_effect = iter([ISE_GET_QUERY_RESP, - ISE_GET_VOL1_STATUS_RESP, - ISE_MODIFY_VOLUME_RESP]) - self.assertRaises(exception.XIODriverException, - self.driver.manage_existing, VOLUME1, {}) - - def test_manage_existing_get_size(self, mock_req): - self.setup_driver() - mock_req.side_effect = iter([ISE_GET_QUERY_RESP, - ISE_GET_VOL1_STATUS_RESP]) - exp_result = 10 - act_result = \ - self.driver.manage_existing_get_size(VOLUME1, - {'source-name': 'a'}) - self.assertEqual(exp_result, act_result) - - def test_manage_existing_get_size_no_source_name(self, mock_req): - self.setup_driver() - mock_req.side_effect = iter([ISE_GET_QUERY_RESP, - ISE_GET_VOL1_STATUS_RESP]) - self.assertRaises(exception.XIODriverException, - self.driver.manage_existing_get_size, VOLUME1, {}) - - def test_unmanage(self, mock_req): - self.setup_driver() - mock_req.side_effect = iter([ISE_GET_QUERY_RESP, - ISE_GET_VOL1_STATUS_RESP]) - self.driver.unmanage(VOLUME1) - - def test_negative_unmanage_no_volume_status_xml(self, mock_req): - self.setup_driver() - 
mock_req.side_effect = iter([ISE_GET_QUERY_RESP, - ISE_GET_VOL_STATUS_NO_STATUS_RESP]) - self.driver.unmanage(VOLUME1) - - def test_negative_unmanage_no_volume_xml(self, mock_req): - self.setup_driver() - mock_req.side_effect = iter([ISE_GET_QUERY_RESP, - ISE_GET_VOL_STATUS_NO_VOL_NODE_RESP]) - self.assertRaises(exception.XIODriverException, - self.driver.unmanage, VOLUME1) - - def test_negative_unmanage_non_existing_volume(self, mock_req): - self.setup_driver() - mock_req.side_effect = iter([ISE_GET_QUERY_RESP, - ISE_GET_VOL_STATUS_404_RESP]) - self.assertRaises(exception.XIODriverException, - self.driver.unmanage, VOLUME1) - - -class XIOISEISCSIDriverTestCase(XIOISEDriverTestCase, test.TestCase): - - def setUp(self): - super(XIOISEISCSIDriverTestCase, self).setUp() - self.setup_test('iscsi') - - -class XIOISEFCDriverTestCase(XIOISEDriverTestCase, test.TestCase): - - def setUp(self): - super(XIOISEFCDriverTestCase, self).setUp() - self.setup_test('fibre_channel') diff --git a/cinder/tests/unit/volume/drivers/test_zadara.py b/cinder/tests/unit/volume/drivers/test_zadara.py deleted file mode 100644 index d951b5611..000000000 --- a/cinder/tests/unit/volume/drivers/test_zadara.py +++ /dev/null @@ -1,759 +0,0 @@ -# Copyright (c) 2016 Zadara Storage, Inc. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
-""" -Tests for Zadara VPSA volume driver -""" -import copy -import mock -import requests -from six.moves.urllib import parse - -from cinder import exception -from cinder import test -from cinder.volume import configuration as conf -from cinder.volume.drivers import zadara - - -DEFAULT_RUNTIME_VARS = { - 'status': 200, - 'user': 'test', - 'password': 'test_password', - 'access_key': '0123456789ABCDEF', - 'volumes': [], - 'servers': [], - 'controllers': [('active_ctrl', {'display-name': 'test_ctrl'})], - 'counter': 1000, - - 'login': """ - - - 2012-04-30... - %s - 1 - 2012-02-21... - jsmith@example.com - jsmith - - 0 - """, - - 'good': """ - - 0 - """, - - 'bad_login': """ - - 5 - Some message... - """, - - 'bad_volume': """ - - 10081 - Virtual volume xxx not found - """, - - 'bad_server': """ - - 10086 - Server xxx not found - """, - - 'server_created': """ - - %s - 0 - """, -} - -RUNTIME_VARS = None - - -class FakeResponse(object): - def __init__(self, method, url, body): - self.method = method - self.url = url - self.body = body - self.status = RUNTIME_VARS['status'] - - def read(self): - ops = {'POST': [('/api/users/login.xml', self._login), - ('/api/volumes.xml', self._create_volume), - ('/api/servers.xml', self._create_server), - ('/api/servers/*/volumes.xml', self._attach), - ('/api/volumes/*/detach.xml', self._detach), - ('/api/volumes/*/expand.xml', self._expand), - ('/api/consistency_groups/*/snapshots.xml', - self._create_snapshot), - ('/api/consistency_groups/*/clone.xml', - self._create_clone)], - 'DELETE': [('/api/volumes/*', self._delete), - ('/api/snapshots/*', self._delete_snapshot)], - 'GET': [('/api/volumes.xml', self._list_volumes), - ('/api/pools.xml', self._list_pools), - ('/api/vcontrollers.xml', self._list_controllers), - ('/api/servers.xml', self._list_servers), - ('/api/consistency_groups/*/snapshots.xml', - self._list_vol_snapshots), - ('/api/volumes/*/servers.xml', - self._list_vol_attachments)] - } - - ops_list = ops[self.method] - 
modified_url = self.url.split('?')[0] - for (templ_url, func) in ops_list: - if self._compare_url(modified_url, templ_url): - result = func() - return result - - def _compare_url(self, url, template_url): - items = url.split('/') - titems = template_url.split('/') - for (i, titem) in enumerate(titems): - if titem != '*' and titem != items[i]: - return False - return True - - def _get_parameters(self, data): - items = data.split('&') - params = {} - for item in items: - if item: - (k, v) = item.split('=') - params[k] = v - return params - - def _get_counter(self): - cnt = RUNTIME_VARS['counter'] - RUNTIME_VARS['counter'] += 1 - return cnt - - def _login(self): - params = self._get_parameters(self.body) - if (params['user'] == RUNTIME_VARS['user'] and - params['password'] == RUNTIME_VARS['password']): - return RUNTIME_VARS['login'] % RUNTIME_VARS['access_key'] - else: - return RUNTIME_VARS['bad_login'] - - def _incorrect_access_key(self, params): - return (params['access_key'] != RUNTIME_VARS['access_key']) - - def _create_volume(self): - params = self._get_parameters(self.body) - if self._incorrect_access_key(params): - return RUNTIME_VARS['bad_login'] - - params['display-name'] = params['name'] - params['cg-name'] = params['name'] - params['snapshots'] = [] - params['attachments'] = [] - vpsa_vol = 'volume-%07d' % self._get_counter() - RUNTIME_VARS['volumes'].append((vpsa_vol, params)) - return RUNTIME_VARS['good'] - - def _create_server(self): - params = self._get_parameters(self.body) - if self._incorrect_access_key(params): - return RUNTIME_VARS['bad_login'] - - params['display-name'] = params['display_name'] - vpsa_srv = 'srv-%07d' % self._get_counter() - RUNTIME_VARS['servers'].append((vpsa_srv, params)) - return RUNTIME_VARS['server_created'] % vpsa_srv - - def _attach(self): - params = self._get_parameters(self.body) - if self._incorrect_access_key(params): - return RUNTIME_VARS['bad_login'] - - srv = self.url.split('/')[3] - vol = params['volume_name[]'] - 
- for (vol_name, params) in RUNTIME_VARS['volumes']: - if vol_name == vol: - attachments = params['attachments'] - if srv in attachments: - # already attached - ok - return RUNTIME_VARS['good'] - else: - attachments.append(srv) - return RUNTIME_VARS['good'] - - return RUNTIME_VARS['bad_volume'] - - def _detach(self): - params = self._get_parameters(self.body) - if self._incorrect_access_key(params): - return RUNTIME_VARS['bad_login'] - - vol = self.url.split('/')[3] - srv = params['server_name[]'] - - for (vol_name, params) in RUNTIME_VARS['volumes']: - if vol_name == vol: - attachments = params['attachments'] - if srv not in attachments: - return RUNTIME_VARS['bad_server'] - else: - attachments.remove(srv) - return RUNTIME_VARS['good'] - - return RUNTIME_VARS['bad_volume'] - - def _expand(self): - params = self._get_parameters(self.body) - if self._incorrect_access_key(params): - return RUNTIME_VARS['bad_login'] - - vol = self.url.split('/')[3] - capacity = params['capacity'] - - for (vol_name, params) in RUNTIME_VARS['volumes']: - if vol_name == vol: - params['capacity'] = capacity - return RUNTIME_VARS['good'] - - return RUNTIME_VARS['bad_volume'] - - def _create_snapshot(self): - params = self._get_parameters(self.body) - if self._incorrect_access_key(params): - return RUNTIME_VARS['bad_login'] - - cg_name = self.url.split('/')[3] - snap_name = params['display_name'] - - for (vol_name, params) in RUNTIME_VARS['volumes']: - if params['cg-name'] == cg_name: - snapshots = params['snapshots'] - if snap_name in snapshots: - # already attached - return RUNTIME_VARS['bad_volume'] - else: - snapshots.append(snap_name) - return RUNTIME_VARS['good'] - - return RUNTIME_VARS['bad_volume'] - - def _delete_snapshot(self): - snap = self.url.split('/')[3].split('.')[0] - - for (vol_name, params) in RUNTIME_VARS['volumes']: - if snap in params['snapshots']: - params['snapshots'].remove(snap) - return RUNTIME_VARS['good'] - - return RUNTIME_VARS['bad_volume'] - - def 
_create_clone(self): - params = self._get_parameters(self.body) - if self._incorrect_access_key(params): - return RUNTIME_VARS['bad_login'] - - params['display-name'] = params['name'] - params['cg-name'] = params['name'] - params['capacity'] = 1 - params['snapshots'] = [] - params['attachments'] = [] - vpsa_vol = 'volume-%07d' % self._get_counter() - RUNTIME_VARS['volumes'].append((vpsa_vol, params)) - return RUNTIME_VARS['good'] - - def _delete(self): - vol = self.url.split('/')[3].split('.')[0] - - for (vol_name, params) in RUNTIME_VARS['volumes']: - if vol_name == vol: - if params['attachments']: - # there are attachments - should be volume busy error - return RUNTIME_VARS['bad_volume'] - else: - RUNTIME_VARS['volumes'].remove((vol_name, params)) - return RUNTIME_VARS['good'] - - return RUNTIME_VARS['bad_volume'] - - def _generate_list_resp(self, header, footer, body, lst, vol): - resp = header - for (obj, params) in lst: - if vol: - resp += body % (obj, - params['display-name'], - params['cg-name'], - params['capacity']) - else: - resp += body % (obj, params['display-name']) - resp += footer - return resp - - def _list_volumes(self): - header = """ - 0 - """ - footer = "" - body = """ - %s - %s - %s - Available - %s - 1 - r5 - write-through - 2012-01-28... - 2012-01-28... - """ - return self._generate_list_resp(header, - footer, - body, - RUNTIME_VARS['volumes'], - True) - - def _list_controllers(self): - header = """ - 0 - """ - footer = "" - body = """ - %s - %s - active - iqn.2011-04.com.zadarastorage:vsa-xxx:1 - 1.1.1.1 - 1.1.1.1 - 0.0.09-05.1--77.7 - ok - ok - test_chap_user - test_chap_secret - """ - return self._generate_list_resp(header, - footer, - body, - RUNTIME_VARS['controllers'], - False) - - def _list_pools(self): - header = """ - 0 - - """ - footer = "" - return header + footer - - def _list_servers(self): - header = """ - 0 - """ - footer = "" - body = """ - %s - %s - %s - Active - 2012-01-28... - 2012-01-28... 
- """ - - resp = header - for (obj, params) in RUNTIME_VARS['servers']: - resp += body % (obj, params['display-name'], params['iqn']) - resp += footer - return resp - - def _get_server_obj(self, name): - for (srv_name, params) in RUNTIME_VARS['servers']: - if srv_name == name: - return params - - def _list_vol_attachments(self): - vol = self.url.split('/')[3] - - header = """ - 0 - """ - footer = "" - body = """ - %s - %s - %s - iqn.2011-04.com.zadarastorage:vsa-xxx:1 - 0 - """ - - for (vol_name, params) in RUNTIME_VARS['volumes']: - if vol_name == vol: - attachments = params['attachments'] - resp = header - for server in attachments: - srv_params = self._get_server_obj(server) - resp += body % (server, - srv_params['display-name'], - srv_params['iqn']) - resp += footer - return resp - - return RUNTIME_VARS['bad_volume'] - - def _list_vol_snapshots(self): - cg_name = self.url.split('/')[3] - - header = """ - 0 - """ - footer = "" - - body = """ - %s - %s - normal - %s - pool-00000001 - """ - - for (vol_name, params) in RUNTIME_VARS['volumes']: - if params['cg-name'] == cg_name: - snapshots = params['snapshots'] - resp = header - for snap in snapshots: - resp += body % (snap, snap, cg_name) - resp += footer - return resp - - return RUNTIME_VARS['bad_volume'] - - -class FakeRequests(object): - """A fake requests for zadara volume driver tests.""" - def __init__(self, method, api_url, data, verify): - url = parse.urlparse(api_url).path - res = FakeResponse(method, url, data) - self.content = res.read() - self.status_code = res.status - - -class ZadaraVPSADriverTestCase(test.TestCase): - """Test case for Zadara VPSA volume driver.""" - @mock.patch.object(requests, 'request', FakeRequests) - def setUp(self): - super(ZadaraVPSADriverTestCase, self).setUp() - - global RUNTIME_VARS - RUNTIME_VARS = copy.deepcopy(DEFAULT_RUNTIME_VARS) - self.configuration = mock.Mock(conf.Configuration(None)) - self.configuration.append_config_values(zadara.zadara_opts) - 
self.configuration.reserved_percentage = 10 - self.configuration.zadara_use_iser = True - self.configuration.zadara_vpsa_host = '192.168.5.5' - self.configuration.zadara_vpsa_port = '80' - self.configuration.zadara_user = 'test' - self.configuration.zadara_password = 'test_password' - self.configuration.zadara_vpsa_poolname = 'pool-0001' - self.configuration.zadara_vol_encrypt = False - self.configuration.zadara_vol_name_template = 'OS_%s' - self.configuration.zadara_vpsa_use_ssl = False - self.configuration.zadara_ssl_cert_verify = False - self.configuration.zadara_default_snap_policy = False - self.driver = (zadara.ZadaraVPSAISCSIDriver( - configuration=self.configuration)) - self.driver.do_setup(None) - - @mock.patch.object(requests, 'request', FakeRequests) - def test_create_destroy(self): - """Create/Delete volume.""" - volume = {'name': 'test_volume_01', 'size': 1} - self.driver.create_volume(volume) - self.driver.delete_volume(volume) - - @mock.patch.object(requests, 'request', FakeRequests) - def test_create_destroy_multiple(self): - """Create/Delete multiple volumes.""" - self.driver.create_volume({'name': 'test_volume_01', 'size': 1}) - self.driver.create_volume({'name': 'test_volume_02', 'size': 2}) - self.driver.create_volume({'name': 'test_volume_03', 'size': 3}) - self.driver.delete_volume({'name': 'test_volume_02'}) - self.driver.delete_volume({'name': 'test_volume_03'}) - self.driver.delete_volume({'name': 'test_volume_01'}) - self.driver.delete_volume({'name': 'test_volume_04'}) - - @mock.patch.object(requests, 'request', FakeRequests) - def test_destroy_non_existent(self): - """Delete non-existent volume.""" - volume = {'name': 'test_volume_02', 'size': 1} - self.driver.delete_volume(volume) - - @mock.patch.object(requests, 'request', FakeRequests) - def test_empty_apis(self): - """Test empty func (for coverage only).""" - context = None - volume = {'name': 'test_volume_01', 'size': 1} - self.driver.create_export(context, volume) - 
self.driver.ensure_export(context, volume) - self.driver.remove_export(context, volume) - self.assertRaises(NotImplementedError, - self.driver.local_path, - None) - self.driver.check_for_setup_error() - - @mock.patch.object(requests, 'request', FakeRequests) - def test_volume_attach_detach(self): - """Test volume attachment and detach.""" - volume = {'name': 'test_volume_01', 'size': 1, 'id': 123} - connector = dict(initiator='test_iqn.1') - self.driver.create_volume(volume) - props = self.driver.initialize_connection(volume, connector) - self.assertEqual('iser', props['driver_volume_type']) - data = props['data'] - self.assertEqual('1.1.1.1:3260', data['target_portal']) - self.assertEqual('iqn.2011-04.com.zadarastorage:vsa-xxx:1', - data['target_iqn']) - self.assertEqual(int('0'), data['target_lun']) - self.assertEqual(123, data['volume_id']) - self.assertEqual('CHAP', data['auth_method']) - self.assertEqual('test_chap_user', data['auth_username']) - self.assertEqual('test_chap_secret', data['auth_password']) - self.driver.terminate_connection(volume, connector) - self.driver.delete_volume(volume) - - @mock.patch.object(requests, 'request', FakeRequests) - def test_volume_attach_multiple_detach(self): - """Test multiple volume attachment and detach.""" - volume = {'name': 'test_volume_01', 'size': 1, 'id': 123} - connector1 = dict(initiator='test_iqn.1') - connector2 = dict(initiator='test_iqn.2') - connector3 = dict(initiator='test_iqn.3') - - self.driver.create_volume(volume) - self.driver.initialize_connection(volume, connector1) - self.driver.initialize_connection(volume, connector2) - self.driver.initialize_connection(volume, connector3) - - self.driver.terminate_connection(volume, connector1) - self.driver.terminate_connection(volume, connector3) - self.driver.terminate_connection(volume, connector2) - self.driver.delete_volume(volume) - - @mock.patch.object(requests, 'request', FakeRequests) - def test_wrong_attach_params(self): - """Test different wrong 
attach scenarios.""" - volume1 = {'name': 'test_volume_01', 'size': 1, 'id': 101} - connector1 = dict(initiator='test_iqn.1') - self.assertRaises(exception.VolumeNotFound, - self.driver.initialize_connection, - volume1, connector1) - - @mock.patch.object(requests, 'request', FakeRequests) - def test_wrong_detach_params(self): - """Test different wrong detachment scenarios.""" - volume1 = {'name': 'test_volume_01', 'size': 1, 'id': 101} - volume2 = {'name': 'test_volume_02', 'size': 1, 'id': 102} - volume3 = {'name': 'test_volume_03', 'size': 1, 'id': 103} - connector1 = dict(initiator='test_iqn.1') - connector2 = dict(initiator='test_iqn.2') - connector3 = dict(initiator='test_iqn.3') - self.driver.create_volume(volume1) - self.driver.create_volume(volume2) - self.driver.initialize_connection(volume1, connector1) - self.driver.initialize_connection(volume2, connector2) - self.assertRaises(exception.ZadaraServerNotFound, - self.driver.terminate_connection, - volume1, connector3) - self.assertRaises(exception.VolumeNotFound, - self.driver.terminate_connection, - volume3, connector1) - self.assertRaises(exception.FailedCmdWithDump, - self.driver.terminate_connection, - volume1, connector2) - - @mock.patch.object(requests, 'request', FakeRequests) - def test_wrong_login_reply(self): - """Test wrong login reply.""" - - RUNTIME_VARS['login'] = """ - %s - 0 - """ - self.assertRaises(exception.MalformedResponse, - self.driver.do_setup, None) - - RUNTIME_VARS['login'] = """ - - - 2012-04-30... - 1 - 2012-02-21... 
- jsmith@example.com - jsmith - - %s - 0 - """ - self.assertRaises(exception.MalformedResponse, - self.driver.do_setup, None) - - @mock.patch.object(requests, 'request') - def test_ssl_use(self, request): - """Coverage test for SSL connection.""" - self.configuration.zadara_ssl_cert_verify = True - self.configuration.zadara_vpsa_use_ssl = True - self.configuration.driver_ssl_cert_path = '/path/to/cert' - - good_response = mock.MagicMock() - good_response.status_code = RUNTIME_VARS['status'] - good_response.content = RUNTIME_VARS['login'] - - def request_verify_cert(*args, **kwargs): - self.assertEqual(kwargs['verify'], '/path/to/cert') - return good_response - - request.side_effect = request_verify_cert - self.driver.do_setup(None) - - @mock.patch.object(requests, 'request', FakeRequests) - def test_bad_http_response(self): - """Coverage test for non-good HTTP response.""" - RUNTIME_VARS['status'] = 400 - - volume = {'name': 'test_volume_01', 'size': 1} - self.assertRaises(exception.BadHTTPResponseStatus, - self.driver.create_volume, volume) - - @mock.patch.object(requests, 'request', FakeRequests) - def test_delete_without_detach(self): - """Test volume deletion without detach.""" - - volume1 = {'name': 'test_volume_01', 'size': 1, 'id': 101} - connector1 = dict(initiator='test_iqn.1') - connector2 = dict(initiator='test_iqn.2') - connector3 = dict(initiator='test_iqn.3') - self.driver.create_volume(volume1) - self.driver.initialize_connection(volume1, connector1) - self.driver.initialize_connection(volume1, connector2) - self.driver.initialize_connection(volume1, connector3) - self.driver.delete_volume(volume1) - - @mock.patch.object(requests, 'request', FakeRequests) - def test_no_active_ctrl(self): - - RUNTIME_VARS['controllers'] = [] - volume = {'name': 'test_volume_01', 'size': 1, 'id': 123} - connector = dict(initiator='test_iqn.1') - self.driver.create_volume(volume) - self.assertRaises(exception.ZadaraVPSANoActiveController, - 
self.driver.initialize_connection, - volume, connector) - - @mock.patch.object(requests, 'request', FakeRequests) - def test_create_destroy_snapshot(self): - """Create/Delete snapshot test.""" - volume = {'name': 'test_volume_01', 'size': 1} - snapshot = {'name': 'snap_01', - 'volume_name': volume['name']} - - self.driver.create_volume(volume) - self.assertRaises(exception.VolumeDriverException, - self.driver.create_snapshot, - {'name': snapshot['name'], - 'volume_name': 'wrong_vol'}) - - self.driver.create_snapshot(snapshot) - - # Deleted should succeed for missing volume - self.driver.delete_snapshot({'name': snapshot['name'], - 'volume_name': 'wrong_vol'}) - # Deleted should succeed for missing snap - self.driver.delete_snapshot({'name': 'wrong_snap', - 'volume_name': volume['name']}) - - self.driver.delete_snapshot(snapshot) - self.driver.delete_volume(volume) - - @mock.patch.object(requests, 'request', FakeRequests) - def test_expand_volume(self): - """Expand volume test.""" - volume = {'name': 'test_volume_01', 'size': 10} - volume2 = {'name': 'test_volume_02', 'size': 10} - - self.driver.create_volume(volume) - - self.assertRaises(exception.ZadaraVolumeNotFound, - self.driver.extend_volume, - volume2, 15) - self.assertRaises(exception.InvalidInput, - self.driver.extend_volume, - volume, 5) - - self.driver.extend_volume(volume, 15) - self.driver.delete_volume(volume) - - @mock.patch.object(requests, 'request', FakeRequests) - def test_create_destroy_clones(self): - """Create/Delete clones test.""" - volume1 = {'name': 'test_volume_01', 'id': '01', 'size': 1} - volume2 = {'name': 'test_volume_02', 'id': '02', 'size': 2} - volume3 = {'name': 'test_volume_03', 'id': '03', 'size': 1} - snapshot = {'name': 'snap_01', - 'id': '01', - 'volume_name': volume1['name'], - 'volume_size': 1} - - self.driver.create_volume(volume1) - self.driver.create_snapshot(snapshot) - - # Test invalid vol reference - self.assertRaises(exception.VolumeNotFound, - 
self.driver.create_volume_from_snapshot, - volume2, - {'name': snapshot['name'], - 'id': snapshot['id'], - 'volume_name': 'wrong_vol'}) - # Test invalid snap reference - self.assertRaises(exception.SnapshotNotFound, - self.driver.create_volume_from_snapshot, - volume2, - {'name': 'wrong_snap', - 'id': 'wrong_id', - 'volume_name': snapshot['volume_name']}) - # Test invalid src_vref for volume clone - self.assertRaises(exception.VolumeNotFound, - self.driver.create_cloned_volume, - volume3, volume2) - self.driver.create_volume_from_snapshot(volume2, snapshot) - self.driver.create_cloned_volume(volume3, volume1) - self.driver.delete_volume(volume3) - self.driver.delete_volume(volume2) - self.driver.delete_snapshot(snapshot) - self.driver.delete_volume(volume1) - - @mock.patch.object(requests, 'request', FakeRequests) - def test_get_volume_stats(self): - """Get stats test.""" - self.configuration.safe_get.return_value = 'ZadaraVPSAISCSIDriver' - data = self.driver.get_volume_stats(True) - self.assertEqual('Zadara Storage', data['vendor_name']) - self.assertEqual('unknown', data['total_capacity_gb']) - self.assertEqual('unknown', data['free_capacity_gb']) - self.assertEqual({'total_capacity_gb': 'unknown', - 'free_capacity_gb': 'unknown', - 'reserved_percentage': - self.configuration.reserved_percentage, - 'QoS_support': False, - 'vendor_name': 'Zadara Storage', - 'driver_version': self.driver.VERSION, - 'storage_protocol': 'iSER', - 'volume_backend_name': 'ZadaraVPSAISCSIDriver'}, - data) diff --git a/cinder/tests/unit/volume/drivers/test_zfssa.py b/cinder/tests/unit/volume/drivers/test_zfssa.py deleted file mode 100644 index 4fe70c8b9..000000000 --- a/cinder/tests/unit/volume/drivers/test_zfssa.py +++ /dev/null @@ -1,1893 +0,0 @@ -# Copyright (c) 2014, 2016, Oracle and/or its affiliates. All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. 
You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -"""Unit tests for Oracle's ZFSSA Cinder volume driver.""" - -from datetime import date -import errno -import json -import math - -import mock -from oslo_utils import units -import six - -from cinder import context -from cinder import exception -from cinder.image import image_utils -from cinder import test -from cinder.tests.unit import fake_utils -from cinder.tests.unit import utils -from cinder.volume import configuration as conf -from cinder.volume import driver -from cinder.volume.drivers import nfs as nfsdriver -from cinder.volume.drivers import remotefs -from cinder.volume.drivers.zfssa import restclient as client -from cinder.volume.drivers.zfssa import webdavclient -from cinder.volume.drivers.zfssa import zfssaiscsi as iscsi -from cinder.volume.drivers.zfssa import zfssanfs -from cinder.volume.drivers.zfssa import zfssarest as rest - - -nfs_logbias = 'latency' -nfs_compression = 'off' -zfssa_cache_dir = 'os-cinder-cache' - -no_virtsize_img = { - 'id': 'no_virtsize_img_id1234', - 'size': 654321, - 'updated_at': date(2015, 1, 1), -} - -small_img = { - 'id': '1e5177e7-95e5-4a0f-b170-e45f4b469f6a', - 'size': 654321, - 'virtual_size': 2361393152, - 'updated_at': date(2015, 1, 1), -} - -large_img = { - 'id': 'large_id5678', - 'size': 50000000, - 'virtual_size': 11806965760, - 'updated_at': date(2015, 2, 2), -} - -fakespecs = { - 'prop1': 'prop1_val', - 'prop2': 'prop2_val', -} - -small_img_props = { - 'size': 3, -} - -img_props_nfs = { - 'image_id': small_img['id'], - 'updated_at': small_img['updated_at'].isoformat(), - 'size': 3, - 'name': 
'%(dir)s/os-cache-vol-%(name)s' % ({'dir': zfssa_cache_dir, - 'name': small_img['id']}), - 'id': small_img['id'] -} - -fakecontext = 'fakecontext' -img_service = 'fakeimgservice' -img_location = 'fakeimglocation' - - -class ImgInfo(object): - def __init__(self, vsize): - self.virtual_size = vsize - - -class FakeResponse(object): - def __init__(self, statuscode, data='data'): - self.status = statuscode - self.data = data - - -class FakeSSL(object): - def _create_unverified_context(self): - return 'fakecontext' - - -class TestZFSSAISCSIDriver(test.TestCase): - - test_vol = { - 'name': 'cindervol', - 'size': 3, - 'id': 1, - 'provider_location': 'fake_location 1 2', - 'provider_auth': 'fake_auth user pass', - } - - test_vol2 = { - 'name': 'cindervol2', - 'size': 5, - 'id': 2, - 'provider_location': 'fake_location 3 4', - 'provider_auth': 'fake_auth user pass', - } - - test_snap = { - 'name': 'cindersnap', - 'volume_name': test_vol['name'] - } - - test_vol_snap = { - 'name': 'cindersnapvol', - 'size': test_vol['size'] - } - - def __init__(self, method): - super(TestZFSSAISCSIDriver, self).__init__(method) - - @mock.patch.object(iscsi, 'factory_zfssa') - def setUp(self, _factory_zfssa): - super(TestZFSSAISCSIDriver, self).setUp() - self._create_fake_config() - _factory_zfssa.return_value = mock.MagicMock(spec=rest.ZFSSAApi) - iscsi.ZFSSAISCSIDriver._execute = fake_utils.fake_execute - self.drv = iscsi.ZFSSAISCSIDriver(configuration=self.configuration) - self.drv.do_setup({}) - - def _create_fake_config(self): - self.configuration = mock.Mock(spec=conf.Configuration) - self.configuration.san_ip = '1.1.1.1' - self.configuration.san_login = 'user' - self.configuration.san_password = 'passwd' - self.configuration.zfssa_pool = 'pool' - self.configuration.zfssa_project = 'project' - self.configuration.zfssa_lun_volblocksize = '8k' - self.configuration.zfssa_lun_sparse = 'false' - self.configuration.zfssa_lun_logbias = 'latency' - self.configuration.zfssa_lun_compression = 
'off' - self.configuration.zfssa_initiator_group = 'test-init-grp1' - self.configuration.zfssa_initiator = \ - 'iqn.1-0.org.deb:01:d7, iqn.1-0.org.deb:01:d9' - self.configuration.zfssa_initiator_user = '' - self.configuration.zfssa_initiator_password = '' - self.configuration.zfssa_initiator_config = "{'test-init-grp1':[{'iqn':\ - 'iqn.1-0.org.deb:01:d7','user':'','password':''}],'test-init-grp\ - 2':[{'iqn':'iqn.1-0.org.deb:01:d9','user':'','password':''}]}" - self.configuration.zfssa_target_group = 'test-target-grp1' - self.configuration.zfssa_target_user = '' - self.configuration.zfssa_target_password = '' - self.configuration.zfssa_target_portal = '1.1.1.1:3260' - self.configuration.zfssa_target_interfaces = 'e1000g0' - self.configuration.zfssa_rest_timeout = 60 - self.configuration.volume_backend_name = 'fake_zfssa' - self.configuration.zfssa_enable_local_cache = True - self.configuration.zfssa_cache_project = zfssa_cache_dir - self.configuration.safe_get = self.fake_safe_get - self.configuration.zfssa_replication_ip = '1.1.1.1' - self.configuration.zfssa_manage_policy = 'loose' - - def _util_migrate_volume_exceptions(self): - self.drv.zfssa.get_lun.return_value = ( - {'targetgroup': 'test-target-grp1'}) - self.drv.zfssa.get_asn.return_value = ( - '9a2b5a0f-e3af-6d14-9578-8825f229dc89') - self.drv.tgt_zfssa.get_asn.return_value = ( - '9a2b5a0f-e3af-6d14-9578-8825f229dc89') - targets = {'targets': [{'hostname': '2.2.2.2', - 'address': '2.2.2.2:216', - 'label': '2.2.2.2', - 'asn': - '9a2b5a0f-e3af-6d14-9578-8825f229dc89'}]} - - self.drv.zfssa.get_replication_targets.return_value = targets - self.drv.zfssa.edit_inherit_replication_flag.return_value = {} - self.drv.zfssa.create_replication_action.return_value = 'action-123' - self.drv.zfssa.send_repl_update.return_value = True - - @mock.patch.object(iscsi.LOG, 'warning') - @mock.patch.object(iscsi.LOG, 'error') - @mock.patch.object(iscsi, 'factory_zfssa') - def test_parse_initiator_config(self, _factory_zfssa, 
elog, wlog): - """Test the parsing of the old style initator config variables. """ - lcfg = self.configuration - - with mock.patch.object(lcfg, 'zfssa_initiator_config', ''): - # Test empty zfssa_initiator_group - with mock.patch.object(lcfg, 'zfssa_initiator_group', ''): - self.assertRaises(exception.InvalidConfigurationValue, - self.drv.do_setup, {}) - - # Test empty zfssa_initiator with zfssa_initiator_group set to - # a value other than "default" - with mock.patch.object(lcfg, 'zfssa_initiator', ''): - self.assertRaises(exception.InvalidConfigurationValue, - self.drv.do_setup, {}) - - # Test zfssa_initiator_group set to 'default' with non-empty - # zfssa_initiator. - with mock.patch.object(lcfg, 'zfssa_initiator_group', 'default'): - self.drv.do_setup({}) - wlog.assert_called_with(mock.ANY, - {'inigrp': lcfg.zfssa_initiator_group, - 'ini': lcfg.zfssa_initiator}) - - def test_migrate_volume(self): - self._util_migrate_volume_exceptions() - - volume = self.test_vol - volume.update({'host': 'fake_host', - 'status': 'available', - 'name': 'vol-1', - 'source_volid': self.test_vol['id']}) - - loc_info = '2.2.2.2:fake_auth:pool2:project2:test-target-grp1:2.2.2.2' - - host = {'host': 'stack@zfssa_iscsi#fake_zfssa', - 'capabilities': {'vendor_name': 'Oracle', - 'storage_protocol': 'iSCSI', - 'location_info': loc_info}} - ctxt = context.get_admin_context() - - # Test the normal case - result = self.drv.migrate_volume(ctxt, volume, host) - self.assertEqual((True, None), result) - - # Test when volume status is not available - volume['status'] = 'in-use' - result = self.drv.migrate_volume(ctxt, volume, host) - self.assertEqual((False, None), result) - volume['status'] = 'available' - - # Test when vendor is not Oracle - host['capabilities']['vendor_name'] = 'elcarO' - result = self.drv.migrate_volume(ctxt, volume, host) - self.assertEqual((False, None), result) - host['capabilities']['vendor_name'] = 'Oracle' - - # Test when storage protocol is not iSCSI - 
host['capabilities']['storage_protocol'] = 'not_iSCSI' - result = self.drv.migrate_volume(ctxt, volume, host) - self.assertEqual((False, None), result) - host['capabilities']['storage_protocol'] = 'iSCSI' - - # Test when location_info is incorrect - host['capabilities']['location_info'] = '' - self.assertEqual((False, None), result) - host['capabilities']['location_info'] = loc_info - - # Test if replication ip and replication target's address dont match - invalid_loc_info = ( - '2.2.2.2:fake_auth:pool2:project2:test-target-grp1:9.9.9.9') - host['capabilities']['location_info'] = invalid_loc_info - result = self.drv.migrate_volume(ctxt, volume, host) - self.assertEqual((False, None), result) - host['capabilities']['location_info'] = loc_info - - # Test if no targets are returned - self.drv.zfssa.get_replication_targets.return_value = {'targets': []} - result = self.drv.migrate_volume(ctxt, volume, host) - self.assertEqual((False, None), result) - - def test_migrate_volume_uninherit_exception(self): - self._util_migrate_volume_exceptions() - - volume = self.test_vol - volume.update({'host': 'fake_host', - 'status': 'available', - 'name': 'vol-1', - 'source_volid': self.test_vol['id']}) - - loc_info = '2.2.2.2:fake_auth:pool2:project2:test-target-grp1:2.2.2.2' - - host = {'host': 'stack@zfssa_iscsi#fake_zfssa', - 'capabilities': {'vendor_name': 'Oracle', - 'storage_protocol': 'iSCSI', - 'location_info': loc_info}} - ctxt = context.get_admin_context() - - self.drv.zfssa.edit_inherit_replication_flag.side_effect = ( - exception.VolumeBackendAPIException(data='uniherit ex')) - self.assertRaises(exception.VolumeBackendAPIException, - self.drv.migrate_volume, ctxt, volume, host) - - def test_migrate_volume_create_action_exception(self): - self._util_migrate_volume_exceptions() - - volume = self.test_vol - volume.update({'host': 'fake_host', - 'status': 'available', - 'name': 'vol-1', - 'source_volid': self.test_vol['id']}) - - loc_info = 
'2.2.2.2:fake_auth:pool2:project2:test-target-grp1:2.2.2.2' - - host = {'host': 'stack@zfssa_iscsi#fake_zfssa', - 'capabilities': {'vendor_name': 'Oracle', - 'storage_protocol': 'iSCSI', - 'location_info': loc_info}} - ctxt = context.get_admin_context() - - self.drv.zfssa.create_replication_action.side_effect = ( - exception.VolumeBackendAPIException(data= - 'failed to create action')) - self.assertRaises(exception.VolumeBackendAPIException, - self.drv.migrate_volume, ctxt, volume, host) - - def test_migrate_volume_send_update_exception(self): - self._util_migrate_volume_exceptions() - - volume = self.test_vol - volume.update({'host': 'fake_host', - 'status': 'available', - 'name': 'vol-1', - 'source_volid': self.test_vol['id']}) - - loc_info = '2.2.2.2:fake_auth:pool2:project2:test-target-grp1:2.2.2.2' - - host = {'host': 'stack@zfssa_iscsi#fake_zfssa', - 'capabilities': {'vendor_name': 'Oracle', - 'storage_protocol': 'iSCSI', - 'location_info': loc_info}} - ctxt = context.get_admin_context() - - self.drv.zfssa.send_repl_update.side_effect = ( - exception.VolumeBackendAPIException(data='failed to send update')) - self.assertRaises(exception.VolumeBackendAPIException, - self.drv.migrate_volume, ctxt, volume, host) - - def test_migrate_volume_sever_repl_exception(self): - self._util_migrate_volume_exceptions() - - volume = self.test_vol - volume.update({'host': 'fake_host', - 'status': 'available', - 'name': 'vol-1', - 'source_volid': self.test_vol['id']}) - - loc_info = '2.2.2.2:fake_auth:pool2:project2:test-target-grp1:2.2.2.2' - - host = {'host': 'stack@zfssa_iscsi#fake_zfssa', - 'capabilities': {'vendor_name': 'Oracle', - 'storage_protocol': 'iSCSI', - 'location_info': loc_info}} - ctxt = context.get_admin_context() - self.drv.tgt_zfssa.sever_replication.side_effect = ( - exception.VolumeBackendAPIException(data= - 'failed to sever replication')) - self.assertRaises(exception.VolumeBackendAPIException, - self.drv.migrate_volume, ctxt, volume, host) - - def 
test_create_delete_volume(self): - self.drv.zfssa.get_lun.return_value = {'guid': - '00000000000000000000000000000', - 'number': 0, - 'initiatorgroup': 'default', - 'size': 1, - 'nodestroy': False} - lcfg = self.configuration - self.drv.create_volume(self.test_vol) - self.drv.zfssa.create_lun.assert_called_once_with( - lcfg.zfssa_pool, - lcfg.zfssa_project, - self.test_vol['name'], - six.text_type(self.test_vol['size']) + 'g', - lcfg.zfssa_target_group, - mock.ANY) - self.drv.delete_volume(self.test_vol) - self.drv.zfssa.get_lun.assert_called_once_with(lcfg.zfssa_pool, - lcfg.zfssa_project, - self.test_vol['name']) - self.drv.zfssa.delete_lun.assert_called_once_with( - pool=lcfg.zfssa_pool, - project=lcfg.zfssa_project, - lun=self.test_vol['name']) - - def test_delete_volume_with_missing_lun(self): - self.drv.zfssa.get_lun.side_effect = exception.VolumeNotFound( - volume_id=self.test_vol['name']) - self.drv.delete_volume(self.test_vol) - self.drv.zfssa.delete_lun.assert_not_called() - - def test_delete_volume_backend_fail(self): - self.drv.zfssa.get_lun.side_effect = \ - exception.VolumeBackendAPIException(data='fakemsg') - self.assertRaises(exception.VolumeBackendAPIException, - self.drv.delete_volume, - self.test_vol) - - @mock.patch.object(iscsi.ZFSSAISCSIDriver, '_check_origin') - def test_delete_cache_volume(self, _check_origin): - lcfg = self.configuration - lun2del = { - 'guid': '00000000000000000000000000000', - 'number': 0, - 'initiatorgroup': 'default', - 'size': 1, - 'nodestroy': False, - 'origin': { - 'project': lcfg.zfssa_cache_project, - 'snapshot': 'image-%s' % small_img['id'], - 'share': 'os-cache-vol-%s' % small_img['id'], - } - } - self.drv.zfssa.get_lun.return_value = lun2del - self.drv.delete_volume(self.test_vol) - self.drv._check_origin.assert_called_once_with(lun2del, - self.test_vol['name']) - - def test_check_origin(self): - lcfg = self.configuration - lun2del = { - 'guid': '00000000000000000000000000000', - 'number': 0, - 'initiatorgroup': 
'default', - 'size': 1, - 'nodestroy': False, - 'origin': { - 'project': lcfg.zfssa_cache_project, - 'snapshot': 'image-%s' % small_img['id'], - 'share': 'os-cache-vol-%s' % small_img['id'], - } - } - cache = lun2del['origin'] - self.drv.zfssa.num_clones.return_value = 0 - self.drv._check_origin(lun2del, 'volname') - self.drv.zfssa.delete_lun.assert_called_once_with( - lcfg.zfssa_pool, - lcfg.zfssa_cache_project, - cache['share']) - - def test_create_delete_snapshot(self): - self.drv.zfssa.num_clones.return_value = 0 - lcfg = self.configuration - self.drv.create_snapshot(self.test_snap) - self.drv.zfssa.create_snapshot.assert_called_once_with( - lcfg.zfssa_pool, - lcfg.zfssa_project, - self.test_snap['volume_name'], - self.test_snap['name']) - self.drv.delete_snapshot(self.test_snap) - self.drv.zfssa.delete_snapshot.assert_called_once_with( - lcfg.zfssa_pool, - lcfg.zfssa_project, - self.test_snap['volume_name'], - self.test_snap['name']) - - def test_create_volume_from_snapshot(self): - lcfg = self.configuration - self.drv.zfssa.get_lun.return_value = self.test_vol - self.drv.create_snapshot(self.test_snap) - self.drv.zfssa.create_snapshot.assert_called_once_with( - lcfg.zfssa_pool, - lcfg.zfssa_project, - self.test_snap['volume_name'], - self.test_snap['name']) - self.drv.create_volume_from_snapshot(self.test_vol_snap, - self.test_snap) - specs = self.drv._get_voltype_specs(self.test_vol) - specs.update({'custom:cinder_managed': True}) - self.drv.zfssa.clone_snapshot.assert_called_once_with( - lcfg.zfssa_pool, - lcfg.zfssa_project, - self.test_snap['volume_name'], - self.test_snap['name'], - lcfg.zfssa_project, - self.test_vol_snap['name'], - specs) - - def test_create_larger_volume_from_snapshot(self): - lcfg = self.configuration - self.drv.zfssa.get_lun.return_value = self.test_vol - self.drv.create_snapshot(self.test_snap) - self.drv.zfssa.create_snapshot.assert_called_once_with( - lcfg.zfssa_pool, - lcfg.zfssa_project, - self.test_snap['volume_name'], - 
self.test_snap['name']) - - # use the larger test volume - self.drv.create_volume_from_snapshot(self.test_vol2, - self.test_snap) - specs = self.drv._get_voltype_specs(self.test_vol) - specs.update({'custom:cinder_managed': True}) - self.drv.zfssa.clone_snapshot.assert_called_once_with( - lcfg.zfssa_pool, - lcfg.zfssa_project, - self.test_snap['volume_name'], - self.test_snap['name'], - lcfg.zfssa_project, - self.test_vol2['name'], - specs) - - @mock.patch.object(iscsi.ZFSSAISCSIDriver, '_get_provider_info') - def test_volume_attach_detach(self, _get_provider_info): - lcfg = self.configuration - test_target_iqn = 'iqn.1986-03.com.sun:02:00000-aaaa-bbbb-cccc-ddddd' - self.drv._get_provider_info.return_value = { - 'provider_location': '%s %s' % (lcfg.zfssa_target_portal, - test_target_iqn) - } - - def side_effect_get_initiator_initiatorgroup(arg): - return [{ - 'iqn.1-0.org.deb:01:d7': 'test-init-grp1', - 'iqn.1-0.org.deb:01:d9': 'test-init-grp2', - }[arg]] - - self.drv.zfssa.get_initiator_initiatorgroup.side_effect = ( - side_effect_get_initiator_initiatorgroup) - - initiator = 'iqn.1-0.org.deb:01:d7' - initiator_group = 'test-init-grp1' - lu_number = '246' - - self.drv.zfssa.get_lun.side_effect = iter([ - {'initiatorgroup': [], 'number': []}, - {'initiatorgroup': [initiator_group], 'number': [lu_number]}, - {'initiatorgroup': [initiator_group], 'number': [lu_number]}, - ]) - - connector = dict(initiator=initiator) - props = self.drv.initialize_connection(self.test_vol, connector) - self.drv._get_provider_info.assert_called_once_with() - self.assertEqual('iscsi', props['driver_volume_type']) - self.assertEqual(self.test_vol['id'], props['data']['volume_id']) - self.assertEqual(lcfg.zfssa_target_portal, - props['data']['target_portal']) - self.assertEqual(test_target_iqn, props['data']['target_iqn']) - self.assertEqual(int(lu_number), props['data']['target_lun']) - self.assertFalse(props['data']['target_discovered']) - 
self.drv.zfssa.set_lun_initiatorgroup.assert_called_with( - lcfg.zfssa_pool, - lcfg.zfssa_project, - self.test_vol['name'], - [initiator_group]) - - self.drv.terminate_connection(self.test_vol, connector) - self.drv.zfssa.set_lun_initiatorgroup.assert_called_with( - lcfg.zfssa_pool, - lcfg.zfssa_project, - self.test_vol['name'], - []) - - @mock.patch.object(iscsi.ZFSSAISCSIDriver, '_get_provider_info') - def test_volume_attach_detach_live_migration(self, _get_provider_info): - lcfg = self.configuration - test_target_iqn = 'iqn.1986-03.com.sun:02:00000-aaaa-bbbb-cccc-ddddd' - self.drv._get_provider_info.return_value = { - 'provider_location': '%s %s' % (lcfg.zfssa_target_portal, - test_target_iqn) - } - - def side_effect_get_initiator_initiatorgroup(arg): - return [{ - 'iqn.1-0.org.deb:01:d7': 'test-init-grp1', - 'iqn.1-0.org.deb:01:d9': 'test-init-grp2', - }[arg]] - - self.drv.zfssa.get_initiator_initiatorgroup.side_effect = ( - side_effect_get_initiator_initiatorgroup) - - src_initiator = 'iqn.1-0.org.deb:01:d7' - src_initiator_group = 'test-init-grp1' - src_connector = dict(initiator=src_initiator) - src_lu_number = '123' - - dst_initiator = 'iqn.1-0.org.deb:01:d9' - dst_initiator_group = 'test-init-grp2' - dst_connector = dict(initiator=dst_initiator) - dst_lu_number = '456' - - # In the beginning, the LUN is already presented to the source - # node. During initialize_connection(), and at the beginning of - # terminate_connection(), it's presented to both nodes. 
- self.drv.zfssa.get_lun.side_effect = iter([ - {'initiatorgroup': [src_initiator_group], - 'number': [src_lu_number]}, - {'initiatorgroup': [dst_initiator_group, src_initiator_group], - 'number': [dst_lu_number, src_lu_number]}, - {'initiatorgroup': [dst_initiator_group, src_initiator_group], - 'number': [dst_lu_number, src_lu_number]}, - ]) - - # Before migration, the volume gets connected to the destination - # node (whilst still connected to the source node), so it should - # be presented to the initiator groups for both - props = self.drv.initialize_connection(self.test_vol, dst_connector) - self.drv.zfssa.set_lun_initiatorgroup.assert_called_with( - lcfg.zfssa_pool, - lcfg.zfssa_project, - self.test_vol['name'], - [src_initiator_group, dst_initiator_group]) - - # LU number must be an int - - # https://bugs.launchpad.net/cinder/+bug/1538582 - # and must be the LU number for the destination node's - # initiatorgroup (where the connection was just initialized) - self.assertEqual(int(dst_lu_number), props['data']['target_lun']) - - # After migration, the volume gets detached from the source node - # so it should be present to only the destination node - self.drv.terminate_connection(self.test_vol, src_connector) - self.drv.zfssa.set_lun_initiatorgroup.assert_called_with( - lcfg.zfssa_pool, - lcfg.zfssa_project, - self.test_vol['name'], - [dst_initiator_group]) - - def test_volume_attach_detach_negative(self): - self.drv.zfssa.get_initiator_initiatorgroup.return_value = [] - - connector = dict(initiator='iqn.1-0.org.deb:01:d7') - self.assertRaises(exception.VolumeBackendAPIException, - self.drv.initialize_connection, - self.test_vol, - connector) - - def test_get_volume_stats(self): - self.drv.zfssa.get_project_stats.return_value = 2 * units.Gi,\ - 3 * units.Gi - self.drv.zfssa.get_pool_details.return_value = \ - {"profile": "mirror:log_stripe"} - lcfg = self.configuration - stats = self.drv.get_volume_stats(refresh=True) - 
self.drv.zfssa.get_project_stats.assert_called_once_with( - lcfg.zfssa_pool, - lcfg.zfssa_project) - self.drv.zfssa.get_pool_details.assert_called_once_with( - lcfg.zfssa_pool) - self.assertEqual('Oracle', stats['vendor_name']) - self.assertEqual(self.configuration.volume_backend_name, - stats['volume_backend_name']) - self.assertEqual(self.drv.VERSION, stats['driver_version']) - self.assertEqual(self.drv.protocol, stats['storage_protocol']) - self.assertEqual(0, stats['reserved_percentage']) - self.assertFalse(stats['QoS_support']) - self.assertEqual(3, stats['total_capacity_gb']) - self.assertEqual(2, stats['free_capacity_gb']) - self.assertEqual('mirror:log_stripe', stats['zfssa_poolprofile']) - self.assertEqual('8k', stats['zfssa_volblocksize']) - self.assertEqual('false', stats['zfssa_sparse']) - self.assertEqual('off', stats['zfssa_compression']) - self.assertEqual('latency', stats['zfssa_logbias']) - - self.drv.zfssa.get_pool_details.return_value = {"profile": "raidz2"} - stats = self.drv.get_volume_stats(refresh=True) - self.assertEqual('raidz2', stats['zfssa_poolprofile']) - - def test_extend_volume(self): - lcfg = self.configuration - self.drv.extend_volume(self.test_vol, 3) - self.drv.zfssa.set_lun_props.assert_called_once_with( - lcfg.zfssa_pool, - lcfg.zfssa_project, - self.test_vol['name'], - volsize= 3 * units.Gi) - - @mock.patch('cinder.volume.volume_types.get_volume_type_extra_specs') - def test_get_voltype_specs(self, get_volume_type_extra_specs): - volume_type_id = mock.sentinel.volume_type_id - volume = {'volume_type_id': volume_type_id} - get_volume_type_extra_specs.return_value = { - 'zfssa:volblocksize': '128k', - 'zfssa:compression': 'gzip' - } - ret = self.drv._get_voltype_specs(volume) - self.assertEqual('128k', ret.get('volblocksize')) - self.assertEqual(self.configuration.zfssa_lun_sparse, - ret.get('sparse')) - self.assertEqual('gzip', ret.get('compression')) - self.assertEqual(self.configuration.zfssa_lun_logbias, - ret.get('logbias')) 
- - def fake_safe_get(self, value): - try: - val = getattr(self.configuration, value) - except AttributeError: - val = None - return val - - @mock.patch.object(iscsi.ZFSSAISCSIDriver, '_verify_cache_volume') - def test_clone_image_negative(self, _verify_cache_volume): - # Disabling local cache feature: - self.configuration.zfssa_enable_local_cache = False - - self.assertEqual((None, False), - self.drv.clone_image(fakecontext, self.test_vol, - img_location, - small_img, - img_service)) - - self.configuration.zfssa_enable_local_cache = True - # Creating a volume smaller than image: - self.assertEqual((None, False), - self.drv.clone_image(fakecontext, self.test_vol, - img_location, - large_img, - img_service)) - - # Creating a volume equal as image: - eq_img = large_img.copy() - eq_img['virtual_size'] = self.test_vol['size'] * units.Gi - self.assertEqual((None, False), - self.drv.clone_image(fakecontext, self.test_vol, - img_location, - eq_img, - img_service)) - - # Exception raised in _verify_cache_image - self.drv._verify_cache_volume.side_effect = ( - exception.VolumeBackendAPIException('fakeerror')) - self.assertEqual((None, False), - self.drv.clone_image(fakecontext, self.test_vol, - img_location, - small_img, - img_service)) - - @mock.patch.object(iscsi.ZFSSAISCSIDriver, '_get_voltype_specs') - @mock.patch.object(iscsi.ZFSSAISCSIDriver, '_verify_cache_volume') - @mock.patch.object(iscsi.ZFSSAISCSIDriver, 'extend_volume') - def test_clone_image(self, _extend_vol, _verify_cache, _get_specs): - lcfg = self.configuration - cache_vol = 'volume-os-cache-vol-%s' % small_img['id'] - cache_snap = 'image-%s' % small_img['id'] - self.drv._get_voltype_specs.return_value = fakespecs.copy() - self.drv._verify_cache_volume.return_value = cache_vol, cache_snap - - model, cloned = self.drv.clone_image(fakecontext, self.test_vol2, - img_location, - small_img, - img_service) - specs = fakespecs - specs.update({'custom:cinder_managed': True}) - 
self.drv._verify_cache_volume.assert_called_once_with(fakecontext, - small_img, - img_service, - fakespecs, - small_img_props) - self.drv.zfssa.clone_snapshot.assert_called_once_with( - lcfg.zfssa_pool, - lcfg.zfssa_cache_project, - cache_vol, - cache_snap, - lcfg.zfssa_project, - self.test_vol2['name'], - specs) - - self.drv.extend_volume.assert_called_once_with(self.test_vol2, - self.test_vol2['size']) - - @mock.patch.object(iscsi.ZFSSAISCSIDriver, '_create_cache_volume') - def test_verify_cache_vol_no_cache_vol(self, _create_cache_vol): - vol_name = 'os-cache-vol-%s' % small_img['id'] - self.drv.zfssa.get_lun.side_effect = exception.VolumeNotFound( - volume_id=vol_name) - self.drv._verify_cache_volume(fakecontext, small_img, - img_service, fakespecs, small_img_props) - self.drv._create_cache_volume.assert_called_once_with(fakecontext, - small_img, - img_service, - fakespecs, - small_img_props) - - def test_verify_cache_vol_no_cache_snap(self): - snap_name = 'image-%s' % small_img['id'] - self.drv.zfssa.get_lun_snapshot.side_effect = ( - exception.SnapshotNotFound(snapshot_id=snap_name)) - self.assertRaises(exception.VolumeBackendAPIException, - self.drv._verify_cache_volume, - fakecontext, - small_img, - img_service, - fakespecs, - small_img_props) - - def test_verify_cache_vol_stale_vol(self): - self.drv.zfssa.get_lun_snapshot.return_value = {'numclones': 5} - self.assertRaises(exception.VolumeBackendAPIException, - self.drv._verify_cache_volume, - fakecontext, - small_img, - img_service, - fakespecs, - small_img_props) - - @mock.patch.object(iscsi.ZFSSAISCSIDriver, '_create_cache_volume') - def test_verify_cache_vol_updated_vol(self, _create_cache_vol): - lcfg = self.configuration - updated_vol = { - 'updated_at': date(3000, 12, 12), - 'image_id': 'updated_id', - } - cachevol_name = 'os-cache-vol-%s' % small_img['id'] - self.drv.zfssa.get_lun.return_value = updated_vol - self.drv.zfssa.get_lun_snapshot.return_value = {'numclones': 0} - 
self.drv._verify_cache_volume(fakecontext, small_img, - img_service, fakespecs, small_img_props) - self.drv.zfssa.delete_lun.assert_called_once_with( - lcfg.zfssa_pool, - lcfg.zfssa_cache_project, - cachevol_name) - self.drv._create_cache_volume.assert_called_once_with(fakecontext, - small_img, - img_service, - fakespecs, - small_img_props) - - @mock.patch.object(driver.BaseVD, 'copy_image_to_volume') - def test_create_cache_volume(self, _copy_image): - lcfg = self.configuration - virtual_size = int(small_img['virtual_size']) - volsize = math.ceil(float(virtual_size) / units.Gi) - lunsize = "%sg" % six.text_type(int(volsize)) - volname = 'os-cache-vol-%s' % small_img['id'] - snapname = 'image-%s' % small_img['id'] - cachevol_props = { - 'cache_name': volname, - 'snap_name': snapname, - } - cachevol_props.update(small_img_props) - cache_vol = { - 'name': volname, - 'id': small_img['id'], - 'size': volsize, - } - lun_props = { - 'custom:image_id': small_img['id'], - 'custom:updated_at': ( - six.text_type(small_img['updated_at'].isoformat())), - } - lun_props.update(fakespecs) - - self.drv._create_cache_volume(fakecontext, - small_img, - img_service, - fakespecs, - cachevol_props) - - self.drv.zfssa.create_lun.assert_called_once_with( - lcfg.zfssa_pool, - lcfg.zfssa_cache_project, - cache_vol['name'], - lunsize, - lcfg.zfssa_target_group, - lun_props) - _copy_image.assert_called_once_with(fakecontext, - cache_vol, - img_service, - small_img['id']) - self.drv.zfssa.create_snapshot.assert_called_once_with( - lcfg.zfssa_pool, - lcfg.zfssa_cache_project, - cache_vol['name'], - snapname) - - def test_create_cache_vol_negative(self): - lcfg = self.configuration - volname = 'os-cache-vol-%s' % small_img['id'] - snapname = 'image-%s' % small_img['id'] - cachevol_props = { - 'cache_name': volname, - 'snap_name': snapname, - } - cachevol_props.update(small_img) - - self.drv.zfssa.get_lun.side_effect = exception.VolumeNotFound( - volume_id=volname) - 
self.assertRaises(exception.VolumeBackendAPIException, - self.drv._create_cache_volume, - fakecontext, - small_img, - img_service, - fakespecs, - cachevol_props) - self.drv.zfssa.delete_lun.assert_called_once_with( - lcfg.zfssa_pool, - lcfg.zfssa_cache_project, - volname) - - @mock.patch.object(iscsi.ZFSSAISCSIDriver, '_get_existing_vol') - @mock.patch.object(iscsi.ZFSSAISCSIDriver, '_verify_volume_to_manage') - def test_volume_manage(self, _get_existing_vol, _verify_volume_to_manage): - lcfg = self.configuration - lcfg.zfssa_manage_policy = 'loose' - test_vol = self.test_vol - self.drv._get_existing_vol.return_value = test_vol - self.drv._verify_volume_to_manage.return_value = None - self.drv.zfssa.set_lun_props.return_value = True - self.assertIsNone(self.drv.manage_existing({'name': 'volume-123'}, - {'source-name': - 'volume-567'})) - self.drv._get_existing_vol.assert_called_once_with({'source-name': - 'volume-567'}) - self.drv._verify_volume_to_manage.assert_called_once_with(test_vol) - self.drv.zfssa.set_lun_props.assert_called_once_with( - lcfg.zfssa_pool, - lcfg.zfssa_project, - test_vol['name'], - name='volume-123', - schema={"custom:cinder_managed": True}) - - # Case when zfssa_manage_policy is 'loose' and 'cinder_managed' is - # set to true. 
- test_vol.update({'cinder_managed': False}) - self.assertIsNone(self.drv.manage_existing({'name': 'volume-123'}, - {'source-name': - 'volume-567'})) - - # Another case is when the zfssa_manage_policy is set to 'strict' - lcfg.zfssa_manage_policy = 'strict' - test_vol.update({'cinder_managed': False}) - self.assertIsNone(self.drv.manage_existing({'name': 'volume-123'}, - {'source-name': - 'volume-567'})) - - def test_volume_manage_negative(self): - lcfg = self.configuration - lcfg.zfssa_manage_policy = 'strict' - test_vol = self.test_vol - - if 'cinder_managed' in test_vol: - del test_vol['cinder_managed'] - - self.drv.zfssa.get_lun.return_value = test_vol - self.assertRaises(exception.InvalidInput, - self.drv.manage_existing, {'name': 'cindervol'}, - {'source-name': 'volume-567'}) - - test_vol.update({'cinder_managed': True}) - self.drv.zfssa.get_lun.return_value = test_vol - self.assertRaises(exception.ManageExistingAlreadyManaged, - self.drv.manage_existing, {'name': 'cindervol'}, - {'source-name': 'volume-567'}) - - test_vol.update({'cinder_managed': False}) - self.drv.zfssa.get_lun.return_value = test_vol - self.assertRaises(exception.ManageExistingInvalidReference, - self.drv.manage_existing, {'name': 'cindervol'}, - {'source-id': 'volume-567'}) - - lcfg.zfssa_manage_policy = 'loose' - self.assertRaises(exception.ManageExistingInvalidReference, - self.drv.manage_existing, {'name': 'cindervol'}, - {'source-id': 'volume-567'}) - - @mock.patch.object(iscsi.ZFSSAISCSIDriver, '_verify_volume_to_manage') - def test_volume_manage_negative_api_exception(self, - _verify_volume_to_manage): - lcfg = self.configuration - lcfg.zfssa_manage_policy = 'loose' - self.drv.zfssa.get_lun.return_value = self.test_vol - self.drv._verify_volume_to_manage.return_value = None - self.drv.zfssa.set_lun_props.side_effect = \ - exception.VolumeBackendAPIException(data='fake exception') - self.assertRaises(exception.VolumeBackendAPIException, - self.drv.manage_existing, {'name': 
'volume-123'}, - {'source-name': 'volume-567'}) - - def test_volume_unmanage(self): - lcfg = self.configuration - self.drv.zfssa.set_lun_props.return_value = True - self.assertIsNone(self.drv.unmanage({'name': 'volume-123'})) - self.drv.zfssa.set_lun_props.assert_called_once_with( - lcfg.zfssa_pool, - lcfg.zfssa_project, - 'volume-123', - name='unmanaged-volume-123', - schema={"custom:cinder_managed": False}) - - def test_volume_unmanage_negative(self): - self.drv.zfssa.set_lun_props.side_effect = \ - exception.VolumeBackendAPIException(data='fake exception') - self.assertRaises(exception.VolumeBackendAPIException, - self.drv.unmanage, {'name': 'volume-123'}) - - @mock.patch.object(iscsi.ZFSSAISCSIDriver, '_get_existing_vol') - def test_manage_existing_get_size(self, _get_existing_vol): - test_vol = self.test_vol - test_vol['size'] = 3 * units.Gi - self.drv._get_existing_vol.return_value = test_vol - self.assertEqual(3, self.drv.manage_existing_get_size( - {'name': 'volume-123'}, - {'source-name': 'volume-567'})) - - @mock.patch.object(iscsi.ZFSSAISCSIDriver, '_get_existing_vol') - def test_manage_existing_get_size_negative(self, _get_existing_vol): - self.drv._get_existing_vol.side_effect = \ - exception.VolumeNotFound(volume_id='123') - self.assertRaises(exception.VolumeNotFound, - self.drv.manage_existing_get_size, - {'name': 'volume-123'}, - {'source-name': 'volume-567'}) - - -class TestZFSSANFSDriver(test.TestCase): - - test_vol = { - 'name': 'test-vol', - 'id': '1', - 'size': 3, - 'provider_location': - 'fakelocation', - } - - test_snap = { - 'name': 'cindersnap', - 'volume_name': test_vol['name'], - 'volume_size': test_vol['size'] - } - - test_vol_snap = { - 'name': 'cindersnapvol', - 'size': test_vol['size'] - } - - def __init__(self, method): - super(TestZFSSANFSDriver, self).__init__(method) - - @mock.patch.object(zfssanfs, 'factory_zfssa') - def setUp(self, _factory_zfssa): - super(TestZFSSANFSDriver, self).setUp() - self._create_fake_config() - 
_factory_zfssa.return_value = mock.MagicMock(spec=rest.ZFSSANfsApi) - self.drv = zfssanfs.ZFSSANFSDriver(configuration=self.configuration) - self.drv._execute = fake_utils.fake_execute - self.drv.do_setup({}) - self.drv.mount_path = 'fake_mount_path' - self.context = context.get_admin_context() - - def _create_fake_config(self): - self.configuration = mock.Mock(spec=conf.Configuration) - self.configuration.reserved_percentage = 0 - self.configuration.max_over_subscription_ratio = 20.0 - self.configuration.san_ip = '1.1.1.1' - self.configuration.san_login = 'user' - self.configuration.san_password = 'passwd' - self.configuration.zfssa_data_ip = '2.2.2.2' - self.configuration.zfssa_https_port = '443' - self.configuration.zfssa_nfs_pool = 'pool' - self.configuration.zfssa_nfs_project = 'nfs_project' - self.configuration.zfssa_nfs_share = 'nfs_share' - self.configuration.zfssa_nfs_share_logbias = nfs_logbias - self.configuration.zfssa_nfs_share_compression = nfs_compression - self.configuration.zfssa_nfs_mount_options = '' - self.configuration.zfssa_rest_timeout = '30' - self.configuration.zfssa_enable_local_cache = True - self.configuration.zfssa_cache_directory = zfssa_cache_dir - self.configuration.nfs_sparsed_volumes = 'true' - self.configuration.nfs_mount_point_base = '$state_path/mnt' - self.configuration.nfs_mount_options = None - self.configuration.zfssa_manage_policy = 'strict' - - def test_setup_nfs_client(self): - mock_execute = self.mock_object(self.drv, '_execute', - side_effect= OSError(errno.ENOENT, - 'No such file or ' - 'directory.')) - - self.assertRaises(exception.NfsException, self.drv.do_setup, - self.context) - mock_execute.assert_has_calls( - [mock.call('mount.nfs', - check_exit_code=False, - run_as_root=True), - mock.call('/usr/sbin/mount', - check_exit_code=False, - run_as_root=True)]) - - def test_migrate_volume(self): - self.drv.zfssa.get_asn.return_value = ( - '9a2b5a0f-e3af-6d14-9578-8825f229dc89') - volume = self.test_vol - 
volume.update({'host': 'fake_host', - 'status': 'available', - 'name': 'vol-1', - 'source_volid': self.test_vol['id']}) - - loc_info = '9a2b5a0f-e3af-6d14-9578-8825f229dc89:nfs_share' - - host = {'host': 'stack@zfssa_nfs#fake_zfssa', - 'capabilities': {'vendor_name': 'Oracle', - 'storage_protocol': 'nfs', - 'location_info': loc_info}} - ctxt = context.get_admin_context() - - # Test Normal case - result = self.drv.migrate_volume(ctxt, volume, host) - self.assertEqual((True, None), result) - - # Test when volume status is not available - volume['status'] = 'in-use' - result = self.drv.migrate_volume(ctxt, volume, host) - self.assertEqual((False, None), result) - volume['status'] = 'available' - - # Test when Vendor is not Oracle - host['capabilities']['vendor_name'] = 'elcarO' - result = self.drv.migrate_volume(ctxt, volume, host) - self.assertEqual((False, None), result) - host['capabilities']['vendor_name'] = 'Oracle' - - # Test when storage protocol is not iSCSI - host['capabilities']['storage_protocol'] = 'not_nfs' - result = self.drv.migrate_volume(ctxt, volume, host) - self.assertEqual((False, None), result) - host['capabilities']['storage_protocol'] = 'nfs' - - # Test for exceptions - host['capabilities']['location_info'] = '' - result = self.drv.migrate_volume(ctxt, volume, host) - self.assertEqual((False, None), result) - host['capabilities']['location_info'] = loc_info - - # Test case when source and target asn dont match - invalid_loc_info = ( - 'fake_asn*https://2.2.2.2:/shares/export/nfs_share*nfs_share') - host['capabilities']['location_info'] = invalid_loc_info - result = self.drv.migrate_volume(ctxt, volume, host) - self.assertEqual((False, None), result) - - # Test case when source and target shares names are different - invalid_loc_info = ( - '9a2b5a0f-e3af-6d14-9578-8825f229dc89*' + - 'https://tgt:/shares/export/nfs_share*nfs_share_1') - host['capabilities']['location_info'] = invalid_loc_info - result = self.drv.migrate_volume(ctxt, volume, host) 
- self.assertEqual((False, None), result) - - def test_create_delete_snapshot(self): - lcfg = self.configuration - self.drv.create_snapshot(self.test_snap) - self.drv.zfssa.create_snapshot.assert_called_once_with( - lcfg.zfssa_nfs_pool, - lcfg.zfssa_nfs_project, - lcfg.zfssa_nfs_share, - mock.ANY) - self.drv.zfssa.create_snapshot_of_volume_file.assert_called_once_with( - src_file=mock.ANY, - dst_file=self.test_snap['name']) - self.drv.delete_snapshot(self.test_snap) - self.drv.zfssa.delete_snapshot_of_volume_file.assert_called_with( - src_file=self.test_snap['name']) - - def test_create_volume_from_snapshot(self): - self.drv.create_snapshot(self.test_snap) - with mock.patch.object(self.drv, '_ensure_shares_mounted'): - self.drv.create_volume_from_snapshot(self.test_vol_snap, - self.test_snap, - method='COPY') - - self.drv.zfssa.create_volume_from_snapshot_file.\ - assert_called_once_with(src_file=self.test_snap['name'], - dst_file=self.test_vol_snap['name'], - method='COPY') - - def test_get_volume_stats(self): - lcfg = self.configuration - self.drv._mounted_shares = ['nfs_share'] - with mock.patch.object(self.drv, '_ensure_shares_mounted'): - with mock.patch.object(self.drv, '_get_share_capacity_info') as \ - mock_get_share_capacity_info: - mock_get_share_capacity_info.return_value = (1073741824, - 9663676416) - self.drv.zfssa.get_pool_details.return_value = \ - {"profile": "mirror:log_stripe"} - self.drv.zfssa.get_share.return_value = {"compression": "lzjb", - "encryption": "off", - "logbias": "latency"} - stats = self.drv.get_volume_stats(refresh=True) - self.drv.zfssa.get_pool_details.assert_called_once_with( - lcfg.zfssa_nfs_pool) - self.drv.zfssa.get_share.assert_called_with( - lcfg.zfssa_nfs_pool, lcfg.zfssa_nfs_project, - lcfg.zfssa_nfs_share) - - self.assertEqual(1, stats['free_capacity_gb']) - self.assertEqual(10, stats['total_capacity_gb']) - self.assertEqual('mirror:log_stripe', - stats['zfssa_poolprofile']) - self.assertEqual('lzjb', 
stats['zfssa_compression']) - self.assertEqual('true', stats['zfssa_sparse']) - self.assertEqual('off', stats['zfssa_encryption']) - self.assertEqual('latency', stats['zfssa_logbias']) - - self.drv.zfssa.get_pool_details.return_value = \ - {"profile": "mirror3"} - stats = self.drv.get_volume_stats(refresh=True) - self.assertEqual('mirror3', stats['zfssa_poolprofile']) - - def tearDown(self): - super(TestZFSSANFSDriver, self).tearDown() - - @mock.patch.object(nfsdriver.NfsDriver, 'delete_volume') - @mock.patch.object(zfssanfs.ZFSSANFSDriver, '_check_origin') - def test_delete_volume(self, _check_origin, _delete_vol): - self.drv.zfssa.get_volume.side_effect = self._get_volume_side_effect - test_vol = zfssanfs.Volume() - test_vol._name_id = small_img['id'] - test_vol.size = 3 - test_vol.provider_location = 'fakelocation' - - self.drv.delete_volume(test_vol) - _delete_vol.assert_called_once_with(test_vol) - self.drv._check_origin.assert_called_once_with(img_props_nfs['name']) - - def _get_volume_side_effect(self, *args, **kwargs): - lcfg = self.configuration - volname = six.text_type(args[0]) - if volname.startswith(lcfg.zfssa_cache_directory): - return {'numclones': 0} - else: - return {'origin': img_props_nfs['name']} - - def test_check_origin(self): - self.drv.zfssa.get_volume.side_effect = self._get_volume_side_effect - self.drv._check_origin(img_props_nfs['name']) - self.drv.zfssa.delete_file.assert_called_once_with( - img_props_nfs['name']) - - @mock.patch.object(image_utils, 'qemu_img_info') - @mock.patch.object(image_utils.TemporaryImages, 'fetch') - @mock.patch.object(zfssanfs.ZFSSANFSDriver, '_verify_cache_volume') - @mock.patch.object(zfssanfs.ZFSSANFSDriver, 'create_cloned_volume') - def test_clone_image_negative(self, _create_clone, _verify_cache_volume, - _fetch, _info): - _fetch.return_value = mock.MagicMock(spec=utils.get_file_spec()) - _info.return_value = ImgInfo(small_img['virtual_size']) - - # Disabling local cache feature: - 
self.configuration.zfssa_enable_local_cache = False - self.assertEqual((None, False), - self.drv.clone_image(fakecontext, self.test_vol, - img_location, - small_img, - img_service)) - - self.configuration.zfssa_enable_local_cache = True - - # Creating a volume smaller than image: - _info.return_value = ImgInfo(large_img['virtual_size']) - self.assertEqual((None, False), - self.drv.clone_image(fakecontext, self.test_vol, - img_location, - large_img, - img_service)) - - # Exception raised in _verify_cache_image - _info.return_value = ImgInfo(small_img['virtual_size']) - self.drv._verify_cache_volume.side_effect = ( - exception.VolumeBackendAPIException('fakeerror')) - self.assertEqual((None, False), - self.drv.clone_image(fakecontext, self.test_vol, - img_location, - small_img, - img_service)) - - @mock.patch.object(image_utils, 'qemu_img_info') - @mock.patch.object(image_utils.TemporaryImages, 'fetch') - @mock.patch.object(zfssanfs.ZFSSANFSDriver, 'create_cloned_volume') - @mock.patch.object(zfssanfs.ZFSSANFSDriver, '_verify_cache_volume') - @mock.patch.object(zfssanfs.ZFSSANFSDriver, 'extend_volume') - def test_clone_image(self, _extend_vol, _verify_cache, _create_clone, - _fetch, _info): - _fetch.return_value = mock.MagicMock(spec=utils.get_file_spec()) - _info.return_value = ImgInfo(small_img['virtual_size']) - self.drv._verify_cache_volume.return_value = \ - 'volume-' + img_props_nfs['id'] - prov_loc = {'provider_location': self.test_vol['provider_location']} - self.drv.create_cloned_volume.return_value = prov_loc - self.assertEqual((prov_loc, True), - self.drv.clone_image(fakecontext, self.test_vol, - img_location, - small_img, - img_service)) - img_props = {} - img_props['id'] = img_props_nfs['image_id'] - img_props['image_id'] = img_props_nfs['image_id'] - img_props['updated_at'] = img_props_nfs['updated_at'] - img_props['size'] = img_props_nfs['size'] - - self.drv._verify_cache_volume.assert_called_once_with(fakecontext, - small_img, - img_service, - 
img_props) - cache_vol = { - 'name': self.drv._verify_cache_volume.return_value, - 'size': 3, - 'id': small_img['id'], - } - self.drv.create_cloned_volume.assert_called_once_with(self.test_vol, - cache_vol) - - @mock.patch.object(zfssanfs.ZFSSANFSDriver, '_create_cache_volume') - def test_verify_cache_vol_no_cache_vol(self, _create_cache_vol): - self.drv.zfssa.get_volume.side_effect = exception.VolumeNotFound( - volume_id=img_props_nfs['name']) - self.drv._verify_cache_volume(fakecontext, small_img, - img_service, img_props_nfs) - self.drv._create_cache_volume.assert_called_once_with(fakecontext, - small_img, - img_service, - img_props_nfs) - - def test_verify_cache_vol_stale_vol(self): - self.drv.zfssa.get_volume.return_value = { - 'numclones': 5, - 'updated_at': small_img['updated_at'].isoformat(), - 'image_id': 'wrong_id', - } - self.assertRaises(exception.VolumeBackendAPIException, - self.drv._verify_cache_volume, - fakecontext, - small_img, - img_service, - img_props_nfs) - - @mock.patch.object(zfssanfs.ZFSSANFSDriver, '_create_cache_volume') - @mock.patch.object(nfsdriver.NfsDriver, 'delete_volume') - def test_verify_cache_vol_updated_vol(self, _del_vol, _create_cache_vol): - updated_vol = { - 'updated_at': date(3000, 12, 12), - 'image_id': 'updated_id', - 'numclones': 0, - } - self.drv.zfssa.get_volume.return_value = updated_vol - self.drv._verify_cache_volume(fakecontext, small_img, - img_service, img_props_nfs) - - self.drv._create_cache_volume.assert_called_once_with(fakecontext, - small_img, - img_service, - img_props_nfs) - - @mock.patch.object(remotefs.RemoteFSDriver, 'copy_image_to_volume') - @mock.patch.object(nfsdriver.NfsDriver, 'create_volume') - def test_create_cache_volume(self, _create_vol, _copy_image): - self.drv.zfssa.webdavclient = mock.Mock() - self.drv._create_cache_volume(fakecontext, - small_img, - img_service, - img_props_nfs) - - self.assertEqual(1, _create_vol.call_count) - self.assertEqual(1, _copy_image.call_count) - - def 
test_create_cache_vol_negative(self): - self.drv.zfssa.get_lun.side_effect = ( - exception.VolumeBackendAPIException) - self.assertRaises(exception.VolumeBackendAPIException, - self.drv._create_cache_volume, - fakecontext, - small_img, - img_service, - img_props_nfs) - self.drv.zfssa.delete_file.assert_called_once_with( - 'os-cinder-cache/volume-' + img_props_nfs['id']) - - def test_volume_manage(self): - lcfg = self.configuration - lcfg.zfssa_manage_policy = 'loose' - test_vol = self.test_vol - - self.drv.zfssa.get_volume.return_value = test_vol - self.drv.zfssa.rename_volume.return_value = None - self.drv.zfssa.set_file_props.return_value = None - self.drv.mount_path = lcfg.zfssa_data_ip + ':' + 'fake_mountpoint' - self.assertEqual({'provider_location': self.drv.mount_path}, - self.drv.manage_existing({'name': 'volume-123'}, - {'source-name': - 'volume-567'})) - - self.drv.zfssa.get_volume.assert_called_once_with('volume-567') - self.drv.zfssa.rename_volume.assert_called_once_with('volume-567', - 'volume-123') - self.drv.zfssa.set_file_props.assert_called_once_with( - 'volume-123', {'cinder_managed': 'True'}) - # Test when 'zfssa_manage_policy' is set to 'strict'. 
- lcfg.zfssa_manage_policy = 'strict' - test_vol.update({'cinder_managed': 'False'}) - self.drv.zfssa.get_volume.return_value = test_vol - self.assertEqual({'provider_location': self.drv.mount_path}, - self.drv.manage_existing({'name': 'volume-123'}, - {'source-name': - 'volume-567'})) - - def test_volume_manage_negative_no_source_name(self): - self.assertRaises(exception.ManageExistingInvalidReference, - self.drv.manage_existing, - {'name': 'volume-123'}, - {'source-id': 'volume-567'}) - - def test_volume_manage_negative_backend_exception(self): - self.drv.zfssa.get_volume.side_effect = \ - exception.VolumeNotFound(volume_id='volume-567') - self.assertRaises(exception.InvalidInput, - self.drv.manage_existing, - {'name': 'volume-123'}, - {'source-name': 'volume-567'}) - - def test_volume_manage_negative_verify_fail(self): - lcfg = self.configuration - lcfg.zfssa_manage_policy = 'strict' - test_vol = self.test_vol - test_vol['cinder_managed'] = '' - - self.drv.zfssa.get_volume.return_value = test_vol - self.assertRaises(exception.InvalidInput, - self.drv.manage_existing, - {'name': 'volume-123'}, - {'source-name': 'volume-567'}) - - test_vol.update({'cinder_managed': 'True'}) - self.drv.zfssa.get_volume.return_value = test_vol - self.assertRaises(exception.ManageExistingAlreadyManaged, - self.drv.manage_existing, - {'name': 'volume-123'}, - {'source-name': 'volume-567'}) - - @mock.patch.object(zfssanfs.ZFSSANFSDriver, '_verify_volume_to_manage') - def test_volume_manage_negative_rename_fail(self, - _verify_volume_to_manage): - test_vol = self.test_vol - test_vol.update({'cinder_managed': 'False'}) - self.drv.zfssa.get_volume.return_value = test_vol - self.drv._verify_volume_to_manage.return_value = None - self.drv.zfssa.rename_volume.side_effect = \ - exception.VolumeBackendAPIException(data="fake exception") - self.assertRaises(exception.VolumeBackendAPIException, - self.drv.manage_existing, {'name': 'volume-123'}, - {'source-name': 'volume-567'}) - - 
@mock.patch.object(zfssanfs.ZFSSANFSDriver, '_verify_volume_to_manage') - def test_volume_manage_negative_set_prop_fail(self, - _verify_volume_to_manage): - test_vol = self.test_vol - test_vol.update({'cinder_managed': 'False'}) - self.drv.zfssa.get_volume.return_value = test_vol - self.drv._verify_volume_to_manage.return_value = None - self.drv.zfssa.rename_volume.return_value = None - self.drv.zfssa.set_file_props.side_effect = \ - exception.VolumeBackendAPIException(data="fake exception") - self.assertRaises(exception.VolumeBackendAPIException, - self.drv.manage_existing, {'name': 'volume-123'}, - {'source-name': 'volume-567'}) - - def test_volume_unmanage(self): - test_vol = self.test_vol - test_vol.update({'cinder_managed': 'True'}) - self.drv.zfssa.rename_volume.return_value = None - self.drv.zfssa.set_file_props.return_value = None - self.assertIsNone(self.drv.unmanage(test_vol)) - new_vol_name = 'unmanaged-' + test_vol['name'] - self.drv.zfssa.rename_volume.assert_called_once_with(test_vol['name'], - new_vol_name) - self.drv.zfssa.set_file_props.assert_called_once_with( - new_vol_name, {'cinder_managed': 'False'}) - - def test_volume_unmanage_negative_rename_fail(self): - test_vol = self.test_vol - test_vol.update({'cinder_managed': 'True'}) - self.drv.zfssa.rename_volume.side_effect = \ - exception.VolumeBackendAPIException(data="fake exception") - self.drv.zfssa.set_file_props.return_value = None - self.assertRaises(exception.VolumeBackendAPIException, - self.drv.unmanage, test_vol) - - def test_volume_unmanage_negative_set_prop_fail(self): - test_vol = self.test_vol - test_vol.update({'cinder_managed': 'True'}) - self.drv.zfssa.rename_volume.return_value = None - self.drv.zfssa.set_file_props.side_effect = \ - exception.VolumeBackendAPIException(data="fake exception") - self.assertRaises(exception.VolumeBackendAPIException, - self.drv.unmanage, test_vol) - - @mock.patch.object(zfssanfs.ZFSSANFSDriver, '_get_mount_point_for_share') - def 
test_manage_existing_get_size(self, _get_mount_point_for_share): - self.drv._get_mount_point_for_share.return_value = \ - '/fake/mnt/fake_share/' - self.drv._mounted_shares = [] - self.drv._mounted_shares.append('fake_share') - file = mock.Mock(st_size=123 * units.Gi) - with mock.patch('os.path.isfile', return_value=True): - with mock.patch('os.stat', return_value=file): - self.assertEqual(float(file.st_size / units.Gi), - self.drv.manage_existing_get_size( - {'name': 'volume-123'}, - {'source-name': 'volume-567'})) - - @mock.patch.object(zfssanfs.ZFSSANFSDriver, '_get_mount_point_for_share') - def test_manage_existing_get_size_negative(self, - _get_mount_point_for_share): - self.drv._get_mount_point_for_share.return_value = \ - '/fake/mnt/fake_share/' - self.drv._mounted_shares = [] - self.drv._mounted_shares.append('fake_share') - with mock.patch('os.path.isfile', return_value=True): - with mock.patch('os.stat', side_effect=OSError): - self.assertRaises(exception.VolumeBackendAPIException, - self.drv.manage_existing_get_size, - {'name': 'volume-123'}, - {'source-name': 'volume-567'}) - - -class TestZFSSAApi(test.TestCase): - - @mock.patch.object(rest, 'factory_restclient') - def setUp(self, _restclient): - super(TestZFSSAApi, self).setUp() - self.host = 'fakehost' - self.user = 'fakeuser' - self.url = None - self.pool = 'fakepool' - self.project = 'fakeproject' - self.vol = 'fakevol' - self.snap = 'fakesnapshot' - self.clone = 'fakeclone' - self.targetalias = 'fakealias' - _restclient.return_value = mock.MagicMock(spec=client.RestClientURL) - self.zfssa = rest.ZFSSAApi() - self.zfssa.set_host('fakehost') - self.pool_url = '/api/storage/v1/pools/' - - def _create_response(self, status, data='data'): - response = FakeResponse(status, data) - return response - - def test_create_project(self): - self.zfssa.rclient.get.return_value = self._create_response( - client.Status.OK) - self.zfssa.create_project(self.pool, self.project) - expected_svc = self.pool_url + 
self.pool + '/projects/' + self.project - self.zfssa.rclient.get.assert_called_with(expected_svc) - - def test_create_initiator(self): - self.zfssa.rclient.get.return_value = self._create_response( - client.Status.OK) - initiator = 'iqn.1986-03.com.sun:02:00000-aaaa-bbbb-cccc-ddddd' - alias = 'init-group' - self.zfssa.create_initiator(initiator, alias) - self.zfssa.rclient.get.assert_called_with( - '/api/san/v1/iscsi/initiators/alias=' + alias) - - def test_create_target(self): - self.zfssa.rclient.get.return_value = self._create_response( - client.Status.NOT_FOUND) - ret_val = json.dumps( - {'target': {'iqn': - 'iqn.1986-03.com.sun:02:00000-aaaa-bbbb-cccc-ddddd'}}) - self.zfssa.rclient.post.return_value = self._create_response( - client.Status.CREATED, ret_val) - alias = 'tgt-group' - self.zfssa.create_target(alias) - self.zfssa.rclient.post.assert_called_with('/api/san/v1/iscsi/targets', - {'alias': alias}) - - def test_get_target(self): - ret_val = json.dumps( - {'target': {'href': 'fake_href', - 'alias': 'tgt-group', - 'iqn': - 'iqn.1986-03.com.sun:02:00000-aaaa-bbbb-cccc-ddddd', - 'targetchapuser': '', - 'targetchapsecret': '', - 'interfaces': ['nge0']}}) - self.zfssa.rclient.get.return_value = self._create_response( - client.Status.OK, ret_val) - ret = self.zfssa.get_target('tgt-group') - self.zfssa.rclient.get.assert_called_once_with( - '/api/san/v1/iscsi/targets/alias=tgt-group') - self.assertEqual('iqn.1986-03.com.sun:02:00000-aaaa-bbbb-cccc-ddddd', - ret) - - def test_verify_pool(self): - self.zfssa.rclient.get.return_value = self._create_response( - client.Status.OK) - self.zfssa.verify_pool(self.pool) - self.zfssa.rclient.get.assert_called_with(self.pool_url + self.pool) - - def test_verify_project(self): - self.zfssa.rclient.get.return_value = self._create_response( - client.Status.NOT_FOUND) - self.assertRaises(exception.VolumeBackendAPIException, - self.zfssa.verify_project, - self.pool, - self.project) - - def test_verify_initiator(self): - 
self.zfssa.rclient.get.return_value = self._create_response( - client.Status.OK) - self.zfssa.verify_initiator('iqn.1-0.org.deb:01:d7') - self.zfssa.rclient.get.assert_called_with( - '/api/san/v1/iscsi/initiators/iqn.1-0.org.deb:01:d7') - - def test_verify_target(self): - self.zfssa.rclient.get.return_value = self._create_response( - client.Status.BAD_REQUEST) - self.assertRaises(exception.VolumeBackendAPIException, - self.zfssa.verify_target, - self.targetalias) - - def test_create_delete_lun(self): - arg = json.dumps({'name': self.vol, - 'initiatorgroup': 'com.sun.ms.vss.hg.maskAll'}) - self.zfssa.rclient.post.return_value = self._create_response( - client.Status.CREATED, data=arg) - self.zfssa.create_lun(self.pool, self.project, self.vol, 1, 'tgt-grp', - None) - expected_arg = {'name': self.vol, - 'volsize': 1, - 'targetgroup': 'tgt-grp', - 'initiatorgroup': 'com.sun.ms.vss.hg.maskAll'} - self.zfssa.rclient.post.assert_called_with( - self.pool_url + self.pool + '/projects/' + self.project + '/luns', - expected_arg) - - self.zfssa.rclient.delete.return_value = self._create_response( - client.Status.NO_CONTENT) - self.zfssa.delete_lun(self.pool, self.project, self.vol) - self.zfssa.rclient.delete.assert_called_with( - self.pool_url + self.pool + '/projects/' + self.project + - '/luns/' + self.vol) - - def test_create_delete_snapshot(self): - self.zfssa.rclient.post.return_value = self._create_response( - client.Status.CREATED) - self.zfssa.create_snapshot(self.pool, - self.project, - self.vol, - self.snap) - expected_arg = {'name': self.snap} - self.zfssa.rclient.post.assert_called_with( - self.pool_url + self.pool + '/projects/' + self.project + - '/luns/' + self.vol + '/snapshots', expected_arg) - - self.zfssa.rclient.delete.return_value = self._create_response( - client.Status.NO_CONTENT) - self.zfssa.delete_snapshot(self.pool, - self.project, - self.vol, - self.snap) - self.zfssa.rclient.delete.assert_called_with( - self.pool_url + self.pool + '/projects/' + 
self.project + - '/luns/' + self.vol + '/snapshots/' + self.snap) - - def test_clone_snapshot(self): - self.zfssa.rclient.put.return_value = self._create_response( - client.Status.CREATED) - self.zfssa.clone_snapshot(self.pool, - self.project, - self.vol, - self.snap, - self.project, - self.clone, - None) - expected_svc = '/api/storage/v1/pools/' + self.pool + '/projects/' + \ - self.project + '/luns/' + self.vol + '/snapshots/' + self.snap + \ - '/clone' - expected_arg = {'project': self.project, - 'share': self.clone, - 'nodestroy': True} - self.zfssa.rclient.put.assert_called_with(expected_svc, expected_arg) - - def test_get_project_stats(self): - ret_val = json.dumps({"project": {"name": self.project, - "space_available": 15754895360, - "space_total": 25754895360, - "dedup": False, - "logbias": "latency", - "encryption": "off"}}) - self.zfssa.rclient.get.return_value = self._create_response( - client.Status.OK, ret_val) - self.zfssa.get_project_stats(self.pool, self.project) - expected_svc = '/api/storage/v1/pools/' + self.pool + '/projects/' + \ - self.project - self.zfssa.rclient.get.assert_called_with(expected_svc) - - self.zfssa.rclient.get.return_value = self._create_response( - client.Status.NOT_FOUND) - self.assertRaises(exception.VolumeBackendAPIException, - self.zfssa.get_project_stats, - self.pool, - self.project) - - -class TestZFSSANfsApi(test.TestCase): - - @mock.patch.object(rest, 'factory_restclient') - def setUp(self, _restclient): - super(TestZFSSANfsApi, self).setUp() - self.host = 'fakehost' - self.user = 'fakeuser' - self.url = None - self.pool = 'fakepool' - self.project = 'fakeproject' - self.share = 'fakeshare' - self.snap = 'fakesnapshot' - self.targetalias = 'fakealias' - _restclient.return_value = mock.MagicMock(spec=client.RestClientURL) - self.webdavclient = mock.MagicMock(spec=webdavclient.ZFSSAWebDAVClient) - self.zfssa = rest.ZFSSANfsApi() - self.zfssa.set_host('fakehost') - self.pool_url = '/api/storage/v1/pools/' - - def 
_create_response(self, status, data='data'): - response = FakeResponse(status, data) - return response - - def test_verify_share(self): - self.zfssa.rclient.get.return_value = self._create_response( - client.Status.OK) - self.zfssa.verify_share(self.pool, self.project, self.share) - self.zfssa.rclient.get.assert_called_with(self.pool_url + self.pool + - '/projects/' + self.project + - '/filesystems/' + self.share) - - def test_create_delete_snapshot(self): - self.zfssa.rclient.post.return_value = self._create_response( - client.Status.CREATED) - self.zfssa.create_snapshot(self.pool, - self.project, - self.share, - self.snap) - expected_arg = {'name': self.snap} - self.zfssa.rclient.post.assert_called_with( - self.pool_url + self.pool + '/projects/' + self.project + - '/filesystems/' + self.share + '/snapshots', expected_arg) - - self.zfssa.rclient.delete.return_value = self._create_response( - client.Status.NO_CONTENT) - self.zfssa.delete_snapshot(self.pool, - self.project, - self.share, - self.snap) - self.zfssa.rclient.delete.assert_called_with( - self.pool_url + self.pool + '/projects/' + self.project + - '/filesystems/' + self.share + '/snapshots/' + self.snap) - - def create_delete_snapshot_of_volume_file(self): - src_file = "fake_src_file" - dst_file = "fake_dst_file" - self.zfssa.create_snapshot_of_volume_file(src_file=src_file, - dst_file=dst_file) - self.zfssa.webdavclient.request.assert_called_once_with( - src_file=src_file, - dst_file=dst_file, - method='COPY') - self.zfssa.delete_snapshot_of_volume_file(src_file=src_file) - self.zfssa.webdavclient.request.assert_called_once_with( - src_file=src_file, method='DELETE') - - def test_get_share(self): - ret_val = json.dumps({'filesystem': 'test_fs'}) - self.zfssa.rclient.get.return_value = self._create_response( - client.Status.OK, ret_val) - ret = self.zfssa.get_share(self.pool, self.project, self.share) - self.zfssa.rclient.get.assert_called_with(self.pool_url + self.pool + - '/projects/' + self.project + 
- '/filesystems/' + self.share) - self.assertEqual('test_fs', ret) - - def test_create_share(self): - self.zfssa.rclient.get.return_value = self._create_response( - client.Status.NOT_FOUND) - self.zfssa.rclient.post.return_value = self._create_response( - client.Status.BAD_REQUEST) - self.assertRaises(exception.VolumeBackendAPIException, - self.zfssa.create_share, - self.pool, - self.project, - self.share, - {}) - - @mock.patch.object(rest.ZFSSANfsApi, '_change_service_state') - @mock.patch.object(rest.ZFSSANfsApi, 'verify_service') - def test_enable_disable_modify_service(self, - verify_service, - _change_service_state): - self.zfssa.enable_service('http') - self.zfssa._change_service_state.assert_called_with( - 'http', state='enable') - self.zfssa.verify_service.assert_called_with('http') - - self.zfssa.disable_service('http') - self.zfssa._change_service_state.assert_called_with( - 'http', state='disable') - self.zfssa.verify_service.assert_called_with('http', status='offline') - - ret_val = json.dumps({'service': { - "href": "/api/service/v1/services/http", - "": "online", - "require_login": False, - "protocols": "http/https", - "listen_port": 81, - "https_port": 443}}) - self.zfssa.rclient.put.return_value = self._create_response( - client.Status.ACCEPTED, ret_val) - args = {'listen_port': 81} - self.zfssa.modify_service('http', args) - self.zfssa.rclient.put.called_with('/api/service/v1/services/http', - args) - - -class TestRestClientURL(test.TestCase): - def setUp(self): - super(TestRestClientURL, self).setUp() - self.timeout = 60 - self.url = '1.1.1.1' - self.client = client.RestClientURL(self.url, timeout=self.timeout) - - @mock.patch.object(client.RestClientURL, 'request') - def test_post(self, _request): - path = '/api/storage/v1/pools' - body = {'name': 'fakepool'} - self.client.post(path, body=body) - self.client.request.assert_called_with(path, 'POST', body) - - @mock.patch.object(client.RestClientURL, 'request') - def test_get(self, _request): - 
path = '/api/storage/v1/pools' - self.client.get(path) - self.client.request.assert_called_with(path, 'GET') - - @mock.patch.object(client.RestClientURL, 'request') - def test_put(self, _request): - path = '/api/storage/v1/pools' - body = {'name': 'fakepool'} - self.client.put(path, body=body) - self.client.request.assert_called_with(path, 'PUT', body) - - @mock.patch.object(client.RestClientURL, 'request') - def test_delete(self, _request): - path = '/api/storage/v1/pools' - self.client.delete(path) - self.client.request.assert_called_with(path, 'DELETE') - - @mock.patch.object(client.RestClientURL, 'request') - def test_head(self, _request): - path = '/api/storage/v1/pools' - self.client.head(path) - self.client.request.assert_called_with(path, 'HEAD') - - @mock.patch.object(client, 'RestResult') - @mock.patch.object(client.urllib.request, 'Request') - @mock.patch.object(client.urllib.request, 'urlopen') - def test_request(self, _urlopen, _Request, _RestResult): - path = '/api/storage/v1/pools' - _urlopen.return_value = mock.Mock() - self.client.request(path, mock.ANY) - _Request.assert_called_with(self.url + path, None, self.client.headers) - self.assertEqual(1, _urlopen.call_count) - _RestResult.assert_called_with(response=mock.ANY) - - @mock.patch.object(client, 'RestResult') - @mock.patch.object(client.urllib.request, 'Request') - @mock.patch.object(client.urllib.request, 'urlopen') - @mock.patch.object(client, 'ssl', new_callable=FakeSSL) - def test_ssl_with_context(self, _ssl, _urlopen, _Request, _RestResult): - """Test PEP476 certificate opt_out fix. 
""" - path = '/api/storage/v1/pools' - _urlopen.return_value = mock.Mock() - self.client.request(path, mock.ANY) - _urlopen.assert_called_once_with(mock.ANY, - timeout=self.timeout, - context='fakecontext') - - @mock.patch.object(client, 'RestResult') - @mock.patch.object(client.urllib.request, 'Request') - @mock.patch.object(client.urllib.request, 'urlopen') - @mock.patch.object(client, 'ssl', new_callable=object) - def test_ssl_no_context(self, _ssl, _urlopen, _Request, _RestResult): - """Verify the PEP476 fix backward compatibility. """ - path = '/api/storage/v1/pools' - _urlopen.return_value = mock.Mock() - self.client.request(path, mock.ANY) - _urlopen.assert_called_once_with(mock.ANY, timeout=self.timeout) diff --git a/cinder/tests/unit/volume/drivers/test_zte_ks.py b/cinder/tests/unit/volume/drivers/test_zte_ks.py deleted file mode 100644 index 557e7b077..000000000 --- a/cinder/tests/unit/volume/drivers/test_zte_ks.py +++ /dev/null @@ -1,424 +0,0 @@ -# Copyright 2016 ZTE Corporation. All rights reserved -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -# - -""" -Self test for ZTE Storage Driver platform. 
-""" -from oslo_config import cfg - -from cinder import exception -from cinder import test -from cinder.volume import configuration as conf -from cinder.volume.drivers.zte import zte_ks -from cinder.volume.drivers.zte import zte_pub - - -session_id = 'kfomqdnoetjcjlva' -volume_paras = {'name': 'volume-21ec7341-9256-497b-97d9-ef48edcf0635', - 'size': 2, - 'volume_name': 'vol1', - 'id': '21ec7341-9256-497b-97d9-ef48edcf0635', - 'volume_id': '21ec7341-9256-497b-97d9-ef48edcf0635', - 'provider_auth': None, - 'project_id': 'project', - 'display_name': 'vol1', - 'source_volid': None, - 'volume_metadata': [], - 'display_description': 'test volume', - 'volume_type_id': None} - -volume_clone = {'name': 'volume-ee317512-f6a6-4284-a94e-5f4ac8783169', - 'size': 4, - 'volume_name': 'vol1', - 'id': 'ee317512-f6a6-4284-a94e-5f4ac8783169', - 'volume_id': 'ee317512-f6a6-4284-a94e-5f4ac8783169', - 'provider_auth': None, - 'project_id': 'project', - 'display_name': 'clone_vol1', - 'source_volid': None, - 'volume_metadata': [], - 'display_description': 'test clone volume', - 'volume_type_id': None} -snapvolume_paras = {'name': 'volume-21ec7341-9256-497b-97d9-ef48edcf0635', - 'size': 2, - 'volume_size': 2, - 'volume_name': 'vol1', - 'id': '21ec7341-9256-497b-97d9-ef48edcf0635', - 'volume_id': '21ec7341-9256-497b-97d9-ef48edcf0635', - 'provider_auth': None, - 'project_id': 'project', - 'display_name': 'vol1', - 'source_volid': None, - 'volume_metadata': [], - 'display_description': 'test volume', - 'volume_type_id': None} -connector = {'ip': '10.0.0.0', - 'initiator': 'iqn.1993-08.org.debian:01:222'} -fcconnector = {'ip': '10.0.0.0', - 'wwpns': [1, 2, 3, 4, 5, 6, 7, 8]} -fake_opt = [ - cfg.StrOpt('fake_opt', default='fake', help='fake opts') -] -VolFlowLimitAttr_paras = {'sqwWriteFlowLimit': 0, - 'cVolName': 'OpenCos_5072124445952515861', - 'sqwTotalFlowLimit': 500, - 'sqwWriteIoCount': 0, - 'sqwTotalIoCount': 0, - 'sqwReadFlowLimit': 0, - 'sqwReadIoCount': 0} - -volume_name = 
'OpenCos_8359669312515962256' -return_success = {'returncode': zte_pub.ZTE_SUCCESS, 'data': {}} -return_error = {'returncode': zte_pub.ZTE_ERR_LUNDEV_NOT_EXIST, 'data': {}} -return_port_error = ( - {'returncode': zte_pub.ZTE_ERR_PORT_EXIST_INOTHER, 'data': {}}) -return_host_error = ( - {'returncode': zte_pub.ZTE_ERR_HOST_EXIST_INOTHER, 'data': {}}) - -MAP_TO_RESPONSE = {} -signin_info = {'sessionID': session_id} -MAP_TO_RESPONSE['plat.session.signin'] = {'returncode': zte_pub.ZTE_SUCCESS, - 'data': signin_info} -MAP_TO_RESPONSE['plat.session.heartbeat'] = return_success -pool_info = {'sdwState': 1, 'qwTotalCapacity': 1024560, - 'qwFreeCapacity': 102456} -MAP_TO_RESPONSE['GetPoolInfo'] = {'returncode': zte_pub.ZTE_SUCCESS, - 'data': pool_info} -MAP_TO_RESPONSE['CreateVolOnPool'] = return_success -MAP_TO_RESPONSE['DelCvol'] = return_success -MAP_TO_RESPONSE['GetCvolNamesOnVol'] = { - 'returncode': zte_pub.ZTE_SUCCESS, - 'data': {'sdwCvolNum': 2, 'scCvolNames': [{'scCvolName': 'clone1'}, - {'scCvolName': 'clone2'}]}} -MAP_TO_RESPONSE['CreateSvol'] = return_success -MAP_TO_RESPONSE['DelSvol'] = return_success -MAP_TO_RESPONSE['ExpandVolOnPool'] = return_success -MAP_TO_RESPONSE['CreateCvol'] = return_success -cVolName_from_vol_name = "OpenCos_9fbc232bf71ee2fa8bd" -grp_info = {'sdwHostNum': 1, - 'tHostInfo': [{'ucHostName': 'host1'}], - 'sdwLunNum': 5, - 'cMapGrpName': 'group_cjf', - 'tLunInfo': [{'sdwLunState': 0, 'sdwBlockSize': 0, - 'sdwAccessAttr': 0, 'sdwLunId': 0, - 'cVolName': 'vol1'}, - {'sdwLunState': 0, 'sdwBlockSize': 0, - 'sdwAccessAttr': 0, 'sdwLunId': 1, - 'cVolName': volume_name}, - {'sdwLunState': 0, 'sdwBlockSize': 0, - 'sdwAccessAttr': 0, 'sdwLunId': 2, - 'cVolName': 'vol3'}, - {'sdwLunState': 0, 'sdwBlockSize': 0, - 'sdwAccessAttr': 0, 'sdwLunId': 3, - 'cVolName': cVolName_from_vol_name}, - {'sdwLunState': 0, 'sdwBlockSize': 0, - 'sdwAccessAttr': 0, 'sdwLunId': 5, - 'cVolName': 'vol4'}]} -MAP_TO_RESPONSE['GetMapGrpInfo'] = ( - {'returncode': 
zte_pub.ZTE_SUCCESS, 'data': grp_info}) -MAP_TO_RESPONSE['DelMapGrp'] = return_success -simple_grp_info = {'sdwMapGrpNum': 0, - 'tMapGrpSimpleInfo': [{'sdwHostNum': 0, - 'sdwLunNum': 0, - 'cMapGrpName': session_id}, - {'sdwHostNum': 1, - 'sdwLunNum': 1, - 'cMapGrpName': ''}]} -MAP_TO_RESPONSE['GetGrpSimpleInfoList'] = {'returncode': zte_pub.ZTE_SUCCESS, - 'data': simple_grp_info} -luninfo = {'sdwLunId': 3} -MAP_TO_RESPONSE['AddVolToGrp'] = {'returncode': zte_pub.ZTE_SUCCESS, - 'data': luninfo} -sys_info = {'cVendor': 'ZTE', 'cVersionName': 'V1.0', - 'storage_protocol': 'iSCSI'} -MAP_TO_RESPONSE['GetSysInfo'] = {'returncode': zte_pub.ZTE_SUCCESS, - 'data': sys_info} -cfg_info = {'sdwDeviceNum': 4, - 'tSystemNetCfg': [ - {'udwCtrlId': 0, 'udwRoleType': 0, - 'udwPortType': 1, 'udwDeviceId': 123, - 'cIpAddr': '198.51.100.20'}, - {'udwCtrlId': 0, 'udwRoleType': 0, - 'udwPortType': 1, 'udwDeviceId': 123, - 'cIpAddr': '198.51.100.21'}, - {'udwCtrlId': 0, 'udwRoleType': 0, - 'udwPortType': 1, 'udwDeviceId': 123, - 'cIpAddr': '198.51.100.22'}, - {'udwCtrlId': 0, 'udwRoleType': 0, - 'udwPortType': 1, 'udwDeviceId': 123, - 'cIpAddr': '198.51.100.23'}]} -MAP_TO_RESPONSE['GetSystemNetCfg'] = {'returncode': zte_pub.ZTE_SUCCESS, - 'data': cfg_info} -iscsi_target = { - 'tIscsiTargetInfo': [ - {'udwCtrlId': 0, - 'cTgtName': 'iqn.2099-01.cn.com.zte:usp.spr11-00:00:22:15'}, - {'udwCtrlId': 0, - 'cTgtName': 'iqn.2099-01.cn.com.zte:usp.spr11-00:00:22:25'}], - 'udwCtrlCount': 2} -MAP_TO_RESPONSE['GetIscsiTargetName'] = {'returncode': zte_pub.ZTE_SUCCESS, - 'data': iscsi_target} -MAP_TO_RESPONSE['CreateMapGrp'] = return_success -grp_info_forsearch = {'cVolName': 'vol1', - 'sdwMapGrpNum': 1, - 'cMapGrpNames': ['grp1'], - 'sdwLunLocalId': [1]} -MAP_TO_RESPONSE['GetGrpNamesOfVol'] = ( - {'returncode': zte_pub.ZTE_SUCCESS, 'data': grp_info_forsearch}) -MAP_TO_RESPONSE['CreateHost'] = return_success -MAP_TO_RESPONSE['AddPortToHost'] = return_success -MAP_TO_RESPONSE['AddHostToGrp'] = 
return_success -MAP_TO_RESPONSE['DelVolFromGrp'] = return_success -MAP_TO_RESPONSE['DelHostFromGrp'] = return_success -host_info = {'sdwPortNum': 2, - 'tPort': [{'cPortName': 'port1'}, - {'cPortName': 'port2'}]} -MAP_TO_RESPONSE['GetHost'] = {'returncode': zte_pub.ZTE_SUCCESS, - 'data': host_info} -MAP_TO_RESPONSE['DelPortFromHost'] = return_success -MAP_TO_RESPONSE['DelHost'] = return_success - - -class FakeZteISCSIDriver(zte_ks.ZteISCSIDriver): - def __init__(self, configuration): - self.configuration = configuration - super(FakeZteISCSIDriver, self).__init__( - configuration=self.configuration) - self.result = zte_pub.ZTE_SUCCESS - self.test_flag = True - self.portexist_flag = False - self.portexistother_flag = False - self.hostexist_flag = False - self.hostexistother_flag = False - - def _call(self, sessionid='', method='', params=None): - return_data = return_success - - if method in MAP_TO_RESPONSE.keys(): - return_data = MAP_TO_RESPONSE[method] - - if not self.test_flag: - return_data = return_error - if self.portexistother_flag: - return_data = return_port_error - if self.hostexistother_flag: - return_data = return_host_error - return return_data - - def _check_conf_file(self): - pass - - def _get_iscsi_info(self): - iscsi_info = {'DefaultTargetIPs': ["198.51.100.20"]} - - return iscsi_info - - -class ZteBaseDriverTestCase(object): - def test_create_volume_success(self): - self.driver.test_flag = True - self.driver.create_volume(volume_paras) - self.assertEqual(zte_pub.ZTE_SUCCESS, self.driver.result) - - def test_create_volume_fail(self): - self.driver.test_flag = False - self.assertRaises(exception.CinderException, - self.driver.create_volume, volume_paras) - - def test_delete_volume_success(self): - self.driver.test_flag = True - self.driver.delete_volume(volume_paras) - self.assertEqual(zte_pub.ZTE_SUCCESS, self.driver.result) - - def test_delete_volume_fail(self): - self.driver.test_flag = False - self.assertRaises(exception.CinderException, - 
self.driver.delete_volume, volume_paras) - - def test_delete_cloned_volume_success(self): - self.driver.test_flag = True - vol = {'name': 'volume-21ec7341-9256-497b-97d9-ef48edcf0635', - 'source_volid': '68a52c1e-ecbe-4f6c-954c-9f551347ff3f'} - self.driver.delete_volume(vol) - self.assertEqual(zte_pub.ZTE_SUCCESS, self.driver.result) - - def test_delete_cloned_volume_fail(self): - self.driver.test_flag = False - vol = {'name': 'volume-21ec7341-9256-497b-97d9-ef48edcf0635', - 'source_volid': '68a52c1e-ecbe-4f6c-954c-9f551347ff3f'} - self.assertRaises(exception.CinderException, - self.driver.delete_volume, vol) - - def test_create_snapshot_success(self): - self.driver.test_flag = True - snap_vol = {'name': 'snapshot-2b9b982a-8b56-46e3-9d4f-6392e8a72e6e', - 'volume_name': - 'volume-21ec7341-9256-497b-97d9-ef48edcf0635', - 'volume_size': 2} - self.driver.create_snapshot(snap_vol) - self.assertEqual(zte_pub.ZTE_SUCCESS, self.driver.result) - - def test_create_snapshot_fail(self): - self.driver.test_flag = False - snap_vol = {'name': 'snapshot-2b9b982a-8b56-46e3-9d4f-6392e8a72e6e', - 'volume_name': - 'volume-21ec7341-9256-497b-97d9-ef48edcf0635', - 'volume_size': 2} - self.assertRaises(exception.CinderException, - self.driver.create_snapshot, snap_vol) - - def test_delete_snapshot_success(self): - self.driver.test_flag = True - snap_vol = {'name': 'snapshot-2b9b982a-8b56-46e3-9d4f-6392e8a72e6e'} - self.driver.delete_snapshot(snap_vol) - self.assertEqual(zte_pub.ZTE_SUCCESS, self.driver.result) - - def test_delete_snapshot_fail(self): - self.driver.test_flag = False - snap_vol = {'name': 'snapshot-2b9b982a-8b56-46e3-9d4f-6392e8a72e6e'} - self.assertRaises(exception.CinderException, - self.driver.delete_snapshot, snap_vol) - - def test_extend_volume_success(self): - self.driver.test_flag = True - self.driver.extend_volume(volume_paras, 4) - self.assertEqual(zte_pub.ZTE_SUCCESS, self.driver.result) - - def test_extend_volume_fail(self): - self.driver.test_flag = False - 
self.assertRaises(exception.CinderException, - self.driver.extend_volume, volume_paras, 4) - - def test_create_cloned_volume_success(self): - self.driver.test_flag = True - self.driver.create_cloned_volume(volume_clone, volume_paras) - self.assertEqual(zte_pub.ZTE_SUCCESS, self.driver.result) - - def test_create_cloned_volume_fail(self): - self.driver.test_flag = False - self.assertRaises(exception.CinderException, - self.driver.create_cloned_volume, - volume_clone, volume_paras) - - def test_create_volume_from_snapshot_success(self): - self.driver.test_flag = True - self.driver.create_volume_from_snapshot(volume_clone, snapvolume_paras) - self.assertEqual(zte_pub.ZTE_SUCCESS, self.driver.result) - - def test_create_volume_from_snapshot_fail(self): - self.driver.test_flag = False - self.assertRaises(exception.CinderException, - self.driver.create_cloned_volume, - volume_clone, volume_paras) - - -class ZteISCSIDriverTestCase(ZteBaseDriverTestCase, test.TestCase): - """Test ZTE iSCSI volume driver.""" - - def __init__(self, *args, **kwargs): - super(ZteISCSIDriverTestCase, self).__init__(*args, **kwargs) - - def setUp(self): - super(ZteISCSIDriverTestCase, self).setUp() - configuration = conf.Configuration(None) - self.configuration = configuration - - self.configuration.zteControllerIP0 = '192.0.2.2' - self.configuration.zteLocalIP = '192.0.2.8' - self.configuration.zteUserName = 'root' - self.configuration.zteUserPassword = 'root' - self.configuration.zteChunkSize = 64 - self.configuration.zteAheadReadSize = 8 - self.configuration.zteCachePolicy = 65535 - self.configuration.zteSSDCacheSwitch = 0 - self.configuration.zteStoragePool = 'pool1,pool2,pool3' - self.configuration.ztePoolVolAllocPolicy = 0 - self.configuration.ztePoolVolMovePolicy = 0 - self.configuration.ztePoolVolIsThin = 0 - self.configuration.ztePoolVolInitAllocedCapacity = 0 - self.configuration.ztePoolVolAlarmThreshold = 0 - self.configuration.ztePoolVolAlarmStopAllocFlag = 0 - - self.driver = 
FakeZteISCSIDriver(configuration=self.configuration) - self.driver.do_setup({}) - - def test_get_volume_stats(self): - stats = self.driver.get_volume_stats(True) - self.assertEqual("ZTE", stats["vendor_name"]) - self.assertEqual("iSCSI", stats["storage_protocol"]) - self.assertEqual("V1.0", stats["driver_version"]) - self.assertLess(0, stats["total_capacity_gb"]) - - def test_initialize_connection_success(self): - self.driver.test_flag = True - data = self.driver.initialize_connection(volume_paras, connector) - properties = data['data'] - self.assertEqual("iscsi", data["driver_volume_type"]) - self.assertEqual('iqn.2099-01.cn.com.zte:usp.spr11-00:00:22:15', - properties["target_iqn"]) - self.assertEqual(3, properties["target_lun"]) - self.assertEqual('198.51.100.20:3260', properties["target_portal"]) - - def test_initialize_connection_portexist(self): - self.driver.portexist_flag = True - data = self.driver.initialize_connection(volume_paras, connector) - properties = data['data'] - self.assertEqual("iscsi", data["driver_volume_type"]) - self.assertEqual('iqn.2099-01.cn.com.zte:usp.spr11-00:00:22:15', - properties["target_iqn"]) - self.assertEqual(3, properties["target_lun"]) - self.assertEqual('198.51.100.20:3260', properties["target_portal"]) - - def test_initialize_connection_hostexist(self): - self.driver.hostexist_flag = True - data = self.driver.initialize_connection(volume_paras, connector) - - properties = data['data'] - self.assertEqual("iscsi", data["driver_volume_type"]) - self.assertEqual('iqn.2099-01.cn.com.zte:usp.spr11-00:00:22:15', - properties["target_iqn"]) - self.assertEqual(3, properties["target_lun"]) - self.assertEqual('198.51.100.20:3260', properties["target_portal"]) - - def test_initialize_connection_portexistother(self): - self.driver.portexistother_flag = True - self.assertRaises(exception.CinderException, - self.driver.initialize_connection, - volume_paras, connector) - - def test_initialize_connection_hostexistother(self): - 
self.driver.hostexistother_flag = True - self.assertRaises(exception.CinderException, - self.driver.initialize_connection, - volume_paras, connector) - - def test_initialize_connection_fail(self): - self.driver.test_flag = False - self.assertRaises(exception.CinderException, - self.driver.initialize_connection, - volume_paras, connector) - - def test_terminate_connection_success(self): - self.driver.test_flag = True - vol = {'name': 'volume-ee317512-f6a6-4284-a94e-5f4ac8783169'} - self.driver.terminate_connection(vol, connector) - self.assertEqual(zte_pub.ZTE_SUCCESS, self.driver.result) - - def test_terminate_connection_fail(self): - self.driver.test_flag = False - vol = {'name': 'volume-ee317512-f6a6-4284-a94e-5f4ac8783169'} - self.assertRaises(exception.CinderException, - self.driver.terminate_connection, vol, connector) diff --git a/cinder/tests/unit/volume/drivers/violin/__init__.py b/cinder/tests/unit/volume/drivers/violin/__init__.py deleted file mode 100644 index e69de29bb..000000000 diff --git a/cinder/tests/unit/volume/drivers/violin/fake_vmem_client.py b/cinder/tests/unit/volume/drivers/violin/fake_vmem_client.py deleted file mode 100644 index db42bfc05..000000000 --- a/cinder/tests/unit/volume/drivers/violin/fake_vmem_client.py +++ /dev/null @@ -1,67 +0,0 @@ -# Copyright 2014 Violin Memory, Inc. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -""" -Fake VMEM REST client for testing drivers. 
-""" - -import sys - -import mock - - -# The following gymnastics to fake an exception class globally is done because -# we want to globally model and make available certain exceptions. If we do -# not do this, then the real-driver's import will not see our fakes. -class NoMatchingObjectIdError(Exception): - pass - -error = mock.Mock() -error.NoMatchingObjectIdError = NoMatchingObjectIdError - -core = mock.Mock() -core.attach_mock(error, 'error') - -vmemclient = mock.Mock() -vmemclient.__version__ = "unknown" -vmemclient.attach_mock(core, 'core') - -sys.modules['vmemclient'] = vmemclient - -mock_client_conf = [ - 'basic', - 'basic.login', - 'basic.get_node_values', - 'basic.save_config', - 'lun', - 'lun.export_lun', - 'lun.unexport_lun', - 'snapshot', - 'snapshot.export_lun_snapshot', - 'snapshot.unexport_lun_snapshot', - 'iscsi', - 'iscsi.bind_ip_to_target', - 'iscsi.create_iscsi_target', - 'iscsi.delete_iscsi_target', - 'igroup', - 'client', - 'client.get_client_info', - 'client.create_client', - 'client.delete_client', - 'adapter', - 'adapter.get_fc_info', - 'pool', - 'utility', -] diff --git a/cinder/tests/unit/volume/drivers/violin/test_v7000_common.py b/cinder/tests/unit/volume/drivers/violin/test_v7000_common.py deleted file mode 100644 index 8f7110753..000000000 --- a/cinder/tests/unit/volume/drivers/violin/test_v7000_common.py +++ /dev/null @@ -1,1417 +0,0 @@ -# Copyright 2015 Violin Memory, Inc. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the -# License for the specific language governing permissions and limitations -# under the License. - -""" -Tests for Violin Memory 7000 Series All-Flash Array Common Driver -""" -import ddt -import math -import mock -import six - -from oslo_utils import units - -from cinder import context -from cinder import exception -from cinder import test -from cinder.tests.unit import fake_constants as fake -from cinder.tests.unit.volume.drivers.violin \ - import fake_vmem_client as vmemclient -from cinder.volume import configuration as conf -from cinder.volume.drivers.violin import v7000_common -from cinder.volume import volume_types - - -VOLUME_ID = "abcdabcd-1234-abcd-1234-abcdeffedcba" -VOLUME = {"name": "volume-" + VOLUME_ID, - "id": VOLUME_ID, - "display_name": "fake_volume", - "size": 2, - "host": "irrelevant", - "volume_type": None, - "volume_type_id": None, - } -SNAPSHOT_ID = "abcdabcd-1234-abcd-1234-abcdeffedcbb" -SNAPSHOT = {"name": "snapshot-" + SNAPSHOT_ID, - "id": SNAPSHOT_ID, - "volume_id": VOLUME_ID, - "volume_name": "volume-" + VOLUME_ID, - "volume_size": 2, - "display_name": "fake_snapshot", - } -SRC_VOL_ID = "abcdabcd-1234-abcd-1234-abcdeffedcbc" -SRC_VOL = {"name": "volume-" + SRC_VOL_ID, - "id": SRC_VOL_ID, - "display_name": "fake_src_vol", - "size": 2, - "host": "irrelevant", - "volume_type": None, - "volume_type_id": None, - } -INITIATOR_IQN = "iqn.1111-22.org.debian:11:222" -CONNECTOR = {"initiator": INITIATOR_IQN} -DEFAULT_DEDUP_POOL = {"storage_pool": 'PoolA', - "storage_pool_id": 99, - "dedup": True, - "thin": True, - } -DEFAULT_THIN_POOL = {"storage_pool": 'PoolA', - "storage_pool_id": 99, - "dedup": False, - "thin": True, - } -DEFAULT_THICK_POOL = {"storage_pool": 'PoolA', - "storage_pool_id": 99, - "dedup": False, - "thin": False, - } - -# Note: select superfluous fields are removed for brevity -STATS_STORAGE_POOL_RESPONSE = [({ - 'availsize_mb': 1572827, - 'category': 'Virtual Device', - 'name': 'dedup-pool', - 'object_id': 
'487d1940-c53f-55c3-b1d5-073af43f80fc', - 'size_mb': 2097124, - 'storage_pool_id': 1, - 'usedsize_mb': 524297}, - {'category': 'Virtual Device', - 'name': 'dedup-pool', - 'object_id': '487d1940-c53f-55c3-b1d5-073af43f80fc', - 'physicaldevices': [ - {'availsize_mb': 524281, - 'connection_type': 'fc', - 'name': 'VIOLIN:CONCERTO ARRAY.003', - 'object_id': '260f30b0-0300-59b5-b7b9-54aa55704a12', - 'owner': 'lab-host1', - 'size_mb': 524281, - 'type': 'Direct-Access', - 'usedsize_mb': 0}, - {'availsize_mb': 524281, - 'connection_type': 'fc', - 'name': 'VIOLIN:CONCERTO ARRAY.004', - 'object_id': '7b58eda2-69da-5aec-9e06-6607934efa93', - 'owner': 'lab-host1', - 'size_mb': 524281, - 'type': 'Direct-Access', - 'usedsize_mb': 0}, - {'availsize_mb': 0, - 'connection_type': 'fc', - 'name': 'VIOLIN:CONCERTO ARRAY.001', - 'object_id': '69adbea1-2349-5df5-a04a-abd7f14868b2', - 'owner': 'lab-host1', - 'size_mb': 524281, - 'type': 'Direct-Access', - 'usedsize_mb': 524281}, - {'availsize_mb': 524265, - 'connection_type': 'fc', - 'name': 'VIOLIN:CONCERTO ARRAY.002', - 'object_id': 'a14a0e36-8901-5987-95d8-aa574c6138a2', - 'owner': 'lab-host1', - 'size_mb': 524281, - 'type': 'Direct-Access', - 'usedsize_mb': 16}], - 'size_mb': 2097124, - 'storage_pool_id': 1, - 'total_physicaldevices': 4, - 'usedsize_mb': 524297}), - ({'availsize': 0, - 'availsize_mb': 0, - 'category': None, - 'name': 'thick_pool_13531mgb', - 'object_id': '20610abd-4c58-546c-8905-bf42fab9a11b', - 'size': 0, - 'size_mb': 0, - 'storage_pool_id': 3, - 'tag': '', - 'total_physicaldevices': 0, - 'usedsize': 0, - 'usedsize_mb': 0}, - {'category': None, - 'name': 'thick_pool_13531mgb', - 'object_id': '20610abd-4c58-546c-8905-bf42fab9a11b', - 'resource_type': ['All'], - 'size': 0, - 'size_mb': 0, - 'storage_pool_id': 3, - 'tag': [''], - 'total_physicaldevices': 0, - 'usedsize': 0, - 'usedsize_mb': 0}), - ({'availsize_mb': 627466, - 'category': 'Virtual Device', - 'name': 'StoragePool', - 'object_id': 
'1af66d9a-f62e-5b69-807b-892b087fa0b4', - 'size_mb': 21139267, - 'storage_pool_id': 7, - 'usedsize_mb': 20511801}, - {'category': 'Virtual Device', - 'name': 'StoragePool', - 'object_id': '1af66d9a-f62e-5b69-807b-892b087fa0b4', - 'physicaldevices': [ - {'availsize_mb': 0, - 'connection_type': 'block', - 'name': 'BKSC:OTHDISK-MFCN02.000', - 'object_id': 'ecc775f1-1228-5131-8f68-4176001786ef', - 'owner': 'lab-host1', - 'size_mb': 1048569, - 'type': 'Direct-Access', - 'usedsize_mb': 1048569}, - {'availsize_mb': 0, - 'connection_type': 'block', - 'name': 'BKSC:OTHDISK-MFCN01.000', - 'object_id': '5c60812b-34d2-5473-b7bf-21e30ec70311', - 'owner': 'lab-host1', - 'size_mb': 1048569, - 'type': 'Direct-Access', - 'usedsize_mb': 1048569}, - {'availsize_mb': 0, - 'connection_type': 'block', - 'name': 'BKSC:OTHDISK-MFCN08.001', - 'object_id': 'eb6d06b7-8d6f-5d9d-b720-e86d8ad1beab', - 'owner': 'lab-host1', - 'size_mb': 1048569, - 'type': 'Direct-Access', - 'usedsize_mb': 1048569}, - {'availsize_mb': 0, - 'connection_type': 'block', - 'name': 'BKSC:OTHDISK-MFCN03.001', - 'object_id': '063aced7-1f8f-5e15-b36e-e9d34a2826fa', - 'owner': 'lab-host1', - 'size_mb': 1048569, - 'type': 'Direct-Access', - 'usedsize_mb': 1048569}, - {'availsize_mb': 0, - 'connection_type': 'block', - 'name': 'BKSC:OTHDISK-MFCN07.001', - 'object_id': 'ebf34594-2b92-51fe-a6a8-b6cf91f05b2b', - 'owner': 'lab-host1', - 'size_mb': 1048569, - 'type': 'Direct-Access', - 'usedsize_mb': 1048569}, - {'availsize_mb': 0, - 'connection_type': 'block', - 'name': 'BKSC:OTHDISK-MFCN0A.000', - 'object_id': 'ff084188-b97f-5e30-9ff0-bc60e546ee06', - 'owner': 'lab-host1', - 'size_mb': 1048569, - 'type': 'Direct-Access', - 'usedsize_mb': 1048569}, - {'availsize_mb': 0, - 'connection_type': 'block', - 'name': 'BKSC:OTHDISK-MFCN06.001', - 'object_id': 'f9cbeadf-5524-5697-a3a6-667820e37639', - 'owner': 'lab-host1', - 'size_mb': 1048569, - 'type': 'Direct-Access', - 'usedsize_mb': 1048569}, - {'availsize_mb': 167887, - 
'connection_type': 'block', - 'name': 'BKSC:OTHDISK-MFCN15.000', - 'object_id': 'aaacc124-26c9-519a-909a-a93d24f579a1', - 'owner': 'lab-host2', - 'size_mb': 167887, - 'type': 'Direct-Access', - 'usedsize_mb': 0}, - {'availsize_mb': 229276, - 'connection_type': 'block', - 'name': 'BKSC:OTHDISK-MFCN09.001', - 'object_id': '30967a84-56a4-52a5-ac3f-b4f544257bbd', - 'owner': 'lab-host1', - 'size_mb': 1048569, - 'type': 'Direct-Access', - 'usedsize_mb': 819293}, - {'availsize_mb': 0, - 'connection_type': 'block', - 'name': 'BKSC:OTHDISK-MFCN04.001', - 'object_id': 'd997eb42-55d4-5e4c-b797-c68b748e7e1f', - 'owner': 'lab-host1', - 'size_mb': 1048569, - 'type': 'Direct-Access', - 'usedsize_mb': 1048569}, - {'availsize_mb': 0, - 'connection_type': 'block', - 'name': 'BKSC:OTHDISK-MFCN05.001', - 'object_id': '56ecf98c-f10b-5bb5-9d3b-5af6037dad73', - 'owner': 'lab-host1', - 'size_mb': 1048569, - 'type': 'Direct-Access', - 'usedsize_mb': 1048569}, - {'availsize_mb': 0, - 'connection_type': 'block', - 'name': 'BKSC:OTHDISK-MFCN0B.000', - 'object_id': 'cfb6f61c-508d-5394-8257-78b1f9bcad3b', - 'owner': 'lab-host2', - 'size_mb': 1048569, - 'type': 'Direct-Access', - 'usedsize_mb': 1048569}, - {'availsize_mb': 0, - 'connection_type': 'block', - 'name': 'BKSC:OTHDISK-MFCN0C.000', - 'object_id': '7b0bcb51-5c7d-5752-9e18-392057e534f0', - 'owner': 'lab-host2', - 'size_mb': 1048569, - 'type': 'Direct-Access', - 'usedsize_mb': 1048569}, - {'availsize_mb': 0, - 'connection_type': 'block', - 'name': 'BKSC:OTHDISK-MFCN0D.000', - 'object_id': 'b785a3b1-6316-50c3-b2e0-6bb0739499c6', - 'owner': 'lab-host2', - 'size_mb': 1048569, - 'type': 'Direct-Access', - 'usedsize_mb': 1048569}, - {'availsize_mb': 0, - 'connection_type': 'block', - 'name': 'BKSC:OTHDISK-MFCN0E.000', - 'object_id': '76b9d038-b757-515a-b962-439a4fd85fd5', - 'owner': 'lab-host2', - 'size_mb': 1048569, - 'type': 'Direct-Access', - 'usedsize_mb': 1048569}, - {'availsize_mb': 0, - 'connection_type': 'block', - 'name': 
'BKSC:OTHDISK-MFCN0F.000', - 'object_id': '9591d24a-70c4-5e80-aead-4b788202c698', - 'owner': 'lab-host2', - 'size_mb': 1048569, - 'type': 'Direct-Access', - 'usedsize_mb': 1048569}, - {'availsize_mb': 0, - 'connection_type': 'block', - 'name': 'BKSC:OTHDISK-MFCN10.000', - 'object_id': '2bb09a2b-9063-595b-9d7a-7e5fad5016db', - 'owner': 'lab-host2', - 'size_mb': 1048569, - 'type': 'Direct-Access', - 'usedsize_mb': 1048569}, - {'availsize_mb': 0, - 'connection_type': 'block', - 'name': 'BKSC:OTHDISK-MFCN11.000', - 'object_id': 'b9ff58eb-5e6e-5c79-bf95-fae424492519', - 'owner': 'lab-host2', - 'size_mb': 1048569, - 'type': 'Direct-Access', - 'usedsize_mb': 1048569}, - {'availsize_mb': 0, - 'connection_type': 'block', - 'name': 'BKSC:OTHDISK-MFCN12.000', - 'object_id': '6abd4fd6-9841-5978-bfcb-5d398d1715b4', - 'owner': 'lab-host2', - 'size_mb': 1048569, - 'type': 'Direct-Access', - 'usedsize_mb': 1048569}, - {'availsize_mb': 230303, - 'connection_type': 'block', - 'name': 'BKSC:OTHDISK-MFCN13.000', - 'object_id': 'ffd5a4b7-0f50-5a71-bbba-57a348b96c68', - 'owner': 'lab-host2', - 'size_mb': 1048569, - 'type': 'Direct-Access', - 'usedsize_mb': 818266}, - {'availsize_mb': 0, - 'connection_type': 'block', - 'name': 'BKSC:OTHDISK-MFCN14.000', - 'object_id': '52ffbbae-bdac-5194-ba6b-62ee17bfafce', - 'owner': 'lab-host2', - 'size_mb': 1048569, - 'type': 'Direct-Access', - 'usedsize_mb': 1048569}], - 'size_mb': 21139267, - 'storage_pool_id': 7, - 'tag': [''], - 'total_physicaldevices': 21, - 'usedsize_mb': 20511801}), - ({'availsize_mb': 1048536, - 'category': 'Virtual Device', - 'name': 'thick-pool', - 'object_id': 'c1e0becc-3497-5d74-977a-1e5a79769576', - 'size_mb': 2097124, - 'storage_pool_id': 9, - 'usedsize_mb': 1048588}, - {'category': 'Virtual Device', - 'name': 'thick-pool', - 'object_id': 'c1e0becc-3497-5d74-977a-1e5a79769576', - 'physicaldevices': [ - {'availsize_mb': 524255, - 'connection_type': 'fc', - 'name': 'VIOLIN:CONCERTO ARRAY.001', - 'object_id': 
'a90c4a11-33af-5530-80ca-2360fa477781', - 'owner': 'lab-host1', - 'size_mb': 524281, - 'type': 'Direct-Access', - 'usedsize_mb': 26}, - {'availsize_mb': 0, - 'connection_type': 'fc', - 'name': 'VIOLIN:CONCERTO ARRAY.002', - 'object_id': '0a625ec8-2e80-5086-9644-2ea8dd5c32ec', - 'owner': 'lab-host1', - 'size_mb': 524281, - 'type': 'Direct-Access', - 'usedsize_mb': 524281}, - {'availsize_mb': 0, - 'connection_type': 'fc', - 'name': 'VIOLIN:CONCERTO ARRAY.004', - 'object_id': '7018670b-3a79-5bdc-9d02-2d85602f361a', - 'owner': 'lab-host1', - 'size_mb': 524281, - 'type': 'Direct-Access', - 'usedsize_mb': 524281}, - {'availsize_mb': 524281, - 'connection_type': 'fc', - 'name': 'VIOLIN:CONCERTO ARRAY.003', - 'object_id': 'd859d47b-ca65-5d9d-a1c0-e288bbf39f48', - 'owner': 'lab-host1', - 'size_mb': 524281, - 'type': 'Direct-Access', - 'usedsize_mb': 0}], - 'size_mb': 2097124, - 'storage_pool_id': 9, - 'total_physicaldevices': 4, - 'usedsize_mb': 1048588})] - - -@ddt.ddt -class V7000CommonTestCase(test.TestCase): - """Test case for Violin drivers.""" - def setUp(self): - super(V7000CommonTestCase, self).setUp() - self.conf = self.setup_configuration() - self.driver = v7000_common.V7000Common(self.conf) - self.driver.container = 'myContainer' - self.driver.device_id = 'ata-VIOLIN_MEMORY_ARRAY_23109R00000022' - self.stats = {} - - def setup_configuration(self): - config = mock.Mock(spec=conf.Configuration) - config.volume_backend_name = 'v7000_common' - config.san_ip = '1.1.1.1' - config.san_login = 'admin' - config.san_password = '' - config.san_thin_provision = False - config.san_is_local = False - config.gateway_mga = '2.2.2.2' - config.gateway_mgb = '3.3.3.3' - config.use_igroups = False - config.violin_request_timeout = 300 - config.container = 'myContainer' - config.violin_pool_allocation_method = 'random' - config.violin_dedup_only_pools = None - config.violin_dedup_capable_pools = None - return config - - @mock.patch('vmemclient.open') - def setup_mock_client(self, 
_m_client, m_conf=None): - """Create a fake backend communication factory. - - The xg-tools creates a Concerto connection object (for V7000 - devices) and returns it for use on a call to vmemclient.open(). - """ - # configure the concerto object mock with defaults - _m_concerto = mock.Mock(name='Concerto', - version='1.1.1', - spec=vmemclient.mock_client_conf) - - # if m_conf, clobber the defaults with it - if m_conf: - _m_concerto.configure_mock(**m_conf) - - # set calls to vmemclient.open() to return this mocked concerto object - _m_client.return_value = _m_concerto - - return _m_client - - def setup_mock_concerto(self, m_conf=None): - """Create a fake Concerto communication object.""" - _m_concerto = mock.Mock(name='Concerto', - version='1.1.1', - spec=vmemclient.mock_client_conf) - - if m_conf: - _m_concerto.configure_mock(**m_conf) - - return _m_concerto - - def test_check_for_setup_error(self): - """No setup errors are found.""" - self.driver.vmem_mg = self.setup_mock_concerto() - self.driver._is_supported_vmos_version = mock.Mock(return_value=True) - - result = self.driver.check_for_setup_error() - - self.driver._is_supported_vmos_version.assert_called_with( - self.driver.vmem_mg.version) - self.assertIsNone(result) - - def test_create_lun(self): - """Lun is successfully created.""" - response = {'success': True, 'msg': 'Create resource successfully.'} - size_in_mb = VOLUME['size'] * units.Ki - - conf = { - 'lun.create_lun.return_value': response, - } - self.driver.vmem_mg = self.setup_mock_concerto(m_conf=conf) - self.driver._send_cmd = mock.Mock(return_value=response) - self.driver._get_storage_pool = mock.Mock( - return_value=DEFAULT_THICK_POOL) - - result = self.driver._create_lun(VOLUME) - - self.driver._send_cmd.assert_called_with( - self.driver.vmem_mg.lun.create_lun, - 'Create resource successfully.', - VOLUME['id'], size_in_mb, False, False, size_in_mb, - storage_pool_id=99) - self.assertIsNone(result) - - def test_create_dedup_lun(self): - """Lun 
is successfully created.""" - vol = VOLUME.copy() - vol['size'] = 100 - vol['volume_type_id'] = '1' - - response = {'success': True, 'msg': 'Create resource successfully.'} - size_in_mb = vol['size'] * units.Ki - full_size_mb = size_in_mb - - conf = { - 'lun.create_lun.return_value': response, - } - self.driver.vmem_mg = self.setup_mock_concerto(m_conf=conf) - self.driver._send_cmd = mock.Mock(return_value=response) - - # simulate extra specs of {'thin': 'true', 'dedupe': 'true'} - self.driver._get_volume_type_extra_spec = mock.Mock( - return_value="True") - - self.driver._get_violin_extra_spec = mock.Mock( - return_value=None) - self.driver._get_storage_pool = mock.Mock( - return_value=DEFAULT_DEDUP_POOL) - - result = self.driver._create_lun(vol) - - self.driver._send_cmd.assert_called_with( - self.driver.vmem_mg.lun.create_lun, - 'Create resource successfully.', - VOLUME['id'], size_in_mb / 10, True, True, full_size_mb, - storage_pool_id=99) - self.assertIsNone(result) - - def test_fail_extend_dedup_lun(self): - """Volume extend fails when new size would shrink the volume.""" - vol = VOLUME.copy() - vol['volume_type_id'] = '1' - - size_in_mb = vol['size'] * units.Ki - self.driver.vmem_mg = self.setup_mock_concerto() - type(self.driver.vmem_mg.utility).is_external_head = mock.PropertyMock( - return_value=False) - - self.driver._get_volume_type_extra_spec = mock.Mock( - return_value="True") - - failure = exception.VolumeDriverException - self.assertRaises(failure, self.driver._extend_lun, - vol, size_in_mb) - - def test_extend_dedup_lun_external_head(self): - """Volume extend fails when new size would shrink the volume.""" - vol = VOLUME.copy() - vol['volume_type_id'] = '1' - new_volume_size = 10 - - response = {'success': True, 'message': 'Expand resource successfully'} - conf = { - 'lun.extend_lun.return_value': response, - } - - self.driver.vmem_mg = self.setup_mock_concerto(m_conf=conf) - type(self.driver.vmem_mg.utility).is_external_head = mock.PropertyMock( - 
return_value=False) - - change_in_size_mb = (new_volume_size - VOLUME['size']) * units.Ki - self.driver._send_cmd = mock.Mock(return_value=response) - - result = self.driver._extend_lun(VOLUME, new_volume_size) - - self.driver._send_cmd.assert_called_with( - self.driver.vmem_mg.lun.extend_lun, - response['message'], VOLUME['id'], change_in_size_mb) - self.assertIsNone(result) - - def test_create_non_dedup_lun(self): - """Lun is successfully created.""" - vol = VOLUME.copy() - vol['size'] = 100 - vol['volume_type_id'] = '1' - - response = {'success': True, 'msg': 'Create resource successfully.'} - size_in_mb = vol['size'] * units.Ki - full_size_mb = size_in_mb - - conf = { - 'lun.create_lun.return_value': response, - } - self.driver.vmem_mg = self.setup_mock_concerto(m_conf=conf) - self.driver._send_cmd = mock.Mock(return_value=response) - - # simulate extra specs of {'thin': 'false', 'dedupe': 'false'} - self.driver._get_volume_type_extra_spec = mock.Mock( - return_value="False") - - self.driver._get_violin_extra_spec = mock.Mock( - return_value=None) - - self.driver._get_storage_pool = mock.Mock( - return_value=DEFAULT_THICK_POOL) - - result = self.driver._create_lun(vol) - - self.driver._send_cmd.assert_called_with( - self.driver.vmem_mg.lun.create_lun, - 'Create resource successfully.', - VOLUME['id'], size_in_mb, False, False, full_size_mb, - storage_pool_id=99) - self.assertIsNone(result) - - def test_create_lun_fails(self): - """Array returns error that the lun already exists.""" - response = {'success': False, - 'msg': 'Duplicate Virtual Device name. 
Error: 0x90010022'} - conf = { - 'lun.create_lun.return_value': response, - } - self.driver.vmem_mg = self.setup_mock_concerto(m_conf=conf) - self.driver._get_storage_pool = mock.Mock( - return_value=DEFAULT_THICK_POOL) - self.driver._send_cmd = mock.Mock(return_value=response) - - self.assertIsNone(self.driver._create_lun(VOLUME)) - - def test_create_lun_on_a_storage_pool(self): - """Lun is successfully created.""" - vol = VOLUME.copy() - vol['size'] = 100 - vol['volume_type_id'] = '1' - - response = {'success': True, 'msg': 'Create resource successfully.'} - size_in_mb = vol['size'] * units.Ki - full_size_mb = size_in_mb - - conf = { - 'lun.create_lun.return_value': response, - } - self.driver.vmem_mg = self.setup_mock_concerto(m_conf=conf) - self.driver._send_cmd = mock.Mock(return_value=response) - self.driver._get_volume_type_extra_spec = mock.Mock( - return_value="False") - - # simulates extra specs: {'storage_pool', 'StoragePool'} - self.driver._get_violin_extra_spec = mock.Mock( - return_value="StoragePool") - self.driver._get_storage_pool = mock.Mock( - return_value=DEFAULT_THICK_POOL) - - result = self.driver._create_lun(vol) - - self.driver._send_cmd.assert_called_with( - self.driver.vmem_mg.lun.create_lun, - 'Create resource successfully.', - VOLUME['id'], size_in_mb, False, False, full_size_mb, - storage_pool_id=99) - self.assertIsNone(result) - - def test_delete_lun(self): - """Lun is deleted successfully.""" - response = {'success': True, 'msg': 'Delete resource successfully'} - success_msgs = ['Delete resource successfully', ''] - - conf = { - 'lun.delete_lun.return_value': response, - } - self.driver.vmem_mg = self.setup_mock_concerto(m_conf=conf) - self.driver._send_cmd = mock.Mock(return_value=response) - self.driver._delete_lun_snapshot_bookkeeping = mock.Mock() - - result = self.driver._delete_lun(VOLUME) - - self.driver._send_cmd.assert_called_with( - self.driver.vmem_mg.lun.delete_lun, - success_msgs, VOLUME['id']) - 
self.driver._delete_lun_snapshot_bookkeeping.assert_called_with( - VOLUME['id']) - - self.assertIsNone(result) - - # TODO(vthirumalai): More delete lun failure cases to be added after - # collecting the possible responses from Concerto - - def test_extend_lun(self): - """Volume extend completes successfully.""" - new_volume_size = 10 - change_in_size_mb = (new_volume_size - VOLUME['size']) * units.Ki - - response = {'success': True, 'message': 'Expand resource successfully'} - - conf = { - 'lun.extend_lun.return_value': response, - } - self.driver.vmem_mg = self.setup_mock_concerto(m_conf=conf) - self.driver._send_cmd = mock.Mock(return_value=response) - - result = self.driver._extend_lun(VOLUME, new_volume_size) - - self.driver._send_cmd.assert_called_with( - self.driver.vmem_mg.lun.extend_lun, - response['message'], VOLUME['id'], change_in_size_mb) - self.assertIsNone(result) - - def test_extend_lun_new_size_is_too_small(self): - """Volume extend fails when new size would shrink the volume.""" - new_volume_size = 0 - change_in_size_mb = (new_volume_size - VOLUME['size']) * units.Ki - - response = {'success': False, 'msg': 'Invalid size. 
Error: 0x0902000c'} - failure = exception.ViolinBackendErr - - conf = { - 'lun.resize_lun.return_value': response, - } - self.driver.vmem_mg = self.setup_mock_concerto(m_conf=conf) - self.driver._send_cmd = mock.Mock(side_effect=failure(message='fail')) - - self.assertRaises(failure, self.driver._extend_lun, - VOLUME, change_in_size_mb) - - def test_create_volume_from_snapshot(self): - """Create a new cinder volume from a given snapshot of a lun.""" - object_id = '12345' - vdev_id = 11111 - lun_info_response = {'subType': 'THICK', - 'virtualDeviceID': vdev_id} - response = {'success': True, - 'object_id': object_id, - 'msg': 'Copy TimeMark successfully.'} - compressed_snap_id = 'abcdabcd1234abcd1234abcdeffedcbb' - - conf = { - 'lun.get_lun_info.return_value': lun_info_response, - 'lun.copy_snapshot_to_new_lun.return_value': response, - } - self.driver.vmem_mg = self.setup_mock_concerto(m_conf=conf) - self.driver._compress_snapshot_id = mock.Mock( - return_value=compressed_snap_id) - self.driver._get_storage_pool = mock.Mock( - return_value=DEFAULT_THICK_POOL) - self.driver._wait_for_lun_or_snap_copy = mock.Mock() - - result = self.driver._create_volume_from_snapshot(SNAPSHOT, VOLUME) - - self.driver.vmem_mg.lun.copy_snapshot_to_new_lun.assert_called_with( - source_lun=SNAPSHOT['volume_id'], - source_snapshot_comment=compressed_snap_id, - destination=VOLUME['id'], storage_pool_id=99) - self.driver._wait_for_lun_or_snap_copy.assert_called_with( - SNAPSHOT['volume_id'], dest_vdev_id=vdev_id) - - self.assertIsNone(result) - - def test_create_volume_from_snapshot_on_a_storage_pool(self): - """Create a new cinder volume from a given snapshot of a lun.""" - dest_vol = VOLUME.copy() - dest_vol['size'] = 100 - dest_vol['volume_type_id'] = '1' - object_id = '12345' - vdev_id = 11111 - lun_info_response = {'subType': 'THICK', - 'virtualDeviceID': vdev_id} - response = {'success': True, - 'object_id': object_id, - 'msg': 'Copy TimeMark successfully.'} - compressed_snap_id = 
'abcdabcd1234abcd1234abcdeffedcbb' - - conf = { - 'lun.get_lun_info.return_value': lun_info_response, - 'lun.copy_snapshot_to_new_lun.return_value': response, - } - self.driver.vmem_mg = self.setup_mock_concerto(m_conf=conf) - self.driver._compress_snapshot_id = mock.Mock( - return_value=compressed_snap_id) - self.driver._get_violin_extra_spec = mock.Mock( - return_value="StoragePool") - self.driver._get_storage_pool = mock.Mock( - return_value=DEFAULT_THICK_POOL) - self.driver._get_volume_type_extra_spec = mock.Mock( - return_value="False") - self.driver._wait_for_lun_or_snap_copy = mock.Mock() - - result = self.driver._create_volume_from_snapshot(SNAPSHOT, dest_vol) - - self.assertIsNone(result) - - def test_create_volume_from_snapshot_fails(self): - """Array returns error that the lun already exists.""" - vdev_id = 11111 - lun_info_response = {'subType': 'THICK', - 'virtualDeviceID': vdev_id} - response = {'success': False, - 'msg': 'Duplicate Virtual Device name. Error: 0x90010022'} - compressed_snap_id = 'abcdabcd1234abcd1234abcdeffedcbb' - failure = exception.ViolinBackendErrExists - - conf = { - 'lun.get_lun_info.return_value': lun_info_response, - 'lun.copy_snapshot_to_new_lun.return_value': response, - } - self.driver.vmem_mg = self.setup_mock_concerto(m_conf=conf) - self.driver._send_cmd = mock.Mock(return_value=response) - self.driver._compress_snapshot_id = mock.Mock( - return_value=compressed_snap_id) - self.driver._get_storage_pool = mock.Mock( - return_value=DEFAULT_THICK_POOL) - - self.driver._send_cmd = mock.Mock(side_effect=failure(message='fail')) - - self.assertRaises(failure, self.driver._create_volume_from_snapshot, - SNAPSHOT, VOLUME) - - @ddt.data(2, 10) - def test_create_lun_from_lun_and_resize(self, size): - """lun full clone to new volume completes successfully.""" - larger_size_flag = False - dest_vol = VOLUME.copy() - if size > VOLUME['size']: - dest_vol['size'] = size - larger_size_flag = True - object_id = fake.OBJECT_ID - 
lun_info_response = {'subType': 'THICK'} - copy_response = {'success': True, - 'object_id': object_id, - 'msg': 'Copy Snapshot resource successfully'} - - conf = { - 'lun.get_lun_info.return_value': lun_info_response, - 'lun.copy_lun_to_new_lun.return_value': copy_response, - } - self.driver.vmem_mg = self.setup_mock_concerto(m_conf=conf) - self.driver._ensure_snapshot_resource_area = mock.Mock() - self.driver._get_storage_pool = mock.Mock( - return_value=DEFAULT_THICK_POOL) - self.driver._wait_for_lun_or_snap_copy = mock.Mock() - self.driver._extend_lun = mock.Mock() - - result = self.driver._create_lun_from_lun(SRC_VOL, dest_vol) - - self.driver._ensure_snapshot_resource_area.assert_called_with( - SRC_VOL['id']) - self.driver.vmem_mg.lun.copy_lun_to_new_lun.assert_called_with( - source=SRC_VOL['id'], destination=VOLUME['id'], storage_pool_id=99) - self.driver._wait_for_lun_or_snap_copy.assert_called_with( - SRC_VOL['id'], dest_obj_id=object_id) - if larger_size_flag: - self.driver._extend_lun.assert_called_once_with( - dest_vol, dest_vol['size']) - else: - self.assertFalse(self.driver._extend_lun.called) - - self.assertIsNone(result) - - @ddt.data(2, 10) - def test_create_lun_from_lun_on_a_storage_pool_and_resize(self, size): - """lun full clone to new volume completes successfully.""" - larger_size_flag = False - dest_vol = VOLUME.copy() - if size > VOLUME['size']: - dest_vol['size'] = size - larger_size_flag = True - dest_vol['volume_type_id'] = fake.VOLUME_TYPE_ID - object_id = fake.OBJECT_ID - lun_info_response = {'subType': 'THICK'} - copy_response = {'success': True, - 'object_id': object_id, - 'msg': 'Copy Snapshot resource successfully'} - - conf = { - 'lun.get_lun_info.return_value': lun_info_response, - 'lun.copy_lun_to_new_lun.return_value': copy_response, - } - self.driver.vmem_mg = self.setup_mock_concerto(m_conf=conf) - self.driver._ensure_snapshot_resource_area = mock.Mock() - self.driver._wait_for_lun_or_snap_copy = mock.Mock() - 
self.driver._extend_lun = mock.Mock() - - # simulates extra specs: {'storage_pool', 'StoragePool'} - self.driver._get_violin_extra_spec = mock.Mock( - return_value="StoragePool") - self.driver._get_storage_pool = mock.Mock( - return_value=DEFAULT_THIN_POOL) - - self.driver._get_volume_type_extra_spec = mock.Mock( - side_effect=["True", "False"]) - - result = self.driver._create_lun_from_lun(SRC_VOL, dest_vol) - - self.driver._ensure_snapshot_resource_area.assert_called_with( - SRC_VOL['id']) - self.driver.vmem_mg.lun.copy_lun_to_new_lun.assert_called_with( - source=SRC_VOL['id'], destination=dest_vol['id'], - storage_pool_id=99) - self.driver._wait_for_lun_or_snap_copy.assert_called_with( - SRC_VOL['id'], dest_obj_id=object_id) - if larger_size_flag: - self.driver._extend_lun.assert_called_once_with( - dest_vol, dest_vol['size']) - else: - self.assertFalse(self.driver._extend_lun.called) - - self.assertIsNone(result) - - def test_create_lun_from_lun_fails(self): - """lun full clone to new volume fails correctly.""" - failure = exception.ViolinBackendErr - lun_info_response = { - 'subType': 'THICK', - } - copy_response = { - 'success': False, - 'msg': 'Snapshot Resource is not created ' + - 'for this virtual device. 
Error: 0x0901008c', - } - - conf = { - 'lun.get_lun_info.return_value': lun_info_response, - 'lun.copy_lun_to_new_lun.return_value': copy_response, - } - self.driver.vmem_mg = self.setup_mock_concerto(m_conf=conf) - self.driver._ensure_snapshot_resource_area = mock.Mock() - self.driver._send_cmd = mock.Mock(side_effect=failure(message='fail')) - self.driver._get_storage_pool = mock.Mock( - return_value=DEFAULT_THICK_POOL) - - self.assertRaises(failure, self.driver._create_lun_from_lun, - SRC_VOL, VOLUME) - - def test_create_lun_from_thin_lun_fails(self): - """lun full clone of thin lun is not supported.""" - failure = exception.ViolinBackendErr - lun_info_response = { - 'subType': 'THIN', - } - - conf = { - 'lun.get_lun_info.return_value': lun_info_response, - } - self.driver.vmem_mg = self.setup_mock_concerto(m_conf=conf) - - self.assertRaises(failure, self.driver._create_lun_from_lun, - SRC_VOL, VOLUME) - - def test_create_lun_from_dedup_lun_fails(self): - """lun full clone of dedup lun is not supported.""" - failure = exception.ViolinBackendErr - lun_info_response = { - 'subType': 'DEDUP', - } - - conf = { - 'lun.get_lun_info.return_value': lun_info_response, - } - self.driver.vmem_mg = self.setup_mock_concerto(m_conf=conf) - - self.assertRaises(failure, self.driver._create_lun_from_lun, - SRC_VOL, VOLUME) - - def test_send_cmd(self): - """Command callback completes successfully.""" - success_msg = 'success' - request_args = ['arg1', 'arg2', 'arg3'] - response = {'success': True, 'msg': 'Operation successful'} - - request_func = mock.Mock(return_value=response) - - result = self.driver._send_cmd(request_func, success_msg, request_args) - - self.assertEqual(response, result) - - def test_send_cmd_request_timed_out(self): - """The callback retry timeout hits immediately.""" - failure = exception.ViolinRequestRetryTimeout - success_msg = 'success' - request_args = ['arg1', 'arg2', 'arg3'] - self.conf.violin_request_timeout = 0 - - request_func = mock.Mock() - - 
self.assertRaises(failure, self.driver._send_cmd, - request_func, success_msg, request_args) - - def test_send_cmd_response_has_no_message(self): - """The callback returns no message on the first call.""" - success_msg = 'success' - request_args = ['arg1', 'arg2', 'arg3'] - response1 = {'success': True, 'msg': None} - response2 = {'success': True, 'msg': 'success'} - - request_func = mock.Mock(side_effect=[response1, response2]) - - self.assertEqual(response2, self.driver._send_cmd - (request_func, success_msg, request_args)) - - def test_check_error_code(self): - """Return an exception for a valid error code.""" - failure = exception.ViolinBackendErr - response = {'success': False, 'msg': 'Error: 0x90000000'} - self.assertRaises(failure, self.driver._check_error_code, - response) - - def test_check_error_code_non_fatal_error(self): - """Returns no exception for a non-fatal error code.""" - response = {'success': False, 'msg': 'Error: 0x9001003c'} - self.assertIsNone(self.driver._check_error_code(response)) - - def test_compress_snapshot_id(self): - test_snap_id = "12345678-abcd-1234-cdef-0123456789ab" - expected = "12345678abcd1234cdef0123456789ab" - - self.assertEqual(32, len(expected)) - result = self.driver._compress_snapshot_id(test_snap_id) - self.assertEqual(expected, result) - - def test_ensure_snapshot_resource_area(self): - result_dict = {'success': True, 'res': 'Successful'} - - self.driver.vmem_mg = self.setup_mock_concerto() - snap = self.driver.vmem_mg.snapshot - snap.lun_has_a_snapshot_resource = mock.Mock(return_value=False) - snap.create_snapshot_resource = mock.Mock(return_value=result_dict) - self.driver._get_storage_pool = mock.Mock( - return_value=DEFAULT_THICK_POOL) - - with mock.patch('cinder.db.sqlalchemy.api.volume_get', - return_value=VOLUME): - result = self.driver._ensure_snapshot_resource_area(VOLUME_ID) - - self.assertIsNone(result) - snap.lun_has_a_snapshot_resource.assert_called_with(lun=VOLUME_ID) - 
snap.create_snapshot_resource.assert_called_with( - lun=VOLUME_ID, - size=int(math.ceil(0.2 * (VOLUME['size'] * 1024))), - enable_notification=False, - policy=v7000_common.CONCERTO_DEFAULT_SRA_POLICY, - enable_expansion= - v7000_common.CONCERTO_DEFAULT_SRA_ENABLE_EXPANSION, - expansion_threshold= - v7000_common.CONCERTO_DEFAULT_SRA_EXPANSION_THRESHOLD, - expansion_increment= - v7000_common.CONCERTO_DEFAULT_SRA_EXPANSION_INCREMENT, - expansion_max_size= - v7000_common.CONCERTO_DEFAULT_SRA_EXPANSION_MAX_SIZE, - enable_shrink=v7000_common.CONCERTO_DEFAULT_SRA_ENABLE_SHRINK, - storage_pool_id=99) - - def test_ensure_snapshot_resource_area_with_storage_pool(self): - - dest_vol = VOLUME.copy() - dest_vol['size'] = 2 - dest_vol['volume_type_id'] = '1' - - result_dict = {'success': True, 'res': 'Successful'} - - self.driver.vmem_mg = self.setup_mock_concerto() - snap = self.driver.vmem_mg.snapshot - snap.lun_has_a_snapshot_resource = mock.Mock(return_value=False) - snap.create_snapshot_resource = mock.Mock(return_value=result_dict) - - # simulates extra specs: {'storage_pool', 'StoragePool'} - self.driver._get_violin_extra_spec = mock.Mock( - return_value="StoragePool") - self.driver._get_storage_pool = mock.Mock( - return_value=DEFAULT_THICK_POOL) - - self.driver._get_volume_type_extra_spec = mock.Mock( - side_effect=["True", "False"]) - - with mock.patch('cinder.db.sqlalchemy.api.volume_get', - return_value=dest_vol): - result = self.driver._ensure_snapshot_resource_area(VOLUME_ID) - - self.assertIsNone(result) - snap.lun_has_a_snapshot_resource.assert_called_with(lun=VOLUME_ID) - snap.create_snapshot_resource.assert_called_with( - lun=VOLUME_ID, - size=int(math.ceil(0.2 * (VOLUME['size'] * 1024))), - enable_notification=False, - policy=v7000_common.CONCERTO_DEFAULT_SRA_POLICY, - enable_expansion= - v7000_common.CONCERTO_DEFAULT_SRA_ENABLE_EXPANSION, - expansion_threshold= - v7000_common.CONCERTO_DEFAULT_SRA_EXPANSION_THRESHOLD, - expansion_increment= - 
v7000_common.CONCERTO_DEFAULT_SRA_EXPANSION_INCREMENT, - expansion_max_size= - v7000_common.CONCERTO_DEFAULT_SRA_EXPANSION_MAX_SIZE, - enable_shrink=v7000_common.CONCERTO_DEFAULT_SRA_ENABLE_SHRINK, - storage_pool_id=99) - - def test_ensure_snapshot_resource_policy(self): - result_dict = {'success': True, 'res': 'Successful'} - - self.driver.vmem_mg = self.setup_mock_concerto() - - snap = self.driver.vmem_mg.snapshot - snap.lun_has_a_snapshot_policy = mock.Mock(return_value=False) - snap.create_snapshot_policy = mock.Mock(return_value=result_dict) - - result = self.driver._ensure_snapshot_policy(VOLUME_ID) - self.assertIsNone(result) - snap.lun_has_a_snapshot_policy.assert_called_with(lun=VOLUME_ID) - - snap.create_snapshot_policy.assert_called_with( - lun=VOLUME_ID, - max_snapshots=v7000_common.CONCERTO_DEFAULT_POLICY_MAX_SNAPSHOTS, - enable_replication=False, - enable_snapshot_schedule=False, - enable_cdp=False, - retention_mode=v7000_common.CONCERTO_DEFAULT_POLICY_RETENTION_MODE) - - def test_delete_lun_snapshot_bookkeeping(self): - result_dict = {'success': True, 'res': 'Successful'} - - self.driver.vmem_mg = self.setup_mock_concerto() - snap = self.driver.vmem_mg.snapshot - snap.get_snapshots = mock.Mock( - return_value=[], - side_effect=vmemclient.core.error.NoMatchingObjectIdError) - snap.delete_snapshot_policy = mock.Mock(return_value=result_dict) - snap.delete_snapshot_resource = mock.Mock() - - result = self.driver._delete_lun_snapshot_bookkeeping( - volume_id=VOLUME_ID) - - self.assertIsNone(result) - - snap.get_snapshots.assert_called_with(VOLUME_ID) - snap.delete_snapshot_policy.assert_called_with(lun=VOLUME_ID) - snap.delete_snapshot_resource.assert_called_with(lun=VOLUME_ID) - - def test_create_lun_snapshot(self): - response = {'success': True, 'msg': 'Create TimeMark successfully'} - - self.driver.vmem_mg = self.setup_mock_concerto() - self.driver._ensure_snapshot_resource_area = ( - mock.Mock(return_value=True)) - self.driver._ensure_snapshot_policy 
= mock.Mock(return_value=True) - self.driver._send_cmd = mock.Mock(return_value=response) - - with mock.patch('cinder.db.sqlalchemy.api.volume_get', - return_value=VOLUME): - result = self.driver._create_lun_snapshot(SNAPSHOT) - - self.assertIsNone(result) - - self.driver._ensure_snapshot_resource_area.assert_called_with( - VOLUME_ID) - self.driver._ensure_snapshot_policy.assert_called_with(VOLUME_ID) - self.driver._send_cmd.assert_called_with( - self.driver.vmem_mg.snapshot.create_lun_snapshot, - 'Create TimeMark successfully', - lun=VOLUME_ID, - comment=self.driver._compress_snapshot_id(SNAPSHOT_ID), - priority=v7000_common.CONCERTO_DEFAULT_PRIORITY, - enable_notification=False) - - def test_delete_lun_snapshot(self): - response = {'success': True, 'msg': 'Delete TimeMark successfully'} - compressed_snap_id = 'abcdabcd1234abcd1234abcdeffedcbb' - oid = 'abc123-abc123abc123-abc123' - - conf = { - 'snapshot.snapshot_comment_to_object_id.return_value': oid, - 'snapshot.delete_lun_snapshot.return_value': response, - } - - self.driver.vmem_mg = self.setup_mock_concerto(m_conf=conf) - self.driver._compress_snapshot_id = mock.Mock( - return_value=compressed_snap_id) - - result = self.driver._delete_lun_snapshot(SNAPSHOT) - - self.assertTrue(result) - - def test_delete_lun_snapshot_with_retry(self): - response = [ - {'success': False, 'msg': 'Error 0x50f7564c'}, - {'success': True, 'msg': 'Delete TimeMark successfully'}] - compressed_snap_id = 'abcdabcd1234abcd1234abcdeffedcbb' - oid = 'abc123-abc123abc123-abc123' - - conf = { - 'snapshot.snapshot_comment_to_object_id.return_value': oid, - 'snapshot.delete_lun_snapshot.side_effect': response, - } - - self.driver.vmem_mg = self.setup_mock_concerto(m_conf=conf) - self.driver._compress_snapshot_id = mock.Mock( - return_value=compressed_snap_id) - - result = self.driver._delete_lun_snapshot(SNAPSHOT) - - self.assertTrue(result) - self.assertEqual( - len(response), - self.driver.vmem_mg.snapshot.delete_lun_snapshot.call_count) 
- - def test_wait_for_lun_or_snap_copy_completes_for_snap(self): - """waiting for a snapshot to copy succeeds.""" - vdev_id = 11111 - response = (vdev_id, None, 100) - - conf = { - 'snapshot.get_snapshot_copy_status.return_value': response, - } - self.driver.vmem_mg = self.setup_mock_concerto(m_conf=conf) - - result = self.driver._wait_for_lun_or_snap_copy( - SRC_VOL['id'], dest_vdev_id=vdev_id) - - (self.driver.vmem_mg.snapshot.get_snapshot_copy_status. - assert_called_with(SRC_VOL['id'])) - self.assertTrue(result) - - def test_wait_for_lun_or_snap_copy_completes_for_lun(self): - """waiting for a lun to copy succeeds.""" - object_id = '12345' - response = (object_id, None, 100) - - conf = { - 'lun.get_lun_copy_status.return_value': response, - } - self.driver.vmem_mg = self.setup_mock_concerto(m_conf=conf) - - result = self.driver._wait_for_lun_or_snap_copy( - SRC_VOL['id'], dest_obj_id=object_id) - - self.driver.vmem_mg.lun.get_lun_copy_status.assert_called_with( - SRC_VOL['id']) - self.assertTrue(result) - - @mock.patch.object(context, 'get_admin_context') - @mock.patch.object(volume_types, 'get_volume_type') - def test_get_volume_type_extra_spec(self, - m_get_volume_type, - m_get_admin_context): - """Volume_type extra specs are found successfully.""" - vol = VOLUME.copy() - vol['volume_type_id'] = 1 - volume_type = {'extra_specs': {'override:test_key': 'test_value'}} - - m_get_admin_context.return_value = None - m_get_volume_type.return_value = volume_type - - result = self.driver._get_volume_type_extra_spec(vol, 'test_key') - - m_get_admin_context.assert_called_with() - m_get_volume_type.assert_called_with(None, vol['volume_type_id']) - self.assertEqual('test_value', result) - - @mock.patch.object(context, 'get_admin_context') - @mock.patch.object(volume_types, 'get_volume_type') - def test_get_violin_extra_spec(self, - m_get_volume_type, - m_get_admin_context): - """Volume_type extra specs are found successfully.""" - vol = VOLUME.copy() - 
vol['volume_type_id'] = 1 - volume_type = {'extra_specs': {'violin:test_key': 'test_value'}} - - m_get_admin_context.return_value = None - m_get_volume_type.return_value = volume_type - - result = self.driver._get_volume_type_extra_spec(vol, 'test_key') - - m_get_admin_context.assert_called_with() - m_get_volume_type.assert_called_with(None, vol['volume_type_id']) - self.assertEqual(result, 'test_value') - - def test_process_extra_specs_dedup(self): - '''Process the given extra specs and fill the required dict.''' - vol = VOLUME.copy() - vol['volume_type_id'] = 1 - spec_dict = { - 'pool_type': 'dedup', - 'size_mb': 205, - 'thick': False, - 'dedup': True, - 'thin': True} - - self.driver.vmem_mg = self.setup_mock_concerto() - self.driver._get_volume_type_extra_spec = mock.Mock( - return_value="True") - - result = self.driver._process_extra_specs(vol) - self.assertEqual(spec_dict, result) - - def test_process_extra_specs_no_specs(self): - '''Fill the required spec_dict in the absence of extra specs.''' - vol = VOLUME.copy() - spec_dict = { - 'pool_type': 'thick', - 'size_mb': 2048, - 'thick': True, - 'dedup': False, - 'thin': False} - - self.driver.vmem_mg = self.setup_mock_concerto() - self.driver._get_volume_type_extra_spec = mock.Mock( - return_value="False") - - result = self.driver._process_extra_specs(vol) - self.assertEqual(spec_dict, result) - - def test_process_extra_specs_no_specs_thin(self): - '''Fill the required spec_dict in the absence of extra specs.''' - vol = VOLUME.copy() - spec_dict = { - 'pool_type': 'thin', - 'size_mb': 205, - 'thick': False, - 'dedup': False, - 'thin': True} - - self.driver.vmem_mg = self.setup_mock_concerto() - self.driver._get_volume_type_extra_spec = mock.Mock( - return_value="False") - - save_thin = self.conf.san_thin_provision - self.conf.san_thin_provision = True - result = self.driver._process_extra_specs(vol) - self.assertEqual(spec_dict, result) - self.conf.san_thin_provision = save_thin - - def 
test_process_extra_specs_thin(self): - '''Fill the required spec_dict in the absence of extra specs.''' - vol = VOLUME.copy() - vol['volume_type_id'] = 1 - spec_dict = { - 'pool_type': 'thin', - 'size_mb': 205, - 'thick': False, - 'dedup': False, - 'thin': True} - - self.driver.vmem_mg = self.setup_mock_concerto() - self.driver._get_volume_type_extra_spec = mock.Mock( - side_effect=["True", "False"]) - - result = self.driver._process_extra_specs(vol) - self.assertEqual(spec_dict, result) - - def test_get_storage_pool_with_extra_specs(self): - '''Select a suitable pool based on specified extra specs.''' - vol = VOLUME.copy() - vol['volume_type_id'] = 1 - pool_type = "thick" - - selected_pool = { - 'storage_pool': 'StoragePoolA', - 'storage_pool_id': 99, - 'dedup': False, - 'thin': False} - - conf = { - 'pool.select_storage_pool.return_value': selected_pool, - } - self.driver.vmem_mg = self.setup_mock_concerto(m_conf=conf) - self.driver._get_violin_extra_spec = mock.Mock( - return_value="StoragePoolA", - ) - - result = self.driver._get_storage_pool( - vol, - 100, - pool_type, - "create_lun") - - self.assertEqual(result, selected_pool) - - def test_get_storage_pool_configured_pools(self): - '''Select a suitable pool based on configured pools.''' - vol = VOLUME.copy() - pool_type = "dedup" - - self.conf.violin_dedup_only_pools = ['PoolA', 'PoolB'] - self.conf.violin_dedup_capable_pools = ['PoolC', 'PoolD'] - - selected_pool = { - 'dedup': True, - 'storage_pool': 'PoolA', - 'storage_pool_id': 123, - 'thin': True, - } - - conf = { - 'pool.select_storage_pool.return_value': selected_pool, - } - - self.driver.vmem_mg = self.setup_mock_concerto(m_conf=conf) - self.driver._get_violin_extra_spec = mock.Mock( - return_value="StoragePoolA") - - result = self.driver._get_storage_pool( - vol, - 100, - pool_type, - "create_lun", - ) - - self.assertEqual(result, selected_pool) - self.driver.vmem_mg.pool.select_storage_pool.assert_called_with( - 100, - pool_type, - None, - 
self.conf.violin_dedup_only_pools, - self.conf.violin_dedup_capable_pools, - "random", - "create_lun", - ) - - def test_get_volume_stats(self): - '''Getting stats works successfully.''' - - self.conf.reserved_percentage = 0 - - expected_answers = { - 'vendor_name': 'Violin Memory, Inc.', - 'reserved_percentage': 0, - 'QoS_support': False, - 'free_capacity_gb': 2781, - 'total_capacity_gb': 14333, - 'consistencygroup_support': False, - } - owner = 'lab-host1' - - def lookup(value): - return six.text_type(value) + '.vmem.com' - conf = { - 'pool.get_storage_pools.return_value': STATS_STORAGE_POOL_RESPONSE, - } - self.driver.vmem_mg = self.setup_mock_concerto(m_conf=conf) - - with mock.patch('socket.getfqdn', side_effect=lookup): - result = self.driver._get_volume_stats(owner) - - self.assertDictEqual(expected_answers, result) diff --git a/cinder/tests/unit/volume/drivers/violin/test_v7000_fcp.py b/cinder/tests/unit/volume/drivers/violin/test_v7000_fcp.py deleted file mode 100644 index fe45585bc..000000000 --- a/cinder/tests/unit/volume/drivers/violin/test_v7000_fcp.py +++ /dev/null @@ -1,574 +0,0 @@ -# Copyright 2015 Violin Memory, Inc. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -""" -Tests for Violin Memory 7000 Series All-Flash Array Fibrechannel Driver -""" - -import mock - -from cinder import exception -from cinder import test -from cinder.tests.unit.volume.drivers.violin \ - import fake_vmem_client as vmemclient -from cinder.volume import configuration as conf -from cinder.volume.drivers.violin import v7000_common -from cinder.volume.drivers.violin import v7000_fcp - -VOLUME_ID = "abcdabcd-1234-abcd-1234-abcdeffedcba" -VOLUME = { - "name": "volume-" + VOLUME_ID, - "id": VOLUME_ID, - "display_name": "fake_volume", - "size": 2, - "host": "myhost", - "volume_type": None, - "volume_type_id": None, -} -SNAPSHOT_ID = "abcdabcd-1234-abcd-1234-abcdeffedcbb" -SNAPSHOT = { - "name": "snapshot-" + SNAPSHOT_ID, - "id": SNAPSHOT_ID, - "volume_id": VOLUME_ID, - "volume_name": "volume-" + VOLUME_ID, - "volume_size": 2, - "display_name": "fake_snapshot", - "volume": VOLUME, -} -SRC_VOL_ID = "abcdabcd-1234-abcd-1234-abcdeffedcbc" -SRC_VOL = { - "name": "volume-" + SRC_VOL_ID, - "id": SRC_VOL_ID, - "display_name": "fake_src_vol", - "size": 2, - "host": "myhost", - "volume_type": None, - "volume_type_id": None, -} -INITIATOR_IQN = "iqn.1111-22.org.debian:11:222" -CONNECTOR = { - "initiator": INITIATOR_IQN, - "host": "irrelevant", - 'wwpns': ['50014380186b3f65', '50014380186b3f67'], -} -FC_TARGET_WWPNS = [ - '31000024ff45fb22', '21000024ff45fb23', - '51000024ff45f1be', '41000024ff45f1bf' -] -FC_INITIATOR_WWPNS = [ - '50014380186b3f65', '50014380186b3f67' -] -FC_FABRIC_MAP = { - 'fabricA': - {'target_port_wwn_list': [FC_TARGET_WWPNS[0], FC_TARGET_WWPNS[1]], - 'initiator_port_wwn_list': [FC_INITIATOR_WWPNS[0]]}, - 'fabricB': - {'target_port_wwn_list': [FC_TARGET_WWPNS[2], FC_TARGET_WWPNS[3]], - 'initiator_port_wwn_list': [FC_INITIATOR_WWPNS[1]]} -} -FC_INITIATOR_TARGET_MAP = { - FC_INITIATOR_WWPNS[0]: [FC_TARGET_WWPNS[0], FC_TARGET_WWPNS[1]], - FC_INITIATOR_WWPNS[1]: [FC_TARGET_WWPNS[2], FC_TARGET_WWPNS[3]] -} - -PHY_DEVICES_RESPONSE = { - 'data': - 
{'physical_devices': - [{'availsize': 1099504287744, - 'availsize_mb': 524284, - 'category': 'Virtual Device', - 'connection_type': 'block', - 'firmware': 'v1.0', - 'guid': '3cc4d6dd-166d-77d2-4967-00005463f597', - 'inquiry_string': '000002122b000032BKSC OTHDISK-MFCN01 v1.0', - 'is_foreign': True, - 'name': 'BKSC:OTHDISK-MFCN01.000', - 'object_id': '84b834fb-1f4d-5d3b-b7ae-5796f9868151', - 'owner': 'example.com', - 'pool': None, - 'product': 'OTHDISK-MFCN01', - 'scsi_address': - {'adapter': '98', - 'channel': '0', - 'id': '0', - 'lun': '0', - 'object_id': '6e0106fc-9c1c-52a2-95c9-396b7a653ac1'}, - 'size': 1099504287744, - 'size_mb': 1048569, - 'type': 'Direct-Access', - 'usedsize': 0, - 'usedsize_mb': 0, - 'vendor': 'BKSC', - 'wwid': 'BKSC OTHDISK-MFCN01 v1.0-0-0-00'}, - {'availsize': 1099504287744, - 'availsize_mb': 524284, - 'category': 'Virtual Device', - 'connection_type': 'block', - 'firmware': 'v1.0', - 'guid': '283b2694-192b-4745-6768-00005463f673', - 'inquiry_string': '000002122b000032BKSC OTHDISK-MFCN08 v1.0', - 'is_foreign': False, - 'name': 'BKSC:OTHDISK-MFCN08.000', - 'object_id': '8555b888-bf43-5083-a433-f0c7b0282370', - 'owner': 'example.com', - 'pool': - {'name': 'mga-pool', - 'object_id': '0818d3de-4437-535f-9cac-cc100a2c9313'}, - 'product': 'OTHDISK-MFCN08', - 'scsi_address': - {'adapter': '98', - 'channel': '0', - 'id': '11', - 'lun': '0', - 'object_id': '6e0106fc-9c1c-52a2-95c9-396b7a653ac1'}, - 'size': 1099504287744, - 'size_mb': 1048569, - 'type': 'Direct-Access', - 'usedsize': 0, - 'usedsize_mb': 0, - 'vendor': 'BKSC', - 'wwid': 'BKSC OTHDISK-MFCN08 v1.0-0-0-00'}, - {'availsize': 1099504287744, - 'availsize_mb': 1048569, - 'category': 'Virtual Device', - 'connection_type': 'block', - 'firmware': 'v1.0', - 'guid': '7f47db19-019c-707d-0df1-00005463f949', - 'inquiry_string': '000002122b000032BKSC OTHDISK-MFCN09 v1.0', - 'is_foreign': False, - 'name': 'BKSC:OTHDISK-MFCN09.000', - 'object_id': '62a98898-f8b8-5837-af2b-764f5a72e291', - 'owner': 
'a.b.c.d', - 'pool': - {'name': 'mga-pool', - 'object_id': '0818d3de-4437-535f-9cac-cc100a2c9313'}, - 'product': 'OTHDISK-MFCN09', - 'scsi_address': - {'adapter': '98', - 'channel': '0', - 'id': '12', - 'lun': '0', - 'object_id': '6e0106fc-9c1c-52a2-95c9-396b7a653ac1'}, - 'size': 1099504287744, - 'size_mb': 524284, - 'type': 'Direct-Access', - 'usedsize': 0, - 'usedsize_mb': 0, - 'vendor': 'BKSC', - 'wwid': 'BKSC OTHDISK-MFCN09 v1.0-0-0-00'}], - 'total_physical_devices': 3}, - 'msg': 'Successful', - 'success': True -} - -# The FC_INFO dict returned by the backend is keyed on -# object_id of the FC adapter and the values are the -# wwmns -FC_INFO = { - '1a3cdb6a-383d-5ba6-a50b-4ba598074510': ['2100001b9745e25e'], - '4a6bc10a-5547-5cc0-94f2-76222a8f8dff': ['2100001b9745e230'], - 'b21bfff5-d89e-51ff-9920-d990a061d722': ['2100001b9745e25f'], - 'b508cc6b-f78a-51f9-81cf-47c1aaf53dd1': ['2100001b9745e231'] -} - -CLIENT_INFO = { - 'FCPolicy': - {'AS400enabled': False, - 'VSAenabled': False, - 'initiatorWWPNList': ['50-01-43-80-18-6b-3f-66', - '50-01-43-80-18-6b-3f-64']}, - 'FibreChannelDevices': - [{'access': 'ReadWrite', - 'id': 'v0000004', - 'initiatorWWPN': '*', - 'lun': '8', - 'name': 'abcdabcd-1234-abcd-1234-abcdeffedcba', - 'sizeMB': 10240, - 'targetWWPN': '*', - 'type': 'SAN'}] -} - -CLIENT_INFO1 = { - 'FCPolicy': - {'AS400enabled': False, - 'VSAenabled': False, - 'initiatorWWPNList': ['50-01-43-80-18-6b-3f-66', - '50-01-43-80-18-6b-3f-64']}, - 'FibreChannelDevices': [] -} - - -class V7000FCPDriverTestCase(test.TestCase): - """Test cases for VMEM FCP driver.""" - def setUp(self): - super(V7000FCPDriverTestCase, self).setUp() - self.conf = self.setup_configuration() - self.driver = v7000_fcp.V7000FCPDriver(configuration=self.conf) - self.driver.common.container = 'myContainer' - self.driver.device_id = 'ata-VIOLIN_MEMORY_ARRAY_23109R00000022' - self.driver.gateway_fc_wwns = FC_TARGET_WWPNS - self.stats = {} - self.driver.set_initialized() - - def 
setup_configuration(self): - config = mock.Mock(spec=conf.Configuration) - config.volume_backend_name = 'v7000_fcp' - config.san_ip = '8.8.8.8' - config.san_login = 'admin' - config.san_password = '' - config.san_thin_provision = False - config.san_is_local = False - config.request_timeout = 300 - config.container = 'myContainer' - return config - - def setup_mock_concerto(self, m_conf=None): - """Create a fake Concerto communication object.""" - _m_concerto = mock.Mock(name='Concerto', - version='1.1.1', - spec=vmemclient.mock_client_conf) - - if m_conf: - _m_concerto.configure_mock(**m_conf) - - return _m_concerto - - @mock.patch.object(v7000_common.V7000Common, 'check_for_setup_error') - def test_check_for_setup_error(self, m_setup_func): - """No setup errors are found.""" - result = self.driver.check_for_setup_error() - m_setup_func.assert_called_with() - self.assertIsNone(result) - - @mock.patch.object(v7000_common.V7000Common, 'check_for_setup_error') - def test_check_for_setup_error_no_wwn_config(self, m_setup_func): - """No wwns were found during setup.""" - self.driver.gateway_fc_wwns = [] - failure = exception.ViolinInvalidBackendConfig - self.assertRaises(failure, self.driver.check_for_setup_error) - - def test_create_volume(self): - """Volume created successfully.""" - self.driver.common._create_lun = mock.Mock() - - result = self.driver.create_volume(VOLUME) - - self.driver.common._create_lun.assert_called_with(VOLUME) - self.assertIsNone(result) - - def test_create_volume_from_snapshot(self): - self.driver.common._create_volume_from_snapshot = mock.Mock() - - result = self.driver.create_volume_from_snapshot(VOLUME, SNAPSHOT) - - self.driver.common._create_volume_from_snapshot.assert_called_with( - SNAPSHOT, VOLUME) - - self.assertIsNone(result) - - def test_create_cloned_volume(self): - self.driver.common._create_lun_from_lun = mock.Mock() - - result = self.driver.create_cloned_volume(VOLUME, SRC_VOL) - - 
self.driver.common._create_lun_from_lun.assert_called_with( - SRC_VOL, VOLUME) - self.assertIsNone(result) - - def test_delete_volume(self): - """Volume deleted successfully.""" - self.driver.common._delete_lun = mock.Mock() - - result = self.driver.delete_volume(VOLUME) - - self.driver.common._delete_lun.assert_called_with(VOLUME) - self.assertIsNone(result) - - def test_extend_volume(self): - """Volume extended successfully.""" - new_size = 10 - self.driver.common._extend_lun = mock.Mock() - - result = self.driver.extend_volume(VOLUME, new_size) - - self.driver.common._extend_lun.assert_called_with(VOLUME, new_size) - self.assertIsNone(result) - - def test_create_snapshot(self): - self.driver.common._create_lun_snapshot = mock.Mock() - - result = self.driver.create_snapshot(SNAPSHOT) - self.driver.common._create_lun_snapshot.assert_called_with(SNAPSHOT) - self.assertIsNone(result) - - def test_delete_snapshot(self): - self.driver.common._delete_lun_snapshot = mock.Mock() - - result = self.driver.delete_snapshot(SNAPSHOT) - self.driver.common._delete_lun_snapshot.assert_called_with(SNAPSHOT) - self.assertIsNone(result) - - def test_get_volume_stats(self): - self.driver._update_volume_stats = mock.Mock() - self.driver._update_volume_stats() - - result = self.driver.get_volume_stats(True) - - self.driver._update_volume_stats.assert_called_with() - self.assertEqual(self.driver.stats, result) - - @mock.patch('socket.gethostbyaddr') - def test_update_volume_stats(self, mock_gethost): - """Test Update Volume Stats. - - Makes a mock query to the backend to collect stats on all physical - devices. - """ - - def gethostbyaddr(addr): - if addr == '8.8.8.8' or addr == 'example.com': - return ('example.com', [], ['8.8.8.8']) - else: - return ('a.b.c.d', [], addr) - mock_gethost.side_effect = gethostbyaddr - - backend_name = self.conf.volume_backend_name - vendor_name = "Violin Memory, Inc." 
- tot_gb = 2046 - free_gb = 1022 - - phy_devices = "/batch/physicalresource/physicaldevice" - - conf = { - 'basic.get.side_effect': [PHY_DEVICES_RESPONSE, ], - } - - self.driver.common.vmem_mg = self.setup_mock_concerto(m_conf=conf) - - result = self.driver._update_volume_stats() - - calls = [mock.call(phy_devices)] - self.driver.common.vmem_mg.basic.get.assert_has_calls(calls) - self.assertEqual(tot_gb, self.driver.stats['total_capacity_gb']) - self.assertEqual(free_gb, self.driver.stats['free_capacity_gb']) - self.assertEqual(backend_name, - self.driver.stats['volume_backend_name']) - self.assertEqual(vendor_name, self.driver.stats['vendor_name']) - self.assertIsNone(result) - - def test_get_active_fc_targets(self): - """Test Get Active FC Targets. - - Makes a mock query to the backend to collect all the physical - adapters and extract the WWNs. - """ - - conf = { - 'adapter.get_fc_info.return_value': FC_INFO, - } - - self.driver.common.vmem_mg = self.setup_mock_concerto(m_conf=conf) - - result = self.driver._get_active_fc_targets() - - self.assertEqual({'2100001b9745e230', '2100001b9745e25f', - '2100001b9745e231', '2100001b9745e25e'}, - set(result)) - - def test_initialize_connection(self): - lun_id = 1 - target_wwns = self.driver.gateway_fc_wwns - init_targ_map = {} - - conf = { - 'client.create_client.return_value': None, - } - self.driver.common.vmem_mg = self.setup_mock_concerto(m_conf=conf) - self.driver._export_lun = mock.Mock(return_value=lun_id) - self.driver._build_initiator_target_map = mock.Mock( - return_value=(target_wwns, init_targ_map)) - - props = self.driver.initialize_connection(VOLUME, CONNECTOR) - - self.driver.common.vmem_mg.client.create_client.assert_called_with( - name=CONNECTOR['host'], proto='FC', fc_wwns=CONNECTOR['wwpns']) - self.driver._export_lun.assert_called_with(VOLUME, CONNECTOR) - self.driver._build_initiator_target_map.assert_called_with( - CONNECTOR) - self.assertEqual("fibre_channel", props['driver_volume_type']) - 
self.assertTrue(props['data']['target_discovered']) - self.assertEqual(self.driver.gateway_fc_wwns, - props['data']['target_wwn']) - self.assertEqual(lun_id, props['data']['target_lun']) - - def test_terminate_connection(self): - target_wwns = self.driver.gateway_fc_wwns - init_targ_map = {} - - self.driver.common.vmem_mg = self.setup_mock_concerto() - self.driver._unexport_lun = mock.Mock() - self.driver._is_initiator_connected_to_array = mock.Mock( - return_value=False) - self.driver._build_initiator_target_map = mock.Mock( - return_value=(target_wwns, init_targ_map)) - - props = self.driver.terminate_connection(VOLUME, CONNECTOR) - - self.driver._unexport_lun.assert_called_with(VOLUME, CONNECTOR) - self.driver._is_initiator_connected_to_array.assert_called_with( - CONNECTOR) - self.driver._build_initiator_target_map.assert_called_with( - CONNECTOR) - self.assertEqual("fibre_channel", props['driver_volume_type']) - self.assertEqual(target_wwns, props['data']['target_wwn']) - self.assertEqual(init_targ_map, props['data']['initiator_target_map']) - - def test_export_lun(self): - lun_id = '1' - response = {'success': True, 'msg': 'Assign SAN client successfully'} - - conf = { - 'client.get_client_info.return_value': CLIENT_INFO, - } - self.driver.common.vmem_mg = self.setup_mock_concerto(m_conf=conf) - - self.driver.common._send_cmd_and_verify = mock.Mock( - return_value=response) - - self.driver._get_lun_id = mock.Mock(return_value=lun_id) - - result = self.driver._export_lun(VOLUME, CONNECTOR) - - self.driver.common._send_cmd_and_verify.assert_called_with( - self.driver.common.vmem_mg.lun.assign_lun_to_client, - self.driver._is_lun_id_ready, - 'Assign SAN client successfully', - [VOLUME['id'], CONNECTOR['host'], "ReadWrite"], - [VOLUME['id'], CONNECTOR['host']]) - self.driver._get_lun_id.assert_called_with( - VOLUME['id'], CONNECTOR['host']) - self.assertEqual(lun_id, result) - - def test_export_lun_fails_with_exception(self): - lun_id = '1' - response = 
{'status': False, 'msg': 'Generic error'} - failure = exception.ViolinBackendErr - - self.driver.common.vmem_mg = self.setup_mock_concerto() - self.driver.common._send_cmd_and_verify = mock.Mock( - side_effect=exception.ViolinBackendErr(response['msg'])) - self.driver._get_lun_id = mock.Mock(return_value=lun_id) - - self.assertRaises(failure, self.driver._export_lun, VOLUME, CONNECTOR) - - def test_unexport_lun(self): - response = {'success': True, 'msg': 'Unassign SAN client successfully'} - - self.driver.common.vmem_mg = self.setup_mock_concerto() - self.driver.common._send_cmd = mock.Mock( - return_value=response) - - result = self.driver._unexport_lun(VOLUME, CONNECTOR) - - self.driver.common._send_cmd.assert_called_with( - self.driver.common.vmem_mg.lun.unassign_client_lun, - "Unassign SAN client successfully", - VOLUME['id'], CONNECTOR['host'], True) - self.assertIsNone(result) - - def test_get_lun_id(self): - - conf = { - 'client.get_client_info.return_value': CLIENT_INFO, - } - self.driver.common.vmem_mg = self.setup_mock_concerto(m_conf=conf) - - result = self.driver._get_lun_id(VOLUME['id'], CONNECTOR['host']) - - self.assertEqual(8, result) - - def test_is_lun_id_ready(self): - lun_id = '1' - self.driver.common.vmem_mg = self.setup_mock_concerto() - - self.driver._get_lun_id = mock.Mock(return_value=lun_id) - - result = self.driver._is_lun_id_ready( - VOLUME['id'], CONNECTOR['host']) - self.assertTrue(result) - - def test_build_initiator_target_map(self): - """Successfully build a map when zoning is enabled.""" - expected_targ_wwns = FC_TARGET_WWPNS - - self.driver.lookup_service = mock.Mock() - (self.driver.lookup_service.get_device_mapping_from_network. - return_value) = FC_FABRIC_MAP - - result = self.driver._build_initiator_target_map(CONNECTOR) - (targ_wwns, init_targ_map) = result - - (self.driver.lookup_service.get_device_mapping_from_network. 
- assert_called_with(CONNECTOR['wwpns'], self.driver.gateway_fc_wwns)) - self.assertEqual(set(expected_targ_wwns), set(targ_wwns)) - - i = FC_INITIATOR_WWPNS[0] - self.assertIn(FC_TARGET_WWPNS[0], init_targ_map[i]) - self.assertIn(FC_TARGET_WWPNS[1], init_targ_map[i]) - self.assertEqual(2, len(init_targ_map[i])) - - i = FC_INITIATOR_WWPNS[1] - self.assertIn(FC_TARGET_WWPNS[2], init_targ_map[i]) - self.assertIn(FC_TARGET_WWPNS[3], init_targ_map[i]) - self.assertEqual(2, len(init_targ_map[i])) - - self.assertEqual(2, len(init_targ_map)) - - def test_build_initiator_target_map_no_lookup_service(self): - """Successfully build a map when zoning is disabled.""" - expected_targ_wwns = FC_TARGET_WWPNS - expected_init_targ_map = { - CONNECTOR['wwpns'][0]: FC_TARGET_WWPNS, - CONNECTOR['wwpns'][1]: FC_TARGET_WWPNS - } - self.driver.lookup_service = None - - targ_wwns, init_targ_map = self.driver._build_initiator_target_map( - CONNECTOR) - - self.assertEqual(expected_targ_wwns, targ_wwns) - self.assertEqual(expected_init_targ_map, init_targ_map) - - def test_is_initiator_connected_to_array(self): - """Successfully finds an initiator with remaining active session.""" - conf = { - 'client.get_client_info.return_value': CLIENT_INFO, - } - self.driver.common.vmem_mg = self.setup_mock_concerto(m_conf=conf) - - self.assertTrue(self.driver._is_initiator_connected_to_array( - CONNECTOR)) - self.driver.common.vmem_mg.client.get_client_info.assert_called_with( - CONNECTOR['host']) - - def test_is_initiator_connected_to_array_empty_response(self): - """Successfully finds no initiators with remaining active sessions.""" - conf = { - 'client.get_client_info.return_value': CLIENT_INFO1 - } - self.driver.common.vmem_mg = self.setup_mock_concerto(m_conf=conf) - - self.assertFalse(self.driver._is_initiator_connected_to_array( - CONNECTOR)) diff --git a/cinder/tests/unit/volume/drivers/vmware/__init__.py b/cinder/tests/unit/volume/drivers/vmware/__init__.py deleted file mode 100644 index 
e69de29bb..000000000 diff --git a/cinder/tests/unit/volume/drivers/vmware/test_vmware_datastore.py b/cinder/tests/unit/volume/drivers/vmware/test_vmware_datastore.py deleted file mode 100644 index 9ae89d9e0..000000000 --- a/cinder/tests/unit/volume/drivers/vmware/test_vmware_datastore.py +++ /dev/null @@ -1,361 +0,0 @@ -# Copyright (c) 2014 VMware, Inc. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -""" -Unit tests for datastore module. -""" - -import mock -from oslo_utils import units - -from cinder import test -from cinder.volume.drivers.vmware import datastore as ds_sel -from cinder.volume.drivers.vmware import exceptions as vmdk_exceptions - - -class DatastoreTest(test.TestCase): - """Unit tests for Datastore.""" - - def setUp(self): - super(DatastoreTest, self).setUp() - self._session = mock.Mock() - self._vops = mock.Mock() - self._ds_sel = ds_sel.DatastoreSelector( - self._vops, self._session, 1024) - - @mock.patch('oslo_vmware.pbm.get_profile_id_by_name') - def test_get_profile_id(self, get_profile_id_by_name): - profile_id = mock.sentinel.profile_id - get_profile_id_by_name.return_value = profile_id - profile_name = mock.sentinel.profile_name - - self.assertEqual(profile_id, self._ds_sel.get_profile_id(profile_name)) - get_profile_id_by_name.assert_called_once_with(self._session, - profile_name) - - @mock.patch('oslo_vmware.pbm.get_profile_id_by_name') - def test_get_profile_id_with_invalid_profile(self, get_profile_id_by_name): - 
get_profile_id_by_name.return_value = None - profile_name = mock.sentinel.profile_name - - self.assertRaises(vmdk_exceptions.ProfileNotFoundException, - self._ds_sel.get_profile_id, - profile_name) - get_profile_id_by_name.assert_called_once_with(self._session, - profile_name) - - def _create_datastore(self, value): - return mock.Mock(name=value, value=value) - - def _create_summary( - self, ds, free_space=units.Mi, _type=ds_sel.DatastoreType.VMFS, - capacity=2 * units.Mi, accessible=True): - return mock.Mock(datastore=ds, freeSpace=free_space, type=_type, - capacity=capacity, accessible=accessible, - name=ds.value) - - def _create_host(self, value): - host = mock.Mock(spec=['_type', 'value'], name=value) - host._type = 'HostSystem' - host.value = value - return host - - @mock.patch('cinder.volume.drivers.vmware.datastore.DatastoreSelector.' - '_filter_by_profile') - def test_filter_datastores(self, filter_by_profile): - host1 = self._create_host('host-1') - host2 = self._create_host('host-2') - host3 = self._create_host('host-3') - - host_mounts1 = [mock.Mock(key=host1)] - host_mounts2 = [mock.Mock(key=host2)] - host_mounts3 = [mock.Mock(key=host3)] - - # empty summary - ds1 = self._create_datastore('ds-1') - ds1_props = {'host': host_mounts1} - - # hard anti-affinity datastore - ds2 = self._create_datastore('ds-2') - ds2_props = {'summary': self._create_summary(ds2), - 'host': host_mounts2} - - # not enough free space - ds3 = self._create_datastore('ds-3') - ds3_props = {'summary': self._create_summary(ds3, free_space=128), - 'host': host_mounts1} - - # not connected to a valid host - ds4 = self._create_datastore('ds-4') - ds4_props = {'summary': self._create_summary(ds4), - 'host': host_mounts3} - - # invalid datastore type - ds5 = self._create_datastore('ds-5') - ds5_props = {'summary': self._create_summary(ds5, _type='foo'), - 'host': host_mounts1} - - # hard affinity datastore type - ds6 = self._create_datastore('ds-6') - ds6_props = { - 'summary': 
self._create_summary( - ds6, _type=ds_sel.DatastoreType.VSAN), - 'host': host_mounts2} - - # inaccessible datastore - ds7 = self._create_datastore('ds-7') - ds7_props = {'summary': self._create_summary(ds7, accessible=False), - 'host': host_mounts1} - - def mock_in_maintenace(summary): - return summary.datastore.value == 'ds-8' - - self._vops._in_maintenance.side_effect = mock_in_maintenace - # in-maintenance datastore - ds8 = self._create_datastore('ds-8') - ds8_props = {'summary': self._create_summary(ds8), - 'host': host_mounts2} - - # not compliant with profile - ds9 = self._create_datastore('ds-9') - ds9_props = {'summary': self._create_summary(ds9), - 'host': host_mounts1} - - # valid datastore - ds10 = self._create_datastore('ds-10') - ds10_props = {'summary': self._create_summary(ds10), - 'host': host_mounts1} - filter_by_profile.return_value = {ds10: ds10_props} - - datastores = {ds1: ds1_props, - ds2: ds2_props, - ds3: ds3_props, - ds4: ds4_props, - ds5: ds5_props, - ds6: ds6_props, - ds7: ds7_props, - ds8: ds8_props, - ds9: ds9_props, - ds10: ds10_props} - profile_id = mock.sentinel.profile_id - datastores = self._ds_sel._filter_datastores( - datastores, - 512, - profile_id, - ['ds-2'], - {ds_sel.DatastoreType.VMFS, ds_sel.DatastoreType.NFS}, - valid_host_refs=[host1, host2]) - - self.assertEqual({ds10: ds10_props}, datastores) - filter_by_profile.assert_called_once_with( - {ds9: ds9_props, ds10: ds10_props}, - profile_id) - - def test_filter_datastores_with_empty_datastores(self): - self.assertIsNone(self._ds_sel._filter_datastores( - {}, 1024, None, None, None)) - - def _create_host_properties( - self, parent, connection_state='connected', in_maintenace=False): - return mock.Mock(connectionState=connection_state, - inMaintenanceMode=in_maintenace, - parent=parent) - - @mock.patch('cinder.volume.drivers.vmware.datastore.DatastoreSelector.' - '_get_host_properties') - @mock.patch('cinder.volume.drivers.vmware.datastore.DatastoreSelector.' 
- '_get_resource_pool') - def test_select_best_datastore(self, get_resource_pool, get_host_props): - host1 = self._create_host('host-1') - host2 = self._create_host('host-2') - host3 = self._create_host('host-3') - - host_mounts1 = [mock.Mock(key=host1, - mountInfo=mock.sentinel.ds1_mount_info1), - mock.Mock(key=host2, - mountInfo=mock.sentinel.ds1_mount_info2), - mock.Mock(key=host3, - mountInfo=mock.sentinel.ds1_mount_info3)] - host_mounts2 = [mock.Mock(key=host2, - mountInfo=mock.sentinel.ds2_mount_info2), - mock.Mock(key=host3, - mountInfo=mock.sentinel.ds2_mount_info3)] - host_mounts3 = [mock.Mock(key=host1, - mountInfo=mock.sentinel.ds3_mount_info1), - mock.Mock(key=host2, - mountInfo=mock.sentinel.ds3_mount_info2)] - host_mounts4 = [mock.Mock(key=host1, - mountInfo=mock.sentinel.ds4_mount_info1)] - - ds1 = self._create_datastore('ds-1') - ds1_props = {'summary': self._create_summary(ds1), - 'host': host_mounts1} - - ds2 = self._create_datastore('ds-2') - ds2_props = { - 'summary': self._create_summary( - ds2, free_space=1024, capacity=2048), - 'host': host_mounts2} - - ds3 = self._create_datastore('ds-3') - ds3_props = { - 'summary': self._create_summary( - ds3, free_space=512, capacity=2048), - 'host': host_mounts3} - - ds4 = self._create_datastore('ds-3') - ds4_props = {'summary': self._create_summary(ds4), - 'host': host_mounts4} - - cluster_ref = mock.sentinel.cluster_ref - - def mock_get_host_properties(host_ref): - self.assertIsNot(host1, host_ref) - if host_ref == host2: - in_maintenance = False - else: - in_maintenance = True - runtime = mock.Mock(spec=['connectionState', 'inMaintenanceMode']) - runtime.connectionState = 'connected' - runtime.inMaintenanceMode = in_maintenance - return {'parent': cluster_ref, 'runtime': runtime} - - get_host_props.side_effect = mock_get_host_properties - - def mock_is_usable(mount_info): - if (mount_info == mock.sentinel.ds1_mount_info2 or - mount_info == mock.sentinel.ds2_mount_info2): - return False - else: - 
return True - - self._vops._is_usable.side_effect = mock_is_usable - - rp = mock.sentinel.resource_pool - get_resource_pool.return_value = rp - - # ds1 is mounted to 3 hosts: host1, host2 and host3; host1 is - # not a valid host, ds1 is not usable in host1, and host3 is - # in maintenance mode. - # ds2 and ds3 are mounted to same hosts, and ds2 has a low space - # utilization. But ds2 is not usable in host2, and host3 is in - # maintenance mode. Therefore, ds3 and host2 will be selected. - datastores = {ds1: ds1_props, - ds2: ds2_props, - ds3: ds3_props, - ds4: ds4_props} - ret = self._ds_sel._select_best_datastore( - datastores, valid_host_refs=[host2, host3]) - - self.assertEqual((host2, rp, ds3_props['summary']), ret) - self.assertItemsEqual([mock.call(mock.sentinel.ds1_mount_info2), - mock.call(mock.sentinel.ds1_mount_info3), - mock.call(mock.sentinel.ds2_mount_info2), - mock.call(mock.sentinel.ds2_mount_info3), - mock.call(mock.sentinel.ds3_mount_info2)], - self._vops._is_usable.call_args_list) - self.assertEqual([mock.call(host3), mock.call(host2)], - get_host_props.call_args_list) - get_resource_pool.assert_called_once_with(cluster_ref) - - def test_select_best_datastore_with_empty_datastores(self): - self.assertIsNone(self._ds_sel._select_best_datastore({})) - - @mock.patch('cinder.volume.drivers.vmware.datastore.DatastoreSelector.' - 'get_profile_id') - @mock.patch('cinder.volume.drivers.vmware.datastore.DatastoreSelector.' - '_get_datastores') - @mock.patch('cinder.volume.drivers.vmware.datastore.DatastoreSelector.' - '_filter_datastores') - @mock.patch('cinder.volume.drivers.vmware.datastore.DatastoreSelector.' 
- '_select_best_datastore') - def test_select_datastore( - self, select_best_datastore, filter_datastores, get_datastores, - get_profile_id): - - profile_id = mock.sentinel.profile_id - get_profile_id.return_value = profile_id - - datastores = mock.sentinel.datastores - get_datastores.return_value = datastores - - filtered_datastores = mock.sentinel.filtered_datastores - filter_datastores.return_value = filtered_datastores - - best_datastore = mock.sentinel.best_datastore - select_best_datastore.return_value = best_datastore - - size_bytes = 1024 - req = {self._ds_sel.SIZE_BYTES: size_bytes} - aff_ds_types = [ds_sel.DatastoreType.VMFS] - req[ds_sel.DatastoreSelector.HARD_AFFINITY_DS_TYPE] = aff_ds_types - anti_affinity_ds = [mock.sentinel.ds] - req[ds_sel.DatastoreSelector.HARD_ANTI_AFFINITY_DS] = anti_affinity_ds - profile_name = mock.sentinel.profile_name - req[ds_sel.DatastoreSelector.PROFILE_NAME] = profile_name - - hosts = mock.sentinel.hosts - self.assertEqual(best_datastore, - self._ds_sel.select_datastore(req, hosts)) - get_datastores.assert_called_once_with() - filter_datastores.assert_called_once_with( - datastores, size_bytes, profile_id, anti_affinity_ds, aff_ds_types, - valid_host_refs=hosts) - select_best_datastore.assert_called_once_with(filtered_datastores, - valid_host_refs=hosts) - - @mock.patch('oslo_vmware.pbm.get_profile_id_by_name') - @mock.patch('cinder.volume.drivers.vmware.datastore.DatastoreSelector.' - '_filter_by_profile') - def test_is_datastore_compliant(self, filter_by_profile, - get_profile_id_by_name): - # Test with empty profile. - profile_name = None - datastore = mock.sentinel.datastore - self.assertTrue(self._ds_sel.is_datastore_compliant(datastore, - profile_name)) - - # Test with invalid profile. 
- profile_name = mock.sentinel.profile_name - get_profile_id_by_name.return_value = None - self.assertRaises(vmdk_exceptions.ProfileNotFoundException, - self._ds_sel.is_datastore_compliant, - datastore, - profile_name) - get_profile_id_by_name.assert_called_once_with(self._session, - profile_name) - - # Test with valid profile and non-compliant datastore. - get_profile_id_by_name.reset_mock() - profile_id = mock.sentinel.profile_id - get_profile_id_by_name.return_value = profile_id - filter_by_profile.return_value = {} - self.assertFalse(self._ds_sel.is_datastore_compliant(datastore, - profile_name)) - get_profile_id_by_name.assert_called_once_with(self._session, - profile_name) - filter_by_profile.assert_called_once_with({datastore: None}, - profile_id) - - # Test with valid profile and compliant datastore. - get_profile_id_by_name.reset_mock() - filter_by_profile.reset_mock() - filter_by_profile.return_value = {datastore: None} - self.assertTrue(self._ds_sel.is_datastore_compliant(datastore, - profile_name)) - get_profile_id_by_name.assert_called_once_with(self._session, - profile_name) - filter_by_profile.assert_called_once_with({datastore: None}, - profile_id) diff --git a/cinder/tests/unit/volume/drivers/vmware/test_vmware_vmdk.py b/cinder/tests/unit/volume/drivers/vmware/test_vmware_vmdk.py deleted file mode 100644 index 43f726fe5..000000000 --- a/cinder/tests/unit/volume/drivers/vmware/test_vmware_vmdk.py +++ /dev/null @@ -1,2552 +0,0 @@ -# Copyright (c) 2013 VMware, Inc. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the -# License for the specific language governing permissions and limitations -# under the License. - -""" -Test suite for VMware vCenter VMDK driver. -""" - -import ddt -import mock -from oslo_utils import units -from oslo_utils import versionutils -from oslo_vmware import api -from oslo_vmware import exceptions -from oslo_vmware import image_transfer -import six - -from cinder import context -from cinder import exception as cinder_exceptions -from cinder import test -from cinder.tests.unit import fake_snapshot -from cinder.tests.unit import fake_volume -from cinder.volume import configuration -from cinder.volume.drivers.vmware import datastore as hub -from cinder.volume.drivers.vmware import exceptions as vmdk_exceptions -from cinder.volume.drivers.vmware import vmdk -from cinder.volume.drivers.vmware import volumeops - - -# TODO(vbala) Split test methods handling multiple cases into multiple methods, -# each handling a specific case. -@ddt.ddt -class VMwareVcVmdkDriverTestCase(test.TestCase): - """Unit tests for VMwareVcVmdkDriver.""" - - IP = 'localhost' - PORT = 2321 - USERNAME = 'username' - PASSWORD = 'password' - VOLUME_FOLDER = 'cinder-volumes' - API_RETRY_COUNT = 3 - TASK_POLL_INTERVAL = 5.0 - IMG_TX_TIMEOUT = 10 - MAX_OBJECTS = 100 - TMP_DIR = "/vmware-tmp" - CA_FILE = "/etc/ssl/rui-ca-cert.pem" - VMDK_DRIVER = vmdk.VMwareVcVmdkDriver - CLUSTERS = ["cls-1", "cls-2"] - DEFAULT_VC_VERSION = '5.5' - POOL_SIZE = 20 - - VOL_ID = 'abcdefab-cdef-abcd-efab-cdefabcdefab' - SRC_VOL_ID = '9b3f6f1b-03a9-4f1e-aaff-ae15122b6ccf' - DISPLAY_NAME = 'foo' - VOL_TYPE_ID = 'd61b8cb3-aa1b-4c9b-b79e-abcdbda8b58a' - VOL_SIZE = 2 - PROJECT_ID = 'd45beabe-f5de-47b7-b462-0d9ea02889bc' - SNAPSHOT_ID = '2f59670a-0355-4790-834c-563b65bba740' - SNAPSHOT_NAME = 'snap-foo' - SNAPSHOT_DESCRIPTION = 'test snapshot' - IMAGE_ID = 'eb87f4b0-d625-47f8-bb45-71c43b486d3a' - IMAGE_NAME = 'image-1' - - def setUp(self): - super(VMwareVcVmdkDriverTestCase, self).setUp() - - self._config = 
mock.Mock(spec=configuration.Configuration) - self._config.vmware_host_ip = self.IP - self._config.vmware_host_port = self.PORT - self._config.vmware_host_username = self.USERNAME - self._config.vmware_host_password = self.PASSWORD - self._config.vmware_wsdl_location = None - self._config.vmware_volume_folder = self.VOLUME_FOLDER - self._config.vmware_api_retry_count = self.API_RETRY_COUNT - self._config.vmware_task_poll_interval = self.TASK_POLL_INTERVAL - self._config.vmware_image_transfer_timeout_secs = self.IMG_TX_TIMEOUT - self._config.vmware_max_objects_retrieval = self.MAX_OBJECTS - self._config.vmware_tmp_dir = self.TMP_DIR - self._config.vmware_ca_file = self.CA_FILE - self._config.vmware_insecure = False - self._config.vmware_cluster_name = self.CLUSTERS - self._config.vmware_host_version = self.DEFAULT_VC_VERSION - self._config.vmware_connection_pool_size = self.POOL_SIZE - - self._db = mock.Mock() - self._driver = vmdk.VMwareVcVmdkDriver(configuration=self._config, - db=self._db) - - api_retry_count = self._config.vmware_api_retry_count - task_poll_interval = self._config.vmware_task_poll_interval, - self._session = api.VMwareAPISession(self.IP, self.USERNAME, - self.PASSWORD, api_retry_count, - task_poll_interval, - create_session=False) - self._volumeops = volumeops.VMwareVolumeOps(self._session, - self.MAX_OBJECTS) - self._context = context.get_admin_context() - - def test_get_volume_stats(self): - stats = self._driver.get_volume_stats() - - self.assertEqual('VMware', stats['vendor_name']) - self.assertEqual(self._driver.VERSION, stats['driver_version']) - self.assertEqual('vmdk', stats['storage_protocol']) - self.assertEqual(0, stats['reserved_percentage']) - self.assertEqual('unknown', stats['total_capacity_gb']) - self.assertEqual('unknown', stats['free_capacity_gb']) - - def _create_volume_dict(self, - vol_id=VOL_ID, - display_name=DISPLAY_NAME, - volume_type_id=VOL_TYPE_ID, - status='available', - size=VOL_SIZE, - attachment=None, - 
project_id=PROJECT_ID): - return {'id': vol_id, - 'display_name': display_name, - 'name': 'volume-%s' % vol_id, - 'volume_type_id': volume_type_id, - 'status': status, - 'size': size, - 'volume_attachment': attachment, - 'project_id': project_id, - } - - def _create_volume_obj(self, - vol_id=VOL_ID, - display_name=DISPLAY_NAME, - volume_type_id=VOL_TYPE_ID, - status='available', - size=VOL_SIZE, - attachment=None, - project_id=PROJECT_ID): - vol = self._create_volume_dict( - vol_id, display_name, volume_type_id, status, size, attachment, - project_id) - return fake_volume.fake_volume_obj(self._context, **vol) - - @mock.patch.object(VMDK_DRIVER, '_get_disk_type') - @mock.patch.object(VMDK_DRIVER, '_get_storage_profile') - @mock.patch.object(VMDK_DRIVER, 'ds_sel') - def test_verify_volume_creation(self, ds_sel, get_storage_profile, - get_disk_type): - profile_name = mock.sentinel.profile_name - get_storage_profile.return_value = profile_name - - volume = self._create_volume_obj() - self._driver._verify_volume_creation(volume) - - get_disk_type.assert_called_once_with(volume) - get_storage_profile.assert_called_once_with(volume) - ds_sel.get_profile_id.assert_called_once_with(profile_name) - - @mock.patch.object(VMDK_DRIVER, '_verify_volume_creation') - def test_create_volume(self, verify_volume_creation): - volume = self._create_volume_dict() - self._driver.create_volume(volume) - - verify_volume_creation.assert_called_once_with(volume) - - @mock.patch.object(VMDK_DRIVER, 'volumeops') - def test_delete_volume_without_backing(self, vops): - vops.get_backing.return_value = None - - volume = self._create_volume_dict() - self._driver.delete_volume(volume) - - vops.get_backing.assert_called_once_with(volume['name']) - self.assertFalse(vops.delete_backing.called) - - @mock.patch.object(VMDK_DRIVER, 'volumeops') - def test_delete_volume(self, vops): - backing = mock.sentinel.backing - vops.get_backing.return_value = backing - - volume = self._create_volume_dict() - 
self._driver.delete_volume(volume) - - vops.get_backing.assert_called_once_with(volume['name']) - vops.delete_backing.assert_called_once_with(backing) - - @mock.patch('cinder.volume.drivers.vmware.vmdk.' - '_get_volume_type_extra_spec') - @mock.patch('cinder.volume.drivers.vmware.volumeops.' - 'VirtualDiskType.validate') - def test_get_extra_spec_disk_type(self, validate, - get_volume_type_extra_spec): - vmdk_type = mock.sentinel.vmdk_type - get_volume_type_extra_spec.return_value = vmdk_type - - type_id = mock.sentinel.type_id - self.assertEqual(vmdk_type, - self._driver._get_extra_spec_disk_type(type_id)) - get_volume_type_extra_spec.assert_called_once_with( - type_id, 'vmdk_type', default_value=vmdk.THIN_VMDK_TYPE) - validate.assert_called_once_with(vmdk_type) - - @mock.patch.object(VMDK_DRIVER, '_get_extra_spec_disk_type') - def test_get_disk_type(self, get_extra_spec_disk_type): - vmdk_type = mock.sentinel.vmdk_type - get_extra_spec_disk_type.return_value = vmdk_type - - volume = self._create_volume_dict() - self.assertEqual(vmdk_type, self._driver._get_disk_type(volume)) - get_extra_spec_disk_type.assert_called_once_with( - volume['volume_type_id']) - - def _create_snapshot_dict(self, - volume, - snap_id=SNAPSHOT_ID, - name=SNAPSHOT_NAME, - description=SNAPSHOT_DESCRIPTION): - return {'id': snap_id, - 'volume': volume, - 'volume_name': volume['name'], - 'name': name, - 'display_description': description, - } - - @mock.patch.object(VMDK_DRIVER, 'volumeops') - def test_create_snapshot_without_backing(self, vops): - vops.get_backing.return_value = None - - volume = self._create_volume_dict() - snapshot = self._create_snapshot_dict(volume) - self._driver.create_snapshot(snapshot) - - vops.get_backing.assert_called_once_with(snapshot['volume_name']) - self.assertFalse(vops.create_snapshot.called) - - @mock.patch.object(VMDK_DRIVER, 'volumeops') - def test_create_snapshot_with_backing(self, vops): - backing = mock.sentinel.backing - vops.get_backing.return_value = 
backing - - volume = self._create_volume_dict() - snapshot = self._create_snapshot_dict(volume) - self._driver.create_snapshot(snapshot) - - vops.get_backing.assert_called_once_with(snapshot['volume_name']) - vops.create_snapshot.assert_called_once_with( - backing, snapshot['name'], snapshot['display_description']) - - def test_create_snapshot_when_attached(self): - volume = self._create_volume_dict(status='in-use') - snapshot = self._create_snapshot_dict(volume) - self.assertRaises(cinder_exceptions.InvalidVolume, - self._driver.create_snapshot, snapshot) - - @mock.patch.object(VMDK_DRIVER, 'volumeops') - def test_delete_snapshot_without_backing(self, vops): - vops.get_backing.return_value = None - - volume = self._create_volume_dict() - snapshot = fake_snapshot.fake_snapshot_obj(self._context, - volume=volume) - self._driver.delete_snapshot(snapshot) - - vops.get_backing.assert_called_once_with(snapshot.volume_name) - vops.get_snapshot.assert_not_called() - vops.delete_snapshot.assert_not_called() - - @mock.patch.object(VMDK_DRIVER, 'volumeops') - @mock.patch.object(VMDK_DRIVER, '_in_use', return_value=False) - def test_delete_snapshot_with_backing(self, in_use, vops): - backing = mock.sentinel.backing - vops.get_backing.return_value = backing - - volume = self._create_volume_dict(status='deleting') - snapshot = fake_snapshot.fake_snapshot_obj(self._context, - volume=volume) - self._driver.delete_snapshot(snapshot) - - vops.get_backing.assert_called_once_with(snapshot.volume_name) - vops.get_snapshot.assert_called_once_with(backing, snapshot.name) - in_use.assert_called_once_with(snapshot.volume) - vops.delete_snapshot.assert_called_once_with( - backing, snapshot.name) - - @mock.patch.object(VMDK_DRIVER, 'volumeops') - @mock.patch.object(VMDK_DRIVER, '_in_use', return_value=True) - def test_delete_snapshot_when_attached(self, in_use, vops): - volume = self._create_volume_dict(status='in-use') - snapshot = fake_snapshot.fake_snapshot_obj(self._context, - 
volume=volume) - - self.assertRaises(cinder_exceptions.InvalidSnapshot, - self._driver.delete_snapshot, snapshot) - in_use.assert_called_once_with(snapshot.volume) - - @mock.patch.object(VMDK_DRIVER, 'volumeops') - def test_delete_snapshot_without_backend_snapshot(self, vops): - backing = mock.sentinel.backing - vops.get_backing.return_value = backing - - vops.get_snapshot.return_value = None - - volume = self._create_volume_dict(status='in-use') - snapshot = fake_snapshot.fake_snapshot_obj(self._context, - volume=volume) - self._driver.delete_snapshot(snapshot) - - vops.get_backing.assert_called_once_with(snapshot.volume_name) - vops.get_snapshot.assert_called_once_with(backing, snapshot.name) - vops.delete_snapshot.assert_not_called() - - @ddt.data('vmdk', 'VMDK', None) - def test_validate_disk_format(self, disk_format): - self._driver._validate_disk_format(disk_format) - - def test_validate_disk_format_with_invalid_format(self): - self.assertRaises(cinder_exceptions.ImageUnacceptable, - self._driver._validate_disk_format, - 'img') - - def _create_image_meta(self, - _id=IMAGE_ID, - name=IMAGE_NAME, - disk_format='vmdk', - size=1 * units.Gi, - container_format='bare', - vmware_disktype='streamOptimized', - vmware_adaptertype='lsiLogic', - is_public=True): - return {'id': _id, - 'name': name, - 'disk_format': disk_format, - 'size': size, - 'container_format': container_format, - 'properties': {'vmware_disktype': vmware_disktype, - 'vmware_adaptertype': vmware_adaptertype, - }, - 'is_public': is_public, - } - - @mock.patch('cinder.volume.drivers.vmware.vmdk.VMwareVcVmdkDriver.' 
- '_validate_disk_format') - def test_copy_image_to_volume_with_invalid_container(self, - validate_disk_format): - image_service = mock.Mock() - image_meta = self._create_image_meta(container_format='ami') - image_service.show.return_value = image_meta - - context = mock.sentinel.context - volume = self._create_volume_dict() - image_id = mock.sentinel.image_id - - self.assertRaises( - cinder_exceptions.ImageUnacceptable, - self._driver.copy_image_to_volume, context, volume, image_service, - image_id) - validate_disk_format.assert_called_once_with(image_meta['disk_format']) - - @mock.patch('cinder.volume.drivers.vmware.vmdk.VMwareVcVmdkDriver.' - '_validate_disk_format') - @mock.patch('cinder.volume.drivers.vmware.volumeops.' - 'VirtualDiskAdapterType.validate') - @mock.patch('cinder.volume.drivers.vmware.vmdk.ImageDiskType.' - 'validate') - @mock.patch.object(VMDK_DRIVER, - '_create_volume_from_non_stream_optimized_image') - @mock.patch.object(VMDK_DRIVER, - '_fetch_stream_optimized_image') - @mock.patch.object(VMDK_DRIVER, 'volumeops') - @mock.patch.object(VMDK_DRIVER, '_extend_backing') - def _test_copy_image_to_volume(self, - extend_backing, - vops, - fetch_stream_optimized_image, - create_volume_from_non_stream_opt_image, - validate_image_disk_type, - validate_image_adapter_type, - validate_disk_format, - vmware_disk_type='streamOptimized', - backing_disk_size=VOL_SIZE, - call_extend_backing=False, - container_format='bare'): - - image_service = mock.Mock() - image_meta = self._create_image_meta(vmware_disktype=vmware_disk_type, - container_format=container_format) - image_service.show.return_value = image_meta - - backing = mock.sentinel.backing - vops.get_backing.return_value = backing - vops.get_disk_size.return_value = backing_disk_size * units.Gi - - context = mock.sentinel.context - volume = self._create_volume_dict() - image_id = mock.sentinel.image_id - self._driver.copy_image_to_volume( - context, volume, image_service, image_id) - - 
validate_disk_format.assert_called_once_with(image_meta['disk_format']) - validate_image_disk_type.assert_called_once_with( - image_meta['properties']['vmware_disktype']) - validate_image_adapter_type.assert_called_once_with( - image_meta['properties']['vmware_adaptertype']) - - if vmware_disk_type == 'streamOptimized': - fetch_stream_optimized_image.assert_called_once_with( - context, volume, image_service, image_id, image_meta['size'], - image_meta['properties']['vmware_adaptertype']) - else: - create_volume_from_non_stream_opt_image.assert_called_once_with( - context, volume, image_service, image_id, image_meta['size'], - image_meta['properties']['vmware_adaptertype'], - image_meta['properties']['vmware_disktype']) - - vops.get_disk_size.assert_called_once_with(backing) - if call_extend_backing: - extend_backing.assert_called_once_with(backing, volume['size']) - else: - self.assertFalse(extend_backing.called) - - @ddt.data('sparse', 'preallocated', 'streamOptimized') - def test_copy_image_to_volume(self, vmware_disk_type): - self._test_copy_image_to_volume(vmware_disk_type=vmware_disk_type) - - @ddt.data('sparse', 'preallocated', 'streamOptimized') - def test_copy_image_to_volume_with_extend_backing(self, vmware_disk_type): - self._test_copy_image_to_volume(vmware_disk_type=vmware_disk_type, - backing_disk_size=1, - call_extend_backing=True) - - def test_copy_image_to_volume_with_ova_container(self): - self._test_copy_image_to_volume(container_format='ova') - - @mock.patch('cinder.volume.drivers.vmware.vmdk.VMwareVcVmdkDriver.' 
- '_get_disk_type') - @mock.patch.object(VMDK_DRIVER, '_check_disk_conversion') - @mock.patch('oslo_utils.uuidutils.generate_uuid') - @mock.patch.object(VMDK_DRIVER, '_create_backing') - @mock.patch.object(VMDK_DRIVER, '_get_ds_name_folder_path') - @mock.patch.object(VMDK_DRIVER, 'volumeops') - @mock.patch.object(VMDK_DRIVER, '_create_virtual_disk_from_sparse_image') - @mock.patch.object(VMDK_DRIVER, - '_create_virtual_disk_from_preallocated_image') - @mock.patch.object(VMDK_DRIVER, '_get_storage_profile_id') - @mock.patch.object(VMDK_DRIVER, '_select_ds_for_volume') - @mock.patch.object(VMDK_DRIVER, '_delete_temp_backing') - def _test_create_volume_from_non_stream_optimized_image( - self, - delete_tmp_backing, - select_ds_for_volume, - get_storage_profile_id, - create_disk_from_preallocated_image, - create_disk_from_sparse_image, - vops, - get_ds_name_folder_path, - create_backing, - generate_uuid, - check_disk_conversion, - get_disk_type, - image_disk_type='sparse', - disk_conversion=False): - - disk_type = mock.sentinel.disk_type - get_disk_type.return_value = disk_type - check_disk_conversion.return_value = disk_conversion - - volume = self._create_volume_dict() - if disk_conversion: - disk_name = "6b77b25a-9136-470e-899e-3c930e570d8e" - generate_uuid.return_value = disk_name - else: - disk_name = volume['name'] - - backing = mock.sentinel.backing - create_backing.return_value = backing - - ds_name = mock.sentinel.ds_name - folder_path = mock.sentinel.folder_path - get_ds_name_folder_path.return_value = (ds_name, folder_path) - - host = mock.sentinel.host - dc_ref = mock.sentinel.dc_ref - vops.get_host.return_value = host - vops.get_dc.return_value = dc_ref - - vmdk_path = mock.Mock(spec=volumeops.FlatExtentVirtualDiskPath) - create_disk_from_sparse_image.return_value = vmdk_path - create_disk_from_preallocated_image.return_value = vmdk_path - - profile_id = mock.sentinel.profile_id - get_storage_profile_id.return_value = profile_id - - if disk_conversion: - rp 
= mock.sentinel.rp - folder = mock.sentinel.folder - datastore = mock.sentinel.datastore - summary = mock.Mock(datastore=datastore) - select_ds_for_volume.return_value = (host, rp, folder, summary) - - clone = mock.sentinel.clone - vops.clone_backing.return_value = clone - - context = mock.sentinel.context - image_service = mock.sentinel.image_service - image_id = mock.sentinel.image_id - image_size_in_bytes = units.Gi - adapter_type = mock.sentinel.adapter_type - - self._driver._create_volume_from_non_stream_optimized_image( - context, volume, image_service, image_id, image_size_in_bytes, - adapter_type, image_disk_type) - - check_disk_conversion.assert_called_once_with(image_disk_type, - mock.sentinel.disk_type) - if disk_conversion: - create_backing.assert_called_once_with( - volume, - create_params={vmdk.CREATE_PARAM_DISK_LESS: True, - vmdk.CREATE_PARAM_BACKING_NAME: disk_name, - vmdk.CREATE_PARAM_TEMP_BACKING: True}) - else: - create_backing.assert_called_once_with( - volume, create_params={vmdk.CREATE_PARAM_DISK_LESS: True}) - - if image_disk_type == 'sparse': - create_disk_from_sparse_image.assert_called_once_with( - context, image_service, image_id, image_size_in_bytes, - dc_ref, ds_name, folder_path, disk_name) - else: - create_disk_from_preallocated_image.assert_called_once_with( - context, image_service, image_id, image_size_in_bytes, - dc_ref, ds_name, folder_path, disk_name, adapter_type) - - get_storage_profile_id.assert_called_once_with(volume) - vops.attach_disk_to_backing.assert_called_once_with( - backing, image_size_in_bytes / units.Ki, disk_type, - adapter_type, profile_id, vmdk_path.get_descriptor_ds_file_path()) - - if disk_conversion: - select_ds_for_volume.assert_called_once_with(volume) - extra_config = {vmdk.EXTRA_CONFIG_VOLUME_ID_KEY: volume['id'], - volumeops.BACKING_UUID_KEY: volume['id']} - vops.clone_backing.assert_called_once_with( - volume['name'], backing, None, volumeops.FULL_CLONE_TYPE, - datastore, disk_type=disk_type, 
host=host, resource_pool=rp, - extra_config=extra_config, folder=folder) - delete_tmp_backing.assert_called_once_with(backing) - vops.update_backing_disk_uuid(clone, volume['id']) - else: - vops.update_backing_disk_uuid(backing, volume['id']) - - @ddt.data('sparse', 'preallocated') - def test_create_volume_from_non_stream_optimized_image(self, - image_disk_type): - self._test_create_volume_from_non_stream_optimized_image( - image_disk_type=image_disk_type) - - @ddt.data('sparse', 'preallocated') - def test_create_volume_from_non_stream_opt_image_with_disk_conversion( - self, image_disk_type): - self._test_create_volume_from_non_stream_optimized_image( - image_disk_type=image_disk_type, disk_conversion=True) - - @mock.patch.object(VMDK_DRIVER, '_copy_temp_virtual_disk') - @mock.patch.object(VMDK_DRIVER, '_get_temp_image_folder') - @mock.patch('oslo_utils.uuidutils.generate_uuid') - @mock.patch( - 'cinder.volume.drivers.vmware.volumeops.FlatExtentVirtualDiskPath') - @mock.patch.object(VMDK_DRIVER, '_copy_image') - @mock.patch.object(VMDK_DRIVER, 'volumeops') - def test_create_virtual_disk_from_preallocated_image( - self, vops, copy_image, flat_extent_path, generate_uuid, - get_temp_image_folder, copy_temp_virtual_disk): - dc_ref = mock.Mock(value=mock.sentinel.dc_ref) - ds_name = mock.sentinel.ds_name - folder_path = mock.sentinel.folder_path - get_temp_image_folder.return_value = (dc_ref, ds_name, folder_path) - - uuid = mock.sentinel.uuid - generate_uuid.return_value = uuid - path = mock.Mock() - dest_path = mock.Mock() - flat_extent_path.side_effect = [path, dest_path] - - context = mock.sentinel.context - image_service = mock.sentinel.image_service - image_id = mock.sentinel.image_id - image_size_in_bytes = 2 * units.Gi - dest_dc_ref = mock.sentinel.dest_dc_ref - dest_ds_name = mock.sentinel.dest_ds_name - dest_folder_path = mock.sentinel.dest_folder_path - dest_disk_name = mock.sentinel.dest_disk_name - adapter_type = mock.sentinel.adapter_type - ret = 
self._driver._create_virtual_disk_from_preallocated_image( - context, image_service, image_id, image_size_in_bytes, dest_dc_ref, - dest_ds_name, dest_folder_path, dest_disk_name, adapter_type) - - exp_flat_extent_path_calls = [ - mock.call(ds_name, folder_path, uuid), - mock.call(dest_ds_name, dest_folder_path, dest_disk_name)] - self.assertEqual(exp_flat_extent_path_calls, - flat_extent_path.call_args_list) - create_descriptor = vops.create_flat_extent_virtual_disk_descriptor - create_descriptor.assert_called_once_with( - dc_ref, path, image_size_in_bytes / units.Ki, adapter_type, - vmdk.EAGER_ZEROED_THICK_VMDK_TYPE) - copy_image.assert_called_once_with( - context, dc_ref, image_service, image_id, image_size_in_bytes, - ds_name, path.get_flat_extent_file_path()) - copy_temp_virtual_disk.assert_called_once_with(dc_ref, path, - dest_dc_ref, dest_path) - self.assertEqual(dest_path, ret) - - @mock.patch.object(VMDK_DRIVER, '_copy_temp_virtual_disk') - @mock.patch.object(VMDK_DRIVER, '_get_temp_image_folder') - @mock.patch( - 'cinder.volume.drivers.vmware.volumeops.FlatExtentVirtualDiskPath') - @mock.patch.object(VMDK_DRIVER, '_copy_image') - @mock.patch.object(VMDK_DRIVER, 'volumeops') - def test_create_virtual_disk_from_preallocated_image_with_no_disk_copy( - self, vops, copy_image, flat_extent_path, get_temp_image_folder, - copy_temp_virtual_disk): - dc_ref = mock.Mock(value=mock.sentinel.dc_ref) - ds_name = mock.sentinel.ds_name - folder_path = mock.sentinel.folder_path - get_temp_image_folder.return_value = (dc_ref, ds_name, folder_path) - - path = mock.Mock() - flat_extent_path.return_value = path - - context = mock.sentinel.context - image_service = mock.sentinel.image_service - image_id = mock.sentinel.image_id - image_size_in_bytes = 2 * units.Gi - dest_dc_ref = mock.Mock(value=mock.sentinel.dc_ref) - dest_ds_name = ds_name - dest_folder_path = mock.sentinel.dest_folder_path - dest_disk_name = mock.sentinel.dest_disk_name - adapter_type = 
mock.sentinel.adapter_type - ret = self._driver._create_virtual_disk_from_preallocated_image( - context, image_service, image_id, image_size_in_bytes, dest_dc_ref, - dest_ds_name, dest_folder_path, dest_disk_name, adapter_type) - - flat_extent_path.assert_called_once_with( - dest_ds_name, dest_folder_path, dest_disk_name) - create_descriptor = vops.create_flat_extent_virtual_disk_descriptor - create_descriptor.assert_called_once_with( - dc_ref, path, image_size_in_bytes / units.Ki, adapter_type, - vmdk.EAGER_ZEROED_THICK_VMDK_TYPE) - copy_image.assert_called_once_with( - context, dc_ref, image_service, image_id, image_size_in_bytes, - ds_name, path.get_flat_extent_file_path()) - self.assertFalse(copy_temp_virtual_disk.called) - self.assertEqual(path, ret) - - @mock.patch.object(VMDK_DRIVER, '_copy_temp_virtual_disk') - @mock.patch.object(VMDK_DRIVER, '_get_temp_image_folder') - @mock.patch('oslo_utils.uuidutils.generate_uuid') - @mock.patch( - 'cinder.volume.drivers.vmware.volumeops.FlatExtentVirtualDiskPath') - @mock.patch.object(VMDK_DRIVER, '_copy_image') - @mock.patch.object(VMDK_DRIVER, 'volumeops') - def test_create_virtual_disk_from_preallocated_image_with_copy_error( - self, vops, copy_image, flat_extent_path, generate_uuid, - get_temp_image_folder, copy_temp_virtual_disk): - dc_ref = mock.Mock(value=mock.sentinel.dc_ref) - ds_name = mock.sentinel.ds_name - folder_path = mock.sentinel.folder_path - get_temp_image_folder.return_value = (dc_ref, ds_name, folder_path) - - uuid = mock.sentinel.uuid - generate_uuid.return_value = uuid - path = mock.Mock() - dest_path = mock.Mock() - flat_extent_path.side_effect = [path, dest_path] - - copy_image.side_effect = exceptions.VimException("error") - - context = mock.sentinel.context - image_service = mock.sentinel.image_service - image_id = mock.sentinel.image_id - image_size_in_bytes = 2 * units.Gi - dest_dc_ref = mock.sentinel.dest_dc_ref - dest_ds_name = mock.sentinel.dest_ds_name - dest_folder_path = 
mock.sentinel.dest_folder_path - dest_disk_name = mock.sentinel.dest_disk_name - adapter_type = mock.sentinel.adapter_type - self.assertRaises( - exceptions.VimException, - self._driver._create_virtual_disk_from_preallocated_image, - context, image_service, image_id, image_size_in_bytes, dest_dc_ref, - dest_ds_name, dest_folder_path, dest_disk_name, adapter_type) - - vops.delete_file.assert_called_once_with( - path.get_descriptor_ds_file_path(), dc_ref) - self.assertFalse(copy_temp_virtual_disk.called) - - @mock.patch('oslo_utils.uuidutils.generate_uuid') - @mock.patch( - 'cinder.volume.drivers.vmware.volumeops.' - 'MonolithicSparseVirtualDiskPath') - @mock.patch( - 'cinder.volume.drivers.vmware.volumeops.FlatExtentVirtualDiskPath') - @mock.patch.object(VMDK_DRIVER, '_copy_temp_virtual_disk') - @mock.patch.object(VMDK_DRIVER, '_copy_image') - def test_create_virtual_disk_from_sparse_image( - self, copy_image, copy_temp_virtual_disk, flat_extent_path, - sparse_path, generate_uuid): - uuid = mock.sentinel.uuid - generate_uuid.return_value = uuid - - src_path = mock.Mock() - sparse_path.return_value = src_path - - dest_path = mock.Mock() - flat_extent_path.return_value = dest_path - - context = mock.sentinel.context - image_service = mock.sentinel.image_service - image_id = mock.sentinel.image_id - image_size_in_bytes = 2 * units.Gi - dc_ref = mock.sentinel.dc_ref - ds_name = mock.sentinel.ds_name - folder_path = mock.sentinel.folder_path - disk_name = mock.sentinel.disk_name - - ret = self._driver._create_virtual_disk_from_sparse_image( - context, image_service, image_id, image_size_in_bytes, dc_ref, - ds_name, folder_path, disk_name) - - sparse_path.assert_called_once_with(ds_name, folder_path, uuid) - copy_image.assert_called_once_with( - context, dc_ref, image_service, image_id, image_size_in_bytes, - ds_name, src_path.get_descriptor_file_path()) - flat_extent_path.assert_called_once_with( - ds_name, folder_path, disk_name) - 
copy_temp_virtual_disk.assert_called_once_with( - dc_ref, src_path, dc_ref, dest_path) - self.assertEqual(dest_path, ret) - - @mock.patch.object(VMDK_DRIVER, '_select_datastore') - @mock.patch.object(VMDK_DRIVER, 'volumeops') - def test_get_temp_image_folder(self, vops, select_datastore): - host = mock.sentinel.host - resource_pool = mock.sentinel.rp - summary = mock.Mock() - ds_name = mock.sentinel.ds_name - summary.name = ds_name - select_datastore.return_value = (host, resource_pool, summary) - - dc = mock.sentinel.dc - vops.get_dc.return_value = dc - - image_size = 2 * units.Gi - ret = self._driver._get_temp_image_folder(image_size) - - self.assertEqual((dc, ds_name, vmdk.TMP_IMAGES_DATASTORE_FOLDER_PATH), - ret) - exp_req = { - hub.DatastoreSelector.SIZE_BYTES: image_size, - hub.DatastoreSelector.HARD_AFFINITY_DS_TYPE: - {hub.DatastoreType.VMFS, hub.DatastoreType.NFS}} - select_datastore.assert_called_once_with(exp_req) - vops.create_datastore_folder.assert_called_once_with( - ds_name, vmdk.TMP_IMAGES_DATASTORE_FOLDER_PATH, dc) - - @mock.patch.object(VMDK_DRIVER, '_select_ds_for_volume') - @mock.patch.object(VMDK_DRIVER, '_get_storage_profile_id') - @mock.patch('cinder.volume.drivers.vmware.vmdk.VMwareVcVmdkDriver.' 
- '_get_disk_type') - @mock.patch.object(VMDK_DRIVER, '_get_extra_config') - @mock.patch.object(VMDK_DRIVER, 'volumeops') - @mock.patch.object(VMDK_DRIVER, 'session') - @mock.patch.object(image_transfer, 'download_stream_optimized_image') - def _test_copy_image_to_volume_stream_optimized(self, - download_image, - session, - vops, - get_extra_config, - get_disk_type, - get_profile_id, - select_ds_for_volume, - download_error=False): - host = mock.sentinel.host - rp = mock.sentinel.rp - folder = mock.sentinel.folder - # NOTE(mriedem): The summary.name gets logged so it has to be a string - summary = mock.Mock(name=six.text_type(mock.sentinel.ds_name)) - select_ds_for_volume.return_value = (host, rp, folder, summary) - - profile_id = mock.sentinel.profile_id - get_profile_id.return_value = profile_id - - disk_type = mock.sentinel.disk_type - get_disk_type.return_value = disk_type - - extra_config = mock.sentinel.extra_config - get_extra_config.return_value = extra_config - - vm_create_spec = mock.sentinel.vm_create_spec - vops.get_create_spec.return_value = vm_create_spec - - import_spec = mock.Mock() - session.vim.client.factory.create.return_value = import_spec - - backing = mock.sentinel.backing - if download_error: - download_image.side_effect = exceptions.VimException - vops.get_backing.return_value = backing - else: - download_image.return_value = backing - - context = mock.sentinel.context - volume = self._create_volume_dict(size=3) - image_service = mock.sentinel.image_service - image_id = mock.sentinel.image_id - image_size = 2 * units.Gi - adapter_type = mock.sentinel.adapter_type - - if download_error: - self.assertRaises( - exceptions.VimException, - self._driver._fetch_stream_optimized_image, - context, volume, image_service, image_id, - image_size, adapter_type) - else: - self._driver._fetch_stream_optimized_image( - context, volume, image_service, image_id, image_size, - adapter_type) - - select_ds_for_volume.assert_called_once_with(volume) - 
vops.get_create_spec.assert_called_once_with( - volume['name'], 0, disk_type, summary.name, profile_id=profile_id, - adapter_type=adapter_type, extra_config=extra_config) - self.assertEqual(vm_create_spec, import_spec.configSpec) - download_image.assert_called_with( - context, - self._config.vmware_image_transfer_timeout_secs, - image_service, - image_id, - session=session, - host=self._config.vmware_host_ip, - port=self._config.vmware_host_port, - resource_pool=rp, - vm_folder=folder, - vm_import_spec=import_spec, - image_size=image_size) - if download_error: - self.assertFalse(vops.update_backing_disk_uuid.called) - vops.delete_backing.assert_called_once_with(backing) - else: - vops.update_backing_disk_uuid.assert_called_once_with( - backing, volume['id']) - - def test_copy_image_to_volume_stream_optimized(self): - self._test_copy_image_to_volume_stream_optimized() - - def test_copy_image_to_volume_stream_optimized_with_download_error(self): - self._test_copy_image_to_volume_stream_optimized(download_error=True) - - @mock.patch.object(VMDK_DRIVER, '_in_use', return_value=True) - def test_copy_volume_to_image_when_attached(self, in_use): - volume = self._create_volume_dict( - status="uploading", - attachment=[mock.sentinel.attachment_1]) - self.assertRaises( - cinder_exceptions.InvalidVolume, - self._driver.copy_volume_to_image, - mock.sentinel.context, - volume, - mock.sentinel.image_service, - mock.sentinel.image_meta) - in_use.assert_called_once_with(volume) - - @mock.patch.object(VMDK_DRIVER, '_validate_disk_format') - @mock.patch.object(VMDK_DRIVER, 'volumeops') - @mock.patch.object(VMDK_DRIVER, '_create_backing') - @mock.patch('oslo_vmware.image_transfer.upload_image') - @mock.patch.object(VMDK_DRIVER, 'session') - def _test_copy_volume_to_image( - self, session, upload_image, create_backing, vops, - validate_disk_format, backing_exists=True): - backing = mock.sentinel.backing - if backing_exists: - vops.get_backing.return_value = backing - else: - 
vops.get_backing.return_value = None - create_backing.return_value = backing - - vmdk_file_path = mock.sentinel.vmdk_file_path - vops.get_vmdk_path.return_value = vmdk_file_path - - context = mock.sentinel.context - volume = self._create_volume_dict() - image_service = mock.sentinel.image_service - image_meta = self._create_image_meta() - self._driver.copy_volume_to_image( - context, volume, image_service, image_meta) - - validate_disk_format.assert_called_once_with(image_meta['disk_format']) - vops.get_backing.assert_called_once_with(volume['name']) - if not backing_exists: - create_backing.assert_called_once_with(volume) - vops.get_vmdk_path.assert_called_once_with(backing) - upload_image.assert_called_once_with( - context, - self._config.vmware_image_transfer_timeout_secs, - image_service, - image_meta['id'], - volume['project_id'], - session=session, - host=self._config.vmware_host_ip, - port=self._config.vmware_host_port, - vm=backing, - vmdk_file_path=vmdk_file_path, - vmdk_size=volume['size'] * units.Gi, - image_name=image_meta['name'], - image_version=1) - - def test_copy_volume_to_image(self): - self._test_copy_volume_to_image() - - def test_copy_volume_to_image_with_no_backing(self): - self._test_copy_volume_to_image(backing_exists=False) - - def test_in_use(self): - volume = self._create_volume_dict( - attachment=[mock.sentinel.attachment_1]) - self.assertTrue(self._driver._in_use(volume)) - - def test_in_use_with_available_volume(self): - volume = self._create_volume_dict() - self.assertFalse(self._driver._in_use(volume)) - - @mock.patch.object(VMDK_DRIVER, '_in_use', return_value=True) - def test_retype_with_in_use_volume(self, in_use): - context = mock.sentinel.context - volume = self._create_volume_dict( - status='retyping', attachment=[mock.sentinel.attachment_1]) - new_type = mock.sentinel.new_type - diff = mock.sentinel.diff - host = mock.sentinel.host - self.assertFalse(self._driver.retype(context, volume, new_type, diff, - host)) - 
in_use.assert_called_once_with(volume) - - @mock.patch.object(VMDK_DRIVER, '_in_use', return_value=False) - @mock.patch.object(VMDK_DRIVER, 'volumeops') - def test_retype_with_no_volume_backing(self, vops, in_use): - vops.get_backing.return_value = None - - context = mock.sentinel.context - volume = self._create_volume_dict(status='retyping') - new_type = mock.sentinel.new_type - diff = mock.sentinel.diff - host = mock.sentinel.host - self.assertTrue(self._driver.retype(context, volume, new_type, diff, - host)) - - @mock.patch.object(VMDK_DRIVER, '_in_use', return_value=False) - @mock.patch.object(VMDK_DRIVER, 'volumeops') - @mock.patch('cinder.volume.drivers.vmware.vmdk.VMwareVcVmdkDriver.' - '_get_disk_type') - @mock.patch('cinder.volume.drivers.vmware.vmdk.VMwareVcVmdkDriver.' - '_get_extra_spec_disk_type') - @mock.patch.object(VMDK_DRIVER, '_get_storage_profile') - @mock.patch.object(VMDK_DRIVER, '_get_extra_spec_storage_profile') - @mock.patch.object(VMDK_DRIVER, 'ds_sel') - @mock.patch.object(VMDK_DRIVER, '_select_datastore') - def test_retype_with_diff_profile_and_ds_compliance( - self, select_datastore, ds_sel, get_extra_spec_storage_profile, - get_storage_profile, get_extra_spec_disk_type, get_disk_type, - vops, in_use): - backing = mock.sentinel.backing - vops.get_backing.return_value = backing - - datastore = mock.Mock(value='ds1') - vops.get_datastore.return_value = datastore - - disk_type = mock.sentinel.disk_type - get_disk_type.return_value = disk_type - get_extra_spec_disk_type.return_value = disk_type - - self._driver._storage_policy_enabled = True - profile = 'gold' - get_storage_profile.return_value = profile - new_profile = 'silver' - get_extra_spec_storage_profile.return_value = new_profile - - ds_sel.is_datastore_compliant.return_value = True - - new_profile_id = mock.sentinel.new_profile_id - ds_sel.get_profile_id.return_value = new_profile_id - - context = mock.sentinel.context - volume = self._create_volume_dict(status='retyping') - 
new_type = {'id': 'f04a65e0-d10c-4db7-b4a5-f933d57aa2b5'} - diff = mock.sentinel.diff - host = mock.sentinel.host - self.assertTrue(self._driver.retype(context, volume, new_type, diff, - host)) - ds_sel.is_datastore_compliant.assert_called_once_with(datastore, - new_profile) - select_datastore.assert_not_called() - vops.change_backing_profile.assert_called_once_with(backing, - new_profile_id) - - @mock.patch.object(VMDK_DRIVER, '_in_use', return_value=False) - @mock.patch.object(VMDK_DRIVER, 'volumeops') - @mock.patch('cinder.volume.drivers.vmware.vmdk.VMwareVcVmdkDriver.' - '_get_disk_type') - @mock.patch('cinder.volume.drivers.vmware.vmdk.VMwareVcVmdkDriver.' - '_get_extra_spec_disk_type') - @mock.patch.object(VMDK_DRIVER, '_get_storage_profile') - @mock.patch.object(VMDK_DRIVER, '_get_extra_spec_storage_profile') - @mock.patch.object(VMDK_DRIVER, 'ds_sel') - @mock.patch.object(VMDK_DRIVER, '_select_datastore') - def test_retype_with_diff_profile_and_ds_sel_no_candidate( - self, select_datastore, ds_sel, get_extra_spec_storage_profile, - get_storage_profile, get_extra_spec_disk_type, get_disk_type, - vops, in_use): - backing = mock.sentinel.backing - vops.get_backing.return_value = backing - - datastore = mock.Mock(value='ds1') - vops.get_datastore.return_value = datastore - - disk_type = mock.sentinel.disk_type - get_disk_type.return_value = disk_type - get_extra_spec_disk_type.return_value = disk_type - - vops.snapshot_exists.return_value = False - - self._driver._storage_policy_enabled = True - profile = 'gold' - get_storage_profile.return_value = profile - new_profile = 'silver' - get_extra_spec_storage_profile.return_value = new_profile - - ds_sel.is_datastore_compliant.return_value = False - select_datastore.side_effect = ( - vmdk_exceptions.NoValidDatastoreException) - - context = mock.sentinel.context - volume = self._create_volume_dict(status='retyping') - new_type = {'id': 'f04a65e0-d10c-4db7-b4a5-f933d57aa2b5'} - diff = mock.sentinel.diff - host = 
mock.sentinel.host - self.assertFalse(self._driver.retype(context, volume, new_type, diff, - host)) - ds_sel.is_datastore_compliant.assert_called_once_with(datastore, - new_profile) - select_datastore.assert_called_once_with( - {hub.DatastoreSelector.SIZE_BYTES: volume['size'] * units.Gi, - hub.DatastoreSelector.PROFILE_NAME: new_profile}) - - @mock.patch.object(VMDK_DRIVER, '_in_use', return_value=False) - @mock.patch.object(VMDK_DRIVER, 'volumeops') - @mock.patch('cinder.volume.drivers.vmware.vmdk.VMwareVcVmdkDriver.' - '_get_disk_type') - @mock.patch('cinder.volume.drivers.vmware.vmdk.VMwareVcVmdkDriver.' - '_get_extra_spec_disk_type') - @mock.patch.object(VMDK_DRIVER, '_get_storage_profile') - @mock.patch.object(VMDK_DRIVER, '_get_extra_spec_storage_profile') - @mock.patch.object(VMDK_DRIVER, 'ds_sel') - @mock.patch.object(VMDK_DRIVER, '_select_datastore') - @mock.patch.object(VMDK_DRIVER, '_get_dc') - @mock.patch.object(VMDK_DRIVER, '_get_volume_group_folder') - def test_retype_with_diff_extra_spec_and_vol_snapshot( - self, - get_volume_group_folder, - get_dc, - select_datastore, - ds_sel, get_extra_spec_storage_profile, - get_storage_profile, - get_extra_spec_disk_type, - get_disk_type, - vops, - in_use): - backing = mock.sentinel.backing - vops.get_backing.return_value = backing - - datastore = mock.Mock(value='ds1') - vops.get_datastore.return_value = datastore - - get_disk_type.return_value = 'thin' - new_disk_type = 'thick' - get_extra_spec_disk_type.return_value = new_disk_type - - vops.snapshot_exists.return_value = True - - self._driver._storage_policy_enabled = True - profile = 'gold' - get_storage_profile.return_value = profile - new_profile = 'silver' - get_extra_spec_storage_profile.return_value = new_profile - - ds_sel.is_datastore_compliant.return_value = False - host = mock.sentinel.host - rp = mock.sentinel.rp - new_datastore = mock.Mock(value='ds2') - summary = mock.Mock(datastore=new_datastore) - select_datastore.return_value = (host, rp, 
summary) - - dc = mock.sentinel.dc - get_dc.return_value = dc - - folder = mock.sentinel.folder - get_volume_group_folder.return_value = folder - - new_profile_id = mock.sentinel.new_profile_id - ds_sel.get_profile_id.return_value = new_profile_id - - context = mock.sentinel.context - volume = self._create_volume_dict(status='retyping') - new_type = {'id': 'f04a65e0-d10c-4db7-b4a5-f933d57aa2b5'} - diff = mock.sentinel.diff - host = mock.sentinel.host - self.assertTrue(self._driver.retype(context, volume, new_type, diff, - host)) - ds_sel.is_datastore_compliant.assert_called_once_with(datastore, - new_profile) - select_datastore.assert_called_once_with( - {hub.DatastoreSelector.SIZE_BYTES: volume['size'] * units.Gi, - hub.DatastoreSelector.HARD_ANTI_AFFINITY_DS: ['ds1'], - hub.DatastoreSelector.PROFILE_NAME: new_profile}) - get_dc.assert_called_once_with(rp) - get_volume_group_folder.assert_called_once_with(dc, - volume['project_id']) - vops.relocate_backing.assert_called_once_with( - backing, new_datastore, rp, host, new_disk_type) - vops.move_backing_to_folder.assert_called_once_with(backing, folder) - vops.change_backing_profile.assert_called_once_with(backing, - new_profile_id) - - @mock.patch.object(VMDK_DRIVER, '_in_use', return_value=False) - @mock.patch.object(VMDK_DRIVER, 'volumeops') - @mock.patch('cinder.volume.drivers.vmware.vmdk.VMwareVcVmdkDriver.' - '_get_disk_type') - @mock.patch('cinder.volume.drivers.vmware.vmdk.VMwareVcVmdkDriver.' 
- '_get_extra_spec_disk_type') - @mock.patch.object(VMDK_DRIVER, '_get_storage_profile') - @mock.patch.object(VMDK_DRIVER, '_get_extra_spec_storage_profile') - @mock.patch.object(VMDK_DRIVER, 'ds_sel') - @mock.patch.object(VMDK_DRIVER, '_select_datastore') - @mock.patch.object(VMDK_DRIVER, '_get_dc') - @mock.patch.object(VMDK_DRIVER, '_get_volume_group_folder') - @mock.patch('oslo_utils.uuidutils.generate_uuid') - @mock.patch.object(VMDK_DRIVER, '_delete_temp_backing') - def _test_retype_with_diff_extra_spec_and_ds_compliance( - self, - delete_temp_backing, - generate_uuid, - get_volume_group_folder, - get_dc, - select_datastore, - ds_sel, - get_extra_spec_storage_profile, - get_storage_profile, - get_extra_spec_disk_type, - get_disk_type, - vops, - in_use, - clone_error=False): - backing = mock.sentinel.backing - vops.get_backing.return_value = backing - - datastore = mock.Mock(value='ds1') - vops.get_datastore.return_value = datastore - - get_disk_type.return_value = 'thin' - new_disk_type = 'thick' - get_extra_spec_disk_type.return_value = new_disk_type - - vops.snapshot_exists.return_value = False - - self._driver._storage_policy_enabled = True - profile = 'gold' - get_storage_profile.return_value = profile - new_profile = 'silver' - get_extra_spec_storage_profile.return_value = new_profile - - ds_sel.is_datastore_compliant.return_value = True - host = mock.sentinel.host - rp = mock.sentinel.rp - summary = mock.Mock(datastore=datastore) - select_datastore.return_value = (host, rp, summary) - - dc = mock.sentinel.dc - get_dc.return_value = dc - - folder = mock.sentinel.folder - get_volume_group_folder.return_value = folder - - new_profile_id = mock.sentinel.new_profile_id - ds_sel.get_profile_id.return_value = new_profile_id - - uuid = '025b654b-d4ed-47f9-8014-b71a7744eafc' - generate_uuid.return_value = uuid - - if clone_error: - vops.clone_backing.side_effect = exceptions.VimException - else: - new_backing = mock.sentinel.new_backing - 
vops.clone_backing.return_value = new_backing - - context = mock.sentinel.context - volume = self._create_volume_dict(status='retyping') - new_type = {'id': 'f04a65e0-d10c-4db7-b4a5-f933d57aa2b5'} - diff = mock.sentinel.diff - host = mock.sentinel.host - if clone_error: - self.assertRaises(exceptions.VimException, self._driver.retype, - context, volume, new_type, diff, host) - else: - self.assertTrue(self._driver.retype(context, volume, new_type, - diff, host)) - ds_sel.is_datastore_compliant.assert_called_once_with(datastore, - new_profile) - select_datastore.assert_called_once_with( - {hub.DatastoreSelector.SIZE_BYTES: volume['size'] * units.Gi, - hub.DatastoreSelector.PROFILE_NAME: new_profile}) - get_dc.assert_called_once_with(rp) - get_volume_group_folder.assert_called_once_with(dc, - volume['project_id']) - vops.clone_backing.assert_called_once_with( - volume['name'], backing, None, volumeops.FULL_CLONE_TYPE, - datastore, disk_type=new_disk_type, host=host, resource_pool=rp, - folder=folder) - if clone_error: - exp_rename_calls = [mock.call(backing, uuid), - mock.call(backing, volume['name'])] - self.assertEqual(exp_rename_calls, - vops.rename_backing.call_args_list) - else: - vops.rename_backing.assert_called_once_with(backing, uuid) - vops.update_backing_uuid.assert_called_once_with( - new_backing, volume['id']) - vops.update_backing_disk_uuid.assert_called_once_with( - new_backing, volume['id']) - delete_temp_backing.assert_called_once_with(backing) - vops.change_backing_profile.assert_called_once_with(new_backing, - new_profile_id) - - def test_retype_with_diff_extra_spec_and_ds_compliance(self): - self._test_retype_with_diff_extra_spec_and_ds_compliance() - - def test_retype_with_diff_extra_spec_ds_compliance_and_clone_error(self): - self._test_retype_with_diff_extra_spec_and_ds_compliance( - clone_error=True) - - @mock.patch.object(VMDK_DRIVER, 'volumeops') - def test_extend_backing(self, vops): - vmdk_path = mock.sentinel.vmdk_path - 
vops.get_vmdk_path.return_value = vmdk_path - dc = mock.sentinel.datacenter - vops.get_dc.return_value = dc - - backing = mock.sentinel.backing - new_size = 1 - self._driver._extend_backing(backing, new_size) - - vops.get_vmdk_path.assert_called_once_with(backing) - vops.get_dc.assert_called_once_with(backing) - vops.extend_virtual_disk.assert_called_once_with(new_size, - vmdk_path, - dc) - - @mock.patch.object(VMDK_DRIVER, 'session') - @mock.patch('oslo_vmware.vim_util.get_vc_version') - def test_get_vc_version(self, get_vc_version, session): - self._driver.configuration.vmware_host_version = None - - version_str = '6.0.0' - get_vc_version.return_value = version_str - - version = self._driver._get_vc_version() - - self.assertEqual(version_str, version) - get_vc_version.assert_called_once_with(session) - - @mock.patch('oslo_vmware.vim_util.get_vc_version') - def test_get_vc_version_override(self, get_vc_version): - version = self._driver._get_vc_version() - - self.assertEqual( - self._driver.configuration.vmware_host_version, - version) - get_vc_version.assert_not_called() - - @mock.patch('cinder.volume.drivers.vmware.vmdk.LOG') - @ddt.data('5.5', '6.0') - def test_validate_vcenter_version(self, version, log): - # vCenter versions 5.5 and above should pass validation. - self._driver._validate_vcenter_version(version) - # Deprecation warning should be logged for vCenter versions which are - # incompatible with next minimum supported version. - if not versionutils.is_compatible( - self._driver.NEXT_MIN_SUPPORTED_VC_VERSION, version, - same_major=False): - log.warning.assert_called_once() - else: - log.warning.assert_not_called() - - def test_validate_vcenter_version_with_less_than_min_supported_version( - self): - # Validation should fail for vCenter version less than 5.1. 
- self.assertRaises(exceptions.VMwareDriverException, - self._driver._validate_vcenter_version, - '5.1') - - @mock.patch.object(VMDK_DRIVER, '_validate_params') - @mock.patch.object(VMDK_DRIVER, '_get_vc_version') - @mock.patch.object(VMDK_DRIVER, '_validate_vcenter_version') - @mock.patch('oslo_vmware.pbm.get_pbm_wsdl_location') - @mock.patch('cinder.volume.drivers.vmware.volumeops.VMwareVolumeOps') - @mock.patch('cinder.volume.drivers.vmware.datastore.DatastoreSelector') - @mock.patch.object(VMDK_DRIVER, 'volumeops') - @mock.patch.object(VMDK_DRIVER, 'session') - def _test_do_setup( - self, session, vops, ds_sel_cls, vops_cls, get_pbm_wsdl_loc, - validate_vc_version, get_vc_version, validate_params, - enable_pbm=True): - if enable_pbm: - ver_str = '5.5' - pbm_wsdl = mock.sentinel.pbm_wsdl - get_pbm_wsdl_loc.return_value = pbm_wsdl - else: - ver_str = '5.1' - get_vc_version.return_value = ver_str - - cls_1 = mock.sentinel.cls_1 - cls_2 = mock.sentinel.cls_2 - cluster_refs = {'cls-1': cls_1, 'cls-2': cls_2} - vops.get_cluster_refs.return_value = cluster_refs - - self._driver.do_setup(mock.ANY) - - validate_params.assert_called_once_with() - get_vc_version.assert_called_once_with() - validate_vc_version.assert_called_once_with(ver_str) - if enable_pbm: - get_pbm_wsdl_loc.assert_called_once_with(ver_str) - self.assertEqual(pbm_wsdl, self._driver.pbm_wsdl) - self.assertEqual(enable_pbm, self._driver._storage_policy_enabled) - vops_cls.assert_called_once_with( - session, self._driver.configuration.vmware_max_objects_retrieval) - self.assertEqual(vops_cls.return_value, self._driver._volumeops) - ds_sel_cls.assert_called_once_with( - vops, - session, - self._driver.configuration.vmware_max_objects_retrieval) - self.assertEqual(ds_sel_cls.return_value, self._driver._ds_sel) - vops.get_cluster_refs.assert_called_once_with( - self._driver.configuration.vmware_cluster_name) - self.assertEqual(list(cluster_refs.values()), - list(self._driver._clusters)) - - def 
test_do_setup(self): - self._test_do_setup() - - def test_do_setup_with_pbm_disabled(self): - self._test_do_setup(enable_pbm=False) - - @mock.patch.object(VMDK_DRIVER, '_validate_params') - @mock.patch.object(VMDK_DRIVER, '_get_vc_version') - @mock.patch.object(VMDK_DRIVER, '_validate_vcenter_version') - @mock.patch('oslo_vmware.pbm.get_pbm_wsdl_location') - def test_do_setup_with_invalid_pbm_wsdl( - self, get_pbm_wsdl_loc, validate_vc_version, get_vc_version, - validate_params): - ver_str = '5.5' - get_vc_version.return_value = ver_str - - get_pbm_wsdl_loc.return_value = None - - self.assertRaises(exceptions.VMwareDriverException, - self._driver.do_setup, - mock.ANY) - - validate_params.assert_called_once_with() - get_vc_version.assert_called_once_with() - validate_vc_version.assert_called_once_with(ver_str) - get_pbm_wsdl_loc.assert_called_once_with(ver_str) - - @mock.patch.object(VMDK_DRIVER, 'volumeops') - def test_get_dc(self, vops): - dc_1 = mock.sentinel.dc_1 - dc_2 = mock.sentinel.dc_2 - vops.get_dc.side_effect = [dc_1, dc_2] - - # cache miss - rp_1 = mock.Mock(value='rp-1') - rp_2 = mock.Mock(value='rp-2') - self.assertEqual(dc_1, self._driver._get_dc(rp_1)) - self.assertEqual(dc_2, self._driver._get_dc(rp_2)) - self.assertDictEqual({'rp-1': dc_1, 'rp-2': dc_2}, - self._driver._dc_cache) - - # cache hit - self.assertEqual(dc_1, self._driver._get_dc(rp_1)) - self.assertEqual(dc_2, self._driver._get_dc(rp_2)) - - vops.get_dc.assert_has_calls([mock.call(rp_1), mock.call(rp_2)]) - - @mock.patch.object(VMDK_DRIVER, '_get_storage_profile') - @mock.patch.object(VMDK_DRIVER, '_select_datastore') - @mock.patch.object(VMDK_DRIVER, '_get_dc') - @mock.patch.object(VMDK_DRIVER, 'volumeops') - @mock.patch.object(VMDK_DRIVER, '_get_volume_group_folder') - @ddt.data(None, {vmdk.CREATE_PARAM_DISK_SIZE: 2 * VOL_SIZE}) - def test_select_ds_for_volume( - self, create_params, get_volume_group_folder, vops, get_dc, - select_datastore, get_storage_profile): - - profile = 
mock.sentinel.profile - get_storage_profile.return_value = profile - - host = mock.sentinel.host - rp = mock.sentinel.rp - summary = mock.sentinel.summary - select_datastore.return_value = (host, rp, summary) - - dc = mock.sentinel.dc - get_dc.return_value = dc - - folder = mock.sentinel.folder - get_volume_group_folder.return_value = folder - - vol = self._create_volume_dict() - ret = self._driver._select_ds_for_volume( - vol, host=host, create_params=create_params) - - self.assertEqual((host, rp, folder, summary), ret) - if create_params: - exp_size = create_params[vmdk.CREATE_PARAM_DISK_SIZE] * units.Gi - else: - exp_size = vol['size'] * units.Gi - exp_req = {hub.DatastoreSelector.SIZE_BYTES: exp_size, - hub.DatastoreSelector.PROFILE_NAME: profile} - select_datastore.assert_called_once_with(exp_req, host) - get_dc.assert_called_once_with(rp) - get_volume_group_folder.assert_called_once_with(dc, vol['project_id']) - - @mock.patch.object(VMDK_DRIVER, 'volumeops') - def _test_get_connection_info(self, vops, vmdk_connector=False): - volume = self._create_volume_obj() - backing = mock.Mock(value='ref-1') - if vmdk_connector: - vmdk_path = mock.sentinel.vmdk_path - vops.get_vmdk_path.return_value = vmdk_path - - datastore = mock.Mock(value='ds-1') - vops.get_datastore.return_value = datastore - - datacenter = mock.Mock(value='dc-1') - vops.get_dc.return_value = datacenter - - connector = {'platform': mock.sentinel.platform, - 'os_type': mock.sentinel.os_type} - else: - connector = {'instance': 'vm-1'} - ret = self._driver._get_connection_info(volume, backing, connector) - - self.assertEqual('vmdk', ret['driver_volume_type']) - self.assertEqual('ref-1', ret['data']['volume']) - self.assertEqual(volume.id, ret['data']['volume_id']) - self.assertEqual(volume.name, ret['data']['name']) - - if vmdk_connector: - self.assertEqual(volume.size * units.Gi, ret['data']['vmdk_size']) - self.assertEqual(vmdk_path, ret['data']['vmdk_path']) - self.assertEqual('ds-1', 
ret['data']['datastore']) - self.assertEqual('dc-1', ret['data']['datacenter']) - - config = self._driver.configuration - exp_config = { - 'vmware_host_ip': config.vmware_host_ip, - 'vmware_host_port': config.vmware_host_port, - 'vmware_host_username': config.vmware_host_username, - 'vmware_host_password': config.vmware_host_password, - 'vmware_api_retry_count': config.vmware_api_retry_count, - 'vmware_task_poll_interval': config.vmware_task_poll_interval, - 'vmware_ca_file': config.vmware_ca_file, - 'vmware_insecure': config.vmware_insecure, - 'vmware_tmp_dir': config.vmware_tmp_dir, - 'vmware_image_transfer_timeout_secs': - config.vmware_image_transfer_timeout_secs, - } - self.assertEqual(exp_config, ret['data']['config']) - - def test_get_connection_info(self): - self._test_get_connection_info() - - def test_get_connection_info_vmdk_connector(self): - self._test_get_connection_info(vmdk_connector=True) - - @mock.patch.object(VMDK_DRIVER, 'volumeops') - @mock.patch('oslo_vmware.vim_util.get_moref') - @mock.patch.object(VMDK_DRIVER, '_create_backing') - @mock.patch.object(VMDK_DRIVER, '_relocate_backing') - @mock.patch.object(VMDK_DRIVER, '_get_connection_info') - def _test_initialize_connection( - self, get_connection_info, relocate_backing, create_backing, - get_moref, vops, backing_exists=True, instance_exists=True): - - backing_val = mock.sentinel.backing_val - backing = mock.Mock(value=backing_val) - if backing_exists: - vops.get_backing.return_value = backing - else: - vops.get_backing.return_value = None - create_backing.return_value = backing - - if instance_exists: - instance_val = mock.sentinel.instance_val - connector = {'instance': instance_val} - - instance_moref = mock.sentinel.instance_moref - get_moref.return_value = instance_moref - - host = mock.sentinel.host - vops.get_host.return_value = host - else: - connector = {} - - conn_info = mock.sentinel.conn_info - get_connection_info.return_value = conn_info - - volume = self._create_volume_obj() - 
ret = self._driver.initialize_connection(volume, connector) - - self.assertEqual(conn_info, ret) - if instance_exists: - vops.get_host.assert_called_once_with(instance_moref) - if backing_exists: - relocate_backing.assert_called_once_with(volume, backing, host) - create_backing.assert_not_called() - else: - create_backing.assert_called_once_with(volume, host) - relocate_backing.assert_not_called() - elif not backing_exists: - create_backing.assert_called_once_with(volume) - relocate_backing.assert_not_called() - else: - create_backing.assert_not_called() - relocate_backing.assert_not_called() - get_connection_info.assert_called_once_with(volume, backing, connector) - - def test_initialize_connection_with_instance_and_backing(self): - self._test_initialize_connection() - - def test_initialize_connection_with_instance_and_no_backing(self): - self._test_initialize_connection(backing_exists=False) - - def test_initialize_connection_with_no_instance_and_no_backing(self): - self._test_initialize_connection( - backing_exists=False, instance_exists=False) - - def test_initialize_connection_with_no_instance_and_backing(self): - self._test_initialize_connection(instance_exists=False) - - @mock.patch.object(VMDK_DRIVER, 'volumeops') - def test_get_volume_group_folder(self, vops): - folder = mock.sentinel.folder - vops.create_vm_inventory_folder.return_value = folder - - datacenter = mock.sentinel.dc - project_id = '63c19a12292549818c09946a5e59ddaf' - self.assertEqual(folder, - self._driver._get_volume_group_folder(datacenter, - project_id)) - project_folder_name = 'Project (%s)' % project_id - vops.create_vm_inventory_folder.assert_called_once_with( - datacenter, ['OpenStack', project_folder_name, self.VOLUME_FOLDER]) - - @mock.patch('cinder.volume.drivers.vmware.vmdk.' 
- '_get_volume_type_extra_spec') - @ddt.data('full', 'linked') - def test_get_clone_type(self, clone_type, get_volume_type_extra_spec): - get_volume_type_extra_spec.return_value = clone_type - - volume = self._create_volume_dict() - self.assertEqual(clone_type, self._driver._get_clone_type(volume)) - get_volume_type_extra_spec.assert_called_once_with( - volume['volume_type_id'], 'clone_type', - default_value=volumeops.FULL_CLONE_TYPE) - - @mock.patch('cinder.volume.drivers.vmware.vmdk.' - '_get_volume_type_extra_spec') - def test_get_clone_type_invalid( - self, get_volume_type_extra_spec): - get_volume_type_extra_spec.return_value = 'foo' - - volume = self._create_volume_dict() - self.assertRaises( - cinder_exceptions.Invalid, self._driver._get_clone_type, volume) - get_volume_type_extra_spec.assert_called_once_with( - volume['volume_type_id'], 'clone_type', - default_value=volumeops.FULL_CLONE_TYPE) - - @mock.patch.object(VMDK_DRIVER, 'volumeops') - @mock.patch.object(VMDK_DRIVER, '_select_ds_for_volume') - @mock.patch.object(VMDK_DRIVER, '_extend_backing') - def _test_clone_backing( - self, extend_backing, select_ds_for_volume, vops, - clone_type=volumeops.FULL_CLONE_TYPE, extend_needed=False, - vc60=False): - host = mock.sentinel.host - rp = mock.sentinel.rp - folder = mock.sentinel.folder - datastore = mock.sentinel.datastore - summary = mock.Mock(datastore=datastore) - select_ds_for_volume.return_value = (host, rp, folder, summary) - - clone = mock.sentinel.clone - vops.clone_backing.return_value = clone - - if vc60: - self._driver._vc_version = '6.0' - else: - self._driver._vc_version = '5.5' - - src_vsize = 1 - if extend_needed: - size = 2 - else: - size = 1 - volume = self._create_volume_obj(size=size) - backing = mock.sentinel.backing - snapshot = mock.sentinel.snapshot - self._driver._clone_backing( - volume, backing, snapshot, clone_type, src_vsize) - - extra_config = {vmdk.EXTRA_CONFIG_VOLUME_ID_KEY: volume['id'], - volumeops.BACKING_UUID_KEY: 
volume['id']} - if volume.size > src_vsize or clone_type == volumeops.FULL_CLONE_TYPE: - vops.clone_backing.assert_called_once_with( - volume.name, - backing, - snapshot, - volumeops.FULL_CLONE_TYPE, - datastore, - host=host, - resource_pool=rp, - extra_config=extra_config, - folder=folder) - vops.update_backing_disk_uuid.assert_called_once_with(clone, - volume.id) - else: - vops.clone_backing.assert_called_once_with( - volume.name, - backing, - snapshot, - volumeops.LINKED_CLONE_TYPE, - None, - host=None, - resource_pool=None, - extra_config=extra_config, - folder=None) - if not vc60: - vops.update_backing_disk_uuid.assert_called_once_with( - clone, volume.id) - else: - vops.update_backing_disk_uuid.assert_not_called() - - if volume.size > src_vsize: - extend_backing.assert_called_once_with(clone, volume.size) - else: - extend_backing.assert_not_called() - - @ddt.data(volumeops.FULL_CLONE_TYPE, volumeops.LINKED_CLONE_TYPE) - def test_clone_backing(self, clone_type): - self._test_clone_backing(clone_type=clone_type) - - @ddt.data(volumeops.FULL_CLONE_TYPE, volumeops.LINKED_CLONE_TYPE) - def test_clone_backing_with_extend(self, clone_type): - self._test_clone_backing(clone_type=clone_type, extend_needed=True) - - def test_clone_backing_linked_vc_60(self): - self._test_clone_backing( - clone_type=volumeops.LINKED_CLONE_TYPE, vc60=True) - - @mock.patch('cinder.volume.drivers.vmware.vmdk.VMwareVcVmdkDriver.' 
- 'volumeops', new_callable=mock.PropertyMock) - def test_create_volume_from_snapshot_without_backing(self, mock_vops): - """Test create_volume_from_snapshot without a backing.""" - mock_vops = mock_vops.return_value - driver = self._driver - volume = {'name': 'mock_vol'} - snapshot = {'volume_name': 'mock_vol', 'name': 'mock_snap'} - driver._verify_volume_creation = mock.MagicMock() - mock_vops.get_backing.return_value = None - - # invoke the create_volume_from_snapshot api - driver.create_volume_from_snapshot(volume, snapshot) - - # verify calls - driver._verify_volume_creation.assert_called_once_with(volume) - mock_vops.get_backing.assert_called_once_with('mock_vol') - - @mock.patch('cinder.volume.drivers.vmware.vmdk.VMwareVcVmdkDriver.' - 'volumeops', new_callable=mock.PropertyMock) - def test_create_volume_from_snap_without_backing_snap(self, mock_vops): - """Test create_volume_from_snapshot without a backing snapshot.""" - mock_vops = mock_vops.return_value - driver = self._driver - volume = {'volume_type_id': None, 'name': 'mock_vol'} - snapshot = {'volume_name': 'mock_vol', 'name': 'mock_snap'} - backing = mock.sentinel.backing - driver._verify_volume_creation = mock.MagicMock() - mock_vops.get_backing.return_value = backing - mock_vops.get_snapshot.return_value = None - - # invoke the create_volume_from_snapshot api - driver.create_volume_from_snapshot(volume, snapshot) - - # verify calls - driver._verify_volume_creation.assert_called_once_with(volume) - mock_vops.get_backing.assert_called_once_with('mock_vol') - mock_vops.get_snapshot.assert_called_once_with(backing, - 'mock_snap') - - @mock.patch('cinder.volume.drivers.vmware.vmdk.VMwareVcVmdkDriver.' 
- 'volumeops', new_callable=mock.PropertyMock) - def test_create_volume_from_snapshot(self, mock_vops): - """Test create_volume_from_snapshot.""" - mock_vops = mock_vops.return_value - driver = self._driver - volume = {'volume_type_id': None, 'name': 'mock_vol'} - snapshot = {'volume_name': 'mock_vol', 'name': 'mock_snap', - 'volume_size': 2} - backing = mock.sentinel.backing - snap_moref = mock.sentinel.snap_moref - driver._verify_volume_creation = mock.MagicMock() - mock_vops.get_backing.return_value = backing - mock_vops.get_snapshot.return_value = snap_moref - driver._clone_backing = mock.MagicMock() - - # invoke the create_volume_from_snapshot api - driver.create_volume_from_snapshot(volume, snapshot) - - # verify calls - driver._verify_volume_creation.assert_called_once_with(volume) - mock_vops.get_backing.assert_called_once_with('mock_vol') - mock_vops.get_snapshot.assert_called_once_with(backing, - 'mock_snap') - default_clone_type = volumeops.FULL_CLONE_TYPE - driver._clone_backing.assert_called_once_with(volume, - backing, - snap_moref, - default_clone_type, - snapshot['volume_size']) - - @mock.patch('cinder.volume.drivers.vmware.vmdk.VMwareVcVmdkDriver.' - 'volumeops', new_callable=mock.PropertyMock) - def test_create_cloned_volume_without_backing(self, mock_vops): - """Test create_cloned_volume without a backing.""" - mock_vops = mock_vops.return_value - driver = self._driver - volume = {'name': 'mock_vol'} - src_vref = {'name': 'src_snapshot_name'} - driver._verify_volume_creation = mock.MagicMock() - mock_vops.get_backing.return_value = None - - # invoke the create_volume_from_snapshot api - driver.create_cloned_volume(volume, src_vref) - - @mock.patch('cinder.volume.drivers.vmware.vmdk.VMwareVcVmdkDriver.' 
- 'volumeops', new_callable=mock.PropertyMock) - def test_create_cloned_volume_with_backing(self, mock_vops): - """Test create_cloned_volume with clone type - full.""" - mock_vops = mock_vops.return_value - driver = self._driver - volume = {'volume_type_id': None, 'name': 'mock_vol'} - src_vref = {'name': 'src_snapshot_name', 'size': 1} - backing = mock.sentinel.backing - driver._verify_volume_creation = mock.MagicMock() - mock_vops.get_backing.return_value = backing - default_clone_type = volumeops.FULL_CLONE_TYPE - driver._clone_backing = mock.MagicMock() - - # invoke the create_volume_from_snapshot api - driver.create_cloned_volume(volume, src_vref) - - # verify calls - driver._verify_volume_creation.assert_called_once_with(volume) - mock_vops.get_backing.assert_called_once_with('src_snapshot_name') - driver._clone_backing.assert_called_once_with(volume, - backing, - None, - default_clone_type, - src_vref['size']) - - @mock.patch.object(VMDK_DRIVER, '_verify_volume_creation') - @mock.patch.object(VMDK_DRIVER, 'volumeops') - @mock.patch.object(VMDK_DRIVER, '_get_clone_type') - @mock.patch.object(VMDK_DRIVER, '_clone_backing') - def test_create_linked_cloned_volume_with_backing( - self, clone_backing, get_clone_type, vops, verify_volume_creation): - - backing = mock.sentinel.backing - vops.get_backing.return_value = backing - - get_clone_type.return_value = volumeops.LINKED_CLONE_TYPE - - temp_snapshot = mock.sentinel.temp_snapshot - vops.create_snapshot.return_value = temp_snapshot - - volume = self._create_volume_dict() - src_vref = self._create_volume_dict(vol_id=self.SRC_VOL_ID) - self._driver.create_cloned_volume(volume, src_vref) - - verify_volume_creation.assert_called_once_with(volume) - vops.get_backing.assert_called_once_with(src_vref['name']) - get_clone_type.assert_called_once_with(volume) - temp_snap_name = 'temp-snapshot-%s' % volume['id'] - vops.create_snapshot.assert_called_once_with( - backing, temp_snap_name, None) - 
self._driver._clone_backing.assert_called_once_with( - volume, backing, temp_snapshot, volumeops.LINKED_CLONE_TYPE, - src_vref['size']) - vops.delete_snapshot.assert_called_once_with(backing, temp_snap_name) - - @mock.patch('cinder.volume.drivers.vmware.vmdk.VMwareVcVmdkDriver.' - 'volumeops', new_callable=mock.PropertyMock) - @mock.patch('cinder.volume.drivers.vmware.vmdk.VMwareVcVmdkDriver.' - '_get_clone_type') - def test_create_linked_cloned_volume_when_attached(self, get_clone_type, - mock_vops): - """Test create_cloned_volume linked clone when volume is attached.""" - mock_vops = mock_vops.return_value - driver = self._driver - volume = {'volume_type_id': None, 'name': 'mock_vol', 'id': 'mock_id'} - src_vref = {'name': 'src_snapshot_name', 'status': 'in-use'} - backing = mock.sentinel.backing - driver._verify_volume_creation = mock.MagicMock() - mock_vops.get_backing.return_value = backing - linked_clone = volumeops.LINKED_CLONE_TYPE - get_clone_type.return_value = linked_clone - - # invoke the create_volume_from_snapshot api - self.assertRaises(cinder_exceptions.InvalidVolume, - driver.create_cloned_volume, - volume, - src_vref) - - # verify calls - driver._verify_volume_creation.assert_called_once_with(volume) - mock_vops.get_backing.assert_called_once_with('src_snapshot_name') - get_clone_type.assert_called_once_with(volume) - - @mock.patch('cinder.volume.drivers.vmware.vmdk.' 
- '_get_volume_type_extra_spec') - def test_get_extra_spec_storage_profile(self, get_volume_type_extra_spec): - vol_type_id = mock.sentinel.vol_type_id - self._driver._get_extra_spec_storage_profile(vol_type_id) - get_volume_type_extra_spec.assert_called_once_with(vol_type_id, - 'storage_profile') - - @mock.patch.object(VMDK_DRIVER, '_get_extra_spec_storage_profile') - def test_get_storage_profile(self, get_extra_spec_storage_profile): - volume = self._create_volume_dict() - self._driver._get_storage_profile(volume) - get_extra_spec_storage_profile.assert_called_once_with( - volume['volume_type_id']) - - @mock.patch.object(VMDK_DRIVER, '_get_storage_profile') - @mock.patch.object(VMDK_DRIVER, 'session') - @mock.patch('oslo_vmware.pbm.get_profile_id_by_name') - def test_get_storage_profile_id( - self, get_profile_id_by_name, session, get_storage_profile): - get_storage_profile.return_value = 'gold' - profile_id = mock.sentinel.profile_id - get_profile_id_by_name.return_value = mock.Mock(uniqueId=profile_id) - - self._driver._storage_policy_enabled = True - volume = self._create_volume_dict() - self.assertEqual(profile_id, - self._driver._get_storage_profile_id(volume)) - get_storage_profile.assert_called_once_with(volume) - get_profile_id_by_name.assert_called_once_with(session, 'gold') - - @mock.patch.object(VMDK_DRIVER, '_get_storage_profile') - @mock.patch.object(VMDK_DRIVER, 'session') - @mock.patch('oslo_vmware.pbm.get_profile_id_by_name') - def test_get_storage_profile_id_with_missing_extra_spec( - self, get_profile_id_by_name, session, get_storage_profile): - get_storage_profile.return_value = None - - self._driver._storage_policy_enabled = True - volume = self._create_volume_dict() - self.assertIsNone(self._driver._get_storage_profile_id(volume)) - get_storage_profile.assert_called_once_with(volume) - self.assertFalse(get_profile_id_by_name.called) - - @mock.patch.object(VMDK_DRIVER, '_get_storage_profile') - @mock.patch.object(VMDK_DRIVER, 'session') - 
@mock.patch('oslo_vmware.pbm.get_profile_id_by_name') - def test_get_storage_profile_id_with_pbm_disabled( - self, get_profile_id_by_name, session, get_storage_profile): - get_storage_profile.return_value = 'gold' - - volume = self._create_volume_dict() - self.assertIsNone(self._driver._get_storage_profile_id(volume)) - get_storage_profile.assert_called_once_with(volume) - self.assertFalse(get_profile_id_by_name.called) - - @mock.patch.object(VMDK_DRIVER, '_get_storage_profile') - @mock.patch.object(VMDK_DRIVER, 'session') - @mock.patch('oslo_vmware.pbm.get_profile_id_by_name') - def test_get_storage_profile_id_with_missing_profile( - self, get_profile_id_by_name, session, get_storage_profile): - get_storage_profile.return_value = 'gold' - get_profile_id_by_name.return_value = None - - self._driver._storage_policy_enabled = True - volume = self._create_volume_dict() - self.assertIsNone(self._driver._get_storage_profile_id(volume)) - get_storage_profile.assert_called_once_with(volume) - get_profile_id_by_name.assert_called_once_with(session, 'gold') - - @mock.patch.object(VMDK_DRIVER, 'volumeops') - @mock.patch.object(VMDK_DRIVER, 'session') - @mock.patch('oslo_vmware.image_transfer.download_flat_image') - def _test_copy_image(self, download_flat_image, session, vops, - expected_cacerts=False): - - dc_name = mock.sentinel.dc_name - vops.get_entity_name.return_value = dc_name - - context = mock.sentinel.context - dc_ref = mock.sentinel.dc_ref - image_service = mock.sentinel.image_service - image_id = mock.sentinel.image_id - image_size_in_bytes = 102400 - ds_name = mock.sentinel.ds_name - upload_file_path = mock.sentinel.upload_file_path - self._driver._copy_image( - context, dc_ref, image_service, image_id, image_size_in_bytes, - ds_name, upload_file_path) - - vops.get_entity_name.assert_called_once_with(dc_ref) - cookies = session.vim.client.options.transport.cookiejar - download_flat_image.assert_called_once_with( - context, - 
self._config.vmware_image_transfer_timeout_secs, - image_service, - image_id, - image_size=image_size_in_bytes, - host=self._config.vmware_host_ip, - port=self._config.vmware_host_port, - data_center_name=dc_name, - datastore_name=ds_name, - cookies=cookies, - file_path=upload_file_path, - cacerts=expected_cacerts) - - def test_copy_image(self): - # Default value of vmware_ca_file is not None; it should be passed - # to download_flat_image as cacerts. - self._test_copy_image(expected_cacerts=self._config.vmware_ca_file) - - def test_copy_image_insecure(self): - # Set config options to allow insecure connections. - self._config.vmware_ca_file = None - self._config.vmware_insecure = True - # Since vmware_ca_file is unset and vmware_insecure is True, - # dowload_flat_image should be called with cacerts=False. - self._test_copy_image() - - @mock.patch.object(VMDK_DRIVER, '_select_ds_for_volume') - @mock.patch.object(VMDK_DRIVER, '_get_storage_profile_id') - @mock.patch.object(VMDK_DRIVER, 'volumeops') - @mock.patch.object(VMDK_DRIVER, '_get_disk_type') - def _test_create_backing( - self, get_disk_type, vops, get_storage_profile_id, - select_ds_for_volume, create_params=None): - create_params = create_params or {} - - host = mock.sentinel.host - resource_pool = mock.sentinel.resource_pool - folder = mock.sentinel.folder - summary = mock.sentinel.summary - select_ds_for_volume.return_value = (host, resource_pool, folder, - summary) - - profile_id = mock.sentinel.profile_id - get_storage_profile_id.return_value = profile_id - - backing = mock.sentinel.backing - vops.create_backing_disk_less.return_value = backing - vops.create_backing.return_value = backing - - disk_type = mock.sentinel.disk_type - get_disk_type.return_value = disk_type - - volume = self._create_volume_dict() - ret = self._driver._create_backing(volume, host, create_params) - - self.assertEqual(backing, ret) - select_ds_for_volume.assert_called_once_with(volume, host) - 
get_storage_profile_id.assert_called_once_with(volume) - - exp_extra_config = {vmdk.EXTRA_CONFIG_VOLUME_ID_KEY: volume['id'], - volumeops.BACKING_UUID_KEY: volume['id']} - if create_params.get(vmdk.CREATE_PARAM_DISK_LESS): - vops.create_backing_disk_less.assert_called_once_with( - volume['name'], - folder, - resource_pool, - host, - summary.name, - profileId=profile_id, - extra_config=exp_extra_config) - vops.update_backing_disk_uuid.assert_not_called() - else: - get_disk_type.assert_called_once_with(volume) - exp_backing_name = ( - create_params.get(vmdk.CREATE_PARAM_BACKING_NAME) or - volume['name']) - exp_adapter_type = ( - create_params.get(vmdk.CREATE_PARAM_ADAPTER_TYPE) or - 'lsiLogic') - vops.create_backing.assert_called_once_with( - exp_backing_name, - volume['size'] * units.Mi, - disk_type, - folder, - resource_pool, - host, - summary.name, - profileId=profile_id, - adapter_type=exp_adapter_type, - extra_config=exp_extra_config) - vops.update_backing_disk_uuid.assert_called_once_with(backing, - volume['id']) - - def test_create_backing_disk_less(self): - create_params = {vmdk.CREATE_PARAM_DISK_LESS: True} - self._test_create_backing(create_params=create_params) - - def test_create_backing_with_adapter_type_override(self): - create_params = {vmdk.CREATE_PARAM_ADAPTER_TYPE: 'ide'} - self._test_create_backing(create_params=create_params) - - def test_create_backing_with_backing_name_override(self): - create_params = {vmdk.CREATE_PARAM_BACKING_NAME: 'foo'} - self._test_create_backing(create_params=create_params) - - def test_create_backing(self): - self._test_create_backing() - - @mock.patch.object(VMDK_DRIVER, 'volumeops') - def test_get_hosts(self, vops): - host_1 = mock.sentinel.host_1 - host_2 = mock.sentinel.host_2 - host_3 = mock.sentinel.host_3 - vops.get_cluster_hosts.side_effect = [[host_1, host_2], [host_3]] - - cls_1 = mock.sentinel.cls_1 - cls_2 = mock.sentinel.cls_2 - self.assertEqual([host_1, host_2, host_3], - self._driver._get_hosts([cls_1, 
cls_2])) - exp_calls = [mock.call(cls_1), mock.call(cls_2)] - self.assertEqual(exp_calls, vops.get_cluster_hosts.call_args_list) - - @mock.patch.object(VMDK_DRIVER, '_get_hosts') - @mock.patch.object(VMDK_DRIVER, 'ds_sel') - def test_select_datastore(self, ds_sel, get_hosts): - cls_1 = mock.sentinel.cls_1 - cls_2 = mock.sentinel.cls_2 - self._driver._clusters = [cls_1, cls_2] - - host_1 = mock.sentinel.host_1 - host_2 = mock.sentinel.host_2 - host_3 = mock.sentinel.host_3 - get_hosts.return_value = [host_1, host_2, host_3] - - best_candidate = mock.sentinel.best_candidate - ds_sel.select_datastore.return_value = best_candidate - - req = mock.sentinel.req - self.assertEqual(best_candidate, self._driver._select_datastore(req)) - get_hosts.assert_called_once_with(self._driver._clusters) - ds_sel.select_datastore.assert_called_once_with( - req, hosts=[host_1, host_2, host_3]) - - @mock.patch.object(VMDK_DRIVER, '_get_hosts') - @mock.patch.object(VMDK_DRIVER, 'ds_sel') - def test_select_datastore_with_no_best_candidate(self, ds_sel, get_hosts): - cls_1 = mock.sentinel.cls_1 - cls_2 = mock.sentinel.cls_2 - self._driver._clusters = [cls_1, cls_2] - - host_1 = mock.sentinel.host_1 - host_2 = mock.sentinel.host_2 - host_3 = mock.sentinel.host_3 - get_hosts.return_value = [host_1, host_2, host_3] - - ds_sel.select_datastore.return_value = () - - req = mock.sentinel.req - self.assertRaises(vmdk_exceptions.NoValidDatastoreException, - self._driver._select_datastore, - req) - get_hosts.assert_called_once_with(self._driver._clusters) - ds_sel.select_datastore.assert_called_once_with( - req, hosts=[host_1, host_2, host_3]) - - @mock.patch.object(VMDK_DRIVER, '_get_hosts') - @mock.patch.object(VMDK_DRIVER, 'ds_sel') - def test_select_datastore_with_single_host(self, ds_sel, get_hosts): - best_candidate = mock.sentinel.best_candidate - ds_sel.select_datastore.return_value = best_candidate - - req = mock.sentinel.req - host_1 = mock.sentinel.host_1 - self.assertEqual(best_candidate, 
- self._driver._select_datastore(req, host_1)) - ds_sel.select_datastore.assert_called_once_with(req, hosts=[host_1]) - self.assertFalse(get_hosts.called) - - @mock.patch.object(VMDK_DRIVER, '_get_hosts') - @mock.patch.object(VMDK_DRIVER, 'ds_sel') - def test_select_datastore_with_empty_clusters(self, ds_sel, get_hosts): - self._driver._clusters = None - - best_candidate = mock.sentinel.best_candidate - ds_sel.select_datastore.return_value = best_candidate - - req = mock.sentinel.req - self.assertEqual(best_candidate, self._driver._select_datastore(req)) - ds_sel.select_datastore.assert_called_once_with(req, hosts=None) - self.assertFalse(get_hosts.called) - - @mock.patch.object(VMDK_DRIVER, '_get_hosts') - @mock.patch.object(VMDK_DRIVER, 'ds_sel') - def test_select_datastore_with_no_valid_host(self, ds_sel, get_hosts): - cls_1 = mock.sentinel.cls_1 - cls_2 = mock.sentinel.cls_2 - self._driver._clusters = [cls_1, cls_2] - - get_hosts.return_value = [] - - req = mock.sentinel.req - self.assertRaises(vmdk_exceptions.NoValidHostException, - self._driver._select_datastore, req) - get_hosts.assert_called_once_with(self._driver._clusters) - self.assertFalse(ds_sel.called) - - @mock.patch.object(VMDK_DRIVER, 'volumeops') - @mock.patch.object(VMDK_DRIVER, '_get_storage_profile') - @mock.patch.object(VMDK_DRIVER, 'ds_sel') - def test_relocate_backing_nop(self, ds_sel, get_profile, vops): - self._driver._storage_policy_enabled = True - volume = self._create_volume_dict() - - datastore = mock.sentinel.datastore - vops.get_datastore.return_value = datastore - - profile = mock.sentinel.profile - get_profile.return_value = profile - - vops.is_datastore_accessible.return_value = True - ds_sel.is_datastore_compliant.return_value = True - - backing = mock.sentinel.backing - host = mock.sentinel.host - self._driver._relocate_backing(volume, backing, host) - - get_profile.assert_called_once_with(volume) - vops.is_datastore_accessible.assert_called_once_with(datastore, host) - 
ds_sel.is_datastore_compliant.assert_called_once_with(datastore, - profile) - self.assertFalse(vops.relocate_backing.called) - - @mock.patch.object(VMDK_DRIVER, 'volumeops') - @mock.patch.object(VMDK_DRIVER, '_get_storage_profile') - @mock.patch.object(VMDK_DRIVER, 'ds_sel') - def test_relocate_backing_with_no_datastore( - self, ds_sel, get_profile, vops): - self._driver._storage_policy_enabled = True - volume = self._create_volume_dict() - - profile = mock.sentinel.profile - get_profile.return_value = profile - - vops.is_datastore_accessible.return_value = True - ds_sel.is_datastore_compliant.return_value = False - - ds_sel.select_datastore.return_value = [] - - backing = mock.sentinel.backing - host = mock.sentinel.host - - self.assertRaises(vmdk_exceptions.NoValidDatastoreException, - self._driver._relocate_backing, - volume, - backing, - host) - get_profile.assert_called_once_with(volume) - ds_sel.select_datastore.assert_called_once_with( - {hub.DatastoreSelector.SIZE_BYTES: volume['size'] * units.Gi, - hub.DatastoreSelector.PROFILE_NAME: profile}, hosts=[host]) - self.assertFalse(vops.relocate_backing.called) - - @mock.patch.object(VMDK_DRIVER, 'volumeops') - @mock.patch.object(VMDK_DRIVER, '_get_dc') - @mock.patch.object(VMDK_DRIVER, '_get_volume_group_folder') - @mock.patch.object(VMDK_DRIVER, 'ds_sel') - def test_relocate_backing( - self, ds_sel, get_volume_group_folder, get_dc, vops): - volume = self._create_volume_dict() - - vops.is_datastore_accessible.return_value = False - ds_sel.is_datastore_compliant.return_value = True - - backing = mock.sentinel.backing - host = mock.sentinel.host - - rp = mock.sentinel.rp - datastore = mock.sentinel.datastore - summary = mock.Mock(datastore=datastore) - ds_sel.select_datastore.return_value = (host, rp, summary) - - dc = mock.sentinel.dc - get_dc.return_value = dc - - folder = mock.sentinel.folder - get_volume_group_folder.return_value = folder - - self._driver._relocate_backing(volume, backing, host) - - 
get_dc.assert_called_once_with(rp) - get_volume_group_folder.assert_called_once_with( - dc, volume['project_id']) - vops.relocate_backing.assert_called_once_with(backing, - datastore, - rp, - host) - vops.move_backing_to_folder.assert_called_once_with(backing, - folder) - - @mock.patch.object(VMDK_DRIVER, 'volumeops') - @mock.patch.object(VMDK_DRIVER, '_get_dc') - @mock.patch.object(VMDK_DRIVER, '_get_volume_group_folder') - @mock.patch.object(VMDK_DRIVER, 'ds_sel') - def test_relocate_backing_with_pbm_disabled( - self, ds_sel, get_volume_group_folder, get_dc, vops): - self._driver._storage_policy_enabled = False - volume = self._create_volume_dict() - - vops.is_datastore_accessible.return_value = False - - backing = mock.sentinel.backing - host = mock.sentinel.host - - rp = mock.sentinel.rp - datastore = mock.sentinel.datastore - summary = mock.Mock(datastore=datastore) - ds_sel.select_datastore.return_value = (host, rp, summary) - - dc = mock.sentinel.dc - get_dc.return_value = dc - - folder = mock.sentinel.folder - get_volume_group_folder.return_value = folder - - self._driver._relocate_backing(volume, backing, host) - - self.assertFalse(vops.get_profile.called) - get_dc.assert_called_once_with(rp) - get_volume_group_folder.assert_called_once_with( - dc, volume['project_id']) - vops.relocate_backing.assert_called_once_with(backing, - datastore, - rp, - host) - vops.move_backing_to_folder.assert_called_once_with(backing, - folder) - ds_sel.select_datastore.assert_called_once_with( - {hub.DatastoreSelector.SIZE_BYTES: volume['size'] * units.Gi, - hub.DatastoreSelector.PROFILE_NAME: None}, hosts=[host]) - - @mock.patch.object(VMDK_DRIVER, 'volumeops') - def test_get_disk_device(self, vops): - vm = mock.sentinel.vm - vops.get_entity_by_inventory_path.return_value = vm - - dev = mock.sentinel.dev - vops.get_disk_device.return_value = dev - - vm_inv_path = mock.sentinel.vm_inv_path - vmdk_path = mock.sentinel.vmdk_path - ret = self._driver._get_disk_device(vmdk_path, 
vm_inv_path) - - self.assertEqual((vm, dev), ret) - vops.get_entity_by_inventory_path.assert_called_once_with(vm_inv_path) - vops.get_disk_device.assert_called_once_with(vm, vmdk_path) - - def test_get_existing_with_empty_source_name(self): - self.assertRaises(cinder_exceptions.InvalidInput, - self._driver._get_existing, - {}) - - def test_get_existing_with_invalid_source_name(self): - self.assertRaises(cinder_exceptions.InvalidInput, - self._driver._get_existing, - {'source-name': 'foo'}) - - @mock.patch.object(VMDK_DRIVER, '_get_disk_device', return_value=None) - def test_get_existing_with_invalid_existing_ref(self, get_disk_device): - self.assertRaises(cinder_exceptions.ManageExistingInvalidReference, - self._driver._get_existing, - {'source-name': '[ds1] foo/foo.vmdk@/dc-1/vm/foo'}) - get_disk_device.assert_called_once_with('[ds1] foo/foo.vmdk', - '/dc-1/vm/foo') - - @mock.patch.object(VMDK_DRIVER, '_get_disk_device') - def test_get_existing(self, get_disk_device): - vm = mock.sentinel.vm - disk_device = mock.sentinel.disk_device - get_disk_device.return_value = (vm, disk_device) - self.assertEqual( - (vm, disk_device), - self._driver._get_existing({'source-name': - '[ds1] foo/foo.vmdk@/dc-1/vm/foo'})) - get_disk_device.assert_called_once_with('[ds1] foo/foo.vmdk', - '/dc-1/vm/foo') - - @mock.patch.object(VMDK_DRIVER, '_get_existing') - @ddt.data((16384, 1), (1048576, 1), (1572864, 2)) - def test_manage_existing_get_size(self, test_data, get_existing): - (capacity_kb, exp_size) = test_data - disk_device = mock.Mock(capacityInKB=capacity_kb) - get_existing.return_value = (mock.sentinel.vm, disk_device) - - volume = mock.sentinel.volume - existing_ref = mock.sentinel.existing_ref - self.assertEqual(exp_size, - self._driver.manage_existing_get_size(volume, - existing_ref)) - get_existing.assert_called_once_with(existing_ref) - - @mock.patch.object(VMDK_DRIVER, '_get_existing') - @mock.patch.object(VMDK_DRIVER, '_create_backing') - @mock.patch.object(VMDK_DRIVER, 
'volumeops') - @mock.patch.object(VMDK_DRIVER, '_get_ds_name_folder_path') - @mock.patch.object(VMDK_DRIVER, '_get_storage_profile_id') - @mock.patch('cinder.volume.drivers.vmware.vmdk.VMwareVcVmdkDriver.' - '_get_disk_type') - def test_manage_existing( - self, get_disk_type, get_storage_profile_id, - get_ds_name_folder_path, vops, create_backing, get_existing): - - vm = mock.sentinel.vm - src_path = mock.sentinel.src_path - disk_backing = mock.Mock(fileName=src_path) - disk_device = mock.Mock(backing=disk_backing, capacityInKB=1048576) - get_existing.return_value = (vm, disk_device) - - backing = mock.sentinel.backing - create_backing.return_value = backing - - src_dc = mock.sentinel.src_dc - dest_dc = mock.sentinel.dest_dc - vops.get_dc.side_effect = [src_dc, dest_dc] - - volume = self._create_volume_dict() - ds_name = "ds1" - folder_path = "%s/" % volume['name'] - get_ds_name_folder_path.return_value = (ds_name, folder_path) - - profile_id = mock.sentinel.profile_id - get_storage_profile_id.return_value = profile_id - - disk_type = mock.sentinel.disk_type - get_disk_type.return_value = disk_type - - existing_ref = mock.sentinel.existing_ref - self._driver.manage_existing(volume, existing_ref) - - get_existing.assert_called_once_with(existing_ref) - create_backing.assert_called_once_with( - volume, create_params={vmdk.CREATE_PARAM_DISK_LESS: True}) - vops.detach_disk_from_backing.assert_called_once_with(vm, disk_device) - dest_path = "[%s] %s%s.vmdk" % (ds_name, folder_path, volume['name']) - vops.move_vmdk_file.assert_called_once_with( - src_dc, src_path, dest_path, dest_dc_ref=dest_dc) - get_storage_profile_id.assert_called_once_with(volume) - vops.attach_disk_to_backing.assert_called_once_with( - backing, disk_device.capacityInKB, disk_type, 'lsiLogic', - profile_id, dest_path) - vops.update_backing_disk_uuid.assert_called_once_with(backing, - volume['id']) - - @mock.patch.object(VMDK_DRIVER, 'volumeops') - def test_unmanage(self, vops): - backing = 
mock.sentinel.backing - vops.get_backing.return_value = backing - - volume = self._create_volume_dict() - self._driver.unmanage(volume) - - vops.get_backing.assert_called_once_with(volume['name']) - vops.update_backing_extra_config.assert_called_once_with( - backing, {vmdk.EXTRA_CONFIG_VOLUME_ID_KEY: '', - volumeops.BACKING_UUID_KEY: ''}) - - @mock.patch('oslo_vmware.api.VMwareAPISession') - def test_session(self, apiSession): - self._session = None - - self._driver.session() - - config = self._driver.configuration - apiSession.assert_called_once_with( - config.vmware_host_ip, - config.vmware_host_username, - config.vmware_host_password, - config.vmware_api_retry_count, - config.vmware_task_poll_interval, - wsdl_loc=config.safe_get('vmware_wsdl_location'), - pbm_wsdl_loc=None, - port=config.vmware_host_port, - cacert=config.vmware_ca_file, - insecure=config.vmware_insecure, - pool_size=config.vmware_connection_pool_size, - op_id_prefix='c-vol') - - @mock.patch.object(VMDK_DRIVER, 'volumeops') - @mock.patch.object(VMDK_DRIVER, '_extend_backing') - def test_extend_volume_with_no_backing(self, extend_backing, vops): - vops.get_backing.return_value = None - - volume = self._create_volume_dict() - self._driver.extend_volume(volume, 2) - - self.assertFalse(extend_backing.called) - - @mock.patch.object(VMDK_DRIVER, 'volumeops') - @mock.patch.object(VMDK_DRIVER, '_extend_backing') - def test_extend_volume(self, extend_backing, vops): - backing = mock.sentinel.backing - vops.get_backing.return_value = backing - - volume = self._create_volume_dict() - new_size = 2 - self._driver.extend_volume(volume, new_size) - - extend_backing.assert_called_once_with(backing, new_size) - - @mock.patch.object(VMDK_DRIVER, 'volumeops') - @mock.patch.object(VMDK_DRIVER, '_extend_backing') - @mock.patch.object(VMDK_DRIVER, '_select_ds_for_volume') - def test_extend_volume_with_no_disk_space(self, select_ds_for_volume, - extend_backing, vops): - backing = mock.sentinel.backing - 
vops.get_backing.return_value = backing - - extend_backing.side_effect = [exceptions.NoDiskSpaceException, None] - - host = mock.sentinel.host - rp = mock.sentinel.rp - folder = mock.sentinel.folder - datastore = mock.sentinel.datastore - summary = mock.Mock(datastore=datastore) - select_ds_for_volume.return_value = (host, rp, folder, summary) - - volume = self._create_volume_dict() - new_size = 2 - self._driver.extend_volume(volume, new_size) - - create_params = {vmdk.CREATE_PARAM_DISK_SIZE: new_size} - select_ds_for_volume.assert_called_once_with( - volume, create_params=create_params) - - vops.relocate_backing.assert_called_once_with(backing, datastore, rp, - host) - vops.move_backing_to_folder(backing, folder) - - extend_backing_calls = [mock.call(backing, new_size), - mock.call(backing, new_size)] - self.assertEqual(extend_backing_calls, extend_backing.call_args_list) - - @mock.patch.object(VMDK_DRIVER, 'volumeops') - @mock.patch.object(VMDK_DRIVER, '_extend_backing') - def test_extend_volume_with_extend_backing_error( - self, extend_backing, vops): - backing = mock.sentinel.backing - vops.get_backing.return_value = backing - - extend_backing.side_effect = exceptions.VimException("Error") - - volume = self._create_volume_dict() - new_size = 2 - self.assertRaises(exceptions.VimException, self._driver.extend_volume, - volume, new_size) - extend_backing.assert_called_once_with(backing, new_size) - - @mock.patch.object(VMDK_DRIVER, 'volumeops') - @mock.patch.object(VMDK_DRIVER, '_get_volume_group_folder') - def test_accept_transfer(self, get_volume_group_folder, vops): - backing = mock.sentinel.backing - vops.get_backing.return_value = backing - - dc = mock.sentinel.dc - vops.get_dc.return_value = dc - - new_folder = mock.sentinel.new_folder - get_volume_group_folder.return_value = new_folder - - context = mock.sentinel.context - volume = self._create_volume_obj() - new_project = mock.sentinel.new_project - self._driver.accept_transfer(context, volume, 
mock.sentinel.new_user, - new_project) - - vops.get_backing.assert_called_once_with(volume.name) - vops.get_dc.assert_called_once_with(backing) - get_volume_group_folder.assert_called_once_with(dc, new_project) - vops.move_backing_to_folder.assert_called_once_with(backing, - new_folder) - - -@ddt.ddt -class ImageDiskTypeTest(test.TestCase): - """Unit tests for ImageDiskType.""" - - @ddt.data('thin', 'preallocated', 'streamOptimized', 'sparse') - def test_is_valid(self, image_disk_type): - self.assertTrue(vmdk.ImageDiskType.is_valid(image_disk_type)) - - def test_is_valid_with_invalid_type(self): - self.assertFalse(vmdk.ImageDiskType.is_valid('thick')) - - @ddt.data('thin', 'preallocated', 'streamOptimized', 'sparse') - def test_validate(self, image_disk_type): - vmdk.ImageDiskType.validate(image_disk_type) - - def test_validate_with_invalid_type(self): - self.assertRaises(cinder_exceptions.ImageUnacceptable, - vmdk.ImageDiskType.validate, - "thick") diff --git a/cinder/tests/unit/volume/drivers/vmware/test_vmware_volumeops.py b/cinder/tests/unit/volume/drivers/vmware/test_vmware_volumeops.py deleted file mode 100644 index 7b5599a45..000000000 --- a/cinder/tests/unit/volume/drivers/vmware/test_vmware_volumeops.py +++ /dev/null @@ -1,1826 +0,0 @@ -# Copyright (c) 2014 VMware, Inc. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -""" -Test suite for VMware VMDK driver volumeops module. 
-""" - -import ddt -import mock -from oslo_utils import units -from oslo_vmware import exceptions -from oslo_vmware import vim_util - -from cinder import test -from cinder.volume.drivers.vmware import exceptions as vmdk_exceptions -from cinder.volume.drivers.vmware import volumeops - - -@ddt.ddt -class VolumeOpsTestCase(test.TestCase): - """Unit tests for volumeops module.""" - - MAX_OBJECTS = 100 - - def setUp(self): - super(VolumeOpsTestCase, self).setUp() - self.session = mock.MagicMock() - self.vops = volumeops.VMwareVolumeOps(self.session, self.MAX_OBJECTS) - - def test_split_datastore_path(self): - test1 = '[datastore1] myfolder/mysubfolder/myvm.vmx' - (datastore, folder, file_name) = volumeops.split_datastore_path(test1) - self.assertEqual('datastore1', datastore) - self.assertEqual('myfolder/mysubfolder/', folder) - self.assertEqual('myvm.vmx', file_name) - - test2 = '[datastore2 ] myfolder/myvm.vmdk' - (datastore, folder, file_name) = volumeops.split_datastore_path(test2) - self.assertEqual('datastore2', datastore) - self.assertEqual('myfolder/', folder) - self.assertEqual('myvm.vmdk', file_name) - - test3 = 'myfolder/myvm.vmdk' - self.assertRaises(IndexError, volumeops.split_datastore_path, test3) - - def vm(self, val): - """Create a mock vm in retrieve result format.""" - vm = mock.MagicMock() - prop = mock.Mock(spec=object) - prop.val = val - vm.propSet = [prop] - return vm - - def test_get_backing(self): - name = 'mock-backing' - - # Test no result - self.session.invoke_api.return_value = None - result = self.vops.get_backing(name) - self.assertIsNone(result) - self.session.invoke_api.assert_called_once_with(vim_util, - 'get_objects', - self.session.vim, - 'VirtualMachine', - self.MAX_OBJECTS) - - # Test single result - vm = self.vm(name) - vm.obj = mock.sentinel.vm_obj - retrieve_result = mock.Mock(spec=object) - retrieve_result.objects = [vm] - self.session.invoke_api.return_value = retrieve_result - self.vops.cancel_retrieval = 
mock.Mock(spec=object) - result = self.vops.get_backing(name) - self.assertEqual(mock.sentinel.vm_obj, result) - self.session.invoke_api.assert_called_with(vim_util, 'get_objects', - self.session.vim, - 'VirtualMachine', - self.MAX_OBJECTS) - self.vops.cancel_retrieval.assert_called_once_with(retrieve_result) - - # Test multiple results - retrieve_result2 = mock.Mock(spec=object) - retrieve_result2.objects = [vm('1'), vm('2'), vm('3')] - self.session.invoke_api.return_value = retrieve_result2 - self.vops.continue_retrieval = mock.Mock(spec=object) - self.vops.continue_retrieval.return_value = retrieve_result - result = self.vops.get_backing(name) - self.assertEqual(mock.sentinel.vm_obj, result) - self.session.invoke_api.assert_called_with(vim_util, 'get_objects', - self.session.vim, - 'VirtualMachine', - self.MAX_OBJECTS) - self.vops.continue_retrieval.assert_called_once_with(retrieve_result2) - self.vops.cancel_retrieval.assert_called_with(retrieve_result) - - def test_delete_backing(self): - backing = mock.sentinel.backing - task = mock.sentinel.task - self.session.invoke_api.return_value = task - self.vops.delete_backing(backing) - self.session.invoke_api.assert_called_once_with(self.session.vim, - "Destroy_Task", - backing) - self.session.wait_for_task(task) - - def test_get_host(self): - instance = mock.sentinel.instance - host = mock.sentinel.host - self.session.invoke_api.return_value = host - result = self.vops.get_host(instance) - self.assertEqual(host, result) - self.session.invoke_api.assert_called_once_with(vim_util, - 'get_object_property', - self.session.vim, - instance, - 'runtime.host') - - def _host_runtime_info( - self, connection_state='connected', in_maintenance=False): - return mock.Mock(connectionState=connection_state, - inMaintenanceMode=in_maintenance) - - def test_get_hosts(self): - hosts = mock.sentinel.hosts - self.session.invoke_api.return_value = hosts - result = self.vops.get_hosts() - self.assertEqual(hosts, result) - 
self.session.invoke_api.assert_called_once_with(vim_util, - 'get_objects', - self.session.vim, - 'HostSystem', - self.MAX_OBJECTS) - - def test_continue_retrieval(self): - retrieve_result = mock.sentinel.retrieve_result - self.session.invoke_api.return_value = retrieve_result - result = self.vops.continue_retrieval(retrieve_result) - self.assertEqual(retrieve_result, result) - self.session.invoke_api.assert_called_once_with(vim_util, - 'continue_retrieval', - self.session.vim, - retrieve_result) - - def test_cancel_retrieval(self): - retrieve_result = mock.sentinel.retrieve_result - self.session.invoke_api.return_value = retrieve_result - result = self.vops.cancel_retrieval(retrieve_result) - self.assertIsNone(result) - self.session.invoke_api.assert_called_once_with(vim_util, - 'cancel_retrieval', - self.session.vim, - retrieve_result) - - def test_is_usable(self): - mount_info = mock.Mock(spec=object) - mount_info.accessMode = "readWrite" - mount_info.mounted = True - mount_info.accessible = True - self.assertTrue(self.vops._is_usable(mount_info)) - - del mount_info.mounted - self.assertTrue(self.vops._is_usable(mount_info)) - - mount_info.accessMode = "readonly" - self.assertFalse(self.vops._is_usable(mount_info)) - - mount_info.accessMode = "readWrite" - mount_info.mounted = False - self.assertFalse(self.vops._is_usable(mount_info)) - - mount_info.mounted = True - mount_info.accessible = False - self.assertFalse(self.vops._is_usable(mount_info)) - - del mount_info.accessible - self.assertFalse(self.vops._is_usable(mount_info)) - - def _create_host_mounts(self, access_mode, host, set_accessible=True, - is_accessible=True, mounted=True): - """Create host mount value of datastore with single mount info. 
- - :param access_mode: string specifying the read/write permission - :param set_accessible: specify whether accessible property - should be set - :param is_accessible: boolean specifying whether the datastore - is accessible to host - :param host: managed object reference of the connected - host - :return: list of host mount info - """ - mntInfo = mock.Mock(spec=object) - mntInfo.accessMode = access_mode - if set_accessible: - mntInfo.accessible = is_accessible - else: - del mntInfo.accessible - mntInfo.mounted = mounted - - host_mount = mock.Mock(spec=object) - host_mount.key = host - host_mount.mountInfo = mntInfo - host_mounts = mock.Mock(spec=object) - host_mounts.DatastoreHostMount = [host_mount] - - return host_mounts - - def test_get_connected_hosts(self): - with mock.patch.object(self.vops, 'get_summary') as get_summary: - datastore = mock.sentinel.datastore - summary = mock.Mock(spec=object) - get_summary.return_value = summary - - summary.accessible = False - hosts = self.vops.get_connected_hosts(datastore) - self.assertEqual([], hosts) - - summary.accessible = True - host = mock.Mock(spec=object) - host.value = mock.sentinel.host - host_mounts = self._create_host_mounts("readWrite", host) - self.session.invoke_api.return_value = host_mounts - hosts = self.vops.get_connected_hosts(datastore) - self.assertEqual([mock.sentinel.host], hosts) - self.session.invoke_api.assert_called_once_with( - vim_util, - 'get_object_property', - self.session.vim, - datastore, - 'host') - - del host_mounts.DatastoreHostMount - hosts = self.vops.get_connected_hosts(datastore) - self.assertEqual([], hosts) - - @mock.patch('cinder.volume.drivers.vmware.volumeops.VMwareVolumeOps.' 
- 'get_connected_hosts') - def test_is_datastore_accessible(self, get_connected_hosts): - host_1 = mock.sentinel.host_1 - host_2 = mock.sentinel.host_2 - get_connected_hosts.return_value = [host_1, host_2] - - ds = mock.sentinel.datastore - host = mock.Mock(value=mock.sentinel.host_1) - self.assertTrue(self.vops.is_datastore_accessible(ds, host)) - get_connected_hosts.assert_called_once_with(ds) - - @mock.patch('cinder.volume.drivers.vmware.volumeops.VMwareVolumeOps.' - 'get_connected_hosts') - def test_is_datastore_accessible_with_inaccessible(self, - get_connected_hosts): - host_1 = mock.sentinel.host_1 - get_connected_hosts.return_value = [host_1] - - ds = mock.sentinel.datastore - host = mock.Mock(value=mock.sentinel.host_2) - self.assertFalse(self.vops.is_datastore_accessible(ds, host)) - get_connected_hosts.assert_called_once_with(ds) - - def test_get_parent(self): - # Not recursive - child = mock.Mock(spec=object) - child._type = 'Parent' - ret = self.vops._get_parent(child, 'Parent') - self.assertEqual(child, ret) - - # Recursive - parent = mock.Mock(spec=object) - parent._type = 'Parent' - child = mock.Mock(spec=object) - child._type = 'Child' - self.session.invoke_api.return_value = parent - ret = self.vops._get_parent(child, 'Parent') - self.assertEqual(parent, ret) - self.session.invoke_api.assert_called_with(vim_util, - 'get_object_property', - self.session.vim, child, - 'parent') - - def test_get_dc(self): - # set up hierarchy of objects - dc = mock.Mock(spec=object) - dc._type = 'Datacenter' - o1 = mock.Mock(spec=object) - o1._type = 'mockType1' - o1.parent = dc - o2 = mock.Mock(spec=object) - o2._type = 'mockType2' - o2.parent = o1 - - # mock out invoke_api behaviour to fetch parent - def mock_invoke_api(vim_util, method, vim, the_object, arg): - return the_object.parent - - self.session.invoke_api.side_effect = mock_invoke_api - ret = self.vops.get_dc(o2) - self.assertEqual(dc, ret) - - # Clear side effects. 
- self.session.invoke_api.side_effect = None - - def test_get_vmfolder(self): - self.session.invoke_api.return_value = mock.sentinel.ret - ret = self.vops.get_vmfolder(mock.sentinel.dc) - self.assertEqual(mock.sentinel.ret, ret) - self.session.invoke_api.assert_called_once_with(vim_util, - 'get_object_property', - self.session.vim, - mock.sentinel.dc, - 'vmFolder') - - @mock.patch('cinder.volume.drivers.vmware.volumeops.VMwareVolumeOps.' - 'get_entity_name') - def test_get_child_folder(self, get_entity_name): - child_entity_1 = mock.Mock(_type='Folder') - child_entity_2 = mock.Mock(_type='foo') - child_entity_3 = mock.Mock(_type='Folder') - - prop_val = mock.Mock(ManagedObjectReference=[child_entity_1, - child_entity_2, - child_entity_3]) - self.session.invoke_api.return_value = prop_val - get_entity_name.side_effect = ['bar', '%2fcinder-volumes'] - - parent_folder = mock.sentinel.parent_folder - child_name = '/cinder-volumes' - ret = self.vops._get_child_folder(parent_folder, child_name) - - self.assertEqual(child_entity_3, ret) - self.session.invoke_api.assert_called_once_with( - vim_util, 'get_object_property', self.session.vim, parent_folder, - 'childEntity') - get_entity_name.assert_has_calls([mock.call(child_entity_1), - mock.call(child_entity_3)]) - - def test_create_folder(self): - folder = mock.sentinel.folder - self.session.invoke_api.return_value = folder - - parent_folder = mock.sentinel.parent_folder - child_folder_name = mock.sentinel.child_folder_name - ret = self.vops.create_folder(parent_folder, child_folder_name) - - self.assertEqual(folder, ret) - self.session.invoke_api.assert_called_once_with( - self.session.vim, 'CreateFolder', parent_folder, - name=child_folder_name) - - @mock.patch('cinder.volume.drivers.vmware.volumeops.VMwareVolumeOps.' 
- '_get_child_folder') - def test_create_folder_with_duplicate_name(self, get_child_folder): - self.session.invoke_api.side_effect = exceptions.DuplicateName - - folder = mock.sentinel.folder - get_child_folder.return_value = folder - - parent_folder = mock.sentinel.parent_folder - child_folder_name = mock.sentinel.child_folder_name - ret = self.vops.create_folder(parent_folder, child_folder_name) - - self.assertEqual(folder, ret) - self.session.invoke_api.assert_called_once_with( - self.session.vim, 'CreateFolder', parent_folder, - name=child_folder_name) - get_child_folder.assert_called_once_with(parent_folder, - child_folder_name) - - @mock.patch('cinder.volume.drivers.vmware.volumeops.VMwareVolumeOps.' - 'get_vmfolder') - @mock.patch('cinder.volume.drivers.vmware.volumeops.VMwareVolumeOps.' - 'create_folder') - def test_create_vm_inventory_folder(self, create_folder, get_vmfolder): - vm_folder_1 = mock.sentinel.vm_folder_1 - get_vmfolder.return_value = vm_folder_1 - - folder_1a = mock.sentinel.folder_1a - folder_1b = mock.sentinel.folder_1b - create_folder.side_effect = [folder_1a, folder_1b] - - datacenter_1 = mock.Mock(value='dc-1') - path_comp = ['a', 'b'] - ret = self.vops.create_vm_inventory_folder(datacenter_1, path_comp) - - self.assertEqual(folder_1b, ret) - get_vmfolder.assert_called_once_with(datacenter_1) - exp_calls = [mock.call(vm_folder_1, 'a'), mock.call(folder_1a, 'b')] - self.assertEqual(exp_calls, create_folder.call_args_list) - exp_cache = {'/dc-1': vm_folder_1, - '/dc-1/a': folder_1a, - '/dc-1/a/b': folder_1b} - self.assertEqual(exp_cache, self.vops._folder_cache) - - # Test cache - get_vmfolder.reset_mock() - create_folder.reset_mock() - - folder_1c = mock.sentinel.folder_1c - create_folder.side_effect = [folder_1c] - - path_comp = ['a', 'c'] - ret = self.vops.create_vm_inventory_folder(datacenter_1, path_comp) - - self.assertEqual(folder_1c, ret) - self.assertFalse(get_vmfolder.called) - exp_calls = [mock.call(folder_1a, 'c')] - 
self.assertEqual(exp_calls, create_folder.call_args_list) - exp_cache = {'/dc-1': vm_folder_1, - '/dc-1/a': folder_1a, - '/dc-1/a/b': folder_1b, - '/dc-1/a/c': folder_1c} - self.assertEqual(exp_cache, self.vops._folder_cache) - - # Test cache with different datacenter - get_vmfolder.reset_mock() - create_folder.reset_mock() - - vm_folder_2 = mock.sentinel.vm_folder_2 - get_vmfolder.return_value = vm_folder_2 - - folder_2a = mock.sentinel.folder_2a - folder_2b = mock.sentinel.folder_2b - create_folder.side_effect = [folder_2a, folder_2b] - - datacenter_2 = mock.Mock(value='dc-2') - path_comp = ['a', 'b'] - ret = self.vops.create_vm_inventory_folder(datacenter_2, path_comp) - - self.assertEqual(folder_2b, ret) - get_vmfolder.assert_called_once_with(datacenter_2) - exp_calls = [mock.call(vm_folder_2, 'a'), mock.call(folder_2a, 'b')] - self.assertEqual(exp_calls, create_folder.call_args_list) - exp_cache = {'/dc-1': vm_folder_1, - '/dc-1/a': folder_1a, - '/dc-1/a/b': folder_1b, - '/dc-1/a/c': folder_1c, - '/dc-2': vm_folder_2, - '/dc-2/a': folder_2a, - '/dc-2/a/b': folder_2b - } - self.assertEqual(exp_cache, self.vops._folder_cache) - - def test_create_disk_backing_thin(self): - backing = mock.Mock() - del backing.eagerlyScrub - cf = self.session.vim.client.factory - cf.create.return_value = backing - - disk_type = 'thin' - ret = self.vops._create_disk_backing(disk_type, None) - - self.assertEqual(backing, ret) - self.assertIsInstance(ret.thinProvisioned, bool) - self.assertTrue(ret.thinProvisioned) - self.assertEqual('', ret.fileName) - self.assertEqual('persistent', ret.diskMode) - - def test_create_disk_backing_thick(self): - backing = mock.Mock() - del backing.eagerlyScrub - del backing.thinProvisioned - cf = self.session.vim.client.factory - cf.create.return_value = backing - - disk_type = 'thick' - ret = self.vops._create_disk_backing(disk_type, None) - - self.assertEqual(backing, ret) - self.assertEqual('', ret.fileName) - self.assertEqual('persistent', 
ret.diskMode) - - def test_create_disk_backing_eager_zeroed_thick(self): - backing = mock.Mock() - del backing.thinProvisioned - cf = self.session.vim.client.factory - cf.create.return_value = backing - - disk_type = 'eagerZeroedThick' - ret = self.vops._create_disk_backing(disk_type, None) - - self.assertEqual(backing, ret) - self.assertIsInstance(ret.eagerlyScrub, bool) - self.assertTrue(ret.eagerlyScrub) - self.assertEqual('', ret.fileName) - self.assertEqual('persistent', ret.diskMode) - - def test_create_virtual_disk_config_spec(self): - - cf = self.session.vim.client.factory - cf.create.side_effect = lambda *args: mock.Mock() - - size_kb = units.Ki - controller_key = 200 - disk_type = 'thick' - profile_id = mock.sentinel.profile_id - spec = self.vops._create_virtual_disk_config_spec(size_kb, - disk_type, - controller_key, - profile_id, - None) - - cf.create.side_effect = None - self.assertEqual('add', spec.operation) - self.assertEqual('create', spec.fileOperation) - device = spec.device - self.assertEqual(size_kb, device.capacityInKB) - self.assertEqual(-101, device.key) - self.assertEqual(0, device.unitNumber) - self.assertEqual(controller_key, device.controllerKey) - backing = device.backing - self.assertEqual('', backing.fileName) - self.assertEqual('persistent', backing.diskMode) - disk_profiles = spec.profile - self.assertEqual(1, len(disk_profiles)) - self.assertEqual(profile_id, disk_profiles[0].profileId) - - def test_create_specs_for_ide_disk_add(self): - factory = self.session.vim.client.factory - factory.create.side_effect = lambda *args: mock.Mock() - - size_kb = 1 - disk_type = 'thin' - adapter_type = 'ide' - profile_id = mock.sentinel.profile_id - ret = self.vops._create_specs_for_disk_add(size_kb, disk_type, - adapter_type, profile_id) - - factory.create.side_effect = None - self.assertEqual(1, len(ret)) - self.assertEqual(units.Ki, ret[0].device.capacityInKB) - self.assertEqual(200, ret[0].device.controllerKey) - expected = 
[mock.call.create('ns0:VirtualDeviceConfigSpec'), - mock.call.create('ns0:VirtualDisk'), - mock.call.create('ns0:VirtualDiskFlatVer2BackingInfo')] - factory.create.assert_has_calls(expected, any_order=True) - - def test_create_specs_for_scsi_disk_add(self): - factory = self.session.vim.client.factory - factory.create.side_effect = lambda *args: mock.Mock() - - size_kb = 2 * units.Ki - disk_type = 'thin' - adapter_type = 'lsiLogicsas' - profile_id = mock.sentinel.profile_id - ret = self.vops._create_specs_for_disk_add(size_kb, disk_type, - adapter_type, profile_id) - - factory.create.side_effect = None - self.assertEqual(2, len(ret)) - self.assertEqual('noSharing', ret[1].device.sharedBus) - self.assertEqual(size_kb, ret[0].device.capacityInKB) - expected = [mock.call.create('ns0:VirtualLsiLogicSASController'), - mock.call.create('ns0:VirtualDeviceConfigSpec'), - mock.call.create('ns0:VirtualDisk'), - mock.call.create('ns0:VirtualDiskFlatVer2BackingInfo'), - mock.call.create('ns0:VirtualDeviceConfigSpec')] - factory.create.assert_has_calls(expected, any_order=True) - - def test_get_create_spec_disk_less(self): - factory = self.session.vim.client.factory - factory.create.side_effect = lambda *args: mock.Mock() - - name = mock.sentinel.name - ds_name = mock.sentinel.ds_name - profile_id = mock.sentinel.profile_id - option_key = mock.sentinel.key - option_value = mock.sentinel.value - extra_config = {option_key: option_value, - volumeops.BACKING_UUID_KEY: mock.sentinel.uuid} - ret = self.vops._get_create_spec_disk_less(name, ds_name, profile_id, - extra_config) - - factory.create.side_effect = None - self.assertEqual(name, ret.name) - self.assertEqual(mock.sentinel.uuid, ret.instanceUuid) - self.assertEqual('[%s]' % ds_name, ret.files.vmPathName) - self.assertEqual("vmx-08", ret.version) - self.assertEqual(profile_id, ret.vmProfile[0].profileId) - self.assertEqual(1, len(ret.extraConfig)) - self.assertEqual(option_key, ret.extraConfig[0].key) - 
self.assertEqual(option_value, ret.extraConfig[0].value) - expected = [mock.call.create('ns0:VirtualMachineFileInfo'), - mock.call.create('ns0:VirtualMachineConfigSpec'), - mock.call.create('ns0:VirtualMachineDefinedProfileSpec'), - mock.call.create('ns0:OptionValue')] - factory.create.assert_has_calls(expected, any_order=True) - - @mock.patch('cinder.volume.drivers.vmware.volumeops.VMwareVolumeOps.' - '_get_create_spec_disk_less') - @mock.patch('cinder.volume.drivers.vmware.volumeops.VMwareVolumeOps.' - '_create_specs_for_disk_add') - def test_get_create_spec(self, create_specs_for_disk_add, - get_create_spec_disk_less): - name = 'vol-1' - size_kb = 1024 - disk_type = 'thin' - ds_name = 'nfs-1' - profile_id = mock.sentinel.profile_id - adapter_type = 'busLogic' - extra_config = mock.sentinel.extra_config - - self.vops.get_create_spec(name, size_kb, disk_type, ds_name, - profile_id, adapter_type, extra_config) - - get_create_spec_disk_less.assert_called_once_with( - name, ds_name, profileId=profile_id, extra_config=extra_config) - create_specs_for_disk_add.assert_called_once_with( - size_kb, disk_type, adapter_type, profile_id) - - @mock.patch('cinder.volume.drivers.vmware.volumeops.VMwareVolumeOps.' 
- 'get_create_spec') - def test_create_backing(self, get_create_spec): - create_spec = mock.sentinel.create_spec - get_create_spec.return_value = create_spec - task = mock.sentinel.task - self.session.invoke_api.return_value = task - task_info = mock.Mock(spec=object) - task_info.result = mock.sentinel.result - self.session.wait_for_task.return_value = task_info - name = 'backing_name' - size_kb = mock.sentinel.size_kb - disk_type = mock.sentinel.disk_type - adapter_type = mock.sentinel.adapter_type - folder = mock.sentinel.folder - resource_pool = mock.sentinel.resource_pool - host = mock.sentinel.host - ds_name = mock.sentinel.ds_name - profile_id = mock.sentinel.profile_id - extra_config = mock.sentinel.extra_config - ret = self.vops.create_backing(name, size_kb, disk_type, folder, - resource_pool, host, ds_name, - profile_id, adapter_type, extra_config) - self.assertEqual(mock.sentinel.result, ret) - get_create_spec.assert_called_once_with( - name, size_kb, disk_type, ds_name, profile_id=profile_id, - adapter_type=adapter_type, extra_config=extra_config) - self.session.invoke_api.assert_called_once_with(self.session.vim, - 'CreateVM_Task', - folder, - config=create_spec, - pool=resource_pool, - host=host) - self.session.wait_for_task.assert_called_once_with(task) - - @mock.patch('cinder.volume.drivers.vmware.volumeops.VMwareVolumeOps.' 
- '_get_create_spec_disk_less') - def test_create_backing_disk_less(self, get_create_spec_disk_less): - create_spec = mock.sentinel.create_spec - get_create_spec_disk_less.return_value = create_spec - task = mock.sentinel.task - self.session.invoke_api.return_value = task - task_info = mock.Mock(spec=object) - task_info.result = mock.sentinel.result - self.session.wait_for_task.return_value = task_info - name = 'backing_name' - folder = mock.sentinel.folder - resource_pool = mock.sentinel.resource_pool - host = mock.sentinel.host - ds_name = mock.sentinel.ds_name - profile_id = mock.sentinel.profile_id - extra_config = mock.sentinel.extra_config - ret = self.vops.create_backing_disk_less(name, folder, resource_pool, - host, ds_name, profile_id, - extra_config) - - self.assertEqual(mock.sentinel.result, ret) - get_create_spec_disk_less.assert_called_once_with( - name, ds_name, profileId=profile_id, extra_config=extra_config) - self.session.invoke_api.assert_called_once_with(self.session.vim, - 'CreateVM_Task', - folder, - config=create_spec, - pool=resource_pool, - host=host) - self.session.wait_for_task.assert_called_once_with(task) - - def test_get_datastore(self): - backing = mock.sentinel.backing - datastore = mock.Mock(spec=object) - datastore.ManagedObjectReference = [mock.sentinel.ds] - self.session.invoke_api.return_value = datastore - ret = self.vops.get_datastore(backing) - self.assertEqual(mock.sentinel.ds, ret) - self.session.invoke_api.assert_called_once_with(vim_util, - 'get_object_property', - self.session.vim, - backing, 'datastore') - - def test_get_summary(self): - datastore = mock.sentinel.datastore - summary = mock.sentinel.summary - self.session.invoke_api.return_value = summary - ret = self.vops.get_summary(datastore) - self.assertEqual(summary, ret) - self.session.invoke_api.assert_called_once_with(vim_util, - 'get_object_property', - self.session.vim, - datastore, - 'summary') - - def test_get_relocate_spec(self): - - delete_disk_attribute = 
True - - def _create_side_effect(type): - obj = mock.Mock() - if type == "ns0:VirtualDiskFlatVer2BackingInfo": - del obj.eagerlyScrub - elif (type == "ns0:VirtualMachineRelocateSpec" and - delete_disk_attribute): - del obj.disk - else: - pass - return obj - - factory = self.session.vim.client.factory - factory.create.side_effect = _create_side_effect - - datastore = mock.sentinel.datastore - resource_pool = mock.sentinel.resource_pool - host = mock.sentinel.host - disk_move_type = mock.sentinel.disk_move_type - ret = self.vops._get_relocate_spec(datastore, resource_pool, host, - disk_move_type) - - self.assertEqual(datastore, ret.datastore) - self.assertEqual(resource_pool, ret.pool) - self.assertEqual(host, ret.host) - self.assertEqual(disk_move_type, ret.diskMoveType) - - # Test with disk locator. - delete_disk_attribute = False - disk_type = 'thin' - disk_device = mock.Mock() - ret = self.vops._get_relocate_spec(datastore, resource_pool, host, - disk_move_type, disk_type, - disk_device) - - factory.create.side_effect = None - self.assertEqual(datastore, ret.datastore) - self.assertEqual(resource_pool, ret.pool) - self.assertEqual(host, ret.host) - self.assertEqual(disk_move_type, ret.diskMoveType) - self.assertIsInstance(ret.disk, list) - self.assertEqual(1, len(ret.disk)) - disk_locator = ret.disk[0] - self.assertEqual(datastore, disk_locator.datastore) - self.assertEqual(disk_device.key, disk_locator.diskId) - backing = disk_locator.diskBackingInfo - self.assertIsInstance(backing.thinProvisioned, bool) - self.assertTrue(backing.thinProvisioned) - self.assertEqual('', backing.fileName) - self.assertEqual('persistent', backing.diskMode) - - @mock.patch('cinder.volume.drivers.vmware.volumeops.VMwareVolumeOps.' - '_get_disk_device') - @mock.patch('cinder.volume.drivers.vmware.volumeops.VMwareVolumeOps.' 
- '_get_relocate_spec') - def test_relocate_backing(self, get_relocate_spec, get_disk_device): - disk_device = mock.sentinel.disk_device - get_disk_device.return_value = disk_device - - spec = mock.sentinel.relocate_spec - get_relocate_spec.return_value = spec - - task = mock.sentinel.task - self.session.invoke_api.return_value = task - - backing = mock.sentinel.backing - datastore = mock.sentinel.datastore - resource_pool = mock.sentinel.resource_pool - host = mock.sentinel.host - disk_type = mock.sentinel.disk_type - self.vops.relocate_backing(backing, datastore, resource_pool, host, - disk_type) - # Verify calls - disk_move_type = 'moveAllDiskBackingsAndAllowSharing' - get_disk_device.assert_called_once_with(backing) - get_relocate_spec.assert_called_once_with(datastore, resource_pool, - host, disk_move_type, - disk_type, disk_device) - self.session.invoke_api.assert_called_once_with(self.session.vim, - 'RelocateVM_Task', - backing, - spec=spec) - self.session.wait_for_task.assert_called_once_with(task) - - def test_move_backing_to_folder(self): - task = mock.sentinel.task - self.session.invoke_api.return_value = task - backing = mock.sentinel.backing - folder = mock.sentinel.folder - self.vops.move_backing_to_folder(backing, folder) - # Verify calls - self.session.invoke_api.assert_called_once_with(self.session.vim, - 'MoveIntoFolder_Task', - folder, - list=[backing]) - self.session.wait_for_task.assert_called_once_with(task) - - def test_create_snapshot_operation(self): - task = mock.sentinel.task - self.session.invoke_api.return_value = task - task_info = mock.Mock(spec=object) - task_info.result = mock.sentinel.result - self.session.wait_for_task.return_value = task_info - backing = mock.sentinel.backing - name = mock.sentinel.name - desc = mock.sentinel.description - quiesce = True - ret = self.vops.create_snapshot(backing, name, desc, quiesce) - self.assertEqual(mock.sentinel.result, ret) - self.session.invoke_api.assert_called_once_with(self.session.vim, 
- 'CreateSnapshot_Task', - backing, name=name, - description=desc, - memory=False, - quiesce=quiesce) - self.session.wait_for_task.assert_called_once_with(task) - - def test_get_snapshot_from_tree(self): - volops = volumeops.VMwareVolumeOps - name = mock.sentinel.name - # Test snapshot == 'None' - ret = volops._get_snapshot_from_tree(name, None) - self.assertIsNone(ret) - # Test root == snapshot - snapshot = mock.sentinel.snapshot - node = mock.Mock(spec=object) - node.name = name - node.snapshot = snapshot - ret = volops._get_snapshot_from_tree(name, node) - self.assertEqual(snapshot, ret) - # Test root.childSnapshotList == None - root = mock.Mock(spec=object) - root.name = 'root' - del root.childSnapshotList - ret = volops._get_snapshot_from_tree(name, root) - self.assertIsNone(ret) - # Test root.child == snapshot - root.childSnapshotList = [node] - ret = volops._get_snapshot_from_tree(name, root) - self.assertEqual(snapshot, ret) - - def test_get_snapshot(self): - # build out the root snapshot tree - snapshot_name = mock.sentinel.snapshot_name - snapshot = mock.sentinel.snapshot - root = mock.Mock(spec=object) - root.name = 'root' - node = mock.Mock(spec=object) - node.name = snapshot_name - node.snapshot = snapshot - root.childSnapshotList = [node] - # Test rootSnapshotList is not None - snapshot_tree = mock.Mock(spec=object) - snapshot_tree.rootSnapshotList = [root] - self.session.invoke_api.return_value = snapshot_tree - backing = mock.sentinel.backing - ret = self.vops.get_snapshot(backing, snapshot_name) - self.assertEqual(snapshot, ret) - self.session.invoke_api.assert_called_with(vim_util, - 'get_object_property', - self.session.vim, - backing, - 'snapshot') - # Test rootSnapshotList == None - snapshot_tree.rootSnapshotList = None - ret = self.vops.get_snapshot(backing, snapshot_name) - self.assertIsNone(ret) - self.session.invoke_api.assert_called_with(vim_util, - 'get_object_property', - self.session.vim, - backing, - 'snapshot') - - def 
test_snapshot_exists(self): - backing = mock.sentinel.backing - invoke_api = self.session.invoke_api - invoke_api.return_value = None - - self.assertFalse(self.vops.snapshot_exists(backing)) - invoke_api.assert_called_once_with(vim_util, - 'get_object_property', - self.session.vim, - backing, - 'snapshot') - - snapshot = mock.Mock() - invoke_api.return_value = snapshot - snapshot.rootSnapshotList = None - self.assertFalse(self.vops.snapshot_exists(backing)) - - snapshot.rootSnapshotList = [mock.Mock()] - self.assertTrue(self.vops.snapshot_exists(backing)) - - def test_delete_snapshot(self): - backing = mock.sentinel.backing - snapshot_name = mock.sentinel.snapshot_name - # Test snapshot is None - with mock.patch.object(self.vops, 'get_snapshot') as get_snapshot: - get_snapshot.return_value = None - self.vops.delete_snapshot(backing, snapshot_name) - get_snapshot.assert_called_once_with(backing, snapshot_name) - # Test snapshot is not None - snapshot = mock.sentinel.snapshot - task = mock.sentinel.task - invoke_api = self.session.invoke_api - invoke_api.return_value = task - with mock.patch.object(self.vops, 'get_snapshot') as get_snapshot: - get_snapshot.return_value = snapshot - self.vops.delete_snapshot(backing, snapshot_name) - get_snapshot.assert_called_with(backing, snapshot_name) - invoke_api.assert_called_once_with(self.session.vim, - 'RemoveSnapshot_Task', - snapshot, removeChildren=False) - self.session.wait_for_task.assert_called_once_with(task) - - def test_get_folder(self): - folder = mock.sentinel.folder - backing = mock.sentinel.backing - with mock.patch.object(self.vops, '_get_parent') as get_parent: - get_parent.return_value = folder - ret = self.vops._get_folder(backing) - self.assertEqual(folder, ret) - get_parent.assert_called_once_with(backing, 'Folder') - - def _verify_extra_config(self, option_values, key, value): - self.assertEqual(1, len(option_values)) - self.assertEqual(key, option_values[0].key) - self.assertEqual(value, 
option_values[0].value) - - @mock.patch('cinder.volume.drivers.vmware.volumeops.VMwareVolumeOps.' - '_get_relocate_spec') - @mock.patch('cinder.volume.drivers.vmware.volumeops.VMwareVolumeOps.' - '_get_disk_device') - def test_get_clone_spec(self, get_disk_device, get_relocate_spec): - factory = self.session.vim.client.factory - factory.create.side_effect = lambda *args: mock.Mock() - relocate_spec = mock.sentinel.relocate_spec - get_relocate_spec.return_value = relocate_spec - - # Test with empty disk type. - datastore = mock.sentinel.datastore - disk_move_type = mock.sentinel.disk_move_type - snapshot = mock.sentinel.snapshot - disk_type = None - backing = mock.sentinel.backing - host = mock.sentinel.host - rp = mock.sentinel.rp - key = mock.sentinel.key - value = mock.sentinel.value - extra_config = {key: value, - volumeops.BACKING_UUID_KEY: mock.sentinel.uuid} - ret = self.vops._get_clone_spec(datastore, disk_move_type, snapshot, - backing, disk_type, host, rp, - extra_config) - - self.assertEqual(relocate_spec, ret.location) - self.assertFalse(ret.powerOn) - self.assertFalse(ret.template) - self.assertEqual(snapshot, ret.snapshot) - self.assertEqual(mock.sentinel.uuid, ret.config.instanceUuid) - get_relocate_spec.assert_called_once_with(datastore, rp, host, - disk_move_type, disk_type, - None) - self._verify_extra_config(ret.config.extraConfig, key, value) - - # Test with non-empty disk type. 
- disk_device = mock.sentinel.disk_device - get_disk_device.return_value = disk_device - - disk_type = 'thin' - ret = self.vops._get_clone_spec(datastore, disk_move_type, snapshot, - backing, disk_type, host, rp, - extra_config) - - factory.create.side_effect = None - self.assertEqual(relocate_spec, ret.location) - self.assertFalse(ret.powerOn) - self.assertFalse(ret.template) - self.assertEqual(snapshot, ret.snapshot) - get_disk_device.assert_called_once_with(backing) - get_relocate_spec.assert_called_with(datastore, rp, host, - disk_move_type, disk_type, - disk_device) - self._verify_extra_config(ret.config.extraConfig, key, value) - - @mock.patch('cinder.volume.drivers.vmware.volumeops.VMwareVolumeOps.' - '_get_folder') - @mock.patch('cinder.volume.drivers.vmware.volumeops.VMwareVolumeOps.' - '_get_clone_spec') - def _test_clone_backing( - self, clone_type, folder, get_clone_spec, get_folder): - backing_folder = mock.sentinel.backing_folder - get_folder.return_value = backing_folder - - clone_spec = mock.sentinel.clone_spec - get_clone_spec.return_value = clone_spec - - task = mock.sentinel.task - self.session.invoke_api.return_value = task - - clone = mock.sentinel.clone - self.session.wait_for_task.return_value = mock.Mock(result=clone) - - name = mock.sentinel.name - backing = mock.sentinel.backing - snapshot = mock.sentinel.snapshot - datastore = mock.sentinel.datastore - disk_type = mock.sentinel.disk_type - host = mock.sentinel.host - resource_pool = mock.sentinel.resource_pool - extra_config = mock.sentinel.extra_config - ret = self.vops.clone_backing( - name, backing, snapshot, clone_type, datastore, - disk_type=disk_type, host=host, resource_pool=resource_pool, - extra_config=extra_config, folder=folder) - - if folder: - self.assertFalse(get_folder.called) - else: - get_folder.assert_called_once_with(backing) - - if clone_type == 'linked': - exp_disk_move_type = 'createNewChildDiskBacking' - else: - exp_disk_move_type = 
'moveAllDiskBackingsAndDisallowSharing' - get_clone_spec.assert_called_once_with( - datastore, exp_disk_move_type, snapshot, backing, disk_type, - host=host, resource_pool=resource_pool, extra_config=extra_config) - - exp_folder = folder if folder else backing_folder - self.session.invoke_api.assert_called_once_with( - self.session.vim, 'CloneVM_Task', backing, folder=exp_folder, - name=name, spec=clone_spec) - - self.session.wait_for_task.assert_called_once_with(task) - self.assertEqual(clone, ret) - - @ddt.data('linked', 'full') - def test_clone_backing(self, clone_type): - self._test_clone_backing(clone_type, mock.sentinel.folder) - - def test_clone_backing_with_empty_folder(self): - self._test_clone_backing('linked', None) - - @mock.patch('cinder.volume.drivers.vmware.volumeops.VMwareVolumeOps.' - '_create_specs_for_disk_add') - def test_attach_disk_to_backing(self, create_spec): - reconfig_spec = mock.Mock() - self.session.vim.client.factory.create.return_value = reconfig_spec - disk_add_config_specs = mock.Mock() - create_spec.return_value = disk_add_config_specs - task = mock.Mock() - self.session.invoke_api.return_value = task - - backing = mock.Mock() - size_in_kb = units.Ki - disk_type = "thin" - adapter_type = "ide" - profile_id = mock.sentinel.profile_id - vmdk_ds_file_path = mock.sentinel.vmdk_ds_file_path - self.vops.attach_disk_to_backing(backing, size_in_kb, disk_type, - adapter_type, profile_id, - vmdk_ds_file_path) - - self.assertEqual(disk_add_config_specs, reconfig_spec.deviceChange) - create_spec.assert_called_once_with( - size_in_kb, disk_type, adapter_type, profile_id, - vmdk_ds_file_path=vmdk_ds_file_path) - self.session.invoke_api.assert_called_once_with(self.session.vim, - "ReconfigVM_Task", - backing, - spec=reconfig_spec) - self.session.wait_for_task.assert_called_once_with(task) - - def test_create_spec_for_disk_remove(self): - disk_spec = mock.Mock() - self.session.vim.client.factory.create.return_value = disk_spec - - disk_device = 
mock.sentinel.disk_device - self.vops._create_spec_for_disk_remove(disk_device) - - self.session.vim.client.factory.create.assert_called_once_with( - 'ns0:VirtualDeviceConfigSpec') - self.assertEqual('remove', disk_spec.operation) - self.assertEqual(disk_device, disk_spec.device) - - @mock.patch('cinder.volume.drivers.vmware.volumeops.VMwareVolumeOps.' - '_create_spec_for_disk_remove') - @mock.patch('cinder.volume.drivers.vmware.volumeops.VMwareVolumeOps.' - '_reconfigure_backing') - def test_detach_disk_from_backing(self, reconfigure_backing, create_spec): - disk_spec = mock.sentinel.disk_spec - create_spec.return_value = disk_spec - - reconfig_spec = mock.Mock() - self.session.vim.client.factory.create.return_value = reconfig_spec - - backing = mock.sentinel.backing - disk_device = mock.sentinel.disk_device - self.vops.detach_disk_from_backing(backing, disk_device) - - create_spec.assert_called_once_with(disk_device) - self.session.vim.client.factory.create.assert_called_once_with( - 'ns0:VirtualMachineConfigSpec') - self.assertEqual([disk_spec], reconfig_spec.deviceChange) - reconfigure_backing.assert_called_once_with(backing, reconfig_spec) - - def test_rename_backing(self): - task = mock.sentinel.task - self.session.invoke_api.return_value = task - - backing = mock.sentinel.backing - new_name = mock.sentinel.new_name - self.vops.rename_backing(backing, new_name) - - self.session.invoke_api.assert_called_once_with(self.session.vim, - "Rename_Task", - backing, - newName=new_name) - self.session.wait_for_task.assert_called_once_with(task) - - @mock.patch('cinder.volume.drivers.vmware.volumeops.VMwareVolumeOps.' - '_get_disk_device') - @mock.patch('cinder.volume.drivers.vmware.volumeops.VMwareVolumeOps.' 
- '_reconfigure_backing') - def test_update_backing_disk_uuid(self, reconfigure_backing, - get_disk_device): - disk_spec = mock.Mock() - reconfig_spec = mock.Mock() - self.session.vim.client.factory.create.side_effect = [disk_spec, - reconfig_spec] - - disk_device = mock.Mock() - get_disk_device.return_value = disk_device - - self.vops.update_backing_disk_uuid(mock.sentinel.backing, - mock.sentinel.disk_uuid) - - get_disk_device.assert_called_once_with(mock.sentinel.backing) - self.assertEqual(mock.sentinel.disk_uuid, disk_device.backing.uuid) - self.assertEqual('edit', disk_spec.operation) - self.assertEqual(disk_device, disk_spec.device) - self.assertEqual([disk_spec], reconfig_spec.deviceChange) - reconfigure_backing.assert_called_once_with(mock.sentinel.backing, - reconfig_spec) - exp_factory_create_calls = [mock.call('ns0:VirtualDeviceConfigSpec'), - mock.call('ns0:VirtualMachineConfigSpec')] - self.assertEqual(exp_factory_create_calls, - self.session.vim.client.factory.create.call_args_list) - - @mock.patch('cinder.volume.drivers.vmware.volumeops.VMwareVolumeOps.' - '_get_extra_config_option_values') - @mock.patch('cinder.volume.drivers.vmware.volumeops.VMwareVolumeOps.' 
- '_reconfigure_backing') - def test_update_backing_extra_config(self, - reconfigure_backing, - get_extra_config_option_values): - reconfig_spec = mock.Mock() - self.session.vim.client.factory.create.return_value = reconfig_spec - - option_values = mock.sentinel.option_values - get_extra_config_option_values.return_value = option_values - - backing = mock.sentinel.backing - option_key = mock.sentinel.key - option_value = mock.sentinel.value - extra_config = {option_key: option_value, - volumeops.BACKING_UUID_KEY: mock.sentinel.uuid} - self.vops.update_backing_extra_config(backing, extra_config) - - get_extra_config_option_values.assert_called_once_with( - {option_key: option_value}) - self.assertEqual(mock.sentinel.uuid, reconfig_spec.instanceUuid) - self.assertEqual(option_values, reconfig_spec.extraConfig) - reconfigure_backing.assert_called_once_with(backing, reconfig_spec) - - @mock.patch('cinder.volume.drivers.vmware.volumeops.VMwareVolumeOps.' - '_reconfigure_backing') - def test_update_backing_uuid(self, reconfigure_backing): - reconfig_spec = mock.Mock() - self.session.vim.client.factory.create.return_value = reconfig_spec - - backing = mock.sentinel.backing - uuid = mock.sentinel.uuid - self.vops.update_backing_uuid(backing, uuid) - - self.assertEqual(mock.sentinel.uuid, reconfig_spec.instanceUuid) - reconfigure_backing.assert_called_once_with(backing, reconfig_spec) - - @mock.patch('cinder.volume.drivers.vmware.volumeops.VMwareVolumeOps.' - '_get_disk_device') - @mock.patch('cinder.volume.drivers.vmware.volumeops.VMwareVolumeOps.' 
- '_reconfigure_backing') - def test_change_backing_profile_to_empty_profile( - self, reconfigure_backing, get_disk_device): - reconfig_spec = mock.Mock() - empty_profile_spec = mock.sentinel.empty_profile_spec - disk_spec = mock.Mock() - self.session.vim.client.factory.create.side_effect = [ - empty_profile_spec, reconfig_spec, disk_spec] - - disk_device = mock.sentinel.disk_device - get_disk_device.return_value = disk_device - - backing = mock.sentinel.backing - self.vops.change_backing_profile(backing, None) - - self.assertEqual([empty_profile_spec], reconfig_spec.vmProfile) - get_disk_device.assert_called_once_with(backing) - self.assertEqual(disk_device, disk_spec.device) - self.assertEqual('edit', disk_spec.operation) - self.assertEqual([empty_profile_spec], disk_spec.profile) - self.assertEqual([disk_spec], reconfig_spec.deviceChange) - reconfigure_backing.assert_called_once_with(backing, reconfig_spec) - - @mock.patch('cinder.volume.drivers.vmware.volumeops.VMwareVolumeOps.' - '_get_disk_device') - @mock.patch('cinder.volume.drivers.vmware.volumeops.VMwareVolumeOps.' 
- '_reconfigure_backing') - def test_change_backing_profile( - self, reconfigure_backing, get_disk_device): - reconfig_spec = mock.Mock() - profile_spec = mock.Mock() - disk_spec = mock.Mock() - self.session.vim.client.factory.create.side_effect = [ - profile_spec, reconfig_spec, disk_spec] - - disk_device = mock.sentinel.disk_device - get_disk_device.return_value = disk_device - - backing = mock.sentinel.backing - unique_id = mock.sentinel.unique_id - profile_id = mock.Mock(uniqueId=unique_id) - self.vops.change_backing_profile(backing, profile_id) - - self.assertEqual(unique_id, profile_spec.profileId) - self.assertEqual([profile_spec], reconfig_spec.vmProfile) - get_disk_device.assert_called_once_with(backing) - self.assertEqual(disk_device, disk_spec.device) - self.assertEqual('edit', disk_spec.operation) - self.assertEqual([profile_spec], disk_spec.profile) - self.assertEqual([disk_spec], reconfig_spec.deviceChange) - reconfigure_backing.assert_called_once_with(backing, reconfig_spec) - - def test_delete_file(self): - file_mgr = mock.sentinel.file_manager - self.session.vim.service_content.fileManager = file_mgr - task = mock.sentinel.task - invoke_api = self.session.invoke_api - invoke_api.return_value = task - # Test delete file - file_path = mock.sentinel.file_path - datacenter = mock.sentinel.datacenter - self.vops.delete_file(file_path, datacenter) - # verify calls - invoke_api.assert_called_once_with(self.session.vim, - 'DeleteDatastoreFile_Task', - file_mgr, - name=file_path, - datacenter=datacenter) - self.session.wait_for_task.assert_called_once_with(task) - - def test_create_datastore_folder(self): - file_manager = mock.sentinel.file_manager - self.session.vim.service_content.fileManager = file_manager - invoke_api = self.session.invoke_api - - ds_name = "nfs" - folder_path = "test/" - datacenter = mock.sentinel.datacenter - - self.vops.create_datastore_folder(ds_name, folder_path, datacenter) - invoke_api.assert_called_once_with(self.session.vim, - 
'MakeDirectory', - file_manager, - name="[nfs] test/", - datacenter=datacenter) - - def test_create_datastore_folder_with_existing_folder(self): - file_manager = mock.sentinel.file_manager - self.session.vim.service_content.fileManager = file_manager - invoke_api = self.session.invoke_api - invoke_api.side_effect = exceptions.FileAlreadyExistsException - - ds_name = "nfs" - folder_path = "test/" - datacenter = mock.sentinel.datacenter - - self.vops.create_datastore_folder(ds_name, folder_path, datacenter) - invoke_api.assert_called_once_with(self.session.vim, - 'MakeDirectory', - file_manager, - name="[nfs] test/", - datacenter=datacenter) - invoke_api.side_effect = None - - def test_create_datastore_folder_with_invoke_api_error(self): - file_manager = mock.sentinel.file_manager - self.session.vim.service_content.fileManager = file_manager - invoke_api = self.session.invoke_api - invoke_api.side_effect = exceptions.VimFaultException( - ["FileFault"], "error") - - ds_name = "nfs" - folder_path = "test/" - datacenter = mock.sentinel.datacenter - - self.assertRaises(exceptions.VimFaultException, - self.vops.create_datastore_folder, - ds_name, - folder_path, - datacenter) - invoke_api.assert_called_once_with(self.session.vim, - 'MakeDirectory', - file_manager, - name="[nfs] test/", - datacenter=datacenter) - invoke_api.side_effect = None - - def test_get_path_name(self): - path = mock.Mock(spec=object) - path_name = mock.sentinel.vm_path_name - path.vmPathName = path_name - invoke_api = self.session.invoke_api - invoke_api.return_value = path - backing = mock.sentinel.backing - ret = self.vops.get_path_name(backing) - self.assertEqual(path_name, ret) - invoke_api.assert_called_once_with(vim_util, 'get_object_property', - self.session.vim, backing, - 'config.files') - - def test_get_entity_name(self): - entity_name = mock.sentinel.entity_name - invoke_api = self.session.invoke_api - invoke_api.return_value = entity_name - entity = mock.sentinel.entity - ret = 
self.vops.get_entity_name(entity) - self.assertEqual(entity_name, ret) - invoke_api.assert_called_once_with(vim_util, 'get_object_property', - self.session.vim, entity, 'name') - - def test_get_vmdk_path(self): - # Setup hardware_devices for test - device = mock.Mock() - device.__class__.__name__ = 'VirtualDisk' - backing = mock.Mock() - backing.__class__.__name__ = 'VirtualDiskFlatVer2BackingInfo' - backing.fileName = mock.sentinel.vmdk_path - device.backing = backing - invoke_api = self.session.invoke_api - invoke_api.return_value = [device] - # Test get_vmdk_path - ret = self.vops.get_vmdk_path(backing) - self.assertEqual(mock.sentinel.vmdk_path, ret) - invoke_api.assert_called_once_with(vim_util, 'get_object_property', - self.session.vim, backing, - 'config.hardware.device') - - backing.__class__.__name__ = ' VirtualDiskSparseVer2BackingInfo' - self.assertRaises(AssertionError, self.vops.get_vmdk_path, backing) - - # Test with no disk device. - invoke_api.return_value = [] - self.assertRaises(vmdk_exceptions.VirtualDiskNotFoundException, - self.vops.get_vmdk_path, - backing) - - def test_get_disk_size(self): - # Test with valid disk device. - device = mock.Mock() - device.__class__.__name__ = 'VirtualDisk' - disk_size_bytes = 1024 - device.capacityInKB = disk_size_bytes / units.Ki - invoke_api = self.session.invoke_api - invoke_api.return_value = [device] - - self.assertEqual(disk_size_bytes, - self.vops.get_disk_size(mock.sentinel.backing)) - - # Test with no disk device. 
- invoke_api.return_value = [] - - self.assertRaises(vmdk_exceptions.VirtualDiskNotFoundException, - self.vops.get_disk_size, - mock.sentinel.backing) - - def test_create_virtual_disk(self): - task = mock.Mock() - invoke_api = self.session.invoke_api - invoke_api.return_value = task - spec = mock.Mock() - factory = self.session.vim.client.factory - factory.create.return_value = spec - disk_mgr = self.session.vim.service_content.virtualDiskManager - - dc_ref = mock.Mock() - vmdk_ds_file_path = mock.Mock() - size_in_kb = 1024 - adapter_type = 'ide' - disk_type = 'thick' - self.vops.create_virtual_disk(dc_ref, vmdk_ds_file_path, size_in_kb, - adapter_type, disk_type) - - self.assertEqual(volumeops.VirtualDiskAdapterType.IDE, - spec.adapterType) - self.assertEqual(volumeops.VirtualDiskType.PREALLOCATED, spec.diskType) - self.assertEqual(size_in_kb, spec.capacityKb) - invoke_api.assert_called_once_with(self.session.vim, - 'CreateVirtualDisk_Task', - disk_mgr, - name=vmdk_ds_file_path, - datacenter=dc_ref, - spec=spec) - self.session.wait_for_task.assert_called_once_with(task) - - @mock.patch('cinder.volume.drivers.vmware.volumeops.VMwareVolumeOps.' - 'create_virtual_disk') - @mock.patch('cinder.volume.drivers.vmware.volumeops.VMwareVolumeOps.' 
- 'delete_file') - def test_create_flat_extent_virtual_disk_descriptor(self, delete_file, - create_virtual_disk): - dc_ref = mock.Mock() - path = mock.Mock() - size_in_kb = 1024 - adapter_type = 'ide' - disk_type = 'thick' - - self.vops.create_flat_extent_virtual_disk_descriptor(dc_ref, - path, - size_in_kb, - adapter_type, - disk_type) - create_virtual_disk.assert_called_once_with( - dc_ref, path.get_descriptor_ds_file_path(), size_in_kb, - adapter_type, disk_type) - delete_file.assert_called_once_with( - path.get_flat_extent_ds_file_path(), dc_ref) - - def test_copy_vmdk_file(self): - task = mock.sentinel.task - invoke_api = self.session.invoke_api - invoke_api.return_value = task - - disk_mgr = self.session.vim.service_content.virtualDiskManager - src_dc_ref = mock.sentinel.src_dc_ref - src_vmdk_file_path = mock.sentinel.src_vmdk_file_path - dest_dc_ref = mock.sentinel.dest_dc_ref - dest_vmdk_file_path = mock.sentinel.dest_vmdk_file_path - self.vops.copy_vmdk_file(src_dc_ref, src_vmdk_file_path, - dest_vmdk_file_path, dest_dc_ref) - - invoke_api.assert_called_once_with(self.session.vim, - 'CopyVirtualDisk_Task', - disk_mgr, - sourceName=src_vmdk_file_path, - sourceDatacenter=src_dc_ref, - destName=dest_vmdk_file_path, - destDatacenter=dest_dc_ref, - force=True) - self.session.wait_for_task.assert_called_once_with(task) - - def test_copy_vmdk_file_with_default_dest_datacenter(self): - task = mock.sentinel.task - invoke_api = self.session.invoke_api - invoke_api.return_value = task - - disk_mgr = self.session.vim.service_content.virtualDiskManager - src_dc_ref = mock.sentinel.src_dc_ref - src_vmdk_file_path = mock.sentinel.src_vmdk_file_path - dest_vmdk_file_path = mock.sentinel.dest_vmdk_file_path - self.vops.copy_vmdk_file(src_dc_ref, src_vmdk_file_path, - dest_vmdk_file_path) - - invoke_api.assert_called_once_with(self.session.vim, - 'CopyVirtualDisk_Task', - disk_mgr, - sourceName=src_vmdk_file_path, - sourceDatacenter=src_dc_ref, - 
destName=dest_vmdk_file_path, - destDatacenter=src_dc_ref, - force=True) - self.session.wait_for_task.assert_called_once_with(task) - - def test_move_vmdk_file(self): - task = mock.sentinel.task - invoke_api = self.session.invoke_api - invoke_api.return_value = task - - disk_mgr = self.session.vim.service_content.virtualDiskManager - src_dc_ref = mock.sentinel.src_dc_ref - src_vmdk_file_path = mock.sentinel.src_vmdk_file_path - dest_dc_ref = mock.sentinel.dest_dc_ref - dest_vmdk_file_path = mock.sentinel.dest_vmdk_file_path - self.vops.move_vmdk_file(src_dc_ref, - src_vmdk_file_path, - dest_vmdk_file_path, - dest_dc_ref=dest_dc_ref) - - invoke_api.assert_called_once_with(self.session.vim, - 'MoveVirtualDisk_Task', - disk_mgr, - sourceName=src_vmdk_file_path, - sourceDatacenter=src_dc_ref, - destName=dest_vmdk_file_path, - destDatacenter=dest_dc_ref, - force=True) - self.session.wait_for_task.assert_called_once_with(task) - - def test_delete_vmdk_file(self): - task = mock.sentinel.task - invoke_api = self.session.invoke_api - invoke_api.return_value = task - disk_mgr = self.session.vim.service_content.virtualDiskManager - dc_ref = self.session.dc_ref - vmdk_file_path = self.session.vmdk_file - self.vops.delete_vmdk_file(vmdk_file_path, dc_ref) - invoke_api.assert_called_once_with(self.session.vim, - 'DeleteVirtualDisk_Task', - disk_mgr, - name=vmdk_file_path, - datacenter=dc_ref) - self.session.wait_for_task.assert_called_once_with(task) - - def test_extend_virtual_disk(self): - """Test volumeops.extend_virtual_disk.""" - task = mock.sentinel.task - invoke_api = self.session.invoke_api - invoke_api.return_value = task - disk_mgr = self.session.vim.service_content.virtualDiskManager - fake_size = 5 - fake_size_in_kb = fake_size * units.Mi - fake_name = 'fake_volume_0000000001' - fake_dc = mock.sentinel.datacenter - self.vops.extend_virtual_disk(fake_size, - fake_name, fake_dc) - invoke_api.assert_called_once_with(self.session.vim, - "ExtendVirtualDisk_Task", - 
disk_mgr, - name=fake_name, - datacenter=fake_dc, - newCapacityKb=fake_size_in_kb, - eagerZero=False) - self.session.wait_for_task.assert_called_once_with(task) - - @mock.patch('cinder.volume.drivers.vmware.volumeops.VMwareVolumeOps.' - '_get_all_clusters') - def test_get_cluster_refs(self, get_all_clusters): - cls_1 = mock.sentinel.cls_1 - cls_2 = mock.sentinel.cls_2 - clusters = {"cls_1": cls_1, "cls_2": cls_2} - get_all_clusters.return_value = clusters - - self.assertEqual({"cls_2": cls_2}, - self.vops.get_cluster_refs(["cls_2"])) - - @mock.patch('cinder.volume.drivers.vmware.volumeops.VMwareVolumeOps.' - '_get_all_clusters') - def test_get_cluster_refs_with_invalid_cluster(self, get_all_clusters): - cls_1 = mock.sentinel.cls_1 - cls_2 = mock.sentinel.cls_2 - clusters = {"cls_1": cls_1, "cls_2": cls_2} - get_all_clusters.return_value = clusters - - self.assertRaises(vmdk_exceptions.ClusterNotFoundException, - self.vops.get_cluster_refs, - ["cls_1", "cls_3"]) - - def test_get_cluster_hosts(self): - host_1 = mock.sentinel.host_1 - host_2 = mock.sentinel.host_2 - hosts = mock.Mock(ManagedObjectReference=[host_1, host_2]) - self.session.invoke_api.return_value = hosts - - cluster = mock.sentinel.cluster - ret = self.vops.get_cluster_hosts(cluster) - - self.assertEqual([host_1, host_2], ret) - self.session.invoke_api.assert_called_once_with(vim_util, - 'get_object_property', - self.session.vim, - cluster, - 'host') - - def test_get_cluster_hosts_with_no_host(self): - self.session.invoke_api.return_value = None - - cluster = mock.sentinel.cluster - ret = self.vops.get_cluster_hosts(cluster) - - self.assertEqual([], ret) - self.session.invoke_api.assert_called_once_with(vim_util, - 'get_object_property', - self.session.vim, - cluster, - 'host') - - @mock.patch('cinder.volume.drivers.vmware.volumeops.VMwareVolumeOps.' 
- 'continue_retrieval', return_value=None) - def test_get_all_clusters(self, continue_retrieval): - prop_1 = mock.Mock(val='test_cluster_1') - cls_1 = mock.Mock(propSet=[prop_1], obj=mock.sentinel.mor_1) - prop_2 = mock.Mock(val='/test_cluster_2') - cls_2 = mock.Mock(propSet=[prop_2], obj=mock.sentinel.mor_2) - - retrieve_result = mock.Mock(objects=[cls_1, cls_2]) - self.session.invoke_api.return_value = retrieve_result - - ret = self.vops._get_all_clusters() - exp = {'test_cluster_1': mock.sentinel.mor_1, - '/test_cluster_2': mock.sentinel.mor_2} - self.assertEqual(exp, ret) - self.session.invoke_api.assert_called_once_with( - vim_util, 'get_objects', self.session.vim, - 'ClusterComputeResource', self.MAX_OBJECTS) - continue_retrieval.assert_called_once_with(retrieve_result) - - def test_get_entity_by_inventory_path(self): - self.session.invoke_api.return_value = mock.sentinel.ref - - path = mock.sentinel.path - ret = self.vops.get_entity_by_inventory_path(path) - self.assertEqual(mock.sentinel.ref, ret) - self.session.invoke_api.assert_called_once_with( - self.session.vim, - "FindByInventoryPath", - self.session.vim.service_content.searchIndex, - inventoryPath=path) - - def test_get_disk_devices(self): - disk_device = mock.Mock() - disk_device.__class__.__name__ = 'VirtualDisk' - - controller_device = mock.Mock() - controller_device.__class__.__name__ = 'VirtualLSILogicController' - - devices = mock.Mock() - devices.__class__.__name__ = "ArrayOfVirtualDevice" - devices.VirtualDevice = [disk_device, controller_device] - self.session.invoke_api.return_value = devices - - vm = mock.sentinel.vm - self.assertEqual([disk_device], self.vops._get_disk_devices(vm)) - self.session.invoke_api.assert_called_once_with( - vim_util, 'get_object_property', self.session.vim, - vm, 'config.hardware.device') - - def _create_disk_device(self, file_name): - backing = mock.Mock(fileName=file_name) - backing.__class__.__name__ = 'VirtualDiskFlatVer2BackingInfo' - return 
mock.Mock(backing=backing) - - @mock.patch('cinder.volume.drivers.vmware.volumeops.VMwareVolumeOps.' - '_get_disk_devices') - def test_get_disk_device(self, get_disk_devices): - dev_1 = self._create_disk_device('[ds1] foo/foo.vmdk') - dev_2 = self._create_disk_device('[ds1] foo/foo_1.vmdk') - get_disk_devices.return_value = [dev_1, dev_2] - - vm = mock.sentinel.vm - self.assertEqual(dev_2, - self.vops.get_disk_device(vm, '[ds1] foo/foo_1.vmdk')) - get_disk_devices.assert_called_once_with(vm) - - -class VirtualDiskPathTest(test.TestCase): - """Unit tests for VirtualDiskPath.""" - - def setUp(self): - super(VirtualDiskPathTest, self).setUp() - self._path = volumeops.VirtualDiskPath("nfs", "A/B/", "disk") - - def test_get_datastore_file_path(self): - self.assertEqual("[nfs] A/B/disk.vmdk", - self._path.get_datastore_file_path("nfs", - "A/B/disk.vmdk")) - - def test_get_descriptor_file_path(self): - self.assertEqual("A/B/disk.vmdk", - self._path.get_descriptor_file_path()) - - def test_get_descriptor_ds_file_path(self): - self.assertEqual("[nfs] A/B/disk.vmdk", - self._path.get_descriptor_ds_file_path()) - - -class FlatExtentVirtualDiskPathTest(test.TestCase): - """Unit tests for FlatExtentVirtualDiskPath.""" - - def setUp(self): - super(FlatExtentVirtualDiskPathTest, self).setUp() - self._path = volumeops.FlatExtentVirtualDiskPath("nfs", "A/B/", "disk") - - def test_get_flat_extent_file_path(self): - self.assertEqual("A/B/disk-flat.vmdk", - self._path.get_flat_extent_file_path()) - - def test_get_flat_extent_ds_file_path(self): - self.assertEqual("[nfs] A/B/disk-flat.vmdk", - self._path.get_flat_extent_ds_file_path()) - - -class VirtualDiskTypeTest(test.TestCase): - """Unit tests for VirtualDiskType.""" - - def test_is_valid(self): - self.assertTrue(volumeops.VirtualDiskType.is_valid("thick")) - self.assertTrue(volumeops.VirtualDiskType.is_valid("thin")) - self.assertTrue(volumeops.VirtualDiskType.is_valid("eagerZeroedThick")) - 
self.assertFalse(volumeops.VirtualDiskType.is_valid("preallocated")) - - def test_validate(self): - volumeops.VirtualDiskType.validate("thick") - volumeops.VirtualDiskType.validate("thin") - volumeops.VirtualDiskType.validate("eagerZeroedThick") - self.assertRaises(vmdk_exceptions.InvalidDiskTypeException, - volumeops.VirtualDiskType.validate, - "preallocated") - - def test_get_virtual_disk_type(self): - self.assertEqual("preallocated", - volumeops.VirtualDiskType.get_virtual_disk_type( - "thick")) - self.assertEqual("thin", - volumeops.VirtualDiskType.get_virtual_disk_type( - "thin")) - self.assertEqual("eagerZeroedThick", - volumeops.VirtualDiskType.get_virtual_disk_type( - "eagerZeroedThick")) - self.assertRaises(vmdk_exceptions.InvalidDiskTypeException, - volumeops.VirtualDiskType.get_virtual_disk_type, - "preallocated") - - -class VirtualDiskAdapterTypeTest(test.TestCase): - """Unit tests for VirtualDiskAdapterType.""" - - def test_is_valid(self): - self.assertTrue(volumeops.VirtualDiskAdapterType.is_valid("lsiLogic")) - self.assertTrue(volumeops.VirtualDiskAdapterType.is_valid("busLogic")) - self.assertTrue(volumeops.VirtualDiskAdapterType.is_valid( - "lsiLogicsas")) - self.assertTrue( - volumeops.VirtualDiskAdapterType.is_valid("paraVirtual")) - self.assertTrue(volumeops.VirtualDiskAdapterType.is_valid("ide")) - self.assertFalse(volumeops.VirtualDiskAdapterType.is_valid("pvscsi")) - - def test_validate(self): - volumeops.VirtualDiskAdapterType.validate("lsiLogic") - volumeops.VirtualDiskAdapterType.validate("busLogic") - volumeops.VirtualDiskAdapterType.validate("lsiLogicsas") - volumeops.VirtualDiskAdapterType.validate("paraVirtual") - volumeops.VirtualDiskAdapterType.validate("ide") - self.assertRaises(vmdk_exceptions.InvalidAdapterTypeException, - volumeops.VirtualDiskAdapterType.validate, - "pvscsi") - - def test_get_adapter_type(self): - self.assertEqual("lsiLogic", - volumeops.VirtualDiskAdapterType.get_adapter_type( - "lsiLogic")) - 
self.assertEqual("busLogic", - volumeops.VirtualDiskAdapterType.get_adapter_type( - "busLogic")) - self.assertEqual("lsiLogic", - volumeops.VirtualDiskAdapterType.get_adapter_type( - "lsiLogicsas")) - self.assertEqual("lsiLogic", - volumeops.VirtualDiskAdapterType.get_adapter_type( - "paraVirtual")) - self.assertEqual("ide", - volumeops.VirtualDiskAdapterType.get_adapter_type( - "ide")) - self.assertRaises(vmdk_exceptions.InvalidAdapterTypeException, - volumeops.VirtualDiskAdapterType.get_adapter_type, - "pvscsi") - - -class ControllerTypeTest(test.TestCase): - """Unit tests for ControllerType.""" - - def test_get_controller_type(self): - self.assertEqual(volumeops.ControllerType.LSI_LOGIC, - volumeops.ControllerType.get_controller_type( - 'lsiLogic')) - self.assertEqual(volumeops.ControllerType.BUS_LOGIC, - volumeops.ControllerType.get_controller_type( - 'busLogic')) - self.assertEqual(volumeops.ControllerType.LSI_LOGIC_SAS, - volumeops.ControllerType.get_controller_type( - 'lsiLogicsas')) - self.assertEqual(volumeops.ControllerType.PARA_VIRTUAL, - volumeops.ControllerType.get_controller_type( - 'paraVirtual')) - self.assertEqual(volumeops.ControllerType.IDE, - volumeops.ControllerType.get_controller_type( - 'ide')) - self.assertRaises(vmdk_exceptions.InvalidAdapterTypeException, - volumeops.ControllerType.get_controller_type, - 'invalid_type') - - def test_is_scsi_controller(self): - self.assertTrue(volumeops.ControllerType.is_scsi_controller( - volumeops.ControllerType.LSI_LOGIC)) - self.assertTrue(volumeops.ControllerType.is_scsi_controller( - volumeops.ControllerType.BUS_LOGIC)) - self.assertTrue(volumeops.ControllerType.is_scsi_controller( - volumeops.ControllerType.LSI_LOGIC_SAS)) - self.assertTrue(volumeops.ControllerType.is_scsi_controller( - volumeops.ControllerType.PARA_VIRTUAL)) - self.assertFalse(volumeops.ControllerType.is_scsi_controller( - volumeops.ControllerType.IDE)) diff --git a/cinder/tests/unit/volume/flows/__init__.py 
b/cinder/tests/unit/volume/flows/__init__.py deleted file mode 100644 index e69de29bb..000000000 diff --git a/cinder/tests/unit/volume/flows/fake_volume_api.py b/cinder/tests/unit/volume/flows/fake_volume_api.py deleted file mode 100644 index b944761ad..000000000 --- a/cinder/tests/unit/volume/flows/fake_volume_api.py +++ /dev/null @@ -1,60 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - - -class FakeVolumeAPI(object): - def __init__(self, expected_spec, test_inst): - self.expected_spec = expected_spec - self.test_inst = test_inst - - def create_volume(self, ctxt, volume, host, - request_spec, filter_properties, - allow_reschedule=True, - snapshot_id=None, image_id=None, - source_volid=None, - source_replicaid=None): - - self.test_inst.assertEqual(self.expected_spec, request_spec) - self.test_inst.assertEqual(request_spec['source_volid'], source_volid) - self.test_inst.assertEqual(request_spec['snapshot_id'], snapshot_id) - self.test_inst.assertEqual(request_spec['image_id'], image_id) - self.test_inst.assertEqual(request_spec['source_replicaid'], - source_replicaid) - - -class FakeSchedulerRpcAPI(object): - def __init__(self, expected_spec, test_inst): - self.expected_spec = expected_spec - self.test_inst = test_inst - - def create_volume(self, ctxt, volume, snapshot_id=None, image_id=None, - request_spec=None, filter_properties=None): - - self.test_inst.assertEqual(self.expected_spec, request_spec) - - def manage_existing(self, context, volume, 
request_spec=None): - self.test_inst.assertEqual(self.expected_spec, request_spec) - - -class FakeDb(object): - - def volume_get(self, *args, **kwargs): - return {'host': 'barf'} - - def volume_update(self, *args, **kwargs): - return {'host': 'farb'} - - def snapshot_get(self, *args, **kwargs): - return {'volume_id': 1} - - def consistencygroup_get(self, *args, **kwargs): - return {'consistencygroup_id': 1} diff --git a/cinder/tests/unit/volume/flows/test_create_volume_flow.py b/cinder/tests/unit/volume/flows/test_create_volume_flow.py deleted file mode 100644 index bb451b023..000000000 --- a/cinder/tests/unit/volume/flows/test_create_volume_flow.py +++ /dev/null @@ -1,1609 +0,0 @@ -# Copyright 2013 Canonical Ltd. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
-""" Tests for create_volume TaskFlow """ - -import sys - -import ddt -import mock - -from castellan.tests.unit.key_manager import mock_key_manager -from oslo_utils import imageutils - -from cinder import context -from cinder import exception -from cinder.message import message_field -from cinder import test -from cinder.tests.unit.consistencygroup import fake_consistencygroup -from cinder.tests.unit import fake_constants as fakes -from cinder.tests.unit import fake_snapshot -from cinder.tests.unit import fake_volume -from cinder.tests.unit.image import fake as fake_image -from cinder.tests.unit import utils -from cinder.tests.unit.volume.flows import fake_volume_api -from cinder.volume.flows.api import create_volume -from cinder.volume.flows.manager import create_volume as create_volume_manager - - -@ddt.ddt -class CreateVolumeFlowTestCase(test.TestCase): - - def time_inc(self): - self.counter += 1 - return self.counter - - def setUp(self): - super(CreateVolumeFlowTestCase, self).setUp() - self.ctxt = context.get_admin_context() - - # Ensure that time.time() always returns more than the last time it was - # called to avoid div by zero errors. - self.counter = float(0) - self.get_extra_specs = self.patch( - 'cinder.volume.volume_types.get_volume_type_extra_specs', - return_value={}) - - @mock.patch('cinder.objects.Volume.get_by_id') - @mock.patch('cinder.volume.utils.extract_host') - @mock.patch('time.time') - @mock.patch('cinder.objects.ConsistencyGroup.get_by_id') - def test_cast_create_volume(self, consistencygroup_get_by_id, mock_time, - mock_extract_host, volume_get_by_id): - mock_time.side_effect = self.time_inc - volume = fake_volume.fake_volume_obj(self.ctxt) - volume_get_by_id.return_value = volume - props = {} - cg_obj = (fake_consistencygroup. 
- fake_consistencyobject_obj(self.ctxt, consistencygroup_id=1, - host='host@backend#pool')) - consistencygroup_get_by_id.return_value = cg_obj - spec = {'volume_id': None, - 'volume': None, - 'source_volid': None, - 'snapshot_id': None, - 'image_id': None, - 'source_replicaid': None, - 'consistencygroup_id': None, - 'cgsnapshot_id': None, - 'group_id': None, } - - # Fake objects assert specs - task = create_volume.VolumeCastTask( - fake_volume_api.FakeSchedulerRpcAPI(spec, self), - fake_volume_api.FakeVolumeAPI(spec, self), - fake_volume_api.FakeDb()) - - task._cast_create_volume(self.ctxt, spec, props) - - spec = {'volume_id': volume.id, - 'volume': volume, - 'source_volid': 2, - 'snapshot_id': 3, - 'image_id': 4, - 'source_replicaid': 5, - 'consistencygroup_id': 5, - 'cgsnapshot_id': None, - 'group_id': None, } - - # Fake objects assert specs - task = create_volume.VolumeCastTask( - fake_volume_api.FakeSchedulerRpcAPI(spec, self), - fake_volume_api.FakeVolumeAPI(spec, self), - fake_volume_api.FakeDb()) - - task._cast_create_volume(self.ctxt, spec, props) - consistencygroup_get_by_id.assert_called_once_with(self.ctxt, 5) - mock_extract_host.assert_called_once_with('host@backend#pool') - - @ddt.data(('enabled', {'replication_enabled': ' True'}), - ('disabled', {'replication_enabled': ' False'}), - ('disabled', {})) - @ddt.unpack - @mock.patch('cinder.volume.flows.api.create_volume.' - 'ExtractVolumeRequestTask.' 
- '_get_encryption_key_id', mock.Mock()) - @mock.patch('cinder.volume.volume_types.get_volume_type_qos_specs') - def test_extract_volume_request_replication_status(self, - replication_status, - extra_specs, - fake_get_qos): - self.get_extra_specs.return_value = extra_specs - fake_image_service = fake_image.FakeImageService() - fake_key_manager = mock_key_manager.MockKeyManager() - - task = create_volume.ExtractVolumeRequestTask(fake_image_service, - {'nova'}) - - result = task.execute(self.ctxt, - size=1, - snapshot=None, - image_id=None, - source_volume=None, - availability_zone='nova', - volume_type={'id': fakes.VOLUME_TYPE_ID, - 'size': 1}, - metadata=None, - key_manager=fake_key_manager, - source_replica=None, - consistencygroup=None, - cgsnapshot=None, - group=None) - self.assertEqual(replication_status, result['replication_status'], - extra_specs) - - @mock.patch('cinder.volume.volume_types.is_encrypted') - @mock.patch('cinder.volume.flows.api.create_volume.' - 'ExtractVolumeRequestTask.' - '_get_volume_type_id') - @mock.patch('cinder.volume.flows.api.create_volume.' - 'ExtractVolumeRequestTask.' 
- '_get_encryption_key_id') - @mock.patch('cinder.volume.volume_types.get_volume_type_qos_specs') - def test_extract_volume_request_from_image_encrypted( - self, - fake_get_qos, - fake_get_encryption_key, - fake_get_volume_type_id, - fake_is_encrypted): - - fake_image_service = fake_image.FakeImageService() - image_id = 1 - image_meta = {} - image_meta['id'] = image_id - image_meta['status'] = 'active' - image_meta['size'] = 1 - fake_image_service.create(self.ctxt, image_meta) - fake_key_manager = mock_key_manager.MockKeyManager() - - task = create_volume.ExtractVolumeRequestTask( - fake_image_service, - {'nova'}) - - fake_is_encrypted.return_value = True - fake_get_volume_type_id.return_value = fakes.VOLUME_TYPE_ID - task.execute(self.ctxt, - size=1, - snapshot=None, - image_id=image_id, - source_volume=None, - availability_zone='nova', - volume_type=None, - metadata=None, - key_manager=fake_key_manager, - source_replica=None, - consistencygroup=None, - cgsnapshot=None, - group=None) - fake_get_encryption_key.assert_called_once_with( - fake_key_manager, self.ctxt, fakes.VOLUME_TYPE_ID, - None, None, image_meta) - - @mock.patch('cinder.volume.volume_types.is_encrypted') - @mock.patch('cinder.volume.volume_types.get_volume_type_qos_specs') - @mock.patch('cinder.volume.flows.api.create_volume.' - 'ExtractVolumeRequestTask.' 
- '_get_volume_type_id') - def test_extract_volume_request_from_image( - self, - fake_get_type_id, - fake_get_qos, - fake_is_encrypted): - - fake_image_service = fake_image.FakeImageService() - image_id = 2 - image_meta = {} - image_meta['id'] = image_id - image_meta['status'] = 'active' - image_meta['size'] = 1 - fake_image_service.create(self.ctxt, image_meta) - fake_key_manager = mock_key_manager.MockKeyManager() - volume_type = 'type1' - - task = create_volume.ExtractVolumeRequestTask( - fake_image_service, - {'nova'}) - - fake_is_encrypted.return_value = False - fake_get_type_id.return_value = 1 - fake_get_qos.return_value = {'qos_specs': None} - result = task.execute(self.ctxt, - size=1, - snapshot=None, - image_id=image_id, - source_volume=None, - availability_zone='nova', - volume_type=volume_type, - metadata=None, - key_manager=fake_key_manager, - source_replica=None, - consistencygroup=None, - cgsnapshot=None, - group=None) - expected_result = {'size': 1, - 'snapshot_id': None, - 'source_volid': None, - 'availability_zone': 'nova', - 'volume_type': volume_type, - 'volume_type_id': 1, - 'encryption_key_id': None, - 'qos_specs': None, - 'source_replicaid': None, - 'consistencygroup_id': None, - 'cgsnapshot_id': None, - 'group_id': None, - 'replication_status': 'disabled'} - self.assertEqual(expected_result, result) - - @mock.patch('cinder.volume.volume_types.is_encrypted') - @mock.patch('cinder.volume.volume_types.get_volume_type_qos_specs') - @mock.patch('cinder.volume.flows.api.create_volume.' - 'ExtractVolumeRequestTask.' 
- '_get_volume_type_id') - def test_extract_availability_zone_without_fallback( - self, - fake_get_type_id, - fake_get_qos, - fake_is_encrypted): - fake_image_service = fake_image.FakeImageService() - image_id = 3 - image_meta = {} - image_meta['id'] = image_id - image_meta['status'] = 'active' - image_meta['size'] = 1 - fake_image_service.create(self.ctxt, image_meta) - fake_key_manager = mock_key_manager.MockKeyManager() - volume_type = 'type1' - - task = create_volume.ExtractVolumeRequestTask( - fake_image_service, - {'nova'}) - - fake_is_encrypted.return_value = False - fake_get_type_id.return_value = 1 - fake_get_qos.return_value = {'qos_specs': None} - self.assertRaises(exception.InvalidInput, - task.execute, - self.ctxt, - size=1, - snapshot=None, - image_id=image_id, - source_volume=None, - availability_zone='notnova', - volume_type=volume_type, - metadata=None, - key_manager=fake_key_manager, - source_replica=None, - consistencygroup=None, - cgsnapshot=None, - group=None) - - @mock.patch('cinder.volume.volume_types.is_encrypted') - @mock.patch('cinder.volume.volume_types.get_volume_type_qos_specs') - @mock.patch('cinder.volume.flows.api.create_volume.' - 'ExtractVolumeRequestTask.' 
- '_get_volume_type_id') - def test_extract_availability_zone_with_fallback( - self, - fake_get_type_id, - fake_get_qos, - fake_is_encrypted): - - self.override_config('allow_availability_zone_fallback', True) - - fake_image_service = fake_image.FakeImageService() - image_id = 4 - image_meta = {} - image_meta['id'] = image_id - image_meta['status'] = 'active' - image_meta['size'] = 1 - fake_image_service.create(self.ctxt, image_meta) - fake_key_manager = mock_key_manager.MockKeyManager() - volume_type = 'type1' - - task = create_volume.ExtractVolumeRequestTask( - fake_image_service, - {'nova'}) - - fake_is_encrypted.return_value = False - fake_get_type_id.return_value = 1 - fake_get_qos.return_value = {'qos_specs': None} - result = task.execute(self.ctxt, - size=1, - snapshot=None, - image_id=image_id, - source_volume=None, - availability_zone='does_not_exist', - volume_type=volume_type, - metadata=None, - key_manager=fake_key_manager, - source_replica=None, - consistencygroup=None, - cgsnapshot=None, - group=None) - expected_result = {'size': 1, - 'snapshot_id': None, - 'source_volid': None, - 'availability_zone': 'nova', - 'volume_type': volume_type, - 'volume_type_id': 1, - 'encryption_key_id': None, - 'qos_specs': None, - 'source_replicaid': None, - 'consistencygroup_id': None, - 'cgsnapshot_id': None, - 'group_id': None, - 'replication_status': 'disabled'} - self.assertEqual(expected_result, result) - - @mock.patch('cinder.volume.volume_types.is_encrypted') - @mock.patch('cinder.volume.volume_types.get_volume_type_qos_specs') - @mock.patch('cinder.volume.flows.api.create_volume.' - 'ExtractVolumeRequestTask.' 
- '_get_volume_type_id') - def test_extract_volume_request_task_with_large_volume_size( - self, - fake_get_type_id, - fake_get_qos, - fake_is_encrypted): - fake_image_service = fake_image.FakeImageService() - image_id = 11 - image_meta = {} - image_meta['id'] = image_id - image_meta['status'] = 'active' - image_meta['size'] = 1 - fake_image_service.create(self.ctxt, image_meta) - fake_key_manager = mock_key_manager.MockKeyManager() - volume_type = 'type1' - - task = create_volume.ExtractVolumeRequestTask( - fake_image_service, - {'nova'}) - - fake_is_encrypted.return_value = False - fake_get_type_id.return_value = 1 - fake_get_qos.return_value = {'qos_specs': None} - result = task.execute(self.ctxt, - size=(sys.maxsize + 1), - snapshot=None, - image_id=image_id, - source_volume=None, - availability_zone=None, - volume_type=volume_type, - metadata=None, - key_manager=fake_key_manager, - source_replica=None, - consistencygroup=None, - cgsnapshot=None, - group=None) - expected_result = {'size': (sys.maxsize + 1), - 'snapshot_id': None, - 'source_volid': None, - 'availability_zone': 'nova', - 'volume_type': volume_type, - 'volume_type_id': 1, - 'encryption_key_id': None, - 'qos_specs': None, - 'replication_status': 'disabled', - 'source_replicaid': None, - 'consistencygroup_id': None, - 'cgsnapshot_id': None, - 'group_id': None, } - self.assertEqual(expected_result, result) - - @mock.patch('cinder.volume.volume_types.is_encrypted') - @mock.patch('cinder.volume.volume_types.get_volume_type_qos_specs') - @mock.patch('cinder.volume.flows.api.create_volume.' - 'ExtractVolumeRequestTask.' 
- '_get_volume_type_id') - def test_extract_volume_request_from_image_with_qos_specs( - self, - fake_get_type_id, - fake_get_qos, - fake_is_encrypted): - - fake_image_service = fake_image.FakeImageService() - image_id = 5 - image_meta = {} - image_meta['id'] = image_id - image_meta['status'] = 'active' - image_meta['size'] = 1 - fake_image_service.create(self.ctxt, image_meta) - fake_key_manager = mock_key_manager.MockKeyManager() - volume_type = 'type1' - - task = create_volume.ExtractVolumeRequestTask( - fake_image_service, - {'nova'}) - - fake_is_encrypted.return_value = False - fake_get_type_id.return_value = 1 - fake_qos_spec = {'specs': {'fake_key': 'fake'}} - fake_get_qos.return_value = {'qos_specs': fake_qos_spec} - result = task.execute(self.ctxt, - size=1, - snapshot=None, - image_id=image_id, - source_volume=None, - availability_zone='nova', - volume_type=volume_type, - metadata=None, - key_manager=fake_key_manager, - source_replica=None, - consistencygroup=None, - cgsnapshot=None, - group=None) - expected_result = {'size': 1, - 'snapshot_id': None, - 'source_volid': None, - 'availability_zone': 'nova', - 'volume_type': volume_type, - 'volume_type_id': 1, - 'encryption_key_id': None, - 'qos_specs': {'fake_key': 'fake'}, - 'source_replicaid': None, - 'consistencygroup_id': None, - 'cgsnapshot_id': None, - 'group_id': None, - 'replication_status': 'disabled'} - self.assertEqual(expected_result, result) - - @mock.patch('cinder.volume.volume_types.is_encrypted') - @mock.patch('cinder.volume.volume_types.get_volume_type_qos_specs') - @mock.patch('cinder.volume.volume_types.get_default_volume_type') - @mock.patch('cinder.volume.volume_types.get_volume_type_by_name') - @mock.patch('cinder.volume.flows.api.create_volume.' - 'ExtractVolumeRequestTask.' 
- '_get_volume_type_id') - def test_extract_image_volume_type_from_image( - self, - fake_get_type_id, - fake_get_vol_type, - fake_get_def_vol_type, - fake_get_qos, - fake_is_encrypted): - - image_volume_type = 'type_from_image' - fake_image_service = fake_image.FakeImageService() - image_id = 6 - image_meta = {} - image_meta['id'] = image_id - image_meta['status'] = 'active' - image_meta['size'] = 1 - image_meta['properties'] = {} - image_meta['properties']['cinder_img_volume_type'] = image_volume_type - fake_image_service.create(self.ctxt, image_meta) - fake_key_manager = mock_key_manager.MockKeyManager() - - task = create_volume.ExtractVolumeRequestTask( - fake_image_service, - {'nova'}) - - fake_is_encrypted.return_value = False - fake_get_type_id.return_value = 1 - fake_get_vol_type.return_value = image_volume_type - fake_get_def_vol_type.return_value = 'fake_vol_type' - fake_get_qos.return_value = {'qos_specs': None} - result = task.execute(self.ctxt, - size=1, - snapshot=None, - image_id=image_id, - source_volume=None, - availability_zone='nova', - volume_type=None, - metadata=None, - key_manager=fake_key_manager, - source_replica=None, - consistencygroup=None, - cgsnapshot=None, - group=None) - expected_result = {'size': 1, - 'snapshot_id': None, - 'source_volid': None, - 'availability_zone': 'nova', - 'volume_type': image_volume_type, - 'volume_type_id': 1, - 'encryption_key_id': None, - 'qos_specs': None, - 'source_replicaid': None, - 'consistencygroup_id': None, - 'cgsnapshot_id': None, - 'group_id': None, - 'replication_status': 'disabled'} - self.assertEqual(expected_result, result) - - @mock.patch('cinder.db.volume_type_get_by_name') - @mock.patch('cinder.volume.volume_types.is_encrypted') - @mock.patch('cinder.volume.volume_types.get_volume_type_qos_specs') - @mock.patch('cinder.volume.volume_types.get_default_volume_type') - @mock.patch('cinder.volume.flows.api.create_volume.' - 'ExtractVolumeRequestTask.' 
- '_get_volume_type_id') - def test_extract_image_volume_type_from_image_invalid_type( - self, - fake_get_type_id, - fake_get_def_vol_type, - fake_get_qos, - fake_is_encrypted, - fake_db_get_vol_type): - - image_volume_type = 'invalid' - fake_image_service = fake_image.FakeImageService() - image_id = 7 - image_meta = {} - image_meta['id'] = image_id - image_meta['status'] = 'active' - image_meta['size'] = 1 - image_meta['properties'] = {} - image_meta['properties']['cinder_img_volume_type'] = image_volume_type - fake_image_service.create(self.ctxt, image_meta) - fake_key_manager = mock_key_manager.MockKeyManager() - - task = create_volume.ExtractVolumeRequestTask( - fake_image_service, - {'nova'}) - - fake_is_encrypted.return_value = False - fake_get_type_id.return_value = 1 - fake_get_def_vol_type.return_value = 'fake_vol_type' - fake_db_get_vol_type.side_effect = ( - exception.VolumeTypeNotFoundByName(volume_type_name='invalid')) - fake_get_qos.return_value = {'qos_specs': None} - result = task.execute(self.ctxt, - size=1, - snapshot=None, - image_id=image_id, - source_volume=None, - availability_zone='nova', - volume_type=None, - metadata=None, - key_manager=fake_key_manager, - source_replica=None, - consistencygroup=None, - cgsnapshot=None, - group=None) - expected_result = {'size': 1, - 'snapshot_id': None, - 'source_volid': None, - 'availability_zone': 'nova', - 'volume_type': 'fake_vol_type', - 'volume_type_id': 1, - 'encryption_key_id': None, - 'qos_specs': None, - 'source_replicaid': None, - 'consistencygroup_id': None, - 'cgsnapshot_id': None, - 'group_id': None, - 'replication_status': 'disabled'} - self.assertEqual(expected_result, result) - - @mock.patch('cinder.db.volume_type_get_by_name') - @mock.patch('cinder.volume.volume_types.is_encrypted') - @mock.patch('cinder.volume.volume_types.get_volume_type_qos_specs') - @mock.patch('cinder.volume.volume_types.get_default_volume_type') - @mock.patch('cinder.volume.flows.api.create_volume.' 
- 'ExtractVolumeRequestTask.' - '_get_volume_type_id') - @ddt.data((8, None), (9, {'cinder_img_volume_type': None})) - @ddt.unpack - def test_extract_image_volume_type_from_image_properties_error( - self, - image_id, - fake_img_properties, - fake_get_type_id, - fake_get_def_vol_type, - fake_get_qos, - fake_is_encrypted, - fake_db_get_vol_type): - - fake_image_service = fake_image.FakeImageService() - image_meta = {} - image_meta['id'] = image_id - image_meta['status'] = 'active' - image_meta['size'] = 1 - image_meta['properties'] = fake_img_properties - fake_image_service.create(self.ctxt, image_meta) - fake_key_manager = mock_key_manager.MockKeyManager() - - task = create_volume.ExtractVolumeRequestTask( - fake_image_service, - {'nova'}) - - fake_is_encrypted.return_value = False - fake_get_type_id.return_value = 1 - fake_get_def_vol_type.return_value = 'fake_vol_type' - fake_get_qos.return_value = {'qos_specs': None} - result = task.execute(self.ctxt, - size=1, - snapshot=None, - image_id=image_id, - source_volume=None, - availability_zone='nova', - volume_type=None, - metadata=None, - key_manager=fake_key_manager, - source_replica=None, - consistencygroup=None, - cgsnapshot=None, - group=None) - expected_result = {'size': 1, - 'snapshot_id': None, - 'source_volid': None, - 'availability_zone': 'nova', - 'volume_type': 'fake_vol_type', - 'volume_type_id': 1, - 'encryption_key_id': None, - 'qos_specs': None, - 'source_replicaid': None, - 'consistencygroup_id': None, - 'cgsnapshot_id': None, - 'group_id': None, - 'replication_status': 'disabled'} - self.assertEqual(expected_result, result) - - @mock.patch('cinder.db.volume_type_get_by_name') - @mock.patch('cinder.volume.volume_types.is_encrypted') - @mock.patch('cinder.volume.volume_types.get_volume_type_qos_specs') - @mock.patch('cinder.volume.volume_types.get_default_volume_type') - @mock.patch('cinder.volume.flows.api.create_volume.' - 'ExtractVolumeRequestTask.' 
- '_get_volume_type_id') - def test_extract_image_volume_type_from_image_invalid_input( - self, - fake_get_type_id, - fake_get_def_vol_type, - fake_get_qos, - fake_is_encrypted, - fake_db_get_vol_type): - - fake_image_service = fake_image.FakeImageService() - image_id = 10 - image_meta = {} - image_meta['id'] = image_id - image_meta['status'] = 'inactive' - fake_image_service.create(self.ctxt, image_meta) - fake_key_manager = mock_key_manager.MockKeyManager() - - task = create_volume.ExtractVolumeRequestTask( - fake_image_service, - {'nova'}) - - fake_is_encrypted.return_value = False - fake_get_type_id.return_value = 1 - fake_get_def_vol_type.return_value = 'fake_vol_type' - fake_get_qos.return_value = {'qos_specs': None} - - self.assertRaises(exception.InvalidInput, - task.execute, - self.ctxt, - size=1, - snapshot=None, - image_id=image_id, - source_volume=None, - availability_zone='nova', - volume_type=None, - metadata=None, - key_manager=fake_key_manager, - source_replica=None, - consistencygroup=None, - cgsnapshot=None, - group=None) - - -@ddt.ddt -class CreateVolumeFlowManagerTestCase(test.TestCase): - - def setUp(self): - super(CreateVolumeFlowManagerTestCase, self).setUp() - self.ctxt = context.get_admin_context() - - @mock.patch('cinder.volume.flows.manager.create_volume.' - 'CreateVolumeFromSpecTask.' - '_cleanup_cg_in_volume') - @mock.patch('cinder.volume.flows.manager.create_volume.' - 'CreateVolumeFromSpecTask.' 
- '_handle_bootable_volume_glance_meta') - @mock.patch('cinder.objects.Volume.get_by_id') - @mock.patch('cinder.objects.Snapshot.get_by_id') - def test_create_from_snapshot(self, snapshot_get_by_id, volume_get_by_id, - handle_bootable, cleanup_cg): - fake_db = mock.MagicMock() - fake_driver = mock.MagicMock() - fake_volume_manager = mock.MagicMock() - fake_manager = create_volume_manager.CreateVolumeFromSpecTask( - fake_volume_manager, fake_db, fake_driver) - volume_db = {'bootable': True} - volume_obj = fake_volume.fake_volume_obj(self.ctxt, **volume_db) - snapshot_obj = fake_snapshot.fake_snapshot_obj(self.ctxt) - snapshot_get_by_id.return_value = snapshot_obj - volume_get_by_id.return_value = volume_obj - - fake_manager._create_from_snapshot(self.ctxt, volume_obj, - snapshot_obj.id) - fake_driver.create_volume_from_snapshot.assert_called_once_with( - volume_obj, snapshot_obj) - handle_bootable.assert_called_once_with(self.ctxt, volume_obj, - snapshot_id=snapshot_obj.id) - cleanup_cg.assert_called_once_with(volume_obj) - - @mock.patch('cinder.volume.flows.manager.create_volume.' - 'CreateVolumeFromSpecTask.' 
- '_cleanup_cg_in_volume') - @mock.patch('cinder.objects.Snapshot.get_by_id') - def test_create_from_snapshot_update_failure(self, snapshot_get_by_id, - mock_cleanup_cg): - fake_db = mock.MagicMock() - fake_driver = mock.MagicMock() - fake_volume_manager = mock.MagicMock() - fake_manager = create_volume_manager.CreateVolumeFromSpecTask( - fake_volume_manager, fake_db, fake_driver) - volume_obj = fake_volume.fake_volume_obj(self.ctxt) - snapshot_obj = fake_snapshot.fake_snapshot_obj(self.ctxt) - snapshot_get_by_id.return_value = snapshot_obj - fake_db.volume_get.side_effect = exception.CinderException - - self.assertRaises(exception.MetadataUpdateFailure, - fake_manager._create_from_snapshot, self.ctxt, - volume_obj, snapshot_obj.id) - fake_driver.create_volume_from_snapshot.assert_called_once_with( - volume_obj, snapshot_obj) - mock_cleanup_cg.assert_called_once_with(volume_obj) - - @mock.patch('cinder.volume.flows.manager.create_volume.' - 'CreateVolumeFromSpecTask.' - '_cleanup_cg_in_volume') - @mock.patch('cinder.volume.flows.manager.create_volume.' - 'CreateVolumeFromSpecTask.' 
- '_handle_bootable_volume_glance_meta') - @mock.patch('cinder.image.image_utils.TemporaryImages.fetch') - @mock.patch('cinder.image.image_utils.qemu_img_info') - @mock.patch('cinder.image.image_utils.check_virtual_size') - def test_create_encrypted_volume_from_image(self, - mock_check_size, - mock_qemu_img, - mock_fetch_img, - mock_handle_bootable, - mock_cleanup_cg): - fake_db = mock.MagicMock() - fake_driver = mock.MagicMock() - fake_volume_manager = mock.MagicMock() - fake_manager = create_volume_manager.CreateVolumeFromSpecTask( - fake_volume_manager, fake_db, fake_driver) - volume = fake_volume.fake_volume_obj( - self.ctxt, - encryption_key_id=fakes.ENCRYPTION_KEY_ID, - host='host@backend#pool') - - fake_image_service = fake_image.FakeImageService() - image_meta = {} - image_id = fakes.IMAGE_ID - image_meta['id'] = image_id - image_meta['status'] = 'active' - image_meta['size'] = 1 - image_location = 'abc' - - fake_db.volume_update.return_value = volume - fake_manager._create_from_image(self.ctxt, volume, - image_location, image_id, - image_meta, fake_image_service) - - fake_driver.create_volume.assert_called_once_with(volume) - fake_driver.copy_image_to_encrypted_volume.assert_called_once_with( - self.ctxt, volume, fake_image_service, image_id) - mock_handle_bootable.assert_called_once_with(self.ctxt, volume, - image_id=image_id, - image_meta=image_meta) - mock_cleanup_cg.assert_called_once_with(volume) - - @mock.patch('cinder.volume.flows.manager.create_volume.' - 'CreateVolumeFromSpecTask.' - '_cleanup_cg_in_volume') - @mock.patch('cinder.volume.flows.manager.create_volume.' - 'CreateVolumeFromSpecTask.' 
- '_handle_bootable_volume_glance_meta') - @mock.patch('cinder.image.image_utils.TemporaryImages.fetch') - @mock.patch('cinder.image.image_utils.qemu_img_info') - @mock.patch('cinder.image.image_utils.check_virtual_size') - def test_create_encrypted_volume_from_enc_image(self, - mock_check_size, - mock_qemu_img, - mock_fetch_img, - mock_handle_bootable, - mock_cleanup_cg): - fake_db = mock.MagicMock() - fake_driver = mock.MagicMock() - fake_volume_manager = mock.MagicMock() - fake_manager = create_volume_manager.CreateVolumeFromSpecTask( - fake_volume_manager, fake_db, fake_driver) - volume = fake_volume.fake_volume_obj( - self.ctxt, - encryption_key_id=fakes.ENCRYPTION_KEY_ID, - host='host@backend#pool') - - fake_image_service = fake_image.FakeImageService() - image_meta = {} - image_id = fakes.IMAGE_ID - image_meta['id'] = image_id - image_meta['status'] = 'active' - image_meta['size'] = 1 - image_meta['cinder_encryption_key_id'] = \ - '00000000-0000-0000-0000-000000000000' - image_location = 'abc' - - fake_db.volume_update.return_value = volume - fake_manager._create_from_image(self.ctxt, volume, - image_location, image_id, - image_meta, fake_image_service) - - fake_driver.create_volume.assert_called_once_with(volume) - fake_driver.copy_image_to_encrypted_volume.assert_called_once_with( - self.ctxt, volume, fake_image_service, image_id) - mock_handle_bootable.assert_called_once_with(self.ctxt, volume, - image_id=image_id, - image_meta=image_meta) - mock_cleanup_cg.assert_called_once_with(volume) - - @ddt.data(True, False) - def test__copy_image_to_volume(self, is_encrypted): - fake_db = mock.MagicMock() - fake_driver = mock.MagicMock() - fake_volume_manager = mock.MagicMock() - fake_manager = create_volume_manager.CreateVolumeFromSpecTask( - fake_volume_manager, fake_db, fake_driver) - key = fakes.ENCRYPTION_KEY_ID if is_encrypted else None - volume = fake_volume.fake_volume_obj( - self.ctxt, - encryption_key_id=key) - - fake_image_service = 
fake_image.FakeImageService() - image_id = fakes.IMAGE_ID - image_meta = {'id': image_id} - image_location = 'abc' - - fake_manager._copy_image_to_volume(self.ctxt, volume, image_meta, - image_location, fake_image_service) - if is_encrypted: - fake_driver.copy_image_to_encrypted_volume.assert_called_once_with( - self.ctxt, volume, fake_image_service, image_id) - else: - fake_driver.copy_image_to_volume.assert_called_once_with( - self.ctxt, volume, fake_image_service, image_id) - - -class CreateVolumeFlowManagerGlanceCinderBackendCase(test.TestCase): - - def setUp(self): - super(CreateVolumeFlowManagerGlanceCinderBackendCase, self).setUp() - self.ctxt = context.get_admin_context() - - @mock.patch('cinder.volume.flows.manager.create_volume.' - 'CreateVolumeFromSpecTask.' - '_cleanup_cg_in_volume') - @mock.patch('cinder.image.image_utils.TemporaryImages.fetch') - @mock.patch('cinder.volume.flows.manager.create_volume.' - 'CreateVolumeFromSpecTask.' - '_handle_bootable_volume_glance_meta') - @mock.patch('cinder.image.image_utils.qemu_img_info') - def test_create_from_image_volume(self, mock_qemu_info, handle_bootable, - mock_fetch_img, mock_cleanup_cg, - format='raw', owner=None, - location=True): - self.flags(allowed_direct_url_schemes=['cinder']) - mock_fetch_img.return_value = mock.MagicMock( - spec=utils.get_file_spec()) - fake_db = mock.MagicMock() - fake_driver = mock.MagicMock() - fake_manager = create_volume_manager.CreateVolumeFromSpecTask( - mock.MagicMock(), fake_db, fake_driver) - fake_image_service = fake_image.FakeImageService() - - volume = fake_volume.fake_volume_obj(self.ctxt, - host='host@backend#pool') - image_volume = fake_volume.fake_volume_obj(self.ctxt, - volume_metadata={}) - image_id = fakes.IMAGE_ID - image_info = imageutils.QemuImgInfo() - image_info.virtual_size = '1073741824' - mock_qemu_info.return_value = image_info - - url = 'cinder://%s' % image_volume['id'] - image_location = None - if location: - image_location = (url, [{'url': url, 
'metadata': {}}]) - image_meta = {'id': image_id, - 'container_format': 'bare', - 'disk_format': format, - 'size': 1024, - 'owner': owner or self.ctxt.project_id, - 'virtual_size': None, - 'cinder_encryption_key_id': None} - - fake_driver.clone_image.return_value = (None, False) - fake_db.volume_get_all_by_host.return_value = [image_volume] - - fake_manager._create_from_image(self.ctxt, - volume, - image_location, - image_id, - image_meta, - fake_image_service) - if format is 'raw' and not owner and location: - fake_driver.create_cloned_volume.assert_called_once_with( - volume, image_volume) - handle_bootable.assert_called_once_with(self.ctxt, volume, - image_id=image_id, - image_meta=image_meta) - else: - self.assertFalse(fake_driver.create_cloned_volume.called) - mock_cleanup_cg.assert_called_once_with(volume) - - def test_create_from_image_volume_in_qcow2_format(self): - self.test_create_from_image_volume(format='qcow2') - - def test_create_from_image_volume_of_other_owner(self): - self.test_create_from_image_volume(owner='fake-owner') - - def test_create_from_image_volume_without_location(self): - self.test_create_from_image_volume(location=False) - - -@ddt.ddt -@mock.patch('cinder.image.image_utils.TemporaryImages.fetch') -@mock.patch('cinder.volume.flows.manager.create_volume.' - 'CreateVolumeFromSpecTask.' - '_handle_bootable_volume_glance_meta') -@mock.patch('cinder.volume.flows.manager.create_volume.' - 'CreateVolumeFromSpecTask.' - '_create_from_source_volume') -@mock.patch('cinder.volume.flows.manager.create_volume.' - 'CreateVolumeFromSpecTask.' 
- '_create_from_image_download') -@mock.patch('cinder.context.get_internal_tenant_context') -class CreateVolumeFlowManagerImageCacheTestCase(test.TestCase): - - def setUp(self): - super(CreateVolumeFlowManagerImageCacheTestCase, self).setUp() - self.ctxt = context.get_admin_context() - self.mock_db = mock.MagicMock() - self.mock_driver = mock.MagicMock() - self.mock_cache = mock.MagicMock() - self.mock_image_service = mock.MagicMock() - self.mock_volume_manager = mock.MagicMock() - - self.internal_context = self.ctxt - self.internal_context.user_id = 'abc123' - self.internal_context.project_id = 'def456' - - @mock.patch('cinder.image.image_utils.check_available_space') - def test_create_from_image_clone_image_and_skip_cache( - self, mock_check_space, mock_get_internal_context, - mock_create_from_img_dl, mock_create_from_src, - mock_handle_bootable, mock_fetch_img): - self.mock_driver.clone_image.return_value = (None, True) - volume = fake_volume.fake_volume_obj(self.ctxt, - host='host@backend#pool') - - image_location = 'someImageLocationStr' - image_id = fakes.IMAGE_ID - image_meta = {'virtual_size': '1073741824', 'size': 1073741824} - - manager = create_volume_manager.CreateVolumeFromSpecTask( - self.mock_volume_manager, - self.mock_db, - self.mock_driver, - image_volume_cache=self.mock_cache - ) - - manager._create_from_image(self.ctxt, - volume, - image_location, - image_id, - image_meta, - self.mock_image_service) - - # Make sure check_available_space is always called - self.assertTrue(mock_check_space.called) - - # Make sure clone_image is always called even if the cache is enabled - self.assertTrue(self.mock_driver.clone_image.called) - - # Create from source shouldn't happen if clone_image succeeds - self.assertFalse(mock_create_from_src.called) - - # The image download should not happen if clone_image succeeds - self.assertFalse(mock_create_from_img_dl.called) - - mock_handle_bootable.assert_called_once_with( - self.ctxt, - volume, - image_id=image_id, - 
image_meta=image_meta - ) - - @mock.patch('cinder.image.image_utils.qemu_img_info') - @mock.patch('cinder.image.image_utils.check_available_space') - def test_create_from_image_cannot_use_cache( - self, mock_qemu_info, mock_check_space, mock_get_internal_context, - mock_create_from_img_dl, mock_create_from_src, - mock_handle_bootable, mock_fetch_img): - mock_get_internal_context.return_value = None - self.mock_driver.clone_image.return_value = (None, False) - volume = fake_volume.fake_volume_obj(self.ctxt, - host='host@backend#pool') - image_info = imageutils.QemuImgInfo() - image_info.virtual_size = '1073741824' - mock_qemu_info.return_value = image_info - - image_location = 'someImageLocationStr' - image_id = fakes.IMAGE_ID - image_meta = {'id': image_id, - 'virtual_size': '1073741824', - 'size': 1073741824} - - manager = create_volume_manager.CreateVolumeFromSpecTask( - self.mock_volume_manager, - self.mock_db, - self.mock_driver, - image_volume_cache=self.mock_cache - ) - - manager._create_from_image(self.ctxt, - volume, - image_location, - image_id, - image_meta, - self.mock_image_service) - - # Make sure check_available_space is always called - self.assertTrue(mock_check_space.called) - - # Make sure clone_image is always called - self.assertTrue(self.mock_driver.clone_image.called) - - # Create from source shouldn't happen if cache cannot be used. - self.assertFalse(mock_create_from_src.called) - - # The image download should happen if clone fails and we can't use the - # image-volume cache. 
- mock_create_from_img_dl.assert_called_once_with( - self.ctxt, - volume, - image_location, - image_meta, - self.mock_image_service - ) - - # This should not attempt to use a minimal size volume - self.assertFalse(self.mock_db.volume_update.called) - - # Make sure we didn't try and create a cache entry - self.assertFalse(self.mock_cache.ensure_space.called) - self.assertFalse(self.mock_cache.create_cache_entry.called) - - mock_handle_bootable.assert_called_once_with( - self.ctxt, - volume, - image_id=image_id, - image_meta=image_meta - ) - - @ddt.data( - NotImplementedError('Driver does not support clone'), - exception.CinderException('Error during cloning')) - def test_create_from_image_clone_failure( - self, effect, mock_get_internal_context, - mock_create_from_img_dl, mock_create_from_src, - mock_handle_bootable, mock_fetch_img): - mock_get_internal_context.return_value = None - volume = fake_volume.fake_volume_obj(self.ctxt) - mock_create_from_src.side_effect = effect - - image_id = fakes.IMAGE_ID - image_meta = {'virtual_size': '1073741824'} - - manager = create_volume_manager.CreateVolumeFromSpecTask( - self.mock_volume_manager, - self.mock_db, - self.mock_driver, - image_volume_cache=self.mock_cache - ) - - model, result = manager._create_from_image_cache(self.ctxt, - None, - volume, - image_id, - image_meta) - - self.assertIsNone(model) - self.assertFalse(result) - - @mock.patch('cinder.image.image_utils.check_available_space') - @mock.patch('cinder.image.image_utils.qemu_img_info') - @mock.patch('cinder.db.volume_update') - def test_create_from_image_extend_failure( - self, mock_volume_update, mock_qemu_info, mock_check_size, - mock_get_internal_context, mock_create_from_img_dl, - mock_create_from_src, mock_handle_bootable, mock_fetch_img): - self.mock_driver.clone_image.return_value = (None, False) - self.mock_cache.get_entry.return_value = None - self.mock_driver.extend_volume.side_effect = ( - exception.CinderException('Error during extending')) - - 
volume_size = 2 - volume = fake_volume.fake_volume_obj(self.ctxt, - host='host@backend#pool', - size=volume_size) - - image_info = imageutils.QemuImgInfo() - image_info.virtual_size = '1073741824' - mock_qemu_info.return_value = image_info - - image_location = 'someImageLocationStr' - image_id = fakes.IMAGE_ID - image_meta = {'virtual_size': '1073741824', 'size': '1073741824'} - - manager = create_volume_manager.CreateVolumeFromSpecTask( - self.mock_volume_manager, - self.mock_db, - self.mock_driver, - image_volume_cache=self.mock_cache - ) - - self.assertRaises(exception.CinderException, - manager._create_from_image, - self.ctxt, - volume, - image_location, - image_id, - image_meta, - self.mock_image_service) - - mock_volume_update.assert_any_call(self.ctxt, volume.id, {'size': 1}) - self.assertEqual(volume_size, volume.size) - - @mock.patch('cinder.image.image_utils.check_available_space') - def test_create_from_image_bigger_size( - self, mock_check_space, mock_get_internal_context, - mock_create_from_img_dl, mock_create_from_src, - mock_handle_bootable, mock_fetch_img): - volume = fake_volume.fake_volume_obj(self.ctxt) - - image_location = 'someImageLocationStr' - image_id = fakes.IMAGE_ID - image_meta = {'virtual_size': '2147483648', 'size': 2147483648} - - manager = create_volume_manager.CreateVolumeFromSpecTask( - self.mock_volume_manager, - self.mock_db, - self.mock_driver, - image_volume_cache=self.mock_cache - ) - - self.assertRaises( - exception.ImageUnacceptable, - manager._create_from_image, - self.ctxt, - volume, - image_location, - image_id, - image_meta, - self.mock_image_service) - - def test_create_from_image_cache_hit( - self, mock_get_internal_context, mock_create_from_img_dl, - mock_create_from_src, mock_handle_bootable, mock_fetch_img): - self.mock_driver.clone_image.return_value = (None, False) - image_volume_id = '70a599e0-31e7-49b7-b260-868f441e862b' - self.mock_cache.get_entry.return_value = { - 'volume_id': image_volume_id - } - - volume = 
fake_volume.fake_volume_obj(self.ctxt, - host='host@backend#pool') - - image_location = 'someImageLocationStr' - image_id = fakes.IMAGE_ID - image_meta = {'virtual_size': None, 'size': 1024} - - manager = create_volume_manager.CreateVolumeFromSpecTask( - self.mock_volume_manager, - self.mock_db, - self.mock_driver, - image_volume_cache=self.mock_cache - ) - - manager._create_from_image(self.ctxt, - volume, - image_location, - image_id, - image_meta, - self.mock_image_service) - - # Make sure clone_image is always called even if the cache is enabled - self.assertTrue(self.mock_driver.clone_image.called) - - # For a cache hit it should only clone from the image-volume - mock_create_from_src.assert_called_once_with(self.ctxt, - volume, - image_volume_id) - - # The image download should not happen when we get a cache hit - self.assertFalse(mock_create_from_img_dl.called) - - mock_handle_bootable.assert_called_once_with( - self.ctxt, - volume, - image_id=image_id, - image_meta=image_meta - ) - - @mock.patch('cinder.db.volume_update') - @mock.patch('cinder.objects.Volume.get_by_id') - @mock.patch('cinder.image.image_utils.qemu_img_info') - @mock.patch('cinder.image.image_utils.check_available_space') - def test_create_from_image_cache_miss( - self, mock_check_size, mock_qemu_info, mock_volume_get, - mock_volume_update, mock_get_internal_context, - mock_create_from_img_dl, mock_create_from_src, - mock_handle_bootable, mock_fetch_img): - mock_get_internal_context.return_value = self.ctxt - mock_fetch_img.return_value = mock.MagicMock( - spec=utils.get_file_spec()) - image_info = imageutils.QemuImgInfo() - image_info.virtual_size = '2147483648' - mock_qemu_info.return_value = image_info - self.mock_driver.clone_image.return_value = (None, False) - self.mock_cache.get_entry.return_value = None - - volume = fake_volume.fake_volume_obj(self.ctxt, size=10, - host='foo@bar#pool') - mock_volume_get.return_value = volume - - image_location = 'someImageLocationStr' - image_id = 
fakes.IMAGE_ID - image_meta = {'id': image_id, - 'size': 2000000} - - manager = create_volume_manager.CreateVolumeFromSpecTask( - self.mock_volume_manager, - self.mock_db, - self.mock_driver, - image_volume_cache=self.mock_cache - ) - - manager._create_from_image(self.ctxt, - volume, - image_location, - image_id, - image_meta, - self.mock_image_service) - - # Make sure clone_image is always called - self.assertTrue(self.mock_driver.clone_image.called) - - # The image download should happen if clone fails and - # we get a cache miss - mock_create_from_img_dl.assert_called_once_with( - self.ctxt, - mock.ANY, - image_location, - image_meta, - self.mock_image_service - ) - - # The volume size should be reduced to virtual_size and then put back - mock_volume_update.assert_any_call(self.ctxt, volume.id, {'size': 2}) - mock_volume_update.assert_any_call(self.ctxt, volume.id, {'size': 10}) - - # Make sure created a new cache entry - (self.mock_volume_manager. - _create_image_cache_volume_entry.assert_called_once_with( - self.ctxt, volume, image_id, image_meta)) - - mock_handle_bootable.assert_called_once_with( - self.ctxt, - volume, - image_id=image_id, - image_meta=image_meta - ) - - @mock.patch('cinder.db.volume_update') - @mock.patch('cinder.objects.Volume.get_by_id') - @mock.patch('cinder.image.image_utils.qemu_img_info') - @mock.patch('cinder.image.image_utils.check_available_space') - def test_create_from_image_cache_miss_error_downloading( - self, mock_check_size, mock_qemu_info, mock_volume_get, - mock_volume_update, mock_get_internal_context, - mock_create_from_img_dl, mock_create_from_src, - mock_handle_bootable, mock_fetch_img): - mock_fetch_img.return_value = mock.MagicMock() - image_info = imageutils.QemuImgInfo() - image_info.virtual_size = '2147483648' - mock_qemu_info.return_value = image_info - self.mock_driver.clone_image.return_value = (None, False) - self.mock_cache.get_entry.return_value = None - - volume = fake_volume.fake_volume_obj(self.ctxt, 
size=10, - host='foo@bar#pool') - mock_volume_get.return_value = volume - - mock_create_from_img_dl.side_effect = exception.CinderException() - - image_location = 'someImageLocationStr' - image_id = fakes.IMAGE_ID - image_meta = mock.MagicMock() - - manager = create_volume_manager.CreateVolumeFromSpecTask( - self.mock_volume_manager, - self.mock_db, - self.mock_driver, - image_volume_cache=self.mock_cache - ) - - self.assertRaises( - exception.CinderException, - manager._create_from_image, - self.ctxt, - volume, - image_location, - image_id, - image_meta, - self.mock_image_service - ) - - # Make sure clone_image is always called - self.assertTrue(self.mock_driver.clone_image.called) - - # The image download should happen if clone fails and - # we get a cache miss - mock_create_from_img_dl.assert_called_once_with( - self.ctxt, - mock.ANY, - image_location, - image_meta, - self.mock_image_service - ) - - # The volume size should be reduced to virtual_size and then put back, - # especially if there is an exception while creating the volume. 
- self.assertEqual(2, mock_volume_update.call_count) - mock_volume_update.assert_any_call(self.ctxt, volume.id, {'size': 2}) - mock_volume_update.assert_any_call(self.ctxt, volume.id, {'size': 10}) - - # Make sure we didn't try and create a cache entry - self.assertFalse(self.mock_cache.ensure_space.called) - self.assertFalse(self.mock_cache.create_cache_entry.called) - - @mock.patch('cinder.image.image_utils.qemu_img_info') - @mock.patch('cinder.image.image_utils.check_available_space') - def test_create_from_image_no_internal_context( - self, mock_chk_space, mock_qemu_info, mock_get_internal_context, - mock_create_from_img_dl, mock_create_from_src, - mock_handle_bootable, mock_fetch_img): - self.mock_driver.clone_image.return_value = (None, False) - mock_get_internal_context.return_value = None - volume = fake_volume.fake_volume_obj(self.ctxt, - host='host@backend#pool') - image_info = imageutils.QemuImgInfo() - image_info.virtual_size = '1073741824' - mock_qemu_info.return_value = image_info - - image_location = 'someImageLocationStr' - image_id = fakes.IMAGE_ID - image_meta = {'virtual_size': '1073741824', 'size': 1073741824} - - manager = create_volume_manager.CreateVolumeFromSpecTask( - self.mock_volume_manager, - self.mock_db, - self.mock_driver, - image_volume_cache=self.mock_cache - ) - - manager._create_from_image(self.ctxt, - volume, - image_location, - image_id, - image_meta, - self.mock_image_service) - - # Make sure check_available_space is always called - self.assertTrue(mock_chk_space.called) - - # Make sure clone_image is always called - self.assertTrue(self.mock_driver.clone_image.called) - - # Create from source shouldn't happen if cache cannot be used. - self.assertFalse(mock_create_from_src.called) - - # The image download should happen if clone fails and we can't use the - # image-volume cache due to not having an internal context available. 
- mock_create_from_img_dl.assert_called_once_with( - self.ctxt, - volume, - image_location, - image_meta, - self.mock_image_service - ) - - # This should not attempt to use a minimal size volume - self.assertFalse(self.mock_db.volume_update.called) - - # Make sure we didn't try and create a cache entry - self.assertFalse(self.mock_cache.ensure_space.called) - self.assertFalse(self.mock_cache.create_cache_entry.called) - - mock_handle_bootable.assert_called_once_with( - self.ctxt, - volume, - image_id=image_id, - image_meta=image_meta - ) - - @mock.patch('cinder.image.image_utils.check_available_space') - @mock.patch('cinder.image.image_utils.qemu_img_info') - def test_create_from_image_cache_miss_error_size_invalid( - self, mock_qemu_info, mock_check_space, mock_get_internal_context, - mock_create_from_img_dl, mock_create_from_src, - mock_handle_bootable, mock_fetch_img): - mock_fetch_img.return_value = mock.MagicMock() - image_info = imageutils.QemuImgInfo() - image_info.virtual_size = '2147483648' - mock_qemu_info.return_value = image_info - self.mock_driver.clone_image.return_value = (None, False) - self.mock_cache.get_entry.return_value = None - - volume = fake_volume.fake_volume_obj(self.ctxt, size=1, - host='foo@bar#pool') - image_volume = fake_volume.fake_db_volume(size=2) - self.mock_db.volume_create.return_value = image_volume - - image_location = 'someImageLocationStr' - image_id = fakes.IMAGE_ID - image_meta = mock.MagicMock() - - manager = create_volume_manager.CreateVolumeFromSpecTask( - self.mock_volume_manager, - self.mock_db, - self.mock_driver, - image_volume_cache=self.mock_cache - ) - - self.assertRaises( - exception.ImageUnacceptable, - manager._create_from_image, - self.ctxt, - volume, - image_location, - image_id, - image_meta, - self.mock_image_service - ) - - # The volume size should NOT be changed when in this case - self.assertFalse(self.mock_db.volume_update.called) - - # Make sure we didn't try and create a cache entry - 
self.assertFalse(self.mock_cache.ensure_space.called) - self.assertFalse(self.mock_cache.create_cache_entry.called) - - @mock.patch('cinder.image.image_utils.check_available_space') - @mock.patch('cinder.image.image_utils.qemu_img_info') - @mock.patch('cinder.message.api.API.create') - def test_create_from_image_insufficient_space( - self, mock_message_create, mock_qemu_info, mock_check_space, - mock_get_internal_context, - mock_create_from_img_dl, mock_create_from_src, - mock_handle_bootable, mock_fetch_img): - image_info = imageutils.QemuImgInfo() - image_info.virtual_size = '2147483648' - mock_qemu_info.return_value = image_info - self.mock_driver.clone_image.return_value = (None, False) - self.mock_cache.get_entry.return_value = None - - volume = fake_volume.fake_volume_obj(self.ctxt, size=1, - host='foo@bar#pool') - image_volume = fake_volume.fake_db_volume(size=2) - self.mock_db.volume_create.return_value = image_volume - - image_location = 'someImageLocationStr' - image_id = fakes.IMAGE_ID - image_meta = mock.MagicMock() - mock_check_space.side_effect = exception.ImageTooBig( - image_id=image_id, reason="fake") - - manager = create_volume_manager.CreateVolumeFromSpecTask( - self.mock_volume_manager, - self.mock_db, - self.mock_driver, - image_volume_cache=self.mock_cache - ) - - self.assertRaises( - exception.ImageTooBig, - manager._create_from_image, - self.ctxt, - volume, - image_location, - image_id, - image_meta, - self.mock_image_service - ) - - mock_message_create.assert_called_once_with( - self.ctxt, message_field.Action.COPY_IMAGE_TO_VOLUME, - resource_uuid=volume.id, - detail=message_field.Detail.NOT_ENOUGH_SPACE_FOR_IMAGE, - exception=mock.ANY) - - # The volume size should NOT be changed when in this case - self.assertFalse(self.mock_db.volume_update.called) - - # Make sure we didn't try and create a cache entry - self.assertFalse(self.mock_cache.ensure_space.called) - self.assertFalse(self.mock_cache.create_cache_entry.called) - - 
@mock.patch('cinder.image.image_utils.check_available_space') - @mock.patch('cinder.image.image_utils.qemu_img_info') - @mock.patch('cinder.message.api.API.create') - def test_create_from_image_cache_insufficient_size( - self, mock_message_create, mock_qemu_info, mock_check_space, - mock_get_internal_context, - mock_create_from_img_dl, mock_create_from_src, - mock_handle_bootable, mock_fetch_img): - image_info = imageutils.QemuImgInfo() - image_info.virtual_size = '1073741824' - mock_qemu_info.return_value = image_info - self.mock_driver.clone_image.return_value = (None, False) - self.mock_cache.get_entry.return_value = None - volume = fake_volume.fake_volume_obj(self.ctxt, size=1, - host='foo@bar#pool') - image_volume = fake_volume.fake_db_volume(size=2) - self.mock_db.volume_create.return_value = image_volume - image_id = fakes.IMAGE_ID - mock_create_from_img_dl.side_effect = exception.ImageTooBig( - image_id=image_id, reason="fake") - - image_location = 'someImageLocationStr' - image_meta = mock.MagicMock() - - manager = create_volume_manager.CreateVolumeFromSpecTask( - self.mock_volume_manager, - self.mock_db, - self.mock_driver, - image_volume_cache=self.mock_cache - ) - - self.assertRaises( - exception.ImageTooBig, - manager._create_from_image_cache_or_download, - self.ctxt, - volume, - image_location, - image_id, - image_meta, - self.mock_image_service - ) - - mock_message_create.assert_called_once_with( - self.ctxt, message_field.Action.COPY_IMAGE_TO_VOLUME, - resource_uuid=volume.id, - detail=message_field.Detail.NOT_ENOUGH_SPACE_FOR_IMAGE, - exception=mock.ANY) - - # The volume size should NOT be changed when in this case - self.assertFalse(self.mock_db.volume_update.called) - - # Make sure we didn't try and create a cache entry - self.assertFalse(self.mock_cache.ensure_space.called) - self.assertFalse(self.mock_cache.create_cache_entry.called) diff --git a/cinder/tests/unit/volume/flows/test_manage_snapshot_flow.py 
b/cinder/tests/unit/volume/flows/test_manage_snapshot_flow.py deleted file mode 100644 index a2a1af110..000000000 --- a/cinder/tests/unit/volume/flows/test_manage_snapshot_flow.py +++ /dev/null @@ -1,76 +0,0 @@ -# Copyright (c) 2017 Mirantis Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -""" Tests for manage_existing_snapshot TaskFlow.""" - -# TODO(mdovgal): add tests for other TaskFlow cases - -import mock - -from cinder import context -from cinder import test -from cinder.tests.unit import fake_constants as fake -from cinder.tests.unit import fake_snapshot -from cinder.tests.unit import fake_volume -from cinder.volume.flows.manager import manage_existing_snapshot as manager - - -class ManageSnapshotFlowTestCase(test.TestCase): - def setUp(self): - super(ManageSnapshotFlowTestCase, self).setUp() - self.ctxt = context.get_admin_context() - - @mock.patch('cinder.objects.snapshot.Snapshot.get_by_id') - def test_manage_snapshot_after_volume_extending(self, _get_by_id): - """Test checks snapshot's volume_size value after it is managed.""" - fake_size = 3 - fake_snap = fake_snapshot.fake_snapshot_obj(self.ctxt, - volume_size=fake_size) - fake_snap.save = mock.MagicMock() - _get_by_id.return_value = fake_snap - - real_size = 1 - mock_db = mock.MagicMock() - mock_driver = mock.MagicMock() - mock_manage_existing_ref = mock.MagicMock() - mock_driver.manage_existing_snapshot.return_value = {} - - task = manager.ManageExistingTask(mock_db, mock_driver) - result = 
task.execute(self.ctxt, fake_snap, mock_manage_existing_ref, - real_size) - snap_after_manage = result['snapshot'] - # assure value is equal that size, that we want - self.assertEqual(real_size, snap_after_manage['volume_size']) - - @mock.patch('cinder.quota.QuotaEngine.reserve') - @mock.patch('cinder.db.sqlalchemy.api.volume_type_get') - @mock.patch('cinder.objects.volume.Volume.get_by_id') - def test_quota_reservation_task(self, mock_get_vol_by_id, mock_type_get, - mock_quota_reserve): - fake_size = 1 - fake_snap = fake_snapshot.fake_snapshot_obj(self.ctxt, - volume_size=fake_size) - fake_snap.save = mock.MagicMock() - fake_vol = fake_volume.fake_volume_obj( - self.ctxt, id=fake.VOLUME_ID, volume_type_id=fake.VOLUME_TYPE_ID) - mock_get_vol_by_id.return_value = fake_vol - mock_type_get.return_value = {'name': 'fake_type_name'} - - task = manager.QuotaReserveTask() - task.execute(self.ctxt, fake_size, fake_snap, {}) - - reserve_opts = {'gigabytes': 1, 'snapshots': 1, - 'gigabytes_fake_type_name': 1, - 'snapshots_fake_type_name': 1} - mock_quota_reserve.assert_called_once_with(self.ctxt, **reserve_opts) diff --git a/cinder/tests/unit/volume/flows/test_manage_volume_flow.py b/cinder/tests/unit/volume/flows/test_manage_volume_flow.py deleted file mode 100644 index bf4f75d9b..000000000 --- a/cinder/tests/unit/volume/flows/test_manage_volume_flow.py +++ /dev/null @@ -1,157 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
-""" Tests for manage_existing TaskFlow """ - -import inspect -import mock -import taskflow.engines - -from cinder import context -from cinder import test -from cinder.tests.unit import fake_constants as fakes -from cinder.tests.unit import fake_volume -from cinder.tests.unit.volume.flows import fake_volume_api -from cinder.volume.flows.api import manage_existing -from cinder.volume.flows import common as flow_common -from cinder.volume.flows.manager import manage_existing as manager - - -class ManageVolumeFlowTestCase(test.TestCase): - - def setUp(self): - super(ManageVolumeFlowTestCase, self).setUp() - self.ctxt = context.get_admin_context() - self.counter = float(0) - - def test_cast_manage_existing(self): - volume = fake_volume.fake_volume_type_obj(self.ctxt) - - spec = { - 'name': 'name', - 'description': 'description', - 'host': 'host', - 'ref': 'ref', - 'volume_type': 'volume_type', - 'metadata': 'metadata', - 'availability_zone': 'availability_zone', - 'bootable': 'bootable', - 'volume_id': volume.id, - } - - # Fake objects assert specs - task = manage_existing.ManageCastTask( - fake_volume_api.FakeSchedulerRpcAPI(spec, self), - fake_volume_api.FakeDb()) - - create_what = spec.copy() - create_what.update({'volume': volume}) - create_what.pop('volume_id') - task.execute(self.ctxt, **create_what) - - @staticmethod - def _stub_volume_object_get(self): - volume = { - 'id': fakes.VOLUME_ID, - 'volume_type_id': fakes.VOLUME_TYPE_ID, - 'status': 'creating', - 'name': fakes.VOLUME_NAME, - } - return fake_volume.fake_volume_obj(self.ctxt, **volume) - - def test_prepare_for_quota_reserveration_task_execute(self): - mock_db = mock.MagicMock() - mock_driver = mock.MagicMock() - mock_manage_existing_ref = mock.MagicMock() - mock_get_size = self.mock_object( - mock_driver, 'manage_existing_get_size') - mock_get_size.return_value = '5' - - volume_ref = self._stub_volume_object_get(self) - task = manager.PrepareForQuotaReservationTask(mock_db, mock_driver) - - result = 
task.execute(self.ctxt, volume_ref, mock_manage_existing_ref) - - self.assertEqual(volume_ref, result['volume_properties']) - self.assertEqual('5', result['size']) - self.assertEqual(volume_ref.id, result['volume_spec']['volume_id']) - mock_get_size.assert_called_once_with( - volume_ref, mock_manage_existing_ref) - - def test_prepare_for_quota_reservation_task_revert(self): - mock_db = mock.MagicMock() - mock_driver = mock.MagicMock() - mock_result = mock.MagicMock() - mock_flow_failures = mock.MagicMock() - mock_error_out = self.mock_object(flow_common, 'error_out') - volume_ref = self._stub_volume_object_get(self) - task = manager.PrepareForQuotaReservationTask(mock_db, mock_driver) - - task.revert(self.ctxt, mock_result, mock_flow_failures, volume_ref) - mock_error_out.assert_called_once_with(volume_ref, - reason='Volume manage failed.', - status='error_managing') - - def test_get_flow(self): - mock_volume_flow = mock.Mock() - mock_linear_flow = self.mock_object(manager.linear_flow, 'Flow') - mock_linear_flow.return_value = mock_volume_flow - mock_taskflow_engine = self.mock_object(taskflow.engines, 'load') - expected_store = { - 'context': mock.sentinel.context, - 'volume': mock.sentinel.volume, - 'manage_existing_ref': mock.sentinel.ref, - 'optional_args': {'is_quota_committed': False}, - } - - manager.get_flow( - mock.sentinel.context, mock.sentinel.db, mock.sentinel.driver, - mock.sentinel.host, mock.sentinel.volume, mock.sentinel.ref) - - mock_linear_flow.assert_called_once_with( - 'volume_manage_existing_manager') - mock_taskflow_engine.assert_called_once_with( - mock_volume_flow, store=expected_store) - - def test_get_flow_volume_flow_tasks(self): - """Test that all expected parameter names exist for added tasks.""" - mock_taskflow_engine = self.mock_object(taskflow.engines, 'load') - mock_taskflow_engine.side_effect = self._verify_volume_flow_tasks - - manager.get_flow( - mock.sentinel.context, mock.sentinel.db, mock.sentinel.driver, - 
mock.sentinel.host, mock.sentinel.volume, mock.sentinel.ref) - - def _verify_volume_flow_tasks(self, volume_flow, store=None): - param_names = [ - 'context', - 'volume', - 'manage_existing_ref', - 'optional_args', - ] - - provides = {'self'} - revert_provides = ['self', 'result', 'flow_failures'] - for node in volume_flow.iter_nodes(): - task = node[0] - # Subsequent tasks may use parameters defined in a previous task's - # default_provides list. Add these names to the provides set. - if task.default_provides: - for p in task.default_provides: - provides.add(p) - - execute_args = inspect.getargspec(task.execute)[0] - execute_args = [x for x in execute_args if x not in provides] - [self.assertIn(arg, param_names) for arg in execute_args] - - revert_args = inspect.getargspec(task.revert)[0] - revert_args = [x for x in revert_args if x not in revert_provides] - [self.assertIn(arg, param_names) for arg in revert_args] diff --git a/cinder/tests/unit/volume/test_availability_zone.py b/cinder/tests/unit/volume/test_availability_zone.py deleted file mode 100644 index 64bc0b86f..000000000 --- a/cinder/tests/unit/volume/test_availability_zone.py +++ /dev/null @@ -1,105 +0,0 @@ -# Copyright 2010 United States Government as represented by the -# Administrator of the National Aeronautics and Space Administration. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
-"""Test for volume availability zone.""" - -import datetime -import mock - -from oslo_utils import timeutils - -from cinder.tests.unit import volume as base -import cinder.volume - - -class AvailabilityZoneTestCase(base.BaseVolumeTestCase): - def setUp(self): - super(AvailabilityZoneTestCase, self).setUp() - self.get_all = self.patch( - 'cinder.db.service_get_all', autospec=True, - return_value = [{'availability_zone': 'a', 'disabled': False}]) - - def test_list_availability_zones_cached(self): - azs = self.volume_api.list_availability_zones(enable_cache=True) - self.assertEqual([{"name": 'a', 'available': True}], list(azs)) - self.assertIsNotNone(self.volume_api.availability_zones_last_fetched) - self.assertTrue(self.get_all.called) - self.volume_api.list_availability_zones(enable_cache=True) - self.assertEqual(1, self.get_all.call_count) - - def test_list_availability_zones_no_cached(self): - azs = self.volume_api.list_availability_zones(enable_cache=False) - self.assertEqual([{"name": 'a', 'available': True}], list(azs)) - self.assertIsNone(self.volume_api.availability_zones_last_fetched) - - self.get_all.return_value[0]['disabled'] = True - azs = self.volume_api.list_availability_zones(enable_cache=False) - self.assertEqual([{"name": 'a', 'available': False}], list(azs)) - self.assertIsNone(self.volume_api.availability_zones_last_fetched) - - @mock.patch('oslo_utils.timeutils.utcnow') - def test_list_availability_zones_refetched(self, mock_utcnow): - mock_utcnow.return_value = datetime.datetime.utcnow() - azs = self.volume_api.list_availability_zones(enable_cache=True) - self.assertEqual([{"name": 'a', 'available': True}], list(azs)) - self.assertIsNotNone(self.volume_api.availability_zones_last_fetched) - last_fetched = self.volume_api.availability_zones_last_fetched - self.assertTrue(self.get_all.called) - self.volume_api.list_availability_zones(enable_cache=True) - self.assertEqual(1, self.get_all.call_count) - - # The default cache time is 3600, push past 
that... - mock_utcnow.return_value = (timeutils.utcnow() + - datetime.timedelta(0, 3800)) - self.get_all.return_value = [ - { - 'availability_zone': 'a', - 'disabled': False, - }, - { - 'availability_zone': 'b', - 'disabled': False, - }, - ] - azs = self.volume_api.list_availability_zones(enable_cache=True) - azs = sorted([n['name'] for n in azs]) - self.assertEqual(['a', 'b'], azs) - self.assertEqual(2, self.get_all.call_count) - self.assertGreater(self.volume_api.availability_zones_last_fetched, - last_fetched) - mock_utcnow.assert_called_with() - - def test_list_availability_zones_enabled_service(self): - def sort_func(obj): - return obj['name'] - - self.get_all.return_value = [ - {'availability_zone': 'ping', 'disabled': 0}, - {'availability_zone': 'ping', 'disabled': 1}, - {'availability_zone': 'pong', 'disabled': 0}, - {'availability_zone': 'pung', 'disabled': 1}, - ] - - volume_api = cinder.volume.api.API() - azs = volume_api.list_availability_zones() - azs = sorted(azs, key=sort_func) - - expected = sorted([ - {'name': 'pung', 'available': False}, - {'name': 'pong', 'available': True}, - {'name': 'ping', 'available': True}, - ], key=sort_func) - - self.assertEqual(expected, azs) diff --git a/cinder/tests/unit/volume/test_capabilities.py b/cinder/tests/unit/volume/test_capabilities.py deleted file mode 100644 index 20b78522a..000000000 --- a/cinder/tests/unit/volume/test_capabilities.py +++ /dev/null @@ -1,210 +0,0 @@ -# Copyright 2010 United States Government as represented by the -# Administrator of the National Aeronautics and Space Administration. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. 
You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import mock - -from oslo_serialization import jsonutils - -from cinder import exception -from cinder.tests import fake_driver -from cinder.tests.unit import volume as base -from cinder.volume import driver -from cinder.volume import manager as vol_manager -# import cinder.volume.targets.tgt - -"""Tests for volume capabilities test cases.""" - - -class VolumeCapabilitiesTestCase(base.BaseVolumeTestCase): - @mock.patch.object(fake_driver.FakeLoggingVolumeDriver, 'get_volume_stats') - @mock.patch.object(driver.BaseVD, '_init_vendor_properties') - def test_get_capabilities(self, mock_init_vendor, mock_get_volume_stats): - stats = { - 'volume_backend_name': 'lvm', - 'vendor_name': 'Open Source', - 'storage_protocol': 'iSCSI', - 'vendor_prefix': 'abcd' - } - expected = stats.copy() - expected['properties'] = { - 'compression': { - 'title': 'Compression', - 'description': 'Enables compression.', - 'type': 'boolean'}, - 'qos': { - 'title': 'QoS', - 'description': 'Enables QoS.', - 'type': 'boolean'}, - 'replication_enabled': { - 'title': 'Replication', - 'description': 'Enables replication.', - 'type': 'boolean'}, - 'thin_provisioning': { - 'title': 'Thin Provisioning', - 'description': 'Sets thin provisioning.', - 'type': 'boolean'}, - } - - # Test to get updated capabilities - discover = True - mock_get_volume_stats.return_value = stats - mock_init_vendor.return_value = ({}, None) - capabilities = self.volume.get_capabilities(self.context, - discover) - self.assertEqual(expected, capabilities) - mock_get_volume_stats.assert_called_once_with(True) - - # Test to 
get existing original capabilities - mock_get_volume_stats.reset_mock() - discover = False - capabilities = self.volume.get_capabilities(self.context, - discover) - self.assertEqual(expected, capabilities) - self.assertFalse(mock_get_volume_stats.called) - - # Normal test case to get vendor unique capabilities - def init_vendor_properties(self): - properties = {} - self._set_property( - properties, - "abcd:minIOPS", - "Minimum IOPS QoS", - "Sets minimum IOPS if QoS is enabled.", - "integer", - minimum=10, - default=100) - return properties, 'abcd' - - expected['properties'].update( - {'abcd:minIOPS': { - 'title': 'Minimum IOPS QoS', - 'description': 'Sets minimum IOPS if QoS is enabled.', - 'type': 'integer', - 'minimum': 10, - 'default': 100}}) - - mock_get_volume_stats.reset_mock() - mock_init_vendor.reset_mock() - discover = True - mock_init_vendor.return_value = ( - init_vendor_properties(self.volume.driver)) - capabilities = self.volume.get_capabilities(self.context, - discover) - self.assertEqual(expected, capabilities) - self.assertTrue(mock_get_volume_stats.called) - - @mock.patch.object(fake_driver.FakeLoggingVolumeDriver, 'get_volume_stats') - @mock.patch.object(driver.BaseVD, '_init_vendor_properties') - @mock.patch.object(driver.BaseVD, '_init_standard_capabilities') - def test_get_capabilities_prefix_error(self, mock_init_standard, - mock_init_vendor, - mock_get_volume_stats): - - # Error test case: property does not match vendor prefix - def init_vendor_properties(self): - properties = {} - self._set_property( - properties, - "aaa:minIOPS", - "Minimum IOPS QoS", - "Sets minimum IOPS if QoS is enabled.", - "integer") - self._set_property( - properties, - "abcd:compression_type", - "Compression type", - "Specifies compression type.", - "string") - - return properties, 'abcd' - - expected = { - 'abcd:compression_type': { - 'title': 'Compression type', - 'description': 'Specifies compression type.', - 'type': 'string'}} - - discover = True - 
mock_get_volume_stats.return_value = {} - mock_init_standard.return_value = {} - mock_init_vendor.return_value = ( - init_vendor_properties(self.volume.driver)) - capabilities = self.volume.get_capabilities(self.context, - discover) - self.assertEqual(expected, capabilities['properties']) - - @mock.patch.object(fake_driver.FakeLoggingVolumeDriver, 'get_volume_stats') - @mock.patch.object(driver.BaseVD, '_init_vendor_properties') - @mock.patch.object(driver.BaseVD, '_init_standard_capabilities') - def test_get_capabilities_fail_override(self, mock_init_standard, - mock_init_vendor, - mock_get_volume_stats): - - # Error test case: property cannot override any standard capabilities - def init_vendor_properties(self): - properties = {} - self._set_property( - properties, - "qos", - "Minimum IOPS QoS", - "Sets minimum IOPS if QoS is enabled.", - "integer") - self._set_property( - properties, - "ab::cd:compression_type", - "Compression type", - "Specifies compression type.", - "string") - - return properties, 'ab::cd' - - expected = { - 'ab__cd:compression_type': { - 'title': 'Compression type', - 'description': 'Specifies compression type.', - 'type': 'string'}} - - discover = True - mock_get_volume_stats.return_value = {} - mock_init_standard.return_value = {} - mock_init_vendor.return_value = ( - init_vendor_properties(self.volume.driver)) - capabilities = self.volume.get_capabilities(self.context, - discover) - self.assertEqual(expected, capabilities['properties']) - - def test_extra_capabilities(self): - # Test valid extra_capabilities. 
- fake_capabilities = {'key1': 1, 'key2': 2} - - with mock.patch.object(jsonutils, 'loads') as mock_loads: - mock_loads.return_value = fake_capabilities - manager = vol_manager.VolumeManager() - manager.stats = {'pools': {}} - manager.driver.set_initialized() - manager.publish_service_capabilities(self.context) - self.assertTrue(mock_loads.called) - volume_stats = manager.last_capabilities - self.assertEqual(fake_capabilities['key1'], - volume_stats['key1']) - self.assertEqual(fake_capabilities['key2'], - volume_stats['key2']) - - def test_extra_capabilities_fail(self): - with mock.patch.object(jsonutils, 'loads') as mock_loads: - mock_loads.side_effect = exception.CinderException('test') - self.assertRaises(exception.CinderException, - vol_manager.VolumeManager) diff --git a/cinder/tests/unit/volume/test_connection.py b/cinder/tests/unit/volume/test_connection.py deleted file mode 100644 index 25f6cb41f..000000000 --- a/cinder/tests/unit/volume/test_connection.py +++ /dev/null @@ -1,1168 +0,0 @@ -# Copyright 2010 United States Government as represented by the -# Administrator of the National Aeronautics and Space Administration. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
-"""Tests for Volume connection test cases.""" - -import ddt -import mock - -from cinder import context -from cinder import db -from cinder import exception -from cinder.message import message_field -from cinder import objects -from cinder.objects import fields -from cinder.tests import fake_driver -from cinder.tests.unit import fake_constants as fake -from cinder.tests.unit import fake_volume -from cinder.tests.unit import utils as tests_utils -from cinder.tests.unit import volume as base -import cinder.volume -import cinder.volume.targets -import cinder.volume.targets.iscsi - - -@ddt.ddt -class DiscardFlagTestCase(base.BaseVolumeTestCase): - - def setUp(self): - super(DiscardFlagTestCase, self).setUp() - self.volume.driver = mock.MagicMock() - - @ddt.data(dict(config_discard_flag=True, - driver_discard_flag=None, - expected_flag=True), - dict(config_discard_flag=False, - driver_discard_flag=None, - expected_flag=None), - dict(config_discard_flag=True, - driver_discard_flag=True, - expected_flag=True), - dict(config_discard_flag=False, - driver_discard_flag=True, - expected_flag=True), - dict(config_discard_flag=False, - driver_discard_flag=False, - expected_flag=False), - dict(config_discard_flag=None, - driver_discard_flag=True, - expected_flag=True), - dict(config_discard_flag=None, - driver_discard_flag=False, - expected_flag=False)) - @ddt.unpack - def test_initialize_connection_discard_flag(self, - config_discard_flag, - driver_discard_flag, - expected_flag): - self.volume.driver.create_export.return_value = None - connector = {'ip': 'IP', 'initiator': 'INITIATOR'} - - conn_info = { - 'driver_volume_type': 'iscsi', - 'data': {'access_mode': 'rw', - 'encrypted': False} - } - - if driver_discard_flag is not None: - conn_info['data']['discard'] = driver_discard_flag - - self.volume.driver.initialize_connection.return_value = conn_info - - def _safe_get(key): - if key is 'report_discard_supported': - return config_discard_flag - else: - return None - - 
self.volume.driver.configuration.safe_get.side_effect = _safe_get - - with mock.patch.object(objects, 'Volume') as mock_vol: - volume = tests_utils.create_volume(self.context) - volume.volume_type_id = None - mock_vol.get_by_id.return_value = volume - - conn_info = self.volume.initialize_connection(self.context, - volume, - connector) - - self.assertEqual(expected_flag, conn_info['data'].get('discard')) - - -class VolumeConnectionTestCase(base.BaseVolumeTestCase): - @mock.patch.object(cinder.volume.targets.iscsi.ISCSITarget, - '_get_target_chap_auth') - @mock.patch.object(db, 'volume_admin_metadata_get') - @mock.patch.object(db.sqlalchemy.api, 'volume_get') - @mock.patch.object(db, 'volume_update') - def test_initialize_connection_fetchqos(self, - _mock_volume_update, - _mock_volume_get, - _mock_volume_admin_metadata_get, - mock_get_target): - """Make sure initialize_connection returns correct information.""" - _fake_admin_meta = [{'key': 'fake-key', 'value': 'fake-value'}] - _fake_volume = {'volume_type_id': fake.VOLUME_TYPE_ID, - 'name': 'fake_name', - 'host': 'fake_host', - 'id': fake.VOLUME_ID, - 'volume_admin_metadata': _fake_admin_meta} - fake_volume_obj = fake_volume.fake_volume_obj(self.context, - **_fake_volume) - - _mock_volume_get.return_value = _fake_volume - _mock_volume_update.return_value = _fake_volume - _mock_volume_admin_metadata_get.return_value = { - 'fake-key': 'fake-value'} - - connector = {'ip': 'IP', 'initiator': 'INITIATOR'} - qos_values = {'consumer': 'front-end', - 'specs': { - 'key1': 'value1', - 'key2': 'value2'} - } - - with mock.patch.object(cinder.volume.volume_types, - 'get_volume_type_qos_specs') as type_qos, \ - mock.patch.object(cinder.tests.fake_driver.FakeLoggingVolumeDriver, - 'initialize_connection') as driver_init: - type_qos.return_value = dict(qos_specs=qos_values) - driver_init.return_value = {'data': {}} - mock_get_target.return_value = None - qos_specs_expected = {'key1': 'value1', - 'key2': 'value2'} - # 
initialize_connection() passes qos_specs that is designated to - # be consumed by front-end or both front-end and back-end - conn_info = self.volume.initialize_connection( - self.context, fake_volume_obj, connector,) - self.assertDictEqual(qos_specs_expected, - conn_info['data']['qos_specs']) - - qos_values.update({'consumer': 'both'}) - conn_info = self.volume.initialize_connection( - self.context, fake_volume_obj, connector) - self.assertDictEqual(qos_specs_expected, - conn_info['data']['qos_specs']) - # initialize_connection() skips qos_specs that is designated to be - # consumed by back-end only - qos_values.update({'consumer': 'back-end'}) - type_qos.return_value = dict(qos_specs=qos_values) - conn_info = self.volume.initialize_connection( - self.context, fake_volume_obj, connector) - self.assertIsNone(conn_info['data']['qos_specs']) - - @mock.patch.object(cinder.volume.targets.iscsi.ISCSITarget, - '_get_target_chap_auth') - @mock.patch.object(db, 'volume_admin_metadata_get') - @mock.patch.object(db.sqlalchemy.api, 'volume_get') - @mock.patch.object(db, 'volume_update') - def test_initialize_connection_qos_per_gb(self, - _mock_volume_update, - _mock_volume_get, - _mock_volume_admin_metadata_get, - mock_get_target): - """Make sure initialize_connection returns correct information.""" - _fake_admin_meta = [{'key': 'fake-key', 'value': 'fake-value'}] - _fake_volume = {'size': 3, - 'volume_type_id': fake.VOLUME_TYPE_ID, - 'name': 'fake_name', - 'host': 'fake_host', - 'id': fake.VOLUME_ID, - 'volume_admin_metadata': _fake_admin_meta} - fake_volume_obj = fake_volume.fake_volume_obj(self.context, - **_fake_volume) - - _mock_volume_get.return_value = _fake_volume - _mock_volume_update.return_value = _fake_volume - _mock_volume_admin_metadata_get.return_value = { - 'fake-key': 'fake-value'} - - connector = {'ip': 'IP', 'initiator': 'INITIATOR'} - qos_values = {'consumer': 'front-end', - 'specs': { - 'write_iops_sec_per_gb': 5, - 'read_iops_sec_per_gb': 7700, - 
'total_iops_sec_per_gb': 300000} - } - - with mock.patch.object(cinder.volume.volume_types, - 'get_volume_type_qos_specs') as type_qos, \ - mock.patch.object(cinder.tests.fake_driver.FakeLoggingVolumeDriver, - 'initialize_connection') as driver_init: - type_qos.return_value = dict(qos_specs=qos_values) - driver_init.return_value = {'data': {}} - mock_get_target.return_value = None - qos_specs_expected = {'write_iops_sec': 15, - 'read_iops_sec': 23100, - 'total_iops_sec': 900000} - # initialize_connection() passes qos_specs that is designated to - # be consumed by front-end or both front-end and back-end - conn_info = self.volume.initialize_connection( - self.context, fake_volume_obj, connector,) - self.assertDictEqual(qos_specs_expected, - conn_info['data']['qos_specs']) - - qos_values.update({'consumer': 'both'}) - conn_info = self.volume.initialize_connection( - self.context, fake_volume_obj, connector) - self.assertDictEqual(qos_specs_expected, - conn_info['data']['qos_specs']) - - @mock.patch.object(fake_driver.FakeLoggingVolumeDriver, 'create_export') - def test_initialize_connection_export_failure(self, - _mock_create_export): - """Test exception path for create_export failure.""" - volume = tests_utils.create_volume( - self.context, admin_metadata={'fake-key': 'fake-value'}, - volume_type_id=fake.VOLUME_TYPE_ID, **self.volume_params) - _mock_create_export.side_effect = exception.CinderException - - connector = {'ip': 'IP', 'initiator': 'INITIATOR'} - - self.assertRaises(exception.VolumeBackendAPIException, - self.volume.initialize_connection, - self.context, volume, connector) - - def test_initialize_connection_maintenance(self): - """Test initialize connection in maintenance.""" - test_meta1 = {'fake_key1': 'fake_value1', 'fake_key2': 'fake_value2'} - volume = tests_utils.create_volume(self.context, metadata=test_meta1, - **self.volume_params) - volume['status'] = 'maintenance' - volume_api = cinder.volume.api.API() - 
self.assertRaises(exception.InvalidVolume, - volume_api.initialize_connection, - self.context, - volume, - None) - - -@ddt.ddt -class VolumeAttachDetachTestCase(base.BaseVolumeTestCase): - - def setUp(self): - super(VolumeAttachDetachTestCase, self).setUp() - self.patch('cinder.volume.utils.clear_volume', autospec=True) - self.user_context = context.RequestContext(user_id=fake.USER_ID, - project_id=fake.PROJECT_ID) - - @ddt.data(False, True) - def test_run_attach_detach_volume_for_instance(self, volume_object): - """Make sure volume can be attached and detached from instance.""" - mountpoint = "/dev/sdf" - # attach volume to the instance then to detach - instance_uuid = '12345678-1234-5678-1234-567812345678' - volume = tests_utils.create_volume(self.user_context, - **self.volume_params) - with volume.obj_as_admin(): - volume.admin_metadata['readonly'] = True - volume.save() - volume_id = volume.id - self.volume.create_volume(self.user_context, - volume=volume) - volume_passed = volume if volume_object else None - attachment = self.volume.attach_volume(self.user_context, - volume_id, - instance_uuid, None, - mountpoint, 'ro', - volume=volume_passed) - attachment2 = self.volume.attach_volume(self.user_context, - volume_id, - instance_uuid, None, - mountpoint, 'ro', - volume=volume_passed) - self.assertEqual(attachment.id, attachment2.id) - vol = objects.Volume.get_by_id(self.context, volume_id) - self.assertEqual("in-use", vol.status) - self.assertEqual(fields.VolumeAttachStatus.ATTACHED, - attachment.attach_status) - self.assertEqual(mountpoint, attachment.mountpoint) - self.assertEqual(instance_uuid, attachment.instance_uuid) - self.assertIsNone(attachment.attached_host) - admin_metadata = vol.volume_admin_metadata - self.assertEqual(2, len(admin_metadata)) - expected = dict(readonly='True', attached_mode='ro') - ret = {} - for item in admin_metadata: - ret.update({item['key']: item['value']}) - self.assertDictEqual(expected, ret) - - connector = {'initiator': 
'iqn.2012-07.org.fake:01'} - volume = volume if volume_object else vol - conn_info = self.volume.initialize_connection(self.context, - volume, connector) - self.assertEqual('ro', conn_info['data']['access_mode']) - - self.assertRaises(exception.VolumeAttached, - self.volume.delete_volume, - self.context, - volume=volume) - self.volume.detach_volume(self.context, volume_id, - attachment.id, - volume=volume_passed) - vol = objects.Volume.get_by_id(self.context, volume_id) - self.assertEqual('available', vol.status) - - self.volume.delete_volume(self.context, volume) - self.assertRaises(exception.VolumeNotFound, - db.volume_get, - self.context, - volume_id) - - @mock.patch('cinder.volume.manager.LOG', mock.Mock()) - def test_initialize_connection(self): - volume = mock.Mock(save=mock.Mock(side_effect=Exception)) - with mock.patch.object(self.volume, 'driver') as driver_mock: - self.assertRaises(exception.ExportFailure, - self.volume.initialize_connection, self.context, - volume, mock.Mock()) - driver_mock.remove_export.assert_called_once_with(mock.ANY, volume) - - def test_run_attach_detach_2volumes_for_instance(self): - """Make sure volume can be attached and detached from instance.""" - # attach first volume to the instance - mountpoint1 = "/dev/vdc" - instance_uuid = '12345678-1234-5678-1234-567812345678' - volume1 = tests_utils.create_volume( - self.context, admin_metadata={'readonly': 'True'}, - **self.volume_params) - volume1_id = volume1['id'] - self.volume.create_volume(self.context, volume1) - attachment = self.volume.attach_volume(self.context, volume1_id, - instance_uuid, None, - mountpoint1, 'ro') - vol1 = db.volume_get(context.get_admin_context(), volume1_id) - self.assertEqual("in-use", vol1['status']) - self.assertEqual('attached', attachment['attach_status']) - self.assertEqual(mountpoint1, attachment['mountpoint']) - self.assertEqual(instance_uuid, attachment['instance_uuid']) - self.assertIsNone(attachment['attached_host']) - admin_metadata = 
vol1['volume_admin_metadata'] - self.assertEqual(2, len(admin_metadata)) - expected = dict(readonly='True', attached_mode='ro') - ret = {} - for item in admin_metadata: - ret.update({item['key']: item['value']}) - self.assertDictEqual(expected, ret) - - connector = {'initiator': 'iqn.2012-07.org.fake:01'} - conn_info = self.volume.initialize_connection(self.context, - volume1, connector) - self.assertEqual('ro', conn_info['data']['access_mode']) - - self.assertRaises(exception.VolumeAttached, - self.volume.delete_volume, - self.context, - volume1) - - # attach 2nd volume to the instance - mountpoint2 = "/dev/vdd" - volume2 = tests_utils.create_volume( - self.context, admin_metadata={'readonly': 'False'}, - **self.volume_params) - volume2_id = volume2['id'] - self.volume.create_volume(self.context, volume2) - attachment2 = self.volume.attach_volume(self.context, volume2_id, - instance_uuid, None, - mountpoint2, 'rw') - vol2 = db.volume_get(context.get_admin_context(), volume2_id) - self.assertEqual("in-use", vol2['status']) - self.assertEqual('attached', attachment2['attach_status']) - self.assertEqual(mountpoint2, attachment2['mountpoint']) - self.assertEqual(instance_uuid, attachment2['instance_uuid']) - self.assertIsNone(attachment2['attached_host']) - admin_metadata = vol2['volume_admin_metadata'] - self.assertEqual(2, len(admin_metadata)) - expected = dict(readonly='False', attached_mode='rw') - ret = {} - for item in admin_metadata: - ret.update({item['key']: item['value']}) - self.assertDictEqual(expected, ret) - - connector = {'initiator': 'iqn.2012-07.org.fake:02'} - conn_info = self.volume.initialize_connection(self.context, - volume2, connector) - self.assertEqual('rw', conn_info['data']['access_mode']) - - self.assertRaises(exception.VolumeAttached, - self.volume.delete_volume, - self.context, - volume2) - - # detach first volume and then 2nd volume - self.volume.detach_volume(self.context, volume1_id, attachment['id']) - vol1 = 
db.volume_get(self.context, volume1_id) - self.assertEqual('available', vol1['status']) - - self.volume.delete_volume(self.context, volume1) - self.assertRaises(exception.VolumeNotFound, - db.volume_get, - self.context, - volume1_id) - - self.volume.detach_volume(self.context, volume2_id, attachment2['id']) - vol2 = db.volume_get(self.context, volume2_id) - self.assertEqual('available', vol2['status']) - - self.volume.delete_volume(self.context, volume2) - self.assertRaises(exception.VolumeNotFound, - db.volume_get, - self.context, - volume2_id) - - def test_detach_invalid_attachment_id(self): - """Make sure if the attachment id isn't found we raise.""" - attachment_id = "notfoundid" - volume = tests_utils.create_volume(self.context, - admin_metadata={'readonly': 'True'}, - multiattach=False, - **self.volume_params) - self.volume.detach_volume(self.context, volume['id'], - attachment_id) - volume = db.volume_get(self.context, volume['id']) - self.assertEqual('available', volume['status']) - - instance_uuid = '12345678-1234-5678-1234-567812345678' - attached_host = 'fake_host' - mountpoint = '/dev/fake' - tests_utils.attach_volume(self.context, volume['id'], - instance_uuid, attached_host, - mountpoint) - self.volume.detach_volume(self.context, volume['id'], - attachment_id) - volume = db.volume_get(self.context, volume['id']) - self.assertEqual('in-use', volume['status']) - - def test_detach_no_attachments(self): - self.volume_params['status'] = 'detaching' - volume = tests_utils.create_volume(self.context, - admin_metadata={'readonly': 'True'}, - multiattach=False, - **self.volume_params) - self.volume.detach_volume(self.context, volume['id']) - volume = db.volume_get(self.context, volume['id']) - self.assertEqual('available', volume['status']) - - def test_run_attach_detach_volume_for_instance_no_attachment_id(self): - """Make sure volume can be attached and detached from instance.""" - mountpoint = "/dev/sdf" - # attach volume to the instance then to detach - 
instance_uuid = '12345678-1234-5678-1234-567812345678' - instance_uuid_2 = '12345678-4321-8765-4321-567812345678' - volume = tests_utils.create_volume(self.context, - admin_metadata={'readonly': 'True'}, - multiattach=True, - **self.volume_params) - volume_id = volume['id'] - self.volume.create_volume(self.context, volume) - attachment = self.volume.attach_volume(self.context, volume_id, - instance_uuid, None, - mountpoint, 'ro') - vol = db.volume_get(context.get_admin_context(), volume_id) - self.assertEqual('in-use', vol['status']) - self.assertEqual(fields.VolumeAttachStatus.ATTACHED, - attachment['attach_status']) - self.assertEqual(mountpoint, attachment['mountpoint']) - self.assertEqual(instance_uuid, attachment['instance_uuid']) - self.assertIsNone(attachment['attached_host']) - admin_metadata = vol['volume_admin_metadata'] - self.assertEqual(2, len(admin_metadata)) - expected = dict(readonly='True', attached_mode='ro') - ret = {} - for item in admin_metadata: - ret.update({item['key']: item['value']}) - self.assertDictEqual(expected, ret) - attachment2 = self.volume.attach_volume(self.context, volume_id, - instance_uuid_2, None, - mountpoint, 'ro') - - connector = {'initiator': 'iqn.2012-07.org.fake:01'} - conn_info = self.volume.initialize_connection(self.context, - volume, connector) - self.assertEqual('ro', conn_info['data']['access_mode']) - self.assertRaises(exception.VolumeAttached, - self.volume.delete_volume, - self.context, - volume) - - self.assertRaises(exception.InvalidVolume, - self.volume.detach_volume, - self.context, volume_id) - - self.volume.detach_volume(self.context, volume_id, attachment['id']) - vol = db.volume_get(self.context, volume_id) - self.assertEqual('in-use', vol['status']) - - self.volume.detach_volume(self.context, volume_id, attachment2['id']) - vol = db.volume_get(self.context, volume_id) - self.assertEqual('available', vol['status']) - - attachment = self.volume.attach_volume(self.context, volume_id, - instance_uuid, 
None, - mountpoint, 'ro') - vol = db.volume_get(self.context, volume_id) - self.assertEqual('in-use', vol['status']) - self.volume.detach_volume(self.context, volume_id) - vol = db.volume_get(self.context, volume_id) - self.assertEqual('available', vol['status']) - - self.volume.delete_volume(self.context, volume) - self.assertRaises(exception.VolumeNotFound, - db.volume_get, - self.context, - volume_id) - - def test_run_attach_detach_multiattach_volume_for_instances(self): - """Make sure volume can be attached to multiple instances.""" - mountpoint = "/dev/sdf" - # attach volume to the instance then to detach - instance_uuid = '12345678-1234-5678-1234-567812345678' - volume = tests_utils.create_volume(self.context, - admin_metadata={'readonly': 'True'}, - multiattach=True, - **self.volume_params) - volume_id = volume['id'] - self.volume.create_volume(self.context, volume) - attachment = self.volume.attach_volume(self.context, volume_id, - instance_uuid, None, - mountpoint, 'ro') - vol = db.volume_get(context.get_admin_context(), volume_id) - self.assertEqual('in-use', vol['status']) - self.assertTrue(vol['multiattach']) - self.assertEqual(fields.VolumeAttachStatus.ATTACHED, - attachment['attach_status']) - self.assertEqual(mountpoint, attachment['mountpoint']) - self.assertEqual(instance_uuid, attachment['instance_uuid']) - self.assertIsNone(attachment['attached_host']) - admin_metadata = vol['volume_admin_metadata'] - self.assertEqual(2, len(admin_metadata)) - expected = dict(readonly='True', attached_mode='ro') - ret = {} - for item in admin_metadata: - ret.update({item['key']: item['value']}) - self.assertDictEqual(expected, ret) - connector = {'initiator': 'iqn.2012-07.org.fake:01'} - conn_info = self.volume.initialize_connection(self.context, - volume, connector) - self.assertEqual('ro', conn_info['data']['access_mode']) - - instance2_uuid = '12345678-1234-5678-1234-567812345000' - mountpoint2 = "/dev/sdx" - attachment2 = 
self.volume.attach_volume(self.context, volume_id, - instance2_uuid, None, - mountpoint2, 'ro') - vol = db.volume_get(context.get_admin_context(), volume_id) - self.assertEqual('in-use', vol['status']) - self.assertTrue(vol['multiattach']) - self.assertEqual(fields.VolumeAttachStatus.ATTACHED, - attachment2['attach_status']) - self.assertEqual(mountpoint2, attachment2['mountpoint']) - self.assertEqual(instance2_uuid, attachment2['instance_uuid']) - self.assertIsNone(attachment2['attached_host']) - self.assertNotEqual(attachment, attachment2) - - self.assertRaises(exception.VolumeAttached, - self.volume.delete_volume, - self.context, - volume) - self.volume.detach_volume(self.context, volume_id, attachment['id']) - vol = db.volume_get(self.context, volume_id) - self.assertEqual('in-use', vol['status']) - - self.assertRaises(exception.VolumeAttached, - self.volume.delete_volume, - self.context, - volume) - - self.volume.detach_volume(self.context, volume_id, attachment2['id']) - vol = db.volume_get(self.context, volume_id) - self.assertEqual('available', vol['status']) - - self.volume.delete_volume(self.context, volume) - self.assertRaises(exception.VolumeNotFound, - db.volume_get, - self.context, - volume_id) - - def test_run_attach_twice_multiattach_volume_for_instances(self): - """Make sure volume can be attached to multiple instances.""" - mountpoint = "/dev/sdf" - # attach volume to the instance then to detach - instance_uuid = '12345678-1234-5678-1234-567812345699' - volume = tests_utils.create_volume(self.context, - admin_metadata={'readonly': 'True'}, - multiattach=True, - **self.volume_params) - volume_id = volume['id'] - self.volume.create_volume(self.context, volume) - attachment = self.volume.attach_volume(self.context, volume_id, - instance_uuid, None, - mountpoint, 'ro') - vol = db.volume_get(context.get_admin_context(), volume_id) - self.assertEqual('in-use', vol['status']) - self.assertTrue(vol['multiattach']) - 
self.assertEqual(fields.VolumeAttachStatus.ATTACHED, - attachment['attach_status']) - self.assertEqual(mountpoint, attachment['mountpoint']) - self.assertEqual(instance_uuid, attachment['instance_uuid']) - self.assertIsNone(attachment['attached_host']) - admin_metadata = vol['volume_admin_metadata'] - self.assertEqual(2, len(admin_metadata)) - expected = dict(readonly='True', attached_mode='ro') - ret = {} - for item in admin_metadata: - ret.update({item['key']: item['value']}) - self.assertDictEqual(expected, ret) - connector = {'initiator': 'iqn.2012-07.org.fake:01'} - conn_info = self.volume.initialize_connection(self.context, - volume, connector) - self.assertEqual('ro', conn_info['data']['access_mode']) - - mountpoint2 = "/dev/sdx" - attachment2 = self.volume.attach_volume(self.context, volume_id, - instance_uuid, None, - mountpoint2, 'ro') - vol = db.volume_get(context.get_admin_context(), volume_id) - self.assertEqual('in-use', vol['status']) - self.assertTrue(vol['multiattach']) - self.assertEqual('attached', attachment2['attach_status']) - self.assertEqual(mountpoint, attachment2['mountpoint']) - self.assertEqual(instance_uuid, attachment2['instance_uuid']) - self.assertIsNone(attachment2['attached_host']) - - self.assertRaises(exception.VolumeAttached, - self.volume.delete_volume, - self.context, - volume) - - def test_attach_detach_not_multiattach_volume_for_instances(self): - """Make sure volume can't be attached to more than one instance.""" - mountpoint = "/dev/sdf" - # attach volume to the instance then to detach - instance_uuid = '12345678-1234-5678-1234-567812345678' - volume = tests_utils.create_volume(self.context, - admin_metadata={'readonly': 'True'}, - multiattach=False, - **self.volume_params) - volume_id = volume['id'] - self.volume.create_volume(self.context, volume) - attachment = self.volume.attach_volume(self.context, volume_id, - instance_uuid, None, - mountpoint, 'ro') - vol = db.volume_get(context.get_admin_context(), volume_id) - 
self.assertEqual('in-use', vol['status']) - self.assertFalse(vol['multiattach']) - self.assertEqual(fields.VolumeAttachStatus.ATTACHED, - attachment['attach_status']) - self.assertEqual(mountpoint, attachment['mountpoint']) - self.assertEqual(instance_uuid, attachment['instance_uuid']) - self.assertIsNone(attachment['attached_host']) - admin_metadata = vol['volume_admin_metadata'] - self.assertEqual(2, len(admin_metadata)) - expected = dict(readonly='True', attached_mode='ro') - ret = {} - for item in admin_metadata: - ret.update({item['key']: item['value']}) - self.assertDictEqual(expected, ret) - connector = {'initiator': 'iqn.2012-07.org.fake:01'} - conn_info = self.volume.initialize_connection(self.context, - volume, connector) - self.assertEqual('ro', conn_info['data']['access_mode']) - - instance2_uuid = '12345678-1234-5678-1234-567812345000' - mountpoint2 = "/dev/sdx" - self.assertRaises(exception.InvalidVolume, - self.volume.attach_volume, - self.context, - volume_id, - instance2_uuid, - None, - mountpoint2, 'ro') - - self.assertRaises(exception.VolumeAttached, - self.volume.delete_volume, - self.context, - volume) - self.volume.detach_volume(self.context, volume_id, attachment['id']) - vol = db.volume_get(self.context, volume_id) - self.assertEqual('available', vol['status']) - - self.volume.delete_volume(self.context, volume) - self.assertRaises(exception.VolumeNotFound, - db.volume_get, - self.context, - volume_id) - - def test_run_attach_detach_volume_for_host(self): - """Make sure volume can be attached and detached from host.""" - mountpoint = "/dev/sdf" - volume = tests_utils.create_volume( - self.context, - admin_metadata={'readonly': 'False'}, - **self.volume_params) - volume_id = volume['id'] - self.volume.create_volume(self.context, volume) - attachment = self.volume.attach_volume(self.context, volume_id, None, - 'fake_host', mountpoint, 'rw') - vol = db.volume_get(context.get_admin_context(), volume_id) - self.assertEqual('in-use', 
vol['status']) - self.assertEqual(fields.VolumeAttachStatus.ATTACHED, - attachment['attach_status']) - self.assertEqual(mountpoint, attachment['mountpoint']) - self.assertIsNone(attachment['instance_uuid']) - # sanitized, conforms to RFC-952 and RFC-1123 specs. - self.assertEqual('fake-host', attachment['attached_host']) - admin_metadata = vol['volume_admin_metadata'] - self.assertEqual(2, len(admin_metadata)) - expected = dict(readonly='False', attached_mode='rw') - ret = {} - for item in admin_metadata: - ret.update({item['key']: item['value']}) - self.assertDictEqual(expected, ret) - - connector = {'initiator': 'iqn.2012-07.org.fake:01'} - conn_info = self.volume.initialize_connection(self.context, - volume, connector) - self.assertEqual('rw', conn_info['data']['access_mode']) - - self.assertRaises(exception.VolumeAttached, - self.volume.delete_volume, - self.context, - volume) - self.volume.detach_volume(self.context, volume_id, attachment['id']) - vol = db.volume_get(self.context, volume_id) - self.assertEqual("available", vol['status']) - - self.volume.delete_volume(self.context, volume) - self.assertRaises(exception.VolumeNotFound, - db.volume_get, - self.context, - volume_id) - - def test_run_attach_detach_multiattach_volume_for_hosts(self): - """Make sure volume can be attached and detached from hosts.""" - mountpoint = "/dev/sdf" - volume = tests_utils.create_volume( - self.context, - admin_metadata={'readonly': 'False'}, - multiattach=True, - **self.volume_params) - volume_id = volume['id'] - self.volume.create_volume(self.context, volume) - attachment = self.volume.attach_volume(self.context, volume_id, None, - 'fake_host', mountpoint, 'rw') - vol = db.volume_get(context.get_admin_context(), volume_id) - self.assertEqual('in-use', vol['status']) - self.assertTrue(vol['multiattach']) - self.assertEqual(fields.VolumeAttachStatus.ATTACHED, - attachment['attach_status']) - self.assertEqual(mountpoint, attachment['mountpoint']) - 
self.assertIsNone(attachment['instance_uuid']) - # sanitized, conforms to RFC-952 and RFC-1123 specs. - self.assertEqual('fake-host', attachment['attached_host']) - admin_metadata = vol['volume_admin_metadata'] - self.assertEqual(2, len(admin_metadata)) - expected = dict(readonly='False', attached_mode='rw') - ret = {} - for item in admin_metadata: - ret.update({item['key']: item['value']}) - self.assertDictEqual(expected, ret) - connector = {'initiator': 'iqn.2012-07.org.fake:01'} - conn_info = self.volume.initialize_connection(self.context, - volume, connector) - self.assertEqual('rw', conn_info['data']['access_mode']) - - mountpoint2 = "/dev/sdx" - attachment2 = self.volume.attach_volume(self.context, volume_id, None, - 'fake_host2', mountpoint2, - 'rw') - vol = db.volume_get(context.get_admin_context(), volume_id) - self.assertEqual('in-use', vol['status']) - self.assertEqual(fields.VolumeAttachStatus.ATTACHED, - attachment2['attach_status']) - self.assertEqual(mountpoint2, attachment2['mountpoint']) - self.assertIsNone(attachment2['instance_uuid']) - # sanitized, conforms to RFC-952 and RFC-1123 specs. 
- self.assertEqual('fake-host2', attachment2['attached_host']) - - self.assertRaises(exception.VolumeAttached, - self.volume.delete_volume, - self.context, - volume) - self.volume.detach_volume(self.context, volume_id, attachment['id']) - vol = db.volume_get(self.context, volume_id) - self.assertEqual("in-use", vol['status']) - - self.volume.detach_volume(self.context, volume_id, attachment2['id']) - vol = db.volume_get(self.context, volume_id) - self.assertEqual("available", vol['status']) - - self.volume.delete_volume(self.context, volume) - self.assertRaises(exception.VolumeNotFound, - db.volume_get, - self.context, - volume_id) - - def test_run_attach_twice_multiattach_volume_for_hosts(self): - """Make sure volume can be attached and detached from hosts.""" - mountpoint = "/dev/sdf" - volume = tests_utils.create_volume( - self.context, - admin_metadata={'readonly': 'False'}, - multiattach=True, - **self.volume_params) - volume_id = volume['id'] - self.volume.create_volume(self.context, volume) - attachment = self.volume.attach_volume(self.context, volume_id, None, - 'fake_host', mountpoint, 'rw') - vol = db.volume_get(context.get_admin_context(), volume_id) - self.assertEqual('in-use', vol['status']) - self.assertTrue(vol['multiattach']) - self.assertEqual(fields.VolumeAttachStatus.ATTACHED, - attachment['attach_status']) - self.assertEqual(mountpoint, attachment['mountpoint']) - self.assertIsNone(attachment['instance_uuid']) - # sanitized, conforms to RFC-952 and RFC-1123 specs. 
- self.assertEqual('fake-host', attachment['attached_host']) - admin_metadata = vol['volume_admin_metadata'] - self.assertEqual(2, len(admin_metadata)) - expected = dict(readonly='False', attached_mode='rw') - ret = {} - for item in admin_metadata: - ret.update({item['key']: item['value']}) - self.assertDictEqual(expected, ret) - connector = {'initiator': 'iqn.2012-07.org.fake:01'} - conn_info = self.volume.initialize_connection(self.context, - volume, connector) - self.assertEqual('rw', conn_info['data']['access_mode']) - - mountpoint2 = "/dev/sdx" - attachment2 = self.volume.attach_volume(self.context, volume_id, None, - 'fake_host', mountpoint2, - 'rw') - vol = db.volume_get(context.get_admin_context(), volume_id) - self.assertEqual('in-use', vol['status']) - self.assertEqual('attached', attachment2['attach_status']) - self.assertEqual(mountpoint, attachment2['mountpoint']) - self.assertIsNone(attachment2['instance_uuid']) - - self.assertRaises(exception.VolumeAttached, - self.volume.delete_volume, - self.context, - volume) - - def test_run_attach_detach_not_multiattach_volume_for_hosts(self): - """Make sure volume can't be attached to more than one host.""" - mountpoint = "/dev/sdf" - volume = tests_utils.create_volume( - self.context, - admin_metadata={'readonly': 'False'}, - multiattach=False, - **self.volume_params) - volume_id = volume['id'] - self.volume.create_volume(self.context, volume) - attachment = self.volume.attach_volume(self.context, volume_id, None, - 'fake_host', mountpoint, 'rw') - vol = db.volume_get(context.get_admin_context(), volume_id) - self.assertEqual('in-use', vol['status']) - self.assertFalse(vol['multiattach']) - self.assertEqual(fields.VolumeAttachStatus.ATTACHED, - attachment['attach_status']) - self.assertEqual(mountpoint, attachment['mountpoint']) - self.assertIsNone(attachment['instance_uuid']) - # sanitized, conforms to RFC-952 and RFC-1123 specs. 
- self.assertEqual('fake-host', attachment['attached_host']) - admin_metadata = vol['volume_admin_metadata'] - self.assertEqual(2, len(admin_metadata)) - expected = dict(readonly='False', attached_mode='rw') - ret = {} - for item in admin_metadata: - ret.update({item['key']: item['value']}) - self.assertDictEqual(expected, ret) - connector = {'initiator': 'iqn.2012-07.org.fake:01'} - conn_info = self.volume.initialize_connection(self.context, - volume, connector) - self.assertEqual('rw', conn_info['data']['access_mode']) - - mountpoint2 = "/dev/sdx" - self.assertRaises(exception.InvalidVolume, - self.volume.attach_volume, - self.context, - volume_id, - None, - 'fake_host2', - mountpoint2, - 'rw') - vol = db.volume_get(context.get_admin_context(), volume_id) - self.assertEqual('in-use', vol['status']) - self.assertEqual(fields.VolumeAttachStatus.ATTACHED, - attachment['attach_status']) - self.assertEqual(mountpoint, attachment['mountpoint']) - self.assertIsNone(attachment['instance_uuid']) - # sanitized, conforms to RFC-952 and RFC-1123 specs. 
- self.assertEqual('fake-host', attachment['attached_host']) - - self.assertRaises(exception.VolumeAttached, - self.volume.delete_volume, - self.context, - volume) - self.volume.detach_volume(self.context, volume_id, attachment['id']) - vol = db.volume_get(self.context, volume_id) - self.assertEqual('available', vol['status']) - - self.volume.delete_volume(self.context, volume) - self.assertRaises(exception.VolumeNotFound, - db.volume_get, - self.context, - volume_id) - - def test_run_attach_detach_volume_with_attach_mode(self): - instance_uuid = '12345678-1234-5678-1234-567812345678' - mountpoint = "/dev/sdf" - volume = tests_utils.create_volume(self.context, - admin_metadata={'readonly': 'True'}, - **self.volume_params) - volume_id = volume['id'] - db.volume_update(self.context, volume_id, {'status': 'available', }) - self.volume.attach_volume(self.context, volume_id, instance_uuid, - None, mountpoint, 'ro') - vol = db.volume_get(context.get_admin_context(), volume_id) - attachment = vol['volume_attachment'][0] - self.assertEqual('in-use', vol['status']) - self.assertEqual(fields.VolumeAttachStatus.ATTACHED, - vol['attach_status']) - self.assertEqual(mountpoint, attachment['mountpoint']) - self.assertEqual(instance_uuid, attachment['instance_uuid']) - self.assertIsNone(attachment['attached_host']) - admin_metadata = vol['volume_admin_metadata'] - self.assertEqual(2, len(admin_metadata)) - expected = dict(readonly='True', attached_mode='ro') - ret = {} - for item in admin_metadata: - ret.update({item['key']: item['value']}) - self.assertDictEqual(expected, ret) - connector = {'initiator': 'iqn.2012-07.org.fake:01'} - conn_info = self.volume.initialize_connection(self.context, - volume, connector) - - self.assertEqual('ro', conn_info['data']['access_mode']) - - self.volume.detach_volume(self.context, volume_id, attachment['id']) - vol = db.volume_get(self.context, volume_id) - attachment = vol['volume_attachment'] - self.assertEqual('available', vol['status']) - 
self.assertEqual(fields.VolumeAttachStatus.DETACHED, - vol['attach_status']) - self.assertEqual([], attachment) - admin_metadata = vol['volume_admin_metadata'] - self.assertEqual(1, len(admin_metadata)) - self.assertEqual('readonly', admin_metadata[0]['key']) - self.assertEqual('True', admin_metadata[0]['value']) - - self.volume.attach_volume(self.context, volume_id, None, - 'fake_host', mountpoint, 'ro') - vol = db.volume_get(context.get_admin_context(), volume_id) - attachment = vol['volume_attachment'][0] - self.assertEqual('in-use', vol['status']) - self.assertEqual(fields.VolumeAttachStatus.ATTACHED, - vol['attach_status']) - self.assertEqual(mountpoint, attachment['mountpoint']) - self.assertIsNone(attachment['instance_uuid']) - self.assertEqual('fake-host', attachment['attached_host']) - admin_metadata = vol['volume_admin_metadata'] - self.assertEqual(2, len(admin_metadata)) - expected = dict(readonly='True', attached_mode='ro') - ret = {} - for item in admin_metadata: - ret.update({item['key']: item['value']}) - self.assertDictEqual(expected, ret) - connector = {'initiator': 'iqn.2012-07.org.fake:01'} - conn_info = self.volume.initialize_connection(self.context, - volume, connector) - self.assertEqual('ro', conn_info['data']['access_mode']) - - self.volume.detach_volume(self.context, volume_id, - attachment['id']) - vol = db.volume_get(self.context, volume_id) - attachment = vol['volume_attachment'] - self.assertEqual('available', vol['status']) - self.assertEqual(fields.VolumeAttachStatus.DETACHED, - vol['attach_status']) - self.assertEqual([], attachment) - admin_metadata = vol['volume_admin_metadata'] - self.assertEqual(1, len(admin_metadata)) - self.assertEqual('readonly', admin_metadata[0]['key']) - self.assertEqual('True', admin_metadata[0]['value']) - - self.volume.delete_volume(self.context, volume) - self.assertRaises(exception.VolumeNotFound, - db.volume_get, - self.context, - volume_id) - - def 
test_run_manager_attach_detach_volume_with_wrong_attach_mode(self): - # Not allow using 'read-write' mode attach readonly volume - instance_uuid = '12345678-1234-5678-1234-567812345678' - mountpoint = "/dev/sdf" - volume = tests_utils.create_volume(self.context, - admin_metadata={'readonly': 'True'}, - **self.volume_params) - volume_id = volume['id'] - self.volume.create_volume(self.context, volume) - self.assertRaises(exception.InvalidVolumeAttachMode, - self.volume.attach_volume, - self.context, - volume_id, - instance_uuid, - None, - mountpoint, - 'rw') - - # Assert a user message was created - self.volume.message_api.create.assert_called_once_with( - self.context, message_field.Action.ATTACH_VOLUME, - resource_uuid=volume['id'], - exception=mock.ANY) - - attachment = objects.VolumeAttachmentList.get_all_by_volume_id( - context.get_admin_context(), volume_id)[0] - self.assertEqual(fields.VolumeAttachStatus.ERROR_ATTACHING, - attachment.attach_status) - vol = db.volume_get(context.get_admin_context(), volume_id) - self.assertEqual(fields.VolumeAttachStatus.DETACHED, - vol['attach_status']) - admin_metadata = vol['volume_admin_metadata'] - self.assertEqual(2, len(admin_metadata)) - expected = dict(readonly='True', attached_mode='rw') - ret = {} - for item in admin_metadata: - ret.update({item['key']: item['value']}) - self.assertDictEqual(expected, ret) - - db.volume_update(self.context, volume_id, {'status': 'available'}) - self.assertRaises(exception.InvalidVolumeAttachMode, - self.volume.attach_volume, - self.context, - volume_id, - None, - 'fake_host', - mountpoint, - 'rw') - attachment = objects.VolumeAttachmentList.get_all_by_volume_id( - context.get_admin_context(), volume_id)[0] - self.assertEqual(fields.VolumeAttachStatus.ERROR_ATTACHING, - attachment.attach_status) - vol = db.volume_get(context.get_admin_context(), volume_id) - self.assertEqual(fields.VolumeAttachStatus.DETACHED, - vol['attach_status']) - admin_metadata = vol['volume_admin_metadata'] - 
self.assertEqual(2, len(admin_metadata)) - expected = dict(readonly='True', attached_mode='rw') - ret = {} - for item in admin_metadata: - ret.update({item['key']: item['value']}) - self.assertDictEqual(expected, ret) - - def test_run_api_attach_detach_volume_with_wrong_attach_mode(self): - # Not allow using 'read-write' mode attach readonly volume - instance_uuid = '12345678-1234-5678-1234-567812345678' - mountpoint = "/dev/sdf" - volume = tests_utils.create_volume(self.context, - admin_metadata={'readonly': 'True'}, - **self.volume_params) - volume_id = volume['id'] - self.volume.create_volume(self.context, volume) - volume_api = cinder.volume.api.API() - self.assertRaises(exception.InvalidVolumeAttachMode, - volume_api.attach, - self.context, - volume, - instance_uuid, - None, - mountpoint, - 'rw') - vol = db.volume_get(context.get_admin_context(), volume_id) - self.assertEqual(fields.VolumeAttachStatus.DETACHED, - vol['attach_status']) - admin_metadata = vol['volume_admin_metadata'] - self.assertEqual(1, len(admin_metadata)) - self.assertEqual('readonly', admin_metadata[0]['key']) - self.assertEqual('True', admin_metadata[0]['value']) - - db.volume_update(self.context, volume_id, {'status': 'available'}) - self.assertRaises(exception.InvalidVolumeAttachMode, - volume_api.attach, - self.context, - volume, - None, - 'fake_host', - mountpoint, - 'rw') - vol = db.volume_get(context.get_admin_context(), volume_id) - self.assertEqual(fields.VolumeAttachStatus.DETACHED, - vol['attach_status']) - admin_metadata = vol['volume_admin_metadata'] - self.assertEqual(1, len(admin_metadata)) - self.assertEqual('readonly', admin_metadata[0]['key']) - self.assertEqual('True', admin_metadata[0]['value']) - - def test_detach_volume_while_uploading_to_image_is_in_progress(self): - # If instance is booted from volume with 'Terminate on Delete' flag - # set, and when we delete instance then it tries to delete volume - # even it is in 'uploading' state. 
- # It is happening because detach call is setting volume status to - # 'available'. - mountpoint = "/dev/sdf" - # Attach volume to the instance - instance_uuid = '12345678-1234-5678-1234-567812345678' - volume = tests_utils.create_volume(self.context, - admin_metadata={'readonly': 'True'}, - **self.volume_params) - volume_id = volume['id'] - self.volume.create_volume(self.context, volume) - self.volume.attach_volume(self.context, volume_id, instance_uuid, - None, mountpoint, 'ro') - # Change volume status to 'uploading' - db.volume_update(self.context, volume_id, {'status': 'uploading'}) - # Call detach api - self.volume.detach_volume(self.context, volume_id) - vol = db.volume_get(self.context, volume_id) - # Check that volume status is 'uploading' - self.assertEqual("uploading", vol['status']) - self.assertEqual(fields.VolumeAttachStatus.DETACHED, - vol['attach_status']) - - def test_volume_attach_in_maintenance(self): - """Test attach the volume in maintenance.""" - test_meta1 = {'fake_key1': 'fake_value1', 'fake_key2': 'fake_value2'} - volume = tests_utils.create_volume(self.context, metadata=test_meta1, - **self.volume_params) - volume['status'] = 'maintenance' - self.assertRaises(exception.InvalidVolume, - self.volume_api.attach, - self.context, - volume, None, None, None, None) - - def test_volume_detach_in_maintenance(self): - """Test detach the volume in maintenance.""" - test_meta1 = {'fake_key1': 'fake_value1', 'fake_key2': 'fake_value2'} - volume = tests_utils.create_volume(self.context, metadata=test_meta1, - **self.volume_params) - volume['status'] = 'maintenance' - volume_api = cinder.volume.api.API() - self.assertRaises(exception.InvalidVolume, - volume_api.detach, - self.context, - volume, None) diff --git a/cinder/tests/unit/volume/test_driver.py b/cinder/tests/unit/volume/test_driver.py deleted file mode 100644 index f1d91d83d..000000000 --- a/cinder/tests/unit/volume/test_driver.py +++ /dev/null @@ -1,533 +0,0 @@ -# Copyright (c) 2016 Red Hat 
Inc. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -"""Tests for Volume Code.""" - -import ddt -import mock -import shutil -import tempfile - -import os_brick -from oslo_config import cfg -from oslo_utils import importutils - -from cinder.brick.local_dev import lvm as brick_lvm -from cinder import context -from cinder import db -from cinder import exception -from cinder.image import image_utils -from cinder import objects -from cinder.objects import fields -import cinder.policy -from cinder import test -from cinder.tests.unit import fake_constants as fake -from cinder.tests.unit import fake_snapshot -from cinder.tests.unit import fake_volume -from cinder.tests.unit.image import fake as fake_image -from cinder.tests.unit import utils as tests_utils -from cinder import utils -import cinder.volume -from cinder.volume import configuration as conf -from cinder.volume import driver -from cinder.volume import manager -from cinder.volume import rpcapi as volume_rpcapi -import cinder.volume.targets.tgt -from cinder.volume import utils as volutils - - -CONF = cfg.CONF - - -def my_safe_get(self, value): - if value == 'replication_device': - return ['replication'] - return None - - -@ddt.ddt -class DriverTestCase(test.TestCase): - - @staticmethod - def _get_driver(relicated, version): - class NonReplicatedDriver(driver.VolumeDriver): - pass - - class V21Driver(driver.VolumeDriver): - def failover_host(*args, **kwargs): - pass - - class 
AADriver(V21Driver): - def failover_completed(*args, **kwargs): - pass - - if not relicated: - return NonReplicatedDriver - - if version == 'v2.1': - return V21Driver - - return AADriver - - @ddt.data('v2.1', 'a/a', 'newfeature') - def test_supports_replication_feature_none(self, rep_version): - my_driver = self._get_driver(False, None) - self.assertFalse(my_driver.supports_replication_feature(rep_version)) - - @ddt.data('v2.1', 'a/a', 'newfeature') - def test_supports_replication_feature_only_21(self, rep_version): - version = 'v2.1' - my_driver = self._get_driver(True, version) - self.assertEqual(rep_version == version, - my_driver.supports_replication_feature(rep_version)) - - @ddt.data('v2.1', 'a/a', 'newfeature') - def test_supports_replication_feature_aa(self, rep_version): - my_driver = self._get_driver(True, 'a/a') - self.assertEqual(rep_version in ('v2.1', 'a/a'), - my_driver.supports_replication_feature(rep_version)) - - def test_init_non_replicated(self): - config = manager.config.Configuration(manager.volume_manager_opts, - config_group='volume') - # No exception raised - self._get_driver(False, None)(configuration=config) - - @ddt.data('v2.1', 'a/a') - @mock.patch('cinder.volume.configuration.Configuration.safe_get', - my_safe_get) - def test_init_replicated_non_clustered(self, version): - def append_config_values(self, volume_opts): - pass - - config = manager.config.Configuration(manager.volume_manager_opts, - config_group='volume') - # No exception raised - self._get_driver(True, version)(configuration=config) - - @mock.patch('cinder.volume.configuration.Configuration.safe_get', - my_safe_get) - def test_init_replicated_clustered_not_supported(self): - config = manager.config.Configuration(manager.volume_manager_opts, - config_group='volume') - # Raises exception because we are trying to run a replicated service - # in clustered mode but the driver doesn't support it. 
- self.assertRaises(exception.Invalid, self._get_driver(True, 'v2.1'), - configuration=config, cluster_name='mycluster') - - @mock.patch('cinder.volume.configuration.Configuration.safe_get', - my_safe_get) - def test_init_replicated_clustered_supported(self): - config = manager.config.Configuration(manager.volume_manager_opts, - config_group='volume') - # No exception raised - self._get_driver(True, 'a/a')(configuration=config, - cluster_name='mycluster') - - def test_failover(self): - """Test default failover behavior of calling failover_host.""" - my_driver = self._get_driver(True, 'a/a')() - with mock.patch.object(my_driver, 'failover_host') as failover_mock: - res = my_driver.failover(mock.sentinel.context, - mock.sentinel.volumes, - secondary_id=mock.sentinel.secondary_id, - groups=[]) - self.assertEqual(failover_mock.return_value, res) - failover_mock.assert_called_once_with(mock.sentinel.context, - mock.sentinel.volumes, - mock.sentinel.secondary_id, - []) - - -class BaseDriverTestCase(test.TestCase): - """Base Test class for Drivers.""" - driver_name = "cinder.volume.driver.FakeBaseDriver" - - def setUp(self): - super(BaseDriverTestCase, self).setUp() - vol_tmpdir = tempfile.mkdtemp() - self.override_config('volume_driver', self.driver_name, - conf.SHARED_CONF_GROUP) - self.override_config('volumes_dir', vol_tmpdir, - conf.SHARED_CONF_GROUP) - self.volume = importutils.import_object(CONF.volume_manager) - self.context = context.get_admin_context() - self.output = "" - self.configuration = conf.Configuration(None) - self.mock_object(brick_lvm.LVM, '_vg_exists', lambda x: True) - - def _fake_execute(_command, *_args, **_kwargs): - """Fake _execute.""" - return self.output, None - exec_patcher = mock.patch.object(self.volume.driver, '_execute', - _fake_execute) - exec_patcher.start() - self.addCleanup(exec_patcher.stop) - self.volume.driver.set_initialized() - self.addCleanup(self._cleanup) - - def _cleanup(self): - try: - shutil.rmtree(CONF.volumes_dir) - 
except OSError: - pass - - def _attach_volume(self): - """Attach volumes to an instance.""" - return [] - - -@ddt.ddt -class GenericVolumeDriverTestCase(BaseDriverTestCase): - """Test case for VolumeDriver.""" - driver_name = "cinder.tests.fake_driver.FakeLoggingVolumeDriver" - - def test_create_temp_cloned_volume(self): - with mock.patch.object( - self.volume.driver, - 'create_cloned_volume') as mock_create_cloned_volume: - model_update = {'provider_location': 'dummy'} - mock_create_cloned_volume.return_value = model_update - vol = tests_utils.create_volume(self.context, - status='backing-up') - cloned_vol = self.volume.driver._create_temp_cloned_volume( - self.context, vol) - self.assertEqual('dummy', cloned_vol.provider_location) - self.assertEqual('available', cloned_vol.status) - - mock_create_cloned_volume.return_value = None - vol = tests_utils.create_volume(self.context, - status='backing-up') - cloned_vol = self.volume.driver._create_temp_cloned_volume( - self.context, vol) - self.assertEqual('available', cloned_vol.status) - - def test_get_backup_device_available(self): - vol = tests_utils.create_volume(self.context) - self.context.user_id = fake.USER_ID - self.context.project_id = fake.PROJECT_ID - backup_obj = tests_utils.create_backup(self.context, - vol['id']) - (backup_device, is_snapshot) = self.volume.driver.get_backup_device( - self.context, backup_obj) - volume = objects.Volume.get_by_id(self.context, vol.id) - self.assertEqual(volume, backup_device) - self.assertFalse(is_snapshot) - backup_obj.refresh() - self.assertIsNone(backup_obj.temp_volume_id) - - def test_get_backup_device_in_use(self): - vol = tests_utils.create_volume(self.context, - status='backing-up', - previous_status='in-use') - temp_vol = tests_utils.create_volume(self.context) - self.context.user_id = fake.USER_ID - self.context.project_id = fake.PROJECT_ID - backup_obj = tests_utils.create_backup(self.context, - vol['id']) - with mock.patch.object( - self.volume.driver, - 
'_create_temp_cloned_volume') as mock_create_temp: - mock_create_temp.return_value = temp_vol - (backup_device, is_snapshot) = ( - self.volume.driver.get_backup_device(self.context, - backup_obj)) - self.assertEqual(temp_vol, backup_device) - self.assertFalse(is_snapshot) - backup_obj.refresh() - self.assertEqual(temp_vol.id, backup_obj.temp_volume_id) - - def test__create_temp_volume_from_snapshot(self): - volume_dict = {'id': fake.SNAPSHOT_ID, - 'host': 'fakehost', - 'cluster_name': 'fakecluster', - 'availability_zone': 'fakezone', - 'size': 1} - vol = fake_volume.fake_volume_obj(self.context, **volume_dict) - snapshot = fake_snapshot.fake_snapshot_obj(self.context) - - with mock.patch.object( - self.volume.driver, - 'create_volume_from_snapshot'): - temp_vol = self.volume.driver._create_temp_volume_from_snapshot( - self.context, - vol, snapshot) - self.assertEqual(fields.VolumeAttachStatus.DETACHED, - temp_vol.attach_status) - self.assertEqual('fakezone', temp_vol.availability_zone) - self.assertEqual('fakecluster', temp_vol.cluster_name) - - @mock.patch.object(utils, 'brick_get_connector_properties') - @mock.patch.object(cinder.volume.manager.VolumeManager, '_attach_volume') - @mock.patch.object(cinder.volume.manager.VolumeManager, '_detach_volume') - @mock.patch.object(volutils, 'copy_volume') - @mock.patch.object(volume_rpcapi.VolumeAPI, 'get_capabilities') - @mock.patch.object(cinder.volume.volume_types, - 'volume_types_encryption_changed') - @ddt.data(False, True) - def test_copy_volume_data_mgr(self, - encryption_changed, - mock_encryption_changed, - mock_get_capabilities, - mock_copy, - mock_detach, - mock_attach, - mock_get_connector): - """Test function of _copy_volume_data.""" - - src_vol = tests_utils.create_volume(self.context, size=1, - host=CONF.host) - dest_vol = tests_utils.create_volume(self.context, size=1, - host=CONF.host) - mock_get_connector.return_value = {} - mock_encryption_changed.return_value = encryption_changed - 
self.volume.driver._throttle = mock.MagicMock() - - attach_expected = [ - mock.call(self.context, dest_vol, {}, - remote=False, - attach_encryptor=encryption_changed), - mock.call(self.context, src_vol, {}, - remote=False, - attach_encryptor=encryption_changed)] - - detach_expected = [ - mock.call(self.context, {'device': {'path': 'bar'}}, - dest_vol, {}, force=False, remote=False, - attach_encryptor=encryption_changed), - mock.call(self.context, {'device': {'path': 'foo'}}, - src_vol, {}, force=False, remote=False, - attach_encryptor=encryption_changed)] - - attach_volume_returns = [ - {'device': {'path': 'bar'}}, - {'device': {'path': 'foo'}} - ] - - # Test case for sparse_copy_volume = False - mock_attach.side_effect = attach_volume_returns - mock_get_capabilities.return_value = {} - self.volume._copy_volume_data(self.context, - src_vol, - dest_vol) - - self.assertEqual(attach_expected, mock_attach.mock_calls) - mock_copy.assert_called_with('foo', 'bar', 1024, '1M', sparse=False) - self.assertEqual(detach_expected, mock_detach.mock_calls) - - # Test case for sparse_copy_volume = True - mock_attach.reset_mock() - mock_detach.reset_mock() - mock_attach.side_effect = attach_volume_returns - mock_get_capabilities.return_value = {'sparse_copy_volume': True} - self.volume._copy_volume_data(self.context, - src_vol, - dest_vol) - - self.assertEqual(attach_expected, mock_attach.mock_calls) - mock_copy.assert_called_with('foo', 'bar', 1024, '1M', sparse=True) - self.assertEqual(detach_expected, mock_detach.mock_calls) - - # cleanup resource - db.volume_destroy(self.context, src_vol['id']) - db.volume_destroy(self.context, dest_vol['id']) - - @mock.patch.object(os_brick.initiator.connector, - 'get_connector_properties') - @mock.patch.object(image_utils, 'fetch_to_raw') - @mock.patch.object(cinder.volume.driver.VolumeDriver, '_attach_volume') - @mock.patch.object(cinder.volume.driver.VolumeDriver, '_detach_volume') - @mock.patch.object(cinder.utils, 
'brick_attach_volume_encryptor') - @mock.patch.object(cinder.utils, 'brick_detach_volume_encryptor') - def test_copy_image_to_encrypted_volume(self, - mock_detach_encryptor, - mock_attach_encryptor, - mock_detach_volume, - mock_attach_volume, - mock_fetch_to_raw, - mock_get_connector_properties): - properties = {} - volume = tests_utils.create_volume( - self.context, status='available', - size=2, - encryption_key_id=fake.ENCRYPTION_KEY_ID) - volume_id = volume['id'] - volume = db.volume_get(context.get_admin_context(), volume_id) - image_service = fake_image.FakeImageService() - local_path = 'dev/sda' - attach_info = {'device': {'path': local_path}, - 'conn': {'driver_volume_type': 'iscsi', - 'data': {}, }} - - mock_get_connector_properties.return_value = properties - mock_attach_volume.return_value = [attach_info, volume] - - self.volume.driver.copy_image_to_encrypted_volume( - self.context, volume, image_service, fake.IMAGE_ID) - - encryption = {'encryption_key_id': fake.ENCRYPTION_KEY_ID} - mock_attach_volume.assert_called_once_with( - self.context, volume, properties) - mock_attach_encryptor.assert_called_once_with( - self.context, attach_info, encryption) - mock_fetch_to_raw.assert_called_once_with( - self.context, image_service, fake.IMAGE_ID, - local_path, '1M', size=2) - mock_detach_encryptor.assert_called_once_with( - attach_info, encryption) - mock_detach_volume.assert_called_once_with( - self.context, attach_info, volume, properties) - - @mock.patch.object(os_brick.initiator.connector, - 'get_connector_properties') - @mock.patch.object(image_utils, 'fetch_to_raw') - @mock.patch.object(cinder.volume.driver.VolumeDriver, '_attach_volume') - @mock.patch.object(cinder.volume.driver.VolumeDriver, '_detach_volume') - @mock.patch.object(cinder.utils, 'brick_attach_volume_encryptor') - @mock.patch.object(cinder.utils, 'brick_detach_volume_encryptor') - def test_copy_image_to_encrypted_volume_failed_attach_encryptor( - self, - mock_detach_encryptor, - 
mock_attach_encryptor, - mock_detach_volume, - mock_attach_volume, - mock_fetch_to_raw, - mock_get_connector_properties): - properties = {} - volume = tests_utils.create_volume( - self.context, status='available', - size=2, - encryption_key_id=fake.ENCRYPTION_KEY_ID) - volume_id = volume['id'] - volume = db.volume_get(context.get_admin_context(), volume_id) - image_service = fake_image.FakeImageService() - attach_info = {'device': {'path': 'dev/sda'}, - 'conn': {'driver_volume_type': 'iscsi', - 'data': {}, }} - - mock_get_connector_properties.return_value = properties - mock_attach_volume.return_value = [attach_info, volume] - raised_exception = os_brick.exception.VolumeEncryptionNotSupported( - volume_id = "123", - volume_type = "abc") - mock_attach_encryptor.side_effect = raised_exception - - self.assertRaises(os_brick.exception.VolumeEncryptionNotSupported, - self.volume.driver.copy_image_to_encrypted_volume, - self.context, volume, image_service, fake.IMAGE_ID) - - encryption = {'encryption_key_id': fake.ENCRYPTION_KEY_ID} - mock_attach_volume.assert_called_once_with( - self.context, volume, properties) - mock_attach_encryptor.assert_called_once_with( - self.context, attach_info, encryption) - self.assertFalse(mock_fetch_to_raw.called) - self.assertFalse(mock_detach_encryptor.called) - mock_detach_volume.assert_called_once_with( - self.context, attach_info, volume, properties) - - @mock.patch.object(os_brick.initiator.connector, - 'get_connector_properties') - @mock.patch.object(image_utils, 'fetch_to_raw') - @mock.patch.object(cinder.volume.driver.VolumeDriver, '_attach_volume') - @mock.patch.object(cinder.volume.driver.VolumeDriver, '_detach_volume') - @mock.patch.object(cinder.utils, 'brick_attach_volume_encryptor') - @mock.patch.object(cinder.utils, 'brick_detach_volume_encryptor') - @ddt.data(exception.ImageUnacceptable( - reason='fake', image_id=fake.IMAGE_ID), - exception.ImageTooBig( - reason='fake image size exceeded', image_id=fake.IMAGE_ID)) - def 
test_copy_image_to_encrypted_volume_failed_fetch( - self, excep, - mock_detach_encryptor, mock_attach_encryptor, - mock_detach_volume, mock_attach_volume, mock_fetch_to_raw, - mock_get_connector_properties): - properties = {} - volume = tests_utils.create_volume( - self.context, status='available', - size=2, - encryption_key_id=fake.ENCRYPTION_KEY_ID) - volume_id = volume['id'] - volume = db.volume_get(context.get_admin_context(), volume_id) - image_service = fake_image.FakeImageService() - local_path = 'dev/sda' - attach_info = {'device': {'path': local_path}, - 'conn': {'driver_volume_type': 'iscsi', - 'data': {}, }} - - mock_get_connector_properties.return_value = properties - mock_attach_volume.return_value = [attach_info, volume] - mock_fetch_to_raw.side_effect = excep - - encryption = {'encryption_key_id': fake.ENCRYPTION_KEY_ID} - self.assertRaises(type(excep), - self.volume.driver.copy_image_to_encrypted_volume, - self.context, volume, image_service, fake.IMAGE_ID) - - mock_attach_volume.assert_called_once_with( - self.context, volume, properties) - mock_attach_encryptor.assert_called_once_with( - self.context, attach_info, encryption) - mock_fetch_to_raw.assert_called_once_with( - self.context, image_service, fake.IMAGE_ID, - local_path, '1M', size=2) - mock_detach_encryptor.assert_called_once_with( - attach_info, encryption) - mock_detach_volume.assert_called_once_with( - self.context, attach_info, volume, properties) - - -class FibreChannelTestCase(BaseDriverTestCase): - """Test Case for FibreChannelDriver.""" - driver_name = "cinder.volume.driver.FibreChannelDriver" - - def test_initialize_connection(self): - self.assertRaises(NotImplementedError, - self.volume.driver.initialize_connection, {}, {}) - - def test_validate_connector(self): - """validate_connector() successful use case. - - validate_connector() does not throw an exception when - wwpns and wwnns are both set and both are not empty. 
- """ - connector = {'wwpns': ["not empty"], - 'wwnns': ["not empty"]} - self.volume.driver.validate_connector(connector) - - def test_validate_connector_no_wwpns(self): - """validate_connector() throws exception when it has no wwpns.""" - connector = {'wwnns': ["not empty"]} - self.assertRaises(exception.InvalidConnectorException, - self.volume.driver.validate_connector, connector) - - def test_validate_connector_empty_wwpns(self): - """validate_connector() throws exception when it has empty wwpns.""" - connector = {'wwpns': [], - 'wwnns': ["not empty"]} - self.assertRaises(exception.InvalidConnectorException, - self.volume.driver.validate_connector, connector) - - def test_validate_connector_no_wwnns(self): - """validate_connector() throws exception when it has no wwnns.""" - connector = {'wwpns': ["not empty"]} - self.assertRaises(exception.InvalidConnectorException, - self.volume.driver.validate_connector, connector) - - def test_validate_connector_empty_wwnns(self): - """validate_connector() throws exception when it has empty wwnns.""" - connector = {'wwnns': [], - 'wwpns': ["not empty"]} - self.assertRaises(exception.InvalidConnectorException, - self.volume.driver.validate_connector, connector) diff --git a/cinder/tests/unit/volume/test_image.py b/cinder/tests/unit/volume/test_image.py deleted file mode 100644 index 435d707c3..000000000 --- a/cinder/tests/unit/volume/test_image.py +++ /dev/null @@ -1,694 +0,0 @@ -# Copyright 2010 United States Government as represented by the -# Administrator of the National Aeronautics and Space Administration. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. 
You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -"""Tests for volume and images.""" - -import datetime -import mock -import os -import tempfile - -from oslo_utils import imageutils -from oslo_utils import units - -from cinder import db -from cinder import exception -from cinder.message import message_field -from cinder import objects -from cinder.objects import fields -from cinder import quota -from cinder.tests import fake_driver -from cinder.tests.unit import fake_constants as fake -from cinder.tests.unit.image import fake as fake_image -from cinder.tests.unit import utils as tests_utils -from cinder.tests.unit import volume as base -import cinder.volume -from cinder.volume import manager as vol_manager - - -QUOTAS = quota.QUOTAS -NON_EXISTENT_IMAGE_ID = '003f540f-ec6b-4293-a3f9-7c68646b0f5c' - - -class FakeImageService(object): - def __init__(self, db_driver=None, image_service=None): - pass - - def show(self, context, image_id): - return {'size': 2 * units.Gi, - 'disk_format': 'raw', - 'container_format': 'bare', - 'status': 'active'} - - -class CopyVolumeToImageTestCase(base.BaseVolumeTestCase): - def fake_local_path(self, volume): - return self.dst_path - - def setUp(self): - super(CopyVolumeToImageTestCase, self).setUp() - self.dst_fd, self.dst_path = tempfile.mkstemp() - self.addCleanup(os.unlink, self.dst_path) - - os.close(self.dst_fd) - self.mock_object(self.volume.driver, 'local_path', - self.fake_local_path) - self.image_id = '70a599e0-31e7-49b7-b260-868f441e862b' - self.image_meta = { - 'id': self.image_id, - 'container_format': 'bare', - 'disk_format': 'raw' - } - self.volume_id = 
fake.VOLUME_ID - self.addCleanup(db.volume_destroy, self.context, self.volume_id) - - self.volume_attrs = { - 'id': self.volume_id, - 'updated_at': datetime.datetime(1, 1, 1, 1, 1, 1), - 'display_description': 'Test Desc', - 'size': 20, - 'status': 'uploading', - 'host': 'dummy' - } - - def test_copy_volume_to_image_status_available(self): - # creating volume testdata - self.volume_attrs['instance_uuid'] = None - db.volume_create(self.context, self.volume_attrs) - - # start test - self.volume.copy_volume_to_image(self.context, - self.volume_id, - self.image_meta) - - volume = db.volume_get(self.context, self.volume_id) - self.assertEqual('available', volume['status']) - - def test_copy_volume_to_image_over_image_quota(self): - # creating volume testdata - self.volume_attrs['instance_uuid'] = None - volume = db.volume_create(self.context, self.volume_attrs) - - with mock.patch.object(self.volume.driver, - 'copy_volume_to_image') as driver_copy_mock: - driver_copy_mock.side_effect = exception.ImageLimitExceeded - - # test with image not in queued state - self.assertRaises(exception.ImageLimitExceeded, - self.volume.copy_volume_to_image, - self.context, - self.volume_id, - self.image_meta) - # Assert a user message was created - self.volume.message_api.create.assert_called_once_with( - self.context, - message_field.Action.COPY_VOLUME_TO_IMAGE, - resource_uuid=volume['id'], - exception=mock.ANY, - detail=message_field.Detail.FAILED_TO_UPLOAD_VOLUME) - - def test_copy_volume_to_image_instance_deleted(self): - # During uploading volume to image if instance is deleted, - # volume should be in available status. 
- self.image_meta['id'] = 'a440c04b-79fa-479c-bed1-0b816eaec379' - # Creating volume testdata - self.volume_attrs['instance_uuid'] = 'b21f957d-a72f-4b93-b5a5-' \ - '45b1161abb02' - db.volume_create(self.context, self.volume_attrs) - - method = 'volume_update_status_based_on_attachment' - with mock.patch.object(db, method, - wraps=getattr(db, method)) as mock_update: - # Start test - self.volume.copy_volume_to_image(self.context, - self.volume_id, - self.image_meta) - # Check 'volume_update_status_after_copy_volume_to_image' - # is called 1 time - self.assertEqual(1, mock_update.call_count) - - # Check volume status has changed to available because - # instance is deleted - volume = db.volume_get(self.context, self.volume_id) - self.assertEqual('available', volume['status']) - - def test_copy_volume_to_image_status_use(self): - self.image_meta['id'] = 'a440c04b-79fa-479c-bed1-0b816eaec379' - # creating volume testdata - db.volume_create(self.context, self.volume_attrs) - - # start test - self.volume.copy_volume_to_image(self.context, - self.volume_id, - self.image_meta) - - volume = db.volume_get(self.context, self.volume_id) - self.assertEqual('available', volume['status']) - - def test_copy_volume_to_image_exception(self): - self.image_meta['id'] = NON_EXISTENT_IMAGE_ID - # creating volume testdata - self.volume_attrs['status'] = 'in-use' - db.volume_create(self.context, self.volume_attrs) - - # start test - self.assertRaises(exception.ImageNotFound, - self.volume.copy_volume_to_image, - self.context, - self.volume_id, - self.image_meta) - - volume = db.volume_get(self.context, self.volume_id) - self.assertEqual('available', volume['status']) - - def test_copy_volume_to_image_driver_not_initialized(self): - # creating volume testdata - db.volume_create(self.context, self.volume_attrs) - - # set initialized to False - self.volume.driver._initialized = False - - # start test - self.assertRaises(exception.DriverNotInitialized, - self.volume.copy_volume_to_image, - 
self.context, - self.volume_id, - self.image_meta) - - volume = db.volume_get(self.context, self.volume_id) - self.assertEqual('available', volume.status) - - def test_copy_volume_to_image_driver_exception(self): - self.image_meta['id'] = self.image_id - - image_service = fake_image.FakeImageService() - # create new image in queued state - queued_image_id = 'd5133f15-f753-41bd-920a-06b8c49275d9' - queued_image_meta = image_service.show(self.context, self.image_id) - queued_image_meta['id'] = queued_image_id - queued_image_meta['status'] = 'queued' - image_service.create(self.context, queued_image_meta) - - # create new image in saving state - saving_image_id = '5c6eec33-bab4-4e7d-b2c9-88e2d0a5f6f2' - saving_image_meta = image_service.show(self.context, self.image_id) - saving_image_meta['id'] = saving_image_id - saving_image_meta['status'] = 'saving' - image_service.create(self.context, saving_image_meta) - - # create volume - self.volume_attrs['status'] = 'available' - self.volume_attrs['instance_uuid'] = None - db.volume_create(self.context, self.volume_attrs) - - with mock.patch.object(self.volume.driver, - 'copy_volume_to_image') as driver_copy_mock: - driver_copy_mock.side_effect = exception.VolumeDriverException( - "Error") - - # test with image not in queued state - self.assertRaises(exception.VolumeDriverException, - self.volume.copy_volume_to_image, - self.context, - self.volume_id, - self.image_meta) - # Make sure we are passing an OVO instance and not an ORM instance - # to the driver - self.assertIsInstance(driver_copy_mock.call_args[0][1], - objects.Volume) - volume = db.volume_get(self.context, self.volume_id) - self.assertEqual('available', volume['status']) - # image shouldn't be deleted if it is not in queued state - image_service.show(self.context, self.image_id) - - # test with image in queued state - self.assertRaises(exception.VolumeDriverException, - self.volume.copy_volume_to_image, - self.context, - self.volume_id, - queued_image_meta) - 
volume = db.volume_get(self.context, self.volume_id) - self.assertEqual('available', volume['status']) - # queued image should be deleted - self.assertRaises(exception.ImageNotFound, - image_service.show, - self.context, - queued_image_id) - - # test with image in saving state - self.assertRaises(exception.VolumeDriverException, - self.volume.copy_volume_to_image, - self.context, - self.volume_id, - saving_image_meta) - volume = db.volume_get(self.context, self.volume_id) - self.assertEqual('available', volume['status']) - # image in saving state should be deleted - self.assertRaises(exception.ImageNotFound, - image_service.show, - self.context, - saving_image_id) - - @mock.patch.object(QUOTAS, 'reserve') - @mock.patch.object(QUOTAS, 'commit') - @mock.patch.object(vol_manager.VolumeManager, 'create_volume') - @mock.patch.object(fake_driver.FakeLoggingVolumeDriver, - 'copy_volume_to_image') - def _test_copy_volume_to_image_with_image_volume( - self, mock_copy, mock_create, mock_quota_commit, - mock_quota_reserve): - self.flags(glance_api_version=2) - self.volume.driver.configuration.image_upload_use_cinder_backend = True - self.addCleanup(fake_image.FakeImageService_reset) - image_service = fake_image.FakeImageService() - - def add_location_wrapper(ctx, id, uri, metadata): - try: - volume = db.volume_get(ctx, id) - self.assertEqual(ctx.project_id, - volume['metadata']['image_owner']) - except exception.VolumeNotFound: - pass - return image_service.add_location_orig(ctx, id, uri, metadata) - - image_service.add_location_orig = image_service.add_location - image_service.add_location = add_location_wrapper - - image_id = '5c6eec33-bab4-4e7d-b2c9-88e2d0a5f6f2' - self.image_meta['id'] = image_id - self.image_meta['status'] = 'queued' - image_service.create(self.context, self.image_meta) - - # creating volume testdata - self.volume_attrs['instance_uuid'] = None - db.volume_create(self.context, self.volume_attrs) - - def fake_create(context, volume, **kwargs): - 
db.volume_update(context, volume.id, {'status': 'available'}) - - mock_create.side_effect = fake_create - - # start test - self.volume.copy_volume_to_image(self.context, - self.volume_id, - self.image_meta) - - volume = db.volume_get(self.context, self.volume_id) - self.assertEqual('available', volume['status']) - - # return create image - image = image_service.show(self.context, image_id) - image_service.delete(self.context, image_id) - return image - - def test_copy_volume_to_image_with_image_volume(self): - image = self._test_copy_volume_to_image_with_image_volume() - self.assertTrue(image['locations'][0]['url'].startswith('cinder://')) - - def test_copy_volume_to_image_with_image_volume_qcow2(self): - self.image_meta['disk_format'] = 'qcow2' - image = self._test_copy_volume_to_image_with_image_volume() - self.assertNotIn('locations', image) - - @mock.patch.object(vol_manager.VolumeManager, 'delete_volume') - @mock.patch.object(fake_image._FakeImageService, 'add_location', - side_effect=exception.Invalid) - def test_copy_volume_to_image_with_image_volume_failure( - self, mock_add_location, mock_delete): - image = self._test_copy_volume_to_image_with_image_volume() - self.assertNotIn('locations', image) - self.assertTrue(mock_delete.called) - - -class ImageVolumeCacheTestCase(base.BaseVolumeTestCase): - - def setUp(self): - super(ImageVolumeCacheTestCase, self).setUp() - self.volume.driver.set_initialized() - - @mock.patch('oslo_utils.importutils.import_object') - def test_cache_configs(self, mock_import_object): - opts = { - 'image_volume_cache_enabled': True, - 'image_volume_cache_max_size_gb': 100, - 'image_volume_cache_max_count': 20 - } - - def conf_get(option): - if option in opts: - return opts[option] - else: - return None - - mock_driver = mock.Mock() - mock_driver.configuration.safe_get.side_effect = conf_get - mock_driver.configuration.extra_capabilities = 'null' - - def import_obj(*args, **kwargs): - return mock_driver - - 
mock_import_object.side_effect = import_obj - - manager = vol_manager.VolumeManager(volume_driver=mock_driver) - self.assertIsNotNone(manager) - self.assertIsNotNone(manager.image_volume_cache) - self.assertEqual(100, manager.image_volume_cache.max_cache_size_gb) - self.assertEqual(20, manager.image_volume_cache.max_cache_size_count) - - def test_delete_image_volume(self): - volume_params = { - 'status': 'creating', - 'host': 'some_host', - 'cluster_name': 'some_cluster', - 'size': 1 - } - volume_api = cinder.volume.api.API() - volume = tests_utils.create_volume(self.context, **volume_params) - volume.status = 'available' - volume.save() - image_id = '70a599e0-31e7-49b7-b260-868f441e862b' - db.image_volume_cache_create(self.context, - volume['host'], - volume_params['cluster_name'], - image_id, - datetime.datetime.utcnow(), - volume['id'], - volume['size']) - volume_api.delete(self.context, volume) - entry = db.image_volume_cache_get_by_volume_id(self.context, - volume['id']) - self.assertIsNone(entry) - - def test_delete_volume_with_keymanager_exception(self): - volume_params = { - 'host': 'some_host', - 'size': 1 - } - volume_api = cinder.volume.api.API() - volume = tests_utils.create_volume(self.context, **volume_params) - - with mock.patch.object( - volume_api.key_manager, 'delete') as key_del_mock: - key_del_mock.side_effect = Exception("Key not found") - volume_api.delete(self.context, volume) - - -class ImageVolumeTestCases(base.BaseVolumeTestCase): - - @mock.patch('cinder.volume.drivers.lvm.LVMVolumeDriver.' 
- 'create_cloned_volume') - @mock.patch('cinder.quota.QUOTAS.rollback') - @mock.patch('cinder.quota.QUOTAS.commit') - @mock.patch('cinder.quota.QUOTAS.reserve', return_value=["RESERVATION"]) - def test_clone_image_volume(self, mock_reserve, mock_commit, - mock_rollback, mock_cloned_volume): - vol = tests_utils.create_volume(self.context, - **self.volume_params) - # unnecessary attributes should be removed from image volume - vol.consistencygroup = None - result = self.volume._clone_image_volume(self.context, vol, - {'id': fake.VOLUME_ID}) - - self.assertNotEqual(False, result) - mock_reserve.assert_called_once_with(self.context, volumes=1, - gigabytes=vol.size) - mock_commit.assert_called_once_with(self.context, ["RESERVATION"], - project_id=vol.project_id) - - @mock.patch('cinder.quota.QUOTAS.rollback') - @mock.patch('cinder.quota.QUOTAS.commit') - @mock.patch('cinder.quota.QUOTAS.reserve', return_value=["RESERVATION"]) - def test_clone_image_volume_creation_failure(self, mock_reserve, - mock_commit, mock_rollback): - vol = tests_utils.create_volume(self.context, **self.volume_params) - with mock.patch.object(objects, 'Volume', side_effect=ValueError): - self.assertFalse(self.volume._clone_image_volume( - self.context, vol, {'id': fake.VOLUME_ID})) - - mock_reserve.assert_called_once_with(self.context, volumes=1, - gigabytes=vol.size) - mock_rollback.assert_called_once_with(self.context, ["RESERVATION"]) - - @mock.patch('cinder.image.image_utils.qemu_img_info') - def test_create_volume_from_image_cloned_status_available( - self, mock_qemu_info): - """Test create volume from image via cloning. - - Verify that after cloning image to volume, it is in available - state and is bootable. 
- """ - image_info = imageutils.QemuImgInfo() - image_info.virtual_size = '1073741824' - mock_qemu_info.return_value = image_info - - volume = self._create_volume_from_image() - self.assertEqual('available', volume['status']) - self.assertTrue(volume['bootable']) - self.volume.delete_volume(self.context, volume) - - @mock.patch('cinder.image.image_utils.qemu_img_info') - def test_create_volume_from_image_not_cloned_status_available( - self, mock_qemu_info): - """Test create volume from image via full copy. - - Verify that after copying image to volume, it is in available - state and is bootable. - """ - image_info = imageutils.QemuImgInfo() - image_info.virtual_size = '1073741824' - mock_qemu_info.return_value = image_info - - volume = self._create_volume_from_image(fakeout_clone_image=True) - self.assertEqual('available', volume['status']) - self.assertTrue(volume['bootable']) - self.volume.delete_volume(self.context, volume) - - def test_create_volume_from_image_exception(self): - """Test create volume from a non-existing image. - - Verify that create volume from a non-existing image, the volume - status is 'error' and is not bootable. 
- """ - dst_fd, dst_path = tempfile.mkstemp() - os.close(dst_fd) - - self.mock_object(self.volume.driver, 'local_path', lambda x: dst_path) - - # creating volume testdata - kwargs = {'display_description': 'Test Desc', - 'size': 20, - 'availability_zone': 'fake_availability_zone', - 'status': 'creating', - 'attach_status': fields.VolumeAttachStatus.DETACHED, - 'host': 'dummy'} - volume = objects.Volume(context=self.context, **kwargs) - volume.create() - - self.assertRaises(exception.ImageNotFound, - self.volume.create_volume, - self.context, - volume, - {'image_id': NON_EXISTENT_IMAGE_ID}) - volume = objects.Volume.get_by_id(self.context, volume.id) - self.assertEqual("error", volume['status']) - self.assertFalse(volume['bootable']) - # cleanup - volume.destroy() - os.unlink(dst_path) - - @mock.patch('cinder.image.image_utils.qemu_img_info') - def test_create_volume_from_image_copy_exception_rescheduling( - self, mock_qemu_info): - """Test create volume with ImageCopyFailure - - This exception should not trigger rescheduling and allocated_capacity - should be incremented so we're having assert for that here. - """ - image_info = imageutils.QemuImgInfo() - image_info.virtual_size = '1073741824' - mock_qemu_info.return_value = image_info - - def fake_copy_image_to_volume(context, volume, image_service, - image_id): - raise exception.ImageCopyFailure() - - self.mock_object(self.volume.driver, 'copy_image_to_volume', - fake_copy_image_to_volume) - mock_delete = self.mock_object(self.volume.driver, 'delete_volume') - self.assertRaises(exception.ImageCopyFailure, - self._create_volume_from_image) - # NOTE(dulek): Rescheduling should not occur, so lets assert that - # allocated_capacity is incremented. - self.assertDictEqual(self.volume.stats['pools'], - {'_pool0': {'allocated_capacity_gb': 1}}) - # NOTE(dulek): As we haven't rescheduled, make sure no delete_volume - # was called. 
- self.assertFalse(mock_delete.called) - - @mock.patch('cinder.utils.brick_get_connector_properties') - @mock.patch('cinder.utils.brick_get_connector') - @mock.patch('cinder.volume.driver.BaseVD.secure_file_operations_enabled') - @mock.patch('cinder.volume.driver.BaseVD._detach_volume') - @mock.patch('cinder.image.image_utils.qemu_img_info') - def test_create_volume_from_image_unavailable( - self, mock_qemu_info, mock_detach, mock_secure, *args): - """Test create volume with ImageCopyFailure - - We'll raise an exception inside _connect_device after volume has - already been attached to confirm that it detaches the volume. - """ - mock_secure.side_effect = NameError - image_info = imageutils.QemuImgInfo() - image_info.virtual_size = '1073741824' - mock_qemu_info.return_value = image_info - - unbound_copy_method = cinder.volume.driver.BaseVD.copy_image_to_volume - bound_copy_method = unbound_copy_method.__get__(self.volume.driver) - with mock.patch.object(self.volume.driver, 'copy_image_to_volume', - side_effect=bound_copy_method): - self.assertRaises(exception.ImageCopyFailure, - self._create_volume_from_image, - fakeout_copy_image_to_volume=False) - # We must have called detach method. - self.assertEqual(1, mock_detach.call_count) - - @mock.patch('cinder.utils.brick_get_connector_properties') - @mock.patch('cinder.utils.brick_get_connector') - @mock.patch('cinder.volume.driver.BaseVD._connect_device') - @mock.patch('cinder.volume.driver.BaseVD._detach_volume') - @mock.patch('cinder.image.image_utils.qemu_img_info') - def test_create_volume_from_image_unavailable_no_attach_info( - self, mock_qemu_info, mock_detach, mock_connect, *args): - """Test create volume with ImageCopyFailure - - We'll raise an exception on _connect_device call to confirm that it - detaches the volume even if the exception doesn't have attach_info. 
- """ - mock_connect.side_effect = NameError - image_info = imageutils.QemuImgInfo() - image_info.virtual_size = '1073741824' - mock_qemu_info.return_value = image_info - - unbound_copy_method = cinder.volume.driver.BaseVD.copy_image_to_volume - bound_copy_method = unbound_copy_method.__get__(self.volume.driver) - with mock.patch.object(self.volume.driver, 'copy_image_to_volume', - side_effect=bound_copy_method): - self.assertRaises(exception.ImageCopyFailure, - self._create_volume_from_image, - fakeout_copy_image_to_volume=False) - # We must have called detach method. - self.assertEqual(1, mock_detach.call_count) - - @mock.patch('cinder.image.image_utils.qemu_img_info') - def test_create_volume_from_image_clone_image_volume(self, mock_qemu_info): - """Test create volume from image via image volume. - - Verify that after cloning image to volume, it is in available - state and is bootable. - """ - image_info = imageutils.QemuImgInfo() - image_info.virtual_size = '1073741824' - mock_qemu_info.return_value = image_info - - volume = self._create_volume_from_image(clone_image_volume=True) - self.assertEqual('available', volume['status']) - self.assertTrue(volume['bootable']) - self.volume.delete_volume(self.context, volume) - - def test_create_volume_from_exact_sized_image(self): - """Test create volume from an image of the same size. - - Verify that an image which is exactly the same size as the - volume, will work correctly. 
- """ - try: - volume_id = None - volume_api = cinder.volume.api.API( - image_service=FakeImageService()) - volume = volume_api.create(self.context, 2, 'name', 'description', - image_id=self.FAKE_UUID) - volume_id = volume['id'] - self.assertEqual('creating', volume['status']) - - finally: - # cleanup - db.volume_destroy(self.context, volume_id) - - def test_create_volume_from_oversized_image(self): - """Verify that an image which is too big will fail correctly.""" - class _ModifiedFakeImageService(FakeImageService): - def show(self, context, image_id): - return {'size': 2 * units.Gi + 1, - 'disk_format': 'raw', - 'container_format': 'bare', - 'status': 'active'} - - volume_api = cinder.volume.api.API( - image_service=_ModifiedFakeImageService()) - - self.assertRaises(exception.InvalidInput, - volume_api.create, - self.context, 2, - 'name', 'description', image_id=1) - - def test_create_volume_with_mindisk_error(self): - """Verify volumes smaller than image minDisk will cause an error.""" - class _ModifiedFakeImageService(FakeImageService): - def show(self, context, image_id): - return {'size': 2 * units.Gi, - 'disk_format': 'raw', - 'container_format': 'bare', - 'min_disk': 5, - 'status': 'active'} - - volume_api = cinder.volume.api.API( - image_service=_ModifiedFakeImageService()) - - self.assertRaises(exception.InvalidInput, - volume_api.create, - self.context, 2, - 'name', 'description', image_id=1) - - def test_create_volume_with_deleted_imaged(self): - """Verify create volume from image will cause an error.""" - class _ModifiedFakeImageService(FakeImageService): - def show(self, context, image_id): - return {'size': 2 * units.Gi, - 'disk_format': 'raw', - 'container_format': 'bare', - 'min_disk': 5, - 'status': 'deleted'} - - volume_api = cinder.volume.api.API( - image_service=_ModifiedFakeImageService()) - - self.assertRaises(exception.InvalidInput, - volume_api.create, - self.context, 2, - 'name', 'description', image_id=1) - - def 
test_copy_volume_to_image_maintenance(self): - """Test copy volume to image in maintenance.""" - test_meta1 = {'fake_key1': 'fake_value1', 'fake_key2': 'fake_value2'} - volume = tests_utils.create_volume(self.context, metadata=test_meta1, - **self.volume_params) - volume['status'] = 'maintenance' - volume_api = cinder.volume.api.API() - self.assertRaises(exception.InvalidVolume, - volume_api.copy_volume_to_image, - self.context, - volume, - test_meta1, - force=True) diff --git a/cinder/tests/unit/volume/test_init_host.py b/cinder/tests/unit/volume/test_init_host.py deleted file mode 100644 index 9e30f6110..000000000 --- a/cinder/tests/unit/volume/test_init_host.py +++ /dev/null @@ -1,267 +0,0 @@ -# Copyright 2010 United States Government as represented by the -# Administrator of the National Aeronautics and Space Administration. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
-"""Tests for volume init host method cases.""" - -import mock -from oslo_config import cfg - -from cinder import context -from cinder import objects -from cinder.tests.unit import utils as tests_utils -from cinder.tests.unit import volume as base -from cinder.volume import driver -from cinder.volume import utils as volutils - - -CONF = cfg.CONF - - -class VolumeInitHostTestCase(base.BaseVolumeTestCase): - - def setUp(self): - super(VolumeInitHostTestCase, self).setUp() - self.service_id = 1 - - @mock.patch('cinder.manager.CleanableManager.init_host') - def test_init_host_count_allocated_capacity(self, init_host_mock): - vol0 = tests_utils.create_volume( - self.context, size=100, host=CONF.host) - vol1 = tests_utils.create_volume( - self.context, size=128, - host=volutils.append_host(CONF.host, 'pool0')) - vol2 = tests_utils.create_volume( - self.context, size=256, - host=volutils.append_host(CONF.host, 'pool0')) - vol3 = tests_utils.create_volume( - self.context, size=512, - host=volutils.append_host(CONF.host, 'pool1')) - vol4 = tests_utils.create_volume( - self.context, size=1024, - host=volutils.append_host(CONF.host, 'pool2')) - self.volume.init_host(service_id=self.service_id) - init_host_mock.assert_called_once_with( - service_id=self.service_id, added_to_cluster=None) - stats = self.volume.stats - self.assertEqual(2020, stats['allocated_capacity_gb']) - self.assertEqual( - 384, stats['pools']['pool0']['allocated_capacity_gb']) - self.assertEqual( - 512, stats['pools']['pool1']['allocated_capacity_gb']) - self.assertEqual( - 1024, stats['pools']['pool2']['allocated_capacity_gb']) - - # NOTE(jdg): On the create we have host='xyz', BUT - # here we do a db.volume_get, and now the host has - # been updated to xyz#pool-name. 
Note this is - # done via the managers init, which calls the drivers - # get_pool method, which in the legacy case is going - # to be volume_backend_name or None - - vol0.refresh() - expected_host = volutils.append_host(CONF.host, 'fake') - self.assertEqual(expected_host, vol0.host) - self.volume.delete_volume(self.context, vol0) - self.volume.delete_volume(self.context, vol1) - self.volume.delete_volume(self.context, vol2) - self.volume.delete_volume(self.context, vol3) - self.volume.delete_volume(self.context, vol4) - - @mock.patch('cinder.manager.CleanableManager.init_host') - def test_init_host_count_allocated_capacity_cluster(self, init_host_mock): - cluster_name = 'mycluster' - self.volume.cluster = cluster_name - # All these volumes belong to the same cluster, so we will calculate - # the capacity of them all because we query the DB by cluster_name. - tests_utils.create_volume(self.context, size=100, host=CONF.host, - cluster_name=cluster_name) - tests_utils.create_volume( - self.context, size=128, cluster_name=cluster_name, - host=volutils.append_host(CONF.host, 'pool0')) - tests_utils.create_volume( - self.context, size=256, cluster_name=cluster_name, - host=volutils.append_host(CONF.host + '2', 'pool0')) - tests_utils.create_volume( - self.context, size=512, cluster_name=cluster_name, - host=volutils.append_host(CONF.host + '2', 'pool1')) - tests_utils.create_volume( - self.context, size=1024, cluster_name=cluster_name, - host=volutils.append_host(CONF.host + '3', 'pool2')) - - # These don't belong to the cluster so they will be ignored - tests_utils.create_volume( - self.context, size=1024, - host=volutils.append_host(CONF.host, 'pool2')) - tests_utils.create_volume( - self.context, size=1024, cluster_name=cluster_name + '1', - host=volutils.append_host(CONF.host + '3', 'pool2')) - - self.volume.init_host(service_id=self.service_id) - init_host_mock.assert_called_once_with( - service_id=self.service_id, added_to_cluster=None) - stats = self.volume.stats 
- self.assertEqual(2020, stats['allocated_capacity_gb']) - self.assertEqual( - 384, stats['pools']['pool0']['allocated_capacity_gb']) - self.assertEqual( - 512, stats['pools']['pool1']['allocated_capacity_gb']) - self.assertEqual( - 1024, stats['pools']['pool2']['allocated_capacity_gb']) - - @mock.patch.object(driver.BaseVD, "update_provider_info") - def test_init_host_sync_provider_info(self, mock_update): - vol0 = tests_utils.create_volume( - self.context, size=1, host=CONF.host) - vol1 = tests_utils.create_volume( - self.context, size=1, host=CONF.host) - vol2 = tests_utils.create_volume( - self.context, size=1, host=CONF.host, status='creating') - snap0 = tests_utils.create_snapshot(self.context, vol0.id) - snap1 = tests_utils.create_snapshot(self.context, vol1.id) - # Return values for update_provider_info - volumes = [{'id': vol0.id, 'provider_id': '1 2 xxxx'}, - {'id': vol1.id, 'provider_id': '3 4 yyyy'}] - snapshots = [{'id': snap0.id, 'provider_id': '5 6 xxxx'}, - {'id': snap1.id, 'provider_id': '7 8 yyyy'}] - mock_update.return_value = (volumes, snapshots) - # initialize - self.volume.init_host(service_id=self.service_id) - # Grab volume and snapshot objects - vol0_obj = objects.Volume.get_by_id(context.get_admin_context(), - vol0.id) - vol1_obj = objects.Volume.get_by_id(context.get_admin_context(), - vol1.id) - vol2_obj = objects.Volume.get_by_id(context.get_admin_context(), - vol2.id) - snap0_obj = objects.Snapshot.get_by_id(self.context, snap0.id) - snap1_obj = objects.Snapshot.get_by_id(self.context, snap1.id) - # Check updated provider ids - self.assertEqual('1 2 xxxx', vol0_obj.provider_id) - self.assertEqual('3 4 yyyy', vol1_obj.provider_id) - self.assertIsNone(vol2_obj.provider_id) - self.assertEqual('5 6 xxxx', snap0_obj.provider_id) - self.assertEqual('7 8 yyyy', snap1_obj.provider_id) - # Clean up - self.volume.delete_snapshot(self.context, snap0_obj) - self.volume.delete_snapshot(self.context, snap1_obj) - 
self.volume.delete_volume(self.context, vol0) - self.volume.delete_volume(self.context, vol1) - - @mock.patch.object(driver.BaseVD, "update_provider_info") - def test_init_host_sync_provider_info_no_update(self, mock_update): - vol0 = tests_utils.create_volume( - self.context, size=1, host=CONF.host) - vol1 = tests_utils.create_volume( - self.context, size=1, host=CONF.host) - snap0 = tests_utils.create_snapshot(self.context, vol0.id) - snap1 = tests_utils.create_snapshot(self.context, vol1.id) - mock_update.return_value = ([], []) - # initialize - self.volume.init_host(service_id=self.service_id) - # Grab volume and snapshot objects - vol0_obj = objects.Volume.get_by_id(context.get_admin_context(), - vol0.id) - vol1_obj = objects.Volume.get_by_id(context.get_admin_context(), - vol1.id) - snap0_obj = objects.Snapshot.get_by_id(self.context, snap0.id) - snap1_obj = objects.Snapshot.get_by_id(self.context, snap1.id) - # Check provider ids are not changed - self.assertIsNone(vol0_obj.provider_id) - self.assertIsNone(vol1_obj.provider_id) - self.assertIsNone(snap0_obj.provider_id) - self.assertIsNone(snap1_obj.provider_id) - # Clean up - self.volume.delete_snapshot(self.context, snap0_obj) - self.volume.delete_snapshot(self.context, snap1_obj) - self.volume.delete_volume(self.context, vol0) - self.volume.delete_volume(self.context, vol1) - - @mock.patch.object(driver.BaseVD, "update_provider_info") - def test_init_host_sync_provider_info_no_update_cluster(self, mock_update): - cluster_name = 'mycluster' - self.volume.cluster = cluster_name - vol0 = tests_utils.create_volume( - self.context, size=1, host=CONF.host, cluster_name=cluster_name) - vol1 = tests_utils.create_volume( - self.context, size=1, host=CONF.host + '2', - cluster_name=cluster_name) - vol2 = tests_utils.create_volume( - self.context, size=1, host=CONF.host) - vol3 = tests_utils.create_volume( - self.context, size=1, host=CONF.host, - cluster_name=cluster_name + '2') - snap0 = 
tests_utils.create_snapshot(self.context, vol0.id) - snap1 = tests_utils.create_snapshot(self.context, vol1.id) - tests_utils.create_snapshot(self.context, vol2.id) - tests_utils.create_snapshot(self.context, vol3.id) - mock_update.return_value = ([], []) - # initialize - self.volume.init_host(service_id=self.service_id) - # Grab volume and snapshot objects - vol0_obj = objects.Volume.get_by_id(context.get_admin_context(), - vol0.id) - vol1_obj = objects.Volume.get_by_id(context.get_admin_context(), - vol1.id) - snap0_obj = objects.Snapshot.get_by_id(self.context, snap0.id) - snap1_obj = objects.Snapshot.get_by_id(self.context, snap1.id) - - self.assertSetEqual({vol0.id, vol1.id}, - {vol.id for vol in mock_update.call_args[0][0]}) - self.assertSetEqual({snap0.id, snap1.id}, - {snap.id for snap in mock_update.call_args[0][1]}) - # Check provider ids are not changed - self.assertIsNone(vol0_obj.provider_id) - self.assertIsNone(vol1_obj.provider_id) - self.assertIsNone(snap0_obj.provider_id) - self.assertIsNone(snap1_obj.provider_id) - # Clean up - self.volume.delete_snapshot(self.context, snap0_obj) - self.volume.delete_snapshot(self.context, snap1_obj) - self.volume.delete_volume(self.context, vol0) - self.volume.delete_volume(self.context, vol1) - - @mock.patch('cinder.volume.manager.VolumeManager.' - '_include_resources_in_cluster') - def test_init_host_cluster_not_changed(self, include_in_cluster_mock): - self.volume.init_host(added_to_cluster=False, - service_id=self.service_id) - include_in_cluster_mock.assert_not_called() - - @mock.patch('cinder.objects.snapshot.SnapshotList.get_all', - return_value=[]) - @mock.patch('cinder.objects.volume.VolumeList.get_all', return_value=[]) - @mock.patch('cinder.objects.volume.VolumeList.include_in_cluster') - @mock.patch('cinder.objects.consistencygroup.ConsistencyGroupList.' 
- 'include_in_cluster') - @mock.patch('cinder.db.image_volume_cache_include_in_cluster') - def test_init_host_added_to_cluster(self, image_cache_include_mock, - cg_include_mock, - vol_include_mock, vol_get_all_mock, - snap_get_all_mock): - cluster = str(mock.sentinel.cluster) - self.mock_object(self.volume, 'cluster', cluster) - self.volume.init_host(added_to_cluster=True, - service_id=self.service_id) - - vol_include_mock.assert_called_once_with(mock.ANY, cluster, - host=self.volume.host) - cg_include_mock.assert_called_once_with(mock.ANY, cluster, - host=self.volume.host) - image_cache_include_mock.assert_called_once_with(mock.ANY, cluster, - host=self.volume.host) - vol_get_all_mock.assert_called_once_with( - mock.ANY, filters={'cluster_name': cluster}) - snap_get_all_mock.assert_called_once_with( - mock.ANY, filters={'cluster_name': cluster}) diff --git a/cinder/tests/unit/volume/test_manage_volume.py b/cinder/tests/unit/volume/test_manage_volume.py deleted file mode 100644 index f9a81cd57..000000000 --- a/cinder/tests/unit/volume/test_manage_volume.py +++ /dev/null @@ -1,289 +0,0 @@ -# Copyright (c) 2016 Chuck Fouts. All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -import mock - -from cinder import context -from cinder import db -from cinder import exception -from cinder import objects -from cinder import quota -from cinder.tests.unit import fake_constants as fake -from cinder.tests.unit import fake_volume -from cinder.tests.unit import utils as tests_utils -from cinder.tests.unit import volume as base -from cinder.volume.flows.manager import manage_existing -from cinder.volume import manager -from cinder.volume import utils - -FAKE_HOST_POOL = 'volPool' -FAKE_HOST = 'hostname@backend' - -QUOTAS = quota.QUOTAS - - -class ManageVolumeTestCase(base.BaseVolumeTestCase): - - def setUp(self): - super(ManageVolumeTestCase, self).setUp() - self.context = context.RequestContext(fake.USER_ID, fake.PROJECT_ID, - True) - self.manager = manager.VolumeManager() - self.manager.stats = {'allocated_capacity_gb': 0, 'pools': {}} - - @staticmethod - def _stub_volume_object_get(cls, host=FAKE_HOST): - volume = { - 'id': fake.VOLUME_ID, - 'size': 1, - 'name': fake.VOLUME_NAME, - 'host': host, - } - return fake_volume.fake_volume_obj(cls.context, **volume) - - def test_manage_existing(self): - volume_object = self._stub_volume_object_get(self) - mock_run_flow_engine = self.mock_object( - self.manager, '_run_manage_existing_flow_engine', - return_value=volume_object) - mock_update_volume_stats = self.mock_object( - self.manager, '_update_stats_for_managed') - - result = self.manager.manage_existing(self.context, volume_object) - - self.assertEqual(fake.VOLUME_ID, result) - mock_run_flow_engine.assert_called_once_with(self.context, - volume_object, - None) - mock_update_volume_stats.assert_called_once_with(volume_object) - - def test_manage_existing_with_volume_object(self): - volume_object = self._stub_volume_object_get(self) - mock_object_volume = self.mock_object(objects.Volume, 'get_by_id') - mock_run_flow_engine = self.mock_object( - self.manager, '_run_manage_existing_flow_engine', - return_value=volume_object) - mock_update_volume_stats = 
self.mock_object( - self.manager, '_update_stats_for_managed') - - result = self.manager.manage_existing( - self.context, volume_object) - - self.assertEqual(fake.VOLUME_ID, result) - mock_object_volume.assert_not_called() - mock_run_flow_engine.assert_called_once_with(self.context, - volume_object, - None) - mock_update_volume_stats.assert_called_once_with(volume_object) - - def test_run_manage_existing_flow_engine(self): - mock_volume = mock.Mock() - volume_object = self._stub_volume_object_get(self) - - mock_flow_engine = mock.Mock() - mock_flow_engine_run = self.mock_object(mock_flow_engine, 'run') - mock_flow_engine_fetch = self.mock_object( - mock_flow_engine.storage, 'fetch', return_value=volume_object) - mock_get_flow = self.mock_object( - manage_existing, 'get_flow', return_value=mock_flow_engine) - - result = self.manager._run_manage_existing_flow_engine(self.context, - mock_volume, - None) - - self.assertEqual(volume_object, result) - - mock_get_flow.assert_called_once_with(self.context, - self.manager.db, - self.manager.driver, - self.manager.host, - mock_volume, - None) - mock_flow_engine_run.assert_called_once_with() - mock_flow_engine_fetch.assert_called_once_with('volume') - - def test_run_manage_existing_flow_engine_exception(self): - mock_get_flow = self.mock_object( - manage_existing, 'get_flow', side_effect=Exception) - volume_object = self._stub_volume_object_get(self) - self.assertRaises(exception.CinderException, - self.manager._run_manage_existing_flow_engine, - self.context, - volume_object, - None) - - mock_get_flow.assert_called_once_with(self.context, - self.manager.db, - self.manager.driver, - self.manager.host, - volume_object, - None) - - def test_update_stats_for_managed(self): - volume_object = self._stub_volume_object_get(self, - host=FAKE_HOST + - '#volPool') - self.manager._update_stats_for_managed(volume_object) - backend_stats = self.manager.stats['pools'][FAKE_HOST_POOL] - self.assertEqual( - 1, 
backend_stats['allocated_capacity_gb']) - - def test_update_stats_for_managed_no_pool(self): - safe_get_backend = 'safe_get_backend' - volume_obj = self._stub_volume_object_get(self) - mock_safe_get = self.mock_object( - self.manager.driver.configuration, 'safe_get', - return_value=safe_get_backend) - - self.manager._update_stats_for_managed(volume_obj) - - mock_safe_get.assert_called_once_with('volume_backend_name') - backend_stats = self.manager.stats['pools'][safe_get_backend] - self.assertEqual(1, backend_stats['allocated_capacity_gb']) - - def test_update_stats_for_managed_default_backend(self): - volume_obj = self._stub_volume_object_get(self) - mock_safe_get = self.mock_object( - self.manager.driver.configuration, 'safe_get', return_value=None) - - self.manager._update_stats_for_managed(volume_obj) - - mock_safe_get.assert_called_once_with('volume_backend_name') - backend_stats = self.manager.stats['pools'][utils.DEFAULT_POOL_NAME] - self.assertEqual(1, backend_stats['allocated_capacity_gb']) - - def test_update_stats_key_error(self): - self.manager.stats = {} - - self.assertRaises( - KeyError, self.manager._update_stats_for_managed, - self._stub_volume_object_get(self)) - - @mock.patch('cinder.volume.drivers.lvm.LVMVolumeDriver.' - 'manage_existing') - @mock.patch('cinder.volume.drivers.lvm.LVMVolumeDriver.' 
- 'manage_existing_get_size') - @mock.patch('cinder.volume.utils.notify_about_volume_usage') - def test_manage_volume_with_notify(self, mock_notify, mock_size, - mock_manage): - elevated = context.get_admin_context() - vol_type = db.volume_type_create( - elevated, {'name': 'type1', 'extra_specs': {}}) - # create source volume - volume_params = {'volume_type_id': vol_type.id, 'status': 'managing'} - test_vol = tests_utils.create_volume(self.context, **volume_params) - mock_size.return_value = 1 - mock_manage.return_value = None - - self.volume.manage_existing(self.context, test_vol, 'volume_ref') - mock_notify.assert_called_with(self.context, test_vol, - 'manage_existing.end', - host=test_vol.host) - - @mock.patch('cinder.volume.drivers.lvm.LVMVolumeDriver.' - 'manage_existing_get_size') - @mock.patch('cinder.volume.flows.manager.manage_existing.' - 'ManageExistingTask.execute') - def test_manage_volume_raise_driver_exception(self, mock_execute, - mock_driver_get_size): - elevated = context.get_admin_context() - project_id = self.context.project_id - db.volume_type_create(elevated, {'name': 'type1', 'extra_specs': {}}) - vol_type = db.volume_type_get_by_name(elevated, 'type1') - # create source volume - self.volume_params['volume_type_id'] = vol_type['id'] - self.volume_params['status'] = 'managing' - test_vol = tests_utils.create_volume(self.context, - **self.volume_params) - mock_execute.side_effect = exception.VolumeBackendAPIException( - data="volume driver got exception") - mock_driver_get_size.return_value = 1 - # Set quota usage - reserve_opts = {'volumes': 1, 'gigabytes': 1} - reservations = QUOTAS.reserve(self.context, project_id=project_id, - **reserve_opts) - QUOTAS.commit(self.context, reservations) - usage = db.quota_usage_get(self.context, project_id, 'volumes') - volumes_in_use = usage.in_use - usage = db.quota_usage_get(self.context, project_id, 'gigabytes') - gigabytes_in_use = usage.in_use - - self.assertRaises(exception.VolumeBackendAPIException, 
- self.volume.manage_existing, - self.context, test_vol, - 'volume_ref') - # check volume status - volume = objects.Volume.get_by_id(context.get_admin_context(), - test_vol.id) - self.assertEqual('error_managing', volume.status) - # Delete this volume with 'error_managing_deleting' status in c-vol. - test_vol.status = 'error_managing_deleting' - test_vol.save() - self.volume.delete_volume(self.context, test_vol) - ctxt = context.get_admin_context(read_deleted='yes') - volume = objects.Volume.get_by_id(ctxt, test_vol.id) - self.assertEqual('deleted', volume.status) - # Get in_use number after deleting error_managing volume - usage = db.quota_usage_get(self.context, project_id, 'volumes') - volumes_in_use_new = usage.in_use - self.assertEqual(volumes_in_use, volumes_in_use_new) - usage = db.quota_usage_get(self.context, project_id, 'gigabytes') - gigabytes_in_use_new = usage.in_use - self.assertEqual(gigabytes_in_use, gigabytes_in_use_new) - - @mock.patch('cinder.volume.drivers.lvm.LVMVolumeDriver.' 
- 'manage_existing_get_size') - def test_manage_volume_raise_driver_size_exception(self, - mock_driver_get_size): - elevated = context.get_admin_context() - project_id = self.context.project_id - db.volume_type_create(elevated, {'name': 'type1', 'extra_specs': {}}) - # create source volume - test_vol = tests_utils.create_volume(self.context, - **self.volume_params) - mock_driver_get_size.side_effect = exception.VolumeBackendAPIException( - data="volume driver got exception") - - # Set quota usage - reserve_opts = {'volumes': 1, 'gigabytes': 1} - reservations = QUOTAS.reserve(self.context, project_id=project_id, - **reserve_opts) - QUOTAS.commit(self.context, reservations) - usage = db.quota_usage_get(self.context, project_id, 'volumes') - volumes_in_use = usage.in_use - usage = db.quota_usage_get(self.context, project_id, 'gigabytes') - gigabytes_in_use = usage.in_use - - self.assertRaises(exception.VolumeBackendAPIException, - self.volume.manage_existing, - self.context, test_vol, - 'volume_ref') - # check volume status - volume = objects.Volume.get_by_id(context.get_admin_context(), - test_vol.id) - self.assertEqual('error_managing', volume.status) - # Delete this volume with 'error_managing_deleting' status in c-vol. 
- test_vol.status = 'error_managing_deleting' - test_vol.save() - self.volume.delete_volume(self.context, test_vol) - ctxt = context.get_admin_context(read_deleted='yes') - volume = objects.Volume.get_by_id(ctxt, test_vol.id) - self.assertEqual('deleted', volume.status) - # Get in_use number after raising exception - usage = db.quota_usage_get(self.context, project_id, 'volumes') - volumes_in_use_new = usage.in_use - self.assertEqual(volumes_in_use, volumes_in_use_new) - usage = db.quota_usage_get(self.context, project_id, 'gigabytes') - gigabytes_in_use_new = usage.in_use - self.assertEqual(gigabytes_in_use, gigabytes_in_use_new) diff --git a/cinder/tests/unit/volume/test_policy.py b/cinder/tests/unit/volume/test_policy.py deleted file mode 100644 index b4a931b91..000000000 --- a/cinder/tests/unit/volume/test_policy.py +++ /dev/null @@ -1,55 +0,0 @@ -# Copyright 2010 United States Government as represented by the -# Administrator of the National Aeronautics and Space Administration. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
-"""Tests for volume policy.""" -import mock - -from cinder import context -from cinder import test - -import cinder.policy - - -class VolumePolicyTestCase(test.TestCase): - - def setUp(self): - super(VolumePolicyTestCase, self).setUp() - - cinder.policy.init() - - self.context = context.get_admin_context() - - def test_check_policy(self): - target = { - 'project_id': self.context.project_id, - 'user_id': self.context.user_id, - } - with mock.patch.object(cinder.policy, 'enforce') as mock_enforce: - cinder.volume.api.check_policy(self.context, 'attach') - mock_enforce.assert_called_once_with(self.context, - 'volume:attach', - target) - - def test_check_policy_with_target(self): - target = { - 'project_id': self.context.project_id, - 'user_id': self.context.user_id, - 'id': 2, - } - with mock.patch.object(cinder.policy, 'enforce') as mock_enforce: - cinder.volume.api.check_policy(self.context, 'attach', {'id': 2}) - mock_enforce.assert_called_once_with(self.context, - 'volume:attach', - target) diff --git a/cinder/tests/unit/volume/test_replication_manager.py b/cinder/tests/unit/volume/test_replication_manager.py deleted file mode 100644 index e3f95b3f5..000000000 --- a/cinder/tests/unit/volume/test_replication_manager.py +++ /dev/null @@ -1,665 +0,0 @@ -# Copyright (c) 2016 Red Hat, Inc. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -import uuid - -import ddt - -import mock - -from oslo_config import cfg -from oslo_utils import timeutils - -from cinder.common import constants -from cinder import exception -from cinder import objects -from cinder.objects import fields -from cinder.tests.unit import fake_constants as fake -from cinder.tests.unit import fake_service -from cinder.tests.unit import utils -from cinder.tests.unit import volume as base -import cinder.volume -from cinder.volume import manager -from cinder.volume import rpcapi as volume_rpcapi -from cinder.volume import utils as vol_utils - - -CONF = cfg.CONF - - -@ddt.ddt -class ReplicationTestCase(base.BaseVolumeTestCase): - def setUp(self): - super(ReplicationTestCase, self).setUp() - self.host = 'host@backend#pool' - self.manager = manager.VolumeManager(host=self.host) - - @mock.patch('cinder.objects.VolumeList.get_all') - @mock.patch('cinder.volume.driver.BaseVD.failover_host', - side_effect=exception.InvalidReplicationTarget('')) - @ddt.data(('backend2', 'default', fields.ReplicationStatus.FAILED_OVER), - ('backend2', 'backend3', fields.ReplicationStatus.FAILED_OVER), - (None, 'backend2', fields.ReplicationStatus.ENABLED), - ('', 'backend2', fields.ReplicationStatus.ENABLED)) - @ddt.unpack - def test_failover_host_invalid_target(self, svc_backend, new_backend, - expected, mock_failover, - mock_getall): - """Test replication failover_host with invalid_target. - - When failingover fails due to an invalid target exception we return - replication_status to its previous status, and we decide what that is - depending on the currect active backend. 
- """ - svc = utils.create_service( - self.context, - {'host': self.host, - 'binary': constants.VOLUME_BINARY, - 'active_backend_id': svc_backend, - 'replication_status': fields.ReplicationStatus.FAILING_OVER}) - - self.manager.failover_host(self.context, new_backend) - mock_getall.assert_called_once_with(self.context, - filters={'host': self.host}) - mock_failover.assert_called_once_with(self.context, - [], - secondary_id=new_backend, - groups=[]) - - db_svc = objects.Service.get_by_id(self.context, svc.id) - self.assertEqual(expected, db_svc.replication_status) - - @mock.patch('cinder.volume.driver.BaseVD.failover_host', - mock.Mock(side_effect=exception.VolumeDriverException(''))) - def test_failover_host_driver_exception(self): - svc = utils.create_service( - self.context, - {'host': self.host, - 'binary': constants.VOLUME_BINARY, - 'active_backend_id': None, - 'replication_status': fields.ReplicationStatus.FAILING_OVER}) - - self.manager.failover_host(self.context, mock.sentinel.backend_id) - - db_svc = objects.Service.get_by_id(self.context, svc.id) - self.assertEqual(fields.ReplicationStatus.FAILOVER_ERROR, - db_svc.replication_status) - - @mock.patch('cinder.objects.Service.is_up', True) - @mock.patch.object(volume_rpcapi.VolumeAPI, 'failover') - @mock.patch.object(cinder.db, 'conditional_update') - @mock.patch.object(objects.ServiceList, 'get_all') - def test_failover(self, mock_get_all, mock_db_update, mock_failover): - """Test replication failover.""" - - service = fake_service.fake_service_obj(self.context, - binary='cinder-volume') - mock_get_all.return_value = [service] - mock_db_update.return_value = {'replication_status': 'enabled'} - volume_api = cinder.volume.api.API() - volume_api.failover(self.context, host=CONF.host, cluster_name=None) - mock_failover.assert_called_once_with(self.context, service, None) - - @mock.patch.object(volume_rpcapi.VolumeAPI, 'failover') - @mock.patch.object(cinder.db, 'conditional_update') - 
@mock.patch.object(cinder.db, 'service_get_all') - def test_failover_unexpected_status(self, mock_db_get_all, mock_db_update, - mock_failover): - """Test replication failover unexpected status.""" - - mock_db_get_all.return_value = [fake_service.fake_service_obj( - self.context, - binary='cinder-volume')] - mock_db_update.return_value = None - volume_api = cinder.volume.api.API() - self.assertRaises(exception.InvalidInput, - volume_api.failover, - self.context, - host=CONF.host, - cluster_name=None) - - @mock.patch.object(volume_rpcapi.VolumeAPI, 'freeze_host') - @mock.patch.object(cinder.db, 'conditional_update', return_value=1) - @mock.patch.object(cinder.objects.ServiceList, 'get_all') - def test_freeze_host(self, mock_get_all, mock_db_update, - mock_freeze): - """Test replication freeze_host.""" - - service = fake_service.fake_service_obj(self.context, - binary='cinder-volume') - mock_get_all.return_value = [service] - mock_freeze.return_value = True - volume_api = cinder.volume.api.API() - volume_api.freeze_host(self.context, host=CONF.host, cluster_name=None) - mock_freeze.assert_called_once_with(self.context, service) - - @mock.patch.object(volume_rpcapi.VolumeAPI, 'freeze_host') - @mock.patch.object(cinder.db, 'conditional_update') - @mock.patch.object(cinder.db, 'service_get_all') - def test_freeze_host_unexpected_status(self, mock_get_all, - mock_db_update, - mock_freeze): - """Test replication freeze_host unexpected status.""" - - mock_get_all.return_value = [fake_service.fake_service_obj( - self.context, - binary='cinder-volume')] - mock_db_update.return_value = None - volume_api = cinder.volume.api.API() - self.assertRaises(exception.InvalidInput, - volume_api.freeze_host, - self.context, - host=CONF.host, - cluster_name=None) - - @mock.patch.object(volume_rpcapi.VolumeAPI, 'thaw_host') - @mock.patch.object(cinder.db, 'conditional_update', return_value=1) - @mock.patch.object(cinder.objects.ServiceList, 'get_all') - def test_thaw_host(self, 
mock_get_all, mock_db_update, - mock_thaw): - """Test replication thaw_host.""" - - service = fake_service.fake_service_obj(self.context, - binary='cinder-volume') - mock_get_all.return_value = [service] - mock_thaw.return_value = True - volume_api = cinder.volume.api.API() - volume_api.thaw_host(self.context, host=CONF.host, cluster_name=None) - mock_thaw.assert_called_once_with(self.context, service) - - @mock.patch.object(volume_rpcapi.VolumeAPI, 'thaw_host') - @mock.patch.object(cinder.db, 'conditional_update') - @mock.patch.object(cinder.db, 'service_get_all') - def test_thaw_host_unexpected_status(self, mock_get_all, - mock_db_update, - mock_thaw): - """Test replication thaw_host unexpected status.""" - - mock_get_all.return_value = [fake_service.fake_service_obj( - self.context, - binary='cinder-volume')] - mock_db_update.return_value = None - volume_api = cinder.volume.api.API() - self.assertRaises(exception.InvalidInput, - volume_api.thaw_host, - self.context, - host=CONF.host, cluster_name=None) - - @mock.patch('cinder.volume.driver.BaseVD.failover_completed') - def test_failover_completed(self, completed_mock): - rep_field = fields.ReplicationStatus - svc = objects.Service(self.context, host=self.volume.host, - binary=constants.VOLUME_BINARY, - replication_status=rep_field.ENABLED) - svc.create() - self.volume.failover_completed( - self.context, - {'active_backend_id': 'secondary', - 'replication_status': rep_field.FAILED_OVER}) - service = objects.Service.get_by_id(self.context, svc.id) - self.assertEqual('secondary', service.active_backend_id) - self.assertEqual('failed-over', service.replication_status) - completed_mock.assert_called_once_with(self.context, 'secondary') - - @mock.patch('cinder.volume.driver.BaseVD.failover_completed', wraps=True) - def test_failover_completed_driver_failure(self, completed_mock): - rep_field = fields.ReplicationStatus - svc = objects.Service(self.context, host=self.volume.host, - binary=constants.VOLUME_BINARY, - 
replication_status=rep_field.ENABLED) - svc.create() - self.volume.failover_completed( - self.context, - {'active_backend_id': 'secondary', - 'replication_status': rep_field.FAILED_OVER}) - service = objects.Service.get_by_id(self.context, svc.id) - self.assertEqual('secondary', service.active_backend_id) - self.assertEqual(rep_field.ERROR, service.replication_status) - self.assertTrue(service.disabled) - self.assertIsNotNone(service.disabled_reason) - completed_mock.assert_called_once_with(self.context, 'secondary') - - @mock.patch('cinder.volume.rpcapi.VolumeAPI.failover_completed') - def test_finish_failover_non_clustered(self, completed_mock): - svc = mock.Mock(is_clustered=None) - self.volume.finish_failover(self.context, svc, mock.sentinel.updates) - svc.update.assert_called_once_with(mock.sentinel.updates) - svc.save.assert_called_once_with() - completed_mock.assert_not_called() - - @mock.patch('cinder.volume.rpcapi.VolumeAPI.failover_completed') - def test_finish_failover_clustered(self, completed_mock): - svc = mock.Mock(cluster_name='cluster_name') - updates = {'status': 'error'} - self.volume.finish_failover(self.context, svc, updates) - completed_mock.assert_called_once_with(self.context, svc, updates) - svc.cluster.status = 'error' - svc.cluster.save.assert_called_once() - - @ddt.data(None, 'cluster_name') - @mock.patch('cinder.volume.manager.VolumeManager.finish_failover') - @mock.patch('cinder.volume.manager.VolumeManager._get_my_volumes') - def test_failover_manager(self, cluster, get_vols_mock, finish_mock): - """Test manager's failover method for clustered and not clustered.""" - rep_field = fields.ReplicationStatus - svc = objects.Service(self.context, host=self.volume.host, - binary=constants.VOLUME_BINARY, - cluster_name=cluster, - replication_status=rep_field.ENABLED) - svc.create() - - vol = objects.Volume(self.context, host=self.volume.host) - vol.create() - - get_vols_mock.return_value = [vol] - - with mock.patch.object(self.volume, 
'driver') as driver: - called, not_called = driver.failover_host, driver.failover - if cluster: - called, not_called = not_called, called - - called.return_value = ('secondary', [{'volume_id': vol.id, - 'updates': {'status': 'error'}}], []) - - self.volume.failover(self.context, - secondary_backend_id='secondary') - - not_called.assert_not_called() - called.assert_called_once_with(self.context, [vol], - secondary_id='secondary', groups=[]) - - expected_update = {'replication_status': rep_field.FAILED_OVER, - 'active_backend_id': 'secondary', - 'disabled': True, - 'disabled_reason': 'failed-over'} - finish_mock.assert_called_once_with(self.context, svc, expected_update) - - volume = objects.Volume.get_by_id(self.context, vol.id) - self.assertEqual('error', volume.status) - - @ddt.data(('host1', None), (None, 'mycluster')) - @ddt.unpack - def test_failover_api_fail_multiple_results(self, host, cluster): - """Fail if we try to failover multiple backends in the same request.""" - rep_field = fields.ReplicationStatus - clusters = [ - objects.Cluster(self.context, - name='mycluster@backend1', - replication_status=rep_field.ENABLED, - binary=constants.VOLUME_BINARY), - objects.Cluster(self.context, - name='mycluster@backend2', - replication_status=rep_field.ENABLED, - binary=constants.VOLUME_BINARY) - ] - clusters[0].create() - clusters[1].create() - services = [ - objects.Service(self.context, host='host1@backend1', - cluster_name=clusters[0].name, - replication_status=rep_field.ENABLED, - binary=constants.VOLUME_BINARY), - objects.Service(self.context, host='host1@backend2', - cluster_name=clusters[1].name, - replication_status=rep_field.ENABLED, - binary=constants.VOLUME_BINARY), - ] - services[0].create() - services[1].create() - self.assertRaises(exception.Invalid, - self.volume_api.failover, self.context, host, - cluster) - - def test_failover_api_not_found(self): - self.assertRaises(exception.ServiceNotFound, self.volume_api.failover, - self.context, 'host1', None) 
- - @mock.patch('cinder.volume.rpcapi.VolumeAPI.failover') - def test_failover_api_success_multiple_results(self, failover_mock): - """Succeed to failover multiple services for the same backend.""" - rep_field = fields.ReplicationStatus - cluster_name = 'mycluster@backend1' - cluster = objects.Cluster(self.context, - name=cluster_name, - replication_status=rep_field.ENABLED, - binary=constants.VOLUME_BINARY) - cluster.create() - services = [ - objects.Service(self.context, host='host1@backend1', - cluster_name=cluster_name, - replication_status=rep_field.ENABLED, - binary=constants.VOLUME_BINARY), - objects.Service(self.context, host='host2@backend1', - cluster_name=cluster_name, - replication_status=rep_field.ENABLED, - binary=constants.VOLUME_BINARY), - ] - services[0].create() - services[1].create() - - self.volume_api.failover(self.context, None, cluster_name, - mock.sentinel.secondary_id) - - for service in services + [cluster]: - self.assertEqual(rep_field.ENABLED, service.replication_status) - service.refresh() - self.assertEqual(rep_field.FAILING_OVER, - service.replication_status) - - failover_mock.assert_called_once_with(self.context, mock.ANY, - mock.sentinel.secondary_id) - self.assertEqual(services[0].id, failover_mock.call_args[0][1].id) - - @mock.patch('cinder.volume.rpcapi.VolumeAPI.failover') - def test_failover_api_success_multiple_results_not_updated(self, - failover_mock): - """Succeed to failover even if a service is not updated.""" - rep_field = fields.ReplicationStatus - cluster_name = 'mycluster@backend1' - cluster = objects.Cluster(self.context, - name=cluster_name, - replication_status=rep_field.ENABLED, - binary=constants.VOLUME_BINARY) - cluster.create() - services = [ - objects.Service(self.context, host='host1@backend1', - cluster_name=cluster_name, - replication_status=rep_field.ENABLED, - binary=constants.VOLUME_BINARY), - objects.Service(self.context, host='host2@backend1', - cluster_name=cluster_name, - 
replication_status=rep_field.ERROR, - binary=constants.VOLUME_BINARY), - ] - services[0].create() - services[1].create() - - self.volume_api.failover(self.context, None, cluster_name, - mock.sentinel.secondary_id) - - for service in services[:1] + [cluster]: - service.refresh() - self.assertEqual(rep_field.FAILING_OVER, - service.replication_status) - - services[1].refresh() - self.assertEqual(rep_field.ERROR, services[1].replication_status) - - failover_mock.assert_called_once_with(self.context, mock.ANY, - mock.sentinel.secondary_id) - self.assertEqual(services[0].id, failover_mock.call_args[0][1].id) - - @mock.patch('cinder.volume.rpcapi.VolumeAPI.failover') - def test_failover_api_fail_multiple_results_not_updated(self, - failover_mock): - """Fail if none of the services could be updated.""" - rep_field = fields.ReplicationStatus - cluster_name = 'mycluster@backend1' - cluster = objects.Cluster(self.context, - name=cluster_name, - replication_status=rep_field.ENABLED, - binary=constants.VOLUME_BINARY) - cluster.create() - down_time = timeutils.datetime.datetime(1970, 1, 1) - services = [ - # This service is down - objects.Service(self.context, host='host1@backend1', - cluster_name=cluster_name, - replication_status=rep_field.ENABLED, - created_at=down_time, - updated_at=down_time, - modified_at=down_time, - binary=constants.VOLUME_BINARY), - # This service is not with the right replication status - objects.Service(self.context, host='host2@backend1', - cluster_name=cluster_name, - replication_status=rep_field.ERROR, - binary=constants.VOLUME_BINARY), - ] - services[0].create() - services[1].create() - - self.assertRaises(exception.InvalidInput, - self.volume_api.failover, self.context, None, - cluster_name, mock.sentinel.secondary_id) - - for service in services: - svc = objects.Service.get_by_id(self.context, service.id) - self.assertEqual(service.replication_status, - svc.replication_status) - - cluster.refresh() - self.assertEqual(rep_field.ENABLED, 
cluster.replication_status) - - failover_mock.assert_not_called() - - def _check_failover_db(self, get_method, expected_results): - db_data = get_method.get_all(self.context, None) - db_data = {e.id: e for e in db_data} - for expected in expected_results: - id_ = expected['id'] - for key, value in expected.items(): - self.assertEqual(value, getattr(db_data[id_], key), - '(%s) ref=%s != act=%s' % ( - key, expected, dict(db_data[id_]))) - - def _test_failover_model_updates(self, in_volumes, in_snapshots, - driver_volumes, driver_result, - out_volumes, out_snapshots, - in_groups=None, out_groups=None, - driver_group_result=None, - secondary_id=None): - host = vol_utils.extract_host(self.manager.host) - utils.create_service(self.context, {'host': host, - 'binary': 'cinder-volume'}) - for volume in in_volumes: - utils.create_volume(self.context, self.manager.host, **volume) - - for snapshot in in_snapshots: - utils.create_snapshot(self.context, **snapshot) - - for group in in_groups: - utils.create_group(self.context, self.manager.host, **group) - - with mock.patch.object( - self.manager.driver, 'failover_host', - return_value=(secondary_id, driver_result, - driver_group_result)) as driver_mock: - self.manager.failover_host(self.context, secondary_id) - - self.assertSetEqual(driver_volumes, - {v.id for v in driver_mock.call_args[0][1]}) - - self._check_failover_db(objects.VolumeList, out_volumes) - self._check_failover_db(objects.SnapshotList, out_snapshots) - self._check_failover_db(objects.GroupList, out_groups) - - @mock.patch('cinder.volume.utils.is_group_a_type') - def test_failover_host_model_updates(self, mock_group_type): - status = fields.ReplicationStatus - mock_group_type.return_value = True - in_groups = [ - {'id': str(uuid.uuid4()), 'status': 'available', - 'group_type_id': fake.GROUP_TYPE_ID, - 'volume_type_ids': [fake.VOLUME_TYPE_ID], - 'replication_status': status.FAILOVER_ERROR}, - {'id': str(uuid.uuid4()), 'status': 'available', - 'group_type_id': 
fake.GROUP_TYPE_ID, - 'volume_type_ids': [fake.VOLUME_TYPE_ID], - 'replication_status': status.ENABLED}, - ] - driver_group_result = [ - {'group_id': in_groups[0]['id'], - 'updates': {'replication_status': status.FAILOVER_ERROR}}, - {'group_id': in_groups[1]['id'], - 'updates': {'replication_status': status.FAILED_OVER}}, - ] - out_groups = [ - {'id': in_groups[0]['id'], 'status': 'error', - 'replication_status': status.FAILOVER_ERROR}, - {'id': in_groups[1]['id'], 'status': in_groups[1]['status'], - 'replication_status': status.FAILED_OVER}, - ] - - # test volumes - in_volumes = [ - {'id': str(uuid.uuid4()), 'status': 'available', - 'replication_status': status.DISABLED}, - {'id': str(uuid.uuid4()), 'status': 'in-use', - 'replication_status': status.NOT_CAPABLE}, - {'id': str(uuid.uuid4()), 'status': 'available', - 'replication_status': status.FAILOVER_ERROR}, - {'id': str(uuid.uuid4()), 'status': 'in-use', - 'replication_status': status.ENABLED}, - {'id': str(uuid.uuid4()), 'status': 'available', - 'replication_status': status.FAILOVER_ERROR}, - {'id': str(uuid.uuid4()), 'status': 'in-use', - 'replication_status': status.ENABLED}, - {'id': str(uuid.uuid4()), 'status': 'available', - 'group_id': in_groups[0]['id'], - 'replication_status': status.FAILOVER_ERROR}, - {'id': str(uuid.uuid4()), 'status': 'available', - 'group_id': in_groups[1]['id'], - 'replication_status': status.ENABLED}, - ] - in_snapshots = [ - {'id': v['id'], 'volume_id': v['id'], 'status': 'available'} - for v in in_volumes - ] - driver_volumes = { - v['id'] for v in in_volumes - if v['replication_status'] not in (status.DISABLED, - status.NOT_CAPABLE)} - driver_result = [ - {'volume_id': in_volumes[3]['id'], - 'updates': {'status': 'error'}}, - {'volume_id': in_volumes[4]['id'], - 'updates': {'replication_status': status.FAILOVER_ERROR}}, - {'volume_id': in_volumes[5]['id'], - 'updates': {'replication_status': status.FAILED_OVER}}, - {'volume_id': in_volumes[6]['id'], - 'updates': 
{'replication_status': status.FAILOVER_ERROR}}, - {'volume_id': in_volumes[7]['id'], - 'updates': {'replication_status': status.FAILED_OVER}}, - ] - out_volumes = [ - {'id': in_volumes[0]['id'], 'status': 'error', - 'replication_status': status.NOT_CAPABLE, - 'previous_status': in_volumes[0]['status']}, - {'id': in_volumes[1]['id'], 'status': 'error', - 'replication_status': status.NOT_CAPABLE, - 'previous_status': in_volumes[1]['status']}, - {'id': in_volumes[2]['id'], 'status': in_volumes[2]['status'], - 'replication_status': status.FAILED_OVER}, - {'id': in_volumes[3]['id'], 'status': 'error', - 'previous_status': in_volumes[3]['status'], - 'replication_status': status.FAILOVER_ERROR}, - {'id': in_volumes[4]['id'], 'status': 'error', - 'previous_status': in_volumes[4]['status'], - 'replication_status': status.FAILOVER_ERROR}, - {'id': in_volumes[5]['id'], 'status': in_volumes[5]['status'], - 'replication_status': status.FAILED_OVER}, - {'id': in_volumes[6]['id'], 'status': 'error', - 'previous_status': in_volumes[6]['status'], - 'replication_status': status.FAILOVER_ERROR}, - {'id': in_volumes[7]['id'], 'status': in_volumes[7]['status'], - 'replication_status': status.FAILED_OVER}, - ] - out_snapshots = [ - {'id': ov['id'], - 'status': 'error' if ov['status'] == 'error' else 'available'} - for ov in out_volumes - ] - - self._test_failover_model_updates(in_volumes, in_snapshots, - driver_volumes, driver_result, - out_volumes, out_snapshots, - in_groups, out_groups, - driver_group_result) - - def test_failback_host_model_updates(self): - status = fields.ReplicationStatus - # IDs will be overwritten with UUIDs, but they help follow the code - in_volumes = [ - {'id': 0, 'status': 'available', - 'replication_status': status.DISABLED}, - {'id': 1, 'status': 'in-use', - 'replication_status': status.NOT_CAPABLE}, - {'id': 2, 'status': 'available', - 'replication_status': status.FAILOVER_ERROR}, - {'id': 3, 'status': 'in-use', - 'replication_status': status.ENABLED}, - 
{'id': 4, 'status': 'available', - 'replication_status': status.FAILOVER_ERROR}, - {'id': 5, 'status': 'in-use', - 'replication_status': status.FAILED_OVER}, - ] - # Generate real volume IDs - for volume in in_volumes: - volume['id'] = str(uuid.uuid4()) - in_snapshots = [ - {'id': in_volumes[0]['id'], 'volume_id': in_volumes[0]['id'], - 'status': fields.SnapshotStatus.ERROR_DELETING}, - {'id': in_volumes[1]['id'], 'volume_id': in_volumes[1]['id'], - 'status': fields.SnapshotStatus.AVAILABLE}, - {'id': in_volumes[2]['id'], 'volume_id': in_volumes[2]['id'], - 'status': fields.SnapshotStatus.CREATING}, - {'id': in_volumes[3]['id'], 'volume_id': in_volumes[3]['id'], - 'status': fields.SnapshotStatus.DELETING}, - {'id': in_volumes[4]['id'], 'volume_id': in_volumes[4]['id'], - 'status': fields.SnapshotStatus.CREATING}, - {'id': in_volumes[5]['id'], 'volume_id': in_volumes[5]['id'], - 'status': fields.SnapshotStatus.CREATING}, - ] - driver_volumes = { - v['id'] for v in in_volumes - if v['replication_status'] not in (status.DISABLED, - status.NOT_CAPABLE)} - driver_result = [ - {'volume_id': in_volumes[3]['id'], - 'updates': {'status': 'error'}}, - {'volume_id': in_volumes[4]['id'], - 'updates': {'replication_status': status.FAILOVER_ERROR}}, - {'volume_id': in_volumes[5]['id'], - 'updates': {'replication_status': status.FAILED_OVER}}, - ] - out_volumes = [ - {'id': in_volumes[0]['id'], 'status': in_volumes[0]['status'], - 'replication_status': in_volumes[0]['replication_status'], - 'previous_status': None}, - {'id': in_volumes[1]['id'], 'status': in_volumes[1]['status'], - 'replication_status': in_volumes[1]['replication_status'], - 'previous_status': None}, - {'id': in_volumes[2]['id'], 'status': in_volumes[2]['status'], - 'replication_status': status.ENABLED}, - {'id': in_volumes[3]['id'], 'status': 'error', - 'previous_status': in_volumes[3]['status'], - 'replication_status': status.FAILOVER_ERROR}, - {'id': in_volumes[4]['id'], 'status': 'error', - 'previous_status': 
in_volumes[4]['status'], - 'replication_status': status.FAILOVER_ERROR}, - {'id': in_volumes[5]['id'], 'status': in_volumes[5]['status'], - 'replication_status': status.ENABLED}, - ] - # Snapshot status is preserved except for those that error the failback - out_snapshots = in_snapshots[:] - out_snapshots[3]['status'] = fields.SnapshotStatus.ERROR - out_snapshots[4]['status'] = fields.SnapshotStatus.ERROR - - self._test_failover_model_updates(in_volumes, in_snapshots, - driver_volumes, driver_result, - out_volumes, out_snapshots, - [], [], [], - self.manager.FAILBACK_SENTINEL) diff --git a/cinder/tests/unit/volume/test_rpcapi.py b/cinder/tests/unit/volume/test_rpcapi.py deleted file mode 100644 index 137f2f541..000000000 --- a/cinder/tests/unit/volume/test_rpcapi.py +++ /dev/null @@ -1,657 +0,0 @@ -# Copyright 2012, Intel, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -""" -Unit Tests for cinder.volume.rpcapi -""" -import ddt -import mock - -from oslo_config import cfg -from oslo_serialization import jsonutils - -from cinder import db -from cinder import exception -from cinder import objects -from cinder.objects import fields -from cinder import test -from cinder.tests.unit.backup import fake_backup -from cinder.tests.unit import fake_constants as fake -from cinder.tests.unit import fake_service -from cinder.tests.unit import fake_volume -from cinder.tests.unit import utils as tests_utils -from cinder.volume import rpcapi as volume_rpcapi - - -CONF = cfg.CONF - - -@ddt.ddt -class VolumeRPCAPITestCase(test.RPCAPITestCase): - - def setUp(self): - super(VolumeRPCAPITestCase, self).setUp() - self.rpcapi = volume_rpcapi.VolumeAPI - self.base_version = '3.0' - vol = {} - vol['host'] = 'fake_host' - vol['availability_zone'] = CONF.storage_availability_zone - vol['status'] = "available" - vol['attach_status'] = "detached" - vol['metadata'] = {"test_key": "test_val"} - vol['size'] = 1 - volume = db.volume_create(self.context, vol) - - kwargs = { - 'status': fields.SnapshotStatus.CREATING, - 'progress': '0%', - 'display_name': 'fake_name', - 'display_description': 'fake_description'} - snapshot = tests_utils.create_snapshot(self.context, vol['id'], - **kwargs) - - generic_group = tests_utils.create_group( - self.context, - availability_zone=CONF.storage_availability_zone, - group_type_id='group_type1', - host='fakehost@fakedrv#fakepool') - - group_snapshot = tests_utils.create_group_snapshot( - self.context, - group_id=generic_group.id, - group_type_id=fake.GROUP_TYPE_ID) - - self.fake_volume = jsonutils.to_primitive(volume) - self.fake_volume_obj = fake_volume.fake_volume_obj(self.context, **vol) - self.fake_snapshot = snapshot - self.fake_reservations = ["RESERVATION"] - self.fake_backup_obj = fake_backup.fake_backup_obj(self.context) - self.fake_group = generic_group - self.fake_group_snapshot = group_snapshot - - 
self.can_send_version_mock = self.patch( - 'oslo_messaging.RPCClient.can_send_version', return_value=True) - - def tearDown(self): - super(VolumeRPCAPITestCase, self).tearDown() - self.fake_snapshot.destroy() - self.fake_volume_obj.destroy() - self.fake_group_snapshot.destroy() - self.fake_group.destroy() - self.fake_backup_obj.destroy() - - def _change_cluster_name(self, resource, cluster_name): - resource.cluster_name = cluster_name - resource.obj_reset_changes() - - def test_create_volume(self): - self._test_rpc_api('create_volume', - rpc_method='cast', - server='fake_host', - volume=self.fake_volume_obj, - request_spec=objects.RequestSpec.from_primitives( - {}), - filter_properties={'availability_zone': 'fake_az'}, - allow_reschedule=True) - - @ddt.data(None, 'my_cluster') - def test_delete_volume(self, cluster_name): - self._change_cluster_name(self.fake_volume_obj, cluster_name) - self._test_rpc_api('delete_volume', - rpc_method='cast', - server=cluster_name or self.fake_volume_obj.host, - volume=self.fake_volume_obj, - unmanage_only=False, - cascade=False) - - def test_delete_volume_cascade(self): - self._test_rpc_api('delete_volume', - rpc_method='cast', - server=self.fake_volume_obj.host, - volume=self.fake_volume_obj, - unmanage_only=False, - cascade=True) - - @ddt.data(None, 'mycluster') - def test_create_snapshot(self, cluster_name): - self._change_cluster_name(self.fake_volume_obj, cluster_name) - self._test_rpc_api('create_snapshot', - rpc_method='cast', - server=cluster_name or self.fake_volume_obj.host, - volume=self.fake_volume_obj, - snapshot=self.fake_snapshot) - - @ddt.data(None, 'mycluster') - def test_delete_snapshot(self, cluster_name): - self._change_cluster_name(self.fake_snapshot.volume, cluster_name) - self._test_rpc_api( - 'delete_snapshot', rpc_method='cast', - server=cluster_name or self.fake_snapshot.volume.host, - snapshot=self.fake_snapshot, unmanage_only=False) - - def test_delete_snapshot_with_unmanage_only(self): - 
self._test_rpc_api('delete_snapshot', - rpc_method='cast', - server=self.fake_snapshot.volume.host, - snapshot=self.fake_snapshot, - unmanage_only=True) - - @ddt.data('3.0', '3.3') - def test_attach_volume_to_instance(self, version): - self.can_send_version_mock.return_value = (version == '3.3') - self._test_rpc_api('attach_volume', - rpc_method='call', - server=self.fake_volume_obj.host, - volume=self.fake_volume_obj, - instance_uuid=fake.INSTANCE_ID, - host_name=None, - mountpoint='fake_mountpoint', - mode='ro', - expected_kwargs_diff={ - 'volume_id': self.fake_volume_obj.id}, - retval=fake_volume.fake_db_volume_attachment(), - version=version) - - @ddt.data('3.0', '3.3') - def test_attach_volume_to_host(self, version): - self.can_send_version_mock.return_value = (version == '3.3') - self._test_rpc_api('attach_volume', - rpc_method='call', - server=self.fake_volume_obj.host, - volume=self.fake_volume_obj, - instance_uuid=None, - host_name='fake_host', - mountpoint='fake_mountpoint', - mode='rw', - expected_kwargs_diff={ - 'volume_id': self.fake_volume_obj.id}, - retval=fake_volume.fake_db_volume_attachment(), - version=version) - - @ddt.data('3.0', '3.3') - def test_attach_volume_cluster(self, version): - self.can_send_version_mock.return_value = (version == '3.3') - self._change_cluster_name(self.fake_volume_obj, 'mycluster') - self._test_rpc_api('attach_volume', - rpc_method='call', - server=self.fake_volume_obj.cluster_name, - volume=self.fake_volume_obj, - instance_uuid=None, - host_name='fake_host', - mountpoint='fake_mountpoint', - mode='rw', - expected_kwargs_diff={ - 'volume_id': self.fake_volume_obj.id}, - retval=fake_volume.fake_db_volume_attachment(), - version=version) - - @ddt.data('3.0', '3.4') - def test_detach_volume(self, version): - self.can_send_version_mock.return_value = (version == '3.4') - self._test_rpc_api('detach_volume', - rpc_method='call', - server=self.fake_volume_obj.host, - volume=self.fake_volume_obj, - 
attachment_id=fake.ATTACHMENT_ID, - expected_kwargs_diff={ - 'volume_id': self.fake_volume_obj.id}, - # NOTE(dulek): Detach isn't returning anything, but - # it's a call and it is synchronous. - retval=None, - version=version) - - @ddt.data('3.0', '3.4') - def test_detach_volume_cluster(self, version): - self.can_send_version_mock.return_value = (version == '3.4') - self._change_cluster_name(self.fake_volume_obj, 'mycluster') - self._test_rpc_api('detach_volume', - rpc_method='call', - server=self.fake_volume_obj.cluster_name, - volume=self.fake_volume_obj, - attachment_id='fake_uuid', - expected_kwargs_diff={ - 'volume_id': self.fake_volume_obj.id}, - # NOTE(dulek): Detach isn't returning anything, but - # it's a call and it is synchronous. - retval=None, - version=version) - - @ddt.data(None, 'mycluster') - def test_copy_volume_to_image(self, cluster_name): - self._change_cluster_name(self.fake_volume_obj, cluster_name) - self._test_rpc_api('copy_volume_to_image', - rpc_method='cast', - server=cluster_name or self.fake_volume_obj.host, - volume=self.fake_volume_obj, - expected_kwargs_diff={ - 'volume_id': self.fake_volume_obj.id}, - image_meta={'id': fake.IMAGE_ID, - 'container_format': 'fake_type', - 'disk_format': 'fake_format'}) - - @ddt.data(None, 'mycluster') - def test_initialize_connection(self, cluster_name): - self._change_cluster_name(self.fake_volume_obj, cluster_name) - self._test_rpc_api('initialize_connection', - rpc_method='call', - server=cluster_name or self.fake_volume_obj.host, - connector='fake_connector', - volume=self.fake_volume_obj) - - @ddt.data(None, 'mycluster') - def test_terminate_connection(self, cluster_name): - self._change_cluster_name(self.fake_volume_obj, cluster_name) - self._test_rpc_api('terminate_connection', - rpc_method='call', - server=cluster_name or self.fake_volume_obj.host, - volume=self.fake_volume_obj, - connector='fake_connector', - force=False, - # NOTE(dulek): Terminate isn't returning anything, - # but it's a 
call and it is synchronous. - retval=None, - expected_kwargs_diff={ - 'volume_id': self.fake_volume_obj.id}) - - @ddt.data(None, 'mycluster') - def test_accept_transfer(self, cluster_name): - self._change_cluster_name(self.fake_volume_obj, cluster_name) - self._test_rpc_api('accept_transfer', - rpc_method='call', - server=cluster_name or self.fake_volume_obj.host, - volume=self.fake_volume_obj, - new_user=fake.USER_ID, - new_project=fake.PROJECT_ID, - expected_kwargs_diff={ - 'volume_id': self.fake_volume_obj.id}) - - @ddt.data(None, 'mycluster') - def test_extend_volume(self, cluster_name): - self._change_cluster_name(self.fake_volume_obj, cluster_name) - self._test_rpc_api('extend_volume', - rpc_method='cast', - server=cluster_name or self.fake_volume_obj.host, - volume=self.fake_volume_obj, - new_size=1, - reservations=self.fake_reservations) - - def test_migrate_volume(self): - class FakeBackend(object): - - def __init__(self): - self.host = 'fake_host' - self.cluster_name = 'cluster_name' - self.capabilities = {} - dest_backend = FakeBackend() - self._test_rpc_api('migrate_volume', - rpc_method='cast', - server=self.fake_volume_obj.host, - volume=self.fake_volume_obj, - dest_backend=dest_backend, - force_host_copy=True, - expected_kwargs_diff={ - 'host': {'host': 'fake_host', - 'cluster_name': 'cluster_name', - 'capabilities': {}}}, - version='3.5') - - def test_migrate_volume_completion(self): - self._test_rpc_api('migrate_volume_completion', - rpc_method='call', - server=self.fake_volume_obj.host, - volume=self.fake_volume_obj, - new_volume=self.fake_volume_obj, - error=False, - retval=fake.VOLUME_ID) - - def test_retype(self): - class FakeBackend(object): - - def __init__(self): - self.host = 'fake_host' - self.cluster_name = 'cluster_name' - self.capabilities = {} - dest_backend = FakeBackend() - self._test_rpc_api('retype', - rpc_method='cast', - server=self.fake_volume_obj.host, - volume=self.fake_volume_obj, - new_type_id=fake.VOLUME_TYPE_ID, - 
dest_backend=dest_backend, - migration_policy='never', - reservations=self.fake_reservations, - old_reservations=self.fake_reservations, - expected_kwargs_diff={ - 'host': {'host': 'fake_host', - 'cluster_name': 'cluster_name', - 'capabilities': {}}}, - version='3.5') - - def test_manage_existing(self): - self._test_rpc_api('manage_existing', - rpc_method='cast', - server=self.fake_volume_obj.host, - volume=self.fake_volume_obj, - ref={'lv_name': 'foo'}) - - def test_manage_existing_snapshot(self): - self._test_rpc_api('manage_existing_snapshot', - rpc_method='cast', - server=self.fake_snapshot.volume.host, - snapshot=self.fake_snapshot, - ref='foo', - backend='fake_host') - - def test_freeze_host(self): - service = fake_service.fake_service_obj(self.context, - host='fake_host', - binary='cinder-volume') - self._test_rpc_api('freeze_host', - rpc_method='call', - server='fake_host', - service=service, - retval=True) - - def test_thaw_host(self): - service = fake_service.fake_service_obj(self.context, - host='fake_host', - binary='cinder-volume') - self._test_rpc_api('thaw_host', - rpc_method='call', - server='fake_host', - service=service, - retval=True) - - @ddt.data('3.0', '3.8') - def test_failover(self, version): - self.can_send_version_mock.side_effect = lambda x: x == version - service = objects.Service(self.context, host='fake_host', - cluster_name=None) - expected_method = 'failover' if version == '3.8' else 'failover_host' - self._test_rpc_api('failover', rpc_method='cast', - expected_method=expected_method, server='fake_host', - service=service, - secondary_backend_id='fake_backend', - version=version) - - @mock.patch('cinder.volume.rpcapi.VolumeAPI._get_cctxt') - def test_failover_completed(self, cctxt_mock): - service = objects.Service(self.context, host='fake_host', - cluster_name='cluster_name') - self._test_rpc_api('failover_completed', rpc_method='cast', - fanout=True, server='fake_host', service=service, - updates=mock.sentinel.updates) - - def 
test_get_capabilities(self): - self._test_rpc_api('get_capabilities', - rpc_method='call', - server='fake_host', - backend_id='fake_host', - discover=True, - retval={'foo': 'bar'}) - - def test_remove_export(self): - self._test_rpc_api('remove_export', - rpc_method='cast', - server=self.fake_volume_obj.host, - volume=self.fake_volume_obj, - expected_kwargs_diff={ - 'volume_id': self.fake_volume_obj.id}) - - @ddt.data(None, 'mycluster') - def test_get_backup_device(self, cluster_name): - self._change_cluster_name(self.fake_volume_obj, cluster_name) - backup_device_dict = {'backup_device': self.fake_volume, - 'is_snapshot': False, - 'secure_enabled': True} - backup_device_obj = objects.BackupDeviceInfo.from_primitive( - backup_device_dict, self.context) - self._test_rpc_api('get_backup_device', - rpc_method='call', - server=cluster_name or self.fake_volume_obj.host, - backup=self.fake_backup_obj, - volume=self.fake_volume_obj, - expected_kwargs_diff={ - 'want_objects': True, - }, - retval=backup_device_obj, - version='3.2') - - @ddt.data(None, 'mycluster') - def test_get_backup_device_old(self, cluster_name): - self.can_send_version_mock.side_effect = (True, False, False) - self._change_cluster_name(self.fake_volume_obj, cluster_name) - backup_device_dict = {'backup_device': self.fake_volume, - 'is_snapshot': False, - 'secure_enabled': True} - backup_device_obj = objects.BackupDeviceInfo.from_primitive( - backup_device_dict, self.context) - - self._test_rpc_api('get_backup_device', - rpc_method='call', - server=cluster_name or self.fake_volume_obj.host, - backup=self.fake_backup_obj, - volume=self.fake_volume_obj, - retval=backup_device_dict, - expected_retval=backup_device_obj, - version='3.0') - - @ddt.data(None, 'mycluster') - def test_secure_file_operations_enabled(self, cluster_name): - self._change_cluster_name(self.fake_volume_obj, cluster_name) - self._test_rpc_api('secure_file_operations_enabled', - rpc_method='call', - server=cluster_name or 
self.fake_volume_obj.host, - volume=self.fake_volume_obj, - retval=True) - - def test_create_group(self): - self._test_rpc_api('create_group', rpc_method='cast', - server='fakehost@fakedrv', group=self.fake_group) - - @ddt.data(None, 'mycluster') - def test_delete_group(self, cluster_name): - self._change_cluster_name(self.fake_group, cluster_name) - self._test_rpc_api('delete_group', rpc_method='cast', - server=cluster_name or self.fake_group.host, - group=self.fake_group) - - @ddt.data(None, 'mycluster') - def test_update_group(self, cluster_name): - self._change_cluster_name(self.fake_group, cluster_name) - self._test_rpc_api('update_group', rpc_method='cast', - server=cluster_name or self.fake_group.host, - group=self.fake_group, - add_volumes=[fake.VOLUME2_ID], - remove_volumes=[fake.VOLUME3_ID]) - - def test_create_group_from_src(self): - self._test_rpc_api('create_group_from_src', rpc_method='cast', - server=self.fake_group.host, group=self.fake_group, - group_snapshot=self.fake_group_snapshot, - source_group=None) - - def test_create_group_snapshot(self): - self._test_rpc_api('create_group_snapshot', rpc_method='cast', - server=self.fake_group_snapshot.group.host, - group_snapshot=self.fake_group_snapshot) - - def test_delete_group_snapshot(self): - self._test_rpc_api('delete_group_snapshot', rpc_method='cast', - server=self.fake_group_snapshot.group.host, - group_snapshot=self.fake_group_snapshot) - - @ddt.data(('myhost', None), ('myhost', 'mycluster')) - @ddt.unpack - @mock.patch('cinder.volume.rpcapi.VolumeAPI._get_cctxt') - def test_do_cleanup(self, host, cluster, get_cctxt_mock): - cleanup_request = objects.CleanupRequest(self.context, - host=host, - cluster_name=cluster) - rpcapi = volume_rpcapi.VolumeAPI() - rpcapi.do_cleanup(self.context, cleanup_request) - get_cctxt_mock.assert_called_once_with( - cleanup_request.service_topic_queue, '3.7') - get_cctxt_mock.return_value.cast.assert_called_once_with( - self.context, 'do_cleanup', 
cleanup_request=cleanup_request) - - def test_do_cleanup_too_old(self): - cleanup_request = objects.CleanupRequest(self.context) - rpcapi = volume_rpcapi.VolumeAPI() - with mock.patch.object(rpcapi.client, 'can_send_version', - return_value=False) as can_send_mock: - self.assertRaises(exception.ServiceTooOld, - rpcapi.do_cleanup, - self.context, - cleanup_request) - can_send_mock.assert_called_once_with('3.7') - - @ddt.data(('myhost', None, '3.10'), ('myhost', 'mycluster', '3.10'), - ('myhost', None, '3.0')) - @ddt.unpack - @mock.patch('oslo_messaging.RPCClient.can_send_version') - def test_get_manageable_volumes( - self, - host, - cluster_name, - version, - can_send_version): - can_send_version.side_effect = lambda x: x == version - service = objects.Service(self.context, host=host, - cluster_name=cluster_name) - expected_kwargs_diff = { - 'want_objects': True} if version == '3.10' else {} - self._test_rpc_api('get_manageable_volumes', - rpc_method='call', - service=service, - server=cluster_name or host, - marker=5, - limit=20, - offset=5, - sort_keys='fake_keys', - sort_dirs='fake_dirs', - expected_kwargs_diff=expected_kwargs_diff, - version=version) - can_send_version.assert_has_calls([mock.call('3.10')]) - - @ddt.data(('myhost', None, '3.10'), ('myhost', 'mycluster', '3.10'), - ('myhost', None, '3.0')) - @ddt.unpack - @mock.patch('oslo_messaging.RPCClient.can_send_version') - def test_get_manageable_snapshots( - self, - host, - cluster_name, - version, - can_send_version): - can_send_version.side_effect = lambda x: x == version - service = objects.Service(self.context, host=host, - cluster_name=cluster_name) - expected_kwargs_diff = { - 'want_objects': True} if version == '3.10' else {} - self._test_rpc_api('get_manageable_snapshots', - rpc_method='call', - service=service, - server=cluster_name or host, - marker=5, - limit=20, - offset=5, - sort_keys='fake_keys', - sort_dirs='fake_dirs', - expected_kwargs_diff=expected_kwargs_diff, - version=version) - 
can_send_version.assert_has_calls([mock.call('3.10')]) - - @mock.patch('oslo_messaging.RPCClient.can_send_version', mock.Mock()) - def test_set_log_levels(self): - service = objects.Service(self.context, host='host1') - self._test_rpc_api('set_log_levels', - rpc_method='cast', - server=service.host, - service=service, - log_request='log_request', - version='3.12') - - @mock.patch('oslo_messaging.RPCClient.can_send_version', mock.Mock()) - def test_get_log_levels(self): - service = objects.Service(self.context, host='host1') - self._test_rpc_api('get_log_levels', - rpc_method='call', - server=service.host, - service=service, - log_request='log_request', - version='3.12') - - @ddt.data(None, 'mycluster') - def test_initialize_connection_snapshot(self, cluster_name): - self._change_cluster_name(self.fake_snapshot.volume, cluster_name) - self._test_rpc_api('initialize_connection_snapshot', - rpc_method='call', - server=(cluster_name or - self.fake_snapshot.volume.host), - connector='fake_connector', - snapshot=self.fake_snapshot, - expected_kwargs_diff={ - 'snapshot_id': self.fake_snapshot.id}, - version='3.13') - - @ddt.data(None, 'mycluster') - def test_terminate_connection_snapshot(self, cluster_name): - self._change_cluster_name(self.fake_snapshot.volume, cluster_name) - self._test_rpc_api('terminate_connection_snapshot', - rpc_method='call', - server=(cluster_name or - self.fake_snapshot.volume.host), - snapshot=self.fake_snapshot, - connector='fake_connector', - force=False, - retval=None, - expected_kwargs_diff={ - 'snapshot_id': self.fake_snapshot.id}, - version='3.13') - - def test_remove_export_snapshot(self): - self._test_rpc_api('remove_export_snapshot', - rpc_method='cast', - server=self.fake_volume_obj.host, - snapshot=self.fake_snapshot, - expected_kwargs_diff={ - 'snapshot_id': self.fake_snapshot.id}, - version='3.13') - - def test_enable_replication(self): - self._test_rpc_api('enable_replication', rpc_method='cast', - server=self.fake_group.host, - 
group=self.fake_group, - version='3.14') - - def test_disable_replication(self): - self._test_rpc_api('disable_replication', rpc_method='cast', - server=self.fake_group.host, - group=self.fake_group, - version='3.14') - - def test_failover_replication(self): - self._test_rpc_api('failover_replication', rpc_method='cast', - server=self.fake_group.host, - group=self.fake_group, - allow_attached_volume=False, - secondary_backend_id=None, - version='3.14') - - def test_list_replication_targets(self): - self._test_rpc_api('list_replication_targets', rpc_method='call', - server=self.fake_group.host, - group=self.fake_group, - version='3.14') diff --git a/cinder/tests/unit/volume/test_snapshot.py b/cinder/tests/unit/volume/test_snapshot.py deleted file mode 100644 index 4f1d4978c..000000000 --- a/cinder/tests/unit/volume/test_snapshot.py +++ /dev/null @@ -1,521 +0,0 @@ -# Copyright 2010 United States Government as represented by the -# Administrator of the National Aeronautics and Space Administration. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
-"""Tests for global snapshot cases.""" - -import ddt -import os -import sys - -import mock -from oslo_config import cfg -from oslo_utils import imageutils - -from cinder import context -from cinder import db -from cinder import exception -from cinder import objects -from cinder.objects import fields -from cinder import quota -from cinder import test -from cinder.tests.unit.brick import fake_lvm -from cinder.tests.unit import fake_constants as fake -from cinder.tests.unit import utils as tests_utils -from cinder.tests.unit import volume as base -import cinder.volume - -QUOTAS = quota.QUOTAS - -CONF = cfg.CONF - -OVER_SNAPSHOT_QUOTA_EXCEPTION = exception.OverQuota( - overs=['snapshots'], - usages = {'snapshots': {'reserved': 1, 'in_use': 9}}, - quotas = {'gigabytes': 10, 'snapshots': 10}) - - -def create_snapshot(volume_id, size=1, metadata=None, ctxt=None, - **kwargs): - """Create a snapshot object.""" - metadata = metadata or {} - snap = objects.Snapshot(ctxt or context.get_admin_context()) - snap.volume_size = size - snap.user_id = fake.USER_ID - snap.project_id = fake.PROJECT_ID - snap.volume_id = volume_id - snap.status = fields.SnapshotStatus.CREATING - if metadata is not None: - snap.metadata = metadata - snap.update(kwargs) - - snap.create() - return snap - - -@ddt.ddt -class SnapshotTestCase(base.BaseVolumeTestCase): - def test_delete_snapshot_frozen(self): - service = tests_utils.create_service(self.context, {'frozen': True}) - volume = tests_utils.create_volume(self.context, host=service.host) - snapshot = tests_utils.create_snapshot(self.context, volume.id) - self.assertRaises(exception.InvalidInput, - self.volume_api.delete_snapshot, self.context, - snapshot) - - @ddt.data('create_snapshot', 'create_snapshot_force') - def test_create_snapshot_frozen(self, method): - service = tests_utils.create_service(self.context, {'frozen': True}) - volume = tests_utils.create_volume(self.context, host=service.host) - method = getattr(self.volume_api, method) - 
self.assertRaises(exception.InvalidInput, - method, self.context, volume, 'name', 'desc') - - def test_create_snapshot_driver_not_initialized(self): - volume_src = tests_utils.create_volume(self.context, - **self.volume_params) - self.volume.create_volume(self.context, volume_src) - snapshot_id = create_snapshot(volume_src['id'], - size=volume_src['size'])['id'] - snapshot_obj = objects.Snapshot.get_by_id(self.context, snapshot_id) - - self.volume.driver._initialized = False - - self.assertRaises(exception.DriverNotInitialized, - self.volume.create_snapshot, - self.context, snapshot_obj) - - # NOTE(flaper87): The volume status should be error. - self.assertEqual(fields.SnapshotStatus.ERROR, snapshot_obj.status) - - # lets cleanup the mess - self.volume.driver._initialized = True - self.volume.delete_snapshot(self.context, snapshot_obj) - self.volume.delete_volume(self.context, volume_src) - - @mock.patch('cinder.tests.unit.fake_notifier.FakeNotifier._notify') - def test_create_delete_snapshot(self, mock_notify): - """Test snapshot can be created and deleted.""" - volume = tests_utils.create_volume( - self.context, - availability_zone=CONF.storage_availability_zone, - **self.volume_params) - - mock_notify.assert_not_called() - - self.volume.create_volume(self.context, volume) - - self.assert_notify_called(mock_notify, - (['INFO', 'volume.create.start'], - ['INFO', 'volume.create.end'])) - - snapshot = create_snapshot(volume['id'], size=volume['size']) - snapshot_id = snapshot.id - self.volume.create_snapshot(self.context, snapshot) - self.assertEqual( - snapshot_id, objects.Snapshot.get_by_id(self.context, - snapshot_id).id) - - self.assert_notify_called(mock_notify, - (['INFO', 'volume.create.start'], - ['INFO', 'volume.create.end'], - ['INFO', 'snapshot.create.start'], - ['INFO', 'snapshot.create.end'])) - - self.volume.delete_snapshot(self.context, snapshot) - self.assert_notify_called(mock_notify, - (['INFO', 'volume.create.start'], - ['INFO', 
'volume.create.end'], - ['INFO', 'snapshot.create.start'], - ['INFO', 'snapshot.create.end'], - ['INFO', 'snapshot.delete.start'], - ['INFO', 'snapshot.delete.end'])) - - snap = objects.Snapshot.get_by_id(context.get_admin_context( - read_deleted='yes'), snapshot_id) - self.assertEqual(fields.SnapshotStatus.DELETED, snap.status) - self.assertRaises(exception.NotFound, - db.snapshot_get, - self.context, - snapshot_id) - self.volume.delete_volume(self.context, volume) - - def test_create_delete_snapshot_with_metadata(self): - """Test snapshot can be created with metadata and deleted.""" - test_meta = {'fake_key': 'fake_value'} - volume = tests_utils.create_volume(self.context, **self.volume_params) - snapshot = create_snapshot(volume['id'], size=volume['size'], - metadata=test_meta) - snapshot_id = snapshot.id - - result_dict = snapshot.metadata - - self.assertEqual(test_meta, result_dict) - self.volume.delete_snapshot(self.context, snapshot) - self.assertRaises(exception.NotFound, - db.snapshot_get, - self.context, - snapshot_id) - - def test_delete_snapshot_another_cluster_fails(self): - """Test delete of snapshot from another cluster fails.""" - self.volume.cluster = 'mycluster' - volume = tests_utils.create_volume(self.context, status='available', - size=1, host=CONF.host + 'fake', - cluster_name=self.volume.cluster) - snapshot = create_snapshot(volume.id, size=volume.size) - - self.volume.delete_snapshot(self.context, snapshot) - self.assertRaises(exception.NotFound, - db.snapshot_get, - self.context, - snapshot.id) - - @mock.patch.object(db, 'snapshot_create', - side_effect=exception.InvalidSnapshot( - 'Create snapshot in db failed!')) - def test_create_snapshot_failed_db_snapshot(self, mock_snapshot): - """Test exception handling when create snapshot in db failed.""" - test_volume = tests_utils.create_volume( - self.context, - status='available', - host=CONF.host) - volume_api = cinder.volume.api.API() - self.assertRaises(exception.InvalidSnapshot, - 
volume_api.create_snapshot, - self.context, - test_volume, - 'fake_name', - 'fake_description') - - def test_create_snapshot_failed_maintenance(self): - """Test exception handling when create snapshot in maintenance.""" - test_volume = tests_utils.create_volume( - self.context, - status='maintenance', - host=CONF.host) - volume_api = cinder.volume.api.API() - self.assertRaises(exception.InvalidVolume, - volume_api.create_snapshot, - self.context, - test_volume, - 'fake_name', - 'fake_description') - - @mock.patch.object(QUOTAS, 'commit', - side_effect=exception.QuotaError( - 'Snapshot quota commit failed!')) - def test_create_snapshot_failed_quota_commit(self, mock_snapshot): - """Test exception handling when snapshot quota commit failed.""" - test_volume = tests_utils.create_volume( - self.context, - status='available', - host=CONF.host) - volume_api = cinder.volume.api.API() - self.assertRaises(exception.QuotaError, - volume_api.create_snapshot, - self.context, - test_volume, - 'fake_name', - 'fake_description') - - @mock.patch.object(QUOTAS, 'reserve', - side_effect = OVER_SNAPSHOT_QUOTA_EXCEPTION) - def test_create_snapshot_failed_quota_reserve(self, mock_reserve): - """Test exception handling when snapshot quota reserve failed.""" - test_volume = tests_utils.create_volume( - self.context, - status='available', - host=CONF.host) - volume_api = cinder.volume.api.API() - self.assertRaises(exception.SnapshotLimitExceeded, - volume_api.create_snapshot, - self.context, - test_volume, - 'fake_name', - 'fake_description') - - @mock.patch.object(QUOTAS, 'reserve', - side_effect = OVER_SNAPSHOT_QUOTA_EXCEPTION) - def test_create_snapshots_in_db_failed_quota_reserve(self, mock_reserve): - """Test exception handling when snapshot quota reserve failed.""" - test_volume = tests_utils.create_volume( - self.context, - status='available', - host=CONF.host) - volume_api = cinder.volume.api.API() - self.assertRaises(exception.SnapshotLimitExceeded, - 
volume_api.create_snapshots_in_db, - self.context, - [test_volume], - 'fake_name', - 'fake_description', - fake.CONSISTENCY_GROUP_ID) - - def test_create_snapshot_failed_host_is_None(self): - """Test exception handling when create snapshot and host is None.""" - test_volume = tests_utils.create_volume( - self.context, - host=None) - volume_api = cinder.volume.api.API() - self.assertRaises(exception.InvalidVolume, - volume_api.create_snapshot, - self.context, - test_volume, - 'fake_name', - 'fake_description') - - def test_create_snapshot_force(self): - """Test snapshot in use can be created forcibly.""" - - instance_uuid = '12345678-1234-5678-1234-567812345678' - # create volume and attach to the instance - volume = tests_utils.create_volume(self.context, **self.volume_params) - self.volume.create_volume(self.context, volume) - values = {'volume_id': volume['id'], - 'instance_uuid': instance_uuid, - 'attach_status': fields.VolumeAttachStatus.ATTACHING, } - attachment = db.volume_attach(self.context, values) - db.volume_attached(self.context, attachment['id'], instance_uuid, - None, '/dev/sda1') - - volume_api = cinder.volume.api.API() - volume = volume_api.get(self.context, volume['id']) - self.assertRaises(exception.InvalidVolume, - volume_api.create_snapshot, - self.context, volume, - 'fake_name', 'fake_description') - snapshot_ref = volume_api.create_snapshot_force(self.context, - volume, - 'fake_name', - 'fake_description') - snapshot_ref.destroy() - db.volume_destroy(self.context, volume['id']) - - # create volume and attach to the host - volume = tests_utils.create_volume(self.context, **self.volume_params) - self.volume.create_volume(self.context, volume) - values = {'volume_id': volume['id'], - 'attached_host': 'fake_host', - 'attach_status': fields.VolumeAttachStatus.ATTACHING, } - attachment = db.volume_attach(self.context, values) - db.volume_attached(self.context, attachment['id'], None, - 'fake_host', '/dev/sda1') - - volume_api = 
cinder.volume.api.API() - volume = volume_api.get(self.context, volume['id']) - self.assertRaises(exception.InvalidVolume, - volume_api.create_snapshot, - self.context, volume, - 'fake_name', 'fake_description') - snapshot_ref = volume_api.create_snapshot_force(self.context, - volume, - 'fake_name', - 'fake_description') - snapshot_ref.destroy() - db.volume_destroy(self.context, volume['id']) - - @mock.patch('cinder.image.image_utils.qemu_img_info') - def test_create_snapshot_from_bootable_volume(self, mock_qemu_info): - """Test create snapshot from bootable volume.""" - # create bootable volume from image - volume = self._create_volume_from_image() - volume_id = volume['id'] - self.assertEqual('available', volume['status']) - self.assertTrue(volume['bootable']) - - image_info = imageutils.QemuImgInfo() - image_info.virtual_size = '1073741824' - mock_qemu_info.return_value = image_info - - # get volume's volume_glance_metadata - ctxt = context.get_admin_context() - vol_glance_meta = db.volume_glance_metadata_get(ctxt, volume_id) - self.assertTrue(vol_glance_meta) - - # create snapshot from bootable volume - snap = create_snapshot(volume_id) - self.volume.create_snapshot(ctxt, snap) - - # get snapshot's volume_glance_metadata - snap_glance_meta = db.volume_snapshot_glance_metadata_get( - ctxt, snap.id) - self.assertTrue(snap_glance_meta) - - # ensure that volume's glance metadata is copied - # to snapshot's glance metadata - self.assertEqual(len(vol_glance_meta), len(snap_glance_meta)) - vol_glance_dict = {x.key: x.value for x in vol_glance_meta} - snap_glance_dict = {x.key: x.value for x in snap_glance_meta} - self.assertDictEqual(vol_glance_dict, snap_glance_dict) - - # ensure that snapshot's status is changed to 'available' - self.assertEqual(fields.SnapshotStatus.AVAILABLE, snap.status) - - # cleanup resource - snap.destroy() - db.volume_destroy(ctxt, volume_id) - - @mock.patch('cinder.image.image_utils.qemu_img_info') - def 
test_create_snapshot_from_bootable_volume_fail(self, mock_qemu_info): - """Test create snapshot from bootable volume. - - But it fails to volume_glance_metadata_copy_to_snapshot. - As a result, status of snapshot is changed to ERROR. - """ - # create bootable volume from image - volume = self._create_volume_from_image() - volume_id = volume['id'] - self.assertEqual('available', volume['status']) - self.assertTrue(volume['bootable']) - - image_info = imageutils.QemuImgInfo() - image_info.virtual_size = '1073741824' - mock_qemu_info.return_value = image_info - - # get volume's volume_glance_metadata - ctxt = context.get_admin_context() - vol_glance_meta = db.volume_glance_metadata_get(ctxt, volume_id) - self.assertTrue(vol_glance_meta) - snap = create_snapshot(volume_id) - self.assertEqual(36, len(snap.id)) # dynamically-generated UUID - self.assertEqual('creating', snap.status) - - # set to return DB exception - with mock.patch.object(db, 'volume_glance_metadata_copy_to_snapshot')\ - as mock_db: - mock_db.side_effect = exception.MetadataCopyFailure( - reason="Because of DB service down.") - # create snapshot from bootable volume - self.assertRaises(exception.MetadataCopyFailure, - self.volume.create_snapshot, - ctxt, - snap) - - # get snapshot's volume_glance_metadata - self.assertRaises(exception.GlanceMetadataNotFound, - db.volume_snapshot_glance_metadata_get, - ctxt, snap.id) - - # ensure that status of snapshot is 'error' - self.assertEqual(fields.SnapshotStatus.ERROR, snap.status) - - # cleanup resource - snap.destroy() - db.volume_destroy(ctxt, volume_id) - - def test_create_snapshot_from_bootable_volume_with_volume_metadata_none( - self): - volume = tests_utils.create_volume(self.context, **self.volume_params) - volume_id = volume['id'] - - self.volume.create_volume(self.context, volume) - # set bootable flag of volume to True - db.volume_update(self.context, volume_id, {'bootable': True}) - - snapshot = create_snapshot(volume['id']) - 
self.volume.create_snapshot(self.context, snapshot) - self.assertRaises(exception.GlanceMetadataNotFound, - db.volume_snapshot_glance_metadata_get, - self.context, snapshot.id) - - # ensure that status of snapshot is 'available' - self.assertEqual(fields.SnapshotStatus.AVAILABLE, snapshot.status) - - # cleanup resource - snapshot.destroy() - db.volume_destroy(self.context, volume_id) - - def test_delete_busy_snapshot(self): - """Test snapshot can be created and deleted.""" - - self.volume.driver.vg = fake_lvm.FakeBrickLVM('cinder-volumes', - False, - None, - 'default') - - volume = tests_utils.create_volume(self.context, **self.volume_params) - volume_id = volume['id'] - self.volume.create_volume(self.context, volume) - snapshot = create_snapshot(volume_id, size=volume['size']) - self.volume.create_snapshot(self.context, snapshot) - - with mock.patch.object(self.volume.driver, 'delete_snapshot', - side_effect=exception.SnapshotIsBusy( - snapshot_name='fake') - ) as mock_del_snap: - snapshot_id = snapshot.id - self.volume.delete_snapshot(self.context, snapshot) - snapshot_ref = objects.Snapshot.get_by_id(self.context, - snapshot_id) - self.assertEqual(snapshot_id, snapshot_ref.id) - self.assertEqual(fields.SnapshotStatus.AVAILABLE, - snapshot_ref.status) - mock_del_snap.assert_called_once_with(snapshot) - - @test.testtools.skipIf(sys.platform == "darwin", "SKIP on OSX") - def test_delete_no_dev_fails(self): - """Test delete snapshot with no dev file fails.""" - self.mock_object(os.path, 'exists', lambda x: False) - self.volume.driver.vg = fake_lvm.FakeBrickLVM('cinder-volumes', - False, - None, - 'default') - - volume = tests_utils.create_volume(self.context, **self.volume_params) - volume_id = volume['id'] - self.volume.create_volume(self.context, volume) - snapshot = create_snapshot(volume_id) - snapshot_id = snapshot.id - self.volume.create_snapshot(self.context, snapshot) - - with mock.patch.object(self.volume.driver, 'delete_snapshot', - 
side_effect=exception.SnapshotIsBusy( - snapshot_name='fake')) as mock_del_snap: - self.volume.delete_snapshot(self.context, snapshot) - snapshot_ref = objects.Snapshot.get_by_id(self.context, - snapshot_id) - self.assertEqual(snapshot_id, snapshot_ref.id) - self.assertEqual(fields.SnapshotStatus.AVAILABLE, - snapshot_ref.status) - mock_del_snap.assert_called_once_with(snapshot) - - def test_volume_api_update_snapshot(self): - # create raw snapshot - volume = tests_utils.create_volume(self.context, **self.volume_params) - snapshot = create_snapshot(volume['id']) - snapshot_id = snapshot.id - self.assertIsNone(snapshot.display_name) - # use volume.api to update name - volume_api = cinder.volume.api.API() - update_dict = {'display_name': 'test update name'} - volume_api.update_snapshot(self.context, snapshot, update_dict) - # read changes from db - snap = objects.Snapshot.get_by_id(context.get_admin_context(), - snapshot_id) - self.assertEqual('test update name', snap.display_name) - - @mock.patch.object(QUOTAS, 'reserve', - side_effect = OVER_SNAPSHOT_QUOTA_EXCEPTION) - def test_existing_snapshot_failed_quota_reserve(self, mock_reserve): - vol = tests_utils.create_volume(self.context) - snap = tests_utils.create_snapshot(self.context, vol.id) - with mock.patch.object( - self.volume.driver, - 'manage_existing_snapshot_get_size') as mock_get_size: - mock_get_size.return_value = 1 - self.assertRaises(exception.SnapshotLimitExceeded, - self.volume.manage_existing_snapshot, - self.context, - snap) diff --git a/cinder/tests/unit/volume/test_volume.py b/cinder/tests/unit/volume/test_volume.py deleted file mode 100644 index e8bd668e1..000000000 --- a/cinder/tests/unit/volume/test_volume.py +++ /dev/null @@ -1,2896 +0,0 @@ -# Copyright 2010 United States Government as represented by the -# Administrator of the National Aeronautics and Space Administration. -# All Rights Reserved. 
-# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -"""Tests for Volume Code.""" - -import datetime -import ddt -import time -import uuid - -import enum -import eventlet -import mock -from oslo_concurrency import processutils -from oslo_config import cfg -from oslo_utils import imageutils -import six -from taskflow.engines.action_engine import engine - -from cinder.api import common -from cinder import context -from cinder import coordination -from cinder import db -from cinder import exception -from cinder import keymgr as key_manager -from cinder import objects -from cinder.objects import fields -import cinder.policy -from cinder import quota -from cinder.tests import fake_driver -from cinder.tests.unit import conf_fixture -from cinder.tests.unit import fake_constants as fake -from cinder.tests.unit import fake_snapshot -from cinder.tests.unit import fake_volume -from cinder.tests.unit.keymgr import fake as fake_keymgr -from cinder.tests.unit import utils as tests_utils -from cinder.tests.unit import volume as base -from cinder import utils -import cinder.volume -from cinder.volume import driver -from cinder.volume import manager as vol_manager -from cinder.volume import rpcapi as volume_rpcapi -import cinder.volume.targets.tgt - - -QUOTAS = quota.QUOTAS - -CONF = cfg.CONF - -ENCRYPTION_PROVIDER = 'nova.volume.encryptors.cryptsetup.CryptsetupEncryptor' - -fake_opt = [ - cfg.StrOpt('fake_opt1', default='fake', help='fake opts') -] - - -def create_snapshot(volume_id, 
size=1, metadata=None, ctxt=None, - **kwargs): - """Create a snapshot object.""" - metadata = metadata or {} - snap = objects.Snapshot(ctxt or context.get_admin_context()) - snap.volume_size = size - snap.user_id = fake.USER_ID - snap.project_id = fake.PROJECT_ID - snap.volume_id = volume_id - snap.status = fields.SnapshotStatus.CREATING - if metadata is not None: - snap.metadata = metadata - snap.update(kwargs) - - snap.create() - return snap - - -@ddt.ddt -class VolumeTestCase(base.BaseVolumeTestCase): - - def setUp(self): - super(VolumeTestCase, self).setUp() - self.patch('cinder.volume.utils.clear_volume', autospec=True) - self.expected_status = 'available' - self.service_id = 1 - self.user_context = context.RequestContext(user_id=fake.USER_ID, - project_id=fake.PROJECT_ID) - - @mock.patch('cinder.objects.service.Service.get_minimum_rpc_version') - @mock.patch('cinder.objects.service.Service.get_minimum_obj_version') - @mock.patch('cinder.rpc.LAST_RPC_VERSIONS', {'cinder-scheduler': '1.3'}) - @mock.patch('cinder.rpc.LAST_OBJ_VERSIONS', {'cinder-scheduler': '1.4'}) - def test_reset(self, get_min_obj, get_min_rpc): - vol_mgr = vol_manager.VolumeManager() - - scheduler_rpcapi = vol_mgr.scheduler_rpcapi - self.assertEqual('1.3', scheduler_rpcapi.client.version_cap) - self.assertEqual('1.4', - scheduler_rpcapi.client.serializer._base.version_cap) - get_min_obj.return_value = objects.base.OBJ_VERSIONS.get_current() - vol_mgr.reset() - - scheduler_rpcapi = vol_mgr.scheduler_rpcapi - self.assertEqual(get_min_rpc.return_value, - scheduler_rpcapi.client.version_cap) - self.assertEqual(get_min_obj.return_value, - scheduler_rpcapi.client.serializer._base.version_cap) - self.assertIsNone(scheduler_rpcapi.client.serializer._base.manifest) - - @mock.patch('oslo_utils.importutils.import_object') - def test_backend_availability_zone(self, mock_import_object): - # NOTE(smcginnis): This isn't really the best place for this test, - # but we don't currently have a pure 
VolumeManager test class. So - # until we create a good suite for that class, putting here with - # other tests that use VolumeManager. - - opts = { - 'backend_availability_zone': 'caerbannog' - } - - def conf_get(option): - if option in opts: - return opts[option] - return None - - mock_driver = mock.Mock() - mock_driver.configuration.safe_get.side_effect = conf_get - mock_driver.configuration.extra_capabilities = 'null' - - def import_obj(*args, **kwargs): - return mock_driver - - mock_import_object.side_effect = import_obj - - manager = vol_manager.VolumeManager(volume_driver=mock_driver) - self.assertIsNotNone(manager) - self.assertEqual(opts['backend_availability_zone'], - manager.availability_zone) - - @mock.patch.object(vol_manager.VolumeManager, - 'update_service_capabilities') - def test_report_filter_goodness_function(self, mock_update): - manager = vol_manager.VolumeManager() - manager.driver.set_initialized() - myfilterfunction = "myFilterFunction" - mygoodnessfunction = "myGoodnessFunction" - expected = {'name': 'cinder-volumes', - 'filter_function': myfilterfunction, - 'goodness_function': mygoodnessfunction, - } - with mock.patch.object(manager.driver, - 'get_volume_stats') as m_get_stats: - with mock.patch.object(manager.driver, - 'get_goodness_function') as m_get_goodness: - with mock.patch.object(manager.driver, - 'get_filter_function') as m_get_filter: - m_get_stats.return_value = {'name': 'cinder-volumes'} - m_get_filter.return_value = myfilterfunction - m_get_goodness.return_value = mygoodnessfunction - manager._report_driver_status(1) - self.assertTrue(m_get_stats.called) - mock_update.assert_called_once_with(expected) - - def test_is_working(self): - # By default we have driver mocked to be initialized... - self.assertTrue(self.volume.is_working()) - - # ...lets switch it and check again! 
- self.volume.driver._initialized = False - self.assertFalse(self.volume.is_working()) - - @mock.patch('cinder.tests.unit.fake_notifier.FakeNotifier._notify') - @mock.patch.object(QUOTAS, 'reserve') - @mock.patch.object(QUOTAS, 'commit') - @mock.patch.object(QUOTAS, 'rollback') - def test_create_driver_not_initialized(self, reserve, commit, rollback, - mock_notify): - self.volume.driver._initialized = False - - def fake_reserve(context, expire=None, project_id=None, **deltas): - return ["RESERVATION"] - - def fake_commit_and_rollback(context, reservations, project_id=None): - pass - - reserve.return_value = fake_reserve - commit.return_value = fake_commit_and_rollback - rollback.return_value = fake_commit_and_rollback - - volume = tests_utils.create_volume( - self.context, - availability_zone=CONF.storage_availability_zone, - **self.volume_params) - - volume_id = volume['id'] - self.assertIsNone(volume['encryption_key_id']) - mock_notify.assert_not_called() - self.assertRaises(exception.DriverNotInitialized, - self.volume.create_volume, self.context, volume) - - volume = db.volume_get(context.get_admin_context(), volume_id) - self.assertEqual("error", volume.status) - db.volume_destroy(context.get_admin_context(), volume_id) - - def test_create_driver_not_initialized_rescheduling(self): - self.volume.driver._initialized = False - mock_delete = self.mock_object(self.volume.driver, 'delete_volume') - - volume = tests_utils.create_volume( - self.context, - availability_zone=CONF.storage_availability_zone, - **self.volume_params) - - volume_id = volume['id'] - self.assertRaises(exception.DriverNotInitialized, - self.volume.create_volume, - self.context, volume, - {'volume_properties': self.volume_params}, - {'retry': {'num_attempts': 1, 'host': []}}) - # NOTE(dulek): Volume should be rescheduled as we passed request_spec - # and filter_properties, assert that it wasn't counted in - # allocated_capacity tracking. 
- self.assertEqual({}, self.volume.stats['pools']) - - # NOTE(dulek): As we've rescheduled, make sure delete_volume was - # called. - self.assertTrue(mock_delete.called) - - db.volume_destroy(context.get_admin_context(), volume_id) - - def test_create_non_cinder_exception_rescheduling(self): - params = self.volume_params - del params['host'] - volume = tests_utils.create_volume( - self.context, - availability_zone=CONF.storage_availability_zone, - **params) - - volume_id = volume['id'] - with mock.patch.object(self.volume.driver, 'create_volume', - side_effect=processutils.ProcessExecutionError): - self.assertRaises(processutils.ProcessExecutionError, - self.volume.create_volume, - self.context, volume, - {'volume_properties': params}, - {'retry': {'num_attempts': 1, 'host': []}}) - # NOTE(dulek): Volume should be rescheduled as we passed request_spec - # and filter_properties, assert that it wasn't counted in - # allocated_capacity tracking. - self.assertEqual({}, self.volume.stats['pools']) - - db.volume_destroy(context.get_admin_context(), volume_id) - - @mock.patch('cinder.tests.unit.fake_notifier.FakeNotifier._notify') - @mock.patch.object(QUOTAS, 'rollback') - @mock.patch.object(QUOTAS, 'commit') - @mock.patch.object(QUOTAS, 'reserve') - def test_delete_driver_not_initialized(self, reserve, commit, rollback, - mock_notify): - self.volume.driver._initialized = False - - def fake_reserve(context, expire=None, project_id=None, **deltas): - return ["RESERVATION"] - - def fake_commit_and_rollback(context, reservations, project_id=None): - pass - - reserve.return_value = fake_reserve - commit.return_value = fake_commit_and_rollback - rollback.return_value = fake_commit_and_rollback - - volume = tests_utils.create_volume( - self.context, - availability_zone=CONF.storage_availability_zone, - **self.volume_params) - - self.assertIsNone(volume['encryption_key_id']) - mock_notify.assert_not_called() - self.assertRaises(exception.DriverNotInitialized, - 
self.volume.delete_volume, self.context, volume) - - volume = objects.Volume.get_by_id(self.context, volume.id) - self.assertEqual("error_deleting", volume.status) - volume.destroy() - - @mock.patch('cinder.tests.unit.fake_notifier.FakeNotifier._notify') - @mock.patch('cinder.quota.QUOTAS.rollback', new=mock.Mock()) - @mock.patch('cinder.quota.QUOTAS.commit', new=mock.Mock()) - @mock.patch('cinder.quota.QUOTAS.reserve', return_value=['RESERVATION']) - def test_create_delete_volume(self, _mock_reserve, mock_notify): - """Test volume can be created and deleted.""" - volume = tests_utils.create_volume( - self.context, - availability_zone=CONF.storage_availability_zone, - **self.volume_params) - volume_id = volume['id'] - - mock_notify.assert_not_called() - - self.assertIsNone(volume['encryption_key_id']) - - self.volume.create_volume(self.context, volume) - - self.assert_notify_called(mock_notify, - (['INFO', 'volume.create.start'], - ['INFO', 'volume.create.end'])) - - self.volume.delete_volume(self.context, volume) - vol = db.volume_get(context.get_admin_context(read_deleted='yes'), - volume_id) - self.assertEqual(vol['status'], 'deleted') - - self.assert_notify_called(mock_notify, - (['INFO', 'volume.create.start'], - ['INFO', 'volume.create.end'], - ['INFO', 'volume.delete.start'], - ['INFO', 'volume.delete.end'])) - - self.assertRaises(exception.NotFound, - db.volume_get, - self.context, - volume_id) - - def test_create_delete_volume_with_metadata(self): - """Test volume can be created with metadata and deleted.""" - test_meta = {'fake_key': 'fake_value'} - volume = tests_utils.create_volume(self.context, metadata=test_meta, - **self.volume_params) - volume_id = volume['id'] - self.volume.create_volume(self.context, volume) - self.assertEqual(test_meta, volume.metadata) - - self.volume.delete_volume(self.context, volume) - self.assertRaises(exception.NotFound, - db.volume_get, - self.context, - volume_id) - - def test_delete_volume_frozen(self): - service = 
tests_utils.create_service(self.context, {'frozen': True}) - volume = tests_utils.create_volume(self.context, host=service.host) - self.assertRaises(exception.InvalidInput, - self.volume_api.delete, self.context, volume) - - def test_delete_volume_another_cluster_fails(self): - """Test delete of volume from another cluster fails.""" - self.volume.cluster = 'mycluster' - volume = tests_utils.create_volume(self.context, status='available', - size=1, host=CONF.host + 'fake', - cluster_name=self.volume.cluster) - self.volume.delete_volume(self.context, volume) - self.assertRaises(exception.NotFound, - db.volume_get, - self.context, - volume.id) - - @mock.patch('cinder.db.volume_metadata_update') - def test_create_volume_metadata(self, metadata_update): - metadata = {'fake_key': 'fake_value'} - metadata_update.return_value = metadata - volume = tests_utils.create_volume(self.context, **self.volume_params) - res = self.volume_api.create_volume_metadata(self.context, - volume, metadata) - metadata_update.assert_called_once_with(self.context, volume.id, - metadata, False, - common.METADATA_TYPES.user) - self.assertEqual(metadata, res) - - @ddt.data('maintenance', 'uploading') - def test_create_volume_metadata_maintenance(self, status): - metadata = {'fake_key': 'fake_value'} - volume = tests_utils.create_volume(self.context, **self.volume_params) - volume['status'] = status - self.assertRaises(exception.InvalidVolume, - self.volume_api.create_volume_metadata, - self.context, - volume, - metadata) - - def test_update_volume_metadata_with_metatype(self): - """Test update volume metadata with different metadata type.""" - test_meta1 = {'fake_key1': 'fake_value1'} - test_meta2 = {'fake_key1': 'fake_value2'} - FAKE_METADATA_TYPE = enum.Enum('METADATA_TYPES', 'fake_type') - volume = tests_utils.create_volume(self.context, metadata=test_meta1, - **self.volume_params) - self.volume.create_volume(self.context, volume) - # update user metadata associated with the volume. 
- result_meta = self.volume_api.update_volume_metadata( - self.context, - volume, - test_meta2, - False, - common.METADATA_TYPES.user) - self.assertEqual(test_meta2, result_meta) - - # create image metadata associated with the volume. - result_meta = self.volume_api.update_volume_metadata( - self.context, - volume, - test_meta1, - False, - common.METADATA_TYPES.image) - self.assertEqual(test_meta1, result_meta) - - # update image metadata associated with the volume. - result_meta = self.volume_api.update_volume_metadata( - self.context, - volume, - test_meta2, - False, - common.METADATA_TYPES.image) - self.assertEqual(test_meta2, result_meta) - - # update volume metadata with invalid metadta type. - self.assertRaises(exception.InvalidMetadataType, - self.volume_api.update_volume_metadata, - self.context, - volume, - test_meta1, - False, - FAKE_METADATA_TYPE.fake_type) - - def test_update_volume_metadata_maintenance(self): - """Test update volume metadata with different metadata type.""" - test_meta1 = {'fake_key1': 'fake_value1'} - FAKE_METADATA_TYPE = enum.Enum('METADATA_TYPES', 'fake_type') - volume = tests_utils.create_volume(self.context, metadata=test_meta1, - **self.volume_params) - volume['status'] = 'maintenance' - self.assertRaises(exception.InvalidVolume, - self.volume_api.update_volume_metadata, - self.context, - volume, - test_meta1, - False, - FAKE_METADATA_TYPE.fake_type) - - @mock.patch('cinder.db.volume_update') - def test_update_with_ovo(self, volume_update): - """Test update volume using oslo_versionedobject.""" - volume = tests_utils.create_volume(self.context, **self.volume_params) - updates = {'display_name': 'foobbar'} - self.volume_api.update(self.context, volume, updates) - volume_update.assert_called_once_with(self.context, volume.id, - updates) - self.assertEqual('foobbar', volume.display_name) - - def test_delete_volume_metadata_with_metatype(self): - """Test delete volume metadata with different metadata type.""" - test_meta1 = 
{'fake_key1': 'fake_value1', 'fake_key2': 'fake_value2'} - test_meta2 = {'fake_key1': 'fake_value1'} - FAKE_METADATA_TYPE = enum.Enum('METADATA_TYPES', 'fake_type') - volume = tests_utils.create_volume(self.context, metadata=test_meta1, - **self.volume_params) - volume_id = volume['id'] - self.volume.create_volume(self.context, volume) - # delete user metadata associated with the volume. - self.volume_api.delete_volume_metadata( - self.context, - volume, - 'fake_key2', - common.METADATA_TYPES.user) - - self.assertEqual(test_meta2, - db.volume_metadata_get(self.context, volume_id)) - - # create image metadata associated with the volume. - result_meta = self.volume_api.update_volume_metadata( - self.context, - volume, - test_meta1, - False, - common.METADATA_TYPES.image) - - self.assertEqual(test_meta1, result_meta) - - # delete image metadata associated with the volume. - self.volume_api.delete_volume_metadata( - self.context, - volume, - 'fake_key2', - common.METADATA_TYPES.image) - - # parse the result to build the dict. - rows = db.volume_glance_metadata_get(self.context, volume_id) - result = {} - for row in rows: - result[row['key']] = row['value'] - self.assertEqual(test_meta2, result) - - # delete volume metadata with invalid metadta type. 
- self.assertRaises(exception.InvalidMetadataType, - self.volume_api.delete_volume_metadata, - self.context, - volume, - 'fake_key1', - FAKE_METADATA_TYPE.fake_type) - - def test_delete_volume_metadata_maintenance(self): - """Test delete volume metadata in maintenance.""" - FAKE_METADATA_TYPE = enum.Enum('METADATA_TYPES', 'fake_type') - test_meta1 = {'fake_key1': 'fake_value1', 'fake_key2': 'fake_value2'} - volume = tests_utils.create_volume(self.context, metadata=test_meta1, - **self.volume_params) - volume['status'] = 'maintenance' - self.assertRaises(exception.InvalidVolume, - self.volume_api.delete_volume_metadata, - self.context, - volume, - 'fake_key1', - FAKE_METADATA_TYPE.fake_type) - - def test_accept_transfer_maintenance(self): - """Test accept transfer in maintenance.""" - test_meta1 = {'fake_key1': 'fake_value1', 'fake_key2': 'fake_value2'} - volume = tests_utils.create_volume(self.context, metadata=test_meta1, - **self.volume_params) - volume['status'] = 'maintenance' - volume_api = cinder.volume.api.API() - self.assertRaises(exception.InvalidVolume, - volume_api.accept_transfer, - self.context, - volume, - None, None) - - @mock.patch.object(cinder.volume.api.API, 'list_availability_zones') - def test_create_volume_uses_default_availability_zone(self, mock_list_az): - """Test setting availability_zone correctly during volume create.""" - mock_list_az.return_value = ({'name': 'az1', 'available': True}, - {'name': 'az2', 'available': True}, - {'name': 'default-az', 'available': True}) - - volume_api = cinder.volume.api.API() - - # Test backwards compatibility, default_availability_zone not set - self.override_config('storage_availability_zone', 'az2') - volume = volume_api.create(self.context, - 1, - 'name', - 'description') - self.assertEqual('az2', volume['availability_zone']) - - self.override_config('default_availability_zone', 'default-az') - volume = volume_api.create(self.context, - 1, - 'name', - 'description') - self.assertEqual('default-az', 
volume['availability_zone']) - - @mock.patch('cinder.quota.QUOTAS.rollback', new=mock.MagicMock()) - @mock.patch('cinder.quota.QUOTAS.commit', new=mock.MagicMock()) - @mock.patch('cinder.quota.QUOTAS.reserve', return_value=["RESERVATION"]) - def test_create_volume_with_volume_type(self, _mock_reserve): - """Test volume creation with default volume type.""" - volume_api = cinder.volume.api.API() - - # Create volume with default volume type while default - # volume type doesn't exist, volume_type_id should be NULL - volume = volume_api.create(self.context, - 1, - 'name', - 'description') - self.assertIsNone(volume['volume_type_id']) - self.assertIsNone(volume['encryption_key_id']) - - # Create default volume type - vol_type = conf_fixture.def_vol_type - db.volume_type_create(context.get_admin_context(), - {'name': vol_type, 'extra_specs': {}}) - - db_vol_type = db.volume_type_get_by_name(context.get_admin_context(), - vol_type) - - # Create volume with default volume type - volume = volume_api.create(self.context, - 1, - 'name', - 'description') - self.assertEqual(db_vol_type.get('id'), volume['volume_type_id']) - self.assertIsNone(volume['encryption_key_id']) - - # Create volume with specific volume type - vol_type = 'test' - db.volume_type_create(context.get_admin_context(), - {'name': vol_type, 'extra_specs': {}}) - db_vol_type = db.volume_type_get_by_name(context.get_admin_context(), - vol_type) - - volume = volume_api.create(self.context, - 1, - 'name', - 'description', - volume_type=db_vol_type) - self.assertEqual(db_vol_type.get('id'), volume['volume_type_id']) - - @mock.patch.object(key_manager, 'API', fake_keymgr.fake_api) - def test_create_volume_with_encrypted_volume_type_aes(self): - ctxt = context.get_admin_context() - - cipher = 'aes-xts-plain64' - key_size = 256 - control_location = 'front-end' - - db.volume_type_create(ctxt, - {'id': '61298380-0c12-11e3-bfd6-4b48424183be', - 'name': 'LUKS'}) - db.volume_type_encryption_create( - ctxt, - 
'61298380-0c12-11e3-bfd6-4b48424183be', - {'control_location': control_location, - 'provider': ENCRYPTION_PROVIDER, - 'cipher': cipher, - 'key_size': key_size}) - - volume_api = cinder.volume.api.API() - - db_vol_type = db.volume_type_get_by_name(ctxt, 'LUKS') - - volume = volume_api.create(self.context, - 1, - 'name', - 'description', - volume_type=db_vol_type) - - key_manager = volume_api.key_manager - key = key_manager.get(self.context, volume['encryption_key_id']) - self.assertEqual(key_size, len(key.get_encoded()) * 8) - self.assertEqual('aes', key.algorithm) - - metadata = db.volume_encryption_metadata_get(self.context, volume.id) - self.assertEqual(db_vol_type.get('id'), volume['volume_type_id']) - self.assertEqual(cipher, metadata.get('cipher')) - self.assertEqual(key_size, metadata.get('key_size')) - self.assertIsNotNone(volume['encryption_key_id']) - - @mock.patch.object(key_manager, 'API', fake_keymgr.fake_api) - def test_create_volume_with_encrypted_volume_type_blowfish(self): - ctxt = context.get_admin_context() - - cipher = 'blowfish-cbc' - key_size = 32 - control_location = 'front-end' - - db.volume_type_create(ctxt, - {'id': '61298380-0c12-11e3-bfd6-4b48424183be', - 'name': 'LUKS'}) - db.volume_type_encryption_create( - ctxt, - '61298380-0c12-11e3-bfd6-4b48424183be', - {'control_location': control_location, - 'provider': ENCRYPTION_PROVIDER, - 'cipher': cipher, - 'key_size': key_size}) - - volume_api = cinder.volume.api.API() - - db_vol_type = db.volume_type_get_by_name(ctxt, 'LUKS') - - volume = volume_api.create(self.context, - 1, - 'name', - 'description', - volume_type=db_vol_type) - - key_manager = volume_api.key_manager - key = key_manager.get(self.context, volume['encryption_key_id']) - self.assertEqual('blowfish', key.algorithm) - - metadata = db.volume_encryption_metadata_get(self.context, volume.id) - self.assertEqual(db_vol_type.get('id'), volume['volume_type_id']) - self.assertEqual(cipher, metadata.get('cipher')) - 
self.assertEqual(key_size, metadata.get('key_size')) - self.assertIsNotNone(volume['encryption_key_id']) - - def test_create_volume_with_provider_id(self): - volume_params_with_provider_id = dict(provider_id=fake.PROVIDER_ID, - **self.volume_params) - - volume = tests_utils.create_volume(self.context, - **volume_params_with_provider_id) - - self.volume.create_volume(self.context, volume) - self.assertEqual(fake.PROVIDER_ID, volume['provider_id']) - - def test_create_volume_with_admin_metadata(self): - with mock.patch.object( - self.volume.driver, 'create_volume', - return_value={'admin_metadata': {'foo': 'bar'}}): - volume = tests_utils.create_volume(self.user_context) - self.volume.create_volume(self.user_context, volume) - self.assertEqual({'foo': 'bar'}, volume['admin_metadata']) - - @mock.patch.object(key_manager, 'API', new=fake_keymgr.fake_api) - def test_create_delete_volume_with_encrypted_volume_type(self): - cipher = 'aes-xts-plain64' - key_size = 256 - db.volume_type_create(self.context, - {'id': fake.VOLUME_TYPE_ID, 'name': 'LUKS'}) - db.volume_type_encryption_create( - self.context, fake.VOLUME_TYPE_ID, - {'control_location': 'front-end', 'provider': ENCRYPTION_PROVIDER, - 'cipher': cipher, 'key_size': key_size}) - - db_vol_type = db.volume_type_get_by_name(self.context, 'LUKS') - - volume = self.volume_api.create(self.context, - 1, - 'name', - 'description', - volume_type=db_vol_type) - - self.assertIsNotNone(volume.get('encryption_key_id', None)) - self.assertEqual(db_vol_type.get('id'), volume['volume_type_id']) - - volume['host'] = 'fake_host' - volume['status'] = 'available' - db.volume_update(self.context, volume['id'], {'status': 'available'}) - self.volume_api.delete(self.context, volume) - - volume = objects.Volume.get_by_id(self.context, volume.id) - while volume.status == 'available': - # Must wait for volume_api delete request to process enough to - # change the volume status. 
- time.sleep(0.5) - volume.refresh() - - self.assertEqual('deleting', volume['status']) - - db.volume_destroy(self.context, volume['id']) - self.assertRaises(exception.NotFound, - db.volume_get, - self.context, - volume['id']) - - def test_delete_busy_volume(self): - """Test volume survives deletion if driver reports it as busy.""" - volume = tests_utils.create_volume(self.context, **self.volume_params) - volume_id = volume['id'] - self.volume.create_volume(self.context, volume) - - with mock.patch.object(self.volume.driver, 'delete_volume', - side_effect=exception.VolumeIsBusy( - volume_name='fake') - ) as mock_del_vol: - self.volume.delete_volume(self.context, volume) - volume_ref = db.volume_get(context.get_admin_context(), volume_id) - self.assertEqual(volume_id, volume_ref.id) - self.assertEqual("available", volume_ref.status) - mock_del_vol.assert_called_once_with(volume) - - def test_get_volume_different_tenant(self): - """Test can't get volume of another tenant when viewable_admin_meta.""" - volume = tests_utils.create_volume(self.context, - **self.volume_params) - volume_id = volume['id'] - self.volume.create_volume(self.context, volume) - - another_context = context.RequestContext('another_user_id', - 'another_project_id', - is_admin=False) - self.assertNotEqual(another_context.project_id, - self.context.project_id) - - volume_api = cinder.volume.api.API() - - self.assertRaises(exception.VolumeNotFound, volume_api.get, - another_context, volume_id, viewable_admin_meta=True) - self.assertEqual(volume_id, - volume_api.get(self.context, volume_id)['id']) - - self.volume.delete_volume(self.context, volume) - - def test_get_all_limit_bad_value(self): - """Test value of 'limit' is numeric and >= 0""" - volume_api = cinder.volume.api.API() - self.assertRaises(exception.InvalidInput, - volume_api.get_all, - self.context, - limit="A") - self.assertRaises(exception.InvalidInput, - volume_api.get_all, - self.context, - limit="-1") - - def 
test_get_all_tenants_volume_list(self): - """Validate when the volume list for all tenants is returned""" - volume_api = cinder.volume.api.API() - - with mock.patch.object(volume_api.db, - 'volume_get_all_by_project') as by_project: - with mock.patch.object(volume_api.db, - 'volume_get_all') as get_all: - db_volume = {'volume_type_id': fake.VOLUME_TYPE_ID, - 'name': 'fake_name', - 'host': 'fake_host', - 'id': fake.VOLUME_ID} - - volume = fake_volume.fake_db_volume(**db_volume) - by_project.return_value = [volume] - get_all.return_value = [volume] - - volume_api.get_all(self.context, filters={'all_tenants': '0'}) - self.assertTrue(by_project.called) - by_project.called = False - - self.context.is_admin = False - volume_api.get_all(self.context, filters={'all_tenants': '1'}) - self.assertTrue(by_project.called) - - # check for volume list of all tenants - self.context.is_admin = True - volume_api.get_all(self.context, filters={'all_tenants': '1'}) - self.assertTrue(get_all.called) - - def test_delete_volume_in_error_extending(self): - """Test volume can be deleted in error_extending stats.""" - # create a volume - volume = tests_utils.create_volume(self.context, **self.volume_params) - self.volume.create_volume(self.context, volume) - - # delete 'error_extending' volume - db.volume_update(self.context, volume['id'], - {'status': 'error_extending'}) - self.volume.delete_volume(self.context, volume) - self.assertRaises(exception.NotFound, db.volume_get, - self.context, volume['id']) - - @mock.patch.object(db.sqlalchemy.api, 'volume_get', - side_effect=exception.VolumeNotFound( - volume_id='12345678-1234-5678-1234-567812345678')) - def test_delete_volume_not_found(self, mock_get_volume): - """Test delete volume moves on if the volume does not exist.""" - volume_id = '12345678-1234-5678-1234-567812345678' - volume = objects.Volume(self.context, status='available', id=volume_id) - self.volume.delete_volume(self.context, volume) - self.assertTrue(mock_get_volume.called) - 
- @mock.patch('cinder.volume.drivers.lvm.LVMVolumeDriver.' - 'create_volume_from_snapshot') - def test_create_volume_from_snapshot(self, mock_create_from_snap): - """Test volume can be created from a snapshot.""" - volume_src = tests_utils.create_volume(self.context, - **self.volume_params) - self.volume.create_volume(self.context, volume_src) - snapshot_id = create_snapshot(volume_src['id'], - size=volume_src['size'])['id'] - snapshot_obj = objects.Snapshot.get_by_id(self.context, snapshot_id) - self.volume.create_snapshot(self.context, snapshot_obj) - volume_dst = tests_utils.create_volume(self.context, - snapshot_id=snapshot_id, - **self.volume_params) - self.volume.create_volume(self.context, volume_dst) - self.assertEqual(volume_dst['id'], - db.volume_get( - context.get_admin_context(), - volume_dst['id']).id) - self.assertEqual(snapshot_id, - db.volume_get(context.get_admin_context(), - volume_dst['id']).snapshot_id) - - self.volume.delete_volume(self.context, volume_dst) - self.volume.delete_snapshot(self.context, snapshot_obj) - self.volume.delete_volume(self.context, volume_src) - - @mock.patch('cinder.volume.flows.api.create_volume.get_flow') - def test_create_volume_from_snapshot_with_types(self, _get_flow): - """Test volume create from snapshot with types including mistmatch.""" - volume_api = cinder.volume.api.API() - - foo_type = fake_volume.fake_volume_type_obj( - self.context, - id=fake.VOLUME_TYPE_ID, - name='foo', - extra_specs={'volume_backend_name': 'dev_1'}) - biz_type = fake_volume.fake_volume_type_obj( - self.context, - id=fake.VOLUME_TYPE2_ID, - name='foo', - extra_specs={'volume_backend_name': 'dev_2'}) - - source_vol = fake_volume.fake_volume_obj( - self.context, - id=fake.VOLUME_ID, - status='available', - volume_size=10, - volume_type_id=biz_type.id) - source_vol.volume_type = biz_type - snapshot = {'id': fake.SNAPSHOT_ID, - 'status': fields.SnapshotStatus.AVAILABLE, - 'volume_size': 10, - 'volume_type_id': biz_type.id} - snapshot_obj = 
fake_snapshot.fake_snapshot_obj(self.context, - **snapshot) - snapshot_obj.volume = source_vol - # Make sure the case of specifying a type that - # doesn't match the snapshots type fails - self.assertRaises(exception.InvalidInput, - volume_api.create, - self.context, - size=1, - name='fake_name', - description='fake_desc', - volume_type=foo_type, - snapshot=snapshot_obj) - - # Make sure that trying to specify a type - # when the snapshots type is None fails - snapshot_obj.volume_type_id = None - snapshot_obj.volume.volume_type_id = None - snapshot_obj.volume.volume_type = None - self.assertRaises(exception.InvalidInput, - volume_api.create, - self.context, - size=1, - name='fake_name', - description='fake_desc', - volume_type=foo_type, - snapshot=snapshot_obj) - - snapshot_obj.volume_type_id = foo_type.id - snapshot_obj.volume.volume_type_id = foo_type.id - snapshot_obj.volume.volume_type = foo_type - volume_api.create(self.context, size=1, name='fake_name', - description='fake_desc', volume_type=foo_type, - snapshot=snapshot_obj) - - @mock.patch('cinder.volume.flows.api.create_volume.get_flow') - def test_create_volume_from_source_with_types(self, _get_flow): - """Test volume create from source with types including mistmatch.""" - volume_api = cinder.volume.api.API() - foo_type = fake_volume.fake_volume_type_obj( - self.context, - id=fake.VOLUME_TYPE_ID, - name='foo', - extra_specs={'volume_backend_name': 'dev_1'}) - - biz_type = fake_volume.fake_volume_type_obj( - self.context, - id=fake.VOLUME_TYPE2_ID, - name='biz', - extra_specs={'volume_backend_name': 'dev_2'}) - - source_vol = fake_volume.fake_volume_obj( - self.context, - id=fake.VOLUME_ID, - status='available', - volume_size=0, - volume_type_id=biz_type.id) - source_vol.volume_type = biz_type - - self.assertRaises(exception.InvalidInput, - volume_api.create, - self.context, - size=1, - name='fake_name', - description='fake_desc', - volume_type=foo_type, - source_volume=source_vol) - - # Make sure that 
trying to specify a type - # when the source type is None fails - source_vol.volume_type_id = None - source_vol.volume_type = None - self.assertRaises(exception.InvalidInput, - volume_api.create, - self.context, - size=1, - name='fake_name', - description='fake_desc', - volume_type=foo_type, - source_volume=source_vol) - - source_vol.volume_type_id = biz_type.id - source_vol.volume_type = biz_type - volume_api.create(self.context, size=1, name='fake_name', - description='fake_desc', volume_type=biz_type, - source_volume=source_vol) - - @mock.patch('cinder.volume.flows.api.create_volume.get_flow') - def test_create_volume_from_source_with_same_backend(self, _get_flow): - """Test volume create from source with type mismatch same backend.""" - volume_api = cinder.volume.api.API() - - foo_type = fake_volume.fake_volume_type_obj( - self.context, - id=fake.VOLUME_TYPE_ID, - name='foo', - qos_specs_id=None, - deleted=False, - created_at=datetime.datetime(2015, 5, 8, 0, 40, 5, 408232), - updated_at=None, - extra_specs={'volume_backend_name': 'dev_1'}, - is_public=True, - deleted_at=None, - description=None) - - biz_type = fake_volume.fake_volume_type_obj( - self.context, - id=fake.VOLUME_TYPE2_ID, - name='biz', - qos_specs_id=None, - deleted=False, - created_at=datetime.datetime(2015, 5, 8, 0, 20, 5, 408232), - updated_at=None, - extra_specs={'volume_backend_name': 'dev_1'}, - is_public=True, - deleted_at=None, - description=None) - - source_vol = fake_volume.fake_volume_obj( - self.context, - id=fake.VOLUME_ID, - status='available', - volume_size=10, - volume_type_id=biz_type.id) - source_vol.volume_type = biz_type - volume_api.create(self.context, - size=1, - name='fake_name', - description='fake_desc', - volume_type=foo_type, - source_volume=source_vol) - - @mock.patch('cinder.volume.flows.api.create_volume.get_flow') - def test_create_from_source_and_snap_only_one_backend(self, _get_flow): - """Test create from source and snap with type mismatch one backend.""" - 
volume_api = cinder.volume.api.API() - - foo_type = fake_volume.fake_volume_type_obj( - self.context, - id=fake.VOLUME_TYPE_ID, - name='foo', - qos_specs_id=None, - deleted=False, - created_at=datetime.datetime(2015, 5, 8, 0, 40, 5, 408232), - updated_at=None, - extra_specs={'some_key': 3}, - is_public=True, - deleted_at=None, - description=None) - - biz_type = fake_volume.fake_volume_type_obj( - self.context, - id=fake.VOLUME_TYPE2_ID, - name='biz', - qos_specs_id=None, - deleted=False, - created_at=datetime.datetime(2015, 5, 8, 0, 20, 5, 408232), - updated_at=None, - extra_specs={'some_other_key': 4}, - is_public=True, - deleted_at=None, - description=None) - - source_vol = fake_volume.fake_volume_obj( - self.context, - id=fake.VOLUME_ID, - status='available', - volume_size=10, - volume_type_id=biz_type.id) - source_vol.volume_type = biz_type - - snapshot = {'id': fake.SNAPSHOT_ID, - 'status': fields.SnapshotStatus.AVAILABLE, - 'volume_size': 10, - 'volume_type_id': biz_type['id']} - snapshot_obj = fake_snapshot.fake_snapshot_obj(self.context, - **snapshot) - snapshot_obj.volume = source_vol - - with mock.patch('cinder.db.service_get_all') as mock_get_service, \ - mock.patch.object(volume_api, - 'list_availability_zones') as mock_get_azs: - mock_get_service.return_value = [{'host': 'foo'}] - mock_get_azs.return_value = {} - volume_api.create(self.context, - size=1, - name='fake_name', - description='fake_desc', - volume_type=foo_type, - source_volume=source_vol) - - volume_api.create(self.context, - size=1, - name='fake_name', - description='fake_desc', - volume_type=foo_type, - snapshot=snapshot_obj) - - def _test_create_from_source_snapshot_encryptions( - self, is_snapshot=False): - volume_api = cinder.volume.api.API() - foo_type = fake_volume.fake_volume_type_obj( - self.context, - id=fake.VOLUME_TYPE_ID, - name='foo', - extra_specs={'volume_backend_name': 'dev_1'}) - biz_type = fake_volume.fake_volume_type_obj( - self.context, - id=fake.VOLUME_TYPE2_ID, - 
name='biz', - extra_specs={'volume_backend_name': 'dev_1'}) - - source_vol = fake_volume.fake_volume_obj( - self.context, - id=fake.VOLUME_ID, - status='available', - volume_size=1, - volume_type_id=biz_type.id) - source_vol.volume_type = biz_type - - snapshot = {'id': fake.SNAPSHOT_ID, - 'status': fields.SnapshotStatus.AVAILABLE, - 'volume_size': 1, - 'volume_type_id': biz_type['id']} - snapshot_obj = fake_snapshot.fake_snapshot_obj(self.context, - **snapshot) - snapshot_obj.volume = source_vol - - with mock.patch.object( - cinder.volume.volume_types, - 'volume_types_encryption_changed') as mock_encryption_changed: - mock_encryption_changed.return_value = True - self.assertRaises(exception.InvalidInput, - volume_api.create, - self.context, - size=1, - name='fake_name', - description='fake_desc', - volume_type=foo_type, - source_volume=( - source_vol if not is_snapshot else None), - snapshot=snapshot_obj if is_snapshot else None) - - def test_create_from_source_encryption_changed(self): - self._test_create_from_source_snapshot_encryptions() - - def test_create_from_snapshot_encryption_changed(self): - self._test_create_from_source_snapshot_encryptions(is_snapshot=True) - - def _mock_synchronized(self, name, *s_args, **s_kwargs): - def inner_sync1(f): - def inner_sync2(*args, **kwargs): - self.called.append('lock-%s' % (name)) - ret = f(*args, **kwargs) - self.called.append('unlock-%s' % (name)) - return ret - return inner_sync2 - return inner_sync1 - - def _fake_execute(self, *cmd, **kwargs): - pass - - @mock.patch.object(coordination.Coordinator, 'get_lock') - @mock.patch.object(fake_driver.FakeLoggingVolumeDriver, - 'create_volume_from_snapshot') - def test_create_volume_from_snapshot_check_locks( - self, mock_lvm_create, mock_lock): - orig_flow = engine.ActionEngine.run - - def mock_flow_run(*args, **kwargs): - # ensure the lock has been taken - mock_lock.assert_called_with('%s-delete_snapshot' % snap_id) - # now proceed with the flow. 
- ret = orig_flow(*args, **kwargs) - return ret - - # create source volume - src_vol = tests_utils.create_volume(self.context, **self.volume_params) - - # no lock - self.volume.create_volume(self.context, src_vol) - - snap_id = create_snapshot(src_vol.id, - size=src_vol['size'])['id'] - snapshot_obj = objects.Snapshot.get_by_id(self.context, snap_id) - # no lock - self.volume.create_snapshot(self.context, snapshot_obj) - - dst_vol = tests_utils.create_volume(self.context, - snapshot_id=snap_id, - **self.volume_params) - admin_ctxt = context.get_admin_context() - - # mock the flow runner so we can do some checks - self.mock_object(engine.ActionEngine, 'run', mock_flow_run) - - # locked - self.volume.create_volume(self.context, dst_vol, - request_spec={'snapshot_id': snap_id}) - mock_lock.assert_called_with('%s-delete_snapshot' % snap_id) - self.assertEqual(dst_vol.id, db.volume_get(admin_ctxt, dst_vol.id).id) - self.assertEqual(snap_id, - db.volume_get(admin_ctxt, dst_vol.id).snapshot_id) - - # locked - self.volume.delete_volume(self.context, dst_vol) - mock_lock.assert_called_with('%s-delete_volume' % dst_vol.id) - - # locked - self.volume.delete_snapshot(self.context, snapshot_obj) - mock_lock.assert_called_with('%s-delete_snapshot' % snap_id) - - # locked - self.volume.delete_volume(self.context, src_vol) - mock_lock.assert_called_with('%s-delete_volume' % src_vol.id) - - self.assertTrue(mock_lvm_create.called) - - @mock.patch.object(coordination.Coordinator, 'get_lock') - def test_create_volume_from_volume_check_locks(self, mock_lock): - # mock the synchroniser so we can record events - self.mock_object(utils, 'execute', self._fake_execute) - - orig_flow = engine.ActionEngine.run - - def mock_flow_run(*args, **kwargs): - # ensure the lock has been taken - mock_lock.assert_called_with('%s-delete_volume' % src_vol_id) - # now proceed with the flow. 
- ret = orig_flow(*args, **kwargs) - return ret - - # create source volume - src_vol = tests_utils.create_volume(self.context, **self.volume_params) - src_vol_id = src_vol['id'] - - # no lock - self.volume.create_volume(self.context, src_vol) - self.assertEqual(0, mock_lock.call_count) - - dst_vol = tests_utils.create_volume(self.context, - source_volid=src_vol_id, - **self.volume_params) - dst_vol_id = dst_vol['id'] - admin_ctxt = context.get_admin_context() - - # mock the flow runner so we can do some checks - self.mock_object(engine.ActionEngine, 'run', mock_flow_run) - - # locked - self.volume.create_volume(self.context, dst_vol, - request_spec={'source_volid': src_vol_id}) - mock_lock.assert_called_with('%s-delete_volume' % src_vol_id) - self.assertEqual(dst_vol_id, db.volume_get(admin_ctxt, dst_vol_id).id) - self.assertEqual(src_vol_id, - db.volume_get(admin_ctxt, dst_vol_id).source_volid) - - # locked - self.volume.delete_volume(self.context, dst_vol) - mock_lock.assert_called_with('%s-delete_volume' % dst_vol_id) - - # locked - self.volume.delete_volume(self.context, src_vol) - mock_lock.assert_called_with('%s-delete_volume' % src_vol_id) - - def _raise_metadata_copy_failure(self, method, dst_vol): - # MetadataCopyFailure exception will be raised if DB service is Down - # while copying the volume glance metadata - with mock.patch.object(db, method) as mock_db: - mock_db.side_effect = exception.MetadataCopyFailure( - reason="Because of DB service down.") - self.assertRaises(exception.MetadataCopyFailure, - self.volume.create_volume, - self.context, - dst_vol) - - # ensure that status of volume is 'error' - vol = db.volume_get(self.context, dst_vol.id) - self.assertEqual('error', vol['status']) - - # cleanup resource - db.volume_destroy(self.context, dst_vol.id) - - @mock.patch('cinder.utils.execute') - def test_create_volume_from_volume_with_glance_volume_metadata_none( - self, mock_execute): - # create source volume - mock_execute.return_value = None - 
src_vol = tests_utils.create_volume(self.context, **self.volume_params) - src_vol_id = src_vol['id'] - - self.volume.create_volume(self.context, src_vol) - # set bootable flag of volume to True - db.volume_update(self.context, src_vol['id'], {'bootable': True}) - - # create volume from source volume - dst_vol = tests_utils.create_volume(self.context, - source_volid=src_vol_id, - **self.volume_params) - self.volume.create_volume(self.context, dst_vol) - - self.assertRaises(exception.GlanceMetadataNotFound, - db.volume_glance_metadata_copy_from_volume_to_volume, - self.context, src_vol_id, dst_vol['id']) - - # ensure that status of volume is 'available' - vol = db.volume_get(self.context, dst_vol['id']) - self.assertEqual('available', vol['status']) - - # cleanup resource - db.volume_destroy(self.context, src_vol_id) - db.volume_destroy(self.context, dst_vol['id']) - - @mock.patch('cinder.utils.execute') - def test_create_volume_from_volume_raise_metadata_copy_failure( - self, mock_execute): - # create source volume - mock_execute.return_value = None - src_vol = tests_utils.create_volume(self.context, **self.volume_params) - src_vol_id = src_vol['id'] - - self.volume.create_volume(self.context, src_vol) - # set bootable flag of volume to True - db.volume_update(self.context, src_vol['id'], {'bootable': True}) - - # create volume from source volume - dst_vol = tests_utils.create_volume(self.context, - source_volid=src_vol_id, - **self.volume_params) - self._raise_metadata_copy_failure( - 'volume_glance_metadata_copy_from_volume_to_volume', - dst_vol) - - # cleanup resource - db.volume_destroy(self.context, src_vol_id) - - @mock.patch('cinder.utils.execute') - def test_create_volume_from_snapshot_raise_metadata_copy_failure( - self, mock_execute): - # create source volume - mock_execute.return_value = None - src_vol = tests_utils.create_volume(self.context, **self.volume_params) - src_vol_id = src_vol['id'] - - self.volume.create_volume(self.context, src_vol) - # set 
bootable flag of volume to True - db.volume_update(self.context, src_vol['id'], {'bootable': True}) - - # create volume from snapshot - snapshot_id = create_snapshot(src_vol['id'])['id'] - snapshot_obj = objects.Snapshot.get_by_id(self.context, snapshot_id) - self.volume.create_snapshot(self.context, snapshot_obj) - - # ensure that status of snapshot is 'available' - self.assertEqual(fields.SnapshotStatus.AVAILABLE, snapshot_obj.status) - - dst_vol = tests_utils.create_volume(self.context, - snapshot_id=snapshot_id, - **self.volume_params) - self._raise_metadata_copy_failure( - 'volume_glance_metadata_copy_to_volume', - dst_vol) - - # cleanup resource - snapshot_obj.destroy() - db.volume_destroy(self.context, src_vol_id) - - @mock.patch( - 'cinder.volume.driver.VolumeDriver.create_replica_test_volume') - @mock.patch('cinder.utils.execute') - def test_create_volume_from_srcreplica_raise_metadata_copy_failure( - self, mock_execute, _create_replica_test): - mock_execute.return_value = None - _create_replica_test.return_value = None - # create source volume - src_vol = tests_utils.create_volume(self.context, **self.volume_params) - src_vol_id = src_vol['id'] - - self.volume.create_volume(self.context, src_vol) - # set bootable flag of volume to True - db.volume_update(self.context, src_vol['id'], {'bootable': True}) - - # create volume from source volume - dst_vol = tests_utils.create_volume(self.context, - source_volid=src_vol_id, - **self.volume_params) - self._raise_metadata_copy_failure( - 'volume_glance_metadata_copy_from_volume_to_volume', - dst_vol) - - # cleanup resource - db.volume_destroy(self.context, src_vol_id) - - @mock.patch('cinder.utils.execute') - def test_create_volume_from_snapshot_with_glance_volume_metadata_none( - self, mock_execute): - # create source volume - mock_execute.return_value = None - src_vol = tests_utils.create_volume(self.context, **self.volume_params) - src_vol_id = src_vol['id'] - - self.volume.create_volume(self.context, src_vol) 
- # set bootable flag of volume to True - db.volume_update(self.context, src_vol['id'], {'bootable': True}) - - volume = db.volume_get(self.context, src_vol_id) - - # create snapshot of volume - snapshot_id = create_snapshot(volume['id'])['id'] - snapshot_obj = objects.Snapshot.get_by_id(self.context, snapshot_id) - self.volume.create_snapshot(self.context, snapshot_obj) - - # ensure that status of snapshot is 'available' - self.assertEqual(fields.SnapshotStatus.AVAILABLE, snapshot_obj.status) - - # create volume from snapshot - dst_vol = tests_utils.create_volume(self.context, - snapshot_id=snapshot_id, - **self.volume_params) - self.volume.create_volume(self.context, dst_vol) - - self.assertRaises(exception.GlanceMetadataNotFound, - db.volume_glance_metadata_copy_to_volume, - self.context, dst_vol['id'], snapshot_id) - - # ensure that status of volume is 'available' - vol = db.volume_get(self.context, dst_vol['id']) - self.assertEqual('available', vol['status']) - - # cleanup resource - snapshot_obj.destroy() - db.volume_destroy(self.context, src_vol_id) - db.volume_destroy(self.context, dst_vol['id']) - - @mock.patch( - 'cinder.volume.driver.VolumeDriver.create_replica_test_volume') - def test_create_volume_from_srcreplica_with_glance_volume_metadata_none( - self, _create_replica_test): - """Test volume can be created from a volume replica.""" - _create_replica_test.return_value = None - - volume_src = tests_utils.create_volume(self.context, - **self.volume_params) - self.volume.create_volume(self.context, volume_src) - db.volume_update(self.context, volume_src['id'], {'bootable': True}) - - volume = db.volume_get(self.context, volume_src['id']) - volume_dst = tests_utils.create_volume( - self.context, - **self.volume_params) - self.volume.create_volume(self.context, volume_dst, - {'source_replicaid': volume.id}) - - self.assertRaises(exception.GlanceMetadataNotFound, - db.volume_glance_metadata_copy_from_volume_to_volume, - self.context, volume_src['id'], 
volume_dst['id']) - - self.assertEqual('available', - db.volume_get(self.context, - volume_dst['id']).status) - self.assertTrue(_create_replica_test.called) - - # cleanup resource - db.volume_destroy(self.context, volume_dst['id']) - db.volume_destroy(self.context, volume_src['id']) - - @mock.patch.object(key_manager, 'API', fake_keymgr.fake_api) - def test_create_volume_from_snapshot_with_encryption(self): - """Test volume can be created from a snapshot of an encrypted volume""" - ctxt = context.get_admin_context() - cipher = 'aes-xts-plain64' - key_size = 256 - - db.volume_type_create(ctxt, - {'id': '61298380-0c12-11e3-bfd6-4b48424183be', - 'name': 'LUKS'}) - db.volume_type_encryption_create( - ctxt, - '61298380-0c12-11e3-bfd6-4b48424183be', - {'control_location': 'front-end', 'provider': ENCRYPTION_PROVIDER, - 'cipher': cipher, 'key_size': key_size}) - - volume_api = cinder.volume.api.API() - - db_vol_type = db.volume_type_get_by_name(context.get_admin_context(), - 'LUKS') - volume_src = volume_api.create(self.context, - 1, - 'name', - 'description', - volume_type=db_vol_type) - - volume_src['host'] = 'fake_host' - snapshot_ref = volume_api.create_snapshot_force(self.context, - volume_src, - 'name', - 'description') - snapshot_ref['status'] = fields.SnapshotStatus.AVAILABLE - # status must be available - volume_dst = volume_api.create(self.context, - 1, - 'name', - 'description', - snapshot=snapshot_ref) - self.assertEqual(volume_dst['id'], - db.volume_get( - context.get_admin_context(), - volume_dst['id']).id) - self.assertEqual(snapshot_ref['id'], - db.volume_get(context.get_admin_context(), - volume_dst['id']).snapshot_id) - - # ensure encryption keys match - self.assertIsNotNone(volume_src['encryption_key_id']) - self.assertIsNotNone(volume_dst['encryption_key_id']) - - key_manager = volume_api.key_manager # must use *same* key manager - volume_src_key = key_manager.get(self.context, - volume_src['encryption_key_id']) - volume_dst_key = 
key_manager.get(self.context, - volume_dst['encryption_key_id']) - self.assertEqual(volume_src_key, volume_dst_key) - - def test_create_volume_from_encrypted_volume(self): - """Test volume can be created from an encrypted volume.""" - self.mock_object(key_manager, 'API', fake_keymgr.fake_api) - cipher = 'aes-xts-plain64' - key_size = 256 - - volume_api = cinder.volume.api.API() - - ctxt = context.get_admin_context() - - db.volume_type_create(ctxt, - {'id': '61298380-0c12-11e3-bfd6-4b48424183be', - 'name': 'LUKS'}) - db.volume_type_encryption_create( - ctxt, - '61298380-0c12-11e3-bfd6-4b48424183be', - {'control_location': 'front-end', 'provider': ENCRYPTION_PROVIDER, - 'cipher': cipher, 'key_size': key_size}) - - db_vol_type = db.volume_type_get_by_name(context.get_admin_context(), - 'LUKS') - volume_src = volume_api.create(self.context, - 1, - 'name', - 'description', - volume_type=db_vol_type) - volume_src['status'] = 'available' # status must be available - volume_dst = volume_api.create(self.context, - 1, - 'name', - 'description', - source_volume=volume_src) - self.assertEqual(volume_dst['id'], - db.volume_get(context.get_admin_context(), - volume_dst['id']).id) - self.assertEqual(volume_src['id'], - db.volume_get(context.get_admin_context(), - volume_dst['id']).source_volid) - - # ensure encryption keys match - self.assertIsNotNone(volume_src['encryption_key_id']) - self.assertIsNotNone(volume_dst['encryption_key_id']) - - km = volume_api.key_manager # must use *same* key manager - volume_src_key = km.get(self.context, - volume_src['encryption_key_id']) - volume_dst_key = km.get(self.context, - volume_dst['encryption_key_id']) - self.assertEqual(volume_src_key, volume_dst_key) - - def test_delete_encrypted_volume(self): - self.volume_params['status'] = 'active' - volume = tests_utils.create_volume(self.context, - **self.volume_params) - vol_api = cinder.volume.api.API() - with mock.patch.object( - vol_api.key_manager, - 'delete', - side_effect=Exception): - 
self.assertRaises(exception.InvalidVolume, - vol_api.delete, - self.context, volume) - - def test_create_volume_from_snapshot_fail_bad_size(self): - """Test volume can't be created from snapshot with bad volume size.""" - volume_api = cinder.volume.api.API() - - snapshot = {'id': fake.SNAPSHOT_ID, - 'status': fields.SnapshotStatus.AVAILABLE, - 'volume_size': 10} - snapshot_obj = fake_snapshot.fake_snapshot_obj(self.context, - **snapshot) - self.assertRaises(exception.InvalidInput, - volume_api.create, - self.context, - size=1, - name='fake_name', - description='fake_desc', - snapshot=snapshot_obj) - - def test_create_volume_from_snapshot_fail_wrong_az(self): - """Test volume can't be created from snapshot in a different az.""" - volume_api = cinder.volume.api.API() - - def fake_list_availability_zones(enable_cache=False): - return ({'name': 'nova', 'available': True}, - {'name': 'az2', 'available': True}) - - self.mock_object(volume_api, - 'list_availability_zones', - fake_list_availability_zones) - - volume_src = tests_utils.create_volume(self.context, - availability_zone='az2', - **self.volume_params) - self.volume.create_volume(self.context, volume_src) - snapshot = create_snapshot(volume_src['id']) - - self.volume.create_snapshot(self.context, snapshot) - - volume_dst = volume_api.create(self.context, - size=1, - name='fake_name', - description='fake_desc', - snapshot=snapshot) - self.assertEqual('az2', volume_dst['availability_zone']) - - self.assertRaises(exception.InvalidInput, - volume_api.create, - self.context, - size=1, - name='fake_name', - description='fake_desc', - snapshot=snapshot, - availability_zone='nova') - - def test_create_volume_with_invalid_exclusive_options(self): - """Test volume create with multiple exclusive options fails.""" - volume_api = cinder.volume.api.API() - self.assertRaises(exception.InvalidInput, - volume_api.create, - self.context, - 1, - 'name', - 'description', - snapshot=fake.SNAPSHOT_ID, - image_id=fake.IMAGE_ID, - 
source_volume=fake.VOLUME_ID) - - def test_reserve_volume_success(self): - volume = tests_utils.create_volume(self.context, status='available') - cinder.volume.api.API().reserve_volume(self.context, volume) - volume_db = db.volume_get(self.context, volume.id) - self.assertEqual('attaching', volume_db.status) - db.volume_destroy(self.context, volume.id) - - def test_reserve_volume_in_attaching(self): - self._test_reserve_volume_bad_status('attaching') - - def test_reserve_volume_in_maintenance(self): - self._test_reserve_volume_bad_status('maintenance') - - def _test_reserve_volume_bad_status(self, status): - volume = tests_utils.create_volume(self.context, status=status) - self.assertRaises(exception.InvalidVolume, - cinder.volume.api.API().reserve_volume, - self.context, - volume) - db.volume_destroy(self.context, volume.id) - - def test_unreserve_volume_success_in_use(self): - UUID = six.text_type(uuid.uuid4()) - volume = tests_utils.create_volume(self.context, status='attaching') - tests_utils.attach_volume(self.context, volume.id, UUID, - 'attached_host', 'mountpoint', mode='rw') - - cinder.volume.api.API().unreserve_volume(self.context, volume) - - db_volume = db.volume_get(self.context, volume.id) - self.assertEqual('in-use', db_volume.status) - - def test_unreserve_volume_success_available(self): - volume = tests_utils.create_volume(self.context, status='attaching') - - cinder.volume.api.API().unreserve_volume(self.context, volume) - - db_volume = db.volume_get(self.context, volume.id) - self.assertEqual('available', db_volume.status) - - def test_multi_node(self): - # TODO(termie): Figure out how to test with two nodes, - # each of them having a different FLAG for storage_node - # This will allow us to test cross-node interactions - pass - - def test_cannot_delete_volume_in_use(self): - """Test volume can't be deleted in in-use status.""" - self._test_cannot_delete_volume('in-use') - - def test_cannot_delete_volume_maintenance(self): - """Test volume can't 
be deleted in maintenance status.""" - self._test_cannot_delete_volume('maintenance') - - def _test_cannot_delete_volume(self, status): - """Test volume can't be deleted in invalid stats.""" - # create a volume and assign to host - volume = tests_utils.create_volume(self.context, CONF.host, - status=status) - - # 'in-use' status raises InvalidVolume - self.assertRaises(exception.InvalidVolume, - self.volume_api.delete, - self.context, - volume) - - # clean up - self.volume.delete_volume(self.context, volume) - - def test_force_delete_volume(self): - """Test volume can be forced to delete.""" - # create a volume and assign to host - self.volume_params['status'] = 'error_deleting' - volume = tests_utils.create_volume(self.context, **self.volume_params) - - # 'error_deleting' volumes can't be deleted - self.assertRaises(exception.InvalidVolume, - self.volume_api.delete, - self.context, - volume) - - # delete with force - self.volume_api.delete(self.context, volume, force=True) - - # status is deleting - volume = objects.Volume.get_by_id(context.get_admin_context(), - volume.id) - self.assertEqual('deleting', volume.status) - - # clean up - self.volume.delete_volume(self.context, volume) - - def test_cannot_force_delete_attached_volume(self): - """Test volume can't be force delete in attached state.""" - volume = tests_utils.create_volume(self.context, CONF.host, - status='in-use', - attach_status= - fields.VolumeAttachStatus.ATTACHED) - - self.assertRaises(exception.InvalidVolume, - self.volume_api.delete, - self.context, - volume, - force=True) - - db.volume_destroy(self.context, volume.id) - - def test__revert_to_snapshot_generic_failed(self): - fake_volume = tests_utils.create_volume(self.context, - status='available') - fake_snapshot = tests_utils.create_snapshot(self.context, - fake_volume.id) - with mock.patch.object( - self.volume.driver, - '_create_temp_volume_from_snapshot') as mock_temp, \ - mock.patch.object( - self.volume.driver, - 'delete_volume') as 
mock_driver_delete, \ - mock.patch.object( - self.volume, '_copy_volume_data') as mock_copy: - temp_volume = tests_utils.create_volume(self.context, - status='available') - mock_copy.side_effect = [exception.VolumeDriverException('error')] - mock_temp.return_value = temp_volume - - self.assertRaises(exception.VolumeDriverException, - self.volume._revert_to_snapshot_generic, - self.context, fake_volume, fake_snapshot) - - mock_copy.assert_called_once_with( - self.context, temp_volume, fake_volume) - mock_driver_delete.assert_called_once_with(temp_volume) - - def test__revert_to_snapshot_generic(self): - fake_volume = tests_utils.create_volume(self.context, - status='available') - fake_snapshot = tests_utils.create_snapshot(self.context, - fake_volume.id) - with mock.patch.object( - self.volume.driver, - '_create_temp_volume_from_snapshot') as mock_temp,\ - mock.patch.object( - self.volume.driver, 'delete_volume') as mock_driver_delete,\ - mock.patch.object( - self.volume, '_copy_volume_data') as mock_copy: - temp_volume = tests_utils.create_volume(self.context, - status='available') - mock_temp.return_value = temp_volume - self.volume._revert_to_snapshot_generic( - self.context, fake_volume, fake_snapshot) - mock_copy.assert_called_once_with( - self.context, temp_volume, fake_volume) - mock_driver_delete.assert_called_once_with(temp_volume) - - @ddt.data({'driver_error': True}, - {'driver_error': False}) - @ddt.unpack - def test__revert_to_snapshot(self, driver_error): - mock.patch.object(self.volume, '_notify_about_snapshot_usage') - with mock.patch.object(self.volume.driver, - 'revert_to_snapshot') as driver_revert, \ - mock.patch.object(self.volume, '_notify_about_volume_usage'), \ - mock.patch.object(self.volume, '_notify_about_snapshot_usage'),\ - mock.patch.object(self.volume, - '_revert_to_snapshot_generic') as generic_revert: - if driver_error: - driver_revert.side_effect = [NotImplementedError] - else: - driver_revert.return_value = None - - 
self.volume._revert_to_snapshot(self.context, {}, {}) - - driver_revert.assert_called_once_with(self.context, {}, {}) - if driver_error: - generic_revert.assert_called_once_with(self.context, {}, {}) - - @ddt.data(True, False) - def test_revert_to_snapshot(self, has_snapshot): - fake_volume = tests_utils.create_volume(self.context, - status='reverting', - project_id='123', - size=2) - fake_snapshot = tests_utils.create_snapshot(self.context, - fake_volume['id'], - status='restoring', - volume_size=1) - with mock.patch.object(self.volume, - '_revert_to_snapshot') as _revert,\ - mock.patch.object(self.volume, - '_create_backup_snapshot') as _create_snapshot,\ - mock.patch.object(self.volume, - 'delete_snapshot') as _delete_snapshot: - _revert.return_value = None - if has_snapshot: - _create_snapshot.return_value = {'id': 'fake_snapshot'} - else: - _create_snapshot.return_value = None - self.volume.revert_to_snapshot(self.context, fake_volume, - fake_snapshot) - _revert.assert_called_once_with(self.context, fake_volume, - fake_snapshot) - _create_snapshot.assert_called_once_with(self.context, fake_volume) - if has_snapshot: - _delete_snapshot.assert_called_once_with( - self.context, {'id': 'fake_snapshot'}, handle_quota=False) - else: - _delete_snapshot.assert_not_called() - fake_volume.refresh() - fake_snapshot.refresh() - self.assertEqual('available', fake_volume['status']) - self.assertEqual('available', fake_snapshot['status']) - self.assertEqual(2, fake_volume['size']) - - def test_revert_to_snapshot_failed(self): - fake_volume = tests_utils.create_volume(self.context, - status='reverting', - project_id='123', - size=2) - fake_snapshot = tests_utils.create_snapshot(self.context, - fake_volume['id'], - status='restoring', - volume_size=1) - with mock.patch.object(self.volume, - '_revert_to_snapshot') as _revert, \ - mock.patch.object(self.volume, - '_create_backup_snapshot'), \ - mock.patch.object(self.volume, - 'delete_snapshot') as _delete_snapshot: - 
_revert.side_effect = [exception.VolumeDriverException( - message='fake_message')] - self.assertRaises(exception.VolumeDriverException, - self.volume.revert_to_snapshot, - self.context, fake_volume, - fake_snapshot) - _revert.assert_called_once_with(self.context, fake_volume, - fake_snapshot) - _delete_snapshot.assert_not_called() - fake_volume.refresh() - fake_snapshot.refresh() - self.assertEqual('error', fake_volume['status']) - self.assertEqual('available', fake_snapshot['status']) - self.assertEqual(2, fake_volume['size']) - - def test_cannot_delete_volume_with_snapshots(self): - """Test volume can't be deleted with dependent snapshots.""" - volume = tests_utils.create_volume(self.context, **self.volume_params) - self.volume.create_volume(self.context, volume) - snapshot = create_snapshot(volume['id'], size=volume['size']) - self.volume.create_snapshot(self.context, snapshot) - self.assertEqual( - snapshot.id, objects.Snapshot.get_by_id(self.context, - snapshot.id).id) - - volume['status'] = 'available' - volume['host'] = 'fakehost' - - volume_api = cinder.volume.api.API() - - self.assertRaises(exception.InvalidVolume, - volume_api.delete, - self.context, - volume) - self.volume.delete_snapshot(self.context, snapshot) - self.volume.delete_volume(self.context, volume) - - def test_can_delete_errored_snapshot(self): - """Test snapshot can be created and deleted.""" - volume = tests_utils.create_volume(self.context, CONF.host) - - snapshot = create_snapshot(volume.id, size=volume['size'], - ctxt=self.context, - status=fields.SnapshotStatus.ERROR) - - self.volume_api.delete_snapshot(self.context, snapshot) - - self.assertEqual(fields.SnapshotStatus.DELETING, snapshot.status) - self.volume.delete_volume(self.context, volume) - - def test_create_snapshot_set_worker(self): - volume = tests_utils.create_volume(self.context) - snapshot = create_snapshot(volume.id, size=volume['size'], - ctxt=self.context, - status=fields.SnapshotStatus.CREATING) - - 
self.volume.create_snapshot(self.context, snapshot) - - volume.set_worker.assert_called_once_with() - - def test_cannot_delete_snapshot_with_bad_status(self): - volume = tests_utils.create_volume(self.context, CONF.host) - snapshot = create_snapshot(volume.id, size=volume['size'], - ctxt=self.context, - status=fields.SnapshotStatus.CREATING) - self.assertRaises(exception.InvalidSnapshot, - self.volume_api.delete_snapshot, - self.context, - snapshot) - - snapshot.status = fields.SnapshotStatus.ERROR - snapshot.save() - self.volume_api.delete_snapshot(self.context, snapshot) - - self.assertEqual(fields.SnapshotStatus.DELETING, snapshot.status) - self.volume.delete_volume(self.context, volume) - - @mock.patch.object(QUOTAS, "rollback") - @mock.patch.object(QUOTAS, "commit") - @mock.patch.object(QUOTAS, "reserve", return_value=["RESERVATION"]) - def _do_test_create_volume_with_size(self, size, *_unused_quota_mocks): - volume_api = cinder.volume.api.API() - - volume = volume_api.create(self.context, - size, - 'name', - 'description') - self.assertEqual(int(size), volume['size']) - - def test_create_volume_int_size(self): - """Test volume creation with int size.""" - self._do_test_create_volume_with_size(2) - - def test_create_volume_string_size(self): - """Test volume creation with string size.""" - self._do_test_create_volume_with_size('2') - - @mock.patch.object(QUOTAS, "rollback") - @mock.patch.object(QUOTAS, "commit") - @mock.patch.object(QUOTAS, "reserve", return_value=["RESERVATION"]) - def test_create_volume_with_bad_size(self, *_unused_quota_mocks): - volume_api = cinder.volume.api.API() - - self.assertRaises(exception.InvalidInput, - volume_api.create, - self.context, - '2Gb', - 'name', - 'description') - - def test_create_volume_with_float_fails(self): - """Test volume creation with invalid float size.""" - volume_api = cinder.volume.api.API() - self.assertRaises(exception.InvalidInput, - volume_api.create, - self.context, - '1.5', - 'name', - 'description') - 
- def test_create_volume_with_zero_size_fails(self): - """Test volume creation with string size.""" - volume_api = cinder.volume.api.API() - self.assertRaises(exception.InvalidInput, - volume_api.create, - self.context, - '0', - 'name', - 'description') - - def test_begin_detaching_fails_available(self): - volume_api = cinder.volume.api.API() - volume = tests_utils.create_volume(self.context, status='available') - # Volume status is 'available'. - self.assertRaises(exception.InvalidVolume, volume_api.begin_detaching, - self.context, volume) - - db.volume_update(self.context, volume.id, - {'status': 'in-use', - 'attach_status': - fields.VolumeAttachStatus.DETACHED}) - # Should raise an error since not attached - self.assertRaises(exception.InvalidVolume, volume_api.begin_detaching, - self.context, volume) - - db.volume_update(self.context, volume.id, - {'attach_status': - fields.VolumeAttachStatus.ATTACHED}) - # Ensure when attached no exception raised - volume_api.begin_detaching(self.context, volume) - - volume_api.update(self.context, volume, {'status': 'maintenance'}) - self.assertRaises(exception.InvalidVolume, volume_api.begin_detaching, - self.context, volume) - db.volume_destroy(self.context, volume.id) - - def test_begin_roll_detaching_volume(self): - """Test begin_detaching and roll_detaching functions.""" - - instance_uuid = '12345678-1234-5678-1234-567812345678' - volume = tests_utils.create_volume(self.context, **self.volume_params) - attachment = db.volume_attach(self.context, - {'volume_id': volume['id'], - 'attached_host': 'fake-host'}) - db.volume_attached(self.context, attachment['id'], instance_uuid, - 'fake-host', 'vdb') - volume_api = cinder.volume.api.API() - volume_api.begin_detaching(self.context, volume) - volume = volume_api.get(self.context, volume['id']) - self.assertEqual("detaching", volume['status']) - volume_api.roll_detaching(self.context, volume) - volume = volume_api.get(self.context, volume['id']) - self.assertEqual("in-use", 
volume['status']) - - def test_volume_api_update(self): - # create a raw vol - volume = tests_utils.create_volume(self.context, **self.volume_params) - # use volume.api to update name - volume_api = cinder.volume.api.API() - update_dict = {'display_name': 'test update name'} - volume_api.update(self.context, volume, update_dict) - # read changes from db - vol = db.volume_get(context.get_admin_context(), volume['id']) - self.assertEqual('test update name', vol['display_name']) - - def test_volume_api_update_maintenance(self): - # create a raw vol - volume = tests_utils.create_volume(self.context, **self.volume_params) - volume['status'] = 'maintenance' - # use volume.api to update name - volume_api = cinder.volume.api.API() - update_dict = {'display_name': 'test update name'} - self.assertRaises(exception.InvalidVolume, volume_api.update, - self.context, volume, update_dict) - - def test_volume_api_get_list_volumes_image_metadata(self): - """Test get_list_volumes_image_metadata in volume API.""" - ctxt = context.get_admin_context() - db.volume_create(ctxt, {'id': 'fake1', 'status': 'available', - 'host': 'test', 'provider_location': '', - 'size': 1}) - db.volume_glance_metadata_create(ctxt, 'fake1', 'key1', 'value1') - db.volume_glance_metadata_create(ctxt, 'fake1', 'key2', 'value2') - db.volume_create(ctxt, {'id': 'fake2', 'status': 'available', - 'host': 'test', 'provider_location': '', - 'size': 1}) - db.volume_glance_metadata_create(ctxt, 'fake2', 'key3', 'value3') - db.volume_glance_metadata_create(ctxt, 'fake2', 'key4', 'value4') - volume_api = cinder.volume.api.API() - results = volume_api.get_list_volumes_image_metadata(ctxt, ['fake1', - 'fake2']) - expect_results = {'fake1': {'key1': 'value1', 'key2': 'value2'}, - 'fake2': {'key3': 'value3', 'key4': 'value4'}} - self.assertEqual(expect_results, results) - - @mock.patch.object(QUOTAS, 'limit_check') - @mock.patch.object(QUOTAS, 'reserve') - def test_extend_attached_volume(self, reserve, limit_check): - 
volume = tests_utils.create_volume(self.context, size=2, - status='available', host=CONF.host) - volume_api = cinder.volume.api.API() - - self.assertRaises(exception.InvalidVolume, - volume_api._extend, - self.context, - volume, 3, attached=True) - - db.volume_update(self.context, volume.id, {'status': 'in-use'}) - reserve.return_value = ["RESERVATION"] - volume_api._extend(self.context, volume, 3, attached=True) - volume.refresh() - self.assertEqual('extending', volume.status) - reserve.assert_called_once_with(self.context, gigabytes=1, - project_id=volume.project_id) - limit_check.side_effect = None - reserve.side_effect = None - db.volume_update(self.context, volume.id, {'status': 'in-use'}) - volume_api.scheduler_rpcapi = mock.MagicMock() - volume_api.scheduler_rpcapi.extend_volume = mock.MagicMock() - volume_api._extend(self.context, volume, 3, attached=True) - - request_spec = { - 'volume_properties': volume, - 'volume_type': {}, - 'volume_id': volume.id - } - volume_api.scheduler_rpcapi.extend_volume.assert_called_once_with( - self.context, volume, 3, ["RESERVATION"], request_spec) - # clean up - self.volume.delete_volume(self.context, volume) - - @mock.patch.object(QUOTAS, 'limit_check') - @mock.patch.object(QUOTAS, 'reserve') - def test_extend_volume(self, reserve, limit_check): - """Test volume can be extended at API level.""" - # create a volume and assign to host - volume = tests_utils.create_volume(self.context, size=2, - status='in-use', host=CONF.host) - volume_api = cinder.volume.api.API() - - # Extend fails when status != available - self.assertRaises(exception.InvalidVolume, - volume_api._extend, - self.context, - volume, - 3) - - db.volume_update(self.context, volume.id, {'status': 'available'}) - # Extend fails when new_size < orig_size - self.assertRaises(exception.InvalidInput, - volume_api._extend, - self.context, - volume, - 1) - - # Extend fails when new_size == orig_size - self.assertRaises(exception.InvalidInput, - volume_api._extend, - 
self.context, - volume, - 2) - - # works when new_size > orig_size - reserve.return_value = ["RESERVATION"] - volume_api._extend(self.context, volume, 3) - volume.refresh() - self.assertEqual('extending', volume.status) - reserve.assert_called_once_with(self.context, gigabytes=1, - project_id=volume.project_id) - - # Test the quota exceeded - db.volume_update(self.context, volume.id, {'status': 'available'}) - reserve.side_effect = exception.OverQuota(overs=['gigabytes'], - quotas={'gigabytes': 20}, - usages={'gigabytes': - {'reserved': 5, - 'in_use': 15}}) - self.assertRaises(exception.VolumeSizeExceedsAvailableQuota, - volume_api._extend, self.context, - volume, 3) - db.volume_update(self.context, volume.id, {'status': 'available'}) - - limit_check.side_effect = exception.OverQuota( - overs=['per_volume_gigabytes'], quotas={'per_volume_gigabytes': 2}) - self.assertRaises(exception.VolumeSizeExceedsLimit, - volume_api._extend, self.context, - volume, 3) - - # Test scheduler path - limit_check.side_effect = None - reserve.side_effect = None - db.volume_update(self.context, volume.id, {'status': 'available'}) - volume_api.scheduler_rpcapi = mock.MagicMock() - volume_api.scheduler_rpcapi.extend_volume = mock.MagicMock() - - volume_api._extend(self.context, volume, 3) - - request_spec = { - 'volume_properties': volume, - 'volume_type': {}, - 'volume_id': volume.id - } - volume_api.scheduler_rpcapi.extend_volume.assert_called_once_with( - self.context, volume, 3, ["RESERVATION"], request_spec) - - # clean up - self.volume.delete_volume(self.context, volume) - - def test_extend_volume_driver_not_initialized(self): - """Test volume can be extended at API level.""" - # create a volume and assign to host - fake_reservations = ['RESERVATION'] - volume = tests_utils.create_volume(self.context, size=2, - status='available', - host=CONF.host) - self.volume.create_volume(self.context, volume) - - self.volume.driver._initialized = False - - 
self.assertRaises(exception.DriverNotInitialized, - self.volume.extend_volume, - self.context, volume, 3, - fake_reservations) - - volume.refresh() - self.assertEqual('error_extending', volume.status) - - # lets cleanup the mess. - self.volume.driver._initialized = True - self.volume.delete_volume(self.context, volume) - - def _test_extend_volume_manager_fails_with_exception(self, volume): - fake_reservations = ['RESERVATION'] - - # Test driver exception - with mock.patch.object(self.volume.driver, - 'extend_volume') as extend_volume: - extend_volume.side_effect =\ - exception.CinderException('fake exception') - volume['status'] = 'extending' - self.volume.extend_volume(self.context, volume, '4', - fake_reservations) - volume.refresh() - self.assertEqual(2, volume.size) - self.assertEqual('error_extending', volume.status) - - @mock.patch('cinder.compute.API') - def _test_extend_volume_manager_successful(self, volume, nova_api): - """Test volume can be extended at the manager level.""" - def fake_extend(volume, new_size): - volume['size'] = new_size - - nova_extend_volume = nova_api.return_value.extend_volume - fake_reservations = ['RESERVATION'] - orig_status = volume.status - - # Test driver success - with mock.patch.object(self.volume.driver, - 'extend_volume') as extend_volume: - with mock.patch.object(QUOTAS, 'commit') as quotas_commit: - extend_volume.return_value = fake_extend - volume.status = 'extending' - self.volume.extend_volume(self.context, volume, '4', - fake_reservations) - volume.refresh() - self.assertEqual(4, volume.size) - self.assertEqual(orig_status, volume.status) - quotas_commit.assert_called_with( - self.context, - ['RESERVATION'], - project_id=volume.project_id) - if orig_status == 'in-use': - instance_uuids = [ - attachment.instance_uuid - for attachment in volume.volume_attachment] - nova_extend_volume.assert_called_with( - self.context, instance_uuids, volume.id) - - def test_extend_volume_manager_available_fails_with_exception(self): - 
volume = tests_utils.create_volume(self.context, size=2, - status='creating', host=CONF.host) - self.volume.create_volume(self.context, volume) - self._test_extend_volume_manager_fails_with_exception(volume) - self.volume.delete_volume(self.context, volume) - - def test_extend_volume_manager_available_successful(self): - volume = tests_utils.create_volume(self.context, size=2, - status='creating', host=CONF.host) - self.volume.create_volume(self.context, volume) - self._test_extend_volume_manager_successful(volume) - self.volume.delete_volume(self.context, volume) - - def test_extend_volume_manager_in_use_fails_with_exception(self): - volume = tests_utils.create_volume(self.context, size=2, - status='creating', host=CONF.host) - self.volume.create_volume(self.context, volume) - instance_uuid = '12345678-1234-5678-1234-567812345678' - attachment = db.volume_attach(self.context, - {'volume_id': volume.id, - 'attached_host': 'fake-host'}) - db.volume_attached(self.context, attachment.id, instance_uuid, - 'fake-host', 'vdb') - volume.refresh() - self._test_extend_volume_manager_fails_with_exception(volume) - self.volume.detach_volume(self.context, volume.id, attachment.id) - self.volume.delete_volume(self.context, volume) - - def test_extend_volume_manager_in_use_successful(self): - volume = tests_utils.create_volume(self.context, size=2, - status='creating', host=CONF.host) - self.volume.create_volume(self.context, volume) - instance_uuid = '12345678-1234-5678-1234-567812345678' - attachment = db.volume_attach(self.context, - {'volume_id': volume.id, - 'attached_host': 'fake-host'}) - db.volume_attached(self.context, attachment.id, instance_uuid, - 'fake-host', 'vdb') - volume.refresh() - self._test_extend_volume_manager_successful(volume) - self.volume.detach_volume(self.context, volume.id, attachment.id) - self.volume.delete_volume(self.context, volume) - - @mock.patch('cinder.volume.rpcapi.VolumeAPI.extend_volume') - def test_extend_volume_with_volume_type(self, 
mock_rpc_extend): - elevated = context.get_admin_context() - project_id = self.context.project_id - db.volume_type_create(elevated, {'name': 'type', 'extra_specs': {}}) - vol_type = db.volume_type_get_by_name(elevated, 'type') - - volume_api = cinder.volume.api.API() - volume = volume_api.create(self.context, 100, 'name', 'description', - volume_type=vol_type) - try: - usage = db.quota_usage_get(elevated, project_id, 'gigabytes_type') - volumes_in_use = usage.in_use - except exception.QuotaUsageNotFound: - volumes_in_use = 0 - self.assertEqual(100, volumes_in_use) - db.volume_update(self.context, volume.id, {'status': 'available'}) - - volume_api._extend(self.context, volume, 200) - mock_rpc_extend.called_once_with(self.context, volume, 200, mock.ANY) - - try: - usage = db.quota_usage_get(elevated, project_id, 'gigabytes_type') - volumes_reserved = usage.reserved - except exception.QuotaUsageNotFound: - volumes_reserved = 0 - - self.assertEqual(100, volumes_reserved) - - @mock.patch( - 'cinder.volume.driver.VolumeDriver.create_replica_test_volume') - def test_create_volume_from_sourcereplica(self, _create_replica_test): - """Test volume can be created from a volume replica.""" - _create_replica_test.return_value = None - - volume_src = tests_utils.create_volume(self.context, - **self.volume_params) - self.volume.create_volume(self.context, volume_src) - volume_dst = tests_utils.create_volume( - self.context, - **self.volume_params) - self.volume.create_volume(self.context, volume_dst, - {'source_replicaid': volume_src.id}) - self.assertEqual('available', - db.volume_get(context.get_admin_context(), - volume_dst['id']).status) - self.assertTrue(_create_replica_test.called) - self.volume.delete_volume(self.context, volume_dst) - self.volume.delete_volume(self.context, volume_src) - - def test_create_volume_from_sourcevol(self): - """Test volume can be created from a source volume.""" - def fake_create_cloned_volume(volume, src_vref): - pass - - 
self.mock_object(self.volume.driver, 'create_cloned_volume', - fake_create_cloned_volume) - volume_src = tests_utils.create_volume(self.context, - **self.volume_params) - self.volume.create_volume(self.context, volume_src) - volume_dst = tests_utils.create_volume(self.context, - source_volid=volume_src['id'], - **self.volume_params) - self.volume.create_volume(self.context, volume_dst) - volume_dst.refresh() - self.assertEqual('available', volume_dst.status) - self.volume.delete_volume(self.context, volume_dst) - self.volume.delete_volume(self.context, volume_src) - - @mock.patch('cinder.volume.api.API.list_availability_zones', - return_value=({'name': 'nova', 'available': True}, - {'name': 'az2', 'available': True})) - def test_create_volume_from_sourcevol_fail_wrong_az(self, _mock_laz): - """Test volume can't be cloned from an other volume in different az.""" - volume_api = cinder.volume.api.API() - - volume_src = tests_utils.create_volume(self.context, - availability_zone='az2', - **self.volume_params) - self.volume.create_volume(self.context, volume_src) - - volume_src = db.volume_get(self.context, volume_src['id']) - - volume_dst = volume_api.create(self.context, - size=1, - name='fake_name', - description='fake_desc', - source_volume=volume_src) - self.assertEqual('az2', volume_dst['availability_zone']) - - self.assertRaises(exception.InvalidInput, - volume_api.create, - self.context, - size=1, - name='fake_name', - description='fake_desc', - source_volume=volume_src, - availability_zone='nova') - - @mock.patch('cinder.image.image_utils.qemu_img_info') - def test_create_volume_from_sourcevol_with_glance_metadata( - self, mock_qemu_info): - """Test glance metadata can be correctly copied to new volume.""" - def fake_create_cloned_volume(volume, src_vref): - pass - - self.mock_object(self.volume.driver, 'create_cloned_volume', - fake_create_cloned_volume) - image_info = imageutils.QemuImgInfo() - image_info.virtual_size = '1073741824' - 
mock_qemu_info.return_value = image_info - - volume_src = self._create_volume_from_image() - self.volume.create_volume(self.context, volume_src) - volume_dst = tests_utils.create_volume(self.context, - source_volid=volume_src['id'], - **self.volume_params) - self.volume.create_volume(self.context, volume_dst) - self.assertEqual('available', - db.volume_get(context.get_admin_context(), - volume_dst['id']).status) - src_glancemeta = db.volume_get(context.get_admin_context(), - volume_src['id']).volume_glance_metadata - dst_glancemeta = db.volume_get(context.get_admin_context(), - volume_dst['id']).volume_glance_metadata - for meta_src in src_glancemeta: - for meta_dst in dst_glancemeta: - if meta_dst.key == meta_src.key: - self.assertEqual(meta_src.value, meta_dst.value) - self.volume.delete_volume(self.context, volume_src) - self.volume.delete_volume(self.context, volume_dst) - - def test_create_volume_from_sourcevol_failed_clone(self): - """Test src vol status will be restore by error handling code.""" - def fake_error_create_cloned_volume(volume, src_vref): - db.volume_update(self.context, src_vref['id'], {'status': 'error'}) - raise exception.CinderException('fake exception') - - self.mock_object(self.volume.driver, 'create_cloned_volume', - fake_error_create_cloned_volume) - volume_src = tests_utils.create_volume(self.context, - **self.volume_params) - self.assertEqual('creating', volume_src.status) - self.volume.create_volume(self.context, volume_src) - self.assertEqual('available', volume_src.status) - volume_dst = tests_utils.create_volume(self.context, - source_volid=volume_src['id'], - **self.volume_params) - self.assertEqual('creating', volume_dst.status) - self.assertRaises(exception.CinderException, - self.volume.create_volume, - self.context, - volume_dst) - # Source volume's status is still available and dst is set to error - self.assertEqual('available', volume_src.status) - self.assertEqual('error', volume_dst.status) - 
self.volume.delete_volume(self.context, volume_dst) - self.volume.delete_volume(self.context, volume_src) - - def test_clean_temporary_volume(self): - def fake_delete_volume(ctxt, volume): - volume.destroy() - - fake_volume = tests_utils.create_volume(self.context, size=1, - host=CONF.host, - migration_status='migrating') - fake_new_volume = tests_utils.create_volume(self.context, size=1, - host=CONF.host) - # 1. Only clean the db - self.volume._clean_temporary_volume(self.context, fake_volume, - fake_new_volume, - clean_db_only=True) - self.assertRaises(exception.VolumeNotFound, - db.volume_get, self.context, - fake_new_volume.id) - - # 2. Delete the backend storage - fake_new_volume = tests_utils.create_volume(self.context, size=1, - host=CONF.host) - with mock.patch.object(volume_rpcapi.VolumeAPI, 'delete_volume') as \ - mock_delete_volume: - mock_delete_volume.side_effect = fake_delete_volume - self.volume._clean_temporary_volume(self.context, - fake_volume, - fake_new_volume, - clean_db_only=False) - self.assertRaises(exception.VolumeNotFound, - db.volume_get, self.context, - fake_new_volume.id) - - # Check when the migrated volume is not in migration - fake_new_volume = tests_utils.create_volume(self.context, size=1, - host=CONF.host) - fake_volume.migration_status = 'non-migrating' - fake_volume.save() - self.volume._clean_temporary_volume(self.context, fake_volume, - fake_new_volume) - volume = db.volume_get(context.get_admin_context(), - fake_new_volume.id) - self.assertIsNone(volume.migration_status) - - def test_check_volume_filters_true(self): - """Test bootable as filter for true""" - volume_api = cinder.volume.api.API() - filters = {'bootable': 'TRUE'} - - # To convert filter value to True or False - volume_api.check_volume_filters(filters) - - # Confirming converted filter value against True - self.assertTrue(filters['bootable']) - - def test_check_volume_filters_false(self): - """Test bootable as filter for false""" - volume_api = 
cinder.volume.api.API() - filters = {'bootable': 'false'} - - # To convert filter value to True or False - volume_api.check_volume_filters(filters) - - # Confirming converted filter value against False - self.assertEqual(False, filters['bootable']) - - def test_check_volume_filters_invalid(self): - """Test bootable as filter""" - volume_api = cinder.volume.api.API() - filters = {'bootable': 'invalid'} - - # To convert filter value to True or False - volume_api.check_volume_filters(filters) - - # Confirming converted filter value against invalid value - self.assertTrue(filters['bootable']) - - def test_update_volume_readonly_flag(self): - """Test volume readonly flag can be updated at API level.""" - # create a volume and assign to host - volume = tests_utils.create_volume(self.context, - admin_metadata={'readonly': 'True'}, - **self.volume_params) - self.volume.create_volume(self.context, volume) - volume.status = 'in-use' - - def sort_func(obj): - return obj['name'] - - volume_api = cinder.volume.api.API() - - # Update fails when status != available - self.assertRaises(exception.InvalidVolume, - volume_api.update_readonly_flag, - self.context, - volume, - False) - - volume.status = 'available' - - # works when volume in 'available' status - volume_api.update_readonly_flag(self.context, volume, False) - - volume.refresh() - self.assertEqual('available', volume.status) - admin_metadata = volume.volume_admin_metadata - self.assertEqual(1, len(admin_metadata)) - self.assertEqual('readonly', admin_metadata[0]['key']) - self.assertEqual('False', admin_metadata[0]['value']) - - # clean up - self.volume.delete_volume(self.context, volume) - - def test_secure_file_operations_enabled(self): - """Test secure file operations setting for base driver. - - General, non network file system based drivers do not have - anything to do with "secure_file_operations". This test verifies that - calling the method always returns False. 
- """ - ret_flag = self.volume.driver.secure_file_operations_enabled() - self.assertFalse(ret_flag) - - @mock.patch.object(driver.BaseVD, 'secure_file_operations_enabled') - def test_secure_file_operations_enabled_2(self, mock_secure): - mock_secure.return_value = True - vol = tests_utils.create_volume(self.context) - result = self.volume.secure_file_operations_enabled(self.context, - vol) - mock_secure.assert_called_once_with() - self.assertTrue(result) - - @mock.patch('cinder.volume.flows.common.make_pretty_name', - new=mock.MagicMock()) - @mock.patch('cinder.scheduler.rpcapi.SchedulerAPI.create_volume', - return_value=None) - @mock.patch('cinder.volume.flows.manager.create_volume.' - 'CreateVolumeFromSpecTask.execute', - side_effect=exception.DriverNotInitialized()) - def test_create_volume_raise_rescheduled_exception(self, mock_execute, - mock_reschedule): - # Create source volume - test_vol = tests_utils.create_volume(self.context, - **self.volume_params) - test_vol_id = test_vol['id'] - self.assertRaises(exception.DriverNotInitialized, - self.volume.create_volume, - self.context, test_vol, - {'volume_properties': self.volume_params}, - {'retry': {'num_attempts': 1, 'host': []}}) - self.assertTrue(mock_reschedule.called) - volume = db.volume_get(context.get_admin_context(), test_vol_id) - self.assertEqual('creating', volume['status']) - - @mock.patch('cinder.volume.flows.manager.create_volume.' 
- 'CreateVolumeFromSpecTask.execute') - def test_create_volume_raise_unrescheduled_exception(self, mock_execute): - # create source volume - test_vol = tests_utils.create_volume(self.context, - **self.volume_params) - test_vol_id = test_vol['id'] - mock_execute.side_effect = exception.VolumeNotFound( - volume_id=test_vol_id) - self.assertRaises(exception.VolumeNotFound, - self.volume.create_volume, - self.context, test_vol, - {'volume_properties': self.volume_params}, - {'retry': {'num_attempts': 1, 'host': []}}) - volume = db.volume_get(context.get_admin_context(), test_vol_id) - self.assertEqual('error', volume['status']) - - def test_cascade_delete_volume_with_snapshots(self): - """Test volume deletion with dependent snapshots.""" - volume = tests_utils.create_volume(self.context, **self.volume_params) - self.volume.create_volume(self.context, volume) - snapshot = create_snapshot(volume['id'], size=volume['size']) - self.volume.create_snapshot(self.context, snapshot) - self.assertEqual( - snapshot.id, objects.Snapshot.get_by_id(self.context, - snapshot.id).id) - - volume['status'] = 'available' - volume['host'] = 'fakehost' - - volume_api = cinder.volume.api.API() - - volume_api.delete(self.context, - volume, - cascade=True) - - def test_cascade_delete_volume_with_snapshots_error(self): - """Test volume deletion with dependent snapshots.""" - volume = tests_utils.create_volume(self.context, **self.volume_params) - self.volume.create_volume(self.context, volume) - snapshot = create_snapshot(volume['id'], size=volume['size']) - self.volume.create_snapshot(self.context, snapshot) - self.assertEqual( - snapshot.id, objects.Snapshot.get_by_id(self.context, - snapshot.id).id) - - snapshot.update({'status': fields.SnapshotStatus.CREATING}) - snapshot.save() - - volume['status'] = 'available' - volume['host'] = 'fakehost' - - volume_api = cinder.volume.api.API() - - self.assertRaises(exception.InvalidVolume, - volume_api.delete, - self.context, - volume, - cascade=True) 
- - def test_cascade_force_delete_volume_with_snapshots_error(self): - """Test volume force deletion with errored dependent snapshots.""" - volume = tests_utils.create_volume(self.context, - host='fakehost') - - snapshot = create_snapshot(volume.id, - size=volume.size, - status=fields.SnapshotStatus.ERROR_DELETING) - self.volume.create_snapshot(self.context, snapshot) - - volume_api = cinder.volume.api.API() - - volume_api.delete(self.context, volume, cascade=True, force=True) - - snapshot = objects.Snapshot.get_by_id(self.context, snapshot.id) - self.assertEqual('deleting', snapshot.status) - - volume = objects.Volume.get_by_id(self.context, volume.id) - self.assertEqual('deleting', volume.status) - - def test_cascade_delete_volume_with_snapshots_in_other_project(self): - """Test volume deletion with dependent snapshots in other project.""" - volume = tests_utils.create_volume(self.user_context, - **self.volume_params) - snapshot = create_snapshot(volume['id'], size=volume['size'], - project_id=fake.PROJECT2_ID) - self.volume.create_snapshot(self.context, snapshot) - self.assertEqual( - snapshot.id, objects.Snapshot.get_by_id(self.context, - snapshot.id).id) - - volume['status'] = 'available' - volume['host'] = 'fakehost' - - volume_api = cinder.volume.api.API() - - self.assertRaises(exception.InvalidVolume, - volume_api.delete, - self.user_context, - volume, - cascade=True) - - @mock.patch.object(driver.BaseVD, 'get_backup_device') - @mock.patch.object(driver.BaseVD, 'secure_file_operations_enabled') - def test_get_backup_device(self, mock_secure, mock_get_backup): - vol = tests_utils.create_volume(self.context) - backup = tests_utils.create_backup(self.context, vol['id']) - mock_secure.return_value = False - mock_get_backup.return_value = (vol, False) - result = self.volume.get_backup_device(self.context, - backup) - - mock_get_backup.assert_called_once_with(self.context, backup) - mock_secure.assert_called_once_with() - expected_result = {'backup_device': vol, 
'secure_enabled': False, - 'is_snapshot': False} - self.assertEqual(expected_result, result) - - @mock.patch.object(driver.BaseVD, 'get_backup_device') - @mock.patch.object(driver.BaseVD, 'secure_file_operations_enabled') - def test_get_backup_device_want_objects(self, mock_secure, - mock_get_backup): - vol = tests_utils.create_volume(self.context) - backup = tests_utils.create_backup(self.context, vol['id']) - mock_secure.return_value = False - mock_get_backup.return_value = (vol, False) - result = self.volume.get_backup_device(self.context, - backup, want_objects=True) - - mock_get_backup.assert_called_once_with(self.context, backup) - mock_secure.assert_called_once_with() - expected_result = objects.BackupDeviceInfo.from_primitive( - {'backup_device': vol, 'secure_enabled': False, - 'is_snapshot': False}, - self.context) - self.assertEqual(expected_result, result) - - @mock.patch('cinder.tests.fake_driver.FakeLoggingVolumeDriver.' - 'SUPPORTS_ACTIVE_ACTIVE', True) - def test_set_resource_host_different(self): - manager = vol_manager.VolumeManager(host='localhost-1@ceph', - cluster='mycluster@ceph') - volume = tests_utils.create_volume(self.user_context, - host='localhost-2@ceph#ceph', - cluster_name='mycluster@ceph') - manager._set_resource_host(volume) - volume.refresh() - self.assertEqual('localhost-1@ceph#ceph', volume.host) - - @mock.patch('cinder.tests.fake_driver.FakeLoggingVolumeDriver.' 
- 'SUPPORTS_ACTIVE_ACTIVE', True) - def test_set_resource_host_equal(self): - manager = vol_manager.VolumeManager(host='localhost-1@ceph', - cluster='mycluster@ceph') - volume = tests_utils.create_volume(self.user_context, - host='localhost-1@ceph#ceph', - cluster_name='mycluster@ceph') - with mock.patch.object(volume, 'save') as save_mock: - manager._set_resource_host(volume) - save_mock.assert_not_called() - - -class VolumeTestCaseLocks(base.BaseVolumeTestCase): - MOCK_TOOZ = False - - def test_create_volume_from_volume_delete_lock_taken(self): - # create source volume - src_vol = tests_utils.create_volume(self.context, **self.volume_params) - src_vol_id = src_vol['id'] - - # no lock - self.volume.create_volume(self.context, src_vol) - - dst_vol = tests_utils.create_volume(self.context, - source_volid=src_vol_id, - **self.volume_params) - - orig_elevated = self.context.elevated - - gthreads = [] - - def mock_elevated(*args, **kwargs): - # unset mock so it is only called once - self.mock_object(self.context, 'elevated', orig_elevated) - - # we expect this to block and then fail - t = eventlet.spawn(self.volume.create_volume, - self.context, - volume=dst_vol, - request_spec={'source_volid': src_vol_id}) - gthreads.append(t) - - return orig_elevated(*args, **kwargs) - - # mock something from early on in the delete operation and within the - # lock so that when we do the create we expect it to block. - self.mock_object(self.context, 'elevated', mock_elevated) - - # locked - self.volume.delete_volume(self.context, src_vol) - - # we expect the volume create to fail with the following err since the - # source volume was deleted while the create was locked. Note that the - # volume is still in the db since it was created by the test prior to - # calling manager.create_volume. 
- with mock.patch('sys.stderr', new=six.StringIO()): - self.assertRaises(exception.VolumeNotFound, gthreads[0].wait) - - def test_create_volume_from_snapshot_delete_lock_taken(self): - # create source volume - src_vol = tests_utils.create_volume(self.context, **self.volume_params) - - # no lock - self.volume.create_volume(self.context, src_vol) - - # create snapshot - snap_id = create_snapshot(src_vol.id, - size=src_vol['size'])['id'] - snapshot_obj = objects.Snapshot.get_by_id(self.context, snap_id) - # no lock - self.volume.create_snapshot(self.context, snapshot_obj) - - # create vol from snapshot... - dst_vol = tests_utils.create_volume(self.context, - snapshot_id=snap_id, - source_volid=src_vol.id, - **self.volume_params) - - orig_elevated = self.context.elevated - - gthreads = [] - - def mock_elevated(*args, **kwargs): - # unset mock so it is only called once - self.mock_object(self.context, 'elevated', orig_elevated) - - # We expect this to block and then fail - t = eventlet.spawn(self.volume.create_volume, self.context, - volume=dst_vol, - request_spec={'snapshot_id': snap_id}) - gthreads.append(t) - - return orig_elevated(*args, **kwargs) - - # mock something from early on in the delete operation and within the - # lock so that when we do the create we expect it to block. - self.mock_object(self.context, 'elevated', mock_elevated) - - # locked - self.volume.delete_snapshot(self.context, snapshot_obj) - - # we expect the volume create to fail with the following err since the - # snapshot was deleted while the create was locked. Note that the - # volume is still in the db since it was created by the test prior to - # calling manager.create_volume. 
- with mock.patch('sys.stderr', new=six.StringIO()): - self.assertRaises(exception.SnapshotNotFound, gthreads[0].wait) - # locked - self.volume.delete_volume(self.context, src_vol) - # make sure it is gone - self.assertRaises(exception.VolumeNotFound, db.volume_get, - self.context, src_vol.id) diff --git a/cinder/tests/unit/volume/test_volume_migration.py b/cinder/tests/unit/volume/test_volume_migration.py deleted file mode 100644 index ccdd8938e..000000000 --- a/cinder/tests/unit/volume/test_volume_migration.py +++ /dev/null @@ -1,919 +0,0 @@ -# Copyright 2010 United States Government as represented by the -# Administrator of the National Aeronautics and Space Administration. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
-"""Tests for Volume Code.""" - -import ddt -import time - -import mock -import os_brick -from oslo_concurrency import processutils -from oslo_config import cfg -from oslo_utils import imageutils - -from cinder import context -from cinder import db -from cinder import exception -from cinder import objects -from cinder.objects import fields -from cinder import quota -from cinder.tests.unit.api import fakes -from cinder.tests.unit import fake_constants as fake -from cinder.tests.unit import fake_volume -from cinder.tests.unit import utils as tests_utils -from cinder.tests.unit import volume as base -import cinder.volume -from cinder.volume import api as volume_api -from cinder.volume.flows.manager import create_volume as create_volume_manager -from cinder.volume import rpcapi as volume_rpcapi -from cinder.volume import utils as volutils -from cinder.volume import volume_types - - -QUOTAS = quota.QUOTAS - -CONF = cfg.CONF - - -def create_snapshot(volume_id, size=1, metadata=None, ctxt=None, - **kwargs): - """Create a snapshot object.""" - metadata = metadata or {} - snap = objects.Snapshot(ctxt or context.get_admin_context()) - snap.volume_size = size - snap.user_id = kwargs.get('user_id', fake.USER_ID) - snap.project_id = kwargs.get('project_id', fake.PROJECT_ID) - snap.volume_id = volume_id - snap.status = fields.SnapshotStatus.CREATING - if metadata is not None: - snap.metadata = metadata - snap.update(kwargs) - - snap.create() - return snap - - -@ddt.ddt -class VolumeMigrationTestCase(base.BaseVolumeTestCase): - - def setUp(self): - super(VolumeMigrationTestCase, self).setUp() - self._clear_patch = mock.patch('cinder.volume.utils.clear_volume', - autospec=True) - self._clear_patch.start() - self.expected_status = 'available' - - def tearDown(self): - super(VolumeMigrationTestCase, self).tearDown() - self._clear_patch.stop() - - def test_migrate_volume_driver(self): - """Test volume migration done by driver.""" - # Mock driver and rpc functions - 
self.mock_object(self.volume.driver, 'migrate_volume', - lambda x, y, z, new_type_id=None: ( - True, {'user_id': fake.USER_ID})) - - volume = tests_utils.create_volume(self.context, size=0, - host=CONF.host, - migration_status='migrating') - host_obj = {'host': 'newhost', 'capabilities': {}} - self.volume.migrate_volume(self.context, volume, host_obj, False) - - # check volume properties - volume = objects.Volume.get_by_id(context.get_admin_context(), - volume.id) - self.assertEqual('newhost', volume.host) - self.assertEqual('success', volume.migration_status) - - def _fake_create_volume(self, ctxt, volume, req_spec, filters, - allow_reschedule=True): - return db.volume_update(ctxt, volume['id'], - {'status': self.expected_status}) - - def test_migrate_volume_error(self): - with mock.patch.object(self.volume.driver, 'migrate_volume') as \ - mock_migrate,\ - mock.patch.object(self.volume.driver, 'create_export') as \ - mock_create_export: - - # Exception case at self.driver.migrate_volume and create_export - mock_migrate.side_effect = processutils.ProcessExecutionError - mock_create_export.side_effect = processutils.ProcessExecutionError - volume = tests_utils.create_volume(self.context, size=0, - host=CONF.host) - host_obj = {'host': 'newhost', 'capabilities': {}} - self.assertRaises(processutils.ProcessExecutionError, - self.volume.migrate_volume, - self.context, - volume, - host_obj, - False) - volume = objects.Volume.get_by_id(context.get_admin_context(), - volume.id) - self.assertEqual('error', volume.migration_status) - self.assertEqual('available', volume.status) - - @mock.patch('cinder.compute.API') - @mock.patch('cinder.volume.manager.VolumeManager.' 
- 'migrate_volume_completion') - @mock.patch('cinder.db.sqlalchemy.api.volume_get') - def test_migrate_volume_generic(self, volume_get, - migrate_volume_completion, - nova_api): - fake_db_new_volume = {'status': 'available', 'id': fake.VOLUME_ID} - fake_new_volume = fake_volume.fake_db_volume(**fake_db_new_volume) - new_volume_obj = fake_volume.fake_volume_obj(self.context, - **fake_new_volume) - host_obj = {'host': 'newhost', 'capabilities': {}} - volume_get.return_value = fake_new_volume - update_server_volume = nova_api.return_value.update_server_volume - volume = tests_utils.create_volume(self.context, size=1, - host=CONF.host) - with mock.patch.object(self.volume, '_copy_volume_data') as \ - mock_copy_volume: - self.volume._migrate_volume_generic(self.context, volume, - host_obj, None) - mock_copy_volume.assert_called_with(self.context, volume, - new_volume_obj, - remote='dest') - migrate_volume_completion.assert_called_with( - self.context, volume, new_volume_obj, error=False) - self.assertFalse(update_server_volume.called) - - @mock.patch('cinder.compute.API') - @mock.patch('cinder.volume.manager.VolumeManager.' 
- 'migrate_volume_completion') - @mock.patch('cinder.db.sqlalchemy.api.volume_get') - def test_migrate_volume_generic_attached_volume(self, volume_get, - migrate_volume_completion, - nova_api): - attached_host = 'some-host' - fake_volume_id = fake.VOLUME_ID - fake_db_new_volume = {'status': 'available', 'id': fake_volume_id} - fake_new_volume = fake_volume.fake_db_volume(**fake_db_new_volume) - host_obj = {'host': 'newhost', 'capabilities': {}} - fake_uuid = fakes.get_fake_uuid() - update_server_volume = nova_api.return_value.update_server_volume - volume_get.return_value = fake_new_volume - volume = tests_utils.create_volume(self.context, size=1, - host=CONF.host) - volume_attach = tests_utils.attach_volume( - self.context, volume['id'], fake_uuid, attached_host, '/dev/vda') - self.assertIsNotNone(volume_attach['volume_attachment'][0]['id']) - self.assertEqual( - fake_uuid, volume_attach['volume_attachment'][0]['instance_uuid']) - self.assertEqual('in-use', volume_attach['status']) - self.volume._migrate_volume_generic(self.context, volume, - host_obj, None) - self.assertFalse(migrate_volume_completion.called) - update_server_volume.assert_called_with(self.context, fake_uuid, - volume['id'], fake_volume_id) - - @mock.patch('cinder.objects.volume.Volume.save') - @mock.patch('cinder.volume.rpcapi.VolumeAPI.create_volume') - @mock.patch('cinder.compute.API') - @mock.patch('cinder.volume.manager.VolumeManager.' 
- 'migrate_volume_completion') - @mock.patch('cinder.db.sqlalchemy.api.volume_get') - def test_migrate_volume_generic_volume_from_snap(self, volume_get, - migrate_volume_completion, - nova_api, create_volume, - save): - def fake_create_volume(*args, **kwargs): - context, volume, request_spec, filter_properties = args - fake_db = mock.Mock() - task = create_volume_manager.ExtractVolumeSpecTask(fake_db) - specs = task.execute(context, volume, {}) - self.assertEqual('raw', specs['type']) - - def fake_copy_volume_data_with_chk_param(*args, **kwargs): - context, src, dest = args - self.assertEqual(src['snapshot_id'], dest['snapshot_id']) - - fake_db_new_volume = {'status': 'available', 'id': fake.VOLUME_ID} - fake_new_volume = fake_volume.fake_db_volume(**fake_db_new_volume) - host_obj = {'host': 'newhost', 'capabilities': {}} - volume_get.return_value = fake_new_volume - - volume_from_snap = tests_utils.create_volume(self.context, size=1, - host=CONF.host) - volume_from_snap['snapshot_id'] = fake.SNAPSHOT_ID - create_volume.side_effect = fake_create_volume - - with mock.patch.object(self.volume, '_copy_volume_data') as \ - mock_copy_volume: - mock_copy_volume.side_effect = fake_copy_volume_data_with_chk_param - self.volume._migrate_volume_generic(self.context, volume_from_snap, - host_obj, None) - - @mock.patch('cinder.objects.volume.Volume.save') - @mock.patch('cinder.volume.rpcapi.VolumeAPI.create_volume') - @mock.patch('cinder.compute.API') - @mock.patch('cinder.volume.manager.VolumeManager.' 
- 'migrate_volume_completion') - @mock.patch('cinder.db.sqlalchemy.api.volume_get') - def test_migrate_volume_generic_for_clone(self, volume_get, - migrate_volume_completion, - nova_api, create_volume, save): - def fake_create_volume(*args, **kwargs): - context, volume, request_spec, filter_properties = args - fake_db = mock.Mock() - task = create_volume_manager.ExtractVolumeSpecTask(fake_db) - specs = task.execute(context, volume, {}) - self.assertEqual('raw', specs['type']) - - def fake_copy_volume_data_with_chk_param(*args, **kwargs): - context, src, dest = args - self.assertEqual(src['source_volid'], dest['source_volid']) - - fake_db_new_volume = {'status': 'available', 'id': fake.VOLUME_ID} - fake_new_volume = fake_volume.fake_db_volume(**fake_db_new_volume) - host_obj = {'host': 'newhost', 'capabilities': {}} - volume_get.return_value = fake_new_volume - - clone = tests_utils.create_volume(self.context, size=1, - host=CONF.host) - clone['source_volid'] = fake.VOLUME2_ID - create_volume.side_effect = fake_create_volume - - with mock.patch.object(self.volume, '_copy_volume_data') as \ - mock_copy_volume: - mock_copy_volume.side_effect = fake_copy_volume_data_with_chk_param - self.volume._migrate_volume_generic(self.context, clone, - host_obj, None) - - @mock.patch.object(volume_rpcapi.VolumeAPI, 'update_migrated_volume') - @mock.patch.object(volume_rpcapi.VolumeAPI, 'delete_volume') - @mock.patch.object(volume_rpcapi.VolumeAPI, 'create_volume') - def test_migrate_volume_for_volume_generic(self, create_volume, - rpc_delete_volume, - update_migrated_volume): - fake_volume = tests_utils.create_volume(self.context, size=1, - previous_status='available', - host=CONF.host) - - host_obj = {'host': 'newhost', 'capabilities': {}} - with mock.patch.object(self.volume.driver, 'migrate_volume') as \ - mock_migrate_volume,\ - mock.patch.object(self.volume, '_copy_volume_data'),\ - mock.patch.object(self.volume.driver, 'delete_volume') as \ - delete_volume: - 
create_volume.side_effect = self._fake_create_volume - self.volume.migrate_volume(self.context, fake_volume, host_obj, - True) - volume = objects.Volume.get_by_id(context.get_admin_context(), - fake_volume.id) - self.assertEqual('newhost', volume.host) - self.assertEqual('success', volume.migration_status) - self.assertFalse(mock_migrate_volume.called) - self.assertFalse(delete_volume.called) - self.assertTrue(rpc_delete_volume.called) - self.assertTrue(update_migrated_volume.called) - - def test_migrate_volume_generic_copy_error(self): - with mock.patch.object(self.volume.driver, 'migrate_volume'),\ - mock.patch.object(volume_rpcapi.VolumeAPI, 'create_volume')\ - as mock_create_volume,\ - mock.patch.object(self.volume, '_copy_volume_data') as \ - mock_copy_volume,\ - mock.patch.object(volume_rpcapi.VolumeAPI, 'delete_volume'),\ - mock.patch.object(self.volume, 'migrate_volume_completion'),\ - mock.patch.object(self.volume.driver, 'create_export'): - - # Exception case at migrate_volume_generic - # source_volume['migration_status'] is 'migrating' - mock_create_volume.side_effect = self._fake_create_volume - mock_copy_volume.side_effect = processutils.ProcessExecutionError - volume = tests_utils.create_volume(self.context, size=0, - host=CONF.host) - host_obj = {'host': 'newhost', 'capabilities': {}} - self.assertRaises(processutils.ProcessExecutionError, - self.volume.migrate_volume, - self.context, - volume, - host_obj, - True) - volume = objects.Volume.get_by_id(context.get_admin_context(), - volume.id) - self.assertEqual('error', volume.migration_status) - self.assertEqual('available', volume.status) - - @mock.patch('cinder.image.image_utils.qemu_img_info') - def test_migrate_volume_with_glance_metadata(self, mock_qemu_info): - volume = self._create_volume_from_image(clone_image_volume=True) - glance_metadata = volume.glance_metadata - - # We imitate the behavior of rpcapi, by serializing and then - # deserializing the volume object we created earlier. 
- serializer = objects.base.CinderObjectSerializer() - serialized_volume = serializer.serialize_entity(self.context, volume) - volume = serializer.deserialize_entity(self.context, serialized_volume) - - image_info = imageutils.QemuImgInfo() - image_info.virtual_size = '1073741824' - mock_qemu_info.return_value = image_info - - host_obj = {'host': 'newhost', 'capabilities': {}} - with mock.patch.object(self.volume.driver, - 'migrate_volume') as mock_migrate_volume: - mock_migrate_volume.side_effect = ( - lambda x, y, z, new_type_id=None: ( - True, {'user_id': fake.USER_ID})) - self.volume.migrate_volume(self.context, volume, host_obj, - False) - self.assertEqual('newhost', volume.host) - self.assertEqual('success', volume.migration_status) - self.assertEqual(glance_metadata, volume.glance_metadata) - - @mock.patch('cinder.db.volume_update') - def test_update_migrated_volume(self, volume_update): - fake_host = 'fake_host' - fake_new_host = 'fake_new_host' - fake_update = {'_name_id': fake.VOLUME2_NAME_ID, - 'provider_location': 'updated_location'} - fake_elevated = context.RequestContext(fake.USER_ID, self.project_id, - is_admin=True) - volume = tests_utils.create_volume(self.context, size=1, - status='available', - host=fake_host) - new_volume = tests_utils.create_volume( - self.context, size=1, - status='available', - provider_location='fake_provider_location', - _name_id=fake.VOLUME_NAME_ID, - host=fake_new_host) - new_volume._name_id = fake.VOLUME_NAME_ID - new_volume.provider_location = 'fake_provider_location' - fake_update_error = {'_name_id': new_volume._name_id, - 'provider_location': - new_volume.provider_location} - expected_update = {'_name_id': volume._name_id, - 'provider_location': volume.provider_location} - with mock.patch.object(self.volume.driver, - 'update_migrated_volume') as migrate_update,\ - mock.patch.object(self.context, 'elevated') as elevated: - migrate_update.return_value = fake_update - elevated.return_value = fake_elevated - 
self.volume.update_migrated_volume(self.context, volume, - new_volume, 'available') - volume_update.assert_has_calls(( - mock.call(fake_elevated, new_volume.id, expected_update), - mock.call(fake_elevated, volume.id, fake_update))) - - # Test the case for update_migrated_volume not implemented - # for the driver. - migrate_update.reset_mock() - volume_update.reset_mock() - # Reset the volume objects to their original value, since they - # were changed in the last call. - new_volume._name_id = fake.VOLUME_NAME_ID - new_volume.provider_location = 'fake_provider_location' - migrate_update.side_effect = NotImplementedError - self.volume.update_migrated_volume(self.context, volume, - new_volume, 'available') - volume_update.assert_has_calls(( - mock.call(fake_elevated, new_volume.id, fake_update), - mock.call(fake_elevated, volume.id, fake_update_error))) - - def test_migrate_volume_generic_create_volume_error(self): - self.expected_status = 'error' - - with mock.patch.object(self.volume.driver, 'migrate_volume'), \ - mock.patch.object(volume_rpcapi.VolumeAPI, - 'create_volume') as mock_create_volume, \ - mock.patch.object(self.volume, '_clean_temporary_volume') as \ - clean_temporary_volume: - - # Exception case at the creation of the new temporary volume - mock_create_volume.side_effect = self._fake_create_volume - volume = tests_utils.create_volume(self.context, size=0, - host=CONF.host) - host_obj = {'host': 'newhost', 'capabilities': {}} - self.assertRaises(exception.VolumeMigrationFailed, - self.volume.migrate_volume, - self.context, - volume, - host_obj, - True) - volume = objects.Volume.get_by_id(context.get_admin_context(), - volume.id) - self.assertEqual('error', volume['migration_status']) - self.assertEqual('available', volume['status']) - self.assertTrue(clean_temporary_volume.called) - self.expected_status = 'available' - - def test_migrate_volume_generic_timeout_error(self): - CONF.set_override("migration_create_volume_timeout_secs", 2) - - with 
mock.patch.object(self.volume.driver, 'migrate_volume'), \ - mock.patch.object(volume_rpcapi.VolumeAPI, - 'create_volume') as mock_create_volume, \ - mock.patch.object(self.volume, '_clean_temporary_volume') as \ - clean_temporary_volume, \ - mock.patch.object(time, 'sleep'): - - # Exception case at the timeout of the volume creation - self.expected_status = 'creating' - mock_create_volume.side_effect = self._fake_create_volume - volume = tests_utils.create_volume(self.context, size=0, - host=CONF.host) - host_obj = {'host': 'newhost', 'capabilities': {}} - self.assertRaises(exception.VolumeMigrationFailed, - self.volume.migrate_volume, - self.context, - volume, - host_obj, - True) - volume = objects.Volume.get_by_id(context.get_admin_context(), - volume.id) - self.assertEqual('error', volume['migration_status']) - self.assertEqual('available', volume['status']) - self.assertTrue(clean_temporary_volume.called) - self.expected_status = 'available' - - def test_migrate_volume_generic_create_export_error(self): - with mock.patch.object(self.volume.driver, 'migrate_volume'),\ - mock.patch.object(volume_rpcapi.VolumeAPI, 'create_volume')\ - as mock_create_volume,\ - mock.patch.object(self.volume, '_copy_volume_data') as \ - mock_copy_volume,\ - mock.patch.object(volume_rpcapi.VolumeAPI, 'delete_volume'),\ - mock.patch.object(self.volume, 'migrate_volume_completion'),\ - mock.patch.object(self.volume.driver, 'create_export') as \ - mock_create_export: - - # Exception case at create_export - mock_create_volume.side_effect = self._fake_create_volume - mock_copy_volume.side_effect = processutils.ProcessExecutionError - mock_create_export.side_effect = processutils.ProcessExecutionError - volume = tests_utils.create_volume(self.context, size=0, - host=CONF.host) - host_obj = {'host': 'newhost', 'capabilities': {}} - self.assertRaises(processutils.ProcessExecutionError, - self.volume.migrate_volume, - self.context, - volume, - host_obj, - True) - volume = 
objects.Volume.get_by_id(context.get_admin_context(), - volume.id) - self.assertEqual('error', volume['migration_status']) - self.assertEqual('available', volume['status']) - - def test_migrate_volume_generic_migrate_volume_completion_error(self): - def fake_migrate_volume_completion(ctxt, volume, new_volume, - error=False): - db.volume_update(ctxt, volume['id'], - {'migration_status': 'completing'}) - raise processutils.ProcessExecutionError - - with mock.patch.object(self.volume.driver, 'migrate_volume'),\ - mock.patch.object(volume_rpcapi.VolumeAPI, 'create_volume')\ - as mock_create_volume,\ - mock.patch.object(volume_rpcapi.VolumeAPI, 'delete_volume'),\ - mock.patch.object(self.volume, 'migrate_volume_completion')\ - as mock_migrate_compl,\ - mock.patch.object(self.volume.driver, 'create_export'), \ - mock.patch.object(self.volume, '_attach_volume') \ - as mock_attach, \ - mock.patch.object(self.volume, '_detach_volume'), \ - mock.patch.object(os_brick.initiator.connector, - 'get_connector_properties') \ - as mock_get_connector_properties, \ - mock.patch.object(volutils, 'copy_volume') as mock_copy, \ - mock.patch.object(volume_rpcapi.VolumeAPI, - 'get_capabilities') \ - as mock_get_capabilities: - - # Exception case at delete_volume - # source_volume['migration_status'] is 'completing' - mock_create_volume.side_effect = self._fake_create_volume - mock_migrate_compl.side_effect = fake_migrate_volume_completion - mock_get_connector_properties.return_value = {} - mock_attach.side_effect = [{'device': {'path': 'bar'}}, - {'device': {'path': 'foo'}}] - mock_get_capabilities.return_value = {'sparse_copy_volume': True} - volume = tests_utils.create_volume(self.context, size=0, - host=CONF.host) - host_obj = {'host': 'newhost', 'capabilities': {}} - self.assertRaises(processutils.ProcessExecutionError, - self.volume.migrate_volume, - self.context, - volume, - host_obj, - True) - volume = db.volume_get(context.get_admin_context(), volume['id']) - 
self.assertEqual('error', volume['migration_status']) - self.assertEqual('available', volume['status']) - mock_copy.assert_called_once_with('foo', 'bar', 0, '1M', - sparse=True) - - def fake_attach_volume(self, ctxt, volume, instance_uuid, host_name, - mountpoint, mode): - tests_utils.attach_volume(ctxt, volume.id, - instance_uuid, host_name, - '/dev/vda') - - def _test_migrate_volume_completion(self, status='available', - instance_uuid=None, attached_host=None, - retyping=False, - previous_status='available'): - - initial_status = retyping and 'retyping' or status - old_volume = tests_utils.create_volume(self.context, size=0, - host=CONF.host, - status=initial_status, - migration_status='migrating', - previous_status=previous_status) - attachment = None - if status == 'in-use': - vol = tests_utils.attach_volume(self.context, old_volume.id, - instance_uuid, attached_host, - '/dev/vda') - self.assertEqual('in-use', vol['status']) - attachment = vol['volume_attachment'][0] - target_status = 'target:%s' % old_volume.id - new_host = CONF.host + 'new' - new_volume = tests_utils.create_volume(self.context, size=0, - host=new_host, - migration_status=target_status) - with mock.patch.object(self.volume, 'detach_volume') as \ - mock_detach_volume,\ - mock.patch.object(volume_rpcapi.VolumeAPI, - 'delete_volume') as mock_delete_volume,\ - mock.patch.object(volume_rpcapi.VolumeAPI, - 'attach_volume') as mock_attach_volume,\ - mock.patch.object(volume_rpcapi.VolumeAPI, - 'update_migrated_volume'),\ - mock.patch.object(self.volume.driver, 'attach_volume'): - mock_attach_volume.side_effect = self.fake_attach_volume - old_volume_host = old_volume.host - new_volume_host = new_volume.host - self.volume.migrate_volume_completion(self.context, old_volume, - new_volume) - after_new_volume = objects.Volume.get_by_id(self.context, - new_volume.id) - after_old_volume = objects.Volume.get_by_id(self.context, - old_volume.id) - if status == 'in-use': - 
mock_detach_volume.assert_called_with(self.context, - old_volume.id, - attachment['id']) - attachments = db.volume_attachment_get_all_by_instance_uuid( - self.context, instance_uuid) - mock_attach_volume.assert_called_once_with( - self.context, - old_volume, - attachment['instance_uuid'], - attachment['attached_host'], - attachment['mountpoint'], - 'rw' - ) - self.assertIsNotNone(attachments) - self.assertEqual(attached_host, - attachments[0]['attached_host']) - self.assertEqual(instance_uuid, - attachments[0]['instance_uuid']) - else: - self.assertFalse(mock_detach_volume.called) - self.assertTrue(mock_delete_volume.called) - # NOTE(sborkows): the migrate_volume_completion method alters - # old and new volume objects, so we need to check the equality - # between the former host value and the actual one. - self.assertEqual(old_volume_host, after_new_volume.host) - self.assertEqual(new_volume_host, after_old_volume.host) - - def test_migrate_volume_completion_retype_available(self): - self._test_migrate_volume_completion('available', retyping=True) - - def test_migrate_volume_completion_retype_in_use(self): - self._test_migrate_volume_completion( - 'in-use', - '83c969d5-065e-4c9c-907d-5394bc2e98e2', - 'some-host', - retyping=True, - previous_status='in-use') - - def test_migrate_volume_completion_migrate_available(self): - self._test_migrate_volume_completion() - - def test_migrate_volume_completion_migrate_in_use(self): - self._test_migrate_volume_completion( - 'in-use', - '83c969d5-065e-4c9c-907d-5394bc2e98e2', - 'some-host', - retyping=False, - previous_status='in-use') - - @ddt.data(False, True) - def test_api_migrate_volume_completion_from_swap_with_no_migration( - self, swap_error): - # This test validates that Cinder properly finishes the swap volume - # status updates for the case that no migration has occurred - instance_uuid = '83c969d5-065e-4c9c-907d-5394bc2e98e2' - attached_host = 'attached-host' - orig_attached_vol = 
tests_utils.create_volume(self.context, size=0) - orig_attached_vol = tests_utils.attach_volume( - self.context, orig_attached_vol['id'], instance_uuid, - attached_host, '/dev/vda') - new_volume = tests_utils.create_volume(self.context, size=0) - - @mock.patch.object(volume_rpcapi.VolumeAPI, 'detach_volume') - @mock.patch.object(volume_rpcapi.VolumeAPI, 'attach_volume') - def _run_migration_completion(rpc_attach_volume, - rpc_detach_volume): - attachment = orig_attached_vol['volume_attachment'][0] - attachment_id = attachment['id'] - rpc_attach_volume.side_effect = self.fake_attach_volume - vol_id = volume_api.API().migrate_volume_completion( - self.context, orig_attached_vol, new_volume, swap_error) - if swap_error: - # When swap failed, we don't want to finish attachment - self.assertFalse(rpc_detach_volume.called) - self.assertFalse(rpc_attach_volume.called) - else: - # When no error, we should be finishing the attachment - rpc_detach_volume.assert_called_with(self.context, - orig_attached_vol, - attachment_id) - rpc_attach_volume.assert_called_with( - self.context, new_volume, attachment['instance_uuid'], - attachment['attached_host'], attachment['mountpoint'], - 'rw') - self.assertEqual(new_volume['id'], vol_id) - - _run_migration_completion() - - @mock.patch('cinder.tests.unit.fake_notifier.FakeNotifier._notify') - def test_retype_setup_fail_volume_is_available(self, mock_notify): - """Verify volume is still available if retype prepare failed.""" - elevated = context.get_admin_context() - project_id = self.context.project_id - - db.volume_type_create(elevated, {'name': 'old', 'extra_specs': {}}) - old_vol_type = db.volume_type_get_by_name(elevated, 'old') - db.volume_type_create(elevated, {'name': 'new', 'extra_specs': {}}) - new_vol_type = db.volume_type_get_by_name(elevated, 'new') - db.quota_create(elevated, project_id, 'volumes_new', 0) - - volume = tests_utils.create_volume(self.context, size=1, - host=CONF.host, status='available', - 
volume_type_id=old_vol_type['id']) - - api = cinder.volume.api.API() - self.assertRaises(exception.VolumeLimitExceeded, api.retype, - self.context, volume, new_vol_type['id']) - - volume = db.volume_get(elevated, volume.id) - mock_notify.assert_not_called() - self.assertEqual('available', volume['status']) - - @mock.patch('cinder.tests.unit.fake_notifier.FakeNotifier._notify') - def _retype_volume_exec(self, driver, mock_notify, - snap=False, policy='on-demand', - migrate_exc=False, exc=None, diff_equal=False, - replica=False, reserve_vol_type_only=False, - encryption_changed=False, - replica_new=None): - elevated = context.get_admin_context() - project_id = self.context.project_id - - if replica: - rep_status = 'enabled' - extra_specs = {'replication_enabled': ' True'} - else: - rep_status = 'disabled' - extra_specs = {} - - if replica_new is None: - replica_new = replica - new_specs = {'replication_enabled': ' True'} if replica_new else {} - - db.volume_type_create(elevated, {'name': 'old', - 'extra_specs': extra_specs}) - old_vol_type = db.volume_type_get_by_name(elevated, 'old') - - db.volume_type_create(elevated, {'name': 'new', - 'extra_specs': new_specs}) - vol_type = db.volume_type_get_by_name(elevated, 'new') - db.quota_create(elevated, project_id, 'volumes_new', 10) - - volume = tests_utils.create_volume(self.context, size=1, - host=CONF.host, status='retyping', - volume_type_id=old_vol_type['id'], - replication_status=rep_status) - volume.previous_status = 'available' - volume.save() - if snap: - create_snapshot(volume.id, size=volume.size, - user_id=self.user_context.user_id, - project_id=self.user_context.project_id, - ctxt=self.user_context) - if driver or diff_equal: - host_obj = {'host': CONF.host, 'capabilities': {}} - else: - host_obj = {'host': 'newhost', 'capabilities': {}} - - reserve_opts = {'volumes': 1, 'gigabytes': volume.size} - QUOTAS.add_volume_type_opts(self.context, - reserve_opts, - vol_type['id']) - if reserve_vol_type_only: - 
reserve_opts.pop('volumes') - reserve_opts.pop('gigabytes') - try: - usage = db.quota_usage_get(elevated, project_id, 'volumes') - total_volumes_in_use = usage.in_use - usage = db.quota_usage_get(elevated, project_id, 'gigabytes') - total_gigabytes_in_use = usage.in_use - except exception.QuotaUsageNotFound: - total_volumes_in_use = 0 - total_gigabytes_in_use = 0 - reservations = QUOTAS.reserve(self.context, - project_id=project_id, - **reserve_opts) - - old_reserve_opts = {'volumes': -1, 'gigabytes': -volume.size} - QUOTAS.add_volume_type_opts(self.context, - old_reserve_opts, - old_vol_type['id']) - old_reservations = QUOTAS.reserve(self.context, - project_id=project_id, - **old_reserve_opts) - - with mock.patch.object(self.volume.driver, 'retype') as _retype,\ - mock.patch.object(volume_types, 'volume_types_diff') as _diff,\ - mock.patch.object(self.volume, 'migrate_volume') as _mig,\ - mock.patch.object(db.sqlalchemy.api, 'volume_get') as _vget,\ - mock.patch.object(context.RequestContext, 'elevated') as _ctx: - _vget.return_value = volume - _retype.return_value = driver - _ctx.return_value = self.context - returned_diff = { - 'encryption': {}, - 'qos_specs': {}, - 'extra_specs': {}, - } - if replica != replica_new: - returned_diff['extra_specs']['replication_enabled'] = ( - extra_specs.get('replication_enabled'), - new_specs.get('replication_enabled')) - expected_replica_status = 'enabled' if replica_new else 'disabled' - - if encryption_changed: - returned_diff['encryption'] = 'fake' - _diff.return_value = (returned_diff, diff_equal) - if migrate_exc: - _mig.side_effect = KeyError - else: - _mig.return_value = True - - if not exc: - self.volume.retype(self.context, volume, - vol_type['id'], host_obj, - migration_policy=policy, - reservations=reservations, - old_reservations=old_reservations) - else: - self.assertRaises(exc, self.volume.retype, - self.context, volume, - vol_type['id'], host_obj, - migration_policy=policy, - reservations=reservations, - 
old_reservations=old_reservations) - if host_obj['host'] != CONF.host: - _retype.assert_not_called() - - # get volume/quota properties - volume = objects.Volume.get_by_id(elevated, volume.id) - try: - usage = db.quota_usage_get(elevated, project_id, 'volumes_new') - volumes_in_use = usage.in_use - except exception.QuotaUsageNotFound: - volumes_in_use = 0 - - # Get new in_use after retype, it should not be changed. - if reserve_vol_type_only: - try: - usage = db.quota_usage_get(elevated, project_id, 'volumes') - new_total_volumes_in_use = usage.in_use - usage = db.quota_usage_get(elevated, project_id, 'gigabytes') - new_total_gigabytes_in_use = usage.in_use - except exception.QuotaUsageNotFound: - new_total_volumes_in_use = 0 - new_total_gigabytes_in_use = 0 - self.assertEqual(total_volumes_in_use, new_total_volumes_in_use) - self.assertEqual(total_gigabytes_in_use, - new_total_gigabytes_in_use) - - # check properties - if driver or diff_equal: - self.assertEqual(vol_type['id'], volume.volume_type_id) - self.assertEqual('available', volume.status) - self.assertEqual(CONF.host, volume.host) - self.assertEqual(1, volumes_in_use) - self.assert_notify_called(mock_notify, - (['INFO', 'volume.retype'],)) - elif not exc: - self.assertEqual(old_vol_type['id'], volume.volume_type_id) - self.assertEqual('retyping', volume.status) - self.assertEqual(CONF.host, volume.host) - self.assertEqual(1, volumes_in_use) - self.assert_notify_called(mock_notify, - (['INFO', 'volume.retype'],)) - else: - self.assertEqual(old_vol_type['id'], volume.volume_type_id) - self.assertEqual('available', volume.status) - self.assertEqual(CONF.host, volume.host) - self.assertEqual(0, volumes_in_use) - mock_notify.assert_not_called() - if encryption_changed: - self.assertTrue(_mig.called) - self.assertEqual(expected_replica_status, volume.replication_status) - - def test_retype_volume_driver_success(self): - self._retype_volume_exec(True) - - @ddt.data((False, False), (False, True), (True, False), 
(True, True)) - @ddt.unpack - def test_retype_volume_replica(self, replica, replica_new): - self._retype_volume_exec(True, replica=replica, - replica_new=replica_new) - - def test_retype_volume_migration_bad_policy(self): - # Test volume retype that requires migration by not allowed - self._retype_volume_exec(False, policy='never', - exc=exception.VolumeMigrationFailed) - - def test_retype_volume_migration_with_replica(self): - self._retype_volume_exec(False, - replica=True, - exc=exception.InvalidVolume) - - def test_retype_volume_migration_with_snaps(self): - self._retype_volume_exec(False, snap=True, exc=exception.InvalidVolume) - - def test_retype_volume_migration_failed(self): - self._retype_volume_exec(False, migrate_exc=True, exc=KeyError) - - def test_retype_volume_migration_success(self): - self._retype_volume_exec(False, migrate_exc=False, exc=None) - - def test_retype_volume_migration_equal_types(self): - self._retype_volume_exec(False, diff_equal=True) - - def test_retype_volume_with_type_only(self): - self._retype_volume_exec(True, reserve_vol_type_only=True) - - def test_retype_volume_migration_encryption(self): - self._retype_volume_exec(False, encryption_changed=True) - - def test_migrate_driver_not_initialized(self): - volume = tests_utils.create_volume(self.context, size=0, - host=CONF.host) - host_obj = {'host': 'newhost', 'capabilities': {}} - - self.volume.driver._initialized = False - self.assertRaises(exception.DriverNotInitialized, - self.volume.migrate_volume, - self.context, volume, host_obj, True) - - volume = objects.Volume.get_by_id(context.get_admin_context(), - volume.id) - self.assertEqual('error', volume.migration_status) - - # lets cleanup the mess. 
- self.volume.driver._initialized = True - self.volume.delete_volume(self.context, volume) - - def test_delete_source_volume_in_migration(self): - """Test deleting a source volume that is in migration.""" - self._test_delete_volume_in_migration('migrating') - - def test_delete_destination_volume_in_migration(self): - """Test deleting a destination volume that is in migration.""" - self._test_delete_volume_in_migration('target:vol-id') - - def _test_delete_volume_in_migration(self, migration_status): - """Test deleting a volume that is in migration.""" - volume = tests_utils.create_volume(self.context, host=CONF.host, - migration_status=migration_status) - self.volume.delete_volume(self.context, volume=volume) - - # The volume is successfully removed during the volume delete - # and won't exist in the database any more. - self.assertRaises(exception.VolumeNotFound, volume.refresh) diff --git a/cinder/tests/unit/volume/test_volume_usage_audit.py b/cinder/tests/unit/volume/test_volume_usage_audit.py deleted file mode 100644 index a20c21ac1..000000000 --- a/cinder/tests/unit/volume/test_volume_usage_audit.py +++ /dev/null @@ -1,257 +0,0 @@ -# Copyright 2010 United States Government as represented by the -# Administrator of the National Aeronautics and Space Administration. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
-"""Tests for Volume usage audit feature.""" - -import datetime - -from cinder import context -from cinder import db -from cinder import objects -from cinder.objects import fields -from cinder.tests.unit import fake_constants as fake -from cinder.tests.unit import volume as base - - -class GetActiveByWindowTestCase(base.BaseVolumeTestCase): - def setUp(self): - super(GetActiveByWindowTestCase, self).setUp() - self.ctx = context.get_admin_context(read_deleted="yes") - self.db_vol_attrs = [ - { - 'id': fake.VOLUME_ID, - 'host': 'devstack', - 'project_id': fake.PROJECT_ID, - 'created_at': datetime.datetime(1, 1, 1, 1, 1, 1), - 'deleted': True, 'status': 'deleted', - 'deleted_at': datetime.datetime(1, 2, 1, 1, 1, 1), - }, - { - 'id': fake.VOLUME2_ID, - 'host': 'devstack', - 'project_id': fake.PROJECT_ID, - 'created_at': datetime.datetime(1, 1, 1, 1, 1, 1), - 'deleted': True, 'status': 'deleted', - 'deleted_at': datetime.datetime(1, 3, 10, 1, 1, 1), - }, - { - 'id': fake.VOLUME3_ID, - 'host': 'devstack', - 'project_id': fake.PROJECT_ID, - 'created_at': datetime.datetime(1, 1, 1, 1, 1, 1), - 'deleted': True, 'status': 'deleted', - 'deleted_at': datetime.datetime(1, 5, 1, 1, 1, 1), - }, - { - 'id': fake.VOLUME4_ID, - 'host': 'devstack', - 'project_id': fake.PROJECT_ID, - 'created_at': datetime.datetime(1, 3, 10, 1, 1, 1), - }, - { - 'id': fake.VOLUME5_ID, - 'host': 'devstack', - 'project_id': fake.PROJECT_ID, - 'created_at': datetime.datetime(1, 5, 1, 1, 1, 1), - } - ] - - self.db_snap_attrs = [ - { - 'id': fake.SNAPSHOT_ID, - 'project_id': 'p1', - 'created_at': datetime.datetime(1, 1, 1, 1, 1, 1), - 'deleted': True, - 'status': fields.SnapshotStatus.DELETED, - 'deleted_at': datetime.datetime(1, 2, 1, 1, 1, 1), - 'volume_id': fake.VOLUME_ID, - }, - - { - 'id': fake.SNAPSHOT2_ID, - 'project_id': 'p1', - 'created_at': datetime.datetime(1, 1, 1, 1, 1, 1), - 'deleted': True, - 'status': fields.SnapshotStatus.DELETED, - 'deleted_at': datetime.datetime(1, 3, 10, 1, 1, 1), - 
'volume_id': fake.VOLUME_ID, - }, - { - 'id': fake.SNAPSHOT3_ID, - 'project_id': 'p1', - 'created_at': datetime.datetime(1, 1, 1, 1, 1, 1), - 'deleted': True, - 'status': fields.SnapshotStatus.DELETED, - 'deleted_at': datetime.datetime(1, 5, 1, 1, 1, 1), - 'volume_id': fake.VOLUME_ID, - }, - { - 'id': fake.SNAPSHOT_ID, - 'project_id': 'p1', - 'created_at': datetime.datetime(1, 3, 10, 1, 1, 1), - 'volume_id': fake.VOLUME_ID, - }, - { - 'id': fake.SNAPSHOT2_ID, - 'project_id': 'p1', - 'created_at': datetime.datetime(1, 5, 1, 1, 1, 1), - 'volume_id': fake.VOLUME_ID - } - ] - - self.db_back_attrs = [ - { - 'id': fake.BACKUP_ID, - 'host': 'devstack', - 'project_id': fake.PROJECT_ID, - 'created_at': datetime.datetime(1, 1, 1, 1, 1, 1), - 'deleted': 1, - 'status': 'deleted', - 'deleted_at': datetime.datetime(1, 2, 1, 1, 1, 1) - }, - { - 'id': fake.BACKUP2_ID, - 'host': 'devstack', - 'project_id': fake.PROJECT_ID, - 'created_at': datetime.datetime(1, 1, 1, 1, 1, 1), - 'deleted': 1, - 'status': 'deleted', - 'deleted_at': datetime.datetime(1, 3, 10, 1, 1, 1) - }, - { - 'id': fake.BACKUP3_ID, - 'host': 'devstack', - 'project_id': fake.PROJECT_ID, - 'created_at': datetime.datetime(1, 1, 1, 1, 1, 1), - 'deleted': 1, - 'status': 'deleted', - 'deleted_at': datetime.datetime(1, 5, 1, 1, 1, 1) - }, - { - 'id': fake.BACKUP4_ID, - 'host': 'devstack', - 'project_id': fake.PROJECT_ID, - 'created_at': datetime.datetime(1, 3, 10, 1, 1, 1), - }, - { - 'id': fake.BACKUP5_ID, - 'host': 'devstack', - 'project_id': fake.PROJECT_ID, - 'created_at': datetime.datetime(1, 5, 1, 1, 1, 1), - }, - ] - - def test_volume_get_all_active_by_window(self): - # Find all all volumes valid within a timeframe window. 
- - # Not in window - db.volume_create(self.ctx, self.db_vol_attrs[0]) - - # In - deleted in window - db.volume_create(self.ctx, self.db_vol_attrs[1]) - - # In - deleted after window - db.volume_create(self.ctx, self.db_vol_attrs[2]) - - # In - created in window - db.volume_create(self.context, self.db_vol_attrs[3]) - - # Not of window. - db.volume_create(self.context, self.db_vol_attrs[4]) - - volumes = db.volume_get_all_active_by_window( - self.context, - datetime.datetime(1, 3, 1, 1, 1, 1), - datetime.datetime(1, 4, 1, 1, 1, 1), - project_id=fake.PROJECT_ID) - self.assertEqual(3, len(volumes)) - self.assertEqual(fake.VOLUME2_ID, volumes[0].id) - self.assertEqual(fake.VOLUME3_ID, volumes[1].id) - self.assertEqual(fake.VOLUME4_ID, volumes[2].id) - - def test_snapshot_get_all_active_by_window(self): - # Find all all snapshots valid within a timeframe window. - db.volume_create(self.context, {'id': fake.VOLUME_ID}) - for i in range(5): - self.db_vol_attrs[i]['volume_id'] = fake.VOLUME_ID - - # Not in window - del self.db_snap_attrs[0]['id'] - snap1 = objects.Snapshot(self.ctx, **self.db_snap_attrs[0]) - snap1.create() - - # In - deleted in window - del self.db_snap_attrs[1]['id'] - snap2 = objects.Snapshot(self.ctx, **self.db_snap_attrs[1]) - snap2.create() - - # In - deleted after window - del self.db_snap_attrs[2]['id'] - snap3 = objects.Snapshot(self.ctx, **self.db_snap_attrs[2]) - snap3.create() - - # In - created in window - del self.db_snap_attrs[3]['id'] - snap4 = objects.Snapshot(self.ctx, **self.db_snap_attrs[3]) - snap4.create() - - # Not of window. 
- del self.db_snap_attrs[4]['id'] - snap5 = objects.Snapshot(self.ctx, **self.db_snap_attrs[4]) - snap5.create() - - snapshots = objects.SnapshotList.get_all_active_by_window( - self.context, - datetime.datetime(1, 3, 1, 1, 1, 1), - datetime.datetime(1, 4, 1, 1, 1, 1)).objects - self.assertEqual(3, len(snapshots)) - self.assertEqual(snap2.id, snapshots[0].id) - self.assertEqual(fake.VOLUME_ID, snapshots[0].volume_id) - self.assertEqual(snap3.id, snapshots[1].id) - self.assertEqual(fake.VOLUME_ID, snapshots[1].volume_id) - self.assertEqual(snap4.id, snapshots[2].id) - self.assertEqual(fake.VOLUME_ID, snapshots[2].volume_id) - - def test_backup_get_all_active_by_window(self): - # Find all backups valid within a timeframe window. - db.volume_create(self.context, {'id': fake.VOLUME_ID}) - for i in range(5): - self.db_back_attrs[i]['volume_id'] = fake.VOLUME_ID - - # Not in window - db.backup_create(self.ctx, self.db_back_attrs[0]) - - # In - deleted in window - db.backup_create(self.ctx, self.db_back_attrs[1]) - - # In - deleted after window - db.backup_create(self.ctx, self.db_back_attrs[2]) - - # In - created in window - db.backup_create(self.ctx, self.db_back_attrs[3]) - - # Not of window - db.backup_create(self.ctx, self.db_back_attrs[4]) - - backups = db.backup_get_all_active_by_window( - self.context, - datetime.datetime(1, 3, 1, 1, 1, 1), - datetime.datetime(1, 4, 1, 1, 1, 1), - project_id=fake.PROJECT_ID - ) - self.assertEqual(3, len(backups)) - self.assertEqual(fake.BACKUP2_ID, backups[0].id) - self.assertEqual(fake.BACKUP3_ID, backups[1].id) - self.assertEqual(fake.BACKUP4_ID, backups[2].id) diff --git a/cinder/tests/unit/windows/__init__.py b/cinder/tests/unit/windows/__init__.py deleted file mode 100644 index e69de29bb..000000000 diff --git a/cinder/tests/unit/windows/db_fakes.py b/cinder/tests/unit/windows/db_fakes.py deleted file mode 100644 index 8442c2c9b..000000000 --- a/cinder/tests/unit/windows/db_fakes.py +++ /dev/null @@ -1,48 +0,0 @@ -# Copyright 
2012 Pedro Navarro Perez -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -""" -Stubouts, mocks and fixtures for windows volume test suite -""" - - -def get_fake_volume_info(): - return {'name': 'volume_name', - 'size': 1, - 'provider_location': 'iqn.2010-10.org.openstack:' + 'volume_name', - 'id': 1, - 'provider_auth': None} - - -def get_fake_volume_info_cloned(): - return {'name': 'volume_name_cloned', - 'size': 1, - 'provider_location': 'iqn.2010-10.org.openstack:' + - 'volume_name_cloned', - 'id': 1, - 'provider_auth': None} - - -def get_fake_image_meta(): - return {'id': '10958016-e196-42e3-9e7f-5d8927ae3099' - } - - -def get_fake_snapshot_info(): - return {'name': 'snapshot_name', - 'volume_name': 'volume_name', } - - -def get_fake_connector_info(): - return {'initiator': 'iqn.2010-10.org.openstack:' + 'volume_name', } diff --git a/cinder/tests/unit/windows/test_smbfs.py b/cinder/tests/unit/windows/test_smbfs.py deleted file mode 100644 index 30cba88ef..000000000 --- a/cinder/tests/unit/windows/test_smbfs.py +++ /dev/null @@ -1,800 +0,0 @@ -# Copyright 2014 Cloudbase Solutions Srl -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. 
You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import copy -import os - -import ddt -import mock -from oslo_utils import units - -from cinder import context -from cinder import exception -from cinder.image import image_utils -from cinder.objects import fields -from cinder import test -from cinder.tests.unit import fake_snapshot -from cinder.tests.unit import fake_volume -from cinder.volume.drivers import remotefs -from cinder.volume.drivers.windows import smbfs - - -@ddt.ddt -class WindowsSmbFsTestCase(test.TestCase): - - _FAKE_SHARE = '//1.2.3.4/share1' - _FAKE_SHARE_HASH = 'db0bf952c1734092b83e8990bd321131' - _FAKE_MNT_BASE = r'c:\openstack\mnt' - _FAKE_MNT_POINT = os.path.join(_FAKE_MNT_BASE, _FAKE_SHARE_HASH) - _FAKE_VOLUME_ID = '4f711859-4928-4cb7-801a-a50c37ceaccc' - _FAKE_VOLUME_NAME = 'volume-%s.vhdx' % _FAKE_VOLUME_ID - _FAKE_SNAPSHOT_ID = '50811859-4928-4cb7-801a-a50c37ceacba' - _FAKE_SNAPSHOT_NAME = 'volume-%s-%s.vhdx' % (_FAKE_VOLUME_ID, - _FAKE_SNAPSHOT_ID) - _FAKE_SNAPSHOT_PATH = os.path.join(_FAKE_MNT_POINT, - _FAKE_SNAPSHOT_NAME) - _FAKE_VOLUME_SIZE = 1 - _FAKE_TOTAL_SIZE = 2048 - _FAKE_TOTAL_AVAILABLE = 1024 - _FAKE_TOTAL_ALLOCATED = 1024 - _FAKE_SHARE_OPTS = '-o username=Administrator,password=12345' - _FAKE_VOLUME_PATH = os.path.join(_FAKE_MNT_POINT, - _FAKE_VOLUME_NAME) - _FAKE_SHARE_OPTS = '-o username=Administrator,password=12345' - - @mock.patch.object(smbfs, 'utilsfactory') - @mock.patch.object(smbfs, 'remotefs_brick') - def setUp(self, mock_remotefs, mock_utilsfactory): - super(WindowsSmbFsTestCase, self).setUp() - - self.context = context.get_admin_context() - - 
self._FAKE_SMBFS_CONFIG = mock.MagicMock( - smbfs_oversub_ratio = 2, - smbfs_used_ratio = 0.5, - smbfs_shares_config = mock.sentinel.share_config_file, - smbfs_default_volume_format = 'vhdx', - smbfs_sparsed_volumes = False) - - self._smbfs_driver = smbfs.WindowsSmbfsDriver( - configuration=mock.Mock()) - self._smbfs_driver._delete = mock.Mock() - self._smbfs_driver._local_volume_dir = mock.Mock( - return_value=self._FAKE_MNT_POINT) - self._smbfs_driver.base = self._FAKE_MNT_BASE - - self._diskutils = self._smbfs_driver._diskutils - self._vhdutils = self._smbfs_driver._vhdutils - - self.volume = self._simple_volume() - self.snapshot = self._simple_snapshot(volume=self.volume) - - def _simple_volume(self, **kwargs): - updates = {'id': self._FAKE_VOLUME_ID, - 'size': self._FAKE_VOLUME_SIZE, - 'provider_location': self._FAKE_SHARE} - updates.update(kwargs) - ctxt = context.get_admin_context() - return fake_volume.fake_volume_obj(ctxt, **updates) - - def _simple_snapshot(self, **kwargs): - volume = kwargs.pop('volume', None) or self._simple_volume() - ctxt = context.get_admin_context() - updates = {'id': self._FAKE_SNAPSHOT_ID, - 'volume_size': volume.size, - 'volume_id': volume.id} - updates.update(kwargs) - snapshot = fake_snapshot.fake_snapshot_obj(ctxt, **updates) - snapshot.volume = volume - return snapshot - - @mock.patch.object(smbfs.WindowsSmbfsDriver, '_check_os_platform') - @mock.patch.object(remotefs.RemoteFSSnapDriverDistributed, 'do_setup') - @mock.patch('os.path.exists') - @mock.patch('os.path.isabs') - @mock.patch.object(image_utils, 'check_qemu_img_version') - def _test_setup(self, mock_check_qemu_img_version, - mock_is_abs, mock_exists, - mock_remotefs_do_setup, - mock_check_os_platform, - config, share_config_exists=True): - mock_exists.return_value = share_config_exists - fake_ensure_mounted = mock.MagicMock() - self._smbfs_driver._ensure_shares_mounted = fake_ensure_mounted - self._smbfs_driver._setup_pool_mappings = mock.Mock() - 
self._smbfs_driver.configuration = config - - if not (config.smbfs_shares_config and share_config_exists and - config.smbfs_oversub_ratio > 0 and - 0 <= config.smbfs_used_ratio <= 1): - self.assertRaises(exception.SmbfsException, - self._smbfs_driver.do_setup, - mock.sentinel.context) - else: - self._smbfs_driver.do_setup(mock.sentinel.context) - - mock_check_qemu_img_version.assert_called_once_with( - self._smbfs_driver._MINIMUM_QEMU_IMG_VERSION) - mock_is_abs.assert_called_once_with(self._smbfs_driver.base) - self.assertEqual({}, self._smbfs_driver.shares) - fake_ensure_mounted.assert_called_once_with() - self._smbfs_driver._setup_pool_mappings.assert_called_once_with() - - mock_check_os_platform.assert_called_once_with() - - def test_setup_pools(self): - pool_mappings = { - '//ip/share0': 'pool0', - '//ip/share1': 'pool1', - } - self._smbfs_driver.configuration.smbfs_pool_mappings = pool_mappings - self._smbfs_driver.shares = { - '//ip/share0': None, - '//ip/share1': None, - '//ip/share2': None - } - - expected_pool_mappings = pool_mappings.copy() - expected_pool_mappings['//ip/share2'] = 'share2' - - self._smbfs_driver._setup_pool_mappings() - self.assertEqual(expected_pool_mappings, - self._smbfs_driver._pool_mappings) - - def test_setup_pool_duplicates(self): - self._smbfs_driver.configuration.smbfs_pool_mappings = { - 'share0': 'pool0', - 'share1': 'pool0' - } - self.assertRaises(exception.SmbfsException, - self._smbfs_driver._setup_pool_mappings) - - def test_initialize_connection(self): - self._smbfs_driver.get_active_image_from_info = mock.Mock( - return_value=self._FAKE_VOLUME_NAME) - self._smbfs_driver._get_mount_point_base = mock.Mock( - return_value=self._FAKE_MNT_BASE) - self._smbfs_driver.shares = {self._FAKE_SHARE: self._FAKE_SHARE_OPTS} - self._smbfs_driver.get_volume_format = mock.Mock( - return_value=mock.sentinel.format) - - fake_data = {'export': self._FAKE_SHARE, - 'format': mock.sentinel.format, - 'name': self._FAKE_VOLUME_NAME, - 'options': 
self._FAKE_SHARE_OPTS} - expected = { - 'driver_volume_type': 'smbfs', - 'data': fake_data, - 'mount_point_base': self._FAKE_MNT_BASE} - ret_val = self._smbfs_driver.initialize_connection( - self.volume, None) - - self.assertEqual(expected, ret_val) - - def test_setup(self): - self._test_setup(config=self._FAKE_SMBFS_CONFIG) - - def test_setup_missing_shares_config_option(self): - fake_config = copy.copy(self._FAKE_SMBFS_CONFIG) - fake_config.smbfs_shares_config = None - self._test_setup(config=fake_config, - share_config_exists=False) - - def test_setup_missing_shares_config_file(self): - self._test_setup(config=self._FAKE_SMBFS_CONFIG, - share_config_exists=False) - - def test_setup_invlid_oversub_ratio(self): - fake_config = copy.copy(self._FAKE_SMBFS_CONFIG) - fake_config.smbfs_oversub_ratio = -1 - self._test_setup(config=fake_config) - - def test_setup_invalid_used_ratio(self): - fake_config = copy.copy(self._FAKE_SMBFS_CONFIG) - fake_config.smbfs_used_ratio = -1 - self._test_setup(config=fake_config) - - def test_setup_invalid_used_ratio2(self): - fake_config = copy.copy(self._FAKE_SMBFS_CONFIG) - fake_config.smbfs_used_ratio = 1.1 - self._test_setup(config=fake_config) - - @mock.patch.object(smbfs, 'context') - @mock.patch.object(smbfs.WindowsSmbfsDriver, - '_get_pool_name_from_share') - def test_get_total_allocated(self, mock_get_pool_name, mock_ctxt): - fake_pool_name = 'pool0' - fake_host_name = 'fake_host@fake_backend' - fake_vol_sz_sum = 5 - - mock_db = mock.Mock() - mock_db.volume_data_get_for_host.return_value = [ - mock.sentinel.vol_count, fake_vol_sz_sum] - - self._smbfs_driver.host = fake_host_name - self._smbfs_driver.db = mock_db - - mock_get_pool_name.return_value = fake_pool_name - - allocated = self._smbfs_driver._get_total_allocated( - mock.sentinel.share) - self.assertEqual(fake_vol_sz_sum << 30, - allocated) - - mock_get_pool_name.assert_called_once_with(mock.sentinel.share) - mock_db.volume_data_get_for_host.assert_called_once_with( - 
context=mock_ctxt.get_admin_context.return_value, - host='fake_host@fake_backend#pool0') - - @mock.patch.object(smbfs.WindowsSmbfsDriver, - '_get_local_volume_path_template') - @mock.patch.object(smbfs.WindowsSmbfsDriver, '_lookup_local_volume_path') - @mock.patch.object(smbfs.WindowsSmbfsDriver, 'get_volume_format') - def _test_get_volume_path(self, mock_get_volume_format, mock_lookup_volume, - mock_get_path_template, volume_exists=True): - drv = self._smbfs_driver - (mock_get_path_template.return_value, - ext) = os.path.splitext(self._FAKE_VOLUME_PATH) - volume_format = ext.strip('.') - - mock_lookup_volume.return_value = ( - self._FAKE_VOLUME_PATH if volume_exists else None) - mock_get_volume_format.return_value = volume_format - - ret_val = drv.local_path(self.volume) - - if volume_exists: - self.assertFalse(mock_get_volume_format.called) - else: - mock_get_volume_format.assert_called_once_with(self.volume) - self.assertEqual(self._FAKE_VOLUME_PATH, ret_val) - - def test_get_existing_volume_path(self): - self._test_get_volume_path() - - def test_get_new_volume_path(self): - self._test_get_volume_path(volume_exists=False) - - @mock.patch.object(smbfs.WindowsSmbfsDriver, '_local_volume_dir') - def test_get_local_volume_path_template(self, mock_get_local_dir): - mock_get_local_dir.return_value = self._FAKE_MNT_POINT - ret_val = self._smbfs_driver._get_local_volume_path_template( - self.volume) - exp_template = os.path.splitext(self._FAKE_VOLUME_PATH)[0] - self.assertEqual(exp_template, ret_val) - - @mock.patch('os.path.exists') - def test_lookup_local_volume_path(self, mock_exists): - expected_path = self._FAKE_VOLUME_PATH + '.vhdx' - mock_exists.side_effect = lambda x: x == expected_path - - ret_val = self._smbfs_driver._lookup_local_volume_path( - self._FAKE_VOLUME_PATH) - - extensions = [ - ".%s" % ext - for ext in self._smbfs_driver._SUPPORTED_IMAGE_FORMATS] - possible_paths = [self._FAKE_VOLUME_PATH + ext - for ext in extensions] - 
mock_exists.assert_has_calls( - [mock.call(path) for path in possible_paths]) - self.assertEqual(expected_path, ret_val) - - @mock.patch.object(smbfs.WindowsSmbfsDriver, - '_get_local_volume_path_template') - @mock.patch.object(smbfs.WindowsSmbfsDriver, '_lookup_local_volume_path') - @mock.patch.object(smbfs.WindowsSmbfsDriver, '_get_volume_format_spec') - def _test_get_volume_format(self, mock_get_format_spec, - mock_lookup_volume, mock_get_path_template, - qemu_format=False, volume_format='vhdx', - expected_vol_fmt=None, - volume_exists=True): - expected_vol_fmt = expected_vol_fmt or volume_format - - vol_path = '%s.%s' % (os.path.splitext(self._FAKE_VOLUME_PATH)[0], - volume_format) - mock_get_path_template.return_value = vol_path - mock_lookup_volume.return_value = ( - vol_path if volume_exists else None) - - mock_get_format_spec.return_value = volume_format - - supported_fmts = self._smbfs_driver._SUPPORTED_IMAGE_FORMATS - if volume_format.lower() not in supported_fmts: - self.assertRaises(exception.SmbfsException, - self._smbfs_driver.get_volume_format, - self.volume, - qemu_format) - - else: - ret_val = self._smbfs_driver.get_volume_format(self.volume, - qemu_format) - - if volume_exists: - self.assertFalse(mock_get_format_spec.called) - else: - mock_get_format_spec.assert_called_once_with(self.volume) - - self.assertEqual(expected_vol_fmt, ret_val) - - def test_get_volume_format_invalid_extension(self): - self._test_get_volume_format(volume_format='fake') - - def test_get_existing_vhdx_volume_format(self): - self._test_get_volume_format() - - def test_get_new_vhd_volume_format(self): - fmt = 'vhd' - self._test_get_volume_format(volume_format=fmt, - volume_exists=False, - expected_vol_fmt=fmt) - - def test_get_new_vhd_legacy_volume_format(self): - img_fmt = 'vhd' - expected_fmt = 'vpc' - self._test_get_volume_format(volume_format=img_fmt, - volume_exists=False, - qemu_format=True, - expected_vol_fmt=expected_fmt) - - @ddt.data([False, False], - [True, True], 
- [False, True]) - @ddt.unpack - def test_get_volume_format_spec(self, - volume_meta_contains_fmt, - volume_type_contains_fmt): - self._smbfs_driver.configuration = copy.copy(self._FAKE_SMBFS_CONFIG) - - fake_vol_meta_fmt = 'vhd' - fake_vol_type_fmt = 'vhdx' - - volume_metadata = {} - volume_type_extra_specs = {} - - if volume_meta_contains_fmt: - volume_metadata['volume_format'] = fake_vol_meta_fmt - elif volume_type_contains_fmt: - volume_type_extra_specs['smbfs:volume_format'] = fake_vol_type_fmt - - volume_type = fake_volume.fake_volume_type_obj(self.context) - volume = fake_volume.fake_volume_obj(self.context) - # Optional arguments are not set in _from_db_object, - # so have to set explicitly here - volume.volume_type = volume_type - volume.metadata = volume_metadata - # Same for extra_specs and VolumeType - volume_type.extra_specs = volume_type_extra_specs - - resulted_fmt = self._smbfs_driver._get_volume_format_spec(volume) - - if volume_meta_contains_fmt: - expected_fmt = fake_vol_meta_fmt - elif volume_type_contains_fmt: - expected_fmt = fake_vol_type_fmt - else: - expected_fmt = self._FAKE_SMBFS_CONFIG.smbfs_default_volume_format - - self.assertEqual(expected_fmt, resulted_fmt) - - @mock.patch.object(remotefs.RemoteFSSnapDriverDistributed, - 'create_volume') - def test_create_volume_base(self, mock_create_volume): - self._smbfs_driver.create_volume(self.volume) - mock_create_volume.assert_called_once_with(self.volume) - - def _test_create_volume(self, volume_exists=False, volume_format='vhdx'): - self._smbfs_driver.create_dynamic_vhd = mock.MagicMock() - fake_create = self._smbfs_driver._vhdutils.create_dynamic_vhd - self._smbfs_driver.get_volume_format = mock.Mock( - return_value=volume_format) - - with mock.patch('os.path.exists', new=lambda x: volume_exists): - volume = self._simple_volume() - if volume_exists or volume_format not in ('vhd', 'vhdx'): - self.assertRaises(exception.InvalidVolume, - self._smbfs_driver._do_create_volume, - volume) - else: 
- fake_vol_path = self._FAKE_VOLUME_PATH - self._smbfs_driver._do_create_volume(volume) - fake_create.assert_called_once_with( - fake_vol_path, volume.size << 30) - - def test_create_volume(self): - self._test_create_volume() - - def test_create_existing_volume(self): - self._test_create_volume(True) - - def test_create_volume_invalid_volume(self): - self._test_create_volume(volume_format="qcow") - - def test_delete_volume(self): - drv = self._smbfs_driver - fake_vol_info = self._FAKE_VOLUME_PATH + '.info' - - drv._ensure_share_mounted = mock.MagicMock() - fake_ensure_mounted = drv._ensure_share_mounted - - drv._local_volume_dir = mock.Mock( - return_value=self._FAKE_MNT_POINT) - drv.get_active_image_from_info = mock.Mock( - return_value=self._FAKE_VOLUME_NAME) - drv._delete = mock.Mock() - drv._local_path_volume_info = mock.Mock( - return_value=fake_vol_info) - - with mock.patch('os.path.exists', lambda x: True): - drv.delete_volume(self.volume) - - fake_ensure_mounted.assert_called_once_with(self._FAKE_SHARE) - drv._delete.assert_any_call( - self._FAKE_VOLUME_PATH) - drv._delete.assert_any_call(fake_vol_info) - - def test_ensure_mounted(self): - self._smbfs_driver.shares = {self._FAKE_SHARE: self._FAKE_SHARE_OPTS} - - self._smbfs_driver._ensure_share_mounted(self._FAKE_SHARE) - self._smbfs_driver._remotefsclient.mount.assert_called_once_with( - self._FAKE_SHARE, self._FAKE_SHARE_OPTS) - - def test_get_capacity_info(self): - self._diskutils.get_disk_capacity.return_value = ( - self._FAKE_TOTAL_SIZE, self._FAKE_TOTAL_AVAILABLE) - self._smbfs_driver._get_mount_point_for_share = mock.Mock( - return_value=mock.sentinel.mnt_point) - self._smbfs_driver._get_total_allocated = mock.Mock( - return_value=self._FAKE_TOTAL_ALLOCATED) - - ret_val = self._smbfs_driver._get_capacity_info(self._FAKE_SHARE) - expected_ret_val = [int(x) for x in [self._FAKE_TOTAL_SIZE, - self._FAKE_TOTAL_AVAILABLE, - self._FAKE_TOTAL_ALLOCATED]] - self.assertEqual(expected_ret_val, ret_val) - - 
self._smbfs_driver._get_mount_point_for_share.assert_called_once_with( - self._FAKE_SHARE) - self._diskutils.get_disk_capacity.assert_called_once_with( - mock.sentinel.mnt_point) - self._smbfs_driver._get_total_allocated.assert_called_once_with( - self._FAKE_SHARE) - - def _test_get_img_info(self, backing_file=None): - self._smbfs_driver._vhdutils.get_vhd_parent_path.return_value = ( - backing_file) - - image_info = self._smbfs_driver._qemu_img_info(self._FAKE_VOLUME_PATH) - self.assertEqual(self._FAKE_VOLUME_NAME, - image_info.image) - backing_file_name = backing_file and os.path.basename(backing_file) - self.assertEqual(backing_file_name, image_info.backing_file) - - def test_get_img_info_without_backing_file(self): - self._test_get_img_info() - - def test_get_snapshot_info(self): - self._test_get_img_info(self._FAKE_VOLUME_PATH) - - @ddt.data('in-use', 'available') - def test_create_snapshot(self, volume_status): - snapshot = self._simple_snapshot() - snapshot.volume.status = volume_status - - self._smbfs_driver._vhdutils.create_differencing_vhd = ( - mock.Mock()) - self._smbfs_driver._local_volume_dir = mock.Mock( - return_value=self._FAKE_MNT_POINT) - - fake_create_diff = ( - self._smbfs_driver._vhdutils.create_differencing_vhd) - - self._smbfs_driver._do_create_snapshot( - snapshot, - os.path.basename(self._FAKE_VOLUME_PATH), - self._FAKE_SNAPSHOT_PATH) - - if volume_status != 'in-use': - fake_create_diff.assert_called_once_with(self._FAKE_SNAPSHOT_PATH, - self._FAKE_VOLUME_PATH) - else: - fake_create_diff.assert_not_called() - - @mock.patch.object(smbfs.WindowsSmbfsDriver, - '_check_extend_volume_support') - @mock.patch.object(smbfs.WindowsSmbfsDriver, - '_local_path_active_image') - def test_extend_volume(self, mock_get_active_img, - mock_check_ext_support): - volume = fake_volume.fake_volume_obj(self.context) - new_size = volume.size + 1 - - self._smbfs_driver.extend_volume(volume, new_size) - - mock_check_ext_support.assert_called_once_with(volume, 
new_size) - mock_get_active_img.assert_called_once_with(volume) - self._vhdutils.resize_vhd.assert_called_once_with( - mock_get_active_img.return_value, - new_size * units.Gi, - is_file_max_size=False) - - @ddt.data({'snapshots_exist': True}, - {'vol_fmt': smbfs.WindowsSmbfsDriver._DISK_FORMAT_VHD, - 'snapshots_exist': True, - 'expected_exc': exception.InvalidVolume}) - @ddt.unpack - @mock.patch.object(smbfs.WindowsSmbfsDriver, - 'get_volume_format') - @mock.patch.object(smbfs.WindowsSmbfsDriver, - '_snapshots_exist') - def test_check_extend_support(self, mock_snapshots_exist, - mock_get_volume_format, - vol_fmt=None, snapshots_exist=False, - share_eligible=True, - expected_exc=None): - vol_fmt = vol_fmt or self._smbfs_driver._DISK_FORMAT_VHDX - - volume = fake_volume.fake_volume_obj( - self.context, provider_location='fake_provider_location') - new_size = volume.size + 1 - - mock_snapshots_exist.return_value = snapshots_exist - mock_get_volume_format.return_value = vol_fmt - - if expected_exc: - self.assertRaises(expected_exc, - self._smbfs_driver._check_extend_volume_support, - volume, new_size) - else: - self._smbfs_driver._check_extend_volume_support(volume, new_size) - - mock_get_volume_format.assert_called_once_with(volume) - mock_snapshots_exist.assert_called_once_with(volume) - - @ddt.data({}, - {'delete_latest': True}, - {'volume_status': 'available'}, - {'snap_info_contains_snap_id': False}) - @ddt.unpack - @mock.patch.object(remotefs.RemoteFSSnapDriverDistributed, - '_delete_snapshot') - @mock.patch.object(smbfs.WindowsSmbfsDriver, '_local_volume_dir') - @mock.patch.object(smbfs.WindowsSmbfsDriver, '_local_path_volume_info') - @mock.patch.object(smbfs.WindowsSmbfsDriver, '_write_info_file') - @mock.patch.object(smbfs.WindowsSmbfsDriver, '_read_info_file') - @mock.patch.object(smbfs.WindowsSmbfsDriver, - '_nova_assisted_vol_snap_delete') - def test_delete_snapshot(self, mock_nova_assisted_snap_del, - mock_read_info_file, mock_write_info_file, - 
mock_local_path_volume_info, - mock_get_local_dir, - mock_remotefs_snap_delete, - volume_status='in-use', - snap_info_contains_snap_id=True, - delete_latest=False): - snapshot = self._simple_snapshot() - snapshot.volume.status = volume_status - - fake_snap_file = 'snap_file' - fake_snap_parent_path = os.path.join(self._FAKE_MNT_POINT, - 'snap_file_parent') - active_img = 'active_img' if not delete_latest else fake_snap_file - - snap_info = dict(active=active_img) - if snap_info_contains_snap_id: - snap_info[snapshot.id] = fake_snap_file - - mock_info_path = mock_local_path_volume_info.return_value - mock_read_info_file.return_value = snap_info - mock_get_local_dir.return_value = self._FAKE_MNT_POINT - self._vhdutils.get_vhd_parent_path.return_value = ( - fake_snap_parent_path) - - expected_delete_info = {'file_to_merge': fake_snap_file, - 'volume_id': snapshot.volume.id} - - self._smbfs_driver._delete_snapshot(snapshot) - - if volume_status != 'in-use': - mock_remotefs_snap_delete.assert_called_once_with(snapshot) - elif snap_info_contains_snap_id: - mock_local_path_volume_info.assert_called_once_with( - snapshot.volume) - mock_read_info_file.assert_called_once_with( - mock_info_path, empty_if_missing=True) - mock_nova_assisted_snap_del.assert_called_once_with( - snapshot._context, snapshot, expected_delete_info) - - exp_merged_img_path = os.path.join(self._FAKE_MNT_POINT, - fake_snap_file) - self._smbfs_driver._delete.assert_called_once_with( - exp_merged_img_path) - - if delete_latest: - self._vhdutils.get_vhd_parent_path.assert_called_once_with( - exp_merged_img_path) - exp_active = os.path.basename(fake_snap_parent_path) - else: - exp_active = active_img - - self.assertEqual(exp_active, snap_info['active']) - self.assertNotIn(snap_info, snapshot.id) - mock_write_info_file.assert_called_once_with(mock_info_path, - snap_info) - - if volume_status != 'in-use' or not snap_info_contains_snap_id: - mock_nova_assisted_snap_del.assert_not_called() - 
mock_write_info_file.assert_not_called() - - def test_create_volume_from_unavailable_snapshot(self): - self.snapshot.status = fields.SnapshotStatus.ERROR - self.assertRaises( - exception.InvalidSnapshot, - self._smbfs_driver.create_volume_from_snapshot, - self.volume, self.snapshot) - - @ddt.data(True, False) - def test_copy_volume_to_image(self, has_parent=False): - drv = self._smbfs_driver - - fake_image_meta = {'id': 'fake-image-id'} - fake_img_format = self._smbfs_driver._DISK_FORMAT_VHDX - - if has_parent: - fake_volume_path = self._FAKE_SNAPSHOT_PATH - fake_parent_path = self._FAKE_VOLUME_PATH - else: - fake_volume_path = self._FAKE_VOLUME_PATH - fake_parent_path = None - - fake_active_image = os.path.basename(fake_volume_path) - - drv.get_active_image_from_info = mock.Mock( - return_value=fake_active_image) - drv._local_volume_dir = mock.Mock( - return_value=self._FAKE_MNT_POINT) - drv.get_volume_format = mock.Mock( - return_value=fake_img_format) - drv._vhdutils.get_vhd_parent_path.return_value = ( - fake_parent_path) - - with mock.patch.object(image_utils, 'upload_volume') as ( - fake_upload_volume): - volume = self._simple_volume() - drv.copy_volume_to_image( - mock.sentinel.context, volume, - mock.sentinel.image_service, fake_image_meta) - - if has_parent: - fake_temp_image_name = '%s.temp_image.%s.%s' % ( - volume.id, - fake_image_meta['id'], - fake_img_format) - fake_temp_image_path = os.path.join( - self._FAKE_MNT_POINT, - fake_temp_image_name) - fake_active_image_path = os.path.join( - self._FAKE_MNT_POINT, - fake_active_image) - upload_path = fake_temp_image_path - - drv._vhdutils.convert_vhd.assert_called_once_with( - fake_active_image_path, - fake_temp_image_path) - drv._delete.assert_called_once_with( - fake_temp_image_path) - else: - upload_path = fake_volume_path - - fake_upload_volume.assert_called_once_with( - mock.sentinel.context, mock.sentinel.image_service, - fake_image_meta, upload_path, fake_img_format) - - def 
test_copy_image_to_volume(self): - drv = self._smbfs_driver - - drv.get_volume_format = mock.Mock( - return_value=mock.sentinel.volume_format) - drv.local_path = mock.Mock( - return_value=self._FAKE_VOLUME_PATH) - drv.configuration = mock.MagicMock() - drv.configuration.volume_dd_blocksize = mock.sentinel.block_size - - with mock.patch.object(image_utils, - 'fetch_to_volume_format') as fake_fetch: - volume = self._simple_volume() - drv.copy_image_to_volume( - mock.sentinel.context, volume, - mock.sentinel.image_service, - mock.sentinel.image_id) - fake_fetch.assert_called_once_with( - mock.sentinel.context, - mock.sentinel.image_service, - mock.sentinel.image_id, - self._FAKE_VOLUME_PATH, mock.sentinel.volume_format, - mock.sentinel.block_size) - drv._vhdutils.resize_vhd.assert_called_once_with( - self._FAKE_VOLUME_PATH, - volume.size * units.Gi, - is_file_max_size=False) - - def test_copy_volume_from_snapshot(self): - drv = self._smbfs_driver - snapshot = self._simple_snapshot() - fake_volume_info = { - snapshot.id: 'fake_snapshot_file_name'} - fake_img_info = mock.MagicMock() - fake_img_info.backing_file = self._FAKE_VOLUME_NAME - - drv._local_path_volume_info = mock.Mock( - return_value=self._FAKE_VOLUME_PATH + '.info') - drv._local_volume_dir = mock.Mock( - return_value=self._FAKE_MNT_POINT) - drv._read_info_file = mock.Mock( - return_value=fake_volume_info) - drv._qemu_img_info = mock.Mock( - return_value=fake_img_info) - drv.local_path = mock.Mock( - return_value=mock.sentinel.new_volume_path) - - volume = self._simple_volume() - drv._copy_volume_from_snapshot(snapshot, - volume, volume.size) - - drv._delete.assert_called_once_with(mock.sentinel.new_volume_path) - drv._vhdutils.convert_vhd.assert_called_once_with( - self._FAKE_VOLUME_PATH, - mock.sentinel.new_volume_path) - drv._vhdutils.resize_vhd.assert_called_once_with( - mock.sentinel.new_volume_path, - volume.size * units.Gi, - is_file_max_size=False) - - def test_rebase_img(self): - drv = 
self._smbfs_driver - drv._rebase_img( - self._FAKE_SNAPSHOT_PATH, - self._FAKE_VOLUME_NAME, 'vhdx') - drv._vhdutils.reconnect_parent_vhd.assert_called_once_with( - self._FAKE_SNAPSHOT_PATH, self._FAKE_VOLUME_PATH) - - def test_copy_volume_image(self): - self._smbfs_driver._copy_volume_image(mock.sentinel.src, - mock.sentinel.dest) - self._smbfs_driver._pathutils.copy.assert_called_once_with( - mock.sentinel.src, mock.sentinel.dest) - - def test_get_pool_name_from_share(self): - self._smbfs_driver._pool_mappings = { - mock.sentinel.share: mock.sentinel.pool} - - pool = self._smbfs_driver._get_pool_name_from_share( - mock.sentinel.share) - self.assertEqual(mock.sentinel.pool, pool) - - def test_get_share_from_pool_name(self): - self._smbfs_driver._pool_mappings = { - mock.sentinel.share: mock.sentinel.pool} - - share = self._smbfs_driver._get_share_from_pool_name( - mock.sentinel.pool) - self.assertEqual(mock.sentinel.share, share) - - def test_get_pool_name_from_share_exception(self): - self._smbfs_driver._pool_mappings = {} - - self.assertRaises(exception.SmbfsException, - self._smbfs_driver._get_share_from_pool_name, - mock.sentinel.pool) diff --git a/cinder/tests/unit/windows/test_windows.py b/cinder/tests/unit/windows/test_windows.py deleted file mode 100644 index 4ba4916e2..000000000 --- a/cinder/tests/unit/windows/test_windows.py +++ /dev/null @@ -1,478 +0,0 @@ -# Copyright 2012 Pedro Navarro Perez -# Copyright 2015 Cloudbase Solutions SRL -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the -# License for the specific language governing permissions and limitations -# under the License. - -""" -Unit tests for Windows Server 2012 OpenStack Cinder volume driver -""" - -import os - -import ddt -import mock -from oslo_utils import fileutils -from oslo_utils import units - -from cinder import context -from cinder import exception -from cinder.image import image_utils -from cinder import test -from cinder.tests.unit import fake_snapshot -from cinder.tests.unit import fake_volume -from cinder.tests.unit.windows import db_fakes -from cinder.volume import configuration as conf -from cinder.volume.drivers.windows import windows - - -@ddt.ddt -class TestWindowsDriver(test.TestCase): - @mock.patch.object(windows, 'utilsfactory') - def setUp(self, mock_utilsfactory): - super(TestWindowsDriver, self).setUp() - self.configuration = conf.Configuration(None) - self.configuration.append_config_values(windows.windows_opts) - self.flags(windows_iscsi_lun_path='fake_iscsi_lun_path') - self.flags(image_conversion_dir='fake_image_conversion_dir') - - self._driver = windows.WindowsDriver(configuration=self.configuration) - - @mock.patch.object(fileutils, 'ensure_tree') - def test_do_setup(self, mock_ensure_tree): - self._driver.do_setup(mock.sentinel.context) - - mock_ensure_tree.assert_has_calls( - [mock.call('fake_iscsi_lun_path'), - mock.call('fake_image_conversion_dir')]) - - @mock.patch.object(windows.WindowsDriver, '_get_portals') - def test_check_for_setup_error(self, mock_get_portals): - self._driver.check_for_setup_error() - - mock_get_portals.assert_called_once_with() - - @ddt.data(True, False) - def test_get_portals(self, portals_available=True): - iscsi_port = mock.sentinel.iscsi_port - available_ips = ['fake_ip0', 'fake_ip1', 'fake_unrequested_ip'] - requested_ips = available_ips[:-1] + ['fake_inexistent_ips'] - - available_portals = ([":".join([ip_addr, str(iscsi_port)]) - for ip_addr in available_ips] - if portals_available else []) - - 
self._driver.configuration = mock.Mock() - self._driver.configuration.iscsi_port = iscsi_port - self._driver.configuration.iscsi_ip_address = requested_ips[0] - self._driver.configuration.iscsi_secondary_ip_addresses = ( - requested_ips[1:]) - - self._driver._tgt_utils.get_portal_locations.return_value = ( - available_portals) - - if portals_available: - portals = self._driver._get_portals() - self.assertEqual(set(available_portals[:-1]), set(portals)) - else: - self.assertRaises(exception.VolumeDriverException, - self._driver._get_portals) - - self._driver._tgt_utils.get_portal_locations.assert_called_once_with( - available_only=True, - fail_if_none_found=True) - - @ddt.data(True, False) - @mock.patch.object(windows.WindowsDriver, '_get_portals') - @mock.patch.object(windows.WindowsDriver, '_get_target_name') - def test_get_host_information(self, multipath, mock_get_target_name, - mock_get_portals): - tgt_utils = self._driver._tgt_utils - - fake_auth_meth = 'CHAP' - fake_chap_username = 'fake_chap_username' - fake_chap_password = 'fake_chap_password' - fake_target_iqn = 'fake_target_iqn' - fake_host_info = {'target_iqn': 'fake_target_iqn', - 'fake_prop': 'fake_value'} - fake_provider_auth = "%s %s %s" % (fake_auth_meth, - fake_chap_username, - fake_chap_password) - fake_portals = [mock.sentinel.portal_location0, - mock.sentinel.portal_location1] - - volume = fake_volume.fake_volume_obj(mock.sentinel.context, - provider_auth=fake_provider_auth) - - mock_get_target_name.return_value = mock.sentinel.target_name - mock_get_portals.return_value = fake_portals - tgt_utils.get_target_information.return_value = fake_host_info - - expected_host_info = dict(fake_host_info, - auth_method=fake_auth_meth, - auth_username=fake_chap_username, - auth_password=fake_chap_password, - target_discovered=False, - target_portal=fake_portals[0], - target_lun=0, - volume_id=volume.id) - if multipath: - expected_host_info['target_portals'] = fake_portals - expected_host_info['target_iqns'] 
= [fake_target_iqn] * 2 - expected_host_info['target_luns'] = [0] * 2 - - host_info = self._driver._get_host_information(volume, multipath) - - self.assertEqual(expected_host_info, host_info) - - mock_get_target_name.assert_called_once_with(volume) - mock_get_portals.assert_called_once_with() - tgt_utils.get_target_information.assert_called_once_with( - mock.sentinel.target_name) - - @mock.patch.object(windows.WindowsDriver, '_get_host_information') - def test_initialize_connection(self, mock_get_host_info): - tgt_utils = self._driver._tgt_utils - - volume = fake_volume.fake_volume_obj(mock.sentinel.fake_context) - fake_initiator = db_fakes.get_fake_connector_info() - fake_initiator['multipath'] = mock.sentinel.multipath - fake_host_info = {'fake_host_prop': 'fake_value'} - - mock_get_host_info.return_value = fake_host_info - - expected_conn_info = {'driver_volume_type': 'iscsi', - 'data': fake_host_info} - conn_info = self._driver.initialize_connection(volume, - fake_initiator) - - self.assertEqual(expected_conn_info, conn_info) - mock_get_host_info.assert_called_once_with( - volume, mock.sentinel.multipath) - mock_associate = tgt_utils.associate_initiator_with_iscsi_target - mock_associate.assert_called_once_with( - fake_initiator['initiator'], - volume.provider_location) - - def test_terminate_connection(self): - volume = fake_volume.fake_volume_obj(mock.sentinel.fake_context) - fake_initiator = db_fakes.get_fake_connector_info() - - self._driver.terminate_connection(volume, fake_initiator) - - self._driver._tgt_utils.deassociate_initiator.assert_called_once_with( - fake_initiator['initiator'], volume.provider_location) - - @mock.patch.object(windows.WindowsDriver, 'local_path') - def test_create_volume(self, mock_local_path): - volume = fake_volume.fake_volume_obj(mock.sentinel.fake_context) - - self._driver.create_volume(volume) - - mock_local_path.assert_called_once_with(volume) - self._driver._tgt_utils.create_wt_disk.assert_called_once_with( - 
mock_local_path.return_value, - volume.name, - size_mb=volume.size * 1024) - - def test_local_path(self): - volume = fake_volume.fake_volume_obj(mock.sentinel.fake_context) - - fake_lun_path = 'fake_lun_path' - self.flags(windows_iscsi_lun_path=fake_lun_path) - - disk_format = 'vhd' - mock_get_fmt = self._driver._tgt_utils.get_supported_disk_format - mock_get_fmt.return_value = disk_format - - disk_path = self._driver.local_path(volume) - - expected_fname = "%s.%s" % (volume.name, disk_format) - expected_disk_path = os.path.join(fake_lun_path, - expected_fname) - self.assertEqual(expected_disk_path, disk_path) - mock_get_fmt.assert_called_once_with() - - @mock.patch.object(windows.WindowsDriver, 'local_path') - @mock.patch.object(fileutils, 'delete_if_exists') - def test_delete_volume(self, mock_delete_if_exists, mock_local_path): - volume = fake_volume.fake_volume_obj(mock.sentinel.fake_context) - - self._driver.delete_volume(volume) - - mock_local_path.assert_called_once_with(volume) - self._driver._tgt_utils.remove_wt_disk.assert_called_once_with( - volume.name) - mock_delete_if_exists.assert_called_once_with( - mock_local_path.return_value) - - def test_create_snapshot(self): - volume = fake_volume.fake_volume_obj(context.get_admin_context()) - snapshot = fake_snapshot.fake_snapshot_obj(context.get_admin_context(), - volume_id=volume.id) - snapshot.volume = volume - - self._driver.create_snapshot(snapshot) - - self._driver._tgt_utils.create_snapshot.assert_called_once_with( - snapshot.volume_name, snapshot.name) - - @mock.patch.object(windows.WindowsDriver, 'local_path') - def test_create_volume_from_snapshot(self, mock_local_path): - volume = fake_volume.fake_volume_obj(context.get_admin_context()) - snapshot = fake_snapshot.fake_snapshot_obj(context.get_admin_context()) - snapshot.volume = volume - - self._driver.create_volume_from_snapshot(volume, snapshot) - - self._driver._tgt_utils.export_snapshot.assert_called_once_with( - snapshot.name, 
mock_local_path.return_value) - self._driver._tgt_utils.import_wt_disk.assert_called_once_with( - mock_local_path.return_value, volume.name) - - def test_delete_snapshot(self): - snapshot = fake_snapshot.fake_snapshot_obj(context.get_admin_context()) - - self._driver.delete_snapshot(snapshot) - - self._driver._tgt_utils.delete_snapshot.assert_called_once_with( - snapshot.name) - - def test_get_target_name(self): - volume = fake_volume.fake_volume_obj(mock.sentinel.fake_context) - expected_target_name = "%s%s" % ( - self._driver.configuration.iscsi_target_prefix, - volume.name) - - target_name = self._driver._get_target_name(volume) - self.assertEqual(expected_target_name, target_name) - - @mock.patch.object(windows.WindowsDriver, '_get_target_name') - @mock.patch.object(windows.utils, 'generate_username') - @mock.patch.object(windows.utils, 'generate_password') - def test_create_export(self, mock_generate_password, - mock_generate_username, - mock_get_target_name): - tgt_utils = self._driver._tgt_utils - volume = fake_volume.fake_volume_obj(mock.sentinel.fake_context) - self._driver.configuration.chap_username = None - self._driver.configuration.chap_password = None - self._driver.configuration.use_chap_auth = True - fake_chap_username = 'fake_chap_username' - fake_chap_password = 'fake_chap_password' - - mock_get_target_name.return_value = mock.sentinel.target_name - mock_generate_username.return_value = fake_chap_username - mock_generate_password.return_value = fake_chap_password - tgt_utils.iscsi_target_exists.return_value = False - - vol_updates = self._driver.create_export(mock.sentinel.context, - volume, - mock.sentinel.connector) - - mock_get_target_name.assert_called_once_with(volume) - tgt_utils.iscsi_target_exists.assert_called_once_with( - mock.sentinel.target_name) - tgt_utils.set_chap_credentials.assert_called_once_with( - mock.sentinel.target_name, - fake_chap_username, - fake_chap_password) - tgt_utils.add_disk_to_target.assert_called_once_with( - 
volume.name, mock.sentinel.target_name) - - expected_provider_auth = ' '.join(('CHAP', - fake_chap_username, - fake_chap_password)) - expected_vol_updates = dict( - provider_location=mock.sentinel.target_name, - provider_auth=expected_provider_auth) - self.assertEqual(expected_vol_updates, vol_updates) - - @mock.patch.object(windows.WindowsDriver, '_get_target_name') - def test_remove_export(self, mock_get_target_name): - volume = fake_volume.fake_volume_obj(mock.sentinel.fake_context) - - self._driver.remove_export(mock.sentinel.context, volume) - - mock_get_target_name.assert_called_once_with(volume) - self._driver._tgt_utils.delete_iscsi_target.assert_called_once_with( - mock_get_target_name.return_value) - - @mock.patch.object(windows.WindowsDriver, 'local_path') - @mock.patch.object(image_utils, 'temporary_file') - @mock.patch.object(image_utils, 'fetch_to_vhd') - @mock.patch('os.unlink') - def test_copy_image_to_volume(self, mock_unlink, mock_fetch_to_vhd, - mock_tmp_file, mock_local_path): - tgt_utils = self._driver._tgt_utils - volume = fake_volume.fake_volume_obj(mock.sentinel.fake_context) - - mock_tmp_file.return_value.__enter__.return_value = ( - mock.sentinel.tmp_vhd_path) - mock_local_path.return_value = mock.sentinel.vol_vhd_path - - self._driver.copy_image_to_volume(mock.sentinel.context, - volume, - mock.sentinel.image_service, - mock.sentinel.image_id) - - mock_local_path.assert_called_once_with(volume) - mock_tmp_file.assert_called_once_with(suffix='.vhd') - image_utils.fetch_to_vhd.assert_called_once_with( - mock.sentinel.context, mock.sentinel.image_service, - mock.sentinel.image_id, mock.sentinel.tmp_vhd_path, - self._driver.configuration.volume_dd_blocksize) - - mock_unlink.assert_called_once_with(mock.sentinel.vol_vhd_path) - self._driver._vhdutils.convert_vhd.assert_called_once_with( - mock.sentinel.tmp_vhd_path, - mock.sentinel.vol_vhd_path, - tgt_utils.get_supported_vhd_type.return_value) - 
self._driver._vhdutils.resize_vhd.assert_called_once_with( - mock.sentinel.vol_vhd_path, - volume.size * units.Gi, - is_file_max_size=False) - - tgt_utils.change_wt_disk_status.assert_has_calls( - [mock.call(volume.name, enabled=False), - mock.call(volume.name, enabled=True)]) - - @mock.patch.object(windows.uuidutils, 'generate_uuid') - def test_temporary_snapshot(self, mock_generate_uuid): - tgt_utils = self._driver._tgt_utils - mock_generate_uuid.return_value = mock.sentinel.snap_uuid - expected_snap_name = '%s-tmp-snapshot-%s' % ( - mock.sentinel.volume_name, mock.sentinel.snap_uuid) - - with self._driver._temporary_snapshot( - mock.sentinel.volume_name) as snap_name: - self.assertEqual(expected_snap_name, snap_name) - tgt_utils.create_snapshot.assert_called_once_with( - mock.sentinel.volume_name, expected_snap_name) - - tgt_utils.delete_snapshot.assert_called_once_with( - expected_snap_name) - - @mock.patch.object(windows.WindowsDriver, '_temporary_snapshot') - @mock.patch.object(image_utils, 'upload_volume') - @mock.patch.object(fileutils, 'delete_if_exists') - def test_copy_volume_to_image(self, mock_delete_if_exists, - mock_upload_volume, - mock_tmp_snap): - tgt_utils = self._driver._tgt_utils - - disk_format = 'vhd' - fake_image_meta = db_fakes.get_fake_image_meta() - volume = fake_volume.fake_volume_obj(mock.sentinel.fake_context) - fake_img_conv_dir = 'fake_img_conv_dir' - self.flags(image_conversion_dir=fake_img_conv_dir) - - tgt_utils.get_supported_disk_format.return_value = disk_format - mock_tmp_snap.return_value.__enter__.return_value = ( - mock.sentinel.tmp_snap_name) - - expected_tmp_vhd_path = os.path.join( - fake_img_conv_dir, - fake_image_meta['id'] + '.' 
+ disk_format) - - self._driver.copy_volume_to_image( - mock.sentinel.context, volume, - mock.sentinel.image_service, - fake_image_meta) - - mock_tmp_snap.assert_called_once_with(volume.name) - tgt_utils.export_snapshot.assert_called_once_with( - mock.sentinel.tmp_snap_name, - expected_tmp_vhd_path) - mock_upload_volume.assert_called_once_with( - mock.sentinel.context, mock.sentinel.image_service, - fake_image_meta, expected_tmp_vhd_path, 'vhd') - mock_delete_if_exists.assert_called_once_with( - expected_tmp_vhd_path) - - @mock.patch.object(windows.WindowsDriver, '_temporary_snapshot') - @mock.patch.object(windows.WindowsDriver, 'local_path') - def test_create_cloned_volume(self, mock_local_path, - mock_tmp_snap): - tgt_utils = self._driver._tgt_utils - - volume = fake_volume.fake_volume_obj(mock.sentinel.fake_context) - src_volume = fake_volume.fake_volume_obj(mock.sentinel.fake_context) - - mock_tmp_snap.return_value.__enter__.return_value = ( - mock.sentinel.tmp_snap_name) - mock_local_path.return_value = mock.sentinel.vol_vhd_path - - self._driver.create_cloned_volume(volume, src_volume) - - mock_tmp_snap.assert_called_once_with(src_volume.name) - tgt_utils.export_snapshot.assert_called_once_with( - mock.sentinel.tmp_snap_name, - mock.sentinel.vol_vhd_path) - self._driver._vhdutils.resize_vhd.assert_called_once_with( - mock.sentinel.vol_vhd_path, volume.size * units.Gi, - is_file_max_size=False) - tgt_utils.import_wt_disk.assert_called_once_with( - mock.sentinel.vol_vhd_path, volume.name) - - @mock.patch('os.path.splitdrive') - def test_get_capacity_info(self, mock_splitdrive): - mock_splitdrive.return_value = (mock.sentinel.drive, - mock.sentinel.path_tail) - fake_size_gb = 2 - fake_free_space_gb = 1 - self._driver._hostutils.get_volume_info.return_value = ( - fake_size_gb * units.Gi, - fake_free_space_gb * units.Gi) - - total_gb, free_gb = self._driver._get_capacity_info() - - self.assertEqual(fake_size_gb, total_gb) - self.assertEqual(fake_free_space_gb, 
free_gb) - - self._driver._hostutils.get_volume_info.assert_called_once_with( - mock.sentinel.drive) - mock_splitdrive.assert_called_once_with('fake_iscsi_lun_path') - - @mock.patch.object(windows.WindowsDriver, '_get_capacity_info') - def test_update_volume_stats(self, mock_get_capacity_info): - mock_get_capacity_info.return_value = ( - mock.sentinel.size_gb, - mock.sentinel.free_space_gb) - - self.flags(volume_backend_name='volume_backend_name') - self.flags(reserved_percentage=10) - - expected_volume_stats = dict( - volume_backend_name='volume_backend_name', - vendor_name='Microsoft', - driver_version=self._driver.VERSION, - storage_protocol='iSCSI', - total_capacity_gb=mock.sentinel.size_gb, - free_capacity_gb=mock.sentinel.free_space_gb, - reserved_percentage=10, - QoS_support=False) - - self._driver._update_volume_stats() - self.assertEqual(expected_volume_stats, - self._driver._stats) - - def test_extend_volume(self): - volume = fake_volume.fake_volume_obj(mock.sentinel.fake_context) - new_size_gb = 2 - expected_additional_sz_mb = 1024 - - self._driver.extend_volume(volume, new_size_gb) - - self._driver._tgt_utils.extend_wt_disk.assert_called_once_with( - volume.name, expected_additional_sz_mb) diff --git a/cinder/tests/unit/zonemanager/__init__.py b/cinder/tests/unit/zonemanager/__init__.py deleted file mode 100644 index e69de29bb..000000000 diff --git a/cinder/tests/unit/zonemanager/test_brcd_fc_san_lookup_service.py b/cinder/tests/unit/zonemanager/test_brcd_fc_san_lookup_service.py deleted file mode 100644 index 6c915054a..000000000 --- a/cinder/tests/unit/zonemanager/test_brcd_fc_san_lookup_service.py +++ /dev/null @@ -1,153 +0,0 @@ -# (c) Copyright 2016 Brocade Communications Systems Inc. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. 
You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -# - - -"""Unit tests for brcd fc san lookup service.""" - -import mock -from oslo_config import cfg -from oslo_utils import importutils - -from cinder import test -from cinder.volume import configuration as conf -import cinder.zonemanager.drivers.brocade.brcd_fc_san_lookup_service \ - as brcd_lookup - - -parsed_switch_port_wwns = ['20:1a:00:05:1e:e8:e3:29', - '10:00:00:90:fa:34:40:f6'] -switch_data = (""" - Type Pid COS PortName NodeName TTL(sec) - N 011a00; 2,3; %(port_1)s; 20:1a:00:05:1e:e8:e3:29; na - FC4s: FCP - PortSymb: [26] "222222 - 1:1:1 - LPe12442" - NodeSymb: [32] "SomeSym 7211" - Fabric Port Name: 20:1a:00:05:1e:e8:e3:29 - Permanent Port Name: 22:22:00:22:ac:00:bc:b0 - Port Index: 0 - Share Area: No - Device Shared in Other AD: No - Redirect: No - Partial: No - LSAN: No - N 010100; 2,3; %(port_2)s; 20:00:00:00:af:00:00:af; na - FC4s: FCP - PortSymb: [26] "333333 - 1:1:1 - LPe12442" - NodeSymb: [32] "SomeSym 2222" - Fabric Port Name: 10:00:00:90:fa:34:40:f6 - Permanent Port Name: 22:22:00:22:ac:00:bc:b0 - Port Index: 0 - Share Area: No - Device Shared in Other AD: No - Redirect: No - Partial: No - LSAN: No""" % {'port_1': parsed_switch_port_wwns[0], - 'port_2': parsed_switch_port_wwns[1]}) - -_device_map_to_verify = { - 'BRCD_FAB_2': { - 'initiator_port_wwn_list': [parsed_switch_port_wwns[1].replace(':', - '')], - 'target_port_wwn_list': [parsed_switch_port_wwns[0].replace(':', '')]}} - - -class TestBrcdFCSanLookupService(brcd_lookup.BrcdFCSanLookupService, - test.TestCase): - - def setUp(self): - super(TestBrcdFCSanLookupService, self).setUp() 
- self.configuration = conf.Configuration(None) - self.configuration.set_default('fc_fabric_names', 'BRCD_FAB_2', - 'fc-zone-manager') - self.configuration.fc_fabric_names = 'BRCD_FAB_2' - self.configuration.brcd_sb_connector = ('cinder.tests.unit.zonemanager' - '.test_brcd_fc_san_lookup_' - 'service' - '.FakeBrcdFCZoneClientCLI') - self.create_configuration() - - # override some of the functions - def __init__(self, *args, **kwargs): - test.TestCase.__init__(self, *args, **kwargs) - - def create_configuration(self): - fc_fabric_opts = [] - fc_fabric_opts.append(cfg.StrOpt('fc_fabric_address', - default='10.24.49.100', help='')) - fc_fabric_opts.append(cfg.StrOpt('fc_fabric_user', - default='admin', help='')) - fc_fabric_opts.append(cfg.StrOpt('fc_fabric_password', - default='password', help='', - secret=True)) - fc_fabric_opts.append(cfg.PortOpt('fc_fabric_port', - default=22, help='')) - config = conf.Configuration(fc_fabric_opts, 'BRCD_FAB_2') - self.fabric_configs = {'BRCD_FAB_2': config} - - def get_client(self, protocol='HTTPS'): - conn = ('cinder.tests.unit.zonemanager.' - 'test_brcd_fc_san_lookup_service.' 
+ - ('FakeBrcdFCZoneClientCLI' if protocol == "CLI" - else 'FakeBrcdHttpFCZoneClient')) - client = importutils.import_object( - conn, - ipaddress="10.24.48.213", - username="admin", - password="password", - key="/home/stack/.ssh/id_rsa", - port=22, - vfid="2", - protocol=protocol - ) - return client - - @mock.patch.object(brcd_lookup.BrcdFCSanLookupService, - '_get_southbound_client') - def test_get_device_mapping_from_network(self, get_southbound_client_mock): - initiator_list = [parsed_switch_port_wwns[1]] - target_list = [parsed_switch_port_wwns[0], '20240002ac000a40'] - get_southbound_client_mock.return_value = self.get_client("HTTPS") - device_map = self.get_device_mapping_from_network( - initiator_list, target_list) - self.assertDictEqual(_device_map_to_verify, device_map) - - -class FakeClient(object): - def is_supported_firmware(self): - return True - - def get_nameserver_info(self): - ns_info_list_expected = (parsed_switch_port_wwns) - return ns_info_list_expected - - def close_connection(self): - pass - - def cleanup(self): - pass - - -class FakeBrcdFCZoneClientCLI(FakeClient): - def __init__(self, ipaddress, username, - password, port, key, vfid, protocol): - self.firmware_supported = True - - -class FakeBrcdHttpFCZoneClient(FakeClient): - - def __init__(self, ipaddress, username, - password, port, key, vfid, protocol): - self.firmware_supported = True diff --git a/cinder/tests/unit/zonemanager/test_brcd_fc_zone_client_cli.py b/cinder/tests/unit/zonemanager/test_brcd_fc_zone_client_cli.py deleted file mode 100644 index c00beab27..000000000 --- a/cinder/tests/unit/zonemanager/test_brcd_fc_zone_client_cli.py +++ /dev/null @@ -1,312 +0,0 @@ -# (c) Copyright 2016 Brocade Communications Systems Inc. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. 
You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -# - - -"""Unit tests for brcd fc zone client cli.""" - -import mock -from oslo_concurrency import processutils - -from cinder import exception -from cinder import test -from cinder.zonemanager.drivers.brocade import (brcd_fc_zone_client_cli - as client_cli) -import cinder.zonemanager.drivers.brocade.fc_zone_constants as zone_constant - - -nsshow = '20:1a:00:05:1e:e8:e3:29' -switch_data = [' N 011a00;2,3;20:1a:00:05:1e:e8:e3:29;\ - 20:1a:00:05:1e:e8:e3:29;na', - ' Fabric Port Name: 20:1a:00:05:1e:e8:e3:29'] -cfgactvshow = ['Effective configuration:\n', - ' cfg:\tOpenStack_Cfg\t\n', - ' zone:\topenstack50060b0000c26604201900051ee8e329\t\n', - '\t\t50:06:0b:00:00:c2:66:04\n', - '\t\t20:19:00:05:1e:e8:e3:29\n'] -active_zoneset = { - 'zones': { - 'openstack50060b0000c26604201900051ee8e329': - ['50:06:0b:00:00:c2:66:04', '20:19:00:05:1e:e8:e3:29']}, - 'active_zone_config': 'OpenStack_Cfg'} -active_zoneset_multiple_zones = { - 'zones': { - 'openstack50060b0000c26604201900051ee8e329': - ['50:06:0b:00:00:c2:66:04', '20:19:00:05:1e:e8:e3:29'], - 'openstack50060b0000c26602201900051ee8e327': - ['50:06:0b:00:00:c2:66:02', '20:19:00:05:1e:e8:e3:27']}, - 'active_zone_config': 'OpenStack_Cfg'} -new_zone_memb_same = { - 'openstack50060b0000c26604201900051ee8e329': - ['50:06:0b:00:00:c2:66:04', '20:19:00:05:1e:e8:e3:29']} -new_zone_memb_not_same = { - 'openstack50060b0000c26604201900051ee8e330': - ['50:06:0b:00:00:c2:66:04', '20:19:00:05:1e:e8:e3:30']} -new_zone = {'openstack10000012345678902001009876543210': - ['10:00:00:12:34:56:78:90', '20:01:00:98:76:54:32:10']} 
-new_zones = {'openstack10000012345678902001009876543210': - ['10:00:00:12:34:56:78:90', '20:01:00:98:76:54:32:10'], - 'openstack10000011111111112001001111111111': - ['10:00:00:11:11:11:11:11', '20:01:00:11:11:11:11:11']} -zone_names_to_delete = 'openstack50060b0000c26604201900051ee8e329' -supported_firmware = ['Kernel: 2.6', 'Fabric OS: v7.0.1'] -unsupported_firmware = ['Fabric OS: v6.2.1'] - - -class TestBrcdFCZoneClientCLI(client_cli.BrcdFCZoneClientCLI, test.TestCase): - - def setUp(self): - super(TestBrcdFCZoneClientCLI, self).setUp() - - # override some of the functions - def __init__(self, *args, **kwargs): - test.TestCase.__init__(self, *args, **kwargs) - - @mock.patch.object(client_cli.BrcdFCZoneClientCLI, '_get_switch_info') - def test_get_active_zone_set(self, get_switch_info_mock): - cmd_list = [zone_constant.GET_ACTIVE_ZONE_CFG] - get_switch_info_mock.return_value = cfgactvshow - active_zoneset_returned = self.get_active_zone_set() - get_switch_info_mock.assert_called_once_with(cmd_list) - self.assertDictEqual(active_zoneset, active_zoneset_returned) - - @mock.patch.object(client_cli.BrcdFCZoneClientCLI, '_run_ssh') - def test_get_active_zone_set_ssh_error(self, run_ssh_mock): - run_ssh_mock.side_effect = processutils.ProcessExecutionError - self.assertRaises(exception.BrocadeZoningCliException, - self.get_active_zone_set) - - @mock.patch.object(client_cli.BrcdFCZoneClientCLI, 'get_active_zone_set') - @mock.patch.object(client_cli.BrcdFCZoneClientCLI, 'apply_zone_change') - @mock.patch.object(client_cli.BrcdFCZoneClientCLI, '_cfg_save') - def test_add_zones_new_zone_no_activate(self, cfg_save_mock, - apply_zone_change_mock, - get_active_zs_mock): - get_active_zs_mock.return_value = active_zoneset - self.add_zones(new_zones, False, None) - self.assertEqual(1, get_active_zs_mock.call_count) - self.assertEqual(3, apply_zone_change_mock.call_count) - cfg_save_mock.assert_called_once_with() - - @mock.patch.object(client_cli.BrcdFCZoneClientCLI, 
'get_active_zone_set') - @mock.patch.object(client_cli.BrcdFCZoneClientCLI, 'apply_zone_change') - @mock.patch.object(client_cli.BrcdFCZoneClientCLI, 'activate_zoneset') - def test_add_zones_new_zone_activate(self, activate_zoneset_mock, - apply_zone_change_mock, - get_active_zs_mock): - get_active_zs_mock.return_value = active_zoneset - self.add_zones(new_zone, True, active_zoneset) - self.assertEqual(2, apply_zone_change_mock.call_count) - activate_zoneset_mock.assert_called_once_with( - active_zoneset['active_zone_config']) - - @mock.patch.object(client_cli.BrcdFCZoneClientCLI, 'get_active_zone_set') - @mock.patch.object(client_cli.BrcdFCZoneClientCLI, 'activate_zoneset') - @mock.patch.object(client_cli.BrcdFCZoneClientCLI, 'apply_zone_change') - def test_update_zone_exists_memb_same(self, apply_zone_change_mock, - activate_zoneset_mock, - get_active_zs_mock): - get_active_zs_mock.return_value = active_zoneset - self.update_zones(new_zone_memb_same, True, zone_constant.ZONE_ADD, - active_zoneset) - self.assertEqual(1, apply_zone_change_mock.call_count) - - @mock.patch.object(client_cli.BrcdFCZoneClientCLI, 'get_active_zone_set') - @mock.patch.object(client_cli.BrcdFCZoneClientCLI, 'activate_zoneset') - @mock.patch.object(client_cli.BrcdFCZoneClientCLI, 'apply_zone_change') - def test_update_zone_exists_memb_not_same(self, apply_zone_change_mock, - activate_zoneset_mock, - get_active_zs_mock): - get_active_zs_mock.return_value = active_zoneset - self.update_zones(new_zone_memb_not_same, True, - zone_constant.ZONE_ADD, active_zoneset) - self.assertEqual(1, apply_zone_change_mock.call_count) - - @mock.patch.object(client_cli.BrcdFCZoneClientCLI, 'get_active_zone_set') - @mock.patch.object(client_cli.BrcdFCZoneClientCLI, 'activate_zoneset') - @mock.patch.object(client_cli.BrcdFCZoneClientCLI, 'apply_zone_change') - def test_add_zone_all_exists_memb_not_same(self, apply_zone_change_mock, - activate_zoneset_mock, - get_active_zs_mock): - - 
self.add_zones(new_zone_memb_not_same, True, active_zoneset) - call_args = apply_zone_change_mock.call_args[0][0] - self.assertEqual(0, get_active_zs_mock.call_count) - self.assertEqual(2, apply_zone_change_mock.call_count) - self.assertIn(zone_constant.CFG_ADD.strip(), call_args) - - @mock.patch.object(client_cli.BrcdFCZoneClientCLI, '_ssh_execute') - def test_activate_zoneset(self, ssh_execute_mock): - ssh_execute_mock.return_value = True - return_value = self.activate_zoneset('zoneset1') - self.assertTrue(return_value) - - @mock.patch.object(client_cli.BrcdFCZoneClientCLI, '_ssh_execute') - def test_deactivate_zoneset(self, ssh_execute_mock): - ssh_execute_mock.return_value = True - return_value = self.deactivate_zoneset() - self.assertTrue(return_value) - - @mock.patch.object(client_cli.BrcdFCZoneClientCLI, 'apply_zone_change') - @mock.patch.object(client_cli.BrcdFCZoneClientCLI, '_cfg_save') - def test_delete_zones_activate_false(self, cfg_save_mock, - apply_zone_change_mock): - with mock.patch.object(self, '_zone_delete') as zone_delete_mock: - self.delete_zones(zone_names_to_delete, False, - active_zoneset_multiple_zones) - self.assertEqual(1, apply_zone_change_mock.call_count) - zone_delete_mock.assert_called_once_with(zone_names_to_delete) - cfg_save_mock.assert_called_once_with() - - @mock.patch.object(client_cli.BrcdFCZoneClientCLI, 'apply_zone_change') - @mock.patch.object(client_cli.BrcdFCZoneClientCLI, 'activate_zoneset') - def test_delete_zones_activate_true(self, activate_zs_mock, - apply_zone_change_mock): - with mock.patch.object(self, '_zone_delete') \ - as zone_delete_mock: - self.delete_zones(zone_names_to_delete, True, - active_zoneset_multiple_zones) - self.assertEqual(1, apply_zone_change_mock.call_count) - zone_delete_mock.assert_called_once_with(zone_names_to_delete) - activate_zs_mock.assert_called_once_with( - active_zoneset['active_zone_config']) - - @mock.patch.object(client_cli.BrcdFCZoneClientCLI, '_get_switch_info') - def 
test_get_nameserver_info(self, get_switch_info_mock): - ns_info_list_expected = ['20:1a:00:05:1e:e8:e3:29'] - get_switch_info_mock.return_value = (switch_data) - ns_info_list = self.get_nameserver_info() - self.assertEqual(ns_info_list_expected, ns_info_list) - - @mock.patch.object(client_cli.BrcdFCZoneClientCLI, '_run_ssh') - def test_get_nameserver_info_ssh_error(self, run_ssh_mock): - run_ssh_mock.side_effect = processutils.ProcessExecutionError - self.assertRaises(exception.BrocadeZoningCliException, - self.get_nameserver_info) - - @mock.patch.object(client_cli.BrcdFCZoneClientCLI, '_ssh_execute') - def test__cfg_save(self, ssh_execute_mock): - cmd_list = [zone_constant.CFG_SAVE] - self._cfg_save() - ssh_execute_mock.assert_called_once_with(cmd_list, True, 1) - - @mock.patch.object(client_cli.BrcdFCZoneClientCLI, 'apply_zone_change') - def test__zone_delete(self, apply_zone_change_mock): - zone_name = 'testzone' - cmd_list = ['zonedelete', '"testzone"'] - self._zone_delete(zone_name) - apply_zone_change_mock.assert_called_once_with(cmd_list) - - @mock.patch.object(client_cli.BrcdFCZoneClientCLI, 'apply_zone_change') - def test__cfg_trans_abort(self, apply_zone_change_mock): - cmd_list = [zone_constant.CFG_ZONE_TRANS_ABORT] - with mock.patch.object(self, '_is_trans_abortable') \ - as is_trans_abortable_mock: - is_trans_abortable_mock.return_value = True - self._cfg_trans_abort() - is_trans_abortable_mock.assert_called_once_with() - apply_zone_change_mock.assert_called_once_with(cmd_list) - - @mock.patch.object(client_cli.BrcdFCZoneClientCLI, '_run_ssh') - def test__is_trans_abortable_true(self, run_ssh_mock): - cmd_list = [zone_constant.CFG_SHOW_TRANS] - run_ssh_mock.return_value = (Stream(zone_constant.TRANS_ABORTABLE), - None) - data = self._is_trans_abortable() - self.assertTrue(data) - run_ssh_mock.assert_called_once_with(cmd_list, True, 1) - - @mock.patch.object(client_cli.BrcdFCZoneClientCLI, '_run_ssh') - def test__is_trans_abortable_ssh_error(self, 
run_ssh_mock): - run_ssh_mock.return_value = (Stream(), Stream()) - self.assertRaises(exception.BrocadeZoningCliException, - self._is_trans_abortable) - - @mock.patch.object(client_cli.BrcdFCZoneClientCLI, '_run_ssh') - def test__is_trans_abortable_false(self, run_ssh_mock): - cmd_list = [zone_constant.CFG_SHOW_TRANS] - cfgtransshow = 'There is no outstanding zoning transaction' - run_ssh_mock.return_value = (Stream(cfgtransshow), None) - data = self._is_trans_abortable() - self.assertFalse(data) - run_ssh_mock.assert_called_once_with(cmd_list, True, 1) - - @mock.patch.object(client_cli.BrcdFCZoneClientCLI, '_run_ssh') - def test_apply_zone_change(self, run_ssh_mock): - cmd_list = [zone_constant.CFG_SAVE] - run_ssh_mock.return_value = (None, None) - self.apply_zone_change(cmd_list) - run_ssh_mock.assert_called_once_with(cmd_list, True, 1) - - @mock.patch.object(client_cli.BrcdFCZoneClientCLI, '_run_ssh') - def test__get_switch_info(self, run_ssh_mock): - cmd_list = [zone_constant.NS_SHOW] - nsshow_list = [nsshow] - run_ssh_mock.return_value = (Stream(nsshow), Stream()) - switch_data = self._get_switch_info(cmd_list) - self.assertEqual(nsshow_list, switch_data) - run_ssh_mock.assert_called_once_with(cmd_list, True, 1) - - def test__parse_ns_output(self): - invalid_switch_data = [' N 011a00;20:1a:00:05:1e:e8:e3:29'] - expected_wwn_list = ['20:1a:00:05:1e:e8:e3:29'] - return_wwn_list = self._parse_ns_output(switch_data) - self.assertEqual(expected_wwn_list, return_wwn_list) - self.assertRaises(exception.InvalidParameterValue, - self._parse_ns_output, invalid_switch_data) - - @mock.patch.object(client_cli.BrcdFCZoneClientCLI, '_execute_shell_cmd') - def test_is_supported_firmware(self, exec_shell_cmd_mock): - exec_shell_cmd_mock.return_value = (supported_firmware, None) - self.assertTrue(self.is_supported_firmware()) - - @mock.patch.object(client_cli.BrcdFCZoneClientCLI, '_execute_shell_cmd') - def test_is_supported_firmware_invalid(self, exec_shell_cmd_mock): - 
exec_shell_cmd_mock.return_value = (unsupported_firmware, None) - self.assertFalse(self.is_supported_firmware()) - - @mock.patch.object(client_cli.BrcdFCZoneClientCLI, '_execute_shell_cmd') - def test_is_supported_firmware_no_ssh_response(self, exec_shell_cmd_mock): - exec_shell_cmd_mock.return_value = (None, Stream()) - self.assertFalse(self.is_supported_firmware()) - - @mock.patch.object(client_cli.BrcdFCZoneClientCLI, '_execute_shell_cmd') - def test_is_supported_firmware_ssh_error(self, exec_shell_cmd_mock): - exec_shell_cmd_mock.side_effect = processutils.ProcessExecutionError - self.assertRaises(exception.BrocadeZoningCliException, - self.is_supported_firmware) - - -class Channel(object): - def recv_exit_status(self): - return 0 - - -class Stream(object): - def __init__(self, buffer=''): - self.buffer = buffer - self.channel = Channel() - - def readlines(self): - return self.buffer - - def splitlines(self): - return self.buffer.splitlines() - - def close(self): - pass - - def flush(self): - self.buffer = '' diff --git a/cinder/tests/unit/zonemanager/test_brcd_fc_zone_driver.py b/cinder/tests/unit/zonemanager/test_brcd_fc_zone_driver.py deleted file mode 100644 index ea15d5bf6..000000000 --- a/cinder/tests/unit/zonemanager/test_brcd_fc_zone_driver.py +++ /dev/null @@ -1,261 +0,0 @@ -# (c) Copyright 2014 Brocade Communications Systems Inc. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
-# - - -"""Unit tests for Brocade fc zone driver.""" - -import mock -from oslo_config import cfg -from oslo_utils import importutils -import paramiko -import requests - -from cinder import exception -from cinder import test -from cinder.volume import configuration as conf -from cinder.zonemanager.drivers.brocade import brcd_fc_zone_driver as driver - -_active_cfg_before_add = {} -_active_cfg_before_delete = { - 'zones': { - 'openstack10008c7cff523b0120240002ac000a50': ( - ['10:00:8c:7c:ff:52:3b:01', - '20:24:00:02:ac:00:0a:50']), 't_zone': ['1,0']}, - 'active_zone_config': 'cfg1'} -_activate = True -_zone_name = 'openstack10008c7cff523b0120240002ac000a50' -_target_ns_map = {'100000051e55a100': ['20240002ac000a50']} -_initiator_ns_map = {'100000051e55a100': ['10008c7cff523b01']} -_zone_map_to_add = {'openstack10008c7cff523b0120240002ac000a50': ( - ['10:00:8c:7c:ff:52:3b:01', '20:24:00:02:ac:00:0a:50'])} - -_initiator_target_map = {'10008c7cff523b01': ['20240002ac000a50']} -_device_map_to_verify = { - '100000051e55a100': { - 'initiator_port_wwn_list': [ - '10008c7cff523b01'], 'target_port_wwn_list': ['20240002ac000a50']}} -_fabric_wwn = '100000051e55a100' - - -class BrcdFcZoneDriverBaseTest(object): - - def setup_config(self, is_normal, mode): - fc_test_opts = [ - cfg.StrOpt('fc_fabric_address_BRCD_FAB_1', default='10.24.48.213', - help='FC Fabric names'), - ] - configuration = conf.Configuration(fc_test_opts) - # fill up config - configuration.zoning_mode = 'fabric' - configuration.zone_driver = ('cinder.tests.unit.zonemanager.' - 'test_brcd_fc_zone_driver.' - 'FakeBrcdFCZoneDriver') - configuration.brcd_sb_connector = ('cinder.tests.unit.zonemanager.' - 'test_brcd_fc_zone_driver' - '.FakeBrcdFCZoneClientCLI') - configuration.zoning_policy = 'initiator-target' - configuration.zone_activate = True - configuration.zone_name_prefix = 'openstack' - configuration.fc_san_lookup_service = ('cinder.tests.unit.zonemanager.' - 'test_brcd_fc_zone_driver.' 
- 'FakeBrcdFCSanLookupService') - - configuration.fc_fabric_names = 'BRCD_FAB_1' - configuration.fc_fabric_address_BRCD_FAB_1 = '10.24.48.213' - configuration.fc_southbound_connector = 'CLI' - if is_normal: - configuration.fc_fabric_user_BRCD_FAB_1 = 'admin' - else: - configuration.fc_fabric_user_BRCD_FAB_1 = 'invaliduser' - configuration.fc_fabric_password_BRCD_FAB_1 = 'password' - - if mode == 1: - configuration.zoning_policy_BRCD_FAB_1 = 'initiator-target' - elif mode == 2: - configuration.zoning_policy_BRCD_FAB_1 = 'initiator' - else: - configuration.zoning_policy_BRCD_FAB_1 = 'initiator-target' - configuration.zone_activate_BRCD_FAB_1 = True - configuration.zone_name_prefix_BRCD_FAB_1 = 'openstack_fab1' - return configuration - - -class TestBrcdFcZoneDriver(BrcdFcZoneDriverBaseTest, test.TestCase): - - def setUp(self): - super(TestBrcdFcZoneDriver, self).setUp() - # setup config for normal flow - self.setup_driver(self.setup_config(True, 1)) - GlobalVars._zone_state = [] - - def setup_driver(self, config): - self.driver = importutils.import_object( - 'cinder.zonemanager.drivers.brocade.brcd_fc_zone_driver' - '.BrcdFCZoneDriver', configuration=config) - - def fake__get_active_zone_set(self, brcd_sb_connector, fabric_ip): - return GlobalVars._active_cfg - - def get_client(self, protocol='HTTPS'): - conn = ('cinder.tests.unit.zonemanager.test_brcd_fc_zone_driver.' 
+ - ('FakeBrcdFCZoneClientCLI' if protocol == "CLI" - else 'FakeBrcdHttpFCZoneClient')) - client = importutils.import_object( - conn, - ipaddress="10.24.48.213", - username="admin", - password="password", - key="/home/stack/.ssh/id_rsa", - port=22, - vfid="2", - protocol=protocol - ) - return client - - def fake_get_san_context(self, target_wwn_list): - fabric_map = {} - return fabric_map - - @mock.patch.object(driver.BrcdFCZoneDriver, '_get_southbound_client') - def test_add_connection(self, get_southbound_client_mock): - """Normal flow for i-t mode.""" - GlobalVars._is_normal_test = True - GlobalVars._zone_state = [] - GlobalVars._active_cfg = _active_cfg_before_add - get_southbound_client_mock.return_value = self.get_client("HTTPS") - self.driver.add_connection('BRCD_FAB_1', _initiator_target_map) - self.assertIn(_zone_name, GlobalVars._zone_state) - - @mock.patch.object(driver.BrcdFCZoneDriver, '_get_southbound_client') - def test_delete_connection(self, get_southbound_client_mock): - GlobalVars._is_normal_test = True - get_southbound_client_mock.return_value = self.get_client("CLI") - GlobalVars._active_cfg = _active_cfg_before_delete - self.driver.delete_connection( - 'BRCD_FAB_1', _initiator_target_map) - self.assertNotIn(_zone_name, GlobalVars._zone_state) - - @mock.patch.object(driver.BrcdFCZoneDriver, '_get_southbound_client') - def test_add_connection_for_initiator_mode(self, get_southbound_client_mk): - """Normal flow for i mode.""" - GlobalVars._is_normal_test = True - get_southbound_client_mk.return_value = self.get_client("CLI") - GlobalVars._active_cfg = _active_cfg_before_add - self.setup_driver(self.setup_config(True, 2)) - self.driver.add_connection('BRCD_FAB_1', _initiator_target_map) - self.assertIn(_zone_name, GlobalVars._zone_state) - - @mock.patch.object(driver.BrcdFCZoneDriver, '_get_southbound_client') - def test_delete_connection_for_initiator_mode(self, - get_southbound_client_mk): - GlobalVars._is_normal_test = True - 
get_southbound_client_mk.return_value = self.get_client("HTTPS") - GlobalVars._active_cfg = _active_cfg_before_delete - self.setup_driver(self.setup_config(True, 2)) - self.driver.delete_connection( - 'BRCD_FAB_1', _initiator_target_map) - self.assertNotIn(_zone_name, GlobalVars._zone_state) - - def test_add_connection_for_invalid_fabric(self): - """Test abnormal flows.""" - GlobalVars._is_normal_test = True - GlobalVars._active_cfg = _active_cfg_before_add - GlobalVars._is_normal_test = False - self.setup_driver(self.setup_config(False, 1)) - self.assertRaises(exception.FCZoneDriverException, - self.driver.add_connection, - 'BRCD_FAB_1', - _initiator_target_map) - - def test_delete_connection_for_invalid_fabric(self): - GlobalVars._active_cfg = _active_cfg_before_delete - GlobalVars._is_normal_test = False - self.setup_driver(self.setup_config(False, 1)) - self.assertRaises(exception.FCZoneDriverException, - self.driver.delete_connection, - 'BRCD_FAB_1', - _initiator_target_map) - - -class FakeClient(object): - def get_active_zone_set(self): - return GlobalVars._active_cfg - - def add_zones(self, zones, isActivate, active_zone_set): - GlobalVars._zone_state.extend(zones.keys()) - - def delete_zones(self, zone_names, isActivate, active_zone_set): - zone_list = zone_names.split(';') - GlobalVars._zone_state = [ - x for x in GlobalVars._zone_state if x not in zone_list] - - def is_supported_firmware(self): - return True - - def get_nameserver_info(self): - return _target_ns_map - - def close_connection(self): - pass - - def cleanup(self): - pass - - -class FakeBrcdFCZoneClientCLI(FakeClient): - def __init__(self, ipaddress, username, - password, port, key, vfid, protocol): - self.firmware_supported = True - if not GlobalVars._is_normal_test: - raise paramiko.SSHException("Unable to connect to fabric.") - - -class FakeBrcdHttpFCZoneClient(FakeClient): - - def __init__(self, ipaddress, username, - password, port, key, vfid, protocol): - self.firmware_supported = True - 
if not GlobalVars._is_normal_test: - raise requests.exception.HTTPError("Unable to connect to fabric") - - -class FakeBrcdFCSanLookupService(object): - - def get_device_mapping_from_network(self, - initiator_wwn_list, - target_wwn_list): - device_map = {} - initiators = [] - targets = [] - for i in initiator_wwn_list: - if i in _initiator_ns_map[_fabric_wwn]: - initiators.append(i) - for t in target_wwn_list: - if t in _target_ns_map[_fabric_wwn]: - targets.append(t) - device_map[_fabric_wwn] = { - 'initiator_port_wwn_list': initiators, - 'target_port_wwn_list': targets} - return device_map - - -class GlobalVars(object): - global _active_cfg - _active_cfg = {} - global _zone_state - _zone_state = list() - global _is_normal_test - _is_normal_test = True diff --git a/cinder/tests/unit/zonemanager/test_brcd_http_fc_zone_client.py b/cinder/tests/unit/zonemanager/test_brcd_http_fc_zone_client.py deleted file mode 100644 index 53c4c588e..000000000 --- a/cinder/tests/unit/zonemanager/test_brcd_http_fc_zone_client.py +++ /dev/null @@ -1,830 +0,0 @@ -# (c) Copyright 2016 Brocade Communications Systems Inc. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
-# - -"""Unit tests for brcd fc zone client http(s).""" -import time - -from oslo_utils import encodeutils - -import mock -from mock import patch -import six - -from cinder import exception -from cinder import test -from cinder.zonemanager.drivers.brocade import (brcd_http_fc_zone_client - as client) -import cinder.zonemanager.drivers.brocade.fc_zone_constants as zone_constant - - -cfgs = {'openstack_cfg': 'zone1;zone2'} -cfgs_to_delete = { - 'openstack_cfg': 'zone1;zone2;openstack50060b0000c26604201900051ee8e329'} -zones = {'zone1': '20:01:00:05:33:0e:96:15;20:00:00:05:33:0e:93:11', - 'zone2': '20:01:00:05:33:0e:96:14;20:00:00:05:33:0e:93:11'} - -zones_to_delete = { - 'zone1': '20:01:00:05:33:0e:96:15;20:00:00:05:33:0e:93:11', - 'zone2': '20:01:00:05:33:0e:96:14;20:00:00:05:33:0e:93:11', - 'openstack50060b0000c26604201900051ee8e329': - '50:06:0b:00:00:c2:66:04;20:19:00:05:1e:e8:e3:29'} - -alias = {} -qlps = {} -ifas = {} -parsed_raw_zoneinfo = "" -random_no = '' -session = None -active_cfg = 'openstack_cfg' -activate = True -no_activate = False -vf_enable = True -ns_info = ['10:00:00:05:1e:7c:64:96'] -nameserver_info = """ - - - - -NSInfo Page - - -
---BEGIN NS INFO
-
-2;8;020800;N    ;10:00:00:05:1e:7c:64:96;20:00:00:05:1e:7c:64:96;[89]""" \
-"""Brocade-825 | 3.0.4.09 | DCM-X3650-94 | Microsoft Windows Server 2003 R2"""\
-    """| Service Pack 2";FCP ;      3;20:08:00:05:1e:89:54:a0;"""\
-    """0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0;000000;port8"""\
-    """
---END NS INFO
-
-
- - -""" -mocked_zone_string = 'zonecfginfo=openstack_cfg zone1;zone2 '\ - 'zone1 20:01:00:05:33:0e:96:15;20:00:00:05:33:0e:93:11 '\ - 'zone2 20:01:00:05:33:0e:96:14;20:00:00:05:33:0e:93:11 '\ - 'alia1 10:00:00:05:1e:7c:64:96;10:21:10:05:33:0e:96:12 '\ - 'qlp 10:11:f4:ce:46:ae:68:6c;20:11:f4:ce:46:ae:68:6c '\ - 'fa1 20:15:f4:ce:96:ae:68:6c;20:11:f4:ce:46:ae:68:6c '\ - 'openstack_cfg null &saveonly=false' -mocked_zone_string_no_activate = 'zonecfginfo=openstack_cfg zone1;zone2 '\ - 'zone1 20:01:00:05:33:0e:96:15;20:00:00:05:33:0e:93:11 '\ - 'zone2 20:01:00:05:33:0e:96:14;20:00:00:05:33:0e:93:11 '\ - 'alia1 10:00:00:05:1e:7c:64:96;10:21:10:05:33:0e:96:12 '\ - 'qlp 10:11:f4:ce:46:ae:68:6c;20:11:f4:ce:46:ae:68:6c '\ - 'fa1 20:15:f4:ce:96:ae:68:6c;20:11:f4:ce:46:ae:68:6c &saveonly=true' -zone_string_to_post = "zonecfginfo=openstack_cfg "\ - "openstack50060b0000c26604201900051ee8e329;zone1;zone2 "\ - "openstack50060b0000c26604201900051ee8e329 "\ - "50:06:0b:00:00:c2:66:04;20:19:00:05:1e:e8:e3:29 "\ - "zone1 20:01:00:05:33:0e:96:15;20:00:00:05:33:0e:93:11 "\ - "zone2 20:01:00:05:33:0e:96:14;20:00:00:05:33:0e:93:11 "\ - "openstack_cfg null &saveonly=false" -zone_string_to_post_no_activate = "zonecfginfo=openstack_cfg "\ - "openstack50060b0000c26604201900051ee8e329;zone1;zone2 "\ - "openstack50060b0000c26604201900051ee8e329 "\ - "50:06:0b:00:00:c2:66:04;20:19:00:05:1e:e8:e3:29 " \ - "zone1 20:01:00:05:33:0e:96:15;20:00:00:05:33:0e:93:11 "\ - "zone2 20:01:00:05:33:0e:96:14;20:00:00:05:33:0e:93:11 "\ - "&saveonly=true" -zone_string_to_post_invalid_request = "zonecfginfo=openstack_cfg "\ - "openstack50060b0000c26604201900051ee8e32900000000000000000000000000;"\ - "zone1;zone2 openstack50060b0000c26604201900051ee8e329000000000000000000000"\ - "00000 50:06:0b:00:00:c2:66:04;20:19:00:05:1e:e8:e3:29 "\ - "zone1 20:01:00:05:33:0e:96:15;20:00:00:05:33:0e:93:11 "\ - "zone2 20:01:00:05:33:0e:96:14;20:00:00:05:33:0e:93:11 &saveonly=true" -zone_string_del_to_post = 
"zonecfginfo=openstack_cfg zone1;zone2"\ - " zone1 20:01:00:05:33:0e:96:15;20:00:00:05:33:0e:93:11 "\ - "zone2 20:01:00:05:33:0e:96:14;20:00:00:05:33:0e:93:11 "\ - "openstack_cfg null &saveonly=false" -zone_string_del_to_post_no_active = "zonecfginfo=openstack_cfg zone1;zone2"\ - " zone1 20:01:00:05:33:0e:96:15;20:00:00:05:33:0e:93:11 " \ - "zone2 20:01:00:05:33:0e:96:14;20:00:00:05:33:0e:93:11 "\ - "&saveonly=true" -zone_post_page = """ - -
---BEGIN ZONE_TXN_INFO
-txnId=34666
-adId=0
-user=admin
-roleUser=admin
-openTxnOwner=
-openTxnId=0
-openTxnAbortable=0
-txnStarttime=1421916354
-txnEndtime=1421916355
-currStateInt=4
-prevStateInt=3
-actionInt=5
-currState=done
-prevState=progress
-action=error
-sessionId=5892021
-selfAborted=false
-status=done
-errorCode=-1
-errorMessage=Name too long
---END ZONE_TXN_INFO
-
-""" -zone_post_page_no_error = """ - -
---BEGIN ZONE_TXN_INFO
-txnId=34666
-adId=0
-user=admin
-roleUser=admin
-openTxnOwner=
-openTxnId=0
-openTxnAbortable=0
-txnStarttime=1421916354
-txnEndtime=1421916355
-currStateInt=4
-prevStateInt=3
-actionInt=5
-currState=done
-prevState=progress
-action=error
-sessionId=5892021
-selfAborted=false
-status=done
-errorCode=0
-errorMessage=
---END ZONE_TXN_INFO
-
-""" -secinfo_resp = """ - -
---BEGIN SECINFO
-SECURITY = OFF
-RANDOM = 6281590
-DefaultPasswdBitmap = 0
-primaryFCS = no
-switchType = 66
-resource = 10.24.48.210
-REALM = FC Switch Administration
-AUTHMETHOD = Custom_Basic
-hasUpfrontLogin=yes
-AUTHVERSION = 1
-vfEnabled=false
-vfSupported=true
---END SECINFO
-
- -""" -authenticate_resp = """ -
---BEGIN AUTHENTICATE
-authenticated = yes
-username=admin
-userrole=admin
-adCapable=1
-currentAD=AD0
-trueADEnvironment=0
-adId=0
-adList=ALL
-contextType=0
---END AUTHENTICATE
-
- -""" -un_authenticate_resp = """ - - - -Authentication - - -
---BEGIN AUTHENTICATE
-authenticated = no
-errCode = -3
-authType = Custom_Basic
-realm = FC Switch Administration
---END AUTHENTICATE
-
- -""" -switch_page_resp = """ - - - - - -
---BEGIN SWITCH INFORMATION
-didOffset=96
-swFWVersion=v7.3.0b_rc1_bld06
-swDomain=2
---END SWITCH INFORMATION
-
- - -""" -switch_page_invalid_firm = """ - - - - - -
---BEGIN SWITCH INFORMATION
-didOffset=96
-swFWVersion=v6.1.1
-swDomain=2
---END SWITCH INFORMATION
-
- - -""" -parsed_value = """ -didOffset=96 -swFWVersion=v7.3.0b_rc1_bld06 -swDomain=2 -""" -parsed_session_info_vf = """ -sessionId=524461483 -user=admin -userRole=admin -isAdminRole=Yes -authSource=0 -sessionIp=172.26.1.146 -valid=yes -adName= -adId=128 -adCapable=1 -currentAD=AD0 -currentADId=0 -homeAD=AD0 -trueADEnvironment=0 -adList= -adIdList= -pfAdmin=0 -switchIsMember=0 -definedADList=AD0,Physical Fabric -definedADIdList=0,255, -effectiveADList=AD0,Physical Fabric -rc=0 -err= -contextType=1 -vfEnabled=true -vfSupported=true -HomeVF=128 -sessionLFId=2 -isContextManageable=1 -manageableLFList=2,128, -activeLFList=128,2, -""" -session_info_vf = """ - -
---BEGIN SESSION
-sessionId=524461483
-user=admin
-userRole=admin
-isAdminRole=Yes
-authSource=0
-sessionIp=172.26.1.146
-valid=yes
-adName=
-adId=128
-adCapable=1
-currentAD=AD0
-currentADId=0
-homeAD=AD0
-trueADEnvironment=0
-adList=
-adIdList=
-pfAdmin=0
-switchIsMember=0
-definedADList=AD0,Physical Fabric
-definedADIdList=0,255,
-effectiveADList=AD0,Physical Fabric
-rc=0
-err=
-contextType=1
-vfEnabled=true
-vfSupported=true
-HomeVF=128
-sessionLFId=2
-isContextManageable=1
-manageableLFList=2,128,
-activeLFList=128,2,
---END SESSION
-
- -""" -session_info_vf_not_changed = """ - -
---BEGIN SESSION
-sessionId=524461483
-user=admin
-userRole=admin
-isAdminRole=Yes
-authSource=0
-sessionIp=172.26.1.146
-User-Agent=Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML,
-valid=yes
-adName=
-adId=128
-adCapable=1
-currentAD=AD0
-currentADId=0
-homeAD=AD0
-trueADEnvironment=0
-adList=
-adIdList=
-pfAdmin=0
-switchIsMember=0
-definedADList=AD0,Physical Fabric
-definedADIdList=0,255,
-effectiveADList=AD0,Physical Fabric
-rc=0
-err=
-contextType=1
-vfEnabled=true
-vfSupported=true
-HomeVF=128
-sessionLFId=128
-isContextManageable=1
-manageableLFList=2,128,
-activeLFList=128,2,
---END SESSION
-
- -""" -session_info_AD = """ - - - -Webtools Session Info - - -
---BEGIN SESSION
-sessionId=-2096740776
-user=
-userRole=root
-isAdminRole=No
-authSource=0
-sessionIp=
-User-Agent=
-valid=no
-adName=
-adId=0
-adCapable=1
-currentAD=AD0
-currentADId=0
-homeAD=AD0
-trueADEnvironment=0
-adList=
-adIdList=
-pfAdmin=0
-switchIsMember=1
-definedADList=AD0,Physical Fabric
-definedADIdList=0,255,
-effectiveADList=AD0,Physical Fabric
-rc=-2
-err=Could not obtain session data from store
-contextType=0
---END SESSION
-
- - -""" -zone_info = """ - - - -Zone Configuration Information - - -
---BEGIN ZONE CHANGE
-LastZoneChangeTime=1421926251
---END ZONE CHANGE
-isZoneTxnSupported=true
-ZoneLicense=true
-QuickLoopLicense=true
-DefZoneStatus=noaccess
-McDataDefaultZone=false
-McDataSafeZone=false
-AvailableZoneSize=1043890
---BEGIN ZONE INFO
-openstack_cfg zone1;zone2 """\
-"""zone1 20:01:00:05:33:0e:96:15;20:00:00:05:33:0e:93:11 """\
-    """zone2 20:01:00:05:33:0e:96:14;20:00:00:05:33:0e:93:11 """\
-    """alia1 10:00:00:05:1e:7c:64:96;10:21:10:05:33:0e:96:12 """\
-    """qlp 10:11:f4:ce:46:ae:68:6c;20:11:f4:ce:46:ae:68:6c """\
-    """fa1 20:15:f4:ce:96:ae:68:6c;20:11:f4:ce:46:ae:68:6c """\
-    """openstack_cfg null 1045274"""\
-    """--END ZONE INFO
-
- - - -""" - -active_zone_set = { - 'zones': - {'zone1': - ['20:01:00:05:33:0e:96:15', '20:00:00:05:33:0e:93:11'], - 'zone2': - ['20:01:00:05:33:0e:96:14', '20:00:00:05:33:0e:93:11']}, - 'active_zone_config': 'openstack_cfg'} -updated_zones = {'zone1': '20:01:00:05:33:0e:96:15;20:00:00:05:33:0e:93:11', - 'zone2': '20:01:00:05:33:0e:96:14;20:00:00:05:33:0e:93:11', - 'test_updated_zone': - '20:01:00:05:33:0e:96:10;20:00:00:05:33:0e:93:11'} -updated_cfgs = {'openstack_cfg': 'test_updated_zone;zone1;zone2'} -valid_zone_name = "openstack50060b0000c26604201900051ee8e329" - - -class TestBrcdHttpFCZoneClient(client.BrcdHTTPFCZoneClient, test.TestCase): - - def setUp(self): - self.auth_header = "YWRtaW46cGFzc3dvcmQ6NDM4ODEyNTIw" - self.switch_user = "admin" - self.switch_pwd = "password" - self.protocol = "HTTPS" - self.conn = None - self.alias = {} - self.qlps = {} - self.ifas = {} - self.parsed_raw_zoneinfo = "" - self.random_no = '' - self.session = None - super(TestBrcdHttpFCZoneClient, self).setUp() - - # override some of the functions - def __init__(self, *args, **kwargs): - test.TestCase.__init__(self, *args, **kwargs) - - @patch.object(client.BrcdHTTPFCZoneClient, 'connect') - def test_create_auth_token(self, connect_mock): - connect_mock.return_value = secinfo_resp - self.assertEqual("Custom_Basic YWRtaW46cGFzc3dvcmQ6NjI4MTU5MA==", - self.create_auth_token()) - - @patch.object(client.BrcdHTTPFCZoneClient, 'connect') - def test_authenticate(self, connect_mock): - connect_mock.return_value = authenticate_resp - self.assertEqual( - (True, "Custom_Basic YWRtaW46eHh4Og=="), self.authenticate()) - - @patch.object(client.BrcdHTTPFCZoneClient, 'connect') - def test_authenticate_failed(self, connect_mock): - connect_mock.return_value = un_authenticate_resp - self.assertRaises( - exception.BrocadeZoningHttpException, self.authenticate) - - def test_get_parsed_data(self): - valid_delimiter1 = zone_constant.SWITCHINFO_BEGIN - valid_delimiter2 = zone_constant.SWITCHINFO_END - 
invalid_delimiter = "--END SWITCH INFORMATION1" - self.assertEqual(parsed_value, self.get_parsed_data( - switch_page_resp, valid_delimiter1, valid_delimiter2)) - self.assertRaises(exception.BrocadeZoningHttpException, - self.get_parsed_data, - switch_page_resp, - valid_delimiter1, - invalid_delimiter) - self.assertRaises(exception.BrocadeZoningHttpException, - self.get_parsed_data, - switch_page_resp, - invalid_delimiter, - valid_delimiter2) - - def test_get_nvp_value(self): - valid_keyname = zone_constant.FIRMWARE_VERSION - invalid_keyname = "swFWVersion1" - self.assertEqual( - "v7.3.0b_rc1_bld06", self.get_nvp_value(parsed_value, - valid_keyname)) - self.assertRaises(exception.BrocadeZoningHttpException, - self.get_nvp_value, - parsed_value, - invalid_keyname) - - def test_get_managable_vf_list(self): - manageable_list = ['2', '128'] - self.assertEqual( - manageable_list, self.get_managable_vf_list(session_info_vf)) - self.assertRaises(exception.BrocadeZoningHttpException, - self.get_managable_vf_list, session_info_AD) - - @mock.patch.object(client.BrcdHTTPFCZoneClient, 'is_vf_enabled') - def test_check_change_vf_context_vf_enabled(self, is_vf_enabled_mock): - is_vf_enabled_mock.return_value = (True, session_info_vf) - self.vfid = None - self.assertRaises( - exception.BrocadeZoningHttpException, - self.check_change_vf_context) - self.vfid = "2" - with mock.patch.object(self, 'change_vf_context') \ - as change_vf_context_mock: - self.check_change_vf_context() - change_vf_context_mock.assert_called_once_with( - self.vfid, session_info_vf) - - @mock.patch.object(client.BrcdHTTPFCZoneClient, 'is_vf_enabled') - def test_check_change_vf_context_vf_disabled(self, is_vf_enabled_mock): - is_vf_enabled_mock.return_value = (False, session_info_AD) - self.vfid = "128" - self.assertRaises( - exception.BrocadeZoningHttpException, - self.check_change_vf_context) - - @mock.patch.object(client.BrcdHTTPFCZoneClient, 'get_managable_vf_list') - 
@mock.patch.object(client.BrcdHTTPFCZoneClient, 'connect') - def test_change_vf_context_valid(self, connect_mock, - get_managable_vf_list_mock): - get_managable_vf_list_mock.return_value = ['2', '128'] - connect_mock.return_value = session_info_vf - self.assertIsNone(self.change_vf_context("2", session_info_vf)) - data = zone_constant.CHANGE_VF.format(vfid="2") - headers = {zone_constant.AUTH_HEADER: self.auth_header} - connect_mock.assert_called_once_with( - zone_constant.POST_METHOD, zone_constant.SESSION_PAGE, - data, headers) - - @mock.patch.object(client.BrcdHTTPFCZoneClient, 'get_managable_vf_list') - @mock.patch.object(client.BrcdHTTPFCZoneClient, 'connect') - def test_change_vf_context_vf_not_changed(self, - connect_mock, - get_managable_vf_list_mock): - get_managable_vf_list_mock.return_value = ['2', '128'] - connect_mock.return_value = session_info_vf_not_changed - self.assertRaises(exception.BrocadeZoningHttpException, - self.change_vf_context, "2", session_info_vf) - data = zone_constant.CHANGE_VF.format(vfid="2") - headers = {zone_constant.AUTH_HEADER: self.auth_header} - connect_mock.assert_called_once_with( - zone_constant.POST_METHOD, zone_constant.SESSION_PAGE, - data, headers) - - @mock.patch.object(client.BrcdHTTPFCZoneClient, 'get_managable_vf_list') - def test_change_vf_context_vfid_not_managaed(self, - get_managable_vf_list_mock): - get_managable_vf_list_mock.return_value = ['2', '128'] - self.assertRaises(exception.BrocadeZoningHttpException, - self.change_vf_context, "12", session_info_vf) - - @patch.object(client.BrcdHTTPFCZoneClient, 'connect') - def test_is_supported_firmware(self, connect_mock): - connect_mock.return_value = switch_page_resp - self.assertTrue(self.is_supported_firmware()) - - @patch.object(client.BrcdHTTPFCZoneClient, 'connect') - def test_is_supported_firmware_invalid(self, connect_mock): - connect_mock.return_value = switch_page_invalid_firm - self.assertFalse(self.is_supported_firmware()) - - 
@patch.object(client.BrcdHTTPFCZoneClient, 'connect') - def test_get_active_zone_set(self, connect_mock): - connect_mock.return_value = zone_info - returned_zone_map = self.get_active_zone_set() - self.assertDictEqual(active_zone_set, returned_zone_map) - - def test_form_zone_string(self): - new_alias = { - 'alia1': u'10:00:00:05:1e:7c:64:96;10:21:10:05:33:0e:96:12'} - new_qlps = {'qlp': u'10:11:f4:ce:46:ae:68:6c;20:11:f4:ce:46:ae:68:6c'} - new_ifas = {'fa1': u'20:15:f4:ce:96:ae:68:6c;20:11:f4:ce:46:ae:68:6c'} - self.assertEqual(type(self.form_zone_string( - cfgs, active_cfg, zones, new_alias, new_qlps, new_ifas, True)), - six.binary_type) - self.assertEqual( - encodeutils.safe_encode(mocked_zone_string), - self.form_zone_string( - cfgs, active_cfg, zones, new_alias, new_qlps, new_ifas, True)) - self.assertEqual( - encodeutils.safe_encode(mocked_zone_string_no_activate), - self.form_zone_string( - cfgs, active_cfg, zones, new_alias, new_qlps, new_ifas, False)) - - @patch.object(client.BrcdHTTPFCZoneClient, 'post_zone_data') - def test_add_zones_activate(self, post_zone_data_mock): - post_zone_data_mock.return_value = ("0", "") - self.cfgs = cfgs.copy() - self.zones = zones.copy() - self.alias = alias.copy() - self.qlps = qlps.copy() - self.ifas = ifas.copy() - self.active_cfg = active_cfg - add_zones_info = {valid_zone_name: - ['50:06:0b:00:00:c2:66:04', - '20:19:00:05:1e:e8:e3:29'] - } - self.add_zones(add_zones_info, True) - post_zone_data_mock.assert_called_once_with( - encodeutils.safe_encode(zone_string_to_post)) - - @patch.object(client.BrcdHTTPFCZoneClient, 'post_zone_data') - def test_add_zones_invalid_zone_name(self, post_zone_data_mock): - post_zone_data_mock.return_value = ("-1", "Name Too Long") - self.cfgs = cfgs.copy() - self.zones = zones.copy() - self.alias = alias.copy() - self.qlps = qlps.copy() - self.ifas = ifas.copy() - self.active_cfg = active_cfg - invalid_zone_name = valid_zone_name + "00000000000000000000000000" - add_zones_info = 
{invalid_zone_name: - ['50:06:0b:00:00:c2:66:04', - '20:19:00:05:1e:e8:e3:29'] - } - self.assertRaises( - exception.BrocadeZoningHttpException, - self.add_zones, add_zones_info, False) - - @patch.object(client.BrcdHTTPFCZoneClient, 'post_zone_data') - def test_add_zones_no_activate(self, post_zone_data_mock): - post_zone_data_mock.return_value = ("0", "") - self.cfgs = cfgs.copy() - self.zones = zones.copy() - self.alias = alias.copy() - self.qlps = qlps.copy() - self.ifas = ifas.copy() - self.active_cfg = active_cfg - add_zones_info = {valid_zone_name: - ['50:06:0b:00:00:c2:66:04', - '20:19:00:05:1e:e8:e3:29'] - } - self.add_zones(add_zones_info, False) - post_zone_data_mock.assert_called_once_with( - encodeutils.safe_encode(zone_string_to_post_no_activate)) - - @patch.object(client.BrcdHTTPFCZoneClient, 'post_zone_data') - def test_delete_zones_activate(self, post_zone_data_mock): - post_zone_data_mock.return_value = ("0", "") - self.cfgs = cfgs_to_delete.copy() - self.zones = zones_to_delete.copy() - self.alias = alias.copy() - self.qlps = qlps.copy() - self.ifas = ifas.copy() - self.active_cfg = active_cfg - delete_zones_info = valid_zone_name - - self.delete_zones(delete_zones_info, True) - post_zone_data_mock.assert_called_once_with( - encodeutils.safe_encode(zone_string_del_to_post)) - - @patch.object(client.BrcdHTTPFCZoneClient, 'post_zone_data') - def test_delete_zones_no_activate(self, post_zone_data_mock): - post_zone_data_mock.return_value = ("0", "") - self.cfgs = cfgs_to_delete.copy() - self.zones = zones_to_delete.copy() - self.alias = alias.copy() - self.qlps = qlps.copy() - self.ifas = ifas.copy() - self.active_cfg = active_cfg - delete_zones_info = valid_zone_name - self.delete_zones(delete_zones_info, False) - post_zone_data_mock.assert_called_once_with( - encodeutils.safe_encode(zone_string_del_to_post_no_active)) - - @patch.object(client.BrcdHTTPFCZoneClient, 'post_zone_data') - def test_delete_zones_invalid_zone_name(self, 
post_zone_data_mock): - post_zone_data_mock.return_value = ("0", "") - self.cfgs = cfgs_to_delete.copy() - self.zones = zones_to_delete.copy() - self.alias = alias.copy() - self.qlps = qlps.copy() - self.ifas = ifas.copy() - self.active_cfg = active_cfg - delete_zones_info = 'openstack50060b0000c26604201900051ee8e32' - self.assertRaises(exception.BrocadeZoningHttpException, - self.delete_zones, delete_zones_info, False) - - @patch.object(time, 'sleep') - @patch.object(client.BrcdHTTPFCZoneClient, 'connect') - def test_post_zone_data(self, connect_mock, sleep_mock): - connect_mock.return_value = zone_post_page - self.assertEqual( - ("-1", "Name too long"), self.post_zone_data(zone_string_to_post)) - connect_mock.return_value = zone_post_page_no_error - self.assertEqual(("0", ""), self.post_zone_data(zone_string_to_post)) - - @patch.object(client.BrcdHTTPFCZoneClient, 'connect') - def test_get_nameserver_info(self, connect_mock): - connect_mock.return_value = nameserver_info - self.assertEqual(ns_info, self.get_nameserver_info()) - - @patch.object(client.BrcdHTTPFCZoneClient, 'get_session_info') - def test_is_vf_enabled(self, get_session_info_mock): - get_session_info_mock.return_value = session_info_vf - self.assertEqual((True, parsed_session_info_vf), self.is_vf_enabled()) - - def test_delete_zones_cfgs(self): - - cfgs = {'openstack_cfg': 'zone1;zone2'} - zones = {'zone1': '20:01:00:05:33:0e:96:15;20:00:00:05:33:0e:93:11', - 'zone2': '20:01:00:05:33:0e:96:14;20:00:00:05:33:0e:93:11'} - delete_zones_info = valid_zone_name - self.assertEqual( - (zones, cfgs, active_cfg), - self.delete_zones_cfgs( - cfgs_to_delete.copy(), - zones_to_delete.copy(), - delete_zones_info, - active_cfg)) - - cfgs = {'openstack_cfg': 'zone2'} - zones = {'zone2': '20:01:00:05:33:0e:96:14;20:00:00:05:33:0e:93:11'} - delete_zones_info = valid_zone_name + ";zone1" - self.assertEqual( - (zones, cfgs, active_cfg), - self.delete_zones_cfgs( - cfgs_to_delete.copy(), - zones_to_delete.copy(), - 
delete_zones_info, - active_cfg)) - - def test_add_zones_cfgs(self): - add_zones_info = {valid_zone_name: - ['50:06:0b:00:00:c2:66:04', - '20:19:00:05:1e:e8:e3:29'] - } - updated_cfgs = { - 'openstack_cfg': - valid_zone_name + ';zone1;zone2'} - updated_zones = { - 'zone1': '20:01:00:05:33:0e:96:15;20:00:00:05:33:0e:93:11', - 'zone2': '20:01:00:05:33:0e:96:14;20:00:00:05:33:0e:93:11', - valid_zone_name: - '50:06:0b:00:00:c2:66:04;20:19:00:05:1e:e8:e3:29'} - self.assertEqual((updated_zones, updated_cfgs, active_cfg), - self.add_zones_cfgs( - cfgs.copy(), - zones.copy(), - add_zones_info, - active_cfg, - "openstack_cfg")) - - add_zones_info = {valid_zone_name: - ['50:06:0b:00:00:c2:66:04', - '20:19:00:05:1e:e8:e3:29'], - 'test4': - ['20:06:0b:00:00:b2:66:07', - '20:10:00:05:1e:b8:c3:19'] - } - updated_cfgs = { - 'openstack_cfg': - 'test4;openstack50060b0000c26604201900051ee8e329;zone1;zone2'} - updated_zones = { - 'zone1': '20:01:00:05:33:0e:96:15;20:00:00:05:33:0e:93:11', - 'zone2': '20:01:00:05:33:0e:96:14;20:00:00:05:33:0e:93:11', - valid_zone_name: - '50:06:0b:00:00:c2:66:04;20:19:00:05:1e:e8:e3:29', - 'test4': '20:06:0b:00:00:b2:66:07;20:10:00:05:1e:b8:c3:19'} - - result = self.add_zones_cfgs(cfgs.copy(), zones.copy(), add_zones_info, - active_cfg, "openstack_cfg") - self.assertEqual(updated_zones, result[0]) - self.assertEqual(active_cfg, result[2]) - - result_cfg = result[1]['openstack_cfg'] - self.assertIn('test4', result_cfg) - self.assertIn('openstack50060b0000c26604201900051ee8e329', result_cfg) - self.assertIn('zone1', result_cfg) - self.assertIn('zone2', result_cfg) - - @patch.object(client.BrcdHTTPFCZoneClient, 'connect') - def test_get_zone_info(self, connect_mock): - connect_mock.return_value = zone_info - self.get_zone_info() - self.assertEqual({'openstack_cfg': 'zone1;zone2'}, self.cfgs) - self.assertEqual( - {'zone1': '20:01:00:05:33:0e:96:15;20:00:00:05:33:0e:93:11', - 'zone2': '20:01:00:05:33:0e:96:14;20:00:00:05:33:0e:93:11'}, - self.zones) - 
self.assertEqual('openstack_cfg', self.active_cfg) - self.assertEqual( - {'alia1': '10:00:00:05:1e:7c:64:96;10:21:10:05:33:0e:96:12'}, - self.alias) - self.assertEqual( - {'fa1': '20:15:f4:ce:96:ae:68:6c;20:11:f4:ce:46:ae:68:6c'}, - self.ifas) - self.assertEqual( - {'qlp': '10:11:f4:ce:46:ae:68:6c;20:11:f4:ce:46:ae:68:6c'}, - self.qlps) diff --git a/cinder/tests/unit/zonemanager/test_brcd_lookup_service.py b/cinder/tests/unit/zonemanager/test_brcd_lookup_service.py deleted file mode 100644 index c73c95f64..000000000 --- a/cinder/tests/unit/zonemanager/test_brcd_lookup_service.py +++ /dev/null @@ -1,95 +0,0 @@ -# (c) Copyright 2013 Brocade Communications Systems Inc. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
-# - -"""Unit tests for fc san lookup service.""" - -from cinder import exception -from cinder import test -from cinder.volume import configuration as conf -from cinder.zonemanager import fc_san_lookup_service as san_service - -_target_ns_map = {'100000051e55a100': ['20240002ac000a50']} -_initiator_ns_map = {'100000051e55a100': ['10008c7cff523b01']} -_device_map_to_verify = { - '100000051e55a100': { - 'initiator_port_wwn_list': [ - '10008c7cff523b01'], 'target_port_wwn_list': ['20240002ac000a50']}} -_fabric_wwn = '100000051e55a100' - - -class TestFCSanLookupService(san_service.FCSanLookupService, test.TestCase): - - def setUp(self): - super(TestFCSanLookupService, self).setUp() - self.configuration = self.setup_config() - - # override some of the functions - def __init__(self, *args, **kwargs): - test.TestCase.__init__(self, *args, **kwargs) - - def setup_config(self): - configuration = conf.Configuration(None) - # fill up config - configuration.fc_san_lookup_service = ( - 'cinder.tests.unit.zonemanager.test_brcd_lookup_service.' 
- 'FakeBrcdFCSanLookupService') - return configuration - - def test_get_device_mapping_from_network(self): - GlobalParams._is_normal_test = True - initiator_list = ['10008c7cff523b01'] - target_list = ['20240002ac000a50', '20240002ac000a40'] - device_map = self.get_device_mapping_from_network( - initiator_list, target_list) - self.assertDictEqual(_device_map_to_verify, device_map) - - def test_get_device_mapping_from_network_for_invalid_config(self): - GlobalParams._is_normal_test = False - initiator_list = ['10008c7cff523b01'] - target_list = ['20240002ac000a50', '20240002ac000a40'] - self.assertRaises(exception.FCSanLookupServiceException, - self.get_device_mapping_from_network, - initiator_list, target_list) - - -class FakeBrcdFCSanLookupService(object): - - def __init__(self, **kwargs): - pass - - def get_device_mapping_from_network(self, - initiator_wwn_list, - target_wwn_list): - if not GlobalParams._is_normal_test: - raise exception.FCSanLookupServiceException("Error") - device_map = {} - initiators = [] - targets = [] - for i in initiator_wwn_list: - if (i in _initiator_ns_map[_fabric_wwn]): - initiators.append(i) - for t in target_wwn_list: - if (t in _target_ns_map[_fabric_wwn]): - targets.append(t) - device_map[_fabric_wwn] = { - 'initiator_port_wwn_list': initiators, - 'target_port_wwn_list': targets} - return device_map - - -class GlobalParams(object): - global _is_normal_test - _is_normal_test = True diff --git a/cinder/tests/unit/zonemanager/test_cisco_fc_san_lookup_service.py b/cinder/tests/unit/zonemanager/test_cisco_fc_san_lookup_service.py deleted file mode 100644 index b87446ef5..000000000 --- a/cinder/tests/unit/zonemanager/test_cisco_fc_san_lookup_service.py +++ /dev/null @@ -1,149 +0,0 @@ -# (c) Copyright 2014 Cisco Systems Inc. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. 
You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -# - - -"""Unit tests for Cisco fc san lookup service.""" - -import mock -from oslo_config import cfg - -from cinder import exception -from cinder import test -from cinder.volume import configuration as conf -import cinder.zonemanager.drivers.cisco.cisco_fc_san_lookup_service \ - as cisco_lookup -import cinder.zonemanager.drivers.cisco.fc_zone_constants as ZoneConstant -from cinder.zonemanager import utils as zm_utils - -nsshow = '20:1a:00:05:1e:e8:e3:29' -switch_data = ['VSAN 304\n', - '------------------------------------------------------\n', - 'FCID TYPE PWWN (VENDOR) \n', - '------------------------------------------------------\n', - '0x030001 N 20:1a:00:05:1e:e8:e3:29 (Cisco) ipfc\n', - '0x030101 NL 10:00:00:00:77:99:60:2c (Interphase)\n', - '0x030200 N 10:00:00:49:c9:28:c7:01\n'] - -nsshow_data = ['10:00:8c:7c:ff:52:3b:01', '20:24:00:02:ac:00:0a:50'] - -_device_map_to_verify = { - '304': { - 'initiator_port_wwn_list': ['10008c7cff523b01'], - 'target_port_wwn_list': ['20240002ac000a50']}} - - -class TestCiscoFCSanLookupService(cisco_lookup.CiscoFCSanLookupService, - test.TestCase): - - def setUp(self): - super(TestCiscoFCSanLookupService, self).setUp() - self.configuration = conf.Configuration(None) - self.configuration.set_default('fc_fabric_names', 'CISCO_FAB_2', - 'fc-zone-manager') - self.configuration.fc_fabric_names = 'CISCO_FAB_2' - self.create_configuration() - self.fabric_vsan = '304' - - # override some of the functions - def __init__(self, *args, **kwargs): - test.TestCase.__init__(self, *args, **kwargs) - - def create_configuration(self): 
- fc_fabric_opts = [] - fc_fabric_opts.append(cfg.StrOpt('cisco_fc_fabric_address', - default='172.24.173.142', help='')) - fc_fabric_opts.append(cfg.StrOpt('cisco_fc_fabric_user', - default='admin', help='')) - fc_fabric_opts.append(cfg.StrOpt('cisco_fc_fabric_password', - default='admin1234', help='', - secret=True)) - fc_fabric_opts.append(cfg.PortOpt('cisco_fc_fabric_port', - default=22, help='')) - fc_fabric_opts.append(cfg.StrOpt('cisco_zoning_vsan', - default='304', help='')) - config = conf.Configuration(fc_fabric_opts, 'CISCO_FAB_2') - self.fabric_configs = {'CISCO_FAB_2': config} - - @mock.patch.object(cisco_lookup.CiscoFCSanLookupService, - 'get_nameserver_info') - def test_get_device_mapping_from_network(self, get_nameserver_info_mock): - initiator_list = ['10008c7cff523b01'] - target_list = ['20240002ac000a50', '20240002ac000a40'] - get_nameserver_info_mock.return_value = (nsshow_data) - device_map = self.get_device_mapping_from_network( - initiator_list, target_list) - self.assertDictEqual(_device_map_to_verify, device_map) - - @mock.patch.object(cisco_lookup.CiscoFCSanLookupService, - '_get_switch_info') - def test_get_nameserver_info(self, get_switch_data_mock): - ns_info_list = [] - ns_info_list_expected = ['20:1a:00:05:1e:e8:e3:29', - '10:00:00:49:c9:28:c7:01'] - get_switch_data_mock.return_value = (switch_data) - ns_info_list = self.get_nameserver_info('304') - self.assertEqual(ns_info_list_expected, ns_info_list) - - def test_parse_ns_output(self): - invalid_switch_data = [' N 011a00;20:1a:00:05:1e:e8:e3:29'] - return_wwn_list = [] - expected_wwn_list = ['20:1a:00:05:1e:e8:e3:29', - '10:00:00:49:c9:28:c7:01'] - return_wwn_list = self._parse_ns_output(switch_data) - self.assertEqual(expected_wwn_list, return_wwn_list) - self.assertRaises(exception.InvalidParameterValue, - self._parse_ns_output, invalid_switch_data) - - def test_get_formatted_wwn(self): - wwn_list = ['10008c7cff523b01'] - return_wwn_list = [] - expected_wwn_list = 
['10:00:8c:7c:ff:52:3b:01'] - return_wwn_list.append(zm_utils.get_formatted_wwn(wwn_list[0])) - self.assertEqual(expected_wwn_list, return_wwn_list) - - @mock.patch.object(cisco_lookup.CiscoFCSanLookupService, - '_run_ssh') - def test__get_switch_info(self, run_ssh_mock): - cmd_list = [ZoneConstant.FCNS_SHOW, self.fabric_vsan, - ' | no-more'] - nsshow_list = [nsshow] - run_ssh_mock.return_value = (Stream(nsshow), Stream()) - switch_data = self._get_switch_info(cmd_list) - self.assertEqual(nsshow_list, switch_data) - run_ssh_mock.assert_called_once_with(cmd_list, True, 1) - - -class Channel(object): - def recv_exit_status(self): - return 0 - - -class Stream(object): - def __init__(self, buffer=''): - self.buffer = buffer - self.channel = Channel() - - def readlines(self): - return self.buffer - - def splitlines(self): - return self.buffer.splitlines() - - def close(self): - pass - - def flush(self): - self.buffer = '' diff --git a/cinder/tests/unit/zonemanager/test_cisco_fc_zone_client_cli.py b/cinder/tests/unit/zonemanager/test_cisco_fc_zone_client_cli.py deleted file mode 100644 index de0137c2d..000000000 --- a/cinder/tests/unit/zonemanager/test_cisco_fc_zone_client_cli.py +++ /dev/null @@ -1,331 +0,0 @@ -# (c) Copyright 2014 Cisco Systems Inc. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
-# - - -"""Unit tests for Cisco fc zone client cli.""" - -import time - -import mock -from oslo_concurrency import processutils -from six.moves import range - -from cinder import exception -from cinder import test -from cinder.zonemanager.drivers.cisco \ - import cisco_fc_zone_client_cli as cli -import cinder.zonemanager.drivers.cisco.fc_zone_constants as ZoneConstant - -nsshow = '20:1a:00:05:1e:e8:e3:29' -switch_data = ['VSAN 303\n', - '----------------------------------------------------------\n', - 'FCID TYPE PWWN (VENDOR) FC4-TYPE:FEATURE\n', - '----------------------------------------------------------\n', - '0x030001 N 20:1a:00:05:1e:e8:e3:29 (Cisco) ipfc\n', - '0x030101 NL 10:00:00:00:77:99:60:2c (Interphase)\n', - '0x030200 NL 10:00:00:49:c9:28:c7:01\n'] - -cfgactv = ['zoneset name OpenStack_Cfg vsan 303\n', - 'zone name openstack50060b0000c26604201900051ee8e329 vsan 303\n', - 'pwwn 50:06:0b:00:00:c2:66:04\n', - 'pwwn 20:19:00:05:1e:e8:e3:29\n'] - -active_zoneset = { - 'zones': { - 'openstack50060b0000c26604201900051ee8e329': - ['50:06:0b:00:00:c2:66:04', '20:19:00:05:1e:e8:e3:29']}, - 'active_zone_config': 'OpenStack_Cfg'} - -zoning_status_data_basic = [ - 'VSAN: 303 default-zone: deny distribute: active only Interop: default\n', - ' mode: basic merge-control: allow\n', - ' session: none\n', - ' hard-zoning: enabled broadcast: unsupported\n', - ' smart-zoning: disabled\n', - ' rscn-format: fabric-address\n', - 'Default zone:\n', - ' qos: none broadcast: unsupported ronly: unsupported\n', - 'Full Zoning Database :\n', - ' DB size: 220 bytes\n', - ' Zonesets:2 Zones:2 Aliases: 0\n', - 'Active Zoning Database :\n', - ' DB size: 80 bytes\n', - ' Name: test-zs-test Zonesets:1 Zones:1\n', - 'Status:\n'] - -zoning_status_basic = {'mode': 'basic', 'session': 'none'} - -zoning_status_data_enhanced_nosess = [ - 'VSAN: 303 default-zone: deny distribute: active only Interop: default\n', - ' mode: enhanced merge-control: allow\n', - ' session: none\n', - ' hard-zoning: 
enabled broadcast: unsupported\n', - ' smart-zoning: disabled\n', - ' rscn-format: fabric-address\n', - 'Default zone:\n', - ' qos: none broadcast: unsupported ronly: unsupported\n', - 'Full Zoning Database :\n', - ' DB size: 220 bytes\n', - ' Zonesets:2 Zones:2 Aliases: 0\n', - 'Active Zoning Database :\n', - ' DB size: 80 bytes\n', - ' Name: test-zs-test Zonesets:1 Zones:1\n', - 'Status:\n'] - -zoning_status_enhanced_nosess = {'mode': 'enhanced', 'session': 'none'} - -zoning_status_data_enhanced_sess = [ - 'VSAN: 303 default-zone: deny distribute: active only Interop: default\n', - ' mode: enhanced merge-control: allow\n', - ' session: otherthannone\n', - ' hard-zoning: enabled broadcast: unsupported\n', - ' smart-zoning: disabled\n', - ' rscn-format: fabric-address\n', - 'Default zone:\n', - ' qos: none broadcast: unsupported ronly: unsupported\n', - 'Full Zoning Database :\n', - ' DB size: 220 bytes\n', - ' Zonesets:2 Zones:2 Aliases: 0\n', - 'Active Zoning Database :\n', - ' DB size: 80 bytes\n', - ' Name: test-zs-test Zonesets:1 Zones:1\n', - 'Status:\n'] - -zoning_status_enhanced_sess = {'mode': 'enhanced', 'session': 'otherthannone'} - -active_zoneset_multiple_zones = { - 'zones': { - 'openstack50060b0000c26604201900051ee8e329': - ['50:06:0b:00:00:c2:66:04', '20:19:00:05:1e:e8:e3:29'], - 'openstack10000012345678902001009876543210': - ['50:06:0b:00:00:c2:66:02', '20:19:00:05:1e:e8:e3:27']}, - 'active_zone_config': 'OpenStack_Cfg'} - -new_zone = {'openstack10000012345678902001009876543210': - ['10:00:00:12:34:56:78:90', '20:01:00:98:76:54:32:10']} - -new_zones = {'openstack10000012345678902001009876543210': - ['10:00:00:12:34:56:78:90', '20:01:00:98:76:54:32:10'], - 'openstack10000011111111112001001111111111': - ['10:00:00:11:11:11:11:11', '20:01:00:11:11:11:11:11']} - -zone_names_to_delete = 'openstack50060b0000c26604201900051ee8e329' - - -class TestCiscoFCZoneClientCLI(cli.CiscoFCZoneClientCLI, test.TestCase): - - def setUp(self): - 
super(TestCiscoFCZoneClientCLI, self).setUp() - self.fabric_vsan = '303' - - # override some of the functions - def __init__(self, *args, **kwargs): - test.TestCase.__init__(self, *args, **kwargs) - - @mock.patch.object(cli.CiscoFCZoneClientCLI, '_get_switch_info') - def test_get_active_zone_set(self, get_switch_info_mock): - cmd_list = [ZoneConstant.GET_ACTIVE_ZONE_CFG, self.fabric_vsan, - ' | no-more'] - get_switch_info_mock.return_value = cfgactv - active_zoneset_returned = self.get_active_zone_set() - get_switch_info_mock.assert_called_once_with(cmd_list) - self.assertDictEqual(active_zoneset, active_zoneset_returned) - - @mock.patch.object(cli.CiscoFCZoneClientCLI, '_run_ssh') - def test_get_active_zone_set_ssh_error(self, run_ssh_mock): - run_ssh_mock.side_effect = processutils.ProcessExecutionError - self.assertRaises(exception.CiscoZoningCliException, - self.get_active_zone_set) - - @mock.patch.object(cli.CiscoFCZoneClientCLI, '_get_switch_info') - def test_get_zoning_status_basic(self, get_zoning_status_mock): - cmd_list = [ZoneConstant.GET_ZONE_STATUS, self.fabric_vsan] - get_zoning_status_mock.return_value = zoning_status_data_basic - zoning_status_returned = self.get_zoning_status() - get_zoning_status_mock.assert_called_once_with(cmd_list) - self.assertDictEqual(zoning_status_basic, zoning_status_returned) - - @mock.patch.object(cli.CiscoFCZoneClientCLI, '_get_switch_info') - def test_get_zoning_status_enhanced_nosess(self, get_zoning_status_mock): - cmd_list = [ZoneConstant.GET_ZONE_STATUS, self.fabric_vsan] - get_zoning_status_mock.return_value =\ - zoning_status_data_enhanced_nosess - zoning_status_returned = self.get_zoning_status() - get_zoning_status_mock.assert_called_once_with(cmd_list) - self.assertDictEqual(zoning_status_enhanced_nosess, - zoning_status_returned) - - @mock.patch.object(cli.CiscoFCZoneClientCLI, '_get_switch_info') - def test_get_zoning_status_enhanced_sess(self, get_zoning_status_mock): - cmd_list = 
[ZoneConstant.GET_ZONE_STATUS, self.fabric_vsan] - get_zoning_status_mock.return_value = zoning_status_data_enhanced_sess - zoning_status_returned = self.get_zoning_status() - get_zoning_status_mock.assert_called_once_with(cmd_list) - self.assertDictEqual(zoning_status_enhanced_sess, - zoning_status_returned) - - @mock.patch.object(cli.CiscoFCZoneClientCLI, '_get_switch_info') - def test_get_nameserver_info(self, get_switch_info_mock): - ns_info_list = [] - ns_info_list_expected = ['20:1a:00:05:1e:e8:e3:29'] - get_switch_info_mock.return_value = (switch_data) - ns_info_list = self.get_nameserver_info() - self.assertEqual(ns_info_list_expected, ns_info_list) - - @mock.patch.object(cli.CiscoFCZoneClientCLI, '_run_ssh') - def test_get_nameserver_info_ssh_error(self, run_ssh_mock): - run_ssh_mock.side_effect = processutils.ProcessExecutionError - self.assertRaises(exception.CiscoZoningCliException, - self.get_nameserver_info) - - @mock.patch.object(cli.CiscoFCZoneClientCLI, '_run_ssh') - def test__cfg_save(self, run_ssh_mock): - cmd_list = ['copy', 'running-config', 'startup-config'] - self._cfg_save() - run_ssh_mock.assert_called_once_with(cmd_list, True) - - @mock.patch.object(cli.CiscoFCZoneClientCLI, '_run_ssh') - @mock.patch.object(time, 'sleep') - def test__cfg_save_with_retry(self, mock_sleep, run_ssh_mock): - cmd_list = ['copy', 'running-config', 'startup-config'] - run_ssh_mock.side_effect = [ - processutils.ProcessExecutionError, - ('', None) - ] - - self._cfg_save() - - self.assertEqual(2, run_ssh_mock.call_count) - run_ssh_mock.assert_has_calls([ - mock.call(cmd_list, True), - mock.call(cmd_list, True) - ]) - - @mock.patch.object(cli.CiscoFCZoneClientCLI, '_run_ssh') - @mock.patch.object(time, 'sleep') - def test__cfg_save_with_error(self, mock_sleep, run_ssh_mock): - cmd_list = ['copy', 'running-config', 'startup-config'] - run_ssh_mock.side_effect = processutils.ProcessExecutionError - - self.assertRaises(processutils.ProcessExecutionError, 
self._cfg_save) - - expected_num_calls = 5 - expected_calls = [] - for i in range(expected_num_calls): - expected_calls.append(mock.call(cmd_list, True)) - - self.assertEqual(expected_num_calls, run_ssh_mock.call_count) - run_ssh_mock.assert_has_calls(expected_calls) - - @mock.patch.object(cli.CiscoFCZoneClientCLI, '_run_ssh') - def test__get_switch_info(self, run_ssh_mock): - cmd_list = [ZoneConstant.FCNS_SHOW, self.fabric_vsan] - nsshow_list = [nsshow] - run_ssh_mock.return_value = (Stream(nsshow), Stream()) - switch_data = self._get_switch_info(cmd_list) - self.assertEqual(nsshow_list, switch_data) - run_ssh_mock.assert_called_once_with(cmd_list, True) - - @mock.patch.object(cli.CiscoFCZoneClientCLI, '_ssh_execute') - @mock.patch.object(cli.CiscoFCZoneClientCLI, '_cfg_save') - def test__update_zones_add(self, cfg_save_mock, ssh_execute_mock): - self.update_zones(new_zone, False, self.fabric_vsan, - ZoneConstant.ZONE_ADD, active_zoneset_multiple_zones, - zoning_status_basic) - ssh_cmd = [['conf'], - ['zoneset', 'name', 'OpenStack_Cfg', 'vsan', - self.fabric_vsan], - ['zone', 'name', - 'openstack10000012345678902001009876543210'], - ['member', 'pwwn', '10:00:00:12:34:56:78:90'], - ['member', 'pwwn', '20:01:00:98:76:54:32:10'], - ['end']] - - self.assertEqual(1, cfg_save_mock.call_count) - ssh_execute_mock.assert_called_once_with(ssh_cmd, True, 1) - - @mock.patch.object(cli.CiscoFCZoneClientCLI, '_ssh_execute') - @mock.patch.object(cli.CiscoFCZoneClientCLI, '_cfg_save') - def test__update_zones_remove(self, cfg_save_mock, ssh_execute_mock): - self.update_zones(new_zone, False, self.fabric_vsan, - ZoneConstant.ZONE_REMOVE, - active_zoneset_multiple_zones, - zoning_status_basic) - ssh_cmd = [['conf'], - ['zoneset', 'name', 'OpenStack_Cfg', 'vsan', - self.fabric_vsan], - ['zone', 'name', - 'openstack10000012345678902001009876543210'], - ['no', 'member', 'pwwn', '10:00:00:12:34:56:78:90'], - ['no', 'member', 'pwwn', '20:01:00:98:76:54:32:10'], - ['end']] - - 
self.assertEqual(1, cfg_save_mock.call_count) - ssh_execute_mock.assert_called_once_with(ssh_cmd, True, 1) - - def test__parse_ns_output(self): - return_wwn_list = [] - expected_wwn_list = ['20:1a:00:05:1e:e8:e3:29'] - return_wwn_list = self._parse_ns_output(switch_data) - self.assertEqual(expected_wwn_list, return_wwn_list) - - -class TestCiscoFCZoneClientCLISSH(test.TestCase): - - def setUp(self): - super(TestCiscoFCZoneClientCLISSH, self).setUp() - self.client = cli.CiscoFCZoneClientCLI(None, None, None, None, None) - self.client.sshpool = mock.MagicMock() - self.mock_ssh = self.client.sshpool.item().__enter__() - - @mock.patch('oslo_concurrency.processutils.ssh_execute') - def test__run_ssh(self, mock_execute): - mock_execute.return_value = 'ssh output' - ret = self.client._run_ssh(['cat', 'foo']) - self.assertEqual('ssh output', ret) - mock_execute.assert_called_once_with(self.mock_ssh, - 'cat foo', - check_exit_code=True) - - @mock.patch('oslo_concurrency.processutils.ssh_execute') - def test__run_ssh_with_error(self, mock_execute): - mock_execute.side_effect = processutils.ProcessExecutionError() - self.assertRaises(processutils.ProcessExecutionError, - self.client._run_ssh, - ['cat', 'foo']) - - -class Channel(object): - def recv_exit_status(self): - return 0 - - -class Stream(object): - def __init__(self, buffer=''): - self.buffer = buffer - self.channel = Channel() - - def readlines(self): - return self.buffer - - def splitlines(self): - return self.buffer.splitlines() - - def close(self): - pass - - def flush(self): - self.buffer = '' diff --git a/cinder/tests/unit/zonemanager/test_cisco_fc_zone_driver.py b/cinder/tests/unit/zonemanager/test_cisco_fc_zone_driver.py deleted file mode 100644 index 5e458a062..000000000 --- a/cinder/tests/unit/zonemanager/test_cisco_fc_zone_driver.py +++ /dev/null @@ -1,245 +0,0 @@ -# (c) Copyright 2014 Cisco Systems Inc. -# All Rights Reserved. 
-# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -# - - -"""Unit tests for Cisco FC zone driver.""" - -import mock -from oslo_concurrency import processutils -from oslo_config import cfg -from oslo_utils import importutils - -from cinder import exception -from cinder import test -from cinder.volume import configuration as conf -from cinder.zonemanager.drivers.cisco import cisco_fc_zone_driver as driver - -_active_cfg_before_add = {} -_active_cfg_before_delete = { - 'zones': { - 'openstack10008c7cff523b0120240002ac000a50': ( - ['10:00:8c:7c:ff:52:3b:01', - '20:24:00:02:ac:00:0a:50'])}, - 'active_zone_config': 'cfg1'} -_active_cfg_default = { - 'zones': { - 'openstack10008c7cff523b0120240002ac000b90': ( - ['10:00:8c:7c:ff:52:3b:01', - '20:24:00:02:ac:00:0a:50'])}, - 'active_zone_config': 'cfg1'} -_activate = True -_zone_name = 'openstack10008c7cff523b0120240002ac000a50' -_target_ns_map = {'100000051e55a100': ['20240002ac000a50']} -_zoning_status = {'mode': 'basis', 'session': 'none'} -_initiator_ns_map = {'100000051e55a100': ['10008c7cff523b01']} -_zone_map_to_add = {'openstack10008c7cff523b0120240002ac000a50': ( - ['10:00:8c:7c:ff:52:3b:01', '20:24:00:02:ac:00:0a:50'])} - -_initiator_target_map = {'10008c7cff523b01': ['20240002ac000a50']} -_device_map_to_verify = { - '304': { - 'initiator_port_wwn_list': [ - '10008c7cff523b01'], 'target_port_wwn_list': ['20240002ac000a50']}} -_fabric_wwn = '304' - - -class CiscoFcZoneDriverBaseTest(object): - - def setup_config(self, 
is_normal, mode): - fc_test_opts = [ - cfg.StrOpt('fc_fabric_address_CISCO_FAB_1', default='10.24.48.213', - help='FC Fabric names'), - ] - configuration = conf.Configuration(fc_test_opts) - # fill up config - configuration.zoning_mode = 'fabric' - configuration.zone_driver = ('cinder.tests.unit.zonemanager.' - 'test_cisco_fc_zone_driver.' - 'FakeCiscoFCZoneDriver') - configuration.cisco_sb_connector = ('cinder.tests.unit.zonemanager.' - 'test_cisco_fc_zone_driver' - '.FakeCiscoFCZoneClientCLI') - configuration.zoning_policy = 'initiator-target' - configuration.zone_activate = True - configuration.zone_name_prefix = 'openstack' - configuration.fc_san_lookup_service = ('cinder.tests.unit.zonemanager.' - 'test_cisco_fc_zone_driver.' - 'FakeCiscoFCSanLookupService') - - configuration.fc_fabric_names = 'CISCO_FAB_1' - configuration.fc_fabric_address_CISCO_FAB_1 = '172.21.60.220' - if (is_normal): - configuration.fc_fabric_user_CISCO_FAB_1 = 'admin' - else: - configuration.fc_fabric_user_CISCO_FAB_1 = 'invaliduser' - configuration.fc_fabric_password_CISCO_FAB_1 = 'admin1234' - - if (mode == 1): - configuration.zoning_policy_CISCO_FAB_1 = 'initiator-target' - elif (mode == 2): - configuration.zoning_policy_CISCO_FAB_1 = 'initiator' - else: - configuration.zoning_policy_CISCO_FAB_1 = 'initiator-target' - configuration.zone_activate_CISCO_FAB_1 = True - configuration.zone_name_prefix_CISCO_FAB_1 = 'openstack' - configuration.zoning_vsan_CISCO_FAB_1 = '304' - return configuration - - -class TestCiscoFcZoneDriver(CiscoFcZoneDriverBaseTest, test.TestCase): - - def setUp(self): - super(TestCiscoFcZoneDriver, self).setUp() - # setup config for normal flow - self.setup_driver(self.setup_config(True, 1)) - GlobalVars._zone_state = [] - - def setup_driver(self, config): - self.driver = importutils.import_object( - 'cinder.zonemanager.drivers.cisco.cisco_fc_zone_driver' - '.CiscoFCZoneDriver', configuration=config) - - def fake_get_active_zone_set(self, fabric_ip, fabric_user, 
fabric_pwd, - zoning_vsan): - return GlobalVars._active_cfg - - def fake_get_san_context(self, target_wwn_list): - fabric_map = {} - return fabric_map - - def test_delete_connection(self): - GlobalVars._is_normal_test = True - GlobalVars._active_cfg = _active_cfg_before_delete - self.driver.delete_connection( - 'CISCO_FAB_1', _initiator_target_map) - self.assertNotIn(_zone_name, GlobalVars._zone_state) - - def test_delete_connection_for_initiator_mode(self): - GlobalVars._is_normal_test = True - GlobalVars._active_cfg = _active_cfg_before_delete - self.setup_driver(self.setup_config(True, 2)) - self.driver.delete_connection( - 'CISCO_FAB_1', _initiator_target_map) - self.assertNotIn(_zone_name, GlobalVars._zone_state) - - @mock.patch.object(driver.CiscoFCZoneDriver, 'get_zoning_status') - @mock.patch.object(driver.CiscoFCZoneDriver, 'get_active_zone_set') - def test_add_connection(self, get_active_zone_set_mock, - get_zoning_status_mock): - """Test normal flows.""" - GlobalVars._is_normal_test = True - GlobalVars._zone_state = [] - self.setup_driver(self.setup_config(True, 1)) - get_zoning_status_mock.return_value = {'mode': 'basis', - 'session': 'none'} - get_active_zone_set_mock.return_value = _active_cfg_default - self.driver.add_connection('CISCO_FAB_1', _initiator_target_map) - self.assertTrue(_zone_name in GlobalVars._zone_state) - - @mock.patch.object(driver.CiscoFCZoneDriver, 'get_zoning_status') - @mock.patch.object(driver.CiscoFCZoneDriver, 'get_active_zone_set') - def test_add_connection_with_no_cfg(self, get_active_zone_set_mock, - get_zoning_status_mock): - """Test normal flows.""" - GlobalVars._is_normal_test = True - GlobalVars._zone_state = [] - self.setup_driver(self.setup_config(True, 1)) - get_zoning_status_mock.return_value = {'mode': 'basis', - 'session': 'none'} - get_active_zone_set_mock.return_value = {} - self.driver.add_connection('CISCO_FAB_1', _initiator_target_map) - self.assertTrue(_zone_name in GlobalVars._zone_state) - - def 
test_add_connection_for_invalid_fabric(self): - """Test abnormal flows.""" - GlobalVars._is_normal_test = True - GlobalVars._active_cfg = _active_cfg_before_add - GlobalVars._is_normal_test = False - self.setup_driver(self.setup_config(False, 1)) - self.assertRaises(exception.FCZoneDriverException, - self.driver.add_connection, - 'CISCO_FAB_1', - _initiator_target_map) - - def test_delete_connection_for_invalid_fabric(self): - GlobalVars._active_cfg = _active_cfg_before_delete - GlobalVars._is_normal_test = False - self.setup_driver(self.setup_config(False, 1)) - self.assertRaises(exception.FCZoneDriverException, - self.driver.delete_connection, - 'CISCO_FAB_1', - _initiator_target_map) - - -class FakeCiscoFCZoneClientCLI(object): - def __init__(self, ipaddress, username, password, port, vsan): - if not GlobalVars._is_normal_test: - raise processutils.ProcessExecutionError( - "Unable to connect to fabric") - - def get_active_zone_set(self): - return GlobalVars._active_cfg - - def add_zones(self, zones, activate, fabric_vsan, active_zone_set, - zone_status): - GlobalVars._zone_state.extend(zones.keys()) - - def delete_zones(self, zone_names, isActivate): - zone_list = zone_names.split(';') - GlobalVars._zone_state = [ - x for x in GlobalVars._zone_state if x not in zone_list] - - def get_nameserver_info(self): - return _target_ns_map - - def get_zoning_status(self): - return _zoning_status - - def close_connection(self): - pass - - def cleanup(self): - pass - - -class FakeCiscoFCSanLookupService(object): - def get_device_mapping_from_network(self, - initiator_wwn_list, - target_wwn_list): - device_map = {} - initiators = [] - targets = [] - for i in initiator_wwn_list: - if (i in _initiator_ns_map[_fabric_wwn]): - initiators.append(i) - for t in target_wwn_list: - if (t in _target_ns_map[_fabric_wwn]): - targets.append(t) - device_map[_fabric_wwn] = { - 'initiator_port_wwn_list': initiators, - 'target_port_wwn_list': targets} - return device_map - - -class 
GlobalVars(object): - global _active_cfg - _active_cfg = {} - global _zone_state - _zone_state = list() - global _is_normal_test - _is_normal_test = True - global _zoning_status - _zoning_status = {} diff --git a/cinder/tests/unit/zonemanager/test_cisco_lookup_service.py b/cinder/tests/unit/zonemanager/test_cisco_lookup_service.py deleted file mode 100644 index 3e297c64e..000000000 --- a/cinder/tests/unit/zonemanager/test_cisco_lookup_service.py +++ /dev/null @@ -1,96 +0,0 @@ -# (c) Copyright 2014 Cisco Systems Inc. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
-# - - -"""Unit tests for Cisco FC san lookup service.""" - -from cinder import exception -from cinder import test -from cinder.volume import configuration as conf -from cinder.zonemanager import fc_san_lookup_service as san_service - -_target_ns_map = {'100000051e55a100': ['20240002ac000a50']} -_initiator_ns_map = {'100000051e55a100': ['10008c7cff523b01']} -_device_map_to_verify = { - '100000051e55a100': { - 'initiator_port_wwn_list': [ - '10008c7cff523b01'], 'target_port_wwn_list': ['20240002ac000a50']}} -_fabric_wwn = '100000051e55a100' - - -class TestFCSanLookupService(san_service.FCSanLookupService, test.TestCase): - - def setUp(self): - super(TestFCSanLookupService, self).setUp() - self.configuration = self.setup_config() - - # override some of the functions - def __init__(self, *args, **kwargs): - test.TestCase.__init__(self, *args, **kwargs) - - def setup_config(self): - configuration = conf.Configuration(None) - # fill up config - configuration.fc_san_lookup_service = ('cinder.tests.unit.zonemanager' - '.test_cisco_lookup_service' - '.FakeCiscoFCSanLookupService') - return configuration - - def test_get_device_mapping_from_network(self): - GlobalParams._is_normal_test = True - initiator_list = ['10008c7cff523b01'] - target_list = ['20240002ac000a50', '20240002ac000a40'] - device_map = self.get_device_mapping_from_network( - initiator_list, target_list) - self.assertDictEqual(_device_map_to_verify, device_map) - - def test_get_device_mapping_from_network_for_invalid_config(self): - GlobalParams._is_normal_test = False - initiator_list = ['10008c7cff523b01'] - target_list = ['20240002ac000a50', '20240002ac000a40'] - self.assertRaises(exception.FCSanLookupServiceException, - self.get_device_mapping_from_network, - initiator_list, target_list) - - -class FakeCiscoFCSanLookupService(object): - - def __init__(self, **kwargs): - pass - - def get_device_mapping_from_network(self, - initiator_wwn_list, - target_wwn_list): - if not GlobalParams._is_normal_test: - 
raise exception.FCSanLookupServiceException("Error") - device_map = {} - initiators = [] - targets = [] - for i in initiator_wwn_list: - if (i in _initiator_ns_map[_fabric_wwn]): - initiators.append(i) - for t in target_wwn_list: - if (t in _target_ns_map[_fabric_wwn]): - targets.append(t) - device_map[_fabric_wwn] = { - 'initiator_port_wwn_list': initiators, - 'target_port_wwn_list': targets} - return device_map - - -class GlobalParams(object): - global _is_normal_test - _is_normal_test = True diff --git a/cinder/tests/unit/zonemanager/test_driverutils.py b/cinder/tests/unit/zonemanager/test_driverutils.py deleted file mode 100644 index bb5d34dcf..000000000 --- a/cinder/tests/unit/zonemanager/test_driverutils.py +++ /dev/null @@ -1,113 +0,0 @@ -# (c) Copyright 2015 Brocade Communications Systems Inc. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
-# - - -"""Unit tests for friendly zone name.""" -import ddt -import string - -from cinder import test -from cinder.zonemanager.drivers import driver_utils - -TEST_CHAR_SET = string.ascii_letters + string.digits - - -@ddt.ddt -class TestDriverUtils(test.TestCase): - - @ddt.data('OSHost10010008c7cff523b01AMCEArray20240002ac000a50') - def test_get_friendly_zone_name_valid_hostname_storagesystem(self, value): - self.assertEqual(value, - driver_utils.get_friendly_zone_name( - 'initiator-target', "10:00:8c:7c:ff:52:3b:01", - "20:24:00:02:ac:00:0a:50", "OS_Host100", 'AMCE' - '_Array', "openstack", TEST_CHAR_SET)) - - @ddt.data('openstack10008c7cff523b0120240002ac000a50') - def test_get_friendly_zone_name_hostname_storagesystem_none(self, value): - self.assertEqual(value, - driver_utils.get_friendly_zone_name( - 'initiator-target', "10:00:8c:7c:ff:52:3b:01", - "20:24:00:02:ac:00:0a:50", None, None, - "openstack", TEST_CHAR_SET)) - - @ddt.data('openstack10008c7cff523b0120240002ac000a50') - def test_get_friendly_zone_name_storagesystem_none(self, value): - self.assertEqual(value, - driver_utils.get_friendly_zone_name( - 'initiator-target', "10:00:8c:7c:ff:52:3b:01", - "20:24:00:02:ac:00:0a:50", "OS_Host100", None, - "openstack", TEST_CHAR_SET)) - - @ddt.data('openstack10008c7cff523b0120240002ac000a50') - def test_get_friendly_zone_name_hostname_none(self, value): - self.assertEqual(value, - driver_utils.get_friendly_zone_name( - 'initiator-target', "10:00:8c:7c:ff:52:3b:01", - "20:24:00:02:ac:00:0a:50", None, "AMCE_Array", - "openstack", TEST_CHAR_SET)) - - @ddt.data('OSHost10010008c7cff523b01') - def test_get_friendly_zone_name_initiator_mode(self, value): - self.assertEqual(value, - driver_utils.get_friendly_zone_name( - 'initiator', "10:00:8c:7c:ff:52:3b:01", None, - "OS_Host100", None, "openstack", TEST_CHAR_SET)) - - @ddt.data('openstack10008c7cff523b01') - def test_get_friendly_zone_name_initiator_mode_hostname_none(self, value): - self.assertEqual(value, - 
driver_utils.get_friendly_zone_name( - 'initiator', "10:00:8c:7c:ff:52:3b:01", None, - None, None, "openstack", TEST_CHAR_SET)) - - @ddt.data('OSHost100XXXX10008c7cff523b01AMCEArrayYYYY20240002ac000a50') - def test_get_friendly_zone_name_storagename_length_too_long(self, value): - self.assertEqual(value, - driver_utils.get_friendly_zone_name( - 'initiator-target', "10:00:8c:7c:ff:52:3b:01", - "20:24:00:02:ac:00:0a:50", - "OS_Host100XXXXXXXXXX", - "AMCE_ArrayYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYY" - "YYYY", "openstack", TEST_CHAR_SET)) - - @ddt.data('OSHost100XXXX10008c7cff523b01AMCEArrayYYYY20240002ac000a50') - def test_get_friendly_zone_name_max_length(self, value): - self.assertEqual(value, - driver_utils.get_friendly_zone_name( - 'initiator-target', "10:00:8c:7c:ff:52:3b:01", - "20:24:00:02:ac:00:0a:50", - "OS_Host100XXXXXXXXXX", - "AMCE_ArrayYYYYYYYYYY", - "openstack", TEST_CHAR_SET)) - - @ddt.data('OSHost100XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX10008c7cff523b01') - def test_get_friendly_zone_name_initiator_mode_hostname_max_length(self, - value): - self.assertEqual(value, - driver_utils.get_friendly_zone_name( - 'initiator', "10:00:8c:7c:ff:52:3b:01", None, - 'OS_Host100XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX' - 'XXXXX', - None, "openstack", TEST_CHAR_SET)) - - @ddt.data('openstack110008c7cff523b0120240002ac000a50') - def test_get_friendly_zone_name_invalid_characters(self, value): - self.assertEqual(value, - driver_utils.get_friendly_zone_name( - 'initiator-target', "10:00:8c:7c:ff:52:3b:01", - "20:24:00:02:ac:00:0a:50", None, "AMCE_Array", - "open-stack*1_", TEST_CHAR_SET)) diff --git a/cinder/tests/unit/zonemanager/test_fc_zone_manager.py b/cinder/tests/unit/zonemanager/test_fc_zone_manager.py deleted file mode 100644 index 82eb8e5cc..000000000 --- a/cinder/tests/unit/zonemanager/test_fc_zone_manager.py +++ /dev/null @@ -1,151 +0,0 @@ -# (c) Copyright 2014 Brocade Communications Systems Inc. -# All Rights Reserved. 
-# -# Copyright 2014 OpenStack Foundation -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -# - - -"""Unit tests for FC Zone Manager.""" - -import mock - -from cinder import exception -from cinder import test -from cinder.volume import configuration as conf -from cinder.zonemanager.drivers import fc_zone_driver -from cinder.zonemanager import fc_zone_manager - -fabric_name = 'BRCD_FAB_3' -init_target_map = {'10008c7cff523b01': ['20240002ac000a50']} -conn_info = { - 'driver_volume_type': 'fibre_channel', - 'data': { - 'target_discovered': True, - 'target_lun': 1, - 'target_wwn': '20240002ac000a50', - 'initiator_target_map': { - '10008c7cff523b01': ['20240002ac000a50'] - } - } -} -fabric_map = {'BRCD_FAB_3': ['20240002ac000a50']} -target_list = ['20240002ac000a50'] - - -class TestFCZoneManager(test.TestCase): - - @mock.patch('oslo_config.cfg._is_opt_registered', return_value=False) - def setUp(self, opt_mock): - super(TestFCZoneManager, self).setUp() - - def __init__(self, *args, **kwargs): - super(TestFCZoneManager, self).__init__(*args, **kwargs) - - def setup_fake_driver(self): - config = conf.Configuration(None) - config.fc_fabric_names = fabric_name - - def fake_build_driver(self): - self.driver = mock.Mock(fc_zone_driver.FCZoneDriver) - self.set_initialized(True) - - self.mock_object(fc_zone_manager.ZoneManager, '_build_driver', - fake_build_driver) - - self.zm = fc_zone_manager.ZoneManager(configuration=config) - self.configuration = conf.Configuration(None) - 
self.configuration.fc_fabric_names = fabric_name - - def test_unsupported_driver_disabled(self): - config = conf.Configuration(fc_zone_manager.zone_manager_opts, - 'fc-zone-manager') - config.fc_fabric_names = fabric_name - config.enable_unsupported_driver = False - - def fake_import(self, *args, **kwargs): - fake_driver = mock.Mock(fc_zone_driver.FCZoneDriver) - fake_driver.supported = False - return fake_driver - - self.patch('oslo_utils.importutils.import_object', - fake_import) - - zm = fc_zone_manager.ZoneManager(configuration=config) - self.assertFalse(zm.driver.supported) - self.assertFalse(zm.initialized) - - def test_unsupported_driver_enabled(self): - config = conf.Configuration(None) - config.fc_fabric_names = fabric_name - - def fake_import(self, *args, **kwargs): - fake_driver = mock.Mock(fc_zone_driver.FCZoneDriver) - fake_driver.supported = False - return fake_driver - - self.patch('oslo_utils.importutils.import_object', - fake_import) - - with mock.patch( - 'cinder.volume.configuration.Configuration') as mock_config: - mock_config.return_value.zone_driver = 'test' - mock_config.return_value.enable_unsupported_driver = True - zm = fc_zone_manager.ZoneManager(configuration=config) - self.assertFalse(zm.driver.supported) - self.assertTrue(zm.initialized) - - @mock.patch('oslo_config.cfg._is_opt_registered', return_value=False) - def test_add_connection(self, opt_mock): - self.setup_fake_driver() - with mock.patch.object(self.zm.driver, 'add_connection')\ - as add_connection_mock: - self.zm.driver.get_san_context.return_value = fabric_map - self.zm.add_connection(conn_info) - self.zm.driver.get_san_context.assert_called_once_with(target_list) - add_connection_mock.assert_called_once_with(fabric_name, - init_target_map, - None, - None) - - @mock.patch('oslo_config.cfg._is_opt_registered', return_value=False) - def test_add_connection_error(self, opt_mock): - self.setup_fake_driver() - with mock.patch.object(self.zm.driver, 'add_connection')\ - as 
add_connection_mock: - add_connection_mock.side_effect = exception.FCZoneDriverException - self.assertRaises(exception.ZoneManagerException, - self.zm.add_connection, conn_info) - - @mock.patch('oslo_config.cfg._is_opt_registered', return_value=False) - def test_delete_connection(self, opt_mock): - self.setup_fake_driver() - with mock.patch.object(self.zm.driver, 'delete_connection')\ - as delete_connection_mock: - self.zm.driver.get_san_context.return_value = fabric_map - self.zm.delete_connection(conn_info) - self.zm.driver.get_san_context.assert_called_once_with(target_list) - delete_connection_mock.assert_called_once_with(fabric_name, - init_target_map, - None, - None) - - @mock.patch('oslo_config.cfg._is_opt_registered', return_value=False) - def test_delete_connection_error(self, opt_mock): - self.setup_fake_driver() - with mock.patch.object(self.zm.driver, 'delete_connection')\ - as del_connection_mock: - del_connection_mock.side_effect = exception.FCZoneDriverException - self.assertRaises(exception.ZoneManagerException, - self.zm.delete_connection, conn_info) diff --git a/cinder/tests/unit/zonemanager/test_volume_driver.py b/cinder/tests/unit/zonemanager/test_volume_driver.py deleted file mode 100644 index 34dd1de20..000000000 --- a/cinder/tests/unit/zonemanager/test_volume_driver.py +++ /dev/null @@ -1,90 +0,0 @@ -# (c) Copyright 2012-2014 Hewlett-Packard Development Company, L.P. -# All Rights Reserved. -# -# Copyright 2014 OpenStack Foundation -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the -# License for the specific language governing permissions and limitations -# under the License. -# - - -"""Unit tests for Volume Manager.""" - -import mock - -from cinder import test -from cinder.tests import fake_driver -from cinder import utils -from cinder.volume import configuration as conf -from cinder.zonemanager.drivers.brocade import brcd_fc_zone_driver -from cinder.zonemanager import fc_zone_manager - - -class TestVolumeDriver(test.TestCase): - - def setUp(self): - super(TestVolumeDriver, self).setUp() - self.driver = fake_driver.FakeFibreChannelDriver() - brcd_fc_zone_driver.BrcdFCZoneDriver = mock.Mock() - self.addCleanup(self._cleanup) - - def _cleanup(self): - self.driver = None - - def __init__(self, *args, **kwargs): - super(TestVolumeDriver, self).__init__(*args, **kwargs) - - @mock.patch('oslo_config.cfg._is_opt_registered', return_value=False) - @mock.patch.object(utils, 'require_driver_initialized') - def test_initialize_connection_with_decorator(self, utils_mock, opt_mock): - utils_mock.return_value = True - with mock.patch.object(fc_zone_manager.ZoneManager, 'add_connection')\ - as add_zone_mock: - with mock.patch.object(conf.Configuration, 'safe_get')\ - as mock_safe_get: - mock_safe_get.return_value = 'fabric' - conn_info = self.driver.initialize_connection(None, None) - add_zone_mock.assert_called_once_with(conn_info) - - @mock.patch.object(utils, 'require_driver_initialized') - def test_initialize_connection_no_decorator(self, utils_mock): - utils_mock.return_value = True - with mock.patch.object(fc_zone_manager.ZoneManager, 'add_connection')\ - as add_zone_mock: - with mock.patch.object(conf.Configuration, 'safe_get')\ - as mock_safe_get: - mock_safe_get.return_value = 'fabric' - self.driver.no_zone_initialize_connection(None, None) - add_zone_mock.assert_not_called() - - @mock.patch('oslo_config.cfg._is_opt_registered', return_value=False) - @mock.patch.object(utils, 'require_driver_initialized') - def 
test_terminate_connection_with_decorator(self, utils_mock, opt_mock): - utils_mock.return_value = True - with mock.patch.object(fc_zone_manager.ZoneManager, - 'delete_connection') as remove_zone_mock: - with mock.patch.object(conf.Configuration, 'safe_get')\ - as mock_safe_get: - mock_safe_get.return_value = 'fabric' - conn_info = self.driver.terminate_connection(None, None) - remove_zone_mock.assert_called_once_with(conn_info) - - @mock.patch.object(utils, 'require_driver_initialized') - def test_terminate_connection_no_decorator(self, utils_mock): - utils_mock.return_value = True - with mock.patch.object(fc_zone_manager.ZoneManager, - 'delete_connection') as remove_zone_mock: - with mock.patch.object(conf.Configuration, 'safe_get')\ - as mock_safe_get: - mock_safe_get.return_value = 'fabric' - self.driver.no_zone_terminate_connection(None, None) - remove_zone_mock.assert_not_called() diff --git a/cinder/transfer/__init__.py b/cinder/transfer/__init__.py deleted file mode 100644 index 7b4107dad..000000000 --- a/cinder/transfer/__init__.py +++ /dev/null @@ -1,26 +0,0 @@ -# Copyright (C) 2013 Hewlett-Packard Development Company, L.P. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -# Importing full names to not pollute the namespace and cause possible -# collisions with use of 'from cinder.transfer import ' elsewhere. 
- - -from oslo_config import cfg -from oslo_utils import importutils - - -CONF = cfg.CONF - -API = importutils.import_class(CONF.transfer_api_class) diff --git a/cinder/transfer/api.py b/cinder/transfer/api.py deleted file mode 100644 index cd0dfa186..000000000 --- a/cinder/transfer/api.py +++ /dev/null @@ -1,234 +0,0 @@ -# Copyright (C) 2013 Hewlett-Packard Development Company, L.P. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -""" -Handles all requests relating to transferring ownership of volumes. 
-""" - - -import hashlib -import hmac -import os - -from oslo_config import cfg -from oslo_log import log as logging -from oslo_utils import excutils -import six - -from cinder.db import base -from cinder import exception -from cinder.i18n import _ -from cinder import objects -from cinder import quota -from cinder import quota_utils -from cinder.volume import api as volume_api -from cinder.volume import utils as volume_utils - - -volume_transfer_opts = [ - cfg.IntOpt('volume_transfer_salt_length', default=8, - help='The number of characters in the salt.'), - cfg.IntOpt('volume_transfer_key_length', default=16, - help='The number of characters in the ' - 'autogenerated auth key.'), ] - -CONF = cfg.CONF -CONF.register_opts(volume_transfer_opts) - -LOG = logging.getLogger(__name__) -QUOTAS = quota.QUOTAS - - -class API(base.Base): - """API for interacting volume transfers.""" - - def __init__(self, db_driver=None): - self.volume_api = volume_api.API() - super(API, self).__init__(db_driver) - - def get(self, context, transfer_id): - volume_api.check_policy(context, 'get_transfer') - rv = self.db.transfer_get(context, transfer_id) - return dict(rv) - - def delete(self, context, transfer_id): - """Make the RPC call to delete a volume transfer.""" - volume_api.check_policy(context, 'delete_transfer') - transfer = self.db.transfer_get(context, transfer_id) - - volume_ref = self.db.volume_get(context, transfer.volume_id) - volume_utils.notify_about_volume_usage(context, volume_ref, - "transfer.delete.start") - if volume_ref['status'] != 'awaiting-transfer': - LOG.error("Volume in unexpected state") - self.db.transfer_destroy(context, transfer_id) - volume_utils.notify_about_volume_usage(context, volume_ref, - "transfer.delete.end") - - def get_all(self, context, filters=None): - filters = filters or {} - volume_api.check_policy(context, 'get_all_transfers') - if context.is_admin and 'all_tenants' in filters: - transfers = self.db.transfer_get_all(context) - else: - 
transfers = self.db.transfer_get_all_by_project(context, - context.project_id) - return transfers - - def _get_random_string(self, length): - """Get a random hex string of the specified length.""" - rndstr = "" - - # Note that the string returned by this function must contain only - # characters that the recipient can enter on their keyboard. The - # function ssh224().hexdigit() achieves this by generating a hash - # which will only contain hexadecimal digits. - while len(rndstr) < length: - rndstr += hashlib.sha224(os.urandom(255)).hexdigest() - - return rndstr[0:length] - - def _get_crypt_hash(self, salt, auth_key): - """Generate a random hash based on the salt and the auth key.""" - if not isinstance(salt, (six.binary_type, six.text_type)): - salt = str(salt) - if isinstance(salt, six.text_type): - salt = salt.encode('utf-8') - if not isinstance(auth_key, (six.binary_type, six.text_type)): - auth_key = str(auth_key) - if isinstance(auth_key, six.text_type): - auth_key = auth_key.encode('utf-8') - return hmac.new(salt, auth_key, hashlib.sha1).hexdigest() - - def create(self, context, volume_id, display_name): - """Creates an entry in the transfers table.""" - volume_api.check_policy(context, 'create_transfer') - LOG.info("Generating transfer record for volume %s", volume_id) - volume_ref = self.db.volume_get(context, volume_id) - if volume_ref['status'] != "available": - raise exception.InvalidVolume(reason=_("status must be available")) - - volume_utils.notify_about_volume_usage(context, volume_ref, - "transfer.create.start") - # The salt is just a short random string. - salt = self._get_random_string(CONF.volume_transfer_salt_length) - auth_key = self._get_random_string(CONF.volume_transfer_key_length) - crypt_hash = self._get_crypt_hash(salt, auth_key) - - # TODO(ollie): Transfer expiry needs to be implemented. 
- transfer_rec = {'volume_id': volume_id, - 'display_name': display_name, - 'salt': salt, - 'crypt_hash': crypt_hash, - 'expires_at': None} - - try: - transfer = self.db.transfer_create(context, transfer_rec) - except Exception: - LOG.error("Failed to create transfer record for %s", volume_id) - raise - volume_utils.notify_about_volume_usage(context, volume_ref, - "transfer.create.end") - return {'id': transfer['id'], - 'volume_id': transfer['volume_id'], - 'display_name': transfer['display_name'], - 'auth_key': auth_key, - 'created_at': transfer['created_at']} - - def accept(self, context, transfer_id, auth_key): - """Accept a volume that has been offered for transfer.""" - # We must use an elevated context to see the volume that is still - # owned by the donor. - volume_api.check_policy(context, 'accept_transfer') - transfer = self.db.transfer_get(context.elevated(), transfer_id) - - crypt_hash = self._get_crypt_hash(transfer['salt'], auth_key) - if crypt_hash != transfer['crypt_hash']: - msg = (_("Attempt to transfer %s with invalid auth key.") % - transfer_id) - LOG.error(msg) - raise exception.InvalidAuthKey(reason=msg) - - volume_id = transfer['volume_id'] - vol_ref = objects.Volume.get_by_id(context.elevated(), volume_id) - if vol_ref['consistencygroup_id']: - msg = _("Volume %s must not be part of a consistency " - "group.") % vol_ref['id'] - LOG.error(msg) - raise exception.InvalidVolume(reason=msg) - - try: - values = {'per_volume_gigabytes': vol_ref.size} - QUOTAS.limit_check(context, project_id=context.project_id, - **values) - except exception.OverQuota as e: - quotas = e.kwargs['quotas'] - raise exception.VolumeSizeExceedsLimit( - size=vol_ref.size, limit=quotas['per_volume_gigabytes']) - - try: - reserve_opts = {'volumes': 1, 'gigabytes': vol_ref.size} - QUOTAS.add_volume_type_opts(context, - reserve_opts, - vol_ref.volume_type_id) - reservations = QUOTAS.reserve(context, **reserve_opts) - except exception.OverQuota as e: - 
quota_utils.process_reserve_over_quota(context, e, - resource='volumes', - size=vol_ref.size) - try: - donor_id = vol_ref['project_id'] - reserve_opts = {'volumes': -1, 'gigabytes': -vol_ref.size} - QUOTAS.add_volume_type_opts(context, - reserve_opts, - vol_ref.volume_type_id) - donor_reservations = QUOTAS.reserve(context.elevated(), - project_id=donor_id, - **reserve_opts) - except Exception: - donor_reservations = None - LOG.exception("Failed to update quota donating volume" - " transfer id %s", transfer_id) - - volume_utils.notify_about_volume_usage(context, vol_ref, - "transfer.accept.start") - try: - # Transfer ownership of the volume now, must use an elevated - # context. - self.volume_api.accept_transfer(context, - vol_ref, - context.user_id, - context.project_id) - self.db.transfer_accept(context.elevated(), - transfer_id, - context.user_id, - context.project_id) - QUOTAS.commit(context, reservations) - if donor_reservations: - QUOTAS.commit(context, donor_reservations, project_id=donor_id) - LOG.info("Volume %s has been transferred.", volume_id) - except Exception: - with excutils.save_and_reraise_exception(): - QUOTAS.rollback(context, reservations) - if donor_reservations: - QUOTAS.rollback(context, donor_reservations, - project_id=donor_id) - - vol_ref = self.db.volume_get(context, volume_id) - volume_utils.notify_about_volume_usage(context, vol_ref, - "transfer.accept.end") - return {'id': transfer_id, - 'display_name': transfer['display_name'], - 'volume_id': vol_ref['id']} diff --git a/cinder/utils.py b/cinder/utils.py deleted file mode 100644 index 0b18662fd..000000000 --- a/cinder/utils.py +++ /dev/null @@ -1,1139 +0,0 @@ -# Copyright 2010 United States Government as represented by the -# Administrator of the National Aeronautics and Space Administration. -# Copyright 2011 Justin Santa Barbara -# All Rights Reserved. 
-# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -"""Utilities and helper functions.""" - - -import abc -import contextlib -import datetime -import functools -import inspect -import logging as py_logging -import math -import os -import pyclbr -import random -import re -import shutil -import socket -import stat -import sys -import tempfile -import time -import types - -from os_brick import encryptors -from os_brick.initiator import connector -from oslo_concurrency import lockutils -from oslo_concurrency import processutils -from oslo_config import cfg -from oslo_log import log as logging -from oslo_utils import encodeutils -from oslo_utils import excutils -from oslo_utils import importutils -from oslo_utils import strutils -from oslo_utils import timeutils -import retrying -import six -import webob.exc - -from cinder import exception -from cinder.i18n import _ -from cinder import keymgr - - -CONF = cfg.CONF -LOG = logging.getLogger(__name__) -ISO_TIME_FORMAT = "%Y-%m-%dT%H:%M:%S" -PERFECT_TIME_FORMAT = "%Y-%m-%dT%H:%M:%S.%f" -VALID_TRACE_FLAGS = {'method', 'api'} -TRACE_METHOD = False -TRACE_API = False - -synchronized = lockutils.synchronized_with_prefix('cinder-') - - -def as_int(obj, quiet=True): - # Try "2" -> 2 - try: - return int(obj) - except (ValueError, TypeError): - pass - # Try "2.5" -> 2 - try: - return int(float(obj)) - except (ValueError, TypeError): - pass - # Eck, not sure what this is then. 
- if not quiet: - raise TypeError(_("Can not translate %s to integer.") % (obj)) - return obj - - -def check_exclusive_options(**kwargs): - """Checks that only one of the provided options is actually not-none. - - Iterates over all the kwargs passed in and checks that only one of said - arguments is not-none, if more than one is not-none then an exception will - be raised with the names of those arguments who were not-none. - """ - - if not kwargs: - return - - pretty_keys = kwargs.pop("pretty_keys", True) - exclusive_options = {} - for (k, v) in kwargs.items(): - if v is not None: - exclusive_options[k] = True - - if len(exclusive_options) > 1: - # Change the format of the names from pythonic to - # something that is more readable. - # - # Ex: 'the_key' -> 'the key' - if pretty_keys: - names = [k.replace('_', ' ') for k in kwargs.keys()] - else: - names = kwargs.keys() - names = ", ".join(sorted(names)) - msg = (_("May specify only one of %s") % (names)) - raise exception.InvalidInput(reason=msg) - - -def execute(*cmd, **kwargs): - """Convenience wrapper around oslo's execute() method.""" - if 'run_as_root' in kwargs and 'root_helper' not in kwargs: - kwargs['root_helper'] = get_root_helper() - return processutils.execute(*cmd, **kwargs) - - -def check_ssh_injection(cmd_list): - ssh_injection_pattern = ['`', '$', '|', '||', ';', '&', '&&', '>', '>>', - '<'] - - # Check whether injection attacks exist - for arg in cmd_list: - arg = arg.strip() - - # Check for matching quotes on the ends - is_quoted = re.match('^(?P[\'"])(?P.*)(?P=quote)$', arg) - if is_quoted: - # Check for unescaped quotes within the quoted argument - quoted = is_quoted.group('quoted') - if quoted: - if (re.match('[\'"]', quoted) or - re.search('[^\\\\][\'"]', quoted)): - raise exception.SSHInjectionThreat(command=cmd_list) - else: - # We only allow spaces within quoted arguments, and that - # is the only special character allowed within quotes - if len(arg.split()) > 1: - raise 
exception.SSHInjectionThreat(command=cmd_list) - - # Second, check whether danger character in command. So the shell - # special operator must be a single argument. - for c in ssh_injection_pattern: - if c not in arg: - continue - - result = arg.find(c) - if not result == -1: - if result == 0 or not arg[result - 1] == '\\': - raise exception.SSHInjectionThreat(command=cmd_list) - - -def check_metadata_properties(metadata=None): - """Checks that the volume metadata properties are valid.""" - - if not metadata: - metadata = {} - if not isinstance(metadata, dict): - msg = _("Metadata should be a dict.") - raise exception.InvalidInput(msg) - - for k, v in metadata.items(): - try: - check_string_length(k, "Metadata key: %s" % k, min_length=1) - check_string_length(v, "Value for metadata key: %s" % k) - except exception.InvalidInput as exc: - raise exception.InvalidVolumeMetadata(reason=exc) - # for backward compatibility - if len(k) > 255: - msg = _("Metadata property key %s greater than 255 " - "characters.") % k - raise exception.InvalidVolumeMetadataSize(reason=msg) - if len(v) > 255: - msg = _("Metadata property key %s value greater than " - "255 characters.") % k - raise exception.InvalidVolumeMetadataSize(reason=msg) - - -def last_completed_audit_period(unit=None): - """This method gives you the most recently *completed* audit period. - - arguments: - units: string, one of 'hour', 'day', 'month', 'year' - Periods normally begin at the beginning (UTC) of the - period unit (So a 'day' period begins at midnight UTC, - a 'month' unit on the 1st, a 'year' on Jan, 1) - unit string may be appended with an optional offset - like so: 'day@18' This will begin the period at 18:00 - UTC. 'month@15' starts a monthly period on the 15th, - and year@3 begins a yearly one on March 1st. - - - returns: 2 tuple of datetimes (begin, end) - The begin timestamp of this audit period is the same as the - end of the previous. 
- """ - if not unit: - unit = CONF.volume_usage_audit_period - - offset = 0 - if '@' in unit: - unit, offset = unit.split("@", 1) - offset = int(offset) - - rightnow = timeutils.utcnow() - if unit not in ('month', 'day', 'year', 'hour'): - raise ValueError('Time period must be hour, day, month or year') - if unit == 'month': - if offset == 0: - offset = 1 - end = datetime.datetime(day=offset, - month=rightnow.month, - year=rightnow.year) - if end >= rightnow: - year = rightnow.year - if 1 >= rightnow.month: - year -= 1 - month = 12 + (rightnow.month - 1) - else: - month = rightnow.month - 1 - end = datetime.datetime(day=offset, - month=month, - year=year) - year = end.year - if 1 >= end.month: - year -= 1 - month = 12 + (end.month - 1) - else: - month = end.month - 1 - begin = datetime.datetime(day=offset, month=month, year=year) - - elif unit == 'year': - if offset == 0: - offset = 1 - end = datetime.datetime(day=1, month=offset, year=rightnow.year) - if end >= rightnow: - end = datetime.datetime(day=1, - month=offset, - year=rightnow.year - 1) - begin = datetime.datetime(day=1, - month=offset, - year=rightnow.year - 2) - else: - begin = datetime.datetime(day=1, - month=offset, - year=rightnow.year - 1) - - elif unit == 'day': - end = datetime.datetime(hour=offset, - day=rightnow.day, - month=rightnow.month, - year=rightnow.year) - if end >= rightnow: - end = end - datetime.timedelta(days=1) - begin = end - datetime.timedelta(days=1) - - elif unit == 'hour': - end = rightnow.replace(minute=offset, second=0, microsecond=0) - if end >= rightnow: - end = end - datetime.timedelta(hours=1) - begin = end - datetime.timedelta(hours=1) - - return (begin, end) - - -def is_none_string(val): - """Check if a string represents a None value.""" - if not isinstance(val, six.string_types): - return False - - return val.lower() == 'none' - - -def monkey_patch(): - """Patches decorators for all functions in a specified module. 
- - If the CONF.monkey_patch set as True, - this function patches a decorator - for all functions in specified modules. - - You can set decorators for each modules - using CONF.monkey_patch_modules. - The format is "Module path:Decorator function". - Example: 'cinder.api.ec2.cloud:' \ - cinder.openstack.common.notifier.api.notify_decorator' - - Parameters of the decorator are as follows. - (See cinder.openstack.common.notifier.api.notify_decorator) - - :param name: name of the function - :param function: object of the function - """ - # If CONF.monkey_patch is not True, this function do nothing. - if not CONF.monkey_patch: - return - # Get list of modules and decorators - for module_and_decorator in CONF.monkey_patch_modules: - module, decorator_name = module_and_decorator.split(':') - # import decorator function - decorator = importutils.import_class(decorator_name) - __import__(module) - # Retrieve module information using pyclbr - module_data = pyclbr.readmodule_ex(module) - for key in module_data.keys(): - # set the decorator for the class methods - if isinstance(module_data[key], pyclbr.Class): - clz = importutils.import_class("%s.%s" % (module, key)) - # On Python 3, unbound methods are regular functions - predicate = inspect.isfunction if six.PY3 else inspect.ismethod - for method, func in inspect.getmembers(clz, predicate): - setattr( - clz, method, - decorator("%s.%s.%s" % (module, key, method), func)) - # set the decorator for the function - elif isinstance(module_data[key], pyclbr.Function): - func = importutils.import_class("%s.%s" % (module, key)) - setattr(sys.modules[module], key, - decorator("%s.%s" % (module, key), func)) - - -def make_dev_path(dev, partition=None, base='/dev'): - """Return a path to a particular device. 
- - >>> make_dev_path('xvdc') - /dev/xvdc - - >>> make_dev_path('xvdc', 1) - /dev/xvdc1 - """ - path = os.path.join(base, dev) - if partition: - path += str(partition) - return path - - -def sanitize_hostname(hostname): - """Return a hostname which conforms to RFC-952 and RFC-1123 specs.""" - if six.PY3: - hostname = hostname.encode('latin-1', 'ignore') - hostname = hostname.decode('latin-1') - else: - if isinstance(hostname, six.text_type): - hostname = hostname.encode('latin-1', 'ignore') - - hostname = re.sub(r'[ _]', '-', hostname) - hostname = re.sub(r'[^\w.-]+', '', hostname) - hostname = hostname.lower() - hostname = hostname.strip('.-') - - return hostname - - -def read_file_as_root(file_path): - """Secure helper to read file as root.""" - try: - out, _err = execute('cat', file_path, run_as_root=True) - return out - except processutils.ProcessExecutionError: - raise exception.FileNotFound(file_path=file_path) - - -def robust_file_write(directory, filename, data): - """Robust file write. - - Use "write to temp file and rename" model for writing the - persistence file. - - :param directory: Target directory to create a file. - :param filename: File name to store specified data. - :param data: String data. - """ - tempname = None - dirfd = None - try: - dirfd = os.open(directory, os.O_DIRECTORY) - - # write data to temporary file - with tempfile.NamedTemporaryFile(prefix=filename, - dir=directory, - delete=False) as tf: - tempname = tf.name - tf.write(data.encode('utf-8')) - tf.flush() - os.fdatasync(tf.fileno()) - tf.close() - - # Fsync the directory to ensure the fact of the existence of - # the temp file hits the disk. - os.fsync(dirfd) - # If destination file exists, it will be replaced silently. - os.rename(tempname, os.path.join(directory, filename)) - # Fsync the directory to ensure the rename hits the disk. 
- os.fsync(dirfd) - except OSError: - with excutils.save_and_reraise_exception(): - LOG.error("Failed to write persistence file: %(path)s.", - {'path': os.path.join(directory, filename)}) - if os.path.isfile(tempname): - os.unlink(tempname) - finally: - if dirfd: - os.close(dirfd) - - -@contextlib.contextmanager -def temporary_chown(path, owner_uid=None): - """Temporarily chown a path. - - :params owner_uid: UID of temporary owner (defaults to current user) - """ - if owner_uid is None: - owner_uid = os.getuid() - - orig_uid = os.stat(path).st_uid - - if orig_uid != owner_uid: - execute('chown', owner_uid, path, run_as_root=True) - try: - yield - finally: - if orig_uid != owner_uid: - execute('chown', orig_uid, path, run_as_root=True) - - -@contextlib.contextmanager -def tempdir(**kwargs): - tmpdir = tempfile.mkdtemp(**kwargs) - try: - yield tmpdir - finally: - try: - shutil.rmtree(tmpdir) - except OSError as e: - LOG.debug('Could not remove tmpdir: %s', - six.text_type(e)) - - -def walk_class_hierarchy(clazz, encountered=None): - """Walk class hierarchy, yielding most derived classes first.""" - if not encountered: - encountered = [] - for subclass in clazz.__subclasses__(): - if subclass not in encountered: - encountered.append(subclass) - # drill down to leaves first - for subsubclass in walk_class_hierarchy(subclass, encountered): - yield subsubclass - yield subclass - - -def get_root_helper(): - return 'sudo cinder-rootwrap %s' % CONF.rootwrap_config - - -def brick_get_connector_properties(multipath=False, enforce_multipath=False): - """Wrapper to automatically set root_helper in brick calls. - - :param multipath: A boolean indicating whether the connector can - support multipath. - :param enforce_multipath: If True, it raises exception when multipath=True - is specified but multipathd is not running. - If False, it falls back to multipath=False - when multipathd is not running. 
- """ - - root_helper = get_root_helper() - return connector.get_connector_properties(root_helper, - CONF.my_ip, - multipath, - enforce_multipath) - - -def brick_get_connector(protocol, driver=None, - use_multipath=False, - device_scan_attempts=3, - *args, **kwargs): - """Wrapper to get a brick connector object. - - This automatically populates the required protocol as well - as the root_helper needed to execute commands. - """ - - root_helper = get_root_helper() - return connector.InitiatorConnector.factory(protocol, root_helper, - driver=driver, - use_multipath=use_multipath, - device_scan_attempts= - device_scan_attempts, - *args, **kwargs) - - -def brick_get_encryptor(connection_info, *args, **kwargs): - """Wrapper to get a brick encryptor object.""" - - root_helper = get_root_helper() - key_manager = keymgr.API(CONF) - return encryptors.get_volume_encryptor(root_helper=root_helper, - connection_info=connection_info, - keymgr=key_manager, - *args, **kwargs) - - -def brick_attach_volume_encryptor(context, attach_info, encryption): - """Attach encryption layer.""" - connection_info = attach_info['conn'] - connection_info['data']['device_path'] = attach_info['device']['path'] - encryptor = brick_get_encryptor(connection_info, - **encryption) - encryptor.attach_volume(context, **encryption) - - -def brick_detach_volume_encryptor(attach_info, encryption): - """Detach encryption layer.""" - connection_info = attach_info['conn'] - connection_info['data']['device_path'] = attach_info['device']['path'] - - encryptor = brick_get_encryptor(connection_info, - **encryption) - encryptor.detach_volume(**encryption) - - -def require_driver_initialized(driver): - """Verifies if `driver` is initialized - - If the driver is not initialized, an exception will be raised. - - :params driver: The driver instance. 
- :raises: `exception.DriverNotInitialized` - """ - # we can't do anything if the driver didn't init - if not driver.initialized: - driver_name = driver.__class__.__name__ - LOG.error("Volume driver %s not initialized", driver_name) - raise exception.DriverNotInitialized() - else: - log_unsupported_driver_warning(driver) - - -def log_unsupported_driver_warning(driver): - """Annoy the log about unsupported drivers.""" - if not driver.supported: - # Check to see if the driver is flagged as supported. - LOG.warning("Volume driver (%(driver_name)s %(version)s) is " - "currently unsupported and may be removed in the " - "next release of OpenStack. Use at your own risk.", - {'driver_name': driver.__class__.__name__, - 'version': driver.get_version()}, - resource={'type': 'driver', - 'id': driver.__class__.__name__}) - - -def get_file_mode(path): - """This primarily exists to make unit testing easier.""" - return stat.S_IMODE(os.stat(path).st_mode) - - -def get_file_gid(path): - """This primarily exists to make unit testing easier.""" - return os.stat(path).st_gid - - -def get_file_size(path): - """Returns the file size.""" - return os.stat(path).st_size - - -def _get_disk_of_partition(devpath, st=None): - """Gets a disk device path and status from partition path. - - Returns a disk device path from a partition device path, and stat for - the device. If devpath is not a partition, devpath is returned as it is. - For example, '/dev/sda' is returned for '/dev/sda1', and '/dev/disk1' is - for '/dev/disk1p1' ('p' is prepended to the partition number if the disk - name ends with numbers). 
- """ - diskpath = re.sub(r'(?:(?<=\d)p)?\d+$', '', devpath) - if diskpath != devpath: - try: - st_disk = os.stat(diskpath) - if stat.S_ISBLK(st_disk.st_mode): - return (diskpath, st_disk) - except OSError: - pass - # devpath is not a partition - if st is None: - st = os.stat(devpath) - return (devpath, st) - - -def get_bool_param(param_string, params, default=False): - param = params.get(param_string, default) - if not strutils.is_valid_boolstr(param): - msg = _("Value '%(param)s' for '%(param_string)s' is not " - "a boolean.") % {'param': param, 'param_string': param_string} - raise exception.InvalidParameterValue(err=msg) - - return strutils.bool_from_string(param, strict=True) - - -def get_blkdev_major_minor(path, lookup_for_file=True): - """Get 'major:minor' number of block device. - - Get the device's 'major:minor' number of a block device to control - I/O ratelimit of the specified path. - If lookup_for_file is True and the path is a regular file, lookup a disk - device which the file lies on and returns the result for the device. - """ - st = os.stat(path) - if stat.S_ISBLK(st.st_mode): - path, st = _get_disk_of_partition(path, st) - return '%d:%d' % (os.major(st.st_rdev), os.minor(st.st_rdev)) - elif stat.S_ISCHR(st.st_mode): - # No I/O ratelimit control is provided for character devices - return None - elif lookup_for_file: - # lookup the mounted disk which the file lies on - out, _err = execute('df', path) - devpath = out.split("\n")[1].split()[0] - if devpath[0] is not '/': - # the file is on a network file system - return None - return get_blkdev_major_minor(devpath, False) - else: - msg = _("Unable to get a block device for file \'%s\'") % path - raise exception.Error(msg) - - -def check_string_length(value, name, min_length=0, max_length=None, - allow_all_spaces=True): - """Check the length of specified string. 
- - :param value: the value of the string - :param name: the name of the string - :param min_length: the min_length of the string - :param max_length: the max_length of the string - """ - try: - strutils.check_string_length(value, name=name, - min_length=min_length, - max_length=max_length) - except(ValueError, TypeError) as exc: - raise exception.InvalidInput(reason=exc) - - if not allow_all_spaces and value.isspace(): - msg = _('%(name)s cannot be all spaces.') - raise exception.InvalidInput(reason=msg) - - -_visible_admin_metadata_keys = ['readonly', 'attached_mode'] - - -def add_visible_admin_metadata(volume): - """Add user-visible admin metadata to regular metadata. - - Extracts the admin metadata keys that are to be made visible to - non-administrators, and adds them to the regular metadata structure for the - passed-in volume. - """ - visible_admin_meta = {} - - if volume.get('volume_admin_metadata'): - if isinstance(volume['volume_admin_metadata'], dict): - volume_admin_metadata = volume['volume_admin_metadata'] - for key in volume_admin_metadata: - if key in _visible_admin_metadata_keys: - visible_admin_meta[key] = volume_admin_metadata[key] - else: - for item in volume['volume_admin_metadata']: - if item['key'] in _visible_admin_metadata_keys: - visible_admin_meta[item['key']] = item['value'] - # avoid circular ref when volume is a Volume instance - elif (volume.get('admin_metadata') and - isinstance(volume.get('admin_metadata'), dict)): - for key in _visible_admin_metadata_keys: - if key in volume['admin_metadata'].keys(): - visible_admin_meta[key] = volume['admin_metadata'][key] - - if not visible_admin_meta: - return - - # NOTE(zhiyan): update visible administration metadata to - # volume metadata, administration metadata will rewrite existing key. 
- if volume.get('volume_metadata'): - orig_meta = list(volume.get('volume_metadata')) - for item in orig_meta: - if item['key'] in visible_admin_meta.keys(): - item['value'] = visible_admin_meta.pop(item['key']) - for key, value in visible_admin_meta.items(): - orig_meta.append({'key': key, 'value': value}) - volume['volume_metadata'] = orig_meta - # avoid circular ref when vol is a Volume instance - elif (volume.get('metadata') and - isinstance(volume.get('metadata'), dict)): - volume['metadata'].update(visible_admin_meta) - else: - volume['metadata'] = visible_admin_meta - - -def remove_invalid_filter_options(context, filters, - allowed_search_options): - """Remove search options that are not valid for non-admin API/context.""" - - if context.is_admin: - # Allow all options - return - # Otherwise, strip out all unknown options - unknown_options = [opt for opt in filters - if opt not in allowed_search_options] - bad_options = ", ".join(unknown_options) - LOG.debug("Removing options '%s' from query.", bad_options) - for opt in unknown_options: - del filters[opt] - - -def is_blk_device(dev): - try: - if stat.S_ISBLK(os.stat(dev).st_mode): - return True - return False - except Exception: - LOG.debug('Path %s not found in is_blk_device check', dev) - return False - - -class ComparableMixin(object): - def _compare(self, other, method): - try: - return method(self._cmpkey(), other._cmpkey()) - except (AttributeError, TypeError): - # _cmpkey not implemented, or return different type, - # so I can't compare with "other". 
- return NotImplemented - - def __lt__(self, other): - return self._compare(other, lambda s, o: s < o) - - def __le__(self, other): - return self._compare(other, lambda s, o: s <= o) - - def __eq__(self, other): - return self._compare(other, lambda s, o: s == o) - - def __ge__(self, other): - return self._compare(other, lambda s, o: s >= o) - - def __gt__(self, other): - return self._compare(other, lambda s, o: s > o) - - def __ne__(self, other): - return self._compare(other, lambda s, o: s != o) - - -def retry(exceptions, interval=1, retries=3, backoff_rate=2, - wait_random=False): - - def _retry_on_exception(e): - return isinstance(e, exceptions) - - def _backoff_sleep(previous_attempt_number, delay_since_first_attempt_ms): - exp = backoff_rate ** previous_attempt_number - wait_for = interval * exp - - if wait_random: - random.seed() - wait_val = random.randrange(interval * 1000.0, wait_for * 1000.0) - else: - wait_val = wait_for * 1000.0 - - LOG.debug("Sleeping for %s seconds", (wait_val / 1000.0)) - - return wait_val - - def _print_stop(previous_attempt_number, delay_since_first_attempt_ms): - delay_since_first_attempt = delay_since_first_attempt_ms / 1000.0 - LOG.debug("Failed attempt %s", previous_attempt_number) - LOG.debug("Have been at this for %s seconds", - delay_since_first_attempt) - return previous_attempt_number == retries - - if retries < 1: - raise ValueError('Retries must be greater than or ' - 'equal to 1 (received: %s). ' % retries) - - def _decorator(f): - - @six.wraps(f) - def _wrapper(*args, **kwargs): - r = retrying.Retrying(retry_on_exception=_retry_on_exception, - wait_func=_backoff_sleep, - stop_func=_print_stop) - return r.call(f, *args, **kwargs) - - return _wrapper - - return _decorator - - -def convert_str(text): - """Convert to native string. 
- - Convert bytes and Unicode strings to native strings: - - * convert to bytes on Python 2: - encode Unicode using encodeutils.safe_encode() - * convert to Unicode on Python 3: decode bytes from UTF-8 - """ - if six.PY2: - return encodeutils.to_utf8(text) - else: - if isinstance(text, bytes): - return text.decode('utf-8') - else: - return text - - -def trace_method(f): - """Decorates a function if TRACE_METHOD is true.""" - @functools.wraps(f) - def trace_method_logging_wrapper(*args, **kwargs): - if TRACE_METHOD: - return trace(f)(*args, **kwargs) - return f(*args, **kwargs) - return trace_method_logging_wrapper - - -def trace_api(f): - """Decorates a function if TRACE_API is true.""" - @functools.wraps(f) - def trace_api_logging_wrapper(*args, **kwargs): - if TRACE_API: - return trace(f)(*args, **kwargs) - return f(*args, **kwargs) - return trace_api_logging_wrapper - - -def trace(f): - """Trace calls to the decorated function. - - This decorator should always be defined as the outermost decorator so it - is defined last. This is important so it does not interfere - with other decorators. - - Using this decorator on a function will cause its execution to be logged at - `DEBUG` level with arguments, return values, and exceptions. - - :returns: a function decorator - """ - - func_name = f.__name__ - - @functools.wraps(f) - def trace_logging_wrapper(*args, **kwargs): - if len(args) > 0: - maybe_self = args[0] - else: - maybe_self = kwargs.get('self', None) - - if maybe_self and hasattr(maybe_self, '__module__'): - logger = logging.getLogger(maybe_self.__module__) - else: - logger = LOG - - # NOTE(ameade): Don't bother going any further if DEBUG log level - # is not enabled for the logger. 
- if not logger.isEnabledFor(py_logging.DEBUG): - return f(*args, **kwargs) - - all_args = inspect.getcallargs(f, *args, **kwargs) - - logger.debug('==> %(func)s: call %(all_args)r', - {'func': func_name, 'all_args': all_args}) - - start_time = time.time() * 1000 - try: - result = f(*args, **kwargs) - except Exception as exc: - total_time = int(round(time.time() * 1000)) - start_time - logger.debug('<== %(func)s: exception (%(time)dms) %(exc)r', - {'func': func_name, - 'time': total_time, - 'exc': exc}) - raise - total_time = int(round(time.time() * 1000)) - start_time - - if isinstance(result, dict): - mask_result = strutils.mask_dict_password(result) - elif isinstance(result, six.string_types): - mask_result = strutils.mask_password(result) - else: - mask_result = result - - logger.debug('<== %(func)s: return (%(time)dms) %(result)r', - {'func': func_name, - 'time': total_time, - 'result': mask_result}) - return result - return trace_logging_wrapper - - -class TraceWrapperMetaclass(type): - """Metaclass that wraps all methods of a class with trace_method. - - This metaclass will cause every function inside of the class to be - decorated with the trace_method decorator. - - To use the metaclass you define a class like so: - @six.add_metaclass(utils.TraceWrapperMetaclass) - class MyClass(object): - """ - def __new__(meta, classname, bases, classDict): - newClassDict = {} - for attributeName, attribute in classDict.items(): - if isinstance(attribute, types.FunctionType): - # replace it with a wrapped version - attribute = functools.update_wrapper(trace_method(attribute), - attribute) - newClassDict[attributeName] = attribute - - return type.__new__(meta, classname, bases, newClassDict) - - -class TraceWrapperWithABCMetaclass(abc.ABCMeta, TraceWrapperMetaclass): - """Metaclass that wraps all methods of a class with trace.""" - pass - - -def setup_tracing(trace_flags): - """Set global variables for each trace flag. 
- - Sets variables TRACE_METHOD and TRACE_API, which represent - whether to log methods or api traces. - - :param trace_flags: a list of strings - """ - global TRACE_METHOD - global TRACE_API - try: - trace_flags = [flag.strip() for flag in trace_flags] - except TypeError: # Handle when trace_flags is None or a test mock - trace_flags = [] - for invalid_flag in (set(trace_flags) - VALID_TRACE_FLAGS): - LOG.warning('Invalid trace flag: %s', invalid_flag) - TRACE_METHOD = 'method' in trace_flags - TRACE_API = 'api' in trace_flags - - -def resolve_hostname(hostname): - """Resolves host name to IP address. - - Resolves a host name (my.data.point.com) to an IP address (10.12.143.11). - This routine also works if the data passed in hostname is already an IP. - In this case, the same IP address will be returned. - - :param hostname: Host name to resolve. - :returns: IP Address for Host name. - """ - result = socket.getaddrinfo(hostname, None)[0] - (family, socktype, proto, canonname, sockaddr) = result - LOG.debug('Asked to resolve hostname %(host)s and got IP %(ip)s.', - {'host': hostname, 'ip': sockaddr[0]}) - return sockaddr[0] - - -def build_or_str(elements, str_format=None): - """Builds a string of elements joined by 'or'. - - Will join strings with the 'or' word and if a str_format is provided it - will be used to format the resulted joined string. - If there are no elements an empty string will be returned. - - :param elements: Elements we want to join. - :type elements: String or iterable of strings. - :param str_format: String to use to format the response. - :type str_format: String. 
- """ - if not elements: - return '' - - if not isinstance(elements, six.string_types): - elements = _(' or ').join(elements) - - if str_format: - return str_format % elements - return elements - - -def calculate_virtual_free_capacity(total_capacity, - free_capacity, - provisioned_capacity, - thin_provisioning_support, - max_over_subscription_ratio, - reserved_percentage, - thin): - """Calculate the virtual free capacity based on thin provisioning support. - - :param total_capacity: total_capacity_gb of a host_state or pool. - :param free_capacity: free_capacity_gb of a host_state or pool. - :param provisioned_capacity: provisioned_capacity_gb of a host_state - or pool. - :param thin_provisioning_support: thin_provisioning_support of - a host_state or a pool. - :param max_over_subscription_ratio: max_over_subscription_ratio of - a host_state or a pool - :param reserved_percentage: reserved_percentage of a host_state or - a pool. - :param thin: whether volume to be provisioned is thin - :returns: the calculated virtual free capacity. - """ - - total = float(total_capacity) - reserved = float(reserved_percentage) / 100 - - if thin and thin_provisioning_support: - free = (total * max_over_subscription_ratio - - provisioned_capacity - - math.floor(total * reserved)) - else: - # Calculate how much free space is left after taking into - # account the reserved space. - free = free_capacity - math.floor(total * reserved) - return free - - -def validate_integer(value, name, min_value=None, max_value=None): - """Make sure that value is a valid integer, potentially within range. 
- - :param value: the value of the integer - :param name: the name of the integer - :param min_length: the min_length of the integer - :param max_length: the max_length of the integer - :returns: integer - """ - if not strutils.is_int_like(value): - raise webob.exc.HTTPBadRequest(explanation=( - _('%s must be an integer.') % name)) - value = int(value) - - if min_value is not None and value < min_value: - raise webob.exc.HTTPBadRequest( - explanation=(_('%(value_name)s must be >= %(min_value)d') % - {'value_name': name, 'min_value': min_value})) - if max_value is not None and value > max_value: - raise webob.exc.HTTPBadRequest( - explanation=(_('%(value_name)s must be <= %(max_value)d') % - {'value_name': name, 'max_value': max_value})) - - return value - - -def validate_dictionary_string_length(specs): - """Check the length of each key and value of dictionary.""" - if not isinstance(specs, dict): - msg = _('specs must be a dictionary.') - raise exception.InvalidInput(reason=msg) - - for key, value in specs.items(): - if key is not None: - check_string_length(key, 'Key "%s"' % key, - min_length=1, max_length=255) - - if value is not None: - check_string_length(value, 'Value for key "%s"' % key, - min_length=0, max_length=255) - - -def service_expired_time(with_timezone=False): - return (timeutils.utcnow(with_timezone=with_timezone) - - datetime.timedelta(seconds=CONF.service_down_time)) - - -class DoNothing(str): - """Class that literrally does nothing. - - We inherit from str in case it's called with json.dumps. 
- """ - def __call__(self, *args, **kwargs): - return self - - def __getattr__(self, name): - return self - - -DO_NOTHING = DoNothing() - - -def notifications_enabled(conf): - """Check if oslo notifications are enabled.""" - notifications_driver = set(conf.oslo_messaging_notifications.driver) - return notifications_driver and notifications_driver != {'noop'} - - -def if_notifications_enabled(f): - """Calls decorated method only if notifications are enabled.""" - @functools.wraps(f) - def wrapped(*args, **kwargs): - if notifications_enabled(CONF): - return f(*args, **kwargs) - return DO_NOTHING - return wrapped - - -LOG_LEVELS = ('INFO', 'WARNING', 'ERROR', 'DEBUG') - - -def get_log_method(level_string): - level_string = level_string or '' - upper_level_string = level_string.upper() - if upper_level_string not in LOG_LEVELS: - raise exception.InvalidInput( - reason=_('%s is not a valid log level.') % level_string) - return getattr(logging, upper_level_string) - - -def set_log_levels(prefix, level_string): - level = get_log_method(level_string) - prefix = prefix or '' - - for k, v in logging._loggers.items(): - if k and k.startswith(prefix): - v.logger.setLevel(level) - - -def get_log_levels(prefix): - prefix = prefix or '' - return {k: logging.logging.getLevelName(v.logger.getEffectiveLevel()) - for k, v in logging._loggers.items() - if k and k.startswith(prefix)} - - -def paths_normcase_equal(path_a, path_b): - return os.path.normcase(path_a) == os.path.normcase(path_b) diff --git a/cinder/version.py b/cinder/version.py deleted file mode 100644 index e3dbd70d7..000000000 --- a/cinder/version.py +++ /dev/null @@ -1,23 +0,0 @@ -# Copyright 2011 OpenStack Foundation -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. 
You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from pbr import version as pbr_version - -CINDER_VENDOR = "OpenStack Foundation" -CINDER_PRODUCT = "OpenStack Cinder" -CINDER_PACKAGE = None # OS distro package version suffix - -loaded = False -version_info = pbr_version.VersionInfo('cinder') -version_string = version_info.version_string diff --git a/cinder/volume/__init__.py b/cinder/volume/__init__.py deleted file mode 100644 index 91589956c..000000000 --- a/cinder/volume/__init__.py +++ /dev/null @@ -1,30 +0,0 @@ -# Copyright 2010 United States Government as represented by the -# Administrator of the National Aeronautics and Space Administration. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -# Importing full names to not pollute the namespace and cause possible -# collisions with use of 'from cinder.volume import ' elsewhere. 
- -from oslo_utils import importutils - -from cinder.common import config - - -CONF = config.CONF - - -def API(*args, **kwargs): - class_name = CONF.volume_api_class - return importutils.import_object(class_name, *args, **kwargs) diff --git a/cinder/volume/api.py b/cinder/volume/api.py deleted file mode 100644 index a9b435fc2..000000000 --- a/cinder/volume/api.py +++ /dev/null @@ -1,2063 +0,0 @@ -# Copyright 2010 United States Government as represented by the -# Administrator of the National Aeronautics and Space Administration. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -"""Handles all requests relating to volumes.""" - -import ast -import collections -import datetime -import functools - -from oslo_config import cfg -from oslo_log import log as logging -from oslo_utils import excutils -from oslo_utils import strutils -from oslo_utils import timeutils -from oslo_utils import versionutils -import six - -from cinder.api import common -from cinder.common import constants -from cinder import context -from cinder import db -from cinder.db import base -from cinder import exception -from cinder import flow_utils -from cinder.i18n import _ -from cinder.image import cache as image_cache -from cinder.image import glance -from cinder import keymgr as key_manager -from cinder import objects -from cinder.objects import base as objects_base -from cinder.objects import fields -import cinder.policy -from cinder import quota -from cinder import quota_utils -from cinder.scheduler import rpcapi as scheduler_rpcapi -from cinder import utils -from cinder.volume.flows.api import create_volume -from cinder.volume.flows.api import manage_existing -from cinder.volume import rpcapi as volume_rpcapi -from cinder.volume import utils as volume_utils -from cinder.volume import volume_types - - -allow_force_upload_opt = cfg.BoolOpt('enable_force_upload', - default=False, - help='Enables the Force option on ' - 'upload_to_image. 
This enables ' - 'running upload_volume on in-use ' - 'volumes for backends that ' - 'support it.') -volume_host_opt = cfg.BoolOpt('snapshot_same_host', - default=True, - help='Create volume from snapshot at the host ' - 'where snapshot resides') -volume_same_az_opt = cfg.BoolOpt('cloned_volume_same_az', - default=True, - help='Ensure that the new volumes are the ' - 'same AZ as snapshot or source volume') -az_cache_time_opt = cfg.IntOpt('az_cache_duration', - default=3600, - help='Cache volume availability zones in ' - 'memory for the provided duration in ' - 'seconds') - -CONF = cfg.CONF -CONF.register_opt(allow_force_upload_opt) -CONF.register_opt(volume_host_opt) -CONF.register_opt(volume_same_az_opt) -CONF.register_opt(az_cache_time_opt) - -CONF.import_opt('glance_core_properties', 'cinder.image.glance') - -LOG = logging.getLogger(__name__) -QUOTAS = quota.QUOTAS -AO_LIST = objects.VolumeAttachmentList - - -def wrap_check_policy(func): - """Check policy corresponding to the wrapped methods prior to execution - - This decorator requires the first 3 args of the wrapped function - to be (self, context, volume) - """ - @functools.wraps(func) - def wrapped(self, context, target_obj, *args, **kwargs): - check_policy(context, func.__name__, target_obj) - return func(self, context, target_obj, *args, **kwargs) - return wrapped - - -def check_policy(context, action, target_obj=None): - target = { - 'project_id': context.project_id, - 'user_id': context.user_id, - } - - if isinstance(target_obj, objects_base.CinderObject): - # Turn object into dict so target.update can work - target.update( - target_obj.obj_to_primitive()['versioned_object.data'] or {}) - else: - target.update(target_obj or {}) - - _action = 'volume:%s' % action - cinder.policy.enforce(context, _action, target) - - -class API(base.Base): - """API for interacting with the volume manager.""" - - AVAILABLE_MIGRATION_STATUS = (None, 'deleting', 'error', 'success') - - def __init__(self, db_driver=None, 
image_service=None): - self.image_service = (image_service or - glance.get_default_image_service()) - self.scheduler_rpcapi = scheduler_rpcapi.SchedulerAPI() - self.volume_rpcapi = volume_rpcapi.VolumeAPI() - self.availability_zones = [] - self.availability_zones_last_fetched = None - self.key_manager = key_manager.API(CONF) - super(API, self).__init__(db_driver) - - def list_availability_zones(self, enable_cache=False): - """Describe the known availability zones - - :retval tuple of dicts, each with a 'name' and 'available' key - """ - refresh_cache = False - if enable_cache: - if self.availability_zones_last_fetched is None: - refresh_cache = True - else: - cache_age = timeutils.delta_seconds( - self.availability_zones_last_fetched, - timeutils.utcnow()) - if cache_age >= CONF.az_cache_duration: - refresh_cache = True - if refresh_cache or not enable_cache: - topic = constants.VOLUME_TOPIC - ctxt = context.get_admin_context() - services = objects.ServiceList.get_all_by_topic(ctxt, topic) - az_data = [(s.availability_zone, s.disabled) - for s in services] - disabled_map = {} - for (az_name, disabled) in az_data: - tracked_disabled = disabled_map.get(az_name, True) - disabled_map[az_name] = tracked_disabled and disabled - azs = [{'name': name, 'available': not disabled} - for (name, disabled) in disabled_map.items()] - if refresh_cache: - now = timeutils.utcnow() - self.availability_zones = azs - self.availability_zones_last_fetched = now - LOG.debug("Availability zone cache updated, next update will" - " occur around %s.", now + datetime.timedelta( - seconds=CONF.az_cache_duration)) - else: - azs = self.availability_zones - LOG.info("Availability Zones retrieved successfully.") - return tuple(azs) - - def _retype_is_possible(self, context, - source_type, target_type): - elevated = context.elevated() - # If encryptions are different, it is not allowed - # to create volume from source volume or snapshot. 
- if volume_types.volume_types_encryption_changed( - elevated, - source_type.id if source_type else None, - target_type.id if target_type else None): - return False - services = objects.ServiceList.get_all_by_topic( - elevated, - constants.VOLUME_TOPIC, - disabled=True) - if len(services.objects) == 1: - return True - - source_extra_specs = {} - if source_type: - with source_type.obj_as_admin(): - source_extra_specs = source_type.extra_specs - target_extra_specs = {} - if target_type: - with target_type.obj_as_admin(): - target_extra_specs = target_type.extra_specs - if (volume_utils.matching_backend_name( - source_extra_specs, target_extra_specs)): - return True - return False - - def _is_volume_migrating(self, volume): - # The migration status 'none' means no migration has ever been done - # before. The migration status 'error' means the previous migration - # failed. The migration status 'success' means the previous migration - # succeeded. The migration status 'deleting' means the source volume - # fails to delete after a migration. - # All of the statuses above means the volume is not in the process - # of a migration. - return (volume['migration_status'] not in - self.AVAILABLE_MIGRATION_STATUS) - - def create(self, context, size, name, description, snapshot=None, - image_id=None, volume_type=None, metadata=None, - availability_zone=None, source_volume=None, - scheduler_hints=None, - source_replica=None, consistencygroup=None, - cgsnapshot=None, multiattach=False, source_cg=None, - group=None, group_snapshot=None, source_group=None): - - check_policy(context, 'create_from_image' if image_id else 'create') - - # NOTE(jdg): we can have a create without size if we're - # doing a create from snap or volume. Currently - # the taskflow api will handle this and pull in the - # size from the source. - - # NOTE(jdg): cinderclient sends in a string representation - # of the size value. 
BUT there is a possibility that somebody - # could call the API directly so the is_int_like check - # handles both cases (string representation of true float or int). - if size and (not strutils.is_int_like(size) or int(size) <= 0): - msg = _('Invalid volume size provided for create request: %s ' - '(size argument must be an integer (or string ' - 'representation of an integer) and greater ' - 'than zero).') % size - raise exception.InvalidInput(reason=msg) - - if consistencygroup and (not cgsnapshot and not source_cg): - if not volume_type: - msg = _("volume_type must be provided when creating " - "a volume in a consistency group.") - raise exception.InvalidInput(reason=msg) - cg_voltypeids = consistencygroup.volume_type_id - if volume_type.id not in cg_voltypeids: - msg = _("Invalid volume_type provided: %s (requested " - "type must be supported by this consistency " - "group).") % volume_type - raise exception.InvalidInput(reason=msg) - - if group and (not group_snapshot and not source_group): - if not volume_type: - msg = _("volume_type must be provided when creating " - "a volume in a group.") - raise exception.InvalidInput(reason=msg) - vol_type_ids = [v_type.id for v_type in group.volume_types] - if volume_type.id not in vol_type_ids: - msg = _("Invalid volume_type provided: %s (requested " - "type must be supported by this " - "group).") % volume_type - raise exception.InvalidInput(reason=msg) - - if source_volume and volume_type: - if volume_type.id != source_volume.volume_type_id: - if not self._retype_is_possible( - context, - source_volume.volume_type, - volume_type): - msg = _("Invalid volume_type provided: %s (requested type " - "is not compatible; either match source volume, " - "or omit type argument).") % volume_type.id - raise exception.InvalidInput(reason=msg) - - # When cloning replica (for testing), volume type must be omitted - if source_replica and volume_type: - msg = _("No volume_type should be provided when creating test " - "replica.") - 
raise exception.InvalidInput(reason=msg) - - if snapshot and volume_type: - if volume_type.id != snapshot.volume_type_id: - if not self._retype_is_possible(context, - snapshot.volume.volume_type, - volume_type): - msg = _("Invalid volume_type provided: %s (requested " - "type is not compatible; recommend omitting " - "the type argument).") % volume_type.id - raise exception.InvalidInput(reason=msg) - - # Determine the valid availability zones that the volume could be - # created in (a task in the flow will/can use this information to - # ensure that the availability zone requested is valid). - raw_zones = self.list_availability_zones(enable_cache=True) - availability_zones = set([az['name'] for az in raw_zones]) - if CONF.storage_availability_zone: - availability_zones.add(CONF.storage_availability_zone) - - utils.check_metadata_properties(metadata) - - create_what = { - 'context': context, - 'raw_size': size, - 'name': name, - 'description': description, - 'snapshot': snapshot, - 'image_id': image_id, - 'raw_volume_type': volume_type, - 'metadata': metadata or {}, - 'raw_availability_zone': availability_zone, - 'source_volume': source_volume, - 'scheduler_hints': scheduler_hints, - 'key_manager': self.key_manager, - 'source_replica': source_replica, - 'optional_args': {'is_quota_committed': False}, - 'consistencygroup': consistencygroup, - 'cgsnapshot': cgsnapshot, - 'multiattach': multiattach, - 'group': group, - 'group_snapshot': group_snapshot, - 'source_group': source_group, - } - try: - sched_rpcapi = (self.scheduler_rpcapi if ( - not cgsnapshot and not source_cg and - not group_snapshot and not source_group) - else None) - volume_rpcapi = (self.volume_rpcapi if ( - not cgsnapshot and not source_cg and - not group_snapshot and not source_group) - else None) - flow_engine = create_volume.get_flow(self.db, - self.image_service, - availability_zones, - create_what, - sched_rpcapi, - volume_rpcapi) - except Exception: - msg = _('Failed to create api volume 
flow.') - LOG.exception(msg) - raise exception.CinderException(msg) - - # Attaching this listener will capture all of the notifications that - # taskflow sends out and redirect them to a more useful log for - # cinders debugging (or error reporting) usage. - with flow_utils.DynamicLogListener(flow_engine, logger=LOG): - flow_engine.run() - vref = flow_engine.storage.fetch('volume') - LOG.info("Create volume request issued successfully.", - resource=vref) - return vref - - @wrap_check_policy - def revert_to_snapshot(self, context, volume, snapshot): - """revert a volume to a snapshot""" - - v_res = volume.update_single_status_where( - 'reverting', 'available') - if not v_res: - msg = _("Can't revert volume %s to its latest snapshot. " - "Volume's status must be 'available'.") % volume.id - raise exception.InvalidVolume(reason=msg) - s_res = snapshot.update_single_status_where( - fields.SnapshotStatus.RESTORING, - fields.SnapshotStatus.AVAILABLE) - if not s_res: - msg = _("Can't revert volume %s to its latest snapshot. 
" - "Snapshot's status must be 'available'.") % snapshot.id - raise exception.InvalidSnapshot(reason=msg) - - self.volume_rpcapi.revert_to_snapshot(context, volume, snapshot) - - @wrap_check_policy - def delete(self, context, volume, - force=False, - unmanage_only=False, - cascade=False): - if context.is_admin and context.project_id != volume.project_id: - project_id = volume.project_id - else: - project_id = context.project_id - - if not volume.host: - volume_utils.notify_about_volume_usage(context, - volume, "delete.start") - # NOTE(vish): scheduling failed, so delete it - # Note(zhiteng): update volume quota reservation - try: - reservations = None - if volume.status != 'error_managing': - LOG.debug("Decrease volume quotas only if status is not " - "error_managing.") - reserve_opts = {'volumes': -1, 'gigabytes': -volume.size} - QUOTAS.add_volume_type_opts(context, - reserve_opts, - volume.volume_type_id) - reservations = QUOTAS.reserve(context, - project_id=project_id, - **reserve_opts) - except Exception: - LOG.exception("Failed to update quota while " - "deleting volume.") - volume.destroy() - - if reservations: - QUOTAS.commit(context, reservations, project_id=project_id) - - volume_utils.notify_about_volume_usage(context, - volume, "delete.end") - LOG.info("Delete volume request issued successfully.", - resource={'type': 'volume', - 'id': volume.id}) - return - - if not unmanage_only: - volume.assert_not_frozen() - - # Build required conditions for conditional update - expected = { - 'attach_status': db.Not(fields.VolumeAttachStatus.ATTACHED), - 'migration_status': self.AVAILABLE_MIGRATION_STATUS, - 'consistencygroup_id': None, - 'group_id': None} - - # If not force deleting we have status conditions - if not force: - expected['status'] = ('available', 'error', 'error_restoring', - 'error_extending', 'error_managing') - - if cascade: - if force: - # Ignore status checks, but ensure snapshots are not part - # of a cgsnapshot. 
- filters = [~db.volume_has_snapshots_in_a_cgsnapshot_filter()] - else: - # Allow deletion if all snapshots are in an expected state - filters = [~db.volume_has_undeletable_snapshots_filter()] - # Check if the volume has snapshots which are existing in - # other project now. - if not context.is_admin: - filters.append(~db.volume_has_other_project_snp_filter()) - else: - # Don't allow deletion of volume with snapshots - filters = [~db.volume_has_snapshots_filter()] - values = {'status': 'deleting', 'terminated_at': timeutils.utcnow()} - if unmanage_only is True: - values['status'] = 'unmanaging' - if volume.status == 'error_managing': - values['status'] = 'error_managing_deleting' - - result = volume.conditional_update(values, expected, filters) - - if not result: - status = utils.build_or_str(expected.get('status'), - _('status must be %s and')) - msg = _('Volume %s must not be migrating, attached, belong to a ' - 'group, have snapshots or be disassociated from ' - 'snapshots after volume transfer.') % status - LOG.info(msg) - raise exception.InvalidVolume(reason=msg) - - if cascade: - values = {'status': 'deleting'} - expected = {'cgsnapshot_id': None, - 'group_snapshot_id': None} - if not force: - expected['status'] = ('available', 'error', 'deleting') - - snapshots = objects.snapshot.SnapshotList.get_all_for_volume( - context, volume.id) - for s in snapshots: - result = s.conditional_update(values, expected, filters) - - if not result: - volume.update({'status': 'error_deleting'}) - volume.save() - - msg = _('Failed to update snapshot.') - raise exception.InvalidVolume(reason=msg) - - cache = image_cache.ImageVolumeCache(self.db, self) - entry = cache.get_by_image_volume(context, volume.id) - if entry: - cache.evict(context, entry) - - # If the volume is encrypted, delete its encryption key from the key - # manager. This operation makes volume deletion an irreversible process - # because the volume cannot be decrypted without its key. 
- encryption_key_id = volume.get('encryption_key_id', None) - if encryption_key_id is not None: - try: - self.key_manager.delete(context, encryption_key_id) - except exception.CinderException as e: - LOG.warning("Unable to delete encryption key for " - "volume: %s.", e.msg, resource=volume) - except Exception: - LOG.exception("Unable to delete encryption key for " - "volume.") - - self.volume_rpcapi.delete_volume(context, - volume, - unmanage_only, - cascade) - LOG.info("Delete volume request issued successfully.", - resource=volume) - - @wrap_check_policy - def update(self, context, volume, fields): - # TODO(karthikp): Making sure volume is always oslo-versioned - # If not we convert it at the start of update method. This check - # needs to be removed once we have moved to ovo. - if not isinstance(volume, objects_base.CinderObject): - vol_obj = objects.Volume() - volume = objects.Volume._from_db_object(context, vol_obj, volume) - - if volume.status == 'maintenance': - LOG.info("Unable to update volume, " - "because it is in maintenance.", resource=volume) - msg = _("The volume cannot be updated during maintenance.") - raise exception.InvalidVolume(reason=msg) - - utils.check_metadata_properties(fields.get('metadata', None)) - - volume.update(fields) - volume.save() - LOG.info("Volume updated successfully.", resource=volume) - - def get(self, context, volume_id, viewable_admin_meta=False): - volume = objects.Volume.get_by_id(context, volume_id) - - try: - check_policy(context, 'get', volume) - except exception.PolicyNotAuthorized: - # raise VolumeNotFound to avoid providing info about - # the existence of an unauthorized volume id - raise exception.VolumeNotFound(volume_id=volume_id) - - if viewable_admin_meta: - ctxt = context.elevated() - admin_metadata = self.db.volume_admin_metadata_get(ctxt, - volume_id) - volume.admin_metadata = admin_metadata - volume.obj_reset_changes() - - LOG.info("Volume info retrieved successfully.", resource=volume) - return volume - - 
def get_all(self, context, marker=None, limit=None, sort_keys=None, - sort_dirs=None, filters=None, viewable_admin_meta=False, - offset=None): - check_policy(context, 'get_all') - - if filters is None: - filters = {} - - allTenants = utils.get_bool_param('all_tenants', filters) - - try: - if limit is not None: - limit = int(limit) - if limit < 0: - msg = _('limit param must be positive') - raise exception.InvalidInput(reason=msg) - except ValueError: - msg = _('limit param must be an integer') - raise exception.InvalidInput(reason=msg) - - # Non-admin shouldn't see temporary target of a volume migration, add - # unique filter data to reflect that only volumes with a NULL - # 'migration_status' or a 'migration_status' that does not start with - # 'target:' should be returned (processed in db/sqlalchemy/api.py) - if not context.is_admin: - filters['no_migration_targets'] = True - - if filters: - LOG.debug("Searching by: %s.", six.text_type(filters)) - - if context.is_admin and allTenants: - # Need to remove all_tenants to pass the filtering below. 
- del filters['all_tenants'] - volumes = objects.VolumeList.get_all(context, marker, limit, - sort_keys=sort_keys, - sort_dirs=sort_dirs, - filters=filters, - offset=offset) - else: - if viewable_admin_meta: - context = context.elevated() - volumes = objects.VolumeList.get_all_by_project( - context, context.project_id, marker, limit, - sort_keys=sort_keys, sort_dirs=sort_dirs, filters=filters, - offset=offset) - - LOG.info("Get all volumes completed successfully.") - return volumes - - def get_volume_summary(self, context, filters=None): - check_policy(context, 'get_all') - - if filters is None: - filters = {} - - all_tenants = utils.get_bool_param('all_tenants', filters) - filters.pop('all_tenants', None) - project_only = not (all_tenants and context.is_admin) - volumes = objects.VolumeList.get_volume_summary(context, project_only) - - LOG.info("Get summary completed successfully.") - return volumes - - def get_snapshot(self, context, snapshot_id): - check_policy(context, 'get_snapshot') - snapshot = objects.Snapshot.get_by_id(context, snapshot_id) - - # FIXME(jdg): The objects don't have the db name entries - # so build the resource tag manually for now. - LOG.info("Snapshot retrieved successfully.", - resource={'type': 'snapshot', - 'id': snapshot.id}) - return snapshot - - def get_volume(self, context, volume_id): - check_policy(context, 'get_volume') - volume = objects.Volume.get_by_id(context, volume_id) - LOG.info("Volume retrieved successfully.", resource=volume) - return volume - - def get_all_snapshots(self, context, search_opts=None, marker=None, - limit=None, sort_keys=None, sort_dirs=None, - offset=None): - check_policy(context, 'get_all_snapshots') - - search_opts = search_opts or {} - - if context.is_admin and 'all_tenants' in search_opts: - # Need to remove all_tenants to pass the filtering below. 
- del search_opts['all_tenants'] - snapshots = objects.SnapshotList.get_all( - context, search_opts, marker, limit, sort_keys, sort_dirs, - offset) - else: - snapshots = objects.SnapshotList.get_all_by_project( - context, context.project_id, search_opts, marker, limit, - sort_keys, sort_dirs, offset) - - LOG.info("Get all snapshots completed successfully.") - return snapshots - - @wrap_check_policy - def reserve_volume(self, context, volume): - expected = {'multiattach': volume.multiattach, - 'status': (('available', 'in-use') if volume.multiattach - else 'available')} - - result = volume.conditional_update({'status': 'attaching'}, expected) - - if not result: - expected_status = utils.build_or_str(expected['status']) - msg = _('Volume status must be %(expected)s to reserve, but the ' - 'status is %(current)s.') % {'expected': expected_status, - 'current': volume.status} - LOG.error(msg) - raise exception.InvalidVolume(reason=msg) - - LOG.info("Reserve volume completed successfully.", - resource=volume) - - @wrap_check_policy - def unreserve_volume(self, context, volume): - expected = {'status': 'attaching'} - # Status change depends on whether it has attachments (in-use) or not - # (available) - value = {'status': db.Case([(db.volume_has_attachments_filter(), - 'in-use')], - else_='available')} - result = volume.conditional_update(value, expected) - if not result: - LOG.debug("Attempted to unreserve volume that was not " - "reserved, nothing to do.", - resource=volume) - return - - LOG.info("Unreserve volume completed successfully.", - resource=volume) - - @wrap_check_policy - def begin_detaching(self, context, volume): - # If we are in the middle of a volume migration, we don't want the - # user to see that the volume is 'detaching'. Having - # 'migration_status' set will have the same effect internally. 
- expected = {'status': 'in-use', - 'attach_status': fields.VolumeAttachStatus.ATTACHED, - 'migration_status': self.AVAILABLE_MIGRATION_STATUS} - - result = volume.conditional_update({'status': 'detaching'}, expected) - - if not (result or self._is_volume_migrating(volume)): - msg = _("Unable to detach volume. Volume status must be 'in-use' " - "and attach_status must be 'attached' to detach.") - LOG.error(msg) - raise exception.InvalidVolume(reason=msg) - - LOG.info("Begin detaching volume completed successfully.", - resource=volume) - - @wrap_check_policy - def roll_detaching(self, context, volume): - volume.conditional_update({'status': 'in-use'}, - {'status': 'detaching'}) - LOG.info("Roll detaching of volume completed successfully.", - resource=volume) - - @wrap_check_policy - def attach(self, context, volume, instance_uuid, host_name, - mountpoint, mode): - if volume.status == 'maintenance': - LOG.info('Unable to attach volume, ' - 'because it is in maintenance.', resource=volume) - msg = _("The volume cannot be attached in maintenance mode.") - raise exception.InvalidVolume(reason=msg) - - # We add readonly metadata if it doesn't already exist - readonly = self.update_volume_admin_metadata(context.elevated(), - volume, - {'readonly': 'False'}, - update=False)['readonly'] - if readonly == 'True' and mode != 'ro': - raise exception.InvalidVolumeAttachMode(mode=mode, - volume_id=volume.id) - - attach_results = self.volume_rpcapi.attach_volume(context, - volume, - instance_uuid, - host_name, - mountpoint, - mode) - LOG.info("Attach volume completed successfully.", - resource=volume) - return attach_results - - @wrap_check_policy - def detach(self, context, volume, attachment_id): - if volume['status'] == 'maintenance': - LOG.info('Unable to detach volume, ' - 'because it is in maintenance.', resource=volume) - msg = _("The volume cannot be detached in maintenance mode.") - raise exception.InvalidVolume(reason=msg) - detach_results = 
self.volume_rpcapi.detach_volume(context, volume, - attachment_id) - LOG.info("Detach volume completed successfully.", - resource=volume) - return detach_results - - @wrap_check_policy - def initialize_connection(self, context, volume, connector): - if volume.status == 'maintenance': - LOG.info('Unable to initialize the connection for ' - 'volume, because it is in ' - 'maintenance.', resource=volume) - msg = _("The volume connection cannot be initialized in " - "maintenance mode.") - raise exception.InvalidVolume(reason=msg) - init_results = self.volume_rpcapi.initialize_connection(context, - volume, - connector) - LOG.info("Initialize volume connection completed successfully.", - resource=volume) - return init_results - - @wrap_check_policy - def terminate_connection(self, context, volume, connector, force=False): - self.volume_rpcapi.terminate_connection(context, - volume, - connector, - force) - LOG.info("Terminate volume connection completed successfully.", - resource=volume) - self.unreserve_volume(context, volume) - - @wrap_check_policy - def accept_transfer(self, context, volume, new_user, new_project): - if volume['status'] == 'maintenance': - LOG.info('Unable to accept transfer for volume, ' - 'because it is in maintenance.', resource=volume) - msg = _("The volume cannot accept transfer in maintenance mode.") - raise exception.InvalidVolume(reason=msg) - results = self.volume_rpcapi.accept_transfer(context, - volume, - new_user, - new_project) - LOG.info("Transfer volume completed successfully.", - resource=volume) - return results - - def _create_snapshot(self, context, - volume, name, description, - force=False, metadata=None, - cgsnapshot_id=None, - group_snapshot_id=None): - volume.assert_not_frozen() - snapshot = self.create_snapshot_in_db( - context, volume, name, - description, force, metadata, cgsnapshot_id, - True, group_snapshot_id) - self.volume_rpcapi.create_snapshot(context, volume, snapshot) - - return snapshot - - def 
create_snapshot_in_db(self, context, - volume, name, description, - force, metadata, - cgsnapshot_id, - commit_quota=True, - group_snapshot_id=None): - check_policy(context, 'create_snapshot', volume) - - if not volume.host: - msg = _("The snapshot cannot be created because volume has " - "not been scheduled to any host.") - raise exception.InvalidVolume(reason=msg) - - if volume['status'] == 'maintenance': - LOG.info('Unable to create the snapshot for volume, ' - 'because it is in maintenance.', resource=volume) - msg = _("The snapshot cannot be created when the volume is in " - "maintenance mode.") - raise exception.InvalidVolume(reason=msg) - if self._is_volume_migrating(volume): - # Volume is migrating, wait until done - msg = _("Snapshot cannot be created while volume is migrating.") - raise exception.InvalidVolume(reason=msg) - - if volume['status'].startswith('replica_'): - # Can't snapshot secondary replica - msg = _("Snapshot of secondary replica is not allowed.") - raise exception.InvalidVolume(reason=msg) - - if ((not force) and (volume['status'] != "available")): - msg = _("Volume %(vol_id)s status must be available, " - "but current status is: " - "%(vol_status)s.") % {'vol_id': volume['id'], - 'vol_status': volume['status']} - raise exception.InvalidVolume(reason=msg) - - if commit_quota: - try: - if CONF.no_snapshot_gb_quota: - reserve_opts = {'snapshots': 1} - else: - reserve_opts = {'snapshots': 1, - 'gigabytes': volume['size']} - QUOTAS.add_volume_type_opts(context, - reserve_opts, - volume.get('volume_type_id')) - reservations = QUOTAS.reserve(context, **reserve_opts) - except exception.OverQuota as e: - quota_utils.process_reserve_over_quota( - context, e, - resource='snapshots', - size=volume.size) - utils.check_metadata_properties(metadata) - - snapshot = None - try: - kwargs = { - 'volume_id': volume['id'], - 'cgsnapshot_id': cgsnapshot_id, - 'group_snapshot_id': group_snapshot_id, - 'user_id': context.user_id, - 'project_id': 
context.project_id, - 'status': fields.SnapshotStatus.CREATING, - 'progress': '0%', - 'volume_size': volume['size'], - 'display_name': name, - 'display_description': description, - 'volume_type_id': volume['volume_type_id'], - 'encryption_key_id': volume['encryption_key_id'], - 'metadata': metadata or {} - } - snapshot = objects.Snapshot(context=context, **kwargs) - snapshot.create() - - if commit_quota: - QUOTAS.commit(context, reservations) - except Exception: - with excutils.save_and_reraise_exception(): - try: - if snapshot.obj_attr_is_set('id'): - snapshot.destroy() - finally: - if commit_quota: - QUOTAS.rollback(context, reservations) - - return snapshot - - def create_snapshots_in_db(self, context, - volume_list, - name, description, - cgsnapshot_id, - group_snapshot_id=None): - snapshot_list = [] - for volume in volume_list: - self._create_snapshot_in_db_validate(context, volume) - - reservations = self._create_snapshots_in_db_reserve( - context, volume_list) - - options_list = [] - for volume in volume_list: - options = self._create_snapshot_in_db_options( - context, volume, name, description, cgsnapshot_id, - group_snapshot_id) - options_list.append(options) - - try: - for options in options_list: - snapshot = objects.Snapshot(context=context, **options) - snapshot.create() - snapshot_list.append(snapshot) - - QUOTAS.commit(context, reservations) - except Exception: - with excutils.save_and_reraise_exception(): - try: - for snap in snapshot_list: - snap.destroy() - finally: - QUOTAS.rollback(context, reservations) - - return snapshot_list - - def _create_snapshot_in_db_validate(self, context, volume): - check_policy(context, 'create_snapshot', volume) - - if volume['status'] == 'maintenance': - LOG.info('Unable to create the snapshot for volume, ' - 'because it is in maintenance.', resource=volume) - msg = _("The snapshot cannot be created when the volume is in " - "maintenance mode.") - raise exception.InvalidVolume(reason=msg) - if 
self._is_volume_migrating(volume): - # Volume is migrating, wait until done - msg = _("Snapshot cannot be created while volume is migrating.") - raise exception.InvalidVolume(reason=msg) - if volume['status'] == 'error': - msg = _("The snapshot cannot be created when the volume is " - "in error status.") - LOG.error(msg) - raise exception.InvalidVolume(reason=msg) - - def _create_snapshots_in_db_reserve(self, context, volume_list): - reserve_opts_list = [] - total_reserve_opts = {} - try: - for volume in volume_list: - if CONF.no_snapshot_gb_quota: - reserve_opts = {'snapshots': 1} - else: - reserve_opts = {'snapshots': 1, - 'gigabytes': volume['size']} - QUOTAS.add_volume_type_opts(context, - reserve_opts, - volume.get('volume_type_id')) - reserve_opts_list.append(reserve_opts) - - for reserve_opts in reserve_opts_list: - for (key, value) in reserve_opts.items(): - if key not in total_reserve_opts.keys(): - total_reserve_opts[key] = value - else: - total_reserve_opts[key] = \ - total_reserve_opts[key] + value - reservations = QUOTAS.reserve(context, **total_reserve_opts) - except exception.OverQuota as e: - quota_utils.process_reserve_over_quota(context, e, - resource='snapshots', - size=volume.size) - - return reservations - - def _create_snapshot_in_db_options(self, context, volume, - name, description, - cgsnapshot_id, - group_snapshot_id=None): - options = {'volume_id': volume['id'], - 'cgsnapshot_id': cgsnapshot_id, - 'group_snapshot_id': group_snapshot_id, - 'user_id': context.user_id, - 'project_id': context.project_id, - 'status': fields.SnapshotStatus.CREATING, - 'progress': '0%', - 'volume_size': volume['size'], - 'display_name': name, - 'display_description': description, - 'volume_type_id': volume['volume_type_id'], - 'encryption_key_id': volume['encryption_key_id']} - return options - - def create_snapshot(self, context, - volume, name, description, - metadata=None, cgsnapshot_id=None, - group_snapshot_id=None): - result = 
self._create_snapshot(context, volume, name, description, - False, metadata, cgsnapshot_id, - group_snapshot_id) - LOG.info("Snapshot create request issued successfully.", - resource=result) - return result - - def create_snapshot_force(self, context, - volume, name, - description, metadata=None): - result = self._create_snapshot(context, volume, name, description, - True, metadata) - LOG.info("Snapshot force create request issued successfully.", - resource=result) - return result - - @wrap_check_policy - def delete_snapshot(self, context, snapshot, force=False, - unmanage_only=False): - if not unmanage_only: - snapshot.assert_not_frozen() - - # Build required conditions for conditional update - expected = {'cgsnapshot_id': None, - 'group_snapshot_id': None} - # If not force deleting we have status conditions - if not force: - expected['status'] = (fields.SnapshotStatus.AVAILABLE, - fields.SnapshotStatus.ERROR) - - values = {'status': fields.SnapshotStatus.DELETING} - if unmanage_only is True: - values['status'] = fields.SnapshotStatus.UNMANAGING - result = snapshot.conditional_update(values, expected) - if not result: - status = utils.build_or_str(expected.get('status'), - _('status must be %s and')) - msg = (_('Snapshot %s must not be part of a group.') % - status) - LOG.error(msg) - raise exception.InvalidSnapshot(reason=msg) - - self.volume_rpcapi.delete_snapshot(context, snapshot, unmanage_only) - LOG.info("Snapshot delete request issued successfully.", - resource=snapshot) - - @wrap_check_policy - def update_snapshot(self, context, snapshot, fields): - snapshot.update(fields) - snapshot.save() - - @wrap_check_policy - def get_volume_metadata(self, context, volume): - """Get all metadata associated with a volume.""" - rv = self.db.volume_metadata_get(context, volume['id']) - LOG.info("Get volume metadata completed successfully.", - resource=volume) - return dict(rv) - - @wrap_check_policy - def create_volume_metadata(self, context, volume, metadata): - 
"""Creates volume metadata.""" - db_meta = self._update_volume_metadata(context, volume, metadata) - - LOG.info("Create volume metadata completed successfully.", - resource=volume) - return db_meta - - @wrap_check_policy - def delete_volume_metadata(self, context, volume, - key, meta_type=common.METADATA_TYPES.user): - """Delete the given metadata item from a volume.""" - if volume.status in ('maintenance', 'uploading'): - msg = _('Deleting volume metadata is not allowed for volumes in ' - '%s status.') % volume.status - LOG.info(msg, resource=volume) - raise exception.InvalidVolume(reason=msg) - self.db.volume_metadata_delete(context, volume.id, key, meta_type) - LOG.info("Delete volume metadata completed successfully.", - resource=volume) - - def _update_volume_metadata(self, context, volume, metadata, delete=False, - meta_type=common.METADATA_TYPES.user): - if volume['status'] in ('maintenance', 'uploading'): - msg = _('Updating volume metadata is not allowed for volumes in ' - '%s status.') % volume['status'] - LOG.info(msg, resource=volume) - raise exception.InvalidVolume(reason=msg) - utils.check_metadata_properties(metadata) - return self.db.volume_metadata_update(context, volume['id'], - metadata, delete, meta_type) - - @wrap_check_policy - def update_volume_metadata(self, context, volume, metadata, delete=False, - meta_type=common.METADATA_TYPES.user): - """Updates volume metadata. - - If delete is True, metadata items that are not specified in the - `metadata` argument will be deleted. 
- - """ - db_meta = self._update_volume_metadata(context, volume, metadata, - delete, meta_type) - - # TODO(jdg): Implement an RPC call for drivers that may use this info - - LOG.info("Update volume metadata completed successfully.", - resource=volume) - return db_meta - - @wrap_check_policy - def get_volume_admin_metadata(self, context, volume): - """Get all administration metadata associated with a volume.""" - rv = self.db.volume_admin_metadata_get(context, volume['id']) - LOG.info("Get volume admin metadata completed successfully.", - resource=volume) - return dict(rv) - - @wrap_check_policy - def update_volume_admin_metadata(self, context, volume, metadata, - delete=False, add=True, update=True): - """Updates or creates volume administration metadata. - - If delete is True, metadata items that are not specified in the - `metadata` argument will be deleted. - - """ - utils.check_metadata_properties(metadata) - db_meta = self.db.volume_admin_metadata_update(context, volume.id, - metadata, delete, add, - update) - - # TODO(jdg): Implement an RPC call for drivers that may use this info - - LOG.info("Update volume admin metadata completed successfully.", - resource=volume) - return db_meta - - @wrap_check_policy - def get_snapshot_metadata(self, context, snapshot): - """Get all metadata associated with a snapshot.""" - LOG.info("Get snapshot metadata completed successfully.", - resource=snapshot) - return snapshot.metadata - - @wrap_check_policy - def delete_snapshot_metadata(self, context, snapshot, key): - """Delete the given metadata item from a snapshot.""" - snapshot.delete_metadata_key(context, key) - LOG.info("Delete snapshot metadata completed successfully.", - resource=snapshot) - - @wrap_check_policy - def update_snapshot_metadata(self, context, - snapshot, metadata, - delete=False): - """Updates or creates snapshot metadata. - - If delete is True, metadata items that are not specified in the - `metadata` argument will be deleted. 
- - """ - if delete: - _metadata = metadata - else: - orig_meta = snapshot.metadata - _metadata = orig_meta.copy() - _metadata.update(metadata) - - utils.check_metadata_properties(_metadata) - - snapshot.metadata = _metadata - snapshot.save() - - # TODO(jdg): Implement an RPC call for drivers that may use this info - - LOG.info("Update snapshot metadata completed successfully.", - resource=snapshot) - return snapshot.metadata - - def get_snapshot_metadata_value(self, snapshot, key): - LOG.info("Get snapshot metadata value not implemented.", - resource=snapshot) - # FIXME(jdg): Huh? Pass? - pass - - def get_volumes_image_metadata(self, context): - check_policy(context, 'get_volumes_image_metadata') - db_data = self.db.volume_glance_metadata_get_all(context) - results = collections.defaultdict(dict) - for meta_entry in db_data: - results[meta_entry['volume_id']].update({meta_entry['key']: - meta_entry['value']}) - return results - - @wrap_check_policy - def get_volume_image_metadata(self, context, volume): - db_data = self.db.volume_glance_metadata_get(context, volume['id']) - LOG.info("Get volume image-metadata completed successfully.", - resource=volume) - return {meta_entry.key: meta_entry.value for meta_entry in db_data} - - def get_list_volumes_image_metadata(self, context, volume_id_list): - db_data = self.db.volume_glance_metadata_list_get(context, - volume_id_list) - results = collections.defaultdict(dict) - for meta_entry in db_data: - results[meta_entry['volume_id']].update({meta_entry['key']: - meta_entry['value']}) - return results - - @wrap_check_policy - def copy_volume_to_image(self, context, volume, metadata, force): - """Create a new image from the specified volume.""" - if not CONF.enable_force_upload and force: - LOG.info("Force upload to image is disabled, " - "Force option will be ignored.", - resource={'type': 'volume', 'id': volume['id']}) - force = False - - # Build required conditions for conditional update - expected = {'status': 
('available', 'in-use') if force - else 'available'} - values = {'status': 'uploading', - 'previous_status': volume.model.status} - - result = volume.conditional_update(values, expected) - if not result: - msg = (_('Volume %(vol_id)s status must be %(statuses)s') % - {'vol_id': volume.id, - 'statuses': utils.build_or_str(expected['status'])}) - raise exception.InvalidVolume(reason=msg) - - try: - glance_core_props = CONF.glance_core_properties - if glance_core_props: - try: - vol_img_metadata = self.get_volume_image_metadata( - context, volume) - custom_property_set = ( - set(vol_img_metadata).difference(glance_core_props)) - if custom_property_set: - metadata['properties'] = { - custom_prop: vol_img_metadata[custom_prop] - for custom_prop in custom_property_set} - except exception.GlanceMetadataNotFound: - # If volume is not created from image, No glance metadata - # would be available for that volume in - # volume glance metadata table - pass - - recv_metadata = self.image_service.create( - context, self.image_service._translate_to_glance(metadata)) - except Exception: - # NOTE(geguileo): To mimic behavior before conditional_update we - # will rollback status if image create fails - with excutils.save_and_reraise_exception(): - volume.conditional_update( - {'status': volume.model.previous_status, - 'previous_status': None}, - {'status': 'uploading'}) - - self.volume_rpcapi.copy_volume_to_image(context, - volume, - recv_metadata) - - response = {"id": volume['id'], - "updated_at": volume['updated_at'], - "status": 'uploading', - "display_description": volume['display_description'], - "size": volume['size'], - "volume_type": volume['volume_type'], - "image_id": recv_metadata['id'], - "container_format": recv_metadata['container_format'], - "disk_format": recv_metadata['disk_format'], - "image_name": recv_metadata.get('name', None)} - if 'protected' in recv_metadata: - response['protected'] = recv_metadata.get('protected') - if 'is_public' in recv_metadata: - 
response['is_public'] = recv_metadata.get('is_public') - elif 'visibility' in recv_metadata: - response['visibility'] = recv_metadata.get('visibility') - LOG.info("Copy volume to image completed successfully.", - resource=volume) - return response - - def _extend(self, context, volume, new_size, attached=False): - value = {'status': 'extending'} - if attached: - expected = {'status': 'in-use'} - else: - expected = {'status': 'available'} - orig_status = {'status': volume.status} - - def _roll_back_status(): - status = orig_status['status'] - msg = _('Could not return volume %(id)s to %(status)s.') - try: - if not volume.conditional_update(orig_status, value): - LOG.error(msg, {'id': volume.id, 'status': status}) - except Exception: - LOG.exception(msg, {'id': volume.id, 'status': status}) - - size_increase = (int(new_size)) - volume.size - if size_increase <= 0: - msg = (_("New size for extend must be greater " - "than current size. (current: %(size)s, " - "extended: %(new_size)s).") % {'new_size': new_size, - 'size': volume.size}) - raise exception.InvalidInput(reason=msg) - - result = volume.conditional_update(value, expected) - if not result: - msg = (_("Volume %(vol_id)s status must be '%(expected)s' " - "to extend, currently %(status)s.") - % {'vol_id': volume.id, - 'status': volume.status, - 'expected': six.text_type(expected)}) - raise exception.InvalidVolume(reason=msg) - - rollback = True - try: - values = {'per_volume_gigabytes': new_size} - QUOTAS.limit_check(context, project_id=context.project_id, - **values) - rollback = False - except exception.OverQuota as e: - quotas = e.kwargs['quotas'] - raise exception.VolumeSizeExceedsLimit( - size=new_size, limit=quotas['per_volume_gigabytes']) - finally: - # NOTE(geguileo): To mimic behavior before conditional_update we - # will rollback status on quota reservation failure regardless of - # the exception that caused the failure. 
- if rollback: - _roll_back_status() - - try: - reservations = None - reserve_opts = {'gigabytes': size_increase} - QUOTAS.add_volume_type_opts(context, reserve_opts, - volume.volume_type_id) - reservations = QUOTAS.reserve(context, - project_id=volume.project_id, - **reserve_opts) - except exception.OverQuota as exc: - gigabytes = exc.kwargs['usages']['gigabytes'] - gb_quotas = exc.kwargs['quotas']['gigabytes'] - - consumed = gigabytes['reserved'] + gigabytes['in_use'] - LOG.error("Quota exceeded for %(s_pid)s, tried to extend volume " - "by %(s_size)sG, (%(d_consumed)dG of %(d_quota)dG " - "already consumed).", - {'s_pid': context.project_id, - 's_size': size_increase, - 'd_consumed': consumed, - 'd_quota': gb_quotas}) - raise exception.VolumeSizeExceedsAvailableQuota( - requested=size_increase, consumed=consumed, quota=gb_quotas) - finally: - # NOTE(geguileo): To mimic behavior before conditional_update we - # will rollback status on quota reservation failure regardless of - # the exception that caused the failure. - if reservations is None: - _roll_back_status() - - volume_type = {} - if volume.volume_type_id: - volume_type = volume_types.get_volume_type(context.elevated(), - volume.volume_type_id) - - request_spec = { - 'volume_properties': volume, - 'volume_type': volume_type, - 'volume_id': volume.id - } - - self.scheduler_rpcapi.extend_volume(context, volume, new_size, - reservations, request_spec) - - LOG.info("Extend volume request issued successfully.", - resource=volume) - - @wrap_check_policy - def extend(self, context, volume, new_size): - self._extend(context, volume, new_size, attached=False) - - # NOTE(tommylikehu): New method is added here so that administrator - # can enable/disable this ability by editing the policy file if the - # cloud environment doesn't allow this operation. 
- @wrap_check_policy - def extend_attached_volume(self, context, volume, new_size): - self._extend(context, volume, new_size, attached=True) - - @wrap_check_policy - def migrate_volume(self, context, volume, host, cluster_name, force_copy, - lock_volume): - """Migrate the volume to the specified host or cluster.""" - elevated = context.elevated() - - # If we received a request to migrate to a host - # Look for the service - must be up and enabled - svc_host = host and volume_utils.extract_host(host, 'backend') - svc_cluster = cluster_name and volume_utils.extract_host(cluster_name, - 'backend') - # NOTE(geguileo): Only svc_host or svc_cluster is set, so when we get - # a service from the DB we are getting either one specific service from - # a host or any service from a cluster that is up, which means that the - # cluster itself is also up. - try: - svc = objects.Service.get_by_id(elevated, None, is_up=True, - topic=constants.VOLUME_TOPIC, - host=svc_host, disabled=False, - cluster_name=svc_cluster, - backend_match_level='pool') - except exception.ServiceNotFound: - msg = _("No available service named '%s'") % (cluster_name or host) - LOG.error(msg) - raise exception.InvalidHost(reason=msg) - # Even if we were requested to do a migration to a host, if the host is - # in a cluster we will do a cluster migration. - cluster_name = svc.cluster_name - - # Build required conditions for conditional update - expected = {'status': ('available', 'in-use'), - 'migration_status': self.AVAILABLE_MIGRATION_STATUS, - 'replication_status': (None, 'disabled'), - 'consistencygroup_id': (None, ''), - 'group_id': (None, '')} - - # We want to make sure that the migration is to another host or - # another cluster. 
- if cluster_name: - expected['cluster_name'] = db.Not(cluster_name) - else: - expected['host'] = db.Not(host) - - filters = [~db.volume_has_snapshots_filter()] - - updates = {'migration_status': 'starting', - 'previous_status': volume.model.status} - - # When the migration of an available volume starts, both the status - # and the migration status of the volume will be changed. - # If the admin sets lock_volume flag to True, the volume - # status is changed to 'maintenance', telling users - # that this volume is in maintenance mode, and no action is allowed - # on this volume, e.g. attach, detach, retype, migrate, etc. - if lock_volume: - updates['status'] = db.Case( - [(volume.model.status == 'available', 'maintenance')], - else_=volume.model.status) - - result = volume.conditional_update(updates, expected, filters) - - if not result: - msg = _('Volume %s status must be available or in-use, must not ' - 'be migrating, have snapshots, be replicated, be part of ' - 'a group and destination host/cluster must be different ' - 'than the current one') % {'vol_id': volume.id} - LOG.error(msg) - raise exception.InvalidVolume(reason=msg) - - # Call the scheduler to ensure that the host exists and that it can - # accept the volume - volume_type = {} - if volume.volume_type_id: - volume_type = volume_types.get_volume_type(context.elevated(), - volume.volume_type_id) - request_spec = {'volume_properties': volume, - 'volume_type': volume_type, - 'volume_id': volume.id} - self.scheduler_rpcapi.migrate_volume(context, - volume, - cluster_name or host, - force_copy, - request_spec) - LOG.info("Migrate volume request issued successfully.", - resource=volume) - - @wrap_check_policy - def migrate_volume_completion(self, context, volume, new_volume, error): - if not (volume.migration_status or new_volume.migration_status): - # When we're not migrating and haven't hit any errors, we issue - # volume attach and detach requests so the volumes don't end in - # 'attaching' and 
'detaching' state - if not error: - attachments = volume.volume_attachment - for attachment in attachments: - self.detach(context, volume, attachment.id) - - self.attach(context, new_volume, - attachment.instance_uuid, - attachment.attached_host, - attachment.mountpoint, - 'rw') - - return new_volume.id - - if not volume.migration_status: - msg = _('Source volume not mid-migration.') - raise exception.InvalidVolume(reason=msg) - - if not new_volume.migration_status: - msg = _('Destination volume not mid-migration.') - raise exception.InvalidVolume(reason=msg) - - expected_status = 'target:%s' % volume.id - if not new_volume.migration_status == expected_status: - msg = (_('Destination has migration_status %(stat)s, expected ' - '%(exp)s.') % {'stat': new_volume.migration_status, - 'exp': expected_status}) - raise exception.InvalidVolume(reason=msg) - - LOG.info("Migrate volume completion issued successfully.", - resource=volume) - return self.volume_rpcapi.migrate_volume_completion(context, volume, - new_volume, error) - - @wrap_check_policy - def update_readonly_flag(self, context, volume, flag): - if volume['status'] != 'available': - msg = _('Volume %(vol_id)s status must be available ' - 'to update readonly flag, but current status is: ' - '%(vol_status)s.') % {'vol_id': volume['id'], - 'vol_status': volume['status']} - raise exception.InvalidVolume(reason=msg) - self.update_volume_admin_metadata(context.elevated(), volume, - {'readonly': six.text_type(flag)}) - LOG.info("Update readonly setting on volume " - "completed successfully.", - resource=volume) - - @wrap_check_policy - def retype(self, context, volume, new_type, migration_policy=None): - """Attempt to modify the type associated with an existing volume.""" - if migration_policy and migration_policy not in ('on-demand', 'never'): - msg = _('migration_policy must be \'on-demand\' or \'never\', ' - 'passed: %s') % new_type - LOG.error(msg) - raise exception.InvalidInput(reason=msg) - - # Support specifying 
volume type by ID or name - try: - vol_type = ( - volume_types.get_by_name_or_id(context.elevated(), new_type)) - except exception.InvalidVolumeType: - msg = _('Invalid volume_type passed: %s.') % new_type - LOG.error(msg) - raise exception.InvalidInput(reason=msg) - - vol_type_id = vol_type['id'] - - # We're checking here in so that we can report any quota issues as - # early as possible, but won't commit until we change the type. We - # pass the reservations onward in case we need to roll back. - reservations = quota_utils.get_volume_type_reservation( - context, volume, vol_type_id, reserve_vol_type_only=True) - - # Get old reservations - try: - reserve_opts = {'volumes': -1, 'gigabytes': -volume.size} - QUOTAS.add_volume_type_opts(context, - reserve_opts, - volume.volume_type_id) - # NOTE(wanghao): We don't need to reserve volumes and gigabytes - # quota for retyping operation since they didn't changed, just - # reserve volume_type and type gigabytes is fine. - reserve_opts.pop('volumes') - reserve_opts.pop('gigabytes') - old_reservations = QUOTAS.reserve(context, - project_id=volume.project_id, - **reserve_opts) - except Exception: - volume.status = volume.previous_status - volume.save() - msg = _("Failed to update quota usage while retyping volume.") - LOG.exception(msg, resource=volume) - raise exception.CinderException(msg) - - # Build required conditions for conditional update - expected = {'status': ('available', 'in-use'), - 'migration_status': self.AVAILABLE_MIGRATION_STATUS, - 'consistencygroup_id': (None, ''), - 'group_id': (None, ''), - 'volume_type_id': db.Not(vol_type_id)} - - # We don't support changing QoS at the front-end yet for in-use volumes - # TODO(avishay): Call Nova to change QoS setting (libvirt has support - # - virDomainSetBlockIoTune() - Nova does not have support yet). 
- filters = [db.volume_qos_allows_retype(vol_type_id)] - - updates = {'status': 'retyping', - 'previous_status': objects.Volume.model.status} - - if not volume.conditional_update(updates, expected, filters): - msg = _('Retype needs volume to be in available or in-use state, ' - 'not be part of an active migration or a consistency ' - 'group, requested type has to be different that the ' - 'one from the volume, and for in-use volumes front-end ' - 'qos specs cannot change.') - LOG.error(msg) - QUOTAS.rollback(context, reservations + old_reservations, - project_id=volume.project_id) - raise exception.InvalidVolume(reason=msg) - - request_spec = {'volume_properties': volume, - 'volume_id': volume.id, - 'volume_type': vol_type, - 'migration_policy': migration_policy, - 'quota_reservations': reservations, - 'old_reservations': old_reservations} - - self.scheduler_rpcapi.retype(context, volume, - request_spec=request_spec, - filter_properties={}) - LOG.info("Retype volume request issued successfully.", - resource=volume) - - def _get_service_by_host_cluster(self, context, host, cluster_name, - resource='volume'): - elevated = context.elevated() - - svc_cluster = cluster_name and volume_utils.extract_host(cluster_name, - 'backend') - svc_host = host and volume_utils.extract_host(host, 'backend') - - # NOTE(geguileo): Only svc_host or svc_cluster is set, so when we get - # a service from the DB we are getting either one specific service from - # a host or any service that is up from a cluster, which means that the - # cluster itself is also up. 
- try: - service = objects.Service.get_by_id(elevated, None, host=svc_host, - binary='cinder-volume', - cluster_name=svc_cluster) - except exception.ServiceNotFound: - with excutils.save_and_reraise_exception(): - LOG.error('Unable to find service: %(service)s for ' - 'given host: %(host)s and cluster %(cluster)s.', - {'service': constants.VOLUME_BINARY, 'host': host, - 'cluster': cluster_name}) - - if service.disabled and (not service.cluster_name or - service.cluster.disabled): - LOG.error('Unable to manage existing %s on a disabled ' - 'service.', resource) - raise exception.ServiceUnavailable() - - if not service.is_up: - LOG.error('Unable to manage existing %s on a service that is ' - 'down.', resource) - raise exception.ServiceUnavailable() - - return service - - def manage_existing(self, context, host, cluster_name, ref, name=None, - description=None, volume_type=None, metadata=None, - availability_zone=None, bootable=False): - if volume_type and 'extra_specs' not in volume_type: - extra_specs = volume_types.get_volume_type_extra_specs( - volume_type['id']) - volume_type['extra_specs'] = extra_specs - - service = self._get_service_by_host_cluster(context, host, - cluster_name) - - if availability_zone is None: - availability_zone = service.availability_zone - - manage_what = { - 'context': context, - 'name': name, - 'description': description, - 'host': service.host, - 'cluster_name': service.cluster_name, - 'ref': ref, - 'volume_type': volume_type, - 'metadata': metadata, - 'availability_zone': availability_zone, - 'bootable': bootable, - } - - try: - flow_engine = manage_existing.get_flow(self.scheduler_rpcapi, - self.db, - manage_what) - except Exception: - msg = _('Failed to manage api volume flow.') - LOG.exception(msg) - raise exception.CinderException(msg) - - # Attaching this listener will capture all of the notifications that - # taskflow sends out and redirect them to a more useful log for - # cinder's debugging (or error reporting) usage. 
- with flow_utils.DynamicLogListener(flow_engine, logger=LOG): - flow_engine.run() - vol_ref = flow_engine.storage.fetch('volume') - LOG.info("Manage volume request issued successfully.", - resource=vol_ref) - return vol_ref - - def get_manageable_volumes(self, context, host, cluster_name, marker=None, - limit=None, offset=None, sort_keys=None, - sort_dirs=None): - svc = self._get_service_by_host_cluster(context, host, cluster_name) - return self.volume_rpcapi.get_manageable_volumes(context, svc, - marker, limit, - offset, sort_keys, - sort_dirs) - - def manage_existing_snapshot(self, context, ref, volume, - name=None, description=None, - metadata=None): - service = self._get_service_by_host_cluster(context, volume.host, - volume.cluster_name, - 'snapshot') - - snapshot_object = self.create_snapshot_in_db(context, volume, name, - description, True, - metadata, None, - commit_quota=False) - self.volume_rpcapi.manage_existing_snapshot( - context, snapshot_object, ref, service.service_topic_queue) - return snapshot_object - - def get_manageable_snapshots(self, context, host, cluster_name, - marker=None, limit=None, offset=None, - sort_keys=None, sort_dirs=None): - svc = self._get_service_by_host_cluster(context, host, cluster_name, - 'snapshot') - return self.volume_rpcapi.get_manageable_snapshots(context, svc, - marker, limit, - offset, sort_keys, - sort_dirs) - - def _get_cluster_and_services_for_replication(self, ctxt, host, - cluster_name): - services = objects.ServiceList.get_all( - ctxt, filters={'host': host, 'cluster_name': cluster_name, - 'binary': constants.VOLUME_BINARY}) - - if not services: - if host: - msg = _("No service found with host=%s") % host - else: - msg = _("No service found with cluster=%s") % cluster_name - - raise exception.ServiceNotFound(msg) - - cluster = services[0].cluster - # Check that the host or cluster we received only results in 1 host or - # hosts from the same cluster. 
- if cluster_name: - check_attribute = 'cluster_name' - expected = cluster.name - else: - check_attribute = 'host' - expected = services[0].host - if any(getattr(s, check_attribute) != expected for s in services): - msg = _('Services from different clusters found.') - raise exception.InvalidParameterValue(msg) - - # If we received host parameter but host belongs to a cluster we have - # to change all the services in the cluster, not just one host - if host and cluster: - services = cluster.services - - return cluster, services - - def _replication_db_change(self, ctxt, field, expected_value, new_value, - host, cluster_name, check_up=False): - def _error_msg(service): - expected = utils.build_or_str(six.text_type(expected_value)) - up_msg = 'and must be up ' if check_up else '' - msg = (_('%(field)s in %(service)s must be %(expected)s ' - '%(up_msg)sto failover.') - % {'field': field, 'service': service, - 'expected': expected, 'up_msg': up_msg}) - LOG.error(msg) - return msg - - cluster, services = self._get_cluster_and_services_for_replication( - ctxt, host, cluster_name) - - expect = {field: expected_value} - change = {field: new_value} - - if cluster: - old_value = getattr(cluster, field) - if ((check_up and not cluster.is_up) - or not cluster.conditional_update(change, expect)): - msg = _error_msg(cluster.name) - raise exception.InvalidInput(reason=msg) - - changed = [] - not_changed = [] - for service in services: - if ((not check_up or service.is_up) - and service.conditional_update(change, expect)): - changed.append(service) - else: - not_changed.append(service) - - # If there were some services that couldn't be changed we should at - # least log the error. 
- if not_changed: - msg = _error_msg([s.host for s in not_changed]) - # If we couldn't change any of the services - if not changed: - # Undo the cluster change - if cluster: - setattr(cluster, field, old_value) - cluster.save() - raise exception.InvalidInput( - reason=_('No service could be changed: %s') % msg) - LOG.warning('Some services could not be changed: %s', msg) - - return cluster, services - - def failover(self, ctxt, host, cluster_name, secondary_id=None): - check_policy(ctxt, 'failover_host') - ctxt = ctxt if ctxt.is_admin else ctxt.elevated() - - # TODO(geguileo): In P - Remove this version check - rpc_version = self.volume_rpcapi.determine_rpc_version_cap() - rpc_version = versionutils.convert_version_to_tuple(rpc_version) - if cluster_name and rpc_version < (3, 5): - msg = _('replication operations with cluster field') - raise exception.UnavailableDuringUpgrade(action=msg) - - rep_fields = fields.ReplicationStatus - expected_values = [rep_fields.ENABLED, rep_fields.FAILED_OVER] - new_value = rep_fields.FAILING_OVER - - cluster, services = self._replication_db_change( - ctxt, 'replication_status', expected_values, new_value, host, - cluster_name, check_up=True) - - self.volume_rpcapi.failover(ctxt, services[0], secondary_id) - - def freeze_host(self, ctxt, host, cluster_name): - check_policy(ctxt, 'freeze_host') - ctxt = ctxt if ctxt.is_admin else ctxt.elevated() - - expected = False - new_value = True - cluster, services = self._replication_db_change( - ctxt, 'frozen', expected, new_value, host, cluster_name, - check_up=False) - - # Should we set service status to disabled to keep - # scheduler calls from being sent? 
Just use existing - # `cinder service-disable reason=freeze` - self.volume_rpcapi.freeze_host(ctxt, services[0]) - - def thaw_host(self, ctxt, host, cluster_name): - check_policy(ctxt, 'thaw_host') - ctxt = ctxt if ctxt.is_admin else ctxt.elevated() - - expected = True - new_value = False - cluster, services = self._replication_db_change( - ctxt, 'frozen', expected, new_value, host, cluster_name, - check_up=False) - - if not self.volume_rpcapi.thaw_host(ctxt, services[0]): - return "Backend reported error during thaw_host operation." - - def check_volume_filters(self, filters, strict=False): - """Sets the user filter value to accepted format""" - booleans = self.db.get_booleans_for_table('volume') - - # To translate any true/false equivalent to True/False - # which is only acceptable format in database queries. - - for key, val in filters.items(): - try: - if key in booleans: - filters[key] = self._check_boolean_filter_value( - key, val, strict) - elif key == 'display_name': - # Use the raw value of display name as is for the filter - # without passing it through ast.literal_eval(). If the - # display name is a properly quoted string (e.g. '"foo"') - # then literal_eval() strips the quotes (i.e. 'foo'), so - # the filter becomes different from the user input. - continue - else: - filters[key] = ast.literal_eval(val) - except (ValueError, SyntaxError): - LOG.debug('Could not evaluate value %s, assuming string', val) - - def _check_boolean_filter_value(self, key, val, strict=False): - """Boolean filter values in Volume GET. - - Before V3.2, all values other than 'False', 'false', 'FALSE' were - trated as True for specific boolean filter parameters in Volume - GET request. - - But V3.2 onwards, only true/True/0/1/False/false parameters are - supported. - All other input values to specific boolean filter parameter will - lead to raising exception. - - This changes API behavior. So, micro version introduced for V3.2 - onwards. 
- """ - if strict: - # for updated behavior, from V3.2 onwards. - # To translate any true/false/t/f/0/1 to True/False - # which is only acceptable format in database queries. - try: - return strutils.bool_from_string(val, strict=True) - except ValueError: - msg = _('\'%(key)s = %(value)s\'') % {'key': key, - 'value': val} - raise exception.InvalidInput(reason=msg) - else: - # For existing behavior(before version 3.2) - accepted_true = ['True', 'true', 'TRUE'] - accepted_false = ['False', 'false', 'FALSE'] - - if val in accepted_false: - return False - elif val in accepted_true: - return True - else: - return bool(val) - - def _attachment_reserve(self, ctxt, vref, instance_uuid=None): - # NOTE(jdg): Reserved is a special case, we're avoiding allowing - # creation of other new reserves/attachments while in this state - # so we avoid contention issues with shared connections - - # FIXME(JDG): We want to be able to do things here like reserve a - # volume for Nova to do BFV WHILE the volume may be in the process of - # downloading image, we add downloading here; that's easy enough but - # we've got a race inbetween with the attaching/detaching that we do - # locally on the Cinder node. Just come up with an easy way to - # determine if we're attaching to the Cinder host for some work or if - # we're being used by the outside world. 
- expected = {'multiattach': vref.multiattach, - 'status': (('available', 'in-use', 'downloading') - if vref.multiattach - else ('available', 'downloading'))} - result = vref.conditional_update({'status': 'reserved'}, expected) - - if not result: - # Make sure we're not going to the same instance, in which case - # it could be a live-migrate or similar scenario (LP BUG: 1694530) - override = False - if instance_uuid: - override = True - for attachment in vref.volume_attachment: - if attachment.instance_uuid != instance_uuid: - override = False - break - - if not override: - msg = (_('Volume %(vol_id)s status must be %(statuses)s') % - {'vol_id': vref.id, - 'statuses': utils.build_or_str(expected['status'])}) - raise exception.InvalidVolume(reason=msg) - - values = {'volume_id': vref.id, - 'volume_host': vref.host, - 'attach_status': 'reserved', - 'instance_uuid': instance_uuid} - db_ref = self.db.volume_attach(ctxt.elevated(), values) - return objects.VolumeAttachment.get_by_id(ctxt, db_ref['id']) - - @wrap_check_policy - def attachment_create(self, - ctxt, - volume_ref, - instance_uuid, - connector=None): - """Create an attachment record for the specified volume.""" - connection_info = {} - attachment_ref = self._attachment_reserve(ctxt, - volume_ref, - instance_uuid) - if connector: - connection_info = ( - self.volume_rpcapi.attachment_update(ctxt, - volume_ref, - connector, - attachment_ref.id)) - attachment_ref.connection_info = connection_info - attachment_ref.save() - return attachment_ref - - @wrap_check_policy - def attachment_update(self, ctxt, attachment_ref, connector): - """Update an existing attachment record.""" - # Valid items to update (connector includes mode and mountpoint): - # 1. connector (required) - # a. mode (if None use value from attachment_ref) - # b. mountpoint (if None use value from attachment_ref) - # c. 
instance_uuid(if None use value from attachment_ref) - - # We fetch the volume object and pass it to the rpc call because we - # need to direct this to the correct host/backend - - volume_ref = objects.Volume.get_by_id(ctxt, attachment_ref.volume_id) - connection_info = ( - self.volume_rpcapi.attachment_update(ctxt, - volume_ref, - connector, - attachment_ref.id)) - attachment_ref.connection_info = connection_info - attachment_ref.save() - return attachment_ref - - @wrap_check_policy - def attachment_delete(self, ctxt, attachment): - volume = objects.Volume.get_by_id(ctxt, attachment.volume_id) - if attachment.attach_status == 'reserved': - self.db.volume_detached(ctxt.elevated(), attachment.volume_id, - attachment.get('id')) - self.db.volume_admin_metadata_delete(ctxt.elevated(), - attachment.volume_id, - 'attached_mode') - volume_utils.notify_about_volume_usage(ctxt, volume, "detach.end") - else: - self.volume_rpcapi.attachment_delete(ctxt, - attachment.id, - volume) - remaining_attachments = AO_LIST.get_all_by_volume_id(ctxt, volume.id) - - # TODO(jdg): Make this check attachments_by_volume_id when we - # implement multi-attach for real - if len(remaining_attachments) < 1: - volume.status = 'available' - volume.attach_status = 'detached' - volume.save() - return remaining_attachments - - -class HostAPI(base.Base): - """Sub-set of the Volume Manager API for managing host operations.""" - def set_host_enabled(self, context, host, enabled): - """Sets the specified host's ability to accept new volumes.""" - raise NotImplementedError() - - def get_host_uptime(self, context, host): - """Returns the result of calling "uptime" on the target host.""" - raise NotImplementedError() - - def host_power_action(self, context, host, action): - raise NotImplementedError() - - def set_host_maintenance(self, context, host, mode): - """Start/Stop host maintenance window. - - On start, it triggers volume evacuation. 
- """ - raise NotImplementedError() diff --git a/cinder/volume/configuration.py b/cinder/volume/configuration.py deleted file mode 100644 index 7a5710cbc..000000000 --- a/cinder/volume/configuration.py +++ /dev/null @@ -1,163 +0,0 @@ -# Copyright (c) 2012 Rackspace Hosting -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -"""Configuration support for all drivers. - -This module allows support for setting configurations either from default -or from a particular FLAGS group, to be able to set multiple configurations -for a given set of values. - -For instance, two lvm configurations can be set by naming them in groups as - - [lvm1] - volume_group=lvm-group-1 - ... - - [lvm2] - volume_group=lvm-group-2 - ... - -And the configuration group name will be passed in so that all calls to -configuration.volume_group within that instance will be mapped to the proper -named group. - -This class also ensures the implementation's configuration is grafted into the -option group. This is due to the way cfg works. All cfg options must be defined -and registered in the group in which they are used. 
-""" - - -from oslo_config import cfg - - -CONF = cfg.CONF -SHARED_CONF_GROUP = 'backend_defaults' - - -class DefaultGroupConfiguration(object): - """Get config options from only DEFAULT.""" - - def __init__(self): - # set the local conf so that __call__'s know what to use - self.local_conf = CONF - - def _ensure_config_values(self, volume_opts): - CONF.register_opts(volume_opts, group=None) - - def append_config_values(self, volume_opts): - self._ensure_config_values(volume_opts) - - def safe_get(self, value): - try: - return self.__getattr__(value) - except cfg.NoSuchOptError: - return None - - def __getattr__(self, value): - # Don't use self.local_conf to avoid reentrant call to __getattr__() - local_conf = object.__getattribute__(self, 'local_conf') - return getattr(local_conf, value) - - -class BackendGroupConfiguration(object): - - def __init__(self, volume_opts, config_group=None): - """Initialize configuration. - - This takes care of grafting the implementation's config - values into the config group and shared defaults. We will try to - pull values from the specified 'config_group', but fall back to - defaults from the SHARED_CONF_GROUP. - """ - self.config_group = config_group - - # set the local conf so that __call__'s know what to use - self._ensure_config_values(volume_opts) - self.backend_conf = CONF._get(self.config_group) - self.shared_backend_conf = CONF._get(SHARED_CONF_GROUP) - - def _safe_register(self, opt, group): - try: - CONF.register_opt(opt, group=group) - except cfg.DuplicateOptError: - pass # If it's already registered ignore it - - def _ensure_config_values(self, volume_opts): - """Register the options in the shared group. - - When we go to get a config option we will try the backend specific - group first and fall back to the shared group. We override the default - from all the config options for the backend group so we can know if it - was set or not. 
- """ - for opt in volume_opts: - self._safe_register(opt, SHARED_CONF_GROUP) - # Assuming they aren't the same groups, graft on the options into - # the backend group and override its default value. - if self.config_group != SHARED_CONF_GROUP: - self._safe_register(opt, self.config_group) - CONF.set_default(opt.name, None, group=self.config_group) - - def append_config_values(self, volume_opts): - self._ensure_config_values(volume_opts) - - def set_default(self, opt_name, default): - CONF.set_default(opt_name, default, group=SHARED_CONF_GROUP) - - def get(self, key, default=None): - return getattr(self, key, default) - - def safe_get(self, value): - try: - return self.__getattr__(value) - except cfg.NoSuchOptError: - return None - - def __getattr__(self, opt_name): - # Don't use self.X to avoid reentrant call to __getattr__() - backend_conf = object.__getattribute__(self, 'backend_conf') - opt_value = getattr(backend_conf, opt_name) - if opt_value is None: - shared_conf = object.__getattribute__(self, 'shared_backend_conf') - opt_value = getattr(shared_conf, opt_name) - return opt_value - - -class Configuration(object): - - def __init__(self, volume_opts, config_group=None): - """Initialize configuration. - - This shim will allow for compatibility with the DEFAULT - style of backend configuration which is used by some of the users - of this configuration helper, or by the volume drivers that have - all been forced over to the config_group style. 
- """ - self.config_group = config_group - if config_group: - self.conf = BackendGroupConfiguration(volume_opts, config_group) - else: - self.conf = DefaultGroupConfiguration() - - def append_config_values(self, volume_opts): - self.conf.append_config_values(volume_opts) - - def safe_get(self, value): - return self.conf.safe_get(value) - - def __getattr__(self, value): - # Don't use self.conf to avoid reentrant call to __getattr__() - conf = object.__getattribute__(self, 'conf') - return getattr(conf, value) diff --git a/cinder/volume/driver.py b/cinder/volume/driver.py deleted file mode 100644 index 23cb520e0..000000000 --- a/cinder/volume/driver.py +++ /dev/null @@ -1,2858 +0,0 @@ -# Copyright 2010 United States Government as represented by the -# Administrator of the National Aeronautics and Space Administration. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
-"""Drivers for volumes.""" - -import abc -import time - -from oslo_concurrency import processutils -from oslo_config import cfg -from oslo_config import types -from oslo_log import log as logging -from oslo_utils import excutils -import six - -from cinder import exception -from cinder.i18n import _ -from cinder.image import image_utils -from cinder import objects -from cinder.objects import fields -from cinder import utils -from cinder.volume import configuration -from cinder.volume import driver_utils -from cinder.volume import rpcapi as volume_rpcapi -from cinder.volume import throttling - -LOG = logging.getLogger(__name__) - - -volume_opts = [ - cfg.IntOpt('num_shell_tries', - default=3, - help='Number of times to attempt to run flakey shell commands'), - cfg.IntOpt('reserved_percentage', - default=0, - min=0, max=100, - help='The percentage of backend capacity is reserved'), - cfg.StrOpt('iscsi_target_prefix', - default='iqn.2010-10.org.openstack:', - help='Prefix for iSCSI volumes'), - cfg.StrOpt('iscsi_ip_address', - default='$my_ip', - help='The IP address that the iSCSI daemon is listening on'), - cfg.ListOpt('iscsi_secondary_ip_addresses', - default=[], - help='The list of secondary IP addresses of the iSCSI daemon'), - cfg.PortOpt('iscsi_port', - default=3260, - help='The port that the iSCSI daemon is listening on'), - cfg.IntOpt('num_volume_device_scan_tries', - default=3, - help='The maximum number of times to rescan targets' - ' to find volume'), - cfg.StrOpt('volume_backend_name', - help='The backend name for a given driver implementation'), - cfg.BoolOpt('use_multipath_for_image_xfer', - default=False, - help='Do we attach/detach volumes in cinder using multipath ' - 'for volume to image and image to volume transfers?'), - cfg.BoolOpt('enforce_multipath_for_image_xfer', - default=False, - help='If this is set to True, attachment of volumes for ' - 'image transfer will be aborted when multipathd is not ' - 'running. 
Otherwise, it will fallback to single path.'), - cfg.StrOpt('volume_clear', - default='zero', - choices=['none', 'zero'], - help='Method used to wipe old volumes'), - cfg.IntOpt('volume_clear_size', - default=0, - max=1024, - help='Size in MiB to wipe at start of old volumes. 1024 MiB' - 'at max. 0 => all'), - cfg.StrOpt('volume_clear_ionice', - help='The flag to pass to ionice to alter the i/o priority ' - 'of the process used to zero a volume after deletion, ' - 'for example "-c3" for idle only priority.'), - cfg.StrOpt('iscsi_helper', - default='tgtadm', - choices=['tgtadm', 'lioadm', 'scstadmin', 'iscsictl', - 'ietadm', 'fake'], - help='iSCSI target user-land tool to use. tgtadm is default, ' - 'use lioadm for LIO iSCSI support, scstadmin for SCST ' - 'target support, ietadm for iSCSI Enterprise Target, ' - 'iscsictl for Chelsio iSCSI ' - 'Target or fake for testing.'), - cfg.StrOpt('volumes_dir', - default='$state_path/volumes', - help='Volume configuration file storage ' - 'directory'), - cfg.StrOpt('iet_conf', - default='/etc/iet/ietd.conf', - help='IET configuration file'), - cfg.StrOpt('chiscsi_conf', - default='/etc/chelsio-iscsi/chiscsi.conf', - help='Chiscsi (CXT) global defaults configuration file'), - cfg.StrOpt('iscsi_iotype', - default='fileio', - choices=['blockio', 'fileio', 'auto'], - help=('Sets the behavior of the iSCSI target ' - 'to either perform blockio or fileio ' - 'optionally, auto can be set and Cinder ' - 'will autodetect type of backing device')), - cfg.StrOpt('volume_dd_blocksize', - default='1M', - help='The default block size used when copying/clearing ' - 'volumes'), - cfg.StrOpt('volume_copy_blkio_cgroup_name', - default='cinder-volume-copy', - help='The blkio cgroup name to be used to limit bandwidth ' - 'of volume copy'), - cfg.IntOpt('volume_copy_bps_limit', - default=0, - help='The upper limit of bandwidth of volume copy. 
' - '0 => unlimited'), - cfg.StrOpt('iscsi_write_cache', - default='on', - choices=['on', 'off'], - help='Sets the behavior of the iSCSI target to either ' - 'perform write-back(on) or write-through(off). ' - 'This parameter is valid if iscsi_helper is set ' - 'to tgtadm.'), - cfg.StrOpt('iscsi_target_flags', - default='', - help='Sets the target-specific flags for the iSCSI target. ' - 'Only used for tgtadm to specify backing device flags ' - 'using bsoflags option. The specified string is passed ' - 'as is to the underlying tool.'), - cfg.StrOpt('iscsi_protocol', - default='iscsi', - choices=['iscsi', 'iser'], - help='Determines the iSCSI protocol for new iSCSI volumes, ' - 'created with tgtadm or lioadm target helpers. In ' - 'order to enable RDMA, this parameter should be set ' - 'with the value "iser". The supported iSCSI protocol ' - 'values are "iscsi" and "iser".'), - cfg.StrOpt('driver_client_cert_key', - help='The path to the client certificate key for verification, ' - 'if the driver supports it.'), - cfg.StrOpt('driver_client_cert', - help='The path to the client certificate for verification, ' - 'if the driver supports it.'), - cfg.BoolOpt('driver_use_ssl', - default=False, - help='Tell driver to use SSL for connection to backend ' - 'storage if the driver supports it.'), - cfg.FloatOpt('max_over_subscription_ratio', - default=20.0, - help='Float representation of the over subscription ratio ' - 'when thin provisioning is involved. Default ratio is ' - '20.0, meaning provisioned capacity can be 20 times of ' - 'the total physical capacity. If the ratio is 10.5, it ' - 'means provisioned capacity can be 10.5 times of the ' - 'total physical capacity. A ratio of 1.0 means ' - 'provisioned capacity cannot exceed the total physical ' - 'capacity. 
The ratio has to be a minimum of 1.0.'), - cfg.StrOpt('scst_target_iqn_name', - help='Certain ISCSI targets have predefined target names, ' - 'SCST target driver uses this name.'), - cfg.StrOpt('scst_target_driver', - default='iscsi', - help='SCST target implementation can choose from multiple ' - 'SCST target drivers.'), - cfg.BoolOpt('use_chap_auth', - default=False, - help='Option to enable/disable CHAP authentication for ' - 'targets.'), - cfg.StrOpt('chap_username', - default='', - help='CHAP user name.'), - cfg.StrOpt('chap_password', - default='', - help='Password for specified CHAP account name.', - secret=True), - cfg.StrOpt('driver_data_namespace', - help='Namespace for driver private data values to be ' - 'saved in.'), - cfg.StrOpt('filter_function', - help='String representation for an equation that will be ' - 'used to filter hosts. Only used when the driver ' - 'filter is set to be used by the Cinder scheduler.'), - cfg.StrOpt('goodness_function', - help='String representation for an equation that will be ' - 'used to determine the goodness of a host. Only used ' - 'when using the goodness weigher is set to be used by ' - 'the Cinder scheduler.'), - cfg.BoolOpt('driver_ssl_cert_verify', - default=False, - help='If set to True the http client will validate the SSL ' - 'certificate of the backend endpoint.'), - cfg.StrOpt('driver_ssl_cert_path', - help='Can be used to specify a non default path to a ' - 'CA_BUNDLE file or directory with certificates of ' - 'trusted CAs, which will be used to validate the backend'), - cfg.ListOpt('trace_flags', - help='List of options that control which trace info ' - 'is written to the DEBUG log level to assist ' - 'developers. Valid values are method and api.'), - cfg.MultiOpt('replication_device', - item_type=types.Dict(), - secret=True, - help="Multi opt of dictionaries to represent a replication " - "target device. 
This option may be specified multiple " - "times in a single config section to specify multiple " - "replication target devices. Each entry takes the " - "standard dict config form: replication_device = " - "target_device_id:," - "key1:value1,key2:value2..."), - cfg.BoolOpt('image_upload_use_cinder_backend', - default=False, - help='If set to True, upload-to-image in raw format will ' - 'create a cloned volume and register its location to ' - 'the image service, instead of uploading the volume ' - 'content. The cinder backend and locations support ' - 'must be enabled in the image service, and ' - 'glance_api_version must be set to 2.'), - cfg.BoolOpt('image_upload_use_internal_tenant', - default=False, - help='If set to True, the image volume created by ' - 'upload-to-image will be placed in the internal tenant. ' - 'Otherwise, the image volume is created in the current ' - 'context\'s tenant.'), - cfg.BoolOpt('image_volume_cache_enabled', - default=False, - help='Enable the image volume cache for this backend.'), - cfg.IntOpt('image_volume_cache_max_size_gb', - default=0, - help='Max size of the image volume cache for this backend in ' - 'GB. 0 => unlimited.'), - cfg.IntOpt('image_volume_cache_max_count', - default=0, - help='Max number of entries allowed in the image volume cache. ' - '0 => unlimited.'), - cfg.BoolOpt('report_discard_supported', - default=False, - help='Report to clients of Cinder that the backend supports ' - 'discard (aka. trim/unmap). This will not actually ' - 'change the behavior of the backend or the client ' - 'directly, it will only notify that it can be used.'), - cfg.StrOpt('storage_protocol', - ignore_case=True, - default='iscsi', - choices=['iscsi', 'fc'], - help='Protocol for transferring data between host and ' - 'storage back-end.'), - cfg.BoolOpt('backup_use_temp_snapshot', - default=False, - help='If this is set to True, the backup_use_temp_snapshot ' - 'path will be used during the backup. 
Otherwise, it ' - 'will use backup_use_temp_volume path.'), - cfg.BoolOpt('enable_unsupported_driver', - default=False, - help="Set this to True when you want to allow an unsupported " - "driver to start. Drivers that haven't maintained a " - "working CI system and testing are marked as unsupported " - "until CI is working again. This also marks a driver as " - "deprecated and may be removed in the next release."), - cfg.StrOpt('backend_availability_zone', - default=None, - help='Availability zone for this volume backend. If not set, ' - 'the storage_availability_zone option value is used as ' - 'the default for all backends.'), -] - -# for backward compatibility -iser_opts = [ - cfg.IntOpt('num_iser_scan_tries', - default=3, - help='The maximum number of times to rescan iSER target' - 'to find volume'), - cfg.StrOpt('iser_target_prefix', - default='iqn.2010-10.org.openstack:', - help='Prefix for iSER volumes'), - cfg.StrOpt('iser_ip_address', - default='$my_ip', - help='The IP address that the iSER daemon is listening on'), - cfg.PortOpt('iser_port', - default=3260, - help='The port that the iSER daemon is listening on'), - cfg.StrOpt('iser_helper', - default='tgtadm', - help='The name of the iSER target user-land tool to use'), -] - - -CONF = cfg.CONF -CONF.register_opts(volume_opts, group=configuration.SHARED_CONF_GROUP) -CONF.register_opts(iser_opts, group=configuration.SHARED_CONF_GROUP) -CONF.register_opts(volume_opts) -CONF.register_opts(iser_opts) -CONF.import_opt('backup_use_same_host', 'cinder.backup.api') - - -@six.add_metaclass(abc.ABCMeta) -class BaseVD(object): - """Executes commands relating to Volumes. - - Base Driver for Cinder Volume Control Path, - This includes supported/required implementation - for API calls. Also provides *generic* implementation - of core features like cloning, copy_image_to_volume etc, - this way drivers that inherit from this base class and - don't offer their own impl can fall back on a general - solution here. 
- - Key thing to keep in mind with this driver is that it's - intended that these drivers ONLY implement Control Path - details (create, delete, extend...), while transport or - data path related implementation should be a *member object* - that we call a connector. The point here is that for example - don't allow the LVM driver to implement iSCSI methods, instead - call whatever connector it has configured via conf file - (iSCSI{LIO, TGT, IET}, FC, etc). - - In the base class and for example the LVM driver we do this via a has-a - relationship and just provide an interface to the specific connector - methods. How you do this in your own driver is of course up to you. - """ - VERSION = "N/A" - - # NOTE(geguileo): By default we assume drivers don't support Active-Active - # configurations. If driver supports it then they can set this class - # attribute on the driver, and if support depends on configuration options - # then they can set it at the instance level on the driver's __init__ - # method since the manager will do the check after that. - SUPPORTS_ACTIVE_ACTIVE = False - - # If a driver hasn't maintained their CI system, this will get - # set to False, which prevents the driver from starting. - # Add enable_unsupported_driver = True in cinder.conf to get - # the unsupported driver started. 
- SUPPORTED = True - - # Methods checked to detect a driver implements a replication feature - REPLICATION_FEATURE_CHECKERS = {'v2.1': 'failover_host', - 'a/a': 'failover_completed'} - - def __init__(self, execute=utils.execute, *args, **kwargs): - # NOTE(vish): db is set by Manager - self.db = kwargs.get('db') - self.host = kwargs.get('host') - self.cluster_name = kwargs.get('cluster_name') - self.configuration = kwargs.get('configuration', None) - - if self.configuration: - self.configuration.append_config_values(volume_opts) - self.configuration.append_config_values(iser_opts) - utils.setup_tracing(self.configuration.safe_get('trace_flags')) - - # NOTE(geguileo): Don't allow to start if we are enabling - # replication on a cluster service with a backend that doesn't - # support the required mechanism for Active-Active. - replication_devices = self.configuration.safe_get( - 'replication_device') - if (self.cluster_name and replication_devices and - not self.supports_replication_feature('a/a')): - raise exception.Invalid(_("Driver doesn't support clustered " - "replication.")) - - self.driver_utils = driver_utils.VolumeDriverUtils( - self._driver_data_namespace(), self.db) - - self._execute = execute - self._stats = {} - self._throttle = None - - self.pools = [] - self.capabilities = {} - - # We set these mappings up in the base driver so they - # can be used by children - # (intended for LVM and BlockDevice, but others could use as well) - self.target_mapping = { - 'fake': 'cinder.volume.targets.fake.FakeTarget', - 'ietadm': 'cinder.volume.targets.iet.IetAdm', - 'lioadm': 'cinder.volume.targets.lio.LioAdm', - 'tgtadm': 'cinder.volume.targets.tgt.TgtAdm', - 'scstadmin': 'cinder.volume.targets.scst.SCSTAdm', - 'iscsictl': 'cinder.volume.targets.cxt.CxtAdm'} - - # set True by manager after successful check_for_setup - self._initialized = False - - def _driver_data_namespace(self): - namespace = self.__class__.__name__ - if self.configuration: - namespace = 
self.configuration.safe_get('driver_data_namespace') - if not namespace: - namespace = self.configuration.safe_get('volume_backend_name') - return namespace - - def _is_non_recoverable(self, err, non_recoverable_list): - for item in non_recoverable_list: - if item in err: - return True - - return False - - def _try_execute(self, *command, **kwargs): - # NOTE(vish): Volume commands can partially fail due to timing, but - # running them a second time on failure will usually - # recover nicely. - - non_recoverable = kwargs.pop('no_retry_list', []) - - tries = 0 - while True: - try: - self._execute(*command, **kwargs) - return True - except processutils.ProcessExecutionError as ex: - tries = tries + 1 - - if tries >= self.configuration.num_shell_tries or\ - self._is_non_recoverable(ex.stderr, non_recoverable): - raise - - LOG.exception("Recovering from a failed execute. " - "Try number %s", tries) - time.sleep(tries ** 2) - - def _detach_volume(self, context, attach_info, volume, properties, - force=False, remote=False): - """Disconnect the volume from the host.""" - # Use Brick's code to do attach/detach - if attach_info: - connector = attach_info['connector'] - connector.disconnect_volume(attach_info['conn']['data'], - attach_info['device']) - if remote: - # Call remote manager's terminate_connection which includes - # driver's terminate_connection and remove export - rpcapi = volume_rpcapi.VolumeAPI() - rpcapi.terminate_connection(context, volume, properties, - force=force) - else: - # Call local driver's terminate_connection and remove export. - # NOTE(avishay) This is copied from the manager's code - need to - # clean this up in the future. 
- try: - self.terminate_connection(volume, properties, force=force) - except Exception as err: - err_msg = (_('Unable to terminate volume connection: %(err)s') - % {'err': six.text_type(err)}) - LOG.error(err_msg) - raise exception.VolumeBackendAPIException(data=err_msg) - - try: - LOG.debug("volume %s: removing export", volume['id']) - self.remove_export(context, volume) - except Exception as ex: - LOG.exception("Error detaching volume %(volume)s, " - "due to remove export failure.", - {"volume": volume['id']}) - raise exception.RemoveExportException(volume=volume['id'], - reason=ex) - - def set_initialized(self): - self._initialized = True - - @property - def initialized(self): - return self._initialized - - @property - def supported(self): - return self.SUPPORTED - - def set_throttle(self): - bps_limit = ((self.configuration and - self.configuration.safe_get('volume_copy_bps_limit')) or - CONF.volume_copy_bps_limit) - cgroup_name = ((self.configuration and - self.configuration.safe_get( - 'volume_copy_blkio_cgroup_name')) or - CONF.volume_copy_blkio_cgroup_name) - self._throttle = None - if bps_limit: - try: - self._throttle = throttling.BlkioCgroup(int(bps_limit), - cgroup_name) - except processutils.ProcessExecutionError as err: - LOG.warning('Failed to activate volume copy throttling: ' - '%(err)s', {'err': err}) - throttling.Throttle.set_default(self._throttle) - - def get_version(self): - """Get the current version of this driver.""" - return self.VERSION - - @abc.abstractmethod - def check_for_setup_error(self): - return - - @abc.abstractmethod - def create_volume(self, volume): - """Creates a volume. - - Can optionally return a Dictionary of changes to the volume object to - be persisted. - - If volume_type extra specs includes - 'capabilities:replication True' the driver - needs to create a volume replica (secondary), and setup replication - between the newly created volume and the secondary volume. - Returned dictionary should include: - - .. 
code-block:: python - - volume['replication_status'] = 'copying' - volume['replication_extended_status'] = - volume['driver_data'] = - - """ - return - - @abc.abstractmethod - def delete_volume(self, volume): - """Deletes a volume. - - If volume_type extra specs includes 'replication: True' - then the driver needs to delete the volume replica too. - """ - return - - def secure_file_operations_enabled(self): - """Determine if driver is running in Secure File Operations mode. - - The Cinder Volume driver needs to query if this driver is running - in a secure file operations mode. By default, it is False: any driver - that does support secure file operations should override this method. - """ - return False - - def get_volume_stats(self, refresh=False): - """Return the current state of the volume service. - - If 'refresh' is True, run the update first. - - For replication the following state should be reported: - replication = True (None or false disables replication) - """ - return - - def get_prefixed_property(self, property): - """Return prefixed property name - - :returns: a prefixed property name string or None - """ - - if property and self.capabilities.get('vendor_prefix'): - return self.capabilities.get('vendor_prefix') + ':' + property - - def _set_property(self, properties, entry, title, description, - type, **kwargs): - prop = dict(title=title, description=description, type=type) - allowed_keys = ('enum', 'default', 'minimum', 'maximum') - for key in kwargs: - if key in allowed_keys: - prop[key] = kwargs[key] - properties[entry] = prop - - def _init_standard_capabilities(self): - """Create a dictionary of Cinder standard capabilities. - - This method creates a dictionary of Cinder standard capabilities - and returns the created dictionary. - The keys of this dictionary don't contain prefix and separator(:). 
- """ - - properties = {} - self._set_property( - properties, - "thin_provisioning", - "Thin Provisioning", - _("Sets thin provisioning."), - "boolean") - - self._set_property( - properties, - "compression", - "Compression", - _("Enables compression."), - "boolean") - - self._set_property( - properties, - "qos", - "QoS", - _("Enables QoS."), - "boolean") - - self._set_property( - properties, - "replication_enabled", - "Replication", - _("Enables replication."), - "boolean") - - return properties - - def _init_vendor_properties(self): - """Create a dictionary of vendor unique properties. - - This method creates a dictionary of vendor unique properties - and returns both created dictionary and vendor name. - Returned vendor name is used to check for name of vendor - unique properties. - - - Vendor name shouldn't include colon(:) because of the separator - and it is automatically replaced by underscore(_). - ex. abc:d -> abc_d - - Vendor prefix is equal to vendor name. - ex. abcd - - Vendor unique properties must start with vendor prefix + ':'. - ex. abcd:maxIOPS - - Each backend driver needs to override this method to expose - its own properties using _set_property() like this: - - self._set_property( - properties, - "vendorPrefix:specific_property", - "Title of property", - _("Description of property"), - "type") - - : return dictionary of vendor unique properties - : return vendor name - - Example of implementation:: - - properties = {} - self._set_property( - properties, - "abcd:compression_type", - "Compression type", - _("Specifies compression type."), - "string", - enum=["lossy", "lossless", "special"]) - - self._set_property( - properties, - "abcd:minIOPS", - "Minimum IOPS QoS", - _("Sets minimum IOPS if QoS is enabled."), - "integer", - minimum=10, - default=100) - - return properties, 'abcd' - """ - - return {}, None - - def init_capabilities(self): - """Obtain backend volume stats and capabilities list. 
- - This stores a dictionary which is consisted of two parts. - First part includes static backend capabilities which are - obtained by get_volume_stats(). Second part is properties, - which includes parameters correspond to extra specs. - This properties part is consisted of cinder standard - capabilities and vendor unique properties. - - Using this capabilities list, operator can manage/configure - backend using key/value from capabilities without specific - knowledge of backend. - """ - - # Set static backend capabilities from get_volume_stats() - stats = self.get_volume_stats(True) - if stats: - self.capabilities = stats.copy() - - # Set cinder standard capabilities - self.capabilities['properties'] = self._init_standard_capabilities() - - # Set Vendor unique properties - vendor_prop, vendor_name = self._init_vendor_properties() - if vendor_name and vendor_prop: - updated_vendor_prop = {} - old_name = None - # Replace colon in vendor name to underscore. - if ':' in vendor_name: - old_name = vendor_name - vendor_name = vendor_name.replace(':', '_') - LOG.warning('The colon in vendor name was replaced ' - 'by underscore. Updated vendor name is ' - '%(name)s".', {'name': vendor_name}) - - for key in vendor_prop: - # If key has colon in vendor name field, we replace it to - # underscore. - # ex. abc:d:storagetype:provisioning - # -> abc_d:storagetype:provisioning - if old_name and key.startswith(old_name + ':'): - new_key = key.replace(old_name, vendor_name, 1) - updated_vendor_prop[new_key] = vendor_prop[key] - continue - if not key.startswith(vendor_name + ':'): - LOG.warning('Vendor unique property "%(property)s" ' - 'must start with vendor prefix with colon ' - '"%(prefix)s". 
The property was ' - 'not registered on capabilities list.', - {'prefix': vendor_name + ':', - 'property': key}) - continue - updated_vendor_prop[key] = vendor_prop[key] - - # Update vendor unique properties to the dictionary - self.capabilities['vendor_prefix'] = vendor_name - self.capabilities['properties'].update(updated_vendor_prop) - - LOG.debug("Initialized capabilities list: %s.", self.capabilities) - - def _update_pools_and_stats(self, data): - """Updates data for pools and volume stats based on provided data.""" - # provisioned_capacity_gb is set to None by default below, but - # None won't be used in calculation. It will be overridden by - # driver's provisioned_capacity_gb if reported, otherwise it - # defaults to allocated_capacity_gb in host_manager.py. - if self.pools: - for pool in self.pools: - new_pool = {} - new_pool.update(dict( - pool_name=pool, - total_capacity_gb=0, - free_capacity_gb=0, - provisioned_capacity_gb=None, - reserved_percentage=100, - QoS_support=False, - filter_function=self.get_filter_function(), - goodness_function=self.get_goodness_function() - )) - data["pools"].append(new_pool) - else: - # No pool configured, the whole backend will be treated as a pool - single_pool = {} - single_pool.update(dict( - pool_name=data["volume_backend_name"], - total_capacity_gb=0, - free_capacity_gb=0, - provisioned_capacity_gb=None, - reserved_percentage=100, - QoS_support=False, - filter_function=self.get_filter_function(), - goodness_function=self.get_goodness_function() - )) - data["pools"].append(single_pool) - self._stats = data - - def copy_image_to_volume(self, context, volume, image_service, image_id): - """Fetch image from image_service and write to unencrypted volume. - - This does not attach an encryptor layer when connecting to the volume. 
- """ - self._copy_image_data_to_volume( - context, volume, image_service, image_id, encrypted=False) - - def copy_image_to_encrypted_volume( - self, context, volume, image_service, image_id): - """Fetch image from image_service and write to encrypted volume. - - This attaches the encryptor layer when connecting to the volume. - """ - self._copy_image_data_to_volume( - context, volume, image_service, image_id, encrypted=True) - - def _copy_image_data_to_volume(self, context, volume, image_service, - image_id, encrypted=False): - """Fetch the image from image_service and write it to the volume.""" - LOG.debug('copy_image_to_volume %s.', volume['name']) - - use_multipath = self.configuration.use_multipath_for_image_xfer - enforce_multipath = self.configuration.enforce_multipath_for_image_xfer - properties = utils.brick_get_connector_properties(use_multipath, - enforce_multipath) - attach_info, volume = self._attach_volume(context, volume, properties) - try: - if encrypted: - encryption = self.db.volume_encryption_metadata_get(context, - volume.id) - utils.brick_attach_volume_encryptor(context, - attach_info, - encryption) - try: - image_utils.fetch_to_raw( - context, - image_service, - image_id, - attach_info['device']['path'], - self.configuration.volume_dd_blocksize, - size=volume['size']) - except exception.ImageTooBig: - with excutils.save_and_reraise_exception(): - LOG.exception("Copying image %(image_id)s " - "to volume failed due to " - "insufficient available space.", - {'image_id': image_id}) - - finally: - if encrypted: - utils.brick_detach_volume_encryptor(attach_info, - encryption) - finally: - self._detach_volume(context, attach_info, volume, properties) - - def copy_volume_to_image(self, context, volume, image_service, image_meta): - """Copy the volume to the specified image.""" - LOG.debug('copy_volume_to_image %s.', volume['name']) - - use_multipath = self.configuration.use_multipath_for_image_xfer - enforce_multipath = 
self.configuration.enforce_multipath_for_image_xfer - properties = utils.brick_get_connector_properties(use_multipath, - enforce_multipath) - attach_info, volume = self._attach_volume(context, volume, properties) - - try: - image_utils.upload_volume(context, - image_service, - image_meta, - attach_info['device']['path']) - finally: - self._detach_volume(context, attach_info, volume, properties) - - def before_volume_copy(self, context, src_vol, dest_vol, remote=None): - """Driver-specific actions before copyvolume data. - - This method will be called before _copy_volume_data during volume - migration - """ - pass - - def after_volume_copy(self, context, src_vol, dest_vol, remote=None): - """Driver-specific actions after copyvolume data. - - This method will be called after _copy_volume_data during volume - migration - """ - pass - - def get_filter_function(self): - """Get filter_function string. - - Returns either the string from the driver instance or global section - in cinder.conf. If nothing is specified in cinder.conf, then try to - find the default filter_function. When None is returned the scheduler - will always pass the driver instance. - - :returns: a filter_function string or None - """ - ret_function = self.configuration.filter_function - if not ret_function: - ret_function = CONF.filter_function - if not ret_function: - ret_function = self.get_default_filter_function() - return ret_function - - def get_goodness_function(self): - """Get good_function string. - - Returns either the string from the driver instance or global section - in cinder.conf. If nothing is specified in cinder.conf, then try to - find the default goodness_function. When None is returned the scheduler - will give the lowest score to the driver instance. 
- - :returns: a goodness_function string or None - """ - ret_function = self.configuration.goodness_function - if not ret_function: - ret_function = CONF.goodness_function - if not ret_function: - ret_function = self.get_default_goodness_function() - return ret_function - - def get_default_filter_function(self): - """Get the default filter_function string. - - Each driver could overwrite the method to return a well-known - default string if it is available. - - :returns: None - """ - return None - - def get_default_goodness_function(self): - """Get the default goodness_function string. - - Each driver could overwrite the method to return a well-known - default string if it is available. - - :returns: None - """ - return None - - def _attach_volume(self, context, volume, properties, remote=False): - """Attach the volume.""" - if remote: - # Call remote manager's initialize_connection which includes - # driver's create_export and initialize_connection - rpcapi = volume_rpcapi.VolumeAPI() - try: - conn = rpcapi.initialize_connection(context, volume, - properties) - except Exception: - with excutils.save_and_reraise_exception(): - # It is possible that initialize_connection fails due to - # timeout. In fact, the volume is already attached after - # the timeout error is raised, so the connection worths - # a try of terminating. - try: - rpcapi.terminate_connection(context, volume, - properties, force=True) - except Exception: - LOG.warning("Failed terminating the connection " - "of volume %(volume_id)s, but it is " - "acceptable.", - {'volume_id': volume['id']}) - else: - # Call local driver's create_export and initialize_connection. - # NOTE(avishay) This is copied from the manager's code - need to - # clean this up in the future. 
- model_update = None - try: - LOG.debug("Volume %s: creating export", volume['id']) - model_update = self.create_export(context, volume, properties) - if model_update: - volume.update(model_update) - volume.save() - except exception.CinderException as ex: - if model_update: - LOG.exception("Failed updating model of volume " - "%(volume_id)s with driver provided " - "model %(model)s", - {'volume_id': volume['id'], - 'model': model_update}) - raise exception.ExportFailure(reason=ex) - - try: - conn = self.initialize_connection(volume, properties) - except Exception as err: - try: - err_msg = (_('Unable to fetch connection information from ' - 'backend: %(err)s') % - {'err': six.text_type(err)}) - LOG.error(err_msg) - LOG.debug("Cleaning up failed connect initialization.") - self.remove_export(context, volume) - except Exception as ex: - ex_msg = (_('Error encountered during cleanup ' - 'of a failed attach: %(ex)s') % - {'ex': six.text_type(ex)}) - LOG.error(err_msg) - raise exception.VolumeBackendAPIException(data=ex_msg) - raise exception.VolumeBackendAPIException(data=err_msg) - - try: - attach_info = self._connect_device(conn) - except Exception as exc: - # We may have reached a point where we have attached the volume, - # so we have to detach it (do the cleanup). 
- attach_info = getattr(exc, 'kwargs', {}).get('attach_info', None) - - try: - LOG.debug('Device for volume %s is unavailable but did ' - 'attach, detaching it.', volume['id']) - self._detach_volume(context, attach_info, volume, - properties, force=True, - remote=remote) - except Exception: - LOG.exception('Error detaching volume %s', - volume['id']) - raise - - return (attach_info, volume) - - def _attach_snapshot(self, ctxt, snapshot, properties): - """Attach the snapshot.""" - model_update = None - try: - LOG.debug("Snapshot %s: creating export.", snapshot.id) - model_update = self.create_export_snapshot(ctxt, snapshot, - properties) - if model_update: - snapshot.provider_location = model_update.get( - 'provider_location', None) - snapshot.provider_auth = model_update.get( - 'provider_auth', None) - snapshot.save() - except exception.CinderException as ex: - if model_update: - LOG.exception("Failed updating model of snapshot " - "%(snapshot_id)s with driver provided " - "model %(model)s.", - {'snapshot_id': snapshot.id, - 'model': model_update}) - raise exception.ExportFailure(reason=ex) - - try: - conn = self.initialize_connection_snapshot( - snapshot, properties) - except Exception as err: - try: - err_msg = (_('Unable to fetch connection information from ' - 'backend: %(err)s') % - {'err': six.text_type(err)}) - LOG.error(err_msg) - LOG.debug("Cleaning up failed connect initialization.") - self.remove_export_snapshot(ctxt, snapshot) - except Exception as ex: - ex_msg = (_('Error encountered during cleanup ' - 'of a failed attach: %(ex)s') % - {'ex': six.text_type(ex)}) - LOG.error(err_msg) - raise exception.VolumeBackendAPIException(data=ex_msg) - raise exception.VolumeBackendAPIException(data=err_msg) - return conn - - def _connect_device(self, conn): - # Use Brick's code to do attach/detach - use_multipath = self.configuration.use_multipath_for_image_xfer - device_scan_attempts = self.configuration.num_volume_device_scan_tries - protocol = 
conn['driver_volume_type'] - connector = utils.brick_get_connector( - protocol, - use_multipath=use_multipath, - device_scan_attempts=device_scan_attempts, - conn=conn) - device = connector.connect_volume(conn['data']) - host_device = device['path'] - - attach_info = {'conn': conn, 'device': device, 'connector': connector} - - unavailable = True - try: - # Secure network file systems will NOT run as root. - root_access = not self.secure_file_operations_enabled() - unavailable = not connector.check_valid_device(host_device, - root_access) - except Exception: - LOG.exception('Could not validate device %s', host_device) - - if unavailable: - raise exception.DeviceUnavailable(path=host_device, - attach_info=attach_info, - reason=(_("Unable to access " - "the backend storage " - "via the path " - "%(path)s.") % - {'path': host_device})) - return attach_info - - def clone_image(self, context, volume, - image_location, image_meta, - image_service): - return None, False - - def backup_use_temp_snapshot(self): - return False - - def snapshot_remote_attachable(self): - # TODO(lixiaoy1): the method will be deleted later when remote - # attach snapshot is implemented. - return False - - def get_backup_device(self, context, backup): - """Get a backup device from an existing volume. - - The function returns a volume or snapshot to backup service, - and then backup service attaches the device and does backup. - """ - backup_device = None - is_snapshot = False - if self.backup_use_temp_snapshot(): - (backup_device, is_snapshot) = ( - self._get_backup_volume_temp_snapshot(context, backup)) - else: - backup_device = self._get_backup_volume_temp_volume( - context, backup) - is_snapshot = False - return (backup_device, is_snapshot) - - def _get_backup_volume_temp_volume(self, context, backup): - """Return a volume to do backup. - - To backup a snapshot, create a temp volume from the snapshot and - back it up. 
- - Otherwise to backup an in-use volume, create a temp volume and - back it up. - """ - volume = objects.Volume.get_by_id(context, backup.volume_id) - snapshot = None - if backup.snapshot_id: - snapshot = objects.Snapshot.get_by_id(context, backup.snapshot_id) - - LOG.debug('Creating a new backup for volume %s.', volume['name']) - - temp_vol_ref = None - device_to_backup = volume - - # NOTE(xyang): If it is to backup from snapshot, create a temp - # volume from the source snapshot, backup the temp volume, and - # then clean up the temp volume. - if snapshot: - temp_vol_ref = self._create_temp_volume_from_snapshot( - context, volume, snapshot) - backup.temp_volume_id = temp_vol_ref.id - backup.save() - device_to_backup = temp_vol_ref - - else: - # NOTE(xyang): Check volume status if it is not to backup from - # snapshot; if 'in-use', create a temp volume from the source - # volume, backup the temp volume, and then clean up the temp - # volume; if 'available', just backup the volume. - previous_status = volume.get('previous_status') - if previous_status == "in-use": - temp_vol_ref = self._create_temp_cloned_volume( - context, volume) - backup.temp_volume_id = temp_vol_ref.id - backup.save() - device_to_backup = temp_vol_ref - - return device_to_backup - - def _get_backup_volume_temp_snapshot(self, context, backup): - """Return a device to backup. - - If it is to backup from snapshot, back it up directly. - - Otherwise for in-use volume, create a temp snapshot and back it up. - """ - volume = objects.Volume.get_by_id(context, backup.volume_id) - snapshot = None - if backup.snapshot_id: - snapshot = objects.Snapshot.get_by_id(context, backup.snapshot_id) - - LOG.debug('Creating a new backup for volume %s.', volume['name']) - - device_to_backup = volume - is_snapshot = False - temp_snapshot = None - - # NOTE(xyang): If it is to backup from snapshot, back it up - # directly. No need to clean it up. 
- if snapshot: - device_to_backup = snapshot - is_snapshot = True - - else: - # NOTE(xyang): If it is not to backup from snapshot, check volume - # status. If the volume status is 'in-use', create a temp snapshot - # from the source volume, backup the temp snapshot, and then clean - # up the temp snapshot; if the volume status is 'available', just - # backup the volume. - previous_status = volume.get('previous_status') - if previous_status == "in-use": - temp_snapshot = self._create_temp_snapshot(context, volume) - backup.temp_snapshot_id = temp_snapshot.id - backup.save() - device_to_backup = temp_snapshot - is_snapshot = True - - return (device_to_backup, is_snapshot) - - def _create_temp_snapshot(self, context, volume): - kwargs = { - 'volume_id': volume['id'], - 'cgsnapshot_id': None, - 'user_id': context.user_id, - 'project_id': context.project_id, - 'status': fields.SnapshotStatus.CREATING, - 'progress': '0%', - 'volume_size': volume['size'], - 'display_name': 'backup-snap-%s' % volume['id'], - 'display_description': None, - 'volume_type_id': volume['volume_type_id'], - 'encryption_key_id': volume['encryption_key_id'], - 'metadata': {}, - } - temp_snap_ref = objects.Snapshot(context=context, **kwargs) - temp_snap_ref.create() - try: - model_update = self.create_snapshot(temp_snap_ref) - if model_update: - temp_snap_ref.update(model_update) - except Exception: - with excutils.save_and_reraise_exception(): - with temp_snap_ref.obj_as_admin(): - self.db.volume_glance_metadata_delete_by_snapshot( - context, temp_snap_ref.id) - temp_snap_ref.destroy() - - temp_snap_ref.status = fields.SnapshotStatus.AVAILABLE - temp_snap_ref.save() - return temp_snap_ref - - def _create_temp_volume(self, context, volume, volume_options=None): - kwargs = { - 'size': volume.size, - 'display_name': 'backup-vol-%s' % volume.id, - 'host': volume.host, - 'cluster_name': volume.cluster_name, - 'user_id': context.user_id, - 'project_id': context.project_id, - 'status': 'creating', - 
'attach_status': fields.VolumeAttachStatus.DETACHED, - 'availability_zone': volume.availability_zone, - 'volume_type_id': volume.volume_type_id, - } - kwargs.update(volume_options or {}) - temp_vol_ref = objects.Volume(context=context, **kwargs) - temp_vol_ref.create() - return temp_vol_ref - - def _create_temp_cloned_volume(self, context, volume): - temp_vol_ref = self._create_temp_volume(context, volume) - try: - model_update = self.create_cloned_volume(temp_vol_ref, volume) - if model_update: - temp_vol_ref.update(model_update) - except Exception: - with excutils.save_and_reraise_exception(): - temp_vol_ref.destroy() - - temp_vol_ref.status = 'available' - temp_vol_ref.save() - return temp_vol_ref - - def _create_temp_volume_from_snapshot(self, context, volume, snapshot, - volume_options=None): - temp_vol_ref = self._create_temp_volume(context, volume, - volume_options=volume_options) - try: - model_update = self.create_volume_from_snapshot(temp_vol_ref, - snapshot) - if model_update: - temp_vol_ref.update(model_update) - except Exception: - with excutils.save_and_reraise_exception(): - temp_vol_ref.destroy() - - temp_vol_ref.status = 'available' - temp_vol_ref.save() - return temp_vol_ref - - def clear_download(self, context, volume): - """Clean up after an interrupted image copy.""" - pass - - def attach_volume(self, context, volume, instance_uuid, host_name, - mountpoint): - """Callback for volume attached to instance or host.""" - pass - - def detach_volume(self, context, volume, attachment=None): - """Callback for volume detached.""" - pass - - def do_setup(self, context): - """Any initialization the volume driver does while starting.""" - pass - - def validate_connector(self, connector): - """Fail if connector doesn't contain all the data needed by driver.""" - pass - - def update_migrated_volume(self, ctxt, volume, new_volume, - original_volume_status): - """Return model update for migrated volume. 
- - Each driver implementing this method needs to be responsible for the - values of _name_id and provider_location. If None is returned or either - key is not set, it means the volume table does not need to change the - value(s) for the key(s). - The return format is {"_name_id": value, "provider_location": value}. - - :param volume: The original volume that was migrated to this backend - :param new_volume: The migration volume object that was created on - this backend as part of the migration process - :param original_volume_status: The status of the original volume - :returns: model_update to update DB with any needed changes - """ - msg = _("The method update_migrated_volume is not implemented.") - raise NotImplementedError(msg) - - @staticmethod - def validate_connector_has_setting(connector, setting): - pass - - def retype(self, context, volume, new_type, diff, host): - return False, None - - def create_cloned_volume(self, volume, src_vref): - """Creates a clone of the specified volume. - - If volume_type extra specs includes 'replication: True' the - driver needs to create a volume replica (secondary) - and setup replication between the newly created volume - and the secondary volume. - """ - raise NotImplementedError() - - # ####### Interface methods for DataPath (Connector) ######## - @abc.abstractmethod - def ensure_export(self, context, volume): - """Synchronously recreates an export for a volume.""" - return - - @abc.abstractmethod - def create_export(self, context, volume, connector): - """Exports the volume. - - Can optionally return a Dictionary of changes - to the volume object to be persisted. - """ - return - - def create_export_snapshot(self, context, snapshot, connector): - """Exports the snapshot. - - Can optionally return a Dictionary of changes - to the snapshot object to be persisted. 
- """ - return - - @abc.abstractmethod - def remove_export(self, context, volume): - """Removes an export for a volume.""" - return - - def remove_export_snapshot(self, context, snapshot): - """Removes an export for a snapshot.""" - return - - @abc.abstractmethod - def initialize_connection(self, volume, connector): - """Allow connection to connector and return connection info. - - :param volume: The volume to be attached - :param connector: Dictionary containing information about what is being - connected to. - :returns conn_info: A dictionary of connection information. - """ - return - - def initialize_connection_snapshot(self, snapshot, connector, **kwargs): - """Allow connection to connector and return connection info. - - :param snapshot: The snapshot to be attached - :param connector: Dictionary containing information about what - is being connected to. - :returns conn_info: A dictionary of connection information. This - can optionally include a "initiator_updates" - field. - - The "initiator_updates" field must be a dictionary containing a - "set_values" and/or "remove_values" field. The "set_values" field must - be a dictionary of key-value pairs to be set/updated in the db. The - "remove_values" field must be a list of keys, previously set with - "set_values", that will be deleted from the db. - """ - return - - @abc.abstractmethod - def terminate_connection(self, volume, connector, **kwargs): - """Disallow connection from connector. - - :param volume: The volume to be disconnected. - :param connector: A dictionary describing the connection with details - about the initiator. Can be None. - """ - return - - def terminate_connection_snapshot(self, snapshot, connector, **kwargs): - """Disallow connection from connector.""" - return - - def get_pool(self, volume): - """Return pool name where volume reside on. - - :param volume: The volume hosted by the driver. - :returns: name of the pool where given volume is in. 
- """ - return None - - def update_provider_info(self, volumes, snapshots): - """Get provider info updates from driver. - - :param volumes: List of Cinder volumes to check for updates - :param snapshots: List of Cinder snapshots to check for updates - :returns: tuple (volume_updates, snapshot_updates) - - where volume updates {'id': uuid, provider_id: } - and snapshot updates {'id': uuid, provider_id: } - """ - return None, None - - def migrate_volume(self, context, volume, host): - """Migrate volume stub. - - This is for drivers that don't implement an enhanced version - of this operation. - """ - return (False, None) - - def manage_existing(self, volume, existing_ref): - """Manage exiting stub. - - This is for drivers that don't implement manage_existing(). - """ - msg = _("Manage existing volume not implemented.") - raise NotImplementedError(msg) - - def unmanage(self, volume): - """Unmanage stub. - - This is for drivers that don't implement unmanage(). - """ - msg = _("Unmanage volume not implemented.") - raise NotImplementedError(msg) - - def freeze_backend(self, context): - """Notify the backend that it's frozen. - - We use set to prohibit the creation of any new resources - on the backend, or any modifications to existing items on - a backend. We set/enforce this by not allowing scheduling - of new volumes to the specified backend, and checking at the - api for modifications to resources and failing. - - In most cases the driver may not need to do anything, but - this provides a handle if they need it. - - :param context: security context - :response: True|False - """ - return True - - def thaw_backend(self, context): - """Notify the backend that it's unfrozen/thawed. - - Returns the backend to a normal state after a freeze - operation. - - In most cases the driver may not need to do anything, but - this provides a handle if they need it. 
- - :param context: security context - :response: True|False - """ - return True - - def failover_host(self, context, volumes, secondary_id=None, groups=None): - """Failover a backend to a secondary replication target. - - Instructs a replication capable/configured backend to failover - to one of it's secondary replication targets. host=None is - an acceptable input, and leaves it to the driver to failover - to the only configured target, or to choose a target on it's - own. All of the hosts volumes will be passed on to the driver - in order for it to determine the replicated volumes on the host, - if needed. - - Response is a tuple, including the new target backend_id - AND a lit of dictionaries with volume_id and updates. - Key things to consider (attaching failed-over volumes): - - provider_location - - provider_auth - - provider_id - - replication_status - - :param context: security context - :param volumes: list of volume objects, in case the driver needs - to take action on them in some way - :param secondary_id: Specifies rep target backend to fail over to - :param groups: replication groups - :returns: ID of the backend that was failed-over to, - model update for volumes, and model update for groups - """ - - # Example volume_updates data structure: - # [{'volume_id': , - # 'updates': {'provider_id': 8, - # 'replication_status': 'failed-over', - # 'replication_extended_status': 'whatever',...}},] - # Example group_updates data structure: - # [{'group_id': , - # 'updates': {'replication_status': 'failed-over',...}},] - raise NotImplementedError() - - def failover(self, context, volumes, secondary_id=None, groups=None): - """Like failover but for a host that is clustered. - - Most of the time this will be the exact same behavior as failover_host, - so if it's not overwritten, it is assumed to be the case. 
- """ - return self.failover_host(context, volumes, secondary_id, groups) - - def failover_completed(self, context, active_backend_id=None): - """This method is called after failover for clustered backends.""" - raise NotImplementedError() - - @classmethod - def _is_base_method(cls, method_name): - method = getattr(cls, method_name) - return method.__module__ == getattr(BaseVD, method_name).__module__ - - # Replication Group (Tiramisu) - def enable_replication(self, context, group, volumes): - """Enables replication for a group and volumes in the group. - - :param group: group object - :param volumes: list of volume objects in the group - :returns: model_update - dict of group updates - :returns: volume_model_updates - list of dicts of volume updates - """ - raise NotImplementedError() - - # Replication Group (Tiramisu) - def disable_replication(self, context, group, volumes): - """Disables replication for a group and volumes in the group. - - :param group: group object - :param volumes: list of volume objects in the group - :returns: model_update - dict of group updates - :returns: volume_model_updates - list of dicts of volume updates - """ - raise NotImplementedError() - - # Replication Group (Tiramisu) - def failover_replication(self, context, group, volumes, - secondary_backend_id=None): - """Fails over replication for a group and volumes in the group. - - :param group: group object - :param volumes: list of volume objects in the group - :param secondary_backend_id: backend_id of the secondary site - :returns: model_update - dict of group updates - :returns: volume_model_updates - list of dicts of volume updates - """ - raise NotImplementedError() - - def get_replication_error_status(self, context, groups): - """Returns error info for replicated groups and its volumes. - - :returns: group_model_updates - list of dicts of group updates - - if error happens. For example, a dict of a group can be as follows: - - .. 
code:: python - - {'group_id': xxxx, - 'replication_status': fields.ReplicationStatus.ERROR} - - :returns: volume_model_updates - list of dicts of volume updates - - if error happens. For example, a dict of a volume can be as follows: - - .. code:: python - - {'volume_id': xxxx, - 'replication_status': fields.ReplicationStatus.ERROR} - - """ - return [], [] - - @classmethod - def supports_replication_feature(cls, feature): - """Check if driver class supports replication features. - - Feature is a string that must be one of: - - v2.1 - - a/a - """ - if feature not in cls.REPLICATION_FEATURE_CHECKERS: - return False - - # Check if method is being implemented/overwritten by the driver - method_name = cls.REPLICATION_FEATURE_CHECKERS[feature] - return not cls._is_base_method(method_name) - - def get_replication_updates(self, context): - """Old replication update method, deprecate.""" - raise NotImplementedError() - - def create_group(self, context, group): - """Creates a group. - - :param context: the context of the caller. - :param group: the Group object of the group to be created. - :returns: model_update - - model_update will be in this format: {'status': xxx, ......}. - - If the status in model_update is 'error', the manager will throw - an exception and it will be caught in the try-except block in the - manager. If the driver throws an exception, the manager will also - catch it in the try-except block. The group status in the db will - be changed to 'error'. - - For a successful operation, the driver can either build the - model_update and return it or return None. The group status will - be set to 'available'. - """ - raise NotImplementedError() - - def delete_group(self, context, group, volumes): - """Deletes a group. - - :param context: the context of the caller. - :param group: the Group object of the group to be deleted. - :param volumes: a list of Volume objects in the group. 
- :returns: model_update, volumes_model_update - - param volumes is a list of objects retrieved from the db. It cannot - be assigned to volumes_model_update. volumes_model_update is a list - of dictionaries. It has to be built by the driver. An entry will be - in this format: {'id': xxx, 'status': xxx, ......}. model_update - will be in this format: {'status': xxx, ......}. - - The driver should populate volumes_model_update and model_update - and return them. - - The manager will check volumes_model_update and update db accordingly - for each volume. If the driver successfully deleted some volumes - but failed to delete others, it should set statuses of the volumes - accordingly so that the manager can update db correctly. - - If the status in any entry of volumes_model_update is 'error_deleting' - or 'error', the status in model_update will be set to the same if it - is not already 'error_deleting' or 'error'. - - If the status in model_update is 'error_deleting' or 'error', the - manager will raise an exception and the status of the group will be - set to 'error' in the db. If volumes_model_update is not returned by - the driver, the manager will set the status of every volume in the - group to 'error' in the except block. - - If the driver raises an exception during the operation, it will be - caught by the try-except block in the manager. The statuses of the - group and all volumes in it will be set to 'error'. - - For a successful operation, the driver can either build the - model_update and volumes_model_update and return them or - return None, None. The statuses of the group and all volumes - will be set to 'deleted' after the manager deletes them from db. - """ - raise NotImplementedError() - - def update_group(self, context, group, - add_volumes=None, remove_volumes=None): - """Updates a group. - - :param context: the context of the caller. - :param group: the Group object of the group to be updated. 
- :param add_volumes: a list of Volume objects to be added. - :param remove_volumes: a list of Volume objects to be removed. - :returns: model_update, add_volumes_update, remove_volumes_update - - model_update is a dictionary that the driver wants the manager - to update upon a successful return. If None is returned, the manager - will set the status to 'available'. - - add_volumes_update and remove_volumes_update are lists of dictionaries - that the driver wants the manager to update upon a successful return. - Note that each entry requires a {'id': xxx} so that the correct - volume entry can be updated. If None is returned, the volume will - remain its original status. Also note that you cannot directly - assign add_volumes to add_volumes_update as add_volumes is a list of - volume objects and cannot be used for db update directly. Same with - remove_volumes. - - If the driver throws an exception, the status of the group as well as - those of the volumes to be added/removed will be set to 'error'. - """ - raise NotImplementedError() - - def create_group_from_src(self, context, group, volumes, - group_snapshot=None, snapshots=None, - source_group=None, source_vols=None): - """Creates a group from source. - - :param context: the context of the caller. - :param group: the Group object to be created. - :param volumes: a list of Volume objects in the group. - :param group_snapshot: the GroupSnapshot object as source. - :param snapshots: a list of Snapshot objects in group_snapshot. - :param source_group: the Group object as source. - :param source_vols: a list of Volume objects in the source_group. - :returns: model_update, volumes_model_update - - The source can be group_snapshot or a source_group. - - param volumes is a list of objects retrieved from the db. It cannot - be assigned to volumes_model_update. volumes_model_update is a list - of dictionaries. It has to be built by the driver. An entry will be - in this format: {'id': xxx, 'status': xxx, ......}. 
model_update - will be in this format: {'status': xxx, ......}. - - To be consistent with other volume operations, the manager will - assume the operation is successful if no exception is thrown by - the driver. For a successful operation, the driver can either build - the model_update and volumes_model_update and return them or - return None, None. - """ - raise NotImplementedError() - - def create_group_snapshot(self, context, group_snapshot, snapshots): - """Creates a group_snapshot. - - :param context: the context of the caller. - :param group_snapshot: the GroupSnapshot object to be created. - :param snapshots: a list of Snapshot objects in the group_snapshot. - :returns: model_update, snapshots_model_update - - param snapshots is a list of Snapshot objects. It cannot be assigned - to snapshots_model_update. snapshots_model_update is a list of - dictionaries. It has to be built by the driver. An entry will be - in this format: {'id': xxx, 'status': xxx, ......}. model_update - will be in this format: {'status': xxx, ......}. - - The driver should populate snapshots_model_update and model_update - and return them. - - The manager will check snapshots_model_update and update db accordingly - for each snapshot. If the driver successfully deleted some snapshots - but failed to delete others, it should set statuses of the snapshots - accordingly so that the manager can update db correctly. - - If the status in any entry of snapshots_model_update is 'error', the - status in model_update will be set to the same if it is not already - 'error'. - - If the status in model_update is 'error', the manager will raise an - exception and the status of group_snapshot will be set to 'error' in - the db. If snapshots_model_update is not returned by the driver, the - manager will set the status of every snapshot to 'error' in the except - block. 
- - If the driver raises an exception during the operation, it will be - caught by the try-except block in the manager and the statuses of - group_snapshot and all snapshots will be set to 'error'. - - For a successful operation, the driver can either build the - model_update and snapshots_model_update and return them or - return None, None. The statuses of group_snapshot and all snapshots - will be set to 'available' at the end of the manager function. - """ - raise NotImplementedError() - - def delete_group_snapshot(self, context, group_snapshot, snapshots): - """Deletes a group_snapshot. - - :param context: the context of the caller. - :param group_snapshot: the GroupSnapshot object to be deleted. - :param snapshots: a list of Snapshot objects in the group_snapshot. - :returns: model_update, snapshots_model_update - - param snapshots is a list of objects. It cannot be assigned to - snapshots_model_update. snapshots_model_update is a list of of - dictionaries. It has to be built by the driver. An entry will be - in this format: {'id': xxx, 'status': xxx, ......}. model_update - will be in this format: {'status': xxx, ......}. - - The driver should populate snapshots_model_update and model_update - and return them. - - The manager will check snapshots_model_update and update db accordingly - for each snapshot. If the driver successfully deleted some snapshots - but failed to delete others, it should set statuses of the snapshots - accordingly so that the manager can update db correctly. - - If the status in any entry of snapshots_model_update is - 'error_deleting' or 'error', the status in model_update will be set to - the same if it is not already 'error_deleting' or 'error'. - - If the status in model_update is 'error_deleting' or 'error', the - manager will raise an exception and the status of group_snapshot will - be set to 'error' in the db. 
If snapshots_model_update is not returned - by the driver, the manager will set the status of every snapshot to - 'error' in the except block. - - If the driver raises an exception during the operation, it will be - caught by the try-except block in the manager and the statuses of - group_snapshot and all snapshots will be set to 'error'. - - For a successful operation, the driver can either build the - model_update and snapshots_model_update and return them or - return None, None. The statuses of group_snapshot and all snapshots - will be set to 'deleted' after the manager deletes them from db. - """ - raise NotImplementedError() - - def extend_volume(self, volume, new_size): - msg = _("Extend volume not implemented") - raise NotImplementedError(msg) - - def accept_transfer(self, context, volume, new_user, new_project): - pass - - -class LocalVD(object): - """This class has been deprecated and should not be inherited.""" - pass - - -class SnapshotVD(object): - """This class has been deprecated and should not be inherited.""" - pass - - -class ConsistencyGroupVD(object): - """This class has been deprecated and should not be inherited.""" - pass - - -@six.add_metaclass(abc.ABCMeta) -class CloneableImageVD(object): - @abc.abstractmethod - def clone_image(self, volume, image_location, - image_id, image_meta, image_service): - """Create a volume efficiently from an existing image. - - image_location is a string whose format depends on the - image service backend in use. The driver should use it - to determine whether cloning is possible. - - image_id is a string which represents id of the image. - It can be used by the driver to introspect internal - stores or registry to do an efficient image clone. - - image_meta is a dictionary that includes 'disk_format' (e.g. - raw, qcow2) and other image attributes that allow drivers to - decide whether they can clone the image without first requiring - conversion. - - image_service is the reference of the image_service to use. 
- Note that this is needed to be passed here for drivers that - will want to fetch images from the image service directly. - - Returns a dict of volume properties eg. provider_location, - boolean indicating whether cloning occurred - """ - return None, False - - -@six.add_metaclass(abc.ABCMeta) -class MigrateVD(object): - @abc.abstractmethod - def migrate_volume(self, context, volume, host): - """Migrate the volume to the specified host. - - Returns a boolean indicating whether the migration occurred, as well as - model_update. - - :param context: Context - :param volume: A dictionary describing the volume to migrate - :param host: A dictionary describing the host to migrate to, where - host['host'] is its name, and host['capabilities'] is a - dictionary of its reported capabilities. - """ - return (False, None) - - -class ExtendVD(object): - """This class has been deprecated and should not be inherited.""" - pass - - -class TransferVD(object): - """This class has been deprecated and should not be inherited.""" - pass - - -@six.add_metaclass(abc.ABCMeta) -class ManageableVD(object): - @abc.abstractmethod - def manage_existing(self, volume, existing_ref): - """Brings an existing backend storage object under Cinder management. - - existing_ref is passed straight through from the API request's - manage_existing_ref value, and it is up to the driver how this should - be interpreted. It should be sufficient to identify a storage object - that the driver should somehow associate with the newly-created cinder - volume structure. - - There are two ways to do this: - - 1. Rename the backend storage object so that it matches the, - volume['name'] which is how drivers traditionally map between a - cinder volume and the associated backend storage object. - - 2. Place some metadata on the volume, or somewhere in the backend, that - allows other driver requests (e.g. delete, clone, attach, detach...) - to locate the backend storage object when required. 
- - If the existing_ref doesn't make sense, or doesn't refer to an existing - backend storage object, raise a ManageExistingInvalidReference - exception. - - The volume may have a volume_type, and the driver can inspect that and - compare against the properties of the referenced backend storage - object. If they are incompatible, raise a - ManageExistingVolumeTypeMismatch, specifying a reason for the failure. - - :param volume: Cinder volume to manage - :param existing_ref: Driver-specific information used to identify a - volume - """ - return - - @abc.abstractmethod - def manage_existing_get_size(self, volume, existing_ref): - """Return size of volume to be managed by manage_existing. - - When calculating the size, round up to the next GB. - - :param volume: Cinder volume to manage - :param existing_ref: Driver-specific information used to identify a - volume - :returns size: Volume size in GiB (integer) - """ - return - - def get_manageable_volumes(self, cinder_volumes, marker, limit, offset, - sort_keys, sort_dirs): - """List volumes on the backend available for management by Cinder. - - Returns a list of dictionaries, each specifying a volume in the host, - with the following keys: - - reference (dictionary): The reference for a volume, which can be - passed to "manage_existing". - - size (int): The size of the volume according to the storage - backend, rounded up to the nearest GB. - - safe_to_manage (boolean): Whether or not this volume is safe to - manage according to the storage backend. For example, is the volume - in use or invalid for any reason. - - reason_not_safe (string): If safe_to_manage is False, the reason why. - - cinder_id (string): If already managed, provide the Cinder ID. - - extra_info (string): Any extra information to return to the user - - :param cinder_volumes: A list of volumes in this host that Cinder - currently manages, used to determine if - a volume is manageable or not. 
- :param marker: The last item of the previous page; we return the - next results after this value (after sorting) - :param limit: Maximum number of items to return - :param offset: Number of items to skip after marker - :param sort_keys: List of keys to sort results by (valid keys are - 'identifier' and 'size') - :param sort_dirs: List of directions to sort by, corresponding to - sort_keys (valid directions are 'asc' and 'desc') - """ - return [] - - @abc.abstractmethod - def unmanage(self, volume): - """Removes the specified volume from Cinder management. - - Does not delete the underlying backend storage object. - - For most drivers, this will not need to do anything. However, some - drivers might use this call as an opportunity to clean up any - Cinder-specific configuration that they have associated with the - backend storage object. - - :param volume: Cinder volume to unmanage - """ - pass - - -@six.add_metaclass(abc.ABCMeta) -class ManageableSnapshotsVD(object): - # NOTE: Can't use abstractmethod before all drivers implement it - def manage_existing_snapshot(self, snapshot, existing_ref): - """Brings an existing backend storage object under Cinder management. - - existing_ref is passed straight through from the API request's - manage_existing_ref value, and it is up to the driver how this should - be interpreted. It should be sufficient to identify a storage object - that the driver should somehow associate with the newly-created cinder - snapshot structure. - - There are two ways to do this: - - 1. Rename the backend storage object so that it matches the - snapshot['name'] which is how drivers traditionally map between a - cinder snapshot and the associated backend storage object. - - 2. Place some metadata on the snapshot, or somewhere in the backend, - that allows other driver requests (e.g. delete) to locate the - backend storage object when required. 
- - If the existing_ref doesn't make sense, or doesn't refer to an existing - backend storage object, raise a ManageExistingInvalidReference - exception. - - :param snapshot: Cinder volume snapshot to manage - :param existing_ref: Driver-specific information used to identify a - volume snapshot - """ - return - - # NOTE: Can't use abstractmethod before all drivers implement it - def manage_existing_snapshot_get_size(self, snapshot, existing_ref): - """Return size of snapshot to be managed by manage_existing. - - When calculating the size, round up to the next GB. - - :param snapshot: Cinder volume snapshot to manage - :param existing_ref: Driver-specific information used to identify a - volume snapshot - :returns size: Volume snapshot size in GiB (integer) - """ - return - - def get_manageable_snapshots(self, cinder_snapshots, marker, limit, offset, - sort_keys, sort_dirs): - """List snapshots on the backend available for management by Cinder. - - Returns a list of dictionaries, each specifying a snapshot in the host, - with the following keys: - - reference (dictionary): The reference for a snapshot, which can be - passed to "manage_existing_snapshot". - - size (int): The size of the snapshot according to the storage - backend, rounded up to the nearest GB. - - safe_to_manage (boolean): Whether or not this snapshot is safe to - manage according to the storage backend. For example, is the snapshot - in use or invalid for any reason. - - reason_not_safe (string): If safe_to_manage is False, the reason why. - - cinder_id (string): If already managed, provide the Cinder ID. - - extra_info (string): Any extra information to return to the user - - source_reference (string): Similar to "reference", but for the - snapshot's source volume. - - :param cinder_snapshots: A list of snapshots in this host that Cinder - currently manages, used to determine if - a snapshot is manageable or not. 
- :param marker: The last item of the previous page; we return the - next results after this value (after sorting) - :param limit: Maximum number of items to return - :param offset: Number of items to skip after marker - :param sort_keys: List of keys to sort results by (valid keys are - 'identifier' and 'size') - :param sort_dirs: List of directions to sort by, corresponding to - sort_keys (valid directions are 'asc' and 'desc') - - """ - return [] - - # NOTE: Can't use abstractmethod before all drivers implement it - def unmanage_snapshot(self, snapshot): - """Removes the specified snapshot from Cinder management. - - Does not delete the underlying backend storage object. - - For most drivers, this will not need to do anything. However, some - drivers might use this call as an opportunity to clean up any - Cinder-specific configuration that they have associated with the - backend storage object. - - :param snapshot: Cinder volume snapshot to unmanage - """ - pass - - -class VolumeDriver(ManageableVD, CloneableImageVD, ManageableSnapshotsVD, - MigrateVD, BaseVD): - def check_for_setup_error(self): - raise NotImplementedError() - - def create_volume(self, volume): - raise NotImplementedError() - - def create_volume_from_snapshot(self, volume, snapshot): - """Creates a volume from a snapshot. - - If volume_type extra specs includes 'replication: True' - the driver needs to create a volume replica (secondary), - and setup replication between the newly created volume and - the secondary volume. 
- """ - - raise NotImplementedError() - - def create_replica_test_volume(self, volume, src_vref): - raise NotImplementedError() - - def delete_volume(self, volume): - raise NotImplementedError() - - def create_snapshot(self, snapshot): - """Creates a snapshot.""" - raise NotImplementedError() - - def delete_snapshot(self, snapshot): - """Deletes a snapshot.""" - raise NotImplementedError() - - def local_path(self, volume): - raise NotImplementedError() - - def clear_download(self, context, volume): - pass - - def extend_volume(self, volume, new_size): - msg = _("Extend volume not implemented") - raise NotImplementedError(msg) - - def manage_existing(self, volume, existing_ref): - msg = _("Manage existing volume not implemented.") - raise NotImplementedError(msg) - - def revert_to_snapshot(self, context, volume, snapshot): - """Revert volume to snapshot. - - Note: the revert process should not change the volume's - current size, that means if the driver shrank - the volume during the process, it should extend the - volume internally. 
- """ - msg = _("Revert volume to snapshot not implemented.") - raise NotImplementedError(msg) - - def manage_existing_get_size(self, volume, existing_ref): - msg = _("Manage existing volume not implemented.") - raise NotImplementedError(msg) - - def get_manageable_volumes(self, cinder_volumes, marker, limit, offset, - sort_keys, sort_dirs): - msg = _("Get manageable volumes not implemented.") - raise NotImplementedError(msg) - - def unmanage(self, volume): - pass - - def manage_existing_snapshot(self, snapshot, existing_ref): - msg = _("Manage existing snapshot not implemented.") - raise NotImplementedError(msg) - - def manage_existing_snapshot_get_size(self, snapshot, existing_ref): - msg = _("Manage existing snapshot not implemented.") - raise NotImplementedError(msg) - - def get_manageable_snapshots(self, cinder_snapshots, marker, limit, offset, - sort_keys, sort_dirs): - msg = _("Get manageable snapshots not implemented.") - raise NotImplementedError(msg) - - def unmanage_snapshot(self, snapshot): - """Unmanage the specified snapshot from Cinder management.""" - - def retype(self, context, volume, new_type, diff, host): - return False, None - - # ####### Interface methods for DataPath (Connector) ######## - def ensure_export(self, context, volume): - raise NotImplementedError() - - def create_export(self, context, volume, connector): - raise NotImplementedError() - - def create_export_snapshot(self, context, snapshot, connector): - raise NotImplementedError() - - def remove_export(self, context, volume): - raise NotImplementedError() - - def remove_export_snapshot(self, context, snapshot): - raise NotImplementedError() - - def initialize_connection(self, volume, connector, **kwargs): - raise NotImplementedError() - - def initialize_connection_snapshot(self, snapshot, connector, **kwargs): - """Allow connection from connector for a snapshot.""" - - def terminate_connection(self, volume, connector, **kwargs): - """Disallow connection from connector - - :param 
volume: The volume to be disconnected. - :param connector: A dictionary describing the connection with details - about the initiator. Can be None. - """ - - def terminate_connection_snapshot(self, snapshot, connector, **kwargs): - """Disallow connection from connector for a snapshot.""" - - def create_consistencygroup(self, context, group): - """Creates a consistencygroup. - - :param context: the context of the caller. - :param group: the dictionary of the consistency group to be created. - :returns: model_update - - model_update will be in this format: {'status': xxx, ......}. - - If the status in model_update is 'error', the manager will throw - an exception and it will be caught in the try-except block in the - manager. If the driver throws an exception, the manager will also - catch it in the try-except block. The group status in the db will - be changed to 'error'. - - For a successful operation, the driver can either build the - model_update and return it or return None. The group status will - be set to 'available'. - """ - raise NotImplementedError() - - def create_consistencygroup_from_src(self, context, group, volumes, - cgsnapshot=None, snapshots=None, - source_cg=None, source_vols=None): - """Creates a consistencygroup from source. - - :param context: the context of the caller. - :param group: the dictionary of the consistency group to be created. - :param volumes: a list of volume dictionaries in the group. - :param cgsnapshot: the dictionary of the cgsnapshot as source. - :param snapshots: a list of snapshot dictionaries in the cgsnapshot. - :param source_cg: the dictionary of a consistency group as source. - :param source_vols: a list of volume dictionaries in the source_cg. - :returns: model_update, volumes_model_update - - The source can be cgsnapshot or a source cg. - - param volumes is retrieved directly from the db. It is a list of - cinder.db.sqlalchemy.models.Volume to be precise. It cannot be - assigned to volumes_model_update. 
volumes_model_update is a list of - dictionaries. It has to be built by the driver. An entry will be - in this format: {'id': xxx, 'status': xxx, ......}. model_update - will be in this format: {'status': xxx, ......}. - - To be consistent with other volume operations, the manager will - assume the operation is successful if no exception is thrown by - the driver. For a successful operation, the driver can either build - the model_update and volumes_model_update and return them or - return None, None. - """ - raise NotImplementedError() - - def delete_consistencygroup(self, context, group, volumes): - """Deletes a consistency group. - - :param context: the context of the caller. - :param group: the dictionary of the consistency group to be deleted. - :param volumes: a list of volume dictionaries in the group. - :returns: model_update, volumes_model_update - - param volumes is retrieved directly from the db. It is a list of - cinder.db.sqlalchemy.models.Volume to be precise. It cannot be - assigned to volumes_model_update. volumes_model_update is a list of - dictionaries. It has to be built by the driver. An entry will be - in this format: {'id': xxx, 'status': xxx, ......}. model_update - will be in this format: {'status': xxx, ......}. - - The driver should populate volumes_model_update and model_update - and return them. - - The manager will check volumes_model_update and update db accordingly - for each volume. If the driver successfully deleted some volumes - but failed to delete others, it should set statuses of the volumes - accordingly so that the manager can update db correctly. - - If the status in any entry of volumes_model_update is 'error_deleting' - or 'error', the status in model_update will be set to the same if it - is not already 'error_deleting' or 'error'. - - If the status in model_update is 'error_deleting' or 'error', the - manager will raise an exception and the status of the group will be - set to 'error' in the db. 
If volumes_model_update is not returned by - the driver, the manager will set the status of every volume in the - group to 'error' in the except block. - - If the driver raises an exception during the operation, it will be - caught by the try-except block in the manager. The statuses of the - group and all volumes in it will be set to 'error'. - - For a successful operation, the driver can either build the - model_update and volumes_model_update and return them or - return None, None. The statuses of the group and all volumes - will be set to 'deleted' after the manager deletes them from db. - """ - raise NotImplementedError() - - def update_consistencygroup(self, context, group, - add_volumes=None, remove_volumes=None): - """Updates a consistency group. - - :param context: the context of the caller. - :param group: the dictionary of the consistency group to be updated. - :param add_volumes: a list of volume dictionaries to be added. - :param remove_volumes: a list of volume dictionaries to be removed. - :returns: model_update, add_volumes_update, remove_volumes_update - - model_update is a dictionary that the driver wants the manager - to update upon a successful return. If None is returned, the manager - will set the status to 'available'. - - add_volumes_update and remove_volumes_update are lists of dictionaries - that the driver wants the manager to update upon a successful return. - Note that each entry requires a {'id': xxx} so that the correct - volume entry can be updated. If None is returned, the volume will - remain its original status. Also note that you cannot directly - assign add_volumes to add_volumes_update as add_volumes is a list of - cinder.db.sqlalchemy.models.Volume objects and cannot be used for - db update directly. Same with remove_volumes. - - If the driver throws an exception, the status of the group as well as - those of the volumes to be added/removed will be set to 'error'. 
- """ - raise NotImplementedError() - - def create_cgsnapshot(self, context, cgsnapshot, snapshots): - """Creates a cgsnapshot. - - :param context: the context of the caller. - :param cgsnapshot: the dictionary of the cgsnapshot to be created. - :param snapshots: a list of snapshot dictionaries in the cgsnapshot. - :returns: model_update, snapshots_model_update - - param snapshots is retrieved directly from the db. It is a list of - cinder.db.sqlalchemy.models.Snapshot to be precise. It cannot be - assigned to snapshots_model_update. snapshots_model_update is a list - of dictionaries. It has to be built by the driver. An entry will be - in this format: {'id': xxx, 'status': xxx, ......}. model_update - will be in this format: {'status': xxx, ......}. - - The driver should populate snapshots_model_update and model_update - and return them. - - The manager will check snapshots_model_update and update db accordingly - for each snapshot. If the driver successfully deleted some snapshots - but failed to delete others, it should set statuses of the snapshots - accordingly so that the manager can update db correctly. - - If the status in any entry of snapshots_model_update is 'error', the - status in model_update will be set to the same if it is not already - 'error'. - - If the status in model_update is 'error', the manager will raise an - exception and the status of cgsnapshot will be set to 'error' in the - db. If snapshots_model_update is not returned by the driver, the - manager will set the status of every snapshot to 'error' in the except - block. - - If the driver raises an exception during the operation, it will be - caught by the try-except block in the manager and the statuses of - cgsnapshot and all snapshots will be set to 'error'. - - For a successful operation, the driver can either build the - model_update and snapshots_model_update and return them or - return None, None. 
The statuses of cgsnapshot and all snapshots - will be set to 'available' at the end of the manager function. - """ - raise NotImplementedError() - - def delete_cgsnapshot(self, context, cgsnapshot, snapshots): - """Deletes a cgsnapshot. - - :param context: the context of the caller. - :param cgsnapshot: the dictionary of the cgsnapshot to be deleted. - :param snapshots: a list of snapshot dictionaries in the cgsnapshot. - :returns: model_update, snapshots_model_update - - param snapshots is retrieved directly from the db. It is a list of - cinder.db.sqlalchemy.models.Snapshot to be precise. It cannot be - assigned to snapshots_model_update. snapshots_model_update is a list - of dictionaries. It has to be built by the driver. An entry will be - in this format: {'id': xxx, 'status': xxx, ......}. model_update - will be in this format: {'status': xxx, ......}. - - The driver should populate snapshots_model_update and model_update - and return them. - - The manager will check snapshots_model_update and update db accordingly - for each snapshot. If the driver successfully deleted some snapshots - but failed to delete others, it should set statuses of the snapshots - accordingly so that the manager can update db correctly. - - If the status in any entry of snapshots_model_update is - 'error_deleting' or 'error', the status in model_update will be set to - the same if it is not already 'error_deleting' or 'error'. - - If the status in model_update is 'error_deleting' or 'error', the - manager will raise an exception and the status of cgsnapshot will be - set to 'error' in the db. If snapshots_model_update is not returned by - the driver, the manager will set the status of every snapshot to - 'error' in the except block. - - If the driver raises an exception during the operation, it will be - caught by the try-except block in the manager and the statuses of - cgsnapshot and all snapshots will be set to 'error'. 
- - For a successful operation, the driver can either build the - model_update and snapshots_model_update and return them or - return None, None. The statuses of cgsnapshot and all snapshots - will be set to 'deleted' after the manager deletes them from db. - """ - raise NotImplementedError() - - def clone_image(self, volume, image_location, image_id, image_meta, - image_service): - return None, False - - def get_pool(self, volume): - """Return pool name where volume reside on. - - :param volume: The volume hosted by the driver. - :returns: name of the pool where given volume is in. - """ - return None - - def migrate_volume(self, context, volume, host): - return (False, None) - - def accept_transfer(self, context, volume, new_user, new_project): - pass - - -class ProxyVD(object): - """Proxy Volume Driver to mark proxy drivers - - If a driver uses a proxy class (e.g. by using __setattr__ and - __getattr__) without directly inheriting from base volume driver this - class can help marking them and retrieve the actual used driver object. - """ - def _get_driver(self): - """Returns the actual driver object. - - Can be overloaded by the proxy. - """ - return getattr(self, "driver", None) - - -class ISCSIDriver(VolumeDriver): - """Executes commands relating to ISCSI volumes. - - We make use of model provider properties as follows: - - ``provider_location`` - if present, contains the iSCSI target information in the same - format as an ietadm discovery - i.e. ':, ' - - ``provider_auth`` - if present, contains a space-separated triple: - ' '. - `CHAP` is the only auth_method in use at the moment. - """ - - def __init__(self, *args, **kwargs): - super(ISCSIDriver, self).__init__(*args, **kwargs) - - def _do_iscsi_discovery(self, volume): - # TODO(justinsb): Deprecate discovery and use stored info - # NOTE(justinsb): Discovery won't work with CHAP-secured targets (?) 
- LOG.warning("ISCSI provider_location not stored, using discovery") - - volume_name = volume['name'] - - try: - # NOTE(griff) We're doing the split straight away which should be - # safe since using '@' in hostname is considered invalid - - (out, _err) = self._execute('iscsiadm', '-m', 'discovery', - '-t', 'sendtargets', '-p', - volume['host'].split('@')[0], - run_as_root=True) - except processutils.ProcessExecutionError as ex: - LOG.error("ISCSI discovery attempt failed for:%s", - volume['host'].split('@')[0]) - LOG.debug("Error from iscsiadm -m discovery: %s", ex.stderr) - return None - - for target in out.splitlines(): - if (self.configuration.iscsi_ip_address in target - and volume_name in target): - return target - return None - - def _get_iscsi_properties(self, volume, multipath=False): - """Gets iscsi configuration - - We ideally get saved information in the volume entity, but fall back - to discovery if need be. Discovery may be completely removed in future - The properties are: - - :target_discovered: boolean indicating whether discovery was used - - :target_iqn: the IQN of the iSCSI target - - :target_portal: the portal of the iSCSI target - - :target_lun: the lun of the iSCSI target - - :volume_id: the id of the volume (currently used by xen) - - :auth_method:, :auth_username:, :auth_password: - - the authentication details. Right now, either auth_method is not - present meaning no authentication, or auth_method == `CHAP` - meaning use CHAP with the specified credentials. - - :discard: boolean indicating if discard is supported - - In some of drivers that support multiple connections (for multipath - and for single path with failover on connection failure), it returns - :target_iqns, :target_portals, :target_luns, which contain lists of - multiple values. The main portal information is also returned in - :target_iqn, :target_portal, :target_lun for backward compatibility. 
- - Note that some of drivers don't return :target_portals even if they - support multipath. Then the connector should use sendtargets discovery - to find the other portals if it supports multipath. - """ - - properties = {} - - location = volume['provider_location'] - - if location: - # provider_location is the same format as iSCSI discovery output - properties['target_discovered'] = False - else: - location = self._do_iscsi_discovery(volume) - - if not location: - msg = (_("Could not find iSCSI export for volume %s") % - (volume['name'])) - raise exception.InvalidVolume(reason=msg) - - LOG.debug("ISCSI Discovery: Found %s", location) - properties['target_discovered'] = True - - results = location.split(" ") - portals = results[0].split(",")[0].split(";") - iqn = results[1] - nr_portals = len(portals) - - try: - lun = int(results[2]) - except (IndexError, ValueError): - if (self.configuration.volume_driver == - 'cinder.volume.drivers.lvm.ThinLVMVolumeDriver' and - self.configuration.iscsi_helper == 'tgtadm'): - lun = 1 - else: - lun = 0 - - if nr_portals > 1: - properties['target_portals'] = portals - properties['target_iqns'] = [iqn] * nr_portals - properties['target_luns'] = [lun] * nr_portals - properties['target_portal'] = portals[0] - properties['target_iqn'] = iqn - properties['target_lun'] = lun - - properties['volume_id'] = volume['id'] - - auth = volume['provider_auth'] - if auth: - (auth_method, auth_username, auth_secret) = auth.split() - - properties['auth_method'] = auth_method - properties['auth_username'] = auth_username - properties['auth_password'] = auth_secret - - geometry = volume.get('provider_geometry', None) - if geometry: - (physical_block_size, logical_block_size) = geometry.split() - properties['physical_block_size'] = physical_block_size - properties['logical_block_size'] = logical_block_size - - encryption_key_id = volume.get('encryption_key_id', None) - properties['encrypted'] = encryption_key_id is not None - - return properties - - 
def _run_iscsiadm(self, iscsi_properties, iscsi_command, **kwargs): - check_exit_code = kwargs.pop('check_exit_code', 0) - (out, err) = self._execute('iscsiadm', '-m', 'node', '-T', - iscsi_properties['target_iqn'], - '-p', iscsi_properties['target_portal'], - *iscsi_command, run_as_root=True, - check_exit_code=check_exit_code) - LOG.debug("iscsiadm %(command)s: stdout=%(out)s stderr=%(err)s", - {'command': iscsi_command, 'out': out, 'err': err}) - return (out, err) - - def _run_iscsiadm_bare(self, iscsi_command, **kwargs): - check_exit_code = kwargs.pop('check_exit_code', 0) - (out, err) = self._execute('iscsiadm', - *iscsi_command, - run_as_root=True, - check_exit_code=check_exit_code) - LOG.debug("iscsiadm %(command)s: stdout=%(out)s stderr=%(err)s", - {'command': iscsi_command, 'out': out, 'err': err}) - return (out, err) - - def _iscsiadm_update(self, iscsi_properties, property_key, property_value, - **kwargs): - iscsi_command = ('--op', 'update', '-n', property_key, - '-v', property_value) - return self._run_iscsiadm(iscsi_properties, iscsi_command, **kwargs) - - def initialize_connection(self, volume, connector): - """Initializes the connection and returns connection info. - - The iscsi driver returns a driver_volume_type of 'iscsi'. - The format of the driver data is defined in _get_iscsi_properties. 
- Example return value:: - - { - 'driver_volume_type': 'iscsi' - 'data': { - 'target_discovered': True, - 'target_iqn': 'iqn.2010-10.org.openstack:volume-00000001', - 'target_portal': '127.0.0.0.1:3260', - 'volume_id': 1, - 'discard': False, - } - } - - If the backend driver supports multiple connections for multipath and - for single path with failover, "target_portals", "target_iqns", - "target_luns" are also populated:: - - { - 'driver_volume_type': 'iscsi' - 'data': { - 'target_discovered': False, - 'target_iqn': 'iqn.2010-10.org.openstack:volume1', - 'target_iqns': ['iqn.2010-10.org.openstack:volume1', - 'iqn.2010-10.org.openstack:volume1-2'], - 'target_portal': '10.0.0.1:3260', - 'target_portals': ['10.0.0.1:3260', '10.0.1.1:3260'] - 'target_lun': 1, - 'target_luns': [1, 1], - 'volume_id': 1, - 'discard': False, - } - } - """ - # NOTE(jdg): Yes, this is duplicated in the volume/target - # drivers, for now leaving it as there are 3'rd party - # drivers that don't use target drivers, but inherit from - # this base class and use this init data - iscsi_properties = self._get_iscsi_properties(volume) - return { - 'driver_volume_type': - self.configuration.safe_get('iscsi_protocol'), - 'data': iscsi_properties - } - - def validate_connector(self, connector): - # iSCSI drivers require the initiator information - required = 'initiator' - if required not in connector: - LOG.error('The volume driver requires %(data)s ' - 'in the connector.', {'data': required}) - raise exception.InvalidConnectorException(missing=required) - - def terminate_connection(self, volume, connector, **kwargs): - pass - - def get_volume_stats(self, refresh=False): - """Get volume stats. - - If 'refresh' is True, run update the stats first. 
- """ - if refresh: - self._update_volume_stats() - - return self._stats - - def _update_volume_stats(self): - """Retrieve stats info from volume group.""" - - LOG.debug("Updating volume stats...") - data = {} - backend_name = self.configuration.safe_get('volume_backend_name') - data["volume_backend_name"] = backend_name or 'Generic_iSCSI' - data["vendor_name"] = 'Open Source' - data["driver_version"] = '1.0' - data["storage_protocol"] = 'iSCSI' - data["pools"] = [] - data["replication_enabled"] = False - - self._update_pools_and_stats(data) - - -class ISERDriver(ISCSIDriver): - """Executes commands relating to ISER volumes. - - We make use of model provider properties as follows: - - ``provider_location`` - if present, contains the iSER target information in the same - format as an ietadm discovery - i.e. ':, ' - - ``provider_auth`` - if present, contains a space-separated triple: - ' '. - `CHAP` is the only auth_method in use at the moment. - """ - def __init__(self, *args, **kwargs): - super(ISERDriver, self).__init__(*args, **kwargs) - # for backward compatibility - self.configuration.num_volume_device_scan_tries = \ - self.configuration.num_iser_scan_tries - self.configuration.iscsi_target_prefix = \ - self.configuration.iser_target_prefix - self.configuration.iscsi_ip_address = \ - self.configuration.iser_ip_address - self.configuration.iscsi_port = self.configuration.iser_port - - def initialize_connection(self, volume, connector): - """Initializes the connection and returns connection info. - - The iser driver returns a driver_volume_type of 'iser'. - The format of the driver data is defined in _get_iser_properties. - Example return value: - - .. 
code-block:: default - - { - 'driver_volume_type': 'iser', - 'data': { - 'target_discovered': True, - 'target_iqn': - 'iqn.2010-10.org.iser.openstack:volume-00000001', - 'target_portal': '127.0.0.0.1:3260', - 'volume_id': 1 - } - } - - """ - iser_properties = self._get_iscsi_properties(volume) - return { - 'driver_volume_type': 'iser', - 'data': iser_properties - } - - def _update_volume_stats(self): - """Retrieve stats info from volume group.""" - - LOG.debug("Updating volume stats...") - data = {} - backend_name = self.configuration.safe_get('volume_backend_name') - data["volume_backend_name"] = backend_name or 'Generic_iSER' - data["vendor_name"] = 'Open Source' - data["driver_version"] = '1.0' - data["storage_protocol"] = 'iSER' - data["pools"] = [] - - self._update_pools_and_stats(data) - - -class FibreChannelDriver(VolumeDriver): - """Executes commands relating to Fibre Channel volumes.""" - def __init__(self, *args, **kwargs): - super(FibreChannelDriver, self).__init__(*args, **kwargs) - - def initialize_connection(self, volume, connector): - """Initializes the connection and returns connection info. - - The driver returns a driver_volume_type of 'fibre_channel'. - The target_wwn can be a single entry or a list of wwns that - correspond to the list of remote wwn(s) that will export the volume. - Example return values: - - .. code-block:: default - - { - 'driver_volume_type': 'fibre_channel', - 'data': { - 'target_discovered': True, - 'target_lun': 1, - 'target_wwn': '1234567890123', - 'discard': False - } - } - - or - - .. code-block:: default - - { - 'driver_volume_type': 'fibre_channel', - 'data': { - 'target_discovered': True, - 'target_lun': 1, - 'target_wwn': ['1234567890123', '0987654321321'], - 'discard': False - } - } - - """ - msg = _("Driver must implement initialize_connection") - raise NotImplementedError(msg) - - def validate_connector(self, connector): - """Fail if connector doesn't contain all the data needed by driver. 
- - Do a check on the connector and ensure that it has wwnns, wwpns. - """ - self.validate_connector_has_setting(connector, 'wwpns') - self.validate_connector_has_setting(connector, 'wwnns') - - @staticmethod - def validate_connector_has_setting(connector, setting): - """Test for non-empty setting in connector.""" - if setting not in connector or not connector[setting]: - LOG.error( - "FibreChannelDriver validate_connector failed. " - "No '%(setting)s'. Make sure HBA state is Online.", - {'setting': setting}) - raise exception.InvalidConnectorException(missing=setting) - - def get_volume_stats(self, refresh=False): - """Get volume stats. - - If 'refresh' is True, run update the stats first. - """ - if refresh: - self._update_volume_stats() - - return self._stats - - def _update_volume_stats(self): - """Retrieve stats info from volume group.""" - - LOG.debug("Updating volume stats...") - data = {} - backend_name = self.configuration.safe_get('volume_backend_name') - data["volume_backend_name"] = backend_name or 'Generic_FC' - data["vendor_name"] = 'Open Source' - data["driver_version"] = '1.0' - data["storage_protocol"] = 'FC' - data["pools"] = [] - - self._update_pools_and_stats(data) diff --git a/cinder/volume/driver_utils.py b/cinder/volume/driver_utils.py deleted file mode 100644 index d22a265b8..000000000 --- a/cinder/volume/driver_utils.py +++ /dev/null @@ -1,71 +0,0 @@ -# Copyright (c) 2014 Pure Storage, Inc. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the -# License for the specific language governing permissions and limitations -# under the License. - -from oslo_log import log as logging - -from cinder import context -from cinder import exception - -LOG = logging.getLogger(__name__) - - -class VolumeDriverUtils(object): - def __init__(self, namespace, db): - self._data_namespace = namespace - self._db = db - - @staticmethod - def _get_context(ctxt): - if not ctxt: - return context.get_admin_context() - return ctxt - - def get_driver_initiator_data(self, initiator, ctxt=None): - try: - return self._db.driver_initiator_data_get( - self._get_context(ctxt), - initiator, - self._data_namespace - ) - except exception.CinderException: - LOG.exception("Failed to get driver initiator data for" - " initiator %(initiator)s and namespace" - " %(namespace)s", - {'initiator': initiator, - 'namespace': self._data_namespace}) - raise - - def insert_driver_initiator_data(self, initiator, key, value, ctxt=None): - """Update the initiator data at key with value. - - If the key has already been set to something return False, otherwise - if saved successfully return True. - """ - try: - return self._db.driver_initiator_data_insert_by_key( - self._get_context(ctxt), - initiator, - self._data_namespace, - key, - value - ) - except exception.CinderException: - LOG.exception("Failed to insert initiator data for" - " initiator %(initiator)s and backend" - " %(backend)s for key %(key)s.", - {'initiator': initiator, - 'backend': self._data_namespace, - 'key': key}) - raise diff --git a/cinder/volume/drivers/__init__.py b/cinder/volume/drivers/__init__.py deleted file mode 100644 index e69de29bb..000000000 diff --git a/cinder/volume/drivers/block_device.py b/cinder/volume/drivers/block_device.py deleted file mode 100644 index 043023562..000000000 --- a/cinder/volume/drivers/block_device.py +++ /dev/null @@ -1,326 +0,0 @@ -# Copyright (c) 2013 Mirantis, Inc. -# All Rights Reserved. 
-# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import os - -from oslo_config import cfg -from oslo_log import log as logging -from oslo_log import versionutils -from oslo_utils import importutils -from oslo_utils import units - -from cinder import context -from cinder import exception -from cinder.i18n import _ -from cinder.image import image_utils -from cinder import interface -from cinder import objects -from cinder import utils -from cinder.volume import configuration -from cinder.volume import driver -from cinder.volume import utils as volutils - - -LOG = logging.getLogger(__name__) - -volume_opts = [ - cfg.ListOpt('available_devices', - default=[], - help='List of all available devices'), -] - -CONF = cfg.CONF -CONF.register_opts(volume_opts, group=configuration.SHARED_CONF_GROUP) - - -@interface.volumedriver -class BlockDeviceDriver(driver.BaseVD, - driver.CloneableImageVD): - VERSION = '2.3.0' - - # ThirdPartySystems wiki page - CI_WIKI_NAME = "Cinder_Jenkins" - SUPPORTED = False - - def __init__(self, *args, **kwargs): - super(BlockDeviceDriver, self).__init__(*args, **kwargs) - # This driver has been marked as deprecated in the Ocata release, as - # per the standard OpenStack deprecation policy it can be removed in - # the Queens release. 
- msg = _("The block_device driver is deprecated and will be " - "removed in a future release.") - versionutils.report_deprecated_feature(LOG, msg) - - self.configuration.append_config_values(volume_opts) - self.backend_name = \ - self.configuration.safe_get('volume_backend_name') or "BlockDev" - target_driver =\ - self.target_mapping[self.configuration.safe_get('iscsi_helper')] - self.target_driver = importutils.import_object( - target_driver, - configuration=self.configuration, - db=self.db, - executor=self._execute) - - def check_for_setup_error(self): - pass - - def _update_provider_location(self, obj, device): - # We update provider_location and host to mark device as used to - # avoid race with other threads. - # TODO(ynesenenko): need to remove DB access from driver - host = '{host}#{pool}'.format(host=self.host, pool=self.get_pool(obj)) - obj.update({'provider_location': device, 'host': host}) - obj.save() - - @utils.synchronized('block_device', external=True) - def create_volume(self, volume): - device = self.find_appropriate_size_device(volume.size) - LOG.info("Creating %(volume)s on %(device)s", - {"volume": volume.name, "device": device}) - self._update_provider_location(volume, device) - - def delete_volume(self, volume): - """Deletes a logical volume.""" - self._clear_block_device(volume) - - def _clear_block_device(self, device): - """Deletes a block device.""" - dev_path = self.local_path(device) - if not dev_path or dev_path not in \ - self.configuration.available_devices: - return - if os.path.exists(dev_path) and \ - self.configuration.volume_clear != 'none': - dev_size = self._get_devices_sizes([dev_path]) - volutils.clear_volume( - dev_size[dev_path], dev_path, - volume_clear=self.configuration.volume_clear, - volume_clear_size=self.configuration.volume_clear_size) - else: - LOG.warning("The device %s won't be cleared.", device) - - if device.status == "error_deleting": - msg = _("Failed to delete device.") - LOG.error(msg, resource=device) - 
raise exception.VolumeDriverException(msg) - - def local_path(self, device): - if device.provider_location: - path = device.provider_location.rsplit(" ", 1) - return path[-1] - else: - return None - - def copy_image_to_volume(self, context, volume, image_service, image_id): - """Fetch the image from image_service and write it to the volume.""" - image_utils.fetch_to_raw(context, - image_service, - image_id, - self.local_path(volume), - self.configuration.volume_dd_blocksize, - size=volume.size) - - def copy_volume_to_image(self, context, volume, image_service, image_meta): - """Copy the volume to the specified image.""" - image_utils.upload_volume(context, - image_service, - image_meta, - self.local_path(volume)) - - @utils.synchronized('block_device', external=True) - def create_cloned_volume(self, volume, src_vref): - LOG.info('Creating clone of volume: %s.', src_vref.id) - device = self.find_appropriate_size_device(src_vref.size) - dev_size = self._get_devices_sizes([device]) - volutils.copy_volume( - self.local_path(src_vref), device, - dev_size[device], - self.configuration.volume_dd_blocksize, - execute=self._execute) - self._update_provider_location(volume, device) - - def get_volume_stats(self, refresh=False): - if refresh: - self._update_volume_stats() - return self._stats - - def _update_volume_stats(self): - """Retrieve stats info from volume group.""" - dict_of_devices_sizes = self._devices_sizes() - used_devices = self._get_used_devices() - total_size = 0 - free_size = 0 - for device, size in dict_of_devices_sizes.items(): - if device not in used_devices: - free_size += size - total_size += size - - LOG.debug("Updating volume stats.") - data = { - 'volume_backend_name': self.backend_name, - 'vendor_name': "Open Source", - 'driver_version': self.VERSION, - 'storage_protocol': 'unknown', - 'pools': []} - - single_pool = { - 'pool_name': data['volume_backend_name'], - 'total_capacity_gb': total_size / units.Ki, - 'free_capacity_gb': free_size / units.Ki, 
- 'reserved_percentage': self.configuration.reserved_percentage, - 'QoS_support': False} - - data['pools'].append(single_pool) - self._stats = data - - def get_pool(self, volume): - return self.backend_name - - def _get_used_paths(self, lst): - used_dev = set() - for item in lst: - local_path = self.local_path(item) - if local_path: - used_dev.add(local_path) - return used_dev - - def _get_used_devices(self): - lst = objects.VolumeList.get_all_by_host(context.get_admin_context(), - self.host) - used_devices = self._get_used_paths(lst) - snp_lst = objects.SnapshotList.get_by_host(context.get_admin_context(), - self.host) - return used_devices.union(self._get_used_paths(snp_lst)) - - def _get_devices_sizes(self, dev_paths): - """Return devices' sizes in Mb""" - out, _err = self._execute('blockdev', '--getsize64', *dev_paths, - run_as_root=True) - dev_sizes = {} - out = out.split('\n') - # blockdev returns devices' sizes in order that - # they have been passed to it. - for n, size in enumerate(out[:-1]): - dev_sizes[dev_paths[n]] = int(size) / units.Mi - - return dev_sizes - - def _devices_sizes(self): - available_devices = self.configuration.available_devices - return self._get_devices_sizes(available_devices) - - def find_appropriate_size_device(self, size): - dict_of_devices_sizes = self._devices_sizes() - free_devices = (set(self.configuration.available_devices) - - self._get_used_devices()) - if not free_devices: - raise exception.CinderException(_("No free disk")) - possible_device = None - possible_device_size = None - for device in free_devices: - dev_size = dict_of_devices_sizes[device] - if (size * units.Ki <= dev_size and - (possible_device is None or - dev_size < possible_device_size)): - possible_device = device - possible_device_size = dev_size - - if possible_device: - return possible_device - else: - raise exception.CinderException(_("No big enough free disk")) - - def extend_volume(self, volume, new_size): - dev_path = self.local_path(volume) - 
total_size = self._get_devices_sizes([dev_path]) - # Convert from Megabytes to Gigabytes - size = total_size[dev_path] / units.Ki - if size < new_size: - msg = _("Insufficient free space available to extend volume.") - LOG.error(msg, resource=volume) - raise exception.CinderException(msg) - - @utils.synchronized('block_device', external=True) - def create_snapshot(self, snapshot): - volume = snapshot.volume - if volume.status != 'available': - msg = _("Volume is not available.") - LOG.error(msg, resource=volume) - raise exception.CinderException(msg) - - LOG.info('Creating volume snapshot: %s.', snapshot.id) - device = self.find_appropriate_size_device(snapshot.volume_size) - dev_size = self._get_devices_sizes([device]) - volutils.copy_volume( - self.local_path(volume), device, - dev_size[device], - self.configuration.volume_dd_blocksize, - execute=self._execute) - self._update_provider_location(snapshot, device) - - def delete_snapshot(self, snapshot): - self._clear_block_device(snapshot) - - @utils.synchronized('block_device', external=True) - def create_volume_from_snapshot(self, volume, snapshot): - LOG.info('Creating volume %s from snapshot.', volume.id) - device = self.find_appropriate_size_device(snapshot.volume_size) - dev_size = self._get_devices_sizes([device]) - volutils.copy_volume( - self.local_path(snapshot), device, - dev_size[device], - self.configuration.volume_dd_blocksize, - execute=self._execute) - self._update_provider_location(volume, device) - - # ####### Interface methods for DataPath (Target Driver) ######## - - def ensure_export(self, context, volume): - volume_path = self.local_path(volume) - model_update = \ - self.target_driver.ensure_export( - context, - volume, - volume_path) - return model_update - - def create_export(self, context, volume, connector): - volume_path = self.local_path(volume) - export_info = self.target_driver.create_export(context, - volume, - volume_path) - return { - 'provider_location': export_info['location'] + ' 
' + volume_path, - 'provider_auth': export_info['auth'], - } - - def remove_export(self, context, volume): - self.target_driver.remove_export(context, volume) - - def initialize_connection(self, volume, connector): - if connector['host'] != volutils.extract_host(volume.host, 'host'): - return self.target_driver.initialize_connection(volume, connector) - else: - return { - 'driver_volume_type': 'local', - 'data': {'device_path': self.local_path(volume)}, - } - - def validate_connector(self, connector): - return self.target_driver.validate_connector(connector) - - def terminate_connection(self, volume, connector, **kwargs): - pass diff --git a/cinder/volume/drivers/blockbridge.py b/cinder/volume/drivers/blockbridge.py deleted file mode 100644 index 84936bca8..000000000 --- a/cinder/volume/drivers/blockbridge.py +++ /dev/null @@ -1,604 +0,0 @@ -# Copyright 2013-2015 Blockbridge Networks, LLC. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
-""" -Blockbridge EPS iSCSI Volume Driver -""" - -import base64 -import socket - -from oslo_config import cfg -from oslo_log import log as logging -from oslo_serialization import jsonutils -from oslo_utils import units -import six -from six.moves import http_client -from six.moves import urllib - -from cinder import context -from cinder import exception -from cinder.i18n import _ -from cinder import interface -from cinder.volume import configuration -from cinder.volume import driver -from cinder.volume import utils as volume_utils - -LOG = logging.getLogger(__name__) - -blockbridge_opts = [ - cfg.StrOpt("blockbridge_api_host", - help="IP address/hostname of Blockbridge API."), - cfg.IntOpt("blockbridge_api_port", - help="Override HTTPS port to connect to Blockbridge " - "API server."), - cfg.StrOpt("blockbridge_auth_scheme", - default='token', - choices=['token', 'password'], - help="Blockbridge API authentication scheme (token " - "or password)"), - cfg.StrOpt("blockbridge_auth_token", - help="Blockbridge API token (for auth scheme 'token')", - secret=True), - cfg.StrOpt("blockbridge_auth_user", - help="Blockbridge API user (for auth scheme 'password')"), - cfg.StrOpt("blockbridge_auth_password", - help="Blockbridge API password (for auth scheme 'password')", - secret=True), - cfg.DictOpt("blockbridge_pools", - default={'OpenStack': '+openstack'}, - help="Defines the set of exposed pools and their associated " - "backend query strings"), - cfg.StrOpt("blockbridge_default_pool", - help="Default pool name if unspecified."), -] - -CONF = cfg.CONF -CONF.register_opts(blockbridge_opts, group=configuration.SHARED_CONF_GROUP) - - -class BlockbridgeAPIClient(object): - _api_cfg = None - - def __init__(self, configuration=None): - self.configuration = configuration - - def _get_api_cfg(self): - if self._api_cfg: - # return cached configuration - return self._api_cfg - - if self.configuration.blockbridge_auth_scheme == 'password': - user = 
self.configuration.safe_get('blockbridge_auth_user') - pw = self.configuration.safe_get('blockbridge_auth_password') - creds = "%s:%s" % (user, pw) - if six.PY3: - creds = creds.encode('utf-8') - b64_creds = base64.encodestring(creds).decode('ascii') - else: - b64_creds = base64.encodestring(creds) - authz = "Basic %s" % b64_creds.replace("\n", "") - elif self.configuration.blockbridge_auth_scheme == 'token': - token = self.configuration.blockbridge_auth_token or '' - authz = "Bearer %s" % token - - # set and return cached api cfg - self._api_cfg = { - 'host': self.configuration.blockbridge_api_host, - 'port': self.configuration.blockbridge_api_port, - 'base_url': '/api/cinder', - 'default_headers': { - 'User-Agent': ("cinder-volume/%s" % - BlockbridgeISCSIDriver.VERSION), - 'Accept': 'application/vnd.blockbridge-3+json', - 'Authorization': authz, - }, - } - - return self._api_cfg - - def submit(self, rel_url, method='GET', params=None, user_id=None, - project_id=None, req_id=None, action=None, **kwargs): - """Submit a request to the configured API endpoint.""" - - cfg = self._get_api_cfg() - if cfg is None: - msg = _("Failed to determine blockbridge API configuration") - LOG.error(msg) - raise exception.VolumeBackendAPIException(data=msg) - - # alter the url appropriately if an action is requested - if action: - rel_url += "/actions/%s" % action - - headers = cfg['default_headers'].copy() - url = cfg['base_url'] + rel_url - body = None - - # include user, project and req-id, if supplied - tsk_ctx = [] - if user_id and project_id: - tsk_ctx.append("ext_auth=keystone/%s/%s" % (project_id, user_id)) - if req_id: - tsk_ctx.append("id=%s", req_id) - - if tsk_ctx: - headers['X-Blockbridge-Task'] = ','.join(tsk_ctx) - - # encode params based on request method - if method in ['GET', 'DELETE']: - # For GET method add parameters to the URL - if params: - url += '?' 
+ urllib.parse.urlencode(params) - elif method in ['POST', 'PUT', 'PATCH']: - body = jsonutils.dumps(params) - headers['Content-Type'] = 'application/json' - else: - raise exception.UnknownCmd(cmd=method) - - # connect and execute the request - connection = http_client.HTTPSConnection(cfg['host'], cfg['port']) - connection.request(method, url, body, headers) - response = connection.getresponse() - - # read response data - rsp_body = response.read() - rsp_data = jsonutils.loads(rsp_body) - - connection.close() - - code = response.status - if code in [200, 201, 202, 204]: - pass - elif code == 401: - raise exception.NotAuthorized(_("Invalid credentials")) - elif code == 403: - raise exception.NotAuthorized(_("Insufficient privileges")) - else: - raise exception.VolumeBackendAPIException(data=rsp_data['message']) - - return rsp_data - - -@interface.volumedriver -class BlockbridgeISCSIDriver(driver.ISCSIDriver): - """Manages volumes hosted on Blockbridge EPS.""" - - VERSION = '1.3.0' - - # ThirdPartySystems wiki page - CI_WIKI_NAME = "Blockbridge_EPS_CI" - - # TODO(smcginnis) Either remove this if CI requirements are met, or - # remove this driver in the Queens release per normal deprecation - SUPPORTED = False - - def __init__(self, *args, **kwargs): - super(BlockbridgeISCSIDriver, self).__init__(*args, **kwargs) - - self.client = kwargs.get('client', None) or ( - BlockbridgeAPIClient(configuration=self.configuration)) - - self.configuration.append_config_values(blockbridge_opts) - self.hostname = socket.gethostname() - - def do_setup(self, context): - """Set up the Blockbridge volume driver.""" - pass - - def check_for_setup_error(self): - """Verify configuration is valid.""" - - # ensure the host is configured - if self.configuration.safe_get('blockbridge_api_host') is None: - raise exception.InvalidInput( - reason=_("Blockbridge api host not configured")) - - # ensure the auth scheme is valid and has the necessary configuration. 
- auth_scheme = self.configuration.safe_get("blockbridge_auth_scheme") - - if auth_scheme == 'password': - auth_user = self.configuration.safe_get('blockbridge_auth_user') - auth_pw = self.configuration.safe_get('blockbridge_auth_password') - if auth_user is None: - raise exception.InvalidInput( - reason=_("Blockbridge user not configured (required for " - "auth scheme 'password')")) - if auth_pw is None: - raise exception.InvalidInput( - reason=_("Blockbridge password not configured (required " - "for auth scheme 'password')")) - elif auth_scheme == 'token': - token = self.configuration.safe_get('blockbridge_auth_token') - if token is None: - raise exception.InvalidInput( - reason=_("Blockbridge token not configured (required " - "for auth scheme 'token')")) - else: - raise exception.InvalidInput( - reason=(_("Blockbridge configured with invalid auth scheme " - "'%(auth_scheme)s'") % {'auth_scheme': auth_scheme})) - - # ensure at least one pool is defined - pools = self.configuration.safe_get('blockbridge_pools') - if pools is None: - raise exception.InvalidInput( - reason=_("Blockbridge pools not configured")) - - default_pool = self.configuration.safe_get('blockbridge_default_pool') - if default_pool and default_pool not in pools: - raise exception.InvalidInput( - reason=_("Blockbridge default pool does not exist")) - - def _vol_api_submit(self, vol_id, **kwargs): - vol_id = urllib.parse.quote(vol_id, '') - rel_url = "/volumes/%s" % vol_id - - return self.client.submit(rel_url, **kwargs) - - def _create_volume(self, vol_id, params, **kwargs): - """Execute a backend volume create operation.""" - - self._vol_api_submit(vol_id, method='PUT', params=params, **kwargs) - - def _delete_volume(self, vol_id, **kwargs): - """Execute a backend volume delete operation.""" - - self._vol_api_submit(vol_id, method='DELETE', **kwargs) - - def _extend_volume(self, vol_id, capacity, **kwargs): - """Execute a backend volume grow operation.""" - - params = kwargs.get('params', {}) 
- params['capacity'] = capacity - - self._vol_api_submit(vol_id, method='POST', action='grow', - params=params, **kwargs) - - def _snap_api_submit(self, vol_id, snap_id, **kwargs): - vol_id = urllib.parse.quote(vol_id, '') - snap_id = urllib.parse.quote(snap_id, '') - rel_url = "/volumes/%s/snapshots/%s" % (vol_id, snap_id) - - return self.client.submit(rel_url, **kwargs) - - def _create_snapshot(self, vol_id, snap_id, params, **kwargs): - """Execute a backend snapshot create operation.""" - - self._snap_api_submit(vol_id, snap_id, method='PUT', - params=params, **kwargs) - - def _delete_snapshot(self, vol_id, snap_id, **kwargs): - """Execute a backend snapshot delete operation.""" - - return self._snap_api_submit(vol_id, snap_id, method='DELETE', - **kwargs) - - def _export_api_submit(self, vol_id, ini_name, **kwargs): - vol_id = urllib.parse.quote(vol_id, '') - ini_name = urllib.parse.quote(ini_name, '') - rel_url = "/volumes/%s/exports/%s" % (vol_id, ini_name) - - return self.client.submit(rel_url, **kwargs) - - def _create_export(self, vol_id, ini_name, params, **kwargs): - """Execute a backend volume export operation.""" - - return self._export_api_submit(vol_id, ini_name, method='PUT', - params=params, **kwargs) - - def _delete_export(self, vol_id, ini_name, **kwargs): - """Remove a previously created volume export.""" - - self._export_api_submit(vol_id, ini_name, method='DELETE', - **kwargs) - - def _get_pool_stats(self, pool, query, **kwargs): - """Retrieve pool statistics and capabilities.""" - - pq = { - 'pool': pool, - 'query': query, - } - pq.update(kwargs) - - return self.client.submit('/status', params=pq) - - def _get_dbref_name(self, ref): - display_name = ref.get('display_name') - if not display_name: - return ref.get('name') - return display_name - - def _get_query_string(self, ctxt, volume): - pools = self.configuration.blockbridge_pools - default_pool = self.configuration.blockbridge_default_pool - explicit_pool = 
volume_utils.extract_host(volume['host'], 'pool') - - pool_name = explicit_pool or default_pool - if pool_name: - return pools[pool_name] - else: - # no pool specified or defaulted -- just pick whatever comes out of - # the dictionary first. - return list(pools.values())[0] - - def create_volume(self, volume): - """Create a volume on a Blockbridge EPS backend. - - :param volume: volume reference - """ - - ctxt = context.get_admin_context() - create_params = { - 'name': self._get_dbref_name(volume), - 'query': self._get_query_string(ctxt, volume), - 'capacity': int(volume['size'] * units.Gi), - } - - LOG.debug("Provisioning %(capacity)s byte volume " - "with query '%(query)s'", create_params, resource=volume) - - return self._create_volume(volume['id'], - create_params, - user_id=volume['user_id'], - project_id=volume['project_id']) - - def create_cloned_volume(self, volume, src_vref): - """Creates a clone of the specified volume.""" - - create_params = { - 'name': self._get_dbref_name(volume), - 'capacity': int(volume['size'] * units.Gi), - 'src': { - 'volume_id': src_vref['id'], - }, - } - - LOG.debug("Cloning source volume %(id)s", src_vref, resource=volume) - - return self._create_volume(volume['id'], - create_params, - user_id=volume['user_id'], - project_id=volume['project_id']) - - def delete_volume(self, volume): - """Remove an existing volume. - - :param volume: volume reference - """ - - LOG.debug("Removing volume %(id)s", volume, resource=volume) - - return self._delete_volume(volume['id'], - user_id=volume['user_id'], - project_id=volume['project_id']) - - def create_snapshot(self, snapshot): - """Create snapshot of existing volume. 
- - :param snapshot: shapshot reference - """ - - create_params = { - 'name': self._get_dbref_name(snapshot), - } - - LOG.debug("Creating snapshot of volume %(volume_id)s", snapshot, - resource=snapshot) - - return self._create_snapshot(snapshot['volume_id'], - snapshot['id'], - create_params, - user_id=snapshot['user_id'], - project_id=snapshot['project_id']) - - def create_volume_from_snapshot(self, volume, snapshot): - """Create new volume from existing snapshot. - - :param volume: reference of volume to be created - :param snapshot: reference of source snapshot - """ - - create_params = { - 'name': self._get_dbref_name(volume), - 'capacity': int(volume['size'] * units.Gi), - 'src': { - 'volume_id': snapshot['volume_id'], - 'snapshot_id': snapshot['id'], - }, - } - - LOG.debug("Creating volume from snapshot %(id)s", snapshot, - resource=volume) - - return self._create_volume(volume['id'], - create_params, - user_id=volume['user_id'], - project_id=volume['project_id']) - - def delete_snapshot(self, snapshot): - """Delete volume's snapshot. - - :param snapshot: shapshot reference - """ - - LOG.debug("Deleting snapshot of volume %(volume_id)s", snapshot, - resource=snapshot) - - self._delete_snapshot(snapshot['volume_id'], - snapshot['id'], - user_id=snapshot['user_id'], - project_id=snapshot['project_id']) - - def create_export(self, _ctx, volume, connector): - """Do nothing: target created during instance attachment.""" - pass - - def ensure_export(self, _ctx, volume): - """Do nothing: target created during instance attachment.""" - pass - - def remove_export(self, _ctx, volume): - """Do nothing: target created during instance attachment.""" - pass - - def initialize_connection(self, volume, connector, **kwargs): - """Attach volume to initiator/host. - - Creates a profile for the initiator, and adds the new profile to the - target ACL. 
- - """ - - # generate a CHAP secret here -- there is no way to retrieve an - # existing CHAP secret over the Blockbridge API, so it must be - # supplied by the volume driver. - export_params = { - 'chap_user': ( - kwargs.get('user', volume_utils.generate_username(16))), - 'chap_secret': ( - kwargs.get('password', volume_utils.generate_password(32))), - } - - LOG.debug("Configuring export for %(initiator)s", connector, - resource=volume) - - rsp = self._create_export(volume['id'], - connector['initiator'], - export_params, - user_id=volume['user_id'], - project_id=volume['project_id']) - - # combine locally generated chap credentials with target iqn/lun to - # present the attach properties. - target_portal = "%s:%s" % (rsp['target_ip'], rsp['target_port']) - - properties = { - 'target_discovered': False, - 'target_portal': target_portal, - 'target_iqn': rsp['target_iqn'], - 'target_lun': rsp['target_lun'], - 'volume_id': volume['id'], - 'auth_method': 'CHAP', - 'auth_username': rsp['initiator_login'], - 'auth_password': export_params['chap_secret'], - } - - LOG.debug("Attach properties: %(properties)s", - {'properties': properties}) - - return { - 'driver_volume_type': 'iscsi', - 'data': properties, - } - - def terminate_connection(self, volume, connector, **kwargs): - """Detach volume from the initiator. - - Removes initiator profile entry from target ACL. 
- - """ - - LOG.debug("Unconfiguring export for %(initiator)s", connector, - resource=volume) - - self._delete_export(volume['id'], - connector['initiator'], - user_id=volume['user_id'], - project_id=volume['project_id']) - - def extend_volume(self, volume, new_size): - """Extend an existing volume.""" - - capacity = new_size * units.Gi - - LOG.debug("Extending volume to %(capacity)s bytes", - {'capacity': capacity}, resource=volume) - - self._extend_volume(volume['id'], - int(new_size * units.Gi), - user_id=volume['user_id'], - project_id=volume['project_id']) - - def get_volume_stats(self, refresh=False): - if refresh: - self._update_volume_stats() - return self._stats - - def _update_volume_stats(self): - if self.configuration: - cfg_name = self.configuration.safe_get('volume_backend_name') - backend_name = cfg_name or self.__class__.__name__ - - driver_cfg = { - 'hostname': self.hostname, - 'version': self.VERSION, - 'backend_name': backend_name, - } - - filter_function = self.get_filter_function() - goodness_function = self.get_goodness_function() - pools = [] - - LOG.debug("Updating volume driver statistics", - resource={'type': 'driver', 'id': backend_name}) - - for pool_name, query in self.configuration.blockbridge_pools.items(): - stats = self._get_pool_stats(pool_name, query, **driver_cfg) - - system_serial = stats.get('system_serial', 'unknown') - free_capacity = stats.get('free_capacity', None) - total_capacity = stats.get('total_capacity', None) - provisioned_capacity = stats.get('provisioned_capacity', None) - - if free_capacity is None: - free_capacity = 'unknown' - else: - free_capacity = int(free_capacity / units.Gi) - - if total_capacity is None: - total_capacity = 'unknown' - else: - total_capacity = int(total_capacity / units.Gi) - - pool = { - 'pool_name': pool_name, - 'location_info': ('BlockbridgeDriver:%(sys_id)s:%(pool)s' % - {'sys_id': system_serial, - 'pool': pool_name}), - 'max_over_subscription_ratio': ( - 
self.configuration.safe_get('max_over_subscription_ratio') - ), - 'free_capacity_gb': free_capacity, - 'total_capacity_gb': total_capacity, - 'reserved_percentage': 0, - 'thin_provisioning_support': True, - 'filter_function': filter_function, - 'goodness_function': goodness_function, - } - - if provisioned_capacity is not None: - pool['provisioned_capacity_gb'] = int( - provisioned_capacity / units.Gi - ) - - pools.append(pool) - - self._stats = { - 'volume_backend_name': backend_name, - 'vendor_name': 'Blockbridge', - 'driver_version': self.VERSION, - 'storage_protocol': 'iSCSI', - 'pools': pools, - } diff --git a/cinder/volume/drivers/coho.py b/cinder/volume/drivers/coho.py deleted file mode 100644 index 99dcd91bf..000000000 --- a/cinder/volume/drivers/coho.py +++ /dev/null @@ -1,507 +0,0 @@ -# Copyright (c) 2015 Coho Data, Inc. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -import errno -import os -import six -import socket -import xdrlib - -from oslo_config import cfg -from oslo_log import log as logging -from random import randint - -from cinder import context -from cinder import exception -from cinder.i18n import _ -from cinder import interface -from cinder import utils -from cinder.volume import configuration -from cinder.volume.drivers import nfs -from cinder.volume import qos_specs -from cinder.volume import volume_types - -LOG = logging.getLogger(__name__) - -# -# RPC Definition -# - -RPCVERSION = 2 - -CALL = 0 -REPLY = 1 - -AUTH_NULL = 0 - -MSG_ACCEPTED = 0 -MSG_DENIED = 1 - -SUCCESS = 0 -PROG_UNAVAIL = 1 -PROG_MISMATCH = 2 -PROC_UNAVAIL = 3 -GARBAGE_ARGS = 4 - -RPC_MISMATCH = 0 -AUTH_ERROR = 1 - -COHO_PROGRAM = 400115 -COHO_V1 = 1 -COHO1_CREATE_SNAPSHOT = 1 -COHO1_DELETE_SNAPSHOT = 2 -COHO1_CREATE_VOLUME_FROM_SNAPSHOT = 3 -COHO1_SET_QOS_POLICY = 4 - -COHO_MAX_RETRIES = 5 - -COHO_NO_QOS = {'maxIOPS': 0, 'maxMBS': 0} - -# -# Simple RPC Client -# - - -class Client(object): - - def __init__(self, address, prog, vers, port): - self.packer = xdrlib.Packer() - self.unpacker = xdrlib.Unpacker('') - self.address = address - self.prog = prog - self.vers = vers - self.port = port - self.cred = None - self.verf = None - - self.init_socket() - self.init_xid() - - def init_socket(self): - try: - self.sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM) - self.sock.connect((self.address, self.port)) - except socket.error: - msg = _('Failed to establish connection with Coho cluster') - raise exception.CohoException(msg) - - def init_xid(self): - self.xid = randint(0, 4096) - - def make_xid(self): - self.xid += 1 - - def make_cred(self): - if self.cred is None: - self.cred = (AUTH_NULL, six.b('')) - return self.cred - - def make_verf(self): - if self.verf is None: - self.verf = (AUTH_NULL, six.b('')) - return self.verf - - def pack_auth(self, auth): - flavor, stuff = auth - self.packer.pack_enum(flavor) - self.packer.pack_opaque(stuff) 
- - def pack_callheader(self, xid, prog, vers, proc, cred, verf): - self.packer.pack_uint(xid) - self.packer.pack_enum(CALL) - self.packer.pack_uint(RPCVERSION) - self.packer.pack_uint(prog) - self.packer.pack_uint(vers) - self.packer.pack_uint(proc) - self.pack_auth(cred) - self.pack_auth(verf) - - def unpack_auth(self): - flavor = self.unpacker.unpack_enum() - stuff = self.unpacker.unpack_opaque() - return (flavor, stuff) - - def unpack_replyheader(self): - xid = self.unpacker.unpack_uint() - mtype = self.unpacker.unpack_enum() - if mtype != REPLY: - raise exception.CohoException( - _('no REPLY but %r') % (mtype,)) - stat = self.unpacker.unpack_enum() - if stat == MSG_DENIED: - stat = self.unpacker.unpack_enum() - if stat == RPC_MISMATCH: - low = self.unpacker.unpack_uint() - high = self.unpacker.unpack_uint() - raise exception.CohoException( - _('MSG_DENIED: RPC_MISMATCH: %r') % ((low, high),)) - if stat == AUTH_ERROR: - stat = self.unpacker.unpack_uint() - raise exception.CohoException( - _('MSG_DENIED: AUTH_ERROR: %r') % (stat,)) - raise exception.CohoException(_('MSG_DENIED: %r') % (stat,)) - if stat != MSG_ACCEPTED: - raise exception.CohoException( - _('Neither MSG_DENIED nor MSG_ACCEPTED: %r') % (stat,)) - verf = self.unpack_auth() - stat = self.unpacker.unpack_enum() - if stat == PROG_UNAVAIL: - raise exception.CohoException(_('call failed: PROG_UNAVAIL')) - if stat == PROG_MISMATCH: - low = self.unpacker.unpack_uint() - high = self.unpacker.unpack_uint() - raise exception.CohoException( - _('call failed: PROG_MISMATCH: %r') % ((low, high),)) - if stat == PROC_UNAVAIL: - raise exception.CohoException(_('call failed: PROC_UNAVAIL')) - if stat == GARBAGE_ARGS: - raise exception.CohoException(_('call failed: GARBAGE_ARGS')) - if stat != SUCCESS: - raise exception.CohoException(_('call failed: %r') % (stat,)) - return xid, verf - - def init_call(self, proc, args): - self.make_xid() - self.packer.reset() - cred = self.make_cred() - verf = self.make_verf() - 
self.pack_callheader(self.xid, self.prog, self.vers, proc, cred, verf) - - for arg, func in args: - func(arg) - - return self.xid, self.packer.get_buf() - - def _sendfrag(self, last, frag): - x = len(frag) - if last: - x = x | 0x80000000 - header = (six.int2byte(int(x >> 24 & 0xff)) + - six.int2byte(int(x >> 16 & 0xff)) + - six.int2byte(int(x >> 8 & 0xff)) + - six.int2byte(int(x & 0xff))) - self.sock.send(header + frag) - - def _sendrecord(self, record): - self._sendfrag(1, record) - - def _recvfrag(self): - header = self.sock.recv(4) - if len(header) < 4: - raise exception.CohoException( - _('Invalid response header from RPC server')) - x = (six.indexbytes(header, 0) << 24 | - six.indexbytes(header, 1) << 16 | - six.indexbytes(header, 2) << 8 | - six.indexbytes(header, 3)) - last = ((x & 0x80000000) != 0) - n = int(x & 0x7fffffff) - frag = six.b('') - while n > 0: - buf = self.sock.recv(n) - if not buf: - raise exception.CohoException( - _('RPC server response is incomplete')) - n = n - len(buf) - frag = frag + buf - return last, frag - - def _recvrecord(self): - record = six.b('') - last = 0 - while not last: - last, frag = self._recvfrag() - record = record + frag - return record - - def _make_call(self, proc, args): - self.packer.reset() - xid, call = self.init_call(proc, args) - self._sendrecord(call) - reply = self._recvrecord() - self.unpacker.reset(reply) - xid, verf = self.unpack_replyheader() - - @utils.synchronized('coho-rpc', external=True) - def _call(self, proc, args): - for retry in range(COHO_MAX_RETRIES): - try: - self._make_call(proc, args) - break - except socket.error as e: - if e.errno == errno.EPIPE: - # Reopen connection to cluster and retry - LOG.debug('Re-establishing socket, retry number %d', retry) - self.init_socket() - else: - msg = (_('Unable to send requests: %s') % - six.text_type(e)) - raise exception.CohoException(msg) - else: - msg = _('Failed to establish a stable connection') - raise exception.CohoException(msg) - - res = 
self.unpacker.unpack_uint() - if res != SUCCESS: - raise exception.CohoException(os.strerror(res)) - - -class CohoRPCClient(Client): - - def __init__(self, address, port): - Client.__init__(self, address, COHO_PROGRAM, 1, port) - - def create_snapshot(self, src, dst, flags): - LOG.debug('COHO1_CREATE_SNAPSHOT src %s to dst %s', src, dst) - self._call(COHO1_CREATE_SNAPSHOT, - [(six.b(src), self.packer.pack_string), - (six.b(dst), self.packer.pack_string), - (flags, self.packer.pack_uint)]) - - def delete_snapshot(self, name): - LOG.debug('COHO1_DELETE_SNAPSHOT name %s', name) - self._call(COHO1_DELETE_SNAPSHOT, - [(six.b(name), self.packer.pack_string)]) - - def create_volume_from_snapshot(self, src, dst): - LOG.debug('COHO1_CREATE_VOLUME_FROM_SNAPSHOT src %s to dst %s', - src, dst) - self._call(COHO1_CREATE_VOLUME_FROM_SNAPSHOT, - [(six.b(src), self.packer.pack_string), - (six.b(dst), self.packer.pack_string)]) - - def set_qos_policy(self, src, qos): - LOG.debug('COHO1_SET_QOS_POLICY volume %s, uuid %s, %d:%d', - src, qos.get('uuid', ''), qos.get('maxIOPS', 0), - qos.get('maxMBS', '')) - self._call(COHO1_SET_QOS_POLICY, - [(six.b(src), self.packer.pack_string), - (six.b(qos.get('uuid', '')), self.packer.pack_string), - (0, self.packer.pack_uhyper), - (qos.get('maxIOPS', 0), self.packer.pack_uhyper), - (0, self.packer.pack_uhyper), - (qos.get('maxMBS', 0), self.packer.pack_uhyper)]) - - -# -# Coho Data Volume Driver -# - -VERSION = '1.1.1' - -coho_opts = [ - cfg.IntOpt('coho_rpc_port', - default=2049, - help='RPC port to connect to Coho Data MicroArray') -] - -CONF = cfg.CONF -CONF.register_opts(coho_opts, group=configuration.SHARED_CONF_GROUP) - - -@interface.volumedriver -class CohoDriver(nfs.NfsDriver): - """Coho Data NFS based cinder driver. - - Creates file on NFS share for using it as block device on hypervisor. 
- Version history: - 1.0.0 - Initial driver - 1.1.0 - Added QoS support - 1.1.1 - Stability fixes in the RPC client - """ - - # We have to overload this attribute of RemoteFSDriver because - # unfortunately the base method doesn't accept exports of the form: - #
:/ - # It expects a non blank export name following the /. - # We are more permissive. - SHARE_FORMAT_REGEX = r'.+:/.*' - - COHO_QOS_KEYS = ['maxIOPS', 'maxMBS'] - - # ThirdPartySystems wiki page name - CI_WIKI_NAME = "Coho_Data_CI" - - def __init__(self, *args, **kwargs): - super(CohoDriver, self).__init__(*args, **kwargs) - self.configuration.append_config_values(coho_opts) - self._backend_name = (self.configuration.volume_backend_name or - self.__class__.__name__) - - def _get_rpcclient(self, addr, port): - return CohoRPCClient(addr, port) - - def do_setup(self, context): - """Any initialization the volume driver does while starting.""" - super(CohoDriver, self).do_setup(context) - self._execute_as_root = True - self._context = context - - config = self.configuration.coho_rpc_port - if not config: - msg = _("Coho rpc port is not configured") - LOG.warning(msg) - raise exception.CohoException(msg) - if config < 1 or config > 65535: - msg = (_("Invalid port number %(config)s for Coho rpc port") % - {'config': config}) - LOG.warning(msg) - raise exception.CohoException(msg) - - def _do_clone_volume(self, volume, src): - """Clone volume to source. - - Create a volume on given remote share with the same contents - as the specified source. 
- """ - volume_path = self.local_path(volume) - source_path = self.local_path(src) - - self._execute('cp', source_path, volume_path, - run_as_root=self._execute_as_root) - - qos = self._retrieve_qos_setting(volume) - self._do_set_qos_policy(volume, qos) - - def _get_volume_location(self, volume_id): - """Returns provider location for given volume.""" - - # The driver should not directly access db, but since volume is not - # passed in create_snapshot and delete_snapshot we are forced to read - # the volume info from the database - volume = self.db.volume_get(self._context, volume_id) - addr, path = volume.provider_location.split(":") - return addr, path - - def _do_set_qos_policy(self, volume, qos): - if qos: - addr, path = volume['provider_location'].split(':') - volume_path = os.path.join(path, volume['name']) - - client = self._get_rpcclient(addr, - self.configuration.coho_rpc_port) - client.set_qos_policy(volume_path, qos) - - def _get_qos_by_volume_type(self, ctxt, type_id): - qos = {} - - # NOTE(bardia): we only honor qos_specs - if type_id: - volume_type = volume_types.get_volume_type(ctxt, type_id) - qos_specs_id = volume_type.get('qos_specs_id') - - if qos_specs_id is not None: - kvs = qos_specs.get_qos_specs(ctxt, qos_specs_id)['specs'] - qos['uuid'] = qos_specs_id - else: - kvs = {} - - for key, value in kvs.items(): - if key in self.COHO_QOS_KEYS: - qos[key] = int(value) - return qos - - def _retrieve_qos_setting(self, volume): - ctxt = context.get_admin_context() - type_id = volume['volume_type_id'] - - return self._get_qos_by_volume_type(ctxt, type_id) - - def create_volume(self, volume): - resp = super(CohoDriver, self).create_volume(volume) - qos = self._retrieve_qos_setting(volume) - self._do_set_qos_policy(volume, qos) - - return resp - - def create_snapshot(self, snapshot): - """Create a volume snapshot.""" - addr, path = self._get_volume_location(snapshot['volume_id']) - volume_path = os.path.join(path, snapshot['volume_name']) - snapshot_name = 
snapshot['name'] - flags = 0 # unused at this time - client = self._get_rpcclient(addr, self.configuration.coho_rpc_port) - client.create_snapshot(volume_path, snapshot_name, flags) - - def delete_snapshot(self, snapshot): - """Delete a volume snapshot.""" - addr, unused = self._get_volume_location(snapshot['volume_id']) - snapshot_name = snapshot['name'] - client = self._get_rpcclient(addr, self.configuration.coho_rpc_port) - client.delete_snapshot(snapshot_name) - - def create_volume_from_snapshot(self, volume, snapshot): - """Create a volume from a snapshot.""" - volume['provider_location'] = self._find_share(volume) - addr, path = volume['provider_location'].split(":") - volume_path = os.path.join(path, volume['name']) - snapshot_name = snapshot['name'] - - client = self._get_rpcclient(addr, self.configuration.coho_rpc_port) - client.create_volume_from_snapshot(snapshot_name, volume_path) - - qos = self._retrieve_qos_setting(volume) - self._do_set_qos_policy(volume, qos) - - return {'provider_location': volume['provider_location']} - - def _extend_file_sparse(self, path, size): - """Extend the size of a file (with no additional disk usage).""" - self._execute('truncate', '-s', '%sG' % size, - path, run_as_root=self._execute_as_root) - - def create_cloned_volume(self, volume, src_vref): - volume['provider_location'] = self._find_share(volume) - - self._do_clone_volume(volume, src_vref) - - if volume['size'] > src_vref['size']: - self.extend_volume(volume, volume['size']) - - def extend_volume(self, volume, new_size): - """Extend the specified file to the new_size (sparsely).""" - volume_path = self.local_path(volume) - - self._extend_file_sparse(volume_path, new_size) - - def retype(self, ctxt, volume, new_type, diff, host): - """Convert the volume to be of the new type. - - Changes the volume's QoS policy if needed. 
- """ - qos = self._get_qos_by_volume_type(ctxt, new_type['id']) - - # Reset the QoS policy on the volume in case the previous - # type had a QoS policy - if not qos: - qos = COHO_NO_QOS - - self._do_set_qos_policy(volume, qos) - - return True, None - - def get_volume_stats(self, refresh=False): - """Pass in Coho Data information in volume stats.""" - _stats = super(CohoDriver, self).get_volume_stats(refresh) - _stats["vendor_name"] = 'Coho Data' - _stats["driver_version"] = VERSION - _stats["storage_protocol"] = 'NFS' - _stats["volume_backend_name"] = self._backend_name - _stats["total_capacity_gb"] = 'unknown' - _stats["free_capacity_gb"] = 'unknown' - _stats["export_paths"] = self._mounted_shares - _stats["QoS_support"] = True - - return _stats diff --git a/cinder/volume/drivers/coprhd/__init__.py b/cinder/volume/drivers/coprhd/__init__.py deleted file mode 100644 index e69de29bb..000000000 diff --git a/cinder/volume/drivers/coprhd/common.py b/cinder/volume/drivers/coprhd/common.py deleted file mode 100644 index 1c8072764..000000000 --- a/cinder/volume/drivers/coprhd/common.py +++ /dev/null @@ -1,1512 +0,0 @@ -# Copyright (c) 2016 EMC Corporation -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -import base64 -import binascii -import random -import string - -import eventlet -from oslo_config import cfg -from oslo_log import log as logging -from oslo_utils import encodeutils -from oslo_utils import excutils -from oslo_utils import units -import six - -from cinder import context -from cinder import exception -from cinder.i18n import _ -from cinder.objects import fields -from cinder.volume import configuration -from cinder.volume.drivers.coprhd.helpers import ( - authentication as coprhd_auth) -from cinder.volume.drivers.coprhd.helpers import ( - commoncoprhdapi as coprhd_utils) -from cinder.volume.drivers.coprhd.helpers import ( - consistencygroup as coprhd_cg) -from cinder.volume.drivers.coprhd.helpers import exportgroup as coprhd_eg -from cinder.volume.drivers.coprhd.helpers import host as coprhd_host -from cinder.volume.drivers.coprhd.helpers import snapshot as coprhd_snap -from cinder.volume.drivers.coprhd.helpers import tag as coprhd_tag - -from cinder.volume.drivers.coprhd.helpers import ( - virtualarray as coprhd_varray) -from cinder.volume.drivers.coprhd.helpers import volume as coprhd_vol -from cinder.volume import utils as volume_utils -from cinder.volume import volume_types - -LOG = logging.getLogger(__name__) - -MAX_RETRIES = 10 -INTERVAL_10_SEC = 10 - -volume_opts = [ - cfg.StrOpt('coprhd_hostname', - default=None, - help='Hostname for the CoprHD Instance'), - cfg.PortOpt('coprhd_port', - default=4443, - help='Port for the CoprHD Instance'), - cfg.StrOpt('coprhd_username', - default=None, - help='Username for accessing the CoprHD Instance'), - cfg.StrOpt('coprhd_password', - default=None, - help='Password for accessing the CoprHD Instance', - secret=True), - cfg.StrOpt('coprhd_tenant', - default=None, - help='Tenant to utilize within the CoprHD Instance'), - cfg.StrOpt('coprhd_project', - default=None, - help='Project to utilize within the CoprHD Instance'), - cfg.StrOpt('coprhd_varray', - default=None, - help='Virtual Array to utilize within 
the CoprHD Instance'), - cfg.BoolOpt('coprhd_emulate_snapshot', - default=False, - help='True | False to indicate if the storage array ' - 'in CoprHD is VMAX or VPLEX') -] - -CONF = cfg.CONF -CONF.register_opts(volume_opts, group=configuration.SHARED_CONF_GROUP) - -URI_VPOOL_VARRAY_CAPACITY = '/block/vpools/{0}/varrays/{1}/capacity' -URI_BLOCK_EXPORTS_FOR_INITIATORS = '/block/exports?initiators={0}' -EXPORT_RETRY_COUNT = 5 -MAX_DEFAULT_NAME_LENGTH = 128 -MAX_SNAPSHOT_NAME_LENGTH = 63 -MAX_CONSISTENCY_GROUP_NAME_LENGTH = 64 -MAX_SIO_LEN = 31 - - -def retry_wrapper(func): - def try_and_retry(*args, **kwargs): - retry = False - try: - return func(*args, **kwargs) - except coprhd_utils.CoprHdError as e: - # if we got an http error and - # the string contains 401 or if the string contains the word cookie - if (e.err_code == coprhd_utils.CoprHdError.HTTP_ERR and - (e.msg.find('401') != -1 or - e.msg.lower().find('cookie') != -1)): - retry = True - args[0].AUTHENTICATED = False - else: - exception_message = (_("\nCoprHD Exception: %(msg)s\n") % - {'msg': e.msg}) - LOG.exception(exception_message) - raise exception.VolumeBackendAPIException( - data=exception_message) - except Exception as exc: - exception_message = (_("\nGeneral Exception: %(exec_info)s\n") % - {'exec_info': - encodeutils.exception_to_unicode(exc)}) - LOG.exception(exception_message) - raise exception.VolumeBackendAPIException( - data=exception_message) - - if retry: - return func(*args, **kwargs) - - return try_and_retry - - -class EMCCoprHDDriverCommon(object): - - OPENSTACK_TAG = 'OpenStack' - - def __init__(self, protocol, default_backend_name, configuration=None): - self.AUTHENTICATED = False - self.protocol = protocol - self.configuration = configuration - self.configuration.append_config_values(volume_opts) - - self.init_coprhd_api_components() - - self.stats = {'driver_version': '3.0.0.0', - 'free_capacity_gb': 'unknown', - 'reserved_percentage': '0', - 'storage_protocol': protocol, - 
'total_capacity_gb': 'unknown', - 'vendor_name': 'CoprHD', - 'volume_backend_name': - self.configuration.volume_backend_name or - default_backend_name} - - def init_coprhd_api_components(self): - - coprhd_utils.AUTH_TOKEN = None - - # instantiate coprhd api objects for later use - self.volume_obj = coprhd_vol.Volume( - self.configuration.coprhd_hostname, - self.configuration.coprhd_port) - - self.exportgroup_obj = coprhd_eg.ExportGroup( - self.configuration.coprhd_hostname, - self.configuration.coprhd_port) - - self.host_obj = coprhd_host.Host( - self.configuration.coprhd_hostname, - self.configuration.coprhd_port) - - self.varray_obj = coprhd_varray.VirtualArray( - self.configuration.coprhd_hostname, - self.configuration.coprhd_port) - - self.snapshot_obj = coprhd_snap.Snapshot( - self.configuration.coprhd_hostname, - self.configuration.coprhd_port) - - self.consistencygroup_obj = coprhd_cg.ConsistencyGroup( - self.configuration.coprhd_hostname, - self.configuration.coprhd_port) - - self.tag_obj = coprhd_tag.Tag( - self.configuration.coprhd_hostname, - self.configuration.coprhd_port) - - def check_for_setup_error(self): - # validate all of the coprhd_* configuration values - if self.configuration.coprhd_hostname is None: - message = _("coprhd_hostname is not set in cinder configuration") - raise exception.VolumeBackendAPIException(data=message) - - if self.configuration.coprhd_port is None: - message = _("coprhd_port is not set in cinder configuration") - raise exception.VolumeBackendAPIException(data=message) - - if self.configuration.coprhd_username is None: - message = _("coprhd_username is not set in cinder configuration") - raise exception.VolumeBackendAPIException(data=message) - - if self.configuration.coprhd_password is None: - message = _("coprhd_password is not set in cinder configuration") - raise exception.VolumeBackendAPIException(data=message) - - if self.configuration.coprhd_tenant is None: - message = _("coprhd_tenant is not set in cinder 
configuration") - raise exception.VolumeBackendAPIException(data=message) - - if self.configuration.coprhd_project is None: - message = _("coprhd_project is not set in cinder configuration") - raise exception.VolumeBackendAPIException(data=message) - - if self.configuration.coprhd_varray is None: - message = _("coprhd_varray is not set in cinder configuration") - raise exception.VolumeBackendAPIException(data=message) - - def authenticate_user(self): - # we should check to see if we are already authenticated before blindly - # doing it again - if self.AUTHENTICATED is False: - obj = coprhd_auth.Authentication( - self.configuration.coprhd_hostname, - self.configuration.coprhd_port) - - username = self.configuration.coprhd_username - password = self.configuration.coprhd_password - - coprhd_utils.AUTH_TOKEN = obj.authenticate_user(username, - password) - self.AUTHENTICATED = True - - def create_volume(self, vol, driver, truncate_name=False): - self.authenticate_user() - name = self._get_resource_name(vol, MAX_DEFAULT_NAME_LENGTH, - truncate_name) - size = int(vol.size) * units.Gi - - vpool = self._get_vpool(vol) - self.vpool = vpool['CoprHD:VPOOL'] - - try: - coprhd_cgid = None - try: - if vol.group_id: - if volume_utils.is_group_a_cg_snapshot_type(vol.group): - coprhd_cgid = self._get_coprhd_cgid(vol.group_id) - except KeyError: - coprhd_cgid = None - except AttributeError: - coprhd_cgid = None - - full_project_name = ("%s/%s" % (self.configuration.coprhd_tenant, - self.configuration.coprhd_project) - ) - self.volume_obj.create(full_project_name, name, size, - self.configuration.coprhd_varray, - self.vpool, - # no longer specified in volume creation - sync=True, - # no longer specified in volume creation - consistencygroup=coprhd_cgid) - - except coprhd_utils.CoprHdError as e: - coprhd_err_msg = (_("Volume %(name)s: create failed\n%(err)s") % - {'name': name, 'err': six.text_type(e.msg)}) - - log_err_msg = ("Volume : %s creation failed" % name) - 
self._raise_or_log_exception( - e.err_code, coprhd_err_msg, log_err_msg) - - @retry_wrapper - def create_consistencygroup(self, context, group, truncate_name=False): - self.authenticate_user() - name = self._get_resource_name(group, - MAX_CONSISTENCY_GROUP_NAME_LENGTH, - truncate_name) - - try: - self.consistencygroup_obj.create( - name, - self.configuration.coprhd_project, - self.configuration.coprhd_tenant) - - cg_uri = self.consistencygroup_obj.consistencygroup_query( - name, - self.configuration.coprhd_project, - self.configuration.coprhd_tenant) - - self.set_tags_for_resource( - coprhd_cg.ConsistencyGroup.URI_CONSISTENCY_GROUP_TAGS, - cg_uri, group) - - except coprhd_utils.CoprHdError as e: - coprhd_err_msg = (_("Consistency Group %(name)s:" - " create failed\n%(err)s") % - {'name': name, 'err': six.text_type(e.msg)}) - - log_err_msg = ("Consistency Group : %s creation failed" % - name) - self._raise_or_log_exception(e.err_code, coprhd_err_msg, - log_err_msg) - - @retry_wrapper - def update_consistencygroup(self, group, add_volumes, - remove_volumes): - self.authenticate_user() - model_update = {'status': fields.GroupStatus.AVAILABLE} - cg_uri = self._get_coprhd_cgid(group.id) - add_volnames = [] - remove_volnames = [] - - try: - if add_volumes: - for vol in add_volumes: - vol_name = self._get_coprhd_volume_name(vol) - add_volnames.append(vol_name) - - if remove_volumes: - for vol in remove_volumes: - vol_name = self._get_coprhd_volume_name(vol) - remove_volnames.append(vol_name) - - self.consistencygroup_obj.update( - cg_uri, - self.configuration.coprhd_project, - self.configuration.coprhd_tenant, - add_volnames, remove_volnames, True) - - return model_update, None, None - - except coprhd_utils.CoprHdError as e: - coprhd_err_msg = (_("Consistency Group %(cg_uri)s:" - " update failed\n%(err)s") % - {'cg_uri': cg_uri, 'err': six.text_type(e.msg)}) - - log_err_msg = ("Consistency Group : %s update failed" % - cg_uri) - self._raise_or_log_exception(e.err_code, 
coprhd_err_msg, - log_err_msg) - - @retry_wrapper - def delete_consistencygroup(self, context, group, volumes, - truncate_name=False): - self.authenticate_user() - name = self._get_resource_name(group, - MAX_CONSISTENCY_GROUP_NAME_LENGTH, - truncate_name) - volumes_model_update = [] - - try: - for vol in volumes: - try: - vol_name = self._get_coprhd_volume_name(vol) - full_project_name = "%s/%s" % ( - self.configuration.coprhd_tenant, - self.configuration.coprhd_project) - - self.volume_obj.delete(full_project_name, vol_name, - sync=True, - force_delete=True) - - update_item = {'id': vol.id, - 'status': - fields.GroupStatus.DELETED} - volumes_model_update.append(update_item) - - except exception.VolumeBackendAPIException: - update_item = {'id': vol.id, - 'status': fields.ConsistencyGroupStatus. - ERROR_DELETING} - - volumes_model_update.append(update_item) - - LOG.exception("Failed to delete the volume %s of CG.", - vol.name) - - self.consistencygroup_obj.delete( - name, - self.configuration.coprhd_project, - self.configuration.coprhd_tenant) - - model_update = {} - model_update['status'] = group.status - - return model_update, volumes_model_update - - except coprhd_utils.CoprHdError as e: - coprhd_err_msg = (_("Consistency Group %(name)s:" - " delete failed\n%(err)s") % - {'name': name, 'err': six.text_type(e.msg)}) - - log_err_msg = ("Consistency Group : %s deletion failed" % - name) - self._raise_or_log_exception(e.err_code, coprhd_err_msg, - log_err_msg) - - @retry_wrapper - def create_cgsnapshot(self, cgsnapshot, snapshots, truncate_name=False): - self.authenticate_user() - - snapshots_model_update = [] - cgsnapshot_name = self._get_resource_name(cgsnapshot, - MAX_SNAPSHOT_NAME_LENGTH, - truncate_name) - - cg_id = None - cg_group = None - - try: - cg_id = cgsnapshot.group_id - cg_group = cgsnapshot.group - except AttributeError: - pass - - cg_name = None - coprhd_cgid = None - - if cg_id: - coprhd_cgid = self._get_coprhd_cgid(cg_id) - cg_name = 
self._get_consistencygroup_name(cg_group) - - LOG.info('Start to create cgsnapshot for consistency group' - ': %(group_name)s', - {'group_name': cg_name}) - - try: - self.snapshot_obj.snapshot_create( - 'block', - 'consistency-groups', - coprhd_cgid, - cgsnapshot_name, - False, - True) - - for snapshot in snapshots: - vol_id_of_snap = snapshot.volume_id - - # Finding the volume in CoprHD for this volume id - tagname = "OpenStack:id:" + vol_id_of_snap - rslt = coprhd_utils.search_by_tag( - coprhd_vol.Volume.URI_SEARCH_VOLUMES_BY_TAG.format( - tagname), - self.configuration.coprhd_hostname, - self.configuration.coprhd_port) - - if not rslt: - continue - - vol_uri = rslt[0] - - snapshots_of_volume = self.snapshot_obj.snapshot_list_uri( - 'block', - 'volumes', - vol_uri) - - for snapUri in snapshots_of_volume: - snapshot_obj = self.snapshot_obj.snapshot_show_uri( - 'block', - vol_uri, - snapUri['id']) - - if not coprhd_utils.get_node_value(snapshot_obj, - 'inactive'): - - # Creating snapshot for a consistency group. - # When we create a consistency group snapshot on - # coprhd then each snapshot of volume in the - # consistencygroup will be given a subscript. Ex if - # the snapshot name is cgsnap1 and lets say there are - # three vols(a,b,c) in CG. Then the names of snapshots - # of the volumes in cg on coprhd end will be like - # cgsnap1-1 cgsnap1-2 cgsnap1-3. So, we list the - # snapshots of the volume under consideration and then - # split the name using - from the ending as prefix - # and postfix. We compare the prefix to the cgsnapshot - # name and filter our the snapshots that correspond to - # the cgsnapshot - - if '-' in snapshot_obj['name']: - (prefix, postfix) = snapshot_obj[ - 'name'].rsplit('-', 1) - - if cgsnapshot_name == prefix: - self.set_tags_for_resource( - coprhd_snap.Snapshot. 
- URI_BLOCK_SNAPSHOTS_TAG, - snapUri['id'], - snapshot) - - elif cgsnapshot_name == snapshot_obj['name']: - self.set_tags_for_resource( - coprhd_snap.Snapshot.URI_BLOCK_SNAPSHOTS_TAG, - snapUri['id'], - snapshot) - - snapshot['status'] = fields.SnapshotStatus.AVAILABLE - snapshots_model_update.append( - {'id': snapshot.id, 'status': - fields.SnapshotStatus.AVAILABLE}) - - model_update = {'status': fields.ConsistencyGroupStatus.AVAILABLE} - - return model_update, snapshots_model_update - - except coprhd_utils.CoprHdError as e: - coprhd_err_msg = (_("Snapshot for Consistency Group %(cg_name)s:" - " create failed\n%(err)s") % - {'cg_name': cg_name, - 'err': six.text_type(e.msg)}) - - log_err_msg = ("Snapshot %(name)s for Consistency" - " Group: %(cg_name)s creation failed" % - {'cg_name': cg_name, - 'name': cgsnapshot_name}) - self._raise_or_log_exception(e.err_code, coprhd_err_msg, - log_err_msg) - - @retry_wrapper - def delete_cgsnapshot(self, cgsnapshot, snapshots, truncate_name=False): - self.authenticate_user() - cgsnapshot_id = cgsnapshot.id - cgsnapshot_name = self._get_resource_name(cgsnapshot, - MAX_SNAPSHOT_NAME_LENGTH, - truncate_name) - - snapshots_model_update = [] - - cg_id = None - cg_group = None - - try: - cg_id = cgsnapshot.group_id - cg_group = cgsnapshot.group - except AttributeError: - pass - - coprhd_cgid = self._get_coprhd_cgid(cg_id) - cg_name = self._get_consistencygroup_name(cg_group) - - model_update = {} - LOG.info('Delete cgsnapshot %(snap_name)s for consistency group: ' - '%(group_name)s', {'snap_name': cgsnapshot.name, - 'group_name': cg_name}) - - try: - uri = None - try: - uri = self.snapshot_obj.snapshot_query('block', - 'consistency-groups', - coprhd_cgid, - cgsnapshot_name + '-1') - except coprhd_utils.CoprHdError as e: - if e.err_code == coprhd_utils.CoprHdError.NOT_FOUND_ERR: - uri = self.snapshot_obj.snapshot_query( - 'block', - 'consistency-groups', - coprhd_cgid, - cgsnapshot_name) - self.snapshot_obj.snapshot_delete_uri( - 
'block', - coprhd_cgid, - uri, - True, - 0) - - for snapshot in snapshots: - snapshots_model_update.append( - {'id': snapshot.id, - 'status': fields.SnapshotStatus.DELETED}) - - return model_update, snapshots_model_update - - except coprhd_utils.CoprHdError as e: - coprhd_err_msg = (_("Snapshot %(cgsnapshot_id)s: for" - " Consistency Group %(cg_name)s: delete" - " failed\n%(err)s") % - {'cgsnapshot_id': cgsnapshot_id, - 'cg_name': cg_name, - 'err': six.text_type(e.msg)}) - - log_err_msg = ("Snapshot %(name)s for Consistency" - " Group: %(cg_name)s deletion failed" % - {'cg_name': cg_name, - 'name': cgsnapshot_name}) - self._raise_or_log_exception(e.err_code, coprhd_err_msg, - log_err_msg) - - @retry_wrapper - def set_volume_tags(self, vol, exempt_tags=None, truncate_name=False): - if exempt_tags is None: - exempt_tags = [] - - self.authenticate_user() - name = self._get_resource_name(vol, - MAX_DEFAULT_NAME_LENGTH, - truncate_name) - full_project_name = ("%s/%s" % ( - self.configuration.coprhd_tenant, - self.configuration.coprhd_project)) - - vol_uri = self.volume_obj.volume_query(full_project_name, - name) - - self.set_tags_for_resource( - coprhd_vol.Volume.URI_TAG_VOLUME, vol_uri, vol, exempt_tags) - - @retry_wrapper - def set_tags_for_resource(self, uri, resource_id, resource, - exempt_tags=None): - if exempt_tags is None: - exempt_tags = [] - - self.authenticate_user() - - # first, get the current tags that start with the OPENSTACK_TAG - # eyecatcher - formattedUri = uri.format(resource_id) - remove_tags = [] - currentTags = self.tag_obj.list_tags(formattedUri) - for cTag in currentTags: - if cTag.startswith(self.OPENSTACK_TAG): - remove_tags.append(cTag) - - try: - if remove_tags: - self.tag_obj.tag_resource(uri, - resource_id, - None, - remove_tags) - except coprhd_utils.CoprHdError as e: - if e.err_code == coprhd_utils.CoprHdError.SOS_FAILURE_ERR: - LOG.debug("CoprHdError adding the tag:\n %s", e.msg) - - # now add the tags for the resource - add_tags = [] - 
# put all the openstack resource properties into the CoprHD resource - - try: - for prop, value in vars(resource).items(): - try: - if prop in exempt_tags: - continue - - if prop.startswith("_"): - prop = prop.replace("_", '', 1) - - # don't put the status in, it's always the status before - # the current transaction - if ((not prop.startswith("status") and not - prop.startswith("obj_status") and - prop != "obj_volume") and value): - tag = ("%s:%s:%s" % - (self.OPENSTACK_TAG, prop, - six.text_type(value))) - - if len(tag) > 128: - tag = tag[0:128] - add_tags.append(tag) - except TypeError: - LOG.error( - "Error tagging the resource property %s", prop) - except TypeError: - LOG.error("Error tagging the resource properties") - - try: - self.tag_obj.tag_resource( - uri, - resource_id, - add_tags, - None) - except coprhd_utils.CoprHdError as e: - if e.err_code == coprhd_utils.CoprHdError.SOS_FAILURE_ERR: - LOG.debug( - "Adding the tag failed. CoprHdError: %s", e.msg) - - return self.tag_obj.list_tags(formattedUri) - - @retry_wrapper - def create_cloned_volume(self, vol, src_vref, truncate_name=False): - """Creates a clone of the specified volume.""" - self.authenticate_user() - name = self._get_resource_name(vol, - MAX_DEFAULT_NAME_LENGTH, - truncate_name) - srcname = self._get_coprhd_volume_name(src_vref) - - try: - if src_vref.group_id: - raise coprhd_utils.CoprHdError( - coprhd_utils.CoprHdError.SOS_FAILURE_ERR, - _("Clone can't be taken individually on a volume" - " that is part of a Consistency Group")) - except KeyError as e: - pass - except AttributeError: - pass - try: - (storageres_type, - storageres_typename) = self.volume_obj.get_storageAttributes( - srcname, None, None) - - resource_id = self.volume_obj.storage_resource_query( - storageres_type, - srcname, - None, - None, - self.configuration.coprhd_project, - self.configuration.coprhd_tenant) - - self.volume_obj.clone( - name, - resource_id, - sync=True) - - full_project_name = "%s/%s" % ( - 
self.configuration.coprhd_tenant, - self.configuration.coprhd_project) - - detachable = self.volume_obj.is_volume_detachable( - full_project_name, name) - LOG.debug("Is volume detachable : %s", detachable) - - # detach it from the source volume immediately after creation - if detachable: - self.volume_obj.volume_clone_detach( - "", full_project_name, name, True) - - except IndexError: - LOG.exception("Volume clone detach returned empty task list") - - except coprhd_utils.CoprHdError as e: - coprhd_err_msg = (_("Volume %(name)s: clone failed\n%(err)s") % - {'name': name, 'err': six.text_type(e.msg)}) - - log_err_msg = ("Volume : {%s} clone failed" % name) - self._raise_or_log_exception(e.err_code, coprhd_err_msg, - log_err_msg) - - src_vol_size = 0 - dest_vol_size = 0 - - try: - src_vol_size = src_vref.size - except AttributeError: - src_vol_size = src_vref.volume_size - - try: - dest_vol_size = vol.size - except AttributeError: - dest_vol_size = vol.volume_size - - if dest_vol_size > src_vol_size: - size_in_bytes = coprhd_utils.to_bytes("%sG" % dest_vol_size) - try: - self.volume_obj.expand( - ("%s/%s" % (self.configuration.coprhd_tenant, - self.configuration.coprhd_project)), name, - size_in_bytes, - True) - except coprhd_utils.CoprHdError as e: - coprhd_err_msg = (_("Volume %(volume_name)s: expand failed" - "\n%(err)s") % - {'volume_name': name, - 'err': six.text_type(e.msg)}) - - log_err_msg = ("Volume : %s expand failed" % name) - self._raise_or_log_exception(e.err_code, coprhd_err_msg, - log_err_msg) - - @retry_wrapper - def expand_volume(self, vol, new_size): - """expands the volume to new_size specified.""" - self.authenticate_user() - volume_name = self._get_coprhd_volume_name(vol) - size_in_bytes = coprhd_utils.to_bytes("%sG" % new_size) - - try: - self.volume_obj.expand( - ("%s/%s" % (self.configuration.coprhd_tenant, - self.configuration.coprhd_project)), volume_name, - size_in_bytes, - True) - except coprhd_utils.CoprHdError as e: - coprhd_err_msg = 
(_("Volume %(volume_name)s:" - " expand failed\n%(err)s") % - {'volume_name': volume_name, - 'err': six.text_type(e.msg)}) - - log_err_msg = ("Volume : %s expand failed" % - volume_name) - self._raise_or_log_exception(e.err_code, coprhd_err_msg, - log_err_msg) - - @retry_wrapper - def create_volume_from_snapshot(self, snapshot, volume, - truncate_name=False): - """Creates volume from given snapshot ( snapshot clone to volume ).""" - self.authenticate_user() - - if self.configuration.coprhd_emulate_snapshot: - self.create_cloned_volume(volume, snapshot, truncate_name) - return - - try: - if snapshot.group_snapshot_id: - raise coprhd_utils.CoprHdError( - coprhd_utils.CoprHdError.SOS_FAILURE_ERR, - _("Volume cannot be created individually from a snapshot " - "that is part of a Consistency Group")) - except AttributeError: - pass - - src_snapshot_name = None - src_vol_ref = snapshot.volume - new_volume_name = self._get_resource_name(volume, - MAX_DEFAULT_NAME_LENGTH, - truncate_name) - - try: - coprhd_vol_info = self._get_coprhd_volume_name( - src_vol_ref, True) - src_snapshot_name = self._get_coprhd_snapshot_name( - snapshot, coprhd_vol_info['volume_uri']) - - (storageres_type, - storageres_typename) = self.volume_obj.get_storageAttributes( - coprhd_vol_info['volume_name'], None, src_snapshot_name) - - resource_id = self.volume_obj.storage_resource_query( - storageres_type, - coprhd_vol_info['volume_name'], - None, - src_snapshot_name, - self.configuration.coprhd_project, - self.configuration.coprhd_tenant) - - self.volume_obj.clone( - new_volume_name, - resource_id, - sync=True) - - except coprhd_utils.CoprHdError as e: - coprhd_err_msg = (_("Snapshot %(src_snapshot_name)s:" - " clone failed\n%(err)s") % - {'src_snapshot_name': src_snapshot_name, - 'err': six.text_type(e.msg)}) - - log_err_msg = ("Snapshot : %s clone failed" % - src_snapshot_name) - self._raise_or_log_exception(e.err_code, coprhd_err_msg, - log_err_msg) - - if volume.size > snapshot.volume_size: - 
size_in_bytes = coprhd_utils.to_bytes("%sG" % volume.size) - - try: - self.volume_obj.expand( - ("%s/%s" % (self.configuration.coprhd_tenant, - self.configuration.coprhd_project)), - new_volume_name, size_in_bytes, True) - - except coprhd_utils.CoprHdError as e: - coprhd_err_msg = (_("Volume %(volume_name)s: expand failed" - "\n%(err)s") % - {'volume_name': new_volume_name, - 'err': six.text_type(e.msg)}) - - log_err_msg = ("Volume : %s expand failed" % - new_volume_name) - self._raise_or_log_exception(e.err_code, coprhd_err_msg, - log_err_msg) - - @retry_wrapper - def delete_volume(self, vol): - self.authenticate_user() - name = self._get_coprhd_volume_name(vol) - try: - full_project_name = ("%s/%s" % ( - self.configuration.coprhd_tenant, - self.configuration.coprhd_project)) - self.volume_obj.delete(full_project_name, name, sync=True) - except coprhd_utils.CoprHdError as e: - if e.err_code == coprhd_utils.CoprHdError.NOT_FOUND_ERR: - LOG.info( - "Volume %s" - " no longer exists; volume deletion is" - " considered successful.", name) - else: - coprhd_err_msg = (_("Volume %(name)s: delete failed" - "\n%(err)s") % - {'name': name, 'err': six.text_type(e.msg)}) - - log_err_msg = ("Volume : %s delete failed" % name) - self._raise_or_log_exception(e.err_code, coprhd_err_msg, - log_err_msg) - - @retry_wrapper - def create_snapshot(self, snapshot, truncate_name=False): - self.authenticate_user() - - volume = snapshot.volume - - try: - if volume.group_id: - raise coprhd_utils.CoprHdError( - coprhd_utils.CoprHdError.SOS_FAILURE_ERR, - _("Snapshot can't be taken individually on a volume" - " that is part of a Consistency Group")) - except KeyError: - LOG.info("No Consistency Group associated with the volume") - - if self.configuration.coprhd_emulate_snapshot: - self.create_cloned_volume(snapshot, volume, truncate_name) - self.set_volume_tags( - snapshot, ['_volume', '_obj_volume_type'], truncate_name) - return - - try: - snapshotname = self._get_resource_name(snapshot, - 
MAX_SNAPSHOT_NAME_LENGTH, - truncate_name) - vol = snapshot.volume - - volumename = self._get_coprhd_volume_name(vol) - projectname = self.configuration.coprhd_project - tenantname = self.configuration.coprhd_tenant - storageres_type = 'block' - storageres_typename = 'volumes' - resource_uri = self.snapshot_obj.storage_resource_query( - storageres_type, - volume_name=volumename, - cg_name=None, - project=projectname, - tenant=tenantname) - inactive = False - sync = True - self.snapshot_obj.snapshot_create( - storageres_type, - storageres_typename, - resource_uri, - snapshotname, - inactive, - sync) - - snapshot_uri = self.snapshot_obj.snapshot_query( - storageres_type, - storageres_typename, - resource_uri, - snapshotname) - - self.set_tags_for_resource( - coprhd_snap.Snapshot.URI_BLOCK_SNAPSHOTS_TAG, - snapshot_uri, snapshot, ['_volume']) - - except coprhd_utils.CoprHdError as e: - coprhd_err_msg = (_("Snapshot: %(snapshotname)s, create failed" - "\n%(err)s") % {'snapshotname': snapshotname, - 'err': six.text_type(e.msg)}) - - log_err_msg = ("Snapshot : %s create failed" % snapshotname) - self._raise_or_log_exception(e.err_code, coprhd_err_msg, - log_err_msg) - - @retry_wrapper - def delete_snapshot(self, snapshot): - self.authenticate_user() - - vol = snapshot.volume - - try: - if vol.group_id: - raise coprhd_utils.CoprHdError( - coprhd_utils.CoprHdError.SOS_FAILURE_ERR, - _("Snapshot delete can't be done individually on a volume" - " that is part of a Consistency Group")) - except KeyError: - LOG.info("No Consistency Group associated with the volume") - - if self.configuration.coprhd_emulate_snapshot: - self.delete_volume(snapshot) - return - - snapshotname = None - try: - volumename = self._get_coprhd_volume_name(vol) - projectname = self.configuration.coprhd_project - tenantname = self.configuration.coprhd_tenant - storageres_type = 'block' - storageres_typename = 'volumes' - resource_uri = self.snapshot_obj.storage_resource_query( - storageres_type, - 
volume_name=volumename, - cg_name=None, - project=projectname, - tenant=tenantname) - if resource_uri is None: - LOG.info( - "Snapshot %s" - " is not found; snapshot deletion" - " is considered successful.", snapshotname) - else: - snapshotname = self._get_coprhd_snapshot_name( - snapshot, resource_uri) - - self.snapshot_obj.snapshot_delete( - storageres_type, - storageres_typename, - resource_uri, - snapshotname, - sync=True) - except coprhd_utils.CoprHdError as e: - coprhd_err_msg = (_("Snapshot %s : Delete Failed\n") % - snapshotname) - - log_err_msg = ("Snapshot : %s delete failed" % snapshotname) - self._raise_or_log_exception(e.err_code, coprhd_err_msg, - log_err_msg) - - @retry_wrapper - def initialize_connection(self, volume, protocol, initiator_ports, - hostname): - - try: - self.authenticate_user() - volumename = self._get_coprhd_volume_name(volume) - foundgroupname = self._find_exportgroup(initiator_ports) - foundhostname = None - if foundgroupname is None: - for i in range(len(initiator_ports)): - # check if this initiator is contained in any CoprHD Host - # object - LOG.debug( - "checking for initiator port: %s", initiator_ports[i]) - foundhostname = self._find_host(initiator_ports[i]) - - if foundhostname: - LOG.info("Found host %s", foundhostname) - break - - if not foundhostname: - LOG.error("Auto host creation not supported") - # create an export group for this host - foundgroupname = foundhostname + 'SG' - # create a unique name - foundgroupname = foundgroupname + '-' + ''.join( - random.choice(string.ascii_uppercase + - string.digits) - for x in range(6)) - self.exportgroup_obj.exportgroup_create( - foundgroupname, - self.configuration.coprhd_project, - self.configuration.coprhd_tenant, - self.configuration.coprhd_varray, - 'Host', - foundhostname) - - LOG.debug( - "adding the volume to the exportgroup : %s", volumename) - - self.exportgroup_obj.exportgroup_add_volumes( - True, - foundgroupname, - self.configuration.coprhd_tenant, - None, - None, 
- None, - self.configuration.coprhd_project, - [volumename], - None, - None) - - return self._find_device_info(volume, initiator_ports) - - except coprhd_utils.CoprHdError as e: - raise coprhd_utils.CoprHdError( - coprhd_utils.CoprHdError.SOS_FAILURE_ERR, - (_("Attach volume (%(name)s) to host" - " (%(hostname)s) initiator (%(initiatorport)s)" - " failed:\n%(err)s") % - {'name': self._get_coprhd_volume_name( - volume), - 'hostname': hostname, - 'initiatorport': initiator_ports[0], - 'err': six.text_type(e.msg)}) - ) - - @retry_wrapper - def terminate_connection(self, volume, protocol, initiator_ports, - hostname): - try: - self.authenticate_user() - volumename = self._get_coprhd_volume_name(volume) - full_project_name = ("%s/%s" % (self.configuration.coprhd_tenant, - self.configuration.coprhd_project)) - voldetails = self.volume_obj.show(full_project_name, volumename) - volid = voldetails['id'] - - # find the exportgroups - exports = self.volume_obj.get_exports_by_uri(volid) - exportgroups = set() - itls = exports['itl'] - for itl in itls: - itl_port = itl['initiator']['port'] - if itl_port in initiator_ports: - exportgroups.add(itl['export']['id']) - - for exportgroup in exportgroups: - self.exportgroup_obj.exportgroup_remove_volumes_by_uri( - exportgroup, - volid, - True, - None, - None, - None, - None) - else: - LOG.info( - "No export group found for the host: %s" - "; this is considered already detached.", hostname) - - return itls - - except coprhd_utils.CoprHdError as e: - raise coprhd_utils.CoprHdError( - coprhd_utils.CoprHdError.SOS_FAILURE_ERR, - (_("Detaching volume %(volumename)s from host" - " %(hostname)s failed: %(err)s") % - {'volumename': volumename, - 'hostname': hostname, - 'err': six.text_type(e.msg)}) - ) - - @retry_wrapper - def _find_device_info(self, volume, initiator_ports): - """Returns device_info in list of itls having the matched initiator. 
- - (there could be multiple targets, hence a list): - [ - { - "hlu":9, - "initiator":{...,"port":"20:00:00:25:B5:49:00:22"}, - "export":{...}, - "device":{...,"wwn":"600601602B802D00B62236585D0BE311"}, - "target":{...,"port":"50:06:01:6A:46:E0:72:EF"}, - "san_zone_name":"..." - }, - { - "hlu":9, - "initiator":{...,"port":"20:00:00:25:B5:49:00:22"}, - "export":{...}, - "device":{...,"wwn":"600601602B802D00B62236585D0BE311"}, - "target":{...,"port":"50:06:01:62:46:E0:72:EF"}, - "san_zone_name":"..." - } - ] - """ - volumename = self._get_coprhd_volume_name(volume) - full_project_name = ("%s/%s" % (self.configuration.coprhd_tenant, - self.configuration.coprhd_project)) - vol_uri = self.volume_obj.volume_query(full_project_name, volumename) - - # The itl info shall be available at the first try since now export is - # a synchronous call. We are trying a few more times to accommodate - # any delay on filling in the itl info after the export task is - # completed. - - itls = [] - for x in range(MAX_RETRIES): - exports = self.volume_obj.get_exports_by_uri(vol_uri) - LOG.debug("Volume exports: ") - LOG.info(vol_uri) - LOG.debug(exports) - for itl in exports['itl']: - itl_port = itl['initiator']['port'] - if itl_port in initiator_ports: - found_device_number = itl['hlu'] - if (found_device_number is not None and - found_device_number != '-1'): - # 0 is a valid number for found_device_number. - # Only loop if it is None or -1 - LOG.debug("Found Device Number: %s", - found_device_number) - itls.append(itl) - - if itls: - break - else: - LOG.debug("Device Number not found yet." 
- " Retrying after 10 seconds...") - eventlet.sleep(INTERVAL_10_SEC) - - if itls is None: - # No device number found after 10 tries; return an empty itl - LOG.info( - "No device number has been found after 10 tries; " - "this likely indicates an unsuccessful attach of " - "volume volumename=%(volumename)s to" - " initiator initiator_ports=%(initiator_ports)s", - {'volumename': volumename, - 'initiator_ports': initiator_ports}) - - return itls - - def _get_coprhd_cgid(self, cgid): - tagname = self.OPENSTACK_TAG + ":id:" + cgid - rslt = coprhd_utils.search_by_tag( - coprhd_cg.ConsistencyGroup.URI_SEARCH_CONSISTENCY_GROUPS_BY_TAG. - format(tagname), - self.configuration.coprhd_hostname, - self.configuration.coprhd_port) - - # if the result is empty, then search with the tagname as - # "OpenStack:obj_id" the openstack attribute for id can be obj_id - # instead of id. this depends on the version - if rslt is None or len(rslt) == 0: - tagname = self.OPENSTACK_TAG + ":obj_id:" + cgid - rslt = coprhd_utils.search_by_tag( - coprhd_cg.ConsistencyGroup - .URI_SEARCH_CONSISTENCY_GROUPS_BY_TAG. 
- format(tagname), - self.configuration.coprhd_hostname, - self.configuration.coprhd_port) - - if len(rslt) > 0: - rslt_cg = self.consistencygroup_obj.show( - rslt[0], - self.configuration.coprhd_project, - self.configuration.coprhd_tenant) - return rslt_cg['id'] - else: - raise coprhd_utils.CoprHdError( - coprhd_utils.CoprHdError.NOT_FOUND_ERR, - (_("Consistency Group %s not found") % cgid)) - - def _get_consistencygroup_name(self, consisgrp): - return consisgrp.name - - def _get_coprhd_snapshot_name(self, snapshot, resUri): - tagname = self.OPENSTACK_TAG + ":id:" + snapshot['id'] - rslt = coprhd_utils.search_by_tag( - coprhd_snap.Snapshot.URI_SEARCH_SNAPSHOT_BY_TAG.format(tagname), - self.configuration.coprhd_hostname, - self.configuration.coprhd_port) - - # if the result is empty, then search with the tagname - # as "OpenStack:obj_id" - # as snapshots will be having the obj_id instead of just id. - if not rslt: - tagname = self.OPENSTACK_TAG + ":obj_id:" + snapshot['id'] - rslt = coprhd_utils.search_by_tag( - coprhd_snap.Snapshot.URI_SEARCH_SNAPSHOT_BY_TAG.format( - tagname), - self.configuration.coprhd_hostname, - self.configuration.coprhd_port) - - if rslt is None or len(rslt) == 0: - return snapshot['name'] - else: - rslt_snap = self.snapshot_obj.snapshot_show_uri( - 'block', - resUri, - rslt[0]) - return rslt_snap['name'] - - def _get_coprhd_volume_name(self, vol, verbose=False): - tagname = self.OPENSTACK_TAG + ":id:" + vol.id - rslt = coprhd_utils.search_by_tag( - coprhd_vol.Volume.URI_SEARCH_VOLUMES_BY_TAG.format(tagname), - self.configuration.coprhd_hostname, - self.configuration.coprhd_port) - - # if the result is empty, then search with the tagname - # as "OpenStack:obj_id" - # as snapshots will be having the obj_id instead of just id. 
- if len(rslt) == 0: - tagname = self.OPENSTACK_TAG + ":obj_id:" + vol.id - rslt = coprhd_utils.search_by_tag( - coprhd_vol.Volume.URI_SEARCH_VOLUMES_BY_TAG.format(tagname), - self.configuration.coprhd_hostname, - self.configuration.coprhd_port) - - if len(rslt) > 0: - rslt_vol = self.volume_obj.show_by_uri(rslt[0]) - - if verbose is True: - return {'volume_name': rslt_vol['name'], 'volume_uri': rslt[0]} - else: - return rslt_vol['name'] - else: - raise coprhd_utils.CoprHdError( - coprhd_utils.CoprHdError.NOT_FOUND_ERR, - (_("Volume %s not found") % vol['display_name'])) - - def _get_resource_name(self, resource, - max_name_cap=MAX_DEFAULT_NAME_LENGTH, - truncate_name=False): - # 36 refers to the length of UUID and +1 for '-' - permitted_name_length = max_name_cap - (36 + 1) - name = resource.display_name - if not name: - name = resource.name - - ''' - for scaleio, truncate_name will be true. We make sure the - total name is less than or equal to 31 characters. - _id_to_base64 will return a 24 character name''' - if truncate_name: - name = self._id_to_base64(resource.id) - return name - - elif len(name) > permitted_name_length: - ''' - The maximum length of resource name in CoprHD is 128. Hence we use - only first 91 characters of the resource name''' - return name[0:permitted_name_length] + "-" + resource.id - - else: - return name + "-" + resource.id - - def _get_vpool(self, volume): - vpool = {} - ctxt = context.get_admin_context() - type_id = volume.volume_type_id - if type_id is not None: - volume_type = volume_types.get_volume_type(ctxt, type_id) - specs = volume_type.get('extra_specs') - for key, value in specs.items(): - vpool[key] = value - - return vpool - - def _id_to_base64(self, id): - # Base64 encode the id to get a volume name less than 32 characters due - # to ScaleIO limitation. 
- name = six.text_type(id).replace("-", "") - try: - name = base64.b16decode(name.upper()) - except (TypeError, binascii.Error): - pass - encoded_name = name - if isinstance(encoded_name, six.text_type): - encoded_name = encoded_name.encode('utf-8') - encoded_name = base64.b64encode(encoded_name) - if six.PY3: - encoded_name = encoded_name.decode('ascii') - LOG.debug("Converted id %(id)s to scaleio name %(name)s.", - {'id': id, 'name': encoded_name}) - return encoded_name - - def _raise_or_log_exception(self, err_code, coprhd_err_msg, log_err_msg): - - if err_code == coprhd_utils.CoprHdError.SOS_FAILURE_ERR: - raise coprhd_utils.CoprHdError( - coprhd_utils.CoprHdError.SOS_FAILURE_ERR, - coprhd_err_msg) - else: - with excutils.save_and_reraise_exception(): - LOG.exception(log_err_msg) - - @retry_wrapper - def _find_exportgroup(self, initiator_ports): - """Find export group with initiator ports same as given initiators.""" - foundgroupname = None - grouplist = self.exportgroup_obj.exportgroup_list( - self.configuration.coprhd_project, - self.configuration.coprhd_tenant) - for groupid in grouplist: - groupdetails = self.exportgroup_obj.exportgroup_show( - groupid, - self.configuration.coprhd_project, - self.configuration.coprhd_tenant) - if groupdetails is not None: - if groupdetails['inactive']: - continue - initiators = groupdetails['initiators'] - if initiators is not None: - inits_eg = set() - for initiator in initiators: - inits_eg.add(initiator['initiator_port']) - - if inits_eg <= set(initiator_ports): - foundgroupname = groupdetails['name'] - if foundgroupname is not None: - # Check the associated varray - if groupdetails['varray']: - varray_uri = groupdetails['varray']['id'] - varray_details = self.varray_obj.varray_show( - varray_uri) - if varray_details['name'] == ( - self.configuration.coprhd_varray): - LOG.debug( - "Found exportgroup %s", - foundgroupname) - break - - # Not the right varray - foundgroupname = None - - return foundgroupname - - 
@retry_wrapper - def _find_host(self, initiator_port): - """Find the host, if exists, to which the given initiator belong.""" - foundhostname = None - hosts = self.host_obj.list_all(self.configuration.coprhd_tenant) - for host in hosts: - initiators = self.host_obj.list_initiators(host['id']) - for initiator in initiators: - if initiator_port == initiator['name']: - foundhostname = host['name'] - break - - if foundhostname is not None: - break - - return foundhostname - - @retry_wrapper - def get_exports_count_by_initiators(self, initiator_ports): - """Fetches ITL map for a given list of initiator ports.""" - comma_delimited_initiator_list = ",".join(initiator_ports) - (s, h) = coprhd_utils.service_json_request( - self.configuration.coprhd_hostname, - self.configuration.coprhd_port, "GET", - URI_BLOCK_EXPORTS_FOR_INITIATORS.format( - comma_delimited_initiator_list), - None) - - export_itl_maps = coprhd_utils.json_decode(s) - - if export_itl_maps is None: - return 0 - - itls = export_itl_maps['itl'] - return itls.__len__() - - @retry_wrapper - def update_volume_stats(self): - """Retrieve stats info.""" - LOG.debug("Updating volume stats") - self.authenticate_user() - - try: - self.stats['consistencygroup_support'] = True - self.stats['consistent_group_snapshot_enabled'] = True - vols = self.volume_obj.list_volumes( - self.configuration.coprhd_tenant + - "/" + - self.configuration.coprhd_project) - - vpairs = set() - if len(vols) > 0: - for vol in vols: - if vol: - vpair = (vol["vpool"]["id"], vol["varray"]["id"]) - if vpair not in vpairs: - vpairs.add(vpair) - - if len(vpairs) > 0: - free_gb = 0.0 - used_gb = 0.0 - for vpair in vpairs: - if vpair: - (s, h) = coprhd_utils.service_json_request( - self.configuration.coprhd_hostname, - self.configuration.coprhd_port, - "GET", - URI_VPOOL_VARRAY_CAPACITY.format(vpair[0], - vpair[1]), - body=None) - capacity = coprhd_utils.json_decode(s) - - free_gb += float(capacity["free_gb"]) - used_gb += float(capacity["used_gb"]) - - 
self.stats['free_capacity_gb'] = free_gb - self.stats['total_capacity_gb'] = free_gb + used_gb - self.stats['reserved_percentage'] = ( - self.configuration.reserved_percentage) - - return self.stats - - except coprhd_utils.CoprHdError: - with excutils.save_and_reraise_exception(): - LOG.exception("Update volume stats failed") - - @retry_wrapper - def retype(self, ctxt, volume, new_type, diff, host): - """changes the vpool type.""" - self.authenticate_user() - volume_name = self._get_coprhd_volume_name(volume) - vpool_name = new_type['extra_specs']['CoprHD:VPOOL'] - - try: - full_project_name = "%s/%s" % ( - self.configuration.coprhd_tenant, - self.configuration.coprhd_project) - - task = self.volume_obj.update( - full_project_name, - volume_name, - vpool_name) - - self.volume_obj.check_for_sync(task['task'][0], True) - return True - except coprhd_utils.CoprHdError as e: - coprhd_err_msg = (_("Volume %(volume_name)s: update failed" - "\n%(err)s") % {'volume_name': volume_name, - 'err': six.text_type(e.msg)}) - - log_err_msg = ("Volume : %s type update failed" % - volume_name) - self._raise_or_log_exception(e.err_code, coprhd_err_msg, - log_err_msg) diff --git a/cinder/volume/drivers/coprhd/fc.py b/cinder/volume/drivers/coprhd/fc.py deleted file mode 100644 index 3018e0685..000000000 --- a/cinder/volume/drivers/coprhd/fc.py +++ /dev/null @@ -1,268 +0,0 @@ -# Copyright (c) 2016 EMC Corporation -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- - -"""Driver for EMC CoprHD FC volumes.""" - -import re - -from oslo_log import log as logging - -from cinder import exception -from cinder.i18n import _ -from cinder import interface -from cinder.volume import driver -from cinder.volume.drivers.coprhd import common as coprhd_common -from cinder.volume import utils as volume_utils - -from cinder.zonemanager import utils as fczm_utils - -LOG = logging.getLogger(__name__) - - -@interface.volumedriver -class EMCCoprHDFCDriver(driver.FibreChannelDriver): - """CoprHD FC Driver.""" - VERSION = "3.0.0.0" - - # ThirdPartySystems wiki page - CI_WIKI_NAME = "EMC_CoprHD_CI" - - def __init__(self, *args, **kwargs): - super(EMCCoprHDFCDriver, self).__init__(*args, **kwargs) - self.common = self._get_common_driver() - - def _get_common_driver(self): - return coprhd_common.EMCCoprHDDriverCommon( - protocol='FC', - default_backend_name=self.__class__.__name__, - configuration=self.configuration) - - def check_for_setup_error(self): - self.common.check_for_setup_error() - - def create_volume(self, volume): - """Creates a Volume.""" - self.common.create_volume(volume, self) - self.common.set_volume_tags(volume, ['_obj_volume_type']) - - def create_cloned_volume(self, volume, src_vref): - """Creates a cloned Volume.""" - self.common.create_cloned_volume(volume, src_vref) - self.common.set_volume_tags(volume, ['_obj_volume_type']) - - def create_volume_from_snapshot(self, volume, snapshot): - """Creates a volume from a snapshot.""" - self.common.create_volume_from_snapshot(snapshot, volume) - self.common.set_volume_tags(volume, ['_obj_volume_type']) - - def extend_volume(self, volume, new_size): - """expands the size of the volume.""" - self.common.expand_volume(volume, new_size) - - def delete_volume(self, volume): - """Deletes a volume.""" - self.common.delete_volume(volume) - - def create_snapshot(self, snapshot): - """Creates a snapshot.""" - self.common.create_snapshot(snapshot) - - def delete_snapshot(self, snapshot): - 
"""Deletes a snapshot.""" - self.common.delete_snapshot(snapshot) - - def ensure_export(self, context, volume): - """Driver entry point to get the export info for an existing volume.""" - pass - - def create_export(self, context, volume, connector=None): - """Driver entry point to get the export info for a new volume.""" - pass - - def remove_export(self, context, volume): - """Driver entry point to remove an export for a volume.""" - pass - - def create_group(self, context, group): - """Creates a group.""" - if volume_utils.is_group_a_cg_snapshot_type(group): - return self.common.create_consistencygroup(context, group) - - # If the group is not consistency group snapshot enabled, then - # we shall rely on generic volume group implementation - raise NotImplementedError() - - def update_group(self, context, group, add_volumes=None, - remove_volumes=None): - """Updates volumes in group.""" - if volume_utils.is_group_a_cg_snapshot_type(group): - return self.common.update_consistencygroup(group, add_volumes, - remove_volumes) - - # If the group is not consistency group snapshot enabled, then - # we shall rely on generic volume group implementation - raise NotImplementedError() - - def create_group_from_src(self, ctxt, group, volumes, - group_snapshot=None, snapshots=None, - source_group=None, source_vols=None): - """Creates a group from source.""" - if volume_utils.is_group_a_cg_snapshot_type(group): - message = _("create group from source is not supported " - "for CoprHD if the group type supports " - "consistent group snapshot.") - raise exception.VolumeBackendAPIException(data=message) - else: - raise NotImplementedError() - - def delete_group(self, context, group, volumes): - """Deletes a group.""" - if volume_utils.is_group_a_cg_snapshot_type(group): - return self.common.delete_consistencygroup(context, group, volumes) - - # If the group is not consistency group snapshot enabled, then - # we shall rely on generic volume group implementation - raise 
NotImplementedError() - - def create_group_snapshot(self, context, group_snapshot, snapshots): - """Creates a group snapshot.""" - if volume_utils.is_group_a_cg_snapshot_type(group_snapshot): - return self.common.create_cgsnapshot(group_snapshot, snapshots) - - # If the group is not consistency group snapshot enabled, then - # we shall rely on generic volume group implementation - raise NotImplementedError() - - def delete_group_snapshot(self, context, group_snapshot, snapshots): - """Deletes a group snapshot.""" - if volume_utils.is_group_a_cg_snapshot_type(group_snapshot): - return self.common.delete_cgsnapshot(group_snapshot, snapshots) - - # If the group is not consistency group snapshot enabled, then - # we shall rely on generic volume group implementation - raise NotImplementedError() - - def check_for_export(self, context, volume_id): - """Make sure volume is exported.""" - pass - - @fczm_utils.add_fc_zone - def initialize_connection(self, volume, connector): - """Initializes the connection and returns connection info.""" - - properties = {} - properties['volume_id'] = volume.id - properties['target_discovered'] = False - properties['target_wwn'] = [] - - init_ports = self._build_initport_list(connector) - itls = self.common.initialize_connection(volume, 'FC', init_ports, - connector['host']) - - target_wwns = None - initiator_target_map = None - - if itls: - properties['target_lun'] = itls[0]['hlu'] - target_wwns, initiator_target_map = ( - self._build_initiator_target_map(itls, connector)) - - properties['target_wwn'] = target_wwns - properties['initiator_target_map'] = initiator_target_map - - auth = None - try: - auth = volume.provider_auth - except AttributeError: - pass - - if auth: - (auth_method, auth_username, auth_secret) = auth.split() - properties['auth_method'] = auth_method - properties['auth_username'] = auth_username - properties['auth_password'] = auth_secret - - LOG.debug('FC properties: %s', properties) - return { - 'driver_volume_type': 
'fibre_channel', - 'data': properties, - } - - @fczm_utils.remove_fc_zone - def terminate_connection(self, volume, connector, **kwargs): - """Driver entry point to detach a volume from an instance.""" - - init_ports = self._build_initport_list(connector) - itls = self.common.terminate_connection(volume, 'FC', init_ports, - connector['host']) - - volumes_count = self.common.get_exports_count_by_initiators(init_ports) - if volumes_count > 0: - # return empty data - data = {'driver_volume_type': 'fibre_channel', 'data': {}} - else: - target_wwns, initiator_target_map = ( - self._build_initiator_target_map(itls, connector)) - data = { - 'driver_volume_type': 'fibre_channel', - 'data': { - 'target_wwn': target_wwns, - 'initiator_target_map': initiator_target_map}} - - LOG.debug('Return FC data: %s', data) - return data - - def _build_initiator_target_map(self, itls, connector): - - target_wwns = [] - for itl in itls: - target_wwns.append(itl['target']['port'].replace(':', '').lower()) - - initiator_wwns = connector['wwpns'] - initiator_target_map = {} - for initiator in initiator_wwns: - initiator_target_map[initiator] = target_wwns - - return target_wwns, initiator_target_map - - def _build_initport_list(self, connector): - init_ports = [] - for i in range(len(connector['wwpns'])): - initiator_port = ':'.join(re.findall( - '..', - connector['wwpns'][i])).upper() # Add ":" every two digits - init_ports.append(initiator_port) - - return init_ports - - def get_volume_stats(self, refresh=False): - """Get volume status. - - If 'refresh' is True, run update the stats first. 
- """ - if refresh: - self.update_volume_stats() - - return self._stats - - def update_volume_stats(self): - """Retrieve stats info from virtual pool/virtual array.""" - LOG.debug("Updating volume stats") - self._stats = self.common.update_volume_stats() - - def retype(self, ctxt, volume, new_type, diff, host): - """Change the volume type.""" - return self.common.retype(ctxt, volume, new_type, diff, host) diff --git a/cinder/volume/drivers/coprhd/helpers/__init__.py b/cinder/volume/drivers/coprhd/helpers/__init__.py deleted file mode 100644 index e69de29bb..000000000 diff --git a/cinder/volume/drivers/coprhd/helpers/authentication.py b/cinder/volume/drivers/coprhd/helpers/authentication.py deleted file mode 100644 index c0d9f7c1b..000000000 --- a/cinder/volume/drivers/coprhd/helpers/authentication.py +++ /dev/null @@ -1,220 +0,0 @@ -# Copyright (c) 2016 EMC Corporation -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- - -try: - import cookielib as cookie_lib -except ImportError: - import http.cookiejar as cookie_lib -import socket - -import requests -from requests import exceptions -import six -from six.moves import http_client - -from cinder.i18n import _ -from cinder.volume.drivers.coprhd.helpers import commoncoprhdapi as common - - -class Authentication(common.CoprHDResource): - - # Commonly used URIs for the 'Authentication' module - URI_SERVICES_BASE = '' - URI_AUTHENTICATION = '/login' - - HEADERS = {'Content-Type': 'application/json', - 'ACCEPT': 'application/json', 'X-EMC-REST-CLIENT': 'TRUE'} - - def authenticate_user(self, username, password): - """Makes REST API call to generate the authentication token. - - Authentication token is generated for the specified user after - validation - - :param username: Name of the user - :param password: Password for the user - :returns: The authtoken - """ - - SEC_REDIRECT = 302 - SEC_AUTHTOKEN_HEADER = 'X-SDS-AUTH-TOKEN' - LB_API_PORT = 4443 - # Port on which load-balancer/reverse-proxy listens to all incoming - # requests for CoprHD REST APIs - APISVC_PORT = 8443 # Port on which apisvc listens to incoming requests - - cookiejar = cookie_lib.LWPCookieJar() - - url = ('https://%(ip)s:%(port)d%(uri)s' % - {'ip': self.ipaddr, 'port': self.port, - 'uri': self.URI_AUTHENTICATION}) - - try: - if self.port == APISVC_PORT: - login_response = requests.get( - url, headers=self.HEADERS, verify=False, - auth=(username, password), cookies=cookiejar, - allow_redirects=False, timeout=common.TIMEOUT_SEC) - if login_response.status_code == SEC_REDIRECT: - location = login_response.headers['Location'] - if not location: - raise common.CoprHdError( - common.CoprHdError.HTTP_ERR, (_("The redirect" - " location of the" - " authentication" - " service is not" - " provided"))) - # Make the second request - login_response = requests.get( - location, headers=self.HEADERS, verify=False, - cookies=cookiejar, allow_redirects=False, - 
timeout=common.TIMEOUT_SEC) - if (login_response.status_code != - http_client.UNAUTHORIZED): - raise common.CoprHdError( - common.CoprHdError.HTTP_ERR, (_("The" - " authentication" - " service failed" - " to reply with" - " 401"))) - - # Now provide the credentials - login_response = requests.get( - location, headers=self.HEADERS, - auth=(username, password), verify=False, - cookies=cookiejar, allow_redirects=False, - timeout=common.TIMEOUT_SEC) - if login_response.status_code != SEC_REDIRECT: - raise common.CoprHdError( - common.CoprHdError.HTTP_ERR, - (_("Access forbidden: Authentication required"))) - location = login_response.headers['Location'] - if not location: - raise common.CoprHdError( - common.CoprHdError.HTTP_ERR, - (_("The" - " authentication service failed to provide the" - " location of the service URI when redirecting" - " back"))) - authtoken = login_response.headers[SEC_AUTHTOKEN_HEADER] - if not authtoken: - details_str = self.extract_error_detail(login_response) - raise common.CoprHdError(common.CoprHdError.HTTP_ERR, - (_("The token is not" - " generated by" - " authentication service." 
- "%s") % - details_str)) - # Make the final call to get the page with the token - new_headers = self.HEADERS - new_headers[SEC_AUTHTOKEN_HEADER] = authtoken - login_response = requests.get( - location, headers=new_headers, verify=False, - cookies=cookiejar, allow_redirects=False, - timeout=common.TIMEOUT_SEC) - if login_response.status_code != http_client.OK: - raise common.CoprHdError( - common.CoprHdError.HTTP_ERR, (_( - "Login failure code: " - "%(statuscode)s Error: %(responsetext)s") % - {'statuscode': six.text_type( - login_response.status_code), - 'responsetext': login_response.text})) - elif self.port == LB_API_PORT: - login_response = requests.get( - url, headers=self.HEADERS, verify=False, - cookies=cookiejar, allow_redirects=False) - - if(login_response.status_code == - http_client.UNAUTHORIZED): - # Now provide the credentials - login_response = requests.get( - url, headers=self.HEADERS, auth=(username, password), - verify=False, cookies=cookiejar, allow_redirects=False) - authtoken = None - if SEC_AUTHTOKEN_HEADER in login_response.headers: - authtoken = login_response.headers[SEC_AUTHTOKEN_HEADER] - else: - raise common.CoprHdError( - common.CoprHdError.HTTP_ERR, - (_("Incorrect port number. Load balanced port is: " - "%(lb_api_port)s, api service port is: " - "%(apisvc_port)s") % - {'lb_api_port': LB_API_PORT, - 'apisvc_port': APISVC_PORT})) - - if not authtoken: - details_str = self.extract_error_detail(login_response) - raise common.CoprHdError( - common.CoprHdError.HTTP_ERR, - (_("The token is not generated by authentication service." 
- " %s") % details_str)) - - if login_response.status_code != http_client.OK: - error_msg = None - if login_response.status_code == http_client.UNAUTHORIZED: - error_msg = _("Access forbidden: Authentication required") - elif login_response.status_code == http_client.FORBIDDEN: - error_msg = _("Access forbidden: You don't have" - " sufficient privileges to perform" - " this operation") - elif (login_response.status_code == - http_client.INTERNAL_SERVER_ERROR): - error_msg = _("Bourne internal server error") - elif login_response.status_code == http_client.NOT_FOUND: - error_msg = _( - "Requested resource is currently unavailable") - elif (login_response.status_code == - http_client.METHOD_NOT_ALLOWED): - error_msg = (_("GET method is not supported by resource:" - " %s"), - url) - elif (login_response.status_code == - http_client.SERVICE_UNAVAILABLE): - error_msg = _("Service temporarily unavailable:" - " The server is temporarily unable" - " to service your request") - else: - error_msg = login_response.text - raise common.CoprHdError(common.CoprHdError.HTTP_ERR, - (_("HTTP code: %(status_code)s" - ", response: %(reason)s" - " [%(error_msg)s]") % { - 'status_code': six.text_type( - login_response.status_code), - 'reason': six.text_type( - login_response.reason), - 'error_msg': six.text_type( - error_msg) - })) - except (exceptions.SSLError, socket.error, exceptions.ConnectionError, - exceptions.Timeout) as e: - raise common.CoprHdError( - common.CoprHdError.HTTP_ERR, six.text_type(e)) - - return authtoken - - def extract_error_detail(self, login_response): - details_str = "" - try: - if login_response.content: - json_object = common.json_decode(login_response.content) - if 'details' in json_object: - details_str = json_object['details'] - - return details_str - except common.CoprHdError: - return details_str diff --git a/cinder/volume/drivers/coprhd/helpers/commoncoprhdapi.py b/cinder/volume/drivers/coprhd/helpers/commoncoprhdapi.py deleted file mode 100644 index 
1a04ef536..000000000 --- a/cinder/volume/drivers/coprhd/helpers/commoncoprhdapi.py +++ /dev/null @@ -1,522 +0,0 @@ -# Copyright (c) 2016 EMC Corporation -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -"""Contains some commonly used utility methods.""" -try: - import cookielib as cookie_lib -except ImportError: - import http.cookiejar as cookie_lib -import json -import re -import socket - -import oslo_serialization -from oslo_utils import timeutils -from oslo_utils import units -import requests -from requests import exceptions -import six -from six.moves import http_client - -from cinder import exception -from cinder.i18n import _ -from cinder.volume.drivers.coprhd.helpers import urihelper - - -PROD_NAME = 'storageos' - -TIMEOUT_SEC = 20 # 20 SECONDS - -global AUTH_TOKEN -AUTH_TOKEN = None - -TASK_TIMEOUT = 300 - -URI_TASKS_BY_OPID = '/vdc/tasks/{0}' - - -def _decode_list(data): - rv = [] - for item in data: - if isinstance(item, six.text_type): - item = item.encode('utf-8') - elif isinstance(item, list): - item = _decode_list(item) - elif isinstance(item, dict): - item = _decode_dict(item) - rv.append(item) - return rv - - -def _decode_dict(data): - rv = {} - for key, value in data.items(): - if isinstance(key, six.text_type): - key = key.encode('utf-8') - if isinstance(value, six.text_type): - value = value.encode('utf-8') - elif isinstance(value, list): - value = _decode_list(value) - elif isinstance(value, dict): - value = 
_decode_dict(value) - rv[key] = value - return rv - - -def json_decode(rsp): - """Used to decode the JSON encoded response.""" - - try: - o = json.loads(rsp, object_hook=_decode_dict) - except ValueError: - raise CoprHdError(CoprHdError.VALUE_ERR, - (_("Failed to recognize JSON payload:\n[%s]") % rsp)) - return o - - -def service_json_request(ip_addr, port, http_method, uri, body, - contenttype='application/json', customheaders=None): - """Used to make an HTTP request and get the response. - - The message body is encoded in JSON format - - :param ip_addr: IP address or host name of the server - :param port: port number of the server on which it - is listening to HTTP requests - :param http_method: one of GET, POST, PUT, DELETE - :param uri: the request URI - :param body: the request payload - :returns: a tuple of two elements: (response body, response headers) - :raises CoprHdError: in case of HTTP errors with err_code 3 - """ - - SEC_AUTHTOKEN_HEADER = 'X-SDS-AUTH-TOKEN' - - headers = {'Content-Type': contenttype, - 'ACCEPT': 'application/json, application/octet-stream', - 'X-EMC-REST-CLIENT': 'TRUE'} - - if customheaders: - headers.update(customheaders) - - try: - protocol = "https://" - if port == 8080: - protocol = "http://" - url = protocol + ip_addr + ":" + six.text_type(port) + uri - - cookiejar = cookie_lib.LWPCookieJar() - headers[SEC_AUTHTOKEN_HEADER] = AUTH_TOKEN - - if http_method == 'GET': - response = requests.get(url, headers=headers, verify=False, - cookies=cookiejar) - elif http_method == 'POST': - response = requests.post(url, data=body, headers=headers, - verify=False, cookies=cookiejar) - elif http_method == 'PUT': - response = requests.put(url, data=body, headers=headers, - verify=False, cookies=cookiejar) - elif http_method == 'DELETE': - - response = requests.delete(url, headers=headers, verify=False, - cookies=cookiejar) - else: - raise CoprHdError(CoprHdError.HTTP_ERR, - (_("Unknown/Unsupported HTTP method: %s") % - http_method)) - - if 
(response.status_code == http_client.OK or - response.status_code == http_client.ACCEPTED): - return (response.text, response.headers) - - error_msg = None - if response.status_code == http_client.INTERNAL_SERVER_ERROR: - response_text = json_decode(response.text) - error_details = "" - if 'details' in response_text: - error_details = response_text['details'] - error_msg = (_("CoprHD internal server error. Error details: %s"), - error_details) - elif response.status_code == http_client.UNAUTHORIZED: - error_msg = _("Access forbidden: Authentication required") - elif response.status_code == http_client.FORBIDDEN: - error_msg = "" - error_details = "" - error_description = "" - - response_text = json_decode(response.text) - - if 'details' in response_text: - error_details = response_text['details'] - error_msg = (_("%(error_msg)s Error details:" - " %(error_details)s"), - {'error_msg': error_msg, - 'error_details': error_details - }) - elif 'description' in response_text: - error_description = response_text['description'] - error_msg = (_("%(error_msg)s Error description:" - " %(error_description)s"), - {'error_msg': error_msg, - 'error_description': error_description - }) - else: - error_msg = _("Access forbidden: You don't have" - " sufficient privileges to perform this" - " operation") - - elif response.status_code == http_client.NOT_FOUND: - error_msg = "Requested resource not found" - elif response.status_code == http_client.METHOD_NOT_ALLOWED: - error_msg = six.text_type(response.text) - elif response.status_code == http_client.SERVICE_UNAVAILABLE: - error_msg = "" - error_details = "" - error_description = "" - - response_text = json_decode(response.text) - - if 'code' in response_text: - errorCode = response_text['code'] - error_msg = "Error " + six.text_type(errorCode) - - if 'details' in response_text: - error_details = response_text['details'] - error_msg = error_msg + ": " + error_details - elif 'description' in response_text: - error_description = 
response_text['description'] - error_msg = error_msg + ": " + error_description - else: - error_msg = _("Service temporarily unavailable:" - " The server is temporarily unable to" - " service your request") - else: - error_msg = response.text - if isinstance(error_msg, six.text_type): - error_msg = error_msg.encode('utf-8') - raise CoprHdError(CoprHdError.HTTP_ERR, - (_("HTTP code: %(status_code)s" - ", %(reason)s" - " [%(error_msg)s]") % { - 'status_code': six.text_type( - response.status_code), - 'reason': six.text_type( - response.reason), - 'error_msg': six.text_type( - error_msg) - })) - except (CoprHdError, socket.error, exceptions.SSLError, - exceptions.ConnectionError, exceptions.TooManyRedirects, - exceptions.Timeout) as e: - raise CoprHdError(CoprHdError.HTTP_ERR, six.text_type(e)) - # TODO(Ravi) : Either following exception should have proper message or - # IOError should just be combined with the above statement - except IOError as e: - raise CoprHdError(CoprHdError.HTTP_ERR, six.text_type(e)) - - -def is_uri(name): - """Checks whether the name is a URI or not. - - :param name: Name of the resource - :returns: True if name is URI, False otherwise - """ - try: - (urn, prod, trailer) = name.split(':', 2) - return (urn == 'urn' and prod == PROD_NAME) - except Exception: - return False - - -def format_json_object(obj): - """Formats JSON object to make it readable by proper indentation. - - :param obj: JSON object - :returns: a string of formatted JSON object - """ - return oslo_serialization.jsonutils.dumps(obj, sort_keys=True, indent=3) - - -def get_parent_child_from_xpath(name): - """Returns the parent and child elements from XPath.""" - if '/' in name: - (pname, label) = name.rsplit('/', 1) - else: - pname = None - label = name - return (pname, label) - - -def to_bytes(in_str): - """Converts a size to bytes. 
- - :param in_str: a number suffixed with a unit: {number}{unit} - units supported: - K, KB, k or kb - kilobytes - M, MB, m or mb - megabytes - G, GB, g or gb - gigabytes - T, TB, t or tb - terabytes - :returns: number of bytes - None; if input is incorrect - """ - match = re.search('^([0-9]+)([a-zA-Z]{0,2})$', in_str) - - if not match: - return None - - unit = match.group(2).upper() - value = match.group(1) - - size_count = int(value) - if unit in ['K', 'KB']: - multiplier = int(units.Ki) - elif unit in ['M', 'MB']: - multiplier = int(units.Mi) - elif unit in ['G', 'GB']: - multiplier = int(units.Gi) - elif unit in ['T', 'TB']: - multiplier = int(units.Ti) - elif unit == "": - return size_count - else: - return None - - size_in_bytes = int(size_count * multiplier) - return size_in_bytes - - -def get_list(json_object, parent_node_name, child_node_name=None): - """Returns a list of values from child_node_name. - - If child_node is not given, then it will retrieve list from parent node - """ - if not json_object: - return [] - - return_list = [] - if isinstance(json_object[parent_node_name], list): - for detail in json_object[parent_node_name]: - if child_node_name: - return_list.append(detail[child_node_name]) - else: - return_list.append(detail) - else: - if child_node_name: - return_list.append(json_object[parent_node_name][child_node_name]) - else: - return_list.append(json_object[parent_node_name]) - - return return_list - - -def get_node_value(json_object, parent_node_name, child_node_name=None): - """Returns value of given child_node. 
- - If child_node is not given, then value of parent node is returned - returns None: If json_object or parent_node is not given, - If child_node is not found under parent_node - """ - if not json_object: - return None - - if not parent_node_name: - return None - - detail = json_object[parent_node_name] - if not child_node_name: - return detail - - return_value = None - - if child_node_name in detail: - return_value = detail[child_node_name] - else: - return_value = None - - return return_value - - -def format_err_msg_and_raise(operation_type, component, - error_message, error_code): - """Method to format error message. - - :param operation_type: create, update, add, etc - :param component: storagesystem, vpool, etc - :param error_code: Error code from the API call - :param error_message: Detailed error message - """ - - formated_err_msg = (_("Error: Failed to %(operation_type)s" - " %(component)s") % - {'operation_type': operation_type, - 'component': component - }) - if error_message.startswith("\"\'") and error_message.endswith("\'\""): - # stripping the first 2 and last 2 characters, which are quotes. - error_message = error_message[2:len(error_message) - 2] - - formated_err_msg = formated_err_msg + "\nReason:" + error_message - raise CoprHdError(error_code, formated_err_msg) - - -def search_by_tag(resource_search_uri, ipaddr, port): - """Fetches the list of resources with a given tag. 
- - :param resource_search_uri: The tag based search uri - Example: '/block/volumes/search?tag=tagexample1' - :param ipaddr: IP address of CoprHD host - :param port: Port number - """ - # check if the URI passed has both project and name parameters - str_uri = six.text_type(resource_search_uri) - if 'search' in str_uri and '?tag=' in str_uri: - # Get the project URI - - (s, h) = service_json_request( - ipaddr, port, "GET", - resource_search_uri, None) - - o = json_decode(s) - if not o: - return None - - resources = get_node_value(o, "resource") - - resource_uris = [] - for resource in resources: - resource_uris.append(resource["id"]) - return resource_uris - else: - raise CoprHdError(CoprHdError.VALUE_ERR, (_("Search URI %s" - " is not in the expected" - " format, it should end" - " with ?tag={0}") - % str_uri)) - - -# Blocks the operation until the task is complete/error out/timeout -def block_until_complete(component_type, - resource_uri, - task_id, - ipaddr, - port, - synctimeout=0): - - if not synctimeout: - synctimeout = TASK_TIMEOUT - t = timeutils.StopWatch(duration=synctimeout) - t.start() - while not t.expired(): - if component_type == 'block': - out = show_task_opid(task_id, ipaddr, port) - else: - out = get_task_by_resourceuri_and_taskId( - component_type, resource_uri, task_id, ipaddr, port) - - if out: - if out["state"] == "ready": - - # stop the timer and return - t.stop() - break - - # if the status of the task is 'error' then stop the timer - # and raise exception - if out["state"] == "error": - # stop the timer - t.stop() - error_message = "Please see logs for more details" - if ("service_error" in out and - "details" in out["service_error"]): - error_message = out["service_error"]["details"] - raise CoprHdError(CoprHdError.VALUE_ERR, - (_("Task: %(task_id)s" - " is failed with" - " error: %(error_message)s") % - {'task_id': task_id, - 'error_message': error_message - })) - - else: - raise CoprHdError(CoprHdError.TIME_OUT, - (_("Task did not 
complete in %d secs." - " Operation timed out. Task in CoprHD" - " will continue") % synctimeout)) - - return - - -def show_task_opid(taskid, ipaddr, port): - (s, h) = service_json_request( - ipaddr, port, - "GET", - URI_TASKS_BY_OPID.format(taskid), - None) - if (not s): - return None - o = json_decode(s) - return o - - -def get_task_by_resourceuri_and_taskId(component_type, resource_uri, - task_id, ipaddr, port): - """Returns the single task details.""" - - task_uri_constant = urihelper.singletonURIHelperInstance.getUri( - component_type, "task") - (s, h) = service_json_request( - ipaddr, port, "GET", - task_uri_constant.format(resource_uri, task_id), None) - if not s: - return None - o = json_decode(s) - return o - - -class CoprHdError(exception.VolumeBackendAPIException): - - """Custom exception class used to report logical errors. - - Attributes: - err_code - String error code - msg - String text - """ - SOS_FAILURE_ERR = 1 - CMD_LINE_ERR = 2 - HTTP_ERR = 3 - VALUE_ERR = 4 - NOT_FOUND_ERR = 1 - ENTRY_ALREADY_EXISTS_ERR = 5 - MAX_COUNT_REACHED = 6 - TIME_OUT = 7 - - def __init__(self, err_code, msg): - self.err_code = err_code - self.msg = msg - - def __str__(self): - return repr(self.msg) - - -class CoprHDResource(object): - - def __init__(self, ipaddr, port): - """Constructor: takes IP address and port of the CoprHD instance. - - These are needed to make http requests for REST API - """ - self.ipaddr = ipaddr - self.port = port diff --git a/cinder/volume/drivers/coprhd/helpers/consistencygroup.py b/cinder/volume/drivers/coprhd/helpers/consistencygroup.py deleted file mode 100644 index 0723e070d..000000000 --- a/cinder/volume/drivers/coprhd/helpers/consistencygroup.py +++ /dev/null @@ -1,220 +0,0 @@ -# Copyright (c) 2016 EMC Corporation -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. 
You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import oslo_serialization - -from cinder.i18n import _ -from cinder.volume.drivers.coprhd.helpers import commoncoprhdapi as common -from cinder.volume.drivers.coprhd.helpers import project - - -class ConsistencyGroup(common.CoprHDResource): - - URI_CONSISTENCY_GROUP = "/block/consistency-groups" - URI_CONSISTENCY_GROUPS_INSTANCE = URI_CONSISTENCY_GROUP + "/{0}" - URI_CONSISTENCY_GROUPS_DEACTIVATE = (URI_CONSISTENCY_GROUPS_INSTANCE + - "/deactivate") - URI_CONSISTENCY_GROUPS_SEARCH = ( - '/block/consistency-groups/search?project={0}') - URI_SEARCH_CONSISTENCY_GROUPS_BY_TAG = ( - '/block/consistency-groups/search?tag={0}') - URI_CONSISTENCY_GROUP_TAGS = ( - '/block/consistency-groups/{0}/tags') - - def list(self, project_name, tenant): - """This function gives list of comma separated consistency group uris. 
- - :param project_name: Name of the project path - :param tenant: Name of the tenant - :returns: list of consistency group ids separated by comma - """ - if tenant is None: - tenant = "" - projobj = project.Project(self.ipaddr, self.port) - fullproj = tenant + "/" + project_name - projuri = projobj.project_query(fullproj) - - (s, h) = common.service_json_request( - self.ipaddr, self.port, "GET", - self.URI_CONSISTENCY_GROUPS_SEARCH.format(projuri), None) - o = common.json_decode(s) - if not o: - return [] - - congroups = [] - resources = common.get_node_value(o, "resource") - for resource in resources: - congroups.append(resource["id"]) - - return congroups - - def show(self, name, project, tenant): - """This function will display the consistency group with details. - - :param name : Name of the consistency group - :param project: Name of the project - :param tenant: Name of the tenant - :returns: details of consistency group - """ - uri = self.consistencygroup_query(name, project, tenant) - (s, h) = common.service_json_request( - self.ipaddr, self.port, "GET", - self.URI_CONSISTENCY_GROUPS_INSTANCE.format(uri), None) - o = common.json_decode(s) - if o['inactive']: - return None - return o - - def consistencygroup_query(self, name, project, tenant): - """This function will return consistency group id. 
- - :param name : Name/id of the consistency group - :param project: Name of the project - :param tenant: Name of the tenant - :returns: id of the consistency group - """ - if common.is_uri(name): - return name - - uris = self.list(project, tenant) - for uri in uris: - congroup = self.show(uri, project, tenant) - if congroup and congroup['name'] == name: - return congroup['id'] - raise common.CoprHdError(common.CoprHdError.NOT_FOUND_ERR, - (_("Consistency Group %s: not found") % name)) - - # Blocks the operation until the task is complete/error out/timeout - def check_for_sync(self, result, sync, synctimeout=0): - if len(result["resource"]) > 0: - resource = result["resource"] - return ( - common.block_until_complete("consistencygroup", resource["id"], - result["id"], self.ipaddr, - self.port, synctimeout) - ) - else: - raise common.CoprHdError( - common.CoprHdError.SOS_FAILURE_ERR, - _("error: task list is empty, no task response found")) - - def create(self, name, project_name, tenant): - """This function will create consistency group with the given name. - - :param name : Name of the consistency group - :param project_name: Name of the project path - :param tenant: Container tenant name - :returns: status of creation - """ - # check for existence of consistency group. 
- try: - status = self.show(name, project_name, tenant) - except common.CoprHdError as e: - if e.err_code == common.CoprHdError.NOT_FOUND_ERR: - if tenant is None: - tenant = "" - fullproj = tenant + "/" + project_name - projobj = project.Project(self.ipaddr, self.port) - projuri = projobj.project_query(fullproj) - - parms = {'name': name, 'project': projuri, } - body = oslo_serialization.jsonutils.dumps(parms) - - (s, h) = common.service_json_request( - self.ipaddr, self.port, "POST", - self.URI_CONSISTENCY_GROUP, body) - - o = common.json_decode(s) - return o - else: - raise - if status: - common.format_err_msg_and_raise( - "create", "consistency group", - (_("consistency group with name: %s already exists") % name), - common.CoprHdError.ENTRY_ALREADY_EXISTS_ERR) - - def delete(self, name, project, tenant, coprhdonly=False): - """This function marks a particular consistency group as delete. - - :param name: Name of the consistency group - :param project: Name of the project - :param tenant: Name of the tenant - :returns: status of the delete operation - false, incase it fails to do delete - """ - params = '' - if coprhdonly is True: - params += "?type=" + 'CoprHD_ONLY' - uri = self.consistencygroup_query(name, project, tenant) - (s, h) = common.service_json_request( - self.ipaddr, self.port, - "POST", - self.URI_CONSISTENCY_GROUPS_DEACTIVATE.format(uri) + params, - None) - return - - def update(self, uri, project, tenant, add_volumes, remove_volumes, - sync, synctimeout=0): - """Function used to add or remove volumes from consistency group. - - It will update the consistency group with given volumes - - :param uri : URI of the consistency group - :param project : Name of the project path - :param tenant : Container tenant name - :param add_volumes : volumes to be added to the consistency group - :param remove_volumes: volumes to be removed from CG - :param sync : synchronous request - :param synctimeout : Query for task status for "synctimeout" secs. 
- If the task doesn't complete in synctimeout - secs, an exception is thrown - :returns: status of creation - """ - if tenant is None: - tenant = "" - - parms = [] - add_voluris = [] - remove_voluris = [] - from cinder.volume.drivers.coprhd.helpers.volume import Volume - volobj = Volume(self.ipaddr, self.port) - if add_volumes: - for volname in add_volumes: - full_project_name = tenant + "/" + project - add_voluris.append( - volobj.volume_query(full_project_name, volname)) - volumes = {'volume': add_voluris} - parms = {'add_volumes': volumes} - - if remove_volumes: - for volname in remove_volumes: - full_project_name = tenant + "/" + project - remove_voluris.append( - volobj.volume_query(full_project_name, volname)) - volumes = {'volume': remove_voluris} - parms = {'remove_volumes': volumes} - - body = oslo_serialization.jsonutils.dumps(parms) - (s, h) = common.service_json_request( - self.ipaddr, self.port, "PUT", - self.URI_CONSISTENCY_GROUPS_INSTANCE.format(uri), - body) - - o = common.json_decode(s) - if sync: - return self.check_for_sync(o, sync, synctimeout) - else: - return o diff --git a/cinder/volume/drivers/coprhd/helpers/exportgroup.py b/cinder/volume/drivers/coprhd/helpers/exportgroup.py deleted file mode 100644 index fc36f9198..000000000 --- a/cinder/volume/drivers/coprhd/helpers/exportgroup.py +++ /dev/null @@ -1,303 +0,0 @@ -# Copyright (c) 2016 EMC Corporation -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -import oslo_serialization - -from cinder.i18n import _ -from cinder.volume.drivers.coprhd.helpers import commoncoprhdapi as common -from cinder.volume.drivers.coprhd.helpers import host -from cinder.volume.drivers.coprhd.helpers import project -from cinder.volume.drivers.coprhd.helpers import virtualarray -from cinder.volume.drivers.coprhd.helpers import volume - - -class ExportGroup(common.CoprHDResource): - - URI_EXPORT_GROUP = "/block/exports" - URI_EXPORT_GROUPS_SHOW = URI_EXPORT_GROUP + "/{0}" - URI_EXPORT_GROUP_SEARCH = '/block/exports/search' - URI_EXPORT_GROUP_UPDATE = '/block/exports/{0}' - - def exportgroup_remove_volumes_by_uri(self, exportgroup_uri, - volume_id_list, sync=False, - tenantname=None, projectname=None, - cg=None, synctimeout=0): - """Remove volumes from the exportgroup, given the uris of volume.""" - - volume_list = volume_id_list - parms = {} - - parms['volume_changes'] = self._remove_list(volume_list) - o = self.send_json_request(exportgroup_uri, parms) - return self.check_for_sync(o, sync, synctimeout) - - def _remove_list(self, uris): - resChanges = {} - if not isinstance(uris, list): - resChanges['remove'] = [uris] - else: - resChanges['remove'] = uris - return resChanges - - def send_json_request(self, exportgroup_uri, param): - body = oslo_serialization.jsonutils.dumps(param) - (s, h) = common.service_json_request( - self.ipaddr, self.port, "PUT", - self.URI_EXPORT_GROUP_UPDATE.format(exportgroup_uri), body) - return common.json_decode(s) - - def check_for_sync(self, result, sync, synctimeout=0): - if sync: - if len(result["resource"]) > 0: - resource = result["resource"] - return ( - common.block_until_complete("export", resource["id"], - result["id"], self.ipaddr, - self.port, synctimeout) - ) - else: - raise common.CoprHdError( - common.CoprHdError.SOS_FAILURE_ERR, _( - "error: task list is empty, no task response found")) - else: - return result - - def exportgroup_list(self, project_name, tenant): - """This function gives 
list of export group uris separated by comma. - - :param project_name: Name of the project path - :param tenant: Name of the tenant - :returns: list of export group ids separated by comma - """ - if tenant is None: - tenant = "" - projobj = project.Project(self.ipaddr, self.port) - fullproj = tenant + "/" + project_name - projuri = projobj.project_query(fullproj) - - uri = self.URI_EXPORT_GROUP_SEARCH - - if '?' in uri: - uri += '&project=' + projuri - else: - uri += '?project=' + projuri - - (s, h) = common.service_json_request(self.ipaddr, self.port, "GET", - uri, None) - o = common.json_decode(s) - if not o: - return [] - - exportgroups = [] - resources = common.get_node_value(o, "resource") - for resource in resources: - exportgroups.append(resource["id"]) - - return exportgroups - - def exportgroup_show(self, name, project, tenant, varray=None): - """This function displays the Export group with details. - - :param name: Name of the export group - :param project: Name of the project - :param tenant: Name of the tenant - :returns: Details of export group - """ - varrayuri = None - if varray: - varrayObject = virtualarray.VirtualArray( - self.ipaddr, self.port) - varrayuri = varrayObject.varray_query(varray) - uri = self.exportgroup_query(name, project, tenant, varrayuri) - (s, h) = common.service_json_request( - self.ipaddr, - self.port, - "GET", - self.URI_EXPORT_GROUPS_SHOW.format(uri), None) - o = common.json_decode(s) - if o['inactive']: - return None - - return o - - def exportgroup_create(self, name, project_name, tenant, varray, - exportgrouptype, export_destination=None): - """This function creates the Export group with given name. - - :param name: Name of the export group - :param project_name: Name of the project path - :param tenant: Container tenant name - :param varray: Name of the virtual array - :param exportgrouptype: Type of the export group. Ex:Host etc - :returns: status of creation - """ - # check for existence of export group. 
- try: - status = self.exportgroup_show(name, project_name, tenant) - except common.CoprHdError as e: - if e.err_code == common.CoprHdError.NOT_FOUND_ERR: - if tenant is None: - tenant = "" - - fullproj = tenant + "/" + project_name - projObject = project.Project(self.ipaddr, self.port) - projuri = projObject.project_query(fullproj) - - varrayObject = virtualarray.VirtualArray( - self.ipaddr, self.port) - nhuri = varrayObject.varray_query(varray) - - parms = { - 'name': name, - 'project': projuri, - 'varray': nhuri, - 'type': exportgrouptype - } - - if exportgrouptype and export_destination: - host_obj = host.Host(self.ipaddr, self.port) - host_uri = host_obj.query_by_name(export_destination) - parms['hosts'] = [host_uri] - - body = oslo_serialization.jsonutils.dumps(parms) - (s, h) = common.service_json_request(self.ipaddr, - self.port, "POST", - self.URI_EXPORT_GROUP, - body) - - o = common.json_decode(s) - return o - else: - raise - - if status: - raise common.CoprHdError( - common.CoprHdError.ENTRY_ALREADY_EXISTS_ERR, (_( - "Export group with name %s" - " already exists") % name)) - - def exportgroup_query(self, name, project, tenant, varrayuri=None): - """Makes REST API call to query the exportgroup by name. 
- - :param name: Name/id of the export group - :param project: Name of the project - :param tenant: Name of the tenant - :param varrayuri: URI of the virtual array - :returns: id of the export group - """ - if common.is_uri(name): - return name - - uris = self.exportgroup_list(project, tenant) - for uri in uris: - exportgroup = self.exportgroup_show(uri, project, tenant) - if exportgroup and exportgroup['name'] == name: - if varrayuri: - varrayobj = exportgroup['varray'] - if varrayobj['id'] == varrayuri: - return exportgroup['id'] - else: - continue - else: - return exportgroup['id'] - raise common.CoprHdError( - common.CoprHdError.NOT_FOUND_ERR, - (_("Export Group %s: not found") % name)) - - def exportgroup_add_volumes(self, sync, exportgroupname, tenantname, - maxpaths, minpaths, pathsperinitiator, - projectname, volumenames, - cg=None, synctimeout=0, varray=None): - """Add volume to export group. - - :param sync : synchronous request - :param exportgroupname : Name/id of the export group - :param tenantname : tenant name - :param maxpaths : Maximum number of paths - :param minpaths : Minimum number of paths - :param pathsperinitiator : Paths per initiator - :param projectname : name of project - :param volumenames : names of volumes that needs - to be added to exportgroup - :param cg : consistency group - :param synctimeout : Query for task status for "synctimeout" secs - If the task doesn't complete in synctimeout - secs, an exception is thrown - :param varray : Name of varray - :returns: action result - """ - varrayuri = None - if varray: - varrayObject = virtualarray.VirtualArray( - self.ipaddr, self.port) - varrayuri = varrayObject.varray_query(varray) - - exportgroup_uri = self.exportgroup_query(exportgroupname, - projectname, - tenantname, - varrayuri) - - # get volume uri - if tenantname is None: - tenantname = "" - # List of volumes - volume_list = [] - - if volumenames: - volume_list = self._get_resource_lun_tuple( - volumenames, "volumes", None, 
tenantname, - projectname, None) - - parms = {} - # construct the body - - volChanges = {} - volChanges['add'] = volume_list - parms['volume_changes'] = volChanges - - o = self.send_json_request(exportgroup_uri, parms) - return self.check_for_sync(o, sync, synctimeout) - - def _get_resource_lun_tuple(self, resources, resType, baseResUri, - tenantname, projectname, blockTypeName): - """Function to validate input volumes and return list of ids and luns. - - """ - copyEntries = [] - volumeObject = volume.Volume(self.ipaddr, self.port) - for copy in resources: - copyParam = [] - try: - copyParam = copy.split(":") - except Exception: - raise common.CoprHdError( - common.CoprHdError.CMD_LINE_ERR, - (_("Please provide valid format volume:" - " lun for parameter %s") % - resType)) - copy = dict() - if not len(copyParam): - raise common.CoprHdError( - common.CoprHdError.CMD_LINE_ERR, - (_("Please provide at least one volume for parameter %s") % - resType)) - if resType == "volumes": - full_project_name = tenantname + "/" + projectname - copy['id'] = volumeObject.volume_query( - full_project_name, copyParam[0]) - if len(copyParam) > 1: - copy['lun'] = copyParam[1] - copyEntries.append(copy) - return copyEntries diff --git a/cinder/volume/drivers/coprhd/helpers/host.py b/cinder/volume/drivers/coprhd/helpers/host.py deleted file mode 100644 index 8f6cb4b54..000000000 --- a/cinder/volume/drivers/coprhd/helpers/host.py +++ /dev/null @@ -1,93 +0,0 @@ -# Copyright (c) 2016 EMC Corporation -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the -# License for the specific language governing permissions and limitations -# under the License. - -from cinder.i18n import _ -from cinder.volume.drivers.coprhd.helpers import commoncoprhdapi as common -from cinder.volume.drivers.coprhd.helpers import tenant - - -class Host(common.CoprHDResource): - - # All URIs for the Host operations - URI_HOST_DETAILS = "/compute/hosts/{0}" - URI_HOST_LIST_INITIATORS = "/compute/hosts/{0}/initiators" - URI_COMPUTE_HOST = "/compute/hosts" - - def query_by_name(self, host_name, tenant_name=None): - """Search host matching host_name and tenant if tenant_name provided. - - tenant_name is optional - """ - hostList = self.list_all(tenant_name) - for host in hostList: - hostUri = host['id'] - hostDetails = self.show_by_uri(hostUri) - if hostDetails: - if hostDetails['name'] == host_name: - return hostUri - - raise common.CoprHdError(common.CoprHdError.NOT_FOUND_ERR, (_( - "Host with name: %s not found") % host_name)) - - def list_initiators(self, host_name): - """Lists all initiators for the given host. 
- - :param host_name: The name of the host - """ - if not common.is_uri(host_name): - hostUri = self.query_by_name(host_name, None) - else: - hostUri = host_name - - (s, h) = common.service_json_request( - self.ipaddr, self.port, "GET", - Host.URI_HOST_LIST_INITIATORS.format(hostUri), - None) - o = common.json_decode(s) - - if not o or "initiator" not in o: - return [] - - return common.get_node_value(o, 'initiator') - - def list_all(self, tenant_name): - """Gets the ids and self links for all compute elements.""" - restapi = self.URI_COMPUTE_HOST - tenant_obj = tenant.Tenant(self.ipaddr, self.port) - if tenant_name is None: - tenant_uri = tenant_obj.tenant_getid() - else: - tenant_uri = tenant_obj.tenant_query(tenant_name) - restapi = restapi + "?tenant=" + tenant_uri - - (s, h) = common.service_json_request( - self.ipaddr, self.port, - "GET", - restapi, - None) - o = common.json_decode(s) - return o['host'] - - def show_by_uri(self, uri): - """Makes REST API call to retrieve Host details based on its UUID.""" - (s, h) = common.service_json_request(self.ipaddr, self.port, "GET", - Host.URI_HOST_DETAILS.format(uri), - None) - o = common.json_decode(s) - inactive = common.get_node_value(o, 'inactive') - - if inactive: - return None - return o diff --git a/cinder/volume/drivers/coprhd/helpers/project.py b/cinder/volume/drivers/coprhd/helpers/project.py deleted file mode 100644 index 3cc7b00e1..000000000 --- a/cinder/volume/drivers/coprhd/helpers/project.py +++ /dev/null @@ -1,88 +0,0 @@ -# Copyright (c) 2016 EMC Corporation -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. 
You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - - -from cinder.i18n import _ -from cinder.volume.drivers.coprhd.helpers import commoncoprhdapi as common -from cinder.volume.drivers.coprhd.helpers import tenant - - -class Project(common.CoprHDResource): - - # Commonly used URIs for the 'Project' module - URI_PROJECT_LIST = '/tenants/{0}/projects' - URI_PROJECT = '/projects/{0}' - - def project_query(self, name): - """Retrieves UUID of project based on its name. - - :param name: name of project - :returns: UUID of project - :raises CoprHdError: - when project name is not found - """ - if common.is_uri(name): - return name - (tenant_name, project_name) = common.get_parent_child_from_xpath(name) - - tenant_obj = tenant.Tenant(self.ipaddr, self.port) - - tenant_uri = tenant_obj.tenant_query(tenant_name) - projects = self.project_list(tenant_uri) - if projects: - for project in projects: - if project: - project_detail = self.project_show_by_uri( - project['id']) - if(project_detail and - project_detail['name'] == project_name): - return project_detail['id'] - raise common.CoprHdError(common.CoprHdError.NOT_FOUND_ERR, (_( - "Project: %s not found") % project_name)) - - def project_list(self, tenant_name): - """Makes REST API call and retrieves projects based on tenant UUID. 
- - :param tenant_name: Name of the tenant - :returns: List of project UUIDs in JSON response payload - """ - tenant_obj = tenant.Tenant(self.ipaddr, self.port) - tenant_uri = tenant_obj.tenant_query(tenant_name) - (s, h) = common.service_json_request(self.ipaddr, self.port, "GET", - Project.URI_PROJECT_LIST.format( - tenant_uri), - None) - o = common.json_decode(s) - - if "project" in o: - return common.get_list(o, 'project') - return [] - - def project_show_by_uri(self, uri): - """Makes REST API call and retrieves project derails based on UUID. - - :param uri: UUID of project - :returns: Project details in JSON response payload - """ - - (s, h) = common.service_json_request(self.ipaddr, self.port, - "GET", - Project.URI_PROJECT.format(uri), - None) - o = common.json_decode(s) - inactive = common.get_node_value(o, 'inactive') - if inactive: - return None - - return o diff --git a/cinder/volume/drivers/coprhd/helpers/snapshot.py b/cinder/volume/drivers/coprhd/helpers/snapshot.py deleted file mode 100644 index 857b8babd..000000000 --- a/cinder/volume/drivers/coprhd/helpers/snapshot.py +++ /dev/null @@ -1,257 +0,0 @@ -# Copyright (c) 2016 EMC Corporation -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -import oslo_serialization - -from cinder.i18n import _ -from cinder.volume.drivers.coprhd.helpers import commoncoprhdapi as common -from cinder.volume.drivers.coprhd.helpers import consistencygroup -from cinder.volume.drivers.coprhd.helpers import volume - - -class Snapshot(common.CoprHDResource): - - # Commonly used URIs for the 'Snapshot' module - URI_SNAPSHOTS = '/{0}/snapshots/{1}' - URI_BLOCK_SNAPSHOTS = '/block/snapshots/{0}' - URI_SEARCH_SNAPSHOT_BY_TAG = '/block/snapshots/search?tag={0}' - URI_SNAPSHOT_LIST = '/{0}/{1}/{2}/protection/snapshots' - URI_SNAPSHOT_TASKS_BY_OPID = '/vdc/tasks/{0}' - URI_RESOURCE_DEACTIVATE = '{0}/deactivate' - URI_CONSISTENCY_GROUP = "/block/consistency-groups" - URI_CONSISTENCY_GROUPS_SNAPSHOT_INSTANCE = ( - URI_CONSISTENCY_GROUP + "/{0}/protection/snapshots/{1}") - URI_CONSISTENCY_GROUPS_SNAPSHOT_DEACTIVATE = ( - URI_CONSISTENCY_GROUPS_SNAPSHOT_INSTANCE + "/deactivate") - URI_BLOCK_SNAPSHOTS_TAG = URI_BLOCK_SNAPSHOTS + '/tags' - - VOLUMES = 'volumes' - CG = 'consistency-groups' - BLOCK = 'block' - - timeout = 300 - - def snapshot_list_uri(self, otype, otypename, ouri): - """Makes REST API call to list snapshots under a volume. - - :param otype : block - :param otypename : either volume or consistency-group should be - provided - :param ouri : uri of volume or consistency-group - :returns: list of snapshots - """ - (s, h) = common.service_json_request( - self.ipaddr, self.port, - "GET", - Snapshot.URI_SNAPSHOT_LIST.format(otype, otypename, ouri), None) - o = common.json_decode(s) - return o['snapshot'] - - def snapshot_show_uri(self, otype, resource_uri, suri): - """Retrieves snapshot details based on snapshot Name or Label. - - :param otype : block - :param suri : uri of the Snapshot. 
- :param resource_uri: uri of the source resource - :returns: Snapshot details in JSON response payload - """ - if(resource_uri is not None and - resource_uri.find('BlockConsistencyGroup') > 0): - (s, h) = common.service_json_request( - self.ipaddr, self.port, - "GET", - Snapshot.URI_CONSISTENCY_GROUPS_SNAPSHOT_INSTANCE.format( - resource_uri, - suri), - None) - else: - (s, h) = common.service_json_request( - self.ipaddr, self.port, - "GET", - Snapshot.URI_SNAPSHOTS.format(otype, suri), None) - - return common.json_decode(s) - - def snapshot_query(self, storageres_type, - storageres_typename, resuri, snapshot_name): - if resuri is not None: - uris = self.snapshot_list_uri( - storageres_type, - storageres_typename, - resuri) - for uri in uris: - snapshot = self.snapshot_show_uri( - storageres_type, - resuri, - uri['id']) - if (False == common.get_node_value(snapshot, 'inactive') and - snapshot['name'] == snapshot_name): - return snapshot['id'] - - raise common.CoprHdError( - common.CoprHdError.SOS_FAILURE_ERR, - (_("snapshot with the name: " - "%s Not Found") % snapshot_name)) - - def storage_resource_query(self, - storageres_type, - volume_name, - cg_name, - project, - tenant): - resourcepath = "/" + project - if tenant is not None: - resourcepath = tenant + resourcepath - - resUri = None - resourceObj = None - if Snapshot.BLOCK == storageres_type and volume_name is not None: - resourceObj = volume.Volume(self.ipaddr, self.port) - resUri = resourceObj.volume_query(resourcepath, volume_name) - elif Snapshot.BLOCK == storageres_type and cg_name is not None: - resourceObj = consistencygroup.ConsistencyGroup( - self.ipaddr, - self.port) - resUri = resourceObj.consistencygroup_query( - cg_name, - project, - tenant) - else: - resourceObj = None - - return resUri - - def snapshot_create(self, otype, typename, ouri, - snaplabel, inactive, sync, - readonly=False, synctimeout=0): - """New snapshot is created, for a given volume. 
- - :param otype : block type should be provided - :param typename : either volume or consistency-groups should - be provided - :param ouri : uri of volume - :param snaplabel : name of the snapshot - :param inactive : if true, the snapshot will not activate the - synchronization between source and target volumes - :param sync : synchronous request - :param synctimeout : Query for task status for "synctimeout" secs. - If the task doesn't complete in synctimeout - secs, an exception is thrown - """ - - # check snapshot is already exist - is_snapshot_exist = True - try: - self.snapshot_query(otype, typename, ouri, snaplabel) - except common.CoprHdError as e: - if e.err_code == common.CoprHdError.NOT_FOUND_ERR: - is_snapshot_exist = False - else: - raise - - if is_snapshot_exist: - raise common.CoprHdError( - common.CoprHdError.ENTRY_ALREADY_EXISTS_ERR, - (_("Snapshot with name %(snaplabel)s" - " already exists under %(typename)s") % - {'snaplabel': snaplabel, - 'typename': typename - })) - - parms = { - 'name': snaplabel, - # if true, the snapshot will not activate the synchronization - # between source and target volumes - 'create_inactive': inactive - } - if readonly is True: - parms['read_only'] = readonly - body = oslo_serialization.jsonutils.dumps(parms) - - # REST api call - (s, h) = common.service_json_request( - self.ipaddr, self.port, - "POST", - Snapshot.URI_SNAPSHOT_LIST.format(otype, typename, ouri), body) - o = common.json_decode(s) - - task = o["task"][0] - - if sync: - return ( - common.block_until_complete( - otype, - task['resource']['id'], - task["id"], self.ipaddr, self.port, synctimeout) - ) - else: - return o - - def snapshot_delete_uri(self, otype, resource_uri, - suri, sync, synctimeout=0): - """Delete a snapshot by uri. 
- - :param otype : block - :param resource_uri: uri of the source resource - :param suri : Uri of the Snapshot - :param sync : To perform operation synchronously - :param synctimeout : Query for task status for "synctimeout" secs. If - the task doesn't complete in synctimeout secs, an - exception is thrown - """ - s = None - if resource_uri.find("Volume") > 0: - - (s, h) = common.service_json_request( - self.ipaddr, self.port, - "POST", - Snapshot.URI_RESOURCE_DEACTIVATE.format( - Snapshot.URI_BLOCK_SNAPSHOTS.format(suri)), - None) - elif resource_uri.find("BlockConsistencyGroup") > 0: - - (s, h) = common.service_json_request( - self.ipaddr, self.port, - "POST", - Snapshot.URI_CONSISTENCY_GROUPS_SNAPSHOT_DEACTIVATE.format( - resource_uri, - suri), - None) - o = common.json_decode(s) - task = o["task"][0] - - if sync: - return ( - common.block_until_complete( - otype, - task['resource']['id'], - task["id"], self.ipaddr, self.port, synctimeout) - ) - else: - return o - - def snapshot_delete(self, storageres_type, - storageres_typename, resource_uri, - name, sync, synctimeout=0): - snapshotUri = self.snapshot_query( - storageres_type, - storageres_typename, - resource_uri, - name) - self.snapshot_delete_uri( - storageres_type, - resource_uri, - snapshotUri, - sync, synctimeout) diff --git a/cinder/volume/drivers/coprhd/helpers/tag.py b/cinder/volume/drivers/coprhd/helpers/tag.py deleted file mode 100644 index 818c70d92..000000000 --- a/cinder/volume/drivers/coprhd/helpers/tag.py +++ /dev/null @@ -1,55 +0,0 @@ -# Copyright (c) 2016 EMC Corporation -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. 
You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - - -"""Contains tagging related methods.""" - -import oslo_serialization - -from cinder.i18n import _ -from cinder.volume.drivers.coprhd.helpers import commoncoprhdapi as common - - -class Tag(common.CoprHDResource): - - def tag_resource(self, uri, resource_id, add, remove): - params = { - 'add': add, - 'remove': remove - } - - body = oslo_serialization.jsonutils.dumps(params) - - (s, h) = common.service_json_request(self.ipaddr, self.port, "PUT", - uri.format(resource_id), body) - o = common.json_decode(s) - return o - - def list_tags(self, resource_uri): - if resource_uri.__contains__("tag") is False: - raise common.CoprHdError( - common.CoprHdError.VALUE_ERR, _("URI should end with /tag")) - - (s, h) = common.service_json_request(self.ipaddr, - self.port, - "GET", - resource_uri, - None) - - allTags = [] - o = common.json_decode(s) - allTags = o['tag'] - - return allTags diff --git a/cinder/volume/drivers/coprhd/helpers/tenant.py b/cinder/volume/drivers/coprhd/helpers/tenant.py deleted file mode 100644 index 9fb0f0220..000000000 --- a/cinder/volume/drivers/coprhd/helpers/tenant.py +++ /dev/null @@ -1,117 +0,0 @@ -# Copyright (c) 2016 EMC Corporation -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. 
You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from cinder.i18n import _ -from cinder.volume.drivers.coprhd.helpers import commoncoprhdapi as common - - -class Tenant(common.CoprHDResource): - - URI_SERVICES_BASE = '' - URI_TENANT = URI_SERVICES_BASE + '/tenant' - URI_TENANTS = URI_SERVICES_BASE + '/tenants/{0}' - URI_TENANTS_SUBTENANT = URI_TENANTS + '/subtenants' - - def tenant_query(self, label): - """Returns the UID of the tenant specified by the hierarchical name. - - (ex tenant1/tenant2/tenant3) - """ - - if common.is_uri(label): - return label - - tenant_id = self.tenant_getid() - - if not label: - return tenant_id - - subtenants = self.tenant_list(tenant_id) - subtenants.append(self.tenant_show(None)) - - for tenant in subtenants: - if tenant['name'] == label: - rslt = self.tenant_show_by_uri(tenant['id']) - if rslt: - return tenant['id'] - - raise common.CoprHdError(common.CoprHdError.NOT_FOUND_ERR, - (_("Tenant %s: not found") % label)) - - def tenant_show(self, label): - """Returns the details of the tenant based on its name.""" - if label: - tenant_id = self.tenant_query(label) - else: - tenant_id = self.tenant_getid() - - return self.tenant_show_by_uri(tenant_id) - - def tenant_getid(self): - (s, h) = common.service_json_request(self.ipaddr, self.port, - "GET", Tenant.URI_TENANT, None) - - o = common.json_decode(s) - return o['id'] - - def tenant_list(self, uri=None): - """Returns all the tenants under a parent tenant. 
- - :param uri: The parent tenant name - :returns: JSON payload of tenant list - """ - - if not uri: - uri = self.tenant_getid() - - tenantdtls = self.tenant_show_by_uri(uri) - - if(tenantdtls and not ('parent_tenant' in tenantdtls and - ("id" in tenantdtls['parent_tenant']))): - (s, h) = common.service_json_request( - self.ipaddr, self.port, - "GET", self.URI_TENANTS_SUBTENANT.format(uri), None) - - o = common.json_decode(s) - return o['subtenant'] - - else: - return [] - - def tenant_show_by_uri(self, uri): - """Makes REST API call to retrieve tenant details based on UUID.""" - (s, h) = common.service_json_request(self.ipaddr, self.port, "GET", - Tenant.URI_TENANTS.format(uri), - None) - - o = common.json_decode(s) - if 'inactive' in o and o['inactive']: - return None - - return o - - def get_tenant_by_name(self, tenant): - uri = None - if not tenant: - uri = self.tenant_getid() - else: - if not common.is_uri(tenant): - uri = self.tenant_query(tenant) - else: - uri = tenant - if not uri: - raise common.CoprHdError(common.CoprHdError.NOT_FOUND_ERR, - (_("Tenant %s: not found") % tenant)) - return uri diff --git a/cinder/volume/drivers/coprhd/helpers/urihelper.py b/cinder/volume/drivers/coprhd/helpers/urihelper.py deleted file mode 100644 index f9b983509..000000000 --- a/cinder/volume/drivers/coprhd/helpers/urihelper.py +++ /dev/null @@ -1,82 +0,0 @@ -# Copyright (c) 2016 EMC Corporation -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- - -class URIHelper(object): - - """This map will be a map of maps. - - e.g for project component type, it will hold a map - of its operations vs their uris - """ - COMPONENT_TYPE_VS_URIS_MAP = dict() - """Volume URIs.""" - VOLUME_URIS_MAP = dict() - URI_VOLUMES = '/block/volumes' - URI_VOLUME = URI_VOLUMES + '/{0}' - URI_VOLUME_TASK_LIST = URI_VOLUME + '/tasks' - URI_VOLUME_TASK = URI_VOLUME_TASK_LIST + '/{1}' - - """Consistencygroup URIs.""" - CG_URIS_MAP = dict() - URI_CGS = '/block/consistency-groups' - URI_CG = URI_CGS + '/{0}' - URI_CG_TASK_LIST = URI_CG + '/tasks' - URI_CG_TASK = URI_CG_TASK_LIST + '/{1}' - - """Export Group URIs.""" - # Map to hold all export group uris - EXPORT_GROUP_URIS_MAP = dict() - URI_EXPORT_GROUP_TASKS_LIST = '/block/exports/{0}/tasks' - URI_EXPORT_GROUP_TASK = URI_EXPORT_GROUP_TASKS_LIST + '/{1}' - - def __init__(self): - """During initialization of the class, lets fill all the maps.""" - self.__fillExportGroupMap() - self.__fillVolumeMap() - self.__fillConsistencyGroupMap() - self.__initializeComponentVsUriMap() - - def __call__(self): - return self - - def __initializeComponentVsUriMap(self): - self.COMPONENT_TYPE_VS_URIS_MAP["export"] = self.EXPORT_GROUP_URIS_MAP - self.COMPONENT_TYPE_VS_URIS_MAP[ - "volume"] = self.VOLUME_URIS_MAP - self.COMPONENT_TYPE_VS_URIS_MAP[ - "consistencygroup"] = self.CG_URIS_MAP - - def __fillExportGroupMap(self): - self.EXPORT_GROUP_URIS_MAP["task"] = self.URI_EXPORT_GROUP_TASK - - def __fillVolumeMap(self): - self.VOLUME_URIS_MAP["task"] = self.URI_VOLUME_TASK - - def __fillConsistencyGroupMap(self): - self.CG_URIS_MAP["task"] = self.URI_CG_TASK - - def getUri(self, componentType, operationType): - return ( - self.COMPONENT_TYPE_VS_URIS_MAP.get( - componentType).get( - operationType) - ) - -"""Defining the singleton instance. 
- -Use this instance any time the access is required for this module/class -""" -singletonURIHelperInstance = URIHelper() diff --git a/cinder/volume/drivers/coprhd/helpers/virtualarray.py b/cinder/volume/drivers/coprhd/helpers/virtualarray.py deleted file mode 100644 index de0ec96b1..000000000 --- a/cinder/volume/drivers/coprhd/helpers/virtualarray.py +++ /dev/null @@ -1,79 +0,0 @@ -# Copyright (c) 2016 EMC Corporation -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from cinder.i18n import _ -from cinder.volume.drivers.coprhd.helpers import commoncoprhdapi as common - - -class VirtualArray(common.CoprHDResource): - - # Commonly used URIs for the 'varrays' module - URI_VIRTUALARRAY = '/vdc/varrays' - URI_VIRTUALARRAY_BY_VDC_ID = '/vdc/varrays?vdc-id={0}' - URI_VIRTUALARRAY_URI = '/vdc/varrays/{0}' - - def varray_query(self, name): - """Returns the UID of the varray specified by the name.""" - if common.is_uri(name): - return name - - uris = self.varray_list() - - for uri in uris: - varray = self.varray_show(uri) - if varray and varray['name'] == name: - return varray['id'] - - raise common.CoprHdError(common.CoprHdError.NOT_FOUND_ERR, - (_("varray %s: not found") % name)) - - def varray_list(self, vdcname=None): - """Returns all the varrays in a vdc. 
- - :param vdcname: Name of the Virtual Data Center - :returns: JSON payload of varray list - """ - vdcrestapi = None - if vdcname is not None: - vdcrestapi = VirtualArray.URI_VIRTUALARRAY_BY_VDC_ID.format( - vdcname) - else: - vdcrestapi = VirtualArray.URI_VIRTUALARRAY - (s, h) = common.service_json_request( - self.ipaddr, self.port, "GET", - vdcrestapi, None) - - o = common.json_decode(s) - - returnlst = [] - for item in o['varray']: - returnlst.append(item['id']) - - return returnlst - - def varray_show(self, label): - """Makes REST API call to retrieve varray details based on name.""" - uri = self.varray_query(label) - - (s, h) = common.service_json_request( - self.ipaddr, self.port, "GET", - VirtualArray.URI_VIRTUALARRAY_URI.format(uri), - None) - - o = common.json_decode(s) - if 'inactive' in o and o['inactive'] is True: - return None - else: - return o diff --git a/cinder/volume/drivers/coprhd/helpers/virtualpool.py b/cinder/volume/drivers/coprhd/helpers/virtualpool.py deleted file mode 100644 index f86917f9c..000000000 --- a/cinder/volume/drivers/coprhd/helpers/virtualpool.py +++ /dev/null @@ -1,77 +0,0 @@ -# Copyright (c) 2016 EMC Corporation -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -from cinder.i18n import _ -from cinder.volume.drivers.coprhd.helpers import commoncoprhdapi as common - - -class VirtualPool(common.CoprHDResource): - - URI_VPOOL = "/{0}/vpools" - URI_VPOOL_SHOW = URI_VPOOL + "/{1}" - URI_VPOOL_SEARCH = URI_VPOOL + "/search?name={1}" - - def vpool_show_uri(self, vpooltype, uri): - """Makes REST API call and retrieves vpool details based on UUID. - - This function will take uri as input and returns with - all parameters of VPOOL like label, urn and type. - - :param vpooltype : Type of virtual pool {'block'} - :param uri : unique resource identifier of the vpool - :returns: object containing all the details of vpool - """ - - (s, h) = common.service_json_request( - self.ipaddr, self.port, - "GET", - self.URI_VPOOL_SHOW.format(vpooltype, uri), None) - - o = common.json_decode(s) - if o['inactive']: - return None - - return o - - def vpool_query(self, name, vpooltype): - """Makes REST API call to query the vpool by name and type. - - This function will take the VPOOL name and type of VPOOL - as input and get uri of the first occurrence of given VPOOL. - - :param name: Name of the VPOOL - :param vpooltype: Type of the VPOOL {'block'} - :returns: uri of the given vpool - """ - if common.is_uri(name): - return name - - (s, h) = common.service_json_request( - self.ipaddr, self.port, "GET", - self.URI_VPOOL_SEARCH.format(vpooltype, name), None) - - o = common.json_decode(s) - if len(o['resource']) > 0: - # Get the Active vpool ID. - for vpool in o['resource']: - if self.vpool_show_uri(vpooltype, vpool['id']) is not None: - return vpool['id'] - # Raise not found exception. as we did not find any active vpool. 
- raise common.CoprHdError(common.CoprHdError.NOT_FOUND_ERR, - (_("VPool %(name)s ( %(vpooltype)s ) :" - " not found") % - {'name': name, - 'vpooltype': vpooltype - })) diff --git a/cinder/volume/drivers/coprhd/helpers/volume.py b/cinder/volume/drivers/coprhd/helpers/volume.py deleted file mode 100644 index 53bd07f13..000000000 --- a/cinder/volume/drivers/coprhd/helpers/volume.py +++ /dev/null @@ -1,518 +0,0 @@ -# Copyright (c) 2016 EMC Corporation -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -import oslo_serialization -from oslo_utils import units -import six - -from cinder.i18n import _ -from cinder.volume.drivers.coprhd.helpers import commoncoprhdapi as common -from cinder.volume.drivers.coprhd.helpers import consistencygroup -from cinder.volume.drivers.coprhd.helpers import project -from cinder.volume.drivers.coprhd.helpers import virtualarray -from cinder.volume.drivers.coprhd.helpers import virtualpool - - -class Volume(common.CoprHDResource): - - # Commonly used URIs for the 'Volume' module - URI_SEARCH_VOLUMES = '/block/volumes/search?project={0}' - URI_SEARCH_VOLUMES_BY_TAG = '/block/volumes/search?tag={0}' - URI_VOLUMES = '/block/volumes' - URI_VOLUME = URI_VOLUMES + '/{0}' - URI_VOLUME_EXPORTS = URI_VOLUME + '/exports' - URI_BULK_DELETE = URI_VOLUMES + '/deactivate' - URI_DEACTIVATE = URI_VOLUME + '/deactivate' - URI_EXPAND = URI_VOLUME + '/expand' - URI_TAG_VOLUME = URI_VOLUME + "/tags" - URI_VOLUME_CHANGE_VPOOL = URI_VOLUMES + "/vpool-change" - - # Protection REST APIs - clone - URI_VOLUME_PROTECTION_FULLCOPIES = ( - '/block/volumes/{0}/protection/full-copies') - URI_SNAPSHOT_PROTECTION_FULLCOPIES = ( - '/block/snapshots/{0}/protection/full-copies') - - URI_VOLUME_CLONE_DETACH = "/block/full-copies/{0}/detach" - - # New CG URIs - URI_CG_CLONE = "/block/consistency-groups/{0}/protection/full-copies" - URI_CG_CLONE_DETACH = ( - "/block/consistency-groups/{0}/protection/full-copies/{1}/detach") - - VOLUMES = 'volumes' - CG = 'consistency-groups' - BLOCK = 'block' - SNAPSHOTS = 'snapshots' - - # Lists volumes in a project - def list_volumes(self, project): - """Makes REST API call to list volumes under a project. 
- - :param project: name of project - :returns: List of volumes uuids in JSON response payload - """ - - volume_uris = self.search_volumes(project) - volumes = [] - for uri in volume_uris: - volume = self.show_by_uri(uri) - if volume: - volumes.append(volume) - return volumes - - def search_volumes(self, project_name): - - proj = project.Project(self.ipaddr, self.port) - project_uri = proj.project_query(project_name) - - (s, h) = common.service_json_request(self.ipaddr, self.port, - "GET", - Volume.URI_SEARCH_VOLUMES.format( - project_uri), - None) - o = common.json_decode(s) - if not o: - return [] - - volume_uris = [] - resources = common.get_node_value(o, "resource") - for resource in resources: - volume_uris.append(resource["id"]) - return volume_uris - - # Shows volume information given its uri - def show_by_uri(self, uri): - """Makes REST API call and retrieves volume details based on UUID. - - :param uri: UUID of volume - :returns: Volume details in JSON response payload - """ - - (s, h) = common.service_json_request(self.ipaddr, self.port, - "GET", - Volume.URI_VOLUME.format(uri), - None) - o = common.json_decode(s) - inactive = common.get_node_value(o, 'inactive') - if inactive: - return None - return o - - # Creates a volume given label, project, vpool and size - def create(self, project_name, label, size, varray, vpool, - sync, consistencygroup, synctimeout=0): - """Makes REST API call to create volume under a project. - - :param project_name : name of the project under which the volume - will be created - :param label : name of volume - :param size : size of volume - :param varray : name of varray - :param vpool : name of vpool - :param sync : synchronous request - :param consistencygroup : To create volume under a consistencygroup - :param synctimeout : Query for task status for "synctimeout" secs. 
- If the task doesn't complete in synctimeout - secs, an exception is thrown - :returns: Created task details in JSON response payload - """ - - proj_obj = project.Project(self.ipaddr, self.port) - project_uri = proj_obj.project_query(project_name) - - vpool_obj = virtualpool.VirtualPool(self.ipaddr, self.port) - vpool_uri = vpool_obj.vpool_query(vpool, "block") - - varray_obj = virtualarray.VirtualArray(self.ipaddr, self.port) - varray_uri = varray_obj.varray_query(varray) - - request = { - 'name': label, - 'size': size, - 'varray': varray_uri, - 'project': project_uri, - 'vpool': vpool_uri, - 'count': 1 - } - if consistencygroup: - request['consistency_group'] = consistencygroup - - body = oslo_serialization.jsonutils.dumps(request) - (s, h) = common.service_json_request(self.ipaddr, self.port, - "POST", - Volume.URI_VOLUMES, - body) - o = common.json_decode(s) - - if sync: - # check task empty - if len(o["task"]) > 0: - task = o["task"][0] - return self.check_for_sync(task, sync, synctimeout) - else: - raise common.CoprHdError( - common.CoprHdError.SOS_FAILURE_ERR, - _("error: task list is empty, no task response found")) - else: - return o - - # Blocks the operation until the task is complete/error out/timeout - def check_for_sync(self, result, sync, synctimeout=0): - if sync: - if len(result["resource"]) > 0: - resource = result["resource"] - return ( - common.block_until_complete("volume", resource["id"], - result["id"], self.ipaddr, - self.port, synctimeout) - ) - else: - raise common.CoprHdError( - common.CoprHdError.SOS_FAILURE_ERR, - _("error: task list is empty, no task response found")) - else: - return result - - # Queries a volume given its name - def volume_query(self, full_project_name, volume_name): - """Makes REST API call to query the volume by name. 
- - :param volume_name: name of volume - :param full_project_name: Full project path - :returns: Volume details in JSON response payload - """ - if common.is_uri(volume_name): - return volume_name - - if not full_project_name: - raise common.CoprHdError(common.CoprHdError.NOT_FOUND_ERR, - _("Project name not specified")) - uris = self.search_volumes(full_project_name) - for uri in uris: - volume = self.show_by_uri(uri) - if volume and 'name' in volume and volume['name'] == volume_name: - return volume['id'] - raise common.CoprHdError(common.CoprHdError.NOT_FOUND_ERR, - (_("Volume" - "%s: not found") % volume_name)) - - def get_storageAttributes(self, volume_name, cg_name, snapshot_name=None): - storageres_type = None - storageres_typename = None - - if snapshot_name is not None: - storageres_type = Volume.BLOCK - storageres_typename = Volume.SNAPSHOTS - elif volume_name is not None: - storageres_type = Volume.BLOCK - storageres_typename = Volume.VOLUMES - elif cg_name is not None: - storageres_type = Volume.BLOCK - storageres_typename = Volume.CG - else: - storageres_type = None - storageres_typename = None - return (storageres_type, storageres_typename) - - def storage_resource_query(self, - storageres_type, - volume_name, - cg_name, - snapshot_name, - project, - tenant): - resourcepath = "/" + project - if tenant is not None: - resourcepath = tenant + resourcepath - - resUri = None - resourceObj = None - - if Volume.BLOCK == storageres_type and volume_name is not None: - resUri = self.volume_query(resourcepath, volume_name) - if snapshot_name is not None: - from cinder.volume.drivers.coprhd.helpers import snapshot - snapobj = snapshot.Snapshot(self.ipaddr, self.port) - resUri = snapobj.snapshot_query(storageres_type, - Volume.VOLUMES, resUri, - snapshot_name) - - elif Volume.BLOCK == storageres_type and cg_name is not None: - resourceObj = consistencygroup.ConsistencyGroup( - self.ipaddr, self.port) - resUri = resourceObj.consistencygroup_query( - cg_name, - 
project, - tenant) - else: - resourceObj = None - - return resUri - - # Creates volume(s) from given source volume - def clone(self, new_vol_name, resource_uri, - sync, synctimeout=0): - """Makes REST API call to clone volume. - - :param new_vol_name: name of volume - :param resource_uri: uri of source volume - :param sync : synchronous request - :param synctimeout : Query for task status for "synctimeout" secs. - If the task doesn't complete in synctimeout - secs, an exception is thrown - :returns: Created task details in JSON response payload - """ - is_snapshot_clone = False - clone_full_uri = None - - # consistency group - if resource_uri.find("BlockConsistencyGroup") > 0: - clone_full_uri = Volume.URI_CG_CLONE.format(resource_uri) - elif resource_uri.find("BlockSnapshot") > 0: - is_snapshot_clone = True - clone_full_uri = ( - Volume.URI_SNAPSHOT_PROTECTION_FULLCOPIES.format(resource_uri)) - else: - clone_full_uri = ( - Volume.URI_VOLUME_PROTECTION_FULLCOPIES.format(resource_uri)) - - request = { - 'name': new_vol_name, - 'type': None, - 'count': 1 - } - - request["count"] = 1 - - body = oslo_serialization.jsonutils.dumps(request) - (s, h) = common.service_json_request(self.ipaddr, self.port, - "POST", - clone_full_uri, - body) - o = common.json_decode(s) - - if sync: - task = o["task"][0] - - if is_snapshot_clone: - return ( - common.block_until_complete( - "block", - task["resource"]["id"], - task["id"], self.ipaddr, self.port) - ) - else: - return self.check_for_sync(task, sync, synctimeout) - else: - return o - - # To check whether a cloned volume is in detachable state or not - def is_volume_detachable(self, full_project_name, name): - - volume_uri = self.volume_query(full_project_name, name) - vol = self.show_by_uri(volume_uri) - # Filtering based on "replicaState" attribute value of Cloned volume. - # If "replicaState" value is "SYNCHRONIZED" then only Cloned volume - # would be in detachable state. 
- try: - return vol['protection']['full_copies'][ - 'replicaState'] == 'SYNCHRONIZED' - except TypeError: - return False - - def volume_clone_detach(self, resource_uri, full_project_name, - name, sync, synctimeout=0): - - volume_uri = self.volume_query(full_project_name, name) - - # consistency group - if resource_uri.find("BlockConsistencyGroup") > 0: - (s, h) = common.service_json_request( - self.ipaddr, self.port, - "POST", - Volume.URI_CG_CLONE_DETACH.format( - resource_uri, - volume_uri), None) - else: - (s, h) = common.service_json_request( - self.ipaddr, self.port, - "POST", - Volume.URI_VOLUME_CLONE_DETACH.format(volume_uri), None) - - o = common.json_decode(s) - if sync: - task = o["task"][0] - return self.check_for_sync(task, sync, synctimeout) - else: - return o - - # Shows volume information given its name - def show(self, full_project_name, name): - """Retrieves volume details based on volume name. - - :param full_project_name : project path of the volume - :param name: name of the volume. If the volume is under a project, - then full XPath needs to be specified. 
- Example: If VOL1 is a volume under project PROJ1, then the name - of volume is PROJ1/VOL1 - :returns: Volume details in JSON response payload - """ - if common.is_uri(name): - return name - if full_project_name is None: - raise common.CoprHdError(common.CoprHdError.NOT_FOUND_ERR, - (_("Volume %s : not found") % - six.text_type(name))) - - uris = self.search_volumes(full_project_name) - - for uri in uris: - volume = self.show_by_uri(uri) - if volume and 'name' in volume and volume['name'] == name: - return volume - raise common.CoprHdError(common.CoprHdError.NOT_FOUND_ERR, - (_("Volume" - " %s : not found") % six.text_type(name))) - - def expand(self, full_project_name, volume_name, new_size, - sync=False, synctimeout=0): - - volume_detail = self.show(full_project_name, volume_name) - from decimal import Decimal - new_size_in_gb = Decimal(Decimal(new_size) / (units.Gi)) - current_size = Decimal(volume_detail["provisioned_capacity_gb"]) - if new_size_in_gb <= current_size: - raise common.CoprHdError( - common.CoprHdError.VALUE_ERR, - (_("error: Incorrect value of new size: %(new_size_in_gb)s" - " GB\nNew size must be greater than current size: " - "%(current_size)s GB") % {'new_size_in_gb': new_size_in_gb, - 'current_size': current_size})) - - body = oslo_serialization.jsonutils.dumps({ - "new_size": new_size - }) - - (s, h) = common.service_json_request(self.ipaddr, self.port, - "POST", - Volume.URI_EXPAND.format( - volume_detail["id"]), - body) - if not s: - return None - o = common.json_decode(s) - - if sync: - return self.check_for_sync(o, sync, synctimeout) - return o - - # Deletes a volume given a volume name - def delete(self, full_project_name, name, sync=False, - force_delete=False, coprhdonly=False, synctimeout=0): - """Deletes a volume based on volume name. 
- - :param full_project_name: project name - :param name : name of volume to be deleted - :param sync : synchronous request - :param force_delete: if true, it will force the delete of internal - volumes that have the SUPPORTS_FORCE flag - :param coprhdonly : to delete volumes from coprHD only - :param synctimeout: Query for task status for "synctimeout" secs. If - the task doesn't complete in synctimeout secs, an - exception is thrown - - """ - volume_uri = self.volume_query(full_project_name, name) - return self.delete_by_uri(volume_uri, sync, force_delete, - coprhdonly, synctimeout) - - # Deletes a volume given a volume uri - def delete_by_uri(self, uri, sync=False, - force_delete=False, coprhdonly=False, synctimeout=0): - """Deletes a volume based on volume uri.""" - params = '' - if force_delete: - params += '&' if ('?' in params) else '?' - params += "force=" + "true" - if coprhdonly is True: - params += '&' if ('?' in params) else '?' - params += "type=" + 'CoprHD_ONLY' - - (s, h) = common.service_json_request(self.ipaddr, self.port, - "POST", - Volume.URI_DEACTIVATE.format( - uri) + params, - None) - if not s: - return None - o = common.json_decode(s) - if sync: - return self.check_for_sync(o, sync, synctimeout) - return o - - # Gets the exports info given a volume uri - def get_exports_by_uri(self, uri): - """Makes REST API call to get exports info of a volume. - - :param uri: URI of the volume - :returns: Exports details in JSON response payload - """ - (s, h) = common.service_json_request(self.ipaddr, self.port, - "GET", - Volume.URI_VOLUME_EXPORTS.format( - uri), - None) - return common.json_decode(s) - - # Update a volume information - # Changed the volume vpool - def update(self, prefix_path, name, vpool): - """Makes REST API call to update a volume information. 
- - :param name: name of the volume to be updated - :param vpool: name of vpool - :returns: Created task details in JSON response payload - """ - namelist = [] - - if isinstance(name, list): - namelist = name - else: - namelist.append(name) - - volumeurilist = [] - - for item in namelist: - volume_uri = self.volume_query(prefix_path, item) - volumeurilist.append(volume_uri) - - vpool_obj = virtualpool.VirtualPool(self.ipaddr, self.port) - vpool_uri = vpool_obj.vpool_query(vpool, "block") - - params = { - 'vpool': vpool_uri, - 'volumes': volumeurilist - } - - body = oslo_serialization.jsonutils.dumps(params) - - (s, h) = common.service_json_request( - self.ipaddr, self.port, "POST", - Volume.URI_VOLUME_CHANGE_VPOOL, - body) - - o = common.json_decode(s) - return o diff --git a/cinder/volume/drivers/coprhd/iscsi.py b/cinder/volume/drivers/coprhd/iscsi.py deleted file mode 100644 index a113f536a..000000000 --- a/cinder/volume/drivers/coprhd/iscsi.py +++ /dev/null @@ -1,223 +0,0 @@ -# Copyright (c) 2016 EMC Corporation -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- - -"""Driver for EMC CoprHD iSCSI volumes.""" - -from oslo_log import log as logging - -from cinder import exception -from cinder.i18n import _ -from cinder import interface -from cinder.volume import driver -from cinder.volume.drivers.coprhd import common as coprhd_common -from cinder.volume import utils as volume_utils - -LOG = logging.getLogger(__name__) - - -@interface.volumedriver -class EMCCoprHDISCSIDriver(driver.ISCSIDriver): - """CoprHD iSCSI Driver.""" - VERSION = "3.0.0.0" - - # ThirdPartySystems wiki page name - CI_WIKI_NAME = "EMC_CoprHD_CI" - - def __init__(self, *args, **kwargs): - super(EMCCoprHDISCSIDriver, self).__init__(*args, **kwargs) - self.common = self._get_common_driver() - - def _get_common_driver(self): - return coprhd_common.EMCCoprHDDriverCommon( - protocol='iSCSI', - default_backend_name=self.__class__.__name__, - configuration=self.configuration) - - def check_for_setup_error(self): - self.common.check_for_setup_error() - - def create_volume(self, volume): - """Creates a Volume.""" - self.common.create_volume(volume, self) - self.common.set_volume_tags(volume, ['_obj_volume_type']) - - def create_cloned_volume(self, volume, src_vref): - """Creates a cloned Volume.""" - self.common.create_cloned_volume(volume, src_vref) - self.common.set_volume_tags(volume, ['_obj_volume_type']) - - def create_volume_from_snapshot(self, volume, snapshot): - """Creates a volume from a snapshot.""" - self.common.create_volume_from_snapshot(snapshot, volume) - self.common.set_volume_tags(volume, ['_obj_volume_type']) - - def extend_volume(self, volume, new_size): - """expands the size of the volume.""" - self.common.expand_volume(volume, new_size) - - def delete_volume(self, volume): - """Deletes a volume.""" - self.common.delete_volume(volume) - - def create_snapshot(self, snapshot): - """Creates a snapshot.""" - self.common.create_snapshot(snapshot) - - def delete_snapshot(self, snapshot): - """Deletes a snapshot.""" - 
self.common.delete_snapshot(snapshot) - - def ensure_export(self, context, volume): - """Driver entry point to get the export info for an existing volume.""" - pass - - def create_export(self, context, volume, connector=None): - """Driver entry point to get the export info for a new volume.""" - pass - - def remove_export(self, context, volume): - """Driver entry point to remove an export for a volume.""" - pass - - def create_group(self, context, group): - """Creates a group.""" - if volume_utils.is_group_a_cg_snapshot_type(group): - return self.common.create_consistencygroup(context, group) - - # If the group is not consistency group snapshot enabled, then - # we shall rely on generic volume group implementation - raise NotImplementedError() - - def create_group_from_src(self, ctxt, group, volumes, - group_snapshot=None, snapshots=None, - source_group=None, source_vols=None): - """Creates a group from source.""" - if volume_utils.is_group_a_cg_snapshot_type(group): - message = _("create group from source is not supported " - "for CoprHD if the group type supports " - "consistent group snapshot.") - raise exception.VolumeBackendAPIException(data=message) - else: - raise NotImplementedError() - - def update_group(self, context, group, add_volumes=None, - remove_volumes=None): - """Updates volumes in group.""" - if volume_utils.is_group_a_cg_snapshot_type(group): - return self.common.update_consistencygroup(group, add_volumes, - remove_volumes) - - # If the group is not consistency group snapshot enabled, then - # we shall rely on generic volume group implementation - raise NotImplementedError() - - def delete_group(self, context, group, volumes): - """Deletes a group.""" - if volume_utils.is_group_a_cg_snapshot_type(group): - return self.common.delete_consistencygroup(context, group, volumes) - - # If the group is not consistency group snapshot enabled, then - # we shall rely on generic volume group implementation - raise NotImplementedError() - - def 
create_group_snapshot(self, context, group_snapshot, snapshots): - """Creates a group snapshot.""" - if volume_utils.is_group_a_cg_snapshot_type(group_snapshot): - LOG.debug("creating a group snapshot") - return self.common.create_cgsnapshot(group_snapshot, snapshots) - - # If the group is not consistency group snapshot enabled, then - # we shall rely on generic volume group implementation - raise NotImplementedError() - - def delete_group_snapshot(self, context, group_snapshot, snapshots): - """Deletes a group snapshot.""" - if volume_utils.is_group_a_cg_snapshot_type(group_snapshot): - return self.common.delete_cgsnapshot(group_snapshot, snapshots) - - # If the group is not consistency group snapshot enabled, then - # we shall rely on generic volume group implementation - raise NotImplementedError() - - def check_for_export(self, context, volume_id): - """Make sure volume is exported.""" - pass - - def initialize_connection(self, volume, connector): - """Initializes the connection and returns connection info.""" - - initiator_ports = [] - initiator_ports.append(connector['initiator']) - itls = self.common.initialize_connection(volume, - 'iSCSI', - initiator_ports, - connector['host']) - properties = {} - properties['target_discovered'] = False - properties['volume_id'] = volume.id - if itls: - properties['target_iqn'] = itls[0]['target']['port'] - properties['target_portal'] = '%s:%s' % ( - itls[0]['target']['ip_address'], - itls[0]['target']['tcp_port']) - properties['target_lun'] = itls[0]['hlu'] - - auth = None - try: - auth = volume.provider_auth - except AttributeError: - pass - - if auth: - (auth_method, auth_username, auth_secret) = auth.split() - properties['auth_method'] = auth_method - properties['auth_username'] = auth_username - properties['auth_password'] = auth_secret - - LOG.debug("ISCSI properties: %s", properties) - return { - 'driver_volume_type': 'iscsi', - 'data': properties, - } - - def terminate_connection(self, volume, connector, **kwargs): 
- """Disallow connection from connector.""" - - init_ports = [] - init_ports.append(connector['initiator']) - self.common.terminate_connection(volume, - 'iSCSI', - init_ports, - connector['host']) - - def get_volume_stats(self, refresh=False): - """Get volume status. - - If 'refresh' is True, run update the stats first. - """ - if refresh: - self.update_volume_stats() - - return self._stats - - def update_volume_stats(self): - """Retrieve stats info from virtual pool/virtual array.""" - LOG.debug("Updating volume stats") - self._stats = self.common.update_volume_stats() - - def retype(self, ctxt, volume, new_type, diff, host): - """Change the volume type.""" - return self.common.retype(ctxt, volume, new_type, diff, host) diff --git a/cinder/volume/drivers/coprhd/scaleio.py b/cinder/volume/drivers/coprhd/scaleio.py deleted file mode 100644 index c4ddc3b31..000000000 --- a/cinder/volume/drivers/coprhd/scaleio.py +++ /dev/null @@ -1,375 +0,0 @@ -# Copyright (c) 2016 EMC Corporation -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- - -"""Driver for EMC CoprHD ScaleIO volumes.""" - -from oslo_config import cfg -from oslo_log import log as logging -import requests -import six -from six.moves import http_client -from six.moves import urllib - -from cinder import exception -from cinder.i18n import _ -from cinder import interface -from cinder.volume import configuration -from cinder.volume import driver -from cinder.volume.drivers.coprhd import common as coprhd_common -from cinder.volume import utils as volume_utils - - -LOG = logging.getLogger(__name__) - -scaleio_opts = [ - cfg.StrOpt('coprhd_scaleio_rest_gateway_host', - default='None', - help='Rest Gateway IP or FQDN for Scaleio'), - cfg.PortOpt('coprhd_scaleio_rest_gateway_port', - default=4984, - help='Rest Gateway Port for Scaleio'), - cfg.StrOpt('coprhd_scaleio_rest_server_username', - default=None, - help='Username for Rest Gateway'), - cfg.StrOpt('coprhd_scaleio_rest_server_password', - default=None, - help='Rest Gateway Password', - secret=True), - cfg.BoolOpt('scaleio_verify_server_certificate', - default=False, - help='verify server certificate'), - cfg.StrOpt('scaleio_server_certificate_path', - default=None, - help='Server certificate path') -] - -CONF = cfg.CONF -CONF.register_opts(scaleio_opts, group=configuration.SHARED_CONF_GROUP) - - -@interface.volumedriver -class EMCCoprHDScaleIODriver(driver.VolumeDriver): - """CoprHD ScaleIO Driver.""" - VERSION = "3.0.0.0" - server_token = None - - # ThirdPartySystems wiki page - CI_WIKI_NAME = "EMC_CoprHD_CI" - - def __init__(self, *args, **kwargs): - super(EMCCoprHDScaleIODriver, self).__init__(*args, **kwargs) - self.configuration.append_config_values(scaleio_opts) - self.common = self._get_common_driver() - - def _get_common_driver(self): - return coprhd_common.EMCCoprHDDriverCommon( - protocol='scaleio', - default_backend_name=self.__class__.__name__, - configuration=self.configuration) - - def check_for_setup_error(self): - self.common.check_for_setup_error() - if 
(self.configuration.scaleio_verify_server_certificate is True and - self.configuration.scaleio_server_certificate_path is None): - message = _("scaleio_verify_server_certificate is True but" - " scaleio_server_certificate_path is not provided" - " in cinder configuration") - raise exception.VolumeBackendAPIException(data=message) - - def create_volume(self, volume): - """Creates a Volume.""" - self.common.create_volume(volume, self, True) - self.common.set_volume_tags(volume, ['_obj_volume_type'], True) - vol_size = self._update_volume_size(int(volume.size)) - return {'size': vol_size} - - def _update_volume_size(self, vol_size): - """update the openstack volume size.""" - default_size = 8 - if (vol_size % default_size) != 0: - return (vol_size / default_size) * default_size + default_size - else: - return vol_size - - def create_cloned_volume(self, volume, src_vref): - """Creates a cloned Volume.""" - self.common.create_cloned_volume(volume, src_vref, True) - self.common.set_volume_tags(volume, ['_obj_volume_type'], True) - - def create_volume_from_snapshot(self, volume, snapshot): - """Creates a volume from a snapshot.""" - self.common.create_volume_from_snapshot(snapshot, volume, True) - self.common.set_volume_tags(volume, ['_obj_volume_type'], True) - - def extend_volume(self, volume, new_size): - """expands the size of the volume.""" - self.common.expand_volume(volume, new_size) - - def delete_volume(self, volume): - """Deletes an volume.""" - self.common.delete_volume(volume) - - def create_snapshot(self, snapshot): - """Creates a snapshot.""" - self.common.create_snapshot(snapshot, True) - - def delete_snapshot(self, snapshot): - """Deletes a snapshot.""" - self.common.delete_snapshot(snapshot) - - def ensure_export(self, context, volume): - """Driver entry point to get the export info for an existing volume.""" - pass - - def create_export(self, context, volume, connector=None): - """Driver entry point to get the export info for a new volume.""" - pass - - 
def remove_export(self, context, volume): - """Driver exntry point to remove an export for a volume.""" - pass - - def create_group(self, context, group): - """Creates a group.""" - if volume_utils.is_group_a_cg_snapshot_type(group): - return self.common.create_consistencygroup(context, group, True) - - # If the group is not consistency group snapshot enabled, then - # we shall rely on generic volume group implementation - raise NotImplementedError() - - def update_group(self, context, group, add_volumes=None, - remove_volumes=None): - """Updates volumes in group.""" - if volume_utils.is_group_a_cg_snapshot_type(group): - return self.common.update_consistencygroup(group, add_volumes, - remove_volumes) - - # If the group is not consistency group snapshot enabled, then - # we shall rely on generic volume group implementation - raise NotImplementedError() - - def create_group_from_src(self, ctxt, group, volumes, - group_snapshot=None, snapshots=None, - source_group=None, source_vols=None): - """Creates a group from source.""" - if volume_utils.is_group_a_cg_snapshot_type(group): - message = _("create group from source is not supported " - "for CoprHD if the group type supports " - "consistent group snapshot.") - raise exception.VolumeBackendAPIException(data=message) - else: - raise NotImplementedError() - - def delete_group(self, context, group, volumes): - """Deletes a group.""" - if volume_utils.is_group_a_cg_snapshot_type(group): - return self.common.delete_consistencygroup(context, group, - volumes, True) - - # If the group is not consistency group snapshot enabled, then - # we shall rely on generic volume group implementation - raise NotImplementedError() - - def create_group_snapshot(self, context, group_snapshot, snapshots): - """Creates a group snapshot.""" - if volume_utils.is_group_a_cg_snapshot_type(group_snapshot): - LOG.debug("creating a group snapshot") - return self.common.create_cgsnapshot(group_snapshot, snapshots, - True) - - # If the group is not 
consistency group snapshot enabled, then - # we shall rely on generic volume group implementation - raise NotImplementedError() - - def delete_group_snapshot(self, context, group_snapshot, snapshots): - """Deletes a group snapshot.""" - if volume_utils.is_group_a_cg_snapshot_type(group_snapshot): - return self.common.delete_cgsnapshot(group_snapshot, snapshots, - True) - - # If the group is not consistency group snapshot enabled, then - # we shall rely on generic volume group implementation - raise NotImplementedError() - - def check_for_export(self, context, volume_id): - """Make sure volume is exported.""" - pass - - def initialize_connection(self, volume, connector): - """Initializes the connection and returns connection info.""" - - volname = self.common._get_resource_name(volume, - coprhd_common.MAX_SIO_LEN, - True) - - properties = {} - properties['scaleIO_volname'] = volname - properties['scaleIO_volume_id'] = volume.provider_id - properties['hostIP'] = connector['ip'] - properties[ - 'serverIP'] = self.configuration.coprhd_scaleio_rest_gateway_host - properties[ - 'serverPort'] = self.configuration.coprhd_scaleio_rest_gateway_port - properties[ - 'serverUsername'] = ( - self.configuration.coprhd_scaleio_rest_server_username) - properties[ - 'serverPassword'] = ( - self.configuration.coprhd_scaleio_rest_server_password) - properties['iopsLimit'] = None - properties['bandwidthLimit'] = None - properties['serverToken'] = self.server_token - - initiator_ports = [] - initiator_port = self._get_client_id(properties['serverIP'], - properties['serverPort'], - properties['serverUsername'], - properties['serverPassword'], - properties['hostIP']) - initiator_ports.append(initiator_port) - - properties['serverToken'] = self.server_token - self.common.initialize_connection(volume, - 'scaleio', - initiator_ports, - connector['host']) - - dictobj = { - 'driver_volume_type': 'scaleio', - 'data': properties, - } - - return dictobj - - def terminate_connection(self, volume, 
connector, **kwargs): - """Disallow connection from connector.""" - - volname = volume.display_name - properties = {} - properties['scaleIO_volname'] = volname - properties['scaleIO_volume_id'] = volume.provider_id - properties['hostIP'] = connector['ip'] - properties[ - 'serverIP'] = self.configuration.coprhd_scaleio_rest_gateway_host - properties[ - 'serverPort'] = self.configuration.coprhd_scaleio_rest_gateway_port - properties[ - 'serverUsername'] = ( - self.configuration.coprhd_scaleio_rest_server_username) - properties[ - 'serverPassword'] = ( - self.configuration.coprhd_scaleio_rest_server_password) - properties['serverToken'] = self.server_token - - initiator_port = self._get_client_id(properties['serverIP'], - properties['serverPort'], - properties['serverUsername'], - properties['serverPassword'], - properties['hostIP']) - init_ports = [] - init_ports.append(initiator_port) - self.common.terminate_connection(volume, - 'scaleio', - init_ports, - connector['host']) - - def get_volume_stats(self, refresh=False): - """Get volume status. - - If 'refresh' is True, run update the stats first. 
- """ - if refresh: - self.update_volume_stats() - - return self._stats - - def update_volume_stats(self): - """Retrieve stats info from virtual pool/virtual array.""" - LOG.debug("Updating volume stats") - self._stats = self.common.update_volume_stats() - - def _get_client_id(self, server_ip, server_port, server_username, - server_password, sdc_ip): - ip_encoded = urllib.parse.quote(sdc_ip, '') - ip_double_encoded = urllib.parse.quote(ip_encoded, '') - - request = ("https://%s:%s/api/types/Sdc/instances/getByIp::%s/" % - (server_ip, six.text_type(server_port), ip_double_encoded)) - - LOG.info("ScaleIO get client id by ip request: %s", request) - - if self.configuration.scaleio_verify_server_certificate: - verify_cert = self.configuration.scaleio_server_certificate_path - else: - verify_cert = False - - r = requests.get( - request, auth=(server_username, self.server_token), - verify=verify_cert) - r = self._check_response( - r, request, server_ip, server_port, - server_username, server_password) - - sdc_id = r.json() - if not sdc_id: - msg = (_("Client with ip %s wasn't found ") % sdc_ip) - LOG.error(msg) - raise exception.VolumeBackendAPIException(data=msg) - if r.status_code != http_client.OK and "errorCode" in sdc_id: - msg = (_("Error getting sdc id from ip %(sdc_ip)s:" - " %(sdc_id_message)s") % {'sdc_ip': sdc_ip, - 'sdc_id_message': sdc_id[ - 'message']}) - LOG.error(msg) - raise exception.VolumeBackendAPIException(data=msg) - LOG.info("ScaleIO sdc id is %s", sdc_id) - return sdc_id - - def _check_response(self, response, request, - server_ip, server_port, - server_username, server_password): - if (response.status_code == http_client.UNAUTHORIZED) or ( - response.status_code == http_client.FORBIDDEN): - LOG.info( - "Token is invalid, going to re-login and get a new one") - - login_request = ("https://%s:%s/api/login" % - (server_ip, six.text_type(server_port))) - if self.configuration.scaleio_verify_server_certificate: - verify_cert = ( - 
self.configuration.scaleio_server_certificate_path) - else: - verify_cert = False - - r = requests.get( - login_request, auth=(server_username, server_password), - verify=verify_cert) - - token = r.json() - self.server_token = token - # repeat request with valid token - LOG.info("Going to perform request again %s with valid token", - request) - res = requests.get( - request, auth=(server_username, self.server_token), - verify=verify_cert) - return res - return response - - def retype(self, ctxt, volume, new_type, diff, host): - """Change the volume type.""" - return self.common.retype(ctxt, volume, new_type, diff, host) diff --git a/cinder/volume/drivers/datera/__init__.py b/cinder/volume/drivers/datera/__init__.py deleted file mode 100644 index e69de29bb..000000000 diff --git a/cinder/volume/drivers/datera/datera_api2.py b/cinder/volume/drivers/datera/datera_api2.py deleted file mode 100644 index fb7e58487..000000000 --- a/cinder/volume/drivers/datera/datera_api2.py +++ /dev/null @@ -1,765 +0,0 @@ -# Copyright 2017 Datera -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -import re -import uuid - -import eventlet -import ipaddress -import six - -from oslo_log import log as logging -from oslo_utils import excutils -from oslo_utils import units - -from cinder import exception -from cinder.i18n import _ -from cinder.volume import utils as volutils - -import cinder.volume.drivers.datera.datera_common as datc - -LOG = logging.getLogger(__name__) - - -class DateraApi(object): - - # ================= - # = Create Volume = - # ================= - - def _create_volume_2(self, volume): - # Generate App Instance, Storage Instance and Volume - # Volume ID will be used as the App Instance Name - # Storage Instance and Volumes will have standard names - policies = self._get_policies_for_resource(volume) - num_replicas = int(policies['replica_count']) - storage_name = policies['default_storage_name'] - volume_name = policies['default_volume_name'] - template = policies['template'] - - if template: - app_params = ( - { - 'create_mode': "openstack", - # 'uuid': str(volume['id']), - 'name': datc._get_name(volume['id']), - 'app_template': '/app_templates/{}'.format(template) - }) - else: - - app_params = ( - { - 'create_mode': "openstack", - 'uuid': str(volume['id']), - 'name': datc._get_name(volume['id']), - 'access_control_mode': 'deny_all', - 'storage_instances': { - storage_name: { - 'name': storage_name, - 'volumes': { - volume_name: { - 'name': volume_name, - 'size': volume['size'], - 'replica_count': num_replicas, - 'snapshot_policies': { - } - } - } - } - } - }) - self._issue_api_request( - datc.URL_TEMPLATES['ai'](), - 'post', - body=app_params, - api_version='2') - self._update_qos(volume, policies) - - # ================= - # = Extend Volume = - # ================= - - def _extend_volume_2(self, volume, new_size): - # Current product limitation: - # If app_instance is bound to template resizing is not possible - # Once policies are implemented in the product this can go away - policies = self._get_policies_for_resource(volume) - template 
= policies['template'] - if template: - LOG.warning("Volume size not extended due to template binding:" - " volume: %(volume)s, template: %(template)s", - volume=volume, template=template) - return - - # Offline App Instance, if necessary - reonline = False - app_inst = self._issue_api_request( - datc.URL_TEMPLATES['ai_inst']().format( - datc._get_name(volume['id'])), - api_version='2') - if app_inst['admin_state'] == 'online': - reonline = True - self._detach_volume_2(None, volume) - # Change Volume Size - app_inst = datc._get_name(volume['id']) - data = { - 'size': new_size - } - store_name, vol_name = self._scrape_template(policies) - self._issue_api_request( - datc.URL_TEMPLATES['vol_inst']( - store_name, vol_name).format(app_inst), - method='put', - body=data, - api_version='2') - # Online Volume, if it was online before - if reonline: - self._create_export_2(None, volume, None) - - # ================= - # = Cloned Volume = - # ================= - - def _create_cloned_volume_2(self, volume, src_vref): - policies = self._get_policies_for_resource(volume) - - store_name, vol_name = self._scrape_template(policies) - - src = "/" + datc.URL_TEMPLATES['vol_inst']( - store_name, vol_name).format(datc._get_name(src_vref['id'])) - data = { - 'create_mode': 'openstack', - 'name': datc._get_name(volume['id']), - 'uuid': str(volume['id']), - 'clone_src': src, - } - self._issue_api_request( - datc.URL_TEMPLATES['ai'](), 'post', body=data, api_version='2') - - if volume['size'] > src_vref['size']: - self._extend_volume_2(volume, volume['size']) - - # ================= - # = Delete Volume = - # ================= - - def _delete_volume_2(self, volume): - self.detach_volume(None, volume) - app_inst = datc._get_name(volume['id']) - try: - self._issue_api_request(datc.URL_TEMPLATES['ai_inst']().format( - app_inst), - method='delete', - api_version='2') - except exception.NotFound: - LOG.info("Tried to delete volume %s, but it was not found in the " - "Datera cluster. 
Continuing with delete.", - datc._get_name(volume['id'])) - - # ================= - # = Ensure Export = - # ================= - - def _ensure_export_2(self, context, volume, connector): - return self._create_export_2(context, volume, connector) - - # ========================= - # = Initialize Connection = - # ========================= - - def _initialize_connection_2(self, volume, connector): - # Now online the app_instance (which will online all storage_instances) - multipath = connector.get('multipath', False) - url = datc.URL_TEMPLATES['ai_inst']().format( - datc._get_name(volume['id'])) - data = { - 'admin_state': 'online' - } - app_inst = self._issue_api_request( - url, method='put', body=data, api_version='2') - storage_instances = app_inst["storage_instances"] - si_names = list(storage_instances.keys()) - - portal = storage_instances[si_names[0]]['access']['ips'][0] + ':3260' - iqn = storage_instances[si_names[0]]['access']['iqn'] - if multipath: - portals = [p + ':3260' for p in - storage_instances[si_names[0]]['access']['ips']] - iqns = [iqn for _ in - storage_instances[si_names[0]]['access']['ips']] - lunids = [self._get_lunid() for _ in - storage_instances[si_names[0]]['access']['ips']] - - return { - 'driver_volume_type': 'iscsi', - 'data': { - 'target_discovered': False, - 'target_iqn': iqn, - 'target_iqns': iqns, - 'target_portal': portal, - 'target_portals': portals, - 'target_lun': self._get_lunid(), - 'target_luns': lunids, - 'volume_id': volume['id'], - 'discard': False}} - else: - return { - 'driver_volume_type': 'iscsi', - 'data': { - 'target_discovered': False, - 'target_iqn': iqn, - 'target_portal': portal, - 'target_lun': self._get_lunid(), - 'volume_id': volume['id'], - 'discard': False}} - - # ================= - # = Create Export = - # ================= - - def _create_export_2(self, context, volume, connector): - # Online volume in case it hasn't been already - url = datc.URL_TEMPLATES['ai_inst']().format( - datc._get_name(volume['id'])) 
- data = { - 'admin_state': 'online' - } - self._issue_api_request(url, method='put', body=data, api_version='2') - # Check if we've already setup everything for this volume - url = (datc.URL_TEMPLATES['si']().format(datc._get_name(volume['id']))) - storage_instances = self._issue_api_request(url, api_version='2') - # Handle adding initiator to product if necessary - # Then add initiator to ACL - policies = self._get_policies_for_resource(volume) - - store_name, _ = self._scrape_template(policies) - - if (connector and - connector.get('initiator') and - not policies['acl_allow_all']): - initiator_name = "OpenStack_{}_{}".format( - self.driver_prefix, str(uuid.uuid4())[:4]) - initiator_group = datc.INITIATOR_GROUP_PREFIX + volume['id'] - found = False - initiator = connector['initiator'] - current_initiators = self._issue_api_request( - 'initiators', api_version='2') - for iqn, values in current_initiators.items(): - if initiator == iqn: - found = True - break - # If we didn't find a matching initiator, create one - if not found: - data = {'id': initiator, 'name': initiator_name} - # Try and create the initiator - # If we get a conflict, ignore it because race conditions - self._issue_api_request("initiators", - method="post", - body=data, - conflict_ok=True, - api_version='2') - # Create initiator group with initiator in it - initiator_path = "/initiators/{}".format(initiator) - initiator_group_path = "/initiator_groups/{}".format( - initiator_group) - ig_data = {'name': initiator_group, 'members': [initiator_path]} - self._issue_api_request("initiator_groups", - method="post", - body=ig_data, - conflict_ok=True, - api_version='2') - # Create ACL with initiator group as reference for each - # storage_instance in app_instance - # TODO(_alastor_): We need to avoid changing the ACLs if the - # template already specifies an ACL policy. 
- for si_name in storage_instances.keys(): - acl_url = (datc.URL_TEMPLATES['si']() + - "/{}/acl_policy").format( - datc._get_name(volume['id']), si_name) - existing_acl = self._issue_api_request(acl_url, - method="get", - api_version='2') - data = {} - data['initiators'] = existing_acl['initiators'] - data['initiator_groups'] = existing_acl['initiator_groups'] - data['initiator_groups'].append(initiator_group_path) - self._issue_api_request(acl_url, - method="put", - body=data, - api_version='2') - - if connector and connector.get('ip'): - try: - # Case where volume_type has non default IP Pool info - if policies['ip_pool'] != 'default': - initiator_ip_pool_path = self._issue_api_request( - "access_network_ip_pools/{}".format( - policies['ip_pool']), api_version='2')['path'] - # Fallback to trying reasonable IP based guess - else: - initiator_ip_pool_path = self._get_ip_pool_for_string_ip( - connector['ip']) - - ip_pool_url = datc.URL_TEMPLATES['si_inst']( - store_name).format(datc._get_name(volume['id'])) - ip_pool_data = {'ip_pool': initiator_ip_pool_path} - self._issue_api_request(ip_pool_url, - method="put", - body=ip_pool_data, - api_version='2') - except exception.DateraAPIException: - # Datera product 1.0 support - pass - - # Check to ensure we're ready for go-time - self._si_poll(volume, policies) - - # ================= - # = Detach Volume = - # ================= - - def _detach_volume_2(self, context, volume, attachment=None): - url = datc.URL_TEMPLATES['ai_inst']().format( - datc._get_name(volume['id'])) - data = { - 'admin_state': 'offline', - 'force': True - } - try: - self._issue_api_request(url, method='put', body=data, - api_version='2') - except exception.NotFound: - msg = ("Tried to detach volume %s, but it was not found in the " - "Datera cluster. 
Continuing with detach.") - LOG.info(msg, volume['id']) - # TODO(_alastor_): Make acl cleaning multi-attach aware - self._clean_acl_2(volume) - - def _check_for_acl_2(self, initiator_path): - """Returns True if an acl is found for initiator_path """ - # TODO(_alastor_) when we get a /initiators/:initiator/acl_policies - # endpoint use that instead of this monstrosity - initiator_groups = self._issue_api_request("initiator_groups", - api_version='2') - for ig, igdata in initiator_groups.items(): - if initiator_path in igdata['members']: - LOG.debug("Found initiator_group: %s for initiator: %s", - ig, initiator_path) - return True - LOG.debug("No initiator_group found for initiator: %s", initiator_path) - return False - - def _clean_acl_2(self, volume): - policies = self._get_policies_for_resource(volume) - - store_name, _ = self._scrape_template(policies) - - acl_url = (datc.URL_TEMPLATES["si_inst"]( - store_name) + "/acl_policy").format(datc._get_name(volume['id'])) - try: - initiator_group = self._issue_api_request( - acl_url, api_version='2')['initiator_groups'][0] - initiator_iqn_path = self._issue_api_request( - initiator_group.lstrip("/"))["members"][0] - # Clear out ACL and delete initiator group - self._issue_api_request(acl_url, - method="put", - body={'initiator_groups': []}, - api_version='2') - self._issue_api_request(initiator_group.lstrip("/"), - method="delete", - api_version='2') - if not self._check_for_acl_2(initiator_iqn_path): - self._issue_api_request(initiator_iqn_path.lstrip("/"), - method="delete", - api_version='2') - except (IndexError, exception.NotFound): - LOG.debug("Did not find any initiator groups for volume: %s", - volume) - - # =================== - # = Create Snapshot = - # =================== - - def _create_snapshot_2(self, snapshot): - policies = self._get_policies_for_resource(snapshot) - - store_name, vol_name = self._scrape_template(policies) - - url_template = datc.URL_TEMPLATES['vol_inst']( - store_name, vol_name) + 
'/snapshots' - url = url_template.format(datc._get_name(snapshot['volume_id'])) - - snap_params = { - 'uuid': snapshot['id'], - } - snap = self._issue_api_request(url, method='post', body=snap_params, - api_version='2') - snapu = "/".join((url, snap['timestamp'])) - self._snap_poll(snapu) - - # =================== - # = Delete Snapshot = - # =================== - - def _delete_snapshot_2(self, snapshot): - policies = self._get_policies_for_resource(snapshot) - - store_name, vol_name = self._scrape_template(policies) - - snap_temp = datc.URL_TEMPLATES['vol_inst']( - store_name, vol_name) + '/snapshots' - snapu = snap_temp.format(datc._get_name(snapshot['volume_id'])) - snapshots = self._issue_api_request(snapu, method='get', - api_version='2') - - try: - for ts, snap in snapshots.items(): - if snap['uuid'] == snapshot['id']: - url_template = snapu + '/{}' - url = url_template.format(ts) - self._issue_api_request(url, method='delete', - api_version='2') - break - else: - raise exception.NotFound - except exception.NotFound: - msg = ("Tried to delete snapshot %s, but was not found in " - "Datera cluster. 
Continuing with delete.") - LOG.info(msg, datc._get_name(snapshot['id'])) - - # ======================== - # = Volume From Snapshot = - # ======================== - - def _create_volume_from_snapshot_2(self, volume, snapshot): - policies = self._get_policies_for_resource(snapshot) - - store_name, vol_name = self._scrape_template(policies) - - snap_temp = datc.URL_TEMPLATES['vol_inst']( - store_name, vol_name) + '/snapshots' - snapu = snap_temp.format(datc._get_name(snapshot['volume_id'])) - snapshots = self._issue_api_request(snapu, method='get', - api_version='2') - for ts, snap in snapshots.items(): - if snap['uuid'] == snapshot['id']: - found_ts = ts - break - else: - raise exception.NotFound - - snap_url = (snap_temp + '/{}').format( - datc._get_name(snapshot['volume_id']), found_ts) - - self._snap_poll(snap_url) - - src = "/" + snap_url - app_params = ( - { - 'create_mode': 'openstack', - 'uuid': str(volume['id']), - 'name': datc._get_name(volume['id']), - 'clone_src': src, - }) - self._issue_api_request( - datc.URL_TEMPLATES['ai'](), - method='post', - body=app_params, - api_version='2') - - if (volume['size'] > snapshot['volume_size']): - self._extend_volume_2(volume, volume['size']) - - # ========== - # = Manage = - # ========== - - def _manage_existing_2(self, volume, existing_ref): - existing_ref = existing_ref['source-name'] - if existing_ref.count(":") != 2: - raise exception.ManageExistingInvalidReference( - _("existing_ref argument must be of this format:" - "app_inst_name:storage_inst_name:vol_name")) - app_inst_name = existing_ref.split(":")[0] - LOG.debug("Managing existing Datera volume %s. 
" - "Changing name to %s", - datc._get_name(volume['id']), - existing_ref) - data = {'name': datc._get_name(volume['id'])} - self._issue_api_request(datc.URL_TEMPLATES['ai_inst']().format( - app_inst_name), method='put', body=data, api_version='2') - - # =================== - # = Manage Get Size = - # =================== - - def _manage_existing_get_size_2(self, volume, existing_ref): - existing_ref = existing_ref['source-name'] - if existing_ref.count(":") != 2: - raise exception.ManageExistingInvalidReference( - _("existing_ref argument must be of this format:" - "app_inst_name:storage_inst_name:vol_name")) - app_inst_name, si_name, vol_name = existing_ref.split(":") - app_inst = self._issue_api_request( - datc.URL_TEMPLATES['ai_inst']().format(app_inst_name), - api_version='2') - return self._get_size_2(volume, app_inst, si_name, vol_name) - - def _get_size_2(self, volume, app_inst=None, si_name=None, vol_name=None): - """Helper method for getting the size of a backend object - - If app_inst is provided, we'll just parse the dict to get - the size instead of making a separate http request - """ - policies = self._get_policies_for_resource(volume) - si_name = si_name if si_name else policies['default_storage_name'] - vol_name = vol_name if vol_name else policies['default_volume_name'] - if not app_inst: - vol_url = datc.URL_TEMPLATES['ai_inst']().format( - datc._get_name(volume['id'])) - app_inst = self._issue_api_request(vol_url) - size = app_inst[ - 'storage_instances'][si_name]['volumes'][vol_name]['size'] - return size - - # ========================= - # = Get Manageable Volume = - # ========================= - - def _get_manageable_volumes_2(self, cinder_volumes, marker, limit, offset, - sort_keys, sort_dirs): - LOG.debug("Listing manageable Datera volumes") - app_instances = self._issue_api_request( - datc.URL_TEMPLATES['ai'](), api_version='2').values() - - results = [] - - cinder_volume_ids = [vol['id'] for vol in cinder_volumes] - - for ai in 
app_instances: - ai_name = ai['name'] - reference = None - size = None - safe_to_manage = False - reason_not_safe = None - cinder_id = None - extra_info = None - if re.match(datc.UUID4_RE, ai_name): - cinder_id = ai_name.lstrip(datc.OS_PREFIX) - if (not cinder_id and - ai_name.lstrip(datc.OS_PREFIX) not in cinder_volume_ids): - safe_to_manage = self._is_manageable(ai) - if safe_to_manage: - si = list(ai['storage_instances'].values())[0] - si_name = si['name'] - vol = list(si['volumes'].values())[0] - vol_name = vol['name'] - size = vol['size'] - reference = {"source-name": "{}:{}:{}".format( - ai_name, si_name, vol_name)} - - results.append({ - 'reference': reference, - 'size': size, - 'safe_to_manage': safe_to_manage, - 'reason_not_safe': reason_not_safe, - 'cinder_id': cinder_id, - 'extra_info': extra_info}) - - page_results = volutils.paginate_entries_list( - results, marker, limit, offset, sort_keys, sort_dirs) - - return page_results - - # ============ - # = Unmanage = - # ============ - - def _unmanage_2(self, volume): - LOG.debug("Unmanaging Cinder volume %s. 
Changing name to %s", - volume['id'], datc._get_unmanaged(volume['id'])) - data = {'name': datc._get_unmanaged(volume['id'])} - self._issue_api_request(datc.URL_TEMPLATES['ai_inst']().format( - datc._get_name(volume['id'])), - method='put', - body=data, - api_version='2') - - # ================ - # = Volume Stats = - # ================ - - def _get_volume_stats_2(self, refresh=False): - if refresh or not self.cluster_stats: - try: - LOG.debug("Updating cluster stats info.") - - results = self._issue_api_request('system', api_version='2') - - if 'uuid' not in results: - LOG.error( - 'Failed to get updated stats from Datera Cluster.') - - backend_name = self.configuration.safe_get( - 'volume_backend_name') - stats = { - 'volume_backend_name': backend_name or 'Datera', - 'vendor_name': 'Datera', - 'driver_version': self.VERSION, - 'storage_protocol': 'iSCSI', - 'total_capacity_gb': ( - int(results['total_capacity']) / units.Gi), - 'free_capacity_gb': ( - int(results['available_capacity']) / units.Gi), - 'reserved_percentage': 0, - } - - self.cluster_stats = stats - except exception.DateraAPIException: - LOG.error('Failed to get updated stats from Datera cluster.') - return self.cluster_stats - - def _is_manageable(self, app_inst): - if len(app_inst['storage_instances']) == 1: - si = list(app_inst['storage_instances'].values())[0] - if len(si['volumes']) == 1: - return True - return False - - # ========= - # = Login = - # ========= - - def _login_2(self): - """Use the san_login and san_password to set token.""" - body = { - 'name': self.username, - 'password': self.password - } - - # Unset token now, otherwise potential expired token will be sent - # along to be used for authorization when trying to login. 
- self.datera_api_token = None - - try: - LOG.debug('Getting Datera auth token.') - results = self._issue_api_request('login', 'put', body=body, - sensitive=True, api_version='2') - self.datera_api_token = results['key'] - except exception.NotAuthorized: - with excutils.save_and_reraise_exception(): - LOG.error('Logging into the Datera cluster failed. Please ' - 'check your username and password set in the ' - 'cinder.conf and start the cinder-volume ' - 'service again.') - - # =========== - # = Polling = - # =========== - - def _snap_poll(self, url): - eventlet.sleep(datc.DEFAULT_SNAP_SLEEP) - TIMEOUT = 10 - retry = 0 - poll = True - while poll and not retry >= TIMEOUT: - retry += 1 - snap = self._issue_api_request(url, api_version='2') - if snap['op_state'] == 'available': - poll = False - else: - eventlet.sleep(1) - if retry >= TIMEOUT: - raise exception.VolumeDriverException( - message=_('Snapshot not ready.')) - - def _si_poll(self, volume, policies): - # Initial 4 second sleep required for some Datera versions - eventlet.sleep(datc.DEFAULT_SI_SLEEP_API_2) - TIMEOUT = 10 - retry = 0 - check_url = datc.URL_TEMPLATES['si_inst']( - policies['default_storage_name']).format( - datc._get_name(volume['id'])) - poll = True - while poll and not retry >= TIMEOUT: - retry += 1 - si = self._issue_api_request(check_url, api_version='2') - if si['op_state'] == 'available': - poll = False - else: - eventlet.sleep(1) - if retry >= TIMEOUT: - raise exception.VolumeDriverException( - message=_('Resource not ready.')) - - # ============ - # = IP Pools = - # ============ - - def _get_ip_pool_for_string_ip(self, ip): - """Takes a string ipaddress and return the ip_pool API object dict """ - pool = 'default' - ip_obj = ipaddress.ip_address(six.text_type(ip)) - ip_pools = self._issue_api_request('access_network_ip_pools', - api_version='2') - for ip_pool, ipdata in ip_pools.items(): - for access, adata in ipdata['network_paths'].items(): - if not adata.get('start_ip'): - continue - 
pool_if = ipaddress.ip_interface( - "/".join((adata['start_ip'], str(adata['netmask'])))) - if ip_obj in pool_if.network: - pool = ip_pool - return self._issue_api_request( - "access_network_ip_pools/{}".format(pool), api_version='2')['path'] - - # ============= - # = Templates = - # ============= - - def _scrape_template(self, policies): - sname = policies['default_storage_name'] - vname = policies['default_volume_name'] - - template = policies['template'] - if template: - result = self._issue_api_request( - datc.URL_TEMPLATES['at']().format(template), api_version='2') - sname, st = list(result['storage_templates'].items())[0] - vname = list(st['volume_templates'].keys())[0] - return sname, vname - - # ======= - # = QoS = - # ======= - - def _update_qos(self, resource, policies): - url = datc.URL_TEMPLATES['vol_inst']( - policies['default_storage_name'], - policies['default_volume_name']) + '/performance_policy' - url = url.format(datc._get_name(resource['id'])) - type_id = resource.get('volume_type_id', None) - if type_id is not None: - # Filter for just QOS policies in result. All of their keys - # should end with "max" - fpolicies = {k: int(v) for k, v in - policies.items() if k.endswith("max")} - # Filter all 0 values from being passed - fpolicies = dict(filter(lambda _v: _v[1] > 0, fpolicies.items())) - if fpolicies: - self._issue_api_request(url, 'post', body=fpolicies, - api_version='2') diff --git a/cinder/volume/drivers/datera/datera_api21.py b/cinder/volume/drivers/datera/datera_api21.py deleted file mode 100644 index db263e809..000000000 --- a/cinder/volume/drivers/datera/datera_api21.py +++ /dev/null @@ -1,915 +0,0 @@ -# Copyright 2017 Datera -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. 
You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import random -import re -import uuid - -import eventlet -import ipaddress -import six - -from oslo_log import log as logging -from oslo_utils import excutils -from oslo_utils import units - -from cinder import exception -from cinder.i18n import _ -from cinder.volume import utils as volutils - -import cinder.volume.drivers.datera.datera_common as datc - -LOG = logging.getLogger(__name__) - - -class DateraApi(object): - - # ================= - # = Create Volume = - # ================= - - def _create_volume_2_1(self, volume): - tenant = self._create_tenant(volume) - policies = self._get_policies_for_resource(volume) - num_replicas = int(policies['replica_count']) - storage_name = policies['default_storage_name'] - volume_name = policies['default_volume_name'] - template = policies['template'] - placement = policies['placement_mode'] - - if template: - app_params = ( - { - 'create_mode': "openstack", - # 'uuid': str(volume['id']), - 'name': datc._get_name(volume['id']), - 'app_template': '/app_templates/{}'.format(template) - }) - - else: - - app_params = ( - { - 'create_mode': "openstack", - 'uuid': str(volume['id']), - 'name': datc._get_name(volume['id']), - 'access_control_mode': 'deny_all', - 'storage_instances': [ - { - 'name': storage_name, - 'volumes': [ - { - 'name': volume_name, - 'size': volume['size'], - 'placement_mode': placement, - 'replica_count': num_replicas, - 'snapshot_policies': [ - ] - } - ] - } - ] - }) - self._issue_api_request( - datc.URL_TEMPLATES['ai'](), - 'post', - body=app_params, - api_version='2.1', - tenant=tenant) - 
self._update_qos_2_1(volume, policies, tenant) - - # ================= - # = Extend Volume = - # ================= - - def _extend_volume_2_1(self, volume, new_size): - tenant = self._create_tenant(volume) - policies = self._get_policies_for_resource(volume) - template = policies['template'] - if template: - LOG.warning("Volume size not extended due to template binding:" - " volume: %(volume)s, template: %(template)s", - volume=volume, template=template) - return - - # Offline App Instance, if necessary - reonline = False - app_inst = self._issue_api_request( - datc.URL_TEMPLATES['ai_inst']().format( - datc._get_name(volume['id'])), - api_version='2.1', tenant=tenant) - if app_inst['data']['admin_state'] == 'online': - reonline = True - self._detach_volume_2_1(None, volume) - # Change Volume Size - app_inst = datc._get_name(volume['id']) - data = { - 'size': new_size - } - store_name, vol_name = self._scrape_template(policies) - self._issue_api_request( - datc.URL_TEMPLATES['vol_inst']( - store_name, vol_name).format(app_inst), - method='put', - body=data, - api_version='2.1', - tenant=tenant) - # Online Volume, if it was online before - if reonline: - self._create_export_2_1(None, volume, None) - - # ================= - # = Cloned Volume = - # ================= - - def _create_cloned_volume_2_1(self, volume, src_vref): - policies = self._get_policies_for_resource(volume) - tenant = self._create_tenant(volume) - store_name, vol_name = self._scrape_template(policies) - - src = "/" + datc.URL_TEMPLATES['vol_inst']( - store_name, vol_name).format(datc._get_name(src_vref['id'])) - data = { - 'create_mode': 'openstack', - 'name': datc._get_name(volume['id']), - 'uuid': str(volume['id']), - 'clone_volume_src': {'path': src}, - } - self._issue_api_request( - datc.URL_TEMPLATES['ai'](), 'post', body=data, api_version='2.1', - tenant=tenant) - - if volume['size'] > src_vref['size']: - self._extend_volume_2_1(volume, volume['size']) - - # ================= - # = Delete 
Volume = - # ================= - - def _delete_volume_2_1(self, volume): - self._detach_volume_2_1(None, volume) - tenant = self._create_tenant(volume) - app_inst = datc._get_name(volume['id']) - try: - self._issue_api_request( - datc.URL_TEMPLATES['ai_inst']().format(app_inst), - method='delete', - api_version='2.1', - tenant=tenant) - except exception.NotFound: - msg = ("Tried to delete volume %s, but it was not found in the " - "Datera cluster. Continuing with delete.") - LOG.info(msg, datc._get_name(volume['id'])) - - # ================= - # = Ensure Export = - # ================= - - def _ensure_export_2_1(self, context, volume, connector=None): - self.create_export(context, volume, connector) - - # ========================= - # = Initialize Connection = - # ========================= - - def _initialize_connection_2_1(self, volume, connector): - # Now online the app_instance (which will online all storage_instances) - multipath = connector.get('multipath', False) - tenant = self._create_tenant(volume) - url = datc.URL_TEMPLATES['ai_inst']().format( - datc._get_name(volume['id'])) - data = { - 'admin_state': 'online' - } - app_inst = self._issue_api_request( - url, method='put', body=data, api_version='2.1', tenant=tenant)[ - 'data'] - storage_instances = app_inst["storage_instances"] - si = storage_instances[0] - - # randomize portal chosen - choice = 0 - policies = self._get_policies_for_resource(volume) - if policies["round_robin"]: - choice = random.randint(0, 1) - portal = si['access']['ips'][choice] + ':3260' - iqn = si['access']['iqn'] - if multipath: - portals = [p + ':3260' for p in si['access']['ips']] - iqns = [iqn for _ in si['access']['ips']] - lunids = [self._get_lunid() for _ in si['access']['ips']] - - result = { - 'driver_volume_type': 'iscsi', - 'data': { - 'target_discovered': False, - 'target_iqn': iqn, - 'target_iqns': iqns, - 'target_portal': portal, - 'target_portals': portals, - 'target_lun': self._get_lunid(), - 'target_luns': lunids, - 
'volume_id': volume['id'], - 'discard': False}} - else: - result = { - 'driver_volume_type': 'iscsi', - 'data': { - 'target_discovered': False, - 'target_iqn': iqn, - 'target_portal': portal, - 'target_lun': self._get_lunid(), - 'volume_id': volume['id'], - 'discard': False}} - - return result - - # ================= - # = Create Export = - # ================= - - def _create_export_2_1(self, context, volume, connector): - tenant = self._create_tenant(volume) - url = datc.URL_TEMPLATES['ai_inst']().format( - datc._get_name(volume['id'])) - data = { - 'admin_state': 'offline', - 'force': True - } - self._issue_api_request( - url, method='put', body=data, api_version='2.1', tenant=tenant) - policies = self._get_policies_for_resource(volume) - store_name, _ = self._scrape_template(policies) - if connector and connector.get('ip'): - # Case where volume_type has non default IP Pool info - if policies['ip_pool'] != 'default': - initiator_ip_pool_path = self._issue_api_request( - "access_network_ip_pools/{}".format( - policies['ip_pool']), - api_version='2.1', - tenant=tenant)['path'] - # Fallback to trying reasonable IP based guess - else: - initiator_ip_pool_path = self._get_ip_pool_for_string_ip_2_1( - connector['ip']) - - ip_pool_url = datc.URL_TEMPLATES['si_inst']( - store_name).format(datc._get_name(volume['id'])) - ip_pool_data = {'ip_pool': {'path': initiator_ip_pool_path}} - self._issue_api_request(ip_pool_url, - method="put", - body=ip_pool_data, - api_version='2.1', - tenant=tenant) - url = datc.URL_TEMPLATES['ai_inst']().format( - datc._get_name(volume['id'])) - data = { - 'admin_state': 'online' - } - self._issue_api_request( - url, method='put', body=data, api_version='2.1', tenant=tenant) - # Check if we've already setup everything for this volume - url = (datc.URL_TEMPLATES['si']().format(datc._get_name(volume['id']))) - storage_instances = self._issue_api_request( - url, api_version='2.1', tenant=tenant) - # Handle adding initiator to product if necessary 
- # Then add initiator to ACL - if (connector and - connector.get('initiator') and - not policies['acl_allow_all']): - initiator_name = "OpenStack_{}_{}".format( - self.driver_prefix, str(uuid.uuid4())[:4]) - initiator_group = datc.INITIATOR_GROUP_PREFIX + str(uuid.uuid4()) - found = False - initiator = connector['initiator'] - if not found: - data = {'id': initiator, 'name': initiator_name} - # Try and create the initiator - # If we get a conflict, ignore it - self._issue_api_request("initiators", - method="post", - body=data, - conflict_ok=True, - api_version='2.1', - tenant=tenant) - # Create initiator group with initiator in it - initiator_path = "/initiators/{}".format(initiator) - initiator_group_path = "/initiator_groups/{}".format( - initiator_group) - ig_data = {'name': initiator_group, - 'members': [{'path': initiator_path}]} - self._issue_api_request("initiator_groups", - method="post", - body=ig_data, - conflict_ok=True, - api_version='2.1', - tenant=tenant) - # Create ACL with initiator group as reference for each - # storage_instance in app_instance - # TODO(_alastor_): We need to avoid changing the ACLs if the - # template already specifies an ACL policy. 
- for si in storage_instances['data']: - acl_url = (datc.URL_TEMPLATES['si']() + - "/{}/acl_policy").format( - datc._get_name(volume['id']), si['name']) - existing_acl = self._issue_api_request(acl_url, - method="get", - api_version='2.1', - tenant=tenant)['data'] - data = {} - data['initiators'] = existing_acl['initiators'] - data['initiator_groups'] = existing_acl['initiator_groups'] - data['initiator_groups'].append({"path": initiator_group_path}) - self._issue_api_request(acl_url, - method="put", - body=data, - api_version='2.1', - tenant=tenant) - # Check to ensure we're ready for go-time - self._si_poll_2_1(volume, policies, tenant) - - # ================= - # = Detach Volume = - # ================= - - def _detach_volume_2_1(self, context, volume, attachment=None): - tenant = self._create_tenant(volume) - url = datc.URL_TEMPLATES['ai_inst']().format( - datc._get_name(volume['id'])) - data = { - 'admin_state': 'offline', - 'force': True - } - try: - self._issue_api_request(url, method='put', body=data, - api_version='2.1', tenant=tenant) - except exception.NotFound: - msg = ("Tried to detach volume %s, but it was not found in the " - "Datera cluster. 
Continuing with detach.") - LOG.info(msg, volume['id']) - # TODO(_alastor_): Make acl cleaning multi-attach aware - self._clean_acl_2_1(volume, tenant) - - def _check_for_acl_2_1(self, initiator_path): - """Returns True if an acl is found for initiator_path """ - # TODO(_alastor_) when we get a /initiators/:initiator/acl_policies - # endpoint use that instead of this monstrosity - initiator_groups = self._issue_api_request("initiator_groups", - api_version='2.1') - for ig, igdata in initiator_groups.items(): - if initiator_path in igdata['members']: - LOG.debug("Found initiator_group: %s for initiator: %s", - ig, initiator_path) - return True - LOG.debug("No initiator_group found for initiator: %s", initiator_path) - return False - - def _clean_acl_2_1(self, volume, tenant): - policies = self._get_policies_for_resource(volume) - - store_name, _ = self._scrape_template(policies) - - acl_url = (datc.URL_TEMPLATES["si_inst"]( - store_name) + "/acl_policy").format(datc._get_name(volume['id'])) - try: - initiator_group = self._issue_api_request( - acl_url, api_version='2.1', tenant=tenant)['data'][ - 'initiator_groups'][0]['path'] - # TODO(_alastor_): Re-enable this when we get a force-delete - # option on the /initiators endpoint - # initiator_iqn_path = self._issue_api_request( - # initiator_group.lstrip("/"), api_version='2.1', - # tenant=tenant)[ - # "data"]["members"][0]["path"] - # Clear out ACL and delete initiator group - self._issue_api_request(acl_url, - method="put", - body={'initiator_groups': []}, - api_version='2.1', - tenant=tenant) - self._issue_api_request(initiator_group.lstrip("/"), - method="delete", - api_version='2.1', - tenant=tenant) - # TODO(_alastor_): Re-enable this when we get a force-delete - # option on the /initiators endpoint - # if not self._check_for_acl_2_1(initiator_iqn_path): - # self._issue_api_request(initiator_iqn_path.lstrip("/"), - # method="delete", - # api_version='2.1', - # tenant=tenant) - except (IndexError, 
exception.NotFound): - LOG.debug("Did not find any initiator groups for volume: %s", - volume) - - # =================== - # = Create Snapshot = - # =================== - - def _create_snapshot_2_1(self, snapshot): - tenant = self._create_tenant(snapshot) - policies = self._get_policies_for_resource(snapshot) - - store_name, vol_name = self._scrape_template(policies) - - url_template = datc.URL_TEMPLATES['vol_inst']( - store_name, vol_name) + '/snapshots' - url = url_template.format(datc._get_name(snapshot['volume_id'])) - - snap_params = { - 'uuid': snapshot['id'], - } - snap = self._issue_api_request(url, method='post', body=snap_params, - api_version='2.1', tenant=tenant) - snapu = "/".join((url, snap['data']['timestamp'])) - self._snap_poll_2_1(snapu, tenant) - - # =================== - # = Delete Snapshot = - # =================== - - def _delete_snapshot_2_1(self, snapshot): - tenant = self._create_tenant(snapshot) - policies = self._get_policies_for_resource(snapshot) - - store_name, vol_name = self._scrape_template(policies) - - snap_temp = datc.URL_TEMPLATES['vol_inst']( - store_name, vol_name) + '/snapshots' - snapu = snap_temp.format(datc._get_name(snapshot['volume_id'])) - snapshots = [] - try: - snapshots = self._issue_api_request(snapu, - method='get', - api_version='2.1', - tenant=tenant) - except exception.NotFound: - msg = ("Tried to delete snapshot %s, but parent volume %s was " - "not found in Datera cluster. Continuing with delete.") - LOG.info(msg, - datc._get_name(snapshot['id']), - datc._get_name(snapshot['volume_id'])) - return - - try: - for snap in snapshots['data']: - if snap['uuid'] == snapshot['id']: - url_template = snapu + '/{}' - url = url_template.format(snap['timestamp']) - self._issue_api_request( - url, - method='delete', - api_version='2.1', - tenant=tenant) - break - else: - raise exception.NotFound - except exception.NotFound: - msg = ("Tried to delete snapshot %s, but was not found in " - "Datera cluster. 
Continuing with delete.") - LOG.info(msg, datc._get_name(snapshot['id'])) - - # ======================== - # = Volume From Snapshot = - # ======================== - - def _create_volume_from_snapshot_2_1(self, volume, snapshot): - tenant = self._create_tenant(volume) - policies = self._get_policies_for_resource(snapshot) - - store_name, vol_name = self._scrape_template(policies) - - snap_temp = datc.URL_TEMPLATES['vol_inst']( - store_name, vol_name) + '/snapshots' - snapu = snap_temp.format(datc._get_name(snapshot['volume_id'])) - snapshots = self._issue_api_request( - snapu, method='get', api_version='2.1', tenant=tenant) - - for snap in snapshots['data']: - if snap['uuid'] == snapshot['id']: - found_ts = snap['utc_ts'] - break - else: - raise exception.NotFound - - snap_url = (snap_temp + '/{}').format( - datc._get_name(snapshot['volume_id']), found_ts) - - self._snap_poll_2_1(snap_url, tenant) - - src = "/" + snap_url - app_params = ( - { - 'create_mode': 'openstack', - 'uuid': str(volume['id']), - 'name': datc._get_name(volume['id']), - 'clone_snapshot_src': {'path': src}, - }) - self._issue_api_request( - datc.URL_TEMPLATES['ai'](), - method='post', - body=app_params, - api_version='2.1', - tenant=tenant) - - if (volume['size'] > snapshot['volume_size']): - self._extend_volume_2_1(volume, volume['size']) - - # ========== - # = Retype = - # ========== - - def _retype_2_1(self, ctxt, volume, new_type, diff, host): - LOG.debug("Retype called\n" - "Volume: %(volume)s\n" - "NewType: %(new_type)s\n" - "Diff: %(diff)s\n" - "Host: %(host)s\n", {'volume': volume, 'new_type': new_type, - 'diff': diff, 'host': host}) - # We'll take the fast route only if the types share the same backend - # And that backend matches this driver - old_pol = self._get_policies_for_resource(volume) - new_pol = self._get_policies_for_volume_type(new_type) - if (host['capabilities']['vendor_name'].lower() == - self.backend_name.lower()): - LOG.debug("Starting fast volume retype") - - if 
old_pol.get('template') or new_pol.get('template'): - LOG.warning( - "Fast retyping between template-backed volume-types " - "unsupported. Type1: %s, Type2: %s", - volume['volume_type_id'], new_type) - - tenant = self._create_tenant(volume) - self._update_qos_2_1(volume, new_pol, tenant) - vol_params = ( - { - 'placement_mode': new_pol['placement_mode'], - 'replica_count': new_pol['replica_count'], - }) - url = datc.URL_TEMPLATES['vol_inst']( - old_pol['default_storage_name'], - old_pol['default_volume_name']).format( - datc._get_name(volume['id'])) - self._issue_api_request(url, method='put', body=vol_params, - api_version='2.1', tenant=tenant) - return True - - else: - LOG.debug("Couldn't fast-retype volume between specified types") - return False - - # ========== - # = Manage = - # ========== - - def _manage_existing_2_1(self, volume, existing_ref): - # Only volumes created under the requesting tenant can be managed in - # the v2.1 API. Eg. If tenant A is the tenant for the volume to be - # managed, it must also be tenant A that makes this request. 
- # This will be fixed in a later API update - tenant = self._create_tenant(volume) - existing_ref = existing_ref['source-name'] - if existing_ref.count(":") not in (2, 3): - raise exception.ManageExistingInvalidReference( - _("existing_ref argument must be of this format: " - "tenant:app_inst_name:storage_inst_name:vol_name or " - "app_inst_name:storage_inst_name:vol_name")) - app_inst_name = existing_ref.split(":")[0] - try: - (tenant, app_inst_name, storage_inst_name, - vol_name) = existing_ref.split(":") - except TypeError: - app_inst_name, storage_inst_name, vol_name = existing_ref.split( - ":") - tenant = None - LOG.debug("Managing existing Datera volume %s " - "Changing name to %s", - datc._get_name(volume['id']), existing_ref) - data = {'name': datc._get_name(volume['id'])} - self._issue_api_request(datc.URL_TEMPLATES['ai_inst']().format( - app_inst_name), method='put', body=data, api_version='2.1', - tenant=tenant) - - # =================== - # = Manage Get Size = - # =================== - - def _manage_existing_get_size_2_1(self, volume, existing_ref): - tenant = self._create_tenant(volume) - existing_ref = existing_ref['source-name'] - if existing_ref.count(":") != 2: - raise exception.ManageExistingInvalidReference( - _("existing_ref argument must be of this format:" - "app_inst_name:storage_inst_name:vol_name")) - app_inst_name, si_name, vol_name = existing_ref.split(":") - app_inst = self._issue_api_request( - datc.URL_TEMPLATES['ai_inst']().format(app_inst_name), - api_version='2.1', tenant=tenant) - return self._get_size_2_1( - volume, tenant, app_inst, si_name, vol_name) - - def _get_size_2_1(self, volume, tenant=None, app_inst=None, si_name=None, - vol_name=None): - """Helper method for getting the size of a backend object - - If app_inst is provided, we'll just parse the dict to get - the size instead of making a separate http request - """ - policies = self._get_policies_for_resource(volume) - si_name = si_name if si_name else 
policies['default_storage_name'] - vol_name = vol_name if vol_name else policies['default_volume_name'] - if not app_inst: - vol_url = datc.URL_TEMPLATES['ai_inst']().format( - datc._get_name(volume['id'])) - app_inst = self._issue_api_request( - vol_url, api_version='2.1', tenant=tenant)['data'] - if 'data' in app_inst: - app_inst = app_inst['data'] - sis = app_inst['storage_instances'] - found_si = None - for si in sis: - if si['name'] == si_name: - found_si = si - break - found_vol = None - for vol in found_si['volumes']: - if vol['name'] == vol_name: - found_vol = vol - size = found_vol['size'] - return size - - # ========================= - # = Get Manageable Volume = - # ========================= - - def _get_manageable_volumes_2_1(self, cinder_volumes, marker, limit, - offset, sort_keys, sort_dirs): - # Use the first volume to determine the tenant we're working under - if cinder_volumes: - tenant = self._create_tenant(cinder_volumes[0]) - else: - tenant = None - LOG.debug("Listing manageable Datera volumes") - app_instances = self._issue_api_request( - datc.URL_TEMPLATES['ai'](), api_version='2.1', - tenant=tenant)['data'] - - results = [] - - cinder_volume_ids = [vol['id'] for vol in cinder_volumes] - - for ai in app_instances: - ai_name = ai['name'] - reference = None - size = None - safe_to_manage = False - reason_not_safe = "" - cinder_id = None - extra_info = None - if re.match(datc.UUID4_RE, ai_name): - cinder_id = ai_name.lstrip(datc.OS_PREFIX) - if (not cinder_id and - ai_name.lstrip(datc.OS_PREFIX) not in cinder_volume_ids): - safe_to_manage, reason_not_safe = self._is_manageable_2_1(ai) - if safe_to_manage: - si = list(ai['storage_instances'].values())[0] - si_name = si['name'] - vol = list(si['volumes'].values())[0] - vol_name = vol['name'] - size = vol['size'] - reference = {"source-name": "{}:{}:{}".format( - ai_name, si_name, vol_name)} - - results.append({ - 'reference': reference, - 'size': size, - 'safe_to_manage': safe_to_manage, - 
'reason_not_safe': reason_not_safe, - 'cinder_id': cinder_id, - 'extra_info': extra_info}) - - page_results = volutils.paginate_entries_list( - results, marker, limit, offset, sort_keys, sort_dirs) - - return page_results - - def _is_manageable_2_1(self, app_inst): - if len(app_inst['storage_instances']) == 1: - si = list(app_inst['storage_instances'].values())[0] - if len(si['volumes']) == 1: - return (True, "") - return (False, - "App Instance has more than one storage instance or volume") - # ============ - # = Unmanage = - # ============ - - def _unmanage_2_1(self, volume): - tenant = self._create_tenant(volume) - LOG.debug("Unmanaging Cinder volume %s. Changing name to %s", - volume['id'], datc._get_unmanaged(volume['id'])) - data = {'name': datc._get_unmanaged(volume['id'])} - self._issue_api_request(datc.URL_TEMPLATES['ai_inst']().format( - datc._get_name(volume['id'])), - method='put', - body=data, - api_version='2.1', - tenant=tenant) - - # ================ - # = Volume Stats = - # ================ - - # ========= - # = Login = - # ========= - - # =========== - # = Tenancy = - # =========== - - def _create_tenant(self, volume=None): - # Create the Datera tenant if specified in the config - # Otherwise use the tenant provided - if self.tenant_id is None: - tenant = None - elif self.tenant_id.lower() == "map" and volume: - # Convert dashless uuid to uuid with dashes - # Eg: 0e33e95a9b154d348c675a1d8ea5b651 --> - # 0e33e95a-9b15-4d34-8c67-5a1d8ea5b651 - tenant = datc._get_name(str(uuid.UUID(volume["project_id"]))) - elif self.tenant_id.lower() == "map" and not volume: - tenant = None - else: - tenant = self.tenant_id - - if tenant: - params = {'name': tenant} - self._issue_api_request( - 'tenants', method='post', body=params, conflict_ok=True, - api_version='2.1') - return tenant - - # ========= - # = Login = - # ========= - - def _login_2_1(self): - """Use the san_login and san_password to set token.""" - body = { - 'name': self.username, - 'password': 
self.password - } - - # Unset token now, otherwise potential expired token will be sent - # along to be used for authorization when trying to login. - self.datera_api_token = None - - try: - LOG.debug('Getting Datera auth token.') - results = self._issue_api_request( - 'login', 'put', body=body, sensitive=True, api_version='2.1', - tenant=None) - self.datera_api_token = results['key'] - except exception.NotAuthorized: - with excutils.save_and_reraise_exception(): - LOG.error('Logging into the Datera cluster failed. Please ' - 'check your username and password set in the ' - 'cinder.conf and start the cinder-volume ' - 'service again.') - - # =========== - # = Polling = - # =========== - - def _snap_poll_2_1(self, url, tenant): - eventlet.sleep(datc.DEFAULT_SNAP_SLEEP) - TIMEOUT = 20 - retry = 0 - poll = True - while poll and not retry >= TIMEOUT: - retry += 1 - snap = self._issue_api_request(url, - api_version='2.1', - tenant=tenant)['data'] - if snap['op_state'] == 'available': - poll = False - else: - eventlet.sleep(1) - if retry >= TIMEOUT: - raise exception.VolumeDriverException( - message=_('Snapshot not ready.')) - - def _si_poll_2_1(self, volume, policies, tenant): - # Initial 4 second sleep required for some Datera versions - eventlet.sleep(datc.DEFAULT_SI_SLEEP) - TIMEOUT = 10 - retry = 0 - check_url = datc.URL_TEMPLATES['si_inst']( - policies['default_storage_name']).format( - datc._get_name(volume['id'])) - poll = True - while poll and not retry >= TIMEOUT: - retry += 1 - si = self._issue_api_request(check_url, - api_version='2.1', - tenant=tenant)['data'] - if si['op_state'] == 'available': - poll = False - else: - eventlet.sleep(1) - if retry >= TIMEOUT: - raise exception.VolumeDriverException( - message=_('Resource not ready.')) - - # ================ - # = Volume Stats = - # ================ - - def _get_volume_stats_2_1(self, refresh=False): - if refresh or not self.cluster_stats: - try: - LOG.debug("Updating cluster stats info.") - - results = 
self._issue_api_request( - 'system', api_version='2.1')['data'] - - if 'uuid' not in results: - LOG.error( - 'Failed to get updated stats from Datera Cluster.') - - stats = { - 'volume_backend_name': self.backend_name, - 'vendor_name': 'Datera', - 'driver_version': self.VERSION, - 'storage_protocol': 'iSCSI', - 'total_capacity_gb': ( - int(results['total_capacity']) / units.Gi), - 'free_capacity_gb': ( - int(results['available_capacity']) / units.Gi), - 'reserved_percentage': 0, - 'QoS_support': True, - } - - self.cluster_stats = stats - except exception.DateraAPIException: - LOG.error('Failed to get updated stats from Datera cluster.') - return self.cluster_stats - - # ======= - # = QoS = - # ======= - - def _update_qos_2_1(self, resource, policies, tenant): - url = datc.URL_TEMPLATES['vol_inst']( - policies['default_storage_name'], - policies['default_volume_name']) + '/performance_policy' - url = url.format(datc._get_name(resource['id'])) - type_id = resource.get('volume_type_id', None) - if type_id is not None: - # Filter for just QOS policies in result. 
All of their keys - # should end with "max" - fpolicies = {k: int(v) for k, v in - policies.items() if k.endswith("max")} - # Filter all 0 values from being passed - fpolicies = dict(filter(lambda _v: _v[1] > 0, fpolicies.items())) - if fpolicies: - self._issue_api_request(url, 'delete', api_version='2.1', - tenant=tenant) - self._issue_api_request(url, 'post', body=fpolicies, - api_version='2.1', tenant=tenant) - - # ============ - # = IP Pools = - # ============ - - def _get_ip_pool_for_string_ip_2_1(self, ip): - """Takes a string ipaddress and return the ip_pool API object dict """ - pool = 'default' - ip_obj = ipaddress.ip_address(six.text_type(ip)) - ip_pools = self._issue_api_request('access_network_ip_pools', - api_version='2.1') - for ipdata in ip_pools['data']: - for adata in ipdata['network_paths']: - if not adata.get('start_ip'): - continue - pool_if = ipaddress.ip_interface( - "/".join((adata['start_ip'], str(adata['netmask'])))) - if ip_obj in pool_if.network: - pool = ipdata['name'] - return self._issue_api_request( - "access_network_ip_pools/{}".format(pool), - api_version='2.1')['path'] diff --git a/cinder/volume/drivers/datera/datera_common.py b/cinder/volume/drivers/datera/datera_common.py deleted file mode 100644 index c6bf27c26..000000000 --- a/cinder/volume/drivers/datera/datera_common.py +++ /dev/null @@ -1,491 +0,0 @@ -# Copyright 2017 Datera -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -import functools -import json -import re -import six -import time -import types -import uuid - -import eventlet -import requests - -from oslo_log import log as logging -from six.moves import http_client - -from cinder import context -from cinder import exception -from cinder.i18n import _ -from cinder.volume import qos_specs -from cinder.volume import volume_types - - -LOG = logging.getLogger(__name__) -OS_PREFIX = "OS-" -UNMANAGE_PREFIX = "UNMANAGED-" - -# Taken from this SO post : -# http://stackoverflow.com/a/18516125 -# Using old-style string formatting because of the nature of the regex -# conflicting with new-style curly braces -UUID4_STR_RE = ("%s[a-f0-9]{8}-?[a-f0-9]{4}-?4[a-f0-9]{3}-?[89ab]" - "[a-f0-9]{3}-?[a-f0-9]{12}") -UUID4_RE = re.compile(UUID4_STR_RE % OS_PREFIX) - -# Recursive dict to assemble basic url structure for the most common -# API URL endpoints. Most others are constructed from these -URL_TEMPLATES = { - 'ai': lambda: 'app_instances', - 'ai_inst': lambda: (URL_TEMPLATES['ai']() + '/{}'), - 'si': lambda: (URL_TEMPLATES['ai_inst']() + '/storage_instances'), - 'si_inst': lambda storage_name: ( - (URL_TEMPLATES['si']() + '/{}').format( - '{}', storage_name)), - 'vol': lambda storage_name: ( - (URL_TEMPLATES['si_inst'](storage_name) + '/volumes')), - 'vol_inst': lambda storage_name, volume_name: ( - (URL_TEMPLATES['vol'](storage_name) + '/{}').format( - '{}', volume_name)), - 'at': lambda: 'app_templates/{}'} - -DEFAULT_SI_SLEEP = 1 -DEFAULT_SI_SLEEP_API_2 = 5 -DEFAULT_SNAP_SLEEP = 1 -INITIATOR_GROUP_PREFIX = "IG-" -API_VERSIONS = ["2", "2.1"] -API_TIMEOUT = 20 - -############### -# METADATA KEYS -############### - -M_TYPE = 'cinder_volume_type' -M_CALL = 'cinder_calls' -M_CLONE = 'cinder_clone_from' -M_MANAGED = 'cinder_managed' - -M_KEYS = [M_TYPE, M_CALL, M_CLONE, M_MANAGED] - - -def _get_name(name): - return "".join((OS_PREFIX, name)) - - -def _get_unmanaged(name): - return "".join((UNMANAGE_PREFIX, name)) - - -def _authenticated(func): 
- """Ensure the driver is authenticated to make a request. - - In do_setup() we fetch an auth token and store it. If that expires when - we do API request, we'll fetch a new one. - """ - @functools.wraps(func) - def func_wrapper(driver, *args, **kwargs): - try: - return func(driver, *args, **kwargs) - except exception.NotAuthorized: - # Prevent recursion loop. After the driver arg is the - # resource_type arg from _issue_api_request(). If attempt to - # login failed, we should just give up. - if args[0] == 'login': - raise - - # Token might've expired, get a new one, try again. - driver.login() - return func(driver, *args, **kwargs) - return func_wrapper - - -def _api_lookup(func): - """Perform a dynamic API implementation lookup for a call - - Naming convention follows this pattern: - - # original_func(args) --> _original_func_X_?Y?(args) - # where X and Y are the major and minor versions of the latest - # supported API version - - # From the Datera box we've determined that it supports API - # versions ['2', '2.1'] - # This is the original function call - @_api_lookup - def original_func(arg1, arg2): - print("I'm a shim, this won't get executed!") - pass - - # This is the function that is actually called after determining - # the correct API version to use - def _original_func_2_1(arg1, arg2): - some_version_2_1_implementation_here() - - # This is the function that would be called if the previous function - # did not exist: - def _original_func_2(arg1, arg2): - some_version_2_implementation_here() - - # This function would NOT be called, because the connected Datera box - # does not support the 1.5 version of the API - def _original_func_1_5(arg1, arg2): - some_version_1_5_implementation_here() - """ - @functools.wraps(func) - def wrapper(*args, **kwargs): - obj = args[0] - api_versions = _get_supported_api_versions(obj) - api_version = None - index = -1 - while True: - try: - api_version = api_versions[index] - except (IndexError, KeyError): - msg = _("No 
compatible API version found for this product: " - "api_versions -> %(api_version)s, %(func)s") - LOG.error(msg, api_version=api_version, func=func) - raise exception.DateraAPIException(msg % { - 'api_version': api_version, 'func': func}) - # Py27 - try: - name = "_" + "_".join( - (func.func_name, api_version.replace(".", "_"))) - # Py3+ - except AttributeError: - name = "_" + "_".join( - (func.__name__, api_version.replace(".", "_"))) - try: - if obj.do_profile: - LOG.info("Trying method: %s", name) - call_id = uuid.uuid4() - LOG.debug("Profiling method: %s, id %s", name, call_id) - t1 = time.time() - obj.thread_local.trace_id = call_id - result = getattr(obj, name)(*args[1:], **kwargs) - if obj.do_profile: - t2 = time.time() - timedelta = round(t2 - t1, 3) - LOG.debug("Profile for method %s, id %s: %ss", - name, call_id, timedelta) - return result - except AttributeError as e: - # If we find the attribute name in the error message - # then we continue otherwise, raise to prevent masking - # errors - if name not in six.text_type(e): - raise - else: - LOG.info(e) - index -= 1 - except exception.DateraAPIException as e: - if "UnsupportedVersionError" in six.text_type(e): - index -= 1 - else: - raise - - return wrapper - - -def _get_supported_api_versions(driver): - t = time.time() - if driver.api_cache and driver.api_timeout - t < API_TIMEOUT: - return driver.api_cache - driver.api_timeout = t + API_TIMEOUT - results = [] - host = driver.configuration.san_ip - port = driver.configuration.datera_api_port - client_cert = driver.configuration.driver_client_cert - client_cert_key = driver.configuration.driver_client_cert_key - cert_data = None - header = {'Content-Type': 'application/json; charset=utf-8', - 'Datera-Driver': 'OpenStack-Cinder-{}'.format(driver.VERSION)} - protocol = 'http' - if client_cert: - protocol = 'https' - cert_data = (client_cert, client_cert_key) - try: - url = '%s://%s:%s/api_versions' % (protocol, host, port) - resp = driver._request(url, 
"get", None, header, cert_data) - data = resp.json() - results = [elem.strip("v") for elem in data['api_versions']] - except (exception.DateraAPIException, KeyError): - # Fallback to pre-endpoint logic - for version in API_VERSIONS[0:-1]: - url = '%s://%s:%s/v%s' % (protocol, host, port, version) - resp = driver._request(url, "get", None, header, cert_data) - if ("api_req" in resp.json() or - str(resp.json().get("code")) == "99"): - results.append(version) - else: - LOG.error("No supported API versions available, " - "Please upgrade your Datera EDF software") - return results - - -def _get_volume_type_obj(driver, resource): - type_id = resource.get('volume_type_id', None) - # Handle case of volume with no type. We still want the - # specified defaults from above - if type_id: - ctxt = context.get_admin_context() - volume_type = volume_types.get_volume_type(ctxt, type_id) - else: - volume_type = None - return volume_type - - -def _get_policies_for_resource(driver, resource): - """Get extra_specs and qos_specs of a volume_type. - - This fetches the scoped keys from the volume type. Anything set from - qos_specs will override key/values set from extra_specs. - """ - volume_type = driver._get_volume_type_obj(resource) - # Handle case of volume with no type. 
We still want the - # specified defaults from above - if volume_type: - specs = volume_type.get('extra_specs') - else: - specs = {} - - # Set defaults: - policies = {k.lstrip('DF:'): str(v['default']) for (k, v) - in driver._init_vendor_properties()[0].items()} - - if volume_type: - # Populate updated value - for key, value in specs.items(): - if ':' in key: - fields = key.split(':') - key = fields[1] - policies[key] = value - - qos_specs_id = volume_type.get('qos_specs_id') - if qos_specs_id is not None: - ctxt = context.get_admin_context() - qos_kvs = qos_specs.get_qos_specs(ctxt, qos_specs_id)['specs'] - if qos_kvs: - policies.update(qos_kvs) - # Cast everything except booleans int that can be cast - for k, v in policies.items(): - # Handle String Boolean case - if v == 'True' or v == 'False': - policies[k] = policies[k] == 'True' - continue - # Int cast - try: - policies[k] = int(v) - except ValueError: - pass - return policies - - -# ================ -# = API Requests = -# ================ - -def _request(driver, connection_string, method, payload, header, cert_data): - LOG.debug("Endpoint for Datera API call: %s", connection_string) - LOG.debug("Payload for Datera API call: %s", payload) - try: - response = getattr(requests, method)(connection_string, - data=payload, headers=header, - verify=False, cert=cert_data) - return response - except requests.exceptions.RequestException as ex: - msg = _( - 'Failed to make a request to Datera cluster endpoint due ' - 'to the following reason: %s') % six.text_type( - ex.message) - LOG.error(msg) - raise exception.DateraAPIException(msg) - - -def _raise_response(driver, response): - msg = _('Request to Datera cluster returned bad status:' - ' %(status)s | %(reason)s') % { - 'status': response.status_code, - 'reason': response.reason} - LOG.error(msg) - raise exception.DateraAPIException(msg) - - -def _handle_bad_status(driver, - response, - connection_string, - method, - payload, - header, - cert_data, - sensitive=False, 
- conflict_ok=False): - if (response.status_code == http_client.BAD_REQUEST and - connection_string.endswith("api_versions")): - # Raise the exception, but don't log any error. We'll just fall - # back to the old style of determining API version. We make this - # request a lot, so logging it is just noise - raise exception.DateraAPIException - if response.status_code == http_client.NOT_FOUND: - raise exception.NotFound(response.json()['message']) - elif response.status_code in [http_client.FORBIDDEN, - http_client.UNAUTHORIZED]: - raise exception.NotAuthorized() - elif response.status_code == http_client.CONFLICT and conflict_ok: - # Don't raise, because we're expecting a conflict - pass - elif response.status_code == http_client.SERVICE_UNAVAILABLE: - current_retry = 0 - while current_retry <= driver.retry_attempts: - LOG.debug("Datera 503 response, trying request again") - eventlet.sleep(driver.interval) - resp = driver._request(connection_string, - method, - payload, - header, - cert_data) - if resp.ok: - return response.json() - elif resp.status_code != http_client.SERVICE_UNAVAILABLE: - driver._raise_response(resp) - else: - driver._raise_response(response) - - -@_authenticated -def _issue_api_request(driver, resource_url, method='get', body=None, - sensitive=False, conflict_ok=False, - api_version='2', tenant=None): - """All API requests to Datera cluster go through this method. 
- - :param resource_url: the url of the resource - :param method: the request verb - :param body: a dict with options for the action_type - :param sensitive: Bool, whether request should be obscured from logs - :param conflict_ok: Bool, True to suppress ConflictError exceptions - during this request - :param api_version: The Datera api version for the request - :param tenant: The tenant header value for the request (only applicable - to 2.1 product versions and later) - :returns: a dict of the response from the Datera cluster - """ - host = driver.configuration.san_ip - port = driver.configuration.datera_api_port - api_token = driver.datera_api_token - - payload = json.dumps(body, ensure_ascii=False) - payload.encode('utf-8') - - header = {'Content-Type': 'application/json; charset=utf-8'} - header.update(driver.HEADER_DATA) - - protocol = 'http' - if driver.configuration.driver_use_ssl: - protocol = 'https' - - if api_token: - header['Auth-Token'] = api_token - - if tenant == "all": - header['tenant'] = tenant - elif tenant and '/root' not in tenant: - header['tenant'] = "".join(("/root/", tenant)) - elif tenant and '/root' in tenant: - header['tenant'] = tenant - elif driver.tenant_id and driver.tenant_id.lower() != "map": - header['tenant'] = driver.tenant_id - - client_cert = driver.configuration.driver_client_cert - client_cert_key = driver.configuration.driver_client_cert_key - cert_data = None - - if client_cert: - protocol = 'https' - cert_data = (client_cert, client_cert_key) - - connection_string = '%s://%s:%s/v%s/%s' % (protocol, host, port, - api_version, resource_url) - - request_id = uuid.uuid4() - - if driver.do_profile: - t1 = time.time() - if not sensitive: - LOG.debug("\nDatera Trace ID: %(tid)s\n" - "Datera Request ID: %(rid)s\n" - "Datera Request URL: /v%(api)s/%(url)s\n" - "Datera Request Method: %(method)s\n" - "Datera Request Payload: %(payload)s\n" - "Datera Request Headers: %(header)s\n", - {'tid': driver.thread_local.trace_id, - 'rid': 
request_id, - 'api': api_version, - 'url': resource_url, - 'method': method, - 'payload': payload, - 'header': header}) - response = driver._request(connection_string, - method, - payload, - header, - cert_data) - - data = response.json() - - timedelta = "Profiling disabled" - if driver.do_profile: - t2 = time.time() - timedelta = round(t2 - t1, 3) - if not sensitive: - LOG.debug("\nDatera Trace ID: %(tid)s\n" - "Datera Response ID: %(rid)s\n" - "Datera Response TimeDelta: %(delta)ss\n" - "Datera Response URL: %(url)s\n" - "Datera Response Payload: %(payload)s\n" - "Datera Response Object: %(obj)s\n", - {'tid': driver.thread_local.trace_id, - 'rid': request_id, - 'delta': timedelta, - 'url': response.url, - 'payload': payload, - 'obj': vars(response)}) - if not response.ok: - driver._handle_bad_status(response, - connection_string, - method, - payload, - header, - cert_data, - conflict_ok=conflict_ok) - - return data - - -def register_driver(driver): - for func in [_get_supported_api_versions, - _get_volume_type_obj, - _get_policies_for_resource, - _request, - _raise_response, - _handle_bad_status, - _issue_api_request]: - # PY27 - - f = types.MethodType(func, driver) - try: - setattr(driver, func.func_name, f) - # PY3+ - except AttributeError: - setattr(driver, func.__name__, f) diff --git a/cinder/volume/drivers/datera/datera_iscsi.py b/cinder/volume/drivers/datera/datera_iscsi.py deleted file mode 100644 index 3cb08a329..000000000 --- a/cinder/volume/drivers/datera/datera_iscsi.py +++ /dev/null @@ -1,586 +0,0 @@ -# Copyright 2017 Datera -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. 
You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import time -import uuid - -from eventlet.green import threading -from oslo_config import cfg -from oslo_log import log as logging -import six - -from cinder import exception -from cinder.i18n import _ -from cinder import utils -from cinder.volume import configuration -from cinder.volume.drivers.san import san - -import cinder.volume.drivers.datera.datera_api2 as api2 -import cinder.volume.drivers.datera.datera_api21 as api21 -import cinder.volume.drivers.datera.datera_common as datc - - -LOG = logging.getLogger(__name__) - -d_opts = [ - cfg.StrOpt('datera_api_port', - default='7717', - help='Datera API port.'), - cfg.StrOpt('datera_api_version', - default='2', - deprecated_for_removal=True, - help='Datera API version.'), - cfg.IntOpt('datera_503_timeout', - default='120', - help='Timeout for HTTP 503 retry messages'), - cfg.IntOpt('datera_503_interval', - default='5', - help='Interval between 503 retries'), - cfg.BoolOpt('datera_debug', - default=False, - help="True to set function arg and return logging"), - cfg.BoolOpt('datera_debug_replica_count_override', - default=False, - help="ONLY FOR DEBUG/TESTING PURPOSES\n" - "True to set replica_count to 1"), - cfg.StrOpt('datera_tenant_id', - default=None, - help="If set to 'Map' --> OpenStack project ID will be mapped " - "implicitly to Datera tenant ID\n" - "If set to 'None' --> Datera tenant ID will not be used " - "during volume provisioning\n" - "If set to anything else --> Datera tenant ID will be the " - "provided value"), - cfg.BoolOpt('datera_disable_profiler', - default=False, - help="Set to True 
to disable profiling in the Datera driver"), -] - - -CONF = cfg.CONF -CONF.import_opt('driver_use_ssl', 'cinder.volume.driver') -CONF.register_opts(d_opts, group=configuration.SHARED_CONF_GROUP) - - -@six.add_metaclass(utils.TraceWrapperWithABCMetaclass) -class DateraDriver(san.SanISCSIDriver, api2.DateraApi, api21.DateraApi): - - """The OpenStack Datera Driver - - Version history: - * 1.0 - Initial driver - * 1.1 - Look for lun-0 instead of lun-1. - * 2.0 - Update For Datera API v2 - * 2.1 - Multipath, ACL and reorg - * 2.2 - Capabilites List, Extended Volume-Type Support - Naming convention change, - Volume Manage/Unmanage support - * 2.3 - Templates, Tenants, Snapshot Polling, - 2.1 Api Version Support, Restructure - * 2.3.1 - Scalability bugfixes - * 2.3.2 - Volume Placement, ACL multi-attach bugfix - * 2.4.0 - Fast Retype Support - """ - VERSION = '2.4.0' - - CI_WIKI_NAME = "datera-ci" - - HEADER_DATA = {'Datera-Driver': 'OpenStack-Cinder-{}'.format(VERSION)} - - def __init__(self, *args, **kwargs): - super(DateraDriver, self).__init__(*args, **kwargs) - self.configuration.append_config_values(d_opts) - self.username = self.configuration.san_login - self.password = self.configuration.san_password - self.cluster_stats = {} - self.datera_api_token = None - self.interval = self.configuration.datera_503_interval - self.retry_attempts = (self.configuration.datera_503_timeout / - self.interval) - self.driver_prefix = str(uuid.uuid4())[:4] - self.datera_debug = self.configuration.datera_debug - self.datera_api_versions = [] - - if self.datera_debug: - utils.setup_tracing(['method']) - self.tenant_id = self.configuration.datera_tenant_id - if self.tenant_id and self.tenant_id.lower() == 'none': - self.tenant_id = None - self.api_check = time.time() - self.api_cache = [] - self.api_timeout = 0 - self.do_profile = not self.configuration.datera_disable_profiler - self.thread_local = threading.local() - - backend_name = self.configuration.safe_get( - 
'volume_backend_name') - self.backend_name = backend_name or 'Datera' - - datc.register_driver(self) - - def do_setup(self, context): - # If we can't authenticate through the old and new method, just fail - # now. - if not all([self.username, self.password]): - msg = _("san_login and/or san_password is not set for Datera " - "driver in the cinder.conf. Set this information and " - "start the cinder-volume service again.") - LOG.error(msg) - raise exception.InvalidInput(msg) - - self.login() - self._create_tenant() - - # ================= - - # ================= - # = Create Volume = - # ================= - - @datc._api_lookup - def create_volume(self, volume): - """Create a logical volume.""" - pass - - # ================= - # = Extend Volume = - # ================= - - @datc._api_lookup - def extend_volume(self, volume, new_size): - pass - - # ================= - - # ================= - # = Cloned Volume = - # ================= - - @datc._api_lookup - def create_cloned_volume(self, volume, src_vref): - pass - - # ================= - # = Delete Volume = - # ================= - - @datc._api_lookup - def delete_volume(self, volume): - pass - - # ================= - # = Ensure Export = - # ================= - - @datc._api_lookup - def ensure_export(self, context, volume, connector=None): - """Gets the associated account, retrieves CHAP info and updates.""" - - # ========================= - # = Initialize Connection = - # ========================= - - @datc._api_lookup - def initialize_connection(self, volume, connector): - pass - - # ================= - # = Create Export = - # ================= - - @datc._api_lookup - def create_export(self, context, volume, connector): - pass - - # ================= - # = Detach Volume = - # ================= - - @datc._api_lookup - def detach_volume(self, context, volume, attachment=None): - pass - - # =================== - # = Create Snapshot = - # =================== - - @datc._api_lookup - def create_snapshot(self, snapshot): - 
pass - - # =================== - # = Delete Snapshot = - # =================== - - @datc._api_lookup - def delete_snapshot(self, snapshot): - pass - - # ======================== - # = Volume From Snapshot = - # ======================== - - @datc._api_lookup - def create_volume_from_snapshot(self, volume, snapshot): - pass - - # ========== - # = Retype = - # ========== - - @datc._api_lookup - def retype(self, ctxt, volume, new_type, diff, host): - """Convert the volume to be of the new type. - - Returns a boolean indicating whether the retype occurred. - - :param ctxt: Context - :param volume: A dictionary describing the volume to migrate - :param new_type: A dictionary describing the volume type to convert to - :param diff: A dictionary with the difference between the two types - :param host: A dictionary describing the host to migrate to, where - host['host'] is its name, and host['capabilities'] is a - dictionary of its reported capabilities (Not Used). - """ - pass - - # ========== - # = Manage = - # ========== - - @datc._api_lookup - def manage_existing(self, volume, existing_ref): - """Manage an existing volume on the Datera backend - - The existing_ref must be either the current name or Datera UUID of - an app_instance on the Datera backend in a colon separated list with - the storage instance name and volume name. This means only - single storage instances and single volumes are supported for - managing by cinder. - - Eg. 
- - (existing_ref['source-name'] == - tenant:app_inst_name:storage_inst_name:vol_name) - - if using Datera 2.1 API - - or - - (existing_ref['source-name'] == - app_inst_name:storage_inst_name:vol_name) - - if using 2.0 API - - :param volume: Cinder volume to manage - :param existing_ref: Driver-specific information used to identify a - volume - """ - pass - - # =================== - # = Manage Get Size = - # =================== - - @datc._api_lookup - def manage_existing_get_size(self, volume, existing_ref): - """Get the size of an unmanaged volume on the Datera backend - - The existing_ref must be either the current name or Datera UUID of - an app_instance on the Datera backend in a colon separated list with - the storage instance name and volume name. This means only - single storage instances and single volumes are supported for - managing by cinder. - - Eg. - - existing_ref == app_inst_name:storage_inst_name:vol_name - - :param volume: Cinder volume to manage - :param existing_ref: Driver-specific information used to identify a - volume on the Datera backend - """ - pass - - # ========================= - # = Get Manageable Volume = - # ========================= - - @datc._api_lookup - def get_manageable_volumes(self, cinder_volumes, marker, limit, offset, - sort_keys, sort_dirs): - """List volumes on the backend available for management by Cinder. - - Returns a list of dictionaries, each specifying a volume in the host, - with the following keys: - - - reference (dictionary): The reference for a volume, which can be - passed to 'manage_existing'. - - size (int): The size of the volume according to the storage - backend, rounded up to the nearest GB. - - safe_to_manage (boolean): Whether or not this volume is safe to - manage according to the storage backend. For example, is the volume - in use or invalid for any reason. - - reason_not_safe (string): If safe_to_manage is False, the reason why. - - cinder_id (string): If already managed, provide the Cinder ID. 
- - extra_info (string): Any extra information to return to the user - - :param cinder_volumes: A list of volumes in this host that Cinder - currently manages, used to determine if - a volume is manageable or not. - :param marker: The last item of the previous page; we return the - next results after this value (after sorting) - :param limit: Maximum number of items to return - :param offset: Number of items to skip after marker - :param sort_keys: List of keys to sort results by (valid keys are - 'identifier' and 'size') - :param sort_dirs: List of directions to sort by, corresponding to - sort_keys (valid directions are 'asc' and 'desc') - """ - pass - - # ============ - # = Unmanage = - # ============ - - @datc._api_lookup - def unmanage(self, volume): - """Unmanage a currently managed volume in Cinder - - :param volume: Cinder volume to unmanage - """ - pass - - # ================ - # = Volume Stats = - # ================ - - @datc._api_lookup - def get_volume_stats(self, refresh=False): - """Get volume stats. - - If 'refresh' is True, run update first. - The name is a bit misleading as - the majority of the data here is cluster - data. - """ - pass - - # ========= - # = Login = - # ========= - - @datc._api_lookup - def login(self): - pass - - # ======= - # = QoS = - # ======= - - def _update_qos(self, resource, policies): - url = datc.URL_TEMPLATES['vol_inst']( - policies['default_storage_name'], - policies['default_volume_name']) + '/performance_policy' - url = url.format(datc._get_name(resource['id'])) - type_id = resource.get('volume_type_id', None) - if type_id is not None: - # Filter for just QOS policies in result. 
All of their keys - # should end with "max" - fpolicies = {k: int(v) for k, v in - policies.items() if k.endswith("max")} - # Filter all 0 values from being passed - fpolicies = dict(filter(lambda _v: _v[1] > 0, fpolicies.items())) - if fpolicies: - self._issue_api_request(url, 'post', body=fpolicies, - api_version='2') - - def _get_lunid(self): - return 0 - - # ============================ - # = Volume-Types/Extra-Specs = - # ============================ - - def _init_vendor_properties(self): - """Create a dictionary of vendor unique properties. - - This method creates a dictionary of vendor unique properties - and returns both created dictionary and vendor name. - Returned vendor name is used to check for name of vendor - unique properties. - - - Vendor name shouldn't include colon(:) because of the separator - and it is automatically replaced by underscore(_). - ex. abc:d -> abc_d - - Vendor prefix is equal to vendor name. - ex. abcd - - Vendor unique properties must start with vendor prefix + ':'. - ex. 
abcd:maxIOPS - - Each backend driver needs to override this method to expose - its own properties using _set_property() like this: - - self._set_property( - properties, - "vendorPrefix:specific_property", - "Title of property", - _("Description of property"), - "type") - - : return dictionary of vendor unique properties - : return vendor name - - prefix: DF --> Datera Fabric - """ - - properties = {} - - self._set_property( - properties, - "DF:placement_mode", - "Datera Volume Placement", - _("'single_flash' for single-flash-replica placement, " - "'all_flash' for all-flash-replica placement, " - "'hybrid' for hybrid placement"), - "string", - default="hybrid") - - self._set_property( - properties, - "DF:round_robin", - "Datera Round Robin Portals", - _("True to round robin the provided portals for a target"), - "boolean", - default=False) - - if self.configuration.get('datera_debug_replica_count_override'): - replica_count = 1 - else: - replica_count = 3 - self._set_property( - properties, - "DF:replica_count", - "Datera Volume Replica Count", - _("Specifies number of replicas for each volume. Can only be " - "increased once volume is created"), - "integer", - minimum=1, - default=replica_count) - - self._set_property( - properties, - "DF:acl_allow_all", - "Datera ACL Allow All", - _("True to set acl 'allow_all' on volumes created. 
Cannot be " - "changed on volume once set"), - "boolean", - default=False) - - self._set_property( - properties, - "DF:ip_pool", - "Datera IP Pool", - _("Specifies IP pool to use for volume"), - "string", - default="default") - - self._set_property( - properties, - "DF:template", - "Datera Template", - _("Specifies Template to use for volume provisioning"), - "string", - default="") - - # ###### QoS Settings ###### # - self._set_property( - properties, - "DF:read_bandwidth_max", - "Datera QoS Max Bandwidth Read", - _("Max read bandwidth setting for volume qos, " - "use 0 for unlimited"), - "integer", - minimum=0, - default=0) - - self._set_property( - properties, - "DF:default_storage_name", - "Datera Default Storage Instance Name", - _("The name to use for storage instances created"), - "string", - default="storage-1") - - self._set_property( - properties, - "DF:default_volume_name", - "Datera Default Volume Name", - _("The name to use for volumes created"), - "string", - default="volume-1") - - self._set_property( - properties, - "DF:write_bandwidth_max", - "Datera QoS Max Bandwidth Write", - _("Max write bandwidth setting for volume qos, " - "use 0 for unlimited"), - "integer", - minimum=0, - default=0) - - self._set_property( - properties, - "DF:total_bandwidth_max", - "Datera QoS Max Bandwidth Total", - _("Max total bandwidth setting for volume qos, " - "use 0 for unlimited"), - "integer", - minimum=0, - default=0) - - self._set_property( - properties, - "DF:read_iops_max", - "Datera QoS Max iops Read", - _("Max read iops setting for volume qos, " - "use 0 for unlimited"), - "integer", - minimum=0, - default=0) - - self._set_property( - properties, - "DF:write_iops_max", - "Datera QoS Max IOPS Write", - _("Max write iops setting for volume qos, " - "use 0 for unlimited"), - "integer", - minimum=0, - default=0) - - self._set_property( - properties, - "DF:total_iops_max", - "Datera QoS Max IOPS Total", - _("Max total iops setting for volume qos, " - "use 0 for 
unlimited"), - "integer", - minimum=0, - default=0) - # ###### End QoS Settings ###### # - - return properties, 'DF' diff --git a/cinder/volume/drivers/dell_emc/__init__.py b/cinder/volume/drivers/dell_emc/__init__.py deleted file mode 100644 index e69de29bb..000000000 diff --git a/cinder/volume/drivers/dell_emc/ps.py b/cinder/volume/drivers/dell_emc/ps.py deleted file mode 100644 index 09b12064c..000000000 --- a/cinder/volume/drivers/dell_emc/ps.py +++ /dev/null @@ -1,685 +0,0 @@ -# Copyright (c) 2013-2017 Dell Inc, or its subsidiaries. -# Copyright 2013 OpenStack Foundation -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -"""Volume driver for Dell EMC PS Series Storage.""" - -import functools -import math -import random - -import eventlet -from eventlet import greenthread -import greenlet -from oslo_concurrency import processutils -from oslo_config import cfg -from oslo_log import log as logging -from oslo_utils import excutils -from six.moves import range - -from cinder import exception -from cinder.i18n import _ -from cinder import interface -from cinder import ssh_utils -from cinder import utils -from cinder.volume import configuration -from cinder.volume.drivers import san - -LOG = logging.getLogger(__name__) - -eqlx_opts = [ - cfg.StrOpt('eqlx_group_name', - default='group-0', - help='Group name to use for creating volumes. Defaults to ' - '"group-0".'), - cfg.IntOpt('eqlx_cli_max_retries', - min=0, - default=5, - help='Maximum retry count for reconnection. 
def with_timeout(f):
    """Decorate a method so callers may pass a ``timeout`` keyword.

    The wrapped call runs in a green thread.  When ``timeout`` (seconds)
    is supplied and expires before the call finishes, the green thread
    is killed and VolumeBackendAPIException is raised instead.
    """
    @functools.wraps(f)
    def _wrapped(self, *args, **kwargs):
        # 'timeout' is consumed here; the wrapped function never sees it.
        timeout = kwargs.pop('timeout', None)
        worker = eventlet.spawn(f, self, *args, **kwargs)
        if timeout is None:
            return worker.wait()
        # Arm a killer that fires once the deadline passes.
        killer = eventlet.spawn_after(timeout, worker.kill)
        try:
            result = worker.wait()
        except greenlet.GreenletExit:
            # The killer fired: the command exceeded its deadline.
            raise exception.VolumeBackendAPIException(
                data="Command timed out")
        # Finished in time; disarm the pending kill.
        killer.cancel()
        return result

    return _wrapped
def __init__(self, *args, **kwargs):
    """Initialize the PS Series driver and register its options.

    :param args: Positional arguments forwarded to SanISCSIDriver.
    :param kwargs: Keyword arguments forwarded to SanISCSIDriver.
    """
    super(PSSeriesISCSIDriver, self).__init__(*args, **kwargs)
    self.configuration.append_config_values(eqlx_opts)
    # Group IP is discovered later, in do_setup(), from 'grpparams show'.
    self._group_ip = None
    # SSH connection pool; created lazily on the first _run_ssh() call.
    self.sshpool = None
- msg = _("The PS array has closed the connection.") - LOG.error(msg) - raise exception.VolumeBackendAPIException(data=msg) - out += ret - - LOG.debug("CLI output\n%s", out) - return out.splitlines() - - def _get_prefixed_value(self, lines, prefix): - for line in lines: - if line.startswith(prefix): - return line[len(prefix):] - return - - @with_timeout - def _ssh_execute(self, ssh, command, *arg, **kwargs): - transport = ssh.get_transport() - chan = transport.open_session() - completed = False - - try: - chan.invoke_shell() - - LOG.debug("Reading CLI MOTD") - self._get_output(chan) - - cmd = 'stty columns 255' - LOG.debug("Setting CLI terminal width: '%s'", cmd) - chan.send(cmd + '\r') - out = self._get_output(chan) - - LOG.debug("Sending CLI command: '%s'", command) - chan.send(command + '\r') - out = self._get_output(chan) - - completed = True - - if any(ln.startswith(('% Error', 'Error:')) for ln in out): - desc = _("Error executing PS command") - cmdout = '\n'.join(out) - LOG.error(cmdout) - raise processutils.ProcessExecutionError( - stdout=cmdout, cmd=command, description=desc) - return out - finally: - if not completed: - LOG.debug("Timed out executing command: '%s'", command) - chan.close() - - def _run_ssh(self, cmd_list, attempts=1): - utils.check_ssh_injection(cmd_list) - command = ' '. 
def _get_model_update(self, target_name):
    """Build the model update dict for a volume exported as target_name.

    'provider_location' encodes "<group ip>:<port>,<lun> <iqn> 0"; when
    CHAP authentication is enabled, 'provider_auth' carries the CHAP
    credentials as well.
    """
    location = "%s:%s,1 %s 0" % (self._group_ip, '3260', target_name)
    update = {'provider_location': location}
    if self.configuration.use_chap_auth:
        update['provider_auth'] = 'CHAP %s %s' % (
            self.configuration.chap_username,
            self.configuration.chap_password)
    return update
def _update_volume_stats(self):
    """Refresh self._stats with capacity info from the eqlx group."""

    LOG.debug('Updating volume stats.')
    backend_name = "eqlx"
    if self.configuration:
        backend_name = self.configuration.safe_get('volume_backend_name')
    data = {
        "volume_backend_name": backend_name or 'eqlx',
        "vendor_name": 'Dell EMC',
        "driver_version": self.VERSION,
        "storage_protocol": 'iSCSI',
        'reserved_percentage': 0,
        'QoS_support': False,
        'total_capacity_gb': 0,
        'free_capacity_gb': 0,
        'multiattach': False,
    }

    provisioned_capacity = 0

    # Parse the 'pool ... show' output; values look like '123.4GB'.
    for line in self._eql_execute('pool', 'select',
                                  self.configuration.eqlx_pool, 'show'):
        if line.startswith('TotalCapacity:'):
            value = line.rstrip().partition(' ')[-1]
            data['total_capacity_gb'] = self._get_space_in_gb(value)
        if line.startswith('FreeSpace:'):
            value = line.rstrip().partition(' ')[-1]
            data['free_capacity_gb'] = self._get_space_in_gb(value)
        if line.startswith('VolumeReserve:'):
            value = line.rstrip().partition(' ')[-1]
            provisioned_capacity = self._get_space_in_gb(value)

    thin_enabled = self.configuration.san_thin_provision
    if not thin_enabled:
        # Thick provisioning: report actually-used space instead of
        # the array's volume reserve.
        provisioned_capacity = round(
            data['total_capacity_gb'] - data['free_capacity_gb'], 2)

    data['provisioned_capacity_gb'] = provisioned_capacity
    data['max_over_subscription_ratio'] = (
        self.configuration.max_over_subscription_ratio)
    data['thin_provisioning_support'] = thin_enabled
    data['thick_provisioning_support'] = not thin_enabled

    self._stats = data
def _check_volume(self, volume):
    """Verify that the volume exists on the array.

    :raises VolumeNotFound: when the CLI reports the volume is missing.
    Any other CLI failure is re-raised unchanged.
    """
    try:
        self._eql_execute('volume', 'select', volume['name'], 'show')
    except processutils.ProcessExecutionError as err:
        with excutils.save_and_reraise_exception():
            if 'does not exist.\n' in err.stdout:
                LOG.debug('Volume %s does not exist, '
                          'it may have already been deleted',
                          volume['name'])
                raise exception.VolumeNotFound(volume_id=volume['id'])
def create_volume(self, volume):
    """Create a new volume on the array.

    Builds the CLI 'volume create' command, optionally targeting a
    non-default pool and thin provisioning, then enables multihost
    access (needed for live migration) and returns the model update.
    """
    try:
        args = ['volume', 'create', volume['name'],
                "%sG" % (volume['size'])]
        pool = self.configuration.eqlx_pool
        if pool != 'default':
            args.extend(['pool', pool])
        if self.configuration.san_thin_provision:
            args.append('thin-provision')
        cli_out = self._eql_execute(*args)
        self.add_multihost_access(volume)
        return self._get_volume_data(cli_out)
    except Exception:
        with excutils.save_and_reraise_exception():
            LOG.error('Failed to create volume "%s".', volume['name'])
def delete_volume(self, volume):
    """Delete a volume from the array.

    The volume is taken offline first, as the CLI requires.  A volume
    that is already gone is logged and treated as success (idempotent
    delete); any other failure is re-raised.
    """
    name = volume['name']
    try:
        self._check_volume(volume)
        for action in (('volume', 'select', name, 'offline'),
                       ('volume', 'delete', name)):
            self._eql_execute(*action)
    except exception.VolumeNotFound:
        LOG.warning('Volume %s was not found while trying to delete it.',
                    name)
    except Exception:
        with excutils.save_and_reraise_exception():
            LOG.error('Failed to delete volume "%s".', name)
def delete_snapshot(self, snapshot):
    """Delete a volume's snapshot on the array.

    A snapshot that no longer exists is treated as already deleted
    (idempotent delete).  Any other CLI failure is logged and re-raised
    so the snapshot is not silently left behind.

    :param snapshot: snapshot reference with 'name' and 'volume_name'.
    """
    try:
        self._eql_execute('volume', 'select', snapshot['volume_name'],
                          'snapshot', 'delete', snapshot['name'])
    except processutils.ProcessExecutionError as err:
        if err.stdout.find('does not exist') > -1:
            # Already gone: nothing left to do.
            LOG.debug('Snapshot %s could not be found.', snapshot['name'])
        else:
            # Bug fix: previously any other CLI error was silently
            # swallowed here, so the snapshot stayed on the array while
            # the delete was reported as successful.
            LOG.error('Failed to delete snapshot %(snap)s of '
                      'volume %(vol)s.',
                      {'snap': snapshot['name'],
                       'vol': snapshot['volume_name']})
            raise
    except Exception:
        with excutils.save_and_reraise_exception():
            LOG.error('Failed to delete snapshot %(snap)s of '
                      'volume %(vol)s.',
                      {'snap': snapshot['name'],
                       'vol': snapshot['volume_name']})
def terminate_connection(self, volume, connector, force=False, **kwargs):
    """Remove the initiator's access record from a volume.

    Looks up the access record matching the connector's initiator and,
    when one exists, deletes it.  A missing record is not an error.
    """
    name = volume['name']
    try:
        access_out = self._eql_execute('volume', 'select', name,
                                       'access', 'show')
        record_id = self._parse_connection(connector, access_out)
        if record_id is not None:
            self._eql_execute('volume', 'select', name,
                              'access', 'delete', record_id)
    except Exception:
        with excutils.save_and_reraise_exception():
            LOG.error('Failed to terminate connection to volume "%s".',
                      name)
def _get_existing_volume_ref_name(self, ref):
    """Extract the backend volume name from a manage-existing reference.

    'source-name' takes precedence over 'source-id'.

    :raises InvalidInput: when neither key is present.
    """
    for key in ('source-name', 'source-id'):
        if key in ref:
            return ref[key]
    msg = _('Reference must contain source-id or source-name.')
    LOG.error(msg)
    raise exception.InvalidInput(reason=msg)
def unmanage(self, volume):
    """Remove the volume from Cinder management.

    The underlying backend storage object is kept; only its description
    is rewritten to flag it as unmanaged.

    :param volume: Cinder volume to unmanage
    """
    name = volume['name']
    try:
        self._set_volume_description(volume, '"OpenStack UnManaged"')
        LOG.info("Virtual volume %(disp)s '%(vol)s' is no "
                 "longer managed.",
                 {'disp': volume['display_name'],
                  'vol': name})
    except Exception:
        with excutils.save_and_reraise_exception():
            LOG.error('Failed to unmanage volume "%s".', name)
class PayloadFilter(object):
    """Builder for filter payloads sent to the Dell Storage API.

    Produces the nested filter structure understood by API versions
    15.3 and later.
    """

    def __init__(self, filtertype='AND'):
        # Top-level combinator ('AND'/'OR') wrapping the criteria list.
        self.payload = {'filter': {'filterType': filtertype,
                                   'filters': []}}

    def append(self, name, val, filtertype='Equals'):
        # None values are skipped so callers can pass optional criteria
        # through unconditionally.
        if val is not None:
            self.payload['filter']['filters'].append(
                {'attributeName': name,
                 'attributeValue': val,
                 'filterType': filtertype})
def _get_header(self, asynchronous):
    """Return the request headers, marked async when requested.

    Bug fix: the parameter was previously named 'async', which became a
    reserved keyword in Python 3.7 and made this module unparseable.
    All callers pass the flag positionally, so the rename is safe.

    :param asynchronous: True to request asynchronous REST processing.
    :return: header dict to use for the request.
    """
    if asynchronous:
        header = self.header.copy()
        # The wire header key stays 'async' - that is what the Dell
        # REST API expects.
        header['async'] = 'True'
        return header
    return self.header
def _wait_for_async_complete(self, asyncTask):
    """Poll an SC AsyncTask until it yields a final result.

    Re-reads the task's URL once a second for as long as the API keeps
    returning an AsyncTask object.

    :param asyncTask: AsyncTask dict returned by an asynchronous call.
    :return: The final REST response, or None when the task carried no
             usable URL.
    :raises VolumeBackendAPIException: if polling the task fails.
    """
    url = self._get_async_url(asyncTask)
    # NOTE(review): 'True and url' is equivalent to just 'url'.
    while True and url:
        try:
            r = self.get(url)
            # We can leave this loop for a variety of reasons.
            # Nothing returned.
            # r.content blanks.
            # Object returned switches to one without objectType or with
            # a different objectType.
            if not SCApi._check_result(r):
                LOG.debug('Async error:\n'
                          '\tstatus_code: %(code)s\n'
                          '\ttext: %(text)s\n',
                          {'code': r.status_code,
                           'text': r.text})
            else:
                # In theory we have a good run.
                if r.content:
                    content = r.json()
                    if content.get('objectType') == 'AsyncTask':
                        # Still running; poll again after a short sleep.
                        url = self._get_async_url(content)
                        eventlet.sleep(1)
                        continue
                else:
                    LOG.debug('Async debug: r.content is None')
            return r
        except Exception:
            methodname = asyncTask.get('methodName')
            objectTypeName = asyncTask.get('objectTypeName')
            msg = (_('Async error: Unable to retrieve %(obj)s '
                     'method %(method)s result')
                   % {'obj': objectTypeName, 'method': methodname})
            raise exception.VolumeBackendAPIException(message=msg)
    # Shouldn't really be able to get here.
    LOG.debug('_wait_for_async_complete: Error asyncTask: %r', asyncTask)
    return None
@utils.retry(exceptions=(requests.ConnectionError,))
def post(self, url, payload, asynchronous=False):
    """POST a JSON payload to the API, retrying on connection errors.

    Bug fix: the keyword was renamed from 'async' - a reserved word as
    of Python 3.7 that made this module unparseable.  The positional
    interface is unchanged.

    :param url: REST path (with or without the 'api/rest' prefix).
    :param payload: object serialized to JSON for the request body.
    :param asynchronous: True to run the request as an SC async task
                         and wait for its completion.
    :return: requests Response object (the async result when waited).
    """
    LOG.debug('post: %(url)s data: %(payload)s',
              {'url': url,
               'payload': payload})
    return self._rest_ret(self.session.post(
        self.__formatUrl(url),
        data=json.dumps(payload,
                        ensure_ascii=False).encode('utf-8'),
        headers=self._get_header(asynchronous),
        verify=self.verify), asynchronous)
class SCApiHelper(object):
    """Open and close connections to the Dell Storage Center REST API.

    Tracks the primary and (optional) secondary DSM credentials and
    hands out connected SCApi instances.
    """

    def __init__(self, config, active_backend_id, storage_protocol):
        self.config = config
        # Set on failover; when present the backend is failed over.
        self.active_backend_id = active_backend_id
        self.primaryssn = self.config.dell_sc_ssn
        self.storage_protocol = storage_protocol
        # Start out pointed at the primary DSM.
        self.san_ip = self.config.san_ip
        self.san_login = self.config.san_login
        self.san_password = self.config.san_password
        self.san_port = self.config.dell_sc_api_port
        self.apiversion = '2.0'

    def _swap_credentials(self):
        """Change out to our secondary credentials, or back to primary.

        :return: True if swapped. False if no alt credentials supplied.
        """
        if self.san_ip == self.config.san_ip:
            # Currently on the primary; the secondary needs a full set
            # of credentials to be usable.
            secondary = (self.config.secondary_san_ip,
                         self.config.secondary_san_login,
                         self.config.secondary_san_password)
            if not all(secondary):
                LOG.info('Swapping DSM credentials: Secondary DSM '
                         'credentials are not set or are incomplete.')
                # Cannot swap.
                return False
            self.san_ip, self.san_login, self.san_password = secondary
            # Odds on this hasn't changed so no need to make setting
            # this a requirement.
            if self.config.secondary_sc_api_port:
                self.san_port = self.config.secondary_sc_api_port
        else:
            # Back to the primary; these have to be set.
            self.san_ip = self.config.san_ip
            self.san_login = self.config.san_login
            self.san_password = self.config.san_password
            self.san_port = self.config.dell_sc_api_port
        LOG.info('Swapping DSM credentials: New DSM IP is %r.',
                 self.san_ip)
        return True

    def _setup_connection(self):
        """Attempt to open a connection to the storage center.

        :return: connected SCApi object.
        """
        connection = SCApi(self.san_ip,
                           self.san_port,
                           self.san_login,
                           self.san_password,
                           self.config.dell_sc_verify_cert,
                           self.apiversion)
        # This instance serves a single backend; stash that backend's
        # settings on the connection rather than passing them around.
        connection.vfname = self.config.dell_sc_volume_folder
        connection.sfname = self.config.dell_sc_server_folder
        connection.excluded_domain_ips = (
            self.config.excluded_domain_ip or [])
        # Our primary SSN doesn't change.
        connection.primaryssn = self.primaryssn
        if self.storage_protocol == 'FC':
            connection.protocol = 'FibreChannel'
        # A failed-over backend (active_backend_id set, as a string)
        # wins over the primary SSN.
        if self.active_backend_id:
            connection.ssn = int(self.active_backend_id)
        else:
            connection.ssn = self.primaryssn
        # Make the actual connection to the DSM.
        connection.open_connection()
        return connection

    def open_connection(self):
        """Creates the SCApi object.

        :return: SCApi object.
        :raises VolumeBackendAPIException:
        """
        LOG.info('open_connection to %(ssn)s at %(ip)s',
                 {'ssn': self.primaryssn,
                  'ip': self.config.san_ip})
        if not self.primaryssn:
            raise exception.VolumeBackendAPIException(
                data=_('Configuration error: dell_sc_ssn not set.'))

        try:
            # Open connection to REST API.
            connection = self._setup_connection()
        except Exception:
            # If we have credentials to swap to we try them here.
            if self._swap_credentials():
                connection = self._setup_connection()
            else:
                with excutils.save_and_reraise_exception():
                    LOG.error('Failed to connect to the API. '
                              'No backup DSM provided.')
        # Save our api version for next time.
        if self.apiversion != connection.apiversion:
            LOG.info('open_connection: Updating API version to %s',
                     connection.apiversion)
            self.apiversion = connection.apiversion
        return connection
This - # doesn't change in the case of a failover. - self.primaryssn = None - self.failed_over = False - self.vfname = 'openstack' - self.sfname = 'openstack' - self.excluded_domain_ips = [] - self.legacypayloadfilters = False - self.consisgroups = True - self.protocol = 'Iscsi' - self.apiversion = apiversion - # Nothing other than Replication should care if we are direct connect - # or not. - self.is_direct_connect = False - self.client = HttpClient(host, port, user, password, - verify, apiversion) - - def __enter__(self): - return self - - def __exit__(self, type, value, traceback): - self.close_connection() - - @staticmethod - def _check_result(rest_response): - """Checks and logs API responses. - - :param rest_response: The result from a REST API call. - :returns: ``True`` if success, ``False`` otherwise. - """ - if rest_response is not None: - if http_client.OK <= rest_response.status_code < ( - http_client.MULTIPLE_CHOICES): - # API call was a normal success - return True - - # Some versions return this as a dict. - try: - response_json = rest_response.json() - response_text = response_json.text['result'] - except Exception: - # We do not care why that failed. Just use the text. - response_text = rest_response.text - - LOG.debug('REST call result:\n' - '\tUrl: %(url)s\n' - '\tCode: %(code)d\n' - '\tReason: %(reason)s\n' - '\tText: %(text)s', - {'url': rest_response.url, - 'code': rest_response.status_code, - 'reason': rest_response.reason, - 'text': response_text}) - else: - LOG.warning('Failed to get REST call result.') - return False - - @staticmethod - def _path_to_array(path): - """Breaks a path into a reversed string array. - - :param path: Path to a folder on the Storage Center. - :return: A reversed array of each path element. - """ - array = [] - while True: - (path, tail) = os.path.split(path) - if tail == '': - array.reverse() - return array - array.append(tail) - - def _first_result(self, blob): - """Get the first result from the JSON return value. 
- - :param blob: Full return from a REST call. - :return: The JSON encoded dict or the first item in a JSON encoded - list. - """ - return self._get_result(blob, None, None) - - def _get_result(self, blob, attribute, value): - """Find the result specified by attribute and value. - - If the JSON blob is a list then it will be searched for the attribute - and value combination. If attribute and value are not specified then - the first item is returned. If the JSON blob is a dict then it - will be returned so long as the dict matches the attribute and value - combination or attribute is None. - - :param blob: The REST call's JSON response. Can be a list or dict. - :param attribute: The attribute we are looking for. If it is None - the first item in the list, or the dict, is returned. - :param value: The attribute value we are looking for. If the attribute - is None this value is ignored. - :returns: The JSON content in blob, the dict specified by matching the - attribute and value or None. - """ - rsp = None - content = self._get_json(blob) - if content is not None: - # We can get a list or a dict or nothing - if isinstance(content, list): - for r in content: - if attribute is None or r.get(attribute) == value: - rsp = r - break - elif isinstance(content, dict): - if attribute is None or content.get(attribute) == value: - rsp = content - elif attribute is None: - rsp = content - - if rsp is None: - LOG.debug('Unable to find result where %(attr)s is %(val)s', - {'attr': attribute, - 'val': value}) - LOG.debug('Blob was %(blob)s', {'blob': blob.text}) - return rsp - - def _get_json(self, blob): - """Returns a dict from the JSON of a REST response. - - :param blob: The response from a REST call. - :returns: JSON or None on error. - """ - try: - return blob.json() - except AttributeError: - LOG.error('Error invalid json: %s', blob) - except TypeError as ex: - LOG.error('Error TypeError. %s', ex) - except scanner.JSONDecodeError as ex: - LOG.error('Error JSONDecodeError. 
%s', ex) - # We are here so this went poorly. Log our blob. - LOG.debug('_get_json blob %s', blob) - return None - - def _get_id(self, blob): - """Returns the instanceId from a Dell REST object. - - :param blob: A Dell SC REST call's response. - :returns: The instanceId from the Dell SC object or None on error. - """ - try: - if isinstance(blob, dict): - return blob.get('instanceId') - except AttributeError: - LOG.error('Invalid API object: %s', blob) - except TypeError as ex: - LOG.error('Error TypeError. %s', ex) - except scanner.JSONDecodeError as ex: - LOG.error('Error JSONDecodeError. %s', ex) - LOG.debug('_get_id failed: blob %s', blob) - return None - - def _get_payload_filter(self, filterType='AND'): - # 2.1 or earlier and we are talking LegacyPayloadFilters. - if self.legacypayloadfilters: - return LegacyPayloadFilter(filterType) - return PayloadFilter(filterType) - - def _check_version_fail(self, payload, response): - try: - # Is it even our error? - result = self._get_json(response).get('result') - if result and result.startswith( - 'Invalid API version specified, ' - 'the version must be in the range ['): - # We're looking for something very specific. The except - # will catch any errors. - # Update our version and update our header. - self.apiversion = response.text.split('[')[1].split(',')[0] - self.client.header['x-dell-api-version'] = self.apiversion - LOG.debug('API version updated to %s', self.apiversion) - # Give login another go. - r = self.client.post('ApiConnection/Login', payload) - return r - except Exception: - # We don't care what failed. The clues are already in the logs. - # Just log a parsing error and move on. - LOG.error('_check_version_fail: Parsing error.') - # Just eat this if it isn't a version error. - return response - - def open_connection(self): - """Authenticate with Dell REST interface. - - :raises VolumeBackendAPIException.: - """ - # Set our fo state. 
- self.failed_over = (self.primaryssn != self.ssn) - - # Login - payload = {} - payload['Application'] = 'Cinder REST Driver' - payload['ApplicationVersion'] = self.APIDRIVERVERSION - r = self.client.post('ApiConnection/Login', payload) - if not self._check_result(r): - # SC requires a specific version. See if we can get it. - r = self._check_version_fail(payload, r) - # Either we tried to login and have a new result or we are - # just checking the same result. Either way raise on fail. - if not self._check_result(r): - raise exception.VolumeBackendAPIException( - data=_('Failed to connect to Dell REST API')) - - # We should be logged in. Try to grab the api version out of the - # response. - try: - apidict = self._get_json(r) - version = apidict['apiVersion'] - self.is_direct_connect = apidict['provider'] == 'StorageCenter' - splitver = version.split('.') - if splitver[0] == '2': - if splitver[1] == '0': - self.consisgroups = False - self.legacypayloadfilters = True - - elif splitver[1] == '1': - self.legacypayloadfilters = True - return - - except Exception: - # Good return but not the login response we were expecting. - # Log it and error out. - LOG.error('Unrecognized Login Response: %s', r) - - def close_connection(self): - """Logout of Dell REST API.""" - r = self.client.post('ApiConnection/Logout', {}) - # 204 expected. - self._check_result(r) - self.client = None - - def _use_provider_id(self, provider_id): - """See if our provider_id points at our current backend. - - provider_id is instanceId. The instanceId contains the ssn of the - StorageCenter it is hosted on. This must equal our current ssn or - it isn't valid. - - :param provider_id: Provider_id from an volume or snapshot object. 
- :returns: True/False - """ - ret = False - if provider_id: - try: - if provider_id.split('.')[0] == six.text_type(self.ssn): - ret = True - else: - LOG.debug('_use_provider_id: provider_id ' - '%(pid)r not valid on %(ssn)r', - {'pid': provider_id, 'ssn': self.ssn}) - except Exception: - LOG.error('_use_provider_id: provider_id %s is invalid!', - provider_id) - return ret - - def find_sc(self, ssn=-1): - """Check that the SC is there and being managed by EM. - - :returns: The SC SSN. - :raises VolumeBackendAPIException: - """ - # We might be looking for another ssn. If not then - # look for our default. - ssn = self._vet_ssn(ssn) - - r = self.client.get('StorageCenter/StorageCenter') - result = self._get_result(r, 'scSerialNumber', ssn) - if result is None: - LOG.error('Failed to find %(s)s. Result %(r)s', - {'s': ssn, - 'r': r}) - raise exception.VolumeBackendAPIException( - data=_('Failed to find Storage Center')) - - return self._get_id(result) - - # Folder functions - - def _create_folder(self, url, parent, folder, ssn=-1): - """Creates folder under parent. - - This can create both to server and volume folders. The REST url - sent in defines the folder type being created on the Dell Storage - Center backend. - - :param url: This is the Dell SC rest url for creating the specific - (server or volume) folder type. - :param parent: The instance ID of this folder's parent folder. - :param folder: The folder name to be created. This is one level deep. - :returns: The REST folder object. - """ - ssn = self._vet_ssn(ssn) - - scfolder = None - payload = {} - payload['Name'] = folder - payload['StorageCenter'] = ssn - if parent != '': - payload['Parent'] = parent - payload['Notes'] = self.notes - - r = self.client.post(url, payload, True) - if self._check_result(r): - scfolder = self._first_result(r) - return scfolder - - def _create_folder_path(self, url, foldername, ssn=-1): - """Creates a folder path from a fully qualified name. 
- - The REST url sent in defines the folder type being created on the Dell - Storage Center backend. Thus this is generic to server and volume - folders. - - :param url: This is the Dell SC REST url for creating the specific - (server or volume) folder type. - :param foldername: The full folder name with path. - :returns: The REST folder object. - """ - ssn = self._vet_ssn(ssn) - - path = self._path_to_array(foldername) - folderpath = '' - instanceId = '' - # Technically the first folder is the root so that is already created. - found = True - scfolder = None - for folder in path: - folderpath = folderpath + folder - # If the last was found see if this part of the path exists too - if found: - listurl = url + '/GetList' - scfolder = self._find_folder(listurl, folderpath, ssn) - if scfolder is None: - found = False - # We didn't find it so create it - if found is False: - scfolder = self._create_folder(url, instanceId, folder, ssn) - # If we haven't found a folder or created it then leave - if scfolder is None: - LOG.error('Unable to create folder path %s', folderpath) - break - # Next part of the path will need this - instanceId = self._get_id(scfolder) - folderpath = folderpath + '/' - return scfolder - - def _find_folder(self, url, foldername, ssn=-1): - """Find a folder on the SC using the specified url. - - Most of the time the folder will already have been created so - we look for the end folder and check that the rest of the path is - right. - - The REST url sent in defines the folder type being created on the Dell - Storage Center backend. Thus this is generic to server and volume - folders. - - :param url: The portion of the url after the base url (see http class) - to use for this operation. (Can be for Server or Volume - folders.) - :param foldername: Full path to the folder we are looking for. - :returns: Dell folder object. 
- """ - ssn = self._vet_ssn(ssn) - - pf = self._get_payload_filter() - pf.append('scSerialNumber', ssn) - basename = os.path.basename(foldername) - pf.append('Name', basename) - # If we have any kind of path we throw it into the filters. - folderpath = os.path.dirname(foldername) - if folderpath != '': - # SC convention is to end with a '/' so make sure we do. - folderpath += '/' - pf.append('folderPath', folderpath) - folder = None - r = self.client.post(url, pf.payload) - if self._check_result(r): - folder = self._get_result(r, 'folderPath', folderpath) - return folder - - def _find_volume_folder(self, create=False, ssn=-1): - """Looks for the volume folder where backend volumes will be created. - - Volume folder is specified in the cindef.conf. See __init. - - :param create: If True will create the folder if not found. - :returns: Folder object. - """ - folder = self._find_folder('StorageCenter/ScVolumeFolder/GetList', - self.vfname, ssn) - # Doesn't exist? make it - if folder is None and create is True: - folder = self._create_folder_path('StorageCenter/ScVolumeFolder', - self.vfname, ssn) - return folder - - def _init_volume(self, scvolume): - """Initializes the volume. - - Maps the volume to a random server and immediately unmaps - it. This initializes the volume. - - Don't wig out if this fails. - :param scvolume: Dell Volume object. - """ - pf = self._get_payload_filter() - pf.append('scSerialNumber', scvolume.get('scSerialNumber')) - r = self.client.post('StorageCenter/ScServer/GetList', pf.payload) - if self._check_result(r): - scservers = self._get_json(r) - # Sort through the servers looking for one with connectivity. - for scserver in scservers: - # This needs to be either a physical or virtual server. - # Outside of tempest tests this should not matter as we only - # "init" a volume to allow snapshotting of an empty volume. 
- if (scserver.get('status', 'down').lower() != 'down' and - scserver.get('type', '').lower() == 'physical'): - # Map to actually create the volume - self.map_volume(scvolume, scserver) - # We have changed the volume so grab a new copy of it. - scvolume = self.get_volume(self._get_id(scvolume)) - # Unmap - self.unmap_volume(scvolume, scserver) - # Did it work? - if not scvolume.get('active', False): - LOG.debug('Failed to activate volume %(name)s via ' - 'server %(srvr)s)', - {'name': scvolume['name'], - 'srvr': scserver['name']}) - else: - return - # We didn't map/unmap the volume. So no initialization done. - # Warn the user before we leave. Note that this is almost certainly - # a tempest test failure we are trying to catch here. A snapshot - # has likely been attempted before the volume has been instantiated - # on the Storage Center. In the real world no one will snapshot - # a volume without first putting some data in that volume. - LOG.warning('Volume %(name)s initialization failure. ' - 'Operations such as snapshot and clone may fail due ' - 'to inactive volume.)', {'name': scvolume['name']}) - - def _find_storage_profile(self, storage_profile): - """Looks for a Storage Profile on the array. - - Storage Profiles determine tiering settings. If not specified a volume - will use the Default storage profile. - - :param storage_profile: The Storage Profile name to find with any - spaces stripped. - :returns: The Storage Profile object or None. - """ - if not storage_profile: - return None - - # Since we are stripping out spaces for convenience we are not - # able to just filter on name. Need to get all Storage Profiles - # and look through for the one we want. Never many profiles, so - # this doesn't cause as much overhead as it might seem. 
- storage_profile = storage_profile.replace(' ', '').lower() - pf = self._get_payload_filter() - pf.append('scSerialNumber', self.ssn) - r = self.client.post('StorageCenter/ScStorageProfile/GetList', - pf.payload) - if self._check_result(r): - profiles = self._get_json(r) - for profile in profiles: - # Look for the stripped, case insensitive match - name = profile.get('name', '').replace(' ', '').lower() - if name == storage_profile: - return profile - return None - - def _find_user_replay_profiles(self): - """Find user default profiles. - - Note that this only deals with standard and not cg profiles. - - :return: List of replay profiles. - """ - user_prefs = self._get_user_preferences() - if user_prefs: - profileids = [profile['instanceId'] for profile in - user_prefs['replayProfileList']] - return profileids - return [] - - def _find_daily_replay_profile(self): - """Find the system replay profile named "Daily". - - :return: Profile instanceId or None. - """ - pf = self._get_payload_filter() - pf.append('scSerialNumber', self.ssn) - pf.append('instanceName', 'Daily') - r = self.client.post('StorageCenter/ScReplayProfile/GetList', - pf.payload) - if self._check_result(r): - profiles = self._get_json(r) - if profiles: - return profiles[0]['instanceId'] - return None - - def _find_replay_profiles(self, replay_profile_string): - """Find our replay profiles. - - Note that if called on volume creation the removeids list can be safely - ignored. - - :param replay_profile_string: Comma separated list of profile names. - :return: List replication profiles to use, List to remove. - :raises VolumeBackendAPIException: If we can't find our profiles. - """ - addids = [] - removeids = [] - replay_profiles = [] - if replay_profile_string: - replay_profiles = replay_profile_string.split(',') - # Most of the time they will not specify this so don't call anything. 
- if replay_profiles: - pf = self._get_payload_filter() - pf.append('scSerialNumber', self.ssn) - pf.append('type', 'Standard') - r = self.client.post('StorageCenter/ScReplayProfile/GetList', - pf.payload) - if self._check_result(r): - profiles = self._get_json(r) - for profile in profiles: - if replay_profiles.count(profile['name']) > 0: - addids.append(profile['instanceId']) - else: - # in the volume. - removeids.append(profile['instanceId']) - # Check that we've found what we are looking for if anything - if len(addids) != len(replay_profiles): - msg = (_('Unable to locate specified replay profiles %s ') % - replay_profile_string) - raise exception.VolumeBackendAPIException(data=msg) - - return addids, removeids - - def update_replay_profiles(self, scvolume, replay_profile_string): - """Update our replay profiles. - - If the replay_profile_string is empty we look for the user's default - profiles. If those aren't found we look for the Daily profile. - - Note that this is in addition to the CG profiles which we do not touch. - - :param scvolume: SC Volume object. - :param replay_profile_string: Comma separated string of replay profile - names. - :return: True/False. - """ - # Find our replay_profiles. - addids, removeids = self._find_replay_profiles(replay_profile_string) - # We either found what we were looking for. - # If we are clearing out our ids then find a default. - if not addids: - # if no replay profiles specified we must be clearing out. - addids = self._find_user_replay_profiles() - if not addids: - addids = [self._find_daily_replay_profile()] - # Do any removals first. - for id in removeids: - # We might have added to the addids list after creating removeids. - # User preferences or the daily profile could have been added. - # If our id is in both lists just skip it and remove it from - # The add list. 
- if addids.count(id): - addids.remove(id) - elif not self._update_volume_profiles( - scvolume, addid=None, removeid=id): - return False - # Add anything new. - for id in addids: - if not self._update_volume_profiles( - scvolume, addid=id, removeid=None): - return False - return True - - def _check_add_profile_payload(self, payload, profile, name, type): - if name: - if profile is None: - msg = _('Profile %s not found.') % name - raise exception.VolumeBackendAPIException(data=msg) - else: - payload[type] = self._get_id(profile) - - def create_volume(self, name, size, storage_profile=None, - replay_profile_string=None, volume_qos=None, - group_qos=None, datareductionprofile=None): - """Creates a new volume on the Storage Center. - - It will create it in a folder called self.vfname. If self.vfname - does not exist it will create it. If it cannot create it - the volume will be created in the root. - - :param name: Name of the volume to be created on the Dell SC backend. - This is the cinder volume ID. - :param size: The size of the volume to be created in GB. - :param storage_profile: Optional storage profile to set for the volume. - :param replay_profile_string: Optional replay profile to set for - the volume. - :param volume_qos: Volume QOS profile name. - :param group_qos: Group QOS profile name. - :param datareductionprofile: Data reduction profile name - - :returns: Dell Volume object or None. - """ - LOG.debug('create_volume: %(name)s %(ssn)s %(folder)s %(profile)s ' - '%(vqos)r %(gqos)r %(dup)r', - {'name': name, - 'ssn': self.ssn, - 'folder': self.vfname, - 'profile': storage_profile, - 'replay': replay_profile_string, - 'vqos': volume_qos, - 'gqos': group_qos, - 'dup': datareductionprofile}) - - # Find our folder - folder = self._find_volume_folder(True) - - # If we actually have a place to put our volume create it - if folder is None: - LOG.warning('Unable to create folder %s', self.vfname) - - # Find our replay_profiles. 
- addids, removeids = self._find_replay_profiles(replay_profile_string) - - # Init our return. - scvolume = None - - # Create the volume - payload = {} - payload['Name'] = name - payload['Notes'] = self.notes - payload['Size'] = '%d GB' % size - payload['StorageCenter'] = self.ssn - if folder is not None: - payload['VolumeFolder'] = self._get_id(folder) - # Add our storage profile. - self._check_add_profile_payload( - payload, self._find_storage_profile(storage_profile), - storage_profile, 'StorageProfile') - # Add our Volume QOS Profile. - self._check_add_profile_payload( - payload, self._find_qos_profile(volume_qos), volume_qos, - 'VolumeQosProfile') - # Add our Group QOS Profile. - self._check_add_profile_payload( - payload, self._find_qos_profile(group_qos, True), group_qos, - 'GroupQosProfile') - # Add our Data Reduction Proflie. - self._check_add_profile_payload( - payload, self._find_data_reduction_profile(datareductionprofile), - datareductionprofile, 'DataReductionProfile') - - # This is a new volume so there is nothing to remove. - if addids: - payload['ReplayProfileList'] = addids - - r = self.client.post('StorageCenter/ScVolume', payload, True) - if self._check_result(r): - # Our volume should be in the return. - scvolume = self._get_json(r) - if scvolume: - LOG.info('Created volume %(instanceId)s: %(name)s', - {'instanceId': scvolume['instanceId'], - 'name': scvolume['name']}) - else: - LOG.error('ScVolume returned success with empty payload.' - ' Attempting to locate volume') - # In theory it is there since success was returned. - # Try one last time to find it before returning. - scvolume = self._search_for_volume(name) - else: - LOG.error('Unable to create volume on SC: %s', name) - - return scvolume - - def _get_volume_list(self, name, deviceid, filterbyvfname=True, ssn=-1): - """Return the specified list of volumes. - - :param name: Volume name. - :param deviceid: Volume device ID on the SC backend. 
- :param filterbyvfname: If set to true then this filters by the preset - folder name. - :param ssn: SSN to search on. - :return: Returns the scvolume list or None. - """ - ssn = self._vet_ssn(ssn) - result = None - # We need a name or a device ID to find a volume. - if name or deviceid: - pf = self._get_payload_filter() - pf.append('scSerialNumber', ssn) - if name is not None: - pf.append('Name', name) - if deviceid is not None: - pf.append('DeviceId', deviceid) - # set folderPath - if filterbyvfname: - vfname = (self.vfname if self.vfname.endswith('/') - else self.vfname + '/') - pf.append('volumeFolderPath', vfname) - r = self.client.post('StorageCenter/ScVolume/GetList', pf.payload) - if self._check_result(r): - result = self._get_json(r) - # We return None if there was an error and a list if the command - # succeeded. It might be an empty list. - return result - - def _autofailback(self, lv): - # if we have a working replication state. - ret = False - LOG.debug('Attempting autofailback of %s', lv) - if (lv and lv['status'] == 'Up' and lv['replicationState'] == 'Up' and - lv['failoverState'] == 'Protected' and lv['secondaryStatus'] == 'Up' - and lv['primarySwapRoleState'] == 'NotSwapping'): - ret = self.swap_roles_live_volume(lv) - return ret - - def _find_volume_primary(self, provider_id, name): - # We look for our primary. If it doesn't exist and we have an activated - # secondary then we return that. - # if there is no live volume then we return our provider_id. - primary_id = provider_id - lv = self.get_live_volume(provider_id, name) - LOG.info('Volume %(name)r, id %(provider)s at primary %(primary)s.', - {'name': name, - 'provider': provider_id, - 'primary': primary_id}) - # If we have a live volume and are swapped and are not failed over - # at least give failback a shot. 
- if lv and (self.is_swapped(provider_id, lv) and not self.failed_over - and self._autofailback(lv)): - lv = self.get_live_volume(provider_id) - LOG.info('After failback %s', lv) - # Make sure we still have a LV. - if lv: - # At this point if the secondaryRole is Active we have - # to return that. Else return normal primary. - if lv.get('secondaryRole') == 'Activated': - primary_id = lv['secondaryVolume']['instanceId'] - else: - primary_id = lv['primaryVolume']['instanceId'] - return primary_id - - def find_volume(self, name, provider_id, islivevol=False): - """Find the volume by name or instanceId. - - We check if we can use provider_id before using it. If so then - we expect to find it by provider_id. - - We also conclude our failover at this point. If we are failed over we - run _import_one to rename the volume. - - :param name: Volume name. - :param provider_id: instanceId of the volume if known. - :param islivevol: Is this a live volume. - :return: sc volume object or None. - :raises VolumeBackendAPIException: if unable to import. - """ - LOG.debug('find_volume: name:%(name)r provider_id:%(id)r islv:%(lv)r', - {'name': name, 'id': provider_id, - 'lv': islivevol}) - scvolume = None - if islivevol: - # Just get the primary from the sc live vol. - primary_id = self._find_volume_primary(provider_id, name) - scvolume = self.get_volume(primary_id) - elif self._use_provider_id(provider_id): - # just get our volume - scvolume = self.get_volume(provider_id) - # if we are failed over we need to check if we - # need to import the failed over volume. - if self.failed_over: - if scvolume['name'] == self._repl_name(name): - scvolume = self._import_one(scvolume, name) - if not scvolume: - msg = (_('Unable to complete failover of %s.') - % name) - raise exception.VolumeBackendAPIException(data=msg) - LOG.info('Imported %(fail)s to %(guid)s.', - {'fail': self._repl_name(name), - 'guid': name}) - else: - # No? Then search for it. 
- scvolume = self._search_for_volume(name) - return scvolume - - def _search_for_volume(self, name): - """Search self.ssn for volume of name. - - This searches the folder self.vfname (specified in the cinder.conf) - for the volume first. If not found it searches the entire array for - the volume. - - :param name: Name of the volume to search for. This is the cinder - volume ID. - :returns: Dell Volume object or None if not found. - :raises VolumeBackendAPIException: If multiple copies are found. - """ - LOG.debug('Searching %(sn)s for %(name)s', - {'sn': self.ssn, - 'name': name}) - - # Cannot find a volume without the name. - if name is None: - return None - - # Look for our volume in our folder. - vollist = self._get_volume_list(name, None, True) - # If an empty list was returned they probably moved the volumes or - # changed the folder name so try again without the folder. - if not vollist: - LOG.debug('Cannot find volume %(n)s in %(v)s. Searching SC.', - {'n': name, - 'v': self.vfname}) - vollist = self._get_volume_list(name, None, False) - - # If multiple volumes of the same name are found we need to error. - if len(vollist) > 1: - # blow up - msg = _('Multiple copies of volume %s found.') % name - raise exception.VolumeBackendAPIException(data=msg) - - # We made it and should have a valid volume. - return None if not vollist else vollist[0] - - def get_volume(self, provider_id): - """Returns the scvolume associated with provider_id. - - :param provider_id: This is the instanceId - :return: Dell SCVolume object. - """ - result = None - if provider_id: - r = self.client.get('StorageCenter/ScVolume/%s' % provider_id) - if self._check_result(r): - result = self._get_json(r) - return result - - def delete_volume(self, name, provider_id=None): - """Deletes the volume from the SC backend array. - - If the volume cannot be found we claim success. - - :param name: Name of the volume to search for. This is the cinder - volume ID. 
- :param provider_id: This is the instanceId - :returns: Boolean indicating success or failure. - """ - vol = self.find_volume(name, provider_id) - provider_id = None if not vol else self._get_id(vol) - - # If we have an id then delete the volume. - if provider_id: - r = self.client.delete('StorageCenter/ScVolume/%s' % provider_id, - async=True) - if not self._check_result(r): - msg = _('Error deleting volume %(ssn)s: %(volume)s') % { - 'ssn': self.ssn, - 'volume': provider_id} - raise exception.VolumeBackendAPIException(data=msg) - - # json return should be true or false - return self._get_json(r) - - # If we can't find the volume then it is effectively gone. - LOG.warning('delete_volume: unable to find volume ' - 'provider_id: %s', provider_id) - return True - - def _find_server_folder(self, create=False, ssn=-1): - """Looks for the server folder on the Dell Storage Center. - - This is the folder where a server objects for mapping volumes will be - created. Server folder is specified in cinder.conf. See __init. - - :param create: If True will create the folder if not found. - :return: Folder object. - """ - ssn = self._vet_ssn(ssn) - - folder = self._find_folder('StorageCenter/ScServerFolder/GetList', - self.sfname, ssn) - if folder is None and create is True: - folder = self._create_folder_path('StorageCenter/ScServerFolder', - self.sfname, ssn) - return folder - - def _add_hba(self, scserver, wwnoriscsiname): - """This adds a server HBA to the Dell server object. - - The HBA is taken from the connector provided in initialize_connection. - The Dell server object is largely a container object for the list of - HBAs associated with a single server (or vm or cluster) for the - purposes of mapping volumes. - - :param scserver: Dell server object. - :param wwnoriscsiname: The WWN or IQN to add to this server. - :returns: Boolean indicating success or failure. 
- """ - payload = {} - payload['HbaPortType'] = self.protocol - payload['WwnOrIscsiName'] = wwnoriscsiname - payload['AllowManual'] = True - r = self.client.post('StorageCenter/ScPhysicalServer/%s/AddHba' - % self._get_id(scserver), payload, True) - if not self._check_result(r): - LOG.error('_add_hba error: %(wwn)s to %(srvname)s', - {'wwn': wwnoriscsiname, - 'srvname': scserver['name']}) - return False - return True - - def _find_serveros(self, osname='Red Hat Linux 6.x', ssn=-1): - """Returns the serveros instance id of the specified osname. - - Required to create a Dell server object. - - We do not know that we are Red Hat Linux 6.x but that works - best for Red Hat and Ubuntu. So we use that. - - :param osname: The name of the OS to look for. - :param ssn: ssn of the backend SC to use. Default if -1. - :returns: InstanceId of the ScServerOperatingSystem object. - """ - ssn = self._vet_ssn(ssn) - pf = self._get_payload_filter() - pf.append('scSerialNumber', ssn) - r = self.client.post('StorageCenter/ScServerOperatingSystem/GetList', - pf.payload) - if self._check_result(r): - oslist = self._get_json(r) - for srvos in oslist: - name = srvos.get('name', 'nope') - if name.lower() == osname.lower(): - # Found it return the id - return self._get_id(srvos) - - LOG.warning('Unable to find appropriate OS %s', osname) - - return None - - def create_server(self, wwnlist, serveros, ssn=-1): - """Creates a server with multiple WWNS associated with it. - - Same as create_server except it can take a list of HBAs. - - :param wwnlist: A list of FC WWNs or iSCSI IQNs associated with this - server. - :param serveros: Name of server OS to use when creating the server. - :param ssn: ssn of the backend SC to use. Default if -1. - :returns: Dell server object. - """ - # Find our folder or make it - folder = self._find_server_folder(True, ssn) - # Create our server. 
- scserver = self._create_server('Server_' + wwnlist[0], folder, - serveros, ssn) - if not scserver: - return None - # Add our HBAs. - if scserver: - for wwn in wwnlist: - if not self._add_hba(scserver, wwn): - # We failed so log it. Delete our server and return None. - LOG.error('Error adding HBA %s to server', wwn) - self._delete_server(scserver) - return None - return scserver - - def _create_server(self, servername, folder, serveros, ssn): - ssn = self._vet_ssn(ssn) - - LOG.info('Creating server %s', servername) - payload = {} - payload['Name'] = servername - payload['StorageCenter'] = ssn - payload['Notes'] = self.notes - payload['AlertOnConnectivity'] = False - - # We pick Red Hat Linux 6.x because it supports multipath and - # will attach luns to paths as they are found. - scserveros = self._find_serveros(serveros, ssn) - if not scserveros: - scserveros = self._find_serveros(ssn=ssn) - if scserveros is not None: - payload['OperatingSystem'] = scserveros - - # At this point it doesn't matter if we have a folder or not. - # Let it be in the root if the folder creation fails. - if folder is not None: - payload['ServerFolder'] = self._get_id(folder) - - # create our server - r = self.client.post('StorageCenter/ScPhysicalServer', payload, True) - if self._check_result(r): - # Server was created - scserver = self._first_result(r) - LOG.info('SC server created %s', scserver) - return scserver - LOG.error('Unable to create SC server %s', servername) - return None - - def _vet_ssn(self, ssn): - """Returns the default if a ssn was not set. - - Added to support live volume as we aren't always on the primary ssn - anymore - - :param ssn: ssn to check. - :return: Current ssn or the ssn sent down. - """ - if ssn == -1: - return self.ssn - return ssn - - def find_server(self, instance_name, ssn=-1): - """Hunts for a server on the Dell backend by instance_name. - - The instance_name is the same as the server's HBA. This is the IQN or - WWN listed in the connector. 
If found, the server the HBA is attached - to, if any, is returned. - - :param instance_name: instance_name is a FC WWN or iSCSI IQN from - the connector. In cinder a server is identified - by its HBA. - :param ssn: Storage center to search. - :returns: Dell server object or None. - """ - ssn = self._vet_ssn(ssn) - - scserver = None - # We search for our server by first finding our HBA - hba = self._find_serverhba(instance_name, ssn) - # Once created hbas stay in the system. So it isn't enough - # that we found one it actually has to be attached to a - # server. - if hba is not None and hba.get('server') is not None: - pf = self._get_payload_filter() - pf.append('scSerialNumber', ssn) - pf.append('instanceId', self._get_id(hba['server'])) - r = self.client.post('StorageCenter/ScServer/GetList', pf.payload) - if self._check_result(r): - scserver = self._first_result(r) - - if scserver is None: - LOG.debug('Server (%s) not found.', instance_name) - return scserver - - def _find_serverhba(self, instance_name, ssn): - """Hunts for a server HBA on the Dell backend by instance_name. - - Instance_name is the same as the IQN or WWN specified in the - connector. - - :param instance_name: Instance_name is a FC WWN or iSCSI IQN from - the connector. - :param ssn: Storage center to search. - :returns: Dell server HBA object. - """ - scserverhba = None - # We search for our server by first finding our HBA - pf = self._get_payload_filter() - pf.append('scSerialNumber', ssn) - pf.append('instanceName', instance_name) - r = self.client.post('StorageCenter/ScServerHba/GetList', pf.payload) - if self._check_result(r): - scserverhba = self._first_result(r) - return scserverhba - - def _find_domains(self, cportid): - """Find the list of Dell domain objects associated with the cportid. - - :param cportid: The Instance ID of the Dell controller port. - :returns: List of fault domains associated with this controller port. 
- """ - r = self.client.get('StorageCenter/ScControllerPort/%s/FaultDomainList' - % cportid) - if self._check_result(r): - domains = self._get_json(r) - return domains - - LOG.error('Error getting FaultDomainList for %s', cportid) - return None - - def _find_initiators(self, scserver): - """Returns a list of WWNs associated with the specified Dell server. - - :param scserver: The Dell backend server object. - :returns: A list of WWNs associated with this server. - """ - initiators = [] - r = self.client.get('StorageCenter/ScServer/%s/HbaList' - % self._get_id(scserver)) - if self._check_result(r): - hbas = self._get_json(r) - for hba in hbas: - wwn = hba.get('instanceName') - if (hba.get('portType') == self.protocol and - wwn is not None): - initiators.append(wwn) - else: - LOG.error('Unable to find initiators') - LOG.debug('_find_initiators: %s', initiators) - return initiators - - def get_volume_count(self, scserver): - """Returns the number of volumes attached to specified Dell server. - - :param scserver: The Dell backend server object. - :returns: Mapping count. -1 if there was an error. - """ - r = self.client.get('StorageCenter/ScServer/%s/MappingList' - % self._get_id(scserver)) - if self._check_result(r): - mappings = self._get_json(r) - return len(mappings) - # Panic mildly but do not return 0. - return -1 - - def _find_mappings(self, scvolume): - """Find the Dell volume object mappings. - - :param scvolume: Dell volume object. - :returns: A list of Dell mappings objects. 
- """ - mappings = [] - if scvolume.get('active', False): - r = self.client.get('StorageCenter/ScVolume/%s/MappingList' - % self._get_id(scvolume)) - if self._check_result(r): - mappings = self._get_json(r) - else: - LOG.error('_find_mappings: volume is not active') - LOG.info('Volume mappings for %(name)s: %(mappings)s', - {'name': scvolume.get('name'), - 'mappings': mappings}) - return mappings - - def _find_mapping_profiles(self, scvolume): - """Find the Dell volume object mapping profiles. - - :param scvolume: Dell volume object. - :returns: A list of Dell mapping profile objects. - """ - mapping_profiles = [] - r = self.client.get('StorageCenter/ScVolume/%s/MappingProfileList' - % self._get_id(scvolume)) - if self._check_result(r): - mapping_profiles = self._get_json(r) - else: - LOG.error('Unable to find mapping profiles: %s', - scvolume.get('name')) - LOG.debug(mapping_profiles) - return mapping_profiles - - def _find_controller_port(self, cportid): - """Finds the SC controller port object for the specified cportid. - - :param cportid: The instanceID of the Dell backend controller port. - :returns: The controller port object. - """ - controllerport = None - r = self.client.get('StorageCenter/ScControllerPort/%s' % cportid) - if self._check_result(r): - controllerport = self._first_result(r) - LOG.debug('_find_controller_port: %s', controllerport) - return controllerport - - def find_wwns(self, scvolume, scserver): - """Finds the lun and wwns of the mapped volume. - - :param scvolume: Storage Center volume object. - :param scserver: Storage Center server opbject. - :returns: Lun, wwns, initiator target map - """ - lun = None # our lun. We return the first lun. - wwns = [] # list of targets - itmap = {} # dict of initiators and the associated targets - - # Make sure we know our server's initiators. Only return - # mappings that contain HBA for this server. 
- initiators = self._find_initiators(scserver) - # Get our volume mappings - mappings = self._find_mappings(scvolume) - # We check each of our mappings. We want to return - # the mapping we have been configured to use. - for mapping in mappings: - # Find the controller port for this mapping - cport = mapping.get('controllerPort') - controllerport = self._find_controller_port(self._get_id(cport)) - if controllerport is not None: - # This changed case at one point or another. - # Look for both keys. - wwn = controllerport.get('wwn', controllerport.get('WWN')) - if wwn: - serverhba = mapping.get('serverHba') - if serverhba: - hbaname = serverhba.get('instanceName') - if hbaname in initiators: - if itmap.get(hbaname) is None: - itmap[hbaname] = [] - itmap[hbaname].append(wwn) - wwns.append(wwn) - mappinglun = mapping.get('lun') - if lun is None: - lun = mappinglun - elif lun != mappinglun: - LOG.warning('Inconsistent Luns.') - else: - LOG.debug('%s not found in initiator list', - hbaname) - else: - LOG.warning('_find_wwn: serverhba is None.') - else: - LOG.warning('_find_wwn: Unable to find port wwn.') - else: - LOG.warning('_find_wwn: controllerport is None.') - LOG.info('_find_wwns-lun: %(lun)s wwns: %(wwn)s itmap: %(map)s', - {'lun': lun, - 'wwn': wwns, - 'map': itmap}) - return lun, wwns, itmap - - def _find_active_controller(self, scvolume): - """Finds the controller on which the Dell volume is active. - - There can be more than one Dell backend controller per Storage center - but a given volume can only be active on one of them at a time. - - :param scvolume: Dell backend volume object. - :returns: Active controller ID. 
- """ - actvctrl = None - volconfig = self._get_volume_configuration(scvolume) - if volconfig: - controller = volconfig.get('controller') - actvctrl = self._get_id(controller) - else: - LOG.error('Unable to retrieve VolumeConfiguration: %s', - self._get_id(scvolume)) - LOG.debug('_find_active_controller: %s', actvctrl) - return actvctrl - - def _get_controller_id(self, mapping): - # The mapping lists the associated controller. - return self._get_id(mapping.get('controller')) - - def _get_domains(self, mapping): - # Return a list of domains associated with this controller port. - return self._find_domains(self._get_id(mapping.get('controllerPort'))) - - def _get_iqn(self, mapping): - # Get our iqn from the controller port listed in our our mapping. - iqn = None - cportid = self._get_id(mapping.get('controllerPort')) - controllerport = self._find_controller_port(cportid) - if controllerport: - iqn = controllerport.get('iscsiName') - LOG.debug('_get_iqn: %s', iqn) - return iqn - - def _is_virtualport_mode(self, ssn=-1): - ssn = self._vet_ssn(ssn) - isvpmode = False - r = self.client.get('StorageCenter/ScConfiguration/%s' % ssn) - if self._check_result(r): - scconfig = self._get_json(r) - if scconfig and scconfig['iscsiTransportMode'] == 'VirtualPort': - isvpmode = True - return isvpmode - - def _find_controller_port_iscsi_config(self, cportid): - """Finds the SC controller port object for the specified cportid. - - :param cportid: The instanceID of the Dell backend controller port. - :returns: The controller port object. - """ - controllerport = None - r = self.client.get( - 'StorageCenter/ScControllerPortIscsiConfiguration/%s' % cportid) - if self._check_result(r): - controllerport = self._first_result(r) - else: - LOG.error('_find_controller_port_iscsi_config: ' - 'Error finding configuration: %s', cportid) - return controllerport - - def find_iscsi_properties(self, scvolume): - """Finds target information for a given Dell scvolume object mapping. 
- - The data coming back is both the preferred path and all the paths. - - :param scvolume: The dell sc volume object. - :returns: iSCSI property dictionary. - :raises VolumeBackendAPIException: - """ - LOG.debug('find_iscsi_properties: scvolume: %s', scvolume) - # Our mutable process object. - pdata = {'active': -1, - 'up': -1} - # Our output lists. - portals = [] - luns = [] - iqns = [] - - # Process just looks for the best port to return. - def process(lun, iqn, address, port, status, active): - """Process this mapping information. - - :param lun: SCSI Lun. - :param iqn: iSCSI IQN address. - :param address: IP address. - :param port: IP Port number - :param readonly: Boolean indicating mapping is readonly. - :param status: String indicating mapping status. (Up is what we - are looking for.) - :param active: Boolean indicating whether this is on the active - controller or not. - :return: Nothing - """ - if self.excluded_domain_ips.count(address) == 0: - # Make sure this isn't a duplicate. - newportal = address + ':' + six.text_type(port) - for idx, portal in enumerate(portals): - if portal == newportal and iqns[idx] == iqn: - LOG.debug('Skipping duplicate portal %(ptrl)s and' - 'iqn %(iqn)s.', {'ptrl': portal, 'iqn': iqn}) - return - # It isn't in the list so process it. - portals.append(newportal) - iqns.append(iqn) - luns.append(lun) - - # We need to point to the best link. - # So state active and status up is preferred - # but we don't actually need the state to be - # up at this point. - if pdata['up'] == -1: - if active: - pdata['active'] = len(iqns) - 1 - if status == 'Up': - pdata['up'] = pdata['active'] - - # Start by getting our mappings. - mappings = self._find_mappings(scvolume) - - # We should have mappings at the time of this call but do check. - if len(mappings) > 0: - # This might not be on the current controller. - ssn = self._get_id(scvolume).split('.')[0] - # In multipath (per Liberty) we will return all paths. 
But - # if multipath is not set (ip and port are None) then we need - # to return a mapping from the controller on which the volume - # is active. So find that controller. - actvctrl = self._find_active_controller(scvolume) - # Two different methods are used to find our luns and portals - # depending on whether we are in virtual or legacy port mode. - isvpmode = self._is_virtualport_mode(ssn) - # Trundle through our mappings. - for mapping in mappings: - # Don't return remote sc links. - msrv = mapping.get('server') - if msrv and msrv.get('objectType') == 'ScRemoteStorageCenter': - continue - - # The lun, ro mode and status are in the mapping. - LOG.debug('find_iscsi_properties: mapping: %s', mapping) - lun = mapping.get('lun') - status = mapping.get('status') - # Get our IQN from our mapping. - iqn = self._get_iqn(mapping) - # Check if our controller ID matches our active controller ID. - isactive = True if (self._get_controller_id(mapping) == - actvctrl) else False - # If we have an IQN and are in virtual port mode. - if isvpmode and iqn: - domains = self._get_domains(mapping) - if domains: - for dom in domains: - LOG.debug('find_iscsi_properties: domain: %s', dom) - ipaddress = dom.get('targetIpv4Address', - dom.get('wellKnownIpAddress')) - portnumber = dom.get('portNumber') - # We have all our information. Process this portal. - process(lun, iqn, ipaddress, portnumber, - status, isactive) - # Else we are in legacy mode. - elif iqn: - # Need to get individual ports - cportid = self._get_id(mapping.get('controllerPort')) - # Legacy mode stuff is in the ISCSI configuration object. - cpconfig = self._find_controller_port_iscsi_config(cportid) - # This should really never fail. Things happen so if it - # does just keep moving. Return what we can. - if cpconfig: - ipaddress = cpconfig.get('ipAddress') - portnumber = cpconfig.get('portNumber') - # We have all our information. Process this portal. 
- process(lun, iqn, ipaddress, portnumber, - status, isactive) - - # We've gone through all our mappings. - # Make sure we found something to return. - if len(luns) == 0: - # Since we just mapped this and can't find that mapping the world - # is wrong so we raise exception. - raise exception.VolumeBackendAPIException( - data=_('Unable to find iSCSI mappings.')) - - # Make sure we point to the best portal we can. This means it is - # on the active controller and, preferably, up. If it isn't return - # what we have. - if pdata['up'] != -1: - # We found a connection that is already up. Return that. - pdata['active'] = pdata['up'] - elif pdata['active'] == -1: - # This shouldn't be able to happen. Maybe a controller went - # down in the middle of this so just return the first one and - # hope the ports are up by the time the connection is attempted. - LOG.debug('find_iscsi_properties: ' - 'Volume is not yet active on any controller.') - pdata['active'] = 0 - - # Make sure we have a good item at the top of the list. - iqns.insert(0, iqns.pop(pdata['active'])) - portals.insert(0, portals.pop(pdata['active'])) - luns.insert(0, luns.pop(pdata['active'])) - data = {'target_discovered': False, - 'target_iqn': iqns[0], - 'target_iqns': iqns, - 'target_portal': portals[0], - 'target_portals': portals, - 'target_lun': luns[0], - 'target_luns': luns - } - LOG.debug('find_iscsi_properties: %s', data) - return data - - def map_volume(self, scvolume, scserver): - """Maps the Dell backend volume object to the Dell server object. - - The check for the Dell server object existence is elsewhere; does not - create the Dell server object. - - :param scvolume: Storage Center volume object. - :param scserver: Storage Center server object. 
- :returns: SC mapping profile or None - """ - # Make sure we have what we think we have - serverid = self._get_id(scserver) - volumeid = self._get_id(scvolume) - if serverid is not None and volumeid is not None: - # If we have a mapping to our server return it here. - mprofiles = self._find_mapping_profiles(scvolume) - for mprofile in mprofiles: - if self._get_id(mprofile.get('server')) == serverid: - LOG.info('Volume %(vol)s already mapped to %(srv)s', - {'vol': scvolume['name'], - 'srv': scserver['name']}) - return mprofile - # No? Then map it up. - payload = {} - payload['server'] = serverid - payload['Advanced'] = {'MapToDownServerHbas': True} - r = self.client.post('StorageCenter/ScVolume/%s/MapToServer' - % volumeid, payload, True) - if self._check_result(r): - # We just return our mapping - LOG.info('Volume %(vol)s mapped to %(srv)s', - {'vol': scvolume['name'], - 'srv': scserver['name']}) - return self._first_result(r) - - # Error out - LOG.error('Unable to map %(vol)s to %(srv)s', - {'vol': scvolume['name'], - 'srv': scserver['name']}) - return None - - def unmap_volume(self, scvolume, scserver): - """Unmaps the Dell volume object from the Dell server object. - - Deletes all mappings to a Dell server object, not just the ones on - the path defined in cinder.conf. - - :param scvolume: Storage Center volume object. - :param scserver: Storage Center server object. - :returns: True or False. - """ - rtn = True - serverid = self._get_id(scserver) - volumeid = self._get_id(scvolume) - if serverid is not None and volumeid is not None: - profiles = self._find_mapping_profiles(scvolume) - for profile in profiles: - prosrv = profile.get('server') - if prosrv is not None and self._get_id(prosrv) == serverid: - r = self.client.delete('StorageCenter/ScMappingProfile/%s' - % self._get_id(profile), - async=True) - if self._check_result(r): - # Check our result in the json. - result = self._get_json(r) - # EM 15.1 and 15.2 return a boolean directly. 
- # 15.3 on up return it in a dict under 'result'. - if result is True or (type(result) is dict and - result.get('result')): - LOG.info( - 'Volume %(vol)s unmapped from %(srv)s', - {'vol': scvolume['name'], - 'srv': scserver['name']}) - continue - - LOG.error('Unable to unmap %(vol)s from %(srv)s', - {'vol': scvolume['name'], - 'srv': scserver['name']}) - # 1 failed unmap is as good as 100. - # Fail it and leave - rtn = False - break - # return true/false. - return rtn - - def unmap_all(self, scvolume): - volumeid = self._get_id(scvolume) - r = self.client.post('StorageCenter/ScVolume/%s/Unmap' % volumeid, - {}, True) - return self._check_result(r) - - def get_storage_usage(self): - """Gets the storage usage object from the Dell backend. - - This contains capacity and usage information for the SC. - - :returns: The SC storageusage object. - """ - storageusage = None - if self.ssn is not None: - r = self.client.get( - 'StorageCenter/StorageCenter/%s/StorageUsage' % self.ssn) - if self._check_result(r): - storageusage = self._get_json(r) - return storageusage - - def _is_active(self, scvolume): - if (scvolume.get('active') is not True or - scvolume.get('replayAllowed') is not True): - return False - return True - - def create_replay(self, scvolume, replayid, expire): - """Takes a snapshot of a volume. - - One could snap a volume before it has been activated, so activate - by mapping and unmapping to a random server and let them. This - should be a fail but the Tempest tests require it. - - :param scvolume: Volume to snapshot. - :param replayid: Name to use for the snapshot. This is a portion of - the snapshot ID as we do not have space for the - entire GUID in the replay description. - :param expire: Time in minutes before the replay expires. For most - snapshots this will be 0 (never expire) but if we are - cloning a volume we will snap it right before creating - the clone. - :returns: The Dell replay object or None. 
- :raises VolumeBackendAPIException: On failure to intialize volume. - """ - replay = None - if scvolume is not None: - if not self._is_active(scvolume): - self._init_volume(scvolume) - scvolume = self.get_volume(self._get_id(scvolume)) - if not self._is_active(scvolume): - raise exception.VolumeBackendAPIException( - message=( - _('Unable to create snapshot from empty volume.' - ' %s') % scvolume['name'])) - # We have a volume and it is initialized. - payload = {} - payload['description'] = replayid - payload['expireTime'] = expire - r = self.client.post('StorageCenter/ScVolume/%s/CreateReplay' - % self._get_id(scvolume), payload, True) - if self._check_result(r): - replay = self._first_result(r) - - # Quick double check. - if replay is None: - LOG.warning('Unable to create snapshot %s', replayid) - # Return replay or None. - return replay - - def find_replay(self, scvolume, replayid): - """Searches for the replay by replayid. - - replayid is stored in the replay's description attribute. - - :param scvolume: Dell volume object. - :param replayid: Name to search for. This is a portion of the - snapshot ID as we do not have space for the entire - GUID in the replay description. - :returns: Dell replay object or None. - """ - r = self.client.get('StorageCenter/ScVolume/%s/ReplayList' - % self._get_id(scvolume)) - try: - replays = self._get_json(r) - # This will be a list. If it isn't bail - if isinstance(replays, list): - for replay in replays: - # The only place to save our information with the public - # api is the description field which isn't quite long - # enough. So we check that our description is pretty much - # the max length and we compare that to the start of - # the snapshot id. - description = replay.get('description') - if (len(description) >= 30 and - replayid.startswith(description) is True and - replay.get('markedForExpiration') is not True): - # We found our replay so return it. 
- return replay - except Exception: - LOG.error('Invalid ReplayList return: %s', - r) - # If we are here then we didn't find the replay so warn and leave. - LOG.warning('Unable to find snapshot %s', - replayid) - - return None - - def manage_replay(self, screplay, replayid): - """Basically renames the screplay and sets it to never expire. - - :param screplay: DellSC object. - :param replayid: New name for replay. - :return: True on success. False on fail. - """ - if screplay and replayid: - payload = {} - payload['description'] = replayid - payload['expireTime'] = 0 - r = self.client.put('StorageCenter/ScReplay/%s' % - self._get_id(screplay), payload, True) - if self._check_result(r): - return True - LOG.error('Error managing replay %s', - screplay.get('description')) - return False - - def unmanage_replay(self, screplay): - """Basically sets the expireTime - - :param screplay: DellSC object. - :return: True on success. False on fail. - """ - if screplay: - payload = {} - payload['expireTime'] = 1440 - r = self.client.put('StorageCenter/ScReplay/%s' % - self._get_id(screplay), payload, True) - if self._check_result(r): - return True - LOG.error('Error unmanaging replay %s', - screplay.get('description')) - return False - - def delete_replay(self, scvolume, replayid): - """Finds a Dell replay by replayid string and expires it. - - Once marked for expiration we do not return the replay as a snapshot - even though it might still exist. (Backend requirements.) - - :param scvolume: Dell volume object. - :param replayid: Name to search for. This is a portion of the snapshot - ID as we do not have space for the entire GUID in the - replay description. - :returns: Boolean for success or failure. - """ - ret = True - LOG.debug('Expiring replay %s', replayid) - # if we do not have the instanceid then we have to find the replay. - replay = self.find_replay(scvolume, replayid) - if replay is not None: - # expire our replay. 
- r = self.client.post('StorageCenter/ScReplay/%s/Expire' % - self._get_id(replay), {}, True) - ret = self._check_result(r) - # If we couldn't find it we call that a success. - return ret - - def create_view_volume(self, volname, screplay, replay_profile_string, - volume_qos, group_qos, dr_profile): - """Creates a new volume named volname from the screplay. - - :param volname: Name of new volume. This is the cinder volume ID. - :param screplay: Dell replay object from which to make a new volume. - :param replay_profile_string: Profiles to be applied to the volume - :param volume_qos: Volume QOS Profile to use. - :param group_qos: Group QOS Profile to use. - :param dr_profile: Data reduction profile to use. - :returns: Dell volume object or None. - """ - folder = self._find_volume_folder(True) - - # Find our replay_profiles. - addids, removeids = self._find_replay_profiles(replay_profile_string) - - # payload is just the volume name and folder if we have one. - payload = {} - payload['Name'] = volname - payload['Notes'] = self.notes - if folder is not None: - payload['VolumeFolder'] = self._get_id(folder) - if addids: - payload['ReplayProfileList'] = addids - # Add our Volume QOS Profile. - self._check_add_profile_payload( - payload, self._find_qos_profile(volume_qos), volume_qos, - 'VolumeQosProfile') - # Add our Group QOS Profile. - self._check_add_profile_payload( - payload, self._find_qos_profile(group_qos, True), group_qos, - 'GroupQosProfile') - r = self.client.post('StorageCenter/ScReplay/%s/CreateView' - % self._get_id(screplay), payload, True) - volume = None - if self._check_result(r): - volume = self._first_result(r) - - # If we have a dr_profile to apply we should do so now. 
def _expire_all_replays(self, scvolume):
    """Expire every inactive replay on scvolume.

    Best effort only: if the replay list cannot be retrieved or an
    individual expire fails we simply move on.

    :param scvolume: Dell SC volume object.
    :returns: Nothing.
    """
    # We just try to grab the replay list and then expire them.
    # If this doesn't work we aren't overly concerned.
    r = self.client.get('StorageCenter/ScVolume/%s/ReplayList'
                        % self._get_id(scvolume))
    if self._check_result(r):
        replays = self._get_json(r)
        # This will be a list. If it isn't bail.
        if isinstance(replays, list):
            for replay in replays:
                if not replay['active']:
                    # Send down an async expire.
                    # We don't care if this fails.
                    self.client.post('StorageCenter/ScReplay/%s/Expire' %
                                     self._get_id(replay), {}, True)

def _wait_for_cmm(self, cmm, scvolume, replayid):
    """Poll a ScCopyMirrorMigrate (CMM) until it finishes or fails.

    We wait for either the CMM to indicate that our copy has finished or
    for our marker replay to show up. We do this because the CMM might
    have been cleaned up by the system before we have a chance to check
    it.

    :param cmm: ScCopyMirrorMigrate object to poll.
    :param scvolume: Destination Dell SC volume object.
    :param replayid: Description of the marker replay to look for.
    :returns: True on success, False on error or timeout.
    """
    # Pick our max number of loops to run AFTER the CMM has gone away
    # and the time to wait between loops.
    # With a 3 second wait time this will be up to a 1 minute timeout
    # after the system claims to have finished.
    sleep = 3
    waitforreplaymarkerloops = 20
    while waitforreplaymarkerloops >= 0:
        r = self.client.get('StorageCenter/ScCopyMirrorMigrate/%s'
                            % self._get_id(cmm))
        if self._check_result(r):
            cmm = self._get_json(r)
            if cmm['state'] == 'Erred' or cmm['state'] == 'Paused':
                return False
            elif cmm['state'] == 'Finished':
                return True
        elif self.find_replay(scvolume, replayid):
            # CMM object is gone but our marker replay made it over,
            # so the copy completed.
            return True
        else:
            # CMM gone and no marker replay yet; burn one grace loop.
            waitforreplaymarkerloops -= 1
        eventlet.sleep(sleep)
    return False

def create_cloned_volume(self, volumename, scvolume, storage_profile,
                         replay_profile_list, volume_qos, group_qos,
                         dr_profile):
    """Creates a volume named volumename from a copy of scvolume.

    :param volumename: Name of new volume. This is the cinder volume ID.
    :param scvolume: Dell volume object.
    :param storage_profile: Storage profile.
    :param replay_profile_list: List of snapshot profiles.
    :param volume_qos: Volume QOS Profile to use.
    :param group_qos: Group QOS Profile to use.
    :param dr_profile: Data reduction profile to use.
    :returns: The new volume's Dell volume object.
    :raises VolumeBackendAPIException: if error doing copy.
    """
    LOG.info('create_cloned_volume: Creating %(dst)s from %(src)s',
             {'dst': volumename,
              'src': scvolume['name']})

    # Reuse size_to_gb rather than duplicating the conversion inline.
    size, _rem = self.size_to_gb(scvolume['configuredSize'])
    # Create our new volume.
    newvol = self.create_volume(
        volumename, size, storage_profile, replay_profile_list,
        volume_qos, group_qos, dr_profile)
    if newvol:
        try:
            # Snap a marker replay so _wait_for_cmm can tell the copy
            # made it even if the CMM object is cleaned up early.
            replayid = str(uuid.uuid4())
            screplay = self.create_replay(scvolume, replayid, 60)
            if not screplay:
                raise exception.VolumeBackendAPIException(
                    message='Unable to create replay marker.')
            # Copy our source.
            payload = {}
            payload['CopyReplays'] = True
            payload['DestinationVolume'] = self._get_id(newvol)
            payload['SourceVolume'] = self._get_id(scvolume)
            payload['StorageCenter'] = self.ssn
            payload['Priority'] = 'High'
            r = self.client.post('StorageCenter/ScCopyMirrorMigrate/Copy',
                                 payload, True)
            if self._check_result(r):
                cmm = self._get_json(r)
                if (cmm['state'] == 'Erred' or cmm['state'] == 'Paused' or
                        not self._wait_for_cmm(cmm, newvol, replayid)):
                    raise exception.VolumeBackendAPIException(
                        message='ScCopyMirrorMigrate error.')
                LOG.debug('create_cloned_volume: Success')
                self._expire_all_replays(newvol)
                return newvol
            else:
                raise exception.VolumeBackendAPIException(
                    message='ScCopyMirrorMigrate fail.')
        except exception.VolumeBackendAPIException:
            # It didn't. Delete the volume.
            self.delete_volume(volumename, self._get_id(newvol))
            raise
    # Tell the user.
    LOG.error('create_cloned_volume: Unable to clone volume')
    return None

def expand_volume(self, scvolume, newsize):
    """Expands scvolume to newsize GBs.

    :param scvolume: Dell volume object to be expanded.
    :param newsize: The new size of the volume object.
    :returns: The updated Dell volume object on success or None on
              failure.
    """
    vol = None
    payload = {}
    payload['NewSize'] = '%d GB' % newsize
    r = self.client.post('StorageCenter/ScVolume/%s/ExpandToSize'
                         % self._get_id(scvolume), payload, True)
    if self._check_result(r):
        vol = self._get_json(r)
    # More info might be good.
    if vol is not None:
        LOG.debug('Volume expanded: %(name)s %(size)s',
                  {'name': vol['name'],
                   'size': vol['configuredSize']})
    else:
        LOG.error('Error expanding volume %s.', scvolume['name'])
    return vol
- """ - payload = {} - payload['Name'] = name - r = self.client.put('StorageCenter/ScVolume/%s' - % self._get_id(scvolume), - payload, True) - if self._check_result(r): - return True - - LOG.error('Error renaming volume %(original)s to %(name)s', - {'original': scvolume['name'], - 'name': name}) - return False - - def _update_profile(self, scvolume, profile, profilename, - profiletype, restname, allowprefname, - continuewithoutdefault=False): - prefs = self._get_user_preferences() - if not prefs: - return False - - if not prefs.get(allowprefname): - LOG.error('User does not have permission to change ' - '%s selection.', profiletype) - return False - - if profilename: - if not profile: - LOG.error('%(ptype)s %(pname)s was not found.', - {'ptype': profiletype, - 'pname': profilename}) - return False - else: - # Going from specific profile to the user default - profile = prefs.get(restname) - if not profile and not continuewithoutdefault: - LOG.error('Default %s was not found.', profiletype) - return False - - LOG.info('Switching volume %(vol)s to profile %(prof)s.', - {'vol': scvolume['name'], - 'prof': profile.get('name')}) - payload = {} - payload[restname] = self._get_id(profile) if profile else None - r = self.client.put('StorageCenter/ScVolumeConfiguration/%s' - % self._get_id(scvolume), payload, True) - if self._check_result(r): - return True - - LOG.error('Error changing %(ptype)s for volume ' - '%(original)s to %(name)s', - {'ptype': profiletype, - 'original': scvolume['name'], - 'name': profilename}) - return False - - def update_storage_profile(self, scvolume, storage_profile): - """Update a volume's Storage Profile. - - Changes the volume setting to use a different Storage Profile. If - storage_profile is None, will reset to the default profile for the - cinder user account. - - :param scvolume: The Storage Center volume to be updated. - :param storage_profile: The requested Storage Profile name. - :returns: True if successful, False otherwise. 
- """ - profile = self._find_storage_profile(storage_profile) - return self._update_profile(scvolume, profile, storage_profile, - 'Storage Profile', 'storageProfile', - 'allowStorageProfileSelection') - - def update_datareduction_profile(self, scvolume, dr_profile): - """Update a volume's Data Reduction Profile - - Changes the volume setting to use a different data reduction profile. - If dr_profile is None, will reset to the default profile for the - cinder user account. - - :param scvolume: The Storage Center volume to be updated. - :param dr_profile: The requested data reduction profile name. - :returns: True if successful, False otherwise. - """ - profile = self._find_data_reduction_profile(dr_profile) - return self._update_profile(scvolume, profile, dr_profile, - 'Data Reduction Profile', - 'dataReductionProfile', - 'allowDataReductionSelection') - - def update_qos_profile(self, scvolume, qosprofile, grouptype=False): - """Update a volume's QOS profile - - Changes the volume setting to use a different QOS Profile. - - :param scvolume: The Storage Center volume to be updated. - :param qosprofile: The requested QOS profile name. - :param grouptype: Is this a group QOS profile? - :returns: True if successful, False otherwise. - """ - profiletype = 'groupQosProfile' if grouptype else 'volumeQosProfile' - - profile = self._find_qos_profile(qosprofile, grouptype) - return self._update_profile(scvolume, profile, qosprofile, - 'Qos Profile', profiletype, - 'allowQosProfileSelection', - grouptype) - - def _get_user_preferences(self): - """Gets the preferences and defaults for this user. - - There are a set of preferences and defaults for each user on the - Storage Center. This retrieves all settings for the current account - used by Cinder. 
- """ - r = self.client.get('StorageCenter/StorageCenter/%s/UserPreferences' % - self.ssn) - if self._check_result(r): - return self._get_json(r) - return {} - - def _delete_server(self, scserver): - """Deletes scserver from the backend. - - Just give it a shot. If it fails it doesn't matter to cinder. This - is generally used when a create_server call fails in the middle of - creation. Cinder knows nothing of the servers objects on Dell backends - so success or failure is purely an internal thing. - - Note that we do not delete a server object in normal operation. - - :param scserver: Dell server object to delete. - :returns: Nothing. Only logs messages. - """ - LOG.debug('ScServer delete %s', self._get_id(scserver)) - if scserver.get('deleteAllowed') is True: - r = self.client.delete('StorageCenter/ScServer/%s' - % self._get_id(scserver), async=True) - if self._check_result(r): - LOG.debug('ScServer deleted.') - else: - LOG.debug('_delete_server: deleteAllowed is False.') - - def find_replay_profile(self, name): - """Finds the Dell SC replay profile object name. - - :param name: Name of the replay profile object. This is the - consistency group id. - :return: Dell SC replay profile or None. - :raises VolumeBackendAPIException: - """ - self.cg_except_on_no_support() - pf = self._get_payload_filter() - pf.append('ScSerialNumber', self.ssn) - pf.append('Name', name) - r = self.client.post('StorageCenter/ScReplayProfile/GetList', - pf.payload) - if self._check_result(r): - profilelist = self._get_json(r) - if profilelist: - if len(profilelist) > 1: - LOG.error('Multiple replay profiles under name %s', - name) - raise exception.VolumeBackendAPIException( - data=_('Multiple profiles found.')) - return profilelist[0] - return None - - def create_replay_profile(self, name): - """Creates a replay profile on the Dell SC. - - :param name: The ID of the consistency group. This will be matched to - the name on the Dell SC. - :return: SC profile or None. 
- """ - self.cg_except_on_no_support() - profile = self.find_replay_profile(name) - if not profile: - payload = {} - payload['StorageCenter'] = self.ssn - payload['Name'] = name - payload['Type'] = 'Consistent' - payload['Notes'] = self.notes - r = self.client.post('StorageCenter/ScReplayProfile', - payload, True) - # 201 expected. - if self._check_result(r): - profile = self._first_result(r) - return profile - - def delete_replay_profile(self, profile): - """Delete the replay profile from the Dell SC. - - :param profile: SC replay profile. - :return: Nothing. - :raises VolumeBackendAPIException: - """ - self.cg_except_on_no_support() - r = self.client.delete('StorageCenter/ScReplayProfile/%s' % - self._get_id(profile), async=True) - if self._check_result(r): - LOG.info('Profile %s has been deleted.', - profile.get('name')) - else: - # We failed due to a failure to delete an existing profile. - # This is reason to raise an exception. - LOG.error('Unable to delete profile %s.', profile.get('name')) - raise exception.VolumeBackendAPIException( - data=_('Error deleting replay profile.')) - - def _get_volume_configuration(self, scvolume): - """Get the ScVolumeConfiguration object. - - :param scvolume: The Dell SC volume object. - :return: The SCVolumeConfiguration object or None. - """ - r = self.client.get('StorageCenter/ScVolume/%s/VolumeConfiguration' % - self._get_id(scvolume)) - if self._check_result(r): - return self._first_result(r) - return None - - def _update_volume_profiles(self, scvolume, addid=None, removeid=None): - """Either Adds or removes the listed profile from the SC volume. - - :param scvolume: Dell SC volume object. - :param addid: Profile ID to be added to the SC volume configuration. - :param removeid: ID to be removed to the SC volume configuration. - :return: True/False on success/failure. 
- """ - if scvolume: - scvolumecfg = self._get_volume_configuration(scvolume) - if scvolumecfg: - profilelist = scvolumecfg.get('replayProfileList', []) - newprofilelist = [] - # Do we have one to add? Start the list with it. - if addid: - newprofilelist = [addid] - # Re-add our existing profiles. - for profile in profilelist: - profileid = self._get_id(profile) - # Make sure it isn't one we want removed and that we - # haven't already added it. (IE it isn't the addid.) - if (profileid != removeid and - newprofilelist.count(profileid) == 0): - newprofilelist.append(profileid) - # Update our volume configuration. - payload = {} - payload['ReplayProfileList'] = newprofilelist - r = self.client.put('StorageCenter/ScVolumeConfiguration/%s' % - self._get_id(scvolumecfg), payload, True) - # check result - LOG.debug('_update_volume_profiles %s : %s : %s', - self._get_id(scvolume), - profilelist, - r) - # Good return? - if self._check_result(r): - return True - return False - - def _add_cg_volumes(self, profileid, add_volumes): - """Trundles through add_volumes and adds the replay profile to them. - - :param profileid: The ID of the replay profile. - :param add_volumes: List of Dell SC volume objects that are getting - added to the consistency group. - :return: True/False on success/failure. - """ - for vol in add_volumes: - scvolume = self.find_volume(vol['id'], vol['provider_id']) - if (self._update_volume_profiles(scvolume, - addid=profileid, - removeid=None)): - LOG.info('Added %s to cg.', vol['id']) - else: - LOG.error('Failed to add %s to cg.', vol['id']) - return False - return True - - def _remove_cg_volumes(self, profileid, remove_volumes): - """Removes the replay profile from the remove_volumes list of vols. - - :param profileid: The ID of the replay profile. - :param remove_volumes: List of Dell SC volume objects that are getting - removed from the consistency group. - :return: True/False on success/failure. 
- """ - for vol in remove_volumes: - scvolume = self.find_volume(vol['id'], vol['provider_id']) - if (self._update_volume_profiles(scvolume, - addid=None, - removeid=profileid)): - LOG.info('Removed %s from cg.', vol['id']) - else: - LOG.error('Failed to remove %s from cg.', vol['id']) - return False - return True - - def update_cg_volumes(self, profile, add_volumes=None, - remove_volumes=None): - """Adds or removes the profile from the specified volumes - - :param profile: Dell SC replay profile object. - :param add_volumes: List of volumes we are adding to the consistency - group. (Which is to say we are adding the profile - to this list of volumes.) - :param remove_volumes: List of volumes we are removing from the - consistency group. (Which is to say we are - removing the profile from this list of volumes.) - :return: True/False on success/failure. - """ - self.cg_except_on_no_support() - ret = True - profileid = self._get_id(profile) - if add_volumes: - LOG.info('Adding volumes to cg %s.', profile['name']) - ret = self._add_cg_volumes(profileid, add_volumes) - if ret and remove_volumes: - LOG.info('Removing volumes from cg %s.', profile['name']) - ret = self._remove_cg_volumes(profileid, remove_volumes) - return ret - - def _init_cg_volumes(self, profileid): - """Gets the cg volume list and maps/unmaps the non active volumes. - - :param profileid: Replay profile identifier. - :return: Nothing - """ - r = self.client.get('StorageCenter/ScReplayProfile/%s/VolumeList' % - profileid) - if self._check_result(r): - vols = self._get_json(r) - for vol in vols: - if (vol.get('active') is not True or - vol.get('replayAllowed') is not True): - self._init_volume(vol) - - def snap_cg_replay(self, profile, replayid, expire): - """Snaps a replay of a consistency group. - - :param profile: The name of the consistency group profile. - :param replayid: The name of the replay. - :param expire: Time in mintues before a replay expires. 0 means no - expiration. 
def _find_sc_cg(self, profile, replayid):
    """Finds the sc consistency group that matches replayid.

    :param profile: Dell profile object.
    :param replayid: Name to search for. This is a portion of the
                     snapshot ID as we do not have space for the entire
                     GUID in the replay description.
    :return: Consistency group object or None.
    """
    self.cg_except_on_no_support()
    r = self.client.get(
        'StorageCenter/ScReplayProfile/%s/ConsistencyGroupList'
        % self._get_id(profile))
    if self._check_result(r):
        cglist = self._get_json(r)
        if cglist and isinstance(cglist, list):
            for cg in cglist:
                desc = cg.get('description')
                # The description is a 30 character prefix of the
                # snapshot GUID.
                if len(desc) >= 30 and replayid.startswith(desc):
                    # We found our cg so return it.
                    return cg
    return None

def _find_cg_replays(self, profile, replayid):
    """Searches for the replays that match replayid for a given profile.

    replayid is stored in the replay's description attribute.

    :param profile: Dell profile object.
    :param replayid: Name to search for. This is a portion of the
                     snapshot ID as we do not have space for the entire
                     GUID in the replay description.
    :returns: Dell replay object array.
    """
    self.cg_except_on_no_support()
    replays = []
    sccg = self._find_sc_cg(profile, replayid)
    if sccg:
        r = self.client.get(
            'StorageCenter/ScReplayConsistencyGroup/%s/ReplayList'
            % self._get_id(sccg))
        # Check the response before decoding it; on error return the
        # empty list rather than whatever _get_json makes of a failure.
        if self._check_result(r):
            replays = self._get_json(r)
    else:
        LOG.error('Unable to locate snapshot %s', replayid)

    return replays

def delete_cg_replay(self, profile, replayid):
    """Finds a Dell cg replay by replayid string and expires it.

    Once marked for expiration we do not return the replay as a snapshot
    even though it might still exist. (Backend requirements.)

    :param profile: Dell SC replay profile object.
    :param replayid: Name to search for. This is a portion of the snapshot
                     ID as we do not have space for the entire GUID in the
                     replay description.
    :returns: Boolean for success or failure.
    """
    self.cg_except_on_no_support()
    LOG.debug('Expiring consistency group replay %s', replayid)
    replays = self._find_cg_replays(profile,
                                    replayid)
    for replay in replays:
        instanceid = self._get_id(replay)
        LOG.debug('Expiring replay %s', instanceid)
        r = self.client.post('StorageCenter/ScReplay/%s/Expire'
                             % instanceid, {}, True)
        if not self._check_result(r):
            return False
    # We either couldn't find it or expired it.
    return True

def cg_except_on_no_support(self):
    """Raise if the backend does not support consistency groups.

    :raises NotImplementedError: when API < 2.1.
    """
    if not self.consisgroups:
        msg = _('Dell API 2.1 or later required'
                ' for Consistency Group support')
        # NotImplementedError takes positional args only; passing
        # data= raised a TypeError instead of the intended error.
        raise NotImplementedError(msg)

@staticmethod
def size_to_gb(spacestring):
    """Splits a SC size string into GB and a remainder.

    Space is returned in a string like ...
    7.38197504E8 Bytes
    Need to split that apart and convert to GB.

    :param spacestring: SC size string.
    :return: Size in GB and remainder in bytes.
    :raises VolumeBackendAPIException: on an unparsable size string.
    """
    try:
        n = spacestring.split(' ', 1)
        fgb = int(float(n[0]) // 1073741824)
        frem = int(float(n[0]) % 1073741824)
        return fgb, frem

    except Exception:
        # We received an invalid size string. Blow up.
        raise exception.VolumeBackendAPIException(
            data=_('Error retrieving volume size'))

def _import_one(self, scvolume, newname):
    """Renames scvolume to newname and moves it to our volume folder.

    :param scvolume: Dell SC volume object to import.
    :param newname: New name (the cinder volume ID).
    :return: Updated SC volume object or None.
    """
    # Find our folder
    folder = self._find_volume_folder(True)

    # If we don't have a place to put our volume just log it.
    if folder is None:
        LOG.warning('Unable to create folder %s', self.vfname)

    # Rename and move our volume.
    payload = {}
    payload['Name'] = newname
    if folder:
        payload['VolumeFolder'] = self._get_id(folder)

    r = self.client.put('StorageCenter/ScVolume/%s' %
                        self._get_id(scvolume), payload, True)
    if self._check_result(r):
        return self._get_json(r)
    return None

def manage_existing(self, newname, existing):
    """Finds the volume named existing and renames it.

    This checks a few things. The volume has to exist. There can
    only be one volume by that name. Since cinder manages volumes
    by the GB it has to be defined on a GB boundary.

    This renames existing to newname. newname is the guid from
    the cinder volume['id']. The volume is moved to the defined
    cinder volume folder.

    :param newname: Name to rename the volume to.
    :param existing: The existing volume dict.
    :return: scvolume.
    :raises VolumeBackendAPIException, ManageExistingInvalidReference:
    """
    vollist = self._get_volume_list(existing.get('source-name'),
                                    existing.get('source-id'),
                                    False)
    count = len(vollist)
    # If we found one volume with that name we can work with it.
    if count == 1:
        # First thing to check is if the size is something we can
        # work with.
        sz, rem = self.size_to_gb(vollist[0]['configuredSize'])
        if rem > 0:
            raise exception.VolumeBackendAPIException(
                data=_('Volume size must be a multiple of 1 GB.'))

        # We only want to grab detached volumes.
        mappings = self._find_mappings(vollist[0])
        if mappings:
            msg = _('Volume is attached to a server. (%s)') % existing
            raise exception.VolumeBackendAPIException(data=msg)

        scvolume = self._import_one(vollist[0], newname)
        if scvolume:
            return scvolume

        msg = _('Unable to manage volume %s') % existing
        raise exception.VolumeBackendAPIException(data=msg)
    elif count > 1:
        raise exception.ManageExistingInvalidReference(
            existing_ref=existing, reason=_('Volume not unique.'))
    else:
        raise exception.ManageExistingInvalidReference(
            existing_ref=existing, reason=_('Volume not found.'))

def get_unmanaged_volume_size(self, existing):
    """Looks up the volume named existing and returns its size string.

    :param existing: Existing volume dict.
    :return: The SC configuredSize in GB.
    :raises ManageExistingInvalidReference: if not found or not unique.
    """
    vollist = self._get_volume_list(existing.get('source-name'),
                                    existing.get('source-id'),
                                    False)
    count = len(vollist)
    # If we found one volume with that name we can work with it.
    if count == 1:
        sz, rem = self.size_to_gb(vollist[0]['configuredSize'])
        if rem > 0:
            raise exception.VolumeBackendAPIException(
                data=_('Volume size must be a multiple of 1 GB.'))
        return sz
    elif count > 1:
        raise exception.ManageExistingInvalidReference(
            existing_ref=existing, reason=_('Volume not unique.'))
    else:
        raise exception.ManageExistingInvalidReference(
            existing_ref=existing, reason=_('Volume not found.'))
def _find_qos(self, qosnode, ssn=-1):
    """Find (or create) the Dell SC QOS Node entry for replication.

    :param qosnode: Name of qosnode.
    :param ssn: SSN to search on.
    :return: scqos node object.
    :raises VolumeBackendAPIException: if it cannot be found or created.
    """
    ssn = self._vet_ssn(ssn)
    pf = self._get_payload_filter()
    pf.append('scSerialNumber', ssn)
    pf.append('name', qosnode)
    r = self.client.post('StorageCenter/ScReplicationQosNode/GetList',
                         pf.payload)
    if self._check_result(r):
        nodes = self._get_json(r)
        if nodes:
            return nodes[0]
        else:
            # Not there; create a default 1 Gbps unlimited node.
            payload = {}
            payload['LinkSpeed'] = '1 Gbps'
            payload['Name'] = qosnode
            payload['StorageCenter'] = ssn
            payload['BandwidthLimited'] = False
            r = self.client.post('StorageCenter/ScReplicationQosNode',
                                 payload, True)
            if self._check_result(r):
                return self._get_json(r)

    LOG.error('Unable to find or create QoS Node named %s', qosnode)
    raise exception.VolumeBackendAPIException(
        data=_('Failed to find QoSnode'))

def update_replicate_active_replay(self, scvolume, replactive):
    """Enables or disables replicating the active replay for given vol.

    :param scvolume: SC Volume object.
    :param replactive: True or False
    :return: True or False
    """
    r = self.client.get('StorageCenter/ScVolume/%s/ReplicationSourceList' %
                        self._get_id(scvolume))
    if self._check_result(r):
        replications = self._get_json(r)
        for replication in replications:
            if replication['replicateActiveReplay'] != replactive:
                payload = {'ReplicateActiveReplay': replactive}
                r = self.client.put('StorageCenter/ScReplication/%s' %
                                    replication['instanceId'],
                                    payload, True)
                if not self._check_result(r):
                    return False
    return True

def get_screplication(self, scvolume, destssn):
    """Find the screplication object for the volume on the dest backend.

    :param scvolume: Dell SC volume object.
    :param destssn: Destination SSN.
    :return: Replication object or None.
    """
    LOG.debug('get_screplication')
    r = self.client.get('StorageCenter/ScVolume/%s/ReplicationSourceList' %
                        self._get_id(scvolume))
    if self._check_result(r):
        replications = self._get_json(r)
        for replication in replications:
            # So we need to find the replication we are looking for.
            LOG.debug(replication)
            LOG.debug('looking for %s', destssn)
            if replication.get('destinationScSerialNumber') == destssn:
                return replication
    # Unable to locate replication.
    LOG.warning('Unable to locate replication %(vol)s to %(ssn)s',
                {'vol': scvolume.get('name'),
                 'ssn': destssn})
    return None

def delete_replication(self, scvolume, destssn, deletedestvolume=True):
    """Deletes the SC replication object from scvolume to the destssn.

    :param scvolume: Dell SC Volume object.
    :param destssn: SC the replication is replicating to.
    :param deletedestvolume: Delete or keep dest volume.
    :return: True on success. False on fail.
    """
    replication = self.get_screplication(scvolume, destssn)
    if replication:
        payload = {}
        payload['DeleteDestinationVolume'] = deletedestvolume
        payload['RecycleDestinationVolume'] = deletedestvolume
        payload['DeleteRestorePoint'] = True
        # 'async' became a reserved word in python 3.7, so the REST
        # client's keyword argument must be passed via an unpacked dict.
        r = self.client.delete('StorageCenter/ScReplication/%s' %
                               self._get_id(replication), payload=payload,
                               **{'async': True})
        if self._check_result(r):
            # check that we whacked the dest volume
            LOG.info('Replication %(vol)s to %(dest)s.',
                     {'vol': scvolume.get('name'),
                      'dest': destssn})

            return True
    LOG.error('Unable to delete replication for '
              '%(vol)s to %(dest)s.',
              {'vol': scvolume.get('name'),
               'dest': destssn})
    return False

def _repl_name(self, name):
    """Returns the name a replication destination volume is given.

    :param name: Source volume name.
    :return: Prefixed replication volume name.
    """
    return self.repl_prefix + name

def _get_disk_folder(self, ssn, foldername):
    """Find a disk folder by name on the given SC.

    :param ssn: SC serial number to look on.
    :param foldername: Disk folder name; falsy skips the lookup.
    :return: Disk folder object or None.
    """
    diskfolder = None
    # If no folder name we just pass through this.
    if foldername:
        pf = self._get_payload_filter()
        pf.append('scSerialNumber', ssn)
        pf.append('name', foldername)
        r = self.client.post('StorageCenter/ScDiskFolder/GetList',
                             pf.payload)
        if self._check_result(r):
            try:
                # Go for broke.
                diskfolder = self._get_json(r)[0]
            except Exception:
                # Deliberate best-effort: we just log this as an error
                # and return nothing.
                LOG.error('Unable to find '
                          'disk folder %(name)s on %(ssn)s',
                          {'name': foldername,
                           'ssn': ssn})
    return diskfolder

def create_replication(self, scvolume, destssn, qosnode,
                       synchronous, diskfolder, replicate_active):
    """Create repl from scvol to destssn.

    :param scvolume: Dell SC volume object.
    :param destssn: Destination SSN string.
    :param qosnode: Name of Dell SC QOS Node for this replication.
    :param synchronous: Boolean.
    :param diskfolder: optional disk folder name.
    :param replicate_active: replicate active replay.
    :return: Dell SC replication object.
    """
    screpl = None
    ssn = self.find_sc(int(destssn))
    payload = {}
    payload['DestinationStorageCenter'] = ssn
    payload['QosNode'] = self._get_id(self._find_qos(qosnode))
    payload['SourceVolume'] = self._get_id(scvolume)
    payload['StorageCenter'] = self.find_sc()
    # Have to replicate the active replay.
    payload['ReplicateActiveReplay'] = replicate_active or synchronous
    if synchronous:
        payload['Type'] = 'Synchronous'
        # If our type is synchronous we prefer high availability be set.
        payload['SyncMode'] = 'HighAvailability'
    else:
        payload['Type'] = 'Asynchronous'
    destinationvolumeattributes = {}
    destinationvolumeattributes['CreateSourceVolumeFolderPath'] = True
    destinationvolumeattributes['Notes'] = self.notes
    destinationvolumeattributes['Name'] = self._repl_name(scvolume['name'])
    # Find our disk folder. If they haven't specified one this will just
    # drop through. If they have specified one and it can't be found the
    # error will be logged but this will keep going.
    df = self._get_disk_folder(destssn, diskfolder)
    if df:
        destinationvolumeattributes['DiskFolder'] = self._get_id(df)
    payload['DestinationVolumeAttributes'] = destinationvolumeattributes
    r = self.client.post('StorageCenter/ScReplication', payload, True)
    # 201 expected.
    if self._check_result(r):
        LOG.info('Replication created for %(volname)s to %(destsc)s',
                 {'volname': scvolume.get('name'),
                  'destsc': destssn})
        screpl = self._get_json(r)

    # Check we did something.
    if not screpl:
        # Failed to launch. Inform user. Throw.
        LOG.error('Unable to replicate %(volname)s to %(destsc)s',
                  {'volname': scvolume.get('name'),
                   'destsc': destssn})
    return screpl
def remove_mappings(self, scvol):
    """Peels all the mappings off of scvol.

    :param scvol: Storage Center volume object.
    :return: True/False on Success/Failure.
    """
    if scvol:
        r = self.client.post('StorageCenter/ScVolume/%s/Unmap' %
                             self._get_id(scvol), {}, True)
        return self._check_result(r)
    return False

def break_replication(self, volumename, instance_id, destssn):
    """This just breaks the replication.

    If we find the source we just delete the replication. If the source
    is down then we find the destination and unmap it. Fail pretty much
    every time this goes south.

    :param volumename: Volume name is the guid from the cinder volume.
    :param instance_id: Storage Center volume object instance id.
    :param destssn: Destination ssn.
    :return: Replication SC volume object.
    """
    replinstanceid = None
    scvolume = self.find_volume(volumename, instance_id)
    if scvolume:
        screplication = self.get_screplication(scvolume, destssn)
        # if we got our replication volume we can do this nicely.
        if screplication:
            replinstanceid = (
                screplication['destinationVolume']['instanceId'])
    screplvol = self.find_repl_volume(self._repl_name(volumename),
                                      destssn, replinstanceid)
    # delete_replication fails to delete replication without also
    # stuffing it into the recycle bin.
    # Instead we try to unmap the destination volume which will break
    # the replication but leave the replication object on the SC.
    if self.remove_mappings(screplvol):
        # Try to kill mappings on the source.
        # We don't care that this succeeded or failed. Just move on.
        self.remove_mappings(scvolume)

    return screplvol

def _get_replay_list(self, scvolume):
    """Return the replay list for scvolume, or [] on failure.

    :param scvolume: Dell SC volume object.
    :return: List of replay objects.
    """
    r = self.client.get('StorageCenter/ScVolume/%s/ReplayList'
                        % self._get_id(scvolume))
    if self._check_result(r):
        return self._get_json(r)
    return []

def find_common_replay(self, svolume, dvolume):
    """Finds the common replay between two volumes.

    This assumes that one volume was replicated from the other. This
    should return the most recent replay.

    :param svolume: Source SC Volume.
    :param dvolume: Destination SC Volume.
    :return: Common replay or None.
    """
    if svolume and dvolume:
        # Build the set of source indexes once instead of rescanning the
        # source list for every destination replay (was O(n*m)).
        sindexes = {sreplay['globalIndex']
                    for sreplay in self._get_replay_list(svolume)}
        for dreplay in self._get_replay_list(dvolume):
            if dreplay['globalIndex'] in sindexes:
                return dreplay
    return None

def start_replication(self, svolume, dvolume,
                      replicationtype, qosnode, activereplay):
    """Starts a replication between volumes.

    Requires the dvolume to be in an appropriate state to start this.

    :param svolume: Source SC Volume.
    :param dvolume: Destination SC Volume
    :param replicationtype: Asynchronous or synchronous.
    :param qosnode: QOS node name.
    :param activereplay: Boolean to replicate the active replay or not.
    :return: ScReplication object or None.
    """
    if svolume and dvolume:
        qos = self._find_qos(qosnode, svolume['scSerialNumber'])
        if qos:
            payload = {}
            payload['QosNode'] = self._get_id(qos)
            payload['SourceVolume'] = self._get_id(svolume)
            payload['StorageCenter'] = svolume['scSerialNumber']
            # Have to replicate the active replay.
            payload['ReplicateActiveReplay'] = activereplay
            payload['Type'] = replicationtype
            payload['DestinationVolume'] = self._get_id(dvolume)
            payload['DestinationStorageCenter'] = dvolume['scSerialNumber']
            r = self.client.post('StorageCenter/ScReplication', payload,
                                 True)
            # 201 expected.
            if self._check_result(r):
                LOG.info('Replication created for '
                         '%(src)s to %(dest)s',
                         {'src': svolume.get('name'),
                          'dest': dvolume.get('name')})
                screpl = self._get_json(r)
                return screpl
    return None

def replicate_to_common(self, svolume, dvolume, qosnode):
    """Reverses a replication between two volumes.

    :param svolume: Failed over volume. (Current)
    :param dvolume: Original source volume.
    :param qosnode: QOS node name to use to create the replay.
    :return: ScReplication object or None.
    """
    # find our common replay.
    creplay = self.find_common_replay(svolume, dvolume)
    # if we found one.
    if creplay:
        # create a view volume from the common replay.
        payload = {}
        # funky name.
        payload['Name'] = 'fback:' + dvolume['name']
        payload['Notes'] = self.notes
        payload['VolumeFolder'] = self._get_id(dvolume['volumeFolder'])
        r = self.client.post('StorageCenter/ScReplay/%s/CreateView'
                             % self._get_id(creplay), payload, True)
        if self._check_result(r):
            vvolume = self._get_json(r)
            if vvolume:
                # snap a replay and start replicating.
                if self.create_replay(svolume, 'failback', 600):
                    return self.start_replication(svolume, vvolume,
                                                  'Asynchronous', qosnode,
                                                  False)
    # No joy. Error the volume.
    return None

def flip_replication(self, svolume, dvolume, name,
                     replicationtype, qosnode, activereplay):
    """Enables replication from current destination volume to source.

    :param svolume: Current source. New destination.
    :param dvolume: Current destination. New source.
    :param name: Volume name.
    :param replicationtype: Sync or async
    :param qosnode: qos node for the new source ssn.
    :param activereplay: replicate the active replay.
    :return: True/False.
    """
    # We are flipping a replication. That means there was a replication to
    # start with. Delete that.
    if self.delete_replication(svolume, dvolume['scSerialNumber'], False):
        # Kick off a replication going the other way.
        if self.start_replication(dvolume, svolume, replicationtype,
                                  qosnode, activereplay) is not None:
            # rename
            if (self.rename_volume(svolume, self._repl_name(name)) and
                    self.rename_volume(dvolume, name)):
                return True
    LOG.warning('flip_replication: Unable to replicate '
                '%(name)s from %(src)s to %(dst)s',
                {'name': name,
                 'src': dvolume['scSerialNumber'],
                 'dst': svolume['scSerialNumber']})
    return False
- """ - if screplid: - r = self.client.get( - 'StorageCenter/ScReplication/%s/CurrentProgress' % screplid) - if self._check_result(r): - progress = self._get_json(r) - try: - remaining = float( - progress['amountRemaining'].split(' ', 1)[0]) - return progress['synced'], remaining - except Exception: - LOG.warning('replication_progress: Invalid replication' - ' progress information returned: %s', - progress) - return None, None - - def is_swapped(self, provider_id, sclivevolume): - if (sclivevolume.get('primaryVolume') and - sclivevolume['primaryVolume']['instanceId'] != provider_id): - LOG.debug('Volume %(pid)r in Live Volume %(lv)r is swapped.', - {'pid': provider_id, 'lv': sclivevolume}) - return True - return False - - def is_failed_over(self, provider_id, sclivevolume): - # either the secondary is active or the secondary is now our primary. - if (sclivevolume.get('secondaryRole') == 'Activated' or - self.is_swapped(provider_id, sclivevolume)): - return True - return False - - def _sc_live_volumes(self, ssn): - if ssn: - r = self.client.get('StorageCenter/StorageCenter/%s/LiveVolumeList' - % ssn) - if self._check_result(r): - return self._get_json(r) - return [] - - def _get_live_volumes(self): - # Work around for a FW bug. Instead of grabbing the entire list at - # once we have to Trundle through each SC's list. - lvs = [] - pf = self._get_payload_filter() - pf.append('connected', True) - r = self.client.post('StorageCenter/StorageCenter/GetList', - pf.payload) - if self._check_result(r): - # Should return [] if nothing there. - # Just in case do the or. - scs = self._get_json(r) or [] - for sc in scs: - lvs += self._sc_live_volumes(self._get_id(sc)) - return lvs - - def get_live_volume(self, primaryid, name=None): - """Get's the live ScLiveVolume object for the vol with primaryid. - - :param primaryid: InstanceId of the primary volume. - :param name: Volume name associated with this live volume. 
- :return: ScLiveVolume object or None - """ - sclivevol = None - if primaryid: - # Try from our primary SSN. This would be the authoritay on the - # Live Volume in question. - lvs = self._sc_live_volumes(primaryid.split('.')[0]) - # No, grab them all and see if we are on the secondary. - if not lvs: - lvs = self._get_live_volumes() - if lvs: - # Look for our primaryid. - for lv in lvs: - if ((lv.get('primaryVolume') and - lv['primaryVolume']['instanceId'] == primaryid) or - (lv.get('secondaryVolume') and - lv['secondaryVolume']['instanceId'] == primaryid)): - sclivevol = lv - break - # We might not be able to find the LV via the primaryid. - # So look for LVs that match our name. - if name and sclivevol is None: - # If we have a primaryVolume we will have an - # instanceName. Otherwise check the secondaryVolume - # if it exists. - if (name in lv['instanceName'] or - (lv.get('secondaryVolume') and - name in lv['secondaryVolume']['instanceName'])): - sclivevol = lv - - LOG.debug('get_live_volume: %r', sclivevol) - return sclivevol - - def _get_hbas(self, serverid): - # Helper to get the hba's of a given server. - r = self.client.get('StorageCenter/ScServer/%s/HbaList' % serverid) - if self._check_result(r): - return self._get_json(r) - return None - - def map_secondary_volume(self, sclivevol, scdestsrv): - """Map's the secondary volume or a LiveVolume to destsrv. - - :param sclivevol: ScLiveVolume object. - :param scdestsrv: ScServer object for the destination. - :return: ScMappingProfile object or None on failure. 
- """ - payload = {} - payload['Server'] = self._get_id(scdestsrv) - payload['Advanced'] = {'MapToDownServerHbas': True} - r = self.client.post('StorageCenter/ScLiveVolume/%s/MapSecondaryVolume' - % self._get_id(sclivevol), payload, True) - if self._check_result(r): - return self._get_json(r) - return None - - def create_live_volume(self, scvolume, remotessn, active=False, sync=False, - autofailover=False, primaryqos='CinderQOS', - secondaryqos='CinderQOS'): - """This create's a live volume instead of a replication. - - Servers are not created at this point so we cannot map up a remote - server immediately. - - :param scvolume: Source SC Volume - :param remotessn: Destination SSN. - :param active: Replicate the active replay boolean. - :param sync: Sync replication boolean. - :param autofailover: enable autofailover and failback boolean. - :param primaryqos: QOS node name for the primary side. - :param secondaryqos: QOS node name for the remote side. - :return: ScLiveVolume object or None on failure. 
- """ - destssn = self.find_sc(int(remotessn)) - pscqos = self._find_qos(primaryqos) - sscqos = self._find_qos(secondaryqos, destssn) - if not destssn: - LOG.error('create_live_volume: Unable to find remote %s', - remotessn) - elif not pscqos: - LOG.error('create_live_volume: Unable to find or create ' - 'qos node %s', primaryqos) - elif not sscqos: - LOG.error('create_live_volume: Unable to find or create remote' - ' qos node %(qos)s on %(ssn)s', - {'qos': secondaryqos, 'ssn': destssn}) - else: - payload = {} - payload['PrimaryVolume'] = self._get_id(scvolume) - payload['PrimaryQosNode'] = self._get_id(pscqos) - payload['SecondaryQosNode'] = self._get_id(sscqos) - payload['SecondaryStorageCenter'] = destssn - payload['StorageCenter'] = self.ssn - # payload['Dedup'] = False - payload['FailoverAutomaticallyEnabled'] = autofailover - payload['RestoreAutomaticallyEnabled'] = autofailover - payload['SwapRolesAutomaticallyEnabled'] = False - payload['ReplicateActiveReplay'] = (active or autofailover) - if sync or autofailover: - payload['Type'] = 'Synchronous' - payload['SyncMode'] = 'HighAvailability' - else: - payload['Type'] = 'Asynchronous' - secondaryvolumeattributes = {} - secondaryvolumeattributes['CreateSourceVolumeFolderPath'] = True - secondaryvolumeattributes['Notes'] = self.notes - secondaryvolumeattributes['Name'] = scvolume['name'] - payload[ - 'SecondaryVolumeAttributes'] = secondaryvolumeattributes - - r = self.client.post('StorageCenter/ScLiveVolume', payload, True) - if self._check_result(r): - LOG.info('create_live_volume: Live Volume created from' - '%(svol)s to %(ssn)s', - {'svol': self._get_id(scvolume), 'ssn': remotessn}) - return self._get_json(r) - LOG.error('create_live_volume: Failed to create Live Volume from' - '%(svol)s to %(ssn)s', - {'svol': self._get_id(scvolume), 'ssn': remotessn}) - return None - - def delete_live_volume(self, sclivevolume, deletesecondaryvolume): - """Deletes the live volume. 
- - :param sclivevolume: ScLiveVolume object to be whacked. - :return: Boolean on success/fail. - """ - payload = {} - payload['ConvertToReplication'] = False - payload['DeleteSecondaryVolume'] = deletesecondaryvolume - payload['RecycleSecondaryVolume'] = deletesecondaryvolume - payload['DeleteRestorePoint'] = deletesecondaryvolume - r = self.client.delete('StorageCenter/ScLiveVolume/%s' % - self._get_id(sclivevolume), payload, True) - if self._check_result(r): - return True - return False - - def swap_roles_live_volume(self, sclivevolume): - """Swap live volume roles. - - :param sclivevolume: Dell SC live volume object. - :return: True/False on success/failure. - """ - r = self.client.post('StorageCenter/ScLiveVolume/%s/SwapRoles' % - self._get_id(sclivevolume), {}, True) - if self._check_result(r): - return True - return False - - def _find_qos_profile(self, qosprofile, grouptype=False): - if qosprofile: - pf = self._get_payload_filter() - pf.append('ScSerialNumber', self.ssn) - pf.append('Name', qosprofile) - if grouptype: - pf.append('profileType', 'GroupQosProfile') - else: - pf.append('profileType', 'VolumeQosProfile') - r = self.client.post('StorageCenter/ScQosProfile/GetList', - pf.payload) - if self._check_result(r): - qosprofiles = self._get_json(r) - if len(qosprofiles): - return qosprofiles[0] - return None - - def _find_data_reduction_profile(self, drprofile): - if drprofile: - pf = self._get_payload_filter() - pf.append('ScSerialNumber', self.ssn) - pf.append('type', drprofile) - r = self.client.post( - 'StorageCenter/ScDataReductionProfile/GetList', pf.payload) - if self._check_result(r): - drps = self._get_json(r) - if len(drps): - return drps[0] - return None diff --git a/cinder/volume/drivers/dell_emc/sc/storagecenter_common.py b/cinder/volume/drivers/dell_emc/sc/storagecenter_common.py deleted file mode 100644 index 36e53feb0..000000000 --- a/cinder/volume/drivers/dell_emc/sc/storagecenter_common.py +++ /dev/null @@ -1,2013 +0,0 @@ -# Copyright 
(c) 2015-2017 Dell Inc, or its subsidiaries. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import eventlet -from oslo_config import cfg -from oslo_config import types -from oslo_log import log as logging -from oslo_utils import excutils -import six - -from cinder import exception -from cinder.i18n import _ -from cinder.objects import fields -from cinder.volume import configuration -from cinder.volume import driver -from cinder.volume.drivers.dell_emc.sc import storagecenter_api -from cinder.volume.drivers.san.san import san_opts -from cinder.volume import utils as volume_utils -from cinder.volume import volume_types - -common_opts = [ - cfg.IntOpt('dell_sc_ssn', - default=64702, - help='Storage Center System Serial Number'), - cfg.PortOpt('dell_sc_api_port', - default=3033, - help='Dell API port'), - cfg.StrOpt('dell_sc_server_folder', - default='openstack', - help='Name of the server folder to use on the Storage Center'), - cfg.StrOpt('dell_sc_volume_folder', - default='openstack', - help='Name of the volume folder to use on the Storage Center'), - cfg.BoolOpt('dell_sc_verify_cert', - default=False, - help='Enable HTTPS SC certificate verification'), - cfg.StrOpt('secondary_san_ip', - default='', - help='IP address of secondary DSM controller'), - cfg.StrOpt('secondary_san_login', - default='Admin', - help='Secondary DSM user name'), - cfg.StrOpt('secondary_san_password', - default='', - help='Secondary DSM user password name', - secret=True), - 
cfg.PortOpt('secondary_sc_api_port', - default=3033, - help='Secondary Dell API port'), - cfg.MultiOpt('excluded_domain_ip', - item_type=types.IPAddress(), - default=None, - help='Domain IP to be excluded from iSCSI returns.'), - cfg.StrOpt('dell_server_os', - default='Red Hat Linux 6.x', - help='Server OS type to use when creating a new server on the ' - 'Storage Center.') -] - -LOG = logging.getLogger(__name__) - -CONF = cfg.CONF -CONF.register_opts(common_opts, group=configuration.SHARED_CONF_GROUP) - - -class SCCommonDriver(driver.ManageableVD, - driver.ManageableSnapshotsVD, - driver.BaseVD): - - def __init__(self, *args, **kwargs): - super(SCCommonDriver, self).__init__(*args, **kwargs) - self.configuration.append_config_values(common_opts) - self.configuration.append_config_values(san_opts) - self.backend_name =\ - self.configuration.safe_get('volume_backend_name') or 'Dell' - self.backends = self.configuration.safe_get('replication_device') - self.replication_enabled = True if self.backends else False - self.is_direct_connect = False - self.active_backend_id = kwargs.get('active_backend_id', None) - self.failed_over = True if self.active_backend_id else False - LOG.info('Loading %(name)s: Failover state is %(state)r', - {'name': self.backend_name, - 'state': self.failed_over}) - self.storage_protocol = 'iSCSI' - self.failback_timeout = 60 - - def _bytes_to_gb(self, spacestring): - """Space is returned in a string like ... - - 7.38197504E8 Bytes - Need to split that apart and convert to GB. - - :returns: gbs in int form - """ - try: - n = spacestring.split(' ', 1) - fgbs = float(n[0]) / 1073741824.0 - igbs = int(fgbs) - return igbs - except Exception: - # If any of that blew up it isn't in the format we - # thought so eat our error and return None - return None - - def do_setup(self, context): - """One time driver setup. - - Called once by the manager after the driver is loaded. - Sets up clients, check licenses, sets up protocol - specific helpers. 
- """ - self._client = storagecenter_api.SCApiHelper( - self.configuration, self.active_backend_id, self.storage_protocol) - - def check_for_setup_error(self): - """Validates the configuration information.""" - with self._client.open_connection() as api: - api.find_sc() - self.is_direct_connect = api.is_direct_connect - if self.is_direct_connect and self.replication_enabled: - msg = _('Dell Cinder driver configuration error replication ' - 'not supported with direct connect.') - raise exception.InvalidHost(reason=msg) - - # If we are a healthy replicated system make sure our backend - # is alive. - if self.replication_enabled and not self.failed_over: - # Check that our replication destinations are available. - for backend in self.backends: - replssn = backend['target_device_id'] - try: - # Just do a find_sc on it. If it raises we catch - # that and raise with a correct exception. - api.find_sc(int(replssn)) - except exception.VolumeBackendAPIException: - msg = _('Dell Cinder driver configuration error ' - 'replication_device %s not found') % replssn - raise exception.InvalidHost(reason=msg) - - def _get_volume_extra_specs(self, obj): - """Gets extra specs for the given object.""" - type_id = obj.get('volume_type_id') - if type_id: - return volume_types.get_volume_type_extra_specs(type_id) - - return {} - - def _add_volume_to_group(self, api, scvolume, volume): - """Just a helper to add a volume to a group. - - :param api: Dell SC API opbject. - :param scvolume: Dell SC Volume object. - :param volume: Cinder Volume object. - :returns: Nothing. - """ - if scvolume and volume.get('group_id'): - profile = api.find_replay_profile( - volume.get('group_id')) - # If there is a profile then we need to add our - # volumes to it. If there isn't then it was a normal - # group. - if profile: - api.update_cg_volumes(profile, [volume]) - - def _get_replication_specs(self, specs): - """Checks if we can do replication. - - Need the extra spec set and we have to be talking to EM. 
- - :param specs: Cinder Volume or snapshot extra specs. - :return: rinfo dict. - """ - rinfo = {'enabled': False, 'sync': False, - 'live': False, 'active': False, - 'autofailover': False} - # Repl does not work with direct connect. - if not self.is_direct_connect: - if (not self.failed_over and - specs.get('replication_enabled') == ' True'): - rinfo['enabled'] = True - if specs.get('replication_type') == ' sync': - rinfo['sync'] = True - if specs.get('replication:livevolume') == ' True': - rinfo['live'] = True - if specs.get('replication:livevolume:autofailover') == ' True': - rinfo['autofailover'] = True - if specs.get('replication:activereplay') == ' True': - rinfo['active'] = True - - # Some quick checks. - if rinfo['enabled']: - replication_target_count = len(self.backends) - msg = None - if replication_target_count == 0: - msg = _( - 'Replication setup failure: replication has been ' - 'enabled but no replication target has been specified ' - 'for this backend.') - if rinfo['live'] and replication_target_count != 1: - msg = _('Replication setup failure: replication:livevolume' - ' has been enabled but more than one replication ' - 'target has been specified for this backend.') - if msg: - LOG.debug(msg) - raise exception.ReplicationError(message=msg) - # Got this far. Life is good. Return our data. - return rinfo - - def _is_live_vol(self, obj): - rspecs = self._get_replication_specs(self._get_volume_extra_specs(obj)) - return rspecs['enabled'] and rspecs['live'] - - def _create_replications(self, api, volume, scvolume, extra_specs=None): - """Creates any appropriate replications for a given volume. - - :param api: Dell REST API object. - :param volume: Cinder volume object. - :param scvolume: Dell Storage Center Volume object. - :param extra_specs: Extra specs if we have them otherwise gets them - from the volume. - :return: model_update - """ - # Replication V2 - # for now we assume we have an array named backends. 
- replication_driver_data = None - # Replicate if we are supposed to. - if not extra_specs: - extra_specs = self._get_volume_extra_specs(volume) - rspecs = self._get_replication_specs(extra_specs) - if rspecs['enabled']: - for backend in self.backends: - targetdeviceid = backend['target_device_id'] - primaryqos = backend.get('qosnode', 'cinderqos') - secondaryqos = backend.get('remoteqos', 'cinderqos') - diskfolder = backend.get('diskfolder', None) - obj = None - if rspecs['live']: - # We are rolling with a live volume. - obj = api.create_live_volume(scvolume, targetdeviceid, - rspecs['active'], - rspecs['sync'], - rspecs['autofailover'], - primaryqos, secondaryqos) - else: - # Else a regular replication. - obj = api.create_replication(scvolume, targetdeviceid, - primaryqos, rspecs['sync'], - diskfolder, rspecs['active']) - # This is either a ScReplication object or a ScLiveVolume - # object. So long as it isn't None we are fine. - if not obj: - # Create replication will have printed a better error. - msg = _('Replication %(name)s to %(ssn)s failed.') % { - 'name': volume['id'], - 'ssn': targetdeviceid} - raise exception.VolumeBackendAPIException(data=msg) - if not replication_driver_data: - replication_driver_data = backend['target_device_id'] - else: - replication_driver_data += ',' - replication_driver_data += backend['target_device_id'] - # If we did something return model update. - model_update = {} - if replication_driver_data: - model_update = { - 'replication_status': fields.ReplicationStatus.ENABLED, - 'replication_driver_data': replication_driver_data} - return model_update - - @staticmethod - def _cleanup_failed_create_volume(api, volumename): - try: - api.delete_volume(volumename) - except exception.VolumeBackendAPIException as ex: - LOG.info('Non fatal cleanup error: %s.', ex.msg) - - def create_volume(self, volume): - """Create a volume.""" - model_update = {} - - # We use id as our name as it is unique. 
- volume_name = volume.get('id') - # Look for our volume - volume_size = volume.get('size') - - LOG.debug('Creating volume %(name)s of size %(size)s', - {'name': volume_name, - 'size': volume_size}) - scvolume = None - with self._client.open_connection() as api: - try: - # Get our extra specs. - specs = self._get_volume_extra_specs(volume) - scvolume = api.create_volume( - volume_name, volume_size, - specs.get('storagetype:storageprofile'), - specs.get('storagetype:replayprofiles'), - specs.get('storagetype:volumeqos'), - specs.get('storagetype:groupqos'), - specs.get('storagetype:datareductionprofile')) - if scvolume is None: - raise exception.VolumeBackendAPIException( - message=_('Unable to create volume %s') % - volume_name) - - # Update Group - self._add_volume_to_group(api, scvolume, volume) - - # Create replications. (Or not. It checks.) - model_update = self._create_replications(api, volume, scvolume) - - # Save our provider_id. - model_update['provider_id'] = scvolume['instanceId'] - - except Exception: - # if we actually created a volume but failed elsewhere - # clean up the volume now. - self._cleanup_failed_create_volume(api, volume_name) - with excutils.save_and_reraise_exception(): - LOG.error('Failed to create volume %s', - volume_name) - if scvolume is None: - raise exception.VolumeBackendAPIException( - data=_('Unable to create volume. Backend down.')) - - return model_update - - def _split_driver_data(self, replication_driver_data): - """Splits the replication_driver_data into an array of ssn strings. - - :param replication_driver_data: A string of comma separated SSNs. - :returns: SSNs in an array of strings. - """ - ssnstrings = [] - # We have any replication_driver_data. - if replication_driver_data: - # Split the array and wiffle through the entries. - for str in replication_driver_data.split(','): - # Strip any junk from the string. - ssnstring = str.strip() - # Anything left? - if ssnstring: - # Add it to our array. 
- ssnstrings.append(ssnstring) - return ssnstrings - - def _delete_live_volume(self, api, volume): - """Delete live volume associated with volume. - - :param api: Dell REST API object. - :param volume: Cinder Volume object - :return: True if we actually deleted something. False for everything - else. - """ - # Live Volume was added after provider_id support. So just assume it is - # there. - replication_driver_data = volume.get('replication_driver_data') - # Do we have any replication driver data? - if replication_driver_data: - # Valid replication data? - ssnstrings = self._split_driver_data(replication_driver_data) - if ssnstrings: - ssn = int(ssnstrings[0]) - sclivevolume = api.get_live_volume(volume.get('provider_id'), - volume.get('id')) - # Have we found the live volume? - if (sclivevolume and - sclivevolume.get('secondaryScSerialNumber') == ssn and - api.delete_live_volume(sclivevolume, True)): - LOG.info('%(vname)s\'s replication live volume has ' - 'been deleted from storage Center %(sc)s,', - {'vname': volume.get('id'), - 'sc': ssn}) - return True - # If we are here either we do not have a live volume, we do not have - # one on our configured SC or we were not able to delete it. - # Either way, warn and leave. - LOG.warning('Unable to delete %s live volume.', volume.get('id')) - return False - - def _delete_replications(self, api, volume): - """Delete replications associated with a given volume. - - We should be able to roll through the replication_driver_data list - of SSNs and delete replication objects between them and the source - volume. - - :param api: Dell REST API object. 
- :param volume: Cinder Volume object - :return: None - """ - replication_driver_data = volume.get('replication_driver_data') - if replication_driver_data: - ssnstrings = self._split_driver_data(replication_driver_data) - volume_name = volume.get('id') - provider_id = volume.get('provider_id') - scvol = api.find_volume(volume_name, provider_id) - # This is just a string of ssns separated by commas. - # Trundle through these and delete them all. - for ssnstring in ssnstrings: - ssn = int(ssnstring) - # Are we a replication or a live volume? - if not api.delete_replication(scvol, ssn): - LOG.warning('Unable to delete replication of Volume ' - '%(vname)s to Storage Center %(sc)s.', - {'vname': volume_name, - 'sc': ssnstring}) - # If none of that worked or there was nothing to do doesn't matter. - # Just move on. - - def delete_volume(self, volume): - deleted = False - # We use id as our name as it is unique. - volume_name = volume.get('id') - provider_id = volume.get('provider_id') - # Unless we are migrating. - if volume.get('migration_status') == 'deleting': - volume_name = volume.get('_name_id') - provider_id = None - - LOG.debug('Deleting volume %s', volume_name) - with self._client.open_connection() as api: - try: - rspecs = self._get_replication_specs( - self._get_volume_extra_specs(volume)) - if rspecs['enabled']: - if rspecs['live']: - self._delete_live_volume(api, volume) - else: - self._delete_replications(api, volume) - deleted = api.delete_volume(volume_name, provider_id) - except Exception: - with excutils.save_and_reraise_exception(): - LOG.error('Failed to delete volume %s', - volume_name) - - # if there was an error we will have raised an - # exception. If it failed to delete it is because - # the conditions to delete a volume were not met. 
- if deleted is False: - raise exception.VolumeIsBusy(volume_name=volume_name) - - def create_snapshot(self, snapshot): - """Create snapshot""" - # our volume name is the volume id - volume_name = snapshot.get('volume_id') - provider_id = snapshot.volume.get('provider_id') - snapshot_id = snapshot.get('id') - LOG.debug('Creating snapshot %(snap)s on volume %(vol)s', - {'snap': snapshot_id, - 'vol': volume_name}) - with self._client.open_connection() as api: - scvolume = api.find_volume(volume_name, provider_id, - self._is_live_vol(snapshot)) - if scvolume is not None: - replay = api.create_replay(scvolume, snapshot_id, 0) - if replay: - return {'status': fields.SnapshotStatus.AVAILABLE, - 'provider_id': scvolume['instanceId']} - else: - LOG.warning('Unable to locate volume:%s', volume_name) - - snapshot['status'] = fields.SnapshotStatus.ERROR - msg = _('Failed to create snapshot %s') % snapshot_id - raise exception.VolumeBackendAPIException(data=msg) - - def create_volume_from_snapshot(self, volume, snapshot): - """Create new volume from other volume's snapshot on appliance.""" - model_update = {} - scvolume = None - volume_name = volume.get('id') - src_provider_id = snapshot.get('provider_id') - src_volume_name = snapshot.get('volume_id') - # This snapshot could have been created on its own or as part of a - # cgsnapshot. If it was a cgsnapshot it will be identified on the Dell - # backend under group_snapshot_id. Given the volume ID and the - # group_snapshot_id we can find the appropriate snapshot. - # So first we look for group_snapshot_id. If that is blank then it - # must have been a normal snapshot which will be found under - # snapshot_id. 
- snapshot_id = snapshot.get('group_snapshot_id') - if not snapshot_id: - snapshot_id = snapshot.get('id') - LOG.debug( - 'Creating new volume %(vol)s from snapshot %(snap)s ' - 'from vol %(src)s', - {'vol': volume_name, - 'snap': snapshot_id, - 'src': src_volume_name}) - with self._client.open_connection() as api: - try: - srcvol = api.find_volume(src_volume_name, src_provider_id) - if srcvol is not None: - replay = api.find_replay(srcvol, snapshot_id) - if replay is not None: - # See if we have any extra specs. - specs = self._get_volume_extra_specs(volume) - scvolume = api.create_view_volume( - volume_name, replay, - specs.get('storagetype:replayprofiles'), - specs.get('storagetype:volumeqos'), - specs.get('storagetype:groupqos'), - specs.get('storagetype:datareductionprofile')) - - # Extend Volume - if scvolume and (volume['size'] > - snapshot["volume_size"]): - LOG.debug('Resize the new volume to %s.', - volume['size']) - scvolume = api.expand_volume(scvolume, - volume['size']) - if scvolume is None: - raise exception.VolumeBackendAPIException( - message=_('Unable to create volume ' - '%(name)s from %(snap)s.') % - {'name': volume_name, - 'snap': snapshot_id}) - - # Update Group - self._add_volume_to_group(api, scvolume, volume) - # Replicate if we are supposed to. - model_update = self._create_replications(api, - volume, - scvolume) - # Save our instanceid. - model_update['provider_id'] = ( - scvolume['instanceId']) - - except Exception: - # Clean up after ourselves. 
- self._cleanup_failed_create_volume(api, volume_name) - with excutils.save_and_reraise_exception(): - LOG.error('Failed to create volume %s', volume_name) - if scvolume is not None: - LOG.debug('Volume %(vol)s created from %(snap)s', - {'vol': volume_name, - 'snap': snapshot_id}) - else: - msg = _('Failed to create volume %s') % volume_name - raise exception.VolumeBackendAPIException(data=msg) - - return model_update - - def create_cloned_volume(self, volume, src_vref): - """Creates a clone of the specified volume.""" - model_update = {} - scvolume = None - src_volume_name = src_vref.get('id') - src_provider_id = src_vref.get('provider_id') - volume_name = volume.get('id') - LOG.debug('Creating cloned volume %(clone)s from volume %(vol)s', - {'clone': volume_name, - 'vol': src_volume_name}) - with self._client.open_connection() as api: - try: - srcvol = api.find_volume(src_volume_name, src_provider_id) - if srcvol is not None: - # Get our specs. - specs = self._get_volume_extra_specs(volume) - # Create our volume - scvolume = api.create_cloned_volume( - volume_name, srcvol, - specs.get('storagetype:storageprofile'), - specs.get('storagetype:replayprofiles'), - specs.get('storagetype:volumeqos'), - specs.get('storagetype:groupqos'), - specs.get('storagetype:datareductionprofile')) - - # Extend Volume - if scvolume and volume['size'] > src_vref['size']: - LOG.debug('Resize the volume to %s.', volume['size']) - scvolume = api.expand_volume(scvolume, volume['size']) - - # If either of those didn't work we bail. - if scvolume is None: - raise exception.VolumeBackendAPIException( - message=_('Unable to create volume ' - '%(name)s from %(vol)s.') % - {'name': volume_name, - 'vol': src_volume_name}) - - # Update Group - self._add_volume_to_group(api, scvolume, volume) - # Replicate if we are supposed to. - model_update = self._create_replications(api, - volume, - scvolume) - # Save our provider_id. 
- model_update['provider_id'] = scvolume['instanceId'] - except Exception: - # Clean up after ourselves. - self._cleanup_failed_create_volume(api, volume_name) - with excutils.save_and_reraise_exception(): - LOG.error('Failed to create volume %s', volume_name) - if scvolume is not None: - LOG.debug('Volume %(vol)s cloned from %(src)s', - {'vol': volume_name, - 'src': src_volume_name}) - else: - msg = _('Failed to create volume %s') % volume_name - raise exception.VolumeBackendAPIException(data=msg) - return model_update - - def delete_snapshot(self, snapshot): - """delete_snapshot""" - volume_name = snapshot.get('volume_id') - snapshot_id = snapshot.get('id') - provider_id = snapshot.get('provider_id') - LOG.debug('Deleting snapshot %(snap)s from volume %(vol)s', - {'snap': snapshot_id, - 'vol': volume_name}) - with self._client.open_connection() as api: - scvolume = api.find_volume(volume_name, provider_id) - if scvolume and api.delete_replay(scvolume, snapshot_id): - return - # if we are here things went poorly. - snapshot['status'] = fields.SnapshotStatus.ERROR_DELETING - msg = _('Failed to delete snapshot %s') % snapshot_id - raise exception.VolumeBackendAPIException(data=msg) - - def create_export(self, context, volume, connector): - """Create an export of a volume. - - The volume exists on creation and will be visible on - initialize connection. So nothing to do here. - """ - pass - - def ensure_export(self, context, volume): - """Ensure an export of a volume. - - Per the eqlx driver we just make sure that the volume actually - exists where we think it does. 
- """ - scvolume = None - volume_name = volume.get('id') - provider_id = volume.get('provider_id') - LOG.debug('Checking existence of volume %s', volume_name) - with self._client.open_connection() as api: - try: - scvolume = api.find_volume(volume_name, provider_id, - self._is_live_vol(volume)) - except Exception: - with excutils.save_and_reraise_exception(): - LOG.error('Failed to ensure export of volume %s', - volume_name) - if scvolume is None: - msg = _('Unable to find volume %s') % volume_name - raise exception.VolumeBackendAPIException(data=msg) - - def remove_export(self, context, volume): - """Remove an export of a volume. - - We do nothing here to match the nothing we do in create export. Again - we do everything in initialize and terminate connection. - """ - pass - - def extend_volume(self, volume, new_size): - """Extend the size of the volume.""" - volume_name = volume.get('id') - provider_id = volume.get('provider_id') - LOG.debug('Extending volume %(vol)s to %(size)s', - {'vol': volume_name, - 'size': new_size}) - if volume is not None: - with self._client.open_connection() as api: - scvolume = api.find_volume(volume_name, provider_id) - if api.expand_volume(scvolume, new_size) is not None: - return - # If we are here nothing good happened. - msg = _('Unable to extend volume %s') % volume_name - raise exception.VolumeBackendAPIException(data=msg) - - def get_volume_stats(self, refresh=False): - """Get volume status. - - If 'refresh' is True, run update the stats first. - """ - if refresh: - self._update_volume_stats() - - # Take this opportunity to report our failover state. - if self.failed_over: - LOG.debug('%(source)s has been failed over to %(dest)s', - {'source': self.backend_name, - 'dest': self.active_backend_id}) - return self._stats - - def _update_volume_stats(self): - """Retrieve stats info from volume group.""" - with self._client.open_connection() as api: - # Static stats. 
- data = {} - data['volume_backend_name'] = self.backend_name - data['vendor_name'] = 'Dell EMC' - data['driver_version'] = self.VERSION - data['storage_protocol'] = self.storage_protocol - data['reserved_percentage'] = 0 - data['consistencygroup_support'] = True - data['consistent_group_snapshot_enabled'] = True - data['thin_provisioning_support'] = True - data['QoS_support'] = False - data['replication_enabled'] = self.replication_enabled - if self.replication_enabled: - data['replication_type'] = ['async', 'sync'] - data['replication_count'] = len(self.backends) - replication_targets = [] - # Trundle through our backends. - for backend in self.backends: - target_device_id = backend.get('target_device_id') - if target_device_id: - replication_targets.append(target_device_id) - data['replication_targets'] = replication_targets - - # Get our capacity. - storageusage = api.get_storage_usage() - if storageusage: - # Get actual stats. - totalcapacity = storageusage.get('availableSpace') - totalcapacitygb = self._bytes_to_gb(totalcapacity) - data['total_capacity_gb'] = totalcapacitygb - freespace = storageusage.get('freeSpace') - freespacegb = self._bytes_to_gb(freespace) - data['free_capacity_gb'] = freespacegb - else: - # Soldier on. Just return 0 for this iteration. - LOG.error('Unable to retrieve volume stats.') - data['total_capacity_gb'] = 0 - data['free_capacity_gb'] = 0 - - self._stats = data - LOG.debug('Total cap %(total)s Free cap %(free)s', - {'total': data['total_capacity_gb'], - 'free': data['free_capacity_gb']}) - - def update_migrated_volume(self, ctxt, volume, new_volume, - original_volume_status): - """Return model update for migrated volume. 
- - :param volume: The original volume that was migrated to this backend - :param new_volume: The migration volume object that was created on - this backend as part of the migration process - :param original_volume_status: The status of the original volume - :returns: model_update to update DB with any needed changes - """ - # We use id as our volume name so we need to rename the backend - # volume to the original volume name. - original_volume_name = volume.get('id') - current_name = new_volume.get('id') - # We should have this. If we don't we'll set it below. - provider_id = new_volume.get('provider_id') - LOG.debug('update_migrated_volume: %(current)s to %(original)s', - {'current': current_name, - 'original': original_volume_name}) - if original_volume_name: - with self._client.open_connection() as api: - # todo(tswanson): Delete old volume repliations/live volumes - # todo(tswanson): delete old volume? - scvolume = api.find_volume(current_name, provider_id) - if (scvolume and - api.rename_volume(scvolume, original_volume_name)): - # Replicate if we are supposed to. - model_update = self._create_replications(api, - new_volume, - scvolume) - model_update['_name_id'] = None - model_update['provider_id'] = scvolume['instanceId'] - - return model_update - # The world was horrible to us so we should error and leave. - LOG.error('Unable to rename the logical volume for volume: %s', - original_volume_name) - - return {'_name_id': new_volume['_name_id'] or new_volume['id']} - - def create_group(self, context, group): - """Creates a group. - - :param context: the context of the caller. - :param group: the Group object of the group to be created. - :returns: model_update - - model_update will be in this format: {'status': xxx, ......}. - - If the status in model_update is 'error', the manager will throw - an exception and it will be caught in the try-except block in the - manager. 
If the driver throws an exception, the manager will also - catch it in the try-except block. The group status in the db will - be changed to 'error'. - - For a successful operation, the driver can either build the - model_update and return it or return None. The group status will - be set to 'available'. - """ - if not volume_utils.is_group_a_cg_snapshot_type(group): - raise NotImplementedError() - - model_update = {'status': fields.GroupStatus.ERROR} - gid = group['id'] - with self._client.open_connection() as api: - cgroup = api.create_replay_profile(gid) - if cgroup: - LOG.info('Created group %s', gid) - model_update['status'] = fields.GroupStatus.AVAILABLE - return model_update - - def delete_group(self, context, group, volumes): - """Deletes a group. - - :param context: the context of the caller. - :param group: the Group object of the group to be deleted. - :param volumes: a list of Volume objects in the group. - :returns: model_update, volumes_model_update - - param volumes is a list of objects retrieved from the db. It cannot - be assigned to volumes_model_update. volumes_model_update is a list - of dictionaries. It has to be built by the driver. An entry will be - in this format: {'id': xxx, 'status': xxx, ......}. model_update - will be in this format: {'status': xxx, ......}. - - The driver should populate volumes_model_update and model_update - and return them. - - The manager will check volumes_model_update and update db accordingly - for each volume. If the driver successfully deleted some volumes - but failed to delete others, it should set statuses of the volumes - accordingly so that the manager can update db correctly. - - If the status in any entry of volumes_model_update is 'error_deleting' - or 'error', the status in model_update will be set to the same if it - is not already 'error_deleting' or 'error'. 
- - If the status in model_update is 'error_deleting' or 'error', the - manager will raise an exception and the status of the group will be - set to 'error' in the db. If volumes_model_update is not returned by - the driver, the manager will set the status of every volume in the - group to 'error' in the except block. - - If the driver raises an exception during the operation, it will be - caught by the try-except block in the manager. The statuses of the - group and all volumes in it will be set to 'error'. - - For a successful operation, the driver can either build the - model_update and volumes_model_update and return them or - return None, None. The statuses of the group and all volumes - will be set to 'deleted' after the manager deletes them from db. - """ - if not volume_utils.is_group_a_cg_snapshot_type(group): - raise NotImplementedError() - - model_update = {'status': fields.GroupStatus.DELETED} - with self._client.open_connection() as api: - gid = group['id'] - profile = api.find_replay_profile(gid) - if profile: - try: - api.delete_replay_profile(profile) - except exception.VolumeBackendAPIException: - LOG.error('delete_group: error deleting %s', gid) - model_update['status'] = fields.GroupStatus.ERROR - - # Trundle through the list deleting the volumes. - volumes_model_update = [] - for volume in volumes: - status = fields.GroupStatus.ERROR - try: - if self.delete_volume(volume): - status = fields.GroupStatus.DELETED - except (exception.VolumeBackendAPIException, - exception.VolumeIsBusy): - LOG.error('delete_group: error deleting volume %s', - volume['id']) - volumes_model_update.append({'id': volume['id'], 'status': status}) - - return model_update, volumes_model_update - - def update_group(self, context, group, - add_volumes=None, remove_volumes=None): - """Updates a group. - - :param context: the context of the caller. - :param group: the Group object of the group to be updated. - :param add_volumes: a list of Volume objects to be added. 
- :param remove_volumes: a list of Volume objects to be removed. - :returns: model_update, add_volumes_update, remove_volumes_update - - model_update is a dictionary that the driver wants the manager - to update upon a successful return. If None is returned, the manager - will set the status to 'available'. - - add_volumes_update and remove_volumes_update are lists of dictionaries - that the driver wants the manager to update upon a successful return. - Note that each entry requires a {'id': xxx} so that the correct - volume entry can be updated. If None is returned, the volume will - remain its original status. Also note that you cannot directly - assign add_volumes to add_volumes_update as add_volumes is a list of - volume objects and cannot be used for db update directly. Same with - remove_volumes. - - If the driver throws an exception, the status of the group as well as - those of the volumes to be added/removed will be set to 'error'. - """ - if not volume_utils.is_group_a_cg_snapshot_type(group): - raise NotImplementedError() - - with self._client.open_connection() as api: - gid = group['id'] - profile = api.find_replay_profile(gid) - if not profile: - LOG.error('update_group: Cannot find volume Group %s', gid) - elif api.update_cg_volumes(profile, - add_volumes, - remove_volumes): - LOG.info('update_group: Updated volume group %s', gid) - # we need nothing updated above us so just return None. - return None, None, None - # Things did not go well so throw. - msg = _('Unable to update group %s') % gid - raise exception.VolumeBackendAPIException(data=msg) - - def create_group_from_src(self, context, group, volumes, - group_snapshot=None, snapshots=None, - source_group=None, source_vols=None): - """Creates a group from source. - - :param context: the context of the caller. - :param group: the Group object to be created. - :param volumes: a list of Volume objects in the group. - :param group_snapshot: the GroupSnapshot object as source. 
- :param snapshots: a list of Snapshot objects in group_snapshot. - :param source_group: the Group object as source. - :param source_vols: a list of Volume objects in the source_group. - :returns: model_update, volumes_model_update - - The source can be group_snapshot or a source_group. - - param volumes is a list of objects retrieved from the db. It cannot - be assigned to volumes_model_update. volumes_model_update is a list - of dictionaries. It has to be built by the driver. An entry will be - in this format: {'id': xxx, 'status': xxx, ......}. model_update - will be in this format: {'status': xxx, ......}. - - To be consistent with other volume operations, the manager will - assume the operation is successful if no exception is thrown by - the driver. For a successful operation, the driver can either build - the model_update and volumes_model_update and return them or - return None, None. - """ - if not (group_snapshot and snapshots and not source_group or - source_group and source_vols and not group_snapshot): - msg = _("create_group_from_src only supports a " - "group_snapshot source or a group source. " - "Multiple sources cannot be used.") - raise exception.InvalidInput(msg) - - if not volume_utils.is_group_a_cg_snapshot_type(group): - raise NotImplementedError() - - # Mark us as working. If we are a CG then that will be settled below. - model_update = {'status': fields.GroupStatus.AVAILABLE} - volumes_model_update = [] - if source_group: - for volume, src_vol in zip(volumes, source_vols): - update = self.create_cloned_volume(volume, src_vol) - update['status'] = fields.GroupStatus.AVAILABLE - volumes_model_update.append(update.copy()) - else: - for volume, src_snap in zip(volumes, snapshots): - update = self.create_volume_from_snapshot(volume, src_snap) - update['status'] = fields.GroupStatus.AVAILABLE - volumes_model_update.append(update.copy()) - - # So, in theory, everything has been created. Now is the time to - # add the volumes to the group. 
- model_update = self.create_group(context, group) - if model_update['status'] == fields.GroupStatus.AVAILABLE: - self.update_group(context, group, volumes, None) - - return model_update, volumes_model_update - - def create_group_snapshot(self, context, group_snapshot, snapshots): - """Creates a group_snapshot. - - :param context: the context of the caller. - :param group_snapshot: the GroupSnapshot object to be created. - :param snapshots: a list of Snapshot objects in the group_snapshot. - :returns: model_update, snapshots_model_update - - param snapshots is a list of Snapshot objects. It cannot be assigned - to snapshots_model_update. snapshots_model_update is a list of - dictionaries. It has to be built by the driver. An entry will be - in this format: {'id': xxx, 'status': xxx, ......}. model_update - will be in this format: {'status': xxx, ......}. - - The driver should populate snapshots_model_update and model_update - and return them. - - The manager will check snapshots_model_update and update db accordingly - for each snapshot. If the driver successfully deleted some snapshots - but failed to delete others, it should set statuses of the snapshots - accordingly so that the manager can update db correctly. - - If the status in any entry of snapshots_model_update is 'error', the - status in model_update will be set to the same if it is not already - 'error'. - - If the status in model_update is 'error', the manager will raise an - exception and the status of group_snapshot will be set to 'error' in - the db. If snapshots_model_update is not returned by the driver, the - manager will set the status of every snapshot to 'error' in the except - block. - - If the driver raises an exception during the operation, it will be - caught by the try-except block in the manager and the statuses of - group_snapshot and all snapshots will be set to 'error'. 
- - For a successful operation, the driver can either build the - model_update and snapshots_model_update and return them or - return None, None. The statuses of group_snapshot and all snapshots - will be set to 'available' at the end of the manager function. - """ - if not volume_utils.is_group_a_cg_snapshot_type(group_snapshot): - raise NotImplementedError() - - model_update = {'status': fields.GroupSnapshotStatus.ERROR} - snapshot_updates = None - with self._client.open_connection() as api: - gid = group_snapshot['group_id'] - snapshotid = group_snapshot['id'] - profile = api.find_replay_profile(gid) - if not profile: - LOG.error('create_group_snapshot: profile %s not found', gid) - else: - if not api.snap_cg_replay(profile, snapshotid, 0): - LOG.error('create_group_snapshot: ' - 'failed to snap %(ss)s on %(pro)s', - {'ss': snapshotid, 'pro': profile}) - else: - LOG.info('create_group_snapshot: ' - 'created %(ss)s on %(pro)s', - {'ss': snapshotid, 'pro': profile}) - # Set our returns - model_update['status'] = ( - fields.GroupSnapshotStatus.AVAILABLE) - snapshot_updates = [] - for snapshot in snapshots: - snapshot_updates.append({ - 'id': snapshot.id, - 'status': fields.SnapshotStatus.AVAILABLE}) - - return model_update, snapshot_updates - - def delete_group_snapshot(self, context, group_snapshot, snapshots): - """Deletes a group_snapshot. - - :param context: the context of the caller. - :param group_snapshot: the GroupSnapshot object to be deleted. - :param snapshots: a list of Snapshot objects in the group_snapshot. - :returns: model_update, snapshots_model_update - - param snapshots is a list of objects. It cannot be assigned to - snapshots_model_update. snapshots_model_update is a list of of - dictionaries. It has to be built by the driver. An entry will be - in this format: {'id': xxx, 'status': xxx, ......}. model_update - will be in this format: {'status': xxx, ......}. 
- - The driver should populate snapshots_model_update and model_update - and return them. - - The manager will check snapshots_model_update and update db accordingly - for each snapshot. If the driver successfully deleted some snapshots - but failed to delete others, it should set statuses of the snapshots - accordingly so that the manager can update db correctly. - - If the status in any entry of snapshots_model_update is - 'error_deleting' or 'error', the status in model_update will be set to - the same if it is not already 'error_deleting' or 'error'. - - If the status in model_update is 'error_deleting' or 'error', the - manager will raise an exception and the status of group_snapshot will - be set to 'error' in the db. If snapshots_model_update is not returned - by the driver, the manager will set the status of every snapshot to - 'error' in the except block. - - If the driver raises an exception during the operation, it will be - caught by the try-except block in the manager and the statuses of - group_snapshot and all snapshots will be set to 'error'. - - For a successful operation, the driver can either build the - model_update and snapshots_model_update and return them or - return None, None. The statuses of group_snapshot and all snapshots - will be set to 'deleted' after the manager deletes them from db. - """ - # Setup a generic error return. 
- if not volume_utils.is_group_a_cg_snapshot_type(group_snapshot): - raise NotImplementedError() - - model_update = {'status': fields.GroupSnapshotStatus.ERROR} - snapshot_updates = None - with self._client.open_connection() as api: - snapshotid = group_snapshot['id'] - profile = api.find_replay_profile(group_snapshot['group_id']) - if profile: - LOG.info('delete_group_snapshot: %(ss)s from %(pro)s', - {'ss': snapshotid, 'pro': profile}) - if not api.delete_cg_replay(profile, snapshotid): - model_update['status'] = ( - fields.GroupSnapshotStatus.ERROR_DELETING) - else: - model_update['status'] = fields.GroupSnapshotStatus.DELETED - snapshot_updates = [] - for snapshot in snapshots: - snapshot_updates.append( - {'id': snapshot['id'], - 'status': fields.SnapshotStatus.DELETED}) - return model_update, snapshot_updates - - def manage_existing(self, volume, existing_ref): - """Brings an existing backend storage object under Cinder management. - - existing_ref is passed straight through from the API request's - manage_existing_ref value, and it is up to the driver how this should - be interpreted. It should be sufficient to identify a storage object - that the driver should somehow associate with the newly-created cinder - volume structure. - - There are two ways to do this: - - 1. Rename the backend storage object so that it matches the, - volume['name'] which is how drivers traditionally map between a - cinder volume and the associated backend storage object. - - 2. Place some metadata on the volume, or somewhere in the backend, that - allows other driver requests (e.g. delete, clone, attach, detach...) - to locate the backend storage object when required. - - If the existing_ref doesn't make sense, or doesn't refer to an existing - backend storage object, raise a ManageExistingInvalidReference - exception. - - The volume may have a volume_type, and the driver can inspect that and - compare against the properties of the referenced backend storage - object. 
If they are incompatible, raise a - ManageExistingVolumeTypeMismatch, specifying a reason for the failure. - - :param volume: Cinder volume to manage - :param existing_ref: Driver-specific information used to identify a - volume - """ - if existing_ref.get('source-name') or existing_ref.get('source-id'): - with self._client.open_connection() as api: - api.manage_existing(volume['id'], existing_ref) - # Replicate if we are supposed to. - volume_name = volume.get('id') - provider_id = volume.get('provider_id') - scvolume = api.find_volume(volume_name, provider_id) - model_update = self._create_replications(api, volume, scvolume) - if model_update: - return model_update - else: - msg = _('Must specify source-name or source-id.') - raise exception.ManageExistingInvalidReference( - existing_ref=existing_ref, reason=msg) - # Only return a model_update if we have replication info to add. - return None - - def manage_existing_get_size(self, volume, existing_ref): - """Return size of volume to be managed by manage_existing. - - When calculating the size, round up to the next GB. - - :param volume: Cinder volume to manage - :param existing_ref: Driver-specific information used to identify a - volume - """ - if existing_ref.get('source-name') or existing_ref.get('source-id'): - with self._client.open_connection() as api: - return api.get_unmanaged_volume_size(existing_ref) - else: - msg = _('Must specify source-name or source-id.') - raise exception.ManageExistingInvalidReference( - existing_ref=existing_ref, reason=msg) - - def unmanage(self, volume): - """Removes the specified volume from Cinder management. - - Does not delete the underlying backend storage object. - - For most drivers, this will not need to do anything. However, some - drivers might use this call as an opportunity to clean up any - Cinder-specific configuration that they have associated with the - backend storage object. 
- - :param volume: Cinder volume to unmanage - """ - with self._client.open_connection() as api: - volume_name = volume.get('id') - provider_id = volume.get('provider_id') - scvolume = api.find_volume(volume_name, provider_id) - if scvolume: - api.unmanage(scvolume) - - def _get_retype_spec(self, diff, volume_name, specname, spectype): - """Helper function to get current and requested spec. - - :param diff: A difference dictionary. - :param volume_name: The volume name we are working with. - :param specname: The pretty name of the parameter. - :param spectype: The actual spec string. - :return: current, requested spec. - :raises VolumeBackendAPIException: - """ - spec = (diff['extra_specs'].get(spectype)) - if spec: - if len(spec) != 2: - msg = _('Unable to retype %(specname)s, expected to receive ' - 'current and requested %(spectype)s values. Value ' - 'received: %(spec)s') % {'specname': specname, - 'spectype': spectype, - 'spec': spec} - LOG.error(msg) - raise exception.VolumeBackendAPIException(data=msg) - - current = spec[0] - requested = spec[1] - - if current != requested: - LOG.debug('Retyping volume %(vol)s to use %(specname)s ' - '%(spec)s.', - {'vol': volume_name, - 'specname': specname, - 'spec': requested}) - return current, requested - else: - LOG.info('Retype was to same Storage Profile.') - return None, None - - def _retype_replication(self, api, volume, scvolume, new_type, diff): - model_update = None - ret = True - # Replication. - current, requested = ( - self._get_retype_spec(diff, volume.get('id'), - 'replication_enabled', - 'replication_enabled')) - # We only toggle at the repl level. - if current != requested: - # If we are changing to on... - if requested == ' True': - # We create our replication using our new type's extra specs. 
- model_update = self._create_replications( - api, volume, scvolume, - new_type.get('extra_specs')) - elif current == ' True': - # If we are killing replication we have to see if we currently - # have live volume enabled or not. - if self._is_live_vol(volume): - ret = self._delete_live_volume(api, volume) - else: - self._delete_replications(api, volume) - model_update = {'replication_status': - fields.ReplicationStatus.DISABLED, - 'replication_driver_data': ''} - # TODO(tswanson): Add support for changing replication options. - return ret, model_update - - def retype(self, ctxt, volume, new_type, diff, host): - """Convert the volume to be of the new type. - - Returns a boolean indicating whether the retype occurred. - - :param ctxt: Context - :param volume: A dictionary describing the volume to migrate - :param new_type: A dictionary describing the volume type to convert to - :param diff: A dictionary with the difference between the two types - :param host: A dictionary describing the host to migrate to, where - host['host'] is its name, and host['capabilities'] is a - dictionary of its reported capabilities (Not Used). - :returns: Boolean or Boolean, model_update tuple. - """ - LOG.info('retype: volume_name: %(name)s new_type: %(newtype)s ' - 'diff: %(diff)s host: %(host)s', - {'name': volume.get('id'), 'newtype': new_type, - 'diff': diff, 'host': host}) - model_update = None - # Any spec changes? - if diff['extra_specs']: - volume_name = volume.get('id') - provider_id = volume.get('provider_id') - with self._client.open_connection() as api: - try: - # Get our volume - scvolume = api.find_volume(volume_name, provider_id) - if scvolume is None: - LOG.error('Retype unable to find volume %s.', - volume_name) - return False - # Check our specs. - # Storage profiles. - current, requested = ( - self._get_retype_spec(diff, volume_name, - 'Storage Profile', - 'storagetype:storageprofile')) - # if there is a change and it didn't work fast fail. 
- if (current != requested and not - api.update_storage_profile(scvolume, requested)): - LOG.error('Failed to update storage profile') - return False - - # Replay profiles. - current, requested = ( - self._get_retype_spec(diff, volume_name, - 'Replay Profiles', - 'storagetype:replayprofiles')) - # if there is a change and it didn't work fast fail. - if requested and not api.update_replay_profiles(scvolume, - requested): - LOG.error('Failed to update replay profiles') - return False - - # Volume QOS profiles. - current, requested = ( - self._get_retype_spec(diff, volume_name, - 'Volume QOS Profile', - 'storagetype:volumeqos')) - if current != requested: - if not api.update_qos_profile(scvolume, requested): - LOG.error('Failed to update volume qos profile') - - # Group QOS profiles. - current, requested = ( - self._get_retype_spec(diff, volume_name, - 'Group QOS Profile', - 'storagetype:groupqos')) - if current != requested: - if not api.update_qos_profile(scvolume, requested, - True): - LOG.error('Failed to update group qos profile') - return False - - # Data reduction profiles. - current, requested = ( - self._get_retype_spec( - diff, volume_name, 'Data Reduction Profile', - 'storagetype:datareductionprofile')) - if current != requested: - if not api.update_datareduction_profile(scvolume, - requested): - LOG.error('Failed to update data reduction ' - 'profile') - return False - - # Active Replay - current, requested = ( - self._get_retype_spec(diff, volume_name, - 'Replicate Active Replay', - 'replication:activereplay')) - if current != requested and not ( - api.update_replicate_active_replay( - scvolume, requested == ' True')): - LOG.error('Failed to apply ' - 'replication:activereplay setting') - return False - - # Deal with replication. - ret, model_update = self._retype_replication( - api, volume, scvolume, new_type, diff) - if not ret: - return False - - except exception.VolumeBackendAPIException: - # We do nothing with this. We simply return failure. 
- return False - # If we have something to send down... - if model_update: - return True, model_update - return True - - def _parse_secondary(self, api, secondary): - """Find the replication destination associated with secondary. - - :param api: Dell SCApi - :param secondary: String indicating the secondary to failover to. - :return: Destination SSN for the given secondary. - """ - LOG.debug('_parse_secondary. Looking for %s.', secondary) - destssn = None - # Trundle through these looking for our secondary. - for backend in self.backends: - ssnstring = backend['target_device_id'] - # If they list a secondary it has to match. - # If they do not list a secondary we return the first - # replication on a working system. - if not secondary or secondary == ssnstring: - # Is a string. Need an int. - ssn = int(ssnstring) - # Without the source being up we have no good - # way to pick a destination to failover to. So just - # look for one that is just up. - try: - # If the SC ssn exists use it. - if api.find_sc(ssn): - destssn = ssn - break - except exception.VolumeBackendAPIException: - LOG.warning('SSN %s appears to be down.', ssn) - LOG.info('replication failover secondary is %(ssn)s', - {'ssn': destssn}) - return destssn - - def _update_backend(self, active_backend_id): - # Mark for failover or undo failover. - LOG.debug('active_backend_id: %s', active_backend_id) - if active_backend_id: - self.active_backend_id = six.text_type(active_backend_id) - self.failed_over = True - else: - self.active_backend_id = None - self.failed_over = False - - self._client.active_backend_id = self.active_backend_id - - def _get_qos(self, targetssn): - # Find our QOS. - qosnode = None - for backend in self.backends: - if int(backend['target_device_id']) == targetssn: - qosnode = backend.get('qosnode', 'cinderqos') - return qosnode - - def _parse_extraspecs(self, volume): - # Digest our extra specs for replication. 
- extraspecs = {} - specs = self._get_volume_extra_specs(volume) - if specs.get('replication_type') == ' sync': - extraspecs['replicationtype'] = 'Synchronous' - else: - extraspecs['replicationtype'] = 'Asynchronous' - if specs.get('replication:activereplay') == ' True': - extraspecs['activereplay'] = True - else: - extraspecs['activereplay'] = False - extraspecs['storage_profile'] = specs.get('storagetype:storageprofile') - extraspecs['replay_profile_string'] = ( - specs.get('storagetype:replayprofiles')) - return extraspecs - - def _wait_for_replication(self, api, items): - # Wait for our replications to resync with their original volumes. - # We wait for completion, errors or timeout. - deadcount = 5 - lastremain = 0.0 - # The big wait loop. - while True: - # We run until all volumes are synced or in error. - done = True - currentremain = 0.0 - # Run the list. - for item in items: - # If we have one cooking. - if item['status'] == 'inprogress': - # Is it done? - synced, remain = api.replication_progress(item['screpl']) - currentremain += remain - if synced: - # It is! Get our volumes. - cvol = api.get_volume(item['cvol']) - nvol = api.get_volume(item['nvol']) - - # Flip replication. - if (cvol and nvol and api.flip_replication( - cvol, nvol, item['volume']['id'], - item['specs']['replicationtype'], - item['qosnode'], - item['specs']['activereplay'])): - # rename the original. Doesn't matter if it - # succeeded as we should have the provider_id - # of the new volume. - ovol = api.get_volume(item['ovol']) - if not ovol or not api.rename_volume( - ovol, 'org:' + ovol['name']): - # Not a reason to fail but will possibly - # cause confusion so warn. - LOG.warning('Unable to locate and rename ' - 'original volume: %s', - item['ovol']) - item['status'] = 'synced' - else: - item['status'] = 'error' - elif synced is None: - # Couldn't get info on this one. Call it baked. - item['status'] = 'error' - else: - # Miles to go before we're done. - done = False - # done? 
then leave. - if done: - break - - # Confirm we are or are not still making progress. - if lastremain == currentremain: - # One chance down. Warn user. - deadcount -= 1 - LOG.warning('Waiting for replications to complete. ' - 'No progress for %(timeout)d seconds. ' - 'deadcount = %(cnt)d', - {'timeout': self.failback_timeout, - 'cnt': deadcount}) - else: - # Reset - lastremain = currentremain - deadcount = 5 - - # If we've used up our 5 chances we error and log.. - if deadcount == 0: - LOG.error('Replication progress has stopped: %f remaining.', - currentremain) - for item in items: - if item['status'] == 'inprogress': - LOG.error('Failback failed for volume: %s. ' - 'Timeout waiting for replication to ' - 'sync with original volume.', - item['volume']['id']) - item['status'] = 'error' - break - # This is part of an async call so we should be good sleeping here. - # Have to balance hammering the backend for no good reason with - # the max timeout for the unit tests. Yeah, silly. - eventlet.sleep(self.failback_timeout) - - def _reattach_remaining_replications(self, api, items): - # Wiffle through our backends and reattach any remaining replication - # targets. - for item in items: - if item['status'] == 'synced': - svol = api.get_volume(item['nvol']) - # assume it went well. Will error out if not. - item['status'] = 'reattached' - # wiffle through our backends and kick off replications. - for backend in self.backends: - rssn = int(backend['target_device_id']) - if rssn != api.ssn: - rvol = api.find_repl_volume(item['volume']['id'], - rssn, None) - # if there is an old replication whack it. - api.delete_replication(svol, rssn, False) - if api.start_replication( - svol, rvol, - item['specs']['replicationtype'], - self._get_qos(rssn), - item['specs']['activereplay']): - # Save our replication_driver_data. - item['rdd'] += ',' - item['rdd'] += backend['target_device_id'] - else: - # No joy. 
Bail - item['status'] = 'error' - - def _fixup_types(self, api, items): - # Update our replay profiles. - for item in items: - if item['status'] == 'reattached': - # Re-apply any appropriate replay profiles. - item['status'] = 'available' - rps = item['specs']['replay_profile_string'] - if rps: - svol = api.get_volume(item['nvol']) - if not api.update_replay_profiles(svol, rps): - item['status'] = 'error' - - def _volume_updates(self, items): - # Update our volume updates. - volume_updates = [] - for item in items: - # Set our status for our replicated volumes - model_update = {'provider_id': item['nvol'], - 'replication_driver_data': item['rdd']} - # These are simple. If the volume reaches available then, - # since we were replicating it, replication status must - # be good. Else error/error. - if item['status'] == 'available': - model_update['status'] = 'available' - model_update['replication_status'] = ( - fields.ReplicationStatus.ENABLED) - else: - model_update['status'] = 'error' - model_update['replication_status'] = ( - fields.ReplicationStatus.ERROR) - volume_updates.append({'volume_id': item['volume']['id'], - 'updates': model_update}) - return volume_updates - - def _failback_replication(self, api, volume, qosnode): - """Sets up the replication failback. - - :param api: Dell SC API. - :param volume: Cinder Volume - :param qosnode: Dell QOS node object. - :return: replitem dict. - """ - LOG.info('failback_volumes: replicated volume') - # Get our current volume. - cvol = api.find_volume(volume['id'], volume['provider_id']) - # Original volume on the primary. - ovol = api.find_repl_volume(volume['id'], api.primaryssn, - None, True, False) - # Delete our current mappings. - api.remove_mappings(cvol) - # If there is a replication to delete do so. - api.delete_replication(ovol, api.ssn, False) - # Replicate to a common replay. - screpl = api.replicate_to_common(cvol, ovol, 'tempqos') - # We made it this far. Update our status. 
- screplid = None - status = '' - if screpl: - screplid = screpl['instanceId'] - nvolid = screpl['destinationVolume']['instanceId'] - status = 'inprogress' - else: - LOG.error('Unable to restore %s', volume['id']) - screplid = None - nvolid = None - status = 'error' - - # Save some information for the next step. - # nvol is the new volume created by replicate_to_common. - # We also grab our extra specs here. - replitem = { - 'volume': volume, - 'specs': self._parse_extraspecs(volume), - 'qosnode': qosnode, - 'screpl': screplid, - 'cvol': cvol['instanceId'], - 'ovol': ovol['instanceId'], - 'nvol': nvolid, - 'rdd': six.text_type(api.ssn), - 'status': status} - - return replitem - - def _failback_live_volume(self, api, id, provider_id): - """failback the live volume to its original - - :param api: Dell SC API - :param id: Volume ID - :param provider_id: Dell Instance ID - :return: model_update dict - """ - model_update = {} - # We do not search by name. Only failback if we have a complete - # LV object. - sclivevolume = api.get_live_volume(provider_id) - # TODO(tswanson): Check swapped state first. - if sclivevolume and api.swap_roles_live_volume(sclivevolume): - LOG.info('Success swapping sclivevolume roles %s', id) - model_update = { - 'status': 'available', - 'replication_status': fields.ReplicationStatus.ENABLED, - 'provider_id': - sclivevolume['secondaryVolume']['instanceId']} - else: - LOG.info('Failure swapping roles %s', id) - model_update = {'status': 'error'} - - return model_update - - def _finish_failback(self, api, replitems): - # Wait for replication to complete. - # This will also flip replication. - self._wait_for_replication(api, replitems) - # Replications are done. Attach to any additional replication - # backends. - self._reattach_remaining_replications(api, replitems) - self._fixup_types(api, replitems) - return self._volume_updates(replitems) - - def failback_volumes(self, volumes): - """This is a generic volume failback. 
- - :param volumes: List of volumes that need to be failed back. - :return: volume_updates for the list of volumes. - """ - LOG.info('failback_volumes') - with self._client.open_connection() as api: - # Get our qosnode. This is a good way to make sure the backend - # is still setup so that we can do this. - qosnode = self._get_qos(api.ssn) - if not qosnode: - raise exception.VolumeBackendAPIException( - message=_('Unable to failback. Backend is misconfigured.')) - - volume_updates = [] - replitems = [] - - # Trundle through the volumes. Update non replicated to alive again - # and reverse the replications for the remaining volumes. - for volume in volumes: - LOG.info('failback_volumes: starting volume: %s', volume) - model_update = {} - if volume.get('replication_driver_data'): - rspecs = self._get_replication_specs( - self._get_volume_extra_specs(volume)) - if rspecs['live']: - model_update = self._failback_live_volume( - api, volume['id'], volume['provider_id']) - else: - replitem = self._failback_replication(api, volume, - qosnode) - - # Save some information for the next step. - # nvol is the new volume created by - # replicate_to_common. We also grab our - # extra specs here. - replitems.append(replitem) - else: - # Not replicated. Just set it to available. - model_update = {'status': 'available'} - - # Save our update - if model_update: - volume_updates.append({'volume_id': volume['id'], - 'updates': model_update}) - # Let's do up to 5 replications at once. - if len(replitems) == 5: - volume_updates += self._finish_failback(api, replitems) - replitems = [] - - # Finish any leftover items - if replitems: - volume_updates += self._finish_failback(api, replitems) - - # Set us back to a happy state. - # The only way this doesn't happen is if the primary is down. 
- self._update_backend(None) - return volume_updates - - def _failover_replication(self, api, id, provider_id, destssn): - rvol = api.break_replication(id, provider_id, destssn) - model_update = {} - if rvol: - LOG.info('Success failing over volume %s', id) - model_update = {'replication_status': - fields.ReplicationStatus.FAILED_OVER, - 'provider_id': rvol['instanceId']} - else: - LOG.info('Failed failing over volume %s', id) - model_update = {'status': 'error'} - - return model_update - - def _failover_live_volume(self, api, id, provider_id): - model_update = {} - # Search for volume by id if we have to. - sclivevolume = api.get_live_volume(provider_id, id) - if sclivevolume: - swapped = api.is_swapped(provider_id, sclivevolume) - # If we aren't swapped try it. If fail error out. - if not swapped and not api.swap_roles_live_volume(sclivevolume): - LOG.info('Failure swapping roles %s', id) - model_update = {'status': 'error'} - return model_update - - LOG.info('Success swapping sclivevolume roles %s', id) - sclivevolume = api.get_live_volume(provider_id) - model_update = { - 'replication_status': - fields.ReplicationStatus.FAILED_OVER, - 'provider_id': - sclivevolume['primaryVolume']['instanceId']} - - # Error and leave. - return model_update - - def failover_host(self, context, volumes, secondary_id=None, groups=None): - """Failover to secondary. - - :param context: security context - :param secondary_id: Specifies rep target to fail over to - :param volumes: List of volumes serviced by this backend. - :returns: destssn, volume_updates data structure - - Example volume_updates data structure: - - .. 
code-block:: json - - [{'volume_id': , - 'updates': {'provider_id': 8, - 'replication_status': 'failed-over', - 'replication_extended_status': 'whatever',...}},] - """ - LOG.debug('failover-host') - LOG.debug(self.failed_over) - LOG.debug(self.active_backend_id) - LOG.debug(self.replication_enabled) - if self.failed_over: - if secondary_id == 'default': - LOG.debug('failing back') - return 'default', self.failback_volumes(volumes), [] - raise exception.InvalidReplicationTarget( - reason=_('Already failed over')) - - LOG.info('Failing backend to %s', secondary_id) - # basic check - if self.replication_enabled: - with self._client.open_connection() as api: - # Look for the specified secondary. - destssn = self._parse_secondary(api, secondary_id) - if destssn: - # We roll through trying to break replications. - # Is failing here a complete failure of failover? - volume_updates = [] - for volume in volumes: - model_update = {} - if volume.get('replication_driver_data'): - rspecs = self._get_replication_specs( - self._get_volume_extra_specs(volume)) - if rspecs['live']: - model_update = self._failover_live_volume( - api, volume['id'], - volume.get('provider_id')) - else: - model_update = self._failover_replication( - api, volume['id'], - volume.get('provider_id'), destssn) - else: - # Not a replicated volume. Try to unmap it. - scvolume = api.find_volume( - volume['id'], volume.get('provider_id')) - api.remove_mappings(scvolume) - model_update = {'status': 'error'} - # Either we are failed over or our status is now error. - volume_updates.append({'volume_id': volume['id'], - 'updates': model_update}) - - # this is it. - self._update_backend(destssn) - LOG.debug('after update backend') - LOG.debug(self.failed_over) - LOG.debug(self.active_backend_id) - LOG.debug(self.replication_enabled) - return destssn, volume_updates, [] - else: - raise exception.InvalidReplicationTarget(reason=( - _('replication_failover failed. 
%s not found.') % - secondary_id)) - # I don't think we should ever get here. - raise exception.VolumeBackendAPIException(message=( - _('replication_failover failed. ' - 'Backend not configured for failover'))) - - def _get_unmanaged_replay(self, api, volume_name, provider_id, - existing_ref): - replay_name = None - if existing_ref: - replay_name = existing_ref.get('source-name') - if not replay_name: - msg = _('_get_unmanaged_replay: Must specify source-name.') - LOG.error(msg) - raise exception.ManageExistingInvalidReference( - existing_ref=existing_ref, reason=msg) - # Find our volume. - scvolume = api.find_volume(volume_name, provider_id) - if not scvolume: - # Didn't find it. - msg = (_('_get_unmanaged_replay: Cannot find volume id %s') - % volume_name) - LOG.error(msg) - raise exception.VolumeBackendAPIException(data=msg) - # Find our replay. - screplay = api.find_replay(scvolume, replay_name) - if not screplay: - # Didn't find it. Reference must be invalid. - msg = (_('_get_unmanaged_replay: Cannot ' - 'find snapshot named %s') % replay_name) - LOG.error(msg) - raise exception.ManageExistingInvalidReference( - existing_ref=existing_ref, reason=msg) - return screplay - - def manage_existing_snapshot(self, snapshot, existing_ref): - """Brings an existing backend storage object under Cinder management. - - existing_ref is passed straight through from the API request's - manage_existing_ref value, and it is up to the driver how this should - be interpreted. It should be sufficient to identify a storage object - that the driver should somehow associate with the newly-created cinder - snapshot structure. - - There are two ways to do this: - - 1. Rename the backend storage object so that it matches the - snapshot['name'] which is how drivers traditionally map between a - cinder snapshot and the associated backend storage object. - - 2. Place some metadata on the snapshot, or somewhere in the backend, - that allows other driver requests (e.g. 
delete) to locate the - backend storage object when required. - - If the existing_ref doesn't make sense, or doesn't refer to an existing - backend storage object, raise a ManageExistingInvalidReference - exception. - """ - with self._client.open_connection() as api: - # Find our unmanaged snapshot. This will raise on error. - volume_name = snapshot.get('volume_id') - provider_id = snapshot.get('provider_id') - snapshot_id = snapshot.get('id') - screplay = self._get_unmanaged_replay(api, volume_name, - provider_id, existing_ref) - # Manage means update description and update expiration. - if not api.manage_replay(screplay, snapshot_id): - # That didn't work. Error. - msg = (_('manage_existing_snapshot: Error managing ' - 'existing replay %(ss)s on volume %(vol)s') % - {'ss': screplay.get('description'), - 'vol': volume_name}) - LOG.error(msg) - raise exception.VolumeBackendAPIException(data=msg) - - # Life is good. Let the world know what we've done. - LOG.info('manage_existing_snapshot: snapshot %(exist)s on ' - 'volume %(volume)s has been renamed to %(id)s and is ' - 'now managed by Cinder.', - {'exist': screplay.get('description'), - 'volume': volume_name, - 'id': snapshot_id}) - return {'provider_id': screplay['createVolume']['instanceId']} - - # NOTE: Can't use abstractmethod before all drivers implement it - def manage_existing_snapshot_get_size(self, snapshot, existing_ref): - """Return size of snapshot to be managed by manage_existing. - - When calculating the size, round up to the next GB. 
- """ - volume_name = snapshot.get('volume_id') - provider_id = snapshot.get('provider_id') - with self._client.open_connection() as api: - screplay = self._get_unmanaged_replay(api, volume_name, - provider_id, existing_ref) - sz, rem = storagecenter_api.SCApi.size_to_gb( - screplay['size']) - if rem > 0: - raise exception.VolumeBackendAPIException( - data=_('Volume size must be a multiple of 1 GB.')) - return sz - - # NOTE: Can't use abstractmethod before all drivers implement it - def unmanage_snapshot(self, snapshot): - """Removes the specified snapshot from Cinder management. - - Does not delete the underlying backend storage object. - - NOTE: We do set the expire countdown to 1 day. Once a snapshot is - unmanaged it will expire 24 hours later. - """ - with self._client.open_connection() as api: - snapshot_id = snapshot.get('id') - # provider_id is the snapshot's parent volume's instanceId. - provider_id = snapshot.get('provider_id') - volume_name = snapshot.get('volume_id') - # Find our volume. - scvolume = api.find_volume(volume_name, provider_id) - if not scvolume: - # Didn't find it. - msg = (_('unmanage_snapshot: Cannot find volume id %s') - % volume_name) - LOG.error(msg) - raise exception.VolumeBackendAPIException(data=msg) - # Find our replay. - screplay = api.find_replay(scvolume, snapshot_id) - if not screplay: - # Didn't find it. Reference must be invalid. - msg = (_('unmanage_snapshot: Cannot find snapshot named %s') - % snapshot_id) - LOG.error(msg) - raise exception.VolumeBackendAPIException(data=msg) - # Free our snapshot. - api.unmanage_replay(screplay) - # Do not check our result. - - def thaw_backend(self, context): - """Notify the backend that it's unfrozen/thawed. - - This is a gate. We do not allow the backend to be thawed if - it is still failed over. - - :param context: security context - :response: True on success - :raises Invalid: if it cannot be thawed. - """ - # We shouldn't be called if we are not failed over. 
- if self.failed_over: - msg = _('The Dell SC array does not support thawing a failed over' - ' replication. Please migrate volumes to an operational ' - 'back-end or resolve primary system issues and ' - 'fail back to reenable full functionality.') - LOG.error(msg) - raise exception.Invalid(reason=msg) - - return True diff --git a/cinder/volume/drivers/dell_emc/sc/storagecenter_fc.py b/cinder/volume/drivers/dell_emc/sc/storagecenter_fc.py deleted file mode 100644 index 191c40b0d..000000000 --- a/cinder/volume/drivers/dell_emc/sc/storagecenter_fc.py +++ /dev/null @@ -1,305 +0,0 @@ -# Copyright (c) 2015-2017 Dell Inc, or its subsidiaries. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -"""Volume driver for Dell Storage Center.""" - -from oslo_log import log as logging -from oslo_utils import excutils - -from cinder import exception -from cinder.i18n import _ -from cinder import interface -from cinder.volume import driver -from cinder.volume.drivers.dell_emc.sc import storagecenter_common -from cinder.zonemanager import utils as fczm_utils - -LOG = logging.getLogger(__name__) - - -@interface.volumedriver -class SCFCDriver(storagecenter_common.SCCommonDriver, - driver.FibreChannelDriver): - - """Implements commands for Dell Storage Center FC management. - - To enable the driver add the following line to the cinder configuration: - volume_driver=cinder.volume.drivers.dell_emc.sc.dell_storagecenter_fc.\ - SCFCDriver - - Version history: - - .. 
code-block:: none - - 1.0.0 - Initial driver - 1.1.0 - Added extra spec support for Storage Profile selection - 1.2.0 - Added consistency group support. - 2.0.0 - Switched to inheriting functional objects rather than volume - driver. - 2.1.0 - Added support for ManageableVD. - 2.2.0 - Driver retype support for switching volume's Storage Profile - 2.3.0 - Added Legacy Port Mode Support - 2.3.1 - Updated error handling. - 2.4.0 - Added Replication V2 support. - 2.4.1 - Updated Replication support to V2.1. - 2.5.0 - ManageableSnapshotsVD implemented. - 3.0.0 - ProviderID utilized. - 3.1.0 - Failback supported. - 3.2.0 - Live Volume support. - 3.3.0 - Support for a secondary DSM. - 3.4.0 - Support for excluding a domain. - 3.5.0 - Support for AFO. - 3.6.0 - Server type support. - 3.7.0 - Support for Data Reduction, Group QOS and Volume QOS. - 4.0.0 - Driver moved to dell_emc. - - """ - - VERSION = '4.0.0' - - CI_WIKI_NAME = "Dell_Storage_CI" - - def __init__(self, *args, **kwargs): - super(SCFCDriver, self).__init__(*args, **kwargs) - self.backend_name =\ - self.configuration.safe_get('volume_backend_name') or 'Dell-FC' - self.storage_protocol = 'FC' - - def validate_connector(self, connector): - """Fail if connector doesn't contain all the data needed by driver. - - Do a check on the connector and ensure that it has wwnns, wwpns. - """ - self.validate_connector_has_setting(connector, 'wwpns') - self.validate_connector_has_setting(connector, 'wwnns') - - @fczm_utils.add_fc_zone - def initialize_connection(self, volume, connector): - """Initializes the connection and returns connection info. - - Assign any created volume to a compute node/host so that it can be - used from that host. - - The driver returns a driver_volume_type of 'fibre_channel'. - The target_wwn can be a single entry or a list of wwns that - correspond to the list of remote wwn(s) that will export the volume. - """ - - # We use id to name the volume name as it is a - # known unique name. 
- volume_name = volume.get('id') - provider_id = volume.get('provider_id') - islivevol = self._is_live_vol(volume) - LOG.debug('Initialize connection: %s', volume_name) - with self._client.open_connection() as api: - try: - wwpns = connector.get('wwpns') - # Find the volume on the storage center. Note that if this - # is live volume and we are swapped this will be the back - # half of the live volume. - scvolume = api.find_volume(volume_name, provider_id, islivevol) - if scvolume: - # Get the SSN it is on. - ssn = scvolume['instanceId'].split('.')[0] - # Find our server. - scserver = self._find_server(api, wwpns, ssn) - - # No? Create it. - if scserver is None: - scserver = api.create_server( - wwpns, self.configuration.dell_server_os, ssn) - # We have a volume and a server. Map them. - if scserver is not None: - mapping = api.map_volume(scvolume, scserver) - if mapping is not None: - # Since we just mapped our volume we had - # best update our sc volume object. - scvolume = api.get_volume(scvolume['instanceId']) - lun, targets, init_targ_map = api.find_wwns( - scvolume, scserver) - - # Do we have extra live volume work? - if islivevol: - # Get our live volume. - sclivevolume = api.get_live_volume(provider_id) - # Do not map to a failed over volume. - if (sclivevolume and not - api.is_failed_over(provider_id, - sclivevolume)): - # Now map our secondary. - lvlun, lvtargets, lvinit_targ_map = ( - self.initialize_secondary(api, - sclivevolume, - wwpns)) - # Unmapped. Add info to our list. - targets += lvtargets - init_targ_map.update(lvinit_targ_map) - - # Roll up our return data. 
- if lun is not None and len(targets) > 0: - data = {'driver_volume_type': 'fibre_channel', - 'data': {'target_lun': lun, - 'target_discovered': True, - 'target_wwn': targets, - 'initiator_target_map': - init_targ_map, - 'discard': True}} - LOG.debug('Return FC data: %s', data) - return data - LOG.error('Lun mapping returned null!') - - except Exception: - with excutils.save_and_reraise_exception(): - LOG.error('Failed to initialize connection.') - - # We get here because our mapping is none so blow up. - raise exception.VolumeBackendAPIException( - data=_('Unable to map volume.')) - - def _find_server(self, api, wwns, ssn=-1): - for wwn in wwns: - scserver = api.find_server(wwn, ssn) - if scserver is not None: - return scserver - return None - - def initialize_secondary(self, api, sclivevolume, wwns): - """Initialize the secondary connection of a live volume pair. - - :param api: Dell SC api object. - :param sclivevolume: Dell SC live volume object. - :param wwns: Cinder list of wwns from the connector. - :return: lun, targets and initiator target map. - """ - # Find our server. - secondary = self._find_server( - api, wwns, sclivevolume['secondaryScSerialNumber']) - - # No? Create it. - if secondary is None: - secondary = api.create_server( - wwns, self.configuration.dell_server_os, - sclivevolume['secondaryScSerialNumber']) - if secondary: - if api.map_secondary_volume(sclivevolume, secondary): - # Get mappings. - secondaryvol = api.get_volume( - sclivevolume['secondaryVolume']['instanceId']) - if secondaryvol: - return api.find_wwns(secondaryvol, secondary) - LOG.warning('Unable to map live volume secondary volume' - ' %(vol)s to secondary server wwns: %(wwns)r', - {'vol': sclivevolume['secondaryVolume']['instanceName'], - 'wwns': wwns}) - return None, [], {} - - @fczm_utils.remove_fc_zone - def terminate_connection(self, volume, connector, force=False, **kwargs): - # Grab some quick info. 
- volume_name = volume.get('id') - provider_id = volume.get('provider_id') - LOG.debug('Terminate connection: %s', volume_name) - - with self._client.open_connection() as api: - try: - wwpns = [] if not connector else connector.get('wwpns', []) - # Find the volume on the storage center. - islivevol = self._is_live_vol(volume) - scvolume = api.find_volume(volume_name, provider_id, islivevol) - if scvolume: - # Get the SSN it is on. - ssn = scvolume['instanceId'].split('.')[0] - - # Will be None if we have no wwpns. - scserver = self._find_server(api, wwpns, ssn) - - # Get our target map so we can return it to free up a zone. - lun, targets, init_targ_map = api.find_wwns(scvolume, - scserver) - - # Do we have extra live volume work? - if islivevol: - # Get our live volume. - sclivevolume = api.get_live_volume(provider_id) - # Do not map to a failed over volume. - if (sclivevolume and not - api.is_failed_over(provider_id, - sclivevolume)): - lvlun, lvtargets, lvinit_targ_map = ( - self.terminate_secondary( - api, sclivevolume, wwpns)) - # Add to our return. - if lvlun: - targets += lvtargets - init_targ_map.update(lvinit_targ_map) - - if (wwpns and scserver and - api.unmap_volume(scvolume, scserver) is True): - LOG.debug('Connection terminated') - elif not wwpns and api.unmap_all(scvolume): - LOG.debug('All connections terminated') - else: - raise exception.VolumeBackendAPIException( - data=_('Terminate connection failed')) - - # basic return info... - info = {'driver_volume_type': 'fibre_channel', - 'data': {}} - - # if not then we return the target map so that - # the zone can be freed up. 
- if scserver and api.get_volume_count(scserver) == 0: - info['data'] = {'target_wwn': targets, - 'initiator_target_map': init_targ_map} - return info - - except Exception: - with excutils.save_and_reraise_exception(): - LOG.error('Failed to terminate connection') - raise exception.VolumeBackendAPIException( - data=_('Terminate connection unable to connect to backend.')) - - def terminate_secondary(self, api, sclivevolume, wwns): - lun = None - targets = [] - init_targ_map = {} - # Get our volume. - secondaryvol = api.get_volume( - sclivevolume['secondaryVolume']['instanceId']) - # We have one so let's get to work. - if secondaryvol: - # Are we unmapping a specific server? - if wwns: - # Find our server. - secondary = self._find_server( - api, wwns, sclivevolume['secondaryScSerialNumber']) - # Get our map. - lun, targets, init_targ_map = api.find_wwns(secondaryvol, - secondary) - # If we have a server and a volume lets unmap them. - ret = api.unmap_volume(secondaryvol, secondary) - LOG.debug('terminate_secondary: ' - 'secondary volume %(name)s unmap ' - 'to secondary server %(server)s result: %(result)r', - {'name': secondaryvol['name'], - 'server': secondary['name'], 'result': ret}) - else: - # Just unmap all. - ret = api.unmap_all(secondaryvol) - LOG.debug('terminate_secondary: secondary volume %(name)s ' - 'unmap all result: %(result)r', - {'name': secondaryvol['name'], 'result': ret}) - else: - LOG.debug('terminate_secondary: secondary volume not found.') - # return info if any - return lun, targets, init_targ_map diff --git a/cinder/volume/drivers/dell_emc/sc/storagecenter_iscsi.py b/cinder/volume/drivers/dell_emc/sc/storagecenter_iscsi.py deleted file mode 100644 index 994bd5235..000000000 --- a/cinder/volume/drivers/dell_emc/sc/storagecenter_iscsi.py +++ /dev/null @@ -1,282 +0,0 @@ -# Copyright (c) 2015-2017 Dell Inc, or its subsidiaries. 
-# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -"""Volume driver for Dell Storage Center.""" - -from oslo_log import log as logging -from oslo_utils import excutils - -from cinder import exception -from cinder.i18n import _ -from cinder import interface -from cinder.volume import driver -from cinder.volume.drivers.dell_emc.sc import storagecenter_common - -LOG = logging.getLogger(__name__) - - -@interface.volumedriver -class SCISCSIDriver(storagecenter_common.SCCommonDriver, - driver.ISCSIDriver): - - """Implements commands for Dell Storage Center ISCSI management. - - To enable the driver add the following line to the cinder configuration: - volume_driver=cinder.volume.drivers.dell_emc.sc.\ - dell_storagecenter_iscsi.SCISCSIDriver - - Version history: - - .. code-block:: none - - 1.0.0 - Initial driver - 1.1.0 - Added extra spec support for Storage Profile selection - 1.2.0 - Added consistency group support. - 2.0.0 - Switched to inheriting functional objects rather than volume - driver. - 2.1.0 - Added support for ManageableVD. - 2.2.0 - Driver retype support for switching volume's Storage Profile. - Added API 2.2 support. - 2.3.0 - Added Legacy Port Mode Support - 2.3.1 - Updated error handling. - 2.4.0 - Added Replication V2 support. - 2.4.1 - Updated Replication support to V2.1. - 2.5.0 - ManageableSnapshotsVD implemented. - 3.0.0 - ProviderID utilized. - 3.1.0 - Failback Supported. - 3.2.0 - Live Volume support. - 3.3.0 - Support for a secondary DSM. 
- 3.4.0 - Support for excluding a domain. - 3.5.0 - Support for AFO. - 3.6.0 - Server type support. - 3.7.0 - Support for Data Reduction, Group QOS and Volume QOS. - 4.0.0 - Driver moved to dell_emc. - - """ - - VERSION = '4.0.0' - CI_WIKI_NAME = "Dell_Storage_CI" - - def __init__(self, *args, **kwargs): - super(SCISCSIDriver, self).__init__(*args, **kwargs) - self.backend_name = ( - self.configuration.safe_get('volume_backend_name') or 'Dell-iSCSI') - - def initialize_connection(self, volume, connector): - # Initialize_connection will find or create a server identified by the - # connector on the Dell backend. It will then map the volume to it - # and return the properties as follows.. - # {'driver_volume_type': 'iscsi', - # data = {'target_discovered': False, - # 'target_iqn': preferred iqn, - # 'target_iqns': all iqns, - # 'target_portal': preferred portal, - # 'target_portals': all portals, - # 'target_lun': preferred lun, - # 'target_luns': all luns, - # } - - # We use id to name the volume name as it is a - # known unique name. - volume_name = volume.get('id') - provider_id = volume.get('provider_id') - islivevol = self._is_live_vol(volume) - initiator_name = connector.get('initiator') - multipath = connector.get('multipath', False) - LOG.info('initialize_ connection: %(vol)s:%(pid)s:' - '%(intr)s. Multipath is %(mp)r', - {'vol': volume_name, - 'pid': provider_id, - 'intr': initiator_name, - 'mp': multipath}) - - with self._client.open_connection() as api: - try: - # Find the volume on the storage center. Note that if this - # is live volume and we are swapped this will be the back - # half of the live volume. - scvolume = api.find_volume(volume_name, provider_id, islivevol) - if scvolume: - # Get the SSN it is on. - ssn = scvolume['instanceId'].split('.')[0] - # Find our server. - scserver = api.find_server(initiator_name, ssn) - # No? Create it. 
- if scserver is None: - scserver = api.create_server( - [initiator_name], - self.configuration.dell_server_os, ssn) - - # if we have a server and a volume lets bring them - # together. - if scserver is not None: - mapping = api.map_volume(scvolume, scserver) - if mapping is not None: - # Since we just mapped our volume we had best - # update our sc volume object. - scvolume = api.get_volume(scvolume['instanceId']) - # Our return. - iscsiprops = {} - - # Three cases that should all be satisfied with the - # same return of Target_Portal and Target_Portals. - # 1. Nova is calling us so we need to return the - # Target_Portal stuff. It should ignore the - # Target_Portals stuff. - # 2. OS brick is calling us in multipath mode so we - # want to return Target_Portals. It will ignore - # the Target_Portal stuff. - # 3. OS brick is calling us in single path mode so - # we want to return Target_Portal and - # Target_Portals as alternates. - iscsiprops = api.find_iscsi_properties(scvolume) - - # If this is a live volume we need to map up our - # secondary volume. Note that if we have failed - # over we do not wish to do this. - if islivevol: - sclivevolume = api.get_live_volume(provider_id) - # Only map if we are not failed over. - if (sclivevolume and not - api.is_failed_over(provider_id, - sclivevolume)): - secondaryprops = self.initialize_secondary( - api, sclivevolume, initiator_name) - # Combine with iscsiprops - iscsiprops['target_iqns'] += ( - secondaryprops['target_iqns']) - iscsiprops['target_portals'] += ( - secondaryprops['target_portals']) - iscsiprops['target_luns'] += ( - secondaryprops['target_luns']) - - # Return our iscsi properties. - iscsiprops['discard'] = True - return {'driver_volume_type': 'iscsi', - 'data': iscsiprops} - # Re-raise any backend exception. 
- except exception.VolumeBackendAPIException: - with excutils.save_and_reraise_exception(): - LOG.error('Failed to initialize connection') - # If there is a data structure issue then detail the exception - # and bail with a Backend Exception. - except Exception as error: - LOG.error(error) - raise exception.VolumeBackendAPIException(error) - - # We get here because our mapping is none or we have no valid iqn to - # return so blow up. - raise exception.VolumeBackendAPIException( - _('Unable to map volume')) - - def initialize_secondary(self, api, sclivevolume, initiatorname): - """Initialize the secondary connection of a live volume pair. - - :param api: Dell SC api. - :param sclivevolume: Dell SC live volume object. - :param initiatorname: Cinder iscsi initiator from the connector. - :return: ISCSI properties. - """ - - # Find our server. - secondary = api.find_server(initiatorname, - sclivevolume['secondaryScSerialNumber']) - # No? Create it. - if secondary is None: - secondary = api.create_server( - [initiatorname], self.configuration.dell_server_os, - sclivevolume['secondaryScSerialNumber']) - if secondary: - if api.map_secondary_volume(sclivevolume, secondary): - # Get our volume and get our properties. - secondaryvol = api.get_volume( - sclivevolume['secondaryVolume']['instanceId']) - if secondaryvol: - return api.find_iscsi_properties(secondaryvol) - # Dummy return on failure. - data = {'target_discovered': False, - 'target_iqn': None, - 'target_iqns': [], - 'target_portal': None, - 'target_portals': [], - 'target_lun': None, - 'target_luns': [], - } - LOG.warning('Unable to map live volume secondary volume' - ' %(vol)s to secondary server intiator: %(init)r', - {'vol': sclivevolume['secondaryVolume']['instanceName'], - 'init': initiatorname}) - return data - - def terminate_connection(self, volume, connector, force=False, **kwargs): - # Grab some quick info. 
- volume_name = volume.get('id') - provider_id = volume.get('provider_id') - initiator_name = None if not connector else connector.get('initiator') - LOG.debug('Terminate connection: %(vol)s:%(initiator)s', - {'vol': volume_name, - 'initiator': initiator_name}) - - with self._client.open_connection() as api: - try: - # Find the volume on the storage center. Note that if this - # is live volume and we are swapped this will be the back - # half of the live volume. - islivevol = self._is_live_vol(volume) - scvolume = api.find_volume(volume_name, provider_id, islivevol) - if scvolume: - # Get the SSN it is on. - ssn = scvolume['instanceId'].split('.')[0] - - # Unmap our secondary if not failed over.. - if islivevol: - sclivevolume = api.get_live_volume(provider_id) - if (sclivevolume and not - api.is_failed_over(provider_id, - sclivevolume)): - self.terminate_secondary(api, sclivevolume, - initiator_name) - - # Find our server. - scserver = (None if not initiator_name else - api.find_server(initiator_name, ssn)) - - # If we have a server and a volume lets pull them apart. - if ((scserver and - api.unmap_volume(scvolume, scserver) is True) or - (not scserver and api.unmap_all(scvolume))): - LOG.debug('Connection terminated') - return - except Exception: - with excutils.save_and_reraise_exception(): - LOG.error('Failed to terminate connection ' - '%(initiator)s %(vol)s', - {'initiator': initiator_name, - 'vol': volume_name}) - raise exception.VolumeBackendAPIException( - _('Terminate connection failed')) - - def terminate_secondary(self, api, sclivevolume, initiatorname): - secondaryvol = api.get_volume( - sclivevolume['secondaryVolume']['instanceId']) - if secondaryvol: - if initiatorname: - # Find our server. 
- secondary = api.find_server( - initiatorname, sclivevolume['secondaryScSerialNumber']) - return api.unmap_volume(secondaryvol, secondary) - else: - return api.unmap_all(secondaryvol) - else: - LOG.debug('terminate_secondary: secondary volume not found.') diff --git a/cinder/volume/drivers/dell_emc/scaleio/__init__.py b/cinder/volume/drivers/dell_emc/scaleio/__init__.py deleted file mode 100644 index e69de29bb..000000000 diff --git a/cinder/volume/drivers/dell_emc/scaleio/driver.py b/cinder/volume/drivers/dell_emc/scaleio/driver.py deleted file mode 100644 index d52dc2ad1..000000000 --- a/cinder/volume/drivers/dell_emc/scaleio/driver.py +++ /dev/null @@ -1,1732 +0,0 @@ -# Copyright (c) 2013 - 2015 EMC Corporation. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -""" -Driver for Dell EMC ScaleIO based on ScaleIO remote CLI. 
-""" - -import base64 -import binascii -from distutils import version -import json -import math -from os_brick.initiator import connector -from oslo_config import cfg -from oslo_log import log as logging -from oslo_log import versionutils -from oslo_utils import units -import re -import requests -import six -from six.moves import http_client -from six.moves import urllib - -from cinder import context -from cinder import exception -from cinder.i18n import _ -from cinder.image import image_utils -from cinder import interface -from cinder import objects -from cinder import utils - -from cinder.objects import fields -from cinder.volume import configuration -from cinder.volume import driver -from cinder.volume.drivers.san import san -from cinder.volume import qos_specs -from cinder.volume import utils as volume_utils -from cinder.volume import volume_types - -CONF = cfg.CONF - -LOG = logging.getLogger(__name__) - -scaleio_opts = [ - cfg.StrOpt('sio_rest_server_port', - default='443', - help='REST server port.'), - cfg.BoolOpt('sio_verify_server_certificate', - default=False, - help='Verify server certificate.'), - cfg.StrOpt('sio_server_certificate_path', - help='Server certificate path.'), - cfg.BoolOpt('sio_round_volume_capacity', - default=True, - help='Round up volume capacity.'), - cfg.BoolOpt('sio_unmap_volume_before_deletion', - default=False, - help='Unmap volume before deletion.'), - cfg.StrOpt('sio_storage_pools', - help='Storage Pools.'), - cfg.StrOpt('sio_protection_domain_id', - deprecated_for_removal=True, - deprecated_reason="Replaced by sio_storage_pools option", - deprecated_since="Pike", - help='DEPRECATED: Protection Domain ID.'), - cfg.StrOpt('sio_protection_domain_name', - deprecated_for_removal=True, - deprecated_reason="Replaced by sio_storage_pools option", - deprecated_since="Pike", - help='DEPRECATED: Protection Domain name.'), - cfg.StrOpt('sio_storage_pool_name', - deprecated_for_removal=True, - deprecated_reason="Replaced by 
sio_storage_pools option", - deprecated_since="Pike", - help='DEPRECATED: Storage Pool name.'), - cfg.StrOpt('sio_storage_pool_id', - deprecated_for_removal=True, - deprecated_reason="Replaced by sio_storage_pools option", - deprecated_since="Pike", - help='DEPRECATED: Storage Pool ID.'), - cfg.StrOpt('sio_server_api_version', - help='ScaleIO API version.'), - cfg.FloatOpt('sio_max_over_subscription_ratio', - # This option exists to provide a default value for the - # ScaleIO driver which is different than the global default. - default=10.0, - help='max_over_subscription_ratio setting for the ScaleIO ' - 'driver. This replaces the general ' - 'max_over_subscription_ratio which has no effect ' - 'in this driver.' - 'Maximum value allowed for ScaleIO is 10.0.') -] - -CONF.register_opts(scaleio_opts, group=configuration.SHARED_CONF_GROUP) - -STORAGE_POOL_NAME = 'sio:sp_name' -STORAGE_POOL_ID = 'sio:sp_id' -PROTECTION_DOMAIN_NAME = 'sio:pd_name' -PROTECTION_DOMAIN_ID = 'sio:pd_id' -PROVISIONING_KEY = 'provisioning:type' -OLD_PROVISIONING_KEY = 'sio:provisioning_type' -IOPS_LIMIT_KEY = 'sio:iops_limit' -BANDWIDTH_LIMIT = 'sio:bandwidth_limit' -QOS_IOPS_LIMIT_KEY = 'maxIOPS' -QOS_BANDWIDTH_LIMIT = 'maxBWS' -QOS_IOPS_PER_GB = 'maxIOPSperGB' -QOS_BANDWIDTH_PER_GB = 'maxBWSperGB' - -BLOCK_SIZE = 8 -VOLUME_NOT_FOUND_ERROR = 79 -# This code belongs to older versions of ScaleIO -OLD_VOLUME_NOT_FOUND_ERROR = 78 -VOLUME_NOT_MAPPED_ERROR = 84 -ILLEGAL_SYNTAX = 0 -VOLUME_ALREADY_MAPPED_ERROR = 81 -MIN_BWS_SCALING_SIZE = 128 -SIO_MAX_OVERSUBSCRIPTION_RATIO = 10.0 - - -@interface.volumedriver -class ScaleIODriver(driver.VolumeDriver): - """Dell EMC ScaleIO Driver.""" - - VERSION = "2.0.2" - # Major changes - # 2.0.1: Added support for SIO 1.3x in addition to 2.0.x - # 2.0.2: Added consistency group support to generic volume groups - - # ThirdPartySystems wiki - CI_WIKI_NAME = "EMC_ScaleIO_CI" - - scaleio_qos_keys = (QOS_IOPS_LIMIT_KEY, QOS_BANDWIDTH_LIMIT, - QOS_IOPS_PER_GB, 
QOS_BANDWIDTH_PER_GB) - - def __init__(self, *args, **kwargs): - super(ScaleIODriver, self).__init__(*args, **kwargs) - - self.configuration.append_config_values(san.san_opts) - self.configuration.append_config_values(scaleio_opts) - self.server_ip = self.configuration.san_ip - self.server_port = self.configuration.sio_rest_server_port - self.server_username = self.configuration.san_login - self.server_password = self.configuration.san_password - self.server_token = None - self.server_api_version = self.configuration.sio_server_api_version - # list of statistics/properties to query from SIO - self.statisticProperties = None - self.verify_server_certificate = ( - self.configuration.sio_verify_server_certificate) - self.server_certificate_path = None - if self.verify_server_certificate: - self.server_certificate_path = ( - self.configuration.sio_server_certificate_path) - LOG.info("REST server IP: %(ip)s, port: %(port)s, username: %(" - "user)s. Verify server's certificate: %(verify_cert)s.", - {'ip': self.server_ip, - 'port': self.server_port, - 'user': self.server_username, - 'verify_cert': self.verify_server_certificate}) - - # starting in Pike, prefer the sio_storage_pools option - self.storage_pools = None - if self.configuration.sio_storage_pools: - self.storage_pools = [ - e.strip() for e in - self.configuration.sio_storage_pools.split(',')] - LOG.info("Storage pools names: %(pools)s.", - {'pools': self.storage_pools}) - - LOG.info("Storage pool name: %(pool)s, pool id: %(pool_id)s.", - {'pool': self.configuration.sio_storage_pool_name, - 'pool_id': self.configuration.sio_storage_pool_id}) - - LOG.info("Protection domain name: %(domain)s, " - "domain id: %(domain_id)s.", - {'domain': self.configuration.sio_protection_domain_name, - 'domain_id': self.configuration.sio_protection_domain_id}) - - self.provisioning_type = ( - 'thin' if self.configuration.san_thin_provision else 'thick') - LOG.info("Default provisioning type: %(provisioning_type)s.", - 
{'provisioning_type': self.provisioning_type}) - self.configuration.max_over_subscription_ratio = ( - self.configuration.sio_max_over_subscription_ratio) - self.connector = connector.InitiatorConnector.factory( - connector.SCALEIO, utils.get_root_helper(), - self.configuration.num_volume_device_scan_tries - ) - - self.connection_properties = { - 'scaleIO_volname': None, - 'hostIP': None, - 'serverIP': self.server_ip, - 'serverPort': self.server_port, - 'serverUsername': self.server_username, - 'serverPassword': self.server_password, - 'serverToken': self.server_token, - 'iopsLimit': None, - 'bandwidthLimit': None, - } - - # simple cache for domain and sp ids - self.cache_pd = {} - self.cache_sp = {} - - def check_for_setup_error(self): - # make sure both domain name and id are not specified - if (self.configuration.sio_protection_domain_name - and self.configuration.sio_protection_domain_id): - msg = _("Cannot specify both protection domain name " - "and protection domain id.") - raise exception.InvalidInput(reason=msg) - - # make sure both storage pool and id are not specified - if (self.configuration.sio_storage_pool_name - and self.configuration.sio_storage_pool_id): - msg = _("Cannot specify both storage pool name and storage " - "pool id.") - raise exception.InvalidInput(reason=msg) - - # make sure the REST gateway is specified - if not self.server_ip: - msg = _("REST server IP must be specified.") - raise exception.InvalidInput(reason=msg) - - # make sure we got a username - if not self.server_username: - msg = _("REST server username must be specified.") - raise exception.InvalidInput(reason=msg) - - # make sure we got a password - if not self.server_password: - msg = _("REST server password must be specified.") - raise exception.InvalidInput(reason=msg) - - # validate certificate settings - if self.verify_server_certificate and not self.server_certificate_path: - msg = _("Path to REST server's certificate must be specified.") - raise 
exception.InvalidInput(reason=msg) - - # log warning if not using certificates - if not self.verify_server_certificate: - LOG.warning("Verify certificate is not set, using default of " - "False.") - - # validate oversubscription ration - if (self.configuration.max_over_subscription_ratio is not None and - (self.configuration.max_over_subscription_ratio - - SIO_MAX_OVERSUBSCRIPTION_RATIO > 1)): - msg = (_("Max over subscription is configured to %(ratio)1f " - "while ScaleIO support up to %(sio_ratio)s.") % - {'sio_ratio': SIO_MAX_OVERSUBSCRIPTION_RATIO, - 'ratio': self.configuration.max_over_subscription_ratio}) - raise exception.InvalidInput(reason=msg) - - # validate that version of ScaleIO is supported - server_api_version = self._get_server_api_version(fromcache=False) - if not self._version_greater_than_or_equal( - server_api_version, "2.0.0"): - # we are running against a pre-2.0.0 ScaleIO instance - msg = (_("Using ScaleIO versions less than v2.0.0 has been " - "deprecated and will be removed in a future version")) - versionutils.report_deprecated_feature(LOG, msg) - - # we have enough information now to validate pools - self.storage_pools = self._build_storage_pool_list() - if not self.storage_pools: - msg = (_("Must specify storage pools. 
Option: " - "sio_storage_pools.")) - raise exception.InvalidInput(reason=msg) - - def _build_storage_pool_list(self): - """Build storage pool list - - This method determines the list of storage pools that - are requested, by concatenating a few config settings - """ - # start with the list of pools supplied in the configuration - pools = self.storage_pools - # append the domain:pool specified individually - if (self.configuration.sio_storage_pool_name is not None and - self.configuration.sio_protection_domain_name is not None): - extra_pool = "{}:{}".format( - self.configuration.sio_protection_domain_name, - self.configuration.sio_storage_pool_name) - LOG.info("Ensuring %s is in the list of configured pools.", - extra_pool) - if pools is None: - pools = [] - if extra_pool not in pools: - pools.append(extra_pool) - # if specified, account for the storage_pool_id - if self.configuration.sio_storage_pool_id is not None: - # the user specified a storage pool id - # get the domain and pool names from SIO - extra_pool = self._get_storage_pool_name( - self.configuration.sio_storage_pool_id) - LOG.info("Ensuring %s is in the list of configured pools.", - extra_pool) - if pools is None: - pools = [] - if extra_pool not in pools: - pools.append(extra_pool) - - return pools - - def _get_queryable_statistics(self, sio_type, sio_id): - if self.statisticProperties is None: - self.statisticProperties = [ - "capacityAvailableForVolumeAllocationInKb", - "capacityLimitInKb", "spareCapacityInKb", - "thickCapacityInUseInKb"] - # version 2.0 of SIO introduced thin volumes - if self._version_greater_than_or_equal( - self._get_server_api_version(), - "2.0.0"): - # check to see if thinCapacityAllocatedInKb is valid - # needed due to non-backwards compatible API - req_vars = {'server_ip': self.server_ip, - 'server_port': self.server_port, - 'sio_type': sio_type} - request = ("https://%(server_ip)s:%(server_port)s" - "/api/types/%(sio_type)s/instances/action/" - "querySelectedStatistics") % 
req_vars - params = {'ids': [sio_id], - 'properties': ["thinCapacityAllocatedInKb"]} - r, response = self._execute_scaleio_post_request(params, - request) - if r.status_code == http_client.OK: - # is it valid, use it - self.statisticProperties.append( - "thinCapacityAllocatedInKb") - else: - # it is not valid, assume use of thinCapacityAllocatedInKm - self.statisticProperties.append( - "thinCapacityAllocatedInKm") - - return self.statisticProperties - - def _find_storage_pool_id_from_storage_type(self, storage_type): - # Default to what was configured in configuration file if not defined. - return storage_type.get(STORAGE_POOL_ID) - - def _find_storage_pool_name_from_storage_type(self, storage_type): - pool_name = storage_type.get(STORAGE_POOL_NAME) - # using the extra spec of sio:sp_name is deprecated - if pool_name is not None: - LOG.warning("Using the volume type extra spec of " - "sio:sp_name is deprecated and will be removed " - "in a future version. The supported way to " - "specify this is by specifying an extra spec " - "of 'pool_name=protection_domain:storage_pool'") - return pool_name - - def _find_protection_domain_id_from_storage_type(self, storage_type): - # Default to what was configured in configuration file if not defined. - return storage_type.get(PROTECTION_DOMAIN_ID) - - def _find_protection_domain_name_from_storage_type(self, storage_type): - domain_name = storage_type.get(PROTECTION_DOMAIN_NAME) - # using the extra spec of sio:pd_name is deprecated - if domain_name is not None: - LOG.warning("Using the volume type extra spec of " - "sio:pd_name is deprecated and will be removed " - "in a future version. 
The supported way to " - "specify this is by specifying an extra spec " - "of 'pool_name=protection_domain:storage_pool'") - return domain_name - - def _find_provisioning_type(self, storage_type): - new_provisioning_type = storage_type.get(PROVISIONING_KEY) - old_provisioning_type = storage_type.get(OLD_PROVISIONING_KEY) - if new_provisioning_type is None and old_provisioning_type is not None: - LOG.info("Using sio:provisioning_type for defining " - "thin or thick volume will be deprecated in the " - "Ocata release of OpenStack. Please use " - "provisioning:type configuration option.") - provisioning_type = old_provisioning_type - else: - provisioning_type = new_provisioning_type - - if provisioning_type is not None: - if provisioning_type not in ('thick', 'thin'): - msg = _("Illegal provisioning type. The supported " - "provisioning types are 'thick' or 'thin'.") - raise exception.VolumeBackendAPIException(data=msg) - return provisioning_type - else: - return self.provisioning_type - - @staticmethod - def _find_limit(storage_type, qos_key, extraspecs_key): - qos_limit = (storage_type.get(qos_key) - if qos_key is not None else None) - extraspecs_limit = (storage_type.get(extraspecs_key) - if extraspecs_key is not None else None) - if extraspecs_limit is not None: - if qos_limit is not None: - LOG.warning("QoS specs are overriding extra_specs.") - else: - LOG.info("Using extra_specs for defining QoS specs " - "will be deprecated in the N release " - "of OpenStack. 
Please use QoS specs.") - return qos_limit if qos_limit is not None else extraspecs_limit - - @staticmethod - def _version_greater_than(ver1, ver2): - return version.LooseVersion(ver1) > version.LooseVersion(ver2) - - @staticmethod - def _version_greater_than_or_equal(ver1, ver2): - return version.LooseVersion(ver1) >= version.LooseVersion(ver2) - - @staticmethod - def _convert_kb_to_gib(size): - return int(math.ceil(float(size) / units.Mi)) - - @staticmethod - def _id_to_base64(id): - # Base64 encode the id to get a volume name less than 32 characters due - # to ScaleIO limitation. - name = six.text_type(id).replace("-", "") - try: - name = base64.b16decode(name.upper()) - except (TypeError, binascii.Error): - pass - encoded_name = name - if isinstance(encoded_name, six.text_type): - encoded_name = encoded_name.encode('utf-8') - encoded_name = base64.b64encode(encoded_name) - if six.PY3: - encoded_name = encoded_name.decode('ascii') - LOG.debug("Converted id %(id)s to scaleio name %(name)s.", - {'id': id, 'name': encoded_name}) - return encoded_name - - def create_volume(self, volume): - """Creates a scaleIO volume.""" - self._check_volume_size(volume.size) - - volname = self._id_to_base64(volume.id) - - # the cinder scheduler will send us the pd:sp for the volume - requested_pd = None - requested_sp = None - try: - pd_sp = volume_utils.extract_host(volume.host, 'pool') - if pd_sp is not None: - requested_pd = pd_sp.split(':')[0] - requested_sp = pd_sp.split(':')[1] - except (KeyError, ValueError): - # we seem to have not gotten it so we'll figure out defaults - requested_pd = None - requested_sp = None - - storage_type = self._get_volumetype_extraspecs(volume) - type_sp = self._find_storage_pool_name_from_storage_type(storage_type) - storage_pool_id = self._find_storage_pool_id_from_storage_type( - storage_type) - protection_domain_id = ( - self._find_protection_domain_id_from_storage_type(storage_type)) - type_pd = ( - 
self._find_protection_domain_name_from_storage_type(storage_type)) - provisioning_type = self._find_provisioning_type(storage_type) - - if type_sp is not None: - # prefer the storage pool in the volume type - # this was undocumented so will likely not happen - storage_pool_name = type_sp - else: - storage_pool_name = requested_sp - if type_pd is not None: - # prefer the protection domain in the volume type - # this was undocumented so will likely not happen - protection_domain_name = type_pd - else: - protection_domain_name = requested_pd - - # check if the requested pd:sp match the ones that will - # be used. If not, spit out a deprecation notice - # should never happen - if (protection_domain_name != requested_pd - or storage_pool_name != requested_sp): - LOG.warning( - "Creating volume in different protection domain or " - "storage pool than scheduler requested. " - "Requested: %(req_pd)s:%(req_sp)s, " - "Actual %(act_pd)s:%(act_sp)s.", - {'req_pd': requested_pd, - 'req_sp': requested_sp, - 'act_pd': protection_domain_name, - 'act_sp': storage_pool_name}) - - LOG.info("Volume type: %(volume_type)s, " - "storage pool name: %(pool_name)s, " - "storage pool id: %(pool_id)s, protection domain id: " - "%(domain_id)s, protection domain name: %(domain_name)s.", - {'volume_type': storage_type, - 'pool_name': storage_pool_name, - 'pool_id': storage_pool_id, - 'domain_id': protection_domain_id, - 'domain_name': protection_domain_name}) - - domain_id = self._get_protection_domain_id(protection_domain_name) - LOG.info("Domain id is %s.", domain_id) - pool_id = self._get_storage_pool_id(protection_domain_name, - storage_pool_name) - LOG.info("Pool id is %s.", pool_id) - - if provisioning_type == 'thin': - provisioning = "ThinProvisioned" - # Default volume type is thick. 
- else: - provisioning = "ThickProvisioned" - - # units.Mi = 1024 ** 2 - volume_size_kb = volume.size * units.Mi - params = {'protectionDomainId': domain_id, - 'volumeSizeInKb': six.text_type(volume_size_kb), - 'name': volname, - 'volumeType': provisioning, - 'storagePoolId': pool_id} - - LOG.info("Params for add volume request: %s.", params) - req_vars = {'server_ip': self.server_ip, - 'server_port': self.server_port} - request = ("https://%(server_ip)s:%(server_port)s" - "/api/types/Volume/instances") % req_vars - r, response = self._execute_scaleio_post_request(params, request) - - if r.status_code != http_client.OK and "errorCode" in response: - msg = (_("Error creating volume: %s.") % response['message']) - LOG.error(msg) - raise exception.VolumeBackendAPIException(data=msg) - - LOG.info("Created volume %(volname)s, volume id %(volid)s.", - {'volname': volname, 'volid': volume.id}) - - real_size = int(self._round_to_num_gran(volume.size)) - - return {'provider_id': response['id'], 'size': real_size} - - def _check_volume_size(self, size): - if size % 8 != 0: - round_volume_capacity = ( - self.configuration.sio_round_volume_capacity) - if not round_volume_capacity: - exception_msg = (_( - "Cannot create volume of size %s: " - "not multiple of 8GB.") % size) - LOG.error(exception_msg) - raise exception.VolumeBackendAPIException(data=exception_msg) - - def create_snapshot(self, snapshot): - """Creates a scaleio snapshot.""" - volume_id = snapshot.volume.provider_id - snapname = self._id_to_base64(snapshot.id) - return self._snapshot_volume(volume_id, snapname) - - def _snapshot_volume(self, vol_id, snapname): - LOG.info("Snapshot volume %(vol)s into snapshot %(id)s.", - {'vol': vol_id, 'id': snapname}) - params = { - 'snapshotDefs': [{"volumeId": vol_id, "snapshotName": snapname}]} - req_vars = {'server_ip': self.server_ip, - 'server_port': self.server_port} - request = ("https://%(server_ip)s:%(server_port)s" - "/api/instances/System/action/snapshotVolumes") % 
req_vars - r, response = self._execute_scaleio_post_request(params, request) - if r.status_code != http_client.OK and "errorCode" in response: - msg = (_("Failed creating snapshot for volume %(volname)s: " - "%(response)s.") % - {'volname': vol_id, - 'response': response['message']}) - LOG.error(msg) - raise exception.VolumeBackendAPIException(data=msg) - - return {'provider_id': response['volumeIdList'][0]} - - def _execute_scaleio_post_request(self, params, request): - r = requests.post( - request, - data=json.dumps(params), - headers=self._get_headers(), - auth=( - self.server_username, - self.server_token), - verify=self._get_verify_cert()) - r = self._check_response(r, request, False, params) - response = None - try: - response = r.json() - except ValueError: - response = None - return r, response - - def _check_response(self, response, request, is_get_request=True, - params=None): - if (response.status_code == http_client.UNAUTHORIZED or - response.status_code == http_client.FORBIDDEN): - LOG.info("Token is invalid, going to re-login and get " - "a new one.") - login_request = ( - "https://" + self.server_ip + - ":" + self.server_port + "/api/login") - verify_cert = self._get_verify_cert() - r = requests.get( - login_request, - auth=( - self.server_username, - self.server_password), - verify=verify_cert) - token = r.json() - self.server_token = token - # Repeat request with valid token. 
- LOG.info("Going to perform request again %s with valid token.", - request) - if is_get_request: - response = requests.get(request, - auth=(self.server_username, - self.server_token), - verify=verify_cert) - else: - response = requests.post(request, - data=json.dumps(params), - headers=self._get_headers(), - auth=(self.server_username, - self.server_token), - verify=verify_cert) - - level = logging.DEBUG - # for anything other than an OK from the REST API, log an error - if response.status_code != http_client.OK: - level = logging.ERROR - - LOG.log(level, "REST Request: %s with params %s", - request, - json.dumps(params)) - LOG.log(level, "REST Response: %s with data %s", - response.status_code, - response.text) - - return response - - def _get_server_api_version(self, fromcache=True): - if self.server_api_version is None or fromcache is False: - request = ( - "https://" + self.server_ip + - ":" + self.server_port + "/api/version") - r, unused = self._execute_scaleio_get_request(request) - - if r.status_code == http_client.OK: - self.server_api_version = r.text.replace('\"', '') - LOG.info("REST API Version: %(api_version)s", - {'api_version': self.server_api_version}) - else: - msg = (_("Error calling version api " - "status code: %d") % r.status_code) - raise exception.VolumeBackendAPIException(data=msg) - - # make sure the response was valid - pattern = re.compile(r"^\d+(\.\d+)*$") - if not pattern.match(self.server_api_version): - msg = (_("Error calling version api " - "response: %s") % r.text) - raise exception.VolumeBackendAPIException(data=msg) - - return self.server_api_version - - def create_volume_from_snapshot(self, volume, snapshot): - """Creates a volume from a snapshot.""" - # We interchange 'volume' and 'snapshot' because in ScaleIO - # snapshot is a volume: once a snapshot is generated it - # becomes a new unmapped volume in the system and the user - # may manipulate it in the same manner as any other volume - # exposed by the system - volume_id = 
snapshot.provider_id - snapname = self._id_to_base64(volume.id) - LOG.info("ScaleIO create volume from snapshot: snapshot %(snapname)s " - "to volume %(volname)s.", - {'volname': volume_id, - 'snapname': snapname}) - - return self._snapshot_volume(volume_id, snapname) - - @staticmethod - def _get_headers(): - return {'content-type': 'application/json'} - - def _get_verify_cert(self): - verify_cert = False - if self.verify_server_certificate: - verify_cert = self.server_certificate_path - return verify_cert - - def extend_volume(self, volume, new_size): - """Extends the size of an existing available ScaleIO volume. - - This action will round up the volume to the nearest size that is - a granularity of 8 GBs. - """ - return self._extend_volume(volume['provider_id'], volume.size, - new_size) - - def _extend_volume(self, volume_id, old_size, new_size): - vol_id = volume_id - LOG.info( - "ScaleIO extend volume: volume %(volname)s to size %(new_size)s.", - {'volname': vol_id, - 'new_size': new_size}) - - req_vars = {'server_ip': self.server_ip, - 'server_port': self.server_port, - 'vol_id': vol_id} - request = ("https://%(server_ip)s:%(server_port)s" - "/api/instances/Volume::%(vol_id)s" - "/action/setVolumeSize") % req_vars - LOG.info("Change volume capacity request: %s.", request) - - # Round up the volume size so that it is a granularity of 8 GBs - # because ScaleIO only supports volumes with a granularity of 8 GBs. - volume_new_size = self._round_to_num_gran(new_size) - volume_real_old_size = self._round_to_num_gran(old_size) - if volume_real_old_size == volume_new_size: - return - - round_volume_capacity = self.configuration.sio_round_volume_capacity - if not round_volume_capacity and not new_size % 8 == 0: - LOG.warning("ScaleIO only supports volumes with a granularity " - "of 8 GBs. 
The new volume size is: %d.", - volume_new_size) - - params = {'sizeInGB': six.text_type(volume_new_size)} - r, response = self._execute_scaleio_post_request(params, request) - if r.status_code != http_client.OK: - response = r.json() - msg = (_("Error extending volume %(vol)s: %(err)s.") - % {'vol': vol_id, - 'err': response['message']}) - LOG.error(msg) - raise exception.VolumeBackendAPIException(data=msg) - - @staticmethod - def _round_to_num_gran(size, num=8): - if size % num == 0: - return size - return size + num - (size % num) - - @staticmethod - def _round_down_to_num_gran(size, num=8): - return size - (size % num) - - def create_cloned_volume(self, volume, src_vref): - """Creates a cloned volume.""" - volume_id = src_vref['provider_id'] - snapname = self._id_to_base64(volume.id) - LOG.info("ScaleIO create cloned volume: source volume %(src)s to " - "target volume %(tgt)s.", - {'src': volume_id, - 'tgt': snapname}) - - ret = self._snapshot_volume(volume_id, snapname) - if volume.size > src_vref.size: - self._extend_volume(ret['provider_id'], src_vref.size, volume.size) - - return ret - - def delete_volume(self, volume): - """Deletes a self.logical volume""" - volume_id = volume['provider_id'] - self._delete_volume(volume_id) - - def _delete_volume(self, vol_id): - req_vars = {'server_ip': self.server_ip, - 'server_port': self.server_port, - 'vol_id': six.text_type(vol_id)} - - unmap_before_delete = ( - self.configuration.sio_unmap_volume_before_deletion) - # Ensure that the volume is not mapped to any SDC before deletion in - # case unmap_before_deletion is enabled. 
- if unmap_before_delete: - params = {'allSdcs': ''} - request = ("https://%(server_ip)s:%(server_port)s" - "/api/instances/Volume::%(vol_id)s" - "/action/removeMappedSdc") % req_vars - LOG.info("Trying to unmap volume from all sdcs" - " before deletion: %s.", - request) - r, unused = self._execute_scaleio_post_request(params, request) - - params = {'removeMode': 'ONLY_ME'} - request = ("https://%(server_ip)s:%(server_port)s" - "/api/instances/Volume::%(vol_id)s" - "/action/removeVolume") % req_vars - r, response = self._execute_scaleio_post_request(params, request) - - if r.status_code != http_client.OK: - error_code = response['errorCode'] - if error_code == VOLUME_NOT_FOUND_ERROR: - LOG.warning("Ignoring error in delete volume %s:" - " Volume not found.", vol_id) - elif vol_id is None: - LOG.warning("Volume does not have provider_id thus does not " - "map to a ScaleIO volume. " - "Allowing deletion to proceed.") - else: - msg = (_("Error deleting volume %(vol)s: %(err)s.") % - {'vol': vol_id, - 'err': response['message']}) - LOG.error(msg) - raise exception.VolumeBackendAPIException(data=msg) - - def delete_snapshot(self, snapshot): - """Deletes a ScaleIO snapshot.""" - snap_id = snapshot.provider_id - LOG.info("ScaleIO delete snapshot.") - return self._delete_volume(snap_id) - - def initialize_connection(self, volume, connector, **kwargs): - """Initializes the connection and returns connection info. - - The scaleio driver returns a driver_volume_type of 'scaleio'. 
- """ - - LOG.debug("Connector is %s.", connector) - connection_properties = dict(self.connection_properties) - - volname = self._id_to_base64(volume.id) - connection_properties['scaleIO_volname'] = volname - connection_properties['scaleIO_volume_id'] = volume.provider_id - extra_specs = self._get_volumetype_extraspecs(volume) - qos_specs = self._get_volumetype_qos(volume) - storage_type = extra_specs.copy() - storage_type.update(qos_specs) - LOG.info("Volume type is %s.", storage_type) - round_volume_size = self._round_to_num_gran(volume.size) - iops_limit = self._get_iops_limit(round_volume_size, storage_type) - bandwidth_limit = self._get_bandwidth_limit(round_volume_size, - storage_type) - LOG.info("iops limit is %s", iops_limit) - LOG.info("bandwidth limit is %s", bandwidth_limit) - connection_properties['iopsLimit'] = iops_limit - connection_properties['bandwidthLimit'] = bandwidth_limit - return {'driver_volume_type': 'scaleio', - 'data': connection_properties} - - def _get_bandwidth_limit(self, size, storage_type): - try: - max_bandwidth = self._find_limit(storage_type, QOS_BANDWIDTH_LIMIT, - BANDWIDTH_LIMIT) - if max_bandwidth is not None: - max_bandwidth = (self._round_to_num_gran(int(max_bandwidth), - units.Ki)) - max_bandwidth = six.text_type(max_bandwidth) - LOG.info("max bandwidth is: %s", max_bandwidth) - bw_per_gb = self._find_limit(storage_type, QOS_BANDWIDTH_PER_GB, - None) - LOG.info("bandwidth per gb is: %s", bw_per_gb) - if bw_per_gb is None: - return max_bandwidth - # Since ScaleIO volumes size is in 8GB granularity - # and BWS limitation is in 1024 KBs granularity, we need to make - # sure that scaled_bw_limit is in 128 granularity. 
- scaled_bw_limit = (size * - self._round_to_num_gran(int(bw_per_gb), - MIN_BWS_SCALING_SIZE)) - if max_bandwidth is None or scaled_bw_limit < int(max_bandwidth): - return six.text_type(scaled_bw_limit) - else: - return max_bandwidth - except ValueError: - msg = _("None numeric BWS QoS limitation") - raise exception.InvalidInput(reason=msg) - - def _get_iops_limit(self, size, storage_type): - max_iops = self._find_limit(storage_type, QOS_IOPS_LIMIT_KEY, - IOPS_LIMIT_KEY) - LOG.info("max iops is: %s", max_iops) - iops_per_gb = self._find_limit(storage_type, QOS_IOPS_PER_GB, None) - LOG.info("iops per gb is: %s", iops_per_gb) - try: - if iops_per_gb is None: - if max_iops is not None: - return six.text_type(max_iops) - else: - return None - scaled_iops_limit = size * int(iops_per_gb) - if max_iops is None or scaled_iops_limit < int(max_iops): - return six.text_type(scaled_iops_limit) - else: - return six.text_type(max_iops) - except ValueError: - msg = _("None numeric IOPS QoS limitation") - raise exception.InvalidInput(reason=msg) - - def terminate_connection(self, volume, connector, **kwargs): - LOG.debug("scaleio driver terminate connection.") - - def _update_volume_stats(self): - stats = {} - - backend_name = self.configuration.safe_get('volume_backend_name') - stats['volume_backend_name'] = backend_name or 'scaleio' - stats['vendor_name'] = 'Dell EMC' - stats['driver_version'] = self.VERSION - stats['storage_protocol'] = 'scaleio' - stats['reserved_percentage'] = 0 - stats['QoS_support'] = True - stats['consistent_group_snapshot_enabled'] = True - stats['thick_provisioning_support'] = True - stats['thin_provisioning_support'] = True - pools = [] - - free_capacity = 0 - total_capacity = 0 - provisioned_capacity = 0 - - for sp_name in self.storage_pools: - splitted_name = sp_name.split(':') - domain_name = splitted_name[0] - pool_name = splitted_name[1] - # Get pool id from name. 
- pool_id = self._get_storage_pool_id(domain_name, pool_name) - LOG.info("Pool id is %s.", pool_id) - - req_vars = {'server_ip': self.server_ip, - 'server_port': self.server_port} - request = ("https://%(server_ip)s:%(server_port)s" - "/api/types/StoragePool/instances/action/" - "querySelectedStatistics") % req_vars - - props = self._get_queryable_statistics("StoragePool", pool_id) - params = {'ids': [pool_id], 'properties': props} - - r, response = self._execute_scaleio_post_request(params, request) - LOG.info("Query capacity stats response: %s.", response) - for res in response.values(): - # Divide by two because ScaleIO creates a copy for each volume - total_capacity_kb = ( - (res['capacityLimitInKb'] - res['spareCapacityInKb']) / 2) - total_capacity_gb = (self._round_down_to_num_gran - (total_capacity_kb / units.Mi)) - # This property is already rounded - # to 8 GB granularity in backend - free_capacity_gb = ( - res['capacityAvailableForVolumeAllocationInKb'] / units.Mi) - thin_capacity_allocated = 0 - # some versions of the API had a typo in the response - try: - thin_capacity_allocated = res['thinCapacityAllocatedInKm'] - except (TypeError, KeyError): - pass - # some versions of the API respond without a typo - try: - thin_capacity_allocated = res['thinCapacityAllocatedInKb'] - except (TypeError, KeyError): - pass - - # Divide by two because ScaleIO creates a copy for each volume - provisioned_capacity = ( - ((res['thickCapacityInUseInKb'] + - thin_capacity_allocated) / 2) / units.Mi) - - LOG.info("Free capacity of pool %(pool)s is: %(free)s, " - "total capacity: %(total)s, " - "provisioned capacity: %(prov)s", - {'pool': sp_name, - 'free': free_capacity_gb, - 'total': total_capacity_gb, - 'prov': provisioned_capacity}) - pool = {'pool_name': sp_name, - 'total_capacity_gb': total_capacity_gb, - 'free_capacity_gb': free_capacity_gb, - 'QoS_support': True, - 'consistent_group_snapshot_enabled': True, - 'reserved_percentage': 0, - 'thin_provisioning_support': 
True, - 'thick_provisioning_support': True, - 'provisioned_capacity_gb': provisioned_capacity, - 'max_over_subscription_ratio': - self.configuration.max_over_subscription_ratio - } - - pools.append(pool) - free_capacity += free_capacity_gb - total_capacity += total_capacity_gb - - stats['total_capacity_gb'] = total_capacity - stats['free_capacity_gb'] = free_capacity - LOG.info("Free capacity for backend '%(backend)s': %(free)s, " - "total capacity: %(total)s.", - {'backend': stats["volume_backend_name"], - 'free': free_capacity, - 'total': total_capacity}) - - stats['pools'] = pools - - self._stats = stats - - def get_volume_stats(self, refresh=False): - """Get volume stats. - - If 'refresh' is True, run update the stats first. - """ - if refresh: - self._update_volume_stats() - - return self._stats - - @staticmethod - def _get_volumetype_extraspecs(volume): - specs = {} - ctxt = context.get_admin_context() - type_id = volume['volume_type_id'] - if type_id: - volume_type = volume_types.get_volume_type(ctxt, type_id) - specs = volume_type.get('extra_specs') - for key, value in specs.items(): - specs[key] = value - - return specs - - def _get_volumetype_qos(self, volume): - qos = {} - ctxt = context.get_admin_context() - type_id = volume['volume_type_id'] - if type_id: - volume_type = volume_types.get_volume_type(ctxt, type_id) - qos_specs_id = volume_type.get('qos_specs_id') - if qos_specs_id is not None: - specs = qos_specs.get_qos_specs(ctxt, qos_specs_id)['specs'] - else: - specs = {} - for key, value in specs.items(): - if key in self.scaleio_qos_keys: - qos[key] = value - return qos - - def _sio_attach_volume(self, volume): - """Call connector.connect_volume() and return the path. 
""" - LOG.debug("Calling os-brick to attach ScaleIO volume.") - connection_properties = dict(self.connection_properties) - connection_properties['scaleIO_volname'] = self._id_to_base64( - volume.id) - connection_properties['scaleIO_volume_id'] = volume.provider_id - device_info = self.connector.connect_volume(connection_properties) - return device_info['path'] - - def _sio_detach_volume(self, volume): - """Call the connector.disconnect() """ - LOG.info("Calling os-brick to detach ScaleIO volume.") - connection_properties = dict(self.connection_properties) - connection_properties['scaleIO_volname'] = self._id_to_base64( - volume.id) - connection_properties['scaleIO_volume_id'] = volume.provider_id - self.connector.disconnect_volume(connection_properties, volume) - - def copy_image_to_volume(self, context, volume, image_service, image_id): - """Fetch the image from image_service and write it to the volume.""" - LOG.info("ScaleIO copy_image_to_volume volume: %(vol)s image service: " - "%(service)s image id: %(id)s.", - {'vol': volume, - 'service': six.text_type(image_service), - 'id': six.text_type(image_id)}) - - try: - image_utils.fetch_to_raw(context, - image_service, - image_id, - self._sio_attach_volume(volume), - BLOCK_SIZE, - size=volume['size']) - - finally: - self._sio_detach_volume(volume) - - def copy_volume_to_image(self, context, volume, image_service, image_meta): - """Copy the volume to the specified image.""" - LOG.info("ScaleIO copy_volume_to_image volume: %(vol)s image service: " - "%(service)s image meta: %(meta)s.", - {'vol': volume, - 'service': six.text_type(image_service), - 'meta': six.text_type(image_meta)}) - try: - image_utils.upload_volume(context, - image_service, - image_meta, - self._sio_attach_volume(volume)) - finally: - self._sio_detach_volume(volume) - - def update_migrated_volume(self, ctxt, volume, new_volume, - original_volume_status): - """Return the update from ScaleIO migrated volume. 
- - This method updates the volume name of the new ScaleIO volume to - match the updated volume ID. - The original volume is renamed first since ScaleIO does not allow - multiple volumes to have the same name. - """ - name_id = None - location = None - if original_volume_status == 'available': - # During migration, a new volume is created and will replace - # the original volume at the end of the migration. We need to - # rename the new volume. The current_name of the new volume, - # which is the id of the new volume, will be changed to the - # new_name, which is the id of the original volume. - current_name = new_volume['id'] - new_name = volume['id'] - vol_id = new_volume['provider_id'] - LOG.info("Renaming %(id)s from %(current_name)s to " - "%(new_name)s.", - {'id': vol_id, 'current_name': current_name, - 'new_name': new_name}) - - # Original volume needs to be renamed first - self._rename_volume(volume, "ff" + new_name) - self._rename_volume(new_volume, new_name) - else: - # The back-end will not be renamed. 
- name_id = new_volume['_name_id'] or new_volume['id'] - location = new_volume['provider_location'] - - return {'_name_id': name_id, 'provider_location': location} - - def _rename_volume(self, volume, new_id): - new_name = self._id_to_base64(new_id) - vol_id = volume['provider_id'] - - req_vars = {'server_ip': self.server_ip, - 'server_port': self.server_port, - 'id': vol_id} - request = ("https://%(server_ip)s:%(server_port)s" - "/api/instances/Volume::%(id)s/action/setVolumeName" % - req_vars) - LOG.info("ScaleIO rename volume request: %s.", request) - - params = {'newName': new_name} - r, response = self._execute_scaleio_post_request(params, request) - - if r.status_code != http_client.OK: - error_code = response['errorCode'] - if ((error_code == VOLUME_NOT_FOUND_ERROR or - error_code == OLD_VOLUME_NOT_FOUND_ERROR or - error_code == ILLEGAL_SYNTAX)): - LOG.info("Ignoring renaming action because the volume " - "%(vol)s is not a ScaleIO volume.", - {'vol': vol_id}) - else: - msg = (_("Error renaming volume %(vol)s: %(err)s.") % - {'vol': vol_id, 'err': response['message']}) - LOG.error(msg) - raise exception.VolumeBackendAPIException(data=msg) - else: - LOG.info("ScaleIO volume %(vol)s was renamed to " - "%(new_name)s.", - {'vol': vol_id, 'new_name': new_name}) - - def _query_scaleio_volume(self, volume, existing_ref): - request = self._create_scaleio_get_volume_request(volume, existing_ref) - r, response = self._execute_scaleio_get_request(request) - self._manage_existing_check_legal_response(r, existing_ref) - return response - - def _get_protection_domain_id(self, domain_name): - """"Get the id of the protection domain""" - - if not domain_name: - msg = (_("Error getting domain id from None name.")) - LOG.error(msg) - raise exception.VolumeBackendAPIException(data=msg) - - # do we already have the id? 
- if domain_name in self.cache_pd: - return self.cache_pd[domain_name] - - encoded_domain_name = urllib.parse.quote(domain_name, '') - req_vars = {'server_ip': self.server_ip, - 'server_port': self.server_port, - 'encoded_domain_name': encoded_domain_name} - request = ("https://%(server_ip)s:%(server_port)s" - "/api/types/Domain/instances/getByName::" - "%(encoded_domain_name)s") % req_vars - - r, domain_id = self._execute_scaleio_get_request(request) - - if not domain_id: - msg = (_("Domain with name %s wasn't found.") - % domain_name) - LOG.error(msg) - raise exception.VolumeBackendAPIException(data=msg) - if r.status_code != http_client.OK and "errorCode" in domain_id: - msg = (_("Error getting domain id from name %(name)s: %(id)s.") - % {'name': domain_name, - 'id': domain_id['message']}) - LOG.error(msg) - raise exception.VolumeBackendAPIException(data=msg) - - # add it to our cache - self.cache_pd[domain_name] = domain_id - return domain_id - - def _get_storage_pool_name(self, pool_id): - """Get the protection domain:storage pool name - - From a storage pool id, get the domain name and - storage pool names - """ - req_vars = {'server_ip': self.server_ip, - 'server_port': self.server_port, - 'pool_id': pool_id} - request = ("https://%(server_ip)s:%(server_port)s" - "/api/instances/StoragePool::%(pool_id)s") % req_vars - r, response = self._execute_scaleio_get_request(request) - - if r.status_code != http_client.OK: - msg = (_("Error getting pool name from id %(pool_id)s: " - "%(err_msg)s.") - % {'pool_id': pool_id}) - LOG.error(msg) - raise exception.VolumeBackendAPIException(data=msg) - - pool_name = response['name'] - domain_id = response['protectionDomainId'] - domain_name = self._get_protection_domain_name(domain_id) - - pool_name = "{}:{}".format(domain_name, pool_name) - - return pool_name - - def _get_protection_domain_name(self, domain_id): - """Get the protection domain name - - From a protection domain id, get the domain name - """ - req_vars = 
{'server_ip': self.server_ip, - 'server_port': self.server_port, - 'domain_id': domain_id} - request = ("https://%(server_ip)s:%(server_port)s" - "/api/instances/ProtectionDomain::%(domain_id)s") % req_vars - r, response = self._execute_scaleio_get_request(request) - - if r.status_code != http_client.OK: - msg = (_("Error getting domain name from id %(domain_id)s: " - "%(err_msg)s.") - % {'domain_id': domain_id, - 'err_msg': response}) - LOG.error(msg) - raise exception.VolumeBackendAPIException(data=msg) - - domain_name = response['name'] - - return domain_name - - def _get_storage_pool_id(self, domain_name, pool_name): - """Get the id of the configured storage pool""" - if not domain_name or not pool_name: - msg = (_("Unable to query the storage pool id for " - "Pool %(pool_name)s and Domain %(domain_name)s.") - % {'pool_name': pool_name, - 'domain_name': domain_name}) - LOG.error(msg) - raise exception.VolumeBackendAPIException(data=msg) - - fullname = "{}:{}".format(domain_name, pool_name) - if fullname in self.cache_sp: - - return self.cache_sp[fullname] - - domain_id = self._get_protection_domain_id(domain_name) - encoded_pool_name = urllib.parse.quote(pool_name, '') - req_vars = {'server_ip': self.server_ip, - 'server_port': self.server_port, - 'domain_id': domain_id, - 'encoded_pool_name': encoded_pool_name} - request = ("https://%(server_ip)s:%(server_port)s" - "/api/types/Pool/instances/getByName::" - "%(domain_id)s,%(encoded_pool_name)s") % req_vars - LOG.debug("ScaleIO get pool id by name request: %s.", request) - r, pool_id = self._execute_scaleio_get_request(request) - - if not pool_id: - msg = (_("Pool with name %(pool_name)s wasn't found in " - "domain %(domain_id)s.") - % {'pool_name': pool_name, - 'domain_id': domain_id}) - LOG.error(msg) - raise exception.VolumeBackendAPIException(data=msg) - if r.status_code != http_client.OK and "errorCode" in pool_id: - msg = (_("Error getting pool id from name %(pool_name)s: " - "%(err_msg)s.") - % 
{'pool_name': pool_name, - 'err_msg': pool_id['message']}) - LOG.error(msg) - raise exception.VolumeBackendAPIException(data=msg) - - LOG.info("Pool id is %s.", pool_id) - - # add it to ou cache - self.cache_sp[fullname] = pool_id - return pool_id - - def _get_all_scaleio_volumes(self): - """Gets list of all SIO volumes in PD and SP""" - - all_volumes = [] - # check for every storage pool configured - for sp_name in self.storage_pools: - splitted_name = sp_name.split(':') - domain_name = splitted_name[0] - pool_name = splitted_name[1] - - sp_id = self._get_storage_pool_id(domain_name, pool_name) - - req_vars = {'server_ip': self.server_ip, - 'server_port': self.server_port, - 'storage_pool_id': sp_id} - request = ("https://%(server_ip)s:%(server_port)s" - "/api/instances/StoragePool::%(storage_pool_id)s" - "/relationships/Volume") % req_vars - r, volumes = self._execute_scaleio_get_request(request) - - if r.status_code != http_client.OK: - msg = (_("Error calling api " - "status code: %d") % r.status_code) - raise exception.VolumeBackendAPIException(data=msg) - - all_volumes.extend(volumes) - - return all_volumes - - def get_manageable_volumes(self, cinder_volumes, marker, limit, offset, - sort_keys, sort_dirs): - """List volumes on the backend available for management by Cinder. - - Rule out volumes that are mapped to an SDC or - are already in the list of cinder_volumes. - Return references of the volume ids for any others. 
- """ - - all_sio_volumes = self._get_all_scaleio_volumes() - - # Put together a map of existing cinder volumes on the array - # so we can lookup cinder id's to SIO id - existing_vols = {} - for cinder_vol in cinder_volumes: - provider_id = cinder_vol['provider_id'] - existing_vols[provider_id] = cinder_vol.name_id - - manageable_volumes = [] - for sio_vol in all_sio_volumes: - cinder_id = existing_vols.get(sio_vol['id']) - is_safe = True - reason = None - - if sio_vol['mappedSdcInfo']: - is_safe = False - numHosts = len(sio_vol['mappedSdcInfo']) - reason = _('Volume mapped to %d host(s).') % numHosts - - if cinder_id: - is_safe = False - reason = _("Volume already managed.") - - if sio_vol['volumeType'] != 'Snapshot': - manageable_volumes.append({ - 'reference': {'source-id': sio_vol['id']}, - 'size': self._convert_kb_to_gib(sio_vol['sizeInKb']), - 'safe_to_manage': is_safe, - 'reason_not_safe': reason, - 'cinder_id': cinder_id, - 'extra_info': {'volumeType': sio_vol['volumeType'], - 'name': sio_vol['name']}}) - - return volume_utils.paginate_entries_list( - manageable_volumes, marker, limit, offset, sort_keys, sort_dirs) - - def _is_managed(self, volume_id): - lst = objects.VolumeList.get_all_by_host(context.get_admin_context(), - self.host) - for vol in lst: - if vol.provider_id == volume_id: - return True - - return False - - def manage_existing(self, volume, existing_ref): - """Manage an existing ScaleIO volume. - - existing_ref is a dictionary of the form: - {'source-id': } - """ - response = self._query_scaleio_volume(volume, existing_ref) - return {'provider_id': response['id']} - - def manage_existing_get_size(self, volume, existing_ref): - return self._get_volume_size(volume, existing_ref) - - def manage_existing_snapshot(self, snapshot, existing_ref): - """Manage an existing ScaleIO snapshot. 
- - :param snapshot: the snapshot to manage - :param existing_ref: dictionary of the form: - {'source-id': } - """ - response = self._query_scaleio_volume(snapshot, existing_ref) - not_real_parent = (response.get('orig_parent_overriden') or - response.get('is_source_deleted')) - if not_real_parent: - reason = (_("The snapshot's parent is not the original parent due " - "to deletion or revert action, therefore " - "this snapshot cannot be managed.")) - raise exception.ManageExistingInvalidReference( - existing_ref=existing_ref, - reason=reason - ) - ancestor_id = response['ancestorVolumeId'] - volume_id = snapshot.volume.provider_id - if ancestor_id != volume_id: - reason = (_("The snapshot's parent in ScaleIO is %(ancestor)s " - "and not %(volume)s.") % - {'ancestor': ancestor_id, 'volume': volume_id}) - raise exception.ManageExistingInvalidReference( - existing_ref=existing_ref, - reason=reason - ) - return {'provider_id': response['id']} - - def manage_existing_snapshot_get_size(self, snapshot, existing_ref): - return self._get_volume_size(snapshot, existing_ref) - - def _get_volume_size(self, volume, existing_ref): - response = self._query_scaleio_volume(volume, existing_ref) - return int(math.ceil(float(response['sizeInKb']) / units.Mi)) - - def _execute_scaleio_get_request(self, request): - r = requests.get( - request, - auth=( - self.server_username, - self.server_token), - verify=self._get_verify_cert()) - r = self._check_response(r, request) - response = r.json() - return r, response - - def _create_scaleio_get_volume_request(self, volume, existing_ref): - """Throws an exception if the input is invalid for manage existing. - - if the input is valid - return a request. 
- """ - type_id = volume.get('volume_type_id') - if 'source-id' not in existing_ref: - reason = _("Reference must contain source-id.") - raise exception.ManageExistingInvalidReference( - existing_ref=existing_ref, - reason=reason - ) - if type_id is None: - reason = _("Volume must have a volume type") - raise exception.ManageExistingVolumeTypeMismatch( - existing_ref=existing_ref, - reason=reason - ) - vol_id = existing_ref['source-id'] - req_vars = {'server_ip': self.server_ip, - 'server_port': self.server_port, - 'id': vol_id} - request = ("https://%(server_ip)s:%(server_port)s" - "/api/instances/Volume::%(id)s" % req_vars) - LOG.info("ScaleIO get volume by id request: %s.", request) - return request - - def _manage_existing_check_legal_response(self, response, existing_ref): - if response.status_code != http_client.OK: - reason = (_("Error managing volume: %s.") % response.json()[ - 'message']) - raise exception.ManageExistingInvalidReference( - existing_ref=existing_ref, - reason=reason - ) - - # check if it is already managed - if self._is_managed(response.json()['id']): - reason = _("manage_existing cannot manage a volume " - "that is already being managed.") - raise exception.ManageExistingInvalidReference( - existing_ref=existing_ref, - reason=reason - ) - - if response.json()['mappedSdcInfo'] is not None: - reason = _("manage_existing cannot manage a volume " - "connected to hosts. Please disconnect this volume " - "from existing hosts before importing.") - raise exception.ManageExistingInvalidReference( - existing_ref=existing_ref, - reason=reason - ) - - def create_group(self, context, group): - """Creates a group. - - :param context: the context of the caller. - :param group: the group object. - :returns: model_update - - ScaleIO won't create CG until cg-snapshot creation, - db will maintain the volumes and CG relationship. 
- """ - - # let generic volume group support handle non-cgsnapshots - if not volume_utils.is_group_a_cg_snapshot_type(group): - raise NotImplementedError() - - LOG.info("Creating Group") - model_update = {'status': fields.GroupStatus.AVAILABLE} - return model_update - - def delete_group(self, context, group, volumes): - """Deletes a group. - - :param context: the context of the caller. - :param group: the group object. - :param volumes: a list of volume objects in the group. - :returns: model_update, volumes_model_update - - ScaleIO will delete the volumes of the CG. - """ - - # let generic volume group support handle non-cgsnapshots - if not volume_utils.is_group_a_cg_snapshot_type(group): - raise NotImplementedError() - - LOG.info("Deleting Group") - model_update = {'status': fields.GroupStatus.DELETED} - error_statuses = [fields.GroupStatus.ERROR, - fields.GroupStatus.ERROR_DELETING] - volumes_model_update = [] - for volume in volumes: - try: - self._delete_volume(volume['provider_id']) - update_item = {'id': volume['id'], - 'status': 'deleted'} - volumes_model_update.append(update_item) - except exception.VolumeBackendAPIException as err: - update_item = {'id': volume['id'], - 'status': 'error_deleting'} - volumes_model_update.append(update_item) - if model_update['status'] not in error_statuses: - model_update['status'] = 'error_deleting' - LOG.error("Failed to delete the volume %(vol)s of group. " - "Exception: %(exception)s.", - {'vol': volume['name'], 'exception': err}) - return model_update, volumes_model_update - - def create_group_snapshot(self, context, group_snapshot, snapshots): - """Creates a group snapshot. - - :param context: the context of the caller. - :param group_snapshot: the GroupSnapshot object to be created. - :param snapshots: a list of Snapshot objects in the group_snapshot. 
- :returns: model_update, snapshots_model_update - """ - - # let generic volume group support handle non-cgsnapshots - if not volume_utils.is_group_a_cg_snapshot_type(group_snapshot): - raise NotImplementedError() - - get_scaleio_snapshot_params = lambda snapshot: { - 'volumeId': snapshot.volume['provider_id'], - 'snapshotName': self._id_to_base64(snapshot['id'])} - snapshot_defs = list(map(get_scaleio_snapshot_params, snapshots)) - r, response = self._snapshot_volume_group(snapshot_defs) - if r.status_code != http_client.OK and "errorCode" in response: - msg = (_("Failed creating snapshot for group: " - "%(response)s.") % - {'response': response['message']}) - LOG.error(msg) - raise exception.VolumeBackendAPIException(data=msg) - snapshot_model_update = [] - for snapshot, scaleio_id in zip(snapshots, response['volumeIdList']): - update_item = {'id': snapshot['id'], - 'status': fields.SnapshotStatus.AVAILABLE, - 'provider_id': scaleio_id} - snapshot_model_update.append(update_item) - model_update = {'status': fields.GroupStatus.AVAILABLE} - return model_update, snapshot_model_update - - def delete_group_snapshot(self, context, group_snapshot, snapshots): - """Deletes a snapshot. - - :param context: the context of the caller. - :param group_snapshot: the GroupSnapshot object to be deleted. - :param snapshots: a list of snapshot objects in the group_snapshot. 
- :returns: model_update, snapshots_model_update - """ - - # let generic volume group support handle non-cgsnapshots - if not volume_utils.is_group_a_cg_snapshot_type(group_snapshot): - raise NotImplementedError() - - error_statuses = [fields.SnapshotStatus.ERROR, - fields.SnapshotStatus.ERROR_DELETING] - model_update = {'status': group_snapshot['status']} - snapshot_model_update = [] - for snapshot in snapshots: - try: - self._delete_volume(snapshot.provider_id) - update_item = {'id': snapshot['id'], - 'status': fields.SnapshotStatus.DELETED} - snapshot_model_update.append(update_item) - except exception.VolumeBackendAPIException as err: - update_item = {'id': snapshot['id'], - 'status': fields.SnapshotStatus.ERROR_DELETING} - snapshot_model_update.append(update_item) - if model_update['status'] not in error_statuses: - model_update['status'] = ( - fields.SnapshotStatus.ERROR_DELETING) - LOG.error("Failed to delete the snapshot %(snap)s " - "of snapshot: %(snapshot_id)s. " - "Exception: %(exception)s.", - {'snap': snapshot['name'], - 'exception': err, - 'snapshot_id': group_snapshot.id}) - model_update['status'] = fields.GroupSnapshotStatus.DELETED - return model_update, snapshot_model_update - - def create_group_from_src(self, context, group, volumes, - group_snapshot=None, snapshots=None, - source_group=None, source_vols=None): - """Creates a group from source. - - :param context: the context of the caller. - :param group: the Group object to be created. - :param volumes: a list of Volume objects in the group. - :param group_snapshot: the GroupSnapshot object as source. - :param snapshots: a list of snapshot objects in group_snapshot. - :param source_group: the Group object as source. - :param source_vols: a list of volume objects in the source_group. 
- :returns: model_update, volumes_model_update - """ - - # let generic volume group support handle non-cgsnapshots - if not volume_utils.is_group_a_cg_snapshot_type(group): - raise NotImplementedError() - - get_scaleio_snapshot_params = lambda src_volume, trg_volume: { - 'volumeId': src_volume['provider_id'], - 'snapshotName': self._id_to_base64(trg_volume['id'])} - if group_snapshot and snapshots: - snapshot_defs = map(get_scaleio_snapshot_params, - snapshots, - volumes) - else: - snapshot_defs = map(get_scaleio_snapshot_params, - source_vols, - volumes) - r, response = self._snapshot_volume_group(list(snapshot_defs)) - if r.status_code != http_client.OK and "errorCode" in response: - msg = (_("Failed creating snapshot for group: " - "%(response)s.") % - {'response': response['message']}) - LOG.error(msg) - raise exception.VolumeBackendAPIException(data=msg) - volumes_model_update = [] - for volume, scaleio_id in zip(volumes, response['volumeIdList']): - update_item = {'id': volume['id'], - 'status': 'available', - 'provider_id': scaleio_id} - volumes_model_update.append(update_item) - model_update = {'status': fields.GroupStatus.AVAILABLE} - return model_update, volumes_model_update - - def update_group(self, context, group, - add_volumes=None, remove_volumes=None): - """Update a group. - - :param context: the context of the caller. - :param group: the group object. - :param add_volumes: a list of volume objects to be added. - :param remove_volumes: a list of volume objects to be removed. - :returns: model_update, add_volumes_update, remove_volumes_update - - ScaleIO does not handle volume grouping. - Cinder maintains volumes and CG relationship. - """ - - if volume_utils.is_group_a_cg_snapshot_type(group): - return None, None, None - - # we'll rely on the generic group implementation if it is not a - # consistency group request. 
- raise NotImplementedError() - - def _snapshot_volume_group(self, snapshot_defs): - LOG.info("ScaleIO snapshot group of volumes") - params = {'snapshotDefs': snapshot_defs} - req_vars = {'server_ip': self.server_ip, - 'server_port': self.server_port} - request = ("https://%(server_ip)s:%(server_port)s" - "/api/instances/System/action/snapshotVolumes") % req_vars - return self._execute_scaleio_post_request(params, request) - - def ensure_export(self, context, volume): - """Driver entry point to get the export info for an existing volume.""" - pass - - def create_export(self, context, volume, connector): - """Driver entry point to get the export info for a new volume.""" - pass - - def remove_export(self, context, volume): - """Driver entry point to remove an export for a volume.""" - pass - - def check_for_export(self, context, volume_id): - """Make sure volume is exported.""" - pass diff --git a/cinder/volume/drivers/dell_emc/unity/__init__.py b/cinder/volume/drivers/dell_emc/unity/__init__.py deleted file mode 100644 index bc5c9f3ef..000000000 --- a/cinder/volume/drivers/dell_emc/unity/__init__.py +++ /dev/null @@ -1,18 +0,0 @@ -# Copyright (c) 2016 Dell Inc. or its subsidiaries. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -from cinder.volume.drivers.dell_emc.unity import driver - -Driver = driver.UnityDriver diff --git a/cinder/volume/drivers/dell_emc/unity/adapter.py b/cinder/volume/drivers/dell_emc/unity/adapter.py deleted file mode 100644 index e48f7a558..000000000 --- a/cinder/volume/drivers/dell_emc/unity/adapter.py +++ /dev/null @@ -1,760 +0,0 @@ -# Copyright (c) 2016 Dell Inc. or its subsidiaries. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import contextlib -import copy -import functools -import os -import random - -from oslo_config import cfg -from oslo_log import log as logging -from oslo_utils import excutils -from oslo_utils import importutils - -storops = importutils.try_import('storops') -if storops: - from storops import exception as storops_ex -else: - # Set storops_ex to be None for unit test - storops_ex = None - -from cinder import exception -from cinder.i18n import _ -from cinder import utils as cinder_utils -from cinder.volume.drivers.dell_emc.unity import client -from cinder.volume.drivers.dell_emc.unity import utils -from cinder.volume import utils as vol_utils - -LOG = logging.getLogger(__name__) - -PROTOCOL_FC = 'FC' -PROTOCOL_ISCSI = 'iSCSI' - - -class VolumeParams(object): - def __init__(self, adapter, volume): - self._adapter = adapter - self._volume = volume - - self._volume_id = volume.id - self._name = volume.name - self._size = volume.size - self._description = (volume.display_description - if volume.display_description - 
else volume.display_name) - self._pool = None - self._io_limit_policy = None - - @property - def volume_id(self): - return self._volume_id - - @property - def name(self): - return self._name - - @name.setter - def name(self, value): - self._name = value - - @property - def size(self): - return self._size - - @size.setter - def size(self, value): - self._size = value - - @property - def description(self): - return self._description - - @description.setter - def description(self, value): - self._description = value - - @property - def pool(self): - if self._pool is None: - self._pool = self._adapter._get_target_pool(self._volume) - return self._pool - - @pool.setter - def pool(self, value): - self._pool = value - - @property - def io_limit_policy(self): - if self._io_limit_policy is None: - qos_specs = utils.get_backend_qos_specs(self._volume) - self._io_limit_policy = self._adapter.client.get_io_limit_policy( - qos_specs) - return self._io_limit_policy - - @io_limit_policy.setter - def io_limit_policy(self, value): - self._io_limit_policy = value - - def __eq__(self, other): - return (self.volume_id == other.volume_id - and self.name == other.name - and self.size == other.size - and self.io_limit_policy == other.io_limit_policy) - - -class CommonAdapter(object): - protocol = 'unknown' - driver_name = 'UnityAbstractDriver' - driver_volume_type = 'unknown' - - def __init__(self, version=None): - self.version = version - self.driver = None - self.config = None - self.configured_pool_names = None - self.reserved_percentage = None - self.max_over_subscription_ratio = None - self.volume_backend_name = None - self.ip = None - self.username = None - self.password = None - self.array_cert_verify = None - self.array_ca_cert_path = None - - self._serial_number = None - self.storage_pools_map = None - self._client = None - self.allowed_ports = None - - def do_setup(self, driver, conf): - self.driver = driver - self.config = self.normalize_config(conf) - 
self.configured_pool_names = self.config.unity_storage_pool_names - self.reserved_percentage = self.config.reserved_percentage - self.max_over_subscription_ratio = ( - self.config.max_over_subscription_ratio) - self.volume_backend_name = ( - self.config.safe_get('volume_backend_name') or self.driver_name) - self.ip = self.config.san_ip - self.username = self.config.san_login - self.password = self.config.san_password - # Unity currently not support to upload certificate. - # Once it supports, enable the verify. - self.array_cert_verify = False - self.array_ca_cert_path = self.config.driver_ssl_cert_path - - sys_version = self.client.system.system_version - if utils.is_before_4_1(sys_version): - raise exception.VolumeBackendAPIException( - data=_('Unity driver does not support array OE version: %s. ' - 'Upgrade to 4.1 or later.') % sys_version) - - self.storage_pools_map = self.get_managed_pools() - - self.allowed_ports = self.validate_ports(self.config.unity_io_ports) - - group_name = (self.config.config_group if self.config.config_group - else 'DEFAULT') - folder_name = '%(group)s.%(sys_name)s' % { - 'group': group_name, 'sys_name': self.client.system.info.name} - persist_path = os.path.join(cfg.CONF.state_path, 'unity', folder_name) - storops.TCHelper.set_up(persist_path) - - def normalize_config(self, config): - config.unity_storage_pool_names = utils.remove_empty( - '%s.unity_storage_pool_names' % config.config_group, - config.unity_storage_pool_names) - - config.unity_io_ports = utils.remove_empty( - '%s.unity_io_ports' % config.config_group, - config.unity_io_ports) - - return config - - def get_all_ports(self): - raise NotImplementedError() - - def validate_ports(self, ports_whitelist): - all_ports = self.get_all_ports() - # After normalize_config, `ports_whitelist` could be only None or valid - # list in which the items are stripped. 
- if ports_whitelist is None: - return all_ports.id - - # For iSCSI port, the format is 'spa_eth0', and 'spa_iom_0_fc0' for FC. - # Unix style glob like 'spa_*' is supported. - whitelist = set(ports_whitelist) - - matched, _ignored, unmatched_whitelist = utils.match_any(all_ports.id, - whitelist) - if not matched: - LOG.error('No matched ports filtered by all patterns: %s', - whitelist) - raise exception.InvalidConfigurationValue( - option='%s.unity_io_ports' % self.config.config_group, - value=self.config.unity_io_ports) - - if unmatched_whitelist: - LOG.error('No matched ports filtered by below patterns: %s', - unmatched_whitelist) - raise exception.InvalidConfigurationValue( - option='%s.unity_io_ports' % self.config.config_group, - value=self.config.unity_io_ports) - - LOG.info('These ports %(matched)s will be used based on ' - 'the option unity_io_ports: %(config)s', - {'matched': matched, - 'config': self.config.unity_io_ports}) - return matched - - @property - def verify_cert(self): - verify_cert = self.array_cert_verify - if verify_cert and self.array_ca_cert_path is not None: - verify_cert = self.array_ca_cert_path - return verify_cert - - @property - def client(self): - if self._client is None: - self._client = client.UnityClient( - self.ip, - self.username, - self.password, - verify_cert=self.verify_cert) - return self._client - - @property - def serial_number(self): - if self._serial_number is None: - self._serial_number = self.client.get_serial() - return self._serial_number - - def get_managed_pools(self): - names = self.configured_pool_names - array_pools = self.client.get_pools() - valid_names = utils.validate_pool_names(names, array_pools.name) - return {p.name: p for p in array_pools if p.name in valid_names} - - def makeup_model(self, lun, is_snap_lun=False): - lun_type = 'snap_lun' if is_snap_lun else 'lun' - location = self._build_provider_location(lun_id=lun.get_id(), - lun_type=lun_type) - return { - 'provider_location': location, - 
'provider_id': lun.get_id() - } - - def create_volume(self, volume): - """Creates a volume. - - :param volume: volume information - """ - params = VolumeParams(self, volume) - - LOG.info('Create Volume: %(name)s, size: %(size)s, description: ' - '%(description)s, pool: %(pool)s, io limit policy: ' - '%(io_limit_policy)s.', params) - - return self.makeup_model( - self.client.create_lun(name=params.name, - size=params.size, - pool=params.pool, - description=params.description, - io_limit_policy=params.io_limit_policy)) - - def delete_volume(self, volume): - lun_id = self.get_lun_id(volume) - if lun_id is None: - LOG.info('Backend LUN not found, skipping the deletion. ' - 'Volume: %(volume_name)s.', - {'volume_name': volume.name}) - else: - self.client.delete_lun(lun_id) - - @cinder_utils.trace - def _initialize_connection(self, lun_or_snap, connector, vol_id): - host = self.client.create_host(connector['host'], - self.get_connector_uids(connector)) - hlu = self.client.attach(host, lun_or_snap) - data = self.get_connection_info(hlu, host, connector) - data['target_discovered'] = True - if vol_id is not None: - data['volume_id'] = vol_id - conn_info = { - 'driver_volume_type': self.driver_volume_type, - 'data': data, - } - LOG.debug('Initialized connection info: %s', conn_info) - return conn_info - - @cinder_utils.trace - def initialize_connection(self, volume, connector): - lun = self.client.get_lun(lun_id=self.get_lun_id(volume)) - return self._initialize_connection(lun, connector, volume.id) - - @cinder_utils.trace - def _terminate_connection(self, lun_or_snap, connector): - host = self.client.get_host(connector['host']) - self.client.detach(host, lun_or_snap) - - @cinder_utils.trace - def terminate_connection(self, volume, connector): - lun = self.client.get_lun(lun_id=self.get_lun_id(volume)) - return self._terminate_connection(lun, connector) - - def get_connector_uids(self, connector): - return None - - def get_connection_info(self, hlu, host, connector): - 
return {} - - def extend_volume(self, volume, new_size): - lun_id = self.get_lun_id(volume) - if lun_id is None: - msg = (_('Backend LUN not found for Volume: %(volume_name)s.') % - {'volume_name': volume.name}) - raise exception.VolumeBackendAPIException(data=msg) - else: - self.client.extend_lun(lun_id, new_size) - - def _get_target_pool(self, volume): - return self.storage_pools_map[utils.get_pool_name(volume)] - - def _build_provider_location(self, lun_id=None, lun_type=None): - return utils.build_provider_location( - system=self.serial_number, - lun_type=lun_type, - lun_id=lun_id, - version=self.version) - - def update_volume_stats(self): - return { - 'volume_backend_name': self.volume_backend_name, - 'storage_protocol': self.protocol, - 'thin_provisioning_support': True, - 'thick_provisioning_support': False, - 'pools': self.get_pools_stats(), - } - - def get_pools_stats(self): - self.storage_pools_map = self.get_managed_pools() - return [self._get_pool_stats(pool) for pool in self.pools] - - @property - def pools(self): - return self.storage_pools_map.values() - - def _get_pool_stats(self, pool): - return { - 'pool_name': pool.name, - 'total_capacity_gb': utils.byte_to_gib(pool.size_total), - 'provisioned_capacity_gb': utils.byte_to_gib( - pool.size_subscribed), - 'free_capacity_gb': utils.byte_to_gib(pool.size_free), - 'reserved_percentage': self.reserved_percentage, - 'location_info': ('%(pool_name)s|%(array_serial)s' % - {'pool_name': pool.name, - 'array_serial': self.serial_number}), - 'thin_provisioning_support': True, - 'thick_provisioning_support': False, - 'max_over_subscription_ratio': ( - self.max_over_subscription_ratio)} - - def get_lun_id(self, volume): - """Retrieves id of the volume's backing LUN. 
- - :param volume: volume information - """ - if volume.provider_location: - return utils.extract_provider_location(volume.provider_location, - 'id') - else: - # In some cases, cinder will not update volume info in DB with - # provider_location returned by us. We need to retrieve the id - # from array. - lun = self.client.get_lun(name=volume.name) - return lun.get_id() if lun is not None else None - - def create_snapshot(self, snapshot): - """Creates a snapshot. - - :param snapshot: snapshot information. - """ - src_lun_id = self.get_lun_id(snapshot.volume) - snap = self.client.create_snap(src_lun_id, snapshot.name) - location = self._build_provider_location(lun_type='snapshot', - lun_id=snap.get_id()) - return {'provider_location': location, - 'provider_id': snap.get_id()} - - def delete_snapshot(self, snapshot): - """Deletes a snapshot. - - :param snapshot: the snapshot to delete. - """ - snap = self.client.get_snap(name=snapshot.name) - self.client.delete_snap(snap) - - def _get_referenced_lun(self, existing_ref): - if 'source-id' in existing_ref: - lun = self.client.get_lun(lun_id=existing_ref['source-id']) - elif 'source-name' in existing_ref: - lun = self.client.get_lun(name=existing_ref['source-name']) - else: - reason = _('Reference must contain source-id or source-name key.') - raise exception.ManageExistingInvalidReference( - existing_ref=existing_ref, reason=reason) - if lun is None or not lun.existed: - raise exception.ManageExistingInvalidReference( - existing_ref=existing_ref, - reason=_("LUN doesn't exist.")) - return lun - - def manage_existing(self, volume, existing_ref): - """Manages an existing LUN in the array. - - The LUN should be in a manageable pool backend, otherwise return error. - Rename the backend storage object so that it matches the - `volume['name']` which is how drivers traditionally map between a - cinder volume and the associated backend storage object. - - LUN ID or name are supported in `existing_ref`, like: - - .. 
code-block:: none - - existing_ref:{ - - 'source-id': - - } - - or - - .. code-block:: none - - existing_ref:{ - - 'source-name': - - } - """ - lun = self._get_referenced_lun(existing_ref) - lun.modify(name=volume.name) - return { - 'provider_location': - self._build_provider_location(lun_id=lun.get_id(), - lun_type='lun'), - 'provider_id': lun.get_id() - } - - def manage_existing_get_size(self, volume, existing_ref): - """Returns size of volume to be managed by `manage_existing`. - - The driver does some check here: - 1. The LUN `existing_ref` should be managed by the `volume.host`. - """ - lun = self._get_referenced_lun(existing_ref) - target_pool_name = utils.get_pool_name(volume) - lun_pool_name = lun.pool.name - if target_pool_name and lun_pool_name != target_pool_name: - reason = (_('The imported LUN is in pool %(pool_name)s ' - 'which is not managed by the host %(host)s.') % - {'pool_name': lun_pool_name, - 'host': volume.host}) - raise exception.ManageExistingInvalidReference( - existing_ref=existing_ref, reason=reason) - - return utils.byte_to_gib(lun.size_total) - - def _disconnect_device(self, conn): - conn['connector'].disconnect_volume(conn['conn']['data'], - conn['device']) - - def _connect_device(self, conn): - return self.driver._connect_device(conn) - - @contextlib.contextmanager - def _connect_resource(self, lun_or_snap, connector, res_id): - """Connects to LUN or snapshot, and makes sure disconnect finally. - - :param lun_or_snap: the LUN or snapshot to connect/disconnect. - :param connector: the host connector information. - :param res_id: the ID of the LUN or snapshot. 
- - :return: the connection information, in a dict with format - like (same as the one returned by `_connect_device`): - { - 'conn': , - 'device': , - 'connector': - } - """ - init_conn_func = functools.partial(self._initialize_connection, - lun_or_snap, connector, res_id) - term_conn_func = functools.partial(self._terminate_connection, - lun_or_snap, connector) - with utils.assure_cleanup(init_conn_func, term_conn_func, - False) as conn_info: - conn_device_func = functools.partial(self._connect_device, - conn_info) - with utils.assure_cleanup(conn_device_func, - self._disconnect_device, - True) as attach_info: - yield attach_info - - def _dd_copy(self, vol_params, src_snap, src_lun=None): - """Creates a volume via copying a Unity snapshot. - - It attaches the `volume` and `snap`, then use `dd` to copy the - data from the Unity snapshot to the `volume`. - """ - dest_lun = self.client.create_lun( - name=vol_params.name, size=vol_params.size, pool=vol_params.pool, - description=vol_params.description, - io_limit_policy=vol_params.io_limit_policy) - src_id = src_snap.get_id() - try: - conn_props = cinder_utils.brick_get_connector_properties() - - with self._connect_resource(dest_lun, conn_props, - vol_params.volume_id) as dest_info, \ - self._connect_resource(src_snap, conn_props, - src_id) as src_info: - if src_lun is None: - # If size is not specified, need to get the size from LUN - # of snapshot. 
- lun = self.client.get_lun( - lun_id=src_snap.storage_resource.get_id()) - size_in_m = utils.byte_to_mib(lun.size_total) - else: - size_in_m = utils.byte_to_mib(src_lun.size_total) - vol_utils.copy_volume( - src_info['device']['path'], - dest_info['device']['path'], - size_in_m, - self.driver.configuration.volume_dd_blocksize, - sparse=True) - except Exception: - with excutils.save_and_reraise_exception(): - utils.ignore_exception(self.client.delete_lun, - dest_lun.get_id()) - LOG.error('Failed to create cloned volume: %(vol_id)s, ' - 'from source unity snapshot: %(snap_name)s.', - {'vol_id': vol_params.volume_id, - 'snap_name': src_snap.name}) - - return dest_lun - - def _thin_clone(self, vol_params, src_snap, src_lun=None): - tc_src = src_snap if src_lun is None else src_lun - try: - LOG.debug('Try to thin clone from %s.', tc_src.name) - lun = self.client.thin_clone( - tc_src, vol_params.name, - description=vol_params.description, - io_limit_policy=vol_params.io_limit_policy, - new_size_gb=vol_params.size) - except storops_ex.UnityThinCloneLimitExceededError: - LOG.info('Number of thin clones of base LUN exceeds system ' - 'limit, dd-copy a new one and thin clone from it.') - # Copy via dd if thin clone meets the system limit - hidden = copy.copy(vol_params) - hidden.name = 'hidden-%s' % vol_params.name - hidden.description = 'hidden-%s' % vol_params.description - copied_lun = self._dd_copy(hidden, src_snap, src_lun=src_lun) - LOG.debug('Notify storops the dd action of lun: %(src_name)s. 
And ' - 'the newly copied lun is: %(copied)s.', - {'src_name': tc_src.name, 'copied': copied_lun.name}) - storops.TCHelper.notify(tc_src, - storops.ThinCloneActionEnum.DD_COPY, - copied_lun) - lun = self.client.thin_clone( - copied_lun, vol_params.name, - description=vol_params.description, - io_limit_policy=vol_params.io_limit_policy, - new_size_gb=vol_params.size) - except storops_ex.SystemAPINotSupported: - # Thin clone not support on array version before Merlin - lun = self._dd_copy(vol_params, src_snap, src_lun=src_lun) - LOG.debug( - 'Volume copied via dd because array OE is too old to support ' - 'thin clone api. source snap: %(src_snap)s, lun: %(src_lun)s.', - {'src_snap': src_snap.name, - 'src_lun': 'Unknown' if src_lun is None else src_lun.name}) - return lun - - def create_volume_from_snapshot(self, volume, snapshot): - snap = self.client.get_snap(snapshot.name) - return self.makeup_model( - self._thin_clone(VolumeParams(self, volume), snap), - is_snap_lun=True) - - def create_cloned_volume(self, volume, src_vref): - """Creates cloned volume. - - 1. Take an internal snapshot of source volume, and attach it. - 2. Thin clone from the snapshot to a new volume. - Note: there are several cases the thin clone will downgrade to `dd`, - 2.1 Source volume is attached (in-use). - 2.2 Array OE version doesn't support thin clone. - 2.3 The current LUN family reaches the thin clone limits. - 3. Delete the internal snapshot created in step 1. 
- """ - - src_lun_id = self.get_lun_id(src_vref) - if src_lun_id is None: - raise exception.VolumeBackendAPIException( - data=_( - "LUN ID of source volume: %s not found.") % src_vref.name) - src_lun = self.client.get_lun(lun_id=src_lun_id) - src_snap_name = 'snap_clone_%s' % volume.id - - create_snap_func = functools.partial(self.client.create_snap, - src_lun_id, src_snap_name) - vol_params = VolumeParams(self, volume) - with utils.assure_cleanup(create_snap_func, - self.client.delete_snap, - True) as src_snap: - LOG.debug('Internal snapshot for clone is created, ' - 'name: %(name)s, id: %(id)s.', - {'name': src_snap_name, - 'id': src_snap.get_id()}) - if src_vref.volume_attachment: - lun = self._dd_copy(vol_params, src_snap, src_lun=src_lun) - LOG.debug('Volume copied using dd because source volume: ' - '%(name)s is attached: %(attach)s.', - {'name': src_vref.name, - 'attach': src_vref.volume_attachment}) - return self.makeup_model(lun) - else: - lun = self._thin_clone(vol_params, src_snap, src_lun=src_lun) - return self.makeup_model(lun, is_snap_lun=True) - - def get_pool_name(self, volume): - return self.client.get_pool_name(volume.name) - - @cinder_utils.trace - def initialize_connection_snapshot(self, snapshot, connector): - snap = self.client.get_snap(snapshot.name) - return self._initialize_connection(snap, connector, snapshot.id) - - @cinder_utils.trace - def terminate_connection_snapshot(self, snapshot, connector): - snap = self.client.get_snap(snapshot.name) - return self._terminate_connection(snap, connector) - - -class ISCSIAdapter(CommonAdapter): - protocol = PROTOCOL_ISCSI - driver_name = 'UnityISCSIDriver' - driver_volume_type = 'iscsi' - - def get_all_ports(self): - return self.client.get_ethernet_ports() - - def get_connector_uids(self, connector): - return utils.extract_iscsi_uids(connector) - - def get_connection_info(self, hlu, host, connector): - targets = self.client.get_iscsi_target_info(self.allowed_ports) - if not targets: - msg = _("There 
is no accessible iSCSI targets on the system.") - raise exception.VolumeBackendAPIException(data=msg) - one_target = random.choice(targets) - portals = [a['portal'] for a in targets] - iqns = [a['iqn'] for a in targets] - data = { - 'target_luns': [hlu] * len(portals), - 'target_iqns': iqns, - 'target_portals': portals, - 'target_lun': hlu, - 'target_portal': one_target['portal'], - 'target_iqn': one_target['iqn'], - } - return data - - -class FCAdapter(CommonAdapter): - protocol = PROTOCOL_FC - driver_name = 'UnityFCDriver' - driver_volume_type = 'fibre_channel' - - def __init__(self, version=None): - super(FCAdapter, self).__init__(version=version) - self.lookup_service = None - - def do_setup(self, driver, config): - super(FCAdapter, self).do_setup(driver, config) - self.lookup_service = utils.create_lookup_service() - - def get_all_ports(self): - return self.client.get_fc_ports() - - def get_connector_uids(self, connector): - return utils.extract_fc_uids(connector) - - @property - def auto_zone_enabled(self): - return self.lookup_service is not None - - def get_connection_info(self, hlu, host, connector): - targets = self.client.get_fc_target_info( - host, logged_in_only=(not self.auto_zone_enabled), - allowed_ports=self.allowed_ports) - - if not targets: - msg = _("There is no accessible fibre channel targets on the " - "system.") - raise exception.VolumeBackendAPIException(data=msg) - - if self.auto_zone_enabled: - data = self._get_fc_zone_info(connector['wwpns'], targets) - else: - data = { - 'target_wwn': targets, - } - data['target_lun'] = hlu - return data - - @cinder_utils.trace - def _terminate_connection(self, lun_or_snap, connector): - # For FC, terminate_connection needs to return data to zone manager - # which would clean the zone based on the data. 
- super(FCAdapter, self)._terminate_connection(lun_or_snap, connector) - - ret = None - if self.auto_zone_enabled: - ret = { - 'driver_volume_type': self.driver_volume_type, - 'data': {} - } - host = self.client.get_host(connector['host']) - if len(host.host_luns) == 0: - targets = self.client.get_fc_target_info( - logged_in_only=True, allowed_ports=self.allowed_ports) - ret['data'] = self._get_fc_zone_info(connector['wwpns'], - targets) - return ret - - def _get_fc_zone_info(self, initiator_wwns, target_wwns): - mapping = self.lookup_service.get_device_mapping_from_network( - initiator_wwns, target_wwns) - targets, itor_tgt_map = utils.convert_to_itor_tgt_map(mapping) - return { - 'target_wwn': targets, - 'initiator_target_map': itor_tgt_map, - } diff --git a/cinder/volume/drivers/dell_emc/unity/client.py b/cinder/volume/drivers/dell_emc/unity/client.py deleted file mode 100644 index c4ce0ff92..000000000 --- a/cinder/volume/drivers/dell_emc/unity/client.py +++ /dev/null @@ -1,307 +0,0 @@ -# Copyright (c) 2016 Dell Inc. or its subsidiaries. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -from oslo_log import log -from oslo_utils import excutils -from oslo_utils import importutils - -storops = importutils.try_import('storops') -if storops: - from storops import exception as storops_ex -else: - # Set storops_ex to be None for unit test - storops_ex = None - -from cinder import exception -from cinder.i18n import _ -from cinder.volume.drivers.dell_emc.unity import utils - - -LOG = log.getLogger(__name__) - - -class UnityClient(object): - def __init__(self, host, username, password, verify_cert=True): - if storops is None: - msg = _('Python package storops is not installed which ' - 'is required to run Unity driver.') - raise exception.VolumeBackendAPIException(data=msg) - self._system = None - self.host = host - self.username = username - self.password = password - self.verify_cert = verify_cert - - @property - def system(self): - if self._system is None: - self._system = storops.UnitySystem( - host=self.host, username=self.username, password=self.password, - verify=self.verify_cert) - return self._system - - def get_serial(self): - return self.system.serial_number - - def create_lun(self, name, size, pool, description=None, - io_limit_policy=None): - """Creates LUN on the Unity system. - - :param name: lun name - :param size: lun size in GiB - :param pool: UnityPool object represent to pool to place the lun - :param description: lun description - :param io_limit_policy: io limit on the LUN - :return: UnityLun object - """ - try: - lun = pool.create_lun(lun_name=name, size_gb=size, - description=description, - io_limit_policy=io_limit_policy) - except storops_ex.UnityLunNameInUseError: - LOG.debug("LUN %s already exists. 
Return the existing one.", - name) - lun = self.system.get_lun(name=name) - return lun - - def thin_clone(self, lun_or_snap, name, io_limit_policy=None, - description=None, new_size_gb=None): - try: - lun = lun_or_snap.thin_clone( - name=name, io_limit_policy=io_limit_policy, - description=description) - except storops_ex.UnityLunNameInUseError: - LOG.debug("LUN(thin clone) %s already exists. " - "Return the existing one.", name) - lun = self.system.get_lun(name=name) - if new_size_gb is not None and new_size_gb > lun.total_size_gb: - lun = self.extend_lun(lun.get_id(), new_size_gb) - return lun - - def delete_lun(self, lun_id): - """Deletes LUN on the Unity system. - - :param lun_id: id of the LUN - """ - try: - lun = self.system.get_lun(_id=lun_id) - lun.delete() - except storops_ex.UnityResourceNotFoundError: - LOG.debug("LUN %s doesn't exist. Deletion is not needed.", - lun_id) - - def get_lun(self, lun_id=None, name=None): - """Gets LUN on the Unity system. - - :param lun_id: id of the LUN - :param name: name of the LUN - :return: `UnityLun` object - """ - lun = None - if lun_id is None and name is None: - LOG.warning( - "Both lun_id and name are None to get LUN. Return None.") - else: - try: - lun = self.system.get_lun(_id=lun_id, name=name) - except storops_ex.UnityResourceNotFoundError: - LOG.warning( - "LUN id=%(id)s, name=%(name)s doesn't exist.", - {'id': lun_id, 'name': name}) - return lun - - def extend_lun(self, lun_id, size_gib): - lun = self.system.get_lun(lun_id) - try: - lun.total_size_gb = size_gib - except storops_ex.UnityNothingToModifyError: - LOG.debug("LUN %s is already expanded. LUN expand is not needed.", - lun_id) - return lun - - def get_pools(self): - """Gets all storage pools on the Unity system. - - :return: list of UnityPool object - """ - return self.system.get_pool() - - def create_snap(self, src_lun_id, name=None): - """Creates a snapshot of LUN on the Unity system. - - :param src_lun_id: the source LUN ID of the snapshot. 
- :param name: the name of the snapshot. The Unity system will give one - if `name` is None. - """ - try: - lun = self.get_lun(lun_id=src_lun_id) - snap = lun.create_snap(name, is_auto_delete=False) - except storops_ex.UnitySnapNameInUseError as err: - LOG.debug( - "Snap %(snap_name)s already exists on LUN %(lun_id)s. " - "Return the existing one. Message: %(err)s", - {'snap_name': name, - 'lun_id': src_lun_id, - 'err': err}) - snap = self.get_snap(name=name) - return snap - - @staticmethod - def delete_snap(snap): - if snap is None: - LOG.debug("Snap to delete is None, skipping deletion.") - return - - try: - snap.delete() - except storops_ex.UnityResourceNotFoundError as err: - LOG.debug("Snap %(snap_name)s may be deleted already. " - "Message: %(err)s", - {'snap_name': snap.name, - 'err': err}) - except storops_ex.UnityDeleteAttachedSnapError as err: - with excutils.save_and_reraise_exception(): - LOG.warning("Failed to delete snapshot %(snap_name)s " - "which is in use. Message: %(err)s", - {'snap_name': snap.name, 'err': err}) - - def get_snap(self, name=None): - try: - return self.system.get_snap(name=name) - except storops_ex.UnityResourceNotFoundError as err: - LOG.warning("Snapshot %(name)s doesn't exist. Message: %(err)s", - {'name': name, 'err': err}) - return None - - def create_host(self, name, uids): - """Creates a host on Unity. - - Creates a host on Unity which has the uids associated. - - :param name: name of the host - :param uids: iqns or wwns list - :return: UnitHost object - """ - - try: - host = self.system.get_host(name=name) - except storops_ex.UnityResourceNotFoundError: - LOG.debug('Existing host %s not found. 
Create a new one.', name) - host = self.system.create_host(name=name) - - host_initiators_ids = self.get_host_initiator_ids(host) - un_registered = [h for h in uids if h not in host_initiators_ids] - for uid in un_registered: - host.add_initiator(uid, force_create=True) - - host.update() - return host - - @staticmethod - def get_host_initiator_ids(host): - fc = host.fc_host_initiators - fc_ids = [] if fc is None else fc.initiator_id - iscsi = host.iscsi_host_initiators - iscsi_ids = [] if iscsi is None else iscsi.initiator_id - return fc_ids + iscsi_ids - - @staticmethod - def attach(host, lun_or_snap): - """Attaches a `UnityLun` or `UnitySnap` to a `UnityHost`. - - :param host: `UnityHost` object - :param lun_or_snap: `UnityLun` or `UnitySnap` object - :return: hlu - """ - try: - return host.attach(lun_or_snap, skip_hlu_0=True) - except storops_ex.UnityResourceAlreadyAttachedError: - return host.get_hlu(lun_or_snap) - - @staticmethod - def detach(host, lun_or_snap): - """Detaches a `UnityLun` or `UnitySnap` from a `UnityHost`. - - :param host: `UnityHost` object - :param lun_or_snap: `UnityLun` object - """ - lun_or_snap.update() - host.detach(lun_or_snap) - - def get_host(self, name): - return self.system.get_host(name=name) - - def get_ethernet_ports(self): - return self.system.get_ethernet_port() - - def get_iscsi_target_info(self, allowed_ports=None): - portals = self.system.get_iscsi_portal() - portals = portals.shadow_copy(port_ids=allowed_ports) - return [{'portal': utils.convert_ip_to_portal(p.ip_address), - 'iqn': p.iscsi_node.name} - for p in portals] - - def get_fc_ports(self): - return self.system.get_fc_port() - - def get_fc_target_info(self, host=None, logged_in_only=False, - allowed_ports=None): - """Get the ports WWN of FC on array. - - :param host: the host to which the FC port is registered. - :param logged_in_only: whether to retrieve only the logged-in port. - - :return: the WWN of FC ports. 
For example, the FC WWN on array is like: - 50:06:01:60:89:20:09:25:50:06:01:6C:09:20:09:25. - This function removes the colons and returns the last 16 bits: - 5006016C09200925. - """ - wwns = set() - if logged_in_only: - for paths in filter(None, host.fc_host_initiators.paths): - paths = paths.shadow_copy(is_logged_in=True) - # `paths.fc_port` is just a list, not a UnityFcPortList, - # so use filter instead of shadow_copy here. - wwns.update(p.wwn.upper() - for p in filter( - lambda fcp: (allowed_ports is None or - fcp.get_id() in allowed_ports), - paths.fc_port)) - else: - ports = self.get_fc_ports() - ports = ports.shadow_copy(port_ids=allowed_ports) - wwns.update(p.wwn.upper() for p in ports) - return [wwn.replace(':', '')[16:] for wwn in wwns] - - def create_io_limit_policy(self, name, max_iops=None, max_kbps=None): - try: - limit = self.system.create_io_limit_policy( - name, max_iops=max_iops, max_kbps=max_kbps) - except storops_ex.UnityPolicyNameInUseError: - limit = self.system.get_io_limit_policy(name=name) - return limit - - def get_io_limit_policy(self, qos_specs): - limit_policy = None - if qos_specs is not None: - limit_policy = self.create_io_limit_policy( - qos_specs['id'], - qos_specs.get(utils.QOS_MAX_IOPS), - qos_specs.get(utils.QOS_MAX_BWS)) - return limit_policy - - def get_pool_name(self, lun_name): - lun = self.system.get_lun(name=lun_name) - return lun.pool_name diff --git a/cinder/volume/drivers/dell_emc/unity/driver.py b/cinder/volume/drivers/dell_emc/unity/driver.py deleted file mode 100644 index 7c3a61ff0..000000000 --- a/cinder/volume/drivers/dell_emc/unity/driver.py +++ /dev/null @@ -1,227 +0,0 @@ -# Copyright (c) 2016 Dell Inc. or its subsidiaries. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. 
You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -"""Cinder Driver for Unity""" - -from oslo_config import cfg -from oslo_log import log as logging - -from cinder import interface -from cinder.volume import configuration -from cinder.volume import driver -from cinder.volume.drivers.dell_emc.unity import adapter -from cinder.volume.drivers.san.san import san_opts -from cinder.zonemanager import utils as zm_utils - -LOG = logging.getLogger(__name__) - -CONF = cfg.CONF - -UNITY_OPTS = [ - cfg.ListOpt('unity_storage_pool_names', - default=None, - help='A comma-separated list of storage pool names to be ' - 'used.'), - cfg.ListOpt('unity_io_ports', - default=None, - help='A comma-separated list of iSCSI or FC ports to be used. ' - 'Each port can be Unix-style glob expressions.')] - -CONF.register_opts(UNITY_OPTS, group=configuration.SHARED_CONF_GROUP) - - -@interface.volumedriver -class UnityDriver(driver.ManageableVD, - driver.ManageableSnapshotsVD, - driver.BaseVD): - """Unity Driver. 
- - Version history: - 1.0.0 - Initial version - 2.0.0 - Add thin clone support - """ - - VERSION = '02.00.00' - VENDOR = 'Dell EMC' - # ThirdPartySystems wiki page - CI_WIKI_NAME = "EMC_UNITY_CI" - - def __init__(self, *args, **kwargs): - super(UnityDriver, self).__init__(*args, **kwargs) - self.configuration.append_config_values(UNITY_OPTS) - self.configuration.append_config_values(san_opts) - protocol = self.configuration.storage_protocol - if protocol.lower() == adapter.PROTOCOL_FC.lower(): - self.protocol = adapter.PROTOCOL_FC - self.adapter = adapter.FCAdapter(self.VERSION) - else: - self.protocol = adapter.PROTOCOL_ISCSI - self.adapter = adapter.ISCSIAdapter(self.VERSION) - - def do_setup(self, context): - self.adapter.do_setup(self, self.configuration) - - def check_for_setup_error(self): - pass - - def create_volume(self, volume): - """Creates a volume.""" - return self.adapter.create_volume(volume) - - def create_volume_from_snapshot(self, volume, snapshot): - """Creates a volume from a snapshot.""" - return self.adapter.create_volume_from_snapshot(volume, snapshot) - - def create_cloned_volume(self, volume, src_vref): - """Creates a cloned volume.""" - return self.adapter.create_cloned_volume(volume, src_vref) - - def extend_volume(self, volume, new_size): - """Extend a volume.""" - self.adapter.extend_volume(volume, new_size) - - def delete_volume(self, volume): - """Deletes a volume.""" - self.adapter.delete_volume(volume) - - def create_snapshot(self, snapshot): - """Creates a snapshot.""" - self.adapter.create_snapshot(snapshot) - - def delete_snapshot(self, snapshot): - """Deletes a snapshot.""" - self.adapter.delete_snapshot(snapshot) - - def ensure_export(self, context, volume): - """Driver entry point to get the export info for an existing volume.""" - pass - - def create_export(self, context, volume, connector): - """Driver entry point to get the export info for a new volume.""" - pass - - def remove_export(self, context, volume): - """Driver 
entry point to remove an export for a volume.""" - pass - - def check_for_export(self, context, volume_id): - """Make sure volume is exported.""" - pass - - @zm_utils.add_fc_zone - def initialize_connection(self, volume, connector): - """Initializes the connection and returns connection info. - - Assign any created volume to a compute node/host so that it can be - used from that host. - - The driver returns a driver_volume_type of 'fibre_channel'. - The target_wwn can be a single entry or a list of wwns that - correspond to the list of remote wwn(s) that will export the volume. - The initiator_target_map is a map that represents the remote wwn(s) - and a list of wwns which are visible to the remote wwn(s). - Example return values: - FC: - { - 'driver_volume_type': 'fibre_channel' - 'data': { - 'target_discovered': True, - 'target_lun': 1, - 'target_wwn': ['1234567890123', '0987654321321'], - 'initiator_target_map': { - '1122334455667788': ['1234567890123', - '0987654321321'] - } - } - } - iSCSI: - { - 'driver_volume_type': 'iscsi' - 'data': { - 'target_discovered': True, - 'target_iqns': ['iqn.2010-10.org.openstack:volume-00001', - 'iqn.2010-10.org.openstack:volume-00002'], - 'target_portals': ['127.0.0.1:3260', '127.0.1.1:3260'], - 'target_luns': [1, 1], - } - } - """ - return self.adapter.initialize_connection(volume, connector) - - @zm_utils.remove_fc_zone - def terminate_connection(self, volume, connector, **kwargs): - """Disallow connection from connector.""" - return self.adapter.terminate_connection(volume, connector) - - def get_volume_stats(self, refresh=False): - """Get volume stats. 
- - :param refresh: True to get updated data - """ - if refresh: - self.update_volume_stats() - - return self._stats - - def update_volume_stats(self): - """Retrieve stats info from volume group.""" - LOG.debug("Updating volume stats.") - stats = self.adapter.update_volume_stats() - stats['driver_version'] = self.VERSION - stats['vendor_name'] = self.VENDOR - self._stats = stats - - def manage_existing(self, volume, existing_ref): - """Manages an existing LUN in the array. - - :param volume: the mapping cinder volume of the Unity LUN. - :param existing_ref: the Unity LUN info. - """ - return self.adapter.manage_existing(volume, existing_ref) - - def manage_existing_get_size(self, volume, existing_ref): - """Returns size of volume to be managed by manage_existing.""" - return self.adapter.manage_existing_get_size(volume, existing_ref) - - def get_pool(self, volume): - """Returns the pool name of a volume.""" - return self.adapter.get_pool_name(volume) - - def unmanage(self, volume): - """Unmanages a volume.""" - pass - - def backup_use_temp_snapshot(self): - return True - - def create_export_snapshot(self, context, snapshot, connector): - """Creates the mount point of the snapshot for backup. - - Not necessary to create on Unity. - """ - pass - - def remove_export_snapshot(self, context, snapshot): - """Deletes the mount point the snapshot for backup. - - Not necessary to create on Unity. - """ - pass - - def initialize_connection_snapshot(self, snapshot, connector, **kwargs): - return self.adapter.initialize_connection_snapshot(snapshot, connector) - - def terminate_connection_snapshot(self, snapshot, connector, **kwargs): - return self.adapter.terminate_connection_snapshot(snapshot, connector) diff --git a/cinder/volume/drivers/dell_emc/unity/utils.py b/cinder/volume/drivers/dell_emc/unity/utils.py deleted file mode 100644 index 90a8bebf2..000000000 --- a/cinder/volume/drivers/dell_emc/unity/utils.py +++ /dev/null @@ -1,294 +0,0 @@ -# Copyright (c) 2016 Dell Inc. 
or its subsidiaries. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from __future__ import division - -import contextlib -from distutils import version -import functools -from oslo_log import log as logging -from oslo_utils import fnmatch -from oslo_utils import units -import six - -from cinder import exception -from cinder.i18n import _ -from cinder.volume import utils as vol_utils -from cinder.volume import volume_types -from cinder.zonemanager import utils as zm_utils - -LOG = logging.getLogger(__name__) -BACKEND_QOS_CONSUMERS = frozenset(['back-end', 'both']) -QOS_MAX_IOPS = 'maxIOPS' -QOS_MAX_BWS = 'maxBWS' - - -def dump_provider_location(location_dict): - sorted_keys = sorted(location_dict.keys()) - return '|'.join('%(k)s^%(v)s' % {'k': k, 'v': location_dict[k]} - for k in sorted_keys) - - -def build_provider_location(system, lun_type, lun_id, version): - """Builds provider_location for volume or snapshot. - - :param system: Unity serial number - :param lun_id: LUN ID in Unity - :param lun_type: 'lun' - :param version: driver version - """ - location_dict = {'system': system, - 'type': lun_type, - 'id': six.text_type(lun_id), - 'version': version} - return dump_provider_location(location_dict) - - -def extract_provider_location(provider_location, key): - """Extracts value of the specified field from provider_location string. 
- - :param provider_location: provider_location string - :param key: field name of the value that to be extracted - :return: value of the specified field if it exists, otherwise, - None is returned - """ - if provider_location: - for kvp in provider_location.split('|'): - fields = kvp.split('^') - if len(fields) == 2 and fields[0] == key: - return fields[1] - else: - LOG.warning('"%(key)s" is not found in provider ' - 'location "%(location)s."', - {'key': key, 'location': provider_location}) - else: - LOG.warning('Empty provider location received.') - - -def byte_to_gib(byte): - return byte / units.Gi - - -def byte_to_mib(byte): - return byte / units.Mi - - -def gib_to_mib(gib): - return gib * units.Ki - - -def validate_pool_names(conf_pools, array_pools): - if not conf_pools: - LOG.debug('No storage pools are specified. This host will manage ' - 'all the pools on the Unity system.') - return array_pools - - conf_pools = set(map(lambda i: i.strip(), conf_pools)) - array_pools = set(map(lambda i: i.strip(), array_pools)) - existed = conf_pools & array_pools - - if not existed: - msg = (_('No storage pools to be managed exist. Please check ' - 'your configuration. 
The available storage pools on the ' - 'system are %s.') % array_pools) - raise exception.VolumeBackendAPIException(data=msg) - - return existed - - -def extract_iscsi_uids(connector): - if 'initiator' not in connector: - msg = _("Host %s doesn't have iSCSI initiator.") % connector['host'] - raise exception.VolumeBackendAPIException(data=msg) - - return [connector['initiator']] - - -def extract_fc_uids(connector): - if 'wwnns' not in connector or 'wwpns' not in connector: - msg = _("Host %s doesn't have FC initiators.") % connector['host'] - LOG.error(msg) - raise exception.VolumeBackendAPIException(data=msg) - - wwnns = connector['wwnns'] - wwpns = connector['wwpns'] - wwns = [(node + port).upper() for node, port in zip(wwnns, wwpns)] - - def _to_wwn(wwn): - # Format the wwn to include the colon - # For example, convert 1122200000051E55E100 to - # 11:22:20:00:00:05:1E:55:A1:00 - return ':'.join(wwn[i:i + 2] for i in range(0, len(wwn), 2)) - - return list(map(_to_wwn, wwns)) - - -def convert_ip_to_portal(ip): - return '%s:3260' % ip - - -def convert_to_itor_tgt_map(zone_mapping): - """Function to process data from lookup service. - - :param zone_mapping: mapping is the data from the zone lookup service - with below format - - { - : { - 'initiator_port_wwn_list': - ('200000051e55a100', '200000051e55a121'..) - 'target_port_wwn_list': - ('100000051e55a100', '100000051e55a121'..) 
- - } - - } - """ - target_wwns = [] - itor_tgt_map = {} - for san_name in zone_mapping: - one_map = zone_mapping[san_name] - for target in one_map['target_port_wwn_list']: - if target not in target_wwns: - target_wwns.append(target) - for initiator in one_map['initiator_port_wwn_list']: - itor_tgt_map[initiator] = one_map['target_port_wwn_list'] - LOG.debug("target_wwns: %(tgt_wwns)s\n init_targ_map: %(itor_tgt_map)s", - {'tgt_wwns': target_wwns, - 'itor_tgt_map': itor_tgt_map}) - return target_wwns, itor_tgt_map - - -def get_pool_name(volume): - return vol_utils.extract_host(volume.host, 'pool') - - -def get_extra_spec(volume, spec_key): - spec_value = None - type_id = volume.volume_type_id - if type_id is not None: - extra_specs = volume_types.get_volume_type_extra_specs(type_id) - if spec_key in extra_specs: - spec_value = extra_specs[spec_key] - return spec_value - - -def ignore_exception(func, *args, **kwargs): - try: - func(*args, **kwargs) - except Exception as ex: - LOG.warning('Error occurred but ignored. Function: %(func_name)s, ' - 'args: %(args)s, kwargs: %(kwargs)s, ' - 'exception: %(ex)s.', - {'func_name': func, 'args': args, - 'kwargs': kwargs, 'ex': ex}) - - -@contextlib.contextmanager -def assure_cleanup(enter_func, exit_func, use_enter_return): - """Assures the resource is cleaned up. Used as a context. - - :param enter_func: the function to execute when entering the context. - :param exit_func: the function to execute when leaving the context. - :param use_enter_return: the flag indicates whether to pass the return - value of enter_func in to the exit_func. - """ - - enter_return = None - try: - if isinstance(enter_func, functools.partial): - enter_func_name = enter_func.func.__name__ - else: - enter_func_name = enter_func.__name__ - LOG.debug(('Entering context. 
Function: %(func_name)s, ' - 'use_enter_return: %(use)s.'), - {'func_name': enter_func_name, - 'use': use_enter_return}) - enter_return = enter_func() - yield enter_return - finally: - if isinstance(exit_func, functools.partial): - exit_func_name = exit_func.func.__name__ - else: - exit_func_name = exit_func.__name__ - LOG.debug(('Exiting context. Function: %(func_name)s, ' - 'use_enter_return: %(use)s.'), - {'func_name': exit_func_name, - 'use': use_enter_return}) - if enter_return is not None: - if use_enter_return: - ignore_exception(exit_func, enter_return) - else: - ignore_exception(exit_func) - - -def create_lookup_service(): - return zm_utils.create_lookup_service() - - -def get_backend_qos_specs(volume): - type_id = volume.volume_type_id - if type_id is None: - return None - - qos_specs = volume_types.get_volume_type_qos_specs(type_id) - if qos_specs is None: - return None - - qos_specs = qos_specs['qos_specs'] - if qos_specs is None: - return None - - consumer = qos_specs['consumer'] - # Front end QoS specs are handled by nova. We ignore them here. 
- if consumer not in BACKEND_QOS_CONSUMERS: - return None - - max_iops = qos_specs['specs'].get(QOS_MAX_IOPS) - max_bws = qos_specs['specs'].get(QOS_MAX_BWS) - if max_iops is None and max_bws is None: - return None - - return { - 'id': qos_specs['id'], - QOS_MAX_IOPS: max_iops, - QOS_MAX_BWS: max_bws, - } - - -def remove_empty(option, value_list): - if value_list is not None: - value_list = list(filter(None, map(str.strip, value_list))) - if not value_list: - raise exception.InvalidConfigurationValue(option=option, - value=value_list) - return value_list - - -def match_any(full, patterns): - matched = list( - filter(lambda x: any(fnmatch.fnmatchcase(x, p) for p in patterns), - full)) - unmatched = list( - filter(lambda x: not any(fnmatch.fnmatchcase(x, p) for p in patterns), - full)) - unmatched_patterns = list( - filter(lambda p: not any(fnmatch.fnmatchcase(x, p) for x in full), - patterns)) - return matched, unmatched, unmatched_patterns - - -def is_before_4_1(ver): - return version.LooseVersion(ver) < version.LooseVersion('4.1') diff --git a/cinder/volume/drivers/dell_emc/vmax/__init__.py b/cinder/volume/drivers/dell_emc/vmax/__init__.py deleted file mode 100644 index e69de29bb..000000000 diff --git a/cinder/volume/drivers/dell_emc/vmax/common.py b/cinder/volume/drivers/dell_emc/vmax/common.py deleted file mode 100644 index e61ac9a5d..000000000 --- a/cinder/volume/drivers/dell_emc/vmax/common.py +++ /dev/null @@ -1,3181 +0,0 @@ -# Copyright (c) 2017 Dell Inc. or its subsidiaries. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the -# License for the specific language governing permissions and limitations -# under the License. - -import ast -from copy import deepcopy -import os.path -import sys - -from oslo_config import cfg -from oslo_log import log as logging -from oslo_utils import strutils -import six -import uuid - -from cinder import exception -from cinder.i18n import _ -from cinder.objects import fields -from cinder.volume import configuration -from cinder.volume.drivers.dell_emc.vmax import masking -from cinder.volume.drivers.dell_emc.vmax import provision -from cinder.volume.drivers.dell_emc.vmax import rest -from cinder.volume.drivers.dell_emc.vmax import utils -from cinder.volume import utils as volume_utils - -LOG = logging.getLogger(__name__) - -CONF = cfg.CONF - -CINDER_EMC_CONFIG_FILE = '/etc/cinder/cinder_dell_emc_config.xml' -CINDER_EMC_CONFIG_FILE_PREFIX = '/etc/cinder/cinder_dell_emc_config_' -CINDER_EMC_CONFIG_FILE_POSTFIX = '.xml' -BACKENDNAME = 'volume_backend_name' -PREFIXBACKENDNAME = 'capabilities:volume_backend_name' - -# Replication -REPLICATION_DISABLED = fields.ReplicationStatus.DISABLED -REPLICATION_ENABLED = fields.ReplicationStatus.ENABLED -REPLICATION_FAILOVER = fields.ReplicationStatus.FAILED_OVER -FAILOVER_ERROR = fields.ReplicationStatus.FAILOVER_ERROR -REPLICATION_ERROR = fields.ReplicationStatus.ERROR - - -vmax_opts = [ - cfg.StrOpt('cinder_dell_emc_config_file', - default=CINDER_EMC_CONFIG_FILE, - help='Use this file for cinder emc plugin ' - 'config data.'), - cfg.StrOpt('interval', - default=3, - help='Use this value to specify ' - 'length of the interval in seconds.'), - cfg.StrOpt('retries', - default=200, - help='Use this value to specify ' - 'number of retries.'), - cfg.BoolOpt('initiator_check', - default=False, - help='Use this value to enable ' - 'the initiator_check.')] - -CONF.register_opts(vmax_opts, group=configuration.SHARED_CONF_GROUP) - - -class VMAXCommon(object): - """Common class for Rest based VMAX volume drivers. 
- - This common class is for Dell EMC VMAX volume drivers - based on UniSphere Rest API. - It supports VMAX 3 and VMAX All Flash arrays. - - """ - VERSION = "3.0.0" - - stats = {'driver_version': '3.0', - 'free_capacity_gb': 0, - 'reserved_percentage': 0, - 'storage_protocol': None, - 'total_capacity_gb': 0, - 'vendor_name': 'Dell EMC', - 'volume_backend_name': None, - 'replication_enabled': False, - 'replication_targets': None} - - pool_info = {'backend_name': None, - 'config_file': None, - 'arrays_info': {}, - 'max_over_subscription_ratio': None, - 'reserved_percentage': 0, - 'replication_enabled': False} - - def __init__(self, prtcl, version, configuration=None, - active_backend_id=None): - - self.protocol = prtcl - self.configuration = configuration - self.configuration.append_config_values(vmax_opts) - self.rest = rest.VMAXRest() - self.utils = utils.VMAXUtils() - self.masking = masking.VMAXMasking(prtcl, self.rest) - self.provision = provision.VMAXProvision(self.rest) - self.version = version - # replication - self.replication_enabled = False - self.extend_replicated_vol = False - self.rep_devices = None - self.active_backend_id = active_backend_id - self.failover = False - self._get_replication_info() - self._gather_info() - - def _gather_info(self): - """Gather the relevant information for update_volume_stats.""" - self._get_attributes_from_config() - array_info = self.utils.parse_file_to_get_array_map( - self.pool_info['config_file']) - self.rest.set_rest_credentials(array_info) - finalarrayinfolist = self._get_slo_workload_combinations( - array_info) - self.pool_info['arrays_info'] = finalarrayinfolist - - def _get_attributes_from_config(self): - """Get relevent details from configuration file.""" - if hasattr(self.configuration, 'cinder_dell_emc_config_file'): - self.pool_info['config_file'] = ( - self.configuration.cinder_dell_emc_config_file) - else: - self.pool_info['config_file'] = ( - self.configuration.safe_get('cinder_dell_emc_config_file')) - 
self.interval = self.configuration.safe_get('interval') - self.retries = self.configuration.safe_get('retries') - self.pool_info['backend_name'] = ( - self.configuration.safe_get('volume_backend_name')) - self.pool_info['max_over_subscription_ratio'] = ( - self.configuration.safe_get('max_over_subscription_ratio')) - self.pool_info['reserved_percentage'] = ( - self.configuration.safe_get('reserved_percentage')) - LOG.debug( - "Updating volume stats on file %(emcConfigFileName)s on " - "backend %(backendName)s.", - {'emcConfigFileName': self.pool_info['config_file'], - 'backendName': self.pool_info['backend_name']}) - - def _get_initiator_check_flag(self): - """Reads the configuration for initator_check flag. - - :returns: flag - """ - conf_string = (self.configuration.safe_get('initiator_check')) - ret_val = False - string_true = "True" - if conf_string: - if conf_string.lower() == string_true.lower(): - ret_val = True - return ret_val - - def _get_replication_info(self): - """Gather replication information, if provided.""" - self.rep_config = None - self.replication_targets = None - if hasattr(self.configuration, 'replication_device'): - self.rep_devices = self.configuration.safe_get( - 'replication_device') - if self.rep_devices and len(self.rep_devices) == 1: - self.rep_config = self.utils.get_replication_config( - self.rep_devices) - if self.rep_config: - self.replication_targets = [self.rep_config['array']] - if self.active_backend_id == self.rep_config['array']: - self.failover = True - self.extend_replicated_vol = self.rep_config['allow_extend'] - # use self.replication_enabled for update_volume_stats - self.replication_enabled = True - LOG.debug("The replication configuration is %(rep_config)s.", - {'rep_config': self.rep_config}) - elif self.rep_devices and len(self.rep_devices) > 1: - LOG.error("More than one replication target is configured. " - "Dell EMC VMAX only suppports a single replication " - "target. 
Replication will not be enabled.") - - def _get_slo_workload_combinations(self, array_info): - """Method to query the array for SLO and Workloads. - - Takes the arrayinfolist object and generates a set which has - all available SLO & Workload combinations - :param array_info: the array information - :returns: finalarrayinfolist - :raises: VolumeBackendAPIException: - """ - try: - array = array_info['SerialNumber'] - if self.failover: - array = self.active_backend_id - # Get the srp slo & workload settings - slo_settings = self.rest.get_slo_list(array) - # Remove 'None' from the list (so a 'None' slo is not combined - # with a workload, which is not permitted) - slo_settings = [x for x in slo_settings - if x.lower() not in ['none']] - workload_settings = self.rest.get_workload_settings(array) - workload_settings.append("None") - slo_workload_set = set( - ['%(slo)s:%(workload)s' % {'slo': slo, 'workload': workload} - for slo in slo_settings for workload in workload_settings]) - # Add back in in the only allowed 'None' slo/ workload combination - slo_workload_set.add('None:None') - - finalarrayinfolist = [] - for sloWorkload in slo_workload_set: - # Doing a shallow copy will work as we are modifying - # only strings - temparray_info = array_info.copy() - slo, workload = sloWorkload.split(':') - temparray_info['SLO'] = slo - temparray_info['Workload'] = workload - finalarrayinfolist.append(temparray_info) - except Exception as e: - exception_message = (_( - "Unable to get the SLO/Workload combinations from the array. " - "Exception received was %(e)s") % {'e': six.text_type(e)}) - LOG.error(exception_message) - raise exception.VolumeBackendAPIException( - data=exception_message) - return finalarrayinfolist - - def create_volume(self, volume): - """Creates a EMC(VMAX) volume from a storage group. 
- - :param volume: volume object - :returns: model_update - dict - """ - model_update = {} - volume_id = volume.id - extra_specs = self._initial_setup(volume) - - # Volume_name naming convention is 'OS-UUID'. - volume_name = self.utils.get_volume_element_name(volume_id) - volume_size = volume.size - - volume_dict = (self._create_volume( - volume_name, volume_size, extra_specs)) - - if volume.group_id is not None: - group_name = self._find_volume_group_name_from_id( - extra_specs[utils.ARRAY], volume.group_id) - if group_name is not None: - self.masking.add_volume_to_storage_group( - extra_specs[utils.ARRAY], volume_dict['device_id'], - group_name, volume_name, extra_specs) - - # Set-up volume replication, if enabled - if self.utils.is_replication_enabled(extra_specs): - rep_update = self._replicate_volume(volume, volume_name, - volume_dict, extra_specs) - model_update.update(rep_update) - - LOG.info("Leaving create_volume: %(name)s. Volume dict: %(dict)s.", - {'name': volume_name, 'dict': volume_dict}) - model_update.update( - {'provider_location': six.text_type(volume_dict)}) - return model_update - - def create_volume_from_snapshot(self, volume, snapshot): - """Creates a volume from a snapshot. 
- - :param volume: volume object - :param snapshot: snapshot object - :returns: model_update - :raises: VolumeBackendAPIException: - """ - LOG.debug("Entering create_volume_from_snapshot.") - model_update = {} - extra_specs = self._initial_setup(snapshot) - - clone_dict = self._create_cloned_volume( - volume, snapshot, extra_specs, is_snapshot=False, - from_snapvx=True) - - # Set-up volume replication, if enabled - if self.utils.is_replication_enabled(extra_specs): - rep_update = self._replicate_volume(volume, snapshot['name'], - clone_dict, extra_specs) - model_update.update(rep_update) - - model_update.update( - {'provider_location': six.text_type(clone_dict)}) - return model_update - - def create_cloned_volume(self, clone_volume, source_volume): - """Creates a clone of the specified volume. - - :param clone_volume: clone volume Object - :param source_volume: volume object - :returns: model_update, dict - """ - model_update = {} - extra_specs = self._initial_setup(source_volume) - clone_dict = self._create_cloned_volume(clone_volume, source_volume, - extra_specs) - - # Set-up volume replication, if enabled - if self.utils.is_replication_enabled(extra_specs): - rep_update = self._replicate_volume( - clone_volume, clone_volume.name, clone_dict, extra_specs) - model_update.update(rep_update) - - model_update.update( - {'provider_location': six.text_type(clone_dict)}) - return model_update - - def _replicate_volume(self, volume, volume_name, volume_dict, extra_specs, - delete_src=True): - """Setup up remote replication for a volume. 
- - :param volume: the volume object - :param volume_name: the volume name - :param volume_dict: the volume dict - :param extra_specs: the extra specifications - :param delete_src: flag to indicate if source should be deleted on - if replication fails - :returns: replication model_update - """ - array = volume_dict['array'] - try: - device_id = volume_dict['device_id'] - replication_status, replication_driver_data = ( - self.setup_volume_replication( - array, volume, device_id, extra_specs)) - except Exception: - if delete_src: - self._cleanup_replication_source( - array, volume, volume_name, volume_dict, extra_specs) - raise - return ({'replication_status': replication_status, - 'replication_driver_data': six.text_type( - replication_driver_data)}) - - def delete_volume(self, volume): - """Deletes a EMC(VMAX) volume. - - :param volume: volume object - """ - LOG.info("Deleting Volume: %(volume)s", - {'volume': volume.name}) - volume_name = self._delete_volume(volume) - LOG.info("Leaving delete_volume: %(volume_name)s.", - {'volume_name': volume_name}) - - def create_snapshot(self, snapshot, volume): - """Creates a snapshot. - - :param snapshot: snapshot object - :param volume: volume Object to create snapshot from - :returns: dict -- the cloned volume dictionary - """ - extra_specs = self._initial_setup(volume) - snapshot_dict = self._create_cloned_volume( - snapshot, volume, extra_specs, is_snapshot=True) - model_update = {'provider_location': six.text_type(snapshot_dict)} - return model_update - - def delete_snapshot(self, snapshot, volume): - """Deletes a snapshot. 
- - :param snapshot: snapshot object - :param volume: source volume - """ - LOG.info("Delete Snapshot: %(snapshotName)s.", - {'snapshotName': snapshot.name}) - extra_specs = self._initial_setup(volume) - sourcedevice_id, snap_name = self._parse_snap_info( - extra_specs[utils.ARRAY], snapshot) - if not sourcedevice_id or not snap_name: - LOG.info("No snapshot found on the array") - else: - self.provision.delete_volume_snap_check_for_links( - extra_specs[utils.ARRAY], snap_name, - sourcedevice_id, extra_specs) - LOG.info("Leaving delete_snapshot: %(ssname)s.", - {'ssname': snap_name}) - - def _remove_members(self, array, volume, device_id, - extra_specs, connector): - """This method unmaps a volume from a host. - - Removes volume from the storage group that belongs to a masking view. - :param array: the array serial number - :param volume: volume object - :param device_id: the VMAX volume device id - :param extra_specs: extra specifications - :param connector: the connector object - """ - volume_name = volume.name - LOG.debug("Detaching volume %s.", volume_name) - return self.masking.remove_and_reset_members( - array, device_id, volume_name, extra_specs, True, connector) - - def _unmap_lun(self, volume, connector): - """Unmaps a volume from the host. 
- - :param volume: the volume Object - :param connector: the connector Object - """ - extra_specs = self._initial_setup(volume) - if self.utils.is_volume_failed_over(volume): - extra_specs = self._get_replication_extra_specs( - extra_specs, self.rep_config) - volume_name = volume.name - LOG.info("Unmap volume: %(volume)s.", - {'volume': volume_name}) - if connector is None: - exception_message = ( - _("Connector must not be None - Cannot get the required " - "information needed to unmap the volume")) - LOG.exception(exception_message) - raise exception.VolumeBackendAPIException(data=exception_message) - - device_info, is_live_migration, source_storage_group_list = ( - self.find_host_lun_id(volume, connector['host'], extra_specs)) - if 'hostlunid' not in device_info: - LOG.info("Volume %s is not mapped. No volume to unmap.", - volume_name) - return - if is_live_migration and len(source_storage_group_list) == 1: - LOG.info("Volume %s is mapped. Failed live migration case", - volume_name) - return - source_nf_sg = None - array = extra_specs[utils.ARRAY] - if len(source_storage_group_list) > 1: - for storage_group in source_storage_group_list: - if 'NONFAST' in storage_group: - source_nf_sg = storage_group - break - if source_nf_sg: - # Remove volume from non fast storage group - self.masking.remove_volume_from_sg( - array, device_info['device_id'], volume_name, storage_group, - extra_specs) - else: - self._remove_members(array, volume, device_info['device_id'], - extra_specs, connector) - - def initialize_connection(self, volume, connector): - """Initializes the connection and returns device and connection info. - - The volume may be already mapped, if this is so the deviceInfo tuple - is returned. If the volume is not already mapped then we need to - gather information to either 1. Create an new masking view or 2. Add - the volume to an existing storage group within an already existing - maskingview. - - The naming convention is the following: - - .. 
code-block:: none - - initiator_group_name = OS---IG - e.g OS-myShortHost-I-IG - storage_group_name = OS----SG - e.g OS-myShortHost-SRP_1-I-SG - port_group_name = OS--PG The port_group_name will come from - the EMC configuration xml file. - These are precreated. If the portGroup does not - exist then an error will be returned to the user - maskingview_name = OS----MV - e.g OS-myShortHost-SRP_1-I-MV - - :param volume: volume Object - :param connector: the connector Object - :returns: dict -- device_info_dict - device information dict - """ - extra_specs = self._initial_setup(volume) - is_multipath = connector.get('multipath', False) - - volume_name = volume.name - LOG.info("Initialize connection: %(volume)s.", - {'volume': volume_name}) - if self.utils.is_volume_failed_over(volume): - extra_specs = self._get_replication_extra_specs( - extra_specs, self.rep_config) - device_info_dict, is_live_migration, source_storage_group_list = ( - self.find_host_lun_id(volume, connector['host'], extra_specs)) - masking_view_dict = self._populate_masking_dict( - volume, connector, extra_specs) - - if ('hostlunid' in device_info_dict and - device_info_dict['hostlunid'] is not None and - is_live_migration is False) or ( - is_live_migration and len(source_storage_group_list) > 1): - hostlunid = device_info_dict['hostlunid'] - LOG.info("Volume %(volume)s is already mapped. 
" - "The hostlunid is %(hostlunid)s.", - {'volume': volume_name, - 'hostlunid': hostlunid}) - port_group_name = ( - self.get_port_group_from_masking_view( - extra_specs[utils.ARRAY], - device_info_dict['maskingview'])) - - else: - if is_live_migration: - source_nf_sg, source_sg, source_parent_sg, is_source_nf_sg = ( - self._setup_for_live_migration( - device_info_dict, source_storage_group_list)) - masking_view_dict['source_nf_sg'] = source_nf_sg - masking_view_dict['source_sg'] = source_sg - masking_view_dict['source_parent_sg'] = source_parent_sg - try: - self.masking.pre_live_migration( - source_nf_sg, source_sg, source_parent_sg, - is_source_nf_sg, device_info_dict, extra_specs) - except Exception: - # Move it back to original storage group - source_storage_group_list = ( - self.rest.get_storage_groups_from_volume( - device_info_dict['array'], - device_info_dict['device_id'])) - self.masking.failed_live_migration( - masking_view_dict, source_storage_group_list, - extra_specs) - exception_message = (_( - "Unable to setup live migration because of the " - "following error: %(errorMessage)s.") - % {'errorMessage': sys.exc_info()[1]}) - raise exception.VolumeBackendAPIException( - data=exception_message) - device_info_dict, port_group_name = ( - self._attach_volume( - volume, connector, extra_specs, masking_view_dict, - is_live_migration)) - if is_live_migration: - self.masking.post_live_migration( - masking_view_dict, extra_specs) - if self.protocol.lower() == 'iscsi': - device_info_dict['ip_and_iqn'] = ( - self._find_ip_and_iqns( - extra_specs[utils.ARRAY], port_group_name)) - device_info_dict['is_multipath'] = is_multipath - return device_info_dict - - def _attach_volume(self, volume, connector, extra_specs, - masking_view_dict, is_live_migration=False): - """Attach a volume to a host. 
- - :param volume: the volume object - :param connector: the connector object - :param extra_specs: extra specifications - :param masking_view_dict: masking view information - :returns: dict -- device_info_dict - String -- port group name - :raises: VolumeBackendAPIException - """ - volume_name = volume.name - if is_live_migration: - masking_view_dict['isLiveMigration'] = True - else: - masking_view_dict['isLiveMigration'] = False - rollback_dict = self.masking.setup_masking_view( - masking_view_dict[utils.ARRAY], - masking_view_dict, extra_specs) - - # Find host lun id again after the volume is exported to the host. - - device_info_dict, __, __ = self.find_host_lun_id( - volume, connector['host'], extra_specs) - if 'hostlunid' not in device_info_dict: - # Did not successfully attach to host, - # so a rollback for FAST is required. - LOG.error("Error Attaching volume %(vol)s. " - "Cannot retrieve hostlunid. ", - {'vol': volume_name}) - self.masking.check_if_rollback_action_for_masking_required( - masking_view_dict[utils.ARRAY], - masking_view_dict[utils.DEVICE_ID], - rollback_dict) - exception_message = (_("Error Attaching volume %(vol)s.") - % {'vol': volume_name}) - raise exception.VolumeBackendAPIException( - data=exception_message) - - return device_info_dict, rollback_dict['port_group_name'] - - def terminate_connection(self, volume, connector): - """Disallow connection from connector. - - :param volume: the volume Object - :param connector: the connector Object - """ - volume_name = volume.name - LOG.info("Terminate connection: %(volume)s.", - {'volume': volume_name}) - self._unmap_lun(volume, connector) - - def extend_volume(self, volume, new_size): - """Extends an existing volume. 
- - :param volume: the volume Object - :param new_size: the new size to increase the volume to - :returns: dict -- modifiedVolumeDict - the extended volume Object - :raises: VolumeBackendAPIException: - """ - original_vol_size = volume.size - volume_name = volume.name - extra_specs = self._initial_setup(volume) - device_id = self._find_device_on_array(volume, extra_specs) - array = extra_specs[utils.ARRAY] - # Check if volume is part of an on-going clone operation - self._sync_check(array, device_id, volume_name, extra_specs) - if device_id is None: - exception_message = (_("Cannot find Volume: %(volume_name)s. " - "Extend operation. Exiting....") - % {'volume_name': volume_name}) - LOG.error(exception_message) - raise exception.VolumeBackendAPIException(data=exception_message) - __, snapvx_src, __ = self.rest.is_vol_in_rep_session(array, device_id) - if snapvx_src: - exception_message = ( - _("The volume: %(volume)s is a snapshot source. Extending a " - "volume with snapVx snapshots is not supported. Exiting...") - % {'volume': volume_name}) - LOG.error(exception_message) - raise exception.VolumeBackendAPIException(data=exception_message) - - if int(original_vol_size) > int(new_size): - exception_message = (_( - "Your original size: %(original_vol_size)s GB is greater " - "than: %(new_size)s GB. Only Extend is supported. Exiting...") - % {'original_vol_size': original_vol_size, - 'new_size': new_size}) - LOG.error(exception_message) - raise exception.VolumeBackendAPIException(data=exception_message) - LOG.info("Extending volume %(volume)s to %(new_size)d GBs", - {'volume': volume_name, - 'new_size': int(new_size)}) - if self.utils.is_replication_enabled(extra_specs): - # Extra logic required if volume is replicated - self.extend_volume_is_replicated( - array, volume, device_id, volume_name, new_size, extra_specs) - else: - self.provision.extend_volume( - array, device_id, new_size, extra_specs) - - LOG.debug("Leaving extend_volume: %(volume_name)s. 
", - {'volume_name': volume_name}) - - def update_volume_stats(self): - """Retrieve stats info.""" - pools = [] - # Dictionary to hold the arrays for which the SRP details - # have already been queried. - # This only applies to the arrays for which WLP is not enabled - arrays = {} - wlp_enabled = False - total_capacity_gb = 0 - free_capacity_gb = 0 - provisioned_capacity_gb = 0 - location_info = None - backend_name = self.pool_info['backend_name'] - max_oversubscription_ratio = ( - self.pool_info['max_over_subscription_ratio']) - reserved_percentage = self.pool_info['reserved_percentage'] - array_reserve_percent = None - array_info_list = self.pool_info['arrays_info'] - already_queried = False - for array_info in array_info_list: - if self.failover: - array_info = self.get_secondary_stats_info( - self.rep_config, array_info) - # Add both SLO & Workload name in the pool name - # Query the SRP only once if WLP is not enabled - # Only insert the array details in the dict once - self.rest.set_rest_credentials(array_info) - if array_info['SerialNumber'] not in arrays: - (location_info, total_capacity_gb, free_capacity_gb, - provisioned_capacity_gb, - array_reserve_percent, - wlp_enabled) = self._update_srp_stats(array_info) - else: - already_queried = True - pool_name = ("%(slo)s+%(workload)s+%(srpName)s+%(array)s" - % {'slo': array_info['SLO'], - 'workload': array_info['Workload'], - 'srpName': array_info['srpName'], - 'array': array_info['SerialNumber']}) - if wlp_enabled is False: - arrays[array_info['SerialNumber']] = ( - [total_capacity_gb, free_capacity_gb, - provisioned_capacity_gb, array_reserve_percent]) - - if already_queried: - # The dictionary will only have one key per VMAX - # Construct the location info - temp_location_info = ( - ("%(arrayName)s#%(srpName)s#%(slo)s#%(workload)s" - % {'arrayName': array_info['SerialNumber'], - 'srpName': array_info['srpName'], - 'slo': array_info['SLO'], - 'workload': array_info['Workload']})) - pool = {'pool_name': 
pool_name, - 'total_capacity_gb': - arrays[array_info['SerialNumber']][0], - 'free_capacity_gb': - arrays[array_info['SerialNumber']][1], - 'provisioned_capacity_gb': - arrays[array_info['SerialNumber']][2], - 'QoS_support': False, - 'location_info': temp_location_info, - 'thin_provisioning_support': True, - 'thick_provisioning_support': False, - 'consistent_group_snapshot_enabled': True, - 'max_over_subscription_ratio': - max_oversubscription_ratio, - 'reserved_percentage': reserved_percentage, - 'replication_enabled': self.replication_enabled} - if arrays[array_info['SerialNumber']][3]: - if reserved_percentage: - if (arrays[array_info['SerialNumber']][3] > - reserved_percentage): - pool['reserved_percentage'] = ( - arrays[array_info['SerialNumber']][3]) - else: - pool['reserved_percentage'] = ( - arrays[array_info['SerialNumber']][3]) - else: - pool = {'pool_name': pool_name, - 'total_capacity_gb': total_capacity_gb, - 'free_capacity_gb': free_capacity_gb, - 'provisioned_capacity_gb': provisioned_capacity_gb, - 'QoS_support': False, - 'location_info': location_info, - 'consistencygroup_support': False, - 'thin_provisioning_support': True, - 'thick_provisioning_support': False, - 'consistent_group_snapshot_enabled': True, - 'max_over_subscription_ratio': - max_oversubscription_ratio, - 'reserved_percentage': reserved_percentage, - 'replication_enabled': self.replication_enabled - } - if array_reserve_percent: - if isinstance(reserved_percentage, int): - if array_reserve_percent > reserved_percentage: - pool['reserved_percentage'] = array_reserve_percent - else: - pool['reserved_percentage'] = array_reserve_percent - - if max_oversubscription_ratio and ( - 0.0 < max_oversubscription_ratio < 1): - pool['max_over_subscription_ratio'] = ( - self.utils.get_default_oversubscription_ratio( - max_oversubscription_ratio)) - pools.append(pool) - - data = {'vendor_name': "Dell EMC", - 'driver_version': self.version, - 'storage_protocol': 'unknown', - 'volume_backend_name': 
backend_name or - self.__class__.__name__, - # Use zero capacities here so we always use a pool. - 'total_capacity_gb': 0, - 'free_capacity_gb': 0, - 'provisioned_capacity_gb': 0, - 'reserved_percentage': 0, - 'replication_enabled': self.replication_enabled, - 'replication_targets': self.replication_targets, - 'pools': pools} - - return data - - def _update_srp_stats(self, array_info): - """Update SRP stats. - - :param array_info: array information - :returns: location_info - :returns: totalManagedSpaceGbs - :returns: remainingManagedSpaceGbs - :returns: provisionedManagedSpaceGbs - :returns: array_reserve_percent - :returns: wlpEnabled - """ - (totalManagedSpaceGbs, remainingManagedSpaceGbs, - provisionedManagedSpaceGbs, array_reserve_percent, - wlpEnabled) = ( - self.provision.get_srp_pool_stats( - array_info['SerialNumber'], array_info)) - - LOG.info("Capacity stats for SRP pool %(srpName)s on array " - "%(arrayName)s total_capacity_gb=%(total_capacity_gb)lu, " - "free_capacity_gb=%(free_capacity_gb)lu, " - "provisioned_capacity_gb=%(provisioned_capacity_gb)lu", - {'srpName': array_info['srpName'], - 'arrayName': array_info['SerialNumber'], - 'total_capacity_gb': totalManagedSpaceGbs, - 'free_capacity_gb': remainingManagedSpaceGbs, - 'provisioned_capacity_gb': provisionedManagedSpaceGbs}) - - location_info = ("%(arrayName)s#%(srpName)s#%(slo)s#%(workload)s" - % {'arrayName': array_info['SerialNumber'], - 'srpName': array_info['srpName'], - 'slo': array_info['SLO'], - 'workload': array_info['Workload']}) - - return (location_info, totalManagedSpaceGbs, - remainingManagedSpaceGbs, provisionedManagedSpaceGbs, - array_reserve_percent, wlpEnabled) - - def _set_config_file_and_get_extra_specs(self, volume, - volume_type_id=None): - """Given the volume object get the associated volumetype. - - Given the volume object get the associated volumetype and the - extra specs associated with it. 
- Based on the name of the config group, register the config file - - :param volume: the volume object including the volume_type_id - :param volume_type_id: Optional override of volume.volume_type_id - :returns: dict -- the extra specs dict - :returns: string -- configuration file - """ - qos_specs = {} - extra_specs = self.utils.get_volumetype_extra_specs( - volume, volume_type_id) - if hasattr(volume, "volume_type") and ( - volume.volume_type and volume.volume_type.qos_specs): - qos_specs = volume.volume_type.qos_specs - config_group = None - # If there are no extra specs then the default case is assumed. - if extra_specs: - config_group = self.configuration.config_group - if extra_specs.get('replication_enabled') == ' True': - extra_specs[utils.IS_RE] = True - config_file = self._register_config_file_from_config_group( - config_group) - return extra_specs, config_file, qos_specs - - def _find_device_on_array(self, volume, extra_specs): - """Given the volume get the VMAX device Id. - - :param volume: volume object - :param extra_specs: the extra Specs - :returns: array, device_id - """ - founddevice_id = None - volume_name = volume.id - - loc = volume.provider_location - - if isinstance(loc, six.string_types): - name = ast.literal_eval(loc) - array = extra_specs[utils.ARRAY] - try: - device_id = name['device_id'] - except KeyError: - device_id = name['keybindings']['DeviceID'] - element_name = self.utils.get_volume_element_name( - volume_name) - admin_metadata = {} - if 'admin_metadata' in volume: - admin_metadata = volume.admin_metadata - if 'targetVolumeName' in admin_metadata: - target_vol_name = admin_metadata['targetVolumeName'] - founddevice_id = self.rest.find_volume_device_id( - array, target_vol_name) - else: - founddevice_id = self.rest.find_volume_device_id( - array, element_name) - # Allow for an external app to delete the volume. 
- if device_id and device_id != founddevice_id: - founddevice_id = None - - if founddevice_id is None: - LOG.debug("Volume %(volume_name)s not found on the array.", - {'volume_name': volume_name}) - else: - LOG.debug("Volume name: %(volume_name)s Volume device id: " - "%(founddevice_id)s.", - {'volume_name': volume_name, - 'founddevice_id': founddevice_id}) - - return founddevice_id - - def find_host_lun_id(self, volume, host, extra_specs): - """Given the volume dict find the host lun id for a volume. - - :param volume: the volume dict - :param host: host from connector - :param extra_specs: the extra specs - :returns: dict -- the data dict - """ - maskedvols = {} - is_live_migration = False - volume_name = volume.name - device_id = self._find_device_on_array(volume, extra_specs) - if device_id: - array = extra_specs[utils.ARRAY] - host = self.utils.get_host_short_name(host) - source_storage_group_list = ( - self.rest.get_storage_groups_from_volume(array, device_id)) - # return only masking views for this host - maskingviews = self.get_masking_views_from_volume( - array, device_id, host, source_storage_group_list) - - for maskingview in maskingviews: - host_lun_id = self.rest.find_mv_connections_for_vol( - array, maskingview, device_id) - if host_lun_id is not None: - devicedict = {'hostlunid': host_lun_id, - 'maskingview': maskingview, - 'array': array, - 'device_id': device_id} - maskedvols = devicedict - if not maskedvols: - LOG.debug( - "Host lun id not found for volume: %(volume_name)s " - "with the device id: %(device_id)s.", - {'volume_name': volume_name, - 'device_id': device_id}) - else: - LOG.debug("Device info: %(maskedvols)s.", - {'maskedvols': maskedvols}) - host = self.utils.get_host_short_name(host) - hoststr = ("-%(host)s-" - % {'host': host}) - - if hoststr.lower() not in maskedvols['maskingview'].lower(): - LOG.debug( - "Volume is masked but not to host %(host)s as is " - "expected. 
Assuming live migration.", - {'host': host}) - is_live_migration = True - else: - for storage_group in source_storage_group_list: - if 'NONFAST' in storage_group: - is_live_migration = True - break - else: - exception_message = (_("Cannot retrieve volume %(vol)s " - "from the array.") % {'vol': volume_name}) - LOG.exception(exception_message) - raise exception.VolumeBackendAPIException(exception_message) - - return maskedvols, is_live_migration, source_storage_group_list - - def get_masking_views_from_volume(self, array, device_id, host, - storage_group_list=None): - """Retrieve masking view list for a volume. - - :param array: array serial number - :param device_id: the volume device id - :param host: the host - :param storage_group_list: the storage group list to use - :returns: masking view list - """ - LOG.debug("Getting masking views from volume") - maskingview_list = [] - short_host = self.utils.get_host_short_name(host) - host_compare = False - if not storage_group_list: - storage_group_list = self.rest.get_storage_groups_from_volume( - array, device_id) - host_compare = True - for sg in storage_group_list: - mvs = self.rest.get_masking_views_from_storage_group( - array, sg) - for mv in mvs: - if host_compare: - if short_host.lower() in mv.lower(): - maskingview_list.append(mv) - else: - maskingview_list.append(mv) - return maskingview_list - - def _register_config_file_from_config_group(self, config_group_name): - """Given the config group name register the file. 
- - :param config_group_name: the config group name - :returns: string -- configurationFile - name of the configuration file - :raises: VolumeBackendAPIException: - """ - if config_group_name is None: - return CINDER_EMC_CONFIG_FILE - if hasattr(self.configuration, 'cinder_dell_emc_config_file'): - config_file = self.configuration.cinder_dell_emc_config_file - else: - config_file = ( - ("%(prefix)s%(configGroupName)s%(postfix)s" - % {'prefix': CINDER_EMC_CONFIG_FILE_PREFIX, - 'configGroupName': config_group_name, - 'postfix': CINDER_EMC_CONFIG_FILE_POSTFIX})) - - # The file saved in self.configuration may not be the correct one, - # double check. - if config_group_name not in config_file: - config_file = ( - ("%(prefix)s%(configGroupName)s%(postfix)s" - % {'prefix': CINDER_EMC_CONFIG_FILE_PREFIX, - 'configGroupName': config_group_name, - 'postfix': CINDER_EMC_CONFIG_FILE_POSTFIX})) - - if os.path.isfile(config_file): - LOG.debug("Configuration file : %(configurationFile)s exists.", - {'configurationFile': config_file}) - else: - exception_message = (_( - "Configuration file %(configurationFile)s does not exist.") - % {'configurationFile': config_file}) - LOG.error(exception_message) - raise exception.VolumeBackendAPIException(data=exception_message) - - return config_file - - def _initial_setup(self, volume, volume_type_id=None): - """Necessary setup to accumulate the relevant information. - - The volume object has a host in which we can parse the - config group name. The config group name is the key to our EMC - configuration file. The emc configuration file contains srp name - and array name which are mandatory fields. 
- :param volume: the volume object - :param volume_type_id: optional override of volume.volume_type_id - :returns: dict -- extra spec dict - :raises: VolumeBackendAPIException: - """ - try: - extra_specs, config_file, qos_specs = ( - self._set_config_file_and_get_extra_specs( - volume, volume_type_id)) - array_info = self.utils.parse_file_to_get_array_map( - config_file) - if not array_info: - exception_message = (_( - "Unable to get corresponding record for srp.")) - raise exception.VolumeBackendAPIException( - data=exception_message) - - self.rest.set_rest_credentials(array_info) - - extra_specs = self._set_vmax_extra_specs(extra_specs, array_info) - if (qos_specs and qos_specs.specs - and qos_specs.consumer != "front-end"): - extra_specs['qos'] = qos_specs.specs - except Exception: - exception_message = (_( - "Unable to get configuration information necessary to " - "create a volume: %(errorMessage)s.") - % {'errorMessage': sys.exc_info()[1]}) - raise exception.VolumeBackendAPIException(data=exception_message) - return extra_specs - - def _populate_masking_dict(self, volume, connector, extra_specs): - """Get all the names of the maskingview and sub-components. 
- - :param volume: the volume object - :param connector: the connector object - :param extra_specs: extra specifications - :returns: dict -- a dictionary with masking view information - """ - masking_view_dict = {} - host_name = connector['host'] - unique_name = self.utils.truncate_string(extra_specs[utils.SRP], 12) - protocol = self.utils.get_short_protocol_type(self.protocol) - short_host_name = self.utils.get_host_short_name(host_name) - masking_view_dict[utils.DISABLECOMPRESSION] = False - masking_view_dict['replication_enabled'] = False - slo = extra_specs[utils.SLO] - workload = extra_specs[utils.WORKLOAD] - rep_enabled = self.utils.is_replication_enabled(extra_specs) - short_pg_name = self.utils.get_pg_short_name( - extra_specs[utils.PORTGROUPNAME]) - masking_view_dict[utils.SLO] = slo - masking_view_dict[utils.WORKLOAD] = workload - masking_view_dict[utils.SRP] = unique_name - masking_view_dict[utils.ARRAY] = extra_specs[utils.ARRAY] - masking_view_dict[utils.PORTGROUPNAME] = ( - extra_specs[utils.PORTGROUPNAME]) - if self._get_initiator_check_flag(): - masking_view_dict[utils.INITIATOR_CHECK] = True - else: - masking_view_dict[utils.INITIATOR_CHECK] = False - - if slo: - slo_wl_combo = self.utils.truncate_string(slo + workload, 10) - child_sg_name = ( - "OS-%(shortHostName)s-%(srpName)s-%(combo)s-%(pg)s" - % {'shortHostName': short_host_name, - 'srpName': unique_name, - 'combo': slo_wl_combo, - 'pg': short_pg_name}) - do_disable_compression = self.utils.is_compression_disabled( - extra_specs) - if do_disable_compression: - child_sg_name = ("%(child_sg_name)s-CD" - % {'child_sg_name': child_sg_name}) - masking_view_dict[utils.DISABLECOMPRESSION] = True - else: - child_sg_name = ( - "OS-%(shortHostName)s-No_SLO-%(pg)s" - % {'shortHostName': short_host_name, - 'pg': short_pg_name}) - if rep_enabled: - child_sg_name += "-RE" - masking_view_dict['replication_enabled'] = True - mv_prefix = ( - "OS-%(shortHostName)s-%(protocol)s-%(pg)s" - % {'shortHostName': 
short_host_name, - 'protocol': protocol, 'pg': short_pg_name}) - - masking_view_dict[utils.SG_NAME] = child_sg_name - - masking_view_dict[utils.MV_NAME] = ("%(prefix)s-MV" - % {'prefix': mv_prefix}) - - masking_view_dict[utils.PARENT_SG_NAME] = ("%(prefix)s-SG" - % {'prefix': mv_prefix}) - volume_name = volume.name - device_id = self._find_device_on_array(volume, extra_specs) - if not device_id: - exception_message = (_("Cannot retrieve volume %(vol)s " - "from the array. ") % {'vol': volume_name}) - LOG.exception(exception_message) - raise exception.VolumeBackendAPIException(exception_message) - - masking_view_dict[utils.IG_NAME] = ( - ("OS-%(shortHostName)s-%(protocol)s-IG" - % {'shortHostName': short_host_name, - 'protocol': protocol})) - masking_view_dict[utils.CONNECTOR] = connector - masking_view_dict[utils.DEVICE_ID] = device_id - masking_view_dict[utils.VOL_NAME] = volume_name - - return masking_view_dict - - def _create_cloned_volume( - self, volume, source_volume, extra_specs, is_snapshot=False, - from_snapvx=False): - """Create a clone volume from the source volume. 
- - :param volume: clone volume - :param source_volume: source of the clone volume - :param extra_specs: extra specs - :param is_snapshot: boolean -- Defaults to False - :param from_snapvx: bool -- Defaults to False - :returns: dict -- cloneDict the cloned volume dictionary - :raises: VolumeBackendAPIException: - """ - clone_name = volume.name - snap_name = None - LOG.info("Create a replica from Volume: Clone Volume: %(clone_name)s " - "from Source Volume: %(source_name)s.", - {'clone_name': clone_name, - 'source_name': source_volume.name}) - - array = extra_specs[utils.ARRAY] - is_clone_license = self.rest.is_snapvx_licensed(array) - if from_snapvx: - source_device_id, snap_name = self._parse_snap_info( - array, source_volume) - else: - source_device_id = self._find_device_on_array( - source_volume, extra_specs) - - if not is_clone_license: - exception_message = (_( - "SnapVx feature is not licensed on %(array)s.") - % {'array': array}) - LOG.error(exception_message) - raise exception.VolumeBackendAPIException(data=exception_message) - - # Check if source is currently a snap target. Wait for sync if true. - self._sync_check(array, source_device_id, source_volume.name, - extra_specs, tgt_only=True) - - if not is_snapshot: - clone_dict = self._create_replica( - array, volume, source_device_id, extra_specs, - snap_name=snap_name) - else: - clone_dict = self._create_snapshot( - array, volume, source_device_id, extra_specs) - - LOG.debug("Leaving _create_cloned_volume: Volume: " - "%(clone_name)s Source Device Id: %(source_name)s ", - {'clone_name': clone_name, - 'source_name': source_device_id}) - - return clone_dict - - def _parse_snap_info(self, array, snapshot): - """Given a snapshot object, parse the provider_location. 
- - :param array: the array serial number - :param snapshot: the snapshot object - :returns: sourcedevice_id, foundsnap_name - """ - foundsnap_name = None - sourcedevice_id = None - volume_name = snapshot.id - - loc = snapshot.provider_location - - if isinstance(loc, six.string_types): - name = ast.literal_eval(loc) - sourcedevice_id = name['source_id'] - snap_name = name['snap_name'] - # Ensure snapvx is on the array. - try: - snap_details = self.rest.get_volume_snap( - array, sourcedevice_id, snap_name) - if snap_details: - foundsnap_name = snap_name - except Exception as e: - LOG.info("Exception in retrieving snapshot: %(e)s.", - {'e': e}) - foundsnap_name = None - - if foundsnap_name is None or sourcedevice_id is None: - exception_message = (_("Error retrieving snapshot details. " - "Snapshot name: %(snap)s") % - {'snap': volume_name}) - LOG.error(exception_message) - - else: - LOG.debug("Source volume: %(volume_name)s Snap name: " - "%(foundsnap_name)s.", - {'volume_name': sourcedevice_id, - 'foundsnap_name': foundsnap_name}) - - return sourcedevice_id, foundsnap_name - - def _create_snapshot(self, array, snapshot, - source_device_id, extra_specs): - """Create a snap Vx of a volume. - - :param array: the array serial number - :param snapshot: the snapshot object - :param source_device_id: the source device id - :param extra_specs: the extra specifications - :returns: snap_dict - """ - clone_name = self.utils.get_volume_element_name(snapshot.id) - snap_name = self.utils.truncate_string(clone_name, 19) - try: - self.provision.create_volume_snapvx(array, source_device_id, - snap_name, extra_specs) - except Exception as e: - exception_message = (_("Error creating snap Vx of %(vol)s. 
" - "Exception received: %(e)s.") - % {'vol': source_device_id, - 'e': six.text_type(e)}) - LOG.error(exception_message) - raise exception.VolumeBackendAPIException(data=exception_message) - snap_dict = {'snap_name': snap_name, 'source_id': source_device_id} - return snap_dict - - def _delete_volume(self, volume): - """Helper function to delete the specified volume. - - Pass in host if is snapshot - :param volume: volume object to be deleted - :returns: volume_name (string vol name) - """ - volume_name = volume.name - extra_specs = self._initial_setup(volume) - - device_id = self._find_device_on_array(volume, extra_specs) - if device_id is None: - LOG.error("Volume %(name)s not found on the array. " - "No volume to delete.", - {'name': volume_name}) - return volume_name - - array = extra_specs[utils.ARRAY] - # Check if volume is snap source - self._sync_check(array, device_id, volume_name, extra_specs) - # Remove from any storage groups and cleanup replication - self._remove_vol_and_cleanup_replication( - array, device_id, volume_name, extra_specs, volume) - self._delete_from_srp( - array, device_id, volume_name, extra_specs) - return volume_name - - def _create_volume( - self, volume_name, volume_size, extra_specs): - """Create a volume. - - :param volume_name: the volume name - :param volume_size: the volume size - :param extra_specs: extra specifications - :returns: int -- return code - :returns: dict -- volume_dict - :raises: VolumeBackendAPIException: - """ - array = extra_specs[utils.ARRAY] - is_valid_slo, is_valid_workload = self.provision.verify_slo_workload( - array, extra_specs[utils.SLO], - extra_specs[utils.WORKLOAD], extra_specs[utils.SRP]) - - if not is_valid_slo or not is_valid_workload: - exception_message = (_( - "Either SLO: %(slo)s or workload %(workload)s is invalid. 
" - "Examine previous error statement for valid values.") - % {'slo': extra_specs[utils.SLO], - 'workload': extra_specs[utils.WORKLOAD]}) - LOG.error(exception_message) - raise exception.VolumeBackendAPIException(data=exception_message) - - LOG.debug("Create Volume: %(volume)s Srp: %(srp)s " - "Array: %(array)s " - "Size: %(size)lu.", - {'volume': volume_name, - 'srp': extra_specs[utils.SRP], - 'array': array, - 'size': volume_size}) - - do_disable_compression = self.utils.is_compression_disabled( - extra_specs) - - storagegroup_name = self.masking.get_or_create_default_storage_group( - array, extra_specs[utils.SRP], extra_specs[utils.SLO], - extra_specs[utils.WORKLOAD], extra_specs, - do_disable_compression) - try: - volume_dict = self.provision.create_volume_from_sg( - array, volume_name, storagegroup_name, - volume_size, extra_specs) - except Exception: - # if the volume create fails, check if the - # storage group needs to be cleaned up - LOG.error("Create volume failed. Checking if " - "storage group cleanup necessary...") - num_vol_in_sg = self.rest.get_num_vols_in_sg( - array, storagegroup_name) - - if num_vol_in_sg == 0: - LOG.debug("There are no volumes in the storage group " - "%(sg_id)s. Deleting storage group.", - {'sg_id': storagegroup_name}) - self.rest.delete_storage_group( - array, storagegroup_name) - raise - - return volume_dict - - def _set_vmax_extra_specs(self, extra_specs, pool_record): - """Set the VMAX extra specs. - - The pool_name extra spec must be set, otherwise a default slo/workload - will be chosen. The portgroup can either be passed as an extra spec - on the volume type (e.g. 'port_group_name = os-pg1-pg'), or can - be chosen from a list which must be provided in the xml file, e.g.: - - OS-PORTGROUP1-PG - OS-PORTGROUP2-PG - . 
- - :param extra_specs: extra specifications - :param pool_record: pool record - :returns: dict -- the extra specifications dictionary - """ - # set extra_specs from pool_record - extra_specs[utils.SRP] = pool_record['srpName'] - extra_specs[utils.ARRAY] = pool_record['SerialNumber'] - if not extra_specs.get(utils.PORTGROUPNAME): - extra_specs[utils.PORTGROUPNAME] = pool_record['PortGroup'] - if not extra_specs[utils.PORTGROUPNAME]: - error_message = (_("Port group name has not been provided - " - "please configure the 'port_group_name' extra " - "spec on the volume type, or enter a list of " - "portgroups to the xml file associated with " - "this backend e.g." - "" - " OS-PORTGROUP1-PG" - " OS-PORTGROUP2-PG" - ".")) - LOG.exception(error_message) - raise exception.VolumeBackendAPIException(data=error_message) - - extra_specs[utils.INTERVAL] = self.interval - LOG.debug("The interval is set at: %(intervalInSecs)s.", - {'intervalInSecs': self.interval}) - extra_specs[utils.RETRIES] = self.retries - LOG.debug("Retries are set at: %(retries)s.", - {'retries': self.retries}) - - # Set pool_name slo and workload - if 'pool_name' in extra_specs: - pool_name = extra_specs['pool_name'] - else: - slo_list = self.rest.get_slo_list(pool_record['SerialNumber']) - if 'Optimized' in slo_list: - slo = 'Optimized' - elif 'Diamond' in slo_list: - slo = 'Diamond' - else: - slo = 'None' - pool_name = ("%(slo)s+%(workload)s+%(srpName)s+%(array)s" - % {'slo': slo, - 'workload': 'None', - 'srpName': pool_record['srpName'], - 'array': pool_record['SerialNumber']}) - LOG.warning("Pool_name is not present in the extra_specs " - "- using default pool %(pool_name)s.", - {'pool_name': pool_name}) - pool_details = pool_name.split('+') - slo_from_extra_spec = pool_details[0] - workload_from_extra_spec = pool_details[1] - # Standardize slo and workload 'NONE' naming conventions - if workload_from_extra_spec.lower() == 'none': - workload_from_extra_spec = 'NONE' - if slo_from_extra_spec.lower() == 
'none': - slo_from_extra_spec = None - extra_specs[utils.SLO] = slo_from_extra_spec - extra_specs[utils.WORKLOAD] = workload_from_extra_spec - if self.rest.is_compression_capable(extra_specs[utils.ARRAY]): - if extra_specs.get(utils.DISABLECOMPRESSION): - # If not True remove it. - if not strutils.bool_from_string( - extra_specs[utils.DISABLECOMPRESSION]): - extra_specs.pop(utils.DISABLECOMPRESSION, None) - else: - extra_specs.pop(utils.DISABLECOMPRESSION, None) - - LOG.debug("SRP is: %(srp)s " - "Array is: %(array)s " - "SLO is: %(slo)s " - "Workload is: %(workload)s.", - {'srp': extra_specs[utils.SRP], - 'array': extra_specs[utils.ARRAY], - 'slo': extra_specs[utils.SLO], - 'workload': extra_specs[utils.WORKLOAD]}) - return extra_specs - - def _delete_from_srp(self, array, device_id, volume_name, - extra_specs): - """Delete from srp. - - :param array: the array serial number - :param device_id: the device id - :param volume_name: the volume name - :param extra_specs: the extra specifications - :raises: VolumeBackendAPIException: - """ - try: - LOG.debug("Delete Volume: %(name)s. device_id: %(device_id)s.", - {'name': volume_name, 'device_id': device_id}) - self.provision.delete_volume_from_srp( - array, device_id, volume_name) - except Exception as e: - # If we cannot successfully delete the volume, then we want to - # return the volume to the default storage group, - # which should be the SG it previously belonged to. - self.masking.add_volume_to_default_storage_group( - array, device_id, volume_name, extra_specs) - - error_message = (_("Failed to delete volume %(volume_name)s. " - "Exception received: %(e)s") % - {'volume_name': volume_name, - 'e': six.text_type(e)}) - LOG.exception(error_message) - raise exception.VolumeBackendAPIException(data=error_message) - - def _remove_vol_and_cleanup_replication( - self, array, device_id, volume_name, extra_specs, volume=None): - """Remove a volume from its storage groups and cleanup replication. 
- - :param array: the array serial number - :param device_id: the device id - :param volume_name: the volume name - :param extra_specs: the extra specifications - :param volume: the volume object - """ - # Remove from any storage groups - self.masking.remove_and_reset_members( - array, device_id, volume_name, extra_specs, False) - # Cleanup remote replication - if self.utils.is_replication_enabled(extra_specs): - self.cleanup_lun_replication(volume, volume_name, - device_id, extra_specs) - - def get_target_wwns_from_masking_view( - self, volume, connector): - """Find target WWNs via the masking view. - - :param volume: volume to be attached - :param connector: the connector dict - :returns: list -- the target WWN list - """ - target_wwns = [] - host = connector['host'] - short_host_name = self.utils.get_host_short_name(host) - extra_specs = self._initial_setup(volume) - array = extra_specs[utils.ARRAY] - device_id = self._find_device_on_array(volume, extra_specs) - masking_view_list = self.get_masking_views_from_volume( - array, device_id, short_host_name) - if masking_view_list is not None: - portgroup = self.get_port_group_from_masking_view( - array, masking_view_list[0]) - target_wwns = self.rest.get_target_wwns(array, portgroup) - LOG.info("Target wwns in masking view %(maskingView)s: " - "%(targetWwns)s.", - {'maskingView': masking_view_list[0], - 'targetWwns': target_wwns}) - return target_wwns - - def get_port_group_from_masking_view(self, array, maskingview_name): - """Get the port groups in a masking view. - - :param array: the array serial number - :param maskingview_name: masking view name - :returns: port group name - """ - return self.rest.get_element_from_masking_view( - array, maskingview_name, portgroup=True) - - def get_initiator_group_from_masking_view(self, array, maskingview_name): - """Get the initiator group in a masking view. 

        :param array: the array serial number
        :param maskingview_name: masking view name
        :returns: initiator group name
        """
        return self.rest.get_element_from_masking_view(
            array, maskingview_name, host=True)

    def get_common_masking_views(self, array, portgroup_name,
                                 initiator_group_name):
        """Get common masking views, if any.

        :param array: the array serial number
        :param portgroup_name: port group name
        :param initiator_group_name: ig name
        :returns: list of masking views
        """
        LOG.debug("Finding Masking Views for port group %(pg)s and %(ig)s.",
                  {'pg': portgroup_name, 'ig': initiator_group_name})
        masking_view_list = self.rest.get_common_masking_views(
            array, portgroup_name, initiator_group_name)
        return masking_view_list

    def _get_ip_and_iqn(self, array, port):
        """Get ip and iqn from the director port.

        :param array: the array serial number
        :param port: the director port on the array
        :returns: ip_and_iqn - list of {'iqn': ..., 'ip': ...} dicts,
            one entry per ip address reported for the port
        """
        ip_iqn_list = []
        ip_addresses, iqn = self.rest.get_iscsi_ip_address_and_iqn(
            array, port)
        for ip in ip_addresses:
            ip_iqn_list.append({'iqn': iqn, 'ip': ip})
        return ip_iqn_list

    def _find_ip_and_iqns(self, array, port_group_name):
        """Find the list of ips and iqns for the ports in a portgroup.

        :param array: the array serial number
        :param port_group_name: the portgroup name
        :returns: ip_and_iqn - list of dicts
        """
        ips_and_iqns = []
        LOG.debug("The portgroup name for iscsiadm is %(pg)s",
                  {'pg': port_group_name})
        ports = self.rest.get_port_ids(array, port_group_name)
        for port in ports:
            ip_and_iqn = self._get_ip_and_iqn(array, port)
            ips_and_iqns.extend(ip_and_iqn)
        return ips_and_iqns

    def _create_replica(
            self, array, clone_volume, source_device_id,
            extra_specs, snap_name=None):
        """Create a replica.

        Create replica for source volume, source can be volume or snapshot.

        :param array: the array serial number
        :param clone_volume: the clone volume object
        :param source_device_id: the device ID of the volume
        :param extra_specs: extra specifications
        :param snap_name: the snapshot name - optional; when omitted a
            temporary snapshot is created for the copy session
        :returns: int -- return code
        :returns: dict -- cloneDict
        """
        target_device_id = None
        clone_id = clone_volume.id
        clone_name = self.utils.get_volume_element_name(clone_id)
        create_snap = False
        # VMAX supports using a target volume that is bigger than
        # the source volume, so we create the target volume the desired
        # size at this point to avoid having to extend later
        try:
            clone_dict = self._create_volume(
                clone_name, clone_volume.size, extra_specs)
            target_device_id = clone_dict['device_id']
            LOG.info("The target device id is: %(device_id)s.",
                     {'device_id': target_device_id})
            if not snap_name:
                snap_name = self.utils.get_temp_snap_name(
                    clone_name, source_device_id)
                create_snap = True
            self.provision.create_volume_replica(
                array, source_device_id, target_device_id,
                snap_name, extra_specs, create_snap)
        except Exception as e:
            if target_device_id:
                LOG.warning("Create replica failed. Cleaning up the target "
                            "volume. Clone name: %(cloneName)s, Error "
                            "received is %(e)s.",
                            {'cloneName': clone_name, 'e': e})
                self._cleanup_target(
                    array, target_device_id, source_device_id,
                    clone_name, snap_name, extra_specs)
            # Re-throw the exception.
            raise
        return clone_dict

    def _cleanup_target(
            self, array, target_device_id, source_device_id,
            clone_name, snap_name, extra_specs):
        """Cleanup target volume on failed clone/ snapshot creation.

        :param array: the array serial number
        :param target_device_id: the target device ID
        :param source_device_id: the source device ID
        :param clone_name: the name of the clone volume
        :param snap_name: the snapshot name
        :param extra_specs: the extra specifications
        """
        snap_session = self.rest.get_sync_session(
            array, source_device_id, snap_name, target_device_id)
        if snap_session:
            self.provision.break_replication_relationship(
                array, target_device_id, source_device_id,
                snap_name, extra_specs)
        self._delete_from_srp(
            array, target_device_id, clone_name, extra_specs)

    def _sync_check(self, array, device_id, volume_name, extra_specs,
                    tgt_only=False):
        """Check if volume is part of a SnapVx sync process.

        Unlinks any targets from their snapshots and removes temporary
        snapshots (those with 'temp' in the name) once unlinked.

        :param array: the array serial number
        :param device_id: volume instance
        :param volume_name: volume name
        :param tgt_only: Flag - return only sessions where device is target
        :param extra_specs: extra specifications
        """
        snap_vx_sessions = self.rest.find_snap_vx_sessions(
            array, device_id, tgt_only)
        if snap_vx_sessions:
            for session in snap_vx_sessions:
                source = session['source_vol']
                snap_name = session['snap_name']
                targets = session['target_vol_list']
                for target in targets:
                    # Break the replication relationship
                    LOG.debug("Unlinking source from target. Source: "
                              "%(volume)s, Target: %(target)s.",
                              {'volume': volume_name, 'target': target})
                    self.provision.break_replication_relationship(
                        array, target, source, snap_name, extra_specs)
                if 'temp' in snap_name:
                    self.provision.delete_temp_volume_snap(
                        array, snap_name, source)

    def manage_existing(self, volume, external_ref):
        """Manages an existing VMAX Volume (import to Cinder).

        Renames the existing volume to match the expected name for the volume.
        Also need to consider things like QoS, Emulation, account/tenant.

        :param volume: the volume object including the volume_type_id
        :param external_ref: reference to the existing volume
        :returns: dict -- model_update
        """
        LOG.info("Beginning manage existing volume process")
        array, device_id = self.utils.get_array_and_device_id(
            volume, external_ref)
        volume_id = volume.id
        # Check if the existing volume is valid for cinder management
        self._check_lun_valid_for_cinder_management(
            array, device_id, volume_id, external_ref)
        extra_specs = self._initial_setup(volume)

        volume_name = self.utils.get_volume_element_name(volume_id)
        # Rename the volume
        LOG.debug("Rename volume %(vol)s to %(element_name)s.",
                  {'vol': volume_id,
                   'element_name': volume_name})
        self.rest.rename_volume(array, device_id, volume_name)
        provider_location = {'device_id': device_id, 'array': array}
        model_update = {'provider_location': six.text_type(provider_location)}

        # Set-up volume replication, if enabled
        if self.utils.is_replication_enabled(extra_specs):
            rep_update = self._replicate_volume(volume, volume_name,
                                                provider_location,
                                                extra_specs, delete_src=False)
            model_update.update(rep_update)

        else:
            # Add volume to default storage group
            self.masking.add_volume_to_default_storage_group(
                array, device_id, volume_name, extra_specs)

        return model_update

    def _check_lun_valid_for_cinder_management(
            self, array, device_id, volume_id, external_ref):
        """Check if a volume is valid for cinder management.

        :param array: the array serial number
        :param device_id: the device id
        :param volume_id: the cinder volume id
        :param external_ref: the external reference
        :raises: ManageExistingInvalidReference, ManageExistingAlreadyManaged:
        """
        # Ensure the volume exists on the array
        volume_details = self.rest.get_volume(array, device_id)
        if not volume_details:
            msg = (_('Unable to retrieve volume details from array for '
                     'device %(device_id)s') % {'device_id': device_id})
            raise exception.ManageExistingInvalidReference(
                existing_ref=external_ref, reason=msg)

        # Check if volume is already cinder managed
        if volume_details.get('volume_identifier'):
            volume_identifier = volume_details['volume_identifier']
            if volume_identifier.startswith(utils.VOLUME_ELEMENT_NAME_PREFIX):
                raise exception.ManageExistingAlreadyManaged(
                    volume_ref=volume_id)

        # Check if the volume is attached by checking if in any masking view.
        storagegrouplist = self.rest.get_storage_groups_from_volume(
            array, device_id)
        for sg in storagegrouplist:
            mvs = self.rest.get_masking_views_from_storage_group(
                array, sg)
            if mvs:
                msg = (_("Unable to import volume %(device_id)s to cinder. "
                         "Volume is in masking view(s): %(mv)s.")
                       % {'device_id': device_id, 'mv': mvs})
                raise exception.ManageExistingInvalidReference(
                    existing_ref=external_ref, reason=msg)

        # Check if there are any replication sessions associated
        # with the volume.
        snapvx_tgt, snapvx_src, rdf = self.rest.is_vol_in_rep_session(
            array, device_id)
        if snapvx_tgt or snapvx_src or rdf:
            msg = (_("Unable to import volume %(device_id)s to cinder. "
                     "It is part of a replication session.")
                   % {'device_id': device_id})
            raise exception.ManageExistingInvalidReference(
                existing_ref=external_ref, reason=msg)

    def manage_existing_get_size(self, volume, external_ref):
        """Return size of an existing VMAX volume to manage_existing.

        :param self: reference to class
        :param volume: the volume object including the volume_type_id
        :param external_ref: reference to the existing volume
        :returns: size of the volume in GB
        :raises: ManageExistingInvalidReference: if the volume does not
            exist or its size is not a whole number of GB
        """
        LOG.debug("Volume in manage_existing_get_size: %(volume)s.",
                  {'volume': volume})
        array, device_id = self.utils.get_array_and_device_id(
            volume, external_ref)
        # Ensure the volume exists on the array
        volume_details = self.rest.get_volume(array, device_id)
        if not volume_details:
            msg = (_('Unable to retrieve volume details from array for '
                     'device %(device_id)s') % {'device_id': device_id})
            raise exception.ManageExistingInvalidReference(
                existing_ref=external_ref, reason=msg)

        size = float(self.rest.get_size_of_device_on_array(array, device_id))
        if not size.is_integer():
            exception_message = (
                _("Cannot manage existing VMAX volume %(device_id)s "
                  "- it has a size of %(vol_size)s but only whole GB "
                  "sizes are supported. Please extend the "
                  "volume to the nearest GB value before importing.")
                % {'device_id': device_id, 'vol_size': size, })
            LOG.exception(exception_message)
            raise exception.ManageExistingInvalidReference(
                existing_ref=external_ref, reason=exception_message)

        LOG.debug("Size of volume %(device_id)s is %(vol_size)s GB.",
                  {'device_id': device_id, 'vol_size': int(size)})
        return int(size)

    def unmanage(self, volume):
        """Export VMAX volume from Cinder.

        Leave the volume intact on the backend array.

        :param volume: the volume object
        """
        volume_name = volume.name
        volume_id = volume.id
        LOG.info("Unmanage volume %(name)s, id=%(id)s",
                 {'name': volume_name, 'id': volume_id})
        extra_specs = self._initial_setup(volume)
        device_id = self._find_device_on_array(volume, extra_specs)
        if device_id is None:
            LOG.error("Cannot find Volume: %(id)s for "
                      "unmanage operation. Exiting...",
                      {'id': volume_id})
        else:
            # Check if volume is snap source
            self._sync_check(extra_specs['array'], device_id,
                             volume_name, extra_specs)
            # Remove volume from any openstack storage groups
            # and remove any replication
            self._remove_vol_and_cleanup_replication(
                extra_specs['array'], device_id,
                volume_name, extra_specs, volume)
            # Rename the volume to volumeId, thus remove the 'OS-' prefix.
            self.rest.rename_volume(
                extra_specs[utils.ARRAY], device_id, volume_id)

    def retype(self, volume, new_type, host):
        """Migrate volume to another host using retype.

        :param volume: the volume object including the volume_type_id
        :param new_type: the new volume type.
        :param host: The host dict holding the relevant target(destination)
            information
        :returns: boolean -- True if retype succeeded, False if error
        """
        volume_name = volume.name
        LOG.info("Migrating Volume %(volume)s via retype.",
                 {'volume': volume_name})

        extra_specs = self._initial_setup(volume)

        device_id = self._find_device_on_array(volume, extra_specs)
        if device_id is None:
            LOG.error("Volume %(name)s not found on the array. "
                      "No volume to migrate using retype.",
                      {'name': volume_name})
            return False

        if self.utils.is_replication_enabled(extra_specs):
            LOG.error("Volume %(name)s is replicated - "
                      "Replicated volumes are not eligible for "
                      "storage assisted retype. Host assisted "
                      "retype is supported.",
                      {'name': volume_name})
            return False

        return self._slo_workload_migration(device_id, volume, host,
                                            volume_name, new_type,
                                            extra_specs)

    def _slo_workload_migration(self, device_id, volume, host,
                                volume_name, new_type, extra_specs):
        """Migrate from SLO/Workload combination to another.

        :param device_id: the volume device id
        :param volume: the volume object
        :param host: the host dict
        :param volume_name: the name of the volume
        :param new_type: the type to migrate to
        :param extra_specs: extra specifications
        :returns: boolean -- True if migration succeeded, False if error.
        """
        is_compression_disabled = self.utils.is_compression_disabled(
            extra_specs)
        # Check if old type and new type have different compression types
        do_change_compression = (self.utils.change_compression_type(
            is_compression_disabled, new_type))
        is_valid, target_slo, target_workload = (
            self._is_valid_for_storage_assisted_migration(
                device_id, host, extra_specs[utils.ARRAY],
                extra_specs[utils.SRP], volume_name,
                do_change_compression))

        if not is_valid:
            LOG.error(
                "Volume %(name)s is not suitable for storage "
                "assisted migration using retype.",
                {'name': volume_name})
            return False
        # Migrate when the target host differs, or when only the
        # compression setting changes on the same host.
        if volume.host != host['host'] or do_change_compression:
            LOG.debug(
                "Retype Volume %(name)s from source host %(sourceHost)s "
                "to target host %(targetHost)s. Compression change is %(cc)r.",
                {'name': volume_name,
                 'sourceHost': volume.host,
                 'targetHost': host['host'],
                 'cc': do_change_compression})
            return self._migrate_volume(
                extra_specs[utils.ARRAY], device_id,
                extra_specs[utils.SRP], target_slo,
                target_workload, volume_name, new_type, extra_specs)

        return False

    def _migrate_volume(
            self, array, device_id, srp, target_slo,
            target_workload, volume_name, new_type, extra_specs):
        """Migrate from one slo/workload combination to another.

        This requires moving the volume from its current SG to a
        new or existing SG that has the target attributes.

        :param array: the array serial number
        :param device_id: the device number
        :param srp: the storage resource pool
        :param target_slo: the target service level
        :param target_workload: the target workload
        :param volume_name: the volume name
        :param new_type: the volume type to migrate to
        :param extra_specs: the extra specifications
        :returns: bool
        """
        storagegroups = self.rest.get_storage_groups_from_volume(
            array, device_id)
        if not storagegroups:
            LOG.warning("Volume : %(volume_name)s does not currently "
                        "belong to any storage groups.",
                        {'volume_name': volume_name})
        else:
            self.masking.remove_and_reset_members(
                array, device_id, None, extra_specs, False)

        # Compression setting is taken from the *target* type.
        target_extra_specs = new_type['extra_specs']
        is_compression_disabled = self.utils.is_compression_disabled(
            target_extra_specs)

        try:
            target_sg_name = self.masking.get_or_create_default_storage_group(
                array, srp, target_slo, target_workload, extra_specs,
                is_compression_disabled)
        except Exception as e:
            LOG.error("Failed to get or create storage group. "
                      "Exception received was %(e)s.", {'e': e})
            return False

        self.masking.add_volume_to_storage_group(
            array, device_id, target_sg_name, volume_name, extra_specs)
        # Check that it has been added.
        vol_check = self.rest.is_volume_in_storagegroup(
            array, device_id, target_sg_name)
        if not vol_check:
            LOG.error(
                "Volume: %(volume_name)s has not been "
                "added to target storage group %(storageGroup)s.",
                {'volume_name': volume_name,
                 'storageGroup': target_sg_name})
            return False

        return True

    def _is_valid_for_storage_assisted_migration(
            self, device_id, host, source_array,
            source_srp, volume_name, do_change_compression):
        """Check if volume is suitable for storage assisted (pool) migration.

        :param device_id: the volume device id
        :param host: the host dict
        :param source_array: the volume's current array serial number
        :param source_srp: the volume's current pool name
        :param volume_name: the name of the volume to be migrated
        :param do_change_compression: do change compression
        :returns: boolean -- True/False
        :returns: string -- targetSlo
        :returns: string -- targetWorkload
        """
        false_ret = (False, None, None)
        host_info = host['host']

        LOG.debug("Target host is : %(info)s.", {'info': host_info})
        try:
            # Host string format: <host>#<slo>+<workload>+<srp>+<array>
            info_detail = host_info.split('#')
            pool_details = info_detail[1].split('+')
            target_slo = pool_details[0]
            target_workload = pool_details[1]
            target_srp = pool_details[2]
            target_array_serial = pool_details[3]
        except IndexError:
            LOG.error("Error parsing array, pool, SLO and workload.")
            return false_ret

        if target_array_serial not in source_array:
            LOG.error(
                "The source array: %(source_array)s does not "
                "match the target array: %(target_array)s - "
                "skipping storage-assisted migration.",
                {'source_array': source_array,
                 'target_array': target_array_serial})
            return false_ret

        if target_srp not in source_srp:
            LOG.error(
                "Only SLO/workload migration within the same SRP Pool is "
                "supported in this version. The source pool: "
                "%(source_pool_name)s does not match the target array: "
                "%(target_pool)s. Skipping storage-assisted migration.",
                {'source_pool_name': source_srp,
                 'target_pool': target_srp})
            return false_ret

        found_storage_group_list = self.rest.get_storage_groups_from_volume(
            source_array, device_id)
        if not found_storage_group_list:
            LOG.warning("Volume: %(volume_name)s does not currently "
                        "belong to any storage groups.",
                        {'volume_name': volume_name})

        else:
            for found_storage_group_name in found_storage_group_list:
                emc_fast_setting = (
                    self.provision.
                    get_slo_workload_settings_from_storage_group(
                        source_array, found_storage_group_name))
                target_combination = ("%(targetSlo)s+%(targetWorkload)s"
                                      % {'targetSlo': target_slo,
                                         'targetWorkload': target_workload})
                if target_combination in emc_fast_setting:
                    # Check if migration is from compression to non compression
                    # or vice versa
                    if not do_change_compression:
                        LOG.warning(
                            "No action required. Volume: %(volume_name)s is "
                            "already part of slo/workload combination: "
                            "%(targetCombination)s.",
                            {'volume_name': volume_name,
                             'targetCombination': target_combination})
                        return false_ret

        return True, target_slo, target_workload

    def setup_volume_replication(self, array, volume, device_id,
                                 extra_specs, target_device_id=None):
        """Setup replication for volume, if enabled.

        Called on create volume, create cloned volume, create volume from
        snapshot, manage_existing, and re-establishing a replication
        relationship after extending.

        :param array: the array serial number
        :param volume: the volume object
        :param device_id: the device id
        :param extra_specs: the extra specifications
        :param target_device_id: the target device id
        :returns: replication_status -- str, replication_driver_data -- dict
        """
        source_name = volume.name
        LOG.debug('Starting replication setup '
                  'for volume: %s.', source_name)
        # Get rdf details
        rdf_group_no, remote_array = self.get_rdf_details(array)
        rdf_vol_size = volume.size
        if rdf_vol_size == 0:
            rdf_vol_size = self.rest.get_size_of_device_on_array(
                array, device_id)

        # Give the target volume the same Volume Element Name as the
        # source volume
        target_name = self.utils.get_volume_element_name(volume.id)

        if not target_device_id:
            # Create a target volume on the target array
            rep_extra_specs = self._get_replication_extra_specs(
                extra_specs, self.rep_config)
            volume_dict = self._create_volume(
                target_name, rdf_vol_size, rep_extra_specs)
            target_device_id = volume_dict['device_id']

        LOG.debug("Create volume replica: Target device: %(target)s "
                  "Source Device: %(source)s "
                  "Volume identifier: %(name)s.",
                  {'target': target_device_id,
                   'source': device_id,
                   'name': target_name})

        # Enable rdf replication and establish the link
        rdf_dict = self.enable_rdf(
            array, device_id, rdf_group_no, self.rep_config,
            target_name, remote_array, target_device_id, extra_specs)

        LOG.info('Successfully setup replication for %s.',
                 target_name)
        replication_status = REPLICATION_ENABLED
        replication_driver_data = rdf_dict

        return replication_status, replication_driver_data

    def cleanup_lun_replication(self, volume, volume_name,
                                device_id, extra_specs):
        """Cleanup target volume on delete.

        Extra logic if target is last in group.

        :param volume: the volume object
        :param volume_name: the volume name
        :param device_id: the device id
        :param extra_specs: extra specifications
        :raises: VolumeBackendAPIException
        """
        LOG.debug('Starting cleanup replication from volume: '
                  '%s.', volume_name)
        try:
            loc = volume.provider_location
            rep_data = volume.replication_driver_data

            if (isinstance(loc, six.string_types)
                    and isinstance(rep_data, six.string_types)):
                name = ast.literal_eval(loc)
                array = name['array']
                rep_extra_specs = self._get_replication_extra_specs(
                    extra_specs, self.rep_config)
                (target_device, remote_array, rdf_group_no,
                 local_vol_state, pair_state) = (
                    self.get_remote_target_device(array, volume, device_id))

                if target_device is not None:
                    # Clean-up target
                    self.masking.remove_and_reset_members(
                        remote_array, target_device, volume_name,
                        rep_extra_specs, False)
                    self._cleanup_remote_target(
                        array, remote_array, device_id, target_device,
                        rdf_group_no, volume_name, rep_extra_specs)
                    LOG.info('Successfully destroyed replication for '
                             'volume: %(volume)s',
                             {'volume': volume_name})
                else:
                    LOG.warning('Replication target not found for '
                                'replication-enabled volume: %(volume)s',
                                {'volume': volume_name})
        except Exception as e:
            exception_message = (
                _('Cannot get necessary information to cleanup '
                  'replication target for volume: %(volume)s. '
                  'The exception received was: %(e)s. Manual '
                  'clean-up may be required. Please contact '
                  'your administrator.')
                % {'volume': volume_name, 'e': six.text_type(e)})
            LOG.error(exception_message)
            raise exception.VolumeBackendAPIException(data=exception_message)

    def _cleanup_remote_target(
            self, array, remote_array, device_id, target_device,
            rdf_group, volume_name, rep_extra_specs):
        """Clean-up remote replication target after exception or on deletion.

        :param array: the array serial number
        :param remote_array: the remote array serial number
        :param device_id: the source device id
        :param target_device: the target device id
        :param rdf_group: the RDF group
        :param volume_name: the volume name
        :param rep_extra_specs: replication extra specifications
        """
        are_vols_paired, local_vol_state, pair_state = (
            self.rest.are_vols_rdf_paired(
                array, remote_array, device_id, target_device, rdf_group))
        if are_vols_paired:
            # Break the sync relationship.
            self.provision.break_rdf_relationship(
                array, device_id, target_device, rdf_group,
                rep_extra_specs, pair_state)
        self._delete_from_srp(
            remote_array, target_device, volume_name, rep_extra_specs)

    def _cleanup_replication_source(
            self, array, volume, volume_name, volume_dict, extra_specs):
        """Cleanup a remote replication source volume on failure.

        If replication setup fails at any stage on a new volume create,
        we must clean-up the source instance as the cinder database won't
        be updated with the provider_location. This means the volume cannot
        be properly deleted from the array by cinder.

        :param array: the array serial number
        :param volume: the volume object
        :param volume_name: the name of the volume
        :param volume_dict: the source volume dictionary
        :param extra_specs: the extra specifications
        """
        LOG.warning(
            "Replication failed. Cleaning up the source volume. "
            "Volume name: %(sourceName)s ",
            {'sourceName': volume_name})
        device_id = volume_dict['device_id']
        # Remove from any storage groups and cleanup replication
        self._remove_vol_and_cleanup_replication(
            array, device_id, volume_name, extra_specs, volume)
        self._delete_from_srp(
            array, device_id, volume_name, extra_specs)

    def get_rdf_details(self, array):
        """Retrieves an SRDF group instance.

        :param array: the array serial number
        :returns: rdf_group_no, remote_array
        :raises: VolumeBackendAPIException: if replication is not
            configured or the RDF group cannot be found
        """
        if not self.rep_config:
            exception_message = (_("Replication is not configured on "
                                   "backend: %(backend)s.") %
                                 {'backend': self.configuration.safe_get(
                                     'volume_backend_name')})
            LOG.exception(exception_message)
            raise exception.VolumeBackendAPIException(data=exception_message)

        remote_array = self.rep_config['array']
        rdf_group_label = self.rep_config['rdf_group_label']
        LOG.info("Replication group: %(RDFGroup)s.",
                 {'RDFGroup': rdf_group_label})
        rdf_group_no = self.rest.get_rdf_group_number(array, rdf_group_label)
        if rdf_group_no is None:
            exception_message = (_("Cannot find replication group: "
                                   "%(RDFGroup)s. Please check the name "
                                   "and the array") %
                                 {'RDFGroup': rdf_group_label})
            LOG.exception(exception_message)
            raise exception.VolumeBackendAPIException(
                data=exception_message)

        LOG.info("Found RDF group number: %(RDFGroup)s.",
                 {'RDFGroup': rdf_group_no})

        return rdf_group_no, remote_array

    def failover_host(self, volumes, secondary_id=None, groups=None):
        """Fails over the volumes on a host back and forth.

        Driver needs to update following info for failed-over volume:
        1. provider_location: update array details
        2. replication_status: new status for replication-enabled volume

        :param volumes: the list of volumes to be failed over
        :param secondary_id: the target backend
        :param groups: replication groups
        :returns: secondary_id, volume_update_list, group_update_list
        :raises: VolumeBackendAPIException: if already in the requested
            state (failed over / not failed over)
        """
        volume_update_list = []
        if secondary_id != 'default':
            if not self.failover:
                self.failover = True
                if self.rep_config:
                    secondary_id = self.rep_config['array']
            else:
                exception_message = (_(
                    "Backend %(backend)s is already failed over. "
                    "If you wish to failback, please append "
                    "'--backend_id default' to your command.")
                    % {'backend': self.configuration.safe_get(
                        'volume_backend_name')})
                LOG.error(exception_message)
                raise exception.VolumeBackendAPIException(
                    data=exception_message)
        else:
            if self.failover:
                self.failover = False
                secondary_id = None
            else:
                exception_message = (_(
                    "Cannot failback backend %(backend)s- backend not "
                    "in failed over state. If you meant to failover, please "
                    "omit the '--backend_id default' from the command")
                    % {'backend': self.configuration.safe_get(
                        'volume_backend_name')})
                LOG.error(exception_message)
                raise exception.VolumeBackendAPIException(
                    data=exception_message)

        for volume in volumes:
            extra_specs = self._initial_setup(volume)
            if self.utils.is_replication_enabled(extra_specs):
                model_update = self._failover_volume(
                    volume, self.failover, extra_specs)
                volume_update_list.append(model_update)
            else:
                if self.failover:
                    # Since the array has been failed-over,
                    # volumes without replication should be in error.
                    volume_update_list.append({
                        'volume_id': volume.id,
                        'updates': {'status': 'error'}})
                else:
                    # This is a failback, so we will attempt
                    # to recover non-failed over volumes
                    recovery = self.recover_volumes_on_failback(
                        volume, extra_specs)
                    volume_update_list.append(recovery)

        LOG.info("Failover host complete.")
        return secondary_id, volume_update_list, []

    def _failover_volume(self, vol, failover, extra_specs):
        """Failover a volume.

        :param vol: the volume object
        :param failover: flag to indicate failover or failback -- bool
        :param extra_specs: the extra specifications
        :returns: model_update -- dict
        """
        loc = vol.provider_location
        rep_data = vol.replication_driver_data
        try:
            name = ast.literal_eval(loc)
            replication_keybindings = ast.literal_eval(rep_data)
            array = name['array']
            device_id = self._find_device_on_array(vol, {utils.ARRAY: array})

            (target_device, remote_array, rdf_group,
             local_vol_state, pair_state) = (
                self.get_remote_target_device(array, vol, device_id))

            self._sync_check(array, device_id, vol.name, extra_specs)
            self.provision.failover_volume(
                array, device_id, rdf_group, extra_specs,
                local_vol_state, failover)

            if failover:
                new_status = REPLICATION_FAILOVER
            else:
                new_status = REPLICATION_ENABLED

            # Transfer ownership to secondary_backend_id and
            # update provider_location field
            loc = six.text_type(replication_keybindings)
            rep_data = six.text_type(name)

        except Exception as ex:
            # On any failure, report error status but keep the original
            # location/driver data so the record is not corrupted.
            msg = ('Failed to failover volume %(volume_id)s. '
                   'Error: %(error)s.')
            LOG.error(msg, {'volume_id': vol.id,
                            'error': ex}, )
            new_status = FAILOVER_ERROR

        model_update = {'volume_id': vol.id,
                        'updates':
                            {'replication_status': new_status,
                             'replication_driver_data': rep_data,
                             'provider_location': loc}}
        return model_update

    def recover_volumes_on_failback(self, volume, extra_specs):
        """Recover volumes on failback.
- - On failback, attempt to recover non RE(replication enabled) - volumes from primary array. - :param volume: the volume object - :param extra_specs: the extra specifications - :returns: volume_update - """ - # Check if volume still exists on the primary - volume_update = {'volume_id': volume.id} - device_id = self._find_device_on_array(volume, extra_specs) - if not device_id: - volume_update['updates'] = {'status': 'error'} - else: - try: - maskingview = self.get_masking_views_from_volume( - extra_specs[utils.ARRAY], device_id, '') - except Exception: - maskingview = None - LOG.debug("Unable to determine if volume is in masking view.") - if not maskingview: - volume_update['updates'] = {'status': 'available'} - else: - volume_update['updates'] = {'status': 'in-use'} - return volume_update - - def get_remote_target_device(self, array, volume, device_id): - """Get the remote target for a given volume. - - :param array: the array serial number - :param volume: the volume object - :param device_id: the device id - :returns: target_device, target_array, rdf_group, state - """ - target_device, local_vol_state, pair_state = None, '', '' - rdf_group, remote_array = self.get_rdf_details(array) - try: - rep_target_data = volume.replication_driver_data - replication_keybindings = ast.literal_eval(rep_target_data) - remote_array = replication_keybindings['array'] - remote_device = replication_keybindings['device_id'] - target_device_info = self.rest.get_volume( - remote_array, remote_device) - if target_device_info is not None: - target_device = remote_device - are_vols_paired, local_vol_state, pair_state = ( - self.rest.are_vols_rdf_paired( - array, remote_array, device_id, - target_device, rdf_group)) - if not are_vols_paired: - target_device = None - except (KeyError, ValueError): - target_device = None - return (target_device, remote_array, rdf_group, - local_vol_state, pair_state) - - def extend_volume_is_replicated( - self, array, volume, device_id, volume_name, - 
new_size, extra_specs): - """Extend a replication-enabled volume. - - Cannot extend volumes in a synchronization pair. Must first break the - relationship, extend them separately, then recreate the pair - :param array: the array serial number - :param volume: the volume objcet - :param device_id: the volume device id - :param volume_name: the volume name - :param new_size: the new size the volume should be - :param extra_specs: extra specifications - """ - if self.extend_replicated_vol is True: - try: - (target_device, remote_array, rdf_group, - local_vol_state, pair_state) = ( - self.get_remote_target_device(array, volume, device_id)) - - # Volume must be removed from replication (storage) group - # before the replication relationship can be ended (cannot - # have a mix of replicated and non-replicated volumes as - # the SRDF groups become unmanageable). - self.masking.remove_and_reset_members( - array, device_id, volume_name, extra_specs, False) - - # Repeat on target side - rep_extra_specs = self._get_replication_extra_specs( - extra_specs, self.rep_config) - self.masking.remove_and_reset_members( - remote_array, target_device, volume_name, - rep_extra_specs, False) - - LOG.info("Breaking replication relationship...") - self.provision.break_rdf_relationship( - array, device_id, target_device, - rdf_group, rep_extra_specs, pair_state) - - # Extend the source volume - LOG.info("Extending source volume...") - self.provision.extend_volume( - array, device_id, new_size, extra_specs) - - # Extend the target volume - LOG.info("Extending target volume...") - self.provision.extend_volume( - remote_array, target_device, new_size, rep_extra_specs) - - # Re-create replication relationship - LOG.info("Recreating replication relationship...") - self.setup_volume_replication( - array, volume, device_id, extra_specs, target_device) - - except Exception as e: - exception_message = (_("Error extending volume. 
" - "Error received was %(e)s") % - {'e': e}) - LOG.exception(exception_message) - raise exception.VolumeBackendAPIException( - data=exception_message) - - else: - exception_message = (_( - "Extending a replicated volume is not " - "permitted on this backend. Please contact " - "your administrator.")) - LOG.error(exception_message) - raise exception.VolumeBackendAPIException(data=exception_message) - - def enable_rdf(self, array, device_id, rdf_group_no, rep_config, - target_name, remote_array, target_device, extra_specs): - """Create a replication relationship with a target volume. - - :param array: the array serial number - :param device_id: the device id - :param rdf_group_no: the rdf group number - :param rep_config: the replication config - :param target_name: the target volume name - :param remote_array: the remote array serial number - :param target_device: the target device id - :param extra_specs: the extra specifications - :returns: rdf_dict - """ - rep_extra_specs = self._get_replication_extra_specs( - extra_specs, rep_config) - try: - # Remove source and target instances from their - # default storage groups - self.masking.remove_and_reset_members( - array, device_id, target_name, extra_specs, False) - - self.masking.remove_and_reset_members( - remote_array, target_device, target_name, - rep_extra_specs, False) - - # Establish replication relationship - rdf_dict = self.rest.create_rdf_device_pair( - array, device_id, rdf_group_no, target_device, remote_array, - target_name, extra_specs) - - # Add source and target instances to their replication groups - LOG.debug("Adding source device to default replication group.") - self.add_volume_to_replication_group( - array, device_id, target_name, extra_specs) - LOG.debug("Adding target device to default replication group.") - self.add_volume_to_replication_group( - remote_array, target_device, target_name, rep_extra_specs) - - except Exception as e: - LOG.warning( - ("Remote replication failed. 
Cleaning up the target " - "volume and returning source volume to default storage " - "group. Volume name: %(name)s "), - {'name': target_name}) - self.masking.remove_and_reset_members( - remote_array, target_device, target_name, - rep_extra_specs, False) - self._cleanup_remote_target( - array, remote_array, device_id, target_device, - rdf_group_no, target_name, rep_extra_specs) - # Re-throw the exception. - exception_message = (_("Remote replication failed with exception:" - " %(e)s") - % {'e': six.text_type(e)}) - LOG.exception(exception_message) - raise exception.VolumeBackendAPIException(data=exception_message) - - return rdf_dict - - def add_volume_to_replication_group( - self, array, device_id, volume_name, extra_specs): - """Add a volume to the default replication group. - - Replication groups are VMAX storage groups that contain only - RDF-paired volumes. We can use our normal storage group operations. - :param array: array serial number - :param device_id: the device id - :param volume_name: the volume name - :param extra_specs: the extra specifications - :returns: storagegroup_name - """ - do_disable_compression = self.utils.is_compression_disabled( - extra_specs) - try: - storagegroup_name = ( - self.masking.get_or_create_default_storage_group( - array, extra_specs[utils.SRP], extra_specs[utils.SLO], - extra_specs[utils.WORKLOAD], extra_specs, - do_disable_compression, is_re=True)) - except Exception as e: - exception_message = (_("Failed to get or create replication" - "group. Exception received: %(e)s") - % {'e': six.text_type(e)}) - LOG.exception(exception_message) - raise exception.VolumeBackendAPIException( - data=exception_message) - - self.masking.add_volume_to_storage_group( - array, device_id, storagegroup_name, volume_name, extra_specs) - - return storagegroup_name - - def _get_replication_extra_specs(self, extra_specs, rep_config): - """Get replication extra specifications. 
- - Called when target array operations are necessary - - on create, extend, etc and when volume is failed over. - :param extra_specs: the extra specifications - :param rep_config: the replication configuration - :returns: repExtraSpecs - dict - """ - rep_extra_specs = deepcopy(extra_specs) - rep_extra_specs[utils.ARRAY] = rep_config['array'] - rep_extra_specs[utils.SRP] = rep_config['srp'] - rep_extra_specs[utils.PORTGROUPNAME] = rep_config['portgroup'] - - # If disable compression is set, check if target array is all flash - do_disable_compression = self.utils.is_compression_disabled( - extra_specs) - if do_disable_compression: - if not self.rest.is_compression_capable( - rep_extra_specs[utils.ARRAY]): - rep_extra_specs.pop(utils.DISABLECOMPRESSION, None) - - # Check to see if SLO and Workload are configured on the target array. - if extra_specs[utils.SLO]: - is_valid_slo, is_valid_workload = ( - self.provision.verify_slo_workload( - rep_extra_specs[utils.ARRAY], - extra_specs[utils.SLO], - rep_extra_specs[utils.WORKLOAD], - rep_extra_specs[utils.SRP])) - if not is_valid_slo or not is_valid_workload: - LOG.warning("The target array does not support the storage " - "pool setting for SLO %(slo)s or workload " - "%(workload)s. Not assigning any SLO or " - "workload.", - {'slo': extra_specs[utils.SLO], - 'workload': extra_specs[utils.WORKLOAD]}) - rep_extra_specs[utils.SLO] = None - if extra_specs[utils.WORKLOAD]: - rep_extra_specs[utils.WORKLOAD] = None - - return rep_extra_specs - - def get_secondary_stats_info(self, rep_config, array_info): - """On failover, report on secondary array statistics. 
- - :param rep_config: the replication configuration - :param array_info: the array info - :returns: secondary_info - dict - """ - secondary_info = array_info.copy() - secondary_info['SerialNumber'] = six.text_type(rep_config['array']) - secondary_info['srpName'] = rep_config['srp'] - return secondary_info - - def _setup_for_live_migration(self, device_info_dict, - source_storage_group_list): - """Function to set attributes for live migration. - - :param device_info_dict: the data dict - :param source_storage_group_list: - :returns: source_nf_sg: The non fast storage group - :returns: source_sg: The source storage group - :returns: source_parent_sg: The parent storage group - :returns: is_source_nf_sg:if the non fast storage group already exists - """ - array = device_info_dict['array'] - source_sg = None - is_source_nf_sg = False - # Get parent storage group - source_parent_sg = self.rest.get_element_from_masking_view( - array, device_info_dict['maskingview'], storagegroup=True) - source_nf_sg = source_parent_sg[:-2] + 'NONFAST' - for sg in source_storage_group_list: - is_descendant = self.rest.is_child_sg_in_parent_sg( - array, sg, source_parent_sg) - if is_descendant: - source_sg = sg - is_descendant = self.rest.is_child_sg_in_parent_sg( - array, source_nf_sg, source_parent_sg) - if is_descendant: - is_source_nf_sg = True - return source_nf_sg, source_sg, source_parent_sg, is_source_nf_sg - - def create_group(self, context, group): - """Creates a generic volume group. 
- - :param context: the context - :param group: the group object to be created - :returns: dict -- modelUpdate = {'status': 'available'} - :raises: VolumeBackendAPIException, NotImplementedError - """ - if not volume_utils.is_group_a_cg_snapshot_type(group): - raise NotImplementedError() - - model_update = {'status': fields.GroupStatus.AVAILABLE} - - LOG.info("Create generic volume group: %(group)s.", - {'group': group.id}) - - vol_grp_name = self.utils.update_volume_group_name(group) - - try: - array, __ = self.utils.get_volume_group_utils( - group, self.interval, self.retries) - interval_retries_dict = self.utils.get_intervals_retries_dict( - self.interval, self.retries) - self.provision.create_volume_group( - array, vol_grp_name, interval_retries_dict) - except Exception: - exception_message = (_("Failed to create generic volume group:" - " %(volGrpName)s.") - % {'volGrpName': vol_grp_name}) - LOG.exception(exception_message) - raise exception.VolumeBackendAPIException(data=exception_message) - - return model_update - - def delete_group(self, context, group, volumes): - """Deletes a generic volume group. - - :param context: the context - :param group: the group object to be deleted - :param volumes: the list of volumes in the generic group to be deleted - :returns: dict -- modelUpdate - :returns: list -- list of volume model updates - :raises: NotImplementedError - """ - LOG.info("Delete generic volume group: %(group)s.", - {'group': group.id}) - if not volume_utils.is_group_a_cg_snapshot_type(group): - raise NotImplementedError() - model_update, volumes_model_update = self._delete_group( - group, volumes) - return model_update, volumes_model_update - - def _delete_group(self, group, volumes): - """Helper function to delete a volume group. 
- - :param group: the group object - :param volumes: the member volume objects - :returns: model_update, volumes_model_update - """ - volumes_model_update = [] - array, extraspecs_dict_list = self.utils.get_volume_group_utils( - group, self.interval, self.retries) - vol_grp_name = None - - volume_group = self._find_volume_group( - array, group) - - if volume_group is None: - LOG.error("Cannot find generic volume group %(volGrpName)s.", - {'volGrpName': group.id}) - model_update = {'status': fields.GroupStatus.DELETED} - - volumes_model_update = self.utils.update_volume_model_updates( - volumes_model_update, volumes, group.id, status='deleted') - return model_update, volumes_model_update - - if 'name' in volume_group: - vol_grp_name = volume_group['name'] - volume_device_ids = self._get_members_of_volume_group( - array, vol_grp_name) - intervals_retries_dict = self.utils.get_intervals_retries_dict( - self.interval, self.retries) - deleted_volume_device_ids = [] - try: - # If there are no volumes in sg then delete it - if not volume_device_ids: - self.rest.delete_storage_group(array, vol_grp_name) - model_update = {'status': fields.GroupStatus.DELETED} - volumes_model_update = self.utils.update_volume_model_updates( - volumes_model_update, volumes, group.id, status='deleted') - return model_update, volumes_model_update - # First remove all the volumes from the SG - self.masking.remove_volumes_from_storage_group( - array, volume_device_ids, vol_grp_name, intervals_retries_dict) - for vol in volumes: - for extraspecs_dict in extraspecs_dict_list: - if vol.volume_type_id in extraspecs_dict['volumeTypeId']: - extraspecs = extraspecs_dict.get(utils.EXTRA_SPECS) - device_id = self._find_device_on_array(vol, - extraspecs) - if device_id in volume_device_ids: - self._remove_vol_and_cleanup_replication( - array, device_id, - vol.name, extraspecs, vol) - self._delete_from_srp( - array, device_id, "group vol", extraspecs) - else: - LOG.debug("Volume not present in storage 
group.") - # Add the device id to the deleted list - deleted_volume_device_ids.append(device_id) - # Once all volumes are deleted then delete the SG - self.rest.delete_storage_group(array, vol_grp_name) - model_update = {'status': fields.GroupStatus.DELETED} - volumes_model_update = self.utils.update_volume_model_updates( - volumes_model_update, volumes, group.id, status='deleted') - except Exception as e: - LOG.error("Error deleting volume group." - "Error received: %(e)s", {'e': e}) - model_update = {'status': fields.GroupStatus.ERROR_DELETING} - # Update the volumes_model_update - volumes_not_deleted = [] - for vol in volume_device_ids: - if vol not in deleted_volume_device_ids: - volumes_not_deleted.append(vol) - if not deleted_volume_device_ids: - volumes_model_update = self.utils.update_volume_model_updates( - volumes_model_update, - deleted_volume_device_ids, - group.id, status='deleted') - if not volumes_not_deleted: - volumes_model_update = self.utils.update_volume_model_updates( - volumes_model_update, - volumes_not_deleted, - group.id, status='deleted') - # As a best effort try to add back the undeleted volumes to sg - # Dont throw any exception in case of failure - try: - if not volumes_not_deleted: - self.masking.add_volumes_to_storage_group( - array, volumes_not_deleted, - vol_grp_name, intervals_retries_dict) - except Exception as ex: - LOG.error("Error in rollback - %(ex)s. " - "Failed to add back volumes to sg %(sg_name)s", - {'ex': ex, 'sg_name': vol_grp_name}) - - return model_update, volumes_model_update - - def create_group_snapshot(self, context, group_snapshot, snapshots): - """Creates a generic volume group snapshot. 
- - :param context: the context - :param group_snapshot: the group snapshot to be created - :param snapshots: snapshots - :returns: dict -- modelUpdate - :returns: list -- list of snapshots - :raises: VolumeBackendAPIException, NotImplementedError - """ - grp_id = group_snapshot.group_id - source_group = group_snapshot.get('group') - if not volume_utils.is_group_a_cg_snapshot_type(source_group): - raise NotImplementedError() - snapshots_model_update = [] - LOG.info( - "Create snapshot for %(grpId)s " - "group Snapshot ID: %(group_snapshot)s.", - {'group_snapshot': group_snapshot.id, - 'grpId': grp_id}) - - try: - snap_name = self.utils.truncate_string(group_snapshot.id, 19) - self._create_group_replica(source_group, - snap_name) - - except Exception as e: - exception_message = (_("Failed to create snapshot for group: " - "%(volGrpName)s. Exception received: %(e)s") - % {'volGrpName': grp_id, - 'e': six.text_type(e)}) - LOG.exception(exception_message) - raise exception.VolumeBackendAPIException(data=exception_message) - - for snapshot in snapshots: - snapshots_model_update.append( - {'id': snapshot.id, - 'status': fields.SnapshotStatus.AVAILABLE}) - model_update = {'status': fields.GroupStatus.AVAILABLE} - - return model_update, snapshots_model_update - - def _create_group_replica( - self, source_group, snap_name): - """Create a group replica. - - This can be a group snapshot or a cloned volume group. 
- :param source_group: the group object - :param snap_name: the name of the snapshot - """ - array, __ = ( - self.utils.get_volume_group_utils( - source_group, self.interval, self.retries)) - vol_grp_name = None - volume_group = ( - self._find_volume_group(array, source_group)) - if volume_group: - if 'name' in volume_group: - vol_grp_name = volume_group['name'] - if vol_grp_name is None: - exception_message = ( - _("Cannot find generic volume group %(group_id)s.") % - {'group_id': source_group.id}) - raise exception.VolumeBackendAPIException( - data=exception_message) - interval_retries_dict = self.utils.get_intervals_retries_dict( - self.interval, self.retries) - self.provision.create_group_replica( - array, vol_grp_name, - snap_name, interval_retries_dict) - - def delete_group_snapshot(self, context, group_snapshot, snapshots): - """Delete a volume group snapshot. - - :param context: the context - :param group_snapshot: the volume group snapshot to be deleted - :param snapshots: the snapshot objects - :returns: model_update, snapshots_model_update - """ - model_update, snapshots_model_update = self._delete_group_snapshot( - group_snapshot, snapshots) - return model_update, snapshots_model_update - - def _delete_group_snapshot(self, group_snapshot, snapshots): - """Helper function to delete a group snapshot. 
- - :param group_snapshot: the group snapshot object - :param snapshots: the snapshot objects - :returns: model_update, snapshots_model_update - :raises: VolumeBackendApiException, NotImplementedError - """ - snapshots_model_update = [] - model_update = {} - source_group = group_snapshot.get('group') - grp_id = group_snapshot.group_id - if not volume_utils.is_group_a_cg_snapshot_type(source_group): - raise NotImplementedError() - - LOG.info("Delete snapshot grpSnapshotId: %(grpSnapshotId)s" - " for source group %(grpId)s", - {'grpSnapshotId': group_snapshot.id, - 'grpId': grp_id}) - - snap_name = self.utils.truncate_string(group_snapshot.id, 19) - vol_grp_name = None - try: - # Get the array serial - array, __ = ( - self.utils.get_volume_group_utils( - source_group, self.interval, self.retries)) - # Get the volume group dict for getting the group name - volume_group = ( - self._find_volume_group(array, source_group)) - if volume_group: - if 'name' in volume_group: - vol_grp_name = volume_group['name'] - if vol_grp_name is None: - exception_message = ( - _("Cannot find generic volume group %(grp_id)s.") % - {'group_id': source_group.id}) - raise exception.VolumeBackendAPIException( - data=exception_message) - # Check if the snapshot exists - if 'snapVXSnapshots' in volume_group: - if snap_name in volume_group['snapVXSnapshots']: - self.provision.delete_group_replica(array, - snap_name, - vol_grp_name) - else: - # Snapshot has been already deleted, return successfully - LOG.error("Cannot find group snapshot %(snapId)s.", - {'snapId': group_snapshot.id}) - model_update = {'status': fields.GroupSnapshotStatus.DELETED} - for snapshot in snapshots: - snapshots_model_update.append( - {'id': snapshot.id, - 'status': fields.SnapshotStatus.DELETED}) - except Exception as e: - LOG.error("Error deleting volume group snapshot." 
- "Error received: %(e)s", {'e': e}) - model_update = { - 'status': fields.GroupSnapshotStatus.ERROR_DELETING} - - return model_update, snapshots_model_update - - def _find_volume_group_name_from_id(self, array, group_id): - """Finds the volume group name given its id - - :param array: the array serial number - :param group_id: the group id - :returns: group_name: Name of the group - """ - group_name = None - sg_list = self.rest.get_storage_group_list(array) - for sg in sg_list: - if group_id in sg: - group_name = sg - return group_name - return group_name - - def _find_volume_group(self, array, group): - """Finds a volume group given the group. - - :param array: the array serial number - :param group: the group object - :returns: volume group dictionary - """ - group_name = self.utils.update_volume_group_name(group) - volume_group = self.rest.get_storage_group_rep(array, group_name) - if not volume_group: - LOG.warning("Volume group %(group_id)s cannot be found", - {'group_id': group_name}) - return None - return volume_group - - def _get_members_of_volume_group(self, array, group_name): - """Get the members of a volume group. - - :param array: the array serial number - :param group_name: the storage group name - :returns: list -- member_device_ids - """ - member_device_ids = self.rest.get_volumes_in_storage_group( - array, group_name) - if not member_device_ids: - LOG.info("No member volumes found in %(group_id)s", - {'group_id': group_name}) - return member_device_ids - - def update_group(self, group, add_volumes, remove_volumes): - """Updates LUNs in generic volume group. - - :param group: storage configuration service instance - :param add_volumes: the volumes uuids you want to add to the vol grp - :param remove_volumes: the volumes uuids you want to remove from - the CG - :returns: model_update - :raises: VolumeBackendAPIException, NotImplementedError - """ - LOG.info("Update generic volume Group: %(group)s. 
" - "This adds and/or removes volumes from " - "a generic volume group.", - {'group': group.id}) - if not volume_utils.is_group_a_cg_snapshot_type(group): - raise NotImplementedError() - - array, __ = self.utils.get_volume_group_utils( - group, self.interval, self.retries) - model_update = {'status': fields.GroupStatus.AVAILABLE} - add_vols = [vol for vol in add_volumes] if add_volumes else [] - add_device_ids = self._get_volume_device_ids(add_vols, array) - remove_vols = [vol for vol in remove_volumes] if remove_volumes else [] - remove_device_ids = self._get_volume_device_ids(remove_vols, array) - vol_grp_name = None - try: - volume_group = self._find_volume_group( - array, group) - if volume_group: - if 'name' in volume_group: - vol_grp_name = volume_group['name'] - if vol_grp_name is None: - raise exception.GroupNotFound( - group_id=group.id) - interval_retries_dict = self.utils.get_intervals_retries_dict( - self.interval, self.retries) - # Add volume(s) to the group - if add_device_ids: - self.masking.add_volumes_to_storage_group( - array, add_device_ids, vol_grp_name, interval_retries_dict) - # Remove volume(s) from the group - if remove_device_ids: - self.masking.remove_volumes_from_storage_group( - array, remove_device_ids, - vol_grp_name, interval_retries_dict) - except exception.GroupNotFound: - raise - except Exception as ex: - exception_message = (_("Failed to update volume group:" - " %(volGrpName)s. Exception: %(ex)s.") - % {'volGrpName': group.id, - 'ex': ex}) - LOG.exception(exception_message) - raise exception.VolumeBackendAPIException(data=exception_message) - - return model_update, None, None - - def _get_volume_device_ids(self, volumes, array): - """Get volume device ids from volume. 
- - :param volumes: volume objects - :returns: device_ids - """ - device_ids = [] - for volume in volumes: - specs = {utils.ARRAY: array} - device_id = self._find_device_on_array(volume, specs) - if device_id is None: - LOG.error("Volume %(name)s not found on the array.", - {'name': volume['name']}) - else: - device_ids.append(device_id) - return device_ids - - def create_group_from_src(self, context, group, volumes, - group_snapshot, snapshots, source_group, - source_vols): - """Creates the volume group from source. - - :param context: the context - :param group: the volume group object to be created - :param volumes: volumes in the consistency group - :param group_snapshot: the source volume group snapshot - :param snapshots: snapshots of the source volumes - :param source_group: the source volume group - :param source_vols: the source vols - :returns: model_update, volumes_model_update - model_update is a dictionary of cg status - volumes_model_update is a list of dictionaries of volume - update - :raises: VolumeBackendAPIException, NotImplementedError - """ - if not volume_utils.is_group_a_cg_snapshot_type(group): - raise NotImplementedError() - # Check if we need to create a snapshot - create_snapshot = False - volumes_model_update = [] - if group_snapshot: - source_vols_or_snapshots = snapshots - source_id = group_snapshot.id - actual_source_grp = group_snapshot - elif source_group: - source_vols_or_snapshots = source_vols - source_id = source_group.id - actual_source_grp = source_group - create_snapshot = True - else: - exception_message = (_("Must supply either group snapshot or " - "a source group.")) - raise exception.VolumeBackendAPIException( - data=exception_message) - - LOG.debug("Enter VMAX create_volume group_from_src. 
Group to be " - "created: %(grpId)s, Source : %(SourceGrpId)s.", - {'grpId': group.id, - 'SourceGrpId': source_id}) - - tgt_name = self.utils.update_volume_group_name(group) - self.create_group(context, group) - model_update = {'status': fields.GroupStatus.AVAILABLE} - snap_name = None - try: - array, extraspecs_dict_list = ( - self.utils.get_volume_group_utils( - group, self.interval, self.retries)) - vol_grp_name = "" - # Create the target devices - dict_volume_dicts = {} - target_volume_names = {} - for volume, source_vol_or_snapshot in zip( - volumes, source_vols_or_snapshots): - if 'size' in source_vol_or_snapshot: - volume_size = source_vol_or_snapshot['size'] - else: - volume_size = source_vol_or_snapshot['volume_size'] - for extraspecs_dict in extraspecs_dict_list: - if volume.volume_type_id in ( - extraspecs_dict['volumeTypeId']): - extraspecs = extraspecs_dict.get(utils.EXTRA_SPECS) - # Create a random UUID and use it as volume name - target_volume_name = six.text_type(uuid.uuid4()) - volume_dict = self.provision.create_volume_from_sg( - array, target_volume_name, - tgt_name, volume_size, extraspecs) - dict_volume_dicts[volume.id] = volume_dict - target_volume_names[volume.id] = target_volume_name - - if create_snapshot is True: - # We have to create a snapshot of the source group - snap_name = self.utils.truncate_string(group.id, 19) - self._create_group_replica(actual_source_grp, snap_name) - vol_grp_name = self.utils.update_volume_group_name( - source_group) - else: - # We need to check if the snapshot exists - snap_name = self.utils.truncate_string(source_id, 19) - source_group = actual_source_grp.get('group') - volume_group = self._find_volume_group(array, source_group) - if volume_group is not None: - if 'snapVXSnapshots' in volume_group: - if snap_name in volume_group['snapVXSnapshots']: - LOG.info("Snapshot is present on the array") - if 'name' in volume_group: - vol_grp_name = volume_group['name'] - # Link and break the snapshot to the source 
group - interval_retries_dict = self.utils.get_intervals_retries_dict( - self.interval, self.retries) - self.provision.link_and_break_replica( - array, vol_grp_name, tgt_name, snap_name, - interval_retries_dict, delete_snapshot=create_snapshot) - - except Exception: - exception_message = (_("Failed to create vol grp %(volGrpName)s" - " from source %(grpSnapshot)s.") - % {'volGrpName': group.id, - 'grpSnapshot': source_id}) - LOG.exception(exception_message) - raise exception.VolumeBackendAPIException(data=exception_message) - volumes_model_update = self.utils.update_volume_model_updates( - volumes_model_update, volumes, group.id, model_update['status']) - - # Update the provider_location - for volume_model_update in volumes_model_update: - if volume_model_update['id'] in dict_volume_dicts: - volume_model_update.update( - {'provider_location': six.text_type( - dict_volume_dicts[volume_model_update['id']])}) - - # Update the volumes_model_update with admin_metadata - self.utils.update_admin_metadata(volumes_model_update, - key='targetVolumeName', - values=target_volume_names) - - return model_update, volumes_model_update diff --git a/cinder/volume/drivers/dell_emc/vmax/fc.py b/cinder/volume/drivers/dell_emc/vmax/fc.py deleted file mode 100644 index fd173c635..000000000 --- a/cinder/volume/drivers/dell_emc/vmax/fc.py +++ /dev/null @@ -1,526 +0,0 @@ -# Copyright (c) 2017 Dell Inc. or its subsidiaries. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -import ast - -from oslo_log import log as logging - -from cinder import interface -from cinder.volume import driver -from cinder.volume.drivers.dell_emc.vmax import common -from cinder.zonemanager import utils as fczm_utils - -LOG = logging.getLogger(__name__) - - -@interface.volumedriver -class VMAXFCDriver(driver.FibreChannelDriver): - """FC Drivers for VMAX using REST. - - Version history: - - .. code-block:: none - - 1.0.0 - Initial driver - 1.1.0 - Multiple pools and thick/thin provisioning, - performance enhancement. - 2.0.0 - Add driver requirement functions - 2.1.0 - Add consistency group functions - 2.1.1 - Fixed issue with mismatched config (bug #1442376) - 2.1.2 - Clean up failed clones (bug #1440154) - 2.1.3 - Fixed a problem with FAST support (bug #1435069) - 2.2.0 - Add manage/unmanage - 2.2.1 - Support for SE 8.0.3 - 2.2.2 - Update Consistency Group - 2.2.3 - Pool aware scheduler(multi-pool) support - 2.2.4 - Create CG from CG snapshot - 2.3.0 - Name change for MV and SG for FAST (bug #1515181) - - Fix for randomly choosing port group. 
(bug #1501919) - - get_short_host_name needs to be called in find_device_number - (bug #1520635) - - Proper error handling for invalid SLOs (bug #1512795) - - Extend Volume for VMAX3, SE8.1.0.3 - https://blueprints.launchpad.net/cinder/+spec/vmax3-extend-volume - - Incorrect SG selected on an attach (#1515176) - - Cleanup Zoning (bug #1501938) NOTE: FC only - - Last volume in SG fix - - _remove_last_vol_and_delete_sg is not being called - for VMAX3 (bug #1520549) - - necessary updates for CG changes (#1534616) - - Changing PercentSynced to CopyState (bug #1517103) - - Getting iscsi ip from port in existing masking view - - Replacement of EMCGetTargetEndpoints api (bug #1512791) - - VMAX3 snapvx improvements (bug #1522821) - - Operations and timeout issues (bug #1538214) - 2.4.0 - EMC VMAX - locking SG for concurrent threads (bug #1554634) - - SnapVX licensing checks for VMAX3 (bug #1587017) - - VMAX oversubscription Support (blueprint vmax-oversubscription) - - QoS support (blueprint vmax-qos) - 2.5.0 - Attach and detach snapshot (blueprint vmax-attach-snapshot) - - MVs and SGs not reflecting correct protocol (bug #1640222) - - Storage assisted volume migration via retype - (bp vmax-volume-migration) - - Support for compression on All Flash - - Volume replication 2.1 (bp add-vmax-replication) - - rename and restructure driver (bp vmax-rename-dell-emc) - 3.0.0 - REST based driver - - Retype (storage-assisted migration) - - QoS support - - Support for compression on All Flash - - Support for volume replication - - Support for live migration - - Support for Generic Volume Group - """ - - VERSION = "3.0.0" - - # ThirdPartySystems wiki - CI_WIKI_NAME = "EMC_VMAX_CI" - - def __init__(self, *args, **kwargs): - - super(VMAXFCDriver, self).__init__(*args, **kwargs) - self.active_backend_id = kwargs.get('active_backend_id', None) - self.common = common.VMAXCommon( - 'FC', - self.VERSION, - configuration=self.configuration, - active_backend_id=self.active_backend_id) - 
self.zonemanager_lookup_service = fczm_utils.create_lookup_service() - - def check_for_setup_error(self): - pass - - def create_volume(self, volume): - """Creates a VMAX volume. - - :param volume: the cinder volume object - :returns: provider location dict - """ - return self.common.create_volume(volume) - - def create_volume_from_snapshot(self, volume, snapshot): - """Creates a volume from a snapshot. - - :param volume: the cinder volume object - :param snapshot: the cinder snapshot object - :returns: provider location dict - """ - return self.common.create_volume_from_snapshot( - volume, snapshot) - - def create_cloned_volume(self, volume, src_vref): - """Creates a cloned volume. - - :param volume: the cinder volume object - :param src_vref: the source volume reference - :returns: provider location dict - """ - return self.common.create_cloned_volume(volume, src_vref) - - def delete_volume(self, volume): - """Deletes a VMAX volume. - - :param volume: the cinder volume object - """ - self.common.delete_volume(volume) - - def create_snapshot(self, snapshot): - """Creates a snapshot. - - :param snapshot: the cinder snapshot object - :returns: provider location dict - """ - src_volume = snapshot.volume - return self.common.create_snapshot(snapshot, src_volume) - - def delete_snapshot(self, snapshot): - """Deletes a snapshot. - - :param snapshot: the cinder snapshot object - """ - src_volume = snapshot.volume - self.common.delete_snapshot(snapshot, src_volume) - - def ensure_export(self, context, volume): - """Driver entry point to get the export info for an existing volume. - - :param context: the context - :param volume: the cinder volume object - """ - pass - - def create_export(self, context, volume, connector): - """Driver entry point to get the export info for a new volume. 
- - :param context: the context - :param volume: the cinder volume object - :param connector: the connector object - """ - pass - - def remove_export(self, context, volume): - """Driver entry point to remove an export for a volume. - - :param context: the context - :param volume: the cinder volume object - """ - pass - - @staticmethod - def check_for_export(context, volume_id): - """Make sure volume is exported. - - :param context: the context - :param volume_id: the volume id - """ - pass - - @fczm_utils.add_fc_zone - def initialize_connection(self, volume, connector): - """Initializes the connection and returns connection info. - - Assign any created volume to a compute node/host so that it can be - used from that host. - - The driver returns a driver_volume_type of 'fibre_channel'. - The target_wwn can be a single entry or a list of wwns that - correspond to the list of remote wwn(s) that will export the volume. - Example return values: - { - 'driver_volume_type': 'fibre_channel' - 'data': { - 'target_discovered': True, - 'target_lun': 1, - 'target_wwn': '1234567890123', - } - } - - or - - { - 'driver_volume_type': 'fibre_channel' - 'data': { - 'target_discovered': True, - 'target_lun': 1, - 'target_wwn': ['1234567890123', '0987654321321'], - } - } - :param volume: the cinder volume object - :param connector: the connector object - :returns: dict -- the target_wwns and initiator_target_map - """ - device_info = self.common.initialize_connection( - volume, connector) - return self.populate_data(device_info, volume, connector) - - def populate_data(self, device_info, volume, connector): - """Populate data dict. - - Add relevant data to data dict, target_lun, target_wwn and - initiator_target_map. 
- :param device_info: device_info - :param volume: the volume object - :param connector: the connector object - :returns: dict -- the target_wwns and initiator_target_map - """ - device_number = device_info['hostlunid'] - target_wwns, init_targ_map = self._build_initiator_target_map( - volume, connector) - - data = {'driver_volume_type': 'fibre_channel', - 'data': {'target_lun': device_number, - 'target_discovered': True, - 'target_wwn': target_wwns, - 'initiator_target_map': init_targ_map}} - - LOG.debug("Return FC data for zone addition: %(data)s.", - {'data': data}) - - return data - - @fczm_utils.remove_fc_zone - def terminate_connection(self, volume, connector, **kwargs): - """Disallow connection from connector. - - Return empty data if other volumes are in the same zone. - The FibreChannel ZoneManager doesn't remove zones - if there isn't an initiator_target_map in the - return of terminate_connection. - - :param volume: the volume object - :param connector: the connector object - :returns: dict -- the target_wwns and initiator_target_map if the - zone is to be removed, otherwise empty - """ - data = {'driver_volume_type': 'fibre_channel', 'data': {}} - zoning_mappings = self._get_zoning_mappings(volume, connector) - - if zoning_mappings: - self.common.terminate_connection(volume, connector) - data = self._cleanup_zones(zoning_mappings) - return data - - def _get_zoning_mappings(self, volume, connector): - """Get zoning mappings by building up initiator/target map. 
- - :param volume: the volume object - :param connector: the connector object - :returns: dict -- the target_wwns and initiator_target_map if the - zone is to be removed, otherwise empty - """ - zoning_mappings = {'port_group': None, - 'initiator_group': None, - 'target_wwns': None, - 'init_targ_map': None, - 'array': None} - loc = volume.provider_location - name = ast.literal_eval(loc) - host = connector['host'] - array = name['array'] - device_id = name['device_id'] - LOG.debug("Start FC detach process for volume: %(volume)s.", - {'volume': volume.name}) - - masking_views = self.common.get_masking_views_from_volume( - array, device_id, host) - if masking_views: - portgroup = ( - self.common.get_port_group_from_masking_view( - array, masking_views[0])) - initiator_group = ( - self.common.get_initiator_group_from_masking_view( - array, masking_views[0])) - - LOG.debug("Found port group: %(portGroup)s " - "in masking view %(maskingView)s.", - {'portGroup': portgroup, - 'maskingView': masking_views[0]}) - # Map must be populated before the terminate_connection - target_wwns, init_targ_map = self._build_initiator_target_map( - volume, connector) - zoning_mappings = {'port_group': portgroup, - 'initiator_group': initiator_group, - 'target_wwns': target_wwns, - 'init_targ_map': init_targ_map, - 'array': array} - else: - LOG.warning("Volume %(volume)s is not in any masking view.", - {'volume': volume.name}) - return zoning_mappings - - def _cleanup_zones(self, zoning_mappings): - """Cleanup zones after terminate connection. 
- - :param zoning_mappings: zoning mapping dict - :returns: data - dict - """ - LOG.debug("Looking for masking views still associated with " - "Port Group %s.", zoning_mappings['port_group']) - masking_views = self.common.get_common_masking_views( - zoning_mappings['array'], zoning_mappings['port_group'], - zoning_mappings['initiator_group']) - - if masking_views: - LOG.debug("Found %(numViews)d MaskingViews.", - {'numViews': len(masking_views)}) - data = {'driver_volume_type': 'fibre_channel', 'data': {}} - else: # no masking views found - LOG.debug("No MaskingViews were found. Deleting zone.") - data = {'driver_volume_type': 'fibre_channel', - 'data': {'target_wwn': zoning_mappings['target_wwns'], - 'initiator_target_map': - zoning_mappings['init_targ_map']}} - - LOG.debug("Return FC data for zone removal: %(data)s.", - {'data': data}) - - return data - - def _build_initiator_target_map(self, volume, connector): - """Build the target_wwns and the initiator target map. - - :param volume: the cinder volume object - :param connector: the connector object - :returns: target_wwns -- list, init_targ_map -- dict - """ - target_wwns, init_targ_map = [], {} - initiator_wwns = connector['wwpns'] - fc_targets = self.common.get_target_wwns_from_masking_view( - volume, connector) - - if self.zonemanager_lookup_service: - mapping = ( - self.zonemanager_lookup_service. - get_device_mapping_from_network(initiator_wwns, fc_targets)) - for entry in mapping: - map_d = mapping[entry] - target_wwns.extend(map_d['target_port_wwn_list']) - for initiator in map_d['initiator_port_wwn_list']: - init_targ_map[initiator] = map_d['target_port_wwn_list'] - else: # No lookup service, pre-zoned case. - target_wwns = fc_targets - for initiator in initiator_wwns: - init_targ_map[initiator] = target_wwns - - return list(set(target_wwns)), init_targ_map - - def extend_volume(self, volume, new_size): - """Extend an existing volume. 
- - :param volume: the cinder volume object - :param new_size: the required new size - """ - self.common.extend_volume(volume, new_size) - - def get_volume_stats(self, refresh=False): - """Get volume stats. - - :param refresh: boolean -- If True, run update the stats first. - :returns: dict -- the stats dict - """ - if refresh: - self.update_volume_stats() - - return self._stats - - def update_volume_stats(self): - """Retrieve stats info from volume group.""" - LOG.debug("Updating volume stats") - data = self.common.update_volume_stats() - data['storage_protocol'] = 'FC' - data['driver_version'] = self.VERSION - self._stats = data - - def manage_existing(self, volume, external_ref): - """Manages an existing VMAX Volume (import to Cinder). - - Renames the Volume to match the expected name for the volume. - Also need to consider things like QoS, Emulation, account/tenant. - :param volume: the volume object - :param external_ref: the reference for the VMAX volume - :returns: model_update - """ - return self.common.manage_existing(volume, external_ref) - - def manage_existing_get_size(self, volume, external_ref): - """Return size of an existing VMAX volume to manage_existing. - - :param self: reference to class - :param volume: the volume object including the volume_type_id - :param external_ref: reference to the existing volume - :returns: size of the volume in GB - """ - return self.common.manage_existing_get_size(volume, external_ref) - - def unmanage(self, volume): - """Export VMAX volume from Cinder. - - Leave the volume intact on the backend array. - """ - return self.common.unmanage(volume) - - def retype(self, ctxt, volume, new_type, diff, host): - """Migrate volume to another host using retype. - - :param ctxt: context - :param volume: the volume object including the volume_type_id - :param new_type: the new volume type. - :param diff: difference between old and new volume types. - Unused in driver. 
- :param host: the host dict holding the relevant - target(destination) information - :returns: boolean -- True if retype succeeded, False if error - """ - return self.common.retype(volume, new_type, host) - - def failover_host(self, context, volumes, secondary_id=None, groups=None): - """Failover volumes to a secondary host/ backend. - - :param context: the context - :param volumes: the list of volumes to be failed over - :param secondary_id: the backend to be failed over to, is 'default' - if fail back - :param groups: replication groups - :returns: secondary_id, volume_update_list, group_update_list - """ - return self.common.failover_host(volumes, secondary_id, groups) - - def create_group(self, context, group): - """Creates a generic volume group. - - :param context: the context - :param group: the group object - """ - self.common.create_group(context, group) - - def delete_group(self, context, group, volumes): - """Deletes a generic volume group. - - :param context: the context - :param group: the group object - :param volumes: the member volumes - """ - return self.common.delete_group( - context, group, volumes) - - def create_group_snapshot(self, context, group_snapshot, snapshots): - """Creates a group snapshot. - - :param context: the context - :param group_snapshot: the grouop snapshot - :param snapshots: snapshots list - """ - return self.common.create_group_snapshot(context, - group_snapshot, snapshots) - - def delete_group_snapshot(self, context, group_snapshot, snapshots): - """Deletes a group snapshot. - - :param context: the context - :param group_snapshot: the grouop snapshot - :param snapshots: snapshots list - """ - return self.common.delete_group_snapshot(context, - group_snapshot, snapshots) - - def update_group(self, context, group, - add_volumes=None, remove_volumes=None): - """Updates LUNs in generic volume group. 
- - :param context: the context - :param group: the group object - :param add_volumes: flag for adding volumes - :param remove_volumes: flag for removing volumes - """ - return self.common.update_group(group, add_volumes, - remove_volumes) - - def create_group_from_src( - self, context, group, volumes, group_snapshot=None, - snapshots=None, source_group=None, source_vols=None): - """Creates the volume group from source. - - :param context: the context - :param group: the group object to be created - :param volumes: volumes in the group - :param group_snapshot: the source volume group snapshot - :param snapshots: snapshots of the source volumes - :param source_group: the dictionary of a volume group as source. - :param source_vols: a list of volume dictionaries in the source_group. - """ - return self.common.create_group_from_src( - context, group, volumes, group_snapshot, snapshots, source_group, - source_vols) diff --git a/cinder/volume/drivers/dell_emc/vmax/iscsi.py b/cinder/volume/drivers/dell_emc/vmax/iscsi.py deleted file mode 100644 index 189d5ccf8..000000000 --- a/cinder/volume/drivers/dell_emc/vmax/iscsi.py +++ /dev/null @@ -1,474 +0,0 @@ -# Copyright (c) 2017 Dell Inc. or its subsidiaries. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -""" -ISCSI Drivers for Dell EMC VMAX arrays based on REST. 
- -""" -from oslo_log import log as logging -import six - -from cinder import exception -from cinder.i18n import _ -from cinder import interface -from cinder.volume import driver -from cinder.volume.drivers.dell_emc.vmax import common - -LOG = logging.getLogger(__name__) - - -@interface.volumedriver -class VMAXISCSIDriver(driver.ISCSIDriver): - """ISCSI Drivers for VMAX using Rest. - - Version history: - - .. code-block:: none - - 1.0.0 - Initial driver - 1.1.0 - Multiple pools and thick/thin provisioning, - performance enhancement. - 2.0.0 - Add driver requirement functions - 2.1.0 - Add consistency group functions - 2.1.1 - Fixed issue with mismatched config (bug #1442376) - 2.1.2 - Clean up failed clones (bug #1440154) - 2.1.3 - Fixed a problem with FAST support (bug #1435069) - 2.2.0 - Add manage/unmanage - 2.2.1 - Support for SE 8.0.3 - 2.2.2 - Update Consistency Group - 2.2.3 - Pool aware scheduler(multi-pool) support - 2.2.4 - Create CG from CG snapshot - 2.3.0 - Name change for MV and SG for FAST (bug #1515181) - - Fix for randomly choosing port group. 
(bug #1501919) - - get_short_host_name needs to be called in find_device_number - (bug #1520635) - - Proper error handling for invalid SLOs (bug #1512795) - - Extend Volume for VMAX3, SE8.1.0.3 - https://blueprints.launchpad.net/cinder/+spec/vmax3-extend-volume - - Incorrect SG selected on an attach (#1515176) - - Cleanup Zoning (bug #1501938) NOTE: FC only - - Last volume in SG fix - - _remove_last_vol_and_delete_sg is not being called - for VMAX3 (bug #1520549) - - necessary updates for CG changes (#1534616) - - Changing PercentSynced to CopyState (bug #1517103) - - Getting iscsi ip from port in existing masking view - - Replacement of EMCGetTargetEndpoints api (bug #1512791) - - VMAX3 snapvx improvements (bug #1522821) - - Operations and timeout issues (bug #1538214) - 2.4.0 - EMC VMAX - locking SG for concurrent threads (bug #1554634) - - SnapVX licensing checks for VMAX3 (bug #1587017) - - VMAX oversubscription Support (blueprint vmax-oversubscription) - - QoS support (blueprint vmax-qos) - - VMAX2/VMAX3 iscsi multipath support (iscsi only) - https://blueprints.launchpad.net/cinder/+spec/vmax-iscsi-multipath - 2.5.0 - Attach and detach snapshot (blueprint vmax-attach-snapshot) - - MVs and SGs not reflecting correct protocol (bug #1640222) - - Storage assisted volume migration via retype - (bp vmax-volume-migration) - - Support for compression on All Flash - - Volume replication 2.1 (bp add-vmax-replication) - - rename and restructure driver (bp vmax-rename-dell-emc) - 3.0.0 - REST based driver - - Retype (storage-assisted migration) - - QoS support - - Support for compression on All Flash - - Support for volume replication - - Support for live migration - - Support for Generic Volume Group - """ - - VERSION = "3.0.0" - - # ThirdPartySystems wiki - CI_WIKI_NAME = "EMC_VMAX_CI" - - def __init__(self, *args, **kwargs): - - super(VMAXISCSIDriver, self).__init__(*args, **kwargs) - self.active_backend_id = kwargs.get('active_backend_id', None) - self.common = ( - 
common.VMAXCommon( - 'iSCSI', - self.VERSION, - configuration=self.configuration, - active_backend_id=self.active_backend_id)) - - def check_for_setup_error(self): - pass - - def create_volume(self, volume): - """Creates a VMAX volume. - - :param volume: the cinder volume object - :returns: provider location dict - """ - return self.common.create_volume(volume) - - def create_volume_from_snapshot(self, volume, snapshot): - """Creates a volume from a snapshot. - - :param volume: the cinder volume object - :param snapshot: the cinder snapshot object - :returns: provider location dict - """ - return self.common.create_volume_from_snapshot( - volume, snapshot) - - def create_cloned_volume(self, volume, src_vref): - """Creates a cloned volume. - - :param volume: the cinder volume object - :param src_vref: the source volume reference - :returns: provider location dict - """ - return self.common.create_cloned_volume(volume, src_vref) - - def delete_volume(self, volume): - """Deletes a VMAX volume. - - :param volume: the cinder volume object - """ - self.common.delete_volume(volume) - - def create_snapshot(self, snapshot): - """Creates a snapshot. - - :param snapshot: the cinder snapshot object - :returns: provider location dict - """ - src_volume = snapshot.volume - return self.common.create_snapshot(snapshot, src_volume) - - def delete_snapshot(self, snapshot): - """Deletes a snapshot. - - :param snapshot: the cinder snapshot object - """ - src_volume = snapshot.volume - - self.common.delete_snapshot(snapshot, src_volume) - - def ensure_export(self, context, volume): - """Driver entry point to get the export info for an existing volume. - - :param context: the context - :param volume: the cinder volume object - """ - pass - - def create_export(self, context, volume, connector): - """Driver entry point to get the export info for a new volume. 
- - :param context: the context - :param volume: the cinder volume object - :param connector: the connector object - """ - pass - - def remove_export(self, context, volume): - """Driver entry point to remove an export for a volume. - - :param context: the context - :param volume: the cinder volume object - """ - pass - - @staticmethod - def check_for_export(context, volume_id): - """Make sure volume is exported. - - :param context: the context - :param volume_id: the volume id - """ - pass - - def initialize_connection(self, volume, connector): - """Initializes the connection and returns connection info. - - The iscsi driver returns a driver_volume_type of 'iscsi'. - the format of the driver data is defined in smis_get_iscsi_properties. - Example return value: - - .. code-block:: default - - { - 'driver_volume_type': 'iscsi', - 'data': { - 'target_discovered': True, - 'target_iqn': 'iqn.2010-10.org.openstack:volume-00000001', - 'target_portal': '127.0.0.0.1:3260', - 'volume_id': '12345678-1234-4321-1234-123456789012' - } - } - - Example return value (multipath is enabled): - - .. code-block:: default - - { - 'driver_volume_type': 'iscsi', - 'data': { - 'target_discovered': True, - 'target_iqns': ['iqn.2010-10.org.openstack:volume-00001', - 'iqn.2010-10.org.openstack:volume-00002'], - 'target_portals': ['127.0.0.1:3260', '127.0.1.1:3260'], - 'target_luns': [1, 1] - } - } - :param volume: the cinder volume object - :param connector: the connector object - :returns: dict -- the iscsi dict - """ - device_info = self.common.initialize_connection( - volume, connector) - return self.get_iscsi_dict(device_info, volume) - - def get_iscsi_dict(self, device_info, volume): - """Populate iscsi dict to pass to nova. 
- - :param device_info: device info dict - :param volume: volume object - :returns: iscsi dict - """ - try: - ip_and_iqn = device_info['ip_and_iqn'] - is_multipath = device_info['is_multipath'] - host_lun_id = device_info['hostlunid'] - except KeyError as e: - exception_message = (_("Cannot get iSCSI ipaddresses, multipath " - "flag, or hostlunid. Exception is %(e)s.") - % {'e': six.text_type(e)}) - raise exception.VolumeBackendAPIException(data=exception_message) - - iscsi_properties = self.vmax_get_iscsi_properties( - volume, ip_and_iqn, is_multipath, host_lun_id) - - LOG.info("iSCSI properties are: %(props)s", - {'props': iscsi_properties}) - return {'driver_volume_type': 'iscsi', - 'data': iscsi_properties} - - @staticmethod - def vmax_get_iscsi_properties(volume, ip_and_iqn, - is_multipath, host_lun_id): - """Gets iscsi configuration. - - We ideally get saved information in the volume entity, but fall back - to discovery if need be. Discovery may be completely removed in future - The properties are: - :target_discovered: boolean indicating whether discovery was used - :target_iqn: the IQN of the iSCSI target - :target_portal: the portal of the iSCSI target - :target_lun: the lun of the iSCSI target - :volume_id: the UUID of the volume - :auth_method:, :auth_username:, :auth_password: - the authentication details. Right now, either auth_method is not - present meaning no authentication, or auth_method == `CHAP` - meaning use CHAP with the specified credentials. 
- - :param volume: the cinder volume object - :param ip_and_iqn: list of ip and iqn dicts - :param is_multipath: flag for multipath - :param host_lun_id: the host lun id of the device - :returns: properties - """ - properties = {} - if len(ip_and_iqn) > 1 and is_multipath: - properties['target_portals'] = ([t['ip'] + ":3260" for t in - ip_and_iqn]) - properties['target_iqns'] = ([t['iqn'].split(",")[0] for t in - ip_and_iqn]) - properties['target_luns'] = [host_lun_id] * len(ip_and_iqn) - properties['target_discovered'] = True - properties['target_iqn'] = ip_and_iqn[0]['iqn'].split(",")[0] - properties['target_portal'] = ip_and_iqn[0]['ip'] + ":3260" - properties['target_lun'] = host_lun_id - properties['volume_id'] = volume.id - - LOG.info("ISCSI properties: %(properties)s.", - {'properties': properties}) - LOG.info("ISCSI volume is: %(volume)s.", {'volume': volume}) - - if hasattr(volume, 'provider_auth'): - auth = volume.provider_auth - - if auth is not None: - (auth_method, auth_username, auth_secret) = auth.split() - - properties['auth_method'] = auth_method - properties['auth_username'] = auth_username - properties['auth_password'] = auth_secret - - return properties - - def terminate_connection(self, volume, connector, **kwargs): - """Disallow connection from connector. - - Return empty data if other volumes are in the same zone. - The FibreChannel ZoneManager doesn't remove zones - if there isn't an initiator_target_map in the - return of terminate_connection. - - :param volume: the volume object - :param connector: the connector object - :returns: dict -- the target_wwns and initiator_target_map if the - zone is to be removed, otherwise empty - """ - self.common.terminate_connection(volume, connector) - - def extend_volume(self, volume, new_size): - """Extend an existing volume. 
- - :param volume: the cinder volume object - :param new_size: the required new size - """ - self.common.extend_volume(volume, new_size) - - def get_volume_stats(self, refresh=False): - """Get volume stats. - - :param refresh: boolean -- If True, run update the stats first. - :returns: dict -- the stats dict - """ - if refresh: - self.update_volume_stats() - - return self._stats - - def update_volume_stats(self): - """Retrieve stats info from volume group.""" - LOG.debug("Updating volume stats") - data = self.common.update_volume_stats() - data['storage_protocol'] = 'iSCSI' - data['driver_version'] = self.VERSION - self._stats = data - - def manage_existing(self, volume, external_ref): - """Manages an existing VMAX Volume (import to Cinder). - - Renames the Volume to match the expected name for the volume. - Also need to consider things like QoS, Emulation, account/tenant. - """ - return self.common.manage_existing(volume, external_ref) - - def manage_existing_get_size(self, volume, external_ref): - """Return size of an existing VMAX volume to manage_existing. - - :param self: reference to class - :param volume: the volume object including the volume_type_id - :param external_ref: reference to the existing volume - :returns: size of the volume in GB - """ - return self.common.manage_existing_get_size(volume, external_ref) - - def unmanage(self, volume): - """Export VMAX volume from Cinder. - - Leave the volume intact on the backend array. - """ - return self.common.unmanage(volume) - - def retype(self, ctxt, volume, new_type, diff, host): - """Migrate volume to another host using retype. - - :param ctxt: context - :param volume: the volume object including the volume_type_id - :param new_type: the new volume type. - :param diff: difference between old and new volume types. - Unused in driver. 
- :param host: the host dict holding the relevant - target(destination) information - :returns: boolean -- True if retype succeeded, False if error - """ - return self.common.retype(volume, new_type, host) - - def failover_host(self, context, volumes, secondary_id=None, groups=None): - """Failover volumes to a secondary host/ backend. - - :param context: the context - :param volumes: the list of volumes to be failed over - :param secondary_id: the backend to be failed over to, is 'default' - if fail back - :param groups: replication groups - :returns: secondary_id, volume_update_list, group_update_list - """ - return self.common.failover_host(volumes, secondary_id, groups) - - def create_group(self, context, group): - """Creates a generic volume group. - - :param context: the context - :param group: the group object - """ - self.common.create_group(context, group) - - def delete_group(self, context, group, volumes): - """Deletes a generic volume group. - - :param context: the context - :param group: the group object - :param volumes: the member volumes - """ - return self.common.delete_group( - context, group, volumes) - - def create_group_snapshot(self, context, group_snapshot, snapshots): - """Creates a group snapshot. - - :param context: the context - :param group_snapshot: the group snapshot - :param snapshots: snapshots list - """ - return self.common.create_group_snapshot(context, - group_snapshot, snapshots) - - def delete_group_snapshot(self, context, group_snapshot, snapshots): - """Deletes a group snapshot. - - :param context: the context - :param group_snapshot: the grouop snapshot - :param snapshots: snapshots list - """ - return self.common.delete_group_snapshot(context, - group_snapshot, snapshots) - - def update_group(self, context, group, - add_volumes=None, remove_volumes=None): - """Updates LUNs in group. 
- - :param context: the context - :param group: the group object - :param add_volumes: flag for adding volumes - :param remove_volumes: flag for removing volumes - """ - return self.common.update_group(group, add_volumes, - remove_volumes) - - def create_group_from_src( - self, context, group, volumes, group_snapshot=None, - snapshots=None, source_group=None, source_vols=None): - """Creates the volume group from source. - - :param context: the context - :param group: the consistency group object to be created - :param volumes: volumes in the group - :param group_snapshot: the source volume group snapshot - :param snapshots: snapshots of the source volumes - :param source_group: the dictionary of a volume group as source. - :param source_vols: a list of volume dictionaries in the source_group. - """ - return self.common.create_group_from_src( - context, group, volumes, group_snapshot, snapshots, source_group, - source_vols) diff --git a/cinder/volume/drivers/dell_emc/vmax/masking.py b/cinder/volume/drivers/dell_emc/vmax/masking.py deleted file mode 100644 index 5d3e9d809..000000000 --- a/cinder/volume/drivers/dell_emc/vmax/masking.py +++ /dev/null @@ -1,1524 +0,0 @@ -# Copyright (c) 2017 Dell Inc. or its subsidiaries. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -import time - -from oslo_log import log as logging -import six - -from cinder import coordination -from cinder import exception -from cinder.i18n import _ -from cinder.volume.drivers.dell_emc.vmax import provision -from cinder.volume.drivers.dell_emc.vmax import utils - -LOG = logging.getLogger(__name__) - - -class VMAXMasking(object): - """Masking class for Dell EMC VMAX. - - Masking code to dynamically create a masking view. - It supports VMAX arrays. - """ - def __init__(self, prtcl, rest): - self.protocol = prtcl - self.utils = utils.VMAXUtils() - self.rest = rest - self.provision = provision.VMAXProvision(self.rest) - - def setup_masking_view( - self, serial_number, masking_view_dict, extra_specs): - - @coordination.synchronized("emc-mv-{maskingview_name}") - def do_get_or_create_masking_view_and_map_lun(maskingview_name): - return self.get_or_create_masking_view_and_map_lun( - serial_number, maskingview_name, masking_view_dict, - extra_specs) - return do_get_or_create_masking_view_and_map_lun( - masking_view_dict[utils.MV_NAME]) - - def get_or_create_masking_view_and_map_lun( - self, serial_number, maskingview_name, masking_view_dict, - extra_specs): - """Get or Create a masking view and add a volume to the storage group. - - Given a masking view dict either get or create a masking view and add - the volume to the associated storage group. 
- :param serial_number: the array serial number - :param maskingview_name: the masking view name - :param masking_view_dict: the masking view dict - :param extra_specs: the extra specifications - :return: rollback_dict - :raises: VolumeBackendAPIException - """ - storagegroup_name = masking_view_dict[utils.SG_NAME] - volume_name = masking_view_dict[utils.VOL_NAME] - masking_view_dict[utils.EXTRA_SPECS] = extra_specs - device_id = masking_view_dict[utils.DEVICE_ID] - if 'source_nf_sg' in masking_view_dict: - default_sg_name = masking_view_dict['source_nf_sg'] - else: - default_sg_name = self._get_default_storagegroup_and_remove_vol( - serial_number, device_id, masking_view_dict, volume_name, - extra_specs) - - try: - error_message = self._get_or_create_masking_view( - serial_number, masking_view_dict, extra_specs) - LOG.debug( - "The masking view in the attach operation is " - "%(masking_name)s. The storage group " - "in the masking view is %(storage_name)s.", - {'masking_name': maskingview_name, - 'storage_name': storagegroup_name}) - except Exception as e: - LOG.exception( - "Masking View creation or retrieval was not successful " - "for masking view %(maskingview_name)s. " - "Attempting rollback.", - {'maskingview_name': masking_view_dict[utils.MV_NAME]}) - error_message = six.text_type(e) - - rollback_dict = masking_view_dict - try: - rollback_dict['portgroup_name'] = ( - self.rest.get_element_from_masking_view( - serial_number, maskingview_name, portgroup=True)) - except Exception as e: - error_message = ("Error retrieving port group. Exception " - "received: %(e)s" % {'e': six.text_type(e)}) - rollback_dict['default_sg_name'] = default_sg_name - - if error_message: - # Rollback code if we cannot complete any of the steps above - # successfully then we must roll back by adding the volume back to - # the default storage group for that slo/workload combination. 
- - if rollback_dict['slo'] is not None: - self.check_if_rollback_action_for_masking_required( - serial_number, device_id, masking_view_dict) - - else: - self._check_adding_volume_to_storage_group( - serial_number, device_id, rollback_dict['default_sg_name'], - masking_view_dict[utils.VOL_NAME], - masking_view_dict[utils.EXTRA_SPECS]) - - exception_message = (_( - "Failed to get, create or add volume %(volumeName)s " - "to masking view %(maskingview_name)s. " - "The error message received was %(errorMessage)s.") - % {'maskingview_name': maskingview_name, - 'volumeName': volume_name, - 'errorMessage': error_message}) - LOG.error(exception_message) - raise exception.VolumeBackendAPIException(data=exception_message) - - return rollback_dict - - def _get_default_storagegroup_and_remove_vol( - self, serial_number, device_id, masking_view_dict, - volume_name, extra_specs): - """Get the default storage group and remove volume. - - :param serial_number: the array serial number - :param device_id: the device id - :param masking_view_dict: the masking view dict - :param volume_name: the volume name - :param extra_specs: the extra specifications - :return: default_sg_name - """ - default_sg_name = self.utils.get_default_storage_group_name( - masking_view_dict[utils.SRP], - masking_view_dict[utils.SLO], - masking_view_dict[utils.WORKLOAD], - masking_view_dict[utils.DISABLECOMPRESSION], - masking_view_dict[utils.IS_RE]) - - check_vol = self.rest.is_volume_in_storagegroup( - serial_number, device_id, default_sg_name) - if check_vol: - self.remove_vol_from_storage_group( - serial_number, device_id, default_sg_name, - volume_name, extra_specs) - else: - LOG.warning( - "Volume: %(volume_name)s does not belong " - "to default storage group %(default_sg_name)s.", - {'volume_name': volume_name, - 'default_sg_name': default_sg_name}) - return default_sg_name - - def _get_or_create_masking_view(self, serial_number, masking_view_dict, - extra_specs): - """Retrieve an existing masking 
view or create a new one. - - :param serial_number: the array serial number - :param masking_view_dict: the masking view dict - :param extra_specs: the extra specifications - :return: error message - """ - maskingview_name = masking_view_dict[utils.MV_NAME] - - masking_view_details = self.rest.get_masking_view( - serial_number, masking_view_name=maskingview_name) - if not masking_view_details: - error_message = self._create_new_masking_view( - serial_number, masking_view_dict, maskingview_name, - extra_specs) - - else: - storagegroup_name, error_message = ( - self._validate_existing_masking_view( - serial_number, masking_view_dict, maskingview_name, - extra_specs)) - - return error_message - - def _create_new_masking_view(self, serial_number, masking_view_dict, - maskingview_name, extra_specs): - """Create a new masking view. - - :param serial_number: the array serial number - :param masking_view_dict: the masking view dict - :param maskingview_name: the masking view name - :param extra_specs: the extra specifications - :return: error_message - """ - init_group_name = masking_view_dict[utils.IG_NAME] - parent_sg_name = masking_view_dict[utils.PARENT_SG_NAME] - storagegroup_name = masking_view_dict[utils.SG_NAME] - connector = masking_view_dict[utils.CONNECTOR] - port_group_name = masking_view_dict[utils.PORTGROUPNAME] - LOG.info("Port Group in masking view operation: %(port_group_name)s.", - {'port_group_name': port_group_name}) - - # get or create parent sg - error_message = self._get_or_create_storage_group( - serial_number, masking_view_dict, parent_sg_name, extra_specs, - parent=True) - if error_message: - return error_message - - # get or create child sg - error_message = self._get_or_create_storage_group( - serial_number, masking_view_dict, storagegroup_name, extra_specs) - if error_message: - return error_message - - __, error_message = self._check_port_group( - serial_number, port_group_name) - if error_message: - return error_message - - init_group_name, 
error_message = (self._get_or_create_initiator_group( - serial_number, init_group_name, connector, extra_specs)) - if error_message: - return error_message - - # Only after the components of the MV have been validated, - # add the volume to the storage group and recheck that it - # has been successfully added. This is necessary before - # creating a new masking view. - error_message = self._check_adding_volume_to_storage_group( - serial_number, masking_view_dict[utils.DEVICE_ID], - storagegroup_name, masking_view_dict[utils.VOL_NAME], - masking_view_dict[utils.EXTRA_SPECS]) - if error_message: - return error_message - - error_message = self._check_add_child_sg_to_parent_sg( - serial_number, storagegroup_name, parent_sg_name, - masking_view_dict[utils.EXTRA_SPECS]) - if error_message: - return error_message - - error_message = (self.create_masking_view( - serial_number, maskingview_name, parent_sg_name, - port_group_name, init_group_name, extra_specs)) - - return error_message - - def _validate_existing_masking_view(self, serial_number, masking_view_dict, - maskingview_name, extra_specs): - """Validate the components of an existing masking view. 
- - :param serial_number: the array serial number - :param masking_view_dict: the masking view dict - :param maskingview_name: the amsking view name - :param extra_specs: the extra specifications - :return: storage_group_name -- string, msg -- string - """ - storage_group_name, msg = self._check_existing_storage_group( - serial_number, maskingview_name, masking_view_dict) - if not msg: - portgroup_name = self.rest.get_element_from_masking_view( - serial_number, maskingview_name, portgroup=True) - __, msg = self._check_port_group( - serial_number, portgroup_name) - if not msg: - initiator_group, msg = self._check_existing_initiator_group( - serial_number, maskingview_name, masking_view_dict, - storage_group_name, portgroup_name, extra_specs) - - return storage_group_name, msg - - def _check_add_child_sg_to_parent_sg( - self, serial_number, child_sg_name, parent_sg_name, extra_specs): - """Check adding a child storage group to a parent storage group. - - :param serial_number: the array serial number - :param child_sg_name: the name of the child storage group - :param parent_sg_name: the name of the aprent storage group - :param extra_specs: the extra specifications - :return: error_message or None - """ - msg = None - if self.rest.is_child_sg_in_parent_sg( - serial_number, child_sg_name, parent_sg_name): - LOG.info("Child sg: %(child_sg)s is already part " - "of parent storage group %(parent_sg)s.", - {'child_sg': child_sg_name, - 'parent_sg': parent_sg_name}) - else: - try: - self.add_child_sg_to_parent_sg( - serial_number, child_sg_name, parent_sg_name, extra_specs) - except Exception as e: - msg = ("Exception adding child sg %(child_sg)s to " - "%(parent_sg)s. 
Exception received was %(e)s" - % {'child_sg': child_sg_name, - 'parent_sg': parent_sg_name, - 'e': six.text_type(e)}) - LOG.error(msg) - return msg - - def add_child_sg_to_parent_sg( - self, serial_number, child_sg_name, parent_sg_name, extra_specs, - default_version=True - ): - """Add a child storage group to a parent storage group. - - :param default_version: the default uv4 version - :param serial_number: the array serial number - :param child_sg_name: the name of the child storage group - :param parent_sg_name: the name of the aprent storage group - :param extra_specs: the extra specifications - """ - start_time = time.time() - - @coordination.synchronized("emc-sg-{child_sg}") - @coordination.synchronized("emc-sg-{parent_sg}") - def do_add_sg_to_sg(child_sg, parent_sg): - # Check if another process has added the child to the - # parent sg while this process was waiting for the lock - if self.rest.is_child_sg_in_parent_sg( - serial_number, child_sg_name, parent_sg_name): - pass - else: - if default_version: - self.rest.add_child_sg_to_parent_sg( - serial_number, child_sg, parent_sg, extra_specs) - else: - self.rest.add_empty_child_sg_to_parent_sg( - serial_number, child_sg, parent_sg, extra_specs) - - do_add_sg_to_sg(child_sg_name, parent_sg_name) - - LOG.debug("Add child to storagegroup took: %(delta)s H:MM:SS.", - {'delta': self.utils.get_time_delta(start_time, - time.time())}) - LOG.info("Added child sg: %(child_name)s to parent storage " - "group %(parent_name)s.", - {'child_name': child_sg_name, 'parent_name': parent_sg_name}) - - def _get_or_create_storage_group( - self, serial_number, masking_view_dict, storagegroup_name, - extra_specs, parent=False): - """Get or create a storage group for a masking view. 
- - :param serial_number: the array serial number - :param masking_view_dict: the masking view dict - :param storagegroup_name: the storage group name - :param extra_specs: the extra specifications - :param parent: flag to indicate if this a parent storage group - :return: msg -- string or None - """ - msg = None - srp = extra_specs[utils.SRP] - workload = extra_specs[utils.WORKLOAD] - if parent: - slo = None - else: - slo = extra_specs[utils.SLO] - do_disable_compression = ( - masking_view_dict[utils.DISABLECOMPRESSION]) - storagegroup = self.rest.get_storage_group( - serial_number, storagegroup_name) - if storagegroup is None: - storagegroup = self.provision.create_storage_group( - serial_number, storagegroup_name, srp, slo, workload, - extra_specs, do_disable_compression) - - if storagegroup is None: - msg = ("Cannot get or create a storage group: " - "%(storagegroup_name)s for volume %(volume_name)s." - % {'storagegroup_name': storagegroup_name, - 'volume_name': masking_view_dict[utils.VOL_NAME]}) - LOG.error(msg) - - # If qos exists, update storage group to reflect qos parameters - if 'qos' in extra_specs: - self.rest.update_storagegroup_qos( - serial_number, storagegroup_name, extra_specs) - - return msg - - def _check_existing_storage_group( - self, serial_number, maskingview_name, masking_view_dict): - """Check if the masking view has the child storage group. - - Get the parent storage group associated with a masking view and check - if the required child storage group is already a member. If not, get - or create the child storage group. 
- :param serial_number: the array serial number - :param maskingview_name: the masking view name - :param masking_view_dict: the masking view dict - :return: storage group name, msg - """ - msg = None - child_sg_name = masking_view_dict[utils.SG_NAME] - - sg_from_mv = self.rest.get_element_from_masking_view( - serial_number, maskingview_name, storagegroup=True) - - storagegroup = self.rest.get_storage_group(serial_number, sg_from_mv) - - if not storagegroup: - msg = ("Cannot get storage group: %(sg_from_mv)s " - "from masking view %(masking_view)s." - % {'sg_from_mv': sg_from_mv, - 'masking_view': maskingview_name}) - LOG.error(msg) - else: - check_child = self.rest.is_child_sg_in_parent_sg( - serial_number, child_sg_name, sg_from_mv) - child_sg = self.rest.get_storage_group( - serial_number, child_sg_name) - # Ensure the child sg can be retrieved - if check_child and not child_sg: - msg = ("Cannot get child storage group: %(sg_name)s " - "but it is listed as child of %(parent_sg)s" - % {'sg_name': child_sg_name, 'parent_sg': sg_from_mv}) - LOG.error(msg) - elif check_child and child_sg: - LOG.info("Retrieved child sg %(sg_name)s from %(mv_name)s", - {'sg_name': child_sg_name, - 'mv_name': maskingview_name}) - else: - msg = self._get_or_create_storage_group( - serial_number, masking_view_dict, child_sg_name, - masking_view_dict[utils.EXTRA_SPECS]) - if not msg: - msg = self._check_adding_volume_to_storage_group( - serial_number, masking_view_dict[utils.DEVICE_ID], - child_sg_name, masking_view_dict[utils.VOL_NAME], - masking_view_dict[utils.EXTRA_SPECS]) - if not msg and not check_child: - msg = self._check_add_child_sg_to_parent_sg( - serial_number, child_sg_name, sg_from_mv, - masking_view_dict[utils.EXTRA_SPECS]) - - return child_sg_name, msg - - def move_volume_between_storage_groups( - self, array, device_id, source_storagegroup_name, - target_storagegroup_name, extra_specs): - @coordination.synchronized("emc-sg-{source_storage_group}") - 
@coordination.synchronized("emc-sg-{target_storage_group}") - def do_move_volume_between_storage_groups(source_storage_group, - target_storage_group): - self.rest.move_volume_between_storage_groups( - array, device_id, source_storage_group, target_storage_group, - extra_specs) - - do_move_volume_between_storage_groups( - source_storagegroup_name, target_storagegroup_name) - - def _check_port_group(self, serial_number, portgroup_name): - """Check that you can get a port group. - - :param serial_number: the array serial number - :param portgroup_name: the port group name - :returns: string -- msg, the error message - """ - msg = None - portgroup = self.rest.get_portgroup(serial_number, portgroup_name) - if portgroup is None: - msg = ("Cannot get port group: %(portgroup)s from the array " - "%(array)s. Portgroups must be pre-configured - please " - "check the array." - % {'portgroup': portgroup_name, 'array': serial_number}) - LOG.error(msg) - return portgroup_name, msg - - def _get_or_create_initiator_group( - self, serial_number, init_group_name, connector, extra_specs): - """Retrieve or create an initiator group. - - :param serial_number: the array serial number - :param init_group_name: the name of the initiator group - :param connector: the connector object - :param extra_specs: the extra specifications - :return: name of the initiator group -- string, msg - """ - msg = None - initiator_names = self.find_initiator_names(connector) - LOG.debug("The initiator name(s) are: %(initiatorNames)s.", - {'initiatorNames': initiator_names}) - - found_init_group = self._find_initiator_group( - serial_number, initiator_names) - - # If you cannot find an initiator group that matches the connector - # info, create a new initiator group. 
- if found_init_group is None: - found_init_group = self._create_initiator_group( - serial_number, init_group_name, initiator_names, extra_specs) - LOG.info("Created new initiator group name: %(init_group_name)s.", - {'init_group_name': init_group_name}) - else: - LOG.info("Using existing initiator group name: " - "%(init_group_name)s.", - {'init_group_name': found_init_group}) - - if found_init_group is None: - msg = ("Cannot get or create initiator group: " - "%(init_group_name)s. " - % {'init_group_name': init_group_name}) - LOG.error(msg) - - return found_init_group, msg - - def _check_existing_initiator_group( - self, serial_number, maskingview_name, masking_view_dict, - storagegroup_name, portgroup_name, extra_specs): - """Checks an existing initiator group in the masking view. - - Check if the initiators in the initiator group match those in the - system. - :param serial_number: the array serial number - :param maskingview_name: name of the masking view - :param masking_view_dict: masking view dict - :param storagegroup_name: the storage group name - :param portgroup_name: the port group name - :param extra_specs: the extra specifications - :returns: ig_from_mv, msg - """ - msg = None - ig_from_mv = self.rest.get_element_from_masking_view( - serial_number, maskingview_name, host=True) - check_ig = masking_view_dict[utils.INITIATOR_CHECK] - - if check_ig: - # First verify that the initiator group matches the initiators. - check, found_ig = self._verify_initiator_group_from_masking_view( - serial_number, maskingview_name, masking_view_dict, ig_from_mv, - storagegroup_name, portgroup_name, extra_specs) - if not check: - msg = ("Unable to verify initiator group: %(ig_name)s " - "in masking view %(maskingview_name)s." 
- % {'ig_name': ig_from_mv, - 'maskingview_name': maskingview_name}) - LOG.error(msg) - return ig_from_mv, msg - - def _check_adding_volume_to_storage_group( - self, serial_number, device_id, storagegroup_name, - volume_name, extra_specs): - """Check if a volume is part of an sg and add it if not. - - :param serial_number: the array serial number - :param device_id: the device id - :param storagegroup_name: the storage group name - :param volume_name: volume name - :param extra_specs: extra specifications - :return: msg - """ - msg = None - if self.rest.is_volume_in_storagegroup( - serial_number, device_id, storagegroup_name): - LOG.info("Volume: %(volume_name)s is already part " - "of storage group %(sg_name)s.", - {'volume_name': volume_name, - 'sg_name': storagegroup_name}) - else: - try: - self.add_volume_to_storage_group( - serial_number, device_id, storagegroup_name, - volume_name, extra_specs) - except Exception as e: - msg = ("Exception adding volume %(vol)s to %(sg)s. " - "Exception received was %(e)s." - % {'vol': volume_name, 'sg': storagegroup_name, - 'e': six.text_type(e)}) - LOG.error(msg) - return msg - - def add_volume_to_storage_group( - self, serial_number, device_id, storagegroup_name, - volume_name, extra_specs): - """Add a volume to a storage group. 
- - :param serial_number: array serial number - :param device_id: volume device id - :param storagegroup_name: storage group name - :param volume_name: volume name - :param extra_specs: extra specifications - """ - start_time = time.time() - - @coordination.synchronized("emc-sg-{sg_name}") - def do_add_volume_to_sg(sg_name): - # Check if another process has added the volume to the - # sg while this process was waiting for the lock - if self.rest.is_volume_in_storagegroup( - serial_number, device_id, storagegroup_name): - LOG.info("Volume: %(volume_name)s is already part " - "of storage group %(sg_name)s.", - {'volume_name': volume_name, - 'sg_name': storagegroup_name}) - else: - self.rest.add_vol_to_sg(serial_number, sg_name, - device_id, extra_specs) - do_add_volume_to_sg(storagegroup_name) - - LOG.debug("Add volume to storagegroup took: %(delta)s H:MM:SS.", - {'delta': self.utils.get_time_delta(start_time, - time.time())}) - LOG.info("Added volume: %(vol_name)s to storage group %(sg_name)s.", - {'vol_name': volume_name, 'sg_name': storagegroup_name}) - - def add_volumes_to_storage_group( - self, serial_number, list_device_id, storagegroup_name, - extra_specs): - """Add a volume to a storage group. 
- - :param serial_number: array serial number - :param list_device_id: list of volume device id - :param storagegroup_name: storage group name - :param extra_specs: extra specifications - """ - if not list_device_id: - LOG.info("add_volumes_to_storage_group: No volumes to add") - return - start_time = time.time() - temp_device_id_list = list_device_id - - @coordination.synchronized("emc-sg-{sg_name}") - def do_add_volume_to_sg(sg_name): - # Check if another process has added any volume to the - # sg while this process was waiting for the lock - volume_list = self.rest.get_volumes_in_storage_group( - serial_number, storagegroup_name) - for volume in volume_list: - if volume in temp_device_id_list: - LOG.info("Volume: %(volume_name)s is already part " - "of storage group %(sg_name)s.", - {'volume_name': volume, - 'sg_name': storagegroup_name}) - # Remove this device id from the list - temp_device_id_list.remove(volume) - self.rest.add_vol_to_sg(serial_number, storagegroup_name, - temp_device_id_list, extra_specs) - do_add_volume_to_sg(storagegroup_name) - - LOG.debug("Add volumes to storagegroup took: %(delta)s H:MM:SS.", - {'delta': self.utils.get_time_delta(start_time, - time.time())}) - LOG.info("Added volumes to storage group %(sg_name)s.", - {'sg_name': storagegroup_name}) - - def remove_vol_from_storage_group( - self, serial_number, device_id, storagegroup_name, - volume_name, extra_specs): - """Remove a volume from a storage group. 
- - :param serial_number: the array serial number - :param device_id: the volume device id - :param storagegroup_name: the name of the storage group - :param volume_name: the volume name - :param extra_specs: the extra specifications - :raises: VolumeBackendAPIException - """ - start_time = time.time() - - self.rest.remove_vol_from_sg( - serial_number, storagegroup_name, device_id, extra_specs) - - LOG.debug("Remove volume from storagegroup took: %(delta)s H:MM:SS.", - {'delta': self.utils.get_time_delta(start_time, - time.time())}) - - check_vol = (self.rest.is_volume_in_storagegroup( - serial_number, device_id, storagegroup_name)) - if check_vol: - exception_message = (_( - "Failed to remove volume %(vol)s from SG: %(sg_name)s.") - % {'vol': volume_name, 'sg_name': storagegroup_name}) - LOG.error(exception_message) - raise exception.VolumeBackendAPIException( - data=exception_message) - - def remove_volumes_from_storage_group( - self, serial_number, list_of_device_ids, - storagegroup_name, extra_specs): - """Remove multiple volumes from a storage group. 
- - :param serial_number: the array serial number - :param list_of_device_ids: list of device ids - :param storagegroup_name: the name of the storage group - :param extra_specs: the extra specifications - :raises: VolumeBackendAPIException - """ - start_time = time.time() - - @coordination.synchronized("emc-sg-{sg_name}") - def do_remove_volumes_from_storage_group(sg_name): - self.rest.remove_vol_from_sg( - serial_number, storagegroup_name, - list_of_device_ids, extra_specs) - - LOG.debug("Remove volumes from storagegroup " - "took: %(delta)s H:MM:SS.", - {'delta': self.utils.get_time_delta(start_time, - time.time())}) - volume_list = self.rest.get_volumes_in_storage_group( - serial_number, storagegroup_name) - - for device_id in list_of_device_ids: - if device_id in volume_list: - exception_message = (_( - "Failed to remove device " - "with id %(dev_id)s from SG: %(sg_name)s.") - % {'dev_id': device_id, 'sg_name': storagegroup_name}) - LOG.error(exception_message) - raise exception.VolumeBackendAPIException( - data=exception_message) - return do_remove_volumes_from_storage_group(storagegroup_name) - - def find_initiator_names(self, connector): - """Check the connector object for initiators(ISCSI) or wwpns(FC). 
- - :param connector: the connector object - :returns: list -- list of found initiator names - :raises: VolumeBackendAPIException - """ - foundinitiatornames = [] - name = 'initiator name' - if self.protocol.lower() == utils.ISCSI and connector['initiator']: - foundinitiatornames.append(connector['initiator']) - elif self.protocol.lower() == utils.FC: - if 'wwpns' in connector and connector['wwpns']: - for wwn in connector['wwpns']: - foundinitiatornames.append(wwn) - name = 'world wide port names' - else: - msg = (_("FC is the protocol but wwpns are " - "not supplied by OpenStack.")) - LOG.error(msg) - raise exception.VolumeBackendAPIException(data=msg) - - if not foundinitiatornames: - msg = (_("Error finding %(name)s.") % {'name': name}) - LOG.error(msg) - raise exception.VolumeBackendAPIException(data=msg) - - LOG.debug("Found %(name)s: %(initiator)s.", - {'name': name, - 'initiator': foundinitiatornames}) - - return foundinitiatornames - - def _find_initiator_group(self, serial_number, initiator_names): - """Check to see if an initiator group already exists. - - NOTE: An initiator/wwn can only belong to one initiator group. - If we were to attempt to create one with an initiator/wwn that is - already belonging to another initiator group, it would fail. - :param serial_number: the array serial number - :param initiator_names: the list of initiator names - :returns: initiator group name -- string or None - """ - ig_name = None - init_list = self.rest.get_in_use_initiator_list_from_array( - serial_number) - for initiator in initiator_names: - found_init = [init for init in init_list if initiator in init] - if found_init: - ig_name = self.rest.get_initiator_group_from_initiator( - serial_number, found_init[0]) - break - return ig_name - - def create_masking_view( - self, serial_number, maskingview_name, storagegroup_name, - port_group_name, init_group_name, extra_specs): - """Create a new masking view. 
- - :param serial_number: the array serial number - :param maskingview_name: the masking view name - :param storagegroup_name: the storage group name - :param port_group_name: the port group - :param init_group_name: the initiator group - :param extra_specs: extra specifications - :return: error_message -- string or None - """ - error_message = None - try: - self.rest.create_masking_view( - serial_number, maskingview_name, storagegroup_name, - port_group_name, init_group_name, extra_specs) - - except Exception as e: - error_message = ("Error creating new masking view. Exception " - "received: %(e)s" % {'e': six.text_type(e)}) - return error_message - - def check_if_rollback_action_for_masking_required( - self, serial_number, device_id, rollback_dict): - """Rollback action for volumes with an associated service level. - - We need to be able to return the volume to the default storage group - if anything has gone wrong. The volume can also potentially belong to - a storage group that is not the default depending on where - the exception occurred. We also may need to clean up any unused - initiator groups. - :param serial_number: the array serial number - :param device_id: the device id - :param rollback_dict: the rollback dict - :return: error message -- string, or None - :raises: VolumeBackendAPIException - """ - message = None - # Check if ig has been created. If so, check for other - # masking views associated with the ig. If none, delete the ig. - self._check_ig_rollback( - serial_number, rollback_dict['init_group_name'], - rollback_dict['connector']) - try: - found_sg_name = ( - self.rest.get_storage_groups_from_volume( - serial_number, rollback_dict['device_id'])) - # Volume is not associated with any storage group so add - # it back to the default. 
- if not found_sg_name: - error_message = self._check_adding_volume_to_storage_group( - serial_number, device_id, - rollback_dict['default_sg_name'], - rollback_dict[utils.VOL_NAME], - rollback_dict[utils.EXTRA_SPECS]) - if error_message: - LOG.error(error_message) - message = (_("Rollback")) - elif 'isLiveMigration' in rollback_dict and ( - rollback_dict['isLiveMigration'] is True): - # Live migration case. - # Remove from nonfast storage group to fast sg - self.failed_live_migration(rollback_dict, found_sg_name, - rollback_dict[utils.EXTRA_SPECS]) - else: - LOG.info("The storage group found is %(found_sg_name)s.", - {'found_sg_name': found_sg_name}) - - # Check the name, see if it is the default storage group - # or another. - if found_sg_name != rollback_dict['default_sg_name']: - # Remove it from its current storage group and return it - # to its default masking view if slo is defined. - self.remove_and_reset_members( - serial_number, device_id, - rollback_dict['volume_name'], - rollback_dict['extra_specs']) - message = (_("Rollback - Volume in another storage " - "group besides default storage group.")) - except Exception as e: - error_message = (_( - "Rollback for Volume: %(volume_name)s has failed. " - "Please contact your system administrator to manually return " - "your volume to the default storage group for its slo. " - "Exception received: %(e)s") - % {'volume_name': rollback_dict['volume_name'], - 'e': six.text_type(e)}) - LOG.exception(error_message) - raise exception.VolumeBackendAPIException(data=error_message) - return message - - def _verify_initiator_group_from_masking_view( - self, serial_number, maskingview_name, maskingview_dict, - ig_from_mv, storagegroup_name, portgroup_name, extra_specs): - """Check that the initiator group contains the correct initiators. - - If using an existing masking view check that the initiator group - contains the correct initiators. 
If it does not contain the correct - initiators then we delete the initiator group from the masking view, - re-create it with the correct initiators and add it to the masking view - NOTE: VMAX does not support ModifyMaskingView so we must first - delete the masking view and recreate it. - :param serial_number: the array serial number - :param maskingview_name: name of the masking view - :param maskingview_dict: the masking view dict - :param ig_from_mv: the initiator group name - :param storagegroup_name: the storage group - :param portgroup_name: the port group - :param extra_specs: extra specifications - :return: bool, found_ig_from_connector - """ - connector = maskingview_dict['connector'] - initiator_names = self.find_initiator_names(connector) - found_ig_from_connector = self._find_initiator_group( - serial_number, initiator_names) - - if found_ig_from_connector != ig_from_mv: - check_ig = self.rest.get_initiator_group( - serial_number, initiator_group=ig_from_mv) - if check_ig: - if found_ig_from_connector is None: - # If the name of the current initiator group from the - # masking view matches the igGroupName supplied for the - # new group, the existing ig needs to be deleted before - # the new one with the correct initiators can be created. - if maskingview_dict['init_group_name'] == ig_from_mv: - # Masking view needs to be deleted before IG - # can be deleted. - self.rest.delete_masking_view( - serial_number, maskingview_name) - self.rest.delete_initiator_group( - serial_number, ig_from_mv) - found_ig_from_connector = ( - self._create_initiator_group( - serial_number, ig_from_mv, initiator_names, - extra_specs)) - if (found_ig_from_connector is not None and - storagegroup_name is not None and - portgroup_name is not None): - # Existing masking view (if still on the array) needs - # to be deleted before a new one can be created. 
- try: - self.rest.delete_masking_view( - serial_number, maskingview_name) - except Exception: - pass - error_message = ( - self.create_masking_view( - serial_number, maskingview_name, storagegroup_name, - portgroup_name, - maskingview_dict['init_group_name'], - extra_specs)) - if not error_message: - LOG.debug( - "The old masking view has been replaced: " - "%(maskingview_name)s.", - {'maskingview_name': maskingview_name}) - else: - LOG.error( - "One of the components of the original masking view " - "%(maskingview_name)s cannot be retrieved so " - "please contact your system administrator to check " - "that the correct initiator(s) are part of masking.", - {'maskingview_name': maskingview_name}) - return False - return True, found_ig_from_connector - - def _create_initiator_group( - self, serial_number, init_group_name, initiator_names, - extra_specs): - """Create a new initiator group. - - Given a list of initiators, create a new initiator group. - :param serial_number: array serial number - :param init_group_name: the name for the initiator group - :param initiator_names: initaitor names - :param extra_specs: the extra specifications - :return: the initiator group name - """ - self.rest.create_initiator_group( - serial_number, init_group_name, initiator_names, extra_specs) - return init_group_name - - def _check_ig_rollback( - self, serial_number, init_group_name, connector): - """Check if rollback action is required on an initiator group. - - If anything goes wrong on a masking view creation, we need to check if - the process created a now-stale initiator group before failing, i.e. - an initiator group a) matching the name used in the mv process and - b) not associated with any other masking views. - If a stale ig exists, delete the ig. 
- :param serial_number: the array serial number - :param init_group_name: the initiator group name - :param connector: the connector object - """ - initiator_names = self.find_initiator_names(connector) - found_ig_name = self._find_initiator_group( - serial_number, initiator_names) - if found_ig_name: - if found_ig_name == init_group_name: - host = init_group_name.split("-")[1] - LOG.debug("Searching for masking views associated with " - "%(init_group_name)s", - {'init_group_name': init_group_name}) - self._last_volume_delete_initiator_group( - serial_number, found_ig_name, host) - - @coordination.synchronized("emc-vol-{device_id}") - def remove_and_reset_members( - self, serial_number, device_id, volume_name, extra_specs, - reset=True, connector=None): - """This is called on a delete, unmap device or rollback. - - :param serial_number: the array serial number - :param device_id: the volume device id - :param volume_name: the volume name - :param extra_specs: additional info - :param reset: reset, return to original SG (optional) - :param connector: the connector object (optional) - """ - self._cleanup_deletion( - serial_number, device_id, volume_name, extra_specs, connector) - if reset: - self.add_volume_to_default_storage_group( - serial_number, device_id, volume_name, extra_specs) - - def _cleanup_deletion( - self, serial_number, device_id, volume_name, - extra_specs, connector): - """Prepare a volume for a delete operation. 
- - :param serial_number: the array serial number - :param device_id: the volume device id - :param volume_name: the volume name - :param extra_specs: the extra specifications - :param connector: the connector object - """ - storagegroup_names = (self.rest.get_storage_groups_from_volume( - serial_number, device_id)) - if storagegroup_names: - for sg_name in storagegroup_names: - self.remove_volume_from_sg( - serial_number, device_id, volume_name, sg_name, - extra_specs, connector) - - def remove_volume_from_sg( - self, serial_number, device_id, vol_name, storagegroup_name, - extra_specs, connector=None): - """Remove a volume from a storage group. - - :param serial_number: the array serial number - :param device_id: the volume device id - :param vol_name: the volume name - :param storagegroup_name: the storage group name - :param extra_specs: the extra specifications - :param connector: the connector object - """ - masking_list = self.rest.get_masking_views_from_storage_group( - serial_number, storagegroup_name) - if not masking_list: - LOG.debug("No masking views associated with storage group " - "%(sg_name)s", {'sg_name': storagegroup_name}) - - @coordination.synchronized("emc-sg-{sg_name}") - def do_remove_volume_from_sg(sg_name): - # Make sure volume hasn't been recently removed from the sg - is_vol = self.rest.is_volume_in_storagegroup( - serial_number, device_id, sg_name) - if is_vol: - num_vol_in_sg = self.rest.get_num_vols_in_sg( - serial_number, sg_name) - LOG.debug( - "There are %(num_vol)d volumes in the storage group " - "%(sg_name)s.", - {'num_vol': num_vol_in_sg, - 'sg_name': sg_name}) - - if num_vol_in_sg == 1: - # Last volume in the storage group - delete sg. 
- self._last_vol_in_sg( - serial_number, device_id, vol_name, sg_name, - extra_specs) - else: - # Not the last volume so remove it from storage group - self._multiple_vols_in_sg( - serial_number, device_id, sg_name, vol_name, - extra_specs) - else: - LOG.info("Volume with device_id %(dev)s is no longer a " - "member of %(sg)s.", - {'dev': device_id, 'sg': sg_name}) - - return do_remove_volume_from_sg(storagegroup_name) - else: - # Need to lock masking view when we are locking the storage - # group to avoid possible deadlock situations from concurrent - # processes - masking_name = masking_list[0] - parent_sg_name = self.rest.get_element_from_masking_view( - serial_number, masking_name, storagegroup=True) - - @coordination.synchronized("emc-mv-{parent_name}") - @coordination.synchronized("emc-mv-{mv_name}") - @coordination.synchronized("emc-sg-{sg_name}") - def do_remove_volume_from_sg(mv_name, sg_name, parent_name): - # Make sure volume hasn't been recently removed from the sg - is_vol = self.rest.is_volume_in_storagegroup( - serial_number, device_id, sg_name) - if is_vol: - num_vol_in_sg = self.rest.get_num_vols_in_sg( - serial_number, sg_name) - LOG.debug( - "There are %(num_vol)d volumes in the storage group " - "%(sg_name)s associated with %(mv_name)s. Parent " - "storagegroup is %(parent)s.", - {'num_vol': num_vol_in_sg, 'sg_name': sg_name, - 'mv_name': mv_name, 'parent': parent_name}) - - if num_vol_in_sg == 1: - # Last volume in the storage group - delete sg. 
- self._last_vol_in_sg( - serial_number, device_id, vol_name, sg_name, - extra_specs, connector) - else: - # Not the last volume so remove it from storage group - self._multiple_vols_in_sg( - serial_number, device_id, sg_name, vol_name, - extra_specs) - else: - LOG.info("Volume with device_id %(dev)s is no longer a " - "member of %(sg)s", - {'dev': device_id, 'sg': sg_name}) - - return do_remove_volume_from_sg(masking_name, storagegroup_name, - parent_sg_name) - - def _last_vol_in_sg(self, serial_number, device_id, volume_name, - storagegroup_name, extra_specs, connector=None): - """Steps if the volume is the last in a storage group. - - 1. Check if the volume is in a masking view. - 2. If it is in a masking view, check if it is the last volume in the - masking view or just this child storage group. - 3. If it is last in the masking view, delete the masking view, - delete the initiator group if there are no other masking views - associated with it, and delete the both the current storage group - and its parent group. - 4. Otherwise, remove the volume and delete the child storage group. - 5. If it is not in a masking view, delete the storage group. - :param serial_number: array serial number - :param device_id: volume device id - :param volume_name: volume name - :param storagegroup_name: storage group name - :param extra_specs: extra specifications - :param connector: the connector object - :return: status -- bool - """ - LOG.debug("Only one volume remains in storage group " - "%(sgname)s. 
Driver will attempt cleanup.", - {'sgname': storagegroup_name}) - maskingview_list = self.rest.get_masking_views_from_storage_group( - serial_number, storagegroup_name) - if not bool(maskingview_list): - status = self._last_vol_no_masking_views( - serial_number, storagegroup_name, device_id, volume_name, - extra_specs) - else: - status = self._last_vol_masking_views( - serial_number, storagegroup_name, maskingview_list, - device_id, volume_name, extra_specs, connector) - return status - - def _last_vol_no_masking_views(self, serial_number, storagegroup_name, - device_id, volume_name, extra_specs): - """Remove the last vol from an sg not associated with an mv. - - Helper function for removing the last vol from a storage group - which is not associated with a masking view. - :param serial_number: the array serial number - :param storagegroup_name: the storage group name - :param device_id: the device id - :param volume_name: the volume name - :param extra_specs: the extra specifications - :return: status -- bool - """ - # Check if storage group is a child sg: - parent_sg = self.get_parent_sg_from_child( - serial_number, storagegroup_name) - # Delete the storage group. - if parent_sg is None: - self.rest.delete_storage_group(serial_number, storagegroup_name) - status = True - else: - num_vols_parent = self.rest.get_num_vols_in_sg( - serial_number, parent_sg) - if num_vols_parent == 1: - self._delete_cascaded_storage_groups( - serial_number, storagegroup_name, parent_sg) - else: - self._remove_last_vol_and_delete_sg( - serial_number, device_id, volume_name, - storagegroup_name, extra_specs, parent_sg) - status = True - return status - - def _last_vol_masking_views( - self, serial_number, storagegroup_name, maskingview_list, - device_id, volume_name, extra_specs, connector): - """Remove the last vol from an sg associated with masking views. - - Helper function for removing the last vol from a storage group - which is associated with one or more masking views. 
- :param serial_number: the array serial number - :param storagegroup_name: the storage group name - :param maskingview_list: the liast of masking views - :param device_id: the device id - :param volume_name: the volume name - :param extra_specs: the extra specifications - :return: status -- bool - """ - status = False - for mv in maskingview_list: - num_vols_in_mv, parent_sg_name = ( - self._get_num_vols_from_mv(serial_number, mv)) - # If the volume is the last in the masking view, full cleanup - if num_vols_in_mv == 1: - def do_delete_mv_ig_and_sg(): - return self._delete_mv_ig_and_sg( - serial_number, mv, storagegroup_name, - parent_sg_name, connector) - - do_delete_mv_ig_and_sg() - else: - self._remove_last_vol_and_delete_sg( - serial_number, device_id, volume_name, - storagegroup_name, extra_specs, parent_sg_name) - status = True - return status - - def get_parent_sg_from_child(self, serial_number, storagegroup_name): - """Given a storage group name, get its parent storage group, if any. - - :param serial_number: the array serial number - :param storagegroup_name: the name of the storage group - :return: the parent storage group name, or None - """ - parent_sg_name = None - storagegroup = self.rest.get_storage_group( - serial_number, storagegroup_name) - if storagegroup and storagegroup.get('parent_storage_group'): - parent_sg_name = storagegroup['parent_storage_group'][0] - return parent_sg_name - - def _get_num_vols_from_mv(self, serial_number, maskingview_name): - """Get the total number of volumes associated with a masking view. 
- - :param serial_number: the array serial number - :param maskingview_name: the name of the masking view - :return: num_vols, parent_sg_name - """ - parent_sg_name = self.rest.get_element_from_masking_view( - serial_number, maskingview_name, storagegroup=True) - num_vols = self.rest.get_num_vols_in_sg(serial_number, parent_sg_name) - return num_vols, parent_sg_name - - def _multiple_vols_in_sg(self, serial_number, device_id, storagegroup_name, - volume_name, extra_specs): - """Remove the volume from the SG. - - If the volume is not the last in the storage group, - remove the volume from the SG and leave the sg on the array. - :param serial_number: array serial number - :param device_id: volume device id - :param volume_name: volume name - :param storagegroup_name: storage group name - :param extra_specs: extra specifications - """ - self.remove_vol_from_storage_group( - serial_number, device_id, storagegroup_name, - volume_name, extra_specs) - - LOG.debug( - "RemoveMembers for volume %(volume_name)s completed " - "successfully.", {'volume_name': volume_name}) - - num_vol_in_sg = self.rest.get_num_vols_in_sg( - serial_number, storagegroup_name) - LOG.debug("There are %(num_vol)d volumes remaining in the storage " - "group %(sg_name)s.", - {'num_vol': num_vol_in_sg, - 'sg_name': storagegroup_name}) - - def _delete_cascaded_storage_groups(self, serial_number, child_sg_name, - parent_sg_name): - """Delete a child and parent storage groups. 
- - :param serial_number: the array serial number - :param child_sg_name: the child storage group name - :param parent_sg_name: the parent storage group name - """ - self.rest.delete_storage_group(serial_number, parent_sg_name) - self.rest.delete_storage_group(serial_number, child_sg_name) - - LOG.debug("Storage Groups %(storagegroup_name)s and %(parent)s " - "successfully deleted.", - {'storagegroup_name': child_sg_name, - 'parent': parent_sg_name}) - - def _delete_mv_ig_and_sg( - self, serial_number, masking_view, storagegroup_name, - parent_sg_name, connector): - """Delete the masking view, storage groups and initiator group. - - :param serial_number: array serial number - :param masking_view: masking view name - :param storagegroup_name: storage group name - :param parent_sg_name: the parent storage group name - :param connector: the connector object - """ - host = self.utils.get_host_short_name(connector['host']) - - initiatorgroup = self.rest.get_element_from_masking_view( - serial_number, masking_view, host=True) - self._last_volume_delete_masking_view(serial_number, masking_view) - self._last_volume_delete_initiator_group( - serial_number, initiatorgroup, host) - self._delete_cascaded_storage_groups(serial_number, storagegroup_name, - parent_sg_name) - - def _last_volume_delete_masking_view(self, serial_number, masking_view): - """Delete the masking view. - - Delete the masking view if the volume is the last one in the - storage group. 
- :param serial_number: the array serial number - :param masking_view: masking view name - """ - LOG.debug("Last volume in the storage group, deleting masking view " - "%(maskingview_name)s.", {'maskingview_name': masking_view}) - self.rest.delete_masking_view(serial_number, masking_view) - LOG.info("Masking view %(maskingview)s successfully deleted.", - {'maskingview': masking_view}) - - def add_volume_to_default_storage_group( - self, serial_number, device_id, volume_name, extra_specs): - """Return volume to its default storage group. - - :param serial_number: the array serial number - :param device_id: the volume device id - :param volume_name: the volume name - :param extra_specs: the extra specifications - """ - do_disable_compression = self.utils.is_compression_disabled( - extra_specs) - rep_enabled = self.utils.is_replication_enabled(extra_specs) - storagegroup_name = self.get_or_create_default_storage_group( - serial_number, extra_specs[utils.SRP], extra_specs[utils.SLO], - extra_specs[utils.WORKLOAD], extra_specs, do_disable_compression, - rep_enabled) - - self._check_adding_volume_to_storage_group( - serial_number, device_id, storagegroup_name, volume_name, - extra_specs) - - def get_or_create_default_storage_group( - self, serial_number, srp, slo, workload, extra_specs, - do_disable_compression=False, is_re=False): - """Get or create a default storage group. 
- - :param serial_number: the array serial number - :param srp: the SRP name - :param slo: the SLO - :param workload: the workload - :param extra_specs: extra specifications - :param do_disable_compression: flag for compression - :param is_re: is replication enabled - :returns: storagegroup_name - :raises: VolumeBackendAPIException - """ - storagegroup, storagegroup_name = ( - self.rest.get_vmax_default_storage_group( - serial_number, srp, slo, workload, do_disable_compression, - is_re)) - if storagegroup is None: - self.provision.create_storage_group( - serial_number, storagegroup_name, srp, slo, workload, - extra_specs) - else: - # Check that SG is not part of a masking view - LOG.info("Using existing default storage group") - masking_views = self.rest.get_masking_views_from_storage_group( - serial_number, storagegroup_name) - if masking_views: - exception_message = (_( - "Default storage group %(sg_name)s is part of masking " - "views %(mvs)s. Please remove it from all masking views") - % {'sg_name': storagegroup_name, 'mvs': masking_views}) - LOG.error(exception_message) - raise exception.VolumeBackendAPIException( - data=exception_message) - # If qos exists, update storage group to reflect qos parameters - if 'qos' in extra_specs: - self.rest.update_storagegroup_qos( - serial_number, storagegroup_name, extra_specs) - - return storagegroup_name - - def _remove_last_vol_and_delete_sg( - self, serial_number, device_id, volume_name, - storagegroup_name, extra_specs, parent_sg_name=None): - """Remove the last volume and delete the storage group. - - If the storage group is a child of another storage group, - it must be removed from the parent before deletion. 
- :param serial_number: the array serial number - :param device_id: the volume device id - :param volume_name: the volume name - :param storagegroup_name: the sg name - :param extra_specs: extra specifications - :param parent_sg_name: the parent sg name - """ - self.remove_vol_from_storage_group( - serial_number, device_id, storagegroup_name, volume_name, - extra_specs) - - LOG.debug("Remove the last volume %(volumeName)s completed " - "successfully.", {'volumeName': volume_name}) - if parent_sg_name: - self.rest.remove_child_sg_from_parent_sg( - serial_number, storagegroup_name, parent_sg_name, - extra_specs) - - self.rest.delete_storage_group(serial_number, storagegroup_name) - - def _last_volume_delete_initiator_group( - self, serial_number, initiatorgroup_name, host): - """Delete the initiator group. - - Delete the Initiator group if it has been created by the VMAX driver, - and if there are no masking views associated with it. - :param serial_number: the array serial number - :param initiatorgroup_name: initiator group name - :param host: the short name of the host - """ - protocol = self.utils.get_short_protocol_type(self.protocol) - default_ig_name = ("OS-%(shortHostName)s-%(protocol)s-IG" - % {'shortHostName': host, - 'protocol': protocol}) - - if initiatorgroup_name == default_ig_name: - maskingview_names = ( - self.rest.get_masking_views_by_initiator_group( - serial_number, initiatorgroup_name)) - if not maskingview_names: - LOG.debug( - "Last volume associated with the initiator group - " - "deleting the associated initiator group " - "%(initiatorgroup_name)s.", - {'initiatorgroup_name': initiatorgroup_name}) - self.rest.delete_initiator_group( - serial_number, initiatorgroup_name) - else: - LOG.warning("Initiator group %(ig_name)s is associated with " - "masking views and can't be deleted. 
Number of " - "associated masking view is: %(nmv)d.", - {'ig_name': initiatorgroup_name, - 'nmv': len(maskingview_names)}) - else: - LOG.warning("Initiator group %(ig_name)s was " - "not created by the VMAX driver so will " - "not be deleted by the VMAX driver.", - {'ig_name': initiatorgroup_name}) - - def pre_live_migration(self, source_nf_sg, source_sg, source_parent_sg, - is_source_nf_sg, device_info_dict, extra_specs): - """Run before any live migration operation. - - :param source_nf_sg: The non fast storage group - :param source_sg: The source storage group - :param source_parent_sg: The parent storage group - :param is_source_nf_sg: if the non fast storage group already exists - :param device_info_dict: the data dict - :param extra_specs: extra specifications - """ - if is_source_nf_sg is False: - storage_group = self.rest.get_storage_group( - device_info_dict['array'], source_nf_sg) - if storage_group is None: - self.provision.create_storage_group( - device_info_dict['array'], source_nf_sg, None, None, None, - extra_specs) - self.add_child_sg_to_parent_sg( - device_info_dict['array'], source_nf_sg, source_parent_sg, - extra_specs, default_version=False) - self.move_volume_between_storage_groups( - device_info_dict['array'], device_info_dict['device_id'], - source_sg, source_nf_sg, extra_specs) - - def post_live_migration(self, device_info_dict, extra_specs): - """Run after every live migration operation. 
- - :param device_info_dict: : the data dict - :param extra_specs: extra specifications - """ - array = device_info_dict['array'] - source_sg = device_info_dict['source_sg'] - # Delete fast storage group - num_vol_in_sg = self.rest.get_num_vols_in_sg( - array, source_sg) - if num_vol_in_sg == 0: - self.rest.remove_child_sg_from_parent_sg( - array, source_sg, device_info_dict['source_parent_sg'], - extra_specs) - self.rest.delete_storage_group(array, source_sg) - - def failed_live_migration(self, device_info_dict, - source_storage_group_list, extra_specs): - """This is run in the event of a failed live migration operation. - - :param device_info_dict: the data dict - :param source_storage_group_list: list of storage groups associated - with the device - :param extra_specs: extra specifications - """ - array = device_info_dict['array'] - source_nf_sg = device_info_dict['source_nf_sg'] - source_sg = device_info_dict['source_sg'] - source_parent_sg = device_info_dict['source_parent_sg'] - device_id = device_info_dict['device_id'] - for sg in source_storage_group_list: - if sg not in [source_sg, source_nf_sg]: - self.remove_volume_from_sg( - array, device_id, device_info_dict['volume_name'], sg, - extra_specs) - if source_nf_sg in source_storage_group_list: - self.move_volume_between_storage_groups( - array, device_id, source_nf_sg, - source_sg, extra_specs) - is_descendant = self.rest.is_child_sg_in_parent_sg( - array, source_nf_sg, source_parent_sg) - if is_descendant: - self.rest.remove_child_sg_from_parent_sg( - array, source_nf_sg, source_parent_sg, extra_specs) - # Delete non fast storage group - self.rest.delete_storage_group(array, source_nf_sg) diff --git a/cinder/volume/drivers/dell_emc/vmax/provision.py b/cinder/volume/drivers/dell_emc/vmax/provision.py deleted file mode 100644 index cd7b3e5d3..000000000 --- a/cinder/volume/drivers/dell_emc/vmax/provision.py +++ /dev/null @@ -1,606 +0,0 @@ -# Copyright (c) 2017 Dell Inc. or its subsidiaries. 
-# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import time - -from oslo_log import log as logging -from oslo_service import loopingcall - -from cinder import coordination -from cinder import exception -from cinder.i18n import _ -from cinder.volume.drivers.dell_emc.vmax import utils - -LOG = logging.getLogger(__name__) - -WRITE_DISABLED = "Write Disabled" -UNLINK_INTERVAL = 15 -UNLINK_RETRIES = 30 - - -class VMAXProvision(object): - """Provisioning Class for Dell EMC VMAX volume drivers. - - It supports VMAX arrays. - """ - def __init__(self, rest): - self.utils = utils.VMAXUtils() - self.rest = rest - - def create_storage_group( - self, array, storagegroup_name, srp, slo, workload, - extra_specs, do_disable_compression=False): - """Create a new storage group. 
- - :param array: the array serial number - :param storagegroup_name: the group name (String) - :param srp: the SRP (String) - :param slo: the SLO (String) - :param workload: the workload (String) - :param extra_specs: additional info - :param do_disable_compression: disable compression flag - :returns: storagegroup - storage group object - """ - start_time = time.time() - - @coordination.synchronized("emc-sg-{storage_group}") - def do_create_storage_group(storage_group): - storagegroup = self.rest.create_storage_group( - array, storage_group, srp, slo, workload, extra_specs, - do_disable_compression) - - LOG.debug("Create storage group took: %(delta)s H:MM:SS.", - {'delta': self.utils.get_time_delta(start_time, - time.time())}) - LOG.info("Storage group %(sg)s created successfully.", - {'sg': storagegroup_name}) - return storagegroup - - return do_create_storage_group(storagegroup_name) - - def create_volume_from_sg(self, array, volume_name, storagegroup_name, - volume_size, extra_specs): - """Create a new volume in the given storage group. - - :param array: the array serial number - :param volume_name: the volume name (String) - :param storagegroup_name: the storage group name - :param volume_size: volume size (String) - :param extra_specs: the extra specifications - :returns: dict -- volume_dict - the volume dict - """ - @coordination.synchronized("emc-sg-{storage_group}") - def do_create_volume_from_sg(storage_group): - start_time = time.time() - - volume_dict = self.rest.create_volume_from_sg( - array, volume_name, storage_group, - volume_size, extra_specs) - - LOG.debug("Create volume from storage group " - "took: %(delta)s H:MM:SS.", - {'delta': self.utils.get_time_delta(start_time, - time.time())}) - return volume_dict - return do_create_volume_from_sg(storagegroup_name) - - def delete_volume_from_srp(self, array, device_id, volume_name): - """Delete a volume from the srp. 
- - :param array: the array serial number - :param device_id: the volume device id - :param volume_name: the volume name - """ - start_time = time.time() - LOG.debug("Delete volume %(volume_name)s from srp.", - {'volume_name': volume_name}) - self.rest.delete_volume(array, device_id) - LOG.debug("Delete volume took: %(delta)s H:MM:SS.", - {'delta': self.utils.get_time_delta( - start_time, time.time())}) - - def create_volume_snapvx(self, array, source_device_id, - snap_name, extra_specs): - """Create a snapVx of a volume. - - :param array: the array serial number - :param source_device_id: source volume device id - :param snap_name: the snapshot name - :param extra_specs: the extra specifications - """ - start_time = time.time() - LOG.debug("Create Snap Vx snapshot of: %(source)s.", - {'source': source_device_id}) - self.rest.create_volume_snap( - array, snap_name, source_device_id, extra_specs) - LOG.debug("Create volume snapVx took: %(delta)s H:MM:SS.", - {'delta': self.utils.get_time_delta(start_time, - time.time())}) - - def create_volume_replica( - self, array, source_device_id, target_device_id, - snap_name, extra_specs, create_snap=False): - """Create a snap vx of a source and copy to a target. 
- - :param array: the array serial number - :param source_device_id: source volume device id - :param target_device_id: target volume device id - :param snap_name: the name for the snap shot - :param extra_specs: extra specifications - :param create_snap: Flag for create snapvx - """ - start_time = time.time() - if create_snap: - self.create_volume_snapvx(array, source_device_id, - snap_name, extra_specs) - # Link source to target - self.rest.modify_volume_snap( - array, source_device_id, target_device_id, snap_name, - extra_specs, link=True) - - LOG.debug("Create element replica took: %(delta)s H:MM:SS.", - {'delta': self.utils.get_time_delta(start_time, - time.time())}) - - def break_replication_relationship( - self, array, target_device_id, source_device_id, snap_name, - extra_specs): - """Unlink a snapshot from its target volume. - - :param array: the array serial number - :param source_device_id: source volume device id - :param target_device_id: target volume device id - :param snap_name: the name for the snap shot - :param extra_specs: extra specifications - """ - LOG.debug("Break snap vx link relationship between: %(src)s " - "and: %(tgt)s.", - {'src': source_device_id, 'tgt': target_device_id}) - - self._unlink_volume(array, source_device_id, target_device_id, - snap_name, extra_specs) - - def _unlink_volume( - self, array, source_device_id, target_device_id, snap_name, - extra_specs): - """Unlink a target volume from its source volume. - - :param array: the array serial number - :param source_device_id: the source device id - :param target_device_id: the target device id - :param snap_name: the snap name - :param extra_specs: extra specifications - :return: return code - """ - - def _unlink_vol(): - """Called at an interval until the synchronization is finished. 
- - :raises: loopingcall.LoopingCallDone - """ - retries = kwargs['retries'] - try: - kwargs['retries'] = retries + 1 - if not kwargs['modify_vol_success']: - self.rest.modify_volume_snap( - array, source_device_id, target_device_id, snap_name, - extra_specs, unlink=True) - kwargs['modify_vol_success'] = True - except exception.VolumeBackendAPIException: - pass - - if kwargs['retries'] > UNLINK_RETRIES: - LOG.error("_unlink_volume failed after %(retries)d " - "tries.", {'retries': retries}) - raise loopingcall.LoopingCallDone(retvalue=30) - if kwargs['modify_vol_success']: - raise loopingcall.LoopingCallDone() - - kwargs = {'retries': 0, - 'modify_vol_success': False} - timer = loopingcall.FixedIntervalLoopingCall(_unlink_vol) - rc = timer.start(interval=UNLINK_INTERVAL).wait() - return rc - - def delete_volume_snap(self, array, snap_name, source_device_id): - """Delete a snapVx snapshot of a volume. - - :param array: the array serial number - :param snap_name: the snapshot name - :param source_device_id: the source device id - """ - LOG.debug("Delete SnapVx: %(snap_name)s for volume %(vol)s.", - {'vol': source_device_id, 'snap_name': snap_name}) - self.rest.delete_volume_snap(array, snap_name, source_device_id) - - def delete_temp_volume_snap(self, array, snap_name, source_device_id): - """Delete the temporary snapshot created for clone operations. - - There can be instances where the source and target both attempt to - delete a temp snapshot simultaneously, so we must lock the snap and - then double check it is on the array. 
- :param array: the array serial number - :param snap_name: the snapshot name - :param source_device_id: the source device id - """ - - @coordination.synchronized("emc-snapvx-{snapvx_name}") - def do_delete_temp_snap(snapvx_name): - # Ensure snap has not been recently deleted - if self.rest.get_volume_snap( - array, source_device_id, snapvx_name): - self.delete_volume_snap(array, snapvx_name, source_device_id) - - do_delete_temp_snap(snap_name) - - def delete_volume_snap_check_for_links(self, array, snap_name, - source_device, extra_specs): - """Check if a snap has any links before deletion. - - If a snapshot has any links, break the replication relationship - before deletion. - :param array: the array serial number - :param snap_name: the snapshot name - :param source_device: the source device id - :param extra_specs: the extra specifications - """ - LOG.debug("Check for linked devices to SnapVx: %(snap_name)s " - "for volume %(vol)s.", - {'vol': source_device, 'snap_name': snap_name}) - linked_list = self.rest.get_snap_linked_device_list( - array, source_device, snap_name) - for link in linked_list: - target_device = link['targetDevice'] - self.break_replication_relationship( - array, target_device, source_device, snap_name, extra_specs) - self.delete_volume_snap(array, snap_name, source_device) - - def extend_volume(self, array, device_id, new_size, extra_specs): - """Extend a volume. - - :param array: the array serial number - :param device_id: the volume device id - :param new_size: the new size (GB) - :param extra_specs: the extra specifications - :returns: status_code - """ - start_time = time.time() - self.rest.extend_volume(array, device_id, new_size, extra_specs) - LOG.debug("Extend VMAX volume took: %(delta)s H:MM:SS.", - {'delta': self.utils.get_time_delta(start_time, - time.time())}) - - def get_srp_pool_stats(self, array, array_info): - """Get the srp capacity stats. 
- - :param array: the array serial number - :param array_info: the array dict - :returns: total_capacity_gb - :returns: remaining_capacity_gb - :returns: subscribed_capacity_gb - :returns: array_reserve_percent - :returns: wlp_enabled - """ - total_capacity_gb = 0 - remaining_capacity_gb = 0 - allocated_capacity_gb = None - subscribed_capacity_gb = 0 - array_reserve_percent = 0 - wlp_enabled = False - srp = array_info['srpName'] - LOG.debug( - "Retrieving capacity for srp %(srpName)s on array %(array)s.", - {'srpName': srp, 'array': array}) - - srp_details = self.rest.get_srp_by_name(array, srp) - if not srp_details: - LOG.error("Unable to retrieve srp instance of %(srpName)s on " - "array %(array)s.", - {'srpName': srp, 'array': array}) - return 0, 0, 0, 0, False - try: - total_capacity_gb = srp_details['total_usable_cap_gb'] - allocated_capacity_gb = srp_details['total_allocated_cap_gb'] - subscribed_capacity_gb = srp_details['total_subscribed_cap_gb'] - remaining_capacity_gb = float( - total_capacity_gb - allocated_capacity_gb) - array_reserve_percent = srp_details['reserved_cap_percent'] - except KeyError: - pass - - total_slo_capacity = ( - self._get_remaining_slo_capacity_wlp( - array, srp, array_info)) - if total_slo_capacity != -1 and allocated_capacity_gb: - remaining_capacity_gb = float( - total_slo_capacity - allocated_capacity_gb) - wlp_enabled = True - else: - LOG.debug( - "Remaining capacity %(remaining_capacity_gb)s " - "GBs is determined from SRP capacity " - "and not the SLO capacity. Performance may " - "not be what you expect.", - {'remaining_capacity_gb': remaining_capacity_gb}) - - return (total_capacity_gb, remaining_capacity_gb, - subscribed_capacity_gb, array_reserve_percent, wlp_enabled) - - def _get_remaining_slo_capacity_wlp(self, array, srp, array_info): - """Get the remaining capacity of the SLO/ workload combination. - - This is derived from the WLP portion of Unisphere. Please - see the UniSphere doc and the readme doc for details. 
- :param array: the array serial number - :param srp: the srp name - :param array_info: array info dict - :returns: remaining_capacity - """ - remaining_capacity = -1 - if array_info['SLO']: - headroom_capacity = self.rest.get_headroom_capacity( - array, srp, array_info['SLO'], array_info['Workload']) - if headroom_capacity: - remaining_capacity = headroom_capacity - LOG.debug("Received remaining SLO Capacity %(remaining)s GBs " - "for SLO %(SLO)s and workload %(workload)s.", - {'remaining': remaining_capacity, - 'SLO': array_info['SLO'], - 'workload': array_info['Workload']}) - return remaining_capacity - - def verify_slo_workload(self, array, slo, workload, srp): - """Check if SLO and workload values are valid. - - :param array: the array serial number - :param slo: Service Level Object e.g bronze - :param workload: workload e.g DSS - :param srp: the storage resource pool name - :returns: boolean - """ - is_valid_slo, is_valid_workload = False, False - - if workload and workload.lower() == 'none': - workload = None - - if not workload: - is_valid_workload = True - - if slo and slo.lower() == 'none': - slo = None - - valid_slos = self.rest.get_slo_list(array) - valid_workloads = self.rest.get_workload_settings(array) - for valid_slo in valid_slos: - if slo == valid_slo: - is_valid_slo = True - break - - for valid_workload in valid_workloads: - if workload == valid_workload: - is_valid_workload = True - break - - if not slo: - is_valid_slo = True - if workload: - is_valid_workload = False - - if not is_valid_slo: - LOG.error( - "SLO: %(slo)s is not valid. Valid values are: " - "%(valid_slos)s.", {'slo': slo, 'valid_slos': valid_slos}) - - if not is_valid_workload: - LOG.error( - "Workload: %(workload)s is not valid. Valid values are " - "%(valid_workloads)s. 
Note you cannot " - "set a workload without an SLO.", - {'workload': workload, 'valid_workloads': valid_workloads}) - - return is_valid_slo, is_valid_workload - - def get_slo_workload_settings_from_storage_group( - self, array, sg_name): - """Get slo and workload settings from a storage group. - - :param array: the array serial number - :param sg_name: the storage group name - :returns: storage group slo settings - """ - slo = 'NONE' - workload = 'NONE' - storage_group = self.rest.get_storage_group(array, sg_name) - if storage_group: - try: - slo = storage_group['slo'] - workload = storage_group['workload'] - except KeyError: - pass - else: - exception_message = (_( - "Could not retrieve storage group %(sg_name)%. ") % - {'sg_name': sg_name}) - LOG.error(exception_message) - raise exception.VolumeBackendAPIException(data=exception_message) - return '%(slo)s+%(workload)s' % {'slo': slo, 'workload': workload} - - def break_rdf_relationship(self, array, device_id, target_device, - rdf_group, rep_extra_specs, state): - """Break the rdf relationship between a pair of devices. - - :param array: the array serial number - :param device_id: the source device id - :param target_device: target device id - :param rdf_group: the rdf group number - :param rep_extra_specs: replication extra specs - :param state: the state of the rdf pair - """ - LOG.info("Splitting rdf pair: source device: %(src)s " - "target device: %(tgt)s.", - {'src': device_id, 'tgt': target_device}) - if state == 'Synchronized': - self.rest.modify_rdf_device_pair( - array, device_id, rdf_group, rep_extra_specs, split=True) - LOG.info("Deleting rdf pair: source device: %(src)s " - "target device: %(tgt)s.", - {'src': device_id, 'tgt': target_device}) - self.rest.delete_rdf_pair(array, device_id, rdf_group) - - def failover_volume(self, array, device_id, rdf_group, - extra_specs, local_vol_state, failover): - """Failover or back a volume pair. 
- - :param array: the array serial number - :param device_id: the source device id - :param rdf_group: the rdf group number - :param extra_specs: extra specs - :param local_vol_state: the local volume state - :param failover: flag to indicate failover or failback -- bool - """ - if local_vol_state == WRITE_DISABLED: - LOG.info("Volume %(dev)s is already failed over.", - {'dev': device_id}) - return - if failover: - action = "Failing over" - else: - action = "Failing back" - LOG.info("%(action)s rdf pair: source device: %(src)s ", - {'action': action, 'src': device_id}) - self.rest.modify_rdf_device_pair( - array, device_id, rdf_group, extra_specs, split=False) - - def create_volume_group(self, array, group_name, extra_specs): - """Create a generic volume group. - - :param array: the array serial number - :param group_name: the name of the group - :param extra_specs: the extra specifications - :returns: volume_group - """ - return self.create_storage_group(array, group_name, - None, None, None, extra_specs) - - def create_group_replica( - self, array, source_group, snap_name, extra_specs): - """Create a replica (snapVx) of a volume group. - - :param array: the array serial number - :param source_group: the source group name - :param snap_name: the name for the snap shot - :param extra_specs: extra specifications - """ - LOG.debug("Creating Snap Vx snapshot of storage group: %(srcGroup)s.", - {'srcGroup': source_group}) - - # Create snapshot - self.rest.create_storagegroup_snap( - array, source_group, snap_name, extra_specs) - - def delete_group_replica(self, array, snap_name, - source_group_name): - """Delete the snapshot. 
- - :param array: the array serial number - :param snap_name: the name for the snap shot - :param source_group_name: the source group name - """ - # Delete snapvx snapshot - LOG.debug("Deleting Snap Vx snapshot: source group: %(srcGroup)s " - "snapshot: %(snap_name)s.", - {'srcGroup': source_group_name, - 'snap_name': snap_name}) - # The check for existence of snapshot has already happened - # So we just need to delete the snapshot - self.rest.delete_storagegroup_snap(array, snap_name, source_group_name) - - def link_and_break_replica(self, array, source_group_name, - target_group_name, snap_name, extra_specs, - delete_snapshot=False): - """Links a group snap and breaks the relationship. - - :param array: the array serial - :param source_group_name: the source group name - :param target_group_name: the target group name - :param snap_name: the snapshot name - :param extra_specs: extra specifications - :param delete_snapshot: delete snapshot flag - """ - LOG.debug("Linking Snap Vx snapshot: source group: %(srcGroup)s " - "targetGroup: %(tgtGroup)s.", - {'srcGroup': source_group_name, - 'tgtGroup': target_group_name}) - # Link the snapshot - self.rest.modify_storagegroup_snap( - array, source_group_name, target_group_name, snap_name, - extra_specs, link=True) - # Unlink the snapshot - LOG.debug("Unlinking Snap Vx snapshot: source group: %(srcGroup)s " - "targetGroup: %(tgtGroup)s.", - {'srcGroup': source_group_name, - 'tgtGroup': target_group_name}) - self._unlink_group(array, source_group_name, - target_group_name, snap_name, extra_specs) - # Delete the snapshot if necessary - if delete_snapshot: - LOG.debug("Deleting Snap Vx snapshot: source group: %(srcGroup)s " - "snapshot: %(snap_name)s.", - {'srcGroup': source_group_name, - 'snap_name': snap_name}) - self.rest.delete_storagegroup_snap(array, snap_name, - source_group_name) - - def _unlink_group( - self, array, source_group_name, target_group_name, snap_name, - extra_specs): - """Unlink a target group from it's 
source group. - - :param array: the array serial number - :param source_group_name: the source group name - :param target_group_name: the target device name - :param snap_name: the snap name - :param extra_specs: extra specifications - :returns: return code - """ - - def _unlink_grp(): - """Called at an interval until the synchronization is finished. - - :raises: loopingcall.LoopingCallDone - """ - retries = kwargs['retries'] - try: - kwargs['retries'] = retries + 1 - if not kwargs['modify_grp_snap_success']: - self.rest.modify_storagegroup_snap( - array, source_group_name, target_group_name, - snap_name, extra_specs, unlink=True) - kwargs['modify_grp_snap_success'] = True - except exception.VolumeBackendAPIException: - pass - - if kwargs['retries'] > UNLINK_RETRIES: - LOG.error("_unlink_grp failed after %(retries)d " - "tries.", {'retries': retries}) - raise loopingcall.LoopingCallDone(retvalue=30) - if kwargs['modify_grp_snap_success']: - raise loopingcall.LoopingCallDone() - - kwargs = {'retries': 0, - 'modify_grp_snap_success': False} - timer = loopingcall.FixedIntervalLoopingCall(_unlink_grp) - rc = timer.start(interval=UNLINK_INTERVAL).wait() - return rc diff --git a/cinder/volume/drivers/dell_emc/vmax/rest.py b/cinder/volume/drivers/dell_emc/vmax/rest.py deleted file mode 100644 index 2f16014c9..000000000 --- a/cinder/volume/drivers/dell_emc/vmax/rest.py +++ /dev/null @@ -1,1983 +0,0 @@ -# Copyright (c) 2017 Dell Inc. or its subsidiaries. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the -# License for the specific language governing permissions and limitations -# under the License. - -import json - -from oslo_log import log as logging -from oslo_service import loopingcall -import requests -import requests.auth -import requests.packages.urllib3.exceptions as urllib_exp -import six - -from cinder import exception -from cinder.i18n import _ -from cinder.utils import retry -from cinder.volume.drivers.dell_emc.vmax import utils - -requests.packages.urllib3.disable_warnings(urllib_exp.InsecureRequestWarning) - -LOG = logging.getLogger(__name__) -SLOPROVISIONING = 'sloprovisioning' -REPLICATION = 'replication' -U4V_VERSION = '84' -retry_exc_tuple = (exception.VolumeBackendAPIException,) -# HTTP constants -GET = 'GET' -POST = 'POST' -PUT = 'PUT' -DELETE = 'DELETE' -STATUS_200 = 200 -STATUS_201 = 201 -STATUS_202 = 202 -STATUS_204 = 204 -# Job constants -INCOMPLETE_LIST = ['created', 'scheduled', 'running', - 'validating', 'validated'] -CREATED = 'created' -SUCCEEDED = 'succeeded' -CREATE_VOL_STRING = "Creating new Volumes" - - -class VMAXRest(object): - """Rest class based on Unisphere for VMAX Rest API.""" - - def __init__(self): - self.utils = utils.VMAXUtils() - self.session = None - self.base_uri = None - self.user = None - self.passwd = None - self.verify = None - self.cert = None - - def set_rest_credentials(self, array_info): - """Given the array record set the rest server credentials. 
- - :param array_info: record - """ - ip = array_info['RestServerIp'] - port = array_info['RestServerPort'] - self.user = array_info['RestUserName'] - self.passwd = array_info['RestPassword'] - self.cert = array_info['SSLCert'] - verify = array_info['SSLVerify'] - if verify and verify.lower() == 'false': - verify = False - self.verify = verify - ip_port = "%(ip)s:%(port)s" % {'ip': ip, 'port': port} - self.base_uri = ("https://%(ip_port)s/univmax/restapi" - % {'ip_port': ip_port}) - self.session = self._establish_rest_session() - - def _establish_rest_session(self): - """Establish the rest session. - - :returns: requests.session() -- session, the rest session - """ - session = requests.session() - session.headers = {'content-type': 'application/json', - 'accept': 'application/json', - 'Application-Type': 'openstack'} - session.auth = requests.auth.HTTPBasicAuth(self.user, self.passwd) - if self.verify is not None: - session.verify = self.verify - if self.cert: - session.cert = self.cert - - return session - - def request(self, target_uri, method, params=None, request_object=None): - """Sends a request (GET, POST, PUT, DELETE) to the target api. 
- - :param target_uri: target uri (string) - :param method: The method (GET, POST, PUT, or DELETE) - :param params: Additional URL parameters - :param request_object: request payload (dict) - :returns: server response object (dict) - :raises: VolumeBackendAPIException - """ - message, status_code = None, None - if not self.session: - self.session = self._establish_rest_session() - url = ("%(self.base_uri)s%(target_uri)s" % - {'self.base_uri': self.base_uri, - 'target_uri': target_uri}) - try: - if request_object: - response = self.session.request( - method=method, url=url, - data=json.dumps(request_object, sort_keys=True, - indent=4)) - elif params: - response = self.session.request(method=method, url=url, - params=params) - else: - response = self.session.request(method=method, url=url) - status_code = response.status_code - try: - message = response.json() - except ValueError: - LOG.debug("No response received from API. Status code " - "received is: %(status_code)s", - {'status_code': status_code}) - message = None - LOG.debug("%(method)s request to %(url)s has returned with " - "a status code of: %(status_code)s.", - {'method': method, 'url': url, - 'status_code': status_code}) - - except requests.Timeout: - LOG.error("The %(method)s request to URL %(url)s timed-out, " - "but may have been successful. Please check the array.", - {'method': method, 'url': url}) - except Exception as e: - exception_message = (_("The %(method)s request to URL %(url)s " - "failed with exception %(e)s") - % {'method': method, 'url': url, - 'e': six.text_type(e)}) - LOG.exception(exception_message) - raise exception.VolumeBackendAPIException(data=exception_message) - - return status_code, message - - def wait_for_job_complete(self, job, extra_specs): - """Given the job wait for it to complete. - - :param job: the job dict - :param extra_specs: the extra_specs dict. 
- :returns: rc -- int, result -- string, status -- string, - task -- list of dicts detailing tasks in the job - :raises: VolumeBackendAPIException - """ - res, tasks = None, None - if job['status'].lower == CREATED: - try: - res, tasks = job['result'], job['task'] - except KeyError: - pass - return 0, res, job['status'], tasks - - def _wait_for_job_complete(): - result = None - # Called at an interval until the job is finished. - retries = kwargs['retries'] - try: - kwargs['retries'] = retries + 1 - if not kwargs['wait_for_job_called']: - is_complete, result, rc, status, task = ( - self._is_job_finished(job_id)) - if is_complete is True: - kwargs['wait_for_job_called'] = True - kwargs['rc'], kwargs['status'] = rc, status - kwargs['result'], kwargs['task'] = result, task - except Exception: - exception_message = (_("Issue encountered waiting for job.")) - LOG.exception(exception_message) - raise exception.VolumeBackendAPIException( - data=exception_message) - - if retries > int(extra_specs[utils.RETRIES]): - LOG.error("_wait_for_job_complete failed after " - "%(retries)d tries.", {'retries': retries}) - kwargs['rc'], kwargs['result'] = -1, result - - raise loopingcall.LoopingCallDone() - if kwargs['wait_for_job_called']: - raise loopingcall.LoopingCallDone() - - job_id = job['jobId'] - kwargs = {'retries': 0, 'wait_for_job_called': False, - 'rc': 0, 'result': None} - - timer = loopingcall.FixedIntervalLoopingCall(_wait_for_job_complete) - timer.start(interval=int(extra_specs[utils.INTERVAL])).wait() - LOG.debug("Return code is: %(rc)lu. Result is %(res)s.", - {'rc': kwargs['rc'], 'res': kwargs['result']}) - return (kwargs['rc'], kwargs['result'], - kwargs['status'], kwargs['task']) - - def _is_job_finished(self, job_id): - """Check if the job is finished. 
- - :param job_id: the id of the job - :returns: complete -- bool, result -- string, - rc -- int, status -- string, task -- list of dicts - """ - complete, rc, status, result, task = False, 0, None, None, None - job_url = "/%s/system/job/%s" % (U4V_VERSION, job_id) - job = self._get_request(job_url, 'job') - if job: - status = job['status'] - try: - result, task = job['result'], job['task'] - except KeyError: - pass - if status.lower() == SUCCEEDED: - complete = True - elif status.lower() in INCOMPLETE_LIST: - complete = False - else: - rc, complete = -1, True - return complete, result, rc, status, task - - @staticmethod - def check_status_code_success(operation, status_code, message): - """Check if a status code indicates success. - - :param operation: the operation - :param status_code: the status code - :param message: the server response - :raises: VolumeBackendAPIException - """ - if status_code not in [STATUS_200, STATUS_201, - STATUS_202, STATUS_204]: - exception_message = ( - _('Error %(operation)s. The status code received ' - 'is %(sc)s and the message is %(message)s.') - % {'operation': operation, - 'sc': status_code, 'message': message}) - raise exception.VolumeBackendAPIException( - data=exception_message) - - def wait_for_job(self, operation, status_code, job, extra_specs): - """Check if call is async, wait for it to complete. - - :param operation: the operation being performed - :param status_code: the status code - :param job: the job - :param extra_specs: the extra specifications - :returns: task -- list of dicts detailing tasks in the job - :raises: VolumeBackendAPIException - """ - task = None - if status_code == STATUS_202: - rc, result, status, task = self.wait_for_job_complete( - job, extra_specs) - if rc != 0: - exception_message = (_( - "Error %(operation)s. Status code: %(sc)lu. " - "Error: %(error)s. 
Status: %(status)s.") - % {'operation': operation, 'sc': rc, - 'error': six.text_type(result), - 'status': status}) - LOG.error(exception_message) - raise exception.VolumeBackendAPIException( - data=exception_message) - return task - - @staticmethod - def _build_uri(array, category, resource_type, - resource_name=None, private='', version=U4V_VERSION): - """Build the target url. - - :param array: the array serial number - :param category: the resource category e.g. sloprovisioning - :param resource_type: the resource type e.g. maskingview - :param resource_name: the name of a specific resource - :param private: empty string or '/private' if private url - :returns: target url, string - """ - target_uri = ('%(private)s/%(version)s/%(category)s/symmetrix/' - '%(array)s/%(resource_type)s' - % {'private': private, 'version': version, - 'category': category, 'array': array, - 'resource_type': resource_type}) - if resource_name: - target_uri += '/%(resource_name)s' % { - 'resource_name': resource_name} - return target_uri - - def _get_request(self, target_uri, resource_type, params=None): - """Send a GET request to the array. - - :param target_uri: the target uri - :param resource_type: the resource type, e.g. maskingview - :param params: optional dict of filter params - :returns: resource_object -- dict or None - """ - resource_object = None - sc, message = self.request(target_uri, GET, params=params) - operation = 'get %(res)s' % {'res': resource_type} - try: - self.check_status_code_success(operation, sc, message) - except Exception as e: - LOG.debug("Get resource failed with %(e)s", - {'e': e}) - if sc == STATUS_200: - resource_object = message - return resource_object - - def get_resource(self, array, category, resource_type, - resource_name=None, params=None, private=''): - """Get resource details from array. - - :param array: the array serial number - :param category: the resource category e.g. sloprovisioning - :param resource_type: the resource type e.g. 
maskingview - :param resource_name: the name of a specific resource - :param params: query parameters - :param private: empty string or '/private' if private url - :returns: resource object -- dict or None - """ - target_uri = self._build_uri(array, category, resource_type, - resource_name, private) - return self._get_request(target_uri, resource_type, params) - - def create_resource(self, array, category, resource_type, payload, - private=''): - """Create a provisioning resource. - - :param array: the array serial number - :param category: the category - :param resource_type: the resource type - :param payload: the payload - :param private: empty string or '/private' if private url - :returns: status_code -- int, message -- string, server response - """ - target_uri = self._build_uri(array, category, resource_type, - None, private) - status_code, message = self.request(target_uri, POST, - request_object=payload) - operation = 'Create %(res)s resource' % {'res': resource_type} - self.check_status_code_success( - operation, status_code, message) - return status_code, message - - def modify_resource(self, array, category, resource_type, payload, - version=U4V_VERSION, resource_name=None, private=''): - """Modify a resource. 
- - :param version: the uv4 version - :param array: the array serial number - :param category: the category - :param resource_type: the resource type - :param payload: the payload - :param resource_name: the resource name - :param private: empty string or '/private' if private url - :returns: status_code -- int, message -- string (server response) - """ - target_uri = self._build_uri(array, category, resource_type, - resource_name, private, version) - status_code, message = self.request(target_uri, PUT, - request_object=payload) - operation = 'modify %(res)s resource' % {'res': resource_type} - self.check_status_code_success(operation, status_code, message) - return status_code, message - - def delete_resource( - self, array, category, resource_type, resource_name, - payload=None, private='', params=None): - """Delete a provisioning resource. - - :param array: the array serial number - :param category: the resource category e.g. sloprovisioning - :param resource_type: the type of resource to be deleted - :param resource_name: the name of the resource to be deleted - :param payload: the payload, optional - :param private: empty string or '/private' if private url - :param params: dict of optional query params - """ - target_uri = self._build_uri(array, category, resource_type, - resource_name, private) - status_code, message = self.request(target_uri, DELETE, - request_object=payload, - params=params) - operation = 'delete %(res)s resource' % {'res': resource_type} - self.check_status_code_success(operation, status_code, message) - - def get_array_serial(self, array): - """Get an array from its serial number. 
- - :param array: the array serial number - :returns: array_details -- dict or None - """ - target_uri = '/%s/system/symmetrix/%s' % (U4V_VERSION, array) - array_details = self._get_request(target_uri, 'system') - if not array_details: - LOG.error("Cannot connect to array %(array)s.", - {'array': array}) - return array_details - - def get_srp_by_name(self, array, srp=None): - """Returns the details of a storage pool. - - :param array: the array serial number - :param srp: the storage resource pool name - :returns: SRP_details -- dict or None - """ - LOG.debug("storagePoolName: %(srp)s, array: %(array)s.", - {'srp': srp, 'array': array}) - srp_details = self.get_resource(array, SLOPROVISIONING, 'srp', - resource_name=srp, params=None) - return srp_details - - def get_slo_list(self, array): - """Retrieve the list of slo's from the array - - :param array: the array serial number - :returns: slo_list -- list of service level names - """ - slo_list = [] - slo_dict = self.get_resource(array, SLOPROVISIONING, 'slo') - if slo_dict and slo_dict.get('sloId'): - slo_list = slo_dict['sloId'] - return slo_list - - def get_workload_settings(self, array): - """Get valid workload options from array. - - :param array: the array serial number - :returns: workload_setting -- list of workload names - """ - workload_setting = [] - wl_details = self.get_resource(array, SLOPROVISIONING, 'workloadtype') - if wl_details: - workload_setting = wl_details['workloadId'] - return workload_setting - - def get_headroom_capacity(self, array, srp, slo, workload): - """Get capacity of the different slo/ workload combinations. 
- - :param array: the array serial number - :param srp: the storage resource srp - :param slo: the service level - :param workload: the workload - :returns: remaining_capacity -- string, or None - """ - params = {'srp': srp, 'slo': slo, 'workloadtype': workload} - try: - headroom = self.get_resource(array, 'wlp', - 'headroom', params=params) - remaining_capacity = headroom['headroom'][0]['headroomCapacity'] - except (KeyError, TypeError): - remaining_capacity = None - return remaining_capacity - - def is_compression_capable(self, array): - """Check if array is compression capable. - - :param array: array serial number - :returns: bool - """ - is_compression_capable = False - target_uri = "/84/sloprovisioning/symmetrix?compressionCapable=true" - status_code, message = self.request(target_uri, GET) - self.check_status_code_success( - "Check if compression enabled", status_code, message) - if message.get('symmetrixId'): - if array in message['symmetrixId']: - is_compression_capable = True - return is_compression_capable - - def get_storage_group(self, array, storage_group_name): - """Given a name, return storage group details. - - :param array: the array serial number - :param storage_group_name: the name of the storage group - :returns: storage group dict or None - """ - return self.get_resource( - array, SLOPROVISIONING, 'storagegroup', - resource_name=storage_group_name) - - def get_storage_group_list(self, array, params=None): - """"Return a list of storage groups. - - :param array: the array serial number - :param params: optional filter parameters - :returns: storage group list - """ - sg_list = [] - sg_details = self.get_resource(array, SLOPROVISIONING, - 'storagegroup', params=params) - if sg_details: - sg_list = sg_details['storageGroupId'] - return sg_list - - def get_num_vols_in_sg(self, array, storage_group_name): - """Get the number of volumes in a storage group. 
- - :param array: the array serial number - :param storage_group_name: the storage group name - :returns: num_vols -- int - """ - num_vols = 0 - storagegroup = self.get_storage_group(array, storage_group_name) - try: - num_vols = int(storagegroup['num_of_vols']) - except (KeyError, TypeError): - pass - return num_vols - - def is_child_sg_in_parent_sg(self, array, child_name, parent_name): - """Check if a child storage group is a member of a parent group. - - :param array: the array serial number - :param child_name: the child sg name - :param parent_name: the parent sg name - :returns: bool - """ - parent_sg = self.get_storage_group(array, parent_name) - if parent_sg and parent_sg.get('child_storage_group'): - child_sg_list = parent_sg['child_storage_group'] - if child_name in child_sg_list: - return True - return False - - def add_child_sg_to_parent_sg( - self, array, child_sg, parent_sg, extra_specs): - """Add a storage group to a parent storage group. - - This method adds an existing storage group to another storage - group, i.e. cascaded storage groups. - :param array: the array serial number - :param child_sg: the name of the child sg - :param parent_sg: the name of the parent sg - :param extra_specs: the extra specifications - """ - payload = {"editStorageGroupActionParam": { - "expandStorageGroupParam": { - "addExistingStorageGroupParam": { - "storageGroupId": [child_sg]}}}} - sc, job = self.modify_storage_group(array, parent_sg, payload) - self.wait_for_job('Add child sg to parent sg', sc, job, extra_specs) - - def add_empty_child_sg_to_parent_sg( - self, array, child_sg, parent_sg, extra_specs): - """Add an empty storage group to a parent storage group. - - This method adds an existing storage group to another storage - group, i.e. cascaded storage groups. 
- :param array: the array serial number - :param child_sg: the name of the child sg - :param parent_sg: the name of the parent sg - :param extra_specs: the extra specifications - """ - payload = {"editStorageGroupActionParam": { - "addExistingStorageGroupParam": { - "storageGroupId": [child_sg]}}} - sc, job = self.modify_storage_group(array, parent_sg, payload, - version="83") - self.wait_for_job('Add child sg to parent sg', sc, job, extra_specs) - - def remove_child_sg_from_parent_sg( - self, array, child_sg, parent_sg, extra_specs): - """Remove a storage group from its parent storage group. - - This method removes a child storage group from its parent group. - :param array: the array serial number - :param child_sg: the name of the child sg - :param parent_sg: the name of the parent sg - :param extra_specs: the extra specifications - """ - payload = {"editStorageGroupActionParam": { - "removeStorageGroupParam": { - "storageGroupId": [child_sg], "force": 'true'}}} - status_code, job = self.modify_storage_group( - array, parent_sg, payload) - self.wait_for_job( - 'Remove child sg from parent sg', status_code, job, extra_specs) - - def _create_storagegroup(self, array, payload): - """Create a storage group. - - :param array: the array serial number - :param payload: the payload -- dict - :returns: status_code -- int, message -- string, server response - """ - return self.create_resource( - array, SLOPROVISIONING, 'storagegroup', payload) - - def create_storage_group(self, array, storagegroup_name, - srp, slo, workload, extra_specs, - do_disable_compression=False): - """Create the volume in the specified storage group. 
- - :param array: the array serial number - :param storagegroup_name: the group name (String) - :param srp: the SRP (String) - :param slo: the SLO (String) - :param workload: the workload (String) - :param do_disable_compression: flag for disabling compression - :param extra_specs: additional info - :returns: storagegroup_name - string - """ - srp_id = srp if slo else "None" - payload = ({"srpId": srp_id, - "storageGroupId": storagegroup_name, - "emulation": "FBA"}) - - if slo: - slo_param = {"num_of_vols": 0, - "sloId": slo, - "workloadSelection": workload, - "volumeAttribute": { - "volume_size": "0", - "capacityUnit": "GB"}} - if do_disable_compression: - slo_param.update({"noCompression": "true"}) - elif self.is_compression_capable(array): - slo_param.update({"noCompression": "false"}) - - payload.update({"sloBasedStorageGroupParam": [slo_param]}) - - status_code, job = self._create_storagegroup(array, payload) - self.wait_for_job('Create storage group', status_code, - job, extra_specs) - return storagegroup_name - - def modify_storage_group(self, array, storagegroup, payload, - version=U4V_VERSION): - """Modify a storage group (PUT operation). - - :param version: the uv4 version - :param array: the array serial number - :param storagegroup: storage group name - :param payload: the request payload - :returns: status_code -- int, message -- string, server response - """ - return self.modify_resource( - array, SLOPROVISIONING, 'storagegroup', payload, version, - resource_name=storagegroup) - - def create_volume_from_sg(self, array, volume_name, storagegroup_name, - volume_size, extra_specs): - """Create a new volume in the given storage group. 
- - :param array: the array serial number - :param volume_name: the volume name (String) - :param storagegroup_name: the storage group name - :param volume_size: volume size (String) - :param extra_specs: the extra specifications - :returns: dict -- volume_dict - the volume dict - :raises: VolumeBackendAPIException - """ - payload = ( - {"executionOption": "ASYNCHRONOUS", - "editStorageGroupActionParam": { - "expandStorageGroupParam": { - "addVolumeParam": { - "num_of_vols": 1, - "emulation": "FBA", - "volumeIdentifier": { - "identifier_name": volume_name, - "volumeIdentifierChoice": "identifier_name"}, - "volumeAttribute": { - "volume_size": volume_size, - "capacityUnit": "GB"}}}}}) - status_code, job = self.modify_storage_group( - array, storagegroup_name, payload) - - LOG.debug("Create Volume: %(volumename)s. Status code: %(sc)lu.", - {'volumename': volume_name, - 'sc': status_code}) - - task = self.wait_for_job('Create volume', status_code, - job, extra_specs) - - # Find the newly created volume. - device_id = None - if task: - for t in task: - try: - desc = t["description"] - if CREATE_VOL_STRING in desc: - t_list = desc.split() - device_id = t_list[(len(t_list) - 1)] - device_id = device_id[1:-1] - break - if device_id: - self.get_volume(array, device_id) - except Exception as e: - LOG.info("Could not retrieve device id from job. " - "Exception received was %(e)s. Attempting " - "retrieval by volume_identifier.", - {'e': e}) - - if not device_id: - device_id = self.find_volume_device_id(array, volume_name) - - volume_dict = {'array': array, 'device_id': device_id} - return volume_dict - - def add_vol_to_sg(self, array, storagegroup_name, device_id, extra_specs): - """Add a volume to a storage group. 
- - :param array: the array serial number - :param storagegroup_name: storage group name - :param device_id: the device id - :param extra_specs: extra specifications - """ - if not isinstance(device_id, list): - device_id = [device_id] - payload = ({"executionOption": "ASYNCHRONOUS", - "editStorageGroupActionParam": { - "expandStorageGroupParam": { - "addSpecificVolumeParam": { - "volumeId": device_id}}}}) - status_code, job = self.modify_storage_group( - array, storagegroup_name, payload) - - self.wait_for_job('Add volume to sg', status_code, job, extra_specs) - - @retry(retry_exc_tuple, interval=2, retries=3) - def remove_vol_from_sg(self, array, storagegroup_name, - device_id, extra_specs): - """Remove a volume from a storage group. - - :param array: the array serial number - :param storagegroup_name: storage group name - :param device_id: the device id - :param extra_specs: the extra specifications - """ - if not isinstance(device_id, list): - device_id = [device_id] - payload = ({"executionOption": "ASYNCHRONOUS", - "editStorageGroupActionParam": { - "removeVolumeParam": { - "volumeId": device_id}}}) - status_code, job = self.modify_storage_group( - array, storagegroup_name, payload) - - self.wait_for_job('Remove vol from sg', status_code, job, extra_specs) - - def update_storagegroup_qos(self, array, storage_group_name, extra_specs): - """Update the storagegroupinstance with qos details. - - If maxIOPS or maxMBPS is in extra_specs, then DistributionType can be - modified in addition to maxIOPS or/and maxMBPS - If maxIOPS or maxMBPS is NOT in extra_specs, we check to see if - either is set in StorageGroup. 
If so, then DistributionType can be - modified - :param array: the array serial number - :param storage_group_name: the storagegroup instance name - :param extra_specs: extra specifications - :returns: bool, True if updated, else False - """ - return_value = False - sg_details = self.get_storage_group(array, storage_group_name) - sg_qos_details = None - sg_maxiops = None - sg_maxmbps = None - sg_distribution_type = None - maxiops = "nolimit" - maxmbps = "nolimit" - distribution_type = "never" - propertylist = [] - try: - sg_qos_details = sg_details['hostIOLimit'] - sg_maxiops = sg_qos_details['host_io_limit_io_sec'] - sg_maxmbps = sg_qos_details['host_io_limit_mb_sec'] - sg_distribution_type = sg_qos_details['dynamicDistribution'] - except KeyError: - LOG.debug("Unable to get storage group QoS details.") - if 'maxIOPS' in extra_specs.get('qos'): - maxiops = extra_specs['qos']['maxIOPS'] - if maxiops != sg_maxiops: - propertylist.append(maxiops) - if 'maxMBPS' in extra_specs.get('qos'): - maxmbps = extra_specs['qos']['maxMBPS'] - if maxmbps != sg_maxmbps: - propertylist.append(maxmbps) - if 'DistributionType' in extra_specs.get('qos') and ( - propertylist or sg_qos_details): - dynamic_list = ['never', 'onfailure', 'always'] - if (extra_specs.get('qos').get('DistributionType').lower() not - in dynamic_list): - exception_message = (_( - "Wrong Distribution type value %(dt)s entered. 
" - "Please enter one of: %(dl)s") % - {'dt': extra_specs.get('qos').get('DistributionType'), - 'dl': dynamic_list - }) - LOG.error(exception_message) - raise exception.VolumeBackendAPIException( - data=exception_message) - else: - distribution_type = extra_specs['qos']['DistributionType'] - if distribution_type != sg_distribution_type: - propertylist.append(distribution_type) - if propertylist: - payload = {"editStorageGroupActionParam": { - "setHostIOLimitsParam": { - "host_io_limit_io_sec": maxiops, - "host_io_limit_mb_sec": maxmbps, - "dynamicDistribution": distribution_type}}} - status_code, message = ( - self.modify_storage_group(array, storage_group_name, payload)) - try: - self.check_status_code_success('Add qos specs', status_code, - message) - return_value = True - except Exception as e: - LOG.error("Error setting qos. Exception received was: " - "%(e)s", {'e': e}) - return_value = False - return return_value - - def get_vmax_default_storage_group( - self, array, srp, slo, workload, - do_disable_compression=False, is_re=False): - """Get the default storage group. - - :param array: the array serial number - :param srp: the pool name - :param slo: the SLO - :param workload: the workload - :param do_disable_compression: flag for disabling compression - :param is_re: flag for replication - :returns: the storage group dict (or None), the storage group name - """ - storagegroup_name = self.utils.get_default_storage_group_name( - srp, slo, workload, do_disable_compression, is_re) - storagegroup = self.get_storage_group(array, storagegroup_name) - return storagegroup, storagegroup_name - - def delete_storage_group(self, array, storagegroup_name): - """Delete a storage group. 
- - :param array: the array serial number - :param storagegroup_name: storage group name - """ - self.delete_resource( - array, SLOPROVISIONING, 'storagegroup', storagegroup_name) - LOG.debug("Storage Group successfully deleted.") - - def move_volume_between_storage_groups( - self, array, device_id, source_storagegroup_name, - target_storagegroup_name, extra_specs): - """Move a volume to a different storage group. - - :param array: the array serial number - :param source_storagegroup_name: the originating storage group name - :param target_storagegroup_name: the destination storage group name - :param device_id: the device id - :param extra_specs: extra specifications - """ - payload = ({"executionOption": "ASYNCHRONOUS", - "editStorageGroupActionParam": { - "moveVolumeToStorageGroupParam": { - "volumeId": [device_id], - "storageGroupId": target_storagegroup_name, - "useForceFlag": "false"}}}) - status_code, job = self.modify_storage_group( - array, source_storagegroup_name, payload) - self.wait_for_job('move volume between storage groups', status_code, - job, extra_specs) - - def get_volume(self, array, device_id): - """Get a VMAX volume from array. - - :param array: the array serial number - :param device_id: the volume device id - :returns: volume dict - :raises: VolumeBackendAPIException - """ - volume_dict = self.get_resource( - array, SLOPROVISIONING, 'volume', resource_name=device_id) - if not volume_dict: - exception_message = (_("Volume %(deviceID)s not found.") - % {'deviceID': device_id}) - LOG.error(exception_message) - raise exception.VolumeBackendAPIException(data=exception_message) - return volume_dict - - def _get_private_volume(self, array, device_id): - """Get a more detailed list of attributes of a volume. 
- - :param array: the array serial number - :param device_id: the volume device id - :returns: volume dict - :raises: VolumeBackendAPIException - """ - try: - wwn = (self.get_volume(array, device_id))['wwn'] - params = {'wwn': wwn} - volume_info = self.get_resource( - array, SLOPROVISIONING, 'volume', params=params, - private='/private') - volume_dict = volume_info['resultList']['result'][0] - except (KeyError, TypeError): - exception_message = (_("Volume %(deviceID)s not found.") - % {'deviceID': device_id}) - LOG.error(exception_message) - raise exception.VolumeBackendAPIException(data=exception_message) - return volume_dict - - def get_volume_list(self, array, params): - """Get a filtered list of VMAX volumes from array. - - Filter parameters are required as the unfiltered volume list could be - very large and could affect performance if called often. - :param array: the array serial number - :param params: filter parameters - :returns: device_ids -- list - """ - device_ids = [] - volumes = self.get_resource( - array, SLOPROVISIONING, 'volume', params=params) - try: - volume_dict_list = volumes['resultList']['result'] - for vol_dict in volume_dict_list: - device_id = vol_dict['volumeId'] - device_ids.append(device_id) - except (KeyError, TypeError): - pass - return device_ids - - def _modify_volume(self, array, device_id, payload): - """Modify a volume (PUT operation). - - :param array: the array serial number - :param device_id: volume device id - :param payload: the request payload - """ - return self.modify_resource(array, SLOPROVISIONING, 'volume', - payload, resource_name=device_id) - - def extend_volume(self, array, device_id, new_size, extra_specs): - """Extend a VMAX volume. 
- - :param array: the array serial number - :param device_id: volume device id - :param new_size: the new required size for the device - :param extra_specs: the extra specifications - """ - extend_vol_payload = {"executionOption": "ASYNCHRONOUS", - "editVolumeActionParam": { - "expandVolumeParam": { - "volumeAttribute": { - "volume_size": new_size, - "capacityUnit": "GB"}}}} - - status_code, job = self._modify_volume( - array, device_id, extend_vol_payload) - LOG.debug("Extend Device: %(device_id)s. Status code: %(sc)lu.", - {'device_id': device_id, 'sc': status_code}) - self.wait_for_job('Extending volume', status_code, job, extra_specs) - - def rename_volume(self, array, device_id, new_name): - """Rename a volume. - - :param array: the array serial number - :param device_id: the volume device id - :param new_name: the new name for the volume - """ - rename_vol_payload = {"editVolumeActionParam": { - "modifyVolumeIdentifierParam": { - "volumeIdentifier": { - "identifier_name": new_name, - "volumeIdentifierChoice": "identifier_name"}}}} - self._modify_volume(array, device_id, rename_vol_payload) - - def delete_volume(self, array, device_id): - """Deallocate and delete a volume. - - :param array: the array serial number - :param device_id: volume device id - """ - # Deallocate volume - payload = {"editVolumeActionParam": { - "freeVolumeParam": {"free_volume": 'true'}}} - try: - self._modify_volume(array, device_id, payload) - except Exception as e: - LOG.warning('Deallocate volume failed with %(e)s.' - 'Attempting delete.', {'e': e}) - # Delete volume - self.delete_resource(array, SLOPROVISIONING, "volume", device_id) - - def find_mv_connections_for_vol(self, array, maskingview, device_id): - """Find the host_lun_id for a volume in a masking view. 
- - :param array: the array serial number - :param maskingview: the masking view name - :param device_id: the device ID - :returns: host_lun_id -- int - """ - host_lun_id = None - resource_name = ('%(maskingview)s/connections' - % {'maskingview': maskingview}) - params = {'volume_id': device_id} - connection_info = self.get_resource( - array, SLOPROVISIONING, 'maskingview', - resource_name=resource_name, params=params) - if not connection_info: - LOG.error('Cannot retrive masking view connection information ' - 'for %(mv)s.', {'mv': maskingview}) - else: - try: - host_lun_id = (connection_info['maskingViewConnection'] - [0]['host_lun_address']) - host_lun_id = int(host_lun_id, 16) - except Exception as e: - LOG.error("Unable to retrieve connection information " - "for volume %(vol)s in masking view %(mv)s" - "Exception received: %(e)s.", - {'vol': device_id, 'mv': maskingview, - 'e': e}) - return host_lun_id - - def get_storage_groups_from_volume(self, array, device_id): - """Returns all the storage groups for a particular volume. - - :param array: the array serial number - :param device_id: the volume device id - :returns: storagegroup_list - """ - sg_list = [] - vol = self.get_volume(array, device_id) - if vol and vol.get('storageGroupId'): - sg_list = vol['storageGroupId'] - num_storage_groups = len(sg_list) - LOG.debug("There are %(num)d storage groups associated " - "with volume %(deviceId)s.", - {'num': num_storage_groups, 'deviceId': device_id}) - return sg_list - - def is_volume_in_storagegroup(self, array, device_id, storagegroup): - """See if a volume is a member of the given storage group. 
- - :param array: the array serial number - :param device_id: the device id - :param storagegroup: the storage group name - :returns: bool - """ - is_vol_in_sg = False - sg_list = self.get_storage_groups_from_volume(array, device_id) - if storagegroup in sg_list: - is_vol_in_sg = True - return is_vol_in_sg - - def find_volume_device_id(self, array, volume_name): - """Given a volume identifier, find the corresponding device_id. - - :param array: the array serial number - :param volume_name: the volume name (OS-) - :returns: device_id - """ - device_id = None - params = {"volume_identifier": volume_name} - - volume_list = self.get_volume_list(array, params) - if not volume_list: - LOG.debug("Cannot find record for volume %(volumeId)s.", - {'volumeId': volume_name}) - else: - device_id = volume_list[0] - return device_id - - def find_volume_identifier(self, array, device_id): - """Get the volume identifier of a VMAX volume. - - :param array: array serial number - :param device_id: the device id - :returns: the volume identifier -- string - """ - vol = self.get_volume(array, device_id) - return vol['volume_identifier'] - - def get_size_of_device_on_array(self, array, device_id): - """Get the size of the volume from the array. - - :param array: the array serial number - :param device_id: the volume device id - :returns: size -- or None - """ - cap = None - try: - vol = self.get_volume(array, device_id) - cap = vol['cap_gb'] - except Exception as e: - LOG.error("Error retrieving size of volume %(vol)s. " - "Exception received was %(e)s.", - {'vol': device_id, 'e': e}) - return cap - - def get_portgroup(self, array, portgroup): - """Get a portgroup from the array. - - :param array: array serial number - :param portgroup: the portgroup name - :returns: portgroup dict or None - """ - return self.get_resource( - array, SLOPROVISIONING, 'portgroup', resource_name=portgroup) - - def get_port_ids(self, array, portgroup): - """Get a list of port identifiers from a port group. 
- - :param array: the array serial number - :param portgroup: the name of the portgroup - :returns: list of port ids, e.g. ['FA-3D:35', 'FA-4D:32'] - """ - portlist = [] - portgroup_info = self.get_portgroup(array, portgroup) - if portgroup_info: - port_key = portgroup_info["symmetrixPortKey"] - for key in port_key: - port = key['portId'] - portlist.append(port) - return portlist - - def get_port(self, array, port_id): - """Get director port details. - - :param array: the array serial number - :param port_id: the port id - :returns: port dict, or None - """ - dir_id = port_id.split(':')[0] - port_no = port_id.split(':')[1] - - resource_name = ('%(directorId)s/port/%(port_number)s' - % {'directorId': dir_id, 'port_number': port_no}) - return self.get_resource(array, SLOPROVISIONING, 'director', - resource_name=resource_name) - - def get_iscsi_ip_address_and_iqn(self, array, port_id): - """Get the IPv4Address from the director port. - - :param array: the array serial number - :param port_id: the director port identifier - :returns: (list of ip_addresses, iqn) - """ - ip_addresses, iqn = None, None - port_details = self.get_port(array, port_id) - if port_details: - ip_addresses = port_details['symmetrixPort']['ip_addresses'] - iqn = port_details['symmetrixPort']['identifier'] - return ip_addresses, iqn - - def get_target_wwns(self, array, portgroup): - """Get the director ports' wwns. 
- - :param array: the array serial number - :param portgroup: portgroup - :returns: target_wwns -- the list of target wwns for the masking view - """ - target_wwns = [] - port_ids = self.get_port_ids(array, portgroup) - for port in port_ids: - port_info = self.get_port(array, port) - if port_info: - wwn = port_info['symmetrixPort']['identifier'] - target_wwns.append(wwn) - else: - LOG.error("Error retrieving port %(port)s " - "from portgroup %(portgroup)s.", - {'port': port, 'portgroup': portgroup}) - return target_wwns - - def get_initiator_group(self, array, initiator_group=None, params=None): - """Retrieve initiator group details from the array. - - :param array: the array serial number - :param initiator_group: the initaitor group name - :param params: optional filter parameters - :returns: initiator group dict, or None - """ - return self.get_resource( - array, SLOPROVISIONING, 'host', - resource_name=initiator_group, params=params) - - def get_initiator(self, array, initiator_id): - """Retrieve initaitor details from the array. - - :param array: the array serial number - :param initiator_id: the initiator id - :returns: initiator dict, or None - """ - return self.get_resource( - array, SLOPROVISIONING, 'initiator', - resource_name=initiator_id) - - def get_initiator_list(self, array, params=None): - """Retrieve initaitor list from the array. - - :param array: the array serial number - :param params: dict of optional params - :returns: list of initiators - """ - init_dict = self.get_resource( - array, SLOPROVISIONING, 'initiator', params=params) - try: - init_list = init_dict['initiatorId'] - except KeyError: - init_list = [] - return init_list - - def get_in_use_initiator_list_from_array(self, array): - """Get the list of initiators which are in-use from the array. - - Gets the list of initiators from the array which are in - hosts/ initiator groups. 
- :param array: the array serial number - :returns: init_list - """ - params = {'in_a_host': 'true'} - return self.get_initiator_list(array, params) - - def get_initiator_group_from_initiator(self, array, initiator): - """Given an initiator, get its corresponding initiator group, if any. - - :param array: the array serial number - :param initiator: the initiator id - :returns: found_init_group_name -- string - """ - found_init_group_name = None - init_details = self.get_initiator(array, initiator) - if init_details: - found_init_group_name = init_details.get('host') - else: - LOG.error("Unable to retrieve initiator details for " - "%(init)s.", {'init': initiator}) - return found_init_group_name - - def create_initiator_group(self, array, init_group_name, - init_list, extra_specs): - """Create a new initiator group containing the given initiators. - - :param array: the array serial number - :param init_group_name: the initiator group name - :param init_list: the list of initiators - :param extra_specs: extra specifications - """ - new_ig_data = ({"executionOption": "ASYNCHRONOUS", - "hostId": init_group_name, "initiatorId": init_list}) - sc, job = self.create_resource(array, SLOPROVISIONING, - 'host', new_ig_data) - self.wait_for_job('create initiator group', sc, job, extra_specs) - - def delete_initiator_group(self, array, initiatorgroup_name): - """Delete an initiator group. - - :param array: the array serial number - :param initiatorgroup_name: initiator group name - """ - self.delete_resource( - array, SLOPROVISIONING, 'host', initiatorgroup_name) - LOG.debug("Initiator Group successfully deleted.") - - def get_masking_view(self, array, masking_view_name): - """Get details of a masking view. 
- - :param array: array serial number - :param masking_view_name: the masking view name - :returns: masking view dict - """ - return self.get_resource( - array, SLOPROVISIONING, 'maskingview', masking_view_name) - - def get_masking_view_list(self, array, params): - """Get a list of masking views from the array. - - :param array: array serial number - :param params: optional GET parameters - :returns: masking view list - """ - masking_view_list = [] - masking_view_details = self.get_resource( - array, SLOPROVISIONING, 'maskingview', params=params) - try: - masking_view_list = masking_view_details['maskingViewId'] - except (KeyError, TypeError): - pass - return masking_view_list - - def get_masking_views_from_storage_group(self, array, storagegroup): - """Return any masking views associated with a storage group. - - :param array: the array serial number - :param storagegroup: the storage group name - :returns: masking view list - """ - maskingviewlist = [] - storagegroup = self.get_storage_group(array, storagegroup) - if storagegroup and storagegroup.get('maskingview'): - maskingviewlist = storagegroup['maskingview'] - return maskingviewlist - - def get_masking_views_by_initiator_group( - self, array, initiatorgroup_name): - """Given initiator group, retrieve the masking view instance name. - - Retrieve the list of masking view instances associated with the - given initiator group. 
- :param array: the array serial number - :param initiatorgroup_name: the name of the initiator group - :returns: list of masking view names - """ - masking_view_list = [] - ig_details = self.get_initiator_group( - array, initiatorgroup_name) - if ig_details: - if ig_details.get('maskingview'): - masking_view_list = ig_details['maskingview'] - else: - LOG.error("Error retrieving initiator group %(ig_name)s", - {'ig_name': initiatorgroup_name}) - return masking_view_list - - def get_element_from_masking_view( - self, array, maskingview_name, portgroup=False, host=False, - storagegroup=False): - """Return the name of the specified element from a masking view. - - :param array: the array serial number - :param maskingview_name: the masking view name - :param portgroup: the port group name - optional - :param host: the host name - optional - :param storagegroup: the storage group name - optional - :returns: name of the specified element -- string - :raises: VolumeBackendAPIException - """ - element = None - masking_view_details = self.get_masking_view(array, maskingview_name) - if masking_view_details: - if portgroup: - element = masking_view_details['portGroupId'] - elif host: - element = masking_view_details['hostId'] - elif storagegroup: - element = masking_view_details['storageGroupId'] - else: - exception_message = (_("Error retrieving masking group.")) - LOG.error(exception_message) - raise exception.VolumeBackendAPIException(data=exception_message) - return element - - def get_common_masking_views(self, array, portgroup_name, ig_name): - """Get common masking views for a given portgroup and initiator group. 
- - :param array: the array serial number - :param portgroup_name: the port group name - :param ig_name: the initiator group name - :returns: masking view list - """ - params = {'port_group_name': portgroup_name, - 'host_or_host_group_name': ig_name} - masking_view_list = self.get_masking_view_list(array, params) - if not masking_view_list: - LOG.info("No common masking views found for %(pg_name)s " - "and %(ig_name)s.", - {'pg_name': portgroup_name, 'ig_name': ig_name}) - return masking_view_list - - def create_masking_view(self, array, maskingview_name, storagegroup_name, - port_group_name, init_group_name, extra_specs): - """Create a new masking view. - - :param array: the array serial number - :param maskingview_name: the masking view name - :param storagegroup_name: the storage group name - :param port_group_name: the port group - :param init_group_name: the initiator group - :param extra_specs: extra specifications - """ - payload = ({"executionOption": "ASYNCHRONOUS", - "portGroupSelection": { - "useExistingPortGroupParam": { - "portGroupId": port_group_name}}, - "maskingViewId": maskingview_name, - "hostOrHostGroupSelection": { - "useExistingHostParam": { - "hostId": init_group_name}}, - "storageGroupSelection": { - "useExistingStorageGroupParam": { - "storageGroupId": storagegroup_name}}}) - - status_code, job = self.create_resource( - array, SLOPROVISIONING, 'maskingview', payload) - - self.wait_for_job('Create masking view', status_code, job, extra_specs) - - def delete_masking_view(self, array, maskingview_name): - """Delete a masking view. - - :param array: the array serial number - :param maskingview_name: the masking view name - """ - return self.delete_resource( - array, SLOPROVISIONING, 'maskingview', maskingview_name) - - def get_replication_capabilities(self, array): - """Check what replication features are licensed and enabled. - - Example return value for this method: - - .. 
code:: python - - {"symmetrixId": "000197800128", - "snapVxCapable": true, - "rdfCapable": true} - - :param: array - :returns: capabilities dict for the given array - """ - array_capabilities = None - target_uri = ("/%s/replication/capabilities/symmetrix" - % U4V_VERSION) - capabilities = self._get_request( - target_uri, 'replication capabilities') - if capabilities: - symm_list = capabilities['symmetrixCapability'] - for symm in symm_list: - if symm['symmetrixId'] == array: - array_capabilities = symm - break - return array_capabilities - - def is_snapvx_licensed(self, array): - """Check if the snapVx feature is licensed and enabled. - - :param array: the array serial number - :returns: True if licensed and enabled; False otherwise. - """ - snap_capability = False - capabilities = self.get_replication_capabilities(array) - if capabilities: - snap_capability = capabilities['snapVxCapable'] - else: - LOG.error("Cannot access replication capabilities " - "for array %(array)s", {'array': array}) - return snap_capability - - def create_volume_snap(self, array, snap_name, device_id, extra_specs): - """Create a snapVx snapshot of a volume. - - :param array: the array serial number - :param snap_name: the name of the snapshot - :param device_id: the source device id - :param extra_specs: the extra specifications - """ - payload = {"deviceNameListSource": [{"name": device_id}], - "bothSides": 'false', "star": 'false', - "force": 'false'} - resource_type = 'snapshot/%(snap)s' % {'snap': snap_name} - status_code, job = self.create_resource( - array, REPLICATION, resource_type, - payload, private='/private') - self.wait_for_job('Create volume snapVx', status_code, - job, extra_specs) - - def modify_volume_snap(self, array, source_id, target_id, snap_name, - extra_specs, link=False, unlink=False): - """Link or unlink a snapVx to or from a target volume. 
- - :param array: the array serial number - :param source_id: the source device id - :param target_id: the target device id - :param snap_name: the snapshot name - :param extra_specs: extra specifications - :param link: Flag to indicate action = Link - :param unlink: Flag to indicate action = Unlink - """ - action = '' - if link: - action = "Link" - elif unlink: - action = "Unlink" - if action: - payload = {"deviceNameListSource": [{"name": source_id}], - "deviceNameListTarget": [ - {"name": target_id}], - "copy": 'true', "action": action, - "star": 'false', "force": 'false', - "exact": 'false', "remote": 'false', - "symforce": 'false', "nocopy": 'false'} - status_code, job = self.modify_resource( - array, REPLICATION, 'snapshot', payload, - resource_name=snap_name, private='/private') - - self.wait_for_job('Modify snapVx relationship to target', - status_code, job, extra_specs) - - def delete_volume_snap(self, array, snap_name, source_device_id): - """Delete the snapshot of a volume. - - :param array: the array serial number - :param snap_name: the name of the snapshot - :param source_device_id: the source device id - """ - payload = {"deviceNameListSource": [{"name": source_device_id}]} - return self.delete_resource( - array, REPLICATION, 'snapshot', snap_name, payload=payload, - private='/private') - - def get_volume_snap_info(self, array, source_device_id): - """Get snapVx information associated with a volume. - - :param array: the array serial number - :param source_device_id: the source volume device ID - :returns: message -- dict, or None - """ - resource_name = ("%(device_id)s/snapshot" - % {'device_id': source_device_id}) - return self.get_resource(array, REPLICATION, 'volume', - resource_name, private='/private') - - def get_volume_snap(self, array, device_id, snap_name): - """Given a volume snap info, retrieve the snapVx object. 
- - :param array: the array serial number - :param device_id: the source volume device id - :param snap_name: the name of the snapshot - :returns: snapshot dict, or None - """ - snapshot = None - snap_info = self.get_volume_snap_info(array, device_id) - if snap_info: - if (snap_info.get('snapshotSrcs') and - bool(snap_info['snapshotSrcs'])): - for snap in snap_info['snapshotSrcs']: - if snap['snapshotName'] == snap_name: - snapshot = snap - return snapshot - - def get_volume_snapshot_list(self, array, source_device_id): - """Get a list of snapshot details for a particular volume. - - :param array: the array serial number - :param source_device_id: the osurce device id - :returns: snapshot list or None - """ - snapshot_list = [] - snap_info = self.get_volume_snap_info(array, source_device_id) - if snap_info: - if bool(snap_info['snapshotSrcs']): - snapshot_list = snap_info['snapshotSrcs'] - return snapshot_list - - def is_vol_in_rep_session(self, array, device_id): - """Check if a volume is in a replication session. - - :param array: the array serial number - :param device_id: the device id - :returns: snapvx_tgt -- bool, snapvx_src -- bool, - rdf_grp -- list or None - """ - snapvx_src = False - snapvx_tgt = False - rdf_grp = None - volume_details = self.get_volume(array, device_id) - if volume_details: - if volume_details.get('snapvx_target'): - snap_target = volume_details['snapvx_target'] - snapvx_tgt = True if snap_target == 'true' else False - if volume_details.get('snapvx_source'): - snap_source = volume_details['snapvx_source'] - snapvx_src = True if snap_source == 'true' else False - if volume_details.get('rdfGroupId'): - rdf_grp = volume_details['rdfGroupId'] - return snapvx_tgt, snapvx_src, rdf_grp - - def is_sync_complete(self, array, source_device_id, - target_device_id, snap_name, extra_specs): - """Check if a sync session is complete. 
- - :param array: the array serial number - :param source_device_id: source device id - :param target_device_id: target device id - :param snap_name: snapshot name - :param extra_specs: extra specifications - :returns: bool - """ - - def _wait_for_sync(): - """Called at an interval until the synchronization is finished. - - :raises: loopingcall.LoopingCallDone - :raises: VolumeBackendAPIException - """ - retries = kwargs['retries'] - try: - kwargs['retries'] = retries + 1 - if not kwargs['wait_for_sync_called']: - if self._is_sync_complete( - array, source_device_id, snap_name, - target_device_id): - kwargs['wait_for_sync_called'] = True - except Exception: - exception_message = (_("Issue encountered waiting for " - "synchronization.")) - LOG.exception(exception_message) - raise exception.VolumeBackendAPIException( - data=exception_message) - - if kwargs['retries'] > int(extra_specs[utils.RETRIES]): - LOG.error("_wait_for_sync failed after %(retries)d " - "tries.", {'retries': retries}) - raise loopingcall.LoopingCallDone( - retvalue=int(extra_specs[utils.RETRIES])) - if kwargs['wait_for_sync_called']: - raise loopingcall.LoopingCallDone() - - kwargs = {'retries': 0, - 'wait_for_sync_called': False} - timer = loopingcall.FixedIntervalLoopingCall(_wait_for_sync) - rc = timer.start(interval=int(extra_specs[utils.INTERVAL])).wait() - return rc - - def _is_sync_complete(self, array, source_device_id, snap_name, - target_device_id): - """Helper function to check if snapVx sync session is complete. 
- - :param array: the array serial number - :param source_device_id: source device id - :param snap_name: the snapshot name - :param target_device_id: the target device id - :returns: defined -- bool - """ - defined = True - session = self.get_sync_session( - array, source_device_id, snap_name, target_device_id) - if session: - defined = session['defined'] - return defined - - def get_sync_session(self, array, source_device_id, snap_name, - target_device_id): - """Get a particular sync session. - - :param array: the array serial number - :param source_device_id: source device id - :param snap_name: the snapshot name - :param target_device_id: the target device id - :returns: sync session -- dict, or None - """ - session = None - linked_device_list = self.get_snap_linked_device_list( - array, source_device_id, snap_name) - for target in linked_device_list: - if target_device_id == target['targetDevice']: - session = target - return session - - def _find_snap_vx_source_sessions(self, array, source_device_id): - """Find all snap sessions for a given source volume. - - :param array: the array serial number - :param source_device_id: the source device id - :returns: list of snapshot dicts - """ - snap_dict_list = [] - snapshots = self.get_volume_snapshot_list(array, source_device_id) - for snapshot in snapshots: - if bool(snapshot['linkedDevices']): - link_info = {'linked_vols': snapshot['linkedDevices'], - 'snap_name': snapshot['snapshotName']} - snap_dict_list.append(link_info) - return snap_dict_list - - def get_snap_linked_device_list(self, array, source_device_id, snap_name): - """Get the list of linked devices for a particular snapVx snapshot. 
- - :param array: the array serial number - :param source_device_id: source device id - :param snap_name: the snapshot name - :returns: linked_device_list - """ - linked_device_list = [] - snap_list = self._find_snap_vx_source_sessions(array, source_device_id) - for snap in snap_list: - if snap['snap_name'] == snap_name: - linked_device_list = snap['linked_vols'] - return linked_device_list - - def find_snap_vx_sessions(self, array, device_id, tgt_only=False): - """Find all snapVX sessions for a device (source and target). - - :param array: the array serial number - :param device_id: the device id - :param tgt_only: Flag - return only sessions where device is target - :returns: list of snapshot dicts - """ - snap_dict_list, sessions = [], [] - vol_details = self._get_private_volume(array, device_id) - snap_vx_info = vol_details['timeFinderInfo'] - is_snap_src = snap_vx_info['snapVXSrc'] - is_snap_tgt = snap_vx_info['snapVXTgt'] - if snap_vx_info.get('snapVXSession'): - sessions = snap_vx_info['snapVXSession'] - if is_snap_src and not tgt_only: - for session in sessions: - if session.get('srcSnapshotGenInfo'): - src_list = session['srcSnapshotGenInfo'] - for src in src_list: - snap_name = src['snapshotHeader']['snapshotName'] - target_list, target_dict = [], {} - if src.get('lnkSnapshotGenInfo'): - target_dict = src['lnkSnapshotGenInfo'] - for tgt in target_dict: - target_list.append(tgt['targetDevice']) - link_info = {'target_vol_list': target_list, - 'snap_name': snap_name, - 'source_vol': device_id} - snap_dict_list.append(link_info) - if is_snap_tgt: - for session in sessions: - if session.get('tgtSrcSnapshotGenInfo'): - tgt = session['tgtSrcSnapshotGenInfo'] - snap_name = tgt['snapshotName'] - target_list = [tgt['targetDevice']] - source_vol = tgt['sourceDevice'] - link_info = {'target_vol_list': target_list, - 'snap_name': snap_name, - 'source_vol': source_vol} - snap_dict_list.append(link_info) - return snap_dict_list - - def get_rdf_group(self, array, 
rdf_number): - """Get specific rdf group details. - - :param array: the array serial number - :param rdf_number: the rdf number - """ - return self.get_resource(array, REPLICATION, 'rdf_group', - rdf_number) - - def get_rdf_group_list(self, array): - """Get rdf group list from array. - - :param array: the array serial number - """ - return self.get_resource(array, REPLICATION, 'rdf_group') - - def get_rdf_group_volume(self, array, rdf_number, device_id): - """Get specific volume details, from an RDF group. - - :param array: the array serial number - :param rdf_number: the rdf group number - :param device_id: the device id - """ - resource_name = "%(rdf)s/volume/%(dev)s" % { - 'rdf': rdf_number, 'dev': device_id} - return self.get_resource(array, REPLICATION, 'rdf_group', - resource_name) - - def are_vols_rdf_paired(self, array, remote_array, device_id, - target_device, rdf_group): - """Check if a pair of volumes are RDF paired. - - :param array: the array serial number - :param remote_array: the remote array serial number - :param device_id: the device id - :param target_device: the target device id - :param rdf_group: the rdf group - :returns: paired -- bool, state -- string - """ - paired, local_vol_state, rdf_pair_state = False, '', '' - volume = self.get_rdf_group_volume(array, rdf_group, device_id) - if volume: - remote_volume = volume['remoteVolumeName'] - remote_symm = volume['remoteSymmetrixId'] - if (remote_volume == target_device - and remote_array == remote_symm): - paired = True - local_vol_state = volume['localVolumeState'] - rdf_pair_state = volume['rdfpairState'] - else: - LOG.warning("Cannot locate source RDF volume %s", device_id) - return paired, local_vol_state, rdf_pair_state - - def get_rdf_group_number(self, array, rdf_group_label): - """Given an rdf_group_label, return the associated group number. 
- - :param array: the array serial number - :param rdf_group_label: the group label - :returns: rdf_group_number - """ - number = None - rdf_list = self.get_rdf_group_list(array) - if rdf_list and rdf_list.get('rdfGroupID'): - number = [rdf['rdfgNumber'] for rdf in rdf_list['rdfGroupID'] - if rdf['label'] == rdf_group_label][0] - if number: - rdf_group = self.get_rdf_group(array, number) - if not rdf_group: - number = None - return number - - def create_rdf_device_pair(self, array, device_id, rdf_group_no, - target_device, remote_array, - target_vol_name, extra_specs): - """Create an RDF pairing. - - Create a remote replication relationship between source and target - devices. - :param array: the array serial number - :param device_id: the device id - :param rdf_group_no: the rdf group number - :param target_device: the target device id - :param remote_array: the remote array serial - :param target_vol_name: the name of the target volume - :param extra_specs: the extra specs - :returns: rdf_dict - """ - payload = ({"deviceNameListSource": [{"name": device_id}], - "deviceNameListTarget": [{"name": target_device}], - "replicationMode": "Synchronous", - "establish": 'true', - "rdfType": 'RDF1'}) - resource_type = ("rdf_group/%(rdf_num)s/volume" - % {'rdf_num': rdf_group_no}) - status_code, job = self.create_resource(array, REPLICATION, - resource_type, payload, - private="/private") - self.wait_for_job('Create rdf pair', status_code, - job, extra_specs) - rdf_dict = {'array': remote_array, 'device_id': target_device} - return rdf_dict - - def modify_rdf_device_pair( - self, array, device_id, rdf_group, extra_specs, split=False): - """Modify an rdf device pair. 
- - :param array: the array serial number - :param device_id: the device id - :param rdf_group: the rdf group - :param extra_specs: the extra specs - :param split: flag to indicate "split" action - """ - common_opts = {"force": 'false', - "symForce": 'false', - "star": 'false', - "hop2": 'false', - "bypass": 'false'} - if split: - common_opts.update({"immediate": 'false'}) - payload = {"action": "Split", - "executionOption": "ASYNCHRONOUS", - "split": common_opts} - - else: - common_opts.update({"establish": 'true', - "restore": 'false', - "remote": 'false', - "immediate": 'false'}) - payload = {"action": "Failover", - "executionOption": "ASYNCHRONOUS", - "failover": common_opts} - resource_name = ("%(rdf_num)s/volume/%(device_id)s" - % {'rdf_num': rdf_group, 'device_id': device_id}) - sc, job = self.modify_resource( - array, REPLICATION, 'rdf_group', - payload, resource_name=resource_name, private="/private") - self.wait_for_job('Modify device pair', sc, - job, extra_specs) - - def delete_rdf_pair(self, array, device_id, rdf_group): - """Delete an rdf pair. - - :param array: the array serial number - :param device_id: the device id - :param rdf_group: the rdf group - """ - params = {'half': 'false', 'force': 'true', 'symforce': 'false', - 'star': 'false', 'bypass': 'false'} - resource_name = ("%(rdf_num)s/volume/%(device_id)s" - % {'rdf_num': rdf_group, 'device_id': device_id}) - self.delete_resource(array, REPLICATION, 'rdf_group', resource_name, - private="/private", params=params) - - def get_storage_group_rep(self, array, storage_group_name): - """Given a name, return storage group details wrt replication. 
- - :param array: the array serial number - :param storage_group_name: the name of the storage group - :returns: storage group dict or None - """ - return self.get_resource( - array, REPLICATION, 'storagegroup', - resource_name=storage_group_name) - - def get_volumes_in_storage_group(self, array, storagegroup_name): - """Given a volume identifier, find the corresponding device_id. - - :param array: the array serial number - :param storagegroup_name: the storage group name - :returns: volume_list - """ - volume_list = None - params = {"storageGroupId": storagegroup_name} - - volume_list = self.get_volume_list(array, params) - if not volume_list: - LOG.debug("Cannot find record for storage group %(storageGrpId)s", - {'storageGrpId': storagegroup_name}) - return volume_list - - def create_storagegroup_snap(self, array, source_group, - snap_name, extra_specs): - """Create a snapVx snapshot of a storage group. - - :param array: the array serial number - :param source_group: the source group name - :param snap_name: the name of the snapshot - :param extra_specs: the extra specifications - """ - payload = {"snapshotName": snap_name} - resource_type = ('storagegroup/%(sg_name)s/snapshot' - % {'sg_name': source_group}) - status_code, job = self.create_resource( - array, REPLICATION, resource_type, payload) - self.wait_for_job('Create storage group snapVx', status_code, - job, extra_specs) - - def modify_storagegroup_snap( - self, array, source_sg_id, target_sg_id, snap_name, - extra_specs, link=False, unlink=False): - """Link or unlink a snapVx to or from a target storagegroup. 
- - :param array: the array serial number - :param source_sg_id: the source device id - :param target_sg_id: the target device id - :param snap_name: the snapshot name - :param extra_specs: extra specifications - :param link: Flag to indicate action = Link - :param unlink: Flag to indicate action = Unlink - """ - payload = '' - if link: - payload = {"link": {"linkStorageGroupName": target_sg_id, - "copy": "true"}, - "action": "Link"} - elif unlink: - payload = {"unlink": {"unlinkStorageGroupName": target_sg_id}, - "action": "Unlink"} - - resource_name = ('%(sg_name)s/snapshot/%(snap_id)s/generation/0' - % {'sg_name': source_sg_id, 'snap_id': snap_name}) - - status_code, job = self.modify_resource( - array, REPLICATION, 'storagegroup', payload, - resource_name=resource_name) - - self.wait_for_job('Modify storagegroup snapVx relationship to target', - status_code, job, extra_specs) - - def delete_storagegroup_snap(self, array, snap_name, source_sg_id): - """Delete the snapshot of a storagegroup. - - :param array: the array serial number - :param snap_name: the name of the snapshot - :param source_sg_id: the source device id - """ - resource_name = ('%(sg_name)s/snapshot/%(snap_id)s/generation/0' - % {'sg_name': source_sg_id, 'snap_id': snap_name}) - return self.delete_resource( - array, REPLICATION, 'storagegroup', resource_name) diff --git a/cinder/volume/drivers/dell_emc/vmax/utils.py b/cinder/volume/drivers/dell_emc/vmax/utils.py deleted file mode 100644 index 3c6845c10..000000000 --- a/cinder/volume/drivers/dell_emc/vmax/utils.py +++ /dev/null @@ -1,660 +0,0 @@ -# Copyright (c) 2017 Dell Inc. or its subsidiaries. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. 
You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import datetime -import hashlib -import random -import re -from xml.dom import minidom - -from cinder.objects.group import Group -from oslo_log import log as logging -from oslo_utils import strutils -import six - -from cinder import exception -from cinder.i18n import _ -from cinder.objects import fields -from cinder.volume import volume_types - - -LOG = logging.getLogger(__name__) -# SHARED CONSTANTS -ISCSI = 'iscsi' -FC = 'fc' -INTERVAL = 'interval' -RETRIES = 'retries' -VOLUME_ELEMENT_NAME_PREFIX = 'OS-' -MAX_SRP_LENGTH = 16 -TRUNCATE_5 = 5 -TRUNCATE_27 = 27 - -ARRAY = 'array' -SLO = 'slo' -WORKLOAD = 'workload' -SRP = 'srp' -PORTGROUPNAME = 'port_group_name' -DEVICE_ID = 'device_id' -INITIATOR_CHECK = 'initiator_check' -SG_NAME = 'storagegroup_name' -MV_NAME = 'maskingview_name' -IG_NAME = 'init_group_name' -PARENT_SG_NAME = 'parent_sg_name' -CONNECTOR = 'connector' -VOL_NAME = 'volume_name' -EXTRA_SPECS = 'extra_specs' -IS_RE = 'replication_enabled' -DISABLECOMPRESSION = 'storagetype:disablecompression' - - -class VMAXUtils(object): - """Utility class for Rest based VMAX volume drivers. - - This Utility class is for VMAX volume drivers based on Unisphere Rest API. - """ - - def __init__(self): - """Utility class for Rest based VMAX volume drivers.""" - - def get_host_short_name(self, host_name): - """Returns the short name for a given qualified host name. - - Checks the host name to see if it is the fully qualified host name - and returns part before the dot. If there is no dot in the host name - the full host name is returned. 
- :param host_name: the fully qualified host name - :returns: string -- the short host_name - """ - host_array = host_name.split('.') - if len(host_array) > 1: - short_host_name = host_array[0] - else: - short_host_name = host_name - - return self.generate_unique_trunc_host(short_host_name) - - @staticmethod - def get_volumetype_extra_specs(volume, volume_type_id=None): - """Gets the extra specs associated with a volume type. - - :param volume: the volume dictionary - :param volume_type_id: Optional override for volume.volume_type_id - :returns: dict -- extra_specs - the extra specs - :raises: VolumeBackendAPIException - """ - extra_specs = {} - - try: - if volume_type_id: - type_id = volume_type_id - else: - type_id = volume.volume_type_id - if type_id is not None: - extra_specs = volume_types.get_volume_type_extra_specs(type_id) - except Exception as e: - LOG.debug('Exception getting volume type extra specs: %(e)s', - {'e': six.text_type(e)}) - return extra_specs - - @staticmethod - def get_short_protocol_type(protocol): - """Given the protocol type, return I for iscsi and F for fc. - - :param protocol: iscsi or fc - :returns: string -- 'I' for iscsi or 'F' for fc - """ - if protocol.lower() == ISCSI.lower(): - return 'I' - elif protocol.lower() == FC.lower(): - return 'F' - else: - return protocol - - @staticmethod - def truncate_string(str_to_truncate, max_num): - """Truncate a string by taking first and last characters. - - :param str_to_truncate: the string to be truncated - :param max_num: the maximum number of characters - :returns: string -- truncated string or original string - """ - if len(str_to_truncate) > max_num: - new_num = len(str_to_truncate) - max_num // 2 - first_chars = str_to_truncate[:max_num // 2] - last_chars = str_to_truncate[new_num:] - str_to_truncate = first_chars + last_chars - return str_to_truncate - - @staticmethod - def get_time_delta(start_time, end_time): - """Get the delta between start and end time. 
- - :param start_time: the start time - :param end_time: the end time - :returns: string -- delta in string H:MM:SS - """ - delta = end_time - start_time - return six.text_type(datetime.timedelta(seconds=int(delta))) - - @staticmethod - def get_default_storage_group_name( - srp_name, slo, workload, is_compression_disabled=False, - is_re=False): - """Determine default storage group from extra_specs. - - :param srp_name: the name of the srp on the array - :param slo: the service level string e.g Bronze - :param workload: the workload string e.g DSS - :param is_compression_disabled: flag for disabling compression - :param is_re: flag for replication - :returns: storage_group_name - """ - if slo and workload: - prefix = ("OS-%(srpName)s-%(slo)s-%(workload)s" - % {'srpName': srp_name, 'slo': slo, - 'workload': workload}) - - if is_compression_disabled: - prefix += "-CD" - - else: - prefix = "OS-no_SLO" - if is_re: - prefix += "-RE" - - storage_group_name = ("%(prefix)s-SG" % {'prefix': prefix}) - return storage_group_name - - @staticmethod - def get_volume_element_name(volume_id): - """Get volume element name follows naming convention, i.e. 'OS-UUID'. - - :param volume_id: Openstack volume ID containing uuid - :returns: volume element name in format of OS-UUID - """ - element_name = volume_id - uuid_regex = (re.compile( - '[a-f0-9]{8}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{12}', - re.I)) - match = uuid_regex.search(volume_id) - if match: - volume_uuid = match.group() - element_name = ("%(prefix)s%(volumeUUID)s" - % {'prefix': VOLUME_ELEMENT_NAME_PREFIX, - 'volumeUUID': volume_uuid}) - LOG.debug( - "get_volume_element_name elementName: %(elementName)s.", - {'elementName': element_name}) - return element_name - - def generate_unique_trunc_host(self, host_name): - """Create a unique short host name under 16 characters. 
- - :param host_name: long host name - :returns: truncated host name - """ - if host_name and len(host_name) > 16: - host_name = host_name.lower() - m = hashlib.md5() - m.update(host_name.encode('utf-8')) - uuid = m.hexdigest() - new_name = ("%(host)s%(uuid)s" - % {'host': host_name[-6:], - 'uuid': uuid}) - host_name = self.truncate_string(new_name, 16) - return host_name - - def get_pg_short_name(self, portgroup_name): - """Create a unique port group name under 12 characters. - - :param portgroup_name: long portgroup_name - :returns: truncated portgroup_name - """ - if portgroup_name and len(portgroup_name) > 12: - portgroup_name = portgroup_name.lower() - m = hashlib.md5() - m.update(portgroup_name.encode('utf-8')) - uuid = m.hexdigest() - new_name = ("%(pg)s%(uuid)s" - % {'pg': portgroup_name[-6:], - 'uuid': uuid}) - portgroup_name = self.truncate_string(new_name, 12) - return portgroup_name - - @staticmethod - def get_default_oversubscription_ratio(max_over_sub_ratio): - """Override ratio if necessary. - - The over subscription ratio will be overridden if the user supplied - max oversubscription ratio is less than 1. - :param max_over_sub_ratio: user supplied over subscription ratio - :returns: max_over_sub_ratio - """ - if max_over_sub_ratio < 1.0: - LOG.info("The user supplied value for max_over_subscription " - "ratio is less than 1.0. Using the default value of " - "20.0 instead...") - max_over_sub_ratio = 20.0 - return max_over_sub_ratio - - @staticmethod - def _process_tag(element, tag_name): - """Process the tag to get the value. 
- - :param element: the parent element - :param tag_name: the tag name - :returns: nodeValue(can be None) - """ - node_value = None - try: - processed_element = element.getElementsByTagName(tag_name)[0] - node_value = processed_element.childNodes[0].nodeValue - if node_value: - node_value = node_value.strip() - except IndexError: - pass - return node_value - - def _get_connection_info(self, rest_element): - """Given the filename get the rest server connection details. - - :param rest_element: the rest element - :returns: dict -- connargs - the connection info dictionary - :raises: VolumeBackendAPIException - """ - connargs = { - 'RestServerIp': ( - self._process_tag(rest_element, 'RestServerIp')), - 'RestServerPort': ( - self._process_tag(rest_element, 'RestServerPort')), - 'RestUserName': ( - self._process_tag(rest_element, 'RestUserName')), - 'RestPassword': ( - self._process_tag(rest_element, 'RestPassword'))} - - for k, __ in connargs.items(): - if connargs[k] is None: - exception_message = (_( - "RestServerIp, RestServerPort, RestUserName, " - "RestPassword must have valid values.")) - LOG.error(exception_message) - raise exception.VolumeBackendAPIException( - data=exception_message) - - # These can be None - connargs['SSLCert'] = self._process_tag(rest_element, 'SSLCert') - connargs['SSLVerify'] = ( - self._process_tag(rest_element, 'SSLVerify')) - - return connargs - - def parse_file_to_get_array_map(self, file_name): - """Parses a file and gets array map. - - Given a file, parse it to get array and pool(srp). - - .. 
code:: ini - - - 10.108.246.202 - 8443 - smc - smc - /path/client.cert - /path/to/certfile.pem - - OS-PORTGROUP1-PG - - 000198700439 - SRP_1 - - - :param file_name: the configuration file - :returns: list - """ - kwargs = {} - my_file = open(file_name, 'r') - data = my_file.read() - my_file.close() - dom = minidom.parseString(data) - try: - connargs = self._get_connection_info(dom) - portgroup = self._get_random_portgroup(dom) - serialnumber = self._process_tag(dom, 'Array') - if serialnumber is None: - LOG.error("Array Serial Number must be in the file %(file)s.", - {'file': file_name}) - srp_name = self._process_tag(dom, 'SRP') - if srp_name is None: - LOG.error("SRP Name must be in the file %(file)s.", - {'file': file_name}) - kwargs = ( - {'RestServerIp': connargs['RestServerIp'], - 'RestServerPort': connargs['RestServerPort'], - 'RestUserName': connargs['RestUserName'], - 'RestPassword': connargs['RestPassword'], - 'SSLCert': connargs['SSLCert'], - 'SSLVerify': connargs['SSLVerify'], - 'SerialNumber': serialnumber, - 'srpName': srp_name, - 'PortGroup': portgroup}) - - except IndexError: - pass - return kwargs - - @staticmethod - def _get_random_portgroup(element): - """Randomly choose a portgroup from list of portgroups. - - :param element: the parent element - :returns: the randomly chosen port group - """ - portgroupelements = element.getElementsByTagName('PortGroup') - if portgroupelements and len(portgroupelements) > 0: - portgroupnames = [portgroupelement.childNodes[0].nodeValue.strip() - for portgroupelement in portgroupelements - if portgroupelement.childNodes] - portgroupnames = list(set(filter(None, portgroupnames))) - pg_len = len(portgroupnames) - if pg_len > 0: - return portgroupnames[random.randint(0, pg_len - 1)] - return None - - def get_temp_snap_name(self, clone_name, source_device_id): - """Construct a temporary snapshot name for clone operation. 
- - :param clone_name: the name of the clone - :param source_device_id: the source device id - :returns: snap_name - """ - trunc_clone = self.truncate_string(clone_name, 10) - snap_name = ("temp-%(device)s-%(clone)s" - % {'device': source_device_id, 'clone': trunc_clone}) - return snap_name - - @staticmethod - def get_array_and_device_id(volume, external_ref): - """Helper function for manage volume to get array name and device ID. - - :param volume: volume object from API - :param external_ref: the existing volume object to be manged - :returns: string value of the array name and device ID - """ - device_id = external_ref.get(u'source-name', None) - LOG.debug("External_ref: %(er)s", {'er': external_ref}) - if not device_id: - device_id = external_ref.get(u'source-id', None) - host = volume.host - host_list = host.split('+') - array = host_list[(len(host_list) - 1)] - - if device_id: - LOG.debug("Get device ID of existing volume - device ID: " - "%(device_id)s, Array: %(array)s.", - {'device_id': device_id, - 'array': array}) - else: - exception_message = (_("Source volume device ID is required.")) - raise exception.VolumeBackendAPIException( - data=exception_message) - return array, device_id - - @staticmethod - def is_compression_disabled(extra_specs): - """Check is compression is to be disabled. 
- - :param extra_specs: extra specifications - :returns: boolean - """ - do_disable_compression = False - if DISABLECOMPRESSION in extra_specs: - if strutils.bool_from_string(extra_specs[DISABLECOMPRESSION]): - do_disable_compression = True - return do_disable_compression - - def change_compression_type(self, is_source_compr_disabled, new_type): - """Check if volume type have different compression types - - :param is_source_compr_disabled: from source - :param new_type: from target - :returns: boolean - """ - extra_specs = new_type['extra_specs'] - is_target_compr_disabled = self.is_compression_disabled(extra_specs) - if is_target_compr_disabled == is_source_compr_disabled: - return False - else: - return True - - @staticmethod - def is_replication_enabled(extra_specs): - """Check if replication is to be enabled. - - :param extra_specs: extra specifications - :returns: bool - true if enabled, else false - """ - replication_enabled = False - if IS_RE in extra_specs: - replication_enabled = True - return replication_enabled - - def get_replication_config(self, rep_device_list): - """Gather necessary replication configuration info. - - :param rep_device_list: the replication device list from cinder.conf - :returns: rep_config, replication configuration dict - """ - rep_config = {} - if not rep_device_list: - return None - else: - target = rep_device_list[0] - try: - rep_config['array'] = target['target_device_id'] - rep_config['srp'] = target['remote_pool'] - rep_config['rdf_group_label'] = target['rdf_group_label'] - rep_config['portgroup'] = target['remote_port_group'] - - except KeyError as ke: - error_message = (_("Failed to retrieve all necessary SRDF " - "information. 
Error received: %(ke)s.") % - {'ke': six.text_type(ke)}) - LOG.exception(error_message) - raise exception.VolumeBackendAPIException(data=error_message) - - try: - allow_extend = target['allow_extend'] - if strutils.bool_from_string(allow_extend): - rep_config['allow_extend'] = True - else: - rep_config['allow_extend'] = False - except KeyError: - rep_config['allow_extend'] = False - - return rep_config - - @staticmethod - def is_volume_failed_over(volume): - """Check if a volume has been failed over. - - :param volume: the volume object - :returns: bool - """ - if volume is not None: - if volume.get('replication_status') and ( - volume.replication_status == - fields.ReplicationStatus.FAILED_OVER): - return True - return False - - @staticmethod - def update_volume_model_updates(volume_model_updates, - volumes, group_id, status='available'): - """Update the volume model's status and return it. - - :param volume_model_updates: list of volume model update dicts - :param volumes: volumes object api - :param group_id: consistency group id - :param status: string value reflects the status of the member volume - :returns: volume_model_updates - updated volumes - """ - LOG.info( - "Updating status for group: %(id)s.", - {'id': group_id}) - if volumes: - for volume in volumes: - volume_model_updates.append({'id': volume.id, - 'status': status}) - else: - LOG.info("No volume found for group: %(cg)s.", - {'cg': group_id}) - return volume_model_updates - - @staticmethod - def update_extra_specs(extraspecs): - """Update extra specs. 
- - :param extraspecs: the additional info - :returns: extraspecs - """ - try: - pool_details = extraspecs['pool_name'].split('+') - extraspecs[SLO] = pool_details[0] - extraspecs[WORKLOAD] = pool_details[1] - extraspecs[SRP] = pool_details[2] - extraspecs[ARRAY] = pool_details[3] - except KeyError: - LOG.error("Error parsing SLO, workload from" - " the provided extra_specs.") - return extraspecs - - @staticmethod - def get_intervals_retries_dict(interval, retries): - """Get the default intervals and retries. - - :param interval: Interval in seconds between retries - :param retries: Retry count - :returns: default_dict - """ - default_dict = {} - default_dict[INTERVAL] = interval - default_dict[RETRIES] = retries - return default_dict - - @staticmethod - def update_admin_metadata(volumes_model_update, key, values): - """Update the volume_model_updates with admin metadata. - - :param volumes_model_update: List of volume model updates - :param key: Key to be updated in the admin_metadata - :param values: Dictionary of values per volume id - """ - for volume_model_update in volumes_model_update: - volume_id = volume_model_update['id'] - if volume_id in values: - admin_metadata = {} - admin_metadata.update({key: values[volume_id]}) - volume_model_update.update( - {'admin_metadata': admin_metadata}) - - def get_volume_group_utils(self, group, interval, retries): - """Standard utility for generic volume groups. 
- - :param group: the generic volume group object to be created - :param interval: Interval in seconds between retries - :param retries: Retry count - :returns: array, extra specs dict list - :raises: VolumeBackendAPIException - """ - arrays = set() - extraspecs_dict_list = [] - # Check if it is a generic volume group instance - if isinstance(group, Group): - for volume_type in group.volume_types: - extraspecs_dict = ( - self._update_extra_specs_list( - volume_type.extra_specs, - volume_type.id, interval, retries)) - extraspecs_dict_list.append(extraspecs_dict) - arrays.add(extraspecs_dict[EXTRA_SPECS][ARRAY]) - else: - msg = (_("Unable to get volume type ids.")) - LOG.error(msg) - raise exception.VolumeBackendAPIException(data=msg) - - if len(arrays) != 1: - if not arrays: - msg = (_("Failed to get an array associated with " - "volume group: %(groupid)s.") - % {'groupid': group.id}) - else: - msg = (_("There are multiple arrays " - "associated with volume group: %(groupid)s.") - % {'groupid': group.id}) - LOG.error(msg) - raise exception.VolumeBackendAPIException(data=msg) - array = arrays.pop() - return array, extraspecs_dict_list - - def _update_extra_specs_list(self, extraspecs, volumetype_id, - interval, retries): - """Update the extra specs list. - - :param extraspecs: extraspecs - :param volumetype_Id: volume type identifier - :param interval: Interval in seconds between retries - :param retries: Retry count - :returns: extraspecs_dict_list - """ - extraspecs_dict = {} - extraspecs = self.update_extra_specs(extraspecs) - extraspecs = self._update_intervals_and_retries( - extraspecs, interval, retries) - extraspecs_dict["volumeTypeId"] = volumetype_id - extraspecs_dict[EXTRA_SPECS] = extraspecs - return extraspecs_dict - - def update_volume_group_name(self, group): - """Format id and name consistency group. 
- - :param group: the generic volume group object - :returns: group_name -- formatted name + id - """ - group_name = "" - if group.name is not None: - group_name = ( - self.truncate_string( - group.name, TRUNCATE_27) + "_") - - group_name += group.id - return group_name - - @staticmethod - def _update_intervals_and_retries(extra_specs, interval, retries): - """Updates the extraSpecs with intervals and retries values. - - :param extra_specs: - :param interval: Interval in seconds between retries - :param retries: Retry count - :returns: Updated extra_specs - """ - extra_specs[INTERVAL] = interval - LOG.debug("The interval is set at: %(intervalInSecs)s.", - {'intervalInSecs': interval}) - extra_specs[RETRIES] = retries - LOG.debug("Retries are set at: %(retries)s.", - {'retries': retries}) - return extra_specs diff --git a/cinder/volume/drivers/dell_emc/vnx/__init__.py b/cinder/volume/drivers/dell_emc/vnx/__init__.py deleted file mode 100644 index e69de29bb..000000000 diff --git a/cinder/volume/drivers/dell_emc/vnx/adapter.py b/cinder/volume/drivers/dell_emc/vnx/adapter.py deleted file mode 100644 index 3985f9fd9..000000000 --- a/cinder/volume/drivers/dell_emc/vnx/adapter.py +++ /dev/null @@ -1,1450 +0,0 @@ -# Copyright (c) 2016 EMC Corporation. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -import json -import math -import os -import random -import re - -from oslo_config import cfg -from oslo_log import log as logging -from oslo_utils import importutils -import six - - -from cinder import exception -from cinder.i18n import _ -from cinder.objects import fields - -from cinder.volume.drivers.dell_emc.vnx import client -from cinder.volume.drivers.dell_emc.vnx import common -from cinder.volume.drivers.dell_emc.vnx import replication -from cinder.volume.drivers.dell_emc.vnx import taskflows as emc_taskflow -from cinder.volume.drivers.dell_emc.vnx import utils -from cinder.volume import utils as vol_utils -from cinder.zonemanager import utils as zm_utils - -storops = importutils.try_import('storops') -if storops: - from storops import exception as storops_ex - -CONF = cfg.CONF -LOG = logging.getLogger(__name__) - - -class CommonAdapter(replication.ReplicationAdapter): - - VERSION = None - - def __init__(self, configuration, active_backend_id): - self.config = configuration - self.active_backend_id = active_backend_id - self.client = None - self.protocol = None - self.serial_number = None - self.mirror_view = None - self.storage_pools = None - self.max_retries = 5 - self.allowed_ports = None - self.force_delete_lun_in_sg = None - self.max_over_subscription_ratio = None - self.ignore_pool_full_threshold = None - self.reserved_percentage = None - self.destroy_empty_sg = None - self.itor_auto_dereg = None - self.queue_path = None - - def do_setup(self): - self._normalize_config() - self.client = client.Client( - self.config.san_ip, - self.config.san_login, - self.config.san_password, - self.config.storage_vnx_authentication_type, - self.config.naviseccli_path, - self.config.storage_vnx_security_file_dir, - self.queue_path) - # Replication related - if (self.active_backend_id in - common.ReplicationDeviceList.get_backend_ids(self.config)): - # The backend is in failed-over state - self.mirror_view = self.build_mirror_view(self.config, False) - self.client = 
self.mirror_view.primary_client - else: - self.mirror_view = self.build_mirror_view(self.config, True) - self.serial_number = self.client.get_serial() - self.storage_pools = self.parse_pools() - self.force_delete_lun_in_sg = ( - self.config.force_delete_lun_in_storagegroup) - self.max_over_subscription_ratio = ( - self.config.max_over_subscription_ratio) - self.ignore_pool_full_threshold = ( - self.config.ignore_pool_full_threshold) - self.reserved_percentage = self.config.reserved_percentage - self.protocol = self.config.storage_protocol - self.destroy_empty_sg = self.config.destroy_empty_storage_group - self.itor_auto_dereg = self.config.initiator_auto_deregistration - self.set_extra_spec_defaults() - - def _normalize_config(self): - group_name = ( - self.config.config_group if self.config.config_group - else 'DEFAULT') - self.queue_path = os.path.join(CONF.state_path, 'vnx', group_name) - # Check option `naviseccli_path`. - # Set to None (then pass to storops) if it is not set or set to an - # empty string. - naviseccli_path = self.config.naviseccli_path - if naviseccli_path is None or len(naviseccli_path.strip()) == 0: - LOG.warning('[%(group)s] naviseccli_path is not set or set to ' - 'an empty string. None will be passed into ' - 'storops.', {'group': self.config.config_group}) - self.config.naviseccli_path = None - - # Check option `storage_vnx_pool_names`. - # Raise error if it is set to an empty list. - pool_names = self.config.storage_vnx_pool_names - if pool_names is not None: - # Filter out the empty string in the list. - pool_names = [name.strip() - for name in [x for x in pool_names - if len(x.strip()) != 0]] - if len(pool_names) == 0: - raise exception.InvalidConfigurationValue( - option='[{group}] storage_vnx_pool_names'.format( - group=self.config.config_group), - value=pool_names) - self.config.storage_vnx_pool_names = pool_names - - # Check option `io_port_list`. - # Raise error if it is set to an empty list. 
- io_port_list = self.config.io_port_list - if io_port_list is not None: - io_port_list = [port.strip().upper() - for port in [x for x in io_port_list - if len(x.strip()) != 0]] - if len(io_port_list) == 0: - # io_port_list is allowed to be an empty list, which means - # none of the ports will be registered. - raise exception.InvalidConfigurationValue( - option='[{group}] io_port_list'.format( - group=self.config.config_group), - value=io_port_list) - self.config.io_port_list = io_port_list - - if self.config.ignore_pool_full_threshold: - LOG.warning('[%(group)s] ignore_pool_full_threshold: True. ' - 'LUN creation will still be forced even if the ' - 'pool full threshold is exceeded.', - {'group': self.config.config_group}) - - if self.config.destroy_empty_storage_group: - LOG.warning('[%(group)s] destroy_empty_storage_group: True. ' - 'Empty storage group will be deleted after volume ' - 'is detached.', - {'group': self.config.config_group}) - - if not self.config.initiator_auto_registration: - LOG.info('[%(group)s] initiator_auto_registration: False. ' - 'Initiator auto registration is not enabled. ' - 'Please register initiator manually.', - {'group': self.config.config_group}) - - if self.config.force_delete_lun_in_storagegroup: - LOG.warning( - '[%(group)s] force_delete_lun_in_storagegroup=True', - {'group': self.config.config_group}) - - if self.config.ignore_pool_full_threshold: - LOG.warning('[%(group)s] ignore_pool_full_threshold: True. ' - 'LUN creation will still be forced even if the ' - 'pool full threshold is exceeded.', - {'group': self.config.config_group}) - - def _build_port_str(self, port): - raise NotImplementedError() - - def validate_ports(self, all_ports, ports_whitelist): - # `ports_whitelist` passed the _normalize_config, then it could be only - # None or valid list in which the items are stripped and converted to - # upper case. 
- result_ports = None - if ports_whitelist is None: - result_ports = all_ports - else: - # Split the whitelist, remove spaces around the comma, - # and remove the empty item. - port_strs_configed = set(ports_whitelist) - # For iSCSI port, the format is 'A-1-1', - # while for FC, it is 'A-2'. - valid_port_map = {self._build_port_str(port): port - for port in all_ports} - - invalid_port_strs = port_strs_configed - set(valid_port_map.keys()) - if invalid_port_strs: - msg = (_('[%(group)s] Invalid %(protocol)s ports %(port)s ' - 'specified for io_port_list.') % { - 'group': self.config.config_group, - 'protocol': self.config.storage_protocol, - 'port': ','.join(invalid_port_strs)}) - LOG.error(msg) - raise exception.VolumeBackendAPIException(data=msg) - result_ports = [valid_port_map[port_str] - for port_str in port_strs_configed] - - if not result_ports: - raise exception.VolumeBackendAPIException( - data=_('No valid ports.')) - return result_ports - - def set_extra_spec_defaults(self): - provision_default = storops.VNXProvisionEnum.THICK - tier_default = None - if self.client.is_fast_enabled(): - tier_default = storops.VNXTieringEnum.HIGH_AUTO - common.ExtraSpecs.set_defaults(provision_default, tier_default) - - def create_volume(self, volume): - """Creates a EMC volume.""" - volume_size = volume['size'] - volume_name = volume['name'] - utils.check_type_matched(volume) - volume_metadata = utils.get_metadata(volume) - pool = utils.get_pool_from_host(volume.host) - specs = common.ExtraSpecs.from_volume(volume) - - provision = specs.provision - tier = specs.tier - - volume_metadata['snapcopy'] = 'False' - LOG.info('Create Volume: %(volume)s Size: %(size)s ' - 'pool: %(pool)s ' - 'provision: %(provision)s ' - 'tier: %(tier)s ', - {'volume': volume_name, - 'size': volume_size, - 'pool': pool, - 'provision': provision, - 'tier': tier}) - - qos_specs = utils.get_backend_qos_specs(volume) - if (volume.group and - vol_utils.is_group_a_cg_snapshot_type(volume.group)): - cg_id 
= volume.group_id - else: - cg_id = None - lun = self.client.create_lun( - pool, volume_name, volume_size, - provision, tier, cg_id, - ignore_thresholds=self.config.ignore_pool_full_threshold, - qos_specs=qos_specs) - location = self._build_provider_location( - lun_type='lun', - lun_id=lun.lun_id, - base_lun_name=volume.name) - # Setup LUN Replication/MirrorView between devices. - # Secondary LUN will inherit properties from primary LUN. - rep_update = self.setup_lun_replication( - volume, lun.lun_id) - model_update = {'provider_location': location, - 'metadata': volume_metadata} - model_update.update(rep_update) - return model_update - - def retype(self, ctxt, volume, new_type, diff, host): - """Changes volume from one type to another.""" - new_specs = common.ExtraSpecs.from_volume_type(new_type) - new_specs.validate(self.client.get_vnx_enabler_status()) - lun = self.client.get_lun(name=volume.name) - if volume.volume_type_id: - old_specs = common.ExtraSpecs.from_volume(volume) - else: - # Get extra specs from the LUN properties when the lun - # has no volume type. 
- utils.update_res_without_poll(lun) - old_specs = common.ExtraSpecs.from_lun(lun) - old_provision = old_specs.provision - old_tier = old_specs.tier - need_migration = utils.retype_need_migration( - volume, old_provision, new_specs.provision, host) - turn_on_compress = utils.retype_need_turn_on_compression( - old_provision, new_specs.provision) - change_tier = utils.retype_need_change_tier( - old_tier, new_specs.tier) - - if need_migration or turn_on_compress: - if self.client.lun_has_snapshot(lun): - LOG.debug('Driver is not able to do retype because the volume ' - '%s has a snapshot.', - volume.id) - return False - - if need_migration: - LOG.debug('Driver needs to use storage-assisted migration ' - 'to retype the volume.') - return self._migrate_volume(volume, host, new_specs) - if turn_on_compress: - # Turn on compression feature on the volume - self.client.enable_compression(lun) - if change_tier: - # Modify lun to change tiering policy - lun.tier = new_specs.tier - return True - - def create_volume_from_snapshot(self, volume, snapshot): - """Constructs a work flow to create a volume from snapshot. - - :param volume: new volume - :param snapshot: base snapshot - This flow will do the following: - - 1. Create a snap mount point (SMP) for the snapshot. - 2. Attach the snapshot to the SMP created in the first step. - 3. Create a temporary lun prepare for migration. - (Skipped if snapcopy='true') - 4. Start a migration between the SMP and the temp lun. 
- (Skipped if snapcopy='true') - """ - volume_metadata = utils.get_metadata(volume) - pool = utils.get_pool_from_host(volume.host) - - specs = common.ExtraSpecs.from_volume(volume) - tier = specs.tier - base_lun_name = utils.get_base_lun_name(snapshot.volume) - rep_update = dict() - if utils.is_snapcopy_enabled(volume): - new_lun_id = emc_taskflow.fast_create_volume_from_snapshot( - client=self.client, - snap_name=snapshot.name, - new_snap_name=utils.construct_snap_name(volume), - lun_name=volume.name, - base_lun_name=base_lun_name, - pool_name=pool) - - location = self._build_provider_location( - lun_type='smp', - lun_id=new_lun_id, - base_lun_name=base_lun_name) - volume_metadata['snapcopy'] = 'True' - volume_metadata['async_migrate'] = 'False' - else: - async_migrate, provision = utils.calc_migrate_and_provision(volume) - new_snap_name = ( - utils.construct_snap_name(volume) if async_migrate else None) - new_lun_id = emc_taskflow.create_volume_from_snapshot( - client=self.client, - src_snap_name=snapshot.name, - lun_name=volume.name, - lun_size=volume.size, - base_lun_name=base_lun_name, - pool_name=pool, - provision=provision, - tier=tier, - new_snap_name=new_snap_name) - - location = self._build_provider_location( - lun_type='lun', - lun_id=new_lun_id, - base_lun_name=volume.name) - volume_metadata['snapcopy'] = 'False' - volume_metadata['async_migrate'] = six.text_type(async_migrate) - rep_update = self.setup_lun_replication(volume, new_lun_id) - - model_update = {'provider_location': location, - 'metadata': volume_metadata} - model_update.update(rep_update) - return model_update - - def create_cloned_volume(self, volume, src_vref): - """Creates a clone of the specified volume.""" - volume_metadata = utils.get_metadata(volume) - pool = utils.get_pool_from_host(volume.host) - - specs = common.ExtraSpecs.from_volume(volume) - tier = specs.tier - base_lun_name = utils.get_base_lun_name(src_vref) - - source_lun_id = self.client.get_lun_id(src_vref) - snap_name = 
utils.construct_snap_name(volume) - rep_update = dict() - if utils.is_snapcopy_enabled(volume): - # snapcopy feature enabled - new_lun_id = emc_taskflow.fast_create_cloned_volume( - client=self.client, - snap_name=snap_name, - lun_id=source_lun_id, - lun_name=volume.name, - base_lun_name=base_lun_name - ) - location = self._build_provider_location( - lun_type='smp', - lun_id=new_lun_id, - base_lun_name=base_lun_name) - volume_metadata['snapcopy'] = 'True' - volume_metadata['async_migrate'] = 'False' - else: - async_migrate, provision = utils.calc_migrate_and_provision(volume) - new_lun_id = emc_taskflow.create_cloned_volume( - client=self.client, - snap_name=snap_name, - lun_id=source_lun_id, - lun_name=volume.name, - lun_size=volume.size, - base_lun_name=base_lun_name, - pool_name=pool, - provision=provision, - tier=tier, - async_migrate=async_migrate) - # After migration, volume's base lun is itself - location = self._build_provider_location( - lun_type='lun', - lun_id=new_lun_id, - base_lun_name=volume.name) - volume_metadata['snapcopy'] = 'False' - volume_metadata['async_migrate'] = six.text_type(async_migrate) - rep_update = self.setup_lun_replication(volume, new_lun_id) - - model_update = {'provider_location': location, - 'metadata': volume_metadata} - model_update.update(rep_update) - return model_update - - def migrate_volume(self, context, volume, host): - """Leverage the VNX on-array migration functionality. - - This method is invoked at the source backend. - """ - specs = common.ExtraSpecs.from_volume(volume) - return self._migrate_volume(volume, host, specs) - - def _migrate_volume(self, volume, host, extra_specs): - """Migrates volume. - - :param extra_specs: Instance of ExtraSpecs. The new volume will be - changed to align with the new extra specs. 
- """ - r = utils.validate_storage_migration( - volume, host, self.serial_number, self.protocol) - if not r: - return r, None - rate = utils.get_migration_rate(volume) - - new_pool = utils.get_pool_from_host(host['host']) - lun_id = self.client.get_lun_id(volume) - lun_name = volume.name - provision = extra_specs.provision - tier = extra_specs.tier - - emc_taskflow.run_migration_taskflow( - self.client, lun_id, lun_name, volume.size, - new_pool, provision, tier, rate) - - # A smp will become a LUN after migration - if utils.is_volume_smp(volume): - self.client.delete_snapshot( - utils.construct_snap_name(volume)) - volume_metadata = utils.get_metadata(volume) - pl = self._build_provider_location( - lun_type='lun', - lun_id=lun_id, - base_lun_name=volume.name) - volume_metadata['snapcopy'] = 'False' - model_update = {'provider_location': pl, - 'metadata': volume_metadata} - return True, model_update - - def create_consistencygroup(self, context, group): - cg_name = group.id - model_update = {'status': fields.ConsistencyGroupStatus.AVAILABLE} - self.client.create_consistency_group(cg_name=cg_name) - return model_update - - def delete_consistencygroup(self, context, group, volumes): - """Deletes a consistency group.""" - cg_name = group.id - - model_update = {} - volumes_model_update = [] - model_update['status'] = group.status - LOG.info('Start to delete consistency group: %(cg_name)s', - {'cg_name': cg_name}) - - self.client.delete_consistency_group(cg_name) - - for volume in volumes: - try: - self.client.delete_lun(volume.name) - volumes_model_update.append( - {'id': volume.id, - 'status': fields.ConsistencyGroupStatus.DELETED}) - except storops_ex.VNXDeleteLunError: - volumes_model_update.append( - {'id': volume.id, - 'status': fields.ConsistencyGroupStatus.ERROR_DELETING}) - - return model_update, volumes_model_update - - def create_cgsnapshot(self, context, cgsnapshot, snapshots): - - """Creates a CG snapshot(snap group).""" - return 
self.do_create_cgsnap(cgsnapshot.consistencygroup_id, - cgsnapshot.id, - snapshots) - - def do_create_cgsnap(self, group_name, snap_name, snapshots): - model_update = {} - snapshots_model_update = [] - LOG.info('Creating consistency snapshot for group' - ': %(group_name)s', - {'group_name': group_name}) - - self.client.create_cg_snapshot(snap_name, - group_name) - for snapshot in snapshots: - snapshots_model_update.append( - {'id': snapshot.id, 'status': 'available'}) - model_update['status'] = 'available' - - return model_update, snapshots_model_update - - def delete_cgsnapshot(self, context, cgsnapshot, snapshots): - """Deletes a CG snapshot(snap group).""" - return self.do_delete_cgsnap(cgsnapshot.consistencygroup_id, - cgsnapshot.id, - cgsnapshot.status, - snapshots) - - def do_delete_cgsnap(self, group_name, snap_name, - snap_status, snapshots): - model_update = {} - snapshots_model_update = [] - model_update['status'] = snap_status - LOG.info('Deleting consistency snapshot %(snap_name)s for ' - 'group: %(group_name)s', - {'snap_name': snap_name, - 'group_name': group_name}) - - self.client.delete_cg_snapshot(snap_name) - for snapshot in snapshots: - snapshots_model_update.append( - {'id': snapshot.id, 'status': 'deleted'}) - model_update['status'] = 'deleted' - - return model_update, snapshots_model_update - - def create_cg_from_cgsnapshot(self, context, group, - volumes, cgsnapshot, snapshots): - return self.do_create_cg_from_cgsnap( - group.id, group.host, volumes, cgsnapshot.id, snapshots) - - def do_create_cg_from_cgsnap(self, cg_id, cg_host, volumes, - cgsnap_id, snapshots): - # 1. Copy a temp CG snapshot from CG snapshot - # and allow RW for it - # 2. Create SMPs from source volumes - # 3. Attach SMPs to the CG snapshot - # 4. Create migration target LUNs - # 5. Migrate from SMPs to LUNs one by one - # 6. Wait completion of migration - # 7. Create a new CG, add all LUNs to it - # 8. 
Delete the temp CG snapshot - cg_name = cg_id - src_cg_snap_name = cgsnap_id - pool_name = utils.get_pool_from_host(cg_host) - lun_sizes = [] - lun_names = [] - src_lun_names = [] - specs_list = [] - for volume, snapshot in zip(volumes, snapshots): - lun_sizes.append(volume.size) - lun_names.append(volume.name) - src_lun_names.append(snapshot.volume.name) - specs_list.append(common.ExtraSpecs.from_volume(volume)) - - lun_id_list = emc_taskflow.create_cg_from_cg_snapshot( - client=self.client, - cg_name=cg_name, - src_cg_name=None, - cg_snap_name=None, - src_cg_snap_name=src_cg_snap_name, - pool_name=pool_name, - lun_sizes=lun_sizes, - lun_names=lun_names, - src_lun_names=src_lun_names, - specs_list=specs_list) - - volume_model_updates = [] - for volume, lun_id in zip(volumes, lun_id_list): - model_update = { - 'id': volume.id, - 'provider_location': - self._build_provider_location( - lun_id=lun_id, - lun_type='lun', - base_lun_name=volume.name - )} - volume_model_updates.append(model_update) - return None, volume_model_updates - - def create_cloned_cg(self, context, group, - volumes, source_cg, source_vols): - self.do_clone_cg(group.id, group.host, volumes, - source_cg.id, source_vols) - - def do_clone_cg(self, cg_id, cg_host, volumes, - source_cg_id, source_vols): - # 1. 
Create temp CG snapshot from source_cg - # Same with steps 2-8 of create_cg_from_cgsnapshot - pool_name = utils.get_pool_from_host(cg_host) - lun_sizes = [] - lun_names = [] - src_lun_names = [] - specs_list = [] - for volume, source_vol in zip(volumes, source_vols): - lun_sizes.append(volume.size) - lun_names.append(volume.name) - src_lun_names.append(source_vol.name) - specs_list.append(common.ExtraSpecs.from_volume(volume)) - - lun_id_list = emc_taskflow.create_cloned_cg( - client=self.client, - cg_name=cg_id, - src_cg_name=source_cg_id, - pool_name=pool_name, - lun_sizes=lun_sizes, - lun_names=lun_names, - src_lun_names=src_lun_names, - specs_list=specs_list) - - volume_model_updates = [] - for volume, lun_id in zip(volumes, lun_id_list): - model_update = { - 'id': volume.id, - 'provider_location': - self._build_provider_location( - lun_id=lun_id, - lun_type='lun', - base_lun_name=volume.name - )} - volume_model_updates.append(model_update) - return None, volume_model_updates - - def parse_pools(self): - pool_names = self.config.storage_vnx_pool_names - array_pools = self.client.get_pools() - if pool_names: - pool_names = set([po.strip() for po in pool_names]) - array_pool_names = set([po.name for po in array_pools]) - nonexistent_pools = pool_names.difference(array_pool_names) - pool_names.difference_update(nonexistent_pools) - if not pool_names: - msg = _('All the specified storage pools to be managed ' - 'do not exist. Please check your configuration. ' - 'Non-existent pools: %s') % ','.join(nonexistent_pools) - raise exception.VolumeBackendAPIException(data=msg) - if nonexistent_pools: - LOG.warning('The following specified storage pools ' - 'do not exist: %(nonexistent)s. 
' - 'This host will only manage the storage ' - 'pools: %(exist)s', - {'nonexistent': ','.join(nonexistent_pools), - 'exist': ','.join(pool_names)}) - else: - LOG.debug('This host will manage the storage pools: %s.', - ','.join(pool_names)) - else: - pool_names = [p.name for p in array_pools] - LOG.info('No storage pool is configured. This host will ' - 'manage all the pools on the VNX system.') - - return [pool for pool in array_pools if pool.name in pool_names] - - def get_enabler_stats(self): - stats = dict() - stats['compression_support'] = self.client.is_compression_enabled() - stats['fast_support'] = self.client.is_fast_enabled() - stats['deduplication_support'] = self.client.is_dedup_enabled() - stats['thin_provisioning_support'] = self.client.is_thin_enabled() - stats['consistencygroup_support'] = self.client.is_snap_enabled() - stats['replication_enabled'] = True if self.mirror_view else False - stats['consistent_group_snapshot_enabled'] = ( - self.client.is_snap_enabled()) - return stats - - def get_pool_stats(self, enabler_stats=None): - stats = enabler_stats if enabler_stats else self.get_enabler_stats() - self.storage_pools = self.parse_pools() - pool_feature = self.client.get_pool_feature() - pools_stats = list() - for pool in self.storage_pools: - pool_stats = { - 'pool_name': pool.name, - 'total_capacity_gb': pool.user_capacity_gbs, - 'provisioned_capacity_gb': pool.total_subscribed_capacity_gbs - } - - # Handle pool state Initializing, Ready, Faulted, Offline - # or Deleting. - if pool.state in common.PoolState.VALID_CREATE_LUN_STATE: - pool_stats['free_capacity_gb'] = 0 - LOG.warning('Storage Pool [%(pool)s] is [%(state)s].', - {'pool': pool.name, - 'state': pool.state}) - else: - pool_stats['free_capacity_gb'] = pool.available_capacity_gbs - - if (pool_feature.max_pool_luns <= - pool_feature.total_pool_luns): - LOG.warning('Maximum number of Pool LUNs %(max_luns)s ' - 'have been created for %(pool_name)s. 
' - 'No more LUN creation can be done.', - {'max_luns': pool_feature.max_pool_luns, - 'pool_name': pool.name}) - pool_stats['free_capacity_gb'] = 0 - - if not self.reserved_percentage: - # Since the admin is not sure of what value is proper, - # the driver will calculate the recommended value. - - # Some extra capacity will be used by meta data of pool LUNs. - # The overhead is about LUN_Capacity * 0.02 + 3 GB - # reserved_percentage will be used to make sure the scheduler - # takes the overhead into consideration. - # Assume that all the remaining capacity is to be used to - # create a thick LUN, reserved_percentage is estimated as - # follows: - reserved = (((0.02 * pool.available_capacity_gbs + 3) / - (1.02 * pool.user_capacity_gbs)) * 100) - # Take pool full threshold into consideration - if not self.ignore_pool_full_threshold: - reserved += 100 - pool.percent_full_threshold - pool_stats['reserved_percentage'] = int(math.ceil(min(reserved, - 100))) - else: - pool_stats['reserved_percentage'] = self.reserved_percentage - - array_serial = self.serial_number - pool_stats['location_info'] = ('%(pool_name)s|%(array_serial)s' % - {'pool_name': pool.name, - 'array_serial': array_serial}) - pool_stats['fast_cache_enabled'] = pool.fast_cache - - # Copy advanced feature stats from backend stats - pool_stats['compression_support'] = stats['compression_support'] - pool_stats['fast_support'] = stats['fast_support'] - pool_stats['deduplication_support'] = ( - stats['deduplication_support']) - pool_stats['thin_provisioning_support'] = ( - stats['thin_provisioning_support']) - pool_stats['thick_provisioning_support'] = True - pool_stats['consistencygroup_support'] = ( - stats['consistencygroup_support']) - pool_stats['consistent_group_snapshot_enabled'] = ( - stats['consistent_group_snapshot_enabled']) - pool_stats['max_over_subscription_ratio'] = ( - self.max_over_subscription_ratio) - pool_stats['QoS_support'] = True - # Add replication v2.1 support - 
self.append_replication_stats(pool_stats) - pools_stats.append(pool_stats) - return pools_stats - - def update_volume_stats(self): - stats = self.get_enabler_stats() - stats['pools'] = self.get_pool_stats(stats) - stats['storage_protocol'] = self.config.storage_protocol - self.append_replication_stats(stats) - return stats - - def delete_volume(self, volume): - """Deletes an EMC volume.""" - async_migrate = utils.is_async_migrate_enabled(volume) - self.cleanup_lun_replication(volume) - try: - self.client.delete_lun(volume.name, - force=self.force_delete_lun_in_sg) - except storops_ex.VNXLunUsedByFeatureError: - # Case 1. Migration not finished, cleanup related stuff. - if async_migrate: - self.client.cleanup_async_lun( - name=volume.name, - force=self.force_delete_lun_in_sg) - else: - raise - except (storops_ex.VNXLunHasSnapError, - storops_ex.VNXLunHasSnapMountPointError): - # Here, we assume no Cinder managed snaps, and add it to queue - # for later deletion - self.client.delay_delete_lun(volume.name) - # Case 2. Migration already finished, delete temp snap if exists. 
- if async_migrate: - self.client.delete_snapshot(utils.construct_snap_name(volume)) - - def extend_volume(self, volume, new_size): - """Extends an EMC volume.""" - self.client.expand_lun(volume.name, new_size, poll=False) - - def create_snapshot(self, snapshot): - """Creates a snapshot.""" - src_lun_id = self.client.get_lun_id(snapshot.volume) - self.client.create_snapshot(src_lun_id, snapshot.name) - - def delete_snapshot(self, snapshot): - """Deletes a snapshot.""" - self.client.delete_snapshot(snapshot.name) - - def _get_referenced_lun(self, existing_ref): - lun = None - if 'source-id' in existing_ref: - lun = self.client.get_lun(lun_id=existing_ref['source-id']) - elif 'source-name' in existing_ref: - lun = self.client.get_lun(name=existing_ref['source-name']) - else: - reason = _('Reference must contain source-id or source-name key.') - raise exception.ManageExistingInvalidReference( - existing_ref=existing_ref, reason=reason) - if not lun.existed: - raise exception.ManageExistingInvalidReference( - existing_ref=existing_ref, - reason=_("LUN doesn't exist.")) - return lun - - def manage_existing_get_size(self, volume, existing_ref): - """Returns size of volume to be managed by manage_existing.""" - lun = self._get_referenced_lun(existing_ref) - target_pool = utils.get_pool_from_host(volume.host) - if target_pool and lun.pool_name != target_pool: - reason = (_('The imported lun is in pool %(lun_pool)s ' - 'which is not managed by the host %(host)s.') - % {'lun_pool': lun.pool_name, - 'host': volume['host']}) - raise exception.ManageExistingInvalidReference( - existing_ref=existing_ref, reason=reason) - return lun.total_capacity_gb - - def manage_existing(self, volume, existing_ref): - """Imports the existing backend storage object as a volume. 
- - manage_existing_ref:{ - 'source-id': - } - or - manage_existing_ref:{ - 'source-name': - } - - When the volume has a volume_type, the driver inspects that and - compare against the properties of the referenced backend storage - object. If they are incompatible, raise a - ManageExistingVolumeTypeMismatch exception. - """ - lun = self._get_referenced_lun(existing_ref) - if volume.volume_type_id: - type_specs = common.ExtraSpecs.from_volume(volume) - if not type_specs.match_with_lun(lun): - raise exception.ManageExistingVolumeTypeMismatch( - reason=_("The volume to be managed is a %(provision)s LUN " - "and the tiering setting is %(tier)s. This " - "doesn't match with the type %(type)s.") - % {'provision': lun.provision, - 'tier': lun.tier, - 'type': volume.volume_type_id}) - lun.rename(volume.name) - if lun.is_snap_mount_point: - lun_type = 'smp' - base_lun_name = lun.primary_lun - else: - lun_type = 'lun' - base_lun_name = volume.name - pl = self._build_provider_location( - lun_id=lun.lun_id, - lun_type=lun_type, - base_lun_name=base_lun_name) - return {'provider_location': pl} - - def unmanage(self, volume): - """Unmanages a volume.""" - pass - - def build_host(self, connector): - raise NotImplementedError - - def assure_storage_group(self, host): - """Assures that the storage group with name of `host` exists. - - If the storage group doesn't exist, create a one. - """ - sg = self.client.get_storage_group(host.name) - is_new_sg = False - if not sg.existed: - sg = self.client.create_storage_group(host.name) - is_new_sg = True - return (sg, is_new_sg) - - def assure_host_access(self, storage_group, host, volume, is_new_sg): - """Assures that `host` is connected to the Array. - - It first registers initiators to `storage_group` then add `volume` to - `storage_group`. - - :param storage_group: object of storops storage group to which the - host access is registered. - :param host: `common.Host` object with initiator information. 
- :param volume: `common.Volume` object with volume information. - :param is_new_sg: flag indicating whether the `storage_group` is newly - created or not. - """ - if not self.config.initiator_auto_registration: - if is_new_sg: - # Invoke connect_host on storage group to register all - # host information. - # Call connect_host only once when sg is newly created. - storage_group.connect_host(host.name) - else: - self.auto_register_initiator(storage_group, host) - - return self.client.add_lun_to_sg( - storage_group, - self.client.get_lun(lun_id=volume.vnx_lun_id), - self.max_retries) - - def auto_register_initiator(self, storage_group, host): - """Registers the initiators to storage group. - - :param storage_group: storage group object to which the initiator is - registered. - :param host: information of initiator, etc. - - The behavior depends on the combination of the registered - initiators of SG and the configured white list of the ports (that is - `self.config.io_port_list`). - - 1. Register all non-registered initiators to `self.allowed_ports`. - 2. For registered initiators, if the white list is configured, register - them to `self.allowed_ports` except the ones which are already - registered. - Note that `self.allowed_ports` comprises of all iSCSI/FC ports on array - or the valid ports of the white list if `self.config.io_port_list` is - configured. 
- """ - - host_initiators = set(host.initiators) - sg_initiators = set(storage_group.initiator_uid_list) - unreg_initiators = host_initiators - sg_initiators - initiator_port_map = {unreg_id: set(self.allowed_ports) - for unreg_id in unreg_initiators} - - if self.config.io_port_list is not None: - reg_initiators = host_initiators & sg_initiators - for reg_id in reg_initiators: - ports_to_reg = (set(self.allowed_ports) - - set(storage_group.get_ports(reg_id))) - if ports_to_reg: - initiator_port_map[reg_id] = ports_to_reg - LOG.debug('Ports [%(ports)s] in white list will be bound ' - 'to the registered initiator: %(reg_id)s', - {'ports': ports_to_reg, 'reg_id': reg_id}) - - self.client.register_initiator(storage_group, host, initiator_port_map) - - def prepare_target_data(self, storage_group, host, volume, hlu): - raise NotImplementedError() - - def initialize_connection(self, cinder_volume, connector): - """Initializes the connection to `cinder_volume`.""" - volume = common.Volume( - cinder_volume.name, cinder_volume.id, - vnx_lun_id=self.client.get_lun_id(cinder_volume)) - return self._initialize_connection(volume, connector) - - def _initialize_connection(self, volume, connector): - """Helps to initialize the connection. - - To share common codes with initialize_connection_snapshot. - - :param volume: `common.Volume` object with volume information. - :param connector: connector information from Nova. 
- """ - host = self.build_host(connector) - sg, is_new_sg = self.assure_storage_group(host) - hlu = self.assure_host_access(sg, host, volume, is_new_sg) - return self.prepare_target_data(sg, host, volume, hlu) - - def terminate_connection(self, cinder_volume, connector): - """Terminates the connection to `cinder_volume`.""" - volume = common.Volume( - cinder_volume.name, cinder_volume.id, - vnx_lun_id=self.client.get_lun_id(cinder_volume)) - return self._terminate_connection(volume, connector) - - def _terminate_connection(self, volume, connector): - """Helps to terminate the connection. - - To share common codes with terminate_connection_snapshot. - - :param volume: `common.Volume` object with volume information. - :param connector: connector information from Nova. - """ - host = self.build_host(connector) - sg = self.client.get_storage_group(host.name) - self.remove_host_access(volume, host, sg) - - # build_terminate_connection return data should go before - # terminate_connection_cleanup. The storage group may be deleted in - # the terminate_connection_cleanup which is needed during getting - # return data - self.update_storage_group_if_required(sg) - re = self.build_terminate_connection_return_data(host, sg) - self.terminate_connection_cleanup(host, sg) - - return re - - def update_storage_group_if_required(self, sg): - if sg.existed and self.destroy_empty_sg: - utils.update_res_with_poll(sg) - - def remove_host_access(self, volume, host, sg): - """Removes the host access from `volume`. - - :param volume: `common.Volume` object with volume information. - :param host: `common.Host` object with host information. - :param sg: object of `storops` storage group. - """ - lun = self.client.get_lun(lun_id=volume.vnx_lun_id) - hostname = host.name - if not sg.existed: - LOG.warning("Storage Group %s is not found. 
" - "Nothing can be done in terminate_connection().", - hostname) - else: - try: - sg.detach_alu(lun) - except storops_ex.VNXDetachAluNotFoundError: - LOG.warning("Volume %(vol)s is not in Storage Group %(sg)s.", - {'vol': volume.name, 'sg': hostname}) - - def build_terminate_connection_return_data(self, host, sg): - raise NotImplementedError() - - def terminate_connection_cleanup(self, host, sg): - if not sg.existed: - return - - if self.destroy_empty_sg: - if not self.client.sg_has_lun_attached(sg): - self._destroy_empty_sg(host, sg) - - def _destroy_empty_sg(self, host, sg): - try: - LOG.info("Storage Group %s is empty.", sg.name) - sg.disconnect_host(sg.name) - sg.delete() - if self.itor_auto_dereg: - self._deregister_initiator(host) - except storops_ex.StoropsException: - LOG.warning("Failed to destroy Storage Group %s.", - sg.name) - try: - sg.connect_host(sg.name) - except storops_ex.StoropsException: - LOG.warning("Failed to connect host %(host)s " - "back to storage group %(sg)s.", - {'host': sg.name, 'sg': sg.name}) - - def _deregister_initiator(self, host): - initiators = host.initiators - try: - self.client.deregister_initiators(initiators) - except storops_ex: - LOG.warning("Failed to deregister the initiators %s", - initiators) - - def _is_allowed_port(self, port): - return port in self.allowed_ports - - def _build_provider_location( - self, lun_id=None, lun_type=None, base_lun_name=None): - return utils.build_provider_location( - system=self.serial_number, - lun_type=lun_type, - lun_id=lun_id, - base_lun_name=base_lun_name, - version=self.VERSION) - - def update_consistencygroup(self, context, group, add_volumes, - remove_volumes): - return self.do_update_cg(group.id, add_volumes, - remove_volumes) - - def do_update_cg(self, cg_name, add_volumes, - remove_volumes): - cg = self.client.get_cg(name=cg_name) - lun_ids_to_add = [self.client.get_lun_id(volume) - for volume in add_volumes] - lun_ids_to_remove = [self.client.get_lun_id(volume) - for volume 
in remove_volumes] - self.client.update_consistencygroup(cg, lun_ids_to_add, - lun_ids_to_remove) - return ({'status': fields.ConsistencyGroupStatus.AVAILABLE}, - None, - None) - - def create_export_snapshot(self, context, snapshot, connector): - self.client.create_mount_point(snapshot.volume_name, - utils.construct_smp_name(snapshot.id)) - - def remove_export_snapshot(self, context, snapshot): - self.client.delete_lun(utils.construct_smp_name(snapshot.id)) - - def initialize_connection_snapshot(self, snapshot, connector, **kwargs): - """Initializes connection for snapshot mount point.""" - smp_name = utils.construct_smp_name(snapshot.id) - self.client.attach_snapshot(smp_name, snapshot.name) - lun = self.client.get_lun(name=smp_name) - volume = common.Volume(smp_name, snapshot.id, vnx_lun_id=lun.lun_id) - return self._initialize_connection(volume, connector) - - def terminate_connection_snapshot(self, snapshot, connector, **kwargs): - """Terminates connection for snapshot mount point.""" - smp_name = utils.construct_smp_name(snapshot.id) - lun = self.client.get_lun(name=smp_name) - volume = common.Volume(smp_name, snapshot.id, vnx_lun_id=lun.lun_id) - connection_info = self._terminate_connection(volume, connector) - self.client.detach_snapshot(smp_name) - return connection_info - - def get_pool_name(self, volume): - return self.client.get_pool_name(volume.name) - - def update_migrated_volume(self, context, volume, new_volume, - original_volume_status=None): - """Updates metadata after host-assisted migration.""" - metadata = utils.get_metadata(volume) - metadata['snapcopy'] = ('True' if utils.is_volume_smp(new_volume) - else 'False') - return {'provider_location': new_volume.provider_location, - 'metadata': metadata} - - def create_group(self, context, group): - rep_update = self.create_group_replication(group) - model_update = self.create_consistencygroup(context, group) - model_update.update(rep_update) - return model_update - - def delete_group(self, context, 
group, volumes): - self.delete_group_replication(group) - return self.delete_consistencygroup(context, group, volumes) - - def create_group_snapshot(self, context, group_snapshot, snapshots): - """Creates a group_snapshot.""" - return self.do_create_cgsnap(group_snapshot.group_id, - group_snapshot.id, - snapshots) - - def delete_group_snapshot(self, context, group_snapshot, snapshots): - """Deletes a group snapshot.""" - return self.do_delete_cgsnap( - group_snapshot.group_id, - group_snapshot.id, - group_snapshot.status, - snapshots) - - def create_group_from_group_snapshot(self, - context, group, volumes, - group_snapshot, snapshots): - """Creates a group from a group snapshot.""" - return self.do_create_cg_from_cgsnap(group.id, group.host, volumes, - group_snapshot.id, snapshots) - - def update_group(self, context, group, - add_volumes=None, remove_volumes=None): - """Updates a group.""" - # 1. First make sure group and volumes have same - # replication extra-specs and replications status. - for volume in (add_volumes + remove_volumes): - utils.check_type_matched(volume) - # 2. 
Secondly, make sure replication status must be enabled for - # replication-enabled group, - utils.check_rep_status_matched(group) - self.add_volumes_to_group_replication(group, add_volumes) - self.remove_volumes_from_group_replication(group, remove_volumes) - - return self.do_update_cg(group.id, - add_volumes, - remove_volumes) - - def create_cloned_group(self, context, group, volumes, - source_group, source_vols): - """Clones a group""" - return self.do_clone_cg(group.id, group.host, volumes, - source_group.id, source_vols) - - -class ISCSIAdapter(CommonAdapter): - def __init__(self, configuration, active_backend_id): - super(ISCSIAdapter, self).__init__(configuration, active_backend_id) - self.iscsi_initiator_map = None - - def do_setup(self): - super(ISCSIAdapter, self).do_setup() - - self.iscsi_initiator_map = self.config.iscsi_initiators - self.allowed_ports = self.validate_ports( - self.client.get_iscsi_targets(), - self.config.io_port_list) - LOG.debug('[%(group)s] allowed_ports are: [%(ports)s].', - {'group': self.config.config_group, - 'ports': ','.join( - [port.display_name for port in self.allowed_ports])}) - - def _normalize_config(self): - super(ISCSIAdapter, self)._normalize_config() - - # Check option `iscsi_initiators`. - # Set to None if it is not set or set to an empty string. - # Raise error if it is set to an empty string. 
- iscsi_initiators = self.config.iscsi_initiators - option = '[{group}] iscsi_initiators'.format( - group=self.config.config_group) - if iscsi_initiators is None: - return - elif len(iscsi_initiators.strip()) == 0: - raise exception.InvalidConfigurationValue(option=option, - value=iscsi_initiators) - else: - try: - self.config.iscsi_initiators = json.loads(iscsi_initiators) - except ValueError: - raise exception.InvalidConfigurationValue( - option=option, - value=iscsi_initiators) - if not isinstance(self.config.iscsi_initiators, dict): - raise exception.InvalidConfigurationValue( - option=option, - value=iscsi_initiators) - LOG.info("[%(group)s] iscsi_initiators is configured: %(value)s", - {'group': self.config.config_group, - 'value': self.config.iscsi_initiators}) - - def update_volume_stats(self): - """Retrieves stats info.""" - stats = super(ISCSIAdapter, self).update_volume_stats() - self.allowed_ports = self.validate_ports( - self.client.get_iscsi_targets(), - self.config.io_port_list) - backend_name = self.config.safe_get('volume_backend_name') - stats['volume_backend_name'] = backend_name or 'VNXISCSIDriver' - return stats - - def _build_port_str(self, port): - return '%(sp)s-%(pid)s-%(vpid)s' % { - 'sp': 'A' if port.sp == storops.VNXSPEnum.SP_A else 'B', - 'pid': port.port_id, - 'vpid': port.vport_id} - - def build_host(self, connector): - return common.Host(connector['host'], [connector['initiator']], - ip=connector['ip']) - - def arrange_io_ports(self, reg_port_white_list, iscsi_initiator_ips): - """Arranges IO ports. - - Arranges the registered IO ports and puts a pingable port in the - first place as the main portal. 
- """ - - random.shuffle(reg_port_white_list) - random.shuffle(iscsi_initiator_ips) - - main_portal_index = None - for index, port in enumerate(reg_port_white_list): - for initiator_ip in iscsi_initiator_ips: - if self.client.ping_node(port, initiator_ip): - main_portal_index = index - break - else: - # For loop fell through without finding a pingable initiator. - continue - break - - if main_portal_index is not None: - reg_port_white_list.insert( - 0, reg_port_white_list.pop(main_portal_index)) - - return reg_port_white_list - - def prepare_target_data(self, storage_group, host, volume, hlu): - """Prepares the target data for Nova. - - :param storage_group: object of `storops` storage group. - :param host: `common.Host` object with initiator information. - :param volume: `common.Volume` object with volume information. - :param hlu: the HLU number assigned to volume. - """ - - target_io_ports = utils.sift_port_white_list( - self.allowed_ports, storage_group.get_ports(host.initiators[0])) - - if not target_io_ports: - msg = (_('Failed to find available iSCSI targets for %s.') - % storage_group.name) - raise exception.VolumeBackendAPIException(data=msg) - - if self.iscsi_initiator_map and host.name in self.iscsi_initiator_map: - iscsi_initiator_ips = list(self.iscsi_initiator_map[host.name]) - target_io_ports = self.arrange_io_ports(target_io_ports, - iscsi_initiator_ips) - - iscsi_target_data = common.ISCSITargetData(volume.id, False) - iqns = [port.wwn for port in target_io_ports] - portals = ["%s:3260" % port.ip_address for port in target_io_ports] - iscsi_target_data = common.ISCSITargetData( - volume.id, True, iqn=iqns[0], iqns=iqns, portal=portals[0], - portals=portals, lun=hlu, luns=[hlu] * len(target_io_ports)) - LOG.debug('Prepared iSCSI targets for %(host)s: %(target_data)s.', - {'host': host.name, 'target_data': iscsi_target_data}) - - return iscsi_target_data.to_dict() - - def build_terminate_connection_return_data(self, host, sg): - return None - - 
-class FCAdapter(CommonAdapter): - def __init__(self, configuration, active_backend_id): - super(FCAdapter, self).__init__(configuration, active_backend_id) - self.lookup_service = None - - def do_setup(self): - super(FCAdapter, self).do_setup() - - self.lookup_service = zm_utils.create_lookup_service() - self.allowed_ports = self.validate_ports( - self.client.get_fc_targets(), - self.config.io_port_list) - LOG.debug('[%(group)s] allowed_ports are: [%(ports)s].', - {'group': self.config.config_group, - 'ports': ','.join( - [port.display_name for port in self.allowed_ports])}) - - def update_volume_stats(self): - """Retrieves stats info.""" - stats = super(FCAdapter, self).update_volume_stats() - backend_name = self.config.safe_get('volume_backend_name') - stats['volume_backend_name'] = backend_name or 'VNXFCDriver' - return stats - - def _build_port_str(self, port): - return '%(sp)s-%(pid)s' % { - 'sp': 'A' if port.sp == storops.VNXSPEnum.SP_A else 'B', - 'pid': port.port_id} - - def build_host(self, connector): - if 'wwnns' not in connector or 'wwpns' not in connector: - msg = _('Host %s has no FC initiators') % connector['host'] - LOG.error(msg) - raise exception.VolumeBackendAPIException(data=msg) - - wwnns = connector['wwnns'] - wwpns = connector['wwpns'] - wwns = [(node + port).upper() for (node, port) in zip(wwnns, wwpns)] - # WWNS is like '20000090FA534CD110000090FA534CD1', convert it to - # '20:00:00:90:FA:53:4C:D1:10:00:00:90:FA:53:4C:D1' - # Note that use // division operator due to the change behavior of - # / division operator in Python 3. - wwns = [re.sub(r'\S\S', lambda m: m.group(0) + ':', wwn, - len(wwn) // 2 - 1) - for wwn in wwns] - - return common.Host(connector['host'], wwns, wwpns=wwpns) - - def prepare_target_data(self, storage_group, host, volume, hlu): - """Prepares the target data for Nova. - - :param storage_group: object of `storops` storage group. - :param host: `common.Host` object with initiator information. 
- :param volume: `common.Volume` object with volume information. - :param hlu: the HLU number assigned to volume. - """ - - if self.lookup_service is None: - registed_ports = [] - for wwn in host.initiators: - registed_ports.extend(storage_group.get_ports(wwn)) - - reg_port_white_list = utils.sift_port_white_list( - self.allowed_ports, - registed_ports) - - if not reg_port_white_list: - msg = (_('Failed to find available FC targets for %s.') - % storage_group.name) - raise exception.VolumeBackendAPIException(data=msg) - - target_wwns = [utils.truncate_fc_port_wwn(port.wwn) - for port in reg_port_white_list] - return common.FCTargetData(volume.id, True, wwn=target_wwns, - lun=hlu).to_dict() - else: - target_wwns, initiator_target_map = ( - self._get_tgt_list_and_initiator_tgt_map( - storage_group, host, True)) - return common.FCTargetData( - volume.id, True, wwn=target_wwns, lun=hlu, - initiator_target_map=initiator_target_map).to_dict() - - def update_storage_group_if_required(self, sg): - if sg.existed and (self.destroy_empty_sg or self.lookup_service): - utils.update_res_with_poll(sg) - - def build_terminate_connection_return_data(self, host, sg): - conn_info = {'driver_volume_type': 'fibre_channel', - 'data': {}} - if self.lookup_service is None: - return conn_info - - if not sg.existed or self.client.sg_has_lun_attached(sg): - return conn_info - - itor_tgt_map = self._get_initiator_tgt_map(sg, host, False) - conn_info['data']['initiator_target_map'] = itor_tgt_map - - return conn_info - - def _get_initiator_tgt_map( - self, sg, host, allowed_port_only=False): - return self._get_tgt_list_and_initiator_tgt_map( - sg, host, allowed_port_only)[1] - - def _get_tgt_list_and_initiator_tgt_map( - self, sg, host, allowed_port_only=False): - fc_initiators = host.wwpns - fc_ports_wwns = list(map(utils.truncate_fc_port_wwn, - self._get_wwns_of_online_fc_ports( - sg, allowed_port_only=allowed_port_only))) - mapping = ( - self.lookup_service. 
- get_device_mapping_from_network(fc_initiators, fc_ports_wwns)) - return utils.convert_to_tgt_list_and_itor_tgt_map(mapping) - - def _get_wwns_of_online_fc_ports(self, sg, allowed_port_only=False): - ports = sg.fc_ports - if allowed_port_only: - ports = [po for po in ports if self._is_allowed_port(po)] - - fc_port_wwns = self.client.get_wwn_of_online_fc_ports(ports) - - return fc_port_wwns diff --git a/cinder/volume/drivers/dell_emc/vnx/client.py b/cinder/volume/drivers/dell_emc/vnx/client.py deleted file mode 100644 index f6f2e33a3..000000000 --- a/cinder/volume/drivers/dell_emc/vnx/client.py +++ /dev/null @@ -1,725 +0,0 @@ -# Copyright (c) 2016 EMC Corporation. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
-from oslo_log import log as logging -from oslo_utils import excutils -from oslo_utils import importutils - -from cinder import exception -from cinder.i18n import _ -from cinder import utils as cinder_utils -from cinder.volume.drivers.dell_emc.vnx import common -from cinder.volume.drivers.dell_emc.vnx import const -from cinder.volume.drivers.dell_emc.vnx import utils - -storops = importutils.try_import('storops') -if storops: - from storops import exception as storops_ex - from storops.lib import tasks as storops_tasks - -LOG = logging.getLogger(__name__) - - -class Condition(object): - """Defines some condition checker which are used in wait_until, .etc.""" - - @staticmethod - def is_lun_io_ready(lun): - utils.update_res_without_poll(lun) - if not lun.existed: - return False - lun_state = lun.state - if lun_state == common.LUNState.INITIALIZING: - return False - elif lun_state in [common.LUNState.READY, - common.LUNState.FAULTED]: - return lun.operation == 'None' - else: - # Quick exit wait_until when the lun is other state to avoid - # long-time timeout. 
- msg = (_('Volume %(name)s was created in VNX, ' - 'but in %(state)s state.') % { - 'name': lun.name, 'state': lun_state}) - raise exception.VolumeBackendAPIException(data=msg) - - @staticmethod - def is_object_existed(vnx_obj): - utils.update_res_without_poll(vnx_obj) - return vnx_obj.existed - - @staticmethod - def is_lun_ops_ready(lun): - utils.update_res_without_poll(lun) - return 'None' == lun.operation - - @staticmethod - def is_lun_expanded(lun, new_size): - utils.update_res_without_poll(lun) - return new_size == lun.total_capacity_gb - - @staticmethod - def is_mirror_synced(mirror): - utils.update_res_without_poll(mirror) - return ( - mirror.secondary_image.state == - storops.VNXMirrorImageState.SYNCHRONIZED) - - -class Client(object): - def __init__(self, ip, username, password, scope, - naviseccli, sec_file, queue_path=None): - self.naviseccli = naviseccli - if not storops: - msg = _('storops Python library is not installed.') - raise exception.VolumeBackendAPIException(message=msg) - self.vnx = storops.VNXSystem(ip=ip, - username=username, - password=password, - scope=scope, - naviseccli=naviseccli, - sec_file=sec_file) - self.sg_cache = {} - if queue_path: - self.queue = storops_tasks.PQueue(path=queue_path) - self.queue.start() - LOG.info('PQueue[%s] starts now.', queue_path) - - def create_lun(self, pool, name, size, provision, - tier, cg_id=None, ignore_thresholds=False, - qos_specs=None): - pool = self.vnx.get_pool(name=pool) - try: - lun = pool.create_lun(lun_name=name, - size_gb=size, - provision=provision, - tier=tier, - ignore_thresholds=ignore_thresholds) - except storops_ex.VNXLunNameInUseError: - lun = self.vnx.get_lun(name=name) - - utils.wait_until(condition=Condition.is_lun_io_ready, lun=lun) - if cg_id: - cg = self.vnx.get_cg(name=cg_id) - cg.add_member(lun) - ioclasses = self.get_ioclass(qos_specs) - if ioclasses: - policy, is_new = self.get_running_policy() - for one in ioclasses: - one.add_lun(lun) - policy.add_class(one) - if is_new: 
- policy.run_policy() - return lun - - def get_lun(self, name=None, lun_id=None): - return self.vnx.get_lun(name=name, lun_id=lun_id) - - def get_lun_id(self, volume): - """Retrieves the LUN ID of volume.""" - if volume.provider_location: - return int(utils.extract_provider_location( - volume.provider_location, 'id')) - else: - # In some cases, cinder will not update volume info in DB with - # provider_location returned by us. We need to retrieve the id - # from array. For example, cinder backup-create doesn't use the - # provider_location returned from create_cloned_volume. - lun = self.get_lun(name=volume.name) - return lun.lun_id - - def delete_lun(self, name, force=False): - """Deletes a LUN or mount point.""" - lun = self.get_lun(name=name) - smp_attached_snap = (lun.attached_snapshot if lun.is_snap_mount_point - else None) - - try: - # Do not delete the snapshots of the lun. - lun.delete(force_detach=True, detach_from_sg=force) - if smp_attached_snap: - smp_attached_snap.delete() - except storops_ex.VNXLunNotFoundError as ex: - LOG.info("LUN %(name)s is already deleted. This message can " - "be safely ignored. Message: %(msg)s", - {'name': name, 'msg': ex.message}) - - def cleanup_async_lun(self, name, force=False): - """Helper method to cleanup stuff for async migration. - - .. 
note:: - Only call it when VNXLunUsedByFeatureError occurs - """ - lun = self.get_lun(name=name) - self.cleanup_migration(src_id=lun.lun_id) - lun.delete(force_detach=True, detach_from_sg=force) - - def delay_delete_lun(self, name): - """Delay the deletion by putting it in a storops queue.""" - self.queue.put(self.vnx.delete_lun, name=name) - LOG.info("VNX object has been added to queue for later" - " deletion: %s", name) - - @cinder_utils.retry(const.VNXLunPreparingError, retries=1, - backoff_rate=1) - def expand_lun(self, name, new_size, poll=True): - - lun = self.get_lun(name=name) - - try: - lun.poll = poll - lun.expand(new_size, ignore_thresholds=True) - except storops_ex.VNXLunExpandSizeError as ex: - LOG.warning("LUN %(name)s is already expanded. " - "Message: %(msg)s.", - {'name': name, 'msg': ex.message}) - - except storops_ex.VNXLunPreparingError as ex: - # The error means the operation cannot be performed because the LUN - # is 'Preparing'. Wait for a while so that the LUN may get out of - # the transitioning state. 
- with excutils.save_and_reraise_exception(): - LOG.warning("LUN %(name)s is not ready for extension: %(msg)s", - {'name': name, 'msg': ex.message}) - - utils.wait_until(Condition.is_lun_ops_ready, lun=lun) - - utils.wait_until(Condition.is_lun_expanded, lun=lun, new_size=new_size) - - def modify_lun(self): - pass - - @cinder_utils.retry(exceptions=const.VNXTargetNotReadyError, - interval=15, - retries=5, backoff_rate=1) - def migrate_lun(self, src_id, dst_id, - rate=const.MIGRATION_RATE_HIGH): - src = self.vnx.get_lun(lun_id=src_id) - src.migrate(dst_id, rate) - - def session_finished(self, src_lun): - session = self.vnx.get_migration_session(src_lun) - if not session.existed: - return True - elif session.current_state in ('FAULTED', 'STOPPED'): - LOG.warning('Session is %s, need to handled then.', - session.current_state) - return True - else: - return False - - def verify_migration(self, src_id, dst_id, dst_wwn): - """Verify whether migration session finished successfully. - - :param src_id: source LUN id - :param dst_id: destination LUN id - :param dst_wwn: destination LUN WWN - :returns Boolean: True or False - """ - src_lun = self.vnx.get_lun(lun_id=src_id) - - utils.wait_until(condition=self.session_finished, - interval=common.INTERVAL_30_SEC, - src_lun=src_lun) - new_lun = self.vnx.get_lun(lun_id=dst_id) - new_wwn = new_lun.wwn - if not new_wwn or new_wwn != dst_wwn: - return True - else: - return False - - def cleanup_migration(self, src_id, dst_id=None): - """Invoke when migration meets error. 
- - :param src_id: source LUN id - :param dst_id: destination LUN id - """ - # if migration session is still there - # we need to cancel the session - session = self.vnx.get_migration_session(src_id) - src_lun = self.vnx.get_lun(lun_id=src_id) - if session.existed: - LOG.warning('Cancelling migration session: ' - '%(src_id)s -> %(dst_id)s.', - {'src_id': src_id, - 'dst_id': dst_id}) - try: - src_lun.cancel_migrate() - except storops_ex.VNXLunNotMigratingError: - LOG.info('The LUN is not migrating or completed, ' - 'this message can be safely ignored') - except (storops_ex.VNXLunSyncCompletedError, - storops_ex.VNXMigrationError): - # Wait until session finishes - self.verify_migration(src_id, session.dest_lu_id, None) - - def create_snapshot(self, lun_id, snap_name, keep_for=None): - """Creates a snapshot.""" - - lun = self.get_lun(lun_id=lun_id) - try: - lun.create_snap( - snap_name, allow_rw=True, auto_delete=False, - keep_for=keep_for) - except storops_ex.VNXSnapNameInUseError as ex: - LOG.warning('Snapshot %(name)s already exists. ' - 'Message: %(msg)s', - {'name': snap_name, 'msg': ex.message}) - - def delete_snapshot(self, snapshot_name): - """Deletes a snapshot.""" - - snap = self.vnx.get_snap(name=snapshot_name) - try: - snap.delete() - except storops_ex.VNXSnapNotExistsError as ex: - LOG.warning("Snapshot %(name)s may be deleted already. " - "Message: %(msg)s", - {'name': snapshot_name, 'msg': ex.message}) - except storops_ex.VNXDeleteAttachedSnapError as ex: - with excutils.save_and_reraise_exception(): - LOG.warning("Failed to delete snapshot %(name)s " - "which is in use. 
Message: %(msg)s", - {'name': snapshot_name, 'msg': ex.message}) - - def copy_snapshot(self, snap_name, new_snap_name): - snap = self.vnx.get_snap(name=snap_name) - snap.copy(new_name=new_snap_name) - - def create_mount_point(self, lun_name, smp_name): - lun = self.vnx.get_lun(name=lun_name) - try: - return lun.create_mount_point(name=smp_name) - except storops_ex.VNXLunNameInUseError as ex: - LOG.warning('Mount point %(name)s already exists. ' - 'Message: %(msg)s', - {'name': smp_name, 'msg': ex.message}) - # Ignore the failure that due to retry. - return self.vnx.get_lun(name=smp_name) - - def attach_snapshot(self, smp_name, snap_name): - lun = self.vnx.get_lun(name=smp_name) - try: - lun.attach_snap(snap=snap_name) - except storops_ex.VNXSnapAlreadyMountedError as ex: - LOG.warning("Snapshot %(snap_name)s is attached to " - "snapshot mount point %(smp_name)s already. " - "Message: %(msg)s", - {'snap_name': snap_name, - 'smp_name': smp_name, - 'msg': ex.message}) - - def detach_snapshot(self, smp_name): - lun = self.vnx.get_lun(name=smp_name) - try: - lun.detach_snap() - except storops_ex.VNXSnapNotAttachedError as ex: - LOG.warning("Snapshot mount point %(smp_name)s is not " - "currently attached. 
Message: %(msg)s", - {'smp_name': smp_name, 'msg': ex.message}) - - def modify_snapshot(self, snap_name, allow_rw=None, - auto_delete=None, keep_for=None): - snap = self.vnx.get_snap(name=snap_name) - snap.modify(allow_rw=allow_rw, auto_delete=auto_delete, - keep_for=None) - - def create_consistency_group(self, cg_name, lun_id_list=None): - try: - cg = self.vnx.create_cg(name=cg_name, members=lun_id_list) - except storops_ex.VNXConsistencyGroupNameInUseError: - cg = self.vnx.get_cg(name=cg_name) - # Wait until cg is found on VNX, or deletion will fail afterwards - utils.wait_until(Condition.is_object_existed, vnx_obj=cg) - return cg - - def delete_consistency_group(self, cg_name): - cg = self.vnx.get_cg(cg_name) - try: - cg.delete() - except storops_ex.VNXConsistencyGroupNotFoundError: - pass - - def create_cg_snapshot(self, cg_snap_name, cg_name): - cg = self.vnx.get_cg(cg_name) - try: - snap = cg.create_snap(cg_snap_name, allow_rw=True) - except storops_ex.VNXSnapNameInUseError: - snap = self.vnx.get_snap(cg_snap_name) - utils.wait_until(Condition.is_object_existed, - vnx_obj=snap) - return snap - - def delete_cg_snapshot(self, cg_snap_name): - self.delete_snapshot(cg_snap_name) - - def get_serial(self): - return self.vnx.serial - - def get_pools(self): - return self.vnx.get_pool() - - def get_pool(self, name): - return self.vnx.get_pool(name=name) - - def get_iscsi_targets(self, sp=None, port_id=None, vport_id=None): - return self.vnx.get_iscsi_port(sp=sp, port_id=port_id, - vport_id=vport_id, - has_ip=True) - - def get_fc_targets(self, sp=None, port_id=None): - return self.vnx.get_fc_port(sp=sp, port_id=port_id) - - def get_enablers(self): - return self.vnx.get_ndu() - - def is_fast_enabled(self): - return self.vnx.is_auto_tiering_enabled() - - def is_compression_enabled(self): - return self.vnx.is_compression_enabled() - - def is_dedup_enabled(self): - return self.vnx.is_dedup_enabled() - - def is_fast_cache_enabled(self): - return 
self.vnx.is_fast_cache_enabled() - - def is_thin_enabled(self): - return self.vnx.is_thin_enabled() - - def is_snap_enabled(self): - return self.vnx.is_snap_enabled() - - def is_mirror_view_enabled(self): - return self.vnx.is_mirror_view_sync_enabled() - - def get_pool_feature(self): - return self.vnx.get_pool_feature() - - def lun_has_snapshot(self, lun): - """Checks lun has snapshot. - - :param lun: instance of VNXLun - """ - snaps = lun.get_snap() - return len(snaps) != 0 - - def enable_compression(self, lun): - """Enables compression on lun. - - :param lun: instance of VNXLun - """ - try: - lun.enable_compression(ignore_thresholds=True) - except storops_ex.VNXCompressionAlreadyEnabledError: - LOG.warning("Compression has already been enabled on %s.", - lun.name) - - def get_vnx_enabler_status(self): - return common.VNXEnablerStatus( - dedup=self.is_dedup_enabled(), - compression=self.is_compression_enabled(), - thin=self.is_thin_enabled(), - fast=self.is_fast_enabled(), - snap=self.is_snap_enabled()) - - def create_storage_group(self, name): - try: - self.sg_cache[name] = self.vnx.create_sg(name) - except storops_ex.VNXStorageGroupNameInUseError as ex: - # Ignore the failure due to retry - LOG.warning('Storage group %(name)s already exists. ' - 'Message: %(msg)s', - {'name': name, 'msg': ex.message}) - self.sg_cache[name] = self.vnx.get_sg(name=name) - - return self.sg_cache[name] - - def get_storage_group(self, name): - """Retrieve the storage group by name. - - Check the storage group instance cache first to save - CLI call. - If the specified storage group doesn't exist in the cache, - try to grab it from CLI. - - :param name: name of the storage group - :return: storage group instance - """ - if name not in self.sg_cache: - self.sg_cache[name] = self.vnx.get_sg(name) - return self.sg_cache[name] - - def register_initiator(self, storage_group, host, initiator_port_map): - """Registers the initiators of `host` to the `storage_group`. 
- - :param storage_group: the storage group object. - :param host: the ip and name information of the initiator. - :param initiator_port_map: the dict specifying which initiators are - bound to which ports. - """ - for (initiator_id, ports_to_bind) in initiator_port_map.items(): - for port in ports_to_bind: - try: - storage_group.connect_hba(port, initiator_id, host.name, - host_ip=host.ip) - except storops_ex.VNXStorageGroupError as ex: - LOG.warning('Failed to set path to port %(port)s for ' - 'initiator %(hba_id)s. Message: %(msg)s', - {'port': port, 'hba_id': initiator_id, - 'msg': ex.message}) - - if any(initiator_port_map.values()): - LOG.debug('New path set for initiator %(hba_id)s, so update ' - 'storage group with poll.', {'hba_id': initiator_id}) - utils.update_res_with_poll(storage_group) - - def ping_node(self, port, ip_address): - iscsi_port = self.get_iscsi_targets(sp=port.sp, - port_id=port.port_id, - vport_id=port.vport_id) - try: - iscsi_port.ping_node(ip_address, count=1) - return True - except storops_ex.VNXPingNodeError: - return False - - def add_lun_to_sg(self, storage_group, lun, max_retries): - """Adds the `lun` to `storage_group`.""" - try: - return storage_group.attach_alu(lun, max_retries) - except storops_ex.VNXAluAlreadyAttachedError as ex: - # Ignore the failure due to retry. - return storage_group.get_hlu(lun) - except storops_ex.VNXNoHluAvailableError as ex: - with excutils.save_and_reraise_exception(): - # Reach the max times of retry, fail the attach action. - LOG.error('Failed to add %(lun)s into %(sg)s after ' - '%(tried)s tries. Reach the max retry times. ' - 'Message: %(msg)s', - {'lun': lun.lun_id, 'sg': storage_group.name, - 'tried': max_retries, 'msg': ex.message}) - - def get_wwn_of_online_fc_ports(self, ports): - """Returns wwns of online fc ports. - - wwn of a certain port will not be included in the return list when it - is not present or down. 
- """ - wwns = set() - ports_with_all_info = self.vnx.get_fc_port() - for po in ports: - online_list = [p for p in ports_with_all_info if p == po and - p.link_status == 'Up' and p.port_status == 'Online'] - - wwns.update([p.wwn for p in online_list]) - return list(wwns) - - def sg_has_lun_attached(self, sg): - return bool(sg.get_alu_hlu_map()) - - def deregister_initiators(self, initiators): - if not isinstance(initiators, list): - initiators = [initiators] - for initiator_uid in initiators: - self.vnx.remove_hba(initiator_uid) - - def update_consistencygroup(self, cg, lun_ids_to_add, lun_ids_to_remove): - lun_ids_in_cg = (set([l.lun_id for l in cg.lun_list]) if cg.lun_list - else set()) - - # lun_ids_to_add and lun_ids_to_remove never overlap. - lun_ids_updated = ((lun_ids_in_cg | set(lun_ids_to_add)) - - set(lun_ids_to_remove)) - - if lun_ids_updated: - cg.replace_member(*[self.get_lun(lun_id=lun_id) - for lun_id in lun_ids_updated]) - else: - # Need to remove all LUNs from cg. However, replace_member cannot - # handle empty list. So use delete_member. 
- cg.delete_member(*[self.get_lun(lun_id=lun_id) - for lun_id in lun_ids_in_cg]) - - def get_cg(self, name): - return self.vnx.get_cg(name=name) - - def get_available_ip(self): - return self.vnx.alive_sp_ip - - def get_mirror(self, mirror_name): - return self.vnx.get_mirror_view(mirror_name) - - def create_mirror(self, mirror_name, primary_lun_id): - src_lun = self.vnx.get_lun(lun_id=primary_lun_id) - try: - mv = self.vnx.create_mirror_view(mirror_name, src_lun) - except storops_ex.VNXMirrorNameInUseError: - mv = self.vnx.get_mirror_view(mirror_name) - return mv - - def delete_mirror(self, mirror_name): - mv = self.vnx.get_mirror_view(mirror_name) - try: - mv.delete() - except storops_ex.VNXMirrorNotFoundError: - pass - - def add_image(self, mirror_name, sp_ip, secondary_lun_id): - mv = self.vnx.get_mirror_view(mirror_name) - mv.add_image(sp_ip, secondary_lun_id) - # Secondary image info usually did not appear, so - # here add a poll to update. - utils.update_res_with_poll(mv) - utils.wait_until(Condition.is_mirror_synced, mirror=mv) - - def remove_image(self, mirror_name): - mv = self.vnx.get_mirror_view(mirror_name) - mv.remove_image() - - def fracture_image(self, mirror_name): - mv = self.vnx.get_mirror_view(mirror_name) - mv.fracture_image() - - def sync_image(self, mirror_name): - mv = self.vnx.get_mirror_view(mirror_name) - mv.sync_image() - utils.wait_until(Condition.is_mirror_synced, mirror=mv) - - def promote_image(self, mirror_name): - mv = self.vnx.get_mirror_view(mirror_name) - mv.promote_image() - - def create_mirror_group(self, group_name): - try: - mg = self.vnx.create_mirror_group(group_name) - except storops_ex.VNXMirrorGroupNameInUseError: - mg = self.vnx.get_mirror_group(group_name) - return mg - - def delete_mirror_group(self, group_name): - mg = self.vnx.get_mirror_group(group_name) - try: - mg.delete() - except storops_ex.VNXMirrorGroupNotFoundError: - LOG.info('Mirror group %s was already deleted.', group_name) - - def add_mirror(self, 
group_name, mirror_name): - mg = self.vnx.get_mirror_group(group_name) - mv = self.vnx.get_mirror_view(mirror_name) - try: - mg.add_mirror(mv) - except storops_ex.VNXMirrorGroupAlreadyMemberError: - LOG.info('Mirror %(mirror)s is already a member of %(group)s', - {'mirror': mirror_name, 'group': group_name}) - return mg - - def remove_mirror(self, group_name, mirror_name): - mg = self.vnx.get_mirror_group(group_name) - mv = self.vnx.get_mirror_view(mirror_name) - try: - mg.remove_mirror(mv) - except storops_ex.VNXMirrorGroupMirrorNotMemberError: - LOG.info('Mirror %(mirror)s is not a member of %(group)s', - {'mirror': mirror_name, 'group': group_name}) - - def promote_mirror_group(self, group_name): - mg = self.vnx.get_mirror_group(group_name) - try: - mg.promote_group() - except storops_ex.VNXMirrorGroupAlreadyPromotedError: - LOG.info('Mirror group %s was already promoted.', group_name) - return mg - - def sync_mirror_group(self, group_name): - mg = self.vnx.get_mirror_group(group_name) - mg.sync_group() - - def fracture_mirror_group(self, group_name): - mg = self.vnx.get_mirror_group(group_name) - mg.fracture_group() - - def get_pool_name(self, lun_name): - lun = self.get_lun(name=lun_name) - utils.update_res_without_poll(lun) - return lun.pool_name - - def get_ioclass(self, qos_specs): - ioclasses = [] - if qos_specs is not None: - prefix = qos_specs['id'] - max_bws = qos_specs[common.QOS_MAX_BWS] - max_iops = qos_specs[common.QOS_MAX_IOPS] - if max_bws: - name = '%(prefix)s-bws-%(max)s' % { - 'prefix': prefix, 'max': max_bws} - class_bws = self.vnx.get_ioclass(name=name) - if not class_bws.existed: - class_bws = self.create_ioclass_bws(name, - max_bws) - ioclasses.append(class_bws) - if max_iops: - name = '%(prefix)s-iops-%(max)s' % { - 'prefix': prefix, 'max': max_iops} - class_iops = self.vnx.get_ioclass(name=name) - if not class_iops.existed: - class_iops = self.create_ioclass_iops(name, - max_iops) - ioclasses.append(class_iops) - return ioclasses - - def 
create_ioclass_iops(self, name, max_iops): - """Creates a ioclass by IOPS.""" - max_iops = int(max_iops) - ctrl_method = storops.VNXCtrlMethod( - method=storops.VNXCtrlMethod.LIMIT_CTRL, - metric='tt', value=max_iops) - ioclass = self.vnx.create_ioclass(name=name, iotype='rw', - ctrlmethod=ctrl_method) - return ioclass - - def create_ioclass_bws(self, name, max_bws): - """Creates a ioclass by bandwidth in MiB.""" - max_bws = int(max_bws) - ctrl_method = storops.VNXCtrlMethod( - method=storops.VNXCtrlMethod.LIMIT_CTRL, - metric='bw', value=max_bws) - ioclass = self.vnx.create_ioclass(name=name, iotype='rw', - ctrlmethod=ctrl_method) - return ioclass - - def create_policy(self, policy_name): - """Creates the policy and starts it.""" - policy = self.vnx.get_policy(name=policy_name) - if not policy.existed: - LOG.info('Creating the policy: %s', policy_name) - policy = self.vnx.create_policy(name=policy_name) - return policy - - def get_running_policy(self): - """Returns the only running/measuring policy on VNX. - - .. note: VNX only allows one running policy. - """ - policies = self.vnx.get_policy() - policies = list(filter(lambda p: p.state == "Running" or p.state == - "Measuring", policies)) - if len(policies) >= 1: - return policies[0], False - else: - return self.create_policy("vnx_policy"), True - - def add_lun_to_ioclass(self, ioclass_name, lun_id): - ioclass = self.vnx.get_ioclass(name=ioclass_name) - ioclass.add_lun(lun_id) diff --git a/cinder/volume/drivers/dell_emc/vnx/common.py b/cinder/volume/drivers/dell_emc/vnx/common.py deleted file mode 100644 index e9f079e4a..000000000 --- a/cinder/volume/drivers/dell_emc/vnx/common.py +++ /dev/null @@ -1,550 +0,0 @@ -# Copyright (c) 2016 EMC Corporation. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. 
You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -""" -VNX Common Utils -""" - -from oslo_config import cfg -from oslo_log import log as logging -from oslo_utils import importutils - -from cinder import exception -from cinder.i18n import _ -from cinder.volume import configuration -from cinder.volume.drivers.dell_emc.vnx import const -from cinder.volume import group_types -from cinder.volume import volume_types - -storops = importutils.try_import('storops') -CONF = cfg.CONF - -LOG = logging.getLogger(__name__) - -DEFAULT_TIMEOUT = 60 * 60 * 24 * 365 - -INTERVAL_5_SEC = 5 -INTERVAL_20_SEC = 20 -INTERVAL_30_SEC = 30 -INTERVAL_60_SEC = 60 - -SNAP_EXPIRATION_HOUR = '1h' - - -BACKEND_QOS_CONSUMERS = frozenset(['back-end', 'both']) -QOS_MAX_IOPS = 'maxIOPS' -QOS_MAX_BWS = 'maxBWS' - - -VNX_OPTS = [ - cfg.StrOpt('storage_vnx_authentication_type', - default='global', - help='VNX authentication scope type. ' - 'By default, the value is global.'), - cfg.StrOpt('storage_vnx_security_file_dir', - help='Directory path that contains the VNX security file. ' - 'Make sure the security file is generated first.'), - cfg.StrOpt('naviseccli_path', - help='Naviseccli Path.'), - cfg.ListOpt('storage_vnx_pool_names', - help='Comma-separated list of storage pool names to be used.'), - cfg.IntOpt('default_timeout', - default=DEFAULT_TIMEOUT, - help='Default timeout for CLI operations in minutes. ' - 'For example, LUN migration is a typical long ' - 'running operation, which depends on the LUN size and ' - 'the load of the array. ' - 'An upper bound in the specific deployment can be set to ' - 'avoid unnecessary long wait. 
' - 'By default, it is 365 days long.'), - cfg.IntOpt('max_luns_per_storage_group', - default=255, - help='Default max number of LUNs in a storage group.' - ' By default, the value is 255.'), - cfg.BoolOpt('destroy_empty_storage_group', - default=False, - help='To destroy storage group ' - 'when the last LUN is removed from it. ' - 'By default, the value is False.'), - # iscsi_initiators is a dict which key is string and value is a list. - # This could be a DictOpt. Unfortunately DictOpt doesn't support the value - # of list type. - cfg.StrOpt('iscsi_initiators', - help='Mapping between hostname and ' - 'its iSCSI initiator IP addresses.'), - cfg.ListOpt('io_port_list', - help='Comma separated iSCSI or FC ports ' - 'to be used in Nova or Cinder.'), - cfg.BoolOpt('initiator_auto_registration', - default=False, - help='Automatically register initiators. ' - 'By default, the value is False.'), - cfg.BoolOpt('initiator_auto_deregistration', - default=False, - help='Automatically deregister initiators after the related ' - 'storage group is destroyed. ' - 'By default, the value is False.'), - cfg.BoolOpt('check_max_pool_luns_threshold', - default=False, - help='Report free_capacity_gb as 0 when the limit to ' - 'maximum number of pool LUNs is reached. ' - 'By default, the value is False.'), - cfg.BoolOpt('force_delete_lun_in_storagegroup', - default=False, - help='Delete a LUN even if it is in Storage Groups. ' - 'By default, the value is False.'), - cfg.BoolOpt('ignore_pool_full_threshold', - default=False, - help='Force LUN creation even if ' - 'the full threshold of pool is reached. 
' - 'By default, the value is False.') -] - -CONF.register_opts(VNX_OPTS, group=configuration.SHARED_CONF_GROUP) - - -PROTOCOL_FC = 'fc' -PROTOCOL_ISCSI = 'iscsi' - - -class ExtraSpecs(object): - _provision_key = 'provisioning:type' - _tier_key = 'storagetype:tiering' - _replication_key = 'replication_enabled' - - PROVISION_DEFAULT = const.PROVISION_THICK - TIER_DEFAULT = None - - def __init__(self, extra_specs, group_specs=None): - self.specs = extra_specs - self._provision = self._get_provision() - self.provision = self._provision - self._tier = self._get_tier() - self.tier = self._tier - self.apply_default_values() - self.group_specs = group_specs if group_specs else {} - - def apply_default_values(self): - self.provision = (ExtraSpecs.PROVISION_DEFAULT - if self.provision is None - else self.provision) - # Can not set Tier when provision is set to deduped. So don't set the - # tier default when provision is deduped. - if self.provision != storops.VNXProvisionEnum.DEDUPED: - self.tier = (ExtraSpecs.TIER_DEFAULT if self.tier is None - else self.tier) - - @classmethod - def set_defaults(cls, provision_default, tier_default): - cls.PROVISION_DEFAULT = provision_default - cls.TIER_DEFAULT = tier_default - - def _get_provision(self): - value = self._parse_to_enum(self._provision_key, - storops.VNXProvisionEnum) - return value - - def _get_tier(self): - return self._parse_to_enum(self._tier_key, storops.VNXTieringEnum) - - @property - def is_replication_enabled(self): - return self.specs.get('replication_enabled', '').lower() == ' true' - - @property - def is_group_replication_enabled(self): - return self.group_specs.get( - 'consistent_group_replication_enabled', '').lower() == ' true' - - def _parse_to_enum(self, key, enum_class): - value = (self.specs[key] - if key in self.specs else None) - if value is not None: - try: - value = enum_class.parse(value) - except ValueError: - reason = (_("The value %(value)s for key %(key)s in extra " - "specs is invalid.") % - 
{'key': key, 'value': value}) - raise exception.InvalidVolumeType(reason=reason) - return value - - @classmethod - def from_volume(cls, volume): - specs = {} - type_id = volume['volume_type_id'] - if type_id is not None: - specs = volume_types.get_volume_type_extra_specs(type_id) - - return cls(specs) - - @classmethod - def from_group(cls, group): - group_specs = {} - - if group and group.group_type_id: - group_specs = group_types.get_group_type_specs( - group.group_type_id) - - return cls(extra_specs={}, group_specs=group_specs) - - @classmethod - def from_volume_type(cls, type): - return cls(type['extra_specs']) - - @classmethod - def from_lun(cls, lun): - ex = cls({}) - ex.provision = lun.provision - ex.tier = (lun.tier - if lun.provision != storops.VNXProvisionEnum.DEDUPED - else None) - return ex - - def match_with_lun(self, lun): - ex = ExtraSpecs.from_lun(lun) - return (self.provision == ex.provision and - self.tier == ex.tier) - - def validate(self, enabler_status): - """Checks whether the extra specs are valid. - - :param enabler_status: Instance of VNXEnablerStatus - """ - if "storagetype:pool" in self.specs: - LOG.warning("Extra spec key 'storagetype:pool' is obsoleted " - "since driver version 5.1.0. This key will be " - "ignored.") - - if (self._provision == storops.VNXProvisionEnum.DEDUPED and - self._tier is not None): - msg = _("Can not set tiering policy for a deduplicated volume. " - "Set the tiering policy on the pool where the " - "deduplicated volume locates.") - raise exception.InvalidVolumeType(reason=msg) - - if (self._provision == storops.VNXProvisionEnum.COMPRESSED - and not enabler_status.compression_enabled): - msg = _("Compression Enabler is not installed. " - "Can not create compressed volume.") - raise exception.InvalidVolumeType(reason=msg) - - if (self._provision == storops.VNXProvisionEnum.DEDUPED - and not enabler_status.dedup_enabled): - msg = _("Deduplication Enabler is not installed. 
" - "Can not create deduplicated volume.") - raise exception.InvalidVolumeType(reason=msg) - - if (self._provision in [storops.VNXProvisionEnum.THIN, - storops.VNXProvisionEnum.COMPRESSED, - storops.VNXProvisionEnum.DEDUPED] - and not enabler_status.thin_enabled): - msg = _("ThinProvisioning Enabler is not installed. " - "Can not create thin volume.") - raise exception.InvalidVolumeType(reason=msg) - - if (self._tier is not None - and not enabler_status.fast_enabled): - msg = _("FAST VP Enabler is not installed. " - "Can not set tiering policy for the volume.") - raise exception.InvalidVolumeType(reason=msg) - return True - - def __len__(self): - return len(self.specs) - - def __getitem__(self, key): - return self.specs[key] - - def __iter__(self): - return iter(self.specs) - - def __contains__(self, item): - return item in self.specs - - def __eq__(self, other): - if isinstance(other, ExtraSpecs): - return self.specs == other.specs - elif isinstance(other, dict): - return self.specs == other - else: - return False - - def __hash__(self): - return self.specs.__hash__() - - -class LUNState(object): - INITIALIZING = 'Initializing' - READY = 'Ready' - FAULTED = 'Faulted' - - -class PoolState(object): - INITIALIZING = 'Initializing' - OFFLINE = 'Offline' - DELETING = 'Deleting' - VALID_CREATE_LUN_STATE = (INITIALIZING, OFFLINE, DELETING) - - -class VNXEnablerStatus(object): - - def __init__(self, - dedup=False, - compression=False, - fast=False, - thin=False, - snap=False): - self.dedup_enabled = dedup - self.compression_enabled = compression - self.fast_enabled = fast - self.thin_enabled = thin - self.snap_enabled = snap - - -class WaitUtilTimeoutException(exception.VolumeDriverException): - """Raised when timeout occurs in wait_until.""" - # TODO(Ryan) put this exception under Cinder shared module. 
- pass - - -class Host(object): - """The model of a host which acts as an initiator to access the storage.""" - - def __init__(self, name, initiators, ip=None, wwpns=None): - # ip and wwpns are optional. - self.name = name - if not self.name: - raise ValueError(('Name of host cannot be empty.')) - self.initiators = initiators - if not self.initiators: - raise ValueError(_('Initiators of host cannot be empty.')) - self.ip = ip - self.wwpns = wwpns - - -class Volume(object): - """The internal volume which is used to pass in method call.""" - - def __init__(self, name, id, vnx_lun_id=None): - self.name = name - self.id = id - self.vnx_lun_id = vnx_lun_id - - -class ISCSITargetData(dict): - def __init__(self, volume_id, is_discovered, iqn='unknown', iqns=None, - portal='unknown', portals=None, lun='unknown', luns=None): - data = {'volume_id': volume_id, 'target_discovered': is_discovered, - 'target_iqn': iqn, 'target_iqns': iqns, - 'target_portal': portal, 'target_portals': portals, - 'target_lun': lun, 'target_luns': luns} - self['driver_volume_type'] = 'iscsi' - self['data'] = data - - def to_dict(self): - """Converts to the dict. - - It helps serialize and deserialize the data before returning to nova. - """ - return {key: value for (key, value) in self.items()} - - -class FCTargetData(dict): - def __init__(self, volume_id, is_discovered, wwn=None, lun=None, - initiator_target_map=None): - data = {'volume_id': volume_id, 'target_discovered': is_discovered, - 'target_lun': lun, 'target_wwn': wwn, - 'initiator_target_map': initiator_target_map} - self['driver_volume_type'] = 'fibre_channel' - self['data'] = data - - def to_dict(self): - """Converts to the dict. - - It helps serialize and deserialize the data before returning to nova. 
- """ - return {key: value for (key, value) in self.items()} - - -class ReplicationDevice(object): - def __init__(self, replication_device): - self.replication_device = replication_device - - @property - def backend_id(self): - return self.replication_device.get('backend_id') - - @property - def san_ip(self): - return self.replication_device.get('san_ip') - - @property - def san_login(self): - return self.replication_device.get('san_login') - - @property - def san_password(self): - return self.replication_device.get('san_password') - - @property - def storage_vnx_authentication_type(self): - return self.replication_device.get( - 'storage_vnx_authentication_type', - 'global') - - @property - def storage_vnx_security_file_dir(self): - return self.replication_device.get('storage_vnx_security_file_dir') - - @property - def pool_name(self): - return self.replication_device.get('pool_name', None) - - -class ReplicationDeviceList(list): - """Replication devices configured in cinder.conf - - Cinder supports multiple replication_device, while VNX driver - only support one replication_device for now. 
- """ - - def __init__(self, configuration): - self.list = [] - self.configuration = configuration - self._device_map = dict() - self.parse_configuration() - - def parse_configuration(self): - if self.configuration.replication_device: - for replication_device in self.configuration.replication_device: - rd = ReplicationDevice(replication_device) - if not rd.backend_id or not rd.san_ip: - msg = _('backend_id or san_ip cannot be empty for ' - 'replication_device.') - raise exception.InvalidInput(reason=msg) - self._device_map[rd.backend_id] = rd - self.list.append(rd) - return self._device_map - - def get_device(self, backend_id): - try: - device = self._device_map[backend_id] - except KeyError: - device = None - LOG.warning('Unable to find secondary device named: %s', - backend_id) - return device - - @property - def devices(self): - return self._device_map.values() - - def __len__(self): - return len(self.list) - - def __iter__(self): - self._iter = self.list.__iter__() - return self - - def next(self): - return next(self._iter) - - def __next__(self): - return self.next() - - def __getitem__(self, item): - return self.list[item] - - @classmethod - def get_backend_ids(cls, config): - """Returns all configured device_id.""" - rep_list = cls(config) - backend_ids = [] - for item in rep_list.devices: - backend_ids.append(item.backend_id) - return backend_ids - - -class VNXMirrorView(object): - def __init__(self, primary_client, secondary_client): - self.primary_client = primary_client - self.secondary_client = secondary_client - - def create_mirror(self, name, primary_lun_id): - self.primary_client.create_mirror(name, primary_lun_id) - - def create_secondary_lun(self, pool_name, lun_name, size, provision, tier): - return self.secondary_client.create_lun( - pool_name, lun_name, size, provision, tier) - - def delete_secondary_lun(self, lun_name): - self.secondary_client.delete_lun(lun_name) - - def delete_mirror(self, mirror_name): - 
self.primary_client.delete_mirror(mirror_name) - - def add_image(self, mirror_name, secondary_lun_id): - sp_ip = self.secondary_client.get_available_ip() - self.primary_client.add_image(mirror_name, sp_ip, secondary_lun_id) - - def remove_image(self, mirror_name): - self.primary_client.remove_image(mirror_name) - - def fracture_image(self, mirror_name): - self.primary_client.fracture_image(mirror_name) - - def promote_image(self, mirror_name): - """Promote the image on the secondary array.""" - self.secondary_client.promote_image(mirror_name) - - def destroy_mirror(self, mirror_name, secondary_lun_name): - """Destroy the mirror view's related VNX objects. - - NOTE: primary lun will not be deleted here. - :param mirror_name: name of mirror to be destroyed - :param secondary_lun_name: name of LUN name - """ - mv = self.primary_client.get_mirror(mirror_name) - if not mv.existed: - # We will skip the mirror operations if not existed - LOG.warning('Mirror view %s was deleted already.', - mirror_name) - return - self.fracture_image(mirror_name) - self.remove_image(mirror_name) - self.delete_mirror(mirror_name) - self.delete_secondary_lun(lun_name=secondary_lun_name) - - def create_mirror_group(self, group_name): - return self.primary_client.create_mirror_group(group_name) - - def delete_mirror_group(self, group_name): - return self.primary_client.delete_mirror_group(group_name) - - def add_mirror(self, group_name, mirror_name): - return self.primary_client.add_mirror(group_name, mirror_name) - - def remove_mirror(self, group_name, mirror_name): - return self.primary_client.remove_mirror(group_name, mirror_name) - - def sync_mirror_group(self, group_name): - return self.primary_client.sync_mirror_group(group_name) - - def promote_mirror_group(self, group_name): - """Promote the mirror group on the secondary array.""" - return self.secondary_client.promote_mirror_group(group_name) - - def fracture_mirror_group(self, group_name): - return 
self.primary_client.fracture_mirror_group(group_name) diff --git a/cinder/volume/drivers/dell_emc/vnx/const.py b/cinder/volume/drivers/dell_emc/vnx/const.py deleted file mode 100644 index 39b2c9170..000000000 --- a/cinder/volume/drivers/dell_emc/vnx/const.py +++ /dev/null @@ -1,39 +0,0 @@ -# Copyright (c) 2016 EMC Corporation. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -""" -VNX Constants - -This module includes re-declaration from storops which directly used -by driver in module scope. That's to say: -If a constant from storops is used in class level, function signature, -module level, a re-declaration is needed in this file to avoid some static -import error when storops is not installed. 
-""" - -from oslo_utils import importutils - -storops = importutils.try_import('storops') - -if storops: - from storops import exception as storops_ex - VNXLunPreparingError = storops_ex.VNXLunPreparingError - VNXTargetNotReadyError = storops_ex.VNXTargetNotReadyError - MIGRATION_RATE_HIGH = storops.VNXMigrationRate.HIGH - PROVISION_THICK = storops.VNXProvisionEnum.THICK -else: - VNXLunPreparingError = None - MIGRATION_RATE_HIGH = None - PROVISION_THICK = None - VNXTargetNotReadyError = None diff --git a/cinder/volume/drivers/dell_emc/vnx/driver.py b/cinder/volume/drivers/dell_emc/vnx/driver.py deleted file mode 100644 index 375588264..000000000 --- a/cinder/volume/drivers/dell_emc/vnx/driver.py +++ /dev/null @@ -1,358 +0,0 @@ -# Copyright (c) 2016 EMC Corporation. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -"""Cinder Driver for EMC VNX based on CLI.""" - -from oslo_log import log as logging - -from cinder import interface -from cinder.volume import driver -from cinder.volume.drivers.dell_emc.vnx import adapter -from cinder.volume.drivers.dell_emc.vnx import common -from cinder.volume.drivers.dell_emc.vnx import utils -from cinder.zonemanager import utils as zm_utils - - -LOG = logging.getLogger(__name__) - - -@interface.volumedriver -class VNXDriver(driver.ManageableVD, - driver.ManageableSnapshotsVD, - driver.MigrateVD, - driver.BaseVD): - """Dell EMC Cinder Driver for VNX using CLI. 
- - Version history: - 1.0.0 - Initial driver - 2.0.0 - Thick/thin provisioning, robust enhancement - 3.0.0 - Array-based Backend Support, FC Basic Support, - Target Port Selection for MPIO, - Initiator Auto Registration, - Storage Group Auto Deletion, - Multiple Authentication Type Support, - Storage-Assisted Volume Migration, - SP Toggle for HA - 3.0.1 - Security File Support - 4.0.0 - Advance LUN Features (Compression Support, - Deduplication Support, FAST VP Support, - FAST Cache Support), Storage-assisted Retype, - External Volume Management, Read-only Volume, - FC Auto Zoning - 4.1.0 - Consistency group support - 5.0.0 - Performance enhancement, LUN Number Threshold Support, - Initiator Auto Deregistration, - Force Deleting LUN in Storage Groups, - robust enhancement - 5.1.0 - iSCSI multipath enhancement - 5.2.0 - Pool-aware scheduler support - 5.3.0 - Consistency group modification support - 6.0.0 - Over subscription support - Create consistency group from cgsnapshot support - Multiple pools support enhancement - Manage/unmanage volume revise - White list target ports support - Snap copy support - Support efficient non-disruptive backup - 7.0.0 - Clone consistency group support - Replication v2 support(managed) - Configurable migration rate support - 8.0.0 - New VNX Cinder driver - 9.0.0 - Use asynchronous migration for cloning - 10.0.0 - Extend SMP size before aync migration when cloning from an - image cache volume - 10.1.0 - Add QoS support - 10.2.0 - Add replication group support - """ - - VERSION = '10.02.00' - VENDOR = 'Dell EMC' - # ThirdPartySystems wiki page - CI_WIKI_NAME = "EMC_VNX_CI" - - def __init__(self, *args, **kwargs): - super(VNXDriver, self).__init__(*args, **kwargs) - utils.init_ops(self.configuration) - self.protocol = self.configuration.storage_protocol.lower() - self.active_backend_id = kwargs.get('active_backend_id', None) - self.adapter = None - self._stats = {} - - def do_setup(self, context): - if self.protocol == 
common.PROTOCOL_FC: - self.adapter = adapter.FCAdapter(self.configuration, - self.active_backend_id) - else: - self.adapter = adapter.ISCSIAdapter(self.configuration, - self.active_backend_id) - self.adapter.VERSION = self.VERSION - self.adapter.do_setup() - - def check_for_setup_error(self): - pass - - def create_volume(self, volume): - """Creates a volume.""" - return self.adapter.create_volume(volume) - - def create_volume_from_snapshot(self, volume, snapshot): - """Creates a volume from a snapshot.""" - return self.adapter.create_volume_from_snapshot(volume, snapshot) - - def create_cloned_volume(self, volume, src_vref): - """Creates a cloned volume.""" - return self.adapter.create_cloned_volume(volume, src_vref) - - def extend_volume(self, volume, new_size): - """Extend a volume.""" - self.adapter.extend_volume(volume, new_size) - - def delete_volume(self, volume): - """Deletes a volume.""" - self.adapter.delete_volume(volume) - - def migrate_volume(self, ctxt, volume, host): - """Migrate volume via EMC migration functionality.""" - return self.adapter.migrate_volume(ctxt, volume, host) - - def retype(self, ctxt, volume, new_type, diff, host): - """Convert the volume to be of the new type.""" - return self.adapter.retype(ctxt, volume, new_type, diff, host) - - def create_snapshot(self, snapshot): - """Creates a snapshot.""" - self.adapter.create_snapshot(snapshot) - - def delete_snapshot(self, snapshot): - """Deletes a snapshot.""" - self.adapter.delete_snapshot(snapshot) - - def ensure_export(self, context, volume): - """Driver entry point to get the export info for an existing volume.""" - pass - - def create_export(self, context, volume, connector): - """Driver entry point to get the export info for a new volume.""" - pass - - def remove_export(self, context, volume): - """Driver entry point to remove an export for a volume.""" - pass - - def check_for_export(self, context, volume_id): - """Make sure volume is exported.""" - pass - - @zm_utils.add_fc_zone - 
def initialize_connection(self, volume, connector): - """Initializes the connection and returns connection info. - - Assign any created volume to a compute node/host so that it can be - used from that host. - - The driver returns a driver_volume_type of 'fibre_channel'. - The target_wwn can be a single entry or a list of wwns that - correspond to the list of remote wwn(s) that will export the volume. - The initiator_target_map is a map that represents the remote wwn(s) - and a list of wwns which are visible to the remote wwn(s). - Example return values: - FC: - { - 'driver_volume_type': 'fibre_channel' - 'data': { - 'target_discovered': True, - 'target_lun': 1, - 'target_wwn': ['1234567890123', '0987654321321'], - 'initiator_target_map': { - '1122334455667788': ['1234567890123', - '0987654321321'] - } - } - } - iSCSI: - { - 'driver_volume_type': 'iscsi' - 'data': { - 'target_discovered': True, - 'target_iqns': ['iqn.2010-10.org.openstack:volume-00001', - 'iqn.2010-10.org.openstack:volume-00002'], - 'target_portals': ['127.0.0.1:3260', '127.0.1.1:3260'], - 'target_luns': [1, 1], - } - } - """ - LOG.debug("Entering initialize_connection" - " - connector: %(connector)s.", - {'connector': connector}) - conn_info = self.adapter.initialize_connection(volume, - connector) - LOG.debug("Exit initialize_connection" - " - Returning connection info: %(conn_info)s.", - {'conn_info': conn_info}) - return conn_info - - @zm_utils.remove_fc_zone - def terminate_connection(self, volume, connector, **kwargs): - """Disallow connection from connector.""" - LOG.debug("Entering terminate_connection" - " - connector: %(connector)s.", - {'connector': connector}) - conn_info = self.adapter.terminate_connection(volume, connector) - LOG.debug("Exit terminate_connection" - " - Returning connection info: %(conn_info)s.", - {'conn_info': conn_info}) - return conn_info - - def get_volume_stats(self, refresh=False): - """Get volume stats. 
- - :param refresh: True to get updated data - """ - if refresh: - self.update_volume_stats() - - return self._stats - - def update_volume_stats(self): - """Retrieve stats info from volume group.""" - LOG.debug("Updating volume stats.") - self._stats = self.adapter.update_volume_stats() - self._stats['driver_version'] = self.VERSION - self._stats['vendor_name'] = self.VENDOR - - def manage_existing(self, volume, existing_ref): - """Manage an existing lun in the array. - - The lun should be in a manageable pool backend, otherwise - error would return. - Rename the backend storage object so that it matches the, - volume['name'] which is how drivers traditionally map between a - cinder volume and the associated backend storage object. - - manage_existing_ref:{ - 'source-id': - } - or - manage_existing_ref:{ - 'source-name': - } - """ - return self.adapter.manage_existing(volume, existing_ref) - - def manage_existing_get_size(self, volume, existing_ref): - """Return size of volume to be managed by manage_existing.""" - return self.adapter.manage_existing_get_size(volume, existing_ref) - - def get_pool(self, volume): - """Returns the pool name of a volume.""" - return self.adapter.get_pool_name(volume) - - def unmanage(self, volume): - """Unmanages a volume.""" - return self.adapter.unmanage(volume) - - def update_migrated_volume(self, context, volume, new_volume, - original_volume_status=None): - """Returns model update for migrated volume.""" - return self.adapter.update_migrated_volume(context, volume, new_volume, - original_volume_status) - - def create_export_snapshot(self, context, snapshot, connector): - """Creates a snapshot mount point for snapshot.""" - return self.adapter.create_export_snapshot( - context, snapshot, connector) - - def remove_export_snapshot(self, context, snapshot): - """Removes snapshot mount point for snapshot.""" - return self.adapter.remove_export_snapshot(context, snapshot) - - def initialize_connection_snapshot(self, snapshot, 
connector, **kwargs): - """Allows connection to snapshot.""" - return self.adapter.initialize_connection_snapshot(snapshot, - connector, - **kwargs) - - def terminate_connection_snapshot(self, snapshot, connector, **kwargs): - """Disallows connection to snapshot.""" - return self.adapter.terminate_connection_snapshot(snapshot, - connector, - **kwargs) - - def backup_use_temp_snapshot(self): - return True - - def failover_host(self, context, volumes, secondary_id=None, groups=None): - """Fail-overs volumes from primary device to secondary.""" - return self.adapter.failover_host(context, volumes, secondary_id, - groups) - - @utils.require_consistent_group_snapshot_enabled - def create_group(self, context, group): - """Creates a group.""" - return self.adapter.create_group(context, group) - - @utils.require_consistent_group_snapshot_enabled - def delete_group(self, context, group, volumes): - """Deletes a group.""" - return self.adapter.delete_group( - context, group, volumes) - - @utils.require_consistent_group_snapshot_enabled - def update_group(self, context, group, - add_volumes=None, remove_volumes=None): - """Updates a group.""" - return self.adapter.update_group(context, group, - add_volumes, - remove_volumes) - - @utils.require_consistent_group_snapshot_enabled - def create_group_from_src(self, context, group, volumes, - group_snapshot=None, snapshots=None, - source_group=None, source_vols=None): - """Creates a group from source.""" - if group_snapshot: - return self.adapter.create_group_from_group_snapshot( - context, group, volumes, group_snapshot, snapshots) - elif source_group: - return self.adapter.create_cloned_group( - context, group, volumes, source_group, source_vols) - - @utils.require_consistent_group_snapshot_enabled - def create_group_snapshot(self, context, group_snapshot, snapshots): - """Creates a group_snapshot.""" - return self.adapter.create_group_snapshot( - context, group_snapshot, snapshots) - - 
@utils.require_consistent_group_snapshot_enabled - def delete_group_snapshot(self, context, group_snapshot, snapshots): - """Deletes a group_snapshot.""" - return self.adapter.delete_group_snapshot( - context, group_snapshot, snapshots) - - def is_consistent_group_snapshot_enabled(self): - return self._stats.get('consistent_group_snapshot_enabled') - - def enable_replication(self, context, group, volumes): - return self.adapter.enable_replication(context, group, volumes) - - def disable_replication(self, context, group, volumes): - return self.adapter.disable_replication(context, group, volumes) - - def failover_replication(self, context, group, volumes, - secondary_backend_id): - return self.adapter.failover_replication( - context, group, volumes, secondary_backend_id) - - def get_replication_error_status(self, context, groups): - return self.adapter.get_replication_error_status(context, groups) diff --git a/cinder/volume/drivers/dell_emc/vnx/replication.py b/cinder/volume/drivers/dell_emc/vnx/replication.py deleted file mode 100644 index ce10a7067..000000000 --- a/cinder/volume/drivers/dell_emc/vnx/replication.py +++ /dev/null @@ -1,357 +0,0 @@ -# Copyright (c) 2017 Dell Inc. or its subsidiaries. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -from oslo_log import log as logging -from oslo_utils import importutils - -from cinder import exception -from cinder.i18n import _ -from cinder.objects import fields -from cinder.volume.drivers.dell_emc.vnx import client -from cinder.volume.drivers.dell_emc.vnx import common -from cinder.volume.drivers.dell_emc.vnx import taskflows as emc_taskflow -from cinder.volume.drivers.dell_emc.vnx import utils - -storops = importutils.try_import('storops') -if storops: - from storops import exception as storops_ex - - -LOG = logging.getLogger(__name__) - - -class ReplicationAdapter(object): - - def __init__(self, client=None, config=None): - self.client = client - self.config = config - self.mirror_view = None - - def do_setup(self): - pass - - def setup_lun_replication(self, volume, primary_lun_id): - """Setup replication for LUN, this only happens in primary system.""" - specs = common.ExtraSpecs.from_volume(volume) - provision = specs.provision - tier = specs.tier - rep_update = {'replication_driver_data': None, - 'replication_status': fields.ReplicationStatus.DISABLED} - mirror_name = utils.construct_mirror_name(volume) - - if specs.is_replication_enabled: - LOG.debug('Starting setup replication ' - 'for volume: %s.', volume.id) - lun_size = volume.size - pool_name = utils.get_remote_pool(self.config, volume) - emc_taskflow.create_mirror_view( - self.mirror_view, mirror_name, - primary_lun_id, pool_name, - volume.name, lun_size, - provision, tier) - LOG.info('Successfully setup replication for %s.', volume.id) - rep_update.update({'replication_status': - fields.ReplicationStatus.ENABLED}) - group_specs = common.ExtraSpecs.from_group(volume.group) - if volume.group and group_specs.is_group_replication_enabled: - # If in a group, add it to group then. 
- LOG.debug('Starting add volume %(volume)s to group %(group)s', - {'volume': volume.id, 'group': volume.group.id}) - group_name = utils.construct_group_name(volume.group) - self.client.add_mirror(group_name, mirror_name) - - return rep_update - - def create_group_replication(self, group): - rep_update = {'replication_status': group.replication_status} - - group_specs = common.ExtraSpecs.from_group(group) - if group_specs.is_group_replication_enabled: - group_name = utils.construct_group_name(group) - self.client.create_mirror_group(group_name) - rep_update['replication_status'] = ( - fields.ReplicationStatus.ENABLED) - return rep_update - - def add_volumes_to_group_replication(self, group, volumes): - group_specs = common.ExtraSpecs.from_group(group) - if group_specs.is_group_replication_enabled: - group_name = utils.construct_group_name(group) - for volume in volumes: - mirror_name = utils.construct_mirror_name(volume) - self.client.add_mirror(group_name, mirror_name) - - def delete_group_replication(self, group): - group_specs = common.ExtraSpecs.from_group(group) - if group_specs.is_group_replication_enabled: - group_name = utils.construct_group_name(group) - self.client.delete_mirror_group(group_name) - - def remove_volumes_from_group_replication(self, group, volumes): - group_name = utils.construct_group_name(group) - group_specs = common.ExtraSpecs.from_group(group) - if group_specs.is_group_replication_enabled: - for volume in volumes: - mirror_name = utils.construct_mirror_name(volume) - self.client.remove_mirror(group_name, mirror_name) - - def cleanup_lun_replication(self, volume): - specs = common.ExtraSpecs.from_volume(volume) - - group_specs = common.ExtraSpecs.from_group(volume.group) - if group_specs.is_group_replication_enabled: - # If in a group, remove from group first. 
- group_name = utils.construct_group_name(volume.group) - mirror_name = utils.construct_mirror_name(volume) - self.client.remove_mirror(group_name, mirror_name) - - if specs.is_replication_enabled: - LOG.debug('Starting cleanup replication for volume: ' - '%s.', volume.id) - mirror_name = utils.construct_mirror_name(volume) - mirror_view = self.build_mirror_view(self.config, True) - mirror_view.destroy_mirror(mirror_name, volume.name) - LOG.info( - 'Successfully destroyed replication for volume: %s', - volume.id) - - def append_replication_stats(self, stats): - if self.mirror_view: - stats['replication_enabled'] = True - stats['group_replication_enabled'] = False - stats['consistent_group_replication_enabled'] = True - stats['replication_count'] = 1 - stats['replication_type'] = ['sync'] - else: - stats['replication_enabled'] = False - stats['replication_targets'] = [ - device.backend_id for device in common.ReplicationDeviceList( - self.config)] - - def build_mirror_view(self, configuration, failover=True): - """Builds a mirror view operation class. - - :param configuration: driver configuration - :param failover: True if from primary to configured array, - False if from configured array to primary. 
- """ - rep_devices = configuration.replication_device - if not rep_devices: - LOG.info('Replication is not configured on backend: %s.', - configuration.config_group) - return None - elif len(rep_devices) == 1: - if not self.client.is_mirror_view_enabled(): - error_msg = _('Replication is configured, ' - 'but no MirrorView/S enabler installed on VNX.') - raise exception.InvalidInput(reason=error_msg) - rep_list = common.ReplicationDeviceList(configuration) - device = rep_list[0] - secondary_client = client.Client( - ip=device.san_ip, - username=device.san_login, - password=device.san_password, - scope=device.storage_vnx_authentication_type, - naviseccli=self.client.naviseccli, - sec_file=device.storage_vnx_security_file_dir) - if failover: - mirror_view = common.VNXMirrorView( - self.client, secondary_client) - else: - # For fail-back, we need to take care of reversed ownership. - mirror_view = common.VNXMirrorView( - secondary_client, self.client) - return mirror_view - else: - error_msg = _('VNX Cinder driver does not support ' - 'multiple replication targets.') - raise exception.InvalidInput(reason=error_msg) - - def validate_backend_id(self, backend_id): - # Currently, VNX driver only supports 1 remote device. - if self.active_backend_id: - if backend_id != 'default': - raise exception.InvalidReplicationTarget( - reason=_('Invalid backend_id specified.')) - elif backend_id not in ( - common.ReplicationDeviceList.get_backend_ids(self.config)): - raise exception.InvalidReplicationTarget( - reason=_('Invalid backend_id specified.')) - - def failover_host(self, context, volumes, secondary_backend_id, groups): - """Fails over the volume back and forth. - - Driver needs to update following info for failed-over volume: - 1. provider_location: update serial number and lun id - 2. 
replication_status: new status for replication-enabled volume - """ - volume_update_list = [] - group_update_list = [] - self.validate_backend_id(secondary_backend_id) - - if secondary_backend_id != 'default': - rep_status = fields.ReplicationStatus.FAILED_OVER - mirror_view = self.build_mirror_view(self.config, True) - else: - rep_status = fields.ReplicationStatus.ENABLED - mirror_view = self.build_mirror_view(self.config, False) - - def failover_volume(volume, new_status): - mirror_name = utils.construct_mirror_name(volume) - - provider_location = volume.provider_location - try: - mirror_view.promote_image(mirror_name) - except storops_ex.VNXMirrorException as ex: - LOG.error( - 'Failed to failover volume %(volume_id)s ' - 'to %(target)s: %(error)s.', - {'volume_id': volume.id, - 'target': secondary_backend_id, - 'error': ex}) - new_status = fields.ReplicationStatus.FAILOVER_ERROR - else: - # Transfer ownership to secondary_backend_id and - # update provider_location field - secondary_client = mirror_view.secondary_client - provider_location = utils.update_remote_provider_location( - volume, secondary_client) - - model_update = {'volume_id': volume.id, - 'updates': - {'replication_status': new_status, - 'provider_location': provider_location}} - volume_update_list.append(model_update) - - # Fail over groups if needed. - def failover_group(group): - is_failover_needed = False - if (secondary_backend_id != 'default' and - group.replication_status == - fields.ReplicationStatus.ENABLED): - # Group is on the primary VNX, failover is needed. - LOG.info('%(group_id)s will be failed over to secondary' - '%(secondary_backend_id)s.', - {'group_id': group.id, - 'secondary_backend_id': secondary_backend_id}) - is_failover_needed = True - if (secondary_backend_id == 'default' and - group.replication_status == - fields.ReplicationStatus.FAILED_OVER): - # Group is on the secondary VNX, failover is needed. 
- LOG.info('%(group_id)s will be failed over to primary' - '%(secondary_backend_id)s.', - {'group_id': group.id, - 'secondary_backend_id': secondary_backend_id}) - is_failover_needed = True - if is_failover_needed: - group_update, volume_update_list = self.failover_replication( - context, group, group.volumes, secondary_backend_id) - return ({'group_id': group.id, 'updates': group_update}, - [{'volume_id': vol_update['id'], 'updates': vol_update} - for vol_update in volume_update_list]) - - return [], [] - - for group in groups: - specs = common.ExtraSpecs.from_group(group) - if specs.is_group_replication_enabled: - group_update, vols_in_group_update = failover_group(group) - if group_update: - group_update_list.append(group_update) - volume_update_list.extend(vols_in_group_update) - - # Filter out the volumes in passed-in groups. - group_ids = [group.id for group in groups] - for volume in [volume for volume in volumes - if volume.group_id not in group_ids]: - specs = common.ExtraSpecs.from_volume(volume) - if specs.is_replication_enabled: - failover_volume(volume, rep_status) - - # After failover, the secondary is now the primary, - # any subsequent request will be redirected to it. - self.client = mirror_view.secondary_client - # Remember the current backend id. - self.active_backend_id = (None if secondary_backend_id == 'default' - else secondary_backend_id) - return secondary_backend_id, volume_update_list, group_update_list - - def enable_replication(self, context, group, volumes): - """Enable the group replication. - - Note: this will not interfere with the replication on individual LUNs. - """ - self.create_group_replication(group) - self.add_volumes_to_group_replication(group, volumes) - return {}, [] - - def disable_replication(self, context, group, volumes): - """Disable the group replication. - - Note: This will not disable the replication on individual LUNs. 
- """ - self.remove_volumes_from_group_replication(group, volumes) - self.delete_group_replication(group) - return {}, [] - - def failover_replication(self, context, group, volumes, - secondary_backend_id): - """"Fail-over the consistent mirror group. - - Note: - VNX supports fail over all the mirrors in a group as a whole, - no need to handle each mirror one by one. - """ - volume_update_list = [] - group_update = {'replication_status': group.replication_status} - - if secondary_backend_id != 'default': - mirror_view = self.build_mirror_view(self.config, True) - rep_status = fields.ReplicationStatus.FAILED_OVER - else: - mirror_view = self.build_mirror_view(self.config, False) - rep_status = fields.ReplicationStatus.ENABLED - - # Update volume provider_location - secondary_client = mirror_view.secondary_client - - group_name = utils.construct_group_name(group) - try: - mirror_view.promote_mirror_group(group_name) - except storops_ex.VNXMirrorException as ex: - LOG.error( - 'Failed to failover group %(group_id)s ' - 'to %(target)s: %(error)s.', - {'group_id': group.id, - 'target': secondary_backend_id, - 'error': ex}) - rep_status = fields.ReplicationStatus.FAILOVER_ERROR - - for volume in volumes: - volume_update = { - 'id': volume.id, - 'provider_location': utils.update_remote_provider_location( - volume, secondary_client), - 'replication_status': rep_status} - volume_update_list.append(volume_update) - - group_update['replication_status'] = rep_status - - return group_update, volume_update_list - - def get_replication_error_status(self, context, groups): - """The failover only happens manually, no need to update the status.""" - return [], [] diff --git a/cinder/volume/drivers/dell_emc/vnx/taskflows.py b/cinder/volume/drivers/dell_emc/vnx/taskflows.py deleted file mode 100644 index 2fe7a656c..000000000 --- a/cinder/volume/drivers/dell_emc/vnx/taskflows.py +++ /dev/null @@ -1,623 +0,0 @@ -# Copyright (c) 2016 EMC Corporation. -# All Rights Reserved. 
-# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from oslo_log import log as logging -from oslo_utils import importutils - - -import taskflow.engines -from taskflow.patterns import linear_flow -from taskflow import task -from taskflow.types import failure - -from cinder import exception -from cinder.i18n import _ -from cinder.volume.drivers.dell_emc.vnx import common -from cinder.volume.drivers.dell_emc.vnx import const -from cinder.volume.drivers.dell_emc.vnx import utils - -storops = importutils.try_import('storops') - -LOG = logging.getLogger(__name__) - - -class MigrateLunTask(task.Task): - """Starts a migration between two LUNs/SMPs. 
- - Reversion strategy: Cleanup the migration session - """ - def __init__(self, name=None, provides=None, inject=None, - rebind=None): - super(MigrateLunTask, self).__init__(name=name, - provides=provides, - inject=inject, - rebind=rebind) - - def execute(self, client, src_id, dst_id, async_migrate, *args, **kwargs): - LOG.debug('%s.execute', self.__class__.__name__) - dst_lun = client.get_lun(lun_id=dst_id) - dst_wwn = dst_lun.wwn - client.migrate_lun(src_id, dst_id) - if not async_migrate: - migrated = client.verify_migration(src_id, dst_id, dst_wwn) - if not migrated: - msg = _("Failed to migrate volume between source vol %(src)s" - " and dest vol %(dst)s.") % { - 'src': src_id, 'dst': dst_id} - LOG.error(msg) - raise exception.VolumeBackendAPIException(data=msg) - - def revert(self, result, client, src_id, dst_id, *args, **kwargs): - method_name = '%s.revert' % self.__class__.__name__ - LOG.warning('%(method)s: cleanup migration session: ' - '%(src_id)s -> %(dst_id)s.', - {'method': method_name, - 'src_id': src_id, - 'dst_id': dst_id}) - client.cleanup_migration(src_id, dst_id) - - -class CreateLunTask(task.Task): - """Creates a new lun task. - - Reversion strategy: Delete the lun. 
- """ - def __init__(self, name=None, provides=('new_lun_id', 'new_lun_wwn'), - inject=None): - super(CreateLunTask, self).__init__(name=name, - provides=provides, - inject=inject) - if provides and not isinstance(provides, tuple): - raise ValueError('Only tuple is allowed for [provides].') - - def execute(self, client, pool_name, lun_name, lun_size, - provision, tier, ignore_thresholds=False, - *args, **kwargs): - LOG.debug('%s.execute', self.__class__.__name__) - lun = client.create_lun(pool=pool_name, - name=lun_name, - size=lun_size, - provision=provision, - tier=tier, - ignore_thresholds=ignore_thresholds) - return lun.lun_id, lun.wwn - - def revert(self, result, client, lun_name, *args, **kwargs): - method_name = '%s.revert' % self.__class__.__name__ - if isinstance(result, failure.Failure): - return - else: - LOG.warning('%(method_name)s: delete lun %(lun_name)s', - {'method_name': method_name, 'lun_name': lun_name}) - client.delete_lun(lun_name) - - -class CopySnapshotTask(task.Task): - """Task to copy a volume snapshot/consistency group snapshot. - - Reversion Strategy: Delete the copied snapshot/cgsnapshot - """ - def execute(self, client, snap_name, new_snap_name, - *args, **kwargs): - LOG.debug('%s.execute', self.__class__.__name__) - client.copy_snapshot(snap_name, - new_snap_name) - - def revert(self, result, client, snap_name, new_snap_name, - *args, **kwargs): - method_name = '%s.revert' % self.__class__.__name__ - LOG.warning('%(method_name)s: delete the ' - 'copied snapshot %(new_name)s of ' - '%(source_name)s.', - {'method_name': method_name, - 'new_name': new_snap_name, - 'source_name': snap_name}) - client.delete_snapshot(new_snap_name) - - -class CreateSMPTask(task.Task): - """Creates a snap mount point (SMP) for the source snapshot. - - Reversion strategy: Delete the SMP. 
- """ - def __init__(self, name=None, provides='smp_id', inject=None): - super(CreateSMPTask, self).__init__(name=name, - provides=provides, - inject=inject) - - def execute(self, client, smp_name, base_lun_name, - *args, **kwargs): - LOG.debug('%s.execute', self.__class__.__name__) - - client.create_mount_point(base_lun_name, smp_name) - lun = client.get_lun(name=smp_name) - return lun.lun_id - - def revert(self, result, client, smp_name, *args, **kwargs): - method_name = '%s.revert' % self.__class__.__name__ - LOG.warning('%(method_name)s: delete mount point %(name)s', - {'method_name': method_name, - 'name': smp_name}) - client.delete_lun(smp_name) - - -class AttachSnapTask(task.Task): - """Attaches the snapshot to the SMP created before. - - Reversion strategy: Detach the SMP. - """ - def execute(self, client, smp_name, snap_name, - *args, **kwargs): - LOG.debug('%s.execute', self.__class__.__name__) - client.attach_snapshot(smp_name, snap_name) - - def revert(self, result, client, smp_name, *args, **kwargs): - method_name = '%s.revert' % self.__class__.__name__ - LOG.warning('%(method_name)s: detach mount point %(smp_name)s', - {'method_name': method_name, - 'smp_name': smp_name}) - client.detach_snapshot(smp_name) - - -class CreateSnapshotTask(task.Task): - """Creates a snapshot of a volume. - - Reversion Strategy: Delete the created snapshot. 
- """ - def execute(self, client, snap_name, lun_id, keep_for=None, - *args, **kwargs): - LOG.debug('%s.execute', self.__class__.__name__) - LOG.info('Create snapshot: %(snapshot)s: lun: %(lun)s', - {'snapshot': snap_name, - 'lun': lun_id}) - client.create_snapshot(lun_id, snap_name, keep_for=keep_for) - - def revert(self, result, client, snap_name, *args, **kwargs): - method_name = '%s.revert' % self.__class__.__name__ - LOG.warning('%(method_name)s: ' - 'delete temp snapshot %(snap_name)s', - {'method_name': method_name, - 'snap_name': snap_name}) - client.delete_snapshot(snap_name) - - -class ModifySnapshotTask(task.Task): - """Task to modify a Snapshot to allow ReadWrite on it.""" - def execute(self, client, snap_name, keep_for=None, - *args, **kwargs): - LOG.debug('%s.execute', self.__class__.__name__) - client.modify_snapshot(snap_name, allow_rw=True, keep_for=keep_for) - - def revert(self, result, client, snap_name, *args, **kwargs): - method_name = '%s.revert' % self.__class__.__name__ - LOG.warning('%(method_name)s: ' - 'setting snapshot %(snap_name)s to read-only.', - {'method_name': method_name, - 'snap_name': snap_name}) - client.modify_snapshot(snap_name, allow_rw=False) - - -class WaitMigrationsTask(task.Task): - """Task to wait migrations to be completed.""" - def __init__(self, src_id_template, dst_id_template, - dst_wwn_template, num_of_members, *args, **kwargs): - self.migrate_tuples = [ - (src_id_template % x, dst_id_template % x, dst_wwn_template % x) - for x in range(num_of_members)] - src_id_keys = sorted(set( - [src_id_template % i for i in range(num_of_members)])) - dst_id_keys = sorted(set( - [dst_id_template % i for i in range(num_of_members)])) - dst_wwn_keys = sorted(set( - [dst_wwn_template % i for i in range(num_of_members)])) - - super(WaitMigrationsTask, self).__init__( - requires=(src_id_keys + dst_id_keys + dst_wwn_keys), - *args, **kwargs) - - def execute(self, client, *args, **kwargs): - LOG.debug('%s.execute', 
self.__class__.__name__) - for src_id_key, dst_id_key, dst_wwn_key in self.migrate_tuples: - src_id = kwargs[src_id_key] - dst_id = kwargs[dst_id_key] - dst_wwn = kwargs[dst_wwn_key] - migrated = client.verify_migration(src_id, - dst_id, - dst_wwn) - if not migrated: - msg = _("Failed to migrate volume %(src)s.") % {'src': src_id} - raise exception.VolumeBackendAPIException(data=msg) - - -class CreateConsistencyGroupTask(task.Task): - """Task to create a consistency group.""" - def __init__(self, lun_id_key_template, num_of_members, - *args, **kwargs): - self.lun_id_keys = sorted(set( - [lun_id_key_template % i for i in range(num_of_members)])) - super(CreateConsistencyGroupTask, self).__init__( - requires=self.lun_id_keys, *args, **kwargs) - - def execute(self, client, new_cg_name, *args, **kwargs): - LOG.debug('%s.execute', self.__class__.__name__) - lun_ids = [kwargs[key] for key in self.lun_id_keys] - client.create_consistency_group(new_cg_name, - lun_ids) - - -class CreateCGSnapshotTask(task.Task): - """Task to create a CG snapshot.""" - def __init__(self, provides='new_cg_snap_name', *args, **kwargs): - super(CreateCGSnapshotTask, self).__init__( - provides=provides, *args, **kwargs) - - def execute(self, client, cg_snap_name, cg_name, *args, **kwargs): - LOG.debug('%s.execute', self.__class__.__name__) - return client.create_cg_snapshot(cg_snap_name, cg_name) - - def revert(self, client, cg_snap_name, cg_name, *args, **kwargs): - method_name = '%s.revert' % self.__class__.__name__ - LOG.warning('%(method_name)s: ' - 'deleting CG snapshot %(snap_name)s.', - {'method_name': method_name, - 'snap_name': cg_snap_name}) - client.delete_cg_snapshot(cg_snap_name) - - -class CreateMirrorTask(task.Task): - """Creates a MirrorView with primary lun for replication. - - Reversion strategy: Destroy the created MirrorView. 
- """ - def execute(self, mirror, mirror_name, primary_lun_id, - *args, **kwargs): - LOG.debug('%s.execute', self.__class__.__name__) - mirror.create_mirror(mirror_name, primary_lun_id) - - def revert(self, result, mirror, mirror_name, - *args, **kwargs): - method_name = '%s.revert' % self.__class__.__name__ - LOG.warning('%(method)s: removing mirror ' - 'view %(name)s.', - {'method': method_name, - 'name': mirror_name}) - mirror.delete_mirror(mirror_name) - - -class AddMirrorImageTask(task.Task): - """Add the secondary image to MirrorView. - - Reversion strategy: Remove the secondary image. - """ - def execute(self, mirror, mirror_name, secondary_lun_id, - *args, **kwargs): - LOG.debug('%s.execute', self.__class__.__name__) - mirror.add_image(mirror_name, secondary_lun_id) - - def revert(self, result, mirror, mirror_name, - *args, **kwargs): - method_name = '%s.revert' % self.__class__.__name__ - LOG.warning('%(method)s: removing secondary image ' - 'from %(name)s.', - {'method': method_name, - 'name': mirror_name}) - mirror.remove_image(mirror_name) - - -class ExtendSMPTask(task.Task): - """Extend the SMP if needed. - - If the SMP is thin and the new size is larger than the old one, then - extend it. 
- """ - def execute(self, client, smp_name, lun_size, *args, **kwargs): - LOG.debug('%s.execute', self.__class__.__name__) - smp = client.get_lun(name=smp_name) - if lun_size > smp.total_capacity_gb: - if smp.primary_lun.is_thin_lun: - client.expand_lun(smp_name, lun_size) - else: - LOG.warning('Not extending the SMP: %s, because its base lun ' - 'is not thin.', smp_name) - else: - LOG.info('Not extending the SMP: %(smp)s, size: %(size)s, because' - 'the new size: %(new_size)s is smaller.', - {'smp': smp_name, 'size': smp.total_capacity_gb, - 'new_size': lun_size}) - - -def run_migration_taskflow(client, - lun_id, - lun_name, - lun_size, - pool_name, - provision, - tier, - rate=const.MIGRATION_RATE_HIGH): - # Step 1: create target LUN - # Step 2: start and migrate migration session - tmp_lun_name = utils.construct_tmp_lun_name(lun_name) - flow_name = 'migrate_lun' - store_spec = {'client': client, - 'pool_name': pool_name, - 'lun_name': tmp_lun_name, - 'lun_size': lun_size, - 'provision': provision, - 'tier': tier, - 'ignore_thresholds': True, - 'src_id': lun_id, - 'async_migrate': False, - } - work_flow = linear_flow.Flow(flow_name) - work_flow.add(CreateLunTask(), - MigrateLunTask(rebind={'dst_id': 'new_lun_id'})) - engine = taskflow.engines.load( - work_flow, store=store_spec) - engine.run() - - -def fast_create_volume_from_snapshot(client, - snap_name, - new_snap_name, - lun_name, - base_lun_name, - pool_name): - # Step 1: copy snapshot - # Step 2: allow read/write for snapshot - # Step 3: create smp LUN - # Step 4: attach the snapshot - flow_name = 'create_snapcopy_volume_from_snapshot' - - store_spec = {'client': client, - 'snap_name': snap_name, - 'new_snap_name': new_snap_name, - 'pool_name': pool_name, - 'smp_name': lun_name, - 'base_lun_name': base_lun_name, - 'ignore_thresholds': True, - } - work_flow = linear_flow.Flow(flow_name) - work_flow.add(CopySnapshotTask(), - ModifySnapshotTask(rebind={'snap_name': 'new_snap_name'}), - CreateSMPTask(), - 
AttachSnapTask(rebind={'snap_name': 'new_snap_name'})) - engine = taskflow.engines.load( - work_flow, store=store_spec) - engine.run() - lun_id = engine.storage.fetch('smp_id') - return lun_id - - -def create_volume_from_snapshot(client, src_snap_name, lun_name, - lun_size, base_lun_name, pool_name, - provision, tier, new_snap_name=None): - # Step 1: Copy and modify snap(only for async migrate) - # Step 2: Create smp from base lun - # Step 3: Attach snapshot to smp - # Step 4: Create new LUN - # Step 5: migrate the smp to new LUN - tmp_lun_name = '%s_dest' % lun_name - flow_name = 'create_volume_from_snapshot' - store_spec = {'client': client, - 'snap_name': src_snap_name, - 'new_snap_name': new_snap_name, - 'smp_name': lun_name, - 'lun_name': tmp_lun_name, - 'lun_size': lun_size, - 'base_lun_name': base_lun_name, - 'pool_name': pool_name, - 'provision': provision, - 'tier': tier, - 'keep_for': (common.SNAP_EXPIRATION_HOUR - if new_snap_name else None), - 'async_migrate': True if new_snap_name else False, - } - work_flow = linear_flow.Flow(flow_name) - if new_snap_name: - work_flow.add(CopySnapshotTask(), - ModifySnapshotTask( - rebind={'snap_name': 'new_snap_name'})) - - work_flow.add(CreateSMPTask(), - AttachSnapTask(rebind={'snap_name': 'new_snap_name'}) - if new_snap_name else AttachSnapTask(), - ExtendSMPTask(), - CreateLunTask(), - MigrateLunTask( - rebind={'src_id': 'smp_id', - 'dst_id': 'new_lun_id'})) - engine = taskflow.engines.load( - work_flow, store=store_spec) - engine.run() - lun_id = engine.storage.fetch('smp_id') - return lun_id - - -def fast_create_cloned_volume(client, snap_name, lun_id, - lun_name, base_lun_name): - flow_name = 'create_cloned_snapcopy_volume' - store_spec = { - 'client': client, - 'snap_name': snap_name, - 'lun_id': lun_id, - 'smp_name': lun_name, - 'base_lun_name': base_lun_name} - work_flow = linear_flow.Flow(flow_name) - work_flow.add(CreateSnapshotTask(), - CreateSMPTask(), - AttachSnapTask()) - engine = 
taskflow.engines.load(work_flow, store=store_spec) - engine.run() - lun_id = engine.storage.fetch('smp_id') - return lun_id - - -def create_cloned_volume(client, snap_name, lun_id, lun_name, - lun_size, base_lun_name, pool_name, - provision, tier, async_migrate=False): - tmp_lun_name = '%s_dest' % lun_name - flow_name = 'create_cloned_volume' - store_spec = {'client': client, - 'snap_name': snap_name, - 'lun_id': lun_id, - 'smp_name': lun_name, - 'lun_name': tmp_lun_name, - 'lun_size': lun_size, - 'base_lun_name': base_lun_name, - 'pool_name': pool_name, - 'provision': provision, - 'tier': tier, - 'keep_for': (common.SNAP_EXPIRATION_HOUR if - async_migrate else None), - 'async_migrate': async_migrate, - } - work_flow = linear_flow.Flow(flow_name) - work_flow.add( - CreateSnapshotTask(), - CreateSMPTask(), - AttachSnapTask(), - ExtendSMPTask(), - CreateLunTask(), - MigrateLunTask( - rebind={'src_id': 'smp_id', 'dst_id': 'new_lun_id'})) - engine = taskflow.engines.load( - work_flow, store=store_spec) - engine.run() - if not async_migrate: - client.delete_snapshot(snap_name) - lun_id = engine.storage.fetch('smp_id') - return lun_id - - -def create_cg_from_cg_snapshot(client, cg_name, src_cg_name, - cg_snap_name, src_cg_snap_name, - pool_name, lun_sizes, lun_names, - src_lun_names, specs_list, copy_snap=True): - prepare_tasks = [] - store_spec = {} - - if copy_snap: - flow_name = 'create_cg_from_cg_snapshot' - temp_cg_snap = utils.construct_tmp_cg_snap_name(cg_name) - snap_name = temp_cg_snap - store_spec.update({'snap_name': src_cg_snap_name, - 'new_snap_name': snap_name}) - prepare_tasks.append( - CopySnapshotTask()) - prepare_tasks.append( - ModifySnapshotTask(rebind={'snap_name': 'new_snap_name'})) - else: - flow_name = 'create_cg_from_cg' - snap_name = cg_snap_name - store_spec.update({'cg_name': src_cg_name, - 'cg_snap_name': snap_name}) - prepare_tasks.append(CreateCGSnapshotTask()) - - work_flow = linear_flow.Flow(flow_name) - work_flow.add(*prepare_tasks) - 
new_src_id_template = 'new_src_id_%s' - new_dst_id_template = 'new_dst_id_%s' - new_dst_wwn_template = 'new_dst_wwn_%s' - - common_store_spec = { - 'client': client, - 'pool_name': pool_name, - 'ignore_thresholds': True, - 'new_cg_name': cg_name - } - store_spec.update(common_store_spec) - - # Create LUNs for CG - for i, lun_name in enumerate(lun_names): - sub_store_spec = { - 'lun_name': utils.construct_tmp_lun_name(lun_name), - 'lun_size': lun_sizes[i], - 'provision': specs_list[i].provision, - 'tier': specs_list[i].tier, - 'base_lun_name': src_lun_names[i], - 'smp_name': lun_name, - 'snap_name': snap_name, - 'async_migrate': True, - } - work_flow.add(CreateSMPTask(name="CreateSMPTask_%s" % i, - inject=sub_store_spec, - provides=new_src_id_template % i), - AttachSnapTask(name="AttachSnapTask_%s" % i, - inject=sub_store_spec), - CreateLunTask(name="CreateLunTask_%s" % i, - inject=sub_store_spec, - provides=(new_dst_id_template % i, - new_dst_wwn_template % i)), - MigrateLunTask( - name="MigrateLunTask_%s" % i, - inject=sub_store_spec, - rebind={'src_id': new_src_id_template % i, - 'dst_id': new_dst_id_template % i})) - - # Wait all migration session finished - work_flow.add(WaitMigrationsTask(new_src_id_template, - new_dst_id_template, - new_dst_wwn_template, - len(lun_names)), - CreateConsistencyGroupTask(new_src_id_template, - len(lun_names))) - engine = taskflow.engines.load(work_flow, store=store_spec) - engine.run() - # Fetch all created LUNs and add them into CG - lun_id_list = [] - for i, lun_name in enumerate(lun_names): - lun_id = engine.storage.fetch(new_src_id_template % i) - lun_id_list.append(lun_id) - - client.delete_cg_snapshot(snap_name) - return lun_id_list - - -def create_cloned_cg(client, cg_name, src_cg_name, - pool_name, lun_sizes, lun_names, - src_lun_names, specs_list): - cg_snap_name = utils.construct_tmp_cg_snap_name(cg_name) - return create_cg_from_cg_snapshot( - client, cg_name, src_cg_name, - cg_snap_name, None, - pool_name, lun_sizes, 
lun_names, - src_lun_names, specs_list, copy_snap=False) - - -def create_mirror_view(mirror_view, mirror_name, - primary_lun_id, pool_name, - lun_name, lun_size, provision, tier): - flow_name = 'create_mirror_view' - store_specs = { - 'mirror': mirror_view, - 'mirror_name': mirror_name, - 'primary_lun_id': primary_lun_id, - 'pool_name': pool_name, - 'lun_name': lun_name, - 'lun_size': lun_size, - 'provision': provision, - 'tier': tier, - 'ignore_thresholds': True - } - # NOTE: should create LUN on secondary device/array - work_flow = linear_flow.Flow(flow_name) - work_flow.add(CreateMirrorTask(), - CreateLunTask( - name='CreateSecondaryLunTask', - provides=('secondary_lun_id', 'secondary_lun_wwn'), - inject={'client': mirror_view.secondary_client}), - AddMirrorImageTask()) - engine = taskflow.engines.load(work_flow, store=store_specs) - engine.run() diff --git a/cinder/volume/drivers/dell_emc/vnx/utils.py b/cinder/volume/drivers/dell_emc/vnx/utils.py deleted file mode 100644 index b24eb066e..000000000 --- a/cinder/volume/drivers/dell_emc/vnx/utils.py +++ /dev/null @@ -1,476 +0,0 @@ -# Copyright (c) 2016 EMC Corporation. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -import six -import time - -from oslo_log import log as logging -from oslo_service import loopingcall -from oslo_utils import excutils -from oslo_utils import importutils -from oslo_utils import uuidutils - -from cinder import exception -from cinder.i18n import _ -from cinder.objects import fields -from cinder.volume.drivers.dell_emc.vnx import common -from cinder.volume.drivers.san.san import san_opts -from cinder.volume import utils as vol_utils - -storops = importutils.try_import('storops') - - -storops = importutils.try_import('storops') -LOG = logging.getLogger(__name__) - - -def init_ops(configuration): - configuration.append_config_values(common.VNX_OPTS) - configuration.append_config_values(san_opts) - - -def get_metadata(volume): - # Since versionedobjects is partially merged, metadata - # may come from 'volume_metadata' or 'metadata', here - # we need to take care both of them. - volume_metadata = {} - if 'volume_metadata' in volume: - for metadata in volume['volume_metadata']: - volume_metadata[metadata['key']] = metadata['value'] - return volume_metadata - return volume['metadata'] if 'metadata' in volume else {} - - -def dump_provider_location(location_dict): - return '|'.join([k + '^' + v for k, v in location_dict.items()]) - - -def build_provider_location(system, lun_type, lun_id, base_lun_name, version): - """Builds provider_location for volume or snapshot. - - :param system: VNX serial number - :param lun_id: LUN ID in VNX - :param lun_type: 'lun' or 'smp' - :param base_lun_name: primary LUN name, - it will be used when creating snap lun - :param version: driver version - """ - location_dict = {'system': system, - 'type': lun_type, - 'id': six.text_type(lun_id), - 'base_lun_name': six.text_type(base_lun_name), - 'version': version} - return dump_provider_location(location_dict) - - -def extract_provider_location(provider_location, key): - """Extracts value of the specified field from provider_location string. 
- - :param provider_location: provider_location string - :param key: field name of the value that to be extracted - :return: value of the specified field if it exists, otherwise, - None is returned - """ - if not provider_location: - return None - - kvps = provider_location.split('|') - for kvp in kvps: - fields = kvp.split('^') - if len(fields) == 2 and fields[0] == key: - return fields[1] - - -def update_provider_location(provider_location, items): - """Updates provider_location with new dict items. - - :param provider_location: volume's provider_location. - :param items: dict items for updating. - """ - location_dict = {tp.split('^')[0]: tp.split('^')[1] - for tp in provider_location.split('|')} - for key, value in items.items(): - location_dict[key] = value - return dump_provider_location(location_dict) - - -def update_remote_provider_location(volume, client): - """Update volume provider_location after volume failed-over.""" - provider_location = volume.provider_location - updated = {} - updated['system'] = client.get_serial() - updated['id'] = six.text_type( - client.get_lun(name=volume.name).lun_id) - provider_location = update_provider_location( - provider_location, updated) - return provider_location - - -def get_pool_from_host(host): - return vol_utils.extract_host(host, 'pool') - - -def wait_until(condition, timeout=None, interval=common.INTERVAL_5_SEC, - reraise_arbiter=lambda ex: True, *args, **kwargs): - start_time = time.time() - if not timeout: - timeout = common.DEFAULT_TIMEOUT - - def _inner(): - try: - test_value = condition(*args, **kwargs) - except Exception as ex: - test_value = False - with excutils.save_and_reraise_exception( - reraise=reraise_arbiter(ex)): - LOG.debug('Exception raised when executing %(condition_name)s' - 'in wait_until. 
Message: %(msg)s', - {'condition_name': condition.__name__, - 'msg': ex.message}) - if test_value: - raise loopingcall.LoopingCallDone() - - if int(time.time()) - start_time > timeout: - msg = (_('Timeout waiting for %(condition_name)s in wait_until.') - % {'condition_name': condition.__name__}) - LOG.error(msg) - raise common.WaitUtilTimeoutException(msg) - - timer = loopingcall.FixedIntervalLoopingCall(_inner) - timer.start(interval=interval).wait() - - -def validate_storage_migration(volume, target_host, src_serial, src_protocol): - if 'location_info' not in target_host['capabilities']: - LOG.warning("Failed to get pool name and " - "serial number. 'location_info' " - "from %s.", target_host['host']) - return False - info = target_host['capabilities']['location_info'] - LOG.debug("Host for migration is %s.", info) - try: - serial_number = info.split('|')[1] - except AttributeError: - LOG.warning('Error on getting serial number ' - 'from %s.', target_host['host']) - return False - if serial_number != src_serial: - LOG.debug('Skip storage-assisted migration because ' - 'target and source backend are not managing' - 'the same array.') - return False - if (target_host['capabilities']['storage_protocol'] != src_protocol - and get_original_status(volume) == 'in-use'): - LOG.debug('Skip storage-assisted migration because ' - 'in-use volume can not be ' - 'migrate between different protocols.') - return False - return True - - -def retype_need_migration(volume, old_provision, new_provision, host): - if volume['host'] != host['host']: - return True - - lun_type = extract_provider_location(volume['provider_location'], 'type') - if lun_type == 'smp': - return True - - if old_provision != new_provision: - if retype_need_turn_on_compression(old_provision, new_provision): - return False - else: - return True - - return False - - -def retype_need_turn_on_compression(old_provision, new_provision): - return (old_provision in [storops.VNXProvisionEnum.THIN, - 
storops.VNXProvisionEnum.THICK] - and new_provision == storops.VNXProvisionEnum.COMPRESSED) - - -def retype_need_change_tier(old_tier, new_tier): - return new_tier is not None and old_tier != new_tier - - -def get_original_status(volume): - if not volume['volume_attachment']: - return 'available' - else: - return 'in-use' - - -def construct_snap_name(volume): - """Return snapshot name.""" - if is_snapcopy_enabled(volume): - return 'snap-as-vol-' + six.text_type(volume.name_id) - else: - return 'tmp-snap-' + six.text_type(volume.name_id) - - -def construct_mirror_name(volume): - """Constructs MirrorView name for volume.""" - return 'mirror_' + six.text_type(volume.id) - - -def construct_group_name(group): - """Constructs MirrorGroup name for volumes. - - VNX only allows for 32-character group name, so - trim the dash(-) from group id. - """ - return group.id.replace('-', '') - - -def construct_tmp_cg_snap_name(cg_name): - """Return CG snapshot name.""" - return 'tmp-snap-' + six.text_type(cg_name) - - -def construct_tmp_lun_name(lun_name): - """Constructs a time-based temporary LUN name.""" - return '%(src)s-%(ts)s' % {'src': lun_name, - 'ts': int(time.time())} - - -def construct_smp_name(snap_id): - return 'tmp-smp-' + six.text_type(snap_id) - - -def is_snapcopy_enabled(volume): - meta = get_metadata(volume) - return 'snapcopy' in meta and meta['snapcopy'].lower() == 'true' - - -def is_async_migrate_enabled(volume): - extra_specs = common.ExtraSpecs.from_volume(volume) - if extra_specs.is_replication_enabled: - # For replication-enabled volume, we should not use the async-cloned - # volume, or setup replication would fail with - # VNXMirrorLunNotAvailableError - return False - meta = get_metadata(volume) - if 'async_migrate' not in meta: - # Asynchronous migration is the default behavior now - return True - return 'async_migrate' in meta and meta['async_migrate'].lower() == 'true' - - -def get_migration_rate(volume): - metadata = get_metadata(volume) - rate = 
metadata.get('migrate_rate', None) - if rate: - if rate.lower() in storops.VNXMigrationRate.values(): - return storops.VNXMigrationRate.parse(rate.lower()) - else: - LOG.warning('Unknown migration rate specified, ' - 'using [high] as migration rate.') - - return storops.VNXMigrationRate.HIGH - - -def check_type_matched(volume): - """Check volume type and group type - - This will make sure they do not conflict with each other. - - :param volume: volume to be checked - :returns: None - :raises: InvalidInput - - """ - - # If volume is not a member of group, skip this check anyway. - if not volume.group: - return - extra_specs = common.ExtraSpecs.from_volume(volume) - group_specs = common.ExtraSpecs.from_group(volume.group) - - if not (group_specs.is_group_replication_enabled == - extra_specs.is_replication_enabled): - msg = _('Replication should be enabled or disabled for both ' - 'volume or group. volume replication status: %(vol_status)s, ' - 'group replication status: %(group_status)s') % { - 'vol_status': extra_specs.is_replication_enabled, - 'group_status': group_specs.is_group_replication_enabled} - raise exception.InvalidInput(reason=msg) - - -def check_rep_status_matched(group): - """Check replication status for group. - - Group status must be enabled before proceeding. 
- """ - group_specs = common.ExtraSpecs.from_group(group) - if group_specs.is_group_replication_enabled: - if group.replication_status != fields.ReplicationStatus.ENABLED: - msg = _('Replication status should be %s for replication-enabled ' - 'group.') % fields.ReplicationStatus.ENABLED - raise exception.InvalidInput(reason=msg) - else: - LOG.info('Replication is not enabled on group %s, skip status check.', - group.id) - - -def update_res_without_poll(res): - with res.with_no_poll(): - res.update() - - -def update_res_with_poll(res): - with res.with_poll(): - res.update() - - -def get_base_lun_name(volume): - """Returns base LUN name for LUN/snapcopy LUN.""" - base_name = extract_provider_location( - volume.provider_location, 'base_lun_name') - if base_name is None or base_name == 'None': - return volume.name - return base_name - - -def sift_port_white_list(port_white_list, registered_io_ports): - """Filters out the unregistered ports. - - Goes through the `port_white_list`, and filters out the ones not - registered (that is not in `registered_io_ports`). - """ - valid_port_list = [] - LOG.debug('Filter ports in [%(white)s}] but not in [%(reg_ports)s].', - {'white': ','.join( - [port.display_name for port in port_white_list]), - 'reg_ports': ','.join( - [port.display_name for port in registered_io_ports])}) - for io_port in port_white_list: - if io_port not in registered_io_ports: - LOG.debug('Skipped SP port %(port)s due to it is not registered. ' - 'The registered IO ports: %(reg_ports)s.', - {'port': io_port, 'reg_ports': registered_io_ports}) - else: - valid_port_list.append(io_port) - - return valid_port_list - - -def convert_to_tgt_list_and_itor_tgt_map(zone_mapping): - """Function to process data from lookup service. - - :param zone_mapping: mapping is the data from the zone lookup service - with below format - { - : { - 'initiator_port_wwn_list': - ('200000051e55a100', '200000051e55a121'..) 
- 'target_port_wwn_list': - ('100000051e55a100', '100000051e55a121'..) - } - } - """ - target_wwns = [] - itor_tgt_map = {} - for san_name in zone_mapping: - one_map = zone_mapping[san_name] - for target in one_map['target_port_wwn_list']: - if target not in target_wwns: - target_wwns.append(target) - for initiator in one_map['initiator_port_wwn_list']: - itor_tgt_map[initiator] = one_map['target_port_wwn_list'] - LOG.debug("target_wwns: %(tgt_wwns)s\n init_targ_map: %(itor_tgt_map)s", - {'tgt_wwns': target_wwns, - 'itor_tgt_map': itor_tgt_map}) - return target_wwns, itor_tgt_map - - -def truncate_fc_port_wwn(wwn): - return wwn.replace(':', '')[16:] - - -def is_volume_smp(volume): - return 'smp' == extract_provider_location(volume.provider_location, 'type') - - -def require_consistent_group_snapshot_enabled(func): - @six.wraps(func) - def inner(self, *args, **kwargs): - if not vol_utils.is_group_a_cg_snapshot_type(args[1]): - raise NotImplementedError - return func(self, *args, **kwargs) - return inner - - -def get_remote_pool(config, volume): - """Select remote pool name for replication. - - Prefer configured remote pool name, or same pool name - as the source volume. - """ - pool_name = get_pool_from_host(volume.host) - rep_list = common.ReplicationDeviceList(config) - remote_pool_name = rep_list[0].pool_name - return remote_pool_name if remote_pool_name else pool_name - - -def is_image_cache_volume(volume): - display_name = volume.display_name - if (display_name.startswith('image-') - and uuidutils.is_uuid_like(display_name[6:])): - LOG.debug('Volume: %s is for image cache. Use sync migration and ' - 'thin provisioning.', volume.name) - return True - return False - - -def calc_migrate_and_provision(volume): - """Returns a tuple of async migrate and provision type. - - The first element is the flag whether to enable async migrate, - the second is the provision type (thin or thick). 
- """ - if is_image_cache_volume(volume): - return False, storops.VNXProvisionEnum.THIN - else: - specs = common.ExtraSpecs.from_volume(volume) - return is_async_migrate_enabled(volume), specs.provision - - -def get_backend_qos_specs(volume): - qos_specs = volume.volume_type.qos_specs - if qos_specs is None: - return None - - qos_specs = qos_specs['qos_specs'] - if qos_specs is None: - return None - - consumer = qos_specs['consumer'] - # Front end QoS specs are handled by nova. Just ignore them here. - if consumer not in common.BACKEND_QOS_CONSUMERS: - return None - - max_iops = qos_specs['specs'].get(common.QOS_MAX_IOPS) - max_bws = qos_specs['specs'].get(common.QOS_MAX_BWS) - - if max_iops is None and max_bws is None: - return None - - return { - 'id': qos_specs['id'], - common.QOS_MAX_IOPS: max_iops, - common.QOS_MAX_BWS: max_bws, - } diff --git a/cinder/volume/drivers/dell_emc/xtremio.py b/cinder/volume/drivers/dell_emc/xtremio.py deleted file mode 100644 index bac055a0a..000000000 --- a/cinder/volume/drivers/dell_emc/xtremio.py +++ /dev/null @@ -1,1230 +0,0 @@ -# Copyright (c) 2012 - 2014 EMC Corporation. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -""" -Driver for Dell EMC XtremIO Storage. -supported XtremIO version 2.4 and up - -.. 
code-block:: none - - 1.0.0 - initial release - 1.0.1 - enable volume extend - 1.0.2 - added FC support, improved error handling - 1.0.3 - update logging level, add translation - 1.0.4 - support for FC zones - 1.0.5 - add support for XtremIO 4.0 - 1.0.6 - add support for iSCSI multipath, CA validation, consistency groups, - R/O snapshots, CHAP discovery authentication - 1.0.7 - cache glance images on the array - 1.0.8 - support for volume retype, CG fixes -""" - -import json -import math -import random -import requests -import string - -from oslo_config import cfg -from oslo_log import log as logging -from oslo_utils import strutils -from oslo_utils import units -import six -from six.moves import http_client - -from cinder import context -from cinder import exception -from cinder.i18n import _ -from cinder import interface -from cinder.objects import fields -from cinder import utils -from cinder.volume import configuration -from cinder.volume import driver -from cinder.volume.drivers.san import san -from cinder.zonemanager import utils as fczm_utils - - -LOG = logging.getLogger(__name__) - -CONF = cfg.CONF -DEFAULT_PROVISIONING_FACTOR = 20.0 -XTREMIO_OPTS = [ - cfg.StrOpt('xtremio_cluster_name', - default='', - help='XMS cluster id in multi-cluster environment'), - cfg.IntOpt('xtremio_array_busy_retry_count', - default=5, - help='Number of retries in case array is busy'), - cfg.IntOpt('xtremio_array_busy_retry_interval', - default=5, - help='Interval between retries in case array is busy'), - cfg.IntOpt('xtremio_volumes_per_glance_cache', - default=100, - help='Number of volumes created from each cached glance image')] - -CONF.register_opts(XTREMIO_OPTS, group=configuration.SHARED_CONF_GROUP) - -RANDOM = random.Random() -OBJ_NOT_FOUND_ERR = 'obj_not_found' -VOL_NOT_UNIQUE_ERR = 'vol_obj_name_not_unique' -VOL_OBJ_NOT_FOUND_ERR = 'vol_obj_not_found' -ALREADY_MAPPED_ERR = 'already_mapped' -SYSTEM_BUSY = 'system_is_busy' -TOO_MANY_OBJECTS = 'too_many_objs' 
-TOO_MANY_SNAPSHOTS_PER_VOL = 'too_many_snapshots_per_vol' - - -XTREMIO_OID_NAME = 1 -XTREMIO_OID_INDEX = 2 - - -class XtremIOClient(object): - def __init__(self, configuration, cluster_id): - self.configuration = configuration - self.cluster_id = cluster_id - self.verify = (self.configuration. - safe_get('driver_ssl_cert_verify') or False) - if self.verify: - verify_path = (self.configuration. - safe_get('driver_ssl_cert_path') or None) - if verify_path: - self.verify = verify_path - - def get_base_url(self, ver): - if ver == 'v1': - return 'https://%s/api/json/types' % self.configuration.san_ip - elif ver == 'v2': - return 'https://%s/api/json/v2/types' % self.configuration.san_ip - - def req(self, object_type='volumes', method='GET', data=None, - name=None, idx=None, ver='v1'): - @utils.retry(exception.XtremIOArrayBusy, - self.configuration.xtremio_array_busy_retry_count, - self.configuration.xtremio_array_busy_retry_interval, 1) - def _do_req(object_type, method, data, name, idx, ver): - if not data: - data = {} - if name and idx: - msg = _("can't handle both name and index in req") - LOG.error(msg) - raise exception.VolumeDriverException(message=msg) - - url = '%s/%s' % (self.get_base_url(ver), object_type) - params = {} - key = None - if name: - params['name'] = name - key = name - elif idx: - url = '%s/%d' % (url, idx) - key = str(idx) - if method in ('GET', 'DELETE'): - params.update(data) - self.update_url(params, self.cluster_id) - if method != 'GET': - self.update_data(data, self.cluster_id) - LOG.debug('data: %s', data) - LOG.debug('%(type)s %(url)s', {'type': method, 'url': url}) - try: - response = requests.request( - method, url, params=params, data=json.dumps(data), - verify=self.verify, auth=(self.configuration.san_login, - self.configuration.san_password)) - except requests.exceptions.RequestException as exc: - msg = (_('Exception: %s') % six.text_type(exc)) - raise exception.VolumeDriverException(message=msg) - - if (http_client.OK <= 
response.status_code < - http_client.MULTIPLE_CHOICES): - if method in ('GET', 'POST'): - return response.json() - else: - return '' - - self.handle_errors(response, key, object_type) - return _do_req(object_type, method, data, name, idx, ver) - - def handle_errors(self, response, key, object_type): - if response.status_code == http_client.BAD_REQUEST: - error = response.json() - err_msg = error.get('message') - if err_msg.endswith(OBJ_NOT_FOUND_ERR): - LOG.warning("object %(key)s of " - "type %(typ)s not found, %(err_msg)s", - {'key': key, 'typ': object_type, - 'err_msg': err_msg, }) - raise exception.NotFound() - elif err_msg == VOL_NOT_UNIQUE_ERR: - LOG.error("can't create 2 volumes with the same name, %s", - err_msg) - msg = _('Volume by this name already exists') - raise exception.VolumeBackendAPIException(data=msg) - elif err_msg == VOL_OBJ_NOT_FOUND_ERR: - LOG.error("Can't find volume to map %(key)s, %(msg)s", - {'key': key, 'msg': err_msg, }) - raise exception.VolumeNotFound(volume_id=key) - elif ALREADY_MAPPED_ERR in err_msg: - raise exception.XtremIOAlreadyMappedError() - elif err_msg == SYSTEM_BUSY: - raise exception.XtremIOArrayBusy() - elif err_msg in (TOO_MANY_OBJECTS, TOO_MANY_SNAPSHOTS_PER_VOL): - raise exception.XtremIOSnapshotsLimitExceeded() - msg = _('Bad response from XMS, %s') % response.text - LOG.error(msg) - raise exception.VolumeBackendAPIException(message=msg) - - def update_url(self, data, cluster_id): - return - - def update_data(self, data, cluster_id): - return - - def get_cluster(self): - return self.req('clusters', idx=1)['content'] - - def create_snapshot(self, src, dest, ro=False): - """Create a snapshot of a volume on the array. - - XtreamIO array snapshots are also volumes. - - :src: name of the source volume to be cloned - :dest: name for the new snapshot - :ro: new snapshot type ro/regular. 
only applicable to Client4 - """ - raise NotImplementedError() - - def get_extra_capabilities(self): - return {} - - def get_initiator(self, port_address): - raise NotImplementedError() - - def add_vol_to_cg(self, vol_id, cg_id): - pass - - def get_initiators_igs(self, port_addresses): - ig_indexes = set() - for port_address in port_addresses: - initiator = self.get_initiator(port_address) - ig_indexes.add(initiator['ig-id'][XTREMIO_OID_INDEX]) - - return list(ig_indexes) - - def get_fc_up_ports(self): - targets = [self.req('targets', name=target['name'])['content'] - for target in self.req('targets')['targets']] - return [target for target in targets - if target['port-type'] == 'fc' and - target["port-state"] == 'up'] - - -class XtremIOClient3(XtremIOClient): - def __init__(self, configuration, cluster_id): - super(XtremIOClient3, self).__init__(configuration, cluster_id) - self._portals = [] - - def find_lunmap(self, ig_name, vol_name): - try: - lun_mappings = self.req('lun-maps')['lun-maps'] - except exception.NotFound: - raise (exception.VolumeDriverException - (_("can't find lun-map, ig:%(ig)s vol:%(vol)s") % - {'ig': ig_name, 'vol': vol_name})) - - for lm_link in lun_mappings: - idx = lm_link['href'].split('/')[-1] - # NOTE(geguileo): There can be races so mapped elements retrieved - # in the listing may no longer exist. - try: - lm = self.req('lun-maps', idx=int(idx))['content'] - except exception.NotFound: - continue - if lm['ig-name'] == ig_name and lm['vol-name'] == vol_name: - return lm - - return None - - def num_of_mapped_volumes(self, initiator): - cnt = 0 - for lm_link in self.req('lun-maps')['lun-maps']: - idx = lm_link['href'].split('/')[-1] - # NOTE(geguileo): There can be races so mapped elements retrieved - # in the listing may no longer exist. 
- try: - lm = self.req('lun-maps', idx=int(idx))['content'] - except exception.NotFound: - continue - if lm['ig-name'] == initiator: - cnt += 1 - return cnt - - def get_iscsi_portals(self): - if self._portals: - return self._portals - - iscsi_portals = [t['name'] for t in self.req('iscsi-portals') - ['iscsi-portals']] - for portal_name in iscsi_portals: - try: - self._portals.append(self.req('iscsi-portals', - name=portal_name)['content']) - except exception.NotFound: - raise (exception.VolumeBackendAPIException - (data=_("iscsi portal, %s, not found") % portal_name)) - - return self._portals - - def create_snapshot(self, src, dest, ro=False): - data = {'snap-vol-name': dest, 'ancestor-vol-id': src} - - self.req('snapshots', 'POST', data) - - def get_initiator(self, port_address): - try: - return self.req('initiators', 'GET', name=port_address)['content'] - except exception.NotFound: - pass - - -class XtremIOClient4(XtremIOClient): - def __init__(self, configuration, cluster_id): - super(XtremIOClient4, self).__init__(configuration, cluster_id) - self._cluster_name = None - - def req(self, object_type='volumes', method='GET', data=None, - name=None, idx=None, ver='v2'): - return super(XtremIOClient4, self).req(object_type, method, data, - name, idx, ver) - - def get_extra_capabilities(self): - return {'consistencygroup_support': True} - - def find_lunmap(self, ig_name, vol_name): - try: - return (self.req('lun-maps', - data={'full': 1, - 'filter': ['vol-name:eq:%s' % vol_name, - 'ig-name:eq:%s' % ig_name]}) - ['lun-maps'][0]) - except (KeyError, IndexError): - raise exception.VolumeNotFound(volume_id=vol_name) - - def num_of_mapped_volumes(self, initiator): - return len(self.req('lun-maps', - data={'filter': 'ig-name:eq:%s' % initiator}) - ['lun-maps']) - - def update_url(self, data, cluster_id): - if cluster_id: - data['cluster-name'] = cluster_id - - def update_data(self, data, cluster_id): - if cluster_id: - data['cluster-id'] = cluster_id - - def 
get_iscsi_portals(self): - return self.req('iscsi-portals', - data={'full': 1})['iscsi-portals'] - - def get_cluster(self): - if not self.cluster_id: - self.cluster_id = self.req('clusters')['clusters'][0]['name'] - - return self.req('clusters', name=self.cluster_id)['content'] - - def create_snapshot(self, src, dest, ro=False): - data = {'snapshot-set-name': dest, 'snap-suffix': dest, - 'volume-list': [src], - 'snapshot-type': 'readonly' if ro else 'regular'} - - res = self.req('snapshots', 'POST', data, ver='v2') - typ, idx = res['links'][0]['href'].split('/')[-2:] - - # rename the snapshot - data = {'name': dest} - try: - self.req(typ, 'PUT', data, idx=int(idx)) - except exception.VolumeBackendAPIException: - # reverting - LOG.error('Failed to rename the created snapshot, reverting.') - self.req(typ, 'DELETE', idx=int(idx)) - raise - - def add_vol_to_cg(self, vol_id, cg_id): - add_data = {'vol-id': vol_id, 'cg-id': cg_id} - self.req('consistency-group-volumes', 'POST', add_data, ver='v2') - - def get_initiator(self, port_address): - inits = self.req('initiators', - data={'filter': 'port-address:eq:' + port_address, - 'full': 1})['initiators'] - if len(inits) == 1: - return inits[0] - else: - pass - - def get_fc_up_ports(self): - return self.req('targets', - data={'full': 1, - 'filter': ['port-type:eq:fc', - 'port-state:eq:up'], - 'prop': 'port-address'})["targets"] - - -class XtremIOClient42(XtremIOClient4): - def get_initiators_igs(self, port_addresses): - init_filter = ','.join('port-address:eq:{}'.format(port_address) for - port_address in port_addresses) - initiators = self.req('initiators', - data={'filter': init_filter, - 'full': 1, 'prop': 'ig-id'})['initiators'] - return list(set(ig_id['ig-id'][XTREMIO_OID_INDEX] - for ig_id in initiators)) - - -class XtremIOVolumeDriver(san.SanDriver): - """Executes commands relating to Volumes.""" - - VERSION = '1.0.9' - - # ThirdPartySystems wiki - CI_WIKI_NAME = "EMC_XIO_CI" - - driver_name = 'XtremIO' - 
MIN_XMS_VERSION = [3, 0, 0] - - def __init__(self, *args, **kwargs): - super(XtremIOVolumeDriver, self).__init__(*args, **kwargs) - self.configuration.append_config_values(XTREMIO_OPTS) - self.protocol = None - self.backend_name = (self.configuration.safe_get('volume_backend_name') - or self.driver_name) - self.cluster_id = (self.configuration.safe_get('xtremio_cluster_name') - or '') - self.provisioning_factor = (self.configuration. - safe_get('max_over_subscription_ratio') - or DEFAULT_PROVISIONING_FACTOR) - self._stats = {} - self.client = XtremIOClient3(self.configuration, self.cluster_id) - - def _obj_from_result(self, res): - typ, idx = res['links'][0]['href'].split('/')[-2:] - return self.client.req(typ, idx=int(idx))['content'] - - def check_for_setup_error(self): - try: - name = self.client.req('clusters')['clusters'][0]['name'] - cluster = self.client.req('clusters', name=name)['content'] - version_text = cluster['sys-sw-version'] - except exception.NotFound: - msg = _("XtremIO not initialized correctly, no clusters found") - raise (exception.VolumeBackendAPIException - (data=msg)) - ver = [int(n) for n in version_text.split('-')[0].split('.')] - if ver < self.MIN_XMS_VERSION: - msg = (_('Invalid XtremIO version %(cur)s,' - ' version %(min)s or up is required') % - {'min': self.MIN_XMS_VERSION, - 'cur': ver}) - LOG.error(msg) - raise exception.VolumeBackendAPIException(data=msg) - else: - LOG.info('XtremIO Cluster version %s', version_text) - client_ver = '3' - if ver[0] >= 4: - # get XMS version - xms = self.client.req('xms', idx=1)['content'] - xms_version = tuple([int(i) for i in - xms['sw-version'].split('-')[0].split('.')]) - LOG.info('XtremIO XMS version %s', version_text) - if xms_version >= (4, 2): - self.client = XtremIOClient42(self.configuration, - self.cluster_id) - client_ver = '4.2' - else: - self.client = XtremIOClient4(self.configuration, - self.cluster_id) - client_ver = '4' - LOG.info('Using XtremIO Client %s', client_ver) - - def 
create_volume(self, volume): - """Creates a volume.""" - data = {'vol-name': volume['id'], - 'vol-size': str(volume['size']) + 'g' - } - self.client.req('volumes', 'POST', data) - - # Add the volume to a cg in case volume requested a cgid or group_id. - # If both cg_id and group_id exists in a volume. group_id will take - # place. - - consistency_group = volume.get('consistencygroup_id') - - # if cg_id and group_id are both exists, we gives priority to group_id. - if volume.get('group_id'): - consistency_group = volume.get('group_id') - - if consistency_group: - self.client.add_vol_to_cg(volume['id'], - consistency_group) - - def create_volume_from_snapshot(self, volume, snapshot): - """Creates a volume from a snapshot.""" - if snapshot.get('cgsnapshot_id'): - # get array snapshot id from CG snapshot - snap_by_anc = self._get_snapset_ancestors(snapshot.cgsnapshot) - snapshot_id = snap_by_anc[snapshot['volume_id']] - else: - snapshot_id = snapshot['id'] - - self.client.create_snapshot(snapshot_id, volume['id']) - - # add new volume to consistency group - if (volume.get('consistencygroup_id') and - self.client is XtremIOClient4): - self.client.add_vol_to_cg(volume['id'], - snapshot['consistencygroup_id']) - - def create_cloned_volume(self, volume, src_vref): - """Creates a clone of the specified volume.""" - vol = self.client.req('volumes', name=src_vref['id'])['content'] - ctxt = context.get_admin_context() - cache = self.db.image_volume_cache_get_by_volume_id(ctxt, - src_vref['id']) - limit = self.configuration.safe_get('xtremio_volumes_per_glance_cache') - if cache and limit and limit > 0 and limit <= vol['num-of-dest-snaps']: - raise exception.CinderException('Exceeded the configured limit of ' - '%d snapshots per volume' % limit) - try: - self.client.create_snapshot(src_vref['id'], volume['id']) - except exception.XtremIOSnapshotsLimitExceeded as e: - raise exception.CinderException(e.message) - - # extend the snapped volume if requested size is larger then 
original - if volume['size'] > src_vref['size']: - try: - self.extend_volume(volume, volume['size']) - except Exception: - LOG.error('failes to extend volume %s, ' - 'reverting clone operation', volume['id']) - # remove the volume in case resize failed - self.delete_volume(volume) - raise - - if volume.get('consistencygroup_id') and self.client is XtremIOClient4: - self.client.add_vol_to_cg(volume['id'], - volume['consistencygroup_id']) - - def delete_volume(self, volume): - """Deletes a volume.""" - try: - self.client.req('volumes', 'DELETE', name=volume.name_id) - except exception.NotFound: - LOG.info("volume %s doesn't exist", volume.name_id) - - def create_snapshot(self, snapshot): - """Creates a snapshot.""" - self.client.create_snapshot(snapshot.volume_id, snapshot.id, True) - - def delete_snapshot(self, snapshot): - """Deletes a snapshot.""" - try: - self.client.req('volumes', 'DELETE', name=snapshot.id) - except exception.NotFound: - LOG.info("snapshot %s doesn't exist", snapshot.id) - - def update_migrated_volume(self, ctxt, volume, new_volume, - original_volume_status): - # as the volume name is used to id the volume we need to rename it - name_id = None - provider_location = None - current_name = new_volume['id'] - original_name = volume['id'] - try: - data = {'name': original_name} - self.client.req('volumes', 'PUT', data, name=current_name) - except exception.VolumeBackendAPIException: - LOG.error('Unable to rename the logical volume ' - 'for volume: %s', original_name) - # If the rename fails, _name_id should be set to the new - # volume id and provider_location should be set to the - # one from the new volume as well. 
- name_id = new_volume['_name_id'] or new_volume['id'] - provider_location = new_volume['provider_location'] - - return {'_name_id': name_id, 'provider_location': provider_location} - - def _update_volume_stats(self): - sys = self.client.get_cluster() - physical_space = int(sys["ud-ssd-space"]) / units.Mi - used_physical_space = int(sys["ud-ssd-space-in-use"]) / units.Mi - free_physical = physical_space - used_physical_space - actual_prov = int(sys["vol-size"]) / units.Mi - self._stats = {'volume_backend_name': self.backend_name, - 'vendor_name': 'Dell EMC', - 'driver_version': self.VERSION, - 'storage_protocol': self.protocol, - 'total_capacity_gb': physical_space, - 'free_capacity_gb': (free_physical * - self.provisioning_factor), - 'provisioned_capacity_gb': actual_prov, - 'max_over_subscription_ratio': self.provisioning_factor, - 'thin_provisioning_support': True, - 'thick_provisioning_support': False, - 'reserved_percentage': - self.configuration.reserved_percentage, - 'QoS_support': False, - 'multiattach': False, - } - self._stats.update(self.client.get_extra_capabilities()) - - def get_volume_stats(self, refresh=False): - """Get volume stats. - - If 'refresh' is True, run update the stats first. - """ - if refresh: - self._update_volume_stats() - return self._stats - - def manage_existing(self, volume, existing_ref, is_snapshot=False): - """Manages an existing LV.""" - lv_name = existing_ref['source-name'] - # Attempt to locate the volume. - try: - vol_obj = self.client.req('volumes', name=lv_name)['content'] - if ( - is_snapshot and - (not vol_obj['ancestor-vol-id'] or - vol_obj['ancestor-vol-id'][XTREMIO_OID_NAME] != - volume.volume_id)): - kwargs = {'existing_ref': lv_name, - 'reason': 'Not a snapshot of vol %s' % - volume.volume_id} - raise exception.ManageExistingInvalidReference(**kwargs) - except exception.NotFound: - kwargs = {'existing_ref': lv_name, - 'reason': 'Specified logical %s does not exist.' 
% - 'snapshot' if is_snapshot else 'volume'} - raise exception.ManageExistingInvalidReference(**kwargs) - - # Attempt to rename the LV to match the OpenStack internal name. - self.client.req('volumes', 'PUT', data={'vol-name': volume['id']}, - idx=vol_obj['index']) - - def manage_existing_get_size(self, volume, existing_ref, - is_snapshot=False): - """Return size of an existing LV for manage_existing.""" - # Check that the reference is valid - if 'source-name' not in existing_ref: - reason = _('Reference must contain source-name element.') - raise exception.ManageExistingInvalidReference( - existing_ref=existing_ref, reason=reason) - lv_name = existing_ref['source-name'] - # Attempt to locate the volume. - try: - vol_obj = self.client.req('volumes', name=lv_name)['content'] - except exception.NotFound: - kwargs = {'existing_ref': lv_name, - 'reason': 'Specified logical %s does not exist.' % - 'snapshot' if is_snapshot else 'volume'} - raise exception.ManageExistingInvalidReference(**kwargs) - # LV size is returned in gigabytes. Attempt to parse size as a float - # and round up to the next integer. 
- lv_size = int(math.ceil(float(vol_obj['vol-size']) / units.Mi)) - - return lv_size - - def unmanage(self, volume, is_snapshot=False): - """Removes the specified volume from Cinder management.""" - # trying to rename the volume to [cinder name]-unmanged - try: - self.client.req('volumes', 'PUT', name=volume['id'], - data={'vol-name': volume['name'] + '-unmanged'}) - except exception.NotFound: - LOG.info("%(typ)s with the name %(name)s wasn't found, " - "can't unmanage", - {'typ': 'Snapshot' if is_snapshot else 'Volume', - 'name': volume['id']}) - raise exception.VolumeNotFound(volume_id=volume['id']) - - def manage_existing_snapshot(self, snapshot, existing_ref): - self.manage_existing(snapshot, existing_ref, True) - - def manage_existing_snapshot_get_size(self, snapshot, existing_ref): - return self.manage_existing_get_size(snapshot, existing_ref, True) - - def unmanage_snapshot(self, snapshot): - self.unmanage(snapshot, True) - - def extend_volume(self, volume, new_size): - """Extend an existing volume's size.""" - data = {'vol-size': six.text_type(new_size) + 'g'} - try: - self.client.req('volumes', 'PUT', data, name=volume['id']) - except exception.NotFound: - msg = _("can't find the volume to extend") - raise exception.VolumeDriverException(message=msg) - - def check_for_export(self, context, volume_id): - """Make sure volume is exported.""" - pass - - def terminate_connection(self, volume, connector, **kwargs): - """Disallow connection from connector""" - tg_index = '1' - - if not connector: - vol = self.client.req('volumes', name=volume.id)['content'] - # foce detach, unmap all IGs from volume - IG_OID = 0 - ig_indexes = [lun_map[IG_OID][XTREMIO_OID_INDEX] for - lun_map in vol['lun-mapping-list']] - LOG.info('Force detach volume %(vol)s from luns %(luns)s.', - {'vol': vol['name'], 'luns': ig_indexes}) - else: - vol = self.client.req('volumes', name=volume.id, - data={'prop': 'index'})['content'] - ig_indexes = self._get_ig_indexes_from_initiators(connector) 
- - for ig_idx in ig_indexes: - lm_name = '%s_%s_%s' % (six.text_type(vol['index']), - six.text_type(ig_idx), - tg_index) - LOG.debug('Removing lun map %s.', lm_name) - try: - self.client.req('lun-maps', 'DELETE', name=lm_name) - except exception.NotFound: - LOG.warning("terminate_connection: lun map not found") - - def _get_password(self): - return ''.join(RANDOM.choice - (string.ascii_uppercase + string.digits) - for _ in range(12)) - - def create_lun_map(self, volume, ig, lun_num=None): - try: - data = {'ig-id': ig, 'vol-id': volume['id']} - if lun_num: - data['lun'] = lun_num - res = self.client.req('lun-maps', 'POST', data) - - lunmap = self._obj_from_result(res) - LOG.info('Created lun-map:\n%s', lunmap) - except exception.XtremIOAlreadyMappedError: - LOG.info('Volume already mapped, retrieving %(ig)s, %(vol)s', - {'ig': ig, 'vol': volume['id']}) - lunmap = self.client.find_lunmap(ig, volume['id']) - return lunmap - - def _get_ig_name(self, connector): - raise NotImplementedError() - - def _get_ig_indexes_from_initiators(self, connector): - initiator_names = self._get_initiator_names(connector) - return self.client.get_initiators_igs(initiator_names) - - def _get_initiator_names(self, connector): - raise NotImplementedError() - - def create_consistencygroup(self, context, group): - """Creates a consistency group. 
- - :param context: the context - :param group: the group object to be created - :returns: dict -- modelUpdate = {'status': 'available'} - :raises: VolumeBackendAPIException - """ - create_data = {'consistency-group-name': group['id']} - self.client.req('consistency-groups', 'POST', data=create_data, - ver='v2') - return {'status': fields.ConsistencyGroupStatus.AVAILABLE} - - def delete_consistencygroup(self, context, group, volumes): - """Deletes a consistency group.""" - self.client.req('consistency-groups', 'DELETE', name=group['id'], - ver='v2') - - volumes_model_update = [] - - for volume in volumes: - self.delete_volume(volume) - - update_item = {'id': volume['id'], - 'status': 'deleted'} - - volumes_model_update.append(update_item) - - model_update = {'status': group['status']} - - return model_update, volumes_model_update - - def _get_snapset_ancestors(self, snapset_name): - snapset = self.client.req('snapshot-sets', - name=snapset_name)['content'] - volume_ids = [s[XTREMIO_OID_INDEX] for s in snapset['vol-list']] - return {v['ancestor-vol-id'][XTREMIO_OID_NAME]: v['name'] for v - in self.client.req('volumes', - data={'full': 1, - 'props': - 'ancestor-vol-id'})['volumes'] - if v['index'] in volume_ids} - - def create_consistencygroup_from_src(self, context, group, volumes, - cgsnapshot=None, snapshots=None, - source_cg=None, source_vols=None): - """Creates a consistencygroup from source. - - :param context: the context of the caller. - :param group: the dictionary of the consistency group to be created. - :param volumes: a list of volume dictionaries in the group. - :param cgsnapshot: the dictionary of the cgsnapshot as source. - :param snapshots: a list of snapshot dictionaries in the cgsnapshot. - :param source_cg: the dictionary of a consistency group as source. - :param source_vols: a list of volume dictionaries in the source_cg. 
- :returns: model_update, volumes_model_update - """ - if not (cgsnapshot and snapshots and not source_cg or - source_cg and source_vols and not cgsnapshot): - msg = _("create_consistencygroup_from_src only supports a " - "cgsnapshot source or a consistency group source. " - "Multiple sources cannot be used.") - raise exception.InvalidInput(msg) - - if cgsnapshot: - snap_name = self._get_cgsnap_name(cgsnapshot) - snap_by_anc = self._get_snapset_ancestors(snap_name) - for volume, snapshot in zip(volumes, snapshots): - real_snap = snap_by_anc[snapshot['volume_id']] - self.create_volume_from_snapshot(volume, {'id': real_snap}) - - elif source_cg: - data = {'consistency-group-id': source_cg['id'], - 'snapshot-set-name': group['id']} - self.client.req('snapshots', 'POST', data, ver='v2') - snap_by_anc = self._get_snapset_ancestors(group['id']) - for volume, src_vol in zip(volumes, source_vols): - snap_vol_name = snap_by_anc[src_vol['id']] - self.client.req('volumes', 'PUT', {'name': volume['id']}, - name=snap_vol_name) - - create_data = {'consistency-group-name': group['id'], - 'vol-list': [v['id'] for v in volumes]} - self.client.req('consistency-groups', 'POST', data=create_data, - ver='v2') - - return None, None - - def update_consistencygroup(self, context, group, - add_volumes=None, remove_volumes=None): - """Updates a consistency group. - - :param context: the context of the caller. - :param group: the dictionary of the consistency group to be updated. - :param add_volumes: a list of volume dictionaries to be added. - :param remove_volumes: a list of volume dictionaries to be removed. 
- :returns: model_update, add_volumes_update, remove_volumes_update - """ - add_volumes = add_volumes if add_volumes else [] - remove_volumes = remove_volumes if remove_volumes else [] - for vol in add_volumes: - add_data = {'vol-id': vol['id'], 'cg-id': group['id']} - self.client.req('consistency-group-volumes', 'POST', add_data, - ver='v2') - for vol in remove_volumes: - remove_data = {'vol-id': vol['id'], 'cg-id': group['id']} - self.client.req('consistency-group-volumes', 'DELETE', remove_data, - name=group['id'], ver='v2') - return None, None, None - - def _get_cgsnap_name(self, cgsnapshot): - - group_id = cgsnapshot.get('group_id') - if group_id is None: - group_id = cgsnapshot.get('consistencygroup_id') - - return '%(cg)s%(snap)s' % {'cg': group_id - .replace('-', ''), - 'snap': cgsnapshot['id'].replace('-', '')} - - def create_cgsnapshot(self, context, cgsnapshot, snapshots): - """Creates a cgsnapshot.""" - - group_id = cgsnapshot.get('group_id') - if group_id is None: - group_id = cgsnapshot.get('consistencygroup_id') - - data = {'consistency-group-id': group_id, - 'snapshot-set-name': self._get_cgsnap_name(cgsnapshot)} - self.client.req('snapshots', 'POST', data, ver='v2') - - return None, None - - def delete_cgsnapshot(self, context, cgsnapshot, snapshots): - """Deletes a cgsnapshot.""" - self.client.req('snapshot-sets', 'DELETE', - name=self._get_cgsnap_name(cgsnapshot), ver='v2') - return None, None - - def create_group(self, context, group): - """Creates a group. - - :param context: the context of the caller. - :param group: the group object. - :returns: model_update - """ - - # the driver treats a group as a CG internally. - # We proxy the calls to the CG api. - return self.create_consistencygroup(context, group) - - def delete_group(self, context, group, volumes): - """Deletes a group. - - :param context: the context of the caller. - :param group: the group object. - :param volumes: a list of volume objects in the group. 
- :returns: model_update, volumes_model_update - """ - - # the driver treats a group as a CG internally. - # We proxy the calls to the CG api. - return self.delete_consistencygroup(context, group, volumes) - - def update_group(self, context, group, - add_volumes=None, remove_volumes=None): - """Updates a group. - - :param context: the context of the caller. - :param group: the group object. - :param add_volumes: a list of volume objects to be added. - :param remove_volumes: a list of volume objects to be removed. - :returns: model_update, add_volumes_update, remove_volumes_update - """ - - # the driver treats a group as a CG internally. - # We proxy the calls to the CG api. - return self.update_consistencygroup(context, group, add_volumes, - remove_volumes) - - def create_group_from_src(self, context, group, volumes, - group_snapshot=None, snapshots=None, - source_group=None, source_vols=None): - """Creates a group from source. - - :param context: the context of the caller. - :param group: the Group object to be created. - :param volumes: a list of Volume objects in the group. - :param group_snapshot: the GroupSnapshot object as source. - :param snapshots: a list of snapshot objects in group_snapshot. - :param source_group: the Group object as source. - :param source_vols: a list of volume objects in the source_group. - :returns: model_update, volumes_model_update - """ - - # the driver treats a group as a CG internally. - # We proxy the calls to the CG api. - return self.create_consistencygroup_from_src(context, group, volumes, - group_snapshot, snapshots, - source_group, source_vols) - - def create_group_snapshot(self, context, group_snapshot, snapshots): - """Creates a group_snapshot. - - :param context: the context of the caller. - :param group_snapshot: the GroupSnapshot object to be created. - :param snapshots: a list of Snapshot objects in the group_snapshot. 
- :returns: model_update, snapshots_model_update - """ - - # the driver treats a group as a CG internally. - # We proxy the calls to the CG api. - return self.create_cgsnapshot(context, group_snapshot, snapshots) - - def delete_group_snapshot(self, context, group_snapshot, snapshots): - """Deletes a group_snapshot. - - :param context: the context of the caller. - :param group_snapshot: the GroupSnapshot object to be deleted. - :param snapshots: a list of snapshot objects in the group_snapshot. - :returns: model_update, snapshots_model_update - """ - - # the driver treats a group as a CG internally. - # We proxy the calls to the CG api. - return self.delete_cgsnapshot(context, group_snapshot, snapshots) - - def _get_ig(self, name): - try: - return self.client.req('initiator-groups', 'GET', - name=name)['content'] - except exception.NotFound: - pass - - def _create_ig(self, name): - # create an initiator group to hold the initiator - data = {'ig-name': name} - self.client.req('initiator-groups', 'POST', data) - try: - return self.client.req('initiator-groups', name=name)['content'] - except exception.NotFound: - raise (exception.VolumeBackendAPIException - (data=_("Failed to create IG, %s") % name)) - - -@interface.volumedriver -class XtremIOISCSIDriver(XtremIOVolumeDriver, driver.ISCSIDriver): - """Executes commands relating to ISCSI volumes. - - We make use of model provider properties as follows: - - ``provider_location`` - if present, contains the iSCSI target information in the same - format as an ietadm discovery - i.e. ':, ' - - ``provider_auth`` - if present, contains a space-separated triple: - ' '. - `CHAP` is the only auth_method in use at the moment. 
- """ - driver_name = 'XtremIO_ISCSI' - - def __init__(self, *args, **kwargs): - super(XtremIOISCSIDriver, self).__init__(*args, **kwargs) - self.protocol = 'iSCSI' - - def _add_auth(self, data, login_chap, discovery_chap): - login_passwd, discovery_passwd = None, None - if login_chap: - data['initiator-authentication-user-name'] = 'chap_user' - login_passwd = self._get_password() - data['initiator-authentication-password'] = login_passwd - if discovery_chap: - data['initiator-discovery-user-name'] = 'chap_user' - discovery_passwd = self._get_password() - data['initiator-discovery-password'] = discovery_passwd - return login_passwd, discovery_passwd - - def _create_initiator(self, connector, login_chap, discovery_chap): - initiator = self._get_initiator_names(connector)[0] - # create an initiator - data = {'initiator-name': initiator, - 'ig-id': initiator, - 'port-address': initiator} - l, d = self._add_auth(data, login_chap, discovery_chap) - self.client.req('initiators', 'POST', data) - return l, d - - def initialize_connection(self, volume, connector): - try: - sys = self.client.get_cluster() - except exception.NotFound: - msg = _("XtremIO not initialized correctly, no clusters found") - raise exception.VolumeBackendAPIException(data=msg) - login_chap = (sys.get('chap-authentication-mode', 'disabled') != - 'disabled') - discovery_chap = (sys.get('chap-discovery-mode', 'disabled') != - 'disabled') - initiator_name = self._get_initiator_names(connector)[0] - initiator = self.client.get_initiator(initiator_name) - if initiator: - login_passwd = initiator['chap-authentication-initiator-password'] - discovery_passwd = initiator['chap-discovery-initiator-password'] - ig = self._get_ig(initiator['ig-id'][XTREMIO_OID_NAME]) - else: - ig = self._get_ig(self._get_ig_name(connector)) - if not ig: - ig = self._create_ig(self._get_ig_name(connector)) - (login_passwd, - discovery_passwd) = self._create_initiator(connector, - login_chap, - discovery_chap) - # if CHAP was 
enabled after the initiator was created - if login_chap and not login_passwd: - LOG.info('Initiator has no password while using chap, adding it.') - data = {} - (login_passwd, - d_passwd) = self._add_auth(data, login_chap, discovery_chap and - not discovery_passwd) - discovery_passwd = (discovery_passwd if discovery_passwd - else d_passwd) - self.client.req('initiators', 'PUT', data, idx=initiator['index']) - - # lun mappping - lunmap = self.create_lun_map(volume, ig['ig-id'][XTREMIO_OID_NAME]) - - properties = self._get_iscsi_properties(lunmap) - - if login_chap: - properties['auth_method'] = 'CHAP' - properties['auth_username'] = 'chap_user' - properties['auth_password'] = login_passwd - if discovery_chap: - properties['discovery_auth_method'] = 'CHAP' - properties['discovery_auth_username'] = 'chap_user' - properties['discovery_auth_password'] = discovery_passwd - LOG.debug('init conn params:\n%s', - strutils.mask_dict_password(properties)) - return { - 'driver_volume_type': 'iscsi', - 'data': properties - } - - def _get_iscsi_properties(self, lunmap): - """Gets iscsi configuration. - - :target_discovered: boolean indicating whether discovery was used - :target_iqn: the IQN of the iSCSI target - :target_portal: the portal of the iSCSI target - :target_lun: the lun of the iSCSI target - :volume_id: the id of the volume (currently used by xen) - :auth_method:, :auth_username:, :auth_password: - the authentication details. Right now, either auth_method is not - present meaning no authentication, or auth_method == `CHAP` - meaning use CHAP with the specified credentials. - multiple connection return - :target_iqns, :target_portals, :target_luns, which contain lists of - multiple values. The main portal information is also returned in - :target_iqn, :target_portal, :target_lun for backward compatibility. 
- """ - portals = self.client.get_iscsi_portals() - if not portals: - msg = _("XtremIO not configured correctly, no iscsi portals found") - LOG.error(msg) - raise exception.VolumeDriverException(message=msg) - portal = RANDOM.choice(portals) - portal_addr = ('%(ip)s:%(port)d' % - {'ip': portal['ip-addr'].split('/')[0], - 'port': portal['ip-port']}) - - tg_portals = ['%(ip)s:%(port)d' % {'ip': p['ip-addr'].split('/')[0], - 'port': p['ip-port']} - for p in portals] - properties = {'target_discovered': False, - 'target_iqn': portal['port-address'], - 'target_lun': lunmap['lun'], - 'target_portal': portal_addr, - 'target_iqns': [p['port-address'] for p in portals], - 'target_portals': tg_portals, - 'target_luns': [lunmap['lun']] * len(portals)} - return properties - - def _get_initiator_names(self, connector): - return [connector['initiator']] - - def _get_ig_name(self, connector): - return connector['initiator'] - - -@interface.volumedriver -class XtremIOFCDriver(XtremIOVolumeDriver, - driver.FibreChannelDriver): - - def __init__(self, *args, **kwargs): - super(XtremIOFCDriver, self).__init__(*args, **kwargs) - self.protocol = 'FC' - self._targets = None - - def get_targets(self): - if not self._targets: - try: - targets = self.client.get_fc_up_ports() - self._targets = [target['port-address'].replace(':', '') - for target in targets] - except exception.NotFound: - raise (exception.VolumeBackendAPIException - (data=_("Failed to get targets"))) - return self._targets - - def _get_free_lun(self, igs): - luns = [] - for ig in igs: - luns.extend(lm['lun'] for lm in - self.client.req('lun-maps', - data={'full': 1, 'prop': 'lun', - 'filter': 'ig-name:eq:%s' % ig}) - ['lun-maps']) - uniq_luns = set(luns + [0]) - seq = range(len(uniq_luns) + 1) - return min(set(seq) - uniq_luns) - - @fczm_utils.add_fc_zone - def initialize_connection(self, volume, connector): - wwpns = self._get_initiator_names(connector) - ig_name = self._get_ig_name(connector) - i_t_map = {} - found = [] - 
new = [] - for wwpn in wwpns: - init = self.client.get_initiator(wwpn) - if init: - found.append(init) - else: - new.append(wwpn) - i_t_map[wwpn.replace(':', '')] = self.get_targets() - # get or create initiator group - if new: - ig = self._get_ig(ig_name) - if not ig: - ig = self._create_ig(ig_name) - for wwpn in new: - data = {'initiator-name': wwpn, 'ig-id': ig_name, - 'port-address': wwpn} - self.client.req('initiators', 'POST', data) - igs = list(set([i['ig-id'][XTREMIO_OID_NAME] for i in found])) - if new and ig['ig-id'][XTREMIO_OID_NAME] not in igs: - igs.append(ig['ig-id'][XTREMIO_OID_NAME]) - - if len(igs) > 1: - lun_num = self._get_free_lun(igs) - else: - lun_num = None - for ig in igs: - lunmap = self.create_lun_map(volume, ig, lun_num) - lun_num = lunmap['lun'] - return {'driver_volume_type': 'fibre_channel', - 'data': { - 'target_discovered': False, - 'target_lun': lun_num, - 'target_wwn': self.get_targets(), - 'initiator_target_map': i_t_map}} - - @fczm_utils.remove_fc_zone - def terminate_connection(self, volume, connector, **kwargs): - (super(XtremIOFCDriver, self) - .terminate_connection(volume, connector, **kwargs)) - has_volumes = (not connector - or self.client. 
- num_of_mapped_volumes(self._get_ig_name(connector)) > 0) - - if has_volumes: - data = {} - else: - i_t_map = {} - for initiator in self._get_initiator_names(connector): - i_t_map[initiator.replace(':', '')] = self.get_targets() - data = {'target_wwn': self.get_targets(), - 'initiator_target_map': i_t_map} - - return {'driver_volume_type': 'fibre_channel', - 'data': data} - - def _get_initiator_names(self, connector): - return [wwpn if ':' in wwpn else - ':'.join(wwpn[i:i + 2] for i in range(0, len(wwpn), 2)) - for wwpn in connector['wwpns']] - - def _get_ig_name(self, connector): - return connector['host'] diff --git a/cinder/volume/drivers/disco/__init__.py b/cinder/volume/drivers/disco/__init__.py deleted file mode 100644 index e69de29bb..000000000 diff --git a/cinder/volume/drivers/disco/disco.py b/cinder/volume/drivers/disco/disco.py deleted file mode 100644 index 8414324da..000000000 --- a/cinder/volume/drivers/disco/disco.py +++ /dev/null @@ -1,634 +0,0 @@ -# copyright (c) 2016 Industrial Technology Research Institute. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -"""DISCO Block device Driver.""" - -import os -import time - -from oslo_config import cfg -from oslo_log import log as logging -from oslo_service import loopingcall -from oslo_utils import units -import six -from suds import client - -from cinder import context -from cinder.db.sqlalchemy import api -from cinder import exception -from cinder.i18n import _ -from cinder.image import image_utils -from cinder import interface -from cinder.volume import configuration -from cinder.volume import driver -from cinder.volume.drivers.disco import disco_api -from cinder.volume.drivers.disco import disco_attach_detach - - -LOG = logging.getLogger(__name__) - -disco_opts = [ - cfg.IPOpt('disco_client', - default='127.0.0.1', - help='The IP of DMS client socket server', - deprecated_group='DEFAULT'), - cfg.PortOpt('disco_client_port', - default='9898', - help='The port to connect DMS client socket server', - deprecated_group='DEFAULT'), - cfg.StrOpt('disco_wsdl_path', - default='/etc/cinder/DISCOService.wsdl', - deprecated_for_removal=True, - help='Path to the wsdl file ' - 'to communicate with DISCO request manager'), - cfg.IPOpt('disco_rest_ip', - help='The IP address of the REST server', - deprecated_name='rest_ip', deprecated_group='DEFAULT'), - cfg.StrOpt('disco_choice_client', - help='Use soap client or rest client for communicating ' - 'with DISCO. 
Possible values are "soap" or ' - '"rest".', choices=['soap', 'rest'], - deprecated_name='choice_client', deprecated_group='DEFAULT'), - cfg.PortOpt('disco_src_api_port', - default='8080', - help='The port of DISCO source API', - deprecated_group='DEFAULT'), - cfg.StrOpt('disco_volume_name_prefix', - default='openstack-', - help='Prefix before volume name to differentiate ' - 'DISCO volume created through openstack ' - 'and the other ones', - deprecated_name='volume_name_prefix'), - cfg.IntOpt('disco_snapshot_check_timeout', - default=3600, - help='How long we check whether a snapshot ' - 'is finished before we give up', - deprecated_name='snapshot_check_timeout'), - cfg.IntOpt('disco_restore_check_timeout', - default=3600, - help='How long we check whether a restore ' - 'is finished before we give up', - deprecated_name='restore_check_timeout'), - cfg.IntOpt('disco_clone_check_timeout', - default=3600, - help='How long we check whether a clone ' - 'is finished before we give up', - deprecated_name='clone_check_timeout'), - cfg.IntOpt('disco_retry_interval', - default=1, - help='How long we wait before retrying to ' - 'get an item detail', - deprecated_name='retry_interval') -] - -DISCO_CODE_MAPPING = { - 'request.success': 1, - 'request.ongoing': 2, - 'request.failure': 3, -} - -CONF = cfg.CONF -CONF.register_opts(disco_opts, group=configuration.SHARED_CONF_GROUP) - - -# Driver to communicate with DISCO storage solution -@interface.volumedriver -class DiscoDriver(driver.VolumeDriver): - """Execute commands related to DISCO Volumes. - - .. 
code:: text - - Version history: - 1.0 - disco volume driver using SOAP - 1.1 - disco volume driver using REST and only compatible - with version greater than disco-1.6.4 - - """ - - VERSION = "1.1" - CI_WIKI_NAME = "ITRI_DISCO_CI" - - def __init__(self, *args, **kwargs): - """Init Disco driver : get configuration, create client.""" - super(DiscoDriver, self).__init__(*args, **kwargs) - self.configuration.append_config_values(disco_opts) - self.ctxt = context.get_admin_context() - self.attach_detach_volume = ( - disco_attach_detach.AttachDetachDiscoVolume(self.configuration)) - - def do_setup(self, context): - """Create client for DISCO request manager.""" - LOG.debug("Enter in DiscoDriver do_setup.") - if self.configuration.disco_choice_client.lower() == "rest": - self.client = disco_api.DiscoApi( - self.configuration.disco_rest_ip, - self.configuration.disco_src_api_port) - else: - path = ''.join(['file:', self.configuration.disco_wsdl_path]) - init_client = client.Client(path, cache=None) - self.client = init_client.service - - def check_for_setup_error(self): - """Make sure we have the pre-requisites.""" - if (not self.configuration.disco_rest_ip and - self.configuration.disco_choice_client.lower() == "rest"): - msg = _("Could not find the IP address of the REST server.") - raise exception.VolumeBackendAPIException(data=msg) - else: - path = self.configuration.disco_wsdl_path - if not os.path.exists(path): - msg = _("Could not find DISCO wsdl file.") - raise exception.VolumeBackendAPIException(data=msg) - - def create_volume(self, volume): - """Create a disco volume.""" - name = self.configuration.disco_volume_name_prefix, volume["id"] - vol_name = ''.join(name) - vol_size = volume['size'] * units.Ki - LOG.debug("Create volume : [name] %(vname)s - [size] %(vsize)s.", - {'vname': vol_name, 'vsize': six.text_type(vol_size)}) - reply = self.client.volumeCreate(vol_name, vol_size) - status = reply['status'] - result = reply['result'] - LOG.debug("Create volume : 
[status] %(stat)s - [result] %(res)s.", - {'stat': six.text_type(status), 'res': result}) - - if status: - msg = (_("Error while creating volume " - "[status] %(stat)s - [result] %(res)s.") % - {'stat': six.text_type(status), 'res': result}) - LOG.error(msg) - raise exception.VolumeBackendAPIException(data=msg) - LOG.debug("Volume %s created.", volume["name"]) - return {'provider_location': result} - - def delete_volume(self, volume): - """Delete a logical volume.""" - disco_vol_id = volume['provider_location'] - LOG.debug("Delete disco volume : %s.", disco_vol_id) - reply = self.client.volumeDelete(disco_vol_id) - status = reply['status'] - result = reply['result'] - - LOG.debug("Delete volume [status] %(stat)s - [result] %(res)s.", - {'stat': six.text_type(status), 'res': result}) - - if status: - msg = (_("Error while deleting volume " - "[status] %(stat)s - [result] %(res)s.") % - {'stat': six.text_type(status), 'res': result}) - LOG.error(msg) - raise exception.VolumeBackendAPIException(data=msg) - - LOG.debug("Volume %s deleted.", volume['name']) - - def create_snapshot(self, snapshot): - """Create a disco snapshot.""" - volume = api.volume_get(self.ctxt, snapshot['volume_id']) - description = snapshot['display_description'] - vol_id = volume['provider_location'] - LOG.debug("Create snapshot of volume : %(id)s, " - "description : %(desc)s.", - {'id': vol_id, 'desc': description}) - - # Trigger an asynchronous local snapshot - reply = self.client.snapshotCreate(vol_id, - -1, -1, - description) - status = reply['status'] - result = reply['result'] - LOG.debug("Create snapshot : [status] %(stat)s - [result] %(res)s.", - {'stat': six.text_type(status), 'res': result}) - - if status: - msg = (_("Error while creating snapshot " - "[status] %(stat)s - [result] %(res)s.") % - {'stat': six.text_type(status), 'res': result}) - LOG.error(msg) - raise exception.VolumeBackendAPIException(data=msg) - - # Monitor the status until it becomes either success or fail - params = 
{'snapshot_id': int(result)} - start_time = int(time.time()) - snapshot_request = DISCOCheck(self.client, - params, - start_time, - "snapshot_detail", - self.configuration) - timeout = self.configuration.disco_snapshot_check_timeout - snapshot_request._monitor_request(timeout) - - snapshot['provider_location'] = result - LOG.debug("snapshot taken successfully on volume : %(volume)s.", - {'volume': volume['name']}) - return {'provider_location': result} - - def delete_snapshot(self, snapshot): - """Delete a disco snapshot.""" - LOG.debug("Enter in delete a disco snapshot.") - - snap_id = snapshot['provider_location'] - LOG.debug("[start] Delete snapshot : %s.", snap_id) - reply = self.client.snapshotDelete(snap_id) - status = reply['status'] - result = reply['result'] - LOG.debug("[End] Delete snapshot : " - "[status] %(stat)s - [result] %(res)s.", - {'stat': six.text_type(status), 'res': result}) - - if status: - msg = (_("Error while deleting snapshot " - "[status] %(stat)s - [result] %(res)s") % - {'stat': six.text_type(status), 'res': result}) - LOG.error(msg) - raise exception.VolumeBackendAPIException(data=msg) - - def create_volume_from_snapshot(self, volume, snapshot): - """Create a volume from a snapshot.""" - name = self.configuration.disco_volume_name_prefix, volume['id'] - snap_id = snapshot['provider_location'] - vol_name = ''.join(name) - # Trigger an asynchronous restore operation - LOG.debug("[start] Create volume from snapshot : " - "%(snap_id)s - name : %(vol_name)s.", - {'snap_id': snap_id, 'vol_name': vol_name}) - reply = self.client.restoreFromSnapshot(snap_id, vol_name, -1, None, - -1) - status = reply['status'] - result = reply['result'] - LOG.debug("Restore volume from snapshot " - "[status] %(stat)s - [result] %(res)s.", - {'stat': six.text_type(status), 'res': result}) - - if status: - msg = (_("Error[%(stat)s - %(res)s] while restoring snapshot " - "[%(snap_id)s] into volume [%(vol)s].") % - {'stat': six.text_type(status), 'res': result, - 
'snap_id': snap_id, 'vol': vol_name}) - LOG.error(msg) - raise exception.VolumeBackendAPIException(data=msg) - - # Monitor the status until it becomes - # either success, fail or timeout - params = {'restore_id': int(result)} - start_time = int(time.time()) - restore_request = DISCOCheck(self.client, - params, - start_time, - "restore_detail", - self.configuration) - timeout = self.configuration.disco_restore_check_timeout - restore_request._monitor_request(timeout) - reply = self.client.volumeDetailByName(vol_name) - status = reply['status'] - new_vol_id = reply['volumeInfoResult']['volumeId'] - - if status: - msg = (_("Error[status] %(stat)s - [result] %(res)s] " - "while getting volume id.") % - {'stat': six.text_type(status), 'res': result}) - LOG.error(msg) - raise exception.VolumeBackendAPIException(data=msg) - LOG.debug("Restore done [status] %(stat)s - " - "[volume id] %(vol_id)s.", - {'stat': status, 'vol_id': six.text_type(new_vol_id)}) - return {'provider_location': new_vol_id} - - def create_cloned_volume(self, volume, src_vref): - """Create a clone of the specified volume.""" - LOG.debug("Creating clone of volume: %s.", src_vref['id']) - name = self.configuration.disco_volume_name_prefix, volume['id'] - vol_name = ''.join(name) - vol_size = volume['size'] * units.Ki - src_vol_id = src_vref['provider_location'] - LOG.debug("Clone volume : " - "[name] %(name)s - [source] %(source)s - [size] %(size)s.", - {'name': vol_name, - 'source': src_vol_id, - 'size': six.text_type(vol_size)}) - reply = self.client.volumeClone(src_vol_id, vol_name) - status = reply['status'] - result = reply['result'] - LOG.debug("Clone volume : [status] %(stat)s - [result] %(res)s.", - {'stat': six.text_type(status), 'res': result}) - - if status: - msg = (_("Error while creating volume " - "[status] %(stat)s - [result] %(res)s.") % - {'stat': six.text_type(status), 'res': result}) - LOG.error(msg) - raise exception.VolumeBackendAPIException(data=msg) - - # Monitor the status until 
it becomes - # either success, fail or timeout - params = {'clone_id': int(result), - 'vol_name': vol_name} - start_time = int(time.time()) - clone_request = DISCOCheck(self.client, - params, - start_time, - "clone_detail", - self.configuration) - clone_request._monitor_request( - self.configuration.disco_clone_check_timeout) - reply = self.client.volumeDetailByName(vol_name) - status = reply['status'] - new_vol_id = reply['volumeInfoResult']['volumeId'] - - if status: - msg = (_("Error[%(stat)s - %(res)s] " - "while getting volume id."), - {'stat': six.text_type(status), 'res': result}) - LOG.error(msg) - raise exception.VolumeBackendAPIException(data=msg) - - LOG.debug("clone done : " - "[status] %(stat)s - [volume id] %(vol_id)s.", - {'stat': status, 'vol_id': six.text_type(new_vol_id)}) - return {'provider_location': new_vol_id} - - def copy_image_to_volume(self, context, volume, image_service, image_id): - """Fetch the image from image_service and write it to the volume.""" - LOG.debug("Enter in copy image to volume for disco.") - - try: - attach_detach_volume = ( - disco_attach_detach.AttachDetachDiscoVolume( - self.configuration)) - device_info = attach_detach_volume._attach_volume(volume) - image_utils.fetch_to_raw(context, - image_service, - image_id, - device_info['path'], - self.configuration.volume_dd_blocksize, - size=volume['size']) - finally: - attach_detach_volume._detach_volume(volume) - - def copy_volume_to_image(self, context, volume, image_service, image_meta): - """Copy a volume to a new image.""" - LOG.debug("Enter in copy image to volume for disco.") - try: - attach_detach_volume = ( - disco_attach_detach.AttachDetachDiscoVolume( - self.configuration)) - device_info = attach_detach_volume._attach_volume(volume) - image_utils.upload_volume(context, - image_service, - image_meta, - device_info['path']) - finally: - attach_detach_volume._detach_volume(volume) - - def extend_volume(self, volume, new_size): - """Extend an existing volume's 
size.""" - vol_id = volume['provider_location'] - LOG.debug("Extends volume : %(id)s, new size : %(size)s.", - {'id': vol_id, 'size': new_size}) - new_size_mb = new_size * units.Ki - reply = self.client.volumeExtend(vol_id, new_size_mb) - status = reply['status'] - result = reply['result'] - if status: - msg = (_("Error while extending volume " - "[status] %(stat)s - [result] %(res)s."), - {'stat': six.text_type(status), 'res': result}) - LOG.error(msg) - raise exception.VolumeBackendAPIException(data=msg) - LOG.debug("Volume extended : [id] %(vid)s - " - "[status] %(stat)s - [result] %(res)s.", - {'vid': vol_id, - 'stat': six.text_type(status), - 'res': result}) - - def initialize_connection(self, volume, connector): - """Function called before attaching a volume.""" - LOG.debug("Enter in initialize connection with disco, " - "connector is %s.", connector) - cp = self.attach_detach_volume._get_connection_properties(volume) - data = { - 'driver_volume_type': 'disco', - 'data': cp - } - LOG.debug("Initialize connection [data]: %s.", data) - return data - - def terminate_connection(self, volume, connector, **kwargs): - """Function called after attaching a volume.""" - LOG.debug("Enter in terminate connection with disco.") - - def _update_volume_stats(self): - LOG.debug("Enter in update volume stats.") - stats = {} - backend_name = self.configuration.safe_get('volume_backend_name') - stats['volume_backend_name'] = backend_name or 'disco' - stats['storage_protocol'] = 'disco' - stats['driver_version'] = self.VERSION - stats['reserved_percentage'] = 0 - stats['vendor_name'] = 'ITRI' - stats['QoS_support'] = False - - try: - reply = self.client.systemInformationList() - status = reply['status'] - - if status: - msg = (_("Error while getting " - "disco information [%s].") % - six.text_type(status)) - LOG.error(msg) - raise exception.VolumeBackendAPIException(data=msg) - - info_list = reply['propertyListResult']['PropertyInfoList'] - for info in info_list: - if 
info['name'] == 'freeCapacityGB': - stats['free_capacity_gb'] = float(info['value']) - elif info['name'] == 'totalCapacityGB': - stats['total_capacity_gb'] = float(info['value']) - except Exception: - stats['total_capacity_gb'] = 'unknown' - stats['free_capacity_gb'] = 'unknown' - - self._stats = stats - - def get_volume_stats(self, refresh=False): - """Get backend information.""" - if refresh: - self._update_volume_stats() - return self._stats - - def local_path(self, volume): - """Return the path to the DISCO volume.""" - return "/dev/dms%s" % volume['name'] - - def manage_existing(self, volume, existing_ref): - """Manage an existing volume.""" - if 'source-name' not in existing_ref and'source-id'not in existing_ref: - msg = _("No source-id/source-name in existing_ref") - LOG.error(msg) - raise exception.VolumeBackendAPIException(data=msg) - elif 'source-name' not in existing_ref: - src_vol = self.client.volumeDetail( - existing_ref['source-id']) - if src_vol['status']: - vol_id = existing_ref['source-id'] - msg = (_("Error while getting volume details, " - "[status] %(stat)s - [volume id] %(vol_id)s") % - {'stat': six.text_type(src_vol['status']), - 'vol_id': vol_id}) - LOG.error(msg) - raise exception.VolumeBackendAPIException(data=msg) - return {'display_name': src_vol['volumeInfoResult']['volumeName'], - 'provider_location': existing_ref['source-id']} - else: - src_vol = self.client.volumeDetailByName( - existing_ref['source-name']) - if src_vol['status']: - vol_name = existing_ref['source-name'] - msg = (_("Error while getting volume details with the name " - "%(name)s: [status] %(stat)s") % {'name': vol_name, - 'stat': src_vol['status']}) - LOG.error(msg) - raise exception.VolumeBackendAPIException(data=msg) - return { - 'provider_location': src_vol['volumeInfoResult']['volumeId']} - - def unmanage(self, volume): - """Unmanage an existing volume.""" - LOG.debug("unmanage is called", resource=volume) - - def manage_existing_get_size(self, volume, 
existing_ref): - """Return size of an existing volume.""" - if 'source-name' not in existing_ref and'source-id'not in existing_ref: - msg = _("No source-id/source-name in existing_ref") - LOG.error(msg) - raise exception.VolumeBackendAPIException(data=msg) - elif 'source-name' not in existing_ref: - src_vol = self.client.volumeDetail( - existing_ref['source-id']) - if src_vol['status']: - vol_id = existing_ref['source-id'] - msg = (_("Error while getting volume details, " - "[status] %(stat)s - [volume id] %(vol_id)s") % - {'stat': six.text_type(src_vol['status']), - 'vol_id': vol_id}) - LOG.error(msg) - raise exception.VolumeBackendAPIException(data=msg) - return src_vol['volumeInfoResult']['volSizeMb'] - else: - src_vol = self.client.volumeDetailByName( - existing_ref['source-name']) - if src_vol['status']: - vol_name = existing_ref['source-name'] - msg = (_("Error while getting volume details with the name " - "%(name)s: [status] %(stat)s") % {'name': vol_name, - 'stat': src_vol['status']}) - LOG.error(msg) - raise exception.VolumeBackendAPIException(data=msg) - return src_vol['volumeInfoResult']['volSizeMb'] - - def ensure_export(self, context, volume): - """Ensure an export.""" - pass - - def create_export(self, context, volume, connector): - """Export the volume.""" - pass - - def remove_export(self, context, volume): - """Remove an export for a logical volume.""" - pass - - -class DISCOCheck(object): - """Used to monitor DISCO operations.""" - - def __init__(self, client, param, start_time, function, configuration): - """Init some variables for checking some requests done in DISCO.""" - self.start_time = start_time - self.function = function - self.client = client - self.param = param - self.configuration = configuration - - def is_timeout(self, start_time, timeout): - """Check whether we reach the timeout.""" - current_time = int(time.time()) - return current_time - start_time > timeout - - def _retry_get_detail(self, start_time, timeout, operation, 
params): - """Keep trying to query an item detail unless we reach the timeout.""" - reply = self._call_api(operation, params) - status = reply['status'] - msg = (_("Error while getting %(op)s details, " - "returned code: %(status)s.") % - {'op': operation, 'status': six.text_type(status)}) - if status: - LOG.error(msg) - raise exception.VolumeBackendAPIException(data=msg) - - item_status = self._get_item_status(operation, reply) - if item_status == DISCO_CODE_MAPPING['request.failure']: - LOG.error(msg) - raise exception.VolumeBackendAPIException(data=msg) - - elif item_status == DISCO_CODE_MAPPING['request.success']: - raise loopingcall.LoopingCallDone(retvalue=reply) - elif self.is_timeout(start_time, timeout): - msg = (_("Timeout while calling %s ") % operation) - LOG.error(msg) - raise exception.VolumeBackendAPIException(data=msg) - - def _call_api(self, operation, params): - """Make the call to the SOAP api.""" - if operation == 'snapshot_detail': - return self.client.snapshotDetail(params['snapshot_id']) - if operation == 'restore_detail': - return self.client.restoreDetail(params['restore_id']) - if operation == 'clone_detail': - return self.client.cloneDetail(params['clone_id'], - params['vol_name']) - else: - msg = (_("Unknown operation %s."), operation) - LOG.error(msg) - raise exception.VolumeBackendAPIException(data=msg) - - def _get_item_status(self, operation, reply): - """Make the call to the SOAP api.""" - if reply is None: - msg = (_("Call returned a None object")) - LOG.error(msg) - raise exception.VolumeBackendAPIException(data=msg) - elif operation == 'snapshot_detail': - return reply['snapshotInfoResult']['status'] - elif operation == 'restore_detail': - return reply['restoreInfoResult']['status'] - elif operation == 'clone_detail': - return int(reply['result']) - else: - msg = (_("Unknown operation " - "%s."), operation) - LOG.error(msg) - raise exception.VolumeBackendAPIException(data=msg) - - def _monitor_request(self, timeout): - """Monitor 
the request.""" - timer = loopingcall.FixedIntervalLoopingCall( - self._retry_get_detail, - self.start_time, - timeout, - self.function, - self.param) - timer.start(interval=self.configuration.disco_retry_interval).wait() diff --git a/cinder/volume/drivers/disco/disco_api.py b/cinder/volume/drivers/disco/disco_api.py deleted file mode 100644 index 7678b7df2..000000000 --- a/cinder/volume/drivers/disco/disco_api.py +++ /dev/null @@ -1,165 +0,0 @@ -# copyright (c) 2016 Industrial Technology Research Institute. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -"""DISCO Backup Service Implementation.""" - -import json - -import requests -import six - - -class DiscoApi(object): - """Class for all the requests to Disco API.""" - - def __init__(self, ip, port): - """Init client.""" - # Rest related variables - self.req_headers = {'Content-type': 'application/json'} - prefix_vars = {'server_ip': ip, - 'server_port': port, - 'api_prefix': 'RM-REST-Server/disco'} - self.request_prefix = ("http://%(server_ip)s:%(server_port)s" - "/%(api_prefix)s") % prefix_vars - self.prefix_var = {'req_prefix': self.request_prefix} - - def volumeCreate(self, volume_name, size): - """Create a DISCO volume.""" - params = {'volumeName': volume_name, 'volumeSize': size, - 'backupPolicyId': -1} - data = json.dumps(params, - sort_keys=True, - indent=4, - separators=(',', ': ')) - request = ("%(req_prefix)s/volume" % self.prefix_var) - r = requests.post(request, data, headers=self.req_headers) - return r.json() - - def volumeDelete(self, volume_id): - """Delete the temporary volume.""" - request_vars = {'req_prefix': self.request_prefix, - 'volume_id': six.text_type(volume_id)} - request = ("%(req_prefix)s/volume/%(volume_id)s") % request_vars - r = requests.delete(request) - return r.json() - - def volumeExtend(self, vol_id, size): - """Extend DISCO volume.""" - params = {'volumeSize': six.text_type(size), - 'volumeId': six.text_type(vol_id)} - data = json.dumps(params, - sort_keys=True, - indent=4, - separators=(',', ': ')) - request = ("%(req_prefix)s/volume/extend" % self.prefix_var) - r = requests.put(request, data, headers=self.req_headers) - return r.json() - - def volumeDetail(self, volume_id): - """Get volume information of the destination DISCO volume.""" - request_vars = {'req_prefix': self.request_prefix, - 'vol_id': six.text_type(volume_id)} - request = ("%(req_prefix)s/volume/%(vol_id)s") % request_vars - r = requests.get(request) - volume_info = r.json() - return volume_info - - def volumeDetailByName(self, volume_name): - """Get 
volume information of the DISCO volume.""" - request_vars = {'req_prefix': self.request_prefix, - 'volume_name': six.text_type(volume_name)} - request = ("%(req_prefix)s/volume?name=%(volume_name)s") % request_vars - r = requests.get(request) - return r.json() - - def volumeClone(self, volume_id, volume_name): - """Clone a DISCO volume.""" - params = {'volumeName': volume_name, 'volumeId': volume_id} - data = json.dumps(params, - sort_keys=True, - indent=4, - separators=(',', ': ')) - request = ("%(req_prefix)s/clone" % self.prefix_var) - r = requests.post(request, data, headers=self.req_headers) - return r.json() - - def cloneDetail(self, clone_id, clone_name): - """Get detail of the clone.""" - request_vars = {'req_prefix': self.request_prefix, - 'clone_name': clone_name, - 'clone_id': six.text_type(clone_id)} - request = ("%(req_prefix)s/clone?cloneId=%(clone_id)s&" - "name=%(clone_name)s") % request_vars - r = requests.get(request) - return r.json() - - def snapshotCreate(self, disco_volume_id, reserve_days, zone_id=None, - description=None): - """Take a snapshot of the volume.""" - params = {'volumeId': disco_volume_id, - 'reserveDays': reserve_days, - 'description': description} - data = json.dumps(params, sort_keys=True, indent=4, - separators=(',', ': ')) - - request = ("%(req_prefix)s/snapshot" % self.prefix_var) - r = requests.post(request, data, headers=self.req_headers) - return r.json() - - def snapshotDelete(self, snapshot_id): - """Delete a snapshot.""" - request_vars = {'req_prefix': self.request_prefix, - 'snapshot_id': six.text_type(snapshot_id)} - request = ("%(req_prefix)s/snapshot/%(snapshot_id)s") % request_vars - r = requests.delete(request) - return r.json() - - def snapshotDetail(self, snapshot_id): - """Monitor end of the snapshot.""" - request_vars = {'req_prefix': self.request_prefix, - 'snapshot_id': snapshot_id} - request = ("%(req_prefix)s/snapshot/%(snapshot_id)s") % request_vars - r = requests.get(request) - return r.json() - - def 
restoreFromSnapshot(self, snapshot_id, volume_name, zone_id, - description, volume_id): - """restore a snapshot of into a volume.""" - params = {'snapshotId': snapshot_id, - 'volumeName': volume_name, - 'zone_id': zone_id, - 'description': "local restore snapshot", - 'volumeId': volume_id} - data = json.dumps(params, - sort_keys=True, - indent=4, - separators=(',', ': ')) - request = ("%(req_prefix)s/restore" % self.prefix_var) - r = requests.post(request, data, headers=self.req_headers) - return r.json() - - def restoreDetail(self, restore_id): - """Monitor end of the restore.""" - request_vars = {'req_prefix': self.request_prefix, - 'restore_id': restore_id} - request = ("%(req_prefix)s/restore/%(restore_id)s") % request_vars - r = requests.get(request) - return r.json() - - def systemInformationList(self): - """Get the list of the system information.""" - request = ("%(req_prefix)s/systemInformationList") % self.prefix_var - r = requests.get(request) - return r.json() diff --git a/cinder/volume/drivers/disco/disco_attach_detach.py b/cinder/volume/drivers/disco/disco_attach_detach.py deleted file mode 100644 index e4252bd01..000000000 --- a/cinder/volume/drivers/disco/disco_attach_detach.py +++ /dev/null @@ -1,68 +0,0 @@ -# copyright (c) 2016 Industrial Technology Research Institute. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -"""Class for DISCO to attach and detach volume.""" - -from os_brick.initiator import connector -from oslo_log import log as logging - -from cinder import utils - - -LOG = logging.getLogger(__name__) - - -class AttachDetachDiscoVolume(object): - """Class for attach and detach a DISCO volume.""" - - def __init__(self, configuration): - """Init volume attachment class.""" - self.configuration = configuration - self.connector = connector.InitiatorConnector.factory( - self._get_connector_identifier(), utils.get_root_helper(), - device_scan_attempts=( - self.configuration.num_volume_device_scan_tries) - ) - self.connection_conf = {} - self.connection_conf['server_ip'] = self.configuration.disco_client - self.connection_conf['server_port'] = ( - self.configuration.disco_client_port) - - self.connection_properties = {} - self.connection_properties['name'] = None - self.connection_properties['disco_id'] = None - self.connection_properties['conf'] = self.connection_conf - - def _get_connection_properties(self, volume): - """Return a dictionnary with the connection properties.""" - connection_properties = dict(self.connection_properties) - connection_properties['name'] = volume['name'] - connection_properties['disco_id'] = volume['provider_location'] - return connection_properties - - def _get_connector_identifier(self): - """Return connector identifier, put here to mock it in unit tests.""" - return connector.DISCO - - def _attach_volume(self, volume): - """Call the connector.connect_volume().""" - connection_properties = self._get_connection_properties(volume) - device_info = self.connector.connect_volume(connection_properties) - return device_info - - def _detach_volume(self, volume): - """Call the connector.disconnect_volume().""" - connection_properties = self._get_connection_properties(volume) - self.connector.disconnect_volume(connection_properties, volume) diff --git a/cinder/volume/drivers/dothill/__init__.py b/cinder/volume/drivers/dothill/__init__.py deleted 
file mode 100644 index e69de29bb..000000000 diff --git a/cinder/volume/drivers/dothill/dothill_client.py b/cinder/volume/drivers/dothill/dothill_client.py deleted file mode 100644 index c9ba1b97d..000000000 --- a/cinder/volume/drivers/dothill/dothill_client.py +++ /dev/null @@ -1,613 +0,0 @@ -# Copyright 2014 Objectif Libre -# Copyright 2015 Dot Hill Systems Corp. -# Copyright 2016 Seagate Technology or one of its affiliates -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -# - -import hashlib -import math -import time - -from lxml import etree -from oslo_log import log as logging -from oslo_utils import units -import requests -import six - -from cinder import exception -from cinder.i18n import _ -from cinder import utils - -LOG = logging.getLogger(__name__) - - -class DotHillClient(object): - def __init__(self, host, login, password, protocol, ssl_verify): - self._mgmt_ip_addrs = list(map(str.strip, host.split(','))) - self._login = login - self._password = password - self._protocol = protocol - self._session_key = None - self.ssl_verify = ssl_verify - self._set_host(self._mgmt_ip_addrs[0]) - self._fw = '' - self._luns_in_use_by_host = {} - - def _set_host(self, ip_addr): - self._curr_ip_addr = ip_addr - self._base_url = "%s://%s/api" % (self._protocol, ip_addr) - - def _get_auth_token(self, xml): - """Parse an XML authentication reply to extract the session key.""" - self._session_key = None - try: - tree = etree.XML(xml) - if 
(tree.findtext(".//PROPERTY[@name='response-type']") == - "success"): - self._session_key = ( - tree.findtext(".//PROPERTY[@name='response']")) - except Exception as e: - msg = _("Cannot parse session key: %s") % e.msg - raise exception.DotHillConnectionError(message=msg) - - def login(self): - if self._session_key is None: - return self.session_login() - - def session_login(self): - """Authenticates the service on the device. - - Tries all the IP addrs listed in the san_ip parameter - until a working one is found or the list is exhausted. - """ - - try: - self._get_session_key() - self.get_firmware_version() - LOG.debug("Logged in to array at %s (session %s)", - self._base_url, self._session_key) - return - except exception.DotHillConnectionError: - not_responding = self._curr_ip_addr - LOG.exception('session_login failed to connect to %s', - self._curr_ip_addr) - # Loop through the remaining management addresses - # to find one that's up. - for host in self._mgmt_ip_addrs: - if host is not_responding: - continue - self._set_host(host) - try: - self._get_session_key() - return - except exception.DotHillConnectionError: - LOG.error('Failed to connect to %s', - self._curr_ip_addr) - continue - raise exception.DotHillConnectionError( - message=_("Failed to log in to management controller")) - - @utils.synchronized(__name__, external=True) - def _get_session_key(self): - """Retrieve a session key from the array.""" - - self._session_key = None - hash_ = "%s_%s" % (self._login, self._password) - if six.PY3: - hash_ = hash_.encode('utf-8') - hash_ = hashlib.md5(hash_) - digest = hash_.hexdigest() - - url = self._base_url + "/login/" + digest - try: - xml = requests.get(url, verify=self.ssl_verify, timeout=30) - except requests.exceptions.RequestException: - msg = _("Failed to obtain MC session key") - LOG.exception(msg) - raise exception.DotHillConnectionError(message=msg) - - self._get_auth_token(xml.text.encode('utf8')) - LOG.debug("session key = %s", 
self._session_key) - if self._session_key is None: - raise exception.DotHillAuthenticationError - - def _assert_response_ok(self, tree): - """Parses the XML returned by the device to check the return code. - - Raises a DotHillRequestError error if the return code is not 0 - or if the return code is None. - """ - # Get the return code for the operation, raising an exception - # if it is not present. - return_code = tree.findtext(".//PROPERTY[@name='return-code']") - if not return_code: - raise exception.DotHillRequestError(message="No status found") - - # If no error occurred, just return. - if return_code == '0': - return - - # Format a message for the status code. - msg = "%s (%s)" % (tree.findtext(".//PROPERTY[@name='response']"), - return_code) - - raise exception.DotHillRequestError(message=msg) - - def _build_request_url(self, path, *args, **kargs): - url = self._base_url + path - if kargs: - url += '/' + '/'.join(["%s/%s" % (k.replace('_', '-'), v) - for (k, v) in kargs.items()]) - if args: - url += '/' + '/'.join(args) - - return url - - def _request(self, path, *args, **kargs): - """Performs an API request on the array, with retry. - - Propagates a DotHillConnectionError if no valid response is - received from the array, e.g. if the network is down. - - Propagates a DotHillRequestError if the device returned a response - but the status is not 0. The device error message will be used - in the exception message. - - If the status is OK, returns the XML data for further processing. 
- """ - tries_left = 2 - while tries_left > 0: - try: - return self._api_request(path, *args, **kargs) - except exception.DotHillConnectionError as e: - if tries_left < 1: - LOG.error("Array Connection error: " - "%s (no more retries)", e.msg) - raise - # Retry on any network connection errors, SSL errors, etc - LOG.error("Array Connection error: %s (retrying)", e.msg) - except exception.DotHillRequestError as e: - if tries_left < 1: - LOG.error("Array Request error: %s (no more retries)", - e.msg) - raise - # Retry specific errors which may succeed if we log in again - # -10027 => The user is not recognized on this system. - if '(-10027)' in e.msg: - LOG.error("Array Request error: %s (retrying)", e.msg) - else: - raise - - tries_left -= 1 - self.session_login() - - @utils.synchronized(__name__, external=True) - def _api_request(self, path, *args, **kargs): - """Performs an HTTP request on the device, with locking. - - Raises a DotHillRequestError if the device returned but the status is - not 0. The device error message will be used in the exception message. - - If the status is OK, returns the XML data for further processing. 
- """ - url = self._build_request_url(path, *args, **kargs) - LOG.debug("Array Request URL: %s (session %s)", - url, self._session_key) - headers = {'dataType': 'api', 'sessionKey': self._session_key} - try: - xml = requests.get(url, headers=headers, - verify=self.ssl_verify, timeout=60) - tree = etree.XML(xml.text.encode('utf8')) - except Exception as e: - message = _("Exception handling URL %(url)s: %(msg)s") % { - 'url': url, 'msg': e} - raise exception.DotHillConnectionError(message=message) - - if path == "/show/volumecopy-status": - return tree - self._assert_response_ok(tree) - return tree - - def logout(self): - pass - - def session_logout(self): - url = self._base_url + '/exit' - try: - requests.get(url, verify=self.ssl_verify, timeout=30) - return True - except Exception: - return False - - def is_titanium(self): - """True if array is an older generation.""" - return True if len(self._fw) > 0 and self._fw[0] == 'T' else False - - def create_volume(self, name, size, backend_name, backend_type): - # NOTE: size is in this format: [0-9]+GiB - path_dict = {'size': size} - if backend_type == "linear": - path_dict['vdisk'] = backend_name - else: - path_dict['pool'] = backend_name - - try: - self._request("/create/volume", name, **path_dict) - except exception.DotHillRequestError as e: - # -10186 => The specified name is already in use. - # This can occur during controller failover. - if '(-10186)' in e.msg: - LOG.warning("Ignoring error in create volume: %s", e.msg) - return None - raise - - return None - - def delete_volume(self, name): - try: - self._request("/delete/volumes", name) - except exception.DotHillRequestError as e: - # -10075 => The specified volume was not found. - # This can occur during controller failover. 
- if '(-10075)' in e.msg: - LOG.warning("Ignorning error while deleting %(volume)s:" - " %(reason)s", - {'volume': name, 'reason': e.msg}) - return - raise - - def extend_volume(self, name, added_size): - self._request("/expand/volume", name, size=added_size) - - def create_snapshot(self, volume_name, snap_name): - try: - self._request("/create/snapshots", snap_name, volumes=volume_name) - except exception.DotHillRequestError as e: - # -10186 => The specified name is already in use. - # This can occur during controller failover. - if '(-10186)' in e.msg: - LOG.warning("Ignoring error attempting to create snapshot:" - " %s", e.msg) - return None - - def delete_snapshot(self, snap_name): - try: - self._request("/delete/snapshot", "cleanup", snap_name) - except exception.DotHillRequestError as e: - # -10050 => The volume was not found on this system. - # This can occur during controller failover. - if '(-10050)' in e.msg: - LOG.warning("Ignoring unmap error -10050: %s", e.msg) - return None - raise - - def backend_exists(self, backend_name, backend_type): - try: - if backend_type == "linear": - path = "/show/vdisks" - else: - path = "/show/pools" - self._request(path, backend_name) - return True - except exception.DotHillRequestError: - return False - - def _get_size(self, size): - return int(math.ceil(float(size) * 512 / (units.G))) - - def backend_stats(self, backend_name, backend_type): - stats = {'free_capacity_gb': 0, - 'total_capacity_gb': 0} - prop_list = [] - if backend_type == "linear": - path = "/show/vdisks" - prop_list = ["size-numeric", "freespace-numeric"] - else: - path = "/show/pools" - prop_list = ["total-size-numeric", "total-avail-numeric"] - tree = self._request(path, backend_name) - - size = tree.findtext(".//PROPERTY[@name='%s']" % prop_list[0]) - if size: - stats['total_capacity_gb'] = self._get_size(size) - - size = tree.findtext(".//PROPERTY[@name='%s']" % prop_list[1]) - if size: - stats['free_capacity_gb'] = self._get_size(size) - return 
stats - - def list_luns_for_host(self, host): - tree = self._request("/show/host-maps", host) - return [int(prop.text) for prop in tree.xpath( - "//PROPERTY[@name='lun']")] - - def _get_first_available_lun_for_host(self, host): - """Find next available LUN number. - - Returns a lun number greater than 0 which is not known to be in - use between the array and the specified host. - """ - luns = self.list_luns_for_host(host) - self._luns_in_use_by_host[host] = luns - lun = 1 - while True: - if lun not in luns: - return lun - lun += 1 - - def _get_next_available_lun_for_host(self, host, after=0): - # host can be a comma-separated list of WWPNs; we only use the first. - firsthost = host.split(',')[0] - LOG.debug('get_next_available_lun: host=%s, firsthost=%s, after=%d', - host, firsthost, after) - if after == 0: - return self._get_first_available_lun_for_host(firsthost) - luns = self._luns_in_use_by_host[firsthost] - lun = after + 1 - while lun < 1024: - LOG.debug('get_next_available_lun: host=%s, trying lun %d', - firsthost, lun) - if lun not in luns: - LOG.debug('get_next_available_lun: host=%s, RETURNING lun %d', - firsthost, lun) - return lun - lun += 1 - raise exception.DotHillRequestError( - message=_("No LUNs available for mapping to host %s.") % host) - - @utils.synchronized(__name__ + '.map_volume', external=True) - def map_volume(self, volume_name, connector, connector_element): - if connector_element == 'wwpns': - lun = self._get_first_available_lun_for_host(connector['wwpns'][0]) - host = ",".join(connector['wwpns']) - else: - host = connector['initiator'] - host_status = self._check_host(host) - if host_status != 0: - hostname = self._safe_hostname(connector['host']) - try: - self._request("/create/host", hostname, id=host) - except exception.DotHillRequestError as e: - # -10058: The host identifier or nickname is already in use - if '(-10058)' in e.msg: - LOG.error("While trying to create host nickname" - " %(nickname)s: %(error_msg)s", - {'nickname': 
hostname, - 'error_msg': e.msg}) - else: - raise - lun = self._get_first_available_lun_for_host(host) - - while lun < 255: - try: - self._request("/map/volume", - volume_name, - lun=str(lun), - host=host, - access="rw") - return lun - except exception.DotHillRequestError as e: - # -3177 => "The specified LUN overlaps a previously defined LUN - if '(-3177)' in e.msg: - LOG.info("Unable to map volume" - " %(volume_name)s to lun %(lun)d:" - " %(reason)s", - {'volume_name': volume_name, - 'lun': lun, 'reason': e.msg}) - lun = self._get_next_available_lun_for_host(host, - after=lun) - continue - raise - except Exception as e: - LOG.error("Error while mapping volume" - " %(volume_name)s to lun %(lun)d:", - {'volume_name': volume_name, 'lun': lun}, - e) - raise - - raise exception.DotHillRequestError( - message=_("Failed to find a free LUN for host %s") % host) - - def unmap_volume(self, volume_name, connector, connector_element): - if connector_element == 'wwpns': - host = ",".join(connector['wwpns']) - else: - host = connector['initiator'] - try: - self._request("/unmap/volume", volume_name, host=host) - except exception.DotHillRequestError as e: - # -10050 => The volume was not found on this system. - # This can occur during controller failover. 
- if '(-10050)' in e.msg: - LOG.warning("Ignoring unmap error -10050: %s", e.msg) - return None - raise - - def get_active_target_ports(self): - ports = [] - tree = self._request("/show/ports") - - for obj in tree.xpath("//OBJECT[@basetype='port']"): - port = {prop.get('name'): prop.text - for prop in obj.iter("PROPERTY") - if prop.get('name') in - ["port-type", "target-id", "status"]} - if port['status'] == 'Up': - ports.append(port) - return ports - - def get_active_fc_target_ports(self): - return [port['target-id'] for port in self.get_active_target_ports() - if port['port-type'] == "FC"] - - def get_active_iscsi_target_iqns(self): - return [port['target-id'] for port in self.get_active_target_ports() - if port['port-type'] == "iSCSI"] - - def linear_copy_volume(self, src_name, dest_name, dest_bknd_name): - """Copy a linear volume.""" - - self._request("/volumecopy", - dest_name, - dest_vdisk=dest_bknd_name, - source_volume=src_name, - prompt='yes') - - # The copy has started; now monitor until the operation completes. - count = 0 - while True: - tree = self._request("/show/volumecopy-status") - return_code = tree.findtext(".//PROPERTY[@name='return-code']") - - if return_code == '0': - status = tree.findtext(".//PROPERTY[@name='progress']") - progress = False - if status: - progress = True - LOG.debug("Volume copy is in progress: %s", status) - if not progress: - LOG.debug("Volume copy completed: %s", status) - break - else: - if count >= 5: - LOG.error('Error in copying volume: %s', src_name) - raise exception.DotHillRequestError - - time.sleep(1) - count += 1 - - time.sleep(5) - - def copy_volume(self, src_name, dest_name, dest_bknd_name, - backend_type='virtual'): - """Copy a linear or virtual volume.""" - - if backend_type == 'linear': - return self.linear_copy_volume(src_name, dest_name, dest_bknd_name) - # Copy a virtual volume to another in the same pool. 
- self._request("/copy/volume", src_name, name=dest_name) - LOG.debug("Volume copy of source_volume: %(src_name)s to " - "destination_volume: %(dest_name)s started.", - {'src_name': src_name, 'dest_name': dest_name, }) - - # Loop until this volume copy is no longer in progress. - while self.volume_copy_in_progress(src_name): - time.sleep(5) - - # Once the copy operation is finished, check to ensure that - # the volume was not deleted because of a subsequent error. An - # exception will be raised if the named volume is not present. - self._request("/show/volumes", dest_name) - LOG.debug("Volume copy of source_volume: %(src_name)s to " - "destination_volume: %(dest_name)s completed.", - {'src_name': src_name, 'dest_name': dest_name, }) - - def volume_copy_in_progress(self, src_name): - """Check if a volume copy is in progress for the named volume.""" - - # 'show volume-copies' always succeeds, even if none in progress. - tree = self._request("/show/volume-copies") - - # Find 0 or 1 job(s) with source volume we're interested in - q = "OBJECT[PROPERTY[@name='source-volume']/text()='%s']" % src_name - joblist = tree.xpath(q) - if len(joblist) == 0: - return False - LOG.debug("Volume copy of volume: %(src_name)s is " - "%(pc)s percent completed.", - {'src_name': src_name, - 'pc': joblist[0].findtext("PROPERTY[@name='progress']"), }) - return True - - def _check_host(self, host): - host_status = -1 - tree = self._request("/show/hosts") - for prop in tree.xpath("//PROPERTY[@name='host-id' and text()='%s']" - % host): - host_status = 0 - return host_status - - def _safe_hostname(self, hostname): - """Modify an initiator name to match firmware requirements. - - Initiator name cannot include certain characters and cannot exceed - 15 bytes in 'T' firmware (31 bytes in 'G' firmware). 
- """ - for ch in [',', '"', '\\', '<', '>']: - if ch in hostname: - hostname = hostname.replace(ch, '') - hostname = hostname.replace('.', '_') - name_limit = 15 if self.is_titanium() else 31 - index = len(hostname) - if index > name_limit: - index = name_limit - return hostname[:index] - - def get_active_iscsi_target_portals(self): - # This function returns {'ip': status,} - portals = {} - prop = 'ip-address' - tree = self._request("/show/ports") - for el in tree.xpath("//PROPERTY[@name='primary-ip-address']"): - prop = 'primary-ip-address' - break - iscsi_ips = [ip.text for ip in tree.xpath( - "//PROPERTY[@name='%s']" % prop)] - if not iscsi_ips: - return portals - for index, port_type in enumerate(tree.xpath( - "//PROPERTY[@name='port-type' and text()='iSCSI']")): - status = port_type.getparent().findtext("PROPERTY[@name='status']") - if status == 'Up': - portals[iscsi_ips[index]] = status - return portals - - def get_chap_record(self, initiator_name): - tree = self._request("/show/chap-records") - for prop in tree.xpath("//PROPERTY[@name='initiator-name' and " - "text()='%s']" % initiator_name): - chap_secret = prop.getparent().findtext("PROPERTY[@name='initiator" - "-secret']") - return chap_secret - - def create_chap_record(self, initiator_name, chap_secret): - self._request("/create/chap-record", - name=initiator_name, - secret=chap_secret) - - def get_serial_number(self): - tree = self._request("/show/system") - return tree.findtext(".//PROPERTY[@name='midplane-serial-number']") - - def get_owner_info(self, backend_name, backend_type): - if backend_type == 'linear': - tree = self._request("/show/vdisks", backend_name) - else: - tree = self._request("/show/pools", backend_name) - - return tree.findtext(".//PROPERTY[@name='owner']") - - def modify_volume_name(self, old_name, new_name): - self._request("/set/volume", old_name, name=new_name) - - def get_volume_size(self, volume_name): - tree = self._request("/show/volumes", volume_name) - size = 
tree.findtext(".//PROPERTY[@name='size-numeric']") - return self._get_size(size) - - def get_firmware_version(self): - tree = self._request("/show/controllers") - self._fw = tree.xpath("//PROPERTY[@name='sc-fw']")[0].text - LOG.debug("Array firmware is %s\n", self._fw) - return self._fw diff --git a/cinder/volume/drivers/dothill/dothill_common.py b/cinder/volume/drivers/dothill/dothill_common.py deleted file mode 100644 index bc1c44c1d..000000000 --- a/cinder/volume/drivers/dothill/dothill_common.py +++ /dev/null @@ -1,509 +0,0 @@ -# Copyright 2014 Objectif Libre -# Copyright 2015 Dot Hill Systems Corp. -# Copyright 2016 Seagate Technology or one of its affiliates -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
-# -""" -Volume driver common utilities for DotHill Storage array -""" - -import base64 -import six -import uuid - -from oslo_config import cfg -from oslo_log import log as logging - -from cinder import exception -from cinder.i18n import _ -from cinder.objects import fields -from cinder.volume.drivers.dothill import dothill_client as dothill - -LOG = logging.getLogger(__name__) - -CONF = cfg.CONF - - -class DotHillCommon(object): - VERSION = "1.6" - - stats = {} - - def __init__(self, config): - self.config = config - self.vendor_name = "DotHill" - self.backend_name = self.config.dothill_backend_name - self.backend_type = self.config.dothill_backend_type - self.api_protocol = self.config.dothill_api_protocol - ssl_verify = False - if (self.api_protocol == 'https' and - self.config.dothill_verify_certificate): - ssl_verify = self.config.dothill_verify_certificate_path or True - self.client = dothill.DotHillClient(self.config.san_ip, - self.config.san_login, - self.config.san_password, - self.api_protocol, - ssl_verify) - - def get_version(self): - return self.VERSION - - def do_setup(self, context): - self.client_login() - self._validate_backend() - self._get_owner_info() - self._get_serial_number() - self.client_logout() - - def client_login(self): - try: - self.client.login() - except exception.DotHillConnectionError as ex: - msg = _("Failed to connect to %(vendor_name)s Array %(host)s: " - "%(err)s") % {'vendor_name': self.vendor_name, - 'host': self.config.san_ip, - 'err': six.text_type(ex)} - LOG.error(msg) - raise exception.DotHillConnectionError(message=msg) - except exception.DotHillAuthenticationError: - msg = _("Failed to log on %s Array " - "(invalid login?).") % self.vendor_name - LOG.error(msg) - raise exception.DotHillAuthenticationError(message=msg) - - def _get_serial_number(self): - self.serialNumber = self.client.get_serial_number() - - def _get_owner_info(self): - self.owner = self.client.get_owner_info(self.backend_name, - self.backend_type) - - 
def _validate_backend(self): - if not self.client.backend_exists(self.backend_name, - self.backend_type): - self.client_logout() - raise exception.DotHillInvalidBackend(backend=self.backend_name) - - def client_logout(self): - self.client.logout() - - def _get_vol_name(self, volume_id): - volume_name = self._encode_name(volume_id) - return "v%s" % volume_name - - def _get_snap_name(self, snapshot_id): - snapshot_name = self._encode_name(snapshot_id) - return "s%s" % snapshot_name - - def _encode_name(self, name): - """Get converted DotHill volume name. - - Converts the openstack volume id from - fceec30e-98bc-4ce5-85ff-d7309cc17cc2 - to - v_O7DDpi8TOWF_9cwnMF - We convert the 128(32*4) bits of the uuid into a 24 characters long - base64 encoded string. This still exceeds the limit of 20 characters - in some models so we return 19 characters because the - _get_{vol,snap}_name functions prepend a character. - """ - uuid_str = name.replace("-", "") - vol_uuid = uuid.UUID('urn:uuid:%s' % uuid_str) - vol_encoded = base64.urlsafe_b64encode(vol_uuid.bytes) - if six.PY3: - vol_encoded = vol_encoded.decode('ascii') - return vol_encoded[:19] - - def check_flags(self, options, required_flags): - for flag in required_flags: - if not getattr(options, flag, None): - msg = _('%s configuration option is not set.') % flag - LOG.error(msg) - raise exception.InvalidInput(reason=msg) - - def create_volume(self, volume): - self.client_login() - # Use base64 to encode the volume name (UUID is too long for DotHill) - volume_name = self._get_vol_name(volume['id']) - volume_size = "%dGiB" % volume['size'] - LOG.debug("Create Volume having display_name: %(display_name)s " - "name: %(name)s id: %(id)s size: %(size)s", - {'display_name': volume['display_name'], - 'name': volume['name'], - 'id': volume_name, - 'size': volume_size, }) - try: - self.client.create_volume(volume_name, - volume_size, - self.backend_name, - self.backend_type) - except exception.DotHillRequestError as ex: - 
LOG.exception("Creation of volume %s failed.", volume['id']) - raise exception.Invalid(ex) - - finally: - self.client_logout() - - def _assert_enough_space_for_copy(self, volume_size): - """The DotHill creates a snap pool before trying to copy the volume. - - The pool is 5.27GB or 20% of the volume size, whichever is larger. - Verify that we have enough space for the pool and then copy - """ - pool_size = max(volume_size * 0.2, 5.27) - required_size = pool_size + volume_size - - if required_size > self.stats['pools'][0]['free_capacity_gb']: - raise exception.DotHillNotEnoughSpace(backend=self.backend_name) - - def _assert_source_detached(self, volume): - """The DotHill requires a volume to be dettached to clone it. - - Make sure that the volume is not in use when trying to copy it. - """ - if (volume['status'] != "available" or - volume['attach_status'] == fields.VolumeAttachStatus.ATTACHED): - LOG.error("Volume must be detached for clone operation.") - raise exception.VolumeAttached(volume_id=volume['id']) - - def create_cloned_volume(self, volume, src_vref): - self.get_volume_stats(True) - self._assert_enough_space_for_copy(volume['size']) - self._assert_source_detached(src_vref) - LOG.debug("Cloning Volume %(source_id)s to (%(dest_id)s)", - {'source_id': src_vref['id'], - 'dest_id': volume['id'], }) - - if src_vref['name_id']: - orig_name = self._get_vol_name(src_vref['name_id']) - else: - orig_name = self._get_vol_name(src_vref['id']) - dest_name = self._get_vol_name(volume['id']) - - self.client_login() - try: - self.client.copy_volume(orig_name, dest_name, - self.backend_name, self.backend_type) - except exception.DotHillRequestError as ex: - LOG.exception("Cloning of volume %s failed.", - src_vref['id']) - raise exception.Invalid(ex) - finally: - self.client_logout() - - if volume['size'] > src_vref['size']: - self.extend_volume(volume, volume['size']) - - def create_volume_from_snapshot(self, volume, snapshot): - self.get_volume_stats(True) - 
self._assert_enough_space_for_copy(volume['size']) - LOG.debug("Creating Volume from snapshot %(source_id)s to " - "(%(dest_id)s)", {'source_id': snapshot['id'], - 'dest_id': volume['id'], }) - - orig_name = self._get_snap_name(snapshot['id']) - dest_name = self._get_vol_name(volume['id']) - self.client_login() - try: - self.client.copy_volume(orig_name, dest_name, - self.backend_name, self.backend_type) - except exception.DotHillRequestError as ex: - LOG.exception("Create volume failed from snapshot: %s", - snapshot['id']) - raise exception.Invalid(ex) - finally: - self.client_logout() - - if volume['size'] > snapshot['volume_size']: - self.extend_volume(volume, volume['size']) - - def delete_volume(self, volume): - LOG.debug("Deleting Volume: %s", volume['id']) - if volume['name_id']: - volume_name = self._get_vol_name(volume['name_id']) - else: - volume_name = self._get_vol_name(volume['id']) - - self.client_login() - try: - self.client.delete_volume(volume_name) - except exception.DotHillRequestError as ex: - # if the volume wasn't found, ignore the error - if 'The volume was not found on this system.' 
in ex.args: - return - LOG.exception("Deletion of volume %s failed.", volume['id']) - raise exception.Invalid(ex) - finally: - self.client_logout() - - def get_volume_stats(self, refresh): - if refresh: - self.client_login() - try: - self._update_volume_stats() - finally: - self.client_logout() - return self.stats - - def _update_volume_stats(self): - # storage_protocol and volume_backend_name are - # set in the child classes - stats = {'driver_version': self.VERSION, - 'storage_protocol': None, - 'vendor_name': self.vendor_name, - 'volume_backend_name': None, - 'pools': []} - - pool = {'QoS_support': False} - try: - src_type = "%sVolumeDriver" % self.vendor_name - backend_stats = self.client.backend_stats(self.backend_name, - self.backend_type) - pool.update(backend_stats) - pool['location_info'] = ('%s:%s:%s:%s' % - (src_type, - self.serialNumber, - self.backend_name, - self.owner)) - pool['pool_name'] = self.backend_name - except exception.DotHillRequestError: - err = (_("Unable to get stats for backend_name: %s") % - self.backend_name) - LOG.exception(err) - raise exception.Invalid(reason=err) - - stats['pools'].append(pool) - self.stats = stats - - def _assert_connector_ok(self, connector, connector_element): - if not connector[connector_element]: - msg = _("Connector does not provide: %s") % connector_element - LOG.error(msg) - raise exception.InvalidInput(reason=msg) - - def map_volume(self, volume, connector, connector_element): - self._assert_connector_ok(connector, connector_element) - if volume['name_id']: - volume_name = self._get_vol_name(volume['name_id']) - else: - volume_name = self._get_vol_name(volume['id']) - try: - data = self.client.map_volume(volume_name, - connector, - connector_element) - return data - except exception.DotHillRequestError as ex: - LOG.exception("Error mapping volume: %s", volume_name) - raise exception.Invalid(ex) - - def unmap_volume(self, volume, connector, connector_element): - self._assert_connector_ok(connector, 
connector_element) - if volume['name_id']: - volume_name = self._get_vol_name(volume['name_id']) - else: - volume_name = self._get_vol_name(volume['id']) - - self.client_login() - try: - self.client.unmap_volume(volume_name, - connector, - connector_element) - except exception.DotHillRequestError as ex: - LOG.exception("Error unmapping volume: %s", volume_name) - raise exception.Invalid(ex) - finally: - self.client_logout() - - def get_active_fc_target_ports(self): - try: - return self.client.get_active_fc_target_ports() - except exception.DotHillRequestError as ex: - LOG.exception("Error getting active FC target ports.") - raise exception.Invalid(ex) - - def get_active_iscsi_target_iqns(self): - try: - return self.client.get_active_iscsi_target_iqns() - except exception.DotHillRequestError as ex: - LOG.exception("Error getting active ISCSI target iqns.") - raise exception.Invalid(ex) - - def get_active_iscsi_target_portals(self): - try: - return self.client.get_active_iscsi_target_portals() - except exception.DotHillRequestError as ex: - LOG.exception("Error getting active ISCSI target portals.") - raise exception.Invalid(ex) - - def create_snapshot(self, snapshot): - LOG.debug("Creating snapshot (%(snap_id)s) from %(volume_id)s)", - {'snap_id': snapshot['id'], - 'volume_id': snapshot['volume_id'], }) - if snapshot['volume']['name_id']: - vol_name = self._get_vol_name(snapshot['volume']['name_id']) - else: - vol_name = self._get_vol_name(snapshot['volume_id']) - snap_name = self._get_snap_name(snapshot['id']) - - self.client_login() - try: - self.client.create_snapshot(vol_name, snap_name) - except exception.DotHillRequestError as ex: - LOG.exception("Creation of snapshot failed for volume: %s", - snapshot['volume_id']) - raise exception.Invalid(ex) - finally: - self.client_logout() - - def delete_snapshot(self, snapshot): - snap_name = self._get_snap_name(snapshot['id']) - LOG.debug("Deleting snapshot (%s)", snapshot['id']) - - self.client_login() - try: - 
self.client.delete_snapshot(snap_name) - except exception.DotHillRequestError as ex: - # if the volume wasn't found, ignore the error - if 'The volume was not found on this system.' in ex.args: - return - LOG.exception("Deleting snapshot %s failed", snapshot['id']) - raise exception.Invalid(ex) - finally: - self.client_logout() - - def extend_volume(self, volume, new_size): - if volume['name_id']: - volume_name = self._get_vol_name(volume['name_id']) - else: - volume_name = self._get_vol_name(volume['id']) - old_size = self.client.get_volume_size(volume_name) - growth_size = int(new_size) - old_size - LOG.debug("Extending Volume %(volume_name)s from %(old_size)s to " - "%(new_size)s, by %(growth_size)s GiB.", - {'volume_name': volume_name, - 'old_size': old_size, - 'new_size': new_size, - 'growth_size': growth_size, }) - if growth_size < 1: - return - self.client_login() - try: - self.client.extend_volume(volume_name, "%dGiB" % growth_size) - except exception.DotHillRequestError as ex: - LOG.exception("Extension of volume %s failed.", volume['id']) - raise exception.Invalid(ex) - finally: - self.client_logout() - - def get_chap_record(self, initiator_name): - try: - return self.client.get_chap_record(initiator_name) - except exception.DotHillRequestError as ex: - LOG.exception("Error getting chap record.") - raise exception.Invalid(ex) - - def create_chap_record(self, initiator_name, chap_secret): - try: - self.client.create_chap_record(initiator_name, chap_secret) - except exception.DotHillRequestError as ex: - LOG.exception("Error creating chap record.") - raise exception.Invalid(ex) - - def migrate_volume(self, volume, host): - """Migrate directly if source and dest are managed by same storage. - - :param volume: A dictionary describing the volume to migrate - :param host: A dictionary describing the host to migrate to, where - host['host'] is its name, and host['capabilities'] is a - dictionary of its reported capabilities. 
- :returns: (False, None) if the driver does not support migration, - (True, None) if successful - - """ - false_ret = (False, None) - if volume['attach_status'] == fields.VolumeAttachStatus.ATTACHED: - return false_ret - if 'location_info' not in host['capabilities']: - return false_ret - info = host['capabilities']['location_info'] - try: - (dest_type, dest_id, - dest_back_name, dest_owner) = info.split(':') - except ValueError: - return false_ret - - if not (dest_type == 'DotHillVolumeDriver' and - dest_id == self.serialNumber and - dest_owner == self.owner): - return false_ret - if volume['name_id']: - source_name = self._get_vol_name(volume['name_id']) - else: - source_name = self._get_vol_name(volume['id']) - # DotHill Array does not support duplicate names - dest_name = "m%s" % source_name[1:] - - self.client_login() - try: - self.client.copy_volume(source_name, dest_name, - dest_back_name, self.backend_type) - self.client.delete_volume(source_name) - self.client.modify_volume_name(dest_name, source_name) - return (True, None) - except exception.DotHillRequestError as ex: - LOG.exception("Error migrating volume: %s", source_name) - raise exception.Invalid(ex) - finally: - self.client_logout() - - def retype(self, volume, new_type, diff, host): - ret = self.migrate_volume(volume, host) - return ret[0] - - def manage_existing(self, volume, existing_ref): - """Manage an existing non-openstack DotHill volume - - existing_ref is a dictionary of the form: - {'source-name': } - """ - target_vol_name = existing_ref['source-name'] - modify_target_vol_name = self._get_vol_name(volume['id']) - - self.client_login() - try: - self.client.modify_volume_name(target_vol_name, - modify_target_vol_name) - except exception.DotHillRequestError as ex: - LOG.exception("Error manage existing volume.") - raise exception.Invalid(ex) - finally: - self.client_logout() - - def manage_existing_get_size(self, volume, existing_ref): - """Return size of volume to be managed by 
manage_existing. - - existing_ref is a dictionary of the form: - {'source-name': } - """ - target_vol_name = existing_ref['source-name'] - - self.client_login() - try: - size = self.client.get_volume_size(target_vol_name) - return size - except exception.DotHillRequestError as ex: - LOG.exception("Error manage existing get volume size.") - raise exception.Invalid(ex) - finally: - self.client_logout() diff --git a/cinder/volume/drivers/dothill/dothill_fc.py b/cinder/volume/drivers/dothill/dothill_fc.py deleted file mode 100644 index 0f0b96da5..000000000 --- a/cinder/volume/drivers/dothill/dothill_fc.py +++ /dev/null @@ -1,184 +0,0 @@ -# Copyright 2014 Objectif Libre -# Copyright 2015 Dot Hill Systems Corp. -# Copyright 2016 Seagate Technology or one of its affiliates -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -# - -from cinder import exception -from cinder import interface -import cinder.volume.driver -from cinder.volume.drivers.dothill import dothill_common -from cinder.volume.drivers.san import san -from cinder.zonemanager import utils as fczm_utils - - -# As of Pike, the DotHill driver is no longer considered supported, -# but the code remains as it is still subclassed by other drivers. -# The __init__() function prevents any direct instantiation. -@interface.volumedriver -class DotHillFCDriver(cinder.volume.driver.FibreChannelDriver): - """OpenStack Fibre Channel cinder drivers for DotHill Arrays. - - .. 
code:: text - - Version history: - 0.1 - Base version developed for HPMSA FC drivers: - "https://github.com/openstack/cinder/tree/stable/juno/ - cinder/volume/drivers/san/hp" - 1.0 - Version developed for DotHill arrays with the following - modifications: - - added support for v3 API(virtual pool feature) - - added support for retype volume - - added support for manage/unmanage volume - - added initiator target mapping in FC zoning - - added https support - 1.6 - Add management path redundancy and reduce load placed - on management controller. - 1.7 - Modified so it can't be invoked except as a superclass - - """ - - def __init__(self, *args, **kwargs): - # Make sure we're not invoked directly - if type(self) == DotHillFCDriver: - raise exception.DotHillDriverNotSupported - super(DotHillFCDriver, self).__init__(*args, **kwargs) - self.common = None - self.configuration.append_config_values(dothill_common.common_opts) - self.configuration.append_config_values(san.san_opts) - self.lookup_service = fczm_utils.create_lookup_service() - - def _init_common(self): - return dothill_common.DotHillCommon(self.configuration) - - def _check_flags(self): - required_flags = ['san_ip', 'san_login', 'san_password'] - self.common.check_flags(self.configuration, required_flags) - - def do_setup(self, context): - self.common = self._init_common() - self._check_flags() - self.common.do_setup(context) - - def check_for_setup_error(self): - self._check_flags() - - def create_volume(self, volume): - self.common.create_volume(volume) - - def create_volume_from_snapshot(self, volume, src_vref): - self.common.create_volume_from_snapshot(volume, src_vref) - - def create_cloned_volume(self, volume, src_vref): - self.common.create_cloned_volume(volume, src_vref) - - def delete_volume(self, volume): - self.common.delete_volume(volume) - - @fczm_utils.add_fc_zone - def initialize_connection(self, volume, connector): - self.common.client_login() - try: - data = {} - data['target_lun'] = 
self.common.map_volume(volume, - connector, - 'wwpns') - - ports, init_targ_map = self.get_init_targ_map(connector) - data['target_discovered'] = True - data['target_wwn'] = ports - data['initiator_target_map'] = init_targ_map - info = {'driver_volume_type': 'fibre_channel', - 'data': data} - return info - finally: - self.common.client_logout() - - @fczm_utils.remove_fc_zone - def terminate_connection(self, volume, connector, **kwargs): - info = {'driver_volume_type': 'fibre_channel', 'data': {}} - try: - self.common.unmap_volume(volume, connector, 'wwpns') - if not self.common.client.list_luns_for_host( - connector['wwpns'][0]): - ports, init_targ_map = self.get_init_targ_map(connector) - info['data'] = {'target_wwn': ports, - 'initiator_target_map': init_targ_map} - finally: - return info - - def get_init_targ_map(self, connector): - init_targ_map = {} - target_wwns = [] - ports = self.common.get_active_fc_target_ports() - if self.lookup_service is not None: - dev_map = self.lookup_service.get_device_mapping_from_network( - connector['wwpns'], - ports) - for fabric_name in dev_map: - fabric = dev_map[fabric_name] - target_wwns += fabric['target_port_wwn_list'] - for initiator in fabric['initiator_port_wwn_list']: - if initiator not in init_targ_map: - init_targ_map[initiator] = [] - init_targ_map[initiator] += fabric['target_port_wwn_list'] - init_targ_map[initiator] = list(set( - init_targ_map[initiator])) - target_wwns = list(set(target_wwns)) - else: - initiator_wwns = connector['wwpns'] - target_wwns = ports - for initiator in initiator_wwns: - init_targ_map[initiator] = target_wwns - - return target_wwns, init_targ_map - - def get_volume_stats(self, refresh=False): - stats = self.common.get_volume_stats(refresh) - stats['storage_protocol'] = 'FC' - stats['driver_version'] = self.VERSION - backend_name = self.configuration.safe_get('volume_backend_name') - stats['volume_backend_name'] = (backend_name or - self.__class__.__name__) - return stats - - def 
create_export(self, context, volume, connector=None): - pass - - def ensure_export(self, context, volume): - pass - - def remove_export(self, context, volume): - pass - - def create_snapshot(self, snapshot): - self.common.create_snapshot(snapshot) - - def delete_snapshot(self, snapshot): - self.common.delete_snapshot(snapshot) - - def extend_volume(self, volume, new_size): - self.common.extend_volume(volume, new_size) - - def retype(self, context, volume, new_type, diff, host): - return self.common.retype(volume, new_type, diff, host) - - def manage_existing(self, volume, existing_ref): - self.common.manage_existing(volume, existing_ref) - - def manage_existing_get_size(self, volume, existing_ref): - return self.common.manage_existing_get_size(volume, existing_ref) - - def unmanage(self, volume): - pass diff --git a/cinder/volume/drivers/dothill/dothill_iscsi.py b/cinder/volume/drivers/dothill/dothill_iscsi.py deleted file mode 100644 index d6dc0cea0..000000000 --- a/cinder/volume/drivers/dothill/dothill_iscsi.py +++ /dev/null @@ -1,208 +0,0 @@ -# Copyright 2014 Objectif Libre -# Copyright 2015 Dot Hill Systems Corp. -# Copyright 2016 Seagate Technology or one of its affiliates -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
-# - -from oslo_log import log as logging - -from cinder import exception -from cinder.i18n import _ -from cinder import interface -import cinder.volume.driver -from cinder.volume.drivers.dothill import dothill_common as dothillcommon -from cinder.volume.drivers.san import san - - -DEFAULT_ISCSI_PORT = "3260" -LOG = logging.getLogger(__name__) - - -# As of Pike, the DotHill driver is no longer considered supported, -# but the code remains as it is still subclassed by other drivers. -# The __init__() function prevents any direct instantiation. -@interface.volumedriver -class DotHillISCSIDriver(cinder.volume.driver.ISCSIDriver): - """OpenStack iSCSI cinder drivers for DotHill Arrays. - - .. code:: text - - Version history: - 0.1 - Base structure for DotHill iSCSI drivers based on HPMSA FC - drivers: - "https://github.com/openstack/cinder/tree/stable/juno/ - cinder/volume/drivers/san/hp" - 1.0 - Version developed for DotHill arrays with the following - modifications: - - added iSCSI support - - added CHAP support in iSCSI - - added support for v3 API(virtual pool feature) - - added support for retype volume - - added support for manage/unmanage volume - - added https support - 1.6 - Add management path redundancy and reduce load placed - on management controller. 
- 1.7 - Modified so it can't be invoked except as a superclass - - """ - - def __init__(self, *args, **kwargs): - # Make sure we're not invoked directly - if type(self) == DotHillISCSIDriver: - raise exception.DotHillDriverNotSupported - super(DotHillISCSIDriver, self).__init__(*args, **kwargs) - self.common = None - self.configuration.append_config_values(dothillcommon.common_opts) - self.configuration.append_config_values(dothillcommon.iscsi_opts) - self.configuration.append_config_values(san.san_opts) - self.iscsi_ips = self.configuration.dothill_iscsi_ips - - def _init_common(self): - return dothillcommon.DotHillCommon(self.configuration) - - def _check_flags(self): - required_flags = ['san_ip', 'san_login', 'san_password'] - self.common.check_flags(self.configuration, required_flags) - - def do_setup(self, context): - self.common = self._init_common() - self._check_flags() - self.common.do_setup(context) - self.initialize_iscsi_ports() - - def initialize_iscsi_ports(self): - iscsi_ips = [] - if self.iscsi_ips: - for ip_addr in self.iscsi_ips: - ip = ip_addr.split(':') - if len(ip) == 1: - iscsi_ips.append([ip_addr, DEFAULT_ISCSI_PORT]) - elif len(ip) == 2: - iscsi_ips.append([ip[0], ip[1]]) - else: - msg = _("Invalid IP address format: '%s'") % ip_addr - LOG.error(msg) - raise exception.InvalidInput(reason=(msg)) - self.iscsi_ips = iscsi_ips - else: - msg = _('At least one valid iSCSI IP address must be set.') - LOG.error(msg) - raise exception.InvalidInput(reason=(msg)) - - def check_for_setup_error(self): - self._check_flags() - - def create_volume(self, volume): - self.common.create_volume(volume) - - def create_volume_from_snapshot(self, volume, src_vref): - self.common.create_volume_from_snapshot(volume, src_vref) - - def create_cloned_volume(self, volume, src_vref): - self.common.create_cloned_volume(volume, src_vref) - - def delete_volume(self, volume): - self.common.delete_volume(volume) - - def initialize_connection(self, volume, connector): - 
self.common.client_login() - try: - data = {} - data['target_lun'] = self.common.map_volume(volume, - connector, - 'initiator') - iqns = self.common.get_active_iscsi_target_iqns() - data['target_discovered'] = True - data['target_iqn'] = iqns[0] - iscsi_portals = self.common.get_active_iscsi_target_portals() - - for ip_port in self.iscsi_ips: - if (ip_port[0] in iscsi_portals): - data['target_portal'] = ":".join(ip_port) - break - - if 'target_portal' not in data: - raise exception.DotHillNotTargetPortal() - - if self.configuration.use_chap_auth: - chap_secret = self.common.get_chap_record( - connector['initiator'] - ) - if not chap_secret: - chap_secret = self.create_chap_record( - connector['initiator'] - ) - data['auth_password'] = chap_secret - data['auth_username'] = connector['initiator'] - data['auth_method'] = 'CHAP' - - info = {'driver_volume_type': 'iscsi', - 'data': data} - return info - finally: - self.common.client_logout() - - def terminate_connection(self, volume, connector, **kwargs): - if type(connector) == dict and 'initiator' in connector: - self.common.unmap_volume(volume, connector, 'initiator') - - def get_volume_stats(self, refresh=False): - stats = self.common.get_volume_stats(refresh) - stats['storage_protocol'] = 'iSCSI' - stats['driver_version'] = self.VERSION - backend_name = self.configuration.safe_get('volume_backend_name') - stats['volume_backend_name'] = (backend_name or - self.__class__.__name__) - return stats - - def create_export(self, context, volume, connector=None): - pass - - def ensure_export(self, context, volume): - pass - - def remove_export(self, context, volume): - pass - - def create_snapshot(self, snapshot): - self.common.create_snapshot(snapshot) - - def delete_snapshot(self, snapshot): - self.common.delete_snapshot(snapshot) - - def extend_volume(self, volume, new_size): - self.common.extend_volume(volume, new_size) - - def create_chap_record(self, initiator_name): - chap_secret = self.configuration.chap_password - 
# Chap secret length should be 12 to 16 characters - if 12 <= len(chap_secret) <= 16: - self.common.create_chap_record(initiator_name, chap_secret) - else: - msg = _('CHAP secret should be 12-16 bytes.') - LOG.error(msg) - raise exception.InvalidInput(reason=(msg)) - return chap_secret - - def retype(self, context, volume, new_type, diff, host): - return self.common.retype(volume, new_type, diff, host) - - def manage_existing(self, volume, existing_ref): - self.common.manage_existing(volume, existing_ref) - - def manage_existing_get_size(self, volume, existing_ref): - return self.common.manage_existing_get_size(volume, existing_ref) - - def unmanage(self, volume): - pass diff --git a/cinder/volume/drivers/drbdmanagedrv.py b/cinder/volume/drivers/drbdmanagedrv.py deleted file mode 100644 index 650ff55e2..000000000 --- a/cinder/volume/drivers/drbdmanagedrv.py +++ /dev/null @@ -1,1070 +0,0 @@ -# Copyright (c) 2014 LINBIT HA Solutions GmbH -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - - -""" - -This driver connects Cinder to an installed DRBDmanage instance, see -http://drbd.linbit.com/users-guide-9.0/ch-openstack.html -for more details. 
- -""" - - -import eventlet -import json -import six -import socket -import time -import uuid - -from oslo_config import cfg -from oslo_log import log as logging -from oslo_utils import importutils -from oslo_utils import units - - -from cinder import exception -from cinder.i18n import _ -from cinder import interface -from cinder.volume import configuration -from cinder.volume import driver - -try: - import dbus - import drbdmanage.consts as dm_const - import drbdmanage.exceptions as dm_exc - import drbdmanage.utils as dm_utils -except ImportError: - # Used for the tests, when no DRBDmanage is installed - dbus = None - dm_const = None - dm_exc = None - dm_utils = None - - -LOG = logging.getLogger(__name__) - -drbd_opts = [ - cfg.IntOpt('drbdmanage_redundancy', - default=1, - help='Number of nodes that should replicate the data.'), - cfg.StrOpt('drbdmanage_resource_policy', - default='{"ratio": "0.51", "timeout": "60"}', - help='Resource deployment completion wait policy.'), - cfg.StrOpt('drbdmanage_disk_options', - default='{"c-min-rate": "4M"}', - help='Disk options to set on new resources. ' - 'See http://www.drbd.org/en/doc/users-guide-90/re-drbdconf' - ' for all the details.'), - cfg.StrOpt('drbdmanage_net_options', - default='{"connect-int": "4", "allow-two-primaries": "yes", ' - '"ko-count": "30", "max-buffers": "20000", ' - '"ping-timeout": "100"}', - help='Net options to set on new resources. ' - 'See http://www.drbd.org/en/doc/users-guide-90/re-drbdconf' - ' for all the details.'), - cfg.StrOpt('drbdmanage_resource_options', - default='{"auto-promote-timeout": "300"}', - help='Resource options to set on new resources. 
' - 'See http://www.drbd.org/en/doc/users-guide-90/re-drbdconf' - ' for all the details.'), - cfg.StrOpt('drbdmanage_snapshot_policy', - default='{"count": "1", "timeout": "60"}', - help='Snapshot completion wait policy.'), - cfg.StrOpt('drbdmanage_resize_policy', - default='{"timeout": "60"}', - help='Volume resize completion wait policy.'), - cfg.StrOpt('drbdmanage_resource_plugin', - default="drbdmanage.plugins.plugins.wait_for.WaitForResource", - help='Resource deployment completion wait plugin.'), - cfg.StrOpt('drbdmanage_snapshot_plugin', - default="drbdmanage.plugins.plugins.wait_for.WaitForSnapshot", - help='Snapshot completion wait plugin.'), - cfg.StrOpt('drbdmanage_resize_plugin', - default="drbdmanage.plugins.plugins.wait_for.WaitForVolumeSize", - help='Volume resize completion wait plugin.'), - cfg.BoolOpt('drbdmanage_devs_on_controller', - default=True, - help='''If set, the c-vol node will receive a useable - /dev/drbdX device, even if the actual data is stored on - other nodes only. - This is useful for debugging, maintenance, and to be - able to do the iSCSI export from the c-vol node.''') - # TODO(PM): offsite_redundancy? - # TODO(PM): choose DRBDmanage storage pool? 
-] - - -CONF = cfg.CONF -CONF.register_opts(drbd_opts, group=configuration.SHARED_CONF_GROUP) - - -AUX_PROP_CINDER_VOL_ID = "cinder-id" -AUX_PROP_TEMP_CLIENT = "cinder-is-temp-client" -DM_VN_PREFIX = 'CV_' # sadly 2CV isn't allowed by DRBDmanage -DM_SN_PREFIX = 'SN_' - - -# Need to be set later, so that the tests can fake -CS_DEPLOYED = None -CS_DISKLESS = None -CS_UPD_CON = None - - -class DrbdManageBaseDriver(driver.VolumeDriver): - """Cinder driver that uses DRBDmanage for storage.""" - - VERSION = '1.1.0' - drbdmanage_dbus_name = 'org.drbd.drbdmanaged' - drbdmanage_dbus_interface = '/interface' - - # ThirdPartySystems wiki page - CI_WIKI_NAME = "Cinder_Jenkins" - - def __init__(self, *args, **kwargs): - self.empty_list = dbus.Array([], signature="a(s)") - self.empty_dict = dbus.Array([], signature="a(ss)") - - super(DrbdManageBaseDriver, self).__init__(*args, **kwargs) - - self.configuration.append_config_values(drbd_opts) - if not self.drbdmanage_dbus_name: - self.drbdmanage_dbus_name = 'org.drbd.drbdmanaged' - if not self.drbdmanage_dbus_interface: - self.drbdmanage_dbus_interface = '/interface' - self.drbdmanage_redundancy = int(getattr(self.configuration, - 'drbdmanage_redundancy', 1)) - self.drbdmanage_devs_on_controller = bool( - getattr(self.configuration, - 'drbdmanage_devs_on_controller', - True)) - self.dm_control_vol = ".drbdctrl" - - self.backend_name = self.configuration.safe_get( - 'volume_backend_name') or 'drbdmanage' - - js_decoder = json.JSONDecoder() - self.policy_resource = js_decoder.decode( - self.configuration.safe_get('drbdmanage_resource_policy')) - self.policy_snapshot = js_decoder.decode( - self.configuration.safe_get('drbdmanage_snapshot_policy')) - self.policy_resize = js_decoder.decode( - self.configuration.safe_get('drbdmanage_resize_policy')) - - self.resource_options = js_decoder.decode( - self.configuration.safe_get('drbdmanage_resource_options')) - self.net_options = js_decoder.decode( - 
self.configuration.safe_get('drbdmanage_net_options')) - self.disk_options = js_decoder.decode( - self.configuration.safe_get('drbdmanage_disk_options')) - - self.plugin_resource = self.configuration.safe_get( - 'drbdmanage_resource_plugin') - self.plugin_snapshot = self.configuration.safe_get( - 'drbdmanage_snapshot_plugin') - self.plugin_resize = self.configuration.safe_get( - 'drbdmanage_resize_plugin') - - # needed as per pep8: - # F841 local variable 'CS_DEPLOYED' is assigned to but never used - global CS_DEPLOYED, CS_DISKLESS, CS_UPD_CON - CS_DEPLOYED = dm_const.CSTATE_PREFIX + dm_const.FLAG_DEPLOY - CS_DISKLESS = dm_const.CSTATE_PREFIX + dm_const.FLAG_DISKLESS - CS_UPD_CON = dm_const.CSTATE_PREFIX + dm_const.FLAG_UPD_CON - - def dbus_connect(self): - self.odm = dbus.SystemBus().get_object(self.drbdmanage_dbus_name, - self.drbdmanage_dbus_interface) - self.odm.ping() - - def call_or_reconnect(self, fn, *args): - """Call DBUS function; on a disconnect try once to reconnect.""" - try: - return fn(*args) - except dbus.DBusException as e: - LOG.warning("Got disconnected; trying to reconnect. (%s)", e) - self.dbus_connect() - # Old function object is invalid, get new one. 
- return getattr(self.odm, fn._method_name)(*args) - - def _fetch_answer_data(self, res, key, level=None, req=True): - for code, fmt, data in res: - if code == dm_exc.DM_INFO: - if level and level != fmt: - continue - - value = [v for k, v in data if k == key] - if value: - if len(value) == 1: - return value[0] - else: - return value - - if req: - if level: - l = level + ":" + key - else: - l = key - - msg = _('DRBDmanage driver error: expected key "%s" ' - 'not in answer, wrong DRBDmanage version?') % l - LOG.error(msg) - raise exception.VolumeDriverException(message=msg) - - return None - - def do_setup(self, context): - """Any initialization the volume driver does while starting.""" - super(DrbdManageBaseDriver, self).do_setup(context) - self.dbus_connect() - - def check_for_setup_error(self): - """Verify that requirements are in place to use DRBDmanage driver.""" - if not all((dbus, dm_exc, dm_const, dm_utils)): - msg = _('DRBDmanage driver setup error: some required ' - 'libraries (dbus, drbdmanage.*) not found.') - LOG.error(msg) - raise exception.VolumeDriverException(message=msg) - if self.odm.ping() != 0: - message = _('Cannot ping DRBDmanage backend') - raise exception.VolumeBackendAPIException(data=message) - - def _clean_uuid(self): - """Returns a UUID string, WITHOUT braces.""" - # Some uuid library versions put braces around the result!? - # We don't want them, just a plain [0-9a-f-]+ string. 
- id = str(uuid.uuid4()) - id = id.replace("{", "") - id = id.replace("}", "") - return id - - def _check_result(self, res, ignore=None, ret=0): - seen_success = False - seen_error = False - result = ret - for (code, fmt, arg_l) in res: - # convert from DBUS to Python - arg = dict(arg_l) - if ignore and code in ignore: - if not result: - result = code - continue - if code == dm_exc.DM_SUCCESS: - seen_success = True - continue - if code == dm_exc.DM_INFO: - continue - seen_error = _("Received error string: %s") % (fmt % arg) - - if seen_error: - raise exception.VolumeBackendAPIException(data=seen_error) - if seen_success: - return ret - # by default okay - or the ignored error code. - return ret - - # DRBDmanage works in kiB units; Cinder uses GiB. - def _vol_size_to_dm(self, size): - return int(size * units.Gi / units.Ki) - - def _vol_size_to_cinder(self, size): - return int(size * units.Ki / units.Gi) - - def is_clean_volume_name(self, name, prefix): - try: - if (name.startswith(CONF.volume_name_template % "") and - uuid.UUID(name[7:]) is not None): - return prefix + name[7:] - except ValueError: - return None - - try: - if uuid.UUID(name) is not None: - return prefix + name - except ValueError: - return None - - def _call_policy_plugin(self, plugin, pol_base, pol_this): - """Returns True for done, False for timeout.""" - - pol_inp_data = dict(pol_base) - pol_inp_data.update(pol_this, - starttime=str(time.time())) - - retry = 0 - while True: - res, pol_result = self.call_or_reconnect( - self.odm.run_external_plugin, - plugin, - pol_inp_data) - self._check_result(res) - - if pol_result['result'] == dm_const.BOOL_TRUE: - return True - - if pol_result['timeout'] == dm_const.BOOL_TRUE: - return False - - eventlet.sleep(min(0.5 + retry / 5, 2)) - retry += 1 - - def _wait_for_node_assignment(self, res_name, vol_nr, nodenames, - filter_props=None, timeout=90, - check_vol_deployed=True): - """Return True as soon as one assignment matches the filter.""" - - # TODO(LINBIT): 
unify with policy plugins - - if not filter_props: - filter_props = self.empty_dict - - end_time = time.time() + timeout - - retry = 0 - while time.time() < end_time: - res, assgs = self.call_or_reconnect(self.odm.list_assignments, - nodenames, [res_name], 0, - filter_props, self.empty_list) - self._check_result(res) - - if len(assgs) > 0: - for assg in assgs: - vols = assg[3] - - for v_nr, v_prop in vols: - if (v_nr == vol_nr): - if not check_vol_deployed: - # no need to check - return True - - if v_prop[CS_DEPLOYED] == dm_const.BOOL_TRUE: - return True - - retry += 1 - # Not yet - LOG.warning('Try #%(try)d: Volume "%(res)s"/%(vol)d ' - 'not yet deployed on "%(host)s", waiting.', - {'try': retry, 'host': nodenames, - 'res': res_name, 'vol': vol_nr}) - - eventlet.sleep(min(0.5 + retry / 5, 2)) - - # Timeout - return False - - def _priv_hash_from_volume(self, volume): - return dm_utils.dict_to_aux_props({ - AUX_PROP_CINDER_VOL_ID: volume['id'], - }) - - def snapshot_name_from_cinder_snapshot(self, snapshot): - sn_name = self.is_clean_volume_name(snapshot['id'], DM_SN_PREFIX) - return sn_name - - def _res_and_vl_data_for_volume(self, volume, empty_ok=False): - """Find DRBD resource and volume ID. - - A DRBD resource might consist of several "volumes" - (think consistency groups). - So we have to find the number of the volume within one resource. - Returns resource name, volume number, and resource - and volume properties. - """ - - # If we get a string, use it as-is. - # Else it's a dictionary; then get the ID. 
- if isinstance(volume, six.string_types): - v_uuid = volume - else: - v_uuid = volume['id'] - - res, rl = self.call_or_reconnect(self.odm.list_volumes, - self.empty_dict, - 0, - dm_utils.dict_to_aux_props( - {AUX_PROP_CINDER_VOL_ID: v_uuid}), - self.empty_dict) - self._check_result(res) - - if (not rl) or (len(rl) == 0): - if empty_ok: - LOG.debug("No volume %s found.", v_uuid) - return None, None, None, None - raise exception.VolumeBackendAPIException( - data=_("volume %s not found in drbdmanage") % v_uuid) - if len(rl) > 1: - raise exception.VolumeBackendAPIException( - data=_("multiple resources with name %s found by drbdmanage") % - v_uuid) - - (r_name, r_props, vols) = rl[0] - if len(vols) != 1: - raise exception.VolumeBackendAPIException( - data=_("not exactly one volume with id %s") % - v_uuid) - - (v_nr, v_props) = vols[0] - - LOG.debug("volume %(uuid)s is %(res)s/%(nr)d; %(rprop)s, %(vprop)s", - {'uuid': v_uuid, 'res': r_name, 'nr': v_nr, - 'rprop': dict(r_props), 'vprop': dict(v_props)}) - - return r_name, v_nr, r_props, v_props - - def _resource_and_snap_data_from_snapshot(self, snapshot, empty_ok=False): - """Find DRBD resource and snapshot name from the snapshot ID.""" - s_uuid = snapshot['id'] - res, rs = self.call_or_reconnect(self.odm.list_snapshots, - self.empty_dict, - self.empty_dict, - 0, - dm_utils.dict_to_aux_props( - {AUX_PROP_CINDER_VOL_ID: s_uuid}), - self.empty_dict) - self._check_result(res) - - if (not rs) or (len(rs) == 0): - if empty_ok: - return None - else: - raise exception.VolumeBackendAPIException( - data=_("no snapshot with id %s found in drbdmanage") % - s_uuid) - if len(rs) > 1: - raise exception.VolumeBackendAPIException( - data=_("multiple resources with snapshot ID %s found") % - s_uuid) - - (r_name, snaps) = rs[0] - if len(snaps) != 1: - raise exception.VolumeBackendAPIException( - data=_("not exactly one snapshot with id %s") % s_uuid) - - (s_name, s_props) = snaps[0] - - LOG.debug("snapshot %(uuid)s is %(res)s/%(snap)s", 
- {'uuid': s_uuid, 'res': r_name, 'snap': s_name}) - - return r_name, s_name, s_props - - def _resource_name_volnr_for_volume(self, volume, empty_ok=False): - res, vol, __, __ = self._res_and_vl_data_for_volume(volume, empty_ok) - return res, vol - - def local_path(self, volume): - d_res_name, d_vol_nr = self._resource_name_volnr_for_volume(volume) - - res, data = self.call_or_reconnect(self.odm.text_query, - [dm_const.TQ_GET_PATH, - d_res_name, - str(d_vol_nr)]) - self._check_result(res) - - if len(data) == 1: - return data[0] - - message = _('Got bad path information from DRBDmanage! (%s)') % data - raise exception.VolumeBackendAPIException(data=message) - - def _push_drbd_options(self, d_res_name): - res_opt = {'resource': d_res_name, - 'target': 'resource', - 'type': 'reso'} - res_opt.update(self.resource_options) - res = self.call_or_reconnect(self.odm.set_drbdsetup_props, res_opt) - self._check_result(res) - - res_opt = {'resource': d_res_name, - 'target': 'resource', - 'type': 'neto'} - res_opt.update(self.net_options) - res = self.call_or_reconnect(self.odm.set_drbdsetup_props, res_opt) - self._check_result(res) - - res_opt = {'resource': d_res_name, - 'target': 'resource', - 'type': 'disko'} - res_opt.update(self.disk_options) - res = self.call_or_reconnect(self.odm.set_drbdsetup_props, res_opt) - self._check_result(res) - - def create_volume(self, volume): - """Creates a DRBD resource. - - We address it later on via the ID that gets stored - as a private property. - """ - - # TODO(PM): consistency groups - d_res_name = self.is_clean_volume_name(volume['id'], DM_VN_PREFIX) - - res = self.call_or_reconnect(self.odm.create_resource, - d_res_name, - self.empty_dict) - self._check_result(res, ignore=[dm_exc.DM_EEXIST], ret=None) - - self._push_drbd_options(d_res_name) - - # If we get DM_EEXIST, then the volume already exists, eg. because - # deploy gave an error on a previous try (like ENOSPC). 
- # Still, there might or might not be the volume in the resource - - # we have to check that explicitly. - (__, drbd_vol) = self._resource_name_volnr_for_volume(volume, - empty_ok=True) - if not drbd_vol: - props = self._priv_hash_from_volume(volume) - # TODO(PM): properties - redundancy, etc - res = self.call_or_reconnect(self.odm.create_volume, - d_res_name, - self._vol_size_to_dm(volume['size']), - props) - self._check_result(res) - drbd_vol = self._fetch_answer_data(res, dm_const.VOL_ID) - - # If we crashed between create_volume and the deploy call, - # the volume might be defined but not exist on any server. Oh my. - res = self.call_or_reconnect(self.odm.auto_deploy, - d_res_name, self.drbdmanage_redundancy, - 0, False) - self._check_result(res) - - okay = self._call_policy_plugin(self.plugin_resource, - self.policy_resource, - dict(resource=d_res_name, - volnr=str(drbd_vol))) - if not okay: - message = (_('DRBDmanage timeout waiting for volume creation; ' - 'resource "%(res)s", volume "%(vol)s"') % - {'res': d_res_name, 'vol': volume['id']}) - raise exception.VolumeBackendAPIException(data=message) - - if self.drbdmanage_devs_on_controller: - # TODO(pm): CG - res = self.call_or_reconnect(self.odm.assign, - socket.gethostname(), - d_res_name, - [(dm_const.FLAG_DISKLESS, - dm_const.BOOL_TRUE)]) - self._check_result(res, ignore=[dm_exc.DM_EEXIST]) - - return {} - - def delete_volume(self, volume): - """Deletes a resource.""" - d_res_name, d_vol_nr = self._resource_name_volnr_for_volume( - volume, - empty_ok=True) - - if not d_res_name: - # OK, already gone. - return True - - # TODO(PM): check if in use? Ask whether Primary, or just check result? - res = self.call_or_reconnect(self.odm.remove_volume, - d_res_name, d_vol_nr, False) - self._check_result(res, ignore=[dm_exc.DM_ENOENT]) - - # Ask for volumes in that resource that are not scheduled for deletion. 
- res, rl = self.call_or_reconnect(self.odm.list_volumes, - [d_res_name], - 0, - [(dm_const.TSTATE_PREFIX + - dm_const.FLAG_REMOVE, - dm_const.BOOL_FALSE)], - self.empty_list) - self._check_result(res) - - # We expect the _resource_ to be here still (we just got a volnr from - # it!), so just query the volumes. - # If the resource has no volumes anymore, the current DRBDmanage - # version (errorneously, IMO) returns no *resource*, too. - if len(rl) > 1: - message = _('DRBDmanage expected one resource ("%(res)s"), ' - 'got %(n)d') % {'res': d_res_name, 'n': len(rl)} - raise exception.VolumeBackendAPIException(data=message) - - # Delete resource, if empty - if (not rl) or (not rl[0]) or (len(rl[0][2]) == 0): - res = self.call_or_reconnect(self.odm.remove_resource, - d_res_name, False) - self._check_result(res, ignore=[dm_exc.DM_ENOENT]) - - def create_volume_from_snapshot(self, volume, snapshot): - """Creates a volume from a snapshot.""" - - LOG.debug("create vol from snap: from %(snap)s make %(vol)s", - {'snap': snapshot['id'], 'vol': volume['id']}) - # TODO(PM): Consistency groups. 
- d_res_name, sname, sprop = self._resource_and_snap_data_from_snapshot( - snapshot) - - new_res = self.is_clean_volume_name(volume['id'], DM_VN_PREFIX) - - r_props = self.empty_dict - # TODO(PM): consistency groups => different volume number possible - new_vol_nr = 0 - v_props = [(new_vol_nr, self._priv_hash_from_volume(volume))] - - res = self.call_or_reconnect(self.odm.restore_snapshot, - new_res, - d_res_name, - sname, - r_props, - v_props) - self._check_result(res, ignore=[dm_exc.DM_ENOENT]) - - self._push_drbd_options(d_res_name) - - # TODO(PM): CG - okay = self._call_policy_plugin(self.plugin_resource, - self.policy_resource, - dict(resource=new_res, - volnr=str(new_vol_nr))) - if not okay: - message = (_('DRBDmanage timeout waiting for new volume ' - 'after snapshot restore; ' - 'resource "%(res)s", volume "%(vol)s"') % - {'res': new_res, 'vol': volume['id']}) - raise exception.VolumeBackendAPIException(data=message) - - if (('size' in volume) and (volume['size'] > snapshot['volume_size'])): - LOG.debug("resize volume '%(dst_vol)s' from %(src_size)d to " - "%(dst_size)d", - {'dst_vol': volume['id'], - 'src_size': snapshot['volume_size'], - 'dst_size': volume['size']}) - self.extend_volume(volume, volume['size']) - - def create_cloned_volume(self, volume, src_vref): - temp_id = self._clean_uuid() - snapshot = {'id': temp_id} - - self.create_snapshot({'id': temp_id, - 'volume_id': src_vref['id']}) - - snapshot['volume_size'] = src_vref['size'] - self.create_volume_from_snapshot(volume, snapshot) - - self.delete_snapshot(snapshot) - - def _update_volume_stats(self): - data = {} - - data["vendor_name"] = 'Open Source' - data["driver_version"] = self.VERSION - # This has to match the name set in the cinder volume driver spec, - # so keep it lowercase - data["volume_backend_name"] = self.backend_name - data["pools"] = [] - - res, free, total = self.call_or_reconnect(self.odm.cluster_free_query, - self.drbdmanage_redundancy) - self._check_result(res) - - 
location_info = ('DrbdManageBaseDriver:%(cvol)s:%(dbus)s' % - {'cvol': self.dm_control_vol, - 'dbus': self.drbdmanage_dbus_name}) - - # add volumes - res, rl = self.call_or_reconnect(self.odm.list_volumes, - self.empty_list, - 0, - self.empty_dict, - self.empty_list) - self._check_result(res) - total_volumes = 0 - for res in rl: - total_volumes += len(res[2]) - - # TODO(PM): multiple DRBDmanage instances and/or multiple pools - single_pool = {} - single_pool.update(dict( - pool_name=data["volume_backend_name"], - free_capacity_gb=self._vol_size_to_cinder(free), - total_capacity_gb=self._vol_size_to_cinder(total), - reserved_percentage=self.configuration.reserved_percentage, - location_info=location_info, - total_volumes=total_volumes, - filter_function=self.get_filter_function(), - goodness_function=self.get_goodness_function(), - QoS_support=False)) - - data["pools"].append(single_pool) - - self._stats = data - return self._stats - - def extend_volume(self, volume, new_size): - d_res_name, d_vol_nr = self._resource_name_volnr_for_volume(volume) - - res = self.call_or_reconnect(self.odm.resize_volume, - d_res_name, d_vol_nr, -1, - self._vol_size_to_dm(new_size), - 0) - self._check_result(res) - - okay = self._call_policy_plugin(self.plugin_resize, - self.policy_resize, - dict(resource=d_res_name, - volnr=str(d_vol_nr), - req_size=str(new_size))) - if not okay: - message = (_('DRBDmanage timeout waiting for volume size; ' - 'volume ID "%(id)s" (res "%(res)s", vnr %(vnr)d)') % - {'id': volume['id'], - 'res': d_res_name, 'vnr': d_vol_nr}) - raise exception.VolumeBackendAPIException(data=message) - - def create_snapshot(self, snapshot): - """Creates a snapshot.""" - sn_name = self.snapshot_name_from_cinder_snapshot(snapshot) - - d_res_name, d_vol_nr = self._resource_name_volnr_for_volume( - snapshot["volume_id"]) - - res, data = self.call_or_reconnect(self.odm.list_assignments, - self.empty_dict, - [d_res_name], - 0, - {CS_DISKLESS: dm_const.BOOL_FALSE}, - 
self.empty_list) - self._check_result(res) - - nodes = [d[0] for d in data] - if len(nodes) < 1: - raise exception.VolumeBackendAPIException( - _('Snapshot res "%s" that is not deployed anywhere?') % - (d_res_name)) - - props = self._priv_hash_from_volume(snapshot) - res = self.call_or_reconnect(self.odm.create_snapshot, - d_res_name, sn_name, nodes, props) - self._check_result(res) - - okay = self._call_policy_plugin(self.plugin_snapshot, - self.policy_snapshot, - dict(resource=d_res_name, - snapshot=sn_name)) - if not okay: - message = (_('DRBDmanage timeout waiting for snapshot creation; ' - 'resource "%(res)s", snapshot "%(sn)s"') % - {'res': d_res_name, 'sn': sn_name}) - raise exception.VolumeBackendAPIException(data=message) - - def delete_snapshot(self, snapshot): - """Deletes a snapshot.""" - - d_res_name, sname, _ = self._resource_and_snap_data_from_snapshot( - snapshot, empty_ok=True) - - if not d_res_name: - # resource already gone? - LOG.warning("snapshot: %s not found, " - "skipping delete operation", snapshot['id']) - LOG.info('Successfully deleted snapshot: %s', snapshot['id']) - return True - - res = self.call_or_reconnect(self.odm.remove_snapshot, - d_res_name, sname, True) - return self._check_result(res, ignore=[dm_exc.DM_ENOENT]) - - -# Class with iSCSI interface methods -@interface.volumedriver -class DrbdManageIscsiDriver(DrbdManageBaseDriver): - """Cinder driver that uses the iSCSI protocol. 
""" - - def __init__(self, *args, **kwargs): - super(DrbdManageIscsiDriver, self).__init__(*args, **kwargs) - target_driver = self.target_mapping[ - self.configuration.safe_get('iscsi_helper')] - - LOG.debug('Attempting to initialize DRBD driver with the ' - 'following target_driver: %s', - target_driver) - - self.target_driver = importutils.import_object( - target_driver, - configuration=self.configuration, - db=self.db, - executor=self._execute) - - def get_volume_stats(self, refresh=False): - """Get volume status.""" - - self._update_volume_stats() - self._stats["storage_protocol"] = "iSCSI" - return self._stats - - def ensure_export(self, context, volume): - volume_path = self.local_path(volume) - return self.target_driver.ensure_export( - context, - volume, - volume_path) - - def create_export(self, context, volume, connector): - volume_path = self.local_path(volume) - export_info = self.target_driver.create_export( - context, - volume, - volume_path) - - return {'provider_location': export_info['location'], - 'provider_auth': export_info['auth'], } - - def remove_export(self, context, volume): - return self.target_driver.remove_export(context, volume) - - def initialize_connection(self, volume, connector): - return self.target_driver.initialize_connection(volume, connector) - - def validate_connector(self, connector): - return self.target_driver.validate_connector(connector) - - def terminate_connection(self, volume, connector, **kwargs): - return self.target_driver.terminate_connection(volume, - connector, - **kwargs) - -# for backwards compatibility keep the old class name, too -DrbdManageDriver = DrbdManageIscsiDriver - - -# Class with DRBD transport mode -@interface.volumedriver -class DrbdManageDrbdDriver(DrbdManageBaseDriver): - """Cinder driver that uses the DRBD protocol. 
""" - - def __init__(self, *args, **kwargs): - super(DrbdManageDrbdDriver, self).__init__(*args, **kwargs) - - def get_volume_stats(self, refresh=False): - """Get volume status.""" - - self._update_volume_stats() - self._stats["storage_protocol"] = "DRBD" - return self._stats - - def _return_local_access(self, nodename, volume, - d_res_name=None, volume_path=None): - - if not volume_path: - volume_path = self.local_path(volume) - - return { - 'driver_volume_type': 'local', - 'data': { - "device_path": volume_path - } - } - - def _return_drbdadm_config(self, volume, nodename, - d_res_name=None, volume_path=None): - - if not d_res_name: - d_res_name, d_vol_nr = self._resource_name_volnr_for_volume(volume) - - res, data = self.call_or_reconnect( - self.odm.text_query, - ['export_conf_split_up', nodename, d_res_name]) - self._check_result(res) - - config = six.text_type(data.pop(0)) - subst_data = {} - while len(data): - k = data.pop(0) - subst_data[k] = data.pop(0) - - if not volume_path: - volume_path = self.local_path(volume) - - return { - 'driver_volume_type': 'drbd', - 'data': { - 'provider_location': ' '.join('drbd', nodename), - 'device': volume_path, - # TODO(pm): consistency groups - 'devices': [volume_path], - 'provider_auth': subst_data['shared-secret'], - 'config': config, - 'name': d_res_name, - } - } - - def _is_external_node(self, nodename): - """Return whether the given node is an "external" node.""" - - # If the node accessing the data (the "initiator" in iSCSI speak, - # "client" or "target" otherwise) is marked as an FLAG_EXTERNAL - # node, it does not have DRBDmanage active - and that means - # we have to send the necessary DRBD configuration. - # - # If DRBDmanage is running there, just pushing the (client) - # assignment is enough to make the local path available. 
- - res, nodes = self.call_or_reconnect(self.odm.list_nodes, - [nodename], 0, - self.empty_dict, - [dm_const.FLAG_EXTERNAL]) - self._check_result(res) - - if len(nodes) != 1: - msg = _('Expected exactly one node called "%s"') % nodename - LOG.error(msg) - raise exception.VolumeDriverException(message=msg) - - __, nodeattr = nodes[0] - - return getattr(nodeattr, dm_const.FLAG_EXTERNAL, - dm_const.BOOL_FALSE) == dm_const.BOOL_TRUE - - def _return_connection_data(self, nodename, volume, d_res_name=None): - if nodename and self._is_external_node(nodename): - return self._return_drbdadm_config(nodename, - volume, - d_res_name=d_res_name) - else: - return self._return_local_access(nodename, volume) - - def create_export(self, context, volume, connector): - d_res_name, d_vol_nr = self._resource_name_volnr_for_volume(volume) - - nodename = connector["host"] - - # Ensure the node is known to DRBDmanage. - # Note that this does *not* mean that DRBDmanage has to - # be installed on it! - # This is just so that DRBD allows the IP to connect. - node_prop = { - dm_const.NODE_ADDR: connector["ip"], - dm_const.FLAG_DRBDCTRL: dm_const.BOOL_FALSE, - dm_const.FLAG_STORAGE: dm_const.BOOL_FALSE, - dm_const.FLAG_EXTERNAL: dm_const.BOOL_TRUE, - } - res = self.call_or_reconnect( - self.odm.create_node, nodename, node_prop) - self._check_result(res, ignore=[dm_exc.DM_EEXIST]) - - # Ensure the data is accessible, by creating an assignment. - assg_prop = { - dm_const.FLAG_DISKLESS: dm_const.BOOL_TRUE, - } - # If we create the assignment here, it's temporary - - # and has to be removed later on again. - assg_prop.update(dm_utils.aux_props_to_dict({ - AUX_PROP_TEMP_CLIENT: dm_const.BOOL_TRUE, - })) - - res = self.call_or_reconnect( - self.odm.assign, nodename, d_res_name, assg_prop) - self._check_result(res, ignore=[dm_exc.DM_EEXIST]) - - # Wait for DRBDmanage to have completed that action. 
- - # A DRBDmanage controlled node will set the cstate:deploy flag; - # an external node will not be available to change it, so we have - # to wait for the storage nodes to remove the upd_con flag - # (ie. they're now ready to receive the connection). - if self._is_external_node(nodename): - self._wait_for_node_assignment( - d_res_name, d_vol_nr, [], - check_vol_deployed=False, - filter_props={ - # must be deployed - CS_DEPLOYED: dm_const.BOOL_TRUE, - # must be a storage node (not diskless), - CS_DISKLESS: dm_const.BOOL_FALSE, - # connection must be available, no need for updating - CS_UPD_CON: dm_const.BOOL_FALSE, - }) - else: - self._wait_for_node_assignment( - d_res_name, d_vol_nr, [nodename], - check_vol_deployed=True, - filter_props={ - CS_DEPLOYED: dm_const.BOOL_TRUE, - }) - - return self._return_connection_data(nodename, volume) - - def ensure_export(self, context, volume): - p_location = volume['provider_location'] - if p_location: - fields = p_location.split(" ") - nodename = fields[1] - else: - nodename = None - - return self._return_connection_data(nodename, volume) - - def initialize_connection(self, volume, connector): - - nodename = connector["host"] - - return self._return_connection_data(nodename, volume) - - def terminate_connection(self, volume, connector, - force=False, **kwargs): - d_res_name, d_vol_nr = self._resource_name_volnr_for_volume( - volume, empty_ok=True) - if not d_res_name: - return - - nodename = connector["host"] - - # If the DRBD volume is diskless on that node, we remove it; - # if it has local storage, we keep it. - res, data = self.call_or_reconnect( - self.odm.list_assignments, - [nodename], [d_res_name], 0, - self.empty_list, self.empty_list) - self._check_result(res, ignore=[dm_exc.DM_ENOENT]) - - if len(data) < 1: - # already removed?! 
- LOG.info('DRBD connection for %s already removed', - volume['id']) - elif len(data) == 1: - __, __, props, __ = data[0] - my_props = dm_utils.dict_to_aux_props(props) - diskless = getattr(props, - dm_const.FLAG_DISKLESS, - dm_const.BOOL_FALSE) - temp_cli = getattr(my_props, - AUX_PROP_TEMP_CLIENT, - dm_const.BOOL_FALSE) - # If diskless assigned, - if ((diskless == dm_const.BOOL_TRUE) and - (temp_cli == dm_const.BOOL_TRUE)): - # remove the assignment - - # TODO(pm): does it make sense to relay "force" here? - # What are the semantics? - - # TODO(pm): consistency groups shouldn't really - # remove until *all* volumes are detached - - res = self.call_or_reconnect(self.odm.unassign, - nodename, d_res_name, force) - self._check_result(res, ignore=[dm_exc.DM_ENOENT]) - else: - # more than one assignment? - LOG.error("DRBDmanage: too many assignments returned.") - return - - def remove_export(self, context, volume): - pass diff --git a/cinder/volume/drivers/falconstor/__init__.py b/cinder/volume/drivers/falconstor/__init__.py deleted file mode 100644 index e69de29bb..000000000 diff --git a/cinder/volume/drivers/falconstor/fc.py b/cinder/volume/drivers/falconstor/fc.py deleted file mode 100644 index b2af0d94c..000000000 --- a/cinder/volume/drivers/falconstor/fc.py +++ /dev/null @@ -1,113 +0,0 @@ -# Copyright (c) 2016 FalconStor, Inc. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
"""Fibre channel Cinder volume driver for FalconStor FSS storage system.

This driver requires FSS-8.00-8865 or later.
"""

from oslo_log import log as logging

from cinder import exception
from cinder.i18n import _
from cinder import interface
import cinder.volume.driver
from cinder.volume.drivers.falconstor import fss_common
from cinder.zonemanager import utils as fczm_utils

LOG = logging.getLogger(__name__)


@interface.volumedriver
class FSSFCDriver(fss_common.FalconstorBaseDriver,
                  cinder.volume.driver.FibreChannelDriver):
    """Implements commands for FalconStor FSS FC management.

    To enable the driver add the following line to the cinder configuration:
    volume_driver=cinder.volume.drivers.falconstor.fc.FSSFCDriver

    Version history:
        1.0.0 - Initial driver

    """

    VERSION = '1.0.0'

    # ThirdPartySystems wiki page
    CI_WIKI_NAME = "FalconStor_CI"

    def __init__(self, *args, **kwargs):
        super(FSSFCDriver, self).__init__(*args, **kwargs)
        # Populated in do_setup() from the backend's FC target list.
        self.gateway_fc_wwns = []
        self._storage_protocol = "FC"
        self._backend_name = (
            self.configuration.safe_get('volume_backend_name') or
            self.__class__.__name__)
        self._lookup_service = fczm_utils.create_lookup_service()

    def do_setup(self, context):
        """Any initialization the driver does while starting.

        Queries the backend for its FC target WWPNs so that later
        connection calls can hand them to the zone manager.
        """
        super(FSSFCDriver, self).do_setup(context)
        self.gateway_fc_wwns = self.proxy.list_fc_target_wwpn()

    def check_for_setup_error(self):
        """Returns an error if prerequisites aren't met."""
        super(FSSFCDriver, self).check_for_setup_error()
        if not self.gateway_fc_wwns:
            # Without at least one target WWPN no FC export can ever work.
            raise exception.InvalidHost(reason=_('No FC targets found'))

    def validate_connector(self, connector):
        """Check connector for at least one enabled FC protocol."""
        if self._storage_protocol == 'FC' and 'wwpns' not in connector:
            LOG.error('The connector does not contain the required '
                      'information.')
            raise exception.InvalidConnectorException(missing='wwpns')

    @fczm_utils.add_fc_zone
    def initialize_connection(self, volume, connector):
        """Export *volume* over FC and return the connection info dict."""
        backend_hosts = [self.configuration.san_ip]
        target_info = self.proxy.fc_initialize_connection(volume, connector,
                                                          backend_hosts)
        targ_map = self._build_initiator_target_map(
            target_info['available_initiator'])
        return {
            'driver_volume_type': 'fibre_channel',
            'data': {
                'target_lun': int(target_info['lun']),
                'target_discovered': True,
                'target_wwn': self.gateway_fc_wwns,
                'initiator_target_map': targ_map,
                'volume_id': volume['id'],
            },
        }

    def _build_initiator_target_map(self, initiator_wwns):
        """Build the target_wwns and the initiator target map.

        Every initiator WWN is mapped to the full list of gateway target
        WWPNs discovered at setup time.
        """
        return {initiator: self.gateway_fc_wwns
                for initiator in initiator_wwns}

    @fczm_utils.remove_fc_zone
    def terminate_connection(self, volume, connector, **kwargs):
        """Remove the FC export; include unzoning data once the host is idle."""
        host_id = self.proxy.fc_terminate_connection(volume, connector)
        conn_info = {"driver_volume_type": "fibre_channel", "data": {}}
        # Only hand the initiator/target map back (which triggers zone
        # removal) when no devices remain attached to this host.
        if self.proxy._check_fc_host_devices_empty(host_id):
            available_initiator, fc_initiators_info = (
                self.proxy._get_fc_client_initiators(connector))
            conn_info["data"] = {
                "target_wwn": self.gateway_fc_wwns,
                "initiator_target_map": self._build_initiator_target_map(
                    available_initiator),
            }
        return conn_info
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Volume driver for FalconStor FSS storage system.

This driver requires FSS-8.00-8865 or later.
"""

import math
import re

from oslo_config import cfg
from oslo_log import log as logging
from oslo_utils import excutils
from oslo_utils import units
import six

from cinder import exception
from cinder.i18n import _
from cinder.volume import configuration
from cinder.volume.drivers.falconstor import rest_proxy
from cinder.volume.drivers.san import san

LOG = logging.getLogger(__name__)

FSS_OPTS = [
    # NOTE(review): IntOpt with a string default '' relies on oslo.config's
    # coercion of the unset value -- confirm against oslo.config behavior.
    cfg.IntOpt('fss_pool',
               default='',
               help='DEPRECATED: FSS pool id in which FalconStor volumes are '
                    'stored.',
               deprecated_since='Pike',
               deprecated_reason='This option will be removed once Queens '
                                 'development opens up. Please use fss_pools '
                                 'instead.'),
    cfg.DictOpt('fss_pools',
                default={},
                help='FSS pool id list in which FalconStor volumes are stored.'
                     ' If you have only one pool, use A:. '
                     'You can also have up to two storage pools, '
                     'P for primary and O for all supporting devices. '
                     'The usage is P:,O:',
                deprecated_name='fss_pool'),
    cfg.StrOpt('fss_san_secondary_ip',
               default='',
               help='Specifies FSS secondary management IP to be used '
                    'if san_ip is invalid or becomes inaccessible.'),
    cfg.BoolOpt('fss_debug',
                default=False,
                help="Enable HTTP debugging to FSS"),
    cfg.StrOpt('additional_retry_list',
               default='',
               help='FSS additional retry list, separate by ;')
]

CONF = cfg.CONF
CONF.register_opts(FSS_OPTS, group=configuration.SHARED_CONF_GROUP)


class FalconstorBaseDriver(san.SanDriver):
    """Shared implementation for the FalconStor FSS FC and iSCSI drivers.

    All backend interaction goes through ``self.proxy`` (a
    ``rest_proxy.RESTProxy``); subclasses add the protocol-specific
    connection handling.
    """

    def __init__(self, *args, **kwargs):
        super(FalconstorBaseDriver, self).__init__(*args, **kwargs)
        if self.configuration:
            self.configuration.append_config_values(FSS_OPTS)

        # Back-compat: translate the deprecated single-pool option into the
        # new dict-style fss_pools format.
        if self.configuration.fss_pool:
            self.configuration.fss_pools = {'A': str(
                self.configuration.fss_pool)}
            LOG.warning("'fss_pool=' is deprecated. Using the "
                        "fss_pools=A: for single pool or "
                        "fss_pools=P:,O: instead "
                        "as old format will be removed once Queens development"
                        " opens up.")

        self.proxy = rest_proxy.RESTProxy(self.configuration)
        self._backend_name = (
            self.configuration.safe_get('volume_backend_name') or 'FalconStor')
        self._storage_protocol = ''

    def do_setup(self, context):
        """Log in to the FSS backend and cache the session."""
        self.proxy.do_setup()
        LOG.info('Activate FalconStor cinder volume driver.')

    def check_for_setup_error(self):
        """Validate session, pool configuration and thin-provision settings."""
        if self.proxy.session_id is None:
            msg = _('FSS cinder volume driver not ready: Unable to determine '
                    'session id.')
            raise exception.VolumeBackendAPIException(data=msg)

        # Same deprecated-option translation as __init__; repeated here in
        # case the configuration was mutated after driver construction.
        if self.configuration.fss_pool:
            self.configuration.fss_pools = {'A': six.text_type(
                self.configuration.fss_pool)}
            # The fss_pool is deprecated.
            LOG.warning("'fss_pool=' is deprecated. Using the "
                        "fss_pools=A: for single pool or "
                        "fss_pools=P:,O: instead "
                        "as old format will be removed once Queens development"
                        " opens up.")

        if not self.configuration.fss_pools:
            msg = _('Pool is not available in the cinder configuration '
                    'fields.')
            raise exception.InvalidHost(reason=msg)
        self._pool_checking(self.configuration.fss_pools)

        if self.configuration.san_thin_provision:
            if not self.configuration.max_over_subscription_ratio:
                msg = _('The max_over_subscription_ratio have to set '
                        'when thin provisioning enabled.')
                raise exception.InvalidConfigurationValue(reason=msg)

    def _pool_checking(self, pool_info):
        """Verify the configured pool layout and pool naming on the backend.

        :raises VolumeBackendAPIException: on any mismatch or backend error
            (specific failures are logged, then re-raised generically).
        """
        pool_count = 0
        try:
            if len(pool_info) == 1:
                _pool_state = self._is_single_pool(pool_info)
                if not _pool_state:
                    msg = _('The given pool info does not match.')
                    raise exception.VolumeBackendAPIException(data=msg)
            else:
                _pool_state = self._is_multi_pool(pool_info)
                if not _pool_state:
                    msg = _('The given pool info does not match.')
                    raise exception.VolumeBackendAPIException(data=msg)

            for index, pool_id in pool_info.items():
                output = self.proxy.list_pool_info(pool_id)
                if "name" in output['data']:
                    pool_count = len(re.findall(rest_proxy.GROUP_PREFIX,
                                                output['data']['name']))
                    # Fixed: was "pool_count is 0" -- identity comparison
                    # against an int literal is implementation-dependent
                    # (SyntaxWarning on CPython >= 3.8); use equality.
                    if pool_count == 0:
                        msg = _('The given pool info must include the storage '
                                'pool and naming start with OpenStack-')
                        raise exception.VolumeBackendAPIException(data=msg)
        except Exception:
            msg = _('Unexpected exception during pool checking.')
            LOG.exception(msg)
            raise exception.VolumeBackendAPIException(data=msg)

    def _check_multipath(self):
        """Return backend multipath state, or False when not configured."""
        if self.configuration.use_multipath_for_image_xfer:
            if not self.configuration.fss_san_secondary_ip:
                msg = _('The san_secondary_ip param is null.')
                raise exception.VolumeBackendAPIException(data=msg)
            output = self.proxy._check_iocluster_state()
            if not output:
                msg = _('FSS do not support multipathing.')
                raise exception.VolumeBackendAPIException(data=msg)
            return output
        else:
            return False

    def _is_single_pool(self, pool_info):
        # Single-pool layout: exactly one entry keyed "A".
        if len(pool_info) == 1 and "A" in pool_info:
            return True
        else:
            return False

    def _is_multi_pool(self, pool_info):
        # Dual-pool layout: "P" (primary) and "O" (supporting devices).
        if len(pool_info) == 2 and "P" in pool_info and "O" in pool_info:
            return True
        else:
            return False

    def create_volume(self, volume):
        """Creates a volume.

        We use the metadata of the volume to create variety volume.

        Create a thin provisioned volume :

        .. code:: console

           create --volume-type FSS-THIN
           --metadata thinsize=<thin-volume-size> volume-size

        Create a LUN that is a Timeview of another LUN at a specified CDP tag:

        .. code:: console

           create --volume-type FSS --metadata timeview=<vid>
           cdptag=<tag> volume-size

        Create a LUN that is a Timeview of another LUN at a specified Timemark:

        .. code:: console

           create --volume-type FSS --metadata timeview=<vid>
           rawtimestamp=<rawtimestamp> volume-size

        Create a mirrored volume :

        .. code:: console

           create --volume-type FSS --metadata mirrored=true

        """

        volume_metadata = self._get_volume_metadata(volume)
        if not volume_metadata:
            volume_name, fss_metadata = self.proxy.create_vdev(volume)
        else:
            if self.configuration.san_thin_provision:
                volume_name, fss_metadata = self.proxy.create_thin_vdev(
                    volume_metadata, volume)
            # Fixed precedence bug: the original condition parsed as
            # (timeview AND cdptag) OR rawtimestamp, so a volume carrying
            # only "rawtimestamp" (no "timeview") was routed to
            # create_tv_from_cdp_tag, which then KeyErrors on the missing
            # "timeview" key. A timeview is required for either tag form.
            elif ("timeview" in volume_metadata and
                  ("cdptag" in volume_metadata or
                   "rawtimestamp" in volume_metadata)):
                volume_name, fss_metadata = self.proxy.create_tv_from_cdp_tag(
                    volume_metadata, volume)
            elif 'mirrored' in volume_metadata:
                volume_name, fss_metadata = self.proxy.create_vdev_with_mirror(
                    volume_metadata, volume)
            else:
                volume_name, fss_metadata = self.proxy.create_vdev(volume)
            fss_metadata.update(volume_metadata)

        # Fixed idiom: isinstance instead of "type(...) is dict".
        if isinstance(volume['metadata'], dict):
            fss_metadata.update(volume['metadata'])
        if volume['consistencygroup_id']:
            self.proxy._add_volume_to_consistency_group(
                volume['consistencygroup_id'],
                volume_name
            )
        return {'metadata': fss_metadata}

    def _get_volume_metadata(self, volume):
        """Flatten the volume's key/value metadata list into a dict."""
        volume_metadata = {}
        if 'volume_metadata' in volume:
            for metadata in volume['volume_metadata']:
                volume_metadata[metadata['key']] = metadata['value']
        return volume_metadata

    def create_cloned_volume(self, volume, src_vref):
        """Creates a clone of the specified volume."""
        new_vol_name = self.proxy._get_fss_volume_name(volume)
        src_name = self.proxy._get_fss_volume_name(src_vref)
        vol_size = volume["size"]
        src_size = src_vref["size"]
        fss_metadata = self.proxy.clone_volume(new_vol_name, src_name)
        # Grow the clone if the new volume was requested larger than the
        # source.
        self.proxy.extend_vdev(new_vol_name, src_size, vol_size)

        if volume['consistencygroup_id']:
            self.proxy._add_volume_to_consistency_group(
                volume['consistencygroup_id'],
                new_vol_name
            )
        volume_metadata = self._get_volume_metadata(volume)
        fss_metadata.update(volume_metadata)

        # Fixed idiom: isinstance instead of "type(...) is dict".
        if isinstance(volume['metadata'], dict):
            fss_metadata.update(volume['metadata'])
        return {'metadata': fss_metadata}

    def extend_volume(self, volume, new_size):
        """Extend volume to new_size."""
        volume_name = self.proxy._get_fss_volume_name(volume)
        self.proxy.extend_vdev(volume_name, volume["size"], new_size)

    def delete_volume(self, volume):
        """Disconnect all hosts and delete the volume"""
        try:
            self.proxy.delete_vdev(volume)
        except rest_proxy.FSSHTTPError as err:
            # Best-effort delete: log and swallow backend HTTP failures so
            # Cinder can still mark the volume deleted.
            with excutils.save_and_reraise_exception() as ctxt:
                ctxt.reraise = False
                LOG.warning("Volume deletion failed with message: %s",
                            err.reason)

    def create_snapshot(self, snapshot):
        """Creates a snapshot."""
        snap_metadata = snapshot["metadata"]
        metadata = self.proxy.create_snapshot(snapshot)
        snap_metadata.update(metadata)
        return {'metadata': snap_metadata}

    def delete_snapshot(self, snapshot):
        """Deletes a snapshot."""
        try:
            self.proxy.delete_snapshot(snapshot)
        except rest_proxy.FSSHTTPError as err:
            # Best-effort delete, mirroring delete_volume().
            with excutils.save_and_reraise_exception() as ctxt:
                ctxt.reraise = False
                LOG.error(
                    "Snapshot deletion failed with message: %s",
                    err.reason)

    def create_volume_from_snapshot(self, volume, snapshot):
        """Creates a volume from a snapshot."""
        vol_size = volume['size']
        snap_size = snapshot['volume_size']
        volume_name, fss_metadata = self.proxy.create_volume_from_snapshot(
            volume, snapshot)

        if vol_size != snap_size:
            try:
                extend_volume_name = self.proxy._get_fss_volume_name(volume)
                self.proxy.extend_vdev(extend_volume_name, snap_size, vol_size)
            except rest_proxy.FSSHTTPError as err:
                # A failed resize leaves a snapshot-sized volume; log and
                # continue rather than failing the whole create.
                with excutils.save_and_reraise_exception() as ctxt:
                    ctxt.reraise = False
                    LOG.error(
                        "Resizing %(id)s failed with message: %(msg)s. "
                        "Cleaning volume.", {'id': volume["id"],
                                             'msg': err.reason})

        # Fixed idiom: isinstance instead of "type(...) is dict".
        if isinstance(volume['metadata'], dict):
            fss_metadata.update(volume['metadata'])

        if volume['consistencygroup_id']:
            self.proxy._add_volume_to_consistency_group(
                volume['consistencygroup_id'],
                volume_name)
        return {'metadata': fss_metadata}

    def ensure_export(self, context, volume):
        pass

    def create_export(self, context, volume, connector):
        pass

    def remove_export(self, context, volume):
        pass

    # Attach/detach volume to instance/host
    def attach_volume(self, context, volume, instance_uuid, host_name,
                      mountpoint):
        pass

    def detach_volume(self, context, volume, attachment=None):
        pass

    def get_volume_stats(self, refresh=False):
        """Return (and on *refresh* recompute) the backend capacity stats."""
        total_capacity = 0
        free_space = 0
        # Thin provisioning
        thin_enabled = self.configuration.san_thin_provision
        if refresh:
            try:
                info = self.proxy._get_pools_info()
                if info:
                    total_capacity = int(info['total_capacity_gb'])
                    used_space = int(info['used_gb'])
                    free_space = int(total_capacity - used_space)

                    data = {"vendor_name": "FalconStor",
                            "volume_backend_name": self._backend_name,
                            "driver_version": self.VERSION,
                            "storage_protocol": self._storage_protocol,
                            "total_capacity_gb": total_capacity,
                            "free_capacity_gb": free_space,
                            "reserved_percentage": 0,
                            "consistencygroup_support": True,
                            "thin_provisioning_support": thin_enabled,
                            "thick_provisioning_support": not thin_enabled
                            }
                    if thin_enabled:
                        provisioned_capacity = int(info['used_gb'])
                        data['provisioned_capacity_gb'] = provisioned_capacity
                        data['max_over_subscription_ratio'] = (
                            self.configuration.max_over_subscription_ratio)
                    self._stats = data

            except Exception as exc:
                LOG.error('Cannot get volume status %(exc)s.',
                          {'exc': exc})
        return self._stats

    def create_consistencygroup(self, context, group):
        """Creates a consistencygroup."""
        self.proxy.create_group(group)
        model_update = {'status': 'available'}
        return model_update

    def delete_consistencygroup(self, context, group, volumes):
        """Deletes a consistency group."""
        self.proxy.destroy_group(group)
        volume_updates = []
        for volume in volumes:
            self.delete_volume(volume)
            volume_updates.append({
                'id': volume.id,
                'status': 'deleted'
            })

        model_update = {'status': group['status']}
        return model_update, volume_updates

    def update_consistencygroup(self, context, group,
                                add_volumes=None, remove_volumes=None):
        """Add/remove member volumes of a consistency group on the backend."""
        addvollist = []
        remvollist = []
        if add_volumes:
            for volume in add_volumes:
                addvollist.append(self.proxy._get_fss_volume_name(volume))
        if remove_volumes:
            for volume in remove_volumes:
                remvollist.append(self.proxy._get_fss_volume_name(volume))

        self.proxy.set_group(group['id'], addvollist=addvollist,
                             remvollist=remvollist)
        return None, None, None

    def create_cgsnapshot(self, context, cgsnapshot, snapshots):
        """Creates a cgsnapshot."""
        cgsnapshot_id = cgsnapshot['id']
        try:
            self.proxy.create_cgsnapshot(cgsnapshot)
        except Exception as e:
            msg = _('Failed to create cg snapshot %(id)s '
                    'due to %(reason)s.') % {'id': cgsnapshot_id,
                                             'reason': six.text_type(e)}
            raise exception.VolumeBackendAPIException(data=msg)

        snapshot_updates = []
        for snapshot in snapshots:
            snapshot_updates.append({
                'id': snapshot.id,
                'status': 'available'
            })
        model_update = {'status': 'available'}
        return model_update, snapshot_updates

    def delete_cgsnapshot(self, context, cgsnapshot, snapshots):
        """Deletes a cgsnapshot."""
        cgsnapshot_id = cgsnapshot.id
        try:
            self.proxy.delete_cgsnapshot(cgsnapshot)
        except Exception as e:
            msg = _('Failed to delete cgsnapshot %(id)s '
                    'due to %(reason)s.') % {'id': cgsnapshot_id,
                                             'reason': six.text_type(e)}
            raise exception.VolumeBackendAPIException(data=msg)

        snapshot_updates = []
        for snapshot in snapshots:
            snapshot_updates.append({
                'id': snapshot.id,
                'status': 'deleted',
            })
        model_update = {'status': cgsnapshot.status}
        return model_update, snapshot_updates

    def manage_existing(self, volume, existing_ref):
        """Convert an existing FSS volume to a Cinder volume.

        We expect a volume id in the existing_ref that matches one in FSS.
        """
        volume_metadata = {}
        self.proxy._get_existing_volume_ref_vid(existing_ref)
        self.proxy._manage_existing_volume(existing_ref['source-id'], volume)
        volume_metadata['FSS-vid'] = existing_ref['source-id']
        updates = {'metadata': volume_metadata}
        return updates

    def manage_existing_get_size(self, volume, existing_ref):
        """Get size of an existing FSS volume.

        We expect a volume id in the existing_ref that matches one in FSS.
        """
        sizemb = self.proxy._get_existing_volume_ref_vid(existing_ref)
        # Backend reports MB; Cinder sizes are whole GB, rounded up.
        size = int(math.ceil(float(sizemb) / units.Ki))
        return size

    def unmanage(self, volume):
        """Remove Cinder management from FSS volume"""
        self.proxy.unmanage(volume)
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Volume driver for FalconStor FSS storage system.

This driver requires FSS-8.00-8865 or later.
"""

from cinder import interface
import cinder.volume.driver
from cinder.volume.drivers.falconstor import fss_common

DEFAULT_ISCSI_PORT = 3260


@interface.volumedriver
class FSSISCSIDriver(fss_common.FalconstorBaseDriver,
                     cinder.volume.driver.ISCSIDriver):

    """Implements commands for FalconStor FSS ISCSI management.

    To enable the driver add the following line to the cinder configuration:
    volume_driver=cinder.volume.drivers.falconstor.iscsi.FSSISCSIDriver

    .. code: text

      Version history:
        1.0.0 - Initial driver
        1.0.1 - Fix copy_image_to_volume error.
        1.0.2 - Closes-Bug #1554184, add lun id type conversion in
                initialize_connection
        1.03 - merge source code
        1.04 - Fixed create_volume_from_snapshot(), create_cloned_volume()
               metadata TypeError
        2.0.0 - Newton driver
             -- fixed consisgroup commands error
        2.0.1 -- fixed bugs
        2.0.2 -- support Multipath
        3.0.0 - Ocata driver
             -- fixed bugs
        4.0.0 - Pike driver
             -- extend Cinder driver to utilize multiple FSS storage pools

    """

    VERSION = '4.0.0'

    # ThirdPartySystems wiki page
    CI_WIKI_NAME = "FalconStor_CI"

    def __init__(self, *args, **kwargs):
        super(FSSISCSIDriver, self).__init__(*args, **kwargs)
        self._storage_protocol = "iSCSI"
        self._backend_name = (
            self.configuration.safe_get('volume_backend_name') or
            self.__class__.__name__)

    def initialize_connection(self, volume, connector, initiator_data=None):
        """Export *volume* over iSCSI and return the connection properties.

        When the connector requests multipath and the backend supports it,
        the secondary management IP is added as a second portal.
        """
        use_multipath = connector.get('multipath', False)
        backend_hosts = [self.configuration.san_ip]

        if use_multipath:
            if self._check_multipath():
                backend_hosts.append(self.configuration.fss_san_secondary_ip)
            else:
                # Backend can't do multipath; fall back to a single portal.
                use_multipath = False

        portals = []
        for address in backend_hosts:
            portals.append("%s:%d" % (address, DEFAULT_ISCSI_PORT))

        target_info = self.proxy.initialize_connection_iscsi(volume,
                                                             connector,
                                                             backend_hosts)
        lun = int(target_info['lun'])
        properties = {
            'target_discovered': True,
            'discard': True,
            'encrypted': False,
            'qos_specs': None,
            'access_mode': 'rw',
            'volume_id': volume['id'],
            'target_iqn': target_info['iqn'],
            'target_portal': portals[0],
            'target_lun': lun,
        }

        if use_multipath:
            # Same IQN/LUN reachable through both portals.
            properties['target_iqns'] = [target_info['iqn'],
                                         target_info['iqn']]
            properties['target_portals'] = portals
            properties['target_luns'] = [lun, lun]

        return {'driver_volume_type': 'iscsi', 'data': properties}

    def terminate_connection(self, volume, connector, **kwargs):
        """Terminate connection."""
        self.proxy.terminate_connection_iscsi(volume, connector)
- -import base64 -import json -import random -import six -import time -import uuid - -from oslo_log import log as logging -from oslo_utils import excutils -from oslo_utils import units -from six.moves import http_client - -from cinder import exception -from cinder.i18n import _ - -FSS_BATCH = 'batch' -FSS_PHYSICALRESOURCE = 'physicalresource' -FSS_PHYSICALADAPTER = 'physicaladapter' -FSS_FCCLIENTINITIATORS = 'fcclientinitiators' -FSS_FC_TGT_WWPN = 'fctgtwwpn' -FSS_STORAGE_POOL = 'storagepool' -FSS_LOGICALRESOURCE = 'logicalresource' -FSS_SAN = 'sanresource' -FSS_MIRROR = 'mirror' -FSS_TIMEMARKPOLICY = 'timemarkpolicy' -FSS_TIMEMARK = 'timemark' -FSS_TIMEVIEW = 'timeview' -FSS_SNAPSHOT_RESOURCE = 'snapshotresource' -FSS_SNAPSHOT_GROUP = 'snapshotgroup' -FSS_CLIENT = 'client' -FSS_SANCLIENT = 'sanclient' -FSS_ISCSI_TARGET = 'iscsitarget' -FSS_ISCSI_CLIENT_INITIATORS = 'iscsiclientinitiators' -FSS_SERVER = 'server' -FSS_OPTIONS = 'options' -FSS_PORTAL = 'defaultiscsiportal' -FSS_PROPERTIES = 'properties' -FSS_HOST = 'host' -FSS_RETURN_CODE = 'rcs' -FSS_AUTH = 'auth' -FSS_LOGIN = 'login' -FSS_SINGLE_TYPE = 'single' - -POST = 'POST' -GET = 'GET' -PUT = 'PUT' -DELETE = 'DELETE' -GROUP_PREFIX = 'OpenStack-' -PRODUCT_NAME = 'ipstor' -SESSION_COOKIE_NAME = 'session_id' -RETRY_LIST = ['107', '2147680512'] - -MAXSNAPSHOTS = 1000 -OPERATION_TIMEOUT = 60 * 60 -RETRY_CNT = 5 -RETRY_INTERVAL = 15 - -LOG = logging.getLogger(__name__) - - -class RESTProxy(object): - def __init__(self, config): - self.fss_host = config.san_ip - self.fss_defined_pools = config.fss_pools - if config.additional_retry_list: - RETRY_LIST.append(config.additional_retry_list) - - self.FSS = FSSRestCommon(config) - self.session_id = None - - # naming - def _get_vol_name_from_snap(self, snapshot): - """Return the name of the snapshot that FSS will use.""" - return "cinder-%s" % snapshot["volume_id"] - - def _get_fss_volume_name(self, volume): - """Return the name of the volume FSS will use.""" - return 
"cinder-%s" % volume["id"] - - def _get_group_name_from_id(self, id): - return "cinder-consisgroup-%s" % id - - def _encode_name(self, name): - uuid_str = name.replace("-", "") - vol_uuid = uuid.UUID('urn:uuid:%s' % uuid_str) - newuuid = (base64.urlsafe_b64encode(vol_uuid.bytes). - decode('utf-8').strip('=')) - return "cinder-%s" % newuuid - - def do_setup(self): - self.session_id = self.FSS.fss_login() - - def _convert_size_to_gb(self, size): - s = round(float(size) / units.Gi, 2) - if s > 0: - return s - else: - return 0 - - def _convert_size_to_mb(self, size): - return size * units.Ki - - def _get_pools_info(self): - qpools = [] - poolinfo = {} - total_capacity_gb = 0 - used_gb = 0 - try: - output = self.list_pool_info() - if output and "storagepools" in output['data']: - for item in output['data']['storagepools']: - if item['name'].startswith(GROUP_PREFIX) and ( - six.text_type(item['id']) in - self.fss_defined_pools.values()): - poolid = int(item['id']) - qpools.append(poolid) - - if not qpools: - msg = _('The storage pool information is empty or not correct') - raise exception.DriverNotInitialized(msg) - - # Query pool detail information - for poolid in qpools: - output = self.list_pool_info(poolid) - total_capacity_gb += ( - self._convert_size_to_gb(output['data']['size'])) - used_gb += (self._convert_size_to_gb(output['data']['used'])) - - except Exception: - msg = (_('Unexpected exception during get pools info.')) - LOG.exception(msg) - raise exception.VolumeBackendAPIException(data=msg) - - poolinfo['total_capacity_gb'] = total_capacity_gb - poolinfo['used_gb'] = used_gb - poolinfo['QoS_support'] = False - poolinfo['reserved_percentage'] = 0 - - return poolinfo - - def list_pool_info(self, pool_id=None): - return self.FSS.list_pool_info(pool_id) - - def list_physicaladapter_info(self, adapter_id=None): - return self.FSS.list_physicaladapter_info(adapter_id) - - def _checking_adapter_type(self, id): - adapter_type = '' - output = 
self.list_physicaladapter_info() - if "physicaladapters" in output['data']: - physicaladapters = output['data']['physicaladapters'] - if physicaladapters['id'] == id: - adapter_type = physicaladapters['type'] - return adapter_type - - def _selected_pool_id(self, pool_info, pool_type=None): - _pool_id = 0 - if len(pool_info) == 1 and "A" in pool_info: - _pool_id = pool_info['A'] - elif len(pool_info) == 2 and "P" in pool_info and "O" in pool_info: - if pool_type: - if pool_type == "P": - _pool_id = pool_info['P'] - elif pool_type == "O": - _pool_id = pool_info['O'] - return _pool_id - - def create_vdev(self, volume): - sizemb = self._convert_size_to_mb(volume["size"]) - volume_name = self._get_fss_volume_name(volume) - params = dict(category="virtual", - sizemb=sizemb, - name=volume_name) - pool_id = self._selected_pool_id(self.fss_defined_pools, "P") - params.update(storagepoolid=pool_id) - return volume_name, self.FSS.create_vdev(params) - - def create_tv_from_cdp_tag(self, volume_metadata, volume): - tv_vid = '' - cdp_tag = '' - - if 'cdptag' in volume_metadata: - tv_vid = str(volume_metadata['timeview']) + '_0' - cdp_tag = str(volume_metadata['cdptag']) - - if 'rawtimestamp' in volume_metadata: - tv_vid = '{0}_{1}'.format(str(volume_metadata['timeview']), - str(volume_metadata['rawtimestamp'])) - volume_name = self._get_fss_volume_name(volume) - sizemb = self._convert_size_to_mb(volume['size']) - params = dict(name=volume_name, - automaticexpansion=dict(enabled=False), - timeviewcopy=True) - if cdp_tag: - params.update(cdpjournaltag=cdp_tag) - - pool_id = self._selected_pool_id(self.fss_defined_pools, "O") - params.update(storage={'storagepoolid': pool_id, 'sizemb': sizemb}) - metadata = self.FSS.create_timeview(tv_vid, params) - return volume_name, metadata - - def create_thin_vdev(self, volume_metadata, volume): - thin_size = 0 - size = volume["size"] - sizemb = self._convert_size_to_mb(size) - params = {'category': 'virtual'} - - if 'thinprovisioned' in 
volume_metadata: - if volume_metadata['thinprovisioned'] is False: - msg = (_('If you want to create a thin provisioning volume,' - ' this param must be True.')) - raise exception.VolumeBackendAPIException(msg) - - if 'thinsize' in volume_metadata: - thin_size = int(volume_metadata['thinsize']) - - if size < 10: - msg = _('The resource is a FSS thin device, minimum size is ' - '10240 MB.') - raise exception.VolumeBackendAPIException(msg) - else: - try: - if thin_size > size: - msg = _('The allocated size must less than total size.') - raise exception.VolumeBackendAPIException(msg) - except Exception: - msg = _('The resource is a thin device, thin size is invalid.') - raise exception.VolumeBackendAPIException(msg) - - thin_size = self._convert_size_to_mb(thin_size) - thin_disk = dict( - enabled=True, - fullsizemb=sizemb) - params.update(thinprovisioning=thin_disk) - params.update(sizemb=thin_size) - - pool_id = self._selected_pool_id(self.fss_defined_pools, "P") - params.update(storagepoolid=pool_id) - volume_name = self._get_fss_volume_name(volume) - params.update(name=volume_name) - return volume_name, self.FSS.create_vdev(params) - - def create_vdev_with_mirror(self, volume_metadata, volume): - - if 'mirrored' in volume_metadata: - if volume_metadata['mirrored'] is False: - msg = _('If you want to create a mirrored volume, this param ' - 'must be True.') - raise exception.VolumeBackendAPIException(data=msg) - - sizemb = self._convert_size_to_mb(volume["size"]) - volume_name = self._get_fss_volume_name(volume) - params = {'category': 'virtual', 'sizemb': sizemb, 'name': volume_name} - - pool_id = self._selected_pool_id(self.fss_defined_pools, "P") - params.update(storagepoolid=pool_id) - metadata = self.FSS.create_vdev(params) - if metadata: - vid = self._get_fss_vid_from_name(volume_name, FSS_SINGLE_TYPE) - mirror_params = {'category': 'virtual', - 'selectioncriteria': 'anydrive', - 'mirrortarget': "virtual"} - - pool_id = 
self._selected_pool_id(self.fss_defined_pools, "O") - mirror_params.update(storagepoolid=pool_id) - - ret = self.FSS.create_mirror(vid, mirror_params) - if ret: - return volume_name, metadata - - def _get_fss_vid_from_name(self, volume_name, fss_type=None): - vid = [] - output = self.FSS.list_fss_volume_info() - try: - if "virtualdevices" in output['data']: - for item in output['data']['virtualdevices']: - if item['name'] in volume_name: - vid.append(item['id']) - except Exception: - msg = (_('Can not find cinder volume - %(volumeName)s') % - {"volumeName": volume_name}) - raise exception.VolumeBackendAPIException(msg) - - if fss_type is not None and fss_type == FSS_SINGLE_TYPE: - vid = ''.join(str(x) for x in vid) - return vid - - def _get_fss_gid_from_name(self, group_name): - gid = '' - output = self.FSS.list_group_info() - if "snapshotgroups" in output['data']: - for item in output['data']['snapshotgroups']: - if item['name'] == group_name: - gid = item['id'] - break - if gid == '': - msg = (_('Can not find consistency group: %s.') % group_name) - raise exception.VolumeBackendAPIException(msg) - return gid - - def _get_fss_group_membercount(self, gid): - membercount = 0 - output = self.FSS.list_group_info(gid) - if "membercount" in output['data']: - membercount = output['data']['membercount'] - return membercount - - def _get_vdev_id_from_group_id(self, group_id): - vidlist = [] - output = self.FSS.list_group_info(group_id) - if "virtualdevices" in output['data']: - for item in output['data']['virtualdevices']: - vidlist.append(item['id']) - return vidlist - - def clone_volume(self, new_vol_name, source_volume_name): - volume_metadata = {} - new_vid = '' - vid = self._get_fss_vid_from_name(source_volume_name, FSS_SINGLE_TYPE) - mirror_params = dict( - category='virtual', - selectioncriteria='anydrive', - mirrortarget="virtual" - ) - pool_id = self._selected_pool_id(self.fss_defined_pools, "O") - mirror_params.update(storagepoolid=pool_id) - ret1 = 
self.FSS.create_mirror(vid, mirror_params) - - if ret1: - if ret1['rc'] != 0: - failed_ret = self.FSS.get_fss_error_code(ret1['rc']) - raise exception.VolumeBackendAPIException(data=failed_ret) - - ret2 = self.FSS.sync_mirror(vid) - self.FSS._random_sleep() - if ret2['rc'] == 0: - self.FSS._check_mirror_sync_finished(vid, OPERATION_TIMEOUT) - ret3 = self.FSS.promote_mirror(vid, new_vol_name) - if ret3 and ret3['rc'] == 0: - new_vid = ret3['id'] - - volume_metadata['FSS-vid'] = new_vid - return volume_metadata - - def delete_vdev(self, volume): - volume_name = self._get_fss_volume_name(volume) - vid = self._get_fss_vid_from_name(volume_name, FSS_SINGLE_TYPE) - if vid: - return self.FSS.delete_vdev(vid) - else: - msg = _('vid is null. FSS failed to delete volume.') - raise exception.VolumeBackendAPIException(data=msg) - - def create_snapshot(self, snapshot): - snap_metadata = {} - volume_name = self._get_vol_name_from_snap(snapshot) - snap_name = snapshot["display_name"] - size = snapshot['volume_size'] - vid = self._get_fss_vid_from_name(volume_name, FSS_SINGLE_TYPE) - if not vid: - msg = _('vid is null. FSS failed to create snapshot.') - raise exception.VolumeBackendAPIException(data=msg) - - (snap, tm_policy, vdev_size) = (self.FSS. 
- _check_if_snapshot_tm_exist(vid)) - if not snap: - self.create_vdev_snapshot(vid, self._convert_size_to_mb(size)) - if not tm_policy: - pool_id = self._selected_pool_id(self.fss_defined_pools, "O") - self.FSS.create_timemark_policy(vid, storagepoolid=pool_id) - if not snap_name: - snap_name = "snap-%s" % time.strftime('%Y%m%d%H%M%S') - - if len(snap_name) > 32: - snap_name = self._encode_name(snapshot["id"]) - - self.FSS.create_timemark(vid, snap_name) - snap_metadata['fss_tm_comment'] = snap_name - return snap_metadata - - def delete_snapshot(self, snapshot): - volume_name = self._get_vol_name_from_snap(snapshot) - snap_name = snapshot["display_name"] - vid = self._get_fss_vid_from_name(volume_name, FSS_SINGLE_TYPE) - - if not vid: - msg = _('vid is null. FSS failed to delete snapshot') - raise exception.VolumeBackendAPIException(data=msg) - if not snap_name: - if ('metadata' in snapshot and 'fss_tm_comment' in - snapshot['metadata']): - snap_name = snapshot['metadata']['fss_tm_comment'] - - if len(snap_name) > 32: - snap_name = self._encode_name(snapshot["id"]) - - tm_info = self.FSS.get_timemark(vid) - rawtimestamp = self._get_timestamp(tm_info, snap_name) - if rawtimestamp: - timestamp = '%s_%s' % (vid, rawtimestamp) - self.FSS.delete_timemark(timestamp) - - final_tm_data = self.FSS.get_timemark(vid) - if "timemark" in final_tm_data['data']: - if not final_tm_data['data']['timemark']: - self.FSS.delete_timemark_policy(vid) - self.FSS.delete_vdev_snapshot(vid) - - def _get_timestamp(self, tm_data, encode_snap_name): - timestamp = '' - if "timemark" in tm_data['data']: - for item in tm_data['data']['timemark']: - if "comment" in item and item['comment'] == encode_snap_name: - timestamp = item['rawtimestamp'] - break - return timestamp - - def create_volume_from_snapshot(self, volume, snapshot): - volume_metadata = {} - volume_name = self._get_vol_name_from_snap(snapshot) - snap_name = snapshot["display_name"] - new_vol_name = self._get_fss_volume_name(volume) - 
vid = self._get_fss_vid_from_name(volume_name, FSS_SINGLE_TYPE) - if not vid: - msg = _('vid is null. FSS failed to create_volume_from_snapshot.') - raise exception.VolumeBackendAPIException(data=msg) - - if not snap_name: - if ('metadata' in snapshot) and ('fss_tm_comment' - in snapshot['metadata']): - snap_name = snapshot['metadata']['fss_tm_comment'] - if len(snap_name) > 32: - snap_name = self._encode_name(snapshot["id"]) - - tm_info = self.FSS.get_timemark(vid) - rawtimestamp = self._get_timestamp(tm_info, snap_name) - if not rawtimestamp: - msg = _('rawtimestamp is null. FSS failed to ' - 'create_volume_from_snapshot.') - raise exception.VolumeBackendAPIException(data=msg) - - timestamp = '%s_%s' % (vid, rawtimestamp) - pool_id = self._selected_pool_id(self.fss_defined_pools, "P") - output = self.FSS.copy_timemark( - timestamp, storagepoolid=pool_id, name=new_vol_name) - if output['rc'] == 0: - vid = output['id'] - self.FSS._random_sleep() - if self.FSS._check_tm_copy_finished(vid, OPERATION_TIMEOUT): - volume_metadata['FSS-vid'] = vid - return volume_name, volume_metadata - - def extend_vdev(self, volume_name, vol_size, new_size): - if new_size > vol_size: - vid = self._get_fss_vid_from_name(volume_name, FSS_SINGLE_TYPE) - size = self._convert_size_to_mb(new_size - vol_size) - params = dict( - action='expand', - sizemb=size - ) - return self.FSS.extend_vdev(vid, params) - - def list_volume_info(self, vid): - return self.FSS.list_fss_volume_info(vid) - - def rename_vdev(self, vid, new_vol_name): - params = dict( - action='update', - name=new_vol_name - ) - return self.FSS.rename_vdev(vid, params) - - def assign_iscsi_vdev(self, client_id, target_id, vid): - params = dict( - action="assign", - virtualdeviceids=[vid], - iscsi=dict(target=target_id) - ) - return self.FSS.assign_vdev(client_id, params) - - def assign_fc_vdev(self, client_id, vid): - params = dict( - action="assign", - virtualdeviceids=[vid], - fc=dict( - fcmapping='alltoall', - 
accessmode='readwritenonexclusive') - ) - return self.FSS.assign_vdev(client_id, params) - - def unassign_vdev(self, client_id, vid): - params = dict( - action="unassign", - virtualdeviceid=vid - ) - return self.FSS.unassign_vdev(client_id, params) - - def _create_vdev_snapshot(self, volume_name, size): - vid = self._get_fss_vid_from_name(volume_name, FSS_SINGLE_TYPE) - return self.create_vdev_snapshot(vid, self._convert_size_to_mb(size)) - - def create_vdev_snapshot(self, vid, size): - pool_id = self._selected_pool_id(self.fss_defined_pools, "O") - params = dict( - idlist=[vid], - selectioncriteria='anydrive', - policy='preserveall', - sizemb=size, - storagepoolid=pool_id - ) - return self.FSS.create_vdev_snapshot(params) - - def create_group(self, group): - group_name = self._get_group_name_from_id(group['id']) - params = dict( - name=group_name - ) - return self.FSS.create_group(params) - - def destroy_group(self, group): - group_name = self._get_group_name_from_id(group['id']) - gid = self._get_fss_gid_from_name(group_name) - return self.FSS.destroy_group(gid) - - def _add_volume_to_consistency_group(self, group_id, vol_name): - self.set_group(group_id, addvollist=[vol_name]) - - def set_group(self, group_id, **kwargs): - group_name = self._get_group_name_from_id(group_id) - gid = self._get_fss_gid_from_name(group_name) - - join_params = dict() - leave_params = dict() - if kwargs.get('addvollist'): - joing_vid = self._get_fss_vid_from_name(kwargs['addvollist']) - join_params.update( - action='join', - virtualdevices=joing_vid - ) - if kwargs.get('remvollist'): - leave_vid = self._get_fss_vid_from_name(kwargs['remvollist']) - leave_params.update( - action='leave', - virtualdevices=leave_vid - ) - return self.FSS.set_group(gid, join_params, leave_params) - - def create_cgsnapshot(self, cgsnapshot): - group_name = self._get_group_name_from_id( - cgsnapshot['consistencygroup_id']) - gsnap_name = self._encode_name(cgsnapshot['id']) - gid = 
self._get_fss_gid_from_name(group_name) - vidlist = self._get_vdev_id_from_group_id(gid) - pool_id = self._selected_pool_id(self.fss_defined_pools, "O") - - for vid in vidlist: - (snap, tm_policy, sizemb) = (self.FSS. - _check_if_snapshot_tm_exist(vid)) - if not snap: - self.create_vdev_snapshot(vid, sizemb) - if not tm_policy: - self.FSS.create_timemark_policy(vid, storagepoolid=pool_id) - - group_tm_policy = self.FSS._check_if_group_tm_enabled(gid) - if not group_tm_policy: - self.create_group_timemark_policy(gid) - - self.create_group_timemark(gid, gsnap_name) - - def create_group_timemark_policy(self, gid): - tm_params = dict( - automatic=dict(enabled=False), - maxtimemarkcount=MAXSNAPSHOTS - ) - return self.FSS.create_group_timemark_policy(gid, tm_params) - - def create_group_timemark(self, gid, gsnap_name): - params = dict( - comment=gsnap_name, - priority='medium', - snapshotnotification=False - ) - return self.FSS.create_group_timemark(gid, params) - - def delete_cgsnapshot(self, cgsnapshot): - group_name = self._get_group_name_from_id( - cgsnapshot['consistencygroup_id']) - encode_snap_name = self._encode_name(cgsnapshot['id']) - gid = self._get_fss_gid_from_name(group_name) - - if not gid: - msg = _('gid is null. 
FSS failed to delete cgsnapshot.') - raise exception.VolumeBackendAPIException(data=msg) - - if self._get_fss_group_membercount(gid) != 0: - tm_info = self.FSS.get_group_timemark(gid) - rawtimestamp = self._get_timestamp(tm_info, encode_snap_name) - timestamp = '%s_%s' % (gid, rawtimestamp) - self.delete_group_timemark(timestamp) - - final_tm_data = self.FSS.get_group_timemark(gid) - if "timemark" in final_tm_data['data']: - if not final_tm_data['data']['timemark']: - self.FSS.delete_group_timemark_policy(gid) - - def delete_group_timemark(self, timestamp): - params = dict( - deleteallbefore=False - ) - return self.FSS.delete_group_timemark(timestamp, params) - - def _check_iscsi_option(self): - output = self.FSS.get_server_options() - if "iscsitarget" in output['data']: - if not output['data']['iscsitarget']: - self.FSS.set_server_options('iscsitarget') - - def _check_fc_target_option(self): - output = self.FSS.get_server_options() - if "fctarget" in output['data']: - if not output['data']['fctarget']: - self.FSS.set_server_options('fctarget') - - def _check_iocluster_state(self): - output = self.FSS.get_server_options() - if 'iocluster' not in output['data']: - msg = _('No iocluster information in given data.') - LOG.error(msg) - raise exception.VolumeBackendAPIException(data=msg) - return output['data']['iocluster'] - - def list_fc_target_wwpn(self): - return self.FSS.list_fc_target_wwpn() - - def list_fc_client_initiators(self): - return self.FSS.list_fc_client_initiators() - - def create_fc_client(self, cinder_host_name, free_initiator_wwpns): - client_id = 0 - params = dict( - name=cinder_host_name, - protocoltype=["fc"], - ipaddress=self.fss_host, - ostype='linux', - fcpolicy=dict( - initiators=[free_initiator_wwpns], - vsaenabled=False - ) - ) - client_info = self.FSS.create_client(params) - if client_info and client_info['rc'] == 0: - client_id = client_info['id'] - return client_id - - def list_iscsi_target_info(self, target_id=None): - return 
self.FSS.list_iscsi_target_info(target_id) - - def _check_fc_host_devices_empty(self, client_id): - is_empty = False - output = self.FSS.list_sanclient_info(client_id) - if 'data' not in output: - msg = _('No target in given data.') - LOG.error(msg) - raise exception.VolumeBackendAPIException(data=msg) - - if 'fcdevices' not in output['data']: - msg = _('No fcdevices in given data.') - LOG.error(msg) - raise exception.VolumeBackendAPIException(data=msg) - - if len(output['data']['fcdevices']) == 0: - is_empty = True - self.FSS.delete_client(client_id) - return is_empty - - def create_iscsi_client(self, cinder_host_name, initiator): - params = dict( - name=cinder_host_name, - protocoltype=["iscsi"], - ipaddress=self.fss_host, - ostype='linux', - iscsipolicy=dict( - initiators=[initiator], - authentication=dict(enabled=False, - mutualchap=dict(enabled=False)) - ) - ) - return self.FSS.create_client(params) - - def create_iscsitarget(self, client_id, initiator, fss_hosts): - params = dict( - clientid=client_id, - name=initiator, - ipaddress=fss_hosts, - accessmode='readwritenonexclusive' - ) - return self.FSS.create_iscsitarget(params) - - def _get_iscsi_host(self, connector): - target_info = self.list_iscsi_target_info() - if 'data' not in target_info: - msg = _('No data information in return info.') - LOG.error(msg) - raise exception.VolumeBackendAPIException(data=msg) - if 'iscsitargets' not in target_info['data']: - msg = _('No iscsitargets in return info.') - LOG.error(msg) - raise exception.VolumeBackendAPIException(data=msg) - - if target_info['data']['iscsitargets']: - iscsitargets = target_info['data']['iscsitargets'] - for iscsitarget in iscsitargets: - if connector["initiator"] in iscsitarget["name"]: - target_id = iscsitarget["id"] - client_id = iscsitarget["clientid"] - return client_id, target_id - return None, None - - def _create_iscsi_host(self, host_name, initiator, fss_hosts): - client_id = '' - target_id = '' - client_info = 
self.create_iscsi_client(host_name, initiator) - if client_info and client_info['rc'] == 0: - client_id = client_info['id'] - - target_info = self.create_iscsitarget(client_id, initiator, fss_hosts) - if target_info['rc'] == 0: - target_id = target_info['id'] - return client_id, target_id - - def _get_fc_client_initiators(self, connector): - fc_initiators_assigned = [] - fc_available_initiator = [] - fc_initiators_info = self.list_fc_client_initiators() - if 'data' not in fc_initiators_info: - raise ValueError(_('No data information in return info.')) - - if fc_initiators_info['data']: - fc_initiators = fc_initiators_info['data'] - for fc_initiator in fc_initiators: - if fc_initiator['wwpn'] in connector['wwpns']: - fc_available_initiator.append(str(fc_initiator['wwpn'])) - fc_initiators_assigned.append(dict( - wwpn=str(fc_initiator['wwpn']), - assigned=fc_initiator['assigned'])) - return fc_available_initiator, fc_initiators_assigned - - def fc_initialize_connection(self, volume, connector, fss_hosts): - """Connect the host and volume; return dict describing connection.""" - vid = 0 - fc_target_info = {} - free_fc_initiator = None - - volume_name = self._get_fss_volume_name(volume) - vid = self._get_fss_vid_from_name(volume_name, FSS_SINGLE_TYPE) - if not vid: - msg = (_('Can not find cinder volume - %s.') % volume_name) - raise exception.VolumeBackendAPIException(msg) - - available_initiator, fc_initiators_info = ( - self._get_fc_client_initiators(connector)) - - if fc_initiators_info is None: - msg = _('No FC initiator can be added to host.') - LOG.error(msg) - raise exception.VolumeBackendAPIException(data=msg) - - for fc_initiator in fc_initiators_info: - value = fc_initiator['assigned'] - if len(value) == 0: - free_fc_initiator = fc_initiator['wwpn'] - - if free_fc_initiator is None: - msg = _('No free FC initiator can be assigned to host.') - LOG.error(msg) - raise exception.VolumeBackendAPIException(data=msg) - - initiator = connector["initiator"] - 
host_name = GROUP_PREFIX + '%s-' % connector["host"] - - initiator_name = initiator.split(':') - idx = len(initiator_name) - 1 - client_host_name = host_name + initiator_name[ - idx] + '_FC-wwpn-' + free_fc_initiator - - client_id = self.create_fc_client(client_host_name, free_fc_initiator) - - try: - self.assign_fc_vdev(client_id, vid) - time.sleep(3) - except FSSHTTPError as err: - with excutils.save_and_reraise_exception() as ctxt: - if (err.code == 2415984845 and "XML_ERROR_CLIENT_EXIST" - in err.text): - ctxt.reraise = False - LOG.warning('Assign volume failed with message: %(msg)s.', - {"msg": err.reason}) - finally: - lun = self.FSS._get_fc_client_info(client_id, vid) - - fc_target_info['lun'] = lun - fc_target_info['available_initiator'] = available_initiator - - if not fc_target_info: - msg = _('Failed to get iSCSI target info for the LUN: %s.') - raise exception.VolumeBackendAPIException(data=msg % volume_name) - return fc_target_info - - def fc_terminate_connection(self, volume, connector): - client_id = 0 - volume_name = self._get_fss_volume_name(volume) - vid = self._get_fss_vid_from_name(volume_name, FSS_SINGLE_TYPE) - output = self.list_volume_info(vid) - if 'data' not in output: - msg = _('No vdev information in given data') - LOG.error(msg) - raise exception.VolumeBackendAPIException(data=msg) - if 'clients' not in output['data']: - msg = _('No clients in vdev information.') - LOG.error(msg) - raise exception.VolumeBackendAPIException(data=msg) - - client_info = output['data']['clients'] - for fcclients in client_info: - client_id = int(fcclients['id']) - - if client_id == 0: - msg = _( - 'Can not find client id. 
The connection target name is %s.') - raise exception.VolumeBackendAPIException( - data=msg % connector["initiator"]) - try: - self.unassign_vdev(client_id, vid) - except FSSHTTPError as err: - with excutils.save_and_reraise_exception() as ctxt: - if (err.code == 2415984988 and - "XML_ERROR_VIRTUAL_DEV_NOT_ASSIGNED_TO_iSCSI_TARGET" - in err.text): - ctxt.reraise = False - LOG.warning('Disconnection failed with message: %(msg)s.', - {"msg": err.reason}) - return client_id - - def initialize_connection_iscsi(self, volume, connector, fss_hosts): - """Connect the host and volume; return dict describing connection.""" - vid = 0 - iscsi_target_info = {} - self._check_iscsi_option() - client_id, target_id = self._get_iscsi_host(connector) - - if target_id is None: - initiator = connector["initiator"] - host_name = GROUP_PREFIX + '%s-' % connector["host"] - - initiator_info = initiator.split(':') - idx = len(initiator_info) - 1 - client_host_name = host_name + initiator_info[idx] - - client_id, target_id = self._create_iscsi_host(client_host_name, - initiator, - fss_hosts) - volume_name = self._get_fss_volume_name(volume) - try: - vid = self._get_fss_vid_from_name(volume_name, FSS_SINGLE_TYPE) - if not vid: - msg = (_('Can not find cinder volume - %(volumeName)s.') % - {"volumeName": volume_name}) - raise exception.VolumeBackendAPIException(msg) - - self.assign_iscsi_vdev(client_id, target_id, vid) - time.sleep(3) - except FSSHTTPError as err: - with excutils.save_and_reraise_exception() as ctxt: - if (err.code == 2415984989 and - "XML_ERROR_VIRTUAL_DEV_ASSIGNED_TO_iSCSI_TARGET" in - err.text): - ctxt.reraise = False - LOG.warning("Assign volume failed with message: %(msg)s.", - {"msg": err.reason}) - finally: - (lun, target_name) = self.FSS._get_iscsi_target_info(client_id, - vid) - iscsi_target_info['lun'] = lun - iscsi_target_info['iqn'] = target_name - - if not iscsi_target_info: - msg = _('Failed to get iSCSI target info for the LUN: %s') - raise 
exception.VolumeBackendAPIException(data=msg % volume_name) - return iscsi_target_info - - def terminate_connection_iscsi(self, volume, connector): - volume_name = self._get_fss_volume_name(volume) - vid = self._get_fss_vid_from_name(volume_name, FSS_SINGLE_TYPE) - client_id, target_id = self._get_iscsi_host(connector) - if not client_id: - msg = _('Can not find client id. The connection target name ' - 'is %s.') - raise exception.VolumeBackendAPIException( - data=msg % connector["initiator"]) - try: - self.unassign_vdev(client_id, vid) - except FSSHTTPError as err: - with excutils.save_and_reraise_exception() as ctxt: - if (err.code == 2415984988 and - "XML_ERROR_VIRTUAL_DEV_NOT_ASSIGNED_TO_iSCSI_TARGET" - in err.text): - ctxt.reraise = False - LOG.warning("Disconnection failed with message: %(msg)s.", - {"msg": err.reason}) - finally: - is_empty = self.FSS._check_host_mapping_status(client_id, - target_id) - - if is_empty: - self.FSS.delete_iscsi_target(target_id) - self.FSS.delete_client(client_id) - - def _get_existing_volume_ref_vid(self, existing_ref): - if 'source-id' in existing_ref: - vid = existing_ref['source-id'] - else: - reason = _("FSSISCSIDriver manage_existing requires vid to " - "identify an existing volume.") - raise exception.ManageExistingInvalidReference( - existing_ref=existing_ref, reason=reason) - vdev_info = self.list_volume_info(vid) - if not vdev_info: - raise exception.ManageExistingInvalidReference( - existing_ref=existing_ref, - reason=_("Unable to find volume with FSS vid =%s.") % vid) - - if 'data' not in vdev_info: - msg = _('No vdev information in given data.') - LOG.error(msg) - raise exception.VolumeBackendAPIException(data=msg) - if 'sizemb' not in vdev_info['data']: - msg = _('No vdev sizemb in given data.') - LOG.error(msg) - raise exception.VolumeBackendAPIException(data=msg) - - return vdev_info['data']['sizemb'] - - def _manage_existing_volume(self, vid, volume): - new_vol_name = self._get_fss_volume_name(volume) - try: - 
self.rename_vdev(vid, new_vol_name) - except FSSHTTPError as err: - with excutils.save_and_reraise_exception() as ctxt: - ctxt.reraise = False - LOG.warning("Volume manage_existing_volume was unable " - "to rename the volume, error message: %s.", - err.reason) - - def unmanage(self, volume): - volume_name = self._get_fss_volume_name(volume) - unmanaged_vol_name = volume_name + "-unmanaged" - try: - vid = self._get_fss_vid_from_name(volume_name, FSS_SINGLE_TYPE) - self.rename_vdev(vid, unmanaged_vol_name) - except FSSHTTPError as err: - LOG.warning("Volume unmanage was unable to rename the volume," - " error message: %(msg)s.", {"msg": err.reason}) - - -class FSSRestCommon(object): - def __init__(self, config): - self.hostip = config.san_ip - self.username = config.san_login - self.password = config.san_password - self.session_id = None - self.fss_debug = config.fss_debug - - def _fss_request(self, method, path, data=None): - json_data = None - url = "http://%(ip)s/%(product)s/%(path)s" % { - "ip": self.hostip, "product": PRODUCT_NAME, "path": path} - headers = {"Content-Type": "application/json"} - if self.session_id is not None: - cookie = dict( - Cookie=SESSION_COOKIE_NAME + '=' + self.session_id - ) - headers.update(cookie) - - if data is not None: - request_body = json.dumps(data).encode("utf-8") - else: - request_body = None - - connection = http_client.HTTPConnection(self.hostip, 80, timeout=60) - - if self.fss_debug: - LOG.info("[FSS_RESTAPI]====%(method)s@url=%(url)s ====" - "@request_body=%(body)s===", - {"method": method, - "url": url, - "body": request_body}) - - attempt = 1 - while True: - connection.request(method, url, request_body, headers) - response = connection.getresponse() - response_body = response.read() - if response_body: - try: - data = json.loads(response_body) - json_data = json.dumps(data) - json_data = json.loads(json_data.decode('utf8')) - except ValueError: - pass - - if self.fss_debug: - LOG.info("[FSS_RESTAPI]==@json_data: %s ==", 
json_data) - - if response.status == 200: - return json_data - elif response.status == 404: - msg = (_('FSS REST API return failed, method=%(method)s, ' - 'uri=%(url)s, response=%(response)s') % { - "method": method, - "url": url, - "response": response_body}) - raise exception.VolumeBackendAPIException(msg) - else: - err_code = json_data['rc'] - if (attempt > RETRY_CNT) or (str(err_code) not in RETRY_LIST): - err_target = ("method=%(method)s, url=%(url)s, " - "response=%(response)s" % - {"method": method, "url": url, - "response": response_body}) - err_response = self.get_fss_error_code(err_code) - err = dict( - code=err_code, - text=err_response['key'], - reason=err_response['message'] - ) - raise FSSHTTPError(err_target, err) - attempt += 1 - LOG.warning("Retry with rc: %s.", err_code) - self._random_sleep(RETRY_INTERVAL) - if err_code == 107: - self.fss_login() - - def _random_sleep(self, interval=60): - nsleep = random.randint(10, interval * 10) - value = round(float(nsleep) / 10, 2) - time.sleep(value) - - # - # REST API session management methods - # - def fss_login(self): - url = '%s/%s' % (FSS_AUTH, FSS_LOGIN) - params = dict( - username=self.username, - password=self.password, - server=self.hostip - ) - data = self._fss_request(POST, url, params) - if 'id' in data: - self.session_id = data['id'] - return self.session_id - - # - # Physical Adapters management methods - # - - def list_physicaladapter_info(self, adapter_id=None): - url = '%s/%s' % (FSS_PHYSICALRESOURCE, FSS_PHYSICALADAPTER) - if adapter_id is not None: - url = '%s/%s/%s' % (FSS_PHYSICALRESOURCE, - FSS_PHYSICALADAPTER, adapter_id) - return self._fss_request(GET, url) - - def list_fc_target_wwpn(self): - url = '%s/%s/%s' % (FSS_PHYSICALRESOURCE, FSS_PHYSICALADAPTER, - FSS_FC_TGT_WWPN) - tgt_wwpn = [] - output = self._fss_request(GET, url) - if output['data']: - tgt_wwpns = output['data'] - for tgt_alias_wwpn in tgt_wwpns: - tgt_wwpn.append( - str(tgt_alias_wwpn['aliaswwpn'].replace('-', ''))) 
- return tgt_wwpn - - def list_fc_client_initiators(self): - url = '%s/%s/%s' % (FSS_PHYSICALRESOURCE, FSS_PHYSICALADAPTER, - FSS_FCCLIENTINITIATORS) - return self._fss_request(GET, url) - - # - # storage pool management methods - # - - def list_pool_info(self, pool_id=None): - url = '%s/%s' % (FSS_PHYSICALRESOURCE, FSS_STORAGE_POOL) - if pool_id is not None: - url = '%s/%s/%s' % (FSS_PHYSICALRESOURCE, - FSS_STORAGE_POOL, pool_id) - return self._fss_request(GET, url) - - # - # Volume and snapshot management methods - # - - def create_vdev(self, params): - metadata = {} - url = '%s/%s' % (FSS_LOGICALRESOURCE, FSS_SAN) - output = self._fss_request(POST, url, params) - if output: - if output['rc'] == 0: - metadata['FSS-vid'] = output['id'] - return metadata - - def _check_mirror_sync_finished(self, vid, timeout): - starttime = time.time() - while True: - self._random_sleep() - if time.time() > starttime + timeout: - msg = (_('FSS get mirror sync timeout on vid: %s ') % vid) - raise exception.VolumeBackendAPIException(data=msg) - elif self._check_mirror_sync_status(vid): - break - - def delete_vdev(self, vid): - url = '%s/%s/%s' % (FSS_LOGICALRESOURCE, FSS_SAN, vid) - return self._fss_request(DELETE, url, dict(force=True)) - - def extend_vdev(self, vid, params): - url = '%s/%s/%s' % (FSS_LOGICALRESOURCE, FSS_SAN, vid) - return self._fss_request(PUT, url, params) - - def rename_vdev(self, vid, params): - url = '%s/%s/%s' % (FSS_LOGICALRESOURCE, FSS_SAN, vid) - return vid, self._fss_request(PUT, url, params) - - def list_fss_volume_info(self, vid=None): - url = '%s/%s' % (FSS_LOGICALRESOURCE, FSS_SAN) - if vid is not None: - url = '%s/%s/%s' % (FSS_LOGICALRESOURCE, FSS_SAN, vid) - return self._fss_request(GET, url) - - def _get_fss_vid_from_name(self, volume_name, fss_type=None): - vid = [] - output = self.list_fss_volume_info() - try: - if "virtualdevices" in output['data']: - for item in output['data']['virtualdevices']: - if item['name'] in volume_name: - 
vid.append(item['id']) - except Exception: - msg = (_('Can not find cinder volume - %s') % volume_name) - raise exception.VolumeBackendAPIException(msg) - - if fss_type is not None and fss_type == FSS_SINGLE_TYPE: - vid = ''.join(str(x) for x in vid) - return vid - - def _check_if_snapshot_tm_exist(self, vid): - snapshotenabled = False - timemarkenabled = False - sizemb = 0 - output = self.list_fss_volume_info(vid) - if "snapshotenabled" in output['data']: - snapshotenabled = output['data']['snapshotenabled'] - if "timemarkenabled" in output['data']: - timemarkenabled = output['data']['timemarkenabled'] - if "sizemb" in output['data']: - sizemb = output['data']['sizemb'] - return (snapshotenabled, timemarkenabled, sizemb) - - def create_vdev_snapshot(self, params): - url = '%s/%s/%s' % (FSS_BATCH, FSS_LOGICALRESOURCE, - FSS_SNAPSHOT_RESOURCE) - return self._fss_request(POST, url, params) - - def create_timemark_policy(self, vid, **kwargs): - url = '%s/%s/%s' % (FSS_BATCH, FSS_LOGICALRESOURCE, FSS_TIMEMARKPOLICY) - params = dict( - idlist=[vid], - automatic=dict(enabled=False), - maxtimemarkcount=MAXSNAPSHOTS, - retentionpolicy=dict(mode='all'), - ) - if kwargs.get('storagepoolid'): - params.update(kwargs) - return self._fss_request(POST, url, params) - - def create_timemark(self, vid, snap_name): - url = '%s/%s/%s' % (FSS_LOGICALRESOURCE, FSS_TIMEMARK, vid) - params = dict( - comment=snap_name, - priority='medium', - snapshotnotification=False - ) - return self._fss_request(POST, url, params) - - def get_timemark(self, vid): - url = '%s/%s/%s' % (FSS_LOGICALRESOURCE, FSS_TIMEMARK, vid) - return self._fss_request(GET, url) - - def delete_timemark(self, timestamp): - url = '%s/%s/%s' % (FSS_LOGICALRESOURCE, FSS_TIMEMARK, timestamp) - params = dict( - deleteallbefore=False - ) - return self._fss_request(DELETE, url, params) - - def delete_timemark_policy(self, vid): - url = '%s/%s/%s' % (FSS_BATCH, FSS_LOGICALRESOURCE, FSS_TIMEMARKPOLICY) - params = dict( - 
idlist=[vid] - ) - return self._fss_request(DELETE, url, params) - - def delete_vdev_snapshot(self, vid): - url = '%s/%s/%s' % (FSS_BATCH, FSS_LOGICALRESOURCE, - FSS_SNAPSHOT_RESOURCE) - params = dict( - idlist=[vid] - ) - return self._fss_request(DELETE, url, params) - - def copy_timemark(self, timestamp, **kwargs): - url = '%s/%s/%s' % (FSS_LOGICALRESOURCE, FSS_TIMEMARK, timestamp) - params = dict( - action='copy', - includetimeviewdata=False - ) - params.update(kwargs) - return self._fss_request(PUT, url, params) - - def get_timemark_copy_status(self, vid): - url = '%s/%s/%s?type=operationstatus' % ( - FSS_LOGICALRESOURCE, FSS_TIMEMARK, vid) - return self._fss_request(GET, url) - - def _check_tm_copy_status(self, vid): - finished = False - output = self.get_timemark_copy_status(vid) - if output['timemarkoperationstatus']: - timemark_status = output['timemarkoperationstatus'] - if timemark_status['operation'] == "copy": - if timemark_status['status'] == 'completed': - finished = True - return finished - - def _check_tm_copy_finished(self, vid, timeout): - finished = False - starttime = time.time() - while True: - self._random_sleep() - if time.time() > starttime + timeout: - msg = (_('FSS get timemark copy timeout on vid: %s') % vid) - raise exception.VolumeBackendAPIException(data=msg) - elif self._check_tm_copy_status(vid): - finished = True - return finished - - # - # TimeView methods - # - - def create_timeview(self, tv_vid, params): - vid = '' - volume_metadata = {} - url = '%s/%s/%s' % (FSS_LOGICALRESOURCE, FSS_TIMEVIEW, tv_vid) - output = self._fss_request(POST, url, params) - if output and output['rc'] == 0: - if output['copyid'] == -1: - vid = output['id'] - else: - vid = output['copyid'] - volume_metadata['FSS-vid'] = vid - return volume_metadata - - # - # Mirror methods - # - - def create_mirror(self, vid, pool_id): - url = '%s/%s/%s' % (FSS_LOGICALRESOURCE, FSS_MIRROR, vid) - params = dict( - category='virtual', - selectioncriteria='anydrive', - 
mirrortarget="virtual" - ) - params.update(pool_id) - return self._fss_request(POST, url, params) - - def get_mirror_sync_status(self, vid): - url = '%s/%s/%s?type=syncstatus' % ( - FSS_LOGICALRESOURCE, FSS_MIRROR, vid) - return self._fss_request(GET, url) - - def _check_mirror_sync_status(self, vid): - finished = False - output = self.get_mirror_sync_status(vid) - if output['mirrorsyncstatus']: - mirrorsyncstatus = output['mirrorsyncstatus'] - if mirrorsyncstatus['status'] == "insync": - if mirrorsyncstatus['percentage'] == 0: - finished = True - return finished - - def _set_mirror(self, vid, **kwargs): - url = '%s/%s/%s' % (FSS_LOGICALRESOURCE, FSS_MIRROR, vid) - return self._fss_request(PUT, url, kwargs) - - def sync_mirror(self, vid): - return self._set_mirror(vid, action='sync') - - def promote_mirror(self, vid, new_volume_name): - return self._set_mirror(vid, action='promote', name=new_volume_name) - - # - # Host management methods - # - - def get_server_options(self): - url = '%s/%s' % (FSS_SERVER, FSS_OPTIONS) - return self._fss_request(GET, url) - - def set_server_options(self, action): - url = '%s/%s' % (FSS_SERVER, FSS_OPTIONS) - params = dict( - action=action, - enabled=True - ) - return self._fss_request(PUT, url, params) - - def get_server_name(self): - url = '%s/%s' % (FSS_SERVER, FSS_OPTIONS) - return self._fss_request(GET, url) - - # - # SAN Client management methods - # - - def list_client_initiators(self): - url = '%s/%s/%s' % (FSS_CLIENT, FSS_SANCLIENT, - FSS_ISCSI_CLIENT_INITIATORS) - return self._fss_request(GET, url) - - def get_default_portal(self): - url = '%s/%s/%s' % (FSS_SERVER, FSS_OPTIONS, FSS_PORTAL) - return self._fss_request(GET, url) - - def create_client(self, params): - url = '%s/%s' % (FSS_CLIENT, FSS_SANCLIENT) - return self._fss_request(POST, url, params) - - def list_sanclient_info(self, client_id=None): - url = '%s/%s' % (FSS_CLIENT, FSS_SANCLIENT) - if client_id is not None: - url = '%s/%s/%s' % (FSS_CLIENT, FSS_SANCLIENT, 
- client_id) - return self._fss_request(GET, url) - - def assign_vdev(self, client_id, params): - url = '%s/%s/%s' % (FSS_CLIENT, FSS_SANCLIENT, client_id) - return self._fss_request(PUT, url, params) - - def unassign_vdev(self, client_id, params): - url = '%s/%s/%s' % (FSS_CLIENT, FSS_SANCLIENT, client_id) - return self._fss_request(PUT, url, params) - - def _get_iscsi_target_info(self, client_id, vid): - lun = 0 - target_name = None - output = self.list_sanclient_info(client_id) - - if 'data' not in output: - msg = _('No target information in given data.') - LOG.error(msg) - raise exception.VolumeBackendAPIException(data=msg) - if 'iscsidevices' not in output['data']: - msg = _('No iscsidevices information in given data.') - LOG.error(msg) - raise exception.VolumeBackendAPIException(data=msg) - - for iscsidevices in output['data']['iscsidevices']: - if int(vid) == int(iscsidevices['id']): - lun = iscsidevices['lun'] - iscsitarget_info = iscsidevices['iscsitarget'] - for key, value in iscsitarget_info.items(): - if key == 'name': - target_name = value - - return lun, target_name - - def _check_host_mapping_status(self, client_id, target_id): - is_empty = False - hosting_cnt = 0 - output = self.list_sanclient_info(client_id) - if 'data' not in output: - msg = _('No target in given data.') - LOG.error(msg) - raise exception.VolumeBackendAPIException(data=msg) - if 'iscsidevices' not in output['data']: - msg = _('No iscsidevices information in given data.') - LOG.error(msg) - raise exception.VolumeBackendAPIException(data=msg) - - if len(output['data']['iscsidevices']) == 0: - is_empty = True - else: - for iscsidevices in output['data']['iscsidevices']: - iscsitarget_info = iscsidevices['iscsitarget'] - for key, value in iscsitarget_info.items(): - if key == 'id' and target_id == value: - hosting_cnt += 1 - - if hosting_cnt == 0: - is_empty = True - return is_empty - - def list_iscsi_target_info(self, target_id=None): - url = '%s/%s' % (FSS_CLIENT, FSS_ISCSI_TARGET) 
- if target_id is not None: - url = '%s/%s/%s' % (FSS_CLIENT, FSS_ISCSI_TARGET, - target_id) - return self._fss_request(GET, url) - - def _get_iscsi_target_id(self, initiator_iqn): - target_id = '' - client_id = '' - output = self.list_iscsi_target_info() - - if 'data' not in output: - msg = _('No target in given data.') - LOG.error(msg) - raise exception.VolumeBackendAPIException(data=msg) - if 'iscsitargets' not in output['data']: - msg = _('No iscsitargets for target.') - LOG.error(msg) - raise exception.VolumeBackendAPIException(data=msg) - - for targets in output['data']['iscsitargets']: - if 'name' in targets: - if initiator_iqn in targets['name']: - target_id = str(targets['id']) - client_id = str(targets['clientid']) - break - return target_id, client_id - - def create_iscsitarget(self, params): - url = '%s/%s' % (FSS_CLIENT, FSS_ISCSI_TARGET) - return self._fss_request(POST, url, params) - - def delete_iscsi_target(self, target_id): - url = '%s/%s/%s' % (FSS_CLIENT, FSS_ISCSI_TARGET, target_id) - params = dict( - force=True - ) - return self._fss_request(DELETE, url, params) - - def delete_client(self, client_id): - url = '%s/%s/%s' % (FSS_CLIENT, FSS_SANCLIENT, client_id) - return self._fss_request(DELETE, url) - - def _get_fc_client_info(self, client_id, vid): - lun = 0 - output = self.list_sanclient_info(client_id) - if 'data' not in output: - msg = _('No target information in given data.') - LOG.error(msg) - raise exception.VolumeBackendAPIException(data=msg) - if 'fcdevices' not in output['data']: - msg = _('No fcdevices information in given data.') - LOG.error(msg) - raise exception.VolumeBackendAPIException(data=msg) - - for fcdevices in output['data']['fcdevices']: - if int(vid) == int(fcdevices['id']): - lun = fcdevices['lun'] - - return lun - - # - # Group related methods - # - - def create_group(self, params): - url = '%s/%s' % (FSS_LOGICALRESOURCE, FSS_SNAPSHOT_GROUP) - return self._fss_request(POST, url, params) - - def list_group_info(self, 
gid=None): - if gid is not None: - url = '%s/%s/%s' % (FSS_LOGICALRESOURCE, FSS_SNAPSHOT_GROUP, gid) - else: - url = '%s/%s' % (FSS_LOGICALRESOURCE, FSS_SNAPSHOT_GROUP) - return self._fss_request(GET, url) - - def set_group(self, gid, join_params=None, leave_params=None): - url = '%s/%s/%s' % (FSS_LOGICALRESOURCE, FSS_SNAPSHOT_GROUP, gid) - if join_params: - self._fss_request(PUT, url, join_params) - if leave_params: - self._fss_request(PUT, url, leave_params) - - def create_group_timemark_policy(self, gid, params): - url = '%s/%s/%s/%s' % (FSS_LOGICALRESOURCE, - FSS_SNAPSHOT_GROUP, FSS_TIMEMARKPOLICY, gid) - return self._fss_request(POST, url, params) - - def _check_if_group_tm_enabled(self, gid): - timemarkenabled = False - output = self.list_group_info(gid) - if "timemarkenabled" in output['data']: - timemarkenabled = output['data']['timemarkenabled'] - return timemarkenabled - - def create_group_timemark(self, gid, params): - url = '%s/%s/%s/%s' % (FSS_LOGICALRESOURCE, - FSS_SNAPSHOT_GROUP, FSS_TIMEMARK, gid) - return self._fss_request(POST, url, params) - - def get_group_timemark(self, gid): - url = '%s/%s/%s/%s' % (FSS_LOGICALRESOURCE, - FSS_SNAPSHOT_GROUP, FSS_TIMEMARK, gid) - return self._fss_request(GET, url) - - def delete_group_timemark(self, timestamp, params): - url = '%s/%s/%s/%s' % (FSS_LOGICALRESOURCE, - FSS_SNAPSHOT_GROUP, FSS_TIMEMARK, timestamp) - return self._fss_request(DELETE, url, params) - - def delete_group_timemark_policy(self, gid): - url = '%s/%s/%s/%s' % (FSS_LOGICALRESOURCE, - FSS_SNAPSHOT_GROUP, FSS_TIMEMARKPOLICY, gid) - return self._fss_request(DELETE, url) - - def delete_snapshot_group(self, gid): - url = '%s/%s/%s' % (FSS_LOGICALRESOURCE, FSS_SNAPSHOT_GROUP, gid) - return self._fss_request(DELETE, url) - - def destroy_group(self, gid): - url = '%s/%s/%s' % (FSS_LOGICALRESOURCE, FSS_SNAPSHOT_GROUP, gid) - return self._fss_request(DELETE, url) - - def get_fss_error_code(self, err_id): - try: - url = '%s/%s/%s' % (FSS_SERVER, 
FSS_RETURN_CODE, err_id) - output = self._fss_request(GET, url) - if output['rc'] == 0: - return output - except Exception: - msg = (_('Can not find this error code:%s.') % err_id) - raise exception.APIException(reason=msg) - - -class FSSHTTPError(Exception): - - def __init__(self, target, response): - super(FSSHTTPError, self).__init__() - self.target = target - self.code = response['code'] - self.text = response['text'] - self.reason = response['reason'] - - def __str__(self): - msg = ("FSSHTTPError code {0} returned by REST at {1}: {2}\n{3}") - return msg.format(self.code, self.target, - self.reason, self.text) diff --git a/cinder/volume/drivers/fujitsu/__init__.py b/cinder/volume/drivers/fujitsu/__init__.py deleted file mode 100644 index e69de29bb..000000000 diff --git a/cinder/volume/drivers/fujitsu/eternus_dx_common.py b/cinder/volume/drivers/fujitsu/eternus_dx_common.py deleted file mode 100644 index 16649678b..000000000 --- a/cinder/volume/drivers/fujitsu/eternus_dx_common.py +++ /dev/null @@ -1,2160 +0,0 @@ -# Copyright (c) 2015 FUJITSU LIMITED -# Copyright (c) 2012 EMC Corporation. -# Copyright (c) 2012 OpenStack Foundation -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -# - -""" -Cinder Volume driver for Fujitsu ETERNUS DX S3 series. 
-""" -import ast -import base64 -import hashlib -import six -import time -from xml.etree.ElementTree import parse - -from cinder import exception -from cinder.i18n import _ -from cinder.volume import configuration as conf -from oslo_concurrency import lockutils -from oslo_config import cfg -from oslo_log import log as logging -from oslo_service import loopingcall -from oslo_utils import units - -LOG = logging.getLogger(__name__) -CONF = cfg.CONF - -try: - import pywbem - pywbemAvailable = True -except ImportError: - pywbemAvailable = False - -VOL_PREFIX = "FJosv_" -RAIDGROUP = 2 -TPPOOL = 5 -SNAPOPC = 4 -OPC = 5 -RETURN_TO_RESOURCEPOOL = 19 -DETACH = 8 -INITIALIZED = 2 -UNSYNCHRONIZED = 3 -BROKEN = 5 -PREPARED = 11 -REPL = "FUJITSU_ReplicationService" -STOR_CONF = "FUJITSU_StorageConfigurationService" -CTRL_CONF = "FUJITSU_ControllerConfigurationService" -STOR_HWID = "FUJITSU_StorageHardwareIDManagementService" - -UNDEF_MSG = 'Undefined Error!!' -JOB_RETRIES = 60 -JOB_INTERVAL_SEC = 10 - -# Error code keyword. -VOLUMENAME_IN_USE = 32788 -COPYSESSION_NOT_EXIST = 32793 -LUNAME_IN_USE = 4102 -LUNAME_NOT_EXIST = 4097 # Only for InvokeMethod(HidePaths). 
-EC_REC = 3 -FJ_ETERNUS_DX_OPT_opts = [ - cfg.StrOpt('cinder_eternus_config_file', - default='/etc/cinder/cinder_fujitsu_eternus_dx.xml', - help='config file for cinder eternus_dx volume driver'), -] - -POOL_TYPE_dic = { - RAIDGROUP: 'RAID_GROUP', - TPPOOL: 'Thinporvisioning_POOL', -} - -OPERATION_dic = { - SNAPOPC: RETURN_TO_RESOURCEPOOL, - OPC: DETACH, - EC_REC: DETACH, -} - -RETCODE_dic = { - '0': 'Success', - '1': 'Method Not Supported', - '4': 'Failed', - '5': 'Invalid Parameter', - '4096': 'Method Parameters Checked - Job Started', - '4097': 'Size Not Supported', - '4101': 'Target/initiator combination already exposed', - '4102': 'Requested logical unit number in use', - '32769': 'Maximum number of Logical Volume in a RAID group ' - 'has been reached', - '32770': 'Maximum number of Logical Volume in the storage device ' - 'has been reached', - '32771': 'Maximum number of registered Host WWN ' - 'has been reached', - '32772': 'Maximum number of affinity group has been reached', - '32773': 'Maximum number of host affinity has been reached', - '32785': 'The RAID group is in busy state', - '32786': 'The Logical Volume is in busy state', - '32787': 'The device is in busy state', - '32788': 'Element Name is in use', - '32792': 'No Copy License', - '32793': 'Session is not exist', - '32796': 'Quick Format Error', - '32801': 'The CA port is in invalid setting', - '32802': 'The Logical Volume is Mainframe volume', - '32803': 'The RAID group is not operative', - '32804': 'The Logical Volume is not operative', - '32808': 'No Thin Provisioning License', - '32809': 'The Logical Element is ODX volume', - '32811': 'This operation cannot be performed to the NAS resources', - '32812': 'This operation cannot be performed to the Storage Cluster ' - 'resources', - '32816': 'Fatal error generic', - '35302': 'Invalid LogicalElement', - '35304': 'LogicalElement state error', - '35316': 'Multi-hop error', - '35318': 'Maximum number of multi-hop has been reached', - '35324': 'RAID is 
broken', - '35331': 'Maximum number of session has been reached(per device)', - '35333': 'Maximum number of session has been reached(per SourceElement)', - '35334': 'Maximum number of session has been reached(per TargetElement)', - '35335': 'Maximum number of Snapshot generation has been reached ' - '(per SourceElement)', - '35346': 'Copy table size is not setup', - '35347': 'Copy table size is not enough', -} - -CONF.register_opts(FJ_ETERNUS_DX_OPT_opts, group=conf.SHARED_CONF_GROUP) - - -class FJDXCommon(object): - """Common code that does not depend on protocol.""" - - VERSION = "1.3.0" - stats = { - 'driver_version': VERSION, - 'free_capacity_gb': 0, - 'reserved_percentage': 0, - 'storage_protocol': None, - 'total_capacity_gb': 0, - 'vendor_name': 'FUJITSU', - 'QoS_support': False, - 'volume_backend_name': None, - } - - def __init__(self, prtcl, configuration=None): - - self.pywbemAvailable = pywbemAvailable - - self.protocol = prtcl - self.configuration = configuration - self.configuration.append_config_values(FJ_ETERNUS_DX_OPT_opts) - - if prtcl == 'iSCSI': - # Get iSCSI ipaddress from driver configuration file. 
- self.configuration.iscsi_ip_address = ( - self._get_drvcfg('EternusISCSIIP')) - - def create_volume(self, volume): - """Create volume on ETERNUS.""" - LOG.debug('create_volume, ' - 'volume id: %(vid)s, volume size: %(vsize)s.', - {'vid': volume['id'], 'vsize': volume['size']}) - - self.conn = self._get_eternus_connection() - volumesize = int(volume['size']) * units.Gi - volumename = self._create_volume_name(volume['id']) - - LOG.debug('create_volume, volumename: %(volumename)s, ' - 'volumesize: %(volumesize)u.', - {'volumename': volumename, - 'volumesize': volumesize}) - - # get poolname from driver configuration file - eternus_pool = self._get_drvcfg('EternusPool') - # Existence check the pool - pool = self._find_pool(eternus_pool) - - if 'RSP' in pool['InstanceID']: - pooltype = RAIDGROUP - else: - pooltype = TPPOOL - - configservice = self._find_eternus_service(STOR_CONF) - if configservice is None: - msg = (_('create_volume, volume: %(volume)s, ' - 'volumename: %(volumename)s, ' - 'eternus_pool: %(eternus_pool)s, ' - 'Storage Configuration Service not found.') - % {'volume': volume, - 'volumename': volumename, - 'eternus_pool': eternus_pool}) - LOG.error(msg) - raise exception.VolumeBackendAPIException(data=msg) - - LOG.debug('create_volume, ' - 'CreateOrModifyElementFromStoragePool, ' - 'ConfigService: %(service)s, ' - 'ElementName: %(volumename)s, ' - 'InPool: %(eternus_pool)s, ' - 'ElementType: %(pooltype)u, ' - 'Size: %(volumesize)u.', - {'service': configservice, - 'volumename': volumename, - 'eternus_pool': eternus_pool, - 'pooltype': pooltype, - 'volumesize': volumesize}) - - # Invoke method for create volume - rc, errordesc, job = self._exec_eternus_service( - 'CreateOrModifyElementFromStoragePool', - configservice, - ElementName=volumename, - InPool=pool, - ElementType=self._pywbem_uint(pooltype, '16'), - Size=self._pywbem_uint(volumesize, '64')) - - if rc == VOLUMENAME_IN_USE: # Element Name is in use - LOG.warning('create_volume, ' - 'volumename: 
%(volumename)s, ' - 'Element Name is in use.', - {'volumename': volumename}) - vol_instance = self._find_lun(volume) - element = vol_instance - elif rc != 0: - msg = (_('create_volume, ' - 'volumename: %(volumename)s, ' - 'poolname: %(eternus_pool)s, ' - 'Return code: %(rc)lu, ' - 'Error: %(errordesc)s.') - % {'volumename': volumename, - 'eternus_pool': eternus_pool, - 'rc': rc, - 'errordesc': errordesc}) - LOG.error(msg) - raise exception.VolumeBackendAPIException(data=msg) - else: - element = job['TheElement'] - - # Get eternus model name - try: - systemnamelist = ( - self._enum_eternus_instances('FUJITSU_StorageProduct')) - except Exception: - msg = (_('create_volume, ' - 'volume: %(volume)s, ' - 'EnumerateInstances, ' - 'cannot connect to ETERNUS.') - % {'volume': volume}) - LOG.error(msg) - raise exception.VolumeBackendAPIException(data=msg) - - LOG.debug('create_volume, ' - 'volumename: %(volumename)s, ' - 'Return code: %(rc)lu, ' - 'Error: %(errordesc)s, ' - 'Backend: %(backend)s, ' - 'Pool Name: %(eternus_pool)s, ' - 'Pool Type: %(pooltype)s.', - {'volumename': volumename, - 'rc': rc, - 'errordesc': errordesc, - 'backend': systemnamelist[0]['IdentifyingNumber'], - 'eternus_pool': eternus_pool, - 'pooltype': POOL_TYPE_dic[pooltype]}) - - # Create return value. 
- element_path = { - 'classname': element.classname, - 'keybindings': { - 'CreationClassName': element['CreationClassName'], - 'SystemName': element['SystemName'], - 'DeviceID': element['DeviceID'], - 'SystemCreationClassName': element['SystemCreationClassName'] - } - } - - volume_no = "0x" + element['DeviceID'][24:28] - - metadata = {'FJ_Backend': systemnamelist[0]['IdentifyingNumber'], - 'FJ_Volume_Name': volumename, - 'FJ_Volume_No': volume_no, - 'FJ_Pool_Name': eternus_pool, - 'FJ_Pool_Type': POOL_TYPE_dic[pooltype]} - - return (element_path, metadata) - - def create_volume_from_snapshot(self, volume, snapshot): - """Creates a volume from a snapshot.""" - LOG.debug('create_volume_from_snapshot, ' - 'volume id: %(vid)s, volume size: %(vsize)s, ' - 'snapshot id: %(sid)s.', - {'vid': volume['id'], 'vsize': volume['size'], - 'sid': snapshot['id']}) - - self.conn = self._get_eternus_connection() - source_volume_instance = self._find_lun(snapshot) - - # Check the existence of source volume. - if source_volume_instance is None: - msg = _('create_volume_from_snapshot, ' - 'Source Volume does not exist in ETERNUS.') - LOG.error(msg) - raise exception.VolumeBackendAPIException(data=msg) - - # Create volume for the target volume. 
- (element_path, metadata) = self.create_volume(volume) - target_volume_instancename = self._create_eternus_instance_name( - element_path['classname'], element_path['keybindings']) - - try: - target_volume_instance = ( - self._get_eternus_instance(target_volume_instancename)) - except Exception: - msg = (_('create_volume_from_snapshot, ' - 'target volume instancename: %(volume_instancename)s, ' - 'Get Instance Failed.') - % {'volume_instancename': target_volume_instancename}) - LOG.error(msg) - raise exception.VolumeBackendAPIException(data=msg) - - self._create_local_cloned_volume(target_volume_instance, - source_volume_instance) - - return (element_path, metadata) - - def create_cloned_volume(self, volume, src_vref): - """Create clone of the specified volume.""" - LOG.debug('create_cloned_volume, ' - 'tgt: (%(tid)s, %(tsize)s), src: (%(sid)s, %(ssize)s).', - {'tid': volume['id'], 'tsize': volume['size'], - 'sid': src_vref['id'], 'ssize': src_vref['size']}) - - self.conn = self._get_eternus_connection() - source_volume_instance = self._find_lun(src_vref) - - if source_volume_instance is None: - msg = _('create_cloned_volume, ' - 'Source Volume does not exist in ETERNUS.') - LOG.error(msg) - raise exception.VolumeBackendAPIException(data=msg) - - (element_path, metadata) = self.create_volume(volume) - target_volume_instancename = self._create_eternus_instance_name( - element_path['classname'], element_path['keybindings']) - - try: - target_volume_instance = ( - self._get_eternus_instance(target_volume_instancename)) - except Exception: - msg = (_('create_cloned_volume, ' - 'target volume instancename: %(volume_instancename)s, ' - 'Get Instance Failed.') - % {'volume_instancename': target_volume_instancename}) - LOG.error(msg) - raise exception.VolumeBackendAPIException(data=msg) - - self._create_local_cloned_volume(target_volume_instance, - source_volume_instance) - - return (element_path, metadata) - - @lockutils.synchronized('ETERNUS-vol', 'cinder-', True) - def 
_create_local_cloned_volume(self, tgt_vol_instance, src_vol_instance): - """Create local clone of the specified volume.""" - s_volumename = src_vol_instance['ElementName'] - t_volumename = tgt_vol_instance['ElementName'] - - LOG.debug('_create_local_cloned_volume, ' - 'tgt volume name: %(t_volumename)s, ' - 'src volume name: %(s_volumename)s, ', - {'t_volumename': t_volumename, - 's_volumename': s_volumename}) - - # Get replicationservice for CreateElementReplica. - repservice = self._find_eternus_service(REPL) - - if repservice is None: - msg = _('_create_local_cloned_volume, ' - 'Replication Service not found.') - LOG.error(msg) - raise exception.VolumeBackendAPIException(data=msg) - - # Invoke method for create cloned volume from volume. - rc, errordesc, job = self._exec_eternus_service( - 'CreateElementReplica', - repservice, - SyncType=self._pywbem_uint(8, '16'), - SourceElement=src_vol_instance.path, - TargetElement=tgt_vol_instance.path) - - if rc != 0: - msg = (_('_create_local_cloned_volume, ' - 'volumename: %(volumename)s, ' - 'sourcevolumename: %(sourcevolumename)s, ' - 'source volume instance: %(source_volume)s, ' - 'target volume instance: %(target_volume)s, ' - 'Return code: %(rc)lu, ' - 'Error: %(errordesc)s.') - % {'volumename': t_volumename, - 'sourcevolumename': s_volumename, - 'source_volume': src_vol_instance.path, - 'target_volume': tgt_vol_instance.path, - 'rc': rc, - 'errordesc': errordesc}) - LOG.error(msg) - raise exception.VolumeBackendAPIException(data=msg) - - LOG.debug('_create_local_cloned_volume, out: %(rc)s, %(job)s.', - {'rc': rc, 'job': job}) - - def delete_volume(self, volume): - """Delete volume on ETERNUS.""" - LOG.debug('delete_volume, volume id: %s.', volume['id']) - - self.conn = self._get_eternus_connection() - vol_exist = self._delete_volume_setting(volume) - - if not vol_exist: - LOG.debug('delete_volume, volume not found in 1st check.') - return False - - # Check volume existence on ETERNUS again - # because volume is 
deleted when SnapOPC copysession is deleted. - vol_instance = self._find_lun(volume) - if vol_instance is None: - LOG.debug('delete_volume, volume not found in 2nd check, ' - 'but no problem.') - return True - - self._delete_volume(vol_instance) - return True - - @lockutils.synchronized('ETERNUS-vol', 'cinder-', True) - def _delete_volume_setting(self, volume): - """Delete volume setting (HostAffinity, CopySession) on ETERNUS.""" - LOG.debug('_delete_volume_setting, volume id: %s.', volume['id']) - - # Check the existence of volume. - volumename = self._create_volume_name(volume['id']) - vol_instance = self._find_lun(volume) - - if vol_instance is None: - LOG.info('_delete_volume_setting, volumename:%(volumename)s, ' - 'volume not found on ETERNUS.', - {'volumename': volumename}) - return False - - # Delete host-affinity setting remained by unexpected error. - self._unmap_lun(volume, None, force=True) - - # Check copy session relating to target volume. - cpsessionlist = self._find_copysession(vol_instance) - delete_copysession_list = [] - wait_copysession_list = [] - - for cpsession in cpsessionlist: - LOG.debug('_delete_volume_setting, ' - 'volumename: %(volumename)s, ' - 'cpsession: %(cpsession)s.', - {'volumename': volumename, - 'cpsession': cpsession}) - - if cpsession['SyncedElement'] == vol_instance.path: - # Copy target : other_volume --(copy)--> vol_instance - delete_copysession_list.append(cpsession) - elif cpsession['SystemElement'] == vol_instance.path: - # Copy source : vol_instance --(copy)--> other volume - wait_copysession_list.append(cpsession) - - LOG.debug('_delete_volume_setting, ' - 'wait_cpsession: %(wait_cpsession)s, ' - 'delete_cpsession: %(delete_cpsession)s.', - {'wait_cpsession': wait_copysession_list, - 'delete_cpsession': delete_copysession_list}) - - for cpsession in wait_copysession_list: - self._wait_for_copy_complete(cpsession) - - for cpsession in delete_copysession_list: - self._delete_copysession(cpsession) - - 
LOG.debug('_delete_volume_setting, ' - 'wait_cpsession: %(wait_cpsession)s, ' - 'delete_cpsession: %(delete_cpsession)s, complete.', - {'wait_cpsession': wait_copysession_list, - 'delete_cpsession': delete_copysession_list}) - return True - - @lockutils.synchronized('ETERNUS-vol', 'cinder-', True) - def _delete_volume(self, vol_instance): - """Delete volume on ETERNUS.""" - LOG.debug('_delete_volume, volume name: %s.', - vol_instance['ElementName']) - - volumename = vol_instance['ElementName'] - - configservice = self._find_eternus_service(STOR_CONF) - if configservice is None: - msg = (_('_delete_volume, volumename: %(volumename)s, ' - 'Storage Configuration Service not found.') - % {'volumename': volumename}) - LOG.error(msg) - raise exception.VolumeBackendAPIException(data=msg) - - LOG.debug('_delete_volume, volumename: %(volumename)s, ' - 'vol_instance: %(vol_instance)s, ' - 'Method: ReturnToStoragePool.', - {'volumename': volumename, - 'vol_instance': vol_instance.path}) - - # Invoke method for delete volume - rc, errordesc, job = self._exec_eternus_service( - 'ReturnToStoragePool', - configservice, - TheElement=vol_instance.path) - - if rc != 0: - msg = (_('_delete_volume, volumename: %(volumename)s, ' - 'Return code: %(rc)lu, ' - 'Error: %(errordesc)s.') - % {'volumename': volumename, - 'rc': rc, - 'errordesc': errordesc}) - LOG.error(msg) - raise exception.VolumeBackendAPIException(data=msg) - - LOG.debug('_delete_volume, volumename: %(volumename)s, ' - 'Return code: %(rc)lu, ' - 'Error: %(errordesc)s.', - {'volumename': volumename, - 'rc': rc, - 'errordesc': errordesc}) - - @lockutils.synchronized('ETERNUS-vol', 'cinder-', True) - def create_snapshot(self, snapshot): - """Create snapshot using SnapOPC.""" - LOG.debug('create_snapshot, ' - 'snapshot id: %(sid)s, volume id: %(vid)s.', - {'sid': snapshot['id'], 'vid': snapshot['volume_id']}) - - self.conn = self._get_eternus_connection() - snapshotname = snapshot['name'] - volumename = snapshot['volume_name'] 
- vol_id = snapshot['volume_id'] - volume = snapshot['volume'] - d_volumename = self._create_volume_name(snapshot['id']) - s_volumename = self._create_volume_name(vol_id) - vol_instance = self._find_lun(volume) - repservice = self._find_eternus_service(REPL) - - # Check the existence of volume. - if vol_instance is None: - # Volume not found on ETERNUS. - msg = (_('create_snapshot, ' - 'volumename: %(s_volumename)s, ' - 'source volume not found on ETERNUS.') - % {'s_volumename': s_volumename}) - LOG.error(msg) - raise exception.VolumeBackendAPIException(data=msg) - - if repservice is None: - msg = (_('create_snapshot, ' - 'volumename: %(volumename)s, ' - 'Replication Service not found.') - % {'volumename': volumename}) - LOG.error(msg) - raise exception.VolumeBackendAPIException(data=msg) - - # Get poolname from driver configuration file. - eternus_pool = self._get_drvcfg('EternusSnapPool') - # Check the existence of pool - pool = self._find_pool(eternus_pool) - if pool is None: - msg = (_('create_snapshot, ' - 'eternus_pool: %(eternus_pool)s, ' - 'pool not found.') - % {'eternus_pool': eternus_pool}) - LOG.error(msg) - raise exception.VolumeBackendAPIException(data=msg) - - LOG.debug('create_snapshot, ' - 'snapshotname: %(snapshotname)s, ' - 'source volume name: %(volumename)s, ' - 'vol_instance.path: %(vol_instance)s, ' - 'dest_volumename: %(d_volumename)s, ' - 'pool: %(pool)s, ' - 'Invoke CreateElementReplica.', - {'snapshotname': snapshotname, - 'volumename': volumename, - 'vol_instance': vol_instance.path, - 'd_volumename': d_volumename, - 'pool': pool}) - - # Invoke method for create snapshot - rc, errordesc, job = self._exec_eternus_service( - 'CreateElementReplica', - repservice, - ElementName=d_volumename, - TargetPool=pool, - SyncType=self._pywbem_uint(7, '16'), - SourceElement=vol_instance.path) - - if rc != 0: - msg = (_('create_snapshot, ' - 'snapshotname: %(snapshotname)s, ' - 'source volume name: %(volumename)s, ' - 'vol_instance.path: 
%(vol_instance)s, ' - 'dest volume name: %(d_volumename)s, ' - 'pool: %(pool)s, ' - 'Return code: %(rc)lu, ' - 'Error: %(errordesc)s.') - % {'snapshotname': snapshotname, - 'volumename': volumename, - 'vol_instance': vol_instance.path, - 'd_volumename': d_volumename, - 'pool': pool, - 'rc': rc, - 'errordesc': errordesc}) - LOG.error(msg) - raise exception.VolumeBackendAPIException(data=msg) - else: - element = job['TargetElement'] - - LOG.debug('create_snapshot, ' - 'volumename:%(volumename)s, ' - 'Return code:%(rc)lu, ' - 'Error:%(errordesc)s.', - {'volumename': volumename, - 'rc': rc, - 'errordesc': errordesc}) - - # Create return value. - element_path = { - 'classname': element.classname, - 'keybindings': { - 'CreationClassName': element['CreationClassName'], - 'SystemName': element['SystemName'], - 'DeviceID': element['DeviceID'], - 'SystemCreationClassName': element['SystemCreationClassName'] - } - } - - sdv_no = "0x" + element['DeviceID'][24:28] - metadata = {'FJ_SDV_Name': d_volumename, - 'FJ_SDV_No': sdv_no, - 'FJ_Pool_Name': eternus_pool} - return (element_path, metadata) - - def delete_snapshot(self, snapshot): - """Delete snapshot.""" - LOG.debug('delete_snapshot, ' - 'snapshot id: %(sid)s, volume id: %(vid)s.', - {'sid': snapshot['id'], 'vid': snapshot['volume_id']}) - - vol_exist = self.delete_volume(snapshot) - LOG.debug('delete_snapshot, vol_exist: %s.', vol_exist) - return vol_exist - - def initialize_connection(self, volume, connector): - """Allow connection to connector and return connection info.""" - LOG.debug('initialize_connection, ' - 'volume id: %(vid)s, protocol: %(prtcl)s.', - {'vid': volume['id'], 'prtcl': self.protocol}) - - self.conn = self._get_eternus_connection() - vol_instance = self._find_lun(volume) - # Check the existence of volume - if vol_instance is None: - # Volume not found - msg = (_('initialize_connection, ' - 'volume: %(volume)s, ' - 'Volume not found.') - % {'volume': volume['name']}) - LOG.error(msg) - raise 
exception.VolumeBackendAPIException(data=msg) - - target_portlist = self._get_target_port() - mapdata = self._get_mapdata(vol_instance, connector, target_portlist) - - if mapdata: - # volume is already mapped - target_lun = mapdata.get('target_lun', None) - target_luns = mapdata.get('target_luns', None) - - LOG.info('initialize_connection, ' - 'volume: %(volume)s, ' - 'target_lun: %(target_lun)s, ' - 'target_luns: %(target_luns)s, ' - 'Volume is already mapped.', - {'volume': volume['name'], - 'target_lun': target_lun, - 'target_luns': target_luns}) - else: - self._map_lun(vol_instance, connector, target_portlist) - mapdata = self._get_mapdata(vol_instance, - connector, target_portlist) - - mapdata['target_discovered'] = True - mapdata['volume_id'] = volume['id'] - - if self.protocol == 'fc': - device_info = {'driver_volume_type': 'fibre_channel', - 'data': mapdata} - elif self.protocol == 'iSCSI': - device_info = {'driver_volume_type': 'iscsi', - 'data': mapdata} - - LOG.debug('initialize_connection, ' - 'device_info:%(info)s.', - {'info': device_info}) - return device_info - - def terminate_connection(self, volume, connector, force=False, **kwargs): - """Disallow connection from connector.""" - LOG.debug('terminate_connection, ' - 'volume id: %(vid)s, protocol: %(prtcl)s, force: %(frc)s.', - {'vid': volume['id'], 'prtcl': self.protocol, 'frc': force}) - - self.conn = self._get_eternus_connection() - map_exist = self._unmap_lun(volume, connector) - - LOG.debug('terminate_connection, map_exist: %s.', map_exist) - return map_exist - - def build_fc_init_tgt_map(self, connector, target_wwn=None): - """Build parameter for Zone Manager""" - LOG.debug('build_fc_init_tgt_map, target_wwn: %s.', target_wwn) - - initiatorlist = self._find_initiator_names(connector) - - if target_wwn is None: - target_wwn = [] - target_portlist = self._get_target_port() - for target_port in target_portlist: - target_wwn.append(target_port['Name']) - - init_tgt_map = {initiator: target_wwn for 
initiator in initiatorlist} - - LOG.debug('build_fc_init_tgt_map, ' - 'initiator target mapping: %s.', init_tgt_map) - return init_tgt_map - - def check_attached_volume_in_zone(self, connector): - """Check Attached Volume in Same FC Zone or not""" - LOG.debug('check_attached_volume_in_zone, connector: %s.', connector) - - aglist = self._find_affinity_group(connector) - if not aglist: - attached = False - else: - attached = True - - LOG.debug('check_attached_volume_in_zone, attached: %s.', attached) - return attached - - @lockutils.synchronized('ETERNUS-vol', 'cinder-', True) - def extend_volume(self, volume, new_size): - """Extend volume on ETERNUS.""" - LOG.debug('extend_volume, volume id: %(vid)s, ' - 'size: %(size)s, new_size: %(nsize)s.', - {'vid': volume['id'], - 'size': volume['size'], 'nsize': new_size}) - - self.conn = self._get_eternus_connection() - volumesize = new_size * units.Gi - volumename = self._create_volume_name(volume['id']) - - # Get source volume instance. - vol_instance = self._find_lun(volume) - if vol_instance is None: - msg = (_('extend_volume, ' - 'volumename: %(volumename)s, ' - 'volume not found.') - % {'volumename': volumename}) - LOG.error(msg) - raise exception.VolumeBackendAPIException(data=msg) - - LOG.debug('extend_volume, volumename: %(volumename)s, ' - 'volumesize: %(volumesize)u, ' - 'volume instance: %(vol_instance)s.', - {'volumename': volumename, - 'volumesize': volumesize, - 'vol_instance': vol_instance.path}) - - # Get poolname from driver configuration file. - eternus_pool = self._get_drvcfg('EternusPool') - # Check the existence of volume. - pool = self._find_pool(eternus_pool) - if pool is None: - msg = (_('extend_volume, ' - 'eternus_pool: %(eternus_pool)s, ' - 'pool not found.') - % {'eternus_pool': eternus_pool}) - LOG.error(msg) - raise exception.VolumeBackendAPIException(data=msg) - - # Set pooltype. 
- if 'RSP' in pool['InstanceID']: - pooltype = RAIDGROUP - else: - pooltype = TPPOOL - - configservice = self._find_eternus_service(STOR_CONF) - if configservice is None: - msg = (_('extend_volume, volume: %(volume)s, ' - 'volumename: %(volumename)s, ' - 'eternus_pool: %(eternus_pool)s, ' - 'Storage Configuration Service not found.') - % {'volume': volume, - 'volumename': volumename, - 'eternus_pool': eternus_pool}) - LOG.error(msg) - raise exception.VolumeBackendAPIException(data=msg) - - LOG.debug('extend_volume, ' - 'CreateOrModifyElementFromStoragePool, ' - 'ConfigService: %(service)s, ' - 'ElementName: %(volumename)s, ' - 'InPool: %(eternus_pool)s, ' - 'ElementType: %(pooltype)u, ' - 'Size: %(volumesize)u, ' - 'TheElement: %(vol_instance)s.', - {'service': configservice, - 'volumename': volumename, - 'eternus_pool': eternus_pool, - 'pooltype': pooltype, - 'volumesize': volumesize, - 'vol_instance': vol_instance.path}) - - # Invoke method for extend volume - rc, errordesc, job = self._exec_eternus_service( - 'CreateOrModifyElementFromStoragePool', - configservice, - ElementName=volumename, - InPool=pool, - ElementType=self._pywbem_uint(pooltype, '16'), - Size=self._pywbem_uint(volumesize, '64'), - TheElement=vol_instance.path) - - if rc != 0: - msg = (_('extend_volume, ' - 'volumename: %(volumename)s, ' - 'Return code: %(rc)lu, ' - 'Error: %(errordesc)s, ' - 'PoolType: %(pooltype)s.') - % {'volumename': volumename, - 'rc': rc, - 'errordesc': errordesc, - 'pooltype': POOL_TYPE_dic[pooltype]}) - - LOG.error(msg) - raise exception.VolumeBackendAPIException(data=msg) - - LOG.debug('extend_volume, ' - 'volumename: %(volumename)s, ' - 'Return code: %(rc)lu, ' - 'Error: %(errordesc)s, ' - 'Pool Name: %(eternus_pool)s, ' - 'Pool Type: %(pooltype)s.', - {'volumename': volumename, - 'rc': rc, - 'errordesc': errordesc, - 'eternus_pool': eternus_pool, - 'pooltype': POOL_TYPE_dic[pooltype]}) - - return eternus_pool - - @lockutils.synchronized('ETERNUS-update', 'cinder-', 
True) - def update_volume_stats(self): - """get pool capacity.""" - - self.conn = self._get_eternus_connection() - eternus_pool = self._get_drvcfg('EternusPool') - - LOG.debug('update_volume_stats, pool name: %s.', eternus_pool) - - pool = self._find_pool(eternus_pool, True) - if pool: - # pool is found - self.stats['total_capacity_gb'] = ( - pool['TotalManagedSpace'] / units.Gi) - - self.stats['free_capacity_gb'] = ( - pool['RemainingManagedSpace'] / units.Gi) - else: - # if pool information is unknown, set 0 GB to capacity information - LOG.warning('update_volume_stats, ' - 'eternus_pool:%(eternus_pool)s, ' - 'specified pool is not found.', - {'eternus_pool': eternus_pool}) - self.stats['total_capacity_gb'] = 0 - self.stats['free_capacity_gb'] = 0 - - self.stats['multiattach'] = False - - LOG.debug('update_volume_stats, ' - 'eternus_pool:%(eternus_pool)s, ' - 'total capacity[%(total)s], ' - 'free capacity[%(free)s].', - {'eternus_pool': eternus_pool, - 'total': self.stats['total_capacity_gb'], - 'free': self.stats['free_capacity_gb']}) - - return (self.stats, eternus_pool) - - def _get_mapdata(self, vol_instance, connector, target_portlist): - """return mapping information.""" - mapdata = None - multipath = connector.get('multipath', False) - - LOG.debug('_get_mapdata, volume name: %(vname)s, ' - 'protocol: %(prtcl)s, multipath: %(mpath)s.', - {'vname': vol_instance['ElementName'], - 'prtcl': self.protocol, 'mpath': multipath}) - - # find affinity group - # attach the connector and include the volume - aglist = self._find_affinity_group(connector, vol_instance) - if not aglist: - LOG.debug('_get_mapdata, ag_list:%s.', aglist) - else: - if self.protocol == 'fc': - mapdata = self._get_mapdata_fc(aglist, vol_instance, - target_portlist) - elif self.protocol == 'iSCSI': - mapdata = self._get_mapdata_iscsi(aglist, vol_instance, - multipath) - - LOG.debug('_get_mapdata, mapdata: %s.', mapdata) - return mapdata - - def _get_mapdata_fc(self, aglist, vol_instance, 
target_portlist): - """_get_mapdata for FibreChannel.""" - target_wwn = [] - - try: - ag_volmaplist = self._reference_eternus_names( - aglist[0], - ResultClass='CIM_ProtocolControllerForUnit') - vo_volmaplist = self._reference_eternus_names( - vol_instance.path, - ResultClass='CIM_ProtocolControllerForUnit') - except pywbem.CIM_Error: - msg = (_('_get_mapdata_fc, ' - 'getting host-affinity from aglist/vol_instance failed, ' - 'affinitygroup: %(ag)s, ' - 'ReferenceNames, ' - 'cannot connect to ETERNUS.') - % {'ag': aglist[0]}) - LOG.exception(msg) - raise exception.VolumeBackendAPIException(data=msg) - - volmap = None - for vo_volmap in vo_volmaplist: - if vo_volmap in ag_volmaplist: - volmap = vo_volmap - break - - try: - volmapinstance = self._get_eternus_instance( - volmap, - LocalOnly=False) - except pywbem.CIM_Error: - msg = (_('_get_mapdata_fc, ' - 'getting host-affinity instance failed, ' - 'volmap: %(volmap)s, ' - 'GetInstance, ' - 'cannot connect to ETERNUS.') - % {'volmap': volmap}) - LOG.exception(msg) - raise exception.VolumeBackendAPIException(data=msg) - - target_lun = int(volmapinstance['DeviceNumber'], 16) - - for target_port in target_portlist: - target_wwn.append(target_port['Name']) - - mapdata = {'target_wwn': target_wwn, - 'target_lun': target_lun} - LOG.debug('_get_mapdata_fc, mapdata: %s.', mapdata) - return mapdata - - def _get_mapdata_iscsi(self, aglist, vol_instance, multipath): - """_get_mapdata for iSCSI.""" - target_portals = [] - target_iqns = [] - target_luns = [] - - try: - vo_volmaplist = self._reference_eternus_names( - vol_instance.path, - ResultClass='CIM_ProtocolControllerForUnit') - except Exception: - msg = (_('_get_mapdata_iscsi, ' - 'vol_instance: %(vol_instance)s, ' - 'ReferenceNames: CIM_ProtocolControllerForUnit, ' - 'cannot connect to ETERNUS.') - % {'vol_instance': vol_instance}) - LOG.error(msg) - raise exception.VolumeBackendAPIException(data=msg) - - target_properties_list = self._get_eternus_iscsi_properties() - 
target_list = [prop[0] for prop in target_properties_list] - properties_list = ( - [(prop[1], prop[2]) for prop in target_properties_list]) - - for ag in aglist: - try: - iscsi_endpointlist = ( - self._assoc_eternus_names( - ag, - AssocClass='FUJITSU_SAPAvailableForElement', - ResultClass='FUJITSU_iSCSIProtocolEndpoint')) - except Exception: - msg = (_('_get_mapdata_iscsi, ' - 'Associators: FUJITSU_SAPAvailableForElement, ' - 'cannot connect to ETERNUS.')) - LOG.error(msg) - raise exception.VolumeBackendAPIException(data=msg) - - iscsi_endpoint = iscsi_endpointlist[0] - if iscsi_endpoint not in target_list: - continue - - idx = target_list.index(iscsi_endpoint) - target_portal, target_iqn = properties_list[idx] - - try: - ag_volmaplist = self._reference_eternus_names( - ag, - ResultClass='CIM_ProtocolControllerForUnit') - except Exception: - msg = (_('_get_mapdata_iscsi, ' - 'affinitygroup: %(ag)s, ' - 'ReferenceNames, ' - 'cannot connect to ETERNUS.') - % {'ag': ag}) - LOG.error(msg) - raise exception.VolumeBackendAPIException(data=msg) - - volmap = None - for vo_volmap in vo_volmaplist: - if vo_volmap in ag_volmaplist: - volmap = vo_volmap - break - - if volmap is None: - continue - - try: - volmapinstance = self._get_eternus_instance( - volmap, - LocalOnly=False) - except Exception: - msg = (_('_get_mapdata_iscsi, ' - 'volmap: %(volmap)s, ' - 'GetInstance, ' - 'cannot connect to ETERNUS.') - % {'volmap': volmap}) - LOG.error(msg) - raise exception.VolumeBackendAPIException(data=msg) - - target_lun = int(volmapinstance['DeviceNumber'], 16) - - target_portals.append(target_portal) - target_iqns.append(target_iqn) - target_luns.append(target_lun) - - if multipath: - mapdata = {'target_portals': target_portals, - 'target_iqns': target_iqns, - 'target_luns': target_luns} - else: - mapdata = {'target_portal': target_portals[0], - 'target_iqn': target_iqns[0], - 'target_lun': target_luns[0]} - - LOG.debug('_get_mapdata_iscsi, mapdata: %s.', mapdata) - return mapdata - 
- def _get_drvcfg(self, tagname, filename=None, multiple=False): - """read from driver configuration file.""" - if filename is None: - # set default configuration file name - filename = self.configuration.cinder_eternus_config_file - - LOG.debug("_get_drvcfg, input[%(filename)s][%(tagname)s].", - {'filename': filename, 'tagname': tagname}) - - tree = parse(filename) - elem = tree.getroot() - - ret = None - if not multiple: - ret = elem.findtext(".//" + tagname) - else: - ret = [] - for e in elem.findall(".//" + tagname): - if (e.text is not None) and (e.text not in ret): - ret.append(e.text) - - if not ret: - msg = (_('_get_drvcfg, ' - 'filename: %(filename)s, ' - 'tagname: %(tagname)s, ' - 'data is None!! ' - 'Please edit driver configuration file and correct.') - % {'filename': filename, - 'tagname': tagname}) - LOG.error(msg) - raise exception.VolumeBackendAPIException(data=msg) - - return ret - - def _get_eternus_connection(self, filename=None): - """return WBEM connection.""" - LOG.debug('_get_eternus_connection, filename: %s.', filename) - - ip = self._get_drvcfg('EternusIP', filename) - port = self._get_drvcfg('EternusPort', filename) - user = self._get_drvcfg('EternusUser', filename) - passwd = self._get_drvcfg('EternusPassword', filename) - url = 'http://' + ip + ':' + port - - conn = pywbem.WBEMConnection(url, (user, passwd), - default_namespace='root/eternus') - - if conn is None: - msg = (_('_get_eternus_connection, ' - 'filename: %(filename)s, ' - 'ip: %(ip)s, ' - 'port: %(port)s, ' - 'user: %(user)s, ' - 'passwd: ****, ' - 'url: %(url)s, ' - 'FAILED!!.') - % {'filename': filename, - 'ip': ip, - 'port': port, - 'user': user, - 'url': url}) - LOG.error(msg) - raise exception.VolumeBackendAPIException(data=msg) - - LOG.debug('_get_eternus_connection, conn: %s.', conn) - return conn - - def _create_volume_name(self, id_code): - """create volume_name on ETERNUS from id on OpenStack.""" - LOG.debug('_create_volume_name, id_code: %s.', id_code) - - if 
id_code is None: - msg = _('_create_volume_name, id_code is None.') - LOG.error(msg) - raise exception.VolumeBackendAPIException(data=msg) - - m = hashlib.md5() - m.update(id_code.encode('utf-8')) - - # pylint: disable=E1121 - volumename = base64.urlsafe_b64encode(m.digest()).decode() - ret = VOL_PREFIX + six.text_type(volumename) - - LOG.debug('_create_volume_name, ret: %s', ret) - return ret - - def _find_pool(self, eternus_pool, detail=False): - """find Instance or InstanceName of pool by pool name on ETERNUS.""" - LOG.debug('_find_pool, pool name: %s.', eternus_pool) - - tppoollist = [] - rgpoollist = [] - - # Get pools info form CIM instance(include info about instance path). - try: - tppoollist = self._enum_eternus_instances( - 'FUJITSU_ThinProvisioningPool') - rgpoollist = self._enum_eternus_instances( - 'FUJITSU_RAIDStoragePool') - except Exception: - msg = (_('_find_pool, ' - 'eternus_pool:%(eternus_pool)s, ' - 'EnumerateInstances, ' - 'cannot connect to ETERNUS.') - % {'eternus_pool': eternus_pool}) - LOG.error(msg) - raise exception.VolumeBackendAPIException(data=msg) - - # Make total pools list. - poollist = tppoollist + rgpoollist - - # One eternus backend has only one special pool name - # so just use pool name can get the target pool. 
- for pool in poollist: - if pool['ElementName'] == eternus_pool: - poolinstance = pool - break - else: - poolinstance = None - - if poolinstance is None: - ret = None - elif detail is True: - ret = poolinstance - else: - ret = poolinstance.path - - LOG.debug('_find_pool, pool: %s.', ret) - return ret - - def _find_eternus_service(self, classname): - """find CIM instance about service information.""" - LOG.debug('_find_eternus_service, ' - 'classname: %s.', classname) - - try: - services = self._enum_eternus_instance_names( - six.text_type(classname)) - except Exception: - msg = (_('_find_eternus_service, ' - 'classname: %(classname)s, ' - 'EnumerateInstanceNames, ' - 'cannot connect to ETERNUS.') - % {'classname': classname}) - LOG.error(msg) - raise exception.VolumeBackendAPIException(data=msg) - - ret = services[0] - LOG.debug('_find_eternus_service, ' - 'classname: %(classname)s, ' - 'ret: %(ret)s.', - {'classname': classname, 'ret': ret}) - return ret - - @lockutils.synchronized('ETERNUS-SMIS-exec', 'cinder-', True) - def _exec_eternus_service(self, classname, instanceNameList, **param_dict): - """Execute SMI-S Method.""" - LOG.debug('_exec_eternus_service, ' - 'classname: %(a)s, ' - 'instanceNameList: %(b)s, ' - 'parameters: %(c)s.', - {'a': classname, - 'b': instanceNameList, - 'c': param_dict}) - - # Use InvokeMethod. 
- try: - rc, retdata = self.conn.InvokeMethod( - classname, - instanceNameList, - **param_dict) - except Exception: - if rc is None: - msg = (_('_exec_eternus_service, ' - 'classname: %(classname)s, ' - 'InvokeMethod, ' - 'cannot connect to ETERNUS.') - % {'classname': classname}) - LOG.error(msg) - raise exception.VolumeBackendAPIException(data=msg) - - # If the result has job information, wait for job complete - if "Job" in retdata: - rc = self._wait_for_job_complete(self.conn, retdata) - - errordesc = RETCODE_dic.get(six.text_type(rc), UNDEF_MSG) - - ret = (rc, errordesc, retdata) - - LOG.debug('_exec_eternus_service, ' - 'classname: %(a)s, ' - 'instanceNameList: %(b)s, ' - 'parameters: %(c)s, ' - 'Return code: %(rc)s, ' - 'Error: %(errordesc)s, ' - 'Retrun data: %(retdata)s.', - {'a': classname, - 'b': instanceNameList, - 'c': param_dict, - 'rc': rc, - 'errordesc': errordesc, - 'retdata': retdata}) - return ret - - @lockutils.synchronized('ETERNUS-SMIS-other', 'cinder-', True) - def _enum_eternus_instances(self, classname): - """Enumerate Instances.""" - LOG.debug('_enum_eternus_instances, classname: %s.', classname) - - ret = self.conn.EnumerateInstances(classname) - - LOG.debug('_enum_eternus_instances, enum %d instances.', len(ret)) - return ret - - @lockutils.synchronized('ETERNUS-SMIS-other', 'cinder-', True) - def _enum_eternus_instance_names(self, classname): - """Enumerate Instance Names.""" - LOG.debug('_enum_eternus_instance_names, classname: %s.', classname) - - ret = self.conn.EnumerateInstanceNames(classname) - - LOG.debug('_enum_eternus_instance_names, enum %d names.', len(ret)) - return ret - - @lockutils.synchronized('ETERNUS-SMIS-getinstance', 'cinder-', True) - def _get_eternus_instance(self, classname, **param_dict): - """Get Instance.""" - LOG.debug('_get_eternus_instance, ' - 'classname: %(cls)s, param: %(param)s.', - {'cls': classname, 'param': param_dict}) - - ret = self.conn.GetInstance(classname, **param_dict) - - 
LOG.debug('_get_eternus_instance, ret: %s.', ret) - return ret - - @lockutils.synchronized('ETERNUS-SMIS-other', 'cinder-', True) - def _assoc_eternus(self, classname, **param_dict): - """Associator.""" - LOG.debug('_assoc_eternus, ' - 'classname: %(cls)s, param: %(param)s.', - {'cls': classname, 'param': param_dict}) - - ret = self.conn.Associators(classname, **param_dict) - - LOG.debug('_assoc_eternus, enum %d instances.', len(ret)) - return ret - - @lockutils.synchronized('ETERNUS-SMIS-other', 'cinder-', True) - def _assoc_eternus_names(self, classname, **param_dict): - """Associator Names.""" - LOG.debug('_assoc_eternus_names, ' - 'classname: %(cls)s, param: %(param)s.', - {'cls': classname, 'param': param_dict}) - - ret = self.conn.AssociatorNames(classname, **param_dict) - - LOG.debug('_assoc_eternus_names, enum %d names.', len(ret)) - return ret - - @lockutils.synchronized('ETERNUS-SMIS-other', 'cinder-', True) - def _reference_eternus_names(self, classname, **param_dict): - """Refference Names.""" - LOG.debug('_reference_eternus_names, ' - 'classname: %(cls)s, param: %(param)s.', - {'cls': classname, 'param': param_dict}) - - ret = self.conn.ReferenceNames(classname, **param_dict) - - LOG.debug('_reference_eternus_names, enum %d names.', len(ret)) - return ret - - def _create_eternus_instance_name(self, classname, bindings): - """create CIM InstanceName from classname and bindings.""" - LOG.debug('_create_eternus_instance_name, ' - 'classname: %(cls)s, bindings: %(bind)s.', - {'cls': classname, 'bind': bindings}) - - instancename = None - - try: - instancename = pywbem.CIMInstanceName( - classname, - namespace='root/eternus', - keybindings=bindings) - except NameError: - instancename = None - - LOG.debug('_create_eternus_instance_name, ret: %s.', instancename) - return instancename - - def _find_lun(self, volume): - """find lun instance from volume class or volumename on ETERNUS.""" - LOG.debug('_find_lun, volume id: %s.', volume['id']) - volumeinstance = 
None - volumename = self._create_volume_name(volume['id']) - - try: - location = ast.literal_eval(volume['provider_location']) - classname = location['classname'] - bindings = location['keybindings'] - - if classname and bindings: - LOG.debug('_find_lun, ' - 'classname: %(classname)s, ' - 'bindings: %(bindings)s.', - {'classname': classname, - 'bindings': bindings}) - volume_instance_name = ( - self._create_eternus_instance_name(classname, bindings)) - - LOG.debug('_find_lun, ' - 'volume_insatnce_name: %(volume_instance_name)s.', - {'volume_instance_name': volume_instance_name}) - - vol_instance = ( - self._get_eternus_instance(volume_instance_name)) - - if vol_instance['ElementName'] == volumename: - volumeinstance = vol_instance - except Exception: - volumeinstance = None - LOG.debug('_find_lun, ' - 'Cannot get volume instance from provider location, ' - 'Search all volume using EnumerateInstanceNames.') - - if volumeinstance is None: - # for old version - - LOG.debug('_find_lun, ' - 'volumename: %(volumename)s.', - {'volumename': volumename}) - - # get volume instance from volumename on ETERNUS - try: - namelist = self._enum_eternus_instance_names( - 'FUJITSU_StorageVolume') - except Exception: - msg = (_('_find_lun, ' - 'volumename: %(volumename)s, ' - 'EnumerateInstanceNames, ' - 'cannot connect to ETERNUS.') - % {'volumename': volumename}) - LOG.error(msg) - raise exception.VolumeBackendAPIException(data=msg) - - for name in namelist: - try: - vol_instance = self._get_eternus_instance(name) - - if vol_instance['ElementName'] == volumename: - volumeinstance = vol_instance - path = volumeinstance.path - - LOG.debug('_find_lun, ' - 'volumename: %(volumename)s, ' - 'vol_instance: %(vol_instance)s.', - {'volumename': volumename, - 'vol_instance': path}) - break - except Exception: - continue - else: - LOG.debug('_find_lun, ' - 'volumename: %(volumename)s, ' - 'volume not found on ETERNUS.', - {'volumename': volumename}) - - LOG.debug('_find_lun, ret: %s.', 
volumeinstance) - return volumeinstance - - def _find_copysession(self, vol_instance): - """find copysession from volumename on ETERNUS.""" - LOG.debug('_find_copysession, volume name: %s.', - vol_instance['ElementName']) - - try: - cpsessionlist = self.conn.ReferenceNames( - vol_instance.path, - ResultClass='FUJITSU_StorageSynchronized') - except Exception: - msg = (_('_find_copysession, ' - 'ReferenceNames, ' - 'vol_instance: %(vol_instance_path)s, ' - 'Cannot connect to ETERNUS.') - % {'vol_instance_path': vol_instance.path}) - LOG.error(msg) - raise exception.VolumeBackendAPIException(data=msg) - - LOG.debug('_find_copysession, ' - 'cpsessionlist: %(cpsessionlist)s.', - {'cpsessionlist': cpsessionlist}) - - LOG.debug('_find_copysession, ret: %s.', cpsessionlist) - return cpsessionlist - - def _wait_for_copy_complete(self, cpsession): - """Wait for the completion of copy.""" - LOG.debug('_wait_for_copy_complete, cpsession: %s.', cpsession) - - cpsession_instance = None - - while True: - try: - cpsession_instance = self.conn.GetInstance( - cpsession, - LocalOnly=False) - except Exception: - cpsession_instance = None - - # if copy session is none, - # it means copy session was finished,break and return - if cpsession_instance is None: - break - - LOG.debug('_wait_for_copy_complete, ' - 'find target copysession, ' - 'wait for end of copysession.') - - if cpsession_instance['CopyState'] == BROKEN: - msg = (_('_wait_for_copy_complete, ' - 'cpsession: %(cpsession)s, ' - 'copysession state is BROKEN.') - % {'cpsession': cpsession}) - LOG.error(msg) - raise exception.VolumeBackendAPIException(data=msg) - - time.sleep(10) - - def _delete_copysession(self, cpsession): - """delete copysession.""" - LOG.debug('_delete_copysession: cpssession: %s.', cpsession) - - try: - cpsession_instance = self._get_eternus_instance( - cpsession, LocalOnly=False) - except Exception: - LOG.info('_delete_copysession, ' - 'the copysession was already completed.') - return - - copytype = 
cpsession_instance['CopyType'] - - # set oparation code - # SnapOPC: 19 (Return To ResourcePool) - # OPC:8 (Detach) - # EC/REC:8 (Detach) - operation = OPERATION_dic.get(copytype, None) - if operation is None: - msg = (_('_delete_copysession, ' - 'copy session type is undefined! ' - 'copy session: %(cpsession)s, ' - 'copy type: %(copytype)s.') - % {'cpsession': cpsession, - 'copytype': copytype}) - LOG.error(msg) - raise exception.VolumeBackendAPIException(data=msg) - - repservice = self._find_eternus_service(REPL) - if repservice is None: - msg = (_('_delete_copysession, ' - 'Cannot find Replication Service')) - LOG.error(msg) - raise exception.VolumeBackendAPIException(data=msg) - - # Invoke method for delete copysession - rc, errordesc, job = self._exec_eternus_service( - 'ModifyReplicaSynchronization', - repservice, - Operation=self._pywbem_uint(operation, '16'), - Synchronization=cpsession, - Force=True, - WaitForCopyState=self._pywbem_uint(15, '16')) - - LOG.debug('_delete_copysession, ' - 'copysession: %(cpsession)s, ' - 'operation: %(operation)s, ' - 'Return code: %(rc)lu, ' - 'Error: %(errordesc)s.', - {'cpsession': cpsession, - 'operation': operation, - 'rc': rc, - 'errordesc': errordesc}) - - if rc == COPYSESSION_NOT_EXIST: - LOG.debug('_delete_copysession, ' - 'cpsession: %(cpsession)s, ' - 'copysession is not exist.', - {'cpsession': cpsession}) - elif rc != 0: - msg = (_('_delete_copysession, ' - 'copysession: %(cpsession)s, ' - 'operation: %(operation)s, ' - 'Return code: %(rc)lu, ' - 'Error: %(errordesc)s.') - % {'cpsession': cpsession, - 'operation': operation, - 'rc': rc, - 'errordesc': errordesc}) - LOG.error(msg) - raise exception.VolumeBackendAPIException(data=msg) - - def _get_target_port(self): - """return target portid.""" - LOG.debug('_get_target_port, protocol: %s.', self.protocol) - - target_portlist = [] - if self.protocol == 'fc': - prtcl_endpoint = 'FUJITSU_SCSIProtocolEndpoint' - connection_type = 2 - elif self.protocol == 'iSCSI': - 
prtcl_endpoint = 'FUJITSU_iSCSIProtocolEndpoint' - connection_type = 7 - - try: - tgtportlist = self._enum_eternus_instances(prtcl_endpoint) - except Exception: - msg = (_('_get_target_port, ' - 'EnumerateInstances, ' - 'cannot connect to ETERNUS.')) - LOG.error(msg) - raise exception.VolumeBackendAPIException(data=msg) - - for tgtport in tgtportlist: - # Check : protocol of tgtport - if tgtport['ConnectionType'] != connection_type: - continue - - # Check : if port is for remote copy, continue - if (tgtport['RAMode'] & 0x7B) != 0x00: - continue - - # Check : if port is for StorageCluster, continue - if 'SCGroupNo' in tgtport: - continue - - target_portlist.append(tgtport) - - LOG.debug('_get_target_port, ' - 'connection type: %(cont)s, ' - 'ramode: %(ramode)s.', - {'cont': tgtport['ConnectionType'], - 'ramode': tgtport['RAMode']}) - - LOG.debug('_get_target_port, ' - 'target port: %(target_portid)s.', - {'target_portid': target_portlist}) - - if len(target_portlist) == 0: - msg = (_('_get_target_port, ' - 'protcol: %(protocol)s, ' - 'target_port not found.') - % {'protocol': self.protocol}) - LOG.error(msg) - raise exception.VolumeBackendAPIException(data=msg) - - LOG.debug('_get_target_port, ret: %s.', target_portlist) - return target_portlist - - @lockutils.synchronized('ETERNUS-connect', 'cinder-', True) - def _map_lun(self, vol_instance, connector, targetlist=None): - """map volume to host.""" - volumename = vol_instance['ElementName'] - LOG.debug('_map_lun, ' - 'volume name: %(vname)s, connector: %(connector)s.', - {'vname': volumename, 'connector': connector}) - - volume_uid = vol_instance['Name'] - initiatorlist = self._find_initiator_names(connector) - aglist = self._find_affinity_group(connector) - configservice = self._find_eternus_service(CTRL_CONF) - - if targetlist is None: - targetlist = self._get_target_port() - - if configservice is None: - msg = (_('_map_lun, ' - 'vol_instance.path:%(vol)s, ' - 'volumename: %(volumename)s, ' - 'volume_uid: %(uid)s, 
' - 'initiator: %(initiator)s, ' - 'target: %(tgt)s, ' - 'aglist: %(aglist)s, ' - 'Storage Configuration Service not found.') - % {'vol': vol_instance.path, - 'volumename': volumename, - 'uid': volume_uid, - 'initiator': initiatorlist, - 'tgt': targetlist, - 'aglist': aglist}) - - LOG.error(msg) - raise exception.VolumeBackendAPIException(data=msg) - - LOG.debug('_map_lun, ' - 'vol_instance.path: %(vol_instance)s, ' - 'volumename:%(volumename)s, ' - 'initiator:%(initiator)s, ' - 'target:%(tgt)s.', - {'vol_instance': vol_instance.path, - 'volumename': [volumename], - 'initiator': initiatorlist, - 'tgt': targetlist}) - - if not aglist: - # Create affinity group and set host-affinity. - for target in targetlist: - LOG.debug('_map_lun, ' - 'lun_name: %(volume_uid)s, ' - 'Initiator: %(initiator)s, ' - 'target: %(target)s.', - {'volume_uid': [volume_uid], - 'initiator': initiatorlist, - 'target': target['Name']}) - - rc, errordesc, job = self._exec_eternus_service( - 'ExposePaths', - configservice, - LUNames=[volume_uid], - InitiatorPortIDs=initiatorlist, - TargetPortIDs=[target['Name']], - DeviceAccesses=[self._pywbem_uint(2, '16')]) - - LOG.debug('_map_lun, ' - 'Error: %(errordesc)s, ' - 'Return code: %(rc)lu, ' - 'Create affinitygroup and set host-affinity.', - {'errordesc': errordesc, - 'rc': rc}) - - if rc != 0 and rc != LUNAME_IN_USE: - LOG.warning('_map_lun, ' - 'lun_name: %(volume_uid)s, ' - 'Initiator: %(initiator)s, ' - 'target: %(target)s, ' - 'Return code: %(rc)lu, ' - 'Error: %(errordesc)s.', - {'volume_uid': [volume_uid], - 'initiator': initiatorlist, - 'target': target['Name'], - 'rc': rc, - 'errordesc': errordesc}) - else: - # Add lun to affinity group - for ag in aglist: - LOG.debug('_map_lun, ' - 'ag: %(ag)s, lun_name: %(volume_uid)s.', - {'ag': ag, - 'volume_uid': volume_uid}) - - rc, errordesc, job = self._exec_eternus_service( - 'ExposePaths', - configservice, LUNames=[volume_uid], - DeviceAccesses=[self._pywbem_uint(2, '16')], - 
ProtocolControllers=[ag]) - - LOG.debug('_map_lun, ' - 'Error: %(errordesc)s, ' - 'Return code: %(rc)lu, ' - 'Add lun to affinity group.', - {'errordesc': errordesc, - 'rc': rc}) - - if rc != 0 and rc != LUNAME_IN_USE: - LOG.warning('_map_lun, ' - 'lun_name: %(volume_uid)s, ' - 'Initiator: %(initiator)s, ' - 'ag: %(ag)s, ' - 'Return code: %(rc)lu, ' - 'Error: %(errordesc)s.', - {'volume_uid': [volume_uid], - 'initiator': initiatorlist, - 'ag': ag, - 'rc': rc, - 'errordesc': errordesc}) - - def _find_initiator_names(self, connector): - """return initiator names.""" - - initiatornamelist = [] - - if self.protocol == 'fc' and connector['wwpns']: - LOG.debug('_find_initiator_names, wwpns: %s.', - connector['wwpns']) - initiatornamelist = connector['wwpns'] - elif self.protocol == 'iSCSI' and connector['initiator']: - LOG.debug('_find_initiator_names, initiator: %s.', - connector['initiator']) - initiatornamelist.append(connector['initiator']) - - if not initiatornamelist: - msg = (_('_find_initiator_names, ' - 'connector: %(connector)s, ' - 'initiator not found.') - % {'connector': connector}) - LOG.error(msg) - raise exception.VolumeBackendAPIException(data=msg) - - LOG.debug('_find_initiator_names, ' - 'initiator list: %(initiator)s.', - {'initiator': initiatornamelist}) - - return initiatornamelist - - def _find_affinity_group(self, connector, vol_instance=None): - """find affinity group from connector.""" - LOG.debug('_find_affinity_group, vol_instance: %s.', vol_instance) - - affinity_grouplist = [] - initiatorlist = self._find_initiator_names(connector) - - if vol_instance is None: - try: - aglist = self._enum_eternus_instance_names( - 'FUJITSU_AffinityGroupController') - except Exception: - msg = (_('_find_affinity_group, ' - 'connector: %(connector)s, ' - 'EnumerateInstanceNames, ' - 'cannot connect to ETERNUS.') - % {'connector': connector}) - LOG.error(msg) - raise exception.VolumeBackendAPIException(data=msg) - - LOG.debug('_find_affinity_group,' - 
'affinity_groups:%s', aglist) - else: - try: - aglist = self._assoc_eternus_names( - vol_instance.path, - AssocClass='FUJITSU_ProtocolControllerForUnit', - ResultClass='FUJITSU_AffinityGroupController') - except Exception: - msg = (_('_find_affinity_group,' - 'connector: %(connector)s,' - 'AssocNames: FUJITSU_ProtocolControllerForUnit, ' - 'cannot connect to ETERNUS.') - % {'connector': connector}) - LOG.error(msg) - raise exception.VolumeBackendAPIException(data=msg) - - LOG.debug('_find_affinity_group, ' - 'vol_instance.path: %(volume)s, ' - 'affinity_groups: %(aglist)s.', - {'volume': vol_instance.path, - 'aglist': aglist}) - - for ag in aglist: - try: - hostaglist = self._assoc_eternus( - ag, - AssocClass='FUJITSU_AuthorizedTarget', - ResultClass='FUJITSU_AuthorizedPrivilege') - except Exception: - msg = (_('_find_affinity_group, ' - 'connector: %(connector)s, ' - 'Associators: FUJITSU_AuthorizedTarget, ' - 'cannot connect to ETERNUS.') - % {'connector': connector}) - LOG.error(msg) - raise exception.VolumeBackendAPIException(data=msg) - - for hostag in hostaglist: - for initiator in initiatorlist: - if initiator.lower() not in hostag['InstanceID'].lower(): - continue - - LOG.debug('_find_affinity_group, ' - 'AffinityGroup: %(ag)s.', {'ag': ag}) - affinity_grouplist.append(ag) - break - break - - LOG.debug('_find_affinity_group, ' - 'initiators: %(initiator)s, ' - 'affinity_group: %(affinity_group)s.', - {'initiator': initiatorlist, - 'affinity_group': affinity_grouplist}) - return affinity_grouplist - - @lockutils.synchronized('ETERNUS-connect', 'cinder-', True) - def _unmap_lun(self, volume, connector, force=False): - """unmap volume from host.""" - LOG.debug('_map_lun, volume id: %(vid)s, ' - 'connector: %(connector)s, force: %(frc)s.', - {'vid': volume['id'], - 'connector': connector, 'frc': force}) - - volumename = self._create_volume_name(volume['id']) - vol_instance = self._find_lun(volume) - if vol_instance is None: - LOG.info('_unmap_lun, ' - 
'volumename:%(volumename)s, ' - 'volume not found.', - {'volumename': volumename}) - return False - - volume_uid = vol_instance['Name'] - - if not force: - aglist = self._find_affinity_group(connector, vol_instance) - if not aglist: - LOG.info('_unmap_lun, ' - 'volumename: %(volumename)s, ' - 'volume is not mapped.', - {'volumename': volumename}) - return False - else: - try: - aglist = self._assoc_eternus_names( - vol_instance.path, - AssocClass='CIM_ProtocolControllerForUnit', - ResultClass='FUJITSU_AffinityGroupController') - except Exception: - msg = (_('_unmap_lun,' - 'vol_instance.path: %(volume)s, ' - 'AssociatorNames: CIM_ProtocolControllerForUnit, ' - 'cannot connect to ETERNUS.') - % {'volume': vol_instance.path}) - LOG.error(msg) - raise exception.VolumeBackendAPIException(data=msg) - - LOG.debug('_unmap_lun, ' - 'vol_instance.path: %(volume)s, ' - 'affinity_groups: %(aglist)s.', - {'volume': vol_instance.path, - 'aglist': aglist}) - - configservice = self._find_eternus_service(CTRL_CONF) - if configservice is None: - msg = (_('_unmap_lun, ' - 'vol_instance.path: %(volume)s, ' - 'volumename: %(volumename)s, ' - 'volume_uid: %(uid)s, ' - 'aglist: %(aglist)s, ' - 'Controller Configuration Service not found.') - % {'vol': vol_instance.path, - 'volumename': [volumename], - 'uid': [volume_uid], - 'aglist': aglist}) - - LOG.error(msg) - raise exception.VolumeBackendAPIException(data=msg) - - for ag in aglist: - LOG.debug('_unmap_lun, ' - 'volumename: %(volumename)s, ' - 'volume_uid: %(volume_uid)s, ' - 'AffinityGroup: %(ag)s.', - {'volumename': volumename, - 'volume_uid': volume_uid, - 'ag': ag}) - - rc, errordesc, job = self._exec_eternus_service( - 'HidePaths', - configservice, - LUNames=[volume_uid], - ProtocolControllers=[ag]) - - LOG.debug('_unmap_lun, ' - 'Error: %(errordesc)s, ' - 'Return code: %(rc)lu.', - {'errordesc': errordesc, - 'rc': rc}) - - if rc == LUNAME_NOT_EXIST: - LOG.debug('_unmap_lun, ' - 'volumename: %(volumename)s, ' - 'Invalid 
LUNames.', - {'volumename': volumename}) - elif rc != 0: - msg = (_('_unmap_lun, ' - 'volumename: %(volumename)s, ' - 'volume_uid: %(volume_uid)s, ' - 'AffinityGroup: %(ag)s, ' - 'Return code: %(rc)lu, ' - 'Error: %(errordesc)s.') - % {'volumename': volumename, - 'volume_uid': volume_uid, - 'ag': ag, - 'rc': rc, - 'errordesc': errordesc}) - LOG.error(msg) - raise exception.VolumeBackendAPIException(data=msg) - - LOG.debug('_unmap_lun, ' - 'volumename: %(volumename)s.', - {'volumename': volumename}) - return True - - def _get_eternus_iscsi_properties(self): - """get target port iqns and target_portals.""" - - iscsi_properties_list = [] - iscsiip_list = self._get_drvcfg('EternusISCSIIP', multiple=True) - iscsi_port = self.configuration.iscsi_port - - LOG.debug('_get_eternus_iscsi_properties, iplist: %s.', iscsiip_list) - - try: - ip_endpointlist = self._enum_eternus_instance_names( - 'FUJITSU_IPProtocolEndpoint') - except Exception: - msg = (_('_get_eternus_iscsi_properties, ' - 'iscsiip: %(iscsiip)s, ' - 'EnumerateInstanceNames, ' - 'cannot connect to ETERNUS.') - % {'iscsiip': iscsiip_list}) - LOG.error(msg) - raise exception.VolumeBackendAPIException(data=msg) - - for ip_endpoint in ip_endpointlist: - try: - ip_endpoint_instance = self._get_eternus_instance( - ip_endpoint) - ip_address = ip_endpoint_instance['IPv4Address'] - LOG.debug('_get_eternus_iscsi_properties, ' - 'instanceip: %(ip)s, ' - 'iscsiip: %(iscsiip)s.', - {'ip': ip_address, - 'iscsiip': iscsiip_list}) - except Exception: - msg = (_('_get_eternus_iscsi_properties, ' - 'iscsiip: %(iscsiip)s, ' - 'GetInstance, ' - 'cannot connect to ETERNUS.') - % {'iscsiip': iscsiip_list}) - LOG.error(msg) - raise exception.VolumeBackendAPIException(data=msg) - - if ip_address not in iscsiip_list: - continue - - LOG.debug('_get_eternus_iscsi_properties, ' - 'find iscsiip: %(ip)s.', {'ip': ip_address}) - try: - tcp_endpointlist = self._assoc_eternus_names( - ip_endpoint, - AssocClass='CIM_BindsTo', - 
ResultClass='FUJITSU_TCPProtocolEndpoint') - except Exception: - msg = (_('_get_eternus_iscsi_properties, ' - 'iscsiip: %(iscsiip)s, ' - 'AssociatorNames: CIM_BindsTo, ' - 'cannot connect to ETERNUS.') - % {'iscsiip': ip_address}) - LOG.error(msg) - raise exception.VolumeBackendAPIException(data=msg) - - for tcp_endpoint in tcp_endpointlist: - try: - iscsi_endpointlist = ( - self._assoc_eternus(tcp_endpoint, - AssocClass='CIM_BindsTo', - ResultClass='FUJITSU_iSCSI' - 'ProtocolEndpoint')) - except Exception: - msg = (_('_get_eternus_iscsi_properties, ' - 'iscsiip: %(iscsiip)s, ' - 'AssociatorNames: CIM_BindsTo, ' - 'cannot connect to ETERNUS.') - % {'iscsiip': ip_address}) - LOG.error(msg) - raise exception.VolumeBackendAPIException(data=msg) - - for iscsi_endpoint in iscsi_endpointlist: - target_portal = "%s:%s" % (ip_address, iscsi_port) - iqn = iscsi_endpoint['Name'].split(',')[0] - iscsi_properties_list.append((iscsi_endpoint.path, - target_portal, - iqn)) - LOG.debug('_get_eternus_iscsi_properties, ' - 'target_portal: %(target_portal)s, ' - 'iqn: %(iqn)s.', - {'target_portal': target_portal, - 'iqn': iqn}) - - if len(iscsi_properties_list) == 0: - msg = (_('_get_eternus_iscsi_properties, ' - 'iscsiip list: %(iscsiip_list)s, ' - 'iqn not found.') - % {'iscsiip_list': iscsiip_list}) - - LOG.error(msg) - raise exception.VolumeBackendAPIException(data=msg) - LOG.debug('_get_eternus_iscsi_properties, ' - 'iscsi_properties_list: %(iscsi_properties_list)s.', - {'iscsi_properties_list': iscsi_properties_list}) - - return iscsi_properties_list - - def _wait_for_job_complete(self, conn, job): - """Given the job wait for it to complete.""" - self.retries = 0 - self.wait_for_job_called = False - - def _wait_for_job_complete(): - """Called at an interval until the job is finished.""" - if self._is_job_finished(conn, job): - raise loopingcall.LoopingCallDone() - if self.retries > JOB_RETRIES: - LOG.error("_wait_for_job_complete, " - "failed after %(retries)d tries.", - 
{'retries': self.retries}) - raise loopingcall.LoopingCallDone() - - try: - self.retries += 1 - if not self.wait_for_job_called: - if self._is_job_finished(conn, job): - self.wait_for_job_called = True - except Exception: - exceptionMessage = _("Issue encountered waiting for job.") - LOG.exception(exceptionMessage) - raise exception.VolumeBackendAPIException(exceptionMessage) - - self.wait_for_job_called = False - timer = loopingcall.FixedIntervalLoopingCall(_wait_for_job_complete) - timer.start(interval=JOB_INTERVAL_SEC).wait() - - jobInstanceName = job['Job'] - jobinstance = conn.GetInstance(jobInstanceName, - LocalOnly=False) - - rc = jobinstance['ErrorCode'] - - LOG.debug('_wait_for_job_complete, rc: %s.', rc) - return rc - - def _is_job_finished(self, conn, job): - """Check if the job is finished.""" - jobInstanceName = job['Job'] - jobinstance = conn.GetInstance(jobInstanceName, - LocalOnly=False) - jobstate = jobinstance['JobState'] - LOG.debug('_is_job_finished,' - 'state: %(state)s', {'state': jobstate}) - # From ValueMap of JobState in CIM_ConcreteJob - # 2=New, 3=Starting, 4=Running, 32767=Queue Pending - # ValueMap("2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13..32767, - # 32768..65535"), - # Values("New, Starting, Running, Suspended, Shutting Down, - # Completed, Terminated, Killed, Exception, Service, - # Query Pending, DMTF Reserved, Vendor Reserved")] - # NOTE(deva): string matching based on - # http://ipmitool.cvs.sourceforge.net/ - # viewvc/ipmitool/ipmitool/lib/ipmi_chassis.c - - if jobstate in [2, 3, 4]: - job_finished = False - else: - job_finished = True - - LOG.debug('_is_job_finished, finish: %s.', job_finished) - return job_finished - - def _pywbem_uint(self, num, datatype): - try: - result = { - '8': pywbem.Uint8(num), - '16': pywbem.Uint16(num), - '32': pywbem.Uint32(num), - '64': pywbem.Uint64(num) - } - result = result.get(datatype, num) - except NameError: - result = num - - return result diff --git 
a/cinder/volume/drivers/fujitsu/eternus_dx_fc.py b/cinder/volume/drivers/fujitsu/eternus_dx_fc.py deleted file mode 100644 index dd667c7e7..000000000 --- a/cinder/volume/drivers/fujitsu/eternus_dx_fc.py +++ /dev/null @@ -1,227 +0,0 @@ -# Copyright (c) 2015 FUJITSU LIMITED -# Copyright (c) 2012 EMC Corporation. -# Copyright (c) 2012 OpenStack Foundation -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -# - -""" -FibreChannel Cinder Volume driver for Fujitsu ETERNUS DX S3 series. -""" -from oslo_log import log as logging -import six - -from cinder import interface -from cinder.volume import driver -from cinder.volume.drivers.fujitsu import eternus_dx_common -from cinder.zonemanager import utils as fczm_utils - -LOG = logging.getLogger(__name__) - - -@interface.volumedriver -class FJDXFCDriver(driver.FibreChannelDriver): - """FC Cinder Volume Driver for Fujitsu ETERNUS DX S3 series.""" - - # ThirdPartySystems wiki page - CI_WIKI_NAME = "Fujitsu_ETERNUS_CI" - VERSION = eternus_dx_common.FJDXCommon.VERSION - - # TODO(smcginnis) Remove driver in Queens if CI requirements are not metA - SUPPORTED = False - - def __init__(self, *args, **kwargs): - - super(FJDXFCDriver, self).__init__(*args, **kwargs) - self.common = eternus_dx_common.FJDXCommon( - 'fc', - configuration=self.configuration) - self.VERSION = self.common.VERSION - - def check_for_setup_error(self): - if not self.common.pywbemAvailable: - LOG.error('pywbem could not be imported! 
' - 'pywbem is necessary for this volume driver.') - - pass - - def create_volume(self, volume): - """Create volume.""" - LOG.debug('create_volume, ' - 'volume id: %s, enter method.', volume['id']) - - location, metadata = self.common.create_volume(volume) - - v_metadata = self._get_metadata(volume) - metadata.update(v_metadata) - - LOG.debug('create_volume, info: %s, exit method.', metadata) - return {'provider_location': six.text_type(location), - 'metadata': metadata} - - def create_volume_from_snapshot(self, volume, snapshot): - """Creates a volume from a snapshot.""" - LOG.debug('create_volume_from_snapshot, ' - 'volume id: %(vid)s, snap id: %(sid)s, enter method.', - {'vid': volume['id'], 'sid': snapshot['id']}) - - location, metadata = ( - self.common.create_volume_from_snapshot(volume, snapshot)) - - v_metadata = self._get_metadata(volume) - metadata.update(v_metadata) - - LOG.debug('create_volume_from_snapshot, ' - 'info: %s, exit method.', metadata) - return {'provider_location': six.text_type(location), - 'metadata': metadata} - - def create_cloned_volume(self, volume, src_vref): - """Create cloned volume.""" - LOG.debug('create_cloned_volume, ' - 'target volume id: %(tid)s, ' - 'source volume id: %(sid)s, enter method.', - {'tid': volume['id'], 'sid': src_vref['id']}) - - location, metadata = ( - self.common.create_cloned_volume(volume, src_vref)) - - v_metadata = self._get_metadata(volume) - metadata.update(v_metadata) - - LOG.debug('create_cloned_volume, ' - 'info: %s, exit method.', metadata) - return {'provider_location': six.text_type(location), - 'metadata': metadata} - - def delete_volume(self, volume): - """Delete volume on ETERNUS.""" - LOG.debug('delete_volume, ' - 'volume id: %s, enter method.', volume['id']) - - vol_exist = self.common.delete_volume(volume) - - LOG.debug('delete_volume, ' - 'delete: %s, exit method.', vol_exist) - - def create_snapshot(self, snapshot): - """Creates a snapshot.""" - LOG.debug('create_snapshot, ' - 'snap id: 
%(sid)s, volume id: %(vid)s, enter method.', - {'sid': snapshot['id'], 'vid': snapshot['volume_id']}) - - location, metadata = self.common.create_snapshot(snapshot) - - LOG.debug('create_snapshot, info: %s, exit method.', metadata) - return {'provider_location': six.text_type(location)} - - def delete_snapshot(self, snapshot): - """Deletes a snapshot.""" - LOG.debug('delete_snapshot, ' - 'snap id: %(sid)s, volume id: %(vid)s, enter method.', - {'sid': snapshot['id'], 'vid': snapshot['volume_id']}) - - vol_exist = self.common.delete_snapshot(snapshot) - - LOG.debug('delete_snapshot, ' - 'delete: %s, exit method.', vol_exist) - - def ensure_export(self, context, volume): - """Driver entry point to get the export info for an existing volume.""" - return - - def create_export(self, context, volume, connector): - """Driver entry point to get the export info for a new volume.""" - return - - def remove_export(self, context, volume): - """Driver entry point to remove an export for a volume.""" - return - - @fczm_utils.add_fc_zone - def initialize_connection(self, volume, connector): - """Allow connection to connector and return connection info.""" - LOG.debug('initialize_connection, volume id: %(vid)s, ' - 'wwpns: %(wwpns)s, enter method.', - {'vid': volume['id'], 'wwpns': connector['wwpns']}) - - info = self.common.initialize_connection(volume, connector) - - data = info['data'] - init_tgt_map = ( - self.common.build_fc_init_tgt_map(connector, data['target_wwn'])) - data['initiator_target_map'] = init_tgt_map - - info['data'] = data - LOG.debug('initialize_connection, ' - 'info: %s, exit method.', info) - return info - - @fczm_utils.remove_fc_zone - def terminate_connection(self, volume, connector, **kwargs): - """Disallow connection from connector.""" - LOG.debug('terminate_connection, volume id: %(vid)s, ' - 'wwpns: %(wwpns)s, enter method.', - {'vid': volume['id'], 'wwpns': connector['wwpns']}) - - map_exist = self.common.terminate_connection(volume, connector) - 
attached = self.common.check_attached_volume_in_zone(connector) - - info = {'driver_volume_type': 'fibre_channel', - 'data': {}} - - if not attached: - # No more volumes attached to the host - init_tgt_map = self.common.build_fc_init_tgt_map(connector) - info['data'] = {'initiator_target_map': init_tgt_map} - - LOG.debug('terminate_connection, unmap: %(unmap)s, ' - 'connection info: %(info)s, exit method', - {'unmap': map_exist, 'info': info}) - return info - - def get_volume_stats(self, refresh=False): - """Get volume stats.""" - LOG.debug('get_volume_stats, refresh: %s, enter method.', refresh) - - pool_name = None - if refresh is True: - data, pool_name = self.common.update_volume_stats() - backend_name = self.configuration.safe_get('volume_backend_name') - data['volume_backend_name'] = backend_name or 'FJDXFCDriver' - data['storage_protocol'] = 'FC' - self._stats = data - - LOG.debug('get_volume_stats, ' - 'pool name: %s, exit method.', pool_name) - return self._stats - - def extend_volume(self, volume, new_size): - """Extend volume.""" - LOG.debug('extend_volume, ' - 'volume id: %s, enter method.', volume['id']) - - used_pool_name = self.common.extend_volume(volume, new_size) - - LOG.debug('extend_volume, ' - 'used pool name: %s, exit method.', used_pool_name) - - def _get_metadata(self, volume): - v_metadata = volume.get('volume_metadata') - if v_metadata: - ret = {data['key']: data['value'] for data in v_metadata} - else: - ret = volume.get('metadata', {}) - - return ret diff --git a/cinder/volume/drivers/fujitsu/eternus_dx_iscsi.py b/cinder/volume/drivers/fujitsu/eternus_dx_iscsi.py deleted file mode 100644 index dcb14b1b6..000000000 --- a/cinder/volume/drivers/fujitsu/eternus_dx_iscsi.py +++ /dev/null @@ -1,208 +0,0 @@ -# Copyright (c) 2015 FUJITSU LIMITED -# Copyright (c) 2012 EMC Corporation. -# Copyright (c) 2012 OpenStack Foundation -# All Rights Reserved. 
-# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -# - -""" -iSCSI Cinder Volume driver for Fujitsu ETERNUS DX S3 series. -""" -import six - -from cinder import interface -from cinder.volume import driver -from cinder.volume.drivers.fujitsu import eternus_dx_common -from oslo_log import log as logging - -LOG = logging.getLogger(__name__) - - -@interface.volumedriver -class FJDXISCSIDriver(driver.ISCSIDriver): - """iSCSI Cinder Volume Driver for Fujitsu ETERNUS DX S3 series.""" - - # ThirdPartySystems wiki page - CI_WIKI_NAME = "Fujitsu_ETERNUS_CI" - VERSION = eternus_dx_common.FJDXCommon.VERSION - - # TODO(smcginnis) Remove driver in Queens if CI requirements are not met - SUPPORTED = False - - def __init__(self, *args, **kwargs): - - super(FJDXISCSIDriver, self).__init__(*args, **kwargs) - self.common = eternus_dx_common.FJDXCommon( - 'iSCSI', - configuration=self.configuration) - self.VERSION = self.common.VERSION - - def check_for_setup_error(self): - if not self.common.pywbemAvailable: - LOG.error('pywbem could not be imported! 
' - 'pywbem is necessary for this volume driver.') - - return - - def create_volume(self, volume): - """Create volume.""" - LOG.info('create_volume, volume id: %s, Enter method.', volume['id']) - - element_path, metadata = self.common.create_volume(volume) - - v_metadata = volume.get('volume_metadata') - if v_metadata: - for data in v_metadata: - metadata[data['key']] = data['value'] - else: - v_metadata = volume.get('metadata', {}) - metadata.update(v_metadata) - - LOG.info('create_volume, info: %s, Exit method.', metadata) - return {'provider_location': six.text_type(element_path), - 'metadata': metadata} - - def create_volume_from_snapshot(self, volume, snapshot): - """Creates a volume from a snapshot.""" - LOG.info('create_volume_from_snapshot, ' - 'volume id: %(vid)s, snap id: %(sid)s, Enter method.', - {'vid': volume['id'], 'sid': snapshot['id']}) - - element_path, metadata = ( - self.common.create_volume_from_snapshot(volume, snapshot)) - - v_metadata = volume.get('volume_metadata') - if v_metadata: - for data in v_metadata: - metadata[data['key']] = data['value'] - else: - v_metadata = volume.get('metadata', {}) - metadata.update(v_metadata) - - LOG.info('create_volume_from_snapshot, ' - 'info: %s, Exit method.', metadata) - return {'provider_location': six.text_type(element_path), - 'metadata': metadata} - - def create_cloned_volume(self, volume, src_vref): - """Create cloned volume.""" - LOG.info('create_cloned_volume, ' - 'target volume id: %(tid)s, ' - 'source volume id: %(sid)s, Enter method.', - {'tid': volume['id'], 'sid': src_vref['id']}) - - element_path, metadata = ( - self.common.create_cloned_volume(volume, src_vref)) - - v_metadata = volume.get('volume_metadata') - if v_metadata: - for data in v_metadata: - metadata[data['key']] = data['value'] - else: - v_metadata = volume.get('metadata', {}) - metadata.update(v_metadata) - - LOG.info('create_cloned_volume, info: %s, Exit method.', metadata) - return {'provider_location': 
six.text_type(element_path), - 'metadata': metadata} - - def delete_volume(self, volume): - """Delete volume on ETERNUS.""" - LOG.info('delete_volume, volume id: %s, Enter method.', volume['id']) - - vol_exist = self.common.delete_volume(volume) - - LOG.info('delete_volume, delete: %s, Exit method.', vol_exist) - return - - def create_snapshot(self, snapshot): - """Creates a snapshot.""" - LOG.info('create_snapshot, snap id: %(sid)s, volume id: %(vid)s, ' - 'Enter method.', - {'sid': snapshot['id'], 'vid': snapshot['volume_id']}) - - element_path, metadata = self.common.create_snapshot(snapshot) - - LOG.info('create_snapshot, info: %s, Exit method.', metadata) - return {'provider_location': six.text_type(element_path)} - - def delete_snapshot(self, snapshot): - """Deletes a snapshot.""" - LOG.info('delete_snapshot, snap id: %(sid)s, volume id: %(vid)s, ' - 'Enter method.', - {'sid': snapshot['id'], 'vid': snapshot['volume_id']}) - - vol_exist = self.common.delete_snapshot(snapshot) - - LOG.info('delete_snapshot, delete: %s, Exit method.', vol_exist) - return - - def ensure_export(self, context, volume): - """Driver entry point to get the export info for an existing volume.""" - return - - def create_export(self, context, volume, connector): - """Driver entry point to get the export info for a new volume.""" - return - - def remove_export(self, context, volume): - """Driver entry point to remove an export for a volume.""" - return - - def initialize_connection(self, volume, connector): - """Allow connection to connector and return connection info.""" - LOG.info('initialize_connection, volume id: %(vid)s, ' - 'initiator: %(initiator)s, Enter method.', - {'vid': volume['id'], 'initiator': connector['initiator']}) - - info = self.common.initialize_connection(volume, connector) - - LOG.info('initialize_connection, info: %s, Exit method.', info) - return info - - def terminate_connection(self, volume, connector, **kwargs): - """Disallow connection from connector.""" - 
LOG.info('terminate_connection, volume id: %(vid)s, ' - 'initiator: %(initiator)s, Enter method.', - {'vid': volume['id'], 'initiator': connector['initiator']}) - - map_exist = self.common.terminate_connection(volume, connector) - - LOG.info('terminate_connection, unmap: %s, Exit method.', map_exist) - return - - def get_volume_stats(self, refresh=False): - """Get volume stats.""" - LOG.debug('get_volume_stats, refresh: %s, Enter method.', refresh) - - pool_name = None - if refresh is True: - data, pool_name = self.common.update_volume_stats() - backend_name = self.configuration.safe_get('volume_backend_name') - data['volume_backend_name'] = backend_name or 'FJDXISCSIDriver' - data['storage_protocol'] = 'iSCSI' - self._stats = data - - LOG.debug('get_volume_stats, ' - 'pool name: %s, Exit method.', pool_name) - return self._stats - - def extend_volume(self, volume, new_size): - """Extend volume.""" - LOG.info('extend_volume, volume id: %s, Enter method.', volume['id']) - - used_pool_name = self.common.extend_volume(volume, new_size) - - LOG.info('extend_volume, used pool name: %s, Exit method.', - used_pool_name) diff --git a/cinder/volume/drivers/fusionstorage/__init__.py b/cinder/volume/drivers/fusionstorage/__init__.py deleted file mode 100644 index e69de29bb..000000000 diff --git a/cinder/volume/drivers/fusionstorage/dsware.py b/cinder/volume/drivers/fusionstorage/dsware.py deleted file mode 100644 index 7f17fdd6f..000000000 --- a/cinder/volume/drivers/fusionstorage/dsware.py +++ /dev/null @@ -1,627 +0,0 @@ -# Copyright (c) 2013 - 2016 Huawei Technologies Co., Ltd. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. 
You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -""" -Driver for Huawei FusionStorage. -""" - -import os -import re - -from oslo_config import cfg -from oslo_log import log as logging -from oslo_service import loopingcall - -from cinder import exception -from cinder.i18n import _ -from cinder.image import image_utils -from cinder import interface -from cinder.volume import configuration -from cinder.volume import driver -from cinder.volume.drivers.fusionstorage import fspythonapi - -LOG = logging.getLogger(__name__) - -volume_opts = [ - cfg.BoolOpt('dsware_isthin', - default=False, - help='The flag of thin storage allocation.'), - cfg.StrOpt('dsware_manager', - default='', - help='Fusionstorage manager ip addr for cinder-volume.'), - cfg.StrOpt('fusionstorageagent', - default='', - help='Fusionstorage agent ip addr range.'), - cfg.StrOpt('pool_type', - default='default', - help = 'Pool type, like sata-2copy.'), - cfg.ListOpt('pool_id_filter', - default=[], - help='Pool id permit to use.'), - cfg.IntOpt('clone_volume_timeout', - default=680, - help='Create clone volume timeout.'), -] - -CONF = cfg.CONF -CONF.register_opts(volume_opts, group=configuration.SHARED_CONF_GROUP) - -OLD_VERSION = 1 -NEW_VERSION = 0 -VOLUME_ALREADY_ATTACHED = 50151401 -VOLUME_NOT_EXIST = '50150005\n' -VOLUME_BEING_DELETED = '50151002\n' -SNAP_NOT_EXIST = '50150006\n' - - -@interface.volumedriver -class DSWAREDriver(driver.VolumeDriver): - """Huawei FusionStorage Driver.""" - VERSION = '1.0' - - # ThirdPartySystems wiki page - CI_WIKI_NAME = "Huawei_FusionStorage_CI" - - DSWARE_VOLUME_CREATE_SUCCESS_STATUS = 0 - 
DSWARE_VOLUME_DUPLICATE_VOLUME = 6 - DSWARE_VOLUME_CREATING_STATUS = 7 - - def __init__(self, *args, **kwargs): - super(DSWAREDriver, self).__init__(*args, **kwargs) - self.dsware_client = fspythonapi.FSPythonApi() - self.check_cloned_interval = 2 - self.configuration.append_config_values(volume_opts) - - def check_for_setup_error(self): - # lrk: check config file here. - if not os.path.exists(fspythonapi.fsc_conf_file): - msg = _("Dsware config file not exists!") - LOG.error("Dsware config file: %s not exists!", - fspythonapi.fsc_conf_file) - raise exception.VolumeBackendAPIException(data=msg) - - def do_setup(self, context): - # lrk: create fsc_conf_file here. - conf_info = ["manage_ip=%s" % self.configuration.dsware_manager, - "\n", - "vbs_url=%s" % self.configuration.fusionstorageagent] - - fsc_dir = os.path.dirname(fspythonapi.fsc_conf_file) - if not os.path.exists(fsc_dir): - os.makedirs(fsc_dir) - - with open(fspythonapi.fsc_conf_file, 'w') as f: - f.writelines(conf_info) - - # Get pool type. - self.pool_type = self.configuration.pool_type - LOG.debug("Dsware Driver do_setup finish.") - - def _get_dsware_manage_ip(self, volume): - dsw_manager_ip = volume.provider_id - if dsw_manager_ip is not None: - return dsw_manager_ip - else: - msg = _("Dsware get manager ip failed, " - "volume provider_id is None!") - raise exception.VolumeBackendAPIException(data=msg) - - def _get_poolid_from_host(self, host): - # Host format: 'hostid@backend#poolid'. - # Other formats: return 'default', and the pool id would be zero. - if host: - if len(host.split('#', 1)) == 2: - return host.split('#')[1] - return self.pool_type - - def _create_volume(self, volume_id, volume_size, is_thin, volume_host): - pool_id = 0 - result = 1 - - # Query Dsware version. - retcode = self.dsware_client.query_dsware_version() - # Old version. - if retcode == OLD_VERSION: - pool_id = 0 - # New version. 
- elif retcode == NEW_VERSION: - pool_info = self._get_poolid_from_host(volume_host) - if pool_info != self.pool_type: - pool_id = int(pool_info) - # Query Dsware version failed! - else: - LOG.error("Query Dsware version fail!") - msg = (_("Query Dsware version failed! Retcode is %s.") % - retcode) - raise exception.VolumeBackendAPIException(data=msg) - - try: - result = self.dsware_client.create_volume( - volume_id, pool_id, volume_size, int(is_thin)) - except Exception as e: - LOG.exception("Create volume error, details is: %s.", e) - raise - - if result != 0: - msg = _("Dsware create volume failed! Result is: %s.") % result - raise exception.VolumeBackendAPIException(data=msg) - - def create_volume(self, volume): - # Creates a volume in Dsware. - LOG.debug("Begin to create volume %s in Dsware.", volume.name) - volume_id = volume.name - volume_size = volume.size - volume_host = volume.host - is_thin = self.configuration.dsware_isthin - # Change GB to MB. - volume_size *= 1024 - self._create_volume(volume_id, volume_size, is_thin, volume_host) - - dsw_manager_ip = self.dsware_client.get_manage_ip() - return {"provider_id": dsw_manager_ip} - - def _create_volume_from_snap(self, volume_id, volume_size, snapshot_name): - result = self.dsware_client.create_volume_from_snap( - volume_id, volume_size, snapshot_name) - if result != 0: - msg = (_("Dsware: create volume from snap failed. Result: %s.") % - result) - raise exception.VolumeBackendAPIException(data=msg) - - def create_volume_from_snapshot(self, volume, snapshot): - # Creates a volume from snapshot. - volume_id = volume.name - volume_size = volume.size - snapshot_name = snapshot.name - if volume_size < int(snapshot.volume_size): - msg = _("Dsware: volume size can not be less than snapshot size.") - raise exception.VolumeBackendAPIException(data=msg) - # Change GB to MB. 
- volume_size *= 1024 - self._create_volume_from_snap(volume_id, volume_size, snapshot_name) - - dsw_manager_ip = self.dsware_client.get_manage_ip() - return {"provider_id": dsw_manager_ip} - - def create_cloned_volume(self, volume, src_volume): - """Dispatcher to Dsware client to create volume from volume. - - Wait volume create finished. - """ - volume_name = volume.name - volume_size = volume.size - src_volume_name = src_volume.name - # Change GB to MB. - volume_size *= 1024 - result = self.dsware_client.create_volume_from_volume( - volume_name, volume_size, src_volume_name) - if result: - msg = _('Dsware fails to start cloning volume %s.') % volume_name - raise exception.VolumeBackendAPIException(data=msg) - - LOG.debug('Dsware create volume %(volume_name)s of size ' - '%(volume_size)s from src volume %(src_volume_name)s start.', - {"volume_name": volume_name, - "volume_size": volume_size, - "src_volume_name": src_volume_name}) - - ret = self._wait_for_create_cloned_volume_finish_timer(volume_name) - if not ret: - msg = (_('Clone volume %s failed while waiting for success.') % - volume_name) - raise exception.VolumeBackendAPIException(data=msg) - - LOG.debug('Dsware create volume from volume ends.') - - dsw_manager_ip = self.dsware_client.get_manage_ip() - return {"provider_id": dsw_manager_ip} - - def _check_create_cloned_volume_finish(self, new_volume_name): - LOG.debug('Loopcall: _check_create_cloned_volume_finish(), ' - 'volume-name: %s.', new_volume_name) - current_volume = self.dsware_client.query_volume(new_volume_name) - - if current_volume: - status = current_volume['status'] - LOG.debug('Wait clone volume %(volume_name)s, status: %(status)s.', - {"volume_name": new_volume_name, - "status": status}) - if int(status) == self.DSWARE_VOLUME_CREATING_STATUS or int( - status) == self.DSWARE_VOLUME_DUPLICATE_VOLUME: - self.count += 1 - elif int(status) == self.DSWARE_VOLUME_CREATE_SUCCESS_STATUS: - raise loopingcall.LoopingCallDone(retvalue=True) - else: - 
msg = _('Clone volume %(new_volume_name)s failed, ' - 'volume status is: %(status)s.') - LOG.error(msg, {'new_volume_name': new_volume_name, - 'status': status}) - raise loopingcall.LoopingCallDone(retvalue=False) - if self.count > self.configuration.clone_volume_timeout: - msg = _('Dsware clone volume time out. ' - 'Volume: %(new_volume_name)s, status: %(status)s') - LOG.error(msg, {'new_volume_name': new_volume_name, - 'status': current_volume['status']}) - raise loopingcall.LoopingCallDone(retvalue=False) - else: - LOG.warning('Can not find volume %s from Dsware.', - new_volume_name) - self.count += 1 - if self.count > 10: - msg = _("Dsware clone volume failed: volume " - "can not be found from Dsware.") - LOG.error(msg) - raise loopingcall.LoopingCallDone(retvalue=False) - - def _wait_for_create_cloned_volume_finish_timer(self, new_volume_name): - timer = loopingcall.FixedIntervalLoopingCall( - self._check_create_cloned_volume_finish, new_volume_name) - LOG.debug('Call _check_create_cloned_volume_finish: volume-name %s.', - new_volume_name) - self.count = 0 - ret = timer.start(interval=self.check_cloned_interval).wait() - timer.stop() - return ret - - def _analyse_output(self, out): - if out is not None: - analyse_result = {} - out_temp = out.split('\n') - for line in out_temp: - if re.search('^ret_code=', line): - analyse_result['ret_code'] = line[9:] - elif re.search('^ret_desc=', line): - analyse_result['ret_desc'] = line[9:] - elif re.search('^dev_addr=', line): - analyse_result['dev_addr'] = line[9:] - return analyse_result - else: - return None - - def _attach_volume(self, volume_name, dsw_manager_ip): - cmd = ['vbs_cli', '-c', 'attachwithip', '-v', volume_name, '-i', - dsw_manager_ip.replace('\n', ''), '-p', 0] - out, err = self._execute(*cmd, run_as_root=True) - analyse_result = self._analyse_output(out) - LOG.debug("Attach volume result is %s.", analyse_result) - return analyse_result - - def _detach_volume(self, volume_name, dsw_manager_ip): - cmd = 
['vbs_cli', '-c', 'detachwithip', '-v', volume_name, '-i', - dsw_manager_ip.replace('\n', ''), '-p', 0] - out, err = self._execute(*cmd, run_as_root=True) - analyse_result = self._analyse_output(out) - LOG.debug("Detach volume result is %s.", analyse_result) - return analyse_result - - def _query_volume_attach(self, volume_name, dsw_manager_ip): - cmd = ['vbs_cli', '-c', 'querydevwithip', '-v', volume_name, '-i', - dsw_manager_ip.replace('\n', ''), '-p', 0] - out, err = self._execute(*cmd, run_as_root=True) - analyse_result = self._analyse_output(out) - LOG.debug("Query volume attach result is %s.", analyse_result) - return analyse_result - - def copy_image_to_volume(self, context, volume, image_service, image_id): - # Copy image to volume. - # Step1: attach volume to host. - LOG.debug("Begin to copy image to volume.") - dsw_manager_ip = self._get_dsware_manage_ip(volume) - volume_attach_result = self._attach_volume(volume.name, - dsw_manager_ip) - volume_attach_path = '' - if volume_attach_result is not None and int( - volume_attach_result['ret_code']) == 0: - volume_attach_path = volume_attach_result['dev_addr'] - LOG.debug("Volume attach path is %s.", volume_attach_path) - if volume_attach_path == '': - msg = _("Host attach volume failed!") - raise exception.VolumeBackendAPIException(data=msg) - # Step2: fetch the image from image_service and write it to the - # volume. - try: - image_utils.fetch_to_raw(context, - image_service, - image_id, - volume_attach_path, - self.configuration.volume_dd_blocksize) - finally: - # Step3: detach volume from host. 
- dsw_manager_ip = self._get_dsware_manage_ip(volume) - volume_detach_result = self._detach_volume(volume.name, - dsw_manager_ip) - if volume_detach_result is not None and int( - volume_detach_result['ret_code']) != 0: - msg = (_("Dsware detach volume from host failed: %s!") % - volume_detach_result) - raise exception.VolumeBackendAPIException(data=msg) - - def copy_volume_to_image(self, context, volume, image_service, image_meta): - # Copy volume to image. - # If volume was not attached, then attach it. - - dsw_manager_ip = self._get_dsware_manage_ip(volume) - - already_attached = False - _attach_result = self._attach_volume(volume.name, dsw_manager_ip) - if _attach_result: - retcode = _attach_result['ret_code'] - if int(retcode) == VOLUME_ALREADY_ATTACHED: - already_attached = True - result = self._query_volume_attach(volume.name, - dsw_manager_ip) - if not result or int(result['ret_code']) != 0: - msg = (_("Query volume attach failed, result=%s.") % - result) - raise exception.VolumeBackendAPIException(data=msg) - - elif int(retcode) == 0: - result = _attach_result - else: - msg = (_("Attach volume to host failed " - "in copy volume to image, retcode: %s.") % - retcode) - raise exception.VolumeBackendAPIException(data=msg) - - volume_attach_path = result['dev_addr'] - - else: - msg = _("Attach_volume failed.") - LOG.error(msg) - raise exception.VolumeBackendAPIException(data=msg) - - try: - image_utils.upload_volume(context, - image_service, - image_meta, - volume_attach_path) - except Exception as e: - LOG.error("Upload volume error, details: %s.", e) - raise - finally: - if not already_attached: - self._detach_volume(volume.name, dsw_manager_ip) - - def _get_volume(self, volume_name): - result = self.dsware_client.query_volume(volume_name) - LOG.debug("Dsware query volume result is %s.", result['result']) - if result['result'] == VOLUME_NOT_EXIST: - LOG.debug("Dsware volume %s does not exist.", volume_name) - return False - elif result['result'] == 0: - return 
True - else: - msg = _("Dsware query volume %s failed!") % volume_name - raise exception.VolumeBackendAPIException(data=msg) - - def _delete_volume(self, volume_name): - # Delete volume in Dsware. - result = self.dsware_client.delete_volume(volume_name) - LOG.debug("Dsware delete volume, result is %s.", result) - if result == VOLUME_NOT_EXIST: - LOG.debug("Dsware delete volume, volume does not exist.") - return True - elif result == VOLUME_BEING_DELETED: - LOG.debug("Dsware delete volume, volume is being deleted.") - return True - elif result == 0: - return True - else: - msg = _("Dsware delete volume failed: %s!") % result - raise exception.VolumeBackendAPIException(data=msg) - - def delete_volume(self, volume): - # Delete volume. - # If volume does not exist, then return. - LOG.debug("Begin to delete volume in Dsware: %s.", volume.name) - if not self._get_volume(volume.name): - return True - - return self._delete_volume(volume.name) - - def _get_snapshot(self, snapshot_name): - snapshot_info = self.dsware_client.query_snap(snapshot_name) - LOG.debug("Get snapshot, snapshot_info is : %s.", snapshot_info) - if snapshot_info['result'] == SNAP_NOT_EXIST: - LOG.error('Snapshot: %s not found!', snapshot_name) - return False - elif snapshot_info['result'] == 0: - return True - else: - msg = _("Dsware get snapshot failed!") - raise exception.VolumeBackendAPIException(data=msg) - - def _create_snapshot(self, snapshot_id, volume_id): - LOG.debug("Create snapshot %s to Dsware.", snapshot_id) - smart_flag = 0 - res = self.dsware_client.create_snapshot(snapshot_id, - volume_id, - smart_flag) - if res != 0: - msg = _("Dsware Create Snapshot failed! 
Result: %s.") % res - raise exception.VolumeBackendAPIException(data=msg) - - def _delete_snapshot(self, snapshot_id): - LOG.debug("Delete snapshot %s to Dsware.", snapshot_id) - res = self.dsware_client.delete_snapshot(snapshot_id) - LOG.debug("Ddelete snapshot result is: %s.", res) - if res != 0: - raise exception.SnapshotIsBusy(snapshot_name=snapshot_id) - - def create_snapshot(self, snapshot): - vol_id = 'volume-%s' % snapshot.volume_id - snapshot_id = snapshot.name - if not self._get_volume(vol_id): - LOG.error('Create Snapshot, but volume: %s not found!', vol_id) - raise exception.VolumeNotFound(volume_id=vol_id) - else: - self._create_snapshot(snapshot_id, vol_id) - - def delete_snapshot(self, snapshot): - LOG.debug("Delete snapshot %s.", snapshot.name) - snapshot_id = snapshot.name - if self._get_snapshot(snapshot_id): - self._delete_snapshot(snapshot_id) - - def _calculate_pool_info(self, pool_sets): - filter = False - pools_status = [] - reserved_percentage = self.configuration.reserved_percentage - pool_id_filter = self.configuration.pool_id_filter - LOG.debug("Filtered pool id is %s.", pool_id_filter) - if pool_id_filter == []: - for pool_info in pool_sets: - pool = {} - pool['pool_name'] = pool_info['pool_id'] - pool['total_capacity_gb'] = float( - pool_info['total_capacity']) / 1024 - pool['allocated_capacity_gb'] = float( - pool_info['used_capacity']) / 1024 - pool['free_capacity_gb'] = pool['total_capacity_gb'] - pool[ - 'allocated_capacity_gb'] - pool['QoS_support'] = False - pool['reserved_percentage'] = reserved_percentage - pools_status.append(pool) - else: - for pool_info in pool_sets: - for pool_id in pool_id_filter: - if pool_id == pool_info['pool_id']: - filter = True - break - - if filter: - pool = {} - pool['pool_name'] = pool_info['pool_id'] - pool['total_capacity_gb'] = float( - pool_info['total_capacity']) / 1024 - pool['allocated_capacity_gb'] = float( - pool_info['used_capacity']) / 1024 - pool['free_capacity_gb'] = float( - 
pool['total_capacity_gb'] - pool[ - 'allocated_capacity_gb']) - pool['QoS_support'] = False - pool['reserved_percentage'] = reserved_percentage - pools_status.append(pool) - - filter = False - - return pools_status - - def _update_single_pool_info_status(self): - """Query pool info when Dsware is single-pool version.""" - status = {} - status['volume_backend_name'] = self.configuration.volume_backend_name - status['vendor_name'] = 'Open Source' - status['driver_version'] = self.VERSION - status['storage_protocol'] = 'dsware' - - status['total_capacity_gb'] = 0 - status['free_capacity_gb'] = 0 - status['reserved_percentage'] = self.configuration.reserved_percentage - status['QoS_support'] = False - pool_id = 0 - pool_info = self.dsware_client.query_pool_info(pool_id) - result = pool_info['result'] - if result == 0: - status['total_capacity_gb'] = float( - pool_info['total_capacity']) / 1024 - status['free_capacity_gb'] = (float( - pool_info['total_capacity']) - float( - pool_info['used_capacity'])) / 1024 - LOG.debug("total_capacity_gb is %s, free_capacity_gb is %s.", - status['total_capacity_gb'], - status['free_capacity_gb']) - self._stats = status - else: - self._stats = None - - def _update_multi_pool_of_same_type_status(self): - """Query info of multiple pools when Dsware is multi-pool version. - - These pools have the same pool type. - """ - status = {} - status['volume_backend_name'] = self.configuration.volume_backend_name - status['vendor_name'] = 'Open Source' - status['driver_version'] = self.VERSION - status['storage_protocol'] = 'dsware' - - (result, pool_sets) = self.dsware_client.query_pool_type( - self.pool_type) - if pool_sets == []: - self._stats = None - else: - pools_status = self._calculate_pool_info(pool_sets) - status['pools'] = pools_status - self._stats = status - - def get_volume_stats(self, refresh=False): - if refresh: - dsware_version = self.dsware_client.query_dsware_version() - # Old version. 
- if dsware_version == OLD_VERSION: - self._update_single_pool_info_status() - # New version. - elif dsware_version == NEW_VERSION: - self._update_multi_pool_of_same_type_status() - else: - msg = _("Dsware query Dsware version failed!") - LOG.error(msg) - raise exception.VolumeBackendAPIException(data=msg) - - return self._stats - - def extend_volume(self, volume, new_size): - # Extend volume in Dsware. - LOG.debug("Begin to extend volume in Dsware: %s.", volume.name) - volume_id = volume.name - if volume.size > new_size: - msg = (_("Dsware extend Volume failed! " - "New size %(new_size)s should be greater than " - "old size %(old_size)s!") - % {'new_size': new_size, - 'old_size': volume.size}) - raise exception.VolumeBackendAPIException(data=msg) - # Change GB to MB. - volume_size = new_size * 1024 - result = self.dsware_client.extend_volume(volume_id, volume_size) - if result != 0: - msg = _("Dsware extend Volume failed! Result:%s.") % result - raise exception.VolumeBackendAPIException(data=msg) - - def initialize_connection(self, volume, connector): - """Initializes the connection and returns connection info.""" - LOG.debug("Begin initialize connection.") - - properties = {} - properties['volume_name'] = volume.name - properties['volume'] = volume - properties['dsw_manager_ip'] = self._get_dsware_manage_ip(volume) - - LOG.debug("End initialize connection with properties:%s.", properties) - - return {'driver_volume_type': 'dsware', - 'data': properties} - - def terminate_connection(self, volume, connector, force=False, **kwargs): - pass - - def create_export(self, context, volume, connector): - pass - - def ensure_export(self, context, volume): - pass - - def remove_export(self, context, volume): - pass diff --git a/cinder/volume/drivers/fusionstorage/fspythonapi.py b/cinder/volume/drivers/fusionstorage/fspythonapi.py deleted file mode 100644 index a88687028..000000000 --- a/cinder/volume/drivers/fusionstorage/fspythonapi.py +++ /dev/null @@ -1,495 +0,0 @@ -# 
Copyright (c) 2013 - 2016 Huawei Technologies Co., Ltd. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -""" -Volume api for FusionStorage systems. -""" - -import os -import re -import six - -from oslo_log import log as logging - -from cinder import utils - -LOG = logging.getLogger(__name__) -fsc_conf_file = "/etc/cinder/volumes/fsc_conf" -fsc_cli = "fsc_cli" -fsc_ip = [] -fsc_port = '10519' -manage_ip = "127.0.0.1" -CMD_BIN = fsc_cli - -volume_info = { - 'result': '', - 'vol_name': '', - 'father_name': '', - 'status': '', - 'vol_size': '', - 'real_size': '', - 'pool_id': '', - 'create_time': ''} - - -snap_info = { - 'result': '', - 'snap_name': '', - 'father_name': '', - 'status': '', - 'snap_size': '', - 'real_size': '', - 'pool_id': '', - 'delete_priority': '', - 'create_time': ''} - - -pool_info = { - 'result': '', - 'pool_id': '', - 'total_capacity': '', - 'used_capacity': '', - 'alloc_capacity': ''} - - -class FSPythonApi(object): - - def __init__(self): - LOG.debug("FSPythonApi init.") - self.get_ip_port() - self.res_idx = len('result=') - - def get_ip_port(self): - LOG.debug("File fsc_conf_file is %s.", fsc_conf_file) - if os.path.exists(fsc_conf_file): - try: - fsc_file = open(fsc_conf_file, 'r') - full_txt = fsc_file.readlines() - LOG.debug("Full_txt is %s.", full_txt) - for line in full_txt: - if re.search('^vbs_url=', line): - tmp_vbs_url = line[8:] - return re.split(',', tmp_vbs_url) - except Exception as e: - LOG.debug("Get fsc ip 
failed, error=%s.", e) - finally: - fsc_file.close() - else: - LOG.debug("Fsc conf file not exist, file_name=%s.", fsc_conf_file) - - def get_manage_ip(self): - LOG.debug("File fsc_conf_file is %s.", fsc_conf_file) - if os.path.exists(fsc_conf_file): - try: - fsc_file = open(fsc_conf_file, 'r') - full_txt = fsc_file.readlines() - for line in full_txt: - if re.search('^manage_ip=', line): - manage_ip = line[len('manage_ip='):] - manage_ip = manage_ip.strip('\n') - return manage_ip - except Exception as e: - LOG.debug("Get manage ip failed, error=%s.", e) - finally: - fsc_file.close() - else: - LOG.debug("Fsc conf file not exist, file_name=%s.", fsc_conf_file) - - def get_dsw_manage_ip(self): - return manage_ip - - def start_execute_cmd(self, cmd, full_result_flag): - fsc_ip = self.get_ip_port() - manage_ip = self.get_manage_ip() - ip_num = len(fsc_ip) - - LOG.debug("fsc_ip is %s", fsc_ip) - - if ip_num <= 0: - return None - - if ip_num > 3: - ip_num = 3 - - exec_result = '' - result = '' - if full_result_flag: - for ip in fsc_ip: - cmd_args = [CMD_BIN, '--manage_ip', manage_ip.replace( - '\n', ''), '--ip', ip.replace('\n', '')] + cmd.split() - LOG.debug("Dsware cmd_args is %s.", cmd_args) - - exec_result, err = utils.execute(*cmd_args, run_as_root=True) - exec_result = exec_result.split('\n') - LOG.debug("Result is %s.", exec_result) - if exec_result: - for line in exec_result: - if re.search('^result=0', line): - return exec_result - elif re.search('^result=50150007', line): - return 'result=0' - elif re.search('^result=50150008', line): - return 'result=0' - elif re.search('^result=50', line): - return exec_result - return exec_result - else: - for ip in fsc_ip: - cmd_args = [CMD_BIN, '--manage_ip', manage_ip.replace( - '\n', ''), '--ip', ip.replace('\n', '')] + cmd.split() - LOG.debug("Dsware cmd_args is %s.", cmd_args) - - exec_result, err = utils.execute(*cmd_args, run_as_root=True) - LOG.debug("Result is %s.", exec_result) - exec_result = 
exec_result.split('\n') - if exec_result: - for line in exec_result: - if re.search('^result=', line): - result = line - if re.search('^result=0', line): - return line - elif re.search('^result=50150007', line): - return 'result=0' - elif re.search('^result=50150008', line): - return 'result=0' - elif re.search('^result=50', line): - return line - return result - - def create_volume(self, vol_name, pool_id, vol_size, thin_flag): - cmd = '--op createVolume' + ' ' + '--volName' + ' ' + six.text_type( - vol_name) + ' ' + '--poolId' + ' ' + six.text_type( - pool_id) + ' ' + '--volSize' + ' ' + six.text_type( - vol_size) + ' ' + '--thinFlag' + ' ' + six.text_type(thin_flag) - - exec_result = self.start_execute_cmd(cmd, 0) - if exec_result: - if re.search('^result=0', exec_result): - return 0 - else: - return exec_result[self.res_idx:] - else: - return 1 - - def extend_volume(self, vol_name, new_vol_size): - cmd = '' - cmd = '--op expandVolume' + ' ' + '--volName' + ' ' + six.text_type( - vol_name) + ' ' + '--volSize' + ' ' + six.text_type(new_vol_size) - - exec_result = self.start_execute_cmd(cmd, 0) - if exec_result: - if re.search('^result=0', exec_result): - return 0 - else: - return exec_result[self.res_idx:] - else: - return 1 - - def create_volume_from_snap(self, vol_name, vol_size, snap_name): - cmd = ('--op createVolumeFromSnap' + ' ') + ( - '--volName' + ' ') + six.text_type( - vol_name) + ' ' + '--snapNameSrc' + ' ' + six.text_type( - snap_name) + ' ' + '--volSize' + ' ' + six.text_type(vol_size) - - exec_result = self.start_execute_cmd(cmd, 0) - if exec_result: - if re.search('^result=0', exec_result): - return 0 - else: - return exec_result[self.res_idx:] - else: - return 1 - - def create_fullvol_from_snap(self, vol_name, snap_name): - cmd = ('--op createFullVolumeFromSnap' + ' ') + ( - '--volName' + ' ') + six.text_type( - vol_name) + ' ' + '--snapName' + ' ' + six.text_type(snap_name) - - exec_result = self.start_execute_cmd(cmd, 0) - if exec_result: - if 
re.search('^result=0', exec_result): - return 0 - else: - return exec_result[self.res_idx:] - else: - return 1 - - def create_volume_from_volume(self, vol_name, vol_size, src_vol_name): - retcode = 1 - tmp_snap_name = six.text_type(vol_name) + '_tmp_snap' - - retcode = self.create_snapshot(tmp_snap_name, src_vol_name, 0) - if 0 != retcode: - return retcode - - retcode = self.create_volume(vol_name, 0, vol_size, 0) - if 0 != retcode: - self.delete_snapshot(tmp_snap_name) - return retcode - - retcode = self.create_fullvol_from_snap(vol_name, tmp_snap_name) - if 0 != retcode: - self.delete_snapshot(tmp_snap_name) - self.delete_volume(vol_name) - return retcode - - return 0 - - def create_clone_volume_from_volume(self, vol_name, - vol_size, src_vol_name): - retcode = 1 - tmp_snap_name = six.text_type(src_vol_name) + '_DT_clnoe_snap' - - retcode = self.create_snapshot(tmp_snap_name, src_vol_name, 0) - if 0 != retcode: - return retcode - - retcode = self.create_volume_from_snap( - vol_name, vol_size, tmp_snap_name) - if 0 != retcode: - return retcode - - return 0 - - def volume_info_analyze(self, vol_info): - local_volume_info = volume_info - - if not vol_info: - local_volume_info['result'] = 1 - return local_volume_info - - local_volume_info['result'] = 0 - - vol_info_list = [] - vol_info_list = re.split(',', vol_info) - for line in vol_info_list: - line = line.replace('\n', '') - if re.search('^vol_name=', line): - local_volume_info['vol_name'] = line[len('vol_name='):] - elif re.search('^father_name=', line): - local_volume_info['father_name'] = line[len('father_name='):] - elif re.search('^status=', line): - local_volume_info['status'] = line[len('status='):] - elif re.search('^vol_size=', line): - local_volume_info['vol_size'] = line[len('vol_size='):] - elif re.search('^real_size=', line): - local_volume_info['real_size'] = line[len('real_size='):] - elif re.search('^pool_id=', line): - local_volume_info['pool_id'] = line[len('pool_id='):] - elif 
re.search('^create_time=', line): - local_volume_info['create_time'] = line[len('create_time='):] - else: - LOG.error("Analyze key not exist, key=%s.", line) - return local_volume_info - - def query_volume(self, vol_name): - tmp_volume_info = volume_info - cmd = '--op queryVolume' + ' ' + '--volName' + ' ' + vol_name - - exec_result = self.start_execute_cmd(cmd, 1) - if exec_result: - for line in exec_result: - if re.search('^result=', line): - if not re.search('^result=0', line): - tmp_volume_info['result'] = line[self.res_idx:] - return tmp_volume_info - for line in exec_result: - if re.search('^vol_name=' + vol_name, line): - tmp_volume_info = self.volume_info_analyze(line) - if six.text_type(0) == tmp_volume_info['status']: - tmp_snap_name = six.text_type( - vol_name) + '_tmp_snap' - self.delete_snapshot(tmp_snap_name) - return tmp_volume_info - - tmp_volume_info['result'] = 1 - return tmp_volume_info - - def delete_volume(self, vol_name): - cmd = '--op deleteVolume' + ' ' + '--volName' + ' ' + vol_name - - exec_result = self.start_execute_cmd(cmd, 0) - if exec_result: - if re.search('^result=0', exec_result): - return 0 - else: - return exec_result[self.res_idx:] - else: - return 1 - - def create_snapshot(self, snap_name, vol_name, smart_flag): - cmd = '--op createSnapshot' + ' ' + '--volName' + ' ' + six.text_type( - vol_name) + ' ' + '--snapName' + ' ' + six.text_type( - snap_name) + ' ' + '--smartFlag' + ' ' + six.text_type(smart_flag) - - exec_result = self.start_execute_cmd(cmd, 0) - if exec_result: - if re.search('^result=0', exec_result): - return 0 - else: - return exec_result[self.res_idx:] - else: - return 1 - - def snap_info_analyze(self, info): - local_snap_info = snap_info.copy() - - if not info: - local_snap_info['result'] = 1 - return local_snap_info - - local_snap_info['result'] = 0 - - snap_info_list = [] - snap_info_list = re.split(',', info) - for line in snap_info_list: - line = line.replace('\n', '') - if re.search('^snap_name=', line): - 
local_snap_info['snap_name'] = line[len('snap_name='):] - elif re.search('^father_name=', line): - local_snap_info['father_name'] = line[len('father_name='):] - elif re.search('^status=', line): - local_snap_info['status'] = line[len('status='):] - elif re.search('^snap_size=', line): - local_snap_info['snap_size'] = line[len('snap_size='):] - elif re.search('^real_size=', line): - local_snap_info['real_size'] = line[len('real_size='):] - elif re.search('^pool_id=', line): - local_snap_info['pool_id'] = line[len('pool_id='):] - elif re.search('^delete_priority=', line): - local_snap_info['delete_priority'] = line[ - len('delete_priority='):] - elif re.search('^create_time=', line): - local_snap_info['create_time'] = line[len('create_time='):] - else: - LOG.error("Analyze key not exist, key=%s.", line) - - return local_snap_info - - def query_snap(self, snap_name): - tmp_snap_info = snap_info.copy() - cmd = '--op querySnapshot' + ' ' + '--snapName' + ' ' + snap_name - - exec_result = self.start_execute_cmd(cmd, 1) - if exec_result: - for line in exec_result: - if re.search('^result=', line): - if not re.search('^result=0', line): - tmp_snap_info['result'] = line[self.res_idx:] - return tmp_snap_info - for line in exec_result: - if re.search('^snap_name=' + snap_name, line): - tmp_snap_info = self.snap_info_analyze(line) - return tmp_snap_info - - tmp_snap_info['result'] = 1 - return tmp_snap_info - - def delete_snapshot(self, snap_name): - cmd = '--op deleteSnapshot' + ' ' + '--snapName' + ' ' + snap_name - - exec_result = self.start_execute_cmd(cmd, 0) - if exec_result: - if re.search('^result=0', exec_result): - return 0 - else: - return exec_result[self.res_idx:] - else: - return 1 - - def pool_info_analyze(self, info): - local_pool_info = pool_info.copy() - - if not info: - local_pool_info['result'] = 1 - return local_pool_info - - local_pool_info['result'] = 0 - - pool_info_list = [] - pool_info_list = re.split(',', info) - for line in pool_info_list: - line = 
line.replace('\n', '') - if re.search('^pool_id=', line): - local_pool_info['pool_id'] = line[len('pool_id='):] - elif re.search('^total_capacity=', line): - local_pool_info['total_capacity'] = line[ - len('total_capacity='):] - elif re.search('^used_capacity=', line): - local_pool_info['used_capacity'] = line[len('used_capacity='):] - elif re.search('^alloc_capacity=', line): - local_pool_info['alloc_capacity'] = line[ - len('alloc_capacity='):] - else: - LOG.error("Analyze key not exist, key=%s.", line) - return local_pool_info - - def query_pool_info(self, pool_id): - tmp_pool_info = pool_info.copy() - cmd = '--op queryPoolInfo' + ' ' + '--poolId' + ' ' + six.text_type( - pool_id) - LOG.debug("Pool id is %s.", pool_id) - exec_result = self.start_execute_cmd(cmd, 1) - if exec_result: - for line in exec_result: - if re.search('^result=', line): - if not re.search('^result=0', line): - tmp_pool_info['result'] = line[self.res_idx:] - return tmp_pool_info - for line in exec_result: - if re.search('^pool_id=' + six.text_type(pool_id), - line): - tmp_pool_info = self.pool_info_analyze(line) - return tmp_pool_info - - tmp_pool_info['result'] = 1 - return tmp_pool_info - - def query_pool_type(self, pool_type): - pool_list = [] - tmp_pool_info = {} - result = 0 - cmd = '' - cmd = '--op queryPoolType --poolType' + ' ' + pool_type - LOG.debug("Query poolType: %s.", pool_type) - exec_result = self.start_execute_cmd(cmd, 1) - if exec_result: - for line in exec_result: - line = line.replace('\n', '') - if re.search('^result=', line): - if not re.search('^result=0', line): - result = int(line[self.res_idx:]) - break - for one_line in exec_result: - if re.search('^pool_id=', one_line): - tmp_pool_info = self.pool_info_analyze(one_line) - pool_list.append(tmp_pool_info) - break - return (result, pool_list) - - def query_dsware_version(self): - retcode = 2 - cmd = '--op getDSwareIdentifier' - exec_result = self.start_execute_cmd(cmd, 0) - if exec_result: - # New version. 
- if re.search('^result=0', exec_result): - retcode = 0 - # Old version. - elif re.search('^result=50500001', exec_result): - retcode = 1 - # Failed! - else: - retcode = exec_result[self.res_idx:] - return retcode diff --git a/cinder/volume/drivers/hgst.py b/cinder/volume/drivers/hgst.py deleted file mode 100644 index 26bbb81ba..000000000 --- a/cinder/volume/drivers/hgst.py +++ /dev/null @@ -1,607 +0,0 @@ -# Copyright 2015 HGST -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -""" -Desc : Driver to store Cinder volumes using HGST Flash Storage Suite -Require : HGST Flash Storage Suite -Author : Earle F. Philhower, III -""" - -import grp -import json -import math -import os -import pwd -import six -import socket -import string - -from oslo_concurrency import lockutils -from oslo_concurrency import processutils -from oslo_config import cfg -from oslo_log import log as logging -from oslo_utils import units - -from cinder import exception -from cinder.i18n import _ -from cinder.image import image_utils -from cinder import interface -from cinder.volume import configuration -from cinder.volume import driver -from cinder.volume import utils as volutils - -LOG = logging.getLogger(__name__) - -hgst_opts = [ - cfg.StrOpt('hgst_net', - default='Net 1 (IPv4)', - help='Space network name to use for data transfer'), - cfg.StrOpt('hgst_storage_servers', - default='os:gbd0', - help='Comma separated list of Space storage servers:devices. 
' - 'ex: os1_stor:gbd0,os2_stor:gbd0'), - cfg.StrOpt('hgst_redundancy', - default='0', - help='Should spaces be redundantly stored (1/0)'), - cfg.StrOpt('hgst_space_user', - default='root', - help='User to own created spaces'), - cfg.StrOpt('hgst_space_group', - default='disk', - help='Group to own created spaces'), - cfg.StrOpt('hgst_space_mode', - default='0600', - help='UNIX mode for created spaces'), -] - - -CONF = cfg.CONF -CONF.register_opts(hgst_opts, group=configuration.SHARED_CONF_GROUP) - - -@interface.volumedriver -class HGSTDriver(driver.VolumeDriver): - """This is the Class to set in cinder.conf (volume_driver). - - Implements a Cinder Volume driver which creates a HGST Space for each - Cinder Volume or Snapshot requested. Use the vgc-cluster CLI to do - all management operations. - - The Cinder host will nominally have all Spaces made visible to it, - while individual compute nodes will only have Spaces connected to KVM - instances connected. - """ - - VERSION = '1.0.0' - - # ThirdPartySystems wiki page - CI_WIKI_NAME = "HGST_Solutions_CI" - - VGCCLUSTER = 'vgc-cluster' - SPACEGB = units.G - 16 * units.M # Workaround for shrinkage Bug 28320 - BLOCKED = "BLOCKED" # Exit code when a command is blocked - - def __init__(self, *args, **kwargs): - """Initialize our protocol descriptor/etc.""" - super(HGSTDriver, self).__init__(*args, **kwargs) - self.configuration.append_config_values(hgst_opts) - self._vgc_host = None - self.check_for_setup_error() - self._stats = {'driver_version': self.VERSION, - 'reserved_percentage': 0, - 'storage_protocol': 'hgst', - 'total_capacity_gb': 'unknown', - 'free_capacity_gb': 'unknown', - 'vendor_name': 'HGST', - } - backend_name = self.configuration.safe_get('volume_backend_name') - self._stats['volume_backend_name'] = backend_name or 'hgst' - self.update_volume_stats() - - def _log_cli_err(self, err): - """Dumps the full command output to a logfile in error cases.""" - LOG.error("CLI fail: '%(cmd)s' = %(code)s\nout: 
%(stdout)s\n" - "err: %(stderr)s", - {'cmd': err.cmd, 'code': err.exit_code, - 'stdout': err.stdout, 'stderr': err.stderr}) - - def _find_vgc_host(self): - """Finds vgc-cluster hostname for this box.""" - params = [self.VGCCLUSTER, "domain-list", "-1"] - try: - out, unused = self._execute(*params, run_as_root=True) - except processutils.ProcessExecutionError as err: - self._log_cli_err(err) - msg = _("Unable to get list of domain members, check that " - "the cluster is running.") - raise exception.VolumeDriverException(message=msg) - domain = out.splitlines() - params = ["ip", "addr", "list"] - try: - out, unused = self._execute(*params, run_as_root=False) - except processutils.ProcessExecutionError as err: - self._log_cli_err(err) - msg = _("Unable to get list of IP addresses on this host, " - "check permissions and networking.") - raise exception.VolumeDriverException(message=msg) - nets = out.splitlines() - for host in domain: - try: - ip = socket.gethostbyname(host) - for l in nets: - x = l.strip() - if x.startswith("inet %s/" % ip): - return host - except socket.error: - pass - msg = _("Current host isn't part of HGST domain.") - raise exception.VolumeDriverException(message=msg) - - def _hostname(self): - """Returns hostname to use for cluster operations on this box.""" - if self._vgc_host is None: - self._vgc_host = self._find_vgc_host() - return self._vgc_host - - def _make_server_list(self): - """Converts a comma list into params for use by HGST CLI.""" - csv = self.configuration.safe_get('hgst_storage_servers') - servers = csv.split(",") - params = [] - for server in servers: - params.append('-S') - params.append(six.text_type(server)) - return params - - def _make_space_name(self, name): - """Generates the hashed name for the space from the name. - - This must be called in a locked context as there are race conditions - where 2 contexts could both pick what they think is an unallocated - space name, and fail later on due to that conflict. 
- """ - # Sanitize the name string - valid_chars = "-_.%s%s" % (string.ascii_letters, string.digits) - name = ''.join(c for c in name if c in valid_chars) - name = name.strip(".") # Remove any leading .s from evil users - name = name or "space" # In case of all illegal chars, safe default - # Start out with just the name, truncated to 14 characters - outname = name[0:13] - # See what names already defined - params = [self.VGCCLUSTER, "space-list", "--name-only"] - try: - out, unused = self._execute(*params, run_as_root=True) - except processutils.ProcessExecutionError as err: - self._log_cli_err(err) - msg = _("Unable to get list of spaces to make new name. Please " - "verify the cluster is running.") - raise exception.VolumeDriverException(message=msg) - names = out.splitlines() - # And anything in /dev/* is also illegal - names += os.listdir("/dev") # Do it the Python way! - names += ['.', '..'] # Not included above - # While there's a conflict, add incrementing digits until it passes - itr = 0 - while outname in names: - itrstr = six.text_type(itr) - outname = outname[0:13 - len(itrstr)] + itrstr - itr += 1 - return outname - - def _get_space_size_redundancy(self, space_name): - """Parse space output to get allocated size and redundancy.""" - params = [self.VGCCLUSTER, "space-list", "-n", space_name, "--json"] - try: - out, unused = self._execute(*params, run_as_root=True) - except processutils.ProcessExecutionError as err: - self._log_cli_err(err) - msg = _("Unable to get information on space %(space)s, please " - "verify that the cluster is running and " - "connected.") % {'space': space_name} - raise exception.VolumeDriverException(message=msg) - ret = json.loads(out) - retval = {} - retval['redundancy'] = int(ret['resources'][0]['redundancy']) - retval['sizeBytes'] = int(ret['resources'][0]['sizeBytes']) - return retval - - def _adjust_size_g(self, size_g): - """Adjust space size to next legal value because of redundancy.""" - # Extending requires expanding 
to a multiple of the # of - # storage hosts in the cluster - count = len(self._make_server_list()) // 2 # Remove -s from count - if size_g % count: - size_g = int(size_g + count) - size_g -= size_g % count - return int(math.ceil(size_g)) - - def do_setup(self, context): - pass - - def _get_space_name(self, volume): - """Pull name of /dev/ from the provider_id.""" - try: - return volume.get('provider_id') - except Exception: - return '' # Some error during create, may be able to continue - - def _handle_blocked(self, err, msg): - """Safely handle a return code of BLOCKED from a cluster command. - - Handle the case where a command is in BLOCKED state by trying to - cancel it. If the cancel fails, then the command actually did - complete. If the cancel succeeds, then throw the original error - back up the stack. - """ - if (err.stdout is not None) and (self.BLOCKED in err.stdout): - # Command is queued but did not complete in X seconds, so - # we will cancel it to keep things sane. - request = err.stdout.split('\n', 1)[0].strip() - params = [self.VGCCLUSTER, 'request-cancel'] - params += ['-r', six.text_type(request)] - throw_err = False - try: - self._execute(*params, run_as_root=True) - # Cancel succeeded, the command was aborted - # Send initial exception up the stack - LOG.error("VGC-CLUSTER command blocked and cancelled.") - # Can't throw it here, the except below would catch it! - throw_err = True - except Exception: - # The cancel failed because the command was just completed. 
- # That means there was no failure, so continue with Cinder op - pass - if throw_err: - self._log_cli_err(err) - msg = _("Command %(cmd)s blocked in the CLI and was " - "cancelled") % {'cmd': six.text_type(err.cmd)} - raise exception.VolumeDriverException(message=msg) - else: - # Some other error, just throw it up the chain - self._log_cli_err(err) - raise exception.VolumeDriverException(message=msg) - - def _add_cinder_apphost(self, spacename): - """Add this host to the apphost list of a space.""" - # Connect to source volume - params = [self.VGCCLUSTER, 'space-set-apphosts'] - params += ['-n', spacename] - params += ['-A', self._hostname()] - params += ['--action', 'ADD'] # Non-error to add already existing - try: - self._execute(*params, run_as_root=True) - except processutils.ProcessExecutionError as err: - msg = _("Unable to add Cinder host to apphosts for space " - "%(space)s") % {'space': spacename} - self._handle_blocked(err, msg) - - @lockutils.synchronized('devices', 'cinder-hgst-') - def create_volume(self, volume): - """API entry to create a volume on the cluster as a HGST space. - - Creates a volume, adjusting for GiB/GB sizing. Locked to ensure we - don't have race conditions on the name we pick to use for the space. 
- """ - # For ease of deugging, use friendly name if it exists - volname = self._make_space_name(volume['display_name'] - or volume['name']) - volnet = self.configuration.safe_get('hgst_net') - volbytes = volume['size'] * units.Gi # OS=Base2, but HGST=Base10 - volsize_gb_cinder = int(math.ceil(float(volbytes) / - float(self.SPACEGB))) - volsize_g = self._adjust_size_g(volsize_gb_cinder) - params = [self.VGCCLUSTER, 'space-create'] - params += ['-n', six.text_type(volname)] - params += ['-N', six.text_type(volnet)] - params += ['-s', six.text_type(volsize_g)] - params += ['--redundancy', six.text_type( - self.configuration.safe_get('hgst_redundancy'))] - params += ['--user', six.text_type( - self.configuration.safe_get('hgst_space_user'))] - params += ['--group', six.text_type( - self.configuration.safe_get('hgst_space_group'))] - params += ['--mode', six.text_type( - self.configuration.safe_get('hgst_space_mode'))] - params += self._make_server_list() - params += ['-A', self._hostname()] # Make it visible only here - try: - self._execute(*params, run_as_root=True) - except processutils.ProcessExecutionError as err: - msg = _("Error in space-create for %(space)s of size " - "%(size)d GB") % {'space': volname, - 'size': int(volsize_g)} - self._handle_blocked(err, msg) - # Stash away the hashed name - provider = {} - provider['provider_id'] = volname - return provider - - def update_volume_stats(self): - """Parse the JSON output of vgc-cluster to find space available.""" - params = [self.VGCCLUSTER, "host-storage", "--json"] - try: - out, unused = self._execute(*params, run_as_root=True) - ret = json.loads(out) - cap = ret["totalCapacityBytes"] // units.Gi - used = ret["totalUsedBytes"] // units.Gi - avail = cap - used - if int(self.configuration.safe_get('hgst_redundancy')) == 1: - cap = cap // 2 - avail = avail // 2 - # Reduce both by 1 GB due to BZ 28320 - if cap > 0: - cap = cap - 1 - if avail > 0: - avail = avail - 1 - except processutils.ProcessExecutionError as 
err: - # Could be cluster still starting up, return unknown for now - LOG.warning("Unable to poll cluster free space.") - self._log_cli_err(err) - cap = 'unknown' - avail = 'unknown' - self._stats['free_capacity_gb'] = avail - self._stats['total_capacity_gb'] = cap - self._stats['reserved_percentage'] = 0 - - def get_volume_stats(self, refresh=False): - """Return Volume statistics, potentially cached copy.""" - if refresh: - self.update_volume_stats() - return self._stats - - def create_cloned_volume(self, volume, src_vref): - """Create a cloned volume from an existing one. - - No cloning operation in the current release so simply copy using - DD to a new space. This could be a lengthy operation. - """ - # Connect to source volume - volname = self._get_space_name(src_vref) - self._add_cinder_apphost(volname) - - # Make new volume - provider = self.create_volume(volume) - self._add_cinder_apphost(provider['provider_id']) - - # And copy original into it... - info = self._get_space_size_redundancy(volname) - volutils.copy_volume( - self.local_path(src_vref), - "/dev/" + provider['provider_id'], - info['sizeBytes'] // units.Mi, - self.configuration.volume_dd_blocksize, - execute=self._execute) - - # That's all, folks! 
- return provider - - def copy_image_to_volume(self, context, volume, image_service, image_id): - """Fetch the image from image_service and write it to the volume.""" - image_utils.fetch_to_raw(context, - image_service, - image_id, - self.local_path(volume), - self.configuration.volume_dd_blocksize, - size=volume['size']) - - def copy_volume_to_image(self, context, volume, image_service, image_meta): - """Copy the volume to the specified image.""" - image_utils.upload_volume(context, - image_service, - image_meta, - self.local_path(volume)) - - def delete_volume(self, volume): - """Delete a Volume's underlying space.""" - volname = self._get_space_name(volume) - if volname: - params = [self.VGCCLUSTER, 'space-delete'] - params += ['-n', six.text_type(volname)] - # This can fail benignly when we are deleting a snapshot - try: - self._execute(*params, run_as_root=True) - except processutils.ProcessExecutionError as err: - LOG.warning("Unable to delete space %(space)s", - {'space': volname}) - self._log_cli_err(err) - else: - # This can be benign when we are deleting a snapshot - LOG.warning("Attempted to delete a space that's not there.") - - def _check_host_storage(self, server): - if ":" not in server: - msg = _("hgst_storage server %(svr)s not of format " - ":") % {'svr': server} - raise exception.VolumeDriverException(message=msg) - h, b = server.split(":") - try: - params = [self.VGCCLUSTER, 'host-storage', '-h', h] - self._execute(*params, run_as_root=True) - except processutils.ProcessExecutionError as err: - self._log_cli_err(err) - msg = _("Storage host %(svr)s not detected, verify " - "name") % {'svr': six.text_type(server)} - raise exception.VolumeDriverException(message=msg) - - def check_for_setup_error(self): - """Throw an exception if configuration values/setup isn't okay.""" - # Verify vgc-cluster exists and is executable by cinder user - try: - params = [self.VGCCLUSTER, '--version'] - self._execute(*params, run_as_root=True) - except 
processutils.ProcessExecutionError as err: - self._log_cli_err(err) - msg = _("Cannot run vgc-cluster command, please ensure software " - "is installed and permissions are set properly.") - raise exception.VolumeDriverException(message=msg) - - # Checks the host is identified with the HGST domain, as well as - # that vgcnode and vgcclustermgr services are running. - self._vgc_host = None - self._hostname() - - # Redundancy better be 0 or 1, otherwise no comprendo - r = six.text_type(self.configuration.safe_get('hgst_redundancy')) - if r not in ["0", "1"]: - msg = _("hgst_redundancy must be set to 0 (non-HA) or 1 (HA) in " - "cinder.conf.") - raise exception.VolumeDriverException(message=msg) - - # Verify user and group exist or we can't connect volumes - try: - pwd.getpwnam(self.configuration.safe_get('hgst_space_user')) - grp.getgrnam(self.configuration.safe_get('hgst_space_group')) - except KeyError as err: - msg = _("hgst_group %(grp)s and hgst_user %(usr)s must map to " - "valid users/groups in cinder.conf") % { - 'grp': self.configuration.safe_get('hgst_space_group'), - 'usr': self.configuration.safe_get('hgst_space_user')} - raise exception.VolumeDriverException(message=msg) - - # Verify mode is a nicely formed octal or integer - try: - int(self.configuration.safe_get('hgst_space_mode')) - except Exception as err: - msg = _("hgst_space_mode must be an octal/int in cinder.conf") - raise exception.VolumeDriverException(message=msg) - - # Validate network maps to something we know about - try: - params = [self.VGCCLUSTER, 'network-list'] - params += ['-N', self.configuration.safe_get('hgst_net')] - self._execute(*params, run_as_root=True) - except processutils.ProcessExecutionError as err: - self._log_cli_err(err) - msg = _("hgst_net %(net)s specified in cinder.conf not found " - "in cluster") % { - 'net': self.configuration.safe_get('hgst_net')} - raise exception.VolumeDriverException(message=msg) - - # Storage servers require us to split them up and check for 
- sl = self.configuration.safe_get('hgst_storage_servers') - if (sl is None) or (six.text_type(sl) == ""): - msg = _("hgst_storage_servers must be defined in cinder.conf") - raise exception.VolumeDriverException(message=msg) - servers = sl.split(",") - # Each server must be of the format : w/host in domain - for server in servers: - self._check_host_storage(server) - - # We made it here, we should be good to go! - return True - - def create_snapshot(self, snapshot): - """Create a snapshot volume. - - We don't yet support snaps in SW so make a new volume and dd the - source one into it. This could be a lengthy operation. - """ - origvol = {} - origvol['name'] = snapshot['volume_name'] - origvol['size'] = snapshot['volume_size'] - origvol['id'] = snapshot['volume_id'] - origvol['provider_id'] = snapshot.get('volume').get('provider_id') - # Add me to the apphosts so I can see the volume - self._add_cinder_apphost(self._get_space_name(origvol)) - - # Make snapshot volume - snapvol = {} - snapvol['display_name'] = snapshot['display_name'] - snapvol['name'] = snapshot['name'] - snapvol['size'] = snapshot['volume_size'] - snapvol['id'] = snapshot['id'] - provider = self.create_volume(snapvol) - # Create_volume attaches the volume to this host, ready to snapshot. - # Copy it using dd for now, we don't have real snapshots - # We need to copy the entire allocated volume space, Nova will allow - # full access, even beyond requested size (when our volume is larger - # due to our ~1B byte alignment or cluster makeup) - info = self._get_space_size_redundancy(origvol['provider_id']) - volutils.copy_volume( - self.local_path(origvol), - "/dev/" + provider['provider_id'], - info['sizeBytes'] // units.Mi, - self.configuration.volume_dd_blocksize, - execute=self._execute) - return provider - - def delete_snapshot(self, snapshot): - """Delete a snapshot. 
For now, snapshots are full volumes.""" - self.delete_volume(snapshot) - - def create_volume_from_snapshot(self, volume, snapshot): - """Create volume from a snapshot, but snaps still full volumes.""" - return self.create_cloned_volume(volume, snapshot) - - def extend_volume(self, volume, new_size): - """Extend an existing volume. - - We may not actually need to resize the space because it's size is - always rounded up to a function of the GiB/GB and number of storage - nodes. - """ - volname = self._get_space_name(volume) - info = self._get_space_size_redundancy(volname) - volnewbytes = new_size * units.Gi - new_size_g = math.ceil(float(volnewbytes) / float(self.SPACEGB)) - wantedsize_g = self._adjust_size_g(new_size_g) - havesize_g = (info['sizeBytes'] // self.SPACEGB) - if havesize_g >= wantedsize_g: - return # Already big enough, happens with redundancy - else: - # Have to extend it - delta = int(wantedsize_g - havesize_g) - params = [self.VGCCLUSTER, 'space-extend'] - params += ['-n', six.text_type(volname)] - params += ['-s', six.text_type(delta)] - params += self._make_server_list() - try: - self._execute(*params, run_as_root=True) - except processutils.ProcessExecutionError as err: - msg = _("Error in space-extend for volume %(space)s with " - "%(size)d additional GB") % {'space': volname, - 'size': delta} - self._handle_blocked(err, msg) - - def initialize_connection(self, volume, connector): - """Return connection information. - - Need to return noremovehost so that the Nova host - doesn't accidentally remove us from the apphost list if it is - running on the same host (like in devstack testing). 
- """ - hgst_properties = {'name': volume['provider_id'], - 'noremovehost': self._hostname()} - return {'driver_volume_type': 'hgst', - 'data': hgst_properties} - - def local_path(self, volume): - """Query the provider_id to figure out the proper devnode.""" - return "/dev/" + self._get_space_name(volume) - - def create_export(self, context, volume, connector): - # Not needed for spaces - pass - - def remove_export(self, context, volume): - # Not needed for spaces - pass - - def terminate_connection(self, volume, connector, **kwargs): - # Not needed for spaces - pass - - def ensure_export(self, context, volume): - # Not needed for spaces - pass diff --git a/cinder/volume/drivers/hitachi/__init__.py b/cinder/volume/drivers/hitachi/__init__.py deleted file mode 100644 index e69de29bb..000000000 diff --git a/cinder/volume/drivers/hitachi/hbsd_basiclib.py b/cinder/volume/drivers/hitachi/hbsd_basiclib.py deleted file mode 100644 index 9b22e2173..000000000 --- a/cinder/volume/drivers/hitachi/hbsd_basiclib.py +++ /dev/null @@ -1,283 +0,0 @@ -# Copyright (C) 2014, Hitachi, Ltd. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -import inspect -import os -import shlex - -from oslo_concurrency import lockutils -from oslo_concurrency import processutils as putils -from oslo_log import log as logging -from oslo_utils import excutils -import six - -from cinder import exception -from cinder.i18n import _ -from cinder import utils - -SMPL = 1 -COPY = 2 -PAIR = 3 -PSUS = 4 -PSUE = 5 -UNKN = 0xff - -FULL = 'Full copy' -THIN = 'Thin copy' - -DEFAULT_TRY_RANGE = range(3) -MAX_PROCESS_WAITTIME = 86400 -DEFAULT_PROCESS_WAITTIME = 900 - -GETSTORAGEARRAY_ONCE = 100 - -WARNING_ID = 300 - -DEFAULT_GROUP_RANGE = [0, 65535] - -NAME_PREFIX = 'HBSD-' - -NORMAL_VOLUME_TYPE = 'Normal' - -LOCK_DIR = '/var/lock/hbsd/' - -LOG = logging.getLogger(__name__) - -HBSD_INFO_MSG = { - 1: _('The parameter of the storage backend. ' - '(config_group: %(config_group)s)'), - 3: _('The storage backend can be used. (config_group: %(config_group)s)'), - 4: _('The volume %(volume_id)s is managed successfully. (LDEV: %(ldev)s)'), - 5: _('The volume %(volume_id)s is unmanaged successfully. ' - '(LDEV: %(ldev)s)'), -} - -HBSD_WARN_MSG = { - 301: _('A LUN (HLUN) was not found. (LDEV: %(ldev)s)'), - 302: _('Failed to specify a logical device for the volume ' - '%(volume_id)s to be unmapped.'), - 303: _('An iSCSI CHAP user could not be deleted. (username: %(user)s)'), - 304: _('Failed to specify a logical device to be deleted. ' - '(method: %(method)s, id: %(id)s)'), - 305: _('The logical device for specified %(type)s %(id)s ' - 'was already deleted.'), - 306: _('A host group could not be deleted. (port: %(port)s, ' - 'gid: %(gid)s, name: %(name)s)'), - 307: _('An iSCSI target could not be deleted. (port: %(port)s, ' - 'tno: %(tno)s, alias: %(alias)s)'), - 308: _('A host group could not be added. (port: %(port)s, ' - 'name: %(name)s)'), - 309: _('An iSCSI target could not be added. ' - '(port: %(port)s, alias: %(alias)s, reason: %(reason)s)'), - 310: _('Failed to unmap a logical device. 
(LDEV: %(ldev)s, ' - 'reason: %(reason)s)'), - 311: _('A free LUN (HLUN) was not found. Add a different host' - ' group. (LDEV: %(ldev)s)'), - 312: _('Failed to get a storage resource. The system will attempt ' - 'to get the storage resource again. (resource: %(resource)s)'), - 313: _('Failed to delete a logical device. (LDEV: %(ldev)s, ' - 'reason: %(reason)s)'), - 314: _('Failed to map a logical device. (LDEV: %(ldev)s, LUN: %(lun)s, ' - 'port: %(port)s, id: %(id)s)'), - 315: _('Failed to perform a zero-page reclamation. ' - '(LDEV: %(ldev)s, reason: %(reason)s)'), - 316: _('Failed to assign the iSCSI initiator IQN. (port: %(port)s, ' - 'reason: %(reason)s)'), -} - -HBSD_ERR_MSG = { - 600: _('The command %(cmd)s failed. (ret: %(ret)s, stdout: %(out)s, ' - 'stderr: %(err)s)'), - 601: _('A parameter is invalid. (%(param)s)'), - 602: _('A parameter value is invalid. (%(meta)s)'), - 603: _('Failed to acquire a resource lock. (serial: %(serial)s, ' - 'inst: %(inst)s, ret: %(ret)s, stderr: %(err)s)'), - 604: _('Cannot set both hitachi_serial_number and hitachi_unit_name.'), - 605: _('Either hitachi_serial_number or hitachi_unit_name is required.'), - 615: _('A pair could not be created. The maximum number of pair is ' - 'exceeded. (copy method: %(copy_method)s, P-VOL: %(pvol)s)'), - 616: _('A pair cannot be deleted. (P-VOL: %(pvol)s, S-VOL: %(svol)s)'), - 617: _('The specified operation is not supported. The volume size ' - 'must be the same as the source %(type)s. (volume: %(volume_id)s)'), - 618: _('The volume %(volume_id)s could not be extended. ' - 'The volume type must be Normal.'), - 619: _('The volume %(volume_id)s to be mapped was not found.'), - 624: _('The %(type)s %(id)s source to be replicated was not found.'), - 631: _('Failed to create a file. (file: %(file)s, ret: %(ret)s, ' - 'stderr: %(err)s)'), - 632: _('Failed to open a file. 
(file: %(file)s, ret: %(ret)s, ' - 'stderr: %(err)s)'), - 633: _('%(file)s: Permission denied.'), - 636: _('Failed to add the logical device.'), - 637: _('The method %(method)s is timed out. (timeout value: %(timeout)s)'), - 640: _('A pool could not be found. (pool id: %(pool_id)s)'), - 641: _('The host group or iSCSI target could not be added.'), - 642: _('An iSCSI CHAP user could not be added. (username: %(user)s)'), - 643: _('The iSCSI CHAP user %(user)s does not exist.'), - 648: _('There are no resources available for use. ' - '(resource: %(resource)s)'), - 649: _('The host group or iSCSI target was not found.'), - 650: _('The resource %(resource)s was not found.'), - 651: _('The IP Address was not found.'), - 653: _('The creation of a logical device could not be ' - 'completed. (LDEV: %(ldev)s)'), - 654: _('A volume status is invalid. (status: %(status)s)'), - 655: _('A snapshot status is invalid. (status: %(status)s)'), - 659: _('A host group is invalid. (host group: %(gid)s)'), - 660: _('The specified %(desc)s is busy.'), - 700: _('There is no designation of the %(param)s. ' - 'The specified storage is essential to manage the volume.'), - 701: _('There is no designation of the ldev. ' - 'The specified ldev is essential to manage the volume.'), - 702: _('The specified ldev %(ldev)s could not be managed. ' - 'The volume type must be DP-VOL.'), - 703: _('The specified ldev %(ldev)s could not be managed. ' - 'The ldev size must be in multiples of gigabyte.'), - 704: _('The specified ldev %(ldev)s could not be managed. ' - 'The ldev must not be mapping.'), - 705: _('The specified ldev %(ldev)s could not be managed. ' - 'The ldev must not be paired.'), - 706: _('The volume %(volume_id)s could not be unmanaged. 
' - 'The volume type must be %(volume_type)s.'), -} - - -def set_msg(msg_id, **kwargs): - if msg_id < WARNING_ID: - msg_header = 'MSGID%04d-I:' % msg_id - msg_body = HBSD_INFO_MSG.get(msg_id) - else: - msg_header = 'MSGID%04d-W:' % msg_id - msg_body = HBSD_WARN_MSG.get(msg_id) - - return '%(header)s %(body)s' % {'header': msg_header, - 'body': msg_body % kwargs} - - -def output_err(msg_id, **kwargs): - msg = HBSD_ERR_MSG.get(msg_id) % kwargs - - LOG.error("MSGID%(id)04d-E: %(msg)s", {'id': msg_id, 'msg': msg}) - - return msg - - -def get_process_lock(file): - if not os.access(file, os.W_OK): - msg = output_err(633, file=file) - raise exception.HBSDError(message=msg) - return lockutils.InterProcessLock(file) - - -def create_empty_file(filename): - if not os.path.exists(filename): - try: - utils.execute('touch', filename) - except putils.ProcessExecutionError as ex: - msg = output_err( - 631, file=filename, ret=ex.exit_code, err=ex.stderr) - raise exception.HBSDError(message=msg) - - -class FileLock(lockutils.InterProcessLock): - - def __init__(self, name, lock_object): - self.lock_object = lock_object - - super(FileLock, self).__init__(name) - - def __enter__(self): - self.lock_object.acquire() - - try: - ret = super(FileLock, self).__enter__() - except Exception: - with excutils.save_and_reraise_exception(): - self.lock_object.release() - - return ret - - def __exit__(self, exc_type, exc_val, exc_tb): - try: - super(FileLock, self).__exit__(exc_type, exc_val, exc_tb) - finally: - self.lock_object.release() - - -class NopLock(object): - - def __enter__(self): - pass - - def __exit__(self, exc_type, exc_val, exc_tb): - pass - - -class HBSDBasicLib(object): - - def __init__(self, conf=None): - self.conf = conf - - def exec_command(self, cmd, args=None, printflag=True): - if printflag: - if args: - LOG.debug('cmd: %(cmd)s, args: %(args)s', - {'cmd': cmd, 'args': args}) - else: - LOG.debug('cmd: %s', cmd) - - cmd = [cmd] - - if args: - if six.PY2 and isinstance(args, 
six.text_type): - cmd += shlex.split(args.encode()) - else: - cmd += shlex.split(args) - - try: - stdout, stderr = utils.execute(*cmd, run_as_root=True) - ret = 0 - except putils.ProcessExecutionError as e: - ret = e.exit_code - stdout = e.stdout - stderr = e.stderr - - LOG.debug('cmd: %s', cmd) - LOG.debug('from: %s', inspect.stack()[2]) - LOG.debug('ret: %d', ret) - LOG.debug('stdout: %s', stdout.replace(os.linesep, ' ')) - LOG.debug('stderr: %s', stderr.replace(os.linesep, ' ')) - - return ret, stdout, stderr - - def set_pair_flock(self): - return NopLock() - - def set_horcmgr_flock(self): - return NopLock() - - def discard_zero_page(self, ldev): - pass - - def output_param_to_log(self, conf): - pass - - def connect_storage(self): - pass - - def get_max_hostgroups(self): - pass - - def restart_pair_horcm(self): - pass diff --git a/cinder/volume/drivers/hitachi/hbsd_common.py b/cinder/volume/drivers/hitachi/hbsd_common.py deleted file mode 100644 index 66b4692d3..000000000 --- a/cinder/volume/drivers/hitachi/hbsd_common.py +++ /dev/null @@ -1,835 +0,0 @@ -# Copyright (C) 2014, Hitachi, Ltd. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -""" -Common class for Hitachi storage drivers. 
- -""" - -import re -import threading - -from oslo_config import cfg -from oslo_log import log as logging -from oslo_utils import excutils -import six - -from cinder import exception -from cinder import utils -from cinder.volume import configuration -from cinder.volume.drivers.hitachi import hbsd_basiclib as basic_lib -from cinder.volume.drivers.hitachi import hbsd_horcm as horcm -from cinder.volume.drivers.hitachi import hbsd_snm2 as snm2 -from cinder.volume import utils as volume_utils - -""" -Version history: - 1.0.0 - Initial driver - 1.1.0 - Add manage_existing/manage_existing_get_size/unmanage methods -""" -VERSION = '1.1.0' - -PARAM_RANGE = { - 'hitachi_copy_check_interval': {'min': 1, 'max': 600}, - 'hitachi_async_copy_check_interval': {'min': 1, 'max': 600}, - 'hitachi_copy_speed': {'min': 1, 'max': 15}, -} - -DEFAULT_LDEV_RANGE = [0, 65535] - -COPY_METHOD = ('FULL', 'THIN') -VALID_DP_VOLUME_STATUS = ['available', 'in-use'] -VALID_V_VOLUME_STATUS = ['available'] -SYSTEM_LOCK_FILE = basic_lib.LOCK_DIR + 'system' -SERVICE_LOCK_PATH_BASE = basic_lib.LOCK_DIR + 'service_' -STORAGE_LOCK_PATH_BASE = basic_lib.LOCK_DIR + 'storage_' - -LOG = logging.getLogger(__name__) - -volume_opts = [ - cfg.StrOpt('hitachi_serial_number', - help='Serial number of storage system'), - cfg.StrOpt('hitachi_unit_name', - help='Name of an array unit'), - cfg.IntOpt('hitachi_pool_id', - help='Pool ID of storage system'), - cfg.IntOpt('hitachi_thin_pool_id', - help='Thin pool ID of storage system'), - cfg.StrOpt('hitachi_ldev_range', - help='Range of logical device of storage system'), - cfg.StrOpt('hitachi_default_copy_method', - default='FULL', - help='Default copy method of storage system'), - cfg.IntOpt('hitachi_copy_speed', - default=3, - help='Copy speed of storage system'), - cfg.IntOpt('hitachi_copy_check_interval', - default=3, - help='Interval to check copy'), - cfg.IntOpt('hitachi_async_copy_check_interval', - default=10, - help='Interval to check copy asynchronously'), - 
cfg.StrOpt('hitachi_target_ports', - help='Control port names for HostGroup or iSCSI Target'), - cfg.StrOpt('hitachi_group_range', - help='Range of group number'), - cfg.BoolOpt('hitachi_group_request', - default=False, - secret=True, - help='Request for creating HostGroup or iSCSI Target'), -] - -CONF = cfg.CONF -CONF.register_opts(volume_opts, group=configuration.SHARED_CONF_GROUP) - - -class TryLock(object): - - def __init__(self): - self.lock = threading.RLock() - self.desc = None - - def set_desc(self, description): - self.desc = description - - def __enter__(self): - if not self.lock.acquire(False): - msg = basic_lib.output_err(660, desc=self.desc) - raise exception.HBSDError(message=msg) - return self - - def __exit__(self, exc_type, exc_val, exc_tb): - self.lock.release() - - -class HBSDCommon(object): - - def __init__(self, conf, parent, context, db): - self.configuration = conf - self.generated_from = parent - self.context = context - self.db = db - - self.system_lock_file = SYSTEM_LOCK_FILE - self.service_lock_file = '%s%s' % (SERVICE_LOCK_PATH_BASE, - conf.config_group) - if conf.hitachi_serial_number: - self.storage_lock_file = '%s%s' % (STORAGE_LOCK_PATH_BASE, - six.text_type( - conf.hitachi_serial_number)) - elif conf.hitachi_unit_name: - self.storage_lock_file = '%s%s' % (STORAGE_LOCK_PATH_BASE, - six.text_type( - conf.hitachi_unit_name)) - - self.storage_obj_lock = threading.Lock() - self.volinfo_lock = threading.Lock() - self.volume_info = {} - self.output_first = True - - def get_volume(self, volume_id): - return self.db.volume_get(self.context, volume_id) - - def get_volume_metadata(self, volume_id): - return self.db.volume_metadata_get(self.context, volume_id) - - def get_snapshot_metadata(self, snapshot_id): - return self.db.snapshot_metadata_get(self.context, snapshot_id) - - def _update_volume_metadata(self, volume_id, volume_metadata): - self.db.volume_metadata_update(self.context, volume_id, - volume_metadata, False) - - def get_ldev(self, 
obj): - if not obj: - return None - - ldev = obj.get('provider_location') - if not ldev or not ldev.isdigit(): - return None - else: - return int(ldev) - - def get_value(self, obj, name, key): - if not obj: - return None - - if obj.get(name): - if isinstance(obj[name], dict): - return obj[name].get(key) - else: - for i in obj[name]: - if i['key'] == key: - return i['value'] - return None - - def get_is_vvol(self, obj, name): - return self.get_value(obj, name, 'type') == 'V-VOL' - - def get_volume_is_vvol(self, volume): - return self.get_is_vvol(volume, 'volume_metadata') - - def get_snapshot_is_vvol(self, snapshot): - return self.get_is_vvol(snapshot, 'metadata') - - def get_copy_method(self, volume): - method = self.get_value(volume, 'volume_metadata', 'copy_method') - if method: - if method not in COPY_METHOD: - msg = basic_lib.output_err(602, meta='copy_method') - raise exception.HBSDError(message=msg) - elif (method == 'THIN' - and self.configuration.hitachi_thin_pool_id is None): - msg = basic_lib.output_err(601, param='hitachi_thin_pool_id') - raise exception.HBSDError(message=msg) - else: - method = self.configuration.hitachi_default_copy_method - return method - - def _string2int(self, num): - if not num: - return None - if num.isdigit(): - return int(num, 10) - if not re.match(r'\w\w:\w\w:\w\w', num): - return None - - try: - num = int(num.replace(':', ''), 16) - except ValueError: - return None - - return num - - def _range2list(self, conf, param): - str = getattr(conf, param) - lists = str.split('-') - if len(lists) != 2: - msg = basic_lib.output_err(601, param=param) - raise exception.HBSDError(message=msg) - - first_type = None - for i in range(len(lists)): - if lists[i].isdigit(): - lists[i] = int(lists[i], 10) - if first_type == 'hex': - msg = basic_lib.output_err(601, param=param) - raise exception.HBSDError(message=msg) - first_type = 'dig' - else: - if (first_type == 'dig' - or not re.match(r'\w\w:\w\w:\w\w', lists[i])): - msg = 
basic_lib.output_err(601, param=param) - raise exception.HBSDError(message=msg) - try: - lists[i] = int(lists[i].replace(':', ''), 16) - first_type = 'hex' - except Exception: - msg = basic_lib.output_err(601, param=param) - raise exception.HBSDError(message=msg) - if lists[0] > lists[1]: - msg = basic_lib.output_err(601, param=param) - raise exception.HBSDError(message=msg) - return lists - - def output_param_to_log(self, storage_protocol): - essential_inherited_param = ['volume_backend_name', 'volume_driver'] - conf = self.configuration - - LOG.info(basic_lib.set_msg(1, config_group=conf.config_group)) - version = self.command.get_comm_version() - if conf.hitachi_unit_name: - prefix = 'HSNM2 version' - else: - prefix = 'RAID Manager version' - LOG.info('\t%(prefix)-35s : %(version)s', - {'prefix': prefix, 'version': version}) - for param in essential_inherited_param: - value = conf.safe_get(param) - LOG.info('\t%(param)-35s : %(value)s', - {'param': param, 'value': value}) - for opt in volume_opts: - if not opt.secret: - value = getattr(conf, opt.name) - LOG.info('\t%(name)-35s : %(value)s', - {'name': opt.name, 'value': value}) - - if storage_protocol == 'iSCSI': - value = getattr(conf, 'hitachi_group_request') - LOG.info('\t%(request)-35s : %(value)s', - {'request': 'hitachi_group_request', 'value': value}) - - def check_param(self): - conf = self.configuration - - if conf.hitachi_unit_name and conf.hitachi_serial_number: - msg = basic_lib.output_err(604) - raise exception.HBSDError(message=msg) - - if not conf.hitachi_unit_name and not conf.hitachi_serial_number: - msg = basic_lib.output_err(605) - raise exception.HBSDError(message=msg) - - if conf.hitachi_pool_id is None: - msg = basic_lib.output_err(601, param='hitachi_pool_id') - raise exception.HBSDError(message=msg) - - for param in PARAM_RANGE.keys(): - _value = getattr(conf, param) - if (_value and - (not PARAM_RANGE[param]['min'] <= _value <= - PARAM_RANGE[param]['max'])): - msg = 
basic_lib.output_err(601, param=param) - raise exception.HBSDError(message=msg) - - if conf.hitachi_default_copy_method not in COPY_METHOD: - msg = basic_lib.output_err(601, - param='hitachi_default_copy_method') - raise exception.HBSDError(message=msg) - - if (conf.hitachi_default_copy_method == 'THIN' - and conf.hitachi_thin_pool_id is None): - msg = basic_lib.output_err(601, param='hitachi_thin_pool_id') - raise exception.HBSDError(message=msg) - - for param in ('hitachi_ldev_range', 'hitachi_group_range'): - if not getattr(conf, param): - continue - else: - _value = self._range2list(conf, param) - setattr(conf, param, _value) - - if conf.hitachi_target_ports: - conf.hitachi_target_ports = conf.hitachi_target_ports.split(',') - - for opt in volume_opts: - getattr(conf, opt.name) - - if conf.hitachi_unit_name: - self.command = snm2.HBSDSNM2(conf) - else: - conf.append_config_values(horcm.volume_opts) - self.command = horcm.HBSDHORCM(conf) - self.command.check_param() - self.pair_flock = self.command.set_pair_flock() - self.horcmgr_flock = self.command.set_horcmgr_flock() - - def create_lock_file(self): - basic_lib.create_empty_file(self.system_lock_file) - basic_lib.create_empty_file(self.service_lock_file) - basic_lib.create_empty_file(self.storage_lock_file) - self.command.create_lock_file() - - def _add_ldev(self, volume_num, capacity, pool_id, is_vvol): - self.command.comm_add_ldev(pool_id, volume_num, capacity, is_vvol) - - def _get_unused_volume_num(self, ldev_range): - return self.command.get_unused_ldev(ldev_range) - - def add_volinfo(self, ldev, id=None, type='volume'): - with self.volinfo_lock: - if ldev not in self.volume_info: - self.init_volinfo(self.volume_info, ldev) - if id: - desc = '%s %s' % (type, id) - self.volume_info[ldev]['in_use'].set_desc(desc) - - def delete_pair(self, ldev, all_split=True, is_vvol=None): - paired_info = self.command.get_paired_info(ldev) - LOG.debug('paired_info: %s', paired_info) - pvol = paired_info['pvol'] - svols = 
paired_info['svol'] - driver = self.generated_from - restart = False - svol_list = [] - try: - if pvol is None: - return - elif pvol == ldev: - for svol in svols[:]: - if svol['is_vvol'] or svol['status'] != basic_lib.PSUS: - continue - - self.command.delete_pair(pvol, svol['lun'], False) - restart = True - driver.pair_terminate_connection(svol['lun']) - svols.remove(svol) - - if all_split and svols: - svol_list.append(six.text_type(svols[0]['lun'])) - for svol in svols[1:]: - svol_list.append(', %d' % svol['lun']) - - msg = basic_lib.output_err(616, pvol=pvol, - svol=''.join(svol_list)) - raise exception.HBSDBusy(message=msg) - - if not svols: - driver.pair_terminate_connection(pvol) - - else: - self.add_volinfo(pvol) - if not self.volume_info[pvol]['in_use'].lock.acquire(False): - desc = self.volume_info[pvol]['in_use'].desc - msg = basic_lib.output_err(660, desc=desc) - raise exception.HBSDBusy(message=msg) - try: - paired_info = self.command.get_paired_info(ldev) - if paired_info['pvol'] is None: - return - svol = paired_info['svol'][0] - if svol['status'] != basic_lib.PSUS: - msg = basic_lib.output_err(616, pvol=pvol, svol=ldev) - raise exception.HBSDBusy(message=msg) - - self.command.delete_pair(pvol, ldev, svol['is_vvol']) - if not svol['is_vvol']: - restart = True - driver.pair_terminate_connection(ldev) - paired_info = self.command.get_paired_info(pvol) - if paired_info['pvol'] is None: - driver.pair_terminate_connection(pvol) - finally: - self.volume_info[pvol]['in_use'].lock.release() - except Exception: - with excutils.save_and_reraise_exception(): - if restart: - try: - self.command.restart_pair_horcm() - except Exception as e: - LOG.warning('Failed to restart horcm: %s', e) - else: - if (all_split or is_vvol) and restart: - try: - self.command.restart_pair_horcm() - except Exception as e: - LOG.warning('Failed to restart horcm: %s', e) - - def copy_async_data(self, pvol, svol, is_vvol): - path_list = [] - driver = self.generated_from - try: - with 
self.pair_flock: - self.delete_pair(pvol, all_split=False, is_vvol=is_vvol) - paired_info = self.command.get_paired_info(pvol) - if paired_info['pvol'] is None: - driver.pair_initialize_connection(pvol) - path_list.append(pvol) - driver.pair_initialize_connection(svol) - path_list.append(svol) - self.command.comm_create_pair(pvol, svol, is_vvol) - except Exception: - with excutils.save_and_reraise_exception(): - for ldev in path_list: - try: - driver.pair_terminate_connection(ldev) - except Exception as ex: - LOG.warning(basic_lib.set_msg(310, ldev=ldev, - reason=ex)) - - def copy_sync_data(self, src_ldev, dest_ldev, size): - src_vol = {'provider_location': six.text_type(src_ldev), - 'id': 'src_vol'} - dest_vol = {'provider_location': six.text_type(dest_ldev), - 'id': 'dest_vol'} - properties = utils.brick_get_connector_properties() - driver = self.generated_from - src_info = None - dest_info = None - try: - dest_info = driver._attach_volume(self.context, dest_vol, - properties) - src_info = driver._attach_volume(self.context, src_vol, - properties) - volume_utils.copy_volume(src_info['device']['path'], - dest_info['device']['path'], size * 1024, - self.configuration.volume_dd_blocksize) - finally: - if dest_info: - driver._detach_volume(self.context, dest_info, - dest_vol, properties) - if src_info: - driver._detach_volume(self.context, src_info, - src_vol, properties) - self.command.discard_zero_page(dest_ldev) - - def copy_data(self, pvol, size, p_is_vvol, method): - type = 'Normal' - is_vvol = method == 'THIN' - svol = self._create_volume(size, is_vvol=is_vvol) - try: - if p_is_vvol: - self.copy_sync_data(pvol, svol, size) - else: - if is_vvol: - type = 'V-VOL' - self.copy_async_data(pvol, svol, is_vvol) - except Exception: - with excutils.save_and_reraise_exception(): - try: - self.delete_ldev(svol, is_vvol) - except Exception as ex: - LOG.warning(basic_lib.set_msg(313, ldev=svol, - reason=ex)) - - return six.text_type(svol), type - - def add_lun(self, 
command, hostgroups, ldev, is_once=False): - lock = basic_lib.get_process_lock(self.storage_lock_file) - with lock: - self.command.comm_add_lun(command, hostgroups, ldev, is_once) - - def create_ldev(self, size, ldev_range, pool_id, is_vvol): - LOG.debug('create start (normal)') - for i in basic_lib.DEFAULT_TRY_RANGE: - LOG.debug('Try number: %(tries)s / %(max_tries)s', - {'tries': i + 1, - 'max_tries': len(basic_lib.DEFAULT_TRY_RANGE)}) - new_ldev = self._get_unused_volume_num(ldev_range) - try: - self._add_ldev(new_ldev, size, pool_id, is_vvol) - except exception.HBSDNotFound: - LOG.warning(basic_lib.set_msg(312, resource='LDEV')) - continue - else: - break - else: - msg = basic_lib.output_err(636) - raise exception.HBSDError(message=msg) - LOG.debug('create end (normal: %s)', new_ldev) - self.init_volinfo(self.volume_info, new_ldev) - return new_ldev - - def _create_volume(self, size, is_vvol=False): - ldev_range = self.configuration.hitachi_ldev_range - if not ldev_range: - ldev_range = DEFAULT_LDEV_RANGE - pool_id = self.configuration.hitachi_pool_id - - lock = basic_lib.get_process_lock(self.storage_lock_file) - with self.storage_obj_lock, lock: - ldev = self.create_ldev(size, ldev_range, pool_id, is_vvol) - return ldev - - def create_volume(self, volume): - volume_metadata = self.get_volume_metadata(volume['id']) - volume_metadata['type'] = 'Normal' - - size = volume['size'] - ldev = self._create_volume(size) - volume_metadata['ldev'] = six.text_type(ldev) - - return {'provider_location': six.text_type(ldev), - 'metadata': volume_metadata} - - def delete_ldev(self, ldev, is_vvol): - LOG.debug('Call delete_ldev (LDEV: %(ldev)d is_vvol: %(vvol)s)', - {'ldev': ldev, 'vvol': is_vvol}) - with self.pair_flock: - self.delete_pair(ldev) - self.command.comm_delete_ldev(ldev, is_vvol) - with self.volinfo_lock: - if ldev in self.volume_info: - self.volume_info.pop(ldev) - LOG.debug('delete_ldev is finished ' - '(LDEV: %(ldev)d, is_vvol: %(vvol)s)', - {'ldev': ldev, 
'vvol': is_vvol}) - - def delete_volume(self, volume): - ldev = self.get_ldev(volume) - if ldev is None: - LOG.warning(basic_lib.set_msg(304, method='delete_volume', - id=volume['id'])) - return - self.add_volinfo(ldev, volume['id']) - if not self.volume_info[ldev]['in_use'].lock.acquire(False): - desc = self.volume_info[ldev]['in_use'].desc - basic_lib.output_err(660, desc=desc) - raise exception.VolumeIsBusy(volume_name=volume['name']) - try: - is_vvol = self.get_volume_is_vvol(volume) - try: - self.delete_ldev(ldev, is_vvol) - except exception.HBSDNotFound: - with self.volinfo_lock: - if ldev in self.volume_info: - self.volume_info.pop(ldev) - LOG.warning(basic_lib.set_msg( - 305, type='volume', id=volume['id'])) - except exception.HBSDBusy: - raise exception.VolumeIsBusy(volume_name=volume['name']) - finally: - if ldev in self.volume_info: - self.volume_info[ldev]['in_use'].lock.release() - - def check_volume_status(self, volume, is_vvol): - if not is_vvol: - status = VALID_DP_VOLUME_STATUS - else: - status = VALID_V_VOLUME_STATUS - if volume['status'] not in status: - msg = basic_lib.output_err(654, status=volume['status']) - raise exception.HBSDError(message=msg) - - def create_snapshot(self, snapshot): - src_ref = self.get_volume(snapshot['volume_id']) - pvol = self.get_ldev(src_ref) - if pvol is None: - msg = basic_lib.output_err(624, type='volume', id=src_ref['id']) - raise exception.HBSDError(message=msg) - - self.add_volinfo(pvol, src_ref['id']) - with self.volume_info[pvol]['in_use']: - is_vvol = self.get_volume_is_vvol(src_ref) - self.check_volume_status(src_ref, is_vvol) - size = snapshot['volume_size'] - snap_metadata = snapshot.get('metadata') - method = None if is_vvol else self.get_copy_method(src_ref) - - svol, type = self.copy_data(pvol, size, is_vvol, method) - - if type == 'V-VOL': - snap_metadata['type'] = type - snap_metadata['ldev'] = svol - - return {'provider_location': svol, - 'metadata': snap_metadata} - - def delete_snapshot(self, 
snapshot): - ldev = self.get_ldev(snapshot) - if ldev is None: - LOG.warning(basic_lib.set_msg( - 304, method='delete_snapshot', id=snapshot['id'])) - return - self.add_volinfo(ldev, id=snapshot['id'], type='snapshot') - if not self.volume_info[ldev]['in_use'].lock.acquire(False): - desc = self.volume_info[ldev]['in_use'].desc - basic_lib.output_err(660, desc=desc) - raise exception.SnapshotIsBusy(snapshot_name=snapshot['name']) - try: - is_vvol = self.get_snapshot_is_vvol(snapshot) - try: - self.delete_ldev(ldev, is_vvol) - except exception.HBSDNotFound: - with self.volinfo_lock: - if ldev in self.volume_info: - self.volume_info.pop(ldev) - LOG.warning(basic_lib.set_msg( - 305, type='snapshot', id=snapshot['id'])) - except exception.HBSDBusy: - raise exception.SnapshotIsBusy(snapshot_name=snapshot['name']) - finally: - if ldev in self.volume_info: - self.volume_info[ldev]['in_use'].lock.release() - - def create_cloned_volume(self, volume, src_vref): - pvol = self.get_ldev(src_vref) - if pvol is None: - msg = basic_lib.output_err(624, type='volume', id=src_vref['id']) - raise exception.HBSDError(message=msg) - - self.add_volinfo(pvol, src_vref['id']) - with self.volume_info[pvol]['in_use']: - is_vvol = self.get_volume_is_vvol(src_vref) - self.check_volume_status(self.get_volume(src_vref['id']), is_vvol) - size = volume['size'] - src_size = src_vref['size'] - if size < src_size: - msg = basic_lib.output_err(617, type='volume', - volume_id=volume['id']) - raise exception.HBSDError(message=msg) - - metadata = self.get_volume_metadata(volume['id']) - method = None if is_vvol else self.get_copy_method(volume) - - svol, type = self.copy_data(pvol, src_size, is_vvol, method) - - if size > src_size: - self.extend_volume(volume, size) - - metadata['type'] = type - metadata['volume'] = src_vref['id'] - metadata['ldev'] = svol - - return {'provider_location': svol, 'metadata': metadata} - - def create_volume_from_snapshot(self, volume, snapshot): - pvol = 
self.get_ldev(snapshot) - if pvol is None: - msg = basic_lib.output_err(624, type='snapshot', id=snapshot['id']) - raise exception.HBSDError(message=msg) - - self.add_volinfo(pvol, id=snapshot['id'], type='snapshot') - with self.volume_info[pvol]['in_use']: - is_vvol = self.get_snapshot_is_vvol(snapshot) - if snapshot['status'] != 'available': - msg = basic_lib.output_err(655, status=snapshot['status']) - raise exception.HBSDError(message=msg) - - size = volume['size'] - src_size = snapshot['volume_size'] - if size != src_size: - msg = basic_lib.output_err(617, type='snapshot', - volume_id=volume['id']) - raise exception.HBSDError(message=msg) - - metadata = self.get_volume_metadata(volume['id']) - method = None if is_vvol else self.get_copy_method(volume) - svol, type = self.copy_data(pvol, size, is_vvol, method) - - metadata['type'] = type - metadata['snapshot'] = snapshot['id'] - metadata['ldev'] = svol - - return {'provider_location': svol, 'metadata': metadata} - - def _extend_volume(self, ldev, old_size, new_size): - with self.pair_flock: - self.delete_pair(ldev) - self.command.comm_extend_ldev(ldev, old_size, new_size) - - def extend_volume(self, volume, new_size): - pvol = self.get_ldev(volume) - self.add_volinfo(pvol, volume['id']) - with self.volume_info[pvol]['in_use']: - if self.get_volume_is_vvol(volume): - msg = basic_lib.output_err(618, volume_id=volume['id']) - raise exception.HBSDError(message=msg) - self._extend_volume(pvol, volume['size'], new_size) - - def output_backend_available_once(self): - if self.output_first: - self.output_first = False - LOG.warning(basic_lib.set_msg( - 3, config_group=self.configuration.config_group)) - - def update_volume_stats(self, storage_protocol): - data = {} - total_gb = None - free_gb = None - data['volume_backend_name'] = self.configuration.safe_get( - 'volume_backend_name') or 'HBSD%s' % storage_protocol - data['vendor_name'] = 'Hitachi' - data['driver_version'] = VERSION - data['storage_protocol'] = 
storage_protocol - - try: - total_gb, free_gb = self.command.comm_get_dp_pool( - self.configuration.hitachi_pool_id) - except Exception as ex: - LOG.error('Failed to update volume status: %s', ex) - return None - - data['total_capacity_gb'] = total_gb - data['free_capacity_gb'] = free_gb - data['reserved_percentage'] = self.configuration.safe_get( - 'reserved_percentage') - data['QoS_support'] = False - - LOG.debug('Updating volume status (%s)', data) - - return data - - def init_volinfo(self, vol_info, ldev): - vol_info[ldev] = {'in_use': TryLock(), 'lock': threading.Lock()} - - def manage_existing(self, volume, existing_ref): - """Manage an existing Hitachi storage volume. - - existing_ref is a dictionary of the form: - - For HUS 100 Family: - - .. code-block:: default - - { - 'ldev': , - 'unit_name': - } - - For VSP G1000/VSP/HUS VM: - - .. code-block:: default - - { - 'ldev': , - 'serial_number': - } - - """ - - ldev = self._string2int(existing_ref.get('ldev')) - - LOG.info(basic_lib.set_msg(4, volume_id=volume['id'], ldev=ldev)) - - return {'provider_location': ldev} - - def _manage_existing_get_size(self, volume, existing_ref): - """Return size of volume for manage_existing.""" - - ldev = self._string2int(existing_ref.get('ldev')) - if ldev is None: - msg = basic_lib.output_err(701) - raise exception.HBSDError(data=msg) - - size = self.command.get_ldev_size_in_gigabyte(ldev, existing_ref) - - metadata = {'type': basic_lib.NORMAL_VOLUME_TYPE, 'ldev': ldev} - self._update_volume_metadata(volume['id'], metadata) - - return size - - def manage_existing_get_size(self, volume, existing_ref): - try: - return self._manage_existing_get_size(volume, existing_ref) - except exception.HBSDError as ex: - raise exception.ManageExistingInvalidReference( - existing_ref=existing_ref, - reason=six.text_type(ex)) - - def _unmanage(self, volume, ldev): - with self.horcmgr_flock: - self.delete_pair(ldev) - - with self.volinfo_lock: - if ldev in self.volume_info: - 
self.volume_info.pop(ldev) - - def unmanage(self, volume): - """Remove the specified volume from Cinder management.""" - - ldev = self.get_ldev(volume) - - if ldev is None: - return - - self.add_volinfo(ldev, volume['id']) - if not self.volume_info[ldev]['in_use'].lock.acquire(False): - desc = self.volume_info[ldev]['in_use'].desc - basic_lib.output_err(660, desc=desc) - raise exception.HBSDVolumeIsBusy(volume_name=volume['name']) - - is_vvol = self.get_volume_is_vvol(volume) - if is_vvol: - basic_lib.output_err(706, volume_id=volume['id'], - volume_type=basic_lib.NORMAL_VOLUME_TYPE) - raise exception.HBSDVolumeIsBusy(volume_name=volume['name']) - try: - self._unmanage(volume, ldev) - except exception.HBSDBusy: - raise exception.HBSDVolumeIsBusy(volume_name=volume['name']) - else: - LOG.info(basic_lib.set_msg(5, volume_id=volume['id'], ldev=ldev)) - finally: - if ldev in self.volume_info: - self.volume_info[ldev]['in_use'].lock.release() diff --git a/cinder/volume/drivers/hitachi/hbsd_fc.py b/cinder/volume/drivers/hitachi/hbsd_fc.py deleted file mode 100644 index aaf581ab4..000000000 --- a/cinder/volume/drivers/hitachi/hbsd_fc.py +++ /dev/null @@ -1,539 +0,0 @@ -# Copyright (C) 2014, Hitachi, Ltd. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -""" -Fibre channel Cinder volume driver for Hitachi storage. 
- -""" - -import os -import threading - -from oslo_config import cfg -from oslo_log import log as logging -from oslo_log import versionutils -from oslo_utils import excutils -import six - -from cinder import exception -from cinder.i18n import _ -from cinder import interface -from cinder import utils -from cinder.volume import configuration -import cinder.volume.driver -from cinder.volume.drivers.hitachi import hbsd_basiclib as basic_lib -from cinder.volume.drivers.hitachi import hbsd_common as common -from cinder.zonemanager import utils as fczm_utils - -LOG = logging.getLogger(__name__) - -volume_opts = [ - cfg.BoolOpt('hitachi_zoning_request', - default=False, - help='Request for FC Zone creating HostGroup'), -] - -CONF = cfg.CONF -CONF.register_opts(volume_opts, group=configuration.SHARED_CONF_GROUP) - - -@interface.volumedriver -class HBSDFCDriver(cinder.volume.driver.FibreChannelDriver): - VERSION = common.VERSION - - # ThirdPartySystems wiki page - CI_WIKI_NAME = ["Hitachi_HBSD_CI", "Hitachi_HBSD2_CI"] - - SUPPORTED = False - - def __init__(self, *args, **kwargs): - os.environ['LANG'] = 'C' - super(HBSDFCDriver, self).__init__(*args, **kwargs) - self.db = kwargs.get('db') - self.common = None - self.configuration.append_config_values(common.volume_opts) - self._stats = {} - self.context = None - self.max_hostgroups = None - self.pair_hostgroups = [] - self.pair_hostnum = 0 - self.do_setup_status = threading.Event() - - def _check_param(self): - self.configuration.append_config_values(volume_opts) - for opt in volume_opts: - getattr(self.configuration, opt.name) - - def check_param(self): - try: - self.common.check_param() - self._check_param() - except exception.HBSDError: - raise - except Exception as ex: - msg = basic_lib.output_err(601, param=six.text_type(ex)) - raise exception.HBSDError(message=msg) - - def output_param_to_log(self): - lock = basic_lib.get_process_lock(self.common.system_lock_file) - - with lock: - self.common.output_param_to_log('FC') - 
for opt in volume_opts: - if not opt.secret: - value = getattr(self.configuration, opt.name) - LOG.info('\t%(name)-35s : %(value)s', - {'name': opt.name, 'value': value}) - self.common.command.output_param_to_log(self.configuration) - - def _add_wwn(self, hgs, port, gid, wwns): - for wwn in wwns: - wwn = six.text_type(wwn) - self.common.command.comm_add_hbawwn(port, gid, wwn) - detected = self.common.command.is_detected(port, wwn) - hgs.append({'port': port, 'gid': gid, 'initiator_wwn': wwn, - 'detected': detected}) - LOG.debug('Create host group for %s', hgs) - - def _add_lun(self, hostgroups, ldev): - if hostgroups is self.pair_hostgroups: - is_once = True - else: - is_once = False - self.common.add_lun('auhgmap', hostgroups, ldev, is_once) - - def _delete_lun(self, hostgroups, ldev): - try: - self.common.command.comm_delete_lun(hostgroups, ldev) - except exception.HBSDNotFound: - LOG.warning(basic_lib.set_msg(301, ldev=ldev)) - - def _get_hgname_gid(self, port, host_grp_name): - return self.common.command.get_hgname_gid(port, host_grp_name) - - def _get_unused_gid(self, port): - group_range = self.configuration.hitachi_group_range - if not group_range: - group_range = basic_lib.DEFAULT_GROUP_RANGE - return self.common.command.get_unused_gid(group_range, port) - - def _get_hostgroup_info(self, hgs, wwns, login=True): - target_ports = self.configuration.hitachi_target_ports - return self.common.command.comm_get_hostgroup_info( - hgs, wwns, target_ports, login=login) - - def _fill_group(self, hgs, port, host_grp_name, wwns): - added_hostgroup = False - LOG.debug('Create host group (hgs: %(hgs)s port: %(port)s ' - 'name: %(name)s wwns: %(wwns)s)', - {'hgs': hgs, 'port': port, - 'name': host_grp_name, 'wwns': wwns}) - gid = self._get_hgname_gid(port, host_grp_name) - if gid is None: - for retry_cnt in basic_lib.DEFAULT_TRY_RANGE: - try: - gid = self._get_unused_gid(port) - self._add_hostgroup(port, gid, host_grp_name) - added_hostgroup = True - except 
exception.HBSDNotFound: - gid = None - LOG.warning(basic_lib.set_msg(312, resource='GID')) - continue - else: - LOG.debug('Completed to add host target' - '(port: %(port)s gid: %(gid)d)', - {'port': port, 'gid': gid}) - break - else: - msg = basic_lib.output_err(641) - raise exception.HBSDError(message=msg) - - try: - if wwns: - self._add_wwn(hgs, port, gid, wwns) - else: - hgs.append({'port': port, 'gid': gid, 'initiator_wwn': None, - 'detected': True}) - except Exception: - with excutils.save_and_reraise_exception(): - if added_hostgroup: - self._delete_hostgroup(port, gid, host_grp_name) - - def add_hostgroup_master(self, hgs, master_wwns, host_ip, security_ports): - target_ports = self.configuration.hitachi_target_ports - group_request = self.configuration.hitachi_group_request - wwns = [] - for wwn in master_wwns: - wwns.append(wwn.lower()) - if target_ports and group_request: - host_grp_name = '%s%s' % (basic_lib.NAME_PREFIX, host_ip) - for port in security_ports: - wwns_copy = wwns[:] - for hostgroup in hgs: - if (hostgroup['port'] == port and - hostgroup['initiator_wwn'].lower() in wwns_copy): - wwns_copy.remove(hostgroup['initiator_wwn'].lower()) - if wwns_copy: - try: - self._fill_group(hgs, port, host_grp_name, wwns_copy) - except Exception as ex: - LOG.warning('Failed to add host group: %s', ex) - LOG.warning(basic_lib.set_msg( - 308, port=port, name=host_grp_name)) - - if not hgs: - raise exception.HBSDError(message=basic_lib.output_err(649)) - - def add_hostgroup_pair(self, pair_hostgroups): - if self.configuration.hitachi_unit_name: - return - - properties = utils.brick_get_connector_properties() - if 'wwpns' not in properties: - msg = basic_lib.output_err(650, resource='HBA') - raise exception.HBSDError(message=msg) - hostgroups = [] - self._get_hostgroup_info(hostgroups, properties['wwpns'], - login=False) - host_grp_name = '%spair%02x' % (basic_lib.NAME_PREFIX, - self.pair_hostnum) - for hostgroup in hostgroups: - gid = 
self._get_hgname_gid(hostgroup['port'], - host_grp_name) - - # When 'gid' is 0, it should be true. - # So, it cannot remove 'is not None'. - if gid is not None: - pair_hostgroups.append({'port': hostgroup['port'], - 'gid': gid, 'initiator_wwn': None, - 'detected': True}) - break - - if not pair_hostgroups: - for hostgroup in hostgroups: - pair_port = hostgroup['port'] - try: - self._fill_group(pair_hostgroups, pair_port, - host_grp_name, None) - except Exception: - if hostgroup is hostgroups[-1]: - raise - else: - break - - def add_hostgroup(self): - properties = utils.brick_get_connector_properties() - if 'wwpns' not in properties: - msg = basic_lib.output_err(650, resource='HBA') - raise exception.HBSDError(message=msg) - LOG.debug("wwpns: %s", properties['wwpns']) - - hostgroups = [] - security_ports = self._get_hostgroup_info( - hostgroups, properties['wwpns'], login=False) - self.add_hostgroup_master(hostgroups, properties['wwpns'], - properties['ip'], security_ports) - self.add_hostgroup_pair(self.pair_hostgroups) - - def _get_target_wwn(self, port): - target_wwns = self.common.command.comm_set_target_wwns( - self.configuration.hitachi_target_ports) - return target_wwns[port] - - def _add_hostgroup(self, port, gid, host_grp_name): - self.common.command.comm_add_hostgrp(port, gid, host_grp_name) - - def _delete_hostgroup(self, port, gid, host_grp_name): - try: - self.common.command.comm_del_hostgrp(port, gid, host_grp_name) - except Exception: - with excutils.save_and_reraise_exception(): - LOG.warning(basic_lib.set_msg( - 306, port=port, gid=gid, name=host_grp_name)) - - def _check_volume_mapping(self, hostgroup): - port = hostgroup['port'] - gid = hostgroup['gid'] - if self.common.command.get_hostgroup_luns(port, gid): - return True - else: - return False - - def _build_initiator_target_map(self, hostgroups, terminate=False): - target_wwns = [] - init_targ_map = {} - - target_ports = self.configuration.hitachi_target_ports - zoning_request = 
self.configuration.hitachi_zoning_request - - for hostgroup in hostgroups: - target_wwn = self._get_target_wwn(hostgroup['port']) - - if target_wwn not in target_wwns: - target_wwns.append(target_wwn) - - if target_ports and zoning_request: - if terminate and self._check_volume_mapping(hostgroup): - continue - - initiator_wwn = hostgroup['initiator_wwn'] - if initiator_wwn not in init_targ_map: - init_targ_map[initiator_wwn] = [] - - init_targ_map[initiator_wwn].append(target_wwn) - - return target_wwns, init_targ_map - - def _get_properties(self, volume, hostgroups, terminate=False): - properties = {} - - target_wwns, init_targ_map = self._build_initiator_target_map( - hostgroups, terminate) - - properties['target_wwn'] = target_wwns - - if init_targ_map: - properties['initiator_target_map'] = init_targ_map - - if not terminate: - properties['target_lun'] = hostgroups[0]['lun'] - - return properties - - def do_setup(self, context): - self.context = context - self.common = common.HBSDCommon(self.configuration, self, - context, self.db) - msg = _("The HBSD FC driver is deprecated and " - "will be removed in P release.") - versionutils.report_deprecated_feature(LOG, msg) - - self.check_param() - - self.common.create_lock_file() - - self.common.command.connect_storage() - self.max_hostgroups = self.common.command.get_max_hostgroups() - - lock = basic_lib.get_process_lock(self.common.service_lock_file) - with lock: - self.add_hostgroup() - - self.output_param_to_log() - self.do_setup_status.set() - - def check_for_setup_error(self): - pass - - def extend_volume(self, volume, new_size): - self.do_setup_status.wait() - self.common.extend_volume(volume, new_size) - - def get_volume_stats(self, refresh=False): - if refresh: - if self.do_setup_status.isSet(): - self.common.output_backend_available_once() - _stats = self.common.update_volume_stats("FC") - if _stats: - self._stats = _stats - return self._stats - - def create_volume(self, volume): - self.do_setup_status.wait() 
- metadata = self.common.create_volume(volume) - return metadata - - def delete_volume(self, volume): - self.do_setup_status.wait() - self.common.delete_volume(volume) - - def create_snapshot(self, snapshot): - self.do_setup_status.wait() - metadata = self.common.create_snapshot(snapshot) - return metadata - - def delete_snapshot(self, snapshot): - self.do_setup_status.wait() - self.common.delete_snapshot(snapshot) - - def create_cloned_volume(self, volume, src_vref): - self.do_setup_status.wait() - metadata = self.common.create_cloned_volume(volume, src_vref) - return metadata - - def create_volume_from_snapshot(self, volume, snapshot): - self.do_setup_status.wait() - metadata = self.common.create_volume_from_snapshot(volume, snapshot) - return metadata - - def _initialize_connection(self, ldev, connector, src_hgs=None): - LOG.debug("Call _initialize_connection " - "(config_group: %(group)s ldev: %(ldev)d)", - {'group': self.configuration.config_group, 'ldev': ldev}) - if src_hgs is self.pair_hostgroups: - hostgroups = src_hgs - else: - hostgroups = [] - security_ports = self._get_hostgroup_info( - hostgroups, connector['wwpns'], login=True) - self.add_hostgroup_master(hostgroups, connector['wwpns'], - connector['ip'], security_ports) - - if src_hgs is self.pair_hostgroups: - try: - self._add_lun(hostgroups, ldev) - except exception.HBSDNotFound: - LOG.warning(basic_lib.set_msg(311, ldev=ldev)) - for i in range(self.max_hostgroups + 1): - self.pair_hostnum += 1 - pair_hostgroups = [] - try: - self.add_hostgroup_pair(pair_hostgroups) - self.pair_hostgroups.extend(pair_hostgroups) - except exception.HBSDNotFound: - if i >= self.max_hostgroups: - msg = basic_lib.output_err(648, resource='GID') - raise exception.HBSDError(message=msg) - else: - break - self.pair_initialize_connection(ldev) - else: - self._add_lun(hostgroups, ldev) - - return hostgroups - - @fczm_utils.add_fc_zone - def initialize_connection(self, volume, connector): - self.do_setup_status.wait() - 
ldev = self.common.get_ldev(volume) - if ldev is None: - msg = basic_lib.output_err(619, volume_id=volume['id']) - raise exception.HBSDError(message=msg) - self.common.add_volinfo(ldev, volume['id']) - with self.common.volume_info[ldev]['lock'],\ - self.common.volume_info[ldev]['in_use']: - hostgroups = self._initialize_connection(ldev, connector) - properties = self._get_properties(volume, hostgroups) - LOG.debug('Initialize volume_info: %s', - self.common.volume_info) - - LOG.debug('HFCDrv: properties=%s', properties) - return { - 'driver_volume_type': 'fibre_channel', - 'data': properties - } - - def _terminate_connection(self, ldev, connector, src_hgs): - LOG.debug("Call _terminate_connection(config_group: %s)", - self.configuration.config_group) - hostgroups = src_hgs[:] - self._delete_lun(hostgroups, ldev) - LOG.debug("*** _terminate_ ***") - - @fczm_utils.remove_fc_zone - def terminate_connection(self, volume, connector, **kwargs): - self.do_setup_status.wait() - ldev = self.common.get_ldev(volume) - if ldev is None: - LOG.warning(basic_lib.set_msg(302, volume_id=volume['id'])) - return - - if 'wwpns' not in connector: - msg = basic_lib.output_err(650, resource='HBA') - raise exception.HBSDError(message=msg) - - hostgroups = [] - self._get_hostgroup_info(hostgroups, - connector['wwpns'], login=False) - if not hostgroups: - msg = basic_lib.output_err(649) - raise exception.HBSDError(message=msg) - - self.common.add_volinfo(ldev, volume['id']) - with self.common.volume_info[ldev]['lock'],\ - self.common.volume_info[ldev]['in_use']: - self._terminate_connection(ldev, connector, hostgroups) - properties = self._get_properties(volume, hostgroups, - terminate=True) - LOG.debug('Terminate volume_info: %s', self.common.volume_info) - - return { - 'driver_volume_type': 'fibre_channel', - 'data': properties - } - - def pair_initialize_connection(self, ldev): - if self.configuration.hitachi_unit_name: - return - self._initialize_connection(ldev, None, 
self.pair_hostgroups) - - def pair_terminate_connection(self, ldev): - if self.configuration.hitachi_unit_name: - return - self._terminate_connection(ldev, None, self.pair_hostgroups) - - def discard_zero_page(self, volume): - self.common.command.discard_zero_page(self.common.get_ldev(volume)) - - def create_export(self, context, volume, connector): - pass - - def ensure_export(self, context, volume): - pass - - def remove_export(self, context, volume): - pass - - def copy_image_to_volume(self, context, volume, image_service, image_id): - self.do_setup_status.wait() - super(HBSDFCDriver, self).copy_image_to_volume(context, volume, - image_service, - image_id) - self.discard_zero_page(volume) - - def copy_volume_to_image(self, context, volume, image_service, image_meta): - self.do_setup_status.wait() - if volume['volume_attachment']: - desc = 'volume %s' % volume['id'] - msg = basic_lib.output_err(660, desc=desc) - raise exception.HBSDError(message=msg) - super(HBSDFCDriver, self).copy_volume_to_image(context, volume, - image_service, - image_meta) - - def before_volume_copy(self, context, src_vol, dest_vol, remote=None): - """Driver-specific actions before copyvolume data. - - This method will be called before _copy_volume_data during volume - migration - """ - self.do_setup_status.wait() - - def after_volume_copy(self, context, src_vol, dest_vol, remote=None): - """Driver-specific actions after copyvolume data. 
- - This method will be called after _copy_volume_data during volume - migration - """ - self.discard_zero_page(dest_vol) - - def manage_existing(self, volume, existing_ref): - return self.common.manage_existing(volume, existing_ref) - - def manage_existing_get_size(self, volume, existing_ref): - self.do_setup_status.wait() - return self.common.manage_existing_get_size(volume, existing_ref) - - def unmanage(self, volume): - self.do_setup_status.wait() - self.common.unmanage(volume) diff --git a/cinder/volume/drivers/hitachi/hbsd_horcm.py b/cinder/volume/drivers/hitachi/hbsd_horcm.py deleted file mode 100644 index d27f59ec5..000000000 --- a/cinder/volume/drivers/hitachi/hbsd_horcm.py +++ /dev/null @@ -1,1502 +0,0 @@ -# Copyright (C) 2014, 2015, Hitachi, Ltd. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -import functools -import os -import re -import shlex -import threading -import time - -from oslo_concurrency import processutils as putils -from oslo_config import cfg -from oslo_log import log as logging -from oslo_service import loopingcall -from oslo_utils import excutils -from oslo_utils import units -import six - -from cinder import exception -from cinder import utils -from cinder.volume import configuration -from cinder.volume.drivers.hitachi import hbsd_basiclib as basic_lib - -GETSTORAGEARRAY_ONCE = 100 -MAX_SNAPSHOT_COUNT = 1021 -SNAP_LAST_PATH_SSB = '0xB958,0x020A' -HOST_IO_SSB = '0xB958,0x0233' -INVALID_LUN_SSB = '0x2E20,0x0000' -INTERCEPT_LDEV_SSB = '0x2E22,0x0001' -HOSTGROUP_INSTALLED = '0xB956,0x3173' -RESOURCE_LOCKED = 'SSB=0x2E11,0x2205' - -LDEV_STATUS_WAITTIME = 120 -LUN_DELETE_WAITTIME = basic_lib.DEFAULT_PROCESS_WAITTIME -LUN_DELETE_INTERVAL = 3 -EXEC_MAX_WAITTIME = 30 -EXEC_RETRY_INTERVAL = 5 -HORCM_WAITTIME = 1 -PAIR_TYPE = ('HORC', 'MRCF', 'QS') -PERMITTED_TYPE = ('CVS', 'HDP', 'HDT') - -RAIDCOM_LOCK_FILE = basic_lib.LOCK_DIR + 'raidcom_' -HORCMGR_LOCK_FILE = basic_lib.LOCK_DIR + 'horcmgr_' -RESOURCE_LOCK_FILE = basic_lib.LOCK_DIR + 'raidcom_resource_' - -STATUS_TABLE = { - 'SMPL': basic_lib.SMPL, - 'COPY': basic_lib.COPY, - 'RCPY': basic_lib.COPY, - 'PAIR': basic_lib.PAIR, - 'PFUL': basic_lib.PAIR, - 'PSUS': basic_lib.PSUS, - 'PFUS': basic_lib.PSUS, - 'SSUS': basic_lib.PSUS, - 'PSUE': basic_lib.PSUE, -} -NOT_SET = '-' -HORCM_RUNNING = 1 -COPY_GROUP = basic_lib.NAME_PREFIX + '%s%s%03X%d' -SNAP_NAME = basic_lib.NAME_PREFIX + 'snap' -LDEV_NAME = basic_lib.NAME_PREFIX + 'ldev-%d-%d' -MAX_MUNS = 3 - -EX_ENAUTH = 202 -EX_ENOOBJ = 205 -EX_CMDRJE = 221 -EX_CMDIOE = 237 -EX_INVCMD = 240 -EX_INVMOD = 241 -EX_ENODEV = 246 -EX_ENOENT = 247 -EX_OPTINV = 248 -EX_ATTDBG = 250 -EX_ATTHOR = 251 -EX_COMERR = 255 - -NO_SUCH_DEVICE = (EX_ENODEV, EX_ENOENT) - -COMMAND_IO_TO_RAID = (EX_CMDRJE, EX_CMDIOE, EX_INVCMD, EX_INVMOD, EX_OPTINV) - -HORCM_ERROR = 
(EX_ATTDBG, EX_ATTHOR, EX_COMERR) - -MAX_HOSTGROUPS = 254 -MAX_HLUN = 2047 - -DEFAULT_PORT_BASE = 31000 - -LOG = logging.getLogger(__name__) - -volume_opts = [ - cfg.StrOpt('hitachi_horcm_numbers', - default='200,201', - help='Instance numbers for HORCM'), - cfg.StrOpt('hitachi_horcm_user', - help='Username of storage system for HORCM'), - cfg.StrOpt('hitachi_horcm_password', - help='Password of storage system for HORCM', - secret=True), - cfg.BoolOpt('hitachi_horcm_add_conf', - default=True, - help='Add to HORCM configuration'), - cfg.IntOpt('hitachi_horcm_resource_lock_timeout', - default=600, - help='Timeout until a resource lock is released, in seconds. ' - 'The value must be between 0 and 7200.'), -] - -CONF = cfg.CONF -CONF.register_opts(volume_opts, group=configuration.SHARED_CONF_GROUP) - - -def horcm_synchronized(function): - @functools.wraps(function) - def wrapper(*args, **kargs): - if len(args) == 1: - inst = args[0].conf.hitachi_horcm_numbers[0] - raidcom_obj_lock = args[0].raidcom_lock - else: - inst = args[1] - raidcom_obj_lock = args[0].raidcom_pair_lock - raidcom_lock_file = '%s%d' % (RAIDCOM_LOCK_FILE, inst) - lock = basic_lib.get_process_lock(raidcom_lock_file) - with raidcom_obj_lock, lock: - return function(*args, **kargs) - return wrapper - - -def storage_synchronized(function): - @functools.wraps(function) - def wrapper(*args, **kargs): - serial = args[0].conf.hitachi_serial_number - resource_lock = args[0].resource_lock - resource_lock_file = '%s%s' % (RESOURCE_LOCK_FILE, serial) - lock = basic_lib.get_process_lock(resource_lock_file) - with resource_lock, lock: - return function(*args, **kargs) - return wrapper - - -class HBSDHORCM(basic_lib.HBSDBasicLib): - - def __init__(self, conf): - super(HBSDHORCM, self).__init__(conf=conf) - - self.copy_groups = [None] * MAX_MUNS - self.raidcom_lock = threading.Lock() - self.raidcom_pair_lock = threading.Lock() - self.horcmgr_lock = threading.Lock() - self.horcmgr_flock = None - self.resource_lock = 
threading.Lock() - - def check_param(self): - numbers = self.conf.hitachi_horcm_numbers.split(',') - if len(numbers) != 2: - msg = basic_lib.output_err(601, param='hitachi_horcm_numbers') - raise exception.HBSDError(message=msg) - for i in numbers: - if not i.isdigit(): - msg = basic_lib.output_err(601, param='hitachi_horcm_numbers') - raise exception.HBSDError(message=msg) - self.conf.hitachi_horcm_numbers = [int(num) for num in numbers] - inst = self.conf.hitachi_horcm_numbers[0] - pair_inst = self.conf.hitachi_horcm_numbers[1] - if inst == pair_inst: - msg = basic_lib.output_err(601, param='hitachi_horcm_numbers') - raise exception.HBSDError(message=msg) - for param in ('hitachi_horcm_user', 'hitachi_horcm_password'): - if not getattr(self.conf, param): - msg = basic_lib.output_err(601, param=param) - raise exception.HBSDError(message=msg) - if self.conf.hitachi_thin_pool_id == self.conf.hitachi_pool_id: - msg = basic_lib.output_err(601, param='hitachi_thin_pool_id') - raise exception.HBSDError(message=msg) - resource_lock_timeout = self.conf.hitachi_horcm_resource_lock_timeout - if not ((resource_lock_timeout >= 0) and - (resource_lock_timeout <= 7200)): - msg = basic_lib.output_err( - 601, param='hitachi_horcm_resource_lock_timeout') - raise exception.HBSDError(message=msg) - for opt in volume_opts: - getattr(self.conf, opt.name) - - def set_copy_groups(self, host_ip): - serial = self.conf.hitachi_serial_number - inst = self.conf.hitachi_horcm_numbers[1] - - for mun in range(MAX_MUNS): - copy_group = COPY_GROUP % (host_ip, serial, inst, mun) - self.copy_groups[mun] = copy_group - - def set_pair_flock(self): - inst = self.conf.hitachi_horcm_numbers[1] - name = '%s%d' % (HORCMGR_LOCK_FILE, inst) - self.horcmgr_flock = basic_lib.FileLock(name, self.horcmgr_lock) - return self.horcmgr_flock - - def check_horcm(self, inst): - args = 'HORCMINST=%d horcmgr -check' % inst - ret, _stdout, _stderr = self.exec_command('env', args=args, - printflag=False) - return ret - - 
def shutdown_horcm(self, inst): - ret, stdout, stderr = self.exec_command( - 'horcmshutdown.sh', args=six.text_type(inst), printflag=False) - return ret - - def start_horcm(self, inst): - return self.exec_command('horcmstart.sh', args=six.text_type(inst), - printflag=False) - - def _wait_for_horcm_shutdown(self, inst): - if self.check_horcm(inst) != HORCM_RUNNING: - raise loopingcall.LoopingCallDone() - - if self.shutdown_horcm(inst): - LOG.error("Failed to shutdown horcm.") - raise loopingcall.LoopingCallDone() - - @horcm_synchronized - def restart_horcm(self, inst=None): - if inst is None: - inst = self.conf.hitachi_horcm_numbers[0] - - loop = loopingcall.FixedIntervalLoopingCall( - self._wait_for_horcm_shutdown, inst) - - loop.start(interval=HORCM_WAITTIME).wait() - - ret, stdout, stderr = self.start_horcm(inst) - if ret: - msg = basic_lib.output_err( - 600, cmd='horcmstart.sh', ret=ret, out=stdout, err=stderr) - raise exception.HBSDCmdError(message=msg, ret=ret, err=stderr) - - def restart_pair_horcm(self): - inst = self.conf.hitachi_horcm_numbers[1] - self.restart_horcm(inst=inst) - - def setup_horcmgr(self, host_ip): - pair_inst = self.conf.hitachi_horcm_numbers[1] - self.set_copy_groups(host_ip) - if self.conf.hitachi_horcm_add_conf: - self.create_horcmconf() - self.create_horcmconf(inst=pair_inst) - self.restart_horcm() - with self.horcmgr_flock: - self.restart_pair_horcm() - ret, stdout, stderr = self.comm_login() - if ret: - msg = basic_lib.output_err( - 600, cmd='raidcom -login', ret=ret, out=stdout, err=stderr) - raise exception.HBSDCmdError(message=msg, ret=ret, err=stderr) - - def _wait_for_exec_horcm(self, cmd, args, printflag, start): - if cmd == 'raidcom': - serial = self.conf.hitachi_serial_number - inst = self.conf.hitachi_horcm_numbers[0] - raidcom_obj_lock = self.raidcom_lock - args = '%s -s %s -I%d' % (args, serial, inst) - else: - inst = self.conf.hitachi_horcm_numbers[1] - raidcom_obj_lock = self.raidcom_pair_lock - args = '%s -ISI%d' % 
(args, inst) - user = self.conf.hitachi_horcm_user - passwd = self.conf.hitachi_horcm_password - raidcom_lock_file = '%s%d' % (RAIDCOM_LOCK_FILE, inst) - lock = basic_lib.get_process_lock(raidcom_lock_file) - - with raidcom_obj_lock, lock: - ret, stdout, stderr = self.exec_command(cmd, args=args, - printflag=printflag) - - # The resource group may be locked by other software. - # Therefore, wait until the lock is released. - if (RESOURCE_LOCKED in stderr and - (time.time() - start < - self.conf.hitachi_horcm_resource_lock_timeout)): - return - - if not ret or ret <= 127: - raise loopingcall.LoopingCallDone((ret, stdout, stderr)) - - if time.time() - start >= EXEC_MAX_WAITTIME: - LOG.error("horcm command timeout.") - raise loopingcall.LoopingCallDone((ret, stdout, stderr)) - - if (ret == EX_ENAUTH and - not re.search("-login %s %s" % (user, passwd), args)): - _ret, _stdout, _stderr = self.comm_login() - if _ret: - LOG.error("Failed to authenticate user.") - raise loopingcall.LoopingCallDone((ret, stdout, stderr)) - - elif ret in HORCM_ERROR: - _ret = 0 - with raidcom_obj_lock, lock: - if self.check_horcm(inst) != HORCM_RUNNING: - _ret, _stdout, _stderr = self.start_horcm(inst) - if _ret and _ret != HORCM_RUNNING: - LOG.error("Failed to start horcm.") - raise loopingcall.LoopingCallDone((ret, stdout, stderr)) - - elif ret not in COMMAND_IO_TO_RAID: - LOG.error("Unexpected error occurs in horcm.") - raise loopingcall.LoopingCallDone((ret, stdout, stderr)) - - def exec_raidcom(self, cmd, args, printflag=True): - loop = loopingcall.FixedIntervalLoopingCall( - self._wait_for_exec_horcm, cmd, args, printflag, time.time()) - - return loop.start(interval=EXEC_RETRY_INTERVAL).wait() - - def comm_login(self): - rmi_user = self.conf.hitachi_horcm_user - rmi_pass = self.conf.hitachi_horcm_password - args = '-login %s %s' % (rmi_user, rmi_pass) - return self.exec_raidcom('raidcom', args, printflag=False) - - def comm_reset_status(self): - self.exec_raidcom('raidcom', 'reset 
command_status') - - def comm_get_status(self): - return self.exec_raidcom('raidcom', 'get command_status') - - def get_command_error(self, stdout): - lines = stdout.splitlines() - line = shlex.split(lines[1]) - return int(line[3]) - - def comm_get_ldev(self, ldev): - opt = 'get ldev -ldev_id %s' % ldev - ret, stdout, stderr = self.exec_raidcom('raidcom', opt, - printflag=False) - if ret: - opt = 'raidcom %s' % opt - msg = basic_lib.output_err( - 600, cmd=opt, ret=ret, out=stdout, err=stderr) - raise exception.HBSDCmdError(message=msg, ret=ret, err=stderr) - return stdout - - def add_used_hlun(self, port, gid, used_list): - opt = 'get lun -port %s-%d' % (port, gid) - ret, stdout, stderr = self.exec_raidcom('raidcom', opt, - printflag=False) - if ret: - opt = 'raidcom %s' % opt - msg = basic_lib.output_err( - 600, cmd=opt, ret=ret, out=stdout, err=stderr) - raise exception.HBSDCmdError(message=msg, ret=ret, err=stderr) - lines = stdout.splitlines() - for line in lines[1:]: - lun = int(shlex.split(line)[3]) - if lun not in used_list: - used_list.append(lun) - - def get_unused_ldev(self, ldev_range): - start = ldev_range[0] - end = ldev_range[1] - - while start < end: - if end - start + 1 > GETSTORAGEARRAY_ONCE: - cnt = GETSTORAGEARRAY_ONCE - else: - cnt = end - start + 1 - opt = 'get ldev -ldev_id %d -cnt %d' % (start, cnt) - ret, stdout, stderr = self.exec_raidcom('raidcom', opt, - printflag=False) - if ret: - opt = 'raidcom %s' % opt - msg = basic_lib.output_err( - 600, cmd=opt, ret=ret, out=stdout, err=stderr) - raise exception.HBSDCmdError(message=msg, ret=ret, err=stderr) - - lines = stdout.splitlines() - ldev_num = None - for line in lines: - if re.match("LDEV :", line): - ldev_num = int(shlex.split(line)[2]) - continue - if re.match("VOL_TYPE : NOT DEFINED", line): - return ldev_num - - start += GETSTORAGEARRAY_ONCE - else: - msg = basic_lib.output_err(648, resource='LDEV') - raise exception.HBSDError(message=msg) - - def get_hgname_gid(self, port, 
host_grp_name): - opt = 'get host_grp -port %s -key host_grp' % port - ret, stdout, stderr = self.exec_raidcom('raidcom', opt, - printflag=False) - if ret: - opt = 'raidcom %s' % opt - msg = basic_lib.output_err( - 600, cmd=opt, ret=ret, out=stdout, err=stderr) - raise exception.HBSDCmdError(message=msg, ret=ret, err=stderr) - lines = stdout.splitlines() - for line in lines[1:]: - line = shlex.split(line) - if line[2] == host_grp_name: - return int(line[1]) - return None - - def get_unused_gid(self, range, port): - _min = range[0] - _max = range[1] - opt = 'get host_grp -port %s -key host_grp' % port - ret, stdout, stderr = self.exec_raidcom('raidcom', opt, - printflag=False) - if ret: - opt = 'raidcom %s' % opt - msg = basic_lib.output_err( - 600, cmd=opt, ret=ret, out=stdout, err=stderr) - raise exception.HBSDCmdError(message=msg, ret=ret, err=stderr) - - lines = stdout.splitlines() - free_gid = None - for line in lines[_min + 1:]: - line = shlex.split(line) - if int(line[1]) > _max: - break - if line[2] == '-': - free_gid = int(line[1]) - break - if free_gid is None: - msg = basic_lib.output_err(648, resource='GID') - raise exception.HBSDError(message=msg) - return free_gid - - def comm_set_target_wwns(self, target_ports): - opt = 'get port' - ret, stdout, stderr = self.exec_raidcom('raidcom', opt, - printflag=False) - if ret: - opt = 'raidcom %s' % opt - msg = basic_lib.output_err( - 600, cmd=opt, ret=ret, out=stdout, err=stderr) - raise exception.HBSDCmdError(message=msg, ret=ret, err=stderr) - - target_wwns = {} - lines = stdout.splitlines() - for line in lines[1:]: - line = shlex.split(line) - port = line[0][:5] - if target_ports and port not in target_ports: - continue - - target_wwns[port] = line[10] - LOG.debug('target wwns: %s', target_wwns) - return target_wwns - - def comm_get_hbawwn(self, hostgroups, wwns, port, is_detected): - opt = 'get host_grp -port %s' % port - ret, stdout, stderr = self.exec_raidcom('raidcom', opt, - printflag=False) - if ret: - 
opt = 'raidcom %s' % opt - msg = basic_lib.output_err( - 600, cmd=opt, ret=ret, out=stdout, err=stderr) - raise exception.HBSDCmdError(message=msg, ret=ret, err=stderr) - - lines = stdout.splitlines() - found_wwns = 0 - for line in lines[1:]: - line = shlex.split(line) - if not re.match(basic_lib.NAME_PREFIX, line[2]): - continue - gid = line[1] - opt = 'get hba_wwn -port %s-%s' % (port, gid) - ret, stdout, stderr = self.exec_raidcom( - 'raidcom', opt, printflag=False) - if ret: - opt = 'raidcom %s' % opt - msg = basic_lib.output_err( - 600, cmd=opt, ret=ret, out=stdout, err=stderr) - raise exception.HBSDCmdError(message=msg, ret=ret, err=stderr) - - lines = stdout.splitlines() - for line in lines[1:]: - hba_info = shlex.split(line) - - if hba_info[3] in wwns: - hostgroups.append({'port': six.text_type(port), - 'gid': int(hba_info[1]), - 'initiator_wwn': hba_info[3], - 'detected': is_detected}) - found_wwns += 1 - if len(wwns) == found_wwns: - break - - if len(wwns) == found_wwns: - break - - def comm_chk_login_wwn(self, wwns, port): - opt = 'get port -port %s' % port - ret, stdout, stderr = self.exec_raidcom('raidcom', opt, - printflag=False) - - if ret: - opt = 'raidcom %s' % opt - msg = basic_lib.output_err( - 600, cmd=opt, ret=ret, out=stdout, err=stderr) - raise exception.HBSDCmdError(message=msg, ret=ret, err=stderr) - - lines = stdout.splitlines() - for line in lines[1:]: - login_info = shlex.split(line) - if login_info[1] in wwns: - return True - else: - return False - - def comm_get_hostgroup_info(self, hgs, wwns, target_ports, login=True): - security_ports = [] - hostgroups = [] - - opt = 'get port' - ret, stdout, stderr = self.exec_raidcom('raidcom', opt, - printflag=False) - if ret: - opt = 'raidcom %s' % opt - msg = basic_lib.output_err( - 600, cmd=opt, ret=ret, out=stdout, err=stderr) - raise exception.HBSDCmdError(message=msg, ret=ret, err=stderr) - - lines = stdout.splitlines() - - for line in lines[1:]: - line = shlex.split(line) - port = 
line[0][:5] - if target_ports and port not in target_ports: - continue - security = True if line[7] == 'Y' else False - - is_detected = None - if login: - is_detected = self.comm_chk_login_wwn(wwns, port) - - if security: - self.comm_get_hbawwn(hostgroups, wwns, port, is_detected) - security_ports.append(port) - - for hostgroup in hostgroups: - hgs.append(hostgroup) - - return security_ports - - def _get_lun(self, port, gid, ldev): - lun = None - - opt = 'get lun -port %s-%d' % (port, gid) - ret, stdout, stderr = self.exec_raidcom('raidcom', opt, - printflag=False) - if ret: - opt = 'raidcom %s' % opt - msg = basic_lib.output_err( - 600, cmd=opt, ret=ret, out=stdout, err=stderr) - raise exception.HBSDCmdError(message=msg, ret=ret, err=stderr) - - lines = stdout.splitlines() - for line in lines[1:]: - line = shlex.split(line) - if line[5] == six.text_type(ldev): - lun = int(line[3]) - break - - return lun - - def _wait_for_delete_lun(self, hostgroup, ldev, start): - opt = 'delete lun -port %s-%d -ldev_id %d' % (hostgroup['port'], - hostgroup['gid'], ldev) - ret, stdout, stderr = self.exec_raidcom('raidcom', opt) - if not ret: - raise loopingcall.LoopingCallDone() - - if (re.search('SSB=%s' % SNAP_LAST_PATH_SSB, stderr) and - not self.comm_get_snapshot(ldev) or - re.search('SSB=%s' % HOST_IO_SSB, stderr)): - LOG.warning(basic_lib.set_msg(310, ldev=ldev, reason=stderr)) - - if time.time() - start >= LUN_DELETE_WAITTIME: - msg = basic_lib.output_err( - 637, method='_wait_for_delete_lun', - timeout=LUN_DELETE_WAITTIME) - raise exception.HBSDError(message=msg) - else: - opt = 'raidcom %s' % opt - msg = basic_lib.output_err( - 600, cmd=opt, ret=ret, out=stdout, err=stderr) - raise exception.HBSDCmdError(message=msg, ret=ret, err=stderr) - - def comm_delete_lun_core(self, hostgroup, ldev): - loop = loopingcall.FixedIntervalLoopingCall( - self._wait_for_delete_lun, hostgroup, ldev, time.time()) - - loop.start(interval=LUN_DELETE_INTERVAL).wait() - - def 
comm_delete_lun(self, hostgroups, ldev): - deleted_hostgroups = [] - no_ldev_cnt = 0 - for hostgroup in hostgroups: - port = hostgroup['port'] - gid = hostgroup['gid'] - is_deleted = False - for deleted in deleted_hostgroups: - if port == deleted['port'] and gid == deleted['gid']: - is_deleted = True - if is_deleted: - continue - try: - self.comm_delete_lun_core(hostgroup, ldev) - except exception.HBSDCmdError as ex: - no_ldev_cnt += 1 - if ex.ret == EX_ENOOBJ: - if no_ldev_cnt != len(hostgroups): - continue - raise exception.HBSDNotFound - else: - raise - deleted_hostgroups.append({'port': port, 'gid': gid}) - - def _check_ldev_status(self, ldev, status): - opt = ('get ldev -ldev_id %s -check_status %s -time %s' % - (ldev, status, LDEV_STATUS_WAITTIME)) - ret, _stdout, _stderr = self.exec_raidcom('raidcom', opt) - return ret - - # Don't remove a storage_syncronized decorator. - # It is need to avoid comm_add_ldev() and comm_delete_ldev() are - # executed concurrently. - @storage_synchronized - def comm_add_ldev(self, pool_id, ldev, capacity, is_vvol): - emulation = 'OPEN-V' - if is_vvol: - opt = ('add ldev -pool snap -ldev_id %d ' - '-capacity %dG -emulation %s' - % (ldev, capacity, emulation)) - else: - opt = ('add ldev -pool %d -ldev_id %d ' - '-capacity %dG -emulation %s' - % (pool_id, ldev, capacity, emulation)) - - self.comm_reset_status() - ret, stdout, stderr = self.exec_raidcom('raidcom', opt) - if ret: - if re.search('SSB=%s' % INTERCEPT_LDEV_SSB, stderr): - raise exception.HBSDNotFound - - msg = basic_lib.output_err( - 600, cmd='raidcom %s' % opt, ret=ret, out=stdout, err=stderr) - raise exception.HBSDCmdError(message=msg, ret=ret, err=stderr) - - if self._check_ldev_status(ldev, "NML"): - msg = basic_lib.output_err(653, ldev=ldev) - raise exception.HBSDError(message=msg) - - def comm_add_hostgrp(self, port, gid, host_grp_name): - opt = 'add host_grp -port %s-%d -host_grp_name %s' % (port, gid, - host_grp_name) - ret, stdout, stderr = 
self.exec_raidcom('raidcom', opt) - if ret: - if re.search('SSB=%s' % HOSTGROUP_INSTALLED, stderr): - raise exception.HBSDNotFound - - msg = basic_lib.output_err( - 600, cmd='raidcom %s' % opt, ret=ret, out=stdout, err=stderr) - raise exception.HBSDCmdError(message=msg, ret=ret, err=stderr) - - def comm_del_hostgrp(self, port, gid, host_grp_name): - opt = 'delete host_grp -port %s-%d %s' % (port, gid, host_grp_name) - ret, stdout, stderr = self.exec_raidcom('raidcom', opt) - if ret: - msg = basic_lib.output_err( - 600, cmd='raidcom %s' % opt, ret=ret, out=stdout, err=stderr) - raise exception.HBSDCmdError(message=msg, ret=ret, err=stderr) - - def comm_add_hbawwn(self, port, gid, wwn): - opt = 'add hba_wwn -port %s-%s -hba_wwn %s' % (port, gid, wwn) - ret, stdout, stderr = self.exec_raidcom('raidcom', opt) - if ret: - msg = basic_lib.output_err( - 600, cmd='raidcom %s' % opt, ret=ret, out=stdout, err=stderr) - raise exception.HBSDCmdError(message=msg, ret=ret, err=stderr) - - @storage_synchronized - def comm_add_lun(self, unused_command, hostgroups, ldev, is_once=False): - tmp_hostgroups = hostgroups[:] - is_ok = False - used_list = [] - lun = None - old_lun = None - - for hostgroup in hostgroups: - port = hostgroup['port'] - gid = hostgroup['gid'] - self.add_used_hlun(port, gid, used_list) - lun = self._get_lun(port, gid, ldev) - - # When 'lun' or 'old_lun' is 0, it should be true. - # So, it cannot remove 'is not None'. - if lun is not None: - if old_lun is not None and old_lun != lun: - msg = basic_lib.output_err(648, resource='LUN (HLUN)') - raise exception.HBSDError(message=msg) - is_ok = True - hostgroup['lun'] = lun - tmp_hostgroups.remove(hostgroup) - old_lun = lun - - if is_once: - # When 'lun' is 0, it should be true. - # So, it cannot remove 'is not None'. 
- if lun is not None: - return - elif len(used_list) < MAX_HLUN + 1: - break - else: - tmp_hostgroups.remove(hostgroup) - if tmp_hostgroups: - used_list = [] - - if not used_list: - lun = 0 - elif lun is None: - for i in range(MAX_HLUN + 1): - if i not in used_list: - lun = i - break - else: - raise exception.HBSDNotFound - - opt = None - ret = 0 - stdout = None - stderr = None - invalid_hgs_str = None - - for hostgroup in tmp_hostgroups: - port = hostgroup['port'] - gid = hostgroup['gid'] - if not hostgroup['detected']: - if invalid_hgs_str: - invalid_hgs_str = '%s, %s:%d' % (invalid_hgs_str, - port, gid) - else: - invalid_hgs_str = '%s:%d' % (port, gid) - continue - opt = 'add lun -port %s-%d -ldev_id %d -lun_id %d' % ( - port, gid, ldev, lun) - ret, stdout, stderr = self.exec_raidcom('raidcom', opt) - if not ret: - is_ok = True - hostgroup['lun'] = lun - if is_once: - break - else: - LOG.warning(basic_lib.set_msg( - 314, ldev=ldev, lun=lun, port=port, id=gid)) - - if not is_ok: - if stderr: - opt = 'raidcom %s' % opt - msg = basic_lib.output_err( - 600, cmd=opt, ret=ret, out=stdout, err=stderr) - raise exception.HBSDCmdError(message=msg, ret=ret, err=stderr) - else: - msg = basic_lib.output_err(659, gid=invalid_hgs_str) - raise exception.HBSDError(message=msg) - - # Don't remove a storage_syncronized decorator. - # It is need to avoid comm_add_ldev() and comm_delete_ldev() are - # executed concurrently. 
- @storage_synchronized - def comm_delete_ldev(self, ldev, is_vvol): - ret = -1 - stdout = "" - stderr = "" - self.comm_reset_status() - opt = 'delete ldev -ldev_id %d' % ldev - ret, stdout, stderr = self.exec_raidcom('raidcom', opt) - if ret: - if re.search('SSB=%s' % INVALID_LUN_SSB, stderr): - raise exception.HBSDNotFound - - msg = basic_lib.output_err( - 600, cmd='raidcom %s' % opt, ret=ret, out=stdout, err=stderr) - raise exception.HBSDCmdError(message=msg, ret=ret, err=stderr) - - ret, stdout, stderr = self.comm_get_status() - if ret or self.get_command_error(stdout): - opt = 'raidcom %s' % opt - msg = basic_lib.output_err( - 600, cmd=opt, ret=ret, out=stdout, err=stderr) - raise exception.HBSDCmdError(message=msg, ret=ret, err=stderr) - - def comm_extend_ldev(self, ldev, old_size, new_size): - extend_size = new_size - old_size - opt = 'extend ldev -ldev_id %d -capacity %dG' % (ldev, extend_size) - ret, stdout, stderr = self.exec_raidcom('raidcom', opt) - if ret: - msg = basic_lib.output_err( - 600, cmd='raidcom %s' % opt, ret=ret, out=stdout, err=stderr) - raise exception.HBSDCmdError(message=msg, ret=ret, err=stderr) - - def comm_get_dp_pool(self, pool_id): - opt = 'get dp_pool' - ret, stdout, stderr = self.exec_raidcom('raidcom', opt, - printflag=False) - if ret: - opt = 'raidcom %s' % opt - msg = basic_lib.output_err( - 600, cmd=opt, ret=ret, out=stdout, err=stderr) - raise exception.HBSDCmdError(message=msg, ret=ret, err=stderr) - - lines = stdout.splitlines() - for line in lines[1:]: - if int(shlex.split(line)[0]) == pool_id: - free_gb = int(shlex.split(line)[3]) / 1024 - total_gb = int(shlex.split(line)[4]) / 1024 - return total_gb, free_gb - - msg = basic_lib.output_err(640, pool_id=pool_id) - raise exception.HBSDError(message=msg) - - def comm_modify_ldev(self, ldev): - args = 'modify ldev -ldev_id %d -status discard_zero_page' % ldev - ret, stdout, stderr = self.exec_raidcom('raidcom', args) - if ret: - LOG.warning(basic_lib.set_msg(315, ldev=ldev, 
reason=stderr)) - - def is_detected(self, port, wwn): - return self.comm_chk_login_wwn([wwn], port) - - def discard_zero_page(self, ldev): - try: - self.comm_modify_ldev(ldev) - except Exception as ex: - LOG.warning('Failed to discard zero page: %s', ex) - - def comm_add_snapshot(self, pvol, svol): - pool = self.conf.hitachi_thin_pool_id - copy_size = self.conf.hitachi_copy_speed - args = ('add snapshot -ldev_id %d %d -pool %d ' - '-snapshot_name %s -copy_size %d' - % (pvol, svol, pool, SNAP_NAME, copy_size)) - ret, stdout, stderr = self.exec_raidcom('raidcom', args) - if ret: - msg = basic_lib.output_err( - 600, cmd='raidcom %s' % args, ret=ret, out=stdout, err=stderr) - raise exception.HBSDCmdError(message=msg, ret=ret, err=stderr) - - def comm_delete_snapshot(self, ldev): - args = 'delete snapshot -ldev_id %d' % ldev - ret, stdout, stderr = self.exec_raidcom('raidcom', args) - if ret: - msg = basic_lib.output_err( - 600, cmd='raidcom %s' % args, ret=ret, out=stdout, err=stderr) - raise exception.HBSDCmdError(message=msg, ret=ret, err=stderr) - - def comm_modify_snapshot(self, ldev, op): - args = ('modify snapshot -ldev_id %d -snapshot_data %s' % (ldev, op)) - ret, stdout, stderr = self.exec_raidcom('raidcom', args) - if ret: - msg = basic_lib.output_err( - 600, cmd='raidcom %s' % args, ret=ret, out=stdout, err=stderr) - raise exception.HBSDCmdError(message=msg, ret=ret, err=stderr) - - def _wait_for_snap_status(self, pvol, svol, status, timeout, start): - if (self.get_snap_pvol_status(pvol, svol) in status and - self.get_snap_svol_status(svol) in status): - raise loopingcall.LoopingCallDone() - - if time.time() - start >= timeout: - msg = basic_lib.output_err( - 637, method='_wait_for_snap_status', timuout=timeout) - raise exception.HBSDError(message=msg) - - def wait_snap(self, pvol, svol, status, timeout, interval): - loop = loopingcall.FixedIntervalLoopingCall( - self._wait_for_snap_status, pvol, - svol, status, timeout, time.time()) - - 
loop.start(interval=interval).wait() - - def comm_get_snapshot(self, ldev): - args = 'get snapshot -ldev_id %d' % ldev - ret, stdout, stderr = self.exec_raidcom('raidcom', args, - printflag=False) - if ret: - opt = 'raidcom %s' % args - msg = basic_lib.output_err( - 600, cmd=opt, ret=ret, out=stdout, err=stderr) - raise exception.HBSDCmdError(message=msg, ret=ret, err=stderr) - return stdout - - def check_snap_count(self, ldev): - stdout = self.comm_get_snapshot(ldev) - if not stdout: - return - lines = stdout.splitlines() - if len(lines) >= MAX_SNAPSHOT_COUNT + 1: - msg = basic_lib.output_err( - 615, copy_method=basic_lib.THIN, pvol=ldev) - raise exception.HBSDBusy(message=msg) - - def get_snap_pvol_status(self, pvol, svol): - stdout = self.comm_get_snapshot(pvol) - if not stdout: - return basic_lib.SMPL - lines = stdout.splitlines() - for line in lines[1:]: - line = shlex.split(line) - if int(line[6]) == svol: - return STATUS_TABLE[line[2]] - else: - return basic_lib.SMPL - - def get_snap_svol_status(self, ldev): - stdout = self.comm_get_snapshot(ldev) - if not stdout: - return basic_lib.SMPL - lines = stdout.splitlines() - line = shlex.split(lines[1]) - return STATUS_TABLE[line[2]] - - @horcm_synchronized - def create_horcmconf(self, inst=None): - if inst is None: - inst = self.conf.hitachi_horcm_numbers[0] - - serial = self.conf.hitachi_serial_number - filename = '/etc/horcm%d.conf' % inst - - port = DEFAULT_PORT_BASE + inst - - found = False - - if not os.path.exists(filename): - file_str = """ -HORCM_MON -#ip_address service poll(10ms) timeout(10ms) -127.0.0.1 %16d 6000 3000 -HORCM_CMD -""" % port - else: - file_str = utils.read_file_as_root(filename) - - lines = file_str.splitlines() - for line in lines: - if re.match(r'\\\\.\\CMD-%s:/dev/sd' % serial, line): - found = True - break - - if not found: - insert_str = r'\\\\.\\CMD-%s:/dev/sd' % serial - file_str = re.sub(r'(\n\bHORCM_CMD.*|^\bHORCM_CMD.*)', - r'\1\n%s\n' % insert_str, file_str) - - try: - 
utils.execute('tee', filename, process_input=file_str, - run_as_root=True) - except putils.ProcessExecutionError as ex: - msg = basic_lib.output_err( - 632, file=filename, ret=ex.exit_code, err=ex.stderr) - raise exception.HBSDError(message=msg) - - def comm_get_copy_grp(self): - ret, stdout, stderr = self.exec_raidcom('raidcom', 'get copy_grp', - printflag=False) - if ret: - opt = 'raidcom get copy_grp' - msg = basic_lib.output_err( - 600, cmd=opt, ret=ret, out=stdout, err=stderr) - raise exception.HBSDCmdError(message=msg, ret=ret, err=stderr) - return stdout - - def comm_add_copy_grp(self, copy_group, pvol_group, svol_group, mun): - args = ('add copy_grp -copy_grp_name %s %s %s -mirror_id %d' - % (copy_group, pvol_group, svol_group, mun)) - ret, stdout, stderr = self.exec_raidcom('raidcom', args, - printflag=False) - if ret: - opt = 'raidcom %s' % args - msg = basic_lib.output_err( - 600, cmd=opt, ret=ret, out=stdout, err=stderr) - raise exception.HBSDCmdError(message=msg, ret=ret, err=stderr) - - def comm_delete_copy_grp(self, copy_group): - args = 'delete copy_grp -copy_grp_name %s' % copy_group - ret, stdout, stderr = self.exec_raidcom('raidcom', args, - printflag=False) - if ret: - opt = 'raidcom %s' % args - msg = basic_lib.output_err( - 600, cmd=opt, ret=ret, out=stdout, err=stderr) - raise exception.HBSDCmdError(message=msg, ret=ret, err=stderr) - - def comm_get_device_grp(self, group_name): - args = 'get device_grp -device_grp_name %s' % group_name - ret, stdout, stderr = self.exec_raidcom('raidcom', args, - printflag=False) - if ret: - opt = 'raidcom %s' % args - msg = basic_lib.output_err( - 600, cmd=opt, ret=ret, out=stdout, err=stderr) - raise exception.HBSDCmdError(message=msg, ret=ret, err=stderr) - return stdout - - def comm_add_device_grp(self, group_name, ldev_name, ldev): - args = ('add device_grp -device_grp_name %s %s -ldev_id %d' - % (group_name, ldev_name, ldev)) - ret, stdout, stderr = self.exec_raidcom('raidcom', args, - printflag=False) 
- if ret: - opt = 'raidcom %s' % args - msg = basic_lib.output_err( - 600, cmd=opt, ret=ret, out=stdout, err=stderr) - raise exception.HBSDCmdError(message=msg, ret=ret, err=stderr) - - def comm_delete_device_grp(self, group_name, ldev): - args = ('delete device_grp -device_grp_name %s -ldev_id %d' - % (group_name, ldev)) - ret, stdout, stderr = self.exec_raidcom('raidcom', args, - printflag=False) - if ret: - opt = 'raidcom %s' % args - msg = basic_lib.output_err( - 600, cmd=opt, ret=ret, out=stdout, err=stderr) - raise exception.HBSDCmdError(message=msg, ret=ret, err=stderr) - - def comm_paircreate(self, copy_group, ldev_name): - args = ('-g %s -d %s -split -fq quick -c %d -vl' - % (copy_group, ldev_name, self.conf.hitachi_copy_speed)) - ret, stdout, stderr = self.exec_raidcom('paircreate', args) - if ret: - opt = 'paircreate %s' % args - msg = basic_lib.output_err( - 600, cmd=opt, ret=ret, out=stdout, err=stderr) - raise exception.HBSDCmdError(message=msg, ret=ret, err=stderr) - - def comm_pairsplit(self, copy_group, ldev_name): - args = '-g %s -d %s -S' % (copy_group, ldev_name) - ret, stdout, stderr = self.exec_raidcom('pairsplit', args) - if ret: - opt = 'pairsplit %s' % args - msg = basic_lib.output_err( - 600, cmd=opt, ret=ret, out=stdout, err=stderr) - raise exception.HBSDCmdError(message=msg, ret=ret, err=stderr) - - def comm_pairevtwait(self, copy_group, ldev_name, check_svol): - if not check_svol: - option = '-nowait' - else: - option = '-nowaits' - args = '-g %s -d %s %s' % (copy_group, ldev_name, option) - ret, stdout, stderr = self.exec_raidcom('pairevtwait', args, - printflag=False) - if ret > 127: - opt = 'pairevtwait %s' % args - msg = basic_lib.output_err( - 600, cmd=opt, ret=ret, out=stdout, err=stderr) - raise exception.HBSDCmdError(message=msg, ret=ret, err=stderr) - return ret - - def comm_pairdisplay(self, copy_group, ldev_name=None): - if not ldev_name: - args = '-g %s -CLI' % copy_group - else: - args = '-g %s -d %s -CLI' % (copy_group, 
ldev_name) - ret, stdout, stderr = self.exec_raidcom('pairdisplay', args, - printflag=False) - if ret and ret not in NO_SUCH_DEVICE: - opt = 'pairdisplay %s' % args - msg = basic_lib.output_err( - 600, cmd=opt, ret=ret, out=stdout, err=stderr) - raise exception.HBSDCmdError(message=msg, ret=ret, err=stderr) - return ret, stdout, stderr - - def check_copy_grp(self, copy_group): - stdout = self.comm_get_copy_grp() - lines = stdout.splitlines() - count = 0 - for line in lines[1:]: - line = shlex.split(line) - if line[0] == copy_group: - count += 1 - if count == 2: - break - return count - - def check_device_grp(self, group_name, ldev, ldev_name=None): - stdout = self.comm_get_device_grp(group_name) - lines = stdout.splitlines() - for line in lines[1:]: - line = shlex.split(line) - if int(line[2]) == ldev: - if not ldev_name: - return True - else: - return line[1] == ldev_name - else: - return False - - def is_smpl(self, copy_group, ldev_name): - ret, stdout, stderr = self.comm_pairdisplay(copy_group, - ldev_name=ldev_name) - if not stdout: - return True - - lines = stdout.splitlines() - for line in lines[1:]: - line = shlex.split(line) - if line[9] in [NOT_SET, 'SMPL']: - return True - else: - return False - - def get_copy_groups(self): - copy_groups = [] - stdout = self.comm_get_copy_grp() - lines = stdout.splitlines() - for line in lines[1:]: - line = shlex.split(line) - if line[0] in self.copy_groups and line[0] not in copy_groups: - copy_groups.append(line[0]) - return copy_groups - - def get_matched_copy_group(self, pvol, svol, ldev_name): - for copy_group in self.get_copy_groups(): - pvol_group = '%sP' % copy_group - if self.check_device_grp(pvol_group, pvol, ldev_name=ldev_name): - return copy_group - else: - return None - - def get_paired_info(self, ldev, only_flag=False): - paired_info = {'pvol': None, 'svol': []} - pvol = None - is_svol = False - - stdout = self.comm_get_snapshot(ldev) - if stdout: - lines = stdout.splitlines() - line = shlex.split(lines[1]) 
- status = STATUS_TABLE.get(line[2], basic_lib.UNKN) - - if line[1] == 'P-VOL': - pvol = ldev - svol = int(line[6]) - else: - is_svol = True - pvol = int(line[6]) - svol = ldev - - if status == basic_lib.PSUS: - status = self.get_snap_pvol_status(pvol, svol) - - svol_info = {'lun': svol, 'status': status, 'is_vvol': True} - paired_info['svol'].append(svol_info) - paired_info['pvol'] = pvol - - if only_flag or is_svol: - return paired_info - - for copy_group in self.get_copy_groups(): - ldev_name = None - pvol_status = basic_lib.UNKN - svol_status = basic_lib.UNKN - - ret, stdout, stderr = self.comm_pairdisplay(copy_group) - if not stdout: - continue - - lines = stdout.splitlines() - for line in lines[1:]: - line = shlex.split(line) - if line[9] not in ['P-VOL', 'S-VOL']: - continue - - ldev0 = int(line[8]) - ldev1 = int(line[12]) - if ldev not in [ldev0, ldev1]: - continue - - ldev_name = line[1] - - if line[9] == 'P-VOL': - pvol = ldev0 - svol = ldev1 - pvol_status = STATUS_TABLE.get(line[10], basic_lib.UNKN) - else: - svol = ldev0 - pvol = ldev1 - svol_status = STATUS_TABLE.get(line[10], basic_lib.UNKN) - - if svol == ldev: - is_svol = True - - if not ldev_name: - continue - - pvol_group = '%sP' % copy_group - pvol_ok = self.check_device_grp(pvol_group, pvol, - ldev_name=ldev_name) - - svol_group = '%sS' % copy_group - svol_ok = self.check_device_grp(svol_group, svol, - ldev_name=ldev_name) - - if pvol_ok and svol_ok: - if pvol_status == basic_lib.PSUS: - status = svol_status - else: - status = pvol_status - - svol_info = {'lun': svol, 'status': status, 'is_vvol': False} - paired_info['svol'].append(svol_info) - - if is_svol: - break - - # When 'pvol' is 0, it should be true. - # So, it cannot remove 'is not None'. 
- if pvol is not None and paired_info['pvol'] is None: - paired_info['pvol'] = pvol - - return paired_info - - def add_pair_config(self, pvol, svol, copy_group, ldev_name, mun): - pvol_group = '%sP' % copy_group - svol_group = '%sS' % copy_group - self.comm_add_device_grp(pvol_group, ldev_name, pvol) - self.comm_add_device_grp(svol_group, ldev_name, svol) - nr_copy_groups = self.check_copy_grp(copy_group) - if nr_copy_groups == 1: - self.comm_delete_copy_grp(copy_group) - if nr_copy_groups != 2: - self.comm_add_copy_grp(copy_group, pvol_group, svol_group, mun) - - def delete_pair_config(self, pvol, svol, copy_group, ldev_name): - pvol_group = '%sP' % copy_group - svol_group = '%sS' % copy_group - if self.check_device_grp(pvol_group, pvol, ldev_name=ldev_name): - self.comm_delete_device_grp(pvol_group, pvol) - if self.check_device_grp(svol_group, svol, ldev_name=ldev_name): - self.comm_delete_device_grp(svol_group, svol) - - def _wait_for_pair_status(self, copy_group, ldev_name, - status, timeout, check_svol, start): - if self.comm_pairevtwait(copy_group, ldev_name, - check_svol) in status: - raise loopingcall.LoopingCallDone() - - if time.time() - start >= timeout: - msg = basic_lib.output_err( - 637, method='_wait_for_pair_status', timout=timeout) - raise exception.HBSDError(message=msg) - - def wait_pair(self, copy_group, ldev_name, status, timeout, - interval, check_svol=False): - loop = loopingcall.FixedIntervalLoopingCall( - self._wait_for_pair_status, copy_group, ldev_name, - status, timeout, check_svol, time.time()) - - loop.start(interval=interval).wait() - - def comm_create_pair(self, pvol, svol, is_vvol): - timeout = basic_lib.DEFAULT_PROCESS_WAITTIME - interval = self.conf.hitachi_copy_check_interval - if not is_vvol: - restart = False - create = False - ldev_name = LDEV_NAME % (pvol, svol) - mun = 0 - for mun in range(MAX_MUNS): - copy_group = self.copy_groups[mun] - pvol_group = '%sP' % copy_group - - if not self.check_device_grp(pvol_group, pvol): - 
break - else: - msg = basic_lib.output_err( - 615, copy_method=basic_lib.FULL, pvol=pvol) - raise exception.HBSDBusy(message=msg) - try: - self.add_pair_config(pvol, svol, copy_group, ldev_name, mun) - self.restart_pair_horcm() - restart = True - self.comm_paircreate(copy_group, ldev_name) - create = True - self.wait_pair(copy_group, ldev_name, [basic_lib.PSUS], - timeout, interval) - self.wait_pair(copy_group, ldev_name, - [basic_lib.PSUS, basic_lib.COPY], - timeout, interval, check_svol=True) - except Exception: - with excutils.save_and_reraise_exception(): - if create: - try: - self.wait_pair(copy_group, ldev_name, - [basic_lib.PSUS], timeout, - interval) - self.wait_pair(copy_group, ldev_name, - [basic_lib.PSUS], timeout, - interval, check_svol=True) - except Exception as ex: - LOG.warning('Failed to create pair: %s', ex) - - try: - self.comm_pairsplit(copy_group, ldev_name) - self.wait_pair( - copy_group, ldev_name, - [basic_lib.SMPL], timeout, - self.conf.hitachi_async_copy_check_interval) - except Exception as ex: - LOG.warning('Failed to create pair: %s', ex) - - if self.is_smpl(copy_group, ldev_name): - try: - self.delete_pair_config(pvol, svol, copy_group, - ldev_name) - except Exception as ex: - LOG.warning('Failed to create pair: %s', ex) - - if restart: - try: - self.restart_pair_horcm() - except Exception as ex: - LOG.warning('Failed to restart horcm: %s', ex) - - else: - self.check_snap_count(pvol) - self.comm_add_snapshot(pvol, svol) - - try: - self.wait_snap(pvol, svol, [basic_lib.PAIR], timeout, interval) - self.comm_modify_snapshot(svol, 'create') - self.wait_snap(pvol, svol, [basic_lib.PSUS], timeout, interval) - except Exception: - with excutils.save_and_reraise_exception(): - try: - self.comm_delete_snapshot(svol) - self.wait_snap( - pvol, svol, [basic_lib.SMPL], timeout, - self.conf.hitachi_async_copy_check_interval) - except Exception as ex: - LOG.warning('Failed to create pair: %s', ex) - - def delete_pair(self, pvol, svol, is_vvol): - 
timeout = basic_lib.DEFAULT_PROCESS_WAITTIME - interval = self.conf.hitachi_async_copy_check_interval - if not is_vvol: - ldev_name = LDEV_NAME % (pvol, svol) - copy_group = self.get_matched_copy_group(pvol, svol, ldev_name) - if not copy_group: - return - try: - self.comm_pairsplit(copy_group, ldev_name) - self.wait_pair(copy_group, ldev_name, [basic_lib.SMPL], - timeout, interval) - finally: - if self.is_smpl(copy_group, ldev_name): - self.delete_pair_config(pvol, svol, copy_group, ldev_name) - else: - self.comm_delete_snapshot(svol) - self.wait_snap(pvol, svol, [basic_lib.SMPL], timeout, interval) - - def comm_raidqry(self): - ret, stdout, stderr = self.exec_command('raidqry', '-h') - if ret: - opt = 'raidqry -h' - msg = basic_lib.output_err( - 600, cmd=opt, ret=ret, out=stdout, err=stderr) - raise exception.HBSDCmdError(message=msg, ret=ret, err=stderr) - return stdout - - def get_comm_version(self): - stdout = self.comm_raidqry() - lines = stdout.splitlines() - return shlex.split(lines[1])[1] - - def output_param_to_log(self, conf): - for opt in volume_opts: - if not opt.secret: - value = getattr(conf, opt.name) - LOG.info('\t%(name)-35s : %(value)s', - {'name': opt.name, 'value': value}) - - def create_lock_file(self): - inst = self.conf.hitachi_horcm_numbers[0] - pair_inst = self.conf.hitachi_horcm_numbers[1] - serial = self.conf.hitachi_serial_number - raidcom_lock_file = '%s%d' % (RAIDCOM_LOCK_FILE, inst) - raidcom_pair_lock_file = '%s%d' % (RAIDCOM_LOCK_FILE, pair_inst) - horcmgr_lock_file = '%s%d' % (HORCMGR_LOCK_FILE, pair_inst) - resource_lock_file = '%s%s' % (RESOURCE_LOCK_FILE, serial) - - basic_lib.create_empty_file(raidcom_lock_file) - basic_lib.create_empty_file(raidcom_pair_lock_file) - basic_lib.create_empty_file(horcmgr_lock_file) - basic_lib.create_empty_file(resource_lock_file) - - def connect_storage(self): - properties = utils.brick_get_connector_properties() - self.setup_horcmgr(properties['ip']) - - def get_max_hostgroups(self): - 
"""return the maximum value of hostgroup id.""" - return MAX_HOSTGROUPS - - def get_hostgroup_luns(self, port, gid): - list = [] - self.add_used_hlun(port, gid, list) - - return list - - def get_ldev_size_in_gigabyte(self, ldev, existing_ref): - param = 'serial_number' - - if param not in existing_ref: - msg = basic_lib.output_err(700, param=param) - raise exception.HBSDError(data=msg) - - storage = existing_ref.get(param) - if storage != self.conf.hitachi_serial_number: - msg = basic_lib.output_err(648, resource=param) - raise exception.HBSDError(data=msg) - - stdout = self.comm_get_ldev(ldev) - if not stdout: - msg = basic_lib.output_err(648, resource='LDEV') - raise exception.HBSDError(data=msg) - - sts_line = vol_type = "" - vol_attrs = [] - size = num_port = 1 - - lines = stdout.splitlines() - for line in lines: - if line.startswith("STS :"): - sts_line = line - - elif line.startswith("VOL_TYPE :"): - vol_type = shlex.split(line)[2] - - elif line.startswith("VOL_ATTR :"): - vol_attrs = shlex.split(line)[2:] - - elif line.startswith("VOL_Capacity(BLK) :"): - size = int(shlex.split(line)[2]) - - elif line.startswith("NUM_PORT :"): - num_port = int(shlex.split(line)[2]) - - if 'NML' not in sts_line: - msg = basic_lib.output_err(648, resource='LDEV') - - raise exception.HBSDError(data=msg) - - if 'OPEN-V' not in vol_type: - msg = basic_lib.output_err(702, ldev=ldev) - raise exception.HBSDError(data=msg) - - if 'HDP' not in vol_attrs: - msg = basic_lib.output_err(702, ldev=ldev) - raise exception.HBSDError(data=msg) - - for vol_attr in vol_attrs: - if vol_attr == ':': - continue - - if vol_attr in PAIR_TYPE: - msg = basic_lib.output_err(705, ldev=ldev) - raise exception.HBSDError(data=msg) - - if vol_attr not in PERMITTED_TYPE: - msg = basic_lib.output_err(702, ldev=ldev) - raise exception.HBSDError(data=msg) - - # Hitachi storage calculates volume sizes in a block unit, 512 bytes. - # So, units.Gi is divided by 512. 
- if size % (units.Gi / 512): - msg = basic_lib.output_err(703, ldev=ldev) - raise exception.HBSDError(data=msg) - - if num_port: - msg = basic_lib.output_err(704, ldev=ldev) - raise exception.HBSDError(data=msg) - - return size / (units.Gi / 512) diff --git a/cinder/volume/drivers/hitachi/hbsd_iscsi.py b/cinder/volume/drivers/hitachi/hbsd_iscsi.py deleted file mode 100644 index 425d4d6e0..000000000 --- a/cinder/volume/drivers/hitachi/hbsd_iscsi.py +++ /dev/null @@ -1,432 +0,0 @@ -# Copyright (C) 2014, Hitachi, Ltd. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -""" -iSCSI Cinder volume driver for Hitachi storage. 
- -""" - -import os -import threading - -from oslo_config import cfg -from oslo_log import log as logging -from oslo_log import versionutils -import six - -from cinder import exception -from cinder.i18n import _ -from cinder import interface -from cinder import utils -from cinder.volume import configuration -import cinder.volume.driver -from cinder.volume.drivers.hitachi import hbsd_basiclib as basic_lib -from cinder.volume.drivers.hitachi import hbsd_common as common - -LOG = logging.getLogger(__name__) - -CHAP_METHOD = ('None', 'CHAP None', 'CHAP') - -volume_opts = [ - cfg.BoolOpt('hitachi_add_chap_user', - default=False, - help='Add CHAP user'), - cfg.StrOpt('hitachi_auth_method', - help='iSCSI authentication method'), - cfg.StrOpt('hitachi_auth_user', - default='%sCHAP-user' % basic_lib.NAME_PREFIX, - help='iSCSI authentication username'), - cfg.StrOpt('hitachi_auth_password', - default='%sCHAP-password' % basic_lib.NAME_PREFIX, - help='iSCSI authentication password', secret=True), -] - -CONF = cfg.CONF -CONF.register_opts(volume_opts, group=configuration.SHARED_CONF_GROUP) - - -@interface.volumedriver -class HBSDISCSIDriver(cinder.volume.driver.ISCSIDriver): - VERSION = common.VERSION - - # ThirdPartySystems wiki page - CI_WIKI_NAME = ["Hitachi_HBSD_CI", "Hitachi_HBSD2_CI"] - - SUPPORTED = False - - def __init__(self, *args, **kwargs): - os.environ['LANG'] = 'C' - super(HBSDISCSIDriver, self).__init__(*args, **kwargs) - self.db = kwargs.get('db') - self.common = None - self.configuration.append_config_values(common.volume_opts) - self._stats = {} - self.context = None - self.do_setup_status = threading.Event() - - def _check_param(self): - self.configuration.append_config_values(volume_opts) - if (self.configuration.hitachi_auth_method and - self.configuration.hitachi_auth_method not in CHAP_METHOD): - raise exception.HBSDError( - message=basic_lib.output_err(601, param='hitachi_auth_method')) - if self.configuration.hitachi_auth_method == 'None': - 
self.configuration.hitachi_auth_method = None - for opt in volume_opts: - getattr(self.configuration, opt.name) - - def check_param(self): - try: - self.common.check_param() - self._check_param() - except exception.HBSDError: - raise - except Exception as ex: - raise exception.HBSDError( - message=basic_lib.output_err(601, param=six.text_type(ex))) - - def output_param_to_log(self): - lock = basic_lib.get_process_lock(self.common.system_lock_file) - - with lock: - self.common.output_param_to_log('iSCSI') - for opt in volume_opts: - if not opt.secret: - value = getattr(self.configuration, opt.name) - LOG.info('\t%(name)-35s : %(value)s', - {'name': opt.name, 'value': value}) - - def _delete_lun_iscsi(self, hostgroups, ldev): - try: - self.common.command.comm_delete_lun_iscsi(hostgroups, ldev) - except exception.HBSDNotFound: - LOG.warning(basic_lib.set_msg(301, ldev=ldev)) - - def _add_target(self, hostgroups, ldev): - self.common.add_lun('autargetmap', hostgroups, ldev) - - def _add_initiator(self, hgs, port, gid, host_iqn): - self.common.command.comm_add_initiator(port, gid, host_iqn) - hgs.append({'port': port, 'gid': int(gid), 'detected': True}) - LOG.debug("Create iSCSI target for %s", hgs) - - def _get_unused_gid_iscsi(self, port): - group_range = self.configuration.hitachi_group_range - if not group_range: - group_range = basic_lib.DEFAULT_GROUP_RANGE - return self.common.command.get_unused_gid_iscsi(group_range, port) - - def _delete_iscsi_target(self, port, target_no, target_alias): - ret, _stdout, _stderr = self.common.command.delete_iscsi_target( - port, target_no, target_alias) - if ret: - LOG.warning(basic_lib.set_msg( - 307, port=port, tno=target_no, alias=target_alias)) - - def _delete_chap_user(self, port): - ret, _stdout, _stderr = self.common.command.delete_chap_user(port) - if ret: - LOG.warning(basic_lib.set_msg( - 303, user=self.configuration.hitachi_auth_user)) - - def _get_hostgroup_info_iscsi(self, hgs, host_iqn): - return 
self.common.command.comm_get_hostgroup_info_iscsi( - hgs, host_iqn, self.configuration.hitachi_target_ports) - - def _discovery_iscsi_target(self, hostgroups): - for hostgroup in hostgroups: - ip_addr, ip_port = self.common.command.comm_get_iscsi_ip( - hostgroup['port']) - target_iqn = self.common.command.comm_get_target_iqn( - hostgroup['port'], hostgroup['gid']) - hostgroup['ip_addr'] = ip_addr - hostgroup['ip_port'] = ip_port - hostgroup['target_iqn'] = target_iqn - LOG.debug("ip_addr=%(addr)s ip_port=%(port)s target_iqn=%(iqn)s", - {'addr': ip_addr, 'port': ip_port, 'iqn': target_iqn}) - - def _fill_groups(self, hgs, ports, target_iqn, target_alias, add_iqn): - for port in ports: - added_hostgroup = False - added_user = False - LOG.debug('Create target (hgs: %(hgs)s port: %(port)s ' - 'target_iqn: %(tiqn)s target_alias: %(alias)s ' - 'add_iqn: %(aiqn)s)', - {'hgs': hgs, 'port': port, 'tiqn': target_iqn, - 'alias': target_alias, 'aiqn': add_iqn}) - gid = self.common.command.get_gid_from_targetiqn( - target_iqn, target_alias, port) - if gid is None: - for retry_cnt in basic_lib.DEFAULT_TRY_RANGE: - gid = None - try: - gid = self._get_unused_gid_iscsi(port) - self.common.command.comm_add_hostgrp_iscsi( - port, gid, target_alias, target_iqn) - added_hostgroup = True - except exception.HBSDNotFound: - LOG.warning(basic_lib.set_msg(312, resource='GID')) - continue - except Exception as ex: - LOG.warning(basic_lib.set_msg( - 309, port=port, alias=target_alias, - reason=ex)) - break - else: - LOG.debug('Completed to add target' - '(port: %(port)s gid: %(gid)d)', - {'port': port, 'gid': gid}) - break - if gid is None: - LOG.error('Failed to add target(port: %s)', port) - continue - try: - if added_hostgroup: - if self.configuration.hitachi_auth_method: - added_user = self.common.command.set_chap_authention( - port, gid) - self.common.command.comm_set_hostgrp_reportportal( - port, target_alias) - self._add_initiator(hgs, port, gid, add_iqn) - except Exception as ex: - 
LOG.warning(basic_lib.set_msg( - 316, port=port, reason=ex)) - if added_hostgroup: - if added_user: - self._delete_chap_user(port) - self._delete_iscsi_target(port, gid, target_alias) - - def add_hostgroup_core(self, hgs, ports, target_iqn, - target_alias, add_iqn): - if ports: - self._fill_groups(hgs, ports, target_iqn, target_alias, add_iqn) - - def add_hostgroup_master(self, hgs, master_iqn, host_ip, security_ports): - target_ports = self.configuration.hitachi_target_ports - group_request = self.configuration.hitachi_group_request - target_alias = '%s%s' % (basic_lib.NAME_PREFIX, host_ip) - if target_ports and group_request: - target_iqn = '%s.target' % master_iqn - - diff_ports = [] - for port in security_ports: - for hostgroup in hgs: - if hostgroup['port'] == port: - break - else: - diff_ports.append(port) - - self.add_hostgroup_core(hgs, diff_ports, target_iqn, - target_alias, master_iqn) - if not hgs: - raise exception.HBSDError(message=basic_lib.output_err(649)) - - def add_hostgroup(self): - properties = utils.brick_get_connector_properties() - if 'initiator' not in properties: - raise exception.HBSDError( - message=basic_lib.output_err(650, resource='HBA')) - LOG.debug("initiator: %s", properties['initiator']) - hostgroups = [] - security_ports = self._get_hostgroup_info_iscsi( - hostgroups, properties['initiator']) - self.add_hostgroup_master(hostgroups, properties['initiator'], - properties['ip'], security_ports) - - def _get_properties(self, volume, hostgroups): - conf = self.configuration - properties = {} - self._discovery_iscsi_target(hostgroups) - hostgroup = hostgroups[0] - - properties['target_discovered'] = True - properties['target_portal'] = "%s:%s" % (hostgroup['ip_addr'], - hostgroup['ip_port']) - properties['target_iqn'] = hostgroup['target_iqn'] - properties['target_lun'] = hostgroup['lun'] - - if conf.hitachi_auth_method: - properties['auth_method'] = 'CHAP' - properties['auth_username'] = conf.hitachi_auth_user - 
properties['auth_password'] = conf.hitachi_auth_password - - return properties - - def do_setup(self, context): - self.context = context - self.common = common.HBSDCommon(self.configuration, self, - context, self.db) - msg = _("The HBSD iSCSI driver is deprecated and " - "will be removed in P release") - versionutils.report_deprecated_feature(LOG, msg) - - self.check_param() - - self.common.create_lock_file() - - self.common.command.connect_storage() - - lock = basic_lib.get_process_lock(self.common.service_lock_file) - with lock: - self.add_hostgroup() - - self.output_param_to_log() - self.do_setup_status.set() - - def check_for_setup_error(self): - pass - - def extend_volume(self, volume, new_size): - self.do_setup_status.wait() - self.common.extend_volume(volume, new_size) - - def get_volume_stats(self, refresh=False): - if refresh: - if self.do_setup_status.isSet(): - self.common.output_backend_available_once() - _stats = self.common.update_volume_stats("iSCSI") - if _stats: - self._stats = _stats - return self._stats - - def create_volume(self, volume): - self.do_setup_status.wait() - metadata = self.common.create_volume(volume) - return metadata - - def delete_volume(self, volume): - self.do_setup_status.wait() - self.common.delete_volume(volume) - - def create_snapshot(self, snapshot): - self.do_setup_status.wait() - metadata = self.common.create_snapshot(snapshot) - return metadata - - def delete_snapshot(self, snapshot): - self.do_setup_status.wait() - self.common.delete_snapshot(snapshot) - - def create_cloned_volume(self, volume, src_vref): - self.do_setup_status.wait() - metadata = self.common.create_cloned_volume(volume, src_vref) - return metadata - - def create_volume_from_snapshot(self, volume, snapshot): - self.do_setup_status.wait() - metadata = self.common.create_volume_from_snapshot(volume, snapshot) - return metadata - - def _initialize_connection(self, ldev, connector, src_hgs=None): - LOG.debug("Call _initialize_connection " - "(config_group: 
%(group)s ldev: %(ldev)d)", - {'group': self.configuration.config_group, 'ldev': ldev}) - if src_hgs: - hostgroups = src_hgs[:] - else: - hostgroups = [] - security_ports = self._get_hostgroup_info_iscsi( - hostgroups, connector['initiator']) - self.add_hostgroup_master(hostgroups, connector['initiator'], - connector['ip'], security_ports) - - self._add_target(hostgroups, ldev) - - return hostgroups - - def initialize_connection(self, volume, connector): - self.do_setup_status.wait() - ldev = self.common.get_ldev(volume) - if ldev is None: - raise exception.HBSDError( - message=basic_lib.output_err(619, volume_id=volume['id'])) - self.common.add_volinfo(ldev, volume['id']) - with self.common.volume_info[ldev]['lock'],\ - self.common.volume_info[ldev]['in_use']: - hostgroups = self._initialize_connection(ldev, connector) - protocol = 'iscsi' - properties = self._get_properties(volume, hostgroups) - LOG.debug('Initialize volume_info: %s', - self.common.volume_info) - - LOG.debug('HFCDrv: properties=%s', properties) - return { - 'driver_volume_type': protocol, - 'data': properties - } - - def _terminate_connection(self, ldev, connector, src_hgs): - LOG.debug("Call _terminate_connection(config_group: %s)", - self.configuration.config_group) - hostgroups = src_hgs[:] - self._delete_lun_iscsi(hostgroups, ldev) - - LOG.debug("*** _terminate_ ***") - - def terminate_connection(self, volume, connector, **kwargs): - self.do_setup_status.wait() - ldev = self.common.get_ldev(volume) - if ldev is None: - LOG.warning(basic_lib.set_msg(302, volume_id=volume['id'])) - return - - if 'initiator' not in connector: - raise exception.HBSDError( - message=basic_lib.output_err(650, resource='HBA')) - - hostgroups = [] - self._get_hostgroup_info_iscsi(hostgroups, - connector['initiator']) - if not hostgroups: - raise exception.HBSDError(message=basic_lib.output_err(649)) - - self.common.add_volinfo(ldev, volume['id']) - with self.common.volume_info[ldev]['lock'],\ - 
self.common.volume_info[ldev]['in_use']: - self._terminate_connection(ldev, connector, hostgroups) - - def create_export(self, context, volume, connector): - pass - - def ensure_export(self, context, volume): - pass - - def remove_export(self, context, volume): - pass - - def pair_initialize_connection(self, unused_ldev): - pass - - def pair_terminate_connection(self, unused_ldev): - pass - - def copy_volume_to_image(self, context, volume, image_service, image_meta): - self.do_setup_status.wait() - if volume['volume_attachment']: - desc = 'volume %s' % volume['id'] - raise exception.HBSDError( - message=basic_lib.output_err(660, desc=desc)) - super(HBSDISCSIDriver, self).copy_volume_to_image(context, volume, - image_service, - image_meta) - - def manage_existing(self, volume, existing_ref): - return self.common.manage_existing(volume, existing_ref) - - def manage_existing_get_size(self, volume, existing_ref): - self.do_setup_status.wait() - return self.common.manage_existing_get_size(volume, existing_ref) - - def unmanage(self, volume): - self.do_setup_status.wait() - self.common.unmanage(volume) diff --git a/cinder/volume/drivers/hitachi/hbsd_snm2.py b/cinder/volume/drivers/hitachi/hbsd_snm2.py deleted file mode 100644 index 14a990579..000000000 --- a/cinder/volume/drivers/hitachi/hbsd_snm2.py +++ /dev/null @@ -1,1154 +0,0 @@ -# Copyright (C) 2014, Hitachi, Ltd. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -import re -import shlex -import threading -import time - -from oslo_log import log as logging -from oslo_service import loopingcall -from oslo_utils import excutils -from oslo_utils import units -import six - -from cinder import exception -from cinder import utils -from cinder.volume.drivers.hitachi import hbsd_basiclib as basic_lib - -LOG = logging.getLogger(__name__) - -SNM2_ENV = ('LANG=C STONAVM_HOME=/usr/stonavm ' - 'LD_LIBRARY_PATH=/usr/stonavm/lib ' - 'STONAVM_RSP_PASS=on STONAVM_ACT=on') - -MAX_HOSTGROUPS = 127 -MAX_HOSTGROUPS_ISCSI = 254 -MAX_HLUN = 2047 -EXEC_LOCK_PATH_BASE = basic_lib.LOCK_DIR + 'hsnm_' -EXEC_TIMEOUT = 10 -EXEC_INTERVAL = 1 - -CHAP_TIMEOUT = 5 -PAIRED = 12 -DUMMY_LU = -1 - - -class HBSDSNM2(basic_lib.HBSDBasicLib): - - def __init__(self, conf): - super(HBSDSNM2, self).__init__(conf=conf) - - self.unit_name = conf.hitachi_unit_name - self.hsnm_lock = threading.Lock() - self.hsnm_lock_file = ('%s%s' - % (EXEC_LOCK_PATH_BASE, self.unit_name)) - copy_speed = conf.hitachi_copy_speed - if copy_speed <= 2: - self.pace = 'slow' - elif copy_speed == 3: - self.pace = 'normal' - else: - self.pace = 'prior' - - def _wait_for_exec_hsnm(self, args, printflag, noretry, timeout, start): - lock = basic_lib.get_process_lock(self.hsnm_lock_file) - with self.hsnm_lock, lock: - ret, stdout, stderr = self.exec_command('env', args=args, - printflag=printflag) - - if not ret or noretry: - raise loopingcall.LoopingCallDone((ret, stdout, stderr)) - - if time.time() - start >= timeout: - LOG.error("snm2 command timeout.") - raise loopingcall.LoopingCallDone((ret, stdout, stderr)) - - if (re.search('DMEC002047', stderr) - or re.search('DMEC002048', stderr) - or re.search('DMED09000A', stderr) - or re.search('DMED090026', stderr) - or re.search('DMED0E002B', stderr) - or re.search('DMER03006A', stderr) - or re.search('DMER030080', stderr) - or re.search('DMER0300B8', stderr) - or re.search('DMER0800CF', stderr) - or re.search('DMER0800D[0-6D]', stderr) - or 
re.search('DMES052602', stderr)): - LOG.error("Unexpected error occurs in snm2.") - raise loopingcall.LoopingCallDone((ret, stdout, stderr)) - - def exec_hsnm(self, command, args, printflag=True, noretry=False, - timeout=EXEC_TIMEOUT, interval=EXEC_INTERVAL): - args = '%s %s %s' % (SNM2_ENV, command, args) - - loop = loopingcall.FixedIntervalLoopingCall( - self._wait_for_exec_hsnm, args, printflag, - noretry, timeout, time.time()) - - return loop.start(interval=interval).wait() - - def _execute_with_exception(self, cmd, args, **kwargs): - ret, stdout, stderr = self.exec_hsnm(cmd, args, **kwargs) - if ret: - cmds = '%(cmd)s %(args)s' % {'cmd': cmd, 'args': args} - msg = basic_lib.output_err( - 600, cmd=cmds, ret=ret, out=stdout, err=stderr) - raise exception.HBSDError(data=msg) - - return ret, stdout, stderr - - def _execute_and_return_stdout(self, cmd, args, **kwargs): - result = self._execute_with_exception(cmd, args, **kwargs) - - return result[1] - - def get_comm_version(self): - ret, stdout, stderr = self.exec_hsnm('auman', '-help') - m = re.search(r'Version (\d+).(\d+)', stdout) - if not m: - msg = basic_lib.output_err( - 600, cmd='auman', ret=ret, out=stdout, err=stderr) - raise exception.HBSDCmdError(message=msg, ret=ret, err=stderr) - return '%s.%s' % (m.group(1), m.group(2)) - - def add_used_hlun(self, command, port, gid, used_list, ldev): - unit = self.unit_name - ret, stdout, stderr = self.exec_hsnm(command, - '-unit %s -refer' % unit) - if ret: - msg = basic_lib.output_err( - 600, cmd=command, ret=ret, out=stdout, err=stderr) - raise exception.HBSDCmdError(message=msg, ret=ret, err=stderr) - lines = stdout.splitlines() - for line in lines[2:]: - line = shlex.split(line) - if not line: - continue - if line[0] == port and int(line[1][0:3]) == gid: - if int(line[2]) not in used_list: - used_list.append(int(line[2])) - if int(line[3]) == ldev: - hlu = int(line[2]) - LOG.warning('ldev(%(ldev)d) is already mapped ' - '(hlun: %(hlu)d)', - {'ldev': ldev, 'hlu': 
hlu}) - return hlu - return None - - def _get_lu(self, lu=None): - # When 'lu' is 0, it should be true. So, it cannot remove 'is None'. - if lu is None: - args = '-unit %s' % self.unit_name - else: - args = '-unit %s -lu %s' % (self.unit_name, lu) - - return self._execute_and_return_stdout('auluref', args) - - def get_unused_ldev(self, ldev_range): - start = ldev_range[0] - end = ldev_range[1] - unit = self.unit_name - - ret, stdout, stderr = self.exec_hsnm('auluref', '-unit %s' % unit) - if ret: - msg = basic_lib.output_err( - 600, cmd='auluref', ret=ret, out=stdout, err=stderr) - raise exception.HBSDCmdError(message=msg, ret=ret, err=stderr) - free_ldev = start - lines = stdout.splitlines() - found = False - for line in lines[2:]: - line = shlex.split(line) - if not line: - continue - ldev_num = int(line[0]) - if free_ldev > ldev_num: - continue - if free_ldev == ldev_num: - free_ldev += 1 - else: - found = True - break - if free_ldev > end: - break - else: - found = True - - if not found: - msg = basic_lib.output_err(648, resource='LDEV') - raise exception.HBSDError(message=msg) - - return free_ldev - - def get_hgname_gid(self, port, host_grp_name): - unit = self.unit_name - ret, stdout, stderr = self.exec_hsnm('auhgdef', - '-unit %s -refer' % unit) - if ret: - msg = basic_lib.output_err( - 600, cmd='auhgdef', ret=ret, out=stdout, err=stderr) - raise exception.HBSDCmdError(message=msg, ret=ret, err=stderr) - lines = stdout.splitlines() - is_target_port = False - for line in lines: - line = shlex.split(line) - if not line: - continue - if line[0] == 'Port' and line[1] == port: - is_target_port = True - continue - if is_target_port: - if line[0] == 'Port': - break - if not line[0].isdigit(): - continue - gid = int(line[0]) - if line[1] == host_grp_name: - return gid - return None - - def get_unused_gid(self, group_range, port): - start = group_range[0] - end = group_range[1] - unit = self.unit_name - - ret, stdout, stderr = self.exec_hsnm('auhgdef', - '-unit %s 
-refer' % unit) - if ret: - msg = basic_lib.output_err( - 600, cmd='auhgdef', ret=ret, out=stdout, err=stderr) - raise exception.HBSDCmdError(message=msg, ret=ret, err=stderr) - - lines = stdout.splitlines() - is_target_port = False - free_gid = start - found = False - for line in lines: - line = shlex.split(line) - if not line: - continue - if line[0] == 'Port' and line[1] == port: - is_target_port = True - continue - if is_target_port: - if line[0] == 'Port': - found = True - break - if not line[0].isdigit(): - continue - - gid = int(line[0]) - if free_gid > gid: - continue - if free_gid == gid: - free_gid += 1 - else: - found = True - break - if free_gid > end or free_gid > MAX_HOSTGROUPS: - break - else: - found = True - - if not found: - msg = basic_lib.output_err(648, resource='GID') - raise exception.HBSDError(message=msg) - - return free_gid - - def comm_set_target_wwns(self, target_ports): - unit = self.unit_name - ret, stdout, stderr = self.exec_hsnm('aufibre1', - '-unit %s -refer' % unit) - if ret: - msg = basic_lib.output_err( - 600, cmd='aufibre1', ret=ret, out=stdout, err=stderr) - raise exception.HBSDCmdError(message=msg, ret=ret, err=stderr) - - lines = stdout.splitlines() - target_wwns = {} - for line in lines[3:]: - if re.match('Transfer', line): - break - - line = shlex.split(line) - if len(line) < 4: - continue - - port = '%s%s' % (line[0], line[1]) - if target_ports: - if port in target_ports: - target_wwns[port] = line[3] - else: - target_wwns[port] = line[3] - - LOG.debug('target wwns: %s', target_wwns) - return target_wwns - - def get_hostgroup_from_wwns(self, hostgroups, port, wwns, buf, login): - for pt in wwns: - for line in buf[port]['assigned']: - hgname = shlex.split(line[38:])[1][4:] - if not re.match(basic_lib.NAME_PREFIX, hgname): - continue - if pt.search(line[38:54]): - wwn = line[38:54] - gid = int(shlex.split(line[38:])[1][0:3]) - is_detected = None - if login: - for line in buf[port]['detected']: - if pt.search(line[38:54]): - 
is_detected = True - break - else: - is_detected = False - hostgroups.append({'port': six.text_type(port), 'gid': gid, - 'initiator_wwn': wwn, - 'detected': is_detected}) - - def comm_get_hostgroup_info(self, hgs, wwns, target_ports, login=True): - unit = self.unit_name - ret, stdout, stderr = self.exec_hsnm('auhgwwn', - '-unit %s -refer' % unit) - if ret: - msg = basic_lib.output_err( - 600, cmd='auhgwwn', ret=ret, out=stdout, err=stderr) - raise exception.HBSDCmdError(message=msg, ret=ret, err=stderr) - - security_ports = [] - patterns = [] - for wwn in wwns: - pt = re.compile(wwn, re.IGNORECASE) - patterns.append(pt) - - lines = stdout.splitlines() - buf = {} - _buffer = [] - port = None - security = None - for line in lines: - if re.match('Port', line): - port = shlex.split(line)[1] - if target_ports and port not in target_ports: - port = None - else: - security = True if shlex.split(line)[5] == 'ON' else False - buf[port] = {'detected': [], 'assigned': [], - 'assignable': []} - if security: - security_ports.append(port) - continue - if port and security: - if re.search('Detected WWN', line): - _buffer = buf[port]['detected'] - continue - elif re.search('Assigned WWN', line): - _buffer = buf[port]['assigned'] - continue - elif re.search('Assignable WWN', line): - _buffer = buf[port]['assignable'] - continue - _buffer.append(line) - - hostgroups = [] - for port in buf.keys(): - self.get_hostgroup_from_wwns( - hostgroups, port, patterns, buf, login) - - for hostgroup in hostgroups: - hgs.append(hostgroup) - - return security_ports - - def comm_delete_lun_core(self, command, hostgroups, lun): - unit = self.unit_name - - no_lun_cnt = 0 - deleted_hostgroups = [] - for hostgroup in hostgroups: - LOG.debug('comm_delete_lun: hostgroup is %s', hostgroup) - port = hostgroup['port'] - gid = hostgroup['gid'] - ctl_no = port[0] - port_no = port[1] - - is_deleted = False - for deleted in deleted_hostgroups: - if port == deleted['port'] and gid == deleted['gid']: - is_deleted 
= True - if is_deleted: - continue - ret, stdout, stderr = self.exec_hsnm(command, - '-unit %s -refer' % unit) - if ret: - msg = basic_lib.output_err( - 600, cmd=command, ret=ret, out=stdout, err=stderr) - raise exception.HBSDCmdError(message=msg, ret=ret, err=stderr) - - lines = stdout.splitlines() - for line in lines[2:]: - line = shlex.split(line) - if not line: - continue - if (line[0] == port and int(line[1][0:3]) == gid - and int(line[3]) == lun): - hlu = int(line[2]) - break - else: - no_lun_cnt += 1 - if no_lun_cnt == len(hostgroups): - raise exception.HBSDNotFound - else: - continue - - opt = '-unit %s -rm %s %s %d %d %d' % (unit, ctl_no, port_no, - gid, hlu, lun) - ret, stdout, stderr = self.exec_hsnm(command, opt) - if ret: - msg = basic_lib.output_err( - 600, cmd=command, ret=ret, out=stdout, err=stderr) - raise exception.HBSDCmdError(message=msg, ret=ret, err=stderr) - - deleted_hostgroups.append({'port': port, 'gid': gid}) - LOG.debug('comm_delete_lun is over (%d)', lun) - - def comm_delete_lun(self, hostgroups, ldev): - self.comm_delete_lun_core('auhgmap', hostgroups, ldev) - - def comm_delete_lun_iscsi(self, hostgroups, ldev): - self.comm_delete_lun_core('autargetmap', hostgroups, ldev) - - def comm_add_ldev(self, pool_id, ldev, capacity, is_vvol): - unit = self.unit_name - - if is_vvol: - command = 'aureplicationvvol' - opt = ('-unit %s -add -lu %d -size %dg' - % (unit, ldev, capacity)) - else: - command = 'auluadd' - opt = ('-unit %s -lu %d -dppoolno %d -size %dg' - % (unit, ldev, pool_id, capacity)) - - ret, stdout, stderr = self.exec_hsnm(command, opt) - if ret: - if (re.search('DMEC002047', stderr) - or re.search('DMES052602', stderr) - or re.search('DMED09000A', stderr)): - raise exception.HBSDNotFound - else: - msg = basic_lib.output_err( - 600, cmd=command, ret=ret, out=stdout, err=stderr) - raise exception.HBSDCmdError(message=msg, ret=ret, err=stderr) - - def comm_add_hostgrp(self, port, gid, host_grp_name): - unit = self.unit_name - 
ctl_no = port[0] - port_no = port[1] - - opt = '-unit %s -add %s %s -gno %d -gname %s' % (unit, ctl_no, - port_no, gid, - host_grp_name) - ret, stdout, stderr = self.exec_hsnm('auhgdef', opt) - if ret: - raise exception.HBSDNotFound - - def comm_del_hostgrp(self, port, gid, host_grp_name): - unit = self.unit_name - ctl_no = port[0] - port_no = port[1] - opt = '-unit %s -rm %s %s -gname %s' % (unit, ctl_no, port_no, - host_grp_name) - ret, stdout, stderr = self.exec_hsnm('auhgdef', opt) - if ret: - msg = basic_lib.output_err( - 600, cmd='auhgdef', ret=ret, out=stdout, err=stderr) - raise exception.HBSDCmdError(message=msg, ret=ret, err=stderr) - - def comm_add_hbawwn(self, port, gid, wwn): - unit = self.unit_name - ctl_no = port[0] - port_no = port[1] - opt = '-unit %s -set -permhg %s %s %s -gno %d' % (unit, ctl_no, - port_no, wwn, gid) - ret, stdout, stderr = self.exec_hsnm('auhgwwn', opt) - if ret: - opt = '-unit %s -assign -permhg %s %s %s -gno %d' % (unit, ctl_no, - port_no, wwn, - gid) - ret, stdout, stderr = self.exec_hsnm('auhgwwn', opt) - if ret: - msg = basic_lib.output_err( - 600, cmd='auhgwwn', ret=ret, out=stdout, err=stderr) - raise exception.HBSDCmdError(message=msg, ret=ret, err=stderr) - - def comm_add_lun(self, command, hostgroups, ldev, is_once=False): - unit = self.unit_name - tmp_hostgroups = hostgroups[:] - used_list = [] - is_ok = False - hlu = None - old_hlu = None - for hostgroup in hostgroups: - port = hostgroup['port'] - gid = hostgroup['gid'] - hlu = self.add_used_hlun(command, port, gid, used_list, ldev) - # When 'hlu' or 'old_hlu' is 0, it should be true. - # So, it cannot remove 'is not None'. 
- if hlu is not None: - if old_hlu is not None and old_hlu != hlu: - msg = basic_lib.output_err(648, resource='LUN (HLUN)') - raise exception.HBSDError(message=msg) - is_ok = True - hostgroup['lun'] = hlu - tmp_hostgroups.remove(hostgroup) - old_hlu = hlu - else: - hlu = old_hlu - - if not used_list: - hlu = 0 - elif hlu is None: - for i in range(MAX_HLUN + 1): - if i not in used_list: - hlu = i - break - else: - raise exception.HBSDNotFound - - ret = 0 - stdout = None - stderr = None - invalid_hgs_str = None - for hostgroup in tmp_hostgroups: - port = hostgroup['port'] - gid = hostgroup['gid'] - ctl_no = port[0] - port_no = port[1] - if not hostgroup['detected']: - if invalid_hgs_str: - invalid_hgs_str = '%s, %s:%d' % (invalid_hgs_str, - port, gid) - else: - invalid_hgs_str = '%s:%d' % (port, gid) - continue - opt = '-unit %s -add %s %s %d %d %d' % (unit, ctl_no, port_no, - gid, hlu, ldev) - ret, stdout, stderr = self.exec_hsnm(command, opt) - if ret == 0: - is_ok = True - hostgroup['lun'] = hlu - if is_once: - break - else: - LOG.warning(basic_lib.set_msg( - 314, ldev=ldev, lun=hlu, port=port, id=gid)) - - if not is_ok: - if stderr: - msg = basic_lib.output_err( - 600, cmd=command, ret=ret, out=stdout, err=stderr) - raise exception.HBSDCmdError(message=msg, ret=ret, err=stderr) - else: - msg = basic_lib.output_err(659, gid=invalid_hgs_str) - raise exception.HBSDError(message=msg) - - def comm_delete_ldev(self, ldev, is_vvol): - unit = self.unit_name - - if is_vvol: - command = 'aureplicationvvol' - opt = '-unit %s -rm -lu %d' % (unit, ldev) - else: - command = 'auludel' - opt = '-unit %s -lu %d -f' % (unit, ldev) - - ret, stdout, stderr = self.exec_hsnm(command, opt, - timeout=30, interval=3) - if ret: - if (re.search('DMEC002048', stderr) - or re.search('DMED090026', stderr)): - raise exception.HBSDNotFound - msg = basic_lib.output_err( - 600, cmd=command, ret=ret, out=stdout, err=stderr) - raise exception.HBSDCmdError(message=msg, ret=ret, err=stderr) - return 
ret - - def comm_extend_ldev(self, ldev, old_size, new_size): - unit = self.unit_name - command = 'auluchgsize' - options = '-unit %s -lu %d -size %dg' % (unit, ldev, new_size) - - ret, stdout, stderr = self.exec_hsnm(command, options) - if ret: - msg = basic_lib.output_err( - 600, cmd=command, ret=ret, out=stdout, err=stderr) - raise exception.HBSDCmdError(message=msg, ret=ret, err=stderr) - - def delete_chap_user(self, port): - unit = self.unit_name - ctl_no = port[0] - port_no = port[1] - auth_username = self.conf.hitachi_auth_user - - opt = '-unit %s -rm %s %s -user %s' % (unit, ctl_no, port_no, - auth_username) - return self.exec_hsnm('auchapuser', opt) - - def _wait_for_add_chap_user(self, cmd, auth_username, - auth_password, start): - # Don't move 'import pexpect' to the beginning of the file so that - # a tempest can work. - import pexpect - - lock = basic_lib.get_process_lock(self.hsnm_lock_file) - with self.hsnm_lock, lock: - try: - child = pexpect.spawn(cmd) - child.expect('Secret: ', timeout=CHAP_TIMEOUT) - child.sendline(auth_password) - child.expect('Re-enter Secret: ', - timeout=CHAP_TIMEOUT) - child.sendline(auth_password) - child.expect('The CHAP user information has ' - 'been added successfully.', - timeout=CHAP_TIMEOUT) - except Exception: - if time.time() - start >= EXEC_TIMEOUT: - msg = basic_lib.output_err(642, user=auth_username) - raise exception.HBSDError(message=msg) - else: - raise loopingcall.LoopingCallDone(True) - - def set_chap_authention(self, port, gid): - ctl_no = port[0] - port_no = port[1] - unit = self.unit_name - auth_username = self.conf.hitachi_auth_user - auth_password = self.conf.hitachi_auth_password - add_chap_user = self.conf.hitachi_add_chap_user - assign_flag = True - added_flag = False - opt = '-unit %s -refer %s %s -user %s' % (unit, ctl_no, port_no, - auth_username) - ret, stdout, stderr = self.exec_hsnm('auchapuser', opt, noretry=True) - - if ret: - if not add_chap_user: - msg = basic_lib.output_err(643, 
user=auth_username) - raise exception.HBSDError(message=msg) - - root_helper = utils.get_root_helper() - cmd = ('%s env %s auchapuser -unit %s -add %s %s ' - '-tno %d -user %s' % (root_helper, SNM2_ENV, unit, ctl_no, - port_no, gid, auth_username)) - - LOG.debug('Add CHAP user') - loop = loopingcall.FixedIntervalLoopingCall( - self._wait_for_add_chap_user, cmd, - auth_username, auth_password, time.time()) - - added_flag = loop.start(interval=EXEC_INTERVAL).wait() - - else: - lines = stdout.splitlines()[4:] - for line in lines: - if int(shlex.split(line)[0][0:3]) == gid: - assign_flag = False - break - - if assign_flag: - opt = '-unit %s -assign %s %s -tno %d -user %s' % (unit, ctl_no, - port_no, gid, - auth_username) - ret, stdout, stderr = self.exec_hsnm('auchapuser', opt) - if ret: - if added_flag: - _ret, _stdout, _stderr = self.delete_chap_user(port) - if _ret: - LOG.warning(basic_lib.set_msg( - 303, user=auth_username)) - - msg = basic_lib.output_err( - 600, cmd='auchapuser', ret=ret, out=stdout, err=stderr) - raise exception.HBSDCmdError(message=msg, ret=ret, err=stderr) - - return added_flag - - def comm_add_hostgrp_iscsi(self, port, gid, target_alias, target_iqn): - auth_method = self.conf.hitachi_auth_method - unit = self.unit_name - ctl_no = port[0] - port_no = port[1] - if auth_method: - auth_arg = '-authmethod %s -mutual disable' % auth_method - else: - auth_arg = '-authmethod None' - - opt = '-unit %s -add %s %s -tno %d' % (unit, ctl_no, port_no, gid) - opt = '%s -talias %s -iname %s %s' % (opt, target_alias, target_iqn, - auth_arg) - ret, stdout, stderr = self.exec_hsnm('autargetdef', opt) - - if ret: - raise exception.HBSDNotFound - - def delete_iscsi_target(self, port, _target_no, target_alias): - unit = self.unit_name - ctl_no = port[0] - port_no = port[1] - opt = '-unit %s -rm %s %s -talias %s' % (unit, ctl_no, port_no, - target_alias) - return self.exec_hsnm('autargetdef', opt) - - def comm_set_hostgrp_reportportal(self, port, target_alias): - 
unit = self.unit_name - ctl_no = port[0] - port_no = port[1] - opt = '-unit %s -set %s %s -talias %s' % (unit, ctl_no, port_no, - target_alias) - opt = '%s -ReportFullPortalList enable' % opt - ret, stdout, stderr = self.exec_hsnm('autargetopt', opt) - if ret: - msg = basic_lib.output_err( - 600, cmd='autargetopt', ret=ret, out=stdout, err=stderr) - raise exception.HBSDCmdError(message=msg, ret=ret, err=stderr) - - def comm_add_initiator(self, port, gid, host_iqn): - unit = self.unit_name - ctl_no = port[0] - port_no = port[1] - opt = '-unit %s -add %s %s -tno %d -iname %s' % (unit, ctl_no, - port_no, gid, - host_iqn) - ret, stdout, stderr = self.exec_hsnm('autargetini', opt) - if ret: - msg = basic_lib.output_err( - 600, cmd='autargetini', ret=ret, out=stdout, err=stderr) - raise exception.HBSDCmdError(message=msg, ret=ret, err=stderr) - - def comm_get_hostgroup_info_iscsi(self, hgs, host_iqn, target_ports): - unit = self.unit_name - ret, stdout, stderr = self.exec_hsnm('autargetini', - '-unit %s -refer' % unit) - if ret: - msg = basic_lib.output_err( - 600, cmd='autargetini', ret=ret, out=stdout, err=stderr) - raise exception.HBSDCmdError(message=msg, ret=ret, err=stderr) - - security_ports = [] - lines = stdout.splitlines() - hostgroups = [] - security = True - for line in lines: - if not shlex.split(line): - continue - if re.match('Port', line): - line = shlex.split(line) - port = line[1] - security = True if line[4] == 'ON' else False - continue - - if target_ports and port not in target_ports: - continue - - if security: - if (host_iqn in shlex.split(line[72:]) and - re.match(basic_lib.NAME_PREFIX, - shlex.split(line)[0][4:])): - gid = int(shlex.split(line)[0][0:3]) - hostgroups.append( - {'port': port, 'gid': gid, 'detected': True}) - LOG.debug('Find port=%(port)s gid=%(gid)d', - {'port': port, 'gid': gid}) - if port not in security_ports: - security_ports.append(port) - - for hostgroup in hostgroups: - hgs.append(hostgroup) - - return security_ports - - def 
comm_get_iscsi_ip(self, port): - unit = self.unit_name - ret, stdout, stderr = self.exec_hsnm('auiscsi', - '-unit %s -refer' % unit) - if ret: - msg = basic_lib.output_err( - 600, cmd='auiscsi', ret=ret, out=stdout, err=stderr) - raise exception.HBSDCmdError(message=msg, ret=ret, err=stderr) - - lines = stdout.splitlines() - is_target_port = False - for line in lines: - line_array = shlex.split(line) - if not line_array: - continue - if line_array[0] == 'Port' and line_array[1] != 'Number': - if line_array[1] == port: - is_target_port = True - else: - is_target_port = False - continue - if is_target_port and re.search('IPv4 Address', line): - ip_addr = shlex.split(line)[3] - break - if is_target_port and re.search('Port Number', line): - ip_port = shlex.split(line)[3] - else: - msg = basic_lib.output_err(651) - raise exception.HBSDError(message=msg) - - return ip_addr, ip_port - - def comm_get_target_iqn(self, port, gid): - unit = self.unit_name - ret, stdout, stderr = self.exec_hsnm('autargetdef', - '-unit %s -refer' % unit) - if ret: - msg = basic_lib.output_err( - 600, cmd='autargetdef', ret=ret, out=stdout, err=stderr) - raise exception.HBSDCmdError(message=msg, ret=ret, err=stderr) - - is_target_host = False - tmp_port = None - lines = stdout.splitlines() - for line in lines: - line = shlex.split(line) - if not line: - continue - - if line[0] == "Port": - tmp_port = line[1] - continue - - if port != tmp_port: - continue - - gid_tmp = line[0][0:3] - if gid_tmp.isdigit() and int(gid_tmp) == gid: - is_target_host = True - continue - if is_target_host and line[0] == "iSCSI": - target_iqn = line[3] - break - else: - msg = basic_lib.output_err(650, resource='IQN') - raise exception.HBSDError(message=msg) - - return target_iqn - - def get_unused_gid_iscsi(self, group_range, port): - start = group_range[0] - end = min(group_range[1], MAX_HOSTGROUPS_ISCSI) - unit = self.unit_name - - ret, stdout, stderr = self.exec_hsnm('autargetdef', - '-unit %s -refer' % unit) - if 
ret: - msg = basic_lib.output_err( - 600, cmd='autargetdef', ret=ret, out=stdout, err=stderr) - raise exception.HBSDCmdError(message=msg, ret=ret, err=stderr) - - used_list = [] - tmp_port = None - lines = stdout.splitlines() - for line in lines: - line = shlex.split(line) - if not line: - continue - - if line[0] == "Port": - tmp_port = line[1] - continue - - if port != tmp_port: - continue - - if line[0][0:3].isdigit(): - gid = int(line[0][0:3]) - if start <= gid <= end: - used_list.append(gid) - if not used_list: - return start - - for gid in range(start, end + 1): - if gid not in used_list: - break - else: - msg = basic_lib.output_err(648, resource='GID') - raise exception.HBSDError(message=msg) - - return gid - - def get_gid_from_targetiqn(self, target_iqn, target_alias, port): - unit = self.unit_name - ret, stdout, stderr = self.exec_hsnm('autargetdef', - '-unit %s -refer' % unit) - if ret: - msg = basic_lib.output_err( - 600, cmd='autargetdef', ret=ret, out=stdout, err=stderr) - raise exception.HBSDCmdError(message=msg, ret=ret, err=stderr) - - gid = None - tmp_port = None - found_alias_full = False - found_alias_part = False - lines = stdout.splitlines() - for line in lines: - line = shlex.split(line) - if not line: - continue - - if line[0] == "Port": - tmp_port = line[1] - continue - - if port != tmp_port: - continue - - if line[0][0:3].isdigit(): - tmp_gid = int(line[0][0:3]) - if re.match(basic_lib.NAME_PREFIX, line[0][4:]): - found_alias_part = True - if line[0][4:] == target_alias: - found_alias_full = True - continue - - if line[0] == "iSCSI": - if line[3] == target_iqn: - gid = tmp_gid - break - else: - found_alias_part = False - - if found_alias_full and gid is None: - msg = basic_lib.output_err(641) - raise exception.HBSDError(message=msg) - - # When 'gid' is 0, it should be true. - # So, it cannot remove 'is not None'. 
- if not found_alias_part and gid is not None: - msg = basic_lib.output_err(641) - raise exception.HBSDError(message=msg) - - return gid - - def comm_get_dp_pool(self, pool_id): - unit = self.unit_name - ret, stdout, stderr = self.exec_hsnm('audppool', - '-unit %s -refer -g' % unit, - printflag=False) - if ret: - msg = basic_lib.output_err( - 600, cmd='audppool', ret=ret, out=stdout, err=stderr) - raise exception.HBSDCmdError(message=msg, ret=ret, err=stderr) - - lines = stdout.splitlines() - for line in lines[2:]: - tc_cc = re.search(r'\s(\d+\.\d) GB\s+(\d+\.\d) GB\s', line) - pool_tmp = re.match(r'\s*\d+', line) - if (pool_tmp and tc_cc - and int(pool_tmp.group(0)) == pool_id): - total_gb = int(float(tc_cc.group(1))) - free_gb = total_gb - int(float(tc_cc.group(2))) - return total_gb, free_gb - - msg = basic_lib.output_err(640, pool_id=pool_id) - raise exception.HBSDError(message=msg) - - def is_detected(self, port, wwn): - hgs = [] - self.comm_get_hostgroup_info(hgs, [wwn], [port], login=True) - return hgs[0]['detected'] - - def pairoperate(self, opr, pvol, svol, is_vvol, args=None): - unit = self.unit_name - method = '-ss' if is_vvol else '-si' - opt = '-unit %s -%s %s -pvol %d -svol %d' % (unit, opr, method, - pvol, svol) - if args: - opt = '%s %s' % (opt, args) - ret, stdout, stderr = self.exec_hsnm('aureplicationlocal', opt) - if ret: - opt = '%s %s' % ('aureplicationlocal', opt) - msg = basic_lib.output_err( - 600, cmd=opt, ret=ret, out=stdout, err=stderr) - raise exception.HBSDCmdError(message=msg, ret=ret, err=stderr) - - def comm_create_pair(self, pvol, svol, is_vvol): - if not is_vvol: - args = '-compsplit -pace %s' % self.pace - method = basic_lib.FULL - else: - pool = self.conf.hitachi_thin_pool_id - args = ('-localrepdppoolno %d -localmngdppoolno %d ' - '-compsplit -pace %s' % (pool, pool, self.pace)) - method = basic_lib.THIN - try: - self.pairoperate('create', pvol, svol, is_vvol, args=args) - except exception.HBSDCmdError as ex: - if 
(re.search('DMER0300B8', ex.stderr) - or re.search('DMER0800CF', ex.stderr) - or re.search('DMER0800D[0-6D]', ex.stderr) - or re.search('DMER03006A', ex.stderr) - or re.search('DMER030080', ex.stderr)): - msg = basic_lib.output_err(615, copy_method=method, pvol=pvol) - raise exception.HBSDBusy(message=msg) - else: - raise - - def _comm_pairevtwait(self, pvol, svol, is_vvol): - unit = self.unit_name - if not is_vvol: - pairname = 'SI_LU%04d_LU%04d' % (pvol, svol) - method = '-si' - else: - pairname = 'SS_LU%04d_LU%04d' % (pvol, svol) - method = '-ss' - opt = ('-unit %s -evwait %s -pairname %s -gname Ungrouped -nowait' % - (unit, method, pairname)) - ret, stdout, stderr = self.exec_hsnm('aureplicationmon', - opt, noretry=True) - - return ret - - def _wait_for_pair_status(self, pvol, svol, is_vvol, - status, timeout, start): - if self._comm_pairevtwait(pvol, svol, is_vvol) in status: - raise loopingcall.LoopingCallDone() - - if time.time() - start >= timeout: - msg = basic_lib.output_err( - 637, method='_wait_for_pair_status', timeout=timeout) - raise exception.HBSDError(message=msg) - - def comm_pairevtwait(self, pvol, svol, is_vvol, status, timeout, interval): - loop = loopingcall.FixedIntervalLoopingCall( - self._wait_for_pair_status, pvol, svol, is_vvol, - status, timeout, time.time()) - - loop.start(interval=interval).wait() - - def delete_pair(self, pvol, svol, is_vvol): - self.pairoperate('simplex', pvol, svol, is_vvol) - - def trans_status_hsnm2raid(self, str): - status = None - obj = re.search(r'Split\((.*)%\)', str) - if obj: - status = basic_lib.PSUS - obj = re.search(r'Paired\((.*)%\)', str) - if obj: - status = basic_lib.PAIR - return status - - def get_paired_info(self, ldev, only_flag=False): - opt_base = '-unit %s -refer' % self.unit_name - if only_flag: - opt_base = '%s -ss' % opt_base - - opt = '%s -pvol %d' % (opt_base, ldev) - ret, stdout, stderr = self.exec_hsnm('aureplicationlocal', - opt, noretry=True) - if ret == 0: - lines = 
stdout.splitlines() - pair_info = {'pvol': ldev, 'svol': []} - for line in lines[1:]: - status = self.trans_status_hsnm2raid(line) - if re.search('SnapShot', line[100:]): - is_vvol = True - else: - is_vvol = False - line = shlex.split(line) - if not line: - break - svol = int(line[2]) - pair_info['svol'].append({'lun': svol, - 'status': status, - 'is_vvol': is_vvol}) - return pair_info - - opt = '%s -svol %d' % (opt_base, ldev) - ret, stdout, stderr = self.exec_hsnm('aureplicationlocal', - opt, noretry=True) - if ret == 1: - return {'pvol': None, 'svol': []} - lines = stdout.splitlines() - status = self.trans_status_hsnm2raid(lines[1]) - if re.search('SnapShot', lines[1][100:]): - is_vvol = True - else: - is_vvol = False - line = shlex.split(lines[1]) - pvol = int(line[1]) - - return {'pvol': pvol, 'svol': [{'lun': ldev, - 'status': status, - 'is_vvol': is_vvol}]} - - def create_lock_file(self): - basic_lib.create_empty_file(self.hsnm_lock_file) - - def get_hostgroup_luns(self, port, gid): - list = [] - self.add_used_hlun('auhgmap', port, gid, list, DUMMY_LU) - - return list - - def get_ldev_size_in_gigabyte(self, ldev, existing_ref): - param = 'unit_name' - if param not in existing_ref: - msg = basic_lib.output_err(700, param=param) - raise exception.HBSDError(data=msg) - storage = existing_ref.get(param) - if storage != self.conf.hitachi_unit_name: - msg = basic_lib.output_err(648, resource=param) - raise exception.HBSDError(data=msg) - - try: - stdout = self._get_lu(ldev) - except exception.HBSDError: - with excutils.save_and_reraise_exception(): - basic_lib.output_err(648, resource='LDEV') - - lines = stdout.splitlines() - line = lines[2] - - splits = shlex.split(line) - - vol_type = splits[len(splits) - 1] - if basic_lib.NORMAL_VOLUME_TYPE != vol_type: - msg = basic_lib.output_err(702, ldev=ldev) - raise exception.HBSDError(data=msg) - - dppool = splits[5] - if 'N/A' == dppool: - msg = basic_lib.output_err(702, ldev=ldev) - raise exception.HBSDError(data=msg) 
- - # Hitachi storage calculates volume sizes in a block unit, 512 bytes. - # So, units.Gi is divided by 512. - size = int(splits[1]) - if size % (units.Gi / 512): - msg = basic_lib.output_err(703, ldev=ldev) - raise exception.HBSDError(data=msg) - - num_port = int(splits[len(splits) - 2]) - if num_port: - msg = basic_lib.output_err(704, ldev=ldev) - raise exception.HBSDError(data=msg) - - return size / (units.Gi / 512) diff --git a/cinder/volume/drivers/hitachi/hnas_backend.py b/cinder/volume/drivers/hitachi/hnas_backend.py deleted file mode 100644 index 4497d8247..000000000 --- a/cinder/volume/drivers/hitachi/hnas_backend.py +++ /dev/null @@ -1,483 +0,0 @@ -# Copyright (c) 2014 Hitachi Data Systems, Inc. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -# - -""" -Hitachi Unified Storage (HUS-HNAS) platform. Backend operations. 
-""" - -from oslo_concurrency import processutils as putils -from oslo_log import log as logging -from oslo_utils import units -import six - -from cinder import exception -from cinder.i18n import _ -from cinder import ssh_utils -from cinder import utils - -LOG = logging.getLogger("cinder.volume.driver") -HNAS_SSC_RETRIES = 5 - - -class HNASSSHBackend(object): - def __init__(self, backend_opts): - - self.mgmt_ip0 = backend_opts.get('mgmt_ip0') - self.hnas_cmd = backend_opts.get('ssc_cmd', 'ssc') - self.cluster_admin_ip0 = backend_opts.get('cluster_admin_ip0') - self.ssh_port = backend_opts.get('ssh_port', '22') - self.ssh_username = backend_opts.get('username') - self.ssh_pwd = backend_opts.get('password') - self.ssh_private_key = backend_opts.get('ssh_private_key') - self.storage_version = None - self.sshpool = None - self.fslist = {} - self.tgt_list = {} - - @utils.retry(exceptions=exception.HNASConnError, retries=HNAS_SSC_RETRIES, - wait_random=True) - def _run_cmd(self, *args, **kwargs): - """Runs a command on SMU using SSH. 
- - :returns: stdout and stderr of the command - """ - if self.cluster_admin_ip0 is None: - # Connect to SMU through SSH and run ssc locally - args = (self.hnas_cmd, 'localhost') + args - else: - args = (self.hnas_cmd, '--smuauth', self.cluster_admin_ip0) + args - - utils.check_ssh_injection(args) - command = ' '.join(args) - command = command.replace('"', '\\"') - - if not self.sshpool: - self.sshpool = ssh_utils.SSHPool(ip=self.mgmt_ip0, - port=int(self.ssh_port), - conn_timeout=None, - login=self.ssh_username, - password=self.ssh_pwd, - privatekey=self.ssh_private_key) - - with self.sshpool.item() as ssh: - try: - out, err = putils.ssh_execute(ssh, command, - check_exit_code=True) - LOG.debug("command %(cmd)s result: out = " - "%(out)s - err = %(err)s", - {'cmd': self.hnas_cmd, 'out': out, 'err': err}) - return out, err - except putils.ProcessExecutionError as e: - if 'Failed to establish SSC connection' in e.stderr: - msg = _("Failed to establish SSC connection!") - LOG.exception(msg) - raise exception.HNASConnError(msg) - elif 'Connection reset' in e.stderr: - msg = _("HNAS connection reset!") - LOG.exception(msg) - raise exception.HNASConnError(msg) - else: - raise - - def get_version(self): - """Gets version information from the storage unit. - - :returns: dictionary with HNAS information - - .. 
code:: python - - storage_version={ - 'mac': HNAS MAC ID, - 'model': HNAS model, - 'version': the software version, - 'hardware': the hardware version, - 'serial': HNAS serial number - } - - """ - if not self.storage_version: - version_info = {} - out, err = self._run_cmd("cluster-getmac") - mac = out.split(':')[1].strip() - version_info['mac'] = mac - - out, err = self._run_cmd("ver") - split_out = out.split('\n') - - model = split_out[1].split(':')[1].strip() - version = split_out[3].split()[1] - hardware = split_out[5].split(':')[1].strip() - serial = split_out[12].split()[2] - - version_info['model'] = model - version_info['version'] = version - version_info['hardware'] = hardware - version_info['serial'] = serial - - self.storage_version = version_info - - LOG.debug("version_info: %(info)s", {'info': self.storage_version}) - return self.storage_version - - def get_evs_info(self): - """Gets the IP addresses of all EVSs in HNAS. - - :returns: dictionary with EVS information - - .. code:: python - - evs_info={ - : {evs_number: number identifying the EVS1 on HNAS}, - : {evs_number: number identifying the EVS2 on HNAS}, - ... - } - - """ - evs_info = {} - out, err = self._run_cmd("evsipaddr", "-l") - - out = out.split('\n') - for line in out: - if 'evs' in line and 'admin' not in line: - ip = line.split()[3].strip() - evs_info[ip] = {} - evs_info[ip]['evs_number'] = line.split()[1].strip() - - return evs_info - - def get_fs_info(self, fs_label): - """Gets the information of a given FS. - - :param fs_label: Label of the filesystem - :returns: dictionary with FS information - - .. 
code:: python - - fs_info={ - 'id': a Logical Unit ID, - 'label': a Logical Unit name, - 'evs_id': the ID of the EVS in which the filesystem is created - (not present if there is a single EVS), - 'total_size': the total size of the FS (in GB), - 'used_size': the size that is already used (in GB), - 'available_size': the free space (in GB) - } - - """ - def _convert_size(param): - size = float(param) * units.Mi - return six.text_type(size) - - fs_info = {} - single_evs = True - id, lbl, evs, t_sz, u_sz, a_sz = 0, 1, 2, 3, 5, 12 - t_sz_unit, u_sz_unit, a_sz_unit = 4, 6, 13 - - out, err = self._run_cmd("df", "-af", fs_label) - - invalid_outs = ['Not mounted', 'Not determined', 'not found'] - - for problem in invalid_outs: - if problem in out: - return {} - - if 'EVS' in out: - single_evs = False - - fs_data = out.split('\n')[3].split() - - # Getting only the desired values from the output. If there is a single - # EVS, its ID is not shown in the output and we have to decrease the - # indexes to get the right values. - fs_info['id'] = fs_data[id] - fs_info['label'] = fs_data[lbl] - - if not single_evs: - fs_info['evs_id'] = fs_data[evs] - - fs_info['total_size'] = ( - (fs_data[t_sz]) if not single_evs else fs_data[t_sz - 1]) - fs_info['used_size'] = ( - fs_data[u_sz] if not single_evs else fs_data[u_sz - 1]) - fs_info['available_size'] = ( - fs_data[a_sz] if not single_evs else fs_data[a_sz - 1]) - - # Converting the sizes if necessary. 
- if not single_evs: - if fs_data[t_sz_unit] == 'TB': - fs_info['total_size'] = _convert_size(fs_info['total_size']) - if fs_data[u_sz_unit] == 'TB': - fs_info['used_size'] = _convert_size(fs_info['used_size']) - if fs_data[a_sz_unit] == 'TB': - fs_info['available_size'] = _convert_size( - fs_info['available_size']) - else: - if fs_data[t_sz_unit - 1] == 'TB': - fs_info['total_size'] = _convert_size(fs_info['total_size']) - if fs_data[u_sz_unit - 1] == 'TB': - fs_info['used_size'] = _convert_size(fs_info['used_size']) - if fs_data[a_sz_unit - 1] == 'TB': - fs_info['available_size'] = _convert_size( - fs_info['available_size']) - - fs_info['provisioned_capacity'] = 0 - - LOG.debug("File system info of %(fs)s (sizes in GB): %(info)s.", - {'fs': fs_label, 'info': fs_info}) - - return fs_info - - def get_evs(self, fs_label): - """Gets the EVS ID for the named filesystem. - - :param fs_label: The filesystem label related to the EVS required - :returns: EVS ID of the filesystem - """ - if not self.fslist: - self._get_fs_list() - - # When the FS is found in the list of known FS, returns the EVS ID - for key in self.fslist: - if fs_label == self.fslist[key]['label']: - LOG.debug("EVS ID for fs %(fs)s: %(id)s.", - {'fs': fs_label, 'id': self.fslist[key]['evsid']}) - return self.fslist[key]['evsid'] - LOG.debug("Can't find EVS ID for fs %(fs)s.", {'fs': fs_label}) - - def file_clone(self, fs_label, src, name): - """Clones NFS files to a new one named 'name'. - - Clone primitive used to support all NFS snapshot/cloning functions. 
- - :param fs_label: file system label of the new file - :param src: source file - :param name: target path of the new created file - """ - fs_list = self._get_fs_list() - fs = fs_list.get(fs_label) - if not fs: - LOG.error("Can't find file %(file)s in FS %(label)s", - {'file': src, 'label': fs_label}) - msg = _('FS label: %(fs_label)s') % {'fs_label': fs_label} - raise exception.InvalidParameterValue(err=msg) - - self._run_cmd("console-context", "--evs", fs['evsid'], - 'file-clone-create', '-f', fs_label, src, name) - LOG.debug('file_clone: fs:%(fs_label)s %(src)s/src: -> %(name)s/dst', - {'fs_label': fs_label, 'src': src, 'name': name}) - - def _get_fs_list(self): - """Gets a list of file systems configured on the backend. - - :returns: a list with the Filesystems configured on HNAS - """ - if not self.fslist: - fslist_out, err = self._run_cmd('evsfs', 'list') - list_raw = fslist_out.split('\n')[3:-2] - - for fs_raw in list_raw: - fs = {} - - fs_raw = fs_raw.split() - fs['id'] = fs_raw[0] - fs['label'] = fs_raw[1] - fs['permid'] = fs_raw[2] - fs['evsid'] = fs_raw[3] - fs['evslabel'] = fs_raw[4] - self.fslist[fs['label']] = fs - - return self.fslist - - def _get_evs_list(self): - """Gets a list of EVS configured on the backend. - - :returns: a list of the EVS configured on HNAS - """ - evslist_out, err = self._run_cmd('evs', 'list') - - evslist = {} - idx = 0 - for evs_raw in evslist_out.split('\n'): - idx += 1 - if 'Service' in evs_raw and 'Online' in evs_raw: - evs = {} - evs_line = evs_raw.split() - evs['node'] = evs_line[0] - evs['id'] = evs_line[1] - evs['label'] = evs_line[3] - evs['ips'] = [] - evs['ips'].append(evs_line[6]) - # Each EVS can have a list of IPs that are displayed in the - # next lines of the evslist_out. We need to check if the next - # lines is a new EVS entry or and IP of this current EVS. 
- for evs_ip_raw in evslist_out.split('\n')[idx:]: - if 'Service' in evs_ip_raw or not evs_ip_raw.split(): - break - ip = evs_ip_raw.split()[0] - evs['ips'].append(ip) - - evslist[evs['label']] = evs - - return evslist - - def get_export_list(self): - """Gets information on each NFS export. - - :returns: a list of the exports configured on HNAS - """ - nfs_export_out, _ = self._run_cmd('for-each-evs', '-q', 'nfs-export', - 'list') - fs_list = self._get_fs_list() - evs_list = self._get_evs_list() - - export_list = [] - - for export_raw_data in nfs_export_out.split("Export name:")[1:]: - export_info = {} - export_data = export_raw_data.split('\n') - - export_info['name'] = export_data[0].strip() - export_info['path'] = export_data[1].split(':')[1].strip() - export_info['fs'] = export_data[2].split(':')[1].strip() - - if "*** not available ***" in export_raw_data: - export_info['size'] = -1 - export_info['free'] = -1 - else: - evslbl = fs_list[export_info['fs']]['evslabel'] - export_info['evs'] = evs_list[evslbl]['ips'] - - size = export_data[3].split(':')[1].strip().split()[0] - multiplier = export_data[3].split(':')[1].strip().split()[1] - if multiplier == 'TB': - export_info['size'] = float(size) * units.Ki - else: - export_info['size'] = float(size) - - free = export_data[4].split(':')[1].strip().split()[0] - fmultiplier = export_data[4].split(':')[1].strip().split()[1] - if fmultiplier == 'TB': - export_info['free'] = float(free) * units.Ki - else: - export_info['free'] = float(free) - - export_list.append(export_info) - - LOG.debug("get_export_list: %(exp_list)s", {'exp_list': export_list}) - return export_list - - def _get_file_handler(self, volume_path, _evs_id, fs_label, - raise_except): - - try: - out, err = self._run_cmd("console-context", "--evs", _evs_id, - 'file-clone-stat', '-f', fs_label, - volume_path) - except putils.ProcessExecutionError as e: - if 'File is not a clone' in e.stderr and raise_except: - msg = (_("%s is not a clone!") % volume_path) - 
raise exception.ManageExistingInvalidReference( - existing_ref=volume_path, reason=msg) - else: - return - - lines = out.split('\n') - filehandle_list = [] - - for line in lines: - if "SnapshotFile:" in line and "FileHandle" in line: - item = line.split(':') - handler = item[1][:-1].replace(' FileHandle[', "") - filehandle_list.append(handler) - LOG.debug("Volume handler found: %(fh)s. Adding to list...", - {'fh': handler}) - - return filehandle_list - - def get_cloned_file_relatives(self, file_path, fs_label, - raise_except=False): - """Gets the files related to a clone - - :param file_path: path of the cloned file - :param fs_label: filesystem of the cloned file - :param raise_except: If True exception will be raised for files that - aren't clones. If False, only an error message - is logged. - :returns: list with names of the related files - """ - relatives = [] - - _evs_id = self.get_evs(fs_label) - - file_handler_list = self._get_file_handler(file_path, _evs_id, - fs_label, raise_except) - - if file_handler_list: - for file_handler in file_handler_list: - out, err = self._run_cmd('console-context', '--evs', _evs_id, - 'file-clone-stat-snapshot-file', '-f', - fs_label, file_handler) - - results = out.split('\n') - - for value in results: - if 'Clone:' in value and file_path not in value: - relative = value.split(':')[1] - relatives.append(relative) - else: - LOG.debug("File %(path)s is not a clone.", { - 'path': file_path}) - - return relatives - - def check_snapshot_parent(self, volume_path, snap_name, fs_label): - """Check if a volume is the snapshot source - - :param volume_path: path of the volume - :param snap_name: name of the snapshot - :param fs_label: filesystem label - :return: True if the volume is the snapshot's source or False otherwise - """ - lines = self.get_cloned_file_relatives(volume_path, fs_label, True) - - for line in lines: - if snap_name in line: - LOG.debug("Snapshot %(snap)s found in children list from " - "%(vol)s!", {'snap': 
snap_name, - 'vol': volume_path}) - return True - - LOG.debug("Snapshot %(snap)s was not found in children list from " - "%(vol)s, probably it is not the parent!", - {'snap': snap_name, 'vol': volume_path}) - return False - - def get_export_path(self, export, fs_label): - """Gets the path of an export on HNAS - - :param export: the export's name - :param fs_label: the filesystem name - :returns: string of the export's path - """ - evs_id = self.get_evs(fs_label) - out, err = self._run_cmd("console-context", "--evs", evs_id, - 'nfs-export', 'list', export) - - lines = out.split('\n') - - for line in lines: - if 'Export path:' in line: - return line.split('Export path:')[1].strip() diff --git a/cinder/volume/drivers/hitachi/hnas_nfs.py b/cinder/volume/drivers/hitachi/hnas_nfs.py deleted file mode 100644 index 45029d925..000000000 --- a/cinder/volume/drivers/hitachi/hnas_nfs.py +++ /dev/null @@ -1,1014 +0,0 @@ -# Copyright (c) 2014 Hitachi Data Systems, Inc. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -""" -Volume driver for HNAS NFS storage. 
-""" - -import math -import os -import socket - -from oslo_concurrency import processutils -from oslo_config import cfg -from oslo_log import log as logging -from oslo_log import versionutils -from oslo_utils import units -import six - -from cinder import exception -from cinder.i18n import _ -from cinder.image import image_utils -from cinder import interface -from cinder import utils as cutils -from cinder.volume import configuration -from cinder.volume.drivers.hitachi import hnas_backend -from cinder.volume.drivers.hitachi import hnas_utils -from cinder.volume.drivers import nfs -from cinder.volume import utils - - -HNAS_NFS_VERSION = '6.0.0' - -LOG = logging.getLogger(__name__) - -NFS_OPTS = [ - cfg.StrOpt('hds_hnas_nfs_config_file', - default='/opt/hds/hnas/cinder_nfs_conf.xml', - help='Legacy configuration file for HNAS NFS Cinder plugin. ' - 'This is not needed if you fill all configuration on ' - 'cinder.conf', - deprecated_for_removal=True) -] - -CONF = cfg.CONF -CONF.register_opts(NFS_OPTS, group=configuration.SHARED_CONF_GROUP) - -HNAS_DEFAULT_CONFIG = {'ssc_cmd': 'ssc', 'ssh_port': '22'} - - -@interface.volumedriver -class HNASNFSDriver(nfs.NfsDriver): - """Base class for Hitachi NFS driver. - - Executes commands relating to Volumes. - - Version history: - - .. 
code-block:: none - - Version 1.0.0: Initial driver version - Version 2.2.0: Added support to SSH authentication - Version 3.0.0: Added pool aware scheduling - Version 4.0.0: Added manage/unmanage features - Version 4.1.0: Fixed XML parser checks on blank options - Version 5.0.0: Remove looping in driver initialization - Code cleaning up - New communication interface between the driver and HNAS - Removed the option to use local SSC (ssh_enabled=False) - Updated to use versioned objects - Changed the class name to HNASNFSDriver - Deprecated XML config file - Added support to manage/unmanage snapshots features - Fixed driver stats reporting - Version 6.0.0: Deprecated hnas_svcX_vol_type configuration - Added list-manageable volumes/snapshots support - Rename snapshots to link with its original volume - """ - # ThirdPartySystems wiki page - CI_WIKI_NAME = "Hitachi_HNAS_CI" - VERSION = HNAS_NFS_VERSION - - SUPPORTED = False - - def __init__(self, *args, **kwargs): - msg = _("The Hitachi NAS driver is deprecated and will be " - "removed in a future release.") - versionutils.report_deprecated_feature(LOG, msg) - self._execute = None - self.context = None - self.configuration = kwargs.get('configuration', None) - - service_parameters = ['volume_type', 'hdp'] - optional_parameters = ['ssc_cmd', 'cluster_admin_ip0'] - - if self.configuration: - self.configuration.append_config_values( - hnas_utils.drivers_common_opts) - self.configuration.append_config_values(NFS_OPTS) - self.config = {} - - # Trying to get HNAS configuration from cinder.conf - self.config = hnas_utils.read_cinder_conf( - self.configuration) - - # If HNAS configuration are not set on cinder.conf, tries to use - # the deprecated XML configuration file - if not self.config: - self.config = hnas_utils.read_xml_config( - self.configuration.hds_hnas_nfs_config_file, - service_parameters, - optional_parameters) - - super(HNASNFSDriver, self).__init__(*args, **kwargs) - self.backend = 
hnas_backend.HNASSSHBackend(self.config) - - def _get_service(self, volume): - """Get service parameters. - - Get the available service parameters for a given volume using - its type. - - :param volume: dictionary volume reference - :returns: Tuple containing the service parameters (label, - export path and export file system) or error if no configuration is - found. - :raises ParameterNotFound: - """ - LOG.debug("_get_service: volume: %(vol)s", {'vol': volume}) - label = utils.extract_host(volume.host, level='pool') - - if label in self.config['services'].keys(): - svc = self.config['services'][label] - LOG.debug("_get_service: %(lbl)s->%(svc)s", - {'lbl': label, 'svc': svc['export']['fs']}) - service = (svc['hdp'], svc['export']['path'], svc['export']['fs']) - else: - LOG.info("Available services: %(svc)s", - {'svc': self.config['services'].keys()}) - LOG.error("No configuration found for service: %(lbl)s", - {'lbl': label}) - raise exception.ParameterNotFound(param=label) - - return service - - def _get_snapshot_name(self, snapshot): - snap_file_name = ("%(vol_name)s.%(snap_id)s" % - {'vol_name': snapshot.volume.name, - 'snap_id': snapshot.id}) - return snap_file_name - - @cutils.trace - def extend_volume(self, volume, new_size): - """Extend an existing volume. - - :param volume: dictionary volume reference - :param new_size: int size in GB to extend - :raises InvalidResults: - """ - nfs_mount = volume.provider_location - path = self._get_file_path(nfs_mount, volume.name) - - # Resize the image file on share to new size. 
- LOG.info("Checking file for resize.") - - if not self._is_file_size_equal(path, new_size): - LOG.info("Resizing file to %(sz)sG", {'sz': new_size}) - image_utils.resize_image(path, new_size) - - if self._is_file_size_equal(path, new_size): - LOG.info("LUN %(id)s extended to %(size)s GB.", - {'id': volume.id, 'size': new_size}) - else: - msg = _("Resizing image file failed.") - LOG.error(msg) - raise exception.InvalidResults(msg) - - def _is_file_size_equal(self, path, size): - """Checks if file size at path is equal to size.""" - data = image_utils.qemu_img_info(path) - virt_size = data.virtual_size / units.Gi - - if virt_size == size: - return True - else: - return False - - @cutils.trace - def create_volume_from_snapshot(self, volume, snapshot): - """Creates a volume from a snapshot. - - :param volume: volume to be created - :param snapshot: source snapshot - :returns: the provider_location of the volume created - """ - nfs_mount = snapshot.volume.provider_location - snapshot_name = self._get_snapshot_name(snapshot) - - if self._file_not_present(nfs_mount, snapshot_name): - LOG.info("Creating volume %(vol)s from legacy " - "snapshot %(snap)s.", - {'vol': volume.name, 'snap': snapshot.name}) - snapshot_name = snapshot.name - - self._clone_volume(snapshot.volume, volume.name, snapshot_name) - - return {'provider_location': nfs_mount} - - @cutils.trace - def create_snapshot(self, snapshot): - """Create a snapshot. - - :param snapshot: dictionary snapshot reference - :returns: the provider_location of the snapshot created - """ - snapshot_name = self._get_snapshot_name(snapshot) - self._clone_volume(snapshot.volume, snapshot_name) - - share = snapshot.volume.provider_location - LOG.debug('Share: %(shr)s', {'shr': share}) - - # returns the mount point (not path) - return {'provider_location': share} - - @cutils.trace - def delete_snapshot(self, snapshot): - """Deletes a snapshot. 
- - :param snapshot: dictionary snapshot reference - """ - nfs_mount = snapshot.volume.provider_location - snapshot_name = self._get_snapshot_name(snapshot) - - if self._file_not_present(nfs_mount, snapshot_name): - # Snapshot with new name does not exist. The verification - # for a file with legacy name will be done. - snapshot_name = snapshot.name - - if self._file_not_present(nfs_mount, snapshot_name): - # The file does not exist. Nothing to do. - return - - self._execute('rm', self._get_file_path( - nfs_mount, snapshot_name), run_as_root=True) - - def _file_not_present(self, nfs_mount, volume_name): - """Check if file does not exist. - - :param nfs_mount: string path of the nfs share - :param volume_name: string volume name - :returns: boolean (true for file not present and false otherwise) - """ - try: - self._execute('ls', self._get_file_path(nfs_mount, volume_name)) - except processutils.ProcessExecutionError as e: - if "No such file or directory" in e.stderr: - # If the file isn't present - return True - else: - raise - - return False - - def _get_file_path(self, nfs_share, file_name): - """Get file path (local fs path) for given name on given nfs share. - - :param nfs_share string, example 172.18.194.100:/var/nfs - :param file_name string, - example volume-91ee65ec-c473-4391-8c09-162b00c68a8c - :returns: the local path according to the parameters - """ - return os.path.join(self._get_mount_point_for_share(nfs_share), - file_name) - - @cutils.trace - def create_cloned_volume(self, volume, src_vref): - """Creates a clone of the specified volume. - - :param volume: reference to the volume being created - :param src_vref: reference to the source volume - :returns: the provider_location of the cloned volume - """ - - # HNAS always creates cloned volumes in the same pool as the source - # volumes. So, it is not allowed to use different volume types for - # clone operations. 
- if volume.volume_type_id != src_vref.volume_type_id: - msg = _("Source and cloned volumes should have the same " - "volume type.") - LOG.error(msg) - raise exception.InvalidVolumeType(msg) - - vol_size = volume.size - src_vol_size = src_vref.size - - self._clone_volume(src_vref, volume.name, src_vref.name) - - share = src_vref.provider_location - - if vol_size > src_vol_size: - volume.provider_location = share - self.extend_volume(volume, vol_size) - - return {'provider_location': share} - - def get_volume_stats(self, refresh=False): - """Get volume stats. - - :param refresh: if it is True, update the stats first. - :returns: dictionary with the stats from HNAS - - .. code:: python - - _stats['pools'] = { - 'total_capacity_gb': total size of the pool, - 'free_capacity_gb': the available size, - 'QoS_support': bool to indicate if QoS is supported, - 'reserved_percentage': percentage of size reserved, - 'max_over_subscription_ratio': oversubscription rate, - 'thin_provisioning_support': thin support (True), - } - - """ - LOG.info("Getting volume stats") - - _stats = super(HNASNFSDriver, self).get_volume_stats(refresh) - _stats["vendor_name"] = 'Hitachi' - _stats["driver_version"] = HNAS_NFS_VERSION - _stats["storage_protocol"] = 'NFS' - - max_osr = self.max_over_subscription_ratio - - for pool in self.pools: - capacity, free, provisioned = self._get_capacity_info(pool['fs']) - pool['total_capacity_gb'] = capacity / float(units.Gi) - pool['free_capacity_gb'] = free / float(units.Gi) - pool['provisioned_capacity_gb'] = provisioned / float(units.Gi) - pool['QoS_support'] = 'False' - pool['reserved_percentage'] = self.reserved_percentage - pool['max_over_subscription_ratio'] = max_osr - pool['thin_provisioning_support'] = True - - _stats['pools'] = self.pools - - LOG.debug('Driver stats: %(stat)s', {'stat': _stats}) - - return _stats - - def do_setup(self, context): - """Perform internal driver setup.""" - version_info = self.backend.get_version() - LOG.info("HNAS NFS 
driver.") - LOG.info("HNAS model: %(mdl)s", {'mdl': version_info['model']}) - LOG.info("HNAS version: %(ver)s", - {'ver': version_info['version']}) - LOG.info("HNAS hardware: %(hw)s", - {'hw': version_info['hardware']}) - LOG.info("HNAS S/N: %(sn)s", {'sn': version_info['serial']}) - - self.context = context - self._load_shares_config( - getattr(self.configuration, self.driver_prefix + '_shares_config')) - LOG.info("Review shares: %(shr)s", {'shr': self.shares}) - - elist = self.backend.get_export_list() - - # Check for all configured exports - for svc_name, svc_info in self.config['services'].items(): - server_ip = svc_info['hdp'].split(':')[0] - mountpoint = svc_info['hdp'].split(':')[1] - - # Ensure export are configured in HNAS - export_configured = False - for export in elist: - if mountpoint == export['name'] and server_ip in export['evs']: - svc_info['export'] = export - export_configured = True - - # Ensure export are reachable - try: - out, err = self._execute('showmount', '-e', server_ip) - except processutils.ProcessExecutionError: - LOG.exception("NFS server %(srv)s not reachable!", - {'srv': server_ip}) - raise - - export_list = out.split('\n')[1:] - export_list.pop() - mountpoint_not_found = mountpoint not in map( - lambda x: x.split()[0], export_list) - if (len(export_list) < 1 or - mountpoint_not_found or - not export_configured): - LOG.error("Configured share %(share)s is not present" - "in %(srv)s.", - {'share': mountpoint, 'srv': server_ip}) - msg = _('Section: %(svc_name)s') % {'svc_name': svc_name} - raise exception.InvalidParameterValue(err=msg) - - LOG.debug("Loading services: %(svc)s", { - 'svc': self.config['services']}) - - service_list = self.config['services'].keys() - for svc in service_list: - svc = self.config['services'][svc] - pool = {} - pool['pool_name'] = svc['pool_name'] - pool['service_label'] = svc['pool_name'] - pool['fs'] = svc['hdp'] - - self.pools.append(pool) - - LOG.debug("Configured pools: %(pool)s", {'pool': 
self.pools}) - LOG.info("HNAS NFS Driver loaded successfully.") - - def _clone_volume(self, src_vol, clone_name, src_name=None): - """Clones mounted volume using the HNAS file_clone. - - :param src_vol: object source volume - :param clone_name: string clone name (or snapshot) - :param src_name: name of the source volume. - """ - - # when the source is a snapshot, we need to pass the source name and - # use the information of the volume that originated the snapshot to - # get the clone path. - if not src_name: - src_name = src_vol.name - - # volume-ID snapshot-ID, /cinder - LOG.info("Cloning with volume_name %(vname)s, clone_name %(cname)s" - " ,export_path %(epath)s", - {'vname': src_name, 'cname': clone_name, - 'epath': src_vol.provider_location}) - - (fs, path, fs_label) = self._get_service(src_vol) - - target_path = '%s/%s' % (path, clone_name) - source_path = '%s/%s' % (path, src_name) - - self.backend.file_clone(fs_label, source_path, target_path) - - @cutils.trace - def create_volume(self, volume): - """Creates a volume. - - :param volume: volume reference - :returns: the volume provider_location - """ - self._ensure_shares_mounted() - - (fs_id, path, fslabel) = self._get_service(volume) - - volume.provider_location = fs_id - - LOG.info("Volume service: %(label)s. Casted to: %(loc)s", - {'label': fslabel, 'loc': volume.provider_location}) - - self._do_create_volume(volume) - - return {'provider_location': fs_id} - - def _convert_vol_ref_share_name_to_share_ip(self, vol_ref): - """Converts the share point name to an IP address. - - The volume reference may have a DNS name portion in the share name. - Convert that to an IP address and then restore the entire path. - - :param vol_ref: driver-specific information used to identify a volume - :returns: a volume reference where share is in IP format or raises - error - :raises e.strerror: - """ - - # First strip out share and convert to IP format. 
- share_split = vol_ref.split(':') - - try: - vol_ref_share_ip = cutils.resolve_hostname(share_split[0]) - except socket.gaierror as e: - LOG.exception('Invalid hostname %(host)s', - {'host': share_split[0]}) - LOG.debug('error: %(err)s', {'err': e.strerror}) - raise - - # Now place back into volume reference. - vol_ref_share = vol_ref_share_ip + ':' + share_split[1] - - return vol_ref_share - - def _get_share_mount_and_vol_from_vol_ref(self, vol_ref): - """Get the NFS share, the NFS mount, and the volume from reference. - - Determine the NFS share point, the NFS mount point, and the volume - (with possible path) from the given volume reference. Raise exception - if unsuccessful. - - :param vol_ref: driver-specific information used to identify a volume - :returns: NFS Share, NFS mount, volume path or raise error - :raises ManageExistingInvalidReference: - """ - # Check that the reference is valid. - if 'source-name' not in vol_ref: - reason = _('Reference must contain source-name element.') - raise exception.ManageExistingInvalidReference( - existing_ref=vol_ref, reason=reason) - vol_ref_name = vol_ref['source-name'] - - self._ensure_shares_mounted() - - # If a share was declared as '1.2.3.4:/a/b/c' in the nfs_shares_config - # file, but the admin tries to manage the file located at - # 'my.hostname.com:/a/b/c/d.vol', this might cause a lookup miss below - # when searching self._mounted_shares to see if we have an existing - # mount that would work to access the volume-to-be-managed (a string - # comparison is done instead of IP comparison). 
- vol_ref_share = self._convert_vol_ref_share_name_to_share_ip( - vol_ref_name) - for nfs_share in self._mounted_shares: - cfg_share = self._convert_vol_ref_share_name_to_share_ip(nfs_share) - (orig_share, work_share, - file_path) = vol_ref_share.partition(cfg_share) - if work_share == cfg_share: - file_path = file_path[1:] # strip off leading path divider - LOG.debug("Found possible share %(shr)s; checking mount.", - {'shr': work_share}) - nfs_mount = self._get_mount_point_for_share(nfs_share) - vol_full_path = os.path.join(nfs_mount, file_path) - if os.path.isfile(vol_full_path): - LOG.debug("Found share %(share)s and vol %(path)s on " - "mount %(mnt)s.", - {'share': nfs_share, 'path': file_path, - 'mnt': nfs_mount}) - return nfs_share, nfs_mount, file_path - else: - LOG.debug("vol_ref %(ref)s not on share %(share)s.", - {'ref': vol_ref_share, 'share': nfs_share}) - - raise exception.ManageExistingInvalidReference( - existing_ref=vol_ref, - reason=_('Volume/Snapshot not found on configured storage ' - 'backend.')) - - @cutils.trace - def manage_existing(self, volume, existing_vol_ref): - """Manages an existing volume. - - The specified Cinder volume is to be taken into Cinder management. - The driver will verify its existence and then rename it to the - new Cinder volume name. It is expected that the existing volume - reference is an NFS share point and some [/path]/volume; - e.g., 10.10.32.1:/openstack/vol_to_manage - or 10.10.32.1:/openstack/some_directory/vol_to_manage - - :param volume: cinder volume to manage - :param existing_vol_ref: driver-specific information used to identify a - volume - :returns: the provider location - :raises VolumeBackendAPIException: - """ - - # Attempt to find NFS share, NFS mount, and volume path from vol_ref. 
- (nfs_share, nfs_mount, vol_name - ) = self._get_share_mount_and_vol_from_vol_ref(existing_vol_ref) - - LOG.info("Asked to manage NFS volume %(vol)s, " - "with vol ref %(ref)s.", - {'vol': volume.id, - 'ref': existing_vol_ref['source-name']}) - - vol_id = utils.extract_id_from_volume_name(vol_name) - if utils.check_already_managed_volume(vol_id): - raise exception.ManageExistingAlreadyManaged(volume_ref=vol_name) - - self._check_pool_and_share(volume, nfs_share) - - if vol_name == volume.name: - LOG.debug("New Cinder volume %(vol)s name matches reference name: " - "no need to rename.", {'vol': volume.name}) - else: - src_vol = os.path.join(nfs_mount, vol_name) - dst_vol = os.path.join(nfs_mount, volume.name) - try: - self._try_execute("mv", src_vol, dst_vol, run_as_root=False, - check_exit_code=True) - LOG.debug("Setting newly managed Cinder volume name " - "to %(vol)s.", {'vol': volume.name}) - self._set_rw_permissions_for_all(dst_vol) - except (OSError, processutils.ProcessExecutionError) as err: - msg = (_("Failed to manage existing volume " - "%(name)s, because rename operation " - "failed: Error msg: %(msg)s.") % - {'name': existing_vol_ref['source-name'], - 'msg': six.text_type(err)}) - LOG.error(msg) - raise exception.VolumeBackendAPIException(data=msg) - return {'provider_location': nfs_share} - - def _check_pool_and_share(self, volume, nfs_share): - """Validates the pool and the NFS share. - - Checks if the NFS share for the volume-type chosen matches the - one passed in the volume reference. Also, checks if the pool - for the volume type matches the pool for the host passed. 
- - :param volume: cinder volume reference - :param nfs_share: NFS share passed to manage - :raises ManageExistingVolumeTypeMismatch: - """ - pool_from_vol_type = hnas_utils.get_pool(self.config, volume) - - pool_from_host = utils.extract_host(volume.host, level='pool') - - if (pool_from_vol_type == 'default' and - 'default' not in self.config['services']): - msg = (_("Failed to manage existing volume %(volume)s because the " - "chosen volume type %(vol_type)s does not have a " - "service_label configured in its extra-specs and there " - "is no pool configured with hnas_svcX_volume_type as " - "'default' in cinder.conf.") % - {'volume': volume.id, - 'vol_type': getattr(volume.volume_type, 'id', None)}) - LOG.error(msg) - raise exception.ManageExistingVolumeTypeMismatch(reason=msg) - - pool = self.config['services'][pool_from_vol_type]['hdp'] - if pool != nfs_share: - msg = (_("Failed to manage existing volume because the pool of " - "the volume type chosen (%(pool)s) does not match the " - "NFS share passed in the volume reference (%(share)s).") - % {'share': nfs_share, 'pool': pool}) - LOG.error(msg) - raise exception.ManageExistingVolumeTypeMismatch(reason=msg) - - if pool_from_host != pool_from_vol_type: - msg = (_("Failed to manage existing volume because the pool of " - "the volume type chosen (%(pool)s) does not match the " - "pool of the host %(pool_host)s") % - {'pool': pool_from_vol_type, - 'pool_host': pool_from_host}) - LOG.error(msg) - raise exception.ManageExistingVolumeTypeMismatch(reason=msg) - - @cutils.trace - def manage_existing_get_size(self, volume, existing_vol_ref): - """Returns the size of volume to be managed by manage_existing. - - When calculating the size, round up to the next GB. 
- - :param volume: cinder volume to manage - :param existing_vol_ref: existing volume to take under management - :returns: the size of the volume or raise error - :raises VolumeBackendAPIException: - """ - return self._manage_existing_get_size(existing_vol_ref) - - @cutils.trace - def unmanage(self, volume): - """Removes the specified volume from Cinder management. - - It does not delete the underlying backend storage object. A log entry - will be made to notify the Admin that the volume is no longer being - managed. - - :param volume: cinder volume to unmanage - """ - vol_str = CONF.volume_name_template % volume.id - path = self._get_mount_point_for_share(volume.provider_location) - - new_str = "unmanage-" + vol_str - - vol_path = os.path.join(path, vol_str) - new_path = os.path.join(path, new_str) - - try: - self._try_execute("mv", vol_path, new_path, - run_as_root=False, check_exit_code=True) - - LOG.info("The volume with path %(old)s is no longer being " - "managed by Cinder. However, it was not deleted " - "and can be found in the new path %(cr)s.", - {'old': vol_path, 'cr': new_path}) - - except (OSError, ValueError): - LOG.exception("The NFS Volume %(cr)s does not exist.", - {'cr': new_path}) - - def _get_file_size(self, file_path): - file_size = float(cutils.get_file_size(file_path)) / units.Gi - # Round up to next Gb - return int(math.ceil(file_size)) - - def _manage_existing_get_size(self, existing_ref): - # Attempt to find NFS share, NFS mount, and path from vol_ref. 
- (nfs_share, nfs_mount, path - ) = self._get_share_mount_and_vol_from_vol_ref(existing_ref) - - try: - LOG.debug("Asked to get size of NFS ref %(ref)s.", - {'ref': existing_ref['source-name']}) - - file_path = os.path.join(nfs_mount, path) - size = self._get_file_size(file_path) - except (OSError, ValueError): - exception_message = (_("Failed to manage existing volume/snapshot " - "%(name)s, because of error in getting " - "its size."), - {'name': existing_ref['source-name']}) - LOG.exception(exception_message) - raise exception.VolumeBackendAPIException(data=exception_message) - - LOG.debug("Reporting size of NFS ref %(ref)s as %(size)d GB.", - {'ref': existing_ref['source-name'], 'size': size}) - - return size - - def _check_snapshot_parent(self, volume, old_snap_name, share): - volume_name = 'volume-' + volume.id - (fs, path, fs_label) = self._get_service(volume) - # 172.24.49.34:/nfs_cinder - - export_path = self.backend.get_export_path(share.split(':')[1], - fs_label) - volume_path = os.path.join(export_path, volume_name) - - return self.backend.check_snapshot_parent(volume_path, old_snap_name, - fs_label) - - def _get_snapshot_origin_from_name(self, snap_name): - """Gets volume name from snapshot names""" - if 'unmanage' in snap_name: - return snap_name.split('.')[0][9:] - - return snap_name.split('.')[0] - - @cutils.trace - def manage_existing_snapshot(self, snapshot, existing_ref): - """Brings an existing backend storage object under Cinder management. - - :param snapshot: Cinder volume snapshot to manage - :param existing_ref: Driver-specific information used to identify a - volume snapshot - """ - - # Attempt to find NFS share, NFS mount, and volume path from ref. 
- (nfs_share, nfs_mount, src_snapshot_name - ) = self._get_share_mount_and_vol_from_vol_ref(existing_ref) - - LOG.info("Asked to manage NFS snapshot %(snap)s for volume " - "%(vol)s, with vol ref %(ref)s.", - {'snap': snapshot.id, - 'vol': snapshot.volume_id, - 'ref': existing_ref['source-name']}) - - volume = snapshot.volume - parent_name = self._get_snapshot_origin_from_name(src_snapshot_name) - - if parent_name != volume.name: - # Check if the snapshot belongs to the volume for the legacy case - if not self._check_snapshot_parent( - volume, src_snapshot_name, nfs_share): - msg = (_("This snapshot %(snap)s doesn't belong " - "to the volume parent %(vol)s.") % - {'snap': src_snapshot_name, 'vol': volume.id}) - raise exception.ManageExistingInvalidReference( - existing_ref=existing_ref, reason=msg) - - snapshot_name = self._get_snapshot_name(snapshot) - - if src_snapshot_name == snapshot_name: - LOG.debug("New Cinder snapshot %(snap)s name matches reference " - "name. No need to rename.", {'snap': snapshot_name}) - else: - src_snap = os.path.join(nfs_mount, src_snapshot_name) - dst_snap = os.path.join(nfs_mount, snapshot_name) - try: - self._try_execute("mv", src_snap, dst_snap, run_as_root=False, - check_exit_code=True) - LOG.info("Setting newly managed Cinder snapshot name " - "to %(snap)s.", {'snap': snapshot_name}) - self._set_rw_permissions_for_all(dst_snap) - except (OSError, processutils.ProcessExecutionError) as err: - msg = (_("Failed to manage existing snapshot " - "%(name)s, because rename operation " - "failed: Error msg: %(msg)s.") % - {'name': existing_ref['source-name'], - 'msg': six.text_type(err)}) - LOG.error(msg) - raise exception.VolumeBackendAPIException(data=msg) - return {'provider_location': nfs_share} - - @cutils.trace - def manage_existing_snapshot_get_size(self, snapshot, existing_ref): - return self._manage_existing_get_size(existing_ref) - - @cutils.trace - def unmanage_snapshot(self, snapshot): - """Removes the specified snapshot from 
Cinder management. - - Does not delete the underlying backend storage object. - - :param snapshot: Cinder volume snapshot to unmanage - """ - - path = self._get_mount_point_for_share(snapshot.provider_location) - snapshot_name = self._get_snapshot_name(snapshot) - - if self._file_not_present(snapshot.provider_location, snapshot_name): - LOG.info("Unmanaging legacy snapshot %(snap)s.", - {'snap': snapshot.name}) - snapshot_name = snapshot.name - - new_name = "unmanage-" + snapshot_name - - old_path = os.path.join(path, snapshot_name) - new_path = os.path.join(path, new_name) - - try: - self._execute("mv", old_path, new_path, - run_as_root=False, check_exit_code=True) - LOG.info("The snapshot with path %(old)s is no longer being " - "managed by Cinder. However, it was not deleted and " - "can be found in the new path %(cr)s.", - {'old': old_path, 'cr': new_path}) - - except (OSError, ValueError): - LOG.exception("The NFS snapshot %(old)s does not exist.", - {'old': old_path}) - - def _get_volumes_from_export(self, export_path): - mnt_point = self._get_mount_point_for_share(export_path) - - vols = self._execute("ls", mnt_point, run_as_root=False, - check_exit_code=True) - - vols = vols[0].split('\n') - if '' in vols: - vols.remove('') - - return list(vols) - - def _get_snapshot_origin(self, snap_path, fs_label): - relatives = self.backend.get_cloned_file_relatives(snap_path, fs_label) - - origin = [] - - if not relatives: - return - elif len(relatives) > 1: - for relative in relatives: - if 'snapshot' not in relative: - origin.append(relative) - else: - origin.append(relatives[0]) - - return origin - - def _get_manageable_resource_info(self, cinder_resources, resource_type, - marker, limit, offset, sort_keys, - sort_dirs): - """Gets the resources on the backend available for management by Cinder. 
- - Receives the parameters from "get_manageable_volumes" and - "get_manageable_snapshots" and gets the available resources - - :param cinder_resources: A list of resources in this host that Cinder - currently manages - :param resource_type: If it's a volume or a snapshot - :param marker: The last item of the previous page; we return the - next results after this value (after sorting) - :param limit: Maximum number of items to return - :param offset: Number of items to skip after marker - :param sort_keys: List of keys to sort results by (valid keys - are 'identifier' and 'size') - :param sort_dirs: List of directions to sort by, corresponding to - sort_keys (valid directions are 'asc' and 'desc') - - :returns: list of dictionaries, each specifying a volume or snapshot - (resource) in the host, with the following keys: - - reference (dictionary): The reference for a resource, - which can be passed to "manage_existing_snapshot". - - size (int): The size of the resource according to the storage - backend, rounded up to the nearest GB. - - safe_to_manage (boolean): Whether or not this resource is - safe to manage according to the storage backend. - - reason_not_safe (string): If safe_to_manage is False, - the reason why. - - cinder_id (string): If already managed, provide the Cinder ID. - - extra_info (string): Any extra information to return to the - user - - source_reference (string): Similar to "reference", but for the - snapshot's source volume. 
- """ - - entries = [] - exports = {} - bend_rsrc = {} - cinder_ids = [resource.id for resource in cinder_resources] - - for service in self.config['services']: - exp_path = self.config['services'][service]['hdp'] - exports[exp_path] = ( - self.config['services'][service]['export']['fs']) - - for exp in exports.keys(): - # bend_rsrc has all the resources in the specified exports - # volumes {u'172.24.54.39:/Export-Cinder': - # ['volume-325e7cdc-8f65-40a8-be9a-6172c12c9394', - # ' snapshot-1bfb6f0d-9497-4c12-a052-5426a76cacdc','']} - bend_rsrc[exp] = self._get_volumes_from_export(exp) - mnt_point = self._get_mount_point_for_share(exp) - - for resource in bend_rsrc[exp]: - # Ignoring resources of unwanted types - if ((resource_type == 'volume' and - ('.' in resource or 'snapshot' in resource)) or - (resource_type == 'snapshot' and '.' not in resource and - 'snapshot' not in resource)): - continue - - path = '%s/%s' % (exp, resource) - mnt_path = '%s/%s' % (mnt_point, resource) - size = self._get_file_size(mnt_path) - - rsrc_inf = {'reference': {'source-name': path}, - 'size': size, 'cinder_id': None, - 'extra_info': None} - - if resource_type == 'volume': - potential_id = utils.extract_id_from_volume_name(resource) - elif 'snapshot' in resource: - # This is for the snapshot legacy case - potential_id = utils.extract_id_from_snapshot_name( - resource) - else: - potential_id = resource.split('.')[1] - - # When a resource is already managed by cinder, it's not - # recommended to manage it again. So we set safe_to_manage = - # False. Otherwise, it is set safe_to_manage = True. - if potential_id in cinder_ids: - rsrc_inf['safe_to_manage'] = False - rsrc_inf['reason_not_safe'] = 'already managed' - rsrc_inf['cinder_id'] = potential_id - else: - rsrc_inf['safe_to_manage'] = True - rsrc_inf['reason_not_safe'] = None - - # If it's a snapshot, we try to get its source volume. However, - # this search is not reliable in some cases. 
So, if it's not - # possible to return a precise result, we return unknown as - # source-reference, throw a warning message and fill the - # extra-info. - if resource_type == 'snapshot': - if 'snapshot' not in resource: - origin = self._get_snapshot_origin_from_name(resource) - if 'unmanage' in origin: - origin = origin[16:] - else: - origin = origin[7:] - rsrc_inf['source_reference'] = {'id': origin} - else: - path = path.split(':')[1] - origin = self._get_snapshot_origin(path, exports[exp]) - - if not origin: - # if origin is empty, the file is not a clone - continue - elif len(origin) == 1: - origin = origin[0].split('/')[2] - origin = utils.extract_id_from_volume_name(origin) - rsrc_inf['source_reference'] = {'id': origin} - else: - LOG.warning("Could not determine the volume " - "that owns the snapshot %(snap)s", - {'snap': resource}) - rsrc_inf['source_reference'] = {'id': 'unknown'} - rsrc_inf['extra_info'] = ('Could not determine ' - 'the volume that owns ' - 'the snapshot') - - entries.append(rsrc_inf) - - return utils.paginate_entries_list(entries, marker, limit, offset, - sort_keys, sort_dirs) - - @cutils.trace - def get_manageable_volumes(self, cinder_volumes, marker, limit, offset, - sort_keys, sort_dirs): - """List volumes on the backend available for management by Cinder.""" - - return self._get_manageable_resource_info(cinder_volumes, 'volume', - marker, limit, offset, - sort_keys, sort_dirs) - - @cutils.trace - def get_manageable_snapshots(self, cinder_snapshots, marker, limit, offset, - sort_keys, sort_dirs): - """List snapshots on the backend available for management by Cinder.""" - - return self._get_manageable_resource_info(cinder_snapshots, 'snapshot', - marker, limit, offset, - sort_keys, sort_dirs) diff --git a/cinder/volume/drivers/hitachi/hnas_utils.py b/cinder/volume/drivers/hitachi/hnas_utils.py deleted file mode 100644 index edd8c4193..000000000 --- a/cinder/volume/drivers/hitachi/hnas_utils.py +++ /dev/null @@ -1,342 +0,0 @@ -# 
Copyright (c) 2016 Hitachi Data Systems, Inc. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -""" -Shared code for HNAS drivers -""" - -import os -import re - -from oslo_config import cfg -from oslo_log import log as logging -import six -from xml.etree import ElementTree as ETree - -from cinder import exception -from cinder.i18n import _ -from cinder.volume import configuration -from cinder.volume import volume_types - -LOG = logging.getLogger(__name__) - -HNAS_DEFAULT_CONFIG = {'ssc_cmd': 'ssc', - 'chap_enabled': True, - 'ssh_port': 22} - -MAX_HNAS_ISCSI_TARGETS = 32 - -drivers_common_opts = [ - cfg.IPOpt('hnas_mgmt_ip0', - help='Management IP address of HNAS. This can ' - 'be any IP in the admin address on HNAS or ' - 'the SMU IP.'), - cfg.StrOpt('hnas_ssc_cmd', - default='ssc', - help='Command to communicate to HNAS.'), - cfg.StrOpt('hnas_username', - help='HNAS username.'), - cfg.StrOpt('hnas_password', - secret=True, - help='HNAS password.'), - cfg.PortOpt('hnas_ssh_port', - default=22, - help='Port to be used for SSH authentication.'), - cfg.StrOpt('hnas_ssh_private_key', - help='Path to the SSH private key used to ' - 'authenticate in HNAS SMU.'), - cfg.StrOpt('hnas_cluster_admin_ip0', - default=None, - help='The IP of the HNAS cluster admin. 
' - 'Required only for HNAS multi-cluster setups.'), - cfg.StrOpt('hnas_svc0_pool_name', - help='Service 0 pool name', - deprecated_name='hnas_svc0_volume_type'), - cfg.StrOpt('hnas_svc0_hdp', - help='Service 0 HDP'), - cfg.StrOpt('hnas_svc1_pool_name', - help='Service 1 pool name', - deprecated_name='hnas_svc1_volume_type'), - cfg.StrOpt('hnas_svc1_hdp', - help='Service 1 HDP'), - cfg.StrOpt('hnas_svc2_pool_name', - help='Service 2 pool name', - deprecated_name='hnas_svc2_volume_type'), - cfg.StrOpt('hnas_svc2_hdp', - help='Service 2 HDP'), - cfg.StrOpt('hnas_svc3_pool_name', - help='Service 3 pool name:', - deprecated_name='hnas_svc3_volume_type'), - cfg.StrOpt('hnas_svc3_hdp', - help='Service 3 HDP') -] - -CONF = cfg.CONF -CONF.register_opts(drivers_common_opts, group=configuration.SHARED_CONF_GROUP) - - -def _check_conf_params(config, pool_name, idx): - """Validates if the configuration on cinder.conf is complete. - - :param config: Dictionary with the driver configurations - :param pool_name: The name of the current pool - :param dv_type: The type of the driver (NFS or iSCSI) - :param idx: Index of the current pool - """ - - # Validating the inputs on cinder.conf - if config['username'] is None: - msg = (_("The config parameter hnas_username " - "is not set in the cinder.conf.")) - LOG.error(msg) - raise exception.InvalidParameterValue(err=msg) - - if (config['password'] is None and - config['ssh_private_key'] is None): - msg = (_("Credentials configuration parameters " - "missing: you need to set hnas_password " - "or hnas_ssh_private_key " - "in the cinder.conf.")) - LOG.error(msg) - raise exception.InvalidParameterValue(err=msg) - - if config['mgmt_ip0'] is None: - msg = (_("The config parameter hnas_mgmt_ip0 " - "is not set in the cinder.conf.")) - LOG.error(msg) - raise exception.InvalidParameterValue(err=msg) - - if config['services'][pool_name]['hdp'] is None: - msg = (_("The config parameter hnas_svc%(idx)s_hdp is " - "not set in the cinder.conf. 
Note that you need to " - "have at least one pool configured.") % - {'idx': idx}) - LOG.error(msg) - raise exception.InvalidParameterValue(err=msg) - - if config['services'][pool_name]['pool_name'] is None: - msg = (_("The config parameter " - "hnas_svc%(idx)s_pool_name is not set " - "in the cinder.conf. Note that you need to " - "have at least one pool configured.") % - {'idx': idx}) - LOG.error(msg) - raise exception.InvalidParameterValue(err=msg) - - -def _xml_read(root, element, check=None): - """Read an xml element. - - :param root: XML object - :param element: string desired tag - :param check: string if present, throw exception if element missing - """ - - val = root.findtext(element) - - # mandatory parameter not found - if val is None and check: - LOG.error("Mandatory parameter not found: %(p)s", {'p': element}) - raise exception.ParameterNotFound(param=element) - - # tag not found - if val is None: - return None - - svc_tag_pattern = re.compile("svc_[0-3]$") - # tag found but empty parameter. - if not val.strip(): - if svc_tag_pattern.search(element): - return "" - LOG.error("Parameter not found: %(param)s", {'param': element}) - raise exception.ParameterNotFound(param=element) - - LOG.debug("%(element)s: %(val)s", - {'element': element, - 'val': val if element != 'password' else '***'}) - - return val.strip() - - -def read_xml_config(xml_config_file, svc_params, optional_params): - """Read Hitachi driver specific xml config file. - - :param xml_config_file: string filename containing XML configuration - :param svc_params: parameters to configure the services - - .. code:: python - - ['volume_type', 'hdp'] - - :param optional_params: parameters to configure that are not mandatory - - .. 
code:: python - - ['ssc_cmd', 'cluster_admin_ip0', 'chap_enabled'] - - """ - - if not os.access(xml_config_file, os.R_OK): - msg = (_("Can't find HNAS configurations on cinder.conf neither " - "on the path %(xml)s.") % {'xml': xml_config_file}) - LOG.error(msg) - raise exception.ConfigNotFound(message=msg) - else: - LOG.warning("This XML configuration file %(xml)s is deprecated. " - "Please, move all the configurations to the " - "cinder.conf file. If you keep both configuration " - "files, the options set on cinder.conf will be " - "used.", {'xml': xml_config_file}) - - try: - root = ETree.parse(xml_config_file).getroot() - except ETree.ParseError: - msg = (_("Error parsing config file: %(xml_config_file)s") % - {'xml_config_file': xml_config_file}) - LOG.error(msg) - raise exception.ConfigNotFound(message=msg) - - # mandatory parameters for NFS - config = {} - arg_prereqs = ['mgmt_ip0', 'username'] - for req in arg_prereqs: - config[req] = _xml_read(root, req, 'check') - - # optional parameters for NFS - for req in optional_params: - config[req] = _xml_read(root, req) - if config[req] is None and HNAS_DEFAULT_CONFIG.get(req) is not None: - config[req] = HNAS_DEFAULT_CONFIG.get(req) - - config['ssh_private_key'] = _xml_read(root, 'ssh_private_key') - config['password'] = _xml_read(root, 'password') - - if config['ssh_private_key'] is None and config['password'] is None: - msg = _("Missing authentication option (passw or private key file).") - LOG.error(msg) - raise exception.ConfigNotFound(message=msg) - - if _xml_read(root, 'ssh_port') is not None: - config['ssh_port'] = int(_xml_read(root, 'ssh_port')) - else: - config['ssh_port'] = HNAS_DEFAULT_CONFIG['ssh_port'] - - config['fs'] = {} - config['services'] = {} - - # min one needed - for svc in ['svc_0', 'svc_1', 'svc_2', 'svc_3']: - if _xml_read(root, svc) is None: - continue - service = {'label': svc} - - # none optional - for arg in svc_params: - service[arg] = _xml_read(root, svc + '/' + arg, 'check') - - # 
Backward compatibility with volume_type - service.setdefault('pool_name', service.pop('volume_type', None)) - - config['services'][service['pool_name']] = service - config['fs'][service['hdp']] = service['hdp'] - - # at least one service required! - if not config['services'].keys(): - LOG.error("No service found in xml config file") - raise exception.ParameterNotFound(param="svc_0") - - return config - - -def get_pool(config, volume): - """Get the pool of a volume. - - :param config: dictionary containing the configuration parameters - :param volume: dictionary volume reference - :returns: the pool related to the volume - """ - - if volume.volume_type: - metadata = {} - type_id = volume.volume_type_id - if type_id is not None: - metadata = volume_types.get_volume_type_extra_specs(type_id) - if metadata.get('service_label'): - if metadata['service_label'] in config['services'].keys(): - return metadata['service_label'] - return 'default' - - -def read_cinder_conf(config_opts): - """Reads cinder.conf - - Gets the driver specific information set on cinder.conf configuration - file. - - :param config_opts: Configuration object that contains the information - needed by HNAS driver - :param dv_type: The type of the driver (NFS or iSCSI) - :returns: Dictionary with the driver configuration - """ - - config = {} - config['services'] = {} - config['fs'] = {} - mandatory_parameters = ['username', 'password', 'mgmt_ip0'] - optional_parameters = ['ssc_cmd', - 'ssh_port', 'cluster_admin_ip0', - 'ssh_private_key'] - - # Trying to get the mandatory parameters from cinder.conf - for opt in mandatory_parameters: - config[opt] = config_opts.safe_get('hnas_%(opt)s' % {'opt': opt}) - - # If there is at least one of the mandatory parameters in - # cinder.conf, we assume that we should use the configuration - # from this file. - # Otherwise, we use the configuration from the deprecated XML file. 
- for param in mandatory_parameters: - if config[param] is not None: - break - else: - return None - - # Getting the optional parameters from cinder.conf - for opt in optional_parameters: - config[opt] = config_opts.safe_get('hnas_%(opt)s' % {'opt': opt}) - - # It's possible to have up to 4 pools configured. - for i in range(0, 4): - idx = six.text_type(i) - svc_pool_name = (config_opts.safe_get( - 'hnas_svc%(idx)s_pool_name' % {'idx': idx})) - - svc_hdp = (config_opts.safe_get( - 'hnas_svc%(idx)s_hdp' % {'idx': idx})) - - # It's mandatory to have at least 1 pool configured (svc_0) - if (idx == '0' or svc_pool_name is not None or - svc_hdp is not None): - config['services'][svc_pool_name] = {} - config['fs'][svc_hdp] = svc_hdp - config['services'][svc_pool_name]['hdp'] = svc_hdp - config['services'][svc_pool_name]['pool_name'] = svc_pool_name - - config['services'][svc_pool_name]['label'] = ( - 'svc_%(idx)s' % {'idx': idx}) - # Checking to ensure that the pools configurations are complete - _check_conf_params(config, svc_pool_name, idx) - - return config diff --git a/cinder/volume/drivers/hitachi/vsp_common.py b/cinder/volume/drivers/hitachi/vsp_common.py deleted file mode 100644 index a6ae748b9..000000000 --- a/cinder/volume/drivers/hitachi/vsp_common.py +++ /dev/null @@ -1,955 +0,0 @@ -# Copyright (C) 2016, Hitachi, Ltd. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
-# -"""Common module for Hitachi VSP Driver.""" - -import abc -import re - -from oslo_config import cfg -from oslo_log import log as logging -from oslo_utils import excutils -from oslo_utils import units -import six - -from cinder import coordination -from cinder import exception -from cinder import utils as cinder_utils -from cinder.volume import configuration -from cinder.volume.drivers.hitachi import vsp_utils as utils -from cinder.volume import utils as volume_utils - - -VERSION = '1.0.0' - -_COPY_METHOD = set(['FULL', 'THIN']) - -_INHERITED_VOLUME_OPTS = [ - 'volume_backend_name', - 'volume_driver', - 'reserved_percentage', - 'use_multipath_for_image_xfer', - 'enforce_multipath_for_image_xfer', - 'num_volume_device_scan_tries', -] - -common_opts = [ - cfg.StrOpt( - 'vsp_storage_id', - help='Product number of the storage system.'), - cfg.StrOpt( - 'vsp_pool', - help='Pool number or pool name of the DP pool.'), - cfg.StrOpt( - 'vsp_thin_pool', - help='Pool number or pool name of the Thin Image pool.'), - cfg.StrOpt( - 'vsp_ldev_range', - help='Range of the LDEV numbers in the format of \'xxxx-yyyy\' that ' - 'can be used by the driver. Values can be in decimal format ' - '(e.g. 1000) or in colon-separated hexadecimal format ' - '(e.g. 00:03:E8).'), - cfg.StrOpt( - 'vsp_default_copy_method', - default='FULL', - choices=['FULL', 'THIN'], - help='Method of volume copy. FULL indicates full data copy by ' - 'Shadow Image and THIN indicates differential data copy by Thin ' - 'Image.'), - cfg.IntOpt( - 'vsp_copy_speed', - min=1, - max=15, - default=3, - help='Speed at which data is copied by Shadow Image. 
1 or 2 indicates ' - 'low speed, 3 indicates middle speed, and a value between 4 and ' - '15 indicates high speed.'), - cfg.IntOpt( - 'vsp_copy_check_interval', - min=1, - max=600, - default=3, - help='Interval in seconds at which volume pair synchronization status ' - 'is checked when volume pairs are created.'), - cfg.IntOpt( - 'vsp_async_copy_check_interval', - min=1, - max=600, - default=10, - help='Interval in seconds at which volume pair synchronization status ' - 'is checked when volume pairs are deleted.'), - cfg.ListOpt( - 'vsp_target_ports', - help='IDs of the storage ports used to attach volumes to the ' - 'controller node. To specify multiple ports, connect them by ' - 'commas (e.g. CL1-A,CL2-A).'), - cfg.ListOpt( - 'vsp_compute_target_ports', - help='IDs of the storage ports used to attach volumes to compute ' - 'nodes. To specify multiple ports, connect them by commas ' - '(e.g. CL1-A,CL2-A).'), - cfg.BoolOpt( - 'vsp_group_request', - default=False, - help='If True, the driver will create host groups or iSCSI targets on ' - 'storage ports as needed.'), -] - -_REQUIRED_COMMON_OPTS = [ - 'vsp_storage_id', - 'vsp_pool', -] - -CONF = cfg.CONF -CONF.register_opts(common_opts, group=configuration.SHARED_CONF_GROUP) - -LOG = logging.getLogger(__name__) -MSG = utils.VSPMsg - - -def _str2int(num): - """Convert a string into an integer.""" - if not num: - return None - if num.isdigit(): - return int(num) - if not re.match(r'\w\w:\w\w:\w\w', num): - return None - try: - return int(num.replace(':', ''), 16) - except ValueError: - return None - - -@six.add_metaclass(abc.ABCMeta) -class VSPCommon(object): - """Common class for Hitachi VSP Driver.""" - - def __init__(self, conf, driverinfo, db): - """Initialize instance variables.""" - self.conf = conf - self.db = db - self.ctxt = None - self.lock = {} - self.driver_info = driverinfo - self.storage_info = { - 'protocol': driverinfo['proto'], - 'pool_id': None, - 'ldev_range': [], - 'controller_ports': [], - 
'compute_ports': [], - 'pair_ports': [], - 'wwns': {}, - 'portals': {}, - 'output_first': True, - } - - self._stats = {} - - def run_and_verify_storage_cli(self, *cmd, **kwargs): - """Run storage CLI and return the result or raise an exception.""" - do_raise = kwargs.pop('do_raise', True) - ignore_error = kwargs.get('ignore_error') - success_code = kwargs.get('success_code', set([0])) - (ret, stdout, stderr) = self.run_storage_cli(*cmd, **kwargs) - if (ret not in success_code and - not utils.check_ignore_error(ignore_error, stderr)): - msg = utils.output_log( - MSG.STORAGE_COMMAND_FAILED, cmd=utils.mask_password(cmd), - ret=ret, out=' '.join(stdout.splitlines()), - err=' '.join(stderr.splitlines())) - if do_raise: - raise exception.VSPError(msg) - return ret, stdout, stderr - - @abc.abstractmethod - def run_storage_cli(self, *cmd, **kwargs): - """Run storage CLI.""" - raise NotImplementedError() - - def get_copy_method(self, metadata): - """Return copy method(FULL or THIN).""" - method = metadata.get( - 'copy_method', self.conf.vsp_default_copy_method) - if method not in _COPY_METHOD: - msg = utils.output_log(MSG.INVALID_PARAMETER_VALUE, - meta='copy_method') - raise exception.VSPError(msg) - if method == 'THIN' and not self.conf.vsp_thin_pool: - msg = utils.output_log(MSG.INVALID_PARAMETER, - param='vsp_thin_pool') - raise exception.VSPError(msg) - return method - - def create_volume(self, volume): - """Create a volume and return its properties.""" - try: - ldev = self.create_ldev(volume['size']) - except exception.VSPError: - with excutils.save_and_reraise_exception(): - utils.output_log(MSG.CREATE_LDEV_FAILED) - return { - 'provider_location': six.text_type(ldev), - } - - def create_ldev(self, size, is_vvol=False): - """Create an LDEV and return its LDEV number.""" - ldev = self.get_unused_ldev() - self.create_ldev_on_storage(ldev, size, is_vvol) - LOG.debug('Created logical device. 
(LDEV: %s)', ldev) - return ldev - - @abc.abstractmethod - def create_ldev_on_storage(self, ldev, size, is_vvol): - """Create an LDEV on the storage system.""" - raise NotImplementedError() - - @abc.abstractmethod - def get_unused_ldev(self): - """Find an unused LDEV and return its LDEV number.""" - raise NotImplementedError() - - def create_volume_from_snapshot(self, volume, snapshot): - """Create a volume from a snapshot and return its properties.""" - ldev = utils.get_ldev(snapshot) - # When 'ldev' is 0, it should be true. - # Therefore, it cannot remove 'is None'. - if ldev is None: - msg = utils.output_log( - MSG.INVALID_LDEV_FOR_VOLUME_COPY, type='snapshot', - id=snapshot['id']) - raise exception.VSPError(msg) - size = volume['size'] - metadata = utils.get_volume_metadata(volume) - if size < snapshot['volume_size']: - msg = utils.output_log( - MSG.INVALID_VOLUME_SIZE_FOR_COPY, type='snapshot', - volume_id=volume['id']) - raise exception.VSPError(msg) - elif (size > snapshot['volume_size'] and not self.check_vvol(ldev) and - self.get_copy_method(metadata) == "THIN"): - msg = utils.output_log(MSG.INVALID_VOLUME_SIZE_FOR_TI, - copy_method=utils.THIN, - type='snapshot', volume_id=volume['id']) - raise exception.VSPError(msg) - sync = size > snapshot['volume_size'] - new_ldev = self._copy_ldev( - ldev, snapshot['volume_size'], metadata, sync) - if sync: - self.delete_pair(new_ldev) - self.extend_ldev(new_ldev, snapshot['volume_size'], size) - return { - 'provider_location': six.text_type(new_ldev), - } - - def _copy_ldev(self, ldev, size, metadata, sync=False): - """Create a copy of the specified volume and return its properties.""" - try: - return self.copy_on_storage(ldev, size, metadata, sync) - except exception.VSPNotSupported: - return self._copy_on_host(ldev, size) - - def _copy_on_host(self, src_ldev, size): - """Create a copy of the specified LDEV via host.""" - dest_ldev = self.create_ldev(size) - try: - self._copy_with_dd(src_ldev, dest_ldev, size) - 
except Exception: - with excutils.save_and_reraise_exception(): - try: - self._delete_ldev(dest_ldev) - except exception.VSPError: - utils.output_log(MSG.DELETE_LDEV_FAILED, ldev=dest_ldev) - return dest_ldev - - def _copy_with_dd(self, src_ldev, dest_ldev, size): - """Copy the content of a volume by dd command.""" - src_info = None - dest_info = None - properties = cinder_utils.brick_get_connector_properties( - multipath=self.conf.use_multipath_for_image_xfer, - enforce_multipath=self.conf.enforce_multipath_for_image_xfer) - try: - dest_info = self._attach_ldev(dest_ldev, properties) - src_info = self._attach_ldev(src_ldev, properties) - volume_utils.copy_volume( - src_info['device']['path'], dest_info['device']['path'], - size * units.Ki, self.conf.volume_dd_blocksize) - finally: - if src_info: - self._detach_ldev(src_info, src_ldev, properties) - if dest_info: - self._detach_ldev(dest_info, dest_ldev, properties) - self.discard_zero_page({'provider_location': six.text_type(dest_ldev)}) - - def _attach_ldev(self, ldev, properties): - """Attach the specified LDEV to the server.""" - volume = { - 'provider_location': six.text_type(ldev), - } - conn = self.initialize_connection(volume, properties) - try: - connector = cinder_utils.brick_get_connector( - conn['driver_volume_type'], - use_multipath=self.conf.use_multipath_for_image_xfer, - device_scan_attempts=self.conf.num_volume_device_scan_tries, - conn=conn) - device = connector.connect_volume(conn['data']) - except Exception as ex: - with excutils.save_and_reraise_exception(): - utils.output_log(MSG.CONNECT_VOLUME_FAILED, ldev=ldev, - reason=six.text_type(ex)) - self._terminate_connection(volume, properties) - return { - 'conn': conn, - 'device': device, - 'connector': connector, - } - - def _detach_ldev(self, attach_info, ldev, properties): - """Detach the specified LDEV from the server.""" - volume = { - 'provider_location': six.text_type(ldev), - } - connector = attach_info['connector'] - try: - 
connector.disconnect_volume( - attach_info['conn']['data'], attach_info['device']) - except Exception as ex: - utils.output_log(MSG.DISCONNECT_VOLUME_FAILED, ldev=ldev, - reason=six.text_type(ex)) - self._terminate_connection(volume, properties) - - def _terminate_connection(self, volume, connector): - """Disconnect the specified volume from the server.""" - try: - self.terminate_connection(volume, connector) - except exception.VSPError: - utils.output_log(MSG.UNMAP_LDEV_FAILED, - ldev=utils.get_ldev(volume)) - - def copy_on_storage(self, pvol, size, metadata, sync): - """Create a copy of the specified LDEV on the storage.""" - is_thin = self.get_copy_method(metadata) == "THIN" - svol = self.create_ldev(size, is_vvol=is_thin) - try: - self.create_pair_on_storage(pvol, svol, is_thin) - if sync: - self.wait_full_copy_completion(pvol, svol) - except exception.VSPError: - with excutils.save_and_reraise_exception(): - try: - self._delete_ldev(svol) - except exception.VSPError: - utils.output_log(MSG.DELETE_LDEV_FAILED, ldev=svol) - return svol - - @abc.abstractmethod - def create_pair_on_storage(self, pvol, svol, is_thin): - """Create a copy pair on the storage.""" - raise NotImplementedError() - - def _delete_ldev(self, ldev): - """Delete the specified LDEV.""" - self.delete_pair(ldev) - self.unmap_ldev_from_storage(ldev) - self.delete_ldev_from_storage(ldev) - - def unmap_ldev_from_storage(self, ldev): - """Delete the connection between the specified LDEV and servers.""" - targets = { - 'list': [], - } - self.find_all_mapped_targets_from_storage(targets, ldev) - self.unmap_ldev(targets, ldev) - - @abc.abstractmethod - def find_all_mapped_targets_from_storage(self, targets, ldev): - """Add all port-gids connected with the LDEV to the list.""" - raise NotImplementedError() - - def delete_pair(self, ldev, all_split=True): - """Disconnect all volume pairs to which the specified LDEV belongs.""" - pair_info = self.get_pair_info(ldev) - if not pair_info: - return - if 
pair_info['pvol'] == ldev: - self.delete_pair_based_on_pvol(pair_info, all_split) - else: - self.delete_pair_based_on_svol( - pair_info['pvol'], pair_info['svol_info'][0]) - - @abc.abstractmethod - def get_pair_info(self, ldev): - """Return volume pair info(LDEV number, pair status and pair type).""" - raise NotImplementedError() - - @abc.abstractmethod - def delete_pair_based_on_pvol(self, pair_info, all_split): - """Disconnect all volume pairs to which the specified P-VOL belongs.""" - raise NotImplementedError() - - @abc.abstractmethod - def delete_pair_based_on_svol(self, pvol, svol_info): - """Disconnect all volume pairs to which the specified S-VOL belongs.""" - raise NotImplementedError() - - @abc.abstractmethod - def delete_pair_from_storage(self, pvol, svol, is_thin): - """Disconnect the volume pair that consists of the specified LDEVs.""" - raise NotImplementedError() - - @abc.abstractmethod - def delete_ldev_from_storage(self, ldev): - """Delete the specified LDEV from the storage.""" - raise NotImplementedError() - - def create_cloned_volume(self, volume, src_vref): - """Create a clone of the specified volume and return its properties.""" - ldev = utils.get_ldev(src_vref) - # When 'ldev' is 0, it should be true. - # Therefore, it cannot remove 'is not None'. 
- if ldev is None: - msg = utils.output_log(MSG.INVALID_LDEV_FOR_VOLUME_COPY, - type='volume', id=src_vref['id']) - raise exception.VSPError(msg) - size = volume['size'] - metadata = utils.get_volume_metadata(volume) - if size < src_vref['size']: - msg = utils.output_log(MSG.INVALID_VOLUME_SIZE_FOR_COPY, - type='volume', volume_id=volume['id']) - raise exception.VSPError(msg) - elif (size > src_vref['size'] and not self.check_vvol(ldev) and - self.get_copy_method(metadata) == "THIN"): - msg = utils.output_log(MSG.INVALID_VOLUME_SIZE_FOR_TI, - copy_method=utils.THIN, type='volume', - volume_id=volume['id']) - raise exception.VSPError(msg) - sync = size > src_vref['size'] - new_ldev = self._copy_ldev(ldev, src_vref['size'], metadata, sync) - if sync: - self.delete_pair(new_ldev) - self.extend_ldev(new_ldev, src_vref['size'], size) - return { - 'provider_location': six.text_type(new_ldev), - } - - def delete_volume(self, volume): - """Delete the specified volume.""" - ldev = utils.get_ldev(volume) - # When 'ldev' is 0, it should be true. - # Therefore, it cannot remove 'is not None'. - if ldev is None: - utils.output_log(MSG.INVALID_LDEV_FOR_DELETION, - method='delete_volume', id=volume['id']) - return - try: - self._delete_ldev(ldev) - except exception.VSPBusy: - raise exception.VolumeIsBusy(volume_name=volume['name']) - - def create_snapshot(self, snapshot): - """Create a snapshot from a volume and return its properties.""" - src_vref = snapshot.volume - ldev = utils.get_ldev(src_vref) - # When 'ldev' is 0, it should be true. - # Therefore, it cannot remove 'is None'. 
- if ldev is None: - msg = utils.output_log(MSG.INVALID_LDEV_FOR_VOLUME_COPY, - type='volume', id=src_vref['id']) - raise exception.VSPError(msg) - size = snapshot['volume_size'] - metadata = utils.get_volume_metadata(src_vref) - new_ldev = self._copy_ldev(ldev, size, metadata) - return { - 'provider_location': six.text_type(new_ldev), - } - - def delete_snapshot(self, snapshot): - """Delete the specified snapshot.""" - ldev = utils.get_ldev(snapshot) - # When 'ldev' is 0, it should be true. - # Therefore, it cannot remove 'is None'. - if ldev is None: - utils.output_log( - MSG.INVALID_LDEV_FOR_DELETION, method='delete_snapshot', - id=snapshot['id']) - return - try: - self._delete_ldev(ldev) - except exception.VSPBusy: - raise exception.SnapshotIsBusy(snapshot_name=snapshot['name']) - - def get_volume_stats(self, refresh=False): - """Return properties, capabilities and current states of the driver.""" - if refresh: - if self.storage_info['output_first']: - self.storage_info['output_first'] = False - utils.output_log(MSG.DRIVER_READY_FOR_USE, - config_group=self.conf.config_group) - self._update_volume_stats() - return self._stats - - def _update_volume_stats(self): - """Update properties, capabilities and current states of the driver.""" - data = {} - backend_name = self.conf.safe_get('volume_backend_name') - data['volume_backend_name'] = ( - backend_name or self.driver_info['volume_backend_name']) - data['vendor_name'] = 'Hitachi' - data['driver_version'] = VERSION - data['storage_protocol'] = self.storage_info['protocol'] - try: - total_gb, free_gb = self.get_pool_info() - except exception.VSPError: - utils.output_log(MSG.POOL_INFO_RETRIEVAL_FAILED, - pool=self.conf.vsp_pool) - return - data['total_capacity_gb'] = total_gb - data['free_capacity_gb'] = free_gb - data['reserved_percentage'] = self.conf.safe_get('reserved_percentage') - data['QoS_support'] = False - data['multiattach'] = False - LOG.debug("Updating volume status. 
(%s)", data) - self._stats = data - - @abc.abstractmethod - def get_pool_info(self): - """Return the total and free capacity of the storage pool.""" - raise NotImplementedError() - - @abc.abstractmethod - def discard_zero_page(self, volume): - """Return the volume's no-data pages to the storage pool.""" - raise NotImplementedError() - - def extend_volume(self, volume, new_size): - """Extend the specified volume to the specified size.""" - ldev = utils.get_ldev(volume) - # When 'ldev' is 0, it should be true. - # Therefore, it cannot remove 'is None'. - if ldev is None: - msg = utils.output_log(MSG.INVALID_LDEV_FOR_EXTENSION, - volume_id=volume['id']) - raise exception.VSPError(msg) - if self.check_vvol(ldev): - msg = utils.output_log(MSG.INVALID_VOLUME_TYPE_FOR_EXTEND, - volume_id=volume['id']) - raise exception.VSPError(msg) - self.delete_pair(ldev) - self.extend_ldev(ldev, volume['size'], new_size) - - @abc.abstractmethod - def check_vvol(self, ldev): - """Return True if the specified LDEV is V-VOL, False otherwise.""" - raise NotImplementedError() - - @abc.abstractmethod - def extend_ldev(self, ldev, old_size, new_size): - """Extend the specified LDEV to the specified new size.""" - raise NotImplementedError() - - def manage_existing(self, existing_ref): - """Return volume properties which Cinder needs to manage the volume.""" - ldev = _str2int(existing_ref.get('source-id')) - return { - 'provider_location': six.text_type(ldev), - } - - def manage_existing_get_size(self, existing_ref): - """Return the size[GB] of the specified volume.""" - ldev = _str2int(existing_ref.get('source-id')) - # When 'ldev' is 0, it should be true. - # Therefore, it cannot remove 'is None'. 
- if ldev is None: - msg = utils.output_log(MSG.INVALID_LDEV_FOR_MANAGE) - raise exception.ManageExistingInvalidReference( - existing_ref=existing_ref, reason=msg) - return self.get_ldev_size_in_gigabyte(ldev, existing_ref) - - @abc.abstractmethod - def get_ldev_size_in_gigabyte(self, ldev, existing_ref): - """Return the size[GB] of the specified LDEV.""" - raise NotImplementedError() - - def unmanage(self, volume): - """Prepare the volume for removing it from Cinder management.""" - ldev = utils.get_ldev(volume) - # When 'ldev' is 0, it should be true. - # Therefore, it cannot remove 'is None'. - if ldev is None: - utils.output_log(MSG.INVALID_LDEV_FOR_DELETION, method='unmanage', - id=volume['id']) - return - if self.check_vvol(ldev): - utils.output_log( - MSG.INVALID_LDEV_TYPE_FOR_UNMANAGE, volume_id=volume['id'], - volume_type=utils.NORMAL_LDEV_TYPE) - raise exception.VolumeIsBusy(volume_name=volume['name']) - try: - self.delete_pair(ldev) - except exception.VSPBusy: - raise exception.VolumeIsBusy(volume_name=volume['name']) - - def do_setup(self, context): - """Prepare for the startup of the driver.""" - self.ctxt = context - - self.check_param() - self.config_lock() - self.connect_storage() - self.init_cinder_hosts() - self.output_param_to_log() - - def check_param(self): - """Check parameter values and consistency among them.""" - utils.check_opt_value(self.conf, _INHERITED_VOLUME_OPTS) - utils.check_opts(self.conf, common_opts) - utils.check_opts(self.conf, self.driver_info['volume_opts']) - if (self.conf.vsp_default_copy_method == 'THIN' and - not self.conf.vsp_thin_pool): - msg = utils.output_log(MSG.INVALID_PARAMETER, - param='vsp_thin_pool') - raise exception.VSPError(msg) - if self.conf.vsp_ldev_range: - self.storage_info['ldev_range'] = self._range2list( - 'vsp_ldev_range') - if (not self.conf.vsp_target_ports and - not self.conf.vsp_compute_target_ports): - msg = utils.output_log(MSG.INVALID_PARAMETER, - param='vsp_target_ports or ' - 
'vsp_compute_target_ports') - raise exception.VSPError(msg) - for opt in _REQUIRED_COMMON_OPTS: - if not self.conf.safe_get(opt): - msg = utils.output_log(MSG.INVALID_PARAMETER, param=opt) - raise exception.VSPError(msg) - if self.storage_info['protocol'] == 'iSCSI': - self.check_param_iscsi() - - def check_param_iscsi(self): - """Check iSCSI-related parameter values and consistency among them.""" - if self.conf.vsp_use_chap_auth: - if not self.conf.vsp_auth_user: - msg = utils.output_log(MSG.INVALID_PARAMETER, - param='vsp_auth_user') - raise exception.VSPError(msg) - if not self.conf.vsp_auth_password: - msg = utils.output_log(MSG.INVALID_PARAMETER, - param='vsp_auth_password') - raise exception.VSPError(msg) - - def _range2list(self, param): - """Analyze a 'xxx-xxx' string and return a list of two integers.""" - values = [_str2int(value) for value in - self.conf.safe_get(param).split('-')] - if (len(values) != 2 or - values[0] is None or values[1] is None or - values[0] > values[1]): - msg = utils.output_log(MSG.INVALID_PARAMETER, param=param) - raise exception.VSPError(msg) - return values - - @abc.abstractmethod - def config_lock(self): - """Initialize lock resource names.""" - raise NotImplementedError() - - def connect_storage(self): - """Prepare for using the storage.""" - self.storage_info['pool_id'] = self.get_pool_id() - # When 'pool_id' is 0, it should be true. - # Therefore, it cannot remove 'is None'. 
- if self.storage_info['pool_id'] is None: - msg = utils.output_log(MSG.POOL_NOT_FOUND, pool=self.conf.vsp_pool) - raise exception.VSPError(msg) - utils.output_log(MSG.SET_CONFIG_VALUE, object='DP Pool ID', - value=self.storage_info['pool_id']) - - def check_ports_info(self): - """Check if available storage ports exist.""" - if (self.conf.vsp_target_ports and - not self.storage_info['controller_ports']): - msg = utils.output_log(MSG.RESOURCE_NOT_FOUND, - resource="Target ports") - raise exception.VSPError(msg) - if (self.conf.vsp_compute_target_ports and - not self.storage_info['compute_ports']): - msg = utils.output_log(MSG.RESOURCE_NOT_FOUND, - resource="Compute target ports") - raise exception.VSPError(msg) - utils.output_log(MSG.SET_CONFIG_VALUE, object='target port list', - value=self.storage_info['controller_ports']) - utils.output_log(MSG.SET_CONFIG_VALUE, - object='compute target port list', - value=self.storage_info['compute_ports']) - - def get_pool_id(self): - """Return the storage pool ID as integer.""" - pool = self.conf.vsp_pool - if pool.isdigit(): - return int(pool) - return None - - def init_cinder_hosts(self, **kwargs): - """Initialize server-storage connection.""" - targets = kwargs.pop('targets', {'info': {}, 'list': [], 'iqns': {}}) - connector = cinder_utils.brick_get_connector_properties( - multipath=self.conf.use_multipath_for_image_xfer, - enforce_multipath=self.conf.enforce_multipath_for_image_xfer) - target_ports = self.storage_info['controller_ports'] - - if target_ports: - if (self.find_targets_from_storage( - targets, connector, target_ports) and - self.conf.vsp_group_request): - self.create_mapping_targets(targets, connector) - - utils.require_target_existed(targets) - - @abc.abstractmethod - def find_targets_from_storage(self, targets, connector, target_ports): - """Find mapped ports, memorize them and return unmapped port count.""" - raise NotImplementedError() - - def create_mapping_targets(self, targets, connector): - """Create 
server-storage connection for all specified storage ports.""" - hba_ids = self.get_hba_ids_from_connector(connector) - for port in targets['info'].keys(): - if targets['info'][port]: - continue - - try: - self._create_target(targets, port, connector, hba_ids) - except exception.VSPError: - utils.output_log( - self.driver_info['msg_id']['target'], port=port) - - if not targets['list']: - self.find_targets_from_storage( - targets, connector, targets['info'].keys()) - - def get_hba_ids_from_connector(self, connector): - """Return the HBA ID stored in the connector.""" - if self.driver_info['hba_id'] in connector: - return connector[self.driver_info['hba_id']] - msg = utils.output_log(MSG.RESOURCE_NOT_FOUND, - resource=self.driver_info['hba_id_type']) - raise exception.VSPError(msg) - - def _create_target(self, targets, port, connector, hba_ids): - """Create a host group or an iSCSI target on the storage port.""" - target_name, gid = self.create_target_to_storage(port, connector, - hba_ids) - utils.output_log(MSG.OBJECT_CREATED, object='a target', - details='port: %(port)s, gid: %(gid)s, target_name: ' - '%(target)s' % - {'port': port, 'gid': gid, 'target': target_name}) - try: - self.set_target_mode(port, gid) - self.set_hba_ids(port, gid, hba_ids) - except exception.VSPError: - with excutils.save_and_reraise_exception(): - self.delete_target_from_storage(port, gid) - targets['info'][port] = True - targets['list'].append((port, gid)) - - @abc.abstractmethod - def create_target_to_storage(self, port, connector, hba_ids): - """Create a host group or an iSCSI target on the specified port.""" - raise NotImplementedError() - - @abc.abstractmethod - def set_target_mode(self, port, gid): - """Configure the target to meet the environment.""" - raise NotImplementedError() - - @abc.abstractmethod - def set_hba_ids(self, port, gid, hba_ids): - """Connect all specified HBAs with the specified port.""" - raise NotImplementedError() - - @abc.abstractmethod - def 
delete_target_from_storage(self, port, gid): - """Delete the host group or the iSCSI target from the port.""" - raise NotImplementedError() - - def output_param_to_log(self): - """Output configuration parameter values to the log file.""" - utils.output_log(MSG.OUTPUT_PARAMETER_VALUES, - config_group=self.conf.config_group) - name, version = self.get_storage_cli_info() - utils.output_storage_cli_info(name, version) - utils.output_opt_info(self.conf, _INHERITED_VOLUME_OPTS) - utils.output_opts(self.conf, common_opts) - utils.output_opts(self.conf, self.driver_info['volume_opts']) - - @abc.abstractmethod - def get_storage_cli_info(self): - """Return a tuple of the storage CLI name and its version.""" - raise NotImplementedError() - - @coordination.synchronized('vsp-host-{self.conf.vsp_storage_id}-' - '{connector[host]}') - def initialize_connection(self, volume, connector): - """Initialize connection between the server and the volume.""" - targets = { - 'info': {}, - 'list': [], - 'lun': {}, - 'iqns': {}, - } - ldev = utils.get_ldev(volume) - # When 'ldev' is 0, it should be true. - # Therefore, it cannot remove 'is None'. 
- if ldev is None: - msg = utils.output_log(MSG.INVALID_LDEV_FOR_CONNECTION, - volume_id=volume['id']) - raise exception.VSPError(msg) - - target_ports = self.get_target_ports(connector) - if (self.find_targets_from_storage( - targets, connector, target_ports) and - self.conf.vsp_group_request): - self.create_mapping_targets(targets, connector) - - utils.require_target_existed(targets) - - targets['list'].sort() - for port in target_ports: - targets['lun'][port] = False - target_lun = int(self.map_ldev(targets, ldev)) - - return { - 'driver_volume_type': self.driver_info['volume_type'], - 'data': self.get_properties(targets, connector, target_lun), - } - - def get_target_ports(self, connector): - """Return a list of ports corresponding to the specified connector.""" - if 'ip' in connector and connector['ip'] == CONF.my_ip: - return self.storage_info['controller_ports'] - return (self.storage_info['compute_ports'] or - self.storage_info['controller_ports']) - - @abc.abstractmethod - def map_ldev(self, targets, ldev): - """Create the path between the server and the LDEV and return LUN.""" - raise NotImplementedError() - - def get_properties(self, targets, connector, target_lun=None): - """Return server-LDEV connection info.""" - multipath = connector.get('multipath', False) - if self.storage_info['protocol'] == 'FC': - data = self.get_properties_fc(targets) - elif self.storage_info['protocol'] == 'iSCSI': - data = self.get_properties_iscsi(targets, multipath) - if target_lun is not None: - data['target_discovered'] = False - if not multipath or self.storage_info['protocol'] == 'FC': - data['target_lun'] = target_lun - else: - target_luns = [] - for target in targets['list']: - if targets['lun'][target[0]]: - target_luns.append(target_lun) - data['target_luns'] = target_luns - return data - - def get_properties_fc(self, targets): - """Return FC-specific server-LDEV connection info.""" - data = {} - data['target_wwn'] = [ - self.storage_info['wwns'][target[0]] for 
target in targets['list'] - if targets['lun'][target[0]]] - return data - - def get_properties_iscsi(self, targets, multipath): - """Return iSCSI-specific server-LDEV connection info.""" - data = {} - primary_target = targets['list'][0] - if not multipath: - data['target_portal'] = self.storage_info[ - 'portals'][primary_target[0]] - data['target_iqn'] = targets['iqns'][primary_target] - else: - data['target_portals'] = [ - self.storage_info['portals'][target[0]] for target in - targets['list'] if targets['lun'][target[0]]] - data['target_iqns'] = [ - targets['iqns'][target] for target in targets['list'] - if targets['lun'][target[0]]] - if self.conf.vsp_use_chap_auth: - data['auth_method'] = 'CHAP' - data['auth_username'] = self.conf.vsp_auth_user - data['auth_password'] = self.conf.vsp_auth_password - return data - - @coordination.synchronized('vsp-host-{self.conf.vsp_storage_id}-' - '{connector[host]}') - def terminate_connection(self, volume, connector): - """Terminate connection between the server and the volume.""" - targets = { - 'info': {}, - 'list': [], - 'iqns': {}, - } - mapped_targets = { - 'list': [], - } - unmap_targets = {} - - ldev = utils.get_ldev(volume) - if ldev is None: - utils.output_log(MSG.INVALID_LDEV_FOR_UNMAPPING, - volume_id=volume['id']) - return - target_ports = self.get_target_ports(connector) - self.find_targets_from_storage(targets, connector, target_ports) - if not targets['list']: - utils.output_log(MSG.NO_CONNECTED_TARGET) - self.find_mapped_targets_from_storage( - mapped_targets, ldev, target_ports) - - unmap_targets['list'] = self.get_unmap_targets_list( - targets['list'], mapped_targets['list']) - unmap_targets['list'].sort(reverse=True) - self.unmap_ldev(unmap_targets, ldev) - - if self.storage_info['protocol'] == 'FC': - target_wwn = [ - self.storage_info['wwns'][port_gid[:utils.PORT_ID_LENGTH]] - for port_gid in unmap_targets['list']] - return {'driver_volume_type': self.driver_info['volume_type'], - 'data': {'target_wwn': 
target_wwn}} - - @abc.abstractmethod - def find_mapped_targets_from_storage(self, targets, ldev, target_ports): - """Find and store IDs of ports used for server-LDEV connection.""" - raise NotImplementedError() - - @abc.abstractmethod - def get_unmap_targets_list(self, target_list, mapped_list): - """Return a list of IDs of ports that need to be disconnected.""" - raise NotImplementedError() - - @abc.abstractmethod - def unmap_ldev(self, targets, ldev): - """Delete the LUN between the specified LDEV and port-gid.""" - raise NotImplementedError() - - @abc.abstractmethod - def wait_full_copy_completion(self, pvol, svol): - """Wait until FULL copy is completed.""" - raise NotImplementedError() diff --git a/cinder/volume/drivers/hitachi/vsp_fc.py b/cinder/volume/drivers/hitachi/vsp_fc.py deleted file mode 100644 index 3ab4b43f1..000000000 --- a/cinder/volume/drivers/hitachi/vsp_fc.py +++ /dev/null @@ -1,181 +0,0 @@ -# Copyright (C) 2016, Hitachi, Ltd. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
-# -"""Fibre channel module for Hitachi VSP Driver.""" - -from oslo_config import cfg - -from cinder import interface -from cinder.volume import configuration -from cinder.volume import driver -from cinder.volume.drivers.hitachi import vsp_common as common -from cinder.volume.drivers.hitachi import vsp_utils as utils - -fc_opts = [ - cfg.BoolOpt( - 'vsp_zoning_request', - default=False, - help='If True, the driver will configure FC zoning between the server ' - 'and the storage system provided that FC zoning manager is ' - 'enabled.'), -] - -MSG = utils.VSPMsg - -_DRIVER_INFO = { - 'proto': 'FC', - 'hba_id': 'wwpns', - 'hba_id_type': 'World Wide Name', - 'msg_id': { - 'target': MSG.CREATE_HOST_GROUP_FAILED, - }, - 'volume_backend_name': utils.DRIVER_PREFIX + 'FC', - 'volume_opts': fc_opts, - 'volume_type': 'fibre_channel', -} - -CONF = cfg.CONF -CONF.register_opts(fc_opts, group=configuration.SHARED_CONF_GROUP) - - -@interface.volumedriver -class VSPFCDriver(driver.FibreChannelDriver): - """Fibre channel class for Hitachi VSP Driver. - - Version history: - - .. code-block:: none - - 1.0.0 - Initial driver. 
- - """ - - VERSION = common.VERSION - - # ThirdPartySystems wiki page - CI_WIKI_NAME = "Hitachi_VSP_CI" - - SUPPORTED = False - - def __init__(self, *args, **kwargs): - """Initialize instance variables.""" - utils.output_log(MSG.DRIVER_INITIALIZATION_START, - driver=self.__class__.__name__, - version=self.get_version()) - super(VSPFCDriver, self).__init__(*args, **kwargs) - - self.configuration.append_config_values(common.common_opts) - self.configuration.append_config_values(fc_opts) - self.common = utils.import_object( - self.configuration, _DRIVER_INFO, kwargs.get('db')) - - def check_for_setup_error(self): - """Error are checked in do_setup() instead of this method.""" - pass - - @utils.output_start_end_log - def create_volume(self, volume): - """Create a volume and return its properties.""" - return self.common.create_volume(volume) - - @utils.output_start_end_log - def create_volume_from_snapshot(self, volume, snapshot): - """Create a volume from a snapshot and return its properties.""" - return self.common.create_volume_from_snapshot(volume, snapshot) - - @utils.output_start_end_log - def create_cloned_volume(self, volume, src_vref): - """Create a clone of the specified volume and return its properties.""" - return self.common.create_cloned_volume(volume, src_vref) - - @utils.output_start_end_log - def delete_volume(self, volume): - """Delete the specified volume.""" - self.common.delete_volume(volume) - - @utils.output_start_end_log - def create_snapshot(self, snapshot): - """Create a snapshot from a volume and return its properties.""" - return self.common.create_snapshot(snapshot) - - @utils.output_start_end_log - def delete_snapshot(self, snapshot): - """Delete the specified snapshot.""" - self.common.delete_snapshot(snapshot) - - def get_volume_stats(self, refresh=False): - """Return properties, capabilities and current states of the driver.""" - return self.common.get_volume_stats(refresh) - - @utils.output_start_end_log - def update_migrated_volume( 
- self, ctxt, volume, new_volume, original_volume_status): - """Do any remaining jobs after migration.""" - self.common.discard_zero_page(new_volume) - super(VSPFCDriver, self).update_migrated_volume( - ctxt, volume, new_volume, original_volume_status) - - @utils.output_start_end_log - def copy_image_to_volume(self, context, volume, image_service, image_id): - """Fetch the image from image_service and write it to the volume.""" - super(VSPFCDriver, self).copy_image_to_volume( - context, volume, image_service, image_id) - self.common.discard_zero_page(volume) - - @utils.output_start_end_log - def extend_volume(self, volume, new_size): - """Extend the specified volume to the specified size.""" - self.common.extend_volume(volume, new_size) - - @utils.output_start_end_log - def manage_existing(self, volume, existing_ref): - """Return volume properties which Cinder needs to manage the volume.""" - return self.common.manage_existing(existing_ref) - - @utils.output_start_end_log - def manage_existing_get_size(self, volume, existing_ref): - """Return the size[GB] of the specified volume.""" - return self.common.manage_existing_get_size(existing_ref) - - @utils.output_start_end_log - def unmanage(self, volume): - """Prepare the volume for removing it from Cinder management.""" - self.common.unmanage(volume) - - @utils.output_start_end_log - def do_setup(self, context): - """Prepare for the startup of the driver.""" - self.common.do_setup(context) - - def ensure_export(self, context, volume): - """Synchronously recreate an export for a volume.""" - pass - - def create_export(self, context, volume, connector): - """Export the volume.""" - pass - - def remove_export(self, context, volume): - """Remove an export for a volume.""" - pass - - @utils.output_start_end_log - def initialize_connection(self, volume, connector): - """Initialize connection between the server and the volume.""" - return self.common.initialize_connection(volume, connector) - - @utils.output_start_end_log - 
def terminate_connection(self, volume, connector, **kwargs): - """Terminate connection between the server and the volume.""" - self.common.terminate_connection(volume, connector) diff --git a/cinder/volume/drivers/hitachi/vsp_horcm.py b/cinder/volume/drivers/hitachi/vsp_horcm.py deleted file mode 100644 index 1df062aeb..000000000 --- a/cinder/volume/drivers/hitachi/vsp_horcm.py +++ /dev/null @@ -1,1437 +0,0 @@ -# Copyright (C) 2016, Hitachi, Ltd. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
-# -"""HORCM interface module for Hitachi VSP Driver.""" - -import functools -import math -import os -import re - -from oslo_config import cfg -from oslo_config import types -from oslo_log import log as logging -from oslo_service import loopingcall -from oslo_utils import excutils -from oslo_utils import timeutils -from oslo_utils import units -import six -from six.moves import range - -from cinder import coordination -from cinder import exception -from cinder import utils as cinder_utils -from cinder.volume import configuration -from cinder.volume.drivers.hitachi import vsp_common as common -from cinder.volume.drivers.hitachi import vsp_utils as utils - -_GETSTORAGEARRAY_ONCE = 1000 -_LU_PATH_DEFINED = 'SSB=0xB958,0x015A' -_ANOTHER_LDEV_MAPPED = 'SSB=0xB958,0x0947' -_NOT_LOCKED = 'SSB=0x2E11,0x2205' -_LOCK_WAITTIME = 2 * 60 * 60 -NORMAL_STS = 'NML' -_LDEV_STATUS_WAITTIME = 120 -_LDEV_CHECK_INTERVAL = 1 -_LDEV_CREATED = ['-check_status', NORMAL_STS] -_LDEV_DELETED = ['-check_status', 'NOT', 'DEFINED'] -_LUN_MAX_WAITTIME = 50 -_LUN_RETRY_INTERVAL = 1 -FULL_ATTR = 'MRCF' -THIN_ATTR = 'QS' -VVOL_ATTR = 'VVOL' -_PERMITTED_TYPES = set(['CVS', 'HDP', 'HDT']) -_PAIR_ATTRS = set([FULL_ATTR, THIN_ATTR]) -_CHECK_KEYS = ('vol_type', 'vol_size', 'num_port', 'vol_attr', 'sts') -_HORCM_WAITTIME = 1 -_EXEC_MAX_WAITTIME = 30 -_EXTEND_WAITTIME = 10 * 60 -_EXEC_RETRY_INTERVAL = 5 -_HORCM_NO_RETRY_ERRORS = [ - 'SSB=0x2E10,0x9705', - 'SSB=0x2E10,0x9706', - 'SSB=0x2E10,0x9707', - 'SSB=0x2E11,0x8303', - 'SSB=0x2E30,0x0007', - 'SSB=0xB956,0x3173', - 'SSB=0xB956,0x31D7', - 'SSB=0xB956,0x31D9', - 'SSB=0xB957,0x4188', - _LU_PATH_DEFINED, - 'SSB=0xB958,0x015E', -] - -SMPL = 1 -PVOL = 2 -SVOL = 3 - -COPY = 2 -PAIR = 3 -PSUS = 4 -PSUE = 5 -UNKN = 0xff - -_STATUS_TABLE = { - 'SMPL': SMPL, - 'COPY': COPY, - 'RCPY': COPY, - 'PAIR': PAIR, - 'PFUL': PAIR, - 'PSUS': PSUS, - 'PFUS': PSUS, - 'SSUS': PSUS, - 'PSUE': PSUE, -} - -_NOT_SET = '-' - -_SMPL_STAUS = set([_NOT_SET, 'SMPL']) - -_HORCM_RUNNING = 
1 -_COPY_GROUP = utils.DRIVER_PREFIX + '-%s%s%03X%d' -_SNAP_NAME = utils.DRIVER_PREFIX + '-SNAP' -_LDEV_NAME = utils.DRIVER_PREFIX + '-LDEV-%d-%d' -_PAIR_TARGET_NAME_BODY = 'pair00' -_PAIR_TARGET_NAME = utils.TARGET_PREFIX + _PAIR_TARGET_NAME_BODY -_MAX_MUNS = 3 - -_SNAP_HASH_SIZE = 8 - -ALL_EXIT_CODE = set(range(256)) -HORCM_EXIT_CODE = set(range(128)) -EX_ENAUTH = 202 -EX_ENOOBJ = 205 -EX_CMDRJE = 221 -EX_ENLDEV = 227 -EX_CMDIOE = 237 -EX_ENOGRP = 239 -EX_INVCMD = 240 -EX_INVMOD = 241 -EX_ENORMT = 242 -EX_ENODEV = 246 -EX_ENOENT = 247 -EX_OPTINV = 248 -EX_ATTDBG = 250 -EX_ATTHOR = 251 -EX_INVARG = 253 -EX_COMERR = 255 -_NO_SUCH_DEVICE = [EX_ENOGRP, EX_ENODEV, EX_ENOENT] -_INVALID_RANGE = [EX_ENLDEV, EX_INVARG] -_HORCM_ERROR = set([EX_ENORMT, EX_ATTDBG, EX_ATTHOR, EX_COMERR]) -_COMMAND_IO_TO_RAID = set( - [EX_CMDRJE, EX_CMDIOE, EX_INVCMD, EX_INVMOD, EX_OPTINV]) - -_DEFAULT_PORT_BASE = 31000 - -_HORCMGR = 0 -_PAIR_HORCMGR = 1 -_INFINITE = "-" - -_HORCM_PATTERNS = { - 'gid': { - 'pattern': re.compile(r"ID +(?P\d+)\(0x\w+\)"), - 'type': six.text_type, - }, - 'ldev': { - 'pattern': re.compile(r"^LDEV +: +(?P\d+)", re.M), - 'type': int, - }, - 'lun': { - 'pattern': re.compile(r"LUN +(?P\d+)\(0x\w+\)"), - 'type': six.text_type, - }, - 'num_port': { - 'pattern': re.compile(r"^NUM_PORT +: +(?P\d+)", re.M), - 'type': int, - }, - 'pair_gid': { - 'pattern': re.compile( - r"^CL\w-\w+ +(?P\d+) +%s " % _PAIR_TARGET_NAME, re.M), - 'type': six.text_type, - }, - 'ports': { - 'pattern': re.compile(r"^PORTs +: +(?P.+)$", re.M), - 'type': list, - }, - 'vol_attr': { - 'pattern': re.compile(r"^VOL_ATTR +: +(?P.+)$", re.M), - 'type': list, - }, - 'vol_size': { - 'pattern': re.compile( - r"^VOL_Capacity\(BLK\) +: +(?P\d+)""", re.M), - 'type': int, - }, - 'vol_type': { - 'pattern': re.compile(r"^VOL_TYPE +: +(?P.+)$", re.M), - 'type': six.text_type, - }, - 'sts': { - 'pattern': re.compile(r"^STS +: +(?P.+)", re.M), - 'type': six.text_type, - }, - 'undefined_ldev': { - 'pattern': 
re.compile( - r"^ +\d+ +(?P\d+) +- +- +NOT +DEFINED", re.M), - 'type': int, - }, -} - -LDEV_SEP_PATTERN = re.compile(r'\ +:\ +') -CMD_PATTERN = re.compile(r"((?:^|\n)HORCM_CMD\n)") - -horcm_opts = [ - cfg.ListOpt( - 'vsp_horcm_numbers', - item_type=types.Integer(min=0, max=2047), - default=[200, 201], - help='Command Control Interface instance numbers in the format of ' - '\'xxx,yyy\'. The second one is for Shadow Image operation and ' - 'the first one is for other purposes.'), - cfg.StrOpt( - 'vsp_horcm_user', - help='Name of the user on the storage system.'), - cfg.StrOpt( - 'vsp_horcm_password', - secret=True, - help='Password corresponding to vsp_horcm_user.'), - cfg.BoolOpt( - 'vsp_horcm_add_conf', - default=True, - help='If True, the driver will create or update the Command Control ' - 'Interface configuration file as needed.'), - cfg.ListOpt( - 'vsp_horcm_pair_target_ports', - help='IDs of the storage ports used to copy volumes by Shadow Image ' - 'or Thin Image. To specify multiple ports, connect them by ' - 'commas (e.g. 
CL1-A,CL2-A).'), -] - -_REQUIRED_HORCM_OPTS = [ - 'vsp_horcm_user', - 'vsp_horcm_password', -] - -CONF = cfg.CONF -CONF.register_opts(horcm_opts, group=configuration.SHARED_CONF_GROUP) - -LOG = logging.getLogger(__name__) -MSG = utils.VSPMsg - - -def horcmgr_synchronized(func): - """Synchronize CCI operations per CCI instance.""" - @functools.wraps(func) - def wrap(self, *args, **kwargs): - """Synchronize CCI operations per CCI instance.""" - @coordination.synchronized(self.lock[args[0]]) - def func_locked(*_args, **_kwargs): - """Execute the wrapped function in a synchronized section.""" - return func(*_args, **_kwargs) - return func_locked(self, *args, **kwargs) - return wrap - - -def _is_valid_target(target, target_name, target_ports, is_pair): - """Return True if the specified target is valid, False otherwise.""" - if is_pair: - return (target[:utils.PORT_ID_LENGTH] in target_ports and - target_name == _PAIR_TARGET_NAME) - if (target[:utils.PORT_ID_LENGTH] not in target_ports or - not target_name.startswith(utils.TARGET_PREFIX) or - target_name == _PAIR_TARGET_NAME): - return False - return True - - -def find_value(stdout, key): - """Return the first match from the given raidcom command output.""" - match = _HORCM_PATTERNS[key]['pattern'].search(stdout) - if match: - if _HORCM_PATTERNS[key]['type'] is list: - return [ - value.strip() for value in - LDEV_SEP_PATTERN.split(match.group(key))] - return _HORCM_PATTERNS[key]['type'](match.group(key)) - return None - - -def _run_horcmgr(inst): - """Return 1 if the CCI instance is running.""" - result = utils.execute( - 'env', 'HORCMINST=%s' % inst, 'horcmgr', '-check') - return result[0] - - -def _run_horcmshutdown(inst): - """Stop the CCI instance and return 0 if successful.""" - result = utils.execute('horcmshutdown.sh', inst) - return result[0] - - -def _run_horcmstart(inst): - """Start the CCI instance and return 0 if successful.""" - result = utils.execute('horcmstart.sh', inst) - return result[0] - - -def 
_check_ldev(ldev_info, ldev, existing_ref): - """Check if the LDEV meets the criteria for being managed by the driver.""" - if ldev_info['sts'] != NORMAL_STS: - msg = utils.output_log(MSG.INVALID_LDEV_FOR_MANAGE) - raise exception.ManageExistingInvalidReference( - existing_ref=existing_ref, reason=msg) - vol_attr = set(ldev_info['vol_attr']) - if (not ldev_info['vol_type'].startswith('OPEN-V') or - len(vol_attr) < 2 or not vol_attr.issubset(_PERMITTED_TYPES)): - msg = utils.output_log(MSG.INVALID_LDEV_ATTR_FOR_MANAGE, ldev=ldev, - ldevtype=utils.NVOL_LDEV_TYPE) - raise exception.ManageExistingInvalidReference( - existing_ref=existing_ref, reason=msg) - # Hitachi storage calculates volume sizes in a block unit, 512 bytes. - if ldev_info['vol_size'] % utils.GIGABYTE_PER_BLOCK_SIZE: - msg = utils.output_log(MSG.INVALID_LDEV_SIZE_FOR_MANAGE, ldev=ldev) - raise exception.ManageExistingInvalidReference( - existing_ref=existing_ref, reason=msg) - if ldev_info['num_port']: - msg = utils.output_log(MSG.INVALID_LDEV_PORT_FOR_MANAGE, ldev=ldev) - raise exception.ManageExistingInvalidReference( - existing_ref=existing_ref, reason=msg) - - -class VSPHORCM(common.VSPCommon): - """HORCM interface class for Hitachi VSP Driver.""" - - def __init__(self, conf, storage_protocol, db): - """Initialize instance variables.""" - super(VSPHORCM, self).__init__(conf, storage_protocol, db) - self.conf.append_config_values(horcm_opts) - - self._copy_groups = [None] * _MAX_MUNS - self._pair_targets = [] - self._pattern = { - 'pool': None, - 'p_pool': None, - } - - def run_raidcom(self, *args, **kwargs): - """Run a raidcom command and return its output.""" - if 'success_code' not in kwargs: - kwargs['success_code'] = HORCM_EXIT_CODE - cmd = ['raidcom'] + list(args) + [ - '-s', self.conf.vsp_storage_id, - '-I%s' % self.conf.vsp_horcm_numbers[_HORCMGR]] - return self.run_and_verify_storage_cli(*cmd, **kwargs) - - def _run_pair_cmd(self, command, *args, **kwargs): - """Run a pair-related CCI 
command and return its output.""" - kwargs['horcmgr'] = _PAIR_HORCMGR - if 'success_code' not in kwargs: - kwargs['success_code'] = HORCM_EXIT_CODE - cmd = [command] + list(args) + [ - '-IM%s' % self.conf.vsp_horcm_numbers[_PAIR_HORCMGR]] - return self.run_and_verify_storage_cli(*cmd, **kwargs) - - def run_storage_cli(self, *cmd, **kwargs): - """Run a CCI command and return its output.""" - interval = kwargs.pop('interval', _EXEC_RETRY_INTERVAL) - flag = {'ignore_enauth': True} - - def _wait_for_horcm_execution(start_time, flag, *cmd, **kwargs): - """Run a CCI command and raise its output.""" - ignore_error = kwargs.pop('ignore_error', []) - no_retry_error = ignore_error + _HORCM_NO_RETRY_ERRORS - success_code = kwargs.pop('success_code', HORCM_EXIT_CODE) - timeout = kwargs.pop('timeout', _EXEC_MAX_WAITTIME) - horcmgr = kwargs.pop('horcmgr', _HORCMGR) - do_login = kwargs.pop('do_login', False) - - result = utils.execute(*cmd, **kwargs) - if _NOT_LOCKED in result[2] and not utils.check_timeout( - start_time, _LOCK_WAITTIME): - LOG.debug( - "The resource group to which the operation object " - "belongs is being locked by other software.") - return - if (result[0] in success_code or - utils.check_timeout(start_time, timeout) or - utils.check_ignore_error(no_retry_error, result[2])): - raise loopingcall.LoopingCallDone(result) - if result[0] == EX_ENAUTH: - if not self._retry_login(flag['ignore_enauth'], do_login): - raise loopingcall.LoopingCallDone(result) - flag['ignore_enauth'] = False - elif result[0] in _HORCM_ERROR: - if not self._start_horcmgr(horcmgr): - raise loopingcall.LoopingCallDone(result) - elif result[0] not in _COMMAND_IO_TO_RAID: - raise loopingcall.LoopingCallDone(result) - - loop = loopingcall.FixedIntervalLoopingCall( - _wait_for_horcm_execution, timeutils.utcnow(), - flag, *cmd, **kwargs) - return loop.start(interval=interval).wait() - - def _retry_login(self, ignore_enauth, do_login): - """Return True if login to CCI succeeds, False 
otherwise.""" - if not ignore_enauth: - if not do_login: - result = self._run_raidcom_login(do_raise=False) - - if do_login or result[0]: - utils.output_log(MSG.HORCM_LOGIN_FAILED, - user=self.conf.vsp_horcm_user) - return False - - return True - - def _run_raidcom_login(self, do_raise=True): - """Log in to CCI and return its output.""" - return self.run_raidcom( - '-login', self.conf.vsp_horcm_user, - self.conf.vsp_horcm_password, - do_raise=do_raise, do_login=True) - - @horcmgr_synchronized - def _restart_horcmgr(self, horcmgr): - """Restart the CCI instance.""" - inst = self.conf.vsp_horcm_numbers[horcmgr] - - def _wait_for_horcm_shutdown(start_time, inst): - """Stop the CCI instance and raise True if it stops.""" - if _run_horcmgr(inst) != _HORCM_RUNNING: - raise loopingcall.LoopingCallDone() - if (_run_horcmshutdown(inst) and - _run_horcmgr(inst) == _HORCM_RUNNING or - utils.check_timeout( - start_time, utils.DEFAULT_PROCESS_WAITTIME)): - raise loopingcall.LoopingCallDone(False) - - loop = loopingcall.FixedIntervalLoopingCall( - _wait_for_horcm_shutdown, timeutils.utcnow(), inst) - if not loop.start(interval=_HORCM_WAITTIME).wait(): - msg = utils.output_log( - MSG.HORCM_SHUTDOWN_FAILED, - inst=self.conf.vsp_horcm_numbers[horcmgr]) - raise exception.VSPError(msg) - - ret = _run_horcmstart(inst) - if ret and ret != _HORCM_RUNNING: - msg = utils.output_log( - MSG.HORCM_RESTART_FAILED, - inst=self.conf.vsp_horcm_numbers[horcmgr]) - raise exception.VSPError(msg) - - @coordination.synchronized('{self.lock[create_ldev]}') - def create_ldev(self, size, is_vvol=False): - """Create an LDEV of the specified size and the specified type.""" - ldev = super(VSPHORCM, self).create_ldev(size, is_vvol=is_vvol) - self._check_ldev_status(ldev) - return ldev - - def _check_ldev_status(self, ldev, delete=False): - """Wait until the LDEV status changes to the specified status.""" - if not delete: - args = _LDEV_CREATED - msg_id = MSG.LDEV_CREATION_WAIT_TIMEOUT - else: - args = 
_LDEV_DELETED - msg_id = MSG.LDEV_DELETION_WAIT_TIMEOUT - - def _wait_for_ldev_status(start_time, ldev, *args): - """Raise True if the LDEV is in the specified status.""" - result = self.run_raidcom( - 'get', 'ldev', '-ldev_id', ldev, *args, do_raise=False) - if not result[0]: - raise loopingcall.LoopingCallDone() - if utils.check_timeout(start_time, _LDEV_STATUS_WAITTIME): - raise loopingcall.LoopingCallDone(False) - - loop = loopingcall.FixedIntervalLoopingCall( - _wait_for_ldev_status, timeutils.utcnow(), ldev, *args) - if not loop.start(interval=_LDEV_CHECK_INTERVAL).wait(): - msg = utils.output_log(msg_id, ldev=ldev) - raise exception.VSPError(msg) - - def create_ldev_on_storage(self, ldev, size, is_vvol): - """Create an LDEV on the storage system.""" - args = ['add', 'ldev', '-ldev_id', ldev, '-capacity', '%sG' % size, - '-emulation', 'OPEN-V', '-pool'] - if is_vvol: - args.append('snap') - else: - args.append(self.conf.vsp_pool) - self.run_raidcom(*args) - - def get_unused_ldev(self): - """Find an unused LDEV and return its LDEV number.""" - if not self.storage_info['ldev_range']: - ldev_info = self.get_ldev_info( - ['ldev'], '-ldev_list', 'undefined', '-cnt', '1') - ldev = ldev_info.get('ldev') - else: - ldev = self._find_unused_ldev_by_range() - # When 'ldev' is 0, it should be true. - # Therefore, it cannot remove 'is None'. 
- if ldev is None: - msg = utils.output_log(MSG.NO_AVAILABLE_RESOURCE, resource='LDEV') - raise exception.VSPError(msg) - return ldev - - def _find_unused_ldev_by_range(self): - """Return the LDEV number of an unused LDEV in the LDEV range.""" - success_code = HORCM_EXIT_CODE.union(_INVALID_RANGE) - start, end = self.storage_info['ldev_range'][:2] - - while start <= end: - if end - start + 1 > _GETSTORAGEARRAY_ONCE: - cnt = _GETSTORAGEARRAY_ONCE - else: - cnt = end - start + 1 - - ldev_info = self.get_ldev_info( - ['undefined_ldev'], '-ldev_id', start, '-cnt', cnt, - '-key', 'front_end', success_code=success_code) - ldev = ldev_info.get('undefined_ldev') - # When 'ldev' is 0, it should be true. - # Therefore, it cannot remove 'is not None'. - if ldev is not None: - return ldev - - start += _GETSTORAGEARRAY_ONCE - - return None - - def get_ldev_info(self, keys, *args, **kwargs): - """Return a dictionary of LDEV-related items.""" - data = {} - result = self.run_raidcom('get', 'ldev', *args, **kwargs) - for key in keys: - data[key] = find_value(result[1], key) - return data - - def copy_on_storage(self, pvol, size, metadata, sync): - """Check if the LDEV can be copied on the storage.""" - ldev_info = self.get_ldev_info(['sts', 'vol_attr'], '-ldev_id', pvol) - if ldev_info['sts'] != NORMAL_STS: - msg = utils.output_log(MSG.INVALID_LDEV_STATUS_FOR_COPY, ldev=pvol) - raise exception.VSPError(msg) - - if VVOL_ATTR in ldev_info['vol_attr']: - raise exception.VSPNotSupported() - return super(VSPHORCM, self).copy_on_storage(pvol, size, metadata, - sync) - - @coordination.synchronized('{self.lock[create_pair]}') - def create_pair_on_storage(self, pvol, svol, is_thin): - """Create a copy pair on the storage.""" - path_list = [] - vol_type, pair_info = self._get_vol_type_and_pair_info(pvol) - if vol_type == SVOL: - self._delete_pair_based_on_svol( - pair_info['pvol'], pair_info['svol_info'], - no_restart=True) - if vol_type != PVOL: - self._initialize_pair_connection(pvol) - 
path_list.append(pvol) - try: - self._initialize_pair_connection(svol) - path_list.append(svol) - self._create_pair_on_storage_core(pvol, svol, is_thin, vol_type) - except exception.VSPError: - with excutils.save_and_reraise_exception(): - for ldev in path_list: - try: - self._terminate_pair_connection(ldev) - except exception.VSPError: - utils.output_log(MSG.UNMAP_LDEV_FAILED, ldev=ldev) - - def _create_pair_on_storage_core(self, pvol, svol, is_thin, vol_type): - """Create a copy pair on the storage depending on the copy method.""" - if is_thin: - self._create_thin_copy_pair(pvol, svol) - - else: - self._create_full_copy_pair(pvol, svol, vol_type) - - def _create_thin_copy_pair(self, pvol, svol): - """Create a THIN copy pair on the storage.""" - snapshot_name = _SNAP_NAME + six.text_type(svol % _SNAP_HASH_SIZE) - self.run_raidcom( - 'add', 'snapshot', '-ldev_id', pvol, svol, '-pool', - self.conf.vsp_thin_pool, '-snapshot_name', - snapshot_name, '-copy_size', self.conf.vsp_copy_speed) - try: - self.wait_thin_copy(svol, PAIR) - self.run_raidcom( - 'modify', 'snapshot', '-ldev_id', svol, - '-snapshot_data', 'create') - self.wait_thin_copy(svol, PSUS) - except exception.VSPError: - with excutils.save_and_reraise_exception(): - interval = self.conf.vsp_async_copy_check_interval - try: - self._delete_thin_copy_pair(pvol, svol, interval) - except exception.VSPError: - utils.output_log(MSG.DELETE_TI_PAIR_FAILED, pvol=pvol, - svol=svol) - - def _create_full_copy_pair(self, pvol, svol, vol_type): - """Create a FULL copy pair on the storage.""" - mun = 0 - - if vol_type == PVOL: - mun = self._get_unused_mun(pvol) - - copy_group = self._copy_groups[mun] - ldev_name = _LDEV_NAME % (pvol, svol) - restart = False - create = False - - try: - self._add_pair_config(pvol, svol, copy_group, ldev_name, mun) - self._restart_horcmgr(_PAIR_HORCMGR) - restart = True - self._run_pair_cmd( - 'paircreate', '-g', copy_group, '-d', ldev_name, - '-c', self.conf.vsp_copy_speed, - '-vl', 
'-split', '-fq', 'quick') - create = True - - self._wait_full_copy(svol, set([PSUS, COPY])) - except exception.VSPError: - with excutils.save_and_reraise_exception(): - if create: - try: - self._wait_full_copy(svol, set([PAIR, PSUS, PSUE])) - except exception.VSPError: - utils.output_log(MSG.WAIT_SI_PAIR_STATUS_FAILED, - pvol=pvol, svol=svol) - - interval = self.conf.vsp_async_copy_check_interval - - try: - self._delete_full_copy_pair(pvol, svol, interval) - except exception.VSPError: - utils.output_log(MSG.DELETE_SI_PAIR_FAILED, pvol=pvol, - svol=svol) - - try: - if self._is_smpl(svol): - self._delete_pair_config( - pvol, svol, copy_group, ldev_name) - except exception.VSPError: - utils.output_log(MSG.DELETE_DEVICE_GRP_FAILED, pvol=pvol, - svol=svol) - - if restart: - try: - self._restart_horcmgr(_PAIR_HORCMGR) - except exception.VSPError: - utils.output_log( - MSG.HORCM_RESTART_FOR_SI_FAILED, - inst=self.conf.vsp_horcm_numbers[1]) - - def _get_unused_mun(self, ldev): - """Return the number of an unused mirror unit.""" - pair_list = [] - - for mun in range(_MAX_MUNS): - pair_info = self._get_full_copy_pair_info(ldev, mun) - if not pair_info: - return mun - - pair_list.append((pair_info['svol_info'], mun)) - - for svol_info, mun in pair_list: - if svol_info['is_psus']: - self._delete_pair_based_on_svol( - ldev, svol_info, no_restart=True) - return mun - - utils.output_log(MSG.NO_AVAILABLE_MIRROR_UNIT, - copy_method=utils.FULL, pvol=ldev) - raise exception.VSPBusy() - - def _get_vol_type_and_pair_info(self, ldev): - """Return a tuple of the LDEV's Shadow Image pair status and info.""" - ldev_info = self.get_ldev_info(['sts', 'vol_attr'], '-ldev_id', ldev) - if ldev_info['sts'] != NORMAL_STS: - return (SMPL, None) - - if THIN_ATTR in ldev_info['vol_attr']: - return (PVOL, None) - - if FULL_ATTR in ldev_info['vol_attr']: - pair_info = self._get_full_copy_pair_info(ldev, 0) - if not pair_info: - return (PVOL, None) - - if pair_info['pvol'] != ldev: - return (SVOL, 
pair_info) - - return (PVOL, None) - - return (SMPL, None) - - def _get_full_copy_info(self, ldev): - """Return a tuple of P-VOL and S-VOL's info of a Shadow Image pair.""" - vol_type, pair_info = self._get_vol_type_and_pair_info(ldev) - svol_info = [] - - if vol_type == SMPL: - return (None, None) - - elif vol_type == SVOL: - return (pair_info['pvol'], [pair_info['svol_info']]) - - for mun in range(_MAX_MUNS): - pair_info = self._get_full_copy_pair_info(ldev, mun) - if pair_info: - svol_info.append(pair_info['svol_info']) - - return (ldev, svol_info) - - @coordination.synchronized('{self.lock[create_pair]}') - def delete_pair(self, ldev, all_split=True): - """Delete the specified LDEV in a synchronized section.""" - super(VSPHORCM, self).delete_pair(ldev, all_split=all_split) - - def delete_pair_based_on_pvol(self, pair_info, all_split): - """Disconnect all volume pairs to which the specified P-VOL belongs.""" - svols = [] - restart = False - - try: - for svol_info in pair_info['svol_info']: - if svol_info['is_thin'] or not svol_info['is_psus']: - svols.append(six.text_type(svol_info['ldev'])) - continue - - self.delete_pair_from_storage( - pair_info['pvol'], svol_info['ldev'], False) - - restart = True - - self._terminate_pair_connection(svol_info['ldev']) - - if not svols: - self._terminate_pair_connection(pair_info['pvol']) - - finally: - if restart: - self._restart_horcmgr(_PAIR_HORCMGR) - - if all_split and svols: - utils.output_log( - MSG.UNABLE_TO_DELETE_PAIR, pvol=pair_info['pvol'], - svol=', '.join(svols)) - raise exception.VSPBusy() - - def delete_pair_based_on_svol(self, pvol, svol_info): - """Disconnect all volume pairs to which the specified S-VOL belongs.""" - self._delete_pair_based_on_svol(pvol, svol_info) - - def _delete_pair_based_on_svol(self, pvol, svol_info, no_restart=False): - """Disconnect all volume pairs to which the specified S-VOL belongs.""" - do_restart = False - - if not svol_info['is_psus']: - 
utils.output_log(MSG.UNABLE_TO_DELETE_PAIR, pvol=pvol, - svol=svol_info['ldev']) - raise exception.VSPBusy() - - try: - self.delete_pair_from_storage( - pvol, svol_info['ldev'], svol_info['is_thin']) - do_restart = True - self._terminate_pair_connection(svol_info['ldev']) - self._terminate_pair_connection(pvol) - finally: - if not no_restart and do_restart: - self._restart_horcmgr(_PAIR_HORCMGR) - - def delete_pair_from_storage(self, pvol, svol, is_thin): - """Disconnect the volume pair that consists of the specified LDEVs.""" - interval = self.conf.vsp_async_copy_check_interval - if is_thin: - self._delete_thin_copy_pair(pvol, svol, interval) - else: - self._delete_full_copy_pair(pvol, svol, interval) - - def _delete_thin_copy_pair(self, pvol, svol, interval): - """Disconnect a THIN volume pair.""" - result = self.run_raidcom( - 'get', 'snapshot', '-ldev_id', svol) - if not result[1]: - return - mun = result[1].splitlines()[1].split()[5] - self.run_raidcom( - 'unmap', 'snapshot', '-ldev_id', svol, - success_code=ALL_EXIT_CODE) - self.run_raidcom( - 'delete', 'snapshot', '-ldev_id', pvol, '-mirror_id', mun) - self._wait_thin_copy_deleting(svol, interval=interval) - - def _wait_thin_copy_deleting(self, ldev, **kwargs): - """Wait until the LDEV is no longer in a THIN volume pair.""" - interval = kwargs.pop( - 'interval', self.conf.vsp_async_copy_check_interval) - - def _wait_for_thin_copy_smpl(start_time, ldev, **kwargs): - """Raise True if the LDEV is no longer in a THIN volume pair.""" - timeout = kwargs.pop('timeout', utils.DEFAULT_PROCESS_WAITTIME) - ldev_info = self.get_ldev_info( - ['sts', 'vol_attr'], '-ldev_id', ldev) - if (ldev_info['sts'] != NORMAL_STS or - THIN_ATTR not in ldev_info['vol_attr']): - raise loopingcall.LoopingCallDone() - if utils.check_timeout(start_time, timeout): - raise loopingcall.LoopingCallDone(False) - - loop = loopingcall.FixedIntervalLoopingCall( - _wait_for_thin_copy_smpl, timeutils.utcnow(), ldev, **kwargs) - if not 
loop.start(interval=interval).wait(): - msg = utils.output_log(MSG.TI_PAIR_STATUS_WAIT_TIMEOUT, svol=ldev) - raise exception.VSPError(msg) - - def _delete_full_copy_pair(self, pvol, svol, interval): - """Disconnect a FULL volume pair.""" - stdout = self._run_pairdisplay( - '-d', self.conf.vsp_storage_id, svol, 0) - if not stdout: - return - - copy_group = stdout.splitlines()[2].split()[0] - ldev_name = _LDEV_NAME % (pvol, svol) - - if stdout.splitlines()[1].split()[9] != 'P-VOL': - self._restart_horcmgr(_PAIR_HORCMGR) - try: - self._run_pair_cmd( - 'pairsplit', '-g', copy_group, '-d', ldev_name, '-S') - self._wait_full_copy(svol, set([SMPL]), interval=interval) - finally: - if self._is_smpl(svol): - self._delete_pair_config(pvol, svol, copy_group, ldev_name) - - def _initialize_pair_connection(self, ldev): - """Initialize server-volume connection for volume copy.""" - port, gid = None, None - - for port, gid in self._pair_targets: - try: - targets = { - 'list': [(port, gid)], - 'lun': {}, - } - return self.map_ldev(targets, ldev) - except exception.VSPError: - utils.output_log(MSG.MAP_LDEV_FAILED, ldev=ldev, port=port, - id=gid, lun=None) - - msg = utils.output_log(MSG.NO_MAPPING_FOR_LDEV, ldev=ldev) - raise exception.VSPError(msg) - - def _terminate_pair_connection(self, ldev): - """Terminate server-volume connection for volume copy.""" - targets = { - 'list': [], - } - ldev_info = self.get_ldev_info(['sts', 'vol_attr'], '-ldev_id', ldev) - if (ldev_info['sts'] == NORMAL_STS and - FULL_ATTR in ldev_info['vol_attr'] or - self._get_thin_copy_svol_status(ldev) != SMPL): - LOG.debug( - 'The specified LDEV has pair. Therefore, unmapping ' - 'operation was skipped. 
' - '(LDEV: %(ldev)s, vol_attr: %(info)s)', - {'ldev': ldev, 'info': ldev_info['vol_attr']}) - return - self._find_mapped_targets_from_storage( - targets, ldev, self.storage_info['controller_ports'], is_pair=True) - self.unmap_ldev(targets, ldev) - - def check_param(self): - """Check parameter values and consistency among them.""" - super(VSPHORCM, self).check_param() - utils.check_opts(self.conf, horcm_opts) - insts = self.conf.vsp_horcm_numbers - if len(insts) != 2 or insts[_HORCMGR] == insts[_PAIR_HORCMGR]: - msg = utils.output_log(MSG.INVALID_PARAMETER, - param='vsp_horcm_numbers') - raise exception.VSPError(msg) - if (not self.conf.vsp_target_ports and - not self.conf.vsp_horcm_pair_target_ports): - msg = utils.output_log(MSG.INVALID_PARAMETER, - param='vsp_target_ports or ' - 'vsp_horcm_pair_target_ports') - raise exception.VSPError(msg) - utils.output_log(MSG.SET_CONFIG_VALUE, object='LDEV range', - value=self.storage_info['ldev_range']) - for opt in _REQUIRED_HORCM_OPTS: - if not self.conf.safe_get(opt): - msg = utils.output_log(MSG.INVALID_PARAMETER, param=opt) - raise exception.VSPError(msg) - - def _set_copy_groups(self, host_ip): - """Initialize an instance variable for Shadow Image copy groups.""" - serial = self.conf.vsp_storage_id - inst = self.conf.vsp_horcm_numbers[_PAIR_HORCMGR] - - for mun in range(_MAX_MUNS): - copy_group = _COPY_GROUP % (host_ip, serial, inst, mun) - self._copy_groups[mun] = copy_group - utils.output_log(MSG.SET_CONFIG_VALUE, object='copy group list', - value=self._copy_groups) - - def connect_storage(self): - """Prepare for using the storage.""" - self._set_copy_groups(CONF.my_ip) - - if self.conf.vsp_horcm_add_conf: - self._create_horcm_conf() - self._create_horcm_conf(horcmgr=_PAIR_HORCMGR) - self._restart_horcmgr(_HORCMGR) - self._restart_horcmgr(_PAIR_HORCMGR) - self._run_raidcom_login() - super(VSPHORCM, self).connect_storage() - - self._pattern['p_pool'] = re.compile( - (r"^%03d +\S+ +\d+ +\d+ +(?P\d+) +\d+ +\d+ +\d+ 
+\w+ +" - r"\d+ +(?P\d+)") % self.storage_info['pool_id'], re.M) - self._pattern['pool'] = re.compile( - r"^%03d +\S+ +\d+ +\S+ +\w+ +\d+ +\w+ +\d+ +(?P\S+)" % - self.storage_info['pool_id'], re.M) - - def _find_lun(self, ldev, port, gid): - """Return LUN determined by the given arguments.""" - result = self.run_raidcom( - 'get', 'lun', '-port', '-'.join([port, gid])) - match = re.search( - r'^%(port)s +%(gid)s +\S+ +(?P\d+) +1 +%(ldev)s ' % { - 'port': port, 'gid': gid, 'ldev': ldev}, result[1], re.M) - if match: - return match.group('lun') - return None - - def _find_mapped_targets_from_storage(self, targets, ldev, - target_ports, is_pair=False): - """Update port-gid list for the specified LDEV.""" - ldev_info = self.get_ldev_info(['ports'], '-ldev_id', ldev) - if not ldev_info['ports']: - return - for ports_strings in ldev_info['ports']: - ports = ports_strings.split() - if _is_valid_target(ports[0], ports[2], target_ports, is_pair): - targets['list'].append(ports[0]) - - def find_mapped_targets_from_storage(self, targets, ldev, target_ports): - """Update port-gid list for the specified LDEV.""" - self._find_mapped_targets_from_storage(targets, ldev, target_ports) - - def get_unmap_targets_list(self, target_list, mapped_list): - """Return a list of IDs of ports that need to be disconnected.""" - unmap_list = [] - for mapping_info in mapped_list: - if (mapping_info[:utils.PORT_ID_LENGTH], - mapping_info.split('-')[2]) in target_list: - unmap_list.append(mapping_info) - return unmap_list - - def unmap_ldev(self, targets, ldev): - """Delete the LUN between the specified LDEV and port-gid.""" - interval = _LUN_RETRY_INTERVAL - success_code = HORCM_EXIT_CODE.union([EX_ENOOBJ]) - timeout = utils.DEFAULT_PROCESS_WAITTIME - for target in targets['list']: - self.run_raidcom( - 'delete', 'lun', '-port', target, '-ldev_id', ldev, - interval=interval, success_code=success_code, timeout=timeout) - LOG.debug( - 'Deleted logical unit path of the specified logical ' - 'device. 
(LDEV: %(ldev)s, target: %(target)s)', - {'ldev': ldev, 'target': target}) - - def find_all_mapped_targets_from_storage(self, targets, ldev): - """Add all port-gids connected with the LDEV to the list.""" - ldev_info = self.get_ldev_info(['ports'], '-ldev_id', ldev) - if ldev_info['ports']: - for port in ldev_info['ports']: - targets['list'].append(port.split()[0]) - - def delete_target_from_storage(self, port, gid): - """Delete the host group or the iSCSI target from the port.""" - result = self.run_raidcom( - 'delete', 'host_grp', '-port', - '-'.join([port, gid]), do_raise=False) - if result[0]: - utils.output_log(MSG.DELETE_TARGET_FAILED, port=port, id=gid) - - def _run_add_lun(self, ldev, port, gid, lun=None): - """Create a LUN between the specified LDEV and port-gid.""" - args = ['add', 'lun', '-port', '-'.join([port, gid]), '-ldev_id', ldev] - ignore_error = [_LU_PATH_DEFINED] - if lun: - args.extend(['-lun_id', lun]) - ignore_error = [_ANOTHER_LDEV_MAPPED] - result = self.run_raidcom( - *args, ignore_error=ignore_error, - interval=_LUN_RETRY_INTERVAL, timeout=_LUN_MAX_WAITTIME) - if not lun: - if result[0] == EX_CMDRJE: - lun = self._find_lun(ldev, port, gid) - LOG.debug( - 'A logical unit path has already been defined in the ' - 'specified logical device. (LDEV: %(ldev)s, ' - 'port: %(port)s, gid: %(gid)s, lun: %(lun)s)', - {'ldev': ldev, 'port': port, 'gid': gid, 'lun': lun}) - else: - lun = find_value(result[1], 'lun') - elif _ANOTHER_LDEV_MAPPED in result[2]: - utils.output_log(MSG.MAP_LDEV_FAILED, ldev=ldev, port=port, id=gid, - lun=lun) - return None - LOG.debug( - 'Created logical unit path to the specified logical device. 
' - '(LDEV: %(ldev)s, port: %(port)s, ' - 'gid: %(gid)s, lun: %(lun)s)', - {'ldev': ldev, 'port': port, 'gid': gid, 'lun': lun}) - return lun - - def map_ldev(self, targets, ldev): - """Create the path between the server and the LDEV and return LUN.""" - port, gid = targets['list'][0] - lun = self._run_add_lun(ldev, port, gid) - targets['lun'][port] = True - for port, gid in targets['list'][1:]: - try: - lun2 = self._run_add_lun(ldev, port, gid, lun=lun) - if lun2 is not None: - targets['lun'][port] = True - except exception.VSPError: - utils.output_log(MSG.MAP_LDEV_FAILED, ldev=ldev, port=port, - id=gid, lun=lun) - return lun - - def extend_ldev(self, ldev, old_size, new_size): - """Extend the specified LDEV to the specified new size.""" - timeout = _EXTEND_WAITTIME - self.run_raidcom('extend', 'ldev', '-ldev_id', ldev, '-capacity', - '%sG' % (new_size - old_size), timeout=timeout) - - def get_pool_info(self): - """Return the total and free capacity of the storage pool.""" - result = self.run_raidcom('get', 'dp_pool') - p_pool_match = self._pattern['p_pool'].search(result[1]) - - result = self.run_raidcom('get', 'pool', '-key', 'opt') - pool_match = self._pattern['pool'].search(result[1]) - - if not p_pool_match or not pool_match: - msg = utils.output_log(MSG.POOL_NOT_FOUND, - pool=self.storage_info['pool_id']) - raise exception.VSPError(msg) - - tp_cap = float(p_pool_match.group('tp_cap')) / units.Ki - tl_cap = float(p_pool_match.group('tl_cap')) / units.Ki - vcap = 'infinite' if pool_match.group('vcap') == _INFINITE else ( - int(pool_match.group('vcap'))) - - if vcap == 'infinite': - return 'unknown', 'unknown' - else: - total_gb = int(math.floor(tp_cap * (vcap / 100.0))) - free_gb = int(math.floor(total_gb - tl_cap)) - return total_gb, free_gb - - def discard_zero_page(self, volume): - """Return the volume's no-data pages to the storage pool.""" - ldev = utils.get_ldev(volume) - try: - self.run_raidcom( - 'modify', 'ldev', '-ldev_id', ldev, - '-status', 
'discard_zero_page') - except exception.VSPError: - utils.output_log(MSG.DISCARD_ZERO_PAGE_FAILED, ldev=ldev) - - def wait_thin_copy(self, ldev, status, **kwargs): - """Wait until the S-VOL status changes to the specified status.""" - interval = kwargs.pop( - 'interval', self.conf.vsp_copy_check_interval) - - def _wait_for_thin_copy_status(start_time, ldev, status, **kwargs): - """Raise True if the S-VOL is in the specified status.""" - timeout = kwargs.pop('timeout', utils.DEFAULT_PROCESS_WAITTIME) - if self._get_thin_copy_svol_status(ldev) == status: - raise loopingcall.LoopingCallDone() - if utils.check_timeout(start_time, timeout): - raise loopingcall.LoopingCallDone(False) - - loop = loopingcall.FixedIntervalLoopingCall( - _wait_for_thin_copy_status, timeutils.utcnow(), - ldev, status, **kwargs) - if not loop.start(interval=interval).wait(): - msg = utils.output_log(MSG.TI_PAIR_STATUS_WAIT_TIMEOUT, svol=ldev) - raise exception.VSPError(msg) - - def _get_thin_copy_svol_status(self, ldev): - """Return the status of the S-VOL in a THIN volume pair.""" - result = self.run_raidcom( - 'get', 'snapshot', '-ldev_id', ldev) - if not result[1]: - return SMPL - return _STATUS_TABLE.get(result[1].splitlines()[1].split()[2], UNKN) - - def _create_horcm_conf(self, horcmgr=_HORCMGR): - """Create a CCI configuration file.""" - inst = self.conf.vsp_horcm_numbers[horcmgr] - serial = self.conf.vsp_storage_id - filename = '/etc/horcm%s.conf' % inst - port = _DEFAULT_PORT_BASE + inst - found = False - if not os.path.exists(filename): - file_str = """ -HORCM_MON -#ip_address service poll(10ms) timeout(10ms) -127.0.0.1 %16d 6000 3000 -HORCM_CMD -""" % port - else: - file_str = cinder_utils.read_file_as_root(filename) - if re.search(r'^\\\\.\\CMD-%s:/dev/sd$' % serial, file_str, re.M): - found = True - if not found: - repl_str = r'\1\\\\.\\CMD-%s:/dev/sd\n' % serial - file_str = CMD_PATTERN.sub(repl_str, file_str) - result = utils.execute('tee', filename, process_input=file_str) - if 
result[0]: - msg = utils.output_log( - MSG.CREATE_HORCM_CONF_FILE_FAILED, file=filename, - ret=result[0], err=result[2]) - raise exception.VSPError(msg) - - def init_cinder_hosts(self, **kwargs): - """Initialize server-storage connection.""" - targets = { - 'info': {}, - 'list': [], - 'iqns': {}, - } - super(VSPHORCM, self).init_cinder_hosts(targets=targets) - if self.storage_info['pair_ports']: - targets['info'] = {} - ports = self._get_pair_ports() - for port in ports: - targets['info'][port] = True - self._init_pair_targets(targets['info']) - - def _init_pair_targets(self, targets_info): - """Initialize server-storage connection for volume copy.""" - for port in targets_info.keys(): - if not targets_info[port]: - continue - result = self.run_raidcom('get', 'host_grp', '-port', port) - gid = find_value(result[1], 'pair_gid') - if not gid: - try: - connector = { - 'ip': _PAIR_TARGET_NAME_BODY, - 'wwpns': [_PAIR_TARGET_NAME_BODY], - } - target_name, gid = self.create_target_to_storage( - port, connector, None) - utils.output_log(MSG.OBJECT_CREATED, - object='a target for pair operation', - details='port: %(port)s, gid: %(gid)s, ' - 'target_name: %(target)s' % - {'port': port, 'gid': gid, - 'target': target_name}) - except exception.VSPError: - utils.output_log(MSG.CREATE_HOST_GROUP_FAILED, port=port) - continue - self._pair_targets.append((port, gid)) - - if not self._pair_targets: - msg = utils.output_log(MSG.ADD_PAIR_TARGET_FAILED) - raise exception.VSPError(msg) - self._pair_targets.sort(reverse=True) - utils.output_log(MSG.SET_CONFIG_VALUE, - object='port-gid list for pair operation', - value=self._pair_targets) - - @coordination.synchronized('{self.lock[create_ldev]}') - def delete_ldev_from_storage(self, ldev): - """Delete the specified LDEV from the storage.""" - self._delete_ldev_from_storage(ldev) - self._check_ldev_status(ldev, delete=True) - - def _delete_ldev_from_storage(self, ldev): - """Delete the specified LDEV from the storage.""" - result = 
self.run_raidcom( - 'get', 'ldev', '-ldev_id', ldev, *_LDEV_DELETED, do_raise=False) - if not result[0]: - utils.output_log(MSG.LDEV_NOT_EXIST, ldev=ldev) - return - self.run_raidcom('delete', 'ldev', '-ldev_id', ldev) - - def _run_pairdisplay(self, *args): - """Execute Shadow Image pairdisplay command.""" - result = self._run_pair_cmd( - 'pairdisplay', '-CLI', *args, do_raise=False, - success_code=HORCM_EXIT_CODE.union(_NO_SUCH_DEVICE)) - return result[1] - - def _check_copy_grp(self, copy_group): - """Return the number of device groups in the specified copy group.""" - count = 0 - result = self.run_raidcom('get', 'copy_grp') - for line in result[1].splitlines()[1:]: - line = line.split() - if line[0] == copy_group: - count += 1 - if count == 2: - break - return count - - def _check_device_grp(self, group_name, ldev, ldev_name=None): - """Return True if the LDEV is in the device group, False otherwise.""" - result = self.run_raidcom( - 'get', 'device_grp', '-device_grp_name', group_name) - for line in result[1].splitlines()[1:]: - line = line.split() - if int(line[2]) == ldev: - if not ldev_name: - return True - else: - return line[1] == ldev_name - return False - - def _is_smpl(self, ldev): - """Return True if the status of the LDEV is SMPL, False otherwise.""" - stdout = self._run_pairdisplay( - '-d', self.conf.vsp_storage_id, ldev, 0) - if not stdout: - return True - return stdout.splitlines()[2].split()[9] in _SMPL_STAUS - - def _get_full_copy_pair_info(self, ldev, mun): - """Return info of the Shadow Image volume pair.""" - stdout = self._run_pairdisplay( - '-d', self.conf.vsp_storage_id, ldev, mun) - if not stdout: - return None - line = stdout.splitlines()[2].split() - if not line[8].isdigit() or not line[12].isdigit(): - return None - pvol, svol = int(line[12]), int(line[8]) - LOG.debug( - 'Full copy pair status. 
(P-VOL: %(pvol)s, S-VOL: %(svol)s, ' - 'status: %(status)s)', - {'pvol': pvol, 'svol': svol, 'status': line[10]}) - return { - 'pvol': pvol, - 'svol_info': { - 'ldev': svol, - 'is_psus': line[10] == "SSUS", - 'is_thin': False, - }, - } - - def _get_thin_copy_info(self, ldev): - """Return info of the Thin Image volume pair.""" - result = self.run_raidcom( - 'get', 'snapshot', '-ldev_id', ldev) - if not result[1]: - return (None, None) - - line = result[1].splitlines()[1].split() - is_psus = _STATUS_TABLE.get(line[2]) == PSUS - if line[1] == "P-VOL": - pvol, svol = ldev, int(line[6]) - else: - pvol, svol = int(line[6]), ldev - LOG.debug( - 'Thin copy pair status. (P-VOL: %(pvol)s, S-VOL: %(svol)s, ' - 'status: %(status)s)', - {'pvol': pvol, 'svol': svol, 'status': line[2]}) - return (pvol, [{'ldev': svol, 'is_thin': True, 'is_psus': is_psus}]) - - def get_pair_info(self, ldev): - """Return info of the volume pair.""" - pair_info = {} - ldev_info = self.get_ldev_info(['sts', 'vol_attr'], '-ldev_id', ldev) - if ldev_info['sts'] != NORMAL_STS or _PAIR_ATTRS.isdisjoint( - ldev_info['vol_attr']): - return None - - if FULL_ATTR in ldev_info['vol_attr']: - pvol, svol_info = self._get_full_copy_info(ldev) - # When 'pvol' is 0, it should be true. - # Therefore, it cannot remove 'is not None'. - if pvol is not None: - pair_info['pvol'] = pvol - pair_info.setdefault('svol_info', []) - pair_info['svol_info'].extend(svol_info) - - if THIN_ATTR in ldev_info['vol_attr']: - pvol, svol_info = self._get_thin_copy_info(ldev) - # When 'pvol' is 0, it should be true. - # Therefore, it cannot remove 'is not None'. 
- if pvol is not None: - pair_info['pvol'] = pvol - pair_info.setdefault('svol_info', []) - pair_info['svol_info'].extend(svol_info) - - return pair_info - - def _get_pair_ports(self): - """Return a list of ports used for volume pair management.""" - return (self.storage_info['pair_ports'] or - self.storage_info['controller_ports']) - - def _add_pair_config(self, pvol, svol, copy_group, ldev_name, mun): - """Create device groups and a copy group for the SI volume pair.""" - pvol_group = copy_group + 'P' - svol_group = copy_group + 'S' - self.run_raidcom( - 'add', 'device_grp', '-device_grp_name', - pvol_group, ldev_name, '-ldev_id', pvol) - self.run_raidcom( - 'add', 'device_grp', '-device_grp_name', - svol_group, ldev_name, '-ldev_id', svol) - nr_copy_groups = self._check_copy_grp(copy_group) - if nr_copy_groups == 1: - self.run_raidcom( - 'delete', 'copy_grp', '-copy_grp_name', copy_group) - if nr_copy_groups != 2: - self.run_and_verify_storage_cli( - 'raidcom', 'add', 'copy_grp', '-copy_grp_name', - copy_group, pvol_group, svol_group, '-mirror_id', mun, - '-s', self.conf.vsp_storage_id, - '-IM%s' % self.conf.vsp_horcm_numbers[_HORCMGR], - success_code=HORCM_EXIT_CODE) - - def _delete_pair_config(self, pvol, svol, copy_group, ldev_name): - """Delete specified LDEVs from Shadow Image device groups.""" - pvol_group = copy_group + 'P' - svol_group = copy_group + 'S' - if self._check_device_grp(pvol_group, pvol, ldev_name=ldev_name): - self.run_raidcom( - 'delete', 'device_grp', '-device_grp_name', - pvol_group, '-ldev_id', pvol) - if self._check_device_grp(svol_group, svol, ldev_name=ldev_name): - self.run_raidcom( - 'delete', 'device_grp', '-device_grp_name', - svol_group, '-ldev_id', svol) - - def _wait_full_copy(self, ldev, status, **kwargs): - """Wait until the LDEV status changes to the specified status.""" - interval = kwargs.pop( - 'interval', self.conf.vsp_copy_check_interval) - - def _wait_for_full_copy_pair_status(start_time, ldev, - status, **kwargs): - 
"""Raise True if the LDEV is in the specified status.""" - timeout = kwargs.pop('timeout', utils.DEFAULT_PROCESS_WAITTIME) - if self._run_pairevtwait(ldev) in status: - raise loopingcall.LoopingCallDone() - if utils.check_timeout(start_time, timeout): - raise loopingcall.LoopingCallDone(False) - - loop = loopingcall.FixedIntervalLoopingCall( - _wait_for_full_copy_pair_status, timeutils.utcnow(), - ldev, status, **kwargs) - if not loop.start(interval=interval).wait(): - msg = utils.output_log(MSG.SI_PAIR_STATUS_WAIT_TIMEOUT, svol=ldev) - raise exception.VSPError(msg) - - def wait_full_copy_completion(self, pvol, svol): - """Wait until the Shadow Image volume copy has finished.""" - self._wait_full_copy(svol, set([PSUS, PSUE]), - timeout=utils.MAX_PROCESS_WAITTIME) - if self._run_pairevtwait(svol) == PSUE: - msg = utils.output_log(MSG.VOLUME_COPY_FAILED, - copy_method=utils.FULL, pvol=pvol, - svol=svol) - raise exception.VSPError(msg) - - def _run_pairevtwait(self, ldev): - """Execute Shadow Image pairevtwait command.""" - result = self._run_pair_cmd( - 'pairevtwait', '-d', self.conf.vsp_storage_id, - ldev, '-nowaits') - return result[0] - - def get_ldev_size_in_gigabyte(self, ldev, existing_ref): - """Return the size[GB] of the specified LDEV.""" - ldev_info = self.get_ldev_info( - _CHECK_KEYS, '-ldev_id', ldev, do_raise=False) - _check_ldev(ldev_info, ldev, existing_ref) - # Hitachi storage calculates volume sizes in a block unit, 512 bytes. 
- return ldev_info['vol_size'] / utils.GIGABYTE_PER_BLOCK_SIZE - - def get_pool_id(self): - """Return the pool number of vsp_pool.""" - pool_id = super(VSPHORCM, self).get_pool_id() - if pool_id is None: - pool = self.conf.vsp_pool - result = self.run_raidcom('get', 'pool', '-key', 'opt') - for line in result[1].splitlines()[1:]: - line = line.split() - if line[3] == pool: - return int(line[0]) - return pool_id - - def config_lock(self): - """Initialize lock resource names.""" - for key in ['create_ldev', 'create_pair']: - self.lock[key] = '_'.join([key, self.conf.vsp_storage_id]) - self.lock[_HORCMGR] = ( - 'horcmgr_%s' % self.conf.vsp_horcm_numbers[_HORCMGR]) - self.lock[_PAIR_HORCMGR] = ( - 'horcmgr_%s' % self.conf.vsp_horcm_numbers[_PAIR_HORCMGR]) - - @horcmgr_synchronized - def _start_horcmgr(self, horcmgr): - """Start the CCI instance and return True if successful.""" - inst = self.conf.vsp_horcm_numbers[horcmgr] - ret = 0 - if _run_horcmgr(inst) != _HORCM_RUNNING: - ret = _run_horcmstart(inst) - if ret and ret != _HORCM_RUNNING: - utils.output_log(MSG.HORCM_START_FAILED, inst=inst) - return False - return True - - def output_param_to_log(self): - """Output configuration parameter values to the log file.""" - super(VSPHORCM, self).output_param_to_log() - utils.output_opts(self.conf, horcm_opts) - - def get_storage_cli_info(self): - """Return a tuple of the storage CLI name and its version.""" - version = 'N/A' - result = utils.execute('raidqry', '-h') - match = re.search(r'^Ver&Rev: +(?P\S+)', result[1], re.M) - if match: - version = match.group('version') - return ('Command Control Interface', version) - - def check_vvol(self, ldev): - """Return True if the specified LDEV is V-VOL, False otherwise.""" - ldev_info = self.get_ldev_info(['sts', 'vol_attr'], '-ldev_id', ldev) - if ldev_info['sts'] != NORMAL_STS: - return False - return VVOL_ATTR in ldev_info['vol_attr'] diff --git a/cinder/volume/drivers/hitachi/vsp_horcm_fc.py 
b/cinder/volume/drivers/hitachi/vsp_horcm_fc.py deleted file mode 100644 index 917e63ef7..000000000 --- a/cinder/volume/drivers/hitachi/vsp_horcm_fc.py +++ /dev/null @@ -1,189 +0,0 @@ -# Copyright (C) 2016, Hitachi, Ltd. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -# -"""HORCM interface fibre channel module for Hitachi VSP Driver.""" - -import re - -from oslo_log import log as logging - -from cinder import exception -from cinder.volume.drivers.hitachi import vsp_horcm as horcm -from cinder.volume.drivers.hitachi import vsp_utils as utils -from cinder.zonemanager import utils as fczm_utils - -_FC_LINUX_MODE_OPTS = ['-host_mode', 'LINUX'] -_HOST_GROUPS_PATTERN = re.compile( - r"^CL\w-\w+ +(?P\d+) +%s(?!pair00 )\S* +\d+ " % utils.TARGET_PREFIX, - re.M) -_FC_PORT_PATTERN = re.compile( - (r"^(CL\w-\w)\w* +(?:FIBRE|FCoE) +TAR +\w+ +\w+ +\w +\w+ +Y +" - r"\d+ +\d+ +(\w{16})"), re.M) - -LOG = logging.getLogger(__name__) -MSG = utils.VSPMsg - - -class VSPHORCMFC(horcm.VSPHORCM): - """HORCM interface fibre channel class for Hitachi VSP Driver.""" - - def __init__(self, conf, storage_protocol, db): - """Initialize instance variables.""" - super(VSPHORCMFC, self).__init__(conf, storage_protocol, db) - self._lookup_service = fczm_utils.create_lookup_service() - - def connect_storage(self): - """Prepare for using the storage.""" - target_ports = self.conf.vsp_target_ports - compute_target_ports = self.conf.vsp_compute_target_ports - pair_target_ports = 
self.conf.vsp_horcm_pair_target_ports - - super(VSPHORCMFC, self).connect_storage() - result = self.run_raidcom('get', 'port') - for port, wwn in _FC_PORT_PATTERN.findall(result[1]): - if target_ports and port in target_ports: - self.storage_info['controller_ports'].append(port) - self.storage_info['wwns'][port] = wwn - if compute_target_ports and port in compute_target_ports: - self.storage_info['compute_ports'].append(port) - self.storage_info['wwns'][port] = wwn - if pair_target_ports and port in pair_target_ports: - self.storage_info['pair_ports'].append(port) - - self.check_ports_info() - if pair_target_ports and not self.storage_info['pair_ports']: - msg = utils.output_log(MSG.RESOURCE_NOT_FOUND, - resource="Pair target ports") - raise exception.VSPError(msg) - utils.output_log(MSG.SET_CONFIG_VALUE, - object='pair target port list', - value=self.storage_info['pair_ports']) - utils.output_log(MSG.SET_CONFIG_VALUE, object='port-wwn list', - value=self.storage_info['wwns']) - - def create_target_to_storage(self, port, connector, hba_ids): - """Create a host group on the specified port.""" - wwpns = self.get_hba_ids_from_connector(connector) - target_name = utils.TARGET_PREFIX + min(wwpns) - try: - result = self.run_raidcom( - 'add', 'host_grp', '-port', port, '-host_grp_name', - target_name) - except exception.VSPError: - result = self.run_raidcom('get', 'host_grp', '-port', port) - hostgroup_pt = re.compile( - r"^CL\w-\w+ +(?P\d+) +%s +\d+ " % - target_name, re.M) - gid = hostgroup_pt.findall(result[1]) - if gid: - return target_name, gid[0] - else: - raise - return target_name, horcm.find_value(result[1], 'gid') - - def set_hba_ids(self, port, gid, hba_ids): - """Connect all specified HBAs with the specified port.""" - registered_wwns = [] - for wwn in hba_ids: - try: - self.run_raidcom( - 'add', 'hba_wwn', '-port', - '-'.join([port, gid]), '-hba_wwn', wwn) - registered_wwns.append(wwn) - except exception.VSPError: - utils.output_log(MSG.ADD_HBA_WWN_FAILED, 
port=port, gid=gid, - wwn=wwn) - if not registered_wwns: - msg = utils.output_log(MSG.NO_HBA_WWN_ADDED_TO_HOST_GRP, port=port, - gid=gid) - raise exception.VSPError(msg) - - def set_target_mode(self, port, gid): - """Configure the host group to meet the environment.""" - self.run_raidcom( - 'modify', 'host_grp', '-port', - '-'.join([port, gid]), *_FC_LINUX_MODE_OPTS, - success_code=horcm.ALL_EXIT_CODE) - - def find_targets_from_storage(self, targets, connector, target_ports): - """Find mapped ports, memorize them and return unmapped port count.""" - nr_not_found = 0 - old_target_name = None - if 'ip' in connector: - old_target_name = utils.TARGET_PREFIX + connector['ip'] - success_code = horcm.HORCM_EXIT_CODE.union([horcm.EX_ENOOBJ]) - wwpns = self.get_hba_ids_from_connector(connector) - wwpns_pattern = re.compile( - r'^CL\w-\w+ +\d+ +\S+ +(%s) ' % '|'.join(wwpns), re.M | re.I) - target_name = utils.TARGET_PREFIX + min(wwpns) - - for port in target_ports: - targets['info'][port] = False - - result = self.run_raidcom( - 'get', 'hba_wwn', '-port', port, target_name, - success_code=success_code) - wwpns = wwpns_pattern.findall(result[1]) - if not wwpns and old_target_name: - result = self.run_raidcom( - 'get', 'hba_wwn', '-port', port, old_target_name, - success_code=success_code) - wwpns = wwpns_pattern.findall(result[1]) - if wwpns: - gid = result[1].splitlines()[1].split()[1] - targets['info'][port] = True - targets['list'].append((port, gid)) - LOG.debug( - 'Found wwpns in host group immediately. 
' - '(port: %(port)s, gid: %(gid)s, wwpns: %(wwpns)s)', - {'port': port, 'gid': gid, 'wwpns': wwpns}) - continue - - result = self.run_raidcom( - 'get', 'host_grp', '-port', port) - for gid in _HOST_GROUPS_PATTERN.findall(result[1]): - result = self.run_raidcom( - 'get', 'hba_wwn', '-port', '-'.join([port, gid])) - wwpns = wwpns_pattern.findall(result[1]) - if wwpns: - targets['info'][port] = True - targets['list'].append((port, gid)) - LOG.debug( - 'Found wwpns in host group. (port: %(port)s, ' - 'gid: %(gid)s, wwpns: %(wwpns)s)', - {'port': port, 'gid': gid, 'wwpns': wwpns}) - break - else: - nr_not_found += 1 - - return nr_not_found - - @fczm_utils.add_fc_zone - def initialize_connection(self, volume, connector): - """Initialize connection between the server and the volume.""" - conn_info = super(VSPHORCMFC, self).initialize_connection( - volume, connector) - if self.conf.vsp_zoning_request: - utils.update_conn_info(conn_info, connector, self._lookup_service) - return conn_info - - @fczm_utils.remove_fc_zone - def terminate_connection(self, volume, connector): - """Terminate connection between the server and the volume.""" - conn_info = super(VSPHORCMFC, self).terminate_connection( - volume, connector) - if self.conf.vsp_zoning_request and ( - conn_info and conn_info['data']['target_wwn']): - utils.update_conn_info(conn_info, connector, self._lookup_service) - return conn_info diff --git a/cinder/volume/drivers/hitachi/vsp_horcm_iscsi.py b/cinder/volume/drivers/hitachi/vsp_horcm_iscsi.py deleted file mode 100644 index 1b652fae5..000000000 --- a/cinder/volume/drivers/hitachi/vsp_horcm_iscsi.py +++ /dev/null @@ -1,191 +0,0 @@ -# Copyright (C) 2016, Hitachi, Ltd. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. 
You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -# -"""HORCM interface iSCSI module for Hitachi VSP Driver.""" - -import re - -from oslo_log import log as logging - -from cinder import exception -from cinder.volume.drivers.hitachi import vsp_horcm as horcm -from cinder.volume.drivers.hitachi import vsp_utils as utils - -_ISCSI_LINUX_MODE_OPTS = ['-host_mode', 'LINUX'] -_ISCSI_HOST_MODE_OPT = '-host_mode_opt' -_ISCSI_HMO_REPORT_FULL_PORTAL = 83 -_ISCSI_TARGETS_PATTERN = re.compile( - (r"^CL\w-\w+ +(?P\d+) +%s(?!pair00 )\S* +(?P\S+) +" - r"\w+ +\w +\d+ ") % utils.TARGET_PREFIX, re.M) -_ISCSI_PORT_PATTERN = re.compile( - r"^(CL\w-\w)\w* +ISCSI +TAR +\w+ +\w+ +\w +\w+ +Y ", re.M) -_ISCSI_IPV4_ADDR_PATTERN = re.compile( - r"^IPV4_ADDR +: +(?P\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3})$", re.M) -_ISCSI_TCP_PORT_PATTERN = re.compile( - r'^TCP_PORT\ +:\ +(?P\d+)$', re.M) - -LOG = logging.getLogger(__name__) -MSG = utils.VSPMsg - - -class VSPHORCMISCSI(horcm.VSPHORCM): - """HORCM interface iscsi class for Hitachi VSP Driver.""" - - def connect_storage(self): - """Prepare for using the storage.""" - target_ports = self.conf.vsp_target_ports - compute_target_ports = self.conf.vsp_compute_target_ports - pair_target_ports = self.conf.vsp_horcm_pair_target_ports - - super(VSPHORCMISCSI, self).connect_storage() - result = self.run_raidcom('get', 'port') - for port in _ISCSI_PORT_PATTERN.findall(result[1]): - if (target_ports and port in target_ports and - self._set_target_portal(port)): - self.storage_info['controller_ports'].append(port) - if (compute_target_ports and port in compute_target_ports and - (port in 
self.storage_info['portals'] or - self._set_target_portal(port))): - self.storage_info['compute_ports'].append(port) - if pair_target_ports and port in pair_target_ports: - self.storage_info['pair_ports'].append(port) - - self.check_ports_info() - if pair_target_ports and not self.storage_info['pair_ports']: - msg = utils.output_log(MSG.RESOURCE_NOT_FOUND, - resource="Pair target ports") - raise exception.VSPError(msg) - utils.output_log(MSG.SET_CONFIG_VALUE, - object='pair target port list', - value=self.storage_info['pair_ports']) - utils.output_log(MSG.SET_CONFIG_VALUE, - object='port- list', - value=self.storage_info['portals']) - - def _set_target_portal(self, port): - """Get port info and store it in an instance variable.""" - ipv4_addr = None - tcp_port = None - result = self.run_raidcom( - 'get', 'port', '-port', port, '-key', 'opt') - match = _ISCSI_IPV4_ADDR_PATTERN.search(result[1]) - if match: - ipv4_addr = match.group('ipv4_addr') - match = _ISCSI_TCP_PORT_PATTERN.search(result[1]) - if match: - tcp_port = match.group('tcp_port') - if not ipv4_addr or not tcp_port: - return False - self.storage_info['portals'][port] = ':'.join( - [ipv4_addr, tcp_port]) - return True - - def create_target_to_storage(self, port, connector, hba_ids): - """Create an iSCSI target on the specified port.""" - target_name = utils.TARGET_PREFIX + connector['ip'] - args = [ - 'add', 'host_grp', '-port', port, '-host_grp_name', target_name] - if hba_ids: - args.extend(['-iscsi_name', hba_ids + utils.TARGET_IQN_SUFFIX]) - try: - result = self.run_raidcom(*args) - except exception.VSPError: - result = self.run_raidcom('get', 'host_grp', '-port', port) - hostgroup_pt = re.compile( - r"^CL\w-\w+ +(?P\d+) +%s +\S+ " % - target_name.replace('.', r'\.'), re.M) - gid = hostgroup_pt.findall(result[1]) - if gid: - return target_name, gid[0] - else: - raise - return target_name, horcm.find_value(result[1], 'gid') - - def set_hba_ids(self, port, gid, hba_ids): - """Connect the specified HBA 
with the specified port.""" - self.run_raidcom( - 'add', 'hba_iscsi', '-port', '-'.join([port, gid]), - '-hba_iscsi_name', hba_ids) - - def set_target_mode(self, port, gid): - """Configure the iSCSI target to meet the environment.""" - hostmode_setting = [] - hostmode_setting[:] = _ISCSI_LINUX_MODE_OPTS - hostmode_setting.append(_ISCSI_HOST_MODE_OPT) - hostmode_setting.append(_ISCSI_HMO_REPORT_FULL_PORTAL) - self.run_raidcom( - 'modify', 'host_grp', '-port', - '-'.join([port, gid]), *hostmode_setting) - - def find_targets_from_storage(self, targets, connector, target_ports): - """Find mapped ports, memorize them and return unmapped port count.""" - nr_not_found = 0 - target_name = utils.TARGET_PREFIX + connector['ip'] - success_code = horcm.HORCM_EXIT_CODE.union([horcm.EX_ENOOBJ]) - iqn = self.get_hba_ids_from_connector(connector) - iqn_pattern = re.compile( - r'^CL\w-\w+ +\d+ +\S+ +%s ' % iqn, re.M) - - for port in target_ports: - targets['info'][port] = False - - result = self.run_raidcom( - 'get', 'hba_iscsi', '-port', port, target_name, - success_code=success_code) - if iqn_pattern.search(result[1]): - gid = result[1].splitlines()[1].split()[1] - targets['info'][port] = True - targets['list'].append((port, gid)) - continue - - result = self.run_raidcom( - 'get', 'host_grp', '-port', port) - for gid, iqn in _ISCSI_TARGETS_PATTERN.findall(result[1]): - result = self.run_raidcom( - 'get', 'hba_iscsi', '-port', '-'.join([port, gid])) - if iqn_pattern.search(result[1]): - targets['info'][port] = True - targets['list'].append((port, gid)) - targets['iqns'][(port, gid)] = iqn - break - else: - nr_not_found += 1 - - return nr_not_found - - def get_properties_iscsi(self, targets, multipath): - """Check if specified iSCSI targets exist and store their IQNs.""" - if not multipath: - target_list = targets['list'][:1] - else: - target_list = targets['list'][:] - - for target in target_list: - if target not in targets['iqns']: - port, gid = target - result = 
self.run_raidcom('get', 'host_grp', '-port', port) - match = re.search( - r"^CL\w-\w+ +%s +\S+ +(?P\S+) +\w+ +\w +\d+ " % gid, - result[1], re.M) - if not match: - msg = utils.output_log(MSG.RESOURCE_NOT_FOUND, - resource='Target IQN') - raise exception.VSPError(msg) - targets['iqns'][target] = match.group('iqn') - LOG.debug('Found iqn of the iSCSI target. (port: %(port)s, ' - 'gid: %(gid)s, target iqn: %(iqn)s)', - {'port': port, 'gid': gid, - 'iqn': match.group('iqn')}) - return super(VSPHORCMISCSI, self).get_properties_iscsi( - targets, multipath) diff --git a/cinder/volume/drivers/hitachi/vsp_iscsi.py b/cinder/volume/drivers/hitachi/vsp_iscsi.py deleted file mode 100644 index 521413a31..000000000 --- a/cinder/volume/drivers/hitachi/vsp_iscsi.py +++ /dev/null @@ -1,188 +0,0 @@ -# Copyright (C) 2016, Hitachi, Ltd. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
-# -"""iSCSI module for Hitachi VSP Driver.""" - -from oslo_config import cfg - -from cinder import interface -from cinder.volume import configuration -from cinder.volume import driver -from cinder.volume.drivers.hitachi import vsp_common as common -from cinder.volume.drivers.hitachi import vsp_utils as utils - -iscsi_opts = [ - cfg.BoolOpt( - 'vsp_use_chap_auth', - default=False, - help='If True, CHAP authentication will be applied to communication ' - 'between hosts and any of the iSCSI targets on the storage ports.'), - cfg.StrOpt( - 'vsp_auth_user', - help='Name of the user used for CHAP authentication performed in ' - 'communication between hosts and iSCSI targets on the storage ports.'), - cfg.StrOpt( - 'vsp_auth_password', - secret=True, - help='Password corresponding to vsp_auth_user.'), -] - -MSG = utils.VSPMsg - -_DRIVER_INFO = { - 'proto': 'iSCSI', - 'hba_id': 'initiator', - 'hba_id_type': 'iSCSI initiator IQN', - 'msg_id': { - 'target': MSG.CREATE_ISCSI_TARGET_FAILED, - }, - 'volume_backend_name': utils.DRIVER_PREFIX + 'iSCSI', - 'volume_opts': iscsi_opts, - 'volume_type': 'iscsi', -} - -CONF = cfg.CONF -CONF.register_opts(iscsi_opts, group=configuration.SHARED_CONF_GROUP) - - -@interface.volumedriver -class VSPISCSIDriver(driver.ISCSIDriver): - """iSCSI class for Hitachi VSP Driver. - - Version history: - - .. code-block:: none - - 1.0.0 - Initial driver. 
- - """ - - VERSION = common.VERSION - - # ThirdPartySystems wiki page - CI_WIKI_NAME = "Hitachi_VSP_CI" - - SUPPORTED = False - - def __init__(self, *args, **kwargs): - """Initialize instance variables.""" - utils.output_log(MSG.DRIVER_INITIALIZATION_START, - driver=self.__class__.__name__, - version=self.get_version()) - super(VSPISCSIDriver, self).__init__(*args, **kwargs) - - self.configuration.append_config_values(common.common_opts) - self.configuration.append_config_values(iscsi_opts) - self.common = utils.import_object( - self.configuration, _DRIVER_INFO, kwargs.get('db')) - - def check_for_setup_error(self): - """Error are checked in do_setup() instead of this method.""" - pass - - @utils.output_start_end_log - def create_volume(self, volume): - """Create a volume and return its properties.""" - return self.common.create_volume(volume) - - @utils.output_start_end_log - def create_volume_from_snapshot(self, volume, snapshot): - """Create a volume from a snapshot and return its properties.""" - return self.common.create_volume_from_snapshot(volume, snapshot) - - @utils.output_start_end_log - def create_cloned_volume(self, volume, src_vref): - """Create a clone of the specified volume and return its properties.""" - return self.common.create_cloned_volume(volume, src_vref) - - @utils.output_start_end_log - def delete_volume(self, volume): - """Delete the specified volume.""" - self.common.delete_volume(volume) - - @utils.output_start_end_log - def create_snapshot(self, snapshot): - """Create a snapshot from a volume and return its properties.""" - return self.common.create_snapshot(snapshot) - - @utils.output_start_end_log - def delete_snapshot(self, snapshot): - """Delete the specified snapshot.""" - self.common.delete_snapshot(snapshot) - - def get_volume_stats(self, refresh=False): - """Return properties, capabilities and current states of the driver.""" - return self.common.get_volume_stats(refresh) - - @utils.output_start_end_log - def 
update_migrated_volume( - self, ctxt, volume, new_volume, original_volume_status): - """Do any remaining jobs after migration.""" - self.common.discard_zero_page(new_volume) - super(VSPISCSIDriver, self).update_migrated_volume( - ctxt, volume, new_volume, original_volume_status) - - @utils.output_start_end_log - def copy_image_to_volume(self, context, volume, image_service, image_id): - """Fetch the image from image_service and write it to the volume.""" - super(VSPISCSIDriver, self).copy_image_to_volume( - context, volume, image_service, image_id) - self.common.discard_zero_page(volume) - - @utils.output_start_end_log - def extend_volume(self, volume, new_size): - """Extend the specified volume to the specified size.""" - self.common.extend_volume(volume, new_size) - - @utils.output_start_end_log - def manage_existing(self, volume, existing_ref): - """Return volume properties which Cinder needs to manage the volume.""" - return self.common.manage_existing(existing_ref) - - @utils.output_start_end_log - def manage_existing_get_size(self, volume, existing_ref): - """Return the size[GB] of the specified volume.""" - return self.common.manage_existing_get_size(existing_ref) - - @utils.output_start_end_log - def unmanage(self, volume): - """Prepare the volume for removing it from Cinder management.""" - self.common.unmanage(volume) - - @utils.output_start_end_log - def do_setup(self, context): - """Prepare for the startup of the driver.""" - self.common.do_setup(context) - - def ensure_export(self, context, volume): - """Synchronously recreate an export for a volume.""" - pass - - def create_export(self, context, volume, connector): - """Export the volume.""" - pass - - def remove_export(self, context, volume): - """Remove an export for a volume.""" - pass - - @utils.output_start_end_log - def initialize_connection(self, volume, connector): - """Initialize connection between the server and the volume.""" - return self.common.initialize_connection(volume, connector) - - 
@utils.output_start_end_log - def terminate_connection(self, volume, connector, **kwargs): - """Terminate connection between the server and the volume.""" - self.common.terminate_connection(volume, connector) diff --git a/cinder/volume/drivers/hitachi/vsp_utils.py b/cinder/volume/drivers/hitachi/vsp_utils.py deleted file mode 100644 index 93c887a85..000000000 --- a/cinder/volume/drivers/hitachi/vsp_utils.py +++ /dev/null @@ -1,667 +0,0 @@ -# Copyright (C) 2016, Hitachi, Ltd. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
-# -"""Utility module for Hitachi VSP Driver.""" - -import functools -import inspect -import logging as base_logging -import os -import re - -import enum -from oslo_concurrency import processutils as putils -from oslo_config import cfg -from oslo_log import log as logging -from oslo_utils import excutils -from oslo_utils import importutils -from oslo_utils import strutils -from oslo_utils import timeutils -from oslo_utils import units -import six - -from cinder import exception -from cinder import utils as cinder_utils - - -_DRIVER_DIR = 'cinder.volume.drivers.hitachi' - -_DRIVERS = { - 'HORCM': { - 'FC': 'vsp_horcm_fc.VSPHORCMFC', - 'iSCSI': 'vsp_horcm_iscsi.VSPHORCMISCSI', - }, -} - -DRIVER_PREFIX = 'VSP' -TARGET_PREFIX = 'HBSD-' -TARGET_IQN_SUFFIX = '.hbsd-target' -GIGABYTE_PER_BLOCK_SIZE = units.Gi / 512 - -MAX_PROCESS_WAITTIME = 24 * 60 * 60 -DEFAULT_PROCESS_WAITTIME = 15 * 60 - -NORMAL_LDEV_TYPE = 'Normal' -NVOL_LDEV_TYPE = 'DP-VOL' - -FULL = 'Full copy' -THIN = 'Thin copy' - -INFO_SUFFIX = 'I' -WARNING_SUFFIX = 'W' -ERROR_SUFFIX = 'E' - -PORT_ID_LENGTH = 5 - - -@enum.unique -class VSPMsg(enum.Enum): - """messages for Hitachi VSP Driver.""" - - METHOD_START = { - 'msg_id': 0, - 'loglevel': base_logging.INFO, - 'msg': '%(method)s starts. (config_group: %(config_group)s)', - 'suffix': INFO_SUFFIX - } - OUTPUT_PARAMETER_VALUES = { - 'msg_id': 1, - 'loglevel': base_logging.INFO, - 'msg': 'The parameter of the storage backend. (config_group: ' - '%(config_group)s)', - 'suffix': INFO_SUFFIX - } - METHOD_END = { - 'msg_id': 2, - 'loglevel': base_logging.INFO, - 'msg': '%(method)s ended. (config_group: %(config_group)s)', - 'suffix': INFO_SUFFIX - } - DRIVER_READY_FOR_USE = { - 'msg_id': 3, - 'loglevel': base_logging.INFO, - 'msg': 'The storage backend can be used. 
(config_group: ' - '%(config_group)s)', - 'suffix': INFO_SUFFIX - } - DRIVER_INITIALIZATION_START = { - 'msg_id': 4, - 'loglevel': base_logging.INFO, - 'msg': 'Initialization of %(driver)s %(version)s started.', - 'suffix': INFO_SUFFIX - } - SET_CONFIG_VALUE = { - 'msg_id': 5, - 'loglevel': base_logging.INFO, - 'msg': 'Set %(object)s to %(value)s.', - 'suffix': INFO_SUFFIX - } - OBJECT_CREATED = { - 'msg_id': 6, - 'loglevel': base_logging.INFO, - 'msg': 'Created %(object)s. (%(details)s)', - 'suffix': INFO_SUFFIX - } - INVALID_LDEV_FOR_UNMAPPING = { - 'msg_id': 302, - 'loglevel': base_logging.WARNING, - 'msg': 'Failed to specify a logical device for the volume ' - '%(volume_id)s to be unmapped.', - 'suffix': WARNING_SUFFIX - } - INVALID_LDEV_FOR_DELETION = { - 'msg_id': 304, - 'loglevel': base_logging.WARNING, - 'msg': 'Failed to specify a logical device to be deleted. ' - '(method: %(method)s, id: %(id)s)', - 'suffix': WARNING_SUFFIX - } - DELETE_TARGET_FAILED = { - 'msg_id': 306, - 'loglevel': base_logging.WARNING, - 'msg': 'A host group or an iSCSI target could not be deleted. ' - '(port: %(port)s, gid: %(id)s)', - 'suffix': WARNING_SUFFIX - } - CREATE_HOST_GROUP_FAILED = { - 'msg_id': 308, - 'loglevel': base_logging.WARNING, - 'msg': 'A host group could not be added. (port: %(port)s)', - 'suffix': WARNING_SUFFIX - } - CREATE_ISCSI_TARGET_FAILED = { - 'msg_id': 309, - 'loglevel': base_logging.WARNING, - 'msg': 'An iSCSI target could not be added. (port: %(port)s)', - 'suffix': WARNING_SUFFIX - } - UNMAP_LDEV_FAILED = { - 'msg_id': 310, - 'loglevel': base_logging.WARNING, - 'msg': 'Failed to unmap a logical device. (LDEV: %(ldev)s)', - 'suffix': WARNING_SUFFIX - } - DELETE_LDEV_FAILED = { - 'msg_id': 313, - 'loglevel': base_logging.WARNING, - 'msg': 'Failed to delete a logical device. (LDEV: %(ldev)s)', - 'suffix': WARNING_SUFFIX - } - MAP_LDEV_FAILED = { - 'msg_id': 314, - 'loglevel': base_logging.WARNING, - 'msg': 'Failed to map a logical device. 
(LDEV: %(ldev)s, port: ' - '%(port)s, id: %(id)s, lun: %(lun)s)', - 'suffix': WARNING_SUFFIX - } - DISCARD_ZERO_PAGE_FAILED = { - 'msg_id': 315, - 'loglevel': base_logging.WARNING, - 'msg': 'Failed to perform a zero-page reclamation. (LDEV: ' - '%(ldev)s)', - 'suffix': WARNING_SUFFIX - } - ADD_HBA_WWN_FAILED = { - 'msg_id': 317, - 'loglevel': base_logging.WARNING, - 'msg': 'Failed to assign the WWN. (port: %(port)s, gid: %(gid)s, ' - 'wwn: %(wwn)s)', - 'suffix': WARNING_SUFFIX - } - LDEV_NOT_EXIST = { - 'msg_id': 319, - 'loglevel': base_logging.WARNING, - 'msg': 'The logical device does not exist in the storage system. ' - '(LDEV: %(ldev)s)', - 'suffix': WARNING_SUFFIX - } - HORCM_START_FAILED = { - 'msg_id': 320, - 'loglevel': base_logging.WARNING, - 'msg': 'Failed to start HORCM. (inst: %(inst)s)', - 'suffix': WARNING_SUFFIX - } - HORCM_RESTART_FOR_SI_FAILED = { - 'msg_id': 322, - 'loglevel': base_logging.WARNING, - 'msg': 'Failed to reload the configuration of full copy pair. ' - '(inst: %(inst)s)', - 'suffix': WARNING_SUFFIX - } - HORCM_LOGIN_FAILED = { - 'msg_id': 323, - 'loglevel': base_logging.WARNING, - 'msg': 'Failed to perform user authentication of HORCM. ' - '(user: %(user)s)', - 'suffix': WARNING_SUFFIX - } - DELETE_SI_PAIR_FAILED = { - 'msg_id': 324, - 'loglevel': base_logging.WARNING, - 'msg': 'Failed to delete full copy pair. (P-VOL: %(pvol)s, S-VOL: ' - '%(svol)s)', - 'suffix': WARNING_SUFFIX - } - DELETE_TI_PAIR_FAILED = { - 'msg_id': 325, - 'loglevel': base_logging.WARNING, - 'msg': 'Failed to delete thin copy pair. (P-VOL: %(pvol)s, S-VOL: ' - '%(svol)s)', - 'suffix': WARNING_SUFFIX - } - WAIT_SI_PAIR_STATUS_FAILED = { - 'msg_id': 326, - 'loglevel': base_logging.WARNING, - 'msg': 'Failed to change the status of full copy pair. (P-VOL: ' - '%(pvol)s, S-VOL: %(svol)s)', - 'suffix': WARNING_SUFFIX - } - DELETE_DEVICE_GRP_FAILED = { - 'msg_id': 327, - 'loglevel': base_logging.WARNING, - 'msg': 'Failed to delete the configuration of full copy pair. 
' - '(P-VOL: %(pvol)s, S-VOL: %(svol)s)', - 'suffix': WARNING_SUFFIX - } - DISCONNECT_VOLUME_FAILED = { - 'msg_id': 329, - 'loglevel': base_logging.WARNING, - 'msg': 'Failed to detach the logical device. (LDEV: %(ldev)s, ' - 'reason: %(reason)s)', - 'suffix': WARNING_SUFFIX - } - STORAGE_COMMAND_FAILED = { - 'msg_id': 600, - 'loglevel': base_logging.ERROR, - 'msg': 'The command %(cmd)s failed. (ret: %(ret)s, stdout: ' - '%(out)s, stderr: %(err)s)', - 'suffix': ERROR_SUFFIX - } - INVALID_PARAMETER = { - 'msg_id': 601, - 'loglevel': base_logging.ERROR, - 'msg': 'A parameter is invalid. (%(param)s)', - 'suffix': ERROR_SUFFIX - } - INVALID_PARAMETER_VALUE = { - 'msg_id': 602, - 'loglevel': base_logging.ERROR, - 'msg': 'A parameter value is invalid. (%(meta)s)', - 'suffix': ERROR_SUFFIX - } - HORCM_SHUTDOWN_FAILED = { - 'msg_id': 608, - 'loglevel': base_logging.ERROR, - 'msg': 'Failed to shutdown HORCM. (inst: %(inst)s)', - 'suffix': ERROR_SUFFIX - } - HORCM_RESTART_FAILED = { - 'msg_id': 609, - 'loglevel': base_logging.ERROR, - 'msg': 'Failed to restart HORCM. (inst: %(inst)s)', - 'suffix': ERROR_SUFFIX - } - SI_PAIR_STATUS_WAIT_TIMEOUT = { - 'msg_id': 610, - 'loglevel': base_logging.ERROR, - 'msg': 'The status change of full copy pair could not be ' - 'completed. (S-VOL: %(svol)s)', - 'suffix': ERROR_SUFFIX - } - TI_PAIR_STATUS_WAIT_TIMEOUT = { - 'msg_id': 611, - 'loglevel': base_logging.ERROR, - 'msg': 'The status change of thin copy pair could not be ' - 'completed. (S-VOL: %(svol)s)', - 'suffix': ERROR_SUFFIX - } - INVALID_LDEV_STATUS_FOR_COPY = { - 'msg_id': 612, - 'loglevel': base_logging.ERROR, - 'msg': 'The source logical device to be replicated does not exist ' - 'in the storage system. 
(LDEV: %(ldev)s)', - 'suffix': ERROR_SUFFIX - } - INVALID_LDEV_FOR_EXTENSION = { - 'msg_id': 613, - 'loglevel': base_logging.ERROR, - 'msg': 'The volume %(volume_id)s to be extended was not found.', - 'suffix': ERROR_SUFFIX - } - NO_HBA_WWN_ADDED_TO_HOST_GRP = { - 'msg_id': 614, - 'loglevel': base_logging.ERROR, - 'msg': 'No WWN is assigned. (port: %(port)s, gid: %(gid)s)', - 'suffix': ERROR_SUFFIX - } - NO_AVAILABLE_MIRROR_UNIT = { - 'msg_id': 615, - 'loglevel': base_logging.ERROR, - 'msg': 'A pair could not be created. The maximum number of pair ' - 'is exceeded. (copy method: %(copy_method)s, P-VOL: ' - '%(pvol)s)', - 'suffix': ERROR_SUFFIX - } - UNABLE_TO_DELETE_PAIR = { - 'msg_id': 616, - 'loglevel': base_logging.ERROR, - 'msg': 'A pair cannot be deleted. (P-VOL: %(pvol)s, S-VOL: ' - '%(svol)s)', - 'suffix': ERROR_SUFFIX - } - INVALID_VOLUME_SIZE_FOR_COPY = { - 'msg_id': 617, - 'loglevel': base_logging.ERROR, - 'msg': 'Failed to create a volume from a %(type)s. The size of ' - 'the new volume must be equal to or greater than the size ' - 'of the original %(type)s. (new volume: %(volume_id)s)', - 'suffix': ERROR_SUFFIX - } - INVALID_VOLUME_TYPE_FOR_EXTEND = { - 'msg_id': 618, - 'loglevel': base_logging.ERROR, - 'msg': 'The volume %(volume_id)s could not be extended. The ' - 'volume type must be Normal.', - 'suffix': ERROR_SUFFIX - } - INVALID_LDEV_FOR_CONNECTION = { - 'msg_id': 619, - 'loglevel': base_logging.ERROR, - 'msg': 'The volume %(volume_id)s to be mapped was not found.', - 'suffix': ERROR_SUFFIX - } - POOL_INFO_RETRIEVAL_FAILED = { - 'msg_id': 620, - 'loglevel': base_logging.ERROR, - 'msg': 'Failed to provide information about a pool. (pool: ' - '%(pool)s)', - 'suffix': ERROR_SUFFIX - } - INVALID_VOLUME_SIZE_FOR_TI = { - 'msg_id': 621, - 'loglevel': base_logging.ERROR, - 'msg': 'Failed to create a volume from a %(type)s. 
The size of ' - 'the new volume must be equal to the size of the original ' - '%(type)s when the new volume is created by ' - '%(copy_method)s. (new volume: %(volume_id)s)', - 'suffix': ERROR_SUFFIX - } - INVALID_LDEV_FOR_VOLUME_COPY = { - 'msg_id': 624, - 'loglevel': base_logging.ERROR, - 'msg': 'The %(type)s %(id)s source to be replicated was not ' - 'found.', - 'suffix': ERROR_SUFFIX - } - CREATE_HORCM_CONF_FILE_FAILED = { - 'msg_id': 632, - 'loglevel': base_logging.ERROR, - 'msg': 'Failed to open a file. (file: %(file)s, ret: %(ret)s, ' - 'stderr: %(err)s)', - 'suffix': ERROR_SUFFIX - } - CONNECT_VOLUME_FAILED = { - 'msg_id': 634, - 'loglevel': base_logging.ERROR, - 'msg': 'Failed to attach the logical device. (LDEV: %(ldev)s, ' - 'reason: %(reason)s)', - 'suffix': ERROR_SUFFIX - } - CREATE_LDEV_FAILED = { - 'msg_id': 636, - 'loglevel': base_logging.ERROR, - 'msg': 'Failed to add the logical device.', - 'suffix': ERROR_SUFFIX - } - ADD_PAIR_TARGET_FAILED = { - 'msg_id': 638, - 'loglevel': base_logging.ERROR, - 'msg': 'Failed to add the pair target.', - 'suffix': ERROR_SUFFIX - } - NO_MAPPING_FOR_LDEV = { - 'msg_id': 639, - 'loglevel': base_logging.ERROR, - 'msg': 'Failed to map a logical device to any pair targets. ' - '(LDEV: %(ldev)s)', - 'suffix': ERROR_SUFFIX - } - POOL_NOT_FOUND = { - 'msg_id': 640, - 'loglevel': base_logging.ERROR, - 'msg': 'A pool could not be found. (pool: %(pool)s)', - 'suffix': ERROR_SUFFIX - } - NO_AVAILABLE_RESOURCE = { - 'msg_id': 648, - 'loglevel': base_logging.ERROR, - 'msg': 'There are no resources available for use. 
(resource: ' - '%(resource)s)', - 'suffix': ERROR_SUFFIX - } - NO_CONNECTED_TARGET = { - 'msg_id': 649, - 'loglevel': base_logging.ERROR, - 'msg': 'The host group or iSCSI target was not found.', - 'suffix': ERROR_SUFFIX - } - RESOURCE_NOT_FOUND = { - 'msg_id': 650, - 'loglevel': base_logging.ERROR, - 'msg': 'The resource %(resource)s was not found.', - 'suffix': ERROR_SUFFIX - } - LDEV_DELETION_WAIT_TIMEOUT = { - 'msg_id': 652, - 'loglevel': base_logging.ERROR, - 'msg': 'Failed to delete a logical device. (LDEV: %(ldev)s)', - 'suffix': ERROR_SUFFIX - } - LDEV_CREATION_WAIT_TIMEOUT = { - 'msg_id': 653, - 'loglevel': base_logging.ERROR, - 'msg': 'The creation of a logical device could not be completed. ' - '(LDEV: %(ldev)s)', - 'suffix': ERROR_SUFFIX - } - INVALID_LDEV_ATTR_FOR_MANAGE = { - 'msg_id': 702, - 'loglevel': base_logging.ERROR, - 'msg': 'Failed to manage the specified LDEV (%(ldev)s). The LDEV ' - 'must be an unpaired %(ldevtype)s.', - 'suffix': ERROR_SUFFIX - } - INVALID_LDEV_SIZE_FOR_MANAGE = { - 'msg_id': 703, - 'loglevel': base_logging.ERROR, - 'msg': 'Failed to manage the specified LDEV (%(ldev)s). The LDEV ' - 'size must be expressed in gigabytes.', - 'suffix': ERROR_SUFFIX - } - INVALID_LDEV_PORT_FOR_MANAGE = { - 'msg_id': 704, - 'loglevel': base_logging.ERROR, - 'msg': 'Failed to manage the specified LDEV (%(ldev)s). The LDEV ' - 'must not be mapped.', - 'suffix': ERROR_SUFFIX - } - INVALID_LDEV_TYPE_FOR_UNMANAGE = { - 'msg_id': 706, - 'loglevel': base_logging.ERROR, - 'msg': 'Failed to unmanage the volume %(volume_id)s. The volume ' - 'type must be %(volume_type)s.', - 'suffix': ERROR_SUFFIX - } - INVALID_LDEV_FOR_MANAGE = { - 'msg_id': 707, - 'loglevel': base_logging.ERROR, - 'msg': 'No valid value is specified for "source-id". 
A valid LDEV ' - 'number must be specified in "source-id" to manage the ' - 'volume.', - 'suffix': ERROR_SUFFIX - } - VOLUME_COPY_FAILED = { - 'msg_id': 722, - 'loglevel': base_logging.ERROR, - 'msg': 'Failed to copy a volume. (copy method: %(copy_method)s, ' - 'P-VOL: %(pvol)s, S-VOL: %(svol)s)', - 'suffix': ERROR_SUFFIX - } - - def __init__(self, error_info): - """Initialize Enum attributes.""" - self.msg_id = error_info['msg_id'] - self.level = error_info['loglevel'] - self.msg = error_info['msg'] - self.suffix = error_info['suffix'] - - def output_log(self, **kwargs): - """Output the message to the log file and return the message.""" - msg = self.msg % kwargs - LOG.log(self.level, "MSGID%(msg_id)04d-%(msg_suffix)s: %(msg)s", - {'msg_id': self.msg_id, 'msg_suffix': self.suffix, 'msg': msg}) - return msg - - -def output_log(msg_enum, **kwargs): - """Output the specified message to the log file and return the message.""" - return msg_enum.output_log(**kwargs) - -LOG = logging.getLogger(__name__) -MSG = VSPMsg - - -def output_start_end_log(func): - """Output the log of the start and the end of the method.""" - @functools.wraps(func) - def wrap(self, *args, **kwargs): - """Wrap the method to add logging function.""" - def _output_start_end_log(*_args, **_kwargs): - """Output the log of the start and the end of the method.""" - output_log(MSG.METHOD_START, - method=func.__name__, - config_group=self.configuration.config_group) - ret = func(*_args, **_kwargs) - output_log(MSG.METHOD_END, - method=func.__name__, - config_group=self.configuration.config_group) - return ret - return _output_start_end_log(self, *args, **kwargs) - return wrap - - -def get_ldev(obj): - """Get the LDEV number from the given object and return it as integer.""" - if not obj: - return None - ldev = obj.get('provider_location') - if not ldev or not ldev.isdigit(): - return None - return int(ldev) - - -def check_timeout(start_time, timeout): - """Return True if the specified time has passed, 
False otherwise.""" - return timeutils.is_older_than(start_time, timeout) - - -def mask_password(cmd): - """Return a string in which the password is masked.""" - if len(cmd) > 3 and cmd[0] == 'raidcom' and cmd[1] == '-login': - tmp = list(cmd) - tmp[3] = strutils.mask_dict_password({'password': ''}).get('password') - else: - tmp = cmd - return ' '.join([six.text_type(c) for c in tmp]) - - -def execute(*cmd, **kwargs): - """Run the specified command and return its results.""" - process_input = kwargs.pop('process_input', None) - run_as_root = kwargs.pop('run_as_root', True) - ret = 0 - try: - if len(cmd) > 3 and cmd[0] == 'raidcom' and cmd[1] == '-login': - stdout, stderr = cinder_utils.execute( - *cmd, process_input=process_input, run_as_root=run_as_root, - loglevel=base_logging.NOTSET)[:2] - else: - stdout, stderr = cinder_utils.execute( - *cmd, process_input=process_input, run_as_root=run_as_root)[:2] - except putils.ProcessExecutionError as ex: - ret = ex.exit_code - stdout = ex.stdout - stderr = ex.stderr - LOG.debug('cmd: %s', mask_password(cmd)) - LOG.debug('from: %s', inspect.stack()[2]) - LOG.debug('ret: %s', ret) - LOG.debug('stdout: %s', ' '.join(stdout.splitlines())) - LOG.debug('stderr: %s', ' '.join(stderr.splitlines())) - return ret, stdout, stderr - - -def import_object(conf, driver_info, db): - """Import a class and return an instance of it.""" - os.environ['LANG'] = 'C' - cli = _DRIVERS.get('HORCM') - return importutils.import_object( - '.'.join([_DRIVER_DIR, cli[driver_info['proto']]]), - conf, driver_info, db) - - -def check_ignore_error(ignore_error, stderr): - """Return True if ignore_error is in stderr, False otherwise.""" - if not ignore_error or not stderr: - return False - if not isinstance(ignore_error, six.string_types): - ignore_error = '|'.join(ignore_error) - - if re.search(ignore_error, stderr): - return True - return False - - -def check_opts(conf, opts): - """Check if the specified configuration is valid.""" - names = [] - for opt 
in opts: - names.append(opt.name) - check_opt_value(conf, names) - - -def check_opt_value(conf, names): - """Check if the parameter names and values are valid.""" - for name in names: - try: - getattr(conf, name) - except (cfg.NoSuchOptError, cfg.ConfigFileValueError): - with excutils.save_and_reraise_exception(): - output_log(MSG.INVALID_PARAMETER, param=name) - - -def output_storage_cli_info(name, version): - """Output storage CLI info to the log file.""" - LOG.info('\t%(name)-35s%(version)s', - {'name': name + ' version: ', 'version': version}) - - -def output_opt_info(conf, names): - """Output parameter names and values to the log file.""" - for name in names: - LOG.info('\t%(name)-35s%(attr)s', - {'name': name + ': ', 'attr': getattr(conf, name)}) - - -def output_opts(conf, opts): - """Output parameter names and values to the log file.""" - names = [opt.name for opt in opts if not opt.secret] - output_opt_info(conf, names) - - -def require_target_existed(targets): - """Check if the target list includes one or more members.""" - if not targets['list']: - msg = output_log(MSG.NO_CONNECTED_TARGET) - raise exception.VSPError(msg) - - -def get_volume_metadata(volume): - """Return a dictionary of the metadata of the specified volume.""" - volume_metadata = volume.get('volume_metadata', {}) - return {item['key']: item['value'] for item in volume_metadata} - - -def update_conn_info(conn_info, connector, lookup_service): - """Set wwn mapping list to the connection info.""" - init_targ_map = build_initiator_target_map( - connector, conn_info['data']['target_wwn'], lookup_service) - if init_targ_map: - conn_info['data']['initiator_target_map'] = init_targ_map - - -def build_initiator_target_map(connector, target_wwns, lookup_service): - """Return a dictionary mapping server-wwns and lists of storage-wwns.""" - init_targ_map = {} - initiator_wwns = connector['wwpns'] - if lookup_service: - dev_map = lookup_service.get_device_mapping_from_network( - initiator_wwns, 
target_wwns) - for fabric_name in dev_map: - fabric = dev_map[fabric_name] - for initiator in fabric['initiator_port_wwn_list']: - init_targ_map[initiator] = fabric['target_port_wwn_list'] - else: - for initiator in initiator_wwns: - init_targ_map[initiator] = target_wwns - return init_targ_map diff --git a/cinder/volume/drivers/hpe/__init__.py b/cinder/volume/drivers/hpe/__init__.py deleted file mode 100644 index e69de29bb..000000000 diff --git a/cinder/volume/drivers/hpe/hpe_3par_common.py b/cinder/volume/drivers/hpe/hpe_3par_common.py deleted file mode 100644 index 259911d2f..000000000 --- a/cinder/volume/drivers/hpe/hpe_3par_common.py +++ /dev/null @@ -1,3973 +0,0 @@ -# (c) Copyright 2012-2016 Hewlett Packard Enterprise Development LP -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -# -""" -Volume driver common utilities for HPE 3PAR Storage array - -The 3PAR drivers requires 3.1.3 firmware on the 3PAR array. - -You will need to install the python hpe3parclient module. -sudo pip install python-3parclient - -The drivers uses both the REST service and the SSH -command line to correctly operate. Since the -ssh credentials and the REST credentials can be different -we need to have settings for both. - -The drivers requires the use of the san_ip, san_login, -san_password settings for ssh connections into the 3PAR -array. 
It also requires the setting of -hpe3par_api_url, hpe3par_username, hpe3par_password -for credentials to talk to the REST service on the 3PAR -array. -""" - -import ast -import json -import math -import pprint -import re -import six -import uuid - -from oslo_serialization import base64 -from oslo_utils import importutils - -hpe3parclient = importutils.try_import("hpe3parclient") -if hpe3parclient: - from hpe3parclient import client - from hpe3parclient import exceptions as hpeexceptions - -from oslo_config import cfg -from oslo_log import log as logging -from oslo_log import versionutils -from oslo_service import loopingcall -from oslo_utils import excutils -from oslo_utils import units - -from cinder import context -from cinder import exception -from cinder import flow_utils -from cinder.i18n import _ -from cinder.objects import fields -from cinder.volume import configuration -from cinder.volume import qos_specs -from cinder.volume import utils as volume_utils -from cinder.volume import volume_types - -import taskflow.engines -from taskflow.patterns import linear_flow - -LOG = logging.getLogger(__name__) - -MIN_CLIENT_VERSION = '4.2.0' -DEDUP_API_VERSION = 30201120 -FLASH_CACHE_API_VERSION = 30201200 -COMPRESSION_API_VERSION = 30301215 -SRSTATLD_API_VERSION = 30201200 -REMOTE_COPY_API_VERSION = 30202290 - -hpe3par_opts = [ - cfg.StrOpt('hpe3par_api_url', - default='', - help="3PAR WSAPI Server Url like " - "https://<3par ip>:8080/api/v1", - deprecated_name='hp3par_api_url'), - cfg.StrOpt('hpe3par_username', - default='', - help="3PAR username with the 'edit' role", - deprecated_name='hp3par_username'), - cfg.StrOpt('hpe3par_password', - default='', - help="3PAR password for the user specified in hpe3par_username", - secret=True, - deprecated_name='hp3par_password'), - cfg.ListOpt('hpe3par_cpg', - default=["OpenStack"], - help="List of the CPG(s) to use for volume creation", - deprecated_name='hp3par_cpg'), - cfg.StrOpt('hpe3par_cpg_snap', - default="", - help="The 
CPG to use for Snapshots for volumes. " - "If empty the userCPG will be used.", - deprecated_name='hp3par_cpg_snap'), - cfg.StrOpt('hpe3par_snapshot_retention', - default="", - help="The time in hours to retain a snapshot. " - "You can't delete it before this expires.", - deprecated_name='hp3par_snapshot_retention'), - cfg.StrOpt('hpe3par_snapshot_expiration', - default="", - help="The time in hours when a snapshot expires " - " and is deleted. This must be larger than expiration", - deprecated_name='hp3par_snapshot_expiration'), - cfg.BoolOpt('hpe3par_debug', - default=False, - help="Enable HTTP debugging to 3PAR", - deprecated_name='hp3par_debug'), - cfg.ListOpt('hpe3par_iscsi_ips', - default=[], - help="List of target iSCSI addresses to use.", - deprecated_name='hp3par_iscsi_ips'), - cfg.BoolOpt('hpe3par_iscsi_chap_enabled', - default=False, - help="Enable CHAP authentication for iSCSI connections.", - deprecated_name='hp3par_iscsi_chap_enabled'), -] - - -CONF = cfg.CONF -CONF.register_opts(hpe3par_opts, group=configuration.SHARED_CONF_GROUP) - -# Input/output (total read/write) operations per second. -THROUGHPUT = 'throughput' -# Data processed (total read/write) per unit time: kilobytes per second. -BANDWIDTH = 'bandwidth' -# Response time (total read/write): microseconds. -LATENCY = 'latency' -# IO size (total read/write): kilobytes. -IO_SIZE = 'io_size' -# Queue length for processing IO requests -QUEUE_LENGTH = 'queue_length' -# Average busy percentage -AVG_BUSY_PERC = 'avg_busy_perc' - - -class HPE3PARCommon(object): - """Class that contains common code for the 3PAR drivers. - - Version history: - - .. 
code-block:: none - - 1.2.0 - Updated hp3parclient API use to 2.0.x - 1.2.1 - Check that the VVS exists - 1.2.2 - log prior to raising exceptions - 1.2.3 - Methods to update key/value pair bug #1258033 - 1.2.4 - Remove deprecated config option hp3par_domain - 1.2.5 - Raise Ex when deleting snapshot with dependencies bug #1250249 - 1.2.6 - Allow optional specifying n:s:p for vlun creation bug #1269515 - This update now requires 3.1.2 MU3 firmware - 1.3.0 - Removed all SSH code. We rely on the hp3parclient now. - 2.0.0 - Update hp3parclient API uses 3.0.x - 2.0.1 - Updated to use qos_specs, added new qos settings and personas - 2.0.2 - Add back-end assisted volume migrate - 2.0.3 - Allow deleting missing snapshots bug #1283233 - 2.0.4 - Allow volumes created from snapshots to be larger bug #1279478 - 2.0.5 - Fix extend volume units bug #1284368 - 2.0.6 - use loopingcall.wait instead of time.sleep - 2.0.7 - Allow extend volume based on snapshot bug #1285906 - 2.0.8 - Fix detach issue for multiple hosts bug #1288927 - 2.0.9 - Remove unused 3PAR driver method bug #1310807 - 2.0.10 - Fixed an issue with 3PAR vlun location bug #1315542 - 2.0.11 - Remove hp3parclient requirement from unit tests #1315195 - 2.0.12 - Volume detach hangs when host is in a host set bug #1317134 - 2.0.13 - Added support for managing/unmanaging of volumes - 2.0.14 - Modified manage volume to use standard 'source-name' element. - 2.0.15 - Added support for volume retype - 2.0.16 - Add a better log during delete_volume time. 
Bug #1349636 - 2.0.17 - Added iSCSI CHAP support - This update now requires 3.1.3 MU1 firmware - and hp3parclient 3.1.0 - 2.0.18 - HP 3PAR manage_existing with volume-type support - 2.0.19 - Update default persona from Generic to Generic-ALUA - 2.0.20 - Configurable SSH missing key policy and known hosts file - 2.0.21 - Remove bogus invalid snapCPG=None exception - 2.0.22 - HP 3PAR drivers should not claim to have 'infinite' space - 2.0.23 - Increase the hostname size from 23 to 31 Bug #1371242 - 2.0.24 - Add pools (hp3par_cpg now accepts a list of CPGs) - 2.0.25 - Migrate without losing type settings bug #1356608 - 2.0.26 - Don't ignore extra-specs snap_cpg when missing cpg #1368972 - 2.0.27 - Fixing manage source-id error bug #1357075 - 2.0.28 - Removing locks bug #1381190 - 2.0.29 - Report a limitless cpg's stats better bug #1398651 - 2.0.30 - Update the minimum hp3parclient version bug #1402115 - 2.0.31 - Removed usage of host name cache #1398914 - 2.0.32 - Update LOG usage to fix translations. bug #1384312 - 2.0.33 - Fix host persona to match WSAPI mapping bug #1403997 - 2.0.34 - Fix log messages to match guidelines. bug #1411370 - 2.0.35 - Fix default snapCPG for manage_existing bug #1393609 - 2.0.36 - Added support for dedup provisioning - 2.0.37 - Added support for enabling Flash Cache - 2.0.38 - Add stats for hp3par goodness_function and filter_function - 2.0.39 - Added support for updated detach_volume attachment. - 2.0.40 - Make the 3PAR drivers honor the pool in create bug #1432876 - 2.0.41 - Only log versions at startup. bug #1447697 - 2.0.42 - Fix type for snapshot config settings. bug #1461640 - 2.0.43 - Report the capability of supporting multiattach - 2.0.44 - Update help strings to reduce the 3PAR user role requirements - 2.0.45 - Python 3 fixes - 2.0.46 - Improved VLUN creation and deletion logic. #1469816 - 2.0.47 - Changed initialize_connection to use getHostVLUNs. #1475064 - 2.0.48 - Adding changes to support 3PAR iSCSI multipath. 
- 2.0.49 - Added client CPG stats to driver volume stats. bug #1482741 - 2.0.50 - Add over subscription support - 2.0.51 - Adds consistency group support - 2.0.52 - Added update_migrated_volume. bug #1492023 - 2.0.53 - Fix volume size conversion. bug #1513158 - 3.0.0 - Rebranded HP to HPE. - 3.0.1 - Fixed find_existing_vluns bug #1515033 - 3.0.2 - Python 3 support - 3.0.3 - Remove db access for consistency groups - 3.0.4 - Adds v2 managed replication support - 3.0.5 - Adds v2 unmanaged replication support - 3.0.6 - Adding manage/unmanage snapshot support - 3.0.7 - Enable standard capabilities based on 3PAR licenses - 3.0.8 - Optimize array ID retrieval - 3.0.9 - Bump minimum API version for volume replication - 3.0.10 - Added additional volumes checks to the manage snapshot API - 3.0.11 - Fix the image cache capability bug #1491088 - 3.0.12 - Remove client version checks for replication - 3.0.13 - Support creating a cg from a source cg - 3.0.14 - Comparison of WWNs now handles case difference. bug #1546453 - 3.0.15 - Update replication to version 2.1 - 3.0.16 - Use same LUN ID for each VLUN path #1551994 - 3.0.17 - Don't fail on clearing 3PAR object volume key. bug #1546392 - 3.0.18 - create_cloned_volume account for larger size. bug #1554740 - 3.0.19 - Remove metadata that tracks the instance ID. bug #1572665 - 3.0.20 - Fix lun_id of 0 issue. bug #1573298 - 3.0.21 - Driver no longer fails to initialize if - System Reporter license is missing. bug #1568078 - 3.0.22 - Rework delete_vlun. Bug #1582922 - 3.0.23 - Fix CG create failures with long display name or special - characters. bug #1573647 - 3.0.24 - Fix terminate connection on failover - 3.0.25 - Fix delete volume when online clone is active. bug #1349639 - 3.0.26 - Fix concurrent snapshot delete conflict. bug #1600104 - 3.0.27 - Fix snapCPG error during backup of attached volume. - Bug #1646396 and also ,Fix backup of attached ISCSI - and CHAP enabled volume.bug #1644238. 
- 3.0.28 - Remove un-necessary snapshot creation of source volume - while doing online copy in create_cloned_volume call. - Bug #1661541 - 3.0.29 - Fix convert snapshot volume to base volume type. bug #1656186 - 3.0.30 - Handle manage and unmanage hosts present. bug #1648067 - 3.0.31 - Enable HPE-3PAR Compression Feature. - 3.0.32 - Add consistency group capability to generic volume group - in HPE-3APR - 3.0.33 - Added replication feature in retype flow. bug #1680313 - 3.0.34 - Add cloned volume to vvset in online copy. bug #1664464 - 3.0.35 - Add volume to consistency group if flag enabled. bug #1702317 - 3.0.36 - Swap volume name in migration. bug #1699733 - - """ - - VERSION = "3.0.36" - - stats = {} - - # TODO(Ramy): move these to the 3PAR Client - VLUN_TYPE_EMPTY = 1 - VLUN_TYPE_PORT = 2 - VLUN_TYPE_HOST = 3 - VLUN_TYPE_MATCHED_SET = 4 - VLUN_TYPE_HOST_SET = 5 - - THIN = 2 - DEDUP = 6 - CONVERT_TO_THIN = 1 - CONVERT_TO_FULL = 2 - CONVERT_TO_DEDUP = 3 - - # v2 replication constants - SYNC = 1 - PERIODIC = 2 - EXTRA_SPEC_REP_MODE = "replication:mode" - EXTRA_SPEC_REP_SYNC_PERIOD = "replication:sync_period" - RC_ACTION_CHANGE_TO_PRIMARY = 7 - DEFAULT_REP_MODE = 'periodic' - DEFAULT_SYNC_PERIOD = 900 - RC_GROUP_STARTED = 3 - SYNC_STATUS_COMPLETED = 3 - FAILBACK_VALUE = 'default' - - # License values for reported capabilities - PRIORITY_OPT_LIC = "Priority Optimization" - THIN_PROV_LIC = "Thin Provisioning" - REMOTE_COPY_LIC = "Remote Copy" - SYSTEM_REPORTER_LIC = "System Reporter" - COMPRESSION_LIC = "Compression" - - # Valid values for volume type extra specs - # The first value in the list is the default value - valid_prov_values = ['thin', 'full', 'dedup'] - valid_persona_values = ['2 - Generic-ALUA', - '1 - Generic', - '3 - Generic-legacy', - '4 - HPUX-legacy', - '5 - AIX-legacy', - '6 - EGENERA', - '7 - ONTAP-legacy', - '8 - VMware', - '9 - OpenVMS', - '10 - HPUX', - '11 - WindowsServer'] - hpe_qos_keys = ['minIOPS', 'maxIOPS', 'minBWS', 'maxBWS', 'latency', 
- 'priority'] - qos_priority_level = {'low': 1, 'normal': 2, 'high': 3} - hpe3par_valid_keys = ['cpg', 'snap_cpg', 'provisioning', 'persona', 'vvs', - 'flash_cache', 'compression'] - - def __init__(self, config, active_backend_id=None): - self.config = config - self.client = None - self.uuid = uuid.uuid4() - self._client_conf = {} - self._replication_targets = [] - self._replication_enabled = False - self._active_backend_id = active_backend_id - - def get_version(self): - return self.VERSION - - def check_flags(self, options, required_flags): - for flag in required_flags: - if not getattr(options, flag, None): - msg = _('%s is not set') % flag - LOG.error(msg) - raise exception.InvalidInput(reason=msg) - - def check_replication_flags(self, options, required_flags): - for flag in required_flags: - if not options.get(flag, None): - msg = (_('%s is not set and is required for the replication ' - 'device to be valid.') % flag) - LOG.error(msg) - raise exception.InvalidInput(reason=msg) - - def _create_client(self, timeout=None): - hpe3par_api_url = self._client_conf['hpe3par_api_url'] - cl = client.HPE3ParClient(hpe3par_api_url, timeout=timeout) - client_version = hpe3parclient.version - - if client_version < MIN_CLIENT_VERSION: - ex_msg = (_('Invalid hpe3parclient version found (%(found)s). ' - 'Version %(minimum)s or greater required. 
Run "pip' - ' install --upgrade python-3parclient" to upgrade' - ' the hpe3parclient.') - % {'found': client_version, - 'minimum': MIN_CLIENT_VERSION}) - LOG.error(ex_msg) - raise exception.InvalidInput(reason=ex_msg) - - return cl - - def client_login(self): - try: - LOG.debug("Connecting to 3PAR") - self.client.login(self._client_conf['hpe3par_username'], - self._client_conf['hpe3par_password']) - except hpeexceptions.HTTPUnauthorized as ex: - msg = (_("Failed to Login to 3PAR (%(url)s) because %(err)s") % - {'url': self._client_conf['hpe3par_api_url'], 'err': ex}) - LOG.error(msg) - raise exception.InvalidInput(reason=msg) - - known_hosts_file = CONF.ssh_hosts_key_file - policy = "AutoAddPolicy" - if CONF.strict_ssh_host_key_policy: - policy = "RejectPolicy" - self.client.setSSHOptions( - self._client_conf['san_ip'], - self._client_conf['san_login'], - self._client_conf['san_password'], - port=self._client_conf['san_ssh_port'], - conn_timeout=self._client_conf['ssh_conn_timeout'], - privatekey=self._client_conf['san_private_key'], - missing_key_policy=policy, - known_hosts_file=known_hosts_file) - - def client_logout(self): - LOG.debug("Disconnect from 3PAR REST and SSH %s", self.uuid) - self.client.logout() - - def _create_replication_client(self, remote_array): - try: - cl = client.HPE3ParClient(remote_array['hpe3par_api_url']) - cl.login(remote_array['hpe3par_username'], - remote_array['hpe3par_password']) - except hpeexceptions.HTTPUnauthorized as ex: - msg = (_("Failed to Login to 3PAR (%(url)s) because %(err)s") % - {'url': remote_array['hpe3par_api_url'], 'err': ex}) - LOG.error(msg) - raise exception.InvalidInput(reason=msg) - - known_hosts_file = CONF.ssh_hosts_key_file - policy = "AutoAddPolicy" - if CONF.strict_ssh_host_key_policy: - policy = "RejectPolicy" - cl.setSSHOptions( - remote_array['san_ip'], - remote_array['san_login'], - remote_array['san_password'], - port=remote_array['san_ssh_port'], - conn_timeout=remote_array['ssh_conn_timeout'], - 
privatekey=remote_array['san_private_key'], - missing_key_policy=policy, - known_hosts_file=known_hosts_file) - return cl - - def _destroy_replication_client(self, client): - if client is not None: - client.logout() - - def do_setup(self, context, timeout=None, stats=None): - if hpe3parclient is None: - msg = _('You must install hpe3parclient before using 3PAR' - ' drivers. Run "pip install python-3parclient" to' - ' install the hpe3parclient.') - raise exception.VolumeBackendAPIException(data=msg) - - try: - # This will set self._client_conf with the proper credentials - # to communicate with the 3PAR array. It will contain either - # the values for the primary array or secondary array in the - # case of a fail-over. - self._get_3par_config() - self.client = self._create_client(timeout=timeout) - wsapi_version = self.client.getWsApiVersion() - self.API_VERSION = wsapi_version['build'] - - # If replication is properly configured, the primary array's - # API version must meet the minimum requirements. - if self._replication_enabled and ( - self.API_VERSION < REMOTE_COPY_API_VERSION): - self._replication_enabled = False - LOG.error("The primary array must have an API version of " - "%(min_ver)s or higher, but is only on " - "%(current_ver)s, therefore replication is not " - "supported.", - {'min_ver': REMOTE_COPY_API_VERSION, - 'current_ver': self.API_VERSION}) - except hpeexceptions.UnsupportedVersion as ex: - # In the event we cannot contact the configured primary array, - # we want to allow a failover if replication is enabled. - self._do_replication_setup() - if self._replication_enabled: - self.client = None - raise exception.InvalidInput(ex) - - if context: - # The context is None except at driver startup. 
- LOG.info("HPE3PARCommon %(common_ver)s," - "hpe3parclient %(rest_ver)s", - {"common_ver": self.VERSION, - "rest_ver": hpe3parclient.get_version_string()}) - if self.config.hpe3par_debug: - self.client.debug_rest(True) - if self.API_VERSION < SRSTATLD_API_VERSION: - # Firmware version not compatible with srstatld - LOG.warning("srstatld requires " - "WSAPI version '%(srstatld_version)s' " - "version '%(version)s' is installed.", - {'srstatld_version': SRSTATLD_API_VERSION, - 'version': self.API_VERSION}) - - # Get the client ID for provider_location. We only need to retrieve - # the ID directly from the array if the driver stats are not provided. - if not stats: - try: - self.client_login() - info = self.client.getStorageSystemInfo() - self.client.id = six.text_type(info['id']) - except Exception: - self.client.id = 0 - finally: - self.client_logout() - else: - self.client.id = stats['array_id'] - - def check_for_setup_error(self): - if self.client: - self.client_login() - try: - cpg_names = self._client_conf['hpe3par_cpg'] - for cpg_name in cpg_names: - self.validate_cpg(cpg_name) - - finally: - self.client_logout() - - def validate_cpg(self, cpg_name): - try: - self.client.getCPG(cpg_name) - except hpeexceptions.HTTPNotFound: - err = (_("CPG (%s) doesn't exist on array") % cpg_name) - LOG.error(err) - raise exception.InvalidInput(reason=err) - - def get_domain(self, cpg_name): - try: - cpg = self.client.getCPG(cpg_name) - except hpeexceptions.HTTPNotFound: - err = (_("Failed to get domain because CPG (%s) doesn't " - "exist on array.") % cpg_name) - LOG.error(err) - raise exception.InvalidInput(reason=err) - - if 'domain' in cpg: - return cpg['domain'] - return None - - def extend_volume(self, volume, new_size): - volume_name = self._get_3par_vol_name(volume['id']) - old_size = volume['size'] - growth_size = int(new_size) - old_size - LOG.debug("Extending Volume %(vol)s from %(old)s to %(new)s, " - " by %(diff)s GB.", - {'vol': volume_name, 'old': old_size, 
'new': new_size, - 'diff': growth_size}) - growth_size_mib = growth_size * units.Ki - self._extend_volume(volume, volume_name, growth_size_mib) - - def create_group(self, context, group): - """Creates a group.""" - if not volume_utils.is_group_a_cg_snapshot_type(group): - raise NotImplementedError() - if group.volume_type_ids is not None: - for volume_type in group.volume_types: - allow_type = self.is_volume_group_snap_type( - volume_type) - if not allow_type: - msg = _('For a volume type to be a part of consistent ' - 'group, volume type extra spec must have ' - 'consistent_group_snapshot_enabled=" True"') - LOG.error(msg) - raise exception.InvalidInput(reason=msg) - - pool = volume_utils.extract_host(group.host, level='pool') - domain = self.get_domain(pool) - cg_name = self._get_3par_vvs_name(group.id) - - extra = {'group_id': group.id} - if group.group_snapshot_id is not None: - extra['group_snapshot_id'] = group.group_snapshot_id - - self.client.createVolumeSet(cg_name, domain=domain, - comment=six.text_type(extra)) - - model_update = {'status': fields.GroupStatus.AVAILABLE} - return model_update - - def create_group_from_src(self, context, group, volumes, - group_snapshot=None, snapshots=None, - source_group=None, source_vols=None): - - self.create_group(context, group) - vvs_name = self._get_3par_vvs_name(group.id) - if group_snapshot and snapshots: - cgsnap_name = self._get_3par_snap_name(group_snapshot.id) - snap_base = cgsnap_name - elif source_group and source_vols: - cg_id = source_group.id - # Create a brand new uuid for the temp snap. - snap_uuid = uuid.uuid4().hex - - # Create a temporary snapshot of the volume set in order to - # perform an online copy. These temp snapshots will be deleted - # when the source consistency group is deleted. 
- temp_snap = self._get_3par_snap_name(snap_uuid, temp_snap=True) - snap_shot_name = temp_snap + "-@count@" - copy_of_name = self._get_3par_vvs_name(cg_id) - optional = {'expirationHours': 1} - self.client.createSnapshotOfVolumeSet(snap_shot_name, copy_of_name, - optional=optional) - snap_base = temp_snap - - for i, volume in enumerate(volumes): - snap_name = snap_base + "-" + six.text_type(i) - volume_name = self._get_3par_vol_name(volume.id) - type_info = self.get_volume_settings_from_type(volume) - cpg = type_info['cpg'] - snapcpg = type_info['snap_cpg'] - tpvv = type_info.get('tpvv', False) - tdvv = type_info.get('tdvv', False) - - compression = self.get_compression_policy( - type_info['hpe3par_keys']) - - optional = {'online': True, 'snapCPG': snapcpg, - 'tpvv': tpvv, 'tdvv': tdvv} - - if compression is not None: - optional['compression'] = compression - - self.client.copyVolume(snap_name, volume_name, cpg, optional) - self.client.addVolumeToVolumeSet(vvs_name, volume_name) - - return None, None - - def delete_group(self, context, group, volumes): - """Deletes a group.""" - - try: - if not volume_utils.is_group_a_cg_snapshot_type(group): - raise NotImplementedError() - cg_name = self._get_3par_vvs_name(group.id) - self.client.deleteVolumeSet(cg_name) - except hpeexceptions.HTTPNotFound: - LOG.warning("Virtual Volume Set '%s' doesn't exist on array.", - cg_name) - except hpeexceptions.HTTPConflict as e: - LOG.error("Conflict detected in Virtual Volume Set" - " %(volume_set)s: %(error)s", - {"volume_set": cg_name, - "error": e}) - - volume_model_updates = [] - for volume in volumes: - volume_update = {'id': volume.id} - try: - self.delete_volume(volume) - volume_update['status'] = 'deleted' - except Exception as ex: - LOG.error("There was an error deleting volume %(id)s: " - "%(error)s.", - {'id': volume.id, - 'error': ex}) - volume_update['status'] = 'error' - volume_model_updates.append(volume_update) - model_update = {'status': group.status} - return 
model_update, volume_model_updates - - def update_group(self, context, group, add_volumes=None, - remove_volumes=None): - grp_snap_enable = volume_utils.is_group_a_cg_snapshot_type(group) - if not grp_snap_enable: - raise NotImplementedError() - volume_set_name = self._get_3par_vvs_name(group.id) - for volume in add_volumes: - volume_name = self._get_3par_vol_name(volume.id) - vol_snap_enable = self.is_volume_group_snap_type( - volume.volume_type) - try: - if grp_snap_enable and vol_snap_enable: - self.client.addVolumeToVolumeSet(volume_set_name, - volume_name) - else: - msg = (_('Volume with volume id %s is not ' - 'supported as extra specs of this ' - 'volume does not have ' - 'consistent_group_snapshot_enabled=" True"' - ) % volume['id']) - LOG.error(msg) - raise exception.InvalidInput(reason=msg) - except hpeexceptions.HTTPNotFound: - msg = (_('Virtual Volume Set %s does not exist.') % - volume_set_name) - LOG.error(msg) - raise exception.InvalidInput(reason=msg) - - for volume in remove_volumes: - volume_name = self._get_3par_vol_name(volume.id) - try: - self.client.removeVolumeFromVolumeSet( - volume_set_name, volume_name) - except hpeexceptions.HTTPNotFound: - msg = (_('Virtual Volume Set %s does not exist.') % - volume_set_name) - LOG.error(msg) - raise exception.InvalidInput(reason=msg) - - return None, None, None - - def create_group_snapshot(self, context, group_snapshot, snapshots): - """Creates a group snapshot.""" - if not volume_utils.is_group_a_cg_snapshot_type(group_snapshot): - raise NotImplementedError() - - cg_id = group_snapshot.group_id - snap_shot_name = self._get_3par_snap_name(group_snapshot.id) + ( - "-@count@") - copy_of_name = self._get_3par_vvs_name(cg_id) - - extra = {'group_snapshot_id': group_snapshot.id} - extra['group_id'] = cg_id - extra['description'] = group_snapshot.description - - optional = {'comment': json.dumps(extra), - 'readOnly': False} - if self.config.hpe3par_snapshot_expiration: - optional['expirationHours'] = ( - 
int(self.config.hpe3par_snapshot_expiration)) - - if self.config.hpe3par_snapshot_retention: - optional['retentionHours'] = ( - int(self.config.hpe3par_snapshot_retention)) - - try: - self.client.createSnapshotOfVolumeSet(snap_shot_name, copy_of_name, - optional=optional) - except Exception as ex: - msg = (_('There was an error creating the cgsnapshot: %s'), - six.text_type(ex)) - LOG.error(msg) - raise exception.InvalidInput(reason=msg) - - snapshot_model_updates = [] - for snapshot in snapshots: - snapshot_update = {'id': snapshot['id'], - 'status': fields.SnapshotStatus.AVAILABLE} - snapshot_model_updates.append(snapshot_update) - - model_update = {'status': fields.GroupSnapshotStatus.AVAILABLE} - - return model_update, snapshot_model_updates - - def delete_group_snapshot(self, context, group_snapshot, snapshots): - """Deletes a group snapshot.""" - if not volume_utils.is_group_a_cg_snapshot_type(group_snapshot): - raise NotImplementedError() - cgsnap_name = self._get_3par_snap_name(group_snapshot.id) - - snapshot_model_updates = [] - for i, snapshot in enumerate(snapshots): - snapshot_update = {'id': snapshot['id']} - try: - snap_name = cgsnap_name + "-" + six.text_type(i) - self.client.deleteVolume(snap_name) - snapshot_update['status'] = fields.SnapshotStatus.DELETED - except hpeexceptions.HTTPNotFound as ex: - # We'll let this act as if it worked - # it helps clean up the cinder entries. - LOG.warning("Delete Snapshot id not found. 
Removing from " - "cinder: %(id)s Ex: %(msg)s", - {'id': snapshot['id'], 'msg': ex}) - snapshot_update['status'] = fields.SnapshotStatus.ERROR - except Exception as ex: - LOG.error("There was an error deleting snapshot %(id)s: " - "%(error)s.", - {'id': snapshot['id'], - 'error': six.text_type(ex)}) - snapshot_update['status'] = fields.SnapshotStatus.ERROR - snapshot_model_updates.append(snapshot_update) - - model_update = {'status': fields.GroupSnapshotStatus.DELETED} - - return model_update, snapshot_model_updates - - def manage_existing(self, volume, existing_ref): - """Manage an existing 3PAR volume. - - existing_ref is a dictionary of the form: - {'source-name': } - """ - target_vol_name = self._get_existing_volume_ref_name(existing_ref) - - # Check for the existence of the virtual volume. - old_comment_str = "" - try: - vol = self.client.getVolume(target_vol_name) - if 'comment' in vol: - old_comment_str = vol['comment'] - except hpeexceptions.HTTPNotFound: - err = (_("Virtual volume '%s' doesn't exist on array.") % - target_vol_name) - LOG.error(err) - raise exception.InvalidInput(reason=err) - - new_comment = {} - - # Use the display name from the existing volume if no new name - # was chosen by the user. - if volume['display_name']: - display_name = volume['display_name'] - new_comment['display_name'] = volume['display_name'] - elif 'comment' in vol: - display_name = self._get_3par_vol_comment_value(vol['comment'], - 'display_name') - if display_name: - new_comment['display_name'] = display_name - else: - display_name = None - - # Generate the new volume information based on the new ID. 
- new_vol_name = self._get_3par_vol_name(volume['id']) - name = 'volume-' + volume['id'] - - new_comment['volume_id'] = volume['id'] - new_comment['name'] = name - new_comment['type'] = 'OpenStack' - - volume_type = None - if volume['volume_type_id']: - try: - volume_type = self._get_volume_type(volume['volume_type_id']) - except Exception: - reason = (_("Volume type ID '%s' is invalid.") % - volume['volume_type_id']) - raise exception.ManageExistingVolumeTypeMismatch(reason=reason) - - new_vals = {'newName': new_vol_name, - 'comment': json.dumps(new_comment)} - - # Ensure that snapCPG is set - if 'snapCPG' not in vol: - new_vals['snapCPG'] = vol['userCPG'] - LOG.info("Virtual volume %(disp)s '%(new)s' snapCPG " - "is empty so it will be set to: %(cpg)s", - {'disp': display_name, 'new': new_vol_name, - 'cpg': new_vals['snapCPG']}) - - # Update the existing volume with the new name and comments. - self.client.modifyVolume(target_vol_name, new_vals) - - LOG.info("Virtual volume '%(ref)s' renamed to '%(new)s'.", - {'ref': existing_ref['source-name'], 'new': new_vol_name}) - - retyped = False - model_update = None - if volume_type: - LOG.info("Virtual volume %(disp)s '%(new)s' is being retyped.", - {'disp': display_name, 'new': new_vol_name}) - - try: - retyped, model_update = self._retype_from_no_type(volume, - volume_type) - LOG.info("Virtual volume %(disp)s successfully retyped to " - "%(new_type)s.", - {'disp': display_name, - 'new_type': volume_type.get('name')}) - except Exception: - with excutils.save_and_reraise_exception(): - LOG.warning("Failed to manage virtual volume %(disp)s " - "due to error during retype.", - {'disp': display_name}) - # Try to undo the rename and clear the new comment. 
- self.client.modifyVolume( - new_vol_name, - {'newName': target_vol_name, - 'comment': old_comment_str}) - - updates = {'display_name': display_name} - if retyped and model_update: - updates.update(model_update) - - LOG.info("Virtual volume %(disp)s '%(new)s' is now being managed.", - {'disp': display_name, 'new': new_vol_name}) - - # Return display name to update the name displayed in the GUI and - # any model updates from retype. - return updates - - def manage_existing_snapshot(self, snapshot, existing_ref): - """Manage an existing 3PAR snapshot. - - existing_ref is a dictionary of the form: - {'source-name': } - """ - # Potential parent volume for the snapshot - volume = snapshot['volume'] - - # Do not allow for managing of snapshots for 'failed-over' volumes. - if volume.get('replication_status') == 'failed-over': - err = (_("Managing of snapshots to failed-over volumes is " - "not allowed.")) - raise exception.InvalidInput(reason=err) - - target_snap_name = self._get_existing_volume_ref_name(existing_ref, - is_snapshot=True) - - # Check for the existence of the snapshot. - try: - snap = self.client.getVolume(target_snap_name) - except hpeexceptions.HTTPNotFound: - err = (_("Snapshot '%s' doesn't exist on array.") % - target_snap_name) - LOG.error(err) - raise exception.InvalidInput(reason=err) - - # Make sure the snapshot is being associated with the correct volume. - parent_vol_name = self._get_3par_vol_name(volume['id']) - if parent_vol_name != snap['copyOf']: - err = (_("The provided snapshot '%s' is not a snapshot of " - "the provided volume.") % target_snap_name) - LOG.error(err) - raise exception.InvalidInput(reason=err) - - new_comment = {} - - # Use the display name from the existing snapshot if no new name - # was chosen by the user. 
- if snapshot['display_name']: - display_name = snapshot['display_name'] - new_comment['display_name'] = snapshot['display_name'] - elif 'comment' in snap: - display_name = self._get_3par_vol_comment_value(snap['comment'], - 'display_name') - if display_name: - new_comment['display_name'] = display_name - else: - display_name = None - - # Generate the new snapshot information based on the new ID. - new_snap_name = self._get_3par_snap_name(snapshot['id']) - new_comment['volume_id'] = volume['id'] - new_comment['volume_name'] = 'volume-' + volume['id'] - if snapshot.get('display_description', None): - new_comment['description'] = snapshot['display_description'] - else: - new_comment['description'] = "" - - new_vals = {'newName': new_snap_name, - 'comment': json.dumps(new_comment)} - - # Update the existing snapshot with the new name and comments. - self.client.modifyVolume(target_snap_name, new_vals) - - LOG.info("Snapshot '%(ref)s' renamed to '%(new)s'.", - {'ref': existing_ref['source-name'], 'new': new_snap_name}) - - updates = {'display_name': display_name} - - LOG.info("Snapshot %(disp)s '%(new)s' is now being managed.", - {'disp': display_name, 'new': new_snap_name}) - - # Return display name to update the name displayed in the GUI. - return updates - - def manage_existing_get_size(self, volume, existing_ref): - """Return size of volume to be managed by manage_existing. - - existing_ref is a dictionary of the form: - {'source-name': } - """ - target_vol_name = self._get_existing_volume_ref_name(existing_ref) - - # Make sure the reference is not in use. - if re.match('osv-*|oss-*|vvs-*', target_vol_name): - reason = _("Reference must be for an unmanaged virtual volume.") - raise exception.ManageExistingInvalidReference( - existing_ref=target_vol_name, - reason=reason) - - # Check for the existence of the virtual volume. 
- try: - vol = self.client.getVolume(target_vol_name) - except hpeexceptions.HTTPNotFound: - err = (_("Virtual volume '%s' doesn't exist on array.") % - target_vol_name) - LOG.error(err) - raise exception.InvalidInput(reason=err) - - return int(math.ceil(float(vol['sizeMiB']) / units.Ki)) - - def manage_existing_snapshot_get_size(self, snapshot, existing_ref): - """Return size of snapshot to be managed by manage_existing_snapshot. - - existing_ref is a dictionary of the form: - {'source-name': } - """ - target_snap_name = self._get_existing_volume_ref_name(existing_ref, - is_snapshot=True) - - # Make sure the reference is not in use. - if re.match('osv-*|oss-*|vvs-*|unm-*', target_snap_name): - reason = _("Reference must be for an unmanaged snapshot.") - raise exception.ManageExistingInvalidReference( - existing_ref=target_snap_name, - reason=reason) - - # Check for the existence of the snapshot. - try: - snap = self.client.getVolume(target_snap_name) - except hpeexceptions.HTTPNotFound: - err = (_("Snapshot '%s' doesn't exist on array.") % - target_snap_name) - LOG.error(err) - raise exception.InvalidInput(reason=err) - - return int(math.ceil(float(snap['sizeMiB']) / units.Ki)) - - def unmanage(self, volume): - """Removes the specified volume from Cinder management.""" - # Rename the volume's name to unm-* format so that it can be - # easily found later. - vol_name = self._get_3par_vol_name(volume['id']) - new_vol_name = self._get_3par_unm_name(volume['id']) - self.client.modifyVolume(vol_name, {'newName': new_vol_name}) - - LOG.info("Virtual volume %(disp)s '%(vol)s' is no longer managed. " - "Volume renamed to '%(new)s'.", - {'disp': volume['display_name'], - 'vol': vol_name, - 'new': new_vol_name}) - - def unmanage_snapshot(self, snapshot): - """Removes the specified snapshot from Cinder management.""" - # Parent volume for the snapshot - volume = snapshot['volume'] - - # Do not allow unmanaging of snapshots from 'failed-over' volumes. 
- if volume.get('replication_status') == 'failed-over': - err = (_("Unmanaging of snapshots from failed-over volumes is " - "not allowed.")) - LOG.error(err) - # TODO(leeantho) Change this exception to Invalid when the volume - # manager supports handling that. - raise exception.SnapshotIsBusy(snapshot_name=snapshot['id']) - - # Rename the snapshots's name to ums-* format so that it can be - # easily found later. - snap_name = self._get_3par_snap_name(snapshot['id']) - new_snap_name = self._get_3par_ums_name(snapshot['id']) - self.client.modifyVolume(snap_name, {'newName': new_snap_name}) - - LOG.info("Snapshot %(disp)s '%(vol)s' is no longer managed. " - "Snapshot renamed to '%(new)s'.", - {'disp': snapshot['display_name'], - 'vol': snap_name, - 'new': new_snap_name}) - - def _get_existing_volume_ref_name(self, existing_ref, is_snapshot=False): - """Returns the volume name of an existing reference. - - Checks if an existing volume reference has a source-name or - source-id element. If source-name or source-id is not present an - error will be thrown. 
- """ - vol_name = None - if 'source-name' in existing_ref: - vol_name = existing_ref['source-name'] - elif 'source-id' in existing_ref: - if is_snapshot: - vol_name = self._get_3par_ums_name(existing_ref['source-id']) - else: - vol_name = self._get_3par_unm_name(existing_ref['source-id']) - else: - reason = _("Reference must contain source-name or source-id.") - raise exception.ManageExistingInvalidReference( - existing_ref=existing_ref, - reason=reason) - - return vol_name - - def _extend_volume(self, volume, volume_name, growth_size_mib, - _convert_to_base=False): - model_update = None - rcg_name = self._get_3par_rcg_name(volume['id']) - is_volume_replicated = self._volume_of_replicated_type(volume) - try: - if _convert_to_base: - LOG.debug("Converting to base volume prior to growing.") - model_update = self._convert_to_base_volume(volume) - # If the volume is replicated and we are not failed over, - # remote copy has to be stopped before the volume can be extended. - failed_over = volume.get("replication_status", None) - is_failed_over = failed_over == "failed-over" - if is_volume_replicated and not is_failed_over: - self.client.stopRemoteCopy(rcg_name) - self.client.growVolume(volume_name, growth_size_mib) - if is_volume_replicated and not is_failed_over: - self.client.startRemoteCopy(rcg_name) - except Exception as ex: - # If the extend fails, we must restart remote copy. - if is_volume_replicated: - self.client.startRemoteCopy(rcg_name) - with excutils.save_and_reraise_exception() as ex_ctxt: - if (not _convert_to_base and - isinstance(ex, hpeexceptions.HTTPForbidden) and - ex.get_code() == 150): - # Error code 150 means 'invalid operation: Cannot grow - # this type of volume'. - # Suppress raising this exception because we can - # resolve it by converting it into a base volume. - # Afterwards, extending the volume should succeed, or - # fail with a different exception/error code. 
- ex_ctxt.reraise = False - model_update = self._extend_volume( - volume, volume_name, - growth_size_mib, - _convert_to_base=True) - else: - LOG.error("Error extending volume: %(vol)s. " - "Exception: %(ex)s", - {'vol': volume_name, 'ex': ex}) - return model_update - - def _get_3par_vol_name(self, volume_id, temp_vol=False): - """Get converted 3PAR volume name. - - Converts the openstack volume id from - ecffc30f-98cb-4cf5-85ee-d7309cc17cd2 - to - osv-7P.DD5jLTPWF7tcwnMF80g - - We convert the 128 bits of the uuid into a 24character long - base64 encoded string to ensure we don't exceed the maximum - allowed 31 character name limit on 3Par - - We strip the padding '=' and replace + with . - and / with - - """ - volume_name = self._encode_name(volume_id) - if temp_vol: - # is this a temporary volume - # this is done during migration - prefix = "tsv-%s" - else: - prefix = "osv-%s" - return prefix % volume_name - - def _get_3par_snap_name(self, snapshot_id, temp_snap=False): - snapshot_name = self._encode_name(snapshot_id) - if temp_snap: - # is this a temporary snapshot - # this is done during cloning - prefix = "tss-%s" - else: - prefix = "oss-%s" - return prefix % snapshot_name - - def _get_3par_ums_name(self, snapshot_id): - ums_name = self._encode_name(snapshot_id) - return "ums-%s" % ums_name - - def _get_3par_vvs_name(self, volume_id): - vvs_name = self._encode_name(volume_id) - return "vvs-%s" % vvs_name - - def _get_3par_unm_name(self, volume_id): - unm_name = self._encode_name(volume_id) - return "unm-%s" % unm_name - - # v2 replication conversion - def _get_3par_rcg_name(self, volume_id): - rcg_name = self._encode_name(volume_id) - rcg = "rcg-%s" % rcg_name - return rcg[:22] - - def _get_3par_remote_rcg_name(self, volume_id, provider_location): - return self._get_3par_rcg_name(volume_id) + ".r" + ( - six.text_type(provider_location)) - - def _encode_name(self, name): - uuid_str = name.replace("-", "") - vol_uuid = uuid.UUID('urn:uuid:%s' % uuid_str) - 
vol_encoded = base64.encode_as_text(vol_uuid.bytes) - - # 3par doesn't allow +, nor / - vol_encoded = vol_encoded.replace('+', '.') - vol_encoded = vol_encoded.replace('/', '-') - # strip off the == as 3par doesn't like those. - vol_encoded = vol_encoded.replace('=', '') - return vol_encoded - - def _capacity_from_size(self, vol_size): - # because 3PAR volume sizes are in Mebibytes. - if int(vol_size) == 0: - capacity = units.Gi # default: 1GiB - else: - capacity = vol_size * units.Gi - - capacity = int(math.ceil(capacity / units.Mi)) - return capacity - - def _delete_3par_host(self, hostname): - self.client.deleteHost(hostname) - - def _get_prioritized_host_on_3par(self, host, hosts, hostname): - # Check whether host with wwn/iqn of initiator present on 3par - if hosts and hosts['members'] and 'name' in hosts['members'][0]: - # Retrieving 'host' and 'hosts' from 3par using hostname - # and wwn/iqn respectively. Compare hostname of 'host' and 'hosts', - # if they do not match it means 3par has a pre-existing host - # with some other name. 
- if host['name'] != hosts['members'][0]['name']: - hostname = hosts['members'][0]['name'] - LOG.info(("Prioritize the host retrieved from wwn/iqn " - "Hostname : %(hosts)s is used instead " - "of Hostname: %(host)s"), - {'hosts': hostname, - 'host': host['name']}) - host = self._get_3par_host(hostname) - return host, hostname - - return host, hostname - - def _create_3par_vlun(self, volume, hostname, nsp, lun_id=None): - try: - location = None - auto = True - - if lun_id is not None: - auto = False - - if nsp is None: - location = self.client.createVLUN(volume, hostname=hostname, - auto=auto, lun=lun_id) - else: - port = self.build_portPos(nsp) - location = self.client.createVLUN(volume, hostname=hostname, - auto=auto, portPos=port, - lun=lun_id) - - vlun_info = None - if location: - # The LUN id is returned as part of the location URI - vlun = location.split(',') - vlun_info = {'volume_name': vlun[0], - 'lun_id': int(vlun[1]), - 'host_name': vlun[2], - } - if len(vlun) > 3: - vlun_info['nsp'] = vlun[3] - - return vlun_info - - except hpeexceptions.HTTPBadRequest as e: - if 'must be in the same domain' in e.get_description(): - LOG.error(e.get_description()) - raise exception.Invalid3PARDomain(err=e.get_description()) - else: - raise exception.VolumeBackendAPIException( - data=e.get_description()) - - def _safe_hostname(self, hostname): - """We have to use a safe hostname length for 3PAR host names.""" - try: - index = hostname.index('.') - except ValueError: - # couldn't find it - index = len(hostname) - - # we'll just chop this off for now. 
- if index > 31: - index = 31 - - return hostname[:index] - - def _get_3par_host(self, hostname): - return self.client.getHost(hostname) - - def get_ports(self): - return self.client.getPorts() - - def get_active_target_ports(self): - ports = self.get_ports() - target_ports = [] - for port in ports['members']: - if ( - port['mode'] == self.client.PORT_MODE_TARGET and - port['linkState'] == self.client.PORT_STATE_READY - ): - port['nsp'] = self.build_nsp(port['portPos']) - target_ports.append(port) - - return target_ports - - def get_active_fc_target_ports(self): - ports = self.get_active_target_ports() - fc_ports = [] - for port in ports: - if port['protocol'] == self.client.PORT_PROTO_FC: - fc_ports.append(port) - - return fc_ports - - def get_active_iscsi_target_ports(self): - ports = self.get_active_target_ports() - iscsi_ports = [] - for port in ports: - if port['protocol'] == self.client.PORT_PROTO_ISCSI: - iscsi_ports.append(port) - - return iscsi_ports - - def get_volume_stats(self, - refresh, - filter_function=None, - goodness_function=None): - if refresh: - self._update_volume_stats( - filter_function=filter_function, - goodness_function=goodness_function) - - return self.stats - - def _update_volume_stats(self, - filter_function=None, - goodness_function=None): - # const to convert MiB to GB - const = 0.0009765625 - - # storage_protocol and volume_backend_name are - # set in the child classes - - pools = [] - info = self.client.getStorageSystemInfo() - qos_support = True - thin_support = True - remotecopy_support = True - sr_support = True - compression_support = False - if 'licenseInfo' in info: - if 'licenses' in info['licenseInfo']: - valid_licenses = info['licenseInfo']['licenses'] - qos_support = self._check_license_enabled( - valid_licenses, self.PRIORITY_OPT_LIC, - "QoS_support") - thin_support = self._check_license_enabled( - valid_licenses, self.THIN_PROV_LIC, - "Thin_provisioning_support") - remotecopy_support = self._check_license_enabled( - 
valid_licenses, self.REMOTE_COPY_LIC, - "Replication") - sr_support = self._check_license_enabled( - valid_licenses, self.SYSTEM_REPORTER_LIC, - "System_reporter_support") - compression_support = self._check_license_enabled( - valid_licenses, self.COMPRESSION_LIC, - "Compression") - - for cpg_name in self._client_conf['hpe3par_cpg']: - try: - stat_capabilities = { - THROUGHPUT: None, - BANDWIDTH: None, - LATENCY: None, - IO_SIZE: None, - QUEUE_LENGTH: None, - AVG_BUSY_PERC: None - } - cpg = self.client.getCPG(cpg_name) - if (self.API_VERSION >= SRSTATLD_API_VERSION and sr_support): - interval = 'daily' - history = '7d' - try: - stat_capabilities = self.client.getCPGStatData( - cpg_name, - interval, - history) - except Exception as ex: - LOG.warning("Exception at getCPGStatData() " - "for cpg: '%(cpg_name)s' " - "Reason: '%(reason)s'", - {'cpg_name': cpg_name, 'reason': ex}) - if 'numTDVVs' in cpg: - total_volumes = int( - cpg['numFPVVs'] + cpg['numTPVVs'] + cpg['numTDVVs'] - ) - else: - total_volumes = int( - cpg['numFPVVs'] + cpg['numTPVVs'] - ) - - if 'limitMiB' not in cpg['SDGrowth']: - # cpg usable free space - cpg_avail_space = ( - self.client.getCPGAvailableSpace(cpg_name)) - free_capacity = int( - cpg_avail_space['usableFreeMiB'] * const) - # total_capacity is the best we can do for a limitless cpg - total_capacity = int( - (cpg['SDUsage']['usedMiB'] + - cpg['UsrUsage']['usedMiB'] + - cpg_avail_space['usableFreeMiB']) * const) - else: - total_capacity = int(cpg['SDGrowth']['limitMiB'] * const) - free_capacity = int((cpg['SDGrowth']['limitMiB'] - - (cpg['UsrUsage']['usedMiB'] + - cpg['SDUsage']['usedMiB'])) * const) - capacity_utilization = ( - (float(total_capacity - free_capacity) / - float(total_capacity)) * 100) - provisioned_capacity = int((cpg['UsrUsage']['totalMiB'] + - cpg['SAUsage']['totalMiB'] + - cpg['SDUsage']['totalMiB']) * - const) - - except hpeexceptions.HTTPNotFound: - err = (_("CPG (%s) doesn't exist on array") - % cpg_name) - LOG.error(err) 
- raise exception.InvalidInput(reason=err) - - pool = {'pool_name': cpg_name, - 'total_capacity_gb': total_capacity, - 'free_capacity_gb': free_capacity, - 'provisioned_capacity_gb': provisioned_capacity, - 'QoS_support': qos_support, - 'thin_provisioning_support': thin_support, - 'thick_provisioning_support': True, - 'max_over_subscription_ratio': ( - self.config.safe_get('max_over_subscription_ratio')), - 'reserved_percentage': ( - self.config.safe_get('reserved_percentage')), - 'location_info': ('HPE3PARDriver:%(sys_id)s:%(dest_cpg)s' % - {'sys_id': info['serialNumber'], - 'dest_cpg': cpg_name}), - 'total_volumes': total_volumes, - 'capacity_utilization': capacity_utilization, - THROUGHPUT: stat_capabilities[THROUGHPUT], - BANDWIDTH: stat_capabilities[BANDWIDTH], - LATENCY: stat_capabilities[LATENCY], - IO_SIZE: stat_capabilities[IO_SIZE], - QUEUE_LENGTH: stat_capabilities[QUEUE_LENGTH], - AVG_BUSY_PERC: stat_capabilities[AVG_BUSY_PERC], - 'filter_function': filter_function, - 'goodness_function': goodness_function, - 'multiattach': False, - 'consistent_group_snapshot_enabled': True, - 'compression': compression_support, - } - - if remotecopy_support: - pool['replication_enabled'] = self._replication_enabled - pool['replication_type'] = ['sync', 'periodic'] - pool['replication_count'] = len(self._replication_targets) - - pools.append(pool) - - self.stats = {'driver_version': '3.0', - 'storage_protocol': None, - 'vendor_name': 'Hewlett Packard Enterprise', - 'volume_backend_name': None, - 'array_id': info['id'], - 'replication_enabled': self._replication_enabled, - 'replication_targets': self._get_replication_targets(), - 'pools': pools} - - def _check_license_enabled(self, valid_licenses, - license_to_check, capability): - """Check a license against valid licenses on the array.""" - if valid_licenses: - for license in valid_licenses: - if license_to_check in license.get('name'): - return True - LOG.debug("'%(capability)s' requires a '%(license)s' " - "license 
which is not installed.", - {'capability': capability, - 'license': license_to_check}) - return False - - def _get_vlun(self, volume_name, hostname, lun_id=None, nsp=None): - """find a VLUN on a 3PAR host.""" - vluns = self.client.getHostVLUNs(hostname) - found_vlun = None - for vlun in vluns: - if volume_name in vlun['volumeName']: - if lun_id is not None: - if vlun['lun'] == lun_id: - if nsp: - port = self.build_portPos(nsp) - if vlun['portPos'] == port: - found_vlun = vlun - break - else: - found_vlun = vlun - break - else: - found_vlun = vlun - break - - if found_vlun is None: - LOG.info("3PAR vlun %(name)s not found on host %(host)s", - {'name': volume_name, 'host': hostname}) - return found_vlun - - def create_vlun(self, volume, host, nsp=None, lun_id=None): - """Create a VLUN. - - In order to export a volume on a 3PAR box, we have to create a VLUN. - """ - volume_name = self._get_3par_vol_name(volume['id']) - vlun_info = self._create_3par_vlun(volume_name, host['name'], nsp, - lun_id=lun_id) - return self._get_vlun(volume_name, - host['name'], - vlun_info['lun_id'], - nsp) - - def delete_vlun(self, volume, hostname): - volume_name = self._get_3par_vol_name(volume['id']) - vluns = self.client.getHostVLUNs(hostname) - - # When deleteing VLUNs, you simply need to remove the template VLUN - # and any active VLUNs will be automatically removed. 
The template - # VLUN are marked as active: False - - volume_vluns = [] - - for vlun in vluns: - if volume_name in vlun['volumeName']: - # template VLUNs are 'active' = False - if not vlun['active']: - volume_vluns.append(vlun) - - if not volume_vluns: - LOG.warning("3PAR vlun for volume %(name)s not found on host " - "%(host)s", {'name': volume_name, 'host': hostname}) - return - - # VLUN Type of MATCHED_SET 4 requires the port to be provided - for vlun in volume_vluns: - if 'portPos' in vlun: - self.client.deleteVLUN(volume_name, vlun['lun'], - hostname=hostname, - port=vlun['portPos']) - else: - self.client.deleteVLUN(volume_name, vlun['lun'], - hostname=hostname) - - # Determine if there are other volumes attached to the host. - # This will determine whether we should try removing host from host set - # and deleting the host. - vluns = [] - try: - vluns = self.client.getHostVLUNs(hostname) - except hpeexceptions.HTTPNotFound: - LOG.debug("All VLUNs removed from host %s", hostname) - pass - - for vlun in vluns: - if volume_name not in vlun['volumeName']: - # Found another volume - break - else: - # We deleted the last vlun, so try to delete the host too. - # This check avoids the old unnecessary try/fail when vluns exist - # but adds a minor race condition if a vlun is manually deleted - # externally at precisely the wrong time. Worst case is leftover - # host, so it is worth the unlikely risk. - - try: - self._delete_3par_host(hostname) - except Exception as ex: - # Any exception down here is only logged. The vlun is deleted. - - # If the host is in a host set, the delete host will fail and - # the host will remain in the host set. This is desired - # because cinder was not responsible for the host set - # assignment. The host set could be used outside of cinder - # for future needs (e.g. export volume to host set). - - # The log info explains why the host was left alone. 
    def _get_volume_type(self, type_id):
        """Fetch the volume type record for type_id via an admin context."""
        ctxt = context.get_admin_context()
        return volume_types.get_volume_type(ctxt, type_id)

    def _get_key_value(self, hpe3par_keys, key, default=None):
        """Return hpe3par_keys[key], or default when absent/None dict."""
        if hpe3par_keys is not None and key in hpe3par_keys:
            return hpe3par_keys[key]
        else:
            return default

    def _get_qos_value(self, qos, key, default=None):
        """Return qos[key], or default when the key is missing."""
        if key in qos:
            return qos[key]
        else:
            return default

    def _get_qos_by_volume_type(self, volume_type):
        """Collect the driver's QoS keys from a volume type.

        Prefers the associated qos_specs over 'qos:'-prefixed extra specs;
        only keys listed in self.hpe_qos_keys are kept.
        """
        qos = {}
        qos_specs_id = volume_type.get('qos_specs_id')
        specs = volume_type.get('extra_specs')

        # NOTE(kmartin): We prefer the qos_specs association
        # and override any existing extra-specs settings
        # if present.
        if qos_specs_id is not None:
            kvs = qos_specs.get_qos_specs(context.get_admin_context(),
                                          qos_specs_id)['specs']
        else:
            kvs = specs

        for key, value in kvs.items():
            if 'qos:' in key:
                fields = key.split(':')
                key = fields[1]
            if key in self.hpe_qos_keys:
                qos[key] = value
        return qos

    def _get_keys_by_volume_type(self, volume_type):
        """Extract the 'hpe3par:'-style extra-spec keys from a volume type.

        Only keys listed in self.hpe3par_valid_keys are kept; the prefix
        before ':' is stripped.
        """
        hpe3par_keys = {}
        specs = volume_type.get('extra_specs')
        for key, value in specs.items():
            if ':' in key:
                fields = key.split(':')
                key = fields[1]
            if key in self.hpe3par_valid_keys:
                hpe3par_keys[key] = value
        return hpe3par_keys

    def _set_qos_rule(self, qos, vvs_name):
        """Create a 3PAR QoS rule on the given virtual volume set.

        When only one of min/max is given for IOPS (or bandwidth), the
        missing bound is set equal to the provided one, since the array
        requires both goal and limit together.
        """
        min_io = self._get_qos_value(qos, 'minIOPS')
        max_io = self._get_qos_value(qos, 'maxIOPS')
        min_bw = self._get_qos_value(qos, 'minBWS')
        max_bw = self._get_qos_value(qos, 'maxBWS')
        latency = self._get_qos_value(qos, 'latency')
        priority = self._get_qos_value(qos, 'priority', 'normal')

        qosRule = {}
        if min_io:
            qosRule['ioMinGoal'] = int(min_io)
            if max_io is None:
                qosRule['ioMaxLimit'] = int(min_io)
        if max_io:
            qosRule['ioMaxLimit'] = int(max_io)
            if min_io is None:
                qosRule['ioMinGoal'] = int(max_io)
        if min_bw:
            qosRule['bwMinGoalKB'] = int(min_bw) * units.Ki
            if max_bw is None:
                qosRule['bwMaxLimitKB'] = int(min_bw) * units.Ki
        if max_bw:
            qosRule['bwMaxLimitKB'] = int(max_bw) * units.Ki
            if min_bw is None:
                qosRule['bwMinGoalKB'] = int(max_bw) * units.Ki
        if latency:
            qosRule['latencyGoal'] = int(latency)
        if priority:
            qosRule['priority'] = self.qos_priority_level.get(priority.lower())

        try:
            self.client.createQoSRules(vvs_name, qosRule)
        except Exception:
            with excutils.save_and_reraise_exception():
                LOG.error("Error creating QOS rule %s", qosRule)

    def get_flash_cache_policy(self, hpe3par_keys):
        """Map the 'flash_cache' extra-spec to the client's enum value.

        :returns: FLASH_CACHE_ENABLED/FLASH_CACHE_DISABLED when the key is
                  present, else None.
        :raises exception.InvalidInput: if the WSAPI is too old.
        """
        if hpe3par_keys is not None:
            # First check list of extra spec keys
            val = self._get_key_value(hpe3par_keys, 'flash_cache', None)
            if val is not None:
                # If requested, see if supported on back end
                if self.API_VERSION < FLASH_CACHE_API_VERSION:
                    err = (_("Flash Cache Policy requires "
                             "WSAPI version '%(fcache_version)s' "
                             "version '%(version)s' is installed.") %
                           {'fcache_version': FLASH_CACHE_API_VERSION,
                            'version': self.API_VERSION})
                    LOG.error(err)
                    raise exception.InvalidInput(reason=err)
                else:
                    if val.lower() == 'true':
                        return self.client.FLASH_CACHE_ENABLED
                    else:
                        return self.client.FLASH_CACHE_DISABLED

        return None

    def get_compression_policy(self, hpe3par_keys):
        """Map the 'compression' extra-spec to True/False/None.

        :raises exception.InvalidInput: if the WSAPI is too old, or if
            compression is requested but no compression license is
            installed on the array.
        """
        if hpe3par_keys is not None:
            # here it should return true/false/None
            val = self._get_key_value(hpe3par_keys, 'compression', None)
            compression_support = False
            if val is not None:
                info = self.client.getStorageSystemInfo()
                if 'licenseInfo' in info:
                    if 'licenses' in info['licenseInfo']:
                        valid_licenses = info['licenseInfo']['licenses']
                        compression_support = self._check_license_enabled(
                            valid_licenses, self.COMPRESSION_LIC,
                            "Compression")
                # here check the wsapi version
                if self.API_VERSION < COMPRESSION_API_VERSION:
                    err = (_("Compression Policy requires "
                             "WSAPI version '%(compression_version)s' "
                             "version '%(version)s' is installed.") %
                           {'compression_version': COMPRESSION_API_VERSION,
                            'version': self.API_VERSION})
                    LOG.error(err)
                    raise exception.InvalidInput(reason=err)
                else:
                    if val.lower() == 'true':
                        if not compression_support:
                            msg = _('Compression is not supported on '
                                    'underlying hardware')
                            LOG.error(msg)
                            raise exception.InvalidInput(reason=msg)
                        return True
                    else:
                        return False
        return None

    def _set_flash_cache_policy_in_vvs(self, flash_cache, vvs_name):
        # Update virtual volume set
        if flash_cache:
            try:
                self.client.modifyVolumeSet(vvs_name,
                                            flashCachePolicy=flash_cache)
                LOG.info("Flash Cache policy set to %s", flash_cache)
            except Exception:
                with excutils.save_and_reraise_exception():
                    LOG.error("Error setting Flash Cache policy "
                              "to %s - exception", flash_cache)

    def _add_volume_to_volume_set(self, volume, volume_name,
                                  cpg, vvs_name, qos, flash_cache):
        """Add the volume to an admin-specified or driver-managed VV set.

        With an explicit vvs_name the volume is simply added to it;
        otherwise a per-volume set is created and the QoS rule / flash
        cache policy are applied to it, rolling the set back on failure.
        """
        if vvs_name is not None:
            # Admin has set a volume set name to add the volume to
            try:
                self.client.addVolumeToVolumeSet(vvs_name, volume_name)
            except hpeexceptions.HTTPNotFound:
                msg = _('VV Set %s does not exist.') % vvs_name
                LOG.error(msg)
                raise exception.InvalidInput(reason=msg)
        else:
            vvs_name = self._get_3par_vvs_name(volume['id'])
            domain = self.get_domain(cpg)
            self.client.createVolumeSet(vvs_name, domain)
            try:
                self._set_qos_rule(qos, vvs_name)
                self._set_flash_cache_policy_in_vvs(flash_cache, vvs_name)
                self.client.addVolumeToVolumeSet(vvs_name, volume_name)
            except Exception as ex:
                # Cleanup the volume set if unable to create the qos rule
                # or flash cache policy or add the volume to the volume set
                self.client.deleteVolumeSet(vvs_name)
                raise exception.CinderException(ex)

    def get_cpg(self, volume, allowSnap=False):
        """Return the CPG the volume lives in (or its pool as fallback)."""
        volume_name = self._get_3par_vol_name(volume['id'])
        vol = self.client.getVolume(volume_name)
        # Search for 'userCPG' in the get volume REST API,
        # if found return userCPG, else search for snapCPG attribute
        # when allowSnap=True. For the cases where 3PAR REST call for
        # get volume doesn't have either userCPG or snapCPG,
        # take the default value of cpg from 'host' attribute from volume
        # param
        LOG.debug("get volume response is: %s", vol)
        if 'userCPG' in vol:
            return vol['userCPG']
        elif allowSnap and 'snapCPG' in vol:
            return vol['snapCPG']
        else:
            return volume_utils.extract_host(volume['host'], 'pool')

    def _get_3par_vol_comment(self, volume_name):
        """Return the array-side 'comment' metadata for a volume, or None."""
        vol = self.client.getVolume(volume_name)
        if 'comment' in vol:
            return vol['comment']
        return None

    def validate_persona(self, persona_value):
        """Validate persona value.

        If the passed in persona_value is not valid, raise InvalidInput,
        otherwise return the persona ID.

        :param persona_value:
        :raises exception.InvalidInput:
        :returns: persona ID
        """
        if persona_value not in self.valid_persona_values:
            err = (_("Must specify a valid persona %(valid)s,"
                     "value '%(persona)s' is invalid.") %
                   {'valid': self.valid_persona_values,
                    'persona': persona_value})
            LOG.error(err)
            raise exception.InvalidInput(reason=err)
        # persona is set by the id so remove the text and return the id
        # i.e for persona '1 - Generic' returns 1
        persona_id = persona_value.split(' ')
        return persona_id[0]

    def get_persona_type(self, volume, hpe3par_keys=None):
        """Return the validated persona ID for the volume's type.

        Falls back to the first entry of self.valid_persona_values when
        the type carries no 'persona' key.
        """
        default_persona = self.valid_persona_values[0]
        type_id = volume.get('volume_type_id', None)
        if type_id is not None:
            volume_type = self._get_volume_type(type_id)
            if hpe3par_keys is None:
                hpe3par_keys = self._get_keys_by_volume_type(volume_type)
        persona_value = self._get_key_value(hpe3par_keys, 'persona',
                                            default_persona)
        return self.validate_persona(persona_value)

    def get_type_info(self, type_id):
        """Get 3PAR type info for the given type_id.

        Reconciles VV Set, old-style extra-specs, and QOS specs
        and returns commonly used info about the type.

        :returns: hpe3par_keys, qos, volume_type, vvs_name
        """
        volume_type = None
        vvs_name = None
        hpe3par_keys = {}
        qos = {}
        if type_id is not None:
            volume_type = self._get_volume_type(type_id)
            hpe3par_keys = self._get_keys_by_volume_type(volume_type)
            vvs_name = self._get_key_value(hpe3par_keys, 'vvs')
            # QoS from the type is only honored when no explicit VV set
            # is named; a named set carries its own QoS rule.
            if vvs_name is None:
                qos = self._get_qos_by_volume_type(volume_type)
        return hpe3par_keys, qos, volume_type, vvs_name
    def get_volume_settings_from_type_id(self, type_id, pool):
        """Get 3PAR volume settings given a type_id.

        Combines type info and config settings to return a dictionary
        describing the 3PAR volume settings.  Does some validation (CPG).
        Uses pool as the default cpg (when not specified in volume type
        specs).

        :param type_id: id of type to get settings for
        :param pool: CPG to use if type does not have one set
        :returns: dict
        """

        hpe3par_keys, qos, volume_type, vvs_name = self.get_type_info(type_id)

        # Default to pool extracted from host.
        # If that doesn't work use the 1st CPG in the config as the default.
        default_cpg = pool or self._client_conf['hpe3par_cpg'][0]

        cpg = self._get_key_value(hpe3par_keys, 'cpg', default_cpg)
        # NOTE: identity ('is not') is deliberate here: _get_key_value
        # returns the very same default object when the key is absent, so
        # this detects "a cpg was explicitly specified in the extra specs".
        if cpg is not default_cpg:
            # The cpg was specified in a volume type extra spec so it
            # needs to be validated that it's in the correct domain.
            # log warning here
            msg = ("'hpe3par:cpg' is not supported as an extra spec "
                   "in a volume type.  CPG's are chosen by "
                   "the cinder scheduler, as a pool, from the "
                   "cinder.conf entry 'hpe3par_cpg', which can "
                   "be a list of CPGs.")
            versionutils.report_deprecated_feature(LOG, msg)
            LOG.info("Using pool %(pool)s instead of %(cpg)s",
                     {'pool': pool, 'cpg': cpg})

            cpg = pool
        self.validate_cpg(cpg)
        # Look to see if the snap_cpg was specified in volume type
        # extra spec, if not use hpe3par_cpg_snap from config as the
        # default.
        snap_cpg = self.config.hpe3par_cpg_snap
        snap_cpg = self._get_key_value(hpe3par_keys, 'snap_cpg', snap_cpg)
        # If it's still not set or empty then set it to the cpg.
        if not snap_cpg:
            snap_cpg = cpg

        # if provisioning is not set use thin
        default_prov = self.valid_prov_values[0]
        prov_value = self._get_key_value(hpe3par_keys, 'provisioning',
                                         default_prov)
        # check for valid provisioning type
        if prov_value not in self.valid_prov_values:
            err = (_("Must specify a valid provisioning type %(valid)s, "
                     "value '%(prov)s' is invalid.") %
                   {'valid': self.valid_prov_values,
                    'prov': prov_value})
            LOG.error(err)
            raise exception.InvalidInput(reason=err)

        tpvv = True
        tdvv = False
        if prov_value == "full":
            tpvv = False
        elif prov_value == "dedup":
            tpvv = False
            tdvv = True

        if tdvv and (self.API_VERSION < DEDUP_API_VERSION):
            err = (_("Dedup is a valid provisioning type, "
                     "but requires WSAPI version '%(dedup_version)s' "
                     "version '%(version)s' is installed.") %
                   {'dedup_version': DEDUP_API_VERSION,
                    'version': self.API_VERSION})
            LOG.error(err)
            raise exception.InvalidInput(reason=err)

        return {'hpe3par_keys': hpe3par_keys,
                'cpg': cpg, 'snap_cpg': snap_cpg,
                'vvs_name': vvs_name, 'qos': qos,
                'tpvv': tpvv, 'tdvv': tdvv, 'volume_type': volume_type}

    def get_volume_settings_from_type(self, volume, host=None):
        """Get 3PAR volume settings given a volume.

        Combines type info and config settings to return a dictionary
        describing the 3PAR volume settings.  Does some validation (CPG
        and persona).

        :param volume:
        :param host: Optional host to use for default pool.
        :returns: dict
        """

        type_id = volume.get('volume_type_id', None)

        pool = None
        if host:
            pool = volume_utils.extract_host(host['host'], 'pool')
        else:
            pool = volume_utils.extract_host(volume['host'], 'pool')

        volume_settings = self.get_volume_settings_from_type_id(type_id, pool)

        # check for valid persona even if we don't use it until
        # attach time, this will give the end user notice that the
        # persona type is invalid at volume creation time
        self.get_persona_type(volume, volume_settings['hpe3par_keys'])

        return volume_settings

    def create_volume(self, volume):
        """Create a 3PAR volume for the cinder volume.

        Applies type-derived settings (CPG, snap CPG, provisioning,
        compression, QoS/VV set, flash cache) and, for replicated types,
        sets up replication.

        :returns: model update dict (host/replication fields) or None
        :raises exception.Duplicate, exception.Invalid,
            exception.InvalidInput, exception.CinderException:
        """
        LOG.debug('CREATE VOLUME (%(disp_name)s: %(vol_name)s %(id)s on '
                  '%(host)s)',
                  {'disp_name': volume['display_name'],
                   'vol_name': volume['name'],
                   'id': self._get_3par_vol_name(volume['id']),
                   'host': volume['host']})
        try:
            comments = {'volume_id': volume['id'],
                        'name': volume['name'],
                        'type': 'OpenStack'}

            name = volume.get('display_name', None)
            if name:
                comments['display_name'] = name

            # get the options supported by volume types
            type_info = self.get_volume_settings_from_type(volume)
            volume_type = type_info['volume_type']
            vvs_name = type_info['vvs_name']
            qos = type_info['qos']
            cpg = type_info['cpg']
            snap_cpg = type_info['snap_cpg']
            tpvv = type_info['tpvv']
            tdvv = type_info['tdvv']
            flash_cache = self.get_flash_cache_policy(
                type_info['hpe3par_keys'])
            compression = self.get_compression_policy(
                type_info['hpe3par_keys'])

            consis_group_snap_type = False
            if volume_type is not None:
                extra_specs = volume_type.get('extra_specs', None)
                if extra_specs:
                    gsnap_val = extra_specs.get(
                        'consistent_group_snapshot_enabled', None)
                    # NOTE(review): comparing against " True" (leading
                    # space) looks like a garbled "<is> True" boolean
                    # extra-spec match string -- confirm against the
                    # project's extra-spec conventions.
                    if gsnap_val is not None and gsnap_val == " True":
                        consis_group_snap_type = True

            cg_id = volume.get('group_id', None)
            if cg_id and consis_group_snap_type:
                vvs_name = self._get_3par_vvs_name(cg_id)

            type_id = volume.get('volume_type_id', None)
            if type_id is not None:
                comments['volume_type_name'] = volume_type.get('name')
                comments['volume_type_id'] = type_id
                if vvs_name is not None:
                    comments['vvs'] = vvs_name
                else:
                    comments['qos'] = qos

            extras = {'comment': json.dumps(comments),
                      'snapCPG': snap_cpg,
                      'tpvv': tpvv}

            # Only set the dedup option if the backend supports it.
            if self.API_VERSION >= DEDUP_API_VERSION:
                extras['tdvv'] = tdvv

            capacity = self._capacity_from_size(volume['size'])
            volume_name = self._get_3par_vol_name(volume['id'])

            if compression is not None:
                extras['compression'] = compression

            self.client.createVolume(volume_name, cpg, capacity, extras)
            if qos or vvs_name or flash_cache is not None:
                try:
                    self._add_volume_to_volume_set(volume, volume_name,
                                                   cpg, vvs_name, qos,
                                                   flash_cache)
                except exception.InvalidInput as ex:
                    # Delete the volume if unable to add it to the volume set
                    self.client.deleteVolume(volume_name)
                    LOG.error("Exception: %s", ex)
                    raise exception.CinderException(ex)

            # v2 replication check
            replication_flag = False
            if self._volume_of_replicated_type(volume) and (
               self._do_volume_replication_setup(volume)):
                replication_flag = True

        except hpeexceptions.HTTPConflict:
            msg = _("Volume (%s) already exists on array") % volume_name
            LOG.error(msg)
            raise exception.Duplicate(msg)
        except hpeexceptions.HTTPBadRequest as ex:
            LOG.error("Exception: %s", ex)
            raise exception.Invalid(ex.get_description())
        except exception.InvalidInput as ex:
            LOG.error("Exception: %s", ex)
            raise
        except exception.CinderException as ex:
            LOG.error("Exception: %s", ex)
            raise
        except Exception as ex:
            LOG.error("Exception: %s", ex)
            raise exception.CinderException(ex)

        return self._get_model_update(volume['host'], cpg,
                                      replication=replication_flag,
                                      provider_location=self.client.id)

    def _copy_volume(self, src_name, dest_name, cpg, snap_cpg=None,
                     tpvv=True, tdvv=False, compression=None):
        """Start an online physical copy of src_name to dest_name.

        :returns: the 3PAR background task id of the copy.
        """
        # Virtual volume sets are not supported with the -online option
        LOG.debug('Creating clone of a volume %(src)s to %(dest)s.',
                  {'src': src_name, 'dest': dest_name})

        optional = {'tpvv': tpvv, 'online': True}
        if snap_cpg is not None:
            optional['snapCPG'] = snap_cpg

        if self.API_VERSION >= DEDUP_API_VERSION:
            optional['tdvv'] = tdvv

        if (compression is not None and
                self.API_VERSION >= COMPRESSION_API_VERSION):
            optional['compression'] = compression

        body = self.client.copyVolume(src_name, dest_name, cpg, optional)
        return body['taskid']
-online option - LOG.debug('Creating clone of a volume %(src)s to %(dest)s.', - {'src': src_name, 'dest': dest_name}) - - optional = {'tpvv': tpvv, 'online': True} - if snap_cpg is not None: - optional['snapCPG'] = snap_cpg - - if self.API_VERSION >= DEDUP_API_VERSION: - optional['tdvv'] = tdvv - - if (compression is not None and - self.API_VERSION >= COMPRESSION_API_VERSION): - optional['compression'] = compression - - body = self.client.copyVolume(src_name, dest_name, cpg, optional) - return body['taskid'] - - def get_next_word(self, s, search_string): - """Return the next word. - - Search 's' for 'search_string', if found return the word preceding - 'search_string' from 's'. - """ - word = re.search(search_string.strip(' ') + ' ([^ ]*)', s) - return word.groups()[0].strip(' ') - - def _get_3par_vol_comment_value(self, vol_comment, key): - comment_dict = dict(ast.literal_eval(vol_comment)) - if key in comment_dict: - return comment_dict[key] - return None - - def _get_model_update(self, volume_host, cpg, replication=False, - provider_location=None): - """Get model_update dict to use when we select a pool. - - The pools implementation uses a volume['host'] suffix of :poolname. - When the volume comes in with this selected pool, we sometimes use - a different pool (e.g. because the type says to use a different pool). - So in the several places that we do this, we need to return a model - update so that the volume will have the actual pool name in the host - suffix after the operation. - - Given a volume_host, which should (might) have the pool suffix, and - given the CPG we actually chose to use, return a dict to use for a - model update iff an update is needed. - - :param volume_host: The volume's host string. - :param cpg: The actual pool (cpg) used, for example from the type. 
- :returns: dict Model update if we need to update volume host, else None - """ - model_update = {} - host = volume_utils.extract_host(volume_host, 'backend') - host_and_pool = volume_utils.append_host(host, cpg) - if volume_host != host_and_pool: - # Since we selected a pool based on type, update the model. - model_update['host'] = host_and_pool - if replication: - model_update['replication_status'] = 'enabled' - if replication and provider_location: - model_update['provider_location'] = provider_location - if not model_update: - model_update = None - return model_update - - def _create_temp_snapshot(self, volume): - """This creates a temporary snapshot of a volume. - - This is used by cloning a volume so that we can then - issue extend volume against the original volume. - """ - vol_name = self._get_3par_vol_name(volume['id']) - # create a brand new uuid for the temp snap - snap_uuid = uuid.uuid4().hex - - # this will be named tss-%s - snap_name = self._get_3par_snap_name(snap_uuid, temp_snap=True) - - extra = {'volume_name': volume['name'], - 'volume_id': volume['id']} - - optional = {'comment': json.dumps(extra)} - - # let the snapshot die in an hour - optional['expirationHours'] = 1 - - LOG.info("Creating temp snapshot %(snap)s from volume %(vol)s", - {'snap': snap_name, 'vol': vol_name}) - - self.client.createSnapshot(snap_name, vol_name, optional) - return self.client.getVolume(snap_name) - - def create_cloned_volume(self, volume, src_vref): - try: - vol_name = self._get_3par_vol_name(volume['id']) - src_vol_name = self._get_3par_vol_name(src_vref['id']) - back_up_process = False - vol_chap_enabled = False - - # Check whether a volume is ISCSI and CHAP enabled on it. 
- if self._client_conf['hpe3par_iscsi_chap_enabled']: - try: - vol_chap_enabled = self.client.getVolumeMetaData( - src_vol_name, 'HPQ-cinder-CHAP-name')['value'] - except hpeexceptions.HTTPNotFound: - LOG.debug("CHAP is not enabled on volume %(vol)s ", - {'vol': src_vref['id']}) - vol_chap_enabled = False - - # Check whether a process is a backup - if str(src_vref['status']) == 'backing-up': - back_up_process = True - - # if the sizes of the 2 volumes are the same and except backup - # process for ISCSI volume with chap enabled on it. - # we can do an online copy, which is a background process - # on the 3PAR that makes the volume instantly available. - # We can't resize a volume, while it's being copied. - if volume['size'] == src_vref['size'] and not ( - back_up_process and vol_chap_enabled): - LOG.debug("Creating a clone of volume, using online copy.") - - type_info = self.get_volume_settings_from_type(volume) - cpg = type_info['cpg'] - qos = type_info['qos'] - vvs_name = type_info['vvs_name'] - flash_cache = self.get_flash_cache_policy( - type_info['hpe3par_keys']) - - compression_val = self.get_compression_policy( - type_info['hpe3par_keys']) - # make the 3PAR copy the contents. - # can't delete the original until the copy is done. 
- self._copy_volume(src_vol_name, vol_name, cpg=cpg, - snap_cpg=type_info['snap_cpg'], - tpvv=type_info['tpvv'], - tdvv=type_info['tdvv'], - compression=compression_val) - - if qos or vvs_name or flash_cache is not None: - try: - self._add_volume_to_volume_set( - volume, vol_name, cpg, vvs_name, qos, flash_cache) - except exception.InvalidInput as ex: - # Delete volume if unable to add it to the volume set - self.client.deleteVolume(vol_name) - dbg = {'volume': vol_name, - 'vvs_name': vvs_name, - 'err': six.text_type(ex)} - msg = _("Failed to add volume '%(volume)s' to vvset " - "'%(vvs_name)s' because '%(err)s'") % dbg - LOG.error(msg) - raise exception.CinderException(msg) - - # v2 replication check - replication_flag = False - if self._volume_of_replicated_type(volume) and ( - self._do_volume_replication_setup(volume)): - replication_flag = True - - return self._get_model_update(volume['host'], cpg, - replication=replication_flag, - provider_location=self.client.id) - else: - # The size of the new volume is different, so we have to - # copy the volume and wait. Do the resize after the copy - # is complete. 
    def delete_volume(self, volume):
        """Delete the backing 3PAR volume, untangling sets/copies/children.

        Replicated volumes are torn down via the replication-specific
        paths; otherwise the array-side error codes drive cleanup of
        volume sets (34), in-flight online copies (29/151), and temp
        snapshots / children (32).
        """
        # v2 replication check
        # If the volume type is replication enabled, we want to call our own
        # method of deconstructing the volume and its dependencies
        if self._volume_of_replicated_type(volume):
            replication_status = volume.get('replication_status', None)
            if replication_status and replication_status == "failed-over":
                self._delete_replicated_failed_over_volume(volume)
            else:
                self._do_volume_replication_destroy(volume)
            return

        try:
            volume_name = self._get_3par_vol_name(volume['id'])
            # Try and delete the volume, it might fail here because
            # the volume is part of a volume set which will have the
            # volume set name in the error.
            try:
                self.client.deleteVolume(volume_name)
            except hpeexceptions.HTTPBadRequest as ex:
                if ex.get_code() == 29:
                    if self.client.isOnlinePhysicalCopy(volume_name):
                        LOG.debug("Found an online copy for %(volume)s",
                                  {'volume': volume_name})
                        # the volume is in process of being cloned.
                        # stopOnlinePhysicalCopy will also delete
                        # the volume once it stops the copy.
                        self.client.stopOnlinePhysicalCopy(volume_name)
                    else:
                        LOG.error("Exception: %s", ex)
                        raise
                else:
                    LOG.error("Exception: %s", ex)
                    raise
            except hpeexceptions.HTTPConflict as ex:
                if ex.get_code() == 34:
                    # This is a special case which means the
                    # volume is part of a volume set.
                    vvset_name = self.client.findVolumeSet(volume_name)
                    LOG.debug("Returned vvset_name = %s", vvset_name)
                    if vvset_name is not None and \
                       vvset_name.startswith('vvs-'):
                        # We have a single volume per volume set, so
                        # remove the volume set.
                        self.client.deleteVolumeSet(
                            self._get_3par_vvs_name(volume['id']))
                    elif vvset_name is not None:
                        # We have a pre-defined volume set just remove the
                        # volume and leave the volume set.
                        self.client.removeVolumeFromVolumeSet(vvset_name,
                                                              volume_name)
                    self.client.deleteVolume(volume_name)
                elif ex.get_code() == 151:
                    if self.client.isOnlinePhysicalCopy(volume_name):
                        LOG.debug("Found an online copy for %(volume)s",
                                  {'volume': volume_name})
                        # the volume is in process of being cloned.
                        # stopOnlinePhysicalCopy will also delete
                        # the volume once it stops the copy.
                        self.client.stopOnlinePhysicalCopy(volume_name)
                    else:
                        # the volume is being operated on in a background
                        # task on the 3PAR.
                        # TODO(walter-boring) do a retry a few times.
                        # for now lets log a better message
                        msg = _("The volume is currently busy on the 3PAR"
                                " and cannot be deleted at this time. "
                                "You can try again later.")
                        LOG.error(msg)
                        raise exception.VolumeIsBusy(message=msg)
                elif (ex.get_code() == 32):
                    # Error 32 means that the volume has children

                    # see if we have any temp snapshots
                    snaps = self.client.getVolumeSnapshots(volume_name)
                    for snap in snaps:
                        if snap.startswith('tss-'):
                            # looks like we found a temp snapshot.
                            LOG.info(
                                "Found a temporary snapshot %(name)s",
                                {'name': snap})
                            try:
                                self.client.deleteVolume(snap)
                            except hpeexceptions.HTTPNotFound:
                                # if the volume is gone, it's as good as a
                                # successful delete
                                pass
                            except Exception:
                                msg = _("Volume has a temporary snapshot "
                                        "that can't be deleted at this "
                                        "time.")
                                raise exception.VolumeIsBusy(message=msg)

                    try:
                        # retry the delete now that temp snaps are gone
                        self.delete_volume(volume)
                    except Exception:
                        msg = _("Volume has children and cannot be deleted!")
                        raise exception.VolumeIsBusy(message=msg)
                else:
                    LOG.error("Exception: %s", ex)
                    raise exception.VolumeIsBusy(
                        message=ex.get_description())

        except hpeexceptions.HTTPNotFound as ex:
            # We'll let this act as if it worked
            # it helps clean up the cinder entries.
            LOG.warning("Delete volume id not found. Removing from "
                        "cinder: %(id)s Ex: %(msg)s",
                        {'id': volume['id'], 'msg': ex})
        except hpeexceptions.HTTPForbidden as ex:
            LOG.error("Exception: %s", ex)
            raise exception.NotAuthorized(ex.get_description())
        except hpeexceptions.HTTPConflict as ex:
            LOG.error("Exception: %s", ex)
            raise exception.VolumeIsBusy(message=ex.get_description())
        except Exception as ex:
            LOG.error("Exception: %s", ex)
            raise exception.CinderException(ex)

    def create_volume_from_snapshot(self, volume, snapshot, snap_name=None,
                                    vvs_name=None):
        """Creates a volume from a snapshot.

        Takes an array snapshot of the snapshot, converts it to a base
        volume, grows it if the requested size is larger, and applies
        QoS/VV set/flash-cache and replication settings from the type.

        :returns: model update dict
        """
        LOG.debug("Create Volume from Snapshot\n%(vol_name)s\n%(ss_name)s",
                  {'vol_name': pprint.pformat(volume['display_name']),
                   'ss_name': pprint.pformat(snapshot['display_name'])})

        model_update = {}
        if volume['size'] < snapshot['volume_size']:
            err = ("You cannot reduce size of the volume.  It must "
                   "be greater than or equal to the snapshot.")
            LOG.error(err)
            raise exception.InvalidInput(reason=err)

        try:
            if not snap_name:
                snap_name = self._get_3par_snap_name(snapshot['id'])
            volume_name = self._get_3par_vol_name(volume['id'])

            extra = {'volume_id': volume['id'],
                     'snapshot_id': snapshot['id']}

            type_id = volume.get('volume_type_id', None)

            hpe3par_keys, qos, _volume_type, vvs = self.get_type_info(
                type_id)
            if vvs:
                vvs_name = vvs

            name = volume.get('display_name', None)
            if name:
                extra['display_name'] = name

            description = volume.get('display_description', None)
            if description:
                extra['description'] = description

            optional = {'comment': json.dumps(extra),
                        'readOnly': False}

            self.client.createSnapshot(volume_name, snap_name, optional)

            # Convert snapshot volume to base volume type
            LOG.debug('Converting to base volume type: %s.',
                      volume['id'])
            model_update = self._convert_to_base_volume(volume)

            # Grow the snapshot if needed
            growth_size = volume['size'] - snapshot['volume_size']
            if growth_size > 0:
                try:
                    growth_size_mib = growth_size * units.Gi / units.Mi
                    LOG.debug('Growing volume: %(id)s by %(size)s GiB.',
                              {'id': volume['id'], 'size': growth_size})
                    self.client.growVolume(volume_name, growth_size_mib)
                except Exception as ex:
                    LOG.error("Error extending volume %(id)s. "
                              "Ex: %(ex)s",
                              {'id': volume['id'], 'ex': ex})
                    # Delete the volume if unable to grow it
                    self.client.deleteVolume(volume_name)
                    raise exception.CinderException(ex)

            # Check for flash cache setting in extra specs
            flash_cache = self.get_flash_cache_policy(hpe3par_keys)

            if qos or vvs_name or flash_cache is not None:
                cpg_names = self._get_key_value(
                    hpe3par_keys, 'cpg', self._client_conf['hpe3par_cpg'])
                try:
                    self._add_volume_to_volume_set(volume, volume_name,
                                                   cpg_names[0], vvs_name,
                                                   qos, flash_cache)
                except Exception as ex:
                    # Delete the volume if unable to add it to the volume
                    # set
                    self.client.deleteVolume(volume_name)
                    LOG.error("Exception: %s", ex)
                    raise exception.CinderException(ex)

            # v2 replication check
            if self._volume_of_replicated_type(volume) and (
               self._do_volume_replication_setup(volume)):
                model_update['replication_status'] = 'enabled'
                model_update['provider_location'] = self.client.id

        except hpeexceptions.HTTPForbidden as ex:
            LOG.error("Exception: %s", ex)
            raise exception.NotAuthorized()
        except hpeexceptions.HTTPNotFound as ex:
            LOG.error("Exception: %s", ex)
            raise exception.NotFound()
        except Exception as ex:
            LOG.error("Exception: %s", ex)
            raise exception.CinderException(ex)
        return model_update
    def create_snapshot(self, snapshot):
        """Create a read-only 3PAR snapshot for the cinder snapshot.

        Snapshot metadata (volume name/id, display name, description) is
        stored in the array-side comment; expiration/retention hours come
        from driver configuration when set.
        """
        LOG.debug("Create Snapshot\n%s", pprint.pformat(snapshot))

        try:
            snap_name = self._get_3par_snap_name(snapshot['id'])
            vol_name = self._get_3par_vol_name(snapshot['volume_id'])

            extra = {'volume_name': snapshot['volume_name']}
            vol_id = snapshot.get('volume_id', None)
            if vol_id:
                extra['volume_id'] = vol_id

            try:
                extra['display_name'] = snapshot['display_name']
            except AttributeError:
                pass

            try:
                extra['description'] = snapshot['display_description']
            except AttributeError:
                pass

            optional = {'comment': json.dumps(extra),
                        'readOnly': True}
            if self.config.hpe3par_snapshot_expiration:
                optional['expirationHours'] = (
                    int(self.config.hpe3par_snapshot_expiration))

            if self.config.hpe3par_snapshot_retention:
                optional['retentionHours'] = (
                    int(self.config.hpe3par_snapshot_retention))

            self.client.createSnapshot(snap_name, vol_name, optional)
        except hpeexceptions.HTTPForbidden as ex:
            LOG.error("Exception: %s", ex)
            raise exception.NotAuthorized()
        except hpeexceptions.HTTPNotFound as ex:
            LOG.error("Exception: %s", ex)
            raise exception.NotFound()
int(self.config.hpe3par_snapshot_expiration)) - - if self.config.hpe3par_snapshot_retention: - optional['retentionHours'] = ( - int(self.config.hpe3par_snapshot_retention)) - - self.client.createSnapshot(snap_name, vol_name, optional) - except hpeexceptions.HTTPForbidden as ex: - LOG.error("Exception: %s", ex) - raise exception.NotAuthorized() - except hpeexceptions.HTTPNotFound as ex: - LOG.error("Exception: %s", ex) - raise exception.NotFound() - - def migrate_volume(self, volume, host): - """Migrate directly if source and dest are managed by same storage. - - :param volume: A dictionary describing the volume to migrate - :param host: A dictionary describing the host to migrate to, where - host['host'] is its name, and host['capabilities'] is a - dictionary of its reported capabilities. - :returns: (False, None) if the driver does not support migration, - (True, model_update) if successful - - """ - - dbg = {'id': volume['id'], - 'host': host['host'], - 'status': volume['status']} - LOG.debug('enter: migrate_volume: id=%(id)s, host=%(host)s, ' - 'status=%(status)s.', dbg) - ret = False, None - - if volume['status'] in ['available', 'in-use']: - volume_type = None - if volume['volume_type_id']: - volume_type = self._get_volume_type(volume['volume_type_id']) - - try: - ret = self.retype(volume, volume_type, None, host) - except Exception as e: - LOG.info('3PAR driver cannot perform migration. ' - 'Retype exception: %s', e) - - LOG.debug('leave: migrate_volume: id=%(id)s, host=%(host)s, ' - 'status=%(status)s.', dbg) - dbg_ret = {'supported': ret[0], 'model_update': ret[1]} - LOG.debug('migrate_volume result: %(supported)s, %(model_update)s', - dbg_ret) - return ret - - def update_migrated_volume(self, context, volume, new_volume, - original_volume_status): - """Rename the new (temp) volume to it's original name. - - - This method tries to rename the new volume to it's original - name after the migration has completed. 
- - """ - LOG.debug("Update volume name for %(id)s", {'id': new_volume['id']}) - name_id = None - provider_location = None - if original_volume_status == 'available': - # volume isn't attached and can be updated - original_name = self._get_3par_vol_name(volume['id']) - temp_name = self._get_3par_vol_name(volume['id'], temp_vol=True) - current_name = self._get_3par_vol_name(new_volume['id']) - try: - volumeMods = {'newName': original_name} - volumeTempMods = {'newName': temp_name} - volumeCurrentMods = {'newName': current_name} - # swap volume name in backend - self.client.modifyVolume(original_name, volumeTempMods) - self.client.modifyVolume(current_name, volumeMods) - self.client.modifyVolume(temp_name, volumeCurrentMods) - LOG.info("Volume name changed from %(tmp)s to %(orig)s", - {'tmp': current_name, 'orig': original_name}) - except Exception as e: - LOG.error("Changing the volume name from %(tmp)s to " - "%(orig)s failed because %(reason)s", - {'tmp': current_name, 'orig': original_name, - 'reason': e}) - name_id = new_volume['_name_id'] or new_volume['id'] - provider_location = new_volume['provider_location'] - else: - # the backend can't change the name. - name_id = new_volume['_name_id'] or new_volume['id'] - provider_location = new_volume['provider_location'] - - return {'_name_id': name_id, 'provider_location': provider_location} - - def _wait_for_task_completion(self, task_id): - """This waits for a 3PAR background task complete or fail. - - This looks for a task to get out of the 'active' state. 
- """ - # Wait for the physical copy task to complete - def _wait_for_task(task_id): - status = self.client.getTask(task_id) - LOG.debug("3PAR Task id %(id)s status = %(status)s", - {'id': task_id, - 'status': status['status']}) - if status['status'] is not self.client.TASK_ACTIVE: - self._task_status = status - raise loopingcall.LoopingCallDone() - - self._task_status = None - timer = loopingcall.FixedIntervalLoopingCall( - _wait_for_task, task_id) - timer.start(interval=1).wait() - - return self._task_status - - def _convert_to_base_volume(self, volume, new_cpg=None): - try: - type_info = self.get_volume_settings_from_type(volume) - if new_cpg: - cpg = new_cpg - else: - cpg = type_info['cpg'] - - # Change the name such that it is unique since 3PAR - # names must be unique across all CPGs - volume_name = self._get_3par_vol_name(volume['id']) - temp_vol_name = volume_name.replace("osv-", "omv-") - - compression = self.get_compression_policy( - type_info['hpe3par_keys']) - # Create a physical copy of the volume - task_id = self._copy_volume(volume_name, temp_vol_name, - cpg, cpg, type_info['tpvv'], - type_info['tdvv'], - compression) - - LOG.debug('Copy volume scheduled: convert_to_base_volume: ' - 'id=%s.', volume['id']) - - task_status = self._wait_for_task_completion(task_id) - - if task_status['status'] is not self.client.TASK_DONE: - dbg = {'status': task_status, 'id': volume['id']} - msg = _('Copy volume task failed: convert_to_base_volume: ' - 'id=%(id)s, status=%(status)s.') % dbg - raise exception.CinderException(msg) - else: - LOG.debug('Copy volume completed: convert_to_base_volume: ' - 'id=%s.', volume['id']) - - comment = self._get_3par_vol_comment(volume_name) - if comment: - self.client.modifyVolume(temp_vol_name, {'comment': comment}) - LOG.debug('Volume rename completed: convert_to_base_volume: ' - 'id=%s.', volume['id']) - - # Delete source volume after the copy is complete - self.client.deleteVolume(volume_name) - LOG.debug('Delete src volume 
completed: convert_to_base_volume: ' - 'id=%s.', volume['id']) - - # Rename the new volume to the original name - self.client.modifyVolume(temp_vol_name, {'newName': volume_name}) - - LOG.info('Completed: convert_to_base_volume: ' - 'id=%s.', volume['id']) - except hpeexceptions.HTTPConflict: - msg = _("Volume (%s) already exists on array.") % volume_name - LOG.error(msg) - raise exception.Duplicate(msg) - except hpeexceptions.HTTPBadRequest as ex: - LOG.error("Exception: %s", ex) - raise exception.Invalid(ex.get_description()) - except exception.CinderException as ex: - LOG.error("Exception: %s", ex) - raise - except Exception as ex: - LOG.error("Exception: %s", ex) - raise exception.CinderException(ex) - - return self._get_model_update(volume['host'], cpg) - - def delete_snapshot(self, snapshot): - LOG.debug("Delete Snapshot id %(id)s %(name)s", - {'id': snapshot['id'], 'name': pprint.pformat(snapshot)}) - - try: - snap_name = self._get_3par_snap_name(snapshot['id']) - self.client.deleteVolume(snap_name) - except hpeexceptions.HTTPForbidden as ex: - LOG.error("Exception: %s", ex) - raise exception.NotAuthorized() - except hpeexceptions.HTTPNotFound as ex: - # We'll let this act as if it worked - # it helps clean up the cinder entries. - LOG.warning("Delete Snapshot id not found. 
Removing from " - "cinder: %(id)s Ex: %(msg)s", - {'id': snapshot['id'], 'msg': ex}) - except hpeexceptions.HTTPConflict as ex: - if (ex.get_code() == 32): - # Error 32 means that the snapshot has children - # see if we have any temp snapshots - snaps = self.client.getVolumeSnapshots(snap_name) - for snap in snaps: - if snap.startswith('tss-'): - LOG.info( - "Found a temporary snapshot %(name)s", - {'name': snap}) - try: - self.client.deleteVolume(snap) - except hpeexceptions.HTTPNotFound: - # if the volume is gone, it's as good as a - # successful delete - pass - except Exception: - msg = _("Snapshot has a temporary snapshot that " - "can't be deleted at this time.") - raise exception.SnapshotIsBusy(message=msg) - - try: - self.client.deleteVolume(snap_name) - except Exception: - msg = _("Snapshot has children and cannot be deleted!") - raise exception.SnapshotIsBusy(message=msg) - else: - LOG.error("Exception: %s", ex) - raise exception.SnapshotIsBusy(message=ex.get_description()) - - def _get_3par_hostname_from_wwn_iqn(self, wwns, iqns): - if wwns is not None and not isinstance(wwns, list): - wwns = [wwns] - if iqns is not None and not isinstance(iqns, list): - iqns = [iqns] - - out = self.client.getHosts() - hosts = out['members'] - for host in hosts: - if 'iSCSIPaths' in host and iqns is not None: - iscsi_paths = host['iSCSIPaths'] - for iscsi in iscsi_paths: - for iqn in iqns: - if iqn == iscsi['name']: - return host['name'] - - if 'FCPaths' in host and wwns is not None: - fc_paths = host['FCPaths'] - for fc in fc_paths: - for wwn in wwns: - if wwn.upper() == fc['wwn'].upper(): - return host['name'] - - def terminate_connection(self, volume, hostname, wwn=None, iqn=None): - """Driver entry point to unattach a volume from an instance.""" - # does 3par know this host by a different name? 
def build_nsp(self, portPos):
    """Render a port-position dict as an 'node:slot:cardPort' string."""
    fields = (portPos['node'], portPos['slot'], portPos['cardPort'])
    return '%s:%s:%s' % fields

def build_portPos(self, nsp):
    """Parse an 'node:slot:cardPort' string into a dict of ints."""
    fields = nsp.split(":")
    return {'node': int(fields[0]),
            'slot': int(fields[1]),
            'cardPort': int(fields[2])}
- - TaskWaiter is used to make this function wait until the 3PAR task - is no longer active. When the task is no longer active, then it must - either be done or it is in a state that we need to treat as an error. - """ - - compression = False - if new_compression is not None: - compression = new_compression - - if old_tpvv == new_tpvv and old_tdvv == new_tdvv: - if new_cpg != old_cpg: - LOG.info("Modifying %(volume_name)s userCPG " - "from %(old_cpg)s" - " to %(new_cpg)s", - {'volume_name': volume_name, - 'old_cpg': old_cpg, 'new_cpg': new_cpg}) - _response, body = self.client.modifyVolume( - volume_name, - {'action': 6, - 'tuneOperation': 1, - 'userCPG': new_cpg}) - task_id = body['taskid'] - status = self.TaskWaiter(self.client, task_id).wait_for_task() - if status['status'] is not self.client.TASK_DONE: - msg = (_('Tune volume task stopped before it was done: ' - 'volume_name=%(volume_name)s, ' - 'task-status=%(status)s.') % - {'status': status, 'volume_name': volume_name}) - raise exception.VolumeBackendAPIException(msg) - else: - if new_tpvv: - cop = self.CONVERT_TO_THIN - LOG.info("Converting %(volume_name)s to thin provisioning " - "with userCPG=%(new_cpg)s", - {'volume_name': volume_name, 'new_cpg': new_cpg}) - elif new_tdvv: - cop = self.CONVERT_TO_DEDUP - LOG.info("Converting %(volume_name)s to thin dedup " - "provisioning with userCPG=%(new_cpg)s", - {'volume_name': volume_name, 'new_cpg': new_cpg}) - else: - cop = self.CONVERT_TO_FULL - LOG.info("Converting %(volume_name)s to full provisioning " - "with userCPG=%(new_cpg)s", - {'volume_name': volume_name, 'new_cpg': new_cpg}) - - try: - if self.API_VERSION < COMPRESSION_API_VERSION: - response, body = self.client.modifyVolume( - volume_name, - {'action': 6, - 'tuneOperation': 1, - 'userCPG': new_cpg, - 'conversionOperation': cop}) - else: - response, body = self.client.modifyVolume( - volume_name, - {'action': 6, - 'tuneOperation': 1, - 'userCPG': new_cpg, - 'compression': compression, - 
def _retype_pre_checks(self, volume, host, new_persona,
                       old_cpg, new_cpg,
                       new_snap_cpg):
    """Validate retype parameters before any change is made.

    Raises when the requested retype must not be attempted: invalid
    persona, a destination owned by a different driver or array, an
    unusable snapCPG name, or CPGs living in different 3PAR domains.
    """
    if new_persona:
        self.validate_persona(new_persona)

    if host is not None:
        location_info = host['capabilities']['location_info']
        host_type, host_id, _host_cpg = location_info.split(':')

        if host_type != 'HPE3PARDriver':
            reason = (_("Cannot retype from HPE3PARDriver to %s.") %
                      host_type)
            raise exception.InvalidHost(reason)

        sys_info = self.client.getStorageSystemInfo()
        if host_id != sys_info['serialNumber']:
            reason = _("Cannot retype from one 3PAR array to another.")
            raise exception.InvalidHost(reason)

    # A blank or whitespace snapCPG would only fail later on the array;
    # fail fast here instead.
    if not new_snap_cpg or new_snap_cpg.isspace():
        reason = (_("Invalid new snapCPG name for retype. "
                    "new_snap_cpg='%s'.") % new_snap_cpg)
        raise exception.InvalidInput(reason)

    # Both the user CPG and the snap CPG must share the old CPG's domain.
    domain = self.get_domain(old_cpg)
    if domain != self.get_domain(new_cpg):
        raise exception.Invalid3PARDomain(
            _('Cannot retype to a CPG in a different domain.'))
    if domain != self.get_domain(new_snap_cpg):
        raise exception.Invalid3PARDomain(
            _('Cannot retype to a snap CPG in a different domain.'))
- retype_flow.add( - ModifyVolumeTask(action), - ModifySpecsTask(action), - TuneVolumeTask(action), - ReplicateVolumeTask(action)) - - taskflow.engines.run( - retype_flow, - store={'common': self, - 'volume_name': volume_name, 'volume': volume, - 'old_tpvv': old_tpvv, 'new_tpvv': new_tpvv, - 'old_tdvv': old_tdvv, 'new_tdvv': new_tdvv, - 'old_cpg': old_cpg, 'new_cpg': new_cpg, - 'old_snap_cpg': old_snap_cpg, 'new_snap_cpg': new_snap_cpg, - 'old_vvs': old_vvs, 'new_vvs': new_vvs, - 'old_qos': old_qos, 'new_qos': new_qos, - 'old_flash_cache': old_flash_cache, - 'new_flash_cache': new_flash_cache, - 'new_type_name': new_type_name, 'new_type_id': new_type_id, - 'old_comment': old_comment, - 'new_compression': new_compression - }) - - def _retype_from_old_to_new(self, volume, new_type, old_volume_settings, - host): - """Convert the volume to be of the new type. Given old type settings. - - Returns True if the retype was successful. - Uses taskflow to revert changes if errors occur. - - :param volume: A dictionary describing the volume to retype - :param new_type: A dictionary describing the volume type to convert to - :param old_volume_settings: Volume settings describing the old type. - :param host: A dictionary describing the host, where - host['host'] is its name, and host['capabilities'] is a - dictionary of its reported capabilities. Host validation - is just skipped if host is None. 
- """ - volume_id = volume['id'] - volume_name = self._get_3par_vol_name(volume_id) - new_type_name = None - new_type_id = None - if new_type: - new_type_name = new_type['name'] - new_type_id = new_type['id'] - pool = None - if host: - pool = volume_utils.extract_host(host['host'], 'pool') - else: - pool = volume_utils.extract_host(volume['host'], 'pool') - new_volume_settings = self.get_volume_settings_from_type_id( - new_type_id, pool) - new_cpg = new_volume_settings['cpg'] - new_snap_cpg = new_volume_settings['snap_cpg'] - new_tpvv = new_volume_settings['tpvv'] - new_tdvv = new_volume_settings['tdvv'] - new_qos = new_volume_settings['qos'] - new_vvs = new_volume_settings['vvs_name'] - new_persona = None - new_hpe3par_keys = new_volume_settings['hpe3par_keys'] - if 'persona' in new_hpe3par_keys: - new_persona = new_hpe3par_keys['persona'] - new_flash_cache = self.get_flash_cache_policy(new_hpe3par_keys) - - # it will return None / True /False$ - new_compression = self.get_compression_policy(new_hpe3par_keys) - - old_qos = old_volume_settings['qos'] - old_vvs = old_volume_settings['vvs_name'] - old_hpe3par_keys = old_volume_settings['hpe3par_keys'] - old_flash_cache = self.get_flash_cache_policy(old_hpe3par_keys) - - # Get the current volume info because we can get in a bad state - # if we trust that all the volume type settings are still the - # same settings that were used with this volume. 
def _retype_from_no_type(self, volume, new_type):
    """Retype a volume that currently has no volume type.

    Builds the settings a type-less volume would have for the volume's
    pool and hands off to _retype_from_old_to_new with host=None.
    Uses taskflow to revert changes if errors occur.

    :param volume: dict describing the volume to retype; its current
                   volume type is ignored (treated as None)
    :param new_type: dict describing the volume type to convert to
    :returns: True plus a model update when the retype succeeded
    """
    pool = volume_utils.extract_host(volume['host'], 'pool')
    old_settings = self.get_volume_settings_from_type_id(None, pool)
    return self._retype_from_old_to_new(volume, new_type,
                                        old_settings, None)
- - :param volume: A dictionary describing the volume to retype - :param new_type: A dictionary describing the volume type to convert to - :param diff: A dictionary with the difference between the two types - :param host: A dictionary describing the host, where - host['host'] is its name, and host['capabilities'] is a - dictionary of its reported capabilities. Host validation - is just skipped if host is None. - """ - LOG.debug(("enter: retype: id=%(id)s, new_type=%(new_type)s," - "diff=%(diff)s, host=%(host)s"), {'id': volume['id'], - 'new_type': new_type, - 'diff': diff, - 'host': host}) - old_volume_settings = self.get_volume_settings_from_type(volume, host) - return self._retype_from_old_to_new(volume, new_type, - old_volume_settings, host) - - def find_existing_vlun(self, volume, host): - """Finds an existing VLUN for a volume on a host. - - Returns an existing VLUN's information. If no existing VLUN is found, - None is returned. - - :param volume: A dictionary describing a volume. - :param host: A dictionary describing a host. - """ - existing_vlun = None - try: - vol_name = self._get_3par_vol_name(volume['id']) - host_vluns = self.client.getHostVLUNs(host['name']) - - # The first existing VLUN found will be returned. 
def find_existing_vluns(self, volume, host):
    """Return every VLUN on *host* that exports *volume*.

    :param volume: dict describing a volume
    :param host: dict describing a host; host['name'] is its 3PAR name
    :returns: a possibly-empty list of VLUN dicts
    """
    existing_vluns = []
    try:
        vol_name = self._get_3par_vol_name(volume['id'])
        existing_vluns = [
            vlun for vlun in self.client.getHostVLUNs(host['name'])
            if vlun['volumeName'] == vol_name]
    except hpeexceptions.HTTPNotFound:
        # The host has no VLUNs at all -- treat as none for this volume.
        LOG.debug("No existing VLUNs were found for host/volume "
                  "combination: %(host)s, %(vol)s",
                  {'host': host['name'],
                   'vol': vol_name})
    return existing_vluns
- volume_update_list = [] - for volume in volumes: - if self._volume_of_replicated_type(volume): - try: - # Try and stop remote-copy on main array. We eat the - # exception here because when an array goes down, the - # groups will stop automatically. - rcg_name = self._get_3par_rcg_name(volume['id']) - self.client.stopRemoteCopy(rcg_name) - except Exception: - pass - - try: - # Failover to secondary array. - remote_rcg_name = self._get_3par_remote_rcg_name( - volume['id'], volume['provider_location']) - cl = self._create_replication_client(failover_target) - cl.recoverRemoteCopyGroupFromDisaster( - remote_rcg_name, self.RC_ACTION_CHANGE_TO_PRIMARY) - volume_update_list.append( - {'volume_id': volume['id'], - 'updates': {'replication_status': 'failed-over'}}) - except Exception as ex: - LOG.error("There was a problem with the failover " - "(%(error)s) and it was unsuccessful. " - "Volume '%(volume)s will not be available " - "on the failed over target.", - {'error': ex, - 'volume': volume['id']}) - LOG.error(msg) - volume_update_list.append( - {'volume_id': volume['id'], - 'updates': {'replication_status': 'error'}}) - finally: - self._destroy_replication_client(cl) - else: - # If the volume is not of replicated type, we need to - # force the status into error state so a user knows they - # do not have access to the volume. - volume_update_list.append( - {'volume_id': volume['id'], - 'updates': {'status': 'error'}}) - - return target_id, volume_update_list - - def _replication_failback(self, volumes): - # Make sure the proper steps on the backend have been completed before - # we allow a fail-over. - if not self._is_host_ready_for_failback(volumes): - msg = _("The host is not ready to be failed back. Please " - "resynchronize the volumes and resume replication on the " - "3PAR backends.") - LOG.error(msg) - raise exception.InvalidReplicationTarget(reason=msg) - - # Update the volumes status to available. 
- volume_update_list = [] - for volume in volumes: - if self._volume_of_replicated_type(volume): - volume_update_list.append( - {'volume_id': volume['id'], - 'updates': {'replication_status': 'available'}}) - else: - # Upon failing back, we can move the non-replicated volumes - # back into available state. - volume_update_list.append( - {'volume_id': volume['id'], - 'updates': {'status': 'available'}}) - - return volume_update_list - - def _is_host_ready_for_failback(self, volumes): - """Checks to make sure the volume has been synchronized - - This ensures that all the remote copy targets have been restored - to their natural direction, and all of the volumes have been - fully synchronized. - """ - try: - for volume in volumes: - if self._volume_of_replicated_type(volume): - location = volume.get('provider_location') - remote_rcg_name = self._get_3par_remote_rcg_name( - volume['id'], - location) - rcg = self.client.getRemoteCopyGroup(remote_rcg_name) - - # Make sure all targets are in their natural direction. - targets = rcg['targets'] - for target in targets: - if target['roleReversed'] or ( - target['state'] != self.RC_GROUP_STARTED): - return False - - # Make sure all volumes are fully synced. - volumes = rcg['volumes'] - for volume in volumes: - remote_volumes = volume['remoteVolumes'] - for remote_volume in remote_volumes: - if remote_volume['syncStatus'] != ( - self.SYNC_STATUS_COMPLETED): - return False - except Exception: - # If there was a problem, we will return false so we can - # log an error in the parent function. 
- return False - - return True - - def _do_replication_setup(self): - replication_targets = [] - replication_devices = self.config.replication_device - if replication_devices: - for dev in replication_devices: - remote_array = dict(dev.items()) - # Override and set defaults for certain entries - remote_array['managed_backend_name'] = ( - dev.get('managed_backend_name')) - remote_array['replication_mode'] = ( - self._get_remote_copy_mode_num( - dev.get('replication_mode'))) - remote_array['san_ssh_port'] = ( - dev.get('san_ssh_port', self.config.san_ssh_port)) - remote_array['ssh_conn_timeout'] = ( - dev.get('ssh_conn_timeout', self.config.ssh_conn_timeout)) - remote_array['san_private_key'] = ( - dev.get('san_private_key', self.config.san_private_key)) - # Format iscsi IPs correctly - iscsi_ips = dev.get('hpe3par_iscsi_ips') - if iscsi_ips: - remote_array['hpe3par_iscsi_ips'] = iscsi_ips.split(' ') - # Format hpe3par_iscsi_chap_enabled as a bool - remote_array['hpe3par_iscsi_chap_enabled'] = ( - dev.get('hpe3par_iscsi_chap_enabled') == 'True') - array_name = remote_array['backend_id'] - - # Make sure we can log into the array, that it has been - # correctly configured, and its API version meets the - # minimum requirement. - cl = None - try: - cl = self._create_replication_client(remote_array) - array_id = six.text_type(cl.getStorageSystemInfo()['id']) - remote_array['id'] = array_id - wsapi_version = cl.getWsApiVersion()['build'] - - if wsapi_version < REMOTE_COPY_API_VERSION: - LOG.warning("The secondary array must have an API " - "version of %(min_ver)s or higher. Array " - "'%(target)s' is on %(target_ver)s, " - "therefore it will not be added as a " - "valid replication target.", - {'target': array_name, - 'min_ver': REMOTE_COPY_API_VERSION, - 'target_ver': wsapi_version}) - elif not self._is_valid_replication_array(remote_array): - LOG.warning("'%s' is not a valid replication array. 
" - "In order to be valid, backend_id, " - "replication_mode, " - "hpe3par_api_url, hpe3par_username, " - "hpe3par_password, cpg_map, san_ip, " - "san_login, and san_password " - "must be specified. If the target is " - "managed, managed_backend_name must be " - "set as well.", array_name) - else: - replication_targets.append(remote_array) - except Exception: - LOG.error("Could not log in to 3PAR array (%s) with the " - "provided credentials.", array_name) - finally: - self._destroy_replication_client(cl) - - self._replication_targets = replication_targets - if self._is_replication_configured_correct(): - self._replication_enabled = True - - def _is_valid_replication_array(self, target): - required_flags = ['hpe3par_api_url', 'hpe3par_username', - 'hpe3par_password', 'san_ip', 'san_login', - 'san_password', 'backend_id', - 'replication_mode', 'cpg_map'] - try: - self.check_replication_flags(target, required_flags) - return True - except Exception: - return False - - def _is_replication_configured_correct(self): - rep_flag = True - # Make sure there is at least one replication target. - if len(self._replication_targets) < 1: - LOG.error("There must be at least one valid replication " - "device configured.") - rep_flag = False - return rep_flag - - def _is_replication_mode_correct(self, mode, sync_num): - rep_flag = True - # Make sure replication_mode is set to either sync|periodic. - mode = self._get_remote_copy_mode_num(mode) - if not mode: - LOG.error("Extra spec replication:mode must be set and must " - "be either 'sync' or 'periodic'.") - rep_flag = False - else: - # If replication:mode is periodic, replication_sync_period must be - # set between 300 - 31622400 seconds. 
- if mode == self.PERIODIC and ( - sync_num < 300 or sync_num > 31622400): - LOG.error("Extra spec replication:sync_period must be " - "greater than 299 and less than 31622401 " - "seconds.") - rep_flag = False - return rep_flag - - def is_volume_group_snap_type(self, volume_type): - consis_group_snap_type = False - if volume_type: - extra_specs = volume_type.extra_specs - if 'consistent_group_snapshot_enabled' in extra_specs: - gsnap_val = extra_specs['consistent_group_snapshot_enabled'] - consis_group_snap_type = (gsnap_val == " True") - return consis_group_snap_type - - def _volume_of_replicated_type(self, volume): - replicated_type = False - volume_type_id = volume.get('volume_type_id') - if volume_type_id: - volume_type = self._get_volume_type(volume_type_id) - - extra_specs = volume_type.get('extra_specs') - if extra_specs and 'replication_enabled' in extra_specs: - rep_val = extra_specs['replication_enabled'] - replicated_type = (rep_val == " True") - - return replicated_type - - def _is_volume_in_remote_copy_group(self, volume): - rcg_name = self._get_3par_rcg_name(volume['id']) - try: - self.client.getRemoteCopyGroup(rcg_name) - return True - except hpeexceptions.HTTPNotFound: - return False - - def _get_remote_copy_mode_num(self, mode): - ret_mode = None - if mode == "sync": - ret_mode = self.SYNC - if mode == "periodic": - ret_mode = self.PERIODIC - return ret_mode - - def _get_3par_config(self): - self._do_replication_setup() - conf = None - if self._replication_enabled: - for target in self._replication_targets: - if target['backend_id'] == self._active_backend_id: - conf = target - break - self._build_3par_config(conf) - - def _build_3par_config(self, conf=None): - """Build 3PAR client config dictionary. - - self._client_conf will contain values from self.config if the volume - is located on the primary array in order to properly contact it. 
If - the volume has been failed over and therefore on a secondary array, - self._client_conf will contain values on how to contact that array. - The only time we will return with entries from a secondary array is - with unmanaged replication. - """ - if conf: - self._client_conf['hpe3par_cpg'] = self._generate_hpe3par_cpgs( - conf.get('cpg_map')) - self._client_conf['hpe3par_username'] = ( - conf.get('hpe3par_username')) - self._client_conf['hpe3par_password'] = ( - conf.get('hpe3par_password')) - self._client_conf['san_ip'] = conf.get('san_ip') - self._client_conf['san_login'] = conf.get('san_login') - self._client_conf['san_password'] = conf.get('san_password') - self._client_conf['san_ssh_port'] = conf.get('san_ssh_port') - self._client_conf['ssh_conn_timeout'] = ( - conf.get('ssh_conn_timeout')) - self._client_conf['san_private_key'] = conf.get('san_private_key') - self._client_conf['hpe3par_api_url'] = conf.get('hpe3par_api_url') - self._client_conf['hpe3par_iscsi_ips'] = ( - conf.get('hpe3par_iscsi_ips')) - self._client_conf['hpe3par_iscsi_chap_enabled'] = ( - conf.get('hpe3par_iscsi_chap_enabled')) - self._client_conf['iscsi_ip_address'] = ( - conf.get('iscsi_ip_address')) - self._client_conf['iscsi_port'] = conf.get('iscsi_port') - else: - self._client_conf['hpe3par_cpg'] = ( - self.config.hpe3par_cpg) - self._client_conf['hpe3par_username'] = ( - self.config.hpe3par_username) - self._client_conf['hpe3par_password'] = ( - self.config.hpe3par_password) - self._client_conf['san_ip'] = self.config.san_ip - self._client_conf['san_login'] = self.config.san_login - self._client_conf['san_password'] = self.config.san_password - self._client_conf['san_ssh_port'] = self.config.san_ssh_port - self._client_conf['ssh_conn_timeout'] = ( - self.config.ssh_conn_timeout) - self._client_conf['san_private_key'] = self.config.san_private_key - self._client_conf['hpe3par_api_url'] = self.config.hpe3par_api_url - self._client_conf['hpe3par_iscsi_ips'] = ( - 
self.config.hpe3par_iscsi_ips) - self._client_conf['hpe3par_iscsi_chap_enabled'] = ( - self.config.hpe3par_iscsi_chap_enabled) - self._client_conf['iscsi_ip_address'] = ( - self.config.iscsi_ip_address) - self._client_conf['iscsi_port'] = self.config.iscsi_port - - def _get_cpg_from_cpg_map(self, cpg_map, target_cpg): - ret_target_cpg = None - cpg_pairs = cpg_map.split(' ') - for cpg_pair in cpg_pairs: - cpgs = cpg_pair.split(':') - cpg = cpgs[0] - dest_cpg = cpgs[1] - if cpg == target_cpg: - ret_target_cpg = dest_cpg - - return ret_target_cpg - - def _generate_hpe3par_cpgs(self, cpg_map): - hpe3par_cpgs = [] - cpg_pairs = cpg_map.split(' ') - for cpg_pair in cpg_pairs: - cpgs = cpg_pair.split(':') - hpe3par_cpgs.append(cpgs[1]) - - return hpe3par_cpgs - - def _get_replication_targets(self): - replication_targets = [] - for target in self._replication_targets: - replication_targets.append(target['backend_id']) - - return replication_targets - - def _do_volume_replication_setup(self, volume, retype=False, - dist_type_id=None): - """This function will do or ensure the following: - - -Create volume on main array (already done in create_volume) - -Create Remote Copy Group on main array - -Add volume to Remote Copy Group on main array - -Start remote copy - - If anything here fails, we will need to clean everything up in - reverse order, including the original volume. - """ - - rcg_name = self._get_3par_rcg_name(volume['id']) - # If the volume is already in a remote copy group, return True - # after starting remote copy. If remote copy is already started, - # issuing this command again will be fine. - if self._is_volume_in_remote_copy_group(volume): - try: - self.client.startRemoteCopy(rcg_name) - except Exception: - pass - return True - - try: - # Grab the extra_spec entries for replication and make sure they - # are set correctly. 
- volume_type = self._get_volume_type(volume["volume_type_id"]) - if retype and dist_type_id is not None: - dist_type = self._get_volume_type(dist_type_id) - extra_specs = dist_type.get("extra_specs") - else: - extra_specs = volume_type.get("extra_specs") - replication_mode = extra_specs.get( - self.EXTRA_SPEC_REP_MODE, self.DEFAULT_REP_MODE) - replication_mode_num = self._get_remote_copy_mode_num( - replication_mode) - replication_sync_period = extra_specs.get( - self.EXTRA_SPEC_REP_SYNC_PERIOD, self.DEFAULT_SYNC_PERIOD) - if replication_sync_period: - replication_sync_period = int(replication_sync_period) - if not self._is_replication_mode_correct(replication_mode, - replication_sync_period): - msg = _("The replication mode was not configured correctly " - "in the volume type extra_specs. If replication:mode " - "is periodic, replication:sync_period must also be " - "specified and be between 300 and 31622400 seconds.") - LOG.error(msg) - raise exception.VolumeBackendAPIException(data=msg) - - vol_settings = self.get_volume_settings_from_type(volume) - local_cpg = vol_settings['cpg'] - vol_name = self._get_3par_vol_name(volume['id']) - - # Create remote copy group on main array. - rcg_targets = [] - sync_targets = [] - for target in self._replication_targets: - # Only add targets that match the volumes replication mode. 
- if target['replication_mode'] == replication_mode_num: - cpg = self._get_cpg_from_cpg_map(target['cpg_map'], - local_cpg) - rcg_target = {'targetName': target['backend_id'], - 'mode': replication_mode_num, - 'snapCPG': cpg, - 'userCPG': cpg} - rcg_targets.append(rcg_target) - sync_target = {'targetName': target['backend_id'], - 'syncPeriod': replication_sync_period} - sync_targets.append(sync_target) - - optional = {'localSnapCPG': vol_settings['snap_cpg'], - 'localUserCPG': local_cpg} - pool = volume_utils.extract_host(volume['host'], level='pool') - domain = self.get_domain(pool) - if domain: - optional["domain"] = domain - try: - self.client.createRemoteCopyGroup(rcg_name, rcg_targets, - optional) - except Exception as ex: - msg = (_("There was an error creating the remote copy " - "group: %s.") % - six.text_type(ex)) - LOG.error(msg) - raise exception.VolumeBackendAPIException(data=msg) - - # Add volume to remote copy group. - rcg_targets = [] - for target in self._replication_targets: - # Only add targets that match the volumes replication mode. - if target['replication_mode'] == replication_mode_num: - rcg_target = {'targetName': target['backend_id'], - 'secVolumeName': vol_name} - rcg_targets.append(rcg_target) - optional = {'volumeAutoCreation': True} - try: - self.client.addVolumeToRemoteCopyGroup(rcg_name, vol_name, - rcg_targets, - optional=optional) - except Exception as ex: - msg = (_("There was an error adding the volume to the remote " - "copy group: %s.") % - six.text_type(ex)) - LOG.error(msg) - raise exception.VolumeBackendAPIException(data=msg) - - # Check and see if we are in periodic mode. If we are, update - # Remote Copy Group to have a sync period. 
- if replication_sync_period and ( - replication_mode_num == self.PERIODIC): - opt = {'targets': sync_targets} - try: - self.client.modifyRemoteCopyGroup(rcg_name, opt) - except Exception as ex: - msg = (_("There was an error setting the sync period for " - "the remote copy group: %s.") % - six.text_type(ex)) - LOG.error(msg) - raise exception.VolumeBackendAPIException(data=msg) - - # Start the remote copy. - try: - self.client.startRemoteCopy(rcg_name) - except Exception as ex: - msg = (_("There was an error starting remote copy: %s.") % - six.text_type(ex)) - LOG.error(msg) - raise exception.VolumeBackendAPIException(data=msg) - - return True - except Exception as ex: - self._do_volume_replication_destroy(volume) - msg = (_("There was an error setting up a remote copy group " - "on the 3PAR arrays: ('%s'). The volume will not be " - "recognized as replication type.") % - six.text_type(ex)) - LOG.error(msg) - raise exception.VolumeBackendAPIException(data=msg) - - def _do_volume_replication_destroy(self, volume, rcg_name=None, - retype=False): - """This will completely remove all traces of a remote copy group. - - It should be used when deleting a replication enabled volume - or if setting up a remote copy group fails. It will try and do the - following: - -Stop remote copy - -Remove volume from Remote Copy Group on main array - -Delete Remote Copy Group from main array - -Delete volume from main array - """ - if not rcg_name: - rcg_name = self._get_3par_rcg_name(volume['id']) - vol_name = self._get_3par_vol_name(volume['id']) - - # Stop remote copy. - try: - self.client.stopRemoteCopy(rcg_name) - except Exception: - pass - - # Delete volume from remote copy group on main array. - try: - self.client.removeVolumeFromRemoteCopyGroup( - rcg_name, vol_name, removeFromTarget=True) - except Exception: - pass - - # Delete remote copy group on main array. - try: - self.client.removeRemoteCopyGroup(rcg_name) - except Exception: - pass - - # Delete volume on the main array. 
- try: - if not retype: - self.client.deleteVolume(vol_name) - except Exception: - pass - - def _delete_replicated_failed_over_volume(self, volume): - location = volume.get('provider_location') - rcg_name = self._get_3par_remote_rcg_name(volume['id'], location) - targets = self.client.getRemoteCopyGroup(rcg_name)['targets'] - # When failed over, we want to temporarily disable config mirroring - # in order to be allowed to delete the volume and remote copy group - for target in targets: - target_name = target['targetName'] - self.client.toggleRemoteCopyConfigMirror(target_name, - mirror_config=False) - - # Do regular volume replication destroy now config mirroring is off - try: - self._do_volume_replication_destroy(volume, rcg_name) - except Exception as ex: - msg = (_("The failed-over volume could not be deleted: %s") % - six.text_type(ex)) - LOG.error(msg) - raise exception.VolumeIsBusy(message=msg) - finally: - # Turn config mirroring back on - for target in targets: - target_name = target['targetName'] - self.client.toggleRemoteCopyConfigMirror(target_name, - mirror_config=True) - - class TaskWaiter(object): - """TaskWaiter waits for task to be not active and returns status.""" - - def __init__(self, client, task_id, interval=1, initial_delay=0): - self.client = client - self.task_id = task_id - self.interval = interval - self.initial_delay = initial_delay - - def _wait_for_task(self): - status = self.client.getTask(self.task_id) - LOG.debug("3PAR Task id %(id)s status = %(status)s", - {'id': self.task_id, - 'status': status['status']}) - if status['status'] is not self.client.TASK_ACTIVE: - raise loopingcall.LoopingCallDone(status) - - def wait_for_task(self): - timer = loopingcall.FixedIntervalLoopingCall(self._wait_for_task) - return timer.start(interval=self.interval, - initial_delay=self.initial_delay).wait() - - -class ReplicateVolumeTask(flow_utils.CinderTask): - - """Task to replicate a volume. 
- - This is a task for adding/removing the replication feature to volume. - It is intended for use during retype(). This task has no revert. - # TODO(sumit): revert back to original volume extra-spec - """ - - def __init__(self, action, **kwargs): - super(ReplicateVolumeTask, self).__init__(addons=[action]) - - def execute(self, common, volume, new_type_id): - - new_replicated_type = False - - if new_type_id: - new_volume_type = common._get_volume_type(new_type_id) - - extra_specs = new_volume_type.get('extra_specs', None) - if extra_specs and 'replication_enabled' in extra_specs: - rep_val = extra_specs['replication_enabled'] - new_replicated_type = (rep_val == " True") - - if common._volume_of_replicated_type(volume) and new_replicated_type: - # Retype from replication enabled to replication enable. - common._do_volume_replication_destroy(volume, retype=True) - common._do_volume_replication_setup( - volume, - retype=True, - dist_type_id=new_type_id) - elif (not common._volume_of_replicated_type(volume) - and new_replicated_type): - # Retype from replication disabled to replication enable. - common._do_volume_replication_setup( - volume, - retype=True, - dist_type_id=new_type_id) - elif common._volume_of_replicated_type(volume): - # Retype from replication enabled to replication disable. - common._do_volume_replication_destroy(volume, retype=True) - - -class ModifyVolumeTask(flow_utils.CinderTask): - - """Task to change a volume's snapCPG and comment. - - This is a task for changing the snapCPG and comment. It is intended for - use during retype(). These changes are done together with a single - modify request which should be fast and easy to revert. - - Because we do not support retype with existing snapshots, we can change - the snapCPG without using a keepVV. If snapshots exist, then this will - fail, as desired. - - This task does not change the userCPG or provisioningType. Those changes - may require tunevv, so they are done by the TuneVolumeTask. 
- - The new comment will contain the new type, VVS and QOS information along - with whatever else was in the old comment dict. - - The old comment and snapCPG are restored if revert is called. - """ - - def __init__(self, action): - self.needs_revert = False - super(ModifyVolumeTask, self).__init__(addons=[action]) - - def _get_new_comment(self, old_comment, new_vvs, new_qos, - new_type_name, new_type_id): - - # Modify the comment during ModifyVolume - comment_dict = dict(ast.literal_eval(old_comment)) - if 'vvs' in comment_dict: - del comment_dict['vvs'] - if 'qos' in comment_dict: - del comment_dict['qos'] - if new_vvs: - comment_dict['vvs'] = new_vvs - elif new_qos: - comment_dict['qos'] = new_qos - else: - comment_dict['qos'] = {} - - if new_type_name: - comment_dict['volume_type_name'] = new_type_name - else: - comment_dict.pop('volume_type_name', None) - - if new_type_id: - comment_dict['volume_type_id'] = new_type_id - else: - comment_dict.pop('volume_type_id', None) - - return comment_dict - - def execute(self, common, volume_name, old_snap_cpg, new_snap_cpg, - old_comment, new_vvs, new_qos, new_type_name, new_type_id): - - comment_dict = self._get_new_comment( - old_comment, new_vvs, new_qos, new_type_name, new_type_id) - - if new_snap_cpg != old_snap_cpg: - # Modify the snap_cpg. This will fail with snapshots. 
- LOG.info("Modifying %(volume_name)s snap_cpg from " - "%(old_snap_cpg)s to %(new_snap_cpg)s.", - {'volume_name': volume_name, - 'old_snap_cpg': old_snap_cpg, - 'new_snap_cpg': new_snap_cpg}) - common.client.modifyVolume( - volume_name, - {'snapCPG': new_snap_cpg, - 'comment': json.dumps(comment_dict)}) - self.needs_revert = True - else: - LOG.info("Modifying %s comments.", volume_name) - common.client.modifyVolume( - volume_name, - {'comment': json.dumps(comment_dict)}) - self.needs_revert = True - - def revert(self, common, volume_name, old_snap_cpg, new_snap_cpg, - old_comment, **kwargs): - if self.needs_revert: - LOG.info("Retype revert %(volume_name)s snap_cpg from " - "%(new_snap_cpg)s back to %(old_snap_cpg)s.", - {'volume_name': volume_name, - 'new_snap_cpg': new_snap_cpg, - 'old_snap_cpg': old_snap_cpg}) - try: - common.client.modifyVolume( - volume_name, - {'snapCPG': old_snap_cpg, 'comment': old_comment}) - except Exception as ex: - LOG.error("Exception during snapCPG revert: %s", ex) - - -class TuneVolumeTask(flow_utils.CinderTask): - - """Task to change a volume's CPG and/or provisioning type. - - This is a task for changing the CPG and/or provisioning type. - It is intended for use during retype(). - - This task has no revert. The current design is to do this task last - and do revert-able tasks first. Un-doing a tunevv can be expensive - and should be avoided. - """ - - def __init__(self, action, **kwargs): - super(TuneVolumeTask, self).__init__(addons=[action]) - - def execute(self, common, old_tpvv, new_tpvv, old_tdvv, new_tdvv, - old_cpg, new_cpg, volume_name, new_compression): - common.tune_vv(old_tpvv, new_tpvv, old_tdvv, new_tdvv, - old_cpg, new_cpg, volume_name, new_compression) - - -class ModifySpecsTask(flow_utils.CinderTask): - - """Set/unset the QOS settings and/or VV set for the volume's new type. - - This is a task for changing the QOS settings and/or VV set. It is intended - for use during retype(). 
If changes are made during execute(), then they - need to be undone if revert() is called (i.e., if a later task fails). - - For 3PAR, we ignore QOS settings if a VVS is explicitly set, otherwise we - create a VV set and use that for QOS settings. That is why they are lumped - together here. Most of the decision-making about VVS vs. QOS settings vs. - old-style scoped extra-specs is handled in existing reusable code. Here - we mainly need to know what old stuff to remove before calling the function - that knows how to set the new stuff. - - Basic task flow is as follows: Remove the volume from the old externally - created VVS (when appropriate), delete the old cinder-created VVS, call - the function that knows how to set a new VVS or QOS settings. - - If any changes are made during execute, then revert needs to reverse them. - """ - - def __init__(self, action): - self.needs_revert = False - super(ModifySpecsTask, self).__init__(addons=[action]) - - def execute(self, common, volume_name, volume, old_cpg, new_cpg, - old_vvs, new_vvs, old_qos, new_qos, - old_flash_cache, new_flash_cache): - - if (old_vvs != new_vvs or - old_qos != new_qos or - old_flash_cache != new_flash_cache): - - # Remove VV from old VV Set. - if old_vvs is not None and old_vvs != new_vvs: - common.client.removeVolumeFromVolumeSet(old_vvs, - volume_name) - self.needs_revert = True - - # If any extra or qos specs changed then remove the old - # special VV set that we create. We'll recreate it - # as needed. - vvs_name = common._get_3par_vvs_name(volume['id']) - try: - common.client.deleteVolumeSet(vvs_name) - self.needs_revert = True - except hpeexceptions.HTTPNotFound as ex: - # HTTPNotFound(code=102) is OK. Set does not exist. 
- if ex.get_code() != 102: - LOG.error("Unexpected error when retype() tried to " - "deleteVolumeSet(%s)", vvs_name) - raise - - if new_vvs or new_qos or new_flash_cache: - common._add_volume_to_volume_set( - volume, volume_name, new_cpg, new_vvs, new_qos, - new_flash_cache) - self.needs_revert = True - - def revert(self, common, volume_name, volume, old_vvs, new_vvs, old_qos, - old_cpg, **kwargs): - if self.needs_revert: - # If any extra or qos specs changed then remove the old - # special VV set that we create and recreate it per - # the old type specs. - vvs_name = common._get_3par_vvs_name(volume['id']) - try: - common.client.deleteVolumeSet(vvs_name) - except hpeexceptions.HTTPNotFound as ex: - # HTTPNotFound(code=102) is OK. Set does not exist. - if ex.get_code() != 102: - LOG.error("Unexpected error when retype() revert " - "tried to deleteVolumeSet(%s)", vvs_name) - except Exception: - LOG.error("Unexpected error when retype() revert " - "tried to deleteVolumeSet(%s)", vvs_name) - - if old_vvs is not None or old_qos is not None: - try: - common._add_volume_to_volume_set( - volume, volume_name, old_cpg, old_vvs, old_qos) - except Exception as ex: - LOG.error("%(exception)s: Exception during revert of " - "retype for volume %(volume_name)s. " - "Original volume set/QOS settings may not " - "have been fully restored.", - {'exception': ex, 'volume_name': volume_name}) - - if new_vvs is not None and old_vvs != new_vvs: - try: - common.client.removeVolumeFromVolumeSet( - new_vvs, volume_name) - except Exception as ex: - LOG.error("%(exception)s: Exception during revert of " - "retype for volume %(volume_name)s. 
" - "Failed to remove from new volume set " - "%(new_vvs)s.", - {'exception': ex, - 'volume_name': volume_name, - 'new_vvs': new_vvs}) diff --git a/cinder/volume/drivers/hpe/hpe_3par_fc.py b/cinder/volume/drivers/hpe/hpe_3par_fc.py deleted file mode 100644 index f9fdbde65..000000000 --- a/cinder/volume/drivers/hpe/hpe_3par_fc.py +++ /dev/null @@ -1,727 +0,0 @@ -# (c) Copyright 2013-2015 Hewlett Packard Enterprise Development LP -# All Rights Reserved. -# -# Copyright 2012 OpenStack Foundation -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -# -""" -Volume driver for HPE 3PAR Storage array. -This driver requires 3.1.3 firmware on the 3PAR array, using -the 4.x version of the hpe3parclient. - -You will need to install the python hpe3parclient. 
-sudo pip install --upgrade "hpe3parclient>=4.0" - -Set the following in the cinder.conf file to enable the -3PAR Fibre Channel Driver along with the required flags: - -volume_driver=cinder.volume.drivers.hpe.hpe_3par_fc.HPE3PARFCDriver -""" - -try: - from hpe3parclient import exceptions as hpeexceptions -except ImportError: - hpeexceptions = None - -from oslo_log import log as logging -from oslo_utils.excutils import save_and_reraise_exception - -from cinder import exception -from cinder.i18n import _ -from cinder import interface -from cinder import utils -from cinder.volume import driver -from cinder.volume.drivers.hpe import hpe_3par_common as hpecommon -from cinder.volume.drivers.san import san -from cinder.zonemanager import utils as fczm_utils - -LOG = logging.getLogger(__name__) - -# EXISTENT_PATH error code returned from hpe3parclient -EXISTENT_PATH = 73 - - -@interface.volumedriver -class HPE3PARFCDriver(driver.ManageableVD, - driver.ManageableSnapshotsVD, - driver.MigrateVD, - driver.BaseVD): - """OpenStack Fibre Channel driver to enable 3PAR storage array. - - Version history: - - .. code-block:: none - - 1.0 - Initial driver - 1.1 - QoS, extend volume, multiple iscsi ports, remove domain, - session changes, faster clone, requires 3.1.2 MU2 firmware, - copy volume <--> Image. - 1.2.0 - Updated the use of the hp3parclient to 2.0.0 and refactored - the drivers to use the new APIs. - 1.2.1 - Synchronized extend_volume method. - 1.2.2 - Added try/finally around client login/logout. - 1.2.3 - Added ability to add WWNs to host. - 1.2.4 - Added metadata during attach/detach bug #1258033. - 1.3.0 - Removed all SSH code. We rely on the hp3parclient now. 
- 2.0.0 - Update hp3parclient API uses 3.0.x - 2.0.2 - Add back-end assisted volume migrate - 2.0.3 - Added initiator-target map for FC Zone Manager - 2.0.4 - Added support for managing/unmanaging of volumes - 2.0.5 - Only remove FC Zone on last volume detach - 2.0.6 - Added support for volume retype - 2.0.7 - Only one FC port is used when a single FC path - is present. bug #1360001 - 2.0.8 - Fixing missing login/logout around attach/detach bug #1367429 - 2.0.9 - Add support for pools with model update - 2.0.10 - Migrate without losing type settings bug #1356608 - 2.0.11 - Removing locks bug #1381190 - 2.0.12 - Fix queryHost call to specify wwns bug #1398206 - 2.0.13 - Fix missing host name during attach bug #1398206 - 2.0.14 - Removed usage of host name cache #1398914 - 2.0.15 - Added support for updated detach_volume attachment. - 2.0.16 - Added encrypted property to initialize_connection #1439917 - 2.0.17 - Improved VLUN creation and deletion logic. #1469816 - 2.0.18 - Changed initialize_connection to use getHostVLUNs. #1475064 - 2.0.19 - Adds consistency group support - 2.0.20 - Update driver to use ABC metaclasses - 2.0.21 - Added update_migrated_volume. bug # 1492023 - 3.0.0 - Rebranded HP to HPE. - 3.0.1 - Remove db access for consistency groups - 3.0.2 - Adds v2 managed replication support - 3.0.3 - Adds v2 unmanaged replication support - 3.0.4 - Adding manage/unmanage snapshot support - 3.0.5 - Optimize array ID retrieval - 3.0.6 - Update replication to version 2.1 - 3.0.7 - Remove metadata that tracks the instance ID. bug #1572665 - 3.0.8 - NSP feature, creating FC Vlun as match set instead of - host sees. bug #1577993 - 3.0.9 - Handling HTTP conflict 409, host WWN/iSCSI name already used - by another host, while creating 3PAR FC Host. bug #1597454 - 3.0.10 - Added Entry point tracing - 3.0.11 - Handle manage and unmanage hosts present. bug #1648067 - 3.0.12 - Adds consistency group capability in generic volume groups. 
- - """ - - VERSION = "3.0.12" - - # The name of the CI wiki page. - CI_WIKI_NAME = "HPE_Storage_CI" - - def __init__(self, *args, **kwargs): - super(HPE3PARFCDriver, self).__init__(*args, **kwargs) - self._active_backend_id = kwargs.get('active_backend_id', None) - self.configuration.append_config_values(hpecommon.hpe3par_opts) - self.configuration.append_config_values(san.san_opts) - self.lookup_service = fczm_utils.create_lookup_service() - - def _init_common(self): - return hpecommon.HPE3PARCommon(self.configuration, - self._active_backend_id) - - def _login(self, timeout=None): - common = self._init_common() - # If replication is enabled and we cannot login, we do not want to - # raise an exception so a failover can still be executed. - try: - common.do_setup(None, timeout=timeout, stats=self._stats) - common.client_login() - except Exception: - if common._replication_enabled: - LOG.warning("The primary array is not reachable at this " - "time. Since replication is enabled, " - "listing replication targets and failing over " - "a volume can still be performed.") - pass - else: - raise - return common - - def _logout(self, common): - # If replication is enabled and we do not have a client ID, we did not - # login, but can still failover. There is no need to logout. 
- if common.client is None and common._replication_enabled: - return - common.client_logout() - - def _check_flags(self, common): - """Sanity check to ensure we have required options set.""" - required_flags = ['hpe3par_api_url', 'hpe3par_username', - 'hpe3par_password', - 'san_ip', 'san_login', 'san_password'] - common.check_flags(self.configuration, required_flags) - - def get_volume_stats(self, refresh=False): - common = self._login() - try: - self._stats = common.get_volume_stats( - refresh, - self.get_filter_function(), - self.get_goodness_function()) - self._stats['storage_protocol'] = 'FC' - self._stats['driver_version'] = self.VERSION - backend_name = self.configuration.safe_get('volume_backend_name') - self._stats['volume_backend_name'] = (backend_name or - self.__class__.__name__) - return self._stats - finally: - self._logout(common) - - def do_setup(self, context): - common = self._init_common() - common.do_setup(context) - self._check_flags(common) - common.check_for_setup_error() - - def check_for_setup_error(self): - """Setup errors are already checked for in do_setup so return pass.""" - pass - - @utils.trace - def create_volume(self, volume): - common = self._login() - try: - return common.create_volume(volume) - finally: - self._logout(common) - - @utils.trace - def create_cloned_volume(self, volume, src_vref): - common = self._login() - try: - return common.create_cloned_volume(volume, src_vref) - finally: - self._logout(common) - - @utils.trace - def delete_volume(self, volume): - common = self._login() - try: - common.delete_volume(volume) - finally: - self._logout(common) - - @utils.trace - def create_volume_from_snapshot(self, volume, snapshot): - """Create a volume from a snapshot. - - TODO: support using the size from the user. 
- """ - common = self._login() - try: - return common.create_volume_from_snapshot(volume, snapshot) - finally: - self._logout(common) - - @utils.trace - def create_snapshot(self, snapshot): - common = self._login() - try: - common.create_snapshot(snapshot) - finally: - self._logout(common) - - @utils.trace - def delete_snapshot(self, snapshot): - common = self._login() - try: - common.delete_snapshot(snapshot) - finally: - self._logout(common) - - @utils.trace - @fczm_utils.add_fc_zone - def initialize_connection(self, volume, connector): - """Assigns the volume to a server. - - Assign any created volume to a compute node/host so that it can be - used from that host. - - The driver returns a driver_volume_type of 'fibre_channel'. - The target_wwn can be a single entry or a list of wwns that - correspond to the list of remote wwn(s) that will export the volume. - Example return values: - - { - 'driver_volume_type': 'fibre_channel' - 'data': { - 'encrypted': False, - 'target_discovered': True, - 'target_lun': 1, - 'target_wwn': '1234567890123', - } - } - - or - - { - 'driver_volume_type': 'fibre_channel' - 'data': { - 'encrypted': False, - 'target_discovered': True, - 'target_lun': 1, - 'target_wwn': ['1234567890123', '0987654321321'], - } - } - - - Steps to export a volume on 3PAR - * Create a host on the 3par with the target wwn - * Create a VLUN for that HOST with the volume we want to export. 
- - """ - common = self._login() - try: - # we have to make sure we have a host - host = self._create_host(common, volume, connector) - target_wwns, init_targ_map, numPaths = \ - self._build_initiator_target_map(common, connector) - # check if a VLUN already exists for this host - existing_vlun = common.find_existing_vlun(volume, host) - - vlun = None - if existing_vlun is None: - # now that we have a host, create the VLUN - nsp = None - lun_id = None - active_fc_port_list = common.get_active_fc_target_ports() - - if self.lookup_service: - if not init_targ_map: - msg = _("Setup is incomplete. Device mapping " - "not found from FC network. " - "Cannot perform VLUN creation.") - LOG.error(msg) - raise exception.FCSanLookupServiceException(msg) - - for target_wwn in target_wwns: - for port in active_fc_port_list: - if port['portWWN'].lower() == target_wwn.lower(): - nsp = port['nsp'] - vlun = common.create_vlun(volume, - host, - nsp, - lun_id=lun_id) - if lun_id is None: - lun_id = vlun['lun'] - break - else: - init_targ_map.clear() - del target_wwns[:] - host_connected_nsp = [] - for fcpath in host['FCPaths']: - if 'portPos' in fcpath: - host_connected_nsp.append( - common.build_nsp(fcpath['portPos'])) - for port in active_fc_port_list: - if ( - port['type'] == common.client.PORT_TYPE_HOST and - port['nsp'] in host_connected_nsp - ): - nsp = port['nsp'] - vlun = common.create_vlun(volume, - host, - nsp, - lun_id=lun_id) - target_wwns.append(port['portWWN']) - if vlun['remoteName'] in init_targ_map: - init_targ_map[vlun['remoteName']].append( - port['portWWN']) - else: - init_targ_map[vlun['remoteName']] = [ - port['portWWN']] - if lun_id is None: - lun_id = vlun['lun'] - if lun_id is None: - # New vlun creation failed - msg = _('No new vlun(s) were created') - LOG.error(msg) - raise exception.VolumeDriverException(msg) - else: - vlun = existing_vlun - - info = {'driver_volume_type': 'fibre_channel', - 'data': {'target_lun': vlun['lun'], - 'target_discovered': True, - 
'target_wwn': target_wwns, - 'initiator_target_map': init_targ_map}} - - encryption_key_id = volume.get('encryption_key_id', None) - info['data']['encrypted'] = encryption_key_id is not None - return info - finally: - self._logout(common) - - @utils.trace - @fczm_utils.remove_fc_zone - def terminate_connection(self, volume, connector, **kwargs): - """Driver entry point to unattach a volume from an instance.""" - common = self._login() - try: - hostname = common._safe_hostname(connector['host']) - common.terminate_connection(volume, hostname, - wwn=connector['wwpns']) - - info = {'driver_volume_type': 'fibre_channel', - 'data': {}} - - try: - common.client.getHostVLUNs(hostname) - except hpeexceptions.HTTPNotFound: - # No more exports for this host. - LOG.info("Need to remove FC Zone, building initiator " - "target map") - - target_wwns, init_targ_map, _numPaths = \ - self._build_initiator_target_map(common, connector) - - info['data'] = {'target_wwn': target_wwns, - 'initiator_target_map': init_targ_map} - return info - - finally: - self._logout(common) - - def _build_initiator_target_map(self, common, connector): - """Build the target_wwns and the initiator target map.""" - - fc_ports = common.get_active_fc_target_ports() - all_target_wwns = [] - target_wwns = [] - init_targ_map = {} - numPaths = 0 - - for port in fc_ports: - all_target_wwns.append(port['portWWN']) - - if self.lookup_service is not None: - # use FC san lookup to determine which NSPs to use - # for the new VLUN. 
- dev_map = self.lookup_service.get_device_mapping_from_network( - connector['wwpns'], - all_target_wwns) - - for fabric_name in dev_map: - fabric = dev_map[fabric_name] - target_wwns += fabric['target_port_wwn_list'] - for initiator in fabric['initiator_port_wwn_list']: - if initiator not in init_targ_map: - init_targ_map[initiator] = [] - init_targ_map[initiator] += fabric['target_port_wwn_list'] - init_targ_map[initiator] = list(set( - init_targ_map[initiator])) - for _target in init_targ_map[initiator]: - numPaths += 1 - target_wwns = list(set(target_wwns)) - else: - initiator_wwns = connector['wwpns'] - target_wwns = all_target_wwns - - for initiator in initiator_wwns: - init_targ_map[initiator] = target_wwns - - return target_wwns, init_targ_map, numPaths - - def _create_3par_fibrechan_host(self, common, hostname, wwns, - domain, persona_id): - """Create a 3PAR host. - - Create a 3PAR host, if there is already a host on the 3par using - the same wwn but with a different hostname, return the hostname - used by 3PAR. 
- """ - # first search for an existing host - host_found = None - hosts = common.client.queryHost(wwns=wwns) - - if hosts and hosts['members'] and 'name' in hosts['members'][0]: - host_found = hosts['members'][0]['name'] - - if host_found is not None: - return host_found - else: - persona_id = int(persona_id) - try: - common.client.createHost(hostname, FCWwns=wwns, - optional={'domain': domain, - 'persona': persona_id}) - except hpeexceptions.HTTPConflict as path_conflict: - msg = "Create FC host caught HTTP conflict code: %s" - LOG.exception(msg, path_conflict.get_code()) - with save_and_reraise_exception(reraise=False) as ctxt: - if path_conflict.get_code() is EXISTENT_PATH: - # Handle exception : EXISTENT_PATH - host WWN/iSCSI - # name already used by another host - hosts = common.client.queryHost(wwns=wwns) - if hosts and hosts['members'] and ( - 'name' in hosts['members'][0]): - hostname = hosts['members'][0]['name'] - else: - # re rasise last caught exception - ctxt.reraise = True - else: - # re rasise last caught exception - # for other HTTP conflict - ctxt.reraise = True - return hostname - - def _modify_3par_fibrechan_host(self, common, hostname, wwn): - mod_request = {'pathOperation': common.client.HOST_EDIT_ADD, - 'FCWWNs': wwn} - try: - common.client.modifyHost(hostname, mod_request) - except hpeexceptions.HTTPConflict as path_conflict: - msg = ("Modify FC Host %(hostname)s caught " - "HTTP conflict code: %(code)s") - LOG.exception(msg, - {'hostname': hostname, - 'code': path_conflict.get_code()}) - - def _create_host(self, common, volume, connector): - """Creates or modifies existing 3PAR host.""" - host = None - hostname = common._safe_hostname(connector['host']) - cpg = common.get_cpg(volume, allowSnap=True) - domain = common.get_domain(cpg) - try: - host = common._get_3par_host(hostname) - # Check whether host with wwn of initiator present on 3par - hosts = common.client.queryHost(wwns=connector['wwpns']) - host, hostname = 
common._get_prioritized_host_on_3par(host, - hosts, - hostname) - except hpeexceptions.HTTPNotFound: - # get persona from the volume type extra specs - persona_id = common.get_persona_type(volume) - # host doesn't exist, we have to create it - hostname = self._create_3par_fibrechan_host(common, - hostname, - connector['wwpns'], - domain, - persona_id) - host = common._get_3par_host(hostname) - return host - else: - return self._add_new_wwn_to_host(common, host, connector['wwpns']) - - def _add_new_wwn_to_host(self, common, host, wwns): - """Add wwns to a host if one or more don't exist. - - Identify if argument wwns contains any world wide names - not configured in the 3PAR host path. If any are found, - add them to the 3PAR host. - """ - # get the currently configured wwns - # from the host's FC paths - host_wwns = [] - if 'FCPaths' in host: - for path in host['FCPaths']: - wwn = path.get('wwn', None) - if wwn is not None: - host_wwns.append(wwn.lower()) - - # lower case all wwns in the compare list - compare_wwns = [x.lower() for x in wwns] - - # calculate wwns in compare list, but not in host_wwns list - new_wwns = list(set(compare_wwns).difference(host_wwns)) - - # if any wwns found that were not in host list, - # add them to the host - if (len(new_wwns) > 0): - self._modify_3par_fibrechan_host(common, host['name'], new_wwns) - host = common._get_3par_host(host['name']) - return host - - def create_export(self, context, volume, connector): - pass - - def ensure_export(self, context, volume): - pass - - def remove_export(self, context, volume): - pass - - @utils.trace - def extend_volume(self, volume, new_size): - common = self._login() - try: - common.extend_volume(volume, new_size) - finally: - self._logout(common) - - @utils.trace - def create_group(self, context, group): - common = self._login() - try: - return common.create_group(context, group) - finally: - self._logout(common) - - @utils.trace - def create_group_from_src(self, context, group, volumes, - 
group_snapshot=None, snapshots=None, - source_group=None, source_vols=None): - common = self._login() - try: - return common.create_group_from_src( - context, group, volumes, group_snapshot, snapshots, - source_group, source_vols) - finally: - self._logout(common) - - @utils.trace - def delete_group(self, context, group, volumes): - common = self._login() - try: - return common.delete_group(context, group, volumes) - finally: - self._logout(common) - - @utils.trace - def update_group(self, context, group, add_volumes=None, - remove_volumes=None): - common = self._login() - try: - return common.update_group(context, group, add_volumes, - remove_volumes) - finally: - self._logout(common) - - @utils.trace - def create_group_snapshot(self, context, group_snapshot, snapshots): - common = self._login() - try: - return common.create_group_snapshot(context, group_snapshot, - snapshots) - finally: - self._logout(common) - - @utils.trace - def delete_group_snapshot(self, context, group_snapshot, snapshots): - common = self._login() - try: - return common.delete_group_snapshot(context, group_snapshot, - snapshots) - finally: - self._logout(common) - - @utils.trace - def manage_existing(self, volume, existing_ref): - common = self._login() - try: - return common.manage_existing(volume, existing_ref) - finally: - self._logout(common) - - @utils.trace - def manage_existing_snapshot(self, snapshot, existing_ref): - common = self._login() - try: - return common.manage_existing_snapshot(snapshot, existing_ref) - finally: - self._logout(common) - - @utils.trace - def manage_existing_get_size(self, volume, existing_ref): - common = self._login() - try: - return common.manage_existing_get_size(volume, existing_ref) - finally: - self._logout(common) - - @utils.trace - def manage_existing_snapshot_get_size(self, snapshot, existing_ref): - common = self._login() - try: - return common.manage_existing_snapshot_get_size(snapshot, - existing_ref) - finally: - self._logout(common) - - 
@utils.trace - def unmanage(self, volume): - common = self._login() - try: - common.unmanage(volume) - finally: - self._logout(common) - - @utils.trace - def unmanage_snapshot(self, snapshot): - common = self._login() - try: - common.unmanage_snapshot(snapshot) - finally: - self._logout(common) - - @utils.trace - def retype(self, context, volume, new_type, diff, host): - """Convert the volume to be of the new type.""" - common = self._login() - try: - return common.retype(volume, new_type, diff, host) - finally: - self._logout(common) - - @utils.trace - def migrate_volume(self, context, volume, host): - if volume['status'] == 'in-use': - protocol = host['capabilities']['storage_protocol'] - if protocol != 'FC': - LOG.debug("3PAR FC driver cannot migrate in-use volume " - "to a host with storage_protocol=%s.", protocol) - return False, None - - common = self._login() - try: - return common.migrate_volume(volume, host) - finally: - self._logout(common) - - @utils.trace - def update_migrated_volume(self, context, volume, new_volume, - original_volume_status): - """Update the name of the migrated volume to it's new ID.""" - common = self._login() - try: - return common.update_migrated_volume(context, volume, new_volume, - original_volume_status) - finally: - self._logout(common) - - @utils.trace - def get_pool(self, volume): - common = self._login() - try: - return common.get_cpg(volume) - except hpeexceptions.HTTPNotFound: - reason = (_("Volume %s doesn't exist on array.") % volume) - LOG.error(reason) - raise exception.InvalidVolume(reason) - finally: - self._logout(common) - - @utils.trace - def failover_host(self, context, volumes, secondary_id=None, groups=None): - """Force failover to a secondary replication target.""" - common = self._login(timeout=30) - try: - # Update the active_backend_id in the driver and return it. 
- active_backend_id, volume_updates = common.failover_host( - context, volumes, secondary_id) - self._active_backend_id = active_backend_id - return active_backend_id, volume_updates, [] - finally: - self._logout(common) diff --git a/cinder/volume/drivers/hpe/hpe_3par_iscsi.py b/cinder/volume/drivers/hpe/hpe_3par_iscsi.py deleted file mode 100644 index 691259727..000000000 --- a/cinder/volume/drivers/hpe/hpe_3par_iscsi.py +++ /dev/null @@ -1,998 +0,0 @@ -# (c) Copyright 2012-2015 Hewlett Packard Enterprise Development LP -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -# -""" -Volume driver for HPE 3PAR Storage array. -This driver requires 3.1.3 firmware on the 3PAR array, using -the 4.x version of the hpe3parclient. - -You will need to install the python hpe3parclient. 
-sudo pip install --upgrade "hpe3parclient>=4.0" - -Set the following in the cinder.conf file to enable the -3PAR iSCSI Driver along with the required flags: - -volume_driver=cinder.volume.drivers.hpe.hpe_3par_iscsi.HPE3PARISCSIDriver -""" - -import re -import sys - -try: - from hpe3parclient import exceptions as hpeexceptions -except ImportError: - hpeexceptions = None - -from oslo_log import log as logging -from oslo_utils.excutils import save_and_reraise_exception - -from cinder import exception -from cinder.i18n import _ -from cinder import interface -from cinder import utils -from cinder.volume import driver -from cinder.volume.drivers.hpe import hpe_3par_common as hpecommon -from cinder.volume.drivers.san import san -from cinder.volume import utils as volume_utils - -LOG = logging.getLogger(__name__) - -# EXISTENT_PATH error code returned from hpe3parclient -EXISTENT_PATH = 73 -DEFAULT_ISCSI_PORT = 3260 -CHAP_USER_KEY = "HPQ-cinder-CHAP-name" -CHAP_PASS_KEY = "HPQ-cinder-CHAP-secret" - - -@interface.volumedriver -class HPE3PARISCSIDriver(driver.ManageableVD, - driver.ManageableSnapshotsVD, - driver.MigrateVD, - driver.BaseVD): - """OpenStack iSCSI driver to enable 3PAR storage array. - - Version history: - - .. code-block:: none - - 1.0 - Initial driver - 1.1 - QoS, extend volume, multiple iscsi ports, remove domain, - session changes, faster clone, requires 3.1.2 MU2 firmware. - 1.2.0 - Updated the use of the hp3parclient to 2.0.0 and refactored - the drivers to use the new APIs. - 1.2.1 - Synchronized extend_volume method. - 1.2.2 - Added try/finally around client login/logout. - 1.2.3 - log exceptions before raising - 1.2.4 - Fixed iSCSI active path bug #1224594 - 1.2.5 - Added metadata during attach/detach bug #1258033 - 1.2.6 - Use least-used iscsi n:s:p for iscsi volume attach bug #1269515 - This update now requires 3.1.2 MU3 firmware - 1.3.0 - Removed all SSH code. We rely on the hp3parclient now. 
- 2.0.0 - Update hp3parclient API uses 3.0.x - 2.0.2 - Add back-end assisted volume migrate - 2.0.3 - Added support for managing/unmanaging of volumes - 2.0.4 - Added support for volume retype - 2.0.5 - Added CHAP support, requires 3.1.3 MU1 firmware - and hp3parclient 3.1.0. - 2.0.6 - Fixing missing login/logout around attach/detach bug #1367429 - 2.0.7 - Add support for pools with model update - 2.0.8 - Migrate without losing type settings bug #1356608 - 2.0.9 - Removing locks bug #1381190 - 2.0.10 - Add call to queryHost instead SSH based findHost #1398206 - 2.0.11 - Added missing host name during attach fix #1398206 - 2.0.12 - Removed usage of host name cache #1398914 - 2.0.13 - Update LOG usage to fix translations. bug #1384312 - 2.0.14 - Do not allow a different iSCSI IP (hp3par_iscsi_ips) to be - used during live-migration. bug #1423958 - 2.0.15 - Added support for updated detach_volume attachment. - 2.0.16 - Added encrypted property to initialize_connection #1439917 - 2.0.17 - Python 3 fixes - 2.0.18 - Improved VLUN creation and deletion logic. #1469816 - 2.0.19 - Changed initialize_connection to use getHostVLUNs. #1475064 - 2.0.20 - Adding changes to support 3PAR iSCSI multipath. - 2.0.21 - Adds consistency group support - 2.0.22 - Update driver to use ABC metaclasses - 2.0.23 - Added update_migrated_volume. bug # 1492023 - 3.0.0 - Rebranded HP to HPE. - 3.0.1 - Python 3 support - 3.0.2 - Remove db access for consistency groups - 3.0.3 - Fix multipath dictionary key error. bug #1522062 - 3.0.4 - Adds v2 managed replication support - 3.0.5 - Adds v2 unmanaged replication support - 3.0.6 - Adding manage/unmanage snapshot support - 3.0.7 - Optimize array ID retrieval - 3.0.8 - Update replication to version 2.1 - 3.0.9 - Use same LUN ID for each VLUN path #1551994 - 3.0.10 - Remove metadata that tracks the instance ID. bug #1572665 - 3.0.11 - _create_3par_iscsi_host() now accepts iscsi_iqn as list only. 
- Bug #1590180 - 3.0.12 - Added entry point tracing - 3.0.13 - Handling HTTP conflict 409, host WWN/iSCSI name already used - by another host, while creating 3PAR iSCSI Host. bug #1642945 - 3.0.14 - Handle manage and unmanage hosts present. bug #1648067 - 3.0.15 - Adds consistency group capability in generic volume groups. - 3.0.16 - Get host from os-brick connector. bug #1690244 - - """ - - VERSION = "3.0.16" - - # The name of the CI wiki page. - CI_WIKI_NAME = "HPE_Storage_CI" - - def __init__(self, *args, **kwargs): - super(HPE3PARISCSIDriver, self).__init__(*args, **kwargs) - self._active_backend_id = kwargs.get('active_backend_id', None) - self.configuration.append_config_values(hpecommon.hpe3par_opts) - self.configuration.append_config_values(san.san_opts) - - def _init_common(self): - return hpecommon.HPE3PARCommon(self.configuration, - self._active_backend_id) - - def _login(self, timeout=None): - common = self._init_common() - # If replication is enabled and we cannot login, we do not want to - # raise an exception so a failover can still be executed. - try: - common.do_setup(None, timeout=timeout, stats=self._stats) - common.client_login() - except Exception: - if common._replication_enabled: - LOG.warning("The primary array is not reachable at this " - "time. Since replication is enabled, " - "listing replication targets and failing over " - "a volume can still be performed.") - pass - else: - raise - return common - - def _logout(self, common): - # If replication is enabled and we do not have a client ID, we did not - # login, but can still failover. There is no need to logout. 
- if common.client is None and common._replication_enabled: - return - common.client_logout() - - def _check_flags(self, common): - """Sanity check to ensure we have required options set.""" - required_flags = ['hpe3par_api_url', 'hpe3par_username', - 'hpe3par_password', 'san_ip', 'san_login', - 'san_password'] - common.check_flags(self.configuration, required_flags) - - @utils.trace - def get_volume_stats(self, refresh=False): - common = self._login() - try: - self._stats = common.get_volume_stats( - refresh, - self.get_filter_function(), - self.get_goodness_function()) - self._stats['storage_protocol'] = 'iSCSI' - self._stats['driver_version'] = self.VERSION - backend_name = self.configuration.safe_get('volume_backend_name') - self._stats['volume_backend_name'] = (backend_name or - self.__class__.__name__) - return self._stats - finally: - self._logout(common) - - def do_setup(self, context): - common = self._init_common() - common.do_setup(context) - self._check_flags(common) - common.check_for_setup_error() - - self.iscsi_ips = {} - common.client_login() - try: - self.initialize_iscsi_ports(common) - finally: - self._logout(common) - - def initialize_iscsi_ports(self, common): - # map iscsi_ip-> ip_port - # -> iqn - # -> nsp - iscsi_ip_list = {} - temp_iscsi_ip = {} - - # use the 3PAR ip_addr list for iSCSI configuration - if len(common._client_conf['hpe3par_iscsi_ips']) > 0: - # add port values to ip_addr, if necessary - for ip_addr in common._client_conf['hpe3par_iscsi_ips']: - ip = ip_addr.split(':') - if len(ip) == 1: - temp_iscsi_ip[ip_addr] = {'ip_port': DEFAULT_ISCSI_PORT} - elif len(ip) == 2: - temp_iscsi_ip[ip[0]] = {'ip_port': ip[1]} - else: - LOG.warning("Invalid IP address format '%s'", ip_addr) - - # add the single value iscsi_ip_address option to the IP dictionary. - # This way we can see if it's a valid iSCSI IP. 
If it's not valid, - # we won't use it and won't bother to report it, see below - if (common._client_conf['iscsi_ip_address'] not in temp_iscsi_ip): - ip = common._client_conf['iscsi_ip_address'] - ip_port = common._client_conf['iscsi_port'] - temp_iscsi_ip[ip] = {'ip_port': ip_port} - - # get all the valid iSCSI ports from 3PAR - # when found, add the valid iSCSI ip, ip port, iqn and nsp - # to the iSCSI IP dictionary - iscsi_ports = common.get_active_iscsi_target_ports() - - for port in iscsi_ports: - ip = port['IPAddr'] - if ip in temp_iscsi_ip: - ip_port = temp_iscsi_ip[ip]['ip_port'] - iscsi_ip_list[ip] = {'ip_port': ip_port, - 'nsp': port['nsp'], - 'iqn': port['iSCSIName']} - del temp_iscsi_ip[ip] - - # if the single value iscsi_ip_address option is still in the - # temp dictionary it's because it defaults to $my_ip which doesn't - # make sense in this context. So, if present, remove it and move on. - if common._client_conf['iscsi_ip_address'] in temp_iscsi_ip: - del temp_iscsi_ip[common._client_conf['iscsi_ip_address']] - - # lets see if there are invalid iSCSI IPs left in the temp dict - if len(temp_iscsi_ip) > 0: - LOG.warning("Found invalid iSCSI IP address(s) in " - "configuration option(s) hpe3par_iscsi_ips or " - "iscsi_ip_address '%s.'", - (", ".join(temp_iscsi_ip))) - - if not len(iscsi_ip_list) > 0: - msg = _('At least one valid iSCSI IP address must be set.') - LOG.error(msg) - raise exception.InvalidInput(reason=msg) - self.iscsi_ips[common._client_conf['hpe3par_api_url']] = iscsi_ip_list - - def check_for_setup_error(self): - """Setup errors are already checked for in do_setup so return pass.""" - pass - - @utils.trace - def create_volume(self, volume): - common = self._login() - try: - return common.create_volume(volume) - finally: - self._logout(common) - - @utils.trace - def create_cloned_volume(self, volume, src_vref): - """Clone an existing volume.""" - common = self._login() - try: - return common.create_cloned_volume(volume, src_vref) - 
finally: - self._logout(common) - - @utils.trace - def delete_volume(self, volume): - common = self._login() - try: - common.delete_volume(volume) - finally: - self._logout(common) - - @utils.trace - def create_volume_from_snapshot(self, volume, snapshot): - """Creates a volume from a snapshot. - - TODO: support using the size from the user. - """ - common = self._login() - try: - return common.create_volume_from_snapshot(volume, snapshot) - finally: - self._logout(common) - - @utils.trace - def create_snapshot(self, snapshot): - common = self._login() - try: - common.create_snapshot(snapshot) - finally: - self._logout(common) - - @utils.trace - def delete_snapshot(self, snapshot): - common = self._login() - try: - common.delete_snapshot(snapshot) - finally: - self._logout(common) - - @utils.trace - def initialize_connection(self, volume, connector): - """Assigns the volume to a server. - - Assign any created volume to a compute node/host so that it can be - used from that host. - - This driver returns a driver_volume_type of 'iscsi'. - The format of the driver data is defined in _get_iscsi_properties. - Example return value: - - .. code-block:: default - - { - 'driver_volume_type': 'iscsi', - 'data': { - 'encrypted': False, - 'target_discovered': True, - 'target_iqn': 'iqn.2010-10.org.openstack:volume-00000001', - 'target_protal': '127.0.0.1:3260', - 'volume_id': 1, - } - } - - Steps to export a volume on 3PAR - * Get the 3PAR iSCSI iqn - * Create a host on the 3par - * create vlun on the 3par - """ - common = self._login() - try: - # If the volume has been failed over, we need to reinitialize - # iSCSI ports so they represent the new array. 
- if volume.get('replication_status') == 'failed-over' and ( - common._client_conf['hpe3par_api_url'] not in self.iscsi_ips): - self.initialize_iscsi_ports(common) - - # Grab the correct iSCSI ports - iscsi_ips = self.iscsi_ips[common._client_conf['hpe3par_api_url']] - - # we have to make sure we have a host - host, username, password = self._create_host( - common, - volume, - connector) - - if connector.get('multipath'): - ready_ports = common.client.getiSCSIPorts( - state=common.client.PORT_STATE_READY) - - target_portals = [] - target_iqns = [] - target_luns = [] - - # Target portal ips are defined in cinder.conf. - target_portal_ips = iscsi_ips.keys() - - # Collect all existing VLUNs for this volume/host combination. - existing_vluns = common.find_existing_vluns(volume, host) - - # Cycle through each ready iSCSI port and determine if a new - # VLUN should be created or an existing one used. - lun_id = None - for port in ready_ports: - iscsi_ip = port['IPAddr'] - if iscsi_ip in target_portal_ips: - vlun = None - # check for an already existing VLUN matching the - # nsp for this iSCSI IP. If one is found, use it - # instead of creating a new VLUN. 
- for v in existing_vluns: - portPos = common.build_portPos( - iscsi_ips[iscsi_ip]['nsp']) - if v['portPos'] == portPos: - vlun = v - break - else: - vlun = common.create_vlun( - volume, host, iscsi_ips[iscsi_ip]['nsp'], - lun_id=lun_id) - - # We want to use the same LUN ID for every port - if lun_id is None: - lun_id = vlun['lun'] - iscsi_ip_port = "%s:%s" % ( - iscsi_ip, iscsi_ips[iscsi_ip]['ip_port']) - target_portals.append(iscsi_ip_port) - target_iqns.append(port['iSCSIName']) - target_luns.append(vlun['lun']) - else: - LOG.warning("iSCSI IP: '%s' was not found in " - "hpe3par_iscsi_ips list defined in " - "cinder.conf.", iscsi_ip) - - info = {'driver_volume_type': 'iscsi', - 'data': {'target_portals': target_portals, - 'target_iqns': target_iqns, - 'target_luns': target_luns, - 'target_discovered': True - } - } - else: - least_used_nsp = None - - # check if a VLUN already exists for this host - existing_vlun = common.find_existing_vlun(volume, host) - - if existing_vlun: - # We override the nsp here on purpose to force the - # volume to be exported out the same IP as it already is. - # This happens during nova live-migration, we want to - # disable the picking of a different IP that we export - # the volume to, or nova complains. 
- least_used_nsp = common.build_nsp(existing_vlun['portPos']) - - if not least_used_nsp: - least_used_nsp = self._get_least_used_nsp_for_host( - common, - host['name']) - - vlun = None - if existing_vlun is None: - # now that we have a host, create the VLUN - vlun = common.create_vlun(volume, host, least_used_nsp) - else: - vlun = existing_vlun - - if least_used_nsp is None: - LOG.warning("Least busy iSCSI port not found, " - "using first iSCSI port in list.") - iscsi_ip = list(iscsi_ips)[0] - else: - iscsi_ip = self._get_ip_using_nsp(least_used_nsp, common) - - iscsi_ip_port = iscsi_ips[iscsi_ip]['ip_port'] - iscsi_target_iqn = iscsi_ips[iscsi_ip]['iqn'] - info = {'driver_volume_type': 'iscsi', - 'data': {'target_portal': "%s:%s" % - (iscsi_ip, iscsi_ip_port), - 'target_iqn': iscsi_target_iqn, - 'target_lun': vlun['lun'], - 'target_discovered': True - } - } - - if common._client_conf['hpe3par_iscsi_chap_enabled']: - info['data']['auth_method'] = 'CHAP' - info['data']['auth_username'] = username - info['data']['auth_password'] = password - - encryption_key_id = volume.get('encryption_key_id', None) - info['data']['encrypted'] = encryption_key_id is not None - - return info - finally: - self._logout(common) - - @utils.trace - def terminate_connection(self, volume, connector, **kwargs): - """Driver entry point to unattach a volume from an instance.""" - common = self._login() - try: - hostname = common._safe_hostname(connector['host']) - common.terminate_connection( - volume, - hostname, - iqn=connector['initiator']) - self._clear_chap_3par(common, volume) - finally: - self._logout(common) - - def _clear_chap_3par(self, common, volume): - """Clears CHAP credentials on a 3par volume. - - Ignore exceptions caused by the keys not being present on a volume. 
- """ - vol_name = common._get_3par_vol_name(volume['id']) - - try: - common.client.removeVolumeMetaData(vol_name, CHAP_USER_KEY) - except hpeexceptions.HTTPNotFound: - pass - except Exception: - raise - - try: - common.client.removeVolumeMetaData(vol_name, CHAP_PASS_KEY) - except hpeexceptions.HTTPNotFound: - pass - except Exception: - raise - - def _create_3par_iscsi_host(self, common, hostname, iscsi_iqn, domain, - persona_id): - """Create a 3PAR host. - - Create a 3PAR host, if there is already a host on the 3par using - the same iqn but with a different hostname, return the hostname - used by 3PAR. - """ - # first search for an existing host - host_found = None - - hosts = common.client.queryHost(iqns=iscsi_iqn) - - if hosts and hosts['members'] and 'name' in hosts['members'][0]: - host_found = hosts['members'][0]['name'] - - if host_found is not None: - return host_found - else: - persona_id = int(persona_id) - try: - common.client.createHost(hostname, iscsiNames=iscsi_iqn, - optional={'domain': domain, - 'persona': persona_id}) - except hpeexceptions.HTTPConflict as path_conflict: - msg = "Create iSCSI host caught HTTP conflict code: %s" - with save_and_reraise_exception(reraise=False) as ctxt: - if path_conflict.get_code() is EXISTENT_PATH: - # Handle exception : EXISTENT_PATH - host WWN/iSCSI - # name already used by another host - hosts = common.client.queryHost(iqns=iscsi_iqn) - if hosts and hosts['members'] and ( - 'name' in hosts['members'][0]): - hostname = hosts['members'][0]['name'] - else: - # re-raise last caught exception - ctxt.reraise = True - LOG.exception(msg, path_conflict.get_code()) - else: - # re-raise last caught exception - # for other HTTP conflict - ctxt.reraise = True - LOG.exception(msg, path_conflict.get_code()) - return hostname - - def _modify_3par_iscsi_host(self, common, hostname, iscsi_iqn): - mod_request = {'pathOperation': common.client.HOST_EDIT_ADD, - 'iSCSINames': [iscsi_iqn]} - - common.client.modifyHost(hostname, 
mod_request) - - def _set_3par_chaps(self, common, hostname, volume, username, password): - """Sets a 3PAR host's CHAP credentials.""" - if not common._client_conf['hpe3par_iscsi_chap_enabled']: - return - - mod_request = {'chapOperation': common.client.HOST_EDIT_ADD, - 'chapOperationMode': common.client.CHAP_INITIATOR, - 'chapName': username, - 'chapSecret': password} - common.client.modifyHost(hostname, mod_request) - - def _create_host(self, common, volume, connector): - """Creates or modifies existing 3PAR host.""" - # make sure we don't have the host already - host = None - username = None - password = None - hostname = common._safe_hostname(connector['host']) - cpg = common.get_cpg(volume, allowSnap=True) - domain = common.get_domain(cpg) - - # Get the CHAP secret if CHAP is enabled - if common._client_conf['hpe3par_iscsi_chap_enabled']: - vol_name = common._get_3par_vol_name(volume['id']) - username = common.client.getVolumeMetaData( - vol_name, CHAP_USER_KEY)['value'] - password = common.client.getVolumeMetaData( - vol_name, CHAP_PASS_KEY)['value'] - - try: - host = common._get_3par_host(hostname) - # Check whether host with iqn of initiator present on 3par - hosts = common.client.queryHost(iqns=[connector['initiator']]) - host, hostname = common._get_prioritized_host_on_3par(host, - hosts, - hostname) - except hpeexceptions.HTTPNotFound: - # get persona from the volume type extra specs - persona_id = common.get_persona_type(volume) - # host doesn't exist, we have to create it - hostname = self._create_3par_iscsi_host(common, - hostname, - [connector['initiator']], - domain, - persona_id) - self._set_3par_chaps(common, hostname, volume, username, password) - host = common._get_3par_host(hostname) - else: - if 'iSCSIPaths' not in host or len(host['iSCSIPaths']) < 1: - self._modify_3par_iscsi_host( - common, hostname, - connector['initiator']) - self._set_3par_chaps( - common, - hostname, - volume, - username, - password) - host = 
common._get_3par_host(hostname) - elif (not host['initiatorChapEnabled'] and - common._client_conf['hpe3par_iscsi_chap_enabled']): - LOG.warning("Host exists without CHAP credentials set and " - "has iSCSI attachments but CHAP is enabled. " - "Updating host with new CHAP credentials.") - self._set_3par_chaps( - common, - hostname, - volume, - username, - password) - - return host, username, password - - def _do_export(self, common, volume, connector): - """Gets the associated account, generates CHAP info and updates.""" - model_update = {} - - if not common._client_conf['hpe3par_iscsi_chap_enabled']: - model_update['provider_auth'] = None - return model_update - - # CHAP username will be the hostname - chap_username = connector['host'] - - chap_password = None - try: - # Get all active VLUNs for the host - vluns = common.client.getHostVLUNs(chap_username) - - # Host has active VLUNs... is CHAP enabled on host? - host_info = common.client.getHost(chap_username) - - if not host_info['initiatorChapEnabled']: - LOG.warning("Host has no CHAP key, but CHAP is enabled.") - - except hpeexceptions.HTTPNotFound: - chap_password = volume_utils.generate_password(16) - LOG.warning("No host or VLUNs exist. Generating new " - "CHAP key.") - else: - # Get a list of all iSCSI VLUNs and see if there is already a CHAP - # key assigned to one of them. Use that CHAP key if present, - # otherwise create a new one. Skip any VLUNs that are missing - # CHAP credentials in metadata. - chap_exists = False - active_vluns = 0 - - for vlun in vluns: - if not vlun['active']: - continue - - active_vluns += 1 - - # iSCSI connections start with 'iqn'. - if ('remoteName' in vlun and - re.match('iqn.*', vlun['remoteName'])): - try: - chap_password = common.client.getVolumeMetaData( - vlun['volumeName'], CHAP_PASS_KEY)['value'] - chap_exists = True - break - except hpeexceptions.HTTPNotFound: - LOG.debug("The VLUN %s is missing CHAP credentials " - "but CHAP is enabled. 
Skipping.", - vlun['remoteName']) - else: - LOG.warning("Non-iSCSI VLUN detected.") - - if not chap_exists: - chap_password = volume_utils.generate_password(16) - LOG.warning("No VLUN contained CHAP credentials. " - "Generating new CHAP key.") - - # Add CHAP credentials to the volume metadata - vol_name = common._get_3par_vol_name(volume['id']) - common.client.setVolumeMetaData( - vol_name, CHAP_USER_KEY, chap_username) - common.client.setVolumeMetaData( - vol_name, CHAP_PASS_KEY, chap_password) - - model_update['provider_auth'] = ('CHAP %s %s' % - (chap_username, chap_password)) - - return model_update - - @utils.trace - def create_export(self, context, volume, connector): - common = self._login() - try: - return self._do_export(common, volume, connector) - finally: - self._logout(common) - - @utils.trace - def ensure_export(self, context, volume): - """Ensure the volume still exists on the 3PAR. - - Also retrieves CHAP credentials, if present on the volume - """ - common = self._login() - try: - vol_name = common._get_3par_vol_name(volume['id']) - common.client.getVolume(vol_name) - except hpeexceptions.HTTPNotFound: - LOG.error("Volume %s doesn't exist on array.", vol_name) - else: - metadata = common.client.getAllVolumeMetaData(vol_name) - - username = None - password = None - model_update = {} - model_update['provider_auth'] = None - - for member in metadata['members']: - if member['key'] == CHAP_USER_KEY: - username = member['value'] - elif member['key'] == CHAP_PASS_KEY: - password = member['value'] - - if username and password: - model_update['provider_auth'] = ('CHAP %s %s' % - (username, password)) - - return model_update - finally: - self._logout(common) - - def remove_export(self, context, volume): - pass - - def _get_least_used_nsp_for_host(self, common, hostname): - """Get the least used NSP for the current host. - - Steps to determine which NSP to use. 
- * If only one iSCSI NSP, return it - * If there is already an active vlun to this host, return its NSP - * Return NSP with fewest active vluns - """ - - iscsi_nsps = self._get_iscsi_nsps(common) - # If there's only one path, use it - if len(iscsi_nsps) == 1: - return iscsi_nsps[0] - - # Try to reuse an existing iscsi path to the host - vluns = common.client.getVLUNs() - for vlun in vluns['members']: - if vlun['active']: - if vlun['hostname'] == hostname: - temp_nsp = common.build_nsp(vlun['portPos']) - if temp_nsp in iscsi_nsps: - # this host already has an iscsi path, so use it - return temp_nsp - - # Calculate the least used iscsi nsp - least_used_nsp = self._get_least_used_nsp(common, - vluns['members'], - self._get_iscsi_nsps(common)) - return least_used_nsp - - def _get_iscsi_nsps(self, common): - """Return the list of candidate nsps.""" - nsps = [] - iscsi_ips = self.iscsi_ips[common._client_conf['hpe3par_api_url']] - for value in iscsi_ips.values(): - nsps.append(value['nsp']) - return nsps - - def _get_ip_using_nsp(self, nsp, common): - """Return IP associated with given nsp.""" - iscsi_ips = self.iscsi_ips[common._client_conf['hpe3par_api_url']] - for (key, value) in iscsi_ips.items(): - if value['nsp'] == nsp: - return key - - def _get_least_used_nsp(self, common, vluns, nspss): - """Return the nsp that has the fewest active vluns.""" - # return only the nsp (node:server:port) - # count the number of nsps - nsp_counts = {} - for nsp in nspss: - # initialize counts to zero - nsp_counts[nsp] = 0 - - current_least_used_nsp = None - - for vlun in vluns: - if vlun['active']: - nsp = common.build_nsp(vlun['portPos']) - if nsp in nsp_counts: - nsp_counts[nsp] = nsp_counts[nsp] + 1 - - # identify key (nsp) of least used nsp - current_smallest_count = sys.maxsize - for (nsp, count) in nsp_counts.items(): - if count < current_smallest_count: - current_least_used_nsp = nsp - current_smallest_count = count - return current_least_used_nsp - - @utils.trace - def 
extend_volume(self, volume, new_size): - common = self._login() - try: - common.extend_volume(volume, new_size) - finally: - self._logout(common) - - @utils.trace - def create_group(self, context, group): - common = self._login() - try: - common.create_group(context, group) - finally: - self._logout(common) - - @utils.trace - def create_group_from_src(self, context, group, volumes, - group_snapshot=None, snapshots=None, - source_group=None, source_vols=None): - common = self._login() - try: - return common.create_group_from_src( - context, group, volumes, group_snapshot, snapshots, - source_group, source_vols) - finally: - self._logout(common) - - @utils.trace - def delete_group(self, context, group, volumes): - common = self._login() - try: - return common.delete_group(context, group, volumes) - finally: - self._logout(common) - - @utils.trace - def update_group(self, context, group, add_volumes=None, - remove_volumes=None): - common = self._login() - try: - return common.update_group(context, group, add_volumes, - remove_volumes) - finally: - self._logout(common) - - @utils.trace - def create_group_snapshot(self, context, group_snapshot, snapshots): - common = self._login() - try: - return common.create_group_snapshot(context, group_snapshot, - snapshots) - finally: - self._logout(common) - - @utils.trace - def delete_group_snapshot(self, context, group_snapshot, snapshots): - common = self._login() - try: - return common.delete_group_snapshot(context, group_snapshot, - snapshots) - finally: - self._logout(common) - - @utils.trace - def manage_existing(self, volume, existing_ref): - common = self._login() - try: - return common.manage_existing(volume, existing_ref) - finally: - self._logout(common) - - @utils.trace - def manage_existing_snapshot(self, snapshot, existing_ref): - common = self._login() - try: - return common.manage_existing_snapshot(snapshot, existing_ref) - finally: - self._logout(common) - - @utils.trace - def manage_existing_get_size(self, 
volume, existing_ref): - common = self._login() - try: - return common.manage_existing_get_size(volume, existing_ref) - finally: - self._logout(common) - - @utils.trace - def manage_existing_snapshot_get_size(self, snapshot, existing_ref): - common = self._login() - try: - return common.manage_existing_snapshot_get_size(snapshot, - existing_ref) - finally: - self._logout(common) - - @utils.trace - def unmanage(self, volume): - common = self._login() - try: - common.unmanage(volume) - finally: - self._logout(common) - - @utils.trace - def unmanage_snapshot(self, snapshot): - common = self._login() - try: - common.unmanage_snapshot(snapshot) - finally: - self._logout(common) - - @utils.trace - def retype(self, context, volume, new_type, diff, host): - """Convert the volume to be of the new type.""" - common = self._login() - try: - return common.retype(volume, new_type, diff, host) - finally: - self._logout(common) - - @utils.trace - def migrate_volume(self, context, volume, host): - if volume['status'] == 'in-use': - protocol = host['capabilities']['storage_protocol'] - if protocol != 'iSCSI': - LOG.debug("3PAR ISCSI driver cannot migrate in-use volume " - "to a host with storage_protocol=%s.", protocol) - return False, None - - common = self._login() - try: - return common.migrate_volume(volume, host) - finally: - self._logout(common) - - @utils.trace - def update_migrated_volume(self, context, volume, new_volume, - original_volume_status): - """Update the name of the migrated volume to it's new ID.""" - common = self._login() - try: - return common.update_migrated_volume(context, volume, new_volume, - original_volume_status) - finally: - self._logout(common) - - @utils.trace - def get_pool(self, volume): - common = self._login() - try: - return common.get_cpg(volume) - except hpeexceptions.HTTPNotFound: - reason = (_("Volume %s doesn't exist on array.") % volume) - LOG.error(reason) - raise exception.InvalidVolume(reason) - finally: - self._logout(common) - - 
@utils.trace - def failover_host(self, context, volumes, secondary_id=None, groups=None): - """Force failover to a secondary replication target.""" - common = self._login(timeout=30) - try: - # Update the active_backend_id in the driver and return it. - active_backend_id, volume_updates = common.failover_host( - context, volumes, secondary_id) - self._active_backend_id = active_backend_id - return active_backend_id, volume_updates, [] - finally: - self._logout(common) diff --git a/cinder/volume/drivers/hpe/hpe_lefthand_iscsi.py b/cinder/volume/drivers/hpe/hpe_lefthand_iscsi.py deleted file mode 100644 index 3c56c68c9..000000000 --- a/cinder/volume/drivers/hpe/hpe_lefthand_iscsi.py +++ /dev/null @@ -1,1996 +0,0 @@ -# (c) Copyright 2014-2016 Hewlett Packard Enterprise Development LP -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -# -"""HPE LeftHand SAN ISCSI REST Proxy. - -Volume driver for HPE LeftHand Storage array. -This driver requires 11.5 or greater firmware on the LeftHand array, using -the 2.0 or greater version of the hpelefthandclient. - -You will need to install the python hpelefthandclient module. -sudo pip install python-lefthandclient - -Set the following in the cinder.conf file to enable the -LeftHand iSCSI REST Driver along with the required flags: - -volume_driver=cinder.volume.drivers.hpe.hpe_lefthand_iscsi. 
- HPELeftHandISCSIDriver - -It also requires the setting of hpelefthand_api_url, hpelefthand_username, -hpelefthand_password for credentials to talk to the REST service on the -LeftHand array. - -""" - -from oslo_config import cfg -from oslo_log import log as logging -from oslo_serialization import jsonutils as json -from oslo_utils import excutils -from oslo_utils import importutils -from oslo_utils import units - -from cinder import context -from cinder import exception -from cinder.i18n import _ -from cinder import interface -from cinder.objects import fields -from cinder import utils as cinder_utils -from cinder.volume import configuration -from cinder.volume import driver -from cinder.volume.drivers.san import san -from cinder.volume import utils -from cinder.volume import volume_types - -import math -import re -import six - -LOG = logging.getLogger(__name__) - -hpelefthandclient = importutils.try_import("hpelefthandclient") -if hpelefthandclient: - from hpelefthandclient import client as hpe_lh_client - from hpelefthandclient import exceptions as hpeexceptions - -hpelefthand_opts = [ - cfg.URIOpt('hpelefthand_api_url', - default=None, - help="HPE LeftHand WSAPI Server Url like " - "https://:8081/lhos", - deprecated_name='hplefthand_api_url'), - cfg.StrOpt('hpelefthand_username', - default=None, - help="HPE LeftHand Super user username", - deprecated_name='hplefthand_username'), - cfg.StrOpt('hpelefthand_password', - default=None, - help="HPE LeftHand Super user password", - secret=True, - deprecated_name='hplefthand_password'), - cfg.StrOpt('hpelefthand_clustername', - default=None, - help="HPE LeftHand cluster name", - deprecated_name='hplefthand_clustername'), - cfg.BoolOpt('hpelefthand_iscsi_chap_enabled', - default=False, - help='Configure CHAP authentication for iSCSI connections ' - '(Default: Disabled)', - deprecated_name='hplefthand_iscsi_chap_enabled'), - cfg.BoolOpt('hpelefthand_debug', - default=False, - help="Enable HTTP debugging to LeftHand", - 
deprecated_name='hplefthand_debug'), - cfg.PortOpt('hpelefthand_ssh_port', - default=16022, - help="Port number of SSH service."), - -] - -CONF = cfg.CONF -CONF.register_opts(hpelefthand_opts, group=configuration.SHARED_CONF_GROUP) - -MIN_API_VERSION = "1.1" -MIN_CLIENT_VERSION = '2.1.0' - -# map the extra spec key to the REST client option key -extra_specs_key_map = { - 'hpelh:provisioning': 'isThinProvisioned', - 'hpelh:ao': 'isAdaptiveOptimizationEnabled', - 'hpelh:data_pl': 'dataProtectionLevel', - 'hplh:provisioning': 'isThinProvisioned', - 'hplh:ao': 'isAdaptiveOptimizationEnabled', - 'hplh:data_pl': 'dataProtectionLevel', -} - -# map the extra spec value to the REST client option value -extra_specs_value_map = { - 'isThinProvisioned': {'thin': True, 'full': False}, - 'isAdaptiveOptimizationEnabled': {'true': True, 'false': False}, - 'dataProtectionLevel': { - 'r-0': 0, 'r-5': 1, 'r-10-2': 2, 'r-10-3': 3, 'r-10-4': 4, 'r-6': 5} -} - - -@interface.volumedriver -class HPELeftHandISCSIDriver(driver.ISCSIDriver): - """Executes REST commands relating to HPE/LeftHand SAN ISCSI volumes. - - Version history: - - .. code-block:: none - - 1.0.0 - Initial REST iSCSI proxy - 1.0.1 - Added support for retype - 1.0.2 - Added support for volume migrate - 1.0.3 - Fixed bug #1285829, HP LeftHand backend assisted migration - should check for snapshots - 1.0.4 - Fixed bug #1285925, LeftHand AO volume create performance - improvement - 1.0.5 - Fixed bug #1311350, Live-migration of an instance when - attached to a volume was causing an error. - 1.0.6 - Removing locks bug #1395953 - 1.0.7 - Fixed bug #1353137, Server was not removed from the HP - Lefthand backend after the last volume was detached. - 1.0.8 - Fixed bug #1418201, A cloned volume fails to attach. - 1.0.9 - Adding support for manage/unmanage. 
- 1.0.10 - Add stats for goodness_function and filter_function - 1.0.11 - Add over subscription support - 1.0.12 - Adds consistency group support - 1.0.13 - Added update_migrated_volume #1493546 - 1.0.14 - Removed the old CLIQ based driver - 2.0.0 - Rebranded HP to HPE - 2.0.1 - Remove db access for consistency groups - 2.0.2 - Adds v2 managed replication support - 2.0.3 - Adds v2 unmanaged replication support - 2.0.4 - Add manage/unmanage snapshot support - 2.0.5 - Changed minimum client version to be 2.1.0 - 2.0.6 - Update replication to version 2.1 - 2.0.7 - Fixed bug #1554746, Create clone volume with new size. - 2.0.8 - Add defaults for creating a replication client, bug #1556331 - 2.0.9 - Fix terminate connection on failover - 2.0.10 - Add entry point tracing - 2.0.11 - Fix extend volume if larger than snapshot bug #1560654 - 2.0.12 - add CG capability to generic volume groups. - """ - - VERSION = "2.0.12" - - CI_WIKI_NAME = "HPE_Storage_CI" - - device_stats = {} - - # v2 replication constants - EXTRA_SPEC_REP_SYNC_PERIOD = "replication:sync_period" - EXTRA_SPEC_REP_RETENTION_COUNT = "replication:retention_count" - EXTRA_SPEC_REP_REMOTE_RETENTION_COUNT = ( - "replication:remote_retention_count") - MIN_REP_SYNC_PERIOD = 1800 - DEFAULT_RETENTION_COUNT = 5 - MAX_RETENTION_COUNT = 50 - DEFAULT_REMOTE_RETENTION_COUNT = 5 - MAX_REMOTE_RETENTION_COUNT = 50 - REP_SNAPSHOT_SUFFIX = "_SS" - REP_SCHEDULE_SUFFIX = "_SCHED" - FAILBACK_VALUE = 'default' - - def __init__(self, *args, **kwargs): - super(HPELeftHandISCSIDriver, self).__init__(*args, **kwargs) - self.configuration.append_config_values(hpelefthand_opts) - self.configuration.append_config_values(san.san_opts) - if not self.configuration.hpelefthand_api_url: - raise exception.NotFound(_("HPELeftHand url not found")) - - # blank is the only invalid character for cluster names - # so we need to use it as a separator - self.DRIVER_LOCATION = self.__class__.__name__ + ' %(cluster)s %(vip)s' - self._client_conf = {} - 
self._replication_targets = [] - self._replication_enabled = False - self._active_backend_id = kwargs.get('active_backend_id', None) - - def _login(self, timeout=None): - conf = self._get_lefthand_config() - if conf: - self._client_conf['hpelefthand_username'] = ( - conf['hpelefthand_username']) - self._client_conf['hpelefthand_password'] = ( - conf['hpelefthand_password']) - self._client_conf['hpelefthand_clustername'] = ( - conf['hpelefthand_clustername']) - self._client_conf['hpelefthand_api_url'] = ( - conf['hpelefthand_api_url']) - self._client_conf['hpelefthand_ssh_port'] = ( - conf['hpelefthand_ssh_port']) - self._client_conf['hpelefthand_iscsi_chap_enabled'] = ( - conf['hpelefthand_iscsi_chap_enabled']) - self._client_conf['ssh_conn_timeout'] = conf['ssh_conn_timeout'] - self._client_conf['san_private_key'] = conf['san_private_key'] - else: - self._client_conf['hpelefthand_username'] = ( - self.configuration.hpelefthand_username) - self._client_conf['hpelefthand_password'] = ( - self.configuration.hpelefthand_password) - self._client_conf['hpelefthand_clustername'] = ( - self.configuration.hpelefthand_clustername) - self._client_conf['hpelefthand_api_url'] = ( - self.configuration.hpelefthand_api_url) - self._client_conf['hpelefthand_ssh_port'] = ( - self.configuration.hpelefthand_ssh_port) - self._client_conf['hpelefthand_iscsi_chap_enabled'] = ( - self.configuration.hpelefthand_iscsi_chap_enabled) - self._client_conf['ssh_conn_timeout'] = ( - self.configuration.ssh_conn_timeout) - self._client_conf['san_private_key'] = ( - self.configuration.san_private_key) - - client = self._create_client(timeout=timeout) - try: - if self.configuration.hpelefthand_debug: - client.debug_rest(True) - - client.login( - self._client_conf['hpelefthand_username'], - self._client_conf['hpelefthand_password']) - - cluster_info = client.getClusterByName( - self._client_conf['hpelefthand_clustername']) - self.cluster_id = cluster_info['id'] - virtual_ips = 
cluster_info['virtualIPAddresses'] - self.cluster_vip = virtual_ips[0]['ipV4Address'] - - # Extract IP address from API URL - ssh_ip = self._extract_ip_from_url( - self._client_conf['hpelefthand_api_url']) - known_hosts_file = CONF.ssh_hosts_key_file - policy = "AutoAddPolicy" - if CONF.strict_ssh_host_key_policy: - policy = "RejectPolicy" - client.setSSHOptions( - ssh_ip, - self._client_conf['hpelefthand_username'], - self._client_conf['hpelefthand_password'], - port=self._client_conf['hpelefthand_ssh_port'], - conn_timeout=self._client_conf['ssh_conn_timeout'], - privatekey=self._client_conf['san_private_key'], - missing_key_policy=policy, - known_hosts_file=known_hosts_file) - - return client - except hpeexceptions.HTTPNotFound: - raise exception.DriverNotInitialized( - _('LeftHand cluster not found')) - except Exception as ex: - raise exception.DriverNotInitialized(ex) - - def _logout(self, client): - if client is not None: - client.logout() - - def _create_client(self, timeout=None): - # Timeout is only supported in version 2.0.1 and greater of the - # python-lefthandclient. 
- hpelefthand_api_url = self._client_conf['hpelefthand_api_url'] - client = hpe_lh_client.HPELeftHandClient( - hpelefthand_api_url, timeout=timeout) - return client - - def _create_replication_client(self, remote_array): - cl = hpe_lh_client.HPELeftHandClient( - remote_array['hpelefthand_api_url']) - try: - cl.login( - remote_array['hpelefthand_username'], - remote_array['hpelefthand_password']) - - ssh_conn_timeout = remote_array.get('ssh_conn_timeout', 30) - san_private_key = remote_array.get('san_private_key', '') - - # Extract IP address from API URL - ssh_ip = self._extract_ip_from_url( - remote_array['hpelefthand_api_url']) - known_hosts_file = CONF.ssh_hosts_key_file - policy = "AutoAddPolicy" - if CONF.strict_ssh_host_key_policy: - policy = "RejectPolicy" - cl.setSSHOptions( - ssh_ip, - remote_array['hpelefthand_username'], - remote_array['hpelefthand_password'], - port=remote_array['hpelefthand_ssh_port'], - conn_timeout=ssh_conn_timeout, - privatekey=san_private_key, - missing_key_policy=policy, - known_hosts_file=known_hosts_file) - - return cl - except hpeexceptions.HTTPNotFound: - raise exception.DriverNotInitialized( - _('LeftHand cluster not found')) - except Exception as ex: - raise exception.DriverNotInitialized(ex) - - def _destroy_replication_client(self, client): - if client is not None: - client.logout() - - def _extract_ip_from_url(self, url): - result = re.search("://(.*):", url) - ip = result.group(1) - return ip - - def do_setup(self, context): - """Set up LeftHand client.""" - if not hpelefthandclient: - # Checks if client was successfully imported - ex_msg = _("HPELeftHand client is not installed. Please" - " install using 'pip install " - "python-lefthandclient'.") - LOG.error(ex_msg) - raise exception.VolumeDriverException(ex_msg) - - if hpelefthandclient.version < MIN_CLIENT_VERSION: - ex_msg = (_("Invalid hpelefthandclient version found (" - "%(found)s). Version %(minimum)s or greater " - "required. 
Run 'pip install --upgrade " - "python-lefthandclient' to upgrade the " - "hpelefthandclient.") - % {'found': hpelefthandclient.version, - 'minimum': MIN_CLIENT_VERSION}) - LOG.error(ex_msg) - raise exception.InvalidInput(reason=ex_msg) - - self._do_replication_setup() - - def check_for_setup_error(self): - """Checks for incorrect LeftHand API being used on backend.""" - client = self._login() - try: - self.api_version = client.getApiVersion() - - LOG.info("HPELeftHand API version %s", self.api_version) - - if self.api_version < MIN_API_VERSION: - LOG.warning("HPELeftHand API is version %(current)s. " - "A minimum version of %(min)s is needed for " - "manage/unmanage support.", - {'current': self.api_version, - 'min': MIN_API_VERSION}) - finally: - self._logout(client) - - def check_replication_flags(self, options, required_flags): - for flag in required_flags: - if not options.get(flag, None): - msg = _('%s is not set and is required for the replication ' - 'device to be valid.') % flag - LOG.error(msg) - raise exception.InvalidInput(reason=msg) - - def get_version_string(self): - return (_('REST %(proxy_ver)s hpelefthandclient %(rest_ver)s') % { - 'proxy_ver': self.VERSION, - 'rest_ver': hpelefthandclient.get_version_string()}) - - @cinder_utils.trace - def create_volume(self, volume): - """Creates a volume.""" - client = self._login() - try: - # get the extra specs of interest from this volume's volume type - volume_extra_specs = self._get_volume_extra_specs(volume) - extra_specs = self._get_lh_extra_specs( - volume_extra_specs, - extra_specs_key_map.keys()) - - # map the extra specs key/value pairs to key/value pairs - # used as optional configuration values by the LeftHand backend - optional = self._map_extra_specs(extra_specs) - - # if provisioning is not set, default to thin - if 'isThinProvisioned' not in optional: - optional['isThinProvisioned'] = True - - # AdaptiveOptimization defaults to 'true' if you don't specify the - # value on a create, and that is 
the most efficient way to create - # a volume. If you pass in 'false' or 'true' for AO, it will result - # in an update operation following the create operation to set this - # value, so it is best to not specify the value and let it default - # to 'true'. - if optional.get('isAdaptiveOptimizationEnabled'): - del optional['isAdaptiveOptimizationEnabled'] - - clusterName = self._client_conf['hpelefthand_clustername'] - optional['clusterName'] = clusterName - - volume_info = client.createVolume( - volume['name'], self.cluster_id, - volume['size'] * units.Gi, - optional) - - model_update = self._update_provider(volume_info) - - # v2 replication check - if self._volume_of_replicated_type(volume) and ( - self._do_volume_replication_setup(volume, client, optional)): - model_update['replication_status'] = 'enabled' - model_update['replication_driver_data'] = (json.dumps( - {'location': self._client_conf['hpelefthand_api_url']})) - - return model_update - except Exception as ex: - raise exception.VolumeBackendAPIException(data=ex) - finally: - self._logout(client) - - @cinder_utils.trace - def delete_volume(self, volume): - """Deletes a volume.""" - client = self._login() - # v2 replication check - # If the volume type is replication enabled, we want to call our own - # method of deconstructing the volume and its dependencies - if self._volume_of_replicated_type(volume): - self._do_volume_replication_destroy(volume, client) - return - - try: - volume_info = client.getVolumeByName(volume['name']) - client.deleteVolume(volume_info['id']) - except hpeexceptions.HTTPNotFound: - LOG.error("Volume did not exist. 
It will not be deleted") - except Exception as ex: - raise exception.VolumeBackendAPIException(ex) - finally: - self._logout(client) - - @cinder_utils.trace - def extend_volume(self, volume, new_size): - """Extend the size of an existing volume.""" - client = self._login() - try: - volume_info = client.getVolumeByName(volume['name']) - - # convert GB to bytes - options = {'size': int(new_size) * units.Gi} - client.modifyVolume(volume_info['id'], options) - except Exception as ex: - raise exception.VolumeBackendAPIException(ex) - finally: - self._logout(client) - - @cinder_utils.trace - def create_group(self, context, group): - """Creates a group.""" - LOG.debug("Creating group.") - if not utils.is_group_a_cg_snapshot_type(group): - raise NotImplementedError() - for vol_type_id in group.volume_type_ids: - replication_type = self._volume_of_replicated_type( - None, vol_type_id) - if replication_type: - # An unsupported configuration - LOG.error('Unable to create group: create group with ' - 'replication volume type is not supported.') - model_update = {'status': fields.GroupStatus.ERROR} - return model_update - - return {'status': fields.GroupStatus.AVAILABLE} - - @cinder_utils.trace - def create_group_from_src(self, context, group, volumes, - group_snapshot=None, snapshots=None, - source_group=None, source_vols=None): - """Creates a group from a source""" - msg = _("Creating a group from a source is not " - "supported when consistent_group_snapshot_enabled to true.") - if not utils.is_group_a_cg_snapshot_type(group): - raise NotImplementedError() - else: - raise exception.VolumeBackendAPIException(data=msg) - - @cinder_utils.trace - def delete_group(self, context, group, volumes): - """Deletes a group.""" - if not utils.is_group_a_cg_snapshot_type(group): - raise NotImplementedError() - - volume_model_updates = [] - for volume in volumes: - volume_update = {'id': volume.id} - try: - self.delete_volume(volume) - volume_update['status'] = 'deleted' - except Exception 
as ex: - LOG.error("There was an error deleting volume %(id)s: " - "%(error)s.", - {'id': volume.id, - 'error': ex}) - volume_update['status'] = 'error' - volume_model_updates.append(volume_update) - - model_update = {'status': group.status} - - return model_update, volume_model_updates - - @cinder_utils.trace - def update_group(self, context, group, add_volumes=None, - remove_volumes=None): - """Updates a group. - - Because the backend has no concept of volume grouping, cinder will - maintain all volume/group relationships. Because of this - functionality, there is no need to make any client calls; instead - simply returning out of this function allows cinder to properly - add/remove volumes from the group. - """ - LOG.debug("Updating group.") - if not utils.is_group_a_cg_snapshot_type(group): - raise NotImplementedError() - - return None, None, None - - @cinder_utils.trace - def create_group_snapshot(self, context, group_snapshot, snapshots): - """Creates a group snapshot.""" - if not utils.is_group_a_cg_snapshot_type(group_snapshot): - raise NotImplementedError() - client = self._login() - try: - snap_set = [] - snapshot_base_name = "snapshot-" + group_snapshot.id - snapshot_model_updates = [] - for i, snapshot in enumerate(snapshots): - volume = snapshot.volume - volume_name = volume['name'] - try: - volume_info = client.getVolumeByName(volume_name) - except Exception as ex: - error = six.text_type(ex) - LOG.error("Could not find volume with name %(name)s. 
" - "Error: %(error)s", - {'name': volume_name, - 'error': error}) - raise exception.VolumeBackendAPIException(data=error) - - volume_id = volume_info['id'] - snapshot_name = snapshot_base_name + "-" + six.text_type(i) - snap_set_member = {'volumeName': volume_name, - 'volumeId': volume_id, - 'snapshotName': snapshot_name} - snap_set.append(snap_set_member) - snapshot_update = {'id': snapshot['id'], - 'status': fields.SnapshotStatus.AVAILABLE} - snapshot_model_updates.append(snapshot_update) - - source_volume_id = snap_set[0]['volumeId'] - optional = {'inheritAccess': True} - description = group_snapshot.description - if description: - optional['description'] = description - - try: - client.createSnapshotSet(source_volume_id, snap_set, optional) - except Exception as ex: - error = six.text_type(ex) - LOG.error("Could not create snapshot set. Error: '%s'", - error) - raise exception.VolumeBackendAPIException( - data=error) - - except Exception as ex: - raise exception.VolumeBackendAPIException(data=six.text_type(ex)) - finally: - self._logout(client) - - model_update = {'status': 'available'} - - return model_update, snapshot_model_updates - - @cinder_utils.trace - def delete_group_snapshot(self, context, group_snapshot, snapshots): - """Deletes a group snapshot.""" - if not utils.is_group_a_cg_snapshot_type(group_snapshot): - raise NotImplementedError() - client = self._login() - snap_name_base = "snapshot-" + group_snapshot.id - - snapshot_model_updates = [] - for i, snapshot in enumerate(snapshots): - snapshot_update = {'id': snapshot['id']} - try: - snap_name = snap_name_base + "-" + six.text_type(i) - snap_info = client.getSnapshotByName(snap_name) - client.deleteSnapshot(snap_info['id']) - snapshot_update['status'] = fields.SnapshotStatus.DELETED - except hpeexceptions.HTTPServerError as ex: - in_use_msg = ('cannot be deleted because it is a clone ' - 'point') - if in_use_msg in ex.get_description(): - LOG.error("The snapshot cannot be deleted because " - "it 
is a clone point.") - snapshot_update['status'] = fields.SnapshotStatus.ERROR - except Exception as ex: - LOG.error("There was an error deleting snapshot %(id)s: " - "%(error)s.", - {'id': snapshot['id'], - 'error': six.text_type(ex)}) - snapshot_update['status'] = fields.SnapshotStatus.ERROR - snapshot_model_updates.append(snapshot_update) - - self._logout(client) - - model_update = {'status': group_snapshot.status} - - return model_update, snapshot_model_updates - - @cinder_utils.trace - def create_snapshot(self, snapshot): - """Creates a snapshot.""" - client = self._login() - try: - volume_info = client.getVolumeByName(snapshot['volume_name']) - - option = {'inheritAccess': True} - client.createSnapshot(snapshot['name'], - volume_info['id'], - option) - except Exception as ex: - raise exception.VolumeBackendAPIException(ex) - finally: - self._logout(client) - - @cinder_utils.trace - def delete_snapshot(self, snapshot): - """Deletes a snapshot.""" - client = self._login() - try: - snap_info = client.getSnapshotByName(snapshot['name']) - client.deleteSnapshot(snap_info['id']) - except hpeexceptions.HTTPNotFound: - LOG.error("Snapshot did not exist. 
It will not be deleted") - except hpeexceptions.HTTPServerError as ex: - in_use_msg = 'cannot be deleted because it is a clone point' - if in_use_msg in ex.get_description(): - raise exception.SnapshotIsBusy(snapshot_name=snapshot['name']) - - raise exception.VolumeBackendAPIException(ex) - - except Exception as ex: - raise exception.VolumeBackendAPIException(ex) - finally: - self._logout(client) - - @cinder_utils.trace - def get_volume_stats(self, refresh=False): - """Gets volume stats.""" - client = self._login() - try: - if refresh: - self._update_backend_status(client) - - return self.device_stats - finally: - self._logout(client) - - def _update_backend_status(self, client): - data = {} - backend_name = self.configuration.safe_get('volume_backend_name') - data['driver_version'] = self.VERSION - data['volume_backend_name'] = backend_name or self.__class__.__name__ - data['reserved_percentage'] = ( - self.configuration.safe_get('reserved_percentage')) - data['storage_protocol'] = 'iSCSI' - data['vendor_name'] = 'Hewlett Packard Enterprise' - data['location_info'] = (self.DRIVER_LOCATION % { - 'cluster': self._client_conf['hpelefthand_clustername'], - 'vip': self.cluster_vip}) - data['thin_provisioning_support'] = True - data['thick_provisioning_support'] = True - data['max_over_subscription_ratio'] = ( - self.configuration.safe_get('max_over_subscription_ratio')) - - cluster_info = client.getCluster(self.cluster_id) - - total_capacity = cluster_info['spaceTotal'] - free_capacity = cluster_info['spaceAvailable'] - - # convert to GB - data['total_capacity_gb'] = int(total_capacity) / units.Gi - data['free_capacity_gb'] = int(free_capacity) / units.Gi - - # Collect some stats - capacity_utilization = ( - (float(total_capacity - free_capacity) / - float(total_capacity)) * 100) - # Don't have a better way to get the total number volumes - # so try to limit the size of data for now. Once new lefthand API is - # available, replace this call. 
- total_volumes = 0 - provisioned_size = 0 - volumes = client.getVolumes( - cluster=self._client_conf['hpelefthand_clustername'], - fields=['members[id]', 'members[clusterName]', 'members[size]']) - if volumes: - total_volumes = volumes['total'] - provisioned_size = sum( - members['size'] for members in volumes['members']) - data['provisioned_capacity_gb'] = int(provisioned_size) / units.Gi - data['capacity_utilization'] = capacity_utilization - data['total_volumes'] = total_volumes - data['filter_function'] = self.get_filter_function() - data['goodness_function'] = self.get_goodness_function() - data['consistent_group_snapshot_enabled'] = True - data['replication_enabled'] = self._replication_enabled - data['replication_type'] = ['periodic'] - data['replication_count'] = len(self._replication_targets) - data['replication_targets'] = self._get_replication_targets() - - self.device_stats = data - - @cinder_utils.trace - def initialize_connection(self, volume, connector): - """Assigns the volume to a server. - - Assign any created volume to a compute node/host so that it can be - used from that host. HPE VSA requires a volume to be assigned - to a server. - """ - client = self._login() - try: - server_info = self._create_server(connector, client) - volume_info = client.getVolumeByName(volume['name']) - - access_already_enabled = False - if volume_info['iscsiSessions'] is not None: - # Extract the server id for each session to check if the - # new server already has access permissions enabled. 
- for session in volume_info['iscsiSessions']: - server_id = int(session['server']['uri'].split('/')[3]) - if server_id == server_info['id']: - access_already_enabled = True - break - - if not access_already_enabled: - client.addServerAccess( - volume_info['id'], - server_info['id']) - - iscsi_properties = self._get_iscsi_properties(volume) - - if ('chapAuthenticationRequired' in server_info and - server_info['chapAuthenticationRequired']): - iscsi_properties['auth_method'] = 'CHAP' - iscsi_properties['auth_username'] = connector['initiator'] - iscsi_properties['auth_password'] = ( - server_info['chapTargetSecret']) - - return {'driver_volume_type': 'iscsi', 'data': iscsi_properties} - except Exception as ex: - raise exception.VolumeBackendAPIException(ex) - finally: - self._logout(client) - - @cinder_utils.trace - def terminate_connection(self, volume, connector, **kwargs): - """Unassign the volume from the host.""" - client = self._login() - try: - volume_info = client.getVolumeByName(volume['name']) - server_info = client.getServerByName(connector['host']) - volume_list = client.findServerVolumes(server_info['name']) - - removeServer = True - for entry in volume_list: - if entry['id'] != volume_info['id']: - removeServer = False - break - - client.removeServerAccess( - volume_info['id'], - server_info['id']) - - if removeServer: - client.deleteServer(server_info['id']) - except hpeexceptions.HTTPNotFound as ex: - # If a host is failed-over, we want to allow the detach to - # to 'succeed' when it cannot find the host. We can simply - # return out of the terminate connection in order for things - # to be updated correctly. - if self._active_backend_id: - LOG.warning("Because the host is currently in a " - "failed-over state, the volume will not " - "be properly detached from the primary " - "array. The detach will be considered a " - "success as far as Cinder is concerned. 
" - "The volume can now be attached to the " - "secondary target.") - return - else: - raise exception.VolumeBackendAPIException(ex) - except Exception as ex: - raise exception.VolumeBackendAPIException(ex) - finally: - self._logout(client) - - @cinder_utils.trace - def create_volume_from_snapshot(self, volume, snapshot): - """Creates a volume from a snapshot.""" - client = self._login() - try: - snap_info = client.getSnapshotByName(snapshot['name']) - volume_info = client.cloneSnapshot( - volume['name'], - snap_info['id']) - - # Extend volume - if volume['size'] > snapshot['volume_size']: - LOG.debug("Resize the new volume to %s.", volume['size']) - self.extend_volume(volume, volume['size']) - - model_update = self._update_provider(volume_info) - - # v2 replication check - if self._volume_of_replicated_type(volume) and ( - self._do_volume_replication_setup(volume, client)): - model_update['replication_status'] = 'enabled' - model_update['replication_driver_data'] = (json.dumps( - {'location': self._client_conf['hpelefthand_api_url']})) - - return model_update - except Exception as ex: - raise exception.VolumeBackendAPIException(ex) - finally: - self._logout(client) - - @cinder_utils.trace - def create_cloned_volume(self, volume, src_vref): - client = self._login() - try: - volume_info = client.getVolumeByName(src_vref['name']) - clone_info = client.cloneVolume(volume['name'], volume_info['id']) - - # Extend volume - if volume['size'] > src_vref['size']: - LOG.debug("Resize the new volume to %s.", volume['size']) - self.extend_volume(volume, volume['size']) - - model_update = self._update_provider(clone_info) - - # v2 replication check - if self._volume_of_replicated_type(volume) and ( - self._do_volume_replication_setup(volume, client)): - model_update['replication_status'] = 'enabled' - model_update['replication_driver_data'] = (json.dumps( - {'location': self._client_conf['hpelefthand_api_url']})) - - return model_update - except Exception as ex: - raise 
exception.VolumeBackendAPIException(ex) - finally: - self._logout(client) - - def _get_volume_extra_specs(self, volume): - """Get extra specs from a volume.""" - extra_specs = {} - type_id = volume.get('volume_type_id', None) - if type_id is not None: - ctxt = context.get_admin_context() - volume_type = volume_types.get_volume_type(ctxt, type_id) - extra_specs = volume_type.get('extra_specs') - return extra_specs - - def _get_lh_extra_specs(self, extra_specs, valid_keys): - """Get LeftHand extra_specs (valid_keys only).""" - extra_specs_of_interest = {} - for key, value in extra_specs.items(): - if key in valid_keys: - prefix = key.split(":") - if prefix[0] == "hplh": - LOG.warning("The 'hplh' prefix is deprecated. Use " - "'hpelh' instead.") - extra_specs_of_interest[key] = value - return extra_specs_of_interest - - def _map_extra_specs(self, extra_specs): - """Map the extra spec key/values to LeftHand key/values.""" - client_options = {} - for key, value in extra_specs.items(): - # map extra spec key to lh client option key - client_key = extra_specs_key_map[key] - # map extra spect value to lh client option value - try: - value_map = extra_specs_value_map[client_key] - # an invalid value will throw KeyError - client_value = value_map[value] - client_options[client_key] = client_value - except KeyError: - LOG.error("'%(value)s' is an invalid value " - "for extra spec '%(key)s'", - {'value': value, 'key': key}) - return client_options - - def _update_provider(self, volume_info, cluster_vip=None): - if not cluster_vip: - cluster_vip = self.cluster_vip - # TODO(justinsb): Is this always 1? Does it matter? 
- cluster_interface = '1' - iscsi_portal = cluster_vip + ":3260," + cluster_interface - - return {'provider_location': ( - "%s %s %s" % (iscsi_portal, volume_info['iscsiIqn'], 0))} - - def _create_server(self, connector, client): - server_info = None - chap_enabled = self._client_conf['hpelefthand_iscsi_chap_enabled'] - try: - server_info = client.getServerByName(connector['host']) - chap_secret = server_info['chapTargetSecret'] - if not chap_enabled and chap_secret: - LOG.warning('CHAP secret exists for host %s but CHAP is ' - 'disabled', connector['host']) - if chap_enabled and chap_secret is None: - LOG.warning('CHAP is enabled, but server secret not ' - 'configured on server %s', connector['host']) - return server_info - except hpeexceptions.HTTPNotFound: - # server does not exist, so create one - pass - - optional = None - if chap_enabled: - chap_secret = utils.generate_password() - optional = {'chapName': connector['initiator'], - 'chapTargetSecret': chap_secret, - 'chapAuthenticationRequired': True - } - - server_info = client.createServer(connector['host'], - connector['initiator'], - optional) - return server_info - - def create_export(self, context, volume, connector): - pass - - def ensure_export(self, context, volume): - pass - - def remove_export(self, context, volume): - pass - - @cinder_utils.trace - def retype(self, ctxt, volume, new_type, diff, host): - """Convert the volume to be of the new type. - - Returns a boolean indicating whether the retype occurred. - - :param ctxt: Context - :param volume: A dictionary describing the volume to retype - :param new_type: A dictionary describing the volume type to convert to - :param diff: A dictionary with the difference between the two types - :param host: A dictionary describing the host, where - host['host'] is its name, and host['capabilities'] is a - dictionary of its reported capabilities. 
- """ - LOG.debug('enter: retype: id=%(id)s, new_type=%(new_type)s,' - 'diff=%(diff)s, host=%(host)s', {'id': volume['id'], - 'new_type': new_type, - 'diff': diff, - 'host': host}) - client = self._login() - try: - volume_info = client.getVolumeByName(volume['name']) - - # pick out the LH extra specs - new_extra_specs = dict(new_type).get('extra_specs') - lh_extra_specs = self._get_lh_extra_specs( - new_extra_specs, - extra_specs_key_map.keys()) - - LOG.debug('LH specs=%(specs)s', {'specs': lh_extra_specs}) - - # only set the ones that have changed - changed_extra_specs = {} - for key, value in lh_extra_specs.items(): - (old, new) = diff['extra_specs'][key] - if old != new: - changed_extra_specs[key] = value - - # map extra specs to LeftHand options - options = self._map_extra_specs(changed_extra_specs) - if len(options) > 0: - client.modifyVolume(volume_info['id'], options) - return True - except hpeexceptions.HTTPNotFound: - raise exception.VolumeNotFound(volume_id=volume['id']) - except Exception as ex: - LOG.warning("%s", ex) - finally: - self._logout(client) - - return False - - @cinder_utils.trace - def migrate_volume(self, ctxt, volume, host): - """Migrate the volume to the specified host. - - Backend assisted volume migration will occur if and only if; - - 1. Same LeftHand backend - 2. Volume cannot be attached - 3. Volumes with snapshots cannot be migrated - 4. Source and Destination clusters must be in the same management group - - Volume re-type is not supported. - - Returns a boolean indicating whether the migration occurred, as well as - model_update. - - :param ctxt: Context - :param volume: A dictionary describing the volume to migrate - :param host: A dictionary describing the host to migrate to, where - host['host'] is its name, and host['capabilities'] is a - dictionary of its reported capabilities. 
- """ - false_ret = (False, None) - if 'location_info' not in host['capabilities']: - return false_ret - - host_location = host['capabilities']['location_info'] - (driver, cluster, vip) = host_location.split(' ') - client = self._login() - LOG.debug('enter: migrate_volume: id=%(id)s, host=%(host)s, ' - 'cluster=%(cluster)s', { - 'id': volume['id'], - 'host': host, - 'cluster': self._client_conf['hpelefthand_clustername']}) - try: - # get the cluster info, if it exists and compare - cluster_info = client.getClusterByName(cluster) - LOG.debug('Cluster info: %s', cluster_info) - virtual_ips = cluster_info['virtualIPAddresses'] - - if driver != self.__class__.__name__: - LOG.info("Cannot provide backend assisted migration for " - "volume: %s because volume is from a different " - "backend.", volume['name']) - return false_ret - if vip != virtual_ips[0]['ipV4Address']: - LOG.info("Cannot provide backend assisted migration for " - "volume: %s because cluster exists in different " - "management group.", volume['name']) - return false_ret - - except hpeexceptions.HTTPNotFound: - LOG.info("Cannot provide backend assisted migration for " - "volume: %s because cluster exists in different " - "management group.", volume['name']) - return false_ret - finally: - self._logout(client) - - client = self._login() - try: - volume_info = client.getVolumeByName(volume['name']) - LOG.debug('Volume info: %s', volume_info) - - # can't migrate if server is attached - if volume_info['iscsiSessions'] is not None: - LOG.info("Cannot provide backend assisted migration " - "for volume: %s because the volume has been " - "exported.", volume['name']) - return false_ret - - # can't migrate if volume has snapshots - snap_info = client.getVolume( - volume_info['id'], - 'fields=snapshots,snapshots[resource[members[name]]]') - LOG.debug('Snapshot info: %s', snap_info) - if snap_info['snapshots']['resource'] is not None: - LOG.info("Cannot provide backend assisted migration " - "for volume: %s because 
the volume has " - "snapshots.", volume['name']) - return false_ret - - options = {'clusterName': cluster} - client.modifyVolume(volume_info['id'], options) - except hpeexceptions.HTTPNotFound: - LOG.info("Cannot provide backend assisted migration for " - "volume: %s because volume does not exist in this " - "management group.", volume['name']) - return false_ret - except hpeexceptions.HTTPServerError as ex: - LOG.error("Exception: %s", ex) - return false_ret - finally: - self._logout(client) - - return (True, None) - - @cinder_utils.trace - def update_migrated_volume(self, context, volume, new_volume, - original_volume_status): - """Rename the new (temp) volume to it's original name. - - - This method tries to rename the new volume to it's original - name after the migration has completed. - - """ - LOG.debug("Update volume name for %(id)s.", {'id': new_volume['id']}) - name_id = None - provider_location = None - if original_volume_status == 'available': - # volume isn't attached and can be updated - original_name = CONF.volume_name_template % volume['id'] - current_name = CONF.volume_name_template % new_volume['id'] - client = self._login() - try: - volume_info = client.getVolumeByName(current_name) - volumeMods = {'name': original_name} - client.modifyVolume(volume_info['id'], volumeMods) - LOG.info("Volume name changed from %(tmp)s to %(orig)s.", - {'tmp': current_name, 'orig': original_name}) - except Exception as e: - LOG.error("Changing the volume name from %(tmp)s to " - "%(orig)s failed because %(reason)s.", - {'tmp': current_name, 'orig': original_name, - 'reason': e}) - name_id = new_volume['_name_id'] or new_volume['id'] - provider_location = new_volume['provider_location'] - finally: - self._logout(client) - else: - # the backend can't change the name. 
- name_id = new_volume['_name_id'] or new_volume['id'] - provider_location = new_volume['provider_location'] - - return {'_name_id': name_id, 'provider_location': provider_location} - - @cinder_utils.trace - def manage_existing(self, volume, existing_ref): - """Manage an existing LeftHand volume. - - existing_ref is a dictionary of the form: - {'source-name': } - """ - # Check API Version - self._check_api_version() - - target_vol_name = self._get_existing_volume_ref_name(existing_ref) - - # Check for the existence of the virtual volume. - client = self._login() - try: - volume_info = client.getVolumeByName(target_vol_name) - except hpeexceptions.HTTPNotFound: - err = (_("Virtual volume '%s' doesn't exist on array.") % - target_vol_name) - LOG.error(err) - raise exception.InvalidInput(reason=err) - finally: - self._logout(client) - - # Generate the new volume information based on the new ID. - new_vol_name = 'volume-' + volume['id'] - - volume_type = None - if volume['volume_type_id']: - try: - volume_type = self._get_volume_type(volume['volume_type_id']) - except Exception: - reason = (_("Volume type ID '%s' is invalid.") % - volume['volume_type_id']) - raise exception.ManageExistingVolumeTypeMismatch(reason=reason) - - new_vals = {"name": new_vol_name} - - client = self._login() - try: - # Update the existing volume with the new name. 
- client.modifyVolume(volume_info['id'], new_vals) - finally: - self._logout(client) - - LOG.info("Virtual volume '%(ref)s' renamed to '%(new)s'.", - {'ref': existing_ref['source-name'], 'new': new_vol_name}) - - display_name = None - if volume['display_name']: - display_name = volume['display_name'] - - if volume_type: - LOG.info("Virtual volume %(disp)s '%(new)s' is being retyped.", - {'disp': display_name, 'new': new_vol_name}) - - try: - self.retype(None, - volume, - volume_type, - volume_type['extra_specs'], - volume['host']) - LOG.info("Virtual volume %(disp)s successfully retyped to " - "%(new_type)s.", - {'disp': display_name, - 'new_type': volume_type.get('name')}) - except Exception: - with excutils.save_and_reraise_exception(): - LOG.warning("Failed to manage virtual volume %(disp)s " - "due to error during retype.", - {'disp': display_name}) - # Try to undo the rename and clear the new comment. - client = self._login() - try: - client.modifyVolume( - volume_info['id'], - {'name': target_vol_name}) - finally: - self._logout(client) - - updates = {'display_name': display_name} - - LOG.info("Virtual volume %(disp)s '%(new)s' is now being managed.", - {'disp': display_name, 'new': new_vol_name}) - - # Return display name to update the name displayed in the GUI and - # any model updates from retype. - return updates - - @cinder_utils.trace - def manage_existing_snapshot(self, snapshot, existing_ref): - """Manage an existing LeftHand snapshot. - - existing_ref is a dictionary of the form: - {'source-name': } - """ - # Check API Version - self._check_api_version() - - # Potential parent volume for the snapshot - volume = snapshot['volume'] - - if volume.get('replication_status') == 'failed-over': - err = (_("Managing of snapshots to failed-over volumes is " - "not allowed.")) - raise exception.InvalidInput(reason=err) - - target_snap_name = self._get_existing_volume_ref_name(existing_ref) - - # Check for the existence of the virtual volume. 
- client = self._login() - try: - updates = self._manage_snapshot(client, - volume, - snapshot, - target_snap_name, - existing_ref) - finally: - self._logout(client) - - # Return display name to update the name displayed in the GUI and - # any model updates from retype. - return updates - - def _manage_snapshot(self, client, volume, snapshot, target_snap_name, - existing_ref): - # Check for the existence of the virtual volume. - try: - snapshot_info = client.getSnapshotByName(target_snap_name) - except hpeexceptions.HTTPNotFound: - err = (_("Snapshot '%s' doesn't exist on array.") % - target_snap_name) - LOG.error(err) - raise exception.InvalidInput(reason=err) - - # Make sure the snapshot is being associated with the correct volume. - try: - parent_vol = client.getSnapshotParentVolume(target_snap_name) - except hpeexceptions.HTTPNotFound: - err = (_("Could not find the parent volume for Snapshot '%s' on " - "array.") % target_snap_name) - LOG.error(err) - raise exception.InvalidInput(reason=err) - - parent_vol_name = 'volume-' + snapshot['volume_id'] - if parent_vol_name != parent_vol['name']: - err = (_("The provided snapshot '%s' is not a snapshot of " - "the provided volume.") % target_snap_name) - LOG.error(err) - raise exception.InvalidInput(reason=err) - - # Generate the new snapshot information based on the new ID. - new_snap_name = 'snapshot-' + snapshot['id'] - - new_vals = {"name": new_snap_name} - - try: - # Update the existing snapshot with the new name. 
- client.modifySnapshot(snapshot_info['id'], new_vals) - except hpeexceptions.HTTPServerError: - err = (_("An error occurred while attempting to modify " - "Snapshot '%s'.") % snapshot_info['id']) - LOG.error(err) - - LOG.info("Snapshot '%(ref)s' renamed to '%(new)s'.", - {'ref': existing_ref['source-name'], 'new': new_snap_name}) - - display_name = None - if snapshot['display_name']: - display_name = snapshot['display_name'] - - updates = {'display_name': display_name} - - LOG.info("Snapshot %(disp)s '%(new)s' is now being managed.", - {'disp': display_name, 'new': new_snap_name}) - - return updates - - @cinder_utils.trace - def manage_existing_get_size(self, volume, existing_ref): - """Return size of volume to be managed by manage_existing. - - existing_ref is a dictionary of the form: - {'source-name': } - """ - # Check API version. - self._check_api_version() - - target_vol_name = self._get_existing_volume_ref_name(existing_ref) - - # Make sure the reference is not in use. - if re.match('volume-*|snapshot-*', target_vol_name): - reason = _("Reference must be the volume name of an unmanaged " - "virtual volume.") - raise exception.ManageExistingInvalidReference( - existing_ref=target_vol_name, - reason=reason) - - # Check for the existence of the virtual volume. - client = self._login() - try: - volume_info = client.getVolumeByName(target_vol_name) - except hpeexceptions.HTTPNotFound: - err = (_("Virtual volume '%s' doesn't exist on array.") % - target_vol_name) - LOG.error(err) - raise exception.InvalidInput(reason=err) - finally: - self._logout(client) - - return int(math.ceil(float(volume_info['size']) / units.Gi)) - - @cinder_utils.trace - def manage_existing_snapshot_get_size(self, snapshot, existing_ref): - """Return size of volume to be managed by manage_existing. - - existing_ref is a dictionary of the form: - {'source-name': } - """ - # Check API version. 
- self._check_api_version() - - target_snap_name = self._get_existing_volume_ref_name(existing_ref) - - # Make sure the reference is not in use. - if re.match('volume-*|snapshot-*|unm-*', target_snap_name): - reason = _("Reference must be the name of an unmanaged " - "snapshot.") - raise exception.ManageExistingInvalidReference( - existing_ref=target_snap_name, - reason=reason) - - # Check for the existence of the virtual volume. - client = self._login() - try: - snapshot_info = client.getSnapshotByName(target_snap_name) - except hpeexceptions.HTTPNotFound: - err = (_("Snapshot '%s' doesn't exist on array.") % - target_snap_name) - LOG.error(err) - raise exception.InvalidInput(reason=err) - finally: - self._logout(client) - - return int(math.ceil(float(snapshot_info['size']) / units.Gi)) - - @cinder_utils.trace - def unmanage(self, volume): - """Removes the specified volume from Cinder management.""" - # Check API version. - self._check_api_version() - - # Rename the volume's name to unm-* format so that it can be - # easily found later. - client = self._login() - try: - volume_info = client.getVolumeByName(volume['name']) - new_vol_name = 'unm-' + six.text_type(volume['id']) - options = {'name': new_vol_name} - client.modifyVolume(volume_info['id'], options) - finally: - self._logout(client) - - LOG.info("Virtual volume %(disp)s '%(vol)s' is no longer managed. " - "Volume renamed to '%(new)s'.", - {'disp': volume['display_name'], - 'vol': volume['name'], - 'new': new_vol_name}) - - @cinder_utils.trace - def unmanage_snapshot(self, snapshot): - """Removes the specified snapshot from Cinder management.""" - # Check API version. 
- self._check_api_version() - - # Potential parent volume for the snapshot - volume = snapshot['volume'] - - if volume.get('replication_status') == 'failed-over': - err = (_("Unmanaging of snapshots from 'failed-over' volumes is " - "not allowed.")) - LOG.error(err) - # TODO(leeantho) Change this exception to Invalid when the volume - # manager supports handling that. - raise exception.SnapshotIsBusy(snapshot_name=snapshot['id']) - - # Rename the snapshots's name to ums-* format so that it can be - # easily found later. - client = self._login() - try: - snapshot_info = client.getSnapshotByName(snapshot['name']) - new_snap_name = 'ums-' + six.text_type(snapshot['id']) - options = {'name': new_snap_name} - client.modifySnapshot(snapshot_info['id'], options) - LOG.info("Snapshot %(disp)s '%(vol)s' is no longer managed. " - "Snapshot renamed to '%(new)s'.", - {'disp': snapshot['display_name'], - 'vol': snapshot['name'], - 'new': new_snap_name}) - finally: - self._logout(client) - - def _get_existing_volume_ref_name(self, existing_ref): - """Returns the volume name of an existing reference. - - Checks if an existing volume reference has a source-name element. - If source-name is not present an error will be thrown. - """ - if 'source-name' not in existing_ref: - reason = _("Reference must contain source-name.") - raise exception.ManageExistingInvalidReference( - existing_ref=existing_ref, - reason=reason) - - return existing_ref['source-name'] - - def _check_api_version(self): - """Checks that the API version is correct.""" - if (self.api_version < MIN_API_VERSION): - ex_msg = (_('Invalid HPELeftHand API version found: %(found)s. 
' - 'Version %(minimum)s or greater required for ' - 'manage/unmanage support.') - % {'found': self.api_version, - 'minimum': MIN_API_VERSION}) - LOG.error(ex_msg) - raise exception.InvalidInput(reason=ex_msg) - - def _get_volume_type(self, type_id): - ctxt = context.get_admin_context() - return volume_types.get_volume_type(ctxt, type_id) - - # v2 replication methods - @cinder_utils.trace - def failover_host(self, context, volumes, secondary_id=None, groups=None): - """Force failover to a secondary replication target.""" - if secondary_id and secondary_id == self.FAILBACK_VALUE: - volume_update_list = self._replication_failback(volumes) - target_id = None - else: - failover_target = None - for target in self._replication_targets: - if target['backend_id'] == secondary_id: - failover_target = target - break - if not failover_target: - msg = _("A valid secondary target MUST be specified in order " - "to failover.") - LOG.error(msg) - raise exception.InvalidReplicationTarget(reason=msg) - - target_id = failover_target['backend_id'] - volume_update_list = [] - for volume in volumes: - if self._volume_of_replicated_type(volume): - # Try and stop the remote snapshot schedule. If the primary - # array is down, we will continue with the failover. - client = None - try: - client = self._login(timeout=30) - name = volume['name'] + self.REP_SCHEDULE_SUFFIX + ( - "_Pri") - client.stopRemoteSnapshotSchedule(name) - except Exception: - LOG.warning("The primary array is currently " - "offline, remote copy has been " - "automatically paused.") - finally: - self._logout(client) - - # Update provider location to the new array. - cl = None - try: - cl = self._create_replication_client(failover_target) - # Stop snapshot schedule - try: - name = volume['name'] + ( - self.REP_SCHEDULE_SUFFIX + "_Rmt") - cl.stopRemoteSnapshotSchedule(name) - except Exception: - pass - # Make the volume primary so it can be attached after a - # fail-over. 
- cl.makeVolumePrimary(volume['name']) - - # Update the provider info for a proper fail-over. - volume_info = cl.getVolumeByName(volume['name']) - prov_location = self._update_provider( - volume_info, - cluster_vip=failover_target['cluster_vip']) - volume_update_list.append( - {'volume_id': volume['id'], - 'updates': {'replication_status': 'failed-over', - 'provider_location': - prov_location['provider_location']}}) - except Exception as ex: - LOG.error("There was a problem with the failover " - "(%(error)s) and it was unsuccessful. " - "Volume '%(volume)s will not be available " - "on the failed over target.", - {'error': six.text_type(ex), - 'volume': volume['id']}) - volume_update_list.append( - {'volume_id': volume['id'], - 'updates': {'replication_status': 'error'}}) - finally: - self._destroy_replication_client(cl) - else: - # If the volume is not of replicated type, we need to - # force the status into error state so a user knows they - # do not have access to the volume. - volume_update_list.append( - {'volume_id': volume['id'], - 'updates': {'status': 'error'}}) - - self._active_backend_id = target_id - - return target_id, volume_update_list, [] - - def _do_replication_setup(self): - default_san_ssh_port = self.configuration.hpelefthand_ssh_port - default_ssh_conn_timeout = self.configuration.ssh_conn_timeout - default_san_private_key = self.configuration.san_private_key - - replication_targets = [] - replication_devices = self.configuration.replication_device - if replication_devices: - # We do not want to fail if we cannot log into the client here - # as a failover can still occur, so we need out replication - # devices to exist. 
- for dev in replication_devices: - remote_array = dict(dev.items()) - # Override and set defaults for certain entries - remote_array['managed_backend_name'] = ( - dev.get('managed_backend_name')) - remote_array['hpelefthand_ssh_port'] = ( - dev.get('hpelefthand_ssh_port', default_san_ssh_port)) - remote_array['ssh_conn_timeout'] = ( - dev.get('ssh_conn_timeout', default_ssh_conn_timeout)) - remote_array['san_private_key'] = ( - dev.get('san_private_key', default_san_private_key)) - # Format hpe3par_iscsi_chap_enabled as a bool - remote_array['hpelefthand_iscsi_chap_enabled'] = ( - dev.get('hpelefthand_iscsi_chap_enabled') == 'True') - remote_array['cluster_id'] = None - remote_array['cluster_vip'] = None - array_name = remote_array['backend_id'] - - # Make sure we can log into the array, that it has been - # correctly configured, and its API version meets the - # minimum requirement. - cl = None - try: - cl = self._create_replication_client(remote_array) - api_version = cl.getApiVersion() - cluster_info = cl.getClusterByName( - remote_array['hpelefthand_clustername']) - remote_array['cluster_id'] = cluster_info['id'] - virtual_ips = cluster_info['virtualIPAddresses'] - remote_array['cluster_vip'] = virtual_ips[0]['ipV4Address'] - - if api_version < MIN_API_VERSION: - LOG.warning("The secondary array must have an API " - "version of %(min_ver)s or higher. " - "Array '%(target)s' is on %(target_ver)s, " - "therefore it will not be added as a " - "valid replication target.", - {'min_ver': MIN_API_VERSION, - 'target': array_name, - 'target_ver': api_version}) - elif not self._is_valid_replication_array(remote_array): - LOG.warning("'%s' is not a valid replication array. " - "In order to be valid, backend_id, " - "hpelefthand_api_url, " - "hpelefthand_username, " - "hpelefthand_password, and " - "hpelefthand_clustername, " - "must be specified. 
If the target is " - "managed, managed_backend_name must be " - "set as well.", array_name) - else: - replication_targets.append(remote_array) - except Exception: - LOG.error("Could not log in to LeftHand array (%s) with " - "the provided credentials.", array_name) - finally: - self._destroy_replication_client(cl) - - self._replication_targets = replication_targets - if self._is_replication_configured_correct(): - self._replication_enabled = True - - def _replication_failback(self, volumes): - array_config = {'hpelefthand_api_url': - self.configuration.hpelefthand_api_url, - 'hpelefthand_username': - self.configuration.hpelefthand_username, - 'hpelefthand_password': - self.configuration.hpelefthand_password, - 'hpelefthand_ssh_port': - self.configuration.hpelefthand_ssh_port} - - # Make sure the proper steps on the backend have been completed before - # we allow a failback. - if not self._is_host_ready_for_failback(volumes, array_config): - msg = _("The host is not ready to be failed back. Please " - "resynchronize the volumes and resume replication on the " - "LeftHand backends.") - LOG.error(msg) - raise exception.InvalidReplicationTarget(reason=msg) - - cl = None - volume_update_list = [] - for volume in volumes: - if self._volume_of_replicated_type(volume): - try: - cl = self._create_replication_client(array_config) - # Update the provider info for a proper fail-back. - volume_info = cl.getVolumeByName(volume['name']) - cluster_info = cl.getClusterByName( - self.configuration.hpelefthand_clustername) - virtual_ips = cluster_info['virtualIPAddresses'] - cluster_vip = virtual_ips[0]['ipV4Address'] - provider_location = self._update_provider( - volume_info, cluster_vip=cluster_vip) - volume_update_list.append( - {'volume_id': volume['id'], - 'updates': {'replication_status': 'available', - 'provider_location': - provider_location['provider_location']}}) - except Exception as ex: - # The secondary array was not able to execute the fail-back - # properly. 
The replication status is now in an unknown - # state, so we will treat it as an error. - LOG.error("There was a problem with the failover " - "(%(error)s) and it was unsuccessful. " - "Volume '%(volume)s will not be available " - "on the failed over target.", - {'error': ex, - 'volume': volume['id']}) - volume_update_list.append( - {'volume_id': volume['id'], - 'updates': {'replication_status': 'error'}}) - finally: - self._destroy_replication_client(cl) - else: - # Upon failing back, we can move the non-replicated volumes - # back into available state. - volume_update_list.append( - {'volume_id': volume['id'], - 'updates': {'status': 'available'}}) - - return volume_update_list - - def _is_host_ready_for_failback(self, volumes, array_config): - """Checks to make sure the volumes have been synchronized - - This entails ensuring the remote snapshot schedule has been resumed - on the backends and the secondary volume's data has been copied back - to the primary. - """ - is_ready = True - cl = None - try: - for volume in volumes: - if self._volume_of_replicated_type(volume): - schedule_name = volume['name'] + ( - self.REP_SCHEDULE_SUFFIX + "_Pri") - cl = self._create_replication_client(array_config) - schedule = cl.getRemoteSnapshotSchedule(schedule_name) - schedule = ''.join(schedule) - # We need to check the status of the schedule to make sure - # it is not paused. 
- result = re.search(r".*paused\s+(\w+)", schedule) - is_schedule_active = result.group(1) == 'false' - - volume_info = cl.getVolumeByName(volume['name']) - if not volume_info['isPrimary'] or not is_schedule_active: - is_ready = False - break - except Exception as ex: - LOG.error("There was a problem when trying to determine if " - "the volume can be failed-back: %s", ex) - is_ready = False - finally: - self._destroy_replication_client(cl) - - return is_ready - - def _get_replication_targets(self): - replication_targets = [] - for target in self._replication_targets: - replication_targets.append(target['backend_id']) - - return replication_targets - - def _is_valid_replication_array(self, target): - required_flags = ['hpelefthand_api_url', 'hpelefthand_username', - 'hpelefthand_password', 'backend_id', - 'hpelefthand_clustername'] - try: - self.check_replication_flags(target, required_flags) - return True - except Exception: - return False - - def _is_replication_configured_correct(self): - rep_flag = True - # Make sure there is at least one replication target. - if len(self._replication_targets) < 1: - LOG.error("There must be at least one valid replication " - "device configured.") - rep_flag = False - return rep_flag - - def _volume_of_replicated_type(self, volume, vol_type_id=None): - # TODO(kushal) : we will use volume.volume_types when we re-write - # the design for unit tests to use objects instead of dicts. 
- replicated_type = False - volume_type_id = vol_type_id if vol_type_id else volume.get( - 'volume_type_id') - if volume_type_id: - volume_type = self._get_volume_type(volume_type_id) - - extra_specs = volume_type.get('extra_specs') - if extra_specs and 'replication_enabled' in extra_specs: - rep_val = extra_specs['replication_enabled'] - replicated_type = (rep_val == " True") - - return replicated_type - - def _does_snapshot_schedule_exist(self, schedule_name, client): - try: - exists = client.doesRemoteSnapshotScheduleExist(schedule_name) - except Exception: - exists = False - return exists - - def _get_lefthand_config(self): - conf = None - for target in self._replication_targets: - if target['backend_id'] == self._active_backend_id: - conf = target - break - - return conf - - def _do_volume_replication_setup(self, volume, client, optional=None): - """This function will do or ensure the following: - - -Create volume on main array (already done in create_volume) - -Create volume on secondary array - -Make volume remote on secondary array - -Create the snapshot schedule - - If anything here fails, we will need to clean everything up in - reverse order, including the original volume. - """ - schedule_name = volume['name'] + self.REP_SCHEDULE_SUFFIX - # If there is already a snapshot schedule, the volume is setup - # for replication on the backend. Start the schedule and return - # success. - if self._does_snapshot_schedule_exist(schedule_name + "_Pri", client): - try: - client.startRemoteSnapshotSchedule(schedule_name + "_Pri") - except Exception: - pass - return True - - # Grab the extra_spec entries for replication and make sure they - # are set correctly. 
- volume_type = self._get_volume_type(volume["volume_type_id"]) - extra_specs = volume_type.get("extra_specs") - - # Get and check replication sync period - replication_sync_period = extra_specs.get( - self.EXTRA_SPEC_REP_SYNC_PERIOD) - if replication_sync_period: - replication_sync_period = int(replication_sync_period) - if replication_sync_period < self.MIN_REP_SYNC_PERIOD: - msg = (_("The replication sync period must be at least %s " - "seconds.") % self.MIN_REP_SYNC_PERIOD) - LOG.error(msg) - raise exception.VolumeBackendAPIException(data=msg) - else: - # If there is no extra_spec value for replication sync period, we - # will default it to the required minimum and log a warning. - replication_sync_period = self.MIN_REP_SYNC_PERIOD - LOG.warning("There was no extra_spec value for %(spec_name)s, " - "so the default value of %(def_val)s will be " - "used. To overwrite this, set this value in the " - "volume type extra_specs.", - {'spec_name': self.EXTRA_SPEC_REP_SYNC_PERIOD, - 'def_val': self.MIN_REP_SYNC_PERIOD}) - - # Get and check retention count - retention_count = extra_specs.get( - self.EXTRA_SPEC_REP_RETENTION_COUNT) - if retention_count: - retention_count = int(retention_count) - if retention_count > self.MAX_RETENTION_COUNT: - msg = (_("The retention count must be %s or less.") % - self.MAX_RETENTION_COUNT) - LOG.error(msg) - raise exception.VolumeBackendAPIException(data=msg) - else: - # If there is no extra_spec value for retention count, we - # will default it and log a warning. - retention_count = self.DEFAULT_RETENTION_COUNT - LOG.warning("There was no extra_spec value for %(spec_name)s, " - "so the default value of %(def_val)s will be " - "used. 
To overwrite this, set this value in the " - "volume type extra_specs.", - {'spec_name': self.EXTRA_SPEC_REP_RETENTION_COUNT, - 'def_val': self.DEFAULT_RETENTION_COUNT}) - - # Get and checkout remote retention count - remote_retention_count = extra_specs.get( - self.EXTRA_SPEC_REP_REMOTE_RETENTION_COUNT) - if remote_retention_count: - remote_retention_count = int(remote_retention_count) - if remote_retention_count > self.MAX_REMOTE_RETENTION_COUNT: - msg = (_("The remote retention count must be %s or less.") % - self.MAX_REMOTE_RETENTION_COUNT) - LOG.error(msg) - raise exception.VolumeBackendAPIException(data=msg) - else: - # If there is no extra_spec value for remote retention count, we - # will default it and log a warning. - remote_retention_count = self.DEFAULT_REMOTE_RETENTION_COUNT - spec_name = self.EXTRA_SPEC_REP_REMOTE_RETENTION_COUNT - LOG.warning("There was no extra_spec value for %(spec_name)s, " - "so the default value of %(def_val)s will be " - "used. To overwrite this, set this value in the " - "volume type extra_specs.", - {'spec_name': spec_name, - 'def_val': self.DEFAULT_REMOTE_RETENTION_COUNT}) - - cl = None - try: - # Create volume on secondary system - for remote_target in self._replication_targets: - cl = self._create_replication_client(remote_target) - - if optional: - optional['clusterName'] = ( - remote_target['hpelefthand_clustername']) - cl.createVolume(volume['name'], - remote_target['cluster_id'], - volume['size'] * units.Gi, - optional) - - # Make secondary volume a remote volume - # NOTE: The snapshot created when making a volume remote is - # not managed by cinder. This snapshot will be removed when - # _do_volume_replication_destroy is called. - snap_name = volume['name'] + self.REP_SNAPSHOT_SUFFIX - cl.makeVolumeRemote(volume['name'], snap_name) - - # A remote IP address is needed from the cluster in order to - # create the snapshot schedule. 
- remote_ip = cl.getIPFromCluster( - remote_target['hpelefthand_clustername']) - - # Destroy remote client - self._destroy_replication_client(cl) - - # Create remote snapshot schedule on the primary system. - # We want to start the remote snapshot schedule instantly; a - # date in the past will do that. We will use the Linux epoch - # date formatted to ISO 8601 (YYYY-MM-DDTHH:MM:SSZ). - start_date = "1970-01-01T00:00:00Z" - remote_vol_name = volume['name'] - - client.createRemoteSnapshotSchedule( - volume['name'], - schedule_name, - replication_sync_period, - start_date, - retention_count, - remote_target['hpelefthand_clustername'], - remote_retention_count, - remote_vol_name, - remote_ip, - remote_target['hpelefthand_username'], - remote_target['hpelefthand_password']) - - return True - except Exception as ex: - # Destroy the replication client that was created - self._destroy_replication_client(cl) - # Deconstruct what we tried to create - self._do_volume_replication_destroy(volume, client) - msg = (_("There was an error setting up a remote schedule " - "on the LeftHand arrays: ('%s'). The volume will not be " - "recognized as replication type.") % - six.text_type(ex)) - LOG.error(msg) - raise exception.VolumeBackendAPIException(data=msg) - - def _do_volume_replication_destroy(self, volume, client): - """This will remove all dependencies of a replicated volume - - It should be used when deleting a replication enabled volume - or if setting up a remote copy group fails. 
It will try and do the - following: - -Delete the snapshot schedule - -Delete volume and snapshots on secondary array - -Delete volume and snapshots on primary array - """ - # Delete snapshot schedule - try: - schedule_name = volume['name'] + self.REP_SCHEDULE_SUFFIX - client.deleteRemoteSnapshotSchedule(schedule_name) - except Exception: - pass - - # Delete volume on secondary array(s) - remote_vol_name = volume['name'] - for remote_target in self._replication_targets: - try: - cl = self._create_replication_client(remote_target) - volume_info = cl.getVolumeByName(remote_vol_name) - cl.deleteVolume(volume_info['id']) - except Exception: - pass - finally: - # Destroy the replication client that was created - self._destroy_replication_client(cl) - - # Delete volume on primary array - try: - volume_info = client.getVolumeByName(volume['name']) - client.deleteVolume(volume_info['id']) - except Exception: - pass diff --git a/cinder/volume/drivers/huawei/__init__.py b/cinder/volume/drivers/huawei/__init__.py deleted file mode 100644 index e69de29bb..000000000 diff --git a/cinder/volume/drivers/huawei/constants.py b/cinder/volume/drivers/huawei/constants.py deleted file mode 100644 index be37dc77c..000000000 --- a/cinder/volume/drivers/huawei/constants.py +++ /dev/null @@ -1,131 +0,0 @@ -# Copyright (c) 2016 Huawei Technologies Co., Ltd. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -STATUS_HEALTH = '1' -STATUS_ACTIVE = '43' -STATUS_RUNNING = '10' -STATUS_VOLUME_READY = '27' -STATUS_LUNCOPY_READY = '40' -STATUS_QOS_ACTIVE = '2' -STATUS_QOS_INACTIVE = '45' -LUN_TYPE = '11' -SNAPSHOT_TYPE = '27' - -BLOCK_STORAGE_POOL_TYPE = '1' -FILE_SYSTEM_POOL_TYPE = '2' - -HOSTGROUP_PREFIX = 'OpenStack_HostGroup_' -LUNGROUP_PREFIX = 'OpenStack_LunGroup_' -MAPPING_VIEW_PREFIX = 'OpenStack_Mapping_View_' -PORTGROUP_PREFIX = 'OpenStack_PortGroup_' -QOS_NAME_PREFIX = 'OpenStack_' -PORTGROUP_DESCRIP_PREFIX = "Please do NOT modify this. Engine ID: " -ARRAY_VERSION = 'V300R003C00' -FC_PORT_CONNECTED = '10' -FC_INIT_ONLINE = '27' -FC_PORT_MODE_FABRIC = '0' -CAPACITY_UNIT = 1024.0 * 1024.0 * 2 -DEFAULT_WAIT_TIMEOUT = 3600 * 24 * 30 -DEFAULT_WAIT_INTERVAL = 5 - -MIGRATION_WAIT_INTERVAL = 5 -MIGRATION_FAULT = '74' -MIGRATION_COMPLETE = '76' - -ERROR_CONNECT_TO_SERVER = -403 -ERROR_UNAUTHORIZED_TO_SERVER = -401 -SOCKET_TIMEOUT = 52 -ERROR_VOLUME_ALREADY_EXIST = 1077948993 -LOGIN_SOCKET_TIMEOUT = 4 -ERROR_VOLUME_NOT_EXIST = 1077939726 -RELOGIN_ERROR_PASS = [ERROR_VOLUME_NOT_EXIST] -RUNNING_NORMAL = '1' -RUNNING_SYNC = '23' -RUNNING_STOP = '41' -HEALTH_NORMAL = '1' - -NO_SPLITMIRROR_LICENSE = 1077950233 -NO_MIGRATION_LICENSE = 1073806606 - -THICK_LUNTYPE = 0 -THIN_LUNTYPE = 1 -MAX_HOSTNAME_LENGTH = 31 -MAX_VOL_DESCRIPTION = 170 -PORT_NUM_PER_CONTR = 2 -PWD_EXPIRED = 3 -PWD_RESET = 4 - -OS_TYPE = {'Linux': '0', - 'Windows': '1', - 'Solaris': '2', - 'HP-UX': '3', - 'AIX': '4', - 'XenServer': '5', - 'Mac OS X': '6', - 'VMware ESX': '7'} - -HUAWEI_VALID_KEYS = ['maxIOPS', 'minIOPS', 'minBandWidth', - 'maxBandWidth', 'latency', 'IOType'] -QOS_KEYS = [i.upper() for i in HUAWEI_VALID_KEYS] -EXTRA_QOS_KEYS = ['MAXIOPS', 'MINIOPS', 'MINBANDWIDTH', 'MAXBANDWIDTH'] -LOWER_LIMIT_KEYS = ['MINIOPS', 'LATENCY', 'MINBANDWIDTH'] -UPPER_LIMIT_KEYS = ['MAXIOPS', 'MAXBANDWIDTH'] -MAX_LUN_NUM_IN_QOS = 64 - -DEFAULT_REPLICA_WAIT_INTERVAL = 1 -DEFAULT_REPLICA_WAIT_TIMEOUT = 20 - 
-REPLICA_SYNC_MODEL = '1' -REPLICA_ASYNC_MODEL = '2' -REPLICA_SPEED = '2' -REPLICA_PERIOD = '3600' -REPLICA_SECOND_RO = '2' -REPLICA_SECOND_RW = '3' - -REPLICA_RUNNING_STATUS_KEY = 'RUNNINGSTATUS' -REPLICA_RUNNING_STATUS_INITIAL_SYNC = '21' -REPLICA_RUNNING_STATUS_SYNC = '23' -REPLICA_RUNNING_STATUS_SYNCED = '24' -REPLICA_RUNNING_STATUS_NORMAL = '1' -REPLICA_RUNNING_STATUS_SPLIT = '26' -REPLICA_RUNNING_STATUS_ERRUPTED = '34' -REPLICA_RUNNING_STATUS_INVALID = '35' - -REPLICA_HEALTH_STATUS_KEY = 'HEALTHSTATUS' -REPLICA_HEALTH_STATUS_NORMAL = '1' - -REPLICA_LOCAL_DATA_STATUS_KEY = 'PRIRESDATASTATUS' -REPLICA_REMOTE_DATA_STATUS_KEY = 'SECRESDATASTATUS' -REPLICA_DATA_SYNC_KEY = 'ISDATASYNC' -REPLICA_DATA_STATUS_SYNCED = '1' -REPLICA_DATA_STATUS_COMPLETE = '2' -REPLICA_DATA_STATUS_INCOMPLETE = '3' - -LUN_TYPE_MAP = {'Thick': THICK_LUNTYPE, - 'Thin': THIN_LUNTYPE} - -PRODUCT_LUN_TYPE = { - 'Dorado': 'Thin', -} - -VOLUME_NOT_EXISTS_WARN = 'warning' -VOLUME_NOT_EXISTS_RAISE = 'raise' - -LUN_COPY_SPEED_TYPES = ( - LUN_COPY_SPEED_LOW, - LUN_COPY_SPEED_MEDIUM, - LUN_COPY_SPEED_HIGH, - LUN_COPY_SPEED_HIGHEST -) = ('1', '2', '3', '4') diff --git a/cinder/volume/drivers/huawei/fc_zone_helper.py b/cinder/volume/drivers/huawei/fc_zone_helper.py deleted file mode 100644 index 03c0943b5..000000000 --- a/cinder/volume/drivers/huawei/fc_zone_helper.py +++ /dev/null @@ -1,261 +0,0 @@ -# Copyright (c) 2016 Huawei Technologies Co., Ltd. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the -# License for the specific language governing permissions and limitations -# under the License. - -import json - -from oslo_log import log as logging - -from cinder import exception -from cinder.i18n import _ -from cinder.volume.drivers.huawei import constants - - -LOG = logging.getLogger(__name__) - - -class FCZoneHelper(object): - """FC zone helper for Huawei driver.""" - - def __init__(self, fcsan_lookup_service, client): - self.fcsan = fcsan_lookup_service - self.client = client - - def _get_fc_ports_info(self): - ports_info = {} - data = self.client.get_fc_ports_on_array() - for item in data: - if item['RUNNINGSTATUS'] == constants.FC_PORT_CONNECTED: - location = item['PARENTID'].split('.') - port_info = {} - port_info['id'] = item['ID'] - port_info['contr'] = location[0] - port_info['bandwidth'] = item['RUNSPEED'] - ports_info[item['WWN']] = port_info - return ports_info - - def _count_port_weight(self, port, ports_info): - LOG.debug("Count weight for port: %s.", port) - portgs = self.client.get_portgs_by_portid(ports_info[port]['id']) - LOG.debug("Port %(port)s belongs to PortGroup %(portgs)s.", - {"port": port, "portgs": portgs}) - weight = 0 - for portg in portgs: - views = self.client.get_views_by_portg(portg) - if not views: - LOG.debug("PortGroup %s doesn't belong to any view.", portg) - continue - - LOG.debug("PortGroup %(portg)s belongs to view %(views)s.", - {"portg": portg, "views": views[0]}) - # In fact, there is just one view for one port group. 
- lungroup = self.client.get_lungroup_by_view(views[0]) - lun_num = self.client.get_obj_count_from_lungroup(lungroup) - ports_in_portg = self.client.get_ports_by_portg(portg) - LOG.debug("PortGroup %(portg)s contains ports: %(ports)s.", - {"portg": portg, "ports": ports_in_portg}) - total_bandwidth = 0 - for port_pg in ports_in_portg: - if port_pg in ports_info: - total_bandwidth += int(ports_info[port_pg]['bandwidth']) - - LOG.debug("Total bandwidth for PortGroup %(portg)s is %(bindw)s.", - {"portg": portg, "bindw": total_bandwidth}) - - if total_bandwidth: - weight += float(lun_num) / float(total_bandwidth) - - bandwidth = float(ports_info[port]['bandwidth']) - return (weight, 10000 / bandwidth) - - def _get_weighted_ports_per_contr(self, ports, ports_info): - port_weight_map = {} - for port in ports: - port_weight_map[port] = self._count_port_weight(port, ports_info) - - LOG.debug("port_weight_map: %s", port_weight_map) - sorted_ports = sorted(port_weight_map.items(), key=lambda d: d[1]) - weighted_ports = [] - count = 0 - for port in sorted_ports: - if count >= constants.PORT_NUM_PER_CONTR: - break - weighted_ports.append(port[0]) - count += 1 - return weighted_ports - - def _get_weighted_ports(self, contr_port_map, ports_info, contrs): - LOG.debug("_get_weighted_ports, we only select ports from " - "controllers: %s", contrs) - weighted_ports = [] - for contr in contrs: - if contr in contr_port_map: - weighted_ports_per_contr = self._get_weighted_ports_per_contr( - contr_port_map[contr], ports_info) - LOG.debug("Selected ports %(ports)s on controller %(contr)s.", - {"ports": weighted_ports_per_contr, - "contr": contr}) - weighted_ports.extend(weighted_ports_per_contr) - return weighted_ports - - def _filter_by_fabric(self, wwns, ports): - """Filter FC ports and initiators connected to fabrics.""" - ini_tgt_map = self.fcsan.get_device_mapping_from_network(wwns, ports) - fabric_connected_ports = [] - fabric_connected_initiators = [] - for fabric in ini_tgt_map: - 
fabric_connected_ports.extend( - ini_tgt_map[fabric]['target_port_wwn_list']) - fabric_connected_initiators.extend( - ini_tgt_map[fabric]['initiator_port_wwn_list']) - - if not fabric_connected_ports: - msg = _("No FC port connected to fabric.") - raise exception.VolumeBackendAPIException(data=msg) - if not fabric_connected_initiators: - msg = _("No initiator connected to fabric.") - raise exception.VolumeBackendAPIException(data=msg) - - LOG.debug("Fabric connected ports: %(ports)s, " - "Fabric connected initiators: %(initiators)s.", - {'ports': fabric_connected_ports, - 'initiators': fabric_connected_initiators}) - return fabric_connected_ports, fabric_connected_initiators - - def _get_lun_engine_contrs(self, engines, lun_id, - lun_type=constants.LUN_TYPE): - contrs = [] - engine_id = None - lun_info = self.client.get_lun_info(lun_id, lun_type) - lun_contr_id = lun_info['OWNINGCONTROLLER'] - for engine in engines: - contrs = json.loads(engine['NODELIST']) - engine_id = engine['ID'] - if lun_contr_id in contrs: - break - - LOG.debug("LUN %(lun_id)s belongs to engine %(engine_id)s. 
Engine " - "%(engine_id)s has controllers: %(contrs)s.", - {"lun_id": lun_id, "engine_id": engine_id, "contrs": contrs}) - return contrs, engine_id - - def _build_contr_port_map(self, fabric_connected_ports, ports_info): - contr_port_map = {} - for port in fabric_connected_ports: - contr = ports_info[port]['contr'] - if not contr_port_map.get(contr): - contr_port_map[contr] = [] - contr_port_map[contr].append(port) - LOG.debug("Controller port map: %s.", contr_port_map) - return contr_port_map - - def _create_new_portg(self, portg_name, engine_id): - portg_id = self.client.get_tgt_port_group(portg_name) - if portg_id: - LOG.debug("Found port group %s not belonged to any view, " - "deleting it.", portg_name) - ports = self.client.get_fc_ports_by_portgroup(portg_id) - for port_id in ports.values(): - self.client.remove_port_from_portgroup(portg_id, port_id) - self.client.delete_portgroup(portg_id) - description = constants.PORTGROUP_DESCRIP_PREFIX + engine_id - new_portg_id = self.client.create_portg(portg_name, description) - return new_portg_id - - def build_ini_targ_map(self, wwns, host_id, lun_id, - lun_type=constants.LUN_TYPE): - engines = self.client.get_all_engines() - LOG.debug("Get array engines: %s", engines) - - contrs, engine_id = self._get_lun_engine_contrs(engines, lun_id, - lun_type) - - # Check if there is already a port group in the view. - # If yes and have already considered the engine, - # we won't change anything about the port group and zone. 
- view_name = constants.MAPPING_VIEW_PREFIX + host_id - portg_name = constants.PORTGROUP_PREFIX + host_id - view_id = self.client.find_mapping_view(view_name) - portg_info = self.client.get_portgroup_by_view(view_id) - portg_id = portg_info[0]['ID'] if portg_info else None - - init_targ_map = {} - if portg_id: - description = portg_info[0].get("DESCRIPTION", '') - engines = description.replace(constants.PORTGROUP_DESCRIP_PREFIX, - "") - engines = engines.split(',') - ports = self.client.get_fc_ports_by_portgroup(portg_id) - if engine_id in engines: - LOG.debug("Have already selected ports for engine %s, just " - "use them.", engine_id) - return (list(ports.keys()), portg_id, init_targ_map) - - # Filter initiators and ports that connected to fabrics. - ports_info = self._get_fc_ports_info() - (fabric_connected_ports, fabric_connected_initiators) = ( - self._filter_by_fabric(wwns, ports_info.keys())) - - # Build a controller->ports map for convenience. - contr_port_map = self._build_contr_port_map(fabric_connected_ports, - ports_info) - # Get the 'best' ports for the given controllers. - weighted_ports = self._get_weighted_ports(contr_port_map, ports_info, - contrs) - if not weighted_ports: - msg = _("No FC port can be used for LUN %s.") % lun_id - LOG.error(msg) - raise exception.VolumeBackendAPIException(data=msg) - - # Handle port group. - port_list = [ports_info[port]['id'] for port in weighted_ports] - - if portg_id: - # Add engine ID to the description of the port group. - self.client.append_portg_desc(portg_id, engine_id) - # Extend the weighted_ports to include the ports already in the - # port group. 
- weighted_ports.extend(list(ports.keys())) - else: - portg_id = self._create_new_portg(portg_name, engine_id) - - for port in port_list: - self.client.add_port_to_portg(portg_id, port) - - for ini in fabric_connected_initiators: - init_targ_map[ini] = weighted_ports - LOG.debug("build_ini_targ_map: Port group name: %(portg_name)s, " - "init_targ_map: %(map)s.", - {"portg_name": portg_name, - "map": init_targ_map}) - return weighted_ports, portg_id, init_targ_map - - def get_init_targ_map(self, wwns, host_id): - error_ret = ([], None, {}) - if not host_id: - return error_ret - - view_name = constants.MAPPING_VIEW_PREFIX + host_id - view_id = self.client.find_mapping_view(view_name) - if not view_id: - return error_ret - port_group = self.client.get_portgroup_by_view(view_id) - portg_id = port_group[0]['ID'] if port_group else None - ports = self.client.get_fc_ports_by_portgroup(portg_id) - for port_id in ports.values(): - self.client.remove_port_from_portgroup(portg_id, port_id) - init_targ_map = {} - for wwn in wwns: - init_targ_map[wwn] = list(ports.keys()) - return list(ports.keys()), portg_id, init_targ_map diff --git a/cinder/volume/drivers/huawei/huawei_conf.py b/cinder/volume/drivers/huawei/huawei_conf.py deleted file mode 100644 index 4f8032af2..000000000 --- a/cinder/volume/drivers/huawei/huawei_conf.py +++ /dev/null @@ -1,339 +0,0 @@ -# Copyright (c) 2016 Huawei Technologies Co., Ltd. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -""" -Set Huawei private configuration into Configuration object. - -For conveniently get private configuration. We parse Huawei config file -and set every property into Configuration object as an attribute. -""" - -import base64 -import six -from xml.etree import ElementTree as ET - -from oslo_log import log as logging - -from cinder import exception -from cinder.i18n import _ -from cinder import utils -from cinder.volume.drivers.huawei import constants - -LOG = logging.getLogger(__name__) - - -class HuaweiConf(object): - def __init__(self, conf): - self.conf = conf - - def _encode_authentication(self): - need_encode = False - tree = ET.parse(self.conf.cinder_huawei_conf_file) - xml_root = tree.getroot() - name_node = xml_root.find('Storage/UserName') - pwd_node = xml_root.find('Storage/UserPassword') - if (name_node is not None - and not name_node.text.startswith('!$$$')): - name_node.text = '!$$$' + base64.b64encode(name_node.text) - need_encode = True - if (pwd_node is not None - and not pwd_node.text.startswith('!$$$')): - pwd_node.text = '!$$$' + base64.b64encode(pwd_node.text) - need_encode = True - - if need_encode: - utils.execute('chmod', - '600', - self.conf.cinder_huawei_conf_file, - run_as_root=True) - tree.write(self.conf.cinder_huawei_conf_file, 'UTF-8') - - def update_config_value(self): - self._encode_authentication() - - set_attr_funcs = (self._san_address, - self._san_user, - self._san_password, - self._san_product, - self._san_protocol, - self._lun_type, - self._lun_ready_wait_interval, - self._lun_copy_wait_interval, - self._lun_timeout, - self._lun_write_type, - self._lun_prefetch, - self._lun_policy, - self._lun_read_cache_policy, - self._lun_write_cache_policy, - self._storage_pools, - self._iscsi_default_target_ip, - self._iscsi_info,) - - tree = ET.parse(self.conf.cinder_huawei_conf_file) - xml_root = tree.getroot() - for f in set_attr_funcs: - f(xml_root) - - def _san_address(self, xml_root): - text = 
xml_root.findtext('Storage/RestURL') - if not text: - msg = _("RestURL is not configured.") - LOG.error(msg) - raise exception.InvalidInput(reason=msg) - - addrs = text.split(';') - addrs = list(set([x.strip() for x in addrs if x.strip()])) - setattr(self.conf, 'san_address', addrs) - - def _san_user(self, xml_root): - text = xml_root.findtext('Storage/UserName') - if not text: - msg = _("UserName is not configured.") - LOG.error(msg) - raise exception.InvalidInput(reason=msg) - - user = base64.b64decode(text[4:]) - setattr(self.conf, 'san_user', user) - - def _san_password(self, xml_root): - text = xml_root.findtext('Storage/UserPassword') - if not text: - msg = _("UserPassword is not configured.") - LOG.error(msg) - raise exception.InvalidInput(reason=msg) - - pwd = base64.b64decode(text[4:]) - setattr(self.conf, 'san_password', pwd) - - def _san_product(self, xml_root): - text = xml_root.findtext('Storage/Product') - if not text: - msg = _("SAN product is not configured.") - LOG.error(msg) - raise exception.InvalidInput(reason=msg) - - product = text.strip() - setattr(self.conf, 'san_product', product) - - def _san_protocol(self, xml_root): - text = xml_root.findtext('Storage/Protocol') - if not text: - msg = _("SAN protocol is not configured.") - LOG.error(msg) - raise exception.InvalidInput(reason=msg) - - protocol = text.strip() - setattr(self.conf, 'san_protocol', protocol) - - def _lun_type(self, xml_root): - lun_type = constants.PRODUCT_LUN_TYPE.get(self.conf.san_product, - 'Thick') - - def _verify_conf_lun_type(lun_type): - if lun_type not in constants.LUN_TYPE_MAP: - msg = _("Invalid lun type %s is configured.") % lun_type - LOG.error(msg) - raise exception.InvalidInput(reason=msg) - - if self.conf.san_product in constants.PRODUCT_LUN_TYPE: - product_lun_type = constants.PRODUCT_LUN_TYPE[ - self.conf.san_product] - if lun_type != product_lun_type: - msg = _("%(array)s array requires %(valid)s lun type, " - "but %(conf)s is specified.") % { - 'array': 
self.conf.san_product, - 'valid': product_lun_type, - 'conf': lun_type} - LOG.error(msg) - raise exception.InvalidInput(reason=msg) - - text = xml_root.findtext('LUN/LUNType') - if text: - lun_type = text.strip() - _verify_conf_lun_type(lun_type) - - lun_type = constants.LUN_TYPE_MAP[lun_type] - setattr(self.conf, 'lun_type', lun_type) - - def _lun_ready_wait_interval(self, xml_root): - text = xml_root.findtext('LUN/LUNReadyWaitInterval') - interval = text.strip() if text else constants.DEFAULT_WAIT_INTERVAL - setattr(self.conf, 'lun_ready_wait_interval', int(interval)) - - def _lun_copy_wait_interval(self, xml_root): - text = xml_root.findtext('LUN/LUNcopyWaitInterval') - interval = text.strip() if text else constants.DEFAULT_WAIT_INTERVAL - setattr(self.conf, 'lun_copy_wait_interval', int(interval)) - - def _lun_timeout(self, xml_root): - text = xml_root.findtext('LUN/Timeout') - interval = text.strip() if text else constants.DEFAULT_WAIT_TIMEOUT - setattr(self.conf, 'lun_timeout', int(interval)) - - def _lun_write_type(self, xml_root): - text = xml_root.findtext('LUN/WriteType') - write_type = text.strip() if text else '1' - setattr(self.conf, 'lun_write_type', write_type) - - def _lun_prefetch(self, xml_root): - prefetch_type = '3' - prefetch_value = '0' - - node = xml_root.find('LUN/Prefetch') - if (node is not None - and node.attrib['Type'] - and node.attrib['Value']): - prefetch_type = node.attrib['Type'].strip() - if prefetch_type not in ['0', '1', '2', '3']: - msg = (_( - "Invalid prefetch type '%s' is configured. 
" - "PrefetchType must be in 0,1,2,3.") % prefetch_type) - LOG.error(msg) - raise exception.InvalidInput(reason=msg) - - prefetch_value = node.attrib['Value'].strip() - factor = {'1': 2} - factor = int(factor.get(prefetch_type, '1')) - prefetch_value = int(prefetch_value) * factor - prefetch_value = six.text_type(prefetch_value) - - setattr(self.conf, 'lun_prefetch_type', prefetch_type) - setattr(self.conf, 'lun_prefetch_value', prefetch_value) - - def _lun_policy(self, xml_root): - setattr(self.conf, 'lun_policy', '0') - - def _lun_read_cache_policy(self, xml_root): - setattr(self.conf, 'lun_read_cache_policy', '2') - - def _lun_write_cache_policy(self, xml_root): - setattr(self.conf, 'lun_write_cache_policy', '5') - - def _storage_pools(self, xml_root): - nodes = xml_root.findall('LUN/StoragePool') - if not nodes: - msg = _('Storage pool is not configured.') - LOG.error(msg) - raise exception.InvalidInput(reason=msg) - - texts = [x.text for x in nodes] - merged_text = ';'.join(texts) - pools = set(x.strip() for x in merged_text.split(';') if x.strip()) - if not pools: - msg = _('Invalid storage pool is configured.') - LOG.error(msg) - raise exception.InvalidInput(msg) - - setattr(self.conf, 'storage_pools', list(pools)) - - def _iscsi_default_target_ip(self, xml_root): - text = xml_root.findtext('iSCSI/DefaultTargetIP') - target_ip = text.split() if text else [] - setattr(self.conf, 'iscsi_default_target_ip', target_ip) - - def _iscsi_info(self, xml_root): - nodes = xml_root.findall('iSCSI/Initiator') - if nodes is None: - setattr(self.conf, 'iscsi_info', []) - return - - iscsi_info = [] - for node in nodes: - props = {} - for item in node.items(): - props[item[0].strip()] = item[1].strip() - - iscsi_info.append(props) - - setattr(self.conf, 'iscsi_info', iscsi_info) - - def _parse_rmt_iscsi_info(self, iscsi_info): - if not (iscsi_info and iscsi_info.strip()): - return [] - - # Consider iscsi_info value: - # ' {Name:xxx ;;TargetPortGroup: xxx};\n' - # 
'{Name:\t\rxxx;CHAPinfo: mm-usr#mm-pwd} ' - - # Step 1, ignore whitespace characters, convert to: - # '{Name:xxx;;TargetPortGroup:xxx};{Name:xxx;CHAPinfo:mm-usr#mm-pwd}' - iscsi_info = ''.join(iscsi_info.split()) - - # Step 2, make initiators configure list, convert to: - # ['Name:xxx;;TargetPortGroup:xxx', 'Name:xxx;CHAPinfo:mm-usr#mm-pwd'] - initiator_infos = iscsi_info[1:-1].split('};{') - - # Step 3, get initiator configure pairs, convert to: - # [['Name:xxx', '', 'TargetPortGroup:xxx'], - # ['Name:xxx', 'CHAPinfo:mm-usr#mm-pwd']] - initiator_infos = map(lambda x: x.split(';'), initiator_infos) - - # Step 4, remove invalid configure pairs, convert to: - # [['Name:xxx', 'TargetPortGroup:xxx'], - # ['Name:xxx', 'CHAPinfo:mm-usr#mm-pwd']] - initiator_infos = map(lambda x: [y for y in x if y], - initiator_infos) - - # Step 5, make initiators configure dict, convert to: - # [{'TargetPortGroup': 'xxx', 'Name': 'xxx'}, - # {'Name': 'xxx', 'CHAPinfo': 'mm-usr#mm-pwd'}] - get_opts = lambda x: x.split(':', 1) - initiator_infos = map(lambda x: dict(map(get_opts, x)), - initiator_infos) - # Convert generator to list for py3 compatibility. 
- initiator_infos = list(initiator_infos) - - # Step 6, replace CHAPinfo 'user#pwd' to 'user;pwd' - key = 'CHAPinfo' - for info in initiator_infos: - if key in info: - info[key] = info[key].replace('#', ';', 1) - - return initiator_infos - - def get_replication_devices(self): - devs = self.conf.safe_get('replication_device') - if not devs: - return [] - - devs_config = [] - for dev in devs: - dev_config = {} - dev_config['backend_id'] = dev['backend_id'] - dev_config['san_address'] = dev['san_address'].split(';') - dev_config['san_user'] = dev['san_user'] - dev_config['san_password'] = dev['san_password'] - dev_config['storage_pool'] = dev['storage_pool'].split(';') - dev_config['iscsi_info'] = self._parse_rmt_iscsi_info( - dev.get('iscsi_info')) - dev_config['iscsi_default_target_ip'] = ( - dev['iscsi_default_target_ip'].split(';') - if 'iscsi_default_target_ip' in dev - else []) - devs_config.append(dev_config) - - return devs_config - - def get_local_device(self): - dev_config = { - 'backend_id': "default", - 'san_address': self.conf.san_address, - 'san_user': self.conf.san_user, - 'san_password': self.conf.san_password, - 'storage_pool': self.conf.storage_pools, - 'iscsi_info': self.conf.iscsi_info, - 'iscsi_default_target_ip': self.conf.iscsi_default_target_ip, - } - return dev_config diff --git a/cinder/volume/drivers/huawei/huawei_driver.py b/cinder/volume/drivers/huawei/huawei_driver.py deleted file mode 100644 index 3657e90d0..000000000 --- a/cinder/volume/drivers/huawei/huawei_driver.py +++ /dev/null @@ -1,2416 +0,0 @@ -# Copyright (c) 2016 Huawei Technologies Co., Ltd. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. 
You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import collections -import json -import math -import re -import six -import uuid - -from oslo_config import cfg -from oslo_log import log as logging -from oslo_utils import excutils -from oslo_utils import units - -from cinder import context -from cinder import exception -from cinder.i18n import _ -from cinder import interface -from cinder.objects import fields -from cinder import utils -from cinder.volume import configuration -from cinder.volume import driver -from cinder.volume.drivers.huawei import constants -from cinder.volume.drivers.huawei import fc_zone_helper -from cinder.volume.drivers.huawei import huawei_conf -from cinder.volume.drivers.huawei import huawei_utils -from cinder.volume.drivers.huawei import hypermetro -from cinder.volume.drivers.huawei import replication -from cinder.volume.drivers.huawei import rest_client -from cinder.volume.drivers.huawei import smartx -from cinder.volume import utils as volume_utils -from cinder.volume import volume_types -from cinder.zonemanager import utils as fczm_utils - -LOG = logging.getLogger(__name__) - -huawei_opts = [ - cfg.StrOpt('cinder_huawei_conf_file', - default='/etc/cinder/cinder_huawei_conf.xml', - help='The configuration file for the Cinder Huawei driver.'), - cfg.StrOpt('hypermetro_devices', - default=None, - help='The remote device hypermetro will use.'), - cfg.StrOpt('metro_san_user', - default=None, - help='The remote metro device san user.'), - cfg.StrOpt('metro_san_password', - default=None, - secret=True, - help='The remote metro device san password.'), - 
cfg.StrOpt('metro_domain_name', - default=None, - help='The remote metro device domain name.'), - cfg.StrOpt('metro_san_address', - default=None, - help='The remote metro device request url.'), - cfg.StrOpt('metro_storage_pools', - default=None, - help='The remote metro device pool names.'), -] - -CONF = cfg.CONF -CONF.register_opts(huawei_opts, group=configuration.SHARED_CONF_GROUP) - -snap_attrs = ('id', 'volume_id', 'volume', 'provider_location') -Snapshot = collections.namedtuple('Snapshot', snap_attrs) -vol_attrs = ('id', 'lun_type', 'provider_location', 'metadata') -Volume = collections.namedtuple('Volume', vol_attrs) - - -class HuaweiBaseDriver(driver.VolumeDriver): - - # ThirdPartySytems wiki page - CI_WIKI_NAME = "Huawei_volume_CI" - - def __init__(self, *args, **kwargs): - super(HuaweiBaseDriver, self).__init__(*args, **kwargs) - - if not self.configuration: - msg = _('Configuration is not found.') - raise exception.InvalidInput(reason=msg) - - self.active_backend_id = kwargs.get('active_backend_id') - - self.configuration.append_config_values(huawei_opts) - self.huawei_conf = huawei_conf.HuaweiConf(self.configuration) - self.support_func = None - self.metro_flag = False - self.replica = None - - def check_func_support(self, obj_name): - try: - self.client._get_object_count(obj_name) - return True - except Exception: - return False - - def get_local_and_remote_dev_conf(self): - self.loc_dev_conf = self.huawei_conf.get_local_device() - - # Now just support one replication device. - replica_devs = self.huawei_conf.get_replication_devices() - self.replica_dev_conf = replica_devs[0] if replica_devs else {} - - def get_local_and_remote_client_conf(self): - if self.active_backend_id: - return self.replica_dev_conf, self.loc_dev_conf - else: - return self.loc_dev_conf, self.replica_dev_conf - - def do_setup(self, context): - """Instantiate common class and login storage system.""" - # Set huawei private configuration into Configuration object. 
- self.huawei_conf.update_config_value() - - self.get_local_and_remote_dev_conf() - client_conf, replica_client_conf = ( - self.get_local_and_remote_client_conf()) - - # init local client - if not client_conf: - msg = _('Get active client failed.') - LOG.error(msg) - raise exception.VolumeBackendAPIException(data=msg) - - self.client = rest_client.RestClient(self.configuration, - **client_conf) - self.client.login() - - # init remote client - metro_san_address = self.configuration.safe_get("metro_san_address") - metro_san_user = self.configuration.safe_get("metro_san_user") - metro_san_password = self.configuration.safe_get("metro_san_password") - if metro_san_address and metro_san_user and metro_san_password: - metro_san_address = metro_san_address.split(";") - self.rmt_client = rest_client.RestClient(self.configuration, - metro_san_address, - metro_san_user, - metro_san_password) - - self.rmt_client.login() - self.metro_flag = True - else: - self.metro_flag = False - LOG.warning("Remote device not configured in cinder.conf") - # init replication manager - if replica_client_conf: - self.replica_client = rest_client.RestClient(self.configuration, - **replica_client_conf) - self.replica_client.try_login() - self.replica = replication.ReplicaPairManager(self.client, - self.replica_client, - self.configuration) - - def check_for_setup_error(self): - pass - - def get_volume_stats(self, refresh=False): - """Get volume status and reload huawei config file.""" - self.huawei_conf.update_config_value() - stats = self.client.update_volume_stats() - stats = self.update_support_capability(stats) - - if self.replica: - stats = self.replica.update_replica_capability(stats) - targets = [self.replica_dev_conf['backend_id']] - stats['replication_targets'] = targets - stats['replication_enabled'] = True - - return stats - - def update_support_capability(self, stats): - for pool in stats['pools']: - pool['smartpartition'] = ( - self.check_func_support("SMARTCACHEPARTITION")) - 
pool['smartcache'] = self.check_func_support("smartcachepool") - pool['QoS_support'] = self.check_func_support("ioclass") - pool['splitmirror'] = self.check_func_support("splitmirror") - pool['luncopy'] = self.check_func_support("luncopy") - pool['thick_provisioning_support'] = True - pool['thin_provisioning_support'] = True - pool['smarttier'] = True - pool['consistent_group_snapshot_enabled'] = True - - if self.configuration.san_product == "Dorado": - pool['smarttier'] = False - pool['thick_provisioning_support'] = False - - if self.metro_flag: - pool['hypermetro'] = self.check_func_support("HyperMetroPair") - - # assign the support function to global parameter. - self.support_func = pool - - return stats - - def _get_volume_type(self, volume): - volume_type = None - type_id = volume.volume_type_id - if type_id: - ctxt = context.get_admin_context() - volume_type = volume_types.get_volume_type(ctxt, type_id) - - return volume_type - - def _get_volume_params(self, volume_type): - """Return the parameters for creating the volume.""" - specs = {} - if volume_type: - specs = dict(volume_type).get('extra_specs') - - opts = self._get_volume_params_from_specs(specs) - return opts - - def _get_group_type(self, group): - opts = [] - vol_types = group.volume_types - - for vol_type in vol_types: - specs = vol_type.extra_specs - opts.append(self._get_volume_params_from_specs(specs)) - - return opts - - def _check_volume_type_support(self, opts, vol_type): - if not opts: - return False - - support = True - for opt in opts: - if opt.get(vol_type) != 'true': - support = False - break - - return support - - def _get_volume_params_from_specs(self, specs): - """Return the volume parameters from extra specs.""" - opts_capability = { - 'smarttier': False, - 'smartcache': False, - 'smartpartition': False, - 'thin_provisioning_support': False, - 'thick_provisioning_support': False, - 'hypermetro': False, - 'replication_enabled': False, - 'replication_type': 'async', - } - - opts_value 
= { - 'policy': None, - 'partitionname': None, - 'cachename': None, - } - - opts_associate = { - 'smarttier': 'policy', - 'smartcache': 'cachename', - 'smartpartition': 'partitionname', - } - - opts = self._get_opts_from_specs(opts_capability, - opts_value, - opts_associate, - specs) - opts = smartx.SmartX().get_smartx_specs_opts(opts) - opts = replication.get_replication_opts(opts) - LOG.debug('volume opts %(opts)s.', {'opts': opts}) - return opts - - def _get_opts_from_specs(self, opts_capability, opts_value, - opts_associate, specs): - """Get the well defined extra specs.""" - opts = {} - opts.update(opts_capability) - opts.update(opts_value) - - for key, value in specs.items(): - # Get the scope, if it is using scope format. - scope = None - key_split = key.split(':') - if len(key_split) > 2 and key_split[0] != "capabilities": - continue - - if len(key_split) == 1: - key = key_split[0].lower() - else: - scope = key_split[0].lower() - key = key_split[1].lower() - - if ((not scope or scope == 'capabilities') - and key in opts_capability): - words = value.split() - if words and len(words) == 2 and words[0] in ('', ''): - opts[key] = words[1].lower() - elif key == 'replication_type': - LOG.error("Extra specs must be specified as " - "replication_type=' sync' or " - "' async'.") - else: - LOG.error("Extra specs must be specified as " - "capabilities:%s=' True'.", key) - - if ((scope in opts_capability) - and (key in opts_value) - and (scope in opts_associate) - and (opts_associate[scope] == key)): - opts[key] = value - - return opts - - def _get_lun_params(self, volume, opts): - pool_name = volume_utils.extract_host(volume.host, level='pool') - params = { - 'TYPE': '11', - 'NAME': huawei_utils.encode_name(volume.id), - 'PARENTTYPE': '216', - 'PARENTID': self.client.get_pool_id(pool_name), - 'DESCRIPTION': volume.name, - 'ALLOCTYPE': opts.get('LUNType', self.configuration.lun_type), - 'CAPACITY': huawei_utils.get_volume_size(volume), - 'WRITEPOLICY': 
self.configuration.lun_write_type, - 'PREFETCHPOLICY': self.configuration.lun_prefetch_type, - 'PREFETCHVALUE': self.configuration.lun_prefetch_value, - 'DATATRANSFERPOLICY': - opts.get('policy', self.configuration.lun_policy), - 'READCACHEPOLICY': self.configuration.lun_read_cache_policy, - 'WRITECACHEPOLICY': self.configuration.lun_write_cache_policy, } - - LOG.info('volume: %(volume)s, lun params: %(params)s.', - {'volume': volume.id, 'params': params}) - return params - - def _create_volume(self, lun_params): - # Create LUN on the array. - lun_info = self.client.create_lun(lun_params) - metadata = {'huawei_lun_id': lun_info['ID'], - 'huawei_lun_wwn': lun_info['WWN']} - model_update = {'metadata': metadata} - - return lun_info, model_update - - def _create_base_type_volume(self, opts, volume, volume_type): - """Create volume and add some base type. - - Base type is the service type which doesn't conflict with the other. - """ - lun_params = self._get_lun_params(volume, opts) - lun_info, model_update = self._create_volume(lun_params) - lun_id = lun_info['ID'] - - try: - qos = smartx.SmartQos.get_qos_by_volume_type(volume_type) - if qos: - if not self.support_func.get('QoS_support'): - msg = (_("Can't support qos on the array")) - LOG.error(msg) - raise exception.VolumeBackendAPIException(data=msg) - else: - smart_qos = smartx.SmartQos(self.client) - smart_qos.add(qos, lun_id) - - smartpartition = smartx.SmartPartition(self.client) - smartpartition.add(opts, lun_id) - - smartcache = smartx.SmartCache(self.client) - smartcache.add(opts, lun_id) - except Exception as err: - self._delete_lun_with_check(lun_id) - msg = _('Create volume error. Because %s.') % six.text_type(err) - raise exception.VolumeBackendAPIException(data=msg) - - return lun_params, lun_info, model_update - - def _add_extend_type_to_volume(self, opts, lun_params, lun_info, - model_update): - """Add the extend type. - - Extend type is the service type which may conflict with the other. 
- So add it after those services. - """ - lun_id = lun_info['ID'] - if opts.get('hypermetro') == 'true': - metro = hypermetro.HuaweiHyperMetro(self.client, - self.rmt_client, - self.configuration) - try: - metro_info = metro.create_hypermetro(lun_id, lun_params) - model_update['metadata'].update(metro_info) - except exception.VolumeBackendAPIException as err: - LOG.error('Create hypermetro error: %s.', err) - self._delete_lun_with_check(lun_id) - raise - - if opts.get('replication_enabled') == 'true': - replica_model = opts.get('replication_type') - try: - replica_info = self.replica.create_replica(lun_info, - replica_model) - model_update.update(replica_info) - except Exception as err: - LOG.exception('Create replication volume error.') - self._delete_lun_with_check(lun_id) - raise - - return model_update - - def create_volume(self, volume): - """Create a volume.""" - volume_type = self._get_volume_type(volume) - opts = self._get_volume_params(volume_type) - if (opts.get('hypermetro') == 'true' - and opts.get('replication_enabled') == 'true'): - err_msg = _("Hypermetro and Replication can not be " - "used in the same volume_type.") - LOG.error(err_msg) - raise exception.VolumeBackendAPIException(data=err_msg) - - lun_params, lun_info, model_update = ( - self._create_base_type_volume(opts, volume, volume_type)) - - model_update = self._add_extend_type_to_volume(opts, lun_params, - lun_info, model_update) - - model_update['provider_location'] = huawei_utils.to_string( - **model_update.pop('metadata')) - - return model_update - - def _delete_volume(self, volume): - metadata = huawei_utils.get_lun_metadata(volume) - lun_id = metadata.get('huawei_lun_id') - if not lun_id: - return - - lun_group_ids = self.client.get_lungroupids_by_lunid(lun_id) - if lun_group_ids and len(lun_group_ids) == 1: - self.client.remove_lun_from_lungroup(lun_group_ids[0], lun_id) - - self.client.delete_lun(lun_id) - - def delete_volume(self, volume): - """Delete a volume. 
- - Three steps: - Firstly, remove associate from lungroup. - Secondly, remove associate from QoS policy. - Thirdly, remove the lun. - """ - lun_id = self._check_volume_exist_on_array( - volume, constants.VOLUME_NOT_EXISTS_WARN) - if not lun_id: - return - - if self.support_func.get('QoS_support'): - qos_id = self.client.get_qosid_by_lunid(lun_id) - if qos_id: - smart_qos = smartx.SmartQos(self.client) - smart_qos.remove(qos_id, lun_id) - - metadata = huawei_utils.get_lun_metadata(volume) - if 'hypermetro_id' in metadata: - metro = hypermetro.HuaweiHyperMetro(self.client, - self.rmt_client, - self.configuration) - try: - metro.delete_hypermetro(volume) - except exception.VolumeBackendAPIException as err: - LOG.error('Delete hypermetro error: %s.', err) - # We have checked the LUN WWN above, - # no need to check again here. - self._delete_volume(volume) - raise - - # Delete a replication volume - replica_data = volume.replication_driver_data - if replica_data: - try: - self.replica.delete_replica(volume) - except exception.VolumeBackendAPIException as err: - with excutils.save_and_reraise_exception(): - LOG.exception("Delete replication error.") - self._delete_volume(volume) - - self._delete_volume(volume) - - def _delete_lun_with_check(self, lun_id, lun_wwn=None): - if not lun_id: - return - - if self.client.check_lun_exist(lun_id, lun_wwn): - if self.support_func.get('QoS_support'): - qos_id = self.client.get_qosid_by_lunid(lun_id) - if qos_id: - smart_qos = smartx.SmartQos(self.client) - smart_qos.remove(qos_id, lun_id) - - self.client.delete_lun(lun_id) - - def _is_lun_migration_complete(self, src_id, dst_id): - result = self.client.get_lun_migration_task() - found_migration_task = False - if 'data' not in result: - return False - - for item in result['data']: - if (src_id == item['PARENTID'] and dst_id == item['TARGETLUNID']): - found_migration_task = True - if constants.MIGRATION_COMPLETE == item['RUNNINGSTATUS']: - return True - if constants.MIGRATION_FAULT 
== item['RUNNINGSTATUS']: - msg = _("Lun migration error.") - LOG.error(msg) - raise exception.VolumeBackendAPIException(data=msg) - - if not found_migration_task: - err_msg = _("Cannot find migration task.") - LOG.error(err_msg) - raise exception.VolumeBackendAPIException(data=err_msg) - - return False - - def _is_lun_migration_exist(self, src_id, dst_id): - try: - result = self.client.get_lun_migration_task() - except Exception: - LOG.error("Get LUN migration error.") - return False - - if 'data' in result: - for item in result['data']: - if (src_id == item['PARENTID'] - and dst_id == item['TARGETLUNID']): - return True - return False - - def _migrate_lun(self, src_id, dst_id): - try: - self.client.create_lun_migration(src_id, dst_id) - - def _is_lun_migration_complete(): - return self._is_lun_migration_complete(src_id, dst_id) - - wait_interval = constants.MIGRATION_WAIT_INTERVAL - huawei_utils.wait_for_condition(_is_lun_migration_complete, - wait_interval, - self.configuration.lun_timeout) - # Clean up if migration failed. 
- except Exception as ex: - raise exception.VolumeBackendAPIException(data=ex) - finally: - if self._is_lun_migration_exist(src_id, dst_id): - self.client.delete_lun_migration(src_id, dst_id) - self._delete_lun_with_check(dst_id) - - LOG.debug("Migrate lun %s successfully.", src_id) - return True - - def _wait_volume_ready(self, lun_id): - wait_interval = self.configuration.lun_ready_wait_interval - - def _volume_ready(): - result = self.client.get_lun_info(lun_id) - if (result['HEALTHSTATUS'] == constants.STATUS_HEALTH - and result['RUNNINGSTATUS'] == constants.STATUS_VOLUME_READY): - return True - return False - - huawei_utils.wait_for_condition(_volume_ready, - wait_interval, - wait_interval * 10) - - def _get_original_status(self, volume): - return 'in-use' if volume.volume_attachment else 'available' - - def update_migrated_volume(self, ctxt, volume, new_volume, - original_volume_status=None): - original_name = huawei_utils.encode_name(volume.id) - current_name = huawei_utils.encode_name(new_volume.id) - - lun_id = self.client.get_lun_id_by_name(current_name) - try: - self.client.rename_lun(lun_id, original_name) - except exception.VolumeBackendAPIException: - LOG.error('Unable to rename lun %s on array.', current_name) - return {'_name_id': new_volume.name_id} - - LOG.debug("Renamed lun from %(current_name)s to %(original_name)s " - "successfully.", - {'current_name': current_name, - 'original_name': original_name}) - - model_update = {'_name_id': None} - - return model_update - - def migrate_volume(self, ctxt, volume, host, new_type=None): - """Migrate a volume within the same array.""" - self._check_volume_exist_on_array(volume, - constants.VOLUME_NOT_EXISTS_RAISE) - - # NOTE(jlc): Replication volume can't migrate. But retype - # can remove replication relationship first then do migrate. - # So don't add this judgement into _check_migration_valid(). 
- volume_type = self._get_volume_type(volume) - opts = self._get_volume_params(volume_type) - if opts.get('replication_enabled') == 'true': - return (False, None) - - return self._migrate_volume(volume, host, new_type) - - def _check_migration_valid(self, host, volume): - if 'pool_name' not in host['capabilities']: - return False - - target_device = host['capabilities']['location_info'] - - # Source and destination should be on same array. - if target_device != self.client.device_id: - return False - - # Same protocol should be used if volume is in-use. - protocol = self.configuration.san_protocol - if (host['capabilities']['storage_protocol'] != protocol - and self._get_original_status(volume) == 'in-use'): - return False - - pool_name = host['capabilities']['pool_name'] - if len(pool_name) == 0: - return False - - return True - - def _migrate_volume(self, volume, host, new_type=None): - if not self._check_migration_valid(host, volume): - return (False, None) - - type_id = volume.volume_type_id - - volume_type = None - if type_id: - volume_type = volume_types.get_volume_type(None, type_id) - - pool_name = host['capabilities']['pool_name'] - pools = self.client.get_all_pools() - pool_info = self.client.get_pool_info(pool_name, pools) - src_volume_name = huawei_utils.encode_name(volume.id) - dst_volume_name = six.text_type(hash(src_volume_name)) - - metadata = huawei_utils.get_lun_metadata(volume) - src_id = metadata['huawei_lun_id'] - - opts = None - qos = None - if new_type: - # If new type exists, use new type. 
- new_specs = new_type['extra_specs'] - opts = self._get_volume_params_from_specs(new_specs) - if 'LUNType' not in opts: - opts['LUNType'] = self.configuration.lun_type - - qos = smartx.SmartQos.get_qos_by_volume_type(new_type) - elif volume_type: - qos = smartx.SmartQos.get_qos_by_volume_type(volume_type) - - if not opts: - opts = self._get_volume_params(volume_type) - - lun_info = self.client.get_lun_info(src_id) - - if opts['policy']: - policy = opts['policy'] - else: - policy = lun_info.get('DATATRANSFERPOLICY', - self.configuration.lun_policy) - - lun_params = { - 'NAME': dst_volume_name, - 'PARENTID': pool_info['ID'], - 'DESCRIPTION': lun_info['DESCRIPTION'], - 'ALLOCTYPE': opts.get('LUNType', lun_info['ALLOCTYPE']), - 'CAPACITY': lun_info['CAPACITY'], - 'WRITEPOLICY': lun_info['WRITEPOLICY'], - 'PREFETCHPOLICY': lun_info['PREFETCHPOLICY'], - 'PREFETCHVALUE': lun_info['PREFETCHVALUE'], - 'DATATRANSFERPOLICY': policy, - 'READCACHEPOLICY': lun_info.get( - 'READCACHEPOLICY', - self.configuration.lun_read_cache_policy), - 'WRITECACHEPOLICY': lun_info.get( - 'WRITECACHEPOLICY', - self.configuration.lun_write_cache_policy), - 'OWNINGCONTROLLER': lun_info['OWNINGCONTROLLER'], } - - for item in lun_params.keys(): - if lun_params.get(item) == '--': - del lun_params[item] - - lun_info = self.client.create_lun(lun_params) - lun_id = lun_info['ID'] - - if qos: - LOG.info('QoS: %s.', qos) - SmartQos = smartx.SmartQos(self.client) - SmartQos.add(qos, lun_id) - if opts: - smartpartition = smartx.SmartPartition(self.client) - smartpartition.add(opts, lun_id) - smartcache = smartx.SmartCache(self.client) - smartcache.add(opts, lun_id) - - dst_id = lun_info['ID'] - self._wait_volume_ready(dst_id) - moved = self._migrate_lun(src_id, dst_id) - - return moved, {} - - def create_volume_from_snapshot(self, volume, snapshot): - """Create a volume from a snapshot. - - We use LUNcopy to copy a new volume from snapshot. - The time needed increases as volume size does. 
- """ - volume_type = self._get_volume_type(volume) - opts = self._get_volume_params(volume_type) - if (opts.get('hypermetro') == 'true' - and opts.get('replication_enabled') == 'true'): - err_msg = _("Hypermetro and Replication can not be " - "used in the same volume_type.") - LOG.error(err_msg) - raise exception.VolumeBackendAPIException(data=err_msg) - - snapshotname = huawei_utils.encode_name(snapshot.id) - metadata = huawei_utils.get_snapshot_metadata(snapshot) - snapshot_id = metadata.get('huawei_snapshot_id') - if snapshot_id is None: - snapshot_id = self.client.get_snapshot_id_by_name(snapshotname) - if snapshot_id is None: - err_msg = (_( - 'create_volume_from_snapshot: Snapshot %(name)s ' - 'does not exist.') - % {'name': snapshotname}) - LOG.error(err_msg) - raise exception.VolumeBackendAPIException(data=err_msg) - - lun_params, lun_info, model_update = ( - self._create_base_type_volume(opts, volume, volume_type)) - - tgt_lun_id = lun_info['ID'] - luncopy_name = huawei_utils.encode_name(volume.id) - LOG.info( - 'create_volume_from_snapshot: src_lun_id: %(src_lun_id)s, ' - 'tgt_lun_id: %(tgt_lun_id)s, copy_name: %(copy_name)s.', - {'src_lun_id': snapshot_id, - 'tgt_lun_id': tgt_lun_id, - 'copy_name': luncopy_name}) - - wait_interval = self.configuration.lun_ready_wait_interval - - def _volume_ready(): - result = self.client.get_lun_info(tgt_lun_id) - - if (result['HEALTHSTATUS'] == constants.STATUS_HEALTH - and result['RUNNINGSTATUS'] == constants.STATUS_VOLUME_READY): - return True - return False - - huawei_utils.wait_for_condition(_volume_ready, - wait_interval, - wait_interval * 10) - - self._copy_volume(volume, luncopy_name, - snapshot_id, tgt_lun_id) - - # NOTE(jlc): Actually, we just only support replication here right - # now, not hypermetro. 
- model_update = self._add_extend_type_to_volume(opts, lun_params, - lun_info, model_update) - model_update['provider_location'] = huawei_utils.to_string( - **model_update.pop('metadata')) - - return model_update - - def create_cloned_volume(self, volume, src_vref): - """Clone a new volume from an existing volume.""" - self._check_volume_exist_on_array(src_vref, - constants.VOLUME_NOT_EXISTS_RAISE) - - # Form the snapshot structure. - snapshot = Snapshot(id=uuid.uuid4().__str__(), - volume_id=src_vref.id, - volume=src_vref, - provider_location=None) - - # Create snapshot. - self.create_snapshot(snapshot) - - try: - # Create volume from snapshot. - model_update = self.create_volume_from_snapshot(volume, snapshot) - finally: - try: - # Delete snapshot. - self.delete_snapshot(snapshot) - except exception.VolumeBackendAPIException: - LOG.warning( - 'Failure deleting the snapshot %(snapshot_id)s ' - 'of volume %(volume_id)s.', - {'snapshot_id': snapshot.id, - 'volume_id': src_vref.id},) - - return model_update - - def _check_volume_exist_on_array(self, volume, action): - """Check whether the volume exists on the array. - - If the volume exists on the array, return the LUN ID. - If not exists, raise or log warning. - """ - # Firstly, try to find LUN ID by volume.provider_location. - metadata = huawei_utils.get_lun_metadata(volume) - lun_id = metadata.get('huawei_lun_id') - # If LUN ID not recorded, find LUN ID by LUN NAME. 
- if not lun_id: - volume_name = huawei_utils.encode_name(volume.id) - lun_id = self.client.get_lun_id_by_name(volume_name) - if not lun_id: - msg = (_("Volume %s does not exist on the array.") - % volume.id) - if action == constants.VOLUME_NOT_EXISTS_WARN: - LOG.warning(msg) - if action == constants.VOLUME_NOT_EXISTS_RAISE: - raise exception.VolumeBackendAPIException(data=msg) - return - - lun_wwn = metadata.get('huawei_lun_wwn') - if not lun_wwn: - LOG.debug("No LUN WWN recorded for volume %s.", volume.id) - - if not self.client.check_lun_exist(lun_id, lun_wwn): - msg = (_("Volume %s does not exist on the array.") - % volume.id) - if action == constants.VOLUME_NOT_EXISTS_WARN: - LOG.warning(msg) - if action == constants.VOLUME_NOT_EXISTS_RAISE: - raise exception.VolumeBackendAPIException(data=msg) - return - return lun_id - - def extend_volume(self, volume, new_size): - """Extend a volume.""" - lun_id = self._check_volume_exist_on_array( - volume, constants.VOLUME_NOT_EXISTS_RAISE) - - volume_type = self._get_volume_type(volume) - opts = self._get_volume_params(volume_type) - if opts.get('replication_enabled') == 'true': - msg = (_("Can't extend replication volume, volume: %(id)s") % - {"id": volume.id}) - LOG.error(msg) - raise exception.VolumeBackendAPIException(data=msg) - - lun_info = self.client.get_lun_info(lun_id) - old_size = int(lun_info.get('CAPACITY')) - - new_size = int(new_size) * units.Gi / 512 - - if new_size == old_size: - LOG.info("New size is equal to the real size from backend" - " storage, no need to extend." - " realsize: %(oldsize)s, newsize: %(newsize)s.", - {'oldsize': old_size, - 'newsize': new_size}) - return - if new_size < old_size: - msg = (_("New size should be bigger than the real size from " - "backend storage." 
- " realsize: %(oldsize)s, newsize: %(newsize)s."), - {'oldsize': old_size, - 'newsize': new_size}) - LOG.error(msg) - raise exception.VolumeBackendAPIException(data=msg) - volume_name = huawei_utils.encode_name(volume.id) - - LOG.info( - 'Extend volume: %(volumename)s, ' - 'oldsize: %(oldsize)s, newsize: %(newsize)s.', - {'volumename': volume_name, - 'oldsize': old_size, - 'newsize': new_size}) - - self.client.extend_lun(lun_id, new_size) - - def create_snapshot(self, snapshot): - volume = snapshot.volume - if not volume: - msg = (_("Can't get volume id from snapshot, snapshot: %(id)s") - % {"id": snapshot.id}) - LOG.error(msg) - raise exception.VolumeBackendAPIException(data=msg) - - volume_name = huawei_utils.encode_name(snapshot.volume_id) - lun_id = self.client.get_lun_id(volume, volume_name) - snapshot_name = huawei_utils.encode_name(snapshot.id) - snapshot_description = snapshot.id - snapshot_info = self.client.create_snapshot(lun_id, - snapshot_name, - snapshot_description) - snapshot_id = snapshot_info['ID'] - self.client.activate_snapshot(snapshot_id) - - location = huawei_utils.to_string(huawei_snapshot_id=snapshot_id) - return {'provider_location': location, - 'lun_info': snapshot_info} - - def delete_snapshot(self, snapshot): - snapshotname = huawei_utils.encode_name(snapshot.id) - volume_name = huawei_utils.encode_name(snapshot.volume_id) - - LOG.info( - 'stop_snapshot: snapshot name: %(snapshot)s, ' - 'volume name: %(volume)s.', - {'snapshot': snapshotname, - 'volume': volume_name},) - - metadata = huawei_utils.get_snapshot_metadata(snapshot) - snapshot_id = metadata.get('huawei_snapshot_id') - if snapshot_id is None: - snapshot_id = self.client.get_snapshot_id_by_name(snapshotname) - - if snapshot_id and self.client.check_snapshot_exist(snapshot_id): - self.client.stop_snapshot(snapshot_id) - self.client.delete_snapshot(snapshot_id) - else: - LOG.warning("Can't find snapshot on the array.") - - def retype(self, ctxt, volume, new_type, diff, host): - 
"""Convert the volume to be of the new type.""" - LOG.debug("Enter retype: id=%(id)s, new_type=%(new_type)s, " - "diff=%(diff)s, host=%(host)s.", {'id': volume.id, - 'new_type': new_type, - 'diff': diff, - 'host': host}) - self._check_volume_exist_on_array( - volume, constants.VOLUME_NOT_EXISTS_RAISE) - - # Check what changes are needed - migration, change_opts, lun_id = self.determine_changes_when_retype( - volume, new_type, host) - - model_update = {} - replica_enabled_change = change_opts.get('replication_enabled') - replica_type_change = change_opts.get('replication_type') - if replica_enabled_change and replica_enabled_change[0] == 'true': - try: - self.replica.delete_replica(volume) - model_update.update({'replication_status': 'disabled', - 'replication_driver_data': None}) - except exception.VolumeBackendAPIException: - LOG.exception('Retype volume error. ' - 'Delete replication failed.') - return False - - try: - if migration: - LOG.debug("Begin to migrate LUN(id: %(lun_id)s) with " - "change %(change_opts)s.", - {"lun_id": lun_id, "change_opts": change_opts}) - if not self._migrate_volume(volume, host, new_type): - LOG.warning("Storage-assisted migration failed during " - "retype.") - return False - else: - # Modify lun to change policy - self.modify_lun(lun_id, change_opts) - except exception.VolumeBackendAPIException: - LOG.exception('Retype volume error.') - return False - - if replica_enabled_change and replica_enabled_change[1] == 'true': - try: - # If replica_enabled_change is not None, the - # replica_type_change won't be None. See function - # determine_changes_when_retype. - lun_info = self.client.get_lun_info(lun_id) - replica_info = self.replica.create_replica( - lun_info, replica_type_change[1]) - model_update.update(replica_info) - except exception.VolumeBackendAPIException: - LOG.exception('Retype volume error. 
def modify_lun(self, lun_id, change_opts):
    """Apply retype transitions to an existing LUN.

    ``change_opts`` maps feature names ('partitionid', 'cacheid',
    'policy', 'qos') to (old, new) transition pairs; only the entries
    that are present and truthy are acted upon.
    """
    partition_change = change_opts.get('partitionid')
    if partition_change:
        (old_id, old_name), (new_id, new_name) = partition_change
        if old_id:
            self.client.remove_lun_from_partition(lun_id, old_id)
        if new_id:
            self.client.add_lun_to_partition(lun_id, new_id)
        LOG.info("Retype LUN(id: %(lun_id)s) smartpartition from "
                 "(name: %(old_name)s, id: %(old_id)s) to "
                 "(name: %(new_name)s, id: %(new_id)s) success.",
                 {"lun_id": lun_id,
                  "old_id": old_id, "old_name": old_name,
                  "new_id": new_id, "new_name": new_name})

    cache_change = change_opts.get('cacheid')
    if cache_change:
        (old_id, old_name), (new_id, new_name) = cache_change
        if old_id:
            self.client.remove_lun_from_cache(lun_id, old_id)
        if new_id:
            self.client.add_lun_to_cache(lun_id, new_id)
        LOG.info("Retype LUN(id: %(lun_id)s) smartcache from "
                 "(name: %(old_name)s, id: %(old_id)s) to "
                 "(name: %(new_name)s, id: %(new_id)s) successfully.",
                 {'lun_id': lun_id,
                  'old_id': old_id, "old_name": old_name,
                  'new_id': new_id, "new_name": new_name})

    if change_opts.get('policy'):
        old_policy, new_policy = change_opts['policy']
        self.client.change_lun_smarttier(lun_id, new_policy)
        LOG.info("Retype LUN(id: %(lun_id)s) smarttier policy from "
                 "%(old_policy)s to %(new_policy)s success.",
                 {'lun_id': lun_id,
                  'old_policy': old_policy,
                  'new_policy': new_policy})

    if change_opts.get('qos'):
        old_qos, new_qos = change_opts['qos']
        old_qos_id, old_qos_value = old_qos
        if old_qos_id:
            smartx.SmartQos(self.client).remove(old_qos_id, lun_id)
        if new_qos:
            smartx.SmartQos(self.client).add(new_qos, lun_id)
        LOG.info("Retype LUN(id: %(lun_id)s) smartqos from "
                 "%(old_qos_value)s to %(new_qos)s success.",
                 {'lun_id': lun_id,
                  'old_qos_value': old_qos_value,
                  'new_qos': new_qos})

def get_lun_specs(self, lun_id):
    """Read the current smart-feature settings of a LUN from the array.

    Absent or falsy array fields are reported as None so that they
    compare cleanly against requested options.
    """
    lun_info = self.client.get_lun_info(lun_id)
    return {
        'policy': lun_info.get('DATATRANSFERPOLICY') or None,
        'partitionid': lun_info.get('CACHEPARTITIONID') or None,
        'cacheid': lun_info.get('SMARTCACHEPARTITIONID') or None,
        'LUNType': int(lun_info['ALLOCTYPE']),
    }

def _check_capability_support(self, new_opts, new_type):
    """Reject requested options that the array has no license for."""
    new_cache_name = new_opts['cachename']
    if new_cache_name and not self.support_func.get('smartcache'):
        msg = (_(
            "Can't support cache on the array, cache name is: "
            "%(name)s.") % {'name': new_cache_name})
        LOG.error(msg)
        raise exception.VolumeBackendAPIException(data=msg)

    new_partition_name = new_opts['partitionname']
    if new_partition_name and not self.support_func.get('smartpartition'):
        msg = (_(
            "Can't support partition on the array, partition name is: "
            "%(name)s.") % {'name': new_partition_name})
        LOG.error(msg)
        raise exception.VolumeBackendAPIException(data=msg)

    # Tier policy '0' means "no tiering requested", which is always fine.
    if (new_opts['policy'] and new_opts['policy'] != '0'
            and not self.support_func.get('smarttier')):
        msg = (_("Can't support tier on the array."))
        LOG.error(msg)
        raise exception.VolumeBackendAPIException(data=msg)

    new_qos = smartx.SmartQos.get_qos_by_volume_type(new_type)
    if new_qos and not self.support_func.get('QoS_support'):
        msg = (_("Can't support qos on the array."))
        LOG.error(msg)
        raise exception.VolumeBackendAPIException(data=msg)

def _check_needed_changes(self, lun_id, old_opts, new_opts,
                          change_opts, new_type):
    """Fill ``change_opts`` with the smart-feature transitions needed
    to move a LUN from ``old_opts`` to ``new_opts``.
    """
    new_cache_id = None
    new_cache_name = new_opts['cachename']
    if new_cache_name:
        if self.support_func.get('smartcache'):
            new_cache_id = self.client.get_cache_id_by_name(
                new_cache_name)
        if new_cache_id is None:
            msg = (_(
                "Can't find cache name on the array, cache name is: "
                "%(name)s.") % {'name': new_cache_name})
            LOG.error(msg)
            raise exception.VolumeBackendAPIException(data=msg)

    new_partition_id = None
    new_partition_name = new_opts['partitionname']
    if new_partition_name:
        if self.support_func.get('smartpartition'):
            new_partition_id = self.client.get_partition_id_by_name(
                new_partition_name)
        if new_partition_id is None:
            msg = (_(
                "Can't find partition name on the array, partition name "
                "is: %(name)s.") % {'name': new_partition_name})
            LOG.error(msg)
            raise exception.VolumeBackendAPIException(data=msg)

    # smarttier: '--' on the array paired with a None request means
    # "no tiering either way" and is not a change.
    if old_opts['policy'] != new_opts['policy']:
        if not (old_opts['policy'] == '--'
                and new_opts['policy'] is None):
            change_opts['policy'] = (old_opts['policy'],
                                     new_opts['policy'])

    # smartcache ('--' means unset on the array).
    old_cache_id = old_opts['cacheid']
    if old_cache_id == '--':
        old_cache_id = None
    if old_cache_id != new_cache_id:
        old_cache_name = None
        if self.support_func.get('smartcache') and old_cache_id:
            cache_info = self.client.get_cache_info_by_id(old_cache_id)
            old_cache_name = cache_info['NAME']
        change_opts['cacheid'] = ([old_cache_id, old_cache_name],
                                  [new_cache_id, new_cache_name])

    # smartpartition ('--' means unset on the array).
    old_partition_id = old_opts['partitionid']
    if old_partition_id == '--':
        old_partition_id = None
    if old_partition_id != new_partition_id:
        old_partition_name = None
        if self.support_func.get('smartpartition') and old_partition_id:
            partition_info = self.client.get_partition_info_by_id(
                old_partition_id)
            old_partition_name = partition_info['NAME']

        change_opts['partitionid'] = ([old_partition_id,
                                       old_partition_name],
                                      [new_partition_id,
                                       new_partition_name])

    # smartqos
    new_qos = smartx.SmartQos.get_qos_by_volume_type(new_type)
    if not self.support_func.get('QoS_support'):
        if new_qos:
            msg = (_("Can't support qos on the array."))
            LOG.error(msg)
            raise exception.VolumeBackendAPIException(data=msg)
    else:
        old_qos_id = self.client.get_qosid_by_lunid(lun_id)
        old_qos = self._get_qos_specs_from_array(old_qos_id)
        if old_qos != new_qos:
            change_opts['qos'] = ([old_qos_id, old_qos], new_qos)

    return change_opts

def determine_changes_when_retype(self, volume, new_type, host):
    """Work out what a retype to ``new_type``/``host`` requires.

    Returns (migration_needed, change_opts, lun_id).  A host or
    LUN-type change forces a migration; everything else is expressed
    as in-place smart-feature transitions.
    """
    migration = False
    change_opts = {
        'policy': None,
        'partitionid': None,
        'cacheid': None,
        'qos': None,
        'host': None,
        'LUNType': None,
        'replication_enabled': None,
        'replication_type': None,
    }

    metadata = huawei_utils.get_lun_metadata(volume)
    lun_id = metadata['huawei_lun_id']
    old_opts = self.get_lun_specs(lun_id)

    new_specs = new_type['extra_specs']
    new_opts = self._get_volume_params_from_specs(new_specs)

    if 'LUNType' not in new_opts:
        new_opts['LUNType'] = self.configuration.lun_type

    if volume.host != host['host']:
        migration = True
        change_opts['host'] = (volume.host, host['host'])
    if old_opts['LUNType'] != new_opts['LUNType']:
        migration = True
        change_opts['LUNType'] = (old_opts['LUNType'],
                                  new_opts['LUNType'])

    volume_type = self._get_volume_type(volume)
    volume_opts = self._get_volume_params(volume_type)
    if (volume_opts['replication_enabled'] == 'true'
            or new_opts['replication_enabled'] == 'true'):
        # If replication_enabled changes,
        # then replication_type in change_opts will be set.
        change_opts['replication_enabled'] = (
            volume_opts['replication_enabled'],
            new_opts['replication_enabled'])

        change_opts['replication_type'] = (
            volume_opts['replication_type'],
            new_opts['replication_type'])

    change_opts = self._check_needed_changes(lun_id, old_opts, new_opts,
                                             change_opts, new_type)

    LOG.debug("Determine changes when retype. Migration: "
              "%(migration)s, change_opts: %(change_opts)s.",
              {'migration': migration, 'change_opts': change_opts})
    return migration, change_opts, lun_id
Migration: " - "%(migration)s, change_opts: %(change_opts)s.", - {'migration': migration, 'change_opts': change_opts}) - return migration, change_opts, lun_id - - def _get_qos_specs_from_array(self, qos_id): - qos = {} - qos_info = {} - if qos_id: - qos_info = self.client.get_qos_info(qos_id) - - for key, value in qos_info.items(): - key = key.upper() - if key in constants.QOS_KEYS: - if key == 'LATENCY' and value == '0': - continue - else: - qos[key] = value - return qos - - def create_export(self, context, volume, connector): - """Export a volume.""" - pass - - def ensure_export(self, context, volume): - """Synchronously recreate an export for a volume.""" - pass - - def remove_export(self, context, volume): - """Remove an export for a volume.""" - pass - - def create_export_snapshot(self, context, snapshot, connector): - """Export a snapshot.""" - pass - - def remove_export_snapshot(self, context, snapshot): - """Remove an export for a snapshot.""" - pass - - def backup_use_temp_snapshot(self): - # This config option has a default to be False, So just return it. - return self.configuration.safe_get("backup_use_temp_snapshot") - - def _copy_volume(self, volume, copy_name, src_lun, tgt_lun): - metadata = huawei_utils.get_volume_metadata(volume) - copyspeed = metadata.get('copyspeed') - luncopy_id = self.client.create_luncopy(copy_name, - src_lun, - tgt_lun, - copyspeed) - wait_interval = self.configuration.lun_copy_wait_interval - - try: - self.client.start_luncopy(luncopy_id) - - def _luncopy_complete(): - luncopy_info = self.client.get_luncopy_info(luncopy_id) - if luncopy_info['status'] == constants.STATUS_LUNCOPY_READY: - # luncopy_info['status'] means for the running status of - # the luncopy. If luncopy_info['status'] is equal to '40', - # this luncopy is completely ready. - return True - elif luncopy_info['state'] != constants.STATUS_HEALTH: - # luncopy_info['state'] means for the healthy status of the - # luncopy. 
def _check_lun_valid_for_manage(self, lun_info, external_ref):
    """Refuse to import a LUN that is entangled with any array feature.

    Raises ManageExistingInvalidReference when the LUN is in a LUN
    group, unhealthy, or participating in HyperMetro, SplitMirror,
    migration, LUN copy, remote replication, or a LUN mirror.
    """
    lun_id = lun_info.get('ID')
    lun_name = lun_info.get('NAME')

    def _reject(msg):
        # All rejection paths raise the same reference error.
        raise exception.ManageExistingInvalidReference(
            existing_ref=external_ref, reason=msg)

    # Check whether the LUN is already in a LUN group.
    if lun_info.get('ISADD2LUNGROUP') == 'true':
        _reject(_("Can't import LUN %s to Cinder. Already exists in a LUN "
                  "group.") % lun_id)

    # Check whether the LUN is Normal.
    if lun_info.get('HEALTHSTATUS') != constants.STATUS_HEALTH:
        _reject(_("Can't import LUN %s to Cinder. LUN status is not "
                  "normal.") % lun_id)

    # Check whether the LUN exists in a HyperMetroPair.
    if self.support_func.get('hypermetro'):
        try:
            hypermetro_pairs = self.client.get_hypermetro_pairs()
        except exception.VolumeBackendAPIException:
            hypermetro_pairs = []
            LOG.debug("Can't get hypermetro info, pass the check.")

        for pair in hypermetro_pairs:
            if pair.get('LOCALOBJID') == lun_id:
                _reject(_("Can't import LUN %s to Cinder. Already exists "
                          "in a HyperMetroPair.") % lun_id)

    # Check whether the LUN exists in a SplitMirror.
    if self.support_func.get('splitmirror'):
        try:
            split_mirrors = self.client.get_split_mirrors()
        except exception.VolumeBackendAPIException as ex:
            if re.search('License is unavailable', ex.msg):
                # Can't check whether the LUN has SplitMirror with it,
                # just pass the check and log it.
                split_mirrors = []
                LOG.warning('No license for SplitMirror.')
            else:
                msg = _("Failed to get SplitMirror.")
                raise exception.VolumeBackendAPIException(data=msg)

        for mirror in split_mirrors:
            try:
                target_luns = self.client.get_target_luns(mirror.get('ID'))
            except exception.VolumeBackendAPIException:
                msg = _("Failed to get target LUN of SplitMirror.")
                raise exception.VolumeBackendAPIException(data=msg)

            if ((mirror.get('PRILUNID') == lun_id)
                    or (lun_id in target_luns)):
                _reject(_("Can't import LUN %s to Cinder. Already exists "
                          "in a SplitMirror.") % lun_id)

    # Check whether the LUN exists in a migration task.
    try:
        migration_tasks = self.client.get_migration_task()
    except exception.VolumeBackendAPIException as ex:
        if re.search('License is unavailable', ex.msg):
            # Can't check whether the LUN has migration task with it,
            # just pass the check and log it.
            migration_tasks = []
            LOG.warning('No license for migration.')
        else:
            msg = _("Failed to get migration task.")
            raise exception.VolumeBackendAPIException(data=msg)

    for migration in migration_tasks:
        if lun_id in (migration.get('PARENTID'),
                      migration.get('TARGETLUNID')):
            _reject(_("Can't import LUN %s to Cinder. Already exists in a "
                      "migration task.") % lun_id)

    # Check whether the LUN exists in a LUN copy task.
    if self.support_func.get('luncopy'):
        lun_copy = lun_info.get('LUNCOPYIDS')
        # The array reports id lists as '[...]'; anything between the
        # brackets means at least one associated task.
        if lun_copy and lun_copy[1:-1]:
            _reject(_("Can't import LUN %s to Cinder. Already exists in "
                      "a LUN copy task.") % lun_id)

    # Check whether the LUN exists in a remote replication task.
    rmt_replication = lun_info.get('REMOTEREPLICATIONIDS')
    if rmt_replication and rmt_replication[1:-1]:
        _reject(_("Can't import LUN %s to Cinder. Already exists in "
                  "a remote replication task.") % lun_id)

    # Check whether the LUN exists in a LUN mirror.
    if self.client.is_lun_in_mirror(lun_name):
        _reject(_("Can't import LUN %s to Cinder. Already exists in "
                  "a LUN mirror.") % lun_name)
def manage_existing(self, volume, external_ref):
    """Manage an existing volume on the backend storage.

    Validates the referenced LUN (pool membership, feature
    entanglement, volume-type compatibility), retypes it in place if a
    volume type is set, then renames it so Cinder owns it.

    :raises ManageExistingInvalidReference: LUN missing or unsuitable.
    :raises ManageExistingVolumeTypeMismatch: LUN type differs from the
        requested volume type.
    """
    # Check whether the LUN belongs to the specified pool.
    pool = volume_utils.extract_host(volume.host, 'pool')
    LOG.debug("Pool specified is: %s.", pool)
    lun_info = self._get_lun_info_by_ref(external_ref)
    lun_id = lun_info.get('ID')
    description = lun_info.get('DESCRIPTION', '')
    if len(description) <= (
            constants.MAX_VOL_DESCRIPTION - len(volume.name) - 1):
        description = volume.name + ' ' + description

    lun_pool = lun_info.get('PARENTNAME')
    LOG.debug("Storage pool of existing LUN %(lun)s is %(pool)s.",
              {"lun": lun_id, "pool": lun_pool})
    if pool != lun_pool:
        msg = (_("The specified LUN does not belong to the given "
                 "pool: %s.") % pool)
        raise exception.ManageExistingInvalidReference(
            existing_ref=external_ref, reason=msg)

    # Check other stuff to determine whether this LUN can be imported.
    self._check_lun_valid_for_manage(lun_info, external_ref)
    type_id = volume.volume_type_id
    new_opts = None
    if type_id:
        # Handle volume type if specified.
        old_opts = self.get_lun_specs(lun_id)
        volume_type = volume_types.get_volume_type(None, type_id)
        new_specs = volume_type.get('extra_specs')
        new_opts = self._get_volume_params_from_specs(new_specs)
        if ('LUNType' in new_opts and
                old_opts['LUNType'] != new_opts['LUNType']):
            # BUG FIX: the original did `% lun_id` against a named
            # placeholder, which raises TypeError ("format requires a
            # mapping") right where the error message is built.
            msg = (_("Can't import LUN %(lun_id)s to Cinder. "
                     "LUN type mismatched.") % {'lun_id': lun_id})
            raise exception.ManageExistingVolumeTypeMismatch(reason=msg)
        if volume_type:
            self._check_capability_support(new_opts, volume_type)

            change_opts = {'policy': None, 'partitionid': None,
                           'cacheid': None, 'qos': None}

            change_opts = self._check_needed_changes(lun_id, old_opts,
                                                     new_opts, change_opts,
                                                     volume_type)
            self.modify_lun(lun_id, change_opts)

    # Rename the LUN to make it manageable for Cinder.
    new_name = huawei_utils.encode_name(volume.id)
    LOG.debug("Rename LUN %(old_name)s to %(new_name)s.",
              {'old_name': lun_info.get('NAME'),
               'new_name': new_name})
    self.client.rename_lun(lun_id, new_name, description)

    location = huawei_utils.to_string(huawei_lun_id=lun_id,
                                      huawei_lun_wwn=lun_info['WWN'])
    model_update = {'provider_location': location}

    if new_opts and new_opts.get('replication_enabled'):
        LOG.debug("Manage volume need to create replication.")
        try:
            lun_info = self.client.get_lun_info(lun_id)
            replica_info = self.replica.create_replica(
                lun_info, new_opts.get('replication_type'))
            model_update.update(replica_info)
        except exception.VolumeBackendAPIException:
            with excutils.save_and_reraise_exception():
                LOG.exception("Manage exist volume failed.")

    return model_update

def _get_lun_info_by_ref(self, external_ref):
    """Resolve an external reference to the array's LUN info dict.

    :raises ManageExistingInvalidReference: neither source-name nor
        source-id given, or no matching LUN on the array.
    """
    LOG.debug("Get external_ref: %s", external_ref)
    name = external_ref.get('source-name')
    # Renamed from `id` to avoid shadowing the builtin.
    source_id = external_ref.get('source-id')
    if not (name or source_id):
        msg = _('Must specify source-name or source-id.')
        raise exception.ManageExistingInvalidReference(
            existing_ref=external_ref, reason=msg)

    lun_id = source_id or self.client.get_lun_id_by_name(name)
    if not lun_id:
        msg = _("Can't find LUN on the array, please check the "
                "source-name or source-id.")
        raise exception.ManageExistingInvalidReference(
            existing_ref=external_ref, reason=msg)

    return self.client.get_lun_info(lun_id)

def unmanage(self, volume):
    """Export Huawei volume from Cinder."""
    LOG.debug("Unmanage volume: %s.", volume.id)

def manage_existing_get_size(self, volume, external_ref):
    """Get the size of the existing volume, in GiB (rounded up)."""
    lun_info = self._get_lun_info_by_ref(external_ref)
    size = int(math.ceil(lun_info.get('CAPACITY') /
               constants.CAPACITY_UNIT))
    return size

def _check_snapshot_valid_for_manage(self, snapshot_info, external_ref):
    """Refuse to import a snapshot that is unhealthy or exposed."""
    snapshot_id = snapshot_info.get('ID')

    # Check whether the snapshot is normal.
    if snapshot_info.get('HEALTHSTATUS') != constants.STATUS_HEALTH:
        msg = _("Can't import snapshot %s to Cinder. "
                "Snapshot status is not normal"
                " or running status is not online.") % snapshot_id
        raise exception.ManageExistingInvalidReference(
            existing_ref=external_ref, reason=msg)

    if snapshot_info.get('EXPOSEDTOINITIATOR') != 'false':
        msg = _("Can't import snapshot %s to Cinder. "
                "Snapshot is exposed to initiator.") % snapshot_id
        raise exception.ManageExistingInvalidReference(
            existing_ref=external_ref, reason=msg)
def _get_snapshot_info_by_ref(self, external_ref):
    """Resolve an external reference to the array's snapshot info dict.

    :raises ManageExistingInvalidReference: neither source-name nor
        source-id given, or no matching snapshot on the array.
    """
    LOG.debug("Get snapshot external_ref: %s.", external_ref)
    name = external_ref.get('source-name')
    # Renamed from `id` to avoid shadowing the builtin.
    source_id = external_ref.get('source-id')
    if not (name or source_id):
        msg = _('Must specify snapshot source-name or source-id.')
        raise exception.ManageExistingInvalidReference(
            existing_ref=external_ref, reason=msg)

    snapshot_id = source_id or self.client.get_snapshot_id_by_name(name)
    if not snapshot_id:
        msg = _("Can't find snapshot on array, please check the "
                "source-name or source-id.")
        raise exception.ManageExistingInvalidReference(
            existing_ref=external_ref, reason=msg)

    return self.client.get_snapshot_info(snapshot_id)

def manage_existing_snapshot(self, snapshot, existing_ref):
    """Manage an existing array snapshot as a Cinder snapshot.

    Verifies the snapshot belongs to the snapshot's parent volume and
    is importable, renames it for Cinder, and activates it if needed.
    """
    snapshot_info = self._get_snapshot_info_by_ref(existing_ref)
    snapshot_id = snapshot_info.get('ID')
    parent_metadata = huawei_utils.get_lun_metadata(snapshot.volume)
    parent_lun_id = parent_metadata.get('huawei_lun_id')
    if parent_lun_id != snapshot_info.get('PARENTID'):
        # BUG FIX: the original used a comma instead of `%`, so msg was
        # a (string, id) tuple rather than a formatted message.
        msg = (_("Can't import snapshot %s to Cinder. "
                 "Snapshot doesn't belong to volume.") % snapshot_id)
        raise exception.ManageExistingInvalidReference(
            existing_ref=existing_ref, reason=msg)

    # Check whether this snapshot can be imported.
    self._check_snapshot_valid_for_manage(snapshot_info, existing_ref)

    # Rename the snapshot to make it manageable for Cinder.
    description = snapshot.id
    snapshot_name = huawei_utils.encode_name(snapshot.id)
    self.client.rename_snapshot(snapshot_id, snapshot_name, description)
    if snapshot_info.get('RUNNINGSTATUS') != constants.STATUS_ACTIVE:
        self.client.activate_snapshot(snapshot_id)

    LOG.debug("Rename snapshot %(old_name)s to %(new_name)s.",
              {'old_name': snapshot_info.get('NAME'),
               'new_name': snapshot_name})

    location = huawei_utils.to_string(huawei_snapshot_id=snapshot_id)
    return {'provider_location': location}

def manage_existing_snapshot_get_size(self, snapshot, existing_ref):
    """Get the size of the existing snapshot, in GiB (rounded up)."""
    snapshot_info = self._get_snapshot_info_by_ref(existing_ref)
    size = int(math.ceil(snapshot_info.get('USERCAPACITY') /
               constants.CAPACITY_UNIT))
    return size

def unmanage_snapshot(self, snapshot):
    """Unmanage the specified snapshot from Cinder management."""
    LOG.debug("Unmanage snapshot: %s.", snapshot.id)

def remove_host_with_check(self, host_id):
    """Delete the host object only when nothing references it anymore."""
    wwns_in_host = (
        self.client.get_host_fc_initiators(host_id))
    iqns_in_host = (
        self.client.get_host_iscsi_initiators(host_id))
    if not (wwns_in_host or iqns_in_host or
            self.client.is_host_associated_to_hostgroup(host_id)):
        self.client.remove_host(host_id)

@huawei_utils.check_whether_operate_consistency_group
def create_group(self, context, group):
    """Creates a group."""
    model_update = {'status': fields.GroupStatus.AVAILABLE}
    opts = self._get_group_type(group)
    if self._check_volume_type_support(opts, 'hypermetro'):
        metro = hypermetro.HuaweiHyperMetro(self.client,
                                            self.rmt_client,
                                            self.configuration)
        metro.create_consistencygroup(group)
        return model_update

    # Array will create group at create_group_snapshot time. Cinder will
    # maintain the group and volumes relationship in the db.
    return model_update
@huawei_utils.check_whether_operate_consistency_group
def delete_group(self, context, group, volumes):
    """Delete a group and its member volumes.

    HyperMetro groups are delegated to the hypermetro helper; plain
    groups delete each volume individually, recording per-volume
    success or failure.
    """
    opts = self._get_group_type(group)
    if self._check_volume_type_support(opts, 'hypermetro'):
        metro = hypermetro.HuaweiHyperMetro(self.client,
                                            self.rmt_client,
                                            self.configuration)
        return metro.delete_consistencygroup(context, group, volumes)

    model_update = {'status': fields.GroupStatus.DELETED}
    volumes_model_update = []

    for volume_ref in volumes:
        try:
            self.delete_volume(volume_ref)
            status = 'deleted'
        except Exception:
            status = 'error_deleting'
        volumes_model_update.append(
            {'id': volume_ref.id, 'status': status})

    return model_update, volumes_model_update

@huawei_utils.check_whether_operate_consistency_group
def update_group(self, context, group,
                 add_volumes=None, remove_volumes=None):
    """Add/remove volumes to/from a group."""
    model_update = {'status': fields.GroupStatus.AVAILABLE}
    opts = self._get_group_type(group)
    if self._check_volume_type_support(opts, 'hypermetro'):
        metro = hypermetro.HuaweiHyperMetro(self.client,
                                            self.rmt_client,
                                            self.configuration)
        metro.update_consistencygroup(context, group,
                                      add_volumes,
                                      remove_volumes)
        return model_update, None, None

    # Array will create group at create_group_snapshot time. Cinder will
    # maintain the group and volumes relationship in the db.
    return model_update, None, None

@huawei_utils.check_whether_operate_consistency_group
def create_group_from_src(self, context, group, volumes,
                          group_snapshot=None, snapshots=None,
                          source_group=None, source_vols=None):
    """Not supported by this backend; always raises."""
    err_msg = _("Huawei Storage doesn't support create_group_from_src.")
    LOG.error(err_msg)
    raise exception.VolumeBackendAPIException(data=err_msg)

@huawei_utils.check_whether_operate_consistency_group
def create_group_snapshot(self, context, group_snapshot, snapshots):
    """Create group snapshot."""
    LOG.info('Create group snapshot for group'
             ': %(group_id)s', {'group_id': group_snapshot.group_id})

    model_update = {}
    snapshots_model_update = []
    added_snapshots_info = []

    try:
        for snapshot in snapshots:
            volume = snapshot.volume
            if not volume:
                msg = (_("Can't get volume id from snapshot, "
                         "snapshot: %(id)s") % {"id": snapshot.id})
                LOG.error(msg)
                raise exception.VolumeBackendAPIException(data=msg)

            volume_name = huawei_utils.encode_name(volume.id)
            lun_id = self.client.get_lun_id(volume, volume_name)
            snapshot_name = huawei_utils.encode_name(snapshot.id)
            snapshot_description = snapshot.id
            info = self.client.create_snapshot(lun_id,
                                               snapshot_name,
                                               snapshot_description)
            location = huawei_utils.to_string(
                huawei_snapshot_id=info['ID'])
            snapshots_model_update.append(
                {'id': snapshot.id,
                 'status': fields.SnapshotStatus.AVAILABLE,
                 'provider_location': location})
            added_snapshots_info.append(info)
    except Exception:
        with excutils.save_and_reraise_exception():
            LOG.error("Create group snapshots failed. "
                      "Group snapshot id: %s.", group_snapshot.id)

    snapshot_ids = [added_snapshot['ID']
                    for added_snapshot in added_snapshots_info]
    try:
        # Activate all members in one call so they are crash-consistent.
        self.client.activate_snapshot(snapshot_ids)
    except Exception:
        with excutils.save_and_reraise_exception():
            LOG.error("Active group snapshots failed. "
                      "Group snapshot id: %s.", group_snapshot.id)

    model_update['status'] = fields.GroupSnapshotStatus.AVAILABLE

    return model_update, snapshots_model_update
@huawei_utils.check_whether_operate_consistency_group
def delete_group_snapshot(self, context, group_snapshot, snapshots):
    """Delete group snapshot."""
    LOG.info('Delete group snapshot %(snap_id)s for group: '
             '%(group_id)s',
             {'snap_id': group_snapshot.id,
              'group_id': group_snapshot.group_id})

    model_update = {'status': fields.GroupSnapshotStatus.DELETED}
    snapshots_model_update = []

    for snapshot in snapshots:
        try:
            self.delete_snapshot(snapshot)
        except Exception:
            with excutils.save_and_reraise_exception():
                LOG.error("Delete group snapshot failed. "
                          "Group snapshot id: %s", group_snapshot.id)
        snapshots_model_update.append(
            {'id': snapshot.id,
             'status': fields.SnapshotStatus.DELETED})

    return model_update, snapshots_model_update

def _classify_volume(self, volumes):
    """Split ``volumes`` into (normal, replica) lists by volume type."""
    normal_volumes = []
    replica_volumes = []

    for v in volumes:
        volume_type = self._get_volume_type(v)
        opts = self._get_volume_params(volume_type)
        bucket = (replica_volumes
                  if opts.get('replication_enabled') == 'true'
                  else normal_volumes)
        bucket.append(v)

    return normal_volumes, replica_volumes

def _failback_normal_volumes(self, volumes):
    """Restore the pre-failover status stashed in volume metadata."""
    volumes_update = []
    for v in volumes:
        metadata = huawei_utils.get_volume_metadata(v)
        # 'old_status' was stashed by _failover_normal_volumes.
        old_status = metadata.pop('old_status', 'available')
        volumes_update.append({
            'volume_id': v.id,
            'updates': {'status': old_status,
                        'metadata': metadata},
        })
    return volumes_update

def _failback(self, volumes):
    """Fail volumes back to the primary array.

    No-op (returns 'default') when we are not currently failed over.
    """
    if self.active_backend_id in ('', None):
        return 'default', []

    normal_volumes, replica_volumes = self._classify_volume(volumes)

    volumes_update = []
    volumes_update.extend(self.replica.failback(replica_volumes))
    volumes_update.extend(self._failback_normal_volumes(normal_volumes))

    self.active_backend_id = ""
    secondary_id = 'default'

    # Switch array connection.
    self.client, self.replica_client = self.replica_client, self.client
    self.replica = replication.ReplicaPairManager(self.client,
                                                  self.replica_client,
                                                  self.configuration)
    return secondary_id, volumes_update

def _failover_normal_volumes(self, volumes):
    """Mark non-replicated volumes error, stashing their old status."""
    volumes_update = []
    for v in volumes:
        metadata = huawei_utils.get_volume_metadata(v)
        metadata.update({'old_status': v.status})
        volumes_update.append({
            'volume_id': v.id,
            'updates': {'status': 'error',
                        'metadata': metadata},
        })
    return volumes_update

def _failover(self, volumes):
    """Fail volumes over to the replication target array.

    No-op (returns the current target) when already failed over.
    """
    if self.active_backend_id not in ('', None):
        return self.replica_dev_conf['backend_id'], []

    normal_volumes, replica_volumes = self._classify_volume(volumes)

    volumes_update = []
    volumes_update.extend(self.replica.failover(replica_volumes))
    volumes_update.extend(self._failover_normal_volumes(normal_volumes))

    self.active_backend_id = self.replica_dev_conf['backend_id']
    secondary_id = self.active_backend_id

    # Switch array connection.
    self.client, self.replica_client = self.replica_client, self.client
    self.replica = replication.ReplicaPairManager(self.client,
                                                  self.replica_client,
                                                  self.configuration)
    return secondary_id, volumes_update
- self.client, self.replica_client = self.replica_client, self.client - self.replica = replication.ReplicaPairManager(self.client, - self.replica_client, - self.configuration) - return secondary_id, volumes_update - - def failover_host(self, context, volumes, secondary_id=None, groups=None): - """Failover all volumes to secondary.""" - if secondary_id == 'default': - secondary_id, volumes_update = self._failback(volumes) - elif (secondary_id == self.replica_dev_conf['backend_id'] - or secondary_id is None): - secondary_id, volumes_update = self._failover(volumes) - else: - msg = _("Invalid secondary id %s.") % secondary_id - LOG.error(msg) - raise exception.VolumeBackendAPIException(data=msg) - - return secondary_id, volumes_update, [] - - def initialize_connection_snapshot(self, snapshot, connector, **kwargs): - """Map a snapshot to a host and return target iSCSI information.""" - # From the volume structure. - volume = Volume(id=snapshot.id, - provider_location=snapshot.provider_location, - lun_type=constants.SNAPSHOT_TYPE, - metadata=None) - - return self.initialize_connection(volume, connector) - - def terminate_connection_snapshot(self, snapshot, connector, **kwargs): - """Delete map between a snapshot and a host.""" - # From the volume structure. - volume = Volume(id=snapshot.id, - provider_location=snapshot.provider_location, - lun_type=constants.SNAPSHOT_TYPE, - metadata=None) - - return self.terminate_connection(volume, connector) - - def get_lun_id_and_type(self, volume): - if hasattr(volume, 'lun_type'): - metadata = huawei_utils.get_snapshot_metadata(volume) - lun_id = metadata['huawei_snapshot_id'] - lun_type = constants.SNAPSHOT_TYPE - else: - lun_id = self._check_volume_exist_on_array( - volume, constants.VOLUME_NOT_EXISTS_RAISE) - lun_type = constants.LUN_TYPE - - return lun_id, lun_type - - -@interface.volumedriver -class HuaweiISCSIDriver(HuaweiBaseDriver, driver.ISCSIDriver): - """ISCSI driver for Huawei storage arrays. 
@interface.volumedriver
class HuaweiISCSIDriver(HuaweiBaseDriver, driver.ISCSIDriver):
    """ISCSI driver for Huawei storage arrays.

    Version history:

    .. code-block:: none

        1.0.0 - Initial driver
        1.1.0 - Provide Huawei OceanStor storage 18000 driver
        1.1.1 - Code refactor
                CHAP support
                Multiple pools support
                ISCSI multipath support
                SmartX support
                Volume migration support
                Volume retype support
        2.0.0 - Rename to HuaweiISCSIDriver
        2.0.1 - Manage/unmanage volume support
        2.0.2 - Refactor HuaweiISCSIDriver
        2.0.3 - Manage/unmanage snapshot support
        2.0.5 - Replication V2 support
        2.0.6 - Support iSCSI configuration in Replication
        2.0.7 - Hypermetro support
                Hypermetro consistency group support
                Consistency group support
                Cgsnapshot support
        2.0.8 - Backup snapshot optimal path support
        2.0.9 - Support reporting disk type of pool
    """

    VERSION = "2.0.9"

    def __init__(self, *args, **kwargs):
        super(HuaweiISCSIDriver, self).__init__(*args, **kwargs)

    def get_volume_stats(self, refresh=False):
        """Get volume status."""
        data = HuaweiBaseDriver.get_volume_stats(self, refresh=False)
        backend_name = self.configuration.safe_get('volume_backend_name')
        data['volume_backend_name'] = (backend_name
                                       or self.__class__.__name__)
        data['storage_protocol'] = 'iSCSI'
        data['driver_version'] = self.VERSION
        data['vendor_name'] = 'Huawei'
        return data

    @utils.synchronized('huawei', external=True)
    def initialize_connection(self, volume, connector):
        """Map a volume to a host and return target iSCSI information."""
        lun_id, lun_type = self.get_lun_id_and_type(volume)
        initiator_name = connector['initiator']
        LOG.info(
            'initiator name: %(initiator_name)s, '
            'LUN ID: %(lun_id)s.',
            {'initiator_name': initiator_name,
             'lun_id': lun_id})

        (iscsi_iqns,
         target_ips,
         portgroup_id) = self.client.get_iscsi_params(connector)
        LOG.info('initialize_connection, iscsi_iqn: %(iscsi_iqn)s, '
                 'target_ip: %(target_ip)s, '
                 'portgroup_id: %(portgroup_id)s.',
                 {'iscsi_iqn': iscsi_iqns,
                  'target_ip': target_ips,
                  'portgroup_id': portgroup_id})

        # Create hostgroup if not exist.
        original_host_name = connector['host']
        host_name = huawei_utils.encode_host_name(original_host_name)
        host_id = self.client.add_host_with_check(host_name,
                                                  original_host_name)

        # Add initiator to the host.
        self.client.ensure_initiator_added(initiator_name, host_id)
        hostgroup_id = self.client.add_host_to_hostgroup(host_id)

        # Mapping lungroup and hostgroup to view.
        self.client.do_mapping(lun_id, hostgroup_id,
                               host_id, portgroup_id,
                               lun_type)

        hostlun_id = self.client.get_host_lun_id(host_id, lun_id,
                                                 lun_type)
        LOG.info("initialize_connection, host lun id is: %s.",
                 hostlun_id)

        chapinfo = self.client.find_chap_info(self.client.iscsi_info,
                                              initiator_name)

        # Return iSCSI properties.
        properties = {'target_discovered': False,
                      'volume_id': volume.id}
        multipath = connector.get('multipath', False)
        hostlun_id = int(hostlun_id)
        if not multipath:
            properties['target_portal'] = ('%s:3260' % target_ips[0])
            properties['target_iqn'] = iscsi_iqns[0]
            properties['target_lun'] = hostlun_id
        else:
            properties['target_iqns'] = list(iscsi_iqns)
            properties['target_portals'] = [
                '%s:3260' % ip for ip in target_ips]
            properties['target_luns'] = [hostlun_id] * len(target_ips)

        # If use CHAP, return CHAP info.
        if chapinfo:
            # chapinfo is stored as 'user;password'.
            chap_username, chap_password = chapinfo.split(';')
            properties['auth_method'] = 'CHAP'
            properties['auth_username'] = chap_username
            properties['auth_password'] = chap_password

        LOG.info("initialize_connection success. Return data: %s.",
                 properties)
        return {'driver_volume_type': 'iscsi', 'data': properties}

    @utils.synchronized('huawei', external=True)
    def terminate_connection(self, volume, connector, **kwargs):
        """Delete map between a volume and a host."""
        lun_id, lun_type = self.get_lun_id_and_type(volume)
        initiator_name = connector['initiator']
        host_name = connector['host']
        lungroup_id = None

        LOG.info(
            'terminate_connection: initiator name: %(ini)s, '
            'LUN ID: %(lunid)s.',
            {'ini': initiator_name,
             'lunid': lun_id})

        portgroup = None
        portgroup_id = None
        view_id = None
        left_lunnum = -1
        # Look up the target portgroup configured for this initiator.
        for ini in self.client.iscsi_info:
            if ini['Name'] == initiator_name:
                for key in ini:
                    if key == 'TargetPortGroup':
                        portgroup = ini['TargetPortGroup']
                        break

        if portgroup:
            portgroup_id = self.client.get_tgt_port_group(portgroup)
        host_name = huawei_utils.encode_host_name(host_name)
        host_id = self.client.get_host_id_by_name(host_name)
        if host_id:
            mapping_view_name = constants.MAPPING_VIEW_PREFIX + host_id
            view_id = self.client.find_mapping_view(mapping_view_name)
            if view_id:
                lungroup_id = self.client.find_lungroup_from_map(view_id)

        # Remove lun from lungroup.
        if lun_id and lungroup_id:
            lungroup_ids = self.client.get_lungroupids_by_lunid(
                lun_id, lun_type)
            if lungroup_id in lungroup_ids:
                self.client.remove_lun_from_lungroup(lungroup_id,
                                                     lun_id,
                                                     lun_type)
            else:
                LOG.warning("LUN is not in lungroup. "
                            "LUN ID: %(lun_id)s. "
                            "Lungroup id: %(lungroup_id)s.",
                            {"lun_id": lun_id,
                             "lungroup_id": lungroup_id})

        # Remove portgroup from mapping view if no lun left in lungroup.
        if lungroup_id:
            left_lunnum = self.client.get_obj_count_from_lungroup(
                lungroup_id)

        if portgroup_id and view_id and (int(left_lunnum) <= 0):
            if self.client.is_portgroup_associated_to_view(view_id,
                                                           portgroup_id):
                self.client.delete_portgroup_mapping_view(view_id,
                                                          portgroup_id)
        if view_id and (int(left_lunnum) <= 0):
            self.client.remove_chap(initiator_name)

            if self.client.lungroup_associated(view_id, lungroup_id):
                self.client.delete_lungroup_mapping_view(view_id,
                                                         lungroup_id)
                self.client.delete_lungroup(lungroup_id)
            if self.client.is_initiator_associated_to_host(initiator_name,
                                                           host_id):
                self.client.remove_iscsi_from_host(initiator_name)
            hostgroup_name = constants.HOSTGROUP_PREFIX + host_id
            hostgroup_id = self.client.find_hostgroup(hostgroup_name)
            if hostgroup_id:
                if self.client.hostgroup_associated(view_id, hostgroup_id):
                    self.client.delete_hostgoup_mapping_view(view_id,
                                                             hostgroup_id)
                self.client.remove_host_from_hostgroup(hostgroup_id,
                                                       host_id)
                self.client.delete_hostgroup(hostgroup_id)
            self.client.remove_host(host_id)
            self.client.delete_mapping_view(view_id)
code-block:: none - - 1.0.0 - Initial driver - 1.1.0 - Provide Huawei OceanStor 18000 storage volume driver - 1.1.1 - Code refactor - Multiple pools support - SmartX support - Volume migration support - Volume retype support - FC zone enhancement - Volume hypermetro support - 2.0.0 - Rename to HuaweiFCDriver - 2.0.1 - Manage/unmanage volume support - 2.0.2 - Refactor HuaweiFCDriver - 2.0.3 - Manage/unmanage snapshot support - 2.0.4 - Balanced FC port selection - 2.0.5 - Replication V2 support - 2.0.7 - Hypermetro support - Hypermetro consistency group support - Consistency group support - Cgsnapshot support - 2.0.8 - Backup snapshot optimal path support - 2.0.9 - Support reporting disk type of pool - """ - - VERSION = "2.0.9" - - def __init__(self, *args, **kwargs): - super(HuaweiFCDriver, self).__init__(*args, **kwargs) - self.fcsan = None - - def get_volume_stats(self, refresh=False): - """Get volume status.""" - data = HuaweiBaseDriver.get_volume_stats(self, refresh=False) - backend_name = self.configuration.safe_get('volume_backend_name') - data['volume_backend_name'] = backend_name or self.__class__.__name__ - data['storage_protocol'] = 'FC' - data['driver_version'] = self.VERSION - data['vendor_name'] = 'Huawei' - return data - - @utils.synchronized('huawei', external=True) - @fczm_utils.add_fc_zone - def initialize_connection(self, volume, connector): - lun_id, lun_type = self.get_lun_id_and_type(volume) - wwns = connector['wwpns'] - LOG.info( - 'initialize_connection, initiator: %(wwpns)s,' - ' LUN ID: %(lun_id)s.', - {'wwpns': wwns, - 'lun_id': lun_id},) - - portg_id = None - - original_host_name = connector['host'] - host_name = huawei_utils.encode_host_name(original_host_name) - host_id = self.client.add_host_with_check(host_name, - original_host_name) - - if not self.fcsan: - self.fcsan = fczm_utils.create_lookup_service() - - if self.fcsan: - # Use FC switch. 
- zone_helper = fc_zone_helper.FCZoneHelper(self.fcsan, self.client) - try: - (tgt_port_wwns, portg_id, init_targ_map) = ( - zone_helper.build_ini_targ_map(wwns, host_id, lun_id, - lun_type)) - except Exception as err: - self.remove_host_with_check(host_id) - msg = _('build_ini_targ_map fails. %s') % err - raise exception.VolumeBackendAPIException(data=msg) - - for ini in init_targ_map: - self.client.ensure_fc_initiator_added(ini, host_id) - else: - # Not use FC switch. - online_wwns_in_host = ( - self.client.get_host_online_fc_initiators(host_id)) - online_free_wwns = self.client.get_online_free_wwns() - for wwn in wwns: - if (wwn not in online_wwns_in_host - and wwn not in online_free_wwns): - wwns_in_host = ( - self.client.get_host_fc_initiators(host_id)) - iqns_in_host = ( - self.client.get_host_iscsi_initiators(host_id)) - if not (wwns_in_host or iqns_in_host or - self.client.is_host_associated_to_hostgroup(host_id)): - self.client.remove_host(host_id) - - msg = _('No FC initiator can be added to host.') - LOG.error(msg) - raise exception.VolumeBackendAPIException(data=msg) - - for wwn in wwns: - if wwn in online_free_wwns: - self.client.add_fc_port_to_host(host_id, wwn) - - (tgt_port_wwns, init_targ_map) = ( - self.client.get_init_targ_map(wwns)) - - # Add host into hostgroup. - hostgroup_id = self.client.add_host_to_hostgroup(host_id) - - metadata = huawei_utils.get_lun_metadata(volume) - LOG.info("initialize_connection, metadata is: %s.", metadata) - hypermetro_lun = 'hypermetro_id' in metadata - - map_info = self.client.do_mapping(lun_id, hostgroup_id, - host_id, portg_id, - lun_type, hypermetro_lun) - host_lun_id = self.client.get_host_lun_id(host_id, lun_id, - lun_type) - - # Return FC properties. 
- fc_info = {'driver_volume_type': 'fibre_channel', - 'data': {'target_lun': int(host_lun_id), - 'target_discovered': True, - 'target_wwn': tgt_port_wwns, - 'volume_id': volume.id, - 'initiator_target_map': init_targ_map, - 'map_info': map_info}, } - - # Deal with hypermetro connection. - if hypermetro_lun: - loc_tgt_wwn = fc_info['data']['target_wwn'] - local_ini_tgt_map = fc_info['data']['initiator_target_map'] - hyperm = hypermetro.HuaweiHyperMetro(self.client, - self.rmt_client, - self.configuration) - rmt_fc_info = hyperm.connect_volume_fc(volume, connector) - - rmt_tgt_wwn = rmt_fc_info['data']['target_wwn'] - rmt_ini_tgt_map = rmt_fc_info['data']['initiator_target_map'] - fc_info['data']['target_wwn'] = (loc_tgt_wwn + rmt_tgt_wwn) - wwns = connector['wwpns'] - for wwn in wwns: - if (wwn in local_ini_tgt_map - and wwn in rmt_ini_tgt_map): - fc_info['data']['initiator_target_map'][wwn].extend( - rmt_ini_tgt_map[wwn]) - - elif (wwn not in local_ini_tgt_map - and wwn in rmt_ini_tgt_map): - fc_info['data']['initiator_target_map'][wwn] = ( - rmt_ini_tgt_map[wwn]) - # else, do nothing - - loc_map_info = fc_info['data']['map_info'] - rmt_map_info = rmt_fc_info['data']['map_info'] - same_host_id = self._get_same_hostid(loc_map_info, - rmt_map_info) - - self.client.change_hostlun_id(loc_map_info, same_host_id) - hyperm.rmt_client.change_hostlun_id(rmt_map_info, same_host_id) - - fc_info['data']['target_lun'] = same_host_id - hyperm.rmt_client.logout() - - LOG.info("Return FC info is: %s.", fc_info) - return fc_info - - def _get_same_hostid(self, loc_fc_info, rmt_fc_info): - loc_aval_luns = loc_fc_info['aval_luns'] - loc_aval_luns = json.loads(loc_aval_luns) - - rmt_aval_luns = rmt_fc_info['aval_luns'] - rmt_aval_luns = json.loads(rmt_aval_luns) - same_host_id = None - - for i in range(1, 512): - if i in rmt_aval_luns and i in loc_aval_luns: - same_host_id = i - break - - LOG.info("The same hostid is: %s.", same_host_id) - if not same_host_id: - msg = _("Can't find the 
same host id from arrays.") - LOG.error(msg) - raise exception.VolumeBackendAPIException(data=msg) - - return same_host_id - - @utils.synchronized('huawei', external=True) - @fczm_utils.remove_fc_zone - def terminate_connection(self, volume, connector, **kwargs): - """Delete map between a volume and a host.""" - lun_id, lun_type = self.get_lun_id_and_type(volume) - wwns = connector['wwpns'] - - host_name = connector['host'] - left_lunnum = -1 - lungroup_id = None - view_id = None - LOG.info('terminate_connection: wwpns: %(wwns)s, ' - 'LUN ID: %(lun_id)s.', - {'wwns': wwns, 'lun_id': lun_id}) - - host_name = huawei_utils.encode_host_name(host_name) - host_id = self.client.get_host_id_by_name(host_name) - if host_id: - mapping_view_name = constants.MAPPING_VIEW_PREFIX + host_id - view_id = self.client.find_mapping_view(mapping_view_name) - if view_id: - lungroup_id = self.client.find_lungroup_from_map(view_id) - - if lun_id and lungroup_id: - lungroup_ids = self.client.get_lungroupids_by_lunid(lun_id, - lun_type) - if lungroup_id in lungroup_ids: - self.client.remove_lun_from_lungroup(lungroup_id, - lun_id, - lun_type) - else: - LOG.warning("LUN is not in lungroup. " - "LUN ID: %(lun_id)s. 
" - "Lungroup id: %(lungroup_id)s.", - {"lun_id": lun_id, - "lungroup_id": lungroup_id}) - - else: - LOG.warning("Can't find lun on the array.") - if lungroup_id: - left_lunnum = self.client.get_obj_count_from_lungroup(lungroup_id) - if int(left_lunnum) > 0: - fc_info = {'driver_volume_type': 'fibre_channel', - 'data': {}} - else: - fc_info, portg_id = self._delete_zone_and_remove_fc_initiators( - wwns, host_id) - if lungroup_id: - if view_id and self.client.lungroup_associated( - view_id, lungroup_id): - self.client.delete_lungroup_mapping_view(view_id, - lungroup_id) - self.client.delete_lungroup(lungroup_id) - if portg_id: - if view_id and self.client.is_portgroup_associated_to_view( - view_id, portg_id): - self.client.delete_portgroup_mapping_view(view_id, - portg_id) - self.client.delete_portgroup(portg_id) - - if host_id: - hostgroup_name = constants.HOSTGROUP_PREFIX + host_id - hostgroup_id = self.client.find_hostgroup(hostgroup_name) - if hostgroup_id: - if view_id and self.client.hostgroup_associated( - view_id, hostgroup_id): - self.client.delete_hostgoup_mapping_view( - view_id, hostgroup_id) - self.client.remove_host_from_hostgroup( - hostgroup_id, host_id) - self.client.delete_hostgroup(hostgroup_id) - - if not self.client.check_fc_initiators_exist_in_host( - host_id): - self.client.remove_host(host_id) - - if view_id: - self.client.delete_mapping_view(view_id) - - # Deal with hypermetro connection. - metadata = huawei_utils.get_lun_metadata(volume) - LOG.info("Detach Volume, metadata is: %s.", metadata) - - if 'hypermetro_id' in metadata: - hyperm = hypermetro.HuaweiHyperMetro(self.client, - self.rmt_client, - self.configuration) - hyperm.disconnect_volume_fc(volume, connector) - - LOG.info("terminate_connection, return data is: %s.", - fc_info) - - return fc_info - - def _delete_zone_and_remove_fc_initiators(self, wwns, host_id): - # Get tgt_port_wwns and init_targ_map to remove zone. 
- portg_id = None - if not self.fcsan: - self.fcsan = fczm_utils.create_lookup_service() - if self.fcsan: - zone_helper = fc_zone_helper.FCZoneHelper(self.fcsan, - self.client) - (tgt_port_wwns, portg_id, init_targ_map) = ( - zone_helper.get_init_targ_map(wwns, host_id)) - else: - (tgt_port_wwns, init_targ_map) = ( - self.client.get_init_targ_map(wwns)) - - # Remove the initiators from host if need. - if host_id: - fc_initiators = self.client.get_host_fc_initiators(host_id) - for wwn in wwns: - if wwn in fc_initiators: - self.client.remove_fc_from_host(wwn) - - info = {'driver_volume_type': 'fibre_channel', - 'data': {'target_wwn': tgt_port_wwns, - 'initiator_target_map': init_targ_map}} - return info, portg_id diff --git a/cinder/volume/drivers/huawei/huawei_utils.py b/cinder/volume/drivers/huawei/huawei_utils.py deleted file mode 100644 index c9f673054..000000000 --- a/cinder/volume/drivers/huawei/huawei_utils.py +++ /dev/null @@ -1,161 +0,0 @@ -# Copyright (c) 2016 Huawei Technologies Co., Ltd. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -import json -import six -import time - -from oslo_log import log as logging -from oslo_service import loopingcall -from oslo_utils import units - -from cinder import exception -from cinder.i18n import _ -from cinder import objects -from cinder.volume.drivers.huawei import constants -from cinder.volume import utils - -LOG = logging.getLogger(__name__) - - -def encode_name(name): - pre_name = name.split("-")[0] - vol_encoded = six.text_type(hash(name)) - if vol_encoded.startswith('-'): - newuuid = pre_name + vol_encoded - else: - newuuid = pre_name + '-' + vol_encoded - return newuuid - - -def encode_host_name(name): - if name and (len(name) > constants.MAX_HOSTNAME_LENGTH): - name = six.text_type(hash(name)) - return name - - -def wait_for_condition(func, interval, timeout): - start_time = time.time() - - def _inner(): - try: - res = func() - except Exception as ex: - raise exception.VolumeBackendAPIException(data=ex) - - if res: - raise loopingcall.LoopingCallDone() - - if int(time.time()) - start_time > timeout: - msg = (_('wait_for_condition: %s timed out.') - % func.__name__) - LOG.error(msg) - raise exception.VolumeBackendAPIException(data=msg) - - timer = loopingcall.FixedIntervalLoopingCall(_inner) - timer.start(interval=interval).wait() - - -def get_volume_size(volume): - """Calculate the volume size. - - We should divide the given volume size by 512 for the 18000 system - calculates volume size with sectors, which is 512 bytes. 
- """ - volume_size = units.Gi / 512 # 1G - if int(volume.size) != 0: - volume_size = int(volume.size) * units.Gi / 512 - - return volume_size - - -def get_volume_metadata(volume): - if type(volume) is objects.Volume: - return volume.metadata - - if 'volume_metadata' in volume: - metadata = volume.get('volume_metadata') - return {item['key']: item['value'] for item in metadata} - - return {} - - -def get_admin_metadata(volume): - admin_metadata = {} - if 'admin_metadata' in volume: - admin_metadata = volume.admin_metadata - elif 'volume_admin_metadata' in volume: - metadata = volume.get('volume_admin_metadata', []) - admin_metadata = {item['key']: item['value'] for item in metadata} - - LOG.debug("Volume ID: %(id)s, admin_metadata: %(admin_metadata)s.", - {"id": volume.id, "admin_metadata": admin_metadata}) - return admin_metadata - - -def get_snapshot_metadata_value(snapshot): - if type(snapshot) is objects.Snapshot: - return snapshot.metadata - - if 'snapshot_metadata' in snapshot: - metadata = snapshot.snapshot_metadata - return {item['key']: item['value'] for item in metadata} - - return {} - - -def check_whether_operate_consistency_group(func): - def wrapper(self, context, group, *args, **kwargs): - if not utils.is_group_a_cg_snapshot_type(group): - msg = _("%s, the group or group snapshot is not cg or " - "cg_snapshot") % func.__name__ - LOG.debug(msg) - raise NotImplementedError(msg) - return func(self, context, group, *args, **kwargs) - return wrapper - - -def to_string(**kwargs): - return json.dumps(kwargs) if kwargs else '' - - -def get_lun_metadata(volume): - if not volume.provider_location: - return {} - - info = json.loads(volume.provider_location) - if isinstance(info, dict): - return info - - # To keep compatible with old driver version - admin_metadata = get_admin_metadata(volume) - metadata = get_volume_metadata(volume) - return {'huawei_lun_id': six.text_type(info), - 'huawei_lun_wwn': admin_metadata.get('huawei_lun_wwn'), - 'hypermetro_id': 
metadata.get('hypermetro_id'), - 'remote_lun_id': metadata.get('remote_lun_id') - } - - -def get_snapshot_metadata(snapshot): - if not snapshot.provider_location: - return {} - - info = json.loads(snapshot.provider_location) - if isinstance(info, dict): - return info - - # To keep compatible with old driver version - return {'huawei_snapshot_id': six.text_type(info)} diff --git a/cinder/volume/drivers/huawei/hypermetro.py b/cinder/volume/drivers/huawei/hypermetro.py deleted file mode 100644 index ff7485845..000000000 --- a/cinder/volume/drivers/huawei/hypermetro.py +++ /dev/null @@ -1,364 +0,0 @@ -# Copyright (c) 2016 Huawei Technologies Co., Ltd. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -# - -from oslo_log import log as logging - -from cinder import exception -from cinder.i18n import _ -from cinder.objects import fields -from cinder.volume.drivers.huawei import constants -from cinder.volume.drivers.huawei import huawei_utils - -LOG = logging.getLogger(__name__) - - -class HuaweiHyperMetro(object): - - def __init__(self, client, rmt_client, configuration): - self.client = client - self.rmt_client = rmt_client - self.configuration = configuration - - def create_hypermetro(self, local_lun_id, lun_params): - """Create hypermetro.""" - - try: - # Get the remote pool info. 
- config_pool = self.configuration.metro_storage_pools - remote_pool = self.rmt_client.get_all_pools() - pool = self.rmt_client.get_pool_info(config_pool, remote_pool) - if not pool: - err_msg = _("Remote pool cannot be found.") - LOG.error(err_msg) - raise exception.VolumeBackendAPIException(data=err_msg) - - # Create remote lun. - lun_params['PARENTID'] = pool['ID'] - remotelun_info = self.rmt_client.create_lun(lun_params) - remote_lun_id = remotelun_info['ID'] - - # Get hypermetro domain. - try: - domain_name = self.configuration.metro_domain_name - domain_id = self.rmt_client.get_hyper_domain_id(domain_name) - self._wait_volume_ready(remote_lun_id) - hypermetro = self._create_hypermetro_pair(domain_id, - local_lun_id, - remote_lun_id) - - LOG.info("Hypermetro id: %(metro_id)s. " - "Remote lun id: %(remote_lun_id)s.", - {'metro_id': hypermetro['ID'], - 'remote_lun_id': remote_lun_id}) - - return {'hypermetro_id': hypermetro['ID'], - 'remote_lun_id': remote_lun_id} - except exception.VolumeBackendAPIException as err: - self.rmt_client.delete_lun(remote_lun_id) - msg = _('Create hypermetro error. %s.') % err - raise exception.VolumeBackendAPIException(data=msg) - except exception.VolumeBackendAPIException: - raise - - def delete_hypermetro(self, volume): - """Delete hypermetro.""" - metadata = huawei_utils.get_lun_metadata(volume) - metro_id = metadata['hypermetro_id'] - remote_lun_id = metadata['remote_lun_id'] - - if metro_id: - self.check_metro_need_to_stop(volume) - - # Delete hypermetro - self.client.delete_hypermetro(metro_id) - - # Delete remote lun. 
- if remote_lun_id and self.rmt_client.check_lun_exist(remote_lun_id): - self.rmt_client.delete_lun(remote_lun_id) - - def _create_hypermetro_pair(self, domain_id, lun_id, remote_lun_id): - """Create a HyperMetroPair.""" - hcp_param = {"DOMAINID": domain_id, - "HCRESOURCETYPE": '1', - "ISFIRSTSYNC": False, - "LOCALOBJID": lun_id, - "RECONVERYPOLICY": '1', - "REMOTEOBJID": remote_lun_id, - "SPEED": '2'} - - return self.client.create_hypermetro(hcp_param) - - def connect_volume_fc(self, volume, connector): - """Create map between a volume and a host for FC.""" - wwns = connector['wwpns'] - volume_name = huawei_utils.encode_name(volume.id) - - LOG.info( - 'initialize_connection_fc, initiator: %(wwpns)s,' - ' volume name: %(volume)s.', - {'wwpns': wwns, - 'volume': volume_name}) - - metadata = huawei_utils.get_lun_metadata(volume) - lun_id = metadata['remote_lun_id'] - - if lun_id is None: - lun_id = self.rmt_client.get_lun_id_by_name(volume_name) - if lun_id is None: - msg = _("Can't get volume id. Volume name: %s.") % volume_name - LOG.error(msg) - raise exception.VolumeBackendAPIException(data=msg) - - original_host_name = connector['host'] - host_name = huawei_utils.encode_host_name(original_host_name) - host_id = self.client.add_host_with_check(host_name, - original_host_name) - - # Create hostgroup if not exist. 
- host_id = self.rmt_client.add_host_with_check( - host_name, original_host_name) - - online_wwns_in_host = ( - self.rmt_client.get_host_online_fc_initiators(host_id)) - online_free_wwns = self.rmt_client.get_online_free_wwns() - for wwn in wwns: - if (wwn not in online_wwns_in_host - and wwn not in online_free_wwns): - wwns_in_host = ( - self.rmt_client.get_host_fc_initiators(host_id)) - iqns_in_host = ( - self.rmt_client.get_host_iscsi_initiators(host_id)) - if not (wwns_in_host or iqns_in_host): - self.rmt_client.remove_host(host_id) - - msg = _('Can not add FC port to host.') - LOG.error(msg) - raise exception.VolumeBackendAPIException(data=msg) - - for wwn in wwns: - if wwn in online_free_wwns: - self.rmt_client.add_fc_port_to_host(host_id, wwn) - - (tgt_port_wwns, init_targ_map) = ( - self.rmt_client.get_init_targ_map(wwns)) - - # Add host into hostgroup. - hostgroup_id = self.rmt_client.add_host_to_hostgroup(host_id) - map_info = self.rmt_client.do_mapping(lun_id, hostgroup_id, host_id, - hypermetro_lun=True) - if not map_info: - msg = _('Map info is None due to array version ' - 'not supporting hypermetro.') - LOG.error(msg) - raise exception.VolumeBackendAPIException(data=msg) - - host_lun_id = self.rmt_client.get_host_lun_id(host_id, lun_id) - - # Return FC properties. 
- fc_info = {'driver_volume_type': 'fibre_channel', - 'data': {'target_lun': int(host_lun_id), - 'target_discovered': True, - 'target_wwn': tgt_port_wwns, - 'volume_id': volume.id, - 'initiator_target_map': init_targ_map, - 'map_info': map_info}, - } - - LOG.info('Remote return FC info is: %s.', fc_info) - - return fc_info - - def disconnect_volume_fc(self, volume, connector): - """Delete map between a volume and a host for FC.""" - wwns = connector['wwpns'] - volume_name = huawei_utils.encode_name(volume.id) - metadata = huawei_utils.get_lun_metadata(volume) - lun_id = metadata['remote_lun_id'] - host_name = connector['host'] - left_lunnum = -1 - lungroup_id = None - view_id = None - - LOG.info('terminate_connection_fc: volume name: %(volume)s, ' - 'wwpns: %(wwns)s, ' - 'lun_id: %(lunid)s.', - {'volume': volume_name, - 'wwns': wwns, - 'lunid': lun_id},) - - host_name = huawei_utils.encode_host_name(host_name) - hostid = self.rmt_client.get_host_id_by_name(host_name) - if hostid: - mapping_view_name = constants.MAPPING_VIEW_PREFIX + hostid - view_id = self.rmt_client.find_mapping_view( - mapping_view_name) - if view_id: - lungroup_id = self.rmt_client.find_lungroup_from_map( - view_id) - - if lun_id and self.rmt_client.check_lun_exist(lun_id): - if lungroup_id: - lungroup_ids = self.rmt_client.get_lungroupids_by_lunid( - lun_id) - if lungroup_id in lungroup_ids: - self.rmt_client.remove_lun_from_lungroup( - lungroup_id, lun_id) - else: - LOG.warning("Lun is not in lungroup. 
" - "Lun id: %(lun_id)s, " - "lungroup id: %(lungroup_id)s", - {"lun_id": lun_id, - "lungroup_id": lungroup_id}) - - (tgt_port_wwns, init_targ_map) = ( - self.rmt_client.get_init_targ_map(wwns)) - - hostid = self.rmt_client.get_host_id_by_name(host_name) - if hostid: - mapping_view_name = constants.MAPPING_VIEW_PREFIX + hostid - view_id = self.rmt_client.find_mapping_view( - mapping_view_name) - if view_id: - lungroup_id = self.rmt_client.find_lungroup_from_map( - view_id) - if lungroup_id: - left_lunnum = self.rmt_client.get_obj_count_from_lungroup( - lungroup_id) - - if int(left_lunnum) > 0: - info = {'driver_volume_type': 'fibre_channel', - 'data': {}} - else: - info = {'driver_volume_type': 'fibre_channel', - 'data': {'target_wwn': tgt_port_wwns, - 'initiator_target_map': init_targ_map}, } - - return info - - def _wait_volume_ready(self, lun_id): - wait_interval = self.configuration.lun_ready_wait_interval - - def _volume_ready(): - result = self.rmt_client.get_lun_info(lun_id) - if (result['HEALTHSTATUS'] == constants.STATUS_HEALTH - and result['RUNNINGSTATUS'] == constants.STATUS_VOLUME_READY): - return True - return False - - huawei_utils.wait_for_condition(_volume_ready, - wait_interval, - wait_interval * 10) - - def retype(self, volume, new_type): - return False - - def get_hypermetro_stats(self, hypermetro_id): - pass - - def create_consistencygroup(self, group): - LOG.info("Create Consistency Group: %(group)s.", - {'group': group.id}) - group_name = huawei_utils.encode_name(group.id) - domain_name = self.configuration.metro_domain_name - domain_id = self.client.get_hyper_domain_id(domain_name) - if not domain_name or not domain_id: - msg = _("The domain_name config in cinder.conf is wrong.") - LOG.error(msg) - raise exception.VolumeBackendAPIException(data=msg) - - self.client.create_metrogroup(group_name, group.id, domain_id) - - def delete_consistencygroup(self, context, group, volumes): - LOG.info("Delete Consistency Group: %(group)s.", - {'group': 
group.id}) - model_update = {} - volumes_model_update = [] - model_update['status'] = fields.GroupStatus.DELETED - metrogroup_id = self.check_consistencygroup_need_to_stop(group) - if metrogroup_id: - self.client.delete_metrogroup(metrogroup_id) - - # Deal with the return volumes info - for volume_ref in volumes: - volume_update = {'id': volume_ref.id} - volume_update['status'] = 'deleted' - volumes_model_update.append(volume_update) - - return model_update, volumes_model_update - - def update_consistencygroup(self, context, group, - add_volumes, remove_volumes): - LOG.info("Update Consistency Group: %(group)s. " - "This adds or removes volumes from a CG.", - {'group': group.id}) - metrogroup_id = self.check_consistencygroup_need_to_stop(group) - if metrogroup_id: - # Deal with add volumes to CG - for volume in add_volumes: - metro_id = self.check_metro_need_to_stop(volume) - self.client.add_metro_to_metrogroup(metrogroup_id, - metro_id) - - # Deal with remove volumes from CG - for volume in remove_volumes: - metro_id = self.check_metro_need_to_stop(volume) - self.client.remove_metro_from_metrogroup(metrogroup_id, - metro_id) - self.client.sync_hypermetro(metro_id) - - new_group_info = self.client.get_metrogroup_by_id(metrogroup_id) - is_empty = new_group_info["ISEMPTY"] - if is_empty == 'false': - self.client.sync_metrogroup(metrogroup_id) - - # if CG not exist on array - else: - msg = _("The CG does not exist on array.") - LOG.error(msg) - raise exception.VolumeBackendAPIException(data=msg) - - def check_metro_need_to_stop(self, volume): - metadata = huawei_utils.get_lun_metadata(volume) - metro_id = metadata['hypermetro_id'] - metro_existed = self.client.check_hypermetro_exist(metro_id) - - if metro_existed: - metro_info = self.client.get_hypermetro_by_id(metro_id) - metro_health_status = metro_info['HEALTHSTATUS'] - metro_running_status = metro_info['RUNNINGSTATUS'] - - if (metro_health_status == constants.HEALTH_NORMAL and - (metro_running_status == 
constants.RUNNING_NORMAL or - metro_running_status == constants.RUNNING_SYNC)): - self.client.stop_hypermetro(metro_id) - - return metro_id - - def check_consistencygroup_need_to_stop(self, group): - group_name = huawei_utils.encode_name(group.id) - metrogroup_id = self.client.get_metrogroup_by_name(group_name) - - if metrogroup_id: - metrogroup_info = self.client.get_metrogroup_by_id(metrogroup_id) - health_status = metrogroup_info['HEALTHSTATUS'] - running_status = metrogroup_info['RUNNINGSTATUS'] - - if (health_status == constants.HEALTH_NORMAL - and (running_status == constants.RUNNING_NORMAL - or running_status == constants.RUNNING_SYNC)): - self.client.stop_metrogroup(metrogroup_id) - - return metrogroup_id diff --git a/cinder/volume/drivers/huawei/replication.py b/cinder/volume/drivers/huawei/replication.py deleted file mode 100644 index 3a1b58e40..000000000 --- a/cinder/volume/drivers/huawei/replication.py +++ /dev/null @@ -1,651 +0,0 @@ -# Copyright (c) 2016 Huawei Technologies Co., Ltd. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
-# - -import json - -from oslo_log import log as logging -from oslo_utils import excutils - -from cinder import exception -from cinder.i18n import _ -from cinder.volume.drivers.huawei import constants -from cinder.volume.drivers.huawei import huawei_utils - -LOG = logging.getLogger(__name__) - - -class AbsReplicaOp(object): - def __init__(self, client): - self.client = client - - def create(self, **kwargs): - pass - - def delete(self, replica_id): - pass - - def protect_second(self, replica_id): - pass - - def unprotect_second(self, replica_id): - pass - - def sync(self, replica_id): - pass - - def split(self, replica_id): - pass - - def switch(self, replica_id): - pass - - def is_primary(self, replica_info): - flag = replica_info.get('ISPRIMARY') - if flag and flag.lower() == 'true': - return True - return False - - def get_replica_info(self, replica_id): - return {} - - def _is_status(self, status_key, status, replica_info): - if type(status) in (list, tuple): - return replica_info.get(status_key, '') in status - if type(status) is str: - return replica_info.get(status_key, '') == status - - return False - - def is_running_status(self, status, replica_info): - return self._is_status(constants.REPLICA_RUNNING_STATUS_KEY, - status, replica_info) - - def is_health_status(self, status, replica_info): - return self._is_status(constants.REPLICA_HEALTH_STATUS_KEY, - status, replica_info) - - -class PairOp(AbsReplicaOp): - def create(self, local_lun_id, rmt_lun_id, rmt_dev_id, - rmt_dev_name, replica_model, - speed=constants.REPLICA_SPEED, - period=constants.REPLICA_PERIOD, - **kwargs): - super(PairOp, self).create(**kwargs) - - params = { - "LOCALRESID": local_lun_id, - "LOCALRESTYPE": '11', - "REMOTEDEVICEID": rmt_dev_id, - "REMOTEDEVICENAME": rmt_dev_name, - "REMOTERESID": rmt_lun_id, - "REPLICATIONMODEL": replica_model, - # recovery policy. 
1: auto, 2: manual - "RECOVERYPOLICY": '1', - "SPEED": speed, - } - - if replica_model == constants.REPLICA_ASYNC_MODEL: - # Synchronize type values: - # 1, manual - # 2, timed wait when synchronization begins - # 3, timed wait when synchronization ends - params['SYNCHRONIZETYPE'] = '2' - params['TIMINGVAL'] = period - - try: - pair_info = self.client.create_pair(params) - except Exception as err: - msg = _('Create replication pair failed. Error: %s.') % err - LOG.error(msg) - raise exception.VolumeBackendAPIException(data=msg) - - return pair_info - - def split(self, pair_id): - self.client.split_pair(pair_id) - - def delete(self, pair_id, force=False): - self.client.delete_pair(pair_id, force) - - def protect_second(self, pair_id): - self.client.set_pair_second_access(pair_id, - constants.REPLICA_SECOND_RO) - - def unprotect_second(self, pair_id): - self.client.set_pair_second_access(pair_id, - constants.REPLICA_SECOND_RW) - - def sync(self, pair_id): - self.client.sync_pair(pair_id) - - def switch(self, pair_id): - self.client.switch_pair(pair_id) - - def get_replica_info(self, pair_id): - return self.client.get_pair_by_id(pair_id) - - -class CGOp(AbsReplicaOp): - pass - - -class ReplicaCommonDriver(object): - def __init__(self, conf, replica_op): - self.conf = conf - self.op = replica_op - - def protect_second(self, replica_id): - info = self.op.get_replica_info(replica_id) - if info.get('SECRESACCESS') == constants.REPLICA_SECOND_RO: - return - - self.op.protect_second(replica_id) - self.wait_second_access(replica_id, constants.REPLICA_SECOND_RO) - - def unprotect_second(self, replica_id): - info = self.op.get_replica_info(replica_id) - if info.get('SECRESACCESS') == constants.REPLICA_SECOND_RW: - return - - self.op.unprotect_second(replica_id) - self.wait_second_access(replica_id, constants.REPLICA_SECOND_RW) - - def sync(self, replica_id, wait_complete=False): - self.protect_second(replica_id) - - expect_status = (constants.REPLICA_RUNNING_STATUS_NORMAL, - 
constants.REPLICA_RUNNING_STATUS_SYNC, - constants.REPLICA_RUNNING_STATUS_INITIAL_SYNC) - info = self.op.get_replica_info(replica_id) - - # When running status is synchronizing or normal, - # it's not necessary to do synchronize again. - if (info.get('REPLICATIONMODEL') == constants.REPLICA_SYNC_MODEL - and self.op.is_running_status(expect_status, info)): - return - - self.op.sync(replica_id) - self.wait_expect_state(replica_id, expect_status) - - if wait_complete: - self.wait_replica_ready(replica_id) - - def split(self, replica_id): - running_status = (constants.REPLICA_RUNNING_STATUS_SPLIT, - constants.REPLICA_RUNNING_STATUS_INVALID, - constants.REPLICA_RUNNING_STATUS_ERRUPTED) - info = self.op.get_replica_info(replica_id) - if self.op.is_running_status(running_status, info): - return - - try: - self.op.split(replica_id) - except Exception as err: - LOG.warning('Split replication exception: %s.', err) - - try: - self.wait_expect_state(replica_id, running_status) - except Exception as err: - msg = _('Split replication failed.') - LOG.error(msg) - raise exception.VolumeBackendAPIException(data=msg) - - def enable(self, replica_id, wait_sync_complete=False): - info = self.op.get_replica_info(replica_id) - if not self.op.is_primary(info): - self.switch(replica_id) - self.sync(replica_id) - return None - - def switch(self, replica_id): - self.split(replica_id) - self.unprotect_second(replica_id) - self.op.switch(replica_id) - - # Wait to be primary - def _wait_switch_to_primary(): - info = self.op.get_replica_info(replica_id) - if self.op.is_primary(info): - return True - return False - - interval = constants.DEFAULT_REPLICA_WAIT_INTERVAL - timeout = constants.DEFAULT_REPLICA_WAIT_TIMEOUT - huawei_utils.wait_for_condition(_wait_switch_to_primary, - interval, - timeout) - - def failover(self, replica_id): - """Failover replication. - - Purpose: - 1. Split replication. - 2. Set secondary access read & write. 
- """ - info = self.op.get_replica_info(replica_id) - if self.op.is_primary(info): - msg = _('We should not do switch over on primary array.') - LOG.error(msg) - raise exception.VolumeBackendAPIException(data=msg) - - sync_status_set = (constants.REPLICA_RUNNING_STATUS_SYNC, - constants.REPLICA_RUNNING_STATUS_INITIAL_SYNC) - if self.op.is_running_status(sync_status_set, info): - self.wait_replica_ready(replica_id) - - self.split(replica_id) - self.op.unprotect_second(replica_id) - - def wait_replica_ready(self, replica_id, interval=None, timeout=None): - LOG.debug('Wait synchronize complete.') - running_status_normal = (constants.REPLICA_RUNNING_STATUS_NORMAL, - constants.REPLICA_RUNNING_STATUS_SYNCED) - running_status_sync = (constants.REPLICA_RUNNING_STATUS_SYNC, - constants.REPLICA_RUNNING_STATUS_INITIAL_SYNC) - health_status_normal = constants.REPLICA_HEALTH_STATUS_NORMAL - - def _replica_ready(): - info = self.op.get_replica_info(replica_id) - if (self.op.is_running_status(running_status_normal, info) - and self.op.is_health_status(health_status_normal, info)): - return True - - if not self.op.is_running_status(running_status_sync, info): - msg = (_('Wait synchronize failed. 
Running status: %s.') % - info.get(constants.REPLICA_RUNNING_STATUS_KEY)) - LOG.error(msg) - raise exception.VolumeBackendAPIException(data=msg) - - return False - - if not interval: - interval = constants.DEFAULT_WAIT_INTERVAL - if not timeout: - timeout = constants.DEFAULT_WAIT_TIMEOUT - - huawei_utils.wait_for_condition(_replica_ready, - interval, - timeout) - - def wait_second_access(self, replica_id, access_level): - def _check_access(): - info = self.op.get_replica_info(replica_id) - if info.get('SECRESACCESS') == access_level: - return True - return False - - interval = constants.DEFAULT_REPLICA_WAIT_INTERVAL - timeout = constants.DEFAULT_REPLICA_WAIT_TIMEOUT - huawei_utils.wait_for_condition(_check_access, - interval, - timeout) - - def wait_expect_state(self, replica_id, - running_status, health_status=None, - interval=None, timeout=None): - def _check_state(): - info = self.op.get_replica_info(replica_id) - if self.op.is_running_status(running_status, info): - if (not health_status - or self.op.is_health_status(health_status, info)): - return True - return False - - if not interval: - interval = constants.DEFAULT_REPLICA_WAIT_INTERVAL - if not timeout: - timeout = constants.DEFAULT_REPLICA_WAIT_TIMEOUT - - huawei_utils.wait_for_condition(_check_state, interval, timeout) - - -def get_replication_driver_data(volume): - if volume.replication_driver_data: - return json.loads(volume.replication_driver_data) - return {} - - -def to_string(dict_data): - if dict_data: - return json.dumps(dict_data) - return '' - - -class ReplicaPairManager(object): - def __init__(self, local_client, rmt_client, conf): - self.local_client = local_client - self.rmt_client = rmt_client - self.conf = conf - - # Now just support one remote pool. 
- self.rmt_pool = self.rmt_client.storage_pools[0] - - self.local_op = PairOp(self.local_client) - self.local_driver = ReplicaCommonDriver(self.conf, self.local_op) - self.rmt_op = PairOp(self.rmt_client) - self.rmt_driver = ReplicaCommonDriver(self.conf, self.rmt_op) - - def try_get_remote_wwn(self): - try: - info = self.rmt_client.get_array_info() - return info.get('wwn') - except Exception as err: - LOG.warning('Get remote array wwn failed. Error: %s.', err) - return None - - def get_remote_device_by_wwn(self, wwn): - devices = {} - try: - devices = self.local_client.get_remote_devices() - except Exception as err: - LOG.warning('Get remote devices failed. Error: %s.', err) - - for device in devices: - if device.get('WWN') == wwn: - return device - - return {} - - def check_remote_available(self): - # We get device wwn in every check time. - # If remote array changed, we can run normally. - wwn = self.try_get_remote_wwn() - if not wwn: - return False - - device = self.get_remote_device_by_wwn(wwn) - # Check remote device is available to use. - # If array type is replication, 'ARRAYTYPE' == '1'. - # If health status is normal, 'HEALTHSTATUS' == '1'. 
- if (device and device.get('ARRAYTYPE') == '1' - and device.get('HEALTHSTATUS') == '1' - and device.get('RUNNINGSTATUS') == constants.STATUS_RUNNING): - return True - - return False - - def update_replica_capability(self, stats): - is_rmt_dev_available = self.check_remote_available() - if not is_rmt_dev_available: - LOG.warning('Remote device is unavailable.') - return stats - - for pool in stats['pools']: - pool['replication_enabled'] = True - pool['replication_type'] = ['sync', 'async'] - - return stats - - def get_rmt_dev_info(self): - wwn = self.try_get_remote_wwn() - if not wwn: - return None, None - - device = self.get_remote_device_by_wwn(wwn) - if not device: - return None, None - - return device.get('ID'), device.get('NAME') - - def build_rmt_lun_params(self, local_lun_info): - params = { - 'TYPE': '11', - 'NAME': local_lun_info['NAME'], - 'PARENTTYPE': '216', - 'PARENTID': self.rmt_client.get_pool_id(self.rmt_pool), - 'DESCRIPTION': local_lun_info['DESCRIPTION'], - 'ALLOCTYPE': local_lun_info['ALLOCTYPE'], - 'CAPACITY': local_lun_info['CAPACITY'], - 'WRITEPOLICY': self.conf.lun_write_type, - 'PREFETCHPOLICY': self.conf.lun_prefetch_type, - 'PREFETCHVALUE': self.conf.lun_prefetch_value, - 'DATATRANSFERPOLICY': self.conf.lun_policy, - 'READCACHEPOLICY': self.conf.lun_read_cache_policy, - 'WRITECACHEPOLICY': self.conf.lun_write_cache_policy, - } - - LOG.debug('Remote lun params: %s.', params) - return params - - def wait_volume_online(self, client, lun_info, - interval=None, timeout=None): - online_status = constants.STATUS_VOLUME_READY - if lun_info.get('RUNNINGSTATUS') == online_status: - return - - lun_id = lun_info['ID'] - - def _wait_online(): - info = client.get_lun_info(lun_id) - return info.get('RUNNINGSTATUS') == online_status - - if not interval: - interval = constants.DEFAULT_REPLICA_WAIT_INTERVAL - if not timeout: - timeout = constants.DEFAULT_REPLICA_WAIT_TIMEOUT - - huawei_utils.wait_for_condition(_wait_online, - interval, - timeout) - - def 
create_rmt_lun(self, local_lun_info): - # Create on rmt array. If failed, raise exception. - lun_params = self.build_rmt_lun_params(local_lun_info) - lun_info = self.rmt_client.create_lun(lun_params) - try: - self.wait_volume_online(self.rmt_client, lun_info) - except exception.VolumeBackendAPIException: - with excutils.save_and_reraise_exception(): - self.rmt_client.delete_lun(lun_info['ID']) - - return lun_info - - def create_replica(self, local_lun_info, replica_model): - """Create remote LUN and replication pair. - - Purpose: - 1. create remote lun - 2. create replication pair - 3. enable replication pair - """ - LOG.debug(('Create replication, local lun info: %(info)s, ' - 'replication model: %(model)s.'), - {'info': local_lun_info, 'model': replica_model}) - - local_lun_id = local_lun_info['ID'] - self.wait_volume_online(self.local_client, local_lun_info) - - # step1, create remote lun - rmt_lun_info = self.create_rmt_lun(local_lun_info) - rmt_lun_id = rmt_lun_info['ID'] - - # step2, get remote device info - rmt_dev_id, rmt_dev_name = self.get_rmt_dev_info() - if not rmt_lun_id or not rmt_dev_name: - self._delete_rmt_lun(rmt_lun_id) - msg = _('Get remote device info failed.') - LOG.error(msg) - raise exception.VolumeBackendAPIException(data=msg) - - # step3, create replication pair - try: - pair_info = self.local_op.create(local_lun_id, - rmt_lun_id, rmt_dev_id, - rmt_dev_name, replica_model) - pair_id = pair_info['ID'] - except Exception as err: - with excutils.save_and_reraise_exception(): - LOG.error('Create pair failed. Error: %s.', err) - self._delete_rmt_lun(rmt_lun_id) - - # step4, start sync manually. If replication type is sync, - # then wait for sync complete. - wait_complete = (replica_model == constants.REPLICA_SYNC_MODEL) - try: - self.local_driver.sync(pair_id, wait_complete) - except Exception as err: - with excutils.save_and_reraise_exception(): - LOG.error('Start synchronization failed. 
Error: %s.', err) - self._delete_pair(pair_id) - self._delete_rmt_lun(rmt_lun_id) - - model_update = {} - driver_data = {'pair_id': pair_id, - 'rmt_lun_id': rmt_lun_id, - 'rmt_lun_wwn': rmt_lun_info['WWN']} - model_update['replication_driver_data'] = to_string(driver_data) - model_update['replication_status'] = 'available' - LOG.debug('Create replication, return info: %s.', model_update) - return model_update - - def _delete_pair(self, pair_id): - if (not pair_id - or not self.local_client.check_pair_exist(pair_id)): - return - - self.local_driver.split(pair_id) - self.local_op.delete(pair_id) - - def _delete_rmt_lun(self, lun_id): - if lun_id and self.rmt_client.check_lun_exist(lun_id): - self.rmt_client.delete_lun(lun_id) - - def delete_replica(self, volume): - """Delete replication pair and remote lun. - - Purpose: - 1. delete replication pair - 2. delete remote_lun - """ - LOG.debug('Delete replication, volume: %s.', volume.id) - info = get_replication_driver_data(volume) - pair_id = info.get('pair_id') - if pair_id: - self._delete_pair(pair_id) - - # Delete remote_lun - rmt_lun_id = info.get('rmt_lun_id') - if rmt_lun_id: - self._delete_rmt_lun(rmt_lun_id) - - def failback(self, volumes): - """Failover volumes back to primary backend. - - The main steps: - 1. Switch the role of replication pairs. - 2. Copy the second LUN data back to primary LUN. - 3. Split replication pairs. - 4. Switch the role of replication pairs. - 5. Enable replications. 
- """ - volumes_update = [] - for v in volumes: - v_update = {} - v_update['volume_id'] = v.id - drv_data = get_replication_driver_data(v) - pair_id = drv_data.get('pair_id') - if not pair_id: - LOG.warning("No pair id in volume %s.", v.id) - v_update['updates'] = {'replication_status': 'error'} - volumes_update.append(v_update) - continue - - rmt_lun_id = drv_data.get('rmt_lun_id') - if not rmt_lun_id: - LOG.warning("No remote lun id in volume %s.", v.id) - v_update['updates'] = {'replication_status': 'error'} - volumes_update.append(v_update) - continue - - # Switch replication pair role, and start synchronize. - self.local_driver.enable(pair_id) - - # Wait for synchronize complete. - self.local_driver.wait_replica_ready(pair_id) - - # Split replication pair again - self.rmt_driver.failover(pair_id) - - # Switch replication pair role, and start synchronize. - self.rmt_driver.enable(pair_id) - - local_metadata = huawei_utils.get_lun_metadata(v) - new_drv_data = to_string( - {'pair_id': pair_id, - 'rmt_lun_id': local_metadata.get('huawei_lun_id'), - 'rmt_lun_wwn': local_metadata.get('huawei_lun_wwn')}) - location = huawei_utils.to_string( - huawei_lun_id=rmt_lun_id, - huawei_lun_wwn=drv_data.get('rmt_lun_wwn')) - - v_update['updates'] = {'provider_location': location, - 'replication_status': 'available', - 'replication_driver_data': new_drv_data} - volumes_update.append(v_update) - - return volumes_update - - def failover(self, volumes): - """Failover volumes back to secondary array. - - Split the replication pairs and make the secondary LUNs R&W. 
- """ - volumes_update = [] - for v in volumes: - v_update = {} - v_update['volume_id'] = v.id - drv_data = get_replication_driver_data(v) - pair_id = drv_data.get('pair_id') - if not pair_id: - LOG.warning("No pair id in volume %s.", v.id) - v_update['updates'] = {'replication_status': 'error'} - volumes_update.append(v_update) - continue - - rmt_lun_id = drv_data.get('rmt_lun_id') - if not rmt_lun_id: - LOG.warning("No remote lun id in volume %s.", v.id) - v_update['updates'] = {'replication_status': 'error'} - volumes_update.append(v_update) - continue - - self.rmt_driver.failover(pair_id) - - local_metadata = huawei_utils.get_lun_metadata(v) - new_drv_data = to_string( - {'pair_id': pair_id, - 'rmt_lun_id': local_metadata.get('huawei_lun_id'), - 'rmt_lun_wwn': local_metadata.get('huawei_lun_wwn')}) - location = huawei_utils.to_string( - huawei_lun_id=rmt_lun_id, - huawei_lun_wwn=drv_data.get('rmt_lun_wwn')) - - v_update['updates'] = {'provider_location': location, - 'replication_status': 'failed-over', - 'replication_driver_data': new_drv_data} - volumes_update.append(v_update) - - return volumes_update - - -def get_replication_opts(opts): - if opts.get('replication_type') == 'sync': - opts['replication_type'] = constants.REPLICA_SYNC_MODEL - else: - opts['replication_type'] = constants.REPLICA_ASYNC_MODEL - - return opts diff --git a/cinder/volume/drivers/huawei/rest_client.py b/cinder/volume/drivers/huawei/rest_client.py deleted file mode 100644 index 0843baa93..000000000 --- a/cinder/volume/drivers/huawei/rest_client.py +++ /dev/null @@ -1,2372 +0,0 @@ -# Copyright (c) 2016 Huawei Technologies Co., Ltd. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. 
You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import json -import re -import requests -import six -import time - -from oslo_log import log as logging -from oslo_utils import excutils - -from cinder import exception -from cinder.i18n import _ -from cinder import utils -from cinder.volume.drivers.huawei import constants -from cinder.volume.drivers.huawei import huawei_utils - -LOG = logging.getLogger(__name__) - - -class RestClient(object): - """Common class for Huawei OceanStor storage system.""" - - def __init__(self, configuration, san_address, san_user, san_password, - **kwargs): - self.configuration = configuration - self.san_address = san_address - self.san_user = san_user - self.san_password = san_password - self.storage_pools = kwargs.get('storage_pools', - self.configuration.storage_pools) - self.iscsi_info = kwargs.get('iscsi_info', - self.configuration.iscsi_info) - self.iscsi_default_target_ip = kwargs.get( - 'iscsi_default_target_ip', - self.configuration.iscsi_default_target_ip) - self.session = None - self.url = None - self.device_id = None - - def init_http_head(self): - self.url = None - self.session = requests.Session() - self.session.headers.update({ - "Connection": "keep-alive", - "Content-Type": "application/json"}) - self.session.verify = False - - def do_call(self, url, data, method, - calltimeout=constants.SOCKET_TIMEOUT, log_filter_flag=False): - """Send requests to Huawei storage server. - - Send HTTPS call, get response in JSON. - Convert response into Python Object and return it. 
- """ - if self.url: - url = self.url + url - - kwargs = {'timeout': calltimeout} - if data: - kwargs['data'] = json.dumps(data) - - if method in ('POST', 'PUT', 'GET', 'DELETE'): - func = getattr(self.session, method.lower()) - else: - msg = _("Request method %s is invalid.") % method - LOG.error(msg) - raise exception.VolumeBackendAPIException(data=msg) - - try: - res = func(url, **kwargs) - except Exception as err: - LOG.exception('Bad response from server: %(url)s.' - ' Error: %(err)s', {'url': url, 'err': err}) - return {"error": {"code": constants.ERROR_CONNECT_TO_SERVER, - "description": "Connect to server error."}} - - try: - res.raise_for_status() - except requests.HTTPError as exc: - return {"error": {"code": exc.response.status_code, - "description": six.text_type(exc)}} - - res_json = res.json() - if not log_filter_flag: - LOG.info('\n\n\n\nRequest URL: %(url)s\n\n' - 'Call Method: %(method)s\n\n' - 'Request Data: %(data)s\n\n' - 'Response Data:%(res)s\n\n', - {'url': url, - 'method': method, - 'data': data, - 'res': res_json}) - - return res_json - - def login(self): - """Login Huawei storage array.""" - device_id = None - for item_url in self.san_address: - url = item_url + "xx/sessions" - data = {"username": self.san_user, - "password": self.san_password, - "scope": "0"} - self.init_http_head() - result = self.do_call(url, data, 'POST', - calltimeout=constants.LOGIN_SOCKET_TIMEOUT, - log_filter_flag=True) - - if (result['error']['code'] != 0) or ("data" not in result): - LOG.error("Login error. 
URL: %(url)s\n" - "Reason: %(reason)s.", - {"url": item_url, "reason": result}) - continue - - LOG.debug('Login success: %(url)s', {'url': item_url}) - device_id = result['data']['deviceid'] - self.device_id = device_id - self.url = item_url + device_id - self.session.headers['iBaseToken'] = result['data']['iBaseToken'] - if (result['data']['accountstate'] - in (constants.PWD_EXPIRED, constants.PWD_RESET)): - self.logout() - msg = _("Password has expired or has been reset, " - "please change the password.") - LOG.error(msg) - raise exception.VolumeBackendAPIException(data=msg) - break - - if device_id is None: - msg = _("Failed to login with all rest URLs.") - LOG.error(msg) - raise exception.VolumeBackendAPIException(data=msg) - - return device_id - - def try_login(self): - try: - self.login() - except Exception as err: - LOG.warning('Login failed. Error: %s.', err) - - @utils.synchronized('huawei_cinder_call') - def call(self, url, data=None, method=None, log_filter_flag=False): - """Send requests to server. - - If fail, try another RestURL. 
- """ - device_id = None - old_url = self.url - result = self.do_call(url, data, method, - log_filter_flag=log_filter_flag) - error_code = result['error']['code'] - if (error_code == constants.ERROR_CONNECT_TO_SERVER - or error_code == constants.ERROR_UNAUTHORIZED_TO_SERVER): - LOG.error("Can't open the recent url, relogin.") - device_id = self.login() - - if device_id is not None: - LOG.debug('Replace URL: \n' - 'Old URL: %(old_url)s\n,' - 'New URL: %(new_url)s\n.', - {'old_url': old_url, - 'new_url': self.url}) - result = self.do_call(url, data, method, - log_filter_flag=log_filter_flag) - if result['error']['code'] in constants.RELOGIN_ERROR_PASS: - result['error']['code'] = 0 - return result - - def logout(self): - """Logout the session.""" - url = "/sessions" - if self.url: - result = self.do_call(url, None, "DELETE") - self._assert_rest_result(result, _('Logout session error.')) - - def _assert_rest_result(self, result, err_str): - if result['error']['code'] != 0: - msg = (_('%(err)s\nresult: %(res)s.') % {'err': err_str, - 'res': result}) - LOG.error(msg) - raise exception.VolumeBackendAPIException(data=msg) - - def _assert_data_in_result(self, result, msg): - if 'data' not in result: - err_msg = _('%s "data" is not in result.') % msg - LOG.error(err_msg) - raise exception.VolumeBackendAPIException(data=err_msg) - - def create_lun(self, lun_params): - # Set the mirror switch always on - lun_params['MIRRORPOLICY'] = '1' - url = "/lun" - result = self.call(url, lun_params, 'POST') - if result['error']['code'] == constants.ERROR_VOLUME_ALREADY_EXIST: - lun_id = self.get_lun_id_by_name(lun_params['NAME']) - if lun_id: - return self.get_lun_info(lun_id) - - msg = _('Create lun error.') - self._assert_rest_result(result, msg) - self._assert_data_in_result(result, msg) - - return result['data'] - - def check_lun_exist(self, lun_id, lun_wwn=None): - url = "/lun/" + lun_id - result = self.call(url, None, "GET") - error_code = result['error']['code'] - if error_code 
!= 0: - return False - - if lun_wwn and result['data']['WWN'] != lun_wwn: - LOG.debug("LUN ID %(id)s with WWN %(wwn)s does not exist on " - "the array.", {"id": lun_id, "wwn": lun_wwn}) - return False - - return True - - def delete_lun(self, lun_id): - url = "/lun/" + lun_id - data = {"TYPE": "11", - "ID": lun_id} - result = self.call(url, data, "DELETE") - self._assert_rest_result(result, _('Delete lun error.')) - - def get_all_pools(self): - url = "/storagepool" - result = self.call(url, None, "GET", log_filter_flag=True) - msg = _('Query resource pool error.') - self._assert_rest_result(result, msg) - self._assert_data_in_result(result, msg) - return result['data'] - - def get_pool_info(self, pool_name=None, pools=None): - info = {} - if not pool_name: - return info - - for pool in pools: - if pool_name.strip() != pool['NAME']: - continue - - if pool.get('USAGETYPE') == constants.FILE_SYSTEM_POOL_TYPE: - break - - info['ID'] = pool['ID'] - info['CAPACITY'] = pool.get('DATASPACE', pool['USERFREECAPACITY']) - info['TOTALCAPACITY'] = pool.get('USERTOTALCAPACITY', '0') - info['TIER0CAPACITY'] = pool.get('TIER0CAPACITY', '0') - info['TIER1CAPACITY'] = pool.get('TIER1CAPACITY', '0') - info['TIER2CAPACITY'] = pool.get('TIER2CAPACITY', '0') - - return info - - def get_pool_id(self, pool_name): - pools = self.get_all_pools() - pool_info = self.get_pool_info(pool_name, pools) - if not pool_info: - # The following code is to keep compatibility with old version of - # Huawei driver. - for pool_name in self.storage_pools: - pool_info = self.get_pool_info(pool_name, pools) - if pool_info: - break - - if not pool_info: - msg = _('Can not get pool info. 
pool: %s') % pool_name - LOG.error(msg) - raise exception.VolumeBackendAPIException(data=msg) - - return pool_info['ID'] - - def _get_id_from_result(self, result, name, key): - if 'data' in result: - for item in result['data']: - if name == item.get(key): - return item['ID'] - - def get_lun_id_by_name(self, name): - if not name: - return - - url = "/lun?filter=NAME::%s" % name - result = self.call(url, None, "GET") - self._assert_rest_result(result, _('Get lun id by name error.')) - - return self._get_id_from_result(result, name, 'NAME') - - def activate_snapshot(self, snapshot_id): - url = "/snapshot/activate" - data = ({"SNAPSHOTLIST": snapshot_id} - if type(snapshot_id) in (list, tuple) - else {"SNAPSHOTLIST": [snapshot_id]}) - result = self.call(url, data, 'POST') - self._assert_rest_result(result, _('Activate snapshot error.')) - - def create_snapshot(self, lun_id, snapshot_name, snapshot_description): - url = "/snapshot" - data = {"TYPE": "27", - "NAME": snapshot_name, - "PARENTTYPE": "11", - "DESCRIPTION": snapshot_description, - "PARENTID": lun_id} - result = self.call(url, data, 'POST') - - msg = _('Create snapshot error.') - self._assert_rest_result(result, msg) - self._assert_data_in_result(result, msg) - - return result['data'] - - def get_lun_id(self, volume, volume_name): - metadata = huawei_utils.get_lun_metadata(volume) - lun_id = (metadata.get('huawei_lun_id') or - self.get_lun_id_by_name(volume_name)) - - if not lun_id: - msg = (_("Can't find lun info on the array. 
" - "volume: %(id)s, lun name: %(name)s.") % - {'id': volume.id, 'name': volume_name}) - LOG.error(msg) - raise exception.VolumeBackendAPIException(data=msg) - return lun_id - - def check_snapshot_exist(self, snapshot_id): - url = "/snapshot/%s" % snapshot_id - result = self.call(url, None, "GET") - error_code = result['error']['code'] - if error_code != 0: - return False - - return True - - def stop_snapshot(self, snapshot_id): - url = "/snapshot/stop" - stopdata = {"ID": snapshot_id} - result = self.call(url, stopdata, "PUT") - self._assert_rest_result(result, _('Stop snapshot error.')) - - def delete_snapshot(self, snapshotid): - url = "/snapshot/%s" % snapshotid - data = {"TYPE": "27", "ID": snapshotid} - result = self.call(url, data, "DELETE") - self._assert_rest_result(result, _('Delete snapshot error.')) - - def get_snapshot_id_by_name(self, name): - if not name: - return - - url = "/snapshot?filter=NAME::%s" % name - description = 'The snapshot license file is unavailable.' - result = self.call(url, None, "GET") - if 'error' in result: - if description == result['error']['description']: - return - self._assert_rest_result(result, _('Get snapshot id error.')) - - return self._get_id_from_result(result, name, 'NAME') - - def create_luncopy(self, luncopyname, srclunid, tgtlunid, copyspeed): - """Create a luncopy.""" - url = "/luncopy" - if copyspeed not in constants.LUN_COPY_SPEED_TYPES: - LOG.warning('The copy speed %(copyspeed)s is not valid, ' - 'using default value %(default)s instead.', - {'copyspeed': copyspeed, - 'default': constants.LUN_COPY_SPEED_MEDIUM}) - copyspeed = constants.LUN_COPY_SPEED_MEDIUM - - data = {"TYPE": 219, - "NAME": luncopyname, - "DESCRIPTION": luncopyname, - "COPYSPEED": copyspeed, - "LUNCOPYTYPE": "1", - "SOURCELUN": ("INVALID;%s;INVALID;INVALID;INVALID" - % srclunid), - "TARGETLUN": ("INVALID;%s;INVALID;INVALID;INVALID" - % tgtlunid)} - result = self.call(url, data, 'POST') - - msg = _('Create luncopy error.') - 
self._assert_rest_result(result, msg) - self._assert_data_in_result(result, msg) - - return result['data']['ID'] - - def add_host_to_hostgroup(self, host_id): - """Associate host to hostgroup. - - If hostgroup doesn't exist, create one. - """ - hostgroup_name = constants.HOSTGROUP_PREFIX + host_id - hostgroup_id = self.create_hostgroup_with_check(hostgroup_name) - is_associated = self._is_host_associate_to_hostgroup(hostgroup_id, - host_id) - if not is_associated: - self._associate_host_to_hostgroup(hostgroup_id, host_id) - - return hostgroup_id - - def get_tgt_port_group(self, tgt_port_group): - """Find target portgroup id by target port group name.""" - url = "/portgroup?range=[0-8191]&TYPE=257" - result = self.call(url, None, "GET") - - msg = _('Find portgroup error.') - self._assert_rest_result(result, msg) - - return self._get_id_from_result(result, tgt_port_group, 'NAME') - - def _associate_portgroup_to_view(self, view_id, portgroup_id): - url = "/MAPPINGVIEW/CREATE_ASSOCIATE" - data = {"ASSOCIATEOBJTYPE": "257", - "ASSOCIATEOBJID": portgroup_id, - "TYPE": "245", - "ID": view_id} - result = self.call(url, data, "PUT") - self._assert_rest_result(result, _('Associate portgroup to mapping ' - 'view error.')) - - def _portgroup_associated(self, view_id, portgroup_id): - url = ("/mappingview/associate?TYPE=245&" - "ASSOCIATEOBJTYPE=257&ASSOCIATEOBJID=%s" % portgroup_id) - result = self.call(url, None, "GET") - self._assert_rest_result(result, _('Check portgroup associate error.')) - - if self._get_id_from_result(result, view_id, 'ID'): - return True - return False - - def do_mapping(self, lun_id, hostgroup_id, host_id, portgroup_id=None, - lun_type=constants.LUN_TYPE, hypermetro_lun=False): - """Add hostgroup and lungroup to mapping view.""" - lungroup_name = constants.LUNGROUP_PREFIX + host_id - mapping_view_name = constants.MAPPING_VIEW_PREFIX + host_id - lungroup_id = self._find_lungroup(lungroup_name) - view_id = self.find_mapping_view(mapping_view_name) - 
map_info = {} - - LOG.info( - 'do_mapping, lun_group: %(lun_group)s, ' - 'view_id: %(view_id)s, lun_id: %(lun_id)s.', - {'lun_group': lungroup_id, - 'view_id': view_id, - 'lun_id': lun_id}) - - try: - # Create lungroup and add LUN into to lungroup. - if lungroup_id is None: - lungroup_id = self._create_lungroup(lungroup_name) - is_associated = self._is_lun_associated_to_lungroup(lungroup_id, - lun_id, - lun_type) - if not is_associated: - self.associate_lun_to_lungroup(lungroup_id, lun_id, lun_type) - - if view_id is None: - view_id = self._add_mapping_view(mapping_view_name) - self._associate_hostgroup_to_view(view_id, hostgroup_id) - self._associate_lungroup_to_view(view_id, lungroup_id) - if portgroup_id: - self._associate_portgroup_to_view(view_id, portgroup_id) - - else: - if not self.hostgroup_associated(view_id, hostgroup_id): - self._associate_hostgroup_to_view(view_id, hostgroup_id) - if not self.lungroup_associated(view_id, lungroup_id): - self._associate_lungroup_to_view(view_id, lungroup_id) - if portgroup_id: - if not self._portgroup_associated(view_id, - portgroup_id): - self._associate_portgroup_to_view(view_id, - portgroup_id) - - if hypermetro_lun: - aval_luns = self.find_view_by_id(view_id) - map_info["lun_id"] = lun_id - map_info["view_id"] = view_id - map_info["aval_luns"] = aval_luns - - except Exception: - with excutils.save_and_reraise_exception(): - LOG.error( - 'Error occurred when adding hostgroup and lungroup to ' - 'view. 
Remove lun from lungroup now.') - self.remove_lun_from_lungroup(lungroup_id, lun_id, lun_type) - - return map_info - - def check_iscsi_initiators_exist_in_host(self, host_id): - url = "/iscsi_initiator?range=[0-256]&PARENTID=%s" % host_id - result = self.call(url, None, "GET") - self._assert_rest_result(result, 'Get host initiators info failed.') - if "data" in result: - return True - - return False - - def ensure_initiator_added(self, initiator_name, host_id): - added = self._initiator_is_added_to_array(initiator_name) - if not added: - self._add_initiator_to_array(initiator_name) - if not self.is_initiator_associated_to_host(initiator_name, host_id): - self._associate_initiator_to_host(initiator_name, - host_id) - - def _get_iscsi_tgt_port(self): - url = "/iscsidevicename" - result = self.call(url, None, 'GET') - - msg = _('Get iSCSI target port error.') - self._assert_rest_result(result, msg) - self._assert_data_in_result(result, msg) - - return result['data'][0]['CMO_ISCSI_DEVICE_NAME'] - - def find_hostgroup(self, groupname): - """Get the given hostgroup id.""" - url = "/hostgroup?range=[0-8191]" - result = self.call(url, None, "GET") - self._assert_rest_result(result, _('Get hostgroup information error.')) - - return self._get_id_from_result(result, groupname, 'NAME') - - def _find_lungroup(self, lungroup_name): - """Get the given hostgroup id.""" - url = "/lungroup?range=[0-8191]" - result = self.call(url, None, "GET") - self._assert_rest_result(result, _('Get lungroup information error.')) - - return self._get_id_from_result(result, lungroup_name, 'NAME') - - def create_hostgroup_with_check(self, hostgroup_name): - """Check if host exists on the array, or create it.""" - hostgroup_id = self.find_hostgroup(hostgroup_name) - if hostgroup_id: - LOG.info( - 'create_hostgroup_with_check. 
' - 'hostgroup name: %(name)s, ' - 'hostgroup id: %(id)s', - {'name': hostgroup_name, - 'id': hostgroup_id}) - return hostgroup_id - - try: - hostgroup_id = self._create_hostgroup(hostgroup_name) - except Exception: - LOG.info( - 'Failed to create hostgroup: %(name)s. ' - 'Please check if it exists on the array.', - {'name': hostgroup_name}) - hostgroup_id = self.find_hostgroup(hostgroup_name) - if hostgroup_id is None: - err_msg = (_( - 'Failed to create hostgroup: %(name)s. ' - 'Check if it exists on the array.') - % {'name': hostgroup_name}) - LOG.error(err_msg) - raise exception.VolumeBackendAPIException(data=err_msg) - - LOG.info( - 'create_hostgroup_with_check. ' - 'Create hostgroup success. ' - 'hostgroup name: %(name)s, ' - 'hostgroup id: %(id)s', - {'name': hostgroup_name, - 'id': hostgroup_id}) - return hostgroup_id - - def _create_hostgroup(self, hostgroup_name): - url = "/hostgroup" - data = {"TYPE": "14", "NAME": hostgroup_name} - result = self.call(url, data, 'POST') - - msg = _('Create hostgroup error.') - self._assert_rest_result(result, msg) - self._assert_data_in_result(result, msg) - - return result['data']['ID'] - - def _create_lungroup(self, lungroup_name): - url = "/lungroup" - data = {"DESCRIPTION": lungroup_name, - "APPTYPE": '0', - "GROUPTYPE": '0', - "NAME": lungroup_name} - result = self.call(url, data, 'POST') - - msg = _('Create lungroup error.') - self._assert_rest_result(result, msg) - self._assert_data_in_result(result, msg) - - return result['data']['ID'] - - def delete_lungroup(self, lungroup_id): - url = "/LUNGroup/" + lungroup_id - result = self.call(url, None, "DELETE") - self._assert_rest_result(result, _('Delete lungroup error.')) - - def lungroup_associated(self, view_id, lungroup_id): - url = ("/mappingview/associate?TYPE=245&" - "ASSOCIATEOBJTYPE=256&ASSOCIATEOBJID=%s" % lungroup_id) - result = self.call(url, None, "GET") - self._assert_rest_result(result, _('Check lungroup associate error.')) - - if 
self._get_id_from_result(result, view_id, 'ID'): - return True - return False - - def hostgroup_associated(self, view_id, hostgroup_id): - url = ("/mappingview/associate?TYPE=245&" - "ASSOCIATEOBJTYPE=14&ASSOCIATEOBJID=%s" % hostgroup_id) - result = self.call(url, None, "GET") - self._assert_rest_result(result, _('Check hostgroup associate error.')) - - if self._get_id_from_result(result, view_id, 'ID'): - return True - return False - - def get_host_lun_id(self, host_id, lun_id, lun_type=constants.LUN_TYPE): - cmd_type = 'lun' if lun_type == constants.LUN_TYPE else 'snapshot' - url = ("/%s/associate?TYPE=%s&ASSOCIATEOBJTYPE=21" - "&ASSOCIATEOBJID=%s" % (cmd_type, lun_type, host_id)) - result = self.call(url, None, "GET") - self._assert_rest_result(result, _('Find host lun id error.')) - - host_lun_id = 1 - if 'data' in result: - for item in result['data']: - if lun_id == item['ID']: - associate_data = item['ASSOCIATEMETADATA'] - try: - hostassoinfo = json.loads(associate_data) - host_lun_id = hostassoinfo['HostLUNID'] - break - except Exception as err: - LOG.error("JSON transfer data error. %s.", err) - raise - return host_lun_id - - def get_host_id_by_name(self, host_name): - """Get the given host ID.""" - url = "/host?range=[0-65535]" - result = self.call(url, None, "GET") - self._assert_rest_result(result, _('Find host in hostgroup error.')) - - return self._get_id_from_result(result, host_name, 'NAME') - - def add_host_with_check(self, host_name, host_name_before_hash): - host_id = self.get_host_id_by_name(host_name) - if host_id: - LOG.info( - 'add_host_with_check. ' - 'host name: %(name)s, ' - 'host id: %(id)s', - {'name': host_name, - 'id': host_id}) - return host_id - - try: - host_id = self._add_host(host_name, host_name_before_hash) - except Exception: - LOG.info( - 'Failed to create host: %(name)s. 
' - 'Check if it exists on the array.', - {'name': host_name}) - host_id = self.get_host_id_by_name(host_name) - if not host_id: - err_msg = (_( - 'Failed to create host: %(name)s. ' - 'Please check if it exists on the array.'), - {'name': host_name}) - LOG.error(err_msg) - raise exception.VolumeBackendAPIException(data=err_msg) - - LOG.info( - 'add_host_with_check. ' - 'create host success. ' - 'host name: %(name)s, ' - 'host id: %(id)s', - {'name': host_name, - 'id': host_id}) - return host_id - - def _add_host(self, hostname, host_name_before_hash): - """Add a new host.""" - url = "/host" - data = {"TYPE": "21", - "NAME": hostname, - "OPERATIONSYSTEM": "0", - "DESCRIPTION": host_name_before_hash} - result = self.call(url, data, 'POST') - self._assert_rest_result(result, _('Add new host error.')) - - if 'data' in result: - return result['data']['ID'] - - def _is_host_associate_to_hostgroup(self, hostgroup_id, host_id): - """Check whether the host is associated to the hostgroup.""" - url = ("/host/associate?TYPE=21&" - "ASSOCIATEOBJTYPE=14&ASSOCIATEOBJID=%s" % hostgroup_id) - - result = self.call(url, None, "GET") - self._assert_rest_result(result, _('Check hostgroup associate error.')) - - if self._get_id_from_result(result, host_id, 'ID'): - return True - - return False - - def _is_lun_associated_to_lungroup(self, lungroup_id, lun_id, - lun_type=constants.LUN_TYPE): - """Check whether the lun is associated to the lungroup.""" - cmd_type = 'lun' if lun_type == constants.LUN_TYPE else 'snapshot' - url = ("/%s/associate?TYPE=%s&" - "ASSOCIATEOBJTYPE=256&ASSOCIATEOBJID=%s" - % (cmd_type, lun_type, lungroup_id)) - - result = self.call(url, None, "GET") - self._assert_rest_result(result, _('Check lungroup associate error.')) - - if self._get_id_from_result(result, lun_id, 'ID'): - return True - - return False - - def _associate_host_to_hostgroup(self, hostgroup_id, host_id): - url = "/hostgroup/associate" - data = {"TYPE": "14", - "ID": hostgroup_id, - 
"ASSOCIATEOBJTYPE": "21", - "ASSOCIATEOBJID": host_id} - - result = self.call(url, data, 'POST') - self._assert_rest_result(result, _('Associate host to hostgroup ' - 'error.')) - - def associate_lun_to_lungroup(self, lungroup_id, lun_id, - lun_type=constants.LUN_TYPE): - """Associate lun to lungroup.""" - url = "/lungroup/associate" - data = {"ID": lungroup_id, - "ASSOCIATEOBJTYPE": lun_type, - "ASSOCIATEOBJID": lun_id} - result = self.call(url, data, 'POST') - self._assert_rest_result(result, _('Associate lun to lungroup error.')) - - def remove_lun_from_lungroup(self, lungroup_id, lun_id, - lun_type=constants.LUN_TYPE): - """Remove lun from lungroup.""" - url = ("/lungroup/associate?ID=%s&ASSOCIATEOBJTYPE=%s" - "&ASSOCIATEOBJID=%s" % (lungroup_id, lun_type, lun_id)) - - result = self.call(url, None, 'DELETE') - self._assert_rest_result( - result, _('Delete associated lun from lungroup error.')) - - def _initiator_is_added_to_array(self, ininame): - """Check whether the initiator is already added on the array.""" - url = "/iscsi_initiator?range=[0-256]" - result = self.call(url, None, "GET") - self._assert_rest_result(result, - _('Check initiator added to array error.')) - - if self._get_id_from_result(result, ininame, 'ID'): - return True - return False - - def is_initiator_associated_to_host(self, ininame, host_id): - """Check whether the initiator is associated to the host.""" - url = "/iscsi_initiator?range=[0-256]" - result = self.call(url, None, "GET") - self._assert_rest_result( - result, _('Check initiator associated to host error.')) - - for item in result.get('data'): - if item['ID'] == ininame: - if item['ISFREE'] == "true": - return False - if item['PARENTID'] == host_id: - return True - else: - msg = (_("Initiator %(ini)s has been added to another " - "host %(host)s.") % {"ini": ininame, - "host": item['PARENTNAME']}) - LOG.error(msg) - raise exception.VolumeBackendAPIException(data=msg) - return True - - def _add_initiator_to_array(self, 
initiator_name): - """Add a new initiator to storage device.""" - url = "/iscsi_initiator" - data = {"TYPE": "222", - "ID": initiator_name, - "USECHAP": "false"} - result = self.call(url, data, "POST") - self._assert_rest_result(result, - _('Add initiator to array error.')) - - def _add_initiator_to_host(self, initiator_name, host_id): - url = "/iscsi_initiator/" + initiator_name - data = {"TYPE": "222", - "ID": initiator_name, - "USECHAP": "false", - "PARENTTYPE": "21", - "PARENTID": host_id} - result = self.call(url, data, "PUT") - self._assert_rest_result(result, - _('Associate initiator to host error.')) - - def _associate_initiator_to_host(self, - initiator_name, - host_id): - """Associate initiator with the host.""" - chapinfo = self.find_chap_info(self.iscsi_info, - initiator_name) - multipath_type = self._find_alua_info(self.iscsi_info, - initiator_name) - if chapinfo: - LOG.info('Use CHAP when adding initiator to host.') - self._use_chap(chapinfo, initiator_name, host_id) - else: - self._add_initiator_to_host(initiator_name, host_id) - - if multipath_type: - LOG.info('Use ALUA when adding initiator to host.') - self._use_alua(initiator_name, multipath_type) - - def find_chap_info(self, iscsi_info, initiator_name): - """Find CHAP info from xml.""" - chapinfo = None - for ini in iscsi_info: - if ini['Name'] == initiator_name: - if 'CHAPinfo' in ini: - chapinfo = ini['CHAPinfo'] - break - - return chapinfo - - def _find_alua_info(self, iscsi_info, initiator_name): - """Find ALUA info from xml.""" - multipath_type = 0 - for ini in iscsi_info: - if ini['Name'] == initiator_name: - if 'ALUA' in ini: - if ini['ALUA'] != '1' and ini['ALUA'] != '0': - msg = (_( - 'Invalid ALUA value. 
' - 'ALUA value must be 1 or 0.')) - LOG.error(msg) - raise exception.InvalidInput(msg) - else: - multipath_type = ini['ALUA'] - break - return multipath_type - - def _use_chap(self, chapinfo, initiator_name, host_id): - """Use CHAP when adding initiator to host.""" - (chap_username, chap_password) = chapinfo.split(";") - - url = "/iscsi_initiator/" + initiator_name - data = {"TYPE": "222", - "USECHAP": "true", - "CHAPNAME": chap_username, - "CHAPPASSWORD": chap_password, - "ID": initiator_name, - "PARENTTYPE": "21", - "PARENTID": host_id} - result = self.call(url, data, "PUT", log_filter_flag=True) - msg = _('Use CHAP to associate initiator to host error. ' - 'Please check the CHAP username and password.') - self._assert_rest_result(result, msg) - - def _use_alua(self, initiator_name, multipath_type): - """Use ALUA when adding initiator to host.""" - url = "/iscsi_initiator" - data = {"ID": initiator_name, - "MULTIPATHTYPE": multipath_type} - result = self.call(url, data, "PUT") - - self._assert_rest_result( - result, _('Use ALUA to associate initiator to host error.')) - - def remove_chap(self, initiator_name): - """Remove CHAP when terminate connection.""" - url = "/iscsi_initiator" - data = {"USECHAP": "false", - "MULTIPATHTYPE": "0", - "ID": initiator_name} - result = self.call(url, data, "PUT") - - self._assert_rest_result(result, _('Remove CHAP error.')) - - def find_mapping_view(self, name): - """Find mapping view.""" - url = "/mappingview?range=[0-8191]" - result = self.call(url, None, "GET") - - msg = _('Find mapping view error.') - self._assert_rest_result(result, msg) - - return self._get_id_from_result(result, name, 'NAME') - - def _add_mapping_view(self, name): - url = "/mappingview" - data = {"NAME": name, "TYPE": "245"} - result = self.call(url, data, 'POST') - self._assert_rest_result(result, _('Add mapping view error.')) - - return result['data']['ID'] - - def _associate_hostgroup_to_view(self, view_id, hostgroup_id): - url = 
"/MAPPINGVIEW/CREATE_ASSOCIATE" - data = {"ASSOCIATEOBJTYPE": "14", - "ASSOCIATEOBJID": hostgroup_id, - "TYPE": "245", - "ID": view_id} - result = self.call(url, data, "PUT") - self._assert_rest_result(result, _('Associate host to mapping view ' - 'error.')) - - def _associate_lungroup_to_view(self, view_id, lungroup_id): - url = "/MAPPINGVIEW/CREATE_ASSOCIATE" - data = {"ASSOCIATEOBJTYPE": "256", - "ASSOCIATEOBJID": lungroup_id, - "TYPE": "245", - "ID": view_id} - - result = self.call(url, data, "PUT") - self._assert_rest_result( - result, _('Associate lungroup to mapping view error.')) - - def delete_lungroup_mapping_view(self, view_id, lungroup_id): - """Remove lungroup associate from the mapping view.""" - url = "/mappingview/REMOVE_ASSOCIATE" - data = {"ASSOCIATEOBJTYPE": "256", - "ASSOCIATEOBJID": lungroup_id, - "TYPE": "245", - "ID": view_id} - result = self.call(url, data, "PUT") - self._assert_rest_result(result, _('Delete lungroup from mapping view ' - 'error.')) - - def delete_hostgoup_mapping_view(self, view_id, hostgroup_id): - """Remove hostgroup associate from the mapping view.""" - url = "/mappingview/REMOVE_ASSOCIATE" - data = {"ASSOCIATEOBJTYPE": "14", - "ASSOCIATEOBJID": hostgroup_id, - "TYPE": "245", - "ID": view_id} - - result = self.call(url, data, "PUT") - self._assert_rest_result( - result, _('Delete hostgroup from mapping view error.')) - - def delete_portgroup_mapping_view(self, view_id, portgroup_id): - """Remove portgroup associate from the mapping view.""" - url = "/mappingview/REMOVE_ASSOCIATE" - data = {"ASSOCIATEOBJTYPE": "257", - "ASSOCIATEOBJID": portgroup_id, - "TYPE": "245", - "ID": view_id} - - result = self.call(url, data, "PUT") - self._assert_rest_result( - result, _('Delete portgroup from mapping view error.')) - - def delete_mapping_view(self, view_id): - """Remove mapping view from the storage.""" - url = "/mappingview/" + view_id - result = self.call(url, None, "DELETE") - self._assert_rest_result(result, _('Delete 
mapping view error.')) - - def get_obj_count_from_lungroup(self, lungroup_id): - """Get all objects count associated to the lungroup.""" - lun_count = self._get_obj_count_from_lungroup_by_type( - lungroup_id, constants.LUN_TYPE) - snapshot_count = self._get_obj_count_from_lungroup_by_type( - lungroup_id, constants.SNAPSHOT_TYPE) - return int(lun_count) + int(snapshot_count) - - def _get_obj_count_from_lungroup_by_type(self, lungroup_id, - lun_type=constants.LUN_TYPE): - cmd_type = 'lun' if lun_type == constants.LUN_TYPE else 'snapshot' - lunnum = 0 - if not lungroup_id: - return lunnum - - url = ("/%s/count?TYPE=%s&ASSOCIATEOBJTYPE=256&" - "ASSOCIATEOBJID=%s" % (cmd_type, lun_type, lungroup_id)) - result = self.call(url, None, "GET") - self._assert_rest_result(result, _('Find obj number error.')) - if 'data' in result: - lunnum = int(result['data']['COUNT']) - return lunnum - - def is_portgroup_associated_to_view(self, view_id, portgroup_id): - """Check whether the port group is associated to the mapping view.""" - url = ("/portgroup/associate?ASSOCIATEOBJTYPE=245&" - "ASSOCIATEOBJID=%s&range=[0-8191]" % view_id) - result = self.call(url, None, "GET") - self._assert_rest_result(result, _('Find portgroup from mapping view ' - 'error.')) - - if self._get_id_from_result(result, portgroup_id, 'ID'): - return True - return False - - def find_lungroup_from_map(self, view_id): - """Get lungroup from the given map""" - url = ("/mappingview/associate/lungroup?TYPE=256&" - "ASSOCIATEOBJTYPE=245&ASSOCIATEOBJID=%s" % view_id) - result = self.call(url, None, "GET") - self._assert_rest_result(result, _('Find lun group from mapping view ' - 'error.')) - lungroup_id = None - if 'data' in result: - # One map can have only one lungroup. 
- for item in result['data']: - lungroup_id = item['ID'] - - return lungroup_id - - def start_luncopy(self, luncopy_id): - """Start a LUNcopy.""" - url = "/LUNCOPY/start" - data = {"TYPE": "219", "ID": luncopy_id} - result = self.call(url, data, "PUT") - self._assert_rest_result(result, _('Start LUNcopy error.')) - - def _get_capacity(self, pool_name, result): - """Get free capacity and total capacity of the pool.""" - pool_info = self.get_pool_info(pool_name, result) - pool_capacity = {'total_capacity': 0.0, - 'free_capacity': 0.0} - - if pool_info: - total = float(pool_info['TOTALCAPACITY']) / constants.CAPACITY_UNIT - free = float(pool_info['CAPACITY']) / constants.CAPACITY_UNIT - pool_capacity['total_capacity'] = total - pool_capacity['free_capacity'] = free - - return pool_capacity - - def _get_disk_type(self, pool_name, result): - """Get disk type of the pool.""" - pool_info = self.get_pool_info(pool_name, result) - if not pool_info: - return None - - pool_disk = [] - for i, x in enumerate(['ssd', 'sas', 'nl_sas']): - if pool_info['TIER%dCAPACITY' % i] != '0': - pool_disk.append(x) - - if len(pool_disk) > 1: - pool_disk = ['mix'] - - return pool_disk[0] if pool_disk else None - - def get_luncopy_info(self, luncopy_id): - """Get LUNcopy information.""" - url = "/LUNCOPY?range=[0-1023]" - result = self.call(url, None, "GET") - self._assert_rest_result(result, _('Get LUNcopy information error.')) - - luncopyinfo = {} - if 'data' in result: - for item in result['data']: - if luncopy_id == item['ID']: - luncopyinfo['name'] = item['NAME'] - luncopyinfo['id'] = item['ID'] - luncopyinfo['state'] = item['HEALTHSTATUS'] - luncopyinfo['status'] = item['RUNNINGSTATUS'] - break - return luncopyinfo - - def delete_luncopy(self, luncopy_id): - """Delete a LUNcopy.""" - url = "/LUNCOPY/%s" % luncopy_id - result = self.call(url, None, "DELETE") - self._assert_rest_result(result, _('Delete LUNcopy error.')) - - def get_init_targ_map(self, wwns): - init_targ_map = {} - 
tgt_port_wwns = [] - for wwn in wwns: - tgtwwpns = self.get_fc_target_wwpns(wwn) - if not tgtwwpns: - continue - - init_targ_map[wwn] = tgtwwpns - for tgtwwpn in tgtwwpns: - if tgtwwpn not in tgt_port_wwns: - tgt_port_wwns.append(tgtwwpn) - - return (tgt_port_wwns, init_targ_map) - - def get_online_free_wwns(self): - """Get online free WWNs. - - If no new ports connected, return an empty list. - """ - url = "/fc_initiator?ISFREE=true&range=[0-8191]" - result = self.call(url, None, "GET") - - msg = _('Get connected free FC wwn error.') - self._assert_rest_result(result, msg) - - wwns = [] - if 'data' in result: - for item in result['data']: - if item['RUNNINGSTATUS'] == constants.FC_INIT_ONLINE: - wwns.append(item['ID']) - - return wwns - - def add_fc_port_to_host(self, host_id, wwn): - """Add a FC port to the host.""" - url = "/fc_initiator/" + wwn - data = {"TYPE": "223", - "ID": wwn, - "PARENTTYPE": 21, - "PARENTID": host_id} - result = self.call(url, data, "PUT") - self._assert_rest_result(result, _('Add FC port to host error.')) - - def _get_iscsi_port_info(self, ip): - """Get iscsi port info in order to build the iscsi target iqn.""" - url = "/eth_port" - result = self.call(url, None, "GET") - - msg = _('Get iSCSI port information error.') - self._assert_rest_result(result, msg) - self._assert_data_in_result(result, msg) - - iscsi_port_info = None - for item in result['data']: - if ip == item['IPV4ADDR']: - iscsi_port_info = item['LOCATION'] - break - - return iscsi_port_info - - def _get_tgt_iqn(self, iscsi_ip): - """Get target iSCSI iqn.""" - ip_info = self._get_iscsi_port_info(iscsi_ip) - iqn_prefix = self._get_iscsi_tgt_port() - if not ip_info: - err_msg = (_( - 'Get iSCSI port info error, please check the target IP ' - 'configured in huawei conf file.')) - LOG.error(err_msg) - raise exception.VolumeBackendAPIException(data=err_msg) - - LOG.debug('Request ip info is: %s.', ip_info) - split_list = ip_info.split(".") - newstr = split_list[1] + split_list[2] 
- LOG.info('New str info is: %s.', newstr) - - if ip_info: - if newstr[0] == 'A': - ctr = "0" - elif newstr[0] == 'B': - ctr = "1" - interface = '0' + newstr[1] - port = '0' + newstr[3] - iqn_suffix = ctr + '02' + interface + port - for i in range(0, len(iqn_suffix)): - if iqn_suffix[i] != '0': - iqn_suffix = iqn_suffix[i:] - break - iqn = iqn_prefix + ':' + iqn_suffix + ':' + iscsi_ip - LOG.info('_get_tgt_iqn: iSCSI target iqn is: %s.', iqn) - return iqn - - def get_fc_target_wwpns(self, wwn): - url = ("/host_link?INITIATOR_TYPE=223&INITIATOR_PORT_WWN=" + wwn) - result = self.call(url, None, "GET") - - msg = _('Get FC target wwpn error.') - self._assert_rest_result(result, msg) - - fc_wwpns = [] - if "data" in result: - for item in result['data']: - if wwn == item['INITIATOR_PORT_WWN']: - fc_wwpns.append(item['TARGET_PORT_WWN']) - - return fc_wwpns - - def update_volume_stats(self): - data = {} - data['pools'] = [] - result = self.get_all_pools() - for pool_name in self.storage_pools: - capacity = self._get_capacity(pool_name, result) - disk_type = self._get_disk_type(pool_name, result) - pool = {} - pool.update(dict( - location_info=self.device_id, - pool_name=pool_name, - total_capacity_gb=capacity['total_capacity'], - free_capacity_gb=capacity['free_capacity'], - reserved_percentage=self.configuration.safe_get( - 'reserved_percentage'), - max_over_subscription_ratio=self.configuration.safe_get( - 'max_over_subscription_ratio'), - )) - if disk_type: - pool['disk_type'] = disk_type - - data['pools'].append(pool) - return data - - def _find_qos_policy_info(self, policy_name): - url = "/ioclass" - result = self.call(url, None, "GET") - - msg = _('Get QoS policy error.') - self._assert_rest_result(result, msg) - - qos_info = {} - if 'data' in result: - for item in result['data']: - if policy_name == item['NAME']: - qos_info['ID'] = item['ID'] - lun_list = json.loads(item['LUNLIST']) - qos_info['LUNLIST'] = lun_list - qos_info['RUNNINGSTATUS'] = item['RUNNINGSTATUS'] 
- break - - return qos_info - - def _update_qos_policy_lunlist(self, lun_list, policy_id): - url = "/ioclass/" + policy_id - data = {"TYPE": "230", - "ID": policy_id, - "LUNLIST": lun_list} - - result = self.call(url, data, "PUT") - self._assert_rest_result(result, _('Update QoS policy error.')) - - def _get_tgt_ip_from_portgroup(self, portgroup_id): - target_ips = [] - url = ("/eth_port/associate?TYPE=213&ASSOCIATEOBJTYPE=257" - "&ASSOCIATEOBJID=%s" % portgroup_id) - result = self.call(url, None, "GET") - - msg = _('Get target IP error.') - self._assert_rest_result(result, msg) - self._assert_data_in_result(result, msg) - - if 'data' in result: - for item in result['data']: - if (item['IPV4ADDR'] and item['HEALTHSTATUS'] == - constants.STATUS_HEALTH - and item['RUNNINGSTATUS'] == constants.STATUS_RUNNING): - target_ip = item['IPV4ADDR'] - LOG.info('_get_tgt_ip_from_portgroup: Get ip: %s.', - target_ip) - target_ips.append(target_ip) - - return target_ips - - def get_iscsi_params(self, connector): - """Get target iSCSI params, including iqn, IP.""" - initiator = connector['initiator'] - multipath = connector['multipath'] - target_ips = [] - target_iqns = [] - temp_tgt_ips = [] - portgroup = None - portgroup_id = None - - if multipath: - for ini in self.iscsi_info: - if ini['Name'] == initiator: - portgroup = ini.get('TargetPortGroup') - if portgroup: - portgroup_id = self.get_tgt_port_group(portgroup) - temp_tgt_ips = self._get_tgt_ip_from_portgroup(portgroup_id) - valid_port_info = self._get_tgt_port_ip_from_rest() - valid_tgt_ips = valid_port_info - - for ip in temp_tgt_ips: - if ip in valid_tgt_ips: - target_ips.append(ip) - - if not target_ips: - msg = (_( - 'get_iscsi_params: No valid port in portgroup. 
' - 'portgroup_id: %(id)s, please check it on storage.') - % {'id': portgroup_id}) - LOG.error(msg) - raise exception.VolumeBackendAPIException(data=msg) - - else: - target_ips = self._get_target_ip(initiator) - - else: - target_ips = self._get_target_ip(initiator) - - # Deal with the remote tgt ip. - if 'remote_target_ip' in connector: - target_ips.append(connector['remote_target_ip']) - LOG.info('Get the default ip: %s.', target_ips) - - for ip in target_ips: - target_iqn = self._get_tgt_iqn_from_rest(ip) - if not target_iqn: - target_iqn = self._get_tgt_iqn(ip) - if target_iqn: - target_iqns.append(target_iqn) - - return (target_iqns, target_ips, portgroup_id) - - def _get_target_ip(self, initiator): - target_ips = [] - for ini in self.iscsi_info: - if ini['Name'] == initiator: - if ini.get('TargetIP'): - target_ips.append(ini.get('TargetIP')) - - # If not specify target IP for some initiators, use default IP. - if not target_ips: - default_target_ips = self.iscsi_default_target_ip - if default_target_ips: - target_ips.append(default_target_ips[0]) - - else: - msg = (_( - 'get_iscsi_params: Failed to get target IP ' - 'for initiator %(ini)s, please check config file.') - % {'ini': initiator}) - LOG.error(msg) - raise exception.VolumeBackendAPIException(data=msg) - - return target_ips - - def _get_tgt_port_ip_from_rest(self): - url = "/iscsi_tgt_port" - result = self.call(url, None, "GET") - info_list = [] - target_ips = [] - if result['error']['code'] != 0: - LOG.warning("Can't find target port info from rest.") - return target_ips - - elif not result['data']: - msg = (_( - "Can't find valid IP from rest, please check it on storage.")) - LOG.error(msg) - raise exception.VolumeBackendAPIException(data = msg) - - if 'data' in result: - for item in result['data']: - info_list.append(item['ID']) - - if not info_list: - LOG.warning("Can't find target port info from rest.") - return target_ips - - for info in info_list: - split_list = info.split(",") - info_before = 
split_list[0] - iqn_info = info_before.split("+") - target_iqn = iqn_info[1] - ip_info = target_iqn.split(":") - target_ip = ip_info[-1] - target_ips.append(target_ip) - return target_ips - - def _get_tgt_iqn_from_rest(self, target_ip): - url = "/iscsi_tgt_port" - result = self.call(url, None, "GET") - - target_iqn = None - if result['error']['code'] != 0: - LOG.warning("Can't find target iqn from rest.") - return target_iqn - ip_pattern = re.compile(r'\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3}') - if 'data' in result: - for item in result['data']: - ips = re.findall(ip_pattern, item['ID']) - for ip in ips: - if target_ip == ip: - target_iqn = item['ID'] - break - - if not target_iqn: - LOG.warning("Can't find target iqn from rest.") - return target_iqn - - split_list = target_iqn.split(",") - target_iqn_before = split_list[0] - - split_list_new = target_iqn_before.split("+") - target_iqn = split_list_new[1] - - return target_iqn - - def create_qos_policy(self, qos, lun_id): - # Get local time. - localtime = time.strftime('%Y%m%d%H%M%S', time.localtime(time.time())) - # Package QoS name. - qos_name = constants.QOS_NAME_PREFIX + lun_id + '_' + localtime - - data = {"TYPE": "230", - "NAME": qos_name, - "LUNLIST": ["%s" % lun_id], - "CLASSTYPE": "1", - "SCHEDULEPOLICY": "2", - "SCHEDULESTARTTIME": "1410969600", - "STARTTIME": "08:00", - "DURATION": "86400", - "CYCLESET": "[1,2,3,4,5,6,0]", - } - data.update(qos) - url = "/ioclass" - - result = self.call(url, data, 'POST') - self._assert_rest_result(result, _('Create QoS policy error.')) - - return result['data']['ID'] - - def delete_qos_policy(self, qos_id): - """Delete a QoS policy.""" - url = "/ioclass/" + qos_id - data = {"TYPE": "230", "ID": qos_id} - - result = self.call(url, data, 'DELETE') - self._assert_rest_result(result, _('Delete QoS policy error.')) - - def activate_deactivate_qos(self, qos_id, enablestatus): - """Activate or deactivate QoS. 
- - enablestatus: true (activate) - enbalestatus: false (deactivate) - """ - url = "/ioclass/active/" + qos_id - data = {"TYPE": 230, - "ID": qos_id, - "ENABLESTATUS": enablestatus} - result = self.call(url, data, "PUT") - self._assert_rest_result( - result, _('Activate or deactivate QoS error.')) - - def get_qos_info(self, qos_id): - """Get QoS information.""" - url = "/ioclass/" + qos_id - result = self.call(url, None, "GET") - self._assert_rest_result(result, _('Get QoS information error.')) - - return result['data'] - - def get_lun_list_in_qos(self, qos_id, qos_info): - """Get the lun list in QoS.""" - lun_list = [] - lun_string = qos_info['LUNLIST'][1:-1] - - for lun in lun_string.split(","): - str = lun[1:-1] - lun_list.append(str) - - return lun_list - - def remove_lun_from_qos(self, lun_id, lun_list, qos_id): - """Remove lun from QoS.""" - lun_list = [i for i in lun_list if i != lun_id] - url = "/ioclass/" + qos_id - data = {"LUNLIST": lun_list, - "TYPE": 230, - "ID": qos_id} - result = self.call(url, data, "PUT") - - msg = _('Remove lun from QoS error.') - self._assert_rest_result(result, msg) - self._assert_data_in_result(result, msg) - - def change_lun_priority(self, lun_id): - """Change lun priority to high.""" - url = "/lun/" + lun_id - data = {"TYPE": "11", - "ID": lun_id, - "IOPRIORITY": "3"} - - result = self.call(url, data, "PUT") - self._assert_rest_result(result, _('Change lun priority error.')) - - def change_lun_smarttier(self, lunid, smarttier_policy): - """Change lun smarttier policy.""" - url = "/lun/" + lunid - data = {"TYPE": "11", - "ID": lunid, - "DATATRANSFERPOLICY": smarttier_policy} - - result = self.call(url, data, "PUT") - self._assert_rest_result( - result, _('Change lun smarttier policy error.')) - - def get_qosid_by_lunid(self, lun_id): - """Get QoS id by lun id.""" - url = "/lun/" + lun_id - result = self.call(url, None, "GET") - self._assert_rest_result(result, _('Get QoS id by lun id error.')) - - return 
result['data']['IOCLASSID'] - - def get_lungroupids_by_lunid(self, lun_id, lun_type=constants.LUN_TYPE): - """Get lungroup ids by lun id.""" - url = ("/lungroup/associate?TYPE=256" - "&ASSOCIATEOBJTYPE=%s&ASSOCIATEOBJID=%s" % (lun_type, lun_id)) - - result = self.call(url, None, "GET") - self._assert_rest_result(result, _('Get lungroup id by lun id error.')) - - lungroup_ids = [] - if 'data' in result: - for item in result['data']: - lungroup_ids.append(item['ID']) - - return lungroup_ids - - def get_lun_info(self, lun_id, lun_type = constants.LUN_TYPE): - cmd_type = 'lun' if lun_type == constants.LUN_TYPE else 'snapshot' - url = ("/%s/%s" % (cmd_type, lun_id)) - result = self.call(url, None, "GET") - - msg = _('Get volume error.') - self._assert_rest_result(result, msg) - self._assert_data_in_result(result, msg) - - return result['data'] - - def get_snapshot_info(self, snapshot_id): - url = "/snapshot/" + snapshot_id - result = self.call(url, None, "GET") - - msg = _('Get snapshot error.') - self._assert_rest_result(result, msg) - self._assert_data_in_result(result, msg) - - return result['data'] - - def extend_lun(self, lun_id, new_volume_size): - url = "/lun/expand" - data = {"TYPE": 11, "ID": lun_id, - "CAPACITY": new_volume_size} - result = self.call(url, data, 'PUT') - - msg = _('Extend volume error.') - self._assert_rest_result(result, msg) - self._assert_data_in_result(result, msg) - - return result['data'] - - def create_lun_migration(self, src_id, dst_id, speed=2): - url = "/LUN_MIGRATION" - data = {"TYPE": '253', - "PARENTID": src_id, - "TARGETLUNID": dst_id, - "SPEED": speed, - "WORKMODE": 0} - - result = self.call(url, data, "POST") - msg = _('Create lun migration error.') - self._assert_rest_result(result, msg) - self._assert_data_in_result(result, msg) - - def get_lun_migration_task(self): - url = '/LUN_MIGRATION?range=[0-256]' - result = self.call(url, None, "GET") - self._assert_rest_result(result, _('Get lun migration task error.')) - return 
result - - def delete_lun_migration(self, src_id, dst_id): - url = '/LUN_MIGRATION/' + src_id - result = self.call(url, None, "DELETE") - msg = _('Delete lun migration error.') - self._assert_rest_result(result, msg) - self._assert_data_in_result(result, msg) - - def get_partition_id_by_name(self, name): - url = "/cachepartition" - result = self.call(url, None, "GET") - self._assert_rest_result(result, _('Get partition by name error.')) - - return self._get_id_from_result(result, name, 'NAME') - - def get_partition_info_by_id(self, partition_id): - - url = '/cachepartition/' + partition_id - result = self.call(url, None, "GET") - self._assert_rest_result(result, - _('Get partition by partition id error.')) - - return result['data'] - - def add_lun_to_partition(self, lun_id, partition_id): - url = "/lun/associate/cachepartition" - data = {"ID": partition_id, - "ASSOCIATEOBJTYPE": 11, - "ASSOCIATEOBJID": lun_id} - result = self.call(url, data, "POST") - self._assert_rest_result(result, _('Add lun to partition error.')) - - def remove_lun_from_partition(self, lun_id, partition_id): - url = ('/lun/associate/cachepartition?ID=' + partition_id - + '&ASSOCIATEOBJTYPE=11&ASSOCIATEOBJID=' + lun_id) - - result = self.call(url, None, "DELETE") - self._assert_rest_result(result, _('Remove lun from partition error.')) - - def get_cache_id_by_name(self, name): - url = "/SMARTCACHEPARTITION" - result = self.call(url, None, "GET") - self._assert_rest_result(result, _('Get cache by name error.')) - - return self._get_id_from_result(result, name, 'NAME') - - def get_cache_info_by_id(self, cacheid): - url = "/SMARTCACHEPARTITION/" + cacheid - data = {"TYPE": "273", - "ID": cacheid} - - result = self.call(url, data, "GET") - self._assert_rest_result( - result, _('Get smartcache by cache id error.')) - - return result['data'] - - def remove_lun_from_cache(self, lun_id, cache_id): - url = "/SMARTCACHEPARTITION/REMOVE_ASSOCIATE" - data = {"ID": cache_id, - "ASSOCIATEOBJTYPE": 11, - 
"ASSOCIATEOBJID": lun_id, - "TYPE": 273} - - result = self.call(url, data, "PUT") - self._assert_rest_result(result, _('Remove lun from cache error.')) - - def get_qos(self): - url = "/ioclass" - result = self.call(url, None, "GET") - self._assert_rest_result(result, _('Get QoS information error.')) - return result - - def find_available_qos(self, qos): - """"Find available QoS on the array.""" - qos_id = None - lun_list = [] - extra_qos = [i for i in constants.EXTRA_QOS_KEYS if i not in qos] - result = self.get_qos() - - if 'data' in result: - for items in result['data']: - qos_flag = 0 - extra_flag = False - if 'LATENCY' not in qos and items['LATENCY'] != '0': - extra_flag = True - else: - for item in items: - if item in extra_qos: - extra_flag = True - break - for key in qos: - if key not in items: - break - elif qos[key] != items[key]: - break - qos_flag = qos_flag + 1 - lun_num = len(items['LUNLIST'].split(",")) - qos_name = items['NAME'] - qos_status = items['RUNNINGSTATUS'] - # We use this QoS only if the LUNs in it is less than 64, - # created by OpenStack and does not contain filesystem, - # else we cannot add LUN to this QoS any more. 
- if (qos_flag == len(qos) - and not extra_flag - and lun_num < constants.MAX_LUN_NUM_IN_QOS - and qos_name.startswith(constants.QOS_NAME_PREFIX) - and qos_status == constants.STATUS_QOS_ACTIVE - and items['FSLIST'] == '[""]'): - qos_id = items['ID'] - lun_list = items['LUNLIST'] - break - - return (qos_id, lun_list) - - def add_lun_to_qos(self, qos_id, lun_id, lun_list): - """Add lun to QoS.""" - url = "/ioclass/" + qos_id - new_lun_list = [] - lun_list_string = lun_list[1:-1] - for lun_string in lun_list_string.split(","): - tmp_lun_id = lun_string[1:-1] - if '' != tmp_lun_id and tmp_lun_id != lun_id: - new_lun_list.append(tmp_lun_id) - - new_lun_list.append(lun_id) - - data = {"LUNLIST": new_lun_list, - "TYPE": 230, - "ID": qos_id} - result = self.call(url, data, "PUT") - msg = _('Associate lun to QoS error.') - self._assert_rest_result(result, msg) - self._assert_data_in_result(result, msg) - - def add_lun_to_cache(self, lun_id, cache_id): - url = "/SMARTCACHEPARTITION/CREATE_ASSOCIATE" - data = {"ID": cache_id, - "ASSOCIATEOBJTYPE": 11, - "ASSOCIATEOBJID": lun_id, - "TYPE": 273} - result = self.call(url, data, "PUT") - - self._assert_rest_result(result, _('Add lun to cache error.')) - - def get_array_info(self): - url = "/system/" - result = self.call(url, None, "GET", log_filter_flag=True) - self._assert_rest_result(result, _('Get array info error.')) - return result.get('data', None) - - def find_array_version(self): - info = self.get_array_info() - return info.get('PRODUCTVERSION', None) - - def remove_host(self, host_id): - url = "/host/%s" % host_id - result = self.call(url, None, "DELETE") - self._assert_rest_result(result, _('Remove host from array error.')) - - def delete_hostgroup(self, hostgroup_id): - url = "/hostgroup/%s" % hostgroup_id - result = self.call(url, None, "DELETE") - self._assert_rest_result(result, _('Delete hostgroup error.')) - - def remove_host_from_hostgroup(self, hostgroup_id, host_id): - url_subfix001 = 
"/host/associate?TYPE=14&ID=%s" % hostgroup_id - url_subfix002 = "&ASSOCIATEOBJTYPE=21&ASSOCIATEOBJID=%s" % host_id - url = url_subfix001 + url_subfix002 - result = self.call(url, None, "DELETE") - self._assert_rest_result(result, - _('Remove host from hostgroup error.')) - - def remove_iscsi_from_host(self, initiator): - url = "/iscsi_initiator/remove_iscsi_from_host" - data = {"TYPE": '222', - "ID": initiator} - result = self.call(url, data, "PUT") - self._assert_rest_result(result, _('Remove iscsi from host error.')) - - def get_host_online_fc_initiators(self, host_id): - url = "/fc_initiator?PARENTTYPE=21&PARENTID=%s" % host_id - result = self.call(url, None, "GET") - - initiators = [] - if 'data' in result: - for item in result['data']: - if (('PARENTID' in item) and (item['PARENTID'] == host_id) - and (item['RUNNINGSTATUS'] == constants.FC_INIT_ONLINE)): - initiators.append(item['ID']) - - return initiators - - def get_host_fc_initiators(self, host_id): - url = "/fc_initiator?PARENTTYPE=21&PARENTID=%s" % host_id - result = self.call(url, None, "GET") - - initiators = [] - if 'data' in result: - for item in result['data']: - if (('PARENTID' in item) and (item['PARENTID'] == host_id)): - initiators.append(item['ID']) - - return initiators - - def get_host_iscsi_initiators(self, host_id): - url = "/iscsi_initiator?PARENTTYPE=21&PARENTID=%s" % host_id - result = self.call(url, None, "GET") - - initiators = [] - if 'data' in result: - for item in result['data']: - if (('PARENTID' in item) and (item['PARENTID'] == host_id)): - initiators.append(item['ID']) - - return initiators - - def rename_lun(self, lun_id, new_name, description=None): - url = "/lun/" + lun_id - data = {"NAME": new_name} - if description: - data.update({"DESCRIPTION": description}) - result = self.call(url, data, "PUT") - msg = _('Rename lun on array error.') - self._assert_rest_result(result, msg) - self._assert_data_in_result(result, msg) - - def rename_snapshot(self, snapshot_id, new_name, 
description=None): - url = "/snapshot/" + snapshot_id - data = {"NAME": new_name} - if description: - data.update({"DESCRIPTION": description}) - result = self.call(url, data, "PUT") - msg = _('Rename snapshot on array error.') - self._assert_rest_result(result, msg) - self._assert_data_in_result(result, msg) - - def is_fc_initiator_associated_to_host(self, ininame): - """Check whether the initiator is associated to the host.""" - url = '/fc_initiator?range=[0-256]' - result = self.call(url, None, "GET") - self._assert_rest_result(result, - 'Check initiator associated to host error.') - - if "data" in result: - for item in result['data']: - if item['ID'] == ininame and item['ISFREE'] != "true": - return True - return False - - def remove_fc_from_host(self, initiator): - url = '/fc_initiator/remove_fc_from_host' - data = {"TYPE": '223', - "ID": initiator} - result = self.call(url, data, "PUT") - self._assert_rest_result(result, _('Remove fc from host error.')) - - def check_fc_initiators_exist_in_host(self, host_id): - url = "/fc_initiator?range=[0-256]&PARENTID=%s" % host_id - result = self.call(url, None, "GET") - self._assert_rest_result(result, _('Get host initiators info failed.')) - if 'data' in result: - return True - - return False - - def _fc_initiator_is_added_to_array(self, ininame): - """Check whether the fc initiator is already added on the array.""" - url = "/fc_initiator/" + ininame - result = self.call(url, None, "GET") - error_code = result['error']['code'] - if error_code != 0: - return False - - return True - - def _add_fc_initiator_to_array(self, ininame): - """Add a fc initiator to storage device.""" - url = '/fc_initiator/' - data = {"TYPE": '223', - "ID": ininame} - result = self.call(url, data, 'POST') - self._assert_rest_result(result, _('Add fc initiator to array error.')) - - def ensure_fc_initiator_added(self, initiator_name, host_id): - added = self._fc_initiator_is_added_to_array(initiator_name) - if not added: - 
self._add_fc_initiator_to_array(initiator_name) - # Just add, no need to check whether have been added. - self.add_fc_port_to_host(host_id, initiator_name) - - def get_fc_ports_on_array(self): - url = '/fc_port' - result = self.call(url, None, "GET") - msg = _('Get FC ports from array error.') - self._assert_rest_result(result, msg) - - return result['data'] - - def get_fc_ports_from_contr(self, contr): - port_list_from_contr = [] - location = [] - data = self.get_fc_ports_on_array() - for item in data: - location = item['PARENTID'].split('.') - if (location[0][1] == contr) and (item['RUNNINGSTATUS'] == - constants.FC_PORT_CONNECTED): - port_list_from_contr.append(item['WWN']) - return port_list_from_contr - - def get_hyper_domain_id(self, domain_name): - url = "/HyperMetroDomain?range=[0-32]" - result = self.call(url, None, "GET") - domain_id = None - if "data" in result: - for item in result['data']: - if domain_name == item['NAME']: - domain_id = item['ID'] - break - - msg = _('get_hyper_domain_id error.') - self._assert_rest_result(result, msg) - return domain_id - - def create_hypermetro(self, hcp_param): - url = "/HyperMetroPair" - result = self.call(url, hcp_param, "POST") - - msg = _('create_hypermetro_pair error.') - self._assert_rest_result(result, msg) - self._assert_data_in_result(result, msg) - return result['data'] - - def delete_hypermetro(self, metro_id): - url = "/HyperMetroPair/" + metro_id - result = self.call(url, None, "DELETE") - - msg = _('delete_hypermetro error.') - self._assert_rest_result(result, msg) - - def sync_hypermetro(self, metro_id): - url = "/HyperMetroPair/synchronize_hcpair" - - data = {"ID": metro_id, - "TYPE": "15361"} - result = self.call(url, data, "PUT") - - msg = _('sync_hypermetro error.') - self._assert_rest_result(result, msg) - - def stop_hypermetro(self, metro_id): - url = '/HyperMetroPair/disable_hcpair' - - data = {"ID": metro_id, - "TYPE": "15361"} - result = self.call(url, data, "PUT") - - msg = 
_('stop_hypermetro error.') - self._assert_rest_result(result, msg) - - def get_hypermetro_by_id(self, metro_id): - url = "/HyperMetroPair/" + metro_id - result = self.call(url, None, "GET") - - msg = _('get_hypermetro_by_id error.') - self._assert_rest_result(result, msg) - self._assert_data_in_result(result, msg) - return result['data'] - - def check_hypermetro_exist(self, metro_id): - url = "/HyperMetroPair/" + metro_id - result = self.call(url, None, "GET") - error_code = result['error']['code'] - - if (error_code == constants.ERROR_CONNECT_TO_SERVER - or error_code == constants.ERROR_UNAUTHORIZED_TO_SERVER): - LOG.error("Can not open the recent url, login again.") - self.login() - result = self.call(url, None, "GET") - - error_code = result['error']['code'] - if (error_code == constants.ERROR_CONNECT_TO_SERVER - or error_code == constants.ERROR_UNAUTHORIZED_TO_SERVER): - msg = _("check_hypermetro_exist error.") - LOG.error(msg) - raise exception.VolumeBackendAPIException(data=msg) - - if error_code != 0: - return False - - return True - - def change_hostlun_id(self, map_info, hostlun_id): - url = "/mappingview" - view_id = six.text_type(map_info['view_id']) - lun_id = six.text_type(map_info['lun_id']) - hostlun_id = six.text_type(hostlun_id) - data = {"TYPE": 245, - "ID": view_id, - "ASSOCIATEOBJTYPE": 11, - "ASSOCIATEOBJID": lun_id, - "ASSOCIATEMETADATA": [{"LUNID": lun_id, - "hostLUNId": hostlun_id}]} - - result = self.call(url, data, "PUT") - - msg = 'change hostlun id error.' 
- self._assert_rest_result(result, msg) - - def find_view_by_id(self, view_id): - url = "/MAPPINGVIEW/" + view_id - result = self.call(url, None, "GET") - - msg = _('Change hostlun id error.') - self._assert_rest_result(result, msg) - if 'data' in result: - return result["data"]["AVAILABLEHOSTLUNIDLIST"] - - def get_metrogroup_by_name(self, name): - url = "/HyperMetro_ConsistentGroup?type='15364'" - result = self.call(url, None, "GET") - - msg = _('Get hypermetro group by name error.') - self._assert_rest_result(result, msg) - return self._get_id_from_result(result, name, 'NAME') - - def get_metrogroup_by_id(self, id): - url = "/HyperMetro_ConsistentGroup/" + id - result = self.call(url, None, "GET") - - msg = _('Get hypermetro group by id error.') - self._assert_rest_result(result, msg) - self._assert_data_in_result(result, msg) - return result['data'] - - def create_metrogroup(self, name, description, domain_id): - url = "/HyperMetro_ConsistentGroup" - data = {"NAME": name, - "TYPE": "15364", - "DESCRIPTION": description, - "RECOVERYPOLICY": "1", - "SPEED": "2", - "PRIORITYSTATIONTYPE": "0", - "DOMAINID": domain_id} - result = self.call(url, data, "POST") - - msg = _('create hypermetro group error.') - self._assert_rest_result(result, msg) - if 'data' in result: - return result["data"]["ID"] - - def delete_metrogroup(self, metrogroup_id): - url = "/HyperMetro_ConsistentGroup/" + metrogroup_id - result = self.call(url, None, "DELETE") - - msg = _('Delete hypermetro group error.') - self._assert_rest_result(result, msg) - - def get_metrogroup(self, metrogroup_id): - url = "/HyperMetro_ConsistentGroup/" + metrogroup_id - result = self.call(url, None, "GET") - - msg = _('Get hypermetro group error.') - self._assert_rest_result(result, msg) - - def stop_metrogroup(self, metrogroup_id): - url = "/HyperMetro_ConsistentGroup/stop" - data = {"TYPE": "15364", - "ID": metrogroup_id - } - result = self.call(url, data, "PUT") - - msg = _('stop hypermetro group error.') - 
self._assert_rest_result(result, msg) - - def sync_metrogroup(self, metrogroup_id): - url = "/HyperMetro_ConsistentGroup/sync" - data = {"TYPE": "15364", - "ID": metrogroup_id - } - result = self.call(url, data, "PUT") - - msg = _('sync hypermetro group error.') - self._assert_rest_result(result, msg) - - def add_metro_to_metrogroup(self, metrogroup_id, metro_id): - url = "/hyperMetro/associate/pair" - data = {"TYPE": "15364", - "ID": metrogroup_id, - "ASSOCIATEOBJTYPE": "15361", - "ASSOCIATEOBJID": metro_id} - result = self.call(url, data, "POST") - - msg = _('Add hypermetro to metrogroup error.') - self._assert_rest_result(result, msg) - - def remove_metro_from_metrogroup(self, metrogroup_id, metro_id): - url = "/hyperMetro/associate/pair" - data = {"TYPE": "15364", - "ID": metrogroup_id, - "ASSOCIATEOBJTYPE": "15361", - "ASSOCIATEOBJID": metro_id} - result = self.call(url, data, "DELETE") - - msg = _('Delete hypermetro from metrogroup error.') - self._assert_rest_result(result, msg) - - def get_hypermetro_pairs(self): - url = "/HyperMetroPair?range=[0-4095]" - result = self.call(url, None, "GET") - msg = _('Get HyperMetroPair error.') - self._assert_rest_result(result, msg) - - return result.get('data', []) - - def get_split_mirrors(self): - url = "/splitmirror?range=[0-8191]" - result = self.call(url, None, "GET") - if result['error']['code'] == constants.NO_SPLITMIRROR_LICENSE: - msg = _('License is unavailable.') - raise exception.VolumeBackendAPIException(data=msg) - msg = _('Get SplitMirror error.') - self._assert_rest_result(result, msg) - - return result.get('data', []) - - def get_target_luns(self, id): - url = ("/SPLITMIRRORTARGETLUN/targetLUN?TYPE=228&PARENTID=%s&" - "PARENTTYPE=220") % id - result = self.call(url, None, "GET") - msg = _('Get target LUN of SplitMirror error.') - self._assert_rest_result(result, msg) - - target_luns = [] - for item in result.get('data', []): - target_luns.append(item.get('ID')) - return target_luns - - def 
get_migration_task(self): - url = "/LUN_MIGRATION?range=[0-256]" - result = self.call(url, None, "GET") - if result['error']['code'] == constants.NO_MIGRATION_LICENSE: - msg = _('License is unavailable.') - raise exception.VolumeBackendAPIException(data=msg) - msg = _('Get migration task error.') - self._assert_rest_result(result, msg) - - return result.get('data', []) - - def is_lun_in_mirror(self, name): - if not name: - return False - - url = "/lun?filter=NAME::%s" % name - result = self.call(url, None, "GET") - self._assert_rest_result(result, _('Get volume by name error.')) - for item in result.get('data', []): - rss_obj = item.get('HASRSSOBJECT') - if rss_obj: - rss_obj = json.loads(rss_obj) - if rss_obj.get('LUNMirror') == 'TRUE': - return True - return False - - def get_portgs_by_portid(self, port_id): - portgs = [] - if not port_id: - return portgs - url = ("/portgroup/associate/fc_port?TYPE=257&ASSOCIATEOBJTYPE=212&" - "ASSOCIATEOBJID=%s") % port_id - result = self.call(url, None, "GET") - self._assert_rest_result(result, _('Get port groups by port error.')) - for item in result.get("data", []): - portgs.append(item["ID"]) - return portgs - - def get_views_by_portg(self, portg_id): - views = [] - if not portg_id: - return views - url = ("/mappingview/associate/portgroup?TYPE=245&ASSOCIATEOBJTYPE=" - "257&ASSOCIATEOBJID=%s") % portg_id - result = self.call(url, None, "GET") - self._assert_rest_result(result, _('Get views by port group error.')) - for item in result.get("data", []): - views.append(item["ID"]) - return views - - def get_lungroup_by_view(self, view_id): - if not view_id: - return None - url = ("/lungroup/associate/mappingview?TYPE=256&ASSOCIATEOBJTYPE=" - "245&ASSOCIATEOBJID=%s") % view_id - result = self.call(url, None, "GET") - self._assert_rest_result(result, _('Get LUN group by view error.')) - for item in result.get("data", []): - # In fact, there is just one lungroup in a view. 
- return item["ID"] - - def get_portgroup_by_view(self, view_id): - if not view_id: - return None - url = ("/portgroup/associate/mappingview?TYPE=257&ASSOCIATEOBJTYPE=" - "245&ASSOCIATEOBJID=%s") % view_id - result = self.call(url, None, "GET") - self._assert_rest_result(result, _('Get port group by view error.')) - return result.get("data", []) - - def get_fc_ports_by_portgroup(self, portg_id): - ports = {} - if not portg_id: - return ports - url = ("/fc_port/associate/portgroup?TYPE=212&ASSOCIATEOBJTYPE=257" - "&ASSOCIATEOBJID=%s") % portg_id - result = self.call(url, None, "GET") - self._assert_rest_result(result, _('Get FC ports by port group ' - 'error.')) - for item in result.get("data", []): - ports[item["WWN"]] = item["ID"] - return ports - - def create_portg(self, portg_name, description=""): - url = "/PortGroup" - data = {"DESCRIPTION": description, - "NAME": portg_name, - "TYPE": 257} - result = self.call(url, data, "POST") - self._assert_rest_result(result, _('Create port group error.')) - if "data" in result: - return result['data']['ID'] - - def add_port_to_portg(self, portg_id, port_id): - url = "/port/associate/portgroup" - data = {"ASSOCIATEOBJID": port_id, - "ASSOCIATEOBJTYPE": 212, - "ID": portg_id, - "TYPE": 257} - result = self.call(url, data, "POST") - self._assert_rest_result(result, _('Add port to port group error.')) - - def delete_portgroup(self, portg_id): - url = "/PortGroup/%s" % portg_id - result = self.call(url, None, "DELETE") - self._assert_rest_result(result, _('Delete port group error.')) - - def remove_port_from_portgroup(self, portg_id, port_id): - url = (("/port/associate/portgroup?ID=%(portg_id)s&TYPE=257&" - "ASSOCIATEOBJTYPE=212&ASSOCIATEOBJID=%(port_id)s") - % {"portg_id": portg_id, "port_id": port_id}) - result = self.call(url, None, "DELETE") - self._assert_rest_result(result, _('Remove port from port group' - ' error.')) - - def get_all_engines(self): - url = "/storageengine" - result = self.call(url, None, "GET") - 
self._assert_rest_result(result, _('Get engines error.')) - - return result.get("data", []) - - def get_portg_info(self, portg_id): - url = "/portgroup/%s" % portg_id - result = self.call(url, None, "GET") - self._assert_rest_result(result, _('Get port group error.')) - - return result.get("data", {}) - - def append_portg_desc(self, portg_id, description): - portg_info = self.get_portg_info(portg_id) - new_description = portg_info.get('DESCRIPTION') + ',' + description - url = "/portgroup/%s" % portg_id - data = {"DESCRIPTION": new_description, - "ID": portg_id, - "TYPE": 257} - result = self.call(url, data, "PUT") - self._assert_rest_result(result, _('Append port group description' - ' error.')) - - def get_ports_by_portg(self, portg_id): - wwns = [] - url = ("/fc_port/associate?TYPE=213&ASSOCIATEOBJTYPE=257" - "&ASSOCIATEOBJID=%s" % portg_id) - result = self.call(url, None, "GET") - - msg = _('Get ports by port group error.') - self._assert_rest_result(result, msg) - for item in result.get('data', []): - wwns.append(item['WWN']) - return wwns - - def get_remote_devices(self): - url = "/remote_device" - result = self.call(url, None, "GET", log_filter_flag=True) - self._assert_rest_result(result, _('Get remote devices error.')) - return result.get('data', []) - - def create_pair(self, pair_params): - url = "/REPLICATIONPAIR" - result = self.call(url, pair_params, "POST") - - msg = _('Create replication error.') - self._assert_rest_result(result, msg) - self._assert_data_in_result(result, msg) - return result['data'] - - def get_pair_by_id(self, pair_id): - url = "/REPLICATIONPAIR/" + pair_id - result = self.call(url, None, "GET") - - msg = _('Get pair failed.') - self._assert_rest_result(result, msg) - return result.get('data', {}) - - def switch_pair(self, pair_id): - url = '/REPLICATIONPAIR/switch' - data = {"ID": pair_id, - "TYPE": "263"} - result = self.call(url, data, "PUT") - - msg = _('Switch over pair error.') - self._assert_rest_result(result, msg) - - def 
split_pair(self, pair_id): - url = '/REPLICATIONPAIR/split' - data = {"ID": pair_id, - "TYPE": "263"} - result = self.call(url, data, "PUT") - - msg = _('Split pair error.') - self._assert_rest_result(result, msg) - - def delete_pair(self, pair_id, force=False): - url = "/REPLICATIONPAIR/" + pair_id - data = None - if force: - data = {"ISLOCALDELETE": force} - - result = self.call(url, data, "DELETE") - - msg = _('delete_replication error.') - self._assert_rest_result(result, msg) - - def sync_pair(self, pair_id): - url = "/REPLICATIONPAIR/sync" - data = {"ID": pair_id, - "TYPE": "263"} - result = self.call(url, data, "PUT") - - msg = _('Sync pair error.') - self._assert_rest_result(result, msg) - - def check_pair_exist(self, pair_id): - url = "/REPLICATIONPAIR/" + pair_id - result = self.call(url, None, "GET") - return result['error']['code'] == 0 - - def set_pair_second_access(self, pair_id, access): - url = "/REPLICATIONPAIR/" + pair_id - data = {"ID": pair_id, - "SECRESACCESS": access} - result = self.call(url, data, "PUT") - - msg = _('Set pair secondary access error.') - self._assert_rest_result(result, msg) - - def is_host_associated_to_hostgroup(self, host_id): - url = "/host/" + host_id - result = self.call(url, None, "GET") - data = result.get('data') - if data is not None: - return data.get('ISADD2HOSTGROUP') == 'true' - return False - - def _get_object_count(self, obj_name): - url = "/" + obj_name + "/count" - result = self.call(url, None, "GET", log_filter_flag=True) - - if result['error']['code'] != 0: - raise - - if result.get("data"): - return result.get("data").get("COUNT") diff --git a/cinder/volume/drivers/huawei/smartx.py b/cinder/volume/drivers/huawei/smartx.py deleted file mode 100644 index b1d35093f..000000000 --- a/cinder/volume/drivers/huawei/smartx.py +++ /dev/null @@ -1,240 +0,0 @@ -# Copyright (c) 2016 Huawei Technologies Co., Ltd. -# All Rights Reserved. 
-# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from oslo_log import log as logging -from oslo_utils import excutils - -from cinder import context -from cinder import exception -from cinder.i18n import _ -from cinder import utils -from cinder.volume.drivers.huawei import constants -from cinder.volume import qos_specs - -LOG = logging.getLogger(__name__) - - -class SmartQos(object): - def __init__(self, client): - self.client = client - - @staticmethod - def get_qos_by_volume_type(volume_type): - # We prefer the qos_specs association - # and override any existing extra-specs settings - # if present. - if not volume_type: - return {} - - qos_specs_id = volume_type.get('qos_specs_id') - if not qos_specs_id: - return {} - - qos = {} - io_type_flag = None - ctxt = context.get_admin_context() - consumer = qos_specs.get_qos_specs(ctxt, qos_specs_id)['consumer'] - if consumer == 'front-end': - return {} - - kvs = qos_specs.get_qos_specs(ctxt, qos_specs_id)['specs'] - LOG.info('The QoS sepcs is: %s.', kvs) - for k, v in kvs.items(): - if k not in constants.HUAWEI_VALID_KEYS: - continue - if k != 'IOType' and int(v) <= 0: - msg = _('QoS config is wrong. 
%s must > 0.') % k - LOG.error(msg) - raise exception.InvalidInput(reason=msg) - if k == 'IOType': - if v not in ['0', '1', '2']: - msg = _('Illegal value specified for IOTYPE: 0, 1, or 2.') - LOG.error(msg) - raise exception.InvalidInput(reason=msg) - io_type_flag = 1 - qos[k.upper()] = v - else: - qos[k.upper()] = v - - if not io_type_flag: - msg = (_('QoS policy must specify for IOTYPE: 0, 1, or 2, ' - 'QoS policy: %(qos_policy)s ') % {'qos_policy': qos}) - LOG.error(msg) - raise exception.InvalidInput(reason=msg) - - # QoS policy must specify for IOTYPE and another qos_specs. - if len(qos) < 2: - msg = (_('QoS policy must specify for IOTYPE and another ' - 'qos_specs, QoS policy: %(qos_policy)s.') - % {'qos_policy': qos}) - LOG.error(msg) - raise exception.InvalidInput(reason=msg) - - for upper_limit in constants.UPPER_LIMIT_KEYS: - for lower_limit in constants.LOWER_LIMIT_KEYS: - if upper_limit in qos and lower_limit in qos: - msg = (_('QoS policy upper_limit and lower_limit ' - 'conflict, QoS policy: %(qos_policy)s.') - % {'qos_policy': qos}) - LOG.error(msg) - raise exception.InvalidInput(reason=msg) - - return qos - - def _is_high_priority(self, qos): - """Check QoS priority.""" - for key, value in qos.items(): - if (key.find('MIN') == 0) or (key.find('LATENCY') == 0): - return True - - return False - - @utils.synchronized('huawei_qos', external=True) - def add(self, qos, lun_id): - policy_id = None - try: - # Check QoS priority. - if self._is_high_priority(qos): - self.client.change_lun_priority(lun_id) - # Create QoS policy and activate it. 
- version = self.client.find_array_version() - if version >= constants.ARRAY_VERSION: - (qos_id, lun_list) = self.client.find_available_qos(qos) - if qos_id: - self.client.add_lun_to_qos(qos_id, lun_id, lun_list) - else: - policy_id = self.client.create_qos_policy(qos, lun_id) - self.client.activate_deactivate_qos(policy_id, True) - else: - policy_id = self.client.create_qos_policy(qos, lun_id) - self.client.activate_deactivate_qos(policy_id, True) - except exception.VolumeBackendAPIException: - with excutils.save_and_reraise_exception(): - if policy_id is not None: - self.client.delete_qos_policy(policy_id) - - @utils.synchronized('huawei_qos', external=True) - def remove(self, qos_id, lun_id): - qos_info = self.client.get_qos_info(qos_id) - lun_list = self.client.get_lun_list_in_qos(qos_id, qos_info) - if len(lun_list) <= 1: - qos_status = qos_info['RUNNINGSTATUS'] - # 2: Active status. - if qos_status != constants.STATUS_QOS_INACTIVE: - self.client.activate_deactivate_qos(qos_id, False) - self.client.delete_qos_policy(qos_id) - else: - self.client.remove_lun_from_qos(lun_id, lun_list, qos_id) - - -class SmartPartition(object): - def __init__(self, client): - self.client = client - - def add(self, opts, lun_id): - if opts['smartpartition'] != 'true': - return - if not opts['partitionname']: - raise exception.InvalidInput( - reason=_('Partition name is None, please set ' - 'smartpartition:partitionname in key.')) - - partition_id = self.client.get_partition_id_by_name( - opts['partitionname']) - if not partition_id: - raise exception.InvalidInput( - reason=(_('Can not find partition id by name %(name)s.') - % {'name': opts['partitionname']})) - - self.client.add_lun_to_partition(lun_id, partition_id) - - -class SmartCache(object): - def __init__(self, client): - self.client = client - - def add(self, opts, lun_id): - if opts['smartcache'] != 'true': - return - if not opts['cachename']: - raise exception.InvalidInput( - reason=_('Cache name is None, please set ' - 
'smartcache:cachename in key.')) - - cache_id = self.client.get_cache_id_by_name(opts['cachename']) - if not cache_id: - raise exception.InvalidInput( - reason=(_('Can not find cache id by cache name %(name)s.') - % {'name': opts['cachename']})) - - self.client.add_lun_to_cache(lun_id, cache_id) - - -class SmartX(object): - def get_smartx_specs_opts(self, opts): - # Check that smarttier is 0/1/2/3 - opts = self.get_smarttier_opts(opts) - opts = self.get_smartthin_opts(opts) - opts = self.get_smartcache_opts(opts) - opts = self.get_smartpartition_opts(opts) - return opts - - def get_smarttier_opts(self, opts): - if opts['smarttier'] == 'true': - if not opts['policy']: - opts['policy'] = '1' - elif opts['policy'] not in ['0', '1', '2', '3']: - raise exception.InvalidInput( - reason=(_('Illegal value specified for smarttier: ' - 'set to either 0, 1, 2, or 3.'))) - else: - opts['policy'] = '0' - - return opts - - def get_smartthin_opts(self, opts): - if opts['thin_provisioning_support'] == 'true': - if opts['thick_provisioning_support'] == 'true': - raise exception.InvalidInput( - reason=(_('Illegal value specified for thin: ' - 'Can not set thin and thick at the same time.'))) - else: - opts['LUNType'] = constants.THIN_LUNTYPE - if opts['thick_provisioning_support'] == 'true': - opts['LUNType'] = constants.THICK_LUNTYPE - - return opts - - def get_smartcache_opts(self, opts): - if opts['smartcache'] == 'true': - if not opts['cachename']: - raise exception.InvalidInput( - reason=_('Cache name is None, please set ' - 'smartcache:cachename in key.')) - else: - opts['cachename'] = None - - return opts - - def get_smartpartition_opts(self, opts): - if opts['smartpartition'] == 'true': - if not opts['partitionname']: - raise exception.InvalidInput( - reason=_('Partition name is None, please set ' - 'smartpartition:partitionname in key.')) - else: - opts['partitionname'] = None - - return opts diff --git a/cinder/volume/drivers/ibm/__init__.py 
b/cinder/volume/drivers/ibm/__init__.py deleted file mode 100644 index e69de29bb..000000000 diff --git a/cinder/volume/drivers/ibm/flashsystem_common.py b/cinder/volume/drivers/ibm/flashsystem_common.py deleted file mode 100644 index c37d4d4f5..000000000 --- a/cinder/volume/drivers/ibm/flashsystem_common.py +++ /dev/null @@ -1,1184 +0,0 @@ -# Copyright 2015 IBM Corporation. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -# - -""" -Volume driver for IBM FlashSystem storage systems. - -Limitations: -1. Cinder driver only works when open_access_enabled=off. - -""" - -import re -import string - -from oslo_concurrency import processutils -from oslo_config import cfg -from oslo_log import log as logging -from oslo_service import loopingcall -from oslo_utils import excutils -from oslo_utils import strutils -from oslo_utils import units -import six - -from cinder import context -from cinder import exception -from cinder.i18n import _ -from cinder import utils -from cinder.volume import configuration -from cinder.volume import driver -from cinder.volume.drivers.san import san -from cinder.volume import utils as volume_utils -from cinder.volume import volume_types - -LOG = logging.getLogger(__name__) - -FLASHSYSTEM_VOLPOOL_NAME = 'mdiskgrp0' -FLASHSYSTEM_VOL_IOGRP = 0 - -flashsystem_opts = [ - cfg.StrOpt('flashsystem_connection_protocol', - default='FC', - help='Connection protocol should be FC. 
' - '(Default is FC.)'), - cfg.BoolOpt('flashsystem_multihostmap_enabled', - default=True, - help='Allows vdisk to multi host mapping. ' - '(Default is True)') -] - -CONF = cfg.CONF -CONF.register_opts(flashsystem_opts, group=configuration.SHARED_CONF_GROUP) - - -class FlashSystemDriver(san.SanDriver, - driver.ManageableVD, - driver.BaseVD): - """IBM FlashSystem volume driver. - - Version history: - - .. code-block:: none - - 1.0.0 - Initial driver - 1.0.1 - Code clean up - 1.0.2 - Add lock into vdisk map/unmap, connection - initialize/terminate - 1.0.3 - Initial driver for iSCSI - 1.0.4 - Split Flashsystem driver into common and FC - 1.0.5 - Report capability of volume multiattach - 1.0.6 - Fix bug #1469581, add I/T mapping check in - terminate_connection - 1.0.7 - Fix bug #1505477, add host name check in - _find_host_exhaustive for FC - 1.0.8 - Fix bug #1572743, multi-attach attribute - should not be hardcoded, only in iSCSI - 1.0.9 - Fix bug #1570574, Cleanup host resource - leaking, changes only in iSCSI - 1.0.10 - Fix bug #1585085, add host name check in - _find_host_exhaustive for iSCSI - 1.0.11 - Update driver to use ABC metaclasses - 1.0.12 - Update driver to support Manage/Unmanage - existing volume - """ - - VERSION = "1.0.12" - - def __init__(self, *args, **kwargs): - super(FlashSystemDriver, self).__init__(*args, **kwargs) - self.configuration.append_config_values(flashsystem_opts) - self._storage_nodes = {} - self._protocol = None - self._context = None - self._system_name = None - self._system_id = None - self._check_lock_interval = 5 - self._vdisk_copy_in_progress = set() - self._vdisk_copy_lock = None - - def _ssh(self, ssh_cmd, check_exit_code=True): - try: - return self._run_ssh(ssh_cmd, check_exit_code) - except processutils.ProcessExecutionError as e: - msg = (_('CLI Exception output:\n command: %(cmd)s\n ' - 'stdout: %(out)s\n stderr: %(err)s') - % {'cmd': ssh_cmd, 'out': e.stdout, - 'err': e.stderr}) - LOG.error(msg) - raise 
exception.VolumeBackendAPIException(data=msg) - - def _append_dict(self, dict_, key, value): - key, value = key.strip(), value.strip() - obj = dict_.get(key, None) - if obj is None: - dict_[key] = value - elif isinstance(obj, list): - obj.append(value) - dict_[key] = obj - else: - dict_[key] = [obj, value] - return dict_ - - def _assert_ssh_return(self, test, fun, ssh_cmd, out, err): - self._driver_assert(test, - (_('%(fun)s: Failed with unexpected CLI output.\n ' - 'Command: %(cmd)s\n stdout: %(out)s\n ' - 'stderr: %(err)s') - % {'fun': fun, 'cmd': ssh_cmd, - 'out': six.text_type(out), - 'err': six.text_type(err)})) - - def _build_default_params(self): - return {'protocol': self.configuration.flashsystem_connection_protocol} - - def _build_initiator_target_map(self, initiator_wwpns, target_wwpns): - map = {} - for i_wwpn in initiator_wwpns: - idx = six.text_type(i_wwpn) - map[idx] = [] - for t_wwpn in target_wwpns: - map[idx].append(t_wwpn) - return map - - def _check_vdisk_params(self, params): - raise NotImplementedError() - - def _connector_to_hostname_prefix(self, connector): - """Translate connector info to storage system host name. - - Translate a host's name and IP to the prefix of its hostname on the - storage subsystem. We create a host name from the host and - IP address, replacing any invalid characters (at most 55 characters), - and adding a random 8-character suffix to avoid collisions. The total - length should be at most 63 characters. 
- - """ - - # Build cleanup translation tables for host names - invalid_ch_in_host = '' - for num in range(0, 128): - ch = six.text_type(chr(num)) - if not ch.isalnum() and ch not in [' ', '.', '-', '_']: - invalid_ch_in_host = invalid_ch_in_host + ch - - host_name = connector['host'] - if isinstance(host_name, six.text_type): - unicode_host_name_filter = {ord(six.text_type(char)): u'-' - for char in invalid_ch_in_host} - host_name = host_name.translate(unicode_host_name_filter) - elif isinstance(host_name, str): - string_host_name_filter = string.maketrans( - invalid_ch_in_host, '-' * len(invalid_ch_in_host)) - host_name = host_name.translate(string_host_name_filter) - else: - msg = _('_create_host: Can not translate host name. Host name ' - 'is not unicode or string.') - LOG.error(msg) - raise exception.NoValidBackend(reason=msg) - - host_name = six.text_type(host_name) - - # FlashSystem family doesn't like hostname that starts with number. - if not re.match('^[A-Za-z]', host_name): - host_name = '_' + host_name - - return host_name[:55] - - def _copy_vdisk_data(self, src_vdisk_name, src_vdisk_id, - dest_vdisk_name, dest_vdisk_id): - """Copy data from src vdisk to dest vdisk. - - To be able to copy data between vdisks, we must ensure that both - vdisks have been mapped to host. If vdisk has not been mapped, - it must be mapped firstly. When data copy completed, vdisk - should be restored to previous mapped or non-mapped status. 
- """ - - LOG.debug('enter: _copy_vdisk_data: %(src)s -> %(dest)s.', - {'src': src_vdisk_name, 'dest': dest_vdisk_name}) - - connector = utils.brick_get_connector_properties() - (src_map, src_lun_id) = self._is_vdisk_map( - src_vdisk_name, connector) - (dest_map, dest_lun_id) = self._is_vdisk_map( - dest_vdisk_name, connector) - - src_map_device = None - src_properties = None - dest_map_device = None - dest_properties = None - - try: - if not src_map: - src_lun_id = self._map_vdisk_to_host(src_vdisk_name, - connector) - if not dest_map: - dest_lun_id = self._map_vdisk_to_host(dest_vdisk_name, - connector) - src_properties = self._get_vdisk_map_properties( - connector, src_lun_id, src_vdisk_name, - src_vdisk_id, self._get_vdisk_params(None)) - src_map_device = self._scan_device(src_properties) - - dest_properties = self._get_vdisk_map_properties( - connector, dest_lun_id, dest_vdisk_name, - dest_vdisk_id, self._get_vdisk_params(None)) - dest_map_device = self._scan_device(dest_properties) - - src_vdisk_attr = self._get_vdisk_attributes(src_vdisk_name) - - # vdisk capacity is bytes, translate into MB - size_in_mb = int(src_vdisk_attr['capacity']) / units.Mi - volume_utils.copy_volume( - src_map_device['path'], - dest_map_device['path'], - size_in_mb, - self.configuration.volume_dd_blocksize) - except Exception: - with excutils.save_and_reraise_exception(): - LOG.error('Failed to copy %(src)s to %(dest)s.', - {'src': src_vdisk_name, 'dest': dest_vdisk_name}) - finally: - if not dest_map: - self._unmap_vdisk_from_host(dest_vdisk_name, connector) - self._remove_device(dest_properties, dest_map_device) - if not src_map: - self._unmap_vdisk_from_host(src_vdisk_name, connector) - self._remove_device(src_properties, src_map_device) - - LOG.debug( - 'leave: _copy_vdisk_data: %(src)s -> %(dest)s.', - {'src': src_vdisk_name, 'dest': dest_vdisk_name}) - - def _create_and_copy_vdisk_data(self, src_vdisk_name, src_vdisk_id, - dest_vdisk_name, dest_vdisk_id): - vdisk_attr = 
self._get_vdisk_attributes(src_vdisk_name) - self._driver_assert( - vdisk_attr is not None, - (_('_create_and_copy_vdisk_data: Failed to get attributes for ' - 'vdisk %s.') % src_vdisk_name)) - - self._create_vdisk(dest_vdisk_name, vdisk_attr['capacity'], 'b', None) - - # create a timer to lock vdisk that will be used to data copy - timer = loopingcall.FixedIntervalLoopingCall( - self._set_vdisk_copy_in_progress, - [src_vdisk_name, dest_vdisk_name]) - timer.start(interval=self._check_lock_interval).wait() - - try: - self._copy_vdisk_data(src_vdisk_name, src_vdisk_id, - dest_vdisk_name, dest_vdisk_id) - finally: - self._unset_vdisk_copy_in_progress( - [src_vdisk_name, dest_vdisk_name]) - - def _create_host(self, connector): - raise NotImplementedError() - - def _create_vdisk(self, name, size, unit, opts): - """Create a new vdisk.""" - - LOG.debug('enter: _create_vdisk: vdisk %s.', name) - - ssh_cmd = ['svctask', 'mkvdisk', '-name', name, '-mdiskgrp', - FLASHSYSTEM_VOLPOOL_NAME, '-iogrp', - six.text_type(FLASHSYSTEM_VOL_IOGRP), - '-size', size, '-unit', unit] - out, err = self._ssh(ssh_cmd) - self._assert_ssh_return(out.strip(), '_create_vdisk', - ssh_cmd, out, err) - - # Ensure that the output is as expected - match_obj = re.search( - r'Virtual Disk, id \[([0-9]+)\], successfully created', out) - - self._driver_assert( - match_obj is not None, - (_('_create_vdisk %(name)s - did not find ' - 'success message in CLI output.\n ' - 'stdout: %(out)s\n stderr: %(err)s') - % {'name': name, 'out': six.text_type(out), - 'err': six.text_type(err)})) - - LOG.debug('leave: _create_vdisk: vdisk %s.', name) - - def _delete_host(self, host_name): - """Delete a host on the storage system.""" - - LOG.debug('enter: _delete_host: host %s.', host_name) - - ssh_cmd = ['svctask', 'rmhost', host_name] - out, err = self._ssh(ssh_cmd) - # No output should be returned from rmhost - self._assert_ssh_return( - (not out.strip()), - '_delete_host', ssh_cmd, out, err) - - LOG.debug('leave: 
_delete_host: host %s.', host_name) - - def _delete_vdisk(self, name, force): - """Deletes existing vdisks.""" - - LOG.debug('enter: _delete_vdisk: vdisk %s.', name) - - # Try to delete volume only if found on the storage - vdisk_defined = self._is_vdisk_defined(name) - if not vdisk_defined: - LOG.warning('warning: Tried to delete vdisk %s but ' - 'it does not exist.', name) - return - - ssh_cmd = ['svctask', 'rmvdisk', '-force', name] - if not force: - ssh_cmd.remove('-force') - out, err = self._ssh(ssh_cmd) - # No output should be returned from rmvdisk - self._assert_ssh_return( - (not out.strip()), - ('_delete_vdisk %(name)s') % {'name': name}, - ssh_cmd, out, err) - - LOG.debug('leave: _delete_vdisk: vdisk %s.', name) - - def _driver_assert(self, assert_condition, exception_message): - """Internal assertion mechanism for CLI output.""" - if not assert_condition: - LOG.error(exception_message) - raise exception.VolumeBackendAPIException(data=exception_message) - - def _execute_command_and_parse_attributes(self, ssh_cmd): - """Execute command on the FlashSystem and parse attributes. - - Exception is raised if the information from the system - can not be obtained. - - """ - - LOG.debug( - 'enter: _execute_command_and_parse_attributes: ' - 'command: %s.', six.text_type(ssh_cmd)) - - try: - out, err = self._ssh(ssh_cmd) - except processutils.ProcessExecutionError: - LOG.warning('Failed to run command: %s.', ssh_cmd) - # Does not raise exception when command encounters error. - # Only return and the upper logic decides what to do. - return None - - self._assert_ssh_return( - out, - '_execute_command_and_parse_attributes', ssh_cmd, out, err) - - attributes = {} - for attrib_line in out.split('\n'): - # If '!' 
not found, return the string and two empty strings - attrib_name, foo, attrib_value = attrib_line.partition('!') - if attrib_name is not None and attrib_name.strip(): - self._append_dict(attributes, attrib_name, attrib_value) - - LOG.debug( - 'leave: _execute_command_and_parse_attributes: ' - 'command: %(cmd)s attributes: %(attr)s.', - {'cmd': six.text_type(ssh_cmd), - 'attr': six.text_type(attributes)}) - - return attributes - - def _find_host_exhaustive(self, connector, hosts): - raise NotImplementedError() - - def _get_hdr_dic(self, header, row, delim): - """Return CLI row data as a dictionary indexed by names from header. - - The strings are converted to columns using the delimiter in delim. - """ - - attributes = header.split(delim) - values = row.split(delim) - self._driver_assert( - len(values) == len(attributes), - (_('_get_hdr_dic: attribute headers and values do not match.\n ' - 'Headers: %(header)s\n Values: %(row)s.') - % {'header': six.text_type(header), 'row': six.text_type(row)})) - dic = {a: v for a, v in zip(attributes, values)} - return dic - - def _get_host_from_connector(self, connector): - """List the hosts defined in the storage. - - Return the host name with the given connection info, or None if there - is no host fitting that information. 
- - """ - - LOG.debug('enter: _get_host_from_connector: %s.', connector) - - # Get list of host in the storage - ssh_cmd = ['svcinfo', 'lshost', '-delim', '!'] - out, err = self._ssh(ssh_cmd) - - if not out.strip(): - return None - - # If we have FC information, we have a faster lookup option - hostname = None - - host_lines = out.strip().split('\n') - self._assert_ssh_return( - host_lines, - '_get_host_from_connector', ssh_cmd, out, err) - header = host_lines.pop(0).split('!') - self._assert_ssh_return( - 'name' in header, - '_get_host_from_connector', ssh_cmd, out, err) - name_index = header.index('name') - hosts = [x.split('!')[name_index] for x in host_lines] - hostname = self._find_host_exhaustive(connector, hosts) - - LOG.debug('leave: _get_host_from_connector: host %s.', hostname) - - return hostname - - def _get_hostvdisk_mappings(self, host_name): - """Return the defined storage mappings for a host.""" - - return_data = {} - ssh_cmd = ['svcinfo', 'lshostvdiskmap', '-delim', '!', host_name] - out, err = self._ssh(ssh_cmd) - - mappings = out.strip().split('\n') - if mappings: - header = mappings.pop(0) - for mapping_line in mappings: - mapping_data = self._get_hdr_dic(header, mapping_line, '!') - return_data[mapping_data['vdisk_name']] = mapping_data - - return return_data - - def _get_node_data(self): - """Get and verify node configuration.""" - - # Get storage system name and id - ssh_cmd = ['svcinfo', 'lssystem', '-delim', '!'] - attributes = self._execute_command_and_parse_attributes(ssh_cmd) - if not attributes or not ('name' in attributes): - msg = _('Could not get system name.') - LOG.error(msg) - raise exception.VolumeBackendAPIException(data=msg) - self._system_name = attributes['name'] - self._system_id = attributes['id'] - - # Validate value of open_access_enabled flag, for now only - # support when open_access_enabled is off - if not attributes or not ('open_access_enabled' in attributes) or ( - attributes['open_access_enabled'] != 'off'): - msg 
= _('open_access_enabled is not off.') - LOG.error(msg) - raise exception.VolumeBackendAPIException(data=msg) - - # Validate that the array exists - pool = FLASHSYSTEM_VOLPOOL_NAME - ssh_cmd = ['svcinfo', 'lsmdiskgrp', '-bytes', '-delim', '!', pool] - attributes = self._execute_command_and_parse_attributes(ssh_cmd) - if not attributes: - msg = _('Unable to parse attributes.') - LOG.error(msg) - raise exception.InvalidInput(reason=msg) - if not ('status' in attributes) or ( - attributes['status'] == 'offline'): - msg = (_('Array does not exist or is offline. ' - 'Current status of array is %s.') - % attributes['status']) - LOG.error(msg) - raise exception.InvalidInput(reason=msg) - - # Get the iSCSI names of the FlashSystem nodes - ssh_cmd = ['svcinfo', 'lsnode', '-delim', '!'] - out, err = self._ssh(ssh_cmd) - self._assert_ssh_return( - out.strip(), '_get_config_data', ssh_cmd, out, err) - - nodes = out.strip().splitlines() - self._assert_ssh_return(nodes, '_get_node_data', ssh_cmd, out, err) - header = nodes.pop(0) - for node_line in nodes: - try: - node_data = self._get_hdr_dic(header, node_line, '!') - except exception.VolumeBackendAPIException: - with excutils.save_and_reraise_exception(): - self._log_cli_output_error('_get_node_data', - ssh_cmd, out, err) - try: - node = { - 'id': node_data['id'], - 'name': node_data['name'], - 'IO_group': node_data['IO_group_id'], - 'WWNN': node_data['WWNN'], - 'status': node_data['status'], - 'WWPN': [], - 'protocol': None, - 'iscsi_name': node_data['iscsi_name'], - 'config_node': node_data['config_node'], - 'ipv4': [], - 'ipv6': [], - } - if node['status'] == 'online': - self._storage_nodes[node['id']] = node - except KeyError: - self._handle_keyerror('lsnode', header) - - def _get_vdisk_attributes(self, vdisk_ref): - """Return vdisk attributes - - Exception is raised if the information from system can not be - parsed/matched to a single vdisk. 
- - :param vdisk_ref: vdisk name or vdisk id - """ - - ssh_cmd = [ - 'svcinfo', 'lsvdisk', '-bytes', '-delim', '!', vdisk_ref] - - return self._execute_command_and_parse_attributes(ssh_cmd) - - def _get_vdisk_map_properties( - self, connector, lun_id, vdisk_name, vdisk_id, vdisk_params): - raise NotImplementedError() - - def _get_vdiskhost_mappings(self, vdisk_name): - """Return the defined storage mappings for a vdisk.""" - - return_data = {} - ssh_cmd = ['svcinfo', 'lsvdiskhostmap', '-delim', '!', vdisk_name] - out, err = self._ssh(ssh_cmd) - - mappings = out.strip().split('\n') - if mappings: - header = mappings.pop(0) - for mapping_line in mappings: - mapping_data = self._get_hdr_dic(header, mapping_line, '!') - return_data[mapping_data['host_name']] = mapping_data - - return return_data - - def _get_vdisk_params(self, type_id): - params = self._build_default_params() - if type_id: - ctxt = context.get_admin_context() - volume_type = volume_types.get_volume_type(ctxt, type_id) - specs = volume_type.get('extra_specs') - for k, value in specs.items(): - # Get the scope, if using scope format - key_split = k.split(':') - if len(key_split) == 1: - scope = None - key = key_split[0] - else: - scope = key_split[0] - key = key_split[1] - - # We generally do not look at capabilities in the driver, but - # protocol is a special case where the user asks for a given - # protocol and we want both the scheduler and the driver to act - # on the value. - if ((not scope or scope == 'capabilities') and - key == 'storage_protocol'): - scope = None - key = 'protocol' - - # Anything keys that the driver should look at should have the - # 'drivers' scope. 
- if scope and scope != "drivers": - continue - - if key in params: - this_type = type(params[key]).__name__ - if this_type == 'int': - value = int(value) - elif this_type == 'bool': - value = strutils.bool_from_string(value) - params[key] = value - - self._check_vdisk_params(params) - - return params - - def _handle_keyerror(self, function, header): - msg = (_('Did not find expected column in %(fun)s: %(hdr)s.') - % {'fun': function, 'hdr': header}) - LOG.error(msg) - raise exception.VolumeBackendAPIException(data=msg) - - def _is_vdisk_defined(self, vdisk_name): - """Check if vdisk is defined.""" - LOG.debug('enter: _is_vdisk_defined: vdisk %s.', vdisk_name) - - vdisk_attributes = self._get_vdisk_attributes(vdisk_name) - - LOG.debug( - 'leave: _is_vdisk_defined: vdisk %(vol)s with %(str)s.', - {'vol': vdisk_name, 'str': vdisk_attributes is not None}) - - if vdisk_attributes is None: - return False - else: - return True - - def _is_vdisk_copy_in_progress(self, vdisk_name): - LOG.debug( - '_is_vdisk_copy_in_progress: %(vdisk)s: %(vdisk_in_progress)s.', - {'vdisk': vdisk_name, - 'vdisk_in_progress': - six.text_type(self._vdisk_copy_in_progress)}) - if vdisk_name not in self._vdisk_copy_in_progress: - LOG.debug( - '_is_vdisk_copy_in_progress: ' - 'vdisk copy is not in progress.') - raise loopingcall.LoopingCallDone(retvalue=True) - - def _is_vdisk_map(self, vdisk_name, connector): - """Check if vdisk is mapped. - - If map, return True and lun id. - If not map, return False and expected lun id. 
- - """ - - LOG.debug('enter: _is_vdisk_map: %(src)s.', {'src': vdisk_name}) - - map_flag = False - result_lun = '-1' - - host_name = self._get_host_from_connector(connector) - if host_name is None: - return (map_flag, int(result_lun)) - - mapping_data = self._get_hostvdisk_mappings(host_name) - - if vdisk_name in mapping_data: - map_flag = True - result_lun = mapping_data[vdisk_name]['SCSI_id'] - else: - lun_used = [int(v['SCSI_id']) for v in mapping_data.values()] - lun_used.sort() - - # Start from 1 due to problems with lun id being 0. - result_lun = 1 - for lun_id in lun_used: - if result_lun < lun_id: - break - elif result_lun == lun_id: - result_lun += 1 - - LOG.debug( - 'leave: _is_vdisk_map: %(src)s ' - 'mapped %(map_flag)s %(result_lun)s.', - {'src': vdisk_name, - 'map_flag': six.text_type(map_flag), - 'result_lun': result_lun}) - - return (map_flag, int(result_lun)) - - def _log_cli_output_error(self, function, cmd, out, err): - LOG.error('%(fun)s: Failed with unexpected CLI output.\n ' - 'Command: %(cmd)s\nstdout: %(out)s\nstderr: %(err)s\n', - {'fun': function, - 'cmd': cmd, - 'out': six.text_type(out), - 'err': six.text_type(err)}) - - def _manage_input_check(self, existing_ref): - """Verify the input of manage function.""" - # Check that the reference is valid - if 'source-name' in existing_ref: - manage_source = existing_ref['source-name'] - vdisk = self._get_vdisk_attributes(manage_source) - elif 'source-id' in existing_ref: - manage_source = existing_ref['source-id'] - vdisk = self._get_vdisk_attributes(manage_source) - else: - reason = _('Reference must contain source-id or ' - 'source-name element.') - raise exception.ManageExistingInvalidReference( - existing_ref=existing_ref, reason=reason) - if vdisk is None: - reason = (_('No vdisk with the ID specified by ref %s.') - % manage_source) - raise exception.ManageExistingInvalidReference( - existing_ref=existing_ref, reason=reason) - return vdisk - - @utils.synchronized('flashsystem-map', 
external=True) - def _map_vdisk_to_host(self, vdisk_name, connector): - """Create a mapping between a vdisk to a host.""" - - LOG.debug( - 'enter: _map_vdisk_to_host: vdisk %(vdisk_name)s to ' - 'host %(host)s.', - {'vdisk_name': vdisk_name, 'host': connector}) - - # Check if a host object is defined for this host name - host_name = self._get_host_from_connector(connector) - if host_name is None: - # Host does not exist - add a new host to FlashSystem - host_name = self._create_host(connector) - # Verify that create_new_host succeeded - self._driver_assert( - host_name is not None, - (_('_create_host failed to return the host name.'))) - - (map_flag, result_lun) = self._is_vdisk_map(vdisk_name, connector) - - # Volume is not mapped to host, create a new LUN - if not map_flag: - ssh_cmd = ['svctask', 'mkvdiskhostmap', '-host', host_name, - '-scsi', six.text_type(result_lun), vdisk_name] - out, err = self._ssh(ssh_cmd, check_exit_code=False) - if err and err.startswith('CMMVC6071E'): - if not self.configuration.flashsystem_multihostmap_enabled: - msg = _('flashsystem_multihostmap_enabled is set ' - 'to False, not allow multi host mapping. 
' - 'CMMVC6071E The VDisk-to-host mapping ' - 'was not created because the VDisk is ' - 'already mapped to a host.') - LOG.error(msg) - raise exception.VolumeBackendAPIException(data=msg) - - for i in range(len(ssh_cmd)): - if ssh_cmd[i] == 'mkvdiskhostmap': - ssh_cmd.insert(i + 1, '-force') - - # try to map one volume to multiple hosts - out, err = self._ssh(ssh_cmd) - LOG.info('Volume %s is mapping to multiple hosts.', - vdisk_name) - self._assert_ssh_return( - 'successfully created' in out, - '_map_vdisk_to_host', ssh_cmd, out, err) - else: - self._assert_ssh_return( - 'successfully created' in out, - '_map_vdisk_to_host', ssh_cmd, out, err) - - LOG.debug( - ('leave: _map_vdisk_to_host: LUN %(result_lun)s, vdisk ' - '%(vdisk_name)s, host %(host_name)s.'), - {'result_lun': result_lun, - 'vdisk_name': vdisk_name, 'host_name': host_name}) - - return int(result_lun) - - def _port_conf_generator(self, cmd): - ssh_cmd = cmd + ['-delim', '!'] - out, err = self._ssh(ssh_cmd) - - if not out.strip(): - return - port_lines = out.strip().split('\n') - if not port_lines: - return - - header = port_lines.pop(0) - yield header - for portip_line in port_lines: - try: - port_data = self._get_hdr_dic(header, portip_line, '!') - except exception.VolumeBackendAPIException: - with excutils.save_and_reraise_exception(): - self._log_cli_output_error('_port_conf_generator', - ssh_cmd, out, err) - yield port_data - - def _remove_device(self, properties, device): - LOG.debug('enter: _remove_device') - - if not properties or not device: - LOG.warning('_remove_device: invalid properties or device.') - return - - use_multipath = self.configuration.use_multipath_for_image_xfer - device_scan_attempts = self.configuration.num_volume_device_scan_tries - protocol = properties['driver_volume_type'] - connector = utils.brick_get_connector(protocol, - use_multipath=use_multipath, - device_scan_attempts= - device_scan_attempts, - conn=properties) - - connector.disconnect_volume(properties['data'], 
device) - - LOG.debug('leave: _remove_device') - - def _rename_vdisk(self, vdisk_name, new_name): - """Rename vdisk""" - # Try to rename volume only if found on the storage - vdisk_defined = self._is_vdisk_defined(vdisk_name) - if not vdisk_defined: - LOG.warning('warning: Tried to rename vdisk %s but ' - 'it does not exist.', vdisk_name) - return - ssh_cmd = [ - 'svctask', 'chvdisk', '-name', new_name, vdisk_name] - out, err = self._ssh(ssh_cmd) - # No output should be returned from chvdisk - self._assert_ssh_return( - (not out.strip()), - '_rename_vdisk %(name)s' % {'name': vdisk_name}, - ssh_cmd, out, err) - - LOG.info('Renamed %(vdisk)s to %(newname)s .', - {'vdisk': vdisk_name, 'newname': new_name}) - - def _scan_device(self, properties): - LOG.debug('enter: _scan_device') - - use_multipath = self.configuration.use_multipath_for_image_xfer - device_scan_attempts = self.configuration.num_volume_device_scan_tries - protocol = properties['driver_volume_type'] - connector = utils.brick_get_connector(protocol, - use_multipath=use_multipath, - device_scan_attempts= - device_scan_attempts, - conn=properties) - device = connector.connect_volume(properties['data']) - host_device = device['path'] - - if not connector.check_valid_device(host_device): - msg = (_('Unable to access the backend storage ' - 'via the path %(path)s.') % {'path': host_device}) - raise exception.VolumeBackendAPIException(data=msg) - - LOG.debug('leave: _scan_device') - return device - - @utils.synchronized('flashsystem-unmap', external=True) - def _unmap_vdisk_from_host(self, vdisk_name, connector): - if 'host' in connector: - host_name = self._get_host_from_connector(connector) - self._driver_assert( - host_name is not None, - (_('_get_host_from_connector failed to return the host name ' - 'for connector.'))) - else: - host_name = None - - # Check if vdisk-host mapping exists, remove if it does. If no host - # name was given, but only one mapping exists, we can use that. 
- mapping_data = self._get_vdiskhost_mappings(vdisk_name) - if not mapping_data: - LOG.warning('_unmap_vdisk_from_host: No mapping of volume ' - '%(vol_name)s to any host found.', - {'vol_name': vdisk_name}) - return host_name - if host_name is None: - if len(mapping_data) > 1: - LOG.warning('_unmap_vdisk_from_host: Multiple mappings of ' - 'volume %(vdisk_name)s found, no host ' - 'specified.', - {'vdisk_name': vdisk_name}) - return - else: - host_name = list(mapping_data.keys())[0] - else: - if host_name not in mapping_data: - LOG.error('_unmap_vdisk_from_host: No mapping of volume ' - '%(vol_name)s to host %(host_name)s found.', - {'vol_name': vdisk_name, 'host_name': host_name}) - return host_name - - # We have a valid host_name now - ssh_cmd = ['svctask', 'rmvdiskhostmap', - '-host', host_name, vdisk_name] - out, err = self._ssh(ssh_cmd) - # Verify CLI behaviour - no output is returned from rmvdiskhostmap - self._assert_ssh_return( - (not out.strip()), - '_unmap_vdisk_from_host', ssh_cmd, out, err) - - # If this host has no more mappings, delete it - mapping_data = self._get_hostvdisk_mappings(host_name) - if not mapping_data: - self._delete_host(host_name) - - def _update_volume_stats(self): - """Retrieve stats info from volume group.""" - - LOG.debug("Updating volume stats.") - - data = { - 'vendor_name': 'IBM', - 'driver_version': self.VERSION, - 'storage_protocol': self._protocol, - 'total_capacity_gb': 0, - 'free_capacity_gb': 0, - 'reserved_percentage': self.configuration.reserved_percentage, - 'QoS_support': False, - 'multiattach': self.configuration.flashsystem_multihostmap_enabled, - } - - pool = FLASHSYSTEM_VOLPOOL_NAME - backend_name = self.configuration.safe_get('volume_backend_name') - if not backend_name: - backend_name = '%s_%s' % (self._system_name, pool) - data['volume_backend_name'] = backend_name - - ssh_cmd = ['svcinfo', 'lsmdiskgrp', '-bytes', '-delim', '!', pool] - attributes = self._execute_command_and_parse_attributes(ssh_cmd) - if not 
attributes: - msg = _('_update_volume_stats: Could not get storage pool data.') - LOG.error(msg) - raise exception.VolumeBackendAPIException(data=msg) - - data['total_capacity_gb'] = ( - float(attributes['capacity']) / units.Gi) - data['free_capacity_gb'] = ( - float(attributes['free_capacity']) / units.Gi) - data['easytier_support'] = False # Do not support easy tier - data['location_info'] = ( - 'FlashSystemDriver:%(sys_id)s:%(pool)s' - % {'sys_id': self._system_id, 'pool': pool}) - - self._stats = data - - def _set_vdisk_copy_in_progress(self, vdisk_list): - LOG.debug( - '_set_vdisk_copy_in_progress: %(vdisk)s: %(vdisk_in_progress)s.', - {'vdisk': six.text_type(vdisk_list), - 'vdisk_in_progress': - six.text_type(self._vdisk_copy_in_progress)}) - get_lock = True - self._vdisk_copy_lock.acquire() - for vdisk in vdisk_list: - if vdisk in self._vdisk_copy_in_progress: - get_lock = False - break - if get_lock: - self._vdisk_copy_in_progress.update(vdisk_list) - self._vdisk_copy_lock.release() - if get_lock: - LOG.debug( - '_set_vdisk_copy_in_progress: %s.', - six.text_type(self._vdisk_copy_in_progress)) - raise loopingcall.LoopingCallDone(retvalue=True) - - def _unset_vdisk_copy_in_progress(self, vdisk_list): - LOG.debug( - '_unset_vdisk_copy_in_progress: %(vdisk)s: %(vdisk_in_progress)s.', - {'vdisk': six.text_type(vdisk_list), - 'vdisk_in_progress': - six.text_type(self._vdisk_copy_in_progress)}) - self._vdisk_copy_lock.acquire() - for vdisk in vdisk_list: - if vdisk in self._vdisk_copy_in_progress: - self._vdisk_copy_in_progress.remove(vdisk) - self._vdisk_copy_lock.release() - - def _wait_vdisk_copy_completed(self, vdisk_name): - timer = loopingcall.FixedIntervalLoopingCall( - self._is_vdisk_copy_in_progress, vdisk_name) - timer.start(interval=self._check_lock_interval).wait() - - def check_for_setup_error(self): - """Ensure that the flags are set properly.""" - LOG.debug('enter: check_for_setup_error') - - # Check that we have the system ID information - if 
self._system_name is None: - msg = ( - _('check_for_setup_error: Unable to determine system name.')) - raise exception.VolumeBackendAPIException(data=msg) - if self._system_id is None: - msg = _('check_for_setup_error: Unable to determine system id.') - raise exception.VolumeBackendAPIException(data=msg) - - required_flags = ['san_ip', 'san_ssh_port', 'san_login'] - for flag in required_flags: - if not self.configuration.safe_get(flag): - msg = (_('%s is not set.') % flag) - raise exception.InvalidInput(reason=msg) - - # Ensure that either password or keyfile were set - if not (self.configuration.san_password or - self.configuration.san_private_key): - msg = _('check_for_setup_error: Password or SSH private key ' - 'is required for authentication: set either ' - 'san_password or san_private_key option.') - raise exception.InvalidInput(reason=msg) - - params = self._build_default_params() - self._check_vdisk_params(params) - - LOG.debug('leave: check_for_setup_error') - - def create_volume(self, volume): - """Create volume.""" - vdisk_name = volume['name'] - vdisk_params = self._get_vdisk_params(volume['volume_type_id']) - vdisk_size = six.text_type(volume['size']) - return self._create_vdisk(vdisk_name, vdisk_size, 'gb', vdisk_params) - - def delete_volume(self, volume): - """Delete volume.""" - vdisk_name = volume['name'] - self._wait_vdisk_copy_completed(vdisk_name) - self._delete_vdisk(vdisk_name, False) - - def extend_volume(self, volume, new_size): - """Extend volume.""" - LOG.debug('enter: extend_volume: volume %s.', volume['name']) - - vdisk_name = volume['name'] - self._wait_vdisk_copy_completed(vdisk_name) - - extend_amt = int(new_size) - volume['size'] - ssh_cmd = (['svctask', 'expandvdisksize', '-size', - six.text_type(extend_amt), '-unit', 'gb', vdisk_name]) - out, err = self._ssh(ssh_cmd) - # No output should be returned from expandvdisksize - self._assert_ssh_return( - (not out.strip()), - 'extend_volume', ssh_cmd, out, err) - - LOG.debug('leave: 
extend_volume: volume %s.', volume['name']) - - def create_snapshot(self, snapshot): - """Create snapshot from volume.""" - - LOG.debug( - 'enter: create_snapshot: create %(snap)s from %(vol)s.', - {'snap': snapshot['name'], 'vol': snapshot['volume']['name']}) - - status = snapshot['volume']['status'] - if status not in ['available', 'in-use']: - msg = (_( - 'create_snapshot: Volume status must be "available" or ' - '"in-use" for snapshot. The invalid status is %s.') % status) - raise exception.InvalidVolume(msg) - - self._create_and_copy_vdisk_data(snapshot['volume']['name'], - snapshot['volume']['id'], - snapshot['name'], - snapshot['id']) - - LOG.debug( - 'leave: create_snapshot: create %(snap)s from %(vol)s.', - {'snap': snapshot['name'], 'vol': snapshot['volume']['name']}) - - def delete_snapshot(self, snapshot): - """Delete snapshot.""" - - LOG.debug( - 'enter: delete_snapshot: delete %(snap)s.', - {'snap': snapshot['name']}) - - self._wait_vdisk_copy_completed(snapshot['name']) - - self._delete_vdisk(snapshot['name'], False) - - LOG.debug( - 'leave: delete_snapshot: delete %(snap)s.', - {'snap': snapshot['name']}) - - def create_volume_from_snapshot(self, volume, snapshot): - """Create volume from snapshot.""" - - LOG.debug( - 'enter: create_volume_from_snapshot: create %(vol)s from ' - '%(snap)s.', {'vol': volume['name'], 'snap': snapshot['name']}) - - if volume['size'] != snapshot['volume_size']: - msg = _('create_volume_from_snapshot: Volume size is different ' - 'from snapshot based volume.') - LOG.error(msg) - raise exception.VolumeDriverException(message=msg) - - status = snapshot['status'] - if status != 'available': - msg = (_('create_volume_from_snapshot: Snapshot status ' - 'must be "available" for creating volume. 
' - 'The invalid status is: %s.') % status) - raise exception.InvalidSnapshot(msg) - - self._create_and_copy_vdisk_data(snapshot['name'], - snapshot['id'], - volume['name'], - volume['id']) - - LOG.debug( - 'leave: create_volume_from_snapshot: create %(vol)s from ' - '%(snap)s.', {'vol': volume['name'], 'snap': snapshot['name']}) - - def create_cloned_volume(self, volume, src_volume): - """Create volume from a source volume.""" - - LOG.debug('enter: create_cloned_volume: create %(vol)s from %(src)s.', - {'src': src_volume['name'], 'vol': volume['name']}) - - if src_volume['size'] != volume['size']: - msg = _('create_cloned_volume: Source and destination ' - 'size differ.') - LOG.error(msg) - raise exception.VolumeDriverException(message=msg) - - self._create_and_copy_vdisk_data(src_volume['name'], - src_volume['id'], - volume['name'], - volume['id']) - - LOG.debug('leave: create_cloned_volume: create %(vol)s from %(src)s.', - {'src': src_volume['name'], 'vol': volume['name']}) - - def get_volume_stats(self, refresh=False): - """Get volume stats. - - If we haven't gotten stats yet or 'refresh' is True, - run update the stats first. - """ - if not self._stats or refresh: - self._update_volume_stats() - - return self._stats - - def manage_existing(self, volume, existing_ref): - """Manages an existing vdisk. - - Renames the vdisk to match the expected name for the volume. 
- """ - LOG.debug('enter: manage_existing: volume %(vol)s ref %(ref)s.', - {'vol': volume, 'ref': existing_ref}) - vdisk = self._manage_input_check(existing_ref) - new_name = 'volume-' + volume['id'] - self._rename_vdisk(vdisk['name'], new_name) - LOG.debug('leave: manage_existing: volume %(vol)s ref %(ref)s.', - {'vol': volume, 'ref': existing_ref}) - return - - def manage_existing_get_size(self, volume, existing_ref): - """Return size of volume to be managed by manage_existing.""" - vdisk = self._manage_input_check(existing_ref) - if self._get_vdiskhost_mappings(vdisk['name']): - reason = _('The specified vdisk is mapped to a host.') - raise exception.ManageExistingInvalidReference( - existing_ref=existing_ref, reason=reason) - return int(vdisk['capacity']) / units.Gi - - def unmanage(self, volume): - """Removes the specified volume from Cinder management.""" - LOG.debug('unmanage: volume %(vol)s is no longer managed by cinder.', - {'vol': volume}) - pass diff --git a/cinder/volume/drivers/ibm/flashsystem_fc.py b/cinder/volume/drivers/ibm/flashsystem_fc.py deleted file mode 100644 index 9a242a1d4..000000000 --- a/cinder/volume/drivers/ibm/flashsystem_fc.py +++ /dev/null @@ -1,400 +0,0 @@ -# Copyright 2015 IBM Corp. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -# - - -""" -Volume driver for IBM FlashSystem storage systems with FC protocol. - -Limitations: -1. Cinder driver only works when open_access_enabled=off. 
- -""" - -import random -import threading - -from oslo_config import cfg -from oslo_log import log as logging -from oslo_utils import excutils -import six - -from cinder import exception -from cinder.i18n import _ -from cinder import interface -from cinder import utils -from cinder.volume import configuration -from cinder.volume.drivers.ibm import flashsystem_common as fscommon -from cinder.volume.drivers.san import san -from cinder.zonemanager import utils as fczm_utils - -LOG = logging.getLogger(__name__) - -flashsystem_fc_opts = [ - cfg.BoolOpt('flashsystem_multipath_enabled', - default=False, - help='This option no longer has any affect. It is deprecated ' - 'and will be removed in the next release.', - deprecated_for_removal=True) -] - -CONF = cfg.CONF -CONF.register_opts(flashsystem_fc_opts, group=configuration.SHARED_CONF_GROUP) - - -@interface.volumedriver -class FlashSystemFCDriver(fscommon.FlashSystemDriver): - """IBM FlashSystem FC volume driver. - - Version history: - - .. code-block:: none - - 1.0.0 - Initial driver - 1.0.1 - Code clean up - 1.0.2 - Add lock into vdisk map/unmap, connection - initialize/terminate - 1.0.3 - Initial driver for iSCSI - 1.0.4 - Split Flashsystem driver into common and FC - 1.0.5 - Report capability of volume multiattach - 1.0.6 - Fix bug #1469581, add I/T mapping check in - terminate_connection - 1.0.7 - Fix bug #1505477, add host name check in - _find_host_exhaustive for FC - 1.0.8 - Fix bug #1572743, multi-attach attribute - should not be hardcoded, only in iSCSI - 1.0.9 - Fix bug #1570574, Cleanup host resource - leaking, changes only in iSCSI - 1.0.10 - Fix bug #1585085, add host name check in - _find_host_exhaustive for iSCSI - 1.0.11 - Update driver to use ABC metaclasses - 1.0.12 - Update driver to support Manage/Unmanage - existing volume - """ - - VERSION = "1.0.12" - - # ThirdPartySystems wiki page - CI_WIKI_NAME = "IBM_STORAGE_CI" - - def __init__(self, *args, **kwargs): - super(FlashSystemFCDriver, 
self).__init__(*args, **kwargs) - self.configuration.append_config_values(fscommon.flashsystem_opts) - self.configuration.append_config_values(flashsystem_fc_opts) - self.configuration.append_config_values(san.san_opts) - - def _check_vdisk_params(self, params): - # Check that the requested protocol is enabled - if params['protocol'] != self._protocol: - msg = (_("Illegal value '%(prot)s' specified for " - "flashsystem_connection_protocol: " - "valid value(s) are %(enabled)s.") - % {'prot': params['protocol'], - 'enabled': self._protocol}) - raise exception.InvalidInput(reason=msg) - - def _create_host(self, connector): - """Create a new host on the storage system. - - We create a host and associate it with the given connection - information. - - """ - - LOG.debug('enter: _create_host: host %s.', connector['host']) - - rand_id = six.text_type(random.randint(0, 99999999)).zfill(8) - host_name = '%s-%s' % (self._connector_to_hostname_prefix(connector), - rand_id) - - ports = [] - if 'FC' == self._protocol and 'wwpns' in connector: - for wwpn in connector['wwpns']: - ports.append('-hbawwpn %s' % wwpn) - - self._driver_assert(ports, - (_('_create_host: No connector ports.'))) - port1 = ports.pop(0) - arg_name, arg_val = port1.split() - ssh_cmd = ['svctask', 'mkhost', '-force', arg_name, arg_val, '-name', - '"%s"' % host_name] - out, err = self._ssh(ssh_cmd) - self._assert_ssh_return('successfully created' in out, - '_create_host', ssh_cmd, out, err) - - for port in ports: - arg_name, arg_val = port.split() - ssh_cmd = ['svctask', 'addhostport', '-force', - arg_name, arg_val, host_name] - out, err = self._ssh(ssh_cmd) - self._assert_ssh_return( - (not out.strip()), - '_create_host', ssh_cmd, out, err) - - LOG.debug( - 'leave: _create_host: host %(host)s - %(host_name)s.', - {'host': connector['host'], 'host_name': host_name}) - - return host_name - - def _find_host_exhaustive(self, connector, hosts): - hname = connector['host'] - hnames = [ihost[0:ihost.rfind('-')] for 
ihost in hosts] - if hname in hnames: - host = hosts[hnames.index(hname)] - ssh_cmd = ['svcinfo', 'lshost', '-delim', '!', host] - out, err = self._ssh(ssh_cmd) - self._assert_ssh_return( - out.strip(), - '_find_host_exhaustive', ssh_cmd, out, err) - attr_lines = [attr_line for attr_line in out.split('\n')] - attr_parm = {} - for attr_line in attr_lines: - attr_name, foo, attr_val = attr_line.partition('!') - attr_parm[attr_name] = attr_val - if ('WWPN' in attr_parm.keys() and 'wwpns' in connector and - attr_parm['WWPN'].lower() in - map(str.lower, map(str, connector['wwpns']))): - return host - else: - LOG.warning('Host %(host)s was not found on backend storage.', - {'host': hname}) - return None - - def _get_conn_fc_wwpns(self): - wwpns = [] - - cmd = ['svcinfo', 'lsportfc'] - - generator = self._port_conf_generator(cmd) - header = next(generator, None) - if not header: - return wwpns - - for port_data in generator: - try: - if port_data['status'] == 'active': - wwpns.append(port_data['WWPN']) - except KeyError: - self._handle_keyerror('lsportfc', header) - - return wwpns - - def _get_fc_wwpns(self): - for key in self._storage_nodes: - node = self._storage_nodes[key] - ssh_cmd = ['svcinfo', 'lsnode', '-delim', '!', node['id']] - attributes = self._execute_command_and_parse_attributes(ssh_cmd) - wwpns = set(node['WWPN']) - for i, s in zip(attributes['port_id'], attributes['port_status']): - if 'unconfigured' != s: - wwpns.add(i) - node['WWPN'] = list(wwpns) - LOG.info('WWPN on node %(node)s: %(wwpn)s.', - {'node': node['id'], 'wwpn': node['WWPN']}) - - def _get_vdisk_map_properties( - self, connector, lun_id, vdisk_name, vdisk_id, vdisk_params): - """Get the map properties of vdisk.""" - - LOG.debug( - 'enter: _get_vdisk_map_properties: vdisk ' - '%(vdisk_name)s.', {'vdisk_name': vdisk_name}) - - IO_group = '0' - - io_group_nodes = [] - for k, node in self._storage_nodes.items(): - if vdisk_params['protocol'] != node['protocol']: - continue - if node['IO_group'] 
== IO_group: - io_group_nodes.append(node) - - if not io_group_nodes: - msg = (_('_get_vdisk_map_properties: No node found in ' - 'I/O group %(gid)s for volume %(vol)s.') - % {'gid': IO_group, 'vol': vdisk_name}) - LOG.error(msg) - raise exception.VolumeBackendAPIException(data=msg) - - properties = {} - properties['target_discovered'] = False - properties['target_lun'] = lun_id - properties['volume_id'] = vdisk_id - - type_str = 'fibre_channel' - conn_wwpns = self._get_conn_fc_wwpns() - - if not conn_wwpns: - msg = _('_get_vdisk_map_properties: Could not get FC ' - 'connection information for the host-volume ' - 'connection. Is the host configured properly ' - 'for FC connections?') - LOG.error(msg) - raise exception.VolumeBackendAPIException(data=msg) - - properties['target_wwn'] = conn_wwpns - - if "zvm_fcp" in connector: - properties['zvm_fcp'] = connector['zvm_fcp'] - - properties['initiator_target_map'] = self._build_initiator_target_map( - connector['wwpns'], conn_wwpns) - - LOG.debug( - 'leave: _get_vdisk_map_properties: vdisk ' - '%(vdisk_name)s.', {'vdisk_name': vdisk_name}) - - return {'driver_volume_type': type_str, 'data': properties} - - @fczm_utils.add_fc_zone - @utils.synchronized('flashsystem-init-conn', external=True) - def initialize_connection(self, volume, connector): - """Perform work so that an FC connection can be made. - - To be able to create a FC connection from a given host to a - volume, we must: - 1. Translate the given WWNN to a host name - 2. Create new host on the storage system if it does not yet exist - 3. Map the volume to the host if it is not already done - 4. 
Return the connection information for relevant nodes (in the - proper I/O group) - - """ - - LOG.debug( - 'enter: initialize_connection: volume %(vol)s with ' - 'connector %(conn)s.', {'vol': volume, 'conn': connector}) - - vdisk_name = volume['name'] - vdisk_id = volume['id'] - vdisk_params = self._get_vdisk_params(volume['volume_type_id']) - - # TODO(edwin): might fix it after vdisk copy function is - # ready in FlashSystem thin-provision layer. As this validation - # is to check the vdisk which is in copying, at present in firmware - # level vdisk doesn't allow to map host which it is copy. New - # vdisk clone and snapshot function will cover it. After that the - # _wait_vdisk_copy_completed need some modification. - self._wait_vdisk_copy_completed(vdisk_name) - - self._driver_assert( - self._is_vdisk_defined(vdisk_name), - (_('initialize_connection: vdisk %s is not defined.') - % vdisk_name)) - - lun_id = self._map_vdisk_to_host(vdisk_name, connector) - - properties = {} - try: - properties = self._get_vdisk_map_properties( - connector, lun_id, vdisk_name, vdisk_id, vdisk_params) - except exception.VolumeBackendAPIException: - with excutils.save_and_reraise_exception(): - self.terminate_connection(volume, connector) - LOG.error('initialize_connection: Failed to collect ' - 'return properties for volume %(vol)s and ' - 'connector %(conn)s.', - {'vol': volume, 'conn': connector}) - - LOG.debug( - 'leave: initialize_connection:\n volume: %(vol)s\n connector ' - '%(conn)s\n properties: %(prop)s.', - {'vol': volume, - 'conn': connector, - 'prop': properties}) - - return properties - - @fczm_utils.remove_fc_zone - @utils.synchronized('flashsystem-term-conn', external=True) - def terminate_connection(self, volume, connector, **kwargs): - """Cleanup after connection has been terminated. - - When we clean up a terminated connection between a given connector - and volume, we: - 1. Translate the given connector to a host name - 2. 
Remove the volume-to-host mapping if it exists - 3. Delete the host if it has no more mappings (hosts are created - automatically by this driver when mappings are created) - """ - LOG.debug( - 'enter: terminate_connection: volume %(vol)s with ' - 'connector %(conn)s.', - {'vol': volume, 'conn': connector}) - - return_data = { - 'driver_volume_type': 'fibre_channel', - 'data': {}, - } - - vdisk_name = volume['name'] - self._wait_vdisk_copy_completed(vdisk_name) - self._unmap_vdisk_from_host(vdisk_name, connector) - - host_name = self._get_host_from_connector(connector) - if not host_name: - properties = {} - conn_wwpns = self._get_conn_fc_wwpns() - properties['target_wwn'] = conn_wwpns - properties['initiator_target_map'] = ( - self._build_initiator_target_map( - connector['wwpns'], conn_wwpns)) - return_data['data'] = properties - - LOG.debug( - 'leave: terminate_connection: volume %(vol)s with ' - 'connector %(conn)s.', {'vol': volume, 'conn': connector}) - - return return_data - - def do_setup(self, ctxt): - """Check that we have all configuration details from the storage.""" - - self._context = ctxt - - # Get data of configured node - self._get_node_data() - - # Get the WWPNs of the FlashSystem nodes - self._get_fc_wwpns() - - # For each node, check what connection modes it supports. Delete any - # nodes that do not support any types (may be partially configured). 
- to_delete = [] - for k, node in self._storage_nodes.items(): - if not node['WWPN']: - to_delete.append(k) - - for delkey in to_delete: - del self._storage_nodes[delkey] - - # Make sure we have at least one node configured - self._driver_assert(self._storage_nodes, - 'do_setup: No configured nodes.') - - self._protocol = node['protocol'] = 'FC' - - # Set for vdisk synchronization - self._vdisk_copy_in_progress = set() - self._vdisk_copy_lock = threading.Lock() - self._check_lock_interval = 5 - - def validate_connector(self, connector): - """Check connector.""" - if 'FC' == self._protocol and 'wwpns' not in connector: - LOG.error('The connector does not contain the ' - 'required information: wwpns is missing') - raise exception.InvalidConnectorException(missing='wwpns') diff --git a/cinder/volume/drivers/ibm/flashsystem_iscsi.py b/cinder/volume/drivers/ibm/flashsystem_iscsi.py deleted file mode 100644 index 60e440f60..000000000 --- a/cinder/volume/drivers/ibm/flashsystem_iscsi.py +++ /dev/null @@ -1,415 +0,0 @@ -# Copyright 2015 IBM Corp. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -# - - -""" -Volume driver for IBM FlashSystem storage systems with iSCSI protocol. - -Limitations: -1. Cinder driver only works when open_access_enabled=off. 
- -""" - -import random -import threading - -from oslo_config import cfg -from oslo_log import log as logging -from oslo_utils import excutils -import six - -from cinder import exception -from cinder.i18n import _ -from cinder import interface -from cinder import utils -from cinder.volume import configuration as conf -from cinder.volume.drivers.ibm import flashsystem_common as fscommon -from cinder.volume.drivers.san import san - -LOG = logging.getLogger(__name__) - -flashsystem_iscsi_opts = [ - cfg.IntOpt('flashsystem_iscsi_portid', - default=0, - help='Default iSCSI Port ID of FlashSystem. ' - '(Default port is 0.)') -] - -CONF = cfg.CONF -CONF.register_opts(flashsystem_iscsi_opts, group=conf.SHARED_CONF_GROUP) - - -@interface.volumedriver -class FlashSystemISCSIDriver(fscommon.FlashSystemDriver): - """IBM FlashSystem iSCSI volume driver. - - Version history: - - .. code-block:: none - - 1.0.0 - Initial driver - 1.0.1 - Code clean up - 1.0.2 - Add lock into vdisk map/unmap, connection - initialize/terminate - 1.0.3 - Initial driver for iSCSI - 1.0.4 - Split Flashsystem driver into common and FC - 1.0.5 - Report capability of volume multiattach - 1.0.6 - Fix bug #1469581, add I/T mapping check in - terminate_connection - 1.0.7 - Fix bug #1505477, add host name check in - _find_host_exhaustive for FC - 1.0.8 - Fix bug #1572743, multi-attach attribute - should not be hardcoded, only in iSCSI - 1.0.9 - Fix bug #1570574, Cleanup host resource - leaking, changes only in iSCSI - 1.0.10 - Fix bug #1585085, add host name check in - _find_host_exhaustive for iSCSI - 1.0.11 - Update driver to use ABC metaclasses - 1.0.12 - Update driver to support Manage/Unmanage - existing volume - """ - - VERSION = "1.0.12" - - # ThirdPartySystems wiki page - CI_WIKI_NAME = "IBM_STORAGE_CI" - - def __init__(self, *args, **kwargs): - super(FlashSystemISCSIDriver, self).__init__(*args, **kwargs) - self.configuration.append_config_values(fscommon.flashsystem_opts) - 
self.configuration.append_config_values(flashsystem_iscsi_opts) - self.configuration.append_config_values(san.san_opts) - - def _check_vdisk_params(self, params): - # Check that the requested protocol is enabled - if not params['protocol'] in self._protocol: - msg = (_("'%(prot)s' is invalid for " - "flashsystem_connection_protocol " - "in config file. valid value(s) are " - "%(enabled)s.") - % {'prot': params['protocol'], - 'enabled': self._protocol}) - raise exception.InvalidInput(reason=msg) - - # Check if iscsi_ip is set when protocol is iSCSI - if params['protocol'] == 'iSCSI' and params['iscsi_ip'] == 'None': - msg = _("iscsi_ip_address must be set in config file when " - "using protocol 'iSCSI'.") - raise exception.InvalidInput(reason=msg) - - def _create_host(self, connector): - """Create a new host on the storage system. - - We create a host and associate it with the given connection - information. - """ - - LOG.debug('enter: _create_host: host %s.', connector['host']) - - rand_id = six.text_type(random.randint(0, 99999999)).zfill(8) - host_name = '%s-%s' % (self._connector_to_hostname_prefix(connector), - rand_id) - - ports = [] - - if 'iSCSI' == self._protocol and 'initiator' in connector: - ports.append('-iscsiname %s' % connector['initiator']) - - self._driver_assert(ports, - (_('_create_host: No connector ports.'))) - port1 = ports.pop(0) - arg_name, arg_val = port1.split() - ssh_cmd = ['svctask', 'mkhost', '-force', arg_name, arg_val, '-name', - '"%s"' % host_name] - out, err = self._ssh(ssh_cmd) - self._assert_ssh_return('successfully created' in out, - '_create_host', ssh_cmd, out, err) - - for port in ports: - arg_name, arg_val = port.split() - ssh_cmd = ['svctask', 'addhostport', '-force', - arg_name, arg_val, host_name] - out, err = self._ssh(ssh_cmd) - self._assert_ssh_return( - (not out.strip()), - '_create_host', ssh_cmd, out, err) - - LOG.debug( - 'leave: _create_host: host %(host)s - %(host_name)s.', - {'host': connector['host'], 
'host_name': host_name}) - - return host_name - - def _find_host_exhaustive(self, connector, hosts): - LOG.debug('enter: _find_host_exhaustive hosts: %s.', hosts) - hname = connector['host'] - hnames = [ihost[0:ihost.rfind('-')] for ihost in hosts] - if hname in hnames: - host = hosts[hnames.index(hname)] - ssh_cmd = ['svcinfo', 'lshost', '-delim', '!', host] - out, err = self._ssh(ssh_cmd) - self._assert_ssh_return( - out.strip(), - '_find_host_exhaustive', ssh_cmd, out, err) - for attr_line in out.split('\n'): - attr_name, foo, attr_val = attr_line.partition('!') - if (attr_name == 'iscsi_name' and - 'initiator' in connector and - attr_val == connector['initiator']): - LOG.debug( - 'leave: _find_host_exhaustive connector: %s.', - connector) - return host - else: - LOG.warning('Host %(host)s was not found on backend storage.', - {'host': hname}) - return None - - def _get_vdisk_map_properties( - self, connector, lun_id, vdisk_name, vdisk_id, vdisk_params): - """Get the map properties of vdisk.""" - - LOG.debug( - 'enter: _get_vdisk_map_properties: vdisk ' - '%(vdisk_name)s.', {'vdisk_name': vdisk_name}) - - preferred_node = '0' - IO_group = '0' - - # Get preferred node and other nodes in I/O group - preferred_node_entry = None - io_group_nodes = [] - for k, node in self._storage_nodes.items(): - if vdisk_params['protocol'] != node['protocol']: - continue - if node['id'] == preferred_node: - preferred_node_entry = node - if node['IO_group'] == IO_group: - io_group_nodes.append(node) - - if not io_group_nodes: - msg = (_('No node found in I/O group %(gid)s for volume %(vol)s.') - % {'gid': IO_group, 'vol': vdisk_name}) - LOG.error(msg) - raise exception.VolumeBackendAPIException(data=msg) - - if not preferred_node_entry: - # Get 1st node in I/O group - preferred_node_entry = io_group_nodes[0] - LOG.warning('_get_vdisk_map_properties: Did not find a ' - 'preferred node for vdisk %s.', vdisk_name) - properties = { - 'target_discovered': False, - 'target_lun': lun_id, 
- 'volume_id': vdisk_id, - } - - type_str = 'iscsi' - if preferred_node_entry['ipv4']: - ipaddr = preferred_node_entry['ipv4'][0] - else: - ipaddr = preferred_node_entry['ipv6'][0] - iscsi_port = self.configuration.iscsi_port - properties['target_portal'] = '%s:%s' % (ipaddr, iscsi_port) - properties['target_iqn'] = preferred_node_entry['iscsi_name'] - - LOG.debug( - 'leave: _get_vdisk_map_properties: vdisk ' - '%(vdisk_name)s.', {'vdisk_name': vdisk_name}) - - return {'driver_volume_type': type_str, 'data': properties} - - @utils.synchronized('flashsystem-init-conn', external=True) - def initialize_connection(self, volume, connector): - """Perform work so that an iSCSI connection can be made. - - To be able to create an iSCSI connection from a given host to a - volume, we must: - 1. Translate the given iSCSI name to a host name - 2. Create new host on the storage system if it does not yet exist - 3. Map the volume to the host if it is not already done - 4. Return the connection information for relevant nodes (in the - proper I/O group) - - """ - - LOG.debug( - 'enter: initialize_connection: volume %(vol)s with ' - 'connector %(conn)s.', {'vol': volume, 'conn': connector}) - - vdisk_name = volume['name'] - vdisk_id = volume['id'] - vdisk_params = self._get_vdisk_params(volume['volume_type_id']) - - self._wait_vdisk_copy_completed(vdisk_name) - - self._driver_assert( - self._is_vdisk_defined(vdisk_name), - (_('vdisk %s is not defined.') - % vdisk_name)) - - lun_id = self._map_vdisk_to_host(vdisk_name, connector) - - properties = {} - try: - properties = self._get_vdisk_map_properties( - connector, lun_id, vdisk_name, vdisk_id, vdisk_params) - except exception.VolumeBackendAPIException: - with excutils.save_and_reraise_exception(): - self.terminate_connection(volume, connector) - LOG.error('Failed to collect return properties for ' - 'volume %(vol)s and connector %(conn)s.', - {'vol': volume, 'conn': connector}) - - LOG.debug( - 'leave: initialize_connection:\n 
volume: %(vol)s\n connector ' - '%(conn)s\n properties: %(prop)s.', - {'vol': volume, - 'conn': connector, - 'prop': properties}) - - return properties - - @utils.synchronized('flashsystem-term-conn', external=True) - def terminate_connection(self, volume, connector, **kwargs): - """Cleanup after connection has been terminated. - - When we clean up a terminated connection between a given connector - and volume, we: - 1. Translate the given connector to a host name - 2. Remove the volume-to-host mapping if it exists - 3. Delete the host if it has no more mappings (hosts are created - automatically by this driver when mappings are created) - """ - LOG.debug( - 'enter: terminate_connection: volume %(vol)s with ' - 'connector %(conn)s.', - {'vol': volume, 'conn': connector}) - - vdisk_name = volume['name'] - self._wait_vdisk_copy_completed(vdisk_name) - host_name = self._unmap_vdisk_from_host(vdisk_name, connector) - # checking if host_name none, if not then, check if the host has - # any mappings, if not the host gets deleted. 
- if host_name: - if not self._get_hostvdisk_mappings(host_name): - self._delete_host(host_name) - - LOG.debug( - 'leave: terminate_connection: volume %(vol)s with ' - 'connector %(conn)s.', {'vol': volume, 'conn': connector}) - - return {'driver_volume_type': 'iscsi'} - - def _get_iscsi_ip_addrs(self): - """get ip address of iSCSI interface.""" - - LOG.debug('enter: _get_iscsi_ip_addrs') - - cmd = ['svcinfo', 'lsportip'] - generator = self._port_conf_generator(cmd) - header = next(generator, None) - if not header: - return - - for key in self._storage_nodes: - if self._storage_nodes[key]['config_node'] == 'yes': - node = self._storage_nodes[key] - break - - if node is None: - msg = _('No config node found.') - LOG.error(msg) - raise exception.VolumeBackendAPIException(data=msg) - for port_data in generator: - try: - port_ipv4 = port_data['IP_address'] - port_ipv6 = port_data['IP_address_6'] - state = port_data['state'] - speed = port_data['speed'] - except KeyError: - self._handle_keyerror('lsportip', header) - if port_ipv4 == self.configuration.iscsi_ip_address and ( - port_data['id'] == ( - six.text_type( - self.configuration.flashsystem_iscsi_portid))): - if state not in ('configured', 'online'): - msg = (_('State of node is wrong. 
Current state is %s.') - % state) - LOG.error(msg) - raise exception.VolumeBackendAPIException(data=msg) - if state in ('configured', 'online') and speed != 'NONE': - if port_ipv4: - node['ipv4'].append(port_ipv4) - if port_ipv6: - node['ipv6'].append(port_ipv6) - break - if not (len(node['ipv4']) or len(node['ipv6'])): - msg = _('No ip address found.') - LOG.error(msg) - raise exception.VolumeBackendAPIException(data=msg) - - LOG.debug('leave: _get_iscsi_ip_addrs') - - def do_setup(self, ctxt): - """Check that we have all configuration details from the storage.""" - - LOG.debug('enter: do_setup') - - self._context = ctxt - - # Get data of configured node - self._get_node_data() - - # Get the iSCSI IP addresses of the FlashSystem nodes - self._get_iscsi_ip_addrs() - - for k, node in self._storage_nodes.items(): - if self.configuration.flashsystem_connection_protocol == 'iSCSI': - if (len(node['ipv4']) or len(node['ipv6']) and - len(node['iscsi_name'])): - node['protocol'] = 'iSCSI' - - self._protocol = 'iSCSI' - - # Set for vdisk synchronization - self._vdisk_copy_in_progress = set() - self._vdisk_copy_lock = threading.Lock() - self._check_lock_interval = 5 - - LOG.debug('leave: do_setup') - - def _build_default_params(self): - protocol = self.configuration.flashsystem_connection_protocol - if protocol.lower() == 'iscsi': - protocol = 'iSCSI' - return { - 'protocol': protocol, - 'iscsi_ip': self.configuration.iscsi_ip_address, - 'iscsi_port': self.configuration.iscsi_port, - 'iscsi_ported': self.configuration.flashsystem_iscsi_portid, - } - - def validate_connector(self, connector): - """Check connector for enabled protocol.""" - valid = False - if 'iSCSI' == self._protocol and 'initiator' in connector: - valid = True - if not valid: - LOG.error('The connector does not contain the ' - 'required information: initiator is missing') - raise exception.InvalidConnectorException(missing=( - 'initiator')) diff --git a/cinder/volume/drivers/ibm/gpfs.py 
b/cinder/volume/drivers/ibm/gpfs.py deleted file mode 100644 index 80d8968a8..000000000 --- a/cinder/volume/drivers/ibm/gpfs.py +++ /dev/null @@ -1,1646 +0,0 @@ -# Copyright IBM Corp. 2013 All Rights Reserved -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -""" -GPFS Volume Driver. - -""" -import math -import os -import re -import shutil - -from oslo_concurrency import processutils -from oslo_config import cfg -from oslo_log import log as logging -from oslo_utils import units -import paramiko -import six - -from cinder import context -from cinder import exception -from cinder.i18n import _ -from cinder.image import image_utils -from cinder import interface -from cinder.objects import fields -from cinder import utils -from cinder.volume import configuration -from cinder.volume import driver -from cinder.volume.drivers import nfs -from cinder.volume.drivers import remotefs -from cinder.volume.drivers.san import san -from cinder.volume import utils as volume_utils - -GPFS_CLONE_MIN_RELEASE = 1200 -GPFS_ENC_MIN_RELEASE = 1404 -MIGRATION_ALLOWED_DEST_TYPE = ['GPFSDriver', 'GPFSNFSDriver'] - -LOG = logging.getLogger(__name__) - - -gpfs_opts = [ - cfg.StrOpt('gpfs_mount_point_base', - help='Specifies the path of the GPFS directory where Block ' - 'Storage volume and snapshot files are stored.'), - cfg.StrOpt('gpfs_images_dir', - help='Specifies the path of the Image service repository in ' - 'GPFS. 
Leave undefined if not storing images in GPFS.'), - cfg.StrOpt('gpfs_images_share_mode', - choices=['copy', 'copy_on_write', None], - help='Specifies the type of image copy to be used. Set this ' - 'when the Image service repository also uses GPFS so ' - 'that image files can be transferred efficiently from ' - 'the Image service to the Block Storage service. There ' - 'are two valid values: "copy" specifies that a full copy ' - 'of the image is made; "copy_on_write" specifies that ' - 'copy-on-write optimization strategy is used and ' - 'unmodified blocks of the image file are shared ' - 'efficiently.'), - cfg.IntOpt('gpfs_max_clone_depth', - default=0, - help='Specifies an upper limit on the number of indirections ' - 'required to reach a specific block due to snapshots or ' - 'clones. A lengthy chain of copy-on-write snapshots or ' - 'clones can have a negative impact on performance, but ' - 'improves space utilization. 0 indicates unlimited ' - 'clone depth.'), - cfg.BoolOpt('gpfs_sparse_volumes', - default=True, - help=('Specifies that volumes are created as sparse files ' - 'which initially consume no space. If set to False, the ' - 'volume is created as a fully allocated file, in which ' - 'case, creation may take a significantly longer time.')), - cfg.StrOpt('gpfs_storage_pool', - default='system', - help=('Specifies the storage pool that volumes are assigned ' - 'to. 
By default, the system storage pool is used.')), -] - -gpfs_remote_ssh_opts = [ - cfg.ListOpt('gpfs_hosts', - default=[], - help='Comma-separated list of IP address or ' - 'hostnames of GPFS nodes.'), - cfg.StrOpt('gpfs_user_login', - default='root', - help='Username for GPFS nodes.'), - cfg.StrOpt('gpfs_user_password', - default='', - help='Password for GPFS node user.', - secret=True), - cfg.StrOpt('gpfs_private_key', - default='', - help='Filename of private key to use for SSH authentication.'), - cfg.PortOpt('gpfs_ssh_port', - default=22, - help='SSH port to use.'), - cfg.StrOpt('gpfs_hosts_key_file', - default='$state_path/ssh_known_hosts', - help='File containing SSH host keys for the gpfs nodes ' - 'with which driver needs to communicate. ' - 'Default=$state_path/ssh_known_hosts'), - cfg.BoolOpt('gpfs_strict_host_key_policy', - default=False, - help='Option to enable strict gpfs host key checking while ' - 'connecting to gpfs nodes. Default=False'), -] - -CONF = cfg.CONF -CONF.register_opts(gpfs_opts, group=configuration.SHARED_CONF_GROUP) -CONF.register_opts(gpfs_remote_ssh_opts, group=configuration.SHARED_CONF_GROUP) - - -def _different(difference_tuple): - """Return true if two elements of a tuple are different.""" - if difference_tuple: - member1, member2 = difference_tuple - return member1 != member2 - else: - return False - - -def _same_filesystem(path1, path2): - """Return true if the two paths are in the same GPFS file system.""" - return os.lstat(path1).st_dev == os.lstat(path2).st_dev - - -def _sizestr(size_in_g): - """Convert the specified size into a string value.""" - return '%sG' % size_in_g - - -@interface.volumedriver -class GPFSDriver(driver.CloneableImageVD, - driver.MigrateVD, - driver.BaseVD): - """Implements volume functions using GPFS primitives. 
- - Version history: - 1.0.0 - Initial driver - 1.1.0 - Add volume retype, refactor volume migration - 1.2.0 - Add consistency group support - 1.3.0 - Add NFS based GPFS storage backend support - 1.3.1 - Add GPFS native encryption (encryption of data at rest) support - """ - - VERSION = "1.3.1" - - # ThirdPartySystems wiki page - CI_WIKI_NAME = "IBM_GPFS_CI" - - def __init__(self, *args, **kwargs): - super(GPFSDriver, self).__init__(*args, **kwargs) - self.configuration.append_config_values(gpfs_opts) - self.gpfs_execute = self._gpfs_local_execute - self._execute = utils.execute - self.GPFS_PATH = '' - - def _gpfs_local_execute(self, *cmd, **kwargs): - if 'run_as_root' not in kwargs: - kwargs.update({'run_as_root': True}) - - return utils.execute(*cmd, **kwargs) - - def _get_gpfs_state(self): - """Return GPFS state information.""" - try: - (out, err) = self.gpfs_execute(self.GPFS_PATH + 'mmgetstate', '-Y') - return out - except processutils.ProcessExecutionError as exc: - LOG.error('Failed to issue mmgetstate command, error: %s.', - exc.stderr) - raise exception.VolumeBackendAPIException(data=exc.stderr) - - def _check_gpfs_state(self): - """Raise VolumeBackendAPIException if GPFS is not active.""" - out = self._get_gpfs_state() - lines = out.splitlines() - state_token = lines[0].split(':').index('state') - gpfs_state = lines[1].split(':')[state_token] - if gpfs_state != 'active': - LOG.error('GPFS is not active. 
Detailed output: %s.', out) - raise exception.VolumeBackendAPIException( - data=_('GPFS is not running, state: %s.') % gpfs_state) - - def _get_filesystem_from_path(self, path): - """Return filesystem for specified path.""" - try: - (out, err) = self.gpfs_execute('df', path) - lines = out.splitlines() - filesystem = lines[1].split()[0] - return filesystem - except processutils.ProcessExecutionError as exc: - LOG.error('Failed to issue df command for path %(path)s, ' - 'error: %(error)s.', - {'path': path, - 'error': exc.stderr}) - raise exception.VolumeBackendAPIException(data=exc.stderr) - - def _get_gpfs_cluster_id(self): - """Return the id for GPFS cluster being used.""" - try: - (out, err) = self.gpfs_execute(self.GPFS_PATH + 'mmlsconfig', - 'clusterId', '-Y') - lines = out.splitlines() - value_token = lines[0].split(':').index('value') - cluster_id = lines[1].split(':')[value_token] - return cluster_id - except processutils.ProcessExecutionError as exc: - LOG.error('Failed to issue mmlsconfig command, error: %s.', - exc.stderr) - raise exception.VolumeBackendAPIException(data=exc.stderr) - - def _get_fileset_from_path(self, path): - """Return the GPFS fileset for specified path.""" - fs_regex = re.compile(r'.*fileset.name:\s+(?P\w+)', re.S) - try: - (out, err) = self.gpfs_execute(self.GPFS_PATH + 'mmlsattr', '-L', - path) - except processutils.ProcessExecutionError as exc: - LOG.error('Failed to issue mmlsattr command on path %(path)s, ' - 'error: %(error)s', - {'path': path, - 'error': exc.stderr}) - raise exception.VolumeBackendAPIException(data=exc.stderr) - try: - fileset = fs_regex.match(out).group('fileset') - return fileset - except AttributeError as exc: - msg = (_('Failed to find fileset for path %(path)s, command ' - 'output: %(cmdout)s.') % - {'path': path, - 'cmdout': out}) - LOG.error(msg) - raise exception.VolumeBackendAPIException(data=msg) - - def _verify_gpfs_pool(self, storage_pool): - """Return true if the specified pool is a valid GPFS 
storage pool.""" - try: - self.gpfs_execute(self.GPFS_PATH + 'mmlspool', self._gpfs_device, - storage_pool) - return True - except processutils.ProcessExecutionError: - return False - - def _update_volume_storage_pool(self, local_path, new_pool): - """Set the storage pool for a volume to the specified value.""" - if new_pool is None: - new_pool = 'system' - - if not self._verify_gpfs_pool(new_pool): - msg = (_('Invalid storage pool %s requested. Retype failed.') % - new_pool) - LOG.error(msg) - raise exception.VolumeBackendAPIException(data=msg) - - try: - self.gpfs_execute(self.GPFS_PATH + 'mmchattr', '-P', new_pool, - local_path) - LOG.debug('Updated storage pool with mmchattr to %s.', new_pool) - return True - except processutils.ProcessExecutionError as exc: - LOG.info('Could not update storage pool with mmchattr to ' - '%(pool)s, error: %(error)s', - {'pool': new_pool, - 'error': exc.stderr}) - return False - - def _get_gpfs_fs_release_level(self, path): - """Return the GPFS version of the specified file system. - - The file system is specified by any valid path it contains. 
- """ - filesystem = self._get_filesystem_from_path(path) - try: - (out, err) = self.gpfs_execute(self.GPFS_PATH + 'mmlsfs', - filesystem, '-V', '-Y') - except processutils.ProcessExecutionError as exc: - LOG.error('Failed to issue mmlsfs command for path %(path)s, ' - 'error: %(error)s.', - {'path': path, - 'error': exc.stderr}) - raise exception.VolumeBackendAPIException(data=exc.stderr) - - lines = out.splitlines() - value_token = lines[0].split(':').index('data') - fs_release_level_str = lines[1].split(':')[value_token] - # at this point, release string looks like "13.23 (3.5.0.7)" - # extract first token and convert to whole number value - fs_release_level = int(float(fs_release_level_str.split()[0]) * 100) - return filesystem, fs_release_level - - def _get_gpfs_cluster_release_level(self): - """Return the GPFS version of current cluster.""" - try: - (out, err) = self.gpfs_execute(self.GPFS_PATH + 'mmlsconfig', - 'minreleaseLeveldaemon', - '-Y') - except processutils.ProcessExecutionError as exc: - LOG.error('Failed to issue mmlsconfig command, error: %s.', - exc.stderr) - raise exception.VolumeBackendAPIException(data=exc.stderr) - - lines = out.splitlines() - value_token = lines[0].split(':').index('value') - min_release_level = lines[1].split(':')[value_token] - return int(min_release_level) - - def _is_gpfs_path(self, directory): - """Determine if the specified path is in a gpfs file system. - - If not part of a gpfs file system, raise ProcessExecutionError. 
- """ - try: - self.gpfs_execute(self.GPFS_PATH + 'mmlsattr', directory) - except processutils.ProcessExecutionError as exc: - LOG.error('Failed to issue mmlsattr command ' - 'for path %(path)s, ' - 'error: %(error)s.', - {'path': directory, - 'error': exc.stderr}) - raise exception.VolumeBackendAPIException(data=exc.stderr) - - def _is_same_fileset(self, path1, path2): - """Return true if the two paths are in the same GPFS fileset.""" - if self._get_fileset_from_path(path1) == \ - self._get_fileset_from_path(path2): - return True - return False - - def _same_cluster(self, host): - """Return true if the host is a member of the same GPFS cluster.""" - dest_location = host['capabilities'].get('location_info') - if self._stats['location_info'] == dest_location: - return True - return False - - def _set_rw_permission(self, path, modebits='660'): - """Set permission bits for the path.""" - self.gpfs_execute('chmod', modebits, path) - - def _can_migrate_locally(self, host): - """Return true if the host can migrate a volume locally.""" - if 'location_info' not in host['capabilities']: - LOG.debug('Evaluate migration: no location info, ' - 'cannot migrate locally.') - return None - info = host['capabilities']['location_info'] - try: - (dest_type, dest_id, dest_path) = info.split(':') - except ValueError: - LOG.debug('Evaluate migration: unexpected location info, ' - 'cannot migrate locally: %s.', info) - return None - if (dest_id != self._cluster_id or - dest_type not in MIGRATION_ALLOWED_DEST_TYPE): - LOG.debug('Evaluate migration: different destination driver or ' - 'cluster id in location info: %s.', info) - return None - - LOG.debug('Evaluate migration: use local migration.') - return dest_path - - def do_setup(self, ctxt): - """Determine storage back end capabilities.""" - try: - self._cluster_id = self._get_gpfs_cluster_id() - except Exception as setup_exception: - msg = (_('Could not find GPFS cluster id: %s.') % - setup_exception) - LOG.error(msg) - raise 
exception.VolumeBackendAPIException(data=msg) - try: - gpfs_base = self.configuration.gpfs_mount_point_base - self._gpfs_device = self._get_filesystem_from_path(gpfs_base) - except Exception as setup_exception: - msg = (_('Could not find GPFS file system device: %s.') % - setup_exception) - LOG.error(msg) - raise exception.VolumeBackendAPIException(data=msg) - - pool = self.configuration.safe_get('gpfs_storage_pool') - self._storage_pool = pool - if not self._verify_gpfs_pool(self._storage_pool): - msg = (_('Invalid storage pool %s specificed.') % - self._storage_pool) - LOG.error(msg) - raise exception.VolumeBackendAPIException(data=msg) - - _gpfs_cluster_release_level = self._get_gpfs_cluster_release_level() - if _gpfs_cluster_release_level >= GPFS_ENC_MIN_RELEASE: - self._encryption_state = self._get_gpfs_encryption_status() - else: - LOG.info('Downlevel GPFS Cluster Detected. GPFS ' - 'encryption-at-rest feature not enabled in cluster ' - 'daemon level %(cur)s - must be at least at ' - 'level %(min)s.', - {'cur': _gpfs_cluster_release_level, - 'min': GPFS_ENC_MIN_RELEASE}) - - def check_for_setup_error(self): - """Returns an error if prerequisites aren't met.""" - self._check_gpfs_state() - - if self.configuration.gpfs_mount_point_base is None: - msg = _('Option gpfs_mount_point_base is not set correctly.') - LOG.error(msg) - raise exception.VolumeBackendAPIException(data=msg) - - if(self.configuration.gpfs_images_share_mode and - self.configuration.gpfs_images_dir is None): - msg = _('Option gpfs_images_dir is not set correctly.') - LOG.error(msg) - raise exception.VolumeBackendAPIException(data=msg) - - if(self.configuration.gpfs_images_share_mode == 'copy_on_write' and - not _same_filesystem(self.configuration.gpfs_mount_point_base, - self.configuration.gpfs_images_dir)): - msg = (_('gpfs_images_share_mode is set to copy_on_write, but ' - '%(vol)s and %(img)s belong to different file ' - 'systems.') % - {'vol': self.configuration.gpfs_mount_point_base, - 
'img': self.configuration.gpfs_images_dir}) - LOG.error(msg) - raise exception.VolumeBackendAPIException(data=msg) - - if(self.configuration.gpfs_images_share_mode == 'copy_on_write' and - not self._is_same_fileset(self.configuration.gpfs_mount_point_base, - self.configuration.gpfs_images_dir)): - msg = (_('gpfs_images_share_mode is set to copy_on_write, but ' - '%(vol)s and %(img)s belong to different filesets.') % - {'vol': self.configuration.gpfs_mount_point_base, - 'img': self.configuration.gpfs_images_dir}) - LOG.error(msg) - raise exception.VolumeBackendAPIException(data=msg) - - _gpfs_cluster_release_level = self._get_gpfs_cluster_release_level() - if not _gpfs_cluster_release_level >= GPFS_CLONE_MIN_RELEASE: - msg = (_('Downlevel GPFS Cluster Detected. GPFS Clone feature ' - 'not enabled in cluster daemon level %(cur)s - must ' - 'be at least at level %(min)s.') % - {'cur': _gpfs_cluster_release_level, - 'min': GPFS_CLONE_MIN_RELEASE}) - LOG.error(msg) - raise exception.VolumeBackendAPIException(data=msg) - - for directory in [self.configuration.gpfs_mount_point_base, - self.configuration.gpfs_images_dir]: - if directory is None: - continue - - if not directory.startswith('/'): - msg = (_('%s must be an absolute path.') % directory) - LOG.error(msg) - raise exception.VolumeBackendAPIException(data=msg) - - if not os.path.isdir(directory): - msg = (_('%s is not a directory.') % directory) - LOG.error(msg) - raise exception.VolumeBackendAPIException(data=msg) - - # Check if GPFS is mounted - self._verify_gpfs_path_state(directory) - - filesystem, fslevel = \ - self._get_gpfs_fs_release_level(directory) - if not fslevel >= GPFS_CLONE_MIN_RELEASE: - msg = (_('The GPFS filesystem %(fs)s is not at the required ' - 'release level. 
Current level is %(cur)s, must be ' - 'at least %(min)s.') % - {'fs': filesystem, - 'cur': fslevel, - 'min': GPFS_CLONE_MIN_RELEASE}) - LOG.error(msg) - raise exception.VolumeBackendAPIException(data=msg) - - def _create_sparse_file(self, path, size): - """Creates file with 0 disk usage.""" - - sizestr = _sizestr(size) - self.gpfs_execute('truncate', '-s', sizestr, path) - - def _allocate_file_blocks(self, path, size): - """Preallocate file blocks by writing zeros.""" - - block_size_mb = 1 - block_count = size * units.Gi / (block_size_mb * units.Mi) - - self.gpfs_execute('dd', 'if=/dev/zero', 'of=%s' % path, - 'bs=%dM' % block_size_mb, - 'count=%d' % block_count) - - def _gpfs_change_attributes(self, options, path): - """Update GPFS attributes on the specified file.""" - - cmd = [self.GPFS_PATH + 'mmchattr'] - cmd.extend(options) - cmd.append(path) - LOG.debug('Update volume attributes with mmchattr to %s.', options) - self.gpfs_execute(*cmd) - - def _set_volume_attributes(self, volume, path, metadata): - """Set various GPFS attributes for this volume.""" - - set_pool = False - options = [] - for item in metadata: - if item == 'data_pool_name': - options.extend(['-P', metadata[item]]) - set_pool = True - elif item == 'replicas': - options.extend(['-r', metadata[item], '-m', metadata[item]]) - elif item == 'dio': - options.extend(['-D', metadata[item]]) - elif item == 'write_affinity_depth': - options.extend(['--write-affinity-depth', metadata[item]]) - elif item == 'block_group_factor': - options.extend(['--block-group-factor', metadata[item]]) - elif item == 'write_affinity_failure_group': - options.extend(['--write-affinity-failure-group', - metadata[item]]) - - # metadata value has precedence over value set in volume type - if self.configuration.gpfs_storage_pool and not set_pool: - options.extend(['-P', self.configuration.gpfs_storage_pool]) - - if options: - self._gpfs_change_attributes(options, path) - - fstype = None - fslabel = None - for item in metadata: 
- if item == 'fstype': - fstype = metadata[item] - elif item == 'fslabel': - fslabel = metadata[item] - if fstype: - self._mkfs(volume, fstype, fslabel) - - def create_volume(self, volume): - """Creates a GPFS volume.""" - # Check if GPFS is mounted - self._verify_gpfs_path_state(self.configuration.gpfs_mount_point_base) - - volume_path = self._get_volume_path(volume) - volume_size = volume['size'] - - # Create a sparse file first; allocate blocks later if requested - self._create_sparse_file(volume_path, volume_size) - self._set_rw_permission(volume_path) - # Set the attributes prior to allocating any blocks so that - # they are allocated according to the policy - self._set_volume_attributes(volume, volume_path, volume.metadata) - - if not self.configuration.gpfs_sparse_volumes: - self._allocate_file_blocks(volume_path, volume_size) - - def _create_volume_from_snapshot(self, volume, snapshot): - snapshot_path = self._get_snapshot_path(snapshot) - # check if the snapshot lies in the same CG as the volume to be created - # if yes, clone the volume from the snapshot, else perform full copy - clone = False - ctxt = context.get_admin_context() - snap_parent_vol = self.db.volume_get(ctxt, snapshot['volume_id']) - if (volume['group_id'] == - snap_parent_vol['group_id']): - clone = True - volume_path = self._get_volume_path(volume) - if clone: - self._create_gpfs_copy(src=snapshot_path, dest=volume_path) - self._gpfs_redirect(volume_path) - else: - self._gpfs_full_copy(snapshot_path, volume_path) - - self._set_rw_permission(volume_path) - self._set_volume_attributes(volume, volume_path, volume.metadata) - - def create_volume_from_snapshot(self, volume, snapshot): - """Creates a GPFS volume from a snapshot.""" - self._create_volume_from_snapshot(volume, snapshot) - virt_size = self._resize_volume_file(volume, volume['size']) - return {'size': math.ceil(virt_size / units.Gi)} - - def _get_volume_path(self, volume): - return self.local_path(volume) - - def 
_create_cloned_volume(self, volume, src_vref): - src = self._get_volume_path(src_vref) - dest = self._get_volume_path(volume) - if (volume['group_id'] == src_vref['group_id']): - self._create_gpfs_clone(src, dest) - else: - self._gpfs_full_copy(src, dest) - self._set_rw_permission(dest) - self._set_volume_attributes(volume, dest, volume.metadata) - - def create_cloned_volume(self, volume, src_vref): - """Create a GPFS volume from another volume.""" - self._create_cloned_volume(volume, src_vref) - virt_size = self._resize_volume_file(volume, volume['size']) - return {'size': math.ceil(virt_size / units.Gi)} - - def _delete_gpfs_file(self, fchild, mount_point=None): - """Delete a GPFS file and cleanup clone children.""" - - if mount_point is None: - if not os.path.exists(fchild): - return - else: - fchild_local_path = os.path.join(mount_point, - os.path.basename(fchild)) - if not os.path.exists(fchild_local_path): - return - - (out, err) = self.gpfs_execute(self.GPFS_PATH + 'mmclone', 'show', - fchild) - fparent = None - delete_parent = False - inode_regex = re.compile( - r'.*\s+(?:yes|no)\s+\d+\s+(?P\d+)', re.M | re.S) - match = inode_regex.match(out) - if match: - inode = match.group('inode') - if mount_point is None: - path = os.path.dirname(fchild) - else: - path = mount_point - - # -ignore_readdir_race is to prevent the command from exiting - # with nonzero RC when some files in the directory are removed - # by other delete operations. -quit is to end the execution as - # soon as we get one filename; it is not expected that two or - # more filenames found. 
- (out, err) = self._execute('find', path, '-maxdepth', '1', - '-ignore_readdir_race', - '-inum', inode, '-print0', '-quit', - run_as_root=True) - if out: - fparent = out.split('\0', 1)[0] - - if mount_point is None: - self._execute( - 'rm', '-f', fchild, check_exit_code=False, run_as_root=True) - else: - self._execute( - 'rm', '-f', fchild_local_path, check_exit_code=False, - run_as_root=True) - - # There is no need to check for volume references on this snapshot - # because 'rm -f' itself serves as a simple and implicit check. If the - # parent is referenced by another volume, GPFS doesn't allow deleting - # it. 'rm -f' silently fails and the subsequent check on the path - # indicates whether there are any volumes derived from that snapshot. - # If there are such volumes, we quit recursion and let the other - # volumes delete the snapshot later. If there are no references, rm - # would succeed and the snapshot is deleted. - if mount_point is None: - if not os.path.exists(fchild) and fparent: - delete_parent = True - else: - if not os.path.exists(fchild_local_path) and fparent: - delete_parent = True - - if delete_parent: - fpbase = os.path.basename(fparent) - if fpbase.endswith('.snap') or fpbase.endswith('.ts'): - if mount_point is None: - self._delete_gpfs_file(fparent) - else: - fparent_remote_path = os.path.join(os.path.dirname(fchild), - fpbase) - fparent_mount_path = os.path.dirname(fparent) - self._delete_gpfs_file(fparent_remote_path, - fparent_mount_path) - - def delete_volume(self, volume): - """Deletes a logical volume.""" - # Check if GPFS is mounted - self._verify_gpfs_path_state(self.configuration.gpfs_mount_point_base) - - volume_path = self.local_path(volume) - self._delete_gpfs_file(volume_path) - - def _gpfs_redirect(self, src): - """Removes the copy_on_write dependency between src and parent. - - Remove the copy_on_write dependency between the src file and its - immediate parent such that the length of dependency chain is reduced - by 1. 
- """ - max_depth = self.configuration.gpfs_max_clone_depth - if max_depth == 0: - return False - (out, err) = self.gpfs_execute(self.GPFS_PATH + 'mmclone', 'show', src) - depth_regex = re.compile(r'.*\s+no\s+(?P\d+)', re.M | re.S) - match = depth_regex.match(out) - if match: - depth = int(match.group('depth')) - if depth > max_depth: - self.gpfs_execute(self.GPFS_PATH + 'mmclone', 'redirect', src) - return True - return False - - def _create_gpfs_clone(self, src, dest): - """Create a GPFS file clone parent for the specified file.""" - snap = dest + ".snap" - self._create_gpfs_snap(src, snap) - self._create_gpfs_copy(snap, dest) - if self._gpfs_redirect(src) and self._gpfs_redirect(dest): - self._execute('rm', '-f', snap, run_as_root=True) - - def _create_gpfs_copy(self, src, dest): - """Create a GPFS file clone copy for the specified file.""" - self.gpfs_execute(self.GPFS_PATH + 'mmclone', 'copy', src, dest) - - def _gpfs_full_copy(self, src, dest): - """Create a full copy from src to dest.""" - self.gpfs_execute('cp', src, dest, check_exit_code=True) - - def _create_gpfs_snap(self, src, dest=None): - """Create a GPFS file clone snapshot for the specified file.""" - if dest is None: - self.gpfs_execute(self.GPFS_PATH + 'mmclone', 'snap', src) - else: - self.gpfs_execute(self.GPFS_PATH + 'mmclone', 'snap', src, dest) - - def _is_gpfs_parent_file(self, gpfs_file): - """Return true if the specified file is a gpfs clone parent.""" - out, err = self.gpfs_execute(self.GPFS_PATH + 'mmclone', 'show', - gpfs_file) - ptoken = out.splitlines().pop().split()[0] - return ptoken == 'yes' - - def create_snapshot(self, snapshot): - """Creates a GPFS snapshot.""" - snapshot_path = self._get_snapshot_path(snapshot) - volume_path = os.path.join(os.path.dirname(snapshot_path), - snapshot.volume.name) - self._create_gpfs_snap(src=volume_path, dest=snapshot_path) - self._set_rw_permission(snapshot_path, modebits='640') - self._gpfs_redirect(volume_path) - - def delete_snapshot(self, 
snapshot): - """Deletes a GPFS snapshot.""" - # Rename the deleted snapshot to indicate it no longer exists in - # cinder db. Attempt to delete the snapshot. If the snapshot has - # clone children, the delete will fail silently. When volumes that - # are clone children are deleted in the future, the remaining ts - # snapshots will also be deleted. - snapshot_path = self._get_snapshot_path(snapshot) - snapshot_ts_path = '%s.ts' % snapshot_path - self.gpfs_execute('mv', snapshot_path, snapshot_ts_path) - self.gpfs_execute('rm', '-f', snapshot_ts_path, - check_exit_code=False) - - def _get_snapshot_path(self, snapshot): - snap_parent_vol_path = self.local_path(snapshot.volume) - snapshot_path = os.path.join(os.path.dirname(snap_parent_vol_path), - snapshot.name) - return snapshot_path - - def local_path(self, volume): - """Return the local path for the specified volume.""" - # Check if the volume is part of a consistency group and return - # the local_path accordingly. - if volume.group_id is not None: - if volume_utils.is_group_a_cg_snapshot_type(volume.group): - cgname = "consisgroup-%s" % volume.group_id - volume_path = os.path.join( - self.configuration.gpfs_mount_point_base, - cgname, - volume.name - ) - return volume_path - - volume_path = os.path.join( - self.configuration.gpfs_mount_point_base, - volume.name - ) - return volume_path - - def _get_gpfs_encryption_status(self): - """Determine if the backend is configured with key manager.""" - try: - (out, err) = self.gpfs_execute(self.GPFS_PATH + 'mmlsfs', - self._gpfs_device, - '--encryption', '-Y') - lines = out.splitlines() - value_token = lines[0].split(':').index('data') - encryption_status = lines[1].split(':')[value_token] - return encryption_status - except processutils.ProcessExecutionError as exc: - LOG.error('Failed to issue mmlsfs command, error: %s.', - exc.stderr) - raise exception.VolumeBackendAPIException(data=exc.stderr) - - def ensure_export(self, context, volume): - """Synchronously recreates 
an export for a logical volume.""" - pass - - def create_export(self, context, volume, connector): - """Exports the volume.""" - pass - - def remove_export(self, context, volume): - """Removes an export for a logical volume.""" - pass - - def initialize_connection(self, volume, connector): - return { - 'driver_volume_type': 'gpfs', - 'data': { - 'name': volume['name'], - 'device_path': self.local_path(volume), - } - } - - def terminate_connection(self, volume, connector, **kwargs): - pass - - def get_volume_stats(self, refresh=False): - """Get volume stats. - - If 'refresh' is True, or stats have never been updated, run update - the stats first. - """ - if not self._stats or refresh: - self._update_volume_stats() - - return self._stats - - def _update_volume_stats(self): - """Retrieve stats info from volume group.""" - - LOG.debug("Updating volume stats.") - gpfs_base = self.configuration.gpfs_mount_point_base - data = {} - backend_name = self.configuration.safe_get('volume_backend_name') - data["volume_backend_name"] = backend_name or 'GPFS' - data["vendor_name"] = 'IBM' - data["driver_version"] = self.VERSION - data["storage_protocol"] = 'file' - free, capacity = self._get_available_capacity(self.configuration. 
- gpfs_mount_point_base) - data['total_capacity_gb'] = math.ceil(capacity / units.Gi) - data['free_capacity_gb'] = math.ceil(free / units.Gi) - data['reserved_percentage'] = 0 - data['QoS_support'] = False - data['storage_pool'] = self._storage_pool - data['location_info'] = ('GPFSDriver:%(cluster_id)s:%(root_path)s' % - {'cluster_id': self._cluster_id, - 'root_path': gpfs_base}) - - data['consistencygroup_support'] = 'True' - data['consistent_group_snapshot_enabled'] = True - - if self._encryption_state.lower() == 'yes': - data['gpfs_encryption_rest'] = 'True' - - self._stats = data - - def clone_image(self, context, volume, - image_location, image_meta, - image_service): - """Create a volume from the specified image.""" - return self._clone_image(volume, image_location, image_meta['id']) - - def _is_cloneable(self, image_id): - """Return true if the specified image can be cloned by GPFS.""" - if not((self.configuration.gpfs_images_dir and - self.configuration.gpfs_images_share_mode)): - reason = 'glance repository not configured to use GPFS' - return False, reason, None - - image_path = os.path.join(self.configuration.gpfs_images_dir, image_id) - try: - self._is_gpfs_path(image_path) - except processutils.ProcessExecutionError: - reason = 'image file not in GPFS' - return False, reason, None - - return True, None, image_path - - def _clone_image(self, volume, image_location, image_id): - """Attempt to create a volume by efficiently copying image to volume. - - If both source and target are backed by gpfs storage and the source - image is in raw format move the image to create a volume using either - gpfs clone operation or with a file copy. If the image format is not - raw, convert it to raw at the volume path. 
- """ - # Check if GPFS is mounted - self._verify_gpfs_path_state(self.configuration.gpfs_mount_point_base) - - cloneable_image, reason, image_path = self._is_cloneable(image_id) - if not cloneable_image: - LOG.debug('Image %(img)s not cloneable: %(reas)s.', - {'img': image_id, 'reas': reason}) - return (None, False) - - vol_path = self.local_path(volume) - - data = image_utils.qemu_img_info(image_path) - - # if image format is already raw either clone it or - # copy it depending on config file settings - if data.file_format == 'raw': - if (self.configuration.gpfs_images_share_mode == - 'copy_on_write'): - LOG.debug('Clone image to vol %s using mmclone.', - volume['id']) - # if the image is not already a GPFS snap file make it so - if not self._is_gpfs_parent_file(image_path): - self._create_gpfs_snap(image_path) - - self._create_gpfs_copy(image_path, vol_path) - elif self.configuration.gpfs_images_share_mode == 'copy': - LOG.debug('Clone image to vol %s using copyfile.', - volume['id']) - shutil.copyfile(image_path, vol_path) - - # if image is not raw convert it to raw into vol_path destination - else: - LOG.debug('Clone image to vol %s using qemu convert.', - volume['id']) - image_utils.convert_image(image_path, vol_path, 'raw') - - self._set_rw_permission(vol_path) - self._resize_volume_file(volume, volume['size']) - - return {'provider_location': None}, True - - def copy_image_to_volume(self, context, volume, image_service, image_id): - """Fetch the image from image_service and write it to the volume. - - Note that cinder.volume.flows.create_volume will attempt to use - clone_image to efficiently create volume from image when both - source and target are backed by gpfs storage. If that is not the - case, this function is invoked and uses fetch_to_raw to create the - volume. 
- """ - # Check if GPFS is mounted - self._verify_gpfs_path_state(self.configuration.gpfs_mount_point_base) - - LOG.debug('Copy image to vol %s using image_utils fetch_to_raw.', - volume['id']) - image_utils.fetch_to_raw(context, image_service, image_id, - self.local_path(volume), - self.configuration.volume_dd_blocksize, - size=volume['size']) - self._resize_volume_file(volume, volume['size']) - - def _resize_volume_file(self, volume, new_size): - """Resize volume file to new size.""" - vol_path = self.local_path(volume) - try: - image_utils.resize_image(vol_path, new_size, run_as_root=True) - except processutils.ProcessExecutionError as exc: - LOG.error("Failed to resize volume " - "%(volume_id)s, error: %(error)s.", - {'volume_id': volume['id'], - 'error': exc.stderr}) - raise exception.VolumeBackendAPIException(data=exc.stderr) - - data = image_utils.qemu_img_info(vol_path) - return data.virtual_size - - def extend_volume(self, volume, new_size): - """Extend an existing volume.""" - self._resize_volume_file(volume, new_size) - - def copy_volume_to_image(self, context, volume, image_service, image_meta): - """Copy the volume to the specified image.""" - image_utils.upload_volume(context, - image_service, - image_meta, - self.local_path(volume)) - - def _migrate_volume(self, volume, host): - """Migrate vol if source and dest are managed by same GPFS cluster.""" - LOG.debug('Migrate volume request %(vol)s to %(host)s.', - {'vol': volume['name'], - 'host': host['host']}) - dest_path = self._can_migrate_locally(host) - - if dest_path is None: - LOG.debug('Cannot migrate volume locally, use generic migration.') - return (False, None) - if dest_path == self.configuration.gpfs_mount_point_base: - LOG.debug('Migration target is same cluster and path, ' - 'no work needed.') - return (True, None) - - LOG.debug('Migration target is same cluster but different path, ' - 'move the volume file.') - local_path = self._get_volume_path(volume) - new_path = os.path.join(dest_path, 
volume['name']) - try: - self.gpfs_execute('mv', local_path, new_path) - return (True, None) - except processutils.ProcessExecutionError as exc: - LOG.error('Driver-based migration of volume %(vol)s failed. ' - 'Move from %(src)s to %(dst)s failed with error: ' - '%(error)s.', - {'vol': volume['name'], - 'src': local_path, - 'dst': new_path, - 'error': exc.stderr}) - return (False, None) - - def migrate_volume(self, context, volume, host): - """Attempt to migrate a volume to specified host.""" - return self._migrate_volume(volume, host) - - def retype(self, context, volume, new_type, diff, host): - """Modify volume to be of new type.""" - LOG.debug('Retype volume request %(vol)s to be %(type)s ' - '(host: %(host)s), diff %(diff)s.', - {'vol': volume['name'], - 'type': new_type, - 'host': host, - 'diff': diff}) - - retyped = False - migrated = False - pools = diff['extra_specs'].get('capabilities:storage_pool') - - backends = diff['extra_specs'].get('volume_backend_name') - hosts = (volume['host'], host['host']) - - # if different backends let migration create a new volume and copy - # data because the volume is considered to be substantially different - if _different(backends): - backend1, backend2 = backends - LOG.debug('Retype request is for different backends, ' - 'use migration: %(backend1)s %(backend2)s.', - {'backend1': backend1, 'backend2': backend1}) - return False - - if _different(pools): - old, new = pools - LOG.debug('Retype pool attribute from %(old)s to %(new)s.', - {'old': old, 'new': new}) - retyped = self._update_volume_storage_pool(self.local_path(volume), - new) - - if _different(hosts): - source, destination = hosts - LOG.debug('Retype hosts migrate from: %(source)s to ' - '%(destination)s.', {'source': source, - 'destination': destination}) - migrated, mdl_update = self._migrate_volume(volume, host) - if migrated: - updates = {'host': host['host']} - self.db.volume_update(context, volume['id'], updates) - - return retyped or migrated - - def 
_mkfs(self, volume, filesystem, label=None): - """Initialize volume to be specified filesystem type.""" - if filesystem == 'swap': - cmd = ['mkswap'] - else: - cmd = ['mkfs', '-t', filesystem] - - if filesystem in ('ext3', 'ext4'): - cmd.append('-F') - if label: - if filesystem in ('msdos', 'vfat'): - label_opt = '-n' - else: - label_opt = '-L' - cmd.extend([label_opt, label]) - - path = self.local_path(volume) - cmd.append(path) - try: - self._execute(*cmd, run_as_root=True) - except processutils.ProcessExecutionError as exc: - exception_message = (_("mkfs failed on volume %(vol)s, " - "error message was: %(err)s.") - % {'vol': volume['name'], 'err': exc.stderr}) - LOG.error(exception_message) - raise exception.VolumeBackendAPIException( - data=exception_message) - - def _get_available_capacity(self, path): - """Calculate available space on path.""" - # Check if GPFS is mounted - try: - self._verify_gpfs_path_state(path) - mounted = True - except exception.VolumeBackendAPIException: - mounted = False - - # If GPFS is not mounted, return zero capacity. So that the volume - # request can be scheduled to another volume service. - if not mounted: - return 0, 0 - - out, err = self.gpfs_execute('df', '-P', '-B', '1', path, - run_as_root=True) - out = out.splitlines()[1] - size = int(out.split()[1]) - available = int(out.split()[3]) - return available, size - - def _verify_gpfs_path_state(self, path): - """Examine if GPFS is active and file system is mounted or not.""" - try: - self._is_gpfs_path(path) - except processutils.ProcessExecutionError: - msg = (_('%s cannot be accessed. 
Verify that GPFS is active and ' - 'file system is mounted.') % path) - LOG.error(msg) - raise exception.VolumeBackendAPIException(data=msg) - - def _create_consistencygroup(self, context, group): - """Create consistency group of GPFS volumes.""" - cgname = "consisgroup-%s" % group['id'] - fsdev = self._gpfs_device - cgpath = os.path.join(self.configuration.gpfs_mount_point_base, - cgname) - try: - self.gpfs_execute(self.GPFS_PATH + 'mmcrfileset', fsdev, cgname, - '--inode-space', 'new') - except processutils.ProcessExecutionError as e: - msg = (_('Failed to create consistency group: %(cgid)s. ' - 'Error: %(excmsg)s.') % - {'cgid': group['id'], 'excmsg': six.text_type(e)}) - LOG.error(msg) - raise exception.VolumeBackendAPIException(data=msg) - - try: - self.gpfs_execute(self.GPFS_PATH + 'mmlinkfileset', fsdev, cgname, - '-J', cgpath) - except processutils.ProcessExecutionError as e: - msg = (_('Failed to link fileset for the share %(cgname)s. ' - 'Error: %(excmsg)s.') % - {'cgname': cgname, 'excmsg': six.text_type(e)}) - LOG.error(msg) - raise exception.VolumeBackendAPIException(data=msg) - - try: - self.gpfs_execute('chmod', '770', cgpath) - except processutils.ProcessExecutionError as e: - msg = (_('Failed to set permissions for the consistency group ' - '%(cgname)s. 
' - 'Error: %(excmsg)s.') % - {'cgname': cgname, 'excmsg': six.text_type(e)}) - LOG.error(msg) - raise exception.VolumeBackendAPIException(data=msg) - - model_update = {'status': fields.GroupStatus.AVAILABLE} - return model_update - - def _delete_consistencygroup(self, context, group, volumes): - """Delete consistency group of GPFS volumes.""" - cgname = "consisgroup-%s" % group['id'] - fsdev = self._gpfs_device - delete_fileset = True - - model_update = {} - model_update['status'] = group['status'] - - try: - self.gpfs_execute(self.GPFS_PATH + 'mmlsfileset', fsdev, cgname) - except processutils.ProcessExecutionError as e: - if e.exit_code == 2: - msg = (_('The fileset associated with consistency group ' - '%(cgname)s does not exist') % - {'cgname': cgname}) - LOG.info(msg) - delete_fileset = False - - # Unlink and delete the fileset associated with the consistency group. - # All of the volumes and volume snapshot data will also be deleted. - if delete_fileset: - try: - self.gpfs_execute(self.GPFS_PATH + 'mmunlinkfileset', fsdev, - cgname, '-f') - except processutils.ProcessExecutionError as e: - msg = (_('Failed to unlink fileset for consistency group ' - '%(cgname)s. Error: %(excmsg)s.') % - {'cgname': cgname, 'excmsg': six.text_type(e)}) - LOG.error(msg) - raise exception.VolumeBackendAPIException(data=msg) - - try: - self.gpfs_execute(self.GPFS_PATH + 'mmdelfileset', - fsdev, cgname, '-f') - except processutils.ProcessExecutionError as e: - msg = (_('Failed to delete fileset for consistency group ' - '%(cgname)s. 
Error: %(excmsg)s.') % - {'cgname': cgname, 'excmsg': six.text_type(e)}) - LOG.error(msg) - raise exception.VolumeBackendAPIException(data=msg) - - for volume_ref in volumes: - volume_ref['status'] = 'deleted' - - model_update = {'status': group['status']} - - return None, None - - def _create_cgsnapshot(self, context, cgsnapshot, snapshots): - """Create snapshot of a consistency group of GPFS volumes.""" - model_update = {'status': fields.GroupStatus.AVAILABLE} - snapshots_model_update = [] - - try: - for snapshot in snapshots: - self.create_snapshot(snapshot) - except exception.VolumeBackendAPIException as err: - model_update['status'] = ( - fields.GroupStatus.ERROR) - LOG.error("Failed to create the snapshot %(snap)s of " - "CGSnapshot. Exception: %(exception)s.", - {'snap': snapshot.name, 'exception': err}) - - for snapshot in snapshots: - snapshots_model_update.append( - {'id': snapshot.id, - 'status': model_update['status']}) - - return model_update, snapshots_model_update - - def _delete_cgsnapshot(self, context, cgsnapshot, snapshots): - """Delete snapshot of a consistency group of GPFS volumes.""" - model_update = {'status': fields.GroupStatus.DELETED} - snapshots_model_update = [] - - try: - for snapshot in snapshots: - self.delete_snapshot(snapshot) - except exception.VolumeBackendAPIException as err: - model_update['status'] = ( - fields.GroupStatus.ERROR_DELETING) - LOG.error("Failed to delete the snapshot %(snap)s of " - "CGSnapshot. 
Exception: %(exception)s.", - {'snap': snapshot.name, 'exception': err}) - - for snapshot in snapshots: - snapshots_model_update.append( - {'id': snapshot.id, - 'status': model_update['status']}) - - return model_update, snapshots_model_update - - def _update_consistencygroup(self, context, group, - add_volumes=None, remove_volumes=None): - msg = _('Updating a consistency group is not supported.') - LOG.error(msg) - raise exception.GPFSDriverUnsupportedOperation(msg=msg) - - def _create_consistencygroup_from_src(self, context, group, volumes, - cgsnapshot=None, snapshots=None, - source_cg=None, source_vols=None): - msg = _('Creating a consistency group from any source consistency ' - 'group or consistency group snapshot is not supported.') - LOG.error(msg) - raise exception.GPFSDriverUnsupportedOperation(msg=msg) - - def create_group(self, ctxt, group): - """Creates a group. - - :param ctxt: the context of the caller. - :param group: the Group object of the group to be created. - :returns: model_update - """ - if volume_utils.is_group_a_cg_snapshot_type(group): - return self._create_consistencygroup(ctxt, group) - - # If it wasn't a consistency group request ignore it and we'll rely on - # the generic group implementation. - raise NotImplementedError() - - def delete_group(self, ctxt, group, volumes): - """Deletes a group. - - :param ctxt: the context of the caller. - :param group: the Group object of the group to be deleted. - :param volumes: a list of Volume objects in the group. - :returns: model_update, volumes_model_update - """ - if volume_utils.is_group_a_cg_snapshot_type(group): - return self._delete_consistencygroup(ctxt, group, volumes) - - # If it wasn't a consistency group request ignore it and we'll rely on - # the generic group implementation. - raise NotImplementedError() - - def update_group(self, ctxt, group, - add_volumes=None, remove_volumes=None): - """Updates a group. - - :param ctxt: the context of the caller. 
- :param group: the Group object of the group to be updated. - :param add_volumes: a list of Volume objects to be added. - :param remove_volumes: a list of Volume objects to be removed. - :returns: model_update, add_volumes_update, remove_volumes_update - """ - - if volume_utils.is_group_a_cg_snapshot_type(group): - return self._update_consistencygroup(ctxt, - group, - add_volumes, - remove_volumes) - - # If it wasn't a consistency group request ignore it and we'll rely on - # the generic group implementation. - raise NotImplementedError() - - def create_group_snapshot(self, ctxt, group_snapshot, snapshots): - """Creates a group_snapshot. - - :param ctxt: the context of the caller. - :param group_snapshot: the GroupSnapshot object to be created. - :param snapshots: a list of Snapshot objects in the group_snapshot. - :returns: model_update, snapshots_model_update - """ - if volume_utils.is_group_a_cg_snapshot_type(group_snapshot): - return self._create_cgsnapshot(ctxt, group_snapshot, snapshots) - - # If it wasn't a consistency group request ignore it and we'll rely on - # the generic group implementation. - raise NotImplementedError() - - def delete_group_snapshot(self, ctxt, group_snapshot, snapshots): - """Deletes a group_snapshot. - - :param ctxt: the context of the caller. - :param group_snapshot: the GroupSnapshot object to be deleted. - :param snapshots: a list of snapshot objects in the group_snapshot. - :returns: model_update, snapshots_model_update - """ - if volume_utils.is_group_a_cg_snapshot_type(group_snapshot): - return self._delete_cgsnapshot(ctxt, group_snapshot, snapshots) - - # If it wasn't a consistency group request ignore it and we'll rely on - # the generic group implementation. - raise NotImplementedError() - - def create_group_from_src(self, ctxt, group, volumes, - group_snapshot=None, snapshots=None, - source_group=None, source_vols=None): - """Creates a group from source. - - :param ctxt: the context of the caller. 
- :param group: the Group object to be created. - :param volumes: a list of Volume objects in the group. - :param group_snapshot: the GroupSnapshot object as source. - :param snapshots: a list of snapshot objects in group_snapshot. - :param source_group: the Group object as source. - :param source_vols: a list of volume objects in the source_group. - :returns: model_update, volumes_model_update - """ - if volume_utils.is_group_a_cg_snapshot_type(group): - return self._create_consistencygroup_from_src(ctxt, - group, - volumes, - group_snapshot, - snapshots, - source_group, - source_vols) - - # If it wasn't a consistency group request ignore it and we'll rely on - # the generic group implementation. - raise NotImplementedError() - - -@interface.volumedriver -class GPFSRemoteDriver(GPFSDriver, san.SanDriver): - """GPFS cinder driver extension. - - This extends the capability of existing GPFS cinder driver - to be able to run the driver when cinder volume service - is not running on GPFS node where as Nova Compute is a GPFS - client. This deployment is typically in Container based - OpenStack environment. 
- """ - - VERSION = "1.0" - - # ThirdPartySystems wiki page - CI_WIKI_NAME = "IBM_GPFS_REMOTE_CI" - - def __init__(self, *args, **kwargs): - super(GPFSRemoteDriver, self).__init__(*args, **kwargs) - self.configuration.append_config_values(san.san_opts) - self.configuration.append_config_values(gpfs_remote_ssh_opts) - self.configuration.san_login = self.configuration.gpfs_user_login - self.configuration.san_password = ( - self.configuration.gpfs_user_password) - self.configuration.san_private_key = ( - self.configuration.gpfs_private_key) - self.configuration.san_ssh_port = self.configuration.gpfs_ssh_port - self.gpfs_execute = self._gpfs_remote_execute - self.GPFS_PATH = '/usr/lpp/mmfs/bin/' - - def _gpfs_remote_execute(self, *cmd, **kwargs): - check_exit_code = kwargs.pop('check_exit_code', None) - return self._run_ssh(cmd, check_exit_code) - - def do_setup(self, ctxt): - self.configuration.san_ip = self._get_active_gpfs_node_ip() - super(GPFSRemoteDriver, self).do_setup(ctxt) - - def _get_active_gpfs_node_ip(self): - """Set the san_ip to active gpfs node IP""" - active_gpfs_node_ip = None - gpfs_node_ips = self.configuration.gpfs_hosts - ssh = paramiko.SSHClient() - - # Validate good config setting here. - # Paramiko handles the case where the file is inaccessible. - if not self.configuration.gpfs_hosts_key_file: - raise exception.ParameterNotFound(param='gpfs_hosts_key_file') - elif not os.path.isfile(self.configuration.gpfs_hosts_key_file): - # If using the default path, just create the file. 
- if CONF.state_path in self.configuration.gpfs_hosts_key_file: - open(self.configuration.gpfs_hosts_key_file, 'a').close() - else: - msg = (_("Unable to find ssh_hosts_key_file: %s") % - self.configuration.gpfs_hosts_key_file) - raise exception.InvalidInput(reason=msg) - - ssh.load_host_keys(self.configuration.gpfs_hosts_key_file) - if self.configuration.gpfs_strict_host_key_policy: - ssh.set_missing_host_key_policy(paramiko.RejectPolicy()) - else: - ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy()) - if ((not self.configuration.gpfs_user_password) and - (not self.configuration.gpfs_private_key)): - msg = _("Specify a password or private_key") - raise exception.VolumeDriverException(msg) - for ip in gpfs_node_ips: - try: - if self.configuration.gpfs_user_password: - ssh.connect(ip, - port=self.configuration.gpfs_ssh_port, - username=self.configuration.gpfs_user_login, - password=self.configuration.gpfs_user_password, - timeout=self.configuration.ssh_conn_timeout) - elif self.configuration.gpfs_private_key: - pkfile = os.path.expanduser( - self.configuration.gpfs_private_key) - privatekey = paramiko.RSAKey.from_private_key_file(pkfile) - ssh.connect(ip, - port=self.configuration.gpfs_ssh_port, - username=self.configuration.gpfs_user_login, - pkey=privatekey, - timeout=self.configuration.ssh_conn_timeout) - except Exception as e: - LOG.info("Cannot connect to GPFS node %(ip)s. " - "Error is: %(err)s. " - "Continuing to next node", - {'ip': ip, 'err': e}) - continue - try: - # check if GPFS state is active on the node - (out, __) = processutils.ssh_execute(ssh, self.GPFS_PATH + - 'mmgetstate -Y') - lines = out.splitlines() - state_token = lines[0].split(':').index('state') - gpfs_state = lines[1].split(':')[state_token] - if gpfs_state != 'active': - LOG.info("GPFS is not active on node %(ip)s. 
" - "Continuing to next node", - {'ip': ip}) - continue - # check if filesystem is mounted on the node - processutils.ssh_execute( - ssh, - 'df ' + self.configuration.gpfs_mount_point_base) - except processutils.ProcessExecutionError as e: - LOG.info("GPFS is not active on node %(ip)s. " - "Error is: %(err)s. " - "Continuing to next node", - {'ip': ip, 'err': e}) - continue - # set the san_ip to the active gpfs node IP - LOG.debug("Setting active GPFS node IP to %s", ip) - active_gpfs_node_ip = ip - break - else: - msg = _("No GPFS node is active") - LOG.error(msg) - raise exception.VolumeBackendAPIException(data=msg) - return active_gpfs_node_ip - - -@interface.volumedriver -class GPFSNFSDriver(GPFSDriver, nfs.NfsDriver, san.SanDriver): - """GPFS cinder driver extension. - - This extends the capability of existing GPFS cinder driver - to be able to create cinder volumes when cinder volume service - is not running on GPFS node. - """ - - VERSION = "1.0" - - # ThirdPartySystems wiki page - CI_WIKI_NAME = "IBM_GPFS_NFS_CI" - - def __init__(self, *args, **kwargs): - self._context = None - self._storage_pool = None - self._cluster_id = None - super(GPFSNFSDriver, self).__init__(*args, **kwargs) - self.gpfs_execute = self._gpfs_remote_execute - self.configuration.append_config_values(remotefs.nas_opts) - self.configuration.san_ip = self.configuration.nas_host - self.configuration.san_login = self.configuration.nas_login - self.configuration.san_password = self.configuration.nas_password - self.configuration.san_private_key = ( - self.configuration.nas_private_key) - self.configuration.san_ssh_port = self.configuration.nas_ssh_port - self.GPFS_PATH = '/usr/lpp/mmfs/bin/' - - def _gpfs_remote_execute(self, *cmd, **kwargs): - check_exit_code = kwargs.pop('check_exit_code', None) - return self._run_ssh(cmd, check_exit_code) - - def do_setup(self, context): - super(GPFSNFSDriver, self).do_setup(context) - self._context = context - - def get_volume_stats(self, refresh=False): 
- """Get volume stats. - - If 'refresh' is True, or stats have never been updated, run update - the stats first. - """ - if not self._stats or refresh: - self._update_volume_stats() - - return self._stats - - def _update_volume_stats(self): - """Retrieve stats info from volume group.""" - - LOG.debug("Enter _update_volume_stats.") - gpfs_base = self.configuration.gpfs_mount_point_base - data = {} - backend_name = self.configuration.safe_get('volume_backend_name') - data['volume_backend_name'] = backend_name or 'GPFSNFS' - data['vendor_name'] = 'IBM' - data['driver_version'] = self.get_version() - data['storage_protocol'] = 'file' - - self._ensure_shares_mounted() - - global_capacity = 0 - global_free = 0 - for share in self._mounted_shares: - capacity, free, _used = self._get_capacity_info(share) - global_capacity += capacity - global_free += free - - data['total_capacity_gb'] = global_capacity / float(units.Gi) - data['free_capacity_gb'] = global_free / float(units.Gi) - data['reserved_percentage'] = 0 - data['QoS_support'] = False - data['storage_pool'] = self._storage_pool - data['location_info'] = ('GPFSNFSDriver:%(cluster_id)s:%(root_path)s' % - {'cluster_id': self._cluster_id, - 'root_path': gpfs_base}) - - data['consistencygroup_support'] = 'True' - data['consistent_group_snapshot_enabled'] = True - self._stats = data - LOG.debug("Exit _update_volume_stats.") - - def _get_volume_path(self, volume): - """Returns remote GPFS path for the given volume.""" - export_path = self.configuration.gpfs_mount_point_base - if volume.group_id is not None: - if volume_utils.is_group_a_cg_snapshot_type(volume.group): - cgname = "consisgroup-%s" % volume.group_id - return os.path.join(export_path, cgname, volume.name) - return os.path.join(export_path, volume.name) - - def local_path(self, volume): - """Returns the local path for the specified volume.""" - remotefs_share = volume['provider_location'] - base_local_path = self._get_mount_point_for_share(remotefs_share) - - # 
Check if the volume is part of a consistency group and return - # the local_path accordingly. - if volume.group_id is not None: - if volume_utils.is_group_a_cg_snapshot_type(volume.group): - cgname = "consisgroup-%s" % volume.group_id - return os.path.join(base_local_path, cgname, volume.name) - return os.path.join(base_local_path, volume.name) - - def _get_snapshot_path(self, snapshot): - """Returns remote GPFS path for the given snapshot.""" - snap_parent_vol = self.db.volume_get(self._context, - snapshot['volume_id']) - snap_parent_vol_path = self._get_volume_path(snap_parent_vol) - snapshot_path = os.path.join(os.path.dirname(snap_parent_vol_path), - snapshot['name']) - return snapshot_path - - def create_volume(self, volume): - """Creates a GPFS volume.""" - super(GPFSNFSDriver, self).create_volume(volume) - volume['provider_location'] = self._find_share(volume) - return {'provider_location': volume['provider_location']} - - def delete_volume(self, volume): - """Deletes a logical volume.""" - # Check if GPFS is mounted - self._verify_gpfs_path_state(self.configuration.gpfs_mount_point_base) - - volume_path = self._get_volume_path(volume) - mount_point = os.path.dirname(self.local_path(volume)) - # Delete all dependent snapshots, the snapshot will get deleted - # if the link count goes to zero, else rm will fail silently - self._delete_gpfs_file(volume_path, mount_point) - - def create_volume_from_snapshot(self, volume, snapshot): - """Creates a GPFS volume from a snapshot.""" - self._create_volume_from_snapshot(volume, snapshot) - volume['provider_location'] = self._find_share(volume) - self._resize_volume_file(volume, volume['size']) - return {'provider_location': volume['provider_location']} - - def create_cloned_volume(self, volume, src_vref): - """Create a GPFS volume from another volume.""" - self._create_cloned_volume(volume, src_vref) - volume['provider_location'] = self._find_share(volume) - self._resize_volume_file(volume, volume['size']) - return 
{'provider_location': volume['provider_location']} diff --git a/cinder/volume/drivers/ibm/ibm_storage/__init__.py b/cinder/volume/drivers/ibm/ibm_storage/__init__.py deleted file mode 100644 index 3613249cc..000000000 --- a/cinder/volume/drivers/ibm/ibm_storage/__init__.py +++ /dev/null @@ -1,138 +0,0 @@ -# Copyright (c) 2016 IBM Corporation -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -# - -from cinder import exception -from cinder.i18n import _ - -BLOCKS_PER_GIGABYTE = 2097152 -XIV_LOG_PREFIX = "[IBM XIV STORAGE]:" -XIV_CONNECTION_TYPE_ISCSI = 'iscsi' -XIV_CONNECTION_TYPE_FC = 'fibre_channel' -XIV_CONNECTION_TYPE_FC_ECKD = 'fibre_channel_eckd' -CHAP_NONE = 'disabled' -CHAP_ENABLED = 'enabled' -STORAGE_DRIVER_XIV = 'xiv' -STORAGE_DRIVER_DS8K = 'ds8k' - - -CONF_KEYS = { - 'driver': "volume_driver", - 'proxy': "proxy", - 'user': "san_login", - 'password': "san_password", - 'storage_pool': "san_clustername", - 'address': "san_ip", - 'driver_version': "ibm_storage_driver_version", - 'volume_api_class': "volume_api_class", - 'volume_backend': "volume_backend_name", - 'connection_type': "connection_type", - 'management_ips': "management_ips", - 'chap': 'chap', - 'system_id': 'system_id', - 'replication_device': 'replication_device' -} -CONF_BACKEND_KEYS = { - 'user': "san_login", - 'password': "san_password", - 'storage_pool': "san_clustername", - 'address': "san_ip", - 'volume_backend': "volume_backend_name", - 'connection_type': 
"connection_type", - 'management_ips': "management_ips", -} -FLAG_KEYS = { - 'user': "user", - 'password': "password", - 'storage_pool': "vol_pool", - 'address': "address", - 'connection_type': "connection_type", - 'bypass_connection_check': "XIV_BYPASS_CONNECTION_CHECK", - 'management_ips': "management_ips" -} -METADATA_KEYS = { - 'ibm_storage_version': 'openstack_ibm_storage_driver_version', - 'openstack_version': 'openstack_version', - 'pool_host_key': 'openstack_compute_node_%(hostname)s', - 'pool_volume_os': 'openstack_volume_os', - 'pool_volume_hostname': 'openstack_volume_hostname' -} - - -def get_host_or_create_from_iqn(connector, connection=None): - """Get host name. - - Return the hostname if existing at the connector (nova-compute info) - If not, generate one from the IQN or HBA - """ - if connection is None and connector.get('host', None): - return connector['host'] - - if connection != XIV_CONNECTION_TYPE_FC and 'initiator' in connector: - try: - initiator = connector['initiator'] - iqn_suffix = initiator.split('.')[-1].replace(":", "_") - except Exception: - if connector.get('initiator', 'None'): - raise exception.VolumeDriverException(message=( - _("Initiator format: %(iqn)s")) % - {'iqn': connector.get('initiator', 'None')}) - else: - raise exception.VolumeDriverException( - message=_("Initiator is missing from connector object")) - return "nova-compute-%s" % iqn_suffix - - if connection != XIV_CONNECTION_TYPE_ISCSI and len( - connector.get('wwpns', []) - ) > 0: - return "nova-compute-%s" % connector['wwpns'][0].replace(":", "_") - - raise exception.VolumeDriverException( - message=_("Compute host missing either iSCSI initiator or FC wwpns")) - - -def gigabytes_to_blocks(gigabytes): - return int(BLOCKS_PER_GIGABYTE * float(gigabytes)) - - -def get_online_iscsi_ports(ibm_storage_cli): - """Returns online iscsi ports.""" - iscsi_ports = [ - { - 'ip': p.get('address'), - # ipinterface_list returns ports field in Gen3, and - # port field in BlueRidge - 
'port': p.get('ports', p.get('port')), - 'module': p.get('module') - } for p in ibm_storage_cli.cmd.ipinterface_list() - if p.type == 'iSCSI'] - - iscsi_connected_ports = [ - { - 'port': p.index, - 'module': p.get('module_id') - } for p in ibm_storage_cli.cmd.ipinterface_list_ports() - if p.is_link_up == 'yes' and p.role == 'iSCSI'] - - to_return = [] - for ip in iscsi_ports: - if len([ - p for p in iscsi_connected_ports - if (p.get('port') == ip.get('port') and - p.get('module') == ip.get('module')) - ]) > 0: - to_return += [ip.get('ip')] - - return to_return diff --git a/cinder/volume/drivers/ibm/ibm_storage/certificate.py b/cinder/volume/drivers/ibm/ibm_storage/certificate.py deleted file mode 100644 index cc9accc70..000000000 --- a/cinder/volume/drivers/ibm/ibm_storage/certificate.py +++ /dev/null @@ -1,64 +0,0 @@ -# Copyright (c) 2016 IBM Corporation -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
-# -import os -import tempfile - -from oslo_log import log as logging - -LOG = logging.getLogger(__name__) - - -class CertificateCollector(object): - - def __init__(self, paths=None): - self.paths_checked = [ - '/etc/ssl/certs', '/etc/ssl/certs/xiv', '/etc/pki', '/etc/pki/xiv'] - if paths: - self.paths_checked.extend(paths) - self.paths_checked = set(self.paths_checked) - self.tmp_fd = None - self.tmp_path = None - - def collect_certificate(self): - self.tmp_fd, self.tmp_path = tempfile.mkstemp() - for path in self.paths_checked: - if os.path.exists(path) and os.path.isdir(path): - dir_contents = os.listdir(path) - for f in dir_contents: - full_path = os.path.join(path, f) - if (os.path.isfile(full_path) and - f.startswith('XIV') and - f.endswith('.pem')): - try: - cert_file = open(full_path, 'r') - os.write(self.tmp_fd, cert_file.read()) - cert_file.close() - except Exception: - LOG.exception("Failed to process certificate") - os.close(self.tmp_fd) - fsize = os.path.getsize(self.tmp_path) - if fsize > 0: - return self.tmp_path - else: - return None - - def free_certificate(self): - if self.tmp_path: - try: - os.remove(self.tmp_path) - except Exception: - pass - self.tmp_path = None diff --git a/cinder/volume/drivers/ibm/ibm_storage/cryptish.py b/cinder/volume/drivers/ibm/ibm_storage/cryptish.py deleted file mode 100644 index 2e36eae8a..000000000 --- a/cinder/volume/drivers/ibm/ibm_storage/cryptish.py +++ /dev/null @@ -1,27 +0,0 @@ -# Copyright (c) 2016 IBM Corporation -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the -# License for the specific language governing permissions and limitations -# under the License. -# -import base64 - - -def encrypt(string): - return base64.b64encode(string.encode('UTF-8')) - - -def decrypt(string): - missing_padding = len(string) % 4 - if missing_padding != 0: - string += b'=' * (4 - missing_padding) - return base64.b64decode(string) diff --git a/cinder/volume/drivers/ibm/ibm_storage/ds8k_connection.py b/cinder/volume/drivers/ibm/ibm_storage/ds8k_connection.py deleted file mode 100644 index acbe02d6f..000000000 --- a/cinder/volume/drivers/ibm/ibm_storage/ds8k_connection.py +++ /dev/null @@ -1,151 +0,0 @@ -# Copyright (c) 2016 IBM Corporation -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -# -import datetime -import hashlib -import re -import ssl - -from oslo_log import log as logging -from requests.packages.urllib3 import connection -from requests.packages.urllib3 import connectionpool -from requests.packages.urllib3 import poolmanager - -from cinder.i18n import _ - -LOG = logging.getLogger(__name__) - -try: - from OpenSSL.crypto import FILETYPE_ASN1 - from OpenSSL.crypto import load_certificate -except ImportError: - load_certificate = None - FILETYPE_ASN1 = None - -_PEM_RE = re.compile(u"""-----BEGIN CERTIFICATE-----\r? 
-.+?\r?-----END CERTIFICATE-----\r?\n?""", re.DOTALL) - - -class DS8KHTTPSConnection(connection.VerifiedHTTPSConnection): - """Extend the HTTPS Connection to do our own Certificate Verification.""" - - def _verify_cert(self, sock, ca_certs): - # If they asked us to not verify the Certificate then nothing to do - if not ca_certs: - return - - # Retrieve the Existing Certificates from the File in Binary Form - peercert = sock.getpeercert(True) - try: - with open(ca_certs, 'r') as f: - certs_str = f.read() - except Exception: - raise ssl.SSLError(_("Failed to read certificate from %s") - % ca_certs) - - # Verify the Existing Certificates - found = False - certs = [match.group(0) for match in _PEM_RE.finditer(certs_str)] - for cert in certs: - existcert = ssl.PEM_cert_to_DER_cert(cert) - # First check to make sure the 2 certificates are the same ones - if (hashlib.sha256(existcert).digest() == - hashlib.sha256(peercert).digest()): - found = True - break - if not found: - raise ssl.SSLError( - _("The certificate doesn't match the trusted one " - "in %s.") % ca_certs) - - if load_certificate is None and FILETYPE_ASN1 is None: - raise ssl.SSLError( - _("Missing 'pyOpenSSL' python module, ensure the " - "library is installed.")) - - # Throw an exception if the certificate given to us has expired - x509 = load_certificate(FILETYPE_ASN1, peercert) - if x509.has_expired(): - raise ssl.SSLError( - _("The certificate expired: %s") % x509.get_notAfter()) - - def connect(self): - """Override the Connect Method to fix the Certificate Verification.""" - # Add certificate verification - conn = self._new_conn() - - if getattr(self, '_tunnel_host', None): - # _tunnel_host was added in Python 2.6.3 - # (See: http://hg.python.org/cpython/rev/0f57b30a152f) - - self.sock = conn - # Calls self._set_hostport(), so self.host is - # self._tunnel_host below. - # - # disable pylint because pylint doesn't support importing - # from six.moves yet. 
see: - # https://bitbucket.org/logilab/pylint/issue/550/ - self._tunnel() # pylint: disable=E1101 - # Mark this connection as not reusable - self.auto_open = 0 - - # The RECENT_DATE is originally taken from requests. The date is just - # an arbitrary value that is used as a sanity test to identify hosts - # that are using the default time after bootup (e.g. 1970), and - # provides information for debugging - RECENT_DATE = datetime.date(2014, 1, 1) - is_time_off = datetime.date.today() < RECENT_DATE - if is_time_off: - LOG.warning('System time is way off (before %s). This will ' - 'probably lead to SSL verification errors.', - RECENT_DATE) - - # Wrap socket using verification with the root certs in - # trusted_root_certs - self.sock = ssl.wrap_socket(conn) - - self._verify_cert(self.sock, self.ca_certs) - self.is_verified = True - - def putrequest(self, method, url, **kwargs): - """Override the Put Request method take the DS8K off of the URL.""" - if url and url.startswith('httpsds8k://'): - url = 'https://' + url[12:] - return super(DS8KHTTPSConnection, - self).putrequest(method, url, **kwargs) - - def request(self, method, url, **kwargs): - """Override the Request method take the DS8K off of the URL.""" - if url and url.startswith('httpsds8k://'): - url = 'https://' + url[12:] - return super(DS8KHTTPSConnection, self).request(method, url, **kwargs) - - -class DS8KConnectionPool(connectionpool.HTTPSConnectionPool): - """Extend the HTTPS Connection Pool to our own Certificate verification.""" - - scheme = 'httpsds8k' - ConnectionCls = DS8KHTTPSConnection - - def urlopen(self, method, url, **kwargs): - """Override URL Open method to take DS8K out of the URL protocol.""" - if url and url.startswith('httpsds8k://'): - url = 'https://' + url[12:] - return super(DS8KConnectionPool, self).urlopen(method, url, **kwargs) - -if hasattr(poolmanager, 'key_fn_by_scheme'): - poolmanager.key_fn_by_scheme["httpsds8k"] = ( - poolmanager.key_fn_by_scheme["https"]) 
-poolmanager.pool_classes_by_scheme["httpsds8k"] = DS8KConnectionPool diff --git a/cinder/volume/drivers/ibm/ibm_storage/ds8k_helper.py b/cinder/volume/drivers/ibm/ibm_storage/ds8k_helper.py deleted file mode 100644 index 59c412d56..000000000 --- a/cinder/volume/drivers/ibm/ibm_storage/ds8k_helper.py +++ /dev/null @@ -1,1224 +0,0 @@ -# Copyright (c) 2016 IBM Corporation -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -# -import collections -import copy -import distutils.version as dist_version # pylint: disable=E0611 -import eventlet -import math -import os -import six -import string - -from oslo_log import log as logging - -from cinder import coordination -from cinder import exception -from cinder.i18n import _ -from cinder.objects import fields -import cinder.volume.drivers.ibm.ibm_storage as storage -from cinder.volume.drivers.ibm.ibm_storage import cryptish -from cinder.volume.drivers.ibm.ibm_storage import ds8k_restclient as restclient -from cinder.volume.drivers.ibm.ibm_storage import proxy -from cinder.volume.drivers.ibm.ibm_storage import strings - -LOG = logging.getLogger(__name__) - -LSS_VOL_SLOTS = 0x100 -LSS_SLOTS = 0xFF - -VALID_HOST_TYPES = ( - 'auto', 'AMDLinuxRHEL', 'AMDLinuxSuse', - 'AppleOSX', 'Fujitsu', 'Hp', 'HpTru64', - 'HpVms', 'LinuxDT', 'LinuxRF', 'LinuxRHEL', - 'LinuxSuse', 'Novell', 'SGI', 'SVC', - 'SanFsAIX', 'SanFsLinux', 'Sun', 'VMWare', - 'Win2000', 'Win2003', 'Win2008', 'Win2012', - 'iLinux', 'nSeries', 'pLinux', 
'pSeries', - 'pSeriesPowerswap', 'zLinux', 'iSeries' -) - - -def filter_alnum(s): - return ''.join(x if x in string.ascii_letters + - string.digits else '_' for x in s) if s else '' - - -class DS8KCommonHelper(object): - """Manage the primary backend, it is common class too.""" - - OPTIONAL_PARAMS = ['ds8k_host_type', 'lss_range_for_cg'] - # if use new REST API, please update the version below - VALID_REST_VERSION_5_7_MIN = '5.7.51.1047' - VALID_REST_VERSION_5_8_MIN = '' - INVALID_STORAGE_VERSION = '8.0.1' - - def __init__(self, conf, HTTPConnectorObject=None): - self.conf = conf - self._connector_obj = HTTPConnectorObject - self._storage_pools = None - self._disable_thin_provision = False - self._connection_type = self._get_value('connection_type') - self._existing_lss = None - self.backend = {} - self.setup() - - @staticmethod - def _gb2b(gb): - return gb * (2 ** 30) - - def _get_value(self, key): - if getattr(self.conf, 'safe_get', 'get') == 'get': - value = self.conf.get(key) - else: - value = self.conf.safe_get(key) - if not value and key not in self.OPTIONAL_PARAMS: - raise exception.InvalidParameterValue( - err=(_('Param [%s] should be provided.') % key)) - return value - - def get_thin_provision(self): - return self._disable_thin_provision - - def get_storage_pools(self): - return self._storage_pools - - def get_connection_type(self): - return self._connection_type - - def get_pool(self, lss): - node = int(lss, 16) % 2 - pids = [ - pid for pid, p in self._storage_pools.items() if p['node'] == node] - return pids[0] if pids else None - - def setup(self): - self._create_client() - self._get_storage_information() - self._check_host_type() - self.backend['pools_str'] = self._get_value('san_clustername') - self._storage_pools = self.get_pools() - self.verify_pools(self._storage_pools) - self._get_lss_ids_for_cg() - self._verify_version() - - def update_client(self): - self._client.close() - self._create_client() - - def _get_certificate(self, host): - cert_file 
= strings.CERTIFICATES_PATH + host + '.pem' - LOG.debug("certificate file for DS8K %(host)s: %(cert)s", - {'host': host, 'cert': cert_file}) - # Use the certificate if it exists, otherwise use the System CA Bundle - if os.path.exists(cert_file): - return cert_file - else: - LOG.debug("certificate file not found.") - return True - - def _create_client(self): - san_ip = self._get_value('san_ip') - try: - clear_pass = cryptish.decrypt(self._get_value('san_password')) - except TypeError: - raise exception.InvalidParameterValue( - err=_('Param [san_password] is invalid.')) - verify = self._get_certificate(san_ip) - try: - self._client = restclient.RESTScheduler( - san_ip, - self._get_value('san_login'), - clear_pass, - self._connector_obj, - verify) - except restclient.TimeoutException: - raise restclient.APIException( - data=(_("Can't connect to %(host)s") % {'host': san_ip})) - self.backend['rest_version'] = self._get_version()['bundle_version'] - LOG.info("Connection to DS8K storage system %(host)s has been " - "established successfully, the version of REST is %(rest)s.", - {'host': self._get_value('san_ip'), - 'rest': self.backend['rest_version']}) - - def _get_storage_information(self): - storage_info = self.get_systems() - self.backend['storage_unit'] = storage_info['id'] - self.backend['storage_wwnn'] = storage_info['wwnn'] - self.backend['storage_version'] = storage_info['release'] - - def _get_lss_ids_for_cg(self): - lss_range = self._get_value('lss_range_for_cg') - if lss_range: - lss_range = lss_range.replace(' ', '').split('-') - if len(lss_range) == 1: - begin = int(lss_range[0], 16) - end = begin - else: - begin = int(lss_range[0], 16) - end = int(lss_range[1], 16) - if begin > 0xFF or end > 0xFF or begin > end: - raise exception.InvalidParameterValue( - err=_('Param [lss_range_for_cg] is invalid, it ' - 'should be within 00-FF.')) - self.backend['lss_ids_for_cg'] = set( - ('%02x' % i).upper() for i in range(begin, end + 1)) - else: - 
self.backend['lss_ids_for_cg'] = set() - - def _check_host_type(self): - ds8k_host_type = self._get_value('ds8k_host_type') - if (ds8k_host_type and - (ds8k_host_type not in VALID_HOST_TYPES)): - msg = (_("Param [ds8k_host_type] must be one of: %(values)s.") - % {'values': VALID_HOST_TYPES[1:-1]}) - LOG.error(msg) - raise exception.InvalidParameterValue(err=msg) - self.backend['host_type_override'] = ( - None if ds8k_host_type == 'auto' else ds8k_host_type) - - def _verify_version(self): - if self.backend['storage_version'] == self.INVALID_STORAGE_VERSION: - raise exception.VolumeDriverException( - message=(_("%s does not support bulk deletion of volumes, " - "if you want to use this version of driver, " - "please upgrade the CCL.") - % self.INVALID_STORAGE_VERSION)) - if ('5.7' in self.backend['rest_version'] and - dist_version.LooseVersion(self.backend['rest_version']) < - dist_version.LooseVersion(self.VALID_REST_VERSION_5_7_MIN)): - raise exception.VolumeDriverException( - message=(_("REST version %(invalid)s is lower than " - "%(valid)s, please upgrade it in DS8K.") - % {'invalid': self.backend['rest_version'], - 'valid': self.VALID_REST_VERSION_5_7_MIN})) - - def verify_pools(self, storage_pools): - if self._connection_type == storage.XIV_CONNECTION_TYPE_FC: - ptype = 'fb' - elif self._connection_type == storage.XIV_CONNECTION_TYPE_FC_ECKD: - ptype = 'ckd' - else: - raise exception.InvalidParameterValue( - err=_('Param [connection_type] is invalid.')) - for pid, pool in storage_pools.items(): - if pool['stgtype'] != ptype: - LOG.error('The stgtype of pool %(pool)s is %(ptype)s.', - {'pool': pid, 'ptype': pool['stgtype']}) - raise exception.InvalidParameterValue( - err='Param [san_clustername] is invalid.') - - @proxy.logger - def get_pools(self, specific_pools=None): - if specific_pools: - pools_str = specific_pools.replace(' ', '').upper().split(',') - else: - pools_str = self.backend['pools_str'].replace( - ' ', '').upper().split(',') - pools = 
self._get_pools(pools_str) - unsorted_pools = self._format_pools(pools) - storage_pools = collections.OrderedDict(sorted( - unsorted_pools, key=lambda i: i[1]['capavail'], reverse=True)) - return storage_pools - - @proxy.logger - def update_storage_pools(self, storage_pools): - self._storage_pools = storage_pools - - def _format_pools(self, pools): - return ((p['id'], { - 'name': p['name'], - 'node': int(p['node']), - 'stgtype': p['stgtype'], - 'cap': int(p['cap']), - 'capavail': int(p['capavail']) - }) for p in pools) - - def verify_lss_ids(self, specified_lss_ids): - if not specified_lss_ids: - return None - lss_ids = specified_lss_ids.upper().replace(' ', '').split(',') - # verify LSS IDs. - for lss_id in lss_ids: - if int(lss_id, 16) > 255: - raise exception.InvalidParameterValue( - _('LSS %s should be within 00-FF.') % lss_id) - # verify address group - self._existing_lss = self.get_all_lss() - ckd_addrgrps = set(int(lss['id'], 16) // 16 for lss in - self._existing_lss if lss['type'] == 'ckd') - fb_addrgrps = set((int(lss, 16) // 16) for lss in lss_ids) - intersection = ckd_addrgrps & fb_addrgrps - if intersection: - raise exception.VolumeDriverException( - message=_('LSSes in the address group %s are reserved ' - 'for CKD volumes') % list(intersection)) - # verify whether LSSs specified have been reserved for - # consistency group or not. 
- if self.backend['lss_ids_for_cg']: - for lss_id in lss_ids: - if lss_id in self.backend['lss_ids_for_cg']: - raise exception.InvalidParameterValue( - _('LSS %s has been reserved for CG.') % lss_id) - return lss_ids - - @proxy.logger - def find_pool_lss_pair(self, pool, find_new_pid, excluded_lss): - if pool: - node = int(pool[1:], 16) % 2 - lss = self._find_lss(node, excluded_lss) - if lss: - return (pool, lss) - else: - if not find_new_pid: - raise restclient.LssIDExhaustError( - message=_('All LSS/LCU IDs for configured pools ' - 'on storage are exhausted.')) - # find new pool id and lss for lun - return self.find_biggest_pool_and_lss(excluded_lss) - - @proxy.logger - def find_biggest_pool_and_lss(self, excluded_lss, specified_pool_lss=None): - if specified_pool_lss: - # pool and lss should be verified every time user create volume or - # snapshot, because they can be changed in extra-sepcs at any time. - specified_pool_ids, specified_lss_ids = specified_pool_lss - storage_pools = self.get_pools(specified_pool_ids) - self.verify_pools(storage_pools) - storage_lss = self.verify_lss_ids(specified_lss_ids) - else: - storage_pools, storage_lss = self._storage_pools, None - # pools are ordered by capacity - for pool_id, pool in storage_pools.items(): - lss = self._find_lss(pool['node'], excluded_lss, storage_lss) - if lss: - return pool_id, lss - raise restclient.LssIDExhaustError( - message=_("All LSS/LCU IDs for configured pools are exhausted.")) - - @proxy.logger - def _find_lss(self, node, excluded_lss, specified_lss_ids=None): - if specified_lss_ids: - existing_lss = self._existing_lss - else: - existing_lss = self.get_all_lss() - LOG.info("Existing LSS IDs are: %s.", - ','.join([lss['id'] for lss in existing_lss])) - saved_existing_lss = copy.copy(existing_lss) - - # exclude LSSs that are full. 
- existing_lss = [lss for lss in existing_lss - if lss['id'] not in excluded_lss] - if not existing_lss: - LOG.info("All LSSs are full.") - return None - - # user specify LSSs in extra-specs. - if specified_lss_ids: - specified_lss_ids = [lss for lss in specified_lss_ids - if lss not in excluded_lss] - if specified_lss_ids: - existing_lss = [lss for lss in existing_lss - if lss['id'] in specified_lss_ids] - nonexistent_lss_ids = (set(specified_lss_ids) - - set(lss['id'] for lss in existing_lss)) - lss = None - for lss_id in nonexistent_lss_ids: - if int(lss_id, 16) % 2 == node: - lss = lss_id - break - if not lss: - lss = self._find_from_existing_lss( - node, existing_lss, True) - else: - LOG.info("All appropriate LSSs specified are full.") - return None - else: - # exclude LSSs that reserved for CG. - if self.backend['lss_ids_for_cg']: - existing_lss_cg, nonexistent_lss_cg = ( - self._classify_lss_for_cg(existing_lss)) - existing_lss = [lss for lss in existing_lss - if lss['id'] not in existing_lss_cg] - else: - existing_lss_cg = set() - nonexistent_lss_cg = set() - lss = self._find_from_existing_lss(node, existing_lss) - if not lss: - lss = self._find_from_nonexistent_lss(node, saved_existing_lss, - nonexistent_lss_cg) - return lss - - def _classify_lss_for_cg(self, existing_lss): - existing_lss_ids = set(lss['id'] for lss in existing_lss) - existing_lss_cg = existing_lss_ids & self.backend['lss_ids_for_cg'] - nonexistent_lss_cg = self.backend['lss_ids_for_cg'] - existing_lss_cg - return existing_lss_cg, nonexistent_lss_cg - - def _find_from_existing_lss(self, node, existing_lss, ignore_pprc=False): - if not ignore_pprc: - # exclude LSSs that are used by PPRC paths. - lss_in_pprc = self.get_lss_in_pprc_paths() - if lss_in_pprc: - existing_lss = [lss for lss in existing_lss - if lss['id'] not in lss_in_pprc] - # exclude wrong type of LSSs and those that are not in expected node. 
- existing_lss = [lss for lss in existing_lss if lss['type'] == 'fb' - and int(lss['group']) == node] - lss_id = None - if existing_lss: - # look for the emptiest lss from existing lss - lss = sorted(existing_lss, key=lambda k: int(k['configvols']))[0] - if int(lss['configvols']) < LSS_VOL_SLOTS: - lss_id = lss['id'] - LOG.info('_find_from_existing_lss: choose %(lss)s. ' - 'now it has %(num)s volumes.', - {'lss': lss_id, 'num': lss['configvols']}) - return lss_id - - def _find_from_nonexistent_lss(self, node, existing_lss, lss_cg=None): - ckd_addrgrps = set(int(lss['id'], 16) // 16 for lss in existing_lss if - lss['type'] == 'ckd' and int(lss['group']) == node) - full_lss = set(int(lss['id'], 16) for lss in existing_lss if - lss['type'] == 'fb' and int(lss['group']) == node) - cg_lss = set(int(lss, 16) for lss in lss_cg) if lss_cg else set() - # look for an available lss from nonexistent lss - lss_id = None - for lss in range(node, LSS_SLOTS, 2): - addrgrp = lss // 16 - if (addrgrp not in ckd_addrgrps and - lss not in full_lss and - lss not in cg_lss): - lss_id = ("%02x" % lss).upper() - break - LOG.info('_find_from_unexisting_lss: choose %s.', lss_id) - return lss_id - - def create_lun(self, lun): - volData = { - 'cap': self._gb2b(lun.size), - 'captype': 'bytes', - 'stgtype': 'fb', - 'tp': 'ese' if lun.type_thin else 'none' - } - lun.data_type = lun.data_type if lun.data_type else 'FB 512' - if lun.type_os400: - volData['os400'] = lun.type_os400 - volData['name'] = lun.ds_name - volData['pool'], volData['lss'] = lun.pool_lss_pair['source'] - lun.ds_id = self._create_lun(volData) - return lun - - def delete_lun(self, luns): - lun_ids = [] - luns = [luns] if not isinstance(luns, list) else luns - for lun in luns: - if lun.ds_id is None: - # create_lun must have failed and not returned the id - LOG.error("delete_lun: volume id is None.") - continue - if not self.lun_exists(lun.ds_id): - LOG.error("delete_lun: volume %s not found.", lun.ds_id) - continue - 
lun_ids.append(lun.ds_id) - - # Max 32 volumes could be deleted by specifying ids parameter - while lun_ids: - if len(lun_ids) > 32: - lun_ids_str = ','.join(lun_ids[0:32]) - del lun_ids[0:32] - else: - lun_ids_str = ','.join(lun_ids) - lun_ids = [] - LOG.info("Deleting volumes: %s.", lun_ids_str) - self._delete_lun(lun_ids_str) - - def get_lss_in_pprc_paths(self): - # TODO(Jiamin): when the REST API that get the licenses installed - # in DS8K is ready, this function should be improved. - try: - paths = self.get_pprc_paths() - except restclient.APIException: - paths = [] - LOG.exception("Can not get the LSS") - lss_ids = set(p['source_lss_id'] for p in paths) - LOG.info('LSS in PPRC paths are: %s.', ','.join(lss_ids)) - return lss_ids - - def wait_flashcopy_finished(self, src_luns, tgt_luns): - finished = False - try: - fc_state = [False] * len(tgt_luns) - while True: - eventlet.sleep(5) - for i in range(len(tgt_luns)): - if not fc_state[i]: - fcs = self.get_flashcopy(tgt_luns[i].ds_id) - if not fcs: - fc_state[i] = True - continue - if fcs[0]['state'] not in ('valid', - 'validation_required'): - raise restclient.APIException( - data=(_('Flashcopy ended up in bad state %s. 
' - 'Rolling back.') % fcs[0]['state'])) - if fc_state.count(False) == 0: - break - finished = True - finally: - if not finished: - for src_lun, tgt_lun in zip(src_luns, tgt_luns): - self.delete_flashcopy(src_lun.ds_id, tgt_lun.ds_id) - return finished - - def wait_pprc_copy_finished(self, vol_ids, state, delete=True): - LOG.info("Wait for PPRC pair to enter into state %s", state) - vol_ids = sorted(vol_ids) - min_vol_id = min(vol_ids) - max_vol_id = max(vol_ids) - try: - finished = False - while True: - eventlet.sleep(2) - pairs = self.get_pprc_pairs(min_vol_id, max_vol_id) - pairs = [ - p for p in pairs if p['source_volume']['name'] in vol_ids] - finished_pairs = [p for p in pairs if p['state'] == state] - if len(finished_pairs) == len(pairs): - finished = True - break - - invalid_states = [ - 'target_suspended', - 'invalid', - 'volume_inaccessible' - ] - if state == 'full_duplex': - invalid_states.append('suspended') - elif state == 'suspended': - invalid_states.append('valid') - - unfinished_pairs = [p for p in pairs if p['state'] != state] - for p in unfinished_pairs: - if p['state'] in invalid_states: - raise restclient.APIException( - data=(_('Metro Mirror pair %(id)s enters into ' - 'state %(state)s. ') - % {'id': p['id'], 'state': p['state']})) - finally: - if not finished and delete: - pair_ids = {'ids': ','.join([p['id'] for p in pairs])} - self.delete_pprc_pair_by_pair_id(pair_ids) - - def _get_host(self, connector): - # DS8K doesn't support hostname which is longer than 32 chars. 
- hname = ('OShost:%s' % filter_alnum(connector['host']))[:32] - os_type = connector.get('os_type') - platform = connector.get('platform') - - if self.backend['host_type_override']: - htype = self.backend['host_type_override'] - elif os_type == 'OS400': - htype = 'iSeries' - elif os_type == 'AIX': - htype = 'pSeries' - elif platform in ('s390', 's390x') and os_type == 'linux2': - htype = 'zLinux' - else: - htype = 'LinuxRHEL' - return collections.namedtuple('Host', ('name', 'type'))(hname, htype) - - @coordination.synchronized('ibm-ds8k-{connector[host]}') - def initialize_connection(self, vol_id, connector, **kwargs): - host = self._get_host(connector) - # Find defined host and undefined host ports - host_wwpn_set = set(wwpn.upper() for wwpn in connector['wwpns']) - host_ports = self._get_host_ports(host_wwpn_set) - LOG.debug("host_ports: %s", host_ports) - defined_hosts = set( - hp['host_id'] for hp in host_ports if hp['host_id']) - unknown_ports = host_wwpn_set - set( - hp['wwpn'] for hp in host_ports) - unconfigured_ports = set( - hp['wwpn'] for hp in host_ports if not hp['host_id']) - LOG.debug("initialize_connection: defined_hosts: %(defined)s, " - "unknown_ports: %(unknown)s, unconfigured_ports: " - "%(unconfigured)s.", {"defined": defined_hosts, - "unknown": unknown_ports, - "unconfigured": unconfigured_ports}) - # Create host if it is not defined - if not defined_hosts: - host_id = self._create_host(host)['id'] - elif len(defined_hosts) == 1: - host_id = defined_hosts.pop() - else: - raise restclient.APIException( - message='More than one host defined for requested ports.') - LOG.info('Volume will be attached to host %s.', host_id) - - # Create missing host ports - if unknown_ports or unconfigured_ports: - self._assign_host_port(host_id, - list(unknown_ports | unconfigured_ports)) - - # Map the volume to host - lun_id = self._map_volume_to_host(host_id, vol_id) - target_ports = [p['wwpn'] for p in self._get_ioports()] - return { - 'driver_volume_type': 
'fibre_channel', - 'data': { - 'target_discovered': False, - 'target_lun': int(lun_id, 16), - 'target_wwn': target_ports, - 'initiator_target_map': {initiator: target_ports for - initiator in host_wwpn_set} - } - } - - @coordination.synchronized('ibm-ds8k-{connector[host]}') - def terminate_connection(self, vol_id, connector, force, **kwargs): - host = self._get_host(connector) - host_wwpn_set = set(wwpn.upper() for wwpn in connector['wwpns']) - host_ports = self._get_host_ports(host_wwpn_set) - defined_hosts = set( - hp['host_id'] for hp in host_ports if hp['host_id']) - delete_ports = set( - hp['wwpn'] for hp in host_ports if not hp['host_id']) - LOG.debug("terminate_connection: host_ports: %(host)s, " - "defined_hosts: %(defined)s, delete_ports: %(delete)s.", - {"host": host_ports, - "defined": defined_hosts, - "delete": delete_ports}) - - if not defined_hosts: - LOG.info('Could not find host.') - return None - elif len(defined_hosts) > 1: - raise restclient.APIException(_('More than one host found.')) - else: - host_id = defined_hosts.pop() - mappings = self._get_mappings(host_id) - lun_ids = [ - m['lunid'] for m in mappings if m['volume']['id'] == vol_id] - LOG.info('Volumes attached to host %(host)s are %(vols)s.', - {'host': host_id, 'vols': ','.join(lun_ids)}) - for lun_id in lun_ids: - self._delete_mappings(host_id, lun_id) - if not lun_ids: - LOG.warning("Volume %(vol)s is already not mapped to " - "host %(host)s.", - {'vol': vol_id, 'host': host.name}) - # if this host only has volumes that have been detached, - # remove the host and its ports - ret_info = { - 'driver_volume_type': 'fibre_channel', - 'data': {} - } - if len(mappings) == len(lun_ids): - for port in delete_ports: - self._delete_host_ports(port) - self._delete_host(host_id) - target_ports = [p['wwpn'] for p in self._get_ioports()] - target_map = {initiator.upper(): target_ports - for initiator in connector['wwpns']} - ret_info['data']['initiator_target_map'] = target_map - return ret_info - 
- def create_group(self, group): - return {'status': fields.GroupStatus.AVAILABLE} - - def delete_group(self, group, src_luns): - volumes_model_update = [] - model_update = {'status': fields.GroupStatus.DELETED} - if src_luns: - try: - self.delete_lun(src_luns) - except restclient.APIException as e: - model_update['status'] = fields.GroupStatus.ERROR_DELETING - LOG.exception( - "Failed to delete the volumes in group %(group)s, " - "Exception = %(ex)s", - {'group': group.id, 'ex': e}) - - for src_lun in src_luns: - volumes_model_update.append({ - 'id': src_lun.os_id, - 'status': model_update['status'] - }) - return model_update, volumes_model_update - - def delete_group_snapshot(self, group_snapshot, tgt_luns): - snapshots_model_update = [] - model_update = {'status': fields.GroupSnapshotStatus.DELETED} - if tgt_luns: - try: - self.delete_lun(tgt_luns) - except restclient.APIException as e: - model_update['status'] = ( - fields.GroupSnapshotStatus.ERROR_DELETING) - LOG.error("Failed to delete snapshots in group snapshot " - "%(gsnapshot)s, Exception = %(ex)s", - {'gsnapshot': group_snapshot.id, 'ex': e}) - for tgt_lun in tgt_luns: - snapshots_model_update.append({ - 'id': tgt_lun.os_id, - 'status': model_update['status'] - }) - return model_update, snapshots_model_update - - def _delete_lun(self, lun_ids_str): - self._client.send('DELETE', '/volumes', - params={'ids': lun_ids_str}) - - def delete_lun_by_id(self, lun_id): - self._client.send('DELETE', '/volumes/%s' % lun_id) - - def _get_version(self): - return self._client.fetchone('GET', '') - - @proxy.logger - def _create_lun(self, volData): - return self._client.fetchid('POST', '/volumes', volData) - - def _get_pools(self, pools_str): - return [self._client.fetchone('GET', '/pools/%s' % pid, - fields=['id', 'name', 'node', 'stgtype', 'cap', 'capavail']) - for pid in pools_str] - - def start_flashcopy(self, vol_pairs, freeze=False): - options = [ - "permit_space_efficient_target", - 
"fail_space_efficient_target_out_of_space" - ] - if freeze: - options.append("freeze_consistency") - self._client.send('POST', '/cs/flashcopies', { - "volume_pairs": vol_pairs, - "options": options - }) - - def get_pprc_paths(self, specific_lss=None): - if specific_lss: - lss_range = { - 'source_lss_id_from': specific_lss, - 'source_lss_id_to': specific_lss - } - else: - # get all of PPRC paths between source DS8K and target DS8K. - lss_range = { - 'source_lss_id_from': '00', - 'source_lss_id_to': 'FF' - } - - return self._client.fetchall('GET', '/cs/pprcs/paths', - params=lss_range) - - def get_flashcopy(self, vol_id): - return self._client.fetchall('GET', '/volumes/%s/flashcopy' % vol_id) - - def delete_flashcopy(self, src_lun_id, tgt_lun_id): - # no exception if failed - self._client.statusok( - 'DELETE', '/cs/flashcopies/%s:%s' % (src_lun_id, tgt_lun_id)) - - def _get_host_ports(self, host_wwpn_set): - return self._client.fetchall( - 'GET', '/host_ports', - params={ - 'wwpns': ",".join(host_wwpn_set), - 'state': 'logged in,logged out' - }, - fields=['host_id', 'wwpn']) - - def _create_host(self, host): - return self._client.fetchone( - 'POST', '/hosts', {'name': host.name, 'hosttype': host.type}) - - def _assign_host_port(self, host_id, ports): - self._client.send('POST', '/host_ports/assign', { - 'host_id': host_id, 'host_port_wwpns': ports}) - - def _map_volume_to_host(self, host_id, vol_id): - return self._client.fetchid( - 'POST', '/hosts%5Bid=' + host_id + '%5D/mappings', - {'volumes': [vol_id]}) - - def _get_mappings(self, host_id): - return self._client.fetchall( - 'GET', '/hosts%5Bid=' + host_id + '%5D/mappings') - - def _delete_mappings(self, host_id, lun_id): - self._client.send( - 'DELETE', '/hosts%5Bid=' + host_id + '%5D/mappings/' + lun_id) - - def _delete_host_ports(self, port): - self._client.send('DELETE', '/host_ports/%s' % port) - - def _delete_host(self, host_id): - # delete the host will delete all of the ports belong to it - 
self._client.send('DELETE', '/hosts%5Bid=' + host_id + '%5D') - - def _get_ioports(self): - return self._client.fetchall('GET', '/ioports', fields=['wwpn']) - - def unfreeze_lss(self, lss_ids): - self._client.send( - 'POST', '/cs/flashcopies/unfreeze', {"lss_ids": lss_ids}) - - def get_all_lss(self, fields=None): - fields = (fields if fields else - ['id', 'type', 'group', 'configvols']) - return self._client.fetchall('GET', '/lss', fields=fields) - - def lun_exists(self, lun_id): - return self._client.statusok('GET', '/volumes/%s' % lun_id) - - def get_lun_pool(self, lun_id): - return self._client.fetchone( - 'GET', '/volumes/%s' % lun_id, fields=['pool'])['pool'] - - def change_lun(self, lun_id, param): - self._client.send('PUT', '/volumes/%s' % lun_id, param) - - def get_physical_links(self, target_id): - return self._client.fetchall( - 'GET', '/cs/pprcs/physical_links', - params={ - 'target_system_wwnn': target_id, - 'source_lss_id': 00, - 'target_lss_id': 00 - }) - - def get_systems(self): - return self._client.fetchone( - 'GET', '/systems', fields=['id', 'wwnn', 'release']) - - def get_lun_number_in_lss(self, lss_id): - return int(self._client.fetchone( - 'GET', '/lss/%s' % lss_id, - fields=['configvols'])['configvols']) - - def create_pprc_path(self, pathData): - self._client.send('POST', '/cs/pprcs/paths', pathData) - - def get_pprc_path(self, path_id): - return self._client.fetchone( - 'GET', '/cs/pprcs/paths/%s' % path_id, - fields=['port_pairs']) - - def delete_pprc_path(self, path_id): - self._client.send('DELETE', '/cs/pprcs/paths/%s' % path_id) - - def create_pprc_pair(self, pairData): - self._client.send('POST', '/cs/pprcs', pairData) - - def delete_pprc_pair_by_pair_id(self, pids): - self._client.statusok('DELETE', '/cs/pprcs', params=pids) - - def do_failback(self, pairData): - self._client.send('POST', '/cs/pprcs/resume', pairData) - - def get_pprc_pairs(self, min_vol_id, max_vol_id): - return self._client.fetchall( - 'GET', '/cs/pprcs', - params={ 
- 'volume_id_from': min_vol_id, - 'volume_id_to': max_vol_id - }) - - def delete_pprc_pair(self, vol_id): - # check pprc pairs exist or not. - if not self.get_pprc_pairs(vol_id, vol_id): - return None - # don't use pprc pair ID to delete it, because it may have - # communication issues. - pairData = { - 'volume_full_ids': [{ - 'volume_id': vol_id, - 'system_id': self.backend['storage_unit'] - }], - 'options': ['unconditional', 'issue_source'] - } - self._client.send('POST', '/cs/pprcs/delete', pairData) - - -class DS8KReplicationSourceHelper(DS8KCommonHelper): - """Manage source storage for replication.""" - - @proxy.logger - def find_pool_and_lss(self, excluded_lss=None): - for pool_id, pool in self._storage_pools.items(): - lss = self._find_lss_for_type_replication(pool['node'], - excluded_lss) - if lss: - return pool_id, lss - raise restclient.LssIDExhaustError( - message=_("All LSS/LCU IDs for configured pools are exhausted.")) - - @proxy.logger - def _find_lss_for_type_replication(self, node, excluded_lss): - # prefer to choose non-existing one first. 
- existing_lss = self.get_all_lss() - LOG.info("existing LSS IDs are %s", - ','.join([lss['id'] for lss in existing_lss])) - existing_lss_cg, nonexistent_lss_cg = ( - self._classify_lss_for_cg(existing_lss)) - lss_id = self._find_from_nonexistent_lss(node, existing_lss, - nonexistent_lss_cg) - if not lss_id: - if excluded_lss: - existing_lss = [lss for lss in existing_lss - if lss['id'] not in excluded_lss] - candidates = [lss for lss in existing_lss - if lss['id'] not in existing_lss_cg] - lss_id = self._find_from_existing_lss(node, candidates) - return lss_id - - -class DS8KReplicationTargetHelper(DS8KReplicationSourceHelper): - """Manage target storage for replication.""" - - OPTIONAL_PARAMS = ['ds8k_host_type', 'port_pairs'] - - def setup(self): - self._create_client() - self._get_storage_information() - self._get_replication_information() - self._check_host_type() - self.backend['pools_str'] = self._get_value( - 'san_clustername').replace('_', ',') - self._storage_pools = self.get_pools() - self.verify_pools(self._storage_pools) - self._verify_version() - - def _get_replication_information(self): - port_pairs = [] - pairs = self._get_value('port_pairs') - if pairs: - for pair in pairs.replace(' ', '').upper().split(';'): - pair = pair.split('-') - port_pair = { - 'source_port_id': pair[0], - 'target_port_id': pair[1] - } - port_pairs.append(port_pair) - self.backend['port_pairs'] = port_pairs - self.backend['id'] = self._get_value('backend_id') - - @proxy.logger - def _find_lss_for_type_replication(self, node, excluded_lss): - # prefer to choose non-existing one first. 
- existing_lss = self.get_all_lss() - LOG.info("existing LSS IDs are %s", - ','.join([lss['id'] for lss in existing_lss])) - lss_id = self._find_from_nonexistent_lss(node, existing_lss) - if not lss_id: - if excluded_lss: - existing_lss = [lss for lss in existing_lss - if lss['id'] not in excluded_lss] - lss_id = self._find_from_existing_lss(node, existing_lss) - return lss_id - - def create_lun(self, lun): - volData = { - 'cap': self._gb2b(lun.size), - 'captype': 'bytes', - 'stgtype': 'fb', - 'tp': 'ese' if lun.type_thin else 'none' - } - lun.data_type = lun.data_type if lun.data_type else 'FB 512' - if lun.type_os400: - volData['os400'] = lun.type_os400 - - volData['name'] = lun.replica_ds_name - volData['pool'], volData['lss'] = lun.pool_lss_pair['target'] - volID = self._create_lun(volData) - lun.replication_driver_data.update( - {self.backend['id']: {'vol_hex_id': volID}}) - return lun - - def delete_pprc_pair(self, vol_id): - if not self.get_pprc_pairs(vol_id, vol_id): - return None - pairData = { - 'volume_full_ids': [{ - 'volume_id': vol_id, - 'system_id': self.backend['storage_unit'] - }], - 'options': ['unconditional', 'issue_target'] - } - self._client.send('POST', '/cs/pprcs/delete', pairData) - - -class DS8KECKDHelper(DS8KCommonHelper): - """Manage ECKD volume.""" - - OPTIONAL_PARAMS = ['ds8k_host_type', 'port_pairs', 'ds8k_ssid_prefix', - 'lss_range_for_cg'] - # if use new REST API, please update the version below - VALID_REST_VERSION_5_7_MIN = '5.7.51.1068' - VALID_REST_VERSION_5_8_MIN = '5.8.20.1059' - MIN_VALID_STORAGE_VERSION = '8.1' - INVALID_STORAGE_VERSION = '8.0.1' - - @staticmethod - def _gb2cyl(gb): - # now only support 3390, no 3380 or 3390-A - cyl = int(math.ceil(gb * 1263.28)) - if cyl > 65520: - raise exception.VolumeDriverException( - message=(_("For 3390 volume, capacity can be in the range " - "1-65520(849KiB to 55.68GiB) cylinders, now it " - "is %(gb)d GiB, equals to %(cyl)d cylinders.") - % {'gb': gb, 'cyl': cyl})) - return cyl - - 
@staticmethod - def _cyl2b(cyl): - return cyl * 849960 - - def _get_cula(self, lcu): - return self.backend['device_mapping'][lcu] - - def disable_thin_provision(self): - self._disable_thin_provision = True - - def setup(self): - self._create_client() - self._get_storage_information() - self._check_host_type() - self._get_lss_ids_for_cg() - self.backend['pools_str'] = self._get_value('san_clustername') - self._storage_pools = self.get_pools() - self.verify_pools(self._storage_pools) - ssid_prefix = self._get_value('ds8k_ssid_prefix') - self.backend['ssid_prefix'] = ssid_prefix if ssid_prefix else 'FF' - self.backend['device_mapping'] = self._get_device_mapping() - self._verify_version() - - def _verify_version(self): - if self.backend['storage_version'] == self.INVALID_STORAGE_VERSION: - raise exception.VolumeDriverException( - message=(_("%s does not support bulk deletion of volumes, " - "if you want to use this version of driver, " - "please upgrade the CCL.") - % self.INVALID_STORAGE_VERSION)) - # DS8K supports ECKD ESE volume from 8.1 - if (dist_version.LooseVersion(self.backend['storage_version']) < - dist_version.LooseVersion(self.MIN_VALID_STORAGE_VERSION)): - self._disable_thin_provision = True - - if (('5.7' in self.backend['rest_version'] and - dist_version.LooseVersion(self.backend['rest_version']) < - dist_version.LooseVersion(self.VALID_REST_VERSION_5_7_MIN)) or - ('5.8' in self.backend['rest_version'] and - dist_version.LooseVersion(self.backend['rest_version']) < - dist_version.LooseVersion(self.VALID_REST_VERSION_5_8_MIN))): - raise exception.VolumeDriverException( - message=(_("REST version %(invalid)s is lower than " - "%(valid)s, please upgrade it in DS8K.") - % {'invalid': self.backend['rest_version'], - 'valid': (self.VALID_REST_VERSION_5_7_MIN if '5.7' - in self.backend['rest_version'] else - self.VALID_REST_VERSION_5_8_MIN)})) - - def _get_device_mapping(self): - map_str = self._get_value('ds8k_devadd_unitadd_mapping') - mappings = 
map_str.replace(' ', '').upper().split(';') - pairs = [m.split('-') for m in mappings] - self.verify_lss_ids(','.join([p[1] for p in pairs])) - return {p[1]: int(p[0], 16) for p in pairs} - - @proxy.logger - def verify_lss_ids(self, specified_lcu_ids): - if not specified_lcu_ids: - return None - lcu_ids = specified_lcu_ids.upper().replace(' ', '').split(',') - # verify the LCU ID. - for lcu in lcu_ids: - if int(lcu, 16) > 255: - raise exception.InvalidParameterValue( - err=_('LCU %s should be within 00-FF.') % lcu) - - # verify address group - self._existing_lss = self.get_all_lss() - fb_addrgrps = set(int(lss['id'], 16) // 16 for lss in - self._existing_lss if lss['type'] == 'fb') - ckd_addrgrps = set((int(lcu, 16) // 16) for lcu in lcu_ids) - intersection = ckd_addrgrps & fb_addrgrps - if intersection: - raise exception.VolumeDriverException( - message=_('LCUs in the address group %s are reserved ' - 'for FB volumes') % list(intersection)) - - # create LCU that doesn't exist - nonexistent_lcu = set(lcu_ids) - set( - lss['id'] for lss in self._existing_lss if lss['type'] == 'ckd') - if nonexistent_lcu: - LOG.info('LCUs %s do not exist in DS8K, they will be ' - 'created.', ','.join(nonexistent_lcu)) - for lcu in nonexistent_lcu: - try: - self._create_lcu(self.backend['ssid_prefix'], lcu) - except restclient.APIException as e: - raise exception.VolumeDriverException( - message=(_('Can not create lcu %(lcu)s, ' - 'Exception = %(e)s.') - % {'lcu': lcu, 'e': six.text_type(e)})) - return lcu_ids - - def _format_pools(self, pools): - return ((p['id'], { - 'name': p['name'], - 'node': int(p['node']), - 'stgtype': p['stgtype'], - 'cap': self._cyl2b(int(p['cap'])), - 'capavail': self._cyl2b(int(p['capavail'])) - }) for p in pools) - - @proxy.logger - def find_pool_and_lss(self, excluded_lss=None): - return self.find_biggest_pool_and_lss(excluded_lss) - - @proxy.logger - def _find_lss(self, node, excluded_lcu, specified_lcu_ids=None): - # all LCUs have existed, unlike LSS. 
- if specified_lcu_ids: - for lcu_id in specified_lcu_ids: - if lcu_id not in self.backend['device_mapping'].keys(): - raise exception.InvalidParameterValue( - err=_("LCU %s is not in parameter " - "ds8k_devadd_unitadd_mapping, " - "Please specify LCU in it, otherwise " - "driver can not attach volume.") % lcu_id) - all_lss = self._existing_lss - else: - all_lss = self.get_all_lss() - existing_lcu = [lcu for lcu in all_lss if - lcu['type'] == 'ckd' and - lcu['id'] in self.backend['device_mapping'].keys() and - lcu['group'] == six.text_type(node)] - LOG.info("All appropriate LCUs are %s.", - ','.join([lcu['id'] for lcu in existing_lcu])) - - # exclude full LCUs. - if excluded_lcu: - existing_lcu = [lcu for lcu in existing_lcu if - lcu['id'] not in excluded_lcu] - if not existing_lcu: - LOG.info("All appropriate LCUs are full.") - return None - - ignore_pprc = False - if specified_lcu_ids: - # user specify LCUs in extra-specs. - existing_lcu = [lcu for lcu in existing_lcu - if lcu['id'] in specified_lcu_ids] - ignore_pprc = True - - # exclude LCUs reserved for CG. - existing_lcu = [lcu for lcu in existing_lcu if lcu['id'] - not in self.backend['lss_ids_for_cg']] - if not existing_lcu: - LOG.info("All appropriate LCUs have been reserved for " - "for consistency group.") - return None - - if not ignore_pprc: - # prefer to use LCU that is not in PPRC path first. - lcu_pprc = self.get_lss_in_pprc_paths() & set( - self.backend['device_mapping'].keys()) - if lcu_pprc: - lcu_non_pprc = [ - lcu for lcu in existing_lcu if lcu['id'] not in lcu_pprc] - if lcu_non_pprc: - existing_lcu = lcu_non_pprc - - # return LCU which has max number of empty slots. 
- emptiest_lcu = sorted( - existing_lcu, key=lambda i: int(i['configvols']))[0] - if int(emptiest_lcu['configvols']) == LSS_VOL_SLOTS: - return None - else: - return emptiest_lcu['id'] - - def _create_lcu(self, ssid_prefix, lcu): - self._client.send('POST', '/lss', { - 'id': lcu, - 'type': 'ckd', - 'sub_system_identifier': ssid_prefix + lcu - }) - - def create_lun(self, lun): - volData = { - 'cap': self._gb2cyl(lun.size), - 'captype': 'cyl', - 'stgtype': 'ckd', - 'tp': 'ese' if lun.type_thin else 'none' - } - lun.data_type = '3390' - volData['name'] = lun.ds_name - volData['pool'], volData['lss'] = lun.pool_lss_pair['source'] - lun.ds_id = self._create_lun(volData) - return lun - - def initialize_connection(self, vol_id, connector, **kwargs): - return { - 'driver_volume_type': 'fibre_channel_eckd', - 'data': { - 'target_discovered': True, - 'cula': self._get_cula(vol_id[0:2]), - 'unit_address': int(vol_id[2:4], 16), - 'discard': False - } - } - - def terminate_connection(self, vol_id, connector, force, **kwargs): - return None - - -class DS8KReplicationTargetECKDHelper(DS8KECKDHelper, - DS8KReplicationTargetHelper): - """Manage ECKD volume in replication target.""" - - def setup(self): - self._create_client() - self._get_storage_information() - self._get_replication_information() - self._check_host_type() - self.backend['pools_str'] = self._get_value( - 'san_clustername').replace('_', ',') - self._storage_pools = self.get_pools() - self.verify_pools(self._storage_pools) - ssid_prefix = self._get_value('ds8k_ssid_prefix') - self.backend['ssid_prefix'] = ssid_prefix if ssid_prefix else 'FF' - self.backend['device_mapping'] = self._get_device_mapping() - self._verify_version() - - def create_lun(self, lun): - volData = { - 'cap': self._gb2cyl(lun.size), - 'captype': 'cyl', - 'stgtype': 'ckd', - 'tp': 'ese' if lun.type_thin else 'none' - } - lun.data_type = '3390' - - volData['name'] = lun.replica_ds_name - volData['pool'], volData['lss'] = lun.pool_lss_pair['target'] 
- volID = self._create_lun(volData) - lun.replication_driver_data.update( - {self.backend['id']: {'vol_hex_id': volID}}) - return lun diff --git a/cinder/volume/drivers/ibm/ibm_storage/ds8k_proxy.py b/cinder/volume/drivers/ibm/ibm_storage/ds8k_proxy.py deleted file mode 100644 index c580e4b08..000000000 --- a/cinder/volume/drivers/ibm/ibm_storage/ds8k_proxy.py +++ /dev/null @@ -1,1235 +0,0 @@ -# Copyright (c) 2016 IBM Corporation -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -# -""" -This is the driver that allows openstack to talk to DS8K. - -All volumes are thin provisioned by default, if the machine is licensed for it. -This can be overridden by creating a volume type and specifying a key like so: - -.. code:: console - - #> cinder type-create my_type - #> cinder type-key my_type set drivers:thin_provision=False - #> cinder create --volume-type my_type 123 - - -Sample settings for cinder.conf: - -.. 
code:: ini - - enabled_backends = ibm_ds8k_1, ibm_ds8k_2 - [ibm_ds8k_1] - proxy = cinder.volume.drivers.ibm.ibm_storage.ds8k_proxy.DS8KProxy - volume_backend_name = ibm_ds8k_1 - san_clustername = P2,P3 - san_password = actual_password - san_login = actual_username - san_ip = foo.com - volume_driver = - cinder.volume.drivers.ibm.ibm_storage.ibm_storage.IBMStorageDriver - chap = disabled - connection_type = fibre_channel - replication_device = connection_type: fibre_channel, backend_id: bar, - san_ip: bar.com, san_login: actual_username, - san_password: actual_password, san_clustername: P4, - port_pairs: I0236-I0306; I0237-I0307 - - [ibm_ds8k_2] - proxy = cinder.volume.drivers.ibm.ibm_storage.ds8k_proxy.DS8KProxy - volume_backend_name = ibm_ds8k_2 - san_clustername = P4,P5 - san_password = actual_password - san_login = actual_username - san_ip = bar.com - volume_driver = - cinder.volume.drivers.ibm.ibm_storage.ibm_storage.IBMStorageDriver - chap = disabled - connection_type = fibre_channel - -""" -import ast -import collections -import json -import six - -from oslo_config import cfg -from oslo_log import log as logging - -from cinder import context -from cinder import coordination -from cinder import exception -from cinder.i18n import _ -from cinder import objects -from cinder.objects import fields -from cinder.volume import configuration -import cinder.volume.drivers.ibm.ibm_storage as storage -from cinder.volume.drivers.ibm.ibm_storage import ( - ds8k_replication as replication) -from cinder.volume.drivers.ibm.ibm_storage import ds8k_helper as helper -from cinder.volume.drivers.ibm.ibm_storage import ds8k_restclient as restclient -from cinder.volume.drivers.ibm.ibm_storage import proxy -from cinder.volume.drivers.ibm.ibm_storage import strings -from cinder.volume import utils -from cinder.volume import volume_types - -LOG = logging.getLogger(__name__) - -VALID_OS400_VOLUME_TYPES = { - 'A01': 8, 'A02': 17, 'A04': 66, - 'A05': 33, 'A06': 132, 'A07': 263, - 'A81': 8, 
'A82': 17, 'A84': 66, - 'A85': 33, 'A86': 132, 'A87': 263, - '050': '', '099': '' -} - -EXTRA_SPECS_DEFAULTS = { - 'thin': True, - 'replication_enabled': False, - 'consistency': False, - 'os400': '', - 'storage_pool_ids': '', - 'storage_lss_ids': '' -} - -ds8k_opts = [ - cfg.StrOpt( - 'ds8k_devadd_unitadd_mapping', - default='', - help='Mapping between IODevice address and unit address.'), - cfg.StrOpt( - 'ds8k_ssid_prefix', - default='FF', - help='Set the first two digits of SSID.'), - cfg.StrOpt( - 'lss_range_for_cg', - default='', - help='Reserve LSSs for consistency group.'), - cfg.StrOpt( - 'ds8k_host_type', - default='auto', - help='Set to zLinux if your OpenStack version is prior to ' - 'Liberty and you\'re connecting to zLinux systems. ' - 'Otherwise set to auto. Valid values for this parameter ' - 'are: %s.' % six.text_type(helper.VALID_HOST_TYPES)[1:-1]) -] - -CONF = cfg.CONF -CONF.register_opts(ds8k_opts, group=configuration.SHARED_CONF_GROUP) - - -class Lun(object): - """provide volume information for driver from volume db object. - - Version history: - - .. code-block:: none - - 1.0.0 - initial revision. - 2.1.0 - Added support for specify pool and lss, also improve the code. 
- """ - - VERSION = "2.1.0" - - class FakeLun(object): - - def __init__(self, lun, **overrides): - self.size = lun.size - self.os_id = lun.os_id - self.cinder_name = lun.cinder_name - self.is_snapshot = lun.is_snapshot - self.ds_name = lun.ds_name - self.ds_id = lun.ds_id - self.type_thin = lun.type_thin - self.type_os400 = lun.type_os400 - self.data_type = lun.data_type - self.type_replication = lun.type_replication - self.group = lun.group - self.specified_pool = lun.specified_pool - self.specified_lss = lun.specified_lss - if not self.is_snapshot: - self.replica_ds_name = lun.replica_ds_name - self.replication_driver_data = ( - lun.replication_driver_data.copy()) - self.replication_status = lun.replication_status - self.pool_lss_pair = lun.pool_lss_pair - - def update_volume(self, lun): - lun.data_type = self.data_type - volume_update = lun.get_volume_update() - volume_update['provider_location'] = six.text_type({ - 'vol_hex_id': self.ds_id}) - if self.type_replication: - volume_update['replication_driver_data'] = json.dumps( - self.replication_driver_data) - volume_update['metadata']['replication'] = six.text_type( - self.replication_driver_data) - else: - volume_update.pop('replication_driver_data', None) - volume_update['metadata'].pop('replication', None) - volume_update['metadata']['vol_hex_id'] = self.ds_id - return volume_update - - def __init__(self, volume, is_snapshot=False): - volume_type_id = volume.get('volume_type_id') - self.specs = volume_types.get_volume_type_extra_specs( - volume_type_id) if volume_type_id else {} - os400 = self.specs.get( - 'drivers:os400', EXTRA_SPECS_DEFAULTS['os400'] - ).strip().upper() - self.type_thin = self.specs.get( - 'drivers:thin_provision', '%s' % EXTRA_SPECS_DEFAULTS['thin'] - ).upper() == 'TRUE' - self.type_replication = self.specs.get( - 'replication_enabled', - ' %s' % EXTRA_SPECS_DEFAULTS['replication_enabled'] - ).upper() == strings.METADATA_IS_TRUE - self.specified_pool = self.specs.get( - 
'drivers:storage_pool_ids', - EXTRA_SPECS_DEFAULTS['storage_pool_ids'] - ) - self.specified_lss = self.specs.get( - 'drivers:storage_lss_ids', - EXTRA_SPECS_DEFAULTS['storage_lss_ids'] - ) - - if volume.provider_location: - provider_location = ast.literal_eval(volume.provider_location) - self.ds_id = provider_location['vol_hex_id'] - else: - self.ds_id = None - self.cinder_name = volume.display_name - self.pool_lss_pair = {} - self.is_snapshot = is_snapshot - if self.is_snapshot: - self.group = (Group(volume.group_snapshot, True) - if volume.group_snapshot else None) - self.size = volume.volume_size - # ds8k supports at most 16 chars - self.ds_name = ( - "OS%s:%s" % ('snap', helper.filter_alnum(self.cinder_name)) - )[:16] - else: - self.group = Group(volume.group) if volume.group else None - self.size = volume.size - self.ds_name = ( - "OS%s:%s" % ('vol', helper.filter_alnum(self.cinder_name)) - )[:16] - self.replica_ds_name = ( - "OS%s:%s" % ('Replica', helper.filter_alnum(self.cinder_name)) - )[:16] - self.replication_status = volume.replication_status - self.replication_driver_data = ( - json.loads(volume.replication_driver_data) - if volume.replication_driver_data else {}) - if self.replication_driver_data: - # now only support one replication target. 
- replication_target = sorted( - self.replication_driver_data.values())[0] - replica_id = replication_target['vol_hex_id'] - self.pool_lss_pair = { - 'source': (None, self.ds_id[0:2]), - 'target': (None, replica_id[0:2]) - } - - if os400: - if os400 not in VALID_OS400_VOLUME_TYPES.keys(): - raise restclient.APIException( - data=(_("The OS400 volume type provided, %s, is not " - "a valid volume type.") % os400)) - self.type_os400 = os400 - if os400 not in ['050', '099']: - self.size = VALID_OS400_VOLUME_TYPES[os400] - else: - self.type_os400 = EXTRA_SPECS_DEFAULTS['os400'] - - self.data_type = self._create_datatype(self.type_os400) - self.os_id = volume.id - self.status = volume.status - self.volume = volume - - def _get_volume_metadata(self, volume): - if 'volume_metadata' in volume: - metadata = volume.volume_metadata - return {m['key']: m['value'] for m in metadata} - if 'metadata' in volume: - return volume.metadata - - return {} - - def _get_snapshot_metadata(self, snapshot): - if 'snapshot_metadata' in snapshot: - metadata = snapshot.snapshot_metadata - return {m['key']: m['value'] for m in metadata} - if 'metadata' in snapshot: - return snapshot.metadata - - return {} - - def shallow_copy(self, **overrides): - return Lun.FakeLun(self, **overrides) - - def _create_datatype(self, t): - if t[0:2] == 'A0': - datatype = t + ' FB 520P' - elif t[0:2] == 'A8': - datatype = t + ' FB 520U' - elif t == '050': - datatype = t + ' FB 520UV' - elif t == '099': - datatype = t + ' FB 520PV' - else: - datatype = None - return datatype - - # Note: updating metadata in vol related funcs deletes all prior metadata - def get_volume_update(self): - volume_update = {} - volume_update['provider_location'] = six.text_type( - {'vol_hex_id': self.ds_id}) - - # update metadata - if self.is_snapshot: - metadata = self._get_snapshot_metadata(self.volume) - else: - metadata = self._get_volume_metadata(self.volume) - if self.type_replication: - metadata['replication'] = six.text_type( - 
self.replication_driver_data) - else: - metadata.pop('replication', None) - volume_update['replication_driver_data'] = json.dumps( - self.replication_driver_data) - volume_update['replication_status'] = self.replication_status - - metadata['data_type'] = (self.data_type if self.data_type else - metadata['data_type']) - metadata['vol_hex_id'] = self.ds_id - volume_update['metadata'] = metadata - - # need to update volume size for OS400 - if self.type_os400: - volume_update['size'] = self.size - - return volume_update - - -class Group(object): - """provide group information for driver from group db object.""" - - def __init__(self, group, is_snapshot=False): - self.id = group.id - self.host = group.host - if is_snapshot: - self.snapshots = group.snapshots - else: - self.volumes = group.volumes - self.consisgroup_enabled = utils.is_group_a_cg_snapshot_type(group) - - -class DS8KProxy(proxy.IBMStorageProxy): - prefix = "[IBM DS8K STORAGE]:" - - def __init__(self, storage_info, logger, exception, driver, - active_backend_id=None, HTTPConnectorObject=None): - proxy.IBMStorageProxy.__init__( - self, storage_info, logger, exception, driver, active_backend_id) - self._helper = None - self._replication = None - self._connector_obj = HTTPConnectorObject - self._replication_enabled = False - self._active_backend_id = active_backend_id - self.configuration = driver.configuration - self.configuration.append_config_values(ds8k_opts) - # TODO(jiamin): this cache is used to handle concurrency issue, but it - # hurts HA, we will find whether is it possible to store it in storage. 
- self.consisgroup_cache = {} - - @proxy._trace_time - def setup(self, ctxt): - LOG.info("Initiating connection to IBM DS8K storage system.") - connection_type = self.configuration.safe_get('connection_type') - replication_devices = self.configuration.safe_get('replication_device') - if connection_type == storage.XIV_CONNECTION_TYPE_FC: - if not replication_devices: - self._helper = helper.DS8KCommonHelper(self.configuration, - self._connector_obj) - else: - self._helper = ( - helper.DS8KReplicationSourceHelper(self.configuration, - self._connector_obj)) - elif connection_type == storage.XIV_CONNECTION_TYPE_FC_ECKD: - self._helper = helper.DS8KECKDHelper(self.configuration, - self._connector_obj) - else: - raise exception.InvalidParameterValue( - err=(_("Param [connection_type] %s is invalid.") - % connection_type)) - - if replication_devices: - self._do_replication_setup(replication_devices, self._helper) - - @proxy.logger - def _do_replication_setup(self, devices, src_helper): - if len(devices) >= 2: - raise exception.InvalidParameterValue( - err=_("Param [replication_device] is invalid, Driver " - "support only one replication target.")) - - self._replication = replication.Replication(src_helper, devices[0]) - self._replication.check_physical_links() - self._replication.check_connection_type() - if self._active_backend_id: - self._switch_backend_connection(self._active_backend_id) - self._replication_enabled = True - - @proxy.logger - def _switch_backend_connection(self, backend_id, repl_luns=None): - repl_luns = self._replication.switch_source_and_target(backend_id, - repl_luns) - self._helper = self._replication._source_helper - return repl_luns - - @staticmethod - def _b2gb(b): - return b // (2 ** 30) - - @proxy._trace_time - def _update_stats(self): - if self._helper: - storage_pools = self._helper.get_pools() - if not len(storage_pools): - msg = _('No pools found - make sure san_clustername ' - 'is defined in the config file and that the ' - 'pools exist on 
the storage.') - LOG.error(msg) - raise exception.CinderException(message=msg) - self._helper.update_storage_pools(storage_pools) - else: - raise exception.VolumeDriverException( - message=(_('Backend %s is not initialized.') - % self.configuration.volume_backend_name)) - - stats = { - "volume_backend_name": self.configuration.volume_backend_name, - "serial_number": self._helper.backend['storage_unit'], - "extent_pools": self._helper.backend['pools_str'], - "vendor_name": 'IBM', - "driver_version": self.full_version, - "storage_protocol": self._helper.get_connection_type(), - "total_capacity_gb": self._b2gb( - sum(p['cap'] for p in storage_pools.values())), - "free_capacity_gb": self._b2gb( - sum(p['capavail'] for p in storage_pools.values())), - "reserved_percentage": self.configuration.reserved_percentage, - "consistent_group_snapshot_enabled": True, - "multiattach": False - } - - if self._replication_enabled: - stats['replication_enabled'] = self._replication_enabled - - self.meta['stat'] = stats - - def _assert(self, assert_condition, exception_message=''): - if not assert_condition: - LOG.error(exception_message) - raise exception.VolumeDriverException(message=exception_message) - - @proxy.logger - def _create_lun_helper(self, lun, pool=None, find_new_pid=True): - connection_type = self._helper.get_connection_type() - if connection_type == storage.XIV_CONNECTION_TYPE_FC_ECKD: - if lun.type_thin: - if self._helper.get_thin_provision(): - msg = (_("Backend %s can not support ECKD ESE volume.") - % self._helper.backend['storage_unit']) - LOG.error(msg) - raise exception.VolumeDriverException(message=msg) - if lun.type_replication: - target_helper = self._replication._target_helper - # PPRC can not copy from ESE volume to standard volume - # or vice versa. 
- if target_helper.get_thin_provision(): - msg = (_("Secondary storage %s can not support ECKD " - "ESE volume.") - % target_helper.backend['storage_unit']) - LOG.error(msg) - raise exception.VolumeDriverException(message=msg) - # There is a time gap between find available LSS slot and - # lun actually occupies it. - excluded_lss = set() - while True: - try: - if lun.specified_pool or lun.specified_lss: - lun.pool_lss_pair = { - 'source': self._find_pool_lss_pair_from_spec( - lun, excluded_lss)} - elif lun.group and lun.group.consisgroup_enabled: - lun.pool_lss_pair = { - 'source': self._find_pool_lss_pair_for_cg( - lun, excluded_lss)} - else: - if lun.type_replication and not lun.is_snapshot: - lun.pool_lss_pair = ( - self._replication.find_pool_lss_pair( - excluded_lss)) - else: - lun.pool_lss_pair = { - 'source': self._helper.find_pool_lss_pair( - pool, find_new_pid, excluded_lss)} - return self._helper.create_lun(lun) - except restclient.LssFullException: - LOG.warning("LSS %s is full, find another one.", - lun.pool_lss_pair['source'][1]) - excluded_lss.add(lun.pool_lss_pair['source'][1]) - - def _find_pool_lss_pair_from_spec(self, lun, excluded_lss): - if lun.group and lun.group.consisgroup_enabled: - msg = _("No support for specifying pool or lss for " - "volumes that belong to consistency group.") - LOG.error(msg) - raise exception.VolumeDriverException(message=msg) - else: - pool, lss = self._helper.find_biggest_pool_and_lss( - excluded_lss, (lun.specified_pool, lun.specified_lss)) - return (pool, lss) - - @coordination.synchronized('{self.prefix}-consistency-group') - def _find_pool_lss_pair_for_cg(self, lun, excluded_lss): - lss_in_cache = self.consisgroup_cache.get(lun.group.id, set()) - if not lss_in_cache: - lss_in_cg = self._get_lss_in_cg(lun.group, lun.is_snapshot) - LOG.debug("LSSs used by CG %(cg)s are %(lss)s.", - {'cg': lun.group.id, 'lss': ','.join(lss_in_cg)}) - available_lss = lss_in_cg - excluded_lss - else: - available_lss = lss_in_cache - 
excluded_lss - if not available_lss: - available_lss = self._find_lss_for_cg() - - pid, lss = self._find_pool_for_lss(available_lss) - if pid: - lss_in_cache.add(lss) - self.consisgroup_cache[lun.group.id] = lss_in_cache - else: - raise exception.VolumeDriverException( - message=_('There are still some available LSSs for CG, ' - 'but they are not in the same node as pool.')) - return (pid, lss) - - def _get_lss_in_cg(self, group, is_snapshot=False): - # Driver can not support the case that dedicating LSS for CG while - # user enable multiple backends which use the same DS8K. - try: - volume_backend_name = ( - group.host[group.host.index('@') + 1:group.host.index('#')]) - except ValueError: - raise exception.VolumeDriverException( - message=(_('Invalid host %(host)s in group %(group)s') - % {'host': group.host, 'group': group.id})) - lss_in_cg = set() - if volume_backend_name == self.configuration.volume_backend_name: - if is_snapshot: - luns = [Lun(snapshot, is_snapshot=True) - for snapshot in group.snapshots] - else: - luns = [Lun(volume) for volume in group.volumes] - lss_in_cg = set(lun.ds_id[:2] for lun in luns if lun.ds_id) - return lss_in_cg - - def _find_lss_for_cg(self): - # Unable to get CGs/groups belonging to the current tenant, so - # get all of them, this function will consume some time if there - # are so many CGs/groups. 
- lss_used = set() - ctxt = context.get_admin_context() - existing_groups = objects.GroupList.get_all( - ctxt, filters={'status': 'available'}) - for group in existing_groups: - if Group(group).consisgroup_enabled: - lss_used = lss_used | self._get_lss_in_cg(group) - existing_groupsnapshots = objects.GroupSnapshotList.get_all( - ctxt, filters={'status': 'available'}) - for group in existing_groupsnapshots: - if Group(group, True).consisgroup_enabled: - lss_used = lss_used | self._get_lss_in_cg(group, True) - available_lss = set(self._helper.backend['lss_ids_for_cg']) - lss_used - for lss_set in self.consisgroup_cache.values(): - available_lss -= lss_set - self._assert(available_lss, - "All LSSs reserved for CG have been used out, " - "please reserve more LSS for CG if there are still" - "some empty LSSs left.") - LOG.debug('_find_lss_for_cg: available LSSs for consistency ' - 'group are %s', ','.join(available_lss)) - return available_lss - - @proxy.logger - def _find_pool_for_lss(self, available_lss): - for lss in available_lss: - pid = self._helper.get_pool(lss) - if pid: - return (pid, lss) - raise exception.VolumeDriverException( - message=(_("Can not find pool for LSSs %s.") - % ','.join(available_lss))) - - @proxy.logger - def _clone_lun(self, src_lun, tgt_lun): - self._assert(src_lun.size <= tgt_lun.size, - _('Target volume should be bigger or equal ' - 'to the Source volume in size.')) - self._ensure_vol_not_fc_target(src_lun.ds_id) - # image volume cache brings two cases for clone lun: - # 1. volume ID of src_lun and tgt_lun will be the same one because - # _clone_image_volume does not pop the provider_location. - # 2. if creating image volume failed at the first time, tgt_lun will be - # deleted, so when it is sent to driver again, it will not exist. 
- if (tgt_lun.ds_id is None or - src_lun.ds_id == tgt_lun.ds_id or - not self._helper.lun_exists(tgt_lun.ds_id)): - # It is a preferred practice to locate the FlashCopy target - # volume on the same DS8000 server as the FlashCopy source volume. - pool = self._helper.get_pool(src_lun.ds_id[0:2]) - # flashcopy to larger target only works with thick vols, so we - # emulate for thin by extending after copy - if tgt_lun.type_thin and tgt_lun.size > src_lun.size: - tmp_size = tgt_lun.size - tgt_lun.size = src_lun.size - self._create_lun_helper(tgt_lun, pool) - tgt_lun.size = tmp_size - else: - self._create_lun_helper(tgt_lun, pool) - else: - self._assert( - src_lun.size == tgt_lun.size, - _('When target volume is pre-created, it must be equal ' - 'in size to source volume.')) - - finished = False - try: - vol_pairs = [{ - "source_volume": src_lun.ds_id, - "target_volume": tgt_lun.ds_id - }] - self._helper.start_flashcopy(vol_pairs) - fc_finished = self._helper.wait_flashcopy_finished( - [src_lun], [tgt_lun]) - if (fc_finished and - tgt_lun.type_thin and - tgt_lun.size > src_lun.size): - param = { - 'cap': self._helper._gb2b(tgt_lun.size), - 'captype': 'bytes' - } - self._helper.change_lun(tgt_lun.ds_id, param) - finished = fc_finished - finally: - if not finished: - self._helper.delete_lun(tgt_lun) - - return tgt_lun - - def _ensure_vol_not_fc_target(self, vol_hex_id): - for cp in self._helper.get_flashcopy(vol_hex_id): - if cp['targetvolume']['id'] == vol_hex_id: - raise restclient.APIException( - data=(_('Volume %s is currently a target of another ' - 'FlashCopy operation') % vol_hex_id)) - - def _create_replica_helper(self, lun): - if not lun.pool_lss_pair.get('target'): - lun = self._replication.enable_replication(lun, True) - else: - lun = self._replication.create_replica(lun) - return lun - - @proxy._trace_time - def create_volume(self, volume): - lun = self._create_lun_helper(Lun(volume)) - if lun.type_replication: - lun = self._create_replica_helper(lun) - return 
lun.get_volume_update() - - @proxy._trace_time - def create_cloned_volume(self, target_vol, source_vol): - lun = self._clone_lun(Lun(source_vol), Lun(target_vol)) - if lun.type_replication: - lun = self._create_replica_helper(lun) - return lun.get_volume_update() - - @proxy._trace_time - def create_volume_from_snapshot(self, volume, snapshot): - lun = self._clone_lun(Lun(snapshot, is_snapshot=True), Lun(volume)) - if lun.type_replication: - lun = self._create_replica_helper(lun) - return lun.get_volume_update() - - @proxy._trace_time - def extend_volume(self, volume, new_size): - lun = Lun(volume) - param = { - 'cap': self._helper._gb2b(new_size), - 'captype': 'bytes' - } - if lun.type_replication: - if not self._active_backend_id: - self._replication.delete_pprc_pairs(lun) - self._helper.change_lun(lun.ds_id, param) - self._replication.extend_replica(lun, param) - self._replication.create_pprc_pairs(lun) - else: - raise exception.VolumeDriverException( - message=(_("The volume %s has been failed over, it is " - "not suggested to extend it.") % lun.ds_id)) - else: - self._helper.change_lun(lun.ds_id, param) - - @proxy._trace_time - def volume_exists(self, volume): - return self._helper.lun_exists(Lun(volume).ds_id) - - @proxy._trace_time - def delete_volume(self, volume): - lun = Lun(volume) - if lun.type_replication: - lun = self._replication.delete_replica(lun) - self._helper.delete_lun(lun) - - @proxy._trace_time - def create_snapshot(self, snapshot): - return self._clone_lun(Lun(snapshot['volume']), Lun( - snapshot, is_snapshot=True)).get_volume_update() - - @proxy._trace_time - def delete_snapshot(self, snapshot): - self._helper.delete_lun(Lun(snapshot, is_snapshot=True)) - - @proxy._trace_time - def migrate_volume(self, ctxt, volume, backend): - # this and retype is a complete mess, pending cinder changes for fix. - # currently this is only for migrating between pools on the same - # physical machine but different cinder.conf backends. 
- # volume not allowed to get here if cg or repl - # should probably check volume['status'] in ['available', 'in-use'], - # especially for flashcopy - lun = Lun(volume) - if lun.type_replication: - raise exception.VolumeDriverException( - message=_('Driver does not support migrate replicated ' - 'volume, it can be done via retype.')) - stats = self.meta['stat'] - if backend['capabilities']['vendor_name'] != stats['vendor_name']: - raise exception.VolumeDriverException(_( - 'source and destination vendors differ.')) - if backend['capabilities']['serial_number'] != stats['serial_number']: - raise exception.VolumeDriverException(_( - 'source and destination serial numbers differ.')) - new_pools = self._helper.get_pools( - backend['capabilities']['extent_pools']) - - cur_pool_id = self._helper.get_lun_pool(lun.ds_id)['id'] - cur_node = self._helper.get_storage_pools()[cur_pool_id]['node'] - - # try pools in same rank - for pid, pool in new_pools.items(): - if pool['node'] == cur_node: - try: - self._helper.change_lun(lun.ds_id, {'pool': pid}) - return (True, None) - except Exception: - pass - - # try pools in opposite rank - for pid, pool in new_pools.items(): - if pool['node'] != cur_node: - try: - new_lun = lun.shallow_copy() - self._create_lun_helper(new_lun, pid, False) - self._clone_lun(lun, new_lun) - volume_update = new_lun.update_volume(lun) - try: - self._helper.delete_lun(lun) - except Exception: - pass - return (True, volume_update) - except Exception: - # will ignore missing ds_id if failed create volume - self._helper.delete_lun(new_lun) - - return (False, None) - - @proxy._trace_time - def retype(self, ctxt, volume, new_type, diff, host): - """retype the volume. 
- - :param ctxt: Context - :param volume: A dictionary describing the volume to migrate - :param new_type: A dictionary describing the volume type to convert to - :param diff: A dictionary with the difference between the two types - :param host: A dictionary describing the host to migrate to, where - host['host'] is its name, and host['capabilities'] is a - dictionary of its reported capabilities. - """ - def _check_extra_specs(key, value=None): - extra_specs = diff.get('extra_specs') - specific_type = extra_specs.get(key) if extra_specs else None - old_type = None - new_type = None - if specific_type: - old_type, new_type = specific_type - if value: - old_type = (True if old_type and old_type.upper() == value - else False) - new_type = (True if new_type and new_type.upper() == value - else False) - return old_type, new_type - - lun = Lun(volume) - # check user specify pool or lss or not - old_specified_pool, new_specified_pool = _check_extra_specs( - 'drivers:storage_pool_ids') - old_specified_lss, new_specified_lss = _check_extra_specs( - 'drivers:storage_lss_ids') - - # check thin or thick - old_type_thick, new_type_thick = _check_extra_specs( - 'drivers:thin_provision', 'FALSE') - - # check replication capability - old_type_replication, new_type_replication = _check_extra_specs( - 'replication_enabled', strings.METADATA_IS_TRUE) - - # start retype, please note that the order here is important - # because of rollback problem once failed to retype. 
- new_props = {} - if old_type_thick != new_type_thick: - new_props['type_thin'] = not new_type_thick - - if (old_specified_pool == new_specified_pool and - old_specified_lss == new_specified_lss): - LOG.info("Same pool and lss.") - elif ((old_specified_pool or old_specified_lss) and - (new_specified_pool or new_specified_lss)): - raise exception.VolumeDriverException( - message=_("Retype does not support to move volume from " - "specified pool or lss to another specified " - "pool or lss.")) - elif ((old_specified_pool is None and new_specified_pool) or - (old_specified_lss is None and new_specified_lss)): - storage_pools = self._helper.get_pools(new_specified_pool) - self._helper.verify_pools(storage_pools) - storage_lss = self._helper.verify_lss_ids(new_specified_lss) - vol_pool = self._helper.get_lun_pool(lun.ds_id)['id'] - vol_lss = lun.ds_id[:2].upper() - # if old volume is in the specified LSS, but it is needed - # to be changed from thin to thick or vice versa, driver - # needs to make sure the new volume will be created in the - # specified LSS. - if ((storage_lss and vol_lss not in storage_lss) or - new_props.get('type_thin')): - new_props['specified_pool'] = new_specified_pool - new_props['specified_lss'] = new_specified_lss - elif vol_pool not in storage_pools.keys(): - vol_node = int(vol_lss, 16) % 2 - new_pool_id = None - for pool_id, pool in storage_pools.items(): - if vol_node == pool['node']: - new_pool_id = pool_id - break - if new_pool_id: - self._helper.change_lun(lun.ds_id, {'pool': new_pool_id}) - else: - raise exception.VolumeDriverException( - message=_("Can not change the pool volume allocated.")) - - new_lun = None - if new_props: - new_lun = lun.shallow_copy() - for key, value in new_props.items(): - setattr(new_lun, key, value) - self._clone_lun(lun, new_lun) - - volume_update = None - if new_lun: - # if new lun meets all requirements of retype sucessfully, - # exception happens during clean up can be ignored. 
- if new_type_replication: - new_lun.type_replication = True - new_lun = self._replication.enable_replication(new_lun, True) - elif old_type_replication: - new_lun.type_replication = False - try: - self._replication.delete_replica(lun) - except Exception: - pass - try: - self._helper.delete_lun(lun) - except Exception: - pass - volume_update = new_lun.update_volume(lun) - else: - # if driver does not create new lun, don't delete source - # lun when failed to enable replication or delete replica. - if not old_type_replication and new_type_replication: - lun.type_replication = True - lun = self._replication.enable_replication(lun) - elif old_type_replication and not new_type_replication: - lun = self._replication.delete_replica(lun) - lun.type_replication = False - volume_update = lun.get_volume_update() - return True, volume_update - - @proxy._trace_time - @proxy.logger - def initialize_connection(self, volume, connector, **kwargs): - """Attach a volume to the host.""" - vol_id = Lun(volume).ds_id - LOG.info('Attach the volume %s.', vol_id) - return self._helper.initialize_connection(vol_id, connector, **kwargs) - - @proxy._trace_time - @proxy.logger - def terminate_connection(self, volume, connector, force=False, **kwargs): - """Detach a volume from a host.""" - vol_id = Lun(volume).ds_id - LOG.info('Detach the volume %s.', vol_id) - return self._helper.terminate_connection(vol_id, connector, - force, **kwargs) - - @proxy.logger - def create_group(self, ctxt, group): - """Create generic volume group.""" - if Group(group).consisgroup_enabled: - self._assert(self._helper.backend['lss_ids_for_cg'], - 'No LSS(s) for CG, please make sure you have ' - 'reserved LSS for CG via param lss_range_for_cg.') - return self._helper.create_group(group) - - @proxy.logger - def delete_group(self, ctxt, group, volumes): - """Delete group and the volumes in the group.""" - luns = [Lun(volume) for volume in volumes] - if Group(group).consisgroup_enabled: - return 
self._delete_group_with_lock(group, luns) - else: - return self._helper.delete_group(group, luns) - - @coordination.synchronized('{self.prefix}-consistency-group') - def _delete_group_with_lock(self, group, luns): - model_update, volumes_model_update = ( - self._helper.delete_group(group, luns)) - if model_update['status'] == fields.GroupStatus.DELETED: - self._update_consisgroup_cache(group.id) - return model_update, volumes_model_update - - @proxy.logger - def delete_group_snapshot(self, ctxt, group_snapshot, snapshots): - """Delete volume group snapshot.""" - tgt_luns = [Lun(s, is_snapshot=True) for s in snapshots] - if Group(group_snapshot, True).consisgroup_enabled: - return self._delete_group_snapshot_with_lock( - group_snapshot, tgt_luns) - else: - return self._helper.delete_group_snapshot( - group_snapshot, tgt_luns) - - @coordination.synchronized('{self.prefix}-consistency-group') - def _delete_group_snapshot_with_lock(self, group_snapshot, tgt_luns): - model_update, snapshots_model_update = ( - self._helper.delete_group_snapshot(group_snapshot, tgt_luns)) - if model_update['status'] == fields.GroupStatus.DELETED: - self._update_consisgroup_cache(group_snapshot.id) - return model_update, snapshots_model_update - - @proxy.logger - def create_group_snapshot(self, ctxt, group_snapshot, snapshots): - """Create volume group snapshot.""" - snapshots_model_update = [] - model_update = {'status': fields.GroupStatus.AVAILABLE} - - src_luns = [Lun(snapshot['volume']) for snapshot in snapshots] - tgt_luns = [Lun(snapshot, is_snapshot=True) for snapshot in snapshots] - - try: - if src_luns and tgt_luns: - self._clone_group(src_luns, tgt_luns) - except restclient.APIException: - model_update['status'] = fields.GroupStatus.ERROR - LOG.exception('Failed to create group snapshot.') - - for tgt_lun in tgt_luns: - snapshot_model_update = tgt_lun.get_volume_update() - snapshot_model_update.update({ - 'id': tgt_lun.os_id, - 'status': model_update['status'] - }) - 
snapshots_model_update.append(snapshot_model_update) - - return model_update, snapshots_model_update - - @proxy.logger - def update_group(self, ctxt, group, add_volumes, remove_volumes): - """Update generic volume group.""" - if Group(group).consisgroup_enabled: - return self._update_group(group, add_volumes, remove_volumes) - else: - return None, None, None - - def _update_group(self, group, add_volumes, remove_volumes): - add_volumes_update = [] - group_volume_ids = [vol.id for vol in group.volumes] - add_volumes = [vol for vol in add_volumes - if vol.id not in group_volume_ids] - remove_volumes = [vol for vol in remove_volumes - if vol.id in group_volume_ids] - if add_volumes: - add_luns = [Lun(vol) for vol in add_volumes] - lss_in_cg = [Lun(vol).ds_id[:2] for vol in group.volumes] - if not lss_in_cg: - lss_in_cg = self._find_lss_for_empty_group(group, add_luns) - add_volumes_update = self._add_volumes_into_group( - group, add_luns, lss_in_cg) - if remove_volumes: - self._remove_volumes_in_group(group, add_volumes, remove_volumes) - return None, add_volumes_update, None - - @coordination.synchronized('{self.prefix}-consistency-group') - def _find_lss_for_empty_group(self, group, luns): - sorted_lss_ids = collections.Counter([lun.ds_id[:2] for lun in luns]) - available_lss = self._find_lss_for_cg() - lss_for_cg = None - for lss_id in sorted_lss_ids: - if lss_id in available_lss: - lss_for_cg = lss_id - break - if not lss_for_cg: - lss_for_cg = available_lss.pop() - self._update_consisgroup_cache(group.id, lss_for_cg) - return lss_for_cg - - def _add_volumes_into_group(self, group, add_luns, lss_in_cg): - add_volumes_update = [] - luns = [lun for lun in add_luns if lun.ds_id[:2] not in lss_in_cg] - for lun in luns: - if lun.type_replication: - new_lun = self._clone_lun_for_group(group, lun) - new_lun.type_replication = True - new_lun = self._replication.enable_replication(new_lun, True) - lun = self._replication.delete_replica(lun) - else: - new_lun = 
self._clone_lun_for_group(group, lun) - self._helper.delete_lun(lun) - volume_update = new_lun.update_volume(lun) - volume_update['id'] = new_lun.os_id - add_volumes_update.append(volume_update) - return add_volumes_update - - def _clone_lun_for_group(self, group, lun): - lun.group = Group(group) - new_lun = lun.shallow_copy() - new_lun.type_replication = False - self._clone_lun(lun, new_lun) - return new_lun - - @coordination.synchronized('{self.prefix}-consistency-group') - def _remove_volumes_in_group(self, group, add_volumes, remove_volumes): - if len(remove_volumes) == len(group.volumes) + len(add_volumes): - self._update_consisgroup_cache(group.id) - - @proxy.logger - def _update_consisgroup_cache(self, group_id, lss_id=None): - if lss_id: - self.consisgroup_cache[group_id] = set([lss_id]) - else: - if self.consisgroup_cache.get(group_id): - LOG.debug('Group %(id)s owns LSS %(lss)s in the cache.', { - 'id': group_id, - 'lss': ','.join(self.consisgroup_cache[group_id]) - }) - self.consisgroup_cache.pop(group_id) - - @proxy._trace_time - def create_group_from_src(self, ctxt, group, volumes, group_snapshot, - sorted_snapshots, source_group, - sorted_source_vols): - """Create volume group from volume group or volume group snapshot.""" - model_update = {'status': fields.GroupStatus.AVAILABLE} - volumes_model_update = [] - - if group_snapshot and sorted_snapshots: - src_luns = [Lun(snapshot, is_snapshot=True) - for snapshot in sorted_snapshots] - elif source_group and sorted_source_vols: - src_luns = [Lun(source_vol) - for source_vol in sorted_source_vols] - else: - msg = _("_create_group_from_src supports a group snapshot " - "source or a group source, other sources can not " - "be used.") - LOG.error(msg) - raise exception.InvalidInput(message=msg) - - try: - # Don't use paramter volumes because it has DetachedInstanceError - # issue frequently. 
here tries to get and sort new volumes, a lot - # of cases have been guaranteed by the _sort_source_vols in - # manange.py, so not verify again. - sorted_volumes = [] - for vol in volumes: - found_vols = [v for v in group.volumes if v['id'] == vol['id']] - sorted_volumes.extend(found_vols) - volumes = sorted_volumes - - tgt_luns = [Lun(volume) for volume in volumes] - if src_luns and tgt_luns: - self._clone_group(src_luns, tgt_luns) - for tgt_lun in tgt_luns: - if tgt_lun.type_replication: - self._create_replica_helper(tgt_lun) - except restclient.APIException: - model_update['status'] = fields.GroupStatus.ERROR - LOG.exception("Failed to create group from group snapshot.") - - for tgt_lun in tgt_luns: - volume_model_update = tgt_lun.get_volume_update() - volume_model_update.update({ - 'id': tgt_lun.os_id, - 'status': model_update['status'] - }) - volumes_model_update.append(volume_model_update) - - return model_update, volumes_model_update - - def _clone_group(self, src_luns, tgt_luns): - for src_lun in src_luns: - self._ensure_vol_not_fc_target(src_lun.ds_id) - finished = False - try: - vol_pairs = [] - for src_lun, tgt_lun in zip(src_luns, tgt_luns): - pool = self._helper.get_pool(src_lun.ds_id[0:2]) - if tgt_lun.ds_id is None: - self._create_lun_helper(tgt_lun, pool) - vol_pairs.append({ - "source_volume": src_lun.ds_id, - "target_volume": tgt_lun.ds_id - }) - if tgt_lun.group.consisgroup_enabled: - self._do_flashcopy_with_freeze(vol_pairs) - else: - self._helper.start_flashcopy(vol_pairs) - finished = self._helper.wait_flashcopy_finished(src_luns, tgt_luns) - finally: - if not finished: - self._helper.delete_lun(tgt_luns) - - @coordination.synchronized('{self.prefix}-consistency-group') - @proxy._trace_time - def _do_flashcopy_with_freeze(self, vol_pairs): - # issue flashcopy with freeze - self._helper.start_flashcopy(vol_pairs, True) - # unfreeze the LSS where source volumes are in - lss_ids = list(set(p['source_volume'][0:2] for p in vol_pairs)) - 
LOG.debug('Unfreezing the LSS: %s', ','.join(lss_ids)) - self._helper.unfreeze_lss(lss_ids) - - def freeze_backend(self, ctxt): - """Notify the backend that it's frozen.""" - pass - - def thaw_backend(self, ctxt): - """Notify the backend that it's unfrozen/thawed.""" - pass - - @proxy.logger - @proxy._trace_time - def failover_host(self, ctxt, volumes, secondary_id, groups=None): - """Fail over the volume back and forth. - - if secondary_id is 'default', volumes will be failed back, - otherwize failed over. - """ - volume_update_list = [] - if secondary_id == strings.PRIMARY_BACKEND_ID: - if not self._active_backend_id: - LOG.info("Host has been failed back. doesn't need " - "to fail back again.") - return self._active_backend_id, volume_update_list, [] - else: - if self._active_backend_id: - LOG.info("Host has been failed over to %s.", - self._active_backend_id) - return self._active_backend_id, volume_update_list, [] - - backend_id = self._replication._target_helper.backend['id'] - if secondary_id is None: - secondary_id = backend_id - elif secondary_id != backend_id: - raise exception.InvalidReplicationTarget( - message=(_('Invalid secondary_backend_id specified. 
' - 'Valid backend id is %s.') % backend_id)) - - LOG.debug("Starting failover to %s.", secondary_id) - - replicated_luns = [] - for volume in volumes: - lun = Lun(volume) - if lun.type_replication and lun.status == "available": - replicated_luns.append(lun) - else: - volume_update = ( - self._replication.failover_unreplicated_volume(lun)) - volume_update_list.append(volume_update) - - if replicated_luns: - try: - if secondary_id != strings.PRIMARY_BACKEND_ID: - self._replication.do_pprc_failover(replicated_luns, - secondary_id) - self._active_backend_id = secondary_id - replicated_luns = self._switch_backend_connection( - secondary_id, replicated_luns) - else: - self._replication.start_pprc_failback( - replicated_luns, self._active_backend_id) - self._active_backend_id = "" - self._helper = self._replication._source_helper - except restclient.APIException as e: - raise exception.UnableToFailOver( - reason=(_("Unable to failover host to %(id)s. " - "Exception= %(ex)s") - % {'id': secondary_id, 'ex': six.text_type(e)})) - - for lun in replicated_luns: - volume_update = lun.get_volume_update() - volume_update['replication_status'] = ( - 'failed-over' if self._active_backend_id else 'enabled') - model_update = {'volume_id': lun.os_id, - 'updates': volume_update} - volume_update_list.append(model_update) - else: - LOG.info("No volume has replication capability.") - if secondary_id != strings.PRIMARY_BACKEND_ID: - LOG.info("Switch to the target %s", secondary_id) - self._switch_backend_connection(secondary_id) - self._active_backend_id = secondary_id - else: - LOG.info("Switch to the primary %s", secondary_id) - self._switch_backend_connection(self._active_backend_id) - self._active_backend_id = "" - - return secondary_id, volume_update_list, [] diff --git a/cinder/volume/drivers/ibm/ibm_storage/ds8k_replication.py b/cinder/volume/drivers/ibm/ibm_storage/ds8k_replication.py deleted file mode 100644 index f15499aa6..000000000 --- 
a/cinder/volume/drivers/ibm/ibm_storage/ds8k_replication.py +++ /dev/null @@ -1,586 +0,0 @@ -# Copyright (c) 2016 IBM Corporation -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -# -import ast -import eventlet -import six - -from oslo_log import log as logging -from oslo_utils import excutils - -from cinder import coordination -from cinder import exception -from cinder.i18n import _ -import cinder.volume.drivers.ibm.ibm_storage as storage -from cinder.volume.drivers.ibm.ibm_storage import ds8k_helper as helper -from cinder.volume.drivers.ibm.ibm_storage import ds8k_restclient as restclient -from cinder.volume.drivers.ibm.ibm_storage import proxy - -LOG = logging.getLogger(__name__) - -PPRC_PATH_NOT_EXIST = 0x00 -PPRC_PATH_HEALTHY = 0x01 -PPRC_PATH_UNHEALTHY = 0x02 -PPRC_PATH_FULL = 0x03 - - -class MetroMirrorManager(object): - """Manage metro mirror for replication.""" - - def __init__(self, source, target): - self._source = source - self._target = target - - def switch_source_and_target(self): - self._source, self._target = self._target, self._source - - def check_physical_links(self): - ports = self._source.get_physical_links( - self._target.backend['storage_wwnn']) - if not ports: - raise exception.VolumeDriverException( - message=((_("%(tgt)s is not connected to %(src)s!") % { - 'tgt': self._target.backend['storage_wwnn'], - 'src': self._source.backend['storage_wwnn'] - }))) - - pairs = [{ - 'source_port_id': p['source_port_id'], - 
'target_port_id': p['target_port_id'] - } for p in ports] - if not self._target.backend['port_pairs']: - # if there are more than eight physical links, - # choose eight of them. - self._target.backend['port_pairs'] = ( - pairs[:8] if len(pairs) > 8 else pairs) - else: - # verify the port pairs user set - for pair in self._target.backend['port_pairs']: - if pair not in pairs: - valid_pairs = ';'.join( - ["%s-%s" % (p['source_port_id'], - p['target_port_id']) - for p in pairs]) - invalid_pair = "%s-%s" % (pair['source_port_id'], - pair['target_port_id']) - raise exception.VolumeDriverException( - message=((_("Invalid port pair: %(invalid)s, valid " - "port pair(s) are: %(valid)s") - % {'invalid': invalid_pair, - 'valid': valid_pairs}))) - self._source.backend['port_pairs'] = [{ - 'source_port_id': p['target_port_id'], - 'target_port_id': p['source_port_id'] - } for p in self._target.backend['port_pairs']] - - def is_target_alive(self): - try: - self._target.get_systems() - except restclient.TimeoutException as e: - LOG.info("REST request time out, backend may be not available " - "any more. Exception: %s", e) - return False - - return True - - def find_from_pprc_paths(self, specified_lss=None, excluded_lss=None): - """find lss from existing pprc paths and pool id for it. - - the format of pool_lss_pair returned is as below: - {'source': (pid, lss), 'target': (pid, lss)} - """ - state, paths = self._filter_pprc_paths(specified_lss) - if state != PPRC_PATH_HEALTHY: - # check whether the physical links are available or not, - # or have been changed. - self.check_physical_links() - return state, None - if excluded_lss: - paths = [p for p in paths - if p['source_lss_id'] not in excluded_lss] - # only enable_replication will specify the source LSS - # and it need to reuse LSS reserved for CG if this LSS - # is in PPRC path. 
- if not specified_lss: - paths = [p for p in paths if p['source_lss_id'] not in - self._source.backend['lss_ids_for_cg']] - - # sort pairs according to the number of luns in their LSSes, - # and get the pair which LSS has least luns. - candidates = [] - source_lss_set = set(p['source_lss_id'] for p in paths) - for lss in source_lss_set: - # get the number of luns in source. - src_luns = self._source.get_lun_number_in_lss(lss) - if src_luns == helper.LSS_VOL_SLOTS and not specified_lss: - continue - - spec_paths = [p for p in paths if p['source_lss_id'] == lss] - for path in spec_paths: - # get the number of luns in target. - try: - tgt_luns = self._target.get_lun_number_in_lss( - path['target_lss_id']) - except restclient.APIException: - # if DS8K can fix this problem, then remove the - # exception here. - LOG.error("Target LSS %s in PPRC path may doesn't " - "exist although PPRC path is available.", - path['target_lss_id']) - tgt_luns = 0 - candidates.append((path['source_lss_id'], - path['target_lss_id'], - src_luns + tgt_luns)) - if not candidates: - return PPRC_PATH_FULL, None - else: - src_lss, tgt_lss, num = sorted(candidates, key=lambda c: c[2])[0] - return PPRC_PATH_HEALTHY, { - 'source': (self._source.get_pool(src_lss), src_lss), - 'target': (self._target.get_pool(tgt_lss), tgt_lss) - } - - def _filter_pprc_paths(self, lss): - paths = self._source.get_pprc_paths(lss) - if paths: - # get the paths only connected to replication target - paths = [p for p in paths if p['target_system_wwnn'] in - self._target.backend['storage_wwnn']] - else: - LOG.info("No PPRC paths found in primary DS8K.") - return PPRC_PATH_NOT_EXIST, None - - # get the paths whose port pairs have been set in configuration file. 
- expected_port_pairs = [(p['source_port_id'], p['target_port_id']) - for p in self._target.backend['port_pairs']] - for path in paths[:]: - port_pairs = [(p['source_port_id'], p['target_port_id']) - for p in path['port_pairs']] - if not (set(port_pairs) & set(expected_port_pairs)): - paths.remove(path) - if not paths: - LOG.info("Existing PPRC paths do not use port pairs that " - "are set.") - return PPRC_PATH_NOT_EXIST, None - - # abandon PPRC paths according to volume type(fb/ckd) - source_lss_set = set(p['source_lss_id'] for p in paths) - if self._source.backend.get('device_mapping'): - source_lss_set = source_lss_set & set( - self._source.backend['device_mapping'].keys()) - else: - all_lss = self._source.get_all_lss(['id', 'type']) - fb_lss = set( - lss['id'] for lss in all_lss if lss['type'] == 'fb') - source_lss_set = source_lss_set & fb_lss - paths = [p for p in paths if p['source_lss_id'] in source_lss_set] - if not paths: - LOG.info("No source LSS in PPRC paths has correct volume type.") - return PPRC_PATH_NOT_EXIST, None - - # if the group property of lss doesn't match pool node, - # abandon these paths. - discarded_src_lss = [] - discarded_tgt_lss = [] - for lss in source_lss_set: - spec_paths = [p for p in paths if p['source_lss_id'] == lss] - if self._source.get_pool(lss) is None: - discarded_src_lss.append(lss) - continue - - for spec_path in spec_paths: - tgt_lss = spec_path['target_lss_id'] - if self._target.get_pool(tgt_lss) is None: - discarded_tgt_lss.append(tgt_lss) - - if discarded_src_lss: - paths = [p for p in paths if p['source_lss_id'] not in - discarded_src_lss] - if discarded_tgt_lss: - paths = [p for p in paths if p['target_lss_id'] not in - discarded_tgt_lss] - if not paths: - LOG.info("No PPRC paths can be re-used.") - return PPRC_PATH_NOT_EXIST, None - - # abandon unhealthy PPRC paths. 
- for path in paths[:]: - failed_port_pairs = [ - p for p in path['port_pairs'] if p['state'] != 'success'] - if len(failed_port_pairs) == len(path['port_pairs']): - paths.remove(path) - if not paths: - LOG.info("PPRC paths between primary and target DS8K " - "are unhealthy.") - return PPRC_PATH_UNHEALTHY, None - - return PPRC_PATH_HEALTHY, paths - - def create_pprc_path(self, pool_lss_pair): - src_lss = pool_lss_pair['source'][1] - tgt_lss = pool_lss_pair['target'][1] - # check whether the pprc path exists and is healthy or not firstly. - pid = (self._source.backend['storage_wwnn'] + '_' + src_lss + ':' + - self._target.backend['storage_wwnn'] + '_' + tgt_lss) - state = self._is_pprc_paths_healthy(pid) - LOG.info("The state of PPRC path %(path)s is %(state)s.", - {'path': pid, 'state': state}) - if state == PPRC_PATH_HEALTHY: - return - - # create the pprc path - pathData = { - 'target_system_wwnn': self._target.backend['storage_wwnn'], - 'source_lss_id': src_lss, - 'target_lss_id': tgt_lss, - 'port_pairs': self._target.backend['port_pairs'] - } - LOG.info("PPRC path %(src)s:%(tgt)s will be created.", - {'src': src_lss, 'tgt': tgt_lss}) - self._source.create_pprc_path(pathData) - - # check the state of the pprc path - LOG.debug("Checking the state of the new PPRC path.") - for retry in range(4): - eventlet.sleep(2) - if self._is_pprc_paths_healthy(pid) == PPRC_PATH_HEALTHY: - break - if retry == 3: - self._source.delete_pprc_path(pid) - raise restclient.APIException( - data=(_("Failed to create PPRC path %(src)s:%(tgt)s.") - % {'src': src_lss, 'tgt': tgt_lss})) - LOG.debug("Create the new PPRC path successfully.") - - def _is_pprc_paths_healthy(self, path_id): - try: - path = self._source.get_pprc_path(path_id) - except restclient.APIException: - return PPRC_PATH_NOT_EXIST - - for port in path['port_pairs']: - if port['state'] == 'success': - return PPRC_PATH_HEALTHY - - return PPRC_PATH_UNHEALTHY - - def create_pprc_pairs(self, lun): - tgt_vol_id = 
lun.replication_driver_data[ - self._target.backend['id']]['vol_hex_id'] - tgt_stg_id = self._target.backend['storage_unit'] - - vol_pairs = [{ - 'source_volume': lun.ds_id, - 'source_system_id': self._source.backend['storage_unit'], - 'target_volume': tgt_vol_id, - 'target_system_id': tgt_stg_id - }] - pairData = { - "volume_pairs": vol_pairs, - "type": "metro_mirror", - "options": ["permit_space_efficient_target", - "initial_copy_full"] - } - LOG.debug("Creating pprc pair, pairData is %s.", pairData) - self._source.create_pprc_pair(pairData) - self._source.wait_pprc_copy_finished([lun.ds_id], 'full_duplex') - LOG.info("The state of PPRC pair has become full_duplex.") - - def delete_pprc_pairs(self, lun): - self._source.delete_pprc_pair(lun.ds_id) - if self.is_target_alive() and lun.replication_driver_data: - replica = sorted(lun.replication_driver_data.values())[0] - self._target.delete_pprc_pair(replica['vol_hex_id']) - - def do_pprc_failover(self, luns, backend_id): - vol_pairs = [] - target_vol_ids = [] - for lun in luns: - target_vol_id = ( - lun.replication_driver_data[backend_id]['vol_hex_id']) - if not self._target.lun_exists(target_vol_id): - LOG.info("Target volume %(volid)s doesn't exist in " - "DS8K %(storage)s.", - {'volid': target_vol_id, - 'storage': self._target.backend['storage_unit']}) - continue - - vol_pairs.append({ - 'source_volume': target_vol_id, - 'source_system_id': self._target.backend['storage_unit'], - 'target_volume': lun.ds_id, - 'target_system_id': self._source.backend['storage_unit'] - }) - target_vol_ids.append(target_vol_id) - - pairData = { - "volume_pairs": vol_pairs, - "type": "metro_mirror", - "options": ["failover"] - } - - LOG.info("Begin to fail over to %s", - self._target.backend['storage_unit']) - self._target.create_pprc_pair(pairData) - self._target.wait_pprc_copy_finished(target_vol_ids, - 'suspended', False) - LOG.info("Failover from %(src)s to %(tgt)s is finished.", { - 'src': self._source.backend['storage_unit'], - 
'tgt': self._target.backend['storage_unit'] - }) - - def do_pprc_failback(self, luns, backend_id): - pprc_ids = [] - vol_ids = [] - for lun in luns: - target_vol_id = ( - lun.replication_driver_data[backend_id]['vol_hex_id']) - if not self._target.lun_exists(target_vol_id): - LOG.info("Target volume %(volume)s doesn't exist in " - "DS8K %(storage)s.", - {'volume': lun.ds_id, - 'storage': self._target.backend['storage_unit']}) - continue - - pprc_id = (self._source.backend['storage_unit'] + '_' + - lun.ds_id + ':' + - self._target.backend['storage_unit'] + - '_' + target_vol_id) - pprc_ids.append(pprc_id) - vol_ids.append(lun.ds_id) - - pairData = {"pprc_ids": pprc_ids, - "type": "metro_mirror", - "options": ["failback"]} - - LOG.info("Begin to run failback in %s.", - self._source.backend['storage_unit']) - self._source.do_failback(pairData) - self._source.wait_pprc_copy_finished(vol_ids, 'full_duplex', False) - LOG.info("Run failback in %s is finished.", - self._source.backend['storage_unit']) - - -class Replication(object): - """Metro Mirror and Global Mirror will be used by it. - - Version history: - - .. code-block:: none - - 1.0.0 - initial revision. - 2.1.0 - ignore exception during cleanup when creating or deleting - replica failed. 
- """ - - VERSION = "2.1.0" - - def __init__(self, source_helper, target_device): - self._source_helper = source_helper - connection_type = target_device.get('connection_type') - if connection_type == storage.XIV_CONNECTION_TYPE_FC: - self._target_helper = ( - helper.DS8KReplicationTargetHelper(target_device)) - elif connection_type == storage.XIV_CONNECTION_TYPE_FC_ECKD: - self._target_helper = ( - helper.DS8KReplicationTargetECKDHelper(target_device)) - else: - raise exception.InvalidParameterValue( - err=(_("Param [connection_type] %s in replication_device " - "is invalid.") % connection_type)) - - self._target_helper.backend['lss_ids_for_cg'] = ( - self._source_helper.backend['lss_ids_for_cg']) - self._mm_manager = MetroMirrorManager(self._source_helper, - self._target_helper) - - def check_connection_type(self): - src_conn_type = self._source_helper.get_connection_type() - tgt_conn_type = self._target_helper.get_connection_type() - if src_conn_type != tgt_conn_type: - raise exception.VolumeDriverException( - message=(_("The connection type in primary backend is " - "%(primary)s, but in secondary backend it is " - "%(secondary)s") - % {'primary': src_conn_type, - 'secondary': tgt_conn_type})) - - def check_physical_links(self): - self._mm_manager.check_physical_links() - - def switch_source_and_target(self, secondary_id, luns=None): - # switch the helper in metro mirror manager - self._mm_manager.switch_source_and_target() - # switch the helper - self._source_helper, self._target_helper = ( - self._target_helper, self._source_helper) - # switch the volume id - if luns: - for lun in luns: - backend = lun.replication_driver_data.get(secondary_id, None) - lun.replication_driver_data.update( - {secondary_id: {'vol_hex_id': lun.ds_id}}) - lun.ds_id = backend['vol_hex_id'] - return luns - - @proxy.logger - def find_pool_lss_pair(self, excluded_lss): - state, pool_lss_pair = ( - self._mm_manager.find_from_pprc_paths(None, excluded_lss)) - if pool_lss_pair is None: - 
pool_lss_pair = self.find_new_lss_for_source(excluded_lss) - pool_lss_pair.update(self.find_new_lss_for_target()) - return pool_lss_pair - - @proxy.logger - def find_new_lss_for_source(self, excluded_lss): - src_pid, src_lss = self._source_helper.find_pool_and_lss(excluded_lss) - return {'source': (src_pid, src_lss)} - - @proxy.logger - def find_new_lss_for_target(self): - tgt_pid, tgt_lss = self._target_helper.find_pool_and_lss() - return {'target': (tgt_pid, tgt_lss)} - - @proxy.logger - def enable_replication(self, lun, delete_source=False): - state, lun.pool_lss_pair = ( - self._mm_manager.find_from_pprc_paths(lun.ds_id[0:2])) - LOG.debug("enable_replication: pool_lss_pair is %s.", - lun.pool_lss_pair) - if state == PPRC_PATH_UNHEALTHY: - raise restclient.APIException( - data=(_("The path(s) for volume %(name)s isn't available " - "any more, please make sure the state of the path(s) " - "which source LSS is %(lss)s is success.") - % {'name': lun.cinder_name, 'lss': lun.ds_id[0:2]})) - elif state == PPRC_PATH_NOT_EXIST: - pid = self._source_helper.get_pool(lun.ds_id[0:2]) - lun.pool_lss_pair = {'source': (pid, lun.ds_id[0:2])} - lun.pool_lss_pair.update(self.find_new_lss_for_target()) - lun = self.create_replica(lun, delete_source) - return lun - - @proxy.logger - @coordination.synchronized('ibm-ds8k-replication') - def create_replica(self, lun, delete_source=True): - try: - self._target_helper.create_lun(lun) - # create PPRC paths if need. 
- self._mm_manager.create_pprc_path(lun.pool_lss_pair) - # create pprc pair - self._mm_manager.create_pprc_pairs(lun) - except restclient.APIException: - with excutils.save_and_reraise_exception(): - try: - self.delete_replica(lun) - if delete_source: - self._source_helper.delete_lun(lun) - except restclient.APIException as ex: - LOG.info("Failed to cleanup replicated volume %(id)s, " - "Exception: %(ex)s.", - {'id': lun.ds_id, 'ex': ex}) - lun.replication_status = 'enabled' - return lun - - @proxy.logger - def delete_replica(self, lun, delete_source=False): - if lun.ds_id is not None: - try: - self._mm_manager.delete_pprc_pairs(lun) - self._delete_replica(lun) - except restclient.APIException as e: - if delete_source: - try: - self._source_helper.delete_lun(lun) - except restclient.APIException as ex: - LOG.info("Failed to delete source volume %(id)s, " - "Exception: %(ex)s.", - {'id': lun.ds_id, 'ex': ex}) - raise exception.VolumeDriverException( - message=(_('Failed to delete the target volume for ' - 'volume %(volume)s, Exception: %(ex)s.') - % {'volume': lun.ds_id, 'ex': six.text_type(e)})) - lun.replication_status = 'disabled' - lun.replication_driver_data = {} - return lun - - @proxy.logger - def _delete_replica(self, lun): - if not lun.replication_driver_data: - LOG.error("No replica ID for lun %s, maybe there is something " - "wrong when creating the replica for lun.", lun.ds_id) - return None - - for backend_id, backend in lun.replication_driver_data.items(): - if not self._mm_manager.is_target_alive(): - return None - - if not self._target_helper.lun_exists(backend['vol_hex_id']): - LOG.debug("Replica %s not found.", backend['vol_hex_id']) - continue - - LOG.debug("Deleting replica %s.", backend['vol_hex_id']) - self._target_helper.delete_lun_by_id(backend['vol_hex_id']) - - def extend_replica(self, lun, param): - for backend_id, backend in lun.replication_driver_data.items(): - self._target_helper.change_lun(backend['vol_hex_id'], param) - - def 
delete_pprc_pairs(self, lun): - self._mm_manager.delete_pprc_pairs(lun) - - def create_pprc_pairs(self, lun): - self._mm_manager.create_pprc_pairs(lun) - - def do_pprc_failover(self, luns, backend_id): - self._mm_manager.do_pprc_failover(luns, backend_id) - - @proxy.logger - def start_pprc_failback(self, luns, backend_id): - # check whether primary client is alive or not. - if not self._mm_manager.is_target_alive(): - try: - self._target_helper.update_client() - except restclient.APIException: - msg = _("Can not connect to the primary backend, " - "please make sure it is back.") - LOG.error(msg) - raise exception.UnableToFailOver(reason=msg) - - LOG.debug("Failback starts, backend id is %s.", backend_id) - for lun in luns: - self._mm_manager.create_pprc_path(lun.pool_lss_pair) - self._mm_manager.do_pprc_failback(luns, backend_id) - # revert the relationship of source volume and target volume - self.do_pprc_failover(luns, backend_id) - self.switch_source_and_target(backend_id, luns) - self._mm_manager.do_pprc_failback(luns, backend_id) - LOG.debug("Failback ends, backend id is %s.", backend_id) - - @proxy.logger - def failover_unreplicated_volume(self, lun): - provider_location = ast.literal_eval(lun.volume['provider_location']) - if 'old_status' in provider_location: - updates = {'status': provider_location['old_status']} - del provider_location['old_status'] - updates['provider_location'] = six.text_type(provider_location) - else: - provider_location['old_status'] = lun.status - updates = { - 'status': 'error', - 'provider_location': six.text_type(provider_location) - } - return {'volume_id': lun.os_id, 'updates': updates} diff --git a/cinder/volume/drivers/ibm/ibm_storage/ds8k_restclient.py b/cinder/volume/drivers/ibm/ibm_storage/ds8k_restclient.py deleted file mode 100644 index cf56d7fa2..000000000 --- a/cinder/volume/drivers/ibm/ibm_storage/ds8k_restclient.py +++ /dev/null @@ -1,349 +0,0 @@ -# Copyright (c) 2016 IBM Corporation -# All Rights Reserved. 
-# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -# -import abc -import eventlet -import importlib -import json -import six -from six.moves import urllib - -import requests -from requests import exceptions as req_exception - -from cinder import exception -from cinder.i18n import _ - -TOKEN_ERROR_CODES = ('BE7A001B', 'BE7A001A') -# remove BE7A0032 after REST fixed the problem of throwing message -# which shows all LSS are full but actually only one LSS is full. -LSS_ERROR_CODES = ('BE7A0031', 'BE7A0032') -AUTHENTICATION_ERROR_CODES = ( - 'BE7A001B', 'BE7A001A', 'BE7A0027', - 'BE7A0028', 'BE7A0029', 'BE7A002A', - 'BE7A002B', 'BE7A002C', 'BE7A002D' -) - - -class APIException(exception.VolumeBackendAPIException): - """Exception raised for errors in the REST APIs.""" - - """ - Attributes: - message -- explanation of the error - """ - pass - - -class APIAuthenticationException(APIException): - """Exception raised for errors in the Authentication.""" - - """ - Attributes: - message -- explanation of the error - """ - pass - - -class LssFullException(APIException): - """Exception raised for errors when LSS is full.""" - - """ - Attributes: - message -- explanation of the error - """ - pass - - -class LssIDExhaustError(exception.VolumeBackendAPIException): - """Exception raised for errors when can not find available LSS.""" - - """ - Attributes: - message -- explanation of the error - """ - pass - - -class TimeoutException(APIException): - """Exception raised when the request is 
time out.""" - - """ - Attributes: - message -- explanation of the error - """ - pass - - -@six.add_metaclass(abc.ABCMeta) -class AbstractRESTConnector(object): - """Inherit this class when you define your own connector.""" - - @abc.abstractmethod - def close(self): - """close the connector. - - If the connector uses persistent connection, please provide - a way to close it in this method, otherwise you can just leave - this method empty. - - Input: None - Output: None - Exception: can raise any exceptions - """ - pass - - @abc.abstractmethod - def send(self, method='', url='', headers=None, payload='', timeout=900): - """send the request. - - Input: see above - Output: - - if we reached the server and read an HTTP response: - - .. code:: text - - (INTEGER__HTTP_RESPONSE_STATUS_CODE, - STRING__BODY_OF_RESPONSE_EVEN_IF_STATUS_NOT_200) - - if we were not able to reach the server or response - was invalid HTTP(like certificate error, or could not - resolve domain etc): - - .. code:: text - - (False, STRING__SHORT_EXPLANATION_OF_REASON_FOR_NOT_ - REACHING_SERVER_OR_GETTING_INVALID_RESPONSE) - - Exception: should not raise any exceptions itself as all - the expected scenarios are covered above. Unexpected - exceptions are permitted. - - """ - pass - - -class DefaultRESTConnector(AbstractRESTConnector): - """User can write their own connector and pass it to RESTScheduler.""" - - def __init__(self, verify): - # overwrite certificate validation method only when using - # default connector, and not globally import the new scheme. - if isinstance(verify, six.string_types): - importlib.import_module("cinder.volume.drivers.ibm.ibm_storage." 
- "ds8k_connection") - self.session = None - self.verify = verify - - def connect(self): - if self.session is None: - self.session = requests.Session() - if isinstance(self.verify, six.string_types): - self.session.mount('httpsds8k://', - requests.adapters.HTTPAdapter()) - else: - self.session.mount('https://', - requests.adapters.HTTPAdapter()) - self.session.verify = self.verify - - def close(self): - self.session.close() - self.session = None - - def send(self, method='', url='', headers=None, payload='', timeout=900): - self.connect() - try: - if isinstance(self.verify, six.string_types): - url = url.replace('https://', 'httpsds8k://') - resp = self.session.request(method, - url, - headers=headers, - data=payload, - timeout=timeout) - return resp.status_code, resp.text - except req_exception.ConnectTimeout as e: - self.close() - return 408, "Connection time out: %s" % six.text_type(e) - except req_exception.SSLError as e: - self.close() - return False, "SSL error: %s" % six.text_type(e) - except Exception as e: - self.close() - return False, "Unexcepted exception: %s" % six.text_type(e) - - -class RESTScheduler(object): - """This class is multithread friendly. - - it isn't optimally (token handling) but good enough for low-mid traffic. - """ - - def __init__(self, host, user, passw, connector_obj, verify=False): - if not host: - raise APIException('The host parameter must not be empty.') - # the api incorrectly transforms an empty password to a missing - # password paramter, so we have to catch it here - if not user or not passw: - raise APIAuthenticationException( - _('The username and the password parameters must ' - 'not be empty.')) - self.token = '' - self.host = host - self.port = '8452' - self.user = user - self.passw = passw - self.connector = connector_obj or DefaultRESTConnector(verify) - self.connect() - - def connect(self): - # one retry when connecting, 60s should be enough to get the token, - # usually it is within 30s. 
- try: - response = self.send( - 'POST', '/tokens', - {'username': self.user, 'password': self.passw}, - timeout=60) - except Exception: - eventlet.sleep(2) - response = self.send( - 'POST', '/tokens', - {'username': self.user, 'password': self.passw}, - timeout=60) - self.token = response['token']['token'] - - def close(self): - self.connector.close() - - # usually NI responses within 15min. - def send(self, method, endpoint, data=None, badStatusException=True, - params=None, fields=None, timeout=900): - # verify the method - if method not in ('GET', 'POST', 'PUT', 'DELETE'): - msg = _("Invalid HTTP method: %s") % method - raise APIException(data=msg) - - # prepare the url - url = "https://%s:%s/api/v1%s" % (self.host, self.port, endpoint) - if fields: - params = params or {} - params['data_fields'] = ','.join(fields) - if params: - url += (('&' if '?' in url else '?') + - urllib.parse.urlencode(params)) - - # prepare the data - data = json.dumps({'request': {'params': data}}) if data else None - # make a REST request to DS8K and get one retry if logged out - for attempts in range(2): - headers = {'Content-Type': 'application/json', - 'X-Auth-Token': self.token} - code, body = self.connector.send(method, url, headers, - data, timeout) - # parse the returned code - if code == 200: - try: - response = json.loads(body) - except ValueError: - response = {'server': { - 'status': 'failed', - 'message': 'Unable to parse server response into json.' 
- }} - elif code == 408: - response = {'server': {'status': 'timeout', 'message': body}} - elif code is not False: - try: - response = json.loads(body) - # make sure has useful message - response['server']['message'] - except Exception: - response = {'server': { - 'status': 'failed', - 'message': 'HTTP %s: %s' % (code, body) - }} - else: - response = {'server': {'status': 'failed', 'message': body}} - - # handle the response - if (response['server'].get('code') in TOKEN_ERROR_CODES and - attempts == 0): - self.connect() - elif response['server'].get('code') in AUTHENTICATION_ERROR_CODES: - raise APIAuthenticationException( - data=(_('Authentication failed for host %(host)s. ' - 'Exception= %(e)s') % - {'host': self.host, - 'e': response['server']['message']})) - elif response['server'].get('code') in LSS_ERROR_CODES: - raise LssFullException( - data=(_('Can not put the volume in LSS: %s') - % response['server']['message'])) - elif response['server']['status'] == 'timeout': - raise TimeoutException( - data=(_('Request to storage API time out: %s') - % response['server']['message'])) - elif (response['server']['status'] != 'ok' and - (badStatusException or 'code' not in response['server'])): - # if code is not in response means that error was in - # transport so we raise exception even if asked not to - # via badStatusException=False, but will retry it to - # confirm the problem. - if attempts == 1: - raise APIException( - data=(_("Request to storage API failed: %(err)s, " - "(%(url)s).") - % {'err': response['server']['message'], - 'url': url})) - eventlet.sleep(2) - else: - return response - - # same as the send method above but returns first item from - # response data, must receive only one item. 
- def fetchall(self, *args, **kwargs): - r = self.send(*args, **kwargs)['data'] - if len(r) != 1: - raise APIException( - data=(_('Expected one result but got %d.') % len(r))) - else: - return r.popitem()[1] - - # the api for some reason returns a list when you request details - # of a specific item. - def fetchone(self, *args, **kwargs): - r = self.fetchall(*args, **kwargs) - if len(r) != 1: - raise APIException( - data=(_('Expected one item in result but got %d.') % len(r))) - return r[0] - - # same as the send method above but returns the last element of the - # link property in the response. - def fetchid(self, *args, **kwargs): - r = self.send(*args, **kwargs) - if 'responses' in r: - if len(r['responses']) != 1: - raise APIException( - data=(_('Expected one item in result responses but ' - 'got %d.') % len(r['responses']))) - r = r['responses'][0] - return r['link']['href'].split('/')[-1] - - # the api unfortunately has no way to differentiate between api error - # and error in DS8K resources. this method returns True if "ok", False - # if "failed", exception otherwise. - def statusok(self, *args, **kwargs): - return self.send(*args, badStatusException=False, - **kwargs)['server']['status'] == 'ok' diff --git a/cinder/volume/drivers/ibm/ibm_storage/ibm_storage.py b/cinder/volume/drivers/ibm/ibm_storage/ibm_storage.py deleted file mode 100644 index ac46cfed8..000000000 --- a/cinder/volume/drivers/ibm/ibm_storage/ibm_storage.py +++ /dev/null @@ -1,293 +0,0 @@ -# Copyright 2013 IBM Corp. -# Copyright (c) 2013 OpenStack Foundation -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. 
You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -""" -IBM Storage driver is a unified Volume driver for IBM XIV, Spectrum Accelerate, -FlashSystem A9000, FlashSystem A9000R and DS8000 storage systems. -""" - -from oslo_config import cfg -from oslo_log import log as logging -from oslo_utils import importutils - -from cinder import exception -from cinder import interface -from cinder.volume import configuration -from cinder.volume import driver -from cinder.volume.drivers.san import san -from cinder.zonemanager import utils as fczm_utils - -driver_opts = [ - cfg.StrOpt( - 'proxy', - default='cinder.volume.drivers.ibm.ibm_storage.proxy.IBMStorageProxy', - help='Proxy driver that connects to the IBM Storage Array'), - cfg.StrOpt( - 'connection_type', - default='iscsi', - choices=['fibre_channel', 'iscsi'], - help='Connection type to the IBM Storage Array'), - cfg.StrOpt( - 'chap', - default='disabled', - choices=['disabled', 'enabled'], - help='CHAP authentication mode, effective only for iscsi' - ' (disabled|enabled)'), - cfg.StrOpt( - 'management_ips', - default='', - help='List of Management IP addresses (separated by commas)'), -] - -CONF = cfg.CONF -CONF.register_opts(driver_opts, group=configuration.SHARED_CONF_GROUP) - -LOG = logging.getLogger(__name__) - - -@interface.volumedriver -class IBMStorageDriver(san.SanDriver, - driver.ManageableVD, - driver.MigrateVD, - driver.CloneableImageVD): - """IBM Storage driver - - IBM Storage driver is a unified Volume driver for IBM XIV, Spectrum - Accelerate, FlashSystem A9000, FlashSystem A9000R and DS8000 storage - systems. - - Version history: - - .. 
code-block:: none - - 2.0 - First open source driver version - 2.1.0 - Support Consistency groups through Generic volume groups - - Support XIV/A9000 Volume independent QoS - - Support Consistency groups replication - """ - - VERSION = "2.1.0" - - # ThirdPartySystems wiki page - CI_WIKI_NAME = "IBM_STORAGE_CI" - - def __init__(self, *args, **kwargs): - """Initialize the driver.""" - - super(IBMStorageDriver, self).__init__(*args, **kwargs) - - self.configuration.append_config_values(driver_opts) - - proxy = importutils.import_class(self.configuration.proxy) - - active_backend_id = kwargs.get('active_backend_id', None) - - # Driver additional flags should be specified in the cinder.conf - # preferably in each backend configuration. - - self.proxy = proxy( - { - "user": self.configuration.san_login, - "password": self.configuration.san_password, - "address": self.configuration.san_ip, - "vol_pool": self.configuration.san_clustername, - "connection_type": self.configuration.connection_type, - "chap": self.configuration.chap, - "management_ips": self.configuration.management_ips - }, - LOG, - exception, - driver=self, - active_backend_id=active_backend_id) - - def do_setup(self, context): - """Setup and verify connection to IBM Storage.""" - - self.proxy.setup(context) - - def ensure_export(self, context, volume): - """Ensure an export.""" - - return self.proxy.ensure_export(context, volume) - - def create_export(self, context, volume, connector): - """Create an export.""" - - return self.proxy.create_export(context, volume) - - def create_volume(self, volume): - """Create a volume on the IBM Storage system.""" - - return self.proxy.create_volume(volume) - - def delete_volume(self, volume): - """Delete a volume on the IBM Storage system.""" - - self.proxy.delete_volume(volume) - - def remove_export(self, context, volume): - """Disconnect a volume from an attached instance.""" - - return self.proxy.remove_export(context, volume) - - @fczm_utils.add_fc_zone - def 
initialize_connection(self, volume, connector): - """Map the created volume.""" - - return self.proxy.initialize_connection(volume, connector) - - @fczm_utils.remove_fc_zone - def terminate_connection(self, volume, connector, **kwargs): - """Terminate a connection to a volume.""" - - return self.proxy.terminate_connection(volume, connector) - - def create_volume_from_snapshot(self, volume, snapshot): - """Create a volume from a snapshot.""" - - return self.proxy.create_volume_from_snapshot( - volume, - snapshot) - - def create_snapshot(self, snapshot): - """Create a snapshot.""" - - return self.proxy.create_snapshot(snapshot) - - def delete_snapshot(self, snapshot): - """Delete a snapshot.""" - - return self.proxy.delete_snapshot(snapshot) - - def get_volume_stats(self, refresh=False): - """Get volume stats.""" - - return self.proxy.get_volume_stats(refresh) - - def create_cloned_volume(self, tgt_volume, src_volume): - """Create Cloned Volume.""" - - return self.proxy.create_cloned_volume(tgt_volume, src_volume) - - def extend_volume(self, volume, new_size): - """Extend Created Volume.""" - - self.proxy.extend_volume(volume, new_size) - - def migrate_volume(self, context, volume, host): - """Migrate the volume to the specified host.""" - - return self.proxy.migrate_volume(context, volume, host) - - def manage_existing(self, volume, existing_ref): - """Brings an existing backend storage object to Cinder management.""" - - return self.proxy.manage_volume(volume, existing_ref) - - def manage_existing_get_size(self, volume, existing_ref): - """Return size of volume to be managed by manage_existing.""" - - return self.proxy.manage_volume_get_size(volume, existing_ref) - - def unmanage(self, volume): - """Removes the specified volume from Cinder management.""" - - return self.proxy.unmanage_volume(volume) - - def freeze_backend(self, context): - """Notify the backend that it's frozen. 
""" - - return self.proxy.freeze_backend(context) - - def thaw_backend(self, context): - """Notify the backend that it's unfrozen/thawed. """ - - return self.proxy.thaw_backend(context) - - def failover_host(self, context, volumes, secondary_id=None, groups=None): - """Failover a backend to a secondary replication target. """ - - return self.proxy.failover_host( - context, volumes, secondary_id, groups) - - def get_replication_status(self, context, volume): - """Return replication status.""" - - return self.proxy.get_replication_status(context, volume) - - def retype(self, ctxt, volume, new_type, diff, host): - """Convert the volume to be of the new type.""" - - return self.proxy.retype(ctxt, volume, new_type, diff, host) - - def create_group(self, context, group): - """Creates a group.""" - - return self.proxy.create_group(context, group) - - def delete_group(self, context, group, volumes): - """Deletes a group.""" - - return self.proxy.delete_group(context, group, volumes) - - def create_group_snapshot(self, context, group_snapshot, snapshots): - """Creates a group snapshot.""" - - return self.proxy.create_group_snapshot( - context, group_snapshot, snapshots) - - def delete_group_snapshot(self, context, group_snapshot, snapshots): - """Deletes a group snapshot.""" - - return self.proxy.delete_group_snapshot( - context, group_snapshot, snapshots) - - def update_group(self, context, group, add_volumes, remove_volumes): - """Adds or removes volume(s) to/from an existing group.""" - - return self.proxy.update_group( - context, group, add_volumes, remove_volumes) - - def create_group_from_src( - self, context, group, volumes, group_snapshot, snapshots, - source_cg=None, source_vols=None): - """Creates a group from source.""" - - return self.proxy.create_group_from_src( - context, group, volumes, group_snapshot, snapshots, - source_cg, source_vols) - - def enable_replication(self, context, group, volumes): - """Enable replication.""" - - return 
self.proxy.enable_replication(context, group, volumes) - - def disable_replication(self, context, group, volumes): - """Disable replication.""" - - return self.proxy.disable_replication(context, group, volumes) - - def failover_replication(self, context, group, volumes, - secondary_backend_id): - """Failover replication.""" - - return self.proxy.failover_replication(context, group, volumes, - secondary_backend_id) - - def get_replication_error_status(self, context, groups): - """Returns error info for replicated groups and its volumes.""" - - return self.proxy.get_replication_error_status(context, groups) diff --git a/cinder/volume/drivers/ibm/ibm_storage/proxy.py b/cinder/volume/drivers/ibm/ibm_storage/proxy.py deleted file mode 100644 index cbc17f97a..000000000 --- a/cinder/volume/drivers/ibm/ibm_storage/proxy.py +++ /dev/null @@ -1,398 +0,0 @@ -# Copyright (c) 2016 IBM Corporation -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
-# -import functools -import gettext -import inspect -import platform -import six - -from oslo_log import log as logging -from oslo_utils import timeutils - -from cinder.i18n import _ -from cinder import version -import cinder.volume.drivers.ibm.ibm_storage as storage -from cinder.volume.drivers.ibm.ibm_storage import strings - -LOG = logging.getLogger(__name__) -gettext.install('cinder') - - -def get_total_seconds(td): - return (td.microseconds + (td.seconds + td.days * 24 * 3600) * 1e6) / 1e6 - - -def logger(func): - @functools.wraps(func) - def wrapper(*args, **kwargs): - frm = inspect.stack()[1] - log = getattr(inspect.getmodule(frm[0]), 'LOG') - log.debug("Enter %s()", func.__name__) - log.debug("Args: %(args)s %(kwargs)s", - {'args': args, 'kwargs': kwargs}) - result = func(*args, **kwargs) - log.debug("Exit %s()", func.__name__) - log.debug("Return: %s", result) - return result - return wrapper - - -def _trace_time(fnc): - @functools.wraps(fnc) - def wrapper(self, *args, **kwargs): - method = fnc.__name__ - start = timeutils.utcnow() - LOG.debug("Entered '%(method)s' at %(when)s.", - {'method': method, 'when': start}) - result = fnc(self, *args, **kwargs) - current = timeutils.utcnow() - delta = current - start - LOG.debug( - "Exited '%(method)s' at %(when)s, after %(seconds)f seconds.", - {'method': method, 'when': start, - 'seconds': get_total_seconds(delta)}) - return result - return wrapper - - -class IBMStorageProxy(object): - """Base class for connecting to storage. - - Abstract Proxy between the XIV/DS8K Cinder Volume and Spectrum Accelerate - Storage (e.g. 
XIV, Spectruam Accelerate, A9000, A9000R) - """ - - prefix = storage.XIV_LOG_PREFIX - - def __init__(self, storage_info, logger, exception, - driver=None, active_backend_id=None): - """Initialize Proxy.""" - - self.storage_info = storage_info - self.meta = dict() - self.logger = logger - - self.meta['exception'] = exception - self.meta['openstack_version'] = "cinder-%s" % version.version_string() - self.meta['stat'] = None - self.driver = driver - if driver is not None: - self.full_version = "%(title)s (v%(version)s)" % { - 'title': strings.TITLE, - 'version': driver.VERSION} - else: - self.full_version = strings.TITLE - self.active_backend_id = active_backend_id - self.targets = {} - self._read_replication_devices() - self.meta['bypass_connection_check'] = ( - self._get_safely_from_configuration( - storage.FLAG_KEYS['bypass_connection_check'], False)) - - @_trace_time - def setup(self, context): - """Driver setup.""" - pass - - @_trace_time - def create_volume(self, volume): - """Creates a volume.""" - pass - - @_trace_time - def ensure_export(self, context, volume): - ctxt = context.as_dict() if hasattr(context, 'as_dict') else "Empty" - LOG.debug("ensure_export: %(volume)s context : %(ctxt)s", - {'volume': volume['name'], 'ctxt': ctxt}) - return 1 - - @_trace_time - def create_export(self, context, volume): - ctxt = context.as_dict() if hasattr(context, 'as_dict') else "Empty" - LOG.debug("create_export: %(volume)s context : %(ctxt)s", - {'volume': volume['name'], 'ctxt': ctxt}) - - return {} - - @_trace_time - def delete_volume(self, volume): - """Deletes a volume on the IBM Storage machine.""" - pass - - @_trace_time - def remove_export(self, context, volume): - """Remove export. 
- - Disconnect a volume from an attached instance - """ - ctxt = context.as_dict() if hasattr(context, 'as_dict') else "Empty" - LOG.debug("remove_export: %(volume)s context : %(ctxt)s", - {'volume': volume['name'], 'ctxt': ctxt}) - - @_trace_time - def initialize_connection(self, volume, connector): - """Initialize connection. - - Maps the created volume to the cinder volume node, - and returns the iSCSI/FC targets to be used in the instance - """ - pass - - @_trace_time - def terminate_connection(self, volume, connector): - """Terminate connection.""" - pass - - @_trace_time - def create_volume_from_snapshot(self, volume, snapshot): - """create volume from snapshot.""" - pass - - @_trace_time - def create_snapshot(self, snapshot): - """create snapshot""" - pass - - @_trace_time - def delete_snapshot(self, snapshot): - """delete snapshot.""" - pass - - @_trace_time - def get_volume_stats(self, refresh=False): - """get volume stats.""" - if self.meta['stat'] is None or refresh: - self._update_stats() - return self.meta['stat'] - - @_trace_time - def _update_stats(self): - """fetch and update stats.""" - pass - - @_trace_time - def check_for_export(self, context, volume_id): - pass - - @_trace_time - def copy_volume_to_image(self, context, volume, image_service, image_id): - """Copy volume to image. 
- - Handled by ISCSiDriver - """ - LOG.info("The copy_volume_to_image feature is not implemented.") - raise NotImplementedError() - - @_trace_time - def create_cloned_volume(self, volume, src_vref): - """Create cloned volume.""" - pass - - @_trace_time - def volume_exists(self, volume): - """Checks if a volume exists on xiv.""" - pass - - @_trace_time - def validate_connection(self): - """Validates ibm_storage connection info.""" - pass - - @_trace_time - def retype(self, ctxt, volume, new_type, diff, host): - """Convert the volume to be of the new type.""" - pass - - @_trace_time - def _get_bunch_from_host( - self, connector, host_id=0, host_name=None, chap=None): - """Get's a Bunch describing a host""" - if not host_name: - LOG.debug("Connector %(conn)s", {'conn': connector}) - current_host_name = host_name or storage.get_host_or_create_from_iqn( - connector) - initiator = connector.get('initiator', None) - wwpns = connector.get("wwpns", []) - if len(wwpns) == 0 and "wwnns" in connector: - wwpns = connector.get("wwns", []) - - return {'name': current_host_name, - 'initiator': initiator, - 'id': host_id, - 'wwpns': wwpns, - 'chap': chap} - - @_trace_time - def _get_os_type(self): - """Gets a string representation of the current os""" - dist = platform.dist() - return "%s-%s-%s" % (dist[0], dist[1], platform.processor()) - - def _log(self, level, message, **kwargs): - """Wrapper around the logger""" - to_log = _(self.prefix + message) # NOQA - if len(kwargs) > 0: - to_log = to_log % kwargs - getattr(self.logger, level)(to_log) - - def _get_exception(self): - """Get's Cinder exception""" - return self.meta['exception'].CinderException - - def _get_code_and_status_or_message(self, exception): - """Returns status message - - returns a string made out of code and status if present, else message - """ - - if (getattr(exception, "code", None) is not None and - getattr(exception, "status", None) is not None): - return "Status: '%s', Code: %s" % ( - exception.status, 
exception.code) - - return six.text_type(exception) - - def _get_driver_super(self): - """Gets the IBM Storage Drivers super class - - returns driver super class - """ - return super(self.driver.__class__, self.driver) - - def _get_connection_type(self): - """Get Connection Type(iscsi|fibre_channel) - - :returns: iscsi|fibre_channel - """ - return self._get_safely_from_configuration( - storage.CONF_KEYS['connection_type'], - default=storage.XIV_CONNECTION_TYPE_ISCSI) - - def _is_iscsi(self): - """Checks if connection type is iscsi""" - connection_type = self._get_connection_type() - return connection_type == storage.XIV_CONNECTION_TYPE_ISCSI - - def _get_management_ips(self): - """Gets the management IP addresses from conf""" - return self._get_safely_from_configuration( - storage.CONF_KEYS['management_ips'], - default='') - - def _get_chap_type(self): - """Get CHAP Type(disabled|enabled) - - :returns: disabled|enabled - """ - LOG.debug("_get_chap_type chap: %(chap)s", - {'chap': storage.CONF_KEYS['chap']}) - return self._get_safely_from_configuration( - storage.CONF_KEYS['chap'], - default=storage.CHAP_NONE) - - def _get_safely_from_configuration(self, key, default=None): - """Get value of key from configuration - - Get's a key from the backend configuration if available. - If not available returns default value - """ - if not self.driver: - LOG.debug("self.driver is missing") - return default - config_value = self.driver.configuration.safe_get(key) - if not config_value: - LOG.debug("missing key %(key)s ", {'key': key}) - return default - return config_value - - # Backend_id values: - # - The primary backend_id is marked 'default' - # - The secondary backend_ids are the values of the targets. - # - In most cases the given value is one of the above, but in some cases - # it can be None. 
For example in failover_host, the value None means - # that the function should select a target by itself (consider multiple - # targets) - - def _get_primary_backend_id(self): - return strings.PRIMARY_BACKEND_ID - - def _get_secondary_backend_id(self): - return self._get_target() - - def _get_active_backend_id(self): - if self.active_backend_id == strings.PRIMARY_BACKEND_ID: - return self._get_primary_backend_id() - else: - return self._get_secondary_backend_id() - - def _get_inactive_backend_id(self): - if self.active_backend_id != strings.PRIMARY_BACKEND_ID: - return self._get_primary_backend_id() - else: - return self._get_secondary_backend_id() - - def _get_target_params(self, target): - if not self.targets: - LOG.debug("No targets available") - return None - try: - params = self.targets[target] - return params - except Exception: - LOG.debug("No target called '%(target)s'", {'target': target}) - return None - - def _get_target(self): - """returns an arbitrary target if available""" - if not self.targets: - return None - try: - target = self.targets.iterkeys().next() - return target - except Exception: - return None - - @_trace_time - def _read_replication_devices(self): - """Read replication devices from configuration - - Several replication devices are permitted. - If an entry already exists an error is assumed. - - The format is: - replication_device = backend_id:vendor-id-1,unique_key:val.... 
- """ - if not self.driver: - return - replication_devices = self._get_safely_from_configuration( - 'replication_device', default={}) - if not replication_devices: - LOG.debug('No replication devices were found') - for dev in replication_devices: - LOG.debug('Replication device found: %(dev)s', {'dev': dev}) - backend_id = dev.get('backend_id', None) - if backend_id is None: - LOG.error("Replication is missing backend_id: %(dev)s", - {'dev': dev}) - elif self.targets.get(backend_id, None): - LOG.error("Multiple entries for replication %(dev)s", - {'dev': dev}) - else: - self.targets[backend_id] = {} - device = self.targets[backend_id] - for k, v in dev.iteritems(): - if k != 'backend_id': - device[k] = v diff --git a/cinder/volume/drivers/ibm/ibm_storage/strings.py b/cinder/volume/drivers/ibm/ibm_storage/strings.py deleted file mode 100644 index fd6bc616d..000000000 --- a/cinder/volume/drivers/ibm/ibm_storage/strings.py +++ /dev/null @@ -1,44 +0,0 @@ -# Copyright (c) 2016 IBM Corporation -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
-# - -# General -TITLE = "IBM Storage" -DEFAULT = "Default" - -# PROMPTS -CERTIFICATES_PATH = "/opt/ibm/ds8k_certs/" - -# DEFAULT INSTALLED VALUES -XIV_BACKEND_PREFIX = "IBM-XIV" -DS8K_BACKEND_PREFIX = "IBM-DS8K" - -# Replication Status Strings -REPLICATION_STATUS_DISABLED = 'disabled' # no replication -REPLICATION_STATUS_ERROR = 'error' # replication in error state -# replication copying data to secondary (inconsistent) -REPLICATION_STATUS_COPYING = 'copying' -# replication copying data to secondary (consistent) -REPLICATION_STATUS_ACTIVE = 'active' -# replication copying data to secondary (consistent) -REPLICATION_STATUS_ACTIVE_STOPPED = 'active-stopped' -# replication copying data to secondary (consistent) -REPLICATION_STATUS_INACTIVE = 'inactive' - -# Replication Failback String -PRIMARY_BACKEND_ID = 'default' - -# Volume Extra Metadata Default Value -METADATA_IS_TRUE = ' TRUE' diff --git a/cinder/volume/drivers/ibm/ibm_storage/xiv_proxy.py b/cinder/volume/drivers/ibm/ibm_storage/xiv_proxy.py deleted file mode 100644 index 349e82f57..000000000 --- a/cinder/volume/drivers/ibm/ibm_storage/xiv_proxy.py +++ /dev/null @@ -1,2722 +0,0 @@ -# Copyright (c) 2016 IBM Corporation -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
-# -import datetime -import re -import six -import socket - - -from oslo_log import log as logging -from oslo_utils import importutils - -pyxcli = importutils.try_import("pyxcli") -if pyxcli: - from pyxcli import client - from pyxcli import errors - from pyxcli.events import events - from pyxcli.mirroring import mirrored_entities - from pyxcli import transports - -from cinder import context -from cinder.i18n import _ -from cinder.objects import fields -from cinder import volume as c_volume -import cinder.volume.drivers.ibm.ibm_storage as storage -from cinder.volume.drivers.ibm.ibm_storage import certificate -from cinder.volume.drivers.ibm.ibm_storage import cryptish -from cinder.volume.drivers.ibm.ibm_storage import proxy -from cinder.volume.drivers.ibm.ibm_storage import strings -from cinder.volume.drivers.ibm.ibm_storage import xiv_replication as repl -from cinder.volume import group_types -from cinder.volume import qos_specs -from cinder.volume import utils -from cinder.volume import volume_types - - -OPENSTACK_PRODUCT_NAME = "OpenStack" -PERF_CLASS_NAME_PREFIX = "cinder-qos" -HOST_BAD_NAME = "HOST_BAD_NAME" -VOLUME_IS_MAPPED = "VOLUME_IS_MAPPED" -CONNECTIONS_PER_MODULE = 2 -MIN_LUNID = 1 -MAX_LUNID = 511 -SYNC = 'sync' -ASYNC = 'async' -SYNC_TIMEOUT = 300 -SYNCHED_STATES = ['synchronized', 'rpo ok'] - -LOG = logging.getLogger(__name__) - -# performance class strings - used in exceptions -PERF_CLASS_ERROR = _("Unable to create or get performance class: %(details)s") -PERF_CLASS_ADD_ERROR = _("Unable to add volume to performance class: " - "%(details)s") -PERF_CLASS_VALUES_ERROR = _("A performance class with the same name but " - "different values exists: %(details)s") - -# setup strings - used in exceptions -SETUP_BASE_ERROR = _("Unable to connect to %(title)s: %(details)s") -SETUP_INVALID_ADDRESS = _("Unable to connect to the storage system " - "at '%(address)s', invalid address.") - -# create volume strings - used in exceptions -CREATE_VOLUME_BASE_ERROR = 
_("Unable to create volume: %(details)s") - -# initialize connection strings - used in exceptions -CONNECTIVITY_FC_NO_TARGETS = _("Unable to detect FC connection between the " - "compute host and the storage, please ensure " - "that zoning is set up correctly.") - -# terminate connection strings - used in logging -TERMINATE_CONNECTION_BASE_ERROR = ("Unable to terminate the connection " - "for volume '%(volume)s': %(error)s.") -TERMINATE_CONNECTION_HOST_ERROR = ("Terminate connection for volume " - "'%(volume)s': for volume '%(volume)s': " - "%(host)s %(error)s.") - -# delete volume strings - used in logging -DELETE_VOLUME_BASE_ERROR = ("Unable to delete volume '%(volume)s': " - "%(error)s.") - -# manage volume strings - used in exceptions -MANAGE_VOLUME_BASE_ERROR = _("Unable to manage the volume '%(volume)s': " - "%(error)s.") - - -class XIVProxy(proxy.IBMStorageProxy): - """Proxy between the Cinder Volume and Spectrum Accelerate Storage. - - Supports IBM XIV, Spectrum Accelerate, A9000, A9000R - Version: 2.1.0 - Required pyxcli version: 1.1.4 - - .. 
code:: text - - 2.0 - First open source driver version - 2.1.0 - Support Consistency groups through Generic volume groups - - Support XIV/A9000 Volume independent QoS - - Support groups replication - - """ - - def __init__(self, storage_info, logger, exception, - driver=None, active_backend_id=None): - """Initialize Proxy.""" - if not active_backend_id: - active_backend_id = strings.PRIMARY_BACKEND_ID - proxy.IBMStorageProxy.__init__( - self, storage_info, logger, exception, driver, active_backend_id) - LOG.info("__init__: storage_info: %(keys)s", - {'keys': self.storage_info}) - if active_backend_id: - LOG.info("__init__: active_backend_id: %(id)s", - {'id': active_backend_id}) - self.ibm_storage_cli = None - self.meta['ibm_storage_portal'] = None - self.meta['ibm_storage_iqn'] = None - self.ibm_storage_remote_cli = None - self.meta['ibm_storage_fc_targets'] = [] - self.meta['storage_version'] = None - self.system_id = None - - @proxy._trace_time - def setup(self, context): - """Connect ssl client.""" - LOG.info("Setting up connection to %(title)s...\n" - "Active backend_id: '%(id)s'.", - {'title': strings.TITLE, - 'id': self.active_backend_id}) - - self.ibm_storage_cli = self._init_xcli(self.active_backend_id) - - if self._get_connection_type() == storage.XIV_CONNECTION_TYPE_ISCSI: - self.meta['ibm_storage_iqn'] = ( - self._call_xiv_xcli("config_get"). - as_dict('name')['iscsi_name'].value) - - portals = storage.get_online_iscsi_ports(self.ibm_storage_cli) - if len(portals) == 0: - msg = (SETUP_BASE_ERROR, - {'title': strings.TITLE, - 'details': "No iSCSI portals available on the Storage." 
- }) - raise self._get_exception()( - _("%(prefix)s %(portals)s") % - {'prefix': storage.XIV_LOG_PREFIX, - 'portals': msg}) - - self.meta['ibm_storage_portal'] = "%s:3260" % portals[:1][0] - - remote_id = self._get_secondary_backend_id() - if remote_id: - self.ibm_storage_remote_cli = self._init_xcli(remote_id) - self._event_service_start() - self._update_stats() - LOG.info("IBM Storage %(common_ver)s " - "xiv_proxy %(proxy_ver)s. ", - {'common_ver': self.full_version, - 'proxy_ver': self.full_version}) - self._update_system_id() - if remote_id: - self._update_active_schedule_objects() - self._update_remote_schedule_objects() - LOG.info("Connection to the IBM storage " - "system established successfully.") - - @proxy._trace_time - def _update_active_schedule_objects(self): - """Set schedule objects on active backend. - - The value 00:20:00 is covered in XIV by a pre-defined object named - min_interval. - """ - schedules = self._call_xiv_xcli("schedule_list").as_dict('name') - for rate in repl.Replication.async_rates: - if rate.schedule == '00:00:20': - continue - name = rate.schedule_name - schedule = schedules.get(name, None) - if schedule: - LOG.debug('Exists on local backend %(sch)s', {'sch': name}) - interval = schedule.get('interval', '') - if interval != rate.schedule: - msg = (_("Schedule %(sch)s exists with incorrect " - "value %(int)s") - % {'sch': name, 'int': interval}) - LOG.error(msg) - raise self.meta['exception'].VolumeBackendAPIException( - data=msg) - else: - LOG.debug('create %(sch)s', {'sch': name}) - try: - self._call_xiv_xcli("schedule_create", - schedule=name, type='interval', - interval=rate.schedule) - except errors.XCLIError: - msg = (_("Setting up Async mirroring failed, " - "schedule %(sch)s is not supported on system: " - " %(id)s.") - % {'sch': name, 'id': self.system_id}) - LOG.error(msg) - raise self.meta['exception'].VolumeBackendAPIException( - data=msg) - - @proxy._trace_time - def _update_remote_schedule_objects(self): - """Set 
schedule objects on remote backend. - - The value 00:20:00 is covered in XIV by a pre-defined object named - min_interval. - """ - schedules = self._call_remote_xiv_xcli("schedule_list").as_dict('name') - for rate in repl.Replication.async_rates: - if rate.schedule == '00:00:20': - continue - name = rate.schedule_name - if schedules.get(name, None): - LOG.debug('Exists on remote backend %(sch)s', {'sch': name}) - interval = schedules.get(name, None)['interval'] - if interval != rate.schedule: - msg = (_("Schedule %(sch)s exists with incorrect " - "value %(int)s") - % {'sch': name, 'int': interval}) - LOG.error(msg) - raise self.meta['exception'].VolumeBackendAPIException( - data=msg) - else: - try: - self._call_remote_xiv_xcli("schedule_create", - schedule=name, type='interval', - interval=rate.schedule) - except errors.XCLIError: - msg = (_("Setting up Async mirroring failed, " - "schedule %(sch)s is not supported on system: " - " %(id)s.") - % {'sch': name, 'id': self.system_id}) - LOG.error(msg) - raise self.meta['exception'].VolumeBackendAPIException( - data=msg) - - def _get_extra_specs(self, type_id): - """get extra specs to match the type_id - - type_id can derive from volume or from consistency_group - """ - if type_id is None: - return {} - return c_volume.volume_types.get_volume_type_extra_specs(type_id) - - def _update_system_id(self): - if self.system_id: - return - local_ibm_storage_cli = self._init_xcli(strings.PRIMARY_BACKEND_ID) - if not local_ibm_storage_cli: - LOG.error('Failed to connect to main backend. 
' - 'Cannot retrieve main backend system_id') - return - system_id = local_ibm_storage_cli.cmd.config_get().as_dict( - 'name')['system_id'].value - LOG.debug('system_id: %(id)s', {'id': system_id}) - self.system_id = system_id - - @proxy._trace_time - def _get_qos_specs(self, type_id): - """Gets the qos specs from cinder.""" - ctxt = context.get_admin_context() - volume_type = volume_types.get_volume_type(ctxt, type_id) - if not volume_type: - return None - qos_specs_id = volume_type.get('qos_specs_id', None) - if qos_specs_id: - return qos_specs.get_qos_specs( - ctxt, qos_specs_id).get('specs', None) - return None - - @proxy._trace_time - def _qos_create_kwargs_for_xcli(self, specs): - args = {} - for key in specs: - if key == 'bw': - args['max_bw_rate'] = specs[key] - if key == 'iops': - args['max_io_rate'] = specs[key] - return args - - def _qos_remove_vol(self, volume): - try: - self._call_xiv_xcli("perf_class_remove_vol", - vol=volume['name']) - - except errors.VolumeNotConnectedToPerfClassError as e: - details = self._get_code_and_status_or_message(e) - LOG.debug(details) - return True - except errors.XCLIError as e: - details = self._get_code_and_status_or_message(e) - msg_data = (_("Unable to add volume to performance " - "class: %(details)s") % {'details': details}) - LOG.error(msg_data) - raise self.meta['exception'].VolumeBackendAPIException( - data=msg_data) - return True - - def _qos_add_vol(self, volume, perf_class_name): - try: - self._call_xiv_xcli("perf_class_add_vol", - vol=volume['name'], - perf_class=perf_class_name) - except errors.VolumeAlreadyInPerfClassError as e: - details = self._get_code_and_status_or_message(e) - LOG.debug(details) - return True - except errors.XCLIError as e: - details = self._get_code_and_status_or_message(e) - msg = PERF_CLASS_ADD_ERROR % {'details': details} - LOG.error(msg) - raise self.meta['exception'].VolumeBackendAPIException(data=msg) - return True - - def _check_perf_class_on_backend(self, specs): - 
"""Checking if class exists on backend. if not - create it.""" - perf_class_name = PERF_CLASS_NAME_PREFIX - if specs is None or specs == {}: - return '' - - for key, value in specs.items(): - perf_class_name += '_' + key + '_' + value - - try: - classes_list = self._call_xiv_xcli("perf_class_list", - perf_class=perf_class_name - ).as_list - - # list is not empty, check if class has the right values - for perf_class in classes_list: - if (not perf_class.get('max_iops', - None) == specs.get('iops', '0') or - not perf_class.get('max_bw', - None) == specs.get('bw', '0')): - raise self.meta['exception'].VolumeBackendAPIException( - data=PERF_CLASS_VALUES_ERROR % - {'details': perf_class_name}) - except errors.XCLIError as e: - details = self._get_code_and_status_or_message(e) - msg = PERF_CLASS_ERROR % {'details': details} - LOG.error(msg) - raise self.meta['exception'].VolumeBackendAPIException(data=msg) - - # class does not exist, create it - if not classes_list: - self._create_qos_class(perf_class_name, specs) - return perf_class_name - - def _get_type_from_perf_class_name(self, perf_class_name): - _type = re.findall('type_(independent|shared)', perf_class_name) - return _type[0] if _type else None - - def _create_qos_class(self, perf_class_name, specs): - """Create the qos class on the backend.""" - try: - # check if we have a shared (default) perf class - # or an independent perf class - _type = self._get_type_from_perf_class_name(perf_class_name) - if _type: - self._call_xiv_xcli("perf_class_create", - perf_class=perf_class_name, - type=_type) - else: - self._call_xiv_xcli("perf_class_create", - perf_class=perf_class_name) - - except errors.XCLIError as e: - details = self._get_code_and_status_or_message(e) - msg = PERF_CLASS_ERROR % {'details': details} - LOG.error(msg) - raise self.meta['exception'].VolumeBackendAPIException(data=msg) - - try: - args = self._qos_create_kwargs_for_xcli(specs) - self._call_xiv_xcli("perf_class_set_rate", - 
perf_class=perf_class_name, - **args) - return perf_class_name - except errors.XCLIError as e: - details = self._get_code_and_status_or_message(e) - # attempt to clean up - self._call_xiv_xcli("perf_class_delete", - perf_class=perf_class_name) - msg = PERF_CLASS_ERROR % {'details': details} - LOG.error(msg) - raise self.meta['exception'].VolumeBackendAPIException(data=msg) - - def _qos_specs_from_volume(self, volume): - """Returns qos_specs of volume. - - checks if there is a type on the volume - if so, checks if it has been associated with a qos class - returns the name of that class - """ - type_id = volume.get('volume_type_id', None) - if not type_id: - return None - return self._get_qos_specs(type_id) - - def _get_replication_info(self, specs): - - info, msg = repl.Replication.extract_replication_info_from_specs(specs) - if not info: - LOG.error(msg) - raise self._get_exception()(message=msg) - - return info - - @proxy._trace_time - def _create_volume(self, volume): - """Internal implementation to create a volume.""" - size = storage.gigabytes_to_blocks(float(volume['size'])) - pool = self.storage_info[storage.FLAG_KEYS['storage_pool']] - try: - self._call_xiv_xcli( - "vol_create", vol=volume['name'], size_blocks=size, pool=pool) - except errors.SystemOutOfSpaceError: - msg = _("Unable to create volume: System is out of space.") - LOG.error(msg) - raise self._get_exception()(msg) - except errors.PoolOutOfSpaceError: - msg = (_("Unable to create volume: pool '%(pool)s' is " - "out of space.") - % {'pool': pool}) - LOG.error(msg) - raise self._get_exception()(msg) - except errors.XCLIError as e: - details = self._get_code_and_status_or_message(e) - msg = (CREATE_VOLUME_BASE_ERROR, {'details': details}) - LOG.error(msg) - raise self.meta['exception'].VolumeBackendAPIException(data=msg) - - @proxy._trace_time - def create_volume(self, volume): - """Creates a volume.""" - # read replication information - specs = self._get_extra_specs(volume.get('volume_type_id', 
None)) - replication_info = self._get_replication_info(specs) - - self._create_volume(volume) - return self.handle_created_vol_properties(replication_info, - volume) - - def handle_created_vol_properties(self, replication_info, volume): - volume_update = {} - - LOG.debug('checking replication_info %(rep)s', - {'rep': replication_info}) - volume_update['replication_status'] = 'disabled' - cg = volume.group and utils.is_group_a_cg_snapshot_type(volume.group) - if replication_info['enabled']: - try: - repl.VolumeReplication(self).create_replication( - volume.name, replication_info) - except Exception as e: - details = self._get_code_and_status_or_message(e) - msg = ('Failed create_replication for ' - 'volume %(vol)s: %(err)s', - {'vol': volume['name'], 'err': details}) - LOG.error(msg) - if cg: - cg_name = self._cg_name_from_volume(volume) - self._silent_delete_volume_from_cg(volume, cg_name) - self._silent_delete_volume(volume=volume) - raise - volume_update['replication_status'] = 'enabled' - - if cg: - if volume.group.is_replicated: - # for replicated Consistency Group: - # The Volume must be mirrored, and its mirroring settings must - # be identical to those of the Consistency Group: - # mirroring type (e.g., synchronous), - # mirroring status, mirroring target(backend) - group_specs = group_types.get_group_type_specs( - volume.group.group_type_id) - group_rep_info = self._get_replication_info(group_specs) - - msg = None - if volume_update['replication_status'] != 'enabled': - msg = ('Cannot add non-replicated volume into' - ' replicated group') - elif replication_info['mode'] != group_rep_info['mode']: - msg = ('Volume replication type and Group replication type' - ' should be the same') - elif volume.host != volume.group.host: - msg = 'Cannot add volume to Group on different host' - else: - group_name = self._cg_name_from_group(volume.group) - me = mirrored_entities.MirroredEntities( - self.ibm_storage_cli) - me_objs = me.get_mirror_resources_by_name_map() - 
vol_sync_state = me_objs['volumes'][volume.name].sync_state - cg_sync_state = me_objs['cgs'][group_name].sync_state - - if (vol_sync_state != 'Synchronized' or - cg_sync_state != 'Synchronized'): - msg = ('Cannot add volume to Group. Both volume and ' - 'group should have sync_state = Synchronized') - if msg: - LOG.error(msg) - raise self.meta['exception'].VolumeBackendAPIException( - data=msg) - try: - cg_name = self._cg_name_from_volume(volume) - self._call_xiv_xcli( - "cg_add_vol", vol=volume['name'], cg=cg_name) - except errors.XCLIError as e: - details = self._get_code_and_status_or_message(e) - self._silent_delete_volume(volume=volume) - msg = (CREATE_VOLUME_BASE_ERROR, {'details': details}) - LOG.error(msg) - raise self.meta['exception'].VolumeBackendAPIException( - data=msg) - - perf_class_name = None - specs = self._qos_specs_from_volume(volume) - if specs: - try: - perf_class_name = self._check_perf_class_on_backend(specs) - if perf_class_name: - self._call_xiv_xcli("perf_class_add_vol", - vol=volume['name'], - perf_class=perf_class_name) - except errors.XCLIError as e: - details = self._get_code_and_status_or_message(e) - if cg: - cg_name = self._cg_name_from_volume(volume) - self._silent_delete_volume_from_cg(volume, cg_name) - self._silent_delete_volume(volume=volume) - msg = PERF_CLASS_ADD_ERROR % {'details': details} - LOG.error(msg) - raise self.meta['exception'].VolumeBackendAPIException( - data=msg) - - return volume_update - - def get_group_specs_by_group_resource(self, context, group): - group_type = group.get('group_type_id', None) - if group_type is None: - msg = ('No group specs inside group type.') - return None, msg - group_specs = group_types.get_group_type_specs(group_type) - keyword = 'consistent_group_replication_enabled' - if not group_specs.get(keyword) == ' True': - msg = ('No cg replication field in group specs.') - return None, msg - return group_specs, '' - - @proxy._trace_time - def enable_replication(self, context, group, 
volumes): - """Enable cg replication""" - # fetch replication info - group_specs = group_types.get_group_type_specs(group.group_type_id) - if not group_specs: - msg = 'No group specs inside group type' - LOG.error(msg) - raise self.meta['exception'].VolumeBackendAPIException(data=msg) - - # Add this field to adjust it to generic replication (for volumes) - replication_info = self._get_replication_info(group_specs) - if utils.is_group_a_cg_snapshot_type(group): - # take every vol out of cg - we can't mirror the cg otherwise. - if volumes: - self._update_consistencygroup(context, group, - remove_volumes=volumes) - for volume in volumes: - repl.VolumeReplication(self).create_replication( - volume.name, replication_info) - - # mirror entire group - group_name = self._cg_name_from_group(group) - self._create_consistencygroup_on_remote(context, group_name) - repl.GroupReplication(self).create_replication(group_name, - replication_info) - - updated_volumes = [] - if volumes: - # add volumes back to cg - self._update_consistencygroup(context, group, - add_volumes=volumes) - for volume in volumes: - updated_volumes.append( - {'id': volume['id'], - 'replication_status': - fields.ReplicationStatus.ENABLED}) - return ({'replication_status': fields.ReplicationStatus.ENABLED}, - updated_volumes) - else: - # For generic groups we replicate all the volumes - updated_volumes = [] - for volume in volumes: - repl.VolumeReplication(self).create_replication( - volume.name, replication_info) - - # update status - for volume in volumes: - updated_volumes.append( - {'id': volume['id'], - 'replication_status': fields.ReplicationStatus.ENABLED}) - return ({'replication_status': fields.ReplicationStatus.ENABLED}, - updated_volumes) - - @proxy._trace_time - def disable_replication(self, context, group, volumes): - """disables CG replication""" - group_specs = group_types.get_group_type_specs(group.group_type_id) - if not group_specs: - msg = 'No group specs inside group type' - LOG.error(msg) 
- raise self.meta['exception'].VolumeBackendAPIException(data=msg) - - replication_info = self._get_replication_info(group_specs) - updated_volumes = [] - if utils.is_group_a_cg_snapshot_type(group): - # one call deletes replication for cgs and volumes together. - repl.GroupReplication(self).delete_replication(group, - replication_info) - for volume in volumes: - # xiv locks volumes after deletion of replication. - # we need to unlock it for further use. - try: - self.ibm_storage_cli.cmd.vol_unlock(vol=volume.name) - except errors.XCLIError as e: - details = self._get_code_and_status_or_message(e) - msg = ('Failed to unlock volumes %(details)s' % - {'details': details}) - LOG.error(msg) - raise self.meta['exception'].VolumeBackendAPIException( - data=msg) - updated_volumes.append( - {'id': volume.id, - 'replication_status': fields.ReplicationStatus.DISABLED}) - else: - # For generic groups we replicate all the volumes - updated_volumes = [] - for volume in volumes: - repl.VolumeReplication(self).delete_replication( - volume.name, replication_info) - - # update status - for volume in volumes: - updated_volumes.append( - {'id': volume['id'], - 'replication_status': fields.ReplicationStatus.DISABLED}) - return ({'replication_status': fields.ReplicationStatus.DISABLED}, - updated_volumes) - - def get_secondary_backend_id(self, secondary_backend_id): - if secondary_backend_id is None: - secondary_backend_id = self._get_target() - if secondary_backend_id is None: - msg = _("No targets defined. 
Can't perform failover.") - LOG.error(msg) - raise self.meta['exception'].VolumeBackendAPIException( - data=msg) - return secondary_backend_id - - def check_for_splitbrain(self, volumes, pool_master, pool_slave): - if volumes: - # check for split brain situations - # check for files that are available on both volumes - # and are not in an active mirroring relation - split_brain = self._potential_split_brain( - self.ibm_storage_cli, - self.ibm_storage_remote_cli, - volumes, pool_master, - pool_slave) - if split_brain: - # if such a situation exists stop and raise an exception! - msg = (_("A potential split brain condition has been found " - "with the following volumes: \n'%(volumes)s.'") % - {'volumes': split_brain}) - LOG.error(msg) - raise self.meta['exception'].VolumeBackendAPIException( - data=msg) - - def failover_replication(self, context, group, volumes, - secondary_backend_id): - """Failover a cg with all it's volumes. - - if secondery_id is default, cg needs to be failed back. - - """ - volumes_updated = [] - goal_status = '' - pool_master = None - group_updated = {'replication_status': group.replication_status} - LOG.info("failover_replication: of cg %(cg)s " - "from %(active)s to %(id)s", - {'cg': group.get('name'), - 'active': self.active_backend_id, - 'id': secondary_backend_id}) - if secondary_backend_id == strings.PRIMARY_BACKEND_ID: - # default as active backend id - if self._using_default_backend(): - LOG.info("CG has been failed back. " - "No need to fail back again.") - return group_updated, volumes_updated - # get the master pool, not using default id. - pool_master = self._get_target_params( - self.active_backend_id)['san_clustername'] - pool_slave = self.storage_info[storage.FLAG_KEYS['storage_pool']] - goal_status = 'available' - else: - if self._using_default_backend(): - LOG.info("cg already failed over.") - return group_updated, volumes_updated - # using same api as Cheesecake, we need - # replciation_device entry. so we use get_targets. 
- secondary_backend_id = self.get_secondary_backend_id( - secondary_backend_id) - pool_master = self.storage_info[storage.FLAG_KEYS['storage_pool']] - pool_slave = self._get_target_params( - secondary_backend_id)['san_clustername'] - goal_status = fields.ReplicationStatus.FAILED_OVER - # we should have secondary_backend_id by here. - self.ibm_storage_remote_cli = self._init_xcli(secondary_backend_id) - - # check for split brain in mirrored volumes - self.check_for_splitbrain(volumes, pool_master, pool_slave) - group_specs, msg = self.get_group_specs_by_group_resource(context, - group) - if group_specs is None: - LOG.error(msg) - raise self.meta['exception'].VolumeBackendAPIException(data=msg) - - failback = (secondary_backend_id == strings.PRIMARY_BACKEND_ID) - - result, details = repl.GroupReplication.failover(group, failback) - - if result: - status = goal_status - group_updated['replication_status'] = status - else: - status = 'error' - updates = {'status': status} - if status == 'error': - group_updated['replication_extended_status'] = details - # if replication on cg was successful, then all of the volumes - # have been successfully replicated as well. - for volume in volumes: - volumes_updated.append({ - 'volume_id': volume.id, - 'updates': updates - }) - # replace between active and secondary xcli - self._replace_xcli_to_remote_xcli() - - return group_updated, volumes_updated - - def _replace_xcli_to_remote_xcli(self): - temp_ibm_storage_cli = self.ibm_storage_cli - self.ibm_storage_cli = self.ibm_storage_remote_cli - self.ibm_storage_remote_cli = temp_ibm_storage_cli - - def _get_replication_target_params(self): - LOG.debug('_get_replication_target_params.') - if not self.targets: - msg = _("No targets available for replication") - LOG.error(msg) - raise self.meta['exception'].VolumeBackendAPIException(data=msg) - no_of_targets = len(self.targets) - if no_of_targets > 1: - msg = _("Too many targets configured. 
Only one is supported") - LOG.error(msg) - raise self.meta['exception'].VolumeBackendAPIException(data=msg) - - LOG.debug('_get_replication_target_params selecting target...') - target = self._get_target() - if not target: - msg = _("No targets available for replication.") - LOG.error(msg) - raise self.meta['exception'].VolumeBackendAPIException(data=msg) - params = self._get_target_params(target) - if not params: - msg = (_("Missing target information for target '%(target)s'"), - {'target': target}) - LOG.error(msg) - raise self.meta['exception'].VolumeBackendAPIException(data=msg) - return target, params - - def _delete_volume(self, vol_name): - """Deletes a volume on the Storage.""" - LOG.debug("_delete_volume: %(volume)s", - {'volume': vol_name}) - try: - self._call_xiv_xcli("vol_delete", vol=vol_name) - except errors.VolumeBadNameError: - # Don't throw error here, allow the cinder volume manager - # to set the volume as deleted if it's not available - # on the XIV box - LOG.info("Volume '%(volume)s' not found on storage", - {'volume': vol_name}) - - def _silent_delete_volume(self, volume): - """Silently delete a volume. - - silently delete a volume in case of an immediate failure - within a function that created it. - """ - try: - self._delete_volume(vol_name=volume['name']) - except errors.XCLIError as e: - error = self._get_code_and_status_or_message(e) - LOG.error(DELETE_VOLUME_BASE_ERROR, - {'volume': volume['name'], 'error': error}) - - def _silent_delete_volume_from_cg(self, volume, cgname): - """Silently delete a volume from CG. - - silently delete a volume in case of an immediate failure - within a function that created it. 
- """ - try: - self._call_xiv_xcli( - "cg_remove_vol", vol=volume['name']) - except errors.XCLIError as e: - LOG.error("Failed removing volume %(vol)s from " - "consistency group %(cg)s: %(err)s", - {'vol': volume['name'], - 'cg': cgname, - 'err': self._get_code_and_status_or_message(e)}) - self._silent_delete_volume(volume=volume) - - @proxy._trace_time - def delete_volume(self, volume): - """Deletes a volume on the Storage machine.""" - LOG.debug("delete_volume: %(volume)s", - {'volume': volume['name']}) - # read replication information - specs = self._get_extra_specs(volume.get('volume_type_id', None)) - replication_info = self._get_replication_info(specs) - if replication_info['enabled']: - try: - repl.VolumeReplication(self).delete_replication( - volume.name, replication_info) - except Exception as e: - error = self._get_code_and_status_or_message(e) - LOG.error(DELETE_VOLUME_BASE_ERROR, - {'volume': volume['name'], 'error': error}) - # continue even if failed - - # attempt to delete volume at target - target = None - try: - target, params = self._get_replication_target_params() - LOG.info('Target %(target)s: %(params)s', - {'target': target, 'params': params}) - except Exception as e: - LOG.error("Unable to delete replicated volume " - "'%(volume)s': %(error)s.", - {'error': self._get_code_and_status_or_message(e), - 'volume': volume['name']}) - if target: - try: - self._call_remote_xiv_xcli( - "vol_delete", vol=volume['name']) - except errors.XCLIError as e: - LOG.error( - "Unable to delete replicated volume " - "'%(volume)s': %(error)s.", - {'error': self._get_code_and_status_or_message(e), - 'volume': volume['name']}) - - try: - self._delete_volume(volume['name']) - except errors.XCLIError as e: - LOG.error(DELETE_VOLUME_BASE_ERROR, - {'volume': volume['name'], - 'error': self._get_code_and_status_or_message(e)}) - - @proxy._trace_time - def initialize_connection(self, volume, connector): - """Initialize connection to instance. 
- - Maps the created volume to the nova volume node, - and returns the iSCSI target to be used in the instance - """ - - connection_type = self._get_connection_type() - LOG.debug("initialize_connection: %(volume)s %(connector)s" - " connection_type: %(connection_type)s", - {'volume': volume['name'], 'connector': connector, - 'connection_type': connection_type}) - - # This call does all the work.. - fc_targets, host = self._get_host_and_fc_targets( - volume, connector) - - lun_id = self._vol_map_and_get_lun_id( - volume, connector, host) - - meta = { - 'driver_volume_type': connection_type, - 'data': { - 'target_discovered': True, - 'target_lun': lun_id, - 'volume_id': volume['id'], - }, - } - if connection_type == storage.XIV_CONNECTION_TYPE_ISCSI: - meta['data']['target_portal'] = self.meta['ibm_storage_portal'] - meta['data']['target_iqn'] = self.meta['ibm_storage_iqn'] - meta['data']['provider_location'] = "%s,1 %s %s" % ( - self.meta['ibm_storage_portal'], - self.meta['ibm_storage_iqn'], lun_id) - - chap_type = self._get_chap_type() - LOG.debug("initialize_connection: %(volume)s." 
- " chap_type:%(chap_type)s", - {'volume': volume['name'], - 'chap_type': chap_type}) - - if chap_type == storage.CHAP_ENABLED: - chap = self._create_chap(host) - meta['data']['auth_method'] = 'CHAP' - meta['data']['auth_username'] = chap[0] - meta['data']['auth_password'] = chap[1] - else: - all_storage_wwpns = self._get_fc_targets(None) - meta['data']['all_storage_wwpns'] = all_storage_wwpns - modules = set() - for wwpn in fc_targets: - modules.add(wwpn[-2]) - meta['data']['recommended_connections'] = ( - len(modules) * CONNECTIONS_PER_MODULE) - meta['data']['target_wwn'] = fc_targets - if fc_targets == []: - fc_targets = all_storage_wwpns - meta['data']['initiator_target_map'] = ( - self._build_initiator_target_map(fc_targets, connector)) - - LOG.debug(six.text_type(meta)) - return meta - - @proxy._trace_time - def terminate_connection(self, volume, connector): - """Terminate connection. - - Unmaps volume. If this is the last connection from the host, undefines - the host from the storage. 
- """ - - LOG.debug("terminate_connection: %(volume)s %(connector)s", - {'volume': volume['name'], 'connector': connector}) - - host = self._get_host(connector) - if host is None: - LOG.error(TERMINATE_CONNECTION_BASE_ERROR, - {'volume': volume['name'], - 'error': "Host not found."}) - return - - fc_targets = {} - if self._get_connection_type() == storage.XIV_CONNECTION_TYPE_FC: - fc_targets = self._get_fc_targets(host) - - try: - self._call_xiv_xcli( - "unmap_vol", - vol=volume['name'], - host=host.get('name')) - except errors.VolumeBadNameError: - LOG.error(TERMINATE_CONNECTION_BASE_ERROR, - {'volume': volume['name'], - 'error': "Volume not found."}) - except errors.XCLIError as err: - details = self._get_code_and_status_or_message(err) - LOG.error(TERMINATE_CONNECTION_BASE_ERROR, - {'volume': volume['name'], - 'error': details}) - - # check if there are still mapped volumes or we can - # remove this host - host_mappings = [] - try: - host_mappings = self._call_xiv_xcli( - "mapping_list", - host=host.get('name')).as_list - if len(host_mappings) == 0: - LOG.info("Terminate connection for volume '%(volume)s': " - "%(host)s %(info)s.", - {'volume': volume['name'], - 'host': host.get('name'), - 'info': "will be deleted"}) - if not self._is_iscsi(): - # The following meta data is provided so that zoning can - # be cleared - - meta = { - 'driver_volume_type': self._get_connection_type(), - 'data': {'volume_id': volume['id'], }, - } - meta['data']['target_wwn'] = fc_targets - meta['data']['initiator_target_map'] = ( - self._build_initiator_target_map(fc_targets, - connector)) - self._call_xiv_xcli("host_delete", host=host.get('name')) - if not self._is_iscsi(): - return meta - return None - else: - LOG.debug(("Host '%(host)s' has additional mapped " - "volumes %(mappings)s"), - {'host': host.get('name'), - 'mappings': host_mappings}) - - except errors.HostBadNameError: - LOG.error(TERMINATE_CONNECTION_HOST_ERROR, - {'volume': volume['name'], - 'host': host.get('name'), 
- 'error': "Host not found."}) - except errors.XCLIError as err: - details = self._get_code_and_status_or_message(err) - LOG.error(TERMINATE_CONNECTION_HOST_ERROR, - {'volume': volume['name'], - 'host': host.get('name'), - 'error': details}) - - def _create_volume_from_snapshot(self, volume, - snapshot_name, snapshot_size): - """Create volume from snapshot internal implementation. - - used for regular snapshot and cgsnapshot - """ - LOG.debug("_create_volume_from_snapshot: %(volume)s from %(name)s", - {'volume': volume['name'], 'name': snapshot_name}) - - # TODO(alonma): Refactor common validation - volume_size = float(volume['size']) - if volume_size < snapshot_size: - error = (_("Volume size (%(vol_size)sGB) cannot be smaller than " - "the snapshot size (%(snap_size)sGB)..") - % {'vol_size': volume_size, - 'snap_size': snapshot_size}) - LOG.error(error) - raise self._get_exception()(error) - self.create_volume(volume) - try: - self._call_xiv_xcli( - "vol_copy", vol_src=snapshot_name, vol_trg=volume['name']) - except errors.XCLIError as e: - error = (_("Fatal error in copying volume: %(details)s") - % {'details': self._get_code_and_status_or_message(e)}) - LOG.error(error) - self._silent_delete_volume(volume) - raise self._get_exception()(error) - # A side effect of vol_copy is the resizing of the destination volume - # to the size of the source volume. 
If the size is different we need - # to get it back to the desired size - if snapshot_size == volume_size: - return - size = storage.gigabytes_to_blocks(volume_size) - try: - self._call_xiv_xcli( - "vol_resize", vol=volume['name'], size_blocks=size) - except errors.XCLIError as e: - error = (_("Fatal error in resize volume: %(details)s") - % {'details': self._get_code_and_status_or_message(e)}) - LOG.error(error) - self._silent_delete_volume(volume) - raise self._get_exception()(error) - - @proxy._trace_time - def create_volume_from_snapshot(self, volume, snapshot): - """create volume from snapshot.""" - - snapshot_size = float(snapshot['volume_size']) - if not snapshot['group_snapshot_id']: - snapshot_name = snapshot['name'] - else: - groupname = self._group_name_from_cgsnapshot_id( - snapshot['group_snapshot_id']) - snapshot_name = self._volume_name_from_cg_snapshot( - groupname, snapshot.volume_name) - self._create_volume_from_snapshot( - volume, snapshot_name, snapshot_size) - - @proxy._trace_time - def create_snapshot(self, snapshot): - """create snapshot.""" - - try: - self._call_xiv_xcli( - "snapshot_create", vol=snapshot['volume_name'], - name=snapshot['name']) - except errors.XCLIError as e: - error = (_("Fatal error in snapshot_create: %(details)s") - % {'details': self._get_code_and_status_or_message(e)}) - LOG.error(error) - raise self._get_exception()(error) - - @proxy._trace_time - def delete_snapshot(self, snapshot): - """delete snapshot.""" - - try: - self._call_xiv_xcli( - "snapshot_delete", snapshot=snapshot['name']) - except errors.XCLIError as e: - error = (_("Fatal error in snapshot_delete: %(details)s") - % {'details': self._get_code_and_status_or_message(e)}) - LOG.error(error) - raise self._get_exception()(error) - - @proxy._trace_time - def extend_volume(self, volume, new_size): - """Resize volume.""" - volume_size = float(volume['size']) - wanted_size = float(new_size) - if wanted_size == volume_size: - return - shrink = 'yes' if 
wanted_size < volume_size else 'no' - size = storage.gigabytes_to_blocks(wanted_size) - try: - self._call_xiv_xcli( - "vol_resize", vol=volume['name'], - size_blocks=size, shrink_volume=shrink) - except errors.XCLIError as e: - error = (_("Fatal error in vol_resize: %(details)s") - % {'details': self._get_code_and_status_or_message(e)}) - LOG.error(error) - raise self._get_exception()(error) - - @proxy._trace_time - def migrate_volume(self, context, volume, host): - """Migrate volume to another backend. - - Optimize the migration if the destination is on the same server. - - If the specified host is another back-end on the same server, and - the volume is not attached, we can do the migration locally without - going through iSCSI. - - Storage-assisted migration... - """ - - false_ret = (False, None) - if 'location_info' not in host['capabilities']: - return false_ret - info = host['capabilities']['location_info'] - try: - dest, dest_host, dest_pool = info.split(':') - except ValueError: - return false_ret - volume_host = volume.host.split('_')[1] - if dest != strings.XIV_BACKEND_PREFIX or dest_host != volume_host: - return false_ret - - if volume.attach_status == 'attached': - LOG.info("Storage-assisted volume migration: Volume " - "%(volume)s is attached", - {'volume': volume.id}) - - try: - self._call_xiv_xcli( - "vol_move", vol=volume.name, - pool=dest_pool) - except errors.XCLIError as e: - error = (_("Fatal error in vol_move: %(details)s") - % {'details': self._get_code_and_status_or_message(e)}) - LOG.error(error) - raise self._get_exception()(error) - - return (True, None) - - @proxy._trace_time - def manage_volume(self, volume, reference): - """Brings an existing backend storage object under Cinder management. - - reference value is passed straight from the get_volume_list helper - function. it is up to the driver how this should be interpreted. 
- It should be sufficient to identify a storage object that the driver - should somehow associate with the newly-created cinder volume - structure. - There are two ways to do this: - - 1. Rename the backend storage object so that it matches the, - volume['name'] which is how drivers traditionally map between a - cinder volume and the associated backend storage object. - - 2. Place some metadata on the volume, or somewhere in the backend, that - allows other driver requests (e.g. delete, clone, attach, detach...) - to locate the backend storage object when required. - - If the reference doesn't make sense, or doesn't refer to an existing - backend storage object, raise a ManageExistingInvalidReference - exception. - - The volume may have a volume_type, and the driver can inspect that and - compare against the properties of the referenced backend storage - object. If they are incompatible, raise a - ManageExistingVolumeTypeMismatch, specifying a reason for the failure. - """ - - existing_volume = reference['source-name'] - LOG.debug("manage_volume: %(volume)s", {'volume': existing_volume}) - # check that volume exists - try: - volumes = self._call_xiv_xcli( - "vol_list", vol=existing_volume).as_list - except errors.XCLIError as e: - error = (MANAGE_VOLUME_BASE_ERROR - % {'volume': existing_volume, - 'error': self._get_code_and_status_or_message(e)}) - LOG.error(error) - raise self._get_exception()(error) - - if len(volumes) != 1: - error = (MANAGE_VOLUME_BASE_ERROR - % {'volume': existing_volume, - 'error': 'Volume does not exist'}) - LOG.error(error) - raise self._get_exception()(error) - - volume['size'] = float(volumes[0]['size']) - - # option 1: - # rename volume to volume['name'] - try: - self._call_xiv_xcli( - "vol_rename", - vol=existing_volume, - new_name=volume['name']) - except errors.XCLIError as e: - error = (MANAGE_VOLUME_BASE_ERROR - % {'volume': existing_volume, - 'error': self._get_code_and_status_or_message(e)}) - LOG.error(error) - raise 
self._get_exception()(error) - - # option 2: - # return volume name as admin metadata - # update the admin metadata DB - - # Need to do the ~same in create data. use the metadata instead of the - # volume name - - return {} - - @proxy._trace_time - def manage_volume_get_size(self, volume, reference): - """Return size of volume to be managed by manage_volume. - - When calculating the size, round up to the next GB. - """ - existing_volume = reference['source-name'] - - # check that volume exists - try: - volumes = self._call_xiv_xcli( - "vol_list", vol=existing_volume).as_list - except errors.XCLIError as e: - error = (_("Fatal error in vol_list: %(details)s") - % {'details': self._get_code_and_status_or_message(e)}) - LOG.error(error) - raise self._get_exception()(error) - - if len(volumes) != 1: - error = (_("Volume %(volume)s is not available on storage") % - {'volume': existing_volume}) - LOG.error(error) - raise self._get_exception()(error) - - return float(volumes[0]['size']) - - @proxy._trace_time - def unmanage_volume(self, volume): - """Removes the specified volume from Cinder management. - - Does not delete the underlying backend storage object. 
- """ - pass - - @proxy._trace_time - def get_replication_status(self, context, volume): - """Return replication status.""" - pass - - def freeze_backend(self, context): - """Notify the backend that it's frozen.""" - # go over volumes in backend that are replicated and lock them - - pass - - def thaw_backend(self, context): - """Notify the backend that it's unfrozen/thawed.""" - - # go over volumes in backend that are replicated and unlock them - pass - - def _using_default_backend(self): - return ((self.active_backend_id is None) or - (self.active_backend_id == strings.PRIMARY_BACKEND_ID)) - - def _is_vol_split_brain(self, xcli_master, xcli_slave, vol): - mirror_master = xcli_master.cmd.mirror_list(vol=vol).as_list - mirror_slave = xcli_slave.cmd.mirror_list(vol=vol).as_list - if (len(mirror_master) == 1 and len(mirror_slave) == 1 and - mirror_master[0].current_role == 'Master' and - mirror_slave[0].current_role == 'Slave' and - mirror_master[0].sync_state.lower() in SYNCHED_STATES): - return False - else: - return True - - def _potential_split_brain(self, xcli_master, xcli_slave, - volumes, pool_master, pool_slave): - potential_split_brain = [] - if xcli_master is None or xcli_slave is None: - return potential_split_brain - try: - vols_master = xcli_master.cmd.vol_list( - pool=pool_master).as_dict('name') - except Exception: - msg = "Failed getting information from the active storage." - LOG.debug(msg) - return potential_split_brain - try: - vols_slave = xcli_slave.cmd.vol_list( - pool=pool_slave).as_dict('name') - except Exception: - msg = "Failed getting information from the target storage." 
- LOG.debug(msg) - return potential_split_brain - - vols_requested = set(vol['name'] for vol in volumes) - common_vols = set(vols_master).intersection( - set(vols_slave)).intersection(set(vols_requested)) - for name in common_vols: - if self._is_vol_split_brain(xcli_master=xcli_master, - xcli_slave=xcli_slave, vol=name): - potential_split_brain.append(name) - return potential_split_brain - - @proxy._trace_time - def failover_host(self, context, volumes, secondary_id, groups=None): - """Failover a full backend. - - Fails over the volume back and forth, if secondary_id is 'default', - volumes will be failed back, otherwize failed over. - - Note that the resulting status depends on the direction: - in case of failover it will be 'failed-over' and in case of - failback it will be 'available' - """ - volume_update_list = [] - - LOG.info("failover_host: from %(active)s to %(id)s", - {'active': self.active_backend_id, 'id': secondary_id}) - # special cases to handle - if secondary_id == strings.PRIMARY_BACKEND_ID: - # case: already failed back - if self._using_default_backend(): - LOG.info("Host has been failed back. No need " - "to fail back again.") - return self.active_backend_id, volume_update_list, [] - pool_slave = self.storage_info[storage.FLAG_KEYS['storage_pool']] - pool_master = self._get_target_params( - self.active_backend_id)['san_clustername'] - goal_status = 'available' - else: - if not self._using_default_backend(): - LOG.info("Already failed over. No need to failover again.") - return self.active_backend_id, volume_update_list, [] - # case: need to select a target - secondary_id = self.get_secondary_backend_id(secondary_id) - pool_master = self.storage_info[storage.FLAG_KEYS['storage_pool']] - try: - pool_slave = self._get_target_params( - secondary_id)['san_clustername'] - except Exception: - msg = _("Invalid target information. 
Can't perform failover") - LOG.error(msg) - raise self.meta['exception'].VolumeBackendAPIException( - data=msg) - pool_master = self.storage_info[storage.FLAG_KEYS['storage_pool']] - goal_status = fields.ReplicationStatus.FAILED_OVER - - # connnect xcli to secondary storage according to backend_id by - # calling _init_xcli with secondary_id - self.ibm_storage_remote_cli = self._init_xcli(secondary_id) - - # get replication_info for all volumes at once - if len(volumes): - # check for split brain situations - # check for files that are available on both volumes - # and are not in an active mirroring relation - self.check_for_splitbrain(volumes, pool_master, pool_slave) - - # loop over volumes and attempt failover - for volume in volumes: - LOG.debug("Attempting to failover '%(vol)s'", - {'vol': volume['name']}) - - result, details = repl.VolumeReplication(self).failover( - volume, failback=(secondary_id == strings.PRIMARY_BACKEND_ID)) - - if result: - status = goal_status - else: - status = 'error' - - updates = {'status': status} - if status == 'error': - updates['replication_extended_status'] = details - volume_update_list.append({ - 'volume_id': volume['id'], - 'updates': updates - }) - - # set active xcli to secondary xcli - self._replace_xcli_to_remote_xcli() - # set active backend id to secondary id - self.active_backend_id = secondary_id - - return secondary_id, volume_update_list, [] - - @proxy._trace_time - def retype(self, ctxt, volume, new_type, diff, host): - """Change volume type. - - Returns a boolean indicating whether the retype occurred. 
- - :param ctxt: Context - :param volume: A dictionary describing the volume to migrate - :param new_type: A dictionary describing the volume type to convert to - :param diff: A dictionary with the difference between the two types - :param host: A dictionary describing the host to migrate to, where - host['host'] is its name, and host['capabilities'] is a - dictionary of its reported capabilities - """ - LOG.debug("retype: volume = %(vol)s type = %(ntype)s", - {'vol': volume.get('display_name'), - 'ntype': new_type['name']}) - - if 'location_info' not in host['capabilities']: - return False - info = host['capabilities']['location_info'] - try: - (dest, dest_host, dest_pool) = info.split(':') - except ValueError: - return False - volume_host = volume.get('host').split('_')[1] - if (dest != strings.XIV_BACKEND_PREFIX or dest_host != volume_host): - return False - - pool_name = self.storage_info[storage.FLAG_KEYS['storage_pool']] - - # if pool is different. else - we're on the same pool and retype is ok. - if (pool_name != dest_pool): - # The input host and pool are already "linked" to the new_type, - # otherwise the scheduler does not assign them as candidates for - # the retype thus we just need to migrate the volume to the new - # pool - LOG.debug("retype: migrate volume %(vol)s to " - "host=%(host)s, pool=%(pool)s", - {'vol': volume.get('display_name'), - 'host': dest_host, 'pool': dest_pool}) - (mig_result, model) = self.migrate_volume( - context=ctxt, volume=volume, host=host) - - if not mig_result: - raise self.meta['exception'].VolumeBackendAPIException( - data=PERF_CLASS_ADD_ERROR) - - # Migration occurred, retype has finished. - # We need to check for type and QoS. 
- # getting the old specs - old_specs = self._qos_specs_from_volume(volume) - new_specs = self._get_qos_specs(new_type.get('id', None)) - if not new_specs: - if old_specs: - LOG.debug("qos: removing qos class for %(vol)s.", - {'vol': volume.display_name}) - self._qos_remove_vol(volume) - return True - - perf_class_name_old = self._check_perf_class_on_backend(old_specs) - perf_class_name_new = self._check_perf_class_on_backend(new_specs) - if perf_class_name_new != perf_class_name_old: - # add new qos to vol. (removed from old qos automatically) - self._qos_add_vol(volume, perf_class_name_new) - return True - - @proxy._trace_time - def _check_storage_version_for_qos_support(self): - if self.meta['storage_version'] is None: - self.meta['storage_version'] = self._call_xiv_xcli( - "version_get").as_single_element.system_version - - if int(self.meta['storage_version'][0:2]) >= 12: - return 'True' - return 'False' - - @proxy._trace_time - def _update_stats(self): - """fetch and update stats.""" - - LOG.debug("Entered XIVProxy::_update_stats:") - - self.meta['stat'] = {} - connection_type = self._get_connection_type() - backend_name = None - if self.driver: - backend_name = self.driver.configuration.safe_get( - 'volume_backend_name') - self.meta['stat']["volume_backend_name"] = ( - backend_name or '%s_%s_%s_%s' % ( - strings.XIV_BACKEND_PREFIX, - self.storage_info[storage.FLAG_KEYS['address']], - self.storage_info[storage.FLAG_KEYS['storage_pool']], - connection_type)) - self.meta['stat']["vendor_name"] = 'IBM' - self.meta['stat']["driver_version"] = self.full_version - self.meta['stat']["storage_protocol"] = connection_type - self.meta['stat']['multiattach'] = False - self.meta['stat']['group_replication_enabled'] = True - self.meta['stat']['consistent_group_replication_enabled'] = True - self.meta['stat']['QoS_support'] = ( - self._check_storage_version_for_qos_support()) - - self.meta['stat']['location_info'] = ( - ('%(destination)s:%(hostname)s:%(pool)s' % - 
{'destination': strings.XIV_BACKEND_PREFIX, - 'hostname': self.storage_info[storage.FLAG_KEYS['address']], - 'pool': self.storage_info[storage.FLAG_KEYS['storage_pool']] - })) - - pools = self._call_xiv_xcli( - "pool_list", - pool=self.storage_info[storage.FLAG_KEYS['storage_pool']]).as_list - if len(pools) != 1: - LOG.error( - "_update_stats: Pool %(pool)s not available on storage", - {'pool': self.storage_info[storage.FLAG_KEYS['storage_pool']]}) - return - pool = pools[0] - - # handle different fields in pool_list between Gen3 and BR - soft_size = pool.get('soft_size') - if soft_size is None: - soft_size = pool.get('size') - hard_size = 0 - else: - hard_size = pool.hard_size - self.meta['stat']['total_capacity_gb'] = int(soft_size) - self.meta['stat']['free_capacity_gb'] = int( - pool.get('empty_space_soft', pool.get('empty_space'))) - self.meta['stat']['reserved_percentage'] = ( - self.driver.configuration.safe_get('reserved_percentage')) - self.meta['stat']['consistent_group_snapshot_enabled'] = True - - # thin/thick provision - self.meta['stat']['thin_provision'] = ('True' if soft_size > hard_size - else 'False') - - if self.targets: - self.meta['stat']['replication_enabled'] = True - self.meta['stat']['replication_type'] = [SYNC, ASYNC] - self.meta['stat']['rpo'] = repl.Replication.get_supported_rpo() - self.meta['stat']['replication_count'] = len(self.targets) - self.meta['stat']['replication_targets'] = [target for target in - six.iterkeys( - self.targets)] - - self.meta['stat']['timestamp'] = datetime.datetime.utcnow() - - LOG.debug("Exiting XIVProxy::_update_stats: %(stat)s", - {'stat': self.meta['stat']}) - - @proxy._trace_time - def create_cloned_volume(self, volume, src_vref): - """Create cloned volume.""" - - # read replication information - specs = self._get_extra_specs(volume.get('volume_type_id', None)) - replication_info = self._get_replication_info(specs) - - # TODO(alonma): Refactor to use more common code - src_vref_size = 
float(src_vref['size']) - volume_size = float(volume['size']) - if volume_size < src_vref_size: - error = (_("New volume size (%(vol_size)s GB) cannot be less" - "than the source volume size (%(src_size)s GB)..") - % {'vol_size': volume_size, 'src_size': src_vref_size}) - LOG.error(error) - raise self._get_exception()(error) - - self._create_volume(volume) - try: - self._call_xiv_xcli( - "vol_copy", - vol_src=src_vref['name'], - vol_trg=volume['name']) - except errors.XCLIError as e: - error = (_("Failed to copy from '%(src)s' to '%(vol)s': " - "%(details)s") - % {'src': src_vref.get('name', ''), - 'vol': volume.get('name', ''), - 'details': self._get_code_and_status_or_message(e)}) - LOG.error(error) - self._silent_delete_volume(volume=volume) - raise self._get_exception()(error) - # A side effect of vol_copy is the resizing of the destination volume - # to the size of the source volume. If the size is different we need - # to get it back to the desired size - if src_vref_size != volume_size: - size = storage.gigabytes_to_blocks(volume_size) - try: - self._call_xiv_xcli( - "vol_resize", - vol=volume['name'], - size_blocks=size) - except errors.XCLIError as e: - error = (_("Fatal error in vol_resize: %(details)s") - % {'details': - self._get_code_and_status_or_message(e)}) - LOG.error(error) - self._silent_delete_volume(volume=volume) - raise self._get_exception()(error) - self.handle_created_vol_properties(replication_info, volume) - - @proxy._trace_time - def volume_exists(self, volume): - """Checks if a volume exists on xiv.""" - - return len(self._call_xiv_xcli( - "vol_list", vol=volume['name']).as_list) > 0 - - def _cg_name_from_id(self, id): - '''Get storage CG name from id. - - A utility method to translate from id - to CG name on the storage - ''' - return "cg_%(id)s" % {'id': id} - - def _group_name_from_id(self, id): - '''Get storage group name from id. 
- - A utility method to translate from id - to Snapshot Group name on the storage - ''' - return "cgs_%(id)s" % {'id': id} - - def _cg_name_from_volume(self, volume): - '''Get storage CG name from volume. - - A utility method to translate from openstack volume - to CG name on the storage - ''' - LOG.debug("_cg_name_from_volume: %(vol)s", - {'vol': volume['name']}) - cg_id = volume.get('group_id', None) - if cg_id: - cg_name = self._cg_name_from_id(cg_id) - LOG.debug("Volume %(vol)s is in CG %(cg)s", - {'vol': volume['name'], 'cg': cg_name}) - return cg_name - else: - LOG.debug("Volume %(vol)s not in CG", - {'vol': volume['name']}) - return None - - def _cg_name_from_group(self, group): - '''Get storage CG name from group. - - A utility method to translate from openstack group - to CG name on the storage - ''' - return self._cg_name_from_id(group['id']) - - def _cg_name_from_cgsnapshot(self, cgsnapshot): - '''Get storage CG name from snapshot. - - A utility method to translate from openstack cgsnapshot - to CG name on the storage - ''' - return self._cg_name_from_id(cgsnapshot['group_id']) - - def _group_name_from_cgsnapshot_id(self, cgsnapshot_id): - '''Get storage Snaphost Group name from snapshot. 
- - A utility method to translate from openstack cgsnapshot - to Snapshot Group name on the storage - ''' - return self._group_name_from_id(cgsnapshot_id) - - def _volume_name_from_cg_snapshot(self, cgs, vol): - # Note: The string is limited by the storage to 63 characters - return ('%(cgs)s.%(vol)s' % {'cgs': cgs, 'vol': vol})[0:62] - - @proxy._trace_time - def create_group(self, context, group): - """Creates a group.""" - - if utils.is_group_a_cg_snapshot_type(group): - cgname = self._cg_name_from_group(group) - return self._create_consistencygroup(context, cgname) - # For generic group, create is executed by manager - raise NotImplementedError() - - def _create_consistencygroup(self, context, cgname): - """Creates a consistency group.""" - - LOG.info("Creating consistency group %(name)s.", - {'name': cgname}) - - # call XCLI - try: - self._call_xiv_xcli( - "cg_create", cg=cgname, - pool=self.storage_info[ - storage.FLAG_KEYS['storage_pool']]).as_list - except errors.CgNameExistsError as e: - error = (_("consistency group %s already exists on backend") % - cgname) - LOG.error(error) - raise self._get_exception()(error) - except errors.CgLimitReachedError as e: - error = _("Reached Maximum number of consistency groups") - LOG.error(error) - raise self._get_exception()(error) - except errors.XCLIError as e: - error = (_("Fatal error in cg_create: %(details)s") % - {'details': self._get_code_and_status_or_message(e)}) - LOG.error(error) - raise self._get_exception()(error) - model_update = {'status': fields.GroupStatus.AVAILABLE} - return model_update - - def _create_consistencygroup_on_remote(self, context, cgname): - """Creates a consistency group on secondary machine. 
- - Return group available even if it already exists (for replication) - """ - - LOG.info("Creating consistency group %(name)s on secondary.", - {'name': cgname}) - - # call remote XCLI - try: - self._call_remote_xiv_xcli( - "cg_create", cg=cgname, - pool=self.storage_info[ - storage.FLAG_KEYS['storage_pool']]).as_list - except errors.CgNameExistsError: - model_update = {'status': fields.GroupStatus.AVAILABLE} - except errors.CgLimitReachedError: - error = _("Maximum number of consistency groups reached") - LOG.error(error) - raise self._get_exception()(error) - except errors.XCLIError as e: - error = (_("Fatal error in cg_create on remote: %(details)s") % - {'details': self._get_code_and_status_or_message(e)}) - LOG.error(error) - raise self._get_exception()(error) - model_update = {'status': fields.GroupStatus.AVAILABLE} - return model_update - - def _silent_cleanup_consistencygroup_from_src(self, context, group, - volumes, cgname): - """Silent cleanup of volumes from CG. - - Silently cleanup volumes and created consistency-group from - storage. 
This function is called after a failure already occurred - and just logs errors, but does not raise exceptions - """ - for volume in volumes: - self._silent_delete_volume_from_cg(volume=volume, cgname=cgname) - try: - self._delete_consistencygroup(context, group, []) - except Exception as e: - details = self._get_code_and_status_or_message(e) - LOG.error('Failed to cleanup CG %(details)s', - {'details': details}) - - @proxy._trace_time - def create_group_from_src(self, context, group, volumes, group_snapshot, - sorted_snapshots, source_group, - sorted_source_vols): - """Create volume group from volume group or volume group snapshot.""" - if utils.is_group_a_cg_snapshot_type(group): - return self._create_consistencygroup_from_src(context, group, - volumes, - group_snapshot, - sorted_snapshots, - source_group, - sorted_source_vols) - else: - raise NotImplementedError() - - def _create_consistencygroup_from_src(self, context, group, volumes, - cgsnapshot, snapshots, source_cg, - sorted_source_vols): - """Creates a consistency group from source. - - Source can be a cgsnapshot with the relevant list of snapshots, - or another CG with its list of volumes. 
- """ - cgname = self._cg_name_from_group(group) - LOG.info("Creating consistency group %(cg)s from src.", - {'cg': cgname}) - - volumes_model_update = [] - if cgsnapshot and snapshots: - LOG.debug("Creating from cgsnapshot %(cg)s", - {'cg': self._cg_name_from_group(cgsnapshot)}) - try: - self._create_consistencygroup(context, cgname) - except Exception as e: - LOG.error( - "Creating CG from cgsnapshot failed: %(details)s", - {'details': self._get_code_and_status_or_message(e)}) - raise - created_volumes = [] - try: - groupname = self._group_name_from_cgsnapshot_id( - cgsnapshot['id']) - for volume, source in zip(volumes, snapshots): - vol_name = source.volume_name - LOG.debug("Original volume: %(vol_name)s", - {'vol_name': vol_name}) - snapshot_name = self._volume_name_from_cg_snapshot( - groupname, vol_name) - LOG.debug("create volume (vol)s from snapshot %(snap)s", - {'vol': vol_name, - 'snap': snapshot_name}) - - snapshot_size = float(source['volume_size']) - self._create_volume_from_snapshot( - volume, snapshot_name, snapshot_size) - created_volumes.append(volume) - volumes_model_update.append( - { - 'id': volume['id'], - 'status': 'available', - 'size': snapshot_size, - }) - except Exception as e: - details = self._get_code_and_status_or_message(e) - msg = (CREATE_VOLUME_BASE_ERROR % {'details': details}) - LOG.error(msg) - # cleanup and then raise exception - self._silent_cleanup_consistencygroup_from_src( - context, group, created_volumes, cgname) - raise self.meta['exception'].VolumeBackendAPIException( - data=msg) - - elif source_cg and sorted_source_vols: - LOG.debug("Creating from CG %(cg)s .", - {'cg': self._cg_name_from_group(source_cg)}) - LOG.debug("Creating from CG %(cg)s .", {'cg': source_cg['id']}) - try: - self._create_consistencygroup(context, group) - except Exception as e: - LOG.error("Creating CG from CG failed: %(details)s", - {'details': self._get_code_and_status_or_message(e)}) - raise - created_volumes = [] - try: - for volume, source in 
zip(volumes, sorted_source_vols): - self.create_cloned_volume(volume, source) - created_volumes.append(volume) - volumes_model_update.append( - { - 'id': volume['id'], - 'status': 'available', - 'size': source['size'], - }) - except Exception as e: - details = self._get_code_and_status_or_message(e) - msg = (CREATE_VOLUME_BASE_ERROR, {'details': details}) - LOG.error(msg) - # cleanup and then raise exception - self._silent_cleanup_consistencygroup_from_src( - context, group, created_volumes, cgname) - raise self.meta['exception'].VolumeBackendAPIException( - data=msg) - - else: - error = 'create_consistencygroup_from_src called without a source' - raise self._get_exception()(error) - - model_update = {'status': fields.GroupStatus.AVAILABLE} - return model_update, volumes_model_update - - @proxy._trace_time - def delete_group(self, context, group, volumes): - """Deletes a group.""" - if utils.is_group_a_cg_snapshot_type(group): - return self._delete_consistencygroup(context, group, volumes) - else: - # For generic group delete the volumes only - executed by manager - raise NotImplementedError() - - def _delete_consistencygroup(self, context, group, volumes): - """Deletes a consistency group.""" - - cgname = self._cg_name_from_group(group) - LOG.info("Deleting consistency group %(name)s.", - {'name': cgname}) - model_update = {} - model_update['status'] = group.get('status', - fields.GroupStatus.DELETING) - - # clean up volumes - volumes_model_update = [] - for volume in volumes: - try: - self._call_xiv_xcli( - "cg_remove_vol", vol=volume['name']) - except errors.XCLIError as e: - LOG.error("Failed removing volume %(vol)s from " - "consistency group %(cg)s: %(err)s", - {'vol': volume['name'], - 'cg': cgname, - 'err': self._get_code_and_status_or_message(e)}) - # continue in spite of error - - try: - self._delete_volume(volume['name']) - # size and volume_type_id are required in liberty code - # they are maintained here for backwards compatability - 
volumes_model_update.append( - { - 'id': volume['id'], - 'status': 'deleted', - }) - except errors.XCLIError as e: - LOG.error(DELETE_VOLUME_BASE_ERROR, - {'volume': volume['name'], - 'error': self._get_code_and_status_or_message(e)}) - model_update['status'] = fields.GroupStatus.ERROR_DELETING - # size and volume_type_id are required in liberty code - # they are maintained here for backwards compatibility - volumes_model_update.append( - { - 'id': volume['id'], - 'status': 'error_deleting', - }) - - # delete CG from cinder.volume.drivers.ibm.ibm_storage - if model_update['status'] != fields.GroupStatus.ERROR_DELETING: - try: - self._call_xiv_xcli( - "cg_delete", cg=cgname).as_list - model_update['status'] = fields.GroupStatus.DELETED - except (errors.CgDoesNotExistError, errors.CgBadNameError): - LOG.warning("consistency group %(cgname)s does not " - "exist on backend", - {'cgname': cgname}) - # if the object was already deleted on the backend, we can - # continue and delete the openstack object - model_update['status'] = fields.GroupStatus.DELETED - except errors.CgHasMirrorError: - error = (_("consistency group %s is being mirrored") % cgname) - LOG.error(error) - raise self._get_exception()(error) - except errors.CgNotEmptyError: - error = (_("consistency group %s is not empty") % cgname) - LOG.error(error) - raise self._get_exception()(error) - except errors.XCLIError as e: - error = (_("Fatal: %(code)s. 
CG: %(cgname)s") % - {'code': self._get_code_and_status_or_message(e), - 'cgname': cgname}) - LOG.error(error) - raise self._get_exception()(error) - return model_update, volumes_model_update - - @proxy._trace_time - def update_group(self, context, group, - add_volumes=None, remove_volumes=None): - """Updates a group.""" - if utils.is_group_a_cg_snapshot_type(group): - return self._update_consistencygroup(context, group, add_volumes, - remove_volumes) - else: - # For generic group update executed by manager - raise NotImplementedError() - - def _update_consistencygroup(self, context, group, - add_volumes=None, remove_volumes=None): - """Updates a consistency group.""" - - cgname = self._cg_name_from_group(group) - LOG.info("Updating consistency group %(name)s.", {'name': cgname}) - model_update = {'status': fields.GroupStatus.AVAILABLE} - - add_volumes_update = [] - if add_volumes: - for volume in add_volumes: - try: - self._call_xiv_xcli( - "cg_add_vol", vol=volume['name'], cg=cgname) - except errors.XCLIError as e: - error = (_("Failed adding volume %(vol)s to " - "consistency group %(cg)s: %(err)s") - % {'vol': volume['name'], - 'cg': cgname, - 'err': - self._get_code_and_status_or_message(e)}) - LOG.error(error) - self._cleanup_consistencygroup_update( - context, group, add_volumes_update, None) - raise self._get_exception()(error) - add_volumes_update.append({'name': volume['name']}) - - remove_volumes_update = [] - if remove_volumes: - for volume in remove_volumes: - try: - self._call_xiv_xcli( - "cg_remove_vol", vol=volume['name']) - except (errors.VolumeNotInConsGroup, - errors.VolumeBadNameError) as e: - # ignore the error if the volume exists in storage but - # not in cg, or the volume does not exist in the storage - details = self._get_code_and_status_or_message(e) - LOG.debug(details) - except errors.XCLIError as e: - error = (_("Failed removing volume %(vol)s from " - "consistency group %(cg)s: %(err)s") - % {'vol': volume['name'], - 'cg': cgname, - 
'err': - self._get_code_and_status_or_message(e)}) - LOG.error(error) - self._cleanup_consistencygroup_update( - context, group, add_volumes_update, - remove_volumes_update) - raise self._get_exception()(error) - remove_volumes_update.append({'name': volume['name']}) - - return model_update, None, None - - def _cleanup_consistencygroup_update(self, context, group, - add_volumes, remove_volumes): - if add_volumes: - for volume in add_volumes: - try: - self._call_xiv_xcli( - "cg_remove_vol", vol=volume['name']) - except Exception: - LOG.debug("cg_remove_vol(%s) failed", volume['name']) - - if remove_volumes: - cgname = self._cg_name_from_group(group) - for volume in remove_volumes: - try: - self._call_xiv_xcli( - "cg_add_vol", vol=volume['name'], cg=cgname) - except Exception: - LOG.debug("cg_add_vol(%(name)s, %(cgname)s) failed", - {'name': volume['name'], 'cgname': cgname}) - - @proxy._trace_time - def create_group_snapshot(self, context, group_snapshot, snapshots): - """Create volume group snapshot.""" - - if utils.is_group_a_cg_snapshot_type(group_snapshot): - return self._create_cgsnapshot(context, group_snapshot, snapshots) - else: - # For generic group snapshot create executed by manager - raise NotImplementedError() - - def _create_cgsnapshot(self, context, cgsnapshot, snapshots): - """Creates a CG snapshot.""" - model_update = {'status': fields.GroupSnapshotStatus.AVAILABLE} - - cgname = self._cg_name_from_cgsnapshot(cgsnapshot) - groupname = self._group_name_from_cgsnapshot_id(cgsnapshot['id']) - LOG.info("Creating snapshot %(group)s for CG %(cg)s.", - {'group': groupname, 'cg': cgname}) - - # call XCLI - try: - self._call_xiv_xcli( - "cg_snapshots_create", cg=cgname, - snap_group=groupname).as_list - except errors.CgDoesNotExistError as e: - error = (_("Consistency group %s does not exist on backend") % - cgname) - LOG.error(error) - raise self._get_exception()(error) - except errors.CgBadNameError as e: - error = (_("Consistency group %s has an illegal 
name") % cgname) - LOG.error(error) - raise self._get_exception()(error) - except errors.SnapshotGroupDoesNotExistError as e: - error = (_("Snapshot group %s has an illegal name") % cgname) - LOG.error(error) - raise self._get_exception()(error) - except errors.PoolSnapshotLimitReachedError as e: - error = _("Reached maximum snapshots allocation size") - LOG.error(error) - raise self._get_exception()(error) - except errors.CgEmptyError as e: - error = (_("Consistency group %s is empty") % cgname) - LOG.error(error) - raise self._get_exception()(error) - except (errors.MaxVolumesReachedError, - errors.DomainMaxVolumesReachedError) as e: - error = _("Reached Maximum number of volumes") - LOG.error(error) - raise self._get_exception()(error) - except errors.SnapshotGroupIsReservedError as e: - error = (_("Consistency group %s name is reserved") % cgname) - LOG.error(error) - raise self._get_exception()(error) - except errors.SnapshotGroupAlreadyExistsError as e: - error = (_("Snapshot group %s already exists") % groupname) - LOG.error(error) - raise self._get_exception()(error) - except errors.XCLIError as e: - error = (_("Fatal: CG %(cg)s, Group %(group)s. 
%(err)s") % - {'cg': cgname, - 'group': groupname, - 'err': self._get_code_and_status_or_message(e)}) - LOG.error(error) - raise self._get_exception()(error) - - snapshots_model_update = [] - for snapshot in snapshots: - snapshots_model_update.append( - { - 'id': snapshot['id'], - 'status': fields.SnapshotStatus.AVAILABLE, - }) - return model_update, snapshots_model_update - - @proxy._trace_time - def delete_group_snapshot(self, context, group_snapshot, snapshots): - """Delete volume group snapshot.""" - if utils.is_group_a_cg_snapshot_type(group_snapshot): - return self._delete_cgsnapshot(context, group_snapshot, snapshots) - else: - # For generic group snapshot delete is executed by manager - raise NotImplementedError() - - def _delete_cgsnapshot(self, context, cgsnapshot, snapshots): - """Deletes a CG snapshot.""" - - cgname = self._cg_name_from_cgsnapshot(cgsnapshot) - groupname = self._group_name_from_cgsnapshot_id(cgsnapshot['id']) - LOG.info("Deleting snapshot %(group)s for CG %(cg)s.", - {'group': groupname, 'cg': cgname}) - - # call XCLI - try: - self._call_xiv_xcli( - "snap_group_delete", snap_group=groupname).as_list - except errors.CgDoesNotExistError: - error = _("consistency group %s not found on backend") % cgname - LOG.error(error) - raise self._get_exception()(error) - except errors.PoolSnapshotLimitReachedError: - error = _("Reached Maximum size allocated for snapshots") - LOG.error(error) - raise self._get_exception()(error) - except errors.CgEmptyError: - error = _("Consistency group %s is empty") % cgname - LOG.error(error) - raise self._get_exception()(error) - except errors.XCLIError as e: - error = _("Fatal: CG %(cg)s, Group %(group)s. 
%(err)s") % { - 'cg': cgname, - 'group': groupname, - 'err': self._get_code_and_status_or_message(e) - } - LOG.error(error) - raise self._get_exception()(error) - - model_update = {'status': fields.GroupSnapshotStatus.DELETED} - snapshots_model_update = [] - for snapshot in snapshots: - snapshots_model_update.append( - { - 'id': snapshot['id'], - 'status': fields.SnapshotStatus.DELETED, - }) - - return model_update, snapshots_model_update - - def _generate_chap_secret(self, chap_name): - """Returns chap secret generated according to chap_name - - chap secret must be between 12-16 chaqnracters - """ - name = chap_name - chap_secret = "" - while len(chap_secret) < 12: - chap_secret = cryptish.encrypt(name)[:16] - name = name + '_' - LOG.debug("_generate_chap_secret: %(secret)s", - {'secret': chap_secret}) - return chap_secret - - @proxy._trace_time - def _create_chap(self, host=None): - """Get CHAP name and secret - - returns chap name and secret - chap_name and chap_secret must be 12-16 characters long - """ - - if host: - if host['chap']: - chap_name = host['chap'][0] - LOG.debug("_create_chap: %(chap_name)s ", - {'chap_name': chap_name}) - else: - chap_name = host['name'] - else: - LOG.info("_create_chap: host missing!!!") - chap_name = "12345678901234" - chap_secret = self._generate_chap_secret(chap_name) - LOG.debug("_create_chap (new): %(chap_name)s ", - {'chap_name': chap_name}) - return (chap_name, chap_secret) - - @proxy._trace_time - def _get_host(self, connector): - """Returns a host looked up via initiator.""" - - try: - host_bunch = self._get_bunch_from_host(connector) - except Exception as e: - details = self._get_code_and_status_or_message(e) - msg = (_("%(prefix)s. 
Invalid connector: '%(details)s.'") % - {'prefix': storage.XIV_LOG_PREFIX, 'details': details}) - raise self._get_exception()(msg) - host = [] - chap = None - all_hosts = self._call_xiv_xcli("host_list").as_list - if self._get_connection_type() == storage.XIV_CONNECTION_TYPE_ISCSI: - host = [host_obj for host_obj in all_hosts - if host_bunch['initiator'] - in host_obj.iscsi_ports.split(',')] - else: - if 'wwpns' in connector: - if len(host_bunch['wwpns']) > 0: - wwpn_set = set([wwpn.lower() for wwpn - in host_bunch['wwpns']]) - host = [host_obj for host_obj in all_hosts if - len(wwpn_set.intersection(host_obj.get( - 'fc_ports', '').lower().split(','))) > 0] - else: # fake connector created by nova - host = [host_obj for host_obj in all_hosts - if host_obj.get('name', '') == connector['host']] - if len(host) == 1: - if self._is_iscsi() and host[0].iscsi_chap_name: - chap = (host[0].iscsi_chap_name, - self._generate_chap_secret(host[0].iscsi_chap_name)) - LOG.debug("_get_host: chap_name %(chap_name)s ", - {'chap_name': host[0].iscsi_chap_name}) - return self._get_bunch_from_host( - connector, host[0].id, host[0].name, chap) - - LOG.debug("_get_host: returns None") - return None - - @proxy._trace_time - def _call_host_define(self, host, - chap_name=None, chap_secret=None, domain_name=None): - """Call host_define using XCLI.""" - LOG.debug("host_define with domain: %s)", domain_name) - if domain_name: - if chap_name: - return self._call_xiv_xcli( - "host_define", - host=host, - iscsi_chap_name=chap_name, - iscsi_chap_secret=chap_secret, - domain=domain_name - ).as_list[0] - else: - return self._call_xiv_xcli( - "host_define", - host=host, - domain=domain_name - ).as_list[0] - else: - # No domain - if chap_name: - return self._call_xiv_xcli( - "host_define", - host=host, - iscsi_chap_name=chap_name, - iscsi_chap_secret=chap_secret - ).as_list[0] - else: - return self._call_xiv_xcli( - "host_define", - host=host - ).as_list[0] - - @proxy._trace_time - def 
_define_host_according_to_chap(self, host, in_domain): - """Check on chap state and define host accordingly.""" - chap_name = None - chap_secret = None - if (self._get_connection_type() == - storage.XIV_CONNECTION_TYPE_ISCSI and - self._get_chap_type() == storage.CHAP_ENABLED): - host_bunch = {'name': host, 'chap': None, } - chap = self._create_chap(host=host_bunch) - chap_name = chap[0] - chap_secret = chap[1] - LOG.debug("_define_host_according_to_chap: " - "%(name)s : %(secret)s", - {'name': chap_name, 'secret': chap_secret}) - return self._call_host_define( - host=host, - chap_name=chap_name, - chap_secret=chap_secret, - domain_name=in_domain) - - def _define_ports(self, host_bunch): - """Defines ports in XIV.""" - fc_targets = [] - LOG.debug(host_bunch.get('name')) - if self._get_connection_type() == storage.XIV_CONNECTION_TYPE_ISCSI: - self._define_iscsi(host_bunch) - else: - fc_targets = self._define_fc(host_bunch) - fc_targets = list(set(fc_targets)) - fc_targets.sort(key=self._sort_last_digit) - return fc_targets - - def _get_pool_domain(self, connector): - pool_name = self.storage_info[storage.FLAG_KEYS['storage_pool']] - LOG.debug("pool name from configuration: %s", pool_name) - domain = None - try: - domain = self._call_xiv_xcli( - "pool_list", pool=pool_name).as_list[0].get('domain') - LOG.debug("Pool's domain: %s", domain) - except AttributeError: - pass - return domain - - @proxy._trace_time - def _define_host(self, connector): - """Defines a host in XIV.""" - domain = self._get_pool_domain(connector) - host_bunch = self._get_bunch_from_host(connector) - host = self._call_xiv_xcli( - "host_list", host=host_bunch['name']).as_list - connection_type = self._get_connection_type() - if len(host) == 0: - LOG.debug("Non existing host, defining") - host = self._define_host_according_to_chap( - host=host_bunch['name'], in_domain=domain) - host_bunch = self._get_bunch_from_host(connector, - host.get('id')) - else: - host_bunch = 
self._get_bunch_from_host(connector, - host[0].get('id')) - LOG.debug("Generating hostname for connector %(conn)s", - {'conn': connector}) - generated_hostname = storage.get_host_or_create_from_iqn( - connector, connection=connection_type) - generated_host = self._call_xiv_xcli( - "host_list", - host=generated_hostname).as_list - if len(generated_host) == 0: - host = self._define_host_according_to_chap( - host=generated_hostname, - in_domain=domain) - else: - host = generated_host[0] - host_bunch = self._get_bunch_from_host( - connector, host.get('id'), host_name=generated_hostname) - LOG.debug("The host_bunch: %s", host_bunch) - return host_bunch - - @proxy._trace_time - def _define_fc(self, host_bunch): - """Define FC Connectivity.""" - - fc_targets = [] - if len(host_bunch.get('wwpns')) > 0: - connected_wwpns = [] - for wwpn in host_bunch.get('wwpns'): - component_ids = list(set( - [p.component_id for p in - self._call_xiv_xcli( - "fc_connectivity_list", - wwpn=wwpn.replace(":", ""))])) - wwpn_fc_target_lists = [] - for component in component_ids: - wwpn_fc_target_lists += [fc_p.wwpn for fc_p in - self._call_xiv_xcli( - "fc_port_list", - fcport=component)] - LOG.debug("got %(tgts)s fc targets for wwpn %(wwpn)s", - {'tgts': wwpn_fc_target_lists, 'wwpn': wwpn}) - if len(wwpn_fc_target_lists) > 0: - connected_wwpns += [wwpn] - fc_targets += wwpn_fc_target_lists - LOG.debug("adding fc port %s", wwpn) - self._call_xiv_xcli( - "host_add_port", host=host_bunch.get('name'), - fcaddress=wwpn) - if len(connected_wwpns) == 0: - LOG.error(CONNECTIVITY_FC_NO_TARGETS) - raise self._get_exception()(CONNECTIVITY_FC_NO_TARGETS) - else: - msg = _("No Fibre Channel HBA's are defined on the host.") - LOG.error(msg) - raise self._get_exception()(msg) - - return fc_targets - - @proxy._trace_time - def _define_iscsi(self, host_bunch): - """Add iscsi ports.""" - if host_bunch.get('initiator'): - LOG.debug("adding iscsi") - self._call_xiv_xcli( - "host_add_port", 
host=host_bunch.get('name'), - iscsi_name=host_bunch.get('initiator')) - else: - msg = _("No iSCSI initiator found!") - LOG.error(msg) - raise self._get_exception()(msg) - - @proxy._trace_time - def _event_service_start(self): - """Send an event when cinder service starts.""" - LOG.debug("send event SERVICE_STARTED") - service_start_evnt_prop = { - "openstack_version": self.meta['openstack_version'], - "pool_name": self.storage_info[storage.FLAG_KEYS['storage_pool']]} - ev_mgr = events.EventsManager(self.ibm_storage_cli, - OPENSTACK_PRODUCT_NAME, - self.full_version) - ev_mgr.send_event('SERVICE_STARTED', service_start_evnt_prop) - - @proxy._trace_time - def _event_volume_attached(self): - """Send an event when volume is attached to host.""" - LOG.debug("send event VOLUME_ATTACHED") - compute_host_name = socket.getfqdn() - vol_attach_evnt_prop = { - "openstack_version": self.meta['openstack_version'], - "pool_name": self.storage_info[storage.FLAG_KEYS['storage_pool']], - "compute_hostname": compute_host_name} - - ev_mgr = events.EventsManager(self.ibm_storage_cli, - OPENSTACK_PRODUCT_NAME, - self.full_version) - ev_mgr.send_event('VOLUME_ATTACHED', vol_attach_evnt_prop) - - @proxy._trace_time - def _build_initiator_target_map(self, fc_targets, connector): - """Build the target_wwns and the initiator target map.""" - init_targ_map = {} - wwpns = connector.get('wwpns', []) - for initiator in wwpns: - init_targ_map[initiator] = fc_targets - - LOG.debug("_build_initiator_target_map: %(init_targ_map)s", - {'init_targ_map': init_targ_map}) - return init_targ_map - - @proxy._trace_time - def _get_host_and_fc_targets(self, volume, connector): - """Returns the host and its FC targets.""" - - LOG.debug("_get_host_and_fc_targets %(volume)s", - {'volume': volume['name']}) - - fc_targets = [] - host = self._get_host(connector) - if not host: - host = self._define_host(connector) - fc_targets = self._define_ports(host) - elif self._get_connection_type() == 
storage.XIV_CONNECTION_TYPE_FC: - fc_targets = self._get_fc_targets(host) - if len(fc_targets) == 0: - LOG.error(CONNECTIVITY_FC_NO_TARGETS) - raise self._get_exception()(CONNECTIVITY_FC_NO_TARGETS) - - return (fc_targets, host) - - def _vol_map_and_get_lun_id(self, volume, connector, host): - """Maps volume to instance. - - Maps a volume to the nova volume node as host, - and return the created lun id - """ - vol_name = volume['name'] - LOG.debug("_vol_map_and_get_lun_id %(volume)s", - {'volume': vol_name}) - - try: - mapped_vols = self._call_xiv_xcli( - "vol_mapping_list", - vol=vol_name).as_dict('host') - if host['name'] in mapped_vols: - LOG.info("Volume '%(volume)s' was already attached to " - "the host '%(host)s'.", - {'host': host['name'], - 'volume': volume['name']}) - return int(mapped_vols[host['name']].lun) - except errors.VolumeBadNameError: - LOG.error("Volume not found. '%s'", volume['name']) - raise self.meta['exception'].VolumeNotFound(volume_id=volume['id']) - used_luns = [int(mapped.get('lun')) for mapped in - self._call_xiv_xcli( - "mapping_list", - host=host['name']).as_list] - luns = six.moves.xrange(MIN_LUNID, MAX_LUNID) # pylint: disable=E1101 - for lun_id in luns: - if lun_id not in used_luns: - self._call_xiv_xcli( - "map_vol", - lun=lun_id, - host=host['name'], - vol=vol_name) - self._event_volume_attached() - return lun_id - msg = _("All free LUN IDs were already mapped.") - LOG.error(msg) - raise self._get_exception()(msg) - - @proxy._trace_time - def _get_fc_targets(self, host): - """Get FC targets - - :host: host bunch - :returns: array of FC target WWPNs - """ - target_wwpns = [] - - fc_port_list = self._call_xiv_xcli("fc_port_list") - if host is None: - target_wwpns += ( - [t.get('wwpn') for t in - fc_port_list if - t.get('wwpn') != '0000000000000000' and - t.get('role') == 'Target' and - t.get('port_state') == 'Online']) - else: - host_conect_list = self._call_xiv_xcli("host_connectivity_list", - host=host.get('name')) - for 
connection in host_conect_list: - fc_port = connection.get('local_fc_port') - target_wwpns += ( - [t.get('wwpn') for t in - fc_port_list if - t.get('wwpn') != '0000000000000000' and - t.get('role') == 'Target' and - t.get('port_state') == 'Online' and - t.get('component_id') == fc_port]) - - fc_targets = list(set(target_wwpns)) - fc_targets.sort(key=self._sort_last_digit) - LOG.debug("fc_targets : %s", fc_targets) - return fc_targets - - def _sort_last_digit(self, a): - return a[-1:] - - @proxy._trace_time - def _get_xcli(self, xcli, backend_id): - """Wrapper around XCLI to ensure that connection is up.""" - if self.meta['bypass_connection_check']: - LOG.debug("_get_xcli(bypass mode)") - else: - if not xcli.is_connected(): - xcli = self._init_xcli(backend_id) - return xcli - - @proxy._trace_time - def _call_xiv_xcli(self, method, *args, **kwargs): - """Wrapper around XCLI to call active storage.""" - self.ibm_storage_cli = self._get_xcli( - self.ibm_storage_cli, self.active_backend_id) - - if self.ibm_storage_cli: - LOG.info("_call_xiv_xcli #1: %s", method) - else: - LOG.debug("_call_xiv_xcli #2: %s", method) - return getattr(self.ibm_storage_cli.cmd, method)(*args, **kwargs) - - @proxy._trace_time - def _call_remote_xiv_xcli(self, method, *args, **kwargs): - """Wrapper around XCLI to call remote storage.""" - remote_id = self._get_secondary_backend_id() - if not remote_id: - raise self._get_exception()(_("No remote backend found.")) - self.ibm_storage_remote_cli = self._get_xcli( - self.ibm_storage_remote_cli, remote_id) - - LOG.debug("_call_remote_xiv_xcli: %s", method) - return getattr(self.ibm_storage_remote_cli.cmd, method)( - *args, - **kwargs) - - def _verify_xiv_flags(self, address, user, password): - """Verify that the XIV flags were passed.""" - if not user or not password: - raise self._get_exception()(_("No credentials found.")) - - if not address: - raise self._get_exception()(_("No host found.")) - - def _get_connection_params(self, 
backend_id=strings.PRIMARY_BACKEND_ID): - """Get connection parameters. - - returns a tuple containing address list, user, password, - according to backend_id - """ - if not backend_id or backend_id == strings.PRIMARY_BACKEND_ID: - if self._get_management_ips(): - address = [e.strip(" ") for e in self.storage_info[ - storage.FLAG_KEYS['management_ips']].split(",")] - else: - address = self.storage_info[storage.FLAG_KEYS['address']] - user = self.storage_info[storage.FLAG_KEYS['user']] - password = self.storage_info[storage.FLAG_KEYS['password']] - else: - params = self._get_target_params(backend_id) - if not params: - msg = (_("Missing target information for target '%(target)s'"), - {'target': backend_id}) - LOG.error(msg) - raise self.meta['exception'].VolumeBackendAPIException( - data=msg) - if params.get('management_ips', None): - address = [e.strip(" ") for e in - params['management_ips'].split(",")] - else: - address = params['san_ip'] - user = params['san_login'] - password = params['san_password'] - - return (address, user, password) - - @proxy._trace_time - def _init_xcli(self, backend_id=strings.PRIMARY_BACKEND_ID): - """Initilize XCLI connection. 
- - returns an XCLIClient object - """ - - try: - address, user, password = self._get_connection_params(backend_id) - except Exception as e: - details = self._get_code_and_status_or_message(e) - ex_details = (SETUP_BASE_ERROR, - {'title': strings.TITLE, 'details': details}) - LOG.error(ex_details) - raise self.meta['exception'].InvalidParameterValue( - (_("%(prefix)s %(ex_details)s") % - {'prefix': storage.XIV_LOG_PREFIX, - 'ex_details': ex_details})) - - self._verify_xiv_flags(address, user, password) - - try: - clear_pass = cryptish.decrypt(password) - except TypeError: - ex_details = (SETUP_BASE_ERROR, - {'title': strings.TITLE, - 'details': "Invalid password."}) - LOG.error(ex_details) - raise self.meta['exception'].InvalidParameterValue( - (_("%(prefix)s %(ex_details)s") % - {'prefix': storage.XIV_LOG_PREFIX, - 'ex_details': ex_details})) - - certs = certificate.CertificateCollector() - path = certs.collect_certificate() - try: - LOG.debug('connect_multiendpoint_ssl with: %s', address) - xcli = client.XCLIClient.connect_multiendpoint_ssl( - user, - clear_pass, - address, - ca_certs=path) - except errors.CredentialsError: - LOG.error(SETUP_BASE_ERROR, - {'title': strings.TITLE, - 'details': "Invalid credentials."}) - raise self.meta['exception'].NotAuthorized() - except (errors.ConnectionError, transports.ClosedTransportError): - err_msg = (SETUP_INVALID_ADDRESS, {'address': address}) - LOG.error(err_msg) - raise self.meta['exception'].HostNotFound(host=err_msg) - except Exception as er: - err_msg = (SETUP_BASE_ERROR % - {'title': strings.TITLE, 'details': er}) - LOG.error(err_msg) - raise self._get_exception()(err_msg) - finally: - certs.free_certificate() - - return xcli diff --git a/cinder/volume/drivers/ibm/ibm_storage/xiv_replication.py b/cinder/volume/drivers/ibm/ibm_storage/xiv_replication.py deleted file mode 100644 index fd9f71509..000000000 --- a/cinder/volume/drivers/ibm/ibm_storage/xiv_replication.py +++ /dev/null @@ -1,342 +0,0 @@ -# Copyright (c) 
# 2017 IBM Corporation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
import six

from oslo_log import log as logging
from oslo_utils import importutils

pyxcli = importutils.try_import("pyxcli")
if pyxcli:
    from pyxcli import errors
    from pyxcli.mirroring import cg_recovery_manager
    from pyxcli.mirroring import errors as m_errors
    from pyxcli.mirroring import volume_recovery_manager

from cinder.i18n import _
from cinder.volume.drivers.ibm.ibm_storage import strings

SYNC = 'sync'
ASYNC = 'async'

LOG = logging.getLogger(__name__)


class Rate(object):
    """An async-mirroring rate: an RPO paired with its schedule name."""

    def __init__(self, rpo, schedule):
        self.rpo = rpo
        self.schedule = schedule
        self.schedule_name = self._schedule_name_from_schedule(self.schedule)

    def _schedule_name_from_schedule(self, schedule):
        # 00:00:20 is the array's built-in minimal-interval schedule; any
        # other interval gets a cinder-specific schedule name derived from
        # the interval string (colons are replaced, as they are not valid
        # in schedule names).
        if schedule == '00:00:20':
            return 'min_interval'
        return ("cinder_%(sched)s" %
                {'sched': schedule.replace(':', '_')})


class Replication(object):
    """Common mirroring logic shared by volume and group replication."""

    # Supported async mirroring rates: RPO in seconds, paired with the
    # sync-schedule interval that achieves it.
    async_rates = (
        Rate(rpo=120, schedule='00:01:00'),
        Rate(rpo=300, schedule='00:02:00'),
        Rate(rpo=600, schedule='00:05:00'),
        Rate(rpo=1200, schedule='00:10:00'),
    )

    def __init__(self, proxy):
        self.proxy = proxy

    @staticmethod
    def get_schedule_from_rpo(rpo):
        """Return the schedule name matching *rpo*, or None if unsupported.

        The original implementation indexed ``[0]`` on the filtered list
        unconditionally, so an unsupported RPO raised IndexError and the
        error-logging branch was unreachable.  ``next(..., None)`` makes
        the documented "not found" path actually reachable.
        """
        schedule = next((rate.schedule_name
                         for rate in Replication.async_rates
                         if rate.rpo == rpo), None)
        if schedule:
            LOG.debug('schedule %(sched)s: for rpo %(rpo)s',
                      {'sched': schedule, 'rpo': rpo})
        else:
            LOG.error('Failed to find schedule for rpo %(rpo)s',
                      {'rpo': rpo})
        return schedule

    @staticmethod
    def get_supported_rpo():
        """Return the list of RPO values (seconds) the driver supports."""
        return [rate.rpo for rate in Replication.async_rates]

    def get_recovery_mgr(self):
        # Recovery manager is set in derived classes
        raise NotImplementedError

    def get_remote_recovery_mgr(self):
        # Recovery manager is set in derived classes
        raise NotImplementedError

    def replication_create_mirror(self, resource, replication_info,
                                  target, pool):
        raise NotImplementedError

    @staticmethod
    def extract_replication_info_from_specs(specs):
        """Parse replication settings out of volume-type extra specs.

        Returns a ``(info, msg)`` tuple: *info* is a dict with the keys
        'enabled', 'mode' and 'rpo' (or None on error), and *msg* carries
        the error text (empty string on success).
        """
        info = {'enabled': False, 'mode': None, 'rpo': 0}
        msg = ""
        if specs:
            LOG.debug('extract_replication_info_from_specs: specs %(specs)s',
                      {'specs': specs})

            info['enabled'] = (
                specs.get('replication_enabled', '').upper() in
                (u'TRUE', strings.METADATA_IS_TRUE) or
                specs.get('group_replication_enabled', '').upper() in
                (u'TRUE', strings.METADATA_IS_TRUE))

            # Extra-spec values may carry the scoped "<is> " prefix, hence
            # the second accepted literal and the [5:] slice below.
            replication_type = specs.get('replication_type', SYNC).lower()
            if replication_type in (u'sync', u'<is> sync'):
                info['mode'] = SYNC
            elif replication_type in (u'async', u'<is> async'):
                info['mode'] = ASYNC
            else:
                msg = (_("Unsupported replication mode %(mode)s")
                       % {'mode': replication_type})
                return None, msg
            info['rpo'] = int(specs.get('rpo', u'<is> 0')[5:])
            supported_rpos = Replication.get_supported_rpo()
            if info['rpo'] and info['rpo'] not in supported_rpos:
                # The original assigned a (translated-string, dict) tuple
                # here instead of interpolating, so callers received a
                # tuple rather than an error message.
                msg = (_("Unsupported replication RPO %(rpo)s")
                       % {'rpo': info['rpo']})
                return None, msg

        LOG.debug('extract_replication_info_from_specs: info %(info)s',
                  {'info': info})
        return info, msg

    def failover(self, resource, failback):
        raise NotImplementedError

    def create_replication(self, resource_name, replication_info):
        """Create and activate a mirror for *resource_name* on the target."""
        LOG.debug('Replication::create_replication replication_info %(rep)s',
                  {'rep': replication_info})

        target, params = self.proxy._get_replication_target_params()
        LOG.info('Target %(target)s: %(params)s',
                 {'target': target, 'params': six.text_type(params)})

        try:
            pool = params['san_clustername']
        except Exception:
            msg = (_("Missing pool information for target '%(target)s'") %
                   {'target': target})
            LOG.error(msg)
            raise self.proxy.meta['exception'].VolumeBackendAPIException(
                data=msg)

        self.replication_create_mirror(resource_name, replication_info,
                                       target, pool)

    def delete_replication(self, resource_name, replication_info):
        """Deactivate and delete the mirror for *resource_name*."""
        LOG.debug('Replication::delete_replication replication_info %(rep)s',
                  {'rep': replication_info})

        recovery_mgr = self.get_recovery_mgr()

        try:
            recovery_mgr.deactivate_mirror(resource_id=resource_name)
        except Exception as e:
            details = self.proxy._get_code_and_status_or_message(e)
            msg = (_("Failed ending replication for %(resource)s: "
                     "'%(details)s'") % {'resource': resource_name,
                                         'details': details})
            LOG.error(msg)
            raise self.proxy.meta['exception'].VolumeBackendAPIException(
                data=msg)
        try:
            recovery_mgr.delete_mirror(resource_id=resource_name)
        except Exception as e:
            details = self.proxy._get_code_and_status_or_message(e)
            msg = (_("Failed deleting replica for %(resource)s: "
                     "'%(details)s'") % {'resource': resource_name,
                                         'details': details})
            LOG.error(msg)
            raise self.proxy.meta['exception'].VolumeBackendAPIException(
                data=msg)

    def _failover_resource(self, resource, recovery_mgr, failover_rep_mgr,
                           rep_type, failback):
        """Fail one resource over (or back).

        Sequence: try switch_roles from the master side first; if that
        fails, brute-force a change_role on the secondary.  Returns a
        ``(success, failure_reason)`` tuple.
        """
        # check if mirror is defined and active
        LOG.debug('Check if mirroring is active on %(res)s',
                  {'res': resource['name']})
        try:
            active = recovery_mgr.is_mirror_active(
                resource_id=resource['name'])
        except Exception:
            active = False
        state = 'active' if active else 'inactive'
        LOG.debug('Mirroring is %(state)s', {'state': state})

        # In case of failback, mirroring must be active
        # In case of failover we attempt to move in any condition
        if failback and not active:
            msg = ("%(rep_type)s %(res)s: no active mirroring and can not "
                   "failback" % {'rep_type': rep_type,
                                 'res': resource['name']})
            LOG.error(msg)
            return False, msg

        try:
            recovery_mgr.switch_roles(resource_id=resource['name'])
            return True, None
        except Exception as e:
            # failed attempt to switch_roles from the master
            details = self.proxy._get_code_and_status_or_message(e)
            LOG.warning('Failed to perform switch_roles on'
                        ' %(res)s: %(err)s. '
                        'Continue to change_role',
                        {'res': resource['name'], 'err': details})
        try:
            # this is the ugly stage we come to brute force
            if failback:
                role = 'Slave'
            else:
                role = 'Master'
            LOG.warning('Attempt to change_role to %(role)s', {'role': role})
            failover_rep_mgr.change_role(resource_id=resource['name'],
                                         new_role=role)
            return True, None
        except m_errors.NoMirrorDefinedError as e:
            details = self.proxy._get_code_and_status_or_message(e)
            msg = ("%(rep_type)s %(res)s no replication defined: %(err)s" %
                   {'rep_type': rep_type, 'res': resource['name'],
                    'err': details})
            LOG.error(msg)
            return False, msg
        except Exception as e:
            details = self.proxy._get_code_and_status_or_message(e)
            msg = ('%(rep_type)s %(res)s change_role failed: %(err)s' %
                   {'rep_type': rep_type, 'res': resource['name'],
                    'err': details})
            LOG.error(msg)
            return False, msg


class VolumeReplication(Replication):
    """Replication of a single volume via the XCLI volume recovery manager."""

    def __init__(self, proxy):
        super(VolumeReplication, self).__init__(proxy)

    def get_recovery_mgr(self):
        return volume_recovery_manager.VolumeRecoveryManager(
            False, self.proxy.ibm_storage_cli)

    def get_remote_recovery_mgr(self):
        return volume_recovery_manager.VolumeRecoveryManager(
            True, self.proxy.ibm_storage_remote_cli)

    def replication_create_mirror(self, resource_name, replication_info,
                                  target, pool):
        """Create and activate a volume mirror on *target*."""
        LOG.debug('VolumeReplication::replication_create_mirror')

        schedule = None
        if replication_info['rpo']:
            schedule = Replication.get_schedule_from_rpo(
                replication_info['rpo'])
        try:
            recovery_mgr = self.get_recovery_mgr()
            recovery_mgr.create_mirror(
                resource_name=resource_name,
                target_name=target,
                mirror_type=replication_info['mode'],
                slave_resource_name=resource_name,
                create_slave='yes',
                remote_pool=pool,
                rpo=replication_info['rpo'],
                schedule=schedule,
                activate_mirror='yes')
        except errors.VolumeMasterError:
            LOG.debug('Volume %(vol)s has been already mirrored',
                      {'vol': resource_name})
        except Exception as e:
            details = self.proxy._get_code_and_status_or_message(e)
            msg = (_("Failed replication for %(resource)s: '%(details)s'") %
                   {'resource': resource_name, 'details': details})
            LOG.error(msg)
            raise self.proxy.meta['exception'].VolumeBackendAPIException(
                data=msg)

    def failover(self, resource, failback):
        """Failover a single volume.

        Attempts to failover a single volume
        Sequence:
        1. attempt to switch roles from master
        2. attempt to change role to master on secondary

        returns (success, failure_reason)
        """
        LOG.debug("VolumeReplication::failover %(vol)s",
                  {'vol': resource['name']})

        recovery_mgr = self.get_recovery_mgr()
        remote_recovery_mgr = self.get_remote_recovery_mgr()
        return self._failover_resource(resource, recovery_mgr,
                                       remote_recovery_mgr, 'vol', failback)


class GroupReplication(Replication):
    """Replication of a consistency group via the CG recovery manager."""

    def __init__(self, proxy):
        super(GroupReplication, self).__init__(proxy)

    def get_recovery_mgr(self):
        return cg_recovery_manager.CGRecoveryManager(
            False, self.proxy.ibm_storage_cli)

    def get_remote_recovery_mgr(self):
        # Bug fix: the original resolved CGRecoveryManager via
        # volume_recovery_manager, which does not define it, so every
        # remote CG recovery attempt raised AttributeError.
        return cg_recovery_manager.CGRecoveryManager(
            True, self.proxy.ibm_storage_remote_cli)

    def replication_create_mirror(self, resource_name, replication_info,
                                  target, pool):
        """Create and activate a consistency-group mirror on *target*."""
        LOG.debug('GroupReplication::replication_create_mirror')
        schedule = None
        if replication_info['rpo']:
            schedule = Replication.get_schedule_from_rpo(
                replication_info['rpo'])
        try:
            recovery_mgr = self.get_recovery_mgr()
            recovery_mgr.create_mirror(
                resource_name=resource_name,
                target_name=target,
                mirror_type=replication_info['mode'],
                slave_resource_name=resource_name,
                rpo=replication_info['rpo'],
                schedule=schedule,
                activate_mirror='yes')
        except Exception as e:
            details = self.proxy._get_code_and_status_or_message(e)
            # Bug fix: the original built msg as a (string, dict) tuple
            # instead of interpolating with %, so the exception carried a
            # tuple rather than a formatted message.
            msg = (_("Failed replication for %(resource)s: '%(details)s'") %
                   {'resource': resource_name, 'details': details})
            LOG.error(msg)
            raise self.proxy.meta['exception'].VolumeBackendAPIException(
                data=msg)

    def failover(self, resource, failback):
        """Failover a consistency group; returns (success, failure_reason)."""
        LOG.debug("GroupReplication::failover %(cg)s",
                  {'cg': resource['name']})

        recovery_mgr = self.get_recovery_mgr()
        remote_recovery_mgr = self.get_remote_recovery_mgr()

        return self._failover_resource(resource, recovery_mgr,
                                       remote_recovery_mgr, 'cg', failback)
#

import random

from eventlet import greenthread
from oslo_concurrency import processutils
from oslo_log import log as logging
from oslo_utils import excutils
import six

from cinder import exception
from cinder.i18n import _
from cinder.objects import fields
from cinder import ssh_utils
from cinder import utils
from cinder.volume.drivers.ibm.storwize_svc import storwize_const

LOG = logging.getLogger(__name__)


class StorwizeSVCReplication(object):
    """Base class for the Storwize/SVC replication modes.

    The hooks below are deliberate no-ops so that a driver configured
    without replication still works; subclasses override them.
    """

    def __init__(self, driver, replication_target=None):
        self.driver = driver
        self.target = replication_target or {}

    def failover_volume_host(self, context, vref):
        pass

    def replication_failback(self, volume):
        pass

    def volume_replication_setup(self, context, vref):
        pass


class StorwizeSVCReplicationGlobalMirror(StorwizeSVCReplication):
    """Support for Storwize/SVC global mirror mode replication.

    Global Mirror establishes a Global Mirror relationship between
    two volumes of equal size. The volumes in a Global Mirror relationship
    are referred to as the master (source) volume and the auxiliary
    (target) volume. This mode is dedicated to the asynchronous volume
    replication.
    """

    asyncmirror = True

    def __init__(self, driver, replication_target=None, target_helpers=None):
        super(StorwizeSVCReplicationGlobalMirror, self).__init__(
            driver, replication_target)
        self.target_helpers = target_helpers

    def volume_replication_setup(self, context, vref):
        """Create the aux volume (if needed) and the rc relationship."""
        LOG.debug('enter: volume_replication_setup: volume %s', vref['name'])

        target_vol_name = storwize_const.REPLICA_AUX_VOL_PREFIX + vref['name']
        try:
            attr = self.target_helpers.get_vdisk_attributes(target_vol_name)
            if not attr:
                opts = self.driver._get_vdisk_params(vref['volume_type_id'])
                pool = self.target.get('pool_name')
                src_attr = self.driver._helpers.get_vdisk_attributes(
                    vref['name'])
                # Keep the aux volume in the same I/O group as the source.
                opts['iogrp'] = src_attr['IO_group_id']
                self.target_helpers.create_vdisk(target_vol_name,
                                                 six.text_type(vref['size']),
                                                 'gb', pool, opts)

            system_info = self.target_helpers.get_system_info()
            self.driver._helpers.create_relationship(
                vref['name'], target_vol_name, system_info.get('system_name'),
                self.asyncmirror)
        except Exception as e:
            msg = (_("Unable to set up mirror mode replication for %(vol)s. "
                     "Exception: %(err)s.") % {'vol': vref['id'],
                                               'err': e})
            LOG.exception(msg)
            raise exception.VolumeDriverException(message=msg)
        LOG.debug('leave: volume_replication_setup:volume %s', vref['name'])

    def failover_volume_host(self, context, vref):
        """Fail a single volume over to the secondary back-end."""
        LOG.debug('enter: failover_volume_host: vref=%(vref)s',
                  {'vref': vref['name']})
        target_vol = storwize_const.REPLICA_AUX_VOL_PREFIX + vref['name']

        try:
            rel_info = self.target_helpers.get_relationship_info(target_vol)
            # Reverse the role of the primary and secondary volumes
            self.target_helpers.switch_relationship(rel_info['name'])
            return {'replication_status':
                    fields.ReplicationStatus.FAILED_OVER}
        except Exception as e:
            LOG.exception('Unable to fail-over the volume %(id)s to the '
                          'secondary back-end by switchrcrelationship '
                          'command, error: %(error)s',
                          {"id": vref['id'], "error": e})
            # If the switch command fail, try to make the aux volume
            # writeable again.
            try:
                self.target_helpers.stop_relationship(target_vol,
                                                      access=True)
                return {'replication_status':
                        fields.ReplicationStatus.FAILED_OVER}
            except Exception as e:
                msg = (_('Unable to fail-over the volume %(id)s to the '
                         'secondary back-end, error: %(error)s') %
                       {"id": vref['id'], "error": e})
                LOG.exception(msg)
                raise exception.VolumeDriverException(message=msg)

    def replication_failback(self, volume):
        """Switch the relationship back so the master side is primary."""
        tgt_volume = storwize_const.REPLICA_AUX_VOL_PREFIX + volume['name']
        rel_info = self.target_helpers.get_relationship_info(tgt_volume)
        if rel_info:
            try:
                self.target_helpers.switch_relationship(rel_info['name'],
                                                        aux=False)
                return {'replication_status':
                        fields.ReplicationStatus.ENABLED,
                        'status': 'available'}
            except Exception as e:
                msg = (_('Unable to fail-back the volume:%(vol)s to the '
                         'master back-end, error:%(error)s') %
                       {"vol": volume['name'], "error": e})
                LOG.exception(msg)
                raise exception.VolumeDriverException(message=msg)


class StorwizeSVCReplicationMetroMirror(
        StorwizeSVCReplicationGlobalMirror):
    """Support for Storwize/SVC metro mirror mode replication.

    Metro Mirror establishes a Metro Mirror relationship between
    two volumes of equal size. The volumes in a Metro Mirror relationship
    are referred to as the master (source) volume and the auxiliary
    (target) volume.
    """

    asyncmirror = False

    def __init__(self, driver, replication_target=None, target_helpers=None):
        super(StorwizeSVCReplicationMetroMirror, self).__init__(
            driver, replication_target, target_helpers)


class StorwizeSVCReplicationGMCV(StorwizeSVCReplicationGlobalMirror):
    """Support for Storwize/SVC global mirror with change volumes mode.

    Global Mirror with Change Volumes(GMCV) provides asynchronous replication
    based on point-in-time copies of data. The volumes in a GMCV relationship
    are referred to as the master (source) volume, master change volume, the
    auxiliary (target) volume and auxiliary change volume.
    """

    asyncmirror = True

    def __init__(self, driver, replication_target=None, target_helpers=None):
        super(StorwizeSVCReplicationGMCV, self).__init__(
            driver, replication_target, target_helpers)

    def volume_replication_setup(self, context, vref):
        """Create the change/aux volumes and the cycling rc relationship."""
        LOG.debug('enter: volume_replication_setup: volume %s', vref['name'])
        source_change_vol_name = (storwize_const.REPLICA_CHG_VOL_PREFIX +
                                  vref['name'])
        target_vol_name = storwize_const.REPLICA_AUX_VOL_PREFIX + vref['name']
        target_change_vol_name = (storwize_const.REPLICA_CHG_VOL_PREFIX +
                                  target_vol_name)
        try:
            src_attr = self.driver._helpers.get_vdisk_attributes(
                vref['name'])
            # Create source change volume if it doesn't exist
            src_change_attr = self.driver._helpers.get_vdisk_attributes(
                source_change_vol_name)
            if not src_change_attr:
                src_change_opts = self.driver._get_vdisk_params(
                    vref['volume_type_id'])
                src_change_opts['iogrp'] = src_attr['IO_group_id']
                # Change volumes would usually be thin-provisioned
                src_change_opts['autoexpand'] = True
                self.driver._helpers.create_vdisk(source_change_vol_name,
                                                  six.text_type(vref['size']),
                                                  'gb',
                                                  src_attr['mdisk_grp_id'],
                                                  src_change_opts)
            # Create target volume if it doesn't exist
            target_attr = self.target_helpers.get_vdisk_attributes(
                target_vol_name)
            if not target_attr:
                target_opts = self.driver._get_vdisk_params(
                    vref['volume_type_id'])
                target_pool = self.target.get('pool_name')
                target_opts['iogrp'] = src_attr['IO_group_id']
                self.target_helpers.create_vdisk(target_vol_name,
                                                 six.text_type(vref['size']),
                                                 'gb',
                                                 target_pool,
                                                 target_opts)

            # Create target change volume if it doesn't exist
            target_change_attr = self.target_helpers.get_vdisk_attributes(
                target_change_vol_name)
            if not target_change_attr:
                target_change_opts = self.driver._get_vdisk_params(
                    vref['volume_type_id'])
                target_change_pool = self.target.get('pool_name')
                target_change_opts['iogrp'] = src_attr['IO_group_id']
                # Change Volumes would usually be thin-provisioned
                target_change_opts['autoexpand'] = True
                self.target_helpers.create_vdisk(target_change_vol_name,
                                                 six.text_type(vref['size']),
                                                 'gb',
                                                 target_change_pool,
                                                 target_change_opts)

            system_info = self.target_helpers.get_system_info()
            # Get cycle_period_seconds
            src_change_opts = self.driver._get_vdisk_params(
                vref['volume_type_id'])
            cycle_period_seconds = src_change_opts.get('cycle_period_seconds')
            self.driver._helpers.create_relationship(
                vref['name'], target_vol_name, system_info.get('system_name'),
                self.asyncmirror, True, source_change_vol_name,
                cycle_period_seconds)
            # Set target change volume
            self.target_helpers.change_relationship_changevolume(
                target_vol_name, target_change_vol_name, False)
            # Start gmcv relationship
            self.driver._helpers.start_relationship(vref['name'])
        except Exception as e:
            msg = (_("Unable to set up gmcv mode replication for %(vol)s. "
                     "Exception: %(err)s.") % {'vol': vref['id'],
                                               'err': six.text_type(e)})
            LOG.exception(msg)
            raise exception.VolumeDriverException(message=msg)
        LOG.debug('leave: volume_replication_setup:volume %s', vref['name'])

    def failover_volume_host(self, context, vref):
        """Make the aux volume writeable so the host can fail over to it."""
        LOG.debug('enter: failover_volume_host: vref=%(vref)s',
                  {'vref': vref['name']})
        # Make the aux volume writeable.
        try:
            self.target_helpers.stop_relationship(
                storwize_const.REPLICA_AUX_VOL_PREFIX + vref['name'],
                access=True)
            return {'replication_status':
                    fields.ReplicationStatus.FAILED_OVER}
        except Exception as e:
            msg = (_('Unable to fail-over the volume %(id)s to the '
                     'secondary back-end, error: %(error)s') %
                   {"id": vref['id'], "error": six.text_type(e)})
            LOG.exception(msg)
            raise exception.VolumeDriverException(message=msg)

    def replication_failback(self, volume):
        """Restart the relationship with the master side as primary."""
        LOG.debug('enter: replication_failback: volume=%(volume)s',
                  {'volume': volume['name']})
        tgt_volume = storwize_const.REPLICA_AUX_VOL_PREFIX + volume['name']
        rel_info = self.target_helpers.get_relationship_info(tgt_volume)
        if rel_info:
            try:
                self.target_helpers.stop_relationship(tgt_volume, access=True)
                self.target_helpers.start_relationship(tgt_volume, 'master')
                return {'replication_status':
                        fields.ReplicationStatus.ENABLED,
                        'status': 'available'}
            except Exception as e:
                msg = (_('Unable to fail-back the volume:%(vol)s to the '
                         'master back-end, error:%(error)s') %
                       {"vol": volume['name'], "error": six.text_type(e)})
                LOG.exception(msg)
                raise exception.VolumeDriverException(message=msg)


class StorwizeSVCReplicationManager(object):
    """Owns the SSH connection to the replication target and the mode
    objects (global mirror, metro mirror, GMCV) built on top of it."""

    def __init__(self, driver, replication_target=None, target_helpers=None):
        self.sshpool = None
        self.driver = driver
        self.target = replication_target
        self.target_helpers = target_helpers(self._run_ssh)
        self._master_helpers = self.driver._master_backend_helpers
        self.global_m = StorwizeSVCReplicationGlobalMirror(
            self.driver, replication_target, self.target_helpers)
        self.metro_m = StorwizeSVCReplicationMetroMirror(
            self.driver, replication_target, self.target_helpers)
        self.gmcv = StorwizeSVCReplicationGMCV(
            self.driver, replication_target, self.target_helpers)

    def _run_ssh(self, cmd_list, check_exit_code=True, attempts=1):
        """Run *cmd_list* on the replication target over pooled SSH."""
        utils.check_ssh_injection(cmd_list)
        # TODO(vhou): We'll have a common method in ssh_utils to take
        # care of this _run_ssh method.
        command = ' '.join(cmd_list)

        if not self.sshpool:
            self.sshpool = ssh_utils.SSHPool(
                self.target.get('san_ip'),
                self.target.get('san_ssh_port', 22),
                self.target.get('ssh_conn_timeout', 30),
                self.target.get('san_login'),
                password=self.target.get('san_password'),
                privatekey=self.target.get('san_private_key', ''),
                min_size=self.target.get('ssh_min_pool_conn', 1),
                max_size=self.target.get('ssh_max_pool_conn', 5),)
        last_exception = None
        try:
            with self.sshpool.item() as ssh:
                while attempts > 0:
                    attempts -= 1
                    try:
                        return processutils.ssh_execute(
                            ssh, command, check_exit_code=check_exit_code)
                    except Exception as e:
                        LOG.error(six.text_type(e))
                        last_exception = e
                        greenthread.sleep(random.randint(20, 500) / 100.0)
                # All attempts failed; re-raise with the last error's
                # details when it carried them, otherwise a generic error.
                try:
                    raise processutils.ProcessExecutionError(
                        exit_code=last_exception.exit_code,
                        stdout=last_exception.stdout,
                        stderr=last_exception.stderr,
                        cmd=last_exception.cmd)
                except AttributeError:
                    raise processutils.ProcessExecutionError(
                        exit_code=-1, stdout="",
                        stderr="Error running SSH command",
                        cmd=command)
        except Exception:
            with excutils.save_and_reraise_exception():
                LOG.error("Error running SSH command: %s", command)

    def get_target_helpers(self):
        return self.target_helpers

    def get_replica_obj(self, rep_type):
        """Return the mode object for *rep_type*, or None if unknown."""
        if rep_type == storwize_const.GLOBAL:
            return self.global_m
        elif rep_type == storwize_const.METRO:
            return self.metro_m
        elif rep_type == storwize_const.GMCV:
            return self.gmcv
        else:
            return None

    def _partnership_validate_create(self, client, remote_name, remote_ip):
        """Create an FC or IP partnership towards *remote_name* if absent."""
        try:
            partnership_info = client.get_partnership_info(
                remote_name)
            if not partnership_info:
                candidate_info = client.get_partnershipcandidate_info(
                    remote_name)
                if candidate_info:
                    client.mkfcpartnership(remote_name)
                else:
                    client.mkippartnership(remote_ip)
        except Exception:
            # Bug fix: the original assigned a (string, name) tuple instead
            # of %-interpolating, so the cluster name never appeared in the
            # raised message.
            msg = (_('Unable to establish the partnership with '
                     'the Storwize cluster %s.') % remote_name)
            LOG.error(msg)
            raise exception.VolumeDriverException(message=msg)

    def _partnership_start(self, client, remote_name):
        """Start the partnership towards *remote_name* if not fully up."""
        try:
            partnership_info = client.get_partnership_info(
                remote_name)
            if (partnership_info and
                    partnership_info['partnership'] != 'fully_configured'):
                client.chpartnership(partnership_info['id'])
        except Exception:
            # Bug fix: same tuple-instead-of-% defect as above.
            msg = (_('Unable to start the partnership with '
                     'the Storwize cluster %s.') % remote_name)
            LOG.error(msg)
            raise exception.VolumeDriverException(message=msg)

    def establish_target_partnership(self):
        """Create and start partnerships in both directions, when the
        local and target clusters are distinct systems."""
        local_system_info = self._master_helpers.get_system_info()
        target_system_info = self.target_helpers.get_system_info()
        local_system_name = local_system_info['system_name']
        target_system_name = target_system_info['system_name']
        local_ip = self.driver.configuration.safe_get('san_ip')
        target_ip = self.target.get('san_ip')
        # Establish partnership only when the local system and the replication
        # target system is different.
        if target_system_name != local_system_name:
            self._partnership_validate_create(self._master_helpers,
                                              target_system_name, target_ip)
            self._partnership_validate_create(self.target_helpers,
                                              local_system_name, local_ip)
            self._partnership_start(self._master_helpers, target_system_name)
            self._partnership_start(self.target_helpers, local_system_name)
# Product IDs reported by the storage firmware. SVC reports 2145 from
# release 6.1.0 onwards (older releases report no product id at all).
DEV_MODEL_SVC = '2145'
DEV_MODEL_STORWIZE = '2076'
DEV_MODEL_STORWIZE_V3500 = '2071'
DEV_MODEL_STORWIZE_V3700 = '2072'
DEV_MODEL_STORWIZE_V7000 = '2076'
DEV_MODEL_STORWIZE_V5000 = '2078'
DEV_MODEL_STORWIZE_V5000_1YR = '2077'
DEV_MODEL_FLASH_V9000 = '9846'
DEV_MODEL_FLEX = '4939'

# Device models that support remote-copy replication.
REP_CAP_DEVS = (DEV_MODEL_SVC, DEV_MODEL_STORWIZE, DEV_MODEL_STORWIZE_V5000,
                DEV_MODEL_STORWIZE_V5000_1YR, DEV_MODEL_FLASH_V9000,
                DEV_MODEL_FLEX)

# Replication type identifiers used in volume-type extra specs, plus the
# cycling mode ('multi') used by GMCV relationships.
GLOBAL = 'global'
METRO = 'metro'
GMCV = 'gmcv'
GMCV_MULTI = 'multi'
VALID_REP_TYPES = (GLOBAL, METRO, GMCV)
FAILBACK_VALUE = 'default'

# Remote-copy polling defaults: one week timeout, 5 second interval.
DEFAULT_RC_TIMEOUT = 3600 * 24 * 7
DEFAULT_RC_INTERVAL = 5

# Name prefixes for the auxiliary and change volumes created on the
# replication target for a replicated volume.
REPLICA_AUX_VOL_PREFIX = 'aux_'
REPLICA_CHG_VOL_PREFIX = 'chg_'

# Remote mirror copy states as reported by the backend CLI.
REP_CONSIS_SYNC = 'consistent_synchronized'
REP_CONSIS_COPYING = 'consistent_copying'
REP_CONSIS_STOP = 'consistent_stopped'
REP_SYNC = 'synchronized'
REP_IDL = 'idling'
REP_IDL_DISC = 'idling_disconnected'
REP_STATUS_ON_LINE = 'online'
-# - -import math -import paramiko -import random -import re -import time -import unicodedata - -from eventlet import greenthread -from oslo_concurrency import processutils -from oslo_config import cfg -from oslo_log import log as logging -from oslo_serialization import jsonutils as json -from oslo_service import loopingcall -from oslo_utils import excutils -from oslo_utils import strutils -from oslo_utils import units -import six - -from cinder import context -from cinder import exception -from cinder.i18n import _ -from cinder import objects -from cinder.objects import fields -from cinder import ssh_utils -from cinder import utils as cinder_utils -from cinder.volume import configuration -from cinder.volume import driver -from cinder.volume.drivers.ibm.storwize_svc import ( - replication as storwize_rep) -from cinder.volume.drivers.ibm.storwize_svc import storwize_const -from cinder.volume.drivers.san import san -from cinder.volume import qos_specs -from cinder.volume import utils -from cinder.volume import volume_types - - -INTERVAL_1_SEC = 1 -DEFAULT_TIMEOUT = 15 -LOG = logging.getLogger(__name__) - -storwize_svc_opts = [ - cfg.ListOpt('storwize_svc_volpool_name', - default=['volpool'], - help='Comma separated list of storage system storage ' - 'pools for volumes.'), - cfg.IntOpt('storwize_svc_vol_rsize', - default=2, - min=-1, max=100, - help='Storage system space-efficiency parameter for volumes ' - '(percentage)'), - cfg.IntOpt('storwize_svc_vol_warning', - default=0, - min=-1, max=100, - help='Storage system threshold for volume capacity warnings ' - '(percentage)'), - cfg.BoolOpt('storwize_svc_vol_autoexpand', - default=True, - help='Storage system autoexpand parameter for volumes ' - '(True/False)'), - cfg.IntOpt('storwize_svc_vol_grainsize', - default=256, - help='Storage system grain size parameter for volumes ' - '(32/64/128/256)'), - cfg.BoolOpt('storwize_svc_vol_compression', - default=False, - help='Storage system compression option for volumes'), - 
cfg.BoolOpt('storwize_svc_vol_easytier', - default=True, - help='Enable Easy Tier for volumes'), - cfg.StrOpt('storwize_svc_vol_iogrp', - default='0', - help='The I/O group in which to allocate volumes. It can be a ' - 'comma-separated list in which case the driver will select an ' - 'io_group based on least number of volumes associated with the ' - 'io_group.'), - cfg.IntOpt('storwize_svc_flashcopy_timeout', - default=120, - min=1, max=600, - help='Maximum number of seconds to wait for FlashCopy to be ' - 'prepared.'), - cfg.BoolOpt('storwize_svc_multihostmap_enabled', - default=True, - help='This option no longer has any affect. It is deprecated ' - 'and will be removed in the next release.', - deprecated_for_removal=True), - cfg.BoolOpt('storwize_svc_allow_tenant_qos', - default=False, - help='Allow tenants to specify QOS on create'), - cfg.StrOpt('storwize_svc_stretched_cluster_partner', - default=None, - help='If operating in stretched cluster mode, specify the ' - 'name of the pool in which mirrored copies are stored.' - 'Example: "pool2"'), - cfg.StrOpt('storwize_san_secondary_ip', - default=None, - help='Specifies secondary management IP or hostname to be ' - 'used if san_ip is invalid or becomes inaccessible.'), - cfg.BoolOpt('storwize_svc_vol_nofmtdisk', - default=False, - help='Specifies that the volume not be formatted during ' - 'creation.'), - cfg.IntOpt('storwize_svc_flashcopy_rate', - default=50, - min=1, max=100, - help='Specifies the Storwize FlashCopy copy rate to be used ' - 'when creating a full volume copy. The default is rate ' - 'is 50, and the valid rates are 1-100.'), - cfg.StrOpt('storwize_svc_mirror_pool', - default=None, - help='Specifies the name of the pool in which mirrored copy ' - 'is stored. Example: "pool2"'), - cfg.IntOpt('cycle_period_seconds', - default=300, - min=60, max=86400, - help='This defines an optional cycle period that applies to ' - 'Global Mirror relationships with a cycling mode of multi. 
' - 'A Global Mirror relationship using the multi cycling_mode ' - 'performs a complete cycle at most once each period. ' - 'The default is 300 seconds, and the valid seconds ' - 'are 60-86400.'), -] - -CONF = cfg.CONF -CONF.register_opts(storwize_svc_opts, group=configuration.SHARED_CONF_GROUP) - - -class StorwizeSSH(object): - """SSH interface to IBM Storwize family and SVC storage systems.""" - def __init__(self, run_ssh): - self._ssh = run_ssh - - def _run_ssh(self, ssh_cmd): - try: - return self._ssh(ssh_cmd) - except processutils.ProcessExecutionError as e: - msg = (_('CLI Exception output:\n command: %(cmd)s\n ' - 'stdout: %(out)s\n stderr: %(err)s.') % - {'cmd': ssh_cmd, - 'out': e.stdout, - 'err': e.stderr}) - LOG.error(msg) - raise exception.VolumeBackendAPIException(data=msg) - - def run_ssh_info(self, ssh_cmd, delim='!', with_header=False): - """Run an SSH command and return parsed output.""" - raw = self._run_ssh(ssh_cmd) - return CLIResponse(raw, ssh_cmd=ssh_cmd, delim=delim, - with_header=with_header) - - def run_ssh_assert_no_output(self, ssh_cmd): - """Run an SSH command and assert no output returned.""" - out, err = self._run_ssh(ssh_cmd) - if len(out.strip()) != 0: - msg = (_('Expected no output from CLI command %(cmd)s, ' - 'got %(out)s.') % {'cmd': ' '.join(ssh_cmd), 'out': out}) - LOG.error(msg) - raise exception.VolumeBackendAPIException(data=msg) - - def run_ssh_check_created(self, ssh_cmd): - """Run an SSH command and return the ID of the created object.""" - out, err = self._run_ssh(ssh_cmd) - try: - match_obj = re.search(r'\[([0-9]+)\],? 
successfully created', out) - return match_obj.group(1) - except (AttributeError, IndexError): - msg = (_('Failed to parse CLI output:\n command: %(cmd)s\n ' - 'stdout: %(out)s\n stderr: %(err)s.') % - {'cmd': ssh_cmd, - 'out': out, - 'err': err}) - LOG.error(msg) - raise exception.VolumeBackendAPIException(data=msg) - - def lsnode(self, node_id=None): - with_header = True - ssh_cmd = ['svcinfo', 'lsnode', '-delim', '!'] - if node_id: - with_header = False - ssh_cmd.append(node_id) - return self.run_ssh_info(ssh_cmd, with_header=with_header) - - def lslicense(self): - ssh_cmd = ['svcinfo', 'lslicense', '-delim', '!'] - return self.run_ssh_info(ssh_cmd)[0] - - def lsguicapabilities(self): - ssh_cmd = ['svcinfo', 'lsguicapabilities', '-delim', '!'] - return self.run_ssh_info(ssh_cmd)[0] - - def lssystem(self): - ssh_cmd = ['svcinfo', 'lssystem', '-delim', '!'] - return self.run_ssh_info(ssh_cmd)[0] - - def lsmdiskgrp(self, pool): - ssh_cmd = ['svcinfo', 'lsmdiskgrp', '-bytes', '-delim', '!', - '"%s"' % pool] - try: - return self.run_ssh_info(ssh_cmd)[0] - except exception.VolumeBackendAPIException as ex: - LOG.warning("Failed to get pool %(pool)s info. 
" - "Exception: %(ex)s.", {'pool': pool, - 'ex': ex}) - return None - - def lsiogrp(self): - ssh_cmd = ['svcinfo', 'lsiogrp', '-delim', '!'] - return self.run_ssh_info(ssh_cmd, with_header=True) - - def lsportip(self): - ssh_cmd = ['svcinfo', 'lsportip', '-delim', '!'] - return self.run_ssh_info(ssh_cmd, with_header=True) - - @staticmethod - def _create_port_arg(port_type, port_name): - if port_type == 'initiator': - port = ['-iscsiname'] - else: - port = ['-hbawwpn'] - port.append(port_name) - return port - - def mkhost(self, host_name, port_type, port_name): - port = self._create_port_arg(port_type, port_name) - ssh_cmd = ['svctask', 'mkhost', '-force'] + port - ssh_cmd += ['-name', '"%s"' % host_name] - return self.run_ssh_check_created(ssh_cmd) - - def addhostport(self, host, port_type, port_name): - port = self._create_port_arg(port_type, port_name) - ssh_cmd = ['svctask', 'addhostport', '-force'] + port + ['"%s"' % host] - self.run_ssh_assert_no_output(ssh_cmd) - - def lshost(self, host=None): - with_header = True - ssh_cmd = ['svcinfo', 'lshost', '-delim', '!'] - if host: - with_header = False - ssh_cmd.append('"%s"' % host) - return self.run_ssh_info(ssh_cmd, with_header=with_header) - - def add_chap_secret(self, secret, host): - ssh_cmd = ['svctask', 'chhost', '-chapsecret', secret, '"%s"' % host] - self.run_ssh_assert_no_output(ssh_cmd) - - def lsiscsiauth(self): - ssh_cmd = ['svcinfo', 'lsiscsiauth', '-delim', '!'] - return self.run_ssh_info(ssh_cmd, with_header=True) - - def lsfabric(self, wwpn=None, host=None): - ssh_cmd = ['svcinfo', 'lsfabric', '-delim', '!'] - if wwpn: - ssh_cmd.extend(['-wwpn', wwpn]) - elif host: - ssh_cmd.extend(['-host', '"%s"' % host]) - else: - msg = (_('Must pass wwpn or host to lsfabric.')) - LOG.error(msg) - raise exception.VolumeDriverException(message=msg) - return self.run_ssh_info(ssh_cmd, with_header=True) - - def mkvdiskhostmap(self, host, vdisk, lun, multihostmap): - """Map vdisk to host. 
- - If vdisk already mapped and multihostmap is True, use the force flag. - """ - ssh_cmd = ['svctask', 'mkvdiskhostmap', '-host', '"%s"' % host, vdisk] - - if lun: - ssh_cmd.insert(ssh_cmd.index(vdisk), '-scsi') - ssh_cmd.insert(ssh_cmd.index(vdisk), lun) - - if multihostmap: - ssh_cmd.insert(ssh_cmd.index('mkvdiskhostmap') + 1, '-force') - try: - self.run_ssh_check_created(ssh_cmd) - result_lun = self.get_vdiskhostmapid(vdisk, host) - if result_lun is None or (lun and lun != result_lun): - msg = (_('mkvdiskhostmap error:\n command: %(cmd)s\n ' - 'lun: %(lun)s\n result_lun: %(result_lun)s') % - {'cmd': ssh_cmd, - 'lun': lun, - 'result_lun': result_lun}) - LOG.error(msg) - raise exception.VolumeDriverException(message=msg) - return result_lun - except Exception as ex: - if (not multihostmap and hasattr(ex, 'message') and - 'CMMVC6071E' in ex.message): - LOG.error('storwize_svc_multihostmap_enabled is set ' - 'to False, not allowing multi host mapping.') - raise exception.VolumeDriverException( - message=_('CMMVC6071E The VDisk-to-host mapping was not ' - 'created because the VDisk is already mapped ' - 'to a host.\n"')) - with excutils.save_and_reraise_exception(): - LOG.error('Error mapping VDisk-to-host') - - def mkrcrelationship(self, master, aux, system, asyncmirror, - cyclingmode=False): - ssh_cmd = ['svctask', 'mkrcrelationship', '-master', master, - '-aux', aux, '-cluster', system] - if asyncmirror: - ssh_cmd.append('-global') - if cyclingmode: - ssh_cmd.extend(['-cyclingmode', 'multi']) - return self.run_ssh_check_created(ssh_cmd) - - def rmrcrelationship(self, relationship, force=False): - ssh_cmd = ['svctask', 'rmrcrelationship'] - if force: - ssh_cmd += ['-force'] - ssh_cmd += [relationship] - self.run_ssh_assert_no_output(ssh_cmd) - - def switchrelationship(self, relationship, aux=True): - primary = 'aux' if aux else 'master' - ssh_cmd = ['svctask', 'switchrcrelationship', '-primary', - primary, relationship] - self.run_ssh_assert_no_output(ssh_cmd) - - 
def startrcrelationship(self, rc_rel, primary=None): - ssh_cmd = ['svctask', 'startrcrelationship', '-force'] - if primary: - ssh_cmd.extend(['-primary', primary]) - ssh_cmd.append(rc_rel) - self.run_ssh_assert_no_output(ssh_cmd) - - def ch_rcrelationship_cycleperiod(self, relationship, - cycle_period_seconds): - # Note: Can only change one attribute at a time, - # so define two ch_rcrelationship_xxx here - if cycle_period_seconds: - ssh_cmd = ['svctask', 'chrcrelationship'] - ssh_cmd.extend(['-cycleperiodseconds', - six.text_type(cycle_period_seconds)]) - ssh_cmd.append(relationship) - self.run_ssh_assert_no_output(ssh_cmd) - - def ch_rcrelationship_changevolume(self, relationship, - changevolume, master): - # Note: Can only change one attribute at a time, - # so define two ch_rcrelationship_xxx here - if changevolume: - ssh_cmd = ['svctask', 'chrcrelationship'] - if master: - ssh_cmd.extend(['-masterchange', changevolume]) - else: - ssh_cmd.extend(['-auxchange', changevolume]) - ssh_cmd.append(relationship) - self.run_ssh_assert_no_output(ssh_cmd) - - def stoprcrelationship(self, relationship, access=False): - ssh_cmd = ['svctask', 'stoprcrelationship'] - if access: - ssh_cmd.append('-access') - ssh_cmd.append(relationship) - self.run_ssh_assert_no_output(ssh_cmd) - - def lsrcrelationship(self, rc_rel): - ssh_cmd = ['svcinfo', 'lsrcrelationship', '-delim', '!', rc_rel] - return self.run_ssh_info(ssh_cmd) - - def lspartnership(self, system_name): - key_value = 'name=%s' % system_name - ssh_cmd = ['svcinfo', 'lspartnership', '-filtervalue', - key_value, '-delim', '!'] - return self.run_ssh_info(ssh_cmd, with_header=True) - - def lspartnershipcandidate(self): - ssh_cmd = ['svcinfo', 'lspartnershipcandidate', '-delim', '!'] - return self.run_ssh_info(ssh_cmd, with_header=True) - - def mkippartnership(self, ip_v4, bandwith=1000, backgroundcopyrate=50): - ssh_cmd = ['svctask', 'mkippartnership', '-type', 'ipv4', - '-clusterip', ip_v4, '-linkbandwidthmbits', - 
six.text_type(bandwith), - '-backgroundcopyrate', six.text_type(backgroundcopyrate)] - return self.run_ssh_assert_no_output(ssh_cmd) - - def mkfcpartnership(self, system_name, bandwith=1000, - backgroundcopyrate=50): - ssh_cmd = ['svctask', 'mkfcpartnership', '-linkbandwidthmbits', - six.text_type(bandwith), - '-backgroundcopyrate', six.text_type(backgroundcopyrate), - system_name] - return self.run_ssh_assert_no_output(ssh_cmd) - - def chpartnership(self, partnership_id, start=True): - action = '-start' if start else '-stop' - ssh_cmd = ['svctask', 'chpartnership', action, partnership_id] - return self.run_ssh_assert_no_output(ssh_cmd) - - def rmvdiskhostmap(self, host, vdisk): - ssh_cmd = ['svctask', 'rmvdiskhostmap', '-host', '"%s"' % host, - '"%s"' % vdisk] - self.run_ssh_assert_no_output(ssh_cmd) - - def lsvdiskhostmap(self, vdisk): - ssh_cmd = ['svcinfo', 'lsvdiskhostmap', '-delim', '!', '"%s"' % vdisk] - return self.run_ssh_info(ssh_cmd, with_header=True) - - def lshostvdiskmap(self, host): - ssh_cmd = ['svcinfo', 'lshostvdiskmap', '-delim', '!', '"%s"' % host] - return self.run_ssh_info(ssh_cmd, with_header=True) - - def get_vdiskhostmapid(self, vdisk, host): - resp = self.lsvdiskhostmap(vdisk) - for mapping_info in resp: - if mapping_info['host_name'] == host: - lun_id = mapping_info['SCSI_id'] - return lun_id - return None - - def rmhost(self, host): - ssh_cmd = ['svctask', 'rmhost', '"%s"' % host] - self.run_ssh_assert_no_output(ssh_cmd) - - def mkvdisk(self, name, size, units, pool, opts, params): - ssh_cmd = ['svctask', 'mkvdisk', '-name', '"%s"' % name, '-mdiskgrp', - '"%s"' % pool, '-iogrp', six.text_type(opts['iogrp']), - '-size', size, '-unit', units] + params - try: - return self.run_ssh_check_created(ssh_cmd) - except Exception as ex: - if hasattr(ex, 'msg') and 'CMMVC6372W' in ex.msg: - vdisk = self.lsvdisk(name) - if vdisk: - LOG.warning('CMMVC6372W The virtualized storage ' - 'capacity that the cluster is using is ' - 'approaching the 
virtualized storage ' - 'capacity that is licensed.') - return vdisk['id'] - with excutils.save_and_reraise_exception(): - LOG.exception('Failed to create vdisk %(vol)s.', - {'vol': name}) - - def rmvdisk(self, vdisk, force=True): - ssh_cmd = ['svctask', 'rmvdisk'] - if force: - ssh_cmd += ['-force'] - ssh_cmd += ['"%s"' % vdisk] - self.run_ssh_assert_no_output(ssh_cmd) - - def lsvdisk(self, vdisk): - """Return vdisk attributes or None if it doesn't exist.""" - ssh_cmd = ['svcinfo', 'lsvdisk', '-bytes', '-delim', '!', - '"%s"' % vdisk] - out, err = self._ssh(ssh_cmd, check_exit_code=False) - if not err: - return CLIResponse((out, err), ssh_cmd=ssh_cmd, delim='!', - with_header=False)[0] - if 'CMMVC5754E' in err: - return None - msg = (_('CLI Exception output:\n command: %(cmd)s\n ' - 'stdout: %(out)s\n stderr: %(err)s.') % - {'cmd': ssh_cmd, - 'out': out, - 'err': err}) - LOG.error(msg) - raise exception.VolumeBackendAPIException(data=msg) - - def lsvdisks_from_filter(self, filter_name, value): - """Performs an lsvdisk command, filtering the results as specified. - - Returns an iterable for all matching vdisks. 
- """ - ssh_cmd = ['svcinfo', 'lsvdisk', '-bytes', '-delim', '!', - '-filtervalue', '%s=%s' % (filter_name, value)] - return self.run_ssh_info(ssh_cmd, with_header=True) - - def chvdisk(self, vdisk, params): - ssh_cmd = ['svctask', 'chvdisk'] + params + ['"%s"' % vdisk] - self.run_ssh_assert_no_output(ssh_cmd) - - def movevdisk(self, vdisk, iogrp): - ssh_cmd = ['svctask', 'movevdisk', '-iogrp', iogrp, '"%s"' % vdisk] - self.run_ssh_assert_no_output(ssh_cmd) - - def expandvdisksize(self, vdisk, amount): - ssh_cmd = ( - ['svctask', 'expandvdisksize', '-size', six.text_type(amount), - '-unit', 'gb', '"%s"' % vdisk]) - self.run_ssh_assert_no_output(ssh_cmd) - - def mkfcmap(self, source, target, full_copy, copy_rate, consistgrp=None): - ssh_cmd = ['svctask', 'mkfcmap', '-source', '"%s"' % source, '-target', - '"%s"' % target, '-autodelete'] - if not full_copy: - ssh_cmd.extend(['-copyrate', '0']) - else: - ssh_cmd.extend(['-copyrate', six.text_type(copy_rate)]) - if consistgrp: - ssh_cmd.extend(['-consistgrp', consistgrp]) - out, err = self._ssh(ssh_cmd, check_exit_code=False) - if 'successfully created' not in out: - msg = (_('CLI Exception output:\n command: %(cmd)s\n ' - 'stdout: %(out)s\n stderr: %(err)s.') % - {'cmd': ssh_cmd, - 'out': out, - 'err': err}) - LOG.error(msg) - raise exception.VolumeBackendAPIException(data=msg) - try: - match_obj = re.search(r'FlashCopy Mapping, id \[([0-9]+)\], ' - 'successfully created', out) - fc_map_id = match_obj.group(1) - except (AttributeError, IndexError): - msg = (_('Failed to parse CLI output:\n command: %(cmd)s\n ' - 'stdout: %(out)s\n stderr: %(err)s.') % - {'cmd': ssh_cmd, - 'out': out, - 'err': err}) - LOG.error(msg) - raise exception.VolumeBackendAPIException(data=msg) - return fc_map_id - - def prestartfcmap(self, fc_map_id): - ssh_cmd = ['svctask', 'prestartfcmap', fc_map_id] - self.run_ssh_assert_no_output(ssh_cmd) - - def startfcmap(self, fc_map_id): - ssh_cmd = ['svctask', 'startfcmap', fc_map_id] - 
self.run_ssh_assert_no_output(ssh_cmd) - - def prestartfcconsistgrp(self, fc_consist_group): - ssh_cmd = ['svctask', 'prestartfcconsistgrp', fc_consist_group] - self.run_ssh_assert_no_output(ssh_cmd) - - def startfcconsistgrp(self, fc_consist_group): - ssh_cmd = ['svctask', 'startfcconsistgrp', fc_consist_group] - self.run_ssh_assert_no_output(ssh_cmd) - - def stopfcconsistgrp(self, fc_consist_group): - ssh_cmd = ['svctask', 'stopfcconsistgrp', fc_consist_group] - self.run_ssh_assert_no_output(ssh_cmd) - - def chfcmap(self, fc_map_id, copyrate='50', autodel='on'): - ssh_cmd = ['svctask', 'chfcmap', '-copyrate', copyrate, - '-autodelete', autodel, fc_map_id] - self.run_ssh_assert_no_output(ssh_cmd) - - def stopfcmap(self, fc_map_id): - ssh_cmd = ['svctask', 'stopfcmap', fc_map_id] - self.run_ssh_assert_no_output(ssh_cmd) - - def rmfcmap(self, fc_map_id): - ssh_cmd = ['svctask', 'rmfcmap', '-force', fc_map_id] - self.run_ssh_assert_no_output(ssh_cmd) - - def lsvdiskfcmappings(self, vdisk): - ssh_cmd = ['svcinfo', 'lsvdiskfcmappings', '-delim', '!', - '"%s"' % vdisk] - return self.run_ssh_info(ssh_cmd, with_header=True) - - def lsfcmap(self, fc_map_id): - ssh_cmd = ['svcinfo', 'lsfcmap', '-filtervalue', - 'id=%s' % fc_map_id, '-delim', '!'] - return self.run_ssh_info(ssh_cmd, with_header=True) - - def lsfcconsistgrp(self, fc_consistgrp): - ssh_cmd = ['svcinfo', 'lsfcconsistgrp', '-delim', '!', fc_consistgrp] - out, err = self._ssh(ssh_cmd) - return CLIResponse((out, err), ssh_cmd=ssh_cmd, delim='!', - with_header=False) - - def mkfcconsistgrp(self, fc_consist_group): - ssh_cmd = ['svctask', 'mkfcconsistgrp', '-name', fc_consist_group] - return self.run_ssh_check_created(ssh_cmd) - - def rmfcconsistgrp(self, fc_consist_group): - ssh_cmd = ['svctask', 'rmfcconsistgrp', '-force', fc_consist_group] - return self.run_ssh_assert_no_output(ssh_cmd) - - def addvdiskcopy(self, vdisk, dest_pool, params, auto_delete): - ssh_cmd = (['svctask', 'addvdiskcopy'] + params + 
['-mdiskgrp', - '"%s"' % dest_pool]) - if auto_delete: - ssh_cmd += ['-autodelete'] - ssh_cmd += ['"%s"' % vdisk] - return self.run_ssh_check_created(ssh_cmd) - - def lsvdiskcopy(self, vdisk, copy_id=None): - ssh_cmd = ['svcinfo', 'lsvdiskcopy', '-delim', '!'] - with_header = True - if copy_id: - ssh_cmd += ['-copy', copy_id] - with_header = False - ssh_cmd += ['"%s"' % vdisk] - return self.run_ssh_info(ssh_cmd, with_header=with_header) - - def lsvdisksyncprogress(self, vdisk, copy_id): - ssh_cmd = ['svcinfo', 'lsvdisksyncprogress', '-delim', '!', - '-copy', copy_id, '"%s"' % vdisk] - return self.run_ssh_info(ssh_cmd, with_header=True)[0] - - def rmvdiskcopy(self, vdisk, copy_id): - ssh_cmd = ['svctask', 'rmvdiskcopy', '-copy', copy_id, '"%s"' % vdisk] - self.run_ssh_assert_no_output(ssh_cmd) - - def addvdiskaccess(self, vdisk, iogrp): - ssh_cmd = ['svctask', 'addvdiskaccess', '-iogrp', iogrp, - '"%s"' % vdisk] - self.run_ssh_assert_no_output(ssh_cmd) - - def rmvdiskaccess(self, vdisk, iogrp): - ssh_cmd = ['svctask', 'rmvdiskaccess', '-iogrp', iogrp, '"%s"' % vdisk] - self.run_ssh_assert_no_output(ssh_cmd) - - def lsportfc(self, node_id): - ssh_cmd = ['svcinfo', 'lsportfc', '-delim', '!', - '-filtervalue', 'node_id=%s' % node_id] - return self.run_ssh_info(ssh_cmd, with_header=True) - - def migratevdisk(self, vdisk, dest_pool, copy_id='0'): - ssh_cmd = ['svctask', 'migratevdisk', '-mdiskgrp', dest_pool, '-copy', - copy_id, '-vdisk', vdisk] - self.run_ssh_assert_no_output(ssh_cmd) - - -class StorwizeHelpers(object): - - # All the supported QoS key are saved in this dict. When a new - # key is going to add, three values MUST be set: - # 'default': to indicate the value, when the parameter is disabled. - # 'param': to indicate the corresponding parameter in the command. - # 'type': to indicate the type of this value. 
- WAIT_TIME = 5 - svc_qos_keys = {'IOThrottling': {'default': '0', - 'param': 'rate', - 'type': int}} - - def __init__(self, run_ssh): - self.ssh = StorwizeSSH(run_ssh) - self.check_fcmapping_interval = 3 - - @staticmethod - def handle_keyerror(cmd, out): - msg = (_('Could not find key in output of command %(cmd)s: %(out)s.') - % {'out': out, 'cmd': cmd}) - raise exception.VolumeBackendAPIException(data=msg) - - def compression_enabled(self): - """Return whether or not compression is enabled for this system.""" - resp = self.ssh.lslicense() - keys = ['license_compression_enclosures', - 'license_compression_capacity'] - for key in keys: - if resp.get(key, '0') != '0': - return True - - # lslicense is not used for V9000 compression check - # compression_enclosures and compression_capacity are - # always 0. V9000 uses license_scheme 9846 as an - # indicator and can always do compression - try: - resp = self.ssh.lsguicapabilities() - if resp.get('license_scheme', '0') == '9846': - return True - except exception.VolumeBackendAPIException: - LOG.exception("Failed to fetch licensing scheme.") - return False - - def replication_licensed(self): - """Return whether or not replication is enabled for this system.""" - # Uses product_key as an indicator to check - # whether replication is supported in storage. - try: - resp = self.ssh.lsguicapabilities() - product_key = resp.get('product_key', '0') - if product_key in storwize_const.REP_CAP_DEVS: - return True - except exception.VolumeBackendAPIException as war: - LOG.warning("Failed to run lsguicapability. 
Exception: %s.", war) - return False - - def get_system_info(self): - """Return system's name, ID, and code level.""" - resp = self.ssh.lssystem() - level = resp['code_level'] - match_obj = re.search('([0-9].){3}[0-9]', level) - if match_obj is None: - msg = _('Failed to get code level (%s).') % level - raise exception.VolumeBackendAPIException(data=msg) - code_level = match_obj.group().split('.') - return {'code_level': tuple([int(x) for x in code_level]), - 'system_name': resp['name'], - 'system_id': resp['id']} - - def get_pool_attrs(self, pool): - """Return attributes for the specified pool.""" - return self.ssh.lsmdiskgrp(pool) - - def is_pool_defined(self, pool_name): - """Check if vdisk is defined.""" - attrs = self.get_pool_attrs(pool_name) - return attrs is not None - - def get_available_io_groups(self): - """Return list of available IO groups.""" - iogrps = [] - resp = self.ssh.lsiogrp() - for iogrp in resp: - try: - if int(iogrp['node_count']) > 0: - iogrps.append(int(iogrp['id'])) - except KeyError: - self.handle_keyerror('lsiogrp', iogrp) - except ValueError: - msg = (_('Expected integer for node_count, ' - 'svcinfo lsiogrp returned: %(node)s.') % - {'node': iogrp['node_count']}) - raise exception.VolumeBackendAPIException(data=msg) - return iogrps - - def get_vdisk_count_by_io_group(self): - res = {} - resp = self.ssh.lsiogrp() - for iogrp in resp: - try: - if int(iogrp['node_count']) > 0: - res[int(iogrp['id'])] = int(iogrp['vdisk_count']) - except KeyError: - self.handle_keyerror('lsiogrp', iogrp) - except ValueError: - msg = (_('Expected integer for node_count, ' - 'svcinfo lsiogrp returned: %(node)s') % - {'node': iogrp['node_count']}) - raise exception.VolumeBackendAPIException(data=msg) - return res - - def select_io_group(self, state, opts): - selected_iog = 0 - iog_list = StorwizeHelpers._get_valid_requested_io_groups(state, opts) - if len(iog_list) == 0: - raise exception.InvalidInput( - reason=_('Given I/O group(s) %(iogrp)s not valid; 
available ' - 'I/O groups are %(avail)s.') - % {'iogrp': opts['iogrp'], - 'avail': state['available_iogrps']}) - iog_vdc = self.get_vdisk_count_by_io_group() - LOG.debug("IO group current balance %s", iog_vdc) - min_vdisk_count = iog_vdc[iog_list[0]] - selected_iog = iog_list[0] - for iog in iog_list: - if iog_vdc[iog] < min_vdisk_count: - min_vdisk_count = iog_vdc[iog] - selected_iog = iog - LOG.debug("Selected io_group is %d", selected_iog) - return selected_iog - - def get_volume_io_group(self, vol_name): - vdisk = self.ssh.lsvdisk(vol_name) - if vdisk: - resp = self.ssh.lsiogrp() - for iogrp in resp: - if iogrp['name'] == vdisk['IO_group_name']: - return int(iogrp['id']) - return None - - def get_node_info(self): - """Return dictionary containing information on system's nodes.""" - nodes = {} - resp = self.ssh.lsnode() - for node_data in resp: - try: - if node_data['status'] != 'online': - continue - node = {} - node['id'] = node_data['id'] - node['name'] = node_data['name'] - node['IO_group'] = node_data['IO_group_id'] - node['iscsi_name'] = node_data['iscsi_name'] - node['WWNN'] = node_data['WWNN'] - node['status'] = node_data['status'] - node['WWPN'] = [] - node['ipv4'] = [] - node['ipv6'] = [] - node['enabled_protocols'] = [] - nodes[node['id']] = node - except KeyError: - self.handle_keyerror('lsnode', node_data) - return nodes - - def add_iscsi_ip_addrs(self, storage_nodes): - """Add iSCSI IP addresses to system node information.""" - resp = self.ssh.lsportip() - for ip_data in resp: - try: - state = ip_data['state'] - if ip_data['node_id'] in storage_nodes and ( - state == 'configured' or state == 'online'): - node = storage_nodes[ip_data['node_id']] - if len(ip_data['IP_address']): - node['ipv4'].append(ip_data['IP_address']) - if len(ip_data['IP_address_6']): - node['ipv6'].append(ip_data['IP_address_6']) - except KeyError: - self.handle_keyerror('lsportip', ip_data) - - def add_fc_wwpns(self, storage_nodes): - """Add FC WWPNs to system node 
information.""" - for key in storage_nodes: - node = storage_nodes[key] - wwpns = set(node['WWPN']) - resp = self.ssh.lsportfc(node_id=node['id']) - for port_info in resp: - if (port_info['type'] == 'fc' and - port_info['status'] == 'active'): - wwpns.add(port_info['WWPN']) - node['WWPN'] = list(wwpns) - LOG.info('WWPN on node %(node)s: %(wwpn)s.', - {'node': node['id'], 'wwpn': node['WWPN']}) - - def add_chap_secret_to_host(self, host_name): - """Generate and store a randomly-generated CHAP secret for the host.""" - chap_secret = utils.generate_password() - self.ssh.add_chap_secret(chap_secret, host_name) - return chap_secret - - def get_chap_secret_for_host(self, host_name): - """Generate and store a randomly-generated CHAP secret for the host.""" - resp = self.ssh.lsiscsiauth() - host_found = False - for host_data in resp: - try: - if host_data['name'] == host_name: - host_found = True - if host_data['iscsi_auth_method'] == 'chap': - return host_data['iscsi_chap_secret'] - except KeyError: - self.handle_keyerror('lsiscsiauth', host_data) - if not host_found: - msg = _('Failed to find host %s.') % host_name - raise exception.VolumeBackendAPIException(data=msg) - return None - - def get_conn_fc_wwpns(self, host): - wwpns = set() - resp = self.ssh.lsfabric(host=host) - for wwpn in resp.select('local_wwpn'): - if wwpn is not None: - wwpns.add(wwpn) - return list(wwpns) - - def get_host_from_connector(self, connector, volume_name=None, - iscsi=False): - """Return the Storwize host described by the connector.""" - LOG.debug('Enter: get_host_from_connector: %s.', connector) - - # If we have FC information, we have a faster lookup option - host_name = None - if 'wwpns' in connector and not iscsi: - for wwpn in connector['wwpns']: - resp = self.ssh.lsfabric(wwpn=wwpn) - for wwpn_info in resp: - try: - if (wwpn_info['remote_wwpn'] and - wwpn_info['name'] and - wwpn_info['remote_wwpn'].lower() == - wwpn.lower()): - host_name = wwpn_info['name'] - break - except KeyError: - 
self.handle_keyerror('lsfabric', wwpn_info) - if host_name: - break - if host_name: - LOG.debug('Leave: get_host_from_connector: host %s.', host_name) - return host_name - - def update_host_list(host, host_list): - idx = host_list.index(host) - del host_list[idx] - host_list.insert(0, host) - - # That didn't work, so try exhaustive search - hosts_info = self.ssh.lshost() - host_list = list(hosts_info.select('name')) - # If we have a "real" connector, we might be able to find the - # host entry with fewer queries if we move the host entries - # that contain the connector's host property value to the front - # of the list - if 'host' in connector: - # order host_list such that the host entries that - # contain the connector's host name are at the - # beginning of the list - for host in host_list: - if re.search(connector['host'], host): - update_host_list(host, host_list) - # If we have a volume name we have a potential fast path - # for finding the matching host for that volume. - # Add the host_names that have mappings for our volume to the - # head of the list of host names to search them first - if volume_name: - hosts_map_info = self.ssh.lsvdiskhostmap(volume_name) - hosts_map_info_list = list(hosts_map_info.select('host_name')) - # remove the fast path host names from the end of the list - # and move to the front so they are only searched for once. - for host in hosts_map_info_list: - update_host_list(host, host_list) - found = False - for name in host_list: - try: - resp = self.ssh.lshost(host=name) - except exception.VolumeBackendAPIException as ex: - LOG.debug("Exception message: %s", ex.msg) - if 'CMMVC5754E' in ex.msg: - LOG.debug("CMMVC5754E found in CLI exception.") - # CMMVC5754E: The specified object does not exist - # The host has been deleted while walking the list. - # This is a result of a host change on the SVC that - # is out of band to this request. 
- continue - # unexpected error so reraise it - with excutils.save_and_reraise_exception(): - pass - if iscsi: - if 'initiator' in connector: - for iscsi in resp.select('iscsi_name'): - if iscsi == connector['initiator']: - host_name = name - found = True - break - elif 'wwpns' in connector and len(connector['wwpns']): - connector_wwpns = [str(x).lower() for x in connector['wwpns']] - for wwpn in resp.select('WWPN'): - if wwpn and wwpn.lower() in connector_wwpns: - host_name = name - found = True - break - if found: - break - - LOG.debug('Leave: get_host_from_connector: host %s.', host_name) - return host_name - - def create_host(self, connector, iscsi=False): - """Create a new host on the storage system. - - We create a host name and associate it with the given connection - information. The host name will be a cleaned up version of the given - host name (at most 55 characters), plus a random 8-character suffix to - avoid collisions. The total length should be at most 63 characters. - """ - LOG.debug('Enter: create_host: host %s.', connector['host']) - - # Before we start, make sure host name is a string and that we have at - # least one port. 
- host_name = connector['host'] - if not isinstance(host_name, six.string_types): - msg = _('create_host: Host name is not unicode or string.') - LOG.error(msg) - raise exception.VolumeDriverException(message=msg) - - ports = [] - if iscsi: - if 'initiator' in connector: - ports.append(['initiator', '%s' % connector['initiator']]) - else: - msg = _('create_host: No initiators supplied.') - else: - if 'wwpns' in connector: - for wwpn in connector['wwpns']: - ports.append(['wwpn', '%s' % wwpn]) - else: - msg = _('create_host: No wwpns supplied.') - if not len(ports): - LOG.error(msg) - raise exception.VolumeDriverException(message=msg) - - # Build a host name for the Storwize host - first clean up the name - if isinstance(host_name, six.text_type): - host_name = unicodedata.normalize('NFKD', host_name).encode( - 'ascii', 'replace').decode('ascii') - - for num in range(0, 128): - ch = str(chr(num)) - if not ch.isalnum() and ch not in [' ', '.', '-', '_']: - host_name = host_name.replace(ch, '-') - - # Storwize doesn't like hostname that doesn't starts with letter or _. 
- if not re.match('^[A-Za-z]', host_name): - host_name = '_' + host_name - - # Add a random 8-character suffix to avoid collisions - rand_id = str(random.randint(0, 99999999)).zfill(8) - host_name = '%s-%s' % (host_name[:55], rand_id) - - # Create a host with one port - port = ports.pop(0) - self.ssh.mkhost(host_name, port[0], port[1]) - - # Add any additional ports to the host - for port in ports: - self.ssh.addhostport(host_name, port[0], port[1]) - - LOG.debug('Leave: create_host: host %(host)s - %(host_name)s.', - {'host': connector['host'], 'host_name': host_name}) - return host_name - - def delete_host(self, host_name): - self.ssh.rmhost(host_name) - - def map_vol_to_host(self, volume_name, host_name, multihostmap): - """Create a mapping between a volume to a host.""" - - LOG.debug('Enter: map_vol_to_host: volume %(volume_name)s to ' - 'host %(host_name)s.', - {'volume_name': volume_name, 'host_name': host_name}) - - # Check if this volume is already mapped to this host - result_lun = self.ssh.get_vdiskhostmapid(volume_name, host_name) - if result_lun is None: - result_lun = self.ssh.mkvdiskhostmap(host_name, volume_name, None, - multihostmap) - - LOG.debug('Leave: map_vol_to_host: LUN %(result_lun)s, volume ' - '%(volume_name)s, host %(host_name)s.', - {'result_lun': result_lun, - 'volume_name': volume_name, - 'host_name': host_name}) - return int(result_lun) - - def unmap_vol_from_host(self, volume_name, host_name): - """Unmap the volume and delete the host if it has no more mappings.""" - - LOG.debug('Enter: unmap_vol_from_host: volume %(volume_name)s from ' - 'host %(host_name)s.', - {'volume_name': volume_name, 'host_name': host_name}) - - # Check if the mapping exists - resp = self.ssh.lsvdiskhostmap(volume_name) - if not len(resp): - LOG.warning('unmap_vol_from_host: No mapping of volume ' - '%(vol_name)s to any host found.', - {'vol_name': volume_name}) - return host_name - if host_name is None: - if len(resp) > 1: - LOG.warning('unmap_vol_from_host: 
Multiple mappings of ' - 'volume %(vol_name)s found, no host ' - 'specified.', {'vol_name': volume_name}) - return - else: - host_name = resp[0]['host_name'] - else: - found = False - for h in resp.select('host_name'): - if h == host_name: - found = True - if not found: - LOG.warning('unmap_vol_from_host: No mapping of volume ' - '%(vol_name)s to host %(host)s found.', - {'vol_name': volume_name, 'host': host_name}) - return host_name - # We now know that the mapping exists - self.ssh.rmvdiskhostmap(host_name, volume_name) - - LOG.debug('Leave: unmap_vol_from_host: volume %(volume_name)s from ' - 'host %(host_name)s.', - {'volume_name': volume_name, 'host_name': host_name}) - return host_name - - def check_host_mapped_vols(self, host_name): - return self.ssh.lshostvdiskmap(host_name) - - @staticmethod - def build_default_opts(config): - # Ignore capitalization - - cluster_partner = config.storwize_svc_stretched_cluster_partner - opt = {'rsize': config.storwize_svc_vol_rsize, - 'warning': config.storwize_svc_vol_warning, - 'autoexpand': config.storwize_svc_vol_autoexpand, - 'grainsize': config.storwize_svc_vol_grainsize, - 'compression': config.storwize_svc_vol_compression, - 'easytier': config.storwize_svc_vol_easytier, - 'iogrp': config.storwize_svc_vol_iogrp, - 'qos': None, - 'stretched_cluster': cluster_partner, - 'replication': False, - 'nofmtdisk': config.storwize_svc_vol_nofmtdisk, - 'mirror_pool': config.storwize_svc_mirror_pool, - 'cycle_period_seconds': config.cycle_period_seconds} - return opt - - @staticmethod - def check_vdisk_opts(state, opts): - # Check that grainsize is 32/64/128/256 - if opts['grainsize'] not in [32, 64, 128, 256]: - raise exception.InvalidInput( - reason=_('Illegal value specified for ' - 'storwize_svc_vol_grainsize: set to either ' - '32, 64, 128, or 256.')) - - # Check that compression is supported - if opts['compression'] and not state['compression_enabled']: - raise exception.InvalidInput( - reason=_('System does not support 
compression.')) - - # Check that rsize is set if compression is set - if opts['compression'] and opts['rsize'] == -1: - raise exception.InvalidInput( - reason=_('If compression is set to True, rsize must ' - 'also be set (not equal to -1).')) - - # Check cycle_period_seconds are in 60-86400 - if opts['cycle_period_seconds'] not in range(60, 86401): - raise exception.InvalidInput( - reason=_('cycle_period_seconds should be integer ' - 'between 60 and 86400.')) - - iogs = StorwizeHelpers._get_valid_requested_io_groups(state, opts) - - if len(iogs) == 0: - raise exception.InvalidInput( - reason=_('Given I/O group(s) %(iogrp)s not valid; available ' - 'I/O groups are %(avail)s.') - % {'iogrp': opts['iogrp'], - 'avail': state['available_iogrps']}) - - if opts['nofmtdisk'] and opts['rsize'] != -1: - raise exception.InvalidInput( - reason=_('If nofmtdisk is set to True, rsize must ' - 'also be set to -1.')) - - @staticmethod - def _get_valid_requested_io_groups(state, opts): - given_iogs = str(opts['iogrp']) - iog_list = given_iogs.split(',') - # convert to int - iog_list = list(map(int, iog_list)) - LOG.debug("Requested iogroups %s", iog_list) - LOG.debug("Available iogroups %s", state['available_iogrps']) - filtiog = set(iog_list).intersection(state['available_iogrps']) - iog_list = list(filtiog) - LOG.debug("Filtered (valid) requested iogroups %s", iog_list) - return iog_list - - def _get_opts_from_specs(self, opts, specs): - qos = {} - for k, value in specs.items(): - # Get the scope, if using scope format - key_split = k.split(':') - if len(key_split) == 1: - scope = None - key = key_split[0] - else: - scope = key_split[0] - key = key_split[1] - - # We generally do not look at capabilities in the driver, but - # replication is a special case where the user asks for - # a volume to be replicated, and we want both the scheduler and - # the driver to act on the value. 
- if ((not scope or scope == 'capabilities') and - key == 'replication'): - scope = None - key = 'replication' - words = value.split() - if not (words and len(words) == 2 and words[0] == ''): - LOG.error('Replication must be specified as ' - '\' True\' or \' False\'.') - del words[0] - value = words[0] - - # Add the QoS. - if scope and scope == 'qos': - if key in self.svc_qos_keys.keys(): - try: - type_fn = self.svc_qos_keys[key]['type'] - value = type_fn(value) - qos[key] = value - except ValueError: - continue - - # Any keys that the driver should look at should have the - # 'drivers' scope. - if scope and scope != 'drivers': - continue - - if key in opts: - this_type = type(opts[key]).__name__ - if this_type == 'int': - value = int(value) - elif this_type == 'bool': - value = strutils.bool_from_string(value) - opts[key] = value - if len(qos) != 0: - opts['qos'] = qos - return opts - - def _get_qos_from_volume_metadata(self, volume_metadata): - """Return the QoS information from the volume metadata.""" - qos = {} - for i in volume_metadata: - k = i.get('key', None) - value = i.get('value', None) - key_split = k.split(':') - if len(key_split) == 1: - scope = None - key = key_split[0] - else: - scope = key_split[0] - key = key_split[1] - # Add the QoS. - if scope and scope == 'qos': - if key in self.svc_qos_keys.keys(): - try: - type_fn = self.svc_qos_keys[key]['type'] - value = type_fn(value) - qos[key] = value - except ValueError: - continue - return qos - - def _wait_for_a_condition(self, testmethod, timeout=None, - interval=INTERVAL_1_SEC, - raise_exception=False): - start_time = time.time() - if timeout is None: - timeout = DEFAULT_TIMEOUT - - def _inner(): - try: - testValue = testmethod() - except Exception as ex: - if raise_exception: - LOG.exception("_wait_for_a_condition: %s" - " execution failed.", - testmethod.__name__) - raise exception.VolumeBackendAPIException(data=ex) - else: - testValue = False - LOG.debug('Helper.' 
- '_wait_for_condition: %(method_name)s ' - 'execution failed for %(exception)s.', - {'method_name': testmethod.__name__, - 'exception': ex.message}) - if testValue: - raise loopingcall.LoopingCallDone() - - if int(time.time()) - start_time > timeout: - msg = (_('CommandLineHelper._wait_for_condition: %s timeout.') - % testmethod.__name__) - LOG.error(msg) - raise exception.VolumeBackendAPIException(data=msg) - - timer = loopingcall.FixedIntervalLoopingCall(_inner) - timer.start(interval=interval).wait() - - def get_vdisk_params(self, config, state, type_id, - volume_type=None, volume_metadata=None): - """Return the parameters for creating the vdisk. - - Takes volume type and defaults from config options into account. - """ - opts = self.build_default_opts(config) - ctxt = context.get_admin_context() - if volume_type is None and type_id is not None: - volume_type = volume_types.get_volume_type(ctxt, type_id) - if volume_type: - qos_specs_id = volume_type.get('qos_specs_id') - specs = dict(volume_type).get('extra_specs') - - # NOTE(vhou): We prefer the qos_specs association - # and over-ride any existing - # extra-specs settings if present - if qos_specs_id is not None: - kvs = qos_specs.get_qos_specs(ctxt, qos_specs_id)['specs'] - # Merge the qos_specs into extra_specs and qos_specs has higher - # priority than extra_specs if they have different values for - # the same key. 
- specs.update(kvs) - opts = self._get_opts_from_specs(opts, specs) - if (opts['qos'] is None and config.storwize_svc_allow_tenant_qos - and volume_metadata): - qos = self._get_qos_from_volume_metadata(volume_metadata) - if len(qos) != 0: - opts['qos'] = qos - - self.check_vdisk_opts(state, opts) - return opts - - @staticmethod - def _get_vdisk_create_params(opts, add_copies=False): - easytier = 'on' if opts['easytier'] else 'off' - if opts['rsize'] == -1: - params = [] - if opts['nofmtdisk']: - params.append('-nofmtdisk') - else: - params = ['-rsize', '%s%%' % str(opts['rsize']), - '-autoexpand', '-warning', - '%s%%' % str(opts['warning'])] - if not opts['autoexpand']: - params.remove('-autoexpand') - - if opts['compression']: - params.append('-compressed') - else: - params.extend(['-grainsize', str(opts['grainsize'])]) - - if add_copies and opts['mirror_pool']: - params.extend(['-copies', '2']) - - params.extend(['-easytier', easytier]) - return params - - def create_vdisk(self, name, size, units, pool, opts): - LOG.debug('Enter: create_vdisk: vdisk %s.', name) - mdiskgrp = pool - if opts['mirror_pool']: - if not self.is_pool_defined(opts['mirror_pool']): - raise exception.InvalidInput( - reason=_('The pool %s in which mirrored copy is stored ' - 'is invalid') % opts['mirror_pool']) - # The syntax of pool SVC expects is pool:mirror_pool in - # mdiskgrp for mirror volume - mdiskgrp = '%s:%s' % (pool, opts['mirror_pool']) - params = self._get_vdisk_create_params( - opts, add_copies=True if opts['mirror_pool'] else False) - self.ssh.mkvdisk(name, size, units, mdiskgrp, opts, params) - LOG.debug('Leave: _create_vdisk: volume %s.', name) - - def get_vdisk_attributes(self, vdisk): - attrs = self.ssh.lsvdisk(vdisk) - return attrs - - def is_vdisk_defined(self, vdisk_name): - """Check if vdisk is defined.""" - attrs = self.get_vdisk_attributes(vdisk_name) - return attrs is not None - - def find_vdisk_copy_id(self, vdisk, pool): - resp = self.ssh.lsvdiskcopy(vdisk) - for 
copy_id, mdisk_grp in resp.select('copy_id', 'mdisk_grp_name'): - if mdisk_grp == pool: - return copy_id - msg = _('Failed to find a vdisk copy in the expected pool.') - LOG.error(msg) - raise exception.VolumeDriverException(message=msg) - - def get_vdisk_copy_attrs(self, vdisk, copy_id): - return self.ssh.lsvdiskcopy(vdisk, copy_id=copy_id)[0] - - def get_vdisk_copies(self, vdisk): - copies = {'primary': None, - 'secondary': None} - - resp = self.ssh.lsvdiskcopy(vdisk) - for copy_id, status, sync, primary, mdisk_grp in ( - resp.select('copy_id', 'status', 'sync', - 'primary', 'mdisk_grp_name')): - copy = {'copy_id': copy_id, - 'status': status, - 'sync': sync, - 'primary': primary, - 'mdisk_grp_name': mdisk_grp, - 'sync_progress': None} - if copy['sync'] != 'yes': - progress_info = self.ssh.lsvdisksyncprogress(vdisk, copy_id) - copy['sync_progress'] = progress_info['progress'] - if copy['primary'] == 'yes': - copies['primary'] = copy - else: - copies['secondary'] = copy - return copies - - def _prepare_fc_map(self, fc_map_id, timeout): - self.ssh.prestartfcmap(fc_map_id) - mapping_ready = False - max_retries = (timeout // self.WAIT_TIME) + 1 - for try_number in range(1, max_retries): - mapping_attrs = self._get_flashcopy_mapping_attributes(fc_map_id) - if (mapping_attrs is None or - 'status' not in mapping_attrs): - break - if mapping_attrs['status'] == 'prepared': - mapping_ready = True - break - elif mapping_attrs['status'] == 'stopped': - self.ssh.prestartfcmap(fc_map_id) - elif mapping_attrs['status'] != 'preparing': - msg = (_('Unexecpted mapping status %(status)s for mapping ' - '%(id)s. Attributes: %(attr)s.') - % {'status': mapping_attrs['status'], - 'id': fc_map_id, - 'attr': mapping_attrs}) - LOG.error(msg) - raise exception.VolumeBackendAPIException(data=msg) - greenthread.sleep(self.WAIT_TIME) - - if not mapping_ready: - msg = (_('Mapping %(id)s prepare failed to complete within the' - 'allotted %(to)d seconds timeout. 
Terminating.') - % {'id': fc_map_id, - 'to': timeout}) - LOG.error(msg) - raise exception.VolumeDriverException(message=msg) - - def start_fc_consistgrp(self, fc_consistgrp): - self.ssh.startfcconsistgrp(fc_consistgrp) - - def create_fc_consistgrp(self, fc_consistgrp): - self.ssh.mkfcconsistgrp(fc_consistgrp) - - def delete_fc_consistgrp(self, fc_consistgrp): - self.ssh.rmfcconsistgrp(fc_consistgrp) - - def stop_fc_consistgrp(self, fc_consistgrp): - self.ssh.stopfcconsistgrp(fc_consistgrp) - - def run_consistgrp_snapshots(self, fc_consistgrp, snapshots, state, - config, timeout): - model_update = {'status': fields.GroupSnapshotStatus.AVAILABLE} - snapshots_model_update = [] - try: - for snapshot in snapshots: - opts = self.get_vdisk_params(config, state, - snapshot['volume_type_id']) - volume = snapshot.volume - if not volume: - msg = (_("Can't get volume from snapshot: %(id)s") - % {"id": snapshot.id}) - LOG.error(msg) - raise exception.VolumeBackendAPIException(data=msg) - pool = utils.extract_host(volume.host, 'pool') - self.create_flashcopy_to_consistgrp(snapshot['volume_name'], - snapshot['name'], - fc_consistgrp, - config, opts, False, - pool=pool) - - self.prepare_fc_consistgrp(fc_consistgrp, timeout) - self.start_fc_consistgrp(fc_consistgrp) - # There is CG limitation that could not create more than 128 CGs. - # After start CG, we delete CG to avoid CG limitation. - # Cinder general will maintain the CG and snapshots relationship. - self.delete_fc_consistgrp(fc_consistgrp) - except exception.VolumeBackendAPIException as err: - model_update['status'] = fields.GroupSnapshotStatus.ERROR - # Release cg - self.delete_fc_consistgrp(fc_consistgrp) - LOG.error("Failed to create CGSnapshot. 
" - "Exception: %s.", err) - - for snapshot in snapshots: - snapshots_model_update.append( - {'id': snapshot['id'], - 'status': model_update['status']}) - - return model_update, snapshots_model_update - - def delete_consistgrp_snapshots(self, fc_consistgrp, snapshots): - """Delete flashcopy maps and consistent group.""" - model_update = {'status': fields.GroupSnapshotStatus.DELETED} - snapshots_model_update = [] - - try: - for snapshot in snapshots: - self.delete_vdisk(snapshot['name'], True) - except exception.VolumeBackendAPIException as err: - model_update['status'] = ( - fields.GroupSnapshotStatus.ERROR_DELETING) - LOG.error("Failed to delete the snapshot %(snap)s of " - "CGSnapshot. Exception: %(exception)s.", - {'snap': snapshot['name'], 'exception': err}) - - for snapshot in snapshots: - snapshots_model_update.append( - {'id': snapshot['id'], - 'status': model_update['status']}) - - return model_update, snapshots_model_update - - def prepare_fc_consistgrp(self, fc_consistgrp, timeout): - """Prepare FC Consistency Group.""" - self.ssh.prestartfcconsistgrp(fc_consistgrp) - - def prepare_fc_consistgrp_success(): - mapping_ready = False - mapping_attrs = self._get_flashcopy_consistgrp_attr(fc_consistgrp) - if (mapping_attrs is None or - 'status' not in mapping_attrs): - pass - if mapping_attrs['status'] == 'prepared': - mapping_ready = True - elif mapping_attrs['status'] == 'stopped': - self.ssh.prestartfcconsistgrp(fc_consistgrp) - elif mapping_attrs['status'] != 'preparing': - msg = (_('Unexpected mapping status %(status)s for mapping' - '%(id)s. 
Attributes: %(attr)s.') % - {'status': mapping_attrs['status'], - 'id': fc_consistgrp, - 'attr': mapping_attrs}) - LOG.error(msg) - raise exception.VolumeBackendAPIException(data=msg) - return mapping_ready - self._wait_for_a_condition(prepare_fc_consistgrp_success, timeout) - - def create_cg_from_source(self, group, fc_consistgrp, - sources, targets, state, - config, timeout): - """Create consistence group from source""" - LOG.debug('Enter: create_cg_from_source: cg %(cg)s' - ' source %(source)s, target %(target)s', - {'cg': fc_consistgrp, 'source': sources, 'target': targets}) - model_update = {'status': fields.GroupStatus.AVAILABLE} - ctxt = context.get_admin_context() - try: - for source, target in zip(sources, targets): - opts = self.get_vdisk_params(config, state, - source['volume_type_id']) - pool = utils.extract_host(target['host'], 'pool') - self.create_flashcopy_to_consistgrp(source['name'], - target['name'], - fc_consistgrp, - config, opts, - True, pool=pool) - self.prepare_fc_consistgrp(fc_consistgrp, timeout) - self.start_fc_consistgrp(fc_consistgrp) - self.delete_fc_consistgrp(fc_consistgrp) - volumes_model_update = self._get_volume_model_updates( - ctxt, targets, group['id'], model_update['status']) - except exception.VolumeBackendAPIException as err: - model_update['status'] = fields.GroupStatus.ERROR - volumes_model_update = self._get_volume_model_updates( - ctxt, targets, group['id'], model_update['status']) - with excutils.save_and_reraise_exception(): - # Release cg - self.delete_fc_consistgrp(fc_consistgrp) - LOG.error("Failed to create CG from CGsnapshot. 
" - "Exception: %s", err) - return model_update, volumes_model_update - - LOG.debug('Leave: create_cg_from_source.') - return model_update, volumes_model_update - - def _get_volume_model_updates(self, ctxt, volumes, cgId, - status='available'): - """Update the volume model's status and return it.""" - volume_model_updates = [] - LOG.info("Updating status for CG: %(id)s.", - {'id': cgId}) - if volumes: - for volume in volumes: - volume_model_updates.append({'id': volume['id'], - 'status': status}) - else: - LOG.info("No volume found for CG: %(cg)s.", - {'cg': cgId}) - return volume_model_updates - - def run_flashcopy(self, source, target, timeout, copy_rate, - full_copy=True): - """Create a FlashCopy mapping from the source to the target.""" - LOG.debug('Enter: run_flashcopy: execute FlashCopy from source ' - '%(source)s to target %(target)s.', - {'source': source, 'target': target}) - - fc_map_id = self.ssh.mkfcmap(source, target, full_copy, copy_rate) - self._prepare_fc_map(fc_map_id, timeout) - self.ssh.startfcmap(fc_map_id) - - LOG.debug('Leave: run_flashcopy: FlashCopy started from ' - '%(source)s to %(target)s.', - {'source': source, 'target': target}) - - def create_flashcopy_to_consistgrp(self, source, target, consistgrp, - config, opts, full_copy=False, - pool=None): - """Create a FlashCopy mapping and add to consistent group.""" - LOG.debug('Enter: create_flashcopy_to_consistgrp: create FlashCopy' - ' from source %(source)s to target %(target)s' - 'Then add the flashcopy to %(cg)s.', - {'source': source, 'target': target, 'cg': consistgrp}) - - src_attrs = self.get_vdisk_attributes(source) - if src_attrs is None: - msg = (_('create_copy: Source vdisk %(src)s ' - 'does not exist.') % {'src': source}) - LOG.error(msg) - raise exception.VolumeDriverException(message=msg) - - src_size = src_attrs['capacity'] - # In case we need to use a specific pool - if not pool: - pool = src_attrs['mdisk_grp_name'] - opts['iogrp'] = src_attrs['IO_group_id'] - 
self.create_vdisk(target, src_size, 'b', pool, opts) - - self.ssh.mkfcmap(source, target, full_copy, - config.storwize_svc_flashcopy_rate, - consistgrp=consistgrp) - - LOG.debug('Leave: create_flashcopy_to_consistgrp: ' - 'FlashCopy started from %(source)s to %(target)s.', - {'source': source, 'target': target}) - - def _get_vdisk_fc_mappings(self, vdisk): - """Return FlashCopy mappings that this vdisk is associated with.""" - mapping_ids = [] - resp = self.ssh.lsvdiskfcmappings(vdisk) - for id in resp.select('id'): - mapping_ids.append(id) - return mapping_ids - - def _get_flashcopy_mapping_attributes(self, fc_map_id): - resp = self.ssh.lsfcmap(fc_map_id) - if not len(resp): - return None - return resp[0] - - def _get_flashcopy_consistgrp_attr(self, fc_map_id): - resp = self.ssh.lsfcconsistgrp(fc_map_id) - if not len(resp): - return None - return resp[0] - - def _check_vdisk_fc_mappings(self, name, - allow_snaps=True, allow_fctgt=False): - """FlashCopy mapping check helper.""" - LOG.debug('Loopcall: _check_vdisk_fc_mappings(), vdisk %s.', name) - mapping_ids = self._get_vdisk_fc_mappings(name) - wait_for_copy = False - for map_id in mapping_ids: - attrs = self._get_flashcopy_mapping_attributes(map_id) - # We should ignore GMCV flash copies - if not attrs or 'yes' == attrs['rc_controlled']: - continue - source = attrs['source_vdisk_name'] - target = attrs['target_vdisk_name'] - copy_rate = attrs['copy_rate'] - status = attrs['status'] - - if allow_fctgt and target == name and status == 'copying': - self.ssh.stopfcmap(map_id) - attrs = self._get_flashcopy_mapping_attributes(map_id) - if attrs: - status = attrs['status'] - - if copy_rate == '0': - if source == name: - # Vdisk with snapshots. Return False if snapshot - # not allowed. 
- if not allow_snaps: - raise loopingcall.LoopingCallDone(retvalue=False) - self.ssh.chfcmap(map_id, copyrate='50', autodel='on') - wait_for_copy = True - else: - # A snapshot - if target != name: - msg = (_('Vdisk %(name)s not involved in ' - 'mapping %(src)s -> %(tgt)s.') % - {'name': name, 'src': source, 'tgt': target}) - LOG.error(msg) - raise exception.VolumeDriverException(message=msg) - if status in ['copying', 'prepared']: - self.ssh.stopfcmap(map_id) - # Need to wait for the fcmap to change to - # stopped state before remove fcmap - wait_for_copy = True - elif status in ['stopping', 'preparing']: - wait_for_copy = True - else: - self.ssh.rmfcmap(map_id) - # Case 4: Copy in progress - wait and will autodelete - else: - if status == 'prepared': - self.ssh.stopfcmap(map_id) - self.ssh.rmfcmap(map_id) - elif status in ['idle_or_copied', 'stopped']: - # Prepare failed or stopped - self.ssh.rmfcmap(map_id) - else: - wait_for_copy = True - if not wait_for_copy or not len(mapping_ids): - raise loopingcall.LoopingCallDone(retvalue=True) - - def ensure_vdisk_no_fc_mappings(self, name, allow_snaps=True, - allow_fctgt=False): - """Ensure vdisk has no flashcopy mappings.""" - timer = loopingcall.FixedIntervalLoopingCall( - self._check_vdisk_fc_mappings, name, - allow_snaps, allow_fctgt) - # Create a timer greenthread. The default volume service heart - # beat is every 10 seconds. The flashcopy usually takes hours - # before it finishes. Don't set the sleep interval shorter - # than the heartbeat. Otherwise volume service heartbeat - # will not be serviced. 
- LOG.debug('Calling _ensure_vdisk_no_fc_mappings: vdisk %s.', - name) - ret = timer.start(interval=self.check_fcmapping_interval).wait() - timer.stop() - return ret - - def start_relationship(self, volume_name, primary=None): - vol_attrs = self.get_vdisk_attributes(volume_name) - if vol_attrs['RC_name']: - self.ssh.startrcrelationship(vol_attrs['RC_name'], primary) - - def stop_relationship(self, volume_name, access=False): - vol_attrs = self.get_vdisk_attributes(volume_name) - if vol_attrs['RC_name']: - self.ssh.stoprcrelationship(vol_attrs['RC_name'], access=access) - - def create_relationship(self, master, aux, system, asyncmirror, - cyclingmode=False, masterchange=None, - cycle_period_seconds=None): - try: - rc_id = self.ssh.mkrcrelationship(master, aux, system, - asyncmirror, cyclingmode) - except exception.VolumeBackendAPIException as e: - # CMMVC5959E is the code in Stowize storage, meaning that - # there is a relationship that already has this name on the - # master cluster. - if 'CMMVC5959E' not in e: - # If there is no relation between the primary and the - # secondary back-end storage, the exception is raised. 
- raise - if rc_id: - # We need setup master and aux change volumes for gmcv - # before we can start remote relationship - # aux change volume must be set on target site - if cycle_period_seconds: - self.change_relationship_cycleperiod(master, - cycle_period_seconds) - if masterchange: - self.change_relationship_changevolume(master, - masterchange, True) - else: - self.start_relationship(master) - - def change_relationship_changevolume(self, volume_name, - change_volume, master): - vol_attrs = self.get_vdisk_attributes(volume_name) - if vol_attrs['RC_name'] and change_volume: - self.ssh.ch_rcrelationship_changevolume(vol_attrs['RC_name'], - change_volume, master) - - def change_relationship_cycleperiod(self, volume_name, - cycle_period_seconds): - vol_attrs = self.get_vdisk_attributes(volume_name) - if vol_attrs['RC_name'] and cycle_period_seconds: - self.ssh.ch_rcrelationship_cycleperiod(vol_attrs['RC_name'], - cycle_period_seconds) - - def delete_relationship(self, volume_name): - vol_attrs = self.get_vdisk_attributes(volume_name) - if vol_attrs['RC_name']: - self.ssh.rmrcrelationship(vol_attrs['RC_name'], True) - - def get_relationship_info(self, volume_name): - vol_attrs = self.get_vdisk_attributes(volume_name) - if not vol_attrs or not vol_attrs['RC_name']: - LOG.info("Unable to get remote copy information for " - "volume %s", volume_name) - return - - relationship = self.ssh.lsrcrelationship(vol_attrs['RC_name']) - return relationship[0] if len(relationship) > 0 else None - - def delete_rc_volume(self, volume_name, target_vol=False): - vol_name = volume_name - if target_vol: - vol_name = storwize_const.REPLICA_AUX_VOL_PREFIX + volume_name - - try: - rel_info = self.get_relationship_info(vol_name) - if rel_info: - self.delete_relationship(vol_name) - # Delete change volume - self.delete_vdisk( - storwize_const.REPLICA_CHG_VOL_PREFIX + vol_name, False) - self.delete_vdisk(vol_name, False) - except Exception as e: - msg = (_('Unable to delete the volume for ' - 
'volume %(vol)s. Exception: %(err)s.'), - {'vol': vol_name, 'err': e}) - LOG.exception(msg) - raise exception.VolumeDriverException(message=msg) - - def switch_relationship(self, relationship, aux=True): - self.ssh.switchrelationship(relationship, aux) - - def get_partnership_info(self, system_name): - partnership = self.ssh.lspartnership(system_name) - return partnership[0] if len(partnership) > 0 else None - - def get_partnershipcandidate_info(self, system_name): - candidates = self.ssh.lspartnershipcandidate() - for candidate in candidates: - if system_name == candidate['name']: - return candidate - return None - - def mkippartnership(self, ip_v4, bandwith=1000, copyrate=50): - self.ssh.mkippartnership(ip_v4, bandwith, copyrate) - - def mkfcpartnership(self, system_name, bandwith=1000, copyrate=50): - self.ssh.mkfcpartnership(system_name, bandwith, copyrate) - - def chpartnership(self, partnership_id): - self.ssh.chpartnership(partnership_id) - - def delete_vdisk(self, vdisk, force): - """Ensures that vdisk is not part of FC mapping and deletes it.""" - LOG.debug('Enter: delete_vdisk: vdisk %s.', vdisk) - if not self.is_vdisk_defined(vdisk): - LOG.info('Tried to delete non-existent vdisk %s.', vdisk) - return - self.ensure_vdisk_no_fc_mappings(vdisk, allow_snaps=True, - allow_fctgt=True) - self.ssh.rmvdisk(vdisk, force=force) - LOG.debug('Leave: delete_vdisk: vdisk %s.', vdisk) - - def create_copy(self, src, tgt, src_id, config, opts, - full_copy, pool=None): - """Create a new snapshot using FlashCopy.""" - LOG.debug('Enter: create_copy: snapshot %(src)s to %(tgt)s.', - {'tgt': tgt, 'src': src}) - - src_attrs = self.get_vdisk_attributes(src) - if src_attrs is None: - msg = (_('create_copy: Source vdisk %(src)s (%(src_id)s) ' - 'does not exist.') % {'src': src, 'src_id': src_id}) - LOG.error(msg) - raise exception.VolumeDriverException(message=msg) - - src_size = src_attrs['capacity'] - # In case we need to use a specific pool - if not pool: - pool = 
src_attrs['mdisk_grp_name'] - - opts['iogrp'] = src_attrs['IO_group_id'] - self.create_vdisk(tgt, src_size, 'b', pool, opts) - timeout = config.storwize_svc_flashcopy_timeout - try: - self.run_flashcopy(src, tgt, timeout, - config.storwize_svc_flashcopy_rate, - full_copy=full_copy) - except Exception: - with excutils.save_and_reraise_exception(): - self.delete_vdisk(tgt, True) - - LOG.debug('Leave: _create_copy: snapshot %(tgt)s from ' - 'vdisk %(src)s.', - {'tgt': tgt, 'src': src}) - - def extend_vdisk(self, vdisk, amount): - self.ssh.expandvdisksize(vdisk, amount) - - def add_vdisk_copy(self, vdisk, dest_pool, volume_type, state, config, - auto_delete=False): - """Add a vdisk copy in the given pool.""" - resp = self.ssh.lsvdiskcopy(vdisk) - if len(resp) > 1: - msg = (_('add_vdisk_copy failed: A copy of volume %s exists. ' - 'Adding another copy would exceed the limit of ' - '2 copies.') % vdisk) - raise exception.VolumeDriverException(message=msg) - orig_copy_id = resp[0].get("copy_id", None) - - if orig_copy_id is None: - msg = (_('add_vdisk_copy started without a vdisk copy in the ' - 'expected pool.')) - LOG.error(msg) - raise exception.VolumeDriverException(message=msg) - - if volume_type is None: - opts = self.get_vdisk_params(config, state, None) - else: - opts = self.get_vdisk_params(config, state, volume_type['id'], - volume_type=volume_type) - params = self._get_vdisk_create_params(opts) - try: - new_copy_id = self.ssh.addvdiskcopy(vdisk, dest_pool, params, - auto_delete) - except exception.VolumeBackendAPIException as e: - msg = (_('Unable to add vdiskcopy for volume %(vol)s. 
' - 'Exception: %(err)s.'), - {'vol': vdisk, 'err': e}) - LOG.exception(msg) - raise exception.VolumeDriverException(message=msg) - return (orig_copy_id, new_copy_id) - - def is_vdisk_copy_synced(self, vdisk, copy_id): - sync = self.ssh.lsvdiskcopy(vdisk, copy_id=copy_id)[0]['sync'] - if sync == 'yes': - return True - return False - - def rm_vdisk_copy(self, vdisk, copy_id): - self.ssh.rmvdiskcopy(vdisk, copy_id) - - def lsvdiskcopy(self, vdisk, copy_id=None): - return self.ssh.lsvdiskcopy(vdisk, copy_id) - - @staticmethod - def can_migrate_to_host(host, state): - if 'location_info' not in host['capabilities']: - return None - info = host['capabilities']['location_info'] - try: - (dest_type, dest_id, dest_pool) = info.split(':') - except ValueError: - return None - if (dest_type != 'StorwizeSVCDriver' or dest_id != state['system_id']): - return None - return dest_pool - - def add_vdisk_qos(self, vdisk, qos): - """Add the QoS configuration to the volume.""" - for key, value in qos.items(): - if key in self.svc_qos_keys.keys(): - param = self.svc_qos_keys[key]['param'] - self.ssh.chvdisk(vdisk, ['-' + param, str(value)]) - - def update_vdisk_qos(self, vdisk, qos): - """Update all the QoS in terms of a key and value. - - svc_qos_keys saves all the supported QoS parameters. Going through - this dict, we set the new values to all the parameters. If QoS is - available in the QoS configuration, the value is taken from it; - if not, the value will be set to default. - """ - for key, value in self.svc_qos_keys.items(): - param = value['param'] - if key in qos.keys(): - # If the value is set in QoS, take the value from - # the QoS configuration. - v = qos[key] - else: - # If not, set the value to default. 
- v = value['default'] - self.ssh.chvdisk(vdisk, ['-' + param, str(v)]) - - def disable_vdisk_qos(self, vdisk, qos): - """Disable the QoS.""" - for key, value in qos.items(): - if key in self.svc_qos_keys.keys(): - param = self.svc_qos_keys[key]['param'] - # Take the default value. - value = self.svc_qos_keys[key]['default'] - self.ssh.chvdisk(vdisk, ['-' + param, value]) - - def change_vdisk_options(self, vdisk, changes, opts, state): - if 'warning' in opts: - opts['warning'] = '%s%%' % str(opts['warning']) - if 'easytier' in opts: - opts['easytier'] = 'on' if opts['easytier'] else 'off' - if 'autoexpand' in opts: - opts['autoexpand'] = 'on' if opts['autoexpand'] else 'off' - - for key in changes: - self.ssh.chvdisk(vdisk, ['-' + key, opts[key]]) - - def change_vdisk_iogrp(self, vdisk, state, iogrp): - if state['code_level'] < (6, 4, 0, 0): - LOG.debug('Ignore change IO group as storage code level is ' - '%(code_level)s, below the required 6.4.0.0.', - {'code_level': state['code_level']}) - else: - self.ssh.movevdisk(vdisk, str(iogrp[0])) - self.ssh.addvdiskaccess(vdisk, str(iogrp[0])) - self.ssh.rmvdiskaccess(vdisk, str(iogrp[1])) - - def vdisk_by_uid(self, vdisk_uid): - """Returns the properties of the vdisk with the specified UID. - - Returns None if no such disk exists. - """ - - vdisks = self.ssh.lsvdisks_from_filter('vdisk_UID', vdisk_uid) - - if len(vdisks) == 0: - return None - - if len(vdisks) != 1: - msg = (_('Expected single vdisk returned from lsvdisk when ' - 'filtering on vdisk_UID. 
%(count)s were returned.') % - {'count': len(vdisks)}) - LOG.error(msg) - raise exception.VolumeBackendAPIException(data=msg) - - vdisk = vdisks.result[0] - - return self.ssh.lsvdisk(vdisk['name']) - - def is_vdisk_in_use(self, vdisk): - """Returns True if the specified vdisk is mapped to at least 1 host.""" - resp = self.ssh.lsvdiskhostmap(vdisk) - return len(resp) != 0 - - def rename_vdisk(self, vdisk, new_name): - self.ssh.chvdisk(vdisk, ['-name', new_name]) - - def change_vdisk_primary_copy(self, vdisk, copy_id): - self.ssh.chvdisk(vdisk, ['-primary', copy_id]) - - def migratevdisk(self, vdisk, dest_pool, copy_id='0'): - self.ssh.migratevdisk(vdisk, dest_pool, copy_id) - - -class CLIResponse(object): - """Parse SVC CLI output and generate iterable.""" - - def __init__(self, raw, ssh_cmd=None, delim='!', with_header=True): - super(CLIResponse, self).__init__() - if ssh_cmd: - self.ssh_cmd = ' '.join(ssh_cmd) - else: - self.ssh_cmd = 'None' - self.raw = raw - self.delim = delim - self.with_header = with_header - self.result = self._parse() - - def select(self, *keys): - for a in self.result: - vs = [] - for k in keys: - v = a.get(k, None) - if isinstance(v, six.string_types) or v is None: - v = [v] - if isinstance(v, list): - vs.append(v) - for item in zip(*vs): - if len(item) == 1: - yield item[0] - else: - yield item - - def __getitem__(self, key): - try: - return self.result[key] - except KeyError: - msg = (_('Did not find the expected key %(key)s in %(fun)s: ' - '%(raw)s.') % {'key': key, 'fun': self.ssh_cmd, - 'raw': self.raw}) - raise exception.VolumeBackendAPIException(data=msg) - - def __iter__(self): - for a in self.result: - yield a - - def __len__(self): - return len(self.result) - - def _parse(self): - def get_reader(content, delim): - for line in content.lstrip().splitlines(): - line = line.strip() - if line: - yield line.split(delim) - else: - yield [] - - if isinstance(self.raw, six.string_types): - stdout, stderr = self.raw, '' - else: - stdout, 
stderr = self.raw - reader = get_reader(stdout, self.delim) - result = [] - - if self.with_header: - hds = tuple() - for row in reader: - hds = row - break - for row in reader: - cur = dict() - if len(hds) != len(row): - msg = (_('Unexpected CLI response: header/row mismatch. ' - 'header: %(header)s, row: %(row)s.') - % {'header': hds, - 'row': row}) - raise exception.VolumeBackendAPIException(data=msg) - for k, v in zip(hds, row): - CLIResponse.append_dict(cur, k, v) - result.append(cur) - else: - cur = dict() - for row in reader: - if row: - CLIResponse.append_dict(cur, row[0], ' '.join(row[1:])) - elif cur: # start new section - result.append(cur) - cur = dict() - if cur: - result.append(cur) - return result - - @staticmethod - def append_dict(dict_, key, value): - key, value = key.strip(), value.strip() - obj = dict_.get(key, None) - if obj is None: - dict_[key] = value - elif isinstance(obj, list): - obj.append(value) - dict_[key] = obj - else: - dict_[key] = [obj, value] - return dict_ - - -class StorwizeSVCCommonDriver(san.SanDriver, - driver.ManageableVD, - driver.MigrateVD, - driver.CloneableImageVD): - """IBM Storwize V7000 SVC abstract base class for iSCSI/FC volume drivers. - - Version history: - - .. 
code-block:: none - - 1.0 - Initial driver - 1.1 - FC support, create_cloned_volume, volume type support, - get_volume_stats, minor bug fixes - 1.2.0 - Added retype - 1.2.1 - Code refactor, improved exception handling - 1.2.2 - Fix bug #1274123 (races in host-related functions) - 1.2.3 - Fix Fibre Channel connectivity: bug #1279758 (add delim - to lsfabric, clear unused data from connections, ensure - matching WWPNs by comparing lower case - 1.2.4 - Fix bug #1278035 (async migration/retype) - 1.2.5 - Added support for manage_existing (unmanage is inherited) - 1.2.6 - Added QoS support in terms of I/O throttling rate - 1.3.1 - Added support for volume replication - 1.3.2 - Added support for consistency group - 1.3.3 - Update driver to use ABC metaclasses - 2.0 - Code refactor, split init file and placed shared methods - for FC and iSCSI within the StorwizeSVCCommonDriver class - 2.1 - Added replication V2 support to the global/metro mirror - mode - 2.1.1 - Update replication to version 2.1 - """ - - VERSION = "2.1.1" - VDISKCOPYOPS_INTERVAL = 600 - DEFAULT_GR_SLEEP = random.randint(20, 500) / 100.0 - - def __init__(self, *args, **kwargs): - super(StorwizeSVCCommonDriver, self).__init__(*args, **kwargs) - self.configuration.append_config_values(storwize_svc_opts) - self._backend_name = self.configuration.safe_get('volume_backend_name') - self.active_ip = self.configuration.san_ip - self.inactive_ip = self.configuration.storwize_san_secondary_ip - self._master_backend_helpers = StorwizeHelpers(self._run_ssh) - self._aux_backend_helpers = None - self._helpers = self._master_backend_helpers - self._vdiskcopyops = {} - self._vdiskcopyops_loop = None - self.protocol = None - self._state = {'storage_nodes': {}, - 'enabled_protocols': set(), - 'compression_enabled': False, - 'available_iogrps': [], - 'system_name': None, - 'system_id': None, - 'code_level': None, - } - self._active_backend_id = kwargs.get('active_backend_id') - - # This dictionary is used to map each 
replication target to certain - # replication manager object. - self.replica_manager = {} - - # One driver can be configured with only one replication target - # to failover. - self._replica_target = {} - - # This boolean is used to indicate whether replication is supported - # by this storage. - self._replica_enabled = False - - # This list is used to save the supported replication modes. - self._supported_replica_types = [] - - # This is used to save the available pools in failed-over status - self._secondary_pools = None - - # Storwize has the limitation that can not burst more than 3 new ssh - # connections within 1 second. So slow down the initialization. - time.sleep(1) - - def do_setup(self, ctxt): - """Check that we have all configuration details from the storage.""" - LOG.debug('enter: do_setup') - - # v2.1 replication setup - self._get_storwize_config() - - # Update the storwize state - self._update_storwize_state() - - # Validate that the pool exists - self._validate_pools_exist() - - # Build the list of in-progress vdisk copy operations - if ctxt is None: - admin_context = context.get_admin_context() - else: - admin_context = ctxt.elevated() - volumes = objects.VolumeList.get_all_by_host(admin_context, self.host) - - for volume in volumes: - metadata = volume.admin_metadata - curr_ops = metadata.get('vdiskcopyops', None) - if curr_ops: - ops = [tuple(x.split(':')) for x in curr_ops.split(';')] - self._vdiskcopyops[volume['id']] = ops - - # if vdiskcopy exists in database, start the looping call - if len(self._vdiskcopyops) >= 1: - self._vdiskcopyops_loop = loopingcall.FixedIntervalLoopingCall( - self._check_volume_copy_ops) - self._vdiskcopyops_loop.start(interval=self.VDISKCOPYOPS_INTERVAL) - LOG.debug('leave: do_setup') - - def _update_storwize_state(self): - # Get storage system name, id, and code level - self._state.update(self._helpers.get_system_info()) - - # Check if compression is supported - self._state['compression_enabled'] = (self._helpers. 
- compression_enabled()) - - # Get the available I/O groups - self._state['available_iogrps'] = (self._helpers. - get_available_io_groups()) - - # Get the iSCSI and FC names of the Storwize/SVC nodes - self._state['storage_nodes'] = self._helpers.get_node_info() - - # Add the iSCSI IP addresses and WWPNs to the storage node info - self._helpers.add_iscsi_ip_addrs(self._state['storage_nodes']) - self._helpers.add_fc_wwpns(self._state['storage_nodes']) - - # For each node, check what connection modes it supports. Delete any - # nodes that do not support any types (may be partially configured). - to_delete = [] - for k, node in self._state['storage_nodes'].items(): - if ((len(node['ipv4']) or len(node['ipv6'])) - and len(node['iscsi_name'])): - node['enabled_protocols'].append('iSCSI') - self._state['enabled_protocols'].add('iSCSI') - if len(node['WWPN']): - node['enabled_protocols'].append('FC') - self._state['enabled_protocols'].add('FC') - if not len(node['enabled_protocols']): - to_delete.append(k) - for delkey in to_delete: - del self._state['storage_nodes'][delkey] - - def _get_backend_pools(self): - if not self._active_backend_id: - return self.configuration.storwize_svc_volpool_name - elif not self._secondary_pools: - self._secondary_pools = [self._replica_target.get('pool_name')] - return self._secondary_pools - - def _validate_pools_exist(self): - # Validate that the pool exists - pools = self._get_backend_pools() - for pool in pools: - if not self._helpers.is_pool_defined(pool): - reason = (_('Failed getting details for pool %s.') % pool) - raise exception.InvalidInput(reason=reason) - - def check_for_setup_error(self): - """Ensure that the flags are set properly.""" - LOG.debug('enter: check_for_setup_error') - - # Check that we have the system ID information - if self._state['system_name'] is None: - exception_msg = (_('Unable to determine system name.')) - raise exception.VolumeBackendAPIException(data=exception_msg) - if self._state['system_id'] is 
None: - exception_msg = (_('Unable to determine system id.')) - raise exception.VolumeBackendAPIException(data=exception_msg) - - # Make sure we have at least one node configured - if not len(self._state['storage_nodes']): - msg = _('do_setup: No configured nodes.') - LOG.error(msg) - raise exception.VolumeDriverException(message=msg) - - if self.protocol not in self._state['enabled_protocols']: - # TODO(mc_nair): improve this error message by looking at - # self._state['enabled_protocols'] to tell user what driver to use - raise exception.InvalidInput( - reason=_('The storage device does not support %(prot)s. ' - 'Please configure the device to support %(prot)s or ' - 'switch to a driver using a different protocol.') - % {'prot': self.protocol}) - - required_flags = ['san_ip', 'san_ssh_port', 'san_login', - 'storwize_svc_volpool_name'] - for flag in required_flags: - if not self.configuration.safe_get(flag): - raise exception.InvalidInput(reason=_('%s is not set.') % flag) - - # Ensure that either password or keyfile were set - if not (self.configuration.san_password or - self.configuration.san_private_key): - raise exception.InvalidInput( - reason=_('Password or SSH private key is required for ' - 'authentication: set either san_password or ' - 'san_private_key option.')) - - opts = self._helpers.build_default_opts(self.configuration) - self._helpers.check_vdisk_opts(self._state, opts) - - LOG.debug('leave: check_for_setup_error') - - def _run_ssh(self, cmd_list, check_exit_code=True, attempts=1): - cinder_utils.check_ssh_injection(cmd_list) - command = ' '.join(cmd_list) - if not self.sshpool: - try: - self.sshpool = self._set_up_sshpool(self.active_ip) - except paramiko.SSHException: - LOG.warning('Unable to use san_ip to create SSHPool. 
Now ' - 'attempting to use storwize_san_secondary_ip ' - 'to create SSHPool.') - if self._toggle_ip(): - self.sshpool = self._set_up_sshpool(self.active_ip) - else: - LOG.warning('Unable to create SSHPool using san_ip ' - 'and not able to use ' - 'storwize_san_secondary_ip since it is ' - 'not configured.') - raise - try: - return self._ssh_execute(self.sshpool, command, - check_exit_code, attempts) - - except Exception: - # Need to check if creating an SSHPool storwize_san_secondary_ip - # before raising an error. - try: - if self._toggle_ip(): - LOG.warning("Unable to execute SSH command with " - "%(inactive)s. Attempting to execute SSH " - "command with %(active)s.", - {'inactive': self.inactive_ip, - 'active': self.active_ip}) - self.sshpool = self._set_up_sshpool(self.active_ip) - return self._ssh_execute(self.sshpool, command, - check_exit_code, attempts) - else: - LOG.warning('Not able to use ' - 'storwize_san_secondary_ip since it is ' - 'not configured.') - raise - except Exception: - with excutils.save_and_reraise_exception(): - LOG.error("Error running SSH command: %s", - command) - - def _set_up_sshpool(self, ip): - password = self.configuration.san_password - privatekey = self.configuration.san_private_key - min_size = self.configuration.ssh_min_pool_conn - max_size = self.configuration.ssh_max_pool_conn - sshpool = ssh_utils.SSHPool( - ip, - self.configuration.san_ssh_port, - self.configuration.ssh_conn_timeout, - self.configuration.san_login, - password=password, - privatekey=privatekey, - min_size=min_size, - max_size=max_size) - - return sshpool - - def _ssh_execute(self, sshpool, command, - check_exit_code = True, attempts=1): - try: - with sshpool.item() as ssh: - while attempts > 0: - attempts -= 1 - try: - return processutils.ssh_execute( - ssh, - command, - check_exit_code=check_exit_code) - except Exception as e: - LOG.error('Error has occurred: %s', e) - last_exception = e - greenthread.sleep(self.DEFAULT_GR_SLEEP) - try: - raise 
processutils.ProcessExecutionError( - exit_code=last_exception.exit_code, - stdout=last_exception.stdout, - stderr=last_exception.stderr, - cmd=last_exception.cmd) - except AttributeError: - raise processutils.ProcessExecutionError( - exit_code=-1, - stdout="", - stderr="Error running SSH command", - cmd=command) - - except Exception: - with excutils.save_and_reraise_exception(): - LOG.error("Error running SSH command: %s", command) - - def _toggle_ip(self): - # Change active_ip if storwize_san_secondary_ip is set. - if self.configuration.storwize_san_secondary_ip is None: - return False - - self.inactive_ip, self.active_ip = self.active_ip, self.inactive_ip - LOG.info('Toggle active_ip from %(old)s to %(new)s.', - {'old': self.inactive_ip, - 'new': self.active_ip}) - return True - - def ensure_export(self, ctxt, volume): - """Check that the volume exists on the storage. - - The system does not "export" volumes as a Linux iSCSI target does, - and therefore we just check that the volume exists on the storage. 
- """ - vol_name = self._get_target_vol(volume) - volume_defined = self._helpers.is_vdisk_defined(vol_name) - - if not volume_defined: - LOG.error('ensure_export: Volume %s not found on storage.', - volume['name']) - - def create_export(self, ctxt, volume, connector): - model_update = None - return model_update - - def remove_export(self, ctxt, volume): - pass - - def _get_vdisk_params(self, type_id, volume_type=None, - volume_metadata=None): - return self._helpers.get_vdisk_params(self.configuration, - self._state, type_id, - volume_type=volume_type, - volume_metadata=volume_metadata) - - def create_volume(self, volume): - LOG.debug('enter: create_volume: volume %s', volume['name']) - opts = self._get_vdisk_params(volume['volume_type_id'], - volume_metadata= - volume.get('volume_metadata')) - ctxt = context.get_admin_context() - rep_type = self._get_volume_replicated_type(ctxt, volume) - - pool = utils.extract_host(volume['host'], 'pool') - if opts['mirror_pool'] and rep_type: - reason = _('Create mirror volume with replication enabled is ' - 'not supported.') - raise exception.InvalidInput(reason=reason) - opts['iogrp'] = self._helpers.select_io_group(self._state, opts) - self._helpers.create_vdisk(volume['name'], str(volume['size']), - 'gb', pool, opts) - if opts['qos']: - self._helpers.add_vdisk_qos(volume['name'], opts['qos']) - - model_update = None - - if rep_type: - replica_obj = self._get_replica_obj(rep_type) - replica_obj.volume_replication_setup(ctxt, volume) - model_update = {'replication_status': - fields.ReplicationStatus.ENABLED} - - LOG.debug('leave: create_volume:\n volume: %(vol)s\n ' - 'model_update %(model_update)s', - {'vol': volume['name'], - 'model_update': model_update}) - return model_update - - def delete_volume(self, volume): - LOG.debug('enter: delete_volume: volume %s', volume['name']) - ctxt = context.get_admin_context() - - rep_type = self._get_volume_replicated_type(ctxt, volume) - if rep_type: - if self._aux_backend_helpers: - 
self._aux_backend_helpers.delete_rc_volume(volume['name'], - target_vol=True) - if not self._active_backend_id: - self._master_backend_helpers.delete_rc_volume(volume['name']) - else: - # If it's in fail over state, also try to delete the volume - # in master backend - try: - self._master_backend_helpers.delete_rc_volume( - volume['name']) - except Exception as ex: - LOG.error('Failed to get delete volume %(volume)s in ' - 'master backend. Exception: %(err)s.', - {'volume': volume['name'], - 'err': ex}) - else: - if self._active_backend_id: - msg = (_('Error: delete non-replicate volume in failover mode' - ' is not allowed.')) - LOG.error(msg) - raise exception.VolumeDriverException(message=msg) - else: - self._helpers.delete_vdisk(volume['name'], False) - - if volume['id'] in self._vdiskcopyops: - del self._vdiskcopyops[volume['id']] - - if not len(self._vdiskcopyops): - self._vdiskcopyops_loop.stop() - self._vdiskcopyops_loop = None - LOG.debug('leave: delete_volume: volume %s', volume['name']) - - def create_snapshot(self, snapshot): - ctxt = context.get_admin_context() - try: - # TODO(zhaochy): change to use snapshot.volume - source_vol = self.db.volume_get(ctxt, snapshot['volume_id']) - except Exception: - msg = (_('create_snapshot: get source volume failed.')) - LOG.error(msg) - raise exception.VolumeDriverException(message=msg) - - rep_type = self._get_volume_replicated_type( - ctxt, None, source_vol['volume_type_id']) - if rep_type == storwize_const.GMCV: - # GMCV volume will have problem to failback - # when it has flash copy relationship besides change volumes - msg = _('create_snapshot: Create snapshot to ' - 'gmcv replication volume is not allowed.') - LOG.error(msg) - raise exception.VolumeDriverException(message=msg) - - pool = utils.extract_host(source_vol['host'], 'pool') - opts = self._get_vdisk_params(source_vol['volume_type_id']) - self._helpers.create_copy(snapshot['volume_name'], snapshot['name'], - snapshot['volume_id'], self.configuration, - 
opts, False, pool=pool) - - def delete_snapshot(self, snapshot): - self._helpers.delete_vdisk(snapshot['name'], False) - - def create_volume_from_snapshot(self, volume, snapshot): - if snapshot['volume_size'] > volume['size']: - msg = (_("create_volume_from_snapshot: snapshot %(snapshot_name)s " - "size is %(snapshot_size)dGB and doesn't fit in target " - "volume %(volume_name)s of size %(volume_size)dGB.") % - {'snapshot_name': snapshot['name'], - 'snapshot_size': snapshot['volume_size'], - 'volume_name': volume['name'], - 'volume_size': volume['size']}) - LOG.error(msg) - raise exception.InvalidInput(message=msg) - - opts = self._get_vdisk_params(volume['volume_type_id'], - volume_metadata= - volume.get('volume_metadata')) - pool = utils.extract_host(volume['host'], 'pool') - self._helpers.create_copy(snapshot['name'], volume['name'], - snapshot['id'], self.configuration, - opts, True, pool=pool) - # The volume size is equal to the snapshot size in most - # of the cases. But in some scenario, the volume size - # may be bigger than the source volume size. - # SVC does not support flashcopy between two volumes - # with two different size. So use the snapshot size to - # create volume first and then extend the volume to- - # the target size. - if volume['size'] > snapshot['volume_size']: - # extend the new created target volume to expected size. 
- self._extend_volume_op(volume, volume['size'], - snapshot['volume_size']) - if opts['qos']: - self._helpers.add_vdisk_qos(volume['name'], opts['qos']) - - ctxt = context.get_admin_context() - rep_type = self._get_volume_replicated_type(ctxt, volume) - - if rep_type: - self._validate_replication_enabled() - replica_obj = self._get_replica_obj(rep_type) - replica_obj.volume_replication_setup(ctxt, volume) - return {'replication_status': fields.ReplicationStatus.ENABLED} - - def create_cloned_volume(self, tgt_volume, src_volume): - """Creates a clone of the specified volume.""" - - if src_volume['size'] > tgt_volume['size']: - msg = (_("create_cloned_volume: source volume %(src_vol)s " - "size is %(src_size)dGB and doesn't fit in target " - "volume %(tgt_vol)s of size %(tgt_size)dGB.") % - {'src_vol': src_volume['name'], - 'src_size': src_volume['size'], - 'tgt_vol': tgt_volume['name'], - 'tgt_size': tgt_volume['size']}) - LOG.error(msg) - raise exception.InvalidInput(message=msg) - - opts = self._get_vdisk_params(tgt_volume['volume_type_id'], - volume_metadata= - tgt_volume.get('volume_metadata')) - pool = utils.extract_host(tgt_volume['host'], 'pool') - self._helpers.create_copy(src_volume['name'], tgt_volume['name'], - src_volume['id'], self.configuration, - opts, True, pool=pool) - - # The source volume size is equal to target volume size - # in most of the cases. But in some scenarios, the target - # volume size may be bigger than the source volume size. - # SVC does not support flashcopy between two volumes - # with two different sizes. So use source volume size to - # create target volume first and then extend target - # volume to original size. - if tgt_volume['size'] > src_volume['size']: - # extend the new created target volume to expected size. 
- self._extend_volume_op(tgt_volume, tgt_volume['size'], - src_volume['size']) - - if opts['qos']: - self._helpers.add_vdisk_qos(tgt_volume['name'], opts['qos']) - - ctxt = context.get_admin_context() - rep_type = self._get_volume_replicated_type(ctxt, tgt_volume) - - if rep_type: - self._validate_replication_enabled() - replica_obj = self._get_replica_obj(rep_type) - replica_obj.volume_replication_setup(ctxt, tgt_volume) - return {'replication_status': fields.ReplicationStatus.ENABLED} - - def extend_volume(self, volume, new_size): - self._extend_volume_op(volume, new_size) - - def _extend_volume_op(self, volume, new_size, old_size=None): - LOG.debug('enter: _extend_volume_op: volume %s', volume['id']) - volume_name = self._get_target_vol(volume) - ret = self._helpers.ensure_vdisk_no_fc_mappings(volume_name, - allow_snaps=False) - if not ret: - msg = (_('_extend_volume_op: Extending a volume with snapshots is ' - 'not supported.')) - LOG.error(msg) - raise exception.VolumeDriverException(message=msg) - - if old_size is None: - old_size = volume.size - extend_amt = int(new_size) - old_size - - rel_info = self._helpers.get_relationship_info(volume_name) - if rel_info: - LOG.warning('_extend_volume_op: Extending a volume with ' - 'remote copy is not recommended.') - try: - rep_type = rel_info['copy_type'] - cyclingmode = rel_info['cycling_mode'] - self._master_backend_helpers.delete_relationship( - volume.name) - tgt_vol = (storwize_const.REPLICA_AUX_VOL_PREFIX + - volume.name) - self._master_backend_helpers.extend_vdisk(volume.name, - extend_amt) - self._aux_backend_helpers.extend_vdisk(tgt_vol, extend_amt) - tgt_sys = self._aux_backend_helpers.get_system_info() - if storwize_const.GMCV_MULTI == cyclingmode: - tgt_change_vol = ( - storwize_const.REPLICA_CHG_VOL_PREFIX + - tgt_vol) - source_change_vol = ( - storwize_const.REPLICA_CHG_VOL_PREFIX + - volume.name) - self._master_backend_helpers.extend_vdisk( - source_change_vol, extend_amt) - 
self._aux_backend_helpers.extend_vdisk( - tgt_change_vol, extend_amt) - src_change_opts = self._get_vdisk_params( - volume.volume_type_id) - cycle_period_seconds = src_change_opts.get( - 'cycle_period_seconds') - self._master_backend_helpers.create_relationship( - volume.name, tgt_vol, tgt_sys.get('system_name'), - True, True, source_change_vol, cycle_period_seconds) - self._aux_backend_helpers.change_relationship_changevolume( - tgt_vol, tgt_change_vol, False) - self._master_backend_helpers.start_relationship( - volume.name) - else: - self._master_backend_helpers.create_relationship( - volume.name, tgt_vol, tgt_sys.get('system_name'), - True if storwize_const.GLOBAL == rep_type else False) - except Exception as e: - msg = (_('Failed to extend a volume with remote copy ' - '%(volume)s. Exception: ' - '%(err)s.') % {'volume': volume.id, - 'err': e}) - LOG.error(msg) - raise exception.VolumeDriverException(message=msg) - else: - self._helpers.extend_vdisk(volume_name, extend_amt) - LOG.debug('leave: _extend_volume_op: volume %s', volume.id) - - def add_vdisk_copy(self, volume, dest_pool, vol_type, auto_delete=False): - return self._helpers.add_vdisk_copy(volume, dest_pool, - vol_type, self._state, - self.configuration, - auto_delete=auto_delete) - - def _add_vdisk_copy_op(self, ctxt, volume, new_op): - metadata = self.db.volume_admin_metadata_get(ctxt.elevated(), - volume['id']) - curr_ops = metadata.get('vdiskcopyops', None) - if curr_ops: - curr_ops_list = [tuple(x.split(':')) for x in curr_ops.split(';')] - new_ops_list = curr_ops_list.append(new_op) - else: - new_ops_list = [new_op] - new_ops_str = ';'.join([':'.join(x) for x in new_ops_list]) - self.db.volume_admin_metadata_update(ctxt.elevated(), volume['id'], - {'vdiskcopyops': new_ops_str}, - False) - if volume['id'] in self._vdiskcopyops: - self._vdiskcopyops[volume['id']].append(new_op) - else: - self._vdiskcopyops[volume['id']] = [new_op] - - # We added the first copy operation, so start the looping call - 
if len(self._vdiskcopyops) == 1: - self._vdiskcopyops_loop = loopingcall.FixedIntervalLoopingCall( - self._check_volume_copy_ops) - self._vdiskcopyops_loop.start(interval=self.VDISKCOPYOPS_INTERVAL) - - def _rm_vdisk_copy_op(self, ctxt, volume, orig_copy_id, new_copy_id): - try: - self._vdiskcopyops[volume['id']].remove((orig_copy_id, - new_copy_id)) - if not len(self._vdiskcopyops[volume['id']]): - del self._vdiskcopyops[volume['id']] - if not len(self._vdiskcopyops): - self._vdiskcopyops_loop.stop() - self._vdiskcopyops_loop = None - except KeyError: - LOG.error('_rm_vdisk_copy_op: Volume %s does not have any ' - 'registered vdisk copy operations.', volume['id']) - return - except ValueError: - LOG.error('_rm_vdisk_copy_op: Volume %(vol)s does not have ' - 'the specified vdisk copy operation: orig=%(orig)s ' - 'new=%(new)s.', - {'vol': volume['id'], 'orig': orig_copy_id, - 'new': new_copy_id}) - return - - metadata = self.db.volume_admin_metadata_get(ctxt.elevated(), - volume['id']) - curr_ops = metadata.get('vdiskcopyops', None) - if not curr_ops: - LOG.error('_rm_vdisk_copy_op: Volume metadata %s does not ' - 'have any registered vdisk copy operations.', - volume['id']) - return - curr_ops_list = [tuple(x.split(':')) for x in curr_ops.split(';')] - try: - curr_ops_list.remove((orig_copy_id, new_copy_id)) - except ValueError: - LOG.error('_rm_vdisk_copy_op: Volume %(vol)s metadata does ' - 'not have the specified vdisk copy operation: ' - 'orig=%(orig)s new=%(new)s.', - {'vol': volume['id'], 'orig': orig_copy_id, - 'new': new_copy_id}) - return - - if len(curr_ops_list): - new_ops_str = ';'.join([':'.join(x) for x in curr_ops_list]) - self.db.volume_admin_metadata_update(ctxt.elevated(), volume['id'], - {'vdiskcopyops': new_ops_str}, - False) - else: - self.db.volume_admin_metadata_delete(ctxt.elevated(), volume['id'], - 'vdiskcopyops') - - def _check_volume_copy_ops(self): - LOG.debug("Enter: update volume copy status.") - ctxt = context.get_admin_context() - 
copy_items = list(self._vdiskcopyops.items()) - for vol_id, copy_ops in copy_items: - try: - volume = self.db.volume_get(ctxt, vol_id) - except Exception: - LOG.warning('Volume %s does not exist.', vol_id) - del self._vdiskcopyops[vol_id] - if not len(self._vdiskcopyops): - self._vdiskcopyops_loop.stop() - self._vdiskcopyops_loop = None - continue - - for copy_op in copy_ops: - try: - synced = self._helpers.is_vdisk_copy_synced(volume['name'], - copy_op[1]) - except Exception: - LOG.info('_check_volume_copy_ops: Volume %(vol)s does ' - 'not have the specified vdisk copy ' - 'operation: orig=%(orig)s new=%(new)s.', - {'vol': volume['id'], 'orig': copy_op[0], - 'new': copy_op[1]}) - else: - if synced: - self._helpers.rm_vdisk_copy(volume['name'], copy_op[0]) - self._rm_vdisk_copy_op(ctxt, volume, copy_op[0], - copy_op[1]) - LOG.debug("Exit: update volume copy status.") - - # #### V2.1 replication methods #### # - def failover_host(self, context, volumes, secondary_id=None, groups=None): - LOG.debug('enter: failover_host: secondary_id=%(id)s', - {'id': secondary_id}) - if not self._replica_enabled: - msg = _("Replication is not properly enabled on backend.") - LOG.error(msg) - raise exception.UnableToFailOver(reason=msg) - - if storwize_const.FAILBACK_VALUE == secondary_id: - # In this case the administrator would like to fail back. - secondary_id, volumes_update = self._replication_failback(context, - volumes) - elif (secondary_id == self._replica_target['backend_id'] - or secondary_id is None): - # In this case the administrator would like to fail over. 
- secondary_id, volumes_update = self._replication_failover(context, - volumes) - else: - msg = (_("Invalid secondary id %s.") % secondary_id) - LOG.error(msg) - raise exception.InvalidReplicationTarget(reason=msg) - - LOG.debug('leave: failover_host: secondary_id=%(id)s', - {'id': secondary_id}) - return secondary_id, volumes_update, [] - - def _replication_failback(self, ctxt, volumes): - """Fail back all the volume on the secondary backend.""" - - volumes_update = [] - if not self._active_backend_id: - LOG.info("Host has been failed back. doesn't need " - "to fail back again") - return None, volumes_update - - try: - self._master_backend_helpers.get_system_info() - except Exception: - msg = (_("Unable to failback due to primary is not reachable.")) - LOG.error(msg) - raise exception.UnableToFailOver(reason=msg) - - unrep_volumes, rep_volumes = self._classify_volume(ctxt, volumes) - - # start synchronize from aux volume to master volume - self._sync_with_aux(ctxt, rep_volumes) - self._wait_replica_ready(ctxt, rep_volumes) - - rep_volumes_update = self._failback_replica_volumes(ctxt, - rep_volumes) - volumes_update.extend(rep_volumes_update) - - unrep_volumes_update = self._failover_unreplicated_volume( - unrep_volumes) - volumes_update.extend(unrep_volumes_update) - - self._helpers = self._master_backend_helpers - self._active_backend_id = None - - # Update the storwize state - self._update_storwize_state() - self._update_volume_stats() - return storwize_const.FAILBACK_VALUE, volumes_update - - def _failback_replica_volumes(self, ctxt, rep_volumes): - LOG.debug('enter: _failback_replica_volumes') - volumes_update = [] - - for volume in rep_volumes: - rep_type = self._get_volume_replicated_type(ctxt, volume) - replica_obj = self._get_replica_obj(rep_type) - tgt_volume = storwize_const.REPLICA_AUX_VOL_PREFIX + volume['name'] - rep_info = self._helpers.get_relationship_info(tgt_volume) - if not rep_info: - volumes_update.append( - {'volume_id': volume['id'], - 
'updates': - {'replication_status': - fields.ReplicationStatus.ERROR, - 'status': 'error'}}) - LOG.error('_failback_replica_volumes:no rc-releationship ' - 'is established between master: %(master)s and ' - 'aux %(aux)s. Please re-establish the ' - 'relationship and synchronize the volumes on ' - 'backend storage.', - {'master': volume['name'], 'aux': tgt_volume}) - continue - LOG.debug('_failover_replica_volumes: vol=%(vol)s, master_vol=' - '%(master_vol)s, aux_vol=%(aux_vol)s, state=%(state)s' - 'primary=%(primary)s', - {'vol': volume['name'], - 'master_vol': rep_info['master_vdisk_name'], - 'aux_vol': rep_info['aux_vdisk_name'], - 'state': rep_info['state'], - 'primary': rep_info['primary']}) - try: - model_updates = replica_obj.replication_failback(volume) - volumes_update.append( - {'volume_id': volume['id'], - 'updates': model_updates}) - except exception.VolumeDriverException: - LOG.error('Unable to fail back volume %(volume_id)s', - {'volume_id': volume.id}) - volumes_update.append( - {'volume_id': volume['id'], - 'updates': {'replication_status': - fields.ReplicationStatus.ERROR, - 'status': 'error'}}) - LOG.debug('leave: _failback_replica_volumes ' - 'volumes_update=%(volumes_update)s', - {'volumes_update': volumes_update}) - return volumes_update - - def _failover_unreplicated_volume(self, unreplicated_vols): - volumes_update = [] - for vol in unreplicated_vols: - if vol.replication_driver_data: - rep_data = json.loads(vol.replication_driver_data) - update_status = rep_data['previous_status'] - rep_data = '' - else: - update_status = 'error' - rep_data = json.dumps({'previous_status': vol.status}) - - volumes_update.append( - {'volume_id': vol.id, - 'updates': {'status': update_status, - 'replication_driver_data': rep_data}}) - - return volumes_update - - def _sync_with_aux(self, ctxt, volumes): - LOG.debug('enter: _sync_with_aux ') - try: - rep_mgr = self._get_replica_mgr() - rep_mgr.establish_target_partnership() - except Exception as ex: - 
LOG.warning('Fail to establish partnership in backend. ' - 'error=%(ex)s', {'error': ex}) - for volume in volumes: - tgt_volume = storwize_const.REPLICA_AUX_VOL_PREFIX + volume['name'] - rep_info = self._helpers.get_relationship_info(tgt_volume) - if not rep_info: - LOG.error('_sync_with_aux: no rc-releationship is ' - 'established between master: %(master)s and aux ' - '%(aux)s. Please re-establish the relationship ' - 'and synchronize the volumes on backend ' - 'storage.', {'master': volume['name'], - 'aux': tgt_volume}) - continue - LOG.debug('_sync_with_aux: volume: %(volume)s rep_info:master_vol=' - '%(master_vol)s, aux_vol=%(aux_vol)s, state=%(state)s, ' - 'primary=%(primary)s', - {'volume': volume['name'], - 'master_vol': rep_info['master_vdisk_name'], - 'aux_vol': rep_info['aux_vdisk_name'], - 'state': rep_info['state'], - 'primary': rep_info['primary']}) - try: - if (rep_info['state'] not in - [storwize_const.REP_CONSIS_SYNC, - storwize_const.REP_CONSIS_COPYING]): - if rep_info['primary'] == 'master': - self._helpers.start_relationship(tgt_volume) - else: - self._helpers.start_relationship(tgt_volume, - primary='aux') - except Exception as ex: - LOG.warning('Fail to copy data from aux to master. master:' - ' %(master)s and aux %(aux)s. Please ' - 're-establish the relationship and synchronize' - ' the volumes on backend storage. 
error=' - '%(ex)s', {'master': volume['name'], - 'aux': tgt_volume, - 'error': ex}) - LOG.debug('leave: _sync_with_aux.') - - def _wait_replica_ready(self, ctxt, volumes): - for volume in volumes: - tgt_volume = storwize_const.REPLICA_AUX_VOL_PREFIX + volume['name'] - try: - self._wait_replica_vol_ready(ctxt, tgt_volume) - except Exception as ex: - LOG.error('_wait_replica_ready: wait for volume:%(volume)s' - ' remote copy synchronization failed due to ' - 'error:%(err)s.', {'volume': tgt_volume, - 'err': ex}) - - def _wait_replica_vol_ready(self, ctxt, volume): - LOG.debug('enter: _wait_replica_vol_ready: volume=%(volume)s', - {'volume': volume}) - - def _replica_vol_ready(): - rep_info = self._helpers.get_relationship_info(volume) - if not rep_info: - msg = (_('_wait_replica_vol_ready: no rc-releationship' - 'is established for volume:%(volume)s. Please ' - 're-establish the rc-relationship and ' - 'synchronize the volumes on backend storage.'), - {'volume': volume}) - LOG.error(msg) - raise exception.VolumeBackendAPIException(data=msg) - LOG.debug('_replica_vol_ready:volume: %(volume)s rep_info: ' - 'master_vol=%(master_vol)s, aux_vol=%(aux_vol)s, ' - 'state=%(state)s, primary=%(primary)s', - {'volume': volume, - 'master_vol': rep_info['master_vdisk_name'], - 'aux_vol': rep_info['aux_vdisk_name'], - 'state': rep_info['state'], - 'primary': rep_info['primary']}) - if (rep_info['state'] in - [storwize_const.REP_CONSIS_SYNC, - storwize_const.REP_CONSIS_COPYING]): - return True - elif rep_info['state'] == storwize_const.REP_IDL_DISC: - msg = (_('Wait synchronize failed. 
volume: %(volume)s'), - {'volume': volume}) - LOG.error(msg) - raise exception.VolumeBackendAPIException(data=msg) - return False - - self._helpers._wait_for_a_condition( - _replica_vol_ready, timeout=storwize_const.DEFAULT_RC_TIMEOUT, - interval=storwize_const.DEFAULT_RC_INTERVAL, - raise_exception=True) - LOG.debug('leave: _wait_replica_vol_ready: volume=%(volume)s', - {'volume': volume}) - - def _replication_failover(self, ctxt, volumes): - volumes_update = [] - if self._active_backend_id: - LOG.info("Host has been failed over to %s", - self._active_backend_id) - return self._active_backend_id, volumes_update - - try: - self._aux_backend_helpers.get_system_info() - except Exception as ex: - msg = (_("Unable to failover due to replication target is not " - "reachable. error=%(ex)s"), {'error': ex}) - LOG.error(msg) - raise exception.UnableToFailOver(reason=msg) - - unrep_volumes, rep_volumes = self._classify_volume(ctxt, volumes) - - rep_volumes_update = self._failover_replica_volumes(ctxt, rep_volumes) - volumes_update.extend(rep_volumes_update) - - unrep_volumes_update = self._failover_unreplicated_volume( - unrep_volumes) - volumes_update.extend(unrep_volumes_update) - - self._helpers = self._aux_backend_helpers - self._active_backend_id = self._replica_target['backend_id'] - self._secondary_pools = [self._replica_target['pool_name']] - - # Update the storwize state - self._update_storwize_state() - self._update_volume_stats() - return self._active_backend_id, volumes_update - - def _failover_replica_volumes(self, ctxt, rep_volumes): - LOG.debug('enter: _failover_replica_volumes') - volumes_update = [] - - for volume in rep_volumes: - rep_type = self._get_volume_replicated_type(ctxt, volume) - replica_obj = self._get_replica_obj(rep_type) - # Try do the fail-over. 
- try: - rep_info = self._aux_backend_helpers.get_relationship_info( - storwize_const.REPLICA_AUX_VOL_PREFIX + volume['name']) - if not rep_info: - volumes_update.append( - {'volume_id': volume['id'], - 'updates': - {'replication_status': - fields.ReplicationStatus.FAILOVER_ERROR, - 'status': 'error'}}) - LOG.error('_failover_replica_volumes: no rc-' - 'releationship is established for master:' - '%(master)s. Please re-establish the rc-' - 'relationship and synchronize the volumes on' - ' backend storage.', - {'master': volume['name']}) - continue - LOG.debug('_failover_replica_volumes: vol=%(vol)s, ' - 'master_vol=%(master_vol)s, aux_vol=%(aux_vol)s, ' - 'state=%(state)s, primary=%(primary)s', - {'vol': volume['name'], - 'master_vol': rep_info['master_vdisk_name'], - 'aux_vol': rep_info['aux_vdisk_name'], - 'state': rep_info['state'], - 'primary': rep_info['primary']}) - model_updates = replica_obj.failover_volume_host(ctxt, volume) - volumes_update.append( - {'volume_id': volume['id'], - 'updates': model_updates}) - except exception.VolumeDriverException: - LOG.error('Unable to failover to aux volume. 
Please make ' - 'sure that the aux volume is ready.') - volumes_update.append( - {'volume_id': volume['id'], - 'updates': {'status': 'error', - 'replication_status': - fields.ReplicationStatus.FAILOVER_ERROR}}) - LOG.debug('leave: _failover_replica_volumes ' - 'volumes_update=%(volumes_update)s', - {'volumes_update': volumes_update}) - return volumes_update - - def _classify_volume(self, ctxt, volumes): - normal_volumes = [] - replica_volumes = [] - - for v in volumes: - volume_type = self._get_volume_replicated_type(ctxt, v) - if volume_type and v['status'] == 'available': - replica_volumes.append(v) - else: - normal_volumes.append(v) - - return normal_volumes, replica_volumes - - def _get_replica_obj(self, rep_type): - replica_manager = self.replica_manager[ - self._replica_target['backend_id']] - return replica_manager.get_replica_obj(rep_type) - - def _get_replica_mgr(self): - replica_manager = self.replica_manager[ - self._replica_target['backend_id']] - return replica_manager - - def _get_target_vol(self, volume): - tgt_vol = volume['name'] - if self._active_backend_id: - ctxt = context.get_admin_context() - rep_type = self._get_volume_replicated_type(ctxt, volume) - if rep_type: - tgt_vol = (storwize_const.REPLICA_AUX_VOL_PREFIX + - volume['name']) - return tgt_vol - - def _validate_replication_enabled(self): - if not self._replica_enabled: - msg = _("Replication is not properly configured on backend.") - LOG.error(msg) - raise exception.VolumeBackendAPIException(data=msg) - - def _get_specs_replicated_type(self, volume_type): - replication_type = None - extra_specs = volume_type.get("extra_specs", {}) - rep_val = extra_specs.get('replication_enabled') - if rep_val == " True": - replication_type = extra_specs.get('replication_type', - storwize_const.GLOBAL) - # The format for replication_type in extra spec is in - # " global". Otherwise, the code will - # not reach here. 
- if replication_type != storwize_const.GLOBAL: - # Pick up the replication type specified in the - # extra spec from the format like " global". - replication_type = replication_type.split()[1] - if replication_type not in storwize_const.VALID_REP_TYPES: - msg = (_("Invalid replication type %s.") % replication_type) - LOG.error(msg) - raise exception.InvalidInput(reason=msg) - return replication_type - - def _get_volume_replicated_type(self, ctxt, volume, vol_type_id=None): - replication_type = None - volume_type = None - volume_type_id = volume.volume_type_id if volume else vol_type_id - if volume_type_id: - volume_type = objects.VolumeType.get_by_name_or_id( - ctxt, volume_type_id) - if volume_type: - replication_type = self._get_specs_replicated_type(volume_type) - return replication_type - - def _get_storwize_config(self): - self._do_replication_setup() - - if self._active_backend_id and self._replica_target: - self._helpers = self._aux_backend_helpers - - self._replica_enabled = (True if (self._helpers.replication_licensed() - and self._replica_target) else False) - if self._replica_enabled: - self._supported_replica_types = storwize_const.VALID_REP_TYPES - - def _do_replication_setup(self): - rep_devs = self.configuration.safe_get('replication_device') - if not rep_devs: - return - - if len(rep_devs) > 1: - raise exception.InvalidInput( - reason='Multiple replication devices are configured. 
' - 'Now only one replication_device is supported.') - - required_flags = ['san_ip', 'backend_id', 'san_login', - 'san_password', 'pool_name'] - for flag in required_flags: - if flag not in rep_devs[0]: - raise exception.InvalidInput( - reason=_('%s is not set.') % flag) - - rep_target = {} - rep_target['san_ip'] = rep_devs[0].get('san_ip') - rep_target['backend_id'] = rep_devs[0].get('backend_id') - rep_target['san_login'] = rep_devs[0].get('san_login') - rep_target['san_password'] = rep_devs[0].get('san_password') - rep_target['pool_name'] = rep_devs[0].get('pool_name') - - # Each replication target will have a corresponding replication. - self._replication_initialize(rep_target) - - def _replication_initialize(self, target): - rep_manager = storwize_rep.StorwizeSVCReplicationManager( - self, target, StorwizeHelpers) - - if self._active_backend_id: - if self._active_backend_id != target['backend_id']: - msg = (_("Invalid secondary id %s.") % self._active_backend_id) - LOG.error(msg) - raise exception.InvalidInput(reason=msg) - # Setup partnership only in non-failover state - else: - try: - rep_manager.establish_target_partnership() - except exception.VolumeDriverException: - LOG.error('The replication src %(src)s has not ' - 'successfully established partnership with the ' - 'replica target %(tgt)s.', - {'src': self.configuration.san_ip, - 'tgt': target['backend_id']}) - - self._aux_backend_helpers = rep_manager.get_target_helpers() - self.replica_manager[target['backend_id']] = rep_manager - self._replica_target = target - - def migrate_volume(self, ctxt, volume, host): - """Migrate directly if source and dest are managed by same storage. - - We create a new vdisk copy in the desired pool, and add the original - vdisk copy to the admin_metadata of the volume to be deleted. The - deletion will occur using a periodic task once the new copy is synced. 
- - :param ctxt: Context - :param volume: A dictionary describing the volume to migrate - :param host: A dictionary describing the host to migrate to, where - host['host'] is its name, and host['capabilities'] is a - dictionary of its reported capabilities. - """ - LOG.debug('enter: migrate_volume: id=%(id)s, host=%(host)s', - {'id': volume['id'], 'host': host['host']}) - - false_ret = (False, None) - dest_pool = self._helpers.can_migrate_to_host(host, self._state) - if dest_pool is None: - return false_ret - - ctxt = context.get_admin_context() - volume_type_id = volume['volume_type_id'] - if volume_type_id is not None: - vol_type = volume_types.get_volume_type(ctxt, volume_type_id) - else: - vol_type = None - - resp = self._helpers.lsvdiskcopy(volume.name) - if len(resp) > 1: - copies = self._helpers.get_vdisk_copies(volume.name) - self._helpers.migratevdisk(volume.name, dest_pool, - copies['primary']['copy_id']) - else: - self.add_vdisk_copy(volume.name, dest_pool, vol_type, - auto_delete=True) - - LOG.debug('leave: migrate_volume: id=%(id)s, host=%(host)s', - {'id': volume.id, 'host': host['host']}) - return (True, None) - - def _verify_retype_params(self, volume, new_opts, old_opts, need_copy, - change_mirror, new_rep_type, old_rep_type): - # Some volume parameters can not be changed or changed at the same - # time during volume retype operation. This function checks the - # retype parameters. - resp = self._helpers.lsvdiskcopy(volume.name) - if old_opts['mirror_pool'] and len(resp) == 1: - msg = (_('Unable to retype: volume %s is a mirrorred vol. But it ' - 'has only one copy in storage.') % volume.name) - raise exception.VolumeDriverException(message=msg) - - if need_copy: - # mirror volume can not add volume-copy again. - if len(resp) > 1: - msg = (_('Unable to retype: current action needs volume-copy. ' - 'A copy of volume %s exists. 
Adding another copy ' - 'would exceed the limit of 2 copies.') % volume.name) - raise exception.VolumeDriverException(message=msg) - if old_opts['mirror_pool'] or new_opts['mirror_pool']: - msg = (_('Unable to retype: current action needs volume-copy, ' - 'it is not allowed for mirror volume ' - '%s.') % volume.name) - raise exception.VolumeDriverException(message=msg) - - if change_mirror: - if (new_opts['mirror_pool'] and - not self._helpers.is_pool_defined( - new_opts['mirror_pool'])): - msg = (_('Unable to retype: The pool %s in which mirror copy ' - 'is stored is not valid') % new_opts['mirror_pool']) - raise exception.VolumeDriverException(message=msg) - - # There are four options for rep_type: None, metro, global, gmcv - if new_rep_type or old_rep_type: - # If volume is replicated, can't copy - if need_copy or new_opts['mirror_pool'] or old_opts['mirror_pool']: - msg = (_('Unable to retype: current action needs volume-copy, ' - 'it is not allowed for replication type. ' - 'Volume = %s') % volume.id) - raise exception.VolumeDriverException(message=msg) - - if new_rep_type != old_rep_type: - old_io_grp = self._helpers.get_volume_io_group(volume.name) - if (old_io_grp not in - StorwizeHelpers._get_valid_requested_io_groups( - self._state, new_opts)): - msg = (_('Unable to retype: it is not allowed to change ' - 'replication type and io group at the same time.')) - LOG.error(msg) - raise exception.VolumeDriverException(message=msg) - if new_rep_type and old_rep_type: - msg = (_('Unable to retype: it is not allowed to change ' - '%(old_rep_type)s volume to %(new_rep_type)s ' - 'volume.') % - {'old_rep_type': old_rep_type, - 'new_rep_type': new_rep_type}) - LOG.error(msg) - raise exception.VolumeDriverException(message=msg) - elif storwize_const.GMCV == new_rep_type: - # To gmcv, we may change cycle_period_seconds if needed - previous_cps = old_opts.get('cycle_period_seconds') - new_cps = new_opts.get('cycle_period_seconds') - if previous_cps != new_cps: - 
self._helpers.change_relationship_cycleperiod(volume.name, - new_cps) - - def retype(self, ctxt, volume, new_type, diff, host): - """Convert the volume to be of the new type. - - Returns a boolean indicating whether the retype occurred. - - :param ctxt: Context - :param volume: A dictionary describing the volume to migrate - :param new_type: A dictionary describing the volume type to convert to - :param diff: A dictionary with the difference between the two types - :param host: A dictionary describing the host to migrate to, where - host['host'] is its name, and host['capabilities'] is a - dictionary of its reported capabilities. - """ - def retype_iogrp_property(volume, new, old): - if new != old: - self._helpers.change_vdisk_iogrp(volume['name'], - self._state, (new, old)) - - LOG.debug('enter: retype: id=%(id)s, new_type=%(new_type)s,' - 'diff=%(diff)s, host=%(host)s', {'id': volume['id'], - 'new_type': new_type, - 'diff': diff, - 'host': host}) - - no_copy_keys = ['warning', 'autoexpand', 'easytier'] - copy_keys = ['rsize', 'grainsize', 'compression'] - all_keys = no_copy_keys + copy_keys - old_opts = self._get_vdisk_params(volume['volume_type_id'], - volume_metadata= - volume.get('volume_matadata')) - new_opts = self._get_vdisk_params(new_type['id'], - volume_type=new_type) - - vdisk_changes = [] - need_copy = False - change_mirror = False - - for key in all_keys: - if old_opts[key] != new_opts[key]: - if key in copy_keys: - need_copy = True - break - elif key in no_copy_keys: - vdisk_changes.append(key) - - if (utils.extract_host(volume['host'], 'pool') != - utils.extract_host(host['host'], 'pool')): - need_copy = True - - if old_opts['mirror_pool'] != new_opts['mirror_pool']: - change_mirror = True - - # Check if retype affects volume replication - model_update = None - new_rep_type = self._get_specs_replicated_type(new_type) - old_rep_type = self._get_volume_replicated_type(ctxt, volume) - old_io_grp = self._helpers.get_volume_io_group(volume['name']) - 
new_io_grp = self._helpers.select_io_group(self._state, new_opts) - - self._verify_retype_params(volume, new_opts, old_opts, need_copy, - change_mirror, new_rep_type, old_rep_type) - if need_copy: - self._check_volume_copy_ops() - dest_pool = self._helpers.can_migrate_to_host(host, self._state) - if dest_pool is None: - return False - - retype_iogrp_property(volume, - new_io_grp, old_io_grp) - try: - self.add_vdisk_copy(volume['name'], dest_pool, new_type, - auto_delete=True) - except exception.VolumeDriverException: - # roll back changing iogrp property - retype_iogrp_property(volume, old_io_grp, new_io_grp) - msg = (_('Unable to retype: A copy of volume %s exists. ' - 'Retyping would exceed the limit of 2 copies.'), - volume['id']) - raise exception.VolumeDriverException(message=msg) - else: - retype_iogrp_property(volume, new_io_grp, old_io_grp) - - self._helpers.change_vdisk_options(volume['name'], vdisk_changes, - new_opts, self._state) - if change_mirror: - copies = self._helpers.get_vdisk_copies(volume.name) - if not old_opts['mirror_pool'] and new_opts['mirror_pool']: - # retype from non mirror vol to mirror vol - self.add_vdisk_copy(volume['name'], - new_opts['mirror_pool'], new_type) - elif old_opts['mirror_pool'] and not new_opts['mirror_pool']: - # retype from mirror vol to non mirror vol - secondary = copies['secondary'] - if secondary: - self._helpers.rm_vdisk_copy( - volume.name, secondary['copy_id']) - else: - # migrate the second copy to another pool. - self._helpers.migratevdisk( - volume.name, new_opts['mirror_pool'], - copies['secondary']['copy_id']) - if new_opts['qos']: - # Add the new QoS setting to the volume. If the volume has an - # old QoS setting, it will be overwritten. - self._helpers.update_vdisk_qos(volume['name'], new_opts['qos']) - elif old_opts['qos']: - # If the old_opts contain QoS keys, disable them. 
- self._helpers.disable_vdisk_qos(volume['name'], old_opts['qos']) - - # Delete replica if needed - if old_rep_type and not new_rep_type: - self._aux_backend_helpers.delete_rc_volume(volume['name'], - target_vol=True) - if storwize_const.GMCV == old_rep_type: - self._helpers.delete_vdisk( - storwize_const.REPLICA_CHG_VOL_PREFIX + volume['name'], - False) - model_update = {'replication_status': - fields.ReplicationStatus.DISABLED, - 'replication_driver_data': None, - 'replication_extended_status': None} - # Add replica if needed - if not old_rep_type and new_rep_type: - replica_obj = self._get_replica_obj(new_rep_type) - replica_obj.volume_replication_setup(ctxt, volume) - if storwize_const.GMCV == new_rep_type: - # Set cycle_period_seconds if needed - self._helpers.change_relationship_cycleperiod( - volume['name'], - new_opts.get('cycle_period_seconds')) - model_update = {'replication_status': - fields.ReplicationStatus.ENABLED} - - LOG.debug('exit: retype: ild=%(id)s, new_type=%(new_type)s,' - 'diff=%(diff)s, host=%(host)s', {'id': volume['id'], - 'new_type': new_type, - 'diff': diff, - 'host': host['host']}) - return True, model_update - - def update_migrated_volume(self, ctxt, volume, new_volume, - original_volume_status): - """Return model update from Storwize for migrated volume. - - This method should rename the back-end volume name(id) on the - destination host back to its original name(id) on the source host. 
- - :param ctxt: The context used to run the method update_migrated_volume - :param volume: The original volume that was migrated to this backend - :param new_volume: The migration volume object that was created on - this backend as part of the migration process - :param original_volume_status: The status of the original volume - :returns: model_update to update DB with any needed changes - """ - current_name = CONF.volume_name_template % new_volume['id'] - original_volume_name = CONF.volume_name_template % volume['id'] - try: - self._helpers.rename_vdisk(current_name, original_volume_name) - rep_type = self._get_volume_replicated_type(ctxt, new_volume) - if rep_type: - rel_info = self._helpers.get_relationship_info(current_name) - aux_vol = (storwize_const.REPLICA_AUX_VOL_PREFIX + - original_volume_name) - self._aux_backend_helpers.rename_vdisk( - rel_info['aux_vdisk_name'], aux_vol) - except exception.VolumeBackendAPIException: - LOG.error('Unable to rename the logical volume ' - 'for volume: %s', volume['id']) - return {'_name_id': new_volume['_name_id'] or new_volume['id']} - # If the back-end name(id) for the volume has been renamed, - # it is OK for the volume to keep the original name(id) and there is - # no need to use the column "_name_id" to establish the mapping - # relationship between the volume id and the back-end volume - # name(id). - # Set the key "_name_id" to None for a successful rename. - model_update = {'_name_id': None} - return model_update - - def manage_existing(self, volume, ref): - """Manages an existing vdisk. - - Renames the vdisk to match the expected name for the volume. - Error checking done by manage_existing_get_size is not repeated - - if we got here then we have a vdisk that isn't in use (or we don't - care if it is in use. 
- """ - # Check that the reference is valid - vdisk = self._manage_input_check(ref) - vdisk_io_grp = self._helpers.get_volume_io_group(vdisk['name']) - if vdisk_io_grp not in self._state['available_iogrps']: - msg = (_("Failed to manage existing volume due to " - "the volume to be managed is not in a valid " - "I/O group.")) - raise exception.ManageExistingVolumeTypeMismatch(reason=msg) - - # Add replication check - ctxt = context.get_admin_context() - rep_type = self._get_volume_replicated_type(ctxt, volume) - vol_rep_type = None - rel_info = self._helpers.get_relationship_info(vdisk['name']) - copies = self._helpers.get_vdisk_copies(vdisk['name']) - if rel_info: - vol_rep_type = ( - storwize_const.GMCV if - storwize_const.GMCV_MULTI == rel_info['cycling_mode'] - else rel_info['copy_type']) - - aux_info = self._aux_backend_helpers.get_system_info() - if rel_info['aux_cluster_id'] != aux_info['system_id']: - msg = (_("Failed to manage existing volume due to the aux " - "cluster for volume %(volume)s is %(aux_id)s. 
The " - "configured cluster id is %(cfg_id)s") % - {'volume': vdisk['name'], - 'aux_id': rel_info['aux_cluster_id'], - 'cfg_id': aux_info['system_id']}) - raise exception.ManageExistingVolumeTypeMismatch(reason=msg) - - if vol_rep_type != rep_type: - msg = (_("Failed to manage existing volume due to " - "the replication type of the volume to be managed is " - "mismatch with the provided replication type.")) - raise exception.ManageExistingVolumeTypeMismatch(reason=msg) - elif storwize_const.GMCV == rep_type: - if volume['volume_type_id']: - rep_opts = self._get_vdisk_params( - volume['volume_type_id'], - volume_metadata=volume.get('volume_metadata')) - # Check cycle_period_seconds - rep_cps = six.text_type(rep_opts.get('cycle_period_seconds')) - if rel_info['cycle_period_seconds'] != rep_cps: - msg = (_("Failed to manage existing volume due to " - "the cycle_period_seconds %(vol_cps)s of " - "the volume to be managed is mismatch with " - "cycle_period_seconds %(type_cps)s in " - "the provided gmcv replication type.") % - {'vol_cps': rel_info['cycle_period_seconds'], - 'type_cps': rep_cps}) - raise exception.ManageExistingVolumeTypeMismatch(reason=msg) - - if volume['volume_type_id']: - opts = self._get_vdisk_params(volume['volume_type_id'], - volume_metadata= - volume.get('volume_metadata')) - resp = self._helpers.lsvdiskcopy(vdisk['name']) - expected_copy_num = 2 if opts['mirror_pool'] else 1 - if len(resp) != expected_copy_num: - msg = (_("Failed to manage existing volume due to mirror type " - "mismatch. Volume to be managed has %(resp_len)s " - "copies. mirror_pool of the chosen type is " - "%(mirror_pool)s.") % - {'resp_len': len(resp), - 'mirror_pool': opts['mirror_pool']}) - raise exception.ManageExistingVolumeTypeMismatch(reason=msg) - if (opts['mirror_pool']and opts['mirror_pool'] != - copies['secondary']['mdisk_grp_name']): - msg = (_("Failed to manage existing volume due to mirror pool " - "mismatch. 
The secondary pool of the volume to be " - "managed is %(sec_copy_pool)s. mirror_pool of the " - "chosen type is %(mirror_pool)s.") % - {'sec_copy_pool': copies['secondary']['mdisk_grp_name'], - 'mirror_pool': opts['mirror_pool']}) - raise exception.ManageExistingVolumeTypeMismatch( - reason=msg) - - vdisk_copy = self._helpers.get_vdisk_copy_attrs(vdisk['name'], '0') - if vdisk_copy['autoexpand'] == 'on' and opts['rsize'] == -1: - msg = (_("Failed to manage existing volume due to " - "the volume to be managed is thin, but " - "the volume type chosen is thick.")) - raise exception.ManageExistingVolumeTypeMismatch(reason=msg) - - if not vdisk_copy['autoexpand'] and opts['rsize'] != -1: - msg = (_("Failed to manage existing volume due to " - "the volume to be managed is thick, but " - "the volume type chosen is thin.")) - raise exception.ManageExistingVolumeTypeMismatch(reason=msg) - - if (vdisk_copy['compressed_copy'] == 'no' and - opts['compression']): - msg = (_("Failed to manage existing volume due to the " - "volume to be managed is not compress, but " - "the volume type chosen is compress.")) - raise exception.ManageExistingVolumeTypeMismatch(reason=msg) - - if (vdisk_copy['compressed_copy'] == 'yes' and - not opts['compression']): - msg = (_("Failed to manage existing volume due to the " - "volume to be managed is compress, but " - "the volume type chosen is not compress.")) - raise exception.ManageExistingVolumeTypeMismatch(reason=msg) - - if (vdisk_io_grp not in - StorwizeHelpers._get_valid_requested_io_groups( - self._state, opts)): - msg = (_("Failed to manage existing volume due to " - "I/O group mismatch. The I/O group of the " - "volume to be managed is %(vdisk_iogrp)s. 
I/O group" - "of the chosen type is %(opt_iogrp)s.") % - {'vdisk_iogrp': vdisk['IO_group_name'], - 'opt_iogrp': opts['iogrp']}) - raise exception.ManageExistingVolumeTypeMismatch(reason=msg) - pool = utils.extract_host(volume['host'], 'pool') - if copies['primary']['mdisk_grp_name'] != pool: - msg = (_("Failed to manage existing volume due to the " - "pool of the volume to be managed does not " - "match the backend pool. Pool of the " - "volume to be managed is %(vdisk_pool)s. Pool " - "of the backend is %(backend_pool)s.") % - {'vdisk_pool': copies['primary']['mdisk_grp_name'], - 'backend_pool': pool}) - raise exception.ManageExistingVolumeTypeMismatch(reason=msg) - - model_update = {} - self._helpers.rename_vdisk(vdisk['name'], volume['name']) - if vol_rep_type: - aux_vol = storwize_const.REPLICA_AUX_VOL_PREFIX + volume['name'] - self._aux_backend_helpers.rename_vdisk(rel_info['aux_vdisk_name'], - aux_vol) - if storwize_const.GMCV == vol_rep_type: - self._helpers.rename_vdisk( - rel_info['master_change_vdisk_name'], - storwize_const.REPLICA_CHG_VOL_PREFIX + volume['name']) - self._aux_backend_helpers.rename_vdisk( - rel_info['aux_change_vdisk_name'], - storwize_const.REPLICA_CHG_VOL_PREFIX + aux_vol) - model_update = {'replication_status': - fields.ReplicationStatus.ENABLED} - return model_update - - def manage_existing_get_size(self, volume, ref): - """Return size of an existing Vdisk for manage_existing. - - existing_ref is a dictionary of the form: - {'source-id': } or - {'source-name': } - - Optional elements are: - 'manage_if_in_use': True/False (default is False) - If set to True, a volume will be managed even if it is currently - attached to a host system. - """ - - # Check that the reference is valid - vdisk = self._manage_input_check(ref) - - # Check if the disk is in use, if we need to. 
- manage_if_in_use = ref.get('manage_if_in_use', False) - if (not manage_if_in_use and - self._helpers.is_vdisk_in_use(vdisk['name'])): - reason = _('The specified vdisk is mapped to a host.') - raise exception.ManageExistingInvalidReference(existing_ref=ref, - reason=reason) - - return int(math.ceil(float(vdisk['capacity']) / units.Gi)) - - def unmanage(self, volume): - """Remove the specified volume from Cinder management.""" - pass - - def get_volume_stats(self, refresh=False): - """Get volume stats. - - If we haven't gotten stats yet or 'refresh' is True, - run update the stats first. - """ - if not self._stats or refresh: - self._update_volume_stats() - - return self._stats - - # Add CG capability to generic volume groups - def create_group(self, context, group): - """Creates a group. - - :param context: the context of the caller. - :param group: the group object. - :returns: model_update - """ - - LOG.debug("Creating group.") - model_update = {'status': fields.GroupStatus.AVAILABLE} - - for vol_type_id in group.volume_type_ids: - replication_type = self._get_volume_replicated_type( - context, None, vol_type_id) - if replication_type: - # An unsupported configuration - LOG.error('Unable to create group: create group with ' - 'replication volume type is not supported.') - model_update = {'status': fields.GroupStatus.ERROR} - return model_update - - if utils.is_group_a_cg_snapshot_type(group): - return {'status': fields.GroupStatus.AVAILABLE} - # we'll rely on the generic group implementation if it is not a - # consistency group request. - raise NotImplementedError() - - def delete_group(self, context, group, volumes): - """Deletes a group. - - :param context: the context of the caller. - :param group: the group object. - :param volumes: a list of volume objects in the group. 
- :returns: model_update, volumes_model_update - """ - LOG.debug("Deleting group.") - if not utils.is_group_a_cg_snapshot_type(group): - # we'll rely on the generic group implementation if it is - # not a consistency group request. - raise NotImplementedError() - - model_update = {'status': fields.GroupStatus.DELETED} - volumes_model_update = [] - - for volume in volumes: - try: - self._helpers.delete_vdisk(volume['name'], True) - volumes_model_update.append( - {'id': volume['id'], 'status': 'deleted'}) - except exception.VolumeBackendAPIException as err: - model_update['status'] = ( - fields.GroupStatus.ERROR_DELETING) - LOG.error("Failed to delete the volume %(vol)s of CG. " - "Exception: %(exception)s.", - {'vol': volume['name'], 'exception': err}) - volumes_model_update.append( - {'id': volume['id'], - 'status': fields.GroupStatus.ERROR_DELETING}) - - return model_update, volumes_model_update - - def update_group(self, context, group, add_volumes=None, - remove_volumes=None): - """Updates a group. - - :param context: the context of the caller. - :param group: the group object. - :param add_volumes: a list of volume objects to be added. - :param remove_volumes: a list of volume objects to be removed. - :returns: model_update, add_volumes_update, remove_volumes_update - """ - - LOG.debug("Updating group.") - if utils.is_group_a_cg_snapshot_type(group): - return None, None, None - - # we'll rely on the generic group implementation if it is not a - # consistency group request. - raise NotImplementedError() - - def create_group_from_src(self, context, group, volumes, - group_snapshot=None, snapshots=None, - source_group=None, source_vols=None): - """Creates a group from source. - - :param context: the context of the caller. - :param group: the Group object to be created. - :param volumes: a list of Volume objects in the group. - :param group_snapshot: the GroupSnapshot object as source. - :param snapshots: a list of snapshot objects in group_snapshot. 
- :param source_group: the Group object as source. - :param source_vols: a list of volume objects in the source_group. - :returns: model_update, volumes_model_update - """ - LOG.debug('Enter: create_group_from_src.') - if not utils.is_group_a_cg_snapshot_type(group): - # we'll rely on the generic volume groups implementation if it is - # not a consistency group request. - raise NotImplementedError() - - if group_snapshot and snapshots: - cg_name = 'cg-' + group_snapshot.id - sources = snapshots - - elif source_group and source_vols: - cg_name = 'cg-' + source_group.id - sources = source_vols - - else: - error_msg = _("create_group_from_src must be creating from a " - "group snapshot, or a source group.") - raise exception.InvalidInput(reason=error_msg) - - LOG.debug('create_group_from_src: cg_name %(cg_name)s' - ' %(sources)s', {'cg_name': cg_name, 'sources': sources}) - self._helpers.create_fc_consistgrp(cg_name) - timeout = self.configuration.storwize_svc_flashcopy_timeout - model_update, snapshots_model = ( - self._helpers.create_cg_from_source(group, - cg_name, - sources, - volumes, - self._state, - self.configuration, - timeout)) - LOG.debug("Leave: create_group_from_src.") - return model_update, snapshots_model - - def create_group_snapshot(self, context, group_snapshot, snapshots): - """Creates a group_snapshot. - - :param context: the context of the caller. - :param group_snapshot: the GroupSnapshot object to be created. - :param snapshots: a list of Snapshot objects in the group_snapshot. - :returns: model_update, snapshots_model_update - """ - if not utils.is_group_a_cg_snapshot_type(group_snapshot): - # we'll rely on the generic group implementation if it is not a - # consistency group request. 
- raise NotImplementedError() - - # Use group_snapshot id as cg name - cg_name = 'cg_snap-' + group_snapshot.id - # Create new cg as cg_snapshot - self._helpers.create_fc_consistgrp(cg_name) - - timeout = self.configuration.storwize_svc_flashcopy_timeout - - model_update, snapshots_model = ( - self._helpers.run_consistgrp_snapshots(cg_name, - snapshots, - self._state, - self.configuration, - timeout)) - - return model_update, snapshots_model - - def delete_group_snapshot(self, context, group_snapshot, snapshots): - """Deletes a group_snapshot. - - :param context: the context of the caller. - :param group_snapshot: the GroupSnapshot object to be deleted. - :param snapshots: a list of snapshot objects in the group_snapshot. - :returns: model_update, snapshots_model_update - """ - - if not utils.is_group_a_cg_snapshot_type(group_snapshot): - # we'll rely on the generic group implementation if it is not a - # consistency group request. - raise NotImplementedError() - - cgsnapshot_id = group_snapshot.id - cg_name = 'cg_snap-' + cgsnapshot_id - - model_update, snapshots_model = ( - self._helpers.delete_consistgrp_snapshots(cg_name, - snapshots)) - - return model_update, snapshots_model - - def get_pool(self, volume): - attr = self._helpers.get_vdisk_attributes(volume['name']) - - if attr is None: - msg = (_('get_pool: Failed to get attributes for volume ' - '%s') % volume['name']) - LOG.error(msg) - raise exception.VolumeDriverException(message=msg) - - return attr['mdisk_grp_name'] - - def _update_volume_stats(self): - """Retrieve stats info from volume group.""" - - LOG.debug("Updating volume stats.") - data = {} - - data['vendor_name'] = 'IBM' - data['driver_version'] = self.VERSION - data['storage_protocol'] = self.protocol - data['pools'] = [] - - backend_name = self.configuration.safe_get('volume_backend_name') - data['volume_backend_name'] = (backend_name or - self._state['system_name']) - - data['pools'] = [self._build_pool_stats(pool) - for pool in - 
self._get_backend_pools()] - if self._replica_enabled: - data['replication'] = self._replica_enabled - data['replication_enabled'] = self._replica_enabled - data['replication_targets'] = self._get_replication_targets() - self._stats = data - - def _build_pool_stats(self, pool): - """Build pool status""" - QoS_support = True - pool_stats = {} - try: - pool_data = self._helpers.get_pool_attrs(pool) - if pool_data: - easy_tier = pool_data['easy_tier'] in ['on', 'auto'] - total_capacity_gb = float(pool_data['capacity']) / units.Gi - free_capacity_gb = float(pool_data['free_capacity']) / units.Gi - allocated_capacity_gb = (float(pool_data['used_capacity']) / - units.Gi) - provisioned_capacity_gb = float( - pool_data['virtual_capacity']) / units.Gi - - rsize = self.configuration.safe_get( - 'storwize_svc_vol_rsize') - # rsize of -1 or 100 means fully allocate the mdisk - use_thick_provisioning = rsize == -1 or rsize == 100 - over_sub_ratio = self.configuration.safe_get( - 'max_over_subscription_ratio') - location_info = ('StorwizeSVCDriver:%(sys_id)s:%(pool)s' % - {'sys_id': self._state['system_id'], - 'pool': pool_data['name']}) - multiattach = (self.configuration. 
- storwize_svc_multihostmap_enabled) - pool_stats = { - 'pool_name': pool_data['name'], - 'total_capacity_gb': total_capacity_gb, - 'free_capacity_gb': free_capacity_gb, - 'allocated_capacity_gb': allocated_capacity_gb, - 'provisioned_capacity_gb': provisioned_capacity_gb, - 'compression_support': self._state['compression_enabled'], - 'reserved_percentage': - self.configuration.reserved_percentage, - 'QoS_support': QoS_support, - 'consistencygroup_support': True, - 'location_info': location_info, - 'easytier_support': easy_tier, - 'multiattach': multiattach, - 'thin_provisioning_support': not use_thick_provisioning, - 'thick_provisioning_support': use_thick_provisioning, - 'max_over_subscription_ratio': over_sub_ratio, - 'consistent_group_snapshot_enabled': True, - } - if self._replica_enabled: - pool_stats.update({ - 'replication_enabled': self._replica_enabled, - 'replication_type': self._supported_replica_types, - 'replication_targets': self._get_replication_targets(), - 'replication_count': len(self._get_replication_targets()) - }) - - except exception.VolumeBackendAPIException: - msg = _('Failed getting details for pool %s.') % pool - raise exception.VolumeBackendAPIException(data=msg) - - return pool_stats - - def _get_replication_targets(self): - return [self._replica_target['backend_id']] - - def _manage_input_check(self, ref): - """Verify the input of manage function.""" - # Check that the reference is valid - if 'source-name' in ref: - manage_source = ref['source-name'] - vdisk = self._helpers.get_vdisk_attributes(manage_source) - elif 'source-id' in ref: - manage_source = ref['source-id'] - vdisk = self._helpers.vdisk_by_uid(manage_source) - else: - reason = _('Reference must contain source-id or ' - 'source-name element.') - raise exception.ManageExistingInvalidReference(existing_ref=ref, - reason=reason) - - if vdisk is None: - reason = (_('No vdisk with the UID specified by ref %s.') - % manage_source) - raise 
exception.ManageExistingInvalidReference(existing_ref=ref, - reason=reason) - return vdisk diff --git a/cinder/volume/drivers/ibm/storwize_svc/storwize_svc_fc.py b/cinder/volume/drivers/ibm/storwize_svc/storwize_svc_fc.py deleted file mode 100644 index 350c9e5e2..000000000 --- a/cinder/volume/drivers/ibm/storwize_svc/storwize_svc_fc.py +++ /dev/null @@ -1,314 +0,0 @@ -# Copyright 2015 IBM Corp. -# Copyright 2012 OpenStack Foundation -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -# -""" -Volume FC driver for IBM Storwize family and SVC storage systems. - -Notes: -1. If you specify both a password and a key file, this driver will use the -key file only. -2. When using a key file for authentication, it is up to the user or -system administrator to store the private key in a safe manner. -3. The defaults for creating volumes are "-rsize 2% -autoexpand --grainsize 256 -warning 0". These can be changed in the configuration -file or by using volume types(recommended only for advanced users). - -Limitations: -1. The driver expects CLI output in English, error messages may be in a -localized format. -2. Clones and creating volumes from snapshots, where the source and target -are of different sizes, is not supported. 
- -""" - -from oslo_config import cfg -from oslo_log import log as logging -from oslo_utils import excutils - -from cinder import exception -from cinder.i18n import _ -from cinder import interface -from cinder import utils -from cinder.volume import configuration -from cinder.volume.drivers.ibm.storwize_svc import ( - storwize_svc_common as storwize_common) -from cinder.zonemanager import utils as fczm_utils - -LOG = logging.getLogger(__name__) - -storwize_svc_fc_opts = [ - cfg.BoolOpt('storwize_svc_multipath_enabled', - default=False, - help='Connect with multipath (FC only; iSCSI multipath is ' - 'controlled by Nova)'), -] - -CONF = cfg.CONF -CONF.register_opts(storwize_svc_fc_opts, group=configuration.SHARED_CONF_GROUP) - - -@interface.volumedriver -class StorwizeSVCFCDriver(storwize_common.StorwizeSVCCommonDriver): - """IBM Storwize V7000 and SVC FC volume driver. - - Version history: - - .. code-block:: none - - 1.0 - Initial driver - 1.1 - FC support, create_cloned_volume, volume type support, - get_volume_stats, minor bug fixes - 1.2.0 - Added retype - 1.2.1 - Code refactor, improved exception handling - 1.2.2 - Fix bug #1274123 (races in host-related functions) - 1.2.3 - Fix Fibre Channel connectivity: bug #1279758 (add delim - to lsfabric, clear unused data from connections, ensure - matching WWPNs by comparing lower case - 1.2.4 - Fix bug #1278035 (async migration/retype) - 1.2.5 - Added support for manage_existing (unmanage is inherited) - 1.2.6 - Added QoS support in terms of I/O throttling rate - 1.3.1 - Added support for volume replication - 1.3.2 - Added support for consistency group - 1.3.3 - Update driver to use ABC metaclasses - 2.0 - Code refactor, split init file and placed shared methods - for FC and iSCSI within the StorwizeSVCCommonDriver class - 2.0.1 - Added support for multiple pools with model update - 2.1 - Added replication V2 support to the global/metro mirror - mode - 2.1.1 - Update replication to version 2.1 - 2.2 - Add CG capability 
to generic volume groups - 2.2.1 - Add vdisk mirror/stretch cluster support - """ - - VERSION = "2.2.1" - - # ThirdPartySystems wiki page - CI_WIKI_NAME = "IBM_STORAGE_CI" - - def __init__(self, *args, **kwargs): - super(StorwizeSVCFCDriver, self).__init__(*args, **kwargs) - self.protocol = 'FC' - self.configuration.append_config_values( - storwize_svc_fc_opts) - - def validate_connector(self, connector): - """Check connector for at least one enabled FC protocol.""" - if 'wwpns' not in connector: - LOG.error('The connector does not contain the required ' - 'information.') - raise exception.InvalidConnectorException( - missing='wwpns') - - @fczm_utils.add_fc_zone - def initialize_connection(self, volume, connector): - """Perform necessary work to make a FC connection.""" - @utils.synchronized('storwize-host' + self._state['system_id'] + - connector['host'], external=True) - def _do_initialize_connection_locked(): - return self._do_initialize_connection(volume, connector) - return _do_initialize_connection_locked() - - def _do_initialize_connection(self, volume, connector): - """Perform necessary work to make a FC connection. - - To be able to create an FC connection from a given host to a - volume, we must: - 1. Translate the given WWNN to a host name - 2. Create new host on the storage system if it does not yet exist - 3. Map the volume to the host if it is not already done - 4. 
Return the connection information for relevant nodes (in the - proper I/O group) - - """ - LOG.debug('enter: initialize_connection: volume %(vol)s with connector' - ' %(conn)s', {'vol': volume['id'], 'conn': connector}) - volume_name = self._get_target_vol(volume) - - # Check if a host object is defined for this host name - host_name = self._helpers.get_host_from_connector(connector) - if host_name is None: - # Host does not exist - add a new host to Storwize/SVC - host_name = self._helpers.create_host(connector) - - volume_attributes = self._helpers.get_vdisk_attributes(volume_name) - if volume_attributes is None: - msg = (_('initialize_connection: Failed to get attributes' - ' for volume %s.') % volume_name) - LOG.error(msg) - raise exception.VolumeDriverException(message=msg) - - multihostmap = self.configuration.storwize_svc_multihostmap_enabled - lun_id = self._helpers.map_vol_to_host(volume_name, host_name, - multihostmap) - try: - preferred_node = volume_attributes['preferred_node_id'] - IO_group = volume_attributes['IO_group_id'] - except KeyError as e: - LOG.error('Did not find expected column name in ' - 'lsvdisk: %s.', e) - raise exception.VolumeBackendAPIException( - data=_('initialize_connection: Missing volume attribute for ' - 'volume %s.') % volume_name) - - try: - # Get preferred node and other nodes in I/O group - preferred_node_entry = None - io_group_nodes = [] - for node in self._state['storage_nodes'].values(): - if node['id'] == preferred_node: - preferred_node_entry = node - if node['IO_group'] == IO_group: - io_group_nodes.append(node) - - if not len(io_group_nodes): - msg = (_('initialize_connection: No node found in ' - 'I/O group %(gid)s for volume %(vol)s.') % - {'gid': IO_group, 'vol': volume_name}) - LOG.error(msg) - raise exception.VolumeBackendAPIException(data=msg) - - if not preferred_node_entry: - # Get 1st node in I/O group - preferred_node_entry = io_group_nodes[0] - LOG.warning('initialize_connection: Did not find a ' - 
'preferred node for volume %s.', volume_name) - - properties = {} - properties['target_discovered'] = False - properties['target_lun'] = lun_id - properties['volume_id'] = volume['id'] - - conn_wwpns = self._helpers.get_conn_fc_wwpns(host_name) - - # If conn_wwpns is empty, then that means that there were - # no target ports with visibility to any of the initiators - # so we return all target ports. - if len(conn_wwpns) == 0: - for node in self._state['storage_nodes'].values(): - conn_wwpns.extend(node['WWPN']) - - properties['target_wwn'] = conn_wwpns - - i_t_map = self._make_initiator_target_map(connector['wwpns'], - conn_wwpns) - properties['initiator_target_map'] = i_t_map - - # specific for z/VM, refer to cinder bug 1323993 - if "zvm_fcp" in connector: - properties['zvm_fcp'] = connector['zvm_fcp'] - except Exception: - with excutils.save_and_reraise_exception(): - self._do_terminate_connection(volume, connector) - LOG.error('initialize_connection: Failed ' - 'to collect return ' - 'properties for volume %(vol)s and connector ' - '%(conn)s.\n', {'vol': volume, - 'conn': connector}) - - LOG.debug('leave: initialize_connection:\n volume: %(vol)s\n ' - 'connector %(conn)s\n properties: %(prop)s', - {'vol': volume['id'], 'conn': connector, - 'prop': properties}) - - return {'driver_volume_type': 'fibre_channel', 'data': properties, } - - def _make_initiator_target_map(self, initiator_wwpns, target_wwpns): - """Build a simplistic all-to-all mapping.""" - i_t_map = {} - for i_wwpn in initiator_wwpns: - i_t_map[str(i_wwpn)] = [] - for t_wwpn in target_wwpns: - i_t_map[i_wwpn].append(t_wwpn) - - return i_t_map - - @fczm_utils.remove_fc_zone - def terminate_connection(self, volume, connector, **kwargs): - """Cleanup after an FC connection has been terminated.""" - # If a fake connector is generated by nova when the host - # is down, then the connector will not have a host property, - # In this case construct the lock without the host property - # so that all the fake 
connectors to an SVC are serialized - host = connector['host'] if 'host' in connector else "" - - @utils.synchronized('storwize-host' + self._state['system_id'] + host, - external=True) - def _do_terminate_connection_locked(): - return self._do_terminate_connection(volume, connector, - **kwargs) - return _do_terminate_connection_locked() - - def _do_terminate_connection(self, volume, connector, **kwargs): - """Cleanup after an FC connection has been terminated. - - When we clean up a terminated connection between a given connector - and volume, we: - 1. Translate the given connector to a host name - 2. Remove the volume-to-host mapping if it exists - 3. Delete the host if it has no more mappings (hosts are created - automatically by this driver when mappings are created) - """ - LOG.debug('enter: terminate_connection: volume %(vol)s with connector' - ' %(conn)s', {'vol': volume['id'], 'conn': connector}) - vol_name = self._get_target_vol(volume) - info = {} - if 'host' in connector: - # get host according to FC protocol - connector = connector.copy() - - connector.pop('initiator', None) - info = {'driver_volume_type': 'fibre_channel', - 'data': {}} - - host_name = self._helpers.get_host_from_connector( - connector, volume_name=vol_name) - if host_name is None: - msg = (_('terminate_connection: Failed to get host name from' - ' connector.')) - LOG.error(msg) - raise exception.VolumeDriverException(message=msg) - else: - # See bug #1244257 - host_name = None - - # Unmap volumes, if hostname is None, need to get value from vdiskmap - host_name = self._helpers.unmap_vol_from_host(vol_name, host_name) - - # Host_name could be none - if host_name: - resp = self._helpers.check_host_mapped_vols(host_name) - if not len(resp): - LOG.info("Need to remove FC Zone, building initiator " - "target map.") - # Build info data structure for zone removing - if 'wwpns' in connector and host_name: - target_wwpns = [] - # Returning all target_wwpns in storage_nodes, since - # we cannot 
determine which wwpns are logged in during - # a VM deletion. - for node in self._state['storage_nodes'].values(): - target_wwpns.extend(node['WWPN']) - init_targ_map = (self._make_initiator_target_map - (connector['wwpns'], - target_wwpns)) - info['data'] = {'initiator_target_map': init_targ_map} - # No volume mapped to the host, delete host from array - self._helpers.delete_host(host_name) - - LOG.debug('leave: terminate_connection: volume %(vol)s with ' - 'connector %(conn)s', {'vol': volume['id'], - 'conn': connector}) - return info diff --git a/cinder/volume/drivers/ibm/storwize_svc/storwize_svc_iscsi.py b/cinder/volume/drivers/ibm/storwize_svc/storwize_svc_iscsi.py deleted file mode 100644 index 1af2fb827..000000000 --- a/cinder/volume/drivers/ibm/storwize_svc/storwize_svc_iscsi.py +++ /dev/null @@ -1,362 +0,0 @@ -# Copyright 2015 IBM Corp. -# Copyright 2012 OpenStack Foundation -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -# -""" -ISCSI volume driver for IBM Storwize family and SVC storage systems. - -Notes: -1. If you specify both a password and a key file, this driver will use the -key file only. -2. When using a key file for authentication, it is up to the user or -system administrator to store the private key in a safe manner. -3. The defaults for creating volumes are "-rsize 2% -autoexpand --grainsize 256 -warning 0". These can be changed in the configuration -file or by using volume types(recommended only for advanced users). 
- -Limitations: -1. The driver expects CLI output in English, error messages may be in a -localized format. -2. Clones and creating volumes from snapshots, where the source and target -are of different sizes, is not supported. - -""" - -from oslo_config import cfg -from oslo_log import log as logging -from oslo_utils import excutils - -from cinder import exception -from cinder.i18n import _ -from cinder import interface -from cinder import utils -from cinder.volume import configuration as conf - -from cinder.volume.drivers.ibm.storwize_svc import ( - storwize_svc_common as storwize_common) - -LOG = logging.getLogger(__name__) - -storwize_svc_iscsi_opts = [ - cfg.BoolOpt('storwize_svc_iscsi_chap_enabled', - default=True, - help='Configure CHAP authentication for iSCSI connections ' - '(Default: Enabled)'), -] - -CONF = cfg.CONF -CONF.register_opts(storwize_svc_iscsi_opts, group=conf.SHARED_CONF_GROUP) - - -@interface.volumedriver -class StorwizeSVCISCSIDriver(storwize_common.StorwizeSVCCommonDriver): - """IBM Storwize V7000 and SVC iSCSI volume driver. - - Version history: - - .. 
code-block:: none - - 1.0 - Initial driver - 1.1 - FC support, create_cloned_volume, volume type support, - get_volume_stats, minor bug fixes - 1.2.0 - Added retype - 1.2.1 - Code refactor, improved exception handling - 1.2.2 - Fix bug #1274123 (races in host-related functions) - 1.2.3 - Fix Fibre Channel connectivity: bug #1279758 (add delim - to lsfabric, clear unused data from connections, ensure - matching WWPNs by comparing lower case - 1.2.4 - Fix bug #1278035 (async migration/retype) - 1.2.5 - Added support for manage_existing (unmanage is inherited) - 1.2.6 - Added QoS support in terms of I/O throttling rate - 1.3.1 - Added support for volume replication - 1.3.2 - Added support for consistency group - 1.3.3 - Update driver to use ABC metaclasses - 2.0 - Code refactor, split init file and placed shared methods - for FC and iSCSI within the StorwizeSVCCommonDriver class - 2.0.1 - Added support for multiple pools with model update - 2.1 - Added replication V2 support to the global/metro mirror - mode - 2.1.1 - Update replication to version 2.1 - 2.2 - Add CG capability to generic volume groups - 2.2.1 - Add vdisk mirror/stretch cluster support - """ - - VERSION = "2.2.1" - - # ThirdPartySystems wiki page - CI_WIKI_NAME = "IBM_STORAGE_CI" - - def __init__(self, *args, **kwargs): - super(StorwizeSVCISCSIDriver, self).__init__(*args, **kwargs) - self.protocol = 'iSCSI' - self.configuration.append_config_values( - storwize_svc_iscsi_opts) - - def validate_connector(self, connector): - """Check connector for at least one enabled iSCSI protocol.""" - if 'initiator' not in connector: - LOG.error('The connector does not contain the required ' - 'information.') - raise exception.InvalidConnectorException( - missing='initiator') - - def initialize_connection(self, volume, connector): - """Perform necessary work to make an iSCSI connection.""" - @utils.synchronized('storwize-host' + self._state['system_id'] + - connector['host'], external=True) - def 
_do_initialize_connection_locked(): - return self._do_initialize_connection(volume, connector) - return _do_initialize_connection_locked() - - def _do_initialize_connection(self, volume, connector): - """Perform necessary work to make an iSCSI connection. - - To be able to create an iSCSI connection from a given host to a - volume, we must: - 1. Translate the given iSCSI name to a host name - 2. Create new host on the storage system if it does not yet exist - 3. Map the volume to the host if it is not already done - 4. Return the connection information for relevant nodes (in the - proper I/O group) - """ - LOG.debug('enter: initialize_connection: volume %(vol)s with connector' - ' %(conn)s', {'vol': volume['id'], 'conn': connector}) - volume_name = self._get_target_vol(volume) - - # Check if a host object is defined for this host name - host_name = self._helpers.get_host_from_connector(connector, - iscsi=True) - if host_name is None: - # Host does not exist - add a new host to Storwize/SVC - host_name = self._helpers.create_host(connector, iscsi=True) - - chap_secret = self._helpers.get_chap_secret_for_host(host_name) - chap_enabled = self.configuration.storwize_svc_iscsi_chap_enabled - if chap_enabled and chap_secret is None: - chap_secret = self._helpers.add_chap_secret_to_host(host_name) - elif not chap_enabled and chap_secret: - LOG.warning('CHAP secret exists for host but CHAP is disabled.') - - multihostmap = self.configuration.storwize_svc_multihostmap_enabled - lun_id = self._helpers.map_vol_to_host(volume_name, host_name, - multihostmap) - - try: - properties = self._get_single_iscsi_data(volume, connector, - lun_id, chap_secret) - multipath = connector.get('multipath', False) - if multipath: - properties = self._get_multi_iscsi_data(volume, connector, - lun_id, properties) - except Exception: - with excutils.save_and_reraise_exception(): - self._do_terminate_connection(volume, connector) - LOG.error('initialize_connection: Failed ' - 'to collect return ' 
- 'properties for volume %(vol)s and connector ' - '%(conn)s.\n', {'vol': volume, - 'conn': connector}) - - LOG.debug('leave: initialize_connection:\n volume: %(vol)s\n ' - 'connector: %(conn)s\n properties: %(prop)s', - {'vol': volume['id'], 'conn': connector, - 'prop': properties}) - - return {'driver_volume_type': 'iscsi', 'data': properties, } - - def _get_single_iscsi_data(self, volume, connector, lun_id, chap_secret): - LOG.debug('enter: _get_single_iscsi_data: volume %(vol)s with ' - 'connector %(conn)s lun_id %(lun_id)s', - {'vol': volume['id'], 'conn': connector, - 'lun_id': lun_id}) - - volume_name = self._get_target_vol(volume) - volume_attributes = self._helpers.get_vdisk_attributes(volume_name) - if volume_attributes is None: - msg = (_('_get_single_iscsi_data: Failed to get attributes' - ' for volume %s.') % volume_name) - LOG.error(msg) - raise exception.VolumeDriverException(message=msg) - - try: - preferred_node = volume_attributes['preferred_node_id'] - IO_group = volume_attributes['IO_group_id'] - except KeyError as e: - msg = (_('_get_single_iscsi_data: Did not find expected column' - ' name in %(volume)s: %(key)s %(error)s.'), - {'volume': volume_name, 'key': e.args[0], - 'error': e}) - LOG.error(msg) - raise exception.VolumeBackendAPIException(data=msg) - - # Get preferred node and other nodes in I/O group - preferred_node_entry = None - io_group_nodes = [] - for node in self._state['storage_nodes'].values(): - if self.protocol not in node['enabled_protocols']: - continue - - if node['IO_group'] != IO_group: - continue - io_group_nodes.append(node) - if node['id'] == preferred_node: - preferred_node_entry = node - - if not len(io_group_nodes): - msg = (_('_get_single_iscsi_data: No node found in ' - 'I/O group %(gid)s for volume %(vol)s.') % { - 'gid': IO_group, 'vol': volume_name}) - LOG.error(msg) - raise exception.VolumeBackendAPIException(data=msg) - - if not preferred_node_entry: - # Get 1st node in I/O group - preferred_node_entry = 
io_group_nodes[0] - LOG.warning('_get_single_iscsi_data: Did not find a ' - 'preferred node for volume %s.', volume_name) - - properties = { - 'target_discovered': False, - 'target_lun': lun_id, - 'volume_id': volume.id} - - if preferred_node_entry['ipv4']: - ipaddr = preferred_node_entry['ipv4'][0] - else: - ipaddr = preferred_node_entry['ipv6'][0] - properties['target_portal'] = '%s:%s' % (ipaddr, '3260') - properties['target_iqn'] = preferred_node_entry['iscsi_name'] - if chap_secret: - properties.update(auth_method='CHAP', - auth_username=connector['initiator'], - auth_password=chap_secret, - discovery_auth_method='CHAP', - discovery_auth_username=connector['initiator'], - discovery_auth_password=chap_secret) - LOG.debug('leave: _get_single_iscsi_data:\n volume: %(vol)s\n ' - 'connector: %(conn)s\n lun_id: %(lun_id)s\n ' - 'properties: %(prop)s', - {'vol': volume.id, 'conn': connector, 'lun_id': lun_id, - 'prop': properties}) - return properties - - def _get_multi_iscsi_data(self, volume, connector, lun_id, properties): - LOG.debug('enter: _get_multi_iscsi_data: volume %(vol)s with ' - 'connector %(conn)s lun_id %(lun_id)s', - {'vol': volume.id, 'conn': connector, - 'lun_id': lun_id}) - - try: - resp = self._helpers.ssh.lsportip() - except Exception as ex: - msg = (_('_get_multi_iscsi_data: Failed to ' - 'get port ip because of exception: ' - '%s.') % ex) - LOG.error(msg) - raise exception.VolumeBackendAPIException(data=msg) - - properties['target_iqns'] = [] - properties['target_portals'] = [] - properties['target_luns'] = [] - for node in self._state['storage_nodes'].values(): - for ip_data in resp: - if ip_data['node_id'] != node['id']: - continue - link_state = ip_data.get('link_state', None) - valid_port = '' - if ((ip_data['state'] == 'configured' and - link_state == 'active') or - ip_data['state'] == 'online'): - valid_port = (ip_data['IP_address'] or - ip_data['IP_address_6']) - if valid_port: - properties['target_portals'].append( - '%s:%s' % 
(valid_port, '3260')) - properties['target_iqns'].append( - node['iscsi_name']) - properties['target_luns'].append(lun_id) - - if not len(properties['target_portals']): - msg = (_('_get_multi_iscsi_data: Failed to find valid port ' - 'for volume %s.') % volume.name) - LOG.error(msg) - raise exception.VolumeBackendAPIException(data=msg) - - LOG.debug('leave: _get_multi_iscsi_data:\n volume: %(vol)s\n ' - 'connector: %(conn)s\n lun_id: %(lun_id)s\n ' - 'properties: %(prop)s', - {'vol': volume.id, 'conn': connector, 'lun_id': lun_id, - 'prop': properties}) - - return properties - - def terminate_connection(self, volume, connector, **kwargs): - """Cleanup after an iSCSI connection has been terminated.""" - # If a fake connector is generated by nova when the host - # is down, then the connector will not have a host property, - # In this case construct the lock without the host property - # so that all the fake connectors to an SVC are serialized - host = connector['host'] if 'host' in connector else "" - - @utils.synchronized('storwize-host' + self._state['system_id'] + host, - external=True) - def _do_terminate_connection_locked(): - return self._do_terminate_connection(volume, connector, - **kwargs) - return _do_terminate_connection_locked() - - def _do_terminate_connection(self, volume, connector, **kwargs): - """Cleanup after an iSCSI connection has been terminated. - - When we clean up a terminated connection between a given connector - and volume, we: - 1. Translate the given connector to a host name - 2. Remove the volume-to-host mapping if it exists - 3. 
Delete the host if it has no more mappings (hosts are created - automatically by this driver when mappings are created) - """ - LOG.debug('enter: terminate_connection: volume %(vol)s with connector' - ' %(conn)s', {'vol': volume['id'], 'conn': connector}) - vol_name = self._get_target_vol(volume) - - info = {} - if 'host' in connector: - # get host according to iSCSI protocol - info = {'driver_volume_type': 'iscsi', - 'data': {}} - host_name = self._helpers.get_host_from_connector(connector, - iscsi=True) - if host_name is None: - msg = (_('terminate_connection: Failed to get host name from' - ' connector.')) - LOG.error(msg) - raise exception.VolumeDriverException(message=msg) - else: - # See bug #1244257 - host_name = None - - # Unmap volumes, if hostname is None, need to get value from vdiskmap - host_name = self._helpers.unmap_vol_from_host(vol_name, host_name) - - # Host_name could be none - if host_name: - resp = self._helpers.check_host_mapped_vols(host_name) - if not len(resp): - self._helpers.delete_host(host_name) - - LOG.debug('leave: terminate_connection: volume %(vol)s with ' - 'connector %(conn)s', {'vol': volume['id'], - 'conn': connector}) - return info diff --git a/cinder/volume/drivers/infinidat.py b/cinder/volume/drivers/infinidat.py deleted file mode 100644 index a7fc334cd..000000000 --- a/cinder/volume/drivers/infinidat.py +++ /dev/null @@ -1,755 +0,0 @@ -# Copyright 2016 Infinidat Ltd. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the -# License for the specific language governing permissions and limitations -# under the License. -""" -INFINIDAT InfiniBox Volume Driver -""" - -from contextlib import contextmanager -import functools -import platform -import socket - -import mock -from oslo_config import cfg -from oslo_log import log as logging -from oslo_utils import units - -from cinder import coordination -from cinder import exception -from cinder.i18n import _ -from cinder import interface -from cinder.objects import fields -from cinder import utils -from cinder import version -from cinder.volume import configuration -from cinder.volume.drivers.san import san -from cinder.volume import utils as vol_utils -from cinder.volume import volume_types -from cinder.zonemanager import utils as fczm_utils - -try: - # we check that infinisdk is installed. the other imported modules - # are dependencies, so if any of the dependencies are not importable - # we assume infinisdk is not installed - import capacity - from infi.dtypes import iqn - from infi.dtypes import wwn - import infinisdk -except ImportError: - capacity = None - infinisdk = None - iqn = None - wwn = None - - -LOG = logging.getLogger(__name__) - -VENDOR_NAME = 'INFINIDAT' -BACKEND_QOS_CONSUMERS = frozenset(['back-end', 'both']) -QOS_MAX_IOPS = 'maxIOPS' -QOS_MAX_BWS = 'maxBWS' - -infinidat_opts = [ - cfg.StrOpt('infinidat_pool_name', - help='Name of the pool from which volumes are allocated'), - # We can't use the existing "storage_protocol" option because its default - # is "iscsi", but for backward-compatibility our default must be "fc" - cfg.StrOpt('infinidat_storage_protocol', - ignore_case=True, - default='fc', - choices=['iscsi', 'fc'], - help='Protocol for transferring data between host and ' - 'storage back-end.'), - cfg.ListOpt('infinidat_iscsi_netspaces', - default=[], - help='List of names of network spaces to use for iSCSI ' - 'connectivity'), - cfg.BoolOpt('infinidat_use_compression', - default=False, - help='Specifies 
whether to turn on compression for newly ' - 'created volumes.'), -] - -CONF = cfg.CONF -CONF.register_opts(infinidat_opts, group=configuration.SHARED_CONF_GROUP) - - -def infinisdk_to_cinder_exceptions(func): - @functools.wraps(func) - def wrapper(*args, **kwargs): - try: - return func(*args, **kwargs) - except infinisdk.core.exceptions.InfiniSDKException as ex: - # string formatting of 'ex' includes http code and url - msg = _('Caught exception from infinisdk: %s') % ex - LOG.exception(msg) - raise exception.VolumeBackendAPIException(data=msg) - return wrapper - - -@interface.volumedriver -class InfiniboxVolumeDriver(san.SanISCSIDriver): - VERSION = '1.5' - - # ThirdPartySystems wiki page - CI_WIKI_NAME = "INFINIDAT_Cinder_CI" - - def __init__(self, *args, **kwargs): - super(InfiniboxVolumeDriver, self).__init__(*args, **kwargs) - self.configuration.append_config_values(infinidat_opts) - self._lookup_service = fczm_utils.create_lookup_service() - - def do_setup(self, context): - """Driver initialization""" - if infinisdk is None: - msg = _("Missing 'infinisdk' python module, ensure the library" - " is installed and available.") - raise exception.VolumeDriverException(message=msg) - auth = (self.configuration.san_login, - self.configuration.san_password) - self.management_address = self.configuration.san_ip - self._system = infinisdk.InfiniBox(self.management_address, auth=auth) - self._system.login() - backend_name = self.configuration.safe_get('volume_backend_name') - self._backend_name = backend_name or self.__class__.__name__ - self._volume_stats = None - if self.configuration.infinidat_storage_protocol.lower() == 'iscsi': - self._protocol = 'iSCSI' - if len(self.configuration.infinidat_iscsi_netspaces) == 0: - msg = _('No iSCSI network spaces configured') - raise exception.VolumeDriverException(message=msg) - else: - self._protocol = 'FC' - if (self.configuration.infinidat_use_compression and - not self._system.compat.has_compression()): - # InfiniBox systems 
support compression only from v3.0 and up - msg = _('InfiniBox system does not support volume compression.\n' - 'Compression is available on InfiniBox 3.0 onward.\n' - 'Please disable volume compression by setting ' - 'infinidat_use_compression to False in the Cinder ' - 'configuration file.') - raise exception.VolumeDriverException(message=msg) - LOG.debug('setup complete') - - def _make_volume_name(self, cinder_volume): - return 'openstack-vol-%s' % cinder_volume.id - - def _make_snapshot_name(self, cinder_snapshot): - return 'openstack-snap-%s' % cinder_snapshot.id - - def _make_host_name(self, port): - return 'openstack-host-%s' % str(port).replace(":", ".") - - def _make_cg_name(self, cinder_group): - return 'openstack-cg-%s' % cinder_group.id - - def _make_group_snapshot_name(self, cinder_group_snap): - return 'openstack-group-snap-%s' % cinder_group_snap.id - - def _set_cinder_object_metadata(self, infinidat_object, cinder_object): - data = dict(system="openstack", - openstack_version=version.version_info.release_string(), - cinder_id=cinder_object.id, - cinder_name=cinder_object.name) - infinidat_object.set_metadata_from_dict(data) - - def _set_host_metadata(self, infinidat_object): - data = dict(system="openstack", - openstack_version=version.version_info.release_string(), - hostname=socket.gethostname(), - platform=platform.platform()) - infinidat_object.set_metadata_from_dict(data) - - def _get_infinidat_volume_by_name(self, name): - volume = self._system.volumes.safe_get(name=name) - if volume is None: - msg = _('Volume "%s" not found') % name - LOG.error(msg) - raise exception.InvalidVolume(reason=msg) - return volume - - def _get_infinidat_snapshot_by_name(self, name): - snapshot = self._system.volumes.safe_get(name=name) - if snapshot is None: - msg = _('Snapshot "%s" not found') % name - LOG.error(msg) - raise exception.InvalidSnapshot(reason=msg) - return snapshot - - def _get_infinidat_volume(self, cinder_volume): - volume_name = 
self._make_volume_name(cinder_volume) - return self._get_infinidat_volume_by_name(volume_name) - - def _get_infinidat_snapshot(self, cinder_snapshot): - snap_name = self._make_snapshot_name(cinder_snapshot) - return self._get_infinidat_snapshot_by_name(snap_name) - - def _get_infinidat_pool(self): - pool_name = self.configuration.infinidat_pool_name - pool = self._system.pools.safe_get(name=pool_name) - if pool is None: - msg = _('Pool "%s" not found') % pool_name - LOG.error(msg) - raise exception.VolumeDriverException(message=msg) - return pool - - def _get_infinidat_cg(self, cinder_group): - group_name = self._make_cg_name(cinder_group) - infinidat_cg = self._system.cons_groups.safe_get(name=group_name) - if infinidat_cg is None: - msg = _('Consistency group "%s" not found') % group_name - LOG.error(msg) - raise exception.InvalidGroup(message=msg) - return infinidat_cg - - def _get_or_create_host(self, port): - host_name = self._make_host_name(port) - infinidat_host = self._system.hosts.safe_get(name=host_name) - if infinidat_host is None: - infinidat_host = self._system.hosts.create(name=host_name) - infinidat_host.add_port(port) - self._set_host_metadata(infinidat_host) - return infinidat_host - - def _get_mapping(self, host, volume): - existing_mapping = host.get_luns() - for mapping in existing_mapping: - if mapping.get_volume() == volume: - return mapping - - def _get_or_create_mapping(self, host, volume): - mapping = self._get_mapping(host, volume) - if mapping: - return mapping - # volume not mapped. map it - return host.map_volume(volume) - - def _get_backend_qos_specs(self, cinder_volume): - type_id = cinder_volume.volume_type_id - if type_id is None: - return None - qos_specs = volume_types.get_volume_type_qos_specs(type_id) - if qos_specs is None: - return None - qos_specs = qos_specs['qos_specs'] - if qos_specs is None: - return None - consumer = qos_specs['consumer'] - # Front end QoS specs are handled by nova. We ignore them here. 
- if consumer not in BACKEND_QOS_CONSUMERS: - return None - max_iops = qos_specs['specs'].get(QOS_MAX_IOPS) - max_bws = qos_specs['specs'].get(QOS_MAX_BWS) - if max_iops is None and max_bws is None: - return None - return { - 'id': qos_specs['id'], - QOS_MAX_IOPS: max_iops, - QOS_MAX_BWS: max_bws, - } - - def _get_or_create_qos_policy(self, qos_specs): - qos_policy = self._system.qos_policies.safe_get(name=qos_specs['id']) - if qos_policy is None: - qos_policy = self._system.qos_policies.create( - name=qos_specs['id'], - type="VOLUME", - max_ops=qos_specs[QOS_MAX_IOPS], - max_bps=qos_specs[QOS_MAX_BWS]) - return qos_policy - - def _set_qos(self, cinder_volume, infinidat_volume): - if (hasattr(self._system.compat, "has_qos") and - self._system.compat.has_qos()): - qos_specs = self._get_backend_qos_specs(cinder_volume) - if qos_specs: - policy = self._get_or_create_qos_policy(qos_specs) - policy.assign_entity(infinidat_volume) - - def _get_online_fc_ports(self): - nodes = self._system.components.nodes.get_all() - for node in nodes: - for port in node.get_fc_ports(): - if (port.get_link_state().lower() == 'up' and - port.get_state() == 'OK'): - yield str(port.get_wwpn()) - - def _initialize_connection_fc(self, volume, connector): - volume_name = self._make_volume_name(volume) - infinidat_volume = self._get_infinidat_volume_by_name(volume_name) - ports = [wwn.WWN(wwpn) for wwpn in connector['wwpns']] - for port in ports: - infinidat_host = self._get_or_create_host(port) - mapping = self._get_or_create_mapping(infinidat_host, - infinidat_volume) - lun = mapping.get_lun() - # Create initiator-target mapping. 
- target_wwpns = list(self._get_online_fc_ports()) - target_wwpns, init_target_map = self._build_initiator_target_map( - connector, target_wwpns) - return dict(driver_volume_type='fibre_channel', - data=dict(target_discovered=False, - target_wwn=target_wwpns, - target_lun=lun, - initiator_target_map=init_target_map)) - - def _get_iscsi_network_space(self, netspace_name): - netspace = self._system.network_spaces.safe_get( - service='ISCSI_SERVICE', - name=netspace_name) - if netspace is None: - msg = (_('Could not find iSCSI network space with name "%s"') % - netspace_name) - raise exception.VolumeDriverException(message=msg) - return netspace - - def _get_iscsi_portal(self, netspace): - for netpsace_interface in netspace.get_ips(): - if netpsace_interface.enabled: - port = netspace.get_properties().iscsi_tcp_port - return "%s:%s" % (netpsace_interface.ip_address, port) - # if we get here it means there are no enabled ports - msg = (_('No available interfaces in iSCSI network space %s') % - netspace.get_name()) - raise exception.VolumeDriverException(message=msg) - - def _initialize_connection_iscsi(self, volume, connector): - volume_name = self._make_volume_name(volume) - infinidat_volume = self._get_infinidat_volume_by_name(volume_name) - port = iqn.IQN(connector['initiator']) - infinidat_host = self._get_or_create_host(port) - if self.configuration.use_chap_auth: - chap_username = (self.configuration.chap_username or - vol_utils.generate_username()) - chap_password = (self.configuration.chap_password or - vol_utils.generate_password()) - infinidat_host.update_fields( - security_method='CHAP', - security_chap_inbound_username=chap_username, - security_chap_inbound_secret=chap_password) - mapping = self._get_or_create_mapping(infinidat_host, - infinidat_volume) - lun = mapping.get_lun() - netspace_names = self.configuration.infinidat_iscsi_netspaces - target_portals = [] - target_iqns = [] - target_luns = [] - for netspace_name in netspace_names: - netspace = 
self._get_iscsi_network_space(netspace_name) - target_portals.append(self._get_iscsi_portal(netspace)) - target_iqns.append(netspace.get_properties().iscsi_iqn) - target_luns.append(lun) - result_data = dict(target_discovered=True, - target_portal=target_portals[0], - target_iqn=target_iqns[0], - target_lun=target_luns[0]) - if len(target_portals) > 1: - # multiple network spaces defined - result_data.update(dict(target_portals=target_portals, - target_iqns=target_iqns, - target_luns=target_luns)) - if self.configuration.use_chap_auth: - result_data.update(dict(auth_method='CHAP', - auth_username=chap_username, - auth_password=chap_password)) - return dict(driver_volume_type='iscsi', - data=result_data) - - @fczm_utils.add_fc_zone - @infinisdk_to_cinder_exceptions - @coordination.synchronized('infinidat-{self.management_address}-lock') - def initialize_connection(self, volume, connector): - """Map an InfiniBox volume to the host""" - if self._protocol == 'FC': - return self._initialize_connection_fc(volume, connector) - else: - return self._initialize_connection_iscsi(volume, connector) - - @fczm_utils.remove_fc_zone - @infinisdk_to_cinder_exceptions - @coordination.synchronized('infinidat-{self.management_address}-lock') - def terminate_connection(self, volume, connector, **kwargs): - """Unmap an InfiniBox volume from the host""" - infinidat_volume = self._get_infinidat_volume(volume) - if self._protocol == 'FC': - volume_type = 'fibre_channel' - ports = [wwn.WWN(wwpn) for wwpn in connector['wwpns']] - else: - volume_type = 'iscsi' - ports = [iqn.IQN(connector['initiator'])] - result_data = dict() - for port in ports: - host_name = self._make_host_name(port) - host = self._system.hosts.safe_get(name=host_name) - if host is None: - # not found. ignore. 
- continue - # unmap - try: - host.unmap_volume(infinidat_volume) - except KeyError: - continue # volume mapping not found - # check if the host now doesn't have mappings - if host is not None and len(host.get_luns()) == 0: - host.safe_delete() - if self._protocol == 'FC': - # Create initiator-target mapping to delete host entry - target_wwpns = list(self._get_online_fc_ports()) - target_wwpns, target_map = self._build_initiator_target_map( - connector, target_wwpns) - result_data = dict(target_wwn=target_wwpns, - initiator_target_map=target_map) - return dict(driver_volume_type=volume_type, - data=result_data) - - @infinisdk_to_cinder_exceptions - def get_volume_stats(self, refresh=False): - if self._volume_stats is None or refresh: - pool = self._get_infinidat_pool() - free_capacity_bytes = (pool.get_free_physical_capacity() / - capacity.byte) - physical_capacity_bytes = (pool.get_physical_capacity() / - capacity.byte) - free_capacity_gb = float(free_capacity_bytes) / units.Gi - total_capacity_gb = float(physical_capacity_bytes) / units.Gi - qos_support = (hasattr(self._system.compat, "has_qos") and - self._system.compat.has_qos()) - self._volume_stats = dict(volume_backend_name=self._backend_name, - vendor_name=VENDOR_NAME, - driver_version=self.VERSION, - storage_protocol=self._protocol, - consistencygroup_support=False, - total_capacity_gb=total_capacity_gb, - free_capacity_gb=free_capacity_gb, - consistent_group_snapshot_enabled=True, - QoS_support=qos_support) - return self._volume_stats - - def _create_volume(self, volume): - pool = self._get_infinidat_pool() - volume_name = self._make_volume_name(volume) - provtype = "THIN" if self.configuration.san_thin_provision else "THICK" - size = volume.size * capacity.GiB - create_kwargs = dict(name=volume_name, - pool=pool, - provtype=provtype, - size=size) - if self._system.compat.has_compression(): - create_kwargs["compression_enabled"] = ( - self.configuration.infinidat_use_compression) - infinidat_volume = 
self._system.volumes.create(**create_kwargs) - self._set_qos(volume, infinidat_volume) - self._set_cinder_object_metadata(infinidat_volume, volume) - return infinidat_volume - - @infinisdk_to_cinder_exceptions - def create_volume(self, volume): - """Create a new volume on the backend.""" - # this is the same as _create_volume but without the return statement - self._create_volume(volume) - - @infinisdk_to_cinder_exceptions - def delete_volume(self, volume): - """Delete a volume from the backend.""" - volume_name = self._make_volume_name(volume) - try: - infinidat_volume = self._get_infinidat_volume_by_name(volume_name) - except exception.InvalidVolume: - return # volume not found - if infinidat_volume.has_children(): - # can't delete a volume that has a live snapshot - raise exception.VolumeIsBusy(volume_name=volume_name) - infinidat_volume.safe_delete() - - @infinisdk_to_cinder_exceptions - def extend_volume(self, volume, new_size): - """Extend the size of a volume.""" - volume = self._get_infinidat_volume(volume) - volume.resize(new_size * capacity.GiB) - - @infinisdk_to_cinder_exceptions - def create_snapshot(self, snapshot): - """Creates a snapshot.""" - volume = self._get_infinidat_volume(snapshot.volume) - name = self._make_snapshot_name(snapshot) - infinidat_snapshot = volume.create_snapshot(name=name) - self._set_cinder_object_metadata(infinidat_snapshot, snapshot) - - @contextmanager - def _connection_context(self, volume): - use_multipath = self.configuration.use_multipath_for_image_xfer - enforce_multipath = self.configuration.enforce_multipath_for_image_xfer - connector = utils.brick_get_connector_properties(use_multipath, - enforce_multipath) - connection = self.initialize_connection(volume, connector) - try: - yield connection - finally: - self.terminate_connection(volume, connector) - - @contextmanager - def _attach_context(self, connection): - use_multipath = self.configuration.use_multipath_for_image_xfer - device_scan_attempts = 
self.configuration.num_volume_device_scan_tries - protocol = connection['driver_volume_type'] - connector = utils.brick_get_connector( - protocol, - use_multipath=use_multipath, - device_scan_attempts=device_scan_attempts, - conn=connection) - attach_info = None - try: - attach_info = self._connect_device(connection) - yield attach_info - except exception.DeviceUnavailable as exc: - attach_info = exc.kwargs.get('attach_info', None) - raise - finally: - if attach_info: - connector.disconnect_volume(attach_info['conn']['data'], - attach_info['device']) - - @contextmanager - def _device_connect_context(self, volume): - with self._connection_context(volume) as connection: - with self._attach_context(connection) as attach_info: - yield attach_info - - @infinisdk_to_cinder_exceptions - def create_volume_from_snapshot(self, volume, snapshot): - """Create volume from snapshot. - - InfiniBox does not yet support detached clone so use dd to copy data. - This could be a lengthy operation. - - - create a clone from snapshot and map it - - create a volume and map it - - copy data from clone to volume - - unmap volume and clone and delete the clone - """ - infinidat_snapshot = self._get_infinidat_snapshot(snapshot) - clone_name = self._make_volume_name(volume) + '-internal' - infinidat_clone = infinidat_snapshot.create_child(name=clone_name) - # we need a cinder-volume-like object to map the clone by name - # (which is derived from the cinder id) but the clone is internal - # so there is no such object. 
mock one - clone = mock.Mock(id=str(volume.id) + '-internal') - try: - infinidat_volume = self._create_volume(volume) - try: - src_ctx = self._device_connect_context(clone) - dst_ctx = self._device_connect_context(volume) - with src_ctx as src_dev, dst_ctx as dst_dev: - dd_block_size = self.configuration.volume_dd_blocksize - vol_utils.copy_volume(src_dev['device']['path'], - dst_dev['device']['path'], - snapshot.volume.size * units.Ki, - dd_block_size, - sparse=True) - except Exception: - infinidat_volume.delete() - raise - finally: - infinidat_clone.delete() - - @infinisdk_to_cinder_exceptions - def delete_snapshot(self, snapshot): - """Deletes a snapshot.""" - try: - snapshot = self._get_infinidat_snapshot(snapshot) - except exception.InvalidSnapshot: - return # snapshot not found - snapshot.safe_delete() - - def _asssert_volume_not_mapped(self, volume): - # copy is not atomic so we can't clone while the volume is mapped - infinidat_volume = self._get_infinidat_volume(volume) - if len(infinidat_volume.get_logical_units()) == 0: - return - - # volume has mappings - msg = _("INFINIDAT Cinder driver does not support clone of an " - "attached volume. " - "To get this done, create a snapshot from the attached " - "volume and then create a volume from the snapshot.") - LOG.error(msg) - raise exception.VolumeBackendAPIException(data=msg) - - @infinisdk_to_cinder_exceptions - def create_cloned_volume(self, volume, src_vref): - """Create a clone from source volume. - - InfiniBox does not yet support detached clone so use dd to copy data. - This could be a lengthy operation. 
- - * map source volume - * create and map new volume - * copy data from source to new volume - * unmap both volumes - """ - self._asssert_volume_not_mapped(src_vref) - infinidat_volume = self._create_volume(volume) - try: - src_ctx = self._device_connect_context(src_vref) - dst_ctx = self._device_connect_context(volume) - with src_ctx as src_dev, dst_ctx as dst_dev: - dd_block_size = self.configuration.volume_dd_blocksize - vol_utils.copy_volume(src_dev['device']['path'], - dst_dev['device']['path'], - src_vref.size * units.Ki, - dd_block_size, - sparse=True) - except Exception: - infinidat_volume.delete() - raise - - def _build_initiator_target_map(self, connector, all_target_wwns): - """Build the target_wwns and the initiator target map.""" - target_wwns = [] - init_targ_map = {} - - if self._lookup_service is not None: - # use FC san lookup. - dev_map = self._lookup_service.get_device_mapping_from_network( - connector.get('wwpns'), - all_target_wwns) - - for fabric_name in dev_map: - fabric = dev_map[fabric_name] - target_wwns += fabric['target_port_wwn_list'] - for initiator in fabric['initiator_port_wwn_list']: - if initiator not in init_targ_map: - init_targ_map[initiator] = [] - init_targ_map[initiator] += fabric['target_port_wwn_list'] - init_targ_map[initiator] = list(set( - init_targ_map[initiator])) - target_wwns = list(set(target_wwns)) - else: - initiator_wwns = connector.get('wwpns', []) - target_wwns = all_target_wwns - - for initiator in initiator_wwns: - init_targ_map[initiator] = target_wwns - - return target_wwns, init_targ_map - - @infinisdk_to_cinder_exceptions - def create_group(self, context, group): - """Creates a group.""" - # let generic volume group support handle non-cgsnapshots - if not vol_utils.is_group_a_cg_snapshot_type(group): - raise NotImplementedError() - obj = self._system.cons_groups.create(name=self._make_cg_name(group), - pool=self._get_infinidat_pool()) - self._set_cinder_object_metadata(obj, group) - return {'status': 
fields.GroupStatus.AVAILABLE} - - @infinisdk_to_cinder_exceptions - def delete_group(self, context, group, volumes): - """Deletes a group.""" - # let generic volume group support handle non-cgsnapshots - if not vol_utils.is_group_a_cg_snapshot_type(group): - raise NotImplementedError() - try: - infinidat_cg = self._get_infinidat_cg(group) - except exception.InvalidGroup: - pass # group not found - else: - infinidat_cg.safe_delete() - for volume in volumes: - self.delete_volume(volume) - return None, None - - @infinisdk_to_cinder_exceptions - def update_group(self, context, group, - add_volumes=None, remove_volumes=None): - """Updates a group.""" - # let generic volume group support handle non-cgsnapshots - if not vol_utils.is_group_a_cg_snapshot_type(group): - raise NotImplementedError() - add_volumes = add_volumes if add_volumes else [] - remove_volumes = remove_volumes if remove_volumes else [] - infinidat_cg = self._get_infinidat_cg(group) - for vol in add_volumes: - infinidat_volume = self._get_infinidat_volume(vol) - infinidat_cg.add_member(infinidat_volume) - for vol in remove_volumes: - infinidat_volume = self._get_infinidat_volume(vol) - infinidat_cg.remove_member(infinidat_volume) - return None, None, None - - @infinisdk_to_cinder_exceptions - def create_group_from_src(self, context, group, volumes, - group_snapshot=None, snapshots=None, - source_group=None, source_vols=None): - """Creates a group from source.""" - # The source is either group_snapshot+snapshots or - # source_group+source_vols. 
The target is group+voluems - # we assume the source (source_vols / snapshots) are in the same - # order as the target (volumes) - - # let generic volume group support handle non-cgsnapshots - if not vol_utils.is_group_a_cg_snapshot_type(group): - raise NotImplementedError() - self.create_group(context, group) - new_infinidat_group = self._get_infinidat_cg(group) - if group_snapshot is not None and snapshots is not None: - for volume, snapshot in zip(volumes, snapshots): - self.create_volume_from_snapshot(volume, snapshot) - new_infinidat_volume = self._get_infinidat_volume(volume) - new_infinidat_group.add_member(new_infinidat_volume) - elif source_group is not None and source_vols is not None: - for volume, src_vol in zip(volumes, source_vols): - self.create_cloned_volume(volume, src_vol) - new_infinidat_volume = self._get_infinidat_volume(volume) - new_infinidat_group.add_member(new_infinidat_volume) - return None, None - - @infinisdk_to_cinder_exceptions - def create_group_snapshot(self, context, group_snapshot, snapshots): - """Creates a group_snapshot.""" - # let generic volume group support handle non-cgsnapshots - if not vol_utils.is_group_a_cg_snapshot_type(group_snapshot): - raise NotImplementedError() - infinidat_cg = self._get_infinidat_cg(group_snapshot.group) - group_snap_name = self._make_group_snapshot_name(group_snapshot) - new_group = infinidat_cg.create_snapshot(name=group_snap_name) - # update the names of the individual snapshots in the new snapgroup - # to match the names we use for cinder snapshots - for infinidat_snapshot in new_group.get_members(): - parent_name = infinidat_snapshot.get_parent().get_name() - for cinder_snapshot in snapshots: - if cinder_snapshot.volume_id in parent_name: - snapshot_name = self._make_snapshot_name(cinder_snapshot) - infinidat_snapshot.update_name(snapshot_name) - return None, None - - @infinisdk_to_cinder_exceptions - def delete_group_snapshot(self, context, group_snapshot, snapshots): - """Deletes a 
group_snapshot.""" - # let generic volume group support handle non-cgsnapshots - if not vol_utils.is_group_a_cg_snapshot_type(group_snapshot): - raise NotImplementedError() - cgsnap_name = self._make_group_snapshot_name(group_snapshot) - infinidat_cgsnap = self._system.cons_groups.safe_get(name=cgsnap_name) - if infinidat_cgsnap is not None: - if not infinidat_cgsnap.is_snapgroup(): - msg = _('Group "%s" is not a snapshot group') % cgsnap_name - LOG.error(msg) - raise exception.InvalidGroupSnapshot(message=msg) - infinidat_cgsnap.safe_delete() - for snapshot in snapshots: - self.delete_snapshot(snapshot) - return None, None diff --git a/cinder/volume/drivers/infortrend/__init__.py b/cinder/volume/drivers/infortrend/__init__.py deleted file mode 100644 index e69de29bb..000000000 diff --git a/cinder/volume/drivers/infortrend/infortrend_fc_cli.py b/cinder/volume/drivers/infortrend/infortrend_fc_cli.py deleted file mode 100644 index 55b6e36e9..000000000 --- a/cinder/volume/drivers/infortrend/infortrend_fc_cli.py +++ /dev/null @@ -1,291 +0,0 @@ -# Copyright (c) 2015 Infortrend Technology, Inc. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -""" -Fibre Channel Driver for Infortrend Eonstor based on CLI. 
-""" - - -from oslo_log import log as logging - -from cinder import interface -from cinder.volume import driver -from cinder.volume.drivers.infortrend.raidcmd_cli import common_cli -from cinder.zonemanager import utils as fczm_utils - -LOG = logging.getLogger(__name__) - - -@interface.volumedriver -class InfortrendCLIFCDriver(driver.FibreChannelDriver): - - """Infortrend Fibre Channel Driver for Eonstor DS using CLI. - - Version history: - 1.0.0 - Initial driver - 1.0.1 - Support DS4000 - 1.0.2 - Support GS Series - """ - - # ThirdPartySystems wiki page - CI_WIKI_NAME = "Infortrend_Storage_CI" - VERSION = common_cli.InfortrendCommon.VERSION - - # TODO(smcginnis) Remove driver in Queens if CI issues not resolved - SUPPORTED = False - - def __init__(self, *args, **kwargs): - super(InfortrendCLIFCDriver, self).__init__(*args, **kwargs) - self.common = common_cli.InfortrendCommon( - 'FC', configuration=self.configuration) - self.VERSION = self.common.VERSION - - def check_for_setup_error(self): - LOG.debug('check_for_setup_error start') - self.common.check_for_setup_error() - - def create_volume(self, volume): - """Creates a volume. - - Can optionally return a Dictionary of changes - to the volume object to be persisted. 
- """ - LOG.debug('create_volume volume id=%(volume_id)s', { - 'volume_id': volume['id']}) - return self.common.create_volume(volume) - - def create_volume_from_snapshot(self, volume, snapshot): - """Creates a volume from a snapshot.""" - LOG.debug( - 'create_volume_from_snapshot volume id=%(volume_id)s ' - 'snapshot id=%(snapshot_id)s', { - 'volume_id': volume['id'], 'snapshot_id': snapshot['id']}) - return self.common.create_volume_from_snapshot(volume, snapshot) - - def create_cloned_volume(self, volume, src_vref): - """Creates a clone of the specified volume.""" - LOG.debug( - 'create_cloned_volume volume id=%(volume_id)s ' - 'src_vref provider_location=%(provider_location)s', { - 'volume_id': volume['id'], - 'provider_location': src_vref['provider_location']}) - return self.common.create_cloned_volume(volume, src_vref) - - def extend_volume(self, volume, new_size): - """Extend a volume.""" - LOG.debug( - 'extend_volume volume id=%(volume_id)s new size=%(size)s', { - 'volume_id': volume['id'], 'size': new_size}) - self.common.extend_volume(volume, new_size) - - def delete_volume(self, volume): - """Deletes a volume.""" - LOG.debug('delete_volume volume id=%(volume_id)s', { - 'volume_id': volume['id']}) - return self.common.delete_volume(volume) - - def migrate_volume(self, ctxt, volume, host): - """Migrate the volume to the specified host. - - Returns a boolean indicating whether the migration occurred, as well as - model_update. - - :param ctxt: Context - :param volume: A dictionary describing the volume to migrate - :param host: A dictionary describing the host to migrate to, where - host['host'] is its name, and host['capabilities'] is a - dictionary of its reported capabilities. 
- """ - LOG.debug('migrate_volume volume id=%(volume_id)s host=%(host)s', { - 'volume_id': volume['id'], 'host': host['host']}) - return self.common.migrate_volume(volume, host) - - def create_snapshot(self, snapshot): - """Creates a snapshot.""" - LOG.debug( - 'create_snapshot snapshot id=%(snapshot_id)s ' - 'volume id=%(volume_id)s', { - 'snapshot_id': snapshot['id'], - 'volume_id': snapshot['volume_id']}) - return self.common.create_snapshot(snapshot) - - def delete_snapshot(self, snapshot): - """Deletes a snapshot.""" - LOG.debug( - 'delete_snapshot snapshot id=%(snapshot_id)s ' - 'volume id=%(volume_id)s', { - 'snapshot_id': snapshot['id'], - 'volume_id': snapshot['volume_id']}) - self.common.delete_snapshot(snapshot) - - def ensure_export(self, context, volume): - """Synchronously recreates an export for a volume.""" - pass - - def create_export(self, context, volume, connector): - """Exports the volume. - - Can optionally return a Dictionary of changes - to the volume object to be persisted. - """ - LOG.debug( - 'create_export volume provider_location=%(provider_location)s', { - 'provider_location': volume['provider_location']}) - return self.common.create_export(context, volume) - - def remove_export(self, context, volume): - """Removes an export for a volume.""" - pass - - @fczm_utils.add_fc_zone - def initialize_connection(self, volume, connector): - """Initializes the connection and returns connection information. - - Assign any created volume to a compute node/host so that it can be - used from that host. - - The driver returns a driver_volume_type of 'fibre_channel'. - The target_wwn can be a single entry or a list of wwns that - correspond to the list of remote wwn(s) that will export the volume. - The initiator_target_map is a map that represents the remote wwn(s) - and a list of wwns which are visible to the remote wwn(s). 
- Example return values: - - { - 'driver_volume_type': 'fibre_channel' - 'data': { - 'target_discovered': True, - 'target_lun': 1, - 'target_wwn': '1234567890123', - 'initiator_target_map': { - '1122334455667788': ['1234567890123'] - } - } - } - - or - - { - 'driver_volume_type': 'fibre_channel' - 'data': { - 'target_discovered': True, - 'target_lun': 1, - 'target_wwn': ['1234567890123', '0987654321321'], - 'initiator_target_map': { - '1122334455667788': ['1234567890123', - '0987654321321'] - } - } - } - """ - LOG.debug( - 'initialize_connection volume id=%(volume_id)s ' - 'connector initiator=%(initiator)s', { - 'volume_id': volume['id'], - 'initiator': connector['initiator']}) - return self.common.initialize_connection(volume, connector) - - @fczm_utils.remove_fc_zone - def terminate_connection(self, volume, connector, **kwargs): - """Disallow connection from connector.""" - LOG.debug('terminate_connection volume id=%(volume_id)s', { - 'volume_id': volume['id']}) - return self.common.terminate_connection(volume, connector) - - def get_volume_stats(self, refresh=False): - """Get volume stats. - - If 'refresh' is True, run update the stats first. - """ - LOG.debug('get_volume_stats refresh=%(refresh)s', { - 'refresh': refresh}) - return self.common.get_volume_stats(refresh) - - def manage_existing(self, volume, existing_ref): - """Manage an existing lun in the array. - - The lun should be in a manageable pool backend, otherwise - error would return. - Rename the backend storage object so that it matches the, - volume['name'] which is how drivers traditionally map between a - cinder volume and the associated backend storage object. - - .. 
code-block:: default - - existing_ref:{ - 'id':lun_id - } - """ - LOG.debug( - 'manage_existing volume id=%(volume_id)s ' - 'existing_ref source id=%(source_id)s', { - 'volume_id': volume['id'], - 'source_id': existing_ref['source-id']}) - return self.common.manage_existing(volume, existing_ref) - - def unmanage(self, volume): - """Removes the specified volume from Cinder management. - - Does not delete the underlying backend storage object. - - :param volume: Cinder volume to unmanage - """ - LOG.debug('unmanage volume id=%(volume_id)s', { - 'volume_id': volume['id']}) - self.common.unmanage(volume) - - def manage_existing_get_size(self, volume, existing_ref): - """Return size of volume to be managed by manage_existing. - - When calculating the size, round up to the next GB. - """ - LOG.debug( - 'manage_existing_get_size volume id=%(volume_id)s ' - 'existing_ref source id=%(source_id)s', { - 'volume_id': volume['id'], - 'source_id': existing_ref['source-id']}) - return self.common.manage_existing_get_size(volume, existing_ref) - - def retype(self, ctxt, volume, new_type, diff, host): - """Convert the volume to be of the new type. - - :param ctxt: Context - :param volume: A dictionary describing the volume to migrate - :param new_type: A dictionary describing the volume type to convert to - :param diff: A dictionary with the difference between the two types - :param host: A dictionary describing the host to migrate to, where - host['host'] is its name, and host['capabilities'] is a - dictionary of its reported capabilities. - """ - LOG.debug( - 'retype volume id=%(volume_id)s new_type id=%(type_id)s', { - 'volume_id': volume['id'], 'type_id': new_type['id']}) - return self.common.retype(ctxt, volume, new_type, diff, host) - - def update_migrated_volume(self, ctxt, volume, new_volume, - original_volume_status): - """Return model update for migrated volume. 
- - :param volume: The original volume that was migrated to this backend - :param new_volume: The migration volume object that was created on - this backend as part of the migration process - :param original_volume_status: The status of the original volume - :returns: model_update to update DB with any needed changes - """ - LOG.debug( - 'update migrated volume original volume id= %(volume_id)s ' - 'new volume id=%(new_volume_id)s', { - 'volume_id': volume['id'], 'new_volume_id': new_volume['id']}) - return self.common.update_migrated_volume(ctxt, volume, new_volume, - original_volume_status) diff --git a/cinder/volume/drivers/infortrend/infortrend_iscsi_cli.py b/cinder/volume/drivers/infortrend/infortrend_iscsi_cli.py deleted file mode 100644 index 246c452e9..000000000 --- a/cinder/volume/drivers/infortrend/infortrend_iscsi_cli.py +++ /dev/null @@ -1,264 +0,0 @@ -# Copyright (c) 2015 Infortrend Technology, Inc. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -""" -iSCSI Driver for Infortrend Eonstor based on CLI. -""" - -from oslo_log import log as logging - -from cinder import interface -from cinder.volume import driver -from cinder.volume.drivers.infortrend.raidcmd_cli import common_cli - -LOG = logging.getLogger(__name__) - - -@interface.volumedriver -class InfortrendCLIISCSIDriver(driver.ISCSIDriver): - - """Infortrend iSCSI Driver for Eonstor DS using CLI. 
- - Version history: - 1.0.0 - Initial driver - 1.0.1 - Support DS4000 - 1.0.2 - Support GS Series - """ - - # ThirdPartySystems wiki page - CI_WIKI_NAME = "Infortrend_Storage_CI" - VERSION = common_cli.InfortrendCommon.VERSION - - # TODO(smcginnis) Remove driver in Queens if CI issues not resolved - SUPPORTED = False - - def __init__(self, *args, **kwargs): - super(InfortrendCLIISCSIDriver, self).__init__(*args, **kwargs) - self.common = common_cli.InfortrendCommon( - 'iSCSI', configuration=self.configuration) - self.VERSION = self.common.VERSION - - def check_for_setup_error(self): - LOG.debug('check_for_setup_error start') - self.common.check_for_setup_error() - - def create_volume(self, volume): - """Creates a volume. - - Can optionally return a Dictionary of changes - to the volume object to be persisted. - """ - LOG.debug('create_volume volume id=%(volume_id)s', { - 'volume_id': volume['id']}) - return self.common.create_volume(volume) - - def create_volume_from_snapshot(self, volume, snapshot): - """Creates a volume from a snapshot.""" - LOG.debug( - 'create_volume_from_snapshot volume id=%(volume_id)s ' - 'snapshot id=%(snapshot_id)s', { - 'volume_id': volume['id'], 'snapshot_id': snapshot['id']}) - return self.common.create_volume_from_snapshot(volume, snapshot) - - def create_cloned_volume(self, volume, src_vref): - """Creates a clone of the specified volume.""" - LOG.debug( - 'create_cloned_volume volume id=%(volume_id)s ' - 'src_vref provider_location=%(provider_location)s', { - 'volume_id': volume['id'], - 'provider_location': src_vref['provider_location']}) - return self.common.create_cloned_volume(volume, src_vref) - - def extend_volume(self, volume, new_size): - """Extend a volume.""" - LOG.debug( - 'extend_volume volume id=%(volume_id)s new size=%(size)s', { - 'volume_id': volume['id'], 'size': new_size}) - self.common.extend_volume(volume, new_size) - - def delete_volume(self, volume): - """Deletes a volume.""" - LOG.debug('delete_volume volume 
id=%(volume_id)s', { - 'volume_id': volume['id']}) - return self.common.delete_volume(volume) - - def migrate_volume(self, ctxt, volume, host): - """Migrate the volume to the specified host. - - Returns a boolean indicating whether the migration occurred, as well as - model_update. - - :param ctxt: Context - :param volume: A dictionary describing the volume to migrate - :param host: A dictionary describing the host to migrate to, where - host['host'] is its name, and host['capabilities'] is a - dictionary of its reported capabilities. - """ - LOG.debug('migrate_volume volume id=%(volume_id)s host=%(host)s', { - 'volume_id': volume['id'], 'host': host['host']}) - return self.common.migrate_volume(volume, host) - - def create_snapshot(self, snapshot): - """Creates a snapshot.""" - LOG.debug( - 'create_snapshot snapshot id=%(snapshot_id)s ' - 'volume_id=%(volume_id)s', { - 'snapshot_id': snapshot['id'], - 'volume_id': snapshot['volume_id']}) - return self.common.create_snapshot(snapshot) - - def delete_snapshot(self, snapshot): - """Deletes a snapshot.""" - LOG.debug( - 'delete_snapshot snapshot id=%(snapshot_id)s ' - 'volume_id=%(volume_id)s', { - 'snapshot_id': snapshot['id'], - 'volume_id': snapshot['volume_id']}) - self.common.delete_snapshot(snapshot) - - def ensure_export(self, context, volume): - """Synchronously recreates an export for a volume.""" - pass - - def create_export(self, context, volume, connector): - """Exports the volume. - - Can optionally return a Dictionary of changes - to the volume object to be persisted. - """ - LOG.debug( - 'create_export volume provider_location=%(provider_location)s', { - 'provider_location': volume['provider_location']}) - return self.common.create_export(context, volume) - - def remove_export(self, context, volume): - """Removes an export for a volume.""" - pass - - def initialize_connection(self, volume, connector): - """Initializes the connection and returns connection information. 
- - The iscsi driver returns a driver_volume_type of 'iscsi'. - The format of the driver data is defined in _get_iscsi_properties. - Example return value:: - - { - 'driver_volume_type': 'iscsi' - 'data': { - 'target_discovered': True, - 'target_iqn': 'iqn.2010-10.org.openstack:volume-00000001', - 'target_portal': '127.0.0.0.1:3260', - 'volume_id': 1, - } - } - """ - LOG.debug( - 'initialize_connection volume id=%(volume_id)s ' - 'connector initiator=%(initiator)s', { - 'volume_id': volume['id'], - 'initiator': connector['initiator']}) - return self.common.initialize_connection(volume, connector) - - def terminate_connection(self, volume, connector, **kwargs): - """Disallow connection from connector.""" - LOG.debug('terminate_connection volume id=%(volume_id)s', { - 'volume_id': volume['id']}) - self.common.terminate_connection(volume, connector) - - def get_volume_stats(self, refresh=False): - """Get volume stats. - - If 'refresh' is True, run update the stats first. - """ - LOG.debug('get_volume_stats refresh=%(refresh)s', { - 'refresh': refresh}) - return self.common.get_volume_stats(refresh) - - def manage_existing(self, volume, existing_ref): - """Manage an existing lun in the array. - - The lun should be in a manageable pool backend, otherwise - error would return. - Rename the backend storage object so that it matches the, - volume['name'] which is how drivers traditionally map between a - cinder volume and the associated backend storage object. - - .. code-block:: default - - existing_ref:{ - 'id':lun_id - } - """ - LOG.debug( - 'manage_existing volume id=%(volume_id)s ' - 'existing_ref source id=%(source_id)s', { - 'volume_id': volume['id'], - 'source_id': existing_ref['source-id']}) - return self.common.manage_existing(volume, existing_ref) - - def unmanage(self, volume): - """Removes the specified volume from Cinder management. - - Does not delete the underlying backend storage object. 
- - :param volume: Cinder volume to unmanage - """ - LOG.debug('unmanage volume id=%(volume_id)s', { - 'volume_id': volume['id']}) - self.common.unmanage(volume) - - def manage_existing_get_size(self, volume, existing_ref): - """Return size of volume to be managed by manage_existing. - - When calculating the size, round up to the next GB. - """ - LOG.debug( - 'manage_existing_get_size volume id=%(volume_id)s ' - 'existing_ref source id=%(source_id)s', { - 'volume_id': volume['id'], - 'source_id': existing_ref['source-id']}) - return self.common.manage_existing_get_size(volume, existing_ref) - - def retype(self, ctxt, volume, new_type, diff, host): - """Convert the volume to be of the new type. - - :param ctxt: Context - :param volume: A dictionary describing the volume to migrate - :param new_type: A dictionary describing the volume type to convert to - :param diff: A dictionary with the difference between the two types - :param host: A dictionary describing the host to migrate to, where - host['host'] is its name, and host['capabilities'] is a - dictionary of its reported capabilities. - """ - LOG.debug( - 'retype volume id=%(volume_id)s new_type id=%(type_id)s', { - 'volume_id': volume['id'], 'type_id': new_type['id']}) - return self.common.retype(ctxt, volume, new_type, diff, host) - - def update_migrated_volume(self, ctxt, volume, new_volume, - original_volume_status): - """Return model update for migrated volume. 
- - :param volume: The original volume that was migrated to this backend - :param new_volume: The migration volume object that was created on - this backend as part of the migration process - :param original_volume_status: The status of the original volume - :returns: model_update to update DB with any needed changes - """ - LOG.debug( - 'update migrated volume original volume id= %(volume_id)s ' - 'new volume id=%(new_volume_id)s', { - 'volume_id': volume['id'], 'new_volume_id': new_volume['id']}) - return self.common.update_migrated_volume(ctxt, volume, new_volume, - original_volume_status) diff --git a/cinder/volume/drivers/infortrend/raidcmd_cli/__init__.py b/cinder/volume/drivers/infortrend/raidcmd_cli/__init__.py deleted file mode 100644 index e69de29bb..000000000 diff --git a/cinder/volume/drivers/infortrend/raidcmd_cli/cli_factory.py b/cinder/volume/drivers/infortrend/raidcmd_cli/cli_factory.py deleted file mode 100644 index 0ff4674b0..000000000 --- a/cinder/volume/drivers/infortrend/raidcmd_cli/cli_factory.py +++ /dev/null @@ -1,736 +0,0 @@ -# Copyright (c) 2015 Infortrend Technology, Inc. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -""" -Infortrend basic CLI factory. 
-""" - -import abc - -from oslo_concurrency import processutils -from oslo_log import log as logging -import six - -from cinder import utils - -LOG = logging.getLogger(__name__) - -DEFAULT_RETRY_TIME = 5 - - -def retry_cli(func): - def inner(self, *args, **kwargs): - total_retry_time = self.cli_retry_time - - if total_retry_time is None: - total_retry_time = DEFAULT_RETRY_TIME - - retry_time = 0 - while retry_time < total_retry_time: - rc, out = func(self, *args, **kwargs) - retry_time += 1 - - if rc == 0: - break - - LOG.error( - 'Retry %(retry)s times: %(method)s Failed ' - '%(rc)s: %(reason)s', { - 'retry': retry_time, - 'method': self.__class__.__name__, - 'rc': rc, - 'reason': out}) - LOG.debug( - 'Method: %(method)s Return Code: %(rc)s ' - 'Output: %(out)s', { - 'method': self.__class__.__name__, 'rc': rc, 'out': out}) - return rc, out - return inner - - -def util_execute(command_line): - content, err = utils.execute(command_line, shell=True) - return content - - -def strip_empty_in_list(list): - result = [] - for entry in list: - entry = entry.strip() - if entry != "": - result.append(entry) - - return result - - -def table_to_dict(table): - tableHeader = table[0].split(" ") - tableHeaderList = strip_empty_in_list(tableHeader) - - result = [] - - for i in range(len(table) - 2): - if table[i + 2].strip() == "": - break - - resultEntry = {} - tableEntry = table[i + 2].split(" ") - tableEntryList = strip_empty_in_list(tableEntry) - - for key, value in zip(tableHeaderList, tableEntryList): - resultEntry[key] = value - - result.append(resultEntry) - return result - - -def content_lines_to_dict(content_lines): - result = [] - resultEntry = {} - - for content_line in content_lines: - - if content_line.strip() == "": - result.append(resultEntry) - resultEntry = {} - continue - - split_entry = content_line.strip().split(": ", 1) - resultEntry[split_entry[0]] = split_entry[1] - - return result - - -@six.add_metaclass(abc.ABCMeta) -class BaseCommand(object): - - """The 
BaseCommand abstract class.""" - - def __init__(self): - super(BaseCommand, self).__init__() - - @abc.abstractmethod - def execute(self, *args, **kwargs): - pass - - -class ExecuteCommand(BaseCommand): - - """The Common ExecuteCommand.""" - - def __init__(self, cli_conf): - super(ExecuteCommand, self).__init__() - self.cli_retry_time = cli_conf.get('cli_retry_time') - - @retry_cli - def execute(self, *args, **kwargs): - result = None - rc = 0 - try: - result, err = utils.execute(*args, **kwargs) - except processutils.ProcessExecutionError as pe: - rc = pe.exit_code - result = pe.stdout - result = result.replace('\n', '\\n') - LOG.error( - 'Error on execute command. ' - 'Error code: %(exit_code)d Error msg: %(result)s', { - 'exit_code': pe.exit_code, 'result': result}) - return rc, result - - -class CLIBaseCommand(BaseCommand): - - """The CLIBaseCommand class.""" - - def __init__(self, cli_conf): - super(CLIBaseCommand, self).__init__() - self.java = "java -jar" - self.execute_file = cli_conf.get('path') - self.ip = cli_conf.get('ip') - self.password = cli_conf.get('password') - self.cli_retry_time = cli_conf.get('cli_retry_time') - self.command = "" - self.parameters = () - self.command_line = "" - - def _generate_command(self, parameters): - """Generate execute Command. use java, execute, command, parameters.""" - self.parameters = parameters - parameters_line = ' '.join(parameters) - - if self.password: - parameters_line = 'password=%s %s' % ( - self.password, parameters_line) - - self.command_line = "{0} {1} {2} {3} {4}".format( - self.java, - self.execute_file, - self.ip, - self.command, - parameters_line) - - return self.command_line - - def _parser(self, content=None): - """The parser to parse command result. 
- - :param content: The parse Content - :returns: parse result - """ - content = content.replace("\r", "") - content = content.replace("\\/-", "") - content = content.strip() - LOG.debug(content) - - if content is not None: - content_lines = content.split("\n") - rc, out = self._parse_return(content_lines) - - if rc != 0: - return rc, out - else: - return rc, content_lines - - return -1, None - - @retry_cli - def execute(self, *args, **kwargs): - command_line = self._generate_command(args) - LOG.debug('Executing: %(command)s', {'command': command_line}) - rc = 0 - result = None - try: - content = self._execute(command_line) - rc, result = self._parser(content) - except processutils.ProcessExecutionError as pe: - rc = -2 # prevent confusing with cli real rc - result = pe.stdout - result = result.replace('\n', '\\n') - LOG.error( - 'Error on execute %(command)s. ' - 'Error code: %(exit_code)d Error msg: %(result)s', { - 'command': command_line, - 'exit_code': pe.exit_code, - 'result': result}) - return rc, result - - def _execute(self, command_line): - return util_execute(command_line) - - def set_ip(self, ip): - """Set the Raid's ip.""" - self.ip = ip - - def _parse_return(self, content_lines): - """Get the end of command line result.""" - rc = 0 - return_value = content_lines[-1].strip().split(' ', 1)[1] - return_cli_result = content_lines[-2].strip().split(' ', 1)[1] - - rc = int(return_value, 16) - - return rc, return_cli_result - - -class CreateLD(CLIBaseCommand): - - """The Create LD Command.""" - - def __init__(self, *args, **kwargs): - super(CreateLD, self).__init__(*args, **kwargs) - self.command = "create ld" - - -class CreateLV(CLIBaseCommand): - - """The Create LV Command.""" - - def __init__(self, *args, **kwargs): - super(CreateLV, self).__init__(*args, **kwargs) - self.command = "create lv" - - -class CreatePartition(CLIBaseCommand): - - """Create Partition. 
- - create part [LV-ID] [name] [size={partition-size}] - [min={minimal-reserve-size}] [init={switch}] - [tier={tier-level-list}] - """ - - def __init__(self, *args, **kwargs): - super(CreatePartition, self).__init__(*args, **kwargs) - self.command = "create part" - - -class DeletePartition(CLIBaseCommand): - - """Delete Partition. - - delete part [partition-ID] [-y] - """ - - def __init__(self, *args, **kwargs): - super(DeletePartition, self).__init__(*args, **kwargs) - self.command = "delete part" - - -class SetPartition(CLIBaseCommand): - - """Set Partition. - - .. code-block:: bash - - set part [partition-ID] [name={partition-name}] - [min={minimal-reserve-size}] - set part expand [partition-ID] [size={expand-size}] - set part purge [partition-ID] [number] [rule-type] - set part reclaim [partition-ID] - """ - - def __init__(self, *args, **kwargs): - super(SetPartition, self).__init__(*args, **kwargs) - self.command = "set part" - - -class CreateMap(CLIBaseCommand): - - """Map the Partition on the channel. - - create map [part] [partition-ID] [Channel-ID] - [Target-ID] [LUN-ID] [assign={assign-to}] - """ - - def __init__(self, *args, **kwargs): - super(CreateMap, self).__init__(*args, **kwargs) - self.command = "create map" - - -class DeleteMap(CLIBaseCommand): - - """Unmap the Partition on the channel. - - delete map [part] [partition-ID] [Channel-ID] - [Target-ID] [LUN-ID] [-y] - """ - - def __init__(self, *args, **kwargs): - super(DeleteMap, self).__init__(*args, **kwargs) - self.command = "delete map" - - -class CreateSnapshot(CLIBaseCommand): - - """Create partition's Snapshot. - - create si [part] [partition-ID] - """ - - def __init__(self, *args, **kwargs): - super(CreateSnapshot, self).__init__(*args, **kwargs) - self.command = "create si" - - -class DeleteSnapshot(CLIBaseCommand): - - """Delete partition's Snapshot. 
- - delete si [snapshot-image-ID] [-y] - """ - - def __init__(self, *args, **kwargs): - super(DeleteSnapshot, self).__init__(*args, **kwargs) - self.command = "delete si" - - -class CreateReplica(CLIBaseCommand): - - """Create partition or snapshot's replica. - - create replica [name] [part | si] [source-volume-ID] - [part] [target-volume-ID] [type={replication-mode}] - [priority={level}] [desc={description}] - [incremental={switch}] [timeout={value}] - [compression={switch}] - """ - - def __init__(self, *args, **kwargs): - super(CreateReplica, self).__init__(*args, **kwargs) - self.command = "create replica" - - -class DeleteReplica(CLIBaseCommand): - - """Delete and terminate specific replication job. - - delete replica [volume-pair-ID] [-y] - """ - - def __init__(self, *args, **kwargs): - super(DeleteReplica, self).__init__(*args, **kwargs) - self.command = "delete replica" - - -class CreateIQN(CLIBaseCommand): - - """Create host iqn for CHAP or lun filter. - - create iqn [IQN] [IQN-alias-name] [user={username}] [password={secret}] - [target={name}] [target-password={secret}] [ip={ip-address}] - [mask={netmask-ip}] - """ - - def __init__(self, *args, **kwargs): - super(CreateIQN, self).__init__(*args, **kwargs) - self.command = "create iqn" - - -class DeleteIQN(CLIBaseCommand): - - """Delete host iqn by name. - - delete iqn [name] - """ - - def __init__(self, *args, **kwargs): - super(DeleteIQN, self).__init__(*args, **kwargs) - self.command = "delete iqn" - - -class ShowCommand(CLIBaseCommand): - - """Basic Show Command.""" - - def __init__(self, *args, **kwargs): - super(ShowCommand, self).__init__(*args, **kwargs) - self.param_detail = "-l" - self.default_type = "table" - self.start_key = "" - - def _parser(self, content=None): - """Parse Table or Detail format into dict. 
- - # Table format - - ID Name LD-amount - ---------------------- - 123 LV-1 1 - - # Result - - { - 'ID': '123', - 'Name': 'LV-1', - 'LD-amount': '1' - } - - # Detail format - - ID: 5DE94FF775D81C30 - Name: LV-1 - LD-amount: 1 - - # Result - - { - 'ID': '123', - 'Name': 'LV-1', - 'LD-amount': '1' - } - - :param content: The parse Content. - :returns: parse result - """ - rc, out = super(ShowCommand, self)._parser(content) - - # Error. - if rc != 0: - return rc, out - - # No content. - if len(out) < 6: - return rc, [] - - detect_type = self.detect_type() - - # Show detail content. - if detect_type == "list": - - start_id = self.detect_detail_start_index(out) - if start_id < 0: - return rc, [] - - result = content_lines_to_dict(out[start_id:-2]) - else: - - start_id = self.detect_table_start_index(out) - if start_id < 0: - return rc, [] - - result = table_to_dict(out[start_id:-3]) - - return rc, result - - def detect_type(self): - if self.param_detail in self.parameters: - detect_type = "list" - else: - detect_type = self.default_type - return detect_type - - def detect_table_start_index(self, content): - for i in range(3, len(content)): - key = content[i].strip().split(' ') - if self.start_key in key[0].strip(): - return i - - return -1 - - def detect_detail_start_index(self, content): - for i in range(3, len(content)): - split_entry = content[i].strip().split(' ') - if len(split_entry) >= 2 and ':' in split_entry[0]: - return i - - return -1 - - -class ShowLD(ShowCommand): - - """Show LD. - - show ld [index-list] - """ - - def __init__(self, *args, **kwargs): - super(ShowLD, self).__init__(*args, **kwargs) - self.command = "show ld" - - -class ShowLV(ShowCommand): - - """Show LV. 
- - show lv [lv={LV-IDs}] [-l] - """ - - def __init__(self, *args, **kwargs): - super(ShowLV, self).__init__(*args, **kwargs) - self.command = "show lv" - self.start_key = "ID" - - def detect_table_start_index(self, content): - if "tier" in self.parameters: - self.start_key = "LV-Name" - - for i in range(3, len(content)): - key = content[i].strip().split(' ') - if self.start_key in key[0].strip(): - return i - - return -1 - - -class ShowPartition(ShowCommand): - - """Show Partition. - - show part [part={partition-IDs} | lv={LV-IDs}] [-l] - """ - - def __init__(self, *args, **kwargs): - super(ShowPartition, self).__init__(*args, **kwargs) - self.command = "show part" - self.start_key = "ID" - - -class ShowSnapshot(ShowCommand): - - """Show Snapshot. - - show si [si={snapshot-image-IDs} | part={partition-IDs} | lv={LV-IDs}] [-l] - """ - - def __init__(self, *args, **kwargs): - super(ShowSnapshot, self).__init__(*args, **kwargs) - self.command = "show si" - self.start_key = "Index" - - -class ShowDevice(ShowCommand): - - """Show Device. - - show device - """ - - def __init__(self, *args, **kwargs): - super(ShowDevice, self).__init__(*args, **kwargs) - self.command = "show device" - self.start_key = "Index" - - -class ShowChannel(ShowCommand): - - """Show Channel. - - show channel - """ - - def __init__(self, *args, **kwargs): - super(ShowChannel, self).__init__(*args, **kwargs) - self.command = "show channel" - self.start_key = "Ch" - - -class ShowDisk(ShowCommand): - - """The Show Disk Command. - - show disk [disk-index-list | channel={ch}] - """ - - def __init__(self, *args, **kwargs): - super(ShowDisk, self).__init__(*args, **kwargs) - self.command = "show disk" - - -class ShowMap(ShowCommand): - - """Show Map. 
- - show map [part={partition-IDs} | channel={channel-IDs}] [-l] - """ - - def __init__(self, *args, **kwargs): - super(ShowMap, self).__init__(*args, **kwargs) - self.command = "show map" - self.start_key = "Ch" - - -class ShowNet(ShowCommand): - - """Show IP network. - - show net [id={channel-IDs}] [-l] - """ - - def __init__(self, *args, **kwargs): - super(ShowNet, self).__init__(*args, **kwargs) - self.command = "show net" - self.start_key = "ID" - - -class ShowLicense(ShowCommand): - - """Show License. - - show license - """ - - def __init__(self, *args, **kwargs): - super(ShowLicense, self).__init__(*args, **kwargs) - self.command = "show license" - self.start_key = "License" - - def _parser(self, content=None): - """Parse License format. - - # License format - - License Amount(Partition/Subsystem) Expired - ------------------------------------------------ - EonPath --- True - - # Result - - { - 'EonPath': { - 'Amount': '---', - 'Support': True - } - } - - :param content: The parse Content. - :returns: parse result - """ - rc, out = super(ShowLicense, self)._parser(content) - - if rc != 0: - return rc, out - - if len(out) > 0: - result = {} - for entry in out: - if entry['Expired'] == '---' or entry['Expired'] == 'Expired': - support = False - else: - support = True - result[entry['License']] = { - 'Amount': - entry['Amount(Partition/Subsystem)'], - 'Support': support - } - return rc, result - - return rc, [] - - -class ShowReplica(ShowCommand): - - """Show information of all replication jobs or specific job. - - show replica [id={volume-pair-IDs}] [-l] id={volume-pair-IDs} - """ - - def __init__(self, *args, **kwargs): - super(ShowReplica, self).__init__(*args, **kwargs) - self.command = 'show replica' - - -class ShowWWN(ShowCommand): - - """Show Fibre network. 
- - show wwn - """ - - def __init__(self, *args, **kwargs): - super(ShowWWN, self).__init__(*args, **kwargs) - self.command = "show wwn" - self.start_key = "CH" - - -class ShowIQN(ShowCommand): - - """Show iSCSI initiator IQN which is set by create iqn. - - show iqn - """ - - LIST_START_LINE = "List of initiator IQN(s):" - - def __init__(self, *args, **kwargs): - super(ShowIQN, self).__init__(*args, **kwargs) - self.command = "show iqn" - self.default_type = "list" - - def detect_detail_start_index(self, content): - for i in range(3, len(content)): - if content[i].strip() == self.LIST_START_LINE: - return i + 2 - - return -1 diff --git a/cinder/volume/drivers/infortrend/raidcmd_cli/common_cli.py b/cinder/volume/drivers/infortrend/raidcmd_cli/common_cli.py deleted file mode 100644 index 92bdd2a18..000000000 --- a/cinder/volume/drivers/infortrend/raidcmd_cli/common_cli.py +++ /dev/null @@ -1,1951 +0,0 @@ -# Copyright (c) 2015 Infortrend Technology, Inc. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -""" -Infortrend Common CLI. 
-""" -import math -import time - -from oslo_concurrency import lockutils -from oslo_config import cfg -from oslo_log import log as logging -from oslo_service import loopingcall -from oslo_utils import timeutils -from oslo_utils import units - -from cinder import exception -from cinder.i18n import _ -from cinder.volume import configuration as conf -from cinder.volume.drivers.infortrend.raidcmd_cli import cli_factory as cli -from cinder.volume.drivers.san import san -from cinder.volume import volume_types -from cinder.zonemanager import utils as fczm_utils - -LOG = logging.getLogger(__name__) - -infortrend_esds_opts = [ - cfg.StrOpt('infortrend_pools_name', - default='', - help='Infortrend raid pool name list. ' - 'It is separated with comma.'), - cfg.StrOpt('infortrend_cli_path', - default='/opt/bin/Infortrend/raidcmd_ESDS10.jar', - help='The Infortrend CLI absolute path. ' - 'By default, it is at ' - '/opt/bin/Infortrend/raidcmd_ESDS10.jar'), - cfg.IntOpt('infortrend_cli_max_retries', - default=5, - help='Maximum retry time for cli. Default is 5.'), - cfg.IntOpt('infortrend_cli_timeout', - default=30, - help='Default timeout for CLI copy operations in minutes. ' - 'Support: migrate volume, create cloned volume and ' - 'create volume from snapshot. ' - 'By Default, it is 30 minutes.'), - cfg.StrOpt('infortrend_slots_a_channels_id', - default='0,1,2,3,4,5,6,7', - help='Infortrend raid channel ID list on Slot A ' - 'for OpenStack usage. It is separated with comma. ' - 'By default, it is the channel 0~7.'), - cfg.StrOpt('infortrend_slots_b_channels_id', - default='0,1,2,3,4,5,6,7', - help='Infortrend raid channel ID list on Slot B ' - 'for OpenStack usage. It is separated with comma. ' - 'By default, it is the channel 0~7.'), -] - -infortrend_esds_extra_opts = [ - cfg.StrOpt('infortrend_provisioning', - default='full', - help='Let the volume use specific provisioning. ' - 'By default, it is the full provisioning. 
' - 'The supported options are full or thin.'), - cfg.StrOpt('infortrend_tiering', - default='0', - help='Let the volume use specific tiering level. ' - 'By default, it is the level 0. ' - 'The supported levels are 0,2,3,4.'), -] - -CONF = cfg.CONF -CONF.register_opts(infortrend_esds_opts, group=conf.SHARED_CONF_GROUP) -CONF.register_opts(infortrend_esds_extra_opts, group=conf.SHARED_CONF_GROUP) - -CLI_RC_FILTER = { - 'CreatePartition': {'error': _('Failed to create partition.')}, - 'DeletePartition': {'error': _('Failed to delete partition.')}, - 'SetPartition': {'error': _('Failed to set partition.')}, - 'CreateMap': { - 'warning': {20: 'The MCS Channel is grouped.'}, - 'error': _('Failed to create map.'), - }, - 'DeleteMap': { - 'warning': {11: 'No mapping.'}, - 'error': _('Failed to delete map.'), - }, - 'CreateSnapshot': {'error': _('Failed to create snapshot.')}, - 'DeleteSnapshot': {'error': _('Failed to delete snapshot.')}, - 'CreateReplica': {'error': _('Failed to create replica.')}, - 'DeleteReplica': {'error': _('Failed to delete replica.')}, - 'CreateIQN': { - 'warning': {20: 'IQN already existed.'}, - 'error': _('Failed to create iqn.'), - }, - 'DeleteIQN': { - 'warning': { - 20: 'IQN has been used to create map.', - 11: 'No such host alias name.', - }, - 'error': _('Failed to delete iqn.'), - }, - 'ShowLV': {'error': _('Failed to get lv info.')}, - 'ShowPartition': {'error': _('Failed to get partition info.')}, - 'ShowSnapshot': {'error': _('Failed to get snapshot info.')}, - 'ShowDevice': {'error': _('Failed to get device info.')}, - 'ShowChannel': {'error': _('Failed to get channel info.')}, - 'ShowMap': {'error': _('Failed to get map info.')}, - 'ShowNet': {'error': _('Failed to get network info.')}, - 'ShowLicense': {'error': _('Failed to get license info.')}, - 'ShowReplica': {'error': _('Failed to get replica info.')}, - 'ShowWWN': {'error': _('Failed to get wwn info.')}, - 'ShowIQN': {'error': _('Failed to get iqn info.')}, - 'ExecuteCommand': 
{'error': _('Failed to execute common command.')}, -} - - -def log_func(func): - def inner(self, *args, **kwargs): - LOG.debug('Entering: %(method)s', {'method': func.__name__}) - start = timeutils.utcnow() - ret = func(self, *args, **kwargs) - end = timeutils.utcnow() - LOG.debug( - 'Leaving: %(method)s, ' - 'Spent: %(time)s sec, ' - 'Return: %(ret)s.', { - 'method': func.__name__, - 'time': timeutils.delta_seconds(start, end), - 'ret': ret}) - return ret - return inner - - -def mi_to_gi(mi_size): - return mi_size * units.Mi / units.Gi - - -def gi_to_mi(gi_size): - return gi_size * units.Gi / units.Mi - - -class InfortrendCommon(object): - - """The Infortrend's Common Command using CLI. - - Version history: - 1.0.0 - Initial driver - 1.0.1 - Support DS4000 - 1.0.2 - Support GS Series - """ - - VERSION = '1.0.2' - - constants = { - 'ISCSI_PORT': 3260, - 'MAX_LUN_MAP_PER_CHL': 128 - } - - provisioning_values = ['thin', 'full'] - - tiering_values = ['0', '2', '3', '4'] - - def __init__(self, protocol, configuration=None): - - self.protocol = protocol - self.configuration = configuration - self.configuration.append_config_values(san.san_opts) - self.configuration.append_config_values(infortrend_esds_opts) - self.configuration.append_config_values(infortrend_esds_extra_opts) - - self.iscsi_multipath = self.configuration.use_multipath_for_image_xfer - self.path = self.configuration.infortrend_cli_path - self.password = self.configuration.san_password - self.ip = self.configuration.san_ip - self.cli_retry_time = self.configuration.infortrend_cli_max_retries - self.cli_timeout = self.configuration.infortrend_cli_timeout * 60 - self.iqn = 'iqn.2002-10.com.infortrend:raid.uid%s.%s%s%s' - self.unmanaged_prefix = 'cinder-unmanaged-%s' - - if self.ip == '': - msg = _('san_ip is not set.') - LOG.error(msg) - raise exception.VolumeDriverException(message=msg) - - self.fc_lookup_service = fczm_utils.create_lookup_service() - - self._volume_stats = None - self._model_type = 'R' - 
self._replica_timeout = self.cli_timeout - - self.map_dict = { - 'slot_a': {}, - 'slot_b': {}, - } - self.map_dict_init = False - - self.target_dict = { - 'slot_a': {}, - 'slot_b': {}, - } - - if self.protocol == 'iSCSI': - self.mcs_dict = { - 'slot_a': {}, - 'slot_b': {}, - } - - self._init_pool_list() - self._init_channel_list() - - self.cli_conf = { - 'path': self.path, - 'password': self.password, - 'ip': self.ip, - 'cli_retry_time': int(self.cli_retry_time), - } - - def _init_pool_list(self): - pools_name = self.configuration.infortrend_pools_name - if pools_name == '': - msg = _('Pools name is not set.') - LOG.error(msg) - raise exception.VolumeDriverException(message=msg) - - tmp_pool_list = pools_name.split(',') - self.pool_list = [pool.strip() for pool in tmp_pool_list] - - def _init_channel_list(self): - self.channel_list = { - 'slot_a': [], - 'slot_b': [], - } - tmp_channel_list = ( - self.configuration.infortrend_slots_a_channels_id.split(',') - ) - self.channel_list['slot_a'] = ( - [channel.strip() for channel in tmp_channel_list] - ) - tmp_channel_list = ( - self.configuration.infortrend_slots_b_channels_id.split(',') - ) - self.channel_list['slot_b'] = ( - [channel.strip() for channel in tmp_channel_list] - ) - - def _execute_command(self, cli_type, *args, **kwargs): - command = getattr(cli, cli_type) - return command(self.cli_conf).execute(*args, **kwargs) - - def _execute(self, cli_type, *args, **kwargs): - LOG.debug('Executing command type: %(type)s.', {'type': cli_type}) - - rc, out = self._execute_command(cli_type, *args, **kwargs) - - if rc != 0: - if ('warning' in CLI_RC_FILTER[cli_type] and - rc in CLI_RC_FILTER[cli_type]['warning']): - LOG.warning(CLI_RC_FILTER[cli_type]['warning'][rc]) - else: - msg = CLI_RC_FILTER[cli_type]['error'] - LOG.error(msg) - raise exception.InfortrendCliException( - err=msg, param=args, rc=rc, out=out) - return rc, out - - @log_func - def _init_map_info(self, multipath=False): - if not self.map_dict_init: - - rc, 
channel_info = self._execute('ShowChannel') - - if 'BID' in channel_info[0]: - self._model_type = 'R' - else: - self._model_type = 'G' - - self._set_channel_id(channel_info, 'slot_a', multipath) - - if multipath and self._model_type == 'R': - self._set_channel_id(channel_info, 'slot_b', multipath) - - self.map_dict_init = True - - @log_func - def _update_map_info(self, multipath=False): - """Record the driver mapping information. - - map_dict = { - 'slot_a': { - '0': [1, 2, 3, 4] # Slot A Channel 0 map lun 1, 2, 3, 4 - }, - 'slot_b' : { - '1': [0, 1, 3] # Slot B Channel 1 map lun 0, 1, 3 - } - } - """ - rc, map_info = self._execute('ShowMap') - - self._update_map_info_by_slot(map_info, 'slot_a') - - if multipath and self._model_type == 'R': - self._update_map_info_by_slot(map_info, 'slot_b') - return map_info - - @log_func - def _update_map_info_by_slot(self, map_info, slot_key): - for key, value in self.map_dict[slot_key].items(): - self.map_dict[slot_key][key] = list( - range(self.constants['MAX_LUN_MAP_PER_CHL'])) - - if len(map_info) > 0 and isinstance(map_info, list): - for entry in map_info: - ch = entry['Ch'] - lun = entry['LUN'] - if ch not in self.map_dict[slot_key].keys(): - continue - - target_id = self.target_dict[slot_key][ch] - if (entry['Target'] == target_id and - int(lun) in self.map_dict[slot_key][ch]): - self.map_dict[slot_key][ch].remove(int(lun)) - - def _check_initiator_has_lun_map(self, initiator_info, map_info): - if not isinstance(initiator_info, list): - initiator_info = (initiator_info,) - for initiator_name in initiator_info: - for entry in map_info: - if initiator_name.lower() == entry['Host-ID'].lower(): - return True - return False - - @log_func - def _set_channel_id( - self, channel_info, controller='slot_a', multipath=False): - - if self.protocol == 'iSCSI': - check_channel_type = ('NETWORK', 'LAN') - else: - check_channel_type = ('FIBRE', 'Fibre') - - for entry in channel_info: - if entry['Type'] in check_channel_type: - if 
entry['Ch'] in self.channel_list[controller]: - self.map_dict[controller][entry['Ch']] = [] - - if self.protocol == 'iSCSI': - self._update_mcs_dict( - entry['Ch'], entry['MCS'], controller) - - self._update_target_dict(entry, controller) - - @log_func - def _update_target_dict(self, channel, controller): - """Record the target id for mapping. - - # R model - target_dict = { - 'slot_a': { - '0': '0', - '1': '0', - }, - 'slot_b': { - '0': '1', - '1': '1', - }, - } - - # G model - target_dict = { - 'slot_a': { - '2': '32', - '3': '112', - } - } - """ - if self._model_type == 'G': - self.target_dict[controller][channel['Ch']] = channel['ID'] - else: - if controller == 'slot_a': - self.target_dict[controller][channel['Ch']] = channel['AID'] - else: - self.target_dict[controller][channel['Ch']] = channel['BID'] - - def _update_mcs_dict(self, channel_id, mcs_id, controller): - """Record the iSCSI MCS topology. - - # R model with mcs, but it not working with iSCSI multipath - mcs_dict = { - 'slot_a': { - '0': ['0', '1'], - '1': ['2'] - }, - 'slot_b': { - '0': ['0', '1'], - '1': ['2'] - } - } - - # G model with mcs - mcs_dict = { - 'slot_a': { - '0': ['0', '1'], - '1': ['2'] - }, - 'slot_b': {} - } - """ - if mcs_id not in self.mcs_dict[controller]: - self.mcs_dict[controller][mcs_id] = [] - self.mcs_dict[controller][mcs_id].append(channel_id) - - def _check_tiers_setup(self): - tiering = self.configuration.infortrend_tiering - if tiering != '0': - self._check_extraspec_value( - tiering, self.tiering_values) - tier_levels_list = list(range(int(tiering))) - tier_levels_list = list(map(str, tier_levels_list)) - - rc, lv_info = self._execute('ShowLV', 'tier') - - for pool in self.pool_list: - support_tier_levels = tier_levels_list[:] - for entry in lv_info: - if (entry['LV-Name'] == pool and - entry['Tier'] in support_tier_levels): - support_tier_levels.remove(entry['Tier']) - if len(support_tier_levels) == 0: - break - if len(support_tier_levels) != 0: - msg = _('Please 
create %(tier_levels)s ' - 'tier in pool %(pool)s in advance!') % { - 'tier_levels': support_tier_levels, - 'pool': pool} - LOG.error(msg) - raise exception.VolumeDriverException(message=msg) - - def _check_pools_setup(self): - pool_list = self.pool_list[:] - - rc, lv_info = self._execute('ShowLV') - - for lv in lv_info: - if lv['Name'] in pool_list: - pool_list.remove(lv['Name']) - if len(pool_list) == 0: - break - - if len(pool_list) != 0: - msg = _('Please create %(pool_list)s pool in advance!') % { - 'pool_list': pool_list} - LOG.error(msg) - raise exception.VolumeDriverException(message=msg) - - def check_for_setup_error(self): - self._check_pools_setup() - self._check_tiers_setup() - - def create_volume(self, volume): - """Create a Infortrend partition.""" - volume_id = volume['id'].replace('-', '') - - self._create_partition_by_default(volume) - part_id = self._get_part_id(volume_id) - - system_id = self._get_system_id(self.ip) - - model_dict = { - 'system_id': system_id, - 'partition_id': part_id, - } - - model_update = { - "provider_location": self._concat_provider_location(model_dict), - } - LOG.info('Create Volume %(volume_id)s completed.', { - 'volume_id': volume_id}) - return model_update - - def _create_partition_by_default(self, volume): - pool_id = self._get_target_pool_id(volume) - self._create_partition_with_pool(volume, pool_id) - - def _create_partition_with_pool( - self, volume, pool_id, extraspecs=None): - - volume_id = volume['id'].replace('-', '') - volume_size = gi_to_mi(volume['size']) - - if extraspecs is None: - extraspecs = self._get_extraspecs_dict(volume['volume_type_id']) - - provisioning = self._get_extraspecs_value(extraspecs, 'provisioning') - tiering = self._get_extraspecs_value(extraspecs, 'tiering') - - extraspecs_dict = {} - cmd = '' - if provisioning == 'thin': - provisioning = int(volume_size * 0.2) - extraspecs_dict['provisioning'] = provisioning - extraspecs_dict['init'] = 'disable' - else: - self._check_extraspec_value( - 
provisioning, self.provisioning_values) - - if tiering != '0': - self._check_extraspec_value( - tiering, self.tiering_values) - tier_levels_list = list(range(int(tiering))) - tier_levels_list = list(map(str, tier_levels_list)) - self._check_tiering_existing(tier_levels_list, pool_id) - extraspecs_dict['provisioning'] = 0 - extraspecs_dict['init'] = 'disable' - - if extraspecs_dict: - cmd = self._create_part_parameters_str(extraspecs_dict) - - commands = (pool_id, volume_id, 'size=%s' % int(volume_size), cmd) - self._execute('CreatePartition', *commands) - - def _create_part_parameters_str(self, extraspecs_dict): - parameters_list = [] - parameters = { - 'provisioning': 'min=%sMB', - 'tiering': 'tier=%s', - 'init': 'init=%s', - } - for extraspec in sorted(extraspecs_dict.keys()): - value = parameters[extraspec] % (extraspecs_dict[extraspec]) - parameters_list.append(value) - - cmd = ' '.join(parameters_list) - return cmd - - def _check_tiering_existing(self, tier_levels, pool_id): - rc, lv_info = self._execute('ShowLV', 'tier') - - for entry in lv_info: - if entry['LV-ID'] == pool_id and entry['Tier'] in tier_levels: - tier_levels.remove(entry['Tier']) - if len(tier_levels) == 0: - break - if len(tier_levels) != 0: - msg = _('Have not created %(tier_levels)s tier(s).') % { - 'tier_levels': tier_levels} - LOG.error(msg) - raise exception.VolumeDriverException(message=msg) - - @log_func - def _create_map_with_lun_filter( - self, part_id, channel_id, lun_id, host, controller='slot_a'): - - host_filter = self._create_target_id_and_host_filter( - controller, host) - target_id = self.target_dict[controller][channel_id] - - commands = ( - 'part', part_id, channel_id, target_id, lun_id, host_filter - ) - self._execute('CreateMap', *commands) - - @log_func - def _create_map_with_mcs( - self, part_id, channel_list, lun_id, host, controller='slot_a'): - - map_channel_id = None - for channel_id in channel_list: - - host_filter = self._create_target_id_and_host_filter( - 
controller, host) - target_id = self.target_dict[controller][channel_id] - - commands = ( - 'part', part_id, channel_id, target_id, lun_id, - host_filter - ) - rc, out = self._execute('CreateMap', *commands) - if rc == 0: - map_channel_id = channel_id - break - - if map_channel_id is None: - msg = _('Failed to create map on mcs, no channel can map.') - LOG.error(msg) - raise exception.VolumeDriverException(message=msg) - - return map_channel_id - - def _create_target_id_and_host_filter(self, controller, host): - if self.protocol == 'iSCSI': - host_filter = 'iqn=%s' % host - else: - host_filter = 'wwn=%s' % host - - return host_filter - - def _get_extraspecs_dict(self, volume_type_id): - extraspecs = {} - if volume_type_id: - extraspecs = volume_types.get_volume_type_extra_specs( - volume_type_id) - - return extraspecs - - def _get_extraspecs_value(self, extraspecs, key): - value = None - if key == 'provisioning': - if (extraspecs and - 'infortrend_provisioning' in extraspecs.keys()): - value = extraspecs['infortrend_provisioning'].lower() - else: - value = self.configuration.infortrend_provisioning.lower() - elif key == 'tiering': - value = self.configuration.infortrend_tiering - return value - - def _select_most_free_capacity_pool_id(self, lv_info): - largest_free_capacity_gb = 0.0 - dest_pool_id = None - - for lv in lv_info: - if lv['Name'] in self.pool_list: - available_space = float(lv['Available'].split(' ', 1)[0]) - free_capacity_gb = round(mi_to_gi(available_space)) - if free_capacity_gb > largest_free_capacity_gb: - largest_free_capacity_gb = free_capacity_gb - dest_pool_id = lv['ID'] - return dest_pool_id - - def _get_target_pool_id(self, volume): - extraspecs = self._get_extraspecs_dict(volume['volume_type_id']) - pool_id = None - rc, lv_info = self._execute('ShowLV') - - if 'pool_name' in extraspecs.keys(): - poolname = extraspecs['pool_name'] - - for entry in lv_info: - if entry['Name'] == poolname: - pool_id = entry['ID'] - else: - pool_id = 
self._select_most_free_capacity_pool_id(lv_info) - - if pool_id is None: - msg = _('Failed to get pool id with volume %(volume_id)s.') % { - 'volume_id': volume['id']} - LOG.error(msg) - raise exception.VolumeBackendAPIException(data=msg) - - return pool_id - - def _get_system_id(self, system_ip): - rc, device_info = self._execute('ShowDevice') - - for entry in device_info: - if system_ip == entry['Connected-IP']: - return str(int(entry['ID'], 16)) - return - - @log_func - def _get_lun_id(self, ch_id, controller='slot_a'): - lun_id = -1 - - if len(self.map_dict[controller][ch_id]) > 0: - lun_id = self.map_dict[controller][ch_id][0] - self.map_dict[controller][ch_id].remove(lun_id) - - if lun_id == -1: - msg = _('LUN number is out of bound ' - 'on channel id: %(ch_id)s.') % {'ch_id': ch_id} - LOG.error(msg) - raise exception.VolumeDriverException(message=msg) - else: - return lun_id - - @log_func - def _get_mapping_info(self, multipath): - if self.iscsi_multipath or multipath: - return self._get_mapping_info_with_mcs() - else: - return self._get_mapping_info_with_normal() - - def _get_mapping_info_with_mcs(self): - """Get the minimun mapping channel id and multi lun id mapping info. 
- - # R model with mcs - map_chl = { - 'slot_a': ['0', '1'] - } - map_lun = ['0'] - - # G model with mcs - map_chl = { - 'slot_a': ['1', '2'] - } - map_lun = ['0'] - - :returns: minimun mapping channel id per slot and multi lun id - """ - map_chl = { - 'slot_a': [] - } - - min_lun_num = 0 - map_mcs_group = None - for mcs in self.mcs_dict['slot_a']: - if len(self.mcs_dict['slot_a'][mcs]) > 1: - if min_lun_num < self._get_mcs_channel_lun_map_num(mcs): - min_lun_num = self._get_mcs_channel_lun_map_num(mcs) - map_mcs_group = mcs - - if map_mcs_group is None: - msg = _('Raid did not have MCS Channel.') - LOG.error(msg) - raise exception.VolumeDriverException(message=msg) - - map_chl['slot_a'] = self.mcs_dict['slot_a'][map_mcs_group] - map_lun = self._get_mcs_channel_lun_map(map_chl['slot_a']) - return map_chl, map_lun, map_mcs_group - - def _get_mcs_channel_lun_map_num(self, mcs_id): - lun_num = 0 - for channel in self.mcs_dict['slot_a'][mcs_id]: - lun_num += len(self.map_dict['slot_a'][channel]) - return lun_num - - def _get_mcs_channel_lun_map(self, channel_list): - """Find the common lun id in mcs channel.""" - - map_lun = [] - for lun_id in range(self.constants['MAX_LUN_MAP_PER_CHL']): - check_map = True - for channel_id in channel_list: - if lun_id not in self.map_dict['slot_a'][channel_id]: - check_map = False - if check_map: - map_lun.append(str(lun_id)) - break - return map_lun - - @log_func - def _get_mapping_info_with_normal(self): - """Get the minimun mapping channel id and lun id mapping info. 
- - # G model and R model - map_chl = { - 'slot_a': ['1'] - } - map_lun = ['0'] - - :returns: minimun mapping channel id per slot and lun id - """ - map_chl = { - 'slot_a': [] - } - map_lun = [] - - ret_chl = self._get_minimun_mapping_channel_id('slot_a') - lun_id = self._get_lun_id(ret_chl, 'slot_a') - mcs_id = self._get_mcs_id_by_channel_id(ret_chl) - - map_chl['slot_a'].append(ret_chl) - map_lun.append(str(lun_id)) - - return map_chl, map_lun, mcs_id - - @log_func - def _get_minimun_mapping_channel_id(self, controller): - empty_lun_num = 0 - min_map_chl = -1 - - # Sort items to get a reliable behaviour. Dictionary items - # are iterated in a random order because of hash randomization. - for key, value in sorted(self.map_dict[controller].items()): - if empty_lun_num < len(value): - min_map_chl = key - empty_lun_num = len(value) - - if int(min_map_chl) < 0: - msg = _('LUN map overflow on every channel.') - LOG.error(msg) - raise exception.VolumeDriverException(message=msg) - else: - return min_map_chl - - def _get_common_lun_map_id(self, wwpn_channel_info): - map_lun = None - - for lun_id in range(self.constants['MAX_LUN_MAP_PER_CHL']): - lun_id_exist = False - for slot_name in ['slot_a', 'slot_b']: - for wwpn in wwpn_channel_info: - channel_id = wwpn_channel_info[wwpn]['channel'] - if channel_id not in self.map_dict[slot_name]: - continue - elif lun_id not in self.map_dict[slot_name][channel_id]: - lun_id_exist = True - if not lun_id_exist: - map_lun = str(lun_id) - break - return map_lun - - def _get_mcs_id_by_channel_id(self, channel_id): - mcs_id = None - - for mcs in self.mcs_dict['slot_a']: - if channel_id in self.mcs_dict['slot_a'][mcs]: - mcs_id = mcs - break - - if mcs_id is None: - msg = _('Cannot get mcs_id by channel id: %(channel_id)s.') % { - 'channel_id': channel_id} - LOG.error(msg) - raise exception.VolumeDriverException(message=msg) - - return mcs_id - - def _concat_provider_location(self, model_dict): - keys = sorted(model_dict.keys()) - return 
'@'.join([i + '^' + str(model_dict[i]) for i in keys]) - - def delete_volume(self, volume): - """Delete the specific volume.""" - - volume_id = volume['id'].replace('-', '') - has_pair = False - have_map = False - - part_id = self._extract_specific_provider_location( - volume['provider_location'], 'partition_id') - - (check_exist, have_map, part_id) = ( - self._check_volume_exist(volume_id, part_id) - ) - - if not check_exist: - LOG.warning('Volume %(volume_id)s already deleted.', { - 'volume_id': volume_id}) - return - - rc, replica_list = self._execute('ShowReplica', '-l') - - for entry in replica_list: - if (volume_id == entry['Source-Name'] and - part_id == entry['Source']): - if not self._check_replica_completed(entry): - has_pair = True - LOG.warning('Volume still %(status)s ' - 'Cannot delete volume.', - {'status': entry['Status']}) - else: - have_map = entry['Source-Mapped'] == 'Yes' - self._execute('DeleteReplica', entry['Pair-ID'], '-y') - - elif (volume_id == entry['Target-Name'] and - part_id == entry['Target']): - have_map = entry['Target-Mapped'] == 'Yes' - self._execute('DeleteReplica', entry['Pair-ID'], '-y') - - if not has_pair: - - rc, snapshot_list = self._execute( - 'ShowSnapshot', 'part=%s' % part_id) - - for snapshot in snapshot_list: - si_has_pair = self._delete_pair_with_snapshot( - snapshot['SI-ID'], replica_list) - - if si_has_pair: - msg = _('Failed to delete SI ' - 'for volume_id: %(volume_id)s ' - 'because it has pair.') % { - 'volume_id': volume_id} - LOG.error(msg) - raise exception.VolumeDriverException(message=msg) - - self._execute('DeleteSnapshot', snapshot['SI-ID'], '-y') - - rc, map_info = self._execute('ShowMap', 'part=%s' % part_id) - - if have_map or len(map_info) > 0: - self._execute('DeleteMap', 'part', part_id, '-y') - - self._execute('DeletePartition', part_id, '-y') - - LOG.info('Delete Volume %(volume_id)s completed.', { - 'volume_id': volume_id}) - else: - msg = _('Failed to delete volume ' - 'for volume_id: 
%(volume_id)s ' - 'because it has pair.') % { - 'volume_id': volume_id} - LOG.error(msg) - raise exception.VolumeDriverException(message=msg) - - def _check_replica_completed(self, replica): - if ((replica['Type'] == 'Copy' and replica['Status'] == 'Completed') or - (replica['Type'] == 'Mirror' and - replica['Status'] == 'Mirror')): - return True - - return False - - def _check_volume_exist(self, volume_id, part_id): - check_exist = False - have_map = False - result_part_id = part_id - - rc, part_list = self._execute('ShowPartition', '-l') - - for entry in part_list: - if entry['Name'] == volume_id: - check_exist = True - - if part_id is None: - result_part_id = entry['ID'] - if entry['Mapped'] == 'true': - have_map = True - - if check_exist: - return (check_exist, have_map, result_part_id) - else: - return (False, False, None) - - def create_cloned_volume(self, volume, src_vref): - """Create a clone of the volume by volume copy.""" - - volume_id = volume['id'].replace('-', '') - # Step1 create a snapshot of the volume - src_part_id = self._extract_specific_provider_location( - src_vref['provider_location'], 'partition_id') - - if src_part_id is None: - src_part_id = self._get_part_id(volume_id) - - model_update = self._create_volume_from_volume(volume, src_part_id) - - LOG.info('Create Cloned Volume %(volume_id)s completed.', { - 'volume_id': volume['id']}) - return model_update - - def _create_volume_from_volume(self, dst_volume, src_part_id): - # create the target volume for volume copy - dst_volume_id = dst_volume['id'].replace('-', '') - - self._create_partition_by_default(dst_volume) - - dst_part_id = self._get_part_id(dst_volume_id) - # prepare return value - system_id = self._get_system_id(self.ip) - model_dict = { - 'system_id': system_id, - 'partition_id': dst_part_id, - } - - model_info = self._concat_provider_location(model_dict) - model_update = {"provider_location": model_info} - - # clone the volume from the origin partition - commands = ( - 
'Cinder-Cloned', 'part', src_part_id, 'part', dst_part_id - ) - self._execute('CreateReplica', *commands) - self._wait_replica_complete(dst_part_id) - - return model_update - - def _extract_specific_provider_location(self, provider_location, key): - provider_location_dict = self._extract_all_provider_location( - provider_location) - - result = provider_location_dict.get(key, None) - return result - - @log_func - def _extract_all_provider_location(self, provider_location): - provider_location_dict = {} - dict_entry = provider_location.split("@") - for entry in dict_entry: - key, value = entry.split('^', 1) - if value == 'None': - value = None - provider_location_dict[key] = value - - return provider_location_dict - - def create_export(self, context, volume): - model_update = volume['provider_location'] - - LOG.info('Create export done from Volume %(volume_id)s.', { - 'volume_id': volume['id']}) - - return {'provider_location': model_update} - - def get_volume_stats(self, refresh=False): - """Get volume status. - - If refresh is True, update the status first. - """ - if self._volume_stats is None or refresh: - self._update_volume_stats() - - LOG.info( - 'Successfully update volume stats. 
' - 'backend: %(volume_backend_name)s, ' - 'vendor: %(vendor_name)s, ' - 'driver version: %(driver_version)s, ' - 'storage protocol: %(storage_protocol)s.', self._volume_stats) - - return self._volume_stats - - def _update_volume_stats(self): - - backend_name = self.configuration.safe_get('volume_backend_name') - - data = { - 'volume_backend_name': backend_name, - 'vendor_name': 'Infortrend', - 'driver_version': self.VERSION, - 'storage_protocol': self.protocol, - 'pools': self._update_pools_stats(), - } - self._volume_stats = data - - def _update_pools_stats(self): - enable_specs_dict = self._get_enable_specs_on_array() - - if 'Thin Provisioning' in enable_specs_dict.keys(): - provisioning = 'thin' - provisioning_support = True - else: - provisioning = 'full' - provisioning_support = False - - rc, part_list = self._execute('ShowPartition', '-l') - rc, pools_info = self._execute('ShowLV') - pools = [] - - for pool in pools_info: - if pool['Name'] in self.pool_list: - total_space = float(pool['Size'].split(' ', 1)[0]) - available_space = float(pool['Available'].split(' ', 1)[0]) - - total_capacity_gb = round(mi_to_gi(total_space), 2) - free_capacity_gb = round(mi_to_gi(available_space), 2) - provisioning_factor = self.configuration.safe_get( - 'max_over_subscription_ratio') - provisioned_space = self._get_provisioned_space( - pool['ID'], part_list) - provisioned_capacity_gb = round(mi_to_gi(provisioned_space), 2) - - new_pool = { - 'pool_name': pool['Name'], - 'pool_id': pool['ID'], - 'total_capacity_gb': total_capacity_gb, - 'free_capacity_gb': free_capacity_gb, - 'reserved_percentage': 0, - 'QoS_support': False, - 'provisioned_capacity_gb': provisioned_capacity_gb, - 'max_over_subscription_ratio': provisioning_factor, - 'thin_provisioning_support': provisioning_support, - 'thick_provisioning_support': True, - 'infortrend_provisioning': provisioning, - } - pools.append(new_pool) - return pools - - def _get_provisioned_space(self, pool_id, part_list): - 
provisioning_space = 0 - for entry in part_list: - if entry['LV-ID'] == pool_id: - provisioning_space += int(entry['Size']) - return provisioning_space - - def create_snapshot(self, snapshot): - """Creates a snapshot.""" - - snapshot_id = snapshot['id'].replace('-', '') - volume_id = snapshot['volume_id'].replace('-', '') - - LOG.debug('Create Snapshot %(snapshot)s volume %(volume)s.', - {'snapshot': snapshot_id, 'volume': volume_id}) - - model_update = {} - part_id = self._get_part_id(volume_id) - - if part_id is None: - msg = _('Failed to get Partition ID for volume %(volume_id)s.') % { - 'volume_id': volume_id} - LOG.error(msg) - raise exception.VolumeBackendAPIException(data=msg) - - @lockutils.synchronized( - 'snapshot-' + part_id, 'infortrend-', True) - def do_create_snapshot(): - self._execute('CreateSnapshot', 'part', part_id) - rc, tmp_snapshot_list = self._execute( - 'ShowSnapshot', 'part=%s' % part_id) - return tmp_snapshot_list - - snapshot_list = do_create_snapshot() - - LOG.info( - 'Create success. 
' - 'Snapshot: %(snapshot)s, ' - 'Snapshot ID in raid: %(raid_snapshot_id)s, ' - 'volume: %(volume)s.', { - 'snapshot': snapshot_id, - 'raid_snapshot_id': snapshot_list[-1]['SI-ID'], - 'volume': volume_id}) - model_update['provider_location'] = snapshot_list[-1]['SI-ID'] - return model_update - - def delete_snapshot(self, snapshot): - """Delete the snapshot.""" - - snapshot_id = snapshot['id'].replace('-', '') - volume_id = snapshot['volume_id'].replace('-', '') - - LOG.debug('Delete Snapshot %(snapshot)s volume %(volume)s.', - {'snapshot': snapshot_id, 'volume': volume_id}) - - raid_snapshot_id = self._get_raid_snapshot_id(snapshot) - - if raid_snapshot_id: - - rc, replica_list = self._execute('ShowReplica', '-l') - - has_pair = self._delete_pair_with_snapshot( - raid_snapshot_id, replica_list) - - if not has_pair: - self._execute('DeleteSnapshot', raid_snapshot_id, '-y') - - LOG.info('Delete Snapshot %(snapshot_id)s completed.', { - 'snapshot_id': snapshot_id}) - else: - msg = _('Failed to delete snapshot ' - 'for snapshot_id: %s ' - 'because it has pair.') % snapshot_id - LOG.error(msg) - raise exception.VolumeDriverException(message=msg) - else: - msg = _( - 'Failed to get Raid Snapshot ID ' - 'from Snapshot %(snapshot_id)s.') % { - 'snapshot_id': snapshot_id} - LOG.error(msg) - raise exception.VolumeBackendAPIException(data=msg) - - def _get_raid_snapshot_id(self, snapshot): - if 'provider_location' not in snapshot: - LOG.warning( - 'Failed to get Raid Snapshot ID and ' - 'did not store in snapshot.') - return - return snapshot['provider_location'] - - def _delete_pair_with_snapshot(self, snapshot_id, replica_list): - has_pair = False - for entry in replica_list: - if entry['Source'] == snapshot_id: - - if not self._check_replica_completed(entry): - has_pair = True - LOG.warning( - 'Snapshot still %(status)s Cannot delete snapshot.', - {'status': entry['Status']}) - else: - self._execute('DeleteReplica', entry['Pair-ID'], '-y') - return has_pair - - def 
_get_part_id(self, volume_id, pool_id=None, part_list=None): - if part_list is None: - rc, part_list = self._execute('ShowPartition') - for entry in part_list: - if pool_id is None: - if entry['Name'] == volume_id: - return entry['ID'] - else: - if entry['Name'] == volume_id and entry['LV-ID'] == pool_id: - return entry['ID'] - return - - def create_volume_from_snapshot(self, volume, snapshot): - raid_snapshot_id = self._get_raid_snapshot_id(snapshot) - - if raid_snapshot_id is None: - msg = _('Failed to get Raid Snapshot ID ' - 'from snapshot: %(snapshot_id)s.') % { - 'snapshot_id': snapshot['id']} - LOG.error(msg) - raise exception.VolumeBackendAPIException(data=msg) - - src_part_id = self._check_snapshot_filled_block(raid_snapshot_id) - - model_update = self._create_volume_from_snapshot_id( - volume, raid_snapshot_id, src_part_id) - - LOG.info( - 'Create Volume %(volume_id)s from ' - 'snapshot %(snapshot_id)s completed.', { - 'volume_id': volume['id'], - 'snapshot_id': snapshot['id']}) - - return model_update - - def _check_snapshot_filled_block(self, raid_snapshot_id): - rc, snapshot_list = self._execute( - 'ShowSnapshot', 'si=%s' % raid_snapshot_id, '-l') - - if snapshot_list and snapshot_list[0]['Total-filled-block'] == '0': - return snapshot_list[0]['Partition-ID'] - return - - def _create_volume_from_snapshot_id( - self, dst_volume, raid_snapshot_id, src_part_id): - # create the target volume for volume copy - dst_volume_id = dst_volume['id'].replace('-', '') - - self._create_partition_by_default(dst_volume) - - dst_part_id = self._get_part_id(dst_volume_id) - # prepare return value - system_id = self._get_system_id(self.ip) - model_dict = { - 'system_id': system_id, - 'partition_id': dst_part_id, - } - - model_info = self._concat_provider_location(model_dict) - model_update = {"provider_location": model_info} - - if src_part_id: - # clone the volume from the origin partition - commands = ( - 'Cinder-Snapshot', 'part', src_part_id, 'part', dst_part_id - ) - 
self._execute('CreateReplica', *commands) - self._wait_replica_complete(dst_part_id) - - # clone the volume from the snapshot - commands = ( - 'Cinder-Snapshot', 'si', raid_snapshot_id, 'part', dst_part_id - ) - self._execute('CreateReplica', *commands) - self._wait_replica_complete(dst_part_id) - - return model_update - - @lockutils.synchronized('connection', 'infortrend-', True) - def initialize_connection(self, volume, connector): - if self.protocol == 'iSCSI': - multipath = connector.get('multipath', False) - return self._initialize_connection_iscsi( - volume, connector, multipath) - elif self.protocol == 'FC': - return self._initialize_connection_fc( - volume, connector) - else: - msg = _('Unknown protocol: %(protocol)s.') % { - 'protocol': self.protocol} - LOG.error(msg) - raise exception.VolumeDriverException(message=msg) - - def _initialize_connection_fc(self, volume, connector): - self._init_map_info(True) - self._update_map_info(True) - - map_lun, target_wwpns, initiator_target_map = ( - self._do_fc_connection(volume, connector) - ) - - properties = self._generate_fc_connection_properties( - map_lun, target_wwpns, initiator_target_map) - - LOG.info('Successfully initialized connection. ' - 'target_wwn: %(target_wwn)s, ' - 'initiator_target_map: %(initiator_target_map)s, ' - 'lun: %(target_lun)s.', properties['data']) - return properties - - def _do_fc_connection(self, volume, connector): - volume_id = volume['id'].replace('-', '') - target_wwpns = [] - - partition_data = self._extract_all_provider_location( - volume['provider_location']) - part_id = partition_data['partition_id'] - - if part_id is None: - part_id = self._get_part_id(volume_id) - - wwpn_list, wwpn_channel_info = self._get_wwpn_list() - - initiator_target_map, target_wwpns = self._build_initiator_target_map( - connector, wwpn_list) - - map_lun = self._get_common_lun_map_id(wwpn_channel_info) - - # Sort items to get a reliable behaviour. 
Dictionary items - # are iterated in a random order because of hash randomization. - for initiator_wwpn in sorted(initiator_target_map): - for target_wwpn in initiator_target_map[initiator_wwpn]: - channel_id = wwpn_channel_info[target_wwpn.upper()]['channel'] - controller = wwpn_channel_info[target_wwpn.upper()]['slot'] - self._create_map_with_lun_filter( - part_id, channel_id, map_lun, initiator_wwpn, - controller=controller) - - return map_lun, target_wwpns, initiator_target_map - - def _build_initiator_target_map(self, connector, all_target_wwpns): - initiator_target_map = {} - target_wwpns = [] - - if self.fc_lookup_service: - lookup_map = ( - self.fc_lookup_service.get_device_mapping_from_network( - connector['wwpns'], all_target_wwpns) - ) - for fabric_name in lookup_map: - fabric = lookup_map[fabric_name] - target_wwpns.extend(fabric['target_port_wwn_list']) - for initiator in fabric['initiator_port_wwn_list']: - initiator_target_map[initiator] = ( - fabric['target_port_wwn_list'] - ) - else: - initiator_wwns = connector['wwpns'] - target_wwpns = all_target_wwpns - for initiator in initiator_wwns: - initiator_target_map[initiator] = all_target_wwpns - - return initiator_target_map, target_wwpns - - def _generate_fc_connection_properties( - self, lun_id, target_wwpns, initiator_target_map): - - return { - 'driver_volume_type': 'fibre_channel', - 'data': { - 'target_discovered': True, - 'target_lun': int(lun_id), - 'target_wwn': target_wwpns, - 'initiator_target_map': initiator_target_map, - }, - } - - @log_func - def _initialize_connection_iscsi(self, volume, connector, multipath): - self._init_map_info(multipath) - self._update_map_info(multipath) - - volume_id = volume['id'].replace('-', '') - - partition_data = self._extract_all_provider_location( - volume['provider_location']) # system_id, part_id - - part_id = partition_data['partition_id'] - - if part_id is None: - part_id = self._get_part_id(volume_id) - - self._set_host_iqn(connector['initiator']) - 
- map_chl, map_lun, mcs_id = self._get_mapping_info(multipath) - - lun_id = map_lun[0] - - if self.iscsi_multipath or multipath: - channel_id = self._create_map_with_mcs( - part_id, map_chl['slot_a'], lun_id, connector['initiator']) - else: - channel_id = map_chl['slot_a'][0] - - self._create_map_with_lun_filter( - part_id, channel_id, lun_id, connector['initiator']) - - rc, net_list = self._execute('ShowNet') - ip = self._get_ip_by_channel(channel_id, net_list) - - if ip is None: - msg = _( - 'Failed to get ip on Channel %(channel_id)s ' - 'with volume: %(volume_id)s.') % { - 'channel_id': channel_id, 'volume_id': volume_id} - LOG.error(msg) - raise exception.VolumeDriverException(message=msg) - - partition_data = self._combine_channel_lun_target_id( - partition_data, mcs_id, lun_id, channel_id) - - property_value = [{ - 'lun_id': partition_data['lun_id'], - 'iqn': self._generate_iqn(partition_data), - 'ip': ip, - 'port': self.constants['ISCSI_PORT'], - }] - - properties = self._generate_iscsi_connection_properties( - property_value, volume) - LOG.info('Successfully initialized connection ' - 'with volume: %(volume_id)s.', properties['data']) - return properties - - @log_func - def _combine_channel_lun_target_id( - self, partition_data, mcs_id, lun_id, channel_id): - - target_id = self.target_dict['slot_a'][channel_id] - - partition_data['mcs_id'] = mcs_id - partition_data['lun_id'] = lun_id - partition_data['target_id'] = target_id - partition_data['slot_id'] = 1 - - return partition_data - - def _set_host_iqn(self, host_iqn): - - rc, iqn_list = self._execute('ShowIQN') - - check_iqn_exist = False - for entry in iqn_list: - if entry['IQN'] == host_iqn: - check_iqn_exist = True - - if not check_iqn_exist: - self._execute( - 'CreateIQN', host_iqn, self._truncate_host_name(host_iqn)) - - def _truncate_host_name(self, iqn): - if len(iqn) > 16: - return iqn[-16:] - else: - return iqn - - @log_func - def _generate_iqn(self, partition_data): - return self.iqn % ( - 
partition_data['system_id'], - partition_data['mcs_id'], - partition_data['target_id'], - partition_data['slot_id']) - - @log_func - def _get_ip_by_channel( - self, channel_id, net_list, controller='slot_a'): - - slot_name = 'slotA' if controller == 'slot_a' else 'slotB' - - for entry in net_list: - if entry['ID'] == channel_id and entry['Slot'] == slot_name: - return entry['IPv4'] - return - - def _get_wwpn_list(self): - rc, wwn_list = self._execute('ShowWWN') - - wwpn_list = [] - wwpn_channel_info = {} - - for entry in wwn_list: - channel_id = entry['CH'] - if 'BID' in entry['ID']: - slot_name = 'slot_b' - else: - slot_name = 'slot_a' - - if channel_id in self.map_dict[slot_name]: - wwpn_list.append(entry['WWPN']) - - wwpn_channel_info[entry['WWPN']] = { - 'channel': channel_id, - 'slot': slot_name, - } - - return wwpn_list, wwpn_channel_info - - @log_func - def _generate_iscsi_connection_properties( - self, property_value, volume): - - properties = {} - discovery_exist = False - - specific_property = property_value[0] - - discovery_ip = '%s:%s' % ( - specific_property['ip'], specific_property['port']) - discovery_iqn = specific_property['iqn'] - - if self._do_iscsi_discovery(discovery_iqn, discovery_ip): - properties['target_portal'] = discovery_ip - properties['target_iqn'] = discovery_iqn - properties['target_lun'] = int(specific_property['lun_id']) - discovery_exist = True - - if not discovery_exist: - msg = _( - 'Could not find iSCSI target ' - 'for volume: %(volume_id)s.') % { - 'volume_id': volume['id']} - LOG.error(msg) - raise exception.VolumeDriverException(message=msg) - - properties['target_discovered'] = discovery_exist - properties['volume_id'] = volume['id'] - - if 'provider_auth' in volume: - auth = volume['provider_auth'] - if auth: - (auth_method, auth_username, auth_secret) = auth.split() - properties['auth_method'] = auth_method - properties['auth_username'] = auth_username - properties['auth_password'] = auth_secret - - return { - 
'driver_volume_type': 'iscsi', - 'data': properties, - } - - @log_func - def _do_iscsi_discovery(self, target_iqn, target_ip): - rc, out = self._execute( - 'ExecuteCommand', - 'iscsiadm', '-m', 'discovery', - '-t', 'sendtargets', '-p', - target_ip, - run_as_root=True) - - if rc != 0: - LOG.error( - 'Can not discovery in %(target_ip)s with %(target_iqn)s.', - {'target_ip': target_ip, 'target_iqn': target_iqn}) - return False - else: - for target in out.splitlines(): - if target_iqn in target and target_ip in target: - return True - return False - - def extend_volume(self, volume, new_size): - volume_id = volume['id'].replace('-', '') - - part_id = self._extract_specific_provider_location( - volume['provider_location'], 'partition_id') - - if part_id is None: - part_id = self._get_part_id(volume_id) - - expand_size = new_size - volume['size'] - - if '.' in ('%s' % expand_size): - expand_size = round(gi_to_mi(float(expand_size))) - expand_command = 'size=%sMB' % expand_size - else: - expand_command = 'size=%sGB' % expand_size - - self._execute('SetPartition', 'expand', part_id, expand_command) - - LOG.info( - 'Successfully extended volume %(volume_id)s to size %(size)s.', { - 'volume_id': volume['id'], 'size': new_size}) - - @lockutils.synchronized('connection', 'infortrend-', True) - def terminate_connection(self, volume, connector): - volume_id = volume['id'].replace('-', '') - multipath = connector.get('multipath', False) - conn_info = None - - part_id = self._extract_specific_provider_location( - volume['provider_location'], 'partition_id') - - if part_id is None: - part_id = self._get_part_id(volume_id) - - self._execute('DeleteMap', 'part', part_id, '-y') - map_info = self._update_map_info(multipath) - - if self.protocol == 'iSCSI': - initiator_iqn = self._truncate_host_name(connector['initiator']) - lun_map_exist = self._check_initiator_has_lun_map( - initiator_iqn, map_info) - - if not lun_map_exist: - self._execute('DeleteIQN', initiator_iqn) - - elif 
self.protocol == 'FC': - conn_info = {'driver_volume_type': 'fibre_channel', - 'data': {}} - lun_map_exist = self._check_initiator_has_lun_map( - connector['wwpns'], map_info) - - if not lun_map_exist: - - wwpn_list, wwpn_channel_info = self._get_wwpn_list() - init_target_map, target_wwpns = ( - self._build_initiator_target_map(connector, wwpn_list) - ) - conn_info['data']['initiator_target_map'] = init_target_map - - LOG.info( - 'Successfully terminated connection for volume: %(volume_id)s.', - {'volume_id': volume['id']}) - - return conn_info - - def migrate_volume(self, volume, host, new_extraspecs=None): - is_valid, dst_pool_id = ( - self._is_valid_for_storage_assisted_migration(host) - ) - if not is_valid: - return (False, None) - - model_dict = self._migrate_volume_with_pool( - volume, dst_pool_id, new_extraspecs) - - model_update = { - "provider_location": self._concat_provider_location(model_dict), - } - - LOG.info('Migrate Volume %(volume_id)s completed.', { - 'volume_id': volume['id']}) - - return (True, model_update) - - def _is_valid_for_storage_assisted_migration(self, host): - if 'pool_id' not in host['capabilities']: - LOG.warning('Failed to get target pool id.') - return (False, None) - - dst_pool_id = host['capabilities']['pool_id'] - if dst_pool_id is None: - return (False, None) - - return (True, dst_pool_id) - - def _migrate_volume_with_pool(self, volume, dst_pool_id, extraspecs=None): - volume_id = volume['id'].replace('-', '') - - # Get old partition data for delete map - partition_data = self._extract_all_provider_location( - volume['provider_location']) - - src_part_id = partition_data['partition_id'] - - if src_part_id is None: - src_part_id = self._get_part_id(volume_id) - - # Create New Partition - self._create_partition_with_pool(volume, dst_pool_id, extraspecs) - - dst_part_id = self._get_part_id( - volume_id, pool_id=dst_pool_id) - - if dst_part_id is None: - msg = _('Failed to get new part id in new pool: %(pool_id)s.') % { - 
'pool_id': dst_pool_id} - LOG.error(msg) - raise exception.VolumeDriverException(message=msg) - - # Volume Mirror from old partition into new partition - commands = ( - 'Cinder-Migrate', 'part', src_part_id, 'part', dst_part_id, - 'type=mirror' - ) - self._execute('CreateReplica', *commands) - - self._wait_replica_complete(dst_part_id) - - self._execute('DeleteMap', 'part', src_part_id, '-y') - self._execute('DeletePartition', src_part_id, '-y') - - model_dict = { - 'system_id': partition_data['system_id'], - 'partition_id': dst_part_id, - } - - return model_dict - - def _wait_replica_complete(self, part_id): - start_time = int(time.time()) - timeout = self._replica_timeout - - def _inner(): - check_done = False - try: - rc, replica_list = self._execute('ShowReplica', '-l') - for entry in replica_list: - if (entry['Target'] == part_id and - self._check_replica_completed(entry)): - check_done = True - self._execute('DeleteReplica', entry['Pair-ID'], '-y') - except Exception: - check_done = False - LOG.exception('Cannot detect replica status.') - - if check_done: - raise loopingcall.LoopingCallDone() - - if int(time.time()) - start_time > timeout: - msg = _('Wait replica complete timeout.') - LOG.error(msg) - raise exception.VolumeDriverException(message=msg) - - timer = loopingcall.FixedIntervalLoopingCall(_inner) - timer.start(interval=10).wait() - - def _check_extraspec_value(self, extraspec, validvalues): - if not extraspec: - LOG.debug("The given extraspec is None.") - elif extraspec not in validvalues: - msg = _("The extraspec: %(extraspec)s is not valid.") % { - 'extraspec': extraspec} - LOG.error(msg) - raise exception.VolumeDriverException(message=msg) - - def _get_enable_specs_on_array(self): - enable_specs = {} - rc, license_list = self._execute('ShowLicense') - - for key, value in license_list.items(): - if value['Support']: - enable_specs[key] = value - - return enable_specs - - def manage_existing_get_size(self, volume, ref): - """Return size of volume 
to be managed by manage_existing.""" - - volume_name = self._get_existing_volume_ref_name(ref) - part_entry = self._get_latter_volume_dict(volume_name) - - if part_entry is None: - msg = _('Specified logical volume does not exist.') - LOG.error(msg) - raise exception.ManageExistingInvalidReference( - existing_ref=ref, reason=msg) - - rc, map_info = self._execute('ShowMap', 'part=%s' % part_entry['ID']) - - if len(map_info) != 0: - msg = _('The specified volume is mapped to a host.') - LOG.error(msg) - raise exception.VolumeBackendAPIException(data=msg) - - return int(math.ceil(mi_to_gi(float(part_entry['Size'])))) - - def manage_existing(self, volume, ref): - volume_name = self._get_existing_volume_ref_name(ref) - volume_id = volume['id'].replace('-', '') - - part_entry = self._get_latter_volume_dict(volume_name) - - if part_entry is None: - msg = _('Specified logical volume does not exist.') - LOG.error(msg) - raise exception.ManageExistingInvalidReference( - existing_ref=ref, reason=msg) - - self._execute('SetPartition', part_entry['ID'], 'name=%s' % volume_id) - - model_dict = { - 'system_id': self._get_system_id(self.ip), - 'partition_id': part_entry['ID'], - } - model_update = { - "provider_location": self._concat_provider_location(model_dict), - } - - LOG.info('Rename Volume %(volume_id)s completed.', { - 'volume_id': volume['id']}) - - return model_update - - def _get_existing_volume_ref_name(self, ref): - volume_name = None - if 'source-name' in ref: - volume_name = ref['source-name'] - elif 'source-id' in ref: - volume_name = self._get_unmanaged_volume_name( - ref['source-id'].replace('-', '')) - else: - msg = _('Reference must contain source-id or source-name.') - LOG.error(msg) - raise exception.ManageExistingInvalidReference( - existing_ref=ref, reason=msg) - - return volume_name - - def unmanage(self, volume): - volume_id = volume['id'].replace('-', '') - part_id = self._extract_specific_provider_location( - volume['provider_location'], 'partition_id') 
- - if part_id is None: - part_id = self._get_part_id(volume_id) - - new_vol_name = self._get_unmanaged_volume_name(volume_id) - self._execute('SetPartition', part_id, 'name=%s' % new_vol_name) - - LOG.info('Unmanage volume %(volume_id)s completed.', { - 'volume_id': volume_id}) - - def _get_unmanaged_volume_name(self, volume_id): - return self.unmanaged_prefix % volume_id[:-17] - - def _get_specific_volume_dict(self, volume_id): - ref_dict = {} - rc, part_list = self._execute('ShowPartition') - - for entry in part_list: - if entry['Name'] == volume_id: - ref_dict = entry - break - - return ref_dict - - def _get_latter_volume_dict(self, volume_name): - rc, part_list = self._execute('ShowPartition', '-l') - - latest_timestamps = 0 - ref_dict = None - - for entry in part_list: - if entry['Name'] == volume_name: - - timestamps = self._get_part_timestamps( - entry['Creation-time']) - - if timestamps > latest_timestamps: - ref_dict = entry - latest_timestamps = timestamps - - return ref_dict - - def _get_part_timestamps(self, time_string): - """Transform 'Sat, Jan 11 22:18:40 2020' into timestamps with sec.""" - - first, value = time_string.split(',') - timestamps = time.mktime( - time.strptime(value, " %b %d %H:%M:%S %Y")) - - return timestamps - - def _check_volume_attachment(self, volume): - if not volume['volume_attachment']: - return False - return True - - def _check_volume_has_snapshot(self, volume): - part_id = self._extract_specific_provider_location( - volume['provider_location'], 'partition_id') - - rc, snapshot_list = self._execute('ShowSnapshot', 'part=%s' % part_id) - - if len(snapshot_list) > 0: - return True - return False - - def retype(self, ctxt, volume, new_type, diff, host): - """Convert the volume to be of the new type.""" - - if volume['host'] != host['host']: - if self._check_volume_attachment(volume): - LOG.warning( - 'Volume %(volume_id)s cannot be retyped ' - 'during attachment.', { - 'volume_id': volume['id']}) - return False - - if 
self._check_volume_has_snapshot(volume): - LOG.warning( - 'Volume %(volume_id)s cannot be retyped ' - 'because it has snapshot.', { - 'volume_id': volume['id']}) - return False - - new_extraspecs = new_type['extra_specs'] - rc, model_update = self.migrate_volume( - volume, host, new_extraspecs) - - if rc: - LOG.info( - 'Retype Volume %(volume_id)s is done ' - 'and migrated to pool %(pool_id)s.', { - 'volume_id': volume['id'], - 'pool_id': host['capabilities']['pool_id']}) - - return (rc, model_update) - else: - if ('infortrend_provisioning' in diff['extra_specs'] and - (diff['extra_specs']['infortrend_provisioning'][0] != - diff['extra_specs']['infortrend_provisioning'][1])): - - LOG.warning( - 'The provisioning: %(provisioning)s is not valid.', - {'provisioning': - diff['extra_specs']['infortrend_provisioning'][1]}) - return False - - LOG.info('Retype Volume %(volume_id)s is completed.', { - 'volume_id': volume['id']}) - return True - - def update_migrated_volume(self, ctxt, volume, new_volume, - original_volume_status): - """Return model update for migrated volume.""" - - src_volume_id = volume['id'].replace('-', '') - dst_volume_id = new_volume['id'].replace('-', '') - part_id = self._extract_specific_provider_location( - new_volume['provider_location'], 'partition_id') - - if part_id is None: - part_id = self._get_part_id(dst_volume_id) - - LOG.debug( - 'Rename partition %(part_id)s ' - 'into new volume %(new_volume)s.', { - 'part_id': part_id, 'new_volume': dst_volume_id}) - try: - self._execute('SetPartition', part_id, 'name=%s' % src_volume_id) - except exception.InfortrendCliException: - LOG.exception('Failed to rename %(new_volume)s into ' - '%(volume)s.', {'new_volume': new_volume['id'], - 'volume': volume['id']}) - return {'_name_id': new_volume['_name_id'] or new_volume['id']} - - LOG.info('Update migrated volume %(new_volume)s completed.', { - 'new_volume': new_volume['id']}) - - model_update = { - '_name_id': None, - 'provider_location': 
new_volume['provider_location'], - } - return model_update diff --git a/cinder/volume/drivers/kaminario/__init__.py b/cinder/volume/drivers/kaminario/__init__.py deleted file mode 100644 index e69de29bb..000000000 diff --git a/cinder/volume/drivers/kaminario/kaminario_common.py b/cinder/volume/drivers/kaminario/kaminario_common.py deleted file mode 100644 index 95fb94661..000000000 --- a/cinder/volume/drivers/kaminario/kaminario_common.py +++ /dev/null @@ -1,1135 +0,0 @@ -# Copyright (c) 2016 by Kaminario Technologies, Ltd. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
-"""Volume driver for Kaminario K2 all-flash arrays.""" - -import math -import re -import threading -import time - -import eventlet -from oslo_config import cfg -from oslo_log import log as logging -from oslo_utils import importutils -from oslo_utils import units -from oslo_utils import versionutils -import requests -import six - -import cinder -from cinder import exception -from cinder.i18n import _ -from cinder import objects -from cinder.objects import fields -from cinder import utils -from cinder.volume import configuration -from cinder.volume.drivers.san import san -from cinder.volume import utils as vol_utils - -krest = importutils.try_import("krest") - -K2_MIN_VERSION = '2.2.0' -K2_LOCK_PREFIX = 'Kaminario' -MAX_K2_RETRY = 5 -K2_REP_FAILED_OVER = fields.ReplicationStatus.FAILED_OVER -LOG = logging.getLogger(__name__) - -kaminario_opts = [ - cfg.BoolOpt('auto_calc_max_oversubscription_ratio', - default=False, - help="K2 driver will calculate max_oversubscription_ratio " - "on setting this option as True.")] - -CONF = cfg.CONF -CONF.register_opts(kaminario_opts, group=configuration.SHARED_CONF_GROUP) - -K2HTTPError = requests.exceptions.HTTPError -K2_RETRY_ERRORS = ("MC_ERR_BUSY", "MC_ERR_BUSY_SPECIFIC", - "MC_ERR_INPROGRESS", "MC_ERR_START_TIMEOUT") - -if krest: - class KrestWrap(krest.EndPoint): - def __init__(self, *args, **kwargs): - self.krestlock = threading.Lock() - super(KrestWrap, self).__init__(*args, **kwargs) - - def _should_retry(self, err_code, err_msg): - if err_code == 400: - for er in K2_RETRY_ERRORS: - if er in err_msg: - LOG.debug("Retry ERROR: %d with status %s", - err_code, err_msg) - return True - return False - - @utils.retry(exception.KaminarioRetryableException, - retries=MAX_K2_RETRY) - def _request(self, method, *args, **kwargs): - try: - self.krestlock.acquire() - return super(KrestWrap, self)._request(method, - *args, **kwargs) - except K2HTTPError as err: - err_code = err.response.status_code - err_msg = err.response.text - if 
self._should_retry(err_code, err_msg): - raise exception.KaminarioRetryableException( - reason=six.text_type(err_msg)) - raise - finally: - self.krestlock.release() - - -def kaminario_logger(func): - """Return a function wrapper. - - The wrapper adds log for entry and exit to the function. - """ - def func_wrapper(*args, **kwargs): - LOG.debug('Entering %(function)s of %(class)s with arguments: ' - ' %(args)s, %(kwargs)s', - {'class': args[0].__class__.__name__, - 'function': func.__name__, - 'args': args[1:], - 'kwargs': kwargs}) - ret = func(*args, **kwargs) - LOG.debug('Exiting %(function)s of %(class)s ' - 'having return value: %(ret)s', - {'class': args[0].__class__.__name__, - 'function': func.__name__, - 'ret': ret}) - return ret - return func_wrapper - - -class Replication(object): - def __init__(self, config, *args, **kwargs): - self.backend_id = config.get('backend_id') - self.login = config.get('login') - self.password = config.get('password') - self.rpo = config.get('rpo') - - -class KaminarioCinderDriver(cinder.volume.driver.ISCSIDriver): - VENDOR = "Kaminario" - stats = {} - - def __init__(self, *args, **kwargs): - super(KaminarioCinderDriver, self).__init__(*args, **kwargs) - self.configuration.append_config_values(san.san_opts) - self.configuration.append_config_values(kaminario_opts) - self.replica = None - self._protocol = None - k2_lock_sfx = self.configuration.safe_get('san_ip') - self.k2_lock_name = "%s-%s" % (K2_LOCK_PREFIX, k2_lock_sfx) - - @kaminario_logger - def check_for_setup_error(self): - if krest is None: - msg = _("Unable to import 'krest' python module.") - LOG.error(msg) - raise exception.KaminarioCinderDriverException(reason=msg) - else: - conf = self.configuration - self.client = KrestWrap(conf.san_ip, - conf.san_login, - conf.san_password, - ssl_validate=False) - if self.replica: - self.target = KrestWrap(self.replica.backend_id, - self.replica.login, - self.replica.password, - ssl_validate=False) - v_rs = 
self.client.search("system/state") - if hasattr(v_rs, 'hits') and v_rs.total != 0: - ver = v_rs.hits[0].rest_api_version - ver_exist = versionutils.convert_version_to_int(ver) - ver_min = versionutils.convert_version_to_int(K2_MIN_VERSION) - if ver_exist < ver_min: - msg = _("K2 REST API version should be " - ">= %s.") % K2_MIN_VERSION - LOG.error(msg) - raise exception.KaminarioCinderDriverException(reason=msg) - - else: - msg = _("K2 REST API version search failed.") - LOG.error(msg) - raise exception.KaminarioCinderDriverException(reason=msg) - - def _check_ops(self): - """Ensure that the options we care about are set.""" - required_ops = ['san_ip', 'san_login', 'san_password'] - for attr in required_ops: - if not getattr(self.configuration, attr, None): - raise exception.InvalidInput(reason=_('%s is not set.') % attr) - - replica = self.configuration.safe_get('replication_device') - if replica and isinstance(replica, list): - replica_ops = ['backend_id', 'login', 'password', 'rpo'] - for attr in replica_ops: - if attr not in replica[0]: - msg = _('replication_device %s is not set.') % attr - raise exception.InvalidInput(reason=msg) - self.replica = Replication(replica[0]) - - @kaminario_logger - def do_setup(self, context): - super(KaminarioCinderDriver, self).do_setup(context) - self._check_ops() - - @kaminario_logger - def create_volume(self, volume): - """Volume creation in K2 needs a volume group. 
- - - create a volume group - - create a volume in the volume group - """ - vg_name = self.get_volume_group_name(volume.id) - vol_name = self.get_volume_name(volume.id) - prov_type = self._get_is_dedup(volume.get('volume_type')) - try: - LOG.debug("Creating volume group with name: %(name)s, " - "quota: unlimited and dedup_support: %(dedup)s", - {'name': vg_name, 'dedup': prov_type}) - - vg = self.client.new("volume_groups", name=vg_name, quota=0, - is_dedup=prov_type).save() - LOG.debug("Creating volume with name: %(name)s, size: %(size)s " - "GB, volume_group: %(vg)s", - {'name': vol_name, 'size': volume.size, 'vg': vg_name}) - vol = self.client.new("volumes", name=vol_name, - size=volume.size * units.Mi, - volume_group=vg).save() - except Exception as ex: - vg_rs = self.client.search("volume_groups", name=vg_name) - if vg_rs.total != 0: - LOG.debug("Deleting vg: %s for failed volume in K2.", vg_name) - vg_rs.hits[0].delete() - LOG.exception("Creation of volume %s failed.", vol_name) - raise exception.KaminarioCinderDriverException(reason=ex) - - if self._get_is_replica(volume.volume_type) and self.replica: - self._create_volume_replica(volume, vg, vol, self.replica.rpo) - - @kaminario_logger - def _create_volume_replica(self, volume, vg, vol, rpo): - """Volume replica creation in K2 needs session and remote volume. 
- - - create a session - - create a volume in the volume group - - """ - session_name = self.get_session_name(volume.id) - rsession_name = self.get_rep_name(session_name) - - rvg_name = self.get_rep_name(vg.name) - rvol_name = self.get_rep_name(vol.name) - - k2peer_rs = self.client.search("replication/peer_k2arrays", - mgmt_host=self.replica.backend_id) - if hasattr(k2peer_rs, 'hits') and k2peer_rs.total != 0: - k2peer = k2peer_rs.hits[0] - else: - msg = _("Unable to find K2peer in source K2:") - LOG.error(msg) - raise exception.KaminarioCinderDriverException(reason=msg) - try: - LOG.debug("Creating source session with name: %(sname)s and " - " target session name: %(tname)s", - {'sname': session_name, 'tname': rsession_name}) - src_ssn = self.client.new("replication/sessions") - src_ssn.replication_peer_k2array = k2peer - src_ssn.auto_configure_peer_volumes = "False" - src_ssn.local_volume_group = vg - src_ssn.replication_peer_volume_group_name = rvg_name - src_ssn.remote_replication_session_name = rsession_name - src_ssn.name = session_name - src_ssn.rpo = rpo - src_ssn.save() - LOG.debug("Creating remote volume with name: %s", - rvol_name) - self.client.new("replication/peer_volumes", - local_volume=vol, - name=rvol_name, - replication_session=src_ssn).save() - src_ssn.state = "in_sync" - src_ssn.save() - except Exception as ex: - LOG.exception("Replication for the volume %s has " - "failed.", vol.name) - self._delete_by_ref(self.client, "replication/sessions", - session_name, 'session') - self._delete_by_ref(self.target, "replication/sessions", - rsession_name, 'remote session') - self._delete_by_ref(self.target, "volumes", - rvol_name, 'remote volume') - self._delete_by_ref(self.client, "volumes", vol.name, "volume") - self._delete_by_ref(self.target, "volume_groups", - rvg_name, "remote vg") - self._delete_by_ref(self.client, "volume_groups", vg.name, "vg") - raise exception.KaminarioCinderDriverException(reason=ex) - - @kaminario_logger - def 
_create_failover_volume_replica(self, volume, vg_name, vol_name): - """Volume replica creation in K2 needs session and remote volume. - - - create a session - - create a volume in the volume group - - """ - session_name = self.get_session_name(volume.id) - rsession_name = self.get_rep_name(session_name) - - rvg_name = self.get_rep_name(vg_name) - rvol_name = self.get_rep_name(vol_name) - rvg = self.target.search("volume_groups", name=rvg_name).hits[0] - rvol = self.target.search("volumes", name=rvol_name).hits[0] - k2peer_rs = self.target.search("replication/peer_k2arrays", - mgmt_host=self.configuration.san_ip) - if hasattr(k2peer_rs, 'hits') and k2peer_rs.total != 0: - k2peer = k2peer_rs.hits[0] - else: - msg = _("Unable to find K2peer in source K2:") - LOG.error(msg) - raise exception.KaminarioCinderDriverException(reason=msg) - try: - LOG.debug("Creating source session with name: %(sname)s and " - " target session name: %(tname)s", - {'sname': rsession_name, 'tname': session_name}) - tgt_ssn = self.target.new("replication/sessions") - tgt_ssn.replication_peer_k2array = k2peer - tgt_ssn.auto_configure_peer_volumes = "False" - tgt_ssn.local_volume_group = rvg - tgt_ssn.replication_peer_volume_group_name = vg_name - tgt_ssn.remote_replication_session_name = session_name - tgt_ssn.name = rsession_name - tgt_ssn.rpo = self.replica.rpo - tgt_ssn.save() - LOG.debug("Creating remote volume with name: %s", - rvol_name) - self.target.new("replication/peer_volumes", - local_volume=rvol, - name=vol_name, - replication_session=tgt_ssn).save() - tgt_ssn.state = "in_sync" - tgt_ssn.save() - except Exception as ex: - LOG.exception("Replication for the volume %s has " - "failed.", rvol_name) - self._delete_by_ref(self.target, "replication/sessions", - rsession_name, 'session') - self._delete_by_ref(self.client, "replication/sessions", - session_name, 'remote session') - self._delete_by_ref(self.client, "volumes", vol_name, "volume") - self._delete_by_ref(self.client, 
"volume_groups", vg_name, "vg") - raise exception.KaminarioCinderDriverException(reason=ex) - - @kaminario_logger - def _delete_by_ref(self, device, url, name, msg): - rs = device.search(url, name=name) - for result in rs.hits: - result.delete() - LOG.debug("Deleting %(msg)s: %(name)s", {'msg': msg, 'name': name}) - - @kaminario_logger - def _failover_volume(self, volume): - """Promoting a secondary volume to primary volume.""" - session_name = self.get_session_name(volume.id) - rsession_name = self.get_rep_name(session_name) - tgt_ssn = self.target.search("replication/sessions", - name=rsession_name).hits[0] - if tgt_ssn.state == 'in_sync': - tgt_ssn.state = 'failed_over' - tgt_ssn.save() - LOG.debug("The target session: %s state is " - "changed to failed_over ", rsession_name) - - @kaminario_logger - def failover_host(self, context, volumes, secondary_id=None, groups=None): - """Failover to replication target.""" - volume_updates = [] - back_end_ip = None - svc_host = vol_utils.extract_host(self.host, 'backend') - service = objects.Service.get_by_args(context, svc_host, - 'cinder-volume') - - if secondary_id and secondary_id != self.replica.backend_id: - LOG.error("Kaminario driver received failover_host " - "request, But backend is non replicated device") - raise exception.UnableToFailOver(reason=_("Failover requested " - "on non replicated " - "backend.")) - - if (service.active_backend_id and - service.active_backend_id != self.configuration.san_ip): - self.snap_updates = [] - rep_volumes = [] - # update status for non-replicated primary volumes - for v in volumes: - vol_name = self.get_volume_name(v['id']) - vol = self.client.search("volumes", name=vol_name) - if v.replication_status != K2_REP_FAILED_OVER and vol.total: - status = 'available' - if v.volume_attachment: - map_rs = self.client.search("mappings", - volume=vol.hits[0]) - status = 'in-use' - if map_rs.total: - map_rs.hits[0].delete() - volume_updates.append({'volume_id': v['id'], - 'updates': - 
{'status': status}}) - else: - rep_volumes.append(v) - - # In-sync from secondaray array to primary array - for v in rep_volumes: - vol_name = self.get_volume_name(v['id']) - vol = self.client.search("volumes", name=vol_name) - rvol_name = self.get_rep_name(vol_name) - rvol = self.target.search("volumes", name=rvol_name) - session_name = self.get_session_name(v['id']) - rsession_name = self.get_rep_name(session_name) - ssn = self.target.search("replication/sessions", - name=rsession_name) - if ssn.total: - tgt_ssn = ssn.hits[0] - ssn = self.client.search("replication/sessions", - name=session_name) - if ssn.total: - src_ssn = ssn.hits[0] - - if (tgt_ssn.state == 'failed_over' and - tgt_ssn.current_role == 'target' and vol.total and src_ssn): - map_rs = self.client.search("mappings", volume=vol.hits[0]) - if map_rs.total: - map_rs.hits[0].delete() - tgt_ssn.state = 'in_sync' - tgt_ssn.save() - self._check_for_status(src_ssn, 'in_sync') - if (rvol.total and src_ssn.state == 'in_sync' and - src_ssn.current_role == 'target'): - gen_no = self._create_volume_replica_user_snap(self.target, - tgt_ssn) - self.snap_updates.append({'tgt_ssn': tgt_ssn, - 'gno': gen_no, - 'stime': time.time()}) - LOG.debug("The target session: %s state is " - "changed to in sync", rsession_name) - - self._is_user_snap_sync_finished() - - # Delete secondary volume mappings and create snapshot - for v in rep_volumes: - vol_name = self.get_volume_name(v['id']) - vol = self.client.search("volumes", name=vol_name) - rvol_name = self.get_rep_name(vol_name) - rvol = self.target.search("volumes", name=rvol_name) - session_name = self.get_session_name(v['id']) - rsession_name = self.get_rep_name(session_name) - ssn = self.target.search("replication/sessions", - name=rsession_name) - if ssn.total: - tgt_ssn = ssn.hits[0] - ssn = self.client.search("replication/sessions", - name=session_name) - if ssn.total: - src_ssn = ssn.hits[0] - if (rvol.total and src_ssn.state == 'in_sync' and - src_ssn.current_role 
== 'target'): - map_rs = self.target.search("mappings", - volume=rvol.hits[0]) - if map_rs.total: - map_rs.hits[0].delete() - gen_no = self._create_volume_replica_user_snap(self.target, - tgt_ssn) - self.snap_updates.append({'tgt_ssn': tgt_ssn, - 'gno': gen_no, - 'stime': time.time()}) - self._is_user_snap_sync_finished() - # changing source sessions to failed-over - for v in rep_volumes: - vol_name = self.get_volume_name(v['id']) - vol = self.client.search("volumes", name=vol_name) - rvol_name = self.get_rep_name(vol_name) - rvol = self.target.search("volumes", name=rvol_name) - session_name = self.get_session_name(v['id']) - rsession_name = self.get_rep_name(session_name) - ssn = self.target.search("replication/sessions", - name=rsession_name) - if ssn.total: - tgt_ssn = ssn.hits[0] - ssn = self.client.search("replication/sessions", - name=session_name) - if ssn.total: - src_ssn = ssn.hits[0] - if (rvol.total and src_ssn.state == 'in_sync' and - src_ssn.current_role == 'target'): - src_ssn.state = 'failed_over' - src_ssn.save() - self._check_for_status(tgt_ssn, 'suspended') - LOG.debug("The target session: %s state is " - "changed to failed over", session_name) - - src_ssn.state = 'in_sync' - src_ssn.save() - LOG.debug("The target session: %s state is " - "changed to in sync", session_name) - rep_status = fields.ReplicationStatus.DISABLED - volume_updates.append({'volume_id': v['id'], - 'updates': - {'replication_status': rep_status}}) - - back_end_ip = self.configuration.san_ip - else: - """Failover to replication target.""" - for v in volumes: - vol_name = self.get_volume_name(v['id']) - rv = self.get_rep_name(vol_name) - if self.target.search("volumes", name=rv).total: - self._failover_volume(v) - volume_updates.append( - {'volume_id': v['id'], - 'updates': - {'replication_status': K2_REP_FAILED_OVER}}) - else: - volume_updates.append({'volume_id': v['id'], - 'updates': {'status': 'error', }}) - back_end_ip = self.replica.backend_id - return back_end_ip, 
volume_updates, [] - - @kaminario_logger - def _create_volume_replica_user_snap(self, k2, sess): - snap = k2.new("snapshots") - snap.is_application_consistent = "False" - snap.replication_session = sess - snap.save() - return snap.generation_number - - def _is_user_snap_sync_finished(self): - # waiting for user snapshot to be synced - while len(self.snap_updates) > 0: - for l in self.snap_updates: - sess = l.get('tgt_ssn') - gno = l.get('gno') - stime = l.get('stime') - sess.refresh() - if (sess.generation_number == gno and - sess.current_snapshot_progress == 100 - and sess.current_snapshot_id is None): - if time.time() - stime > 300: - gen_no = self._create_volume_replica_user_snap( - self.target, - sess) - self.snap_updates.append({'tgt_ssn': sess, - 'gno': gen_no, - 'stime': time.time()}) - self.snap_updates.remove(l) - eventlet.sleep(1) - - @kaminario_logger - def create_volume_from_snapshot(self, volume, snapshot): - """Create volume from snapshot. - - - search for snapshot and retention_policy - - create a view from snapshot and attach view - - create a volume and attach volume - - copy data from attached view to attached volume - - detach volume and view and finally delete view - """ - snap_name = self.get_snap_name(snapshot.id) - view_name = self.get_view_name(volume.id) - vol_name = self.get_volume_name(volume.id) - cview = src_attach_info = dest_attach_info = None - rpolicy = self.get_policy() - properties = utils.brick_get_connector_properties() - LOG.debug("Searching for snapshot: %s in K2.", snap_name) - snap_rs = self.client.search("snapshots", short_name=snap_name) - if hasattr(snap_rs, 'hits') and snap_rs.total != 0: - snap = snap_rs.hits[0] - LOG.debug("Creating a view: %(view)s from snapshot: %(snap)s", - {'view': view_name, 'snap': snap_name}) - try: - cview = self.client.new("snapshots", - short_name=view_name, - source=snap, retention_policy=rpolicy, - is_exposable=True).save() - except Exception as ex: - LOG.exception("Creating a view: 
%(view)s from snapshot: " - "%(snap)s failed", {"view": view_name, - "snap": snap_name}) - raise exception.KaminarioCinderDriverException(reason=ex) - - else: - msg = _("Snapshot: %s search failed in K2.") % snap_name - LOG.error(msg) - raise exception.KaminarioCinderDriverException(reason=msg) - - try: - conn = self.initialize_connection(cview, properties) - src_attach_info = self._connect_device(conn) - self.create_volume(volume) - conn = self.initialize_connection(volume, properties) - dest_attach_info = self._connect_device(conn) - vol_utils.copy_volume(src_attach_info['device']['path'], - dest_attach_info['device']['path'], - snapshot.volume.size * units.Ki, - self.configuration.volume_dd_blocksize, - sparse=True) - self._kaminario_disconnect_volume(src_attach_info, - dest_attach_info) - self.terminate_connection(volume, properties) - self.terminate_connection(cview, properties) - cview.delete() - except Exception as ex: - self._kaminario_disconnect_volume(src_attach_info, - dest_attach_info) - self.terminate_connection(cview, properties) - self.terminate_connection(volume, properties) - cview.delete() - self.delete_volume(volume) - LOG.exception("Copy to volume: %(vol)s from view: %(view)s " - "failed", {"vol": vol_name, "view": view_name}) - raise exception.KaminarioCinderDriverException(reason=ex) - - @kaminario_logger - def create_cloned_volume(self, volume, src_vref): - """Create a clone from source volume. - - - attach source volume - - create and attach new volume - - copy data from attached source volume to attached new volume - - detach both volumes - """ - clone_name = self.get_volume_name(volume.id) - src_name = self.get_volume_name(src_vref.id) - src_vol = self.client.search("volumes", name=src_name) - src_map = self.client.search("mappings", volume=src_vol) - src_attach_info = dest_attach_info = None - if src_map.total != 0: - msg = _("K2 driver does not support clone of a attached volume. 
" - "To get this done, create a snapshot from the attached " - "volume and then create a volume from the snapshot.") - LOG.error(msg) - raise exception.KaminarioCinderDriverException(reason=msg) - try: - properties = utils.brick_get_connector_properties() - conn = self.initialize_connection(src_vref, properties) - src_attach_info = self._connect_device(conn) - self.create_volume(volume) - conn = self.initialize_connection(volume, properties) - dest_attach_info = self._connect_device(conn) - vol_utils.copy_volume(src_attach_info['device']['path'], - dest_attach_info['device']['path'], - src_vref.size * units.Ki, - self.configuration.volume_dd_blocksize, - sparse=True) - self._kaminario_disconnect_volume(src_attach_info, - dest_attach_info) - self.terminate_connection(volume, properties) - self.terminate_connection(src_vref, properties) - except Exception as ex: - self._kaminario_disconnect_volume(src_attach_info, - dest_attach_info) - self.terminate_connection(src_vref, properties) - self.terminate_connection(volume, properties) - self.delete_volume(volume) - LOG.exception("Create a clone: %s failed.", clone_name) - raise exception.KaminarioCinderDriverException(reason=ex) - - @kaminario_logger - def delete_volume(self, volume): - """Volume in K2 exists in a volume group. 
- - - delete the volume - - delete the corresponding volume group - """ - vg_name = self.get_volume_group_name(volume.id) - vol_name = self.get_volume_name(volume.id) - try: - if self._get_is_replica(volume.volume_type) and self.replica: - self._delete_volume_replica(volume, vg_name, vol_name) - - LOG.debug("Searching and deleting volume: %s in K2.", vol_name) - vol_rs = self.client.search("volumes", name=vol_name) - if vol_rs.total != 0: - vol_rs.hits[0].delete() - LOG.debug("Searching and deleting vg: %s in K2.", vg_name) - vg_rs = self.client.search("volume_groups", name=vg_name) - if vg_rs.total != 0: - vg_rs.hits[0].delete() - except Exception as ex: - LOG.exception("Deletion of volume %s failed.", vol_name) - raise exception.KaminarioCinderDriverException(reason=ex) - - @kaminario_logger - def _delete_volume_replica(self, volume, vg_name, vol_name): - rvg_name = self.get_rep_name(vg_name) - rvol_name = self.get_rep_name(vol_name) - session_name = self.get_session_name(volume.id) - rsession_name = self.get_rep_name(session_name) - src_ssn = self.client.search('replication/sessions', - name=session_name).hits[0] - tgt_ssn = self.target.search('replication/sessions', - name=rsession_name).hits[0] - src_ssn.state = 'suspended' - src_ssn.save() - self._check_for_status(tgt_ssn, 'suspended') - src_ssn.state = 'idle' - src_ssn.save() - self._check_for_status(tgt_ssn, 'idle') - tgt_ssn.delete() - src_ssn.delete() - - LOG.debug("Searching and deleting snapshots for volume groups:" - "%(vg1)s, %(vg2)s in K2.", {'vg1': vg_name, 'vg2': rvg_name}) - vg = self.client.search('volume_groups', name=vg_name).hits - rvg = self.target.search('volume_groups', name=rvg_name).hits - snaps = self.client.search('snapshots', volume_group=vg).hits - for s in snaps: - s.delete() - rsnaps = self.target.search('snapshots', volume_group=rvg).hits - for s in rsnaps: - s.delete() - - self._delete_by_ref(self.target, "volumes", rvol_name, 'remote volume') - self._delete_by_ref(self.target, 
"volume_groups", - rvg_name, "remote vg") - - @kaminario_logger - def _delete_failover_volume_replica(self, volume, vg_name, vol_name): - rvg_name = self.get_rep_name(vg_name) - rvol_name = self.get_rep_name(vol_name) - session_name = self.get_session_name(volume.id) - rsession_name = self.get_rep_name(session_name) - tgt_ssn = self.target.search('replication/sessions', - name=rsession_name).hits[0] - tgt_ssn.state = 'idle' - tgt_ssn.save() - tgt_ssn.delete() - - LOG.debug("Searching and deleting snapshots for target volume group " - "and target volume: %(vol)s, %(vg)s in K2.", - {'vol': rvol_name, 'vg': rvg_name}) - rvg = self.target.search('volume_groups', name=rvg_name).hits - rsnaps = self.target.search('snapshots', volume_group=rvg).hits - for s in rsnaps: - s.delete() - - def _check_for_status(self, obj, status): - while obj.state != status: - obj.refresh() - eventlet.sleep(1) - - @kaminario_logger - def get_volume_stats(self, refresh=False): - if refresh: - self.update_volume_stats() - stats = self.stats - stats['storage_protocol'] = self._protocol - stats['driver_version'] = self.VERSION - stats['vendor_name'] = self.VENDOR - backend_name = self.configuration.safe_get('volume_backend_name') - stats['volume_backend_name'] = (backend_name or - self.__class__.__name__) - return stats - - def create_export(self, context, volume, connector): - pass - - def ensure_export(self, context, volume): - pass - - def remove_export(self, context, volume): - pass - - @kaminario_logger - def create_snapshot(self, snapshot): - """Create a snapshot from a volume_group.""" - vg_name = self.get_volume_group_name(snapshot.volume_id) - snap_name = self.get_snap_name(snapshot.id) - rpolicy = self.get_policy() - try: - LOG.debug("Searching volume_group: %s in K2.", vg_name) - vg = self.client.search("volume_groups", name=vg_name).hits[0] - LOG.debug("Creating a snapshot: %(snap)s from vg: %(vg)s", - {'snap': snap_name, 'vg': vg_name}) - self.client.new("snapshots", 
short_name=snap_name, - source=vg, retention_policy=rpolicy, - is_auto_deleteable=False).save() - except Exception as ex: - LOG.exception("Creation of snapshot: %s failed.", snap_name) - raise exception.KaminarioCinderDriverException(reason=ex) - - @kaminario_logger - def delete_snapshot(self, snapshot): - """Delete a snapshot.""" - snap_name = self.get_snap_name(snapshot.id) - try: - LOG.debug("Searching and deleting snapshot: %s in K2.", snap_name) - snap_rs = self.client.search("snapshots", short_name=snap_name) - if snap_rs.total != 0: - snap_rs.hits[0].delete() - except Exception as ex: - LOG.exception("Deletion of snapshot: %s failed.", snap_name) - raise exception.KaminarioCinderDriverException(reason=ex) - - @kaminario_logger - def extend_volume(self, volume, new_size): - """Extend volume.""" - vol_name = self.get_volume_name(volume.id) - try: - LOG.debug("Searching volume: %s in K2.", vol_name) - vol = self.client.search("volumes", name=vol_name).hits[0] - vol.size = new_size * units.Mi - LOG.debug("Extending volume: %s in K2.", vol_name) - vol.save() - except Exception as ex: - LOG.exception("Extending volume: %s failed.", vol_name) - raise exception.KaminarioCinderDriverException(reason=ex) - - def update_volume_stats(self): - conf = self.configuration - LOG.debug("Searching system capacity in K2.") - cap = self.client.search("system/capacity").hits[0] - LOG.debug("Searching total volumes in K2 for updating stats.") - total_volumes = self.client.search("volumes").total - 1 - provisioned_vol = cap.provisioned_volumes - if (conf.auto_calc_max_oversubscription_ratio and cap.provisioned - and (cap.total - cap.free) != 0): - ratio = provisioned_vol / float(cap.total - cap.free) - else: - ratio = conf.max_over_subscription_ratio - self.stats = {'QoS_support': False, - 'free_capacity_gb': cap.free / units.Mi, - 'total_capacity_gb': cap.total / units.Mi, - 'thin_provisioning_support': True, - 'sparse_copy_volume': True, - 'total_volumes': total_volumes, - 
'thick_provisioning_support': False, - 'provisioned_capacity_gb': provisioned_vol / units.Mi, - 'max_oversubscription_ratio': ratio, - 'kaminario:thin_prov_type': 'dedup/nodedup', - 'replication_enabled': True, - 'kaminario:replication': True} - - def get_initiator_host_name(self, connector): - """Return the initiator host name. - - Valid characters: 0-9, a-z, A-Z, '-', '_' - All other characters are replaced with '_'. - Total characters in initiator host name: 32 - """ - return re.sub('[^0-9a-zA-Z-_]', '_', connector.get('host', ''))[:32] - - def get_volume_group_name(self, vid): - """Return the volume group name.""" - return "cvg-{0}".format(vid) - - def get_volume_name(self, vid): - """Return the volume name.""" - return "cv-{0}".format(vid) - - def get_session_name(self, vid): - """Return the volume name.""" - return "ssn-{0}".format(vid) - - def get_snap_name(self, sid): - """Return the snapshot name.""" - return "cs-{0}".format(sid) - - def get_view_name(self, vid): - """Return the view name.""" - return "cview-{0}".format(vid) - - def get_rep_name(self, name): - """Return the corresponding replication names.""" - return "r{0}".format(name) - - @kaminario_logger - def _delete_host_by_name(self, name): - """Deleting host by name.""" - host_rs = self.client.search("hosts", name=name) - if hasattr(host_rs, "hits") and host_rs.total != 0: - host = host_rs.hits[0] - host.delete() - - def get_policy(self): - """Return the retention policy.""" - try: - LOG.debug("Searching for retention_policy in K2.") - return self.client.search("retention_policies", - name="Best_Effort_Retention").hits[0] - except Exception as ex: - LOG.exception("Retention policy search failed in K2.") - raise exception.KaminarioCinderDriverException(reason=ex) - - def _get_volume_object(self, volume): - vol_name = self.get_volume_name(volume.id) - if volume.replication_status == K2_REP_FAILED_OVER: - vol_name = self.get_rep_name(vol_name) - LOG.debug("Searching volume : %s in K2.", vol_name) - 
vol_rs = self.client.search("volumes", name=vol_name) - if not hasattr(vol_rs, 'hits') or vol_rs.total == 0: - msg = _("Unable to find volume: %s from K2.") % vol_name - LOG.error(msg) - raise exception.KaminarioCinderDriverException(reason=msg) - return vol_rs.hits[0] - - def _get_lun_number(self, vol, host): - volsnap = None - LOG.debug("Searching volsnaps in K2.") - volsnap_rs = self.client.search("volsnaps", snapshot=vol) - if hasattr(volsnap_rs, 'hits') and volsnap_rs.total != 0: - volsnap = volsnap_rs.hits[0] - - LOG.debug("Searching mapping of volsnap in K2.") - map_rs = self.client.search("mappings", volume=volsnap, host=host) - return map_rs.hits[0].lun - - def initialize_connection(self, volume, connector): - pass - - @kaminario_logger - def terminate_connection(self, volume, connector): - """Terminate connection of volume from host.""" - # Get volume object - if type(volume).__name__ != 'RestObject': - vol_name = self.get_volume_name(volume.id) - if volume.replication_status == K2_REP_FAILED_OVER: - vol_name = self.get_rep_name(vol_name) - LOG.debug("Searching volume: %s in K2.", vol_name) - volume_rs = self.client.search("volumes", name=vol_name) - if hasattr(volume_rs, "hits") and volume_rs.total != 0: - volume = volume_rs.hits[0] - else: - vol_name = volume.name - - # Get host object. 
- host_name = self.get_initiator_host_name(connector) - host_rs = self.client.search("hosts", name=host_name) - if hasattr(host_rs, "hits") and host_rs.total != 0 and volume: - host = host_rs.hits[0] - LOG.debug("Searching and deleting mapping of volume: %(name)s to " - "host: %(host)s", {'host': host_name, 'name': vol_name}) - map_rs = self.client.search("mappings", volume=volume, host=host) - if hasattr(map_rs, "hits") and map_rs.total != 0: - map_rs.hits[0].delete() - if self.client.search("mappings", host=host).total == 0: - LOG.debug("Deleting initiator hostname: %s in K2.", host_name) - host.delete() - else: - LOG.warning("Host: %s not found on K2.", host_name) - - @kaminario_logger - def k2_initialize_connection(self, volume, connector): - # Get volume object. - if type(volume).__name__ != 'RestObject': - vol = self._get_volume_object(volume) - else: - vol = volume - # Get host object. - host, host_rs, host_name = self._get_host_object(connector) - try: - # Map volume object to host object. - LOG.debug("Mapping volume: %(vol)s to host: %(host)s", - {'host': host_name, 'vol': vol.name}) - mapping = self.client.new("mappings", volume=vol, host=host).save() - except Exception as ex: - if host_rs.total == 0: - self._delete_host_by_name(host_name) - LOG.exception("Unable to map volume: %(vol)s to host: " - "%(host)s", {'host': host_name, - 'vol': vol.name}) - raise exception.KaminarioCinderDriverException(reason=ex) - # Get lun number. 
- if type(volume).__name__ == 'RestObject': - return self._get_lun_number(vol, host) - else: - return mapping.lun - - def _get_host_object(self, connector): - pass - - def _get_is_dedup(self, vol_type): - if vol_type: - specs_val = vol_type.get('extra_specs', {}).get( - 'kaminario:thin_prov_type') - if specs_val == 'nodedup': - return False - else: - return True - else: - return True - - def _get_is_replica(self, vol_type): - replica = False - if vol_type and vol_type.get('extra_specs'): - specs = vol_type.get('extra_specs') - if (specs.get('kaminario:replication') == 'enabled' and - self.replica): - replica = True - return replica - - def _get_replica_status(self, vg_name): - vg_rs = self.client.search("volume_groups", name=vg_name) - if vg_rs.total: - vg = vg_rs.hits[0] - if self.client.search("replication/sessions", - local_volume_group=vg).total: - return True - return False - - @kaminario_logger - def manage_existing(self, volume, existing_ref): - vol_name = existing_ref['source-name'] - new_name = self.get_volume_name(volume.id) - vg_new_name = self.get_volume_group_name(volume.id) - vg_name = None - is_dedup = self._get_is_dedup(volume.get('volume_type')) - reason = None - try: - LOG.debug("Searching volume: %s in K2.", vol_name) - vol = self.client.search("volumes", name=vol_name).hits[0] - vg = vol.volume_group - nvol = self.client.search("volumes", volume_group=vg).total - vg_replica = self._get_replica_status(vg.name) - vol_map = False - if self.client.search("mappings", volume=vol).total != 0: - vol_map = True - if is_dedup != vg.is_dedup: - reason = 'dedup type mismatch for K2 volume group.' - elif vg_replica: - reason = 'replication enabled K2 volume group.' - elif vol_map: - reason = 'attached K2 volume.' - elif nvol != 1: - reason = 'multiple volumes in K2 volume group.' 
- if reason: - raise exception.ManageExistingInvalidReference( - existing_ref=existing_ref, - reason=_('Unable to manage K2 volume due to: %s') % reason) - vol.name = new_name - vg_name = vg.name - LOG.debug("Manage new volume name: %s", new_name) - vg.name = vg_new_name - LOG.debug("Manage volume group name: %s", vg_new_name) - vg.save() - LOG.debug("Manage volume: %s in K2.", vol_name) - vol.save() - except exception.ManageExistingInvalidReference: - LOG.exception("manage volume: %s failed.", vol_name) - raise - except Exception: - LOG.exception("manage volume: %s failed.", vol_name) - vg_rs = self.client.search("volume_groups", name=vg_new_name) - if hasattr(vg_rs, 'hits') and vg_rs.total != 0: - vg = vg_rs.hits[0] - if vg_name and vg.name == vg_new_name: - vg.name = vg_name - LOG.debug("Updating vg new name to old name: %s ", vg_name) - vg.save() - raise - - @kaminario_logger - def manage_existing_get_size(self, volume, existing_ref): - vol_name = existing_ref['source-name'] - v_rs = self.client.search("volumes", name=vol_name) - if hasattr(v_rs, 'hits') and v_rs.total != 0: - vol = v_rs.hits[0] - size = vol.size / units.Mi - return math.ceil(size) - else: - raise exception.ManageExistingInvalidReference( - existing_ref=existing_ref, - reason=_('Unable to get size of manage volume.')) - - @kaminario_logger - def after_volume_copy(self, ctxt, volume, new_volume, remote=None): - self.delete_volume(volume) - vg_name_old = self.get_volume_group_name(volume.id) - vol_name_old = self.get_volume_name(volume.id) - vg_name_new = self.get_volume_group_name(new_volume.id) - vol_name_new = self.get_volume_name(new_volume.id) - vg_new = self.client.search("volume_groups", name=vg_name_new).hits[0] - vg_new.name = vg_name_old - vg_new.save() - vol_new = self.client.search("volumes", name=vol_name_new).hits[0] - vol_new.name = vol_name_old - vol_new.save() - - @kaminario_logger - def retype(self, ctxt, volume, new_type, diff, host): - old_type = volume.get('volume_type') - 
vg_name = self.get_volume_group_name(volume.id) - vol_name = self.get_volume_name(volume.id) - vol_rs = self.client.search("volumes", name=vol_name) - if vol_rs.total: - vol = vol_rs.hits[0] - vmap = self.client.search("mappings", volume=vol).total - old_rep_type = self._get_replica_status(vg_name) - new_rep_type = self._get_is_replica(new_type) - new_prov_type = self._get_is_dedup(new_type) - old_prov_type = self._get_is_dedup(old_type) - # Change dedup<->nodedup with add/remove replication is complex in K2 - # since K2 does not have api to change dedup<->nodedup. - if new_prov_type == old_prov_type: - if not old_rep_type and new_rep_type: - self._add_replication(volume) - return True - elif old_rep_type and not new_rep_type: - self._delete_replication(volume) - return True - elif not new_rep_type and not old_rep_type: - msg = ("Use '--migration-policy on-demand' to change 'dedup " - "without replication'<->'nodedup without replication'.") - if vol_rs.total and vmap: - msg = "Unattach volume and {0}".format(msg) - LOG.debug(msg) - return False - else: - LOG.error('Change from type1: %(type1)s to type2: %(type2)s ' - 'is not supported directly in K2.', - {'type1': old_type, 'type2': new_type}) - return False - - def _add_replication(self, volume): - vg_name = self.get_volume_group_name(volume.id) - vol_name = self.get_volume_name(volume.id) - if volume.replication_status == K2_REP_FAILED_OVER: - self._create_failover_volume_replica(volume, vg_name, vol_name) - else: - LOG.debug("Searching volume group with name: %(name)s", - {'name': vg_name}) - vg = self.client.search("volume_groups", name=vg_name).hits[0] - LOG.debug("Searching volume with name: %(name)s", - {'name': vol_name}) - vol = self.client.search("volumes", name=vol_name).hits[0] - self._create_volume_replica(volume, vg, vol, self.replica.rpo) - - def _delete_replication(self, volume): - vg_name = self.get_volume_group_name(volume.id) - vol_name = self.get_volume_name(volume.id) - if 
volume.replication_status == K2_REP_FAILED_OVER: - self._delete_failover_volume_replica(volume, vg_name, vol_name) - else: - self._delete_volume_replica(volume, vg_name, vol_name) - - def _kaminario_disconnect_volume(self, *attach_info): - for info in attach_info: - if (info and info.get('connector') and - info.get('conn', {}).get('data') and info.get('device')): - info['connector'].disconnect_volume(info['conn']['data'], - info['device']) diff --git a/cinder/volume/drivers/kaminario/kaminario_fc.py b/cinder/volume/drivers/kaminario/kaminario_fc.py deleted file mode 100644 index 2f9eef5b5..000000000 --- a/cinder/volume/drivers/kaminario/kaminario_fc.py +++ /dev/null @@ -1,194 +0,0 @@ -# Copyright (c) 2016 by Kaminario Technologies, Ltd. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -"""Volume driver for Kaminario K2 all-flash arrays.""" -from oslo_log import log as logging - -from cinder import coordination -from cinder import exception -from cinder.i18n import _ -from cinder.objects import fields -from cinder.volume.drivers.kaminario import kaminario_common as common -from cinder.zonemanager import utils as fczm_utils - -K2_REP_FAILED_OVER = fields.ReplicationStatus.FAILED_OVER -LOG = logging.getLogger(__name__) -kaminario_logger = common.kaminario_logger - - -class KaminarioFCDriver(common.KaminarioCinderDriver): - """Kaminario K2 FC Volume Driver. 
- - Version history: - 1.0 - Initial driver - 1.1 - Added manage/unmanage and extra-specs support for nodedup - 1.2 - Added replication support - 1.3 - Added retype support - 1.4 - Added replication failback support - """ - - VERSION = '1.4' - - # ThirdPartySystems wiki page name - CI_WIKI_NAME = "Kaminario_K2_CI" - - @kaminario_logger - def __init__(self, *args, **kwargs): - super(KaminarioFCDriver, self).__init__(*args, **kwargs) - self._protocol = 'FC' - self.lookup_service = fczm_utils.create_lookup_service() - - @fczm_utils.add_fc_zone - @kaminario_logger - @coordination.synchronized('{self.k2_lock_name}') - def initialize_connection(self, volume, connector): - """Attach K2 volume to host.""" - # Check wwpns in host connector. - if not connector.get('wwpns'): - msg = _("No wwpns found in host connector.") - LOG.error(msg) - raise exception.KaminarioCinderDriverException(reason=msg) - # To support replication failback - temp_client = None - if (hasattr(volume, 'replication_status') and - volume.replication_status == K2_REP_FAILED_OVER): - temp_client = self.client - self.client = self.target - # Get target wwpns. - target_wwpns = self.get_target_info(volume) - # Map volume. - lun = self.k2_initialize_connection(volume, connector) - # Create initiator-target mapping. - target_wwpns, init_target_map = self._build_initiator_target_map( - connector, target_wwpns) - # To support replication failback - if temp_client: - self.client = temp_client - # Return target volume information. 
- return {'driver_volume_type': 'fibre_channel', - 'data': {"target_discovered": True, - "target_lun": lun, - "target_wwn": target_wwpns, - "initiator_target_map": init_target_map}} - - @fczm_utils.remove_fc_zone - @kaminario_logger - @coordination.synchronized('{self.k2_lock_name}') - def terminate_connection(self, volume, connector, **kwargs): - # To support replication failback - temp_client = None - if (hasattr(volume, 'replication_status') and - volume.replication_status == K2_REP_FAILED_OVER): - temp_client = self.client - self.client = self.target - super(KaminarioFCDriver, self).terminate_connection(volume, connector) - properties = {"driver_volume_type": "fibre_channel", "data": {}} - host_name = self.get_initiator_host_name(connector) - host_rs = self.client.search("hosts", name=host_name) - # In terminate_connection, host_entry is deleted if host - # is not attached to any volume - if host_rs.total == 0: - # Get target wwpns. - target_wwpns = self.get_target_info(volume) - target_wwpns, init_target_map = self._build_initiator_target_map( - connector, target_wwpns) - properties["data"] = {"target_wwn": target_wwpns, - "initiator_target_map": init_target_map} - # To support replication failback - if temp_client: - self.client = temp_client - return properties - - def get_target_info(self, volume): - LOG.debug("Searching target wwpns in K2.") - fc_ports_rs = self.client.search("system/fc_ports") - target_wwpns = [] - if hasattr(fc_ports_rs, 'hits') and fc_ports_rs.total != 0: - for port in fc_ports_rs.hits: - if port.pwwn: - target_wwpns.append((port.pwwn).replace(':', '')) - if not target_wwpns: - msg = _("Unable to get FC target wwpns from K2.") - LOG.error(msg) - raise exception.KaminarioCinderDriverException(reason=msg) - return target_wwpns - - @kaminario_logger - def _get_host_object(self, connector): - host_name = self.get_initiator_host_name(connector) - LOG.debug("Searching initiator hostname: %s in K2.", host_name) - host_rs = 
self.client.search("hosts", name=host_name) - host_wwpns = connector['wwpns'] - if host_rs.total == 0: - try: - LOG.debug("Creating initiator hostname: %s in K2.", host_name) - host = self.client.new("hosts", name=host_name, - type="Linux").save() - except Exception as ex: - LOG.exception("Unable to create host : %s in K2.", - host_name) - raise exception.KaminarioCinderDriverException(reason=ex) - else: - # Use existing host. - LOG.debug("Use existing initiator hostname: %s in K2.", host_name) - host = host_rs.hits[0] - # Adding host wwpn. - for wwpn in host_wwpns: - wwpn = ":".join([wwpn[i:i + 2] for i in range(0, len(wwpn), 2)]) - if self.client.search("host_fc_ports", pwwn=wwpn, - host=host).total == 0: - LOG.debug("Adding wwpn: %(wwpn)s to host: " - "%(host)s in K2.", {'wwpn': wwpn, - 'host': host_name}) - try: - self.client.new("host_fc_ports", pwwn=wwpn, - host=host).save() - except Exception as ex: - if host_rs.total == 0: - self._delete_host_by_name(host_name) - LOG.exception("Unable to add wwpn : %(wwpn)s to " - "host: %(host)s in K2.", - {'wwpn': wwpn, 'host': host_name}) - raise exception.KaminarioCinderDriverException(reason=ex) - return host, host_rs, host_name - - def _build_initiator_target_map(self, connector, all_target_wwns): - """Build the target_wwns and the initiator target map.""" - target_wwns = [] - init_targ_map = {} - - if self.lookup_service is not None: - # use FC san lookup. 
- dev_map = self.lookup_service.get_device_mapping_from_network( - connector.get('wwpns'), - all_target_wwns) - - for fabric_name in dev_map: - fabric = dev_map[fabric_name] - target_wwns += fabric['target_port_wwn_list'] - for initiator in fabric['initiator_port_wwn_list']: - if initiator not in init_targ_map: - init_targ_map[initiator] = [] - init_targ_map[initiator] += fabric['target_port_wwn_list'] - init_targ_map[initiator] = list(set( - init_targ_map[initiator])) - target_wwns = list(set(target_wwns)) - else: - initiator_wwns = connector.get('wwpns', []) - target_wwns = all_target_wwns - - for initiator in initiator_wwns: - init_targ_map[initiator] = target_wwns - - return target_wwns, init_targ_map diff --git a/cinder/volume/drivers/kaminario/kaminario_iscsi.py b/cinder/volume/drivers/kaminario/kaminario_iscsi.py deleted file mode 100644 index 03165fe7a..000000000 --- a/cinder/volume/drivers/kaminario/kaminario_iscsi.py +++ /dev/null @@ -1,142 +0,0 @@ -# Copyright (c) 2016 by Kaminario Technologies, Ltd. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
-"""Volume driver for Kaminario K2 all-flash arrays.""" -from oslo_log import log as logging - -from cinder import coordination -from cinder import exception -from cinder.i18n import _ -from cinder import interface -from cinder.objects import fields -from cinder.volume.drivers.kaminario import kaminario_common as common - -ISCSI_TCP_PORT = "3260" -K2_REP_FAILED_OVER = fields.ReplicationStatus.FAILED_OVER -LOG = logging.getLogger(__name__) -kaminario_logger = common.kaminario_logger - - -@interface.volumedriver -class KaminarioISCSIDriver(common.KaminarioCinderDriver): - """Kaminario K2 iSCSI Volume Driver. - - Version history: - 1.0 - Initial driver - 1.1 - Added manage/unmanage and extra-specs support for nodedup - 1.2 - Added replication support - 1.3 - Added retype support - 1.4 - Added replication failback support - """ - - VERSION = '1.4' - - # ThirdPartySystems wiki page name - CI_WIKI_NAME = "Kaminario_K2_CI" - - @kaminario_logger - def __init__(self, *args, **kwargs): - super(KaminarioISCSIDriver, self).__init__(*args, **kwargs) - self._protocol = 'iSCSI' - - @kaminario_logger - @coordination.synchronized('{self.k2_lock_name}') - def initialize_connection(self, volume, connector): - """Attach K2 volume to host.""" - # To support replication failback - temp_client = None - if (hasattr(volume, 'replication_status') and - volume.replication_status == K2_REP_FAILED_OVER): - temp_client = self.client - self.client = self.target - # Get target_portal and target iqn. - iscsi_portal, target_iqn = self.get_target_info(volume) - # Map volume. - lun = self.k2_initialize_connection(volume, connector) - # To support replication failback - if temp_client: - self.client = temp_client - # Return target volume information. 
- return {"driver_volume_type": "iscsi", - "data": {"target_iqn": target_iqn, - "target_portal": iscsi_portal, - "target_lun": lun, - "target_discovered": True}} - - @kaminario_logger - @coordination.synchronized('{self.k2_lock_name}') - def terminate_connection(self, volume, connector, **kwargs): - # To support replication failback - temp_client = None - if (hasattr(volume, 'replication_status') and - volume.replication_status == K2_REP_FAILED_OVER): - temp_client = self.client - self.client = self.target - super(KaminarioISCSIDriver, self).terminate_connection(volume, - connector) - # To support replication failback - if temp_client: - self.client = temp_client - - def get_target_info(self, volume): - LOG.debug("Searching first iscsi port ip without wan in K2.") - iscsi_ip_rs = self.client.search("system/net_ips") - iscsi_ip = target_iqn = None - if hasattr(iscsi_ip_rs, 'hits') and iscsi_ip_rs.total != 0: - for ip in iscsi_ip_rs.hits: - if not ip.wan_port: - iscsi_ip = ip.ip_address - break - if not iscsi_ip: - msg = _("Unable to get ISCSI IP address from K2.") - LOG.error(msg) - raise exception.KaminarioCinderDriverException(reason=msg) - iscsi_portal = "{0}:{1}".format(iscsi_ip, ISCSI_TCP_PORT) - LOG.debug("Searching system state for target iqn in K2.") - sys_state_rs = self.client.search("system/state") - - if hasattr(sys_state_rs, 'hits') and sys_state_rs.total != 0: - target_iqn = sys_state_rs.hits[0].iscsi_qualified_target_name - - if not target_iqn: - msg = _("Unable to get target iqn from K2.") - LOG.error(msg) - raise exception.KaminarioCinderDriverException(reason=msg) - return iscsi_portal, target_iqn - - @kaminario_logger - def _get_host_object(self, connector): - host_name = self.get_initiator_host_name(connector) - LOG.debug("Searching initiator hostname: %s in K2.", host_name) - host_rs = self.client.search("hosts", name=host_name) - """Create a host if not exists.""" - if host_rs.total == 0: - try: - LOG.debug("Creating initiator hostname: %s in 
K2.", host_name) - host = self.client.new("hosts", name=host_name, - type="Linux").save() - LOG.debug("Adding iqn: %(iqn)s to host: %(host)s in K2.", - {'iqn': connector['initiator'], 'host': host_name}) - iqn = self.client.new("host_iqns", iqn=connector['initiator'], - host=host) - iqn.save() - except Exception as ex: - self._delete_host_by_name(host_name) - LOG.exception("Unable to create host: %s in K2.", - host_name) - raise exception.KaminarioCinderDriverException(reason=ex) - else: - LOG.debug("Use existing initiator hostname: %s in K2.", host_name) - host = host_rs.hits[0] - return host, host_rs, host_name diff --git a/cinder/volume/drivers/lenovo/__init__.py b/cinder/volume/drivers/lenovo/__init__.py deleted file mode 100644 index e69de29bb..000000000 diff --git a/cinder/volume/drivers/lenovo/lenovo_client.py b/cinder/volume/drivers/lenovo/lenovo_client.py deleted file mode 100644 index 07e807cdb..000000000 --- a/cinder/volume/drivers/lenovo/lenovo_client.py +++ /dev/null @@ -1,24 +0,0 @@ -# Copyright 2014 Objectif Libre -# Copyright 2015 DotHill Systems -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
-# - -from cinder.volume.drivers.dothill import dothill_client - - -class LenovoClient(dothill_client.DotHillClient): - - def __init__(self, host, login, password, protocol, ssl_verify): - super(LenovoClient, self).__init__(host, login, password, protocol, - ssl_verify) diff --git a/cinder/volume/drivers/lenovo/lenovo_common.py b/cinder/volume/drivers/lenovo/lenovo_common.py deleted file mode 100644 index 7e92296ab..000000000 --- a/cinder/volume/drivers/lenovo/lenovo_common.py +++ /dev/null @@ -1,70 +0,0 @@ -# Copyright 2014 Objectif Libre -# Copyright 2015 DotHill Systems -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
-# - -from oslo_config import cfg - -from cinder.volume import configuration -from cinder.volume.drivers.dothill import dothill_common -from cinder.volume.drivers.lenovo import lenovo_client - -common_opts = [ - cfg.StrOpt('lenovo_backend_name', - default='A', - help="Pool or Vdisk name to use for volume creation."), - cfg.StrOpt('lenovo_backend_type', - choices=['linear', 'virtual'], - default='virtual', - help="linear (for VDisk) or virtual (for Pool)."), - cfg.StrOpt('lenovo_api_protocol', - choices=['http', 'https'], - default='https', - help="Lenovo api interface protocol."), - cfg.BoolOpt('lenovo_verify_certificate', - default=False, - help="Whether to verify Lenovo array SSL certificate."), - cfg.StrOpt('lenovo_verify_certificate_path', - help="Lenovo array SSL certificate path.") -] - -iscsi_opts = [ - cfg.ListOpt('lenovo_iscsi_ips', - default=[], - help="List of comma-separated target iSCSI IP addresses."), -] - -CONF = cfg.CONF -CONF.register_opts(common_opts, group=configuration.SHARED_CONF_GROUP) -CONF.register_opts(iscsi_opts, group=configuration.SHARED_CONF_GROUP) - - -class LenovoCommon(dothill_common.DotHillCommon): - VERSION = "1.6" - - def __init__(self, config): - self.config = config - self.vendor_name = "Lenovo" - self.backend_name = self.config.lenovo_backend_name - self.backend_type = self.config.lenovo_backend_type - self.api_protocol = self.config.lenovo_api_protocol - ssl_verify = False - if (self.api_protocol == 'https' and - self.config.lenovo_verify_certificate): - ssl_verify = self.config.lenovo_verify_certificate_path or True - self.client = lenovo_client.LenovoClient(self.config.san_ip, - self.config.san_login, - self.config.san_password, - self.api_protocol, - ssl_verify) diff --git a/cinder/volume/drivers/lenovo/lenovo_fc.py b/cinder/volume/drivers/lenovo/lenovo_fc.py deleted file mode 100644 index 448508e71..000000000 --- a/cinder/volume/drivers/lenovo/lenovo_fc.py +++ /dev/null @@ -1,45 +0,0 @@ -# Copyright 2014 Objectif Libre -# 
Copyright 2015 Dot Hill Systems Corp. -# Copyright 2016 Seagate Technology or one of its affiliates -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -# - -from cinder import interface -from cinder.volume.drivers.dothill import dothill_fc -from cinder.volume.drivers.lenovo import lenovo_common - - -@interface.volumedriver -class LenovoFCDriver(dothill_fc.DotHillFCDriver): - """OpenStack Fibre Channel cinder drivers for Lenovo Storage arrays. - - Version history: - 1.0 - Inheriting from DotHill cinder drivers. - 1.6 - Add management path redundancy and reduce load placed - on management controller. - """ - - VERSION = "1.6" - - SUPPORTED = True - - # ThirdPartySystems wiki page - CI_WIKI_NAME = "Lenovo_Storage_CI" - - def __init__(self, *args, **kwargs): - super(LenovoFCDriver, self).__init__(*args, **kwargs) - self.configuration.append_config_values(lenovo_common.common_opts) - - def _init_common(self): - return lenovo_common.LenovoCommon(self.configuration) diff --git a/cinder/volume/drivers/lenovo/lenovo_iscsi.py b/cinder/volume/drivers/lenovo/lenovo_iscsi.py deleted file mode 100644 index de292854c..000000000 --- a/cinder/volume/drivers/lenovo/lenovo_iscsi.py +++ /dev/null @@ -1,47 +0,0 @@ -# Copyright 2014 Objectif Libre -# Copyright 2015 Dot Hill Systems Corp. -# Copyright 2016 Seagate Technology or one of its affiliates -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. 
You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -# - -from cinder import interface -from cinder.volume.drivers.dothill import dothill_iscsi -from cinder.volume.drivers.lenovo import lenovo_common - - -@interface.volumedriver -class LenovoISCSIDriver(dothill_iscsi.DotHillISCSIDriver): - """OpenStack iSCSI cinder drivers for Lenovo Storage arrays. - - Version history: - 1.0 - Inheriting from DotHill cinder drivers. - 1.6 - Add management path redundancy and reduce load placed - on management controller. - """ - - VERSION = "1.6" - - SUPPORTED = True - - # ThirdPartySystems wiki page - CI_WIKI_NAME = "Lenovo_Storage_CI" - - def __init__(self, *args, **kwargs): - super(LenovoISCSIDriver, self).__init__(*args, **kwargs) - self.configuration.append_config_values(lenovo_common.common_opts) - self.configuration.append_config_values(lenovo_common.iscsi_opts) - self.iscsi_ips = self.configuration.lenovo_iscsi_ips - - def _init_common(self): - return lenovo_common.LenovoCommon(self.configuration) diff --git a/cinder/volume/drivers/lvm.py b/cinder/volume/drivers/lvm.py deleted file mode 100644 index 21002c8cb..000000000 --- a/cinder/volume/drivers/lvm.py +++ /dev/null @@ -1,844 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. 
You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -""" -Driver for Linux servers running LVM. - -""" - -import math -import os -import socket - -from oslo_concurrency import processutils -from oslo_config import cfg -from oslo_log import log as logging -from oslo_utils import excutils -from oslo_utils import importutils -from oslo_utils import units -import six - -from cinder.brick.local_dev import lvm as lvm -from cinder import exception -from cinder.i18n import _ -from cinder.image import image_utils -from cinder import interface -from cinder import utils -from cinder.volume import configuration -from cinder.volume import driver -from cinder.volume import utils as volutils - -LOG = logging.getLogger(__name__) - -# FIXME(jdg): We'll put the lvm_ prefix back on these when we -# move over to using this as the real LVM driver, for now we'll -# rename them so that the config generation utility doesn't barf -# on duplicate entries. -volume_opts = [ - cfg.StrOpt('volume_group', - default='cinder-volumes', - help='Name for the VG that will contain exported volumes'), - cfg.IntOpt('lvm_mirrors', - default=0, - help='If >0, create LVs with multiple mirrors. Note that ' - 'this requires lvm_mirrors + 2 PVs with available space'), - cfg.StrOpt('lvm_type', - default='auto', - choices=['default', 'thin', 'auto'], - help='Type of LVM volumes to deploy; (default, thin, or auto). 
' - 'Auto defaults to thin if thin is supported.'), - cfg.StrOpt('lvm_conf_file', - default='/etc/cinder/lvm.conf', - help='LVM conf file to use for the LVM driver in Cinder; ' - 'this setting is ignored if the specified file does ' - 'not exist (You can also specify \'None\' to not use ' - 'a conf file even if one exists).'), - cfg.FloatOpt('lvm_max_over_subscription_ratio', - # This option exists to provide a default value for the - # LVM driver which is different than the global default. - default=1.0, - help='max_over_subscription_ratio setting for the LVM ' - 'driver. This takes precedence over the general ' - 'max_over_subscription_ratio by default. If set ' - 'to None, the general max_over_subscription_ratio ' - 'is used.'), - cfg.BoolOpt('lvm_suppress_fd_warnings', - default=False, - help='Suppress leaked file descriptor warnings in LVM ' - 'commands.') -] - -CONF = cfg.CONF -CONF.register_opts(volume_opts, group=configuration.SHARED_CONF_GROUP) - - -@interface.volumedriver -class LVMVolumeDriver(driver.VolumeDriver): - """Executes commands relating to Volumes.""" - - VERSION = '3.0.0' - - # ThirdPartySystems wiki page - CI_WIKI_NAME = "Cinder_Jenkins" - - def __init__(self, vg_obj=None, *args, **kwargs): - # Parent sets db, host, _execute and base config - super(LVMVolumeDriver, self).__init__(*args, **kwargs) - - self.configuration.append_config_values(volume_opts) - self.hostname = socket.gethostname() - self.vg = vg_obj - self.backend_name =\ - self.configuration.safe_get('volume_backend_name') or 'LVM' - - # Target Driver is what handles data-transport - # Transport specific code should NOT be in - # the driver (control path), this way - # different target drivers can be added (iscsi, FC etc) - target_driver = \ - self.target_mapping[self.configuration.safe_get('iscsi_helper')] - - LOG.debug('Attempting to initialize LVM driver with the ' - 'following target_driver: %s', - target_driver) - - self.target_driver = importutils.import_object( - 
target_driver, - configuration=self.configuration, - db=self.db, - executor=self._execute) - self.protocol = self.target_driver.protocol - self._sparse_copy_volume = False - - if self.configuration.lvm_max_over_subscription_ratio is not None: - self.configuration.max_over_subscription_ratio = \ - self.configuration.lvm_max_over_subscription_ratio - - def _sizestr(self, size_in_g): - return '%sg' % size_in_g - - def _volume_not_present(self, volume_name): - return self.vg.get_volume(volume_name) is None - - def _delete_volume(self, volume, is_snapshot=False): - """Deletes a logical volume.""" - if self.configuration.volume_clear != 'none' and \ - self.configuration.lvm_type != 'thin': - self._clear_volume(volume, is_snapshot) - - name = volume['name'] - if is_snapshot: - name = self._escape_snapshot(volume['name']) - self.vg.delete(name) - - def _clear_volume(self, volume, is_snapshot=False): - # zero out old volumes to prevent data leaking between users - # TODO(ja): reclaiming space should be done lazy and low priority - if is_snapshot: - # if the volume to be cleared is a snapshot of another volume - # we need to clear out the volume using the -cow instead of the - # directly volume path. We need to skip this if we are using - # thin provisioned LVs. - # bug# lp1191812 - dev_path = self.local_path(volume) + "-cow" - else: - dev_path = self.local_path(volume) - - # TODO(jdg): Maybe we could optimize this for snaps by looking at - # the cow table and only overwriting what's necessary? 
- # for now we're still skipping on snaps due to hang issue - if not os.path.exists(dev_path): - msg = (_('Volume device file path %s does not exist.') - % dev_path) - LOG.error(msg) - raise exception.VolumeBackendAPIException(data=msg) - - size_in_g = (volume.get('volume_size') if is_snapshot - else volume.get('size')) - if size_in_g is None: - msg = (_("Size for volume: %s not found, cannot secure delete.") - % volume['id']) - LOG.error(msg) - raise exception.InvalidParameterValue(msg) - - # clear_volume expects sizes in MiB, we store integer GiB - # be sure to convert before passing in - vol_sz_in_meg = size_in_g * units.Ki - - volutils.clear_volume( - vol_sz_in_meg, dev_path, - volume_clear=self.configuration.volume_clear, - volume_clear_size=self.configuration.volume_clear_size) - - def _escape_snapshot(self, snapshot_name): - # Linux LVM reserves name that starts with snapshot, so that - # such volume name can't be created. Mangle it. - if not snapshot_name.startswith('snapshot'): - return snapshot_name - return '_' + snapshot_name - - def _unescape_snapshot(self, snapshot_name): - # Undo snapshot name change done by _escape_snapshot() - if not snapshot_name.startswith('_snapshot'): - return snapshot_name - return snapshot_name[1:] - - def _create_volume(self, name, size, lvm_type, mirror_count, vg=None): - vg_ref = self.vg - if vg is not None: - vg_ref = vg - - vg_ref.create_volume(name, size, lvm_type, mirror_count) - - def _update_volume_stats(self): - """Retrieve stats info from volume group.""" - - LOG.debug("Updating volume stats") - if self.vg is None: - LOG.warning('Unable to update stats on non-initialized ' - 'Volume Group: %s', - self.configuration.volume_group) - return - - self.vg.update_volume_group_info() - data = {} - - # Note(zhiteng): These information are driver/backend specific, - # each driver may define these values in its own config options - # or fetch from driver specific configuration file. 
- data["volume_backend_name"] = self.backend_name - data["vendor_name"] = 'Open Source' - data["driver_version"] = self.VERSION - data["storage_protocol"] = self.protocol - data["pools"] = [] - - total_capacity = 0 - free_capacity = 0 - - if self.configuration.lvm_mirrors > 0: - total_capacity =\ - self.vg.vg_mirror_size(self.configuration.lvm_mirrors) - free_capacity =\ - self.vg.vg_mirror_free_space(self.configuration.lvm_mirrors) - provisioned_capacity = round( - float(total_capacity) - float(free_capacity), 2) - elif self.configuration.lvm_type == 'thin': - total_capacity = self.vg.vg_thin_pool_size - free_capacity = self.vg.vg_thin_pool_free_space - provisioned_capacity = self.vg.vg_provisioned_capacity - else: - total_capacity = self.vg.vg_size - free_capacity = self.vg.vg_free_space - provisioned_capacity = round( - float(total_capacity) - float(free_capacity), 2) - - location_info = \ - ('LVMVolumeDriver:%(hostname)s:%(vg)s' - ':%(lvm_type)s:%(lvm_mirrors)s' % - {'hostname': self.hostname, - 'vg': self.configuration.volume_group, - 'lvm_type': self.configuration.lvm_type, - 'lvm_mirrors': self.configuration.lvm_mirrors}) - - thin_enabled = self.configuration.lvm_type == 'thin' - - # Calculate the total volumes used by the VG group. - # This includes volumes and snapshots. - total_volumes = len(self.vg.get_volumes()) - - # Skip enabled_pools setting, treat the whole backend as one pool - # XXX FIXME if multipool support is added to LVM driver. 
- single_pool = {} - single_pool.update(dict( - pool_name=data["volume_backend_name"], - total_capacity_gb=total_capacity, - free_capacity_gb=free_capacity, - reserved_percentage=self.configuration.reserved_percentage, - location_info=location_info, - QoS_support=False, - provisioned_capacity_gb=provisioned_capacity, - max_over_subscription_ratio=( - self.configuration.max_over_subscription_ratio), - thin_provisioning_support=thin_enabled, - thick_provisioning_support=not thin_enabled, - total_volumes=total_volumes, - filter_function=self.get_filter_function(), - goodness_function=self.get_goodness_function(), - multiattach=False - )) - data["pools"].append(single_pool) - - # Check availability of sparse volume copy. - data['sparse_copy_volume'] = self._sparse_copy_volume - - self._stats = data - - def check_for_setup_error(self): - """Verify that requirements are in place to use LVM driver.""" - if self.vg is None: - root_helper = utils.get_root_helper() - - lvm_conf_file = self.configuration.lvm_conf_file - if lvm_conf_file.lower() == 'none': - lvm_conf_file = None - - try: - lvm_type = self.configuration.lvm_type - if lvm_type == 'auto': - if volutils.supports_thin_provisioning(): - lvm_type = 'thin' - else: - lvm_type = 'default' - self.vg = lvm.LVM( - self.configuration.volume_group, - root_helper, - lvm_type=lvm_type, - executor=self._execute, - lvm_conf=lvm_conf_file, - suppress_fd_warn=( - self.configuration.lvm_suppress_fd_warnings)) - - except exception.VolumeGroupNotFound: - message = (_("Volume Group %s does not exist") % - self.configuration.volume_group) - raise exception.VolumeBackendAPIException(data=message) - - vg_list = volutils.get_all_volume_groups( - self.configuration.volume_group) - vg_dict = \ - next(vg for vg in vg_list if vg['name'] == self.vg.vg_name) - if vg_dict is None: - message = (_("Volume Group %s does not exist") % - self.configuration.volume_group) - raise exception.VolumeBackendAPIException(data=message) - - pool_name = 
"%s-pool" % self.configuration.volume_group - - if self.configuration.lvm_type == 'auto': - # Default to thin provisioning if it is supported and - # the volume group is empty, or contains a thin pool - # for us to use. - self.vg.update_volume_group_info() - - self.configuration.lvm_type = 'default' - - if volutils.supports_thin_provisioning(): - if self.vg.get_volume(pool_name) is not None: - LOG.info('Enabling LVM thin provisioning by default ' - 'because a thin pool exists.') - self.configuration.lvm_type = 'thin' - elif len(self.vg.get_volumes()) == 0: - LOG.info('Enabling LVM thin provisioning by default ' - 'because no LVs exist.') - self.configuration.lvm_type = 'thin' - - if self.configuration.lvm_type == 'thin': - # Specific checks for using Thin provisioned LV's - if not volutils.supports_thin_provisioning(): - message = _("Thin provisioning not supported " - "on this version of LVM.") - raise exception.VolumeBackendAPIException(data=message) - - if self.vg.get_volume(pool_name) is None: - try: - self.vg.create_thin_pool(pool_name) - except processutils.ProcessExecutionError as exc: - exception_message = (_("Failed to create thin pool, " - "error message was: %s") - % six.text_type(exc.stderr)) - raise exception.VolumeBackendAPIException( - data=exception_message) - - # Enable sparse copy since lvm_type is 'thin' - self._sparse_copy_volume = True - - def create_volume(self, volume): - """Creates a logical volume.""" - mirror_count = 0 - if self.configuration.lvm_mirrors: - mirror_count = self.configuration.lvm_mirrors - - self._create_volume(volume['name'], - self._sizestr(volume['size']), - self.configuration.lvm_type, - mirror_count) - - def update_migrated_volume(self, ctxt, volume, new_volume, - original_volume_status): - """Return model update from LVM for migrated volume. - - This method should rename the back-end volume name(id) on the - destination host back to its original name(id) on the source host. 
- - :param ctxt: The context used to run the method update_migrated_volume - :param volume: The original volume that was migrated to this backend - :param new_volume: The migration volume object that was created on - this backend as part of the migration process - :param original_volume_status: The status of the original volume - :returns: model_update to update DB with any needed changes - """ - name_id = None - provider_location = None - if original_volume_status == 'available': - current_name = CONF.volume_name_template % new_volume['id'] - original_volume_name = CONF.volume_name_template % volume['id'] - try: - self.vg.rename_volume(current_name, original_volume_name) - except processutils.ProcessExecutionError: - LOG.error('Unable to rename the logical volume ' - 'for volume: %s', volume['id']) - # If the rename fails, _name_id should be set to the new - # volume id and provider_location should be set to the - # one from the new volume as well. - name_id = new_volume['_name_id'] or new_volume['id'] - provider_location = new_volume['provider_location'] - else: - # The back-end will not be renamed. - name_id = new_volume['_name_id'] or new_volume['id'] - provider_location = new_volume['provider_location'] - return {'_name_id': name_id, 'provider_location': provider_location} - - def create_volume_from_snapshot(self, volume, snapshot): - """Creates a volume from a snapshot.""" - self._create_volume(volume['name'], - self._sizestr(volume['size']), - self.configuration.lvm_type, - self.configuration.lvm_mirrors) - - # Some configurations of LVM do not automatically activate - # ThinLVM snapshot LVs. 
- self.vg.activate_lv(snapshot['name'], is_snapshot=True) - - # copy_volume expects sizes in MiB, we store integer GiB - # be sure to convert before passing in - volutils.copy_volume(self.local_path(snapshot), - self.local_path(volume), - snapshot['volume_size'] * units.Ki, - self.configuration.volume_dd_blocksize, - execute=self._execute, - sparse=self._sparse_copy_volume) - - def delete_volume(self, volume): - """Deletes a logical volume.""" - - # NOTE(jdg): We don't need to explicitly call - # remove export here because we already did it - # in the manager before we got here. - - if self._volume_not_present(volume['name']): - # If the volume isn't present, then don't attempt to delete - return True - - if self.vg.lv_has_snapshot(volume['name']): - LOG.error('Unable to delete due to existing snapshot ' - 'for volume: %s', volume['name']) - raise exception.VolumeIsBusy(volume_name=volume['name']) - - self._delete_volume(volume) - LOG.info('Successfully deleted volume: %s', volume['id']) - - def create_snapshot(self, snapshot): - """Creates a snapshot.""" - - self.vg.create_lv_snapshot(self._escape_snapshot(snapshot['name']), - snapshot['volume_name'], - self.configuration.lvm_type) - - def delete_snapshot(self, snapshot): - """Deletes a snapshot.""" - if self._volume_not_present(self._escape_snapshot(snapshot['name'])): - # If the snapshot isn't present, then don't attempt to delete - LOG.warning("snapshot: %s not found, " - "skipping delete operations", snapshot['name']) - LOG.info('Successfully deleted snapshot: %s', snapshot['id']) - return True - - # TODO(yamahata): zeroing out the whole snapshot triggers COW. - # it's quite slow. - self._delete_volume(snapshot, is_snapshot=True) - - def revert_to_snapshot(self, context, volume, snapshot): - """Revert a volume to a snapshot""" - - # NOTE(tommylikehu): We still can revert the volume because Cinder - # will try the alternative approach if 'NotImplementedError' - # is raised here. 
- if self.configuration.lvm_type == 'thin': - msg = _("Revert volume to snapshot not implemented for thin LVM.") - raise NotImplementedError(msg) - else: - self.vg.revert(self._escape_snapshot(snapshot.name)) - self.vg.deactivate_lv(volume.name) - self.vg.activate_lv(volume.name) - # Recreate the snapshot that was destroyed by the revert - self.create_snapshot(snapshot) - - def local_path(self, volume, vg=None): - if vg is None: - vg = self.configuration.volume_group - # NOTE(vish): stops deprecation warning - escaped_group = vg.replace('-', '--') - escaped_name = self._escape_snapshot(volume['name']).replace('-', '--') - return "/dev/mapper/%s-%s" % (escaped_group, escaped_name) - - def copy_image_to_volume(self, context, volume, image_service, image_id): - """Fetch the image from image_service and write it to the volume.""" - image_utils.fetch_to_raw(context, - image_service, - image_id, - self.local_path(volume), - self.configuration.volume_dd_blocksize, - size=volume['size']) - - def copy_volume_to_image(self, context, volume, image_service, image_meta): - """Copy the volume to the specified image.""" - image_utils.upload_volume(context, - image_service, - image_meta, - self.local_path(volume)) - - def create_cloned_volume(self, volume, src_vref): - """Creates a clone of the specified volume.""" - if self.configuration.lvm_type == 'thin': - self.vg.create_lv_snapshot(volume['name'], - src_vref['name'], - self.configuration.lvm_type) - if volume['size'] > src_vref['size']: - LOG.debug("Resize the new volume to %s.", volume['size']) - self.extend_volume(volume, volume['size']) - self.vg.activate_lv(volume['name'], is_snapshot=True, - permanent=True) - return - - mirror_count = 0 - if self.configuration.lvm_mirrors: - mirror_count = self.configuration.lvm_mirrors - LOG.info('Creating clone of volume: %s', src_vref['id']) - volume_name = src_vref['name'] - temp_id = 'tmp-snap-%s' % volume['id'] - temp_snapshot = {'volume_name': volume_name, - 'size': 
src_vref['size'], - 'volume_size': src_vref['size'], - 'name': 'clone-snap-%s' % volume['id'], - 'id': temp_id} - - self.create_snapshot(temp_snapshot) - - # copy_volume expects sizes in MiB, we store integer GiB - # be sure to convert before passing in - try: - self._create_volume(volume['name'], - self._sizestr(volume['size']), - self.configuration.lvm_type, - mirror_count) - - self.vg.activate_lv(temp_snapshot['name'], is_snapshot=True) - volutils.copy_volume( - self.local_path(temp_snapshot), - self.local_path(volume), - src_vref['size'] * units.Ki, - self.configuration.volume_dd_blocksize, - execute=self._execute, - sparse=self._sparse_copy_volume) - finally: - self.delete_snapshot(temp_snapshot) - - def clone_image(self, context, volume, - image_location, image_meta, - image_service): - return None, False - - def get_volume_stats(self, refresh=False): - """Get volume status. - - If 'refresh' is True, run update the stats first. - """ - - if refresh: - self._update_volume_stats() - - return self._stats - - def extend_volume(self, volume, new_size): - """Extend an existing volume's size.""" - self.vg.extend_volume(volume['name'], - self._sizestr(new_size)) - - def manage_existing(self, volume, existing_ref): - """Manages an existing LV. - - Renames the LV to match the expected name for the volume. - Error checking done by manage_existing_get_size is not repeated. - """ - lv_name = existing_ref['source-name'] - self.vg.get_volume(lv_name) - - vol_id = volutils.extract_id_from_volume_name(lv_name) - if volutils.check_already_managed_volume(vol_id): - raise exception.ManageExistingAlreadyManaged(volume_ref=lv_name) - - # Attempt to rename the LV to match the OpenStack internal name. 
- try: - self.vg.rename_volume(lv_name, volume['name']) - except processutils.ProcessExecutionError as exc: - exception_message = (_("Failed to rename logical volume %(name)s, " - "error message was: %(err_msg)s") - % {'name': lv_name, - 'err_msg': exc.stderr}) - raise exception.VolumeBackendAPIException( - data=exception_message) - - def manage_existing_object_get_size(self, existing_object, existing_ref, - object_type): - """Return size of an existing LV for manage existing volume/snapshot. - - existing_ref is a dictionary of the form: - {'source-name': } - """ - - # Check that the reference is valid - if 'source-name' not in existing_ref: - reason = _('Reference must contain source-name element.') - raise exception.ManageExistingInvalidReference( - existing_ref=existing_ref, reason=reason) - lv_name = existing_ref['source-name'] - lv = self.vg.get_volume(lv_name) - - # Raise an exception if we didn't find a suitable LV. - if not lv: - kwargs = {'existing_ref': lv_name, - 'reason': 'Specified logical volume does not exist.'} - raise exception.ManageExistingInvalidReference(**kwargs) - - # LV size is returned in gigabytes. Attempt to parse size as a float - # and round up to the next integer. 
- try: - lv_size = int(math.ceil(float(lv['size']))) - except ValueError: - exception_message = (_("Failed to manage existing %(type)s " - "%(name)s, because reported size %(size)s " - "was not a floating-point number.") - % {'type': object_type, - 'name': lv_name, - 'size': lv['size']}) - raise exception.VolumeBackendAPIException( - data=exception_message) - return lv_size - - def manage_existing_get_size(self, volume, existing_ref): - return self.manage_existing_object_get_size(volume, existing_ref, - "volume") - - def manage_existing_snapshot_get_size(self, snapshot, existing_ref): - if not isinstance(existing_ref, dict): - existing_ref = {"source-name": existing_ref} - return self.manage_existing_object_get_size(snapshot, existing_ref, - "snapshot") - - def manage_existing_snapshot(self, snapshot, existing_ref): - dest_name = self._escape_snapshot(snapshot['name']) - snapshot_temp = {"name": dest_name} - if not isinstance(existing_ref, dict): - existing_ref = {"source-name": existing_ref} - return self.manage_existing(snapshot_temp, existing_ref) - - def _get_manageable_resource_info(self, cinder_resources, resource_type, - marker, limit, offset, sort_keys, - sort_dirs): - entries = [] - lvs = self.vg.get_volumes() - cinder_ids = [resource['id'] for resource in cinder_resources] - - for lv in lvs: - is_snap = self.vg.lv_is_snapshot(lv['name']) - if ((resource_type == 'volume' and is_snap) or - (resource_type == 'snapshot' and not is_snap)): - continue - - if resource_type == 'volume': - potential_id = volutils.extract_id_from_volume_name(lv['name']) - else: - unescape = self._unescape_snapshot(lv['name']) - potential_id = volutils.extract_id_from_snapshot_name(unescape) - lv_info = {'reference': {'source-name': lv['name']}, - 'size': int(math.ceil(float(lv['size']))), - 'cinder_id': None, - 'extra_info': None} - - if potential_id in cinder_ids: - lv_info['safe_to_manage'] = False - lv_info['reason_not_safe'] = 'already managed' - lv_info['cinder_id'] = 
potential_id - elif self.vg.lv_is_open(lv['name']): - lv_info['safe_to_manage'] = False - lv_info['reason_not_safe'] = '%s in use' % resource_type - else: - lv_info['safe_to_manage'] = True - lv_info['reason_not_safe'] = None - - if resource_type == 'snapshot': - origin = self.vg.lv_get_origin(lv['name']) - lv_info['source_reference'] = {'source-name': origin} - - entries.append(lv_info) - - return volutils.paginate_entries_list(entries, marker, limit, offset, - sort_keys, sort_dirs) - - def get_manageable_volumes(self, cinder_volumes, marker, limit, offset, - sort_keys, sort_dirs): - return self._get_manageable_resource_info(cinder_volumes, 'volume', - marker, limit, - offset, sort_keys, sort_dirs) - - def get_manageable_snapshots(self, cinder_snapshots, marker, limit, offset, - sort_keys, sort_dirs): - return self._get_manageable_resource_info(cinder_snapshots, 'snapshot', - marker, limit, - offset, sort_keys, sort_dirs) - - def retype(self, context, volume, new_type, diff, host): - """Retypes a volume, allow QoS and extra_specs change.""" - - LOG.debug('LVM retype called for volume %s. No action ' - 'required for LVM volumes.', - volume['id']) - return True - - def migrate_volume(self, ctxt, volume, host, thin=False, mirror_count=0): - """Optimize the migration if the destination is on the same server. - - If the specified host is another back-end on the same server, and - the volume is not attached, we can do the migration locally without - going through iSCSI. 
- """ - - false_ret = (False, None) - if volume['status'] != 'available': - return false_ret - if 'location_info' not in host['capabilities']: - return false_ret - info = host['capabilities']['location_info'] - try: - (dest_type, dest_hostname, dest_vg, lvm_type, lvm_mirrors) =\ - info.split(':') - lvm_mirrors = int(lvm_mirrors) - except ValueError: - return false_ret - if (dest_type != 'LVMVolumeDriver' or dest_hostname != self.hostname): - return false_ret - - if dest_vg == self.vg.vg_name: - message = (_("Refusing to migrate volume ID: %(id)s. Please " - "check your configuration because source and " - "destination are the same Volume Group: %(name)s.") % - {'id': volume['id'], 'name': self.vg.vg_name}) - LOG.error(message) - raise exception.VolumeBackendAPIException(data=message) - - vg_list = volutils.get_all_volume_groups() - try: - next(vg for vg in vg_list if vg['name'] == dest_vg) - except StopIteration: - LOG.error("Destination Volume Group %s does not exist", - dest_vg) - return false_ret - - helper = utils.get_root_helper() - - lvm_conf_file = self.configuration.lvm_conf_file - if lvm_conf_file.lower() == 'none': - lvm_conf_file = None - - dest_vg_ref = lvm.LVM(dest_vg, helper, - lvm_type=lvm_type, - executor=self._execute, - lvm_conf=lvm_conf_file) - - self._create_volume(volume['name'], - self._sizestr(volume['size']), - lvm_type, - lvm_mirrors, - dest_vg_ref) - # copy_volume expects sizes in MiB, we store integer GiB - # be sure to convert before passing in - size_in_mb = int(volume['size']) * units.Ki - try: - volutils.copy_volume(self.local_path(volume), - self.local_path(volume, vg=dest_vg), - size_in_mb, - self.configuration.volume_dd_blocksize, - execute=self._execute, - sparse=self._sparse_copy_volume) - except Exception as e: - with excutils.save_and_reraise_exception(): - LOG.error("Volume migration failed due to " - "exception: %(reason)s.", - {'reason': six.text_type(e)}, resource=volume) - dest_vg_ref.delete(volume) - 
self._delete_volume(volume) - return (True, None) - - def get_pool(self, volume): - return self.backend_name - - # ####### Interface methods for DataPath (Target Driver) ######## - - def ensure_export(self, context, volume): - volume_path = "/dev/%s/%s" % (self.configuration.volume_group, - volume['name']) - - self.vg.activate_lv(volume['name']) - - model_update = \ - self.target_driver.ensure_export(context, volume, volume_path) - return model_update - - def create_export(self, context, volume, connector, vg=None): - if vg is None: - vg = self.configuration.volume_group - - volume_path = "/dev/%s/%s" % (vg, volume['name']) - - self.vg.activate_lv(volume['name']) - - export_info = self.target_driver.create_export( - context, - volume, - volume_path) - return {'provider_location': export_info['location'], - 'provider_auth': export_info['auth'], } - - def remove_export(self, context, volume): - self.target_driver.remove_export(context, volume) - - def initialize_connection(self, volume, connector): - return self.target_driver.initialize_connection(volume, connector) - - def validate_connector(self, connector): - return self.target_driver.validate_connector(connector) - - def terminate_connection(self, volume, connector, **kwargs): - # NOTE(jdg): LVM has a single export for each volume, so what - # we need to do here is check if there is more than one attachment for - # the volume, if there is; let the caller know that they should NOT - # remove the export. 
- has_shared_connections = False - if len(volume.volume_attachment) > 1: - has_shared_connections = True - - # NOTE(jdg): For the TGT driver this is a noop, for LIO this removes - # the initiator IQN from the targets access list, so we're good - - self.target_driver.terminate_connection(volume, connector, - **kwargs) - return has_shared_connections diff --git a/cinder/volume/drivers/nec/__init__.py b/cinder/volume/drivers/nec/__init__.py deleted file mode 100644 index e69de29bb..000000000 diff --git a/cinder/volume/drivers/nec/cli.py b/cinder/volume/drivers/nec/cli.py deleted file mode 100644 index c0d1e9d22..000000000 --- a/cinder/volume/drivers/nec/cli.py +++ /dev/null @@ -1,792 +0,0 @@ -# -# Copyright (c) 2016 NEC Corporation. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -import os -import re -import select -import time -import traceback - -from oslo_log import log as logging -from oslo_utils import excutils -from oslo_utils import units - -from cinder import coordination -from cinder import exception -from cinder.i18n import _ -from cinder import ssh_utils - -LOG = logging.getLogger(__name__) - -retry_msgids = ['iSM31005', 'iSM31015', 'iSM42408', 'iSM42412'] - - -class MStorageISMCLI(object): - """SSH client.""" - - def __init__(self, properties): - super(MStorageISMCLI, self).__init__() - - self._sshpool = None - self._properties = properties - - def _execute(self, command, expected_status=[0], raise_exec=True): - return self._sync_execute(command, self._properties['diskarray_name'], - expected_status, raise_exec) - - @coordination.synchronized('mstorage_ismcli_execute_{diskarray_name}') - def _sync_execute(self, command, diskarray_name, - expected_status=[0], raise_exec=True): - retry_flag = True - retry_count = 0 - while retry_flag is True: - try: - out, err, status = self._cli_execute(command, expected_status, - False) - if status != 0: - errflg = 0 - errnum = out + err - LOG.debug('ismcli failed (errnum=%s).', errnum) - for retry_msgid in retry_msgids: - if errnum.find(retry_msgid) >= 0: - LOG.debug('`%(command)s` failed. ' - '%(name)s %(errnum)s ' - 'retry_count=%(retry_count)d', - {'command': command, - 'name': __name__, - 'errnum': errnum, - 'retry_count': retry_count}) - errflg = 1 - break - if errflg == 1: - retry_count += 1 - if retry_count >= 60: - msg = (_('Timeout `%(command)s`.' - ' status=%(status)d, ' - 'out="%(out)s", ' - 'err="%(err)s".') % - {'command': command, - 'status': status, - 'out': out, - 'err': err}) - raise exception.APITimeout(msg) - time.sleep(5) - continue - else: - if raise_exec is True: - msg = _('Command `%s` failed.') % command - raise exception.VolumeBackendAPIException(data=msg) - except EOFError: - with excutils.save_and_reraise_exception() as ctxt: - LOG.debug('EOFError has occurred. 
' - '%(name)s retry_count=%(retry_count)d', - {'name': __name__, - 'retry_count': retry_count}) - retry_count += 1 - if retry_count < 60: - ctxt.reraise = False - time.sleep(5) - continue - retry_flag = False - - return out, err, status - - def _execute_nolock(self, command, expected_status=[0], raise_exec=True): - retry_flag = True - retry_count = 0 - while retry_flag is True: - try: - out, err, status = self._cli_execute(command, expected_status, - raise_exec) - except EOFError: - with excutils.save_and_reraise_exception() as ctxt: - LOG.debug('EOFError has occurred. ' - '%(name)s retry_count=%(retry_count)d', - {'name': __name__, - 'retry_count': retry_count}) - retry_count += 1 - if retry_count < 60: - ctxt.reraise = False - time.sleep(5) - continue - retry_flag = False - return out, err, status - - def _cli_execute(self, command, expected_status=[0], raise_exec=True): - if not self._sshpool: - LOG.debug('ssh_utils.SSHPool execute.') - self._sshpool = ssh_utils.SSHPool( - self._properties['cli_fip'], - self._properties['ssh_pool_port_number'], - self._properties['ssh_conn_timeout'], - self._properties['cli_user'], - self._properties['cli_password'], - privatekey=self._properties['cli_privkey']) - - with self._sshpool.item() as ssh: - LOG.debug('`%s` executing...', command) - stdin, stdout, stderr = ssh.exec_command(command) - stdin.close() - channel = stdout.channel - _out, _err = [], [] - while 1: - select.select([channel], [], []) - if channel.recv_ready(): - _out.append(channel.recv(4096)) - continue - if channel.recv_stderr_ready(): - _err.append(channel.recv_stderr(4096)) - continue - if channel.exit_status_ready(): - status = channel.recv_exit_status() - break - LOG.debug('`%(command)s` done. status=%(status)d.', - {'command': command, 'status': status}) - out, err = ''.join(_out), ''.join(_err) - if expected_status is not None and status not in expected_status: - LOG.debug('`%(command)s` failed. 
status=%(status)d, ' - 'out="%(out)s", err="%(err)s".', - {'command': command, 'status': status, - 'out': out, 'err': err}) - if raise_exec is True: - msg = _('Command `%s` failed.') % command - raise exception.VolumeBackendAPIException(data=msg) - return out, err, status - - def view_all(self, conf_ismview_path=None, delete_ismview=True, - cmd_lock=True): - if self._properties['queryconfig_view'] is True: - command = 'clioutmsg xml; iSMview' - if self._properties['ismview_alloptimize'] is True: - command += ' --alloptimize' - else: - command += ' -all' - else: - command = 'iSMquery -cinder -xml -all' - if cmd_lock is True: - out, err, status = self._execute(command) - else: - out, err, status = self._execute_nolock(command) - - exstats = re.compile("(.*)ExitStatus(.*)\n") - tmpout = exstats.sub('', out) - out = tmpout - if conf_ismview_path is not None: - if delete_ismview: - if os.path.exists(conf_ismview_path): - os.remove(conf_ismview_path) - LOG.debug('Remove clioutmsg xml to %s.', - conf_ismview_path) - else: - with open(conf_ismview_path, 'w+') as f: - f.write(out) - LOG.debug('Wrote clioutmsg xml to %s.', - conf_ismview_path) - return out - - def ldbind(self, name, pool, ldn, size): - """Bind an LD and attach a nickname to it.""" - errnum = "" - cmd = ('iSMcfg ldbind -poolnumber %(poolnumber)d -ldn %(ldn)d ' - '-capacity %(capacity)d -immediate' - % {'poolnumber': pool, 'ldn': ldn, - 'capacity': size}) - out, err, status = self._execute(cmd, [0], False) - errnum = err - if status != 0: - return False, errnum - - cmd = ('iSMcfg nickname -ldn %(ldn)d -newname %(newname)s ' - '-immediate' - % {'ldn': ldn, 'newname': name}) - self._execute(cmd) - return True, errnum - - def unbind(self, name): - """Unbind an LD.""" - cmd = 'iSMcfg ldunbind -ldname %s' % name - self._execute(cmd) - - def expand(self, ldn, capacity): - """Expand a LD.""" - cmd = ('iSMcfg ldexpand -ldn %(ldn)d -capacity %(capacity)d ' - '-unit gb' - % {'ldn': ldn, 'capacity': capacity}) - 
self._execute(cmd) - - def addldsetld(self, ldset, ldname, lun=None): - """Add an LD to specified LD Set.""" - if lun is None: - cmd = ('iSMcfg addldsetld -ldset %(ldset)s ' - '-ldname %(ldname)s' - % {'ldset': ldset, 'ldname': ldname}) - self._execute(cmd) - else: - cmd = ('iSMcfg addldsetld -ldset %(ldset)s -ldname %(ldname)s ' - '-lun %(lun)d' - % {'ldset': ldset, 'ldname': ldname, - 'lun': lun}) - self._execute(cmd) - - def delldsetld(self, ldset, ldname): - """Delete an LD from specified LD Set.""" - rtn = True - errnum = "" - cmd = ('iSMcfg delldsetld -ldset %(ldset)s ' - '-ldname %(ldname)s' - % {'ldset': ldset, - 'ldname': ldname}) - out, err, status = self._execute(cmd, [0], False) - errnum = err - if status != 0: - rtn = False - return rtn, errnum - - def changeldname(self, ldn, new_name, old_name=None): - """Rename nickname of LD.""" - if old_name is None: - cmd = ('iSMcfg nickname -ldn %(ldn)d -newname %(newname)s ' - '-immediate' - % {'ldn': ldn, 'newname': new_name}) - self._execute(cmd) - else: - cmd = ('iSMcfg nickname -ldname %(ldname)s ' - '-newname %(newname)s' - % {'ldname': old_name, - 'newname': new_name}) - self._execute(cmd) - - def setpair(self, mvname, rvname): - """Set pair.""" - cmd = ('iSMrc_pair -pair -mv %(mv)s -mvflg ld ' - '-rv %(rv)s -rvflg ld' - % {'mv': mvname, 'rv': rvname}) - self._execute(cmd) - - LOG.debug('Pair command completed. MV = %(mv)s RV = %(rv)s.', - {'mv': mvname, 'rv': rvname}) - - def unpair(self, mvname, rvname, flag): - """Unset pair.""" - if flag == 'normal': - cmd = ('iSMrc_pair -unpair -mv %(mv)s -mvflg ld ' - '-rv %(rv)s -rvflg ld' - % {'mv': mvname, 'rv': rvname}) - self._execute(cmd) - elif flag == 'force': - cmd = ('iSMrc_pair -unpair -mv %(mv)s -mvflg ld ' - '-rv %(rv)s -rvflg ld -force all' - % {'mv': mvname, 'rv': rvname}) - self._execute(cmd) - else: - LOG.debug('unpair flag ERROR. flag = %s', flag) - - LOG.debug('Unpair command completed. 
MV = %(mv)s, RV = %(rv)s.', - {'mv': mvname, 'rv': rvname}) - - def replicate(self, mvname, rvname, flag): - if flag == 'full': - cmd = ('iSMrc_replicate -mv %(mv)s -mvflg ld ' - '-rv %(rv)s -rvflg ld -nowait -cprange full ' - '-cpmode bg' - % {'mv': mvname, 'rv': rvname}) - self._execute(cmd) - else: - cmd = ('iSMrc_replicate -mv %(mv)s -mvflg ld ' - '-rv %(rv)s -rvflg ld -nowait -cpmode bg' - % {'mv': mvname, 'rv': rvname}) - self._execute(cmd) - - LOG.debug('Replicate command completed. MV = %(mv)s RV = %(rv)s.', - {'mv': mvname, 'rv': rvname}) - - def separate(self, mvname, rvname, flag): - """Separate for backup.""" - if flag == 'backup': - cmd = ('iSMrc_separate -mv %(mv)s -mvflg ld ' - '-rv %(rv)s -rvflg ld ' - '-rvacc ro -rvuse complete -nowait' - % {'mv': mvname, 'rv': rvname}) - self._execute(cmd) - elif flag == 'restore' or flag == 'clone': - cmd = ('iSMrc_separate -mv %(mv)s -mvflg ld ' - '-rv %(rv)s -rvflg ld ' - '-rvacc rw -rvuse immediate -nowait' - % {'mv': mvname, 'rv': rvname}) - self._execute(cmd) - elif flag == 'esv_restore' or flag == 'migrate': - cmd = ('iSMrc_separate -mv %(mv)s -mvflg ld ' - '-rv %(rv)s -rvflg ld ' - '-rvacc rw -rvuse complete -nowait' - % {'mv': mvname, 'rv': rvname}) - self._execute(cmd) - else: - LOG.debug('separate flag ERROR. flag = %s', flag) - - LOG.debug('Separate command completed. MV = %(mv)s RV = %(rv)s.', - {'mv': mvname, 'rv': rvname}) - - def query_MV_RV_status(self, ldname, rpltype): - if rpltype == 'MV': - cmd = ('iSMrc_query -mv %s -mvflg ld | ' - 'while builtin read line;' - 'do if [[ "$line" =~ "Sync State" ]]; ' - 'then builtin echo ${line:10};fi;' - 'done' % ldname) - out, err, status = self._execute(cmd) - elif rpltype == 'RV': - cmd = ('iSMrc_query -rv %s -rvflg ld | ' - 'while builtin read line;' - 'do if [[ "$line" =~ "Sync State" ]]; ' - 'then builtin echo ${line:10};fi;' - 'done' % ldname) - out, err, status = self._execute(cmd) - else: - LOG.debug('rpltype flag ERROR. 
rpltype = %s', rpltype) - - query_status = out.strip() - return query_status - - def query_MV_RV_name(self, ldname, rpltype): - if rpltype == 'MV': - cmd = ('iSMrc_query -mv %s -mvflg ld | ' - 'while builtin read line;' - 'do if [[ "$line" =~ "LD Name" ]]; ' - 'then builtin echo ${line:7};fi;' - 'done' % ldname) - out, err, status = self._execute(cmd) - out = out.replace(ldname, "") - elif rpltype == 'RV': - cmd = ('iSMrc_query -rv %s -rvflg ld | ' - 'while builtin read line;' - 'do if [[ "$line" =~ "LD Name" ]]; ' - 'then builtin echo ${line:7};fi;' - 'done' % ldname) - out, err, status = self._execute(cmd) - out = out.replace(ldname, "") - else: - LOG.debug('rpltype flag ERROR. rpltype = %s', rpltype) - - query_name = out.strip() - return query_name - - def query_MV_RV_diff(self, ldname, rpltype): - if rpltype == 'MV': - cmd = ('iSMrc_query -mv %s -mvflg ld | ' - 'while builtin read line;' - 'do if [[ "$line" =~ "Separate Diff" ]]; ' - 'then builtin echo ${line:13};fi;' - 'done' % ldname) - out, err, status = self._execute(cmd) - elif rpltype == 'RV': - cmd = ('iSMrc_query -rv %s -rvflg ld | ' - 'while builtin read line;' - 'do if [[ "$line" =~ "Separate Diff" ]]; ' - 'then builtin echo ${line:13};fi;' - 'done' % ldname) - out, err, status = self._execute(cmd) - else: - LOG.debug('rpltype flag ERROR. rpltype = %s', rpltype) - - query_status = out.strip() - return query_status - - def backup_restore(self, volume_properties, unpairWait, canPairing=True): - - # Setting Pair. - flag = 'full' - if canPairing is True: - self.setpair(volume_properties['mvname'][3:], - volume_properties['rvname'][3:]) - else: - rv_diff = self.query_MV_RV_diff(volume_properties['rvname'][3:], - 'RV') - rv_diff = int(rv_diff.replace('KB', ''), 10) // units.Ki - if rv_diff != volume_properties['capacity']: - flag = None - - # Replicate. - self.replicate(volume_properties['mvname'][3:], - volume_properties['rvname'][3:], flag) - - # Separate. 
- self.separate(volume_properties['mvname'][3:], - volume_properties['rvname'][3:], - volume_properties['flag']) - - unpairProc = unpairWait(volume_properties, self) - unpairProc.run() - - def check_ld_existed_rplstatus(self, lds, ldname, snapshot, flag): - - if ldname not in lds: - if flag == 'backup': - LOG.debug('Volume Id not found. ' - 'LD name = %(name)s volume_id = %(id)s.', - {'name': ldname, 'id': snapshot.volume_id}) - raise exception.NotFound(_('Logical Disk does not exist.')) - elif flag == 'restore': - LOG.debug('Snapshot Id not found. ' - 'LD name = %(name)s snapshot_id = %(id)s.', - {'name': ldname, 'id': snapshot.id}) - raise exception.NotFound(_('Logical Disk does not exist.')) - elif flag == 'delete': - LOG.debug('LD `%(name)s` already unbound? ' - 'snapshot_id = %(id)s.', - {'name': ldname, 'id': snapshot.id}) - return None - else: - LOG.debug('check_ld_existed_rplstatus flag error flag = %s.', - flag) - raise exception.NotFound(_('Logical Disk does not exist.')) - - ld = lds[ldname] - - if ld['RPL Attribute'] == 'IV': - pass - elif ld['RPL Attribute'] == 'MV': - query_status = self.query_MV_RV_status(ldname[3:], 'MV') - LOG.debug('query_status : %s.', query_status) - if(query_status == 'separated'): - # unpair. - rvname = self.query_MV_RV_name(ldname[3:], 'MV') - self.unpair(ldname[3:], rvname, 'force') - else: - msg = _('Specified Logical Disk %s has been copied.') % ldname - LOG.error(msg) - raise exception.VolumeBackendAPIException(data=msg) - - elif ld['RPL Attribute'] == 'RV': - query_status = self.query_MV_RV_status(ldname[3:], 'RV') - if query_status == 'separated': - # unpair. 
- mvname = self.query_MV_RV_name(ldname[3:], 'RV') - self.unpair(mvname, ldname[3:], 'force') - else: - msg = _('Specified Logical Disk %s has been copied.') % ldname - LOG.error(msg) - raise exception.VolumeBackendAPIException(data=msg) - - return ld - - def get_pair_lds(self, ldname, lds): - query_status = self.query_MV_RV_name(ldname[3:], 'MV') - query_status = query_status.split('\n') - query_status = [query for query in query_status if query != ''] - LOG.debug('query_status=%s.', query_status) - - pair_lds = {} - for rvname in query_status: - rvname = self._properties['ld_backupname_format'] % rvname - if rvname not in lds: - LOG.debug('LD `%s` is RDR pair?', rvname) - else: - ld = lds[rvname] - ldn = ld['ldn'] - pair_lds[ldn] = ld - - LOG.debug('pair_lds=%s.', pair_lds) - return pair_lds - - def snapshot_create(self, bvname, svname, poolnumber): - """Snapshot create.""" - cmd = ('iSMcfg generationadd -bvname %(bvname)s ' - '-poolnumber %(poolnumber)d -count 1 ' - '-svname %(svname)s' - % {'bvname': bvname, - 'poolnumber': poolnumber, - 'svname': svname}) - self._execute(cmd) - - cmd = ('iSMsc_create -bv %(bv)s -bvflg ld -sv %(sv)s ' - '-svflg ld' - % {'bv': bvname[3:], 'sv': svname}) - self._execute(cmd) - - def snapshot_delete(self, bvname, svname): - """Snapshot delete.""" - query_status = self.query_BV_SV_status(bvname[3:], svname) - if query_status == 'snap/active': - cmd = ('iSMsc_delete -bv %(bv)s -bvflg ld -sv %(sv)s ' - '-svflg ld' - % {'bv': bvname[3:], 'sv': svname}) - self._execute(cmd) - - while True: - query_status = self.query_BV_SV_status(bvname[3:], svname) - if query_status == 'snap/deleting': - LOG.debug('Sleep 1 seconds Start') - time.sleep(1) - else: - break - else: - LOG.debug('The snapshot data does not exist,' - ' because already forced deletion.' 
- ' bvname=%(bvname)s, svname=%(svname)s', - {'bvname': bvname, 'svname': svname}) - - cmd = 'iSMcfg generationdel -bvname %s -count 1' % bvname - self._execute(cmd) - - def query_BV_SV_status(self, bvname, svname): - cmd = ('iSMsc_query -bv %(bv)s -bvflg ld -sv %(sv)s -svflg ld ' - '-summary | ' - 'while builtin read line;do ' - 'if [[ "$line" =~ "%(line)s" ]]; ' - 'then builtin echo "$line";fi;done' - % {'bv': bvname, 'sv': svname, 'line': svname}) - out, err, status = self._execute(cmd) - - query_status = out[34:48].strip() - LOG.debug('snap/state:%s.', query_status) - return query_status - - def set_io_limit(self, ldname, specs, force_delete=True): - if specs['upperlimit'] is not None: - upper = int(specs['upperlimit'], 10) - else: - upper = None - - if specs['lowerlimit'] is not None: - lower = int(specs['lowerlimit'], 10) - else: - lower = None - - report = specs['upperreport'] - if upper is None and lower is None and report is None: - return - cmd = 'iSMioc setlimit -ldname %s' % ldname - if upper is not None: - cmd += ' -upperlimit %d' % upper - if lower is not None: - cmd += ' -lowerlimit %d' % lower - if report is not None: - cmd += ' -upperreport %s' % report - try: - self._execute(cmd) - except Exception: - with excutils.save_and_reraise_exception(): - if force_delete: - self.unbind(ldname) - - def lvbind(self, bvname, lvname, lvnumber): - """Link Volume create.""" - cmd = ('iSMcfg lvbind -bvname %(bvname)s ' - '-lvn %(lvnumber)d -lvname %(lvname)s' - % {'bvname': bvname, - 'lvnumber': lvnumber, - 'lvname': lvname}) - self._execute(cmd) - - def lvunbind(self, lvname): - """Link Volume delete.""" - cmd = ('iSMcfg lvunbind -ldname %(lvname)s' - % {'lvname': lvname}) - self._execute(cmd) - - def lvlink(self, svname, lvname): - """Link to snapshot volume.""" - cmd = ('iSMsc_link -lv %(lvname)s -lvflg ld ' - '-sv %(svname)s -svflg ld -lvacc ro' - % {'lvname': lvname, - 'svname': svname}) - self._execute(cmd) - - def lvunlink(self, lvname): - """Unlink from 
snapshot volume.""" - cmd = ('iSMsc_unlink -lv %(lvname)s -lvflg ld' - % {'lvname': lvname}) - self._execute(cmd) - - -class UnpairWait(object): - error_updates = {'status': 'error', - 'progress': '100%', - 'migration_status': None} - - def __init__(self, volume_properties, cli): - super(UnpairWait, self).__init__() - self._volume_properties = volume_properties - self._mvname = volume_properties['mvname'][3:] - self._rvname = volume_properties['rvname'][3:] - self._mvID = volume_properties['mvid'] - self._rvID = volume_properties['rvid'] - self._flag = volume_properties['flag'] - self._context = volume_properties['context'] - self._cli = cli - self._local_conf = self._cli._properties - - def _wait(self, unpair=True): - timeout = self._local_conf['thread_timeout'] * 24 - start_time = time.time() - while True: - cur_time = time.time() - if (cur_time - start_time) > timeout: - raise exception.APITimeout(_('UnpairWait wait timeout.')) - - LOG.debug('Sleep 60 seconds Start') - time.sleep(60) - - query_status = self._cli.query_MV_RV_status(self._rvname, 'RV') - if query_status == 'separated': - if unpair is True: - self._cli.unpair(self._mvname, self._rvname, 'normal') - break - elif query_status == 'sep/exec': - continue - else: - LOG.debug('iSMrc_query command result abnormal.' - 'Query status = %(status)s, RV = %(rv)s.', - {'status': query_status, 'rv': self._rvname}) - break - - def run(self): - try: - self._execute() - except Exception: - with excutils.save_and_reraise_exception(): - LOG.debug('UnpairWait Unexpected error. 
' - 'exception=%(exception)s, MV = %(mv)s, RV = %(rv)s.', - {'exception': traceback.format_exc(), - 'mv': self._mvname, 'rv': self._rvname}) - - def _execute(self): - pass - - -class UnpairWaitForBackup(UnpairWait): - def __init__(self, volume_properties, cli): - super(UnpairWaitForBackup, self).__init__(volume_properties, cli) - - def _execute(self): - LOG.debug('UnpairWaitForBackup start.') - - self._wait(True) - - -class UnpairWaitForRestore(UnpairWait): - def __init__(self, volume_properties, cli): - super(UnpairWaitForRestore, self).__init__(volume_properties, cli) - - self._rvldn = None - if ('rvldn' in volume_properties and - volume_properties['rvldn'] is not None): - self._rvldn = volume_properties['rvldn'] - - self._rvcapacity = None - if ('rvcapacity' in volume_properties and - volume_properties['rvcapacity'] is not None): - self._rvcapacity = volume_properties['rvcapacity'] - - def _execute(self): - LOG.debug('UnpairWaitForRestore start.') - - self._wait(True) - - if self._rvcapacity is not None: - try: - self._cli.expand(self._rvldn, self._rvcapacity) - except exception.CinderException: - with excutils.save_and_reraise_exception(): - LOG.debug('UnpairWaitForDDRRestore expand error. 
' - 'exception=%(exception)s, ' - 'MV = %(mv)s, RV = %(rv)s.', - {'exception': traceback.format_exc(), - 'mv': self._mvname, 'rv': self._rvname}) - - -class UnpairWaitForClone(UnpairWait): - def __init__(self, volume_properties, cli): - super(UnpairWaitForClone, self).__init__(volume_properties, cli) - - self._rvldn = None - if ('rvldn' in volume_properties and - volume_properties['rvldn'] is not None): - self._rvldn = volume_properties['rvldn'] - - self._rvcapacity = None - if ('rvcapacity' in volume_properties and - volume_properties['rvcapacity'] is not None): - self._rvcapacity = volume_properties['rvcapacity'] - - def _execute(self): - LOG.debug('UnpairWaitForClone start.') - - self._wait(True) - - if self._rvcapacity is not None: - try: - self._cli.expand(self._rvldn, self._rvcapacity) - except exception.CinderException: - with excutils.save_and_reraise_exception(): - LOG.debug('UnpairWaitForClone expand error. ' - 'exception=%(exception)s, ' - 'MV = %(mv)s, RV = %(rv)s.', - {'exception': traceback.format_exc(), - 'mv': self._mvname, 'rv': self._rvname}) - - -class UnpairWaitForMigrate(UnpairWait): - def __init__(self, volume_properties, cli): - super(UnpairWaitForMigrate, self).__init__(volume_properties, cli) - - def _execute(self): - LOG.debug('UnpairWaitForMigrate start.') - - self._wait(True) - - self._cli.unbind(self._volume_properties['mvname']) - self._cli.changeldname(None, self._volume_properties['mvname'], - self._volume_properties['rvname']) - - -class UnpairWaitForDDRBackup(UnpairWaitForBackup): - def __init__(self, volume_properties, cli): - super(UnpairWaitForDDRBackup, self).__init__(volume_properties, cli) - - def _execute(self): - LOG.debug('UnpairWaitForDDRBackup start.') - - self._wait(False) - - -class UnpairWaitForDDRRestore(UnpairWaitForRestore): - def __init__(self, volume_properties, cli): - super(UnpairWaitForDDRRestore, self).__init__(volume_properties, cli) - - self._prev_mvname = None - if ('prev_mvname' in volume_properties and - 
volume_properties['prev_mvname'] is not None): - self._prev_mvname = volume_properties['prev_mvname'][3:] - - def _execute(self): - LOG.debug('UnpairWaitForDDRRestore start.') - - self._wait(True) - - if self._rvcapacity is not None: - try: - self._cli.expand(self._rvldn, self._rvcapacity) - except exception.CinderException: - with excutils.save_and_reraise_exception(): - LOG.debug('UnpairWaitForDDRRestore expand error. ' - 'exception=%(exception)s, ' - 'MV = %(mv)s, RV = %(rv)s.', - {'exception': traceback.format_exc(), - 'mv': self._mvname, 'rv': self._rvname}) - - if self._prev_mvname is not None: - self._cli.setpair(self._prev_mvname, self._mvname) diff --git a/cinder/volume/drivers/nec/product.xml b/cinder/volume/drivers/nec/product.xml deleted file mode 100644 index 6bbbb1b76..000000000 --- a/cinder/volume/drivers/nec/product.xml +++ /dev/null @@ -1,27 +0,0 @@ - - - NEC - - 8192 - 8192 - 4096 - 4096 - 1024 - 1024 - 8192 - 8192 - 4096 - 4096 - 1024 - 1024 - 8192 - 8192 - 8192 - 4096 - 4096 - 4096 - 4096 - 1024 - 1024 - - diff --git a/cinder/volume/drivers/nec/volume.py b/cinder/volume/drivers/nec/volume.py deleted file mode 100644 index 78135c08c..000000000 --- a/cinder/volume/drivers/nec/volume.py +++ /dev/null @@ -1,102 +0,0 @@ -# -# Copyright (c) 2016 NEC Corporation. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -"""Drivers for M-Series Storage.""" - -from cinder import interface -from cinder.volume import driver -from cinder.volume.drivers.nec import volume_helper -from cinder.zonemanager import utils as fczm_utils - - -@interface.volumedriver -class MStorageISCSIDriver(volume_helper.MStorageDSVDriver, - driver.ISCSIDriver): - """M-Series Storage Snapshot iSCSI Driver.""" - - def __init__(self, *args, **kwargs): - super(MStorageISCSIDriver, self).__init__(*args, **kwargs) - self._set_config(self.configuration, self.host, - self.__class__.__name__) - - def create_export(self, context, volume, connector): - return self.iscsi_do_export(context, volume, connector) - - def ensure_export(self, context, volume): - pass - - def get_volume_stats(self, refresh=False): - return self.iscsi_get_volume_stats(refresh) - - def initialize_connection(self, volume, connector): - return self.iscsi_initialize_connection(volume, connector) - - def terminate_connection(self, volume, connector, **kwargs): - return self.iscsi_terminate_connection(volume, connector) - - def create_export_snapshot(self, context, snapshot, connector): - return self.iscsi_do_export_snapshot(context, snapshot, connector) - - def initialize_connection_snapshot(self, snapshot, connector, **kwargs): - return self.iscsi_initialize_connection_snapshot(snapshot, - connector, - **kwargs) - - def terminate_connection_snapshot(self, snapshot, connector, **kwargs): - return self.iscsi_terminate_connection_snapshot(snapshot, - connector, - **kwargs) - - -@interface.volumedriver -class MStorageFCDriver(volume_helper.MStorageDSVDriver, - driver.FibreChannelDriver): - """M-Series Storage Snapshot FC Driver.""" - - def __init__(self, *args, **kwargs): - super(MStorageFCDriver, self).__init__(*args, **kwargs) - self._set_config(self.configuration, self.host, - self.__class__.__name__) - - def create_export(self, context, volume, connector): - return self.fc_do_export(context, volume, connector) - - def ensure_export(self, context, 
volume): - pass - - def get_volume_stats(self, refresh=False): - return self.fc_get_volume_stats(refresh) - - @fczm_utils.add_fc_zone - def initialize_connection(self, volume, connector): - return self.fc_initialize_connection(volume, connector) - - @fczm_utils.remove_fc_zone - def terminate_connection(self, volume, connector, **kwargs): - return self.fc_terminate_connection(volume, connector) - - def create_export_snapshot(self, context, snapshot, connector): - return self.fc_do_export_snapshot(context, snapshot, connector) - - def initialize_connection_snapshot(self, snapshot, connector, **kwargs): - return self.fc_initialize_connection_snapshot(snapshot, - connector, - **kwargs) - - def terminate_connection_snapshot(self, snapshot, connector, **kwargs): - return self.fc_terminate_connection_snapshot(snapshot, - connector, - **kwargs) diff --git a/cinder/volume/drivers/nec/volume_common.py b/cinder/volume/drivers/nec/volume_common.py deleted file mode 100644 index fbd0658a7..000000000 --- a/cinder/volume/drivers/nec/volume_common.py +++ /dev/null @@ -1,919 +0,0 @@ -# -# Copyright (c) 2016 NEC Corporation. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -import errno -from lxml import etree -import os -import re -import six -import traceback - -from oslo_config import cfg -from oslo_log import log as logging -from oslo_utils import excutils -from oslo_utils import units - -from cinder import context -from cinder import exception -from cinder.i18n import _ -from cinder.volume import configuration -from cinder.volume.drivers.nec import cli -from cinder.volume.drivers.san import san -from cinder.volume import qos_specs -from cinder.volume import volume_types - - -LOG = logging.getLogger(__name__) - -FLAGS = cfg.CONF - -mstorage_opts = [ - cfg.IPOpt('nec_ismcli_fip', - default=None, - help='FIP address of M-Series Storage iSMCLI.'), - cfg.StrOpt('nec_ismcli_user', - default='', - help='User name for M-Series Storage iSMCLI.'), - cfg.StrOpt('nec_ismcli_password', - secret=True, - default='', - help='Password for M-Series Storage iSMCLI.'), - cfg.StrOpt('nec_ismcli_privkey', - default='', - help='Filename of RSA private key for ' - 'M-Series Storage iSMCLI.'), - cfg.StrOpt('nec_ldset', - default='', - help='M-Series Storage LD Set name for Compute Node.'), - cfg.StrOpt('nec_ldname_format', - default='LX:%s', - help='M-Series Storage LD name format for volumes.'), - cfg.StrOpt('nec_backup_ldname_format', - default='LX:%s', - help='M-Series Storage LD name format for snapshots.'), - cfg.StrOpt('nec_diskarray_name', - default='', - help='Diskarray name of M-Series Storage.'), - cfg.StrOpt('nec_ismview_dir', - default='/tmp/nec/cinder', - help='Output path of iSMview file.'), - cfg.StrOpt('nec_ldset_for_controller_node', - default='', - help='M-Series Storage LD Set name for Controller Node.'), - cfg.IntOpt('nec_ssh_pool_port_number', - default=22, - help='Port number of ssh pool.'), - cfg.IntOpt('nec_unpairthread_timeout', - default=3600, - help='Timeout value of Unpairthread.'), - cfg.IntOpt('nec_backend_max_ld_count', - default=1024, - help='Maximum number of managing sessions.'), - 
cfg.BoolOpt('nec_actual_free_capacity', - default=False, - help='Return actual free capacity.'), - cfg.BoolOpt('nec_ismview_alloptimize', - default=False, - help='Use legacy iSMCLI command with optimization.'), - cfg.ListOpt('nec_pools', - default=[], - help='M-Series Storage pool numbers list to be used.'), - cfg.ListOpt('nec_backup_pools', - default=[], - help='M-Series Storage backup pool number to be used.'), - cfg.BoolOpt('nec_queryconfig_view', - default=False, - help='Use legacy iSMCLI command.'), - cfg.IntOpt('nec_iscsi_portals_per_cont', - default=1, - help='Number of iSCSI portals.'), -] - -FLAGS.register_opts(mstorage_opts, group=configuration.SHARED_CONF_GROUP) - - -def convert_to_name(uuid): - alnum = "0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz" - num = int(uuid.replace(("-"), ""), 16) - - convertname = "" - while num != 0: - convertname = alnum[num % len(alnum)] + convertname - num = num - num % len(alnum) - num = num // len(alnum) - return convertname - - -def convert_to_id(value62): - alnum = "0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz" - length = len(value62) - - weight = 0 - value = 0 - index = 0 - for i in reversed(range(0, length)): - num = alnum.find(value62[i]) - if index != 0: - value += int(weight * (num)) - else: - value = num - index += 1 - weight = 62 ** index - - value = '%032x' % value - - uuid = value[0:8] - uuid += '-' - uuid += value[8:12] - uuid += '-' - uuid += value[12:16] - uuid += '-' - uuid += value[16:20] - uuid += '-' - uuid += value[20:] - - return uuid - - -class MStorageVolumeCommon(object): - """M-Series Storage volume common class.""" - - VERSION = '1.9.1' - WIKI_NAME = 'NEC_Cinder_CI' - - def do_setup(self, context): - self._context = context - - def check_for_setup_error(self): - if len(getattr(self._local_conf, 'nec_pools', [])) == 0: - raise exception.ParameterNotFound(param='nec_pools') - - def _set_config(self, configuration, host, driver_name): - self._configuration = 
configuration - self._host = host - self._driver_name = driver_name - self._numofld_per_pool = 1024 - - self._configuration.append_config_values(mstorage_opts) - self._configuration.append_config_values(san.san_opts) - self._config_group = self._configuration.config_group - - if self._config_group: - FLAGS.register_opts(mstorage_opts, group=self._config_group) - self._local_conf = FLAGS._get(self._config_group) - else: - FLAGS.register_opts(mstorage_opts) - self._local_conf = FLAGS - - self._check_flags() - self._properties = self._set_properties() - self._cli = self._properties['cli'] - - def _check_flags(self): - for flag in ['nec_ismcli_fip', 'nec_ismcli_user']: - if getattr(self._local_conf, flag, '') == '': - raise exception.ParameterNotFound(param=flag) - if (getattr(self._local_conf, 'nec_ismcli_password', '') == '' and - getattr(self._local_conf, 'nec_ismcli_privkey', '') == ''): - msg = _('nec_ismcli_password nor nec_ismcli_privkey') - raise exception.ParameterNotFound(param=msg) - - def _create_ismview_dir(self, - ismview_dir, - diskarray_name, - driver_name, - host): - """Create ismview directory.""" - filename = diskarray_name - if filename == '': - filename = driver_name + '_' + host - - ismview_path = os.path.join(ismview_dir, filename) - LOG.debug('ismview_path=%s.', ismview_path) - try: - if os.path.exists(ismview_path): - os.remove(ismview_path) - except OSError as e: - with excutils.save_and_reraise_exception() as ctxt: - if e.errno == errno.ENOENT: - ctxt.reraise = False - - try: - os.makedirs(ismview_dir) - except OSError as e: - with excutils.save_and_reraise_exception() as ctxt: - if e.errno == errno.EEXIST: - ctxt.reraise = False - - return ismview_path - - def get_conf(self, host): - """Get another host group configurations.""" - hostname = host['host'] - hostname = hostname[:hostname.rindex('#')] - if '@' in hostname: - group = hostname.split('@')[1] - FLAGS.register_opts(mstorage_opts, group=group) - conf = FLAGS._get(group) - else: - 
FLAGS.register_opts(mstorage_opts) - conf = FLAGS - return conf - - def get_conf_properties(self, conf=None): - if conf is None: - return self._properties - pool_pools = [] - for pool in getattr(conf, 'nec_pools', []): - if pool.endswith('h'): - pool_pools.append(int(pool[:-1], 16)) - else: - pool_pools.append(int(pool, 10)) - pool_backup_pools = [] - for pool in getattr(conf, 'nec_backup_pools', []): - if pool.endswith('h'): - pool_backup_pools.append(int(pool[:-1], 16)) - else: - pool_backup_pools.append(int(pool, 10)) - ldset_name = getattr(conf, 'nec_ldset', '') - ldset_controller_node_name = getattr(conf, - 'nec_ldset_for_controller_node', - '') - - return { - 'cli_fip': conf.nec_ismcli_fip, - 'cli_user': conf.nec_ismcli_user, - 'cli_password': conf.nec_ismcli_password, - 'cli_privkey': conf.nec_ismcli_privkey, - 'pool_pools': pool_pools, - 'pool_backup_pools': pool_backup_pools, - 'pool_actual_free_capacity': conf.nec_actual_free_capacity, - 'ldset_name': ldset_name, - 'ldset_controller_node_name': ldset_controller_node_name, - 'ld_name_format': conf.nec_ldname_format, - 'ld_backupname_format': conf.nec_backup_ldname_format, - 'ld_backend_max_count': conf.nec_backend_max_ld_count, - 'thread_timeout': conf.nec_unpairthread_timeout, - 'ismview_dir': conf.nec_ismview_dir, - 'ismview_alloptimize': conf.nec_ismview_alloptimize, - 'ssh_pool_port_number': conf.nec_ssh_pool_port_number, - 'diskarray_name': conf.nec_diskarray_name, - 'queryconfig_view': conf.nec_queryconfig_view, - 'portal_number': conf.nec_iscsi_portals_per_cont - } - - def _set_properties(self): - conf_properties = self.get_conf_properties(self._local_conf) - - ismview_path = self._create_ismview_dir( - self._local_conf.nec_ismview_dir, - self._local_conf.nec_diskarray_name, - self._driver_name, - self._host) - - vendor_name, _product_dict = self.get_oem_parameter() - - backend_name = self._configuration.safe_get('volume_backend_name') - ssh_timeout = self._configuration.safe_get('ssh_conn_timeout') 
- reserved_per = self._configuration.safe_get('reserved_percentage') - - conf_properties['ssh_conn_timeout'] = ssh_timeout - conf_properties['reserved_percentage'] = reserved_per - conf_properties['ismview_path'] = ismview_path - conf_properties['driver_name'] = self._driver_name - conf_properties['config_group'] = self._config_group - conf_properties['configuration'] = self._configuration - conf_properties['vendor_name'] = vendor_name - conf_properties['products'] = _product_dict - conf_properties['backend_name'] = backend_name - conf_properties['cli'] = cli.MStorageISMCLI(conf_properties) - - return conf_properties - - def get_oem_parameter(self): - product = os.path.join(os.path.dirname(__file__), 'product.xml') - try: - with open(product, 'r') as f: - xml = f.read() - root = etree.fromstring(xml) - vendor_name = root.xpath('./VendorName')[0].text - - product_dict = {} - product_map = root.xpath('./ProductMap/Product') - for s in product_map: - product_dict[s.attrib['Name']] = int(s.text, 10) - - return vendor_name, product_dict - except OSError as e: - with excutils.save_and_reraise_exception() as ctxt: - if e.errno == errno.ENOENT: - ctxt.reraise = False - raise exception.NotFound(_('%s not found.') % product) - - @staticmethod - def get_ldname(volid, volformat): - alnum = ('0123456789' - 'ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz') - ldname = "" - num = int(volid.replace(("-"), ""), 16) - while num != 0: - ldname = alnum[num % len(alnum)] + ldname - num = num - num % len(alnum) - num = num // len(alnum) - - return volformat % ldname - - def get_ldset(self, ldsets, metadata=None): - ldset = None - if metadata is not None and 'ldset' in metadata: - ldset_meta = metadata['ldset'] - LOG.debug('ldset(metadata)=%s.', ldset_meta) - for tldset in six.itervalues(ldsets): - if tldset['ldsetname'] == ldset_meta: - ldset = ldsets[ldset_meta] - LOG.debug('ldset information(metadata specified)=%s.', - ldset) - break - if ldset is None: - msg = _('Logical Disk 
Set could not be found.') - LOG.error(msg) - raise exception.NotFound(msg) - elif self._properties['ldset_name'] == '': - nldset = len(ldsets) - if nldset == 0: - msg = _('Logical Disk Set could not be found.') - raise exception.NotFound(msg) - else: - ldset = None - else: - if self._properties['ldset_name'] not in ldsets: - msg = (_('Logical Disk Set `%s` could not be found.') % - self._properties['ldset_name']) - raise exception.NotFound(msg) - ldset = ldsets[self._properties['ldset_name']] - return ldset - - def get_pool_capacity(self, pools, ldsets): - pools = [pool for (pn, pool) in pools.items() - if len(self._properties['pool_pools']) == 0 or - pn in self._properties['pool_pools']] - - free_capacity_gb = 0 - total_capacity_gb = 0 - for pool in pools: - # Convert to GB. - tmp_total = int(pool['total'] // units.Gi) - tmp_free = int(pool['free'] // units.Gi) - - if free_capacity_gb < tmp_free: - total_capacity_gb = tmp_total - free_capacity_gb = tmp_free - - return {'total_capacity_gb': total_capacity_gb, - 'free_capacity_gb': free_capacity_gb} - - def set_backend_max_ld_count(self, xml, root): - section = root.xpath('./CMD_REQUEST')[0] - version = section.get('version').replace('Version ', '')[0:3] - version = float(version) - if version < 9.1: - if 512 < self._properties['ld_backend_max_count']: - self._properties['ld_backend_max_count'] = 512 - else: - if 1024 < self._properties['ld_backend_max_count']: - self._properties['ld_backend_max_count'] = 1024 - - def get_diskarray_max_ld_count(self, xml, root): - max_ld_count = 0 - for section in root.xpath( - './' - 'CMD_REQUEST/' - 'CHAPTER[@name="Disk Array"]/' - 'OBJECT[@name="Disk Array"]/' - 'SECTION[@name="Disk Array Detail Information"]'): - unit = section.find('./UNIT[@name="Product ID"]') - if unit is None: - msg = (_('UNIT[@name="Product ID"] not found. 
' - 'line=%(line)d out="%(out)s"') % - {'line': section.sourceline, 'out': xml}) - LOG.error(msg) - raise exception.VolumeBackendAPIException(data=msg) - else: - product_id = unit.text - if product_id in self._properties['products']: - max_ld_count = self._properties['products'][product_id] - else: - max_ld_count = 8192 - LOG.debug('UNIT[@name="Product ID"] unknown id. ' - 'productId=%s', product_id) - LOG.debug('UNIT[@name="Product ID"] max_ld_count=%d.', - max_ld_count) - return max_ld_count - - def get_pool_config(self, xml, root): - pools = {} - for xmlobj in root.xpath('./' - 'CMD_REQUEST/' - 'CHAPTER[@name="Pool"]/' - 'OBJECT[@name="Pool"]'): - section = xmlobj.find('./SECTION[@name="Pool Detail Information"]') - if section is None: - msg = (_('SECTION[@name="Pool Detail Information"] ' - 'not found. line=%(line)d out="%(out)s"') % - {'line': xmlobj.sourceline, 'out': xml}) - LOG.error(msg) - raise exception.VolumeBackendAPIException(data=msg) - unit = section.find('./UNIT[@name="Pool No.(h)"]') - if unit is None: - msg = (_('UNIT[@name="Pool No.(h)"] not found. ' - 'line=%(line)d out="%(out)s"') % - {'line': section.sourceline, 'out': xml}) - LOG.error(msg) - raise exception.VolumeBackendAPIException(data=msg) - pool_num = int(unit.text, 16) - unit = section.find('UNIT[@name="Pool Capacity"]') - if unit is None: - msg = (_('UNIT[@name="Pool Capacity"] not found. ' - 'line=%(line)d out="%(out)s"') % - {'line': section.sourceline, 'out': xml}) - LOG.error(msg) - raise exception.VolumeBackendAPIException(data=msg) - total = int(unit.text, 10) - unit = section.find('UNIT[@name="Free Pool Capacity"]') - if unit is None: - msg = (_('UNIT[@name="Free Pool Capacity"] not found. 
' - 'line=%(line)d out="%(out)s"') % - {'line': section.sourceline, 'out': xml}) - LOG.error(msg) - raise exception.VolumeBackendAPIException(data=msg) - free = int(unit.text, 10) - if self._properties['pool_actual_free_capacity']: - unit = section.find('UNIT[@name="Used Pool Capacity"]') - if unit is None: - msg = (_('UNIT[@name="Used Pool Capacity"] not found. ' - 'line=%(line)d out="%(out)s"') % - {'line': section.sourceline, 'out': xml}) - LOG.error(msg) - raise exception.VolumeBackendAPIException(data=msg) - used = int(unit.text, 10) - for section in xmlobj.xpath('./SECTION[@name=' - '"Virtual Capacity Pool ' - 'Information"]'): - unit = section.find('UNIT[@name="Actual Capacity"]') - if unit is None: - msg = (_('UNIT[@name="Actual Capacity"] not found. ' - 'line=%(line)d out="%(out)s"') % - {'line': section.sourceline, 'out': xml}) - LOG.error(msg) - raise exception.VolumeBackendAPIException(data=msg) - total = int(unit.text, 10) - free = total - used - pool = {'pool_num': pool_num, - 'total': total, - 'free': free, - 'ld_list': []} - pools[pool_num] = pool - return pools - - def get_ld_config(self, xml, root, pools): - lds = {} - used_ldns = [] - for section in root.xpath('./' - 'CMD_REQUEST/' - 'CHAPTER[@name="Logical Disk"]/' - 'OBJECT[@name="Logical Disk"]/' - 'SECTION[@name="LD Detail Information"]'): - unit = section.find('./UNIT[@name="LDN(h)"]') - if unit is None: - msg = (_('UNIT[@name="LDN(h)"] not found. ' - 'line=%(line)d out="%(out)s"') % - {'line': section.sourceline, 'out': xml}) - LOG.error(msg) - raise exception.VolumeBackendAPIException(data=msg) - ldn = int(unit.text, 16) - unit = section.find('./UNIT[@name="OS Type"]') - if unit is None: - msg = (_('UNIT[@name="OS Type"] not found. 
' - 'line=%(line)d out="%(out)s"') % - {'line': section.sourceline, 'out': xml}) - LOG.error(msg) - raise exception.VolumeBackendAPIException(data=msg) - ostype = unit.text if unit.text is not None else '' - unit = section.find('./UNIT[@name="LD Name"]') - if unit is None: - msg = (_('UNIT[@name="LD Name"] not found. ' - 'line=%(line)d out="%(out)s"') % - {'line': section.sourceline, 'out': xml}) - LOG.error(msg) - raise exception.VolumeBackendAPIException(data=msg) - ldname = ostype + ':' + unit.text - unit = section.find('./UNIT[@name="Pool No.(h)"]') - if unit is None: - msg = (_('UNIT[@name="Pool No.(h)"] not found. ' - 'line=%(line)d out="%(out)s"') % - {'line': section.sourceline, 'out': xml}) - LOG.error(msg) - raise exception.VolumeBackendAPIException(data=msg) - pool_num = int(unit.text, 16) - - unit = section.find('./UNIT[@name="LD Capacity"]') - if unit is None: - msg = (_('UNIT[@name="LD Capacity"] not found. ' - 'line=%(line)d out="%(out)s"') % - {'line': section.sourceline, 'out': xml}) - LOG.error(msg) - raise exception.VolumeBackendAPIException(data=msg) - - # byte capacity transform GB capacity. - ld_capacity = int(unit.text, 10) // units.Gi - - unit = section.find('./UNIT[@name="RPL Attribute"]') - if unit is None: - msg = (_('UNIT[@name="RPL Attribute"] not found. ' - 'line=%(line)d out="%(out)s"') % - {'line': section.sourceline, 'out': xml}) - LOG.error(msg) - raise exception.VolumeBackendAPIException(data=msg) - rplatr = unit.text - - unit = section.find('./UNIT[@name="Purpose"]') - if unit is None: - msg = (_('UNIT[@name="Purpose"] not found. 
' - 'line=%(line)d out="%(out)s"') % - {'line': section.sourceline, 'out': xml}) - LOG.error(msg) - raise exception.VolumeBackendAPIException(data=msg) - purpose = unit.text - - ld = {'ldname': ldname, - 'ldn': ldn, - 'pool_num': pool_num, - 'ld_capacity': ld_capacity, - 'RPL Attribute': rplatr, - 'Purpose': purpose} - pools[pool_num]['ld_list'].append(ld) - lds[ldname] = ld - used_ldns.append(ldn) - return lds, used_ldns - - def get_iscsi_ldset_config(self, xml, root): - ldsets = {} - for xmlobj in root.xpath('./' - 'CMD_REQUEST/' - 'CHAPTER[@name="Access Control"]/' - 'OBJECT[@name="LD Set(iSCSI)"]'): - ldsetlds = {} - portals = [] - initiators = [] - for unit in xmlobj.xpath('./SECTION[@name="Portal"]/' - 'UNIT[@name="Portal"]'): - if not unit.text.startswith('0.0.0.0:'): - portals.append(unit.text) - - for unit in xmlobj.xpath('./SECTION[@name="Initiator List"]/' - 'UNIT[@name="Initiator List"]'): - initiators.append(unit.text) - - section = xmlobj.find('./SECTION[@name="LD Set(iSCSI)' - ' Information"]') - if section is None: - return ldsets - unit = section.find('./UNIT[@name="Platform"]') - if unit is None: - msg = (_('UNIT[@name="Platform"] not found. ' - 'line=%(line)d out="%(out)s"') % - {'line': section.sourceline, 'out': xml}) - LOG.error(msg) - raise exception.VolumeBackendAPIException(data=msg) - platform = unit.text - unit = section.find('./UNIT[@name="LD Set Name"]') - if unit is None: - msg = (_('UNIT[@name="LD Set Name"] not found. ' - 'line=%(line)d out="%(out)s"') % - {'line': section.sourceline, 'out': xml}) - LOG.error(msg) - raise exception.VolumeBackendAPIException(data=msg) - ldsetname = platform + ':' + unit.text - unit = section.find('./UNIT[@name="Target Mode"]') - if unit is None: - msg = (_('UNIT[@name="Target Mode"] not found. 
' - 'line=%(line)d out="%(out)s"') % - {'line': section.sourceline, 'out': xml}) - LOG.error(msg) - raise exception.VolumeBackendAPIException(data=msg) - tmode = unit.text - if tmode == 'Normal': - unit = section.find('./UNIT[@name="Target Name"]') - if unit is None: - msg = (_('UNIT[@name="Target Name"] not found. ' - 'line=%(line)d out="%(out)s"') % - {'line': section.sourceline, 'out': xml}) - LOG.error(msg) - raise exception.VolumeBackendAPIException(data=msg) - iqn = unit.text - for section in xmlobj.xpath('./SECTION[@name="LUN/LD List"]'): - unit = section.find('./UNIT[@name="LDN(h)"]') - if unit is None: - msg = (_('UNIT[@name="LDN(h)"] not found. ' - 'line=%(line)d out="%(out)s"') % - {'line': section.sourceline, 'out': xml}) - LOG.error(msg) - raise exception.VolumeBackendAPIException(data=msg) - ldn = int(unit.text, 16) - unit = section.find('./UNIT[@name="LUN(h)"]') - if unit is None: - msg = (_('UNIT[@name="LUN(h)"] not found. ' - 'line=%(line)d out="%(out)s"') % - {'line': section.sourceline, 'out': xml}) - LOG.error(msg) - raise exception.VolumeBackendAPIException(data=msg) - lun = int(unit.text, 16) - ld = {'ldn': ldn, - 'lun': lun, - 'iqn': iqn} - ldsetlds[ldn] = ld - elif tmode == 'Multi-Target': - for section in xmlobj.xpath('./SECTION[@name=' - '"Target Information For ' - 'Multi-Target Mode"]'): - unit = section.find('./UNIT[@name="Target Name"]') - if unit is None: - msg = (_('UNIT[@name="Target Name"] not found. ' - 'line=%(line)d out="%(out)s"') % - {'line': section.sourceline, 'out': xml}) - LOG.error(msg) - raise exception.VolumeBackendAPIException(data=msg) - iqn = unit.text - unit = section.find('./UNIT[@name="LDN(h)"]') - if unit is None: - msg = (_('UNIT[@name="LDN(h)"] not found. 
' - 'line=%(line)d out="%(out)s"') % - {'line': section.sourceline, 'out': xml}) - LOG.error(msg) - raise exception.VolumeBackendAPIException(data=msg) - if unit.text.startswith('-'): - continue - ldn = int(unit.text, 16) - unit = section.find('./UNIT[@name="LUN(h)"]') - if unit is None: - msg = (_('UNIT[@name="LUN(h)"] not found. ' - 'line=%(line)d out="%(out)s"') % - {'line': section.sourceline, 'out': xml}) - LOG.error(msg) - raise exception.VolumeBackendAPIException(data=msg) - if unit.text.startswith('-'): - continue - lun = int(unit.text, 16) - ld = {'ldn': ldn, - 'lun': lun, - 'iqn': iqn} - ldsetlds[ldn] = ld - else: - LOG.debug('`%(mode)s` Unknown Target Mode. ' - 'line=%(line)d out="%(out)s"', - {'mode': tmode, 'line': unit.sourceline, 'out': xml}) - ldset = {'ldsetname': ldsetname, - 'protocol': 'iSCSI', - 'portal_list': portals, - 'lds': ldsetlds, - 'initiator_list': initiators} - ldsets[ldsetname] = ldset - return ldsets - - def get_fc_ldset_config(self, xml, root): - ldsets = {} - for xmlobj in root.xpath('./' - 'CMD_REQUEST/' - 'CHAPTER[@name="Access Control"]/' - 'OBJECT[@name="LD Set(FC)"]'): - ldsetlds = {} - section = xmlobj.find('./SECTION[@name="LD Set(FC)' - ' Information"]') - if section is None: - return ldsets - unit = section.find('./UNIT[@name="Platform"]') - if unit is None: - msg = (_('UNIT[@name="Platform"] not found. ' - 'line=%(line)d out="%(out)s"') % - {'line': section.sourceline, 'out': xml}) - LOG.error(msg) - raise exception.VolumeBackendAPIException(data=msg) - platform = unit.text - unit = section.find('./UNIT[@name="LD Set Name"]') - if unit is None: - msg = (_('UNIT[@name="LD Set Name"] not found. 
' - 'line=%(line)d out="%(out)s"') % - {'line': section.sourceline, 'out': xml}) - LOG.error(msg) - raise exception.VolumeBackendAPIException(data=msg) - ldsetname = platform + ':' + unit.text - wwpns = [] - ports = [] - for section in xmlobj.xpath('./SECTION[@name="Path List"]'): - unit = section.find('./UNIT[@name="Path"]') - if unit is None: - msg = (_('UNIT[@name="Path"] not found. ' - 'line=%(line)d out="%(out)s"') % - {'line': section.sourceline, 'out': xml}) - LOG.error(msg) - raise exception.VolumeBackendAPIException(data=msg) - if unit.text.find('(') != -1: - ports.append(unit.text) - else: - wwpns.append(unit.text) - for section in xmlobj.xpath('./SECTION[@name="LUN/LD List"]'): - unit = section.find('./UNIT[@name="LDN(h)"]') - if unit is None: - msg = (_('UNIT[@name="LDN(h)"] not found. ' - 'line=%(line)d out="%(out)s"') % - {'line': section.sourceline, 'out': xml}) - LOG.error(msg) - raise exception.VolumeBackendAPIException(data=msg) - ldn = int(unit.text, 16) - unit = section.find('./UNIT[@name="LUN(h)"]') - if unit is None: - msg = (_('UNIT[@name="LUN(h)"] not found. ' - 'line=%(line)d out="%(out)s"') % - {'line': section.sourceline, 'out': xml}) - LOG.error(msg) - raise exception.VolumeBackendAPIException(data=msg) - lun = int(unit.text, 16) - ld = {'ldn': ldn, - 'lun': lun} - ldsetlds[ldn] = ld - ldset = {'ldsetname': ldsetname, - 'lds': ldsetlds, - 'protocol': 'FC', - 'wwpn': wwpns, - 'port': ports} - ldsets[ldsetname] = ldset - return ldsets - - def get_hostport_config(self, xml, root): - hostports = {} - for section in root.xpath('./' - 'CMD_REQUEST/' - 'CHAPTER[@name="Controller"]/' - 'OBJECT[@name="Host Port"]/' - 'SECTION[@name="Host Director' - '/Host Port Information"]'): - unit = section.find('./UNIT[@name="Port No.(h)"]') - if unit is None: - msg = (_('UNIT[@name="Port No.(h)"] not found. 
' - 'line=%(line)d out="%(out)s"') % - {'line': section.sourceline, 'out': xml}) - LOG.error(msg) - raise exception.VolumeBackendAPIException(data=msg) - units = unit.text.split('-') - director = int(units[0], 16) - port = int(units[1], 16) - unit = section.find('./UNIT[@name="IP Address"]') - if unit is None: - unit = section.find('./UNIT[@name="WWPN"]') - if unit is None: - msg = (_('UNIT[@name="WWPN"] not found. ' - 'line=%(line)d out="%(out)s"') % - {'line': section.sourceline, 'out': xml}) - LOG.error(msg) - raise exception.VolumeBackendAPIException(data=msg) - wwpn = unit.text - hostport = { - 'director': director, - 'port': port, - 'wwpn': wwpn, - 'protocol': 'FC', - } - else: - ip = unit.text - if ip == '0.0.0.0': - continue - - hostport = { - 'director': director, - 'port': port, - 'ip': ip, - 'protocol': 'iSCSI', - } - if director not in hostports: - hostports[director] = [] - hostports[director].append(hostport) - return hostports - - def configs(self, xml): - root = etree.fromstring(xml) - pools = self.get_pool_config(xml, root) - lds, used_ldns = self.get_ld_config(xml, root, pools) - iscsi_ldsets = self.get_iscsi_ldset_config(xml, root) - fc_ldsets = self.get_fc_ldset_config(xml, root) - hostports = self.get_hostport_config(xml, root) - diskarray_max_ld_count = self.get_diskarray_max_ld_count(xml, root) - - self.set_backend_max_ld_count(xml, root) - - ldsets = {} - ldsets.update(iscsi_ldsets) - ldsets.update(fc_ldsets) - - return pools, lds, ldsets, used_ldns, hostports, diskarray_max_ld_count - - def get_xml(self): - ismview_path = self._properties['ismview_path'] - if os.path.exists(ismview_path) and os.path.isfile(ismview_path): - with open(ismview_path, 'r') as f: - xml = f.read() - LOG.debug('loaded from %s.', ismview_path) - else: - xml = self._cli.view_all(ismview_path, False, False) - return xml - - def parse_xml(self): - try: - xml = self.get_xml() - return self.configs(xml) - except Exception: - LOG.debug('parse_xml Unexpected error. 
exception=%s', - traceback.format_exc()) - xml = self._cli.view_all(self._properties['ismview_path'], False) - return self.configs(xml) - - def get_volume_type_qos_specs(self, volume): - specs = {} - - ctxt = context.get_admin_context() - type_id = volume.volume_type_id - if type_id is not None: - volume_type = volume_types.get_volume_type(ctxt, type_id) - - qos_specs_id = volume_type.get('qos_specs_id') - if qos_specs_id is not None: - specs = qos_specs.get_qos_specs(ctxt, qos_specs_id)['specs'] - - LOG.debug('get_volume_type_qos_specs ' - 'volume_type=%(volume_type)s, ' - 'qos_specs_id=%(qos_spec_id)s ' - 'specs=%(specs)s', - {'volume_type': volume_type, - 'qos_spec_id': qos_specs_id, - 'specs': specs}) - return specs - - def check_io_parameter(self, specs): - if ('upperlimit' not in specs and - 'lowerlimit' not in specs and - 'upperreport' not in specs): - specs['upperlimit'] = None - specs['lowerlimit'] = None - specs['upperreport'] = None - LOG.debug('qos parameter not found.') - else: - if 'upperlimit' in specs and specs['upperlimit'] is not None: - if self.validates_number(specs['upperlimit']) is True: - upper_limit = int(specs['upperlimit'], 10) - if ((upper_limit != 0) and - ((upper_limit < 10) or (upper_limit > 1000000))): - raise exception.InvalidConfigurationValue( - value=upper_limit, option='upperlimit') - else: - raise exception.InvalidConfigurationValue( - value=specs['upperlimit'], option='upperlimit') - else: - specs['upperlimit'] = None - - if 'lowerlimit' in specs and specs['lowerlimit'] is not None: - if self.validates_number(specs['lowerlimit']) is True: - lower_limit = int(specs['lowerlimit'], 10) - if (lower_limit != 0 and (lower_limit < 10 or - lower_limit > 1000000)): - raise exception.InvalidConfigurationValue( - value=lower_limit, option='lowerlimit') - else: - raise exception.InvalidConfigurationValue( - value=specs['lowerlimit'], option='lowerlimit') - else: - specs['lowerlimit'] = None - - if 'upperreport' in specs: - if 
specs['upperreport'] not in ['on', 'off']: - LOG.debug('Illegal arguments. ' - 'upperreport is not on or off.' - 'upperreport=%s', specs['upperreport']) - specs['upperreport'] = None - else: - specs['upperreport'] = None - - def validates_number(self, value): - return re.match(r'^(?![-+]0+$)[-+]?([1-9][0-9]*)?[0-9](\.[0-9]+)?$', - '%s' % value) and True or False diff --git a/cinder/volume/drivers/nec/volume_helper.py b/cinder/volume/drivers/nec/volume_helper.py deleted file mode 100644 index a3251ed85..000000000 --- a/cinder/volume/drivers/nec/volume_helper.py +++ /dev/null @@ -1,1764 +0,0 @@ -# -# Copyright (c) 2016 NEC Corporation. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -import random -import six -import traceback - -from oslo_log import log as logging -from oslo_utils import excutils -from oslo_utils import units - -from cinder import coordination -from cinder import exception -from cinder.i18n import _ -from cinder.volume.drivers.nec import cli -from cinder.volume.drivers.nec import volume_common - - -LOG = logging.getLogger(__name__) - - -class MStorageDriver(volume_common.MStorageVolumeCommon): - """M-Series Storage helper class.""" - - def _convert_id2name(self, volume): - ldname = (self.get_ldname(volume.id, - self._properties['ld_name_format'])) - return ldname - - def _convert_id2snapname(self, volume): - ldname = (self.get_ldname(volume.id, - self._properties['ld_backupname_format'])) - return ldname - - def _convert_id2migratename(self, volume): - ldname = self._convert_id2name(volume) - ldname = ldname + '_m' - return ldname - - def _convert_id2name_in_migrate(self, volume): - """If LD has migrate_status, get LD name from source LD UUID.""" - LOG.debug('migration_status:%s', volume.migration_status) - migstat = volume.migration_status - if migstat is not None and 'target:' in migstat: - index = migstat.find('target:') - if index != -1: - migstat = migstat[len('target:'):] - ldname = (self.get_ldname(migstat, - self._properties['ld_name_format'])) - else: - ldname = (self.get_ldname(volume.id, - self._properties['ld_name_format'])) - - LOG.debug('ldname=%s.', ldname) - return ldname - - def _select_ldnumber(self, used_ldns, max_ld_count): - """Pick up unused LDN.""" - for ldn in range(0, max_ld_count + 1): - if ldn not in used_ldns: - break - if ldn > max_ld_count - 1: - msg = _('All Logical Disk Numbers are used. 
' - 'No more volumes can be created.') - raise exception.VolumeBackendAPIException(data=msg) - return ldn - - def _return_poolnumber(self, nominated_pools): - """Select pool form nominated pools.""" - selected_pool = -1 - min_ldn = 0 - for pool in nominated_pools: - nld = len(pool['ld_list']) - if (nld < self._numofld_per_pool and - (selected_pool == -1 or min_ldn > nld)): - selected_pool = pool['pool_num'] - min_ldn = nld - if selected_pool < 0: - msg = _('No available pools found.') - raise exception.VolumeBackendAPIException(data=msg) - return selected_pool - - def _select_leastused_poolnumber(self, volume, pools, - xml, option=None): - """Pick up least used pool.""" - size = volume.size * units.Gi - pools = [pool for (pn, pool) in pools.items() - if pool['free'] >= size and - (len(self._properties['pool_pools']) == 0 or - pn in self._properties['pool_pools'])] - return self._return_poolnumber(pools) - - def _select_migrate_poolnumber(self, volume, pools, xml, option): - """Pick up migration target pool.""" - tmpPools, lds, ldsets, used_ldns, hostports, max_ld_count = ( - self.configs(xml)) - ldname = self.get_ldname(volume.id, - self._properties['ld_name_format']) - ld = lds[ldname] - temp_conf_properties = self.get_conf_properties(option) - - size = volume.size * units.Gi - pools = [pool for (pn, pool) in pools.items() - if pool['free'] >= size and - (len(temp_conf_properties['pool_pools']) == 0 or - pn in temp_conf_properties['pool_pools'])] - - selected_pool = self._return_poolnumber(pools) - if selected_pool == ld['pool_num']: - # it is not necessary to create new volume. 
- selected_pool = -1 - return selected_pool - - def _select_dsv_poolnumber(self, volume, pools, option=None): - """Pick up backup pool for DSV.""" - pools = [pool for (pn, pool) in pools.items() - if pn in self._properties['pool_backup_pools']] - return self._return_poolnumber(pools) - - def _select_ddr_poolnumber(self, volume, pools, xml, option): - """Pick up backup pool for DDR.""" - size = option * units.Gi - pools = [pool for (pn, pool) in pools.items() - if pool['free'] >= size and - pn in self._properties['pool_backup_pools']] - return self._return_poolnumber(pools) - - def _select_volddr_poolnumber(self, volume, pools, xml, option): - """Pick up backup pool for DDR.""" - size = option * units.Gi - pools = [pool for (pn, pool) in pools.items() - if pool['free'] >= size and - pn in self._properties['pool_pools']] - return self._return_poolnumber(pools) - - def _bind_ld(self, volume, capacity, validator, - nameselector, poolselector, option=None): - return self._sync_bind_ld(volume, capacity, validator, - nameselector, poolselector, - self._properties['diskarray_name'], - option) - - @coordination.synchronized('mstorage_bind_execute_{diskarray_name}') - def _sync_bind_ld(self, volume, capacity, validator, nameselector, - poolselector, diskarray_name, option=None): - """Get storage state and bind ld. - - volume: ld information - capacity: capacity in GB - validator: validate method(volume, xml) - nameselector: select ld name method(volume) - poolselector: select ld location method(volume, pools) - diskarray_name: target diskarray name - option: optional info - """ - LOG.debug('_bind_ld Start.') - xml = self._cli.view_all(self._properties['ismview_path']) - pools, lds, ldsets, used_ldns, hostports, max_ld_count = ( - self.configs(xml)) - - # execute validator function. 
- if validator is not None: - result = validator(volume, xml) - if result is False: - msg = _('Invalid bind Logical Disk info.') - raise exception.VolumeBackendAPIException(data=msg) - - # generate new ld name. - ldname = nameselector(volume) - # pick up least used pool and unused LDN. - selected_pool = poolselector(volume, pools, xml, option) - selected_ldn = self._select_ldnumber(used_ldns, max_ld_count) - if selected_pool < 0 or selected_ldn < 0: - LOG.debug('NOT necessary LD bind. ' - 'Name=%(name)s ' - 'Size=%(size)dGB ' - 'LDN=%(ldn)04xh ' - 'Pool=%(pool)04xh.', - {'name': ldname, - 'size': capacity, - 'ldn': selected_ldn, - 'pool': selected_pool}) - return ldname, selected_ldn, selected_pool - - # bind LD. - retnum, errnum = (self._cli.ldbind(ldname, - selected_pool, - selected_ldn, - capacity)) - if retnum is False: - if 'iSM31077' in errnum: - msg = _('Logical Disk number is duplicated (%s).') % errnum - raise exception.VolumeBackendAPIException(data=msg) - else: - msg = _('Failed to bind Logical Disk (%s).') % errnum - raise exception.VolumeBackendAPIException(data=msg) - - LOG.debug('LD bound. 
Name=%(name)s Size=%(size)dGB ' - 'LDN=%(ldn)04xh Pool=%(pool)04xh.', - {'name': ldname, 'size': capacity, - 'ldn': selected_ldn, 'pool': selected_pool}) - return ldname, selected_ldn, selected_pool - - def _validate_ld_exist(self, lds, vol_id, name_format): - ldname = self.get_ldname(vol_id, name_format) - if ldname not in lds: - msg = _('Logical Disk `%s` could not be found.') % ldname - LOG.error(msg) - raise exception.NotFound(msg) - return ldname - - def _validate_iscsildset_exist(self, ldsets, connector, metadata=None): - ldset = self.get_ldset(ldsets, metadata) - if ldset is None: - for tldset in six.itervalues(ldsets): - if 'initiator_list' not in tldset: - continue - n = tldset['initiator_list'].count(connector['initiator']) - if n > 0: - ldset = tldset - break - if ldset is None: - msg = _('Appropriate Logical Disk Set could not be found.') - raise exception.NotFound(msg) - if len(ldset['portal_list']) < 1: - msg = (_('Logical Disk Set `%s` has no portal.') % - ldset['ldsetname']) - raise exception.NotFound(msg) - return ldset - - def _validate_fcldset_exist(self, ldsets, connector, metadata=None): - ldset = self.get_ldset(ldsets, metadata) - if ldset is None: - for conect in connector['wwpns']: - length = len(conect) - findwwpn = '-'.join([conect[i:i + 4] - for i in range(0, length, 4)]) - findwwpn = findwwpn.upper() - for tldset in six.itervalues(ldsets): - if 'wwpn' in tldset and findwwpn in tldset['wwpn']: - ldset = tldset - break - if ldset is not None: - break - if ldset is None: - msg = _('Appropriate Logical Disk Set could not be found.') - raise exception.NotFound(msg) - return ldset - - def _enumerate_iscsi_portals(self, hostports, ldset, prefered_director=0): - nominated = [] - for director in [prefered_director, 1 - prefered_director]: - if director not in hostports: - continue - dirportal = [] - for port in hostports[director]: - if not port['protocol'].lower() == 'iscsi': - continue - for portal in ldset['portal_list']: - if 
portal.startswith(port['ip'] + ':'): - dirportal.append(portal) - break - if (self._properties['portal_number'] > 0 and - len(dirportal) > self._properties['portal_number']): - nominated.extend(random.sample( - dirportal, self._properties['portal_number'])) - else: - nominated.extend(dirportal) - - if len(nominated) == 0: - raise exception.NotFound( - _('No portal matches to any host ports.')) - - return nominated - - def create_volume(self, volume): - msgparm = ('Volume ID = %(id)s, Size = %(size)dGB' - % {'id': volume.id, 'size': volume.size}) - try: - self._create_volume(volume) - LOG.info('Created Volume (%s)', msgparm) - except exception.CinderException as e: - with excutils.save_and_reraise_exception(): - LOG.warning('Failed to Create Volume (%(msgparm)s) ' - '(%(exception)s)', - {'msgparm': msgparm, 'exception': e}) - - def _create_volume(self, volume): - LOG.debug('_create_volume Start.') - - # select ld number and LD bind. - (ldname, - ldn, - selected_pool) = self._bind_ld(volume, - volume.size, - None, - self._convert_id2name_in_migrate, - self._select_leastused_poolnumber) - - # check io limit. - specs = self.get_volume_type_qos_specs(volume) - self.check_io_parameter(specs) - # set io limit. - self._cli.set_io_limit(ldname, specs) - - LOG.debug('LD bound. ' - 'Name=%(name)s ' - 'Size=%(size)dGB ' - 'LDN=%(ldn)04xh ' - 'Pool=%(pool)04xh ' - 'Specs=%(specs)s.', - {'name': ldname, - 'size': volume.size, - 'ldn': ldn, - 'pool': selected_pool, - 'specs': specs}) - - def _can_extend_capacity(self, new_size, pools, lds, ld): - rvs = {} - ld_count_in_pool = {} - if ld['RPL Attribute'] == 'MV': - pair_lds = self._cli.get_pair_lds(ld['ldname'], lds) - for (ldn, pair_ld) in pair_lds.items(): - rv_name = pair_ld['ldname'] - pool_number = pair_ld['pool_num'] - ldn = pair_ld['ldn'] - rvs[ldn] = pair_ld - # check rv status. 
- query_status = self._cli.query_MV_RV_status(rv_name[3:], 'RV') - if query_status != 'separated': - msg = (_('Specified Logical Disk %s has been copied.') % - rv_name) - LOG.error(msg) - raise exception.VolumeBackendAPIException(data=msg) - # get pool number. - if pool_number in ld_count_in_pool: - ld_count_in_pool[pool_number].append(ldn) - else: - ld_count_in_pool[pool_number] = [ldn] - - # check pool capacity. - for (pool_number, tmp_ldn_list) in ld_count_in_pool.items(): - ld_capacity = ( - ld['ld_capacity'] * units.Gi) - new_size_byte = new_size * units.Gi - size_increase = new_size_byte - ld_capacity - pool = pools[pool_number] - ld_count = len(tmp_ldn_list) - if pool['free'] < size_increase * ld_count: - msg = (_('Not enough pool capacity. ' - 'pool_number=%(pool)d, size_increase=%(sizeinc)d') % - {'pool': pool_number, - 'sizeinc': size_increase * ld_count}) - LOG.error(msg) - raise exception.VolumeBackendAPIException(data=msg) - - return rvs - - def extend_volume(self, volume, new_size): - msgparm = ('Volume ID = %(id)s, New Size = %(newsize)dGB, ' - 'Old Size = %(oldsize)dGB' - % {'id': volume.id, 'newsize': new_size, - 'oldsize': volume.size}) - try: - self._extend_volume(volume, new_size) - LOG.info('Extended Volume (%s)', msgparm) - except exception.CinderException as e: - with excutils.save_and_reraise_exception(): - LOG.warning('Failed to Extend Volume (%(msgparm)s) ' - '(%(exception)s)', - {'msgparm': msgparm, 'exception': e}) - - def _extend_volume(self, volume, new_size): - LOG.debug('_extend_volume(Volume ID = %(id)s, ' - 'new_size = %(size)s) Start.', - {'id': volume.id, 'size': new_size}) - - xml = self._cli.view_all(self._properties['ismview_path']) - pools, lds, ldsets, used_ldns, hostports, max_ld_count = ( - self.configs(xml)) - - # get volume. - ldname = self._validate_ld_exist( - lds, volume.id, self._properties['ld_name_format']) - ld = lds[ldname] - ldn = ld['ldn'] - - # check pools capacity. 
- rvs = self._can_extend_capacity(new_size, pools, lds, ld) - - # volume expand. - self._cli.expand(ldn, new_size) - - # rv expand. - if ld['RPL Attribute'] == 'MV': - # ld expand. - for (ldn, rv) in rvs.items(): - self._cli.expand(ldn, new_size) - elif ld['RPL Attribute'] != 'IV': - msg = (_('RPL Attribute Error. RPL Attribute = %s.') - % ld['RPL Attribute']) - LOG.error(msg) - raise exception.VolumeBackendAPIException(data=msg) - - LOG.debug('_extend_volume(Volume ID = %(id)s, ' - 'new_size = %(newsize)s) End.', - {'id': volume.id, 'newsize': new_size}) - - def create_cloned_volume(self, volume, src_vref): - msgparm = ('Volume ID = %(id)s, ' - 'Source Volume ID = %(src_id)s' - % {'id': volume.id, - 'src_id': src_vref.id}) - try: - self._create_cloned_volume(volume, src_vref) - LOG.info('Created Cloned Volume (%s)', msgparm) - except exception.CinderException as e: - with excutils.save_and_reraise_exception(): - LOG.warning('Failed to Create Cloned Volume ' - '(%(msgparm)s) (%(exception)s)', - {'msgparm': msgparm, 'exception': e}) - - def _create_cloned_volume(self, volume, src_vref): - """Creates a clone of the specified volume.""" - LOG.debug('_create_cloned_volume' - '(Volume ID = %(id)s, Source ID = %(src_id)s ) Start.', - {'id': volume.id, 'src_id': src_vref.id}) - - xml = self._cli.view_all(self._properties['ismview_path']) - pools, lds, ldsets, used_ldns, hostports, max_ld_count = ( - self.configs(xml)) - - # check MV existence and get MV info. - source_name = ( - self.get_ldname(src_vref.id, - self._properties['ld_name_format'])) - if source_name not in lds: - msg = (_('Logical Disk `%(name)s` has unbound already. ' - 'volume_id = %(id)s.') % - {'name': source_name, 'id': src_vref.id}) - LOG.error(msg) - raise exception.VolumeBackendAPIException(data=msg) - source_ld = lds[source_name] - - # check temporarily released pairs existence. - if source_ld['RPL Attribute'] == 'MV': - # get pair lds. 
- pair_lds = self._cli.get_pair_lds(source_name, lds) - if len(pair_lds) == 3: - msg = (_('Cannot create clone volume. ' - 'number of pairs reached 3. ' - '%(msg)s. ldname=%(ldname)s') % - {'msg': msg, 'ldname': source_name}) - LOG.error(msg) - raise exception.VolumeBackendAPIException(data=msg) - - # Creating Cloned Volume. - (volume_name, - ldn, - selected_pool) = self._bind_ld(volume, - src_vref.size, - None, - self._convert_id2name, - self._select_leastused_poolnumber) - - # check io limit. - specs = self.get_volume_type_qos_specs(volume) - self.check_io_parameter(specs) - - # set io limit. - self._cli.set_io_limit(volume_name, specs) - - LOG.debug('LD bound. Name=%(name)s ' - 'Size=%(size)dGB ' - 'LDN=%(ldn)04xh ' - 'Pool=%(pool)04xh.', - {'name': volume_name, - 'size': volume.size, - 'ldn': ldn, - 'pool': selected_pool}) - LOG.debug('source_name=%(src_name)s, volume_name=%(name)s.', - {'src_name': source_name, 'name': volume_name}) - - # compare volume size and copy data to RV. - mv_capacity = src_vref.size - rv_capacity = volume.size - if rv_capacity <= mv_capacity: - rv_capacity = None - - volume_properties = { - 'mvname': source_name, - 'rvname': volume_name, - 'capacity': mv_capacity, - 'mvid': src_vref.id, - 'rvid': volume.id, - 'rvldn': ldn, - 'rvcapacity': rv_capacity, - 'flag': 'clone', - 'context': self._context - } - self._cli.backup_restore(volume_properties, cli.UnpairWaitForClone) - LOG.debug('_create_cloned_volume(Volume ID = %(id)s, ' - 'Source ID = %(src_id)s ) End.', - {'id': volume.id, 'src_id': src_vref.id}) - - def _validate_migrate_volume(self, volume, xml): - """Validate source volume information.""" - pools, lds, ldsets, used_ldns, hostports, max_ld_count = ( - self.configs(xml)) - - # get ld object - ldname = self._validate_ld_exist( - lds, volume.id, self._properties['ld_name_format']) - - # check volume status. 
- if volume.status != 'available': - msg = _('Specified Logical Disk %s is not available.') % ldname - LOG.error(msg) - raise exception.VolumeBackendAPIException(data=msg) - - # check rpl attribute. - ld = lds[ldname] - if ld['Purpose'] != '---': - msg = (_('Specified Logical Disk %(ld)s ' - 'has an invalid attribute (%(purpose)s).') - % {'ld': ldname, 'purpose': ld['Purpose']}) - raise exception.VolumeBackendAPIException(data=msg) - return True - - def migrate_volume(self, context, volume, host): - msgparm = ('Volume ID = %(id)s, ' - 'Destination Host = %(dsthost)s' - % {'id': volume.id, - 'dsthost': host}) - try: - ret = self._migrate_volume(context, volume, host) - LOG.info('Migrated Volume (%s)', msgparm) - return ret - except exception.CinderException as e: - with excutils.save_and_reraise_exception(): - LOG.warning('Failed to Migrate Volume ' - '(%(msgparm)s) (%(exception)s)', - {'msgparm': msgparm, 'exception': e}) - - def _migrate_volume(self, context, volume, host): - """Migrate the volume to the specified host. - - Returns a boolean indicating whether the migration occurred, as well as - model_update. - """ - LOG.debug('_migrate_volume(' - 'Volume ID = %(id)s, ' - 'Volume Name = %(name)s, ' - 'host = %(host)s) Start.', - {'id': volume.id, - 'name': volume.name, - 'host': host}) - - false_ret = (False, None) - - if 'capabilities' not in host: - LOG.debug('Host not in capabilities. Host = %s ', host) - return false_ret - - capabilities = host['capabilities'] - if capabilities.get('vendor_name') != self._properties['vendor_name']: - LOG.debug('Vendor is not %(vendor)s. ' - 'capabilities = %(capabilities)s ', - {'vendor': self._properties['vendor_name'], - 'capabilities': capabilities}) - return false_ret - - # get another host group configurations. - temp_conf = self.get_conf(host) - temp_conf_properties = self.get_conf_properties(temp_conf) - - # another storage configuration is not supported. 
- if temp_conf_properties['cli_fip'] != self._properties['cli_fip']: - LOG.debug('FIP is mismatch. FIP = %(tempfip)s != %(fip)s', - {'tempfip': temp_conf_properties['cli_fip'], - 'fip': self._properties['cli_fip']}) - return false_ret - - # bind LD. - (rvname, - ldn, - selected_pool) = self._bind_ld(volume, - volume.size, - self._validate_migrate_volume, - self._convert_id2migratename, - self._select_migrate_poolnumber, - temp_conf) - - if selected_pool >= 0: - # check io limit. - specs = self.get_volume_type_qos_specs(volume) - self.check_io_parameter(specs) - - # set io limit. - self._cli.set_io_limit(rvname, specs) - - volume_properties = { - 'mvname': - self.get_ldname( - volume.id, self._properties['ld_name_format']), - 'rvname': rvname, - 'capacity': - volume.size * units.Gi, - 'mvid': volume.id, - 'rvid': None, - 'flag': 'migrate', - 'context': self._context - } - # replicate LD. - self._cli.backup_restore(volume_properties, - cli.UnpairWaitForMigrate) - - LOG.debug('_migrate_volume(Volume ID = %(id)s, ' - 'Host = %(host)s) End.', - {'id': volume.id, 'host': host}) - - return (True, []) - - def check_for_export(self, context, volume_id): - pass - - def backup_use_temp_snapshot(self): - return True - - def iscsi_do_export(self, _ctx, volume, connector, ensure=False): - msgparm = ('Volume ID = %(id)s, ' - 'Initiator Name = %(initiator)s' - % {'id': volume.id, - 'initiator': connector['initiator']}) - try: - ret = self._iscsi_do_export(_ctx, volume, connector, ensure) - LOG.info('Created iSCSI Export (%s)', msgparm) - return ret - except exception.CinderException as e: - with excutils.save_and_reraise_exception(): - LOG.warning('Failed to Create iSCSI Export ' - '(%(msgparm)s) (%(exception)s)', - {'msgparm': msgparm, 'exception': e}) - - def _iscsi_do_export(self, _ctx, volume, connector, ensure): - LOG.debug('_iscsi_do_export' - '(Volume ID = %(id)s, connector = %(connector)s) Start.', - {'id': volume.id, 'connector': connector}) - while True: - xml = 
self._cli.view_all(self._properties['ismview_path']) - pools, lds, ldsets, used_ldns, hostports, max_ld_count = ( - self.configs(xml)) - - # find LD Set. - - # get target LD Set name. - metadata = {} - # image to volume or volume to image. - if (volume.status in ['downloading', 'uploading'] and - self._properties['ldset_controller_node_name'] != ''): - metadata['ldset'] = ( - self._properties['ldset_controller_node_name']) - LOG.debug('image to volume or volume to image:%s', - volume.status) - # migrate. - elif (hasattr(volume, 'migration_status') and - volume.migration_status is not None and - self._properties['ldset_controller_node_name'] != ''): - metadata['ldset'] = ( - self._properties['ldset_controller_node_name']) - LOG.debug('migrate:%s', volume.migration_status) - - ldset = self._validate_iscsildset_exist( - ldsets, connector, metadata) - - if (hasattr(volume, 'migration_status') and - volume.migration_status is not None and - 'target:' in volume.migration_status): - LOG.debug('migration_status:%s', volume.migration_status) - migstat = volume.migration_status - index = migstat.find('target:') - if index != -1: - migstat = migstat[len('target:'):] - ldname = ( - self.get_ldname( - migstat, self._properties['ld_name_format'])) - else: - ldname = ( - self.get_ldname( - volume.id, self._properties['ld_name_format'])) - - # add LD to LD set. - if ldname not in lds: - msg = _('Logical Disk `%s` could not be found.') % ldname - raise exception.NotFound(msg) - ld = lds[ldname] - - if ld['ldn'] not in ldset['lds']: - # Check the LD is remaining on ldset_controller_node. - ldset_controller_node_name = ( - self._properties['ldset_controller_node_name']) - if ldset_controller_node_name != '': - if ldset_controller_node_name != ldset['ldsetname']: - ldset_controller = ldsets[ldset_controller_node_name] - if ld['ldn'] in ldset_controller['lds']: - LOG.debug( - 'delete remaining the LD from ' - 'ldset_controller_node. 
' - 'Ldset Name=%s.', - ldset_controller_node_name) - self._cli.delldsetld(ldset_controller_node_name, - ldname) - # assign the LD to LD Set. - self._cli.addldsetld(ldset['ldsetname'], ldname) - - LOG.debug('Add LD `%(ld)s` to LD Set `%(ldset)s`.', - {'ld': ldname, 'ldset': ldset['ldsetname']}) - else: - break - - # enumerate portals for iscsi multipath. - prefered_director = ld['pool_num'] % 2 - nominated = self._enumerate_iscsi_portals(hostports, ldset, - prefered_director) - location = ('%(list)s,1 %(iqn)s %(lun)d' - % {'list': ';'.join(nominated), - 'iqn': ldset['lds'][ld['ldn']]['iqn'], - 'lun': ldset['lds'][ld['ldn']]['lun']}) - - LOG.debug('%(ensure)sexport LD `%(name)s` via `%(location)s`.', - {'ensure': 'ensure_' if ensure else '', - 'name': ldname, - 'location': location}) - return {'provider_location': location} - - def fc_do_export(self, _ctx, volume, connector, ensure=False): - msgparm = ('Volume ID = %(id)s, ' - 'Initiator WWPNs = %(wwpns)s' - % {'id': volume.id, - 'wwpns': connector['wwpns']}) - try: - ret = self._fc_do_export(_ctx, volume, connector, ensure) - LOG.info('Created FC Export (%s)', msgparm) - return ret - except exception.CinderException as e: - with excutils.save_and_reraise_exception(): - LOG.warning('Failed to Create FC Export ' - '(%(msgparm)s) (%(exception)s)', - {'msgparm': msgparm, 'exception': e}) - - def _fc_do_export(self, _ctx, volume, connector, ensure): - LOG.debug('_fc_do_export' - '(Volume ID = %(id)s, connector = %(connector)s) Start.', - {'id': volume.id, 'connector': connector}) - while True: - xml = self._cli.view_all(self._properties['ismview_path']) - pools, lds, ldsets, used_ldns, hostports, max_ld_count = ( - self.configs(xml)) - - # find LD Set. - - # get target LD Set. - metadata = {} - # image to volume or volume to image. 
- if (volume.status in ['downloading', 'uploading'] and - self._properties['ldset_controller_node_name'] != ''): - metadata['ldset'] = ( - self._properties['ldset_controller_node_name']) - LOG.debug('image to volume or volume to image:%s', - volume.status) - # migrate. - elif (hasattr(volume, 'migration_status') and - volume.migration_status is not None and - self._properties['ldset_controller_node_name'] != '' - ): - metadata['ldset'] = ( - self._properties['ldset_controller_node_name']) - LOG.debug('migrate:%s', volume.migration_status) - - ldset = self._validate_fcldset_exist(ldsets, connector, metadata) - - # get free lun. - luns = [] - ldsetlds = ldset['lds'] - for ld in six.itervalues(ldsetlds): - luns.append(ld['lun']) - - target_lun = 0 - for lun in sorted(luns): - if target_lun < lun: - break - target_lun += 1 - - if (hasattr(volume, 'migration_status') and - volume.migration_status is not None and - 'target:' in volume.migration_status): - LOG.debug('migration_status:%s', volume.migration_status) - migstat = volume.migration_status - index = migstat.find('target:') - if index != -1: - migstat = migstat[len('target:'):] - ldname = ( - self.get_ldname( - migstat, - self._properties['ld_name_format'])) - else: - ldname = ( - self.get_ldname( - volume.id, - self._properties['ld_name_format'])) - - # add LD to LD set. - if ldname not in lds: - msg = _('Logical Disk `%s` could not be found.') % ldname - raise exception.NotFound(msg) - ld = lds[ldname] - - if ld['ldn'] not in ldset['lds']: - # Check the LD is remaining on ldset_controller_node. - ldset_controller_node_name = ( - self._properties['ldset_controller_node_name']) - if ldset_controller_node_name != '': - if ldset_controller_node_name != ldset['ldsetname']: - ldset_controller = ldsets[ldset_controller_node_name] - if ld['ldn'] in ldset_controller['lds']: - LOG.debug( - 'delete remaining the LD from ' - 'ldset_controller_node. 
' - 'Ldset Name=%s.', ldset_controller_node_name) - self._cli.delldsetld(ldset_controller_node_name, - ldname) - # assign the LD to LD Set. - self._cli.addldsetld(ldset['ldsetname'], ldname, target_lun) - - LOG.debug('Add LD `%(ld)s` to LD Set `%(ldset)s`.', - {'ld': ldname, 'ldset': ldset['ldsetname']}) - else: - break - - LOG.debug('%(ensure)sexport LD `%(ld)s`.', - {'ensure': 'ensure_' if ensure else '', - 'ld': ldname}) - - def iscsi_do_export_snapshot(self, context, snapshot, connector): - """Exports the snapshot.""" - msgparm = 'Snapshot ID = %s' % snapshot.id - try: - ret = self._iscsi_do_export_snapshot( - context, snapshot, connector, - self._properties['diskarray_name']) - LOG.info('Create Export Snapshot (%s)', msgparm) - return ret - except exception.CinderException as e: - with excutils.save_and_reraise_exception(): - LOG.warning('Failed to Create Export Snapshot ' - '(%(msgparm)s) (%(exception)s)', - {'msgparm': msgparm, 'exception': e}) - - @coordination.synchronized('mstorage_bind_execute_{diskarray_name}') - def _iscsi_do_export_snapshot(self, context, snapshot, connector, - diskarray_name): - LOG.debug('_iscsi_do_export_snapshot(Snapshot ID = %s) Start.', - snapshot.id) - xml = self._cli.view_all(self._properties['ismview_path']) - pools, lds, ldsets, used_ldns, hostports, max_ld_count = ( - self.configs(xml)) - - LOG.debug('validate data.') - svname = self._validate_ld_exist( - lds, snapshot.id, self._properties['ld_name_format']) - bvname = self._validate_ld_exist( - lds, snapshot.volume_id, self._properties['ld_name_format']) - lvname = svname + '_l' - ldset = self._validate_iscsildset_exist(ldsets, connector) - svstatus = self._cli.query_BV_SV_status(bvname[3:], svname[3:]) - if svstatus != 'snap/active': - msg = _('Logical Disk (%s) is invalid snapshot.') % svname - raise exception.VolumeBackendAPIException(data=msg) - lvldn = self._select_ldnumber(used_ldns, max_ld_count) - - LOG.debug('configure backend.') - self._cli.lvbind(bvname, 
lvname[3:], lvldn) - self._cli.lvlink(svname[3:], lvname[3:]) - self._cli.addldsetld(ldset['ldsetname'], lvname) - - xml = self._cli.view_all(self._properties['ismview_path']) - pools, lds, ldsets, used_ldns, hostports, max_ld_count = ( - self.configs(xml)) - ld = lds[lvname] - ldset = self._validate_iscsildset_exist(ldsets, connector) - - LOG.debug('enumerate portals for iscsi multipath.') - prefered_director = ld['pool_num'] % 2 - nominated = self._enumerate_iscsi_portals(hostports, ldset, - prefered_director) - location = ('%(list)s,1 %(iqn)s %(lun)d' - % {'list': ';'.join(nominated), - 'iqn': ldset['lds'][ld['ldn']]['iqn'], - 'lun': ldset['lds'][ld['ldn']]['lun']}) - - LOG.debug('create_export_snapshot location:(%s)', location) - return {'provider_location': location} - - def fc_do_export_snapshot(self, context, snapshot, connector, - ensure=False): - msgparm = ('Volume ID = %(id)s, ' - 'Initiator WWPNs = %(wwpns)s' - % {'id': snapshot.id, - 'wwpns': connector['wwpns']}) - try: - ret = self._fc_do_export_snapshot( - context, snapshot, connector, ensure, - self._properties['diskarray_name']) - LOG.info('Created FC Export snapshot(%s)', msgparm) - return ret - except exception.CinderException as e: - with excutils.save_and_reraise_exception(): - LOG.warning('Failed to Create FC Export snapshot' - '(%(msgparm)s) (%(exception)s)', - {'msgparm': msgparm, 'exception': e}) - - @coordination.synchronized('mstorage_bind_execute_{diskarray_name}') - def _fc_do_export_snapshot(self, context, snapshot, connector, ensure, - diskarray_name): - LOG.debug('_fc_do_export_snapshot(Snapshot ID = %s) Start.', - snapshot.id) - xml = self._cli.view_all(self._properties['ismview_path']) - pools, lds, ldsets, used_ldns, hostports, max_ld_count = ( - self.configs(xml)) - - LOG.debug('validate data.') - svname = self._validate_ld_exist( - lds, snapshot.id, self._properties['ld_name_format']) - bvname = self._validate_ld_exist( - lds, snapshot.volume_id, 
self._properties['ld_name_format']) - lvname = svname + '_l' - ldset = self._validate_fcldset_exist(ldsets, connector) - svstatus = self._cli.query_BV_SV_status(bvname[3:], svname[3:]) - if svstatus != 'snap/active': - msg = _('Logical Disk (%s) is invalid snapshot.') % svname - raise exception.VolumeBackendAPIException(data=msg) - lvldn = self._select_ldnumber(used_ldns, max_ld_count) - - LOG.debug('configure backend.') - self._cli.lvbind(bvname, lvname[3:], lvldn) - self._cli.lvlink(svname[3:], lvname[3:]) - - luns = [] - ldsetlds = ldset['lds'] - for ld in six.itervalues(ldsetlds): - luns.append(ld['lun']) - target_lun = 0 - for lun in sorted(luns): - if target_lun < lun: - break - target_lun += 1 - - self._cli.addldsetld(ldset['ldsetname'], lvname, target_lun) - LOG.debug('Add LD `%(ld)s` to LD Set `%(ldset)s`.', - {'ld': lvname, 'ldset': ldset['ldsetname']}) - LOG.debug('%(ensure)sexport LD `%(ld)s`.', - {'ensure': 'ensure_' if ensure else '', - 'ld': lvname}) - - def remove_export(self, context, volume): - msgparm = 'Volume ID = %s' % volume.id - try: - self._remove_export(context, volume) - LOG.info('Removed Export (%s)', msgparm) - except exception.CinderException as e: - with excutils.save_and_reraise_exception(): - LOG.warning('Failed to Remove Export ' - '(%(msgparm)s) (%(exception)s)', - {'msgparm': msgparm, 'exception': e}) - - def _remove_export(self, context, volume): - if (volume.status == 'uploading' and - volume.attach_status == 'attached'): - return - else: - LOG.debug('_remove_export Start.') - xml = self._cli.view_all(self._properties['ismview_path']) - pools, lds, ldsets, used_ldns, hostports, max_ld_count = ( - self.configs(xml)) - - # get target LD Set. - metadata = {} - # image to volume or volume to image. 
- if (volume.status in ['downloading', 'uploading'] and - self._properties['ldset_controller_node_name'] != ''): - metadata['ldset'] = ( - self._properties['ldset_controller_node_name']) - LOG.debug('image to volume or volume to image:%s', - volume.status) - # migrate. - elif (hasattr(volume, 'migration_status') and - volume.migration_status is not None and - self._properties['ldset_controller_node_name'] != '' - ): - metadata['ldset'] = ( - self._properties['ldset_controller_node_name']) - LOG.debug('migrate:%s', volume.migration_status) - - ldset = self.get_ldset(ldsets, metadata) - - if (hasattr(volume, 'migration_status') and - volume.migration_status is not None and - 'target:' in volume.migration_status): - LOG.debug('migration_status:%s', volume.migration_status) - migstat = volume.migration_status - index = migstat.find('target:') - if index != -1: - migstat = migstat[len('target:'):] - ldname = ( - self.get_ldname( - migstat, - self._properties['ld_name_format'])) - else: - ldname = ( - self.get_ldname( - volume.id, - self._properties['ld_name_format'])) - - if ldname not in lds: - LOG.debug('LD `%s` already unbound?', ldname) - return - - ld = lds[ldname] - ldsetlist = [] - - if ldset is None: - for tldset in six.itervalues(ldsets): - if ld['ldn'] in tldset['lds']: - ldsetlist.append(tldset) - LOG.debug('ldset=%s.', tldset) - if len(ldsetlist) == 0: - LOG.debug('LD `%s` already deleted from LD Set?', - ldname) - return - else: - if ld['ldn'] not in ldset['lds']: - LOG.debug('LD `%(ld)s` already deleted ' - 'from LD Set `%(ldset)s`?', - {'ld': ldname, 'ldset': ldset['ldsetname']}) - return - ldsetlist.append(ldset) - - # delete LD from LD set. 
- for tagetldset in ldsetlist: - retnum, errnum = (self._cli.delldsetld( - tagetldset['ldsetname'], ldname)) - - if retnum is not True: - if 'iSM31065' in errnum: - LOG.debug( - 'LD `%(ld)s` already deleted ' - 'from LD Set `%(ldset)s`?', - {'ld': ldname, 'ldset': tagetldset['ldsetname']}) - else: - msg = (_('Failed to unregister Logical Disk from ' - 'Logical Disk Set (%s)') % errnum) - raise exception.VolumeBackendAPIException(data=msg) - LOG.debug('LD `%(ld)s` deleted from LD Set `%(ldset)s`.', - {'ld': ldname, 'ldset': tagetldset['ldsetname']}) - - LOG.debug('_remove_export(Volume ID = %s) End.', volume.id) - - def remove_export_snapshot(self, context, snapshot): - """Removes an export for a snapshot.""" - msgparm = 'Snapshot ID = %s' % snapshot.id - try: - self._remove_export_snapshot(context, snapshot) - LOG.info('Removed Export Snapshot(%s)', msgparm) - except exception.CinderException as e: - with excutils.save_and_reraise_exception(): - LOG.warning('Failed to Remove Export Snapshot' - '(%(msgparm)s) (%(exception)s)', - {'msgparm': msgparm, 'exception': e}) - - def _remove_export_snapshot(self, context, snapshot): - LOG.debug('_remove_export_snapshot(Snapshot ID = %s) Start.', - snapshot.id) - xml = self._cli.view_all(self._properties['ismview_path']) - pools, lds, ldsets, used_ldns, hostports, max_ld_count = ( - self.configs(xml)) - - LOG.debug('validate data.') - svname = self._validate_ld_exist( - lds, snapshot.id, self._properties['ld_name_format']) - lvname = svname + '_l' - if lvname not in lds: - LOG.debug('Logical Disk `%s` is already unexported.', lvname) - return - - ld = lds[lvname] - ldsetlist = [] - if ld is None: - msg = _('Exported snapshot could not be found.') - raise exception.VolumeBackendAPIException(data=msg) - for tldset in six.itervalues(ldsets): - if ld['ldn'] in tldset['lds']: - ldsetlist.append(tldset) - if len(ldsetlist) == 0: - LOG.debug('Specified Logical Disk is already removed.') - return - - LOG.debug('configure backend.') - 
for tagetldset in ldsetlist: - retnum, errnum = self._cli.delldsetld(tagetldset['ldsetname'], - lvname) - if retnum is not True: - msg = (_('Failed to remove export Logical Disk from ' - 'Logical Disk Set (%s)') % errnum) - raise exception.VolumeBackendAPIException(data=msg) - LOG.debug('LD `%(ld)s` deleted from LD Set `%(ldset)s`.', - {'ld': lvname, 'ldset': tagetldset['ldsetname']}) - - try: - self._cli.lvunlink(lvname[3:]) - except Exception: - LOG.debug('LV unlink error.') - - try: - self._cli.lvunbind(lvname) - except Exception: - LOG.debug('LV unbind error.') - - LOG.debug('_remove_export_snapshot(Snapshot ID = %s) End.', - snapshot.id) - - def iscsi_initialize_connection(self, volume, connector): - msgparm = ('Volume ID = %(id)s, Connector = %(connector)s' - % {'id': volume.id, 'connector': connector}) - - try: - ret = self._iscsi_initialize_connection(volume, connector) - LOG.info('Initialized iSCSI Connection (%s)', msgparm) - return ret - except exception.CinderException as e: - with excutils.save_and_reraise_exception(): - LOG.warning('Failed to Initialize iSCSI Connection ' - '(%(msgparm)s) (%(exception)s)', - {'msgparm': msgparm, 'exception': e}) - - def _iscsi_initialize_connection(self, volume, connector): - """Initializes the connection and returns connection info. - - The iscsi driver returns a driver_volume_type of 'iscsi'. - The format of the driver data is defined in _get_iscsi_properties. 
- Example return value:: - - { - 'driver_volume_type': 'iscsi' - 'data': { - 'target_discovered': True, - 'target_iqn': 'iqn.2010-10.org.openstack:volume-00000001', - 'target_portal': '127.0.0.0.1:3260', - 'volume_id': 1, - 'access_mode': 'rw' - } - } - - """ - LOG.debug('_iscsi_initialize_connection' - '(Volume ID = %(id)s, connector = %(connector)s) Start.', - {'id': volume.id, 'connector': connector}) - - provider_location = volume.provider_location - provider_location = provider_location.split() - info = {'driver_volume_type': 'iscsi', - 'data': {'target_portal': random.choice( - provider_location[0][0:-2].split(";")), - 'target_iqn': provider_location[1], - 'target_lun': int(provider_location[2]), - 'target_discovered': False, - 'volume_id': volume.id} - } - if connector.get('multipath'): - portals_len = len(provider_location[0][0:-2].split(";")) - info['data'].update({'target_portals': - provider_location[0][0:-2].split(";"), - 'target_iqns': [provider_location[1]] * - portals_len, - 'target_luns': [int(provider_location[2])] * - portals_len}) - LOG.debug('_iscsi_initialize_connection' - '(Volume ID = %(id)s, connector = %(connector)s, ' - 'info = %(info)s) End.', - {'id': volume.id, - 'connector': connector, - 'info': info}) - return info - - def iscsi_initialize_connection_snapshot(self, snapshot, connector, - **kwargs): - """Allow connection to connector and return connection info. - - :param snapshot: The snapshot to be attached - :param connector: Dictionary containing information about what - is being connected to. - :returns conn_info: A dictionary of connection information. This - can optionally include a "initiator_updates" - field. 
- """ - msgparm = ('Snapshot ID = %(id)s, Connector = %(connector)s' - % {'id': snapshot.id, 'connector': connector}) - - try: - ret = self._iscsi_initialize_connection(snapshot, connector) - LOG.info('Initialized iSCSI Connection snapshot(%s)', msgparm) - return ret - except exception.CinderException as e: - with excutils.save_and_reraise_exception(): - LOG.warning('Failed to Initialize iSCSI Connection snapshot' - '(%(msgparm)s) (%(exception)s)', - {'msgparm': msgparm, 'exception': e}) - return ret - - def iscsi_terminate_connection(self, volume, connector): - msgparm = ('Volume ID = %(id)s, Connector = %(connector)s' - % {'id': volume.id, 'connector': connector}) - LOG.info('Terminated iSCSI Connection (%s)', msgparm) - - def iscsi_terminate_connection_snapshot(self, snapshot, connector, - **kwargs): - """Disallow connection from connector.""" - msgparm = ('Volume ID = %(id)s, Connector = %(connector)s' - % {'id': snapshot.id, 'connector': connector}) - self.remove_export_snapshot(None, snapshot) - LOG.info('Terminated iSCSI Connection snapshot(%s)', msgparm) - - def fc_initialize_connection(self, volume, connector): - msgparm = ('Volume ID = %(id)s, Connector = %(connector)s' - % {'id': volume.id, 'connector': connector}) - - try: - ret = self._fc_initialize_connection(volume, connector) - LOG.info('Initialized FC Connection (%s)', msgparm) - return ret - except exception.CinderException as e: - with excutils.save_and_reraise_exception(): - LOG.warning('Failed to Initialize FC Connection ' - '(%(msgparm)s) (%(exception)s)', - {'msgparm': msgparm, 'exception': e}) - - def _fc_initialize_connection(self, volume, connector): - """Initializes the connection and returns connection info. - - The driver returns a driver_volume_type of 'fibre_channel'. - The target_wwn can be a single entry or a list of wwns that - correspond to the list of remote wwn(s) that will export the volume. 
- Example return values: - - { - 'driver_volume_type': 'fibre_channel' - 'data': { - 'target_discovered': True, - 'target_lun': 1, - 'target_wwn': '1234567890123', - 'access_mode': 'rw' - } - } - - or - - { - 'driver_volume_type': 'fibre_channel' - 'data': { - 'target_discovered': True, - 'target_lun': 1, - 'target_wwn': ['1234567890123', '0987654321321'], - 'access_mode': 'rw' - } - } - """ - - LOG.debug('_fc_initialize_connection' - '(Volume ID = %(id)s, connector = %(connector)s) Start.', - {'id': volume.id, 'connector': connector}) - - xml = self._cli.view_all(self._properties['ismview_path']) - pools, lds, ldsets, used_ldns, hostports, max_ld_count = ( - self.configs(xml)) - - # get target wwpns and initiator/target map. - - fc_ports = [] - for director, hostport in hostports.items(): - for port in hostport: - if port['protocol'].lower() == 'fc': - fc_ports.append(port) - target_wwns, init_targ_map = ( - self._build_initiator_target_map(connector, fc_ports)) - - if (hasattr(volume, 'migration_status') and - volume.migration_status is not None and - 'target:' in volume.migration_status): - LOG.debug('migration_status:%s', volume.migration_status) - migstat = volume.migration_status - index = migstat.find('target:') - if index != -1: - migstat = migstat[len('target:'):] - ldname = ( - self.get_ldname(migstat, - self._properties['ld_name_format'])) - else: - ldname = ( - self.get_ldname(volume.id, - self._properties['ld_name_format'])) - - # get lun. - if ldname not in lds: - msg = (_('Logical Disk %(ld)s has unbound already. 
' - 'volume_id = %(id)s.') % - {'ld': ldname, 'id': volume.id}) - LOG.error(msg) - raise exception.NotFound(msg) - lvname = ldname + '_l' - if lvname in lds: - ldn = lds[lvname]['ldn'] - else: - ldn = lds[ldname]['ldn'] - - lun = None - for ldset in six.itervalues(ldsets): - if ldn in ldset['lds']: - lun = ldset['lds'][ldn]['lun'] - break - - info = { - 'driver_volume_type': 'fibre_channel', - 'data': {'target_lun': lun, - 'target_wwn': target_wwns, - 'initiator_target_map': init_targ_map}} - - LOG.debug('_fc_initialize_connection' - '(Volume ID = %(id)s, connector = %(connector)s, ' - 'info = %(info)s) End.', - {'id': volume.id, - 'connector': connector, - 'info': info}) - return info - - def fc_initialize_connection_snapshot(self, snapshot, connector): - msgparm = ('Volume ID = %(id)s, Connector = %(connector)s' - % {'id': snapshot.id, 'connector': connector}) - - try: - ret = self._fc_initialize_connection(snapshot, connector) - LOG.info('Initialized FC Connection snapshot(%s)', msgparm) - return ret - except exception.CinderException as e: - with excutils.save_and_reraise_exception(): - LOG.warning('Failed to Initialize FC Connection snapshot' - '(%(msgparm)s) (%(exception)s)', - {'msgparm': msgparm, 'exception': e}) - - def fc_terminate_connection(self, volume, connector): - msgparm = ('Volume ID = %(id)s, Connector = %(connector)s' - % {'id': volume.id, 'connector': connector}) - - try: - ret = self._fc_terminate_connection(volume, connector) - LOG.info('Terminated FC Connection (%s)', msgparm) - return ret - except exception.CinderException as e: - with excutils.save_and_reraise_exception(): - LOG.warning('Failed to Terminate FC Connection ' - '(%(msgparm)s) (%(exception)s)', - {'msgparm': msgparm, 'exception': e}) - - def _fc_terminate_connection(self, volume, connector): - """Disallow connection from connector.""" - LOG.debug('_fc_terminate_connection' - '(Volume ID = %(id)s, connector = %(connector)s) Start.', - {'id': volume.id, 'connector': connector}) 
- - xml = self._cli.view_all(self._properties['ismview_path']) - pools, lds, ldsets, used_ldns, hostports, max_ld_count = ( - self.configs(xml)) - - # get target wwpns and initiator/target map. - fc_ports = [] - for director, hostport in hostports.items(): - for port in hostport: - if port['protocol'].lower() == 'fc': - fc_ports.append(port) - target_wwns, init_targ_map = ( - self._build_initiator_target_map(connector, fc_ports)) - - info = {'driver_volume_type': 'fibre_channel', - 'data': {'target_wwn': target_wwns, - 'initiator_target_map': init_targ_map}} - LOG.debug('_fc_terminate_connection' - '(Volume ID = %(id)s, connector = %(connector)s, ' - 'info = %(info)s) End.', - {'id': volume.id, - 'connector': connector, - 'info': info}) - return info - - def fc_terminate_connection_snapshot(self, snapshot, connector, **kwargs): - msgparm = ('Volume ID = %(id)s, Connector = %(connector)s' - % {'id': snapshot.id, 'connector': connector}) - try: - ret = self._fc_terminate_connection(snapshot, connector) - LOG.info('Terminated FC Connection snapshot(%s)', msgparm) - self.remove_export_snapshot(None, snapshot) - return ret - except exception.CinderException as e: - with excutils.save_and_reraise_exception(): - LOG.warning('Failed to Terminate FC Connection snapshot' - '(%(msgparm)s) (%(exception)s)', - {'msgparm': msgparm, 'exception': e}) - - def _build_initiator_target_map(self, connector, fc_ports): - target_wwns = [] - for port in fc_ports: - target_wwns.append(port['wwpn']) - - initiator_wwns = connector['wwpns'] - - init_targ_map = {} - for initiator in initiator_wwns: - init_targ_map[initiator] = target_wwns - - return target_wwns, init_targ_map - - def _update_volume_status(self): - """Retrieve status info from volume group.""" - - data = {} - - data['volume_backend_name'] = (self._properties['backend_name'] or - self._driver_name) - data['vendor_name'] = self._properties['vendor_name'] - data['driver_version'] = self.VERSION - data['reserved_percentage'] = 
self._properties['reserved_percentage'] - data['QoS_support'] = True - - # Get xml data from file and parse. - try: - pools, lds, ldsets, used_ldns, hostports, max_ld_count = ( - self.parse_xml()) - - # Get capacities from pools. - pool_capacity = self.get_pool_capacity(pools, ldsets) - - data['total_capacity_gb'] = pool_capacity['total_capacity_gb'] - data['free_capacity_gb'] = pool_capacity['free_capacity_gb'] - except Exception: - LOG.debug('_update_volume_status Unexpected error. ' - 'exception=%s', - traceback.format_exc()) - data['total_capacity_gb'] = 0 - data['free_capacity_gb'] = 0 - return data - - def iscsi_get_volume_stats(self, refresh=False): - """Get volume status. - - If 'refresh' is True, run update the stats first. - """ - if refresh: - self._stats = self._update_volume_status() - self._stats['storage_protocol'] = 'iSCSI' - LOG.debug('data=%(data)s, config_group=%(group)s', - {'data': self._stats, - 'group': self._properties['config_group']}) - - return self._stats - - def fc_get_volume_stats(self, refresh=False): - """Get volume status. - - If 'refresh' is True, run update the stats first. 
- """ - - if refresh: - self._stats = self._update_volume_status() - self._stats['storage_protocol'] = 'FC' - LOG.debug('data=%(data)s, config_group=%(group)s', - {'data': self._stats, - 'group': self._properties['config_group']}) - - return self._stats - - def get_pool(self, volume): - LOG.debug('backend_name=%s', self._properties['backend_name']) - return self._properties['backend_name'] - - def delete_volume(self, volume): - msgparm = 'Volume ID = %s' % volume.id - try: - self._delete_volume(volume) - LOG.info('Deleted Volume (%s)', msgparm) - except exception.CinderException as e: - with excutils.save_and_reraise_exception(): - LOG.warning('Failed to Delete Volume ' - '(%(msgparm)s) (%(exception)s)', - {'msgparm': msgparm, 'exception': e}) - - def _delete_volume(self, volume): - LOG.debug('_delete_volume Start.') - xml = self._cli.view_all(self._properties['ismview_path']) - pools, lds, ldsets, used_ldns, hostports, max_ld_count = ( - self.configs(xml)) - - ldname = ( - self.get_ldname(volume.id, - self._properties['ld_name_format'])) - if ldname not in lds: - LOG.debug('LD `%s` already unbound?', ldname) - return - - ld = lds[ldname] - - if ld['RPL Attribute'] == 'IV': - pass - - elif ld['RPL Attribute'] == 'MV': - query_status = self._cli.query_MV_RV_status(ldname[3:], 'MV') - if query_status == 'separated': - # unpair. - rvname = self._cli.query_MV_RV_name(ldname[3:], 'MV') - self._cli.unpair(ldname[3:], rvname, 'force') - else: - msg = _('Specified Logical Disk %s has been copied.') % ldname - LOG.error(msg) - raise exception.VolumeBackendAPIException(data=msg) - - elif ld['RPL Attribute'] == 'RV': - query_status = self._cli.query_MV_RV_status(ldname[3:], 'RV') - if query_status == 'separated': - # unpair. 
- mvname = self._cli.query_MV_RV_name(ldname[3:], 'RV') - self._cli.unpair(mvname, ldname[3:], 'force') - else: - msg = _('Specified Logical Disk %s has been copied.') % ldname - LOG.error(msg) - raise exception.VolumeBackendAPIException(data=msg) - - else: - msg = (_('RPL Attribute Error. RPL Attribute = %s.') - % ld['RPL Attribute']) - LOG.error(msg) - raise exception.VolumeBackendAPIException(data=msg) - - # Check the LD is remaining on ldset_controller_node. - ldset_controller_node_name = ( - self._properties['ldset_controller_node_name']) - if ldset_controller_node_name != '': - if ldset_controller_node_name in ldsets: - ldset = ldsets[ldset_controller_node_name] - if ld['ldn'] in ldset['lds']: - LOG.debug('delete LD from ldset_controller_node. ' - 'Ldset Name=%s.', - ldset_controller_node_name) - self._cli.delldsetld(ldset_controller_node_name, ldname) - - # unbind LD. - self._cli.unbind(ldname) - LOG.debug('LD unbound. Name=%s.', ldname) - - -class MStorageDSVDriver(MStorageDriver): - """M-Series Storage Snapshot helper class.""" - - def create_snapshot(self, snapshot): - msgparm = ('Snapshot ID = %(snap_id)s, ' - 'Snapshot Volume ID = %(snapvol_id)s' - % {'snap_id': snapshot.id, - 'snapvol_id': snapshot.volume_id}) - try: - self._create_snapshot(snapshot, - self._properties['diskarray_name']) - LOG.info('Created Snapshot (%s)', msgparm) - except exception.CinderException as e: - with excutils.save_and_reraise_exception(): - LOG.warning('Failed to Create Snapshot ' - '(%(msgparm)s) (%(exception)s)', - {'msgparm': msgparm, 'exception': e}) - - @coordination.synchronized('mstorage_bind_execute_{diskarray_name}') - def _create_snapshot(self, snapshot, diskarray_name): - LOG.debug('_create_snapshot(Volume ID = %(snapvol_id)s, ' - 'Snapshot ID = %(snap_id)s ) Start.', - {'snapvol_id': snapshot.volume_id, - 'snap_id': snapshot.id}) - - xml = self._cli.view_all(self._properties['ismview_path']) - pools, lds, ldsets, used_ldns, hostports, max_ld_count = ( - 
self.configs(xml)) - - if len(self._properties['pool_backup_pools']) == 0: - LOG.error('backup_pools is not set.') - raise exception.ParameterNotFound(param='backup_pools') - - # get BV name. - ldname = self._validate_ld_exist( - lds, snapshot.volume_id, self._properties['ld_name_format']) - - selected_pool = self._select_dsv_poolnumber(snapshot, pools, None) - snapshotname = self._convert_id2snapname(snapshot) - self._cli.snapshot_create(ldname, snapshotname[3:], selected_pool) - - LOG.debug('_create_snapshot(Volume ID = %(snapvol_id)s, ' - 'Snapshot ID = %(snap_id)s) End.', - {'snapvol_id': snapshot.volume_id, - 'snap_id': snapshot.id}) - - def delete_snapshot(self, snapshot): - msgparm = ('Snapshot ID = %(snap_id)s, ' - 'Snapshot Volume ID = %(snapvol_id)s' - % {'snap_id': snapshot.id, - 'snapvol_id': snapshot.volume_id}) - try: - self._delete_snapshot(snapshot) - LOG.info('Deleted Snapshot (%s)', msgparm) - except exception.CinderException as e: - with excutils.save_and_reraise_exception(): - LOG.warning('Failed to Delete Snapshot ' - '(%(msgparm)s) (%(exception)s)', - {'msgparm': msgparm, 'exception': e}) - - def _delete_snapshot(self, snapshot): - LOG.debug('_delete_snapshot(Snapshot ID = %s) Start.', - snapshot.id) - xml = self._cli.view_all(self._properties['ismview_path']) - pools, lds, ldsets, used_ldns, hostports, max_ld_count = ( - self.configs(xml)) - - # get BV name. - ldname = self.get_ldname(snapshot.volume_id, - self._properties['ld_name_format']) - if ldname not in lds: - LOG.debug('LD(MV) `%s` already unbound?', ldname) - return - - # get SV name. 
- snapshotname = ( - self.get_ldname(snapshot.id, - self._properties['ld_backupname_format'])) - if snapshotname not in lds: - LOG.debug('LD(SV) `%s` already unbound?', snapshotname) - return - - self._cli.snapshot_delete(ldname, snapshotname[3:]) - - LOG.debug('_delete_snapshot(Snapshot ID = %s) End.', snapshot.id) - - def create_volume_from_snapshot(self, volume, snapshot): - msgparm = ('Volume ID = %(vol_id)s, ' - 'Snapshot ID = %(snap_id)s, ' - 'Snapshot Volume ID = %(snapvol_id)s' - % {'vol_id': volume.id, - 'snap_id': snapshot.id, - 'snapvol_id': snapshot.volume_id}) - try: - self._create_volume_from_snapshot(volume, snapshot) - LOG.info('Created Volume from Snapshot (%s)', msgparm) - except exception.CinderException as e: - with excutils.save_and_reraise_exception(): - LOG.warning('Failed to Create Volume from Snapshot ' - '(%(msgparm)s) (%(exception)s)', - {'msgparm': msgparm, 'exception': e}) - - def _create_volume_from_snapshot(self, volume, snapshot): - LOG.debug('_create_volume_from_snapshot' - '(Volume ID = %(vol_id)s, Snapshot ID(SV) = %(snap_id)s, ' - 'Snapshot ID(BV) = %(snapvol_id)s) Start.', - {'vol_id': volume.id, - 'snap_id': snapshot.id, - 'snapvol_id': snapshot.volume_id}) - xml = self._cli.view_all(self._properties['ismview_path']) - pools, lds, ldsets, used_ldns, hostports, max_ld_count = ( - self.configs(xml)) - - # get BV name. - mvname = ( - self.get_ldname(snapshot.volume_id, - self._properties['ld_name_format'])) - - # get SV name. - rvname = ( - self.get_ldname(snapshot.id, - self._properties['ld_backupname_format'])) - - if rvname not in lds: - msg = _('Logical Disk `%s` has unbound already.') % rvname - LOG.error(msg) - raise exception.NotFound(msg) - rv = lds[rvname] - - # check snapshot status. - query_status = self._cli.query_BV_SV_status(mvname[3:], rvname[3:]) - if query_status != 'snap/active': - msg = (_('Cannot create volume from snapshot, ' - 'because the snapshot data does not exist. 
' - 'bvname=%(bvname)s, svname=%(svname)s') % - {'bvname': mvname, 'svname': rvname}) - LOG.error(msg) - raise exception.VolumeBackendAPIException(data=msg) - - mv_capacity = rv['ld_capacity'] - rv_capacity = volume.size - - (new_rvname, - rvnumber, - selected_pool) = self._bind_ld(volume, - mv_capacity, - None, - self._convert_id2name, - self._select_volddr_poolnumber, - mv_capacity) - - # check io limit. - specs = self.get_volume_type_qos_specs(volume) - self.check_io_parameter(specs) - - # set io limit. - self._cli.set_io_limit(new_rvname, specs) - - if rv_capacity <= mv_capacity: - rvnumber = None - rv_capacity = None - - # Restore Start. - volume_properties = { - 'mvname': rvname, - 'rvname': new_rvname, - 'prev_mvname': None, - 'capacity': mv_capacity, - 'mvid': snapshot.id, - 'rvid': volume.id, - 'rvldn': rvnumber, - 'rvcapacity': rv_capacity, - 'flag': 'esv_restore', - 'context': self._context - } - self._cli.backup_restore(volume_properties, - cli.UnpairWaitForDDRRestore) - - LOG.debug('_create_volume_from_snapshot(Volume ID = %(vol_id)s, ' - 'Snapshot ID(SV) = %(snap_id)s, ' - 'Snapshot ID(BV) = %(snapvol_id)s, ' - 'Specs=%(specs)s) End.', - {'vol_id': volume.id, - 'snap_id': snapshot.id, - 'snapvol_id': snapshot.volume_id, - 'specs': specs}) diff --git a/cinder/volume/drivers/netapp/__init__.py b/cinder/volume/drivers/netapp/__init__.py deleted file mode 100644 index e69de29bb..000000000 diff --git a/cinder/volume/drivers/netapp/common.py b/cinder/volume/drivers/netapp/common.py deleted file mode 100644 index 8d7422e5d..000000000 --- a/cinder/volume/drivers/netapp/common.py +++ /dev/null @@ -1,115 +0,0 @@ -# Copyright (c) 2014 Navneet Singh. All rights reserved. -# Copyright (c) 2014 Clinton Knight. All rights reserved. -# Copyright (c) 2015 Alex Meade. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. 
You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -""" -Unified driver for NetApp storage systems. - -Supports multiple storage systems of different families and protocols. -""" - -from oslo_log import log as logging -from oslo_utils import importutils - -from cinder import exception -from cinder.i18n import _ -from cinder.volume import driver -from cinder.volume.drivers.netapp import options -from cinder.volume.drivers.netapp import utils as na_utils - - -LOG = logging.getLogger(__name__) - -DATAONTAP_PATH = 'cinder.volume.drivers.netapp.dataontap' -ESERIES_PATH = 'cinder.volume.drivers.netapp.eseries' - -# Add new drivers here, no other code changes required. -NETAPP_UNIFIED_DRIVER_REGISTRY = { - 'ontap_cluster': - { - 'iscsi': DATAONTAP_PATH + '.iscsi_cmode.NetAppCmodeISCSIDriver', - 'nfs': DATAONTAP_PATH + '.nfs_cmode.NetAppCmodeNfsDriver', - 'fc': DATAONTAP_PATH + '.fc_cmode.NetAppCmodeFibreChannelDriver' - }, - 'ontap_7mode': - { - 'iscsi': DATAONTAP_PATH + '.iscsi_7mode.NetApp7modeISCSIDriver', - 'nfs': DATAONTAP_PATH + '.nfs_7mode.NetApp7modeNfsDriver', - 'fc': DATAONTAP_PATH + '.fc_7mode.NetApp7modeFibreChannelDriver' - }, - 'eseries': - { - 'iscsi': ESERIES_PATH + '.iscsi_driver.NetAppEseriesISCSIDriver', - 'fc': ESERIES_PATH + '.fc_driver.NetAppEseriesFibreChannelDriver' - }} - - -class NetAppDriver(driver.ProxyVD): - """NetApp unified block storage driver. - - Acts as a factory to create NetApp storage drivers based on the - storage family and protocol configured. 
- """ - - REQUIRED_FLAGS = ['netapp_storage_family', 'netapp_storage_protocol'] - - def __new__(cls, *args, **kwargs): - - config = kwargs.get('configuration', None) - if not config: - raise exception.InvalidInput( - reason=_('Required configuration not found')) - - config.append_config_values(options.netapp_proxy_opts) - na_utils.check_flags(NetAppDriver.REQUIRED_FLAGS, config) - - app_version = na_utils.OpenStackInfo().info() - LOG.info('OpenStack OS Version Info: %(info)s', - {'info': app_version}) - kwargs['app_version'] = app_version - - return NetAppDriver.create_driver(config.netapp_storage_family, - config.netapp_storage_protocol, - *args, **kwargs) - - @staticmethod - def create_driver(storage_family, storage_protocol, *args, **kwargs): - """Creates an appropriate driver based on family and protocol.""" - - storage_family = storage_family.lower() - storage_protocol = storage_protocol.lower() - - fmt = {'storage_family': storage_family, - 'storage_protocol': storage_protocol} - LOG.info('Requested unified config: %(storage_family)s and ' - '%(storage_protocol)s.', fmt) - - family_meta = NETAPP_UNIFIED_DRIVER_REGISTRY.get(storage_family) - if family_meta is None: - raise exception.InvalidInput( - reason=_('Storage family %s is not supported.') - % storage_family) - - driver_loc = family_meta.get(storage_protocol) - if driver_loc is None: - raise exception.InvalidInput( - reason=_('Protocol %(storage_protocol)s is not supported ' - 'for storage family %(storage_family)s.') % fmt) - - kwargs = kwargs or {} - kwargs['netapp_mode'] = 'proxy' - driver = importutils.import_object(driver_loc, *args, **kwargs) - LOG.info('NetApp driver of family %(storage_family)s and protocol ' - '%(storage_protocol)s loaded.', fmt) - return driver diff --git a/cinder/volume/drivers/netapp/dataontap/__init__.py b/cinder/volume/drivers/netapp/dataontap/__init__.py deleted file mode 100644 index e69de29bb..000000000 diff --git a/cinder/volume/drivers/netapp/dataontap/block_7mode.py 
b/cinder/volume/drivers/netapp/dataontap/block_7mode.py deleted file mode 100644 index 006a7327f..000000000 --- a/cinder/volume/drivers/netapp/dataontap/block_7mode.py +++ /dev/null @@ -1,473 +0,0 @@ -# Copyright (c) 2012 NetApp, Inc. All rights reserved. -# Copyright (c) 2014 Ben Swartzlander. All rights reserved. -# Copyright (c) 2014 Navneet Singh. All rights reserved. -# Copyright (c) 2014 Clinton Knight. All rights reserved. -# Copyright (c) 2014 Alex Meade. All rights reserved. -# Copyright (c) 2014 Andrew Kerr. All rights reserved. -# Copyright (c) 2014 Jeff Applewhite. All rights reserved. -# Copyright (c) 2015 Tom Barron. All rights reserved. -# Copyright (c) 2015 Goutham Pacha Ravi. All rights reserved. -# Copyright (c) 2016 Mike Rooney. All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -""" -Volume driver library for NetApp 7-mode block storage systems. 
-""" - -from oslo_log import log as logging -from oslo_log import versionutils -from oslo_utils import timeutils -from oslo_utils import units -import six - -from cinder import exception -from cinder.i18n import _ -from cinder import utils -from cinder.volume import configuration -from cinder.volume.drivers.netapp.dataontap import block_base -from cinder.volume.drivers.netapp.dataontap.client import client_7mode -from cinder.volume.drivers.netapp.dataontap.performance import perf_7mode -from cinder.volume.drivers.netapp.dataontap.utils import utils as dot_utils -from cinder.volume.drivers.netapp import options as na_opts -from cinder.volume.drivers.netapp import utils as na_utils - - -LOG = logging.getLogger(__name__) - - -@six.add_metaclass(utils.TraceWrapperMetaclass) -class NetAppBlockStorage7modeLibrary(block_base.NetAppBlockStorageLibrary): - """NetApp block storage library for Data ONTAP (7-mode).""" - - def __init__(self, driver_name, driver_protocol, **kwargs): - super(NetAppBlockStorage7modeLibrary, self).__init__(driver_name, - driver_protocol, - **kwargs) - self.configuration.append_config_values(na_opts.netapp_7mode_opts) - self.driver_mode = '7mode' - - def do_setup(self, context): - super(NetAppBlockStorage7modeLibrary, self).do_setup(context) - - self.volume_list = [] - - self.vfiler = self.configuration.netapp_vfiler - - self.zapi_client = client_7mode.Client( - self.volume_list, - transport_type=self.configuration.netapp_transport_type, - username=self.configuration.netapp_login, - password=self.configuration.netapp_password, - hostname=self.configuration.netapp_server_hostname, - port=self.configuration.netapp_server_port, - vfiler=self.vfiler) - - self._do_partner_setup() - - self.vol_refresh_time = None - self.vol_refresh_interval = 1800 - self.vol_refresh_running = False - self.vol_refresh_voluntary = False - self.root_volume_name = self._get_root_volume_name() - self.perf_library = perf_7mode.Performance7modeLibrary( - self.zapi_client) - # 
This driver has been marked 'deprecated' in the Ocata release and - # can be removed in Queens. - msg = _("The 7-mode Data ONTAP driver is deprecated and will be " - "removed in a future release.") - versionutils.report_deprecated_feature(LOG, msg) - - def _do_partner_setup(self): - partner_backend = self.configuration.netapp_partner_backend_name - if partner_backend: - config = configuration.Configuration(na_opts.netapp_7mode_opts, - partner_backend) - config.append_config_values(na_opts.netapp_connection_opts) - config.append_config_values(na_opts.netapp_basicauth_opts) - config.append_config_values(na_opts.netapp_transport_opts) - - self.partner_zapi_client = client_7mode.Client( - None, - transport_type=config.netapp_transport_type, - username=config.netapp_login, - password=config.netapp_password, - hostname=config.netapp_server_hostname, - port=config.netapp_server_port, - vfiler=None) - - def check_for_setup_error(self): - """Check that the driver is working and can communicate.""" - api_version = self.zapi_client.get_ontapi_version() - if api_version: - major, minor = api_version - if major == 1 and minor < 9: - msg = _("Unsupported Data ONTAP version." - " Data ONTAP version 7.3.1 and above is supported.") - raise exception.VolumeBackendAPIException(data=msg) - else: - msg = _("API version could not be determined.") - raise exception.VolumeBackendAPIException(data=msg) - - self._refresh_volume_info() - - if not self.volume_list: - msg = _('No pools are available for provisioning volumes. 
' - 'Ensure that the configuration option ' - 'netapp_pool_name_search_pattern is set correctly.') - raise exception.NetAppDriverException(msg) - self._add_looping_tasks() - super(NetAppBlockStorage7modeLibrary, self).check_for_setup_error() - - def _add_looping_tasks(self): - """Add tasks that need to be executed at a fixed interval.""" - super(NetAppBlockStorage7modeLibrary, self)._add_looping_tasks() - - def _handle_ems_logging(self): - """Log autosupport messages.""" - - base_ems_message = dot_utils.build_ems_log_message_0( - self.driver_name, self.app_version, self.driver_mode) - self.zapi_client.send_ems_log_message(base_ems_message) - - pool_ems_message = dot_utils.build_ems_log_message_1( - self.driver_name, self.app_version, None, self.volume_list, []) - self.zapi_client.send_ems_log_message(pool_ems_message) - - def _get_volume_model_update(self, volume): - """Provide any updates necessary for a volume being created/managed.""" - - def _create_lun(self, volume_name, lun_name, size, - metadata, qos_policy_group_name=None): - """Creates a LUN, handling Data ONTAP differences as needed.""" - if qos_policy_group_name is not None: - msg = _('Data ONTAP operating in 7-Mode does not support QoS ' - 'policy groups.') - raise exception.VolumeDriverException(msg) - self.zapi_client.create_lun( - volume_name, lun_name, size, metadata, qos_policy_group_name) - - self.vol_refresh_voluntary = True - - def _get_root_volume_name(self): - # switch to volume-get-root-name API when possible - vols = self.zapi_client.get_filer_volumes() - for vol in vols: - volume_name = vol.get_child_content('name') - if self._get_vol_option(volume_name, 'root') == 'true': - return volume_name - LOG.warning('Could not determine root volume name on %s.', - self._get_owner()) - return None - - def _get_owner(self): - if self.vfiler: - owner = '%s:%s' % (self.configuration.netapp_server_hostname, - self.vfiler) - else: - owner = self.configuration.netapp_server_hostname - return owner - - def 
_create_lun_handle(self, metadata): - """Returns LUN handle based on filer type.""" - owner = self._get_owner() - return '%s:%s' % (owner, metadata['Path']) - - def _find_mapped_lun_igroup(self, path, initiator_list): - """Find an igroup for a LUN mapped to the given initiator(s).""" - initiator_set = set(initiator_list) - - result = self.zapi_client.get_lun_map(path) - initiator_groups = result.get_child_by_name('initiator-groups') - if initiator_groups: - for initiator_group_info in initiator_groups.get_children(): - - initiator_set_for_igroup = set() - for initiator_info in initiator_group_info.get_child_by_name( - 'initiators').get_children(): - initiator_set_for_igroup.add( - initiator_info.get_child_content('initiator-name')) - - if initiator_set == initiator_set_for_igroup: - igroup = initiator_group_info.get_child_content( - 'initiator-group-name') - lun_id = initiator_group_info.get_child_content( - 'lun-id') - return igroup, lun_id - - return None, None - - def _has_luns_mapped_to_initiators(self, initiator_list, - include_partner=True): - """Checks whether any LUNs are mapped to the given initiator(s).""" - if self.zapi_client.has_luns_mapped_to_initiators(initiator_list): - return True - if include_partner and self.partner_zapi_client and \ - self.partner_zapi_client.has_luns_mapped_to_initiators( - initiator_list): - return True - return False - - def _clone_lun(self, name, new_name, space_reserved=None, - qos_policy_group_name=None, src_block=0, dest_block=0, - block_count=0, source_snapshot=None, is_snapshot=False): - """Clone LUN with the given handle to the new name. 
- - :param: is_snapshot Not used, present for method signature consistency - """ - - if not space_reserved: - space_reserved = self.lun_space_reservation - if qos_policy_group_name is not None: - msg = _('Data ONTAP operating in 7-Mode does not support QoS ' - 'policy groups.') - raise exception.VolumeDriverException(msg) - - metadata = self._get_lun_attr(name, 'metadata') - path = metadata['Path'] - (parent, _splitter, name) = path.rpartition('/') - clone_path = '%s/%s' % (parent, new_name) - - self.zapi_client.clone_lun(path, clone_path, name, new_name, - space_reserved, src_block=src_block, - dest_block=dest_block, - block_count=block_count, - source_snapshot=source_snapshot) - - self.vol_refresh_voluntary = True - luns = self.zapi_client.get_lun_by_args(path=clone_path) - cloned_lun = luns[0] - self.zapi_client.set_space_reserve(clone_path, space_reserved) - clone_meta = self._create_lun_meta(cloned_lun) - handle = self._create_lun_handle(clone_meta) - self._add_lun_to_table( - block_base.NetAppLun(handle, new_name, - cloned_lun.get_child_content('size'), - clone_meta)) - - def _create_lun_meta(self, lun): - """Creates LUN metadata dictionary.""" - self.zapi_client.check_is_naelement(lun) - meta_dict = {} - meta_dict['Path'] = lun.get_child_content('path') - meta_dict['Volume'] = lun.get_child_content('path').split('/')[2] - meta_dict['OsType'] = lun.get_child_content('multiprotocol-type') - meta_dict['SpaceReserved'] = lun.get_child_content( - 'is-space-reservation-enabled') - meta_dict['UUID'] = lun.get_child_content('uuid') - return meta_dict - - def _get_fc_target_wwpns(self, include_partner=True): - wwpns = self.zapi_client.get_fc_target_wwpns() - if include_partner and self.partner_zapi_client: - wwpns.extend(self.partner_zapi_client.get_fc_target_wwpns()) - return wwpns - - def _update_volume_stats(self, filter_function=None, - goodness_function=None): - """Retrieve stats info from filer.""" - - # ensure we get current data - self.vol_refresh_voluntary = 
True - self._refresh_volume_info() - - LOG.debug('Updating volume stats') - data = {} - backend_name = self.configuration.safe_get('volume_backend_name') - data['volume_backend_name'] = backend_name or self.driver_name - data['vendor_name'] = 'NetApp' - data['driver_version'] = self.VERSION - data['storage_protocol'] = self.driver_protocol - data['pools'] = self._get_pool_stats( - filter_function=filter_function, - goodness_function=goodness_function) - data['sparse_copy_volume'] = True - - self._stats = data - - def _get_pool_stats(self, filter_function=None, goodness_function=None): - """Retrieve pool (i.e. Data ONTAP volume) stats info from volumes.""" - - pools = [] - self.perf_library.update_performance_cache() - - for vol in self.vols: - - volume_name = vol.get_child_content('name') - - # omit volumes not specified in the config - if self.volume_list and volume_name not in self.volume_list: - continue - - # omit root volume - if volume_name == self.root_volume_name: - continue - - # ensure good volume state - state = vol.get_child_content('state') - inconsistent = vol.get_child_content('is-inconsistent') - invalid = vol.get_child_content('is-invalid') - if (state != 'online' or - inconsistent != 'false' or - invalid != 'false'): - continue - - pool = dict() - pool['pool_name'] = volume_name - pool['QoS_support'] = False - pool['multiattach'] = False - pool['reserved_percentage'] = ( - self.reserved_percentage) - pool['max_over_subscription_ratio'] = ( - self.max_over_subscription_ratio) - - # convert sizes to GB - total = float(vol.get_child_content('size-total') or 0) - total /= units.Gi - pool['total_capacity_gb'] = na_utils.round_down(total, '0.01') - - free = float(vol.get_child_content('size-available') or 0) - free /= units.Gi - pool['free_capacity_gb'] = na_utils.round_down(free, '0.01') - - pool['provisioned_capacity_gb'] = (round( - pool['total_capacity_gb'] - pool['free_capacity_gb'], 2)) - - thick = ( - 
self.configuration.netapp_lun_space_reservation == 'enabled') - pool['thick_provisioning_support'] = thick - pool['thin_provisioning_support'] = not thick - - utilization = self.perf_library.get_node_utilization() - pool['utilization'] = na_utils.round_down(utilization, '0.01') - pool['filter_function'] = filter_function - pool['goodness_function'] = goodness_function - - pool['consistencygroup_support'] = True - - pools.append(pool) - - return pools - - def _get_filtered_pools(self): - """Return available pools filtered by a pool name search pattern.""" - - # Inform deprecation of legacy option. - if self.configuration.safe_get('netapp_volume_list'): - msg = ("The option 'netapp_volume_list' is deprecated and " - "will be removed in the future releases. Please use " - "the option 'netapp_pool_name_search_pattern' instead.") - versionutils.report_deprecated_feature(LOG, msg) - - pool_regex = na_utils.get_pool_name_filter_regex(self.configuration) - - filtered_pools = [] - for vol in self.vols: - vol_name = vol.get_child_content('name') - if pool_regex.match(vol_name): - msg = ("Volume '%(vol_name)s' matches against regular " - "expression: %(vol_pattern)s") - LOG.debug(msg, {'vol_name': vol_name, - 'vol_pattern': pool_regex.pattern}) - filtered_pools.append(vol_name) - else: - msg = ("Volume '%(vol_name)s' does not match against regular " - "expression: %(vol_pattern)s") - LOG.debug(msg, {'vol_name': vol_name, - 'vol_pattern': pool_regex.pattern}) - - return filtered_pools - - def _get_lun_block_count(self, path): - """Gets block counts for the LUN.""" - bs = super(NetAppBlockStorage7modeLibrary, - self)._get_lun_block_count(path) - api_version = self.zapi_client.get_ontapi_version() - if api_version: - major = api_version[0] - minor = api_version[1] - if major == 1 and minor < 15: - bs -= 1 - return bs - - def _refresh_volume_info(self): - """Saves the volume information for the filer.""" - - if (self.vol_refresh_time is None or self.vol_refresh_voluntary or - 
timeutils.is_newer_than(self.vol_refresh_time, - self.vol_refresh_interval)): - try: - job_set = na_utils.set_safe_attr(self, 'vol_refresh_running', - True) - if not job_set: - LOG.warning("Volume refresh job already running. " - "Returning...") - return - self.vol_refresh_voluntary = False - self.vols = self.zapi_client.get_filer_volumes() - self.volume_list = self._get_filtered_pools() - self.vol_refresh_time = timeutils.utcnow() - except Exception as e: - LOG.warning("Error refreshing volume info. Message: %s", - e) - finally: - na_utils.set_safe_attr(self, 'vol_refresh_running', False) - - def delete_volume(self, volume): - """Driver entry point for destroying existing volumes.""" - super(NetAppBlockStorage7modeLibrary, self).delete_volume(volume) - self.vol_refresh_voluntary = True - LOG.debug('Deleted LUN with name %s', volume['name']) - - def delete_snapshot(self, snapshot): - """Driver entry point for deleting a snapshot.""" - super(NetAppBlockStorage7modeLibrary, self).delete_snapshot(snapshot) - self.vol_refresh_voluntary = True - - def _is_lun_valid_on_storage(self, lun): - """Validate LUN specific to storage system.""" - if self.volume_list: - lun_vol = lun.get_metadata_property('Volume') - if lun_vol not in self.volume_list: - return False - return True - - def _check_volume_type_for_lun(self, volume, lun, existing_ref, - extra_specs): - """Check if LUN satisfies volume type.""" - if extra_specs: - legacy_policy = extra_specs.get('netapp:qos_policy_group') - if legacy_policy is not None: - raise exception.ManageExistingVolumeTypeMismatch( - reason=_("Setting LUN QoS policy group is not supported " - "on this storage family and ONTAP version.")) - volume_type = na_utils.get_volume_type_from_volume(volume) - if volume_type is None: - return - spec = na_utils.get_backend_qos_spec_from_volume_type(volume_type) - if spec is not None: - raise exception.ManageExistingVolumeTypeMismatch( - reason=_("Back-end QoS specs are not supported on this " - "storage 
family and ONTAP version.")) - - def _get_preferred_target_from_list(self, target_details_list, - filter=None): - # 7-mode iSCSI LIFs migrate from controller to controller - # in failover and flap operational state in transit, so - # we don't filter these on operational state. - - return (super(NetAppBlockStorage7modeLibrary, self) - ._get_preferred_target_from_list(target_details_list)) - - def _get_backing_flexvol_names(self): - """Returns a list of backing flexvol names.""" - return self.volume_list or [] diff --git a/cinder/volume/drivers/netapp/dataontap/block_base.py b/cinder/volume/drivers/netapp/dataontap/block_base.py deleted file mode 100644 index 9a035ca6f..000000000 --- a/cinder/volume/drivers/netapp/dataontap/block_base.py +++ /dev/null @@ -1,1175 +0,0 @@ -# Copyright (c) 2012 NetApp, Inc. All rights reserved. -# Copyright (c) 2014 Ben Swartzlander. All rights reserved. -# Copyright (c) 2014 Navneet Singh. All rights reserved. -# Copyright (c) 2014 Clinton Knight. All rights reserved. -# Copyright (c) 2014 Alex Meade. All rights reserved. -# Copyright (c) 2014 Andrew Kerr. All rights reserved. -# Copyright (c) 2014 Jeff Applewhite. All rights reserved. -# Copyright (c) 2015 Tom Barron. All rights reserved. -# Copyright (c) 2015 Dustin Schoenbrun. All rights reserved. -# Copyright (c) 2016 Chuck Fouts. All rights reserved. -# Copyright (c) 2016 Mike Rooney. All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
-""" -Volume driver library for NetApp 7/C-mode block storage systems. -""" - -import copy -import math -import sys -import uuid - -from oslo_log import log as logging -from oslo_log import versionutils -from oslo_utils import excutils -from oslo_utils import units -import six - -from cinder import exception -from cinder.i18n import _ -from cinder import utils -from cinder.volume.drivers.netapp.dataontap.client import api as netapp_api -from cinder.volume.drivers.netapp.dataontap.utils import loopingcalls -from cinder.volume.drivers.netapp import options as na_opts -from cinder.volume.drivers.netapp import utils as na_utils -from cinder.volume import utils as volume_utils -from cinder.zonemanager import utils as fczm_utils - -LOG = logging.getLogger(__name__) - - -class NetAppLun(object): - """Represents a LUN on NetApp storage.""" - - def __init__(self, handle, name, size, metadata_dict): - self.handle = handle - self.name = name - self.size = size - self.metadata = metadata_dict or {} - - def get_metadata_property(self, prop): - """Get the metadata property of a LUN.""" - if prop in self.metadata: - return self.metadata[prop] - name = self.name - LOG.debug("No metadata property %(prop)s defined for the LUN %(name)s", - {'prop': prop, 'name': name}) - - def __str__(self, *args, **kwargs): - return 'NetApp LUN [handle:%s, name:%s, size:%s, metadata:%s]' % ( - self.handle, self.name, self.size, self.metadata) - - -@six.add_metaclass(utils.TraceWrapperMetaclass) -class NetAppBlockStorageLibrary(object): - """NetApp block storage library for Data ONTAP.""" - - # do not increment this as it may be used in volume type definitions - VERSION = "1.0.0" - REQUIRED_FLAGS = ['netapp_login', 'netapp_password', - 'netapp_server_hostname'] - ALLOWED_LUN_OS_TYPES = ['linux', 'aix', 'hpux', 'image', 'windows', - 'windows_2008', 'windows_gpt', 'solaris', - 'solaris_efi', 'netware', 'openvms', 'hyper_v'] - ALLOWED_IGROUP_HOST_TYPES = ['linux', 'aix', 'hpux', 'windows', 'solaris', - 
'netware', 'default', 'vmware', 'openvms', - 'xen', 'hyper_v'] - DEFAULT_LUN_OS = 'linux' - DEFAULT_HOST_TYPE = 'linux' - DEFAULT_FILTER_FUNCTION = 'capabilities.utilization < 70' - DEFAULT_GOODNESS_FUNCTION = '100 - capabilities.utilization' - - def __init__(self, driver_name, driver_protocol, **kwargs): - - na_utils.validate_instantiation(**kwargs) - - self.driver_name = driver_name - self.driver_protocol = driver_protocol - self.zapi_client = None - self._stats = {} - self.lun_table = {} - self.lun_ostype = None - self.host_type = None - self.lun_space_reservation = 'true' - self.lookup_service = fczm_utils.create_lookup_service() - self.app_version = kwargs.get("app_version", "unknown") - self.host = kwargs.get('host') - self.backend_name = self.host.split('@')[1] - - self.configuration = kwargs['configuration'] - self.configuration.append_config_values(na_opts.netapp_connection_opts) - self.configuration.append_config_values(na_opts.netapp_basicauth_opts) - self.configuration.append_config_values(na_opts.netapp_transport_opts) - self.configuration.append_config_values( - na_opts.netapp_provisioning_opts) - self.configuration.append_config_values(na_opts.netapp_san_opts) - self.max_over_subscription_ratio = ( - self.configuration.max_over_subscription_ratio) - self.reserved_percentage = self._get_reserved_percentage() - self.loopingcalls = loopingcalls.LoopingCalls() - - def _get_reserved_percentage(self): - # If the legacy config option if it is set to the default - # value, use the more general configuration option. - if self.configuration.netapp_size_multiplier == ( - na_opts.NETAPP_SIZE_MULTIPLIER_DEFAULT): - return self.configuration.reserved_percentage - - # If the legacy config option has a non-default value, - # honor it for one release. Note that the "size multiplier" - # actually acted as a divisor in the code and didn't apply - # to the file size (as the help message for this option suggest), - # but rather to total and free size for the pool. 
- divisor = self.configuration.netapp_size_multiplier - reserved_ratio = round(1 - (1 / divisor), 2) - reserved_percentage = 100 * int(reserved_ratio) - msg = ('The "netapp_size_multiplier" configuration option is ' - 'deprecated and will be removed in the Mitaka release. ' - 'Please set "reserved_percentage = %d" instead.') % ( - reserved_percentage) - versionutils.report_deprecated_feature(LOG, msg) - return reserved_percentage - - def do_setup(self, context): - na_utils.check_flags(self.REQUIRED_FLAGS, self.configuration) - self.lun_ostype = (self.configuration.netapp_lun_ostype - or self.DEFAULT_LUN_OS) - self.host_type = (self.configuration.netapp_host_type - or self.DEFAULT_HOST_TYPE) - if self.configuration.netapp_lun_space_reservation == 'enabled': - self.lun_space_reservation = 'true' - else: - self.lun_space_reservation = 'false' - - def check_for_setup_error(self): - """Check that the driver is working and can communicate. - - Discovers the LUNs on the NetApp server. - """ - if self.lun_ostype not in self.ALLOWED_LUN_OS_TYPES: - msg = _("Invalid value for NetApp configuration" - " option netapp_lun_ostype.") - LOG.error(msg) - raise exception.NetAppDriverException(msg) - if self.host_type not in self.ALLOWED_IGROUP_HOST_TYPES: - msg = _("Invalid value for NetApp configuration" - " option netapp_host_type.") - LOG.error(msg) - raise exception.NetAppDriverException(msg) - lun_list = self.zapi_client.get_lun_list() - self._extract_and_populate_luns(lun_list) - LOG.debug("Success getting list of LUNs from server.") - self.loopingcalls.start_tasks() - - def _add_looping_tasks(self): - """Add tasks that need to be executed at a fixed interval. - - Inheriting class overrides and then explicitly calls this method. - """ - - # Add the task that deletes snapshots marked for deletion. 
- self.loopingcalls.add_task( - self._delete_snapshots_marked_for_deletion, - loopingcalls.ONE_MINUTE, - loopingcalls.ONE_MINUTE) - - # Add the task that logs EMS messages - self.loopingcalls.add_task( - self._handle_ems_logging, - loopingcalls.ONE_HOUR) - - def _delete_snapshots_marked_for_deletion(self): - volume_list = self._get_backing_flexvol_names() - snapshots = self.zapi_client.get_snapshots_marked_for_deletion( - volume_list) - for snapshot in snapshots: - self.zapi_client.delete_snapshot( - snapshot['volume_name'], snapshot['name']) - - def _handle_ems_logging(self): - """Log autosupport messages.""" - raise NotImplementedError() - - def get_pool(self, volume): - """Return pool name where volume resides. - - :param volume: The volume hosted by the driver. - :return: Name of the pool where given volume is hosted. - """ - name = volume['name'] - metadata = self._get_lun_attr(name, 'metadata') or dict() - return metadata.get('Volume', None) - - def create_volume(self, volume): - """Driver entry point for creating a new volume (Data ONTAP LUN).""" - - LOG.debug('create_volume on %s', volume['host']) - - # get Data ONTAP volume name as pool name - pool_name = volume_utils.extract_host(volume['host'], level='pool') - - if pool_name is None: - msg = _("Pool is not available in the volume host field.") - raise exception.InvalidHost(reason=msg) - - extra_specs = na_utils.get_volume_extra_specs(volume) - - lun_name = volume['name'] - - size = int(volume['size']) * units.Gi - - metadata = {'OsType': self.lun_ostype, - 'SpaceReserved': self.lun_space_reservation, - 'Path': '/vol/%s/%s' % (pool_name, lun_name)} - - qos_policy_group_info = self._setup_qos_for_volume(volume, extra_specs) - qos_policy_group_name = ( - na_utils.get_qos_policy_group_name_from_info( - qos_policy_group_info)) - - try: - self._create_lun(pool_name, lun_name, size, metadata, - qos_policy_group_name) - except Exception: - LOG.exception("Exception creating LUN %(name)s in pool %(pool)s.", - 
{'name': lun_name, 'pool': pool_name}) - self._mark_qos_policy_group_for_deletion(qos_policy_group_info) - msg = _("Volume %s could not be created.") - raise exception.VolumeBackendAPIException(data=msg % ( - volume['name'])) - LOG.debug('Created LUN with name %(name)s and QoS info %(qos)s', - {'name': lun_name, 'qos': qos_policy_group_info}) - - metadata['Path'] = '/vol/%s/%s' % (pool_name, lun_name) - metadata['Volume'] = pool_name - metadata['Qtree'] = None - - handle = self._create_lun_handle(metadata) - self._add_lun_to_table(NetAppLun(handle, lun_name, size, metadata)) - - model_update = self._get_volume_model_update(volume) - - return model_update - - def _setup_qos_for_volume(self, volume, extra_specs): - return None - - def _get_volume_model_update(self, volume): - """Provide any updates necessary for a volume being created/managed.""" - raise NotImplementedError - - def _mark_qos_policy_group_for_deletion(self, qos_policy_group_info): - return - - def delete_volume(self, volume): - """Driver entry point for destroying existing volumes.""" - self._delete_lun(volume['name']) - - def _delete_lun(self, lun_name): - """Helper method to delete LUN backing a volume or snapshot.""" - - metadata = self._get_lun_attr(lun_name, 'metadata') - if metadata: - self.zapi_client.destroy_lun(metadata['Path']) - self.lun_table.pop(lun_name) - else: - LOG.warning("No entry in LUN table for volume/snapshot" - " %(name)s.", {'name': lun_name}) - - def ensure_export(self, context, volume): - """Driver entry point to get the export info for an existing volume.""" - handle = self._get_lun_attr(volume['name'], 'handle') - return {'provider_location': handle} - - def create_export(self, context, volume): - """Driver entry point to get the export info for a new volume.""" - handle = self._get_lun_attr(volume['name'], 'handle') - return {'provider_location': handle} - - def remove_export(self, context, volume): - """Driver entry point to remove an export for a volume. 
- - Since exporting is idempotent in this driver, we have nothing - to do for unexporting. - """ - - pass - - def create_snapshot(self, snapshot): - """Driver entry point for creating a snapshot. - - This driver implements snapshots by using efficient single-file - (LUN) cloning. - """ - - vol_name = snapshot['volume_name'] - snapshot_name = snapshot['name'] - lun = self._get_lun_from_table(vol_name) - self._clone_lun(lun.name, snapshot_name, space_reserved='false', - is_snapshot=True) - - def delete_snapshot(self, snapshot): - """Driver entry point for deleting a snapshot.""" - self._delete_lun(snapshot['name']) - LOG.debug("Snapshot %s deletion successful", snapshot['name']) - - def create_volume_from_snapshot(self, volume, snapshot): - source = {'name': snapshot['name'], 'size': snapshot['volume_size']} - return self._clone_source_to_destination(source, volume) - - def create_cloned_volume(self, volume, src_vref): - src_lun = self._get_lun_from_table(src_vref['name']) - source = {'name': src_lun.name, 'size': src_vref['size']} - return self._clone_source_to_destination(source, volume) - - def _clone_source_to_destination(self, source, destination_volume): - source_size = source['size'] - destination_size = destination_volume['size'] - - source_name = source['name'] - destination_name = destination_volume['name'] - - extra_specs = na_utils.get_volume_extra_specs(destination_volume) - - qos_policy_group_info = self._setup_qos_for_volume( - destination_volume, extra_specs) - qos_policy_group_name = ( - na_utils.get_qos_policy_group_name_from_info( - qos_policy_group_info)) - - try: - self._clone_lun(source_name, destination_name, - space_reserved=self.lun_space_reservation, - qos_policy_group_name=qos_policy_group_name) - - if destination_size != source_size: - - try: - self._extend_volume(destination_volume, - destination_size, - qos_policy_group_name) - except Exception: - with excutils.save_and_reraise_exception(): - LOG.error("Resizing %s failed. 
Cleaning volume.", - destination_volume['id']) - self.delete_volume(destination_volume) - - return self._get_volume_model_update(destination_volume) - - except Exception: - LOG.exception("Exception cloning volume %(name)s from source " - "volume %(source)s.", - {'name': destination_name, 'source': source_name}) - - self._mark_qos_policy_group_for_deletion(qos_policy_group_info) - - msg = _("Volume %s could not be created from source volume.") - raise exception.VolumeBackendAPIException( - data=msg % destination_name) - - def _create_lun(self, volume_name, lun_name, size, - metadata, qos_policy_group_name=None): - """Creates a LUN, handling Data ONTAP differences as needed.""" - raise NotImplementedError() - - def _create_lun_handle(self, metadata): - """Returns LUN handle based on filer type.""" - raise NotImplementedError() - - def _extract_lun_info(self, lun): - """Extracts the LUNs from API and populates the LUN table.""" - - meta_dict = self._create_lun_meta(lun) - path = lun.get_child_content('path') - (_rest, _splitter, name) = path.rpartition('/') - handle = self._create_lun_handle(meta_dict) - size = lun.get_child_content('size') - return NetAppLun(handle, name, size, meta_dict) - - def _extract_and_populate_luns(self, api_luns): - """Extracts the LUNs from API and populates the LUN table.""" - - for lun in api_luns: - discovered_lun = self._extract_lun_info(lun) - self._add_lun_to_table(discovered_lun) - - def _map_lun(self, name, initiator_list, initiator_type, lun_id=None): - """Maps LUN to the initiator(s) and returns LUN ID assigned.""" - metadata = self._get_lun_attr(name, 'metadata') - path = metadata['Path'] - igroup_name, ig_host_os, ig_type = self._get_or_create_igroup( - initiator_list, initiator_type, self.host_type) - if ig_host_os != self.host_type: - LOG.warning("LUN misalignment may occur for current" - " initiator group %(ig_nm)s) with host OS type" - " %(ig_os)s. 
Please configure initiator group" - " manually according to the type of the" - " host OS.", - {'ig_nm': igroup_name, 'ig_os': ig_host_os}) - try: - return self.zapi_client.map_lun(path, igroup_name, lun_id=lun_id) - except netapp_api.NaApiError: - exc_info = sys.exc_info() - (_igroup, lun_id) = self._find_mapped_lun_igroup(path, - initiator_list) - if lun_id is not None: - return lun_id - else: - six.reraise(*exc_info) - - def _unmap_lun(self, path, initiator_list): - """Unmaps a LUN from given initiator.""" - (igroup_name, _lun_id) = self._find_mapped_lun_igroup(path, - initiator_list) - self.zapi_client.unmap_lun(path, igroup_name) - - def _find_mapped_lun_igroup(self, path, initiator_list): - """Find an igroup for a LUN mapped to the given initiator(s).""" - raise NotImplementedError() - - def _has_luns_mapped_to_initiators(self, initiator_list): - """Checks whether any LUNs are mapped to the given initiator(s).""" - return self.zapi_client.has_luns_mapped_to_initiators(initiator_list) - - def _get_or_create_igroup(self, initiator_list, initiator_group_type, - host_os_type): - """Checks for an igroup for a set of one or more initiators. - - Creates igroup if not already present with given host os type, - igroup type and adds initiators. 
- """ - igroups = self.zapi_client.get_igroup_by_initiators(initiator_list) - igroup_name = None - - if igroups: - igroup = igroups[0] - igroup_name = igroup['initiator-group-name'] - host_os_type = igroup['initiator-group-os-type'] - initiator_group_type = igroup['initiator-group-type'] - - if not igroup_name: - igroup_name = self._create_igroup_add_initiators( - initiator_group_type, host_os_type, initiator_list) - return igroup_name, host_os_type, initiator_group_type - - def _create_igroup_add_initiators(self, initiator_group_type, - host_os_type, initiator_list): - """Creates igroup and adds initiators.""" - igroup_name = na_utils.OPENSTACK_PREFIX + six.text_type(uuid.uuid4()) - self.zapi_client.create_igroup(igroup_name, initiator_group_type, - host_os_type) - for initiator in initiator_list: - self.zapi_client.add_igroup_initiator(igroup_name, initiator) - return igroup_name - - def _add_lun_to_table(self, lun): - """Adds LUN to cache table.""" - if not isinstance(lun, NetAppLun): - msg = _("Object is not a NetApp LUN.") - raise exception.VolumeBackendAPIException(data=msg) - self.lun_table[lun.name] = lun - - def _get_lun_from_table(self, name): - """Gets LUN from cache table. - - Refreshes cache if LUN not found in cache. 
- """ - lun = self.lun_table.get(name) - if lun is None: - lun_list = self.zapi_client.get_lun_list() - self._extract_and_populate_luns(lun_list) - lun = self.lun_table.get(name) - if lun is None: - raise exception.VolumeNotFound(volume_id=name) - return lun - - def _clone_lun(self, name, new_name, space_reserved='true', - qos_policy_group_name=None, src_block=0, dest_block=0, - block_count=0, source_snapshot=None, is_snapshot=False): - """Clone LUN with the given name to the new name.""" - raise NotImplementedError() - - def _get_lun_attr(self, name, attr): - """Get the LUN attribute if found else None.""" - try: - attr = getattr(self._get_lun_from_table(name), attr) - return attr - except exception.VolumeNotFound as e: - LOG.error("Message: %s", e.msg) - except Exception as e: - LOG.error("Error getting LUN attribute. Exception: %s", e) - return None - - def _create_lun_meta(self, lun): - raise NotImplementedError() - - def _get_fc_target_wwpns(self, include_partner=True): - raise NotImplementedError() - - def get_volume_stats(self, refresh=False, filter_function=None, - goodness_function=None): - """Get volume stats. - - If 'refresh' is True, update the stats first. 
- """ - - if refresh: - self._update_volume_stats(filter_function=filter_function, - goodness_function=goodness_function) - return self._stats - - def _update_volume_stats(self, filter_function=None, - goodness_function=None): - raise NotImplementedError() - - def get_default_filter_function(self): - """Get the default filter_function string.""" - return self.DEFAULT_FILTER_FUNCTION - - def get_default_goodness_function(self): - """Get the default goodness_function string.""" - return self.DEFAULT_GOODNESS_FUNCTION - - def extend_volume(self, volume, new_size): - """Driver entry point to increase the size of a volume.""" - - extra_specs = na_utils.get_volume_extra_specs(volume) - - # Create volume copy with new size for size-dependent QOS specs - volume_copy = copy.copy(volume) - volume_copy['size'] = new_size - - qos_policy_group_info = self._setup_qos_for_volume(volume_copy, - extra_specs) - qos_policy_group_name = ( - na_utils.get_qos_policy_group_name_from_info( - qos_policy_group_info)) - - try: - self._extend_volume(volume, new_size, qos_policy_group_name) - except Exception: - with excutils.save_and_reraise_exception(): - # If anything went wrong, revert QoS settings - self._setup_qos_for_volume(volume, extra_specs) - - def _extend_volume(self, volume, new_size, qos_policy_group_name): - """Extend an existing volume to the new size.""" - name = volume['name'] - lun = self._get_lun_from_table(name) - path = lun.metadata['Path'] - curr_size_bytes = six.text_type(lun.size) - new_size_bytes = six.text_type(int(new_size) * units.Gi) - # Reused by clone scenarios. - # Hence comparing the stored size. 
- if curr_size_bytes != new_size_bytes: - lun_geometry = self.zapi_client.get_lun_geometry(path) - if (lun_geometry and lun_geometry.get("max_resize") - and int(lun_geometry.get("max_resize")) >= - int(new_size_bytes)): - self.zapi_client.do_direct_resize(path, new_size_bytes) - else: - self._do_sub_clone_resize( - path, new_size_bytes, - qos_policy_group_name=qos_policy_group_name) - self.lun_table[name].size = new_size_bytes - else: - LOG.info("No need to extend volume %s" - " as it is already the requested new size.", name) - - def _get_vol_option(self, volume_name, option_name): - """Get the value for the volume option.""" - value = None - options = self.zapi_client.get_volume_options(volume_name) - for opt in options: - if opt.get_child_content('name') == option_name: - value = opt.get_child_content('value') - break - return value - - def _do_sub_clone_resize(self, lun_path, new_size_bytes, - qos_policy_group_name=None): - """Resize a LUN beyond its original geometry using sub-LUN cloning. - - Clones the block ranges, swaps the LUNs, and deletes the source LUN. 
- """ - seg = lun_path.split("/") - LOG.info("Resizing LUN %s using clone operation.", seg[-1]) - lun_name = seg[-1] - vol_name = seg[2] - lun = self._get_lun_from_table(lun_name) - metadata = lun.metadata - - compression = self._get_vol_option(vol_name, 'compression') - if compression == "on": - msg = _('%s cannot be resized using clone operation' - ' as it is hosted on compressed volume') - raise exception.VolumeBackendAPIException(data=msg % lun_name) - - block_count = self._get_lun_block_count(lun_path) - if block_count == 0: - msg = _('%s cannot be resized using clone operation' - ' as it contains no blocks.') - raise exception.VolumeBackendAPIException(data=msg % lun_name) - - new_lun_name = 'new-%s' % lun_name - self.zapi_client.create_lun( - vol_name, new_lun_name, new_size_bytes, metadata, - qos_policy_group_name=qos_policy_group_name) - try: - self._clone_lun(lun_name, new_lun_name, block_count=block_count) - self._post_sub_clone_resize(lun_path) - except Exception: - with excutils.save_and_reraise_exception(): - new_lun_path = '/vol/%s/%s' % (vol_name, new_lun_name) - self.zapi_client.destroy_lun(new_lun_path) - - def _post_sub_clone_resize(self, path): - """Try post sub clone resize in a transactional manner.""" - st_tm_mv, st_nw_mv, st_del_old = None, None, None - seg = path.split("/") - LOG.info("Post clone resize LUN %s", seg[-1]) - new_lun = 'new-%s' % (seg[-1]) - tmp_lun = 'tmp-%s' % (seg[-1]) - tmp_path = "/vol/%s/%s" % (seg[2], tmp_lun) - new_path = "/vol/%s/%s" % (seg[2], new_lun) - try: - st_tm_mv = self.zapi_client.move_lun(path, tmp_path) - st_nw_mv = self.zapi_client.move_lun(new_path, path) - st_del_old = self.zapi_client.destroy_lun(tmp_path) - except Exception as e: - if st_tm_mv is None: - msg = _("Failure staging LUN %s to tmp.") - raise exception.VolumeBackendAPIException(data=msg % (seg[-1])) - else: - if st_nw_mv is None: - self.zapi_client.move_lun(tmp_path, path) - msg = _("Failure moving new cloned LUN to %s.") - raise 
exception.VolumeBackendAPIException( - data=msg % (seg[-1])) - elif st_del_old is None: - LOG.error("Failure deleting staged tmp LUN %s.", - tmp_lun) - else: - LOG.error("Unknown exception in" - " post clone resize LUN %s.", seg[-1]) - LOG.error("Exception details: %s", e) - - def _get_lun_block_count(self, path): - """Gets block counts for the LUN.""" - LOG.debug("Getting LUN block count.") - lun_infos = self.zapi_client.get_lun_by_args(path=path) - if not lun_infos: - seg = path.split('/') - msg = _('Failure getting LUN info for %s.') - raise exception.VolumeBackendAPIException(data=msg % seg[-1]) - lun_info = lun_infos[-1] - bs = int(lun_info.get_child_content('block-size')) - ls = int(lun_info.get_child_content('size')) - block_count = ls / bs - return block_count - - def _check_volume_type_for_lun(self, volume, lun, existing_ref, - extra_specs): - """Checks if LUN satisfies the volume type.""" - - def manage_existing(self, volume, existing_ref): - """Brings an existing storage object under Cinder management. - - existing_ref can contain source-id or source-name or both. - source-id: lun uuid. - source-name: complete lun path eg. /vol/vol0/lun. 
- """ - lun = self._get_existing_vol_with_manage_ref(existing_ref) - - extra_specs = na_utils.get_volume_extra_specs(volume) - - self._check_volume_type_for_lun(volume, lun, existing_ref, extra_specs) - - qos_policy_group_info = self._setup_qos_for_volume(volume, extra_specs) - qos_policy_group_name = ( - na_utils.get_qos_policy_group_name_from_info( - qos_policy_group_info)) - - path = lun.get_metadata_property('Path') - if lun.name == volume['name']: - new_path = path - LOG.info("LUN with given ref %s need not be renamed " - "during manage operation.", existing_ref) - else: - (rest, splitter, name) = path.rpartition('/') - new_path = '%s/%s' % (rest, volume['name']) - self.zapi_client.move_lun(path, new_path) - lun = self._get_existing_vol_with_manage_ref( - {'source-name': new_path}) - - if qos_policy_group_name is not None: - self.zapi_client.set_lun_qos_policy_group(new_path, - qos_policy_group_name) - self._add_lun_to_table(lun) - LOG.info("Manage operation completed for LUN with new path" - " %(path)s and uuid %(uuid)s.", - {'path': lun.get_metadata_property('Path'), - 'uuid': lun.get_metadata_property('UUID')}) - - return self._get_volume_model_update(volume) - - def manage_existing_get_size(self, volume, existing_ref): - """Return size of volume to be managed by manage_existing. - - When calculating the size, round up to the next GB. 
- """ - lun = self._get_existing_vol_with_manage_ref(existing_ref) - return int(math.ceil(float(lun.size) / units.Gi)) - - def _get_existing_vol_with_manage_ref(self, existing_ref): - """Get the corresponding LUN from the storage server.""" - - uuid = existing_ref.get('source-id') - path = existing_ref.get('source-name') - - lun_info = {} - if path: - lun_info['path'] = path - elif uuid: - if not hasattr(self, 'vserver'): - reason = _('Volume manage identifier with source-id is only ' - 'supported with clustered Data ONTAP.') - raise exception.ManageExistingInvalidReference( - existing_ref=existing_ref, reason=reason) - lun_info['uuid'] = uuid - else: - reason = _('Volume manage identifier must contain either ' - 'source-id or source-name element.') - raise exception.ManageExistingInvalidReference( - existing_ref=existing_ref, reason=reason) - - luns = self.zapi_client.get_lun_by_args(**lun_info) - - for lun in luns: - netapp_lun = self._extract_lun_info(lun) - if self._is_lun_valid_on_storage(netapp_lun): - return netapp_lun - - raise exception.ManageExistingInvalidReference( - existing_ref=existing_ref, - reason=(_('LUN not found with given ref %s.') % existing_ref)) - - def _is_lun_valid_on_storage(self, lun): - """Validate lun specific to storage system.""" - return True - - def unmanage(self, volume): - """Removes the specified volume from Cinder management. - - Does not delete the underlying backend storage object. - """ - managed_lun = self._get_lun_from_table(volume['name']) - LOG.info("Unmanaged LUN with current path %(path)s and uuid " - "%(uuid)s.", - {'path': managed_lun.get_metadata_property('Path'), - 'uuid': managed_lun.get_metadata_property('UUID') - or 'unknown'}) - - def initialize_connection_iscsi(self, volume, connector): - """Driver entry point to attach a volume to an instance. - - Do the LUN masking on the storage system so the initiator can access - the LUN on the target. Also return the iSCSI properties so the - initiator can find the LUN. 
This implementation does not call - _get_iscsi_properties() to get the properties because cannot store the - LUN number in the database. We only find out what the LUN number will - be during this method call so we construct the properties dictionary - ourselves. - """ - - initiator_name = connector['initiator'] - name = volume['name'] - lun_id = self._map_lun(name, [initiator_name], 'iscsi', None) - - LOG.debug("Mapped LUN %(name)s to the initiator %(initiator_name)s", - {'name': name, 'initiator_name': initiator_name}) - - target_list = self.zapi_client.get_iscsi_target_details() - if not target_list: - raise exception.VolumeBackendAPIException( - data=_('Failed to get LUN target list for the LUN %s') % name) - - LOG.debug("Successfully fetched target list for LUN %(name)s and " - "initiator %(initiator_name)s", - {'name': name, 'initiator_name': initiator_name}) - - preferred_target = self._get_preferred_target_from_list( - target_list) - if preferred_target is None: - msg = _('Failed to get target portal for the LUN %s') - raise exception.VolumeBackendAPIException(data=msg % name) - (address, port) = (preferred_target['address'], - preferred_target['port']) - - iqn = self.zapi_client.get_iscsi_service_details() - if not iqn: - msg = _('Failed to get target IQN for the LUN %s') - raise exception.VolumeBackendAPIException(data=msg % name) - - properties = na_utils.get_iscsi_connection_properties(lun_id, volume, - iqn, address, - port) - - if self.configuration.use_chap_auth: - chap_username, chap_password = self._configure_chap(initiator_name) - self._add_chap_properties(properties, chap_username, chap_password) - - return properties - - def _configure_chap(self, initiator_name): - password = volume_utils.generate_password(na_utils.CHAP_SECRET_LENGTH) - username = na_utils.DEFAULT_CHAP_USER_NAME - - self.zapi_client.set_iscsi_chap_authentication(initiator_name, - username, - password) - LOG.debug("Set iSCSI CHAP authentication.") - - return username, password - - 
def _add_chap_properties(self, properties, username, password): - properties['data']['auth_method'] = 'CHAP' - properties['data']['auth_username'] = username - properties['data']['auth_password'] = password - properties['data']['discovery_auth_method'] = 'CHAP' - properties['data']['discovery_auth_username'] = username - properties['data']['discovery_auth_password'] = password - - def _get_preferred_target_from_list(self, target_details_list, - filter=None): - preferred_target = None - for target in target_details_list: - if filter and target['address'] not in filter: - continue - if target.get('interface-enabled', 'true') == 'true': - preferred_target = target - break - if preferred_target is None and len(target_details_list) > 0: - preferred_target = target_details_list[0] - return preferred_target - - def terminate_connection_iscsi(self, volume, connector, **kwargs): - """Driver entry point to unattach a volume from an instance. - - Unmask the LUN on the storage system so the given initiator can no - longer access it. - """ - - initiator_name = connector['initiator'] - name = volume['name'] - metadata = self._get_lun_attr(name, 'metadata') - path = metadata['Path'] - self._unmap_lun(path, [initiator_name]) - LOG.debug("Unmapped LUN %(name)s from the initiator " - "%(initiator_name)s", - {'name': name, 'initiator_name': initiator_name}) - - def initialize_connection_fc(self, volume, connector): - """Initializes the connection and returns connection info. - - Assign any created volume to a compute node/host so that it can be - used from that host. - - The driver returns a driver_volume_type of 'fibre_channel'. - The target_wwn can be a single entry or a list of wwns that - correspond to the list of remote wwn(s) that will export the volume. - Example return values: - - .. 
code-block:: default - - { - 'driver_volume_type': 'fibre_channel', - 'data': { - 'target_discovered': True, - 'target_lun': 1, - 'target_wwn': '500a098280feeba5', - 'initiator_target_map': { - '21000024ff406cc3': ['500a098280feeba5'], - '21000024ff406cc2': ['500a098280feeba5'] - } - } - } - - Or - - .. code-block:: default - - { - 'driver_volume_type': 'fibre_channel', - 'data': { - 'target_discovered': True, - 'target_lun': 1, - 'target_wwn': ['500a098280feeba5', '500a098290feeba5', - '500a098190feeba5', '500a098180feeba5'], - 'initiator_target_map': { - '21000024ff406cc3': ['500a098280feeba5', - '500a098290feeba5'], - '21000024ff406cc2': ['500a098190feeba5', - '500a098180feeba5'] - } - } - } - - """ - - initiators = [fczm_utils.get_formatted_wwn(wwpn) - for wwpn in connector['wwpns']] - volume_name = volume['name'] - - lun_id = self._map_lun(volume_name, initiators, 'fcp', None) - - LOG.debug("Mapped LUN %(name)s to the initiator(s) %(initiators)s", - {'name': volume_name, 'initiators': initiators}) - - target_wwpns, initiator_target_map, num_paths = ( - self._build_initiator_target_map(connector)) - - if target_wwpns: - LOG.debug("Successfully fetched target details for LUN %(name)s " - "and initiator(s) %(initiators)s", - {'name': volume_name, 'initiators': initiators}) - else: - raise exception.VolumeBackendAPIException( - data=_('Failed to get LUN target details for ' - 'the LUN %s') % volume_name) - - target_info = {'driver_volume_type': 'fibre_channel', - 'data': {'target_discovered': True, - 'target_lun': int(lun_id), - 'target_wwn': target_wwpns, - 'initiator_target_map': initiator_target_map}} - - return target_info - - def terminate_connection_fc(self, volume, connector, **kwargs): - """Disallow connection from connector. - - Return empty data if other volumes are in the same zone. - The FibreChannel ZoneManager doesn't remove zones - if there isn't an initiator_target_map in the - return of terminate_connection. 
- - :returns: data - the target_wwns and initiator_target_map if the - zone is to be removed, otherwise the same map with - an empty dict for the 'data' key - """ - - initiators = [fczm_utils.get_formatted_wwn(wwpn) - for wwpn in connector['wwpns']] - name = volume['name'] - metadata = self._get_lun_attr(name, 'metadata') - path = metadata['Path'] - - self._unmap_lun(path, initiators) - - LOG.debug("Unmapped LUN %(name)s from the initiator %(initiators)s", - {'name': name, 'initiators': initiators}) - - info = {'driver_volume_type': 'fibre_channel', - 'data': {}} - - if not self._has_luns_mapped_to_initiators(initiators): - # No more exports for this host, so tear down zone. - LOG.info("Need to remove FC Zone, building initiator target map") - - target_wwpns, initiator_target_map, num_paths = ( - self._build_initiator_target_map(connector)) - - info['data'] = {'target_wwn': target_wwpns, - 'initiator_target_map': initiator_target_map} - - return info - - def _build_initiator_target_map(self, connector): - """Build the target_wwns and the initiator target map.""" - - # get WWPNs from controller and strip colons - all_target_wwpns = self._get_fc_target_wwpns() - all_target_wwpns = [six.text_type(wwpn).replace(':', '') - for wwpn in all_target_wwpns] - - target_wwpns = [] - init_targ_map = {} - num_paths = 0 - - if self.lookup_service is not None: - # Use FC SAN lookup to determine which ports are visible. 
- dev_map = self.lookup_service.get_device_mapping_from_network( - connector['wwpns'], - all_target_wwpns) - - for fabric_name in dev_map: - fabric = dev_map[fabric_name] - target_wwpns += fabric['target_port_wwn_list'] - for initiator in fabric['initiator_port_wwn_list']: - if initiator not in init_targ_map: - init_targ_map[initiator] = [] - init_targ_map[initiator] += fabric['target_port_wwn_list'] - init_targ_map[initiator] = list(set( - init_targ_map[initiator])) - for target in init_targ_map[initiator]: - num_paths += 1 - target_wwpns = list(set(target_wwpns)) - else: - initiator_wwns = connector['wwpns'] - target_wwpns = all_target_wwpns - - for initiator in initiator_wwns: - init_targ_map[initiator] = target_wwpns - - return target_wwpns, init_targ_map, num_paths - - def create_consistencygroup(self, group): - """Driver entry point for creating a consistency group. - - ONTAP does not maintain an actual CG construct. As a result, no - communication to the backend is necessary for consistency group - creation. - - :return: Hard-coded model update for consistency group model. - """ - model_update = {'status': 'available'} - return model_update - - def delete_consistencygroup(self, group, volumes): - """Driver entry point for deleting a consistency group. - - :return: Updated consistency group model and list of volume models - for the volumes that were deleted. - """ - model_update = {'status': 'deleted'} - volumes_model_update = [] - for volume in volumes: - try: - self._delete_lun(volume['name']) - volumes_model_update.append( - {'id': volume['id'], 'status': 'deleted'}) - except Exception: - volumes_model_update.append( - {'id': volume['id'], 'status': 'error_deleting'}) - LOG.exception("Volume %(vol)s in the consistency group " - "could not be deleted.", {'vol': volume}) - return model_update, volumes_model_update - - def update_consistencygroup(self, group, add_volumes=None, - remove_volumes=None): - """Driver entry point for updating a consistency group. 
- - Since no actual CG construct is ever created in ONTAP, it is not - necessary to update any metadata on the backend. Since this is a NO-OP, - there is guaranteed to be no change in any of the volumes' statuses. - """ - return None, None, None - - def create_cgsnapshot(self, cgsnapshot, snapshots): - """Creates a Cinder cgsnapshot object. - - The Cinder cgsnapshot object is created by making use of an - ephemeral ONTAP CG in order to provide write-order consistency for a - set of flexvol snapshots. First, a list of the flexvols backing the - given Cinder CG must be gathered. An ONTAP cg-snapshot of these - flexvols will create a snapshot copy of all the Cinder volumes in the - CG group. For each Cinder volume in the CG, it is then necessary to - clone its backing LUN from the ONTAP cg-snapshot. The naming convention - used for the clones is what indicates the clone's role as a Cinder - snapshot and its inclusion in a Cinder CG. The ONTAP CG-snapshot of - the flexvols is no longer required after having cloned the LUNs - backing the Cinder volumes in the Cinder CG. - - :return: An implicit update for cgsnapshot and snapshots models that - is interpreted by the manager to set their models to - available. - """ - flexvols = set() - for snapshot in snapshots: - flexvols.add(volume_utils.extract_host(snapshot['volume']['host'], - level='pool')) - - self.zapi_client.create_cg_snapshot(flexvols, cgsnapshot['id']) - - for snapshot in snapshots: - self._clone_lun(snapshot['volume']['name'], snapshot['name'], - source_snapshot=cgsnapshot['id']) - - for flexvol in flexvols: - try: - self.zapi_client.wait_for_busy_snapshot( - flexvol, cgsnapshot['id']) - self.zapi_client.delete_snapshot( - flexvol, cgsnapshot['id']) - except exception.SnapshotIsBusy: - self.zapi_client.mark_snapshot_for_deletion( - flexvol, cgsnapshot['id']) - - return None, None - - def delete_cgsnapshot(self, cgsnapshot, snapshots): - """Delete LUNs backing each snapshot in the cgsnapshot. 
- - :return: An implicit update for snapshots models that is interpreted - by the manager to set their models to deleted. - """ - for snapshot in snapshots: - self._delete_lun(snapshot['name']) - LOG.debug("Snapshot %s deletion successful", snapshot['name']) - - return None, None - - def create_consistencygroup_from_src(self, group, volumes, - cgsnapshot=None, snapshots=None, - source_cg=None, source_vols=None): - """Creates a CG from a either a cgsnapshot or group of cinder vols. - - :return: An implicit update for the volumes model that is - interpreted by the manager as a successful operation. - """ - LOG.debug("VOLUMES %s ", [dict(vol) for vol in volumes]) - volume_model_updates = [] - - if cgsnapshot: - vols = zip(volumes, snapshots) - - for volume, snapshot in vols: - source = { - 'name': snapshot['name'], - 'size': snapshot['volume_size'], - } - volume_model_update = self._clone_source_to_destination( - source, volume) - if volume_model_update is not None: - volume_model_update['id'] = volume['id'] - volume_model_updates.append(volume_model_update) - - else: - vols = zip(volumes, source_vols) - - for volume, old_src_vref in vols: - src_lun = self._get_lun_from_table(old_src_vref['name']) - source = {'name': src_lun.name, 'size': old_src_vref['size']} - volume_model_update = self._clone_source_to_destination( - source, volume) - if volume_model_update is not None: - volume_model_update['id'] = volume['id'] - volume_model_updates.append(volume_model_update) - - return None, volume_model_updates - - def _get_backing_flexvol_names(self): - """Returns a list of backing flexvol names.""" - raise NotImplementedError() diff --git a/cinder/volume/drivers/netapp/dataontap/block_cmode.py b/cinder/volume/drivers/netapp/dataontap/block_cmode.py deleted file mode 100644 index 5b3fb18c6..000000000 --- a/cinder/volume/drivers/netapp/dataontap/block_cmode.py +++ /dev/null @@ -1,463 +0,0 @@ -# Copyright (c) 2012 NetApp, Inc. All rights reserved. 
-# Copyright (c) 2014 Ben Swartzlander. All rights reserved. -# Copyright (c) 2014 Navneet Singh. All rights reserved. -# Copyright (c) 2014 Clinton Knight. All rights reserved. -# Copyright (c) 2014 Alex Meade. All rights reserved. -# Copyright (c) 2014 Andrew Kerr. All rights reserved. -# Copyright (c) 2014 Jeff Applewhite. All rights reserved. -# Copyright (c) 2015 Tom Barron. All rights reserved. -# Copyright (c) 2015 Goutham Pacha Ravi. All rights reserved. -# Copyright (c) 2016 Mike Rooney. All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -""" -Volume driver library for NetApp C-mode block storage systems. 
-""" - -from oslo_log import log as logging -from oslo_utils import units -import six - -from cinder import exception -from cinder.i18n import _ -from cinder.objects import fields -from cinder import utils -from cinder.volume.drivers.netapp.dataontap import block_base -from cinder.volume.drivers.netapp.dataontap.performance import perf_cmode -from cinder.volume.drivers.netapp.dataontap.utils import capabilities -from cinder.volume.drivers.netapp.dataontap.utils import data_motion -from cinder.volume.drivers.netapp.dataontap.utils import loopingcalls -from cinder.volume.drivers.netapp.dataontap.utils import utils as dot_utils -from cinder.volume.drivers.netapp import options as na_opts -from cinder.volume.drivers.netapp import utils as na_utils - - -LOG = logging.getLogger(__name__) - - -@six.add_metaclass(utils.TraceWrapperMetaclass) -class NetAppBlockStorageCmodeLibrary(block_base.NetAppBlockStorageLibrary, - data_motion.DataMotionMixin): - """NetApp block storage library for Data ONTAP (Cluster-mode).""" - - REQUIRED_CMODE_FLAGS = ['netapp_vserver'] - - def __init__(self, driver_name, driver_protocol, **kwargs): - super(NetAppBlockStorageCmodeLibrary, self).__init__(driver_name, - driver_protocol, - **kwargs) - self.configuration.append_config_values(na_opts.netapp_cluster_opts) - self.driver_mode = 'cluster' - self.failed_over_backend_name = kwargs.get('active_backend_id') - self.failed_over = self.failed_over_backend_name is not None - self.replication_enabled = ( - True if self.get_replication_backend_names( - self.configuration) else False) - - def do_setup(self, context): - super(NetAppBlockStorageCmodeLibrary, self).do_setup(context) - na_utils.check_flags(self.REQUIRED_CMODE_FLAGS, self.configuration) - - # cDOT API client - self.zapi_client = dot_utils.get_client_for_backend( - self.failed_over_backend_name or self.backend_name) - self.vserver = self.zapi_client.vserver - self.using_cluster_credentials = \ - 
self.zapi_client.check_for_cluster_credentials() - - # Performance monitoring library - self.perf_library = perf_cmode.PerformanceCmodeLibrary( - self.zapi_client) - - # Storage service catalog - self.ssc_library = capabilities.CapabilitiesLibrary( - self.driver_protocol, self.vserver, self.zapi_client, - self.configuration) - - def _update_zapi_client(self, backend_name): - """Set cDOT API client for the specified config backend stanza name.""" - - self.zapi_client = dot_utils.get_client_for_backend(backend_name) - self.vserver = self.zapi_client.vserver - self.ssc_library._update_for_failover(self.zapi_client, - self._get_flexvol_to_pool_map()) - ssc = self.ssc_library.get_ssc() - self.perf_library._update_for_failover(self.zapi_client, ssc) - # Clear LUN table cache - self.lun_table = {} - - def check_for_setup_error(self): - """Check that the driver is working and can communicate.""" - self.ssc_library.check_api_permissions() - - if not self._get_flexvol_to_pool_map(): - msg = _('No pools are available for provisioning volumes. ' - 'Ensure that the configuration option ' - 'netapp_pool_name_search_pattern is set correctly.') - raise exception.NetAppDriverException(msg) - self._add_looping_tasks() - super(NetAppBlockStorageCmodeLibrary, self).check_for_setup_error() - - def _add_looping_tasks(self): - """Add tasks that need to be executed at a fixed interval.""" - - # Note(cknight): Run the update once in the current thread to prevent a - # race with the first invocation of _update_volume_stats. - self._update_ssc() - - # Add the task that updates the slow-changing storage service catalog - self.loopingcalls.add_task(self._update_ssc, - loopingcalls.ONE_HOUR, - loopingcalls.ONE_HOUR) - - # Add the task that harvests soft-deleted QoS policy groups. 
- self.loopingcalls.add_task( - self.zapi_client.remove_unused_qos_policy_groups, - loopingcalls.ONE_MINUTE, - loopingcalls.ONE_MINUTE) - - self.loopingcalls.add_task( - self._handle_housekeeping_tasks, - loopingcalls.TEN_MINUTES, - 0) - - super(NetAppBlockStorageCmodeLibrary, self)._add_looping_tasks() - - def _handle_housekeeping_tasks(self): - """Handle various cleanup activities.""" - - # Harvest soft-deleted QoS policy groups - self.zapi_client.remove_unused_qos_policy_groups() - - active_backend = self.failed_over_backend_name or self.backend_name - - LOG.debug("Current service state: Replication enabled: %(" - "replication)s. Failed-Over: %(failed)s. Active Backend " - "ID: %(active)s", - { - 'replication': self.replication_enabled, - 'failed': self.failed_over, - 'active': active_backend, - }) - - # Create pool mirrors if whole-backend replication configured - if self.replication_enabled and not self.failed_over: - self.ensure_snapmirrors( - self.configuration, self.backend_name, - self.ssc_library.get_ssc_flexvol_names()) - - def _handle_ems_logging(self): - """Log autosupport messages.""" - - base_ems_message = dot_utils.build_ems_log_message_0( - self.driver_name, self.app_version, self.driver_mode) - self.zapi_client.send_ems_log_message(base_ems_message) - - pool_ems_message = dot_utils.build_ems_log_message_1( - self.driver_name, self.app_version, self.vserver, - self.ssc_library.get_ssc_flexvol_names(), []) - self.zapi_client.send_ems_log_message(pool_ems_message) - - def _create_lun(self, volume_name, lun_name, size, - metadata, qos_policy_group_name=None): - """Creates a LUN, handling Data ONTAP differences as needed.""" - - self.zapi_client.create_lun( - volume_name, lun_name, size, metadata, qos_policy_group_name) - - def _create_lun_handle(self, metadata, vserver=None): - """Returns LUN handle based on filer type.""" - vserver = vserver or self.vserver - return '%s:%s' % (self.vserver, metadata['Path']) - - def _find_mapped_lun_igroup(self, 
path, initiator_list): - """Find an igroup for a LUN mapped to the given initiator(s).""" - initiator_igroups = self.zapi_client.get_igroup_by_initiators( - initiator_list) - lun_maps = self.zapi_client.get_lun_map(path) - if initiator_igroups and lun_maps: - for igroup in initiator_igroups: - igroup_name = igroup['initiator-group-name'] - if igroup_name.startswith(na_utils.OPENSTACK_PREFIX): - for lun_map in lun_maps: - if lun_map['initiator-group'] == igroup_name: - return igroup_name, lun_map['lun-id'] - return None, None - - def _clone_lun(self, name, new_name, space_reserved=None, - qos_policy_group_name=None, src_block=0, dest_block=0, - block_count=0, source_snapshot=None, is_snapshot=False): - """Clone LUN with the given handle to the new name.""" - if not space_reserved: - space_reserved = self.lun_space_reservation - metadata = self._get_lun_attr(name, 'metadata') - volume = metadata['Volume'] - - self.zapi_client.clone_lun(volume, name, new_name, space_reserved, - qos_policy_group_name=qos_policy_group_name, - src_block=src_block, dest_block=dest_block, - block_count=block_count, - source_snapshot=source_snapshot, - is_snapshot=is_snapshot) - - LOG.debug("Cloned LUN with new name %s", new_name) - lun = self.zapi_client.get_lun_by_args(vserver=self.vserver, - path='/vol/%s/%s' - % (volume, new_name)) - if len(lun) == 0: - msg = _("No cloned LUN named %s found on the filer") - raise exception.VolumeBackendAPIException(data=msg % new_name) - clone_meta = self._create_lun_meta(lun[0]) - self._add_lun_to_table( - block_base.NetAppLun('%s:%s' % (clone_meta['Vserver'], - clone_meta['Path']), - new_name, - lun[0].get_child_content('size'), - clone_meta)) - - def _create_lun_meta(self, lun): - """Creates LUN metadata dictionary.""" - self.zapi_client.check_is_naelement(lun) - meta_dict = {} - meta_dict['Vserver'] = lun.get_child_content('vserver') - meta_dict['Volume'] = lun.get_child_content('volume') - meta_dict['Qtree'] = lun.get_child_content('qtree') - 
meta_dict['Path'] = lun.get_child_content('path') - meta_dict['OsType'] = lun.get_child_content('multiprotocol-type') - meta_dict['SpaceReserved'] = \ - lun.get_child_content('is-space-reservation-enabled') - meta_dict['UUID'] = lun.get_child_content('uuid') - return meta_dict - - def _get_fc_target_wwpns(self, include_partner=True): - return self.zapi_client.get_fc_target_wwpns() - - def _update_volume_stats(self, filter_function=None, - goodness_function=None): - """Retrieve backend stats.""" - - LOG.debug('Updating volume stats') - data = {} - backend_name = self.configuration.safe_get('volume_backend_name') - data['volume_backend_name'] = backend_name or self.driver_name - data['vendor_name'] = 'NetApp' - data['driver_version'] = self.VERSION - data['storage_protocol'] = self.driver_protocol - data['pools'] = self._get_pool_stats( - filter_function=filter_function, - goodness_function=goodness_function) - data['sparse_copy_volume'] = True - - # Used for service state report - data['replication_enabled'] = self.replication_enabled - - self._stats = data - - def _get_pool_stats(self, filter_function=None, goodness_function=None): - """Retrieve pool (Data ONTAP flexvol) stats. - - Pool statistics are assembled from static driver capabilities, the - Storage Service Catalog of flexvol attributes, and real-time capacity - and controller utilization metrics. The pool name is the flexvol name. 
- """ - - pools = [] - - ssc = self.ssc_library.get_ssc() - if not ssc: - return pools - - # Utilization and performance metrics require cluster-scoped - # credentials - if self.using_cluster_credentials: - # Get up-to-date node utilization metrics just once - self.perf_library.update_performance_cache(ssc) - - # Get up-to-date aggregate capacities just once - aggregates = self.ssc_library.get_ssc_aggregates() - aggr_capacities = self.zapi_client.get_aggregate_capacities( - aggregates) - else: - aggr_capacities = {} - - for ssc_vol_name, ssc_vol_info in ssc.items(): - - pool = dict() - - # Add storage service catalog data - pool.update(ssc_vol_info) - - # Add driver capabilities and config info - pool['QoS_support'] = True - pool['multiattach'] = False - pool['consistencygroup_support'] = True - pool['reserved_percentage'] = self.reserved_percentage - pool['max_over_subscription_ratio'] = ( - self.max_over_subscription_ratio) - - # Add up-to-date capacity info - capacity = self.zapi_client.get_flexvol_capacity( - flexvol_name=ssc_vol_name) - - size_total_gb = capacity['size-total'] / units.Gi - pool['total_capacity_gb'] = na_utils.round_down(size_total_gb) - - size_available_gb = capacity['size-available'] / units.Gi - pool['free_capacity_gb'] = na_utils.round_down(size_available_gb) - - pool['provisioned_capacity_gb'] = round( - pool['total_capacity_gb'] - pool['free_capacity_gb'], 2) - - if self.using_cluster_credentials: - dedupe_used = self.zapi_client.get_flexvol_dedupe_used_percent( - ssc_vol_name) - else: - dedupe_used = 0.0 - pool['netapp_dedupe_used_percent'] = na_utils.round_down( - dedupe_used) - - aggregate_name = ssc_vol_info.get('netapp_aggregate') - aggr_capacity = aggr_capacities.get(aggregate_name, {}) - pool['netapp_aggregate_used_percent'] = aggr_capacity.get( - 'percent-used', 0) - - # Add utilization data - utilization = self.perf_library.get_node_utilization_for_pool( - ssc_vol_name) - pool['utilization'] = na_utils.round_down(utilization) - 
pool['filter_function'] = filter_function - pool['goodness_function'] = goodness_function - - # Add replication capabilities/stats - pool.update( - self.get_replication_backend_stats(self.configuration)) - - pools.append(pool) - - return pools - - def _update_ssc(self): - """Refresh the storage service catalog with the latest set of pools.""" - - self.ssc_library.update_ssc(self._get_flexvol_to_pool_map()) - - def _get_flexvol_to_pool_map(self): - """Get the flexvols that match the pool name search pattern. - - The map is of the format suitable for seeding the storage service - catalog: { : {'pool_name': }} - """ - - pool_regex = na_utils.get_pool_name_filter_regex(self.configuration) - - pools = {} - flexvol_names = self.zapi_client.list_flexvols() - - for flexvol_name in flexvol_names: - - msg_args = { - 'flexvol': flexvol_name, - 'vol_pattern': pool_regex.pattern, - } - - if pool_regex.match(flexvol_name): - msg = "Volume '%(flexvol)s' matches %(vol_pattern)s" - LOG.debug(msg, msg_args) - pools[flexvol_name] = {'pool_name': flexvol_name} - else: - msg = "Volume '%(flexvol)s' does not match %(vol_pattern)s" - LOG.debug(msg, msg_args) - - return pools - - def delete_volume(self, volume): - """Driver entry point for destroying existing volumes.""" - super(NetAppBlockStorageCmodeLibrary, self).delete_volume(volume) - try: - qos_policy_group_info = na_utils.get_valid_qos_policy_group_info( - volume) - except exception.Invalid: - # Delete even if there was invalid qos policy specified for the - # volume. - qos_policy_group_info = None - self._mark_qos_policy_group_for_deletion(qos_policy_group_info) - - msg = 'Deleted LUN with name %(name)s and QoS info %(qos)s' - LOG.debug(msg, {'name': volume['name'], 'qos': qos_policy_group_info}) - - def _get_preferred_target_from_list(self, target_details_list, - filter=None): - # cDOT iSCSI LIFs do not migrate from controller to controller - # in failover. 
Rather, an iSCSI LIF must be configured on each - # controller and the initiator has to take responsibility for - # using a LIF that is UP. In failover, the iSCSI LIF on the - # downed controller goes DOWN until the controller comes back up. - # - # Currently Nova only accepts a single target when obtaining - # target details from Cinder, so we pass back the first portal - # with an UP iSCSI LIF. There are plans to have Nova accept - # and try multiple targets. When that happens, we can and should - # remove this filter and return all targets since their operational - # state could change between the time we test here and the time - # Nova uses the target. - - operational_addresses = ( - self.zapi_client.get_operational_lif_addresses()) - - return (super(NetAppBlockStorageCmodeLibrary, self) - ._get_preferred_target_from_list(target_details_list, - filter=operational_addresses)) - - def _setup_qos_for_volume(self, volume, extra_specs): - try: - qos_policy_group_info = na_utils.get_valid_qos_policy_group_info( - volume, extra_specs) - except exception.Invalid: - msg = _('Invalid QoS specification detected while getting QoS ' - 'policy for volume %s') % volume['id'] - raise exception.VolumeBackendAPIException(data=msg) - self.zapi_client.provision_qos_policy_group(qos_policy_group_info) - return qos_policy_group_info - - def _get_volume_model_update(self, volume): - """Provide any updates necessary for a volume being created/managed.""" - if self.replication_enabled: - return {'replication_status': fields.ReplicationStatus.ENABLED} - - def _mark_qos_policy_group_for_deletion(self, qos_policy_group_info): - self.zapi_client.mark_qos_policy_group_for_deletion( - qos_policy_group_info) - - def unmanage(self, volume): - """Removes the specified volume from Cinder management. - - Does not delete the underlying backend storage object. 
- """ - try: - qos_policy_group_info = na_utils.get_valid_qos_policy_group_info( - volume) - except exception.Invalid: - # Unmanage even if there was invalid qos policy specified for the - # volume. - qos_policy_group_info = None - self._mark_qos_policy_group_for_deletion(qos_policy_group_info) - super(NetAppBlockStorageCmodeLibrary, self).unmanage(volume) - - def failover_host(self, context, volumes, secondary_id=None, groups=None): - """Failover a backend to a secondary replication target.""" - - return self._failover_host(volumes, secondary_id=secondary_id) - - def _get_backing_flexvol_names(self): - """Returns a list of backing flexvol names.""" - return self.ssc_library.get_ssc().keys() diff --git a/cinder/volume/drivers/netapp/dataontap/client/__init__.py b/cinder/volume/drivers/netapp/dataontap/client/__init__.py deleted file mode 100644 index e69de29bb..000000000 diff --git a/cinder/volume/drivers/netapp/dataontap/client/api.py b/cinder/volume/drivers/netapp/dataontap/client/api.py deleted file mode 100644 index aae2c3e00..000000000 --- a/cinder/volume/drivers/netapp/dataontap/client/api.py +++ /dev/null @@ -1,614 +0,0 @@ -# Copyright (c) 2012 NetApp, Inc. All rights reserved. -# Copyright (c) 2014 Navneet Singh. All rights reserved. -# Copyright (c) 2014 Glenn Gobeli. All rights reserved. -# Copyright (c) 2014 Clinton Knight. All rights reserved. -# Copyright (c) 2015 Alex Meade. All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
-""" -NetApp API for Data ONTAP and OnCommand DFM. - -Contains classes required to issue API calls to Data ONTAP and OnCommand DFM. -""" - -from eventlet import greenthread -from eventlet import semaphore - -from lxml import etree -from oslo_log import log as logging -import random -import six -from six.moves import urllib - -from cinder import exception -from cinder.i18n import _ -from cinder import ssh_utils -from cinder import utils - -LOG = logging.getLogger(__name__) - -EAPIERROR = '13001' -EAPIPRIVILEGE = '13003' -EAPINOTFOUND = '13005' -ESNAPSHOTNOTALLOWED = '13023' -ESIS_CLONE_NOT_LICENSED = '14956' -EOBJECTNOTFOUND = '15661' -ESOURCE_IS_DIFFERENT = '17105' -ERELATION_EXISTS = '17122' -ERELATION_NOT_QUIESCED = '17127' -ENOTRANSFER_IN_PROGRESS = '17130' -EANOTHER_OP_ACTIVE = '17131' -ETRANSFER_IN_PROGRESS = '17137' - - -class NaServer(object): - """Encapsulates server connection logic.""" - - TRANSPORT_TYPE_HTTP = 'http' - TRANSPORT_TYPE_HTTPS = 'https' - SERVER_TYPE_FILER = 'filer' - SERVER_TYPE_DFM = 'dfm' - URL_FILER = 'servlets/netapp.servlets.admin.XMLrequest_filer' - URL_DFM = 'apis/XMLrequest' - NETAPP_NS = 'http://www.netapp.com/filer/admin' - STYLE_LOGIN_PASSWORD = 'basic_auth' - STYLE_CERTIFICATE = 'certificate_auth' - - def __init__(self, host, server_type=SERVER_TYPE_FILER, - transport_type=TRANSPORT_TYPE_HTTP, - style=STYLE_LOGIN_PASSWORD, username=None, - password=None, port=None): - self._host = host - self.set_server_type(server_type) - self.set_transport_type(transport_type) - self.set_style(style) - if port: - self.set_port(port) - self._username = username - self._password = password - self._refresh_conn = True - - LOG.debug('Using NetApp controller: %s', self._host) - - def set_transport_type(self, transport_type): - """Set the transport type protocol for API. - - Supports http and https transport types. 
- """ - if not transport_type: - raise ValueError('No transport type specified') - if transport_type.lower() not in ( - NaServer.TRANSPORT_TYPE_HTTP, - NaServer.TRANSPORT_TYPE_HTTPS): - raise ValueError('Unsupported transport type') - self._protocol = transport_type.lower() - if self._protocol == NaServer.TRANSPORT_TYPE_HTTP: - if self._server_type == NaServer.SERVER_TYPE_FILER: - self.set_port(80) - else: - self.set_port(8088) - else: - if self._server_type == NaServer.SERVER_TYPE_FILER: - self.set_port(443) - else: - self.set_port(8488) - self._refresh_conn = True - - def set_style(self, style): - """Set the authorization style for communicating with the server. - - Supports basic_auth for now. Certificate_auth mode to be done. - """ - if style.lower() not in (NaServer.STYLE_LOGIN_PASSWORD, - NaServer.STYLE_CERTIFICATE): - raise ValueError('Unsupported authentication style') - self._auth_style = style.lower() - - def set_server_type(self, server_type): - """Set the target server type. - - Supports filer and dfm server types. - """ - if server_type.lower() not in (NaServer.SERVER_TYPE_FILER, - NaServer.SERVER_TYPE_DFM): - raise ValueError('Unsupported server type') - self._server_type = server_type.lower() - if self._server_type == NaServer.SERVER_TYPE_FILER: - self._url = NaServer.URL_FILER - else: - self._url = NaServer.URL_DFM - self._ns = NaServer.NETAPP_NS - self._refresh_conn = True - - def set_api_version(self, major, minor): - """Set the API version.""" - try: - self._api_major_version = int(major) - self._api_minor_version = int(minor) - self._api_version = six.text_type(major) + "." 
+ \ - six.text_type(minor) - except ValueError: - raise ValueError('Major and minor versions must be integers') - self._refresh_conn = True - - def get_api_version(self): - """Gets the API version tuple.""" - if hasattr(self, '_api_version'): - return (self._api_major_version, self._api_minor_version) - return None - - def set_port(self, port): - """Set the server communication port.""" - try: - int(port) - except ValueError: - raise ValueError('Port must be integer') - self._port = six.text_type(port) - self._refresh_conn = True - - def set_timeout(self, seconds): - """Sets the timeout in seconds.""" - try: - self._timeout = int(seconds) - except ValueError: - raise ValueError('timeout in seconds must be integer') - - def set_vfiler(self, vfiler): - """Set the vfiler to use if tunneling gets enabled.""" - self._vfiler = vfiler - - def set_vserver(self, vserver): - """Set the vserver to use if tunneling gets enabled.""" - self._vserver = vserver - - @utils.trace_api - def send_http_request(self, na_element, enable_tunneling=False): - """Invoke the API on the server.""" - if not na_element or not isinstance(na_element, NaElement): - raise ValueError('NaElement must be supplied to invoke API') - - request, request_element = self._create_request(na_element, - enable_tunneling) - - if not hasattr(self, '_opener') or not self._opener \ - or self._refresh_conn: - self._build_opener() - try: - if hasattr(self, '_timeout'): - response = self._opener.open(request, timeout=self._timeout) - else: - response = self._opener.open(request) - except urllib.error.HTTPError as e: - raise NaApiError(e.code, e.msg) - except Exception: - LOG.exception("Error communicating with NetApp filer.") - raise NaApiError('Unexpected error') - - response_xml = response.read() - response_element = self._get_result(response_xml) - - return response_element - - def invoke_successfully(self, na_element, enable_tunneling=False): - """Invokes API and checks execution status as success. 
- - Need to set enable_tunneling to True explicitly to achieve it. - This helps to use same connection instance to enable or disable - tunneling. The vserver or vfiler should be set before this call - otherwise tunneling remains disabled. - """ - result = self.send_http_request(na_element, enable_tunneling) - if result.has_attr('status') and result.get_attr('status') == 'passed': - return result - code = result.get_attr('errno')\ - or result.get_child_content('errorno')\ - or 'ESTATUSFAILED' - if code == ESIS_CLONE_NOT_LICENSED: - msg = 'Clone operation failed: FlexClone not licensed.' - else: - msg = result.get_attr('reason')\ - or result.get_child_content('reason')\ - or 'Execution status is failed due to unknown reason' - raise NaApiError(code, msg) - - def _create_request(self, na_element, enable_tunneling=False): - """Creates request in the desired format.""" - netapp_elem = NaElement('netapp') - netapp_elem.add_attr('xmlns', self._ns) - if hasattr(self, '_api_version'): - netapp_elem.add_attr('version', self._api_version) - if enable_tunneling: - self._enable_tunnel_request(netapp_elem) - netapp_elem.add_child_elem(na_element) - request_d = netapp_elem.to_string() - request = urllib.request.Request( - self._get_url(), data=request_d, - headers={'Content-Type': 'text/xml', 'charset': 'utf-8'}) - return request, netapp_elem - - def _enable_tunnel_request(self, netapp_elem): - """Enables vserver or vfiler tunneling.""" - if hasattr(self, '_vfiler') and self._vfiler: - if hasattr(self, '_api_major_version') and \ - hasattr(self, '_api_minor_version') and \ - self._api_major_version >= 1 and \ - self._api_minor_version >= 7: - netapp_elem.add_attr('vfiler', self._vfiler) - else: - raise ValueError('ontapi version has to be atleast 1.7' - ' to send request to vfiler') - if hasattr(self, '_vserver') and self._vserver: - if hasattr(self, '_api_major_version') and \ - hasattr(self, '_api_minor_version') and \ - self._api_major_version >= 1 and \ - 
self._api_minor_version >= 15: - netapp_elem.add_attr('vfiler', self._vserver) - else: - raise ValueError('ontapi version has to be atleast 1.15' - ' to send request to vserver') - - def _parse_response(self, response): - """Get the NaElement for the response.""" - if not response: - raise NaApiError('No response received') - xml = etree.XML(response) - return NaElement(xml) - - def _get_result(self, response): - """Gets the call result.""" - processed_response = self._parse_response(response) - return processed_response.get_child_by_name('results') - - def _get_url(self): - return '%s://%s:%s/%s' % (self._protocol, self._host, self._port, - self._url) - - def _build_opener(self): - if self._auth_style == NaServer.STYLE_LOGIN_PASSWORD: - auth_handler = self._create_basic_auth_handler() - else: - auth_handler = self._create_certificate_auth_handler() - opener = urllib.request.build_opener(auth_handler) - self._opener = opener - - def _create_basic_auth_handler(self): - password_man = urllib.request.HTTPPasswordMgrWithDefaultRealm() - password_man.add_password(None, self._get_url(), self._username, - self._password) - auth_handler = urllib.request.HTTPBasicAuthHandler(password_man) - return auth_handler - - def _create_certificate_auth_handler(self): - raise NotImplementedError() - - def __str__(self): - return "server: %s" % self._host - - -class NaElement(object): - """Class wraps basic building block for NetApp API request.""" - - def __init__(self, name): - """Name of the element or etree.Element.""" - if isinstance(name, etree._Element): - self._element = name - else: - self._element = etree.Element(name) - - def get_name(self): - """Returns the tag name of the element.""" - return self._element.tag - - def set_content(self, text): - """Set the text string for the element.""" - self._element.text = text - - def get_content(self): - """Get the text for the element.""" - return self._element.text - - def add_attr(self, name, value): - """Add the attribute to the 
element.""" - self._element.set(name, value) - - def add_attrs(self, **attrs): - """Add multiple attributes to the element.""" - for attr in attrs.keys(): - self._element.set(attr, attrs.get(attr)) - - def add_child_elem(self, na_element): - """Add the child element to the element.""" - if isinstance(na_element, NaElement): - self._element.append(na_element._element) - return - raise - - def get_child_by_name(self, name): - """Get the child element by the tag name.""" - for child in self._element.iterchildren(): - if child.tag == name or etree.QName(child.tag).localname == name: - return NaElement(child) - return None - - def get_child_content(self, name): - """Get the content of the child.""" - for child in self._element.iterchildren(): - if child.tag == name or etree.QName(child.tag).localname == name: - return child.text - return None - - def get_children(self): - """Get the children for the element.""" - return [NaElement(el) for el in self._element.iterchildren()] - - def has_attr(self, name): - """Checks whether element has attribute.""" - attributes = self._element.attrib or {} - return name in attributes.keys() - - def get_attr(self, name): - """Get the attribute with the given name.""" - attributes = self._element.attrib or {} - return attributes.get(name) - - def get_attr_names(self): - """Returns the list of attribute names.""" - attributes = self._element.attrib or {} - return list(attributes.keys()) - - def add_new_child(self, name, content, convert=False): - """Add child with tag name and content. - - Convert replaces entity refs to chars. 
- """ - child = NaElement(name) - if convert: - content = NaElement._convert_entity_refs(content) - child.set_content(content) - self.add_child_elem(child) - - @staticmethod - def _convert_entity_refs(text): - """Converts entity refs to chars to handle etree auto conversions.""" - text = text.replace("<", "<") - text = text.replace(">", ">") - return text - - @staticmethod - def create_node_with_children(node, **children): - """Creates and returns named node with children.""" - parent = NaElement(node) - for child in children.keys(): - parent.add_new_child(child, children.get(child, None)) - return parent - - def add_node_with_children(self, node, **children): - """Creates named node with children.""" - parent = NaElement.create_node_with_children(node, **children) - self.add_child_elem(parent) - - def to_string(self, pretty=False, method='xml', encoding='UTF-8'): - """Prints the element to string.""" - return etree.tostring(self._element, method=method, encoding=encoding, - pretty_print=pretty) - - def __str__(self): - xml = self.to_string(pretty=True) - if six.PY3: - xml = xml.decode('utf-8') - return xml - - def __eq__(self, other): - return str(self) == str(other) - - def __ne__(self, other): - return not self.__eq__(other) - - def __hash__(self): - return hash(str(self)) - - def __repr__(self): - return str(self) - - def __getitem__(self, key): - """Dict getter method for NaElement. - - Returns NaElement list if present, - text value in case no NaElement node - children or attribute value if present. - """ - - child = self.get_child_by_name(key) - if child: - if child.get_children(): - return child - else: - return child.get_content() - elif self.has_attr(key): - return self.get_attr(key) - raise KeyError(_('No element by given name %s.') % (key)) - - def __setitem__(self, key, value): - """Dict setter method for NaElement. - - Accepts dict, list, tuple, str, int, float and long as valid value. 
- """ - if key: - if value: - if isinstance(value, NaElement): - child = NaElement(key) - child.add_child_elem(value) - self.add_child_elem(child) - elif isinstance(value, six.integer_types + (str, float)): - self.add_new_child(key, six.text_type(value)) - elif isinstance(value, (list, tuple, dict)): - child = NaElement(key) - child.translate_struct(value) - self.add_child_elem(child) - else: - raise TypeError(_('Not a valid value for NaElement.')) - else: - self.add_child_elem(NaElement(key)) - else: - raise KeyError(_('NaElement name cannot be null.')) - - def translate_struct(self, data_struct): - """Convert list, tuple, dict to NaElement and appends. - - Example usage: - - 1. - - .. code-block:: xml - - - vl1 - vl2 - vl3 - - - The above can be achieved by doing - - .. code-block:: python - - root = NaElement('root') - root.translate_struct({'elem1': 'vl1', 'elem2': 'vl2', - 'elem3': 'vl3'}) - - 2. - - .. code-block:: xml - - - vl1 - vl2 - vl3 - - - The above can be achieved by doing - - .. code-block:: python - - root = NaElement('root') - root.translate_struct([{'elem1': 'vl1', 'elem2': 'vl2'}, - {'elem1': 'vl3'}]) - """ - if isinstance(data_struct, (list, tuple)): - for el in data_struct: - if isinstance(el, (list, tuple, dict)): - self.translate_struct(el) - else: - self.add_child_elem(NaElement(el)) - elif isinstance(data_struct, dict): - for k in data_struct.keys(): - child = NaElement(k) - if isinstance(data_struct[k], (dict, list, tuple)): - child.translate_struct(data_struct[k]) - else: - if data_struct[k]: - child.set_content(six.text_type(data_struct[k])) - self.add_child_elem(child) - else: - raise ValueError(_('Type cannot be converted into NaElement.')) - - -class NaApiError(Exception): - """Base exception class for NetApp API errors.""" - - def __init__(self, code='unknown', message='unknown'): - self.code = code - self.message = message - - def __str__(self, *args, **kwargs): - return 'NetApp API failed. 
Reason - %s:%s' % (self.code, self.message) - - -class SSHUtil(object): - """Encapsulates connection logic and command execution for SSH client.""" - - MAX_CONCURRENT_SSH_CONNECTIONS = 5 - RECV_TIMEOUT = 3 - CONNECTION_KEEP_ALIVE = 600 - WAIT_ON_STDOUT_TIMEOUT = 3 - - def __init__(self, host, username, password, port=22): - self.ssh_pool = self._init_ssh_pool(host, port, username, password) - - # Note(cfouts) Number of SSH connections made to the backend need to be - # limited. Use of SSHPool allows connections to be cached and reused - # instead of creating a new connection each time a command is executed - # via SSH. - self.ssh_connect_semaphore = semaphore.Semaphore( - self.MAX_CONCURRENT_SSH_CONNECTIONS) - - def _init_ssh_pool(self, host, port, username, password): - return ssh_utils.SSHPool(host, - port, - self.CONNECTION_KEEP_ALIVE, - username, - password) - - def execute_command(self, client, command_text, timeout=RECV_TIMEOUT): - LOG.debug("execute_command() - Sending command.") - stdin, stdout, stderr = client.exec_command(command_text) - stdin.close() - self._wait_on_stdout(stdout, timeout) - output = stdout.read() - LOG.debug("Output of length %(size)d received.", - {'size': len(output)}) - stdout.close() - stderr.close() - return output - - def execute_command_with_prompt(self, - client, - command, - expected_prompt_text, - prompt_response, - timeout=RECV_TIMEOUT): - LOG.debug("execute_command_with_prompt() - Sending command.") - stdin, stdout, stderr = client.exec_command(command) - self._wait_on_stdout(stdout, timeout) - response = stdout.channel.recv(999) - if response.strip() != expected_prompt_text: - msg = _("Unexpected output. 
Expected [%(expected)s] but " - "received [%(output)s]") % { - 'expected': expected_prompt_text, - 'output': response.strip(), - } - LOG.error(msg) - stdin.close() - stdout.close() - stderr.close() - raise exception.VolumeBackendAPIException(msg) - else: - LOG.debug("execute_command_with_prompt() - Sending answer") - stdin.write(prompt_response + '\n') - stdin.flush() - stdin.close() - stdout.close() - stderr.close() - - def _wait_on_stdout(self, stdout, timeout=WAIT_ON_STDOUT_TIMEOUT): - wait_time = 0.0 - # NOTE(cfouts): The server does not always indicate when EOF is reached - # for stdout. The timeout exists for this reason and an attempt is made - # to read from stdout. - while not stdout.channel.exit_status_ready(): - # period is 10 - 25 centiseconds - period = random.randint(10, 25) / 100.0 - greenthread.sleep(period) - wait_time += period - if wait_time > timeout: - LOG.debug("Timeout exceeded while waiting for exit status.") - break diff --git a/cinder/volume/drivers/netapp/dataontap/client/client_7mode.py b/cinder/volume/drivers/netapp/dataontap/client/client_7mode.py deleted file mode 100644 index 9671fa9d0..000000000 --- a/cinder/volume/drivers/netapp/dataontap/client/client_7mode.py +++ /dev/null @@ -1,604 +0,0 @@ -# Copyright (c) 2014 Alex Meade. All rights reserved. -# Copyright (c) 2014 Clinton Knight. All rights reserved. -# Copyright (c) 2016 Mike Rooney. All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -import copy -import math -import time - -from oslo_log import log as logging -import six - -from cinder import exception -from cinder.i18n import _ -from cinder import utils -from cinder.volume.drivers.netapp.dataontap.client import api as netapp_api -from cinder.volume.drivers.netapp.dataontap.client import client_base - -from oslo_utils import strutils - -LOG = logging.getLogger(__name__) - - -@six.add_metaclass(utils.TraceWrapperMetaclass) -class Client(client_base.Client): - - def __init__(self, volume_list=None, **kwargs): - super(Client, self).__init__(**kwargs) - vfiler = kwargs.get('vfiler', None) - self.connection.set_vfiler(vfiler) - - (major, minor) = self.get_ontapi_version(cached=False) - self.connection.set_api_version(major, minor) - - self.volume_list = volume_list - self._init_features() - - def _init_features(self): - super(Client, self)._init_features() - - ontapi_version = self.get_ontapi_version() # major, minor - - ontapi_1_20 = ontapi_version >= (1, 20) - self.features.add_feature('SYSTEM_METRICS', supported=ontapi_1_20) - - def send_ems_log_message(self, message_dict): - """Sends a message to the Data ONTAP EMS log.""" - - # NOTE(cknight): Cannot use deepcopy on the connection context - node_client = copy.copy(self) - node_client.connection = copy.copy(self.connection) - node_client.connection.set_timeout(25) - - try: - node_client.connection.set_vfiler(None) - node_client.send_request('ems-autosupport-log', message_dict) - LOG.debug('EMS executed successfully.') - except netapp_api.NaApiError as e: - LOG.warning('Failed to invoke EMS. 
%s', e) - - def get_iscsi_target_details(self): - """Gets the iSCSI target portal details.""" - iscsi_if_iter = netapp_api.NaElement('iscsi-portal-list-info') - result = self.connection.invoke_successfully(iscsi_if_iter, True) - tgt_list = [] - portal_list_entries = result.get_child_by_name( - 'iscsi-portal-list-entries') - if portal_list_entries: - portal_list = portal_list_entries.get_children() - for iscsi_if in portal_list: - d = dict() - d['address'] = iscsi_if.get_child_content('ip-address') - d['port'] = iscsi_if.get_child_content('ip-port') - d['tpgroup-tag'] = iscsi_if.get_child_content('tpgroup-tag') - tgt_list.append(d) - return tgt_list - - def check_iscsi_initiator_exists(self, iqn): - """Returns True if initiator exists.""" - initiator_exists = True - try: - auth_list = netapp_api.NaElement('iscsi-initiator-auth-list-info') - auth_list.add_new_child('initiator', iqn) - self.connection.invoke_successfully(auth_list, True) - except netapp_api.NaApiError: - initiator_exists = False - - return initiator_exists - - def get_fc_target_wwpns(self): - """Gets the FC target details.""" - wwpns = [] - port_name_list_api = netapp_api.NaElement('fcp-port-name-list-info') - result = self.connection.invoke_successfully(port_name_list_api) - port_names = result.get_child_by_name('fcp-port-names') - if port_names: - for port_name_info in port_names.get_children(): - wwpn = port_name_info.get_child_content('port-name').lower() - wwpns.append(wwpn) - return wwpns - - def get_iscsi_service_details(self): - """Returns iscsi iqn.""" - iscsi_service_iter = netapp_api.NaElement('iscsi-node-get-name') - result = self.connection.invoke_successfully(iscsi_service_iter, True) - return result.get_child_content('node-name') - - def set_iscsi_chap_authentication(self, iqn, username, password): - """Provides NetApp host's CHAP credentials to the backend.""" - - command = ("iscsi security add -i %(iqn)s -s CHAP " - "-p %(password)s -n %(username)s") % { - 'iqn': iqn, - 'password': 
password, - 'username': username, - } - - LOG.debug('Updating CHAP authentication for %(iqn)s.', {'iqn': iqn}) - - try: - ssh_pool = self.ssh_client.ssh_pool - with ssh_pool.item() as ssh: - self.ssh_client.execute_command(ssh, command) - except Exception as e: - msg = _('Failed to set CHAP authentication for target IQN ' - '%(iqn)s. Details: %(ex)s') % { - 'iqn': iqn, - 'ex': e, - } - LOG.error(msg) - raise exception.VolumeBackendAPIException(data=msg) - - def get_lun_list(self): - """Gets the list of LUNs on filer.""" - lun_list = [] - if self.volume_list: - for vol in self.volume_list: - try: - luns = self._get_vol_luns(vol) - if luns: - lun_list.extend(luns) - except netapp_api.NaApiError: - LOG.warning("Error finding LUNs for volume %s." - " Verify volume exists.", vol) - else: - luns = self._get_vol_luns(None) - lun_list.extend(luns) - return lun_list - - def _get_vol_luns(self, vol_name): - """Gets the LUNs for a volume.""" - api = netapp_api.NaElement('lun-list-info') - if vol_name: - api.add_new_child('volume-name', vol_name) - result = self.connection.invoke_successfully(api, True) - luns = result.get_child_by_name('luns') - return luns.get_children() - - def get_igroup_by_initiators(self, initiator_list): - """Get igroups exactly matching a set of initiators.""" - igroup_list = [] - if not initiator_list: - return igroup_list - - initiator_set = set(initiator_list) - - igroup_list_info = netapp_api.NaElement('igroup-list-info') - result = self.connection.invoke_successfully(igroup_list_info, True) - - initiator_groups = result.get_child_by_name( - 'initiator-groups') or netapp_api.NaElement('none') - for initiator_group_info in initiator_groups.get_children(): - - initiator_set_for_igroup = set() - initiators = initiator_group_info.get_child_by_name( - 'initiators') or netapp_api.NaElement('none') - for initiator_info in initiators.get_children(): - initiator_set_for_igroup.add( - initiator_info.get_child_content('initiator-name')) - - if initiator_set 
== initiator_set_for_igroup: - igroup = {'initiator-group-os-type': - initiator_group_info.get_child_content( - 'initiator-group-os-type'), - 'initiator-group-type': - initiator_group_info.get_child_content( - 'initiator-group-type'), - 'initiator-group-name': - initiator_group_info.get_child_content( - 'initiator-group-name')} - igroup_list.append(igroup) - - return igroup_list - - def clone_lun(self, path, clone_path, name, new_name, - space_reserved='true', src_block=0, - dest_block=0, block_count=0, source_snapshot=None): - # zAPI can only handle 2^24 blocks per range - bc_limit = 2 ** 24 # 8GB - # zAPI can only handle 32 block ranges per call - br_limit = 32 - z_limit = br_limit * bc_limit # 256 GB - z_calls = int(math.ceil(block_count / float(z_limit))) - zbc = block_count - if z_calls == 0: - z_calls = 1 - for _call in range(0, z_calls): - if zbc > z_limit: - block_count = z_limit - zbc -= z_limit - else: - block_count = zbc - - zapi_args = { - 'source-path': path, - 'destination-path': clone_path, - 'no-snap': 'true', - } - if source_snapshot: - zapi_args['snapshot-name'] = source_snapshot - clone_start = netapp_api.NaElement.create_node_with_children( - 'clone-start', **zapi_args) - if block_count > 0: - block_ranges = netapp_api.NaElement("block-ranges") - # zAPI can only handle 2^24 block ranges - bc_limit = 2 ** 24 # 8GB - segments = int(math.ceil(block_count / float(bc_limit))) - bc = block_count - for _segment in range(0, segments): - if bc > bc_limit: - block_count = bc_limit - bc -= bc_limit - else: - block_count = bc - block_range =\ - netapp_api.NaElement.create_node_with_children( - 'block-range', - **{'source-block-number': - six.text_type(src_block), - 'destination-block-number': - six.text_type(dest_block), - 'block-count': - six.text_type(block_count)}) - block_ranges.add_child_elem(block_range) - src_block += int(block_count) - dest_block += int(block_count) - clone_start.add_child_elem(block_ranges) - result = 
self.connection.invoke_successfully(clone_start, True) - clone_id_el = result.get_child_by_name('clone-id') - cl_id_info = clone_id_el.get_child_by_name('clone-id-info') - vol_uuid = cl_id_info.get_child_content('volume-uuid') - clone_id = cl_id_info.get_child_content('clone-op-id') - if vol_uuid: - self._check_clone_status(clone_id, vol_uuid, name, new_name) - - def _check_clone_status(self, clone_id, vol_uuid, name, new_name): - """Checks for the job till completed.""" - clone_status = netapp_api.NaElement('clone-list-status') - cl_id = netapp_api.NaElement('clone-id') - clone_status.add_child_elem(cl_id) - cl_id.add_node_with_children('clone-id-info', - **{'clone-op-id': clone_id, - 'volume-uuid': vol_uuid}) - running = True - clone_ops_info = None - while running: - result = self.connection.invoke_successfully(clone_status, True) - status = result.get_child_by_name('status') - ops_info = status.get_children() - if ops_info: - for info in ops_info: - if info.get_child_content('clone-state') == 'running': - time.sleep(1) - break - else: - running = False - clone_ops_info = info - break - else: - if clone_ops_info: - fmt = {'name': name, 'new_name': new_name} - if clone_ops_info.get_child_content('clone-state')\ - == 'completed': - LOG.debug("Clone operation with src %(name)s" - " and dest %(new_name)s completed", fmt) - else: - LOG.debug("Clone operation with src %(name)s" - " and dest %(new_name)s failed", fmt) - raise netapp_api.NaApiError( - clone_ops_info.get_child_content('error'), - clone_ops_info.get_child_content('reason')) - - def get_lun_by_args(self, **args): - """Retrieves LUNs with specified args.""" - lun_info = netapp_api.NaElement.create_node_with_children( - 'lun-list-info', **args) - result = self.connection.invoke_successfully(lun_info, True) - luns = result.get_child_by_name('luns') - return luns.get_children() - - def get_filer_volumes(self, volume=None): - """Returns list of filer volumes in API format.""" - vol_request = 
netapp_api.NaElement('volume-list-info') - res = self.connection.invoke_successfully(vol_request, True) - volumes = res.get_child_by_name('volumes') - if volumes: - return volumes.get_children() - return [] - - def get_lun_map(self, path): - lun_map_list = netapp_api.NaElement.create_node_with_children( - 'lun-map-list-info', - **{'path': path}) - return self.connection.invoke_successfully(lun_map_list, True) - - def set_space_reserve(self, path, enable): - """Sets the space reserve info.""" - space_res = netapp_api.NaElement.create_node_with_children( - 'lun-set-space-reservation-info', - **{'path': path, 'enable': enable}) - self.connection.invoke_successfully(space_res, True) - - def get_actual_path_for_export(self, export_path): - """Gets the actual path on the filer for export path.""" - storage_path = netapp_api.NaElement.create_node_with_children( - 'nfs-exportfs-storage-path', **{'pathname': export_path}) - result = self.connection.invoke_successfully(storage_path, - enable_tunneling=True) - if result.get_child_content('actual-pathname'): - return result.get_child_content('actual-pathname') - raise exception.NotFound(_('No storage path found for export path %s') - % (export_path)) - - def clone_file(self, src_path, dest_path, source_snapshot=None): - LOG.debug("Cloning with src %(src_path)s, dest %(dest_path)s", - {'src_path': src_path, 'dest_path': dest_path}) - zapi_args = { - 'source-path': src_path, - 'destination-path': dest_path, - 'no-snap': 'true', - } - if source_snapshot: - zapi_args['snapshot-name'] = source_snapshot - - clone_start = netapp_api.NaElement.create_node_with_children( - 'clone-start', **zapi_args) - result = self.connection.invoke_successfully(clone_start, - enable_tunneling=True) - clone_id_el = result.get_child_by_name('clone-id') - cl_id_info = clone_id_el.get_child_by_name('clone-id-info') - vol_uuid = cl_id_info.get_child_content('volume-uuid') - clone_id = cl_id_info.get_child_content('clone-op-id') - - if vol_uuid: - try: - 
self._wait_for_clone_finish(clone_id, vol_uuid) - except netapp_api.NaApiError as e: - if e.code != 'UnknownCloneId': - self._clear_clone(clone_id) - raise - - def _wait_for_clone_finish(self, clone_op_id, vol_uuid): - """Waits till a clone operation is complete or errored out.""" - clone_ls_st = netapp_api.NaElement('clone-list-status') - clone_id = netapp_api.NaElement('clone-id') - clone_ls_st.add_child_elem(clone_id) - clone_id.add_node_with_children('clone-id-info', - **{'clone-op-id': clone_op_id, - 'volume-uuid': vol_uuid}) - task_running = True - while task_running: - result = self.connection.invoke_successfully(clone_ls_st, - enable_tunneling=True) - status = result.get_child_by_name('status') - ops_info = status.get_children() - if ops_info: - state = ops_info[0].get_child_content('clone-state') - if state == 'completed': - task_running = False - elif state == 'failed': - code = ops_info[0].get_child_content('error') - reason = ops_info[0].get_child_content('reason') - raise netapp_api.NaApiError(code, reason) - else: - time.sleep(1) - else: - raise netapp_api.NaApiError( - 'UnknownCloneId', - 'No clone operation for clone id %s found on the filer' - % (clone_id)) - - def _clear_clone(self, clone_id): - """Clear the clone information. - - Invoke this in case of failed clone. 
- """ - - clone_clear = netapp_api.NaElement.create_node_with_children( - 'clone-clear', - **{'clone-id': clone_id}) - retry = 3 - while retry: - try: - self.connection.invoke_successfully(clone_clear, - enable_tunneling=True) - break - except netapp_api.NaApiError: - # Filer might be rebooting - time.sleep(5) - retry = retry - 1 - - def get_file_usage(self, path): - """Gets the file unique bytes.""" - LOG.debug('Getting file usage for %s', path) - file_use = netapp_api.NaElement.create_node_with_children( - 'file-usage-get', **{'path': path}) - res = self.connection.invoke_successfully(file_use) - bytes = res.get_child_content('unique-bytes') - LOG.debug('file-usage for path %(path)s is %(bytes)s', - {'path': path, 'bytes': bytes}) - return bytes - - def get_ifconfig(self): - ifconfig = netapp_api.NaElement('net-ifconfig-get') - return self.connection.invoke_successfully(ifconfig) - - def get_flexvol_capacity(self, flexvol_path): - """Gets total capacity and free capacity, in bytes, of the flexvol.""" - - api_args = {'volume': flexvol_path, 'verbose': 'false'} - - result = self.send_request('volume-list-info', api_args) - - flexvol_info_list = result.get_child_by_name('volumes') - flexvol_info = flexvol_info_list.get_children()[0] - - size_total = float(flexvol_info.get_child_content('size-total')) - size_available = float( - flexvol_info.get_child_content('size-available')) - - return { - 'size-total': size_total, - 'size-available': size_available, - } - - def get_performance_instance_names(self, object_name): - """Get names of performance instances for a node.""" - - api_args = {'objectname': object_name} - - result = self.send_request('perf-object-instance-list-info', - api_args, - enable_tunneling=False) - - instance_names = [] - - instances = result.get_child_by_name( - 'instances') or netapp_api.NaElement('None') - - for instance_info in instances.get_children(): - instance_names.append(instance_info.get_child_content('name')) - - return instance_names - - 
def get_performance_counters(self, object_name, instance_names, - counter_names): - """Gets or or more 7-mode Data ONTAP performance counters.""" - - api_args = { - 'objectname': object_name, - 'instances': [ - {'instance': instance} for instance in instance_names - ], - 'counters': [ - {'counter': counter} for counter in counter_names - ], - } - - result = self.send_request('perf-object-get-instances', - api_args, - enable_tunneling=False) - - counter_data = [] - - timestamp = result.get_child_content('timestamp') - - instances = result.get_child_by_name( - 'instances') or netapp_api.NaElement('None') - for instance in instances.get_children(): - - instance_name = instance.get_child_content('name') - - counters = instance.get_child_by_name( - 'counters') or netapp_api.NaElement('None') - for counter in counters.get_children(): - - counter_name = counter.get_child_content('name') - counter_value = counter.get_child_content('value') - - counter_data.append({ - 'instance-name': instance_name, - 'timestamp': timestamp, - counter_name: counter_value, - }) - - return counter_data - - def get_system_name(self): - """Get the name of the 7-mode Data ONTAP controller.""" - - result = self.send_request('system-get-info', - {}, - enable_tunneling=False) - - system_info = result.get_child_by_name('system-info') - system_name = system_info.get_child_content('system-name') - return system_name - - def get_snapshot(self, volume_name, snapshot_name): - """Gets a single snapshot.""" - snapshot_list_info = netapp_api.NaElement('snapshot-list-info') - snapshot_list_info.add_new_child('volume', volume_name) - result = self.connection.invoke_successfully(snapshot_list_info, - enable_tunneling=True) - - snapshots = result.get_child_by_name('snapshots') - if not snapshots: - msg = _('No snapshots could be found on volume %s.') - raise exception.VolumeBackendAPIException(data=msg % volume_name) - snapshot_list = snapshots.get_children() - snapshot = None - for s in snapshot_list: - if 
(snapshot_name == s.get_child_content('name')) and (snapshot - is None): - snapshot = { - 'name': s.get_child_content('name'), - 'volume': s.get_child_content('volume'), - 'busy': strutils.bool_from_string( - s.get_child_content('busy')), - } - snapshot_owners_list = s.get_child_by_name( - 'snapshot-owners-list') or netapp_api.NaElement('none') - snapshot_owners = set([snapshot_owner.get_child_content( - 'owner') for snapshot_owner in - snapshot_owners_list.get_children()]) - snapshot['owners'] = snapshot_owners - elif (snapshot_name == s.get_child_content('name')) and ( - snapshot is not None): - msg = _('Could not find unique snapshot %(snap)s on ' - 'volume %(vol)s.') - msg_args = {'snap': snapshot_name, 'vol': volume_name} - raise exception.VolumeBackendAPIException(data=msg % msg_args) - if not snapshot: - raise exception.SnapshotNotFound(snapshot_id=snapshot_name) - - return snapshot - - def get_snapshots_marked_for_deletion(self, volume_list=None): - """Get a list of snapshots marked for deletion.""" - snapshots = [] - - for volume_name in volume_list: - api_args = { - 'target-name': volume_name, - 'target-type': 'volume', - 'terse': 'true', - } - result = self.send_request('snapshot-list-info', api_args) - snapshots.extend( - self._parse_snapshot_list_info_result(result, volume_name)) - - return snapshots - - def _parse_snapshot_list_info_result(self, result, volume_name): - snapshots = [] - snapshots_elem = result.get_child_by_name( - 'snapshots') or netapp_api.NaElement('none') - snapshot_info_list = snapshots_elem.get_children() - for snapshot_info in snapshot_info_list: - snapshot_name = snapshot_info.get_child_content('name') - snapshot_busy = strutils.bool_from_string( - snapshot_info.get_child_content('busy')) - snapshot_id = snapshot_info.get_child_content( - 'snapshot-instance-uuid') - if (not snapshot_busy and - snapshot_name.startswith(client_base.DELETED_PREFIX)): - snapshots.append({ - 'name': snapshot_name, - 'instance_id': snapshot_id, - 
'volume_name': volume_name, - }) - - return snapshots diff --git a/cinder/volume/drivers/netapp/dataontap/client/client_base.py b/cinder/volume/drivers/netapp/dataontap/client/client_base.py deleted file mode 100644 index 1d24c6d1a..000000000 --- a/cinder/volume/drivers/netapp/dataontap/client/client_base.py +++ /dev/null @@ -1,383 +0,0 @@ -# Copyright (c) 2014 Alex Meade. All rights reserved. -# Copyright (c) 2014 Clinton Knight. All rights reserved. -# Copyright (c) 2015 Tom Barron. All rights reserved. -# Copyright (c) 2016 Mike Rooney. All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -import sys - -from oslo_log import log as logging -from oslo_utils import excutils - -import six - -from cinder import exception -from cinder.i18n import _ -from cinder import utils -from cinder.volume.drivers.netapp.dataontap.client import api as netapp_api -from cinder.volume.drivers.netapp import utils as na_utils - - -LOG = logging.getLogger(__name__) - -DELETED_PREFIX = 'deleted_cinder_' - - -@six.add_metaclass(utils.TraceWrapperMetaclass) -class Client(object): - - def __init__(self, **kwargs): - host = kwargs['hostname'] - username = kwargs['username'] - password = kwargs['password'] - self.connection = netapp_api.NaServer( - host=host, - transport_type=kwargs['transport_type'], - port=kwargs['port'], - username=username, - password=password) - - self.ssh_client = self._init_ssh_client(host, username, password) - - def _init_ssh_client(self, host, username, password): - return netapp_api.SSHUtil( - host=host, - username=username, - password=password) - - def _init_features(self): - """Set up the repository of available Data ONTAP features.""" - self.features = na_utils.Features() - - def get_ontapi_version(self, cached=True): - """Gets the supported ontapi version.""" - - if cached: - return self.connection.get_api_version() - - ontapi_version = netapp_api.NaElement('system-get-ontapi-version') - res = self.connection.invoke_successfully(ontapi_version, False) - major = res.get_child_content('major-version') - minor = res.get_child_content('minor-version') - return major, minor - - def _strip_xml_namespace(self, string): - if string.startswith('{') and '}' in string: - return string.split('}', 1)[1] - return string - - def check_is_naelement(self, elem): - """Checks if object is instance of NaElement.""" - if not isinstance(elem, netapp_api.NaElement): - raise ValueError('Expects NaElement') - - def send_request(self, api_name, api_args=None, enable_tunneling=True): - """Sends request to Ontapi.""" - request = netapp_api.NaElement(api_name) - if api_args: 
- request.translate_struct(api_args) - return self.connection.invoke_successfully(request, enable_tunneling) - - def create_lun(self, volume_name, lun_name, size, metadata, - qos_policy_group_name=None): - """Issues API request for creating LUN on volume.""" - - path = '/vol/%s/%s' % (volume_name, lun_name) - lun_create = netapp_api.NaElement.create_node_with_children( - 'lun-create-by-size', - **{'path': path, 'size': six.text_type(size), - 'ostype': metadata['OsType'], - 'space-reservation-enabled': metadata['SpaceReserved']}) - if qos_policy_group_name: - lun_create.add_new_child('qos-policy-group', qos_policy_group_name) - - try: - self.connection.invoke_successfully(lun_create, True) - except netapp_api.NaApiError as ex: - with excutils.save_and_reraise_exception(): - LOG.error("Error provisioning volume %(lun_name)s on " - "%(volume_name)s. Details: %(ex)s", - {'lun_name': lun_name, - 'volume_name': volume_name, - 'ex': ex}) - - def destroy_lun(self, path, force=True): - """Destroys the LUN at the path.""" - lun_destroy = netapp_api.NaElement.create_node_with_children( - 'lun-destroy', - **{'path': path}) - if force: - lun_destroy.add_new_child('force', 'true') - self.connection.invoke_successfully(lun_destroy, True) - seg = path.split("/") - LOG.debug("Destroyed LUN %s", seg[-1]) - - def map_lun(self, path, igroup_name, lun_id=None): - """Maps LUN to the initiator and returns LUN id assigned.""" - lun_map = netapp_api.NaElement.create_node_with_children( - 'lun-map', **{'path': path, - 'initiator-group': igroup_name}) - if lun_id: - lun_map.add_new_child('lun-id', lun_id) - try: - result = self.connection.invoke_successfully(lun_map, True) - return result.get_child_content('lun-id-assigned') - except netapp_api.NaApiError as e: - code = e.code - message = e.message - LOG.warning('Error mapping LUN. 
Code :%(code)s, Message: ' - '%(message)s', {'code': code, 'message': message}) - raise - - def unmap_lun(self, path, igroup_name): - """Unmaps a LUN from given initiator.""" - lun_unmap = netapp_api.NaElement.create_node_with_children( - 'lun-unmap', - **{'path': path, 'initiator-group': igroup_name}) - try: - self.connection.invoke_successfully(lun_unmap, True) - except netapp_api.NaApiError as e: - exc_info = sys.exc_info() - LOG.warning("Error unmapping LUN. Code :%(code)s, Message: " - "%(message)s", {'code': e.code, - 'message': e.message}) - # if the LUN is already unmapped - if e.code == '13115' or e.code == '9016': - pass - else: - six.reraise(*exc_info) - - def create_igroup(self, igroup, igroup_type='iscsi', os_type='default'): - """Creates igroup with specified args.""" - igroup_create = netapp_api.NaElement.create_node_with_children( - 'igroup-create', - **{'initiator-group-name': igroup, - 'initiator-group-type': igroup_type, - 'os-type': os_type}) - self.connection.invoke_successfully(igroup_create, True) - - def add_igroup_initiator(self, igroup, initiator): - """Adds initiators to the specified igroup.""" - igroup_add = netapp_api.NaElement.create_node_with_children( - 'igroup-add', - **{'initiator-group-name': igroup, - 'initiator': initiator}) - self.connection.invoke_successfully(igroup_add, True) - - def do_direct_resize(self, path, new_size_bytes, force=True): - """Resize the LUN.""" - seg = path.split("/") - LOG.info("Resizing LUN %s directly to new size.", seg[-1]) - lun_resize = netapp_api.NaElement.create_node_with_children( - 'lun-resize', - **{'path': path, - 'size': new_size_bytes}) - if force: - lun_resize.add_new_child('force', 'true') - self.connection.invoke_successfully(lun_resize, True) - - def get_lun_geometry(self, path): - """Gets the LUN geometry.""" - geometry = {} - lun_geo = netapp_api.NaElement("lun-get-geometry") - lun_geo.add_new_child('path', path) - try: - result = self.connection.invoke_successfully(lun_geo, True) - 
geometry['size'] = result.get_child_content("size") - geometry['bytes_per_sector'] =\ - result.get_child_content("bytes-per-sector") - geometry['sectors_per_track'] =\ - result.get_child_content("sectors-per-track") - geometry['tracks_per_cylinder'] =\ - result.get_child_content("tracks-per-cylinder") - geometry['cylinders'] =\ - result.get_child_content("cylinders") - geometry['max_resize'] =\ - result.get_child_content("max-resize-size") - except Exception as e: - LOG.error("LUN %(path)s geometry failed. Message - %(msg)s", - {'path': path, 'msg': six.text_type(e)}) - return geometry - - def get_volume_options(self, volume_name): - """Get the value for the volume option.""" - opts = [] - vol_option_list = netapp_api.NaElement("volume-options-list-info") - vol_option_list.add_new_child('volume', volume_name) - result = self.connection.invoke_successfully(vol_option_list, True) - options = result.get_child_by_name("options") - if options: - opts = options.get_children() - return opts - - def move_lun(self, path, new_path): - """Moves the LUN at path to new path.""" - seg = path.split("/") - new_seg = new_path.split("/") - LOG.debug("Moving LUN %(name)s to %(new_name)s.", - {'name': seg[-1], 'new_name': new_seg[-1]}) - lun_move = netapp_api.NaElement("lun-move") - lun_move.add_new_child("path", path) - lun_move.add_new_child("new-path", new_path) - self.connection.invoke_successfully(lun_move, True) - - def get_iscsi_target_details(self): - """Gets the iSCSI target portal details.""" - raise NotImplementedError() - - def get_fc_target_wwpns(self): - """Gets the FC target details.""" - raise NotImplementedError() - - def get_iscsi_service_details(self): - """Returns iscsi iqn.""" - raise NotImplementedError() - - def check_iscsi_initiator_exists(self, iqn): - """Returns True if initiator exists.""" - raise NotImplementedError() - - def set_iscsi_chap_authentication(self, iqn, username, password): - """Provides NetApp host's CHAP credentials to the backend.""" - raise 
NotImplementedError() - - def get_lun_list(self): - """Gets the list of LUNs on filer.""" - raise NotImplementedError() - - def get_igroup_by_initiators(self, initiator_list): - """Get igroups exactly matching a set of initiators.""" - raise NotImplementedError() - - def _has_luns_mapped_to_initiator(self, initiator): - """Checks whether any LUNs are mapped to the given initiator.""" - lun_list_api = netapp_api.NaElement('lun-initiator-list-map-info') - lun_list_api.add_new_child('initiator', initiator) - result = self.connection.invoke_successfully(lun_list_api, True) - lun_maps_container = result.get_child_by_name( - 'lun-maps') or netapp_api.NaElement('none') - return len(lun_maps_container.get_children()) > 0 - - def has_luns_mapped_to_initiators(self, initiator_list): - """Checks whether any LUNs are mapped to the given initiator(s).""" - for initiator in initiator_list: - if self._has_luns_mapped_to_initiator(initiator): - return True - return False - - def get_lun_by_args(self, **args): - """Retrieves LUNs with specified args.""" - raise NotImplementedError() - - def get_performance_counter_info(self, object_name, counter_name): - """Gets info about one or more Data ONTAP performance counters.""" - - api_args = {'objectname': object_name} - result = self.send_request('perf-object-counter-list-info', - api_args, - enable_tunneling=False) - - counters = result.get_child_by_name( - 'counters') or netapp_api.NaElement('None') - - for counter in counters.get_children(): - - if counter.get_child_content('name') == counter_name: - - labels = [] - label_list = counter.get_child_by_name( - 'labels') or netapp_api.NaElement('None') - for label in label_list.get_children(): - labels.extend(label.get_content().split(',')) - base_counter = counter.get_child_content('base-counter') - - return { - 'name': counter_name, - 'labels': labels, - 'base-counter': base_counter, - } - else: - raise exception.NotFound(_('Counter %s not found') % counter_name) - - def 
delete_snapshot(self, volume_name, snapshot_name): - """Deletes a volume snapshot.""" - api_args = {'volume': volume_name, 'snapshot': snapshot_name} - self.send_request('snapshot-delete', api_args) - - def create_cg_snapshot(self, volume_names, snapshot_name): - """Creates a consistency group snapshot out of one or more flexvols. - - ONTAP requires an invocation of cg-start to first fence off the - flexvols to be included in the snapshot. If cg-start returns - success, a cg-commit must be executed to finalized the snapshot and - unfence the flexvols. - """ - cg_id = self._start_cg_snapshot(volume_names, snapshot_name) - if not cg_id: - msg = _('Could not start consistency group snapshot %s.') - raise exception.VolumeBackendAPIException(data=msg % snapshot_name) - self._commit_cg_snapshot(cg_id) - - def _start_cg_snapshot(self, volume_names, snapshot_name): - snapshot_init = { - 'snapshot': snapshot_name, - 'timeout': 'relaxed', - 'volumes': [ - {'volume-name': volume_name} for volume_name in volume_names - ], - } - result = self.send_request('cg-start', snapshot_init) - return result.get_child_content('cg-id') - - def _commit_cg_snapshot(self, cg_id): - snapshot_commit = {'cg-id': cg_id} - self.send_request('cg-commit', snapshot_commit) - - def get_snapshot(self, volume_name, snapshot_name): - """Gets a single snapshot.""" - raise NotImplementedError() - - @utils.retry(exception.SnapshotIsBusy) - def wait_for_busy_snapshot(self, flexvol, snapshot_name): - """Checks for and handles a busy snapshot. - - If a snapshot is busy, for reasons other than cloning, an exception is - raised immediately. Otherwise, wait for a period of time for the clone - dependency to finish before giving up. If the snapshot is not busy then - no action is taken and the method exits. 
- """ - snapshot = self.get_snapshot(flexvol, snapshot_name) - if not snapshot['busy']: - LOG.debug("Backing consistency group snapshot %s available for " - "deletion.", snapshot_name) - return - else: - LOG.debug("Snapshot %(snap)s for vol %(vol)s is busy, waiting " - "for volume clone dependency to clear.", - {"snap": snapshot_name, "vol": flexvol}) - raise exception.SnapshotIsBusy(snapshot_name=snapshot_name) - - def mark_snapshot_for_deletion(self, volume, snapshot_name): - """Mark snapshot for deletion by renaming snapshot.""" - return self.rename_snapshot( - volume, snapshot_name, DELETED_PREFIX + snapshot_name) - - def rename_snapshot(self, volume, current_name, new_name): - """Renames a snapshot.""" - api_args = { - 'volume': volume, - 'current-name': current_name, - 'new-name': new_name, - } - return self.send_request('snapshot-rename', api_args) diff --git a/cinder/volume/drivers/netapp/dataontap/client/client_cmode.py b/cinder/volume/drivers/netapp/dataontap/client/client_cmode.py deleted file mode 100644 index a3aa786ae..000000000 --- a/cinder/volume/drivers/netapp/dataontap/client/client_cmode.py +++ /dev/null @@ -1,2271 +0,0 @@ -# Copyright (c) 2014 Alex Meade. All rights reserved. -# Copyright (c) 2014 Clinton Knight. All rights reserved. -# Copyright (c) 2015 Tom Barron. All rights reserved. -# Copyright (c) 2016 Mike Rooney. All rights reserved. -# Copyright (c) 2017 Jose Porrua. All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -import copy -import math -import re - -from oslo_log import log as logging -from oslo_utils import excutils -from oslo_utils import units -import six - -from cinder import exception -from cinder.i18n import _ -from cinder import utils -from cinder.volume.drivers.netapp.dataontap.client import api as netapp_api -from cinder.volume.drivers.netapp.dataontap.client import client_base -from cinder.volume.drivers.netapp import utils as na_utils - -from oslo_utils import strutils - - -LOG = logging.getLogger(__name__) -DEFAULT_MAX_PAGE_LENGTH = 50 - - -@six.add_metaclass(utils.TraceWrapperMetaclass) -class Client(client_base.Client): - - def __init__(self, **kwargs): - super(Client, self).__init__(**kwargs) - self.vserver = kwargs.get('vserver', None) - self.connection.set_vserver(self.vserver) - - # Default values to run first api - self.connection.set_api_version(1, 15) - (major, minor) = self.get_ontapi_version(cached=False) - self.connection.set_api_version(major, minor) - self._init_features() - - def _init_features(self): - super(Client, self)._init_features() - - ontapi_version = self.get_ontapi_version() # major, minor - - ontapi_1_20 = ontapi_version >= (1, 20) - ontapi_1_2x = (1, 20) <= ontapi_version < (1, 30) - ontapi_1_30 = ontapi_version >= (1, 30) - ontapi_1_100 = ontapi_version >= (1, 100) - ontapi_1_1xx = (1, 100) <= ontapi_version < (1, 200) - - self.features.add_feature('SNAPMIRROR_V2', supported=ontapi_1_20) - self.features.add_feature('USER_CAPABILITY_LIST', - supported=ontapi_1_20) - self.features.add_feature('SYSTEM_METRICS', supported=ontapi_1_2x) - self.features.add_feature('CLONE_SPLIT_STATUS', supported=ontapi_1_30) - self.features.add_feature('FAST_CLONE_DELETE', supported=ontapi_1_30) - self.features.add_feature('SYSTEM_CONSTITUENT_METRICS', - supported=ontapi_1_30) - self.features.add_feature('ADVANCED_DISK_PARTITIONING', - supported=ontapi_1_30) - self.features.add_feature('BACKUP_CLONE_PARAM', supported=ontapi_1_100) - 
self.features.add_feature('CLUSTER_PEER_POLICY', supported=ontapi_1_30) - self.features.add_feature('FLEXVOL_ENCRYPTION', supported=ontapi_1_1xx) - - def _invoke_vserver_api(self, na_element, vserver): - server = copy.copy(self.connection) - server.set_vserver(vserver) - result = server.invoke_successfully(na_element, True) - return result - - def _has_records(self, api_result_element): - num_records = api_result_element.get_child_content('num-records') - return bool(num_records and '0' != num_records) - - def _get_record_count(self, api_result_element): - try: - return int(api_result_element.get_child_content('num-records')) - except TypeError: - msg = _('Missing record count for NetApp iterator API invocation.') - raise exception.NetAppDriverException(msg) - - def set_vserver(self, vserver): - self.vserver = vserver - self.connection.set_vserver(vserver) - - def send_iter_request(self, api_name, api_args=None, enable_tunneling=True, - max_page_length=DEFAULT_MAX_PAGE_LENGTH): - """Invoke an iterator-style getter API.""" - - if not api_args: - api_args = {} - - api_args['max-records'] = max_page_length - - # Get first page - result = self.send_request( - api_name, api_args, enable_tunneling=enable_tunneling) - - # Most commonly, we can just return here if there is no more data - next_tag = result.get_child_content('next-tag') - if not next_tag: - return result - - # Ensure pagination data is valid and prepare to store remaining pages - num_records = self._get_record_count(result) - attributes_list = result.get_child_by_name('attributes-list') - if not attributes_list: - msg = _('Missing attributes list for API %s.') % api_name - raise exception.NetAppDriverException(msg) - - # Get remaining pages, saving data into first page - while next_tag is not None: - next_api_args = copy.deepcopy(api_args) - next_api_args['tag'] = next_tag - next_result = self.send_request( - api_name, next_api_args, enable_tunneling=enable_tunneling) - - next_attributes_list = 
next_result.get_child_by_name( - 'attributes-list') or netapp_api.NaElement('none') - - for record in next_attributes_list.get_children(): - attributes_list.add_child_elem(record) - - num_records += self._get_record_count(next_result) - next_tag = next_result.get_child_content('next-tag') - - result.get_child_by_name('num-records').set_content( - six.text_type(num_records)) - result.get_child_by_name('next-tag').set_content('') - return result - - def list_vservers(self, vserver_type='data'): - """Get the names of vservers present, optionally filtered by type.""" - query = { - 'vserver-info': { - 'vserver-type': vserver_type, - } - } if vserver_type else None - - api_args = { - 'desired-attributes': { - 'vserver-info': { - 'vserver-name': None, - }, - }, - } - if query: - api_args['query'] = query - - result = self.send_iter_request('vserver-get-iter', api_args, - enable_tunneling=False) - vserver_info_list = result.get_child_by_name( - 'attributes-list') or netapp_api.NaElement('none') - return [vserver_info.get_child_content('vserver-name') - for vserver_info in vserver_info_list.get_children()] - - def _get_ems_log_destination_vserver(self): - """Returns the best vserver destination for EMS messages.""" - major, minor = self.get_ontapi_version(cached=True) - - if (major > 1) or (major == 1 and minor > 15): - # Prefer admin Vserver (requires cluster credentials). - admin_vservers = self.list_vservers(vserver_type='admin') - if admin_vservers: - return admin_vservers[0] - - # Fall back to data Vserver. - data_vservers = self.list_vservers(vserver_type='data') - if data_vservers: - return data_vservers[0] - - # If older API version, or no other Vservers found, use node Vserver. 
- node_vservers = self.list_vservers(vserver_type='node') - if node_vservers: - return node_vservers[0] - - raise exception.NotFound("No Vserver found to receive EMS messages.") - - def send_ems_log_message(self, message_dict): - """Sends a message to the Data ONTAP EMS log.""" - - # NOTE(cknight): Cannot use deepcopy on the connection context - node_client = copy.copy(self) - node_client.connection = copy.copy(self.connection) - node_client.connection.set_timeout(25) - - try: - node_client.set_vserver(self._get_ems_log_destination_vserver()) - node_client.send_request('ems-autosupport-log', message_dict) - LOG.debug('EMS executed successfully.') - except netapp_api.NaApiError as e: - LOG.warning('Failed to invoke EMS. %s', e) - - def get_iscsi_target_details(self): - """Gets the iSCSI target portal details.""" - iscsi_if_iter = netapp_api.NaElement('iscsi-interface-get-iter') - result = self.connection.invoke_successfully(iscsi_if_iter, True) - tgt_list = [] - num_records = result.get_child_content('num-records') - if num_records and int(num_records) >= 1: - attr_list = result.get_child_by_name('attributes-list') - iscsi_if_list = attr_list.get_children() - for iscsi_if in iscsi_if_list: - d = dict() - d['address'] = iscsi_if.get_child_content('ip-address') - d['port'] = iscsi_if.get_child_content('ip-port') - d['tpgroup-tag'] = iscsi_if.get_child_content('tpgroup-tag') - d['interface-enabled'] = iscsi_if.get_child_content( - 'is-interface-enabled') - tgt_list.append(d) - return tgt_list - - def set_iscsi_chap_authentication(self, iqn, username, password): - """Provides NetApp host's CHAP credentials to the backend.""" - initiator_exists = self.check_iscsi_initiator_exists(iqn) - - command_template = ('iscsi security %(mode)s -vserver %(vserver)s ' - '-initiator-name %(iqn)s -auth-type CHAP ' - '-user-name %(username)s') - - if initiator_exists: - LOG.debug('Updating CHAP authentication for %(iqn)s.', - {'iqn': iqn}) - command = command_template % { - 'mode': 
'modify', - 'vserver': self.vserver, - 'iqn': iqn, - 'username': username, - } - else: - LOG.debug('Adding initiator %(iqn)s with CHAP authentication.', - {'iqn': iqn}) - command = command_template % { - 'mode': 'create', - 'vserver': self.vserver, - 'iqn': iqn, - 'username': username, - } - - try: - with self.ssh_client.ssh_connect_semaphore: - ssh_pool = self.ssh_client.ssh_pool - with ssh_pool.item() as ssh: - self.ssh_client.execute_command_with_prompt(ssh, - command, - 'Password:', - password) - except Exception as e: - msg = _('Failed to set CHAP authentication for target IQN %(iqn)s.' - ' Details: %(ex)s') % { - 'iqn': iqn, - 'ex': e, - } - LOG.error(msg) - raise exception.VolumeBackendAPIException(data=msg) - - def check_iscsi_initiator_exists(self, iqn): - """Returns True if initiator exists.""" - initiator_exists = True - try: - auth_list = netapp_api.NaElement('iscsi-initiator-get-auth') - auth_list.add_new_child('initiator', iqn) - self.connection.invoke_successfully(auth_list, True) - except netapp_api.NaApiError: - initiator_exists = False - - return initiator_exists - - def get_fc_target_wwpns(self): - """Gets the FC target details.""" - wwpns = [] - port_name_list_api = netapp_api.NaElement('fcp-port-name-get-iter') - port_name_list_api.add_new_child('max-records', '100') - result = self.connection.invoke_successfully(port_name_list_api, True) - num_records = result.get_child_content('num-records') - if num_records and int(num_records) >= 1: - for port_name_info in result.get_child_by_name( - 'attributes-list').get_children(): - - if port_name_info.get_child_content('is-used') != 'true': - continue - - wwpn = port_name_info.get_child_content('port-name').lower() - wwpns.append(wwpn) - - return wwpns - - def get_iscsi_service_details(self): - """Returns iscsi iqn.""" - iscsi_service_iter = netapp_api.NaElement('iscsi-service-get-iter') - result = self.connection.invoke_successfully(iscsi_service_iter, True) - if 
result.get_child_content('num-records') and\ - int(result.get_child_content('num-records')) >= 1: - attr_list = result.get_child_by_name('attributes-list') - iscsi_service = attr_list.get_child_by_name('iscsi-service-info') - return iscsi_service.get_child_content('node-name') - LOG.debug('No iSCSI service found for vserver %s', self.vserver) - return None - - def get_lun_list(self): - """Gets the list of LUNs on filer. - - Gets the LUNs from cluster with vserver. - """ - - luns = [] - tag = None - while True: - api = netapp_api.NaElement('lun-get-iter') - api.add_new_child('max-records', '100') - if tag: - api.add_new_child('tag', tag, True) - lun_info = netapp_api.NaElement('lun-info') - lun_info.add_new_child('vserver', self.vserver) - query = netapp_api.NaElement('query') - query.add_child_elem(lun_info) - api.add_child_elem(query) - result = self.connection.invoke_successfully(api, True) - if result.get_child_by_name('num-records') and\ - int(result.get_child_content('num-records')) >= 1: - attr_list = result.get_child_by_name('attributes-list') - luns.extend(attr_list.get_children()) - tag = result.get_child_content('next-tag') - if tag is None: - break - return luns - - def get_lun_map(self, path): - """Gets the LUN map by LUN path.""" - tag = None - map_list = [] - while True: - lun_map_iter = netapp_api.NaElement('lun-map-get-iter') - lun_map_iter.add_new_child('max-records', '100') - if tag: - lun_map_iter.add_new_child('tag', tag, True) - query = netapp_api.NaElement('query') - lun_map_iter.add_child_elem(query) - query.add_node_with_children('lun-map-info', **{'path': path}) - result = self.connection.invoke_successfully(lun_map_iter, True) - tag = result.get_child_content('next-tag') - if result.get_child_content('num-records') and \ - int(result.get_child_content('num-records')) >= 1: - attr_list = result.get_child_by_name('attributes-list') - lun_maps = attr_list.get_children() - for lun_map in lun_maps: - lun_m = dict() - lun_m['initiator-group'] = 
lun_map.get_child_content( - 'initiator-group') - lun_m['lun-id'] = lun_map.get_child_content('lun-id') - lun_m['vserver'] = lun_map.get_child_content('vserver') - map_list.append(lun_m) - if tag is None: - break - return map_list - - def _get_igroup_by_initiator_query(self, initiator, tag): - igroup_get_iter = netapp_api.NaElement('igroup-get-iter') - igroup_get_iter.add_new_child('max-records', '100') - if tag: - igroup_get_iter.add_new_child('tag', tag, True) - - query = netapp_api.NaElement('query') - igroup_info = netapp_api.NaElement('initiator-group-info') - query.add_child_elem(igroup_info) - igroup_info.add_new_child('vserver', self.vserver) - initiators = netapp_api.NaElement('initiators') - igroup_info.add_child_elem(initiators) - igroup_get_iter.add_child_elem(query) - initiators.add_node_with_children( - 'initiator-info', **{'initiator-name': initiator}) - - # limit results to just the attributes of interest - desired_attrs = netapp_api.NaElement('desired-attributes') - desired_igroup_info = netapp_api.NaElement('initiator-group-info') - desired_igroup_info.add_node_with_children( - 'initiators', **{'initiator-info': None}) - desired_igroup_info.add_new_child('vserver', None) - desired_igroup_info.add_new_child('initiator-group-name', None) - desired_igroup_info.add_new_child('initiator-group-type', None) - desired_igroup_info.add_new_child('initiator-group-os-type', None) - desired_attrs.add_child_elem(desired_igroup_info) - igroup_get_iter.add_child_elem(desired_attrs) - - return igroup_get_iter - - def get_igroup_by_initiators(self, initiator_list): - """Get igroups exactly matching a set of initiators.""" - tag = None - igroup_list = [] - if not initiator_list: - return igroup_list - - initiator_set = set(initiator_list) - - while True: - # C-mode getter APIs can't do an 'and' query, so match the first - # initiator (which will greatly narrow the search results) and - # filter the rest in this method. 
- query = self._get_igroup_by_initiator_query(initiator_list[0], tag) - result = self.connection.invoke_successfully(query, True) - - tag = result.get_child_content('next-tag') - num_records = result.get_child_content('num-records') - if num_records and int(num_records) >= 1: - - for igroup_info in result.get_child_by_name( - 'attributes-list').get_children(): - - initiator_set_for_igroup = set() - for initiator_info in igroup_info.get_child_by_name( - 'initiators').get_children(): - - initiator_set_for_igroup.add( - initiator_info.get_child_content('initiator-name')) - - if initiator_set == initiator_set_for_igroup: - igroup = {'initiator-group-os-type': - igroup_info.get_child_content( - 'initiator-group-os-type'), - 'initiator-group-type': - igroup_info.get_child_content( - 'initiator-group-type'), - 'initiator-group-name': - igroup_info.get_child_content( - 'initiator-group-name')} - igroup_list.append(igroup) - - if tag is None: - break - - return igroup_list - - def clone_lun(self, volume, name, new_name, space_reserved='true', - qos_policy_group_name=None, src_block=0, dest_block=0, - block_count=0, source_snapshot=None, is_snapshot=False): - # zAPI can only handle 2^24 blocks per range - bc_limit = 2 ** 24 # 8GB - # zAPI can only handle 32 block ranges per call - br_limit = 32 - z_limit = br_limit * bc_limit # 256 GB - z_calls = int(math.ceil(block_count / float(z_limit))) - zbc = block_count - if z_calls == 0: - z_calls = 1 - for _call in range(0, z_calls): - if zbc > z_limit: - block_count = z_limit - zbc -= z_limit - else: - block_count = zbc - - zapi_args = { - 'volume': volume, - 'source-path': name, - 'destination-path': new_name, - 'space-reserve': space_reserved, - } - if source_snapshot: - zapi_args['snapshot-name'] = source_snapshot - if is_snapshot and self.features.BACKUP_CLONE_PARAM: - zapi_args['is-backup'] = 'true' - clone_create = netapp_api.NaElement.create_node_with_children( - 'clone-create', **zapi_args) - if qos_policy_group_name is not 
None: - clone_create.add_new_child('qos-policy-group-name', - qos_policy_group_name) - if block_count > 0: - block_ranges = netapp_api.NaElement("block-ranges") - segments = int(math.ceil(block_count / float(bc_limit))) - bc = block_count - for _segment in range(0, segments): - if bc > bc_limit: - block_count = bc_limit - bc -= bc_limit - else: - block_count = bc - block_range =\ - netapp_api.NaElement.create_node_with_children( - 'block-range', - **{'source-block-number': - six.text_type(src_block), - 'destination-block-number': - six.text_type(dest_block), - 'block-count': - six.text_type(block_count)}) - block_ranges.add_child_elem(block_range) - src_block += int(block_count) - dest_block += int(block_count) - clone_create.add_child_elem(block_ranges) - self.connection.invoke_successfully(clone_create, True) - - def get_lun_by_args(self, **args): - """Retrieves LUN with specified args.""" - lun_iter = netapp_api.NaElement('lun-get-iter') - lun_iter.add_new_child('max-records', '100') - query = netapp_api.NaElement('query') - lun_iter.add_child_elem(query) - query.add_node_with_children('lun-info', **args) - luns = self.connection.invoke_successfully(lun_iter, True) - attr_list = luns.get_child_by_name('attributes-list') - if not attr_list: - return [] - return attr_list.get_children() - - def file_assign_qos(self, flex_vol, qos_policy_group_name, file_path): - """Assigns the named QoS policy-group to a file.""" - api_args = { - 'volume': flex_vol, - 'qos-policy-group-name': qos_policy_group_name, - 'file': file_path, - 'vserver': self.vserver, - } - return self.send_request('file-assign-qos', api_args, False) - - def provision_qos_policy_group(self, qos_policy_group_info): - """Create QOS policy group on the backend if appropriate.""" - if qos_policy_group_info is None: - return - - # Legacy QOS uses externally provisioned QOS policy group, - # so we don't need to create one on the backend. 
- legacy = qos_policy_group_info.get('legacy') - if legacy is not None: - return - - spec = qos_policy_group_info.get('spec') - if spec is not None: - if not self.qos_policy_group_exists(spec['policy_name']): - self.qos_policy_group_create(spec['policy_name'], - spec['max_throughput']) - else: - self.qos_policy_group_modify(spec['policy_name'], - spec['max_throughput']) - - def qos_policy_group_exists(self, qos_policy_group_name): - """Checks if a QOS policy group exists.""" - api_args = { - 'query': { - 'qos-policy-group-info': { - 'policy-group': qos_policy_group_name, - }, - }, - 'desired-attributes': { - 'qos-policy-group-info': { - 'policy-group': None, - }, - }, - } - result = self.send_request('qos-policy-group-get-iter', - api_args, - False) - return self._has_records(result) - - def qos_policy_group_create(self, qos_policy_group_name, max_throughput): - """Creates a QOS policy group.""" - api_args = { - 'policy-group': qos_policy_group_name, - 'max-throughput': max_throughput, - 'vserver': self.vserver, - } - return self.send_request('qos-policy-group-create', api_args, False) - - def qos_policy_group_modify(self, qos_policy_group_name, max_throughput): - """Modifies a QOS policy group.""" - api_args = { - 'policy-group': qos_policy_group_name, - 'max-throughput': max_throughput, - } - return self.send_request('qos-policy-group-modify', api_args, False) - - def qos_policy_group_delete(self, qos_policy_group_name): - """Attempts to delete a QOS policy group.""" - api_args = {'policy-group': qos_policy_group_name} - return self.send_request('qos-policy-group-delete', api_args, False) - - def qos_policy_group_rename(self, qos_policy_group_name, new_name): - """Renames a QOS policy group.""" - api_args = { - 'policy-group-name': qos_policy_group_name, - 'new-name': new_name, - } - return self.send_request('qos-policy-group-rename', api_args, False) - - def mark_qos_policy_group_for_deletion(self, qos_policy_group_info): - """Do (soft) delete of backing QOS 
policy group for a cinder volume.""" - if qos_policy_group_info is None: - return - - spec = qos_policy_group_info.get('spec') - - # For cDOT we want to delete the QoS policy group that we created for - # this cinder volume. Because the QoS policy may still be "in use" - # after the zapi call to delete the volume itself returns successfully, - # we instead rename the QoS policy group using a specific pattern and - # later attempt on a best effort basis to delete any QoS policy groups - # matching that pattern. - if spec is not None: - current_name = spec['policy_name'] - new_name = client_base.DELETED_PREFIX + current_name - try: - self.qos_policy_group_rename(current_name, new_name) - except netapp_api.NaApiError as ex: - LOG.warning('Rename failure in cleanup of cDOT QOS policy ' - 'group %(name)s: %(ex)s', - {'name': current_name, 'ex': ex}) - - # Attempt to delete any QoS policies named "delete-openstack-*". - self.remove_unused_qos_policy_groups() - - def remove_unused_qos_policy_groups(self): - """Deletes all QOS policy groups that are marked for deletion.""" - api_args = { - 'query': { - 'qos-policy-group-info': { - 'policy-group': '%s*' % client_base.DELETED_PREFIX, - 'vserver': self.vserver, - } - }, - 'max-records': 3500, - 'continue-on-failure': 'true', - 'return-success-list': 'false', - 'return-failure-list': 'false', - } - - try: - self.send_request('qos-policy-group-delete-iter', api_args, False) - except netapp_api.NaApiError as ex: - msg = 'Could not delete QOS policy groups. 
Details: %(ex)s' - msg_args = {'ex': ex} - LOG.debug(msg, msg_args) - - def set_lun_qos_policy_group(self, path, qos_policy_group): - """Sets qos_policy_group on a LUN.""" - api_args = { - 'path': path, - 'qos-policy-group': qos_policy_group, - } - return self.send_request('lun-set-qos-policy-group', api_args) - - def get_if_info_by_ip(self, ip): - """Gets the network interface info by ip.""" - net_if_iter = netapp_api.NaElement('net-interface-get-iter') - net_if_iter.add_new_child('max-records', '10') - query = netapp_api.NaElement('query') - net_if_iter.add_child_elem(query) - query.add_node_with_children( - 'net-interface-info', - **{'address': na_utils.resolve_hostname(ip)}) - result = self.connection.invoke_successfully(net_if_iter, True) - num_records = result.get_child_content('num-records') - if num_records and int(num_records) >= 1: - attr_list = result.get_child_by_name('attributes-list') - return attr_list.get_children() - raise exception.NotFound( - _('No interface found on cluster for ip %s') % ip) - - def get_vol_by_junc_vserver(self, vserver, junction): - """Gets the volume by junction path and vserver.""" - vol_iter = netapp_api.NaElement('volume-get-iter') - vol_iter.add_new_child('max-records', '10') - query = netapp_api.NaElement('query') - vol_iter.add_child_elem(query) - vol_attrs = netapp_api.NaElement('volume-attributes') - query.add_child_elem(vol_attrs) - vol_attrs.add_node_with_children( - 'volume-id-attributes', - **{'junction-path': junction, - 'owning-vserver-name': vserver}) - des_attrs = netapp_api.NaElement('desired-attributes') - des_attrs.add_node_with_children('volume-attributes', - **{'volume-id-attributes': None}) - vol_iter.add_child_elem(des_attrs) - result = self._invoke_vserver_api(vol_iter, vserver) - num_records = result.get_child_content('num-records') - if num_records and int(num_records) >= 1: - attr_list = result.get_child_by_name('attributes-list') - vols = attr_list.get_children() - vol_id = 
vols[0].get_child_by_name('volume-id-attributes') - return vol_id.get_child_content('name') - msg_fmt = {'vserver': vserver, 'junction': junction} - raise exception.NotFound(_("No volume on cluster with vserver " - "%(vserver)s and junction path " - "%(junction)s ") % msg_fmt) - - def clone_file(self, flex_vol, src_path, dest_path, vserver, - dest_exists=False, source_snapshot=None, - is_snapshot=False): - """Clones file on vserver.""" - LOG.debug("Cloning with params volume %(volume)s, src %(src_path)s, " - "dest %(dest_path)s, vserver %(vserver)s," - "source_snapshot %(source_snapshot)s", - {'volume': flex_vol, 'src_path': src_path, - 'dest_path': dest_path, 'vserver': vserver, - 'source_snapshot': source_snapshot}) - zapi_args = { - 'volume': flex_vol, - 'source-path': src_path, - 'destination-path': dest_path, - } - if is_snapshot and self.features.BACKUP_CLONE_PARAM: - zapi_args['is-backup'] = 'true' - if source_snapshot: - zapi_args['snapshot-name'] = source_snapshot - clone_create = netapp_api.NaElement.create_node_with_children( - 'clone-create', **zapi_args) - major, minor = self.connection.get_api_version() - if major == 1 and minor >= 20 and dest_exists: - clone_create.add_new_child('destination-exists', 'true') - self._invoke_vserver_api(clone_create, vserver) - - def get_file_usage(self, path, vserver): - """Gets the file unique bytes.""" - LOG.debug('Getting file usage for %s', path) - file_use = netapp_api.NaElement.create_node_with_children( - 'file-usage-get', **{'path': path}) - res = self._invoke_vserver_api(file_use, vserver) - unique_bytes = res.get_child_content('unique-bytes') - LOG.debug('file-usage for path %(path)s is %(bytes)s', - {'path': path, 'bytes': unique_bytes}) - return unique_bytes - - def check_cluster_api(self, object_name, operation_name, api): - """Checks the availability of a cluster API. - - Returns True if the specified cluster API exists and may be called by - the current user. 
The API is *called* on Data ONTAP versions prior to - 8.2, while versions starting with 8.2 utilize an API designed for - this purpose. - """ - - if not self.features.USER_CAPABILITY_LIST: - return self._check_cluster_api_legacy(api) - else: - return self._check_cluster_api(object_name, operation_name, api) - - def _check_cluster_api(self, object_name, operation_name, api): - """Checks the availability of a cluster API. - - Returns True if the specified cluster API exists and may be called by - the current user. This method assumes Data ONTAP 8.2 or higher. - """ - - api_args = { - 'query': { - 'capability-info': { - 'object-name': object_name, - 'operation-list': { - 'operation-info': { - 'name': operation_name, - }, - }, - }, - }, - 'desired-attributes': { - 'capability-info': { - 'operation-list': { - 'operation-info': { - 'api-name': None, - }, - }, - }, - }, - } - result = self.send_request( - 'system-user-capability-get-iter', api_args, False) - - if not self._has_records(result): - return False - - capability_info_list = result.get_child_by_name( - 'attributes-list') or netapp_api.NaElement('none') - - for capability_info in capability_info_list.get_children(): - - operation_list = capability_info.get_child_by_name( - 'operation-list') or netapp_api.NaElement('none') - - for operation_info in operation_list.get_children(): - api_name = operation_info.get_child_content('api-name') or '' - api_names = api_name.split(',') - if api in api_names: - return True - - return False - - def _check_cluster_api_legacy(self, api): - """Checks the availability of a cluster API. - - Returns True if the specified cluster API exists and may be called by - the current user. This method should only be used for Data ONTAP 8.1, - and only getter APIs may be tested because the API is actually called - to perform the check. 
- """ - - if not re.match(".*-get$|.*-get-iter$|.*-list-info$", api): - raise ValueError(_('Non-getter API passed to API test method.')) - - try: - self.send_request(api, enable_tunneling=False) - except netapp_api.NaApiError as ex: - if ex.code in (netapp_api.EAPIPRIVILEGE, netapp_api.EAPINOTFOUND): - return False - - return True - - def list_cluster_nodes(self): - """Get all available cluster nodes.""" - - api_args = { - 'desired-attributes': { - 'node-details-info': { - 'node': None, - }, - }, - } - result = self.send_iter_request('system-node-get-iter', api_args) - nodes_info_list = result.get_child_by_name( - 'attributes-list') or netapp_api.NaElement('none') - return [node_info.get_child_content('node') for node_info - in nodes_info_list.get_children()] - - def check_for_cluster_credentials(self): - """Checks whether cluster-scoped credentials are being used or not.""" - - try: - self.list_cluster_nodes() - # API succeeded, so definitely a cluster management LIF - return True - except netapp_api.NaApiError as e: - if e.code == netapp_api.EAPINOTFOUND: - LOG.debug('Not connected to cluster management LIF.') - else: - with excutils.save_and_reraise_exception(): - LOG.exception('Failed to get the list of nodes.') - return False - - def get_operational_lif_addresses(self): - """Gets the IP addresses of operational LIFs on the vserver.""" - - net_interface_get_iter_args = { - 'query': { - 'net-interface-info': { - 'operational-status': 'up' - } - }, - 'desired-attributes': { - 'net-interface-info': { - 'address': None, - } - } - } - result = self.send_iter_request('net-interface-get-iter', - net_interface_get_iter_args) - - lif_info_list = result.get_child_by_name( - 'attributes-list') or netapp_api.NaElement('none') - - return [lif_info.get_child_content('address') for lif_info in - lif_info_list.get_children()] - - def get_flexvol_capacity(self, flexvol_path=None, flexvol_name=None): - """Gets total capacity and free capacity, in bytes, of the flexvol.""" - - 
volume_id_attributes = {} - if flexvol_path: - volume_id_attributes['junction-path'] = flexvol_path - if flexvol_name: - volume_id_attributes['name'] = flexvol_name - - api_args = { - 'query': { - 'volume-attributes': { - 'volume-id-attributes': volume_id_attributes, - } - }, - 'desired-attributes': { - 'volume-attributes': { - 'volume-space-attributes': { - 'size-available': None, - 'size-total': None, - } - } - }, - } - - result = self.send_iter_request('volume-get-iter', api_args) - if self._get_record_count(result) != 1: - msg = _('Volume %s not found.') - msg_args = flexvol_path or flexvol_name - raise exception.NetAppDriverException(msg % msg_args) - - attributes_list = result.get_child_by_name('attributes-list') - volume_attributes = attributes_list.get_child_by_name( - 'volume-attributes') - volume_space_attributes = volume_attributes.get_child_by_name( - 'volume-space-attributes') - - size_available = float( - volume_space_attributes.get_child_content('size-available')) - size_total = float( - volume_space_attributes.get_child_content('size-total')) - - return { - 'size-total': size_total, - 'size-available': size_available, - } - - def list_flexvols(self): - """Returns the names of the flexvols on the controller.""" - - api_args = { - 'query': { - 'volume-attributes': { - 'volume-id-attributes': { - 'type': 'rw', - 'style': 'flex', - }, - 'volume-state-attributes': { - 'is-vserver-root': 'false', - 'is-inconsistent': 'false', - 'is-invalid': 'false', - 'state': 'online', - }, - }, - }, - 'desired-attributes': { - 'volume-attributes': { - 'volume-id-attributes': { - 'name': None, - }, - }, - }, - } - result = self.send_iter_request('volume-get-iter', api_args) - if not self._has_records(result): - return [] - - volumes = [] - - attributes_list = result.get_child_by_name( - 'attributes-list') or netapp_api.NaElement('none') - - for volume_attributes in attributes_list.get_children(): - - volume_id_attributes = volume_attributes.get_child_by_name( - 
'volume-id-attributes') or netapp_api.NaElement('none') - - volumes.append(volume_id_attributes.get_child_content('name')) - - return volumes - - def get_flexvol(self, flexvol_path=None, flexvol_name=None): - """Get flexvol attributes needed for the storage service catalog.""" - - volume_id_attributes = {'type': 'rw', 'style': 'flex'} - if flexvol_path: - volume_id_attributes['junction-path'] = flexvol_path - if flexvol_name: - volume_id_attributes['name'] = flexvol_name - - api_args = { - 'query': { - 'volume-attributes': { - 'volume-id-attributes': volume_id_attributes, - 'volume-state-attributes': { - 'is-vserver-root': 'false', - 'is-inconsistent': 'false', - 'is-invalid': 'false', - 'state': 'online', - }, - }, - }, - 'desired-attributes': { - 'volume-attributes': { - 'volume-id-attributes': { - 'name': None, - 'owning-vserver-name': None, - 'junction-path': None, - 'containing-aggregate-name': None, - 'type': None, - }, - 'volume-mirror-attributes': { - 'is-data-protection-mirror': None, - 'is-replica-volume': None, - }, - 'volume-space-attributes': { - 'is-space-guarantee-enabled': None, - 'space-guarantee': None, - 'percentage-snapshot-reserve': None, - 'size': None, - }, - 'volume-qos-attributes': { - 'policy-group-name': None, - }, - 'volume-snapshot-attributes': { - 'snapshot-policy': None, - }, - 'volume-language-attributes': { - 'language-code': None, - }, - }, - }, - } - result = self.send_iter_request('volume-get-iter', api_args) - - if self._get_record_count(result) != 1: - msg = _('Could not find unique volume %(vol)s.') - msg_args = {'vol': flexvol_name} - raise exception.VolumeBackendAPIException(data=msg % msg_args) - - attributes_list = result.get_child_by_name( - 'attributes-list') or netapp_api.NaElement('none') - - volume_attributes = attributes_list.get_child_by_name( - 'volume-attributes') or netapp_api.NaElement('none') - - volume_id_attributes = volume_attributes.get_child_by_name( - 'volume-id-attributes') or 
netapp_api.NaElement('none') - volume_space_attributes = volume_attributes.get_child_by_name( - 'volume-space-attributes') or netapp_api.NaElement('none') - volume_qos_attributes = volume_attributes.get_child_by_name( - 'volume-qos-attributes') or netapp_api.NaElement('none') - volume_snapshot_attributes = volume_attributes.get_child_by_name( - 'volume-snapshot-attributes') or netapp_api.NaElement('none') - volume_language_attributes = volume_attributes.get_child_by_name( - 'volume-language-attributes') or netapp_api.NaElement('none') - - volume = { - 'name': volume_id_attributes.get_child_content('name'), - 'vserver': volume_id_attributes.get_child_content( - 'owning-vserver-name'), - 'junction-path': volume_id_attributes.get_child_content( - 'junction-path'), - 'aggregate': volume_id_attributes.get_child_content( - 'containing-aggregate-name'), - 'type': volume_id_attributes.get_child_content('type'), - 'space-guarantee-enabled': strutils.bool_from_string( - volume_space_attributes.get_child_content( - 'is-space-guarantee-enabled')), - 'space-guarantee': volume_space_attributes.get_child_content( - 'space-guarantee'), - 'percentage-snapshot-reserve': ( - volume_space_attributes.get_child_content( - 'percentage-snapshot-reserve')), - 'size': volume_space_attributes.get_child_content('size'), - 'qos-policy-group': volume_qos_attributes.get_child_content( - 'policy-group-name'), - 'snapshot-policy': volume_snapshot_attributes.get_child_content( - 'snapshot-policy'), - 'language': volume_language_attributes.get_child_content( - 'language-code'), - } - - return volume - - def get_flexvol_dedupe_info(self, flexvol_name): - """Get dedupe attributes needed for the storage service catalog.""" - - api_args = { - 'query': { - 'sis-status-info': { - 'path': '/vol/%s' % flexvol_name, - }, - }, - 'desired-attributes': { - 'sis-status-info': { - 'state': None, - 'is-compression-enabled': None, - 'logical-data-size': None, - 'logical-data-limit': None, - }, - }, - } - - 
no_dedupe_response = { - 'compression': False, - 'dedupe': False, - 'logical-data-size': 0, - 'logical-data-limit': 1, - } - - try: - result = self.send_iter_request('sis-get-iter', api_args) - except netapp_api.NaApiError as e: - if e.code == netapp_api.EAPIPRIVILEGE: - LOG.debug('Dedup info for volume %(name)s will not be ' - 'collected. This API requires cluster-scoped ' - 'credentials.', {'name': flexvol_name}) - else: - LOG.exception('Failed to get dedupe info for volume %s.', - flexvol_name) - return no_dedupe_response - - if self._get_record_count(result) != 1: - return no_dedupe_response - - attributes_list = result.get_child_by_name( - 'attributes-list') or netapp_api.NaElement('none') - - sis_status_info = attributes_list.get_child_by_name( - 'sis-status-info') or netapp_api.NaElement('none') - - logical_data_size = sis_status_info.get_child_content( - 'logical-data-size') or 0 - logical_data_limit = sis_status_info.get_child_content( - 'logical-data-limit') or 1 - - sis = { - 'compression': strutils.bool_from_string( - sis_status_info.get_child_content('is-compression-enabled')), - 'dedupe': na_utils.to_bool( - sis_status_info.get_child_content('state')), - 'logical-data-size': int(logical_data_size), - 'logical-data-limit': int(logical_data_limit), - } - - return sis - - def get_flexvol_dedupe_used_percent(self, flexvol_name): - """Determine how close a flexvol is to its shared block limit.""" - - # Note(cknight): The value returned by this method is computed from - # values returned by two different APIs, one of which was new in - # Data ONTAP 8.3. 
- if not self.features.CLONE_SPLIT_STATUS: - return 0.0 - - dedupe_info = self.get_flexvol_dedupe_info(flexvol_name) - clone_split_info = self.get_clone_split_info(flexvol_name) - - total_dedupe_blocks = (dedupe_info.get('logical-data-size') + - clone_split_info.get('unsplit-size')) - dedupe_used_percent = (100.0 * float(total_dedupe_blocks) / - dedupe_info.get('logical-data-limit')) - return dedupe_used_percent - - def get_clone_split_info(self, flexvol_name): - """Get the status of unsplit file/LUN clones in a flexvol.""" - - try: - result = self.send_request('clone-split-status', - {'volume-name': flexvol_name}) - except netapp_api.NaApiError: - LOG.exception('Failed to get clone split info for volume %s.', - flexvol_name) - return {'unsplit-size': 0, 'unsplit-clone-count': 0} - - clone_split_info = result.get_child_by_name( - 'clone-split-info') or netapp_api.NaElement('none') - - unsplit_size = clone_split_info.get_child_content('unsplit-size') or 0 - unsplit_clone_count = clone_split_info.get_child_content( - 'unsplit-clone-count') or 0 - - return { - 'unsplit-size': int(unsplit_size), - 'unsplit-clone-count': int(unsplit_clone_count), - } - - def is_flexvol_mirrored(self, flexvol_name, vserver_name): - """Check if flexvol is a SnapMirror source.""" - - api_args = { - 'query': { - 'snapmirror-info': { - 'source-vserver': vserver_name, - 'source-volume': flexvol_name, - 'mirror-state': 'snapmirrored', - 'relationship-type': 'data_protection', - }, - }, - 'desired-attributes': { - 'snapmirror-info': None, - }, - } - - try: - result = self.send_iter_request('snapmirror-get-iter', api_args) - except netapp_api.NaApiError: - LOG.exception('Failed to get SnapMirror info for volume %s.', - flexvol_name) - return False - - if not self._has_records(result): - return False - - return True - - def is_flexvol_encrypted(self, flexvol_name, vserver_name): - """Check if a flexvol is encrypted.""" - - if not self.features.FLEXVOL_ENCRYPTION: - return False - - api_args = { - 
'query': { - 'volume-attributes': { - 'encrypt': 'true', - 'volume-id-attributes': { - 'name': flexvol_name, - 'owning-vserver-name': vserver_name, - }, - }, - }, - 'desired-attributes': { - 'volume-attributes': { - 'encrypt': None, - }, - }, - } - - try: - result = self.send_iter_request('volume-get-iter', api_args) - except netapp_api.NaApiError: - LOG.exception('Failed to get Encryption info for volume %s.', - flexvol_name) - return False - - if not self._has_records(result): - return False - - return True - - def create_flexvol(self, flexvol_name, aggregate_name, size_gb, - space_guarantee_type=None, snapshot_policy=None, - language=None, dedupe_enabled=False, - compression_enabled=False, snapshot_reserve=None, - volume_type='rw'): - - """Creates a volume.""" - api_args = { - 'containing-aggr-name': aggregate_name, - 'size': six.text_type(size_gb) + 'g', - 'volume': flexvol_name, - 'volume-type': volume_type, - } - if volume_type == 'dp': - snapshot_policy = None - else: - api_args['junction-path'] = '/%s' % flexvol_name - if snapshot_policy is not None: - api_args['snapshot-policy'] = snapshot_policy - if space_guarantee_type: - api_args['space-reserve'] = space_guarantee_type - if language is not None: - api_args['language-code'] = language - if snapshot_reserve is not None: - api_args['percentage-snapshot-reserve'] = six.text_type( - snapshot_reserve) - self.send_request('volume-create', api_args) - - # cDOT compression requires that deduplication be enabled. 
- if dedupe_enabled or compression_enabled: - self.enable_flexvol_dedupe(flexvol_name) - if compression_enabled: - self.enable_flexvol_compression(flexvol_name) - - def flexvol_exists(self, volume_name): - """Checks if a flexvol exists on the storage array.""" - LOG.debug('Checking if volume %s exists', volume_name) - - api_args = { - 'query': { - 'volume-attributes': { - 'volume-id-attributes': { - 'name': volume_name, - }, - }, - }, - 'desired-attributes': { - 'volume-attributes': { - 'volume-id-attributes': { - 'name': None, - }, - }, - }, - } - result = self.send_iter_request('volume-get-iter', api_args) - return self._has_records(result) - - def rename_flexvol(self, orig_flexvol_name, new_flexvol_name): - """Set flexvol name.""" - api_args = { - 'volume': orig_flexvol_name, - 'new-volume-name': new_flexvol_name, - } - self.send_request('volume-rename', api_args) - - def mount_flexvol(self, flexvol_name, junction_path=None): - """Mounts a volume on a junction path.""" - api_args = { - 'volume-name': flexvol_name, - 'junction-path': (junction_path if junction_path - else '/%s' % flexvol_name) - } - self.send_request('volume-mount', api_args) - - def enable_flexvol_dedupe(self, flexvol_name): - """Enable deduplication on volume.""" - api_args = {'path': '/vol/%s' % flexvol_name} - self.send_request('sis-enable', api_args) - - def disable_flexvol_dedupe(self, flexvol_name): - """Disable deduplication on volume.""" - api_args = {'path': '/vol/%s' % flexvol_name} - self.send_request('sis-disable', api_args) - - def enable_flexvol_compression(self, flexvol_name): - """Enable compression on volume.""" - api_args = { - 'path': '/vol/%s' % flexvol_name, - 'enable-compression': 'true' - } - self.send_request('sis-set-config', api_args) - - def disable_flexvol_compression(self, flexvol_name): - """Disable compression on volume.""" - api_args = { - 'path': '/vol/%s' % flexvol_name, - 'enable-compression': 'false' - } - self.send_request('sis-set-config', api_args) - - 
@utils.trace_method - def delete_file(self, path_to_file): - """Delete file at path.""" - - api_args = { - 'path': path_to_file, - } - # Use fast clone deletion engine if it is supported. - if self.features.FAST_CLONE_DELETE: - api_args['is-clone-file'] = 'true' - self.send_request('file-delete-file', api_args, True) - - def _get_aggregates(self, aggregate_names=None, desired_attributes=None): - - query = { - 'aggr-attributes': { - 'aggregate-name': '|'.join(aggregate_names), - } - } if aggregate_names else None - - api_args = {} - if query: - api_args['query'] = query - if desired_attributes: - api_args['desired-attributes'] = desired_attributes - - result = self.send_request('aggr-get-iter', - api_args, - enable_tunneling=False) - if not self._has_records(result): - return [] - else: - return result.get_child_by_name('attributes-list').get_children() - - def get_node_for_aggregate(self, aggregate_name): - """Get home node for the specified aggregate. - - This API could return None, most notably if it was sent - to a Vserver LIF, so the caller must be able to handle that case. 
- """ - - if not aggregate_name: - return None - - desired_attributes = { - 'aggr-attributes': { - 'aggregate-name': None, - 'aggr-ownership-attributes': { - 'home-name': None, - }, - }, - } - - try: - aggrs = self._get_aggregates(aggregate_names=[aggregate_name], - desired_attributes=desired_attributes) - except netapp_api.NaApiError as e: - if e.code == netapp_api.EAPINOTFOUND: - return None - else: - raise - - if len(aggrs) < 1: - return None - - aggr_ownership_attrs = aggrs[0].get_child_by_name( - 'aggr-ownership-attributes') or netapp_api.NaElement('none') - return aggr_ownership_attrs.get_child_content('home-name') - - def get_aggregate(self, aggregate_name): - """Get aggregate attributes needed for the storage service catalog.""" - - if not aggregate_name: - return {} - - desired_attributes = { - 'aggr-attributes': { - 'aggregate-name': None, - 'aggr-raid-attributes': { - 'raid-type': None, - 'is-hybrid': None, - }, - }, - } - - try: - aggrs = self._get_aggregates(aggregate_names=[aggregate_name], - desired_attributes=desired_attributes) - except netapp_api.NaApiError as e: - if e.code == netapp_api.EAPINOTFOUND: - LOG.debug('Aggregate info can only be collected with ' - 'cluster-scoped credentials.') - else: - LOG.exception('Failed to get info for aggregate %s.', - aggregate_name) - return {} - - if len(aggrs) < 1: - return {} - - aggr_attributes = aggrs[0] - aggr_raid_attrs = aggr_attributes.get_child_by_name( - 'aggr-raid-attributes') or netapp_api.NaElement('none') - - aggregate = { - 'name': aggr_attributes.get_child_content('aggregate-name'), - 'raid-type': aggr_raid_attrs.get_child_content('raid-type'), - 'is-hybrid': strutils.bool_from_string( - aggr_raid_attrs.get_child_content('is-hybrid')), - } - - return aggregate - - def get_aggregate_disk_types(self, aggregate_name): - """Get the disk type(s) of an aggregate.""" - - disk_types = set() - disk_types.update(self._get_aggregate_disk_types(aggregate_name)) - if 
self.features.ADVANCED_DISK_PARTITIONING: - disk_types.update(self._get_aggregate_disk_types(aggregate_name, - shared=True)) - - return list(disk_types) if disk_types else None - - def _get_aggregate_disk_types(self, aggregate_name, shared=False): - """Get the disk type(s) of an aggregate (may be a list).""" - - disk_types = set() - - if shared: - disk_raid_info = { - 'disk-shared-info': { - 'aggregate-list': { - 'shared-aggregate-info': { - 'aggregate-name': aggregate_name, - }, - }, - }, - } - else: - disk_raid_info = { - 'disk-aggregate-info': { - 'aggregate-name': aggregate_name, - }, - } - - api_args = { - 'query': { - 'storage-disk-info': { - 'disk-raid-info': disk_raid_info, - }, - }, - 'desired-attributes': { - 'storage-disk-info': { - 'disk-raid-info': { - 'effective-disk-type': None, - }, - }, - }, - } - - try: - result = self.send_iter_request( - 'storage-disk-get-iter', api_args, enable_tunneling=False) - except netapp_api.NaApiError as e: - if e.code == netapp_api.EAPINOTFOUND: - LOG.debug('Disk types can only be collected with ' - 'cluster scoped credentials.') - else: - LOG.exception('Failed to get disk info for aggregate %s.', - aggregate_name) - return disk_types - - attributes_list = result.get_child_by_name( - 'attributes-list') or netapp_api.NaElement('none') - - for storage_disk_info in attributes_list.get_children(): - - disk_raid_info = storage_disk_info.get_child_by_name( - 'disk-raid-info') or netapp_api.NaElement('none') - disk_type = disk_raid_info.get_child_content( - 'effective-disk-type') - if disk_type: - disk_types.add(disk_type) - - return disk_types - - def get_aggregate_capacities(self, aggregate_names): - """Gets capacity info for multiple aggregates.""" - - if not isinstance(aggregate_names, list): - return {} - - aggregates = {} - for aggregate_name in aggregate_names: - aggregates[aggregate_name] = self.get_aggregate_capacity( - aggregate_name) - - return aggregates - - def get_aggregate_capacity(self, aggregate_name): - 
"""Gets capacity info for an aggregate.""" - - desired_attributes = { - 'aggr-attributes': { - 'aggr-space-attributes': { - 'percent-used-capacity': None, - 'size-available': None, - 'size-total': None, - }, - }, - } - - try: - aggrs = self._get_aggregates(aggregate_names=[aggregate_name], - desired_attributes=desired_attributes) - except netapp_api.NaApiError as e: - if e.code == netapp_api.EAPINOTFOUND: - LOG.debug('Aggregate capacity can only be collected with ' - 'cluster scoped credentials.') - else: - LOG.exception('Failed to get info for aggregate %s.', - aggregate_name) - return {} - - if len(aggrs) < 1: - return {} - - aggr_attributes = aggrs[0] - aggr_space_attributes = aggr_attributes.get_child_by_name( - 'aggr-space-attributes') or netapp_api.NaElement('none') - - percent_used = int(aggr_space_attributes.get_child_content( - 'percent-used-capacity')) - size_available = float(aggr_space_attributes.get_child_content( - 'size-available')) - size_total = float( - aggr_space_attributes.get_child_content('size-total')) - - return { - 'percent-used': percent_used, - 'size-available': size_available, - 'size-total': size_total, - } - - def get_performance_instance_uuids(self, object_name, node_name): - """Get UUIDs of performance instances for a cluster node.""" - - api_args = { - 'objectname': object_name, - 'query': { - 'instance-info': { - 'uuid': node_name + ':*', - } - } - } - - result = self.send_request('perf-object-instance-list-info-iter', - api_args, - enable_tunneling=False) - - uuids = [] - - instances = result.get_child_by_name( - 'attributes-list') or netapp_api.NaElement('None') - - for instance_info in instances.get_children(): - uuids.append(instance_info.get_child_content('uuid')) - - return uuids - - def get_performance_counters(self, object_name, instance_uuids, - counter_names): - """Gets or or more cDOT performance counters.""" - - api_args = { - 'objectname': object_name, - 'instance-uuids': [ - {'instance-uuid': instance_uuid} - for 
instance_uuid in instance_uuids - ], - 'counters': [ - {'counter': counter} for counter in counter_names - ], - } - - result = self.send_request('perf-object-get-instances', - api_args, - enable_tunneling=False) - - counter_data = [] - - timestamp = result.get_child_content('timestamp') - - instances = result.get_child_by_name( - 'instances') or netapp_api.NaElement('None') - for instance in instances.get_children(): - - instance_name = instance.get_child_content('name') - instance_uuid = instance.get_child_content('uuid') - node_name = instance_uuid.split(':')[0] - - counters = instance.get_child_by_name( - 'counters') or netapp_api.NaElement('None') - for counter in counters.get_children(): - - counter_name = counter.get_child_content('name') - counter_value = counter.get_child_content('value') - - counter_data.append({ - 'instance-name': instance_name, - 'instance-uuid': instance_uuid, - 'node-name': node_name, - 'timestamp': timestamp, - counter_name: counter_value, - }) - - return counter_data - - def get_snapshots_marked_for_deletion(self, volume_list=None): - """Get a list of snapshots marked for deletion. - - :param volume_list: placeholder parameter to match 7mode client method - signature. 
- """ - - api_args = { - 'query': { - 'snapshot-info': { - 'name': client_base.DELETED_PREFIX + '*', - 'vserver': self.vserver, - 'busy': 'false', - }, - }, - 'desired-attributes': { - 'snapshot-info': { - 'name': None, - 'volume': None, - 'snapshot-instance-uuid': None, - } - }, - } - - result = self.send_request('snapshot-get-iter', api_args) - - snapshots = [] - - attributes = result.get_child_by_name( - 'attributes-list') or netapp_api.NaElement('none') - snapshot_info_list = attributes.get_children() - for snapshot_info in snapshot_info_list: - snapshot_name = snapshot_info.get_child_content('name') - snapshot_id = snapshot_info.get_child_content( - 'snapshot-instance-uuid') - snapshot_volume = snapshot_info.get_child_content('volume') - - snapshots.append({ - 'name': snapshot_name, - 'instance_id': snapshot_id, - 'volume_name': snapshot_volume, - }) - - return snapshots - - def get_snapshot(self, volume_name, snapshot_name): - """Gets a single snapshot.""" - api_args = { - 'query': { - 'snapshot-info': { - 'name': snapshot_name, - 'volume': volume_name, - }, - }, - 'desired-attributes': { - 'snapshot-info': { - 'name': None, - 'volume': None, - 'busy': None, - 'snapshot-owners-list': { - 'snapshot-owner': None, - } - }, - }, - } - result = self.send_request('snapshot-get-iter', api_args) - - self._handle_get_snapshot_return_failure(result, snapshot_name) - - attributes_list = result.get_child_by_name( - 'attributes-list') or netapp_api.NaElement('none') - snapshot_info_list = attributes_list.get_children() - - self._handle_snapshot_not_found(result, snapshot_info_list, - snapshot_name, volume_name) - - snapshot_info = snapshot_info_list[0] - snapshot = { - 'name': snapshot_info.get_child_content('name'), - 'volume': snapshot_info.get_child_content('volume'), - 'busy': strutils.bool_from_string( - snapshot_info.get_child_content('busy')), - } - - snapshot_owners_list = snapshot_info.get_child_by_name( - 'snapshot-owners-list') or netapp_api.NaElement('none') - 
snapshot_owners = set([ - snapshot_owner.get_child_content('owner') - for snapshot_owner in snapshot_owners_list.get_children()]) - snapshot['owners'] = snapshot_owners - - return snapshot - - def _handle_get_snapshot_return_failure(self, result, snapshot_name): - error_record_list = result.get_child_by_name( - 'volume-errors') or netapp_api.NaElement('none') - errors = error_record_list.get_children() - - if errors: - error = errors[0] - error_code = error.get_child_content('errno') - error_reason = error.get_child_content('reason') - msg = _('Could not read information for snapshot %(name)s. ' - 'Code: %(code)s. Reason: %(reason)s') - msg_args = { - 'name': snapshot_name, - 'code': error_code, - 'reason': error_reason, - } - if error_code == netapp_api.ESNAPSHOTNOTALLOWED: - raise exception.SnapshotUnavailable(data=msg % msg_args) - else: - raise exception.VolumeBackendAPIException(data=msg % msg_args) - - def _handle_snapshot_not_found(self, result, snapshot_info_list, - snapshot_name, volume_name): - if not self._has_records(result): - raise exception.SnapshotNotFound(snapshot_id=snapshot_name) - elif len(snapshot_info_list) > 1: - msg = _('Could not find unique snapshot %(snap)s on ' - 'volume %(vol)s.') - msg_args = {'snap': snapshot_name, 'vol': volume_name} - raise exception.VolumeBackendAPIException(data=msg % msg_args) - - def create_cluster_peer(self, addresses, username=None, password=None, - passphrase=None): - """Creates a cluster peer relationship.""" - - api_args = { - 'peer-addresses': [ - {'remote-inet-address': address} for address in addresses - ], - } - if username: - api_args['user-name'] = username - if password: - api_args['password'] = password - if passphrase: - api_args['passphrase'] = passphrase - - self.send_request('cluster-peer-create', api_args) - - def get_cluster_peers(self, remote_cluster_name=None): - """Gets one or more cluster peer relationships.""" - - api_args = {} - if remote_cluster_name: - api_args['query'] = { - 
'cluster-peer-info': { - 'remote-cluster-name': remote_cluster_name, - } - } - - result = self.send_iter_request('cluster-peer-get-iter', api_args) - if not self._has_records(result): - return [] - - cluster_peers = [] - - for cluster_peer_info in result.get_child_by_name( - 'attributes-list').get_children(): - - cluster_peer = { - 'active-addresses': [], - 'peer-addresses': [] - } - - active_addresses = cluster_peer_info.get_child_by_name( - 'active-addresses') or netapp_api.NaElement('none') - for address in active_addresses.get_children(): - cluster_peer['active-addresses'].append(address.get_content()) - - peer_addresses = cluster_peer_info.get_child_by_name( - 'peer-addresses') or netapp_api.NaElement('none') - for address in peer_addresses.get_children(): - cluster_peer['peer-addresses'].append(address.get_content()) - - cluster_peer['availability'] = cluster_peer_info.get_child_content( - 'availability') - cluster_peer['cluster-name'] = cluster_peer_info.get_child_content( - 'cluster-name') - cluster_peer['cluster-uuid'] = cluster_peer_info.get_child_content( - 'cluster-uuid') - cluster_peer['remote-cluster-name'] = ( - cluster_peer_info.get_child_content('remote-cluster-name')) - cluster_peer['serial-number'] = ( - cluster_peer_info.get_child_content('serial-number')) - cluster_peer['timeout'] = cluster_peer_info.get_child_content( - 'timeout') - - cluster_peers.append(cluster_peer) - - return cluster_peers - - def delete_cluster_peer(self, cluster_name): - """Deletes a cluster peer relationship.""" - - api_args = {'cluster-name': cluster_name} - self.send_request('cluster-peer-delete', api_args) - - def get_cluster_peer_policy(self): - """Gets the cluster peering policy configuration.""" - - if not self.features.CLUSTER_PEER_POLICY: - return {} - - result = self.send_request('cluster-peer-policy-get') - - attributes = result.get_child_by_name( - 'attributes') or netapp_api.NaElement('none') - cluster_peer_policy = attributes.get_child_by_name( - 
'cluster-peer-policy') or netapp_api.NaElement('none') - - policy = { - 'is-unauthenticated-access-permitted': - cluster_peer_policy.get_child_content( - 'is-unauthenticated-access-permitted'), - 'passphrase-minimum-length': - cluster_peer_policy.get_child_content( - 'passphrase-minimum-length'), - } - - if policy['is-unauthenticated-access-permitted'] is not None: - policy['is-unauthenticated-access-permitted'] = ( - strutils.bool_from_string( - policy['is-unauthenticated-access-permitted'])) - if policy['passphrase-minimum-length'] is not None: - policy['passphrase-minimum-length'] = int( - policy['passphrase-minimum-length']) - - return policy - - def set_cluster_peer_policy(self, is_unauthenticated_access_permitted=None, - passphrase_minimum_length=None): - """Modifies the cluster peering policy configuration.""" - - if not self.features.CLUSTER_PEER_POLICY: - return - - if (is_unauthenticated_access_permitted is None and - passphrase_minimum_length is None): - return - - api_args = {} - if is_unauthenticated_access_permitted is not None: - api_args['is-unauthenticated-access-permitted'] = ( - 'true' if strutils.bool_from_string( - is_unauthenticated_access_permitted) else 'false') - if passphrase_minimum_length is not None: - api_args['passphrase-minlength'] = six.text_type( - passphrase_minimum_length) - - self.send_request('cluster-peer-policy-modify', api_args) - - def create_vserver_peer(self, vserver_name, peer_vserver_name): - """Creates a Vserver peer relationship for SnapMirrors.""" - api_args = { - 'vserver': vserver_name, - 'peer-vserver': peer_vserver_name, - 'applications': [ - {'vserver-peer-application': 'snapmirror'}, - ], - } - self.send_request('vserver-peer-create', api_args) - - def delete_vserver_peer(self, vserver_name, peer_vserver_name): - """Deletes a Vserver peer relationship.""" - - api_args = {'vserver': vserver_name, 'peer-vserver': peer_vserver_name} - self.send_request('vserver-peer-delete', api_args) - - def 
accept_vserver_peer(self, vserver_name, peer_vserver_name): - """Accepts a pending Vserver peer relationship.""" - - api_args = {'vserver': vserver_name, 'peer-vserver': peer_vserver_name} - self.send_request('vserver-peer-accept', api_args) - - def get_vserver_peers(self, vserver_name=None, peer_vserver_name=None): - """Gets one or more Vserver peer relationships.""" - - api_args = None - if vserver_name or peer_vserver_name: - api_args = {'query': {'vserver-peer-info': {}}} - if vserver_name: - api_args['query']['vserver-peer-info']['vserver'] = ( - vserver_name) - if peer_vserver_name: - api_args['query']['vserver-peer-info']['peer-vserver'] = ( - peer_vserver_name) - - result = self.send_iter_request('vserver-peer-get-iter', api_args) - if not self._has_records(result): - return [] - - vserver_peers = [] - - for vserver_peer_info in result.get_child_by_name( - 'attributes-list').get_children(): - - vserver_peer = { - 'vserver': vserver_peer_info.get_child_content('vserver'), - 'peer-vserver': - vserver_peer_info.get_child_content('peer-vserver'), - 'peer-state': - vserver_peer_info.get_child_content('peer-state'), - 'peer-cluster': - vserver_peer_info.get_child_content('peer-cluster'), - } - vserver_peers.append(vserver_peer) - - return vserver_peers - - def _ensure_snapmirror_v2(self): - """Verify support for SnapMirror control plane v2.""" - if not self.features.SNAPMIRROR_V2: - msg = _('SnapMirror features require Data ONTAP 8.2 or later.') - raise exception.NetAppDriverException(msg) - - def create_snapmirror(self, source_vserver, source_volume, - destination_vserver, destination_volume, - schedule=None, policy=None, - relationship_type='data_protection'): - """Creates a SnapMirror relationship (cDOT 8.2 or later only).""" - self._ensure_snapmirror_v2() - - api_args = { - 'source-volume': source_volume, - 'source-vserver': source_vserver, - 'destination-volume': destination_volume, - 'destination-vserver': destination_vserver, - 'relationship-type': 
relationship_type, - } - if schedule: - api_args['schedule'] = schedule - if policy: - api_args['policy'] = policy - - try: - self.send_request('snapmirror-create', api_args) - except netapp_api.NaApiError as e: - if e.code != netapp_api.ERELATION_EXISTS: - raise - - def initialize_snapmirror(self, source_vserver, source_volume, - destination_vserver, destination_volume, - source_snapshot=None, transfer_priority=None): - """Initializes a SnapMirror relationship (cDOT 8.2 or later only).""" - self._ensure_snapmirror_v2() - - api_args = { - 'source-volume': source_volume, - 'source-vserver': source_vserver, - 'destination-volume': destination_volume, - 'destination-vserver': destination_vserver, - } - if source_snapshot: - api_args['source-snapshot'] = source_snapshot - if transfer_priority: - api_args['transfer-priority'] = transfer_priority - - result = self.send_request('snapmirror-initialize', api_args) - - result_info = {} - result_info['operation-id'] = result.get_child_content( - 'result-operation-id') - result_info['status'] = result.get_child_content('result-status') - result_info['jobid'] = result.get_child_content('result-jobid') - result_info['error-code'] = result.get_child_content( - 'result-error-code') - result_info['error-message'] = result.get_child_content( - 'result-error-message') - - return result_info - - def release_snapmirror(self, source_vserver, source_volume, - destination_vserver, destination_volume, - relationship_info_only=False): - """Removes a SnapMirror relationship on the source endpoint.""" - self._ensure_snapmirror_v2() - - api_args = { - 'query': { - 'snapmirror-destination-info': { - 'source-volume': source_volume, - 'source-vserver': source_vserver, - 'destination-volume': destination_volume, - 'destination-vserver': destination_vserver, - 'relationship-info-only': ('true' if relationship_info_only - else 'false'), - } - } - } - self.send_request('snapmirror-release-iter', api_args) - - def quiesce_snapmirror(self, 
source_vserver, source_volume, - destination_vserver, destination_volume): - """Disables future transfers to a SnapMirror destination.""" - self._ensure_snapmirror_v2() - - api_args = { - 'source-volume': source_volume, - 'source-vserver': source_vserver, - 'destination-volume': destination_volume, - 'destination-vserver': destination_vserver, - } - self.send_request('snapmirror-quiesce', api_args) - - def abort_snapmirror(self, source_vserver, source_volume, - destination_vserver, destination_volume, - clear_checkpoint=False): - """Stops ongoing transfers for a SnapMirror relationship.""" - self._ensure_snapmirror_v2() - - api_args = { - 'source-volume': source_volume, - 'source-vserver': source_vserver, - 'destination-volume': destination_volume, - 'destination-vserver': destination_vserver, - 'clear-checkpoint': 'true' if clear_checkpoint else 'false', - } - try: - self.send_request('snapmirror-abort', api_args) - except netapp_api.NaApiError as e: - if e.code != netapp_api.ENOTRANSFER_IN_PROGRESS: - raise - - def break_snapmirror(self, source_vserver, source_volume, - destination_vserver, destination_volume): - """Breaks a data protection SnapMirror relationship.""" - self._ensure_snapmirror_v2() - - api_args = { - 'source-volume': source_volume, - 'source-vserver': source_vserver, - 'destination-volume': destination_volume, - 'destination-vserver': destination_vserver, - } - self.send_request('snapmirror-break', api_args) - - def modify_snapmirror(self, source_vserver, source_volume, - destination_vserver, destination_volume, - schedule=None, policy=None, tries=None, - max_transfer_rate=None): - """Modifies a SnapMirror relationship.""" - self._ensure_snapmirror_v2() - - api_args = { - 'source-volume': source_volume, - 'source-vserver': source_vserver, - 'destination-volume': destination_volume, - 'destination-vserver': destination_vserver, - } - if schedule: - api_args['schedule'] = schedule - if policy: - api_args['policy'] = policy - if tries is not None: - 
api_args['tries'] = tries - if max_transfer_rate is not None: - api_args['max-transfer-rate'] = max_transfer_rate - - self.send_request('snapmirror-modify', api_args) - - def delete_snapmirror(self, source_vserver, source_volume, - destination_vserver, destination_volume): - """Destroys a SnapMirror relationship.""" - self._ensure_snapmirror_v2() - - api_args = { - 'query': { - 'snapmirror-info': { - 'source-volume': source_volume, - 'source-vserver': source_vserver, - 'destination-volume': destination_volume, - 'destination-vserver': destination_vserver, - } - } - } - self.send_request('snapmirror-destroy-iter', api_args) - - def update_snapmirror(self, source_vserver, source_volume, - destination_vserver, destination_volume): - """Schedules a SnapMirror update.""" - self._ensure_snapmirror_v2() - - api_args = { - 'source-volume': source_volume, - 'source-vserver': source_vserver, - 'destination-volume': destination_volume, - 'destination-vserver': destination_vserver, - } - try: - self.send_request('snapmirror-update', api_args) - except netapp_api.NaApiError as e: - if (e.code != netapp_api.ETRANSFER_IN_PROGRESS and - e.code != netapp_api.EANOTHER_OP_ACTIVE): - raise - - def resume_snapmirror(self, source_vserver, source_volume, - destination_vserver, destination_volume): - """Resume a SnapMirror relationship if it is quiesced.""" - self._ensure_snapmirror_v2() - - api_args = { - 'source-volume': source_volume, - 'source-vserver': source_vserver, - 'destination-volume': destination_volume, - 'destination-vserver': destination_vserver, - } - try: - self.send_request('snapmirror-resume', api_args) - except netapp_api.NaApiError as e: - if e.code != netapp_api.ERELATION_NOT_QUIESCED: - raise - - def resync_snapmirror(self, source_vserver, source_volume, - destination_vserver, destination_volume): - """Resync a SnapMirror relationship.""" - self._ensure_snapmirror_v2() - - api_args = { - 'source-volume': source_volume, - 'source-vserver': source_vserver, - 
'destination-volume': destination_volume, - 'destination-vserver': destination_vserver, - } - self.send_request('snapmirror-resync', api_args) - - def _get_snapmirrors(self, source_vserver=None, source_volume=None, - destination_vserver=None, destination_volume=None, - desired_attributes=None): - - query = None - if (source_vserver or source_volume or destination_vserver or - destination_volume): - query = {'snapmirror-info': {}} - if source_volume: - query['snapmirror-info']['source-volume'] = source_volume - if destination_volume: - query['snapmirror-info']['destination-volume'] = ( - destination_volume) - if source_vserver: - query['snapmirror-info']['source-vserver'] = source_vserver - if destination_vserver: - query['snapmirror-info']['destination-vserver'] = ( - destination_vserver) - - api_args = {} - if query: - api_args['query'] = query - if desired_attributes: - api_args['desired-attributes'] = desired_attributes - - result = self.send_iter_request('snapmirror-get-iter', api_args) - if not self._has_records(result): - return [] - else: - return result.get_child_by_name('attributes-list').get_children() - - def get_snapmirrors(self, source_vserver, source_volume, - destination_vserver, destination_volume, - desired_attributes=None): - """Gets one or more SnapMirror relationships. - - Either the source or destination info may be omitted. - Desired attributes should be a flat list of attribute names. 
- """ - self._ensure_snapmirror_v2() - - if desired_attributes is not None: - desired_attributes = { - 'snapmirror-info': {attr: None for attr in desired_attributes}, - } - - result = self._get_snapmirrors( - source_vserver=source_vserver, - source_volume=source_volume, - destination_vserver=destination_vserver, - destination_volume=destination_volume, - desired_attributes=desired_attributes) - - snapmirrors = [] - - for snapmirror_info in result: - snapmirror = {} - for child in snapmirror_info.get_children(): - name = self._strip_xml_namespace(child.get_name()) - snapmirror[name] = child.get_content() - snapmirrors.append(snapmirror) - - return snapmirrors - - def get_provisioning_options_from_flexvol(self, flexvol_name): - """Get a dict of provisioning options matching existing flexvol.""" - - flexvol_info = self.get_flexvol(flexvol_name=flexvol_name) - dedupe_info = self.get_flexvol_dedupe_info(flexvol_name) - - provisioning_opts = { - 'aggregate': flexvol_info['aggregate'], - # space-guarantee can be 'none', 'file', 'volume' - 'space_guarantee_type': flexvol_info.get('space-guarantee'), - 'snapshot_policy': flexvol_info['snapshot-policy'], - 'language': flexvol_info['language'], - 'dedupe_enabled': dedupe_info['dedupe'], - 'compression_enabled': dedupe_info['compression'], - 'snapshot_reserve': flexvol_info['percentage-snapshot-reserve'], - 'volume_type': flexvol_info['type'], - 'size': int(math.ceil(float(flexvol_info['size']) / units.Gi)), - } - - return provisioning_opts diff --git a/cinder/volume/drivers/netapp/dataontap/fc_7mode.py b/cinder/volume/drivers/netapp/dataontap/fc_7mode.py deleted file mode 100644 index 7480d3c9e..000000000 --- a/cinder/volume/drivers/netapp/dataontap/fc_7mode.py +++ /dev/null @@ -1,134 +0,0 @@ -# Copyright (c) - 2014, Clinton Knight. All rights reserved. -# Copyright (c) 2016 Mike Rooney. All rights reserved. 
-# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -""" -Volume driver for NetApp Data ONTAP (7-mode) FibreChannel storage systems. -""" - -from cinder import interface -from cinder.volume import driver -from cinder.volume.drivers.netapp.dataontap import block_7mode -from cinder.zonemanager import utils as fczm_utils - - -@interface.volumedriver -class NetApp7modeFibreChannelDriver(driver.BaseVD, - driver.ManageableVD): - """NetApp 7-mode FibreChannel volume driver.""" - - DRIVER_NAME = 'NetApp_FibreChannel_7mode_direct' - - # ThirdPartySystems wiki page - CI_WIKI_NAME = "NetApp_CI" - VERSION = block_7mode.NetAppBlockStorage7modeLibrary.VERSION - - def __init__(self, *args, **kwargs): - super(NetApp7modeFibreChannelDriver, self).__init__(*args, **kwargs) - self.library = block_7mode.NetAppBlockStorage7modeLibrary( - self.DRIVER_NAME, 'FC', **kwargs) - - def do_setup(self, context): - self.library.do_setup(context) - - def check_for_setup_error(self): - self.library.check_for_setup_error() - - def create_volume(self, volume): - return self.library.create_volume(volume) - - def create_volume_from_snapshot(self, volume, snapshot): - return self.library.create_volume_from_snapshot(volume, snapshot) - - def create_cloned_volume(self, volume, src_vref): - return self.library.create_cloned_volume(volume, src_vref) - - def delete_volume(self, volume): - self.library.delete_volume(volume) - - def create_snapshot(self, snapshot): - self.library.create_snapshot(snapshot) - - def 
delete_snapshot(self, snapshot): - self.library.delete_snapshot(snapshot) - - def get_volume_stats(self, refresh=False): - return self.library.get_volume_stats(refresh, - self.get_filter_function(), - self.get_goodness_function()) - - def get_default_filter_function(self): - return self.library.get_default_filter_function() - - def get_default_goodness_function(self): - return self.library.get_default_goodness_function() - - def extend_volume(self, volume, new_size): - self.library.extend_volume(volume, new_size) - - def ensure_export(self, context, volume): - return self.library.ensure_export(context, volume) - - def create_export(self, context, volume, connector): - return self.library.create_export(context, volume) - - def remove_export(self, context, volume): - self.library.remove_export(context, volume) - - def manage_existing(self, volume, existing_ref): - return self.library.manage_existing(volume, existing_ref) - - def manage_existing_get_size(self, volume, existing_ref): - return self.library.manage_existing_get_size(volume, existing_ref) - - def unmanage(self, volume): - return self.library.unmanage(volume) - - @fczm_utils.add_fc_zone - def initialize_connection(self, volume, connector): - return self.library.initialize_connection_fc(volume, connector) - - @fczm_utils.remove_fc_zone - def terminate_connection(self, volume, connector, **kwargs): - return self.library.terminate_connection_fc(volume, connector, - **kwargs) - - def get_pool(self, volume): - return self.library.get_pool(volume) - - def create_consistencygroup(self, context, group): - return self.library.create_consistencygroup(group) - - def delete_consistencygroup(self, context, group, volumes): - return self.library.delete_consistencygroup(group, volumes) - - def update_consistencygroup(self, context, group, - add_volumes=None, remove_volumes=None): - return self.library.update_consistencygroup(group, add_volumes=None, - remove_volumes=None) - - def create_cgsnapshot(self, context, 
cgsnapshot, snapshots): - return self.library.create_cgsnapshot(cgsnapshot, snapshots) - - def delete_cgsnapshot(self, context, cgsnapshot, snapshots): - return self.library.delete_cgsnapshot(cgsnapshot, snapshots) - - def create_consistencygroup_from_src(self, context, group, volumes, - cgsnapshot=None, snapshots=None, - source_cg=None, source_vols=None): - return self.library.create_consistencygroup_from_src( - group, volumes, cgsnapshot=cgsnapshot, snapshots=snapshots, - source_cg=source_cg, source_vols=source_vols) - - def failover_host(self, context, volumes, secondary_id=None, groups=None): - raise NotImplementedError() diff --git a/cinder/volume/drivers/netapp/dataontap/fc_cmode.py b/cinder/volume/drivers/netapp/dataontap/fc_cmode.py deleted file mode 100644 index a69de7691..000000000 --- a/cinder/volume/drivers/netapp/dataontap/fc_cmode.py +++ /dev/null @@ -1,135 +0,0 @@ -# Copyright (c) - 2014, Clinton Knight. All rights reserved. -# Copyright (c) - 2016 Mike Rooney. All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -""" -Volume driver for NetApp Data ONTAP (C-mode) FibreChannel storage systems. 
-""" - -from cinder import interface -from cinder.volume import driver -from cinder.volume.drivers.netapp.dataontap import block_cmode -from cinder.zonemanager import utils as fczm_utils - - -@interface.volumedriver -class NetAppCmodeFibreChannelDriver(driver.BaseVD, - driver.ManageableVD): - """NetApp C-mode FibreChannel volume driver.""" - - DRIVER_NAME = 'NetApp_FibreChannel_Cluster_direct' - - # ThirdPartySystems wiki page - CI_WIKI_NAME = "NetApp_CI" - VERSION = block_cmode.NetAppBlockStorageCmodeLibrary.VERSION - - def __init__(self, *args, **kwargs): - super(NetAppCmodeFibreChannelDriver, self).__init__(*args, **kwargs) - self.library = block_cmode.NetAppBlockStorageCmodeLibrary( - self.DRIVER_NAME, 'FC', **kwargs) - - def do_setup(self, context): - self.library.do_setup(context) - - def check_for_setup_error(self): - self.library.check_for_setup_error() - - def create_volume(self, volume): - return self.library.create_volume(volume) - - def create_volume_from_snapshot(self, volume, snapshot): - return self.library.create_volume_from_snapshot(volume, snapshot) - - def create_cloned_volume(self, volume, src_vref): - return self.library.create_cloned_volume(volume, src_vref) - - def delete_volume(self, volume): - self.library.delete_volume(volume) - - def create_snapshot(self, snapshot): - self.library.create_snapshot(snapshot) - - def delete_snapshot(self, snapshot): - self.library.delete_snapshot(snapshot) - - def get_volume_stats(self, refresh=False): - return self.library.get_volume_stats(refresh, - self.get_filter_function(), - self.get_goodness_function()) - - def get_default_filter_function(self): - return self.library.get_default_filter_function() - - def get_default_goodness_function(self): - return self.library.get_default_goodness_function() - - def extend_volume(self, volume, new_size): - self.library.extend_volume(volume, new_size) - - def ensure_export(self, context, volume): - return self.library.ensure_export(context, volume) - - def 
create_export(self, context, volume, connector): - return self.library.create_export(context, volume) - - def remove_export(self, context, volume): - self.library.remove_export(context, volume) - - def manage_existing(self, volume, existing_ref): - return self.library.manage_existing(volume, existing_ref) - - def manage_existing_get_size(self, volume, existing_ref): - return self.library.manage_existing_get_size(volume, existing_ref) - - def unmanage(self, volume): - return self.library.unmanage(volume) - - @fczm_utils.add_fc_zone - def initialize_connection(self, volume, connector): - return self.library.initialize_connection_fc(volume, connector) - - @fczm_utils.remove_fc_zone - def terminate_connection(self, volume, connector, **kwargs): - return self.library.terminate_connection_fc(volume, connector, - **kwargs) - - def get_pool(self, volume): - return self.library.get_pool(volume) - - def create_consistencygroup(self, context, group): - return self.library.create_consistencygroup(group) - - def delete_consistencygroup(self, context, group, volumes): - return self.library.delete_consistencygroup(group, volumes) - - def update_consistencygroup(self, context, group, - add_volumes=None, remove_volumes=None): - return self.library.update_consistencygroup(group, add_volumes=None, - remove_volumes=None) - - def create_cgsnapshot(self, context, cgsnapshot, snapshots): - return self.library.create_cgsnapshot(cgsnapshot, snapshots) - - def delete_cgsnapshot(self, context, cgsnapshot, snapshots): - return self.library.delete_cgsnapshot(cgsnapshot, snapshots) - - def create_consistencygroup_from_src(self, context, group, volumes, - cgsnapshot=None, snapshots=None, - source_cg=None, source_vols=None): - return self.library.create_consistencygroup_from_src( - group, volumes, cgsnapshot=cgsnapshot, snapshots=snapshots, - source_cg=source_cg, source_vols=source_vols) - - def failover_host(self, context, volumes, secondary_id=None, groups=None): - return 
self.library.failover_host( - context, volumes, secondary_id=secondary_id) diff --git a/cinder/volume/drivers/netapp/dataontap/iscsi_7mode.py b/cinder/volume/drivers/netapp/dataontap/iscsi_7mode.py deleted file mode 100644 index ff6e42e9d..000000000 --- a/cinder/volume/drivers/netapp/dataontap/iscsi_7mode.py +++ /dev/null @@ -1,131 +0,0 @@ -# Copyright (c) 2014 Clinton Knight. All rights reserved. -# Copyright (c) 2016 Mike Rooney. All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -""" -Volume driver for NetApp Data ONTAP (7-mode) iSCSI storage systems. 
-""" - -from cinder import interface -from cinder.volume import driver -from cinder.volume.drivers.netapp.dataontap import block_7mode - - -@interface.volumedriver -class NetApp7modeISCSIDriver(driver.BaseVD, - driver.ManageableVD): - """NetApp 7-mode iSCSI volume driver.""" - - DRIVER_NAME = 'NetApp_iSCSI_7mode_direct' - - # ThirdPartySystems wiki page - CI_WIKI_NAME = "NetApp_CI" - VERSION = block_7mode.NetAppBlockStorage7modeLibrary.VERSION - - def __init__(self, *args, **kwargs): - super(NetApp7modeISCSIDriver, self).__init__(*args, **kwargs) - self.library = block_7mode.NetAppBlockStorage7modeLibrary( - self.DRIVER_NAME, 'iSCSI', **kwargs) - - def do_setup(self, context): - self.library.do_setup(context) - - def check_for_setup_error(self): - self.library.check_for_setup_error() - - def create_volume(self, volume): - return self.library.create_volume(volume) - - def create_volume_from_snapshot(self, volume, snapshot): - return self.library.create_volume_from_snapshot(volume, snapshot) - - def create_cloned_volume(self, volume, src_vref): - return self.library.create_cloned_volume(volume, src_vref) - - def delete_volume(self, volume): - self.library.delete_volume(volume) - - def create_snapshot(self, snapshot): - self.library.create_snapshot(snapshot) - - def delete_snapshot(self, snapshot): - self.library.delete_snapshot(snapshot) - - def get_volume_stats(self, refresh=False): - return self.library.get_volume_stats(refresh, - self.get_filter_function(), - self.get_goodness_function()) - - def get_default_filter_function(self): - return self.library.get_default_filter_function() - - def get_default_goodness_function(self): - return self.library.get_default_goodness_function() - - def extend_volume(self, volume, new_size): - self.library.extend_volume(volume, new_size) - - def ensure_export(self, context, volume): - return self.library.ensure_export(context, volume) - - def create_export(self, context, volume, connector): - return 
self.library.create_export(context, volume) - - def remove_export(self, context, volume): - self.library.remove_export(context, volume) - - def manage_existing(self, volume, existing_ref): - return self.library.manage_existing(volume, existing_ref) - - def manage_existing_get_size(self, volume, existing_ref): - return self.library.manage_existing_get_size(volume, existing_ref) - - def unmanage(self, volume): - return self.library.unmanage(volume) - - def initialize_connection(self, volume, connector): - return self.library.initialize_connection_iscsi(volume, connector) - - def terminate_connection(self, volume, connector, **kwargs): - return self.library.terminate_connection_iscsi(volume, connector, - **kwargs) - - def get_pool(self, volume): - return self.library.get_pool(volume) - - def create_consistencygroup(self, context, group): - return self.library.create_consistencygroup(group) - - def delete_consistencygroup(self, context, group, volumes): - return self.library.delete_consistencygroup(group, volumes) - - def update_consistencygroup(self, context, group, - add_volumes=None, remove_volumes=None): - return self.library.update_consistencygroup(group, add_volumes=None, - remove_volumes=None) - - def create_cgsnapshot(self, context, cgsnapshot, snapshots): - return self.library.create_cgsnapshot(cgsnapshot, snapshots) - - def delete_cgsnapshot(self, context, cgsnapshot, snapshots): - return self.library.delete_cgsnapshot(cgsnapshot, snapshots) - - def create_consistencygroup_from_src(self, context, group, volumes, - cgsnapshot=None, snapshots=None, - source_cg=None, source_vols=None): - return self.library.create_consistencygroup_from_src( - group, volumes, cgsnapshot=cgsnapshot, snapshots=snapshots, - source_cg=source_cg, source_vols=source_vols) - - def failover_host(self, context, volumes, secondary_id=None, groups=None): - raise NotImplementedError() diff --git a/cinder/volume/drivers/netapp/dataontap/iscsi_cmode.py 
b/cinder/volume/drivers/netapp/dataontap/iscsi_cmode.py deleted file mode 100644 index 13f6fb3ab..000000000 --- a/cinder/volume/drivers/netapp/dataontap/iscsi_cmode.py +++ /dev/null @@ -1,132 +0,0 @@ -# Copyright (c) 2014 Clinton Knight. All rights reserved. -# Copyright (c) 2016 Mike Rooney. All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -""" -Volume driver for NetApp Data ONTAP (C-mode) iSCSI storage systems. -""" - -from cinder import interface -from cinder.volume import driver -from cinder.volume.drivers.netapp.dataontap import block_cmode - - -@interface.volumedriver -class NetAppCmodeISCSIDriver(driver.BaseVD, - driver.ManageableVD): - """NetApp C-mode iSCSI volume driver.""" - - DRIVER_NAME = 'NetApp_iSCSI_Cluster_direct' - - # ThirdPartySystems wiki page - CI_WIKI_NAME = "NetApp_CI" - VERSION = block_cmode.NetAppBlockStorageCmodeLibrary.VERSION - - def __init__(self, *args, **kwargs): - super(NetAppCmodeISCSIDriver, self).__init__(*args, **kwargs) - self.library = block_cmode.NetAppBlockStorageCmodeLibrary( - self.DRIVER_NAME, 'iSCSI', **kwargs) - - def do_setup(self, context): - self.library.do_setup(context) - - def check_for_setup_error(self): - self.library.check_for_setup_error() - - def create_volume(self, volume): - return self.library.create_volume(volume) - - def create_volume_from_snapshot(self, volume, snapshot): - return self.library.create_volume_from_snapshot(volume, snapshot) - - def create_cloned_volume(self, volume, 
src_vref): - return self.library.create_cloned_volume(volume, src_vref) - - def delete_volume(self, volume): - self.library.delete_volume(volume) - - def create_snapshot(self, snapshot): - self.library.create_snapshot(snapshot) - - def delete_snapshot(self, snapshot): - self.library.delete_snapshot(snapshot) - - def get_volume_stats(self, refresh=False): - return self.library.get_volume_stats(refresh, - self.get_filter_function(), - self.get_goodness_function()) - - def get_default_filter_function(self): - return self.library.get_default_filter_function() - - def get_default_goodness_function(self): - return self.library.get_default_goodness_function() - - def extend_volume(self, volume, new_size): - self.library.extend_volume(volume, new_size) - - def ensure_export(self, context, volume): - return self.library.ensure_export(context, volume) - - def create_export(self, context, volume, connector): - return self.library.create_export(context, volume) - - def remove_export(self, context, volume): - self.library.remove_export(context, volume) - - def manage_existing(self, volume, existing_ref): - return self.library.manage_existing(volume, existing_ref) - - def manage_existing_get_size(self, volume, existing_ref): - return self.library.manage_existing_get_size(volume, existing_ref) - - def unmanage(self, volume): - return self.library.unmanage(volume) - - def initialize_connection(self, volume, connector): - return self.library.initialize_connection_iscsi(volume, connector) - - def terminate_connection(self, volume, connector, **kwargs): - return self.library.terminate_connection_iscsi(volume, connector, - **kwargs) - - def get_pool(self, volume): - return self.library.get_pool(volume) - - def create_consistencygroup(self, context, group): - return self.library.create_consistencygroup(group) - - def delete_consistencygroup(self, context, group, volumes): - return self.library.delete_consistencygroup(group, volumes) - - def update_consistencygroup(self, context, group, 
- add_volumes=None, remove_volumes=None): - return self.library.update_consistencygroup(group, add_volumes=None, - remove_volumes=None) - - def create_cgsnapshot(self, context, cgsnapshot, snapshots): - return self.library.create_cgsnapshot(cgsnapshot, snapshots) - - def delete_cgsnapshot(self, context, cgsnapshot, snapshots): - return self.library.delete_cgsnapshot(cgsnapshot, snapshots) - - def create_consistencygroup_from_src(self, context, group, volumes, - cgsnapshot=None, snapshots=None, - source_cg=None, source_vols=None): - return self.library.create_consistencygroup_from_src( - group, volumes, cgsnapshot=cgsnapshot, snapshots=snapshots, - source_cg=source_cg, source_vols=source_vols) - - def failover_host(self, context, volumes, secondary_id=None, groups=None): - return self.library.failover_host( - context, volumes, secondary_id=secondary_id) diff --git a/cinder/volume/drivers/netapp/dataontap/nfs_7mode.py b/cinder/volume/drivers/netapp/dataontap/nfs_7mode.py deleted file mode 100644 index 878b4d5d1..000000000 --- a/cinder/volume/drivers/netapp/dataontap/nfs_7mode.py +++ /dev/null @@ -1,283 +0,0 @@ -# Copyright (c) 2012 NetApp, Inc. All rights reserved. -# Copyright (c) 2014 Ben Swartzlander. All rights reserved. -# Copyright (c) 2014 Navneet Singh. All rights reserved. -# Copyright (c) 2014 Clinton Knight. All rights reserved. -# Copyright (c) 2014 Alex Meade. All rights reserved. -# Copyright (c) 2014 Bob Callaway. All rights reserved. -# Copyright (c) 2015 Tom Barron. All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the -# License for the specific language governing permissions and limitations -# under the License. -""" -Volume driver for NetApp NFS storage. -""" - -import os - -from oslo_log import log as logging -from oslo_log import versionutils -import six - -from cinder import exception -from cinder.i18n import _ -from cinder import interface -from cinder import utils -from cinder.volume.drivers.netapp.dataontap.client import client_7mode -from cinder.volume.drivers.netapp.dataontap import nfs_base -from cinder.volume.drivers.netapp.dataontap.performance import perf_7mode -from cinder.volume.drivers.netapp.dataontap.utils import utils as dot_utils -from cinder.volume.drivers.netapp import options as na_opts -from cinder.volume.drivers.netapp import utils as na_utils -from cinder.volume import utils as volume_utils - - -LOG = logging.getLogger(__name__) - - -@six.add_metaclass(utils.TraceWrapperWithABCMetaclass) -@interface.volumedriver -class NetApp7modeNfsDriver(nfs_base.NetAppNfsDriver): - """NetApp NFS driver for Data ONTAP (7-mode).""" - - # ThirdPartySystems wiki page - CI_WIKI_NAME = "NetApp_CI" - - def __init__(self, *args, **kwargs): - super(NetApp7modeNfsDriver, self).__init__(*args, **kwargs) - self.driver_name = 'NetApp_NFS_7mode_direct' - self.driver_mode = '7mode' - self.configuration.append_config_values(na_opts.netapp_7mode_opts) - - def do_setup(self, context): - """Do the customized set up on client if any for 7 mode.""" - super(NetApp7modeNfsDriver, self).do_setup(context) - - self.zapi_client = client_7mode.Client( - transport_type=self.configuration.netapp_transport_type, - username=self.configuration.netapp_login, - password=self.configuration.netapp_password, - hostname=self.configuration.netapp_server_hostname, - port=self.configuration.netapp_server_port, - vfiler=self.configuration.netapp_vfiler) - - self.perf_library = perf_7mode.Performance7modeLibrary( - self.zapi_client) - - # This driver has been marked 'deprecated' in the Ocata release 
and - # can be removed in Queens. - msg = _("The 7-mode Data ONTAP driver is deprecated and will be " - "removed in a future release.") - versionutils.report_deprecated_feature(LOG, msg) - - def check_for_setup_error(self): - """Checks if setup occurred properly.""" - api_version = self.zapi_client.get_ontapi_version() - if api_version: - major, minor = api_version - if major == 1 and minor < 9: - msg = _("Unsupported Data ONTAP version." - " Data ONTAP version 7.3.1 and above is supported.") - raise exception.VolumeBackendAPIException(data=msg) - else: - msg = _("Data ONTAP API version could not be determined.") - raise exception.VolumeBackendAPIException(data=msg) - self._add_looping_tasks() - super(NetApp7modeNfsDriver, self).check_for_setup_error() - - def _add_looping_tasks(self): - """Add tasks that need to be executed at a fixed interval.""" - super(NetApp7modeNfsDriver, self)._add_looping_tasks() - - def _handle_ems_logging(self): - """Log autosupport messages.""" - - base_ems_message = dot_utils.build_ems_log_message_0( - self.driver_name, self.app_version, self.driver_mode) - self.zapi_client.send_ems_log_message(base_ems_message) - - pool_ems_message = dot_utils.build_ems_log_message_1( - self.driver_name, self.app_version, None, - self._get_backing_flexvol_names(), []) - self.zapi_client.send_ems_log_message(pool_ems_message) - - def _clone_backing_file_for_volume(self, volume_name, clone_name, - volume_id, share=None, - is_snapshot=False, - source_snapshot=None): - """Clone backing file for Cinder volume. 
- - :param: is_snapshot Not used, present for method signature consistency - """ - (_host_ip, export_path) = self._get_export_ip_path(volume_id, share) - storage_path = self.zapi_client.get_actual_path_for_export(export_path) - target_path = '%s/%s' % (storage_path, clone_name) - self.zapi_client.clone_file('%s/%s' % (storage_path, volume_name), - target_path, source_snapshot) - - def _update_volume_stats(self): - """Retrieve stats info from vserver.""" - - self._ensure_shares_mounted() - - LOG.debug('Updating volume stats') - data = {} - backend_name = self.configuration.safe_get('volume_backend_name') - data['volume_backend_name'] = backend_name or self.driver_name - data['vendor_name'] = 'NetApp' - data['driver_version'] = self.VERSION - data['storage_protocol'] = 'nfs' - data['pools'] = self._get_pool_stats( - filter_function=self.get_filter_function(), - goodness_function=self.get_goodness_function()) - data['sparse_copy_volume'] = True - - self._spawn_clean_cache_job() - self._stats = data - - def _get_pool_stats(self, filter_function=None, goodness_function=None): - """Retrieve pool (i.e. 
NFS share) stats info from SSC volumes.""" - - pools = [] - self.perf_library.update_performance_cache() - - for nfs_share in self._mounted_shares: - - capacity = self._get_share_capacity_info(nfs_share) - - pool = dict() - pool['pool_name'] = nfs_share - pool['QoS_support'] = False - pool['multiattach'] = False - pool.update(capacity) - - thick = not self.configuration.nfs_sparsed_volumes - pool['thick_provisioning_support'] = thick - pool['thin_provisioning_support'] = not thick - - utilization = self.perf_library.get_node_utilization() - pool['utilization'] = na_utils.round_down(utilization, '0.01') - pool['filter_function'] = filter_function - pool['goodness_function'] = goodness_function - pool['consistencygroup_support'] = True - - pools.append(pool) - - return pools - - def _shortlist_del_eligible_files(self, share, old_files): - """Prepares list of eligible files to be deleted from cache.""" - file_list = [] - (_, export_path) = self._get_export_ip_path(share=share) - exported_volume = self.zapi_client.get_actual_path_for_export( - export_path) - for old_file in old_files: - path = os.path.join(exported_volume, old_file) - u_bytes = self.zapi_client.get_file_usage(path) - file_list.append((old_file, u_bytes)) - LOG.debug('Shortlisted files eligible for deletion: %s', file_list) - return file_list - - def _is_filer_ip(self, ip): - """Checks whether ip is on the same filer.""" - try: - ifconfig = self.zapi_client.get_ifconfig() - if_info = ifconfig.get_child_by_name('interface-config-info') - if if_info: - ifs = if_info.get_children() - for intf in ifs: - v4_addr = intf.get_child_by_name('v4-primary-address') - if v4_addr: - ip_info = v4_addr.get_child_by_name('ip-address-info') - if ip_info: - address = ip_info.get_child_content('address') - if ip == address: - return True - else: - continue - except Exception: - return False - return False - - def _share_match_for_ip(self, ip, shares): - """Returns the share that is served by ip. 
- - Multiple shares can have same dir path but - can be served using different ips. It finds the - share which is served by ip on same nfs server. - """ - if self._is_filer_ip(ip) and shares: - for share in shares: - ip_sh = share.split(':')[0] - if self._is_filer_ip(ip_sh): - LOG.debug('Share match found for ip %s', ip) - return share - LOG.debug('No share match found for ip %s', ip) - return None - - def _is_share_clone_compatible(self, volume, share): - """Checks if share is compatible with volume to host its clone.""" - thin = self.configuration.nfs_sparsed_volumes - return self._share_has_space_for_clone(share, volume['size'], thin) - - def _check_volume_type(self, volume, share, file_name, extra_specs): - """Matches a volume type for share file.""" - qos_policy_group = extra_specs.pop('netapp:qos_policy_group', None) \ - if extra_specs else None - if qos_policy_group: - raise exception.ManageExistingVolumeTypeMismatch( - reason=(_("Setting file qos policy group is not supported" - " on this storage family and ontap version."))) - volume_type = na_utils.get_volume_type_from_volume(volume) - if volume_type and 'qos_spec_id' in volume_type: - raise exception.ManageExistingVolumeTypeMismatch( - reason=_("QoS specs are not supported" - " on this storage family and ONTAP version.")) - - def _do_qos_for_volume(self, volume, extra_specs, cleanup=False): - """Set QoS policy on backend from volume type information.""" - # 7-mode DOT does not support QoS. 
- return - - def _get_volume_model_update(self, volume): - """Provide any updates necessary for a volume being created/managed.""" - - def _get_backing_flexvol_names(self): - """Returns a list of backing flexvol names.""" - flexvol_names = [] - for nfs_share in self._mounted_shares: - flexvol_name = nfs_share.rsplit('/', 1)[1] - flexvol_names.append(flexvol_name) - LOG.debug("Found flexvol %s", flexvol_name) - - return flexvol_names - - def _get_flexvol_names_from_hosts(self, hosts): - """Returns a set of flexvol names.""" - flexvols = set() - for host in hosts: - pool_name = volume_utils.extract_host(host, level='pool') - flexvol_name = pool_name.rsplit('/', 1)[1] - flexvols.add(flexvol_name) - return flexvols - - @utils.trace_method - def delete_cgsnapshot(self, context, cgsnapshot, snapshots): - """Delete files backing each snapshot in the cgsnapshot. - - :return: An implicit update of snapshot models that the manager will - interpret and subsequently set the model state to deleted. - """ - for snapshot in snapshots: - self._delete_file(snapshot['volume_id'], snapshot['name']) - LOG.debug("Snapshot %s deletion successful", snapshot['name']) - - return None, None diff --git a/cinder/volume/drivers/netapp/dataontap/nfs_base.py b/cinder/volume/drivers/netapp/dataontap/nfs_base.py deleted file mode 100644 index 4ea22b8c1..000000000 --- a/cinder/volume/drivers/netapp/dataontap/nfs_base.py +++ /dev/null @@ -1,1224 +0,0 @@ -# Copyright (c) 2012 NetApp, Inc. All rights reserved. -# Copyright (c) 2014 Ben Swartzlander. All rights reserved. -# Copyright (c) 2014 Navneet Singh. All rights reserved. -# Copyright (c) 2014 Clinton Knight. All rights reserved. -# Copyright (c) 2014 Alex Meade. All rights reserved. -# Copyright (c) 2014 Bob Callaway. All rights reserved. -# Copyright (c) 2015 Tom Barron. All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. 
You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -""" -Volume driver for NetApp NFS storage. -""" - -import copy -import math -import os -import re -import shutil -import threading -import time - -from oslo_concurrency import processutils -from oslo_config import cfg -from oslo_log import log as logging -from oslo_utils import units -import six -from six.moves import urllib - -from cinder import exception -from cinder.i18n import _ -from cinder.image import image_utils -from cinder import utils -from cinder.volume import driver -from cinder.volume.drivers.netapp.dataontap.utils import loopingcalls -from cinder.volume.drivers.netapp import options as na_opts -from cinder.volume.drivers.netapp import utils as na_utils -from cinder.volume.drivers import nfs -from cinder.volume import utils as volume_utils - - -LOG = logging.getLogger(__name__) -CONF = cfg.CONF -HOUSEKEEPING_INTERVAL_SECONDS = 600 # ten minutes - - -@six.add_metaclass(utils.TraceWrapperWithABCMetaclass) -class NetAppNfsDriver(driver.ManageableVD, - driver.CloneableImageVD, - nfs.NfsDriver): - """Base class for NetApp NFS driver for Data ONTAP.""" - - # do not increment this as it may be used in volume type definitions - VERSION = "1.0.0" - - # ThirdPartySystems wiki page - CI_WIKI_NAME = "NetApp_CI" - - REQUIRED_FLAGS = ['netapp_login', 'netapp_password', - 'netapp_server_hostname'] - DEFAULT_FILTER_FUNCTION = 'capabilities.utilization < 70' - DEFAULT_GOODNESS_FUNCTION = '100 - capabilities.utilization' - - def __init__(self, *args, **kwargs): - na_utils.validate_instantiation(**kwargs) - self._execute = None - self._context = None - 
self.app_version = kwargs.pop("app_version", "unknown") - super(NetAppNfsDriver, self).__init__(*args, **kwargs) - self.configuration.append_config_values(na_opts.netapp_connection_opts) - self.configuration.append_config_values(na_opts.netapp_basicauth_opts) - self.configuration.append_config_values(na_opts.netapp_transport_opts) - self.configuration.append_config_values(na_opts.netapp_img_cache_opts) - self.configuration.append_config_values(na_opts.netapp_nfs_extra_opts) - self.backend_name = self.host.split('@')[1] - self.loopingcalls = loopingcalls.LoopingCalls() - - def do_setup(self, context): - super(NetAppNfsDriver, self).do_setup(context) - self._context = context - na_utils.check_flags(self.REQUIRED_FLAGS, self.configuration) - self.zapi_client = None - - def check_for_setup_error(self): - """Returns an error if prerequisites aren't met.""" - super(NetAppNfsDriver, self).check_for_setup_error() - self.loopingcalls.start_tasks() - - def _add_looping_tasks(self): - """Add tasks that need to be executed at a fixed interval. - - Inheriting class overrides and then explicitly calls this method. - """ - # Add the task that deletes snapshots marked for deletion. - self.loopingcalls.add_task( - self._delete_snapshots_marked_for_deletion, - loopingcalls.ONE_MINUTE, - loopingcalls.ONE_MINUTE) - - # Add the task that logs EMS messages - self.loopingcalls.add_task( - self._handle_ems_logging, - loopingcalls.ONE_HOUR) - - def _delete_snapshots_marked_for_deletion(self): - volume_list = self._get_backing_flexvol_names() - snapshots = self.zapi_client.get_snapshots_marked_for_deletion( - volume_list) - for snapshot in snapshots: - self.zapi_client.delete_snapshot( - snapshot['volume_name'], snapshot['name']) - - def _handle_ems_logging(self): - """Log autosupport messages.""" - raise NotImplementedError() - - def get_pool(self, volume): - """Return pool name where volume resides. - - :param volume: The volume hosted by the driver. 
- :return: Name of the pool where given volume is hosted. - """ - return volume['provider_location'] - - def create_volume(self, volume): - """Creates a volume. - - :param volume: volume reference - """ - LOG.debug('create_volume on %s', volume['host']) - self._ensure_shares_mounted() - - # get share as pool name - pool_name = volume_utils.extract_host(volume['host'], level='pool') - - if pool_name is None: - msg = _("Pool is not available in the volume host field.") - raise exception.InvalidHost(reason=msg) - - extra_specs = na_utils.get_volume_extra_specs(volume) - - try: - volume['provider_location'] = pool_name - LOG.debug('Using pool %s.', pool_name) - self._do_create_volume(volume) - self._do_qos_for_volume(volume, extra_specs) - model_update = self._get_volume_model_update(volume) or {} - model_update['provider_location'] = volume['provider_location'] - return model_update - except Exception: - LOG.exception("Exception creating vol %(name)s on " - "pool %(pool)s.", - {'name': volume['name'], - 'pool': volume['provider_location']}) - # We need to set this for the model update in order for the - # manager to behave correctly. 
- volume['provider_location'] = None - - msg = _("Volume %(vol)s could not be created in pool %(pool)s.") - raise exception.VolumeBackendAPIException(data=msg % { - 'vol': volume['name'], 'pool': pool_name}) - - def create_volume_from_snapshot(self, volume, snapshot): - """Creates a volume from a snapshot.""" - source = { - 'name': snapshot['name'], - 'size': snapshot['volume_size'], - 'id': snapshot['volume_id'], - } - return self._clone_source_to_destination_volume(source, volume) - - def create_cloned_volume(self, volume, src_vref): - """Creates a clone of the specified volume.""" - source = {'name': src_vref['name'], - 'size': src_vref['size'], - 'id': src_vref['id']} - - return self._clone_source_to_destination_volume(source, volume) - - def _clone_source_to_destination_volume(self, source, destination_volume): - share = self._get_volume_location(source['id']) - - extra_specs = na_utils.get_volume_extra_specs(destination_volume) - - try: - destination_volume['provider_location'] = share - self._clone_with_extension_check( - source, destination_volume) - self._do_qos_for_volume(destination_volume, extra_specs) - - model_update = ( - self._get_volume_model_update(destination_volume) or {}) - model_update['provider_location'] = destination_volume[ - 'provider_location'] - - return model_update - except Exception: - LOG.exception("Exception creating volume %(name)s from source " - "%(source)s on share %(share)s.", - {'name': destination_volume['id'], - 'source': source['name'], - 'share': destination_volume['provider_location']}) - msg = _("Volume %s could not be created on shares.") - raise exception.VolumeBackendAPIException(data=msg % ( - destination_volume['id'])) - - def _clone_with_extension_check(self, source, destination_volume): - source_size = source['size'] - source_id = source['id'] - source_name = source['name'] - destination_volume_size = destination_volume['size'] - self._clone_backing_file_for_volume(source_name, - destination_volume['name'], - 
source_id) - path = self.local_path(destination_volume) - if self._discover_file_till_timeout(path): - self._set_rw_permissions(path) - if destination_volume_size != source_size: - try: - self.extend_volume(destination_volume, - destination_volume_size) - except Exception: - LOG.error("Resizing %s failed. Cleaning " - "volume.", destination_volume['name']) - self._cleanup_volume_on_failure(destination_volume) - raise exception.CinderException( - _("Resizing clone %s failed.") - % destination_volume['name']) - else: - raise exception.CinderException(_("NFS file %s not discovered.") - % destination_volume['name']) - - def _cleanup_volume_on_failure(self, volume): - LOG.debug('Cleaning up, failed operation on %s', volume['name']) - vol_path = self.local_path(volume) - if os.path.exists(vol_path): - LOG.debug('Found %s, deleting ...', vol_path) - self._delete_file_at_path(vol_path) - else: - LOG.debug('Could not find %s, continuing ...', vol_path) - - def _do_qos_for_volume(self, volume, extra_specs, cleanup=False): - """Set QoS policy on backend from volume type information.""" - raise NotImplementedError() - - def _get_volume_model_update(self, volume): - """Provide any updates necessary for a volume being created/managed.""" - raise NotImplementedError() - - def create_snapshot(self, snapshot): - """Creates a snapshot.""" - self._clone_backing_file_for_volume(snapshot['volume_name'], - snapshot['name'], - snapshot['volume_id'], - is_snapshot=True) - - def delete_snapshot(self, snapshot): - """Deletes a snapshot.""" - self._delete_file(snapshot.volume_id, snapshot.name) - - def _delete_file(self, file_id, file_name): - nfs_share = self._get_provider_location(file_id) - - if self._volume_not_present(nfs_share, file_name): - LOG.debug('File %(file_name)s not found when attempting to delete ' - 'from share %(share)s', - {'file_name': file_name, 'share': nfs_share}) - return - - path = self._get_volume_path(nfs_share, file_name) - self._delete(path) - - def 
_get_volume_location(self, volume_id): - """Returns NFS mount address as :.""" - nfs_server_ip = self._get_host_ip(volume_id) - export_path = self._get_export_path(volume_id) - return nfs_server_ip + ':' + export_path - - def _clone_backing_file_for_volume(self, volume_name, clone_name, - volume_id, share=None, - is_snapshot=False, - source_snapshot=None): - """Clone backing file for Cinder volume.""" - raise NotImplementedError() - - def _get_backing_flexvol_names(self): - """Returns backing flexvol names.""" - raise NotImplementedError() - - def _get_flexvol_names_from_hosts(self, hosts): - """Returns a set of flexvol names.""" - raise NotImplementedError() - - def _get_provider_location(self, volume_id): - """Returns provider location for given volume.""" - volume = self.db.volume_get(self._context, volume_id) - return volume.provider_location - - def _get_host_ip(self, volume_id): - """Returns IP address for the given volume.""" - return self._get_provider_location(volume_id).rsplit(':')[0] - - def _get_export_path(self, volume_id): - """Returns NFS export path for the given volume.""" - return self._get_provider_location(volume_id).rsplit(':')[1] - - def _volume_not_present(self, nfs_mount, volume_name): - """Check if volume exists.""" - try: - self._try_execute('ls', self._get_volume_path(nfs_mount, - volume_name)) - except processutils.ProcessExecutionError: - # If the volume isn't present - return True - return False - - def _try_execute(self, *command, **kwargs): - # NOTE(vish): Volume commands can partially fail due to timing, but - # running them a second time on failure will usually - # recover nicely. - tries = 0 - while True: - try: - self._execute(*command, **kwargs) - return True - except processutils.ProcessExecutionError: - tries += 1 - if tries >= self.configuration.num_shell_tries: - raise - LOG.exception("Recovering from a failed execute. 
" - "Try number %s", tries) - time.sleep(tries ** 2) - - def _get_volume_path(self, nfs_share, volume_name): - """Get volume path. - - Get volume path (local fs path) for given volume name on given nfs - share. - :param nfs_share: string, example 172.18.194.100:/var/nfs - :param volume_name: string, - example volume-91ee65ec-c473-4391-8c09-162b00c68a8c - """ - - return os.path.join(self._get_mount_point_for_share(nfs_share), - volume_name) - - def _update_volume_stats(self): - """Retrieve stats info from volume group.""" - raise NotImplementedError() - - def get_default_filter_function(self): - """Get the default filter_function string.""" - return self.DEFAULT_FILTER_FUNCTION - - def get_default_goodness_function(self): - """Get the default goodness_function string.""" - return self.DEFAULT_GOODNESS_FUNCTION - - def copy_image_to_volume(self, context, volume, image_service, image_id): - """Fetch the image from image_service and write it to the volume.""" - super(NetAppNfsDriver, self).copy_image_to_volume( - context, volume, image_service, image_id) - LOG.info('Copied image to volume %s using regular download.', - volume['id']) - self._register_image_in_cache(volume, image_id) - - def _register_image_in_cache(self, volume, image_id): - """Stores image in the cache.""" - file_name = 'img-cache-%s' % image_id - LOG.info("Registering image in cache %s", file_name) - try: - self._do_clone_rel_img_cache( - volume['name'], file_name, - volume['provider_location'], file_name) - except Exception as e: - LOG.warning('Exception while registering image %(image_id)s' - ' in cache. 
Exception: %(exc)s', - {'image_id': image_id, 'exc': e}) - - def _find_image_in_cache(self, image_id): - """Finds image in cache and returns list of shares with file name.""" - result = [] - if getattr(self, '_mounted_shares', None): - for share in self._mounted_shares: - dir = self._get_mount_point_for_share(share) - file_name = 'img-cache-%s' % image_id - file_path = '%s/%s' % (dir, file_name) - if os.path.isfile(file_path): - LOG.debug('Found cache file for image %(image_id)s' - ' on share %(share)s', - {'image_id': image_id, 'share': share}) - result.append((share, file_name)) - return result - - def _do_clone_rel_img_cache(self, src, dst, share, cache_file): - """Do clone operation w.r.t image cache file.""" - @utils.synchronized(cache_file, external=True) - def _do_clone(): - dir = self._get_mount_point_for_share(share) - file_path = '%s/%s' % (dir, dst) - if not os.path.exists(file_path): - LOG.info('Cloning from cache to destination %s', dst) - self._clone_backing_file_for_volume(src, dst, volume_id=None, - share=share) - src_path = '%s/%s' % (dir, src) - os.utime(src_path, None) - _do_clone() - - @utils.synchronized('clean_cache') - def _spawn_clean_cache_job(self): - """Spawns a clean task if not running.""" - if getattr(self, 'cleaning', None): - LOG.debug('Image cache cleaning in progress. Returning... 
') - return - else: - # Set cleaning to True - self.cleaning = True - t = threading.Timer(0, self._clean_image_cache) - t.start() - - def _clean_image_cache(self): - """Clean the image cache files in cache of space crunch.""" - try: - LOG.debug('Image cache cleaning in progress.') - thres_size_perc_start =\ - self.configuration.thres_avl_size_perc_start - thres_size_perc_stop = \ - self.configuration.thres_avl_size_perc_stop - for share in getattr(self, '_mounted_shares', []): - try: - total_size, total_avl = \ - self._get_capacity_info(share) - avl_percent = int((float(total_avl) / total_size) * 100) - if avl_percent <= thres_size_perc_start: - LOG.info('Cleaning cache for share %s.', share) - eligible_files = self._find_old_cache_files(share) - threshold_size = int( - (thres_size_perc_stop * total_size) / 100) - bytes_to_free = int(threshold_size - total_avl) - LOG.debug('Files to be queued for deletion %s', - eligible_files) - self._delete_files_till_bytes_free( - eligible_files, share, bytes_to_free) - else: - continue - except Exception as e: - LOG.warning('Exception during cache cleaning' - ' %(share)s. 
Message - %(ex)s', - {'share': share, 'ex': e}) - continue - finally: - LOG.debug('Image cache cleaning done.') - self.cleaning = False - - def _shortlist_del_eligible_files(self, share, old_files): - """Prepares list of eligible files to be deleted from cache.""" - raise NotImplementedError() - - def _find_old_cache_files(self, share): - """Finds the old files in cache.""" - mount_fs = self._get_mount_point_for_share(share) - threshold_minutes = self.configuration.expiry_thres_minutes - cmd = ['find', mount_fs, '-maxdepth', '1', '-name', - 'img-cache*', '-amin', '+%s' % threshold_minutes] - res, _err = self._execute(*cmd, run_as_root=self._execute_as_root) - if res: - old_file_paths = res.strip('\n').split('\n') - mount_fs_len = len(mount_fs) - old_files = [x[mount_fs_len + 1:] for x in old_file_paths] - eligible_files = self._shortlist_del_eligible_files( - share, old_files) - return eligible_files - return [] - - def _delete_files_till_bytes_free(self, file_list, share, bytes_to_free=0): - """Delete files from disk till bytes are freed or list exhausted.""" - LOG.debug('Bytes to free %s', bytes_to_free) - if file_list and bytes_to_free > 0: - sorted_files = sorted(file_list, key=lambda x: x[1], reverse=True) - mount_fs = self._get_mount_point_for_share(share) - for f in sorted_files: - if f: - file_path = '%s/%s' % (mount_fs, f[0]) - LOG.debug('Delete file path %s', file_path) - - @utils.synchronized(f[0], external=True) - def _do_delete(): - if self._delete_file_at_path(file_path): - return True - return False - - if _do_delete(): - bytes_to_free -= int(f[1]) - if bytes_to_free <= 0: - return - - def _delete_file_at_path(self, path): - """Delete file from disk and return result as boolean.""" - try: - LOG.debug('Deleting file at path %s', path) - cmd = ['rm', '-f', path] - self._execute(*cmd, run_as_root=self._execute_as_root) - return True - except Exception as ex: - LOG.warning('Exception during deleting %s', ex) - return False - - def clone_image(self, 
context, volume, - image_location, image_meta, - image_service): - """Create a volume efficiently from an existing image. - - image_location is a string whose format depends on the - image service backend in use. The driver should use it - to determine whether cloning is possible. - - Returns a dict of volume properties eg. provider_location, - boolean indicating whether cloning occurred. - """ - image_id = image_meta['id'] - cloned = False - post_clone = False - - extra_specs = na_utils.get_volume_extra_specs(volume) - - try: - cache_result = self._find_image_in_cache(image_id) - if cache_result: - cloned = self._clone_from_cache(volume, image_id, cache_result) - else: - cloned = self._direct_nfs_clone(volume, image_location, - image_id) - if cloned: - self._do_qos_for_volume(volume, extra_specs) - post_clone = self._post_clone_image(volume) - except Exception as e: - msg = e.msg if getattr(e, 'msg', None) else e - LOG.info('Image cloning unsuccessful for image' - ' %(image_id)s. Message: %(msg)s', - {'image_id': image_id, 'msg': msg}) - finally: - cloned = cloned and post_clone - share = volume['provider_location'] if cloned else None - bootable = True if cloned else False - return {'provider_location': share, 'bootable': bootable}, cloned - - def _clone_from_cache(self, volume, image_id, cache_result): - """Clones a copy from image cache.""" - cloned = False - LOG.info('Cloning image %s from cache', image_id) - for res in cache_result: - # Repeat tries in other shares if failed in some - (share, file_name) = res - LOG.debug('Cache share: %s', share) - if (share and - self._is_share_clone_compatible(volume, share)): - try: - self._do_clone_rel_img_cache( - file_name, volume['name'], share, file_name) - cloned = True - volume['provider_location'] = share - break - except Exception: - LOG.warning('Unexpected exception during' - ' image cloning in share %s', share) - return cloned - - def _direct_nfs_clone(self, volume, image_location, image_id): - """Clone directly 
in nfs share.""" - LOG.info('Checking image clone %s from glance share.', image_id) - cloned = False - image_locations = self._construct_image_nfs_url(image_location) - run_as_root = self._execute_as_root - for loc in image_locations: - share = self._is_cloneable_share(loc) - if share and self._is_share_clone_compatible(volume, share): - LOG.debug('Share is cloneable %s', share) - volume['provider_location'] = share - (__, ___, img_file) = loc.rpartition('/') - dir_path = self._get_mount_point_for_share(share) - img_path = '%s/%s' % (dir_path, img_file) - img_info = image_utils.qemu_img_info(img_path, - run_as_root=run_as_root) - if img_info.file_format == 'raw': - LOG.debug('Image is raw %s', image_id) - self._clone_backing_file_for_volume( - img_file, volume['name'], - volume_id=None, share=share) - cloned = True - break - else: - LOG.info( - 'Image will locally be converted to raw %s', - image_id) - dst = '%s/%s' % (dir_path, volume['name']) - image_utils.convert_image(img_path, dst, 'raw', - run_as_root=run_as_root) - data = image_utils.qemu_img_info(dst, - run_as_root=run_as_root) - if data.file_format != "raw": - raise exception.InvalidResults( - _("Converted to raw, but" - " format is now %s") % data.file_format) - else: - cloned = True - self._register_image_in_cache( - volume, image_id) - break - return cloned - - def _post_clone_image(self, volume): - """Do operations post image cloning.""" - LOG.info('Performing post clone for %s', volume['name']) - vol_path = self.local_path(volume) - if self._discover_file_till_timeout(vol_path): - self._set_rw_permissions(vol_path) - self._resize_image_file(vol_path, volume['size']) - return True - raise exception.InvalidResults( - _("NFS file could not be discovered.")) - - def _resize_image_file(self, path, new_size): - """Resize the image file on share to new size.""" - LOG.debug('Checking file for resize') - if self._is_file_size_equal(path, new_size): - return - else: - LOG.info('Resizing file to %sG', new_size) 
- image_utils.resize_image(path, new_size, - run_as_root=self._execute_as_root) - if self._is_file_size_equal(path, new_size): - return - else: - raise exception.InvalidResults( - _('Resizing image file failed.')) - - def _is_file_size_equal(self, path, size): - """Checks if file size at path is equal to size.""" - data = image_utils.qemu_img_info(path, - run_as_root=self._execute_as_root) - virt_size = data.virtual_size / units.Gi - if virt_size == size: - return True - else: - return False - - @utils.trace_method - def _touch_path_to_refresh(self, path): - try: - # Touching parent directory forces NFS client to flush its cache. - self._execute('touch', path, run_as_root=self._execute_as_root) - except processutils.ProcessExecutionError: - LOG.exception("Failed to touch path %s.", path) - - @utils.trace_method - def _discover_file_till_timeout(self, path, timeout=75): - """Checks if file size at path is equal to size.""" - # Sometimes nfs takes time to discover file - # Retrying in case any unexpected situation occurs - # - # The NFS client by default has a 60 second maximum - # cache time before it refreshes directory information. - # (See man 5 nfs acdirmax.) Allow 75 seconds for - # retries to ensure that this cache has refreshed. - retry_seconds = timeout - sleep_interval = 2 - base_path = os.path.dirname(path) - self._touch_path_to_refresh(base_path) - - while True: - if os.path.exists(path): - return True - else: - if retry_seconds <= 0: - LOG.warning('Discover file retries exhausted.') - return False - else: - time.sleep(sleep_interval) - retry_seconds -= sleep_interval - self._touch_path_to_refresh(base_path) - - def _is_cloneable_share(self, image_location): - """Finds if the image at location is cloneable.""" - conn, dr = self._check_get_nfs_path_segs(image_location) - return self._check_share_in_use(conn, dr) - - def _check_get_nfs_path_segs(self, image_location): - """Checks if the nfs path format is matched. 
- - WebNFS url format with relative-path is supported. - Accepting all characters in path-names and checking - against the mounted shares which will contain only - allowed path segments. Returns connection and dir details. - """ - conn, dr = None, None - if image_location: - nfs_loc_pattern = \ - (r'^nfs://(([\w\-\.]+:{1}[\d]+|[\w\-\.]+)(/[^\/].*)' - r'*(/[^\/\\\\]+)$)') - matched = re.match(nfs_loc_pattern, image_location, flags=0) - if not matched: - LOG.debug('Image location not in the' - ' expected format %s', image_location) - else: - conn = matched.group(2) - dr = matched.group(3) or '/' - return conn, dr - - def _share_match_for_ip(self, ip, shares): - """Returns the share that is served by ip. - - Multiple shares can have same dir path but - can be served using different ips. It finds the - share which is served by ip on same nfs server. - """ - raise NotImplementedError() - - def _check_share_in_use(self, conn, dir): - """Checks if share is cinder mounted and returns it.""" - try: - if conn: - host = conn.split(':')[0] - ip = na_utils.resolve_hostname(host) - share_candidates = [] - for sh in self._mounted_shares: - sh_exp = sh.split(':')[1] - if sh_exp == dir: - share_candidates.append(sh) - if share_candidates: - LOG.debug('Found possible share matches %s', - share_candidates) - return self._share_match_for_ip(ip, share_candidates) - except Exception: - LOG.warning("Unexpected exception while " - "short listing used share.") - return None - - def _construct_image_nfs_url(self, image_location): - """Construct direct url for nfs backend. - - It creates direct url from image_location - which is a tuple with direct_url and locations. - Returns array of urls with nfs scheme if nfs store - else returns url. It needs to be verified - by backend before use. 
- """ - - direct_url, locations = image_location - if not direct_url and not locations: - raise exception.NotFound(_('Image location not present.')) - - urls = [] - if not locations: - urls.append(direct_url) - else: - for location in locations: - if not location['metadata']: - continue - location_type = location['metadata'].get('type') - if not location_type or location_type.lower() != "nfs": - continue - share_location = location['metadata'].get('share_location') - mountpoint = location['metadata'].get('mountpoint') - if not share_location or not mountpoint: - continue - url = location['url'] - url_parse = urllib.parse.urlparse(url) - abs_path = os.path.join(url_parse.netloc, url_parse.path) - rel_path = os.path.relpath(abs_path, mountpoint) - direct_url = "%s/%s" % (share_location, rel_path) - urls.append(direct_url) - return urls - - def extend_volume(self, volume, new_size): - """Extend an existing volume to the new size.""" - - LOG.info('Extending volume %s.', volume['name']) - - try: - path = self.local_path(volume) - self._resize_image_file(path, new_size) - except Exception as err: - exception_msg = (_("Failed to extend volume " - "%(name)s, Error msg: %(msg)s.") % - {'name': volume['name'], - 'msg': six.text_type(err)}) - raise exception.VolumeBackendAPIException(data=exception_msg) - - try: - extra_specs = na_utils.get_volume_extra_specs(volume) - - # Create volume copy with new size for size-dependent QOS specs - volume_copy = copy.copy(volume) - volume_copy['size'] = new_size - - self._do_qos_for_volume(volume_copy, extra_specs, cleanup=False) - except Exception as err: - exception_msg = (_("Failed to set QoS for existing volume " - "%(name)s, Error msg: %(msg)s.") % - {'name': volume['name'], - 'msg': six.text_type(err)}) - raise exception.VolumeBackendAPIException(data=exception_msg) - - def _is_share_clone_compatible(self, volume, share): - """Checks if share is compatible with volume to host its clone.""" - raise NotImplementedError() - - def 
_share_has_space_for_clone(self, share, size_in_gib, thin=True): - """Is there space on the share for a clone given the original size?""" - requested_size = size_in_gib * units.Gi - - total_size, total_available = self._get_capacity_info(share) - - reserved_ratio = self.reserved_percentage / 100.0 - reserved = int(round(total_size * reserved_ratio)) - available = max(0, total_available - reserved) - if thin: - available = available * self.max_over_subscription_ratio - - return available >= requested_size - - def _check_share_can_hold_size(self, share, size): - """Checks if volume can hold image with size.""" - _tot_size, tot_available = self._get_capacity_info( - share) - if tot_available < size: - msg = _("Container size smaller than required file size.") - raise exception.VolumeDriverException(msg) - - def _move_nfs_file(self, source_path, dest_path): - """Moves source to destination.""" - - @utils.synchronized(dest_path, external=True) - def _move_file(src, dst): - if os.path.exists(dst): - LOG.warning("Destination %s already exists.", dst) - return False - self._execute('mv', src, dst, run_as_root=self._execute_as_root) - return True - - try: - return _move_file(source_path, dest_path) - except Exception as e: - LOG.warning('Exception moving file %(src)s. Message - %(e)s', - {'src': source_path, 'e': e}) - return False - - def _get_export_ip_path(self, volume_id=None, share=None): - """Returns export ip and path. - - One of volume id or share is used to return the values. 
- """ - - if volume_id: - host_ip = self._get_host_ip(volume_id) - export_path = self._get_export_path(volume_id) - elif share: - host_ip = share.split(':')[0] - export_path = share.split(':')[1] - else: - raise exception.InvalidInput( - 'A volume ID or share was not specified.') - return host_ip, export_path - - def _get_share_capacity_info(self, nfs_share): - """Returns the share capacity metrics needed by the scheduler.""" - - capacity = dict() - capacity['reserved_percentage'] = self.reserved_percentage - capacity['max_over_subscription_ratio'] = ( - self.max_over_subscription_ratio) - total_size, total_available = self._get_capacity_info(nfs_share) - capacity['total_capacity_gb'] = na_utils.round_down( - total_size / units.Gi) - capacity['free_capacity_gb'] = na_utils.round_down( - total_available / units.Gi) - capacity['provisioned_capacity_gb'] = (round( - capacity['total_capacity_gb'] - capacity['free_capacity_gb'], 2)) - - return capacity - - def _get_capacity_info(self, nfs_share): - """Get total capacity and free capacity in bytes for an nfs share.""" - export_path = nfs_share.rsplit(':', 1)[1] - capacity = self.zapi_client.get_flexvol_capacity( - flexvol_path=export_path) - return capacity['size-total'], capacity['size-available'] - - def _check_volume_type(self, volume, share, file_name, extra_specs): - """Match volume type for share file.""" - - def _convert_vol_ref_share_name_to_share_ip(self, vol_ref): - """Converts the share point name to an IP address - - The volume reference may have a DNS name portion in the share name. - Convert that to an IP address and then restore the entire path. - - :param vol_ref: Driver-specific information used to identify a volume - :return: A volume reference where share is in IP format. - """ - # First strip out share and convert to IP format. - share_split = vol_ref.rsplit(':', 1) - - vol_ref_share_ip = na_utils.resolve_hostname(share_split[0]) - - # Now place back into volume reference. 
- vol_ref_share = vol_ref_share_ip + ':' + share_split[1] - - return vol_ref_share - - def _get_share_mount_and_vol_from_vol_ref(self, vol_ref): - """Get the NFS share, the NFS mount, and the volume from reference - - Determine the NFS share point, the NFS mount point, and the volume - (with possible path) from the given volume reference. Raise exception - if unsuccessful. - - :param vol_ref: Driver-specific information used to identify a volume - :return: NFS Share, NFS mount, volume path or raise error - """ - # Check that the reference is valid. - if 'source-name' not in vol_ref: - reason = _('Reference must contain source-name element.') - raise exception.ManageExistingInvalidReference( - existing_ref=vol_ref, reason=reason) - vol_ref_name = vol_ref['source-name'] - - self._ensure_shares_mounted() - - # If a share was declared as '1.2.3.4:/a/b/c' in the nfs_shares_config - # file, but the admin tries to manage the file located at - # 'my.hostname.com:/a/b/c/d.vol', this might cause a lookup miss below - # when searching self._mounted_shares to see if we have an existing - # mount that would work to access the volume-to-be-managed (a string - # comparison is done instead of IP comparison). 
- vol_ref_share = self._convert_vol_ref_share_name_to_share_ip( - vol_ref_name) - for nfs_share in self._mounted_shares: - cfg_share = self._convert_vol_ref_share_name_to_share_ip(nfs_share) - (orig_share, work_share, file_path) = \ - vol_ref_share.partition(cfg_share) - if work_share == cfg_share: - file_path = file_path[1:] # strip off leading path divider - LOG.debug("Found possible share %s; checking mount.", - work_share) - nfs_mount = self._get_mount_point_for_share(nfs_share) - vol_full_path = os.path.join(nfs_mount, file_path) - if os.path.isfile(vol_full_path): - LOG.debug("Found share %(share)s and vol %(path)s on " - "mount %(mnt)s", - {'share': nfs_share, 'path': file_path, - 'mnt': nfs_mount}) - return nfs_share, nfs_mount, file_path - else: - LOG.debug("vol_ref %(ref)s not on share %(share)s.", - {'ref': vol_ref_share, 'share': nfs_share}) - - raise exception.ManageExistingInvalidReference( - existing_ref=vol_ref, - reason=_('Volume not found on configured storage backend.')) - - def manage_existing(self, volume, existing_vol_ref): - """Manages an existing volume. - - The specified Cinder volume is to be taken into Cinder management. - The driver will verify its existence and then rename it to the - new Cinder volume name. It is expected that the existing volume - reference is an NFS share point and some [/path]/volume; - e.g., 10.10.32.1:/openstack/vol_to_manage - or 10.10.32.1:/openstack/some_directory/vol_to_manage - - :param volume: Cinder volume to manage - :param existing_vol_ref: Driver-specific information used to identify a - volume - """ - # Attempt to find NFS share, NFS mount, and volume path from vol_ref. 
- (nfs_share, nfs_mount, vol_path) = \ - self._get_share_mount_and_vol_from_vol_ref(existing_vol_ref) - - LOG.debug("Asked to manage NFS volume %(vol)s, with vol ref %(ref)s", - {'vol': volume['id'], - 'ref': existing_vol_ref['source-name']}) - - extra_specs = na_utils.get_volume_extra_specs(volume) - - self._check_volume_type(volume, nfs_share, vol_path, extra_specs) - - if vol_path == volume['name']: - LOG.debug("New Cinder volume %s name matches reference name: " - "no need to rename.", volume['name']) - else: - src_vol = os.path.join(nfs_mount, vol_path) - dst_vol = os.path.join(nfs_mount, volume['name']) - try: - shutil.move(src_vol, dst_vol) - LOG.debug("Setting newly managed Cinder volume name to %s", - volume['name']) - self._set_rw_permissions_for_all(dst_vol) - except (OSError, IOError) as err: - exception_msg = (_("Failed to manage existing volume %(name)s," - " because rename operation failed:" - " Error msg: %(msg)s."), - {'name': existing_vol_ref['source-name'], - 'msg': err}) - raise exception.VolumeBackendAPIException(data=exception_msg) - try: - self._do_qos_for_volume(volume, extra_specs, cleanup=False) - except Exception as err: - exception_msg = (_("Failed to set QoS for existing volume " - "%(name)s, Error msg: %(msg)s.") % - {'name': existing_vol_ref['source-name'], - 'msg': six.text_type(err)}) - raise exception.VolumeBackendAPIException(data=exception_msg) - - model_update = self._get_volume_model_update(volume) or {} - model_update['provider_location'] = nfs_share - - return model_update - - def manage_existing_get_size(self, volume, existing_vol_ref): - """Returns the size of volume to be managed by manage_existing. - - When calculating the size, round up to the next GB. - - :param volume: Cinder volume to manage - :param existing_vol_ref: Existing volume to take under management - """ - # Attempt to find NFS share, NFS mount, and volume path from vol_ref. 
- (nfs_share, nfs_mount, vol_path) = \ - self._get_share_mount_and_vol_from_vol_ref(existing_vol_ref) - - try: - LOG.debug("Asked to get size of NFS vol_ref %s.", - existing_vol_ref['source-name']) - - file_path = os.path.join(nfs_mount, vol_path) - file_size = float(utils.get_file_size(file_path)) / units.Gi - vol_size = int(math.ceil(file_size)) - except (OSError, ValueError): - exception_message = (_("Failed to manage existing volume " - "%(name)s, because of error in getting " - "volume size."), - {'name': existing_vol_ref['source-name']}) - raise exception.VolumeBackendAPIException(data=exception_message) - - LOG.debug("Reporting size of NFS volume ref %(ref)s as %(size)d GB.", - {'ref': existing_vol_ref['source-name'], 'size': vol_size}) - - return vol_size - - def unmanage(self, volume): - """Removes the specified volume from Cinder management. - - Does not delete the underlying backend storage object. A log entry - will be made to notify the Admin that the volume is no longer being - managed. - - :param volume: Cinder volume to unmanage - """ - vol_str = CONF.volume_name_template % volume['id'] - vol_path = os.path.join(volume['provider_location'], vol_str) - LOG.info('Cinder NFS volume with current path "%(cr)s" is ' - 'no longer being managed.', {'cr': vol_path}) - - @utils.trace_method - def create_consistencygroup(self, context, group): - """Driver entry point for creating a consistency group. - - ONTAP does not maintain an actual CG construct. As a result, no - communtication to the backend is necessary for consistency group - creation. - - :return: Hard-coded model update for consistency group model. - """ - model_update = {'status': 'available'} - return model_update - - @utils.trace_method - def delete_consistencygroup(self, context, group, volumes): - """Driver entry point for deleting a consistency group. - - :return: Updated consistency group model and list of volume models - for the volumes that were deleted. 
- """ - model_update = {'status': 'deleted'} - volumes_model_update = [] - for volume in volumes: - try: - self._delete_file(volume['id'], volume['name']) - volumes_model_update.append( - {'id': volume['id'], 'status': 'deleted'}) - except Exception: - volumes_model_update.append( - {'id': volume['id'], 'status': 'error_deleting'}) - LOG.exception("Volume %(vol)s in the consistency group " - "could not be deleted.", {'vol': volume}) - return model_update, volumes_model_update - - @utils.trace_method - def update_consistencygroup(self, context, group, add_volumes=None, - remove_volumes=None): - """Driver entry point for updating a consistency group. - - Since no actual CG construct is ever created in ONTAP, it is not - necessary to update any metadata on the backend. Since this is a NO-OP, - there is guaranteed to be no change in any of the volumes' statuses. - """ - return None, None, None - - @utils.trace_method - def create_cgsnapshot(self, context, cgsnapshot, snapshots): - """Creates a Cinder cgsnapshot object. - - The Cinder cgsnapshot object is created by making use of an ONTAP CG - snapshot in order to provide write-order consistency for a set of - backing flexvols. First, a list of the flexvols backing the given - Cinder volumes in the CG is determined. An ONTAP CG snapshot of the - flexvols creates a write-order consistent snapshot of each backing - flexvol. For each Cinder volume in the CG, it is then necessary to - clone its volume from the ONTAP CG snapshot. The naming convention - used to create the clones indicates the clone's role as a Cinder - snapshot and its inclusion in a Cinder CG snapshot. The ONTAP CG - snapshots, of each backing flexvol, are deleted after the cloning - operation is completed. - - :return: An implicit update for the cgsnapshot and snapshot models that - is then used by the manager to set the models to available. 
- """ - - hosts = [snapshot['volume']['host'] for snapshot in snapshots] - flexvols = self._get_flexvol_names_from_hosts(hosts) - - # Create snapshot for backing flexvol - self.zapi_client.create_cg_snapshot(flexvols, cgsnapshot['id']) - - # Start clone process for snapshot files - for snapshot in snapshots: - self._clone_backing_file_for_volume( - snapshot['volume']['name'], snapshot['name'], - snapshot['volume']['id'], source_snapshot=cgsnapshot['id']) - - # Delete backing flexvol snapshots - for flexvol_name in flexvols: - try: - self.zapi_client.wait_for_busy_snapshot( - flexvol_name, cgsnapshot['id']) - self.zapi_client.delete_snapshot( - flexvol_name, cgsnapshot['id']) - except exception.SnapshotIsBusy: - self.zapi_client.mark_snapshot_for_deletion( - flexvol_name, cgsnapshot['id']) - - return None, None - - @utils.trace_method - def delete_cgsnapshot(self, context, cgsnapshot, snapshots): - """Delete files backing each snapshot in the cgsnapshot.""" - raise NotImplementedError() - - @utils.trace_method - def create_consistencygroup_from_src(self, context, group, volumes, - cgsnapshot=None, snapshots=None, - source_cg=None, source_vols=None): - """Creates a CG from a either a cgsnapshot or group of cinder vols. - - :return: An implicit update for the volumes model that is - interpreted by the manager as a successful operation. 
- """ - LOG.debug("VOLUMES %s ", [dict(vol) for vol in volumes]) - model_update = None - volumes_model_update = [] - - if cgsnapshot: - vols = zip(volumes, snapshots) - - for volume, snapshot in vols: - update = self.create_volume_from_snapshot( - volume, snapshot) - update['id'] = volume['id'] - volumes_model_update.append(update) - - elif source_cg and source_vols: - hosts = [source_vol['host'] for source_vol in source_vols] - flexvols = self._get_flexvol_names_from_hosts(hosts) - - # Create snapshot for backing flexvol - snapshot_name = 'snapshot-temp-' + source_cg['id'] - self.zapi_client.create_cg_snapshot(flexvols, snapshot_name) - - # Start clone process for new volumes - vols = zip(volumes, source_vols) - for volume, source_vol in vols: - self._clone_backing_file_for_volume( - source_vol['name'], volume['name'], - source_vol['id'], source_snapshot=snapshot_name) - volume_model_update = ( - self._get_volume_model_update(volume) or {}) - volume_model_update.update({ - 'id': volume['id'], - 'provider_location': source_vol['provider_location'], - }) - volumes_model_update.append(volume_model_update) - - # Delete backing flexvol snapshots - for flexvol_name in flexvols: - self.zapi_client.wait_for_busy_snapshot( - flexvol_name, snapshot_name) - self.zapi_client.delete_snapshot(flexvol_name, snapshot_name) - else: - LOG.error("Unexpected set of parameters received when " - "creating consistency group from source.") - model_update = {} - model_update['status'] = 'error' - - return model_update, volumes_model_update diff --git a/cinder/volume/drivers/netapp/dataontap/nfs_cmode.py b/cinder/volume/drivers/netapp/dataontap/nfs_cmode.py deleted file mode 100644 index 0d0d0a3ea..000000000 --- a/cinder/volume/drivers/netapp/dataontap/nfs_cmode.py +++ /dev/null @@ -1,746 +0,0 @@ -# Copyright (c) 2012 NetApp, Inc. All rights reserved. -# Copyright (c) 2014 Ben Swartzlander. All rights reserved. -# Copyright (c) 2014 Navneet Singh. All rights reserved. 
-# Copyright (c) 2014 Clinton Knight. All rights reserved. -# Copyright (c) 2014 Alex Meade. All rights reserved. -# Copyright (c) 2014 Bob Callaway. All rights reserved. -# Copyright (c) 2015 Tom Barron. All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -""" -Volume driver for NetApp NFS storage. -""" - -import os -import uuid - -from oslo_log import log as logging -from oslo_utils import excutils -import six - -from cinder import exception -from cinder.i18n import _ -from cinder.image import image_utils -from cinder import interface -from cinder.objects import fields -from cinder import utils -from cinder.volume.drivers.netapp.dataontap import nfs_base -from cinder.volume.drivers.netapp.dataontap.performance import perf_cmode -from cinder.volume.drivers.netapp.dataontap.utils import capabilities -from cinder.volume.drivers.netapp.dataontap.utils import data_motion -from cinder.volume.drivers.netapp.dataontap.utils import loopingcalls -from cinder.volume.drivers.netapp.dataontap.utils import utils as dot_utils -from cinder.volume.drivers.netapp import options as na_opts -from cinder.volume.drivers.netapp import utils as na_utils -from cinder.volume import utils as volume_utils - - -LOG = logging.getLogger(__name__) - - -@interface.volumedriver -@six.add_metaclass(utils.TraceWrapperWithABCMetaclass) -class NetAppCmodeNfsDriver(nfs_base.NetAppNfsDriver, - data_motion.DataMotionMixin): - """NetApp NFS driver for Data ONTAP (Cluster-mode).""" - - 
REQUIRED_CMODE_FLAGS = ['netapp_vserver'] - - def __init__(self, *args, **kwargs): - super(NetAppCmodeNfsDriver, self).__init__(*args, **kwargs) - self.driver_name = 'NetApp_NFS_Cluster_direct' - self.driver_mode = 'cluster' - self.configuration.append_config_values(na_opts.netapp_cluster_opts) - self.failed_over_backend_name = kwargs.get('active_backend_id') - self.failed_over = self.failed_over_backend_name is not None - self.replication_enabled = ( - True if self.get_replication_backend_names( - self.configuration) else False) - - def do_setup(self, context): - """Do the customized set up on client for cluster mode.""" - super(NetAppCmodeNfsDriver, self).do_setup(context) - na_utils.check_flags(self.REQUIRED_CMODE_FLAGS, self.configuration) - - # cDOT API client - self.zapi_client = dot_utils.get_client_for_backend( - self.failed_over_backend_name or self.backend_name) - self.vserver = self.zapi_client.vserver - - # Performance monitoring library - self.perf_library = perf_cmode.PerformanceCmodeLibrary( - self.zapi_client) - - # Storage service catalog - self.ssc_library = capabilities.CapabilitiesLibrary( - 'nfs', self.vserver, self.zapi_client, self.configuration) - - def _update_zapi_client(self, backend_name): - """Set cDOT API client for the specified config backend stanza name.""" - - self.zapi_client = dot_utils.get_client_for_backend(backend_name) - self.vserver = self.zapi_client.vserver - self.ssc_library._update_for_failover(self.zapi_client, - self._get_flexvol_to_pool_map()) - ssc = self.ssc_library.get_ssc() - self.perf_library._update_for_failover(self.zapi_client, ssc) - - @utils.trace_method - def check_for_setup_error(self): - """Check that the driver is working and can communicate.""" - self.ssc_library.check_api_permissions() - self._add_looping_tasks() - super(NetAppCmodeNfsDriver, self).check_for_setup_error() - - def _add_looping_tasks(self): - """Add tasks that need to be executed at a fixed interval.""" - - # Note(cknight): Run the 
update once in the current thread to prevent a - # race with the first invocation of _update_volume_stats. - self._update_ssc() - - # Add the task that updates the slow-changing storage service catalog - self.loopingcalls.add_task(self._update_ssc, - loopingcalls.ONE_HOUR, - loopingcalls.ONE_HOUR) - - # Add the task that harvests soft-deleted QoS policy groups. - self.loopingcalls.add_task( - self.zapi_client.remove_unused_qos_policy_groups, - loopingcalls.ONE_MINUTE, - loopingcalls.ONE_MINUTE) - - # Add the task that runs other housekeeping tasks, such as deletion - # of previously soft-deleted storage artifacts. - self.loopingcalls.add_task( - self._handle_housekeeping_tasks, - loopingcalls.TEN_MINUTES, - 0) - - super(NetAppCmodeNfsDriver, self)._add_looping_tasks() - - def _handle_ems_logging(self): - """Log autosupport messages.""" - - base_ems_message = dot_utils.build_ems_log_message_0( - self.driver_name, self.app_version, self.driver_mode) - self.zapi_client.send_ems_log_message(base_ems_message) - - pool_ems_message = dot_utils.build_ems_log_message_1( - self.driver_name, self.app_version, self.vserver, - self._get_backing_flexvol_names(), []) - self.zapi_client.send_ems_log_message(pool_ems_message) - - def _handle_housekeeping_tasks(self): - """Handle various cleanup activities.""" - - # Harvest soft-deleted QoS policy groups - self.zapi_client.remove_unused_qos_policy_groups() - - active_backend = self.failed_over_backend_name or self.backend_name - - LOG.debug("Current service state: Replication enabled: %(" - "replication)s. Failed-Over: %(failed)s. 
Active Backend " - "ID: %(active)s", - { - 'replication': self.replication_enabled, - 'failed': self.failed_over, - 'active': active_backend, - }) - - # Create pool mirrors if whole-backend replication configured - if self.replication_enabled and not self.failed_over: - self.ensure_snapmirrors( - self.configuration, self.backend_name, - self.ssc_library.get_ssc_flexvol_names()) - - def _do_qos_for_volume(self, volume, extra_specs, cleanup=True): - try: - qos_policy_group_info = na_utils.get_valid_qos_policy_group_info( - volume, extra_specs) - self.zapi_client.provision_qos_policy_group(qos_policy_group_info) - self._set_qos_policy_group_on_volume(volume, qos_policy_group_info) - except Exception: - with excutils.save_and_reraise_exception(): - LOG.error("Setting QoS for %s failed", volume['id']) - if cleanup: - LOG.debug("Cleaning volume %s", volume['id']) - self._cleanup_volume_on_failure(volume) - - def _get_volume_model_update(self, volume): - """Provide model updates for a volume being created.""" - if self.replication_enabled: - return {'replication_status': fields.ReplicationStatus.ENABLED} - - def _set_qos_policy_group_on_volume(self, volume, qos_policy_group_info): - if qos_policy_group_info is None: - return - qos_policy_group_name = na_utils.get_qos_policy_group_name_from_info( - qos_policy_group_info) - if qos_policy_group_name is None: - return - target_path = '%s' % (volume['name']) - share = volume_utils.extract_host(volume['host'], level='pool') - export_path = share.split(':')[1] - flex_vol_name = self.zapi_client.get_vol_by_junc_vserver(self.vserver, - export_path) - self.zapi_client.file_assign_qos(flex_vol_name, - qos_policy_group_name, - target_path) - - def _clone_backing_file_for_volume(self, volume_name, clone_name, - volume_id, share=None, - is_snapshot=False, - source_snapshot=None): - """Clone backing file for Cinder volume.""" - (vserver, exp_volume) = self._get_vserver_and_exp_vol(volume_id, share) - 
self.zapi_client.clone_file(exp_volume, volume_name, clone_name, - vserver, is_snapshot=is_snapshot) - - def _get_vserver_and_exp_vol(self, volume_id=None, share=None): - """Gets the vserver and export volume for share.""" - (host_ip, export_path) = self._get_export_ip_path(volume_id, share) - ifs = self.zapi_client.get_if_info_by_ip(host_ip) - vserver = ifs[0].get_child_content('vserver') - exp_volume = self.zapi_client.get_vol_by_junc_vserver(vserver, - export_path) - return vserver, exp_volume - - def _update_volume_stats(self): - """Retrieve stats info from vserver.""" - - LOG.debug('Updating volume stats') - data = {} - backend_name = self.configuration.safe_get('volume_backend_name') - data['volume_backend_name'] = backend_name or self.driver_name - data['vendor_name'] = 'NetApp' - data['driver_version'] = self.VERSION - data['storage_protocol'] = 'nfs' - data['pools'] = self._get_pool_stats( - filter_function=self.get_filter_function(), - goodness_function=self.get_goodness_function()) - data['sparse_copy_volume'] = True - - # Used for service state report - data['replication_enabled'] = self.replication_enabled - - self._spawn_clean_cache_job() - self._stats = data - - def _get_pool_stats(self, filter_function=None, goodness_function=None): - """Retrieve pool (Data ONTAP flexvol) stats. - - Pool statistics are assembled from static driver capabilities, the - Storage Service Catalog of flexvol attributes, and real-time capacity - and controller utilization metrics. The pool name is the NFS share - path. 
- """ - - pools = [] - - ssc = self.ssc_library.get_ssc() - if not ssc: - return pools - - # Get up-to-date node utilization metrics just once - self.perf_library.update_performance_cache(ssc) - - # Get up-to-date aggregate capacities just once - aggregates = self.ssc_library.get_ssc_aggregates() - aggr_capacities = self.zapi_client.get_aggregate_capacities(aggregates) - - for ssc_vol_name, ssc_vol_info in ssc.items(): - - pool = dict() - - # Add storage service catalog data - pool.update(ssc_vol_info) - - # Add driver capabilities and config info - pool['QoS_support'] = True - pool['consistencygroup_support'] = True - pool['multiattach'] = False - - # Add up-to-date capacity info - nfs_share = ssc_vol_info['pool_name'] - capacity = self._get_share_capacity_info(nfs_share) - pool.update(capacity) - - dedupe_used = self.zapi_client.get_flexvol_dedupe_used_percent( - ssc_vol_name) - pool['netapp_dedupe_used_percent'] = na_utils.round_down( - dedupe_used) - - aggregate_name = ssc_vol_info.get('netapp_aggregate') - aggr_capacity = aggr_capacities.get(aggregate_name, {}) - pool['netapp_aggregate_used_percent'] = aggr_capacity.get( - 'percent-used', 0) - - # Add utilization data - utilization = self.perf_library.get_node_utilization_for_pool( - ssc_vol_name) - pool['utilization'] = na_utils.round_down(utilization) - pool['filter_function'] = filter_function - pool['goodness_function'] = goodness_function - - # Add replication capabilities/stats - pool.update( - self.get_replication_backend_stats(self.configuration)) - - pools.append(pool) - - return pools - - def _update_ssc(self): - """Refresh the storage service catalog with the latest set of pools.""" - - self._ensure_shares_mounted() - self.ssc_library.update_ssc(self._get_flexvol_to_pool_map()) - - def _get_flexvol_to_pool_map(self): - """Get the flexvols that back all mounted shares. 
- - The map is of the format suitable for seeding the storage service - catalog: { : {'pool_name': }} - """ - - pools = {} - vserver_addresses = self.zapi_client.get_operational_lif_addresses() - - for share in self._mounted_shares: - - host = share.split(':')[0] - junction_path = share.split(':')[1] - address = na_utils.resolve_hostname(host) - - if address not in vserver_addresses: - LOG.warning('Address not found for NFS share %s.', share) - continue - - try: - flexvol = self.zapi_client.get_flexvol( - flexvol_path=junction_path) - pools[flexvol['name']] = {'pool_name': share} - except exception.VolumeBackendAPIException: - LOG.exception('Flexvol not found for NFS share %s.', share) - - return pools - - def _shortlist_del_eligible_files(self, share, old_files): - """Prepares list of eligible files to be deleted from cache.""" - file_list = [] - (vserver, exp_volume) = self._get_vserver_and_exp_vol( - volume_id=None, share=share) - for old_file in old_files: - path = '/vol/%s/%s' % (exp_volume, old_file) - u_bytes = self.zapi_client.get_file_usage(path, vserver) - file_list.append((old_file, u_bytes)) - LOG.debug('Shortlisted files eligible for deletion: %s', file_list) - return file_list - - def _share_match_for_ip(self, ip, shares): - """Returns the share that is served by ip. - - Multiple shares can have same dir path but - can be served using different ips. It finds the - share which is served by ip on same nfs server. 
- """ - ip_vserver = self._get_vserver_for_ip(ip) - if ip_vserver and shares: - for share in shares: - ip_sh = share.split(':')[0] - sh_vserver = self._get_vserver_for_ip(ip_sh) - if sh_vserver == ip_vserver: - LOG.debug('Share match found for ip %s', ip) - return share - LOG.debug('No share match found for ip %s', ip) - return None - - def _get_vserver_for_ip(self, ip): - """Get vserver for the mentioned ip.""" - try: - ifs = self.zapi_client.get_if_info_by_ip(ip) - vserver = ifs[0].get_child_content('vserver') - return vserver - except Exception: - return None - - def _is_share_clone_compatible(self, volume, share): - """Checks if share is compatible with volume to host its clone.""" - flexvol_name = self._get_flexvol_name_for_share(share) - thin = self._is_volume_thin_provisioned(flexvol_name) - return ( - self._share_has_space_for_clone(share, volume['size'], thin) and - self._is_share_vol_type_match(volume, share, flexvol_name) - ) - - def _is_volume_thin_provisioned(self, flexvol_name): - """Checks if a flexvol is thin (sparse file or thin provisioned).""" - ssc_info = self.ssc_library.get_ssc_for_flexvol(flexvol_name) - return ssc_info.get('thin_provisioning_support') or False - - def _is_share_vol_type_match(self, volume, share, flexvol_name): - """Checks if share matches volume type.""" - LOG.debug("Found volume %(vol)s for share %(share)s.", - {'vol': flexvol_name, 'share': share}) - extra_specs = na_utils.get_volume_extra_specs(volume) - flexvol_names = self.ssc_library.get_matching_flexvols_for_extra_specs( - extra_specs) - return flexvol_name in flexvol_names - - def _get_flexvol_name_for_share(self, nfs_share): - """Queries the SSC for the flexvol containing an NFS share.""" - ssc = self.ssc_library.get_ssc() - for ssc_vol_name, ssc_vol_info in ssc.items(): - if nfs_share == ssc_vol_info.get('pool_name'): - return ssc_vol_name - return None - - @utils.trace_method - def delete_volume(self, volume): - """Deletes a logical volume.""" - 
self._delete_backing_file_for_volume(volume) - try: - qos_policy_group_info = na_utils.get_valid_qos_policy_group_info( - volume) - self.zapi_client.mark_qos_policy_group_for_deletion( - qos_policy_group_info) - except Exception: - # Don't blow up here if something went wrong de-provisioning the - # QoS policy for the volume. - pass - - def _delete_backing_file_for_volume(self, volume): - """Deletes file on nfs share that backs a cinder volume.""" - try: - LOG.debug('Deleting backing file for volume %s.', volume['id']) - self._delete_file(volume['id'], volume['name']) - except Exception: - LOG.exception('Could not delete volume %s on backend, ' - 'falling back to exec of "rm" command.', - volume['id']) - try: - super(NetAppCmodeNfsDriver, self).delete_volume(volume) - except Exception: - LOG.exception('Exec of "rm" command on backing file for ' - '%s was unsuccessful.', volume['id']) - - def _delete_file(self, file_id, file_name): - (_vserver, flexvol) = self._get_export_ip_path(volume_id=file_id) - path_on_backend = '/vol' + flexvol + '/' + file_name - LOG.debug('Attempting to delete file %(path)s for ID %(file_id)s on ' - 'backend.', {'path': path_on_backend, 'file_id': file_id}) - self.zapi_client.delete_file(path_on_backend) - - @utils.trace_method - def delete_snapshot(self, snapshot): - """Deletes a snapshot.""" - self._delete_backing_file_for_snapshot(snapshot) - - def _delete_backing_file_for_snapshot(self, snapshot): - """Deletes file on nfs share that backs a cinder volume.""" - try: - LOG.debug('Deleting backing file for snapshot %s.', snapshot['id']) - self._delete_file(snapshot['volume_id'], snapshot['name']) - except Exception: - LOG.exception('Could not delete snapshot %s on backend, ' - 'falling back to exec of "rm" command.', - snapshot['id']) - try: - # delete_file_from_share - super(NetAppCmodeNfsDriver, self).delete_snapshot(snapshot) - except Exception: - LOG.exception('Exec of "rm" command on backing file for' - ' %s was unsuccessful.', 
snapshot['id']) - - @utils.trace_method - def copy_image_to_volume(self, context, volume, image_service, image_id): - """Fetch the image from image_service and write it to the volume.""" - copy_success = False - try: - major, minor = self.zapi_client.get_ontapi_version() - col_path = self.configuration.netapp_copyoffload_tool_path - # Search the local image cache before attempting copy offload - cache_result = self._find_image_in_cache(image_id) - if cache_result: - copy_success = self._copy_from_cache(volume, image_id, - cache_result) - if copy_success: - LOG.info('Copied image %(img)s to volume %(vol)s ' - 'using local image cache.', - {'img': image_id, 'vol': volume['id']}) - # Image cache was not present, attempt copy offload workflow - if (not copy_success and col_path and - major == 1 and minor >= 20): - LOG.debug('No result found in image cache') - self._copy_from_img_service(context, volume, image_service, - image_id) - LOG.info('Copied image %(img)s to volume %(vol)s using' - ' copy offload workflow.', - {'img': image_id, 'vol': volume['id']}) - copy_success = True - except Exception: - LOG.exception('Copy offload workflow unsuccessful.') - finally: - if not copy_success: - super(NetAppCmodeNfsDriver, self).copy_image_to_volume( - context, volume, image_service, image_id) - - def _get_ip_verify_on_cluster(self, host): - """Verifies if host on same cluster and returns ip.""" - ip = na_utils.resolve_hostname(host) - vserver = self._get_vserver_for_ip(ip) - if not vserver: - raise exception.NotFound(_("Unable to locate an SVM that is " - "managing the IP address '%s'") % ip) - return ip - - def _copy_from_cache(self, volume, image_id, cache_result): - """Try copying image file_name from cached file_name.""" - LOG.debug("Trying copy from cache using copy offload.") - copied = False - cache_copy, found_local = self._find_image_location(cache_result, - volume['id']) - - try: - if found_local: - (nfs_share, file_name) = cache_copy - self._clone_file_dst_exists( - 
nfs_share, file_name, volume['name'], dest_exists=True) - LOG.debug("Copied image from cache to volume %s using " - "cloning.", volume['id']) - copied = True - elif (cache_copy and - self.configuration.netapp_copyoffload_tool_path): - self._copy_from_remote_cache(volume, image_id, cache_copy) - copied = True - - if copied: - self._post_clone_image(volume) - - except Exception: - LOG.exception('Error in workflow copy from cache.') - return copied - - def _find_image_location(self, cache_result, volume_id): - """Finds the location of a cached image. - - Returns image location local to the NFS share, that matches the - volume_id, if it exists. Otherwise returns the last entry in - cache_result or None if cache_result is empty. - """ - - found_local_copy = False - cache_copy = None - provider_location = self._get_provider_location(volume_id) - for res in cache_result: - (share, file_name) = res - if share == provider_location: - cache_copy = res - found_local_copy = True - break - else: - cache_copy = res - return cache_copy, found_local_copy - - def _copy_from_remote_cache(self, volume, image_id, cache_copy): - """Copies the remote cached image to the provided volume. - - Executes the copy offload binary which copies the cached image to - the destination path of the provided volume. Also registers the new - copy of the image as a cached image. - """ - - (nfs_share, file_name) = cache_copy - col_path = self.configuration.netapp_copyoffload_tool_path - src_ip, src_path = self._get_source_ip_and_path(nfs_share, file_name) - dest_ip, dest_path = self._get_destination_ip_and_path(volume) - - # Always run copy offload as regular user, it's sufficient - # and rootwrap doesn't allow copy offload to run as root anyways. 
- self._execute(col_path, src_ip, dest_ip, src_path, dest_path, - run_as_root=False, check_exit_code=0) - self._register_image_in_cache(volume, image_id) - LOG.debug("Copied image from cache to volume %s using copy offload.", - volume['id']) - - def _get_source_ip_and_path(self, nfs_share, file_name): - src_ip = self._get_ip_verify_on_cluster(nfs_share.split(':')[0]) - src_path = os.path.join(nfs_share.split(':')[1], file_name) - return src_ip, src_path - - def _get_destination_ip_and_path(self, volume): - dest_ip = self._get_ip_verify_on_cluster( - self._get_host_ip(volume['id'])) - dest_path = os.path.join(self._get_export_path( - volume['id']), volume['name']) - return dest_ip, dest_path - - def _clone_file_dst_exists(self, share, src_name, dst_name, - dest_exists=False): - """Clone file even if dest exists.""" - (vserver, exp_volume) = self._get_vserver_and_exp_vol(share=share) - self.zapi_client.clone_file(exp_volume, src_name, dst_name, vserver, - dest_exists=dest_exists) - - def _copy_from_img_service(self, context, volume, image_service, - image_id): - """Copies from the image service using copy offload.""" - LOG.debug("Trying copy from image service using copy offload.") - image_loc = image_service.get_location(context, image_id) - locations = self._construct_image_nfs_url(image_loc) - src_ip = None - selected_loc = None - # this will match the first location that has a valid IP on cluster - for location in locations: - conn, dr = self._check_get_nfs_path_segs(location) - if conn: - try: - src_ip = self._get_ip_verify_on_cluster(conn.split(':')[0]) - selected_loc = location - break - except exception.NotFound: - pass - if src_ip is None: - raise exception.NotFound(_("Source host details not found.")) - (__, ___, img_file) = selected_loc.rpartition('/') - src_path = os.path.join(dr, img_file) - dst_ip = self._get_ip_verify_on_cluster(self._get_host_ip( - volume['id'])) - # tmp file is required to deal with img formats - tmp_img_file = 
six.text_type(uuid.uuid4()) - col_path = self.configuration.netapp_copyoffload_tool_path - img_info = image_service.show(context, image_id) - dst_share = self._get_provider_location(volume['id']) - self._check_share_can_hold_size(dst_share, img_info['size']) - run_as_root = self._execute_as_root - - dst_dir = self._get_mount_point_for_share(dst_share) - dst_img_local = os.path.join(dst_dir, tmp_img_file) - try: - # If src and dst share not equal - if (('%s:%s' % (src_ip, dr)) != - ('%s:%s' % (dst_ip, self._get_export_path(volume['id'])))): - dst_img_serv_path = os.path.join( - self._get_export_path(volume['id']), tmp_img_file) - # Always run copy offload as regular user, it's sufficient - # and rootwrap doesn't allow copy offload to run as root - # anyways. - self._execute(col_path, src_ip, dst_ip, src_path, - dst_img_serv_path, run_as_root=False, - check_exit_code=0) - else: - self._clone_file_dst_exists(dst_share, img_file, tmp_img_file) - self._discover_file_till_timeout(dst_img_local, timeout=120) - LOG.debug('Copied image %(img)s to tmp file %(tmp)s.', - {'img': image_id, 'tmp': tmp_img_file}) - dst_img_cache_local = os.path.join(dst_dir, - 'img-cache-%s' % image_id) - if img_info['disk_format'] == 'raw': - LOG.debug('Image is raw %s.', image_id) - self._clone_file_dst_exists(dst_share, tmp_img_file, - volume['name'], dest_exists=True) - self._move_nfs_file(dst_img_local, dst_img_cache_local) - LOG.debug('Copied raw image %(img)s to volume %(vol)s.', - {'img': image_id, 'vol': volume['id']}) - else: - LOG.debug('Image will be converted to raw %s.', image_id) - img_conv = six.text_type(uuid.uuid4()) - dst_img_conv_local = os.path.join(dst_dir, img_conv) - - # Checking against image size which is approximate check - self._check_share_can_hold_size(dst_share, img_info['size']) - try: - image_utils.convert_image(dst_img_local, - dst_img_conv_local, 'raw', - run_as_root=run_as_root) - data = image_utils.qemu_img_info(dst_img_conv_local, - run_as_root=run_as_root) - 
if data.file_format != "raw": - raise exception.InvalidResults( - _("Converted to raw, but format is now %s.") - % data.file_format) - else: - self._clone_file_dst_exists(dst_share, img_conv, - volume['name'], - dest_exists=True) - self._move_nfs_file(dst_img_conv_local, - dst_img_cache_local) - LOG.debug('Copied locally converted raw image' - ' %(img)s to volume %(vol)s.', - {'img': image_id, 'vol': volume['id']}) - finally: - if os.path.exists(dst_img_conv_local): - self._delete_file_at_path(dst_img_conv_local) - self._post_clone_image(volume) - finally: - if os.path.exists(dst_img_local): - self._delete_file_at_path(dst_img_local) - - @utils.trace_method - def unmanage(self, volume): - """Removes the specified volume from Cinder management. - - Does not delete the underlying backend storage object. A log entry - will be made to notify the Admin that the volume is no longer being - managed. - - :param volume: Cinder volume to unmanage - """ - try: - qos_policy_group_info = na_utils.get_valid_qos_policy_group_info( - volume) - self.zapi_client.mark_qos_policy_group_for_deletion( - qos_policy_group_info) - except Exception: - # Unmanage even if there was a problem deprovisioning the - # associated qos policy group. 
- pass - - super(NetAppCmodeNfsDriver, self).unmanage(volume) - - def failover_host(self, context, volumes, secondary_id=None, groups=None): - """Failover a backend to a secondary replication target.""" - - return self._failover_host(volumes, secondary_id=secondary_id) - - def _get_backing_flexvol_names(self): - """Returns a list of backing flexvol names.""" - return self.ssc_library.get_ssc().keys() - - def _get_flexvol_names_from_hosts(self, hosts): - """Returns a set of flexvol names.""" - flexvols = set() - ssc = self.ssc_library.get_ssc() - - for host in hosts: - pool_name = volume_utils.extract_host(host, level='pool') - - for flexvol_name, ssc_volume_data in ssc.items(): - if ssc_volume_data['pool_name'] == pool_name: - flexvols.add(flexvol_name) - - return flexvols - - @utils.trace_method - def delete_cgsnapshot(self, context, cgsnapshot, snapshots): - """Delete files backing each snapshot in the cgsnapshot. - - :return: An implicit update of snapshot models that the manager will - interpret and subsequently set the model state to deleted. - """ - for snapshot in snapshots: - self._delete_backing_file_for_snapshot(snapshot) - LOG.debug("Snapshot %s deletion successful", snapshot['name']) - - return None, None diff --git a/cinder/volume/drivers/netapp/dataontap/performance/__init__.py b/cinder/volume/drivers/netapp/dataontap/performance/__init__.py deleted file mode 100644 index e69de29bb..000000000 diff --git a/cinder/volume/drivers/netapp/dataontap/performance/perf_7mode.py b/cinder/volume/drivers/netapp/dataontap/performance/perf_7mode.py deleted file mode 100644 index 00f299c98..000000000 --- a/cinder/volume/drivers/netapp/dataontap/performance/perf_7mode.py +++ /dev/null @@ -1,148 +0,0 @@ -# Copyright (c) 2016 Clinton Knight -# All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. 
You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -""" -Performance metrics functions and cache for NetApp 7-mode Data ONTAP systems. -""" - -from oslo_log import log as logging - -from cinder.volume.drivers.netapp.dataontap.client import api as netapp_api -from cinder.volume.drivers.netapp.dataontap.performance import perf_base - - -LOG = logging.getLogger(__name__) - - -class Performance7modeLibrary(perf_base.PerformanceLibrary): - - def __init__(self, zapi_client): - super(Performance7modeLibrary, self).__init__(zapi_client) - - self.performance_counters = [] - self.utilization = perf_base.DEFAULT_UTILIZATION - self.node_name = self.zapi_client.get_system_name() - - def _init_counter_info(self): - """Set a few counter names based on Data ONTAP version.""" - - super(Performance7modeLibrary, self)._init_counter_info() - - if self.zapi_client.features.SYSTEM_METRICS: - self.system_object_name = 'system' - try: - self.avg_processor_busy_base_counter_name = ( - self._get_base_counter_name('system', - 'avg_processor_busy')) - except netapp_api.NaApiError: - self.avg_processor_busy_base_counter_name = 'cpu_elapsed_time1' - LOG.exception('Could not get performance base counter ' - 'name. 
Performance-based scheduler ' - 'functions may not be available.') - - def update_performance_cache(self): - """Called periodically to update node utilization metrics.""" - - # Nothing to do on older systems - if not self.zapi_client.features.SYSTEM_METRICS: - return - - # Get new performance counters and save only the last 10 - counters = self._get_node_utilization_counters() - if not counters: - return - - self.performance_counters.append(counters) - self.performance_counters = self.performance_counters[-10:] - - # Update utilization using newest & oldest sample - if len(self.performance_counters) < 2: - self.utilization = perf_base.DEFAULT_UTILIZATION - else: - self.utilization = self._get_node_utilization( - self.performance_counters[0], self.performance_counters[-1], - self.node_name) - - def get_node_utilization(self): - """Get the node utilization, if available.""" - - return self.utilization - - def _get_node_utilization_counters(self): - """Get all performance counters for calculating node utilization.""" - - try: - return (self._get_node_utilization_system_counters() + - self._get_node_utilization_wafl_counters() + - self._get_node_utilization_processor_counters()) - except netapp_api.NaApiError: - LOG.exception('Could not get utilization counters from node ' - '%s', self.node_name) - return None - - def _get_node_utilization_system_counters(self): - """Get the system counters for calculating node utilization.""" - - system_instance_names = ( - self.zapi_client.get_performance_instance_names( - self.system_object_name)) - - system_counter_names = [ - 'avg_processor_busy', - self.avg_processor_busy_base_counter_name, - ] - if 'cpu_elapsed_time1' in system_counter_names: - system_counter_names.append('cpu_elapsed_time') - - system_counters = self.zapi_client.get_performance_counters( - self.system_object_name, system_instance_names, - system_counter_names) - - return system_counters - - def _get_node_utilization_wafl_counters(self): - """Get the WAFL 
counters for calculating node utilization.""" - - wafl_instance_names = self.zapi_client.get_performance_instance_names( - 'wafl') - - wafl_counter_names = ['total_cp_msecs', 'cp_phase_times'] - wafl_counters = self.zapi_client.get_performance_counters( - 'wafl', wafl_instance_names, wafl_counter_names) - - # Expand array data so we can use wafl:cp_phase_times[P2_FLUSH] - for counter in wafl_counters: - if 'cp_phase_times' in counter: - self._expand_performance_array( - 'wafl', 'cp_phase_times', counter) - - return wafl_counters - - def _get_node_utilization_processor_counters(self): - """Get the processor counters for calculating node utilization.""" - - processor_instance_names = ( - self.zapi_client.get_performance_instance_names('processor')) - - processor_counter_names = ['domain_busy', 'processor_elapsed_time'] - processor_counters = self.zapi_client.get_performance_counters( - 'processor', processor_instance_names, processor_counter_names) - - # Expand array data so we can use processor:domain_busy[kahuna] - for counter in processor_counters: - if 'domain_busy' in counter: - self._expand_performance_array( - 'processor', 'domain_busy', counter) - - return processor_counters diff --git a/cinder/volume/drivers/netapp/dataontap/performance/perf_base.py b/cinder/volume/drivers/netapp/dataontap/performance/perf_base.py deleted file mode 100644 index f88a9843a..000000000 --- a/cinder/volume/drivers/netapp/dataontap/performance/perf_base.py +++ /dev/null @@ -1,225 +0,0 @@ -# Copyright (c) 2016 Clinton Knight -# All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. 
You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -""" -Performance metrics functions and cache for NetApp systems. -""" - -from oslo_log import log as logging - -from cinder import exception -from cinder.i18n import _ - - -LOG = logging.getLogger(__name__) -DEFAULT_UTILIZATION = 50 - - -class PerformanceLibrary(object): - - def __init__(self, zapi_client): - - self.zapi_client = zapi_client - self._init_counter_info() - - def _init_counter_info(self): - """Set a few counter names based on Data ONTAP version.""" - - self.system_object_name = None - self.avg_processor_busy_base_counter_name = None - - def _get_node_utilization(self, counters_t1, counters_t2, node_name): - """Get node utilization from two sets of performance counters.""" - - try: - # Time spent in the single-threaded Kahuna domain - kahuna_percent = self._get_kahuna_utilization(counters_t1, - counters_t2) - - # If Kahuna is using >60% of the CPU, the controller is fully busy - if kahuna_percent > 60: - return 100.0 - - # Average CPU busyness across all processors - avg_cpu_percent = 100.0 * self._get_average_cpu_utilization( - counters_t1, counters_t2) - - # Total Consistency Point (CP) time - total_cp_time_msec = self._get_total_consistency_point_time( - counters_t1, counters_t2) - - # Time spent in CP Phase 2 (buffer flush) - p2_flush_time_msec = self._get_consistency_point_p2_flush_time( - counters_t1, counters_t2) - - # Wall-clock time between the two counter sets - poll_time_msec = self._get_total_time(counters_t1, - counters_t2, - 'total_cp_msecs') - - # If two polls happened in quick succession, use CPU utilization - if 
total_cp_time_msec == 0 or poll_time_msec == 0: - return max(min(100.0, avg_cpu_percent), 0) - - # Adjusted Consistency Point time - adjusted_cp_time_msec = self._get_adjusted_consistency_point_time( - total_cp_time_msec, p2_flush_time_msec) - adjusted_cp_percent = (100.0 * - adjusted_cp_time_msec / poll_time_msec) - - # Utilization is the greater of CPU busyness & CP time - node_utilization = max(avg_cpu_percent, adjusted_cp_percent) - return max(min(100.0, node_utilization), 0) - - except Exception: - LOG.exception('Could not calculate node utilization for ' - 'node %s.', node_name) - return DEFAULT_UTILIZATION - - def _get_kahuna_utilization(self, counters_t1, counters_t2): - """Get time spent in the single-threaded Kahuna domain.""" - - # Note(cknight): Because Kahuna is single-threaded, running only on - # one CPU at a time, we can safely sum the Kahuna CPU usage - # percentages across all processors in a node. - return sum(self._get_performance_counter_average_multi_instance( - counters_t1, counters_t2, 'domain_busy:kahuna', - 'processor_elapsed_time')) * 100.0 - - def _get_average_cpu_utilization(self, counters_t1, counters_t2): - """Get average CPU busyness across all processors.""" - - return self._get_performance_counter_average( - counters_t1, counters_t2, 'avg_processor_busy', - self.avg_processor_busy_base_counter_name) - - def _get_total_consistency_point_time(self, counters_t1, counters_t2): - """Get time spent in Consistency Points in msecs.""" - - return float(self._get_performance_counter_delta( - counters_t1, counters_t2, 'total_cp_msecs')) - - def _get_consistency_point_p2_flush_time(self, counters_t1, counters_t2): - """Get time spent in CP Phase 2 (buffer flush) in msecs.""" - - return float(self._get_performance_counter_delta( - counters_t1, counters_t2, 'cp_phase_times:p2_flush')) - - def _get_total_time(self, counters_t1, counters_t2, counter_name): - """Get wall clock time between two successive counters in msecs.""" - - timestamp_t1 = 
float(self._find_performance_counter_timestamp( - counters_t1, counter_name)) - timestamp_t2 = float(self._find_performance_counter_timestamp( - counters_t2, counter_name)) - return (timestamp_t2 - timestamp_t1) * 1000.0 - - def _get_adjusted_consistency_point_time(self, total_cp_time, - p2_flush_time): - """Get adjusted CP time by limiting CP phase 2 flush time to 20%.""" - - return (total_cp_time - p2_flush_time) * 1.20 - - def _get_performance_counter_delta(self, counters_t1, counters_t2, - counter_name): - """Calculate a delta value from two performance counters.""" - - counter_t1 = int( - self._find_performance_counter_value(counters_t1, counter_name)) - counter_t2 = int( - self._find_performance_counter_value(counters_t2, counter_name)) - - return counter_t2 - counter_t1 - - def _get_performance_counter_average(self, counters_t1, counters_t2, - counter_name, base_counter_name, - instance_name=None): - """Calculate an average value from two performance counters.""" - - counter_t1 = float(self._find_performance_counter_value( - counters_t1, counter_name, instance_name)) - counter_t2 = float(self._find_performance_counter_value( - counters_t2, counter_name, instance_name)) - base_counter_t1 = float(self._find_performance_counter_value( - counters_t1, base_counter_name, instance_name)) - base_counter_t2 = float(self._find_performance_counter_value( - counters_t2, base_counter_name, instance_name)) - - return (counter_t2 - counter_t1) / (base_counter_t2 - base_counter_t1) - - def _get_performance_counter_average_multi_instance(self, counters_t1, - counters_t2, - counter_name, - base_counter_name): - """Calculate an average value from multiple counter instances.""" - - averages = [] - instance_names = [] - for counter in counters_t1: - if counter_name in counter: - instance_names.append(counter['instance-name']) - - for instance_name in instance_names: - average = self._get_performance_counter_average( - counters_t1, counters_t2, counter_name, base_counter_name, - 
instance_name) - averages.append(average) - - return averages - - def _find_performance_counter_value(self, counters, counter_name, - instance_name=None): - """Given a counter set, return the value of a named instance.""" - - for counter in counters: - if counter_name in counter: - if (instance_name is None - or counter['instance-name'] == instance_name): - return counter[counter_name] - else: - raise exception.NotFound(_('Counter %s not found') % counter_name) - - def _find_performance_counter_timestamp(self, counters, counter_name, - instance_name=None): - """Given a counter set, return the timestamp of a named instance.""" - - for counter in counters: - if counter_name in counter: - if (instance_name is None - or counter['instance-name'] == instance_name): - return counter['timestamp'] - else: - raise exception.NotFound(_('Counter %s not found') % counter_name) - - def _expand_performance_array(self, object_name, counter_name, counter): - """Get array labels and expand counter data array.""" - - # Get array labels for counter value - counter_info = self.zapi_client.get_performance_counter_info( - object_name, counter_name) - - array_labels = [counter_name + ':' + label.lower() - for label in counter_info['labels']] - array_values = counter[counter_name].split(',') - - # Combine labels and values, and then mix into existing counter - array_data = dict(zip(array_labels, array_values)) - counter.update(array_data) - - def _get_base_counter_name(self, object_name, counter_name): - """Get the name of the base counter for the specified counter.""" - - counter_info = self.zapi_client.get_performance_counter_info( - object_name, counter_name) - return counter_info['base-counter'] diff --git a/cinder/volume/drivers/netapp/dataontap/performance/perf_cmode.py b/cinder/volume/drivers/netapp/dataontap/performance/perf_cmode.py deleted file mode 100644 index 2695602e7..000000000 --- a/cinder/volume/drivers/netapp/dataontap/performance/perf_cmode.py +++ /dev/null @@ -1,208 
+0,0 @@ -# Copyright (c) 2016 Clinton Knight -# All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -""" -Performance metrics functions and cache for NetApp cDOT systems. -""" - -from oslo_log import log as logging - -from cinder.volume.drivers.netapp.dataontap.client import api as netapp_api -from cinder.volume.drivers.netapp.dataontap.performance import perf_base - - -LOG = logging.getLogger(__name__) - - -class PerformanceCmodeLibrary(perf_base.PerformanceLibrary): - - def __init__(self, zapi_client): - super(PerformanceCmodeLibrary, self).__init__(zapi_client) - - self.performance_counters = {} - self.pool_utilization = {} - - def _init_counter_info(self): - """Set a few counter names based on Data ONTAP version.""" - - super(PerformanceCmodeLibrary, self)._init_counter_info() - - try: - if self.zapi_client.features.SYSTEM_CONSTITUENT_METRICS: - self.system_object_name = 'system:constituent' - self.avg_processor_busy_base_counter_name = ( - self._get_base_counter_name('system:constituent', - 'avg_processor_busy')) - elif self.zapi_client.features.SYSTEM_METRICS: - self.system_object_name = 'system' - self.avg_processor_busy_base_counter_name = ( - self._get_base_counter_name('system', - 'avg_processor_busy')) - except netapp_api.NaApiError: - if self.zapi_client.features.SYSTEM_CONSTITUENT_METRICS: - self.avg_processor_busy_base_counter_name = 'cpu_elapsed_time' - else: - self.avg_processor_busy_base_counter_name = 'cpu_elapsed_time1' - 
LOG.exception('Could not get performance base counter ' - 'name. Performance-based scheduler ' - 'functions may not be available.') - - def update_performance_cache(self, ssc_pools): - """Called periodically to update per-pool node utilization metrics.""" - - # Nothing to do on older systems - if not (self.zapi_client.features.SYSTEM_METRICS or - self.zapi_client.features.SYSTEM_CONSTITUENT_METRICS): - return - - # Get aggregates and nodes for all known pools - aggr_names = self._get_aggregates_for_pools(ssc_pools) - node_names, aggr_node_map = self._get_nodes_for_aggregates(aggr_names) - - # Update performance counter cache for each node - node_utilization = {} - for node_name in node_names: - if node_name not in self.performance_counters: - self.performance_counters[node_name] = [] - - # Get new performance counters and save only the last 10 - counters = self._get_node_utilization_counters(node_name) - if not counters: - continue - - self.performance_counters[node_name].append(counters) - self.performance_counters[node_name] = ( - self.performance_counters[node_name][-10:]) - - # Update utilization for each node using newest & oldest sample - counters = self.performance_counters[node_name] - if len(counters) < 2: - node_utilization[node_name] = perf_base.DEFAULT_UTILIZATION - else: - node_utilization[node_name] = self._get_node_utilization( - counters[0], counters[-1], node_name) - - # Update pool utilization map atomically - pool_utilization = {} - for pool_name, pool_info in ssc_pools.items(): - aggr_name = pool_info.get('netapp_aggregate', 'unknown') - node_name = aggr_node_map.get(aggr_name) - if node_name: - pool_utilization[pool_name] = node_utilization.get( - node_name, perf_base.DEFAULT_UTILIZATION) - else: - pool_utilization[pool_name] = perf_base.DEFAULT_UTILIZATION - - self.pool_utilization = pool_utilization - - def get_node_utilization_for_pool(self, pool_name): - """Get the node utilization for the specified pool, if available.""" - - return 
self.pool_utilization.get(pool_name, - perf_base.DEFAULT_UTILIZATION) - - def _update_for_failover(self, zapi_client, ssc_pools): - self.zapi_client = zapi_client - self.update_performance_cache(ssc_pools) - - def _get_aggregates_for_pools(self, ssc_pools): - """Get the set of aggregates that contain the specified pools.""" - - aggr_names = set() - for pool_name, pool_info in ssc_pools.items(): - aggr_names.add(pool_info.get('netapp_aggregate')) - return aggr_names - - def _get_nodes_for_aggregates(self, aggr_names): - """Get the cluster nodes that own the specified aggregates.""" - - node_names = set() - aggr_node_map = {} - - for aggr_name in aggr_names: - node_name = self.zapi_client.get_node_for_aggregate(aggr_name) - if node_name: - node_names.add(node_name) - aggr_node_map[aggr_name] = node_name - - return node_names, aggr_node_map - - def _get_node_utilization_counters(self, node_name): - """Get all performance counters for calculating node utilization.""" - - try: - return (self._get_node_utilization_system_counters(node_name) + - self._get_node_utilization_wafl_counters(node_name) + - self._get_node_utilization_processor_counters(node_name)) - except netapp_api.NaApiError: - LOG.exception('Could not get utilization counters from node %s', - node_name) - return None - - def _get_node_utilization_system_counters(self, node_name): - """Get the system counters for calculating node utilization.""" - - system_instance_uuids = ( - self.zapi_client.get_performance_instance_uuids( - self.system_object_name, node_name)) - - system_counter_names = [ - 'avg_processor_busy', - self.avg_processor_busy_base_counter_name, - ] - if 'cpu_elapsed_time1' in system_counter_names: - system_counter_names.append('cpu_elapsed_time') - - system_counters = self.zapi_client.get_performance_counters( - self.system_object_name, system_instance_uuids, - system_counter_names) - - return system_counters - - def _get_node_utilization_wafl_counters(self, node_name): - """Get the WAFL 
counters for calculating node utilization.""" - - wafl_instance_uuids = self.zapi_client.get_performance_instance_uuids( - 'wafl', node_name) - - wafl_counter_names = ['total_cp_msecs', 'cp_phase_times'] - wafl_counters = self.zapi_client.get_performance_counters( - 'wafl', wafl_instance_uuids, wafl_counter_names) - - # Expand array data so we can use wafl:cp_phase_times[P2_FLUSH] - for counter in wafl_counters: - if 'cp_phase_times' in counter: - self._expand_performance_array( - 'wafl', 'cp_phase_times', counter) - - return wafl_counters - - def _get_node_utilization_processor_counters(self, node_name): - """Get the processor counters for calculating node utilization.""" - - processor_instance_uuids = ( - self.zapi_client.get_performance_instance_uuids('processor', - node_name)) - - processor_counter_names = ['domain_busy', 'processor_elapsed_time'] - processor_counters = self.zapi_client.get_performance_counters( - 'processor', processor_instance_uuids, processor_counter_names) - - # Expand array data so we can use processor:domain_busy[kahuna] - for counter in processor_counters: - if 'domain_busy' in counter: - self._expand_performance_array( - 'processor', 'domain_busy', counter) - - return processor_counters diff --git a/cinder/volume/drivers/netapp/dataontap/utils/__init__.py b/cinder/volume/drivers/netapp/dataontap/utils/__init__.py deleted file mode 100644 index e69de29bb..000000000 diff --git a/cinder/volume/drivers/netapp/dataontap/utils/capabilities.py b/cinder/volume/drivers/netapp/dataontap/utils/capabilities.py deleted file mode 100644 index 34373cb47..000000000 --- a/cinder/volume/drivers/netapp/dataontap/utils/capabilities.py +++ /dev/null @@ -1,287 +0,0 @@ -# Copyright (c) 2016 Clinton Knight. All rights reserved. -# Copyright (c) 2017 Jose Porrua. All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. 
You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -""" -Storage service catalog (SSC) functions and classes for NetApp cDOT systems. -""" - -import copy -import re - -from oslo_log import log as logging -import six - -from cinder import exception -from cinder.i18n import _ - - -LOG = logging.getLogger(__name__) - -# NOTE(cknight): The keys in this map are tuples that contain arguments needed -# for efficient use of the system-user-capability-get-iter cDOT API. The -# values are SSC extra specs associated with the APIs listed in the keys. -SSC_API_MAP = { - ('storage.aggregate', 'show', 'aggr-options-list-info'): [ - 'netapp_raid_type', - ], - ('storage.disk', 'show', 'storage-disk-get-iter'): [ - 'netapp_disk_type', - ], - ('snapmirror', 'show', 'snapmirror-get-iter'): [ - 'netapp_mirrored', - ], - ('volume.efficiency', 'show', 'sis-get-iter'): [ - 'netapp_dedup', - 'netapp_compression', - ], - ('volume', 'show', 'volume-get-iter'): [ - 'netapp_flexvol_encryption', - ], -} - - -class CapabilitiesLibrary(object): - - def __init__(self, protocol, vserver_name, zapi_client, configuration): - - self.protocol = protocol.lower() - self.vserver_name = vserver_name - self.zapi_client = zapi_client - self.configuration = configuration - self.backend_name = self.configuration.safe_get('volume_backend_name') - self.ssc = {} - - def check_api_permissions(self): - """Check which APIs that support SSC functionality are available.""" - - inaccessible_apis = [] - invalid_extra_specs = [] - - for api_tuple, extra_specs in SSC_API_MAP.items(): - object_name, operation_name, api = api_tuple - if not 
self.zapi_client.check_cluster_api(object_name, - operation_name, - api): - inaccessible_apis.append(api) - invalid_extra_specs.extend(extra_specs) - - if inaccessible_apis: - if 'volume-get-iter' in inaccessible_apis: - msg = _('User not permitted to query Data ONTAP volumes.') - raise exception.VolumeBackendAPIException(data=msg) - else: - LOG.warning('The configured user account does not have ' - 'sufficient privileges to use all needed ' - 'APIs. The following extra specs will fail ' - 'or be ignored: %s.', invalid_extra_specs) - - def get_ssc(self): - """Get a copy of the Storage Service Catalog.""" - - return copy.deepcopy(self.ssc) - - def get_ssc_flexvol_names(self): - """Get the names of the FlexVols in the Storage Service Catalog.""" - ssc = self.get_ssc() - return ssc.keys() - - def get_ssc_for_flexvol(self, flexvol_name): - """Get map of Storage Service Catalog entries for a single flexvol.""" - - return copy.deepcopy(self.ssc.get(flexvol_name, {})) - - def get_ssc_aggregates(self): - """Get a list of aggregates for all SSC flexvols.""" - - aggregates = set() - for __, flexvol_info in self.ssc.items(): - if 'netapp_aggregate' in flexvol_info: - aggregates.add(flexvol_info['netapp_aggregate']) - return list(aggregates) - - def update_ssc(self, flexvol_map): - """Periodically runs to update Storage Service Catalog data. - - The self.ssc attribute is updated with the following format. 
- { : {: }} - """ - LOG.info("Updating storage service catalog information for " - "backend '%s'", self.backend_name) - - ssc = {} - - for flexvol_name, flexvol_info in flexvol_map.items(): - - ssc_volume = {} - - # Add metadata passed from the driver, including pool name - ssc_volume.update(flexvol_info) - - # Get volume info - ssc_volume.update(self._get_ssc_flexvol_info(flexvol_name)) - ssc_volume.update(self._get_ssc_dedupe_info(flexvol_name)) - ssc_volume.update(self._get_ssc_mirror_info(flexvol_name)) - ssc_volume.update(self._get_ssc_encryption_info(flexvol_name)) - - # Get aggregate info - aggregate_name = ssc_volume.get('netapp_aggregate') - ssc_volume.update(self._get_ssc_aggregate_info(aggregate_name)) - - ssc[flexvol_name] = ssc_volume - - self.ssc = ssc - - def _update_for_failover(self, zapi_client, flexvol_map): - - self.zapi_client = zapi_client - self.update_ssc(flexvol_map) - - def _get_ssc_flexvol_info(self, flexvol_name): - """Gather flexvol info and recast into SSC-style volume stats.""" - - volume_info = self.zapi_client.get_flexvol(flexvol_name=flexvol_name) - - netapp_thick = (volume_info.get('space-guarantee-enabled') and - (volume_info.get('space-guarantee') == 'file' or - volume_info.get('space-guarantee') == 'volume')) - thick = self._get_thick_provisioning_support(netapp_thick) - - return { - 'netapp_thin_provisioned': six.text_type(not netapp_thick).lower(), - 'thick_provisioning_support': thick, - 'thin_provisioning_support': not thick, - 'netapp_aggregate': volume_info.get('aggregate'), - } - - def _get_thick_provisioning_support(self, netapp_thick): - """Get standard thick/thin values for a flexvol. - - The values reported for the standard thick_provisioning_support and - thin_provisioning_support flags depend on both the flexvol state as - well as protocol-specific configuration values. 
- """ - - if self.protocol == 'nfs': - return (netapp_thick and - not self.configuration.nfs_sparsed_volumes) - else: - return (netapp_thick and - (self.configuration.netapp_lun_space_reservation == - 'enabled')) - - def _get_ssc_dedupe_info(self, flexvol_name): - """Gather dedupe info and recast into SSC-style volume stats.""" - - dedupe_info = self.zapi_client.get_flexvol_dedupe_info(flexvol_name) - - dedupe = dedupe_info.get('dedupe') - compression = dedupe_info.get('compression') - - return { - 'netapp_dedup': six.text_type(dedupe).lower(), - 'netapp_compression': six.text_type(compression).lower(), - } - - def _get_ssc_encryption_info(self, flexvol_name): - """Gather flexvol encryption info and recast into SSC-style stats.""" - encrypted = self.zapi_client.is_flexvol_encrypted( - flexvol_name, self.vserver_name) - - return {'netapp_flexvol_encryption': six.text_type(encrypted).lower()} - - def _get_ssc_mirror_info(self, flexvol_name): - """Gather SnapMirror info and recast into SSC-style volume stats.""" - - mirrored = self.zapi_client.is_flexvol_mirrored( - flexvol_name, self.vserver_name) - - return {'netapp_mirrored': six.text_type(mirrored).lower()} - - def _get_ssc_aggregate_info(self, aggregate_name): - """Gather aggregate info and recast into SSC-style volume stats.""" - - aggregate = self.zapi_client.get_aggregate(aggregate_name) - hybrid = (six.text_type(aggregate.get('is-hybrid')).lower() - if 'is-hybrid' in aggregate else None) - disk_types = self.zapi_client.get_aggregate_disk_types(aggregate_name) - - return { - 'netapp_raid_type': aggregate.get('raid-type'), - 'netapp_hybrid_aggregate': hybrid, - 'netapp_disk_type': disk_types, - } - - def get_matching_flexvols_for_extra_specs(self, extra_specs): - """Return a list of flexvol names that match a set of extra specs.""" - - extra_specs = self._modify_extra_specs_for_comparison(extra_specs) - matching_flexvols = [] - - for flexvol_name, flexvol_info in self.get_ssc().items(): - - if 
self._flexvol_matches_extra_specs(flexvol_info, extra_specs): - matching_flexvols.append(flexvol_name) - - return matching_flexvols - - def _flexvol_matches_extra_specs(self, flexvol_info, extra_specs): - """Check whether the SSC data for a FlexVol matches extra specs. - - A set of extra specs is considered a match for a FlexVol if, for each - extra spec, the value matches what is in SSC or the key is unknown to - SSC. - """ - - for extra_spec_key, extra_spec_value in extra_specs.items(): - - if extra_spec_key in flexvol_info: - if not self._extra_spec_matches(extra_spec_value, - flexvol_info[extra_spec_key]): - return False - - return True - - def _extra_spec_matches(self, extra_spec_value, ssc_flexvol_value): - """Check whether an extra spec value matches something in the SSC. - - The SSC values may be scalars or lists, so the extra spec value must be - compared to the SSC value if it is a scalar, or it must be sought in - the list. - """ - - if isinstance(ssc_flexvol_value, list): - return extra_spec_value in ssc_flexvol_value - else: - return extra_spec_value == ssc_flexvol_value - - def _modify_extra_specs_for_comparison(self, extra_specs): - """Adjust extra spec values for simple comparison to SSC values. - - Most extra-spec key-value tuples may be directly compared. But the - boolean values that take the form ' True' or ' False' must be - modified to allow comparison with the values we keep in the SSC and - report to the scheduler. 
- """ - - modified_extra_specs = copy.deepcopy(extra_specs) - - for key, value in extra_specs.items(): - - if isinstance(value, six.string_types): - if re.match(r'\s+True', value, re.I): - modified_extra_specs[key] = True - elif re.match(r'\s+False', value, re.I): - modified_extra_specs[key] = False - - return modified_extra_specs diff --git a/cinder/volume/drivers/netapp/dataontap/utils/data_motion.py b/cinder/volume/drivers/netapp/dataontap/utils/data_motion.py deleted file mode 100644 index 5aba6bf27..000000000 --- a/cinder/volume/drivers/netapp/dataontap/utils/data_motion.py +++ /dev/null @@ -1,644 +0,0 @@ -# Copyright (c) 2016 Alex Meade. All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -""" -NetApp Data ONTAP data motion library. - -This library handles transferring data from a source to a destination. Its -responsibility is to handle this as efficiently as possible given the -location of the data's source and destination. This includes cloning, -SnapMirror, and copy-offload as improvements to brute force data transfer. 
-""" - -from oslo_log import log -from oslo_utils import excutils - -from cinder import exception -from cinder.i18n import _ -from cinder.objects import fields -from cinder import utils -from cinder.volume.drivers.netapp.dataontap.client import api as netapp_api -from cinder.volume.drivers.netapp.dataontap.utils import utils as config_utils -from cinder.volume import utils as volume_utils - -LOG = log.getLogger(__name__) -ENTRY_DOES_NOT_EXIST = "(entry doesn't exist)" -QUIESCE_RETRY_INTERVAL = 5 - - -class DataMotionMixin(object): - - def get_replication_backend_names(self, config): - """Get the backend names for all configured replication targets.""" - - backend_names = [] - - replication_devices = config.safe_get('replication_device') - if replication_devices: - for replication_device in replication_devices: - backend_id = replication_device.get('backend_id') - if backend_id: - backend_names.append(backend_id) - - return backend_names - - def get_replication_backend_stats(self, config): - """Get the driver replication info for merging into volume stats.""" - - backend_names = self.get_replication_backend_names(config) - - if len(backend_names) > 0: - stats = { - 'replication_enabled': True, - 'replication_count': len(backend_names), - 'replication_targets': backend_names, - 'replication_type': 'async', - } - else: - stats = {'replication_enabled': False} - - return stats - - def _get_replication_aggregate_map(self, src_backend_name, - target_backend_name): - """Get the aggregate mapping config between src and destination.""" - - aggregate_map = {} - - config = config_utils.get_backend_configuration(src_backend_name) - - all_replication_aggregate_maps = config.safe_get( - 'netapp_replication_aggregate_map') - if all_replication_aggregate_maps: - for replication_aggregate_map in all_replication_aggregate_maps: - if (replication_aggregate_map.get('backend_id') == - target_backend_name): - replication_aggregate_map.pop('backend_id') - aggregate_map = 
replication_aggregate_map - break - - return aggregate_map - - def get_snapmirrors(self, src_backend_name, dest_backend_name, - src_flexvol_name=None, dest_flexvol_name=None): - """Get info regarding SnapMirror relationship/s for given params.""" - dest_backend_config = config_utils.get_backend_configuration( - dest_backend_name) - dest_vserver = dest_backend_config.netapp_vserver - dest_client = config_utils.get_client_for_backend( - dest_backend_name, vserver_name=dest_vserver) - - src_backend_config = config_utils.get_backend_configuration( - src_backend_name) - src_vserver = src_backend_config.netapp_vserver - - snapmirrors = dest_client.get_snapmirrors( - src_vserver, src_flexvol_name, - dest_vserver, dest_flexvol_name, - desired_attributes=[ - 'relationship-status', - 'mirror-state', - 'source-vserver', - 'source-volume', - 'destination-vserver', - 'destination-volume', - 'last-transfer-end-timestamp', - 'lag-time', - ]) - return snapmirrors - - def create_snapmirror(self, src_backend_name, dest_backend_name, - src_flexvol_name, dest_flexvol_name): - """Set up a SnapMirror relationship b/w two FlexVols (cinder pools) - - 1. Create SnapMirror relationship - 2. Initialize data transfer asynchronously - - If a SnapMirror relationship already exists and is broken off or - quiesced, resume and re-sync the mirror. - """ - dest_backend_config = config_utils.get_backend_configuration( - dest_backend_name) - dest_vserver = dest_backend_config.netapp_vserver - dest_client = config_utils.get_client_for_backend( - dest_backend_name, vserver_name=dest_vserver) - - source_backend_config = config_utils.get_backend_configuration( - src_backend_name) - src_vserver = source_backend_config.netapp_vserver - - # 1. Create destination 'dp' FlexVol if it doesn't exist - if not dest_client.flexvol_exists(dest_flexvol_name): - self.create_destination_flexvol(src_backend_name, - dest_backend_name, - src_flexvol_name, - dest_flexvol_name) - - # 2. 
Check if SnapMirror relationship exists - existing_mirrors = dest_client.get_snapmirrors( - src_vserver, src_flexvol_name, dest_vserver, dest_flexvol_name) - - msg_payload = { - 'src_vserver': src_vserver, - 'src_volume': src_flexvol_name, - 'dest_vserver': dest_vserver, - 'dest_volume': dest_flexvol_name, - } - - # 3. Create and initialize SnapMirror if it doesn't already exist - if not existing_mirrors: - # TODO(gouthamr): Change the schedule from hourly to a config value - msg = ("Creating a SnapMirror relationship between " - "%(src_vserver)s:%(src_volume)s and %(dest_vserver)s:" - "%(dest_volume)s.") - LOG.debug(msg, msg_payload) - - dest_client.create_snapmirror(src_vserver, - src_flexvol_name, - dest_vserver, - dest_flexvol_name, - schedule='hourly') - - msg = ("Initializing SnapMirror transfers between " - "%(src_vserver)s:%(src_volume)s and %(dest_vserver)s:" - "%(dest_volume)s.") - LOG.debug(msg, msg_payload) - - # Initialize async transfer of the initial data - dest_client.initialize_snapmirror(src_vserver, - src_flexvol_name, - dest_vserver, - dest_flexvol_name) - - # 4. Try to repair SnapMirror if existing - else: - snapmirror = existing_mirrors[0] - if snapmirror.get('mirror-state') != 'snapmirrored': - try: - msg = ("SnapMirror between %(src_vserver)s:%(src_volume)s " - "and %(dest_vserver)s:%(dest_volume)s is in " - "'%(state)s' state. Attempting to repair it.") - msg_payload['state'] = snapmirror.get('mirror-state') - LOG.debug(msg, msg_payload) - dest_client.resume_snapmirror(src_vserver, - src_flexvol_name, - dest_vserver, - dest_flexvol_name) - dest_client.resync_snapmirror(src_vserver, - src_flexvol_name, - dest_vserver, - dest_flexvol_name) - except netapp_api.NaApiError: - LOG.exception("Could not re-sync SnapMirror.") - - def delete_snapmirror(self, src_backend_name, dest_backend_name, - src_flexvol_name, dest_flexvol_name, release=True): - """Ensure all information about a SnapMirror relationship is removed. - - 1. Abort SnapMirror - 2. 
Delete the SnapMirror - 3. Release SnapMirror to cleanup SnapMirror metadata and snapshots - """ - dest_backend_config = config_utils.get_backend_configuration( - dest_backend_name) - dest_vserver = dest_backend_config.netapp_vserver - dest_client = config_utils.get_client_for_backend( - dest_backend_name, vserver_name=dest_vserver) - - source_backend_config = config_utils.get_backend_configuration( - src_backend_name) - src_vserver = source_backend_config.netapp_vserver - - # 1. Abort any ongoing transfers - try: - dest_client.abort_snapmirror(src_vserver, - src_flexvol_name, - dest_vserver, - dest_flexvol_name, - clear_checkpoint=False) - except netapp_api.NaApiError: - # Snapmirror is already deleted - pass - - # 2. Delete SnapMirror Relationship and cleanup destination snapshots - try: - dest_client.delete_snapmirror(src_vserver, - src_flexvol_name, - dest_vserver, - dest_flexvol_name) - except netapp_api.NaApiError as e: - with excutils.save_and_reraise_exception() as exc_context: - if (e.code == netapp_api.EOBJECTNOTFOUND or - e.code == netapp_api.ESOURCE_IS_DIFFERENT or - ENTRY_DOES_NOT_EXIST in e.message): - LOG.info('No SnapMirror relationship to delete.') - exc_context.reraise = False - - if release: - # If the source is unreachable, do not perform the release - try: - src_client = config_utils.get_client_for_backend( - src_backend_name, vserver_name=src_vserver) - except Exception: - src_client = None - # 3. 
Cleanup SnapMirror relationship on source - try: - if src_client: - src_client.release_snapmirror(src_vserver, - src_flexvol_name, - dest_vserver, - dest_flexvol_name) - except netapp_api.NaApiError as e: - with excutils.save_and_reraise_exception() as exc_context: - if (e.code == netapp_api.EOBJECTNOTFOUND or - e.code == netapp_api.ESOURCE_IS_DIFFERENT or - ENTRY_DOES_NOT_EXIST in e.message): - # Handle the case where the SnapMirror is already - # cleaned up - exc_context.reraise = False - - def update_snapmirror(self, src_backend_name, dest_backend_name, - src_flexvol_name, dest_flexvol_name): - """Schedule a SnapMirror update on the backend.""" - dest_backend_config = config_utils.get_backend_configuration( - dest_backend_name) - dest_vserver = dest_backend_config.netapp_vserver - dest_client = config_utils.get_client_for_backend( - dest_backend_name, vserver_name=dest_vserver) - - source_backend_config = config_utils.get_backend_configuration( - src_backend_name) - src_vserver = source_backend_config.netapp_vserver - - # Update SnapMirror - dest_client.update_snapmirror(src_vserver, - src_flexvol_name, - dest_vserver, - dest_flexvol_name) - - def quiesce_then_abort(self, src_backend_name, dest_backend_name, - src_flexvol_name, dest_flexvol_name): - """Quiesce a SnapMirror and wait with retries before aborting.""" - dest_backend_config = config_utils.get_backend_configuration( - dest_backend_name) - dest_vserver = dest_backend_config.netapp_vserver - dest_client = config_utils.get_client_for_backend( - dest_backend_name, vserver_name=dest_vserver) - - source_backend_config = config_utils.get_backend_configuration( - src_backend_name) - src_vserver = source_backend_config.netapp_vserver - - # 1. 
Attempt to quiesce, then abort - dest_client.quiesce_snapmirror(src_vserver, - src_flexvol_name, - dest_vserver, - dest_flexvol_name) - - retries = (source_backend_config.netapp_snapmirror_quiesce_timeout / - QUIESCE_RETRY_INTERVAL) - - @utils.retry(exception.NetAppDriverException, - interval=QUIESCE_RETRY_INTERVAL, - retries=retries, backoff_rate=1) - def wait_for_quiesced(): - snapmirror = dest_client.get_snapmirrors( - src_vserver, src_flexvol_name, dest_vserver, - dest_flexvol_name, - desired_attributes=['relationship-status', 'mirror-state'])[0] - if snapmirror.get('relationship-status') != 'quiesced': - msg = _("SnapMirror relationship is not quiesced.") - raise exception.NetAppDriverException(reason=msg) - - try: - wait_for_quiesced() - except exception.NetAppDriverException: - dest_client.abort_snapmirror(src_vserver, - src_flexvol_name, - dest_vserver, - dest_flexvol_name, - clear_checkpoint=False) - - def break_snapmirror(self, src_backend_name, dest_backend_name, - src_flexvol_name, dest_flexvol_name): - """Break SnapMirror relationship. - - 1. Quiesce any ongoing SnapMirror transfers - 2. Wait until SnapMirror finishes transfers and enters quiesced state - 3. Break SnapMirror - 4. Mount the destination volume so it is given a junction path - """ - dest_backend_config = config_utils.get_backend_configuration( - dest_backend_name) - dest_vserver = dest_backend_config.netapp_vserver - dest_client = config_utils.get_client_for_backend( - dest_backend_name, vserver_name=dest_vserver) - - source_backend_config = config_utils.get_backend_configuration( - src_backend_name) - src_vserver = source_backend_config.netapp_vserver - - # 1. Attempt to quiesce, then abort - self.quiesce_then_abort(src_backend_name, dest_backend_name, - src_flexvol_name, dest_flexvol_name) - - # 2. Break SnapMirror - dest_client.break_snapmirror(src_vserver, - src_flexvol_name, - dest_vserver, - dest_flexvol_name) - - # 3. 
Mount the destination volume and create a junction path - dest_client.mount_flexvol(dest_flexvol_name) - - def resync_snapmirror(self, src_backend_name, dest_backend_name, - src_flexvol_name, dest_flexvol_name): - """Re-sync (repair / re-establish) SnapMirror relationship.""" - dest_backend_config = config_utils.get_backend_configuration( - dest_backend_name) - dest_vserver = dest_backend_config.netapp_vserver - dest_client = config_utils.get_client_for_backend( - dest_backend_name, vserver_name=dest_vserver) - - source_backend_config = config_utils.get_backend_configuration( - src_backend_name) - src_vserver = source_backend_config.netapp_vserver - - dest_client.resync_snapmirror(src_vserver, - src_flexvol_name, - dest_vserver, - dest_flexvol_name) - - def resume_snapmirror(self, src_backend_name, dest_backend_name, - src_flexvol_name, dest_flexvol_name): - """Resume SnapMirror relationship from a quiesced state.""" - dest_backend_config = config_utils.get_backend_configuration( - dest_backend_name) - dest_vserver = dest_backend_config.netapp_vserver - dest_client = config_utils.get_client_for_backend( - dest_backend_name, vserver_name=dest_vserver) - - source_backend_config = config_utils.get_backend_configuration( - src_backend_name) - src_vserver = source_backend_config.netapp_vserver - - dest_client.resume_snapmirror(src_vserver, - src_flexvol_name, - dest_vserver, - dest_flexvol_name) - - def create_destination_flexvol(self, src_backend_name, dest_backend_name, - src_flexvol_name, dest_flexvol_name): - """Create a SnapMirror mirror target FlexVol for a given source.""" - dest_backend_config = config_utils.get_backend_configuration( - dest_backend_name) - dest_vserver = dest_backend_config.netapp_vserver - dest_client = config_utils.get_client_for_backend( - dest_backend_name, vserver_name=dest_vserver) - - source_backend_config = config_utils.get_backend_configuration( - src_backend_name) - src_vserver = source_backend_config.netapp_vserver - src_client = 
config_utils.get_client_for_backend( - src_backend_name, vserver_name=src_vserver) - - provisioning_options = ( - src_client.get_provisioning_options_from_flexvol( - src_flexvol_name) - ) - - # If the source is encrypted then the destination needs to be - # encrypted too. Using is_flexvol_encrypted because it includes - # a simple check to ensure that the NVE feature is supported. - if src_client.is_flexvol_encrypted(src_flexvol_name, src_vserver): - provisioning_options['encrypt'] = 'true' - - # Remove size and volume_type - size = provisioning_options.pop('size', None) - if not size: - msg = _("Unable to read the size of the source FlexVol (%s) " - "to create a SnapMirror destination.") - raise exception.NetAppDriverException(msg % src_flexvol_name) - provisioning_options.pop('volume_type', None) - - source_aggregate = provisioning_options.pop('aggregate') - aggregate_map = self._get_replication_aggregate_map( - src_backend_name, dest_backend_name) - - if not aggregate_map.get(source_aggregate): - msg = _("Unable to find configuration matching the source " - "aggregate (%s) and the destination aggregate. Option " - "netapp_replication_aggregate_map may be incorrect.") - raise exception.NetAppDriverException( - message=msg % source_aggregate) - - destination_aggregate = aggregate_map[source_aggregate] - - # NOTE(gouthamr): The volume is intentionally created as a Data - # Protection volume; junction-path will be added on breaking - # the mirror. 
- dest_client.create_flexvol(dest_flexvol_name, - destination_aggregate, - size, - volume_type='dp', - **provisioning_options) - - def ensure_snapmirrors(self, config, src_backend_name, src_flexvol_names): - """Ensure all the SnapMirrors needed for whole-backend replication.""" - backend_names = self.get_replication_backend_names(config) - for dest_backend_name in backend_names: - for src_flexvol_name in src_flexvol_names: - - dest_flexvol_name = src_flexvol_name - - self.create_snapmirror(src_backend_name, - dest_backend_name, - src_flexvol_name, - dest_flexvol_name) - - def break_snapmirrors(self, config, src_backend_name, src_flexvol_names, - chosen_target): - """Break all existing SnapMirror relationships for a given back end.""" - failed_to_break = [] - backend_names = self.get_replication_backend_names(config) - for dest_backend_name in backend_names: - for src_flexvol_name in src_flexvol_names: - - dest_flexvol_name = src_flexvol_name - try: - self.break_snapmirror(src_backend_name, - dest_backend_name, - src_flexvol_name, - dest_flexvol_name) - except netapp_api.NaApiError: - msg = _("Unable to break SnapMirror between FlexVol " - "%(src)s and Flexvol %(dest)s. 
Associated volumes " - "will have their replication state set to error.") - payload = { - 'src': ':'.join([src_backend_name, src_flexvol_name]), - 'dest': ':'.join([dest_backend_name, - dest_flexvol_name]), - } - if dest_backend_name == chosen_target: - failed_to_break.append(src_flexvol_name) - LOG.exception(msg, payload) - - return failed_to_break - - def update_snapmirrors(self, config, src_backend_name, src_flexvol_names): - """Update all existing SnapMirror relationships on a given back end.""" - backend_names = self.get_replication_backend_names(config) - for dest_backend_name in backend_names: - for src_flexvol_name in src_flexvol_names: - - dest_flexvol_name = src_flexvol_name - try: - self.update_snapmirror(src_backend_name, - dest_backend_name, - src_flexvol_name, - dest_flexvol_name) - except netapp_api.NaApiError: - # Ignore any errors since the current source may be - # unreachable - pass - - def _choose_failover_target(self, backend_name, flexvols, - replication_targets): - target_lag_times = [] - - for target in replication_targets: - all_target_mirrors = self.get_snapmirrors( - backend_name, target, None, None) - flexvol_mirrors = self._filter_and_sort_mirrors( - all_target_mirrors, flexvols) - - if not flexvol_mirrors: - msg = ("Ignoring replication target %(target)s because no " - "SnapMirrors were found for any of the flexvols " - "in (%(flexvols)s).") - payload = { - 'flexvols': ', '.join(flexvols), - 'target': target, - } - LOG.debug(msg, payload) - continue - - target_lag_times.append( - { - 'target': target, - 'highest-lag-time': flexvol_mirrors[0]['lag-time'], - } - ) - - # The best target is one with the least 'worst' lag time. - best_target = (sorted(target_lag_times, - key=lambda x: int(x['highest-lag-time']))[0] - if len(target_lag_times) > 0 else {}) - - return best_target.get('target') - - def _filter_and_sort_mirrors(self, mirrors, flexvols): - """Return mirrors reverse-sorted by lag time. 
- - The 'slowest' mirror determines the best update that occurred on a - given replication target. - """ - filtered_mirrors = [x for x in mirrors - if x.get('destination-volume')in flexvols] - sorted_mirrors = sorted(filtered_mirrors, - key=lambda x: int(x.get('lag-time')), - reverse=True) - - return sorted_mirrors - - def _complete_failover(self, source_backend_name, replication_targets, - flexvols, volumes, failover_target=None): - """Failover a backend to a secondary replication target.""" - volume_updates = [] - - active_backend_name = failover_target or self._choose_failover_target( - source_backend_name, flexvols, replication_targets) - - if active_backend_name is None: - msg = _("No suitable host was found to failover.") - raise exception.NetAppDriverException(msg) - - source_backend_config = config_utils.get_backend_configuration( - source_backend_name) - - # 1. Start an update to try to get a last minute transfer before we - # quiesce and break - self.update_snapmirrors(source_backend_config, source_backend_name, - flexvols) - # 2. Break SnapMirrors - failed_to_break = self.break_snapmirrors(source_backend_config, - source_backend_name, - flexvols, active_backend_name) - - # 3. 
Update cinder volumes within this host - for volume in volumes: - replication_status = fields.ReplicationStatus.FAILED_OVER - volume_pool = volume_utils.extract_host(volume['host'], - level='pool') - if volume_pool in failed_to_break: - replication_status = 'error' - - volume_update = { - 'volume_id': volume['id'], - 'updates': { - 'replication_status': replication_status, - }, - } - volume_updates.append(volume_update) - - return active_backend_name, volume_updates - - def _failover_host(self, volumes, secondary_id=None, groups=None): - - if secondary_id == self.backend_name: - msg = _("Cannot failover to the same host as the primary.") - raise exception.InvalidReplicationTarget(reason=msg) - - replication_targets = self.get_replication_backend_names( - self.configuration) - - if not replication_targets: - msg = _("No replication targets configured for backend " - "%s. Cannot failover.") - raise exception.InvalidReplicationTarget(reason=msg % self.host) - elif secondary_id and secondary_id not in replication_targets: - msg = _("%(target)s is not among replication targets configured " - "for back end %(host)s. 
Cannot failover.") - payload = { - 'target': secondary_id, - 'host': self.host, - } - raise exception.InvalidReplicationTarget(reason=msg % payload) - - flexvols = self.ssc_library.get_ssc_flexvol_names() - - try: - active_backend_name, volume_updates = self._complete_failover( - self.backend_name, replication_targets, flexvols, volumes, - failover_target=secondary_id) - except exception.NetAppDriverException as e: - msg = _("Could not complete failover: %s") % e - raise exception.UnableToFailOver(reason=msg) - - # Update the ZAPI client to the backend we failed over to - self._update_zapi_client(active_backend_name) - - self.failed_over = True - self.failed_over_backend_name = active_backend_name - - return active_backend_name, volume_updates, [] diff --git a/cinder/volume/drivers/netapp/dataontap/utils/loopingcalls.py b/cinder/volume/drivers/netapp/dataontap/utils/loopingcalls.py deleted file mode 100644 index 2037e80aa..000000000 --- a/cinder/volume/drivers/netapp/dataontap/utils/loopingcalls.py +++ /dev/null @@ -1,43 +0,0 @@ -# Copyright (c) 2016 Chuck Fouts. All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -""" -Collects and starts tasks created from oslo_service.loopingcall. 
-""" - - -from collections import namedtuple -from oslo_service import loopingcall - -LoopingTask = namedtuple('LoopingTask', - ['looping_call', 'interval', 'initial_delay']) - -# Time intervals in seconds -ONE_MINUTE = 60 -TEN_MINUTES = 600 -ONE_HOUR = 3600 - - -class LoopingCalls(object): - - def __init__(self): - self.tasks = [] - - def add_task(self, call_function, interval, initial_delay=0): - looping_call = loopingcall.FixedIntervalLoopingCall(call_function) - task = LoopingTask(looping_call, interval, initial_delay) - self.tasks.append(task) - - def start_tasks(self): - for task in self.tasks: - task.looping_call.start(task.interval, task.initial_delay) diff --git a/cinder/volume/drivers/netapp/dataontap/utils/utils.py b/cinder/volume/drivers/netapp/dataontap/utils/utils.py deleted file mode 100644 index d8a204365..000000000 --- a/cinder/volume/drivers/netapp/dataontap/utils/utils.py +++ /dev/null @@ -1,119 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -""" -Utilities for NetApp FAS drivers. - -This module contains common utilities to be used by one or more -NetApp FAS drivers to achieve the desired functionality. 
-""" - -import json -import socket - -from oslo_config import cfg -from oslo_log import log - -from cinder import exception -from cinder.i18n import _ -from cinder import utils -from cinder.volume import configuration -from cinder.volume import driver -from cinder.volume.drivers.netapp.dataontap.client import client_cmode -from cinder.volume.drivers.netapp import options as na_opts - -LOG = log.getLogger(__name__) -CONF = cfg.CONF - - -def get_backend_configuration(backend_name): - """Get a cDOT configuration object for a specific backend.""" - - config_stanzas = CONF.list_all_sections() - if backend_name not in config_stanzas: - msg = _("Could not find backend stanza %(backend_name)s in " - "configuration. Available stanzas are %(stanzas)s") - params = { - "stanzas": config_stanzas, - "backend_name": backend_name, - } - raise exception.ConfigNotFound(message=msg % params) - - config = configuration.Configuration(driver.volume_opts, - config_group=backend_name) - config.append_config_values(na_opts.netapp_proxy_opts) - config.append_config_values(na_opts.netapp_connection_opts) - config.append_config_values(na_opts.netapp_transport_opts) - config.append_config_values(na_opts.netapp_basicauth_opts) - config.append_config_values(na_opts.netapp_provisioning_opts) - config.append_config_values(na_opts.netapp_cluster_opts) - config.append_config_values(na_opts.netapp_san_opts) - config.append_config_values(na_opts.netapp_replication_opts) - - return config - - -def get_client_for_backend(backend_name, vserver_name=None): - """Get a cDOT API client for a specific backend.""" - - config = get_backend_configuration(backend_name) - client = client_cmode.Client( - transport_type=config.netapp_transport_type, - username=config.netapp_login, - password=config.netapp_password, - hostname=config.netapp_server_hostname, - port=config.netapp_server_port, - vserver=vserver_name or config.netapp_vserver, - trace=utils.TRACE_API) - - return client - - -def 
_build_base_ems_log_message(driver_name, app_version): - - ems_log = { - 'computer-name': socket.gethostname() or 'Cinder_node', - 'event-source': 'Cinder driver %s' % driver_name, - 'app-version': app_version, - 'category': 'provisioning', - 'log-level': '5', - 'auto-support': 'false', - } - return ems_log - - -def build_ems_log_message_0(driver_name, app_version, driver_mode): - """Construct EMS Autosupport log message with deployment info.""" - - dest = 'cluster node' if driver_mode == 'cluster' else '7 mode controller' - - ems_log = _build_base_ems_log_message(driver_name, app_version) - ems_log['event-id'] = '0' - ems_log['event-description'] = 'OpenStack Cinder connected to %s' % dest - return ems_log - - -def build_ems_log_message_1(driver_name, app_version, vserver, - flexvol_pools, aggregate_pools): - """Construct EMS Autosupport log message with storage pool info.""" - - message = { - 'pools': { - 'vserver': vserver, - 'aggregates': aggregate_pools, - 'flexvols': flexvol_pools, - }, - } - - ems_log = _build_base_ems_log_message(driver_name, app_version) - ems_log['event-id'] = '1' - ems_log['event-description'] = json.dumps(message) - return ems_log diff --git a/cinder/volume/drivers/netapp/eseries/__init__.py b/cinder/volume/drivers/netapp/eseries/__init__.py deleted file mode 100644 index e69de29bb..000000000 diff --git a/cinder/volume/drivers/netapp/eseries/client.py b/cinder/volume/drivers/netapp/eseries/client.py deleted file mode 100644 index 974787eac..000000000 --- a/cinder/volume/drivers/netapp/eseries/client.py +++ /dev/null @@ -1,1056 +0,0 @@ -# Copyright (c) 2014 NetApp, Inc -# Copyright (c) 2014 Navneet Singh -# Copyright (c) 2015 Alex Meade -# Copyright (c) 2015 Rushil Chugh -# Copyright (c) 2015 Yogesh Kshirsagar -# Copyright (c) 2015 Jose Porrua -# Copyright (c) 2015 Michael Price -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. 
You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -""" -Client classes for web services. -""" - -import copy -import json -import uuid - -from oslo_log import log as logging -import requests -from simplejson import scanner -import six -from six.moves import urllib - -from cinder import exception -from cinder.i18n import _ -import cinder.utils as cinder_utils -from cinder.volume.drivers.netapp.eseries import exception as es_exception -from cinder.volume.drivers.netapp.eseries import utils -from cinder.volume.drivers.netapp import utils as na_utils - - -LOG = logging.getLogger(__name__) - - -class WebserviceClient(object): - """Base client for NetApp Storage web services.""" - - def __init__(self, scheme, host, port, service_path, username, - password, **kwargs): - self._validate_params(scheme, host, port) - self._create_endpoint(scheme, host, port, service_path) - self._username = username - self._password = password - self._init_connection() - - def _validate_params(self, scheme, host, port): - """Does some basic validation for web service params.""" - if host is None or port is None or scheme is None: - msg = _('One of the required inputs from host, ' - 'port or scheme was not found.') - raise exception.InvalidInput(reason=msg) - if scheme not in ('http', 'https'): - raise exception.InvalidInput(reason=_("Invalid transport type.")) - - def _create_endpoint(self, scheme, host, port, service_path): - """Creates end point url for the service.""" - netloc = '%s:%s' % (host, port) - self._endpoint = urllib.parse.urlunparse((scheme, netloc, service_path, - None, None, None)) - - def _init_connection(self): - 
"""Do client specific set up for session and connection pooling.""" - self.conn = requests.Session() - if self._username and self._password: - self.conn.auth = (self._username, self._password) - - def invoke_service(self, method='GET', url=None, params=None, data=None, - headers=None, timeout=None, verify=False): - url = url or self._endpoint - try: - response = self.conn.request(method, url, params, data, - headers=headers, timeout=timeout, - verify=verify) - # Catching error conditions other than the perceived ones. - # Helps propagating only known exceptions back to the caller. - except Exception as e: - LOG.exception("Unexpected error while invoking web service." - " Error - %s.", e) - raise exception.NetAppDriverException( - _("Invoking web service failed.")) - return response - - -class RestClient(WebserviceClient): - """REST client specific to e-series storage service.""" - - ID = 'id' - WWN = 'worldWideName' - NAME = 'label' - - ASUP_VALID_VERSION = (1, 52, 9000, 3) - CHAP_VALID_VERSION = (1, 53, 9010, 15) - # We need to check for both the release and the pre-release versions - SSC_VALID_VERSIONS = ((1, 53, 9000, 1), (1, 53, 9010, 17)) - REST_1_3_VERSION = (1, 53, 9000, 1) - REST_1_4_VERSIONS = ((1, 54, 9000, 1), (1, 54, 9090, 0)) - - RESOURCE_PATHS = { - 'volumes': '/storage-systems/{system-id}/volumes', - 'volume': '/storage-systems/{system-id}/volumes/{object-id}', - 'pool_operation_progress': - '/storage-systems/{system-id}/storage-pools/{object-id}' - '/action-progress', - 'volume_expand': - '/storage-systems/{system-id}/volumes/{object-id}/expand', - 'thin_volume_expand': - '/storage-systems/{system-id}/thin-volumes/{object-id}/expand', - 'ssc_volumes': '/storage-systems/{system-id}/ssc/volumes', - 'ssc_volume': '/storage-systems/{system-id}/ssc/volumes/{object-id}', - 'snapshot_groups': '/storage-systems/{system-id}/snapshot-groups', - 'snapshot_group': - '/storage-systems/{system-id}/snapshot-groups/{object-id}', - 'snapshot_volumes': 
'/storage-systems/{system-id}/snapshot-volumes', - 'snapshot_volume': - '/storage-systems/{system-id}/snapshot-volumes/{object-id}', - 'snapshot_images': '/storage-systems/{system-id}/snapshot-images', - 'snapshot_image': - '/storage-systems/{system-id}/snapshot-images/{object-id}', - 'cgroup': - '/storage-systems/{system-id}/consistency-groups/{object-id}', - 'cgroups': '/storage-systems/{system-id}/consistency-groups', - 'cgroup_members': - '/storage-systems/{system-id}/consistency-groups/{object-id}' - '/member-volumes', - 'cgroup_member': - '/storage-systems/{system-id}/consistency-groups/{object-id}' - '/member-volumes/{vol-id}', - 'cgroup_snapshots': - '/storage-systems/{system-id}/consistency-groups/{object-id}' - '/snapshots', - 'cgroup_snapshot': - '/storage-systems/{system-id}/consistency-groups/{object-id}' - '/snapshots/{seq-num}', - 'cgroup_snapshots_by_seq': - '/storage-systems/{system-id}/consistency-groups/{object-id}' - '/snapshots/{seq-num}', - 'cgroup_cgsnap_view': - '/storage-systems/{system-id}/consistency-groups/{object-id}' - '/views/{seq-num}', - 'cgroup_cgsnap_views': - '/storage-systems/{system-id}/consistency-groups/{object-id}' - '/views/', - 'cgroup_snapshot_views': - '/storage-systems/{system-id}/consistency-groups/{object-id}' - '/views/{view-id}/views', - 'persistent-stores': '/storage-systems/{' - 'system-id}/persistent-records/', - 'persistent-store': '/storage-systems/{' - 'system-id}/persistent-records/{key}' - } - - def __init__(self, scheme, host, port, service_path, username, - password, **kwargs): - - super(RestClient, self).__init__(scheme, host, port, service_path, - username, password, **kwargs) - - kwargs = kwargs or {} - - self._system_id = kwargs.get('system_id') - self._content_type = kwargs.get('content_type') or 'json' - - def _init_features(self): - """Sets up and initializes E-Series feature support map.""" - self.features = na_utils.Features() - self.api_operating_mode, self.api_version = 
self.get_eseries_api_info( - verify=False) - - api_version_tuple = tuple(int(version) - for version in self.api_version.split('.')) - - chap_valid_version = self._validate_version( - self.CHAP_VALID_VERSION, api_version_tuple) - self.features.add_feature('CHAP_AUTHENTICATION', - supported=chap_valid_version, - min_version=self._version_tuple_to_str( - self.CHAP_VALID_VERSION)) - - asup_api_valid_version = self._validate_version( - self.ASUP_VALID_VERSION, api_version_tuple) - self.features.add_feature('AUTOSUPPORT', - supported=asup_api_valid_version, - min_version=self._version_tuple_to_str( - self.ASUP_VALID_VERSION)) - - rest_1_3_api_valid_version = self._validate_version( - self.REST_1_3_VERSION, api_version_tuple) - - rest_1_4_api_valid_version = any( - self._validate_version(valid_version, api_version_tuple) - for valid_version in self.REST_1_4_VERSIONS) - - ssc_api_valid_version = any(self._validate_version(valid_version, - api_version_tuple) - for valid_version - in self.SSC_VALID_VERSIONS) - self.features.add_feature('SSC_API_V2', - supported=ssc_api_valid_version, - min_version=self._version_tuple_to_str( - self.SSC_VALID_VERSIONS[0])) - self.features.add_feature( - 'REST_1_3_RELEASE', supported=rest_1_3_api_valid_version, - min_version=self._version_tuple_to_str(self.REST_1_3_VERSION)) - self.features.add_feature( - 'REST_1_4_RELEASE', supported=rest_1_4_api_valid_version, - min_version=self._version_tuple_to_str(self.REST_1_4_VERSIONS[0])) - - def _version_tuple_to_str(self, version): - return ".".join([str(part) for part in version]) - - def _validate_version(self, version, actual_version): - """Determine if version is newer than, or equal to the actual version - - The proxy version number is formatted as AA.BB.CCCC.DDDD - A: Major version part 1 - B: Major version part 2 - C: Release version: 9000->Release, 9010->Pre-release, 9090->Integration - D: Minor version - - Examples: - 02.53.9000.0010 - 02.52.9010.0001 - - Note: The build version is actually 
'newer' the lower the release - (CCCC) number is. - - :param version: The version to validate - :param actual_version: The running version of the Webservice - :returns: True if the actual_version is equal or newer than the - current running version, otherwise False - """ - major_1, major_2, release, minor = version - actual_major_1, actual_major_2, actual_release, actual_minor = ( - actual_version) - - # We need to invert the release number for it to work with this - # comparison - return (actual_major_1, actual_major_2, 10000 - actual_release, - actual_minor) >= (major_1, major_2, 10000 - release, minor) - - def set_system_id(self, system_id): - """Set the storage system id.""" - self._system_id = system_id - - def get_system_id(self): - """Get the storage system id.""" - return getattr(self, '_system_id', None) - - def _get_resource_url(self, path, use_system=True, **kwargs): - """Creates end point url for rest service.""" - kwargs = kwargs or {} - if use_system: - if not self._system_id: - raise exception.NotFound(_('Storage system id not set.')) - kwargs['system-id'] = self._system_id - path = path.format(**kwargs) - if not self._endpoint.endswith('/'): - self._endpoint = '%s/' % self._endpoint - return urllib.parse.urljoin(self._endpoint, path.lstrip('/')) - - def _invoke(self, method, path, data=None, use_system=True, - timeout=None, verify=False, **kwargs): - """Invokes end point for resource on path.""" - url = self._get_resource_url(path, use_system, **kwargs) - if self._content_type == 'json': - headers = {'Accept': 'application/json', - 'Content-Type': 'application/json'} - if cinder_utils.TRACE_API: - self._log_http_request(method, url, headers, data) - data = json.dumps(data) if data else None - res = self.invoke_service(method, url, data=data, - headers=headers, - timeout=timeout, verify=verify) - - try: - res_dict = res.json() if res.text else None - # This should only occur if we expected JSON, but were sent - # something else - except 
scanner.JSONDecodeError: - res_dict = None - - if cinder_utils.TRACE_API: - self._log_http_response(res.status_code, dict(res.headers), - res_dict) - - self._eval_response(res) - return res_dict - else: - raise exception.NetAppDriverException( - _("Content type not supported.")) - - def _to_pretty_dict_string(self, data): - """Convert specified dict to pretty printed string.""" - return json.dumps(data, sort_keys=True, - indent=2, separators=(',', ': ')) - - def _log_http_request(self, verb, url, headers, body): - scrubbed_body = copy.deepcopy(body) - if scrubbed_body: - if 'password' in scrubbed_body: - scrubbed_body['password'] = "****" - if 'storedPassword' in scrubbed_body: - scrubbed_body['storedPassword'] = "****" - - params = {'verb': verb, 'path': url, - 'body': self._to_pretty_dict_string(scrubbed_body) or "", - 'headers': self._to_pretty_dict_string(headers)} - LOG.debug("Invoking ESeries Rest API, Request:\n" - "HTTP Verb: %(verb)s\n" - "URL Path: %(path)s\n" - "HTTP Headers:\n" - "%(headers)s\n" - "Body:\n" - "%(body)s\n", (params)) - - def _log_http_response(self, status, headers, body): - params = {'status': status, - 'body': self._to_pretty_dict_string(body) or "", - 'headers': self._to_pretty_dict_string(headers)} - LOG.debug("ESeries Rest API, Response:\n" - "HTTP Status Code: %(status)s\n" - "HTTP Headers:\n" - "%(headers)s\n" - "Body:\n" - "%(body)s\n", (params)) - - def _eval_response(self, response): - """Evaluates response before passing result to invoker.""" - status_code = int(response.status_code) - # codes >= 300 are not ok and to be treated as errors - if status_code >= 300: - # Response code 422 returns error code and message - if status_code == 422: - msg = _("Response error - %s.") % response.text - json_response = response.json() - if json_response is not None: - ret_code = json_response.get('retcode', '') - if ret_code == '30' or ret_code == 'authFailPassword': - msg = _("The storage array password for %s is " - "incorrect, please 
update the configured " - "password.") % self._system_id - elif status_code == 424: - msg = _("Response error - The storage-system is offline.") - else: - msg = _("Response error code - %s.") % status_code - raise es_exception.WebServiceException(msg, - status_code=status_code) - - def _get_volume_api_path(self, path_key): - """Retrieve the correct API path based on API availability - - :param path_key: The volume API to request (volume or volumes) - :raise KeyError: If the path_key is not valid - """ - if self.features.SSC_API_V2: - path_key = 'ssc_' + path_key - return self.RESOURCE_PATHS[path_key] - - def create_volume(self, pool, label, size, unit='gb', seg_size=0, - read_cache=None, write_cache=None, flash_cache=None, - data_assurance=None, thin_provision=False): - """Creates a volume on array with the configured attributes - - Note: if read_cache, write_cache, flash_cache, or data_assurance - are not provided, the default will be utilized by the Webservice. - - :param pool: The pool unique identifier - :param label: The unqiue label for the volume - :param size: The capacity in units - :param unit: The unit for capacity - :param seg_size: The segment size for the volume, expressed in KB. - Default will allow the Webservice to choose. - :param read_cache: If true, enable read caching, if false, - explicitly disable it. - :param write_cache: If true, enable write caching, if false, - explicitly disable it. 
- :param flash_cache: If true, add the volume to a Flash Cache - :param data_assurance: If true, enable the Data Assurance capability - :returns: The created volume - """ - - # Utilize the new API if it is available - if self.features.SSC_API_V2: - path = "/storage-systems/{system-id}/ssc/volumes" - data = {'poolId': pool, 'name': label, 'sizeUnit': unit, - 'size': int(size), 'dataAssuranceEnable': data_assurance, - 'flashCacheEnable': flash_cache, - 'readCacheEnable': read_cache, - 'writeCacheEnable': write_cache, - 'thinProvision': thin_provision} - # Use the old API - else: - # Determine if there are were extra specs provided that are not - # supported - extra_specs = [read_cache, write_cache] - unsupported_spec = any([spec is not None for spec in extra_specs]) - if(unsupported_spec): - msg = _("E-series proxy API version %(current_version)s does " - "not support full set of SSC extra specs. The proxy" - " version must be at at least %(min_version)s.") - min_version = self.features.SSC_API_V2.minimum_version - raise exception.NetAppDriverException(msg % - {'current_version': - self.api_version, - 'min_version': - min_version}) - - path = "/storage-systems/{system-id}/volumes" - data = {'poolId': pool, 'name': label, 'sizeUnit': unit, - 'size': int(size), 'segSize': seg_size} - return self._invoke('POST', path, data) - - def delete_volume(self, object_id): - """Deletes given volume from array.""" - if self.features.SSC_API_V2: - path = self.RESOURCE_PATHS.get('ssc_volume') - else: - path = self.RESOURCE_PATHS.get('volume') - return self._invoke('DELETE', path, **{'object-id': object_id}) - - def list_volumes(self): - """Lists all volumes in storage array.""" - if self.features.SSC_API_V2: - path = self.RESOURCE_PATHS.get('ssc_volumes') - else: - path = self.RESOURCE_PATHS.get('volumes') - - return self._invoke('GET', path) - - def list_volume(self, object_id): - """Retrieve the given volume from array. 
- - :param object_id: The volume id, label, or wwn - :returns: The volume identified by object_id - :raise: VolumeNotFound if the volume could not be found - """ - - if self.features.SSC_API_V2: - return self._list_volume_v2(object_id) - # The new API is not available, - else: - # Search for the volume with label, id, or wwn. - return self._list_volume_v1(object_id) - - def _list_volume_v1(self, object_id): - # Search for the volume with label, id, or wwn. - for vol in self.list_volumes(): - if (object_id == vol.get(self.NAME) or object_id == vol.get( - self.WWN) or object_id == vol.get(self.ID)): - return vol - # The volume could not be found - raise exception.VolumeNotFound(volume_id=object_id) - - def _list_volume_v2(self, object_id): - path = self.RESOURCE_PATHS.get('ssc_volume') - try: - return self._invoke('GET', path, **{'object-id': object_id}) - except es_exception.WebServiceException as e: - if 404 == e.status_code: - raise exception.VolumeNotFound(volume_id=object_id) - else: - raise - - def update_volume(self, object_id, label): - """Renames given volume on array.""" - if self.features.SSC_API_V2: - path = self.RESOURCE_PATHS.get('ssc_volume') - else: - path = self.RESOURCE_PATHS.get('volume') - data = {'name': label} - return self._invoke('POST', path, data, **{'object-id': object_id}) - - def create_consistency_group(self, name, warn_at_percent_full=75, - rollback_priority='medium', - full_policy='failbasewrites'): - """Define a new consistency group""" - path = self.RESOURCE_PATHS.get('cgroups') - data = { - 'name': name, - 'fullWarnThresholdPercent': warn_at_percent_full, - 'repositoryFullPolicy': full_policy, - # A non-zero threshold enables auto-deletion - 'autoDeleteThreshold': 0, - 'rollbackPriority': rollback_priority, - } - - return self._invoke('POST', path, data) - - def get_consistency_group(self, object_id): - """Retrieve the consistency group identified by object_id""" - path = self.RESOURCE_PATHS.get('cgroup') - - return 
self._invoke('GET', path, **{'object-id': object_id}) - - def list_consistency_groups(self): - """Retrieve all consistency groups defined on the array""" - path = self.RESOURCE_PATHS.get('cgroups') - - return self._invoke('GET', path) - - def delete_consistency_group(self, object_id): - path = self.RESOURCE_PATHS.get('cgroup') - - self._invoke('DELETE', path, **{'object-id': object_id}) - - def add_consistency_group_member(self, volume_id, cg_id, - repo_percent=20.0): - """Add a volume to a consistency group - - :param volume_id the eseries volume id - :param cg_id: the eseries cg id - :param repo_percent: percentage capacity of the volume to use for - capacity of the copy-on-write repository - """ - path = self.RESOURCE_PATHS.get('cgroup_members') - data = {'volumeId': volume_id, 'repositoryPercent': repo_percent} - - return self._invoke('POST', path, data, **{'object-id': cg_id}) - - def remove_consistency_group_member(self, volume_id, cg_id): - """Remove a volume from a consistency group""" - path = self.RESOURCE_PATHS.get('cgroup_member') - - self._invoke('DELETE', path, **{'object-id': cg_id, - 'vol-id': volume_id}) - - def create_consistency_group_snapshot(self, cg_id): - """Define a consistency group snapshot""" - path = self.RESOURCE_PATHS.get('cgroup_snapshots') - - return self._invoke('POST', path, **{'object-id': cg_id}) - - def delete_consistency_group_snapshot(self, cg_id, seq_num): - """Define a consistency group snapshot""" - path = self.RESOURCE_PATHS.get('cgroup_snapshot') - - return self._invoke('DELETE', path, **{'object-id': cg_id, - 'seq-num': seq_num}) - - def get_consistency_group_snapshots(self, cg_id): - """Retrieve all snapshots defined for a consistency group""" - path = self.RESOURCE_PATHS.get('cgroup_snapshots') - - return self._invoke('GET', path, **{'object-id': cg_id}) - - def create_cg_snapshot_view(self, cg_id, name, snap_id): - """Define a snapshot view for the cgsnapshot - - In order to define a snapshot view for a snapshot 
defined under a - consistency group, the view must be defined at the cgsnapshot - level. - - :param cg_id: E-Series cg identifier - :param name: the label for the view - :param snap_id: E-Series snapshot view to locate - :raise NetAppDriverException: if the snapshot view cannot be - located for the snapshot identified - by snap_id - :return: snapshot view for snapshot identified by snap_id - """ - path = self.RESOURCE_PATHS.get('cgroup_cgsnap_views') - - data = { - 'name': name, - 'accessMode': 'readOnly', - # Only define a view for this snapshot - 'pitId': snap_id, - } - # Define a view for the cgsnapshot - cgsnapshot_view = self._invoke( - 'POST', path, data, **{'object-id': cg_id}) - - # Retrieve the snapshot views associated with our cgsnapshot view - views = self.list_cg_snapshot_views(cg_id, cgsnapshot_view[ - 'cgViewRef']) - # Find the snapshot view defined for our snapshot - for view in views: - if view['basePIT'] == snap_id: - return view - else: - try: - self.delete_cg_snapshot_view(cg_id, cgsnapshot_view['id']) - finally: - raise exception.NetAppDriverException( - 'Unable to create snapshot view.') - - def list_cg_snapshot_views(self, cg_id, view_id): - path = self.RESOURCE_PATHS.get('cgroup_snapshot_views') - - return self._invoke('GET', path, **{'object-id': cg_id, - 'view-id': view_id}) - - def delete_cg_snapshot_view(self, cg_id, view_id): - path = self.RESOURCE_PATHS.get('cgroup_snap_view') - - return self._invoke('DELETE', path, **{'object-id': cg_id, - 'view-id': view_id}) - - def get_pool_operation_progress(self, object_id): - """Retrieve the progress long-running operations on a storage pool - - Example: - - .. code-block:: python - - [ - { - "volumeRef": "3232....", # Volume being referenced - "progressPercentage": 0, # Approxmate percent complete - "estimatedTimeToCompletion": 0, # ETA in minutes - "currentAction": "none" # Current volume action - } - ... 
- ] - - :param object_id: A pool id - :returns: A dict representing the action progress - """ - path = self.RESOURCE_PATHS.get('pool_operation_progress') - return self._invoke('GET', path, **{'object-id': object_id}) - - def expand_volume(self, object_id, new_capacity, thin_provisioned, - capacity_unit='gb'): - """Increase the capacity of a volume""" - if thin_provisioned: - path = self.RESOURCE_PATHS.get('thin_volume_expand') - data = {'newVirtualSize': new_capacity, 'sizeUnit': capacity_unit, - 'newRepositorySize': new_capacity} - return self._invoke('POST', path, data, **{'object-id': object_id}) - else: - path = self.RESOURCE_PATHS.get('volume_expand') - data = {'expansionSize': new_capacity, 'sizeUnit': capacity_unit} - return self._invoke('POST', path, data, **{'object-id': object_id}) - - def get_volume_mappings(self): - """Creates volume mapping on array.""" - path = "/storage-systems/{system-id}/volume-mappings" - return self._invoke('GET', path) - - def get_volume_mappings_for_volume(self, volume): - """Gets all host mappings for given volume from array.""" - mappings = self.get_volume_mappings() or [] - return [x for x in mappings - if x.get('volumeRef') == volume['volumeRef']] - - def get_volume_mappings_for_host(self, host_ref): - """Gets all volume mappings for given host from array.""" - mappings = self.get_volume_mappings() or [] - return [x for x in mappings if x.get('mapRef') == host_ref] - - def get_volume_mappings_for_host_group(self, hg_ref): - """Gets all volume mappings for given host group from array.""" - mappings = self.get_volume_mappings() or [] - return [x for x in mappings if x.get('mapRef') == hg_ref] - - def create_volume_mapping(self, object_id, target_id, lun): - """Creates volume mapping on array.""" - path = "/storage-systems/{system-id}/volume-mappings" - data = {'mappableObjectId': object_id, 'targetId': target_id, - 'lun': lun} - return self._invoke('POST', path, data) - - def delete_volume_mapping(self, map_object_id): - 
"""Deletes given volume mapping from array.""" - path = "/storage-systems/{system-id}/volume-mappings/{object-id}" - return self._invoke('DELETE', path, **{'object-id': map_object_id}) - - def move_volume_mapping_via_symbol(self, map_ref, to_ref, lun_id): - """Moves a map from one host/host_group object to another.""" - - path = "/storage-systems/{system-id}/symbol/moveLUNMapping" - data = {'lunMappingRef': map_ref, - 'lun': int(lun_id), - 'mapRef': to_ref} - return_code = self._invoke('POST', path, data) - if return_code == 'ok': - return {'lun': lun_id} - msg = _("Failed to move LUN mapping. Return code: %s") % return_code - raise exception.NetAppDriverException(msg) - - def list_hardware_inventory(self): - """Lists objects in the hardware inventory.""" - path = "/storage-systems/{system-id}/hardware-inventory" - return self._invoke('GET', path) - - def list_target_wwpns(self): - """Lists the world-wide port names of the target.""" - inventory = self.list_hardware_inventory() - fc_ports = inventory.get("fibrePorts", []) - wwpns = [port['portName'] for port in fc_ports] - return wwpns - - def create_host_group(self, label): - """Creates a host group on the array.""" - path = "/storage-systems/{system-id}/host-groups" - data = {'name': label} - return self._invoke('POST', path, data) - - def get_host_group(self, host_group_ref): - """Gets a single host group from the array.""" - path = "/storage-systems/{system-id}/host-groups/{object-id}" - try: - return self._invoke('GET', path, **{'object-id': host_group_ref}) - except exception.NetAppDriverException: - raise exception.NotFound(_("Host group with ref %s not found") % - host_group_ref) - - def get_host_group_by_name(self, name): - """Gets a single host group by name from the array.""" - host_groups = self.list_host_groups() - matching = [host_group for host_group in host_groups - if host_group['label'] == name] - if len(matching): - return matching[0] - raise exception.NotFound(_("Host group with name %s not 
found") % name) - - def list_host_groups(self): - """Lists host groups on the array.""" - path = "/storage-systems/{system-id}/host-groups" - return self._invoke('GET', path) - - def list_hosts(self): - """Lists host objects in the system.""" - path = "/storage-systems/{system-id}/hosts" - return self._invoke('GET', path) - - def create_host(self, label, host_type, ports=None, group_id=None): - """Creates host on array.""" - path = "/storage-systems/{system-id}/hosts" - data = {'name': label, 'hostType': host_type} - data.setdefault('groupId', group_id if group_id else None) - data.setdefault('ports', ports if ports else None) - return self._invoke('POST', path, data) - - def create_host_with_ports(self, label, host_type, port_ids, - port_type='iscsi', group_id=None): - """Creates host on array with given port information.""" - if port_type == 'fc': - port_ids = [six.text_type(wwpn).replace(':', '') - for wwpn in port_ids] - ports = [] - for port_id in port_ids: - port_label = utils.convert_uuid_to_es_fmt(uuid.uuid4()) - port = {'type': port_type, 'port': port_id, 'label': port_label} - ports.append(port) - return self.create_host(label, host_type, ports, group_id) - - def update_host(self, host_ref, data): - """Updates host type for a given host.""" - path = "/storage-systems/{system-id}/hosts/{object-id}" - return self._invoke('POST', path, data, **{'object-id': host_ref}) - - def get_host(self, host_ref): - """Gets a single host from the array.""" - path = "/storage-systems/{system-id}/hosts/{object-id}" - return self._invoke('GET', path, **{'object-id': host_ref}) - - def update_host_type(self, host_ref, host_type): - """Updates host type for a given host.""" - data = {'hostType': host_type} - return self.update_host(host_ref, data) - - def set_host_group_for_host(self, host_ref, host_group_ref=utils.NULL_REF): - """Sets or clears which host group a host is in.""" - data = {'groupId': host_group_ref} - self.update_host(host_ref, data) - - def 
list_host_types(self): - """Lists host types in storage system.""" - path = "/storage-systems/{system-id}/host-types" - return self._invoke('GET', path) - - def list_snapshot_groups(self): - """Lists snapshot groups.""" - path = self.RESOURCE_PATHS['snapshot_groups'] - return self._invoke('GET', path) - - def list_snapshot_group(self, object_id): - """Retrieve given snapshot group from the array.""" - path = self.RESOURCE_PATHS['snapshot_group'] - return self._invoke('GET', path, **{'object-id': object_id}) - - def create_snapshot_group(self, label, object_id, storage_pool_id=None, - repo_percent=99, warn_thres=99, auto_del_limit=0, - full_policy='failbasewrites'): - """Creates snapshot group on array.""" - path = self.RESOURCE_PATHS['snapshot_groups'] - data = {'baseMappableObjectId': object_id, 'name': label, - 'storagePoolId': storage_pool_id, - 'repositoryPercentage': repo_percent, - 'warningThreshold': warn_thres, - 'autoDeleteLimit': auto_del_limit, 'fullPolicy': full_policy} - return self._invoke('POST', path, data) - - def update_snapshot_group(self, group_id, label): - """Modify a snapshot group on the array.""" - path = self.RESOURCE_PATHS['snapshot_group'] - data = {'name': label} - return self._invoke('POST', path, data, **{'object-id': group_id}) - - def delete_snapshot_group(self, object_id): - """Deletes given snapshot group from array.""" - path = self.RESOURCE_PATHS['snapshot_group'] - return self._invoke('DELETE', path, **{'object-id': object_id}) - - def create_snapshot_image(self, group_id): - """Creates snapshot image in snapshot group.""" - path = self.RESOURCE_PATHS['snapshot_images'] - data = {'groupId': group_id} - return self._invoke('POST', path, data) - - def delete_snapshot_image(self, object_id): - """Deletes given snapshot image in snapshot group.""" - path = self.RESOURCE_PATHS['snapshot_image'] - return self._invoke('DELETE', path, **{'object-id': object_id}) - - def list_snapshot_image(self, object_id): - """Retrieve given snapshot 
image from the array.""" - path = self.RESOURCE_PATHS['snapshot_image'] - return self._invoke('GET', path, **{'object-id': object_id}) - - def list_snapshot_images(self): - """Lists snapshot images.""" - path = self.RESOURCE_PATHS['snapshot_images'] - return self._invoke('GET', path) - - def create_snapshot_volume(self, image_id, label, base_object_id, - storage_pool_id=None, - repo_percent=99, full_thres=99, - view_mode='readOnly'): - """Creates snapshot volume.""" - path = self.RESOURCE_PATHS['snapshot_volumes'] - data = {'snapshotImageId': image_id, 'fullThreshold': full_thres, - 'storagePoolId': storage_pool_id, - 'name': label, 'viewMode': view_mode, - 'repositoryPercentage': repo_percent, - 'baseMappableObjectId': base_object_id, - 'repositoryPoolId': storage_pool_id} - return self._invoke('POST', path, data) - - def update_snapshot_volume(self, snap_vol_id, label=None, full_thres=None): - """Modify an existing snapshot volume.""" - path = self.RESOURCE_PATHS['snapshot_volume'] - data = {'name': label, 'fullThreshold': full_thres} - return self._invoke('POST', path, data, **{'object-id': snap_vol_id}) - - def delete_snapshot_volume(self, object_id): - """Deletes given snapshot volume.""" - path = self.RESOURCE_PATHS['snapshot_volume'] - return self._invoke('DELETE', path, **{'object-id': object_id}) - - def list_snapshot_volumes(self): - """Lists snapshot volumes/views defined on the array.""" - path = self.RESOURCE_PATHS['snapshot_volumes'] - return self._invoke('GET', path) - - def list_ssc_storage_pools(self): - """Lists pools and their service quality defined on the array.""" - path = "/storage-systems/{system-id}/ssc/pools" - return self._invoke('GET', path) - - def get_ssc_storage_pool(self, volume_group_ref): - """Get storage pool service quality information from the array.""" - path = "/storage-systems/{system-id}/ssc/pools/{object-id}" - return self._invoke('GET', path, **{'object-id': volume_group_ref}) - - def list_storage_pools(self): - """Lists 
storage pools in the array.""" - path = "/storage-systems/{system-id}/storage-pools" - return self._invoke('GET', path) - - def get_storage_pool(self, volume_group_ref): - """Get storage pool information from the array.""" - path = "/storage-systems/{system-id}/storage-pools/{object-id}" - return self._invoke('GET', path, **{'object-id': volume_group_ref}) - - def list_drives(self): - """Lists drives in the array.""" - path = "/storage-systems/{system-id}/drives" - return self._invoke('GET', path) - - def list_storage_systems(self): - """Lists managed storage systems registered with web service.""" - path = "/storage-systems" - return self._invoke('GET', path, use_system=False) - - def list_storage_system(self): - """List current storage system registered with web service.""" - path = "/storage-systems/{system-id}" - return self._invoke('GET', path) - - def register_storage_system(self, controller_addresses, password=None, - wwn=None): - """Registers storage system with web service.""" - path = "/storage-systems" - data = {'controllerAddresses': controller_addresses} - data.setdefault('wwn', wwn if wwn else None) - data.setdefault('password', password if password else None) - return self._invoke('POST', path, data, use_system=False) - - def update_stored_system_password(self, password): - """Update array password stored on web service.""" - path = "/storage-systems/{system-id}" - data = {'storedPassword': password} - return self._invoke('POST', path, data) - - def create_volume_copy_job(self, src_id, tgt_id, priority='priority4', - tgt_wrt_protected='true'): - """Creates a volume copy job.""" - path = "/storage-systems/{system-id}/volume-copy-jobs" - data = {'sourceId': src_id, 'targetId': tgt_id, - 'copyPriority': priority, - 'targetWriteProtected': tgt_wrt_protected} - return self._invoke('POST', path, data) - - def control_volume_copy_job(self, obj_id, control='start'): - """Controls a volume copy job.""" - path = 
("/storage-systems/{system-id}/volume-copy-jobs-control" - "/{object-id}?control={String}") - return self._invoke('PUT', path, **{'object-id': obj_id, - 'String': control}) - - def list_vol_copy_job(self, object_id): - """List volume copy job.""" - path = "/storage-systems/{system-id}/volume-copy-jobs/{object-id}" - return self._invoke('GET', path, **{'object-id': object_id}) - - def delete_vol_copy_job(self, object_id): - """Delete volume copy job.""" - path = "/storage-systems/{system-id}/volume-copy-jobs/{object-id}" - return self._invoke('DELETE', path, **{'object-id': object_id}) - - def set_chap_authentication(self, target_iqn, chap_username, - chap_password): - """Configures CHAP credentials for target IQN from backend.""" - path = "/storage-systems/{system-id}/iscsi/target-settings/" - data = { - 'iqn': target_iqn, - 'enableChapAuthentication': True, - 'alias': chap_username, - 'authMethod': 'chap', - 'chapSecret': chap_password, - } - return self._invoke('POST', path, data) - - def add_autosupport_data(self, key, data): - """Register driver statistics via autosupport log.""" - path = ('/key-values/%s' % key) - self._invoke('POST', path, json.dumps(data)) - - def set_counter(self, key, value): - path = ('/counters/%s/setCounter?value=%d' % (key, value)) - self._invoke('POST', path) - - def get_asup_info(self): - """Returns a dictionary of relevant autosupport information. 
- - Currently returned fields are: - model -- E-series model name - serial_numbers -- Serial number for each controller - firmware_version -- Version of active firmware - chassis_sn -- Serial number for whole chassis - """ - asup_info = {} - - controllers = self.list_hardware_inventory().get('controllers') - if controllers: - asup_info['model'] = controllers[0].get('modelName', 'unknown') - serial_numbers = [value['serialNumber'].rstrip() - for __, value in enumerate(controllers)] - serial_numbers.sort() - for index, value in enumerate(serial_numbers): - if not value: - serial_numbers[index] = 'unknown' - asup_info['serial_numbers'] = serial_numbers - else: - asup_info['model'] = 'unknown' - asup_info['serial_numbers'] = ['unknown', 'unknown'] - - system_info = self.list_storage_system() - if system_info: - asup_info['firmware_version'] = system_info['fwVersion'] - asup_info['chassis_sn'] = system_info['chassisSerialNumber'] - else: - asup_info['firmware_version'] = 'unknown' - asup_info['chassis_sn'] = 'unknown' - - return asup_info - - def get_eseries_api_info(self, verify=False): - """Get E-Series API information from the array.""" - api_operating_mode = 'embedded' - path = 'devmgr/utils/about' - headers = {'Content-Type': 'application/json', - 'Accept': 'application/json'} - url = self._get_resource_url(path, True).replace( - '/devmgr/v2', '', 1) - result = self.invoke_service(method='GET', url=url, - headers=headers, - verify=verify) - about_response_dict = result.json() - mode_is_proxy = about_response_dict['runningAsProxy'] - if mode_is_proxy: - api_operating_mode = 'proxy' - return api_operating_mode, about_response_dict['version'] - - def list_backend_store(self, key): - """Retrieve data by key from the persistent store on the backend. 
- - Example response: {"key": "cinder-snapshots", "value": "[]"} - - :param key: the persistent store to retrieve - :returns: a json body representing the value of the store, - or an empty json object - """ - path = self.RESOURCE_PATHS.get('persistent-store') - try: - resp = self._invoke('GET', path, **{'key': key}) - except exception.NetAppDriverException: - return dict() - else: - data = resp['value'] - if data: - return json.loads(data) - return dict() - - def save_backend_store(self, key, store_data): - """Save a json value to the persistent storage on the backend. - - The storage backend provides a small amount of persistent storage - that we can utilize for storing driver information. - - :param key: The key utilized for storing/retrieving the data - :param store_data: a python data structure that will be stored as a - json value - """ - path = self.RESOURCE_PATHS.get('persistent-stores') - store_data = json.dumps(store_data, separators=(',', ':')) - - data = { - 'key': key, - 'value': store_data - } - - self._invoke('POST', path, data) diff --git a/cinder/volume/drivers/netapp/eseries/exception.py b/cinder/volume/drivers/netapp/eseries/exception.py deleted file mode 100644 index c2c517127..000000000 --- a/cinder/volume/drivers/netapp/eseries/exception.py +++ /dev/null @@ -1,33 +0,0 @@ -# Copyright (c) 2015 Alex Meade. All Rights Reserved. -# Copyright (c) 2015 Michael Price. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -from cinder import exception -from cinder.i18n import _ - - -class VolumeNotMapped(exception.NetAppDriverException): - message = _("Volume %(volume_id)s is not currently mapped to host " - "%(host)s") - - -class UnsupportedHostGroup(exception.NetAppDriverException): - message = _("Volume %(volume_id)s is currently mapped to unsupported " - "host group %(group)s") - - -class WebServiceException(exception.NetAppDriverException): - def __init__(self, message=None, status_code=None): - self.status_code = status_code - super(WebServiceException, self).__init__(message=message) diff --git a/cinder/volume/drivers/netapp/eseries/fc_driver.py b/cinder/volume/drivers/netapp/eseries/fc_driver.py deleted file mode 100644 index 301a7a4ef..000000000 --- a/cinder/volume/drivers/netapp/eseries/fc_driver.py +++ /dev/null @@ -1,124 +0,0 @@ -# Copyright (c) - 2014, Alex Meade. All rights reserved. -# Copyright (c) - 2015, Yogesh Kshirsagar. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -""" -Volume driver for NetApp E-Series FibreChannel storage systems. 
-""" - -from cinder import interface -from cinder.volume import driver -from cinder.volume.drivers.netapp.eseries import library -from cinder.volume.drivers.netapp import utils as na_utils -from cinder.zonemanager import utils as fczm_utils - - -@interface.volumedriver -class NetAppEseriesFibreChannelDriver(driver.BaseVD, - driver.ManageableVD): - """NetApp E-Series FibreChannel volume driver.""" - - DRIVER_NAME = 'NetApp_FibreChannel_ESeries' - - # ThirdPartySystems wiki page - CI_WIKI_NAME = "NetApp_Eseries_CI" - VERSION = library.NetAppESeriesLibrary.VERSION - - def __init__(self, *args, **kwargs): - super(NetAppEseriesFibreChannelDriver, self).__init__(*args, **kwargs) - na_utils.validate_instantiation(**kwargs) - self.library = library.NetAppESeriesLibrary(self.DRIVER_NAME, - 'FC', **kwargs) - - def do_setup(self, context): - self.library.do_setup(context) - - def check_for_setup_error(self): - self.library.check_for_setup_error() - - def create_volume(self, volume): - self.library.create_volume(volume) - - def create_volume_from_snapshot(self, volume, snapshot): - self.library.create_volume_from_snapshot(volume, snapshot) - - def create_cloned_volume(self, volume, src_vref): - self.library.create_cloned_volume(volume, src_vref) - - def delete_volume(self, volume): - self.library.delete_volume(volume) - - def create_snapshot(self, snapshot): - return self.library.create_snapshot(snapshot) - - def delete_snapshot(self, snapshot): - self.library.delete_snapshot(snapshot) - - def get_volume_stats(self, refresh=False): - return self.library.get_volume_stats(refresh) - - def extend_volume(self, volume, new_size): - self.library.extend_volume(volume, new_size) - - def ensure_export(self, context, volume): - return self.library.ensure_export(context, volume) - - def create_export(self, context, volume, connector): - return self.library.create_export(context, volume) - - def remove_export(self, context, volume): - self.library.remove_export(context, volume) - - def 
manage_existing(self, volume, existing_ref): - return self.library.manage_existing(volume, existing_ref) - - def manage_existing_get_size(self, volume, existing_ref): - return self.library.manage_existing_get_size(volume, existing_ref) - - def unmanage(self, volume): - return self.library.unmanage(volume) - - @fczm_utils.add_fc_zone - def initialize_connection(self, volume, connector, **kwargs): - return self.library.initialize_connection_fc(volume, connector) - - @fczm_utils.remove_fc_zone - def terminate_connection(self, volume, connector, **kwargs): - return self.library.terminate_connection_fc(volume, connector, - **kwargs) - - def get_pool(self, volume): - return self.library.get_pool(volume) - - def create_cgsnapshot(self, context, cgsnapshot, snapshots): - return self.library.create_cgsnapshot(cgsnapshot, snapshots) - - def delete_cgsnapshot(self, context, cgsnapshot, snapshots): - return self.library.delete_cgsnapshot(cgsnapshot, snapshots) - - def create_consistencygroup(self, context, group): - return self.library.create_consistencygroup(group) - - def delete_consistencygroup(self, context, group, volumes): - return self.library.delete_consistencygroup(group, volumes) - - def update_consistencygroup(self, context, group, - add_volumes=None, remove_volumes=None): - return self.library.update_consistencygroup( - group, add_volumes, remove_volumes) - - def create_consistencygroup_from_src(self, context, group, volumes, - cgsnapshot=None, snapshots=None, - source_cg=None, source_vols=None): - return self.library.create_consistencygroup_from_src( - group, volumes, cgsnapshot, snapshots, source_cg, source_vols) diff --git a/cinder/volume/drivers/netapp/eseries/host_mapper.py b/cinder/volume/drivers/netapp/eseries/host_mapper.py deleted file mode 100644 index 1d8ed300b..000000000 --- a/cinder/volume/drivers/netapp/eseries/host_mapper.py +++ /dev/null @@ -1,250 +0,0 @@ -# Copyright (c) 2015 Alex Meade. All Rights Reserved. -# Copyright (c) 2015 Yogesh Kshirsagar. 
All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -""" This module handles mapping E-Series volumes to E-Series Hosts and Host -Groups. -""" - -import collections -import random - -from oslo_log import log as logging -from six.moves import range - -from cinder import exception -from cinder.i18n import _ -from cinder.objects import fields -from cinder import utils as cinder_utils -from cinder.volume.drivers.netapp.eseries import exception as eseries_exc -from cinder.volume.drivers.netapp.eseries import utils - - -LOG = logging.getLogger(__name__) - - -@cinder_utils.trace_method -@cinder_utils.synchronized('map_es_volume') -def map_volume_to_single_host(client, volume, eseries_vol, host, - vol_map, multiattach_enabled): - """Maps the e-series volume to host with initiator.""" - LOG.debug("Attempting to map volume %s to single host.", volume['id']) - - # If volume is not mapped on the backend, map directly to host - if not vol_map: - mappings = client.get_volume_mappings_for_host(host['hostRef']) - lun = _get_free_lun(client, host, multiattach_enabled, mappings) - return client.create_volume_mapping(eseries_vol['volumeRef'], - host['hostRef'], lun) - - # If volume is already mapped to desired host - if vol_map.get('mapRef') == host['hostRef']: - return vol_map - - multiattach_cluster_ref = None - try: - host_group = client.get_host_group_by_name( - utils.MULTI_ATTACH_HOST_GROUP_NAME) - multiattach_cluster_ref = host_group['clusterRef'] - except 
exception.NotFound: - pass - - # Volume is mapped to the multiattach host group - if vol_map.get('mapRef') == multiattach_cluster_ref: - LOG.debug("Volume %s is mapped to multiattach host group.", - volume['id']) - - # If volume is not currently attached according to Cinder, it is - # safe to delete the mapping - if not (volume['attach_status'] == fields.VolumeAttachStatus.ATTACHED): - LOG.debug("Volume %(vol)s is not currently attached, moving " - "existing mapping to host %(host)s.", - {'vol': volume['id'], 'host': host['label']}) - mappings = client.get_volume_mappings_for_host( - host['hostRef']) - lun = _get_free_lun(client, host, multiattach_enabled, mappings) - return client.move_volume_mapping_via_symbol( - vol_map.get('mapRef'), host['hostRef'], lun - ) - - # If we got this far, volume must be mapped to something else - msg = _("Cannot attach already attached volume %s; " - "multiattach is disabled via the " - "'netapp_enable_multiattach' configuration option.") - raise exception.NetAppDriverException(msg % volume['id']) - - -@cinder_utils.trace_method -@cinder_utils.synchronized('map_es_volume') -def map_volume_to_multiple_hosts(client, volume, eseries_vol, target_host, - mapping): - """Maps the e-series volume to multiattach host group.""" - - LOG.debug("Attempting to map volume %s to multiple hosts.", volume['id']) - - # If volume is already mapped to desired host, return the mapping - if mapping['mapRef'] == target_host['hostRef']: - LOG.debug("Volume %(vol)s already mapped to host %(host)s", - {'vol': volume['id'], 'host': target_host['label']}) - return mapping - - # If target host in a host group, ensure it is the multiattach host group - if target_host['clusterRef'] != utils.NULL_REF: - host_group = client.get_host_group(target_host[ - 'clusterRef']) - if host_group['label'] != utils.MULTI_ATTACH_HOST_GROUP_NAME: - msg = _("Specified host to map to volume %(vol)s is in " - "unsupported host group with %(group)s.") - params = {'vol': volume['id'], 
'group': host_group['label']} - raise eseries_exc.UnsupportedHostGroup(msg % params) - - mapped_host_group = None - multiattach_host_group = None - try: - mapped_host_group = client.get_host_group(mapping['mapRef']) - # If volume is mapped to a foreign host group raise an error - if mapped_host_group['label'] != utils.MULTI_ATTACH_HOST_GROUP_NAME: - raise eseries_exc.UnsupportedHostGroup( - volume_id=volume['id'], group=mapped_host_group['label']) - multiattach_host_group = mapped_host_group - except exception.NotFound: - pass - - if not multiattach_host_group: - multiattach_host_group = client.get_host_group_by_name( - utils.MULTI_ATTACH_HOST_GROUP_NAME) - - # If volume is mapped directly to a host, move the host into the - # multiattach host group. Error if the host is in a foreign host group - if not mapped_host_group: - current_host = client.get_host(mapping['mapRef']) - if current_host['clusterRef'] != utils.NULL_REF: - host_group = client.get_host_group(current_host[ - 'clusterRef']) - if host_group['label'] != utils.MULTI_ATTACH_HOST_GROUP_NAME: - msg = _("Currently mapped host for volume %(vol)s is in " - "unsupported host group with %(group)s.") - params = {'vol': volume['id'], 'group': host_group['label']} - raise eseries_exc.UnsupportedHostGroup(msg % params) - client.set_host_group_for_host(current_host['hostRef'], - multiattach_host_group['clusterRef']) - - # Move destination host into multiattach host group - client.set_host_group_for_host(target_host[ - 'hostRef'], multiattach_host_group['clusterRef']) - - # Once both existing and target hosts are in the multiattach host group, - # move the volume mapping to said group. 
- if not mapped_host_group: - LOG.debug("Moving mapping for volume %s to multiattach host group.", - volume['id']) - return client.move_volume_mapping_via_symbol( - mapping.get('lunMappingRef'), - multiattach_host_group['clusterRef'], - mapping['lun'] - ) - - return mapping - - -def _get_free_lun(client, host, multiattach_enabled, mappings): - """Returns least used LUN ID available on the given host.""" - if not _is_host_full(client, host): - unused_luns = _get_unused_lun_ids(mappings) - if unused_luns: - chosen_lun = random.sample(unused_luns, 1) - return chosen_lun[0] - elif multiattach_enabled: - msg = _("No unused LUN IDs are available on the host; " - "multiattach is enabled which requires that all LUN IDs " - "to be unique across the entire host group.") - raise exception.NetAppDriverException(msg) - used_lun_counts = _get_used_lun_id_counter(mappings) - # most_common returns an arbitrary tuple of members with same frequency - for lun_id, __ in reversed(used_lun_counts.most_common()): - if _is_lun_id_available_on_host(client, host, lun_id): - return lun_id - msg = _("No free LUN IDs left. 
Maximum number of volumes that can be " - "attached to host (%s) has been exceeded.") - raise exception.NetAppDriverException(msg % utils.MAX_LUNS_PER_HOST) - - -def _get_unused_lun_ids(mappings): - """Returns unused LUN IDs given mappings.""" - used_luns = _get_used_lun_ids_for_mappings(mappings) - - unused_luns = (set(range(utils.MAX_LUNS_PER_HOST)) - set(used_luns)) - return unused_luns - - -def _get_used_lun_id_counter(mapping): - """Returns used LUN IDs with count as a dictionary.""" - used_luns = _get_used_lun_ids_for_mappings(mapping) - used_lun_id_counter = collections.Counter(used_luns) - return used_lun_id_counter - - -def _is_host_full(client, host): - """Checks whether maximum volumes attached to a host have been reached.""" - luns = client.get_volume_mappings_for_host(host['hostRef']) - return len(luns) >= utils.MAX_LUNS_PER_HOST - - -def _is_lun_id_available_on_host(client, host, lun_id): - """Returns a boolean value depending on whether a LUN ID is available.""" - mapping = client.get_volume_mappings_for_host(host['hostRef']) - used_lun_ids = _get_used_lun_ids_for_mappings(mapping) - return lun_id not in used_lun_ids - - -def _get_used_lun_ids_for_mappings(mappings): - """Returns used LUNs when provided with mappings.""" - used_luns = set(map(lambda lun: int(lun['lun']), mappings)) - # E-Series uses LUN ID 0 for special purposes and should not be - # assigned for general use - used_luns.add(0) - return used_luns - - -def unmap_volume_from_host(client, volume, host, mapping): - # Volume is mapped directly to host, so delete the mapping - if mapping.get('mapRef') == host['hostRef']: - LOG.debug("Volume %(vol)s is mapped directly to host %(host)s; " - "removing mapping.", {'vol': volume['id'], - 'host': host['label']}) - client.delete_volume_mapping(mapping['lunMappingRef']) - return - - try: - host_group = client.get_host_group(mapping['mapRef']) - except exception.NotFound: - # Volumes is mapped but to a different initiator - raise 
eseries_exc.VolumeNotMapped(volume_id=volume['id'], - host=host['label']) - # If volume is mapped to a foreign host group raise error - if host_group['label'] != utils.MULTI_ATTACH_HOST_GROUP_NAME: - raise eseries_exc.UnsupportedHostGroup(volume_id=volume['id'], - group=host_group['label']) - # If target host is not in the multiattach host group - if host['clusterRef'] != host_group['clusterRef']: - raise eseries_exc.VolumeNotMapped(volume_id=volume['id'], - host=host['label']) - - # Volume is mapped to multiattach host group - # Remove mapping if volume should no longer be attached after this - # operation. - if volume['status'] == 'detaching': - LOG.debug("Volume %s is mapped directly to multiattach host group but " - "is not currently attached; removing mapping.", volume['id']) - client.delete_volume_mapping(mapping['lunMappingRef']) diff --git a/cinder/volume/drivers/netapp/eseries/iscsi_driver.py b/cinder/volume/drivers/netapp/eseries/iscsi_driver.py deleted file mode 100644 index 7870c8a9e..000000000 --- a/cinder/volume/drivers/netapp/eseries/iscsi_driver.py +++ /dev/null @@ -1,123 +0,0 @@ -# Copyright (c) 2014 NetApp, Inc. All Rights Reserved. -# Copyright (c) 2015 Alex Meade. All Rights Reserved. -# Copyright (c) 2015 Rushil Chugh. All Rights Reserved. -# Copyright (c) 2015 Navneet Singh. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -""" -Volume driver for NetApp E-Series iSCSI storage systems. 
-""" - -from cinder import interface -from cinder.volume import driver -from cinder.volume.drivers.netapp.eseries import library -from cinder.volume.drivers.netapp import utils as na_utils - - -@interface.volumedriver -class NetAppEseriesISCSIDriver(driver.BaseVD, - driver.ManageableVD): - """NetApp E-Series iSCSI volume driver.""" - - DRIVER_NAME = 'NetApp_iSCSI_ESeries' - - # ThirdPartySystems wiki page - CI_WIKI_NAME = "NetApp_Eseries_CI" - VERSION = library.NetAppESeriesLibrary.VERSION - - def __init__(self, *args, **kwargs): - super(NetAppEseriesISCSIDriver, self).__init__(*args, **kwargs) - na_utils.validate_instantiation(**kwargs) - self.library = library.NetAppESeriesLibrary(self.DRIVER_NAME, - 'iSCSI', **kwargs) - - def do_setup(self, context): - self.library.do_setup(context) - - def check_for_setup_error(self): - self.library.check_for_setup_error() - - def create_volume(self, volume): - self.library.create_volume(volume) - - def create_volume_from_snapshot(self, volume, snapshot): - self.library.create_volume_from_snapshot(volume, snapshot) - - def create_cloned_volume(self, volume, src_vref): - self.library.create_cloned_volume(volume, src_vref) - - def delete_volume(self, volume): - self.library.delete_volume(volume) - - def create_snapshot(self, snapshot): - return self.library.create_snapshot(snapshot) - - def delete_snapshot(self, snapshot): - self.library.delete_snapshot(snapshot) - - def get_volume_stats(self, refresh=False): - return self.library.get_volume_stats(refresh) - - def extend_volume(self, volume, new_size): - self.library.extend_volume(volume, new_size) - - def ensure_export(self, context, volume): - return self.library.ensure_export(context, volume) - - def create_export(self, context, volume, connector): - return self.library.create_export(context, volume) - - def remove_export(self, context, volume): - self.library.remove_export(context, volume) - - def manage_existing(self, volume, existing_ref): - return 
self.library.manage_existing(volume, existing_ref) - - def manage_existing_get_size(self, volume, existing_ref): - return self.library.manage_existing_get_size(volume, existing_ref) - - def unmanage(self, volume): - return self.library.unmanage(volume) - - def initialize_connection(self, volume, connector): - return self.library.initialize_connection_iscsi(volume, connector) - - def terminate_connection(self, volume, connector, **kwargs): - return self.library.terminate_connection_iscsi(volume, connector, - **kwargs) - - def get_pool(self, volume): - return self.library.get_pool(volume) - - def create_cgsnapshot(self, context, cgsnapshot, snapshots): - return self.library.create_cgsnapshot(cgsnapshot, snapshots) - - def delete_cgsnapshot(self, context, cgsnapshot, snapshots): - return self.library.delete_cgsnapshot(cgsnapshot, snapshots) - - def create_consistencygroup(self, context, group): - return self.library.create_consistencygroup(group) - - def delete_consistencygroup(self, context, group, volumes): - return self.library.delete_consistencygroup(group, volumes) - - def update_consistencygroup(self, context, group, - add_volumes=None, remove_volumes=None): - return self.library.update_consistencygroup( - group, add_volumes, remove_volumes) - - def create_consistencygroup_from_src(self, context, group, volumes, - cgsnapshot=None, snapshots=None, - source_cg=None, source_vols=None): - return self.library.create_consistencygroup_from_src( - group, volumes, cgsnapshot, snapshots, source_cg, source_vols) diff --git a/cinder/volume/drivers/netapp/eseries/library.py b/cinder/volume/drivers/netapp/eseries/library.py deleted file mode 100644 index 1770123d3..000000000 --- a/cinder/volume/drivers/netapp/eseries/library.py +++ /dev/null @@ -1,2138 +0,0 @@ -# Copyright (c) 2015 Alex Meade -# Copyright (c) 2015 Rushil Chugh -# Copyright (c) 2015 Navneet Singh -# Copyright (c) 2015 Yogesh Kshirsagar -# Copyright (c) 2015 Jose Porrua -# Copyright (c) 2015 Michael Price -# 
Copyright (c) 2015 Tom Barron -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import copy -import math -import socket -import time -import uuid - -from oslo_config import cfg -from oslo_log import log as logging -from oslo_log import versionutils -from oslo_service import loopingcall -from oslo_utils import excutils -from oslo_utils import units -import six - -from cinder import exception -from cinder.i18n import _ -from cinder import utils as cinder_utils -from cinder.volume.drivers.netapp.eseries import client -from cinder.volume.drivers.netapp.eseries import exception as eseries_exc -from cinder.volume.drivers.netapp.eseries import host_mapper -from cinder.volume.drivers.netapp.eseries import utils -from cinder.volume.drivers.netapp import options as na_opts -from cinder.volume.drivers.netapp import utils as na_utils -from cinder.volume import utils as volume_utils -from cinder.zonemanager import utils as fczm_utils - - -LOG = logging.getLogger(__name__) - -CONF = cfg.CONF - - -@six.add_metaclass(cinder_utils.TraceWrapperMetaclass) -class NetAppESeriesLibrary(object): - """Executes commands relating to Volumes.""" - - DRIVER_NAME = 'NetApp_iSCSI_ESeries' - AUTOSUPPORT_INTERVAL_SECONDS = 3600 # hourly - VERSION = "1.0.0" - REQUIRED_FLAGS = ['netapp_server_hostname', 'netapp_controller_ips', - 'netapp_login', 'netapp_password'] - SLEEP_SECS = 5 - HOST_TYPES = {'factoryDefault': 'FactoryDefault', - 'linux_atto': 'LnxTPGSALUA', - 
'linux_dm_mp': 'LnxALUA', - 'linux_mpp_rdac': 'LNX', - 'linux_pathmanager': 'LnxTPGSALUA_PM', - 'linux_sf': 'LnxTPGSALUA_SF', - 'ontap': 'ONTAP_ALUA', - 'ontap_rdac': 'ONTAP_RDAC', - 'vmware': 'VmwTPGSALUA', - 'windows': 'W2KNETNCL', - 'windows_atto': 'WinTPGSALUA', - 'windows_clustered': 'W2KNETCL', - } - # NOTE(ameade): This maps what is reported by the e-series api to a - # consistent set of values that are reported by all NetApp drivers - # to the cinder scheduler. - SSC_DISK_TYPE_MAPPING = { - 'scsi': 'SCSI', - 'fibre': 'FCAL', - 'sas': 'SAS', - 'sata': 'SATA', - 'ssd': 'SSD', - } - SSC_RAID_TYPE_MAPPING = { - 'raidDiskPool': 'DDP', - 'raid0': 'raid0', - 'raid1': 'raid1', - # RAID3 is being deprecated and is actually implemented as RAID5 - 'raid3': 'raid5', - 'raid5': 'raid5', - 'raid6': 'raid6', - } - READ_CACHE_Q_SPEC = 'netapp:read_cache' - WRITE_CACHE_Q_SPEC = 'netapp:write_cache' - DA_UQ_SPEC = 'netapp_eseries_data_assurance' - FLASH_CACHE_UQ_SPEC = 'netapp_eseries_flash_read_cache' - DISK_TYPE_UQ_SPEC = 'netapp_disk_type' - ENCRYPTION_UQ_SPEC = 'netapp_disk_encryption' - SPINDLE_SPD_UQ_SPEC = 'netapp_eseries_disk_spindle_speed' - RAID_UQ_SPEC = 'netapp_raid_type' - THIN_UQ_SPEC = 'netapp_thin_provisioned' - SSC_UPDATE_INTERVAL = 60 # seconds - SA_COMM_TIMEOUT = 30 - WORLDWIDENAME = 'worldWideName' - - DEFAULT_HOST_TYPE = 'linux_dm_mp' - DEFAULT_CHAP_USER_NAME = 'eserieschapuser' - - # Define name marker string to use in snapshot groups that are for copying - # volumes. This is to differentiate them from ordinary snapshot groups. - SNAPSHOT_VOL_COPY_SUFFIX = 'SGCV' - # Define a name marker string used to identify snapshot volumes that have - # an underlying snapshot that is awaiting deletion. 
- SNAPSHOT_VOL_DEL_SUFFIX = '_DEL' - # Maximum number of snapshots per snapshot group - MAX_SNAPSHOT_COUNT = 32 - # Maximum number of snapshot groups - MAX_SNAPSHOT_GROUP_COUNT = 4 - RESERVED_SNAPSHOT_GROUP_COUNT = 1 - SNAPSHOT_PERSISTENT_STORE_KEY = 'cinder-snapshots' - SNAPSHOT_PERSISTENT_STORE_LOCK = str(uuid.uuid4()) - - def __init__(self, driver_name, driver_protocol="iSCSI", - configuration=None, **kwargs): - self.configuration = configuration - self._app_version = kwargs.pop("app_version", "unknown") - self.configuration.append_config_values(na_opts.netapp_basicauth_opts) - self.configuration.append_config_values( - na_opts.netapp_connection_opts) - self.configuration.append_config_values(na_opts.netapp_transport_opts) - self.configuration.append_config_values(na_opts.netapp_eseries_opts) - self.configuration.append_config_values(na_opts.netapp_san_opts) - self.lookup_service = fczm_utils.create_lookup_service() - self._backend_name = self.configuration.safe_get( - "volume_backend_name") or "NetApp_ESeries" - self.driver_name = driver_name - self.driver_protocol = driver_protocol - self._stats = {} - self._ssc_stats = {} - - def do_setup(self, context): - """Any initialization the volume driver does while starting.""" - self.context = context - na_utils.check_flags(self.REQUIRED_FLAGS, self.configuration) - - self._client = self._create_rest_client(self.configuration) - self._check_mode_get_or_register_storage_system() - self._version_check() - if self.configuration.netapp_enable_multiattach: - self._ensure_multi_attach_host_group_exists() - - # This driver has been marked 'deprecated' in the Pike release and - # can be removed in Queens. 
- msg = _("The NetApp E-Series driver is deprecated and will be " - "removed in a future release.") - versionutils.report_deprecated_feature(LOG, msg) - - def _create_rest_client(self, configuration): - port = configuration.netapp_server_port - scheme = configuration.netapp_transport_type.lower() - if port is None: - if scheme == 'http': - port = 8080 - elif scheme == 'https': - port = 8443 - - return client.RestClient( - scheme=scheme, - host=configuration.netapp_server_hostname, - port=port, - service_path=configuration.netapp_webservice_path, - username=configuration.netapp_login, - password=configuration.netapp_password) - - def _version_check(self): - """Ensure that the minimum version of the REST API is available""" - if not self._client.features.REST_1_4_RELEASE: - min_version = ( - self._client.features.REST_1_4_RELEASE.minimum_version) - raise exception.NetAppDriverException( - 'This version (%(cur)s of the NetApp SANtricity Webservices ' - 'Proxy is not supported. Install version %(supp)s or ' - 'later.' % {'cur': self._client.api_version, - 'supp': min_version}) - - def _start_periodic_tasks(self): - ssc_periodic_task = loopingcall.FixedIntervalLoopingCall( - self._update_ssc_info) - ssc_periodic_task.start(interval=self.SSC_UPDATE_INTERVAL) - - # Start the task that logs autosupport (ASUP) data to the controller - asup_periodic_task = loopingcall.FixedIntervalLoopingCall( - self._create_asup, CONF.host) - asup_periodic_task.start(interval=self.AUTOSUPPORT_INTERVAL_SECONDS, - initial_delay=0) - - def check_for_setup_error(self): - self._check_host_type() - self._check_multipath() - # It is important that this be called before any other methods that - # interact with the storage-system. It blocks until the - # storage-system comes online. - self._check_storage_system() - self._check_pools() - self._start_periodic_tasks() - - def _check_host_type(self): - """Validate that the configured host-type is available for the array. 
- - Not all host-types are available on every firmware version. - """ - requested_host_type = (self.configuration.netapp_host_type - or self.DEFAULT_HOST_TYPE) - actual_host_type = ( - self.HOST_TYPES.get(requested_host_type, requested_host_type)) - - for host_type in self._client.list_host_types(): - if(host_type.get('code') == actual_host_type or - host_type.get('name') == actual_host_type): - self.host_type = host_type.get('code') - return - exc_msg = _("The host-type '%s' is not supported on this storage " - "system.") - raise exception.NetAppDriverException(exc_msg % requested_host_type) - - def _check_multipath(self): - if not self.configuration.use_multipath_for_image_xfer: - LOG.warning('Production use of "%(backend)s" backend requires ' - 'the Cinder controller to have multipathing ' - 'properly set up and the configuration option ' - '"%(mpflag)s" to be set to "True".', - {'backend': self._backend_name, - 'mpflag': 'use_multipath_for_image_xfer'}) - - def _check_pools(self): - """Ensure that the pool listing contains at least one pool""" - if not self._get_storage_pools(): - msg = _('No pools are available for provisioning volumes. 
' - 'Ensure that the configuration option ' - 'netapp_pool_name_search_pattern is set correctly.') - raise exception.NetAppDriverException(msg) - - def _ensure_multi_attach_host_group_exists(self): - try: - host_group = self._client.get_host_group_by_name( - utils.MULTI_ATTACH_HOST_GROUP_NAME) - LOG.info("The multi-attach E-Series host group '%(label)s' " - "already exists with clusterRef %(clusterRef)s", - host_group) - except exception.NotFound: - host_group = self._client.create_host_group( - utils.MULTI_ATTACH_HOST_GROUP_NAME) - LOG.info("Created multi-attach E-Series host group %(label)s " - "with clusterRef %(clusterRef)s", host_group) - - def _check_mode_get_or_register_storage_system(self): - """Does validity checks for storage system registry and health.""" - def _resolve_host(host): - try: - ip = na_utils.resolve_hostname(host) - return ip - except socket.gaierror as e: - LOG.error('Error resolving host %(host)s. Error - %(e)s.', - {'host': host, 'e': e}) - raise exception.NoValidBackend( - _("Controller IP '%(host)s' could not be resolved: %(e)s.") - % {'host': host, 'e': e}) - - ips = self.configuration.netapp_controller_ips - ips = [i.strip() for i in ips.split(",")] - ips = [x for x in ips if _resolve_host(x)] - host = na_utils.resolve_hostname( - self.configuration.netapp_server_hostname) - if host in ips: - LOG.info('Embedded mode detected.') - system = self._client.list_storage_systems()[0] - else: - LOG.info('Proxy mode detected.') - system = self._client.register_storage_system( - ips, password=self.configuration.netapp_sa_password) - self._client.set_system_id(system.get('id')) - self._client._init_features() - - def _check_password_status(self, system): - """Determine if the storage system's password status is valid. - - The password status has the following possible states: unknown, valid, - invalid. 
- - If the password state cannot be retrieved from the storage system, - an empty string will be returned as the status, and the password - status will be assumed to be valid. This is done to ensure that - access to a storage system will not be blocked in the event of a - problem with the API. - - This method returns a tuple consisting of the storage system's - password status and whether or not the status is valid. - - Example: (invalid, True) - - :returns: (str, bool) - """ - - status = system.get('passwordStatus') - status = status.lower() if status else '' - return status, status not in ['invalid', 'unknown'] - - def _check_storage_system_status(self, system): - """Determine if the storage system's status is valid. - - The storage system status has the following possible states: - neverContacted, offline, optimal, needsAttn. - - If the storage system state cannot be retrieved, an empty string will - be returned as the status, and the storage system's status will be - assumed to be valid. This is done to ensure that access to a storage - system will not be blocked in the event of a problem with the API. - - This method returns a tuple consisting of the storage system's - password status and whether or not the status is valid. 
- - Example: (needsAttn, True) - - :returns: (str, bool) - """ - status = system.get('status') - status = status.lower() if status else '' - return status, status not in ['nevercontacted', 'offline'] - - def _check_storage_system(self): - """Checks whether system is registered and has good status.""" - try: - self._client.list_storage_system() - except exception.NetAppDriverException: - with excutils.save_and_reraise_exception(): - LOG.info("System with controller addresses [%s] is not " - "registered with web service.", - self.configuration.netapp_controller_ips) - - # Update the stored password - # We do this to trigger the webservices password validation routine - new_pwd = self.configuration.netapp_sa_password - self._client.update_stored_system_password(new_pwd) - - start_time = int(time.time()) - - def check_system_status(): - system = self._client.list_storage_system() - pass_status, pass_status_valid = ( - self._check_password_status(system)) - status, status_valid = self._check_storage_system_status(system) - msg_dict = {'id': system.get('id'), 'status': status, - 'pass_status': pass_status} - # wait if array not contacted or - # password was not in sync previously. 
- if not (pass_status_valid and status_valid): - if not pass_status_valid: - LOG.info('Waiting for web service to validate the ' - 'configured password.') - else: - LOG.info('Waiting for web service array communication.') - if int(time.time() - start_time) >= self.SA_COMM_TIMEOUT: - if not status_valid: - raise exception.NetAppDriverException( - _("System %(id)s found with bad status - " - "%(status)s.") % msg_dict) - else: - raise exception.NetAppDriverException( - _("System %(id)s found with bad password status - " - "%(pass_status)s.") % msg_dict) - - # The system was found to have a good status - else: - LOG.info("System %(id)s has %(status)s status.", msg_dict) - raise loopingcall.LoopingCallDone() - - checker = loopingcall.FixedIntervalLoopingCall(f=check_system_status) - checker.start(interval = self.SLEEP_SECS, - initial_delay=self.SLEEP_SECS).wait() - - return True - - def _get_volume(self, uid): - """Retrieve a volume by its label""" - if uid is None: - raise exception.InvalidInput(_('The volume label is required' - ' as input.')) - - uid = utils.convert_uuid_to_es_fmt(uid) - - return self._client.list_volume(uid) - - def _get_snapshot_group_for_snapshot(self, snapshot): - snapshot = self._get_snapshot(snapshot) - try: - return self._client.list_snapshot_group(snapshot['pitGroupRef']) - except (exception.NetAppDriverException, - eseries_exc.WebServiceException): - msg = _("Specified snapshot group with id %s could not be found.") - raise exception.NotFound(msg % snapshot['pitGroupRef']) - - def _get_snapshot_legacy(self, snapshot): - """Find a E-Series snapshot by the name of the snapshot group. - - Snapshots were previously identified by the unique name of the - snapshot group. A snapshot volume is now utilized to uniquely - identify the snapshot, so any snapshots previously defined in this - way must be updated. 
- - :param snapshot_id: Cinder snapshot identifer - :return: An E-Series snapshot image - """ - label = utils.convert_uuid_to_es_fmt(snapshot['id']) - for group in self._client.list_snapshot_groups(): - if group['label'] == label: - image = self._get_oldest_image_in_snapshot_group(group['id']) - group_label = utils.convert_uuid_to_es_fmt(uuid.uuid4()) - # Modify the group label so we don't have a name collision - self._client.update_snapshot_group(group['id'], - group_label) - - snapshot.update({'provider_id': image['id']}) - snapshot.save() - - return image - - raise exception.NotFound(_('Snapshot with id of %s could not be ' - 'found.') % snapshot['id']) - - def _get_snapshot(self, snapshot): - """Find a E-Series snapshot by its Cinder identifier - - An E-Series snapshot image does not have a configuration name/label, - so we define a snapshot volume underneath of it that will help us to - identify it. We retrieve the snapshot volume with the matching name, - and then we find its underlying snapshot. 
- - :param snapshot_id: Cinder snapshot identifer - :return: An E-Series snapshot image - """ - try: - return self._client.list_snapshot_image( - snapshot.get('provider_id')) - except (eseries_exc.WebServiceException, - exception.NetAppDriverException): - try: - LOG.debug('Unable to locate snapshot by its id, falling ' - 'back to legacy behavior.') - return self._get_snapshot_legacy(snapshot) - except exception.NetAppDriverException: - raise exception.NotFound(_('Snapshot with id of %s could not' - ' be found.') % snapshot['id']) - - def _get_snapshot_group(self, snapshot_group_id): - try: - return self._client.list_snapshot_group(snapshot_group_id) - except exception.NetAppDriverException: - raise exception.NotFound(_('Unable to retrieve snapshot group ' - 'with id of %s.') % snapshot_group_id) - - def _get_ordered_images_in_snapshot_group(self, snapshot_group_id): - images = self._client.list_snapshot_images() - if images: - filtered_images = [img for img in images if img['pitGroupRef'] == - snapshot_group_id] - sorted_imgs = sorted(filtered_images, key=lambda x: x[ - 'pitTimestamp']) - return sorted_imgs - return list() - - def _get_oldest_image_in_snapshot_group(self, snapshot_group_id): - group = self._get_snapshot_group(snapshot_group_id) - images = self._get_ordered_images_in_snapshot_group(snapshot_group_id) - if images: - return images[0] - - msg = _("No snapshot image found in snapshot group %s.") - raise exception.NotFound(msg % group['label']) - - def _get_latest_image_in_snapshot_group(self, snapshot_group_id): - group = self._get_snapshot_group(snapshot_group_id) - images = self._get_ordered_images_in_snapshot_group(snapshot_group_id) - if images: - return images[-1] - - msg = _("No snapshot image found in snapshot group %s.") - raise exception.NotFound(msg % group['label']) - - def _is_volume_containing_snaps(self, label): - """Checks if volume contains snapshot groups.""" - vol_id = utils.convert_es_fmt_to_uuid(label) - for snap in 
self._client.list_snapshot_groups(): - if snap['baseVolume'] == vol_id: - return True - return False - - def get_pool(self, volume): - """Return pool name where volume resides. - - :param volume: The volume hosted by the driver. - :returns: Name of the pool where given volume is hosted. - """ - eseries_volume = self._get_volume(volume['name_id']) - storage_pool = self._client.get_storage_pool( - eseries_volume['volumeGroupRef']) - if storage_pool: - return storage_pool.get('label') - - def _add_volume_to_consistencygroup(self, volume): - if volume.get('consistencygroup_id'): - es_cg = self._get_consistencygroup(volume['consistencygroup']) - self._update_consistency_group_members(es_cg, [volume], []) - - def create_volume(self, volume): - """Creates a volume.""" - - LOG.debug('create_volume on %s', volume['host']) - - # get E-series pool label as pool name - eseries_pool_label = volume_utils.extract_host(volume['host'], - level='pool') - - if eseries_pool_label is None: - msg = _("Pool is not available in the volume host field.") - raise exception.InvalidHost(reason=msg) - - eseries_volume_label = utils.convert_uuid_to_es_fmt(volume['name_id']) - - extra_specs = na_utils.get_volume_extra_specs(volume) - - # get size of the requested volume creation - size_gb = int(volume['size']) - self._create_volume(eseries_pool_label, eseries_volume_label, size_gb, - extra_specs) - - self._add_volume_to_consistencygroup(volume) - - def _create_volume(self, eseries_pool_label, eseries_volume_label, - size_gb, extra_specs=None): - """Creates volume with given label and size.""" - if extra_specs is None: - extra_specs = {} - - if self.configuration.netapp_enable_multiattach: - volumes = self._client.list_volumes() - # NOTE(ameade): Ensure we do not create more volumes than we could - # map to the multi attach ESeries host group. 
- if len(volumes) > utils.MAX_LUNS_PER_HOST_GROUP: - msg = (_("Cannot create more than %(req)s volumes on the " - "ESeries array when 'netapp_enable_multiattach' is " - "set to true.") % - {'req': utils.MAX_LUNS_PER_HOST_GROUP}) - raise exception.NetAppDriverException(msg) - - # These must be either boolean values, or None - read_cache = extra_specs.get(self.READ_CACHE_Q_SPEC) - if read_cache is not None: - read_cache = na_utils.to_bool(read_cache) - - write_cache = extra_specs.get(self.WRITE_CACHE_Q_SPEC) - if write_cache is not None: - write_cache = na_utils.to_bool(write_cache) - - flash_cache = extra_specs.get(self.FLASH_CACHE_UQ_SPEC) - if flash_cache is not None: - flash_cache = na_utils.to_bool(flash_cache) - - data_assurance = extra_specs.get(self.DA_UQ_SPEC) - if data_assurance is not None: - data_assurance = na_utils.to_bool(data_assurance) - - thin_provision = extra_specs.get(self.THIN_UQ_SPEC) - if(thin_provision is not None): - thin_provision = na_utils.to_bool(thin_provision) - - target_pool = None - - pools = self._get_storage_pools() - for pool in pools: - if pool["label"] == eseries_pool_label: - target_pool = pool - break - - if not target_pool: - msg = _("Pools %s does not exist") - raise exception.NetAppDriverException(msg % eseries_pool_label) - - try: - vol = self._client.create_volume(target_pool['volumeGroupRef'], - eseries_volume_label, size_gb, - read_cache=read_cache, - write_cache=write_cache, - flash_cache=flash_cache, - data_assurance=data_assurance, - thin_provision=thin_provision) - LOG.info("Created volume with label %s.", eseries_volume_label) - except exception.NetAppDriverException as e: - with excutils.save_and_reraise_exception(): - LOG.error("Error creating volume. 
Msg - %s.", e) - # There was some kind failure creating the volume, make sure no - # partial flawed work exists - try: - bad_vol = self._get_volume(eseries_volume_label) - except Exception: - # Swallowing the exception intentionally because this is - # emergency cleanup to make sure no intermediate volumes - # were left. In this whole error situation, the more - # common route would be for no volume to have been created. - pass - else: - # Some sort of partial volume was created despite the - # error. Lets clean it out so no partial state volumes or - # orphans are left. - try: - self._client.delete_volume(bad_vol["id"]) - except exception.NetAppDriverException as e2: - LOG.error( - "Error cleaning up failed volume creation. " - "Msg - %s.", e2) - - return vol - - def _is_data_assurance_supported(self): - """Determine if the storage backend is PI (DataAssurance) compatible""" - return self.driver_protocol != "iSCSI" - - def _schedule_and_create_volume(self, label, size_gb): - """Creates volume with given label and size.""" - avl_pools = self._get_sorted_available_storage_pools(size_gb) - for pool in avl_pools: - try: - vol = self._client.create_volume(pool['volumeGroupRef'], - label, size_gb) - LOG.info("Created volume with label %s.", label) - return vol - except exception.NetAppDriverException as e: - LOG.error("Error creating volume. Msg - %s.", e) - msg = _("Failure creating volume %s.") - raise exception.NetAppDriverException(msg % label) - - def _create_volume_from_snapshot(self, volume, image): - """Define a new volume based on an E-Series snapshot image. - - This method should be synchronized on the snapshot id. 
- - :param volume: a Cinder volume - :param image: an E-Series snapshot image - :return: the clone volume - """ - label = utils.convert_uuid_to_es_fmt(volume['id']) - size = volume['size'] - - dst_vol = self._schedule_and_create_volume(label, size) - src_vol = None - try: - src_vol = self._create_snapshot_volume(image) - self._copy_volume_high_priority_readonly(src_vol, dst_vol) - LOG.info("Created volume with label %s.", label) - except exception.NetAppDriverException: - with excutils.save_and_reraise_exception(): - self._client.delete_volume(dst_vol['volumeRef']) - finally: - if src_vol: - try: - self._client.delete_snapshot_volume(src_vol['id']) - except exception.NetAppDriverException as e: - LOG.error("Failure restarting snap vol. Error: %s.", e) - else: - LOG.warning("Snapshot volume creation failed for " - "snapshot %s.", image['id']) - - return dst_vol - - def create_volume_from_snapshot(self, volume, snapshot): - """Creates a volume from a snapshot.""" - es_snapshot = self._get_snapshot(snapshot) - cinder_utils.synchronized(snapshot['id'])( - self._create_volume_from_snapshot)(volume, es_snapshot) - - self._add_volume_to_consistencygroup(volume) - - def _copy_volume_high_priority_readonly(self, src_vol, dst_vol): - """Copies src volume to dest volume.""" - LOG.info("Copying src vol %(src)s to dest vol %(dst)s.", - {'src': src_vol['label'], 'dst': dst_vol['label']}) - job = None - try: - job = self._client.create_volume_copy_job( - src_vol['id'], dst_vol['volumeRef']) - - def wait_for_copy(): - j_st = self._client.list_vol_copy_job(job['volcopyRef']) - if (j_st['status'] in ['inProgress', 'pending', 'unknown']): - return - if j_st['status'] == 'failed' or j_st['status'] == 'halted': - LOG.error("Vol copy job status %s.", j_st['status']) - raise exception.NetAppDriverException( - _("Vol copy job for dest %s failed.") % - dst_vol['label']) - LOG.info("Vol copy job completed for dest %s.", - dst_vol['label']) - raise loopingcall.LoopingCallDone() - - checker = 
loopingcall.FixedIntervalLoopingCall(wait_for_copy) - checker.start(interval=self.SLEEP_SECS, - initial_delay=self.SLEEP_SECS, - stop_on_exception=True).wait() - finally: - if job: - try: - self._client.delete_vol_copy_job(job['volcopyRef']) - except exception.NetAppDriverException: - LOG.warning("Failure deleting job %s.", job['volcopyRef']) - else: - LOG.warning('Volume copy job for src vol %s not found.', - src_vol['id']) - LOG.info('Copy job to dest vol %s completed.', dst_vol['label']) - - def create_cloned_volume(self, volume, src_vref): - """Creates a clone of the specified volume.""" - es_vol = self._get_volume(src_vref['id']) - - es_snapshot = self._create_es_snapshot_for_clone(es_vol) - - try: - self._create_volume_from_snapshot(volume, es_snapshot) - self._add_volume_to_consistencygroup(volume) - finally: - try: - self._client.delete_snapshot_group(es_snapshot['pitGroupRef']) - except exception.NetAppDriverException: - LOG.warning("Failure deleting temp snapshot %s.", - es_snapshot['id']) - - def delete_volume(self, volume): - """Deletes a volume.""" - try: - vol = self._get_volume(volume['name_id']) - self._client.delete_volume(vol['volumeRef']) - except (exception.NetAppDriverException, exception.VolumeNotFound): - LOG.warning("Volume %s already deleted.", volume['id']) - return - - def _is_cgsnapshot(self, snapshot_image): - """Determine if an E-Series snapshot image is part of a cgsnapshot""" - cg_id = snapshot_image.get('consistencyGroupId') - # A snapshot that is not part of a consistency group may have a - # cg_id of either none or a string of all 0's, so we check for both - return not (cg_id is None or utils.NULL_REF == cg_id) - - def _create_snapshot_volume(self, image): - """Creates snapshot volume for given group with snapshot_id.""" - group = self._get_snapshot_group(image['pitGroupRef']) - - LOG.debug("Creating snap vol for group %s", group['label']) - - label = utils.convert_uuid_to_es_fmt(uuid.uuid4()) - - if self._is_cgsnapshot(image): - 
return self._client.create_cg_snapshot_view( - image['consistencyGroupId'], label, image['id']) - else: - return self._client.create_snapshot_volume( - image['pitRef'], label, image['baseVol']) - - def _create_snapshot_group(self, label, volume, percentage_capacity=20.0): - """Define a new snapshot group for a volume - - :param label: the label for the snapshot group - :param volume: an E-Series volume - :param percentage_capacity: an optional repository percentage - :return: a new snapshot group - """ - - # Newer versions of the REST API are capable of automatically finding - # the best pool candidate - if not self._client.features.REST_1_3_RELEASE: - vol_size_gb = int(volume['totalSizeInBytes']) / units.Gi - pools = self._get_sorted_available_storage_pools(vol_size_gb) - volume_pool = next(pool for pool in pools if volume[ - 'volumeGroupRef'] == pool['id']) - - # A disk pool can only utilize a candidate from its own pool - if volume_pool.get('raidLevel') == 'raidDiskPool': - pool_id_to_use = volume_pool['volumeGroupRef'] - - # Otherwise, choose the best available pool - else: - pool_id_to_use = pools[0]['volumeGroupRef'] - group = self._client.create_snapshot_group( - label, volume['volumeRef'], pool_id_to_use, - repo_percent=percentage_capacity) - - else: - group = self._client.create_snapshot_group( - label, volume['volumeRef'], repo_percent=percentage_capacity) - - return group - - def _get_snapshot_groups_for_volume(self, vol): - """Find all snapshot groups associated with an E-Series volume - - :param vol: An E-Series volume object - :return: A list of snapshot groups - :raise NetAppDriverException: if the list of snapshot groups cannot be - retrieved - """ - return [grp for grp in self._client.list_snapshot_groups() - if grp['baseVolume'] == vol['id']] - - def _get_available_snapshot_group(self, vol): - """Find a snapshot group that has remaining capacity for snapshots. 
- - In order to minimize repository usage, we prioritize the snapshot - group with remaining snapshot capacity that has most recently had a - snapshot defined on it. - - :param vol: An E-Series volume object - :return: A valid snapshot group that has available snapshot capacity, - or None - :raise NetAppDriverException: if the list of snapshot groups cannot be - retrieved - """ - groups_for_v = self._get_snapshot_groups_for_volume(vol) - - # Filter out reserved snapshot groups - groups = [g for g in groups_for_v - if self.SNAPSHOT_VOL_COPY_SUFFIX not in g['label']] - - # Filter out groups that are part of a consistency group - groups = [g for g in groups if not g['consistencyGroup']] - # Find all groups with free snapshot capacity - groups = [group for group in groups if group.get('snapshotCount') < - self.MAX_SNAPSHOT_COUNT] - - # Order by the last defined snapshot on the group - if len(groups) > 1: - group_by_id = {g['id']: g for g in groups} - - snap_imgs = list() - for group in groups: - try: - snap_imgs.append( - self._get_latest_image_in_snapshot_group(group['id'])) - except exception.NotFound: - pass - - snap_imgs = sorted(snap_imgs, key=lambda x: x['pitSequenceNumber']) - - if snap_imgs: - # The newest image - img = snap_imgs[-1] - return group_by_id[img['pitGroupRef']] - else: - return groups[0] if groups else None - - # Skip the snapshot image checks if there is only one snapshot group - elif groups: - return groups[0] - else: - return None - - def _create_es_snapshot_for_clone(self, vol): - group_name = (utils.convert_uuid_to_es_fmt(uuid.uuid4()) + - self.SNAPSHOT_VOL_COPY_SUFFIX) - return self._create_es_snapshot(vol, group_name) - - def _create_es_snapshot(self, vol, group_name=None): - snap_grp, snap_image = None, None - try: - snap_grp = self._get_available_snapshot_group(vol) - # If a snapshot group is not available, create one if possible - if snap_grp is None: - snap_groups_for_vol = self._get_snapshot_groups_for_volume( - vol) - - # We need a 
reserved snapshot group - if (group_name is not None and - (self.SNAPSHOT_VOL_COPY_SUFFIX in group_name)): - - # First we search for an existing reserved group - for grp in snap_groups_for_vol: - if grp['label'].endswith( - self.SNAPSHOT_VOL_COPY_SUFFIX): - snap_grp = grp - break - - # No reserved group exists, so we create it - if (snap_grp is None and - (len(snap_groups_for_vol) < - self.MAX_SNAPSHOT_GROUP_COUNT)): - snap_grp = self._create_snapshot_group(group_name, - vol) - - # Ensure we don't exceed the snapshot group limit - elif (len(snap_groups_for_vol) < - (self.MAX_SNAPSHOT_GROUP_COUNT - - self.RESERVED_SNAPSHOT_GROUP_COUNT)): - - label = group_name if group_name is not None else ( - utils.convert_uuid_to_es_fmt(uuid.uuid4())) - - snap_grp = self._create_snapshot_group(label, vol) - LOG.info("Created snap grp with label %s.", label) - - # We couldn't retrieve or create a snapshot group - if snap_grp is None: - raise exception.SnapshotLimitExceeded( - allowed=(self.MAX_SNAPSHOT_COUNT * - (self.MAX_SNAPSHOT_GROUP_COUNT - - self.RESERVED_SNAPSHOT_GROUP_COUNT))) - - return self._client.create_snapshot_image( - snap_grp['id']) - - except exception.NetAppDriverException: - with excutils.save_and_reraise_exception(): - if snap_image is None and snap_grp: - self._delete_snapshot_group(snap_grp['id']) - - def create_snapshot(self, snapshot): - """Creates a snapshot. - - :param snapshot: The Cinder snapshot - :param group_name: An optional label for the snapshot group - :returns: An E-Series snapshot image - """ - - os_vol = snapshot['volume'] - vol = self._get_volume(os_vol['name_id']) - - snap_image = cinder_utils.synchronized(vol['id'])( - self._create_es_snapshot)(vol) - model_update = { - 'provider_id': snap_image['id'] - } - - return model_update - - def _delete_es_snapshot(self, es_snapshot): - """Perform a soft-delete on an E-Series snapshot. 
- - Mark the snapshot image as no longer needed, so that it can be - purged from the backend when no other snapshots are dependent upon it. - - :param es_snapshot: an E-Series snapshot image - :return: None - """ - index = self._get_soft_delete_map() - snapgroup_ref = es_snapshot['pitGroupRef'] - if snapgroup_ref in index: - bitset = na_utils.BitSet(int((index[snapgroup_ref]))) - else: - bitset = na_utils.BitSet(0) - - images = [img for img in self._client.list_snapshot_images() if - img['pitGroupRef'] == snapgroup_ref] - for i, image in enumerate(sorted(images, key=lambda x: x[ - 'pitSequenceNumber'])): - if(image['pitSequenceNumber'] == es_snapshot[ - 'pitSequenceNumber']): - bitset.set(i) - break - - index_update, keys_to_del = ( - self._cleanup_snapshot_images(images, bitset)) - - self._merge_soft_delete_changes(index_update, keys_to_del) - - def delete_snapshot(self, snapshot): - """Delete a snapshot.""" - try: - es_snapshot = self._get_snapshot(snapshot) - except exception.NotFound: - LOG.warning("Snapshot %s already deleted.", snapshot['id']) - else: - os_vol = snapshot['volume'] - vol = self._get_volume(os_vol['name_id']) - - cinder_utils.synchronized(vol['id'])(self._delete_es_snapshot)( - es_snapshot) - - def _get_soft_delete_map(self): - """Retrieve the snapshot index from the storage backend""" - return self._client.list_backend_store( - self.SNAPSHOT_PERSISTENT_STORE_KEY) - - @cinder_utils.synchronized(SNAPSHOT_PERSISTENT_STORE_LOCK) - def _merge_soft_delete_changes(self, index_update, keys_to_del): - """Merge changes to the snapshot index and save it on the backend - - This method merges provided changes into the index, locking, to ensure - that concurrent changes that don't overlap are not overwritten. No - update will occur if neither an update or keys to delete are provided. 
- - :param index_update: a dict of keys/value pairs to update in the index - :param keys_to_del: a list of keys to purge from the index - """ - if index_update or keys_to_del: - index = self._get_soft_delete_map() - if index_update: - index.update(index_update) - if keys_to_del: - for key in keys_to_del: - if key in index: - del index[key] - - self._client.save_backend_store( - self.SNAPSHOT_PERSISTENT_STORE_KEY, index) - - def _cleanup_snapshot_images(self, images, bitset): - """Delete snapshot images that are marked for removal from the backend. - - This method will iterate over all snapshots (beginning with the - oldest), that are defined on the same snapshot group as the provided - snapshot image. If the snapshot is marked for deletion, it will be - purged from the backend. Otherwise, the method will return because - no further snapshots can be purged. - - The bitset will be updated based on the return from this method. - Any updates to the index will be provided as a dict, and any keys - to be removed from the index should be returned as (dict, list). 
- - :param images: a list of E-Series snapshot images - :param bitset: a bitset representing the snapshot images that are - no longer needed on the backend (and may be deleted when possible) - :return (dict, list): a tuple containing a dict of updates for the - index and a list of keys to remove from the index - """ - snap_grp_ref = images[0]['pitGroupRef'] - # All images are marked as deleted, we can delete the snapshot group - if bitset == 2 ** len(images) - 1: - try: - self._delete_snapshot_group(snap_grp_ref) - except exception.NetAppDriverException as e: - LOG.warning("Unable to remove snapshot group - %s.", e.msg) - return None, [snap_grp_ref] - else: - # Order by their sequence number, from oldest to newest - snapshots = sorted(images, - key=lambda x: x['pitSequenceNumber']) - deleted = 0 - - for i, snapshot in enumerate(snapshots): - if bitset.is_set(i): - self._delete_snapshot_image(snapshot) - deleted += 1 - else: - # Snapshots must be deleted in order, so if the current - # snapshot is not pending deletion, we don't want to - # process any more - break - - if deleted: - # Update the bitset based on the deleted snapshots - bitset >>= deleted - LOG.debug('Deleted %(count)s snapshot images from snapshot ' - 'group: %(grp)s.', {'count': deleted, - 'grp': snap_grp_ref}) - if deleted >= len(images): - try: - self._delete_snapshot_group(snap_grp_ref) - except exception.NetAppDriverException as e: - LOG.warning("Unable to remove snapshot group - %s.", - e.msg) - return None, [snap_grp_ref] - - return {snap_grp_ref: repr(bitset)}, None - - def _delete_snapshot_group(self, group_id): - try: - self._client.delete_snapshot_group(group_id) - except eseries_exc.WebServiceException as e: - raise exception.NetAppDriverException(e.msg) - - def _delete_snapshot_image(self, es_snapshot): - """Remove a snapshot image from the storage backend - - If a snapshot group has no remaining snapshot images associated with - it, it will be deleted as well. 
When the snapshot is deleted, - any snapshot volumes that are associated with it will be orphaned, - so they are also deleted. - - :param es_snapshot: An E-Series snapshot image - :param snapshot_volumes: Snapshot volumes associated with the snapshot - """ - self._client.delete_snapshot_image(es_snapshot['id']) - - def ensure_export(self, context, volume): - """Synchronously recreates an export for a volume.""" - pass - - def create_export(self, context, volume): - """Exports the volume.""" - pass - - def remove_export(self, context, volume): - """Removes an export for a volume.""" - pass - - def map_volume_to_host(self, volume, eseries_volume, initiators): - """Ensures the specified initiator has access to the volume.""" - existing_maps = self._client.get_volume_mappings_for_volume( - eseries_volume) - host = self._get_or_create_host(initiators, self.host_type) - # There can only be one or zero mappings on a volume in E-Series - current_map = existing_maps[0] if existing_maps else None - - if self.configuration.netapp_enable_multiattach and current_map: - self._ensure_multi_attach_host_group_exists() - mapping = host_mapper.map_volume_to_multiple_hosts(self._client, - volume, - eseries_volume, - host, - current_map) - else: - mapping = host_mapper.map_volume_to_single_host( - self._client, volume, eseries_volume, host, current_map, - self.configuration.netapp_enable_multiattach) - return mapping - - def initialize_connection_fc(self, volume, connector): - """Initializes the connection and returns connection info. - - Assigns the specified volume to a compute node/host so that it can be - used from that host. - - The driver returns a driver_volume_type of 'fibre_channel'. - The target_wwn can be a single entry or a list of wwns that - correspond to the list of remote wwn(s) that will export the volume. - Example return values: - - .. 
code-block:: python - - { - 'driver_volume_type': 'fibre_channel' - 'data': { - 'target_discovered': True, - 'target_lun': 1, - 'target_wwn': '500a098280feeba5', - 'initiator_target_map': { - '21000024ff406cc3': ['500a098280feeba5'], - '21000024ff406cc2': ['500a098280feeba5'] - } - } - } - - or - - .. code-block:: python - - { - 'driver_volume_type': 'fibre_channel' - 'data': { - 'target_discovered': True, - 'target_lun': 1, - 'target_wwn': ['500a098280feeba5', '500a098290feeba5', - '500a098190feeba5', '500a098180feeba5'], - 'initiator_target_map': { - '21000024ff406cc3': ['500a098280feeba5', - '500a098290feeba5'], - '21000024ff406cc2': ['500a098190feeba5', - '500a098180feeba5'] - } - } - } - """ - - initiators = [fczm_utils.get_formatted_wwn(wwpn) - for wwpn in connector['wwpns']] - - eseries_vol = self._get_volume(volume['name_id']) - mapping = self.map_volume_to_host(volume, eseries_vol, - initiators) - lun_id = mapping['lun'] - - initiator_info = self._build_initiator_target_map_fc(connector) - target_wwpns, initiator_target_map, num_paths = initiator_info - - if target_wwpns: - msg = ("Successfully fetched target details for LUN %(id)s " - "and initiator(s) %(initiators)s.") - msg_fmt = {'id': volume['id'], 'initiators': initiators} - LOG.debug(msg, msg_fmt) - else: - msg = _('Failed to get LUN target details for the LUN %s.') - raise exception.VolumeBackendAPIException(data=msg % volume['id']) - - target_info = {'driver_volume_type': 'fibre_channel', - 'data': {'target_discovered': True, - 'target_lun': int(lun_id), - 'target_wwn': target_wwpns, - 'initiator_target_map': initiator_target_map}} - - return target_info - - def terminate_connection_fc(self, volume, connector, **kwargs): - """Disallow connection from connector. - - Return empty data if other volumes are in the same zone. - The FibreChannel ZoneManager doesn't remove zones - if there isn't an initiator_target_map in the - return of terminate_connection. 
- - :returns: data - the target_wwns and initiator_target_map if the - zone is to be removed, otherwise the same map with - an empty dict for the 'data' key - """ - - eseries_vol = self._get_volume(volume['name_id']) - initiators = [fczm_utils.get_formatted_wwn(wwpn) - for wwpn in connector['wwpns']] - host = self._get_host_with_matching_port(initiators) - mappings = eseries_vol.get('listOfMappings', []) - - # There can only be one or zero mappings on a volume in E-Series - mapping = mappings[0] if mappings else None - - if not mapping: - raise eseries_exc.VolumeNotMapped(volume_id=volume['id'], - host=host['label']) - host_mapper.unmap_volume_from_host(self._client, volume, host, mapping) - - info = {'driver_volume_type': 'fibre_channel', - 'data': {}} - - if len(self._client.get_volume_mappings_for_host( - host['hostRef'])) == 0: - # No more exports for this host, so tear down zone. - LOG.info("Need to remove FC Zone, building initiator target map.") - - initiator_info = self._build_initiator_target_map_fc(connector) - target_wwpns, initiator_target_map, num_paths = initiator_info - - info['data'] = {'target_wwn': target_wwpns, - 'initiator_target_map': initiator_target_map} - - return info - - def _build_initiator_target_map_fc(self, connector): - """Build the target_wwns and the initiator target map.""" - - # get WWPNs from controller and strip colons - all_target_wwpns = self._client.list_target_wwpns() - all_target_wwpns = [six.text_type(wwpn).replace(':', '') - for wwpn in all_target_wwpns] - - target_wwpns = [] - init_targ_map = {} - num_paths = 0 - - if self.lookup_service: - # Use FC SAN lookup to determine which ports are visible. 
- dev_map = self.lookup_service.get_device_mapping_from_network( - connector['wwpns'], - all_target_wwpns) - - for fabric_name in dev_map: - fabric = dev_map[fabric_name] - target_wwpns += fabric['target_port_wwn_list'] - for initiator in fabric['initiator_port_wwn_list']: - if initiator not in init_targ_map: - init_targ_map[initiator] = [] - init_targ_map[initiator] += fabric['target_port_wwn_list'] - init_targ_map[initiator] = list(set( - init_targ_map[initiator])) - for target in init_targ_map[initiator]: - num_paths += 1 - target_wwpns = list(set(target_wwpns)) - else: - initiator_wwns = connector['wwpns'] - target_wwpns = all_target_wwpns - - for initiator in initiator_wwns: - init_targ_map[initiator] = target_wwpns - - return target_wwpns, init_targ_map, num_paths - - def initialize_connection_iscsi(self, volume, connector): - """Allow connection to connector and return connection info.""" - initiator_name = connector['initiator'] - eseries_vol = self._get_volume(volume['name_id']) - mapping = self.map_volume_to_host(volume, eseries_vol, - [initiator_name]) - - lun_id = mapping['lun'] - msg_fmt = {'id': volume['id'], 'initiator_name': initiator_name} - LOG.debug("Mapped volume %(id)s to the initiator %(initiator_name)s.", - msg_fmt) - - iscsi_details = self._get_iscsi_service_details() - iscsi_portal = self._get_iscsi_portal_for_vol(eseries_vol, - iscsi_details) - LOG.debug("Successfully fetched target details for volume %(id)s and " - "initiator %(initiator_name)s.", msg_fmt) - iqn = iscsi_portal['iqn'] - address = iscsi_portal['ip'] - port = iscsi_portal['tcp_port'] - properties = na_utils.get_iscsi_connection_properties(lun_id, volume, - iqn, address, - port) - if self.configuration.use_chap_auth: - if self._client.features.CHAP_AUTHENTICATION: - chap_username, chap_password = self._configure_chap(iqn) - properties['data']['auth_username'] = chap_username - properties['data']['auth_password'] = chap_password - properties['data']['auth_method'] = 'CHAP' - 
properties['data']['discovery_auth_username'] = chap_username - properties['data']['discovery_auth_password'] = chap_password - properties['data']['discovery_auth_method'] = 'CHAP' - else: - msg = _("E-series proxy API version %(current_version)s does " - "not support CHAP authentication. The proxy version " - "must be at least %(min_version)s.") - min_version = (self._client.features. - CHAP_AUTHENTICATION.minimum_version) - msg = msg % {'current_version': self._client.api_version, - 'min_version': min_version} - - LOG.info(msg) - raise exception.NetAppDriverException(msg) - return properties - - def _configure_chap(self, target_iqn): - chap_username = self.DEFAULT_CHAP_USER_NAME - chap_password = volume_utils.generate_password() - self._client.set_chap_authentication(target_iqn, - chap_username, - chap_password) - return chap_username, chap_password - - def _get_iscsi_service_details(self): - """Gets iscsi iqn, ip and port information.""" - ports = [] - hw_inventory = self._client.list_hardware_inventory() - iscsi_ports = hw_inventory.get('iscsiPorts') - if iscsi_ports: - for port in iscsi_ports: - if (port.get('ipv4Enabled') and port.get('iqn') and - port.get('ipv4Data') and - port['ipv4Data'].get('ipv4AddressData') and - port['ipv4Data']['ipv4AddressData'] - .get('ipv4Address') and port['ipv4Data'] - ['ipv4AddressData'].get('configState') - == 'configured'): - iscsi_det = {} - iscsi_det['ip'] =\ - port['ipv4Data']['ipv4AddressData']['ipv4Address'] - iscsi_det['iqn'] = port['iqn'] - iscsi_det['tcp_port'] = port.get('tcpListenPort') - iscsi_det['controller'] = port.get('controllerId') - ports.append(iscsi_det) - if not ports: - msg = _('No good iscsi portals found for %s.') - raise exception.NetAppDriverException( - msg % self._client.get_system_id()) - return ports - - def _get_iscsi_portal_for_vol(self, volume, portals, anyController=True): - """Get the iscsi portal info relevant to volume.""" - for portal in portals: - if portal.get('controller') == 
volume.get('currentManager'): - return portal - if anyController and portals: - return portals[0] - msg = _('No good iscsi portal found in supplied list for %s.') - raise exception.NetAppDriverException( - msg % self._client.get_system_id()) - - def _get_or_create_host(self, port_ids, host_type): - """Fetch or create a host by given port.""" - try: - host = self._get_host_with_matching_port(port_ids) - ht_def = self._get_host_type_definition(host_type) - if host.get('hostTypeIndex') != ht_def.get('index'): - try: - host = self._client.update_host_type( - host['hostRef'], ht_def) - except exception.NetAppDriverException as e: - LOG.warning("Unable to update host type for host with " - "label %(l)s. %(e)s", - {'l': host['label'], 'e': e.msg}) - return host - except exception.NotFound as e: - LOG.warning("Message - %s.", e.msg) - return self._create_host(port_ids, host_type) - - def _get_host_with_matching_port(self, port_ids): - """Gets or creates a host with given port id.""" - # Remove any extra colons - port_ids = [six.text_type(wwpn).replace(':', '') - for wwpn in port_ids] - hosts = self._client.list_hosts() - for port_id in port_ids: - for host in hosts: - if host.get('hostSidePorts'): - ports = host.get('hostSidePorts') - for port in ports: - address = port.get('address').upper().replace(':', '') - if address == port_id.upper(): - return host - msg = _("Host with ports %(ports)s not found.") - raise exception.NotFound(msg % {'ports': port_ids}) - - def _create_host(self, port_ids, host_type, host_group=None): - """Creates host on system with given initiator as port_id.""" - LOG.info("Creating host with ports %s.", port_ids) - host_label = utils.convert_uuid_to_es_fmt(uuid.uuid4()) - host_type = self._get_host_type_definition(host_type) - port_type = self.driver_protocol.lower() - return self._client.create_host_with_ports(host_label, - host_type, - port_ids, - group_id=host_group, - port_type=port_type) - - def _get_host_type_definition(self, host_type): - 
"""Gets supported host type if available on storage system.""" - host_types = self._client.list_host_types() - for ht in host_types: - if ht.get('name', 'unknown').lower() == host_type.lower(): - return ht - raise exception.NotFound(_("Host type %s not supported.") % host_type) - - def terminate_connection_iscsi(self, volume, connector, **kwargs): - """Disallow connection from connector.""" - eseries_vol = self._get_volume(volume['name_id']) - initiator = connector['initiator'] - host = self._get_host_with_matching_port([initiator]) - mappings = eseries_vol.get('listOfMappings', []) - - # There can only be one or zero mappings on a volume in E-Series - mapping = mappings[0] if mappings else None - - if not mapping: - raise eseries_exc.VolumeNotMapped(volume_id=volume['id'], - host=host['label']) - host_mapper.unmap_volume_from_host(self._client, volume, host, mapping) - - def get_volume_stats(self, refresh=False): - """Return the current state of the volume service.""" - if refresh: - if not self._ssc_stats: - self._update_ssc_info() - self._update_volume_stats() - - return self._stats - - def _update_volume_stats(self): - """Update volume statistics.""" - LOG.debug("Updating volume stats.") - data = dict() - data["volume_backend_name"] = self._backend_name - data["vendor_name"] = "NetApp" - data["driver_version"] = self.VERSION - data["storage_protocol"] = self.driver_protocol - data["pools"] = [] - - for storage_pool in self._get_storage_pools(): - cinder_pool = {} - cinder_pool["pool_name"] = storage_pool.get("label") - cinder_pool["QoS_support"] = False - cinder_pool["reserved_percentage"] = ( - self.configuration.reserved_percentage) - cinder_pool["max_over_subscription_ratio"] = ( - self.configuration.max_over_subscription_ratio) - tot_bytes = int(storage_pool.get("totalRaidedSpace", 0)) - used_bytes = int(storage_pool.get("usedSpace", 0)) - cinder_pool["provisioned_capacity_gb"] = used_bytes / units.Gi - cinder_pool["free_capacity_gb"] = ((tot_bytes - 
used_bytes) / - units.Gi) - cinder_pool["total_capacity_gb"] = tot_bytes / units.Gi - - pool_ssc_stats = self._ssc_stats.get( - storage_pool["volumeGroupRef"]) - - if pool_ssc_stats: - thin = pool_ssc_stats.get(self.THIN_UQ_SPEC) or False - cinder_pool.update(pool_ssc_stats) - else: - thin = False - cinder_pool["thin_provisioning_support"] = thin - # All E-Series pools support thick provisioning - cinder_pool["thick_provisioning_support"] = True - - data["pools"].append(cinder_pool) - - self._stats = data - self._garbage_collect_tmp_vols() - - def _create_asup(self, cinder_host): - if not self._client.features.AUTOSUPPORT: - LOG.info("E-series proxy API version %s does not support " - "autosupport logging.", self._client.api_version) - return - - event_source = ("Cinder driver %s" % self.DRIVER_NAME) - category = "provisioning" - event_description = "OpenStack Cinder connected to E-Series proxy" - asup_info = self._client.get_asup_info() - model = asup_info.get('model') - firmware_version = asup_info.get('firmware_version') - serial_numbers = asup_info.get('serial_numbers') - chassis_sn = asup_info.get('chassis_sn') - - key = ("openstack-%s-%s-%s" - % (cinder_host, serial_numbers[0], serial_numbers[1])) - - # The counter is being set here to a key-value combination - # comprised of serial numbers and cinder host with a default - # heartbeat of 1. The counter is set to inform the user that the - # key does not have a stale value. 
- self._client.set_counter("%s-heartbeat" % key, value=1) - data = { - 'computer-name': cinder_host, - 'event-source': event_source, - 'app-version': self._app_version, - 'category': category, - 'event-description': event_description, - 'controller1-serial': serial_numbers[0], - 'controller2-serial': serial_numbers[1], - 'chassis-serial-number': chassis_sn, - 'model': model, - 'system-version': firmware_version, - 'operating-mode': self._client.api_operating_mode - } - self._client.add_autosupport_data(key, data) - - @cinder_utils.synchronized("netapp_update_ssc_info", external=False) - def _update_ssc_info(self): - """Periodically runs to update ssc information from the backend. - - The self._ssc_stats attribute is updated with the following format. - { : {: }} - """ - LOG.info("Updating storage service catalog information for " - "backend '%s'", self._backend_name) - - relevant_pools = self._get_storage_pools() - - if self._client.features.SSC_API_V2: - self._update_ssc_info_v2(relevant_pools) - else: - self._update_ssc_info_v1(relevant_pools) - - def _update_ssc_info_v1(self, relevant_pools): - """Update ssc data using the legacy API - - :param relevant_pools: The pools that this driver cares about - """ - LOG.info("E-series proxy API version %(version)s does not " - "support full set of SSC extra specs. 
The proxy version" - " must be at at least %(min_version)s.", - {'version': self._client.api_version, - 'min_version': - self._client.features.SSC_API_V2.minimum_version}) - - self._ssc_stats = ( - self._update_ssc_disk_encryption(relevant_pools)) - self._ssc_stats = ( - self._update_ssc_disk_types(relevant_pools)) - self._ssc_stats = ( - self._update_ssc_raid_type(relevant_pools)) - - def _update_ssc_info_v2(self, relevant_pools): - """Update the ssc dictionary with ssc info for relevant pools - - :param relevant_pools: The pools that this driver cares about - """ - ssc_stats = copy.deepcopy(self._ssc_stats) - - storage_pool_labels = [pool['label'] for pool in relevant_pools] - - ssc_data = self._client.list_ssc_storage_pools() - ssc_data = [pool for pool in ssc_data - if pool['name'] in storage_pool_labels] - - for pool in ssc_data: - poolId = pool['poolId'] - if poolId not in ssc_stats: - ssc_stats[poolId] = {} - - pool_ssc_info = ssc_stats[poolId] - - pool_ssc_info['consistencygroup_support'] = True - - pool_ssc_info[self.ENCRYPTION_UQ_SPEC] = ( - six.text_type(pool['encrypted']).lower()) - - pool_ssc_info[self.SPINDLE_SPD_UQ_SPEC] = (pool['spindleSpeed']) - - flash_cache_capable = pool['flashCacheCapable'] - pool_ssc_info[self.FLASH_CACHE_UQ_SPEC] = ( - six.text_type(flash_cache_capable).lower()) - - # Data Assurance is not compatible with some backend types - da_capable = pool['dataAssuranceCapable'] and ( - self._is_data_assurance_supported()) - pool_ssc_info[self.DA_UQ_SPEC] = ( - six.text_type(da_capable).lower()) - - pool_ssc_info[self.RAID_UQ_SPEC] = ( - self.SSC_RAID_TYPE_MAPPING.get(pool['raidLevel'], 'unknown')) - - pool_ssc_info[self.THIN_UQ_SPEC] = ( - six.text_type(pool['thinProvisioningCapable']).lower()) - - if pool['pool'].get("driveMediaType") == 'ssd': - pool_ssc_info[self.DISK_TYPE_UQ_SPEC] = 'SSD' - else: - pool_ssc_info[self.DISK_TYPE_UQ_SPEC] = ( - self.SSC_DISK_TYPE_MAPPING.get( - pool['pool'].get('drivePhysicalType'), 'unknown')) - - 
self._ssc_stats = ssc_stats - - def _update_ssc_disk_types(self, storage_pools): - """Updates the given ssc dictionary with new disk type information. - - :param storage_pools: The storage pools this driver cares about - """ - ssc_stats = copy.deepcopy(self._ssc_stats) - all_disks = self._client.list_drives() - - pool_ids = set(pool.get("volumeGroupRef") for pool in storage_pools) - - relevant_disks = [x for x in all_disks - if x.get('currentVolumeGroupRef') in pool_ids] - for drive in relevant_disks: - current_vol_group = drive.get('currentVolumeGroupRef') - if current_vol_group not in ssc_stats: - ssc_stats[current_vol_group] = {} - - if drive.get("driveMediaType") == 'ssd': - ssc_stats[current_vol_group][self.DISK_TYPE_UQ_SPEC] = 'SSD' - else: - disk_type = drive.get('interfaceType').get('driveType') - ssc_stats[current_vol_group][self.DISK_TYPE_UQ_SPEC] = ( - self.SSC_DISK_TYPE_MAPPING.get(disk_type, 'unknown')) - - return ssc_stats - - def _update_ssc_disk_encryption(self, storage_pools): - """Updates the given ssc dictionary with new disk encryption information. - - :param storage_pools: The storage pools this driver cares about - """ - ssc_stats = copy.deepcopy(self._ssc_stats) - for pool in storage_pools: - current_vol_group = pool.get('volumeGroupRef') - if current_vol_group not in ssc_stats: - ssc_stats[current_vol_group] = {} - - ssc_stats[current_vol_group][self.ENCRYPTION_UQ_SPEC] = ( - six.text_type(pool['securityType'] == 'enabled').lower() - ) - - return ssc_stats - - def _update_ssc_raid_type(self, storage_pools): - """Updates the given ssc dictionary with new RAID type information. 
- - :param storage_pools: The storage pools this driver cares about - """ - ssc_stats = copy.deepcopy(self._ssc_stats) - for pool in storage_pools: - current_vol_group = pool.get('volumeGroupRef') - if current_vol_group not in ssc_stats: - ssc_stats[current_vol_group] = {} - - raid_type = pool.get('raidLevel') - ssc_stats[current_vol_group]['netapp_raid_type'] = ( - self.SSC_RAID_TYPE_MAPPING.get(raid_type, 'unknown')) - - return ssc_stats - - def _get_storage_pools(self): - """Retrieve storage pools that match user-configured search pattern.""" - - # Inform deprecation of legacy option. - if self.configuration.safe_get('netapp_storage_pools'): - msg = ("The option 'netapp_storage_pools' is deprecated and " - "will be removed in the future releases. Please use " - "the option 'netapp_pool_name_search_pattern' instead.") - versionutils.report_deprecated_feature(LOG, msg) - - pool_regex = na_utils.get_pool_name_filter_regex(self.configuration) - - storage_pools = self._client.list_storage_pools() - - filtered_pools = [] - for pool in storage_pools: - pool_name = pool['label'] - - if pool_regex.match(pool_name): - msg = ("Pool '%(pool_name)s' matches against regular " - "expression: %(pool_pattern)s") - LOG.debug(msg, {'pool_name': pool_name, - 'pool_pattern': pool_regex.pattern}) - filtered_pools.append(pool) - else: - msg = ("Pool '%(pool_name)s' does not match against regular " - "expression: %(pool_pattern)s") - LOG.debug(msg, {'pool_name': pool_name, - 'pool_pattern': pool_regex.pattern}) - - return filtered_pools - - def _get_sorted_available_storage_pools(self, size_gb): - """Returns storage pools sorted on available capacity.""" - size = size_gb * units.Gi - sorted_pools = sorted(self._get_storage_pools(), key=lambda x: - (int(x.get('totalRaidedSpace', 0)) - - int(x.get('usedSpace', 0))), reverse=True) - avl_pools = filter(lambda x: ((int(x.get('totalRaidedSpace', 0)) - - int(x.get('usedSpace', 0)) >= size)), - sorted_pools) - - if not avl_pools: - 
LOG.warning("No storage pool found with available capacity %s.", - size_gb) - return avl_pools - - def _is_thin_provisioned(self, volume): - """Determine if a volume is thin provisioned""" - return volume.get('objectType') == 'thinVolume' or volume.get( - 'thinProvisioned', False) - - def _get_pool_operation_progress(self, pool_id, action=None): - """Retrieve the progress of a long running operation on a pool - - The return will be a tuple containing: a bool representing whether - or not the operation is complete, a set of actions that are - currently running on the storage pool, and the estimated time - remaining in minutes. - - An action type may be passed in such that once no actions of that type - remain active on the pool, the operation will be considered - completed. If no action str is passed in, it is assumed that - multiple actions compose the operation, and none are terminal, - so the operation will not be considered completed until there are no - actions remaining to be completed on any volume on the pool. - - :param pool_id: The id of a storage pool - :param action: The anticipated action - :returns: A tuple (bool, set(str), int) - """ - actions = set() - eta = 0 - for progress in self._client.get_pool_operation_progress(pool_id): - actions.add(progress.get('currentAction')) - eta += progress.get('estimatedTimeToCompletion', 0) - if action is not None: - complete = action not in actions - else: - complete = not actions - return complete, actions, eta - - def extend_volume(self, volume, new_size): - """Extend an existing volume to the new size.""" - src_vol = self._get_volume(volume['name_id']) - thin_provisioned = self._is_thin_provisioned(src_vol) - self._client.expand_volume(src_vol['id'], new_size, thin_provisioned) - - # If the volume is thin or defined on a disk pool, there is no need - # to block. 
- if not (thin_provisioned or src_vol.get('diskPool')): - # Wait for the expansion to start - - def check_progress(): - complete, actions, eta = ( - self._get_pool_operation_progress(src_vol[ - 'volumeGroupRef'], - 'remappingDve')) - if complete: - raise loopingcall.LoopingCallDone() - else: - LOG.info("Waiting for volume expansion of %(vol)s to " - "complete, current remaining actions are " - "%(action)s. ETA: %(eta)s mins.", - {'vol': volume['name_id'], - 'action': ', '.join(actions), 'eta': eta}) - - checker = loopingcall.FixedIntervalLoopingCall( - check_progress) - - checker.start(interval=self.SLEEP_SECS, - initial_delay=self.SLEEP_SECS, - stop_on_exception=True).wait() - - def create_cgsnapshot(self, cgsnapshot, snapshots): - """Creates a cgsnapshot.""" - cg_id = cgsnapshot['consistencygroup_id'] - cg_name = utils.convert_uuid_to_es_fmt(cg_id) - - # Retrieve the E-Series consistency group - es_cg = self._get_consistencygroup_by_name(cg_name) - - # Define an E-Series CG Snapshot - es_snaphots = self._client.create_consistency_group_snapshot( - es_cg['id']) - - # Build the snapshot updates - snapshot_updates = list() - for snap in snapshots: - es_vol = self._get_volume(snap['volume']['id']) - for es_snap in es_snaphots: - if es_snap['baseVol'] == es_vol['id']: - snapshot_updates.append({ - 'id': snap['id'], - # Directly track the backend snapshot ID - 'provider_id': es_snap['id'], - 'status': 'available' - }) - - return None, snapshot_updates - - def delete_cgsnapshot(self, cgsnapshot, snapshots): - """Deletes a cgsnapshot.""" - - cg_id = cgsnapshot['consistencygroup_id'] - cg_name = utils.convert_uuid_to_es_fmt(cg_id) - - # Retrieve the E-Series consistency group - es_cg = self._get_consistencygroup_by_name(cg_name) - - # Find the smallest sequence number defined on the group - min_seq_num = min(es_cg['uniqueSequenceNumber']) - - es_snapshots = self._client.get_consistency_group_snapshots( - es_cg['id']) - es_snap_ids = set(snap.get('provider_id') for snap in 
snapshots) - - # We need to find a single snapshot that is a part of the CG snap - seq_num = None - for snap in es_snapshots: - if snap['id'] in es_snap_ids: - seq_num = snap['pitSequenceNumber'] - break - - if seq_num is None: - raise exception.CgSnapshotNotFound(cgsnapshot_id=cg_id) - - # Perform a full backend deletion of the cgsnapshot - if int(seq_num) <= int(min_seq_num): - self._client.delete_consistency_group_snapshot( - es_cg['id'], seq_num) - return None, None - else: - # Perform a soft-delete, removing this snapshot from cinder - # management, and marking it as available for deletion. - return cinder_utils.synchronized(cg_id)( - self._soft_delete_cgsnapshot)( - es_cg, seq_num) - - def _soft_delete_cgsnapshot(self, es_cg, snap_seq_num): - """Mark a cgsnapshot as available for deletion from the backend. - - E-Series snapshots cannot be deleted out of order, as older - snapshots in the snapshot group are dependent on the newer - snapshots. A "soft delete" results in the cgsnapshot being removed - from Cinder management, with the snapshot marked as available for - deletion once all snapshots dependent on it are also deleted. 
- - :param es_cg: E-Series consistency group - :param snap_seq_num: unique sequence number of the cgsnapshot - :return: an update to the snapshot index - """ - - index = self._get_soft_delete_map() - cg_ref = es_cg['id'] - if cg_ref in index: - bitset = na_utils.BitSet(int((index[cg_ref]))) - else: - bitset = na_utils.BitSet(0) - - seq_nums = ( - set([snap['pitSequenceNumber'] for snap in - self._client.get_consistency_group_snapshots(cg_ref)])) - - # Determine the relative index of the snapshot's sequence number - for i, seq_num in enumerate(sorted(seq_nums)): - if snap_seq_num == seq_num: - bitset.set(i) - break - - index_update = ( - self._cleanup_cg_snapshots(cg_ref, seq_nums, bitset)) - - self._merge_soft_delete_changes(index_update, None) - - return None, None - - def _cleanup_cg_snapshots(self, cg_ref, seq_nums, bitset): - """Delete cg snapshot images that are marked for removal - - The snapshot index tracks all snapshots that have been removed from - Cinder, and are therefore available for deletion when this operation - is possible. - - CG snapshots are tracked by unique sequence numbers that are - associated with 1 or more snapshot images. The sequence numbers are - tracked (relative to the 32 images allowed per group), within the - snapshot index. - - This method will purge CG snapshots that have been marked as - available for deletion within the backend persistent store. 
- - :param cg_ref: reference to an E-Series consistent group - :param seq_nums: set of unique sequence numbers associated with the - consistency group - :param bitset: the bitset representing which sequence numbers are - marked for deletion - :return: update for the snapshot index - """ - deleted = 0 - # Order by their sequence number, from oldest to newest - for i, seq_num in enumerate(sorted(seq_nums)): - if bitset.is_set(i): - self._client.delete_consistency_group_snapshot(cg_ref, - seq_num) - deleted += 1 - else: - # Snapshots must be deleted in order, so if the current - # snapshot is not pending deletion, we don't want to - # process any more - break - - if deleted: - # We need to update the bitset to reflect the fact that older - # snapshots have been deleted, so snapshot relative indexes - # have now been updated. - bitset >>= deleted - - LOG.debug('Deleted %(count)s snapshot images from ' - 'consistency group: %(grp)s.', {'count': deleted, - 'grp': cg_ref}) - # Update the index - return {cg_ref: repr(bitset)} - - def create_consistencygroup(self, cinder_cg): - """Define a consistency group.""" - self._create_consistency_group(cinder_cg) - - return {'status': 'available'} - - def _create_consistency_group(self, cinder_cg): - """Define a new consistency group on the E-Series backend""" - name = utils.convert_uuid_to_es_fmt(cinder_cg['id']) - return self._client.create_consistency_group(name) - - def _get_consistencygroup(self, cinder_cg): - """Retrieve an E-Series consistency group""" - name = utils.convert_uuid_to_es_fmt(cinder_cg['id']) - return self._get_consistencygroup_by_name(name) - - def _get_consistencygroup_by_name(self, name): - """Retrieve an E-Series consistency group by name""" - - for cg in self._client.list_consistency_groups(): - if name == cg['name']: - return cg - - raise exception.ConsistencyGroupNotFound(consistencygroup_id=name) - - def delete_consistencygroup(self, group, volumes): - """Deletes a consistency group.""" - - volume_update 
= list() - - for volume in volumes: - LOG.info('Deleting volume %s.', volume['id']) - volume_update.append({ - 'status': 'deleted', 'id': volume['id'], - }) - self.delete_volume(volume) - - try: - cg = self._get_consistencygroup(group) - except exception.ConsistencyGroupNotFound: - LOG.warning('Consistency group already deleted.') - else: - self._client.delete_consistency_group(cg['id']) - try: - self._merge_soft_delete_changes(None, [cg['id']]) - except (exception.NetAppDriverException, - eseries_exc.WebServiceException): - LOG.warning('Unable to remove CG from the deletion map.') - - model_update = {'status': 'deleted'} - - return model_update, volume_update - - def _update_consistency_group_members(self, es_cg, - add_volumes, remove_volumes): - """Add or remove consistency group members - - :param es_cg: The E-Series consistency group - :param add_volumes: A list of Cinder volumes to add to the - consistency group - :param remove_volumes: A list of Cinder volumes to remove from the - consistency group - :return: None - """ - for volume in remove_volumes: - es_vol = self._get_volume(volume['id']) - LOG.info( - 'Removing volume %(v)s from consistency group %(''cg)s.', - {'v': es_vol['label'], 'cg': es_cg['label']}) - self._client.remove_consistency_group_member(es_vol['id'], - es_cg['id']) - - for volume in add_volumes: - es_vol = self._get_volume(volume['id']) - LOG.info('Adding volume %(v)s to consistency group %(cg)s.', - {'v': es_vol['label'], 'cg': es_cg['label']}) - self._client.add_consistency_group_member( - es_vol['id'], es_cg['id']) - - def update_consistencygroup(self, group, - add_volumes, remove_volumes): - """Add or remove volumes from an existing consistency group""" - cg = self._get_consistencygroup(group) - - self._update_consistency_group_members( - cg, add_volumes, remove_volumes) - - return None, None, None - - def create_consistencygroup_from_src(self, group, volumes, - cgsnapshot, snapshots, - source_cg, source_vols): - """Define a 
consistency group based on an existing group - - Define a new consistency group from a source consistency group. If - only a source_cg is provided, then clone each base volume and add - it to a new consistency group. If a cgsnapshot is provided, - clone each snapshot image to a new volume and add it to the cg. - - :param group: The new consistency group to define - :param volumes: The volumes to add to the consistency group - :param cgsnapshot: The cgsnapshot to base the group on - :param snapshots: The list of snapshots on the source cg - :param source_cg: The source consistency group - :param source_vols: The volumes added to the source cg - """ - cg = self._create_consistency_group(group) - if cgsnapshot: - for vol, snap in zip(volumes, snapshots): - image = self._get_snapshot(snap) - self._create_volume_from_snapshot(vol, image) - else: - for vol, src in zip(volumes, source_vols): - es_vol = self._get_volume(src['id']) - es_snapshot = self._create_es_snapshot_for_clone(es_vol) - try: - self._create_volume_from_snapshot(vol, es_snapshot) - finally: - self._delete_es_snapshot(es_snapshot) - - self._update_consistency_group_members(cg, volumes, []) - - return None, None - - def _garbage_collect_tmp_vols(self): - """Removes tmp vols with no snapshots.""" - try: - if not na_utils.set_safe_attr(self, 'clean_job_running', True): - LOG.warning('Returning as clean tmp vol job already running.') - return - - for vol in self._client.list_volumes(): - label = vol['label'] - if (label.startswith('tmp-') and - not self._is_volume_containing_snaps(label)): - try: - self._client.delete_volume(vol['volumeRef']) - except exception.NetAppDriverException as e: - LOG.debug("Error deleting vol with label %(label)s:" - " %(error)s.", {'label': label, 'error': e}) - finally: - na_utils.set_safe_attr(self, 'clean_job_running', False) - - @cinder_utils.synchronized('manage_existing') - def manage_existing(self, volume, existing_ref): - """Brings an existing storage object under Cinder 
management.""" - vol = self._get_existing_vol_with_manage_ref(existing_ref) - label = utils.convert_uuid_to_es_fmt(volume['id']) - if label == vol['label']: - LOG.info("Volume with given ref %s need not be renamed during" - " manage operation.", existing_ref) - managed_vol = vol - else: - managed_vol = self._client.update_volume(vol['id'], label) - LOG.info("Manage operation completed for volume with new label" - " %(label)s and wwn %(wwn)s.", - {'label': label, 'wwn': managed_vol[self.WORLDWIDENAME]}) - - def manage_existing_get_size(self, volume, existing_ref): - """Return size of volume to be managed by manage_existing. - - When calculating the size, round up to the next GB. - """ - vol = self._get_existing_vol_with_manage_ref(existing_ref) - return int(math.ceil(float(vol['capacity']) / units.Gi)) - - def _get_existing_vol_with_manage_ref(self, existing_ref): - try: - vol_id = existing_ref.get('source-name') or existing_ref.get( - 'source-id') - if vol_id is None: - raise exception.InvalidInput(message='No valid identifier ' - 'was available for the ' - 'volume.') - return self._client.list_volume(vol_id) - except exception.InvalidInput: - reason = _('Reference must contain either source-name' - ' or source-id element.') - raise exception.ManageExistingInvalidReference( - existing_ref=existing_ref, reason=reason) - except exception.VolumeNotFound: - raise exception.ManageExistingInvalidReference( - existing_ref=existing_ref, - reason=_('Volume not found on configured storage pools.')) - - def unmanage(self, volume): - """Removes the specified volume from Cinder management. - - Does not delete the underlying backend storage object. Logs a - message to indicate the volume is no longer under Cinder's control. 
- """ - managed_vol = self._get_volume(volume['id']) - LOG.info("Unmanaged volume with current label %(label)s and wwn " - "%(wwn)s.", {'label': managed_vol['label'], - 'wwn': managed_vol[self.WORLDWIDENAME]}) diff --git a/cinder/volume/drivers/netapp/eseries/utils.py b/cinder/volume/drivers/netapp/eseries/utils.py deleted file mode 100644 index 25c053f3c..000000000 --- a/cinder/volume/drivers/netapp/eseries/utils.py +++ /dev/null @@ -1,63 +0,0 @@ -# Copyright (c) 2014 Navneet Singh. All rights reserved. -# Copyright (c) 2014 Clinton Knight. All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -""" -Utilities for NetApp E-series drivers. 
-""" - -import base64 -import binascii -import uuid - -import six - - -MULTI_ATTACH_HOST_GROUP_NAME = 'cinder-multi-attach' -NULL_REF = '0000000000000000000000000000000000000000' -MAX_LUNS_PER_HOST = 256 -MAX_LUNS_PER_HOST_GROUP = 256 - - -def encode_hex_to_base32(hex_string): - """Encodes hex to base32 bit as per RFC4648.""" - bin_form = binascii.unhexlify(hex_string) - return base64.b32encode(bin_form) - - -def decode_base32_to_hex(base32_string): - """Decodes base32 string to hex string.""" - bin_form = base64.b32decode(base32_string) - return binascii.hexlify(bin_form) - - -def convert_uuid_to_es_fmt(uuid_str): - """Converts uuid to e-series compatible name format.""" - uuid_base32 = encode_hex_to_base32(uuid.UUID(six.text_type(uuid_str)).hex) - es_label = uuid_base32.strip(b'=') - if six.PY3: - es_label = es_label.decode('ascii') - return es_label - - -def convert_es_fmt_to_uuid(es_label): - """Converts e-series name format to uuid.""" - if isinstance(es_label, six.text_type): - es_label = es_label.encode('utf-8') - if es_label.startswith(b'tmp-'): - es_label = es_label[4:] - es_label = es_label.ljust(32, b'=') - es_label = binascii.hexlify(base64.b32decode(es_label)) - if six.PY3: - es_label = es_label.decode('ascii') - return uuid.UUID(es_label) diff --git a/cinder/volume/drivers/netapp/options.py b/cinder/volume/drivers/netapp/options.py deleted file mode 100644 index e1c6d58a0..000000000 --- a/cinder/volume/drivers/netapp/options.py +++ /dev/null @@ -1,229 +0,0 @@ -# Copyright (c) 2012 NetApp, Inc. All rights reserved. -# Copyright (c) 2014 Navneet Singh. All rights reserved. -# Copyright (c) 2014 Bob Callaway. All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. 
You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -"""Contains configuration options for NetApp drivers. - -Common place to hold configuration options for all NetApp drivers. -Options need to be grouped into granular units to be able to be reused -by different modules and classes. This does not restrict declaring options in -individual modules. If options are not re usable then can be declared in -individual modules. It is recommended to Keep options at a single -place to ensure re usability and better management of configuration options. -""" - -from oslo_config import cfg -from oslo_config import types - -from cinder.volume import configuration as conf - -NETAPP_SIZE_MULTIPLIER_DEFAULT = 1.2 - -netapp_proxy_opts = [ - cfg.StrOpt('netapp_storage_family', - default='ontap_cluster', - choices=['ontap_7mode', 'ontap_cluster', 'eseries'], - help=('The storage family type used on the storage system; ' - 'valid values are ontap_7mode for using Data ONTAP ' - 'operating in 7-Mode, ontap_cluster for using ' - 'clustered Data ONTAP, or eseries for using E-Series.')), - cfg.StrOpt('netapp_storage_protocol', - choices=['iscsi', 'fc', 'nfs'], - help=('The storage protocol to be used on the data path with ' - 'the storage system.')), ] - -netapp_connection_opts = [ - cfg.StrOpt('netapp_server_hostname', - help='The hostname (or IP address) for the storage system or ' - 'proxy server.'), - cfg.IntOpt('netapp_server_port', - help=('The TCP port to use for communication with the storage ' - 'system or proxy server. 
If not specified, Data ONTAP ' - 'drivers will use 80 for HTTP and 443 for HTTPS; ' - 'E-Series will use 8080 for HTTP and 8443 for HTTPS.')), ] - -netapp_transport_opts = [ - cfg.StrOpt('netapp_transport_type', - default='http', - choices=['http', 'https'], - help=('The transport protocol used when communicating with ' - 'the storage system or proxy server.')), ] - -netapp_basicauth_opts = [ - cfg.StrOpt('netapp_login', - help=('Administrative user account name used to access the ' - 'storage system or proxy server.')), - cfg.StrOpt('netapp_password', - help=('Password for the administrative user account ' - 'specified in the netapp_login option.'), - secret=True), ] - -netapp_provisioning_opts = [ - cfg.FloatOpt('netapp_size_multiplier', - default=NETAPP_SIZE_MULTIPLIER_DEFAULT, - help=('The quantity to be multiplied by the requested ' - 'volume size to ensure enough space is available on ' - 'the virtual storage server (Vserver) to fulfill ' - 'the volume creation request. Note: this option ' - 'is deprecated and will be removed in favor of ' - '"reserved_percentage" in the Mitaka release.')), - cfg.StrOpt('netapp_lun_space_reservation', - default='enabled', - choices=['enabled', 'disabled'], - help=('This option determines if storage space is reserved ' - 'for LUN allocation. If enabled, LUNs are thick ' - 'provisioned. If space reservation is disabled, ' - 'storage space is allocated on demand.')), ] - -netapp_cluster_opts = [ - cfg.StrOpt('netapp_vserver', - help=('This option specifies the virtual storage server ' - '(Vserver) name on the storage cluster on which ' - 'provisioning of block storage volumes should occur.')), ] - -netapp_7mode_opts = [ - cfg.StrOpt('netapp_vfiler', - help=('The vFiler unit on which provisioning of block storage ' - 'volumes will be done. This option is only used by the ' - 'driver when connecting to an instance with a storage ' - 'family of Data ONTAP operating in 7-Mode. 
Only use this ' - 'option when utilizing the MultiStore feature on the ' - 'NetApp storage system.')), - cfg.StrOpt('netapp_partner_backend_name', - help=('The name of the config.conf stanza for a Data ONTAP ' - '(7-mode) HA partner. This option is only used by the ' - 'driver when connecting to an instance with a storage ' - 'family of Data ONTAP operating in 7-Mode, and it is ' - 'required if the storage protocol selected is FC.')), ] - -netapp_img_cache_opts = [ - cfg.IntOpt('thres_avl_size_perc_start', - default=20, - help=('If the percentage of available space for an NFS share ' - 'has dropped below the value specified by this option, ' - 'the NFS image cache will be cleaned.')), - cfg.IntOpt('thres_avl_size_perc_stop', - default=60, - help=('When the percentage of available space on an NFS share ' - 'has reached the percentage specified by this option, ' - 'the driver will stop clearing files from the NFS image ' - 'cache that have not been accessed in the last M ' - 'minutes, where M is the value of the ' - 'expiry_thres_minutes configuration option.')), - cfg.IntOpt('expiry_thres_minutes', - default=720, - help=('This option specifies the threshold for last access ' - 'time for images in the NFS image cache. When a cache ' - 'cleaning cycle begins, images in the cache that have ' - 'not been accessed in the last M minutes, where M is ' - 'the value of this parameter, will be deleted from the ' - 'cache to create free space on the NFS share.')), ] - -netapp_eseries_opts = [ - cfg.StrOpt('netapp_webservice_path', - default='/devmgr/v2', - help=('This option is used to specify the path to the E-Series ' - 'proxy application on a proxy server. 
The value is ' - 'combined with the value of the netapp_transport_type, ' - 'netapp_server_hostname, and netapp_server_port options ' - 'to create the URL used by the driver to connect to the ' - 'proxy application.')), - cfg.StrOpt('netapp_controller_ips', - help=('This option is only utilized when the storage family ' - 'is configured to eseries. This option is used to ' - 'restrict provisioning to the specified controllers. ' - 'Specify the value of this option to be a comma ' - 'separated list of controller hostnames or IP addresses ' - 'to be used for provisioning.')), - cfg.StrOpt('netapp_sa_password', - help=('Password for the NetApp E-Series storage array.'), - secret=True), - cfg.BoolOpt('netapp_enable_multiattach', - default=False, - help='This option specifies whether the driver should allow ' - 'operations that require multiple attachments to a ' - 'volume. An example would be live migration of servers ' - 'that have volumes attached. When enabled, this backend ' - 'is limited to 256 total volumes in order to ' - 'guarantee volumes can be accessed by more than one ' - 'host.'), -] -netapp_nfs_extra_opts = [ - cfg.StrOpt('netapp_copyoffload_tool_path', - help=('This option specifies the path of the NetApp copy ' - 'offload tool binary. Ensure that the binary has execute ' - 'permissions set which allow the effective user of the ' - 'cinder-volume process to execute the file.')), ] -netapp_san_opts = [ - cfg.StrOpt('netapp_lun_ostype', - help=('This option defines the type of operating system that' - ' will access a LUN exported from Data ONTAP; it is' - ' assigned to the LUN at the time it is created.')), - cfg.StrOpt('netapp_host_type', - deprecated_name='netapp_eseries_host_type', - help=('This option defines the type of operating system for' - ' all initiators that can access a LUN. 
This information' - ' is used when mapping LUNs to individual hosts or' - ' groups of hosts.')), - cfg.StrOpt('netapp_pool_name_search_pattern', - deprecated_opts=[cfg.DeprecatedOpt(name='netapp_volume_list'), - cfg.DeprecatedOpt(name='netapp_storage_pools') - ], - default="(.+)", - help=('This option is used to restrict provisioning to the ' - 'specified pools. Specify the value of ' - 'this option to be a regular expression which will be ' - 'applied to the names of objects from the storage ' - 'backend which represent pools in Cinder. This option ' - 'is only utilized when the storage protocol is ' - 'configured to use iSCSI or FC.')), ] - -netapp_replication_opts = [ - cfg.MultiOpt('netapp_replication_aggregate_map', - item_type=types.Dict(), - help="Multi opt of dictionaries to represent the aggregate " - "mapping between source and destination back ends when " - "using whole back end replication. For every " - "source aggregate associated with a cinder pool (NetApp " - "FlexVol), you would need to specify the destination " - "aggregate on the replication target device. A " - "replication target device is configured with the " - "configuration option replication_device. Specify this " - "option as many times as you have replication devices. 
" - "Each entry takes the standard dict config form: " - "netapp_replication_aggregate_map = " - "backend_id:," - "src_aggr_name1:dest_aggr_name1," - "src_aggr_name2:dest_aggr_name2,..."), - cfg.IntOpt('netapp_snapmirror_quiesce_timeout', - min=0, - default=3600, # One Hour - help='The maximum time in seconds to wait for existing ' - 'SnapMirror transfers to complete before aborting ' - 'during a failover.'), ] - -CONF = cfg.CONF -CONF.register_opts(netapp_proxy_opts, group=conf.SHARED_CONF_GROUP) -CONF.register_opts(netapp_connection_opts, group=conf.SHARED_CONF_GROUP) -CONF.register_opts(netapp_transport_opts, group=conf.SHARED_CONF_GROUP) -CONF.register_opts(netapp_basicauth_opts, group=conf.SHARED_CONF_GROUP) -CONF.register_opts(netapp_cluster_opts, group=conf.SHARED_CONF_GROUP) -CONF.register_opts(netapp_7mode_opts, group=conf.SHARED_CONF_GROUP) -CONF.register_opts(netapp_provisioning_opts, group=conf.SHARED_CONF_GROUP) -CONF.register_opts(netapp_img_cache_opts, group=conf.SHARED_CONF_GROUP) -CONF.register_opts(netapp_eseries_opts, group=conf.SHARED_CONF_GROUP) -CONF.register_opts(netapp_nfs_extra_opts, group=conf.SHARED_CONF_GROUP) -CONF.register_opts(netapp_san_opts, group=conf.SHARED_CONF_GROUP) -CONF.register_opts(netapp_replication_opts, group=conf.SHARED_CONF_GROUP) diff --git a/cinder/volume/drivers/netapp/utils.py b/cinder/volume/drivers/netapp/utils.py deleted file mode 100644 index 38be5df23..000000000 --- a/cinder/volume/drivers/netapp/utils.py +++ /dev/null @@ -1,568 +0,0 @@ -# Copyright (c) 2012 NetApp, Inc. All rights reserved. -# Copyright (c) 2014 Navneet Singh. All rights reserved. -# Copyright (c) 2014 Clinton Knight. All rights reserved. -# Copyright (c) 2015 Tom Barron. All rights reserved. -# Copyright (c) 2016 Michael Price. All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. 
You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -""" -Utilities for NetApp drivers. - -This module contains common utilities to be used by one or more -NetApp drivers to achieve the desired functionality. -""" - - -import decimal -import platform -import re -import socket - -from oslo_concurrency import processutils as putils -from oslo_log import log as logging -import six - -from cinder import context -from cinder import exception -from cinder.i18n import _ -from cinder import utils -from cinder import version -from cinder.volume import qos_specs -from cinder.volume import volume_types - - -LOG = logging.getLogger(__name__) - - -OPENSTACK_PREFIX = 'openstack-' -OBSOLETE_SSC_SPECS = {'netapp:raid_type': 'netapp_raid_type', - 'netapp:disk_type': 'netapp_disk_type'} -DEPRECATED_SSC_SPECS = {'netapp_unmirrored': 'netapp_mirrored', - 'netapp_nodedup': 'netapp_dedup', - 'netapp_nocompression': 'netapp_compression', - 'netapp_thick_provisioned': 'netapp_thin_provisioned'} -QOS_KEYS = frozenset(['maxIOPS', 'maxIOPSperGiB', 'maxBPS', 'maxBPSperGiB']) -BACKEND_QOS_CONSUMERS = frozenset(['back-end', 'both']) - -# Secret length cannot be less than 96 bits. http://tools.ietf.org/html/rfc3723 -CHAP_SECRET_LENGTH = 16 -DEFAULT_CHAP_USER_NAME = 'NetApp_iSCSI_CHAP_Username' - - -def validate_instantiation(**kwargs): - """Checks if a driver is instantiated other than by the unified driver. - - Helps check direct instantiation of netapp drivers. - Call this function in every netapp block driver constructor. 
- """ - if kwargs and kwargs.get('netapp_mode') == 'proxy': - return - LOG.warning("It is not the recommended way to use drivers by NetApp. " - "Please use NetAppDriver to achieve the functionality.") - - -def check_flags(required_flags, configuration): - """Ensure that the flags we care about are set.""" - for flag in required_flags: - if not getattr(configuration, flag, None): - msg = _('Configuration value %s is not set.') % flag - raise exception.InvalidInput(reason=msg) - - -def to_bool(val): - """Converts true, yes, y, 1 to True, False otherwise.""" - if val: - strg = six.text_type(val).lower() - if (strg == 'true' or strg == 'y' - or strg == 'yes' or strg == 'enabled' - or strg == '1'): - return True - else: - return False - else: - return False - - -@utils.synchronized("safe_set_attr") -def set_safe_attr(instance, attr, val): - """Sets the attribute in a thread safe manner. - - Returns if new val was set on attribute. - If attr already had the value then False. - """ - - if not instance or not attr: - return False - old_val = getattr(instance, attr, None) - if val is None and old_val is None: - return False - elif val == old_val: - return False - else: - setattr(instance, attr, val) - return True - - -def get_volume_extra_specs(volume): - """Provides extra specs associated with volume.""" - ctxt = context.get_admin_context() - type_id = volume.get('volume_type_id') - if type_id is None: - return {} - volume_type = volume_types.get_volume_type(ctxt, type_id) - if volume_type is None: - return {} - extra_specs = volume_type.get('extra_specs', {}) - log_extra_spec_warnings(extra_specs) - return extra_specs - - -def resolve_hostname(hostname): - """Resolves host name to IP address.""" - res = socket.getaddrinfo(hostname, None)[0] - family, socktype, proto, canonname, sockaddr = res - return sockaddr[0] - - -def round_down(value, precision='0.00'): - return float(decimal.Decimal(six.text_type(value)).quantize( - decimal.Decimal(precision), 
rounding=decimal.ROUND_DOWN)) - - -def log_extra_spec_warnings(extra_specs): - for spec in (set(extra_specs.keys() if extra_specs else []) & - set(OBSOLETE_SSC_SPECS.keys())): - LOG.warning('Extra spec %(old)s is obsolete. Use %(new)s ' - 'instead.', {'old': spec, - 'new': OBSOLETE_SSC_SPECS[spec]}) - for spec in (set(extra_specs.keys() if extra_specs else []) & - set(DEPRECATED_SSC_SPECS.keys())): - LOG.warning('Extra spec %(old)s is deprecated. Use %(new)s ' - 'instead.', {'old': spec, - 'new': DEPRECATED_SSC_SPECS[spec]}) - - -def get_iscsi_connection_properties(lun_id, volume, iqn, - address, port): - - properties = {} - properties['target_discovered'] = False - properties['target_portal'] = '%s:%s' % (address, port) - properties['target_iqn'] = iqn - properties['target_lun'] = int(lun_id) - properties['volume_id'] = volume['id'] - auth = volume['provider_auth'] - if auth: - (auth_method, auth_username, auth_secret) = auth.split() - properties['auth_method'] = auth_method - properties['auth_username'] = auth_username - properties['auth_password'] = auth_secret - return { - 'driver_volume_type': 'iscsi', - 'data': properties, - } - - -def validate_qos_spec(qos_spec): - """Check validity of Cinder qos spec for our backend.""" - if qos_spec is None: - return - normalized_qos_keys = [key.lower() for key in QOS_KEYS] - keylist = [] - for key, value in qos_spec.items(): - lower_case_key = key.lower() - if lower_case_key not in normalized_qos_keys: - msg = _('Unrecognized QOS keyword: "%s"') % key - raise exception.Invalid(msg) - keylist.append(lower_case_key) - # Modify the following check when we allow multiple settings in one spec. 
- if len(keylist) > 1: - msg = _('Only one limit can be set in a QoS spec.') - raise exception.Invalid(msg) - - -def get_volume_type_from_volume(volume): - """Provides volume type associated with volume.""" - type_id = volume.get('volume_type_id') - if type_id is None: - return {} - ctxt = context.get_admin_context() - return volume_types.get_volume_type(ctxt, type_id) - - -def map_qos_spec(qos_spec, volume): - """Map Cinder QOS spec to limit/throughput-value as used in client API.""" - if qos_spec is None: - return None - - qos_spec = map_dict_to_lower(qos_spec) - spec = dict(policy_name=get_qos_policy_group_name(volume), - max_throughput=None) - - # QoS specs are exclusive of one another. - if 'maxiops' in qos_spec: - spec['max_throughput'] = '%siops' % qos_spec['maxiops'] - elif 'maxiopspergib' in qos_spec: - spec['max_throughput'] = '%siops' % six.text_type( - int(qos_spec['maxiopspergib']) * int(volume['size'])) - elif 'maxbps' in qos_spec: - spec['max_throughput'] = '%sB/s' % qos_spec['maxbps'] - elif 'maxbpspergib' in qos_spec: - spec['max_throughput'] = '%sB/s' % six.text_type( - int(qos_spec['maxbpspergib']) * int(volume['size'])) - - return spec - - -def map_dict_to_lower(input_dict): - """Return an equivalent to the input dictionary with lower-case keys.""" - lower_case_dict = {} - for key in input_dict: - lower_case_dict[key.lower()] = input_dict[key] - return lower_case_dict - - -def get_qos_policy_group_name(volume): - """Return the name of backend QOS policy group based on its volume id.""" - if 'id' in volume: - return OPENSTACK_PREFIX + volume['id'] - return None - - -def get_qos_policy_group_name_from_info(qos_policy_group_info): - """Return the name of a QOS policy group given qos policy group info.""" - if qos_policy_group_info is None: - return None - legacy = qos_policy_group_info.get('legacy') - if legacy is not None: - return legacy['policy_name'] - spec = qos_policy_group_info.get('spec') - if spec is not None: - return spec['policy_name'] 
- return None - - -def get_pool_name_filter_regex(configuration): - """Build the regex for filtering pools by name - - :param configuration: The volume driver configuration - :raise InvalidConfigurationValue: if configured regex pattern is invalid - :returns: A compiled regex for filtering pool names - """ - - # If the configuration parameter is specified as an empty string - # (interpreted as matching all pools), we replace it here with - # (.+) to be explicit with CSV compatibility support implemented below. - pool_patterns = configuration.netapp_pool_name_search_pattern or '(.+)' - - # Strip whitespace from start/end and then 'or' all regex patterns - pool_patterns = '|'.join(['^' + pool_pattern.strip('^$ \t') + '$' for - pool_pattern in pool_patterns.split(',')]) - - try: - return re.compile(pool_patterns) - except re.error: - raise exception.InvalidConfigurationValue( - option='netapp_pool_name_search_pattern', - value=configuration.netapp_pool_name_search_pattern) - - -def get_valid_qos_policy_group_info(volume, extra_specs=None): - """Given a volume, return information for QOS provisioning.""" - info = dict(legacy=None, spec=None) - try: - volume_type = get_volume_type_from_volume(volume) - except KeyError: - LOG.exception('Cannot get QoS spec for volume %s.', volume['id']) - return info - if volume_type is None: - return info - if extra_specs is None: - extra_specs = volume_type.get('extra_specs', {}) - info['legacy'] = get_legacy_qos_policy(extra_specs) - info['spec'] = get_valid_backend_qos_spec_from_volume_type(volume, - volume_type) - msg = 'QoS policy group info for volume %(vol)s: %(info)s' - LOG.debug(msg, {'vol': volume['name'], 'info': info}) - check_for_invalid_qos_spec_combination(info, volume_type) - return info - - -def get_valid_backend_qos_spec_from_volume_type(volume, volume_type): - """Given a volume type, return the associated Cinder QoS spec.""" - spec_key_values = get_backend_qos_spec_from_volume_type(volume_type) - if spec_key_values is 
None: - return None - validate_qos_spec(spec_key_values) - return map_qos_spec(spec_key_values, volume) - - -def get_backend_qos_spec_from_volume_type(volume_type): - qos_specs_id = volume_type.get('qos_specs_id') - if qos_specs_id is None: - return None - ctxt = context.get_admin_context() - qos_spec = qos_specs.get_qos_specs(ctxt, qos_specs_id) - if qos_spec is None: - return None - consumer = qos_spec['consumer'] - # Front end QoS specs are handled by libvirt and we ignore them here. - if consumer not in BACKEND_QOS_CONSUMERS: - return None - spec_key_values = qos_spec['specs'] - return spec_key_values - - -def check_for_invalid_qos_spec_combination(info, volume_type): - """Invalidate QOS spec if both legacy and non-legacy info is present.""" - if info['legacy'] and info['spec']: - msg = _('Conflicting QoS specifications in volume type ' - '%s: when QoS spec is associated to volume ' - 'type, legacy "netapp:qos_policy_group" is not allowed in ' - 'the volume type extra specs.') % volume_type['id'] - raise exception.Invalid(msg) - - -def get_legacy_qos_policy(extra_specs): - """Return legacy qos policy information if present in extra specs.""" - external_policy_name = extra_specs.get('netapp:qos_policy_group') - if external_policy_name is None: - return None - return dict(policy_name=external_policy_name) - - -class hashabledict(dict): - """A hashable dictionary that is comparable (i.e. in unit tests, etc.)""" - def __hash__(self): - return hash(tuple(sorted(self.items()))) - - -class OpenStackInfo(object): - """OS/distribution, release, and version. - - NetApp uses these fields as content for EMS log entry. 
- """ - - PACKAGE_NAME = 'python-cinder' - - def __init__(self): - self._version = 'unknown version' - self._release = 'unknown release' - self._vendor = 'unknown vendor' - self._platform = 'unknown platform' - - def _update_version_from_version_string(self): - try: - self._version = version.version_info.version_string() - except Exception: - pass - - def _update_release_from_release_string(self): - try: - self._release = version.version_info.release_string() - except Exception: - pass - - def _update_platform(self): - try: - self._platform = platform.platform() - except Exception: - pass - - @staticmethod - def _get_version_info_version(): - return version.version_info.version - - @staticmethod - def _get_version_info_release(): - return version.version_info.release - - def _update_info_from_version_info(self): - try: - ver = self._get_version_info_version() - if ver: - self._version = ver - except Exception: - pass - try: - rel = self._get_version_info_release() - if rel: - self._release = rel - except Exception: - pass - - # RDO, RHEL-OSP, Mirantis on Redhat, SUSE - def _update_info_from_rpm(self): - LOG.debug('Trying rpm command.') - try: - out, err = putils.execute("rpm", "-q", "--queryformat", - "'%{version}\t%{release}\t%{vendor}'", - self.PACKAGE_NAME) - if not out: - LOG.info('No rpm info found for %(pkg)s package.', { - 'pkg': self.PACKAGE_NAME}) - return False - parts = out.split() - self._version = parts[0] - self._release = parts[1] - self._vendor = ' '.join(parts[2::]) - return True - except Exception as e: - LOG.info('Could not run rpm command: %(msg)s.', {'msg': e}) - return False - - # ubuntu, mirantis on ubuntu - def _update_info_from_dpkg(self): - LOG.debug('Trying dpkg-query command.') - try: - _vendor = None - out, err = putils.execute("dpkg-query", "-W", "-f='${Version}'", - self.PACKAGE_NAME) - if not out: - LOG.info('No dpkg-query info found for %(pkg)s package.', - {'pkg': self.PACKAGE_NAME}) - return False - # debian format: 
[epoch:]upstream_version[-debian_revision] - deb_version = out - # in case epoch or revision is missing, copy entire string - _release = deb_version - if ':' in deb_version: - deb_epoch, upstream_version = deb_version.split(':') - _release = upstream_version - if '-' in deb_version: - deb_revision = deb_version.split('-')[1] - _vendor = deb_revision - self._release = _release - if _vendor: - self._vendor = _vendor - return True - except Exception as e: - LOG.info('Could not run dpkg-query command: %(msg)s.', { - 'msg': e}) - return False - - def _update_openstack_info(self): - self._update_version_from_version_string() - self._update_release_from_release_string() - self._update_platform() - # some distributions override with more meaningful information - self._update_info_from_version_info() - # see if we have still more targeted info from rpm or apt - found_package = self._update_info_from_rpm() - if not found_package: - self._update_info_from_dpkg() - - def info(self): - self._update_openstack_info() - return '%(version)s|%(release)s|%(vendor)s|%(platform)s' % { - 'version': self._version, 'release': self._release, - 'vendor': self._vendor, 'platform': self._platform} - - -class Features(object): - - def __init__(self): - self.defined_features = set() - - def add_feature(self, name, supported=True, min_version=None): - if not isinstance(supported, bool): - raise TypeError("Feature value must be a bool type.") - self.defined_features.add(name) - setattr(self, name, FeatureState(supported, min_version)) - - def __getattr__(self, name): - # NOTE(cknight): Needed to keep pylint happy. 
- raise AttributeError - - -class FeatureState(object): - - def __init__(self, supported=True, minimum_version=None): - """Represents the current state of enablement for a Feature - - :param supported: True if supported, false otherwise - :param minimum_version: The minimum version that this feature is - supported at - """ - self.supported = supported - self.minimum_version = minimum_version - - def __nonzero__(self): - """Allow a FeatureState object to be tested for truth value - - :returns: True if the feature is supported, otherwise False - """ - return self.supported - - def __bool__(self): - """py3 Allow a FeatureState object to be tested for truth value - - :returns: True if the feature is supported, otherwise False - """ - return self.supported - - -class BitSet(object): - def __init__(self, value=0): - self._value = value - - def set(self, bit): - self._value |= 1 << bit - return self - - def unset(self, bit): - self._value &= ~(1 << bit) - return self - - def is_set(self, bit): - return self._value & 1 << bit - - def __and__(self, other): - self._value &= other - return self - - def __or__(self, other): - self._value |= other - return self - - def __invert__(self): - self._value = ~self._value - return self - - def __xor__(self, other): - self._value ^= other - return self - - def __lshift__(self, other): - self._value <<= other - return self - - def __rshift__(self, other): - self._value >>= other - return self - - def __int__(self): - return self._value - - def __str__(self): - return bin(self._value) - - def __repr__(self): - return str(self._value) - - def __eq__(self, other): - return (isinstance(other, self.__class__) and self._value == - other._value) or self._value == int(other) - - def __ne__(self, other): - return not self.__eq__(other) diff --git a/cinder/volume/drivers/nexenta/__init__.py b/cinder/volume/drivers/nexenta/__init__.py deleted file mode 100644 index e69de29bb..000000000 diff --git a/cinder/volume/drivers/nexenta/iscsi.py 
b/cinder/volume/drivers/nexenta/iscsi.py deleted file mode 100644 index 0240f22d2..000000000 --- a/cinder/volume/drivers/nexenta/iscsi.py +++ /dev/null @@ -1,727 +0,0 @@ -# Copyright 2016 Nexenta Systems, Inc. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import six - -from oslo_log import log as logging -from oslo_utils import excutils - -from cinder import exception -from cinder.i18n import _ -from cinder import interface -from cinder.volume import driver -from cinder.volume.drivers.nexenta import jsonrpc -from cinder.volume.drivers.nexenta import options -from cinder.volume.drivers.nexenta import utils - -VERSION = '1.3.1' -LOG = logging.getLogger(__name__) - - -@interface.volumedriver -class NexentaISCSIDriver(driver.ISCSIDriver): - """Executes volume driver commands on Nexenta Appliance. - - Version history: - - .. code-block:: none - - 1.0.0 - Initial driver version. - 1.0.1 - Fixed bug #1236626: catch "does not exist" exception of - lu_exists. - 1.1.0 - Changed class name to NexentaISCSIDriver. - 1.1.1 - Ignore "does not exist" exception of nms.snapshot.destroy. - 1.1.2 - Optimized create_cloned_volume, replaced zfs send recv with zfs - clone. - 1.1.3 - Extended volume stats provided by _update_volume_stats method. - 1.2.0 - Added volume migration with storage assist method. 
- 1.2.1 - Fixed bug #1263258: now migrate_volume update provider_location - of migrated volume; after migrating volume migrate_volume - destroy snapshot on migration destination. - 1.3.0 - Added retype method. - 1.3.0.1 - Target creation refactor. - 1.3.1 - Added ZFS cleanup. - """ - - VERSION = VERSION - - # ThirdPartySystems wiki page - CI_WIKI_NAME = "Nexenta_CI" - - def __init__(self, *args, **kwargs): - super(NexentaISCSIDriver, self).__init__(*args, **kwargs) - self.nms = None - self.targets = {} - if self.configuration: - self.configuration.append_config_values( - options.NEXENTA_CONNECTION_OPTS) - self.configuration.append_config_values( - options.NEXENTA_ISCSI_OPTS) - self.configuration.append_config_values( - options.NEXENTA_DATASET_OPTS) - self.configuration.append_config_values( - options.NEXENTA_RRMGR_OPTS) - self.nms_protocol = self.configuration.nexenta_rest_protocol - self.nms_host = self.configuration.nexenta_host - self.nms_port = self.configuration.nexenta_rest_port - self.nms_user = self.configuration.nexenta_user - self.nms_password = self.configuration.nexenta_password - self.volume = self.configuration.nexenta_volume - self.volume_compression = ( - self.configuration.nexenta_dataset_compression) - self.volume_deduplication = self.configuration.nexenta_dataset_dedup - self.volume_description = ( - self.configuration.nexenta_dataset_description) - self.rrmgr_compression = self.configuration.nexenta_rrmgr_compression - self.rrmgr_tcp_buf_size = self.configuration.nexenta_rrmgr_tcp_buf_size - self.rrmgr_connections = self.configuration.nexenta_rrmgr_connections - self.iscsi_target_portal_port = ( - self.configuration.nexenta_iscsi_target_portal_port) - - self._needless_objects = set() - - @property - def backend_name(self): - backend_name = None - if self.configuration: - backend_name = self.configuration.safe_get('volume_backend_name') - if not backend_name: - backend_name = self.__class__.__name__ - return backend_name - - def do_setup(self, 
context): - if self.nms_protocol == 'auto': - protocol, auto = 'http', True - else: - protocol, auto = self.nms_protocol, False - self.nms = jsonrpc.NexentaJSONProxy( - protocol, self.nms_host, self.nms_port, '/rest/nms', self.nms_user, - self.nms_password, auto=auto) - - def check_for_setup_error(self): - """Verify that the volume for our zvols exists. - - :raise: :py:exc:`LookupError` - """ - if not self.nms.volume.object_exists(self.volume): - raise LookupError(_("Volume %s does not exist in Nexenta SA") % - self.volume) - - def _get_zvol_name(self, volume_name): - """Return zvol name that corresponds given volume name.""" - return '%s/%s' % (self.volume, volume_name) - - def _create_target(self, target_idx): - target_name = '%s%s-%i' % ( - self.configuration.nexenta_target_prefix, - self.nms_host, - target_idx - ) - target_group_name = self._get_target_group_name(target_name) - - if not self._target_exists(target_name): - try: - self.nms.iscsitarget.create_target({ - 'target_name': target_name}) - except exception.NexentaException as exc: - if 'already' in exc.args[0]: - LOG.info('Ignored target creation error "%s" while ' - 'ensuring export.', - exc) - else: - raise - if not self._target_group_exists(target_group_name): - try: - self.nms.stmf.create_targetgroup(target_group_name) - except exception.NexentaException as exc: - if ('already' in exc.args[0]): - LOG.info('Ignored target group creation error "%s" ' - 'while ensuring export.', - exc) - else: - raise - if not self._target_member_in_target_group(target_group_name, - target_name): - try: - self.nms.stmf.add_targetgroup_member(target_group_name, - target_name) - except exception.NexentaException as exc: - if ('already' in exc.args[0]): - LOG.info('Ignored target group member addition error ' - '"%s" while ensuring export.', - exc) - else: - raise - - self.targets[target_name] = [] - return target_name - - def _get_target_name(self, volume): - """Return iSCSI target name with least LUs.""" - 
provider_location = volume.get('provider_location') - target_names = self.targets.keys() - if provider_location: - target_name = provider_location.split(',1 ')[1].split(' ')[0] - if not(self.targets.get(target_name)): - self.targets[target_name] = [] - if not(volume['name'] in self.targets[target_name]): - self.targets[target_name].append(volume['name']) - elif not(target_names): - # create first target and target group - target_name = self._create_target(0) - self.targets[target_name].append(volume['name']) - else: - target_name = target_names[0] - for target in target_names: - if len(self.targets[target]) < len(self.targets[target_name]): - target_name = target - if len(self.targets[target_name]) >= 20: - # create new target and target group - target_name = self._create_target(len(target_names)) - if not(volume['name'] in self.targets[target_name]): - self.targets[target_name].append(volume['name']) - return target_name - - def _get_target_group_name(self, target_name): - """Return Nexenta iSCSI target group name for volume.""" - return target_name.replace( - self.configuration.nexenta_target_prefix, - self.configuration.nexenta_target_group_prefix - ) - - @staticmethod - def _get_clone_snapshot_name(volume): - """Return name for snapshot that will be used to clone the volume.""" - return 'cinder-clone-snapshot-%(id)s' % volume - - @staticmethod - def _is_clone_snapshot_name(snapshot): - """Check if snapshot is created for cloning.""" - name = snapshot.split('@')[-1] - return name.startswith('cinder-clone-snapshot-') - - def create_volume(self, volume): - """Create a zvol on appliance. - - :param volume: volume reference - :return: model update dict for volume reference - """ - self.nms.zvol.create( - self._get_zvol_name(volume['name']), - '%sG' % (volume['size'],), - six.text_type(self.configuration.nexenta_blocksize), - self.configuration.nexenta_sparse) - - def extend_volume(self, volume, new_size): - """Extend an existing volume. 
- - :param volume: volume reference - :param new_size: volume new size in GB - """ - LOG.info('Extending volume: %(id)s New size: %(size)s GB', - {'id': volume['id'], 'size': new_size}) - self.nms.zvol.set_child_prop(self._get_zvol_name(volume['name']), - 'volsize', '%sG' % new_size) - - def delete_volume(self, volume): - """Destroy a zvol on appliance. - - :param volume: volume reference - """ - volume_name = self._get_zvol_name(volume['name']) - try: - props = self.nms.zvol.get_child_props(volume_name, 'origin') or {} - self.nms.zvol.destroy(volume_name, '') - except exception.NexentaException as exc: - if 'does not exist' in exc.args[0]: - LOG.info('Volume %s does not exist, it ' - 'seems it was already deleted.', volume_name) - return - if 'zvol has children' in exc.args[0]: - self._mark_as_garbage(volume_name) - LOG.info('Volume %s will be deleted later.', volume_name) - return - raise - origin = props.get('origin') - self._collect_garbage(origin) - - def create_cloned_volume(self, volume, src_vref): - """Creates a clone of the specified volume. - - :param volume: new volume reference - :param src_vref: source volume reference - """ - snapshot = {'volume_name': src_vref['name'], - 'name': self._get_clone_snapshot_name(volume), - 'volume_size': src_vref['size']} - LOG.debug('Creating temp snapshot of the original volume: ' - '%(volume_name)s@%(name)s', snapshot) - # We don't delete this snapshot, because this snapshot will be origin - # of new volume. This snapshot will be automatically promoted by NMS - # when user will delete origin volume. But when cloned volume deleted - # we check its origin property and delete source snapshot if needed. 
- self.create_snapshot(snapshot) - try: - self.create_volume_from_snapshot(volume, snapshot) - self._mark_as_garbage('@'.join( - (self._get_zvol_name(src_vref['name']), snapshot['name']))) - except exception.NexentaException: - with excutils.save_and_reraise_exception(): - LOG.exception( - 'Volume creation failed, deleting created snapshot ' - '%(volume_name)s@%(name)s', snapshot) - try: - self.delete_snapshot(snapshot) - except (exception.NexentaException, exception.SnapshotIsBusy): - LOG.warning('Failed to delete zfs snapshot ' - '%(volume_name)s@%(name)s', snapshot) - raise - - def _get_zfs_send_recv_cmd(self, src, dst): - """Returns rrmgr command for source and destination.""" - return utils.get_rrmgr_cmd(src, dst, - compression=self.rrmgr_compression, - tcp_buf_size=self.rrmgr_tcp_buf_size, - connections=self.rrmgr_connections) - - @staticmethod - def get_nms_for_url(url): - """Returns initialized nms object for url.""" - auto, scheme, user, password, host, port, path = ( - utils.parse_nms_url(url)) - return jsonrpc.NexentaJSONProxy(scheme, host, port, path, user, - password, auto=auto) - - def migrate_volume(self, ctxt, volume, host): - """Migrate if volume and host are managed by Nexenta appliance. 
- - :param ctxt: context - :param volume: a dictionary describing the volume to migrate - :param host: a dictionary describing the host to migrate to - """ - LOG.debug('Enter: migrate_volume: id=%(id)s, host=%(host)s', - {'id': volume['id'], 'host': host}) - false_ret = (False, None) - - if volume['status'] not in ('available', 'retyping'): - return false_ret - - if 'capabilities' not in host: - return false_ret - - capabilities = host['capabilities'] - - if ('location_info' not in capabilities or - 'iscsi_target_portal_port' not in capabilities or - 'nms_url' not in capabilities): - return false_ret - - nms_url = capabilities['nms_url'] - dst_parts = capabilities['location_info'].split(':') - - if (capabilities.get('vendor_name') != 'Nexenta' or - dst_parts[0] != self.__class__.__name__ or - capabilities['free_capacity_gb'] < volume['size']): - return false_ret - - dst_host, dst_volume = dst_parts[1:] - - ssh_bound = False - ssh_bindings = self.nms.appliance.ssh_list_bindings() - for bind in ssh_bindings: - if dst_host.startswith(ssh_bindings[bind][3]): - ssh_bound = True - break - if not ssh_bound: - LOG.warning("Remote NexentaStor appliance at %s should be " - "SSH-bound.", dst_host) - - # Create temporary snapshot of volume on NexentaStor Appliance. - snapshot = { - 'volume_name': volume['name'], - 'name': utils.get_migrate_snapshot_name(volume) - } - self.create_snapshot(snapshot) - - src = '%(volume)s/%(zvol)s@%(snapshot)s' % { - 'volume': self.volume, - 'zvol': volume['name'], - 'snapshot': snapshot['name'] - } - dst = ':'.join([dst_host, dst_volume]) - - try: - self.nms.appliance.execute(self._get_zfs_send_recv_cmd(src, dst)) - except exception.NexentaException as exc: - LOG.warning("Cannot send source snapshot %(src)s to " - "destination %(dst)s. 
Reason: %(exc)s", - {'src': src, 'dst': dst, 'exc': exc}) - return false_ret - finally: - try: - self.delete_snapshot(snapshot) - except exception.NexentaException as exc: - LOG.warning("Cannot delete temporary source snapshot " - "%(src)s on NexentaStor Appliance: %(exc)s", - {'src': src, 'exc': exc}) - try: - self.delete_volume(volume) - except exception.NexentaException as exc: - LOG.warning("Cannot delete source volume %(volume)s on " - "NexentaStor Appliance: %(exc)s", - {'volume': volume['name'], 'exc': exc}) - - dst_nms = self.get_nms_for_url(nms_url) - dst_snapshot = '%s/%s@%s' % (dst_volume, volume['name'], - snapshot['name']) - try: - dst_nms.snapshot.destroy(dst_snapshot, '') - except exception.NexentaException as exc: - LOG.warning("Cannot delete temporary destination snapshot " - "%(dst)s on NexentaStor Appliance: %(exc)s", - {'dst': dst_snapshot, 'exc': exc}) - return True, None - - def retype(self, context, volume, new_type, diff, host): - """Convert the volume to be of the new type. - - :param context: Context - :param volume: A dictionary describing the volume to migrate - :param new_type: A dictionary describing the volume type to convert to - :param diff: A dictionary with the difference between the two types - :param host: A dictionary describing the host to migrate to, where - host['host'] is its name, and host['capabilities'] is a - dictionary of its reported capabilities. 
- """ - LOG.debug('Retype volume request %(vol)s to be %(type)s ' - '(host: %(host)s), diff %(diff)s.', - {'vol': volume['name'], - 'type': new_type, - 'host': host, - 'diff': diff}) - - options = dict( - compression='compression', - dedup='dedup', - description='nms:description' - ) - - retyped = False - migrated = False - - capabilities = host['capabilities'] - src_backend = self.__class__.__name__ - dst_backend = capabilities['location_info'].split(':')[0] - if src_backend != dst_backend: - LOG.warning('Cannot retype from %(src_backend)s to ' - '%(dst_backend)s.', - {'src_backend': src_backend, - 'dst_backend': dst_backend}) - return False - - hosts = (volume['host'], host['host']) - old, new = hosts - if old != new: - migrated, provider_location = self.migrate_volume( - context, volume, host) - - if not migrated: - nms = self.nms - else: - nms_url = capabilities['nms_url'] - nms = self.get_nms_for_url(nms_url) - - zvol = '%s/%s' % ( - capabilities['location_info'].split(':')[-1], volume['name']) - - for opt in options: - old, new = diff.get('extra_specs').get(opt, (False, False)) - if old != new: - LOG.debug('Changing %(opt)s from %(old)s to %(new)s.', - {'opt': opt, 'old': old, 'new': new}) - try: - nms.zvol.set_child_prop( - zvol, options[opt], new) - retyped = True - except exception.NexentaException: - LOG.error('Error trying to change %(opt)s' - ' from %(old)s to %(new)s', - {'opt': opt, 'old': old, 'new': new}) - return False, None - return retyped or migrated, None - - def create_snapshot(self, snapshot): - """Create snapshot of existing zvol on appliance. - - :param snapshot: snapshot reference - """ - self.nms.zvol.create_snapshot( - self._get_zvol_name(snapshot['volume_name']), - snapshot['name'], '') - - def create_volume_from_snapshot(self, volume, snapshot): - """Create new volume from other's snapshot on appliance. 
- - :param volume: reference of volume to be created - :param snapshot: reference of source snapshot - """ - self.nms.zvol.clone( - '%s@%s' % (self._get_zvol_name(snapshot['volume_name']), - snapshot['name']), - self._get_zvol_name(volume['name'])) - if (('size' in volume) and ( - volume['size'] > snapshot['volume_size'])): - self.extend_volume(volume, volume['size']) - - def delete_snapshot(self, snapshot): - """Delete volume's snapshot on appliance. - - :param snapshot: snapshot reference - """ - volume_name = self._get_zvol_name(snapshot['volume_name']) - snapshot_name = '%s@%s' % (volume_name, snapshot['name']) - try: - self.nms.snapshot.destroy(snapshot_name, '') - except exception.NexentaException as exc: - if "does not exist" in exc.args[0]: - LOG.info('Snapshot %s does not exist, it seems it was ' - 'already deleted.', snapshot_name) - return - elif "snapshot has dependent clones" in exc.args[0]: - self._mark_as_garbage(snapshot_name) - LOG.info('Snapshot %s has dependent clones, will be ' - 'deleted later.', snapshot_name) - return - raise - self._collect_garbage(volume_name) - - def local_path(self, volume): - """Return local path to existing local volume. - - We never have local volumes, so it raises NotImplementedError. - - :raise: :py:exc:`NotImplementedError` - """ - raise NotImplementedError - - def _target_exists(self, target): - """Check if iSCSI target exist. - - :param target: target name - :return: True if target exist, else False - """ - targets = self.nms.stmf.list_targets() - if not targets: - return False - return (target in self.nms.stmf.list_targets()) - - def _target_group_exists(self, target_group): - """Check if target group exist. 
- - :param target_group: target group - :return: True if target group exist, else False - """ - groups = self.nms.stmf.list_targetgroups() - if not groups: - return False - return target_group in groups - - def _target_member_in_target_group(self, target_group, target_member): - """Check if target member in target group. - - :param target_group: target group - :param target_member: target member - :return: True if target member in target group, else False - :raises NexentaException: if target group doesn't exist - """ - members = self.nms.stmf.list_targetgroup_members(target_group) - if not members: - return False - return target_member in members - - def _lu_exists(self, zvol_name): - """Check if LU exists on appliance. - - :param zvol_name: Zvol name - :raises NexentaException: if zvol not exists - :return: True if LU exists, else False - """ - try: - return bool(self.nms.scsidisk.lu_exists(zvol_name)) - except exception.NexentaException as exc: - if 'does not exist' not in exc.args[0]: - raise - return False - - def _is_lu_shared(self, zvol_name): - """Check if LU exists on appliance and shared. - - :param zvol_name: Zvol name - :raises NexentaException: if Zvol not exist - :return: True if LU exists and shared, else False - """ - try: - shared = self.nms.scsidisk.lu_shared(zvol_name) > 0 - except exception.NexentaException as exc: - if 'does not exist for zvol' not in exc.args[0]: - raise # Zvol does not exists - shared = False # LU does not exist - return shared - - def create_export(self, _ctx, volume, connector): - """Create new export for zvol. - - :param volume: reference of volume to be exported - :return: iscsiadm-formatted provider location string - """ - model_update = self._do_export(_ctx, volume) - return model_update - - def ensure_export(self, _ctx, volume): - self._do_export(_ctx, volume) - - def _do_export(self, _ctx, volume): - """Recreate parts of export if necessary. 
- - :param volume: reference of volume to be exported - """ - zvol_name = self._get_zvol_name(volume['name']) - target_name = self._get_target_name(volume) - target_group_name = self._get_target_group_name(target_name) - - entry = None - if not self._lu_exists(zvol_name): - try: - entry = self.nms.scsidisk.create_lu(zvol_name, {}) - except exception.NexentaException as exc: - if 'in use' not in exc.args[0]: - raise - LOG.info('Ignored LU creation error "%s" while ensuring ' - 'export.', exc) - if not self._is_lu_shared(zvol_name): - try: - entry = self.nms.scsidisk.add_lun_mapping_entry(zvol_name, { - 'target_group': target_group_name}) - except exception.NexentaException as exc: - if 'view entry exists' not in exc.args[0]: - raise - LOG.info('Ignored LUN mapping entry addition error "%s" ' - 'while ensuring export.', exc) - model_update = {} - if entry: - provider_location = '%(host)s:%(port)s,1 %(name)s %(lun)s' % { - 'host': self.nms_host, - 'port': self.configuration.nexenta_iscsi_target_portal_port, - 'name': target_name, - 'lun': entry['lun'], - } - model_update = {'provider_location': provider_location} - return model_update - - def remove_export(self, _ctx, volume): - """Destroy all resources created to export zvol. - - :param volume: reference of volume to be unexported - """ - target_name = self._get_target_name(volume) - self.targets[target_name].remove(volume['name']) - zvol_name = self._get_zvol_name(volume['name']) - self.nms.scsidisk.delete_lu(zvol_name) - - def get_volume_stats(self, refresh=False): - """Get volume stats. - - If 'refresh' is True, run update the stats first. 
- """ - if refresh: - self._update_volume_stats() - - return self._stats - - def _update_volume_stats(self): - """Retrieve stats info for NexentaStor appliance.""" - LOG.debug('Updating volume stats') - - stats = self.nms.volume.get_child_props( - self.configuration.nexenta_volume, 'health|size|used|available') - - total_amount = utils.str2gib_size(stats['size']) - free_amount = utils.str2gib_size(stats['available']) - - location_info = '%(driver)s:%(host)s:%(volume)s' % { - 'driver': self.__class__.__name__, - 'host': self.nms_host, - 'volume': self.volume - } - self._stats = { - 'vendor_name': 'Nexenta', - 'dedup': self.volume_deduplication, - 'compression': self.volume_compression, - 'description': self.volume_description, - 'driver_version': self.VERSION, - 'storage_protocol': 'iSCSI', - 'total_capacity_gb': total_amount, - 'free_capacity_gb': free_amount, - 'reserved_percentage': self.configuration.reserved_percentage, - 'QoS_support': False, - 'volume_backend_name': self.backend_name, - 'location_info': location_info, - 'iscsi_target_portal_port': self.iscsi_target_portal_port, - 'nms_url': self.nms.url - } - - def _collect_garbage(self, zfs_object): - """Destroys ZFS parent objects - - Recursively destroys ZFS parent volumes and snapshots if they are - marked as garbage - - :param zfs_object: full path to a volume or a snapshot - """ - if zfs_object and zfs_object in self._needless_objects: - sp = zfs_object.split('/') - path = '/'.join(sp[:-1]) - name = sp[-1] - if '@' in name: # it's a snapshot: - volume, snap = name.split('@') - parent = '/'.join((path, volume)) - try: - self.nms.snapshot.destroy(zfs_object, '') - except exception.NexentaException as exc: - LOG.debug('Error occurred while trying to delete a ' - 'snapshot: %s', exc) - return - else: - try: - props = self.nms.zvol.get_child_props( - zfs_object, 'origin') or {} - except exception.NexentaException: - props = {} - parent = (props['origin'] if 'origin' in props and - props['origin'] else '') - 
try: - self.nms.zvol.destroy(zfs_object, '') - except exception.NexentaException as exc: - LOG.debug('Error occurred while trying to delete a ' - 'volume: %s', exc) - return - self._needless_objects.remove(zfs_object) - self._collect_garbage(parent) - - def _mark_as_garbage(self, zfs_object): - """Puts ZFS object into list for further removal - - :param zfs_object: full path to a volume or a snapshot - """ - self._needless_objects.add(zfs_object) diff --git a/cinder/volume/drivers/nexenta/jsonrpc.py b/cinder/volume/drivers/nexenta/jsonrpc.py deleted file mode 100644 index 161249a52..000000000 --- a/cinder/volume/drivers/nexenta/jsonrpc.py +++ /dev/null @@ -1,86 +0,0 @@ -# Copyright 2016 Nexenta Systems, Inc. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -from oslo_log import log as logging -from oslo_serialization import jsonutils -import requests - -from cinder import exception -from cinder.utils import retry - -LOG = logging.getLogger(__name__) -TIMEOUT = 60 - - -class NexentaJSONProxy(object): - - retry_exc_tuple = (requests.exceptions.ConnectionError,) - - def __init__(self, scheme, host, port, path, user, password, auto=False, - obj=None, method=None, session=None): - if session: - self.session = session - else: - self.session = requests.Session() - self.session.auth = (user, password) - self.session.headers.update({'Content-Type': 'application/json'}) - self.scheme = scheme.lower() - self.host = host - self.port = port - self.path = path - self.user = user - self.password = password - self.auto = auto - self.obj = obj - self.method = method - - def __getattr__(self, name): - if not self.obj: - obj, method = name, None - elif not self.method: - obj, method = self.obj, name - else: - obj, method = '%s.%s' % (self.obj, self.method), name - return NexentaJSONProxy(self.scheme, self.host, self.port, self.path, - self.user, self.password, self.auto, obj, - method, self.session) - - @property - def url(self): - return '%s://%s:%s%s' % (self.scheme, self.host, self.port, self.path) - - def __hash__(self): - return self.url.__hash__() - - def __repr__(self): - return 'NMS proxy: %s' % self.url - - @retry(retry_exc_tuple, retries=6) - def __call__(self, *args): - data = jsonutils.dumps({ - 'object': self.obj, - 'method': self.method, - 'params': args - }) - - LOG.debug('Sending JSON data: %s', data) - r = self.session.post(self.url, data=data, timeout=TIMEOUT) - response = r.json() - - LOG.debug('Got response: %s', response) - if response.get('error') is not None: - message = response['error'].get('message', '') - raise exception.NexentaException(message) - return response.get('result') diff --git a/cinder/volume/drivers/nexenta/nfs.py b/cinder/volume/drivers/nexenta/nfs.py deleted file mode 100644 index 
598a3dafe..000000000 --- a/cinder/volume/drivers/nexenta/nfs.py +++ /dev/null @@ -1,824 +0,0 @@ -# Copyright 2016 Nexenta Systems, Inc. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import hashlib -import os -import re -import six - -from eventlet import greenthread -from oslo_log import log as logging -from oslo_utils import units - -from cinder import context -from cinder import db -from cinder import exception -from cinder.i18n import _ -from cinder import interface -from cinder.volume.drivers.nexenta import jsonrpc -from cinder.volume.drivers.nexenta import options -from cinder.volume.drivers.nexenta import utils -from cinder.volume.drivers import nfs - -VERSION = '1.3.1' -LOG = logging.getLogger(__name__) - - -@interface.volumedriver -class NexentaNfsDriver(nfs.NfsDriver): # pylint: disable=R0921 - """Executes volume driver commands on Nexenta Appliance. - - Version history: - - .. code-block:: none - - 1.0.0 - Initial driver version. - 1.1.0 - Auto sharing for enclosing folder. - 1.1.1 - Added caching for NexentaStor appliance 'volroot' value. - 1.1.2 - Ignore "folder does not exist" error in delete_volume and - delete_snapshot method. - 1.1.3 - Redefined volume_backend_name attribute inherited from - RemoteFsDriver. - 1.2.0 - Added migrate and retype methods. - 1.3.0 - Extend volume method. - 1.3.1 - Cache capacity info and check shared folders on setup. 
- """ - - driver_prefix = 'nexenta' - volume_backend_name = 'NexentaNfsDriver' - VERSION = VERSION - VOLUME_FILE_NAME = 'volume' - - # ThirdPartySystems wiki page - CI_WIKI_NAME = "Nexenta_CI" - - def __init__(self, *args, **kwargs): - super(NexentaNfsDriver, self).__init__(*args, **kwargs) - if self.configuration: - self.configuration.append_config_values( - options.NEXENTA_CONNECTION_OPTS) - self.configuration.append_config_values( - options.NEXENTA_NFS_OPTS) - self.configuration.append_config_values( - options.NEXENTA_DATASET_OPTS) - self.configuration.append_config_values( - options.NEXENTA_RRMGR_OPTS) - - self.nms_cache_volroot = self.configuration.nexenta_nms_cache_volroot - self.rrmgr_compression = self.configuration.nexenta_rrmgr_compression - self.rrmgr_tcp_buf_size = self.configuration.nexenta_rrmgr_tcp_buf_size - self.rrmgr_connections = self.configuration.nexenta_rrmgr_connections - self.nfs_mount_point_base = self.configuration.nexenta_mount_point_base - self.volume_compression = ( - self.configuration.nexenta_dataset_compression) - self.volume_deduplication = self.configuration.nexenta_dataset_dedup - self.volume_description = ( - self.configuration.nexenta_dataset_description) - self.sparsed_volumes = self.configuration.nexenta_sparsed_volumes - self._nms2volroot = {} - self.share2nms = {} - self.nfs_versions = {} - self.shares_with_capacities = {} - - @property - def backend_name(self): - backend_name = None - if self.configuration: - backend_name = self.configuration.safe_get('volume_backend_name') - if not backend_name: - backend_name = self.__class__.__name__ - return backend_name - - def do_setup(self, context): - shares_config = getattr(self.configuration, self.driver_prefix + - '_shares_config') - if shares_config: - self.configuration.nfs_shares_config = shares_config - super(NexentaNfsDriver, self).do_setup(context) - self._load_shares_config(shares_config) - self._mount_subfolders() - - def check_for_setup_error(self): - """Verify that the 
volume for our folder exists. - - :raise: :py:exc:`LookupError` - """ - if self.share2nms: - for nfs_share in self.share2nms: - nms = self.share2nms[nfs_share] - volume_name, dataset = self._get_share_datasets(nfs_share) - if not nms.volume.object_exists(volume_name): - raise LookupError(_("Volume %s does not exist in Nexenta " - "Store appliance"), volume_name) - folder = '%s/%s' % (volume_name, dataset) - if not nms.folder.object_exists(folder): - raise LookupError(_("Folder %s does not exist in Nexenta " - "Store appliance"), folder) - if (folder not in nms.netstorsvc.get_shared_folders( - 'svc:/network/nfs/server:default', '')): - self._share_folder(nms, volume_name, dataset) - self._get_capacity_info(nfs_share) - - def migrate_volume(self, ctxt, volume, host): - """Migrate if volume and host are managed by Nexenta appliance. - - :param ctxt: context - :param volume: a dictionary describing the volume to migrate - :param host: a dictionary describing the host to migrate to - """ - LOG.debug('Enter: migrate_volume: id=%(id)s, host=%(host)s', - {'id': volume['id'], 'host': host}) - - false_ret = (False, None) - - if volume['status'] not in ('available', 'retyping'): - LOG.warning("Volume status must be 'available' or 'retyping'." - " Current volume status: %s", volume['status']) - return false_ret - - if 'capabilities' not in host: - LOG.warning("Unsupported host. 
No capabilities found") - return false_ret - - capabilities = host['capabilities'] - ns_shares = capabilities['ns_shares'] - dst_parts = capabilities['location_info'].split(':') - dst_host, dst_volume = dst_parts[1:] - - if (capabilities.get('vendor_name') != 'Nexenta' or - dst_parts[0] != self.__class__.__name__ or - capabilities['free_capacity_gb'] < volume['size']): - return false_ret - - nms = self.share2nms[volume['provider_location']] - ssh_bindings = nms.appliance.ssh_list_bindings() - shares = [] - for bind in ssh_bindings: - for share in ns_shares: - if (share.startswith(ssh_bindings[bind][3]) and - ns_shares[share] >= volume['size']): - shares.append(share) - if len(shares) == 0: - LOG.warning("Remote NexentaStor appliance at %s should be " - "SSH-bound.", share) - return false_ret - share = sorted(shares, key=ns_shares.get, reverse=True)[0] - snapshot = { - 'volume_name': volume['name'], - 'volume_id': volume['id'], - 'name': utils.get_migrate_snapshot_name(volume) - } - self.create_snapshot(snapshot) - location = volume['provider_location'] - src = '%(share)s/%(volume)s@%(snapshot)s' % { - 'share': location.split(':')[1].split('volumes/')[1], - 'volume': volume['name'], - 'snapshot': snapshot['name'] - } - dst = ':'.join([dst_host, dst_volume.split('/volumes/')[1]]) - try: - nms.appliance.execute(self._get_zfs_send_recv_cmd(src, dst)) - except exception.NexentaException as exc: - LOG.warning("Cannot send source snapshot %(src)s to " - "destination %(dst)s. 
Reason: %(exc)s", - {'src': src, 'dst': dst, 'exc': exc}) - return false_ret - finally: - try: - self.delete_snapshot(snapshot) - except exception.NexentaException as exc: - LOG.warning("Cannot delete temporary source snapshot " - "%(src)s on NexentaStor Appliance: %(exc)s", - {'src': src, 'exc': exc}) - try: - self.delete_volume(volume) - except exception.NexentaException as exc: - LOG.warning("Cannot delete source volume %(volume)s on " - "NexentaStor Appliance: %(exc)s", - {'volume': volume['name'], 'exc': exc}) - - dst_nms = self._get_nms_for_url(capabilities['nms_url']) - dst_snapshot = '%s/%s@%s' % (dst_volume.split('volumes/')[1], - volume['name'], snapshot['name']) - try: - dst_nms.snapshot.destroy(dst_snapshot, '') - except exception.NexentaException as exc: - LOG.warning("Cannot delete temporary destination snapshot " - "%(dst)s on NexentaStor Appliance: %(exc)s", - {'dst': dst_snapshot, 'exc': exc}) - return True, {'provider_location': share} - - def _get_zfs_send_recv_cmd(self, src, dst): - """Returns rrmgr command for source and destination.""" - return utils.get_rrmgr_cmd(src, dst, - compression=self.rrmgr_compression, - tcp_buf_size=self.rrmgr_tcp_buf_size, - connections=self.rrmgr_connections) - - def initialize_connection(self, volume, connector): - """Allow connection to connector and return connection info. - - :param volume: volume reference - :param connector: connector reference - """ - export = '%s/%s' % (volume['provider_location'], volume['name']) - data = {'export': export, 'name': 'volume'} - if volume['provider_location'] in self.shares: - data['options'] = self.shares[volume['provider_location']] - return { - 'driver_volume_type': self.driver_volume_type, - 'data': data - } - - def retype(self, context, volume, new_type, diff, host): - """Convert the volume to be of the new type. 
- - :param context: Context - :param volume: A dictionary describing the volume to migrate - :param new_type: A dictionary describing the volume type to convert to - :param diff: A dictionary with the difference between the two types - :param host: A dictionary describing the host to migrate to, where - host['host'] is its name, and host['capabilities'] is a - dictionary of its reported capabilities. - """ - LOG.debug('Retype volume request %(vol)s to be %(type)s ' - '(host: %(host)s), diff %(diff)s.', - {'vol': volume['name'], - 'type': new_type, - 'host': host, - 'diff': diff}) - - options = dict( - compression='compression', - dedup='dedup', - description='nms:description' - ) - - retyped = False - migrated = False - model_update = None - - src_backend = self.__class__.__name__ - dst_backend = host['capabilities']['location_info'].split(':')[0] - if src_backend != dst_backend: - LOG.warning('Cannot retype from %(src_backend)s to ' - '%(dst_backend)s.', - {'src_backend': src_backend, - 'dst_backend': dst_backend}) - return False - - hosts = (volume['host'], host['host']) - old, new = hosts - if old != new: - migrated, provider_location = self.migrate_volume( - context, volume, host) - - if not migrated: - provider_location = volume['provider_location'] - nms = self.share2nms[provider_location] - else: - nms_url = host['capabilities']['nms_url'] - nms = self._get_nms_for_url(nms_url) - model_update = provider_location - provider_location = provider_location['provider_location'] - - share = provider_location.split(':')[1].split('volumes/')[1] - folder = '%(share)s/%(volume)s' % { - 'share': share, - 'volume': volume['name'] - } - - for opt in options: - old, new = diff.get('extra_specs').get(opt, (False, False)) - if old != new: - LOG.debug('Changing %(opt)s from %(old)s to %(new)s.', - {'opt': opt, 'old': old, 'new': new}) - try: - nms.folder.set_child_prop( - folder, options[opt], new) - retyped = True - except exception.NexentaException: - LOG.error('Error 
trying to change %(opt)s' - ' from %(old)s to %(new)s', - {'opt': opt, 'old': old, 'new': new}) - return False, None - return retyped or migrated, model_update - - def _do_create_volume(self, volume): - nfs_share = volume['provider_location'] - nms = self.share2nms[nfs_share] - - vol, dataset = self._get_share_datasets(nfs_share) - folder = '%s/%s' % (dataset, volume['name']) - LOG.debug('Creating folder on Nexenta Store %s', folder) - nms.folder.create_with_props( - vol, folder, - {'compression': self.configuration.nexenta_dataset_compression} - ) - - volume_path = self.remote_path(volume) - volume_size = volume['size'] - try: - self._share_folder(nms, vol, folder) - - if getattr(self.configuration, - self.driver_prefix + '_sparsed_volumes'): - self._create_sparsed_file(nms, volume_path, volume_size) - else: - folder_path = '%s/%s' % (vol, folder) - compression = nms.folder.get_child_prop( - folder_path, 'compression') - if compression != 'off': - # Disable compression, because otherwise will not use space - # on disk. - nms.folder.set_child_prop( - folder_path, 'compression', 'off') - try: - self._create_regular_file(nms, volume_path, volume_size) - finally: - if compression != 'off': - # Backup default compression value if it was changed. - nms.folder.set_child_prop( - folder_path, 'compression', compression) - - self._set_rw_permissions_for_all(nms, volume_path) - - if self._get_nfs_server_version(nfs_share) < 4: - sub_share, mnt_path = self._get_subshare_mount_point(nfs_share, - volume) - self._ensure_share_mounted(sub_share, mnt_path) - self._get_capacity_info(nfs_share) - except exception.NexentaException: - try: - nms.folder.destroy('%s/%s' % (vol, folder)) - except exception.NexentaException: - LOG.warning("Cannot destroy created folder: " - "%(vol)s/%(folder)s", - {'vol': vol, 'folder': folder}) - raise - - def create_volume_from_snapshot(self, volume, snapshot): - """Create new volume from other's snapshot on appliance. 
- - :param volume: reference of volume to be created - :param snapshot: reference of source snapshot - """ - self._ensure_shares_mounted() - - snapshot_vol = self._get_snapshot_volume(snapshot) - nfs_share = snapshot_vol['provider_location'] - volume['provider_location'] = nfs_share - nms = self.share2nms[nfs_share] - - vol, dataset = self._get_share_datasets(nfs_share) - snapshot_name = '%s/%s/%s@%s' % (vol, dataset, snapshot['volume_name'], - snapshot['name']) - folder = '%s/%s' % (dataset, volume['name']) - nms.folder.clone(snapshot_name, '%s/%s' % (vol, folder)) - - try: - self._share_folder(nms, vol, folder) - except exception.NexentaException: - try: - nms.folder.destroy('%s/%s' % (vol, folder), '') - except exception.NexentaException: - LOG.warning("Cannot destroy cloned folder: " - "%(vol)s/%(folder)s", - {'vol': vol, 'folder': folder}) - raise - - if self._get_nfs_server_version(nfs_share) < 4: - sub_share, mnt_path = self._get_subshare_mount_point(nfs_share, - volume) - self._ensure_share_mounted(sub_share, mnt_path) - - if (('size' in volume) and ( - volume['size'] > snapshot['volume_size'])): - self.extend_volume(volume, volume['size']) - - return {'provider_location': volume['provider_location']} - - def create_cloned_volume(self, volume, src_vref): - """Creates a clone of the specified volume. - - :param volume: new volume reference - :param src_vref: source volume reference - """ - LOG.info('Creating clone of volume: %s', src_vref['id']) - snapshot = {'volume_name': src_vref['name'], - 'volume_id': src_vref['id'], - 'volume_size': src_vref['size'], - 'name': self._get_clone_snapshot_name(volume)} - # We don't delete this snapshot, because this snapshot will be origin - # of new volume. This snapshot will be automatically promoted by NMS - # when user will delete its origin. 
- self.create_snapshot(snapshot) - try: - return self.create_volume_from_snapshot(volume, snapshot) - except exception.NexentaException: - LOG.error('Volume creation failed, deleting created snapshot ' - '%(volume_name)s@%(name)s', snapshot) - try: - self.delete_snapshot(snapshot) - except (exception.NexentaException, exception.SnapshotIsBusy): - LOG.warning('Failed to delete zfs snapshot ' - '%(volume_name)s@%(name)s', snapshot) - raise - - def delete_volume(self, volume): - """Deletes a logical volume. - - :param volume: volume reference - """ - nfs_share = volume.get('provider_location') - if nfs_share: - nms = self.share2nms[nfs_share] - vol, parent_folder = self._get_share_datasets(nfs_share) - folder = '%s/%s/%s' % (vol, parent_folder, volume['name']) - mount_path = self.remote_path(volume).strip( - '/%s' % self.VOLUME_FILE_NAME) - if mount_path in self._remotefsclient._read_mounts(): - self._execute('umount', mount_path, run_as_root=True) - try: - props = nms.folder.get_child_props(folder, 'origin') or {} - nms.folder.destroy(folder, '-r') - except exception.NexentaException as exc: - if 'does not exist' in exc.args[0]: - LOG.info('Folder %s does not exist, it was ' - 'already deleted.', folder) - return - raise - self._get_capacity_info(nfs_share) - origin = props.get('origin') - if origin and self._is_clone_snapshot_name(origin): - try: - nms.snapshot.destroy(origin, '') - except exception.NexentaException as exc: - if 'does not exist' in exc.args[0]: - LOG.info('Snapshot %s does not exist, it was ' - 'already deleted.', origin) - return - raise - - def extend_volume(self, volume, new_size): - """Extend an existing volume. 
- - :param volume: volume reference - :param new_size: volume new size in GB - """ - LOG.info('Extending volume: %(id)s New size: %(size)s GB', - {'id': volume['id'], 'size': new_size}) - nfs_share = volume['provider_location'] - nms = self.share2nms[nfs_share] - volume_path = self.remote_path(volume) - if getattr(self.configuration, - self.driver_prefix + '_sparsed_volumes'): - self._create_sparsed_file(nms, volume_path, new_size) - else: - block_size_mb = 1 - block_count = ((new_size - volume['size']) * units.Gi / - (block_size_mb * units.Mi)) - - nms.appliance.execute( - 'dd if=/dev/zero seek=%(seek)d of=%(path)s' - ' bs=%(bs)dM count=%(count)d' % { - 'seek': volume['size'] * units.Gi / block_size_mb, - 'path': volume_path, - 'bs': block_size_mb, - 'count': block_count - } - ) - - def create_snapshot(self, snapshot): - """Creates a snapshot. - - :param snapshot: snapshot reference - """ - volume = self._get_snapshot_volume(snapshot) - nfs_share = volume['provider_location'] - nms = self.share2nms[nfs_share] - vol, dataset = self._get_share_datasets(nfs_share) - folder = '%s/%s/%s' % (vol, dataset, volume['name']) - nms.folder.create_snapshot(folder, snapshot['name'], '-r') - - def delete_snapshot(self, snapshot): - """Deletes a snapshot. 
- - :param snapshot: snapshot reference - """ - volume = self._get_snapshot_volume(snapshot) - nfs_share = volume['provider_location'] - nms = self.share2nms[nfs_share] - vol, dataset = self._get_share_datasets(nfs_share) - folder = '%s/%s/%s' % (vol, dataset, volume['name']) - try: - nms.snapshot.destroy('%s@%s' % (folder, snapshot['name']), '') - except exception.NexentaException as exc: - if 'does not exist' in exc.args[0]: - LOG.info('Snapshot %(folder)s@%(snapshot)s does not ' - 'exist, it was already deleted.', - {'folder': folder, - 'snapshot': snapshot}) - return - elif 'has dependent clones' in exc.args[0]: - LOG.info('Snapshot %(folder)s@%(snapshot)s has dependent ' - 'clones, it will be deleted later.', - {'folder': folder, - 'snapshot': snapshot}) - return - - def _create_sparsed_file(self, nms, path, size): - """Creates file with 0 disk usage. - - :param nms: nms object - :param path: path to new file - :param size: size of file - """ - nms.appliance.execute( - 'truncate --size %(size)dG %(path)s' % { - 'path': path, - 'size': size - } - ) - - def _create_regular_file(self, nms, path, size): - """Creates regular file of given size. - - Takes a lot of time for large files. - - :param nms: nms object - :param path: path to new file - :param size: size of file - """ - block_size_mb = 1 - block_count = size * units.Gi / (block_size_mb * units.Mi) - - LOG.info('Creating regular file: %s.' - 'This may take some time.', path) - - nms.appliance.execute( - 'dd if=/dev/zero of=%(path)s bs=%(bs)dM count=%(count)d' % { - 'path': path, - 'bs': block_size_mb, - 'count': block_count - } - ) - - LOG.info('Regular file: %s created.', path) - - def _set_rw_permissions_for_all(self, nms, path): - """Sets 666 permissions for the path. - - :param nms: nms object - :param path: path to file - """ - nms.appliance.execute('chmod ugo+rw %s' % path) - - def local_path(self, volume): - """Get volume path (mounted locally fs path) for given volume. 
- - :param volume: volume reference - """ - nfs_share = volume['provider_location'] - return os.path.join(self._get_mount_point_for_share(nfs_share), - volume['name'], 'volume') - - def _get_mount_point_for_share(self, nfs_share): - """Returns path to mount point NFS share. - - :param nfs_share: example 172.18.194.100:/var/nfs - """ - nfs_share = nfs_share.encode('utf-8') - return os.path.join(self.configuration.nexenta_mount_point_base, - hashlib.md5(nfs_share).hexdigest()) - - def remote_path(self, volume): - """Get volume path (mounted remotely fs path) for given volume. - - :param volume: volume reference - """ - nfs_share = volume['provider_location'] - share = nfs_share.split(':')[1].rstrip('/') - return '%s/%s/volume' % (share, volume['name']) - - def _share_folder(self, nms, volume, folder): - """Share NFS folder on NexentaStor Appliance. - - :param nms: nms object - :param volume: volume name - :param folder: folder name - """ - path = '%s/%s' % (volume, folder.lstrip('/')) - share_opts = { - 'read_write': '*', - 'read_only': '', - 'root': 'nobody', - 'extra_options': 'anon=0', - 'recursive': 'true', - 'anonymous_rw': 'true', - } - LOG.debug('Sharing folder %s on Nexenta Store', folder) - nms.netstorsvc.share_folder('svc:/network/nfs/server:default', path, - share_opts) - - def _load_shares_config(self, share_file): - self.shares = {} - self.share2nms = {} - - for share in self._read_config_file(share_file): - # A configuration line may be either: - # host:/share_name http://user:pass@host:[port]/ - # or - # host:/share_name http://user:pass@host:[port]/ - # -o options=123,rw --other - if not share.strip(): - continue - if share.startswith('#'): - continue - - share_info = re.split(r'\s+', share, 2) - - share_address = share_info[0].strip() - nms_url = share_info[1].strip() - share_opts = share_info[2].strip() if len(share_info) > 2 else None - - if not re.match(r'.+:/.+', share_address): - LOG.warning("Share %s ignored due to invalid format. 
" - "Must be of form address:/export.", - share_address) - continue - - self.shares[share_address] = share_opts - self.share2nms[share_address] = self._get_nms_for_url(nms_url) - - LOG.debug('Shares loaded: %s', self.shares) - - def _get_subshare_mount_point(self, nfs_share, volume): - mnt_path = '%s/%s' % ( - self._get_mount_point_for_share(nfs_share), volume['name']) - sub_share = '%s/%s' % (nfs_share, volume['name']) - return sub_share, mnt_path - - def _ensure_share_mounted(self, nfs_share, mount_path=None): - """Ensure that NFS share is mounted on the host. - - Unlike the parent method this one accepts mount_path as an optional - parameter and uses it as a mount point if provided. - - :param nfs_share: NFS share name - :param mount_path: mount path on the host - """ - mnt_flags = [] - if self.shares.get(nfs_share) is not None: - mnt_flags = self.shares[nfs_share].split() - num_attempts = max(1, self.configuration.nfs_mount_attempts) - for attempt in range(num_attempts): - try: - if mount_path is None: - self._remotefsclient.mount(nfs_share, mnt_flags) - else: - if mount_path in self._remotefsclient._read_mounts(): - LOG.info('Already mounted: %s', mount_path) - return - - self._execute('mkdir', '-p', mount_path, - check_exit_code=False) - self._remotefsclient._mount_nfs(nfs_share, mount_path, - mnt_flags) - return - except Exception as e: - if attempt == (num_attempts - 1): - LOG.error('Mount failure for %(share)s after ' - '%(count)d attempts.', - {'share': nfs_share, - 'count': num_attempts}) - raise exception.NfsException(six.text_type(e)) - LOG.warning( - 'Mount attempt %(attempt)d failed: %(error)s. 
' - 'Retrying mount ...', - {'attempt': attempt, 'error': e}) - greenthread.sleep(1) - - def _mount_subfolders(self): - ctxt = context.get_admin_context() - vol_entries = self.db.volume_get_all_by_host(ctxt, self.host) - for vol in vol_entries: - nfs_share = vol['provider_location'] - if ((nfs_share in self.shares) and - (self._get_nfs_server_version(nfs_share) < 4)): - sub_share, mnt_path = self._get_subshare_mount_point( - nfs_share, vol) - self._ensure_share_mounted(sub_share, mnt_path) - - def _get_nfs_server_version(self, share): - if not self.nfs_versions.get(share): - nms = self.share2nms[share] - nfs_opts = nms.netsvc.get_confopts( - 'svc:/network/nfs/server:default', 'configure') - try: - self.nfs_versions[share] = int( - nfs_opts['nfs_server_versmax']['current']) - except KeyError: - self.nfs_versions[share] = int( - nfs_opts['server_versmax']['current']) - return self.nfs_versions[share] - - def _get_capacity_info(self, nfs_share): - """Calculate available space on the NFS share. 
- - :param nfs_share: example 172.18.194.100:/var/nfs - """ - nms = self.share2nms[nfs_share] - ns_volume, ns_folder = self._get_share_datasets(nfs_share) - folder_props = nms.folder.get_child_props('%s/%s' % (ns_volume, - ns_folder), - 'used|available') - free = utils.str2size(folder_props['available']) - allocated = utils.str2size(folder_props['used']) - self.shares_with_capacities[nfs_share] = { - 'free': utils.str2gib_size(free), - 'total': utils.str2gib_size(free + allocated)} - return free + allocated, free, allocated - - def _get_nms_for_url(self, url): - """Returns initialized nms object for url.""" - auto, scheme, user, password, host, port, path = ( - utils.parse_nms_url(url)) - return jsonrpc.NexentaJSONProxy(scheme, host, port, path, user, - password, auto=auto) - - def _get_snapshot_volume(self, snapshot): - ctxt = context.get_admin_context() - return db.volume_get(ctxt, snapshot['volume_id']) - - def _get_volroot(self, nms): - """Returns volroot property value from NexentaStor appliance.""" - if not self.nms_cache_volroot: - return nms.server.get_prop('volroot') - if nms not in self._nms2volroot: - self._nms2volroot[nms] = nms.server.get_prop('volroot') - return self._nms2volroot[nms] - - def _get_share_datasets(self, nfs_share): - nms = self.share2nms[nfs_share] - volroot = self._get_volroot(nms) - path = nfs_share.split(':')[1][len(volroot):].strip('/') - volume_name = path.split('/')[0] - folder_name = '/'.join(path.split('/')[1:]) - return volume_name, folder_name - - def _get_clone_snapshot_name(self, volume): - """Return name for snapshot that will be used to clone the volume.""" - return 'cinder-clone-snapshot-%(id)s' % volume - - def _is_clone_snapshot_name(self, snapshot): - """Check if snapshot is created for cloning.""" - name = snapshot.split('@')[-1] - return name.startswith('cinder-clone-snapshot-') - - def _update_volume_stats(self): - """Retrieve stats info for NexentaStor appliance.""" - LOG.debug('Updating volume stats') - 
total_space = 0 - free_space = 0 - share = None - for _share in self._mounted_shares: - if self.shares_with_capacities[_share]['free'] > free_space: - free_space = self.shares_with_capacities[_share]['free'] - total_space = self.shares_with_capacities[_share]['total'] - share = _share - - location_info = '%(driver)s:%(share)s' % { - 'driver': self.__class__.__name__, - 'share': share - } - nms_url = self.share2nms[share].url - self._stats = { - 'vendor_name': 'Nexenta', - 'dedup': self.volume_deduplication, - 'compression': self.volume_compression, - 'description': self.volume_description, - 'nms_url': nms_url, - 'ns_shares': self.shares_with_capacities, - 'driver_version': self.VERSION, - 'storage_protocol': 'NFS', - 'total_capacity_gb': total_space, - 'free_capacity_gb': free_space, - 'reserved_percentage': self.configuration.reserved_percentage, - 'QoS_support': False, - 'location_info': location_info, - 'volume_backend_name': self.backend_name, - 'nfs_mount_point_base': self.nfs_mount_point_base - } diff --git a/cinder/volume/drivers/nexenta/ns5/__init__.py b/cinder/volume/drivers/nexenta/ns5/__init__.py deleted file mode 100644 index e69de29bb..000000000 diff --git a/cinder/volume/drivers/nexenta/ns5/iscsi.py b/cinder/volume/drivers/nexenta/ns5/iscsi.py deleted file mode 100644 index dc8ebfa94..000000000 --- a/cinder/volume/drivers/nexenta/ns5/iscsi.py +++ /dev/null @@ -1,483 +0,0 @@ -# Copyright 2016 Nexenta Systems, Inc. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the -# License for the specific language governing permissions and limitations -# under the License. - -import uuid - -from oslo_log import log as logging -from oslo_utils import units - -from cinder import context -from cinder import db -from cinder import exception -from cinder.i18n import _ -from cinder import interface -from cinder.volume import driver -from cinder.volume.drivers.nexenta.ns5 import jsonrpc -from cinder.volume.drivers.nexenta import options -from cinder.volume.drivers.nexenta import utils - -VERSION = '1.1.0' -LOG = logging.getLogger(__name__) -TARGET_GROUP_PREFIX = 'cinder-tg-' - - -@interface.volumedriver -class NexentaISCSIDriver(driver.ISCSIDriver): - """Executes volume driver commands on Nexenta Appliance. - - Version history: - 1.0.0 - Initial driver version. - 1.1.0 - Added HTTPS support. - Added use of sessions for REST calls. - """ - - VERSION = VERSION - - # ThirdPartySystems wiki page - CI_WIKI_NAME = "Nexenta_CI" - - def __init__(self, *args, **kwargs): - super(NexentaISCSIDriver, self).__init__(*args, **kwargs) - self.nef = None - # mapping of targets and groups. Groups are the keys - self.targets = {} - # list of volumes in target group. 
Groups are the keys - self.volumes = {} - if self.configuration: - self.configuration.append_config_values( - options.NEXENTA_CONNECTION_OPTS) - self.configuration.append_config_values( - options.NEXENTA_ISCSI_OPTS) - self.configuration.append_config_values( - options.NEXENTA_DATASET_OPTS) - self.configuration.append_config_values( - options.NEXENTA_RRMGR_OPTS) - self.use_https = self.configuration.nexenta_use_https - self.nef_host = self.configuration.nexenta_host - self.nef_port = self.configuration.nexenta_rest_port - self.nef_user = self.configuration.nexenta_user - self.nef_password = self.configuration.nexenta_password - self.storage_pool = self.configuration.nexenta_volume - self.volume_group = self.configuration.nexenta_volume_group - self.dataset_compression = ( - self.configuration.nexenta_dataset_compression) - self.dataset_deduplication = self.configuration.nexenta_dataset_dedup - self.dataset_description = ( - self.configuration.nexenta_dataset_description) - self.iscsi_target_portal_port = ( - self.configuration.nexenta_iscsi_target_portal_port) - - @property - def backend_name(self): - backend_name = None - if self.configuration: - backend_name = self.configuration.safe_get('volume_backend_name') - if not backend_name: - backend_name = self.__class__.__name__ - return backend_name - - def do_setup(self, context): - self.nef = jsonrpc.NexentaJSONProxy( - self.nef_host, self.nef_port, self.nef_user, - self.nef_password, self.use_https) - url = 'storage/pools/%s/volumeGroups' % self.storage_pool - data = { - 'name': self.volume_group, - 'volumeBlockSize': ( - self.configuration.nexenta_ns5_blocksize * units.Ki) - } - try: - self.nef.post(url, data) - except exception.NexentaException as e: - if 'EEXIST' in e.args[0]: - LOG.debug('volumeGroup already exists, skipping') - else: - raise - - self._fetch_volumes() - - def _fetch_volumes(self): - url = 'san/iscsi/targets?fields=alias,name&limit=50000' - for target in self.nef.get(url)['data']: - tg_name = 
target['alias'] - if tg_name.startswith(TARGET_GROUP_PREFIX): - self.targets[tg_name] = target['name'] - self._fill_volumes(tg_name) - - def check_for_setup_error(self): - """Verify that the zfs volumes exist. - - :raise: :py:exc:`LookupError` - """ - url = 'storage/pools/%(pool)s/volumeGroups/%(group)s' % { - 'pool': self.storage_pool, - 'group': self.volume_group, - } - try: - self.nef.get(url) - except exception.NexentaException: - raise LookupError(_( - "Dataset group %s not found at Nexenta SA"), '/'.join( - [self.storage_pool, self.volume_group])) - services = self.nef.get('services') - for service in services['data']: - if service['name'] == 'iscsit': - if service['state'] != 'online': - raise exception.NexentaException( - 'iSCSI service is not running on NS appliance') - break - - def _get_volume_path(self, volume): - """Return zfs volume name that corresponds given volume name.""" - return '%s/%s/%s' % (self.storage_pool, self.volume_group, - volume['name']) - - @staticmethod - def _get_clone_snapshot_name(volume): - """Return name for snapshot that will be used to clone the volume.""" - return 'cinder-clone-snapshot-%(id)s' % volume - - def create_volume(self, volume): - """Create a zfs volume on appliance. - - :param volume: volume reference - :return: model update dict for volume reference - """ - url = 'storage/pools/%(pool)s/volumeGroups/%(group)s/volumes' % { - 'pool': self.storage_pool, - 'group': self.volume_group, - } - data = { - 'name': volume['name'], - 'volumeSize': volume['size'] * units.Gi, - 'volumeBlockSize': ( - self.configuration.nexenta_ns5_blocksize * units.Ki), - 'sparseVolume': self.configuration.nexenta_sparse - } - self.nef.post(url, data) - - def delete_volume(self, volume): - """Destroy a zfs volume on appliance. 
- - :param volume: volume reference - """ - - url = ('storage/pools/%(pool)s/volumeGroups/%(group)s' - '/volumes/%(name)s') % { - 'pool': self.storage_pool, - 'group': self.volume_group, - 'name': volume['name'] - } - try: - self.nef.delete(url) - except exception.NexentaException as exc: - # We assume that volume is gone - LOG.warning('Got error trying to delete volume %(volume)s,' - ' assuming it is already gone: %(exc)s', - {'volume': volume, 'exc': exc}) - - def extend_volume(self, volume, new_size): - """Extend an existing volume. - - :param volume: volume reference - :param new_size: volume new size in GB - """ - LOG.info('Extending volume: %(id)s New size: %(size)s GB', - {'id': volume['id'], 'size': new_size}) - pool, group, name = self._get_volume_path(volume).split('/') - url = ('storage/pools/%(pool)s/volumeGroups/%(group)s/' - 'volumes/%(name)s') % { - 'pool': pool, - 'group': group, - 'name': name - } - self.nef.put(url, {'volumeSize': new_size * units.Gi}) - - def create_snapshot(self, snapshot): - """Creates a snapshot. - - :param snapshot: snapshot reference - """ - snapshot_vol = self._get_snapshot_volume(snapshot) - LOG.info('Creating snapshot %(snap)s of volume %(vol)s', { - 'snap': snapshot['name'], - 'vol': snapshot_vol['name'] - }) - volume_path = self._get_volume_path(snapshot_vol) - pool, group, volume = volume_path.split('/') - url = ('storage/pools/%(pool)s/volumeGroups/%(group)s/' - 'volumes/%(volume)s/snapshots') % { - 'pool': pool, - 'group': group, - 'volume': snapshot_vol['name'] - } - self.nef.post(url, {'name': snapshot['name']}) - - def delete_snapshot(self, snapshot): - """Delete volume's snapshot on appliance. 
- - :param snapshot: snapshot reference - """ - LOG.info('Deleting snapshot: %s', snapshot['name']) - snapshot_vol = self._get_snapshot_volume(snapshot) - volume_path = self._get_volume_path(snapshot_vol) - pool, group, volume = volume_path.split('/') - url = ('storage/pools/%(pool)s/volumeGroups/%(group)s/' - 'volumes/%(volume)s/snapshots/%(snapshot)s') % { - 'pool': pool, - 'group': group, - 'volume': volume, - 'snapshot': snapshot['name'] - } - try: - self.nef.delete(url) - except exception.NexentaException as exc: - if 'EBUSY' in exc.args[0]: - LOG.warning( - 'Could not delete snapshot %s - it has dependencies', - snapshot['name']) - else: - LOG.warning(exc) - - def create_volume_from_snapshot(self, volume, snapshot): - """Create new volume from other's snapshot on appliance. - - :param volume: reference of volume to be created - :param snapshot: reference of source snapshot - """ - LOG.info('Creating volume from snapshot: %s', snapshot['name']) - snapshot_vol = self._get_snapshot_volume(snapshot) - volume_path = self._get_volume_path(snapshot_vol) - pool, group, snapshot_vol = volume_path.split('/') - url = ('storage/pools/%(pool)s/volumeGroups/%(group)s/' - 'volumes/%(volume)s/snapshots/%(snapshot)s/clone') % { - 'pool': pool, - 'group': group, - 'volume': snapshot_vol, - 'snapshot': snapshot['name'] - } - self.nef.post(url, {'targetPath': self._get_volume_path(volume)}) - if (('size' in volume) and ( - volume['size'] > snapshot['volume_size'])): - self.extend_volume(volume, volume['size']) - - def create_cloned_volume(self, volume, src_vref): - """Creates a clone of the specified volume. 
- - :param volume: new volume reference - :param src_vref: source volume reference - """ - snapshot = {'volume_name': src_vref['name'], - 'volume_id': src_vref['id'], - 'volume_size': src_vref['size'], - 'name': self._get_clone_snapshot_name(volume)} - LOG.debug('Creating temp snapshot of the original volume: ' - '%s@%s', snapshot['volume_name'], snapshot['name']) - self.create_snapshot(snapshot) - try: - self.create_volume_from_snapshot(volume, snapshot) - except exception.NexentaException: - LOG.error('Volume creation failed, deleting created snapshot %s', - '@'.join([snapshot['volume_name'], snapshot['name']])) - try: - self.delete_snapshot(snapshot) - except (exception.NexentaException, exception.SnapshotIsBusy): - LOG.warning('Failed to delete zfs snapshot %s', - '@'.join([snapshot['volume_name'], - snapshot['name']])) - raise - - def _get_snapshot_volume(self, snapshot): - ctxt = context.get_admin_context() - return db.volume_get(ctxt, snapshot['volume_id']) - - def _do_export(self, _ctx, volume): - """Do all steps to get zfs volume exported at separate target. 
- - :param volume: reference of volume to be exported - """ - volume_path = self._get_volume_path(volume) - - # Find out whether the volume is exported - vol_map_url = 'san/lunMappings?volume=%s&fields=lun' % ( - volume_path.replace('/', '%2F')) - data = self.nef.get(vol_map_url).get('data') - if data: - model_update = {} - else: - # Choose the best target group among existing ones - tg_name = None - for tg in self.volumes.keys(): - if len(self.volumes[tg]) < 20: - tg_name = tg - break - if tg_name: - target_name = self.targets[tg_name] - else: - tg_name = TARGET_GROUP_PREFIX + uuid.uuid4().hex - - # Create new target - url = 'san/iscsi/targets' - data = { - "portals": [ - {"address": self.nef_host} - ], - 'alias': tg_name - } - self.nef.post(url, data) - - # Get the name of just created target - data = self.nef.get( - '%(url)s?fields=name&alias=%(tg_name)s' % { - 'url': url, - 'tg_name': tg_name - })['data'] - target_name = data[0]['name'] - - self._create_target_group(tg_name, target_name) - - self.targets[tg_name] = target_name - self.volumes[tg_name] = set() - - # Export the volume - url = 'san/lunMappings' - data = { - "hostGroup": "all", - "targetGroup": tg_name, - 'volume': volume_path - } - try: - self.nef.post(url, data) - self.volumes[tg_name].add(volume_path) - except exception.NexentaException as e: - if 'No such target group' in e.args[0]: - self._create_target_group(tg_name, target_name) - self._fill_volumes(tg_name) - self.nef.post(url, data) - else: - raise - - # Get LUN of just created volume - data = self.nef.get(vol_map_url).get('data') - lun = data[0]['lun'] - - provider_location = '%(host)s:%(port)s,1 %(name)s %(lun)s' % { - 'host': self.nef_host, - 'port': self.configuration.nexenta_iscsi_target_portal_port, - 'name': target_name, - 'lun': lun, - } - model_update = {'provider_location': provider_location} - return model_update - - def create_export(self, _ctx, volume, connector): - """Create new export for zfs volume. 
- - :param volume: reference of volume to be exported - :return: iscsiadm-formatted provider location string - """ - model_update = self._do_export(_ctx, volume) - return model_update - - def ensure_export(self, _ctx, volume): - """Recreate parts of export if necessary. - - :param volume: reference of volume to be exported - """ - self._do_export(_ctx, volume) - - def remove_export(self, _ctx, volume): - """Destroy all resources created to export zfs volume. - - :param volume: reference of volume to be unexported - """ - volume_path = self._get_volume_path(volume) - - # Get ID of a LUN mapping if the volume is exported - url = 'san/lunMappings?volume=%s&fields=id' % ( - volume_path.replace('/', '%2F')) - data = self.nef.get(url)['data'] - if data: - url = 'san/lunMappings/%s' % data[0]['id'] - self.nef.delete(url) - else: - LOG.debug('LU already deleted from appliance') - - for tg in self.volumes: - if volume_path in self.volumes[tg]: - self.volumes[tg].remove(volume_path) - break - - def get_volume_stats(self, refresh=False): - """Get volume stats. - - If 'refresh' is True, run update the stats first. 
- """ - if refresh: - self._update_volume_stats() - - return self._stats - - def _update_volume_stats(self): - """Retrieve stats info for NexentaStor appliance.""" - LOG.debug('Updating volume stats') - - url = ('storage/pools/%(pool)s/volumeGroups/%(group)s' - '?fields=bytesAvailable,bytesUsed') % { - 'pool': self.storage_pool, - 'group': self.volume_group, - } - stats = self.nef.get(url) - total_amount = utils.str2gib_size(stats['bytesAvailable']) - free_amount = utils.str2gib_size( - stats['bytesAvailable'] - stats['bytesUsed']) - - location_info = '%(driver)s:%(host)s:%(pool)s/%(group)s' % { - 'driver': self.__class__.__name__, - 'host': self.nef_host, - 'pool': self.storage_pool, - 'group': self.volume_group, - } - self._stats = { - 'vendor_name': 'Nexenta', - 'dedup': self.dataset_deduplication, - 'compression': self.dataset_compression, - 'description': self.dataset_description, - 'driver_version': self.VERSION, - 'storage_protocol': 'iSCSI', - 'total_capacity_gb': total_amount, - 'free_capacity_gb': free_amount, - 'reserved_percentage': self.configuration.reserved_percentage, - 'QoS_support': False, - 'volume_backend_name': self.backend_name, - 'location_info': location_info, - 'iscsi_target_portal_port': self.iscsi_target_portal_port, - 'nef_url': self.nef.url - } - - def _fill_volumes(self, tg_name): - url = ('san/lunMappings?targetGroup=%s&fields=volume' - '&limit=50000' % tg_name) - self.volumes[tg_name] = { - mapping['volume'] for mapping in self.nef.get(url)['data']} - - def _create_target_group(self, tg_name, target_name): - # Create new target group - url = 'san/targetgroups' - data = { - 'name': tg_name, - 'members': [target_name] - } - self.nef.post(url, data) diff --git a/cinder/volume/drivers/nexenta/ns5/jsonrpc.py b/cinder/volume/drivers/nexenta/ns5/jsonrpc.py deleted file mode 100644 index 8fba11fed..000000000 --- a/cinder/volume/drivers/nexenta/ns5/jsonrpc.py +++ /dev/null @@ -1,200 +0,0 @@ -# Copyright 2016 Nexenta Systems, Inc. 
-# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import requests -import time - -from oslo_log import log as logging -from oslo_serialization import jsonutils - -from cinder import exception -from cinder.i18n import _ -from cinder.utils import retry - -LOG = logging.getLogger(__name__) -TIMEOUT = 60 - - -def check_error(response): - code = response.status_code - if code not in (200, 201, 202): - reason = response.reason - body = response.content - try: - content = jsonutils.loads(body) if body else None - except ValueError: - raise exception.VolumeBackendAPIException( - data=_( - 'Could not parse response: %(code)s %(reason)s ' - '%(content)s') % { - 'code': code, 'reason': reason, 'content': body}) - if content and 'code' in content: - raise exception.NexentaException(content) - raise exception.VolumeBackendAPIException( - data=_( - 'Got bad response: %(code)s %(reason)s %(content)s') % { - 'code': code, 'reason': reason, 'content': content}) - - -class RESTCaller(object): - - retry_exc_tuple = ( - requests.exceptions.ConnectionError, - requests.exceptions.ConnectTimeout - ) - - def __init__(self, proxy, method): - self.__proxy = proxy - self.__method = method - - def get_full_url(self, path): - return '/'.join((self.__proxy.url, path)) - - @retry(retry_exc_tuple, interval=1, retries=6) - def __call__(self, *args): - url = self.get_full_url(args[0]) - kwargs = {'timeout': TIMEOUT, 'verify': False} - data = None - if len(args) > 1: - data = 
args[1] - kwargs['json'] = data - - LOG.debug('Sending JSON data: %s, method: %s, data: %s', - url, self.__method, data) - - response = getattr(self.__proxy.session, self.__method)(url, **kwargs) - check_error(response) - content = (jsonutils.loads(response.content) - if response.content else None) - LOG.debug("Got response: %(code)s %(reason)s %(content)s", { - 'code': response.status_code, - 'reason': response.reason, - 'content': content}) - - if response.status_code == 202 and content: - url = self.get_full_url(content['links'][0]['href']) - keep_going = True - while keep_going: - time.sleep(1) - response = self.__proxy.session.get(url, verify=False) - check_error(response) - LOG.debug("Got response: %(code)s %(reason)s", { - 'code': response.status_code, - 'reason': response.reason}) - content = response.json() if response.content else None - keep_going = response.status_code == 202 - return content - - -class HTTPSAuth(requests.auth.AuthBase): - - def __init__(self, url, username, password): - self.url = url - self.username = username - self.password = password - self.token = None - - def __eq__(self, other): - return all([ - self.url == getattr(other, 'url', None), - self.username == getattr(other, 'username', None), - self.password == getattr(other, 'password', None), - self.token == getattr(other, 'token', None) - ]) - - def __ne__(self, other): - return not self == other - - def handle_401(self, r, **kwargs): - if r.status_code == 401: - LOG.debug('Got 401. Trying to reauth...') - self.token = self.https_auth() - # Consume content and release the original connection - # to allow our new request to reuse the same one. 
- r.content - r.close() - prep = r.request.copy() - requests.cookies.extract_cookies_to_jar( - prep._cookies, r.request, r.raw) - prep.prepare_cookies(prep._cookies) - - prep.headers['Authorization'] = 'Bearer %s' % self.token - _r = r.connection.send(prep, **kwargs) - _r.history.append(r) - _r.request = prep - - return _r - return r - - def __call__(self, r): - if not self.token: - self.token = self.https_auth() - r.headers['Authorization'] = 'Bearer %s' % self.token - r.register_hook('response', self.handle_401) - return r - - def https_auth(self): - LOG.debug('Sending auth request...') - url = '/'.join((self.url, 'auth/login')) - headers = {'Content-Type': 'application/json'} - data = {'username': self.username, 'password': self.password} - response = requests.post(url, json=data, verify=False, - headers=headers, timeout=TIMEOUT) - check_error(response) - response.close() - if response.content: - content = jsonutils.loads(response.content) - token = content['token'] - del content['token'] - LOG.debug("Got response: %(code)s %(reason)s %(content)s", { - 'code': response.status_code, - 'reason': response.reason, - 'content': content}) - return token - raise exception.VolumeBackendAPIException( - data=_( - 'Got bad response: %(code)s %(reason)s') % { - 'code': response.status_code, 'reason': response.reason}) - - -class NexentaJSONProxy(object): - - def __init__(self, host, port, user, password, use_https): - self.session = requests.Session() - self.session.headers.update({'Content-Type': 'application/json'}) - self.host = host - if use_https: - self.scheme = 'https' - self.port = port if port else 8443 - self.session.auth = HTTPSAuth(self.url, user, password) - else: - self.scheme = 'http' - self.port = port if port else 8080 - self.session.auth = (user, password) - - @property - def url(self): - return '%(scheme)s://%(host)s:%(port)s' % { - 'scheme': self.scheme, - 'host': self.host, - 'port': self.port} - - def __getattr__(self, name): - if name in ('get', 
'post', 'put', 'delete'): - return RESTCaller(self, name) - return super(NexentaJSONProxy, self).__getattribute__(name) - - def __repr__(self): - return 'HTTP JSON proxy: %s' % self.url diff --git a/cinder/volume/drivers/nexenta/ns5/nfs.py b/cinder/volume/drivers/nexenta/ns5/nfs.py deleted file mode 100644 index d5d40f3ef..000000000 --- a/cinder/volume/drivers/nexenta/ns5/nfs.py +++ /dev/null @@ -1,483 +0,0 @@ -# Copyright 2016 Nexenta Systems, Inc. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import hashlib -import os - -from oslo_log import log as logging -from oslo_utils import units - -from cinder import context -from cinder import db -from cinder import exception -from cinder.i18n import _ -from cinder import interface -from cinder.volume.drivers.nexenta.ns5 import jsonrpc -from cinder.volume.drivers.nexenta import options -from cinder.volume.drivers.nexenta import utils -from cinder.volume.drivers import nfs - -VERSION = '1.2.0' -LOG = logging.getLogger(__name__) - - -@interface.volumedriver -class NexentaNfsDriver(nfs.NfsDriver): - """Executes volume driver commands on Nexenta Appliance. - - Version history: - 1.0.0 - Initial driver version. - 1.1.0 - Added HTTPS support. - Added use of sessions for REST calls. - 1.2.0 - Support for extend volume. - Support for extending the volume in - create_volume_from_snapshot if the size of new volume is larger - than original volume size. 
- """ - - driver_prefix = 'nexenta' - volume_backend_name = 'NexentaNfsDriver' - VERSION = VERSION - - # ThirdPartySystems wiki page - CI_WIKI_NAME = "Nexenta_CI" - - def __init__(self, *args, **kwargs): - super(NexentaNfsDriver, self).__init__(*args, **kwargs) - if self.configuration: - self.configuration.append_config_values( - options.NEXENTA_CONNECTION_OPTS) - self.configuration.append_config_values( - options.NEXENTA_NFS_OPTS) - self.configuration.append_config_values( - options.NEXENTA_DATASET_OPTS) - - self.nfs_mount_point_base = self.configuration.nexenta_mount_point_base - self.dataset_compression = ( - self.configuration.nexenta_dataset_compression) - self.dataset_deduplication = self.configuration.nexenta_dataset_dedup - self.dataset_description = ( - self.configuration.nexenta_dataset_description) - self.sparsed_volumes = self.configuration.nexenta_sparsed_volumes - self.nef = None - self.use_https = self.configuration.nexenta_use_https - self.nef_host = self.configuration.nas_host - self.share = self.configuration.nas_share_path - self.nef_port = self.configuration.nexenta_rest_port - self.nef_user = self.configuration.nexenta_user - self.nef_password = self.configuration.nexenta_password - - @property - def backend_name(self): - backend_name = None - if self.configuration: - backend_name = self.configuration.safe_get('volume_backend_name') - if not backend_name: - backend_name = self.__class__.__name__ - return backend_name - - def do_setup(self, context): - self.nef = jsonrpc.NexentaJSONProxy( - self.nef_host, self.nef_port, self.nef_user, - self.nef_password, self.use_https) - - def check_for_setup_error(self): - """Verify that the volume for our folder exists. 
- - :raise: :py:exc:`LookupError` - """ - pool_name, fs = self._get_share_datasets(self.share) - url = 'storage/pools/%s' % pool_name - self.nef.get(url) - url = 'storage/pools/%s/filesystems/%s' % ( - pool_name, self._escape_path(fs)) - self.nef.get(url) - - shared = False - response = self.nef.get('nas/nfs') - for share in response['data']: - if share.get('filesystem') == self.share: - shared = True - break - if not shared: - raise LookupError(_("Dataset %s is not shared in Nexenta " - "Store appliance") % self.share) - - def initialize_connection(self, volume, connector): - """Allow connection to connector and return connection info. - - :param volume: volume reference - :param connector: connector reference - """ - data = {'export': volume['provider_location'], 'name': 'volume'} - if volume['provider_location'] in self.shares: - data['options'] = self.shares[volume['provider_location']] - return { - 'driver_volume_type': self.driver_volume_type, - 'data': data - } - - def create_volume(self, volume): - """Creates a volume. 
- - :param volume: volume reference - :returns: provider_location update dict for database - """ - self._do_create_volume(volume) - return {'provider_location': volume['provider_location']} - - def _do_create_volume(self, volume): - pool, fs = self._get_share_datasets(self.share) - filesystem = '%s/%s/%s' % (pool, fs, volume['name']) - LOG.debug('Creating filesystem on NexentaStor %s', filesystem) - url = 'storage/pools/%s/filesystems' % pool - data = { - 'name': '/'.join([fs, volume['name']]), - 'compressionMode': self.dataset_compression, - 'dedupMode': self.dataset_deduplication, - } - self.nef.post(url, data) - volume['provider_location'] = '%s:/%s/%s' % ( - self.nef_host, self.share, volume['name']) - try: - self._share_folder(fs, volume['name']) - self._ensure_share_mounted('%s:/%s/%s' % ( - self.nef_host, self.share, volume['name'])) - - volume_size = volume['size'] - if getattr(self.configuration, - self.driver_prefix + '_sparsed_volumes'): - self._create_sparsed_file(self.local_path(volume), volume_size) - else: - url = 'storage/pools/%s/filesystems/%s' % ( - pool, '%2F'.join([self._escape_path(fs), volume['name']])) - compression = self.nef.get(url).get('compressionMode') - if compression != 'off': - # Disable compression, because otherwise will not use space - # on disk. - self.nef.put(url, {'compressionMode': 'off'}) - try: - self._create_regular_file( - self.local_path(volume), volume_size) - finally: - if compression != 'off': - # Backup default compression value if it was changed. - self.nef.put(url, {'compressionMode': compression}) - - except exception.NexentaException: - try: - url = 'storage/pools/%s/filesystems/%s' % ( - pool, '%2F'.join([self._escape_path(fs), volume['name']])) - self.nef.delete(url) - except exception.NexentaException: - LOG.warning("Cannot destroy created folder: " - "%(vol)s/%(folder)s", - {'vol': pool, 'folder': '/'.join( - [fs, volume['name']])}) - raise - - def delete_volume(self, volume): - """Deletes a logical volume. 
- - :param volume: volume reference - """ - pool, fs_ = self._get_share_datasets(self.share) - fs = self._escape_path(fs_) - url = ('storage/pools/%(pool)s/filesystems/%(fs)s') % { - 'pool': pool, - 'fs': '%2F'.join([fs, volume['name']]) - } - origin = self.nef.get(url).get('originalSnapshot') - url = ('storage/pools/%(pool)s/filesystems/' - '%(fs)s?snapshots=true') % { - 'pool': pool, - 'fs': '%2F'.join([fs, volume['name']]) - } - try: - self.nef.delete(url) - except exception.NexentaException as exc: - if 'Failed to destroy snapshot' in exc.args[0]: - LOG.debug('Snapshot has dependent clones, skipping') - else: - raise - try: - if origin and self._is_clone_snapshot_name(origin): - path, snap = origin.split('@') - pool, fs = path.split('/', 1) - snap_url = ('storage/pools/%(pool)s/' - 'filesystems/%(fs)s/snapshots/%(snap)s') % { - 'pool': pool, - 'fs': fs, - 'snap': snap - } - self.nef.delete(snap_url) - except exception.NexentaException as exc: - if 'does not exist' in exc.args[0]: - LOG.debug( - 'Volume %s does not exist on appliance', '/'.join( - [pool, fs_])) - - def extend_volume(self, volume, new_size): - """Extend an existing volume. - - :param volume: volume reference - :param new_size: volume new size in GB - """ - LOG.info('Extending volume: %(id)s New size: %(size)s GB', - {'id': volume['id'], 'size': new_size}) - if self.sparsed_volumes: - self._execute('truncate', '-s', '%sG' % new_size, - self.local_path(volume), - run_as_root=self._execute_as_root) - else: - block_size_mb = 1 - block_count = ((new_size - volume['size']) * units.Gi // - (block_size_mb * units.Mi)) - self._execute( - 'dd', 'if=/dev/zero', - 'seek=%d' % (volume['size'] * units.Gi / block_size_mb), - 'of=%s' % self.local_path(volume), - 'bs=%dM' % block_size_mb, - 'count=%d' % block_count, - run_as_root=True) - - def create_snapshot(self, snapshot): - """Creates a snapshot. 
- - :param snapshot: snapshot reference - """ - volume = self._get_snapshot_volume(snapshot) - pool, fs = self._get_share_datasets(self.share) - url = 'storage/pools/%(pool)s/filesystems/%(fs)s/snapshots' % { - 'pool': pool, - 'fs': self._escape_path('/'.join([fs, volume['name']])), - } - data = {'name': snapshot['name']} - self.nef.post(url, data) - - def delete_snapshot(self, snapshot): - """Deletes a snapshot. - - :param snapshot: snapshot reference - """ - volume = self._get_snapshot_volume(snapshot) - pool, fs = self._get_share_datasets(self.share) - url = ('storage/pools/%(pool)s/' - 'filesystems/%(fs)s/snapshots/%(snap)s') % { - 'pool': pool, - 'fs': self._escape_path('/'.join([fs, volume['name']])), - 'snap': snapshot['name'] - } - try: - self.nef.delete(url) - except exception.NexentaException as exc: - if 'EBUSY' is exc: - LOG.warning( - 'Could not delete snapshot %s - it has dependencies', - snapshot['name']) - - def create_volume_from_snapshot(self, volume, snapshot): - """Create new volume from other's snapshot on appliance. 
- - :param volume: reference of volume to be created - :param snapshot: reference of source snapshot - """ - snapshot_vol = self._get_snapshot_volume(snapshot) - volume['provider_location'] = snapshot_vol['provider_location'] - - pool, fs = self._get_share_datasets(self.share) - dataset_path = '%s/%s' % (pool, fs) - url = ('storage/pools/%(pool)s/' - 'filesystems/%(fs)s/snapshots/%(snap)s/clone') % { - 'pool': pool, - 'fs': self._escape_path('/'.join([fs, snapshot_vol['name']])), - 'snap': snapshot['name'] - } - path = '/'.join([pool, fs, volume['name']]) - data = {'targetPath': path} - self.nef.post(url, data) - - try: - self._share_folder(fs, volume['name']) - except exception.NexentaException: - try: - url = ('storage/pools/%(pool)s/' - 'filesystems/%(fs)s') % { - 'pool': pool, - 'fs': self._escape_path('/'.join([fs, volume['name']])) - } - self.nef.delete(url) - except exception.NexentaException: - LOG.warning("Cannot destroy cloned filesystem: " - "%(vol)s/%(filesystem)s", - {'vol': dataset_path, - 'filesystem': volume['name']}) - raise - if volume['size'] > snapshot['volume_size']: - new_size = volume['size'] - volume['size'] = snapshot['volume_size'] - self.extend_volume(volume, new_size) - volume['size'] = new_size - return {'provider_location': volume['provider_location']} - - def create_cloned_volume(self, volume, src_vref): - """Creates a clone of the specified volume. 
- - :param volume: new volume reference - :param src_vref: source volume reference - """ - LOG.info('Creating clone of volume: %s', src_vref['id']) - snapshot = {'volume_name': src_vref['name'], - 'volume_id': src_vref['id'], - 'volume_size': src_vref['size'], - 'name': self._get_clone_snapshot_name(volume)} - self.create_snapshot(snapshot) - try: - return self.create_volume_from_snapshot(volume, snapshot) - except exception.NexentaException: - LOG.error('Volume creation failed, deleting created snapshot ' - '%(volume_name)s@%(name)s', snapshot) - try: - self.delete_snapshot(snapshot) - except (exception.NexentaException, exception.SnapshotIsBusy): - LOG.warning('Failed to delete zfs snapshot ' - '%(volume_name)s@%(name)s', snapshot) - raise - - def local_path(self, volume): - """Get volume path (mounted locally fs path) for given volume. - - :param volume: volume reference - """ - nfs_share = volume['provider_location'] - return os.path.join(self._get_mount_point_for_share(nfs_share), - 'volume') - - def _get_mount_point_for_share(self, nfs_share): - """Returns path to mount point NFS share. - - :param nfs_share: example 172.18.194.100:/var/nfs - """ - nfs_share = nfs_share.encode('utf-8') - return os.path.join(self.configuration.nexenta_mount_point_base, - hashlib.md5(nfs_share).hexdigest()) - - def _share_folder(self, path, filesystem): - """Share NFS filesystem on NexentaStor Appliance. 
- - :param nef: nef object - :param path: path to parent filesystem - :param filesystem: filesystem that needs to be shared - """ - pool = self.share.split('/')[0] - LOG.debug( - 'Creating ACL for filesystem %s on Nexenta Store', filesystem) - url = 'storage/pools/%s/filesystems/%s/acl' % ( - pool, self._escape_path('/'.join([path, filesystem]))) - data = { - "type": "allow", - "principal": "everyone@", - "permissions": [ - "list_directory", - "read_data", - "add_file", - "write_data", - "add_subdirectory", - "append_data", - "read_xattr", - "write_xattr", - "execute", - "delete_child", - "read_attributes", - "write_attributes", - "delete", - "read_acl", - "write_acl", - "write_owner", - "synchronize" - ], - "flags": [ - "file_inherit", - "dir_inherit" - ] - } - self.nef.post(url, data) - - LOG.debug( - 'Successfully shared filesystem %s', '/'.join( - [path, filesystem])) - - def _get_capacity_info(self, path): - """Calculate available space on the NFS share. - - :param path: example pool/nfs - """ - pool, fs = self._get_share_datasets(path) - url = 'storage/pools/%s/filesystems/%s' % ( - pool, self._escape_path(fs)) - data = self.nef.get(url) - total = utils.str2size(data['bytesAvailable']) - allocated = utils.str2size(data['bytesUsed']) - free = total - allocated - return total, free, allocated - - def _get_snapshot_volume(self, snapshot): - ctxt = context.get_admin_context() - return db.volume_get(ctxt, snapshot['volume_id']) - - def _get_share_datasets(self, nfs_share): - pool_name, fs = nfs_share.split('/', 1) - return pool_name, fs - - def _get_clone_snapshot_name(self, volume): - """Return name for snapshot that will be used to clone the volume.""" - return 'cinder-clone-snapshot-%(id)s' % volume - - def _is_clone_snapshot_name(self, snapshot): - """Check if snapshot is created for cloning.""" - name = snapshot.split('@')[-1] - return name.startswith('cinder-clone-snapshot-') - - def _update_volume_stats(self): - """Retrieve stats info for NexentaStor 
appliance.""" - LOG.debug('Updating volume stats') - share = ':/'.join([self.nef_host, self.share]) - total, free, allocated = self._get_capacity_info(self.share) - total_space = utils.str2gib_size(total) - free_space = utils.str2gib_size(free) - - location_info = '%(driver)s:%(share)s' % { - 'driver': self.__class__.__name__, - 'share': share - } - self._stats = { - 'vendor_name': 'Nexenta', - 'dedup': self.dataset_deduplication, - 'compression': self.dataset_compression, - 'description': self.dataset_description, - 'nef_url': self.nef_host, - 'driver_version': self.VERSION, - 'storage_protocol': 'NFS', - 'total_capacity_gb': total_space, - 'free_capacity_gb': free_space, - 'reserved_percentage': self.configuration.reserved_percentage, - 'QoS_support': False, - 'location_info': location_info, - 'volume_backend_name': self.backend_name, - 'nfs_mount_point_base': self.nfs_mount_point_base - } - - def _escape_path(self, path): - return path.replace('/', '%2F') diff --git a/cinder/volume/drivers/nexenta/options.py b/cinder/volume/drivers/nexenta/options.py deleted file mode 100644 index 6c74cd481..000000000 --- a/cinder/volume/drivers/nexenta/options.py +++ /dev/null @@ -1,156 +0,0 @@ -# Copyright 2016 Nexenta Systems, Inc. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -from oslo_config import cfg - -from cinder.volume import configuration as conf - -NEXENTA_EDGE_OPTS = [ - cfg.StrOpt('nexenta_nbd_symlinks_dir', - default='/dev/disk/by-path', - help='NexentaEdge logical path of directory to store symbolic ' - 'links to NBDs'), - cfg.StrOpt('nexenta_rest_address', - default='', - help='IP address of NexentaEdge management REST API endpoint'), - cfg.StrOpt('nexenta_rest_user', - default='admin', - help='User name to connect to NexentaEdge'), - cfg.StrOpt('nexenta_rest_password', - default='nexenta', - help='Password to connect to NexentaEdge', - secret=True), - cfg.StrOpt('nexenta_lun_container', - default='', - help='NexentaEdge logical path of bucket for LUNs'), - cfg.StrOpt('nexenta_iscsi_service', - default='', - help='NexentaEdge iSCSI service name'), - cfg.StrOpt('nexenta_client_address', - default='', - help='NexentaEdge iSCSI Gateway client ' - 'address for non-VIP service'), - cfg.IntOpt('nexenta_chunksize', - default=32768, - help='NexentaEdge iSCSI LUN object chunk size') -] - -NEXENTA_CONNECTION_OPTS = [ - cfg.StrOpt('nexenta_host', - default='', - help='IP address of Nexenta SA'), - cfg.IntOpt('nexenta_rest_port', - default=0, - help='HTTP(S) port to connect to Nexenta REST API server. 
' - 'If it is equal zero, 8443 for HTTPS and 8080 for HTTP ' - 'is used'), - cfg.StrOpt('nexenta_rest_protocol', - default='auto', - choices=['http', 'https', 'auto'], - help='Use http or https for REST connection (default auto)'), - cfg.BoolOpt('nexenta_use_https', - default=True, - help='Use secure HTTP for REST connection (default True)'), - cfg.StrOpt('nexenta_user', - default='admin', - help='User name to connect to Nexenta SA'), - cfg.StrOpt('nexenta_password', - default='nexenta', - help='Password to connect to Nexenta SA', - secret=True), -] - -NEXENTA_ISCSI_OPTS = [ - cfg.IntOpt('nexenta_iscsi_target_portal_port', - default=3260, - help='Nexenta target portal port'), - cfg.StrOpt('nexenta_volume', - default='cinder', - help='SA Pool that holds all volumes'), - cfg.StrOpt('nexenta_target_prefix', - default='iqn.1986-03.com.sun:02:cinder-', - help='IQN prefix for iSCSI targets'), - cfg.StrOpt('nexenta_target_group_prefix', - default='cinder/', - help='Prefix for iSCSI target groups on SA'), - cfg.StrOpt('nexenta_volume_group', - default='iscsi', - help='Volume group for ns5'), -] - -NEXENTA_NFS_OPTS = [ - cfg.StrOpt('nexenta_shares_config', - default='/etc/cinder/nfs_shares', - help='File with the list of available nfs shares'), - cfg.StrOpt('nexenta_mount_point_base', - default='$state_path/mnt', - help='Base directory that contains NFS share mount points'), - cfg.BoolOpt('nexenta_sparsed_volumes', - default=True, - help='Enables or disables the creation of volumes as ' - 'sparsed files that take no space. 
If disabled ' - '(False), volume is created as a regular file, ' - 'which takes a long time.'), - cfg.BoolOpt('nexenta_nms_cache_volroot', - default=True, - help=('If set True cache NexentaStor appliance volroot option ' - 'value.')) -] - -NEXENTA_DATASET_OPTS = [ - cfg.StrOpt('nexenta_dataset_compression', - default='on', - choices=['on', 'off', 'gzip', 'gzip-1', 'gzip-2', 'gzip-3', - 'gzip-4', 'gzip-5', 'gzip-6', 'gzip-7', 'gzip-8', - 'gzip-9', 'lzjb', 'zle', 'lz4'], - help='Compression value for new ZFS folders.'), - cfg.StrOpt('nexenta_dataset_dedup', - default='off', - choices=['on', 'off', 'sha256', 'verify', 'sha256, verify'], - help='Deduplication value for new ZFS folders.'), - cfg.StrOpt('nexenta_dataset_description', - default='', - help='Human-readable description for the folder.'), - cfg.IntOpt('nexenta_blocksize', - default=4096, - help='Block size for datasets'), - cfg.IntOpt('nexenta_ns5_blocksize', - default=32, - help='Block size for datasets'), - cfg.BoolOpt('nexenta_sparse', - default=False, - help='Enables or disables the creation of sparse datasets'), -] - -NEXENTA_RRMGR_OPTS = [ - cfg.IntOpt('nexenta_rrmgr_compression', - default=0, - help=('Enable stream compression, level 1..9. 
1 - gives best ' - 'speed; 9 - gives best compression.')), - cfg.IntOpt('nexenta_rrmgr_tcp_buf_size', - default=4096, - help='TCP Buffer size in KiloBytes.'), - cfg.IntOpt('nexenta_rrmgr_connections', - default=2, - help='Number of TCP connections.'), -] - -CONF = cfg.CONF -CONF.register_opts(NEXENTA_CONNECTION_OPTS, group=conf.SHARED_CONF_GROUP) -CONF.register_opts(NEXENTA_ISCSI_OPTS, group=conf.SHARED_CONF_GROUP) -CONF.register_opts(NEXENTA_DATASET_OPTS, group=conf.SHARED_CONF_GROUP) -CONF.register_opts(NEXENTA_NFS_OPTS, group=conf.SHARED_CONF_GROUP) -CONF.register_opts(NEXENTA_RRMGR_OPTS, group=conf.SHARED_CONF_GROUP) -CONF.register_opts(NEXENTA_EDGE_OPTS, group=conf.SHARED_CONF_GROUP) diff --git a/cinder/volume/drivers/nexenta/utils.py b/cinder/volume/drivers/nexenta/utils.py deleted file mode 100644 index e61a8e3bb..000000000 --- a/cinder/volume/drivers/nexenta/utils.py +++ /dev/null @@ -1,165 +0,0 @@ -# Copyright 2013 Nexenta Systems, Inc. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import re -import six - -from oslo_utils import units -import six.moves.urllib.parse as urlparse - -from cinder.i18n import _ - - -def str2size(s, scale=1024): - """Convert size-string. - - String format: [:space:] to bytes. 
- - :param s: size-string - :param scale: base size - """ - if not s: - return 0 - - if isinstance(s, six.integer_types): - return s - - match = re.match(r'^([\.\d]+)\s*([BbKkMmGgTtPpEeZzYy]?)', s) - if match is None: - raise ValueError(_('Invalid value: "%s"') % s) - - groups = match.groups() - value = float(groups[0]) - suffix = len(groups) > 1 and groups[1].upper() or 'B' - - types = ('B', 'K', 'M', 'G', 'T', 'P', 'E', 'Z', 'Y') - for i, t in enumerate(types): - if suffix == t: - return int(value * pow(scale, i)) - - -def str2gib_size(s): - """Covert size-string to size in gigabytes.""" - size_in_bytes = str2size(s) - return size_in_bytes // units.Gi - - -def get_rrmgr_cmd(src, dst, compression=None, tcp_buf_size=None, - connections=None): - """Returns rrmgr command for source and destination.""" - cmd = ['rrmgr', '-s', 'zfs'] - if compression: - cmd.extend(['-c', '%s' % compression]) - cmd.append('-q') - cmd.append('-e') - if tcp_buf_size: - cmd.extend(['-w', six.text_type(tcp_buf_size)]) - if connections: - cmd.extend(['-n', six.text_type(connections)]) - cmd.extend([src, dst]) - return ' '.join(cmd) - - -def parse_nms_url(url): - """Parse NMS url into normalized parts like scheme, user, host and others. - - Example NMS URL: - auto://admin:nexenta@192.168.1.1:2000/ - - NMS URL parts: - - .. code-block:: none - - auto True if url starts with auto://, protocol - will be automatically switched to https - if http not supported; - scheme (auto) connection protocol (http or https); - user (admin) NMS user; - password (nexenta) NMS password; - host (192.168.1.1) NMS host; - port (2000) NMS port. 
- - :param url: url string - :return: tuple (auto, scheme, user, password, host, port, path) - """ - pr = urlparse.urlparse(url) - scheme = pr.scheme - auto = scheme == 'auto' - if auto: - scheme = 'http' - user = 'admin' - password = 'nexenta' - if '@' not in pr.netloc: - host_and_port = pr.netloc - else: - user_and_password, host_and_port = pr.netloc.split('@', 1) - if ':' in user_and_password: - user, password = user_and_password.split(':') - else: - user = user_and_password - if ':' in host_and_port: - host, port = host_and_port.split(':', 1) - else: - host, port = host_and_port, '2000' - return auto, scheme, user, password, host, port, '/rest/nms/' - - -def parse_nef_url(url): - """Parse NMS url into normalized parts like scheme, user, host and others. - - Example NMS URL: - auto://admin:nexenta@192.168.1.1:8080/ - - NMS URL parts: - - .. code-block:: none - - auto True if url starts with auto://, protocol - will be automatically switched to https - if http not supported; - scheme (auto) connection protocol (http or https); - user (admin) NMS user; - password (nexenta) NMS password; - host (192.168.1.1) NMS host; - port (8080) NMS port. 
- - :param url: url string - :return: tuple (auto, scheme, user, password, host, port) - """ - pr = urlparse.urlparse(url) - scheme = pr.scheme - auto = scheme == 'auto' - if auto: - scheme = 'http' - user = 'admin' - password = 'nexenta' - if '@' not in pr.netloc: - host_and_port = pr.netloc - else: - user_and_password, host_and_port = pr.netloc.split('@', 1) - if ':' in user_and_password: - user, password = user_and_password.split(':') - else: - user = user_and_password - if ':' in host_and_port: - host, port = host_and_port.split(':', 1) - else: - host, port = host_and_port, '8080' - return auto, scheme, user, password, host, port - - -def get_migrate_snapshot_name(volume): - """Return name for snapshot that will be used to migrate the volume.""" - return 'cinder-migrate-snapshot-%(id)s' % volume diff --git a/cinder/volume/drivers/nfs.py b/cinder/volume/drivers/nfs.py deleted file mode 100644 index 8044a9e29..000000000 --- a/cinder/volume/drivers/nfs.py +++ /dev/null @@ -1,608 +0,0 @@ -# Copyright (c) 2012 NetApp, Inc. -# Copyright (c) 2016 Red Hat, Inc. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -import errno -import os -import time - -from os_brick.remotefs import remotefs as remotefs_brick -from oslo_concurrency import processutils as putils -from oslo_config import cfg -from oslo_log import log as logging -from oslo_utils import units -import six - -from cinder import coordination -from cinder import exception -from cinder.i18n import _ -from cinder.image import image_utils -from cinder import interface -from cinder import utils -from cinder.volume import configuration -from cinder.volume import driver -from cinder.volume.drivers import remotefs - -VERSION = '1.4.0' - -LOG = logging.getLogger(__name__) - - -nfs_opts = [ - cfg.StrOpt('nfs_shares_config', - default='/etc/cinder/nfs_shares', - help='File with the list of available NFS shares.'), - cfg.BoolOpt('nfs_sparsed_volumes', - default=True, - help='Create volumes as sparsed files which take no space. ' - 'If set to False volume is created as regular file. ' - 'In such case volume creation takes a lot of time.'), - cfg.BoolOpt('nfs_qcow2_volumes', - default=False, - help='Create volumes as QCOW2 files rather than raw files.'), - cfg.StrOpt('nfs_mount_point_base', - default='$state_path/mnt', - help='Base dir containing mount points for NFS shares.'), - cfg.StrOpt('nfs_mount_options', - help='Mount options passed to the NFS client. See section ' - 'of the NFS man page for details.'), - cfg.IntOpt('nfs_mount_attempts', - default=3, - help='The number of attempts to mount NFS shares before ' - 'raising an error. At least one attempt will be ' - 'made to mount an NFS share, regardless of the ' - 'value specified.'), - cfg.BoolOpt('nfs_snapshot_support', - default=False, - help='Enable support for snapshots on the NFS driver. 
' - 'Platforms using libvirt <1.2.7 will encounter issues ' - 'with this feature.'), -] - -CONF = cfg.CONF -CONF.register_opts(nfs_opts, group=configuration.SHARED_CONF_GROUP) - - -@interface.volumedriver -class NfsDriver(remotefs.RemoteFSSnapDriverDistributed, driver.ExtendVD): - """NFS based cinder driver. - - Creates file on NFS share for using it as block device on hypervisor. - """ - - driver_volume_type = 'nfs' - driver_prefix = 'nfs' - volume_backend_name = 'Generic_NFS' - VERSION = VERSION - - # ThirdPartySystems wiki page - CI_WIKI_NAME = "Cinder_Jenkins" - - def __init__(self, execute=putils.execute, *args, **kwargs): - self._remotefsclient = None - super(NfsDriver, self).__init__(*args, **kwargs) - self.configuration.append_config_values(nfs_opts) - root_helper = utils.get_root_helper() - # base bound to instance is used in RemoteFsConnector. - self.base = getattr(self.configuration, - 'nfs_mount_point_base') - self.base = os.path.realpath(self.base) - opts = getattr(self.configuration, - 'nfs_mount_options') - - nas_mount_options = getattr(self.configuration, - 'nas_mount_options', - None) - if nas_mount_options is not None: - LOG.debug('overriding nfs_mount_options with nas_mount_options') - opts = nas_mount_options - - self._remotefsclient = remotefs_brick.RemoteFsClient( - 'nfs', root_helper, execute=execute, - nfs_mount_point_base=self.base, - nfs_mount_options=opts) - - self._sparse_copy_volume_data = True - self.reserved_percentage = self.configuration.reserved_percentage - self.max_over_subscription_ratio = ( - self.configuration.max_over_subscription_ratio) - - def initialize_connection(self, volume, connector): - - LOG.debug('Initializing connection to volume %(vol)s. 
' - 'Connector: %(con)s', {'vol': volume.id, 'con': connector}) - - active_vol = self.get_active_image_from_info(volume) - volume_dir = self._local_volume_dir(volume) - path_to_vol = os.path.join(volume_dir, active_vol) - info = self._qemu_img_info(path_to_vol, volume['name']) - - data = {'export': volume.provider_location, - 'name': active_vol} - if volume.provider_location in self.shares: - data['options'] = self.shares[volume.provider_location] - - conn_info = { - 'driver_volume_type': self.driver_volume_type, - 'data': data, - 'mount_point_base': self._get_mount_point_base() - } - - # Test file for raw vs. qcow2 format - if info.file_format not in ['raw', 'qcow2']: - msg = _('nfs volume must be a valid raw or qcow2 image.') - raise exception.InvalidVolume(reason=msg) - - conn_info['data']['format'] = info.file_format - LOG.debug('NfsDriver: conn_info: %s', conn_info) - return conn_info - - def do_setup(self, context): - """Any initialization the volume driver does while starting.""" - super(NfsDriver, self).do_setup(context) - - nas_host = getattr(self.configuration, - 'nas_host', - None) - nas_share_path = getattr(self.configuration, - 'nas_share_path', - None) - - # If both nas_host and nas_share_path are set we are not - # going to use the nfs_shares_config file. So, only check - # for its existence if it is going to be used. 
- if((not nas_host) or (not nas_share_path)): - config = self.configuration.nfs_shares_config - if not config: - msg = (_("There's no NFS config file configured (%s)") % - 'nfs_shares_config') - LOG.warning(msg) - raise exception.NfsException(msg) - if not os.path.exists(config): - msg = (_("NFS config file at %(config)s doesn't exist") % - {'config': config}) - LOG.warning(msg) - raise exception.NfsException(msg) - - self.shares = {} # address : options - - # Check if mount.nfs is installed on this system; note that we - # need to be root, to also find mount.nfs on distributions, where - # it is not located in an unprivileged users PATH (e.g. /sbin). - package = 'mount.nfs' - try: - self._execute(package, check_exit_code=False, - run_as_root=True) - except OSError as exc: - if exc.errno == errno.ENOENT: - msg = _('%s is not installed') % package - raise exception.NfsException(msg) - else: - raise - - # Now that all configuration data has been loaded (shares), - # we can "set" our final NAS file security options. - self.set_nas_security_options(self._is_voldb_empty_at_startup) - self._check_snapshot_support(setup_checking=True) - - def _ensure_share_mounted(self, nfs_share): - mnt_flags = [] - if self.shares.get(nfs_share) is not None: - mnt_flags = self.shares[nfs_share].split() - num_attempts = max(1, self.configuration.nfs_mount_attempts) - for attempt in range(num_attempts): - try: - self._remotefsclient.mount(nfs_share, mnt_flags) - return - except Exception as e: - if attempt == (num_attempts - 1): - LOG.error('Mount failure for %(share)s after ' - '%(count)d attempts.', - {'share': nfs_share, - 'count': num_attempts}) - raise exception.NfsException(six.text_type(e)) - LOG.debug('Mount attempt %(attempt)d failed: %(exc)s.\n' - 'Retrying mount ...', - {'attempt': attempt, 'exc': e}) - time.sleep(1) - - def _find_share(self, volume): - """Choose NFS share among available ones for given volume size. 
- - For instances with more than one share that meets the criteria, the - share with the least "allocated" space will be selected. - - :param volume: the volume to be created. - """ - - if not self._mounted_shares: - raise exception.NfsNoSharesMounted() - - target_share = None - target_share_reserved = 0 - - for nfs_share in self._mounted_shares: - total_size, total_available, total_allocated = ( - self._get_capacity_info(nfs_share)) - share_info = {'total_size': total_size, - 'total_available': total_available, - 'total_allocated': total_allocated, - } - if not self._is_share_eligible(nfs_share, - volume.size, - share_info): - continue - if target_share is not None: - if target_share_reserved > total_allocated: - target_share = nfs_share - target_share_reserved = total_allocated - else: - target_share = nfs_share - target_share_reserved = total_allocated - - if target_share is None: - raise exception.NfsNoSuitableShareFound( - volume_size=volume.size) - - LOG.debug('Selected %s as target NFS share.', target_share) - - return target_share - - def _is_share_eligible(self, nfs_share, volume_size_in_gib, - share_info=None): - """Verifies NFS share is eligible to host volume with given size. - - First validation step: ratio of actual space (used_space / total_space) - is less than used_ratio. Second validation step: apparent space - allocated (differs from actual space used when using sparse files) - and compares the apparent available - space (total_available * oversub_ratio) to ensure enough space is - available for the new volume. - - :param nfs_share: NFS share - :param volume_size_in_gib: int size in GB - """ - # Because the generic NFS driver aggregates over all shares - # when reporting capacity and usage stats to the scheduler, - # we still have to perform some scheduler-like capacity - # checks here, and these have to take into account - # configuration for reserved space and oversubscription. 
- # It would be better to do all this in the scheduler, but - # this requires either pool support for the generic NFS - # driver or limiting each NFS backend driver to a single share. - - # derive used_ratio from reserved percentage - if share_info is None: - total_size, total_available, total_allocated = ( - self._get_capacity_info(nfs_share)) - share_info = {'total_size': total_size, - 'total_available': total_available, - 'total_allocated': total_allocated, - } - used_percentage = 100 - self.reserved_percentage - used_ratio = used_percentage / 100.0 - - requested_volume_size = volume_size_in_gib * units.Gi - - apparent_size = max(0, share_info['total_size'] * - self.max_over_subscription_ratio) - - apparent_available = max(0, apparent_size - - share_info['total_allocated']) - - actual_used_ratio = ((share_info['total_size'] - - share_info['total_available']) / - float(share_info['total_size'])) - if actual_used_ratio > used_ratio: - # NOTE(morganfainberg): We check the used_ratio first since - # with oversubscription it is possible to not have the actual - # available space but be within our oversubscription limit - # therefore allowing this share to still be selected as a valid - # target. - LOG.debug('%s is not eligible - used ratio exceeded.', - nfs_share) - return False - if apparent_available <= requested_volume_size: - LOG.debug('%s is not eligible - insufficient (apparent) available ' - 'space.', - nfs_share) - return False - if share_info['total_allocated'] / share_info['total_size'] >= ( - self.max_over_subscription_ratio): - LOG.debug('%s is not eligible - utilization exceeds max ' - 'over subscription ratio.', - nfs_share) - return False - return True - - def _get_mount_point_for_share(self, nfs_share): - """Needed by parent class.""" - return self._remotefsclient.get_mount_point(nfs_share) - - def _get_capacity_info(self, nfs_share): - """Calculate available space on the NFS share. 
- - :param nfs_share: example 172.18.194.100:/var/nfs - """ - mount_point = self._get_mount_point_for_share(nfs_share) - - df, _ = self._execute('stat', '-f', '-c', '%S %b %a', mount_point, - run_as_root=self._execute_as_root) - block_size, blocks_total, blocks_avail = map(float, df.split()) - total_available = block_size * blocks_avail - total_size = block_size * blocks_total - - du, _ = self._execute('du', '-sb', '--apparent-size', '--exclude', - '*snapshot*', mount_point, - run_as_root=self._execute_as_root) - total_allocated = float(du.split()[0]) - return total_size, total_available, total_allocated - - def _get_mount_point_base(self): - return self.base - - def extend_volume(self, volume, new_size): - """Extend an existing volume to the new size.""" - LOG.info('Extending volume %s.', volume.id) - extend_by = int(new_size) - volume.size - if not self._is_share_eligible(volume.provider_location, - extend_by): - raise exception.ExtendVolumeError(reason='Insufficient space to' - ' extend volume %s to %sG' - % (volume.id, new_size)) - path = self.local_path(volume) - LOG.info('Resizing file to %sG...', new_size) - image_utils.resize_image(path, new_size, - run_as_root=self._execute_as_root) - if not self._is_file_size_equal(path, new_size): - raise exception.ExtendVolumeError( - reason='Resizing image file failed.') - - def _is_file_size_equal(self, path, size): - """Checks if file size at path is equal to size.""" - data = image_utils.qemu_img_info(path, - run_as_root=self._execute_as_root) - virt_size = int(data.virtual_size / units.Gi) - return virt_size == size - - def set_nas_security_options(self, is_new_cinder_install): - """Determine the setting to use for Secure NAS options. - - Value of each NAS Security option is checked and updated. If the - option is currently 'auto', then it is set to either true or false - based upon if this is a new Cinder installation. The RemoteFS variable - '_execute_as_root' will be updated for this driver. 
- - :param is_new_cinder_install: bool indication of new Cinder install - """ - doc_html = "http://docs.openstack.org/admin-guide" \ - "/blockstorage_nfs_backend.html" - - self._ensure_shares_mounted() - if not self._mounted_shares: - raise exception.NfsNoSharesMounted() - - nfs_mount = self._get_mount_point_for_share(self._mounted_shares[0]) - - self.configuration.nas_secure_file_permissions = \ - self._determine_nas_security_option_setting( - self.configuration.nas_secure_file_permissions, - nfs_mount, is_new_cinder_install) - - LOG.debug('NAS variable secure_file_permissions setting is: %s', - self.configuration.nas_secure_file_permissions) - - if self.configuration.nas_secure_file_permissions == 'false': - LOG.warning("The NAS file permissions mode will be 666 " - "(allowing other/world read & write access). " - "This is considered an insecure NAS environment. " - "Please see %s for information on a secure " - "NFS configuration.", - doc_html) - - self.configuration.nas_secure_file_operations = \ - self._determine_nas_security_option_setting( - self.configuration.nas_secure_file_operations, - nfs_mount, is_new_cinder_install) - - # If secure NAS, update the '_execute_as_root' flag to not - # run as the root user; run as process' user ID. - - # TODO(eharney): need to separate secure NAS vs. execute as root. - # There are requirements to run some commands as root even - # when running in secure NAS mode. (i.e. read volume file - # attached to an instance and owned by qemu:qemu) - if self.configuration.nas_secure_file_operations == 'true': - self._execute_as_root = False - - LOG.debug('NAS secure file operations setting is: %s', - self.configuration.nas_secure_file_operations) - - if self.configuration.nas_secure_file_operations == 'false': - LOG.warning("The NAS file operations will be run as " - "root: allowing root level access at the storage " - "backend. This is considered an insecure NAS " - "environment. 
Please see %s " - "for information on a secure NAS configuration.", - doc_html) - - def update_migrated_volume(self, ctxt, volume, new_volume, - original_volume_status): - """Return the keys and values updated from NFS for migrated volume. - - This method should rename the back-end volume name(id) on the - destination host back to its original name(id) on the source host. - - :param ctxt: The context used to run the method update_migrated_volume - :param volume: The original volume that was migrated to this backend - :param new_volume: The migration volume object that was created on - this backend as part of the migration process - :param original_volume_status: The status of the original volume - :returns: model_update to update DB with any needed changes - """ - name_id = None - if original_volume_status == 'available': - current_name = CONF.volume_name_template % new_volume.id - original_volume_name = CONF.volume_name_template % volume.id - current_path = self.local_path(new_volume) - # Replace the volume name with the original volume name - original_path = current_path.replace(current_name, - original_volume_name) - try: - os.rename(current_path, original_path) - except OSError: - LOG.error('Unable to rename the logical volume ' - 'for volume: %s', volume.id) - # If the rename fails, _name_id should be set to the new - # volume id and provider_location should be set to the - # one from the new volume as well. - name_id = new_volume._name_id or new_volume.id - else: - # The back-end will not be renamed. 
- name_id = new_volume._name_id or new_volume.id - return {'_name_id': name_id, - 'provider_location': new_volume.provider_location} - - def _update_volume_stats(self): - """Retrieve stats info from volume group.""" - - super(NfsDriver, self)._update_volume_stats() - self._stats['sparse_copy_volume'] = True - data = self._stats - - global_capacity = data['total_capacity_gb'] - global_free = data['free_capacity_gb'] - - thin_enabled = self.configuration.nfs_sparsed_volumes - if thin_enabled: - provisioned_capacity = self._get_provisioned_capacity() - else: - provisioned_capacity = round(global_capacity - global_free, 2) - - data['provisioned_capacity_gb'] = provisioned_capacity - data['max_over_subscription_ratio'] = self.max_over_subscription_ratio - data['reserved_percentage'] = self.reserved_percentage - data['thin_provisioning_support'] = thin_enabled - data['thick_provisioning_support'] = not thin_enabled - - self._stats = data - - @coordination.synchronized('{self.driver_prefix}-{volume[id]}') - def create_volume(self, volume): - """Apply locking to the create volume operation.""" - - return super(NfsDriver, self).create_volume(volume) - - @coordination.synchronized('{self.driver_prefix}-{volume[id]}') - def delete_volume(self, volume): - """Deletes a logical volume.""" - - LOG.debug('Deleting volume %(vol)s, provider_location: %(loc)s', - {'vol': volume.id, 'loc': volume.provider_location}) - - if not volume.provider_location: - LOG.warning('Volume %s does not have provider_location ' - 'specified, skipping', volume.name) - return - - info_path = self._local_path_volume_info(volume) - info = self._read_info_file(info_path, empty_if_missing=True) - - if info: - base_volume_path = os.path.join(self._local_volume_dir(volume), - info['active']) - self._delete(info_path) - else: - base_volume_path = self._local_path_volume(volume) - - self._delete(base_volume_path) - - def _qemu_img_info(self, path, volume_name): - return super(NfsDriver, 
self)._qemu_img_info_base( - path, - volume_name, - self.configuration.nfs_mount_point_base, - run_as_root=True) - - def _check_snapshot_support(self, setup_checking=False): - """Ensure snapshot support is enabled in config.""" - - if (not self.configuration.nfs_snapshot_support and - not setup_checking): - msg = _("NFS driver snapshot support is disabled in cinder.conf.") - raise exception.VolumeDriverException(message=msg) - - if (self.configuration.nas_secure_file_operations == 'true' and - self.configuration.nfs_snapshot_support): - msg = _("Snapshots are not supported with " - "nas_secure_file_operations enabled ('true' or 'auto'). " - "Please set it to 'false' if you intend to have " - "it enabled.") - LOG.error(msg) - raise exception.VolumeDriverException(message=msg) - - @coordination.synchronized('{self.driver_prefix}-{snapshot.volume.id}') - def create_snapshot(self, snapshot): - """Apply locking to the create snapshot operation.""" - - self._check_snapshot_support() - return self._create_snapshot(snapshot) - - @coordination.synchronized('{self.driver_prefix}-{snapshot.volume.id}') - def delete_snapshot(self, snapshot): - """Apply locking to the delete snapshot operation.""" - - self._check_snapshot_support() - return self._delete_snapshot(snapshot) - - def _copy_volume_from_snapshot(self, snapshot, volume, volume_size): - """Copy data from snapshot to destination volume. - - This is done with a qemu-img convert to raw/qcow2 from the snapshot - qcow2. 
- """ - - LOG.debug("Copying snapshot: %(snap)s -> volume: %(vol)s, " - "volume_size: %(size)s GB", - {'snap': snapshot.id, - 'vol': volume.id, - 'size': volume_size}) - - info_path = self._local_path_volume_info(snapshot.volume) - snap_info = self._read_info_file(info_path) - vol_path = self._local_volume_dir(snapshot.volume) - forward_file = snap_info[snapshot.id] - forward_path = os.path.join(vol_path, forward_file) - - # Find the file which backs this file, which represents the point - # when this snapshot was created. - img_info = self._qemu_img_info(forward_path, snapshot.volume.name) - path_to_snap_img = os.path.join(vol_path, img_info.backing_file) - - path_to_new_vol = self._local_path_volume(volume) - - LOG.debug("will copy from snapshot at %s", path_to_snap_img) - - if self.configuration.nfs_qcow2_volumes: - out_format = 'qcow2' - else: - out_format = 'raw' - - image_utils.convert_image(path_to_snap_img, - path_to_new_vol, - out_format, - run_as_root=self._execute_as_root) - - self._set_rw_permissions_for_all(path_to_new_vol) diff --git a/cinder/volume/drivers/nimble.py b/cinder/volume/drivers/nimble.py deleted file mode 100644 index 6b8253971..000000000 --- a/cinder/volume/drivers/nimble.py +++ /dev/null @@ -1,1690 +0,0 @@ -# Nimble Storage, Inc. (c) 2013-2014 -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -""" -Volume driver for Nimble Storage. - -This driver supports Nimble Storage controller CS-Series and Nimble AF Arrays. 
- -""" -import abc -import eventlet -import functools -import json -import random -import re -import requests -import six -import string -import sys - -from oslo_config import cfg -from oslo_log import log as logging -from oslo_utils import units - -from cinder import exception -from cinder.i18n import _ -from cinder import interface -from cinder.objects import volume -from cinder import utils -from cinder.volume import configuration -from cinder.volume import driver -from cinder.volume.drivers.san import san -from cinder.volume import volume_types -from cinder.zonemanager import utils as fczm_utils - -DRIVER_VERSION = "4.0.1" -AES_256_XTS_CIPHER = 'aes_256_xts' -DEFAULT_CIPHER = 'none' -EXTRA_SPEC_ENCRYPTION = 'nimble:encryption' -EXTRA_SPEC_PERF_POLICY = 'nimble:perfpol-name' -EXTRA_SPEC_MULTI_INITIATOR = 'nimble:multi-initiator' -EXTRA_SPEC_DEDUPE = 'nimble:dedupe' -EXTRA_SPEC_IOPS_LIMIT = 'nimble:iops-limit' -EXTRA_SPEC_FOLDER = 'nimble:folder' -DEFAULT_PERF_POLICY_SETTING = 'default' -DEFAULT_ENCRYPTION_SETTING = 'no' -DEFAULT_DEDUPE_SETTING = 'false' -DEFAULT_IOPS_LIMIT_SETTING = None -DEFAULT_MULTI_INITIATOR_SETTING = 'false' -DEFAULT_FOLDER_SETTING = None -DEFAULT_SNAP_QUOTA = sys.maxsize -BACKUP_VOL_PREFIX = 'backup-vol-' -AGENT_TYPE_OPENSTACK = 'openstack' -AGENT_TYPE_OPENSTACK_GST = 'openstackv2' -AGENT_TYPE_NONE = 'none' -SM_SUBNET_DATA = 'data' -SM_SUBNET_MGMT_PLUS_DATA = 'mgmt-data' -SM_STATE_MSG = "is already in requested state" -SM_OBJ_EXIST_MSG = "Object exists" -SM_OBJ_ENOENT_MSG = "No such object" -IOPS_ERR_MSG = "Please set valid IOPS limit in the range" -LUN_ID = '0' -WARN_LEVEL = 80 -DEFAULT_SLEEP = 5 -MIN_IOPS = 256 -MAX_IOPS = 4294967294 -NimbleDefaultVersion = 1 - - -LOG = logging.getLogger(__name__) - -nimble_opts = [ - cfg.StrOpt('nimble_pool_name', - default='default', - help='Nimble Controller pool name'), - cfg.StrOpt('nimble_subnet_label', - default='*', - help='Nimble Subnet Label'), - cfg.BoolOpt('nimble_verify_certificate', - 
default=False, - help='Whether to verify Nimble SSL Certificate'), - cfg.StrOpt('nimble_verify_cert_path', - help='Path to Nimble Array SSL certificate'), ] - -CONF = cfg.CONF -CONF.register_opts(nimble_opts, group=configuration.SHARED_CONF_GROUP) - - -class NimbleDriverException(exception.VolumeDriverException): - message = _("Nimble Cinder Driver exception") - - -class NimbleAPIException(exception.VolumeBackendAPIException): - message = _("Unexpected response from Nimble API") - - -class NimbleBaseVolumeDriver(san.SanDriver): - """OpenStack driver to enable Nimble Controller. - - Version history: - - .. code-block:: none - - - 1.0 - Initial driver - 1.1.1 - Updated VERSION to Nimble driver version - 1.1.2 - Update snap-quota to unlimited - 2.0.0 - Added Extra Spec Capability - Correct capacity reporting - Added Manage/Unmanage volume support - 2.0.1 - Added multi-initiator support through extra-specs - 2.0.2 - Fixed supporting extra specs while cloning vols - 3.0.0 - Newton Support for Force Backup - 3.1.0 - Fibre Channel Support - 4.0.0 - Migrate from SOAP to REST API - Add support for Group Scoped Target - 4.0.1 - Add QoS and dedupe support - """ - VERSION = DRIVER_VERSION - - # ThirdPartySystems wiki page - CI_WIKI_NAME = "Nimble_Storage_CI" - - def __init__(self, *args, **kwargs): - super(NimbleBaseVolumeDriver, self).__init__(*args, **kwargs) - self.APIExecutor = None - self.group_stats = {} - self.api_protocol = None - self._storage_protocol = None - self._group_target_enabled = False - self.configuration.append_config_values(nimble_opts) - self.verify = False - if self.configuration.nimble_verify_certificate is True: - self.verify = self.configuration.nimble_verify_cert_path or True - - def _check_config(self): - """Ensure that the flags we care about are set.""" - required_config = ['san_ip', 'san_login', 'san_password'] - for attr in required_config: - if not getattr(self.configuration, attr, None): - raise exception.InvalidInput(reason=_('%s is not 
set.') % - attr) - - def create_volume(self, volume): - """Create a new volume.""" - reserve = not self.configuration.san_thin_provision - self.APIExecutor.create_vol( - volume, - self.configuration.nimble_pool_name, reserve, - self._storage_protocol, - self._group_target_enabled) - return self._get_model_info(volume['name']) - - def is_volume_backup_clone(self, volume): - """check if the volume is created through cinder-backup workflow. - - :param volume - """ - vol_info = self.APIExecutor.get_vol_info(volume['name']) - LOG.debug("is_clone: %(is_clone)s base_snap_id: %(snap)s, " - "parent_vol_id: %(vol)s", - {'is_clone': vol_info['clone'], - 'snap': vol_info['base_snap_id'], - 'vol': vol_info['parent_vol_id']}) - - if vol_info['base_snap_id'] and ( - vol_info['parent_vol_id'] is not None): - LOG.debug("Nimble base-snap exists for volume %(vol)s", - {'vol': volume['name']}) - volume_name_prefix = volume['name'].replace(volume['id'], "") - LOG.debug("volume_name_prefix : %(prefix)s", - {'prefix': volume_name_prefix}) - snap_id = self.APIExecutor.get_snap_info_by_id( - vol_info['base_snap_id'], - vol_info['parent_vol_id']) - snap_info = self.APIExecutor.get_snap_info_detail(snap_id['id']) - LOG.debug("snap_info description %(snap_info)s", - {'snap_info': snap_info['description']}) - if snap_info['description'] and BACKUP_VOL_PREFIX in ( - snap_info['description']): - # TODO(rkumar): get parent vol id from parent volume name - parent_vol_name = self.APIExecutor.get_volume_name( - vol_info['parent_vol_id']) - parent_vol_id = parent_vol_name. 
replace( - volume_name_prefix, "") - if BACKUP_VOL_PREFIX + parent_vol_id in snap_info[ - 'description']: - LOG.info('Nimble backup-snapshot exists name=%(' - 'name)s', {'name': snap_info['name']}) - snap_vol_name = self.APIExecutor.get_volume_name( - snap_info['vol_id']) - LOG.debug("snap_vol_name %(snap)s", - {'snap': snap_vol_name}) - return snap_info['name'], snap_vol_name - return "", "" - - def delete_volume(self, volume): - """Delete the specified volume.""" - backup_snap_name, backup_vol_name = self.is_volume_backup_clone(volume) - eventlet.sleep(DEFAULT_SLEEP) - self.APIExecutor.online_vol(volume['name'], False) - LOG.debug("Deleting volume %(vol)s", {'vol': volume['name']}) - self.APIExecutor.delete_vol(volume['name']) - # Nimble backend does not delete the snapshot from the parent volume - # if there is a dependent clone. So the deletes need to be in reverse - # order i.e. - # 1. First delete the clone volume used for backup - # 2. Delete the base snapshot used for clone from the parent volume. - # This is only done for the force backup clone operation as it is - # a temporary operation in which we are certain that the snapshot does - # not need to be preserved after the backup is completed. - - if (backup_snap_name != "" and backup_vol_name != "") and ( - backup_snap_name is not None): - LOG.debug("Delete volume backup vol: %(vol)s snap: %(snap)s", - {'vol': backup_vol_name, - 'snap': backup_snap_name}) - self.APIExecutor.online_snap(backup_vol_name, - False, - backup_snap_name) - - self.APIExecutor.delete_snap(backup_vol_name, - backup_snap_name) - - def _generate_random_string(self, length): - """Generates random_string.""" - char_set = string.ascii_lowercase - return ''.join(random.sample(char_set, length)) - - def _clone_volume_from_snapshot(self, volume, snapshot): - """Clone volume from snapshot. - - Extend the volume if the size of the volume is more than the snapshot. 
- """ - reserve = not self.configuration.san_thin_provision - pool_name = self.configuration.nimble_pool_name - self.APIExecutor.clone_vol(volume, snapshot, reserve, - self._group_target_enabled, - self._storage_protocol, - pool_name) - if(volume['size'] > snapshot['volume_size']): - vol_size = volume['size'] * units.Ki - reserve_size = 100 if reserve else 0 - data = {"data": {'size': vol_size, - 'reserve': reserve_size, - 'warn_level': int(WARN_LEVEL), - 'limit': 100, - 'snap_limit': DEFAULT_SNAP_QUOTA}} - LOG.debug("Edit Vol request %(data)s", {'data': data}) - self.APIExecutor.edit_vol(volume['name'], data) - return self._get_model_info(volume['name']) - - def create_cloned_volume(self, volume, src_vref): - """Create a clone of the specified volume.""" - snapshot_name = ('openstack-clone-' + - volume['name'] + '-' + - self._generate_random_string(12)) - snapshot = {'volume_name': src_vref['name'], - 'name': snapshot_name, - 'volume_size': src_vref['size'], - 'display_name': volume.display_name, - 'display_description': ''} - self.APIExecutor.snap_vol(snapshot) - self._clone_volume_from_snapshot(volume, snapshot) - return self._get_model_info(volume['name']) - - def create_export(self, context, volume, connector): - """Driver entry point to get the export info for a new volume.""" - return self._get_model_info(volume['name']) - - def ensure_export(self, context, volume): - """Driver entry point to get the export info for an existing volume.""" - return self._get_model_info(volume['name']) - - def create_snapshot(self, snapshot): - """Create a snapshot.""" - self.APIExecutor.snap_vol(snapshot) - - def delete_snapshot(self, snapshot): - """Delete a snapshot.""" - self.APIExecutor.online_snap( - snapshot['volume_name'], - False, - snapshot['name']) - self.APIExecutor.delete_snap(snapshot['volume_name'], - snapshot['name']) - - def create_volume_from_snapshot(self, volume, snapshot): - """Create a volume from a snapshot.""" - self._clone_volume_from_snapshot(volume, 
snapshot) - return self._get_model_info(volume['name']) - - def _enable_group_scoped_target(self, group_info): - if 'version_current' in group_info: - current_version = group_info['version_current'] - major_minor = current_version.split(".") - if len(major_minor) >= 3: - major = major_minor[0] - minor = major_minor[1] - # TODO(rkumar): Fix the major version - if int(major) >= 4 and int(minor) >= 0: - # Enforce group scoped target - if 'group_target_enabled' in group_info: - if group_info['group_target_enabled'] is False: - try: - self.APIExecutor.enable_group_scoped_target() - except Exception: - raise NimbleAPIException(_("Unable to enable" - " GST")) - self._group_target_enabled = True - LOG.info("Group Scoped Target enabled for " - "group %(group)s: %(ip)s", - {'group': group_info['name'], - 'ip': self.configuration.san_ip}) - elif 'group_target_enabled' not in group_info: - LOG.info("Group Scoped Target NOT " - "present for group %(group)s: " - "%(ip)s", - {'group': group_info['name'], - 'ip': self.configuration.san_ip}) - else: - raise NimbleAPIException(_("Unable to get current software " - "version for %s"), - self.configuration.san_ip) - - def get_volume_stats(self, refresh=False): - """Get volume stats. 
This is more of getting group stats.""" - if refresh: - group_info = self.APIExecutor.get_group_info() - if 'usage_valid' not in group_info: - raise NimbleDriverException(_('SpaceInfo returned by' - 'array is invalid')) - total_capacity = (group_info['usable_capacity_bytes'] / - float(units.Gi)) - used_space = ((group_info['compressed_vol_usage_bytes'] + - group_info['compressed_snap_usage_bytes'] + - group_info['unused_reserve_bytes']) / - float(units.Gi)) - - free_space = total_capacity - used_space - LOG.debug('total_capacity=%(capacity)f ' - 'used_space=%(used)f free_space=%(free)f', - {'capacity': total_capacity, - 'used': used_space, - 'free': free_space}) - - backend_name = self.configuration.safe_get( - 'volume_backend_name') or self.__class__.__name__ - self.group_stats = {'volume_backend_name': backend_name, - 'vendor_name': 'Nimble', - 'driver_version': DRIVER_VERSION, - 'storage_protocol': self._storage_protocol} - # Just use a single pool for now, FIXME to support multiple - # pools - single_pool = dict( - pool_name=backend_name, - total_capacity_gb=total_capacity, - free_capacity_gb=free_space, - reserved_percentage=0, - QoS_support=False) - self.group_stats['pools'] = [single_pool] - return self.group_stats - - def extend_volume(self, volume, new_size): - """Extend an existing volume.""" - volume_name = volume['name'] - LOG.info('Entering extend_volume volume=%(vol)s ' - 'new_size=%(size)s', - {'vol': volume_name, 'size': new_size}) - vol_size = int(new_size) * units.Ki - reserve = not self.configuration.san_thin_provision - reserve_size = 100 if reserve else 0 - LOG.debug("new volume size in MB (size)s", {'size': vol_size}) - data = {"data": {'size': vol_size, - 'reserve': reserve_size, - 'warn_level': int(WARN_LEVEL), - 'limit': 100, - 'snap_limit': DEFAULT_SNAP_QUOTA}} - self.APIExecutor.edit_vol(volume_name, data) - - def _get_existing_volume_ref_name(self, existing_ref): - """Returns the volume name of an existing ref""" - vol_name = None - if 
'source-name' in existing_ref: - vol_name = existing_ref['source-name'] - else: - reason = _("Reference must contain source-name.") - raise exception.ManageExistingInvalidReference( - existing_ref=existing_ref, - reason=reason) - - return vol_name - - def manage_existing(self, volume, external_ref): - """Manage an existing nimble volume (import to cinder)""" - - # Get the volume name from the external reference - target_vol_name = self._get_existing_volume_ref_name(external_ref) - LOG.debug('Entering manage_existing. ' - 'Target_volume_name =%s', target_vol_name) - - # Get vol info from the volume name obtained from the reference - vol_info = self.APIExecutor.get_vol_info(target_vol_name) - - # Check if volume is already managed by OpenStack - if vol_info['agent_type'] == AGENT_TYPE_OPENSTACK or ( - vol_info['agent_type'] == AGENT_TYPE_OPENSTACK_GST): - raise exception.ManageExistingAlreadyManaged( - volume_ref=volume['id']) - - # If agent-type is not None then raise exception - if vol_info['agent_type'] != AGENT_TYPE_NONE: - msg = (_('Volume should have agent-type set as None.')) - raise exception.InvalidVolume(reason=msg) - - new_vol_name = volume['name'] - LOG.info("Volume status before managing it : %(status)s", - {'status': vol_info['online']}) - if vol_info['online'] is True: - msg = (_('Volume %s is online. Set volume to offline for ' - 'managing using OpenStack.') % target_vol_name) - raise exception.InvalidVolume(reason=msg) - - # edit the volume - data = {'data': {'name': new_vol_name}} - if self._group_target_enabled is True: - # check if any ACL's are attached to this volume - if 'access_control_records' in vol_info and ( - vol_info['access_control_records'] is not None): - msg = (_('Volume %s has ACL associated with it. 
Remove ACL ' - 'for managing using OpenStack') % target_vol_name) - raise exception.InvalidVolume(reason=msg) - data['data']['agent_type'] = AGENT_TYPE_OPENSTACK_GST - else: - data['data']['agent_type'] = AGENT_TYPE_OPENSTACK - - LOG.debug("Data for edit %(data)s", {'data': data}) - self.APIExecutor.edit_vol(target_vol_name, data) - - # make the volume online after rename - self.APIExecutor.online_vol(new_vol_name, True) - - return self._get_model_info(new_vol_name) - - def manage_existing_get_size(self, volume, external_ref): - """Return size of an existing volume""" - - LOG.debug('Volume name : %(name)s External ref : %(ref)s', - {'name': volume['name'], 'ref': external_ref}) - - target_vol_name = self._get_existing_volume_ref_name(external_ref) - - # get vol info - vol_info = self.APIExecutor.get_vol_info(target_vol_name) - - LOG.debug('Volume size : %(size)s Volume-name : %(name)s', - {'size': vol_info['size'], 'name': vol_info['name']}) - - return int(vol_info['size'] / units.Ki) - - def unmanage(self, volume): - """Removes the specified volume from Cinder management.""" - - vol_name = volume['name'] - LOG.debug("Entering unmanage_volume volume =%s", vol_name) - - # check agent type - vol_info = self.APIExecutor.get_vol_info(vol_name) - if vol_info['agent_type'] != AGENT_TYPE_OPENSTACK and ( - vol_info['agent_type'] != AGENT_TYPE_OPENSTACK_GST): - msg = (_('Only volumes managed by OpenStack can be unmanaged.')) - raise exception.InvalidVolume(reason=msg) - - data = {'data': {'agent_type': AGENT_TYPE_NONE}} - # update the agent-type to None - self.APIExecutor.edit_vol(vol_name, data) - - # offline the volume - self.APIExecutor.online_vol(vol_name, False) - - def do_setup(self, context): - """Setup the Nimble Cinder volume driver.""" - self._check_config() - # Setup API Executor - try: - self.APIExecutor = NimbleRestAPIExecutor( - username=self.configuration.san_login, - password=self.configuration.san_password, - ip=self.configuration.san_ip, - 
verify=self.verify) - if self._storage_protocol == "iSCSI": - group_info = self.APIExecutor.get_group_info() - self._enable_group_scoped_target(group_info) - except Exception: - LOG.error('Failed to create REST client. ' - 'Check san_ip, username, password' - ' and make sure the array version is compatible') - raise - self._update_existing_vols_agent_type(context) - - def _update_existing_vols_agent_type(self, context): - backend_name = self.configuration.safe_get('volume_backend_name') - all_vols = volume.VolumeList.get_all( - context, None, None, None, None, {'status': 'available'}) - for vol in all_vols: - if backend_name in vol.host: - try: - vol_info = self.APIExecutor.get_vol_info(vol.name) - # update agent_type only if no ACL's are present - if 'access_control_records' in vol_info and ( - vol_info['access_control_records'] is None): - if self._group_target_enabled: - LOG.debug("Updating %(vol)s to have agent_type :" - "%(agent)s", - {'vol': vol.name, - 'agent': AGENT_TYPE_OPENSTACK_GST}) - # check if this is an upgrade case from - # openstack to openstackv2 - if vol_info['agent_type'] == AGENT_TYPE_NONE: - data = {'data': {'agent_type': - AGENT_TYPE_OPENSTACK_GST}} - self.APIExecutor.edit_vol(vol.name, data) - elif vol_info['agent_type'] == ( - AGENT_TYPE_OPENSTACK): - # 1. update the agent type to None - data = {'data': {'agent_type': - AGENT_TYPE_NONE}} - self.APIExecutor.edit_vol(vol.name, data) - # 2. update the agent type to openstack_gst - data = {'data': {'agent_type': - AGENT_TYPE_OPENSTACK_GST}} - self.APIExecutor.edit_vol(vol.name, data) - else: - LOG.debug("Updating %(vol)s to have agent_type :" - "%(agent)s", - {'vol': vol.name, - 'agent': AGENT_TYPE_OPENSTACK_GST}) - if vol_info['agent_type'] == AGENT_TYPE_NONE: - data = {'data': {'agent_type': - AGENT_TYPE_OPENSTACK}} - self.APIExecutor.edit_vol(vol.name, data) - elif vol_info['agent_type'] == ( - AGENT_TYPE_OPENSTACK_GST): - # 1. 
update the agent type to None - data = {'data': {'agent_type': - AGENT_TYPE_NONE}} - self.APIExecutor.edit_vol(vol.name, data) - # 2. update the agent type to openstack - data = {'data': {'agent_type': - AGENT_TYPE_OPENSTACK}} - self.APIExecutor.edit_vol(vol.name, data) - except NimbleAPIException: - LOG.warning('Error updating agent-type for ' - 'volume %s.', vol.name) - raise - - def _get_model_info(self, volume_name): - """Get model info for the volume.""" - return ( - {'provider_location': self._get_provider_location(volume_name), - 'provider_auth': None}) - - @abc.abstractmethod - def _get_provider_location(self, volume_name): - """Volume info for iSCSI and FC""" - - pass - - def _create_igroup_for_initiator(self, initiator_name, wwpns): - """Creates igroup for an initiator and returns the igroup name.""" - igrp_name = 'openstack-' + self._generate_random_string(12) - LOG.info('Creating initiator group %(grp)s ' - 'with initiator %(iname)s', - {'grp': igrp_name, 'iname': initiator_name}) - if self._storage_protocol == "iSCSI": - self.APIExecutor.create_initiator_group(igrp_name) - self.APIExecutor.add_initiator_to_igroup(igrp_name, initiator_name) - elif self._storage_protocol == "FC": - self.APIExecutor.create_initiator_group_fc(igrp_name) - for wwpn in wwpns: - self.APIExecutor.add_initiator_to_igroup_fc(igrp_name, wwpn) - return igrp_name - - def _get_igroupname_for_initiator_fc(self, initiator_wwpns): - initiator_groups = self.APIExecutor.get_initiator_grp_list() - for initiator_group in initiator_groups: - if 'fc_initiators' in initiator_group and initiator_group[ - 'fc_initiators'] is not None: - wwpns_list = [] - for initiator in initiator_group['fc_initiators']: - wwpn = str(initiator['wwpn']).replace(":", "") - wwpns_list.append(wwpn) - LOG.debug("initiator_wwpns=%(initiator)s " - "wwpns_list_from_array=%(wwpns)s", - {'initiator': initiator_wwpns, - 'wwpns': wwpns_list}) - if set(initiator_wwpns) == set(wwpns_list): - LOG.info('igroup %(grp)s found 
for ' - 'initiator %(wwpns_list)s', - {'grp': initiator_group['name'], - 'wwpns_list': wwpns_list}) - return initiator_group['name'] - LOG.info('No igroup found for initiators %s', initiator_wwpns) - return '' - - def _get_igroupname_for_initiator(self, initiator_name): - initiator_groups = self.APIExecutor.get_initiator_grp_list() - for initiator_group in initiator_groups: - if initiator_group['iscsi_initiators'] is not None: - if (len(initiator_group['iscsi_initiators']) == 1 and - initiator_group['iscsi_initiators'][0]['iqn'] == - initiator_name): - LOG.info('igroup %(grp)s found for ' - 'initiator %(iname)s', - {'grp': initiator_group['name'], - 'iname': initiator_name}) - return initiator_group['name'] - LOG.info('No igroup found for initiator %s', initiator_name) - return '' - - def get_lun_number(self, volume, initiator_group_name): - vol_info = self.APIExecutor.get_vol_info(volume['name']) - for acl in vol_info['access_control_records']: - if (initiator_group_name == acl['initiator_group_name']): - LOG.info("access_control_record =%(acl)s", - {'acl': acl}) - lun = acl['lun'] - LOG.info("LUN : %(lun)s", {"lun": lun}) - return lun - raise NimbleAPIException(_("Lun number not found for volume %(vol)s " - "with initiator_group: %(igroup)s") % - {'vol': volume['name'], - 'igroup': initiator_group_name}) - - -@interface.volumedriver -class NimbleISCSIDriver(NimbleBaseVolumeDriver, san.SanISCSIDriver): - - """OpenStack driver to enable Nimble ISCSI Controller.""" - - def __init__(self, *args, **kwargs): - super(NimbleISCSIDriver, self).__init__(*args, **kwargs) - self._storage_protocol = "iSCSI" - self._group_target_name = None - - def _set_gst_for_group(self): - group_info = self.APIExecutor.get_group_info() - if 'group_target_enabled' in group_info and ( - group_info['group_target_enabled']) is True and ( - 'group_target_name' in group_info) and ( - group_info['group_target_name'] is not None): - self._group_target_name = group_info['group_target_name'] - - def 
initialize_connection(self, volume, connector): - """Driver entry point to attach a volume to an instance.""" - LOG.info('Entering initialize_connection volume=%(vol)s' - ' connector=%(conn)s location=%(loc)s', - {'vol': volume, - 'conn': connector, - 'loc': volume['provider_location']}) - initiator_name = connector['initiator'] - initiator_group_name = self._get_igroupname_for_initiator( - initiator_name) - if not initiator_group_name: - initiator_group_name = self._create_igroup_for_initiator( - initiator_name, None) - LOG.info('Initiator group name is %(grp)s for initiator ' - '%(iname)s', - {'grp': initiator_group_name, 'iname': initiator_name}) - self.APIExecutor.add_acl(volume, initiator_group_name) - (iscsi_portal, iqn) = volume['provider_location'].split() - if self._group_target_name is not None: - lun_num = self.get_lun_number(volume, initiator_group_name) - else: - lun_num = LUN_ID - - properties = {} - properties['target_portal'] = iscsi_portal - properties['target_iqn'] = iqn - properties['target_lun'] = int(lun_num) - properties['volume_id'] = volume['id'] # used by xen currently - - return { - 'driver_volume_type': 'iscsi', - 'data': properties, - } - - def terminate_connection(self, volume, connector, **kwargs): - """Driver entry point to unattach a volume from an instance.""" - LOG.info('Entering terminate_connection volume=%(vol)s' - ' connector=%(conn)s location=%(loc)s.', - {'vol': volume['name'], - 'conn': connector, - 'loc': volume['provider_location']}) - initiator_name = connector['initiator'] - initiator_group_name = self._get_igroupname_for_initiator( - initiator_name) - if not initiator_group_name: - raise NimbleDriverException(_('No initiator group found for ' - 'initiator %s') % initiator_name) - self.APIExecutor.remove_acl(volume, initiator_group_name) - eventlet.sleep(DEFAULT_SLEEP) - - def _get_provider_location(self, volume_name): - """Get volume iqn for initiator access.""" - vol_info = self.APIExecutor.get_vol_info(volume_name) - 
netconfig = self.APIExecutor.get_netconfig('active') - - self._set_gst_for_group() - if self._group_target_name: - iqn = self._group_target_name - target_ipaddr = self._get_data_ip(netconfig) - iscsi_portal = target_ipaddr + ':3260' - else: - iqn = vol_info['target_name'] - target_ipaddr = self._get_discovery_ip(netconfig) - iscsi_portal = target_ipaddr + ':3260' - provider_location = '%s %s' % (iscsi_portal, iqn) - LOG.info('vol_name=%(name)s provider_location=%(loc)s', - {'name': volume_name, 'loc': provider_location}) - return provider_location - - def _get_data_ip(self, netconfig): - """Get data ip.""" - subnet_label = self.configuration.nimble_subnet_label - LOG.debug('subnet_label used %(netlabel)s, netconfig %(netconf)s', - {'netlabel': subnet_label, 'netconf': netconfig}) - ret_data_ip = '' - for subnet in netconfig['array_list'][0]['nic_list']: - LOG.info('Exploring array subnet label %s', subnet[ - 'subnet_label']) - if subnet['data_ip']: - if subnet_label == '*': - # Use the first data subnet, save mgmt+data for later - LOG.info('Data ip %(data_ip)s is used ' - 'on data subnet %(net_label)s', - {'data_ip': subnet['data_ip'], - 'net_label': subnet['subnet_label']}) - return subnet['data_ip'] - elif subnet_label == subnet['subnet_label']: - LOG.info('Data ip %(data_ip)s is used' - ' on subnet %(net_label)s', - {'data_ip': subnet['data_ip'], - 'net_label': subnet['subnet_label']}) - return subnet['data_ip'] - if ret_data_ip: - LOG.info('Data ip %s is used on mgmt+data subnet', - ret_data_ip) - return ret_data_ip - else: - raise NimbleDriverException(_('No suitable data ip found')) - - def _get_discovery_ip(self, netconfig): - """Get discovery ip.""" - subnet_label = self.configuration.nimble_subnet_label - LOG.debug('subnet_label used %(netlabel)s, netconfig %(netconf)s', - {'netlabel': subnet_label, 'netconf': netconfig}) - ret_discovery_ip = '' - for subnet in netconfig['subnet_list']: - LOG.info('Exploring array subnet label %s', subnet['label']) - if 
subnet_label == '*': - # Use the first data subnet, save mgmt+data for later - if subnet['type'] == SM_SUBNET_DATA: - LOG.info('Discovery ip %(disc_ip)s is used ' - 'on data subnet %(net_label)s', - {'disc_ip': subnet['discovery_ip'], - 'net_label': subnet['label']}) - return subnet['discovery_ip'] - elif (subnet['type'] == SM_SUBNET_MGMT_PLUS_DATA): - LOG.info('Discovery ip %(disc_ip)s is found' - ' on mgmt+data subnet %(net_label)s', - {'disc_ip': subnet['discovery_ip'], - 'net_label': subnet['label']}) - ret_discovery_ip = subnet['discovery_ip'] - # If subnet is specified and found, use the subnet - elif subnet_label == subnet['label']: - LOG.info('Discovery ip %(disc_ip)s is used' - ' on subnet %(net_label)s', - {'disc_ip': subnet['discovery_ip'], - 'net_label': subnet['label']}) - return subnet['discovery_ip'] - if ret_discovery_ip: - LOG.info('Discovery ip %s is used on mgmt+data subnet', - ret_discovery_ip) - return ret_discovery_ip - else: - raise NimbleDriverException(_('No suitable discovery ip found')) - - -@interface.volumedriver -class NimbleFCDriver(NimbleBaseVolumeDriver, driver.FibreChannelDriver): - """OpenStack driver to enable Nimble FC Driver Controller.""" - - def __init__(self, *args, **kwargs): - super(NimbleFCDriver, self).__init__(*args, **kwargs) - self._storage_protocol = "FC" - self._lookup_service = fczm_utils.create_lookup_service() - - def _get_provider_location(self, volume_name): - """Get array info wwn details.""" - netconfig = self.APIExecutor.get_netconfig('active') - array_name = netconfig['group_leader_array'] - provider_location = '%s' % (array_name) - LOG.info('vol_name=%(name)s provider_location=%(loc)s', - {'name': volume_name, 'loc': provider_location}) - return provider_location - - def _build_initiator_target_map(self, target_wwns, connector): - """Build the target_wwns and the initiator target map.""" - LOG.debug("_build_initiator_target_map for %(wwns)s", - {'wwns': target_wwns}) - init_targ_map = {} - - if 
self._lookup_service: - # use FC san lookup to determine which wwpns to use - # for the new VLUN. - dev_map = self._lookup_service.get_device_mapping_from_network( - connector['wwpns'], - target_wwns) - map_fabric = dev_map - LOG.info("dev_map =%(fabric)s", {'fabric': map_fabric}) - - for fabric_name in dev_map: - fabric = dev_map[fabric_name] - for initiator in fabric['initiator_port_wwn_list']: - if initiator not in init_targ_map: - init_targ_map[initiator] = [] - init_targ_map[initiator] += fabric['target_port_wwn_list'] - init_targ_map[initiator] = list(set( - init_targ_map[initiator])) - else: - init_targ_map = dict.fromkeys(connector["wwpns"], target_wwns) - - return init_targ_map - - @fczm_utils.add_fc_zone - def initialize_connection(self, volume, connector): - """Driver entry point to attach a volume to an instance.""" - LOG.info('Entering initialize_connection volume=%(vol)s' - ' connector=%(conn)s location=%(loc)s', - {'vol': volume, - 'conn': connector, - 'loc': volume['provider_location']}) - wwpns = [] - initiator_name = connector['initiator'] - for wwpn in connector['wwpns']: - wwpns.append(wwpn) - initiator_group_name = self._get_igroupname_for_initiator_fc(wwpns) - - if not initiator_group_name: - initiator_group_name = self._create_igroup_for_initiator( - initiator_name, wwpns) - - LOG.info('Initiator group name is %(grp)s for initiator ' - '%(iname)s', - {'grp': initiator_group_name, 'iname': initiator_name}) - self.APIExecutor.add_acl(volume, initiator_group_name) - lun = self.get_lun_number(volume, initiator_group_name) - init_targ_map = {} - (array_name) = volume['provider_location'].split() - - target_wwns = self.get_wwpns_from_array(array_name) - - init_targ_map = self._build_initiator_target_map(target_wwns, - connector) - - data = {'driver_volume_type': 'fibre_channel', - 'data': {'target_lun': lun, - 'target_discovered': True, - 'target_wwn': target_wwns, - 'initiator_target_map': init_targ_map}} - - LOG.info("Return FC data for zone 
addition: %(data)s.", - {'data': data}) - - return data - - @fczm_utils.remove_fc_zone - def terminate_connection(self, volume, connector, **kwargs): - """Driver entry point to unattach a volume from an instance.""" - LOG.info('Entering terminate_connection volume=%(vol)s' - ' connector=%(conn)s location=%(loc)s.', - {'vol': volume, - 'conn': connector, - 'loc': volume['provider_location']}) - - wwpns = [] - initiator_name = connector['initiator'] - for wwpn in connector['wwpns']: - wwpns.append(wwpn) - init_targ_map = {} - (array_name) = volume['provider_location'].split() - target_wwns = self.get_wwpns_from_array(array_name) - init_targ_map = self._build_initiator_target_map(target_wwns, - connector) - initiator_group_name = self._get_igroupname_for_initiator_fc(wwpns) - if not initiator_group_name: - raise NimbleDriverException( - _('No initiator group found for initiator %s') % - initiator_name) - LOG.debug("initiator_target_map %s", init_targ_map) - self.APIExecutor.remove_acl(volume, initiator_group_name) - eventlet.sleep(DEFAULT_SLEEP) - # FIXME to check for other volumes attached to the host and then - # return the data. 
Bug https://bugs.launchpad.net/cinder/+bug/1617472 - - data = {'driver_volume_type': 'fibre_channel', - 'data': {'target_wwn': target_wwns}} - - return data - - def get_wwpns_from_array(self, array_name): - """Retrieve the wwpns from the array""" - LOG.debug("get_wwpns_from_array %s", array_name) - target_wwpns = [] - interface_info = self.APIExecutor.get_fc_interface_list(array_name) - LOG.info("interface_info %(interface_info)s", - {"interface_info": interface_info}) - for wwpn_list in interface_info: - wwpn = wwpn_list['wwpn'] - wwpn = wwpn.replace(":", "") - target_wwpns.append(wwpn) - - return target_wwpns - - def _convert_string_to_colon_separated_wwnn(self, wwnn): - return ':'.join(a + b for a, b in zip(wwnn[::2], wwnn[1::2])) - - -def _connection_checker(func): - """Decorator to re-establish and re-run the api if session has expired.""" - @functools.wraps(func) - def inner_connection_checker(self, *args, **kwargs): - for attempts in range(2): - try: - return func(self, *args, **kwargs) - except Exception as e: - if attempts < 1 and (re.search("Failed to execute", - six.text_type(e))): - LOG.info('Session might have expired.' 
- ' Trying to relogin') - self.login() - continue - else: - LOG.error('Re-throwing Exception %s', e) - raise - return inner_connection_checker - - -class NimbleRestAPIExecutor(object): - - """Makes Nimble REST API calls.""" - - def __init__(self, api_version=NimbleDefaultVersion, *args, **kwargs): - self.token_id = None - self.ip = kwargs['ip'] - self.username = kwargs['username'] - self.password = kwargs['password'] - self.verify = kwargs['verify'] - self.api_version = api_version - self.uri = "https://%(ip)s:5392/v%(version)s/" % { - 'ip': self.ip, - 'version': self.api_version} - self.login() - - def login(self): - data = {'data': {"username": self.username, - "password": self.password, - "app_name": "NimbleCinderDriver"}} - r = requests.post(self.uri + "tokens", - data=json.dumps(data), - verify=self.verify) - - if r.status_code != 201 and r.status_code != 200: - msg = _("Failed to login for user %s"), self.username - raise NimbleAPIException(msg) - self.token_id = r.json()['data']['session_token'] - self.headers = {'X-Auth-Token': self.token_id} - - def get_group_id(self): - api = 'groups' - r = self.get(api) - if not r.json()['data']: - raise NimbleAPIException(_("Unable to retrieve Group Object for : " - "%s") % self.ip) - return r.json()['data'][0]['id'] - - def get_group_info(self): - group_id = self.get_group_id() - api = 'groups/' + six.text_type(group_id) - r = self.get(api) - if not r.json()['data']: - raise NimbleAPIException(_("Unable to retrieve Group info for: %s") - % group_id) - return r.json()['data'] - - def get_folder_id(self, folder_name): - api = 'folders' - filter = {"name": folder_name} - r = self.get_query(api, filter) - if not r.json()['data']: - raise NimbleAPIException(_("Unable to retrieve information for " - "Folder: %s") % folder_name) - return r.json()['data'][0]['id'] - - def get_folder_info(self, folder_name): - folder_id = self.get_folder_id(folder_name) - api = "folders/" + six.text_type(folder_id) - r = self.get(api) - if not 
r.json()['data']: - raise NimbleAPIException(_("Unable to retrieve Folder info for: " - "%s") % folder_id) - return r.json()['data'] - - def get_performance_policy_id(self, perf_policy_name): - api = 'performance_policies/' - filter = {'name': perf_policy_name} - LOG.debug("Performance policy Name %s", perf_policy_name) - r = self.get_query(api, filter) - if not r.json()['data']: - raise NimbleAPIException(_("No performance policy found for:" - "%(perf)s") % {'perf': perf_policy_name}) - LOG.debug("Performance policy ID :%(perf)s", - {'perf': r.json()['data'][0]['id']}) - return r.json()['data'][0]['id'] - - def get_netconfig(self, role): - api = "network_configs/detail" - filter = {'role': role} - r = self.get_query(api, filter) - if not r.json()['data']: - raise NimbleAPIException(_("No %s network config exists") % role) - return r.json()['data'][0] - - def _get_volumetype_extraspecs(self, volume): - specs = {} - - type_id = volume['volume_type_id'] - if type_id is not None: - specs = volume_types.get_volume_type_extra_specs(type_id) - return specs - - def _get_extra_spec_values(self, extra_specs): - """Nimble specific extra specs.""" - perf_policy_name = extra_specs.get(EXTRA_SPEC_PERF_POLICY, - DEFAULT_PERF_POLICY_SETTING) - encryption = extra_specs.get(EXTRA_SPEC_ENCRYPTION, - DEFAULT_ENCRYPTION_SETTING) - multi_initiator = extra_specs.get(EXTRA_SPEC_MULTI_INITIATOR, - DEFAULT_MULTI_INITIATOR_SETTING) - iops_limit = extra_specs.get(EXTRA_SPEC_IOPS_LIMIT, - DEFAULT_IOPS_LIMIT_SETTING) - folder_name = extra_specs.get(EXTRA_SPEC_FOLDER, - DEFAULT_FOLDER_SETTING) - dedupe = extra_specs.get(EXTRA_SPEC_DEDUPE, - DEFAULT_DEDUPE_SETTING) - extra_specs_map = {} - extra_specs_map[EXTRA_SPEC_PERF_POLICY] = perf_policy_name - extra_specs_map[EXTRA_SPEC_ENCRYPTION] = encryption - extra_specs_map[EXTRA_SPEC_MULTI_INITIATOR] = multi_initiator - extra_specs_map[EXTRA_SPEC_IOPS_LIMIT] = iops_limit - extra_specs_map[EXTRA_SPEC_DEDUPE] = dedupe - 
extra_specs_map[EXTRA_SPEC_FOLDER] = folder_name - - return extra_specs_map - - def create_vol(self, volume, pool_name, reserve, protocol, is_gst_enabled): - response = self._execute_create_vol(volume, pool_name, reserve, - protocol, is_gst_enabled) - LOG.info('Successfully created volume %(name)s', - {'name': response['name']}) - return response['name'] - - def _execute_create_vol(self, volume, pool_name, reserve, protocol, - is_gst_enabled): - """Create volume - - :return: r['data'] - """ - - # Set volume size, display name and description - volume_size = volume['size'] * units.Ki - reserve_size = 100 if reserve else 0 - # Set volume description - display_list = [getattr(volume, 'display_name', ''), - getattr(volume, 'display_description', '')] - description = ':'.join(filter(None, display_list)) - # Limit description size to 254 characters - description = description[:254] - pool_id = self.get_pool_id(pool_name) - - specs = self._get_volumetype_extraspecs(volume) - extra_specs_map = self._get_extra_spec_values(specs) - perf_policy_name = extra_specs_map[EXTRA_SPEC_PERF_POLICY] - perf_policy_id = self.get_performance_policy_id(perf_policy_name) - encrypt = extra_specs_map[EXTRA_SPEC_ENCRYPTION] - multi_initiator = extra_specs_map[EXTRA_SPEC_MULTI_INITIATOR] - folder_name = extra_specs_map[EXTRA_SPEC_FOLDER] - iops_limit = extra_specs_map[EXTRA_SPEC_IOPS_LIMIT] - dedupe = extra_specs_map[EXTRA_SPEC_DEDUPE] - - cipher = DEFAULT_CIPHER - if encrypt.lower() == 'yes': - cipher = AES_256_XTS_CIPHER - if is_gst_enabled is True: - agent_type = AGENT_TYPE_OPENSTACK_GST - else: - agent_type = AGENT_TYPE_OPENSTACK - - LOG.debug('Creating a new volume=%(vol)s size=%(size)s' - ' reserve=%(reserve)s in pool=%(pool)s' - ' description=%(description)s with Extra Specs' - ' perfpol-name=%(perfpol-name)s' - ' encryption=%(encryption)s cipher=%(cipher)s' - ' agent-type=%(agent-type)s' - ' multi-initiator=%(multi-initiator)s', - {'vol': volume['name'], - 'size': volume_size, - 
'reserve': reserve_size, - 'pool': pool_name, - 'description': description, - 'perfpol-name': perf_policy_name, - 'encryption': encrypt, - 'cipher': cipher, - 'agent-type': agent_type, - 'multi-initiator': multi_initiator}) - data = {"data": - {'name': volume['name'], - 'description': description, - 'size': volume_size, - 'reserve': reserve_size, - 'warn_level': int(WARN_LEVEL), - 'limit': 100, - 'snap_limit': DEFAULT_SNAP_QUOTA, - 'online': True, - 'pool_id': pool_id, - 'agent_type': agent_type, - 'perfpolicy_id': perf_policy_id, - 'encryption_cipher': cipher}} - - if protocol == "iSCSI": - data['data']['multi_initiator'] = multi_initiator - - if dedupe.lower() == 'true': - data['data']['dedupe_enabled'] = True - - folder_id = None - if folder_name is not None: - # validate if folder exists in pool_name - pool_info = self.get_pool_info(pool_id) - if 'folder_list' in pool_info and (pool_info['folder_list'] is - not None): - for folder_list in pool_info['folder_list']: - LOG.debug("folder_list : %s", folder_list) - if folder_list['fqn'] == "/" + folder_name: - LOG.debug("Folder %(folder)s present in pool " - "%(pool)s", - {'folder': folder_name, - 'pool': pool_name}) - folder_id = self.get_folder_id(folder_name) - if folder_id is not None: - data['data']["folder_id"] = folder_id - if folder_id is None: - raise NimbleAPIException(_("Folder '%(folder)s' not " - "present in pool '%(pool)s'") % - {'folder': folder_name, - 'pool': pool_name}) - else: - raise NimbleAPIException(_("Folder '%(folder)s' not present in" - " pool '%(pool)s'") % - {'folder': folder_name, - 'pool': pool_name}) - - if iops_limit is not None: - if not iops_limit.isdigit() or ( - int(iops_limit) < MIN_IOPS) or (int(iops_limit) > MAX_IOPS): - raise NimbleAPIException(_("%(err)s [%(min)s, %(max)s]") % - {'err': IOPS_ERR_MSG, - 'min': MIN_IOPS, - 'max': MAX_IOPS}) - - data['data']['limit_iops'] = iops_limit - - LOG.debug("Volume metadata :%s", volume.metadata) - for key, value in 
volume.metadata.items(): - LOG.debug("Key %(key)s Value %(value)s", - {'key': key, 'value': value}) - if key == EXTRA_SPEC_IOPS_LIMIT and value.isdigit(): - if type(value) == int or int(value) < MIN_IOPS or ( - int(value) > MAX_IOPS): - raise NimbleAPIException(_("%(err)s [%(min)s, %(max)s]") % - {'err': IOPS_ERR_MSG, - 'min': MIN_IOPS, - 'max': MAX_IOPS}) - LOG.debug("IOPS Limit %s", value) - data['data']['limit_iops'] = value - LOG.debug("Data : %s", data) - - api = 'volumes' - r = self.post(api, data) - return r['data'] - - def create_initiator_group(self, initiator_grp_name): - api = "initiator_groups" - data = {"data": {"name": initiator_grp_name, - "access_protocol": "iscsi", - }} - r = self.post(api, data) - return r['data'] - - def create_initiator_group_fc(self, initiator_grp_name): - api = "initiator_groups" - - data = {} - data["data"] = {} - data["data"]["name"] = initiator_grp_name - data["data"]["access_protocol"] = "fc" - r = self.post(api, data) - return r['data'] - - def get_initiator_grp_id(self, initiator_grp_name): - api = "initiator_groups" - filter = {'name': initiator_grp_name} - r = self.get_query(api, filter) - return r.json()['data'][0]['id'] - - def add_initiator_to_igroup(self, initiator_grp_name, initiator_name): - initiator_group_id = self.get_initiator_grp_id(initiator_grp_name) - api = "initiators" - data = {"data": { - "access_protocol": "iscsi", - "initiator_group_id": initiator_group_id, - "label": initiator_name, - "iqn": initiator_name - }} - r = self.post(api, data) - return r['data'] - - def add_initiator_to_igroup_fc(self, initiator_grp_name, wwpn): - initiator_group_id = self.get_initiator_grp_id(initiator_grp_name) - api = "initiators" - data = {"data": { - "access_protocol": "fc", - "initiator_group_id": initiator_group_id, - "wwpn": self._format_to_wwpn(wwpn) - }} - r = self.post(api, data) - return r['data'] - - def get_pool_id(self, pool_name): - api = "pools/" - filter = {'name': pool_name} - r = self.get_query(api, 
filter) - if not r.json()['data']: - raise NimbleAPIException(_("Unable to retrieve information for " - "pool : %(pool)s") % - {'pool': pool_name}) - return r.json()['data'][0]['id'] - - def get_pool_info(self, pool_id): - api = 'pools/' + six.text_type(pool_id) - r = self.get(api) - return r.json()['data'] - - def get_initiator_grp_list(self): - api = "initiator_groups/detail" - r = self.get(api) - if 'data' not in r.json(): - raise NimbleAPIException(_("Unable to retrieve initiator group " - "list")) - LOG.info('Successfully retrieved InitiatorGrpList') - return r.json()['data'] - - def get_initiator_grp_id_by_name(self, initiator_group_name): - api = 'initiator_groups' - filter = {"name": initiator_group_name} - r = self.get_query(api, filter) - if not r.json()['data']: - raise NimbleAPIException(_("Unable to retrieve information for" - "initiator group : %s") % - initiator_group_name) - return r.json()['data'][0]['id'] - - def get_volume_id_by_name(self, name): - api = "volumes" - filter = {"name": name} - r = self.get_query(api, filter) - if not r.json()['data']: - raise NimbleAPIException(_("Unable to retrieve information for " - "volume: %s") % name) - return r.json()['data'][0]['id'] - - def get_volume_name(self, volume_id): - api = "volumes/" + six.text_type(volume_id) - r = self.get(api) - if not r.json()['data']: - raise NimbleAPIException(_("Unable to retrieve information for " - "volume: %s") % volume_id) - return r.json()['data']['name'] - - def add_acl(self, volume, initiator_group_name): - initiator_group_id = self.get_initiator_grp_id_by_name( - initiator_group_name) - volume_id = self.get_volume_id_by_name(volume['name']) - data = {'data': {"apply_to": 'both', - "initiator_group_id": initiator_group_id, - "vol_id": volume_id - }} - api = 'access_control_records' - try: - self.post(api, data) - except NimbleAPIException as ex: - LOG.debug("add_acl_exception: %s", ex) - if SM_OBJ_EXIST_MSG in six.text_type(ex): - LOG.warning('Volume %(vol)s : 
%(state)s', - {'vol': volume['name'], - 'state': SM_OBJ_EXIST_MSG}) - else: - msg = (_("Add access control failed with error: %s") % - six.text_type(ex)) - raise NimbleAPIException(msg) - - def get_acl_record(self, volume_id, initiator_group_id): - filter = {"vol_id": volume_id, - "initiator_group_id": initiator_group_id} - api = "access_control_records" - r = self.get_query(api, filter) - if not r.json()['data']: - raise NimbleAPIException(_("Unable to retrieve ACL for volume: " - "%(vol)s %(igroup)s ") % - {'vol': volume_id, - 'igroup': initiator_group_id}) - return r.json()['data'][0] - - def remove_acl(self, volume, initiator_group_name): - LOG.info("removing ACL from volume=%(vol)s" - "and %(igroup)s", - {"vol": volume['name'], - "igroup": initiator_group_name}) - initiator_group_id = self.get_initiator_grp_id_by_name( - initiator_group_name) - volume_id = self.get_volume_id_by_name(volume['name']) - - try: - acl_record = self.get_acl_record(volume_id, initiator_group_id) - LOG.debug("ACL Record %(acl)s", {"acl": acl_record}) - acl_id = acl_record['id'] - api = 'access_control_records/' + six.text_type(acl_id) - self.delete(api) - except NimbleAPIException as ex: - LOG.debug("remove_acl_exception: %s", ex) - if SM_OBJ_ENOENT_MSG in six.text_type(ex): - LOG.warning('Volume %(vol)s : %(state)s', - {'vol': volume['name'], - 'state': SM_OBJ_ENOENT_MSG}) - else: - msg = (_("Remove access control failed with error: %s") % - six.text_type(ex)) - raise NimbleAPIException(msg) - - def get_snap_info_by_id(self, snap_id, vol_id): - filter = {"id": snap_id, "vol_id": vol_id} - api = 'snapshots' - r = self.get_query(api, filter) - if not r.json()['data']: - raise NimbleAPIException(_("Unable to retrieve snapshot info for " - "snap_id: %(snap)s volume id: %(vol)s") - % {'snap': snap_id, - 'vol': vol_id}) - LOG.debug("SnapInfo :%s", r.json()['data'][0]) - return r.json()['data'][0] - - def get_snap_info(self, snap_name, vol_name): - filter = {"name": snap_name, "vol_name": 
vol_name} - api = 'snapshots' - r = self.get_query(api, filter) - if not r.json()['data']: - raise NimbleAPIException(_("Snapshot: %(snap)s of Volume: %(vol)s " - "doesn't exist") % - {'snap': snap_name, - 'vol': vol_name}) - return r.json()['data'][0] - - def get_snap_info_detail(self, snap_id): - api = 'snapshots/detail' - filter = {'id': snap_id} - r = self.get_query(api, filter) - if not r.json()['data']: - raise NimbleAPIException(_("Snapshot: %s doesnt exist") % snap_id) - return r.json()['data'][0] - - @utils.retry(NimbleAPIException, 2, 3) - def online_vol(self, volume_name, online_flag): - volume_id = self.get_volume_id_by_name(volume_name) - LOG.debug("volume_id %s", six.text_type(volume_id)) - eventlet.sleep(DEFAULT_SLEEP) - api = "volumes/" + six.text_type(volume_id) - data = {'data': {"online": online_flag, 'force': True}} - try: - LOG.debug("data :%s", data) - self.put(api, data) - LOG.debug("Volume %(vol)s is in requested online state :%(flag)s", - {'vol': volume_name, - 'flag': online_flag}) - except Exception as ex: - msg = (_("Error %s") % ex) - LOG.debug("online_vol_exception: %s", msg) - if msg.__contains__("Object is %s" % SM_STATE_MSG): - LOG.warning('Volume %(vol)s : %(state)s', - {'vol': volume_name, - 'state': SM_STATE_MSG}) - # TODO(rkumar): Check if we need to ignore the connected - # initiator - elif msg.__contains__("Initiators are connected to"): - raise NimbleAPIException(msg) - else: - raise exception.InvalidVolume(reason=msg) - - def online_snap(self, volume_name, online_flag, snap_name): - snap_info = self.get_snap_info(snap_name, volume_name) - api = "snapshots/" + six.text_type(snap_info['id']) - data = {'data': {"online": online_flag}} - try: - self.put(api, data) - LOG.debug("Snapshot %(snap)s is in requested online state " - ":%(flag)s", - {'snap': snap_name, 'flag': online_flag}) - except Exception as ex: - LOG.debug("online_snap_exception: %s", ex) - if six.text_type(ex).__contains__("Object %s" % SM_STATE_MSG): - 
LOG.warning('Snapshot %(snap)s :%(state)s', - {'snap': snap_name, - 'state': SM_STATE_MSG}) - else: - raise - - @utils.retry(NimbleAPIException, 2, 3) - def get_vol_info(self, volume_name): - volume_id = self.get_volume_id_by_name(volume_name) - api = 'volumes/' + six.text_type(volume_id) - r = self.get(api) - if not r.json()['data']: - raise exception.VolumeNotFound(_("Volume: %s not found") % - volume_name) - return r.json()['data'] - - def delete_vol(self, volume_name): - volume_id = self.get_volume_id_by_name(volume_name) - api = "volumes/" + six.text_type(volume_id) - self.delete(api) - - def snap_vol(self, snapshot): - api = "snapshots" - volume_name = snapshot['volume_name'] - vol_id = self.get_volume_id_by_name(volume_name) - snap_name = snapshot['name'] - # Set snapshot description - display_list = [ - getattr(snapshot, 'display_name', snapshot['display_name']), - getattr(snapshot, 'display_description', '')] - snap_description = ':'.join(filter(None, display_list)) - # Limit to 254 characters - snap_description = snap_description[:254] - data = {"data": {"name": snap_name, - "description": snap_description, - "vol_id": vol_id - } - } - r = self.post(api, data) - return r['data'] - - def clone_vol(self, volume, snapshot, reserve, is_gst_enabled, - protocol, pool_name): - api = "volumes" - volume_name = snapshot['volume_name'] - snap_name = snapshot['name'] - snap_info = self.get_snap_info(snap_name, volume_name) - clone_name = volume['name'] - snap_size = snapshot['volume_size'] - reserve_size = 100 if reserve else 0 - - specs = self._get_volumetype_extraspecs(volume) - extra_specs_map = self._get_extra_spec_values(specs) - perf_policy_name = extra_specs_map.get(EXTRA_SPEC_PERF_POLICY) - perf_policy_id = self.get_performance_policy_id(perf_policy_name) - encrypt = extra_specs_map.get(EXTRA_SPEC_ENCRYPTION) - multi_initiator = extra_specs_map.get(EXTRA_SPEC_MULTI_INITIATOR) - iops_limit = extra_specs_map[EXTRA_SPEC_IOPS_LIMIT] - folder_name = 
extra_specs_map[EXTRA_SPEC_FOLDER] - pool_id = self.get_pool_id(pool_name) - # default value of cipher for encryption - cipher = DEFAULT_CIPHER - if encrypt.lower() == 'yes': - cipher = AES_256_XTS_CIPHER - if is_gst_enabled is True: - agent_type = AGENT_TYPE_OPENSTACK_GST - else: - agent_type = AGENT_TYPE_OPENSTACK - - LOG.info('Cloning volume from snapshot volume=%(vol)s ' - 'snapshot=%(snap)s clone=%(clone)s snap_size=%(size)s ' - 'reserve=%(reserve)s' 'agent-type=%(agent-type)s ' - 'perfpol-name=%(perfpol-name)s ' - 'encryption=%(encryption)s cipher=%(cipher)s ' - 'multi-initiator=%(multi-initiator)s', - {'vol': volume_name, - 'snap': snap_name, - 'clone': clone_name, - 'size': snap_size, - 'reserve': reserve_size, - 'agent-type': agent_type, - 'perfpol-name': perf_policy_name, - 'encryption': encrypt, - 'cipher': cipher, - 'multi-initiator': multi_initiator}) - - data = {"data": {"name": clone_name, - "clone": 'true', - "base_snap_id": snap_info['id'], - 'snap_limit': DEFAULT_SNAP_QUOTA, - 'warn_level': int(WARN_LEVEL), - 'limit': 100, - "online": 'true', - "reserve": reserve_size, - "agent_type": agent_type, - "perfpolicy_id": perf_policy_id, - "encryption_cipher": cipher - } - } - if protocol == "iSCSI": - data['data']['multi_initiator'] = multi_initiator - - folder_id = None - if folder_name is not None: - # validate if folder exists in pool_name - pool_info = self.get_pool_info(pool_id) - if 'folder_list' in pool_info and (pool_info['folder_list'] is - not None): - for folder_list in pool_info['folder_list']: - LOG.debug("folder_list : %s", folder_list) - if folder_list['fqn'] == "/" + folder_name: - LOG.debug("Folder %(folder)s present in pool " - "%(pool)s", - {'folder': folder_name, - 'pool': pool_name}) - folder_id = self.get_folder_id(folder_name) - if folder_id is not None: - data['data']["folder_id"] = folder_id - if folder_id is None: - raise NimbleAPIException(_("Folder '%(folder)s' not " - "present in pool '%(pool)s'") % - {'folder': folder_name, 
- 'pool': pool_name}) - else: - raise NimbleAPIException(_("Folder '%(folder)s' not present in" - " pool '%(pool)s'") % - {'folder': folder_name, - 'pool': pool_name}) - - if iops_limit is not None: - if not iops_limit.isdigit() or ( - int(iops_limit) < MIN_IOPS) or (int(iops_limit) > MAX_IOPS): - raise NimbleAPIException(_("%(err)s [%(min)s, %(max)s]") % - {'err': IOPS_ERR_MSG, - 'min': MIN_IOPS, - 'max': MAX_IOPS}) - - data['data']['limit_iops'] = iops_limit - if iops_limit is not None: - if not iops_limit.isdigit() or ( - int(iops_limit) < MIN_IOPS) or (int(iops_limit) > MAX_IOPS): - raise NimbleAPIException(_("Please set valid IOPS limit" - " in the range [%(min)s, %(max)s]") % - {'min': MIN_IOPS, - 'max': MAX_IOPS}) - data['data']['limit_iops'] = iops_limit - - LOG.debug("Volume metadata :%s", volume.metadata) - for key, value in volume.metadata.items(): - LOG.debug("Key %(key)s Value %(value)s", - {'key': key, 'value': value}) - if key == EXTRA_SPEC_IOPS_LIMIT and value.isdigit(): - if type(value) == int or int(value) < MIN_IOPS or ( - int(value) > MAX_IOPS): - raise NimbleAPIException(_("Please enter valid IOPS " - "limit in the range [" - "%(min)s, %(max)s]") % - {'min': MIN_IOPS, - 'max': MAX_IOPS}) - LOG.debug("IOPS Limit %s", value) - data['data']['limit_iops'] = value - - r = self.post(api, data) - return r['data'] - - def edit_vol(self, volume_name, data): - vol_id = self.get_volume_id_by_name(volume_name) - api = "volumes/" + six.text_type(vol_id) - self.put(api, data) - - def delete_snap(self, volume_name, snap_name): - snap_info = self.get_snap_info(snap_name, volume_name) - api = "snapshots/" + six.text_type(snap_info['id']) - self.delete(api) - - @_connection_checker - def get(self, api): - return self.get_query(api, None) - - @_connection_checker - def get_query(self, api, query): - url = self.uri + api - return requests.get(url, headers=self.headers, - params=query, verify=False) - - @_connection_checker - def put(self, api, payload): - url = 
self.uri + api - r = requests.put(url, data=json.dumps(payload), - headers=self.headers, verify=self.verify) - if r.status_code != 201 and r.status_code != 200: - base = "Failed to execute api %(api)s : Error Code :%(code)s" % { - 'api': api, - 'code': r.status_code} - LOG.debug("Base error : %(base)s", {'base': base}) - try: - msg = _("%(base)s Message: %(msg)s") % { - 'base': base, - 'msg': r.json()['messages'][1]['text']} - except IndexError: - msg = _("%(base)s Message: %(msg)s") % { - 'base': base, - 'msg': six.text_type(r.json())} - raise NimbleAPIException(msg) - return r.json() - - @_connection_checker - def post(self, api, payload): - url = self.uri + api - r = requests.post(url, data=json.dumps(payload), - headers=self.headers, verify=self.verify) - if r.status_code != 201 and r.status_code != 200: - msg = _("Failed to execute api %(api)s : %(msg)s : %(code)s") % { - 'api': api, - 'msg': r.json()['messages'][1]['text'], - 'code': r.status_code} - raise NimbleAPIException(msg) - return r.json() - - @_connection_checker - def delete(self, api): - url = self.uri + api - r = requests.delete(url, headers=self.headers, verify=self.verify) - if r.status_code != 201 and r.status_code != 200: - msg = _("Failed to execute api %(api) : %(msg)s %(code)s") % { - 'api': api, - 'msg': r.json()['messages'][1]['text'], - 'code': r.status_code} - raise NimbleAPIException(msg) - return r.json() - - def _format_to_wwpn(self, string_wwpn): - return ':'.join(a + b for a, b in zip(* [iter(string_wwpn)] * 2)) - - def get_fc_interface_list(self, array_name): - """getFibreChannelInterfaceList API to get FC interfaces on array.""" - api = 'fibre_channel_interfaces/detail' - filter = {'array_name_or_serial': array_name} - r = self.get_query(api, filter) - if not r.json()['data']: - raise NimbleAPIException(_("No fc interfaces for array %s") % - array_name) - return r.json()['data'] - - def enable_group_scoped_target(self): - group_id = self.get_group_id() - api = "groups/" + 
six.text_type(group_id) - data = {'data': {'group_target_enabled': True}} - self.put(api, data) diff --git a/cinder/volume/drivers/prophetstor/__init__.py b/cinder/volume/drivers/prophetstor/__init__.py deleted file mode 100644 index e69de29bb..000000000 diff --git a/cinder/volume/drivers/prophetstor/dpl_fc.py b/cinder/volume/drivers/prophetstor/dpl_fc.py deleted file mode 100644 index ca9450be3..000000000 --- a/cinder/volume/drivers/prophetstor/dpl_fc.py +++ /dev/null @@ -1,412 +0,0 @@ -# Copyright (c) 2014 ProphetStor, Inc. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import errno - -from oslo_log import log as logging - -from cinder import exception -from cinder.i18n import _ -from cinder import interface -from cinder.volume import driver -from cinder.volume.drivers.prophetstor import dplcommon -from cinder.zonemanager import utils as fczm_utils - -LOG = logging.getLogger(__name__) - - -@interface.volumedriver -class DPLFCDriver(dplcommon.DPLCOMMONDriver, - driver.FibreChannelDriver): - def __init__(self, *args, **kwargs): - super(DPLFCDriver, self).__init__(*args, **kwargs) - - def _get_fc_channel(self): - """Get FibreChannel info. 
def _get_targets(self):
    """Get targets.

    :returns: targetInfos[uuid] = targetInfo
              targetInfo['targetUuid']
              targetInfo['targetName']
              targetInfo['targetAddr']
    """
    targets = {}
    try:
        ret, out = self.dpl.get_target_list('target')
        if ret == 0 and out:
            for child in out.get('children', []):
                targets[str(child[0])] = {'targetUuid': child[0],
                                          'targetName': child[1],
                                          'targetAddr': child[2]}
    except Exception as e:
        targets = {}
        LOG.error("Failed to get fiber channel target from "
                  "storage server due to %(stat)s",
                  {'stat': e})
    return targets

def _get_targetwpns(self, volumeid, initiatorWwpns):
    """Collect target wwpns on whose exports initiatorWwpns appears."""
    wwpns = []
    try:
        ret, out = self.dpl.get_vdev(volumeid)
        if ret == 0 and out:
            fc_exports = out.get('exports', {}).get('Network/FC', {})
            for fc_info in fc_exports:
                for perm in fc_info.get('permissions', []):
                    if perm.get(initiatorWwpns, None):
                        wwpns.append(fc_info.get('target_identifier', ''))
    except Exception as e:
        LOG.error("Failed to get target wwpns from storage due "
                  "to %(stat)s", {'stat': e})
        wwpns = []
    return wwpns
_is_initiator_wwpn_active(self, targetWwpn, initiatorWwpn): - fActive = False - output = None - try: - retCode, output = self.dpl.get_sns_table(targetWwpn) - if retCode == 0 and output: - for fdwwpn, fcport in output.get('metadata', - {}).get('sns_table', - []): - if fdwwpn == initiatorWwpn: - fActive = True - break - except Exception: - LOG.error('Failed to get sns table') - return fActive - - def _convertHex2String(self, wwpns): - szwwpns = '' - if len(str(wwpns)) == 16: - szwwpns = '%2s:%2s:%2s:%2s:%2s:%2s:%2s:%2s' % ( - str(wwpns)[0:2], - str(wwpns)[2:4], - str(wwpns)[4:6], - str(wwpns)[6:8], - str(wwpns)[8:10], - str(wwpns)[10:12], - str(wwpns)[12:14], - str(wwpns)[14:16]) - return szwwpns - - def _export_fc(self, volumeid, targetwwpns, initiatorwwpns, volumename): - ret = 0 - output = '' - LOG.debug('Export fc: %(volume)s, %(wwpns)s, %(iqn)s, %(volumename)s', - {'volume': volumeid, 'wwpns': targetwwpns, - 'iqn': initiatorwwpns, 'volumename': volumename}) - try: - ret, output = self.dpl.assign_vdev_fc( - self._conver_uuid2hex(volumeid), targetwwpns, - initiatorwwpns, volumename) - except Exception: - LOG.error('Volume %(volumeid)s failed to send assign command, ' - 'ret: %(status)s output: %(output)s', - {'volumeid': volumeid, 'status': ret, 'output': output}) - ret = errno.EFAULT - - if ret == errno.EAGAIN: - ret, event_uuid = self._get_event_uuid(output) - if len(event_uuid): - ret = 0 - status = self._wait_event( - self.dpl.get_vdev_status, - self._conver_uuid2hex(volumeid), event_uuid) - if status['state'] == 'error': - ret = errno.EFAULT - msg = _('Flexvisor failed to assign volume %(id)s: ' - '%(status)s.') % {'id': volumeid, - 'status': status} - raise exception.VolumeBackendAPIException(data=msg) - else: - ret = errno.EFAULT - msg = _('Flexvisor failed to assign volume %(id)s due to ' - 'unable to query status by event ' - 'id.') % {'id': volumeid} - raise exception.VolumeBackendAPIException(data=msg) - elif ret != 0: - msg = _('Flexvisor assign volume 
def _delete_export_fc(self, volumeid, targetwwpns, initiatorwwpns):
    """Remove one FC export of the volume; raise on backend failure."""
    ret, output = self.dpl.unassign_vdev_fc(
        self._conver_uuid2hex(volumeid),
        targetwwpns, initiatorwwpns)
    if ret == errno.EAGAIN:
        # Unassign was accepted asynchronously; poll the event.
        ret, event_uuid = self._get_event_uuid(output)
        if ret != 0 or not len(event_uuid):
            msg = _('Flexvisor failed to unassign volume (get event) '
                    '%(id)s.') % {'id': volumeid}
            raise exception.VolumeBackendAPIException(data=msg)
        status = self._wait_event(
            self.dpl.get_vdev_status, volumeid, event_uuid)
        if status['state'] == 'error':
            ret = errno.EFAULT
            msg = _('Flexvisor failed to unassign volume %(id)s:'
                    ' %(status)s.') % {'id': volumeid,
                                       'status': status}
            raise exception.VolumeBackendAPIException(data=msg)
    elif ret != 0:
        msg = _('Flexvisor unassign volume failed:%(id)s:'
                '%(status)s.') % {'id': volumeid, 'status': ret}
        raise exception.VolumeBackendAPIException(data=msg)
    else:
        LOG.info('Flexvisor succeeded to unassign volume %(id)s.',
                 {'id': volumeid})

    return ret

def _build_initiator_target_map(self, connector, tgtwwns):
    """Build the target_wwns and the initiator target map."""
    return {initiator: tgtwwns for initiator in connector['wwpns']}
%(connector)s', - {"volume": volume, "connector": connector}) - # Get Storage Fiber channel controller - dc_fc = self._get_fc_channel() - - # Get existed FC target list to decide target wwpn - dc_target = self._get_targets() - if len(dc_target) == 0: - msg = _('Backend storage did not configure fiber channel ' - 'target.') - raise exception.VolumeBackendAPIException(data=msg) - - for keyFc in dc_fc.keys(): - for targetuuid in dc_target.keys(): - if dc_fc[keyFc]['hardware_address'] == \ - dc_target[targetuuid]['targetAddr']: - preferTargets[targetuuid] = dc_target[targetuuid] - break - # Confirm client wwpn is existed in sns table - # Covert wwwpns to 'xx:xx:xx:xx:xx:xx:xx:xx' format - for dwwpn in connector['wwpns']: - szwwpn = self._convertHex2String(dwwpn) - if len(szwwpn) == 0: - msg = _('Invalid wwpns format %(wwpns)s') % \ - {'wwpns': connector['wwpns']} - raise exception.VolumeBackendAPIException(data=msg) - szwwpns.append(szwwpn) - - if len(szwwpns): - for targetUuid in preferTargets.keys(): - targetWwpn = '' - targetWwpn = preferTargets.get(targetUuid, - {}).get('targetAddr', '') - lsTargetWwpn.append(targetWwpn) - # Use wwpns to assign volume. - LOG.info('Prefer use target wwpn %(wwpn)s', - {'wwpn': lsTargetWwpn}) - # Start to create export in all FC target node. 
- assignedTarget = [] - for pTarget in lsTargetWwpn: - try: - ret = self._export_fc(volume['id'], str(pTarget), szwwpns, - volume['name']) - if ret: - break - else: - assignedTarget.append(pTarget) - except Exception as e: - LOG.error('Failed to export fiber channel target ' - 'due to %s', e) - ret = errno.EFAULT - break - if ret == 0: - ret, output = self.dpl.get_vdev(self._conver_uuid2hex( - volume['id'])) - nLun = -1 - if ret == 0: - try: - for p in output['exports']['Network/FC']: - # check initiator wwpn existed in target initiator list - for initI in p.get('permissions', []): - for szwpn in szwwpns: - if initI.get(szwpn, None): - nLun = initI[szwpn] - break - if nLun != -1: - break - - if nLun != -1: - targetIdentifier.append( - str(p['target_identifier']).replace(':', '')) - - except Exception: - msg = _('Invalid connection initialization response of ' - 'volume %(name)s: ' - '%(output)s') % {'name': volume['name'], - 'output': output} - raise exception.VolumeBackendAPIException(data=msg) - - if nLun != -1: - init_targ_map = self._build_initiator_target_map(connector, - targetIdentifier) - properties['target_discovered'] = True - properties['target_wwn'] = targetIdentifier - properties['target_lun'] = int(nLun) - properties['volume_id'] = volume['id'] - properties['initiator_target_map'] = init_targ_map - LOG.info('%(volume)s assign type fibre_channel, properties ' - '%(properties)s', - {'volume': volume['id'], 'properties': properties}) - else: - msg = _('Invalid connection initialization response of ' - 'volume %(name)s') % {'name': volume['name']} - raise exception.VolumeBackendAPIException(data=msg) - LOG.info('Connect initialization info: ' - '{driver_volume_type: fibre_channel, ' - 'data: %(properties)s', {'properties': properties}) - return {'driver_volume_type': 'fibre_channel', - 'data': properties} - - @fczm_utils.remove_fc_zone - def terminate_connection(self, volume, connector, **kwargs): - """Disallow connection from connector.""" - """ - 
def get_volume_stats(self, refresh=False):
    """Return cached backend stats, refreshing them first when asked."""
    if refresh:
        data = super(DPLFCDriver, self).get_volume_stats(refresh)
        if data:
            data['storage_protocol'] = 'FC'
            name = self.configuration.safe_get('volume_backend_name')
            data['volume_backend_name'] = name if name else 'DPLFCDriver'
            self._stats = data
    return self._stats
a/cinder/volume/drivers/prophetstor/dpl_iscsi.py b/cinder/volume/drivers/prophetstor/dpl_iscsi.py deleted file mode 100644 index cf86b2abf..000000000 --- a/cinder/volume/drivers/prophetstor/dpl_iscsi.py +++ /dev/null @@ -1,156 +0,0 @@ -# Copyright (c) 2014 ProphetStor, Inc. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import errno - -from oslo_log import log as logging - -from cinder import exception -from cinder.i18n import _ -from cinder import interface -import cinder.volume.driver -from cinder.volume.drivers.prophetstor import dplcommon - -LOG = logging.getLogger(__name__) - - -@interface.volumedriver -class DPLISCSIDriver(dplcommon.DPLCOMMONDriver, - cinder.volume.driver.ISCSIDriver): - def __init__(self, *args, **kwargs): - super(DPLISCSIDriver, self).__init__(*args, **kwargs) - - def initialize_connection(self, volume, connector): - """Allow connection to connector and return connection info.""" - properties = {} - properties['target_lun'] = None - properties['target_discovered'] = True - properties['target_portal'] = '' - properties['target_iqn'] = None - properties['volume_id'] = volume['id'] - - dpl_server = self.configuration.san_ip - dpl_iscsi_port = self.configuration.iscsi_port - ret, output = self.dpl.assign_vdev(self._conver_uuid2hex( - volume['id']), connector['initiator'].lower(), volume['id'], - '%s:%d' % (dpl_server, dpl_iscsi_port), 0) - - if ret == errno.EAGAIN: - ret, event_uuid = self._get_event_uuid(output) - if 
len(event_uuid): - ret = 0 - status = self._wait_event( - self.dpl.get_vdev_status, self._conver_uuid2hex( - volume['id']), event_uuid) - if status['state'] == 'error': - ret = errno.EFAULT - msg = _('Flexvisor failed to assign volume %(id)s: ' - '%(status)s.') % {'id': volume['id'], - 'status': status} - raise exception.VolumeBackendAPIException(data=msg) - else: - ret = errno.EFAULT - msg = _('Flexvisor failed to assign volume %(id)s due to ' - 'unable to query status by event ' - 'id.') % {'id': volume['id']} - raise exception.VolumeBackendAPIException(data=msg) - elif ret != 0: - msg = _('Flexvisor assign volume failed.:%(id)s:' - '%(status)s.') % {'id': volume['id'], 'status': ret} - raise exception.VolumeBackendAPIException(data=msg) - - if ret == 0: - ret, output = self.dpl.get_vdev( - self._conver_uuid2hex(volume['id'])) - if ret == 0: - for tgInfo in output['exports']['Network/iSCSI']: - if tgInfo['permissions'] and \ - isinstance(tgInfo['permissions'][0], dict): - for assign in tgInfo['permissions']: - if connector['initiator'].lower() in assign.keys(): - for tgportal in tgInfo.get('portals', {}): - properties['target_portal'] = tgportal - break - properties['target_lun'] = \ - int(assign[connector['initiator'].lower()]) - break - - if properties['target_portal'] != '': - properties['target_iqn'] = tgInfo['target_identifier'] - break - else: - if connector['initiator'].lower() in tgInfo['permissions']: - for tgportal in tgInfo.get('portals', {}): - properties['target_portal'] = tgportal - break - - if properties['target_portal'] != '': - properties['target_lun'] = \ - int(tgInfo['logical_unit_number']) - properties['target_iqn'] = \ - tgInfo['target_identifier'] - break - - if not (ret == 0 or properties['target_portal']): - msg = _('Flexvisor failed to assign volume %(volume)s ' - 'iqn %(iqn)s.') % {'volume': volume['id'], - 'iqn': connector['initiator']} - raise exception.VolumeBackendAPIException(data=msg) - - return {'driver_volume_type': 'iscsi', 
def terminate_connection(self, volume, connector, **kwargs):
    """Disallow connection from connector."""
    vol_id = volume['id']
    ret, output = self.dpl.unassign_vdev(
        self._conver_uuid2hex(vol_id),
        connector['initiator'])

    if ret == errno.EAGAIN:
        # Unassign was accepted asynchronously; poll the event.
        ret, event_uuid = self._get_event_uuid(output)
        if ret != 0:
            msg = _('Flexvisor failed to unassign volume (get event) '
                    '%(id)s.') % {'id': vol_id}
            raise exception.VolumeBackendAPIException(data=msg)
        status = self._wait_event(
            self.dpl.get_vdev_status, vol_id, event_uuid)
        if status['state'] == 'error':
            ret = errno.EFAULT
            msg = _('Flexvisor failed to unassign volume %(id)s:'
                    ' %(status)s.') % {'id': vol_id,
                                       'status': status}
            raise exception.VolumeBackendAPIException(data=msg)
    elif ret == errno.ENODATA:
        LOG.info('Flexvisor already unassigned volume %(id)s.',
                 {'id': vol_id})
    elif ret != 0:
        msg = _('Flexvisor failed to unassign volume:%(id)s:'
                '%(status)s.') % {'id': vol_id, 'status': ret}
        raise exception.VolumeBackendAPIException(data=msg)

def get_volume_stats(self, refresh=False):
    """Return cached backend stats, refreshing them first when asked."""
    if refresh:
        try:
            data = super(DPLISCSIDriver, self).get_volume_stats(refresh)
            if data:
                data['storage_protocol'] = 'iSCSI'
                name = self.configuration.safe_get('volume_backend_name')
                data['volume_backend_name'] = \
                    name if name else 'DPLISCSIDriver'
                self._stats = data
        except Exception as exc:
            LOG.warning('Cannot get volume status %(exc)s.', {'exc': exc})
    return self._stats
class DPLCommand(object):
    """DPL command interface.

    Minimal HTTPS/CDMI client for the ProphetStor Federator service.
    Credentials and TLS settings are captured at construction time;
    send_cmd() issues one request and maps HTTP results to errno codes.
    """

    def __init__(self, ip, port, username, password, cert_verify=False,
                 cert_path=None):
        self.ip = ip
        self.port = port
        self.username = username
        self.password = password
        self.cert_verify = cert_verify
        self.cert_path = cert_path

    def send_cmd(self, method, url, params, expected_status):
        """Send command to DPL.

        :param method: HTTP verb name ('GET', 'PUT', ...)
        :param url: path part of the request URL
        :param params: dict body, JSON-encoded before sending (or None)
        :param expected_status: list of acceptable http status codes
        :returns: (retcode, data) — retcode 0 or an errno value, data is
                  the decoded JSON body when one was returned
        """
        retcode = 0
        data = {}
        header = {'Content-Type': 'application/cdmi-container',
                  'Accept': 'application/cdmi-container',
                  'x-cdmi-specification-version': '1.0.2'}
        # base64 encode the username and password
        auth = base64.encodestring('%s:%s'
                                   % (self.username,
                                      self.password)).replace('\n', '')
        header['Authorization'] = 'Basic %s' % auth

        if not params:
            payload = None
        else:
            try:
                payload = json.dumps(params, ensure_ascii=False)
                payload.encode('utf-8')
            except Exception as e:
                LOG.error('JSON encode params %(param)s error:'
                          ' %(status)s.', {'param': params, 'status': e})
                retcode = errno.EINVAL

        retry = CONNECTION_RETRY
        func = getattr(requests, method.lower())

        if self.cert_verify:
            cert_path = self.cert_path
        else:
            cert_path = False

        while retry:
            try:
                r = func(
                    url="https://%s:%s%s" % (self.ip, self.port, url),
                    data=payload, headers=header, verify=cert_path)

                if r.status_code == http_client.SERVICE_UNAVAILABLE:
                    LOG.error("The flexvisor service is unavailable.")
                    # Bug fix: decrement the counter so a persistently
                    # unavailable service cannot loop forever; after the
                    # retries are exhausted the 503 response falls through
                    # to the status handling below and yields EIO.
                    retry -= 1
                    continue
                else:
                    break
            except Exception as e:
                msg = (_("failed to %(method)s due to %(error)s")
                       % {"method": method, "error": six.text_type(e)})
                LOG.error(msg)
                raise exception.VolumeBackendAPIException(data=msg)

        # Bug fix: status codes are compared with '==' instead of 'is';
        # identity comparison of ints > 256 is a CPython implementation
        # accident and is not guaranteed to hold.
        if (r.status_code in expected_status and
                r.status_code == http_client.NOT_FOUND):
            retcode = errno.ENODATA
        elif r.status_code not in expected_status:
            LOG.error('%(method)s %(url)s unexpected response status: '
                      '%(response)s (expects: %(expects)s).',
                      {'method': method,
                       'url': url,
                       'response': http_client.responses[r.status_code],
                       'expects': expected_status})
            if r.status_code == http_client.UNAUTHORIZED:
                raise exception.NotAuthorized
            else:
                retcode = errno.EIO
        elif r.status_code == http_client.NOT_FOUND:
            retcode = errno.ENODATA
        elif r.status_code == http_client.ACCEPTED:
            retcode = errno.EAGAIN
            try:
                data = r.json()
            except (TypeError, ValueError) as e:
                LOG.error('Call to json.loads() raised an exception: %s.',
                          e)
                retcode = errno.ENOEXEC
            except Exception as e:
                LOG.error('Read response raised an exception: %s.',
                          e)
                retcode = errno.ENOEXEC
        elif (r.status_code in [http_client.OK, http_client.CREATED] and
                http_client.NO_CONTENT not in expected_status):
            try:
                data = r.json()
            except (TypeError, ValueError) as e:
                LOG.error('Call to json.loads() raised an exception: %s.',
                          e)
                retcode = errno.ENOEXEC
            except Exception as e:
                LOG.error('Read response raised an exception: %s.',
                          e)
                retcode = errno.ENOEXEC

        return retcode, data
def extend_vdev(self, volumeID, volumeName, volumeDesc, volumeSize,
                maximum_snapshot=MAXSNAPSHOTS, snapshot_quota=None):
    """Grow an existing virtual volume to volumeSize."""
    metadata = {
        'display_name': (volumeID if volumeName is None or volumeName == ''
                         else volumeName),
        'display_description': volumeDesc,
        'total_capacity': int(volumeSize),
        'maximum_snapshot': maximum_snapshot,
    }
    if snapshot_quota is not None:
        metadata['snapshot_quota'] = snapshot_quota
    return self._execute('PUT',
                         '/%s/%s/%s/' % (DPL_VER_V1, DPL_OBJ_VOLUME,
                                         volumeID),
                         {'metadata': metadata},
                         [http_client.OK, http_client.ACCEPTED,
                          http_client.CREATED])

def delete_vdev(self, volumeID, force=True):
    """Delete a virtual volume; missing volumes are tolerated."""
    return self._execute('DELETE',
                         '/%s/%s/%s/' % (DPL_VER_V1, DPL_OBJ_VOLUME,
                                         volumeID),
                         {'metadata': {'force': force}},
                         [http_client.OK, http_client.ACCEPTED,
                          http_client.NOT_FOUND, http_client.NO_CONTENT])

def create_vdev_from_snapshot(self, vdevID, vdevDisplayName, vdevDesc,
                              snapshotID, poolID, fthinprovision=True,
                              maximum_snapshot=MAXSNAPSHOTS,
                              snapshot_quota=None):
    """Create a new volume as a full copy of an existing snapshot."""
    metadata = {
        'snapshot_operation': 'copy',
        'display_name': (vdevID if vdevDisplayName is None or
                         vdevDisplayName == "" else vdevDisplayName),
        'display_description': vdevDesc,
        'pool_uuid': poolID,
        'properties': dict(thin_provision=fthinprovision),
        'maximum_snapshot': maximum_snapshot,
    }
    if snapshot_quota:
        metadata['snapshot_quota'] = snapshot_quota
    params = {'metadata': metadata,
              'copy': self._gen_snapshot_url(vdevID, snapshotID)}
    return self._execute('PUT',
                         '/%s/%s/%s/' % (DPL_VER_V1, DPL_OBJ_VOLUME,
                                         vdevID),
                         params,
                         [http_client.OK, http_client.ACCEPTED,
                          http_client.CREATED])

def spawn_vdev_from_snapshot(self, new_vol_id, src_vol_id,
                             vol_display_name, description, snap_id):
    """Spawn (lightweight clone) a new volume from a snapshot."""
    metadata = {
        'snapshot_operation': 'spawn',
        'display_name': (new_vol_id if vol_display_name is None or
                         vol_display_name == '' else vol_display_name),
        'display_description': description,
    }
    params = {'metadata': metadata,
              'copy': self._gen_snapshot_url(src_vol_id, snap_id)}
    return self._execute('PUT',
                         '/%s/%s/%s/' % (DPL_VER_V1, DPL_OBJ_VOLUME,
                                         new_vol_id),
                         params,
                         [http_client.OK, http_client.ACCEPTED,
                          http_client.CREATED])

def get_pools(self):
    """List every storage pool."""
    return self._execute('GET',
                         '/%s/%s/' % (DPL_VER_V1, DPL_OBJ_POOL),
                         None, [http_client.OK])

def get_pool(self, poolid):
    """Fetch one storage pool by uuid."""
    return self._execute('GET',
                         '/%s/%s/%s/' % (DPL_VER_V1, DPL_OBJ_POOL, poolid),
                         None,
                         [http_client.OK, http_client.ACCEPTED])
def create_vdev_snapshot(self, vdevid, snapshotid, snapshotname='',
                         snapshotdes='', isgroup=False):
    """Take a snapshot of a volume or a volume group."""
    parent = DPL_OBJ_VOLUMEGROUP if isgroup else DPL_OBJ_VOLUME
    metadata = {
        'display_name': snapshotname if snapshotname else snapshotid,
        'display_description': snapshotdes,
    }
    return self._execute('PUT',
                         '/%s/%s/%s/' % (DPL_VER_V1, parent, vdevid),
                         {'metadata': metadata, 'snapshot': snapshotid},
                         [http_client.OK, http_client.CREATED,
                          http_client.ACCEPTED])

def get_vdev(self, vdevid):
    """Fetch one volume; NOT_FOUND is an accepted status."""
    return self._execute('GET',
                         '/%s/%s/%s/' % (DPL_VER_V1, DPL_OBJ_VOLUME,
                                         vdevid),
                         None,
                         [http_client.OK, http_client.ACCEPTED,
                          http_client.NOT_FOUND])

def get_vdev_status(self, vdevid, eventid):
    """Poll an asynchronous volume operation by its event uuid."""
    url = ('/%s/%s/%s/?event_uuid=%s' % (DPL_VER_V1, DPL_OBJ_VOLUME,
                                         vdevid, eventid))
    return self._execute('GET', url, None,
                         [http_client.OK, http_client.NOT_FOUND])

def get_pool_status(self, poolid, eventid):
    """Poll an asynchronous pool operation by its event uuid."""
    url = ('/%s/%s/%s/?event_uuid=%s' % (DPL_VER_V1, DPL_OBJ_POOL,
                                         poolid, eventid))
    return self._execute('GET', url, None,
                         [http_client.OK, http_client.NOT_FOUND])
def assign_vdev_fc(self, vdevid, targetwwpn, initiatorwwpn, lunname,
                   lunid=-1):
    """Export a volume over FC to initiatorwwpn through targetwwpn."""
    target_info = {
        'target_identifier': targetwwpn,
        'logical_unit_number': lunid,
        'logical_unit_name': lunname,
        'permissions': initiatorwwpn,
    }
    params = {'metadata': {'export_operation': 'assign'},
              'exports': {'Network/FC': target_info}}
    return self._execute('PUT',
                         '/%s/%s/%s/' % (DPL_VER_V1, DPL_OBJ_VOLUME,
                                         vdevid),
                         params,
                         [http_client.OK, http_client.ACCEPTED,
                          http_client.CREATED])

def unassign_vdev(self, vdevid, initiatorIqn, targetIqn=''):
    """Remove the iSCSI export of a volume for one initiator."""
    exports = {'Network/iSCSI': {'target_identifier': targetIqn,
                                 'permissions': [initiatorIqn]}}
    params = {'metadata': {'export_operation': 'unassign'},
              'exports': exports}
    return self._execute('PUT',
                         '/%s/%s/%s/' % (DPL_VER_V1, DPL_OBJ_VOLUME,
                                         vdevid),
                         params,
                         [http_client.OK, http_client.ACCEPTED,
                          http_client.NO_CONTENT, http_client.NOT_FOUND])

def unassign_vdev_fc(self, vdevid, targetwwpn, initiatorwwpns):
    """Remove the FC export of a volume for the given initiators."""
    exports = {'Network/FC': {'target_identifier': targetwwpn,
                              'permissions': initiatorwwpns}}
    params = {'metadata': {'export_operation': 'unassign'},
              'exports': exports}
    return self._execute('PUT',
                         '/%s/%s/%s/' % (DPL_VER_V1, DPL_OBJ_VOLUME,
                                         vdevid),
                         params,
                         [http_client.OK, http_client.ACCEPTED,
                          http_client.NO_CONTENT, http_client.NOT_FOUND])

def delete_vdev_snapshot(self, objID, snapshotID, isGroup=False):
    """Delete a snapshot of a volume or a volume group."""
    parent = DPL_OBJ_VOLUMEGROUP if isGroup else DPL_OBJ_VOLUME
    url = ('/%s/%s/%s/%s/%s/' % (DPL_VER_V1, parent, objID,
                                 DPL_OBJ_SNAPSHOT, snapshotID))
    return self._execute('DELETE', url, None,
                         [http_client.OK, http_client.ACCEPTED,
                          http_client.NO_CONTENT, http_client.NOT_FOUND])

def rollback_vdev(self, vdevid, snapshotid):
    """Roll a volume back to one of its snapshots."""
    return self._execute('PUT',
                         '/%s/%s/%s/' % (DPL_VER_V1, DPL_OBJ_VOLUME,
                                         vdevid),
                         {'copy': self._gen_snapshot_url(vdevid,
                                                         snapshotid)},
                         [http_client.OK, http_client.ACCEPTED])

def list_vdev_snapshots(self, vdevid, isGroup=False):
    """List all snapshots of a volume or a volume group."""
    parent = DPL_OBJ_VOLUMEGROUP if isGroup else DPL_OBJ_VOLUME
    url = '/%s/%s/%s/%s/' % (DPL_VER_V1, parent, vdevid, DPL_OBJ_SNAPSHOT)
    return self._execute('GET', url, None, [http_client.OK])

def query_vdev_snapshot(self, vdevid, snapshotID, isGroup=False):
    """Fetch a single snapshot of a volume or a volume group."""
    parent = DPL_OBJ_VOLUMEGROUP if isGroup else DPL_OBJ_VOLUME
    url = ('/%s/%s/%s/%s/%s/' % (DPL_VER_V1, parent, vdevid,
                                 DPL_OBJ_SNAPSHOT, snapshotID))
    return self._execute('GET', url, None, [http_client.OK])
create_target(self, targetID, protocol, displayName, targetAddress, - description=''): - method = 'PUT' - params = {} - url = '/%s/%s/%s/' % (DPL_VER_V1, DPL_OBJ_EXPORT, targetID) - params['metadata'] = {} - metadata = params['metadata'] - metadata['type'] = 'target' - metadata['protocol'] = protocol - if displayName is None or displayName == '': - metadata['display_name'] = targetID - else: - metadata['display_name'] = displayName - metadata['display_description'] = description - metadata['address'] = targetAddress - return self._execute(method, url, params, [http_client.OK]) - - def get_target(self, targetID): - method = 'GET' - url = '/%s/%s/%s/' % (DPL_VER_V1, DPL_OBJ_EXPORT, targetID) - return self._execute(method, url, None, [http_client.OK]) - - def delete_target(self, targetID): - method = 'DELETE' - url = '/%s/%s/%s/' % (DPL_VER_V1, DPL_OBJ_EXPORT, targetID) - return self._execute(method, - url, None, - [http_client.OK, http_client.ACCEPTED, - http_client.NOT_FOUND]) - - def get_target_list(self, type='target'): - # type = target/initiator - method = 'GET' - if type is None: - url = '/%s/%s/' % (DPL_VER_V1, DPL_OBJ_EXPORT) - else: - url = '/%s/%s/?type=%s' % (DPL_VER_V1, DPL_OBJ_EXPORT, type) - return self._execute(method, url, None, [http_client.OK]) - - def get_sns_table(self, wwpn): - method = 'PUT' - params = {} - url = '/%s/%s/%s/' % (DPL_VER_V1, DPL_OBJ_EXPORT, DPL_OBJ_SNS) - params['metadata'] = {} - params['metadata']['protocol'] = 'fc' - params['metadata']['address'] = str(wwpn) - return self._execute(method, url, params, [http_client.OK]) - - def create_vg(self, groupID, groupName, groupDesc='', listVolume=None, - maxSnapshots=MAXSNAPSHOTS, rotationSnapshot=True): - method = 'PUT' - metadata = {} - params = {} - properties = {} - url = '/%s/%s/' % (DPL_OBJ_VOLUMEGROUP, groupID) - if listVolume: - metadata['volume'] = listVolume - else: - metadata['volume'] = [] - metadata['display_name'] = groupName - metadata['display_description'] = groupDesc - 
metadata['maximum_snapshot'] = maxSnapshots - properties['snapshot_rotation'] = rotationSnapshot - metadata['properties'] = properties - params['metadata'] = metadata - return self._execute(method, url, params, - [http_client.OK, http_client.ACCEPTED, - http_client.CREATED]) - - def get_vg_list(self, vgtype=None): - method = 'GET' - if vgtype: - url = '/%s/?volume_group_type=%s' % (DPL_OBJ_VOLUMEGROUP, vgtype) - else: - url = '/%s/' % (DPL_OBJ_VOLUMEGROUP) - return self._execute(method, url, None, [http_client.OK]) - - def get_vg(self, groupID): - method = 'GET' - url = '/%s/%s/' % (DPL_OBJ_VOLUMEGROUP, groupID) - return self._execute(method, url, None, [http_client.OK]) - - def delete_vg(self, groupID, force=True): - method = 'DELETE' - metadata = {} - params = {} - url = '/%s/%s/' % (DPL_OBJ_VOLUMEGROUP, groupID) - metadata['force'] = force - params['metadata'] = metadata - return self._execute(method, url, params, - [http_client.NO_CONTENT, http_client.NOT_FOUND]) - - def join_vg(self, volumeID, groupID): - method = 'PUT' - metadata = {} - params = {} - url = '/%s/%s/' % (DPL_OBJ_VOLUMEGROUP, groupID) - metadata['volume_group_operation'] = 'join' - metadata['volume'] = [] - metadata['volume'].append(volumeID) - params['metadata'] = metadata - return self._execute(method, url, params, - [http_client.OK, http_client.ACCEPTED]) - - def leave_vg(self, volumeID, groupID): - method = 'PUT' - metadata = {} - params = {} - url = '/%s/%s/' % (DPL_OBJ_VOLUMEGROUP, groupID) - metadata['volume_group_operation'] = 'leave' - metadata['volume'] = [] - metadata['volume'].append(volumeID) - params['metadata'] = metadata - return self._execute(method, url, params, - [http_client.OK, http_client.ACCEPTED]) - - -class DPLCOMMONDriver(driver.CloneableImageVD, - driver.BaseVD): - """Class of dpl storage adapter.""" - VERSION = '2.0.5' - - # ThirdPartySystems wiki page - CI_WIKI_NAME = "ProphetStor_CI" - - def __init__(self, *args, **kwargs): - cert_path = None - cert_verify = False - 
super(DPLCOMMONDriver, self).__init__(*args, **kwargs) - if self.configuration: - self.configuration.append_config_values(options.DPL_OPTS) - self.configuration.append_config_values(san.san_opts) - cert_verify = self.configuration.driver_ssl_cert_verify - cert_path = self.configuration.driver_ssl_cert_path - - if cert_verify: - if not cert_path: - LOG.warning( - "Flexvisor: cert_verify is enabled but required cert_path" - " option is missing.") - cert_path = None - else: - cert_path = None - - self.dpl = DPLVolume(self.configuration.san_ip, - self.configuration.dpl_port, - self.configuration.san_login, - self.configuration.san_password, - cert_verify=cert_verify, - cert_path=cert_path) - self._stats = {} - - def _convert_size_GB(self, size): - s = round(float(size) / units.Gi, 2) - if s > 0: - return s - else: - return 0 - - def _conver_uuid2hex(self, strID): - if strID: - return strID.replace('-', '') - else: - return None - - def _get_event_uuid(self, output): - ret = 0 - event_uuid = "" - - if (type(output) is dict and - output.get("metadata") and output["metadata"]): - if (output["metadata"].get("event_uuid") and - output["metadata"]["event_uuid"]): - event_uuid = output["metadata"]["event_uuid"] - else: - ret = errno.EINVAL - else: - ret = errno.EINVAL - return ret, event_uuid - - def _wait_event(self, callFun, objuuid, eventid=None): - nRetry = 30 - fExit = False - status = {} - status['state'] = 'error' - status['output'] = {} - while nRetry: - try: - if eventid: - ret, output = callFun( - self._conver_uuid2hex(objuuid), - self._conver_uuid2hex(eventid)) - else: - ret, output = callFun(self._conver_uuid2hex(objuuid)) - - if ret == 0: - if output['completionStatus'] == 'Complete': - fExit = True - status['state'] = 'available' - status['output'] = output - elif output['completionStatus'] == 'Error': - fExit = True - status['state'] = 'error' - raise loopingcall.LoopingCallDone(retvalue=False) - else: - nsleep = random.randint(0, 10) - value = 
round(float(nsleep) / 10, 2) - time.sleep(value) - elif ret == errno.ENODATA: - status['state'] = 'deleted' - fExit = True - else: - nRetry -= 1 - time.sleep(3) - continue - - except Exception as e: - LOG.error('Flexvisor failed to get event %(volume)s ' - '(%(status)s).', - {'volume': eventid, 'status': e}) - raise loopingcall.LoopingCallDone(retvalue=False) - - if fExit is True: - break - return status - - def _join_volume_group(self, volume, cgId): - # Join volume group if consistency group id not empty - msg = '' - try: - ret, output = self.dpl.join_vg( - self._conver_uuid2hex(volume['id']), - self._conver_uuid2hex(cgId)) - except Exception as e: - ret = errno.EFAULT - msg = _('Fexvisor failed to add volume %(id)s ' - 'due to %(reason)s.') % {"id": volume['id'], - "reason": six.text_type(e)} - if ret: - if not msg: - msg = _('Flexvisor failed to add volume %(id)s ' - 'to group %(cgid)s.') % {'id': volume['id'], - 'cgid': cgId} - raise exception.VolumeBackendAPIException(data=msg) - else: - LOG.info('Flexvisor succeeded to add volume %(id)s to ' - 'group %(cgid)s.', - {'id': volume['id'], 'cgid': cgId}) - - def _leave_volume_group(self, volume, cgId): - # Leave volume group if consistency group id not empty - msg = '' - try: - ret, output = self.dpl.leave_vg( - self._conver_uuid2hex(volume['id']), - self._conver_uuid2hex(cgId)) - except Exception as e: - ret = errno.EFAULT - msg = _('Fexvisor failed to remove volume %(id)s ' - 'due to %(reason)s.') % {"id": volume['id'], - "reason": six.text_type(e)} - if ret: - if not msg: - msg = _('Flexvisor failed to remove volume %(id)s ' - 'from group %(cgid)s.') % {'id': volume['id'], - 'cgid': cgId} - raise exception.VolumeBackendAPIException(data=msg) - else: - LOG.info('Flexvisor succeeded to remove volume %(id)s from ' - 'group %(cgid)s.', - {'id': volume['id'], 'cgid': cgId}) - - def _get_snapshotid_of_vgsnapshot(self, vgID, vgsnapshotID, volumeID): - snapshotID = None - ret, out = self.dpl.query_vdev_snapshot(vgID, 
vgsnapshotID, True) - if ret == 0: - volumes = out.get('metadata', {}).get('member', {}) - if volumes: - snapshotID = volumes.get(volumeID, None) - else: - msg = _('Flexvisor failed to get snapshot id of volume ' - '%(id)s from group %(vgid)s.') % {'id': volumeID, - 'vgid': vgID} - raise exception.VolumeBackendAPIException(data=msg) - if not snapshotID: - msg = _('Flexvisor could not find volume %(id)s snapshot in' - ' the group %(vgid)s snapshot ' - '%(vgsid)s.') % {'id': volumeID, 'vgid': vgID, - 'vgsid': vgsnapshotID} - raise exception.VolumeBackendAPIException(data=msg) - return snapshotID - - def create_export(self, context, volume, connector): - pass - - def ensure_export(self, context, volume): - pass - - def remove_export(self, context, volume): - pass - - def _create_consistencygroup(self, context, group): - """Creates a consistencygroup.""" - LOG.info('Start to create consistency group: %(group_name)s ' - 'id: %(id)s', - {'group_name': group.name, 'id': group.id}) - model_update = {'status': fields.GroupStatus.AVAILABLE} - try: - ret, output = self.dpl.create_vg( - self._conver_uuid2hex(group.id), - group.name, - group.description) - if ret: - msg = _('Failed to create consistency group ' - '%(id)s:%(ret)s.') % {'id': group.id, - 'ret': ret} - raise exception.VolumeBackendAPIException(data=msg) - else: - return model_update - except Exception as e: - msg = _('Failed to create consistency group ' - '%(id)s due to %(reason)s.') % {'id': group.id, - 'reason': six.text_type(e)} - raise exception.VolumeBackendAPIException(data=msg) - - def _delete_consistencygroup(self, context, group, volumes): - """Delete a consistency group.""" - ret = 0 - volumes = self.db.volume_get_all_by_group( - context, group.id) - model_update = {} - model_update['status'] = group.status - LOG.info('Start to delete consistency group: %(cg_name)s', - {'cg_name': group.id}) - try: - self.dpl.delete_vg(self._conver_uuid2hex(group.id)) - except Exception as e: - msg = _('Failed to delete 
consistency group %(id)s ' - 'due to %(reason)s.') % {'id': group.id, - 'reason': six.text_type(e)} - raise exception.VolumeBackendAPIException(data=msg) - - for volume_ref in volumes: - try: - self.dpl.delete_vdev(self._conver_uuid2hex(volume_ref['id'])) - volume_ref['status'] = 'deleted' - except Exception: - ret = errno.EFAULT - volume_ref['status'] = 'error_deleting' - model_update['status'] = ( - fields.GroupStatus.ERROR_DELETING) - if ret == 0: - model_update['status'] = fields.GroupStatus.DELETED - return model_update, volumes - - def _create_cgsnapshot(self, context, cgsnapshot, snapshots): - """Creates a cgsnapshot.""" - snapshots = objects.SnapshotList().get_all_for_group_snapshot( - context, cgsnapshot.id) - model_update = {} - LOG.info('Start to create cgsnapshot for consistency group' - ': %(group_name)s', - {'group_name': cgsnapshot.group_id}) - try: - self.dpl.create_vdev_snapshot( - self._conver_uuid2hex(cgsnapshot.group_id), - self._conver_uuid2hex(cgsnapshot.id), - cgsnapshot.name, - '', - True) - for snapshot in snapshots: - snapshot.status = fields.SnapshotStatus.AVAILABLE - except Exception as e: - msg = _('Failed to create cg snapshot %(id)s ' - 'due to %(reason)s.') % {'id': cgsnapshot.id, - 'reason': six.text_type(e)} - raise exception.VolumeBackendAPIException(data=msg) - - model_update['status'] = 'available' - - return model_update, snapshots - - def _delete_cgsnapshot(self, context, cgsnapshot, snapshots): - """Deletes a cgsnapshot.""" - snapshots = objects.SnapshotList().get_all_for_group_snapshot( - context, cgsnapshot.id) - model_update = {} - model_update['status'] = cgsnapshot.status - LOG.info('Delete cgsnapshot %(snap_name)s for consistency group: ' - '%(group_name)s', - {'snap_name': cgsnapshot.id, - 'group_name': cgsnapshot.group_id}) - try: - self.dpl.delete_vdev_snapshot( - self._conver_uuid2hex(cgsnapshot.group_id), - self._conver_uuid2hex(cgsnapshot.id), True) - for snapshot in snapshots: - snapshot.status = 
fields.SnapshotStatus.DELETED - except Exception as e: - msg = _('Failed to delete cgsnapshot %(id)s due to ' - '%(reason)s.') % {'id': cgsnapshot.id, - 'reason': six.text_type(e)} - raise exception.VolumeBackendAPIException(data=msg) - - model_update['status'] = 'deleted' - return model_update, snapshots - - def update_group(self, context, group, add_volumes=None, - remove_volumes=None): - addvollist = [] - removevollist = [] - cgid = group.id - vid = '' - model_update = {'status': fields.GroupStatus.AVAILABLE} - if not volume_utils.is_group_a_cg_snapshot_type(group): - raise NotImplementedError() - # Get current group info in backend storage. - ret, output = self.dpl.get_vg(self._conver_uuid2hex(cgid)) - if ret == 0: - group_members = output.get('children', []) - - if add_volumes: - addvollist = add_volumes - if remove_volumes: - removevollist = remove_volumes - - # Process join volumes. - try: - for volume in addvollist: - vid = volume['id'] - # Verify the volume exists in the group or not. - if self._conver_uuid2hex(vid) in group_members: - continue - self._join_volume_group(volume, cgid) - except Exception as e: - msg = _("Fexvisor failed to join the volume %(vol)s in the " - "group %(group)s due to " - "%(ret)s.") % {"vol": vid, "group": cgid, - "ret": six.text_type(e)} - raise exception.VolumeBackendAPIException(data=msg) - # Process leave volumes. 
- try: - for volume in removevollist: - vid = volume['id'] - if self._conver_uuid2hex(vid) in group_members: - self._leave_volume_group(volume, cgid) - except Exception as e: - msg = _("Fexvisor failed to remove the volume %(vol)s in the " - "group %(group)s due to " - "%(ret)s.") % {"vol": vid, "group": cgid, - "ret": six.text_type(e)} - raise exception.VolumeBackendAPIException(data=msg) - return model_update, None, None - - def create_group(self, context, group): - if volume_utils.is_group_a_cg_snapshot_type(group): - return self._create_consistencygroup(context, group) - raise NotImplementedError() - - def delete_group(self, context, group, volumes): - if volume_utils.is_group_a_cg_snapshot_type(group): - return self._delete_consistencygroup(context, group, volumes) - raise NotImplementedError() - - def create_group_snapshot(self, context, group_snapshot, snapshots): - if volume_utils.is_group_a_cg_snapshot_type(group_snapshot): - return self._create_cgsnapshot(context, group_snapshot, snapshots) - raise NotImplementedError() - - def delete_group_snapshot(self, context, group_snapshot, snapshots): - if volume_utils.is_group_a_cg_snapshot_type(group_snapshot): - return self._delete_cgsnapshot(context, group_snapshot, snapshots) - raise NotImplementedError() - - def create_group_from_src(self, context, group, volumes, - group_snapshot=None, snapshots=None, - source_group=None, source_vols=None): - err_msg = _("Prophet Storage doesn't support create_group_from_src.") - LOG.error(err_msg) - raise exception.VolumeBackendAPIException(data=err_msg) - - def create_volume(self, volume): - """Create a volume.""" - pool = volume_utils.extract_host(volume['host'], - level='pool') - if not pool: - if not self.configuration.dpl_pool: - msg = _("Pool is not available in the volume host fields.") - raise exception.InvalidHost(reason=msg) - else: - pool = self.configuration.dpl_pool - - ret, output = self.dpl.create_vdev( - self._conver_uuid2hex(volume['id']), - 
volume.get('display_name', ''), - volume.get('display_description', ''), - pool, - int(volume['size']) * units.Gi, - self.configuration.san_thin_provision) - if ret == errno.EAGAIN: - ret, event_uuid = self._get_event_uuid(output) - if ret == 0: - status = self._wait_event(self.dpl.get_vdev_status, - volume['id'], - event_uuid) - if status['state'] != 'available': - msg = _('Flexvisor failed to create volume %(volume)s: ' - '%(status)s.') % {'volume': volume['id'], - 'status': ret} - raise exception.VolumeBackendAPIException(data=msg) - else: - msg = _('Flexvisor failed to create volume (get event) ' - '%s.') % (volume['id']) - raise exception.VolumeBackendAPIException( - data=msg) - elif ret != 0: - msg = _('Flexvisor create volume failed.:%(volumeid)s:' - '%(status)s.') % {'volumeid': volume['id'], - 'status': ret} - raise exception.VolumeBackendAPIException( - data=msg) - else: - LOG.info('Flexvisor succeeded to create volume %(id)s.', - {'id': volume['id']}) - - if volume.group_id: - group = volume_utils.group_get_by_id(volume.group_id) - if volume_utils.is_group_a_cg_snapshot_type(group): - try: - self._join_volume_group(volume, volume.group_id) - except Exception: - # Delete volume if volume failed to join group. - self.dpl.delete_vdev(self._conver_uuid2hex(volume['id'])) - msg = _('Flexvisor failed to create volume %(id)s in the ' - 'group %(vgid)s.') % { - 'id': volume['id'], - 'vgid': volume.group_id} - raise exception.VolumeBackendAPIException(data=msg) - - def create_volume_from_snapshot(self, volume, snapshot): - """Creates a volume from a snapshot.""" - src_volume = None - vgID = None - # Detect whether a member of the group. - snapshotID = snapshot['id'] - # Try to get cgid if volume belong in the group. 
- src_volumeID = snapshot['volume_id'] - cgsnapshotID = snapshot.get('group_snapshot_id', None) - if cgsnapshotID: - try: - src_volume = self.db.volume_get(src_volumeID) - except Exception: - msg = _("Flexvisor unable to find the source volume " - "%(id)s info.") % {'id': src_volumeID} - raise exception.VolumeBackendAPIException(data=msg) - if src_volume: - vgID = src_volume.group_id - - # Get the volume origin snapshot id if the source snapshot is group - # snapshot. - if vgID: - snapshotID = self._get_snapshotid_of_vgsnapshot( - self._conver_uuid2hex(vgID), - self._conver_uuid2hex(cgsnapshotID), - self._conver_uuid2hex(src_volumeID)) - - pool = volume_utils.extract_host(volume['host'], - level='pool') - if not pool: - if not self.configuration.dpl_pool: - msg = _("Pool is not available in the volume host fields.") - raise exception.InvalidHost(reason=msg) - else: - pool = self.configuration.dpl_pool - - ret, output = self.dpl.create_vdev_from_snapshot( - self._conver_uuid2hex(volume['id']), - volume.get('display_name', ''), - volume.get('display_description', ''), - self._conver_uuid2hex(snapshotID), - pool, - self.configuration.san_thin_provision) - if ret == errno.EAGAIN: - ret, event_uuid = self._get_event_uuid(output) - if ret == 0: - status = self._wait_event(self.dpl.get_vdev_status, - volume['id'], - event_uuid) - if status['state'] != 'available': - msg = _('Flexvisor failed to create volume from ' - 'snapshot %(id)s:' - '%(status)s.') % {'id': snapshot['id'], - 'status': ret} - raise exception.VolumeBackendAPIException( - data=msg) - else: - msg = _('Flexvisor failed to create volume from snapshot ' - '(failed to get event) ' - '%(id)s.') % {'id': snapshot['id']} - raise exception.VolumeBackendAPIException(data=msg) - elif ret != 0: - msg = _('Flexvisor failed to create volume from snapshot ' - '%(id)s: %(status)s.') % {'id': snapshot['id'], - 'status': ret} - raise exception.VolumeBackendAPIException( - data=msg) - else: - LOG.info('Flexvisor succeeded 
to create volume %(id)s ' - 'from snapshot.', {'id': volume['id']}) - - if volume['size'] > snapshot['volume_size']: - self.extend_volume(volume, volume['size']) - - if volume.group_id: - group = volume_utils.group_get_by_id(volume.group_id) - if volume_utils.is_group_a_cg_snapshot_type(group): - try: - self._join_volume_group(volume, volume.group_id) - except Exception: - # Delete volume if volume failed to join group. - self.dpl.delete_vdev(self._conver_uuid2hex(volume['id'])) - raise - - def spawn_volume_from_snapshot(self, volume, snapshot): - """Spawn a REFERENCED volume from a snapshot.""" - ret, output = self.dpl.spawn_vdev_from_snapshot( - self._conver_uuid2hex(volume['id']), - self._conver_uuid2hex(snapshot['volume_id']), - volume.get('display_name', ''), - volume.get('display_description', ''), - self._conver_uuid2hex(snapshot['id'])) - - if ret == errno.EAGAIN: - # its an async process - ret, event_uuid = self._get_event_uuid(output) - if ret == 0: - status = self._wait_event(self.dpl.get_vdev_status, - volume['id'], event_uuid) - if status['state'] != 'available': - msg = _('Flexvisor failed to spawn volume from snapshot ' - '%(id)s:%(status)s.') % {'id': snapshot['id'], - 'status': ret} - raise exception.VolumeBackendAPIException(data=msg) - else: - msg = _('Flexvisor failed to spawn volume from snapshot ' - '(failed to get event) ' - '%(id)s.') % {'id': snapshot['id']} - raise exception.VolumeBackendAPIException(data=msg) - elif ret != 0: - msg = _('Flexvisor failed to create volume from snapshot ' - '%(id)s: %(status)s.') % {'id': snapshot['id'], - 'status': ret} - - raise exception.VolumeBackendAPIException( - data=msg) - else: - LOG.info('Flexvisor succeeded to create volume %(id)s ' - 'from snapshot.', {'id': volume['id']}) - - def create_cloned_volume(self, volume, src_vref): - """Creates a clone of the specified volume.""" - pool = volume_utils.extract_host(volume['host'], - level='pool') - if not pool: - if not self.configuration.dpl_pool: - 
msg = _("Pool is not available in the volume host fields.") - raise exception.InvalidHost(reason=msg) - else: - pool = self.configuration.dpl_pool - - ret, output = self.dpl.clone_vdev( - self._conver_uuid2hex(src_vref['id']), - self._conver_uuid2hex(volume['id']), - pool, - volume.get('display_name', ''), - volume.get('display_description', ''), - int(volume['size']) * units.Gi, - self.configuration.san_thin_provision) - if ret == errno.EAGAIN: - ret, event_uuid = self._get_event_uuid(output) - if ret == 0: - status = self._wait_event(self.dpl.get_vdev_status, - volume['id'], - event_uuid) - if status['state'] != 'available': - msg = _('Flexvisor failed to clone volume %(id)s: ' - '%(status)s.') % {'id': src_vref['id'], - 'status': ret} - raise exception.VolumeBackendAPIException(data=msg) - else: - msg = _('Flexvisor failed to clone volume (failed to' - ' get event) %(id)s.') % {'id': src_vref['id']} - raise exception.VolumeBackendAPIException( - data=msg) - elif ret != 0: - msg = _('Flexvisor failed to clone volume %(id)s: ' - '%(status)s.') % {'id': src_vref['id'], 'status': ret} - raise exception.VolumeBackendAPIException( - data=msg) - else: - LOG.info('Flexvisor succeeded to clone volume %(id)s.', - {'id': volume['id']}) - - if volume.group_id: - group = volume_utils.group_get_by_id(volume.group_id) - if volume_utils.is_group_a_cg_snapshot_type(group): - try: - self._join_volume_group(volume, volume.group_id) - except Exception: - # Delete volume if volume failed to join group. 
- self.dpl.delete_vdev(self._conver_uuid2hex(volume['id'])) - msg = _('Flexvisor volume %(id)s failed to join group ' - '%(vgid)s.') % {'id': volume['id'], - 'vgid': volume.group_id} - raise exception.VolumeBackendAPIException(data=msg) - - def delete_volume(self, volume): - """Deletes a volume.""" - ret = 0 - if volume.group_id: - group = volume_utils.group_get_by_id(volume.group_id) - if group and volume_utils.is_group_a_cg_snapshot_type(group): - msg = '' - try: - ret, out = self.dpl.leave_vg( - self._conver_uuid2hex(volume['id']), - self._conver_uuid2hex(volume.group_id)) - if ret: - LOG.warning('Flexvisor failed to delete volume ' - '%(id)s from the group %(vgid)s.', - {'id': volume['id'], - 'vgid': volume.group_id}) - except Exception as e: - LOG.warning('Flexvisor failed to delete volume %(id)s ' - 'from group %(vgid)s due to %(status)s.', - {'id': volume['id'], - 'vgid': volume.group_id, - 'status': e}) - - if ret: - ret = 0 - - ret, output = self.dpl.delete_vdev(self._conver_uuid2hex(volume['id'])) - if ret == errno.EAGAIN: - status = self._wait_event(self.dpl.get_vdev, volume['id']) - if status['state'] == 'error': - msg = _('Flexvisor failed deleting volume %(id)s: ' - '%(status)s.') % {'id': volume['id'], 'status': ret} - raise exception.VolumeBackendAPIException(data=msg) - elif ret == errno.ENODATA: - ret = 0 - LOG.info('Flexvisor volume %(id)s does not ' - 'exist.', {'id': volume['id']}) - elif ret != 0: - msg = _('Flexvisor failed to delete volume %(id)s: ' - '%(status)s.') % {'id': volume['id'], 'status': ret} - raise exception.VolumeBackendAPIException( - data=msg) - - def extend_volume(self, volume, new_size): - ret, output = self.dpl.extend_vdev(self._conver_uuid2hex(volume['id']), - volume.get('display_name', ''), - volume.get('display_description', - ''), - new_size * units.Gi) - if ret == errno.EAGAIN: - ret, event_uuid = self._get_event_uuid(output) - if ret == 0: - status = self._wait_event(self.dpl.get_vdev_status, - volume['id'], - 
event_uuid) - if status['state'] != 'available': - msg = _('Flexvisor failed to extend volume ' - '%(id)s:%(status)s.') % {'id': volume, - 'status': ret} - raise exception.VolumeBackendAPIException( - data=msg) - else: - msg = _('Flexvisor failed to extend volume ' - '(failed to get event) ' - '%(id)s.') % {'id': volume['id']} - raise exception.VolumeBackendAPIException(data=msg) - elif ret != 0: - msg = _('Flexvisor failed to extend volume ' - '%(id)s: %(status)s.') % {'id': volume['id'], - 'status': ret} - raise exception.VolumeBackendAPIException( - data=msg) - else: - LOG.info('Flexvisor succeeded to extend volume' - ' %(id)s.', {'id': volume['id']}) - - def create_snapshot(self, snapshot): - """Creates a snapshot.""" - ret, output = self.dpl.create_vdev_snapshot( - self._conver_uuid2hex(snapshot['volume_id']), - self._conver_uuid2hex(snapshot['id']), - snapshot.get('display_name', ''), - snapshot.get('display_description', '')) - - if ret == errno.EAGAIN: - ret, event_uuid = self._get_event_uuid(output) - if ret == 0: - status = self._wait_event(self.dpl.get_vdev_status, - snapshot['volume_id'], - event_uuid) - if status['state'] != 'available': - msg = (_('Flexvisor failed to create snapshot for volume ' - '%(id)s: %(status)s.') % - {'id': snapshot['volume_id'], 'status': ret}) - raise exception.VolumeBackendAPIException(data=msg) - else: - msg = (_('Flexvisor failed to create snapshot for volume ' - '(failed to get event) %(id)s.') % - {'id': snapshot['volume_id']}) - raise exception.VolumeBackendAPIException(data=msg) - elif ret != 0: - msg = _('Flexvisor failed to create snapshot for volume %(id)s: ' - '%(status)s.') % {'id': snapshot['volume_id'], - 'status': ret} - raise exception.VolumeBackendAPIException(data=msg) - - def delete_snapshot(self, snapshot): - """Deletes a snapshot.""" - ret, output = self.dpl.delete_vdev_snapshot( - self._conver_uuid2hex(snapshot['volume_id']), - self._conver_uuid2hex(snapshot['id'])) - if ret == errno.EAGAIN: - ret, 
event_uuid = self._get_event_uuid(output) - if ret == 0: - status = self._wait_event(self.dpl.get_vdev_status, - snapshot['volume_id'], - event_uuid) - if status['state'] != 'available': - msg = _('Flexvisor failed to delete snapshot %(id)s: ' - '%(status)s.') % {'id': snapshot['id'], - 'status': ret} - raise exception.VolumeBackendAPIException(data=msg) - else: - msg = _('Flexvisor failed to delete snapshot (failed to ' - 'get event) %(id)s.') % {'id': snapshot['id']} - raise exception.VolumeBackendAPIException(data=msg) - elif ret == errno.ENODATA: - LOG.info('Flexvisor snapshot %(id)s not existed.', - {'id': snapshot['id']}) - elif ret != 0: - msg = _('Flexvisor failed to delete snapshot %(id)s: ' - '%(status)s.') % {'id': snapshot['id'], 'status': ret} - raise exception.VolumeBackendAPIException(data=msg) - else: - LOG.info('Flexvisor succeeded to delete snapshot %(id)s.', - {'id': snapshot['id']}) - - def get_volume_stats(self, refresh=False): - """Get volume stats. - - If 'refresh' is True, run update the stats first. - """ - if refresh: - self._update_volume_stats() - - return self._stats - - def _get_pools(self): - pools = [] - qpools = [] - # Defined access pool by cinder configuration. - defined_pool = self.configuration.dpl_pool - if defined_pool: - qpools.append(defined_pool) - else: - try: - ret, output = self.dpl.get_pools() - if ret == 0: - for poolUuid, poolName in output.get('children', []): - qpools.append(poolUuid) - else: - LOG.error("Flexvisor failed to get pool list." 
- " (Error: %d)", ret) - except Exception as e: - LOG.error("Flexvisor failed to get pool list due to " - "%s.", e) - - # Query pool detail information - for poolid in qpools: - ret, output = self._get_pool_info(poolid) - if ret == 0: - pool = {} - pool['pool_name'] = output['metadata']['pool_uuid'] - pool['total_capacity_gb'] = ( - self._convert_size_GB( - int(output['metadata']['total_capacity']))) - pool['free_capacity_gb'] = ( - self._convert_size_GB( - int(output['metadata']['available_capacity']))) - pool['allocated_capacity_gb'] = ( - self._convert_size_GB( - int(output['metadata']['used_capacity']))) - pool['QoS_support'] = False - pool['reserved_percentage'] = 0 - pools.append(pool) - else: - LOG.warning("Failed to query pool %(id)s status " - "%(ret)d.", {'id': poolid, 'ret': ret}) - continue - return pools - - def _update_volume_stats(self, refresh=False): - """Return the current state of the volume service. - - If 'refresh' is True, run the update first. - """ - data = {} - pools = self._get_pools() - data['volume_backend_name'] = ( - self.configuration.safe_get('volume_backend_name')) - location_info = '%(driver)s:%(host)s:%(volume)s' % { - 'driver': self.__class__.__name__, - 'host': self.configuration.san_ip, - 'volume': self.configuration.dpl_pool - } - try: - ret, output = self.dpl.get_server_info() - if ret == 0: - data['vendor_name'] = output['metadata']['vendor'] - data['driver_version'] = output['metadata']['version'] - data['storage_protocol'] = 'iSCSI' - data['location_info'] = location_info - data['consistencygroup_support'] = True - data['consistent_group_snapshot_enabled'] = True - data['pools'] = pools - self._stats = data - except Exception as e: - LOG.error('Failed to get server info due to ' - '%(state)s.', {'state': e}) - return self._stats - - def do_setup(self, context): - """Any initialization the volume driver does while starting.""" - self.context = context - LOG.info('Activate Flexvisor cinder volume driver.') - - def 
check_for_setup_error(self): - """Check DPL can connect properly.""" - pass - - def _get_pool_info(self, poolid): - """Query pool information.""" - ret, output = self.dpl.get_pool(poolid) - if ret == errno.EAGAIN: - ret, event_uuid = self._get_event_uuid(output) - if ret == 0: - status = self._wait_event(self.dpl.get_pool_status, poolid, - event_uuid) - if status['state'] != 'available': - msg = _('Flexvisor failed to get pool info %(id)s: ' - '%(status)s.') % {'id': poolid, 'status': ret} - raise exception.VolumeBackendAPIException(data=msg) - else: - ret = 0 - output = status.get('output', {}) - else: - LOG.error('Flexvisor failed to get pool %(id)s info.', - {'id': poolid}) - raise exception.VolumeBackendAPIException( - data="failed to get event") - elif ret != 0: - msg = _('Flexvisor failed to get pool info %(id)s: ' - '%(status)s.') % {'id': poolid, 'status': ret} - raise exception.VolumeBackendAPIException(data=msg) - else: - LOG.debug('Flexvisor succeeded to get pool info.') - return ret, output diff --git a/cinder/volume/drivers/prophetstor/options.py b/cinder/volume/drivers/prophetstor/options.py deleted file mode 100644 index e5eb051e0..000000000 --- a/cinder/volume/drivers/prophetstor/options.py +++ /dev/null @@ -1,31 +0,0 @@ -# Copyright (c) 2014 ProphetStor, Inc. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- - -from oslo_config import cfg - -from cinder.volume import configuration - -DPL_OPTS = [ - cfg.StrOpt('dpl_pool', - default='', - help='DPL pool uuid in which DPL volumes are stored.'), - cfg.PortOpt('dpl_port', - default=8357, - help='DPL port number.'), -] - -CONF = cfg.CONF -CONF.register_opts(DPL_OPTS, group=configuration.SHARED_CONF_GROUP) diff --git a/cinder/volume/drivers/pure.py b/cinder/volume/drivers/pure.py deleted file mode 100644 index f13ed3ece..000000000 --- a/cinder/volume/drivers/pure.py +++ /dev/null @@ -1,2027 +0,0 @@ -# Copyright (c) 2014 Pure Storage, Inc. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -""" -Volume driver for Pure Storage FlashArray storage system. - -This driver requires Purity version 4.0.0 or later. 
-""" - -import functools -import math -import platform -import re -import uuid - -from oslo_config import cfg -from oslo_log import log as logging -from oslo_utils import excutils -from oslo_utils import units -import six - -from cinder import context -from cinder import exception -from cinder.i18n import _ -from cinder import interface -from cinder.objects import fields -from cinder import utils -from cinder.volume import configuration -from cinder.volume import driver -from cinder.volume.drivers.san import san -from cinder.volume import utils as volume_utils -from cinder.volume import volume_types -from cinder.zonemanager import utils as fczm_utils - -try: - from purestorage import purestorage -except ImportError: - purestorage = None - -LOG = logging.getLogger(__name__) - -PURE_OPTS = [ - cfg.StrOpt("pure_api_token", - help="REST API authorization token."), - cfg.BoolOpt("pure_automatic_max_oversubscription_ratio", - default=True, - help="Automatically determine an oversubscription ratio based " - "on the current total data reduction values. If used " - "this calculated value will override the " - "max_over_subscription_ratio config option."), - # These are used as default settings. In future these can be overridden - # by settings in volume-type. - cfg.IntOpt("pure_replica_interval_default", default=900, - help="Snapshot replication interval in seconds."), - cfg.IntOpt("pure_replica_retention_short_term_default", default=14400, - help="Retain all snapshots on target for this " - "time (in seconds.)"), - cfg.IntOpt("pure_replica_retention_long_term_per_day_default", default=3, - help="Retain how many snapshots for each day."), - cfg.IntOpt("pure_replica_retention_long_term_default", default=7, - help="Retain snapshots per day on target for this time " - "(in days.)"), - cfg.BoolOpt("pure_eradicate_on_delete", - default=False, - help="When enabled, all Pure volumes, snapshots, and " - "protection groups will be eradicated at the time of " - "deletion in Cinder. 
Data will NOT be recoverable after " - "a delete with this set to True! When disabled, volumes " - "and snapshots will go into pending eradication state " - "and can be recovered." - ) -] - -CONF = cfg.CONF -CONF.register_opts(PURE_OPTS, group=configuration.SHARED_CONF_GROUP) - -INVALID_CHARACTERS = re.compile(r"[^-a-zA-Z0-9]") -GENERATED_NAME = re.compile(r".*-[a-f0-9]{32}-cinder$") - -REPLICATION_CG_NAME = "cinder-group" - -CHAP_SECRET_KEY = "PURE_TARGET_CHAP_SECRET" - -ERR_MSG_NOT_EXIST = "does not exist" -ERR_MSG_HOST_NOT_EXIST = "Host " + ERR_MSG_NOT_EXIST -ERR_MSG_NO_SUCH_SNAPSHOT = "No such volume or snapshot" -ERR_MSG_PENDING_ERADICATION = "has been destroyed" -ERR_MSG_ALREADY_EXISTS = "already exists" -ERR_MSG_COULD_NOT_BE_FOUND = "could not be found" -ERR_MSG_ALREADY_INCLUDES = "already includes" -ERR_MSG_ALREADY_ALLOWED = "already allowed on" -ERR_MSG_NOT_CONNECTED = "is not connected" -ERR_MSG_ALREADY_BELONGS = "already belongs to" -ERR_MSG_EXISTING_CONNECTIONS = "cannot be deleted due to existing connections" -ERR_MSG_ALREADY_IN_USE = "already in use" - -EXTRA_SPECS_REPL_ENABLED = "replication_enabled" - -UNMANAGED_SUFFIX = '-unmanaged' -MANAGE_SNAP_REQUIRED_API_VERSIONS = ['1.4', '1.5'] -REPLICATION_REQUIRED_API_VERSIONS = ['1.3', '1.4', '1.5'] - -REPL_SETTINGS_PROPAGATE_RETRY_INTERVAL = 5 # 5 seconds -REPL_SETTINGS_PROPAGATE_MAX_RETRIES = 36 # 36 * 5 = 180 seconds - -HOST_CREATE_MAX_RETRIES = 5 - -USER_AGENT_BASE = 'OpenStack Cinder' - - -def pure_driver_debug_trace(f): - """Log the method entrance and exit including active backend name. - - This should only be used on VolumeDriver class methods. It depends on - having a 'self' argument that is a PureBaseVolumeDriver. 
- """ - @functools.wraps(f) - def wrapper(*args, **kwargs): - driver = args[0] # self - cls_name = driver.__class__.__name__ - method_name = "%(cls_name)s.%(method)s" % {"cls_name": cls_name, - "method": f.__name__} - backend_name = driver._get_current_array()._backend_id - LOG.debug("[%(backend_name)s] Enter %(method_name)s", - {"method_name": method_name, "backend_name": backend_name}) - result = f(*args, **kwargs) - LOG.debug("[%(backend_name)s] Leave %(method_name)s", - {"method_name": method_name, "backend_name": backend_name}) - return result - - return wrapper - - -class PureBaseVolumeDriver(san.SanDriver): - """Performs volume management on Pure Storage FlashArray.""" - - SUPPORTED_REST_API_VERSIONS = ['1.2', '1.3', '1.4', '1.5'] - - # ThirdPartySystems wiki page - CI_WIKI_NAME = "Pure_Storage_CI" - - def __init__(self, *args, **kwargs): - execute = kwargs.pop("execute", utils.execute) - super(PureBaseVolumeDriver, self).__init__(execute=execute, *args, - **kwargs) - self.configuration.append_config_values(PURE_OPTS) - self._array = None - self._storage_protocol = None - self._backend_name = (self.configuration.volume_backend_name or - self.__class__.__name__) - self._replication_target_arrays = [] - self._replication_pg_name = REPLICATION_CG_NAME - self._replication_interval = None - self._replication_retention_short_term = None - self._replication_retention_long_term = None - self._replication_retention_long_term_per_day = None - self._is_replication_enabled = False - self._active_backend_id = kwargs.get('active_backend_id', None) - self._failed_over_primary_array = None - self._user_agent = '%(base)s %(class)s/%(version)s (%(platform)s)' % { - 'base': USER_AGENT_BASE, - 'class': self.__class__.__name__, - 'version': self.VERSION, - 'platform': platform.platform() - } - - def parse_replication_configs(self): - self._replication_interval = ( - self.configuration.pure_replica_interval_default) - self._replication_retention_short_term = ( - 
self.configuration.pure_replica_retention_short_term_default) - self._replication_retention_long_term = ( - self.configuration.pure_replica_retention_long_term_default) - self._replication_retention_long_term_per_day = ( - self.configuration. - pure_replica_retention_long_term_per_day_default) - - retention_policy = self._generate_replication_retention() - replication_devices = self.configuration.safe_get( - 'replication_device') - - primary_array = self._get_current_array() - if replication_devices: - for replication_device in replication_devices: - backend_id = replication_device["backend_id"] - san_ip = replication_device["san_ip"] - api_token = replication_device["api_token"] - verify_https = replication_device.get("ssl_cert_verify", False) - ssl_cert_path = replication_device.get("ssl_cert_path", None) - target_array = self._get_flasharray( - san_ip, - api_token, - verify_https=verify_https, - ssl_cert_path=ssl_cert_path - ) - target_array._backend_id = backend_id - LOG.debug("Adding san_ip %(san_ip)s to replication_targets.", - {"san_ip": san_ip}) - api_version = target_array.get_rest_version() - if api_version not in REPLICATION_REQUIRED_API_VERSIONS: - msg = _('Unable to do replication with Purity REST ' - 'API version %(api_version)s, requires one of ' - '%(required_versions)s.') % { - 'api_version': api_version, - 'required_versions': REPLICATION_REQUIRED_API_VERSIONS - } - raise exception.PureDriverException(reason=msg) - target_array_info = target_array.get() - target_array.array_name = target_array_info["array_name"] - target_array.array_id = target_array_info["id"] - LOG.debug("secondary array name: %s", target_array.array_name) - LOG.debug("secondary array id: %s", target_array.array_id) - self._replication_target_arrays.append(target_array) - self._setup_replicated_pgroups(primary_array, - self._replication_target_arrays, - self._replication_pg_name, - self._replication_interval, - retention_policy) - - def do_setup(self, context): - """Performs 
driver initialization steps that could raise exceptions.""" - if purestorage is None: - msg = _("Missing 'purestorage' python module, ensure the library" - " is installed and available.") - raise exception.PureDriverException(msg) - - # Raises PureDriverException if unable to connect and PureHTTPError - # if unable to authenticate. - purestorage.FlashArray.supported_rest_versions = \ - self.SUPPORTED_REST_API_VERSIONS - self._array = self._get_flasharray( - self.configuration.san_ip, - api_token=self.configuration.pure_api_token, - verify_https=self.configuration.driver_ssl_cert_verify, - ssl_cert_path=self.configuration.driver_ssl_cert_path - ) - self._array._backend_id = self._backend_name - LOG.debug("Primary array backend_id: %s", - self.configuration.config_group) - LOG.debug("Primary array name: %s", self._array.array_name) - LOG.debug("Primary array id: %s", self._array.array_id) - - self.do_setup_replication() - - # If we have failed over at some point we need to adjust our current - # array based on the one that we have failed over to - if (self._active_backend_id is not None and - self._active_backend_id != self._array._backend_id): - for array in self._replication_target_arrays: - if array._backend_id == self._active_backend_id: - self._failed_over_primary_array = self._array - self._array = array - break - - def do_setup_replication(self): - replication_devices = self.configuration.safe_get( - 'replication_device') - if replication_devices: - self.parse_replication_configs() - self._is_replication_enabled = True - - def check_for_setup_error(self): - # Avoid inheriting check_for_setup_error from SanDriver, which checks - # for san_password or san_private_key, not relevant to our driver. 
- pass - - @pure_driver_debug_trace - def create_volume(self, volume): - """Creates a volume.""" - vol_name = self._get_vol_name(volume) - vol_size = volume["size"] * units.Gi - current_array = self._get_current_array() - current_array.create_volume(vol_name, vol_size) - - self._add_to_group_if_needed(volume, vol_name) - self._enable_replication_if_needed(current_array, volume) - - @pure_driver_debug_trace - def create_volume_from_snapshot(self, volume, snapshot): - """Creates a volume from a snapshot.""" - vol_name = self._get_vol_name(volume) - if snapshot['group_snapshot'] or snapshot['cgsnapshot']: - snap_name = self._get_pgroup_snap_name_from_snapshot(snapshot) - else: - snap_name = self._get_snap_name(snapshot) - - if not snap_name: - msg = _('Unable to determine snapshot name in Purity for snapshot ' - '%(id)s.') % {'id': snapshot['id']} - raise exception.PureDriverException(reason=msg) - - current_array = self._get_current_array() - - current_array.copy_volume(snap_name, vol_name) - self._extend_if_needed(current_array, - vol_name, - snapshot["volume_size"], - volume["size"]) - - self._add_to_group_if_needed(volume, vol_name) - self._enable_replication_if_needed(current_array, volume) - - def _enable_replication_if_needed(self, array, volume): - if self._is_volume_replicated_type(volume): - self._enable_replication(array, volume) - - def _enable_replication(self, array, volume): - """Add volume to replicated protection group.""" - try: - array.set_pgroup(self._replication_pg_name, - addvollist=[self._get_vol_name(volume)]) - except purestorage.PureHTTPError as err: - with excutils.save_and_reraise_exception() as ctxt: - if (err.code == 400 and - ERR_MSG_ALREADY_BELONGS in err.text): - # Happens if the volume already added to PG. 
- ctxt.reraise = False - LOG.warning("Adding Volume to Protection Group " - "failed with message: %s", err.text) - - @pure_driver_debug_trace - def create_cloned_volume(self, volume, src_vref): - """Creates a clone of the specified volume.""" - vol_name = self._get_vol_name(volume) - src_name = self._get_vol_name(src_vref) - - # Check which backend the source volume is on. In case of failover - # the source volume may be on the secondary array. - current_array = self._get_current_array() - current_array.copy_volume(src_name, vol_name) - self._extend_if_needed(current_array, - vol_name, - src_vref["size"], - volume["size"]) - - self._add_to_group_if_needed(volume, vol_name) - self._enable_replication_if_needed(current_array, volume) - - def _extend_if_needed(self, array, vol_name, src_size, vol_size): - """Extend the volume from size src_size to size vol_size.""" - if vol_size > src_size: - vol_size = vol_size * units.Gi - array.extend_volume(vol_name, vol_size) - - @pure_driver_debug_trace - def delete_volume(self, volume): - """Disconnect all hosts and delete the volume""" - vol_name = self._get_vol_name(volume) - current_array = self._get_current_array() - try: - connected_hosts = current_array.list_volume_private_connections( - vol_name) - for host_info in connected_hosts: - host_name = host_info["host"] - self._disconnect_host(current_array, host_name, vol_name) - current_array.destroy_volume(vol_name) - if self.configuration.pure_eradicate_on_delete: - current_array.eradicate_volume(vol_name) - except purestorage.PureHTTPError as err: - with excutils.save_and_reraise_exception() as ctxt: - if (err.code == 400 and - ERR_MSG_NOT_EXIST in err.text): - # Happens if the volume does not exist. - ctxt.reraise = False - LOG.warning("Volume deletion failed with message: %s", - err.text) - - @pure_driver_debug_trace - def create_snapshot(self, snapshot): - """Creates a snapshot.""" - - # Get current array in case we have failed over via replication. 
- current_array = self._get_current_array() - vol_name, snap_suff = self._get_snap_name(snapshot).split(".") - current_array.create_snapshot(vol_name, suffix=snap_suff) - - @pure_driver_debug_trace - def delete_snapshot(self, snapshot): - """Deletes a snapshot.""" - - # Get current array in case we have failed over via replication. - current_array = self._get_current_array() - - snap_name = self._get_snap_name(snapshot) - try: - current_array.destroy_volume(snap_name) - if self.configuration.pure_eradicate_on_delete: - current_array.eradicate_volume(snap_name) - except purestorage.PureHTTPError as err: - with excutils.save_and_reraise_exception() as ctxt: - if err.code == 400 and ( - ERR_MSG_NOT_EXIST in err.text or - ERR_MSG_NO_SUCH_SNAPSHOT in err.text or - ERR_MSG_PENDING_ERADICATION in err.text): - # Happens if the snapshot does not exist. - ctxt.reraise = False - LOG.warning("Unable to delete snapshot, assuming " - "already deleted. Error: %s", err.text) - - def ensure_export(self, context, volume): - pass - - def create_export(self, context, volume, connector): - pass - - def initialize_connection(self, volume, connector): - """Connect the volume to the specified initiator in Purity. - - This implementation is specific to the host type (iSCSI, FC, etc). - """ - raise NotImplementedError - - def _get_host(self, array, connector): - """Get a Purity Host that corresponds to the host in the connector. - - This implementation is specific to the host type (iSCSI, FC, etc). - """ - raise NotImplementedError - - def _disconnect(self, array, volume, connector, **kwargs): - """Disconnect the volume from the host described by the connector. - - If no connector is specified it will remove *all* attachments for - the volume. - - Returns True if it was the hosts last connection. 
- """ - vol_name = self._get_vol_name(volume) - if connector is None: - # If no connector was provided it is a force-detach, remove all - # host connections for the volume - LOG.warning("Removing ALL host connections for volume %s", - vol_name) - connections = array.list_volume_private_connections(vol_name) - for connection in connections: - self._disconnect_host(array, connection['host'], vol_name) - return False - else: - # Normal case with a specific initiator to detach it from - host = self._get_host(array, connector) - if host: - host_name = host["name"] - return self._disconnect_host(array, host_name, vol_name) - else: - LOG.error("Unable to disconnect host from volume, could not " - "determine Purity host") - return False - - @pure_driver_debug_trace - def terminate_connection(self, volume, connector, **kwargs): - """Terminate connection.""" - # Get current array in case we have failed over via replication. - current_array = self._get_current_array() - self._disconnect(current_array, volume, connector, **kwargs) - - @pure_driver_debug_trace - def _disconnect_host(self, array, host_name, vol_name): - """Return value indicates if host should be cleaned up.""" - try: - array.disconnect_host(host_name, vol_name) - except purestorage.PureHTTPError as err: - with excutils.save_and_reraise_exception() as ctxt: - if err.code == 400 and ERR_MSG_NOT_CONNECTED in err.text: - # Happens if the host and volume are not connected. 
- ctxt.reraise = False - LOG.error("Disconnection failed with message: " - "%(msg)s.", {"msg": err.text}) - connections = None - try: - connections = array.list_host_connections(host_name, private=True) - except purestorage.PureHTTPError as err: - with excutils.save_and_reraise_exception() as ctxt: - if err.code == 400 and ERR_MSG_NOT_EXIST in err.text: - ctxt.reraise = False - - # Assume still used if volumes are attached - host_still_used = bool(connections) - - if GENERATED_NAME.match(host_name) and not host_still_used: - LOG.info("Attempting to delete unneeded host %(host_name)r.", - {"host_name": host_name}) - try: - array.delete_host(host_name) - host_still_used = False - except purestorage.PureHTTPError as err: - with excutils.save_and_reraise_exception() as ctxt: - if err.code == 400: - if ERR_MSG_NOT_EXIST in err.text: - # Happens if the host is already deleted. - # This is fine though, just log so we know what - # happened. - ctxt.reraise = False - host_still_used = False - LOG.debug("Purity host deletion failed: " - "%(msg)s.", {"msg": err.text}) - if ERR_MSG_EXISTING_CONNECTIONS in err.text: - # If someone added a connection underneath us - # that's ok, just keep going. - ctxt.reraise = False - host_still_used = True - LOG.debug("Purity host deletion ignored: %(msg)s", - {"msg": err.text}) - return not host_still_used - - @pure_driver_debug_trace - def get_volume_stats(self, refresh=False): - """Return the current state of the volume service. - - If 'refresh' is True, run the update first. 
- """ - - if refresh: - LOG.debug("Updating volume stats.") - self._update_stats() - return self._stats - - def _update_stats(self): - """Set self._stats with relevant information.""" - current_array = self._get_current_array() - - # Collect info from the array - space_info = current_array.get(space=True) - perf_info = current_array.get(action='monitor')[0] # Always index 0 - hosts = current_array.list_hosts() - snaps = current_array.list_volumes(snap=True, pending=True) - pgroups = current_array.list_pgroups(pending=True) - - # Perform some translations and calculations - total_capacity = float(space_info["capacity"]) / units.Gi - used_space = float(space_info["total"]) / units.Gi - free_space = float(total_capacity - used_space) - prov_space, total_vols = self._get_provisioned_space() - total_hosts = len(hosts) - total_snaps = len(snaps) - total_pgroups = len(pgroups) - provisioned_space = float(prov_space) / units.Gi - thin_provisioning = self._get_thin_provisioning(provisioned_space, - used_space) - - # Start with some required info - data = dict( - volume_backend_name=self._backend_name, - vendor_name='Pure Storage', - driver_version=self.VERSION, - storage_protocol=self._storage_protocol, - ) - - # Add flags for supported features - data['consistencygroup_support'] = True - data['thin_provisioning_support'] = True - data['multiattach'] = False - data['QoS_support'] = False - - # Add capacity info for scheduler - data['total_capacity_gb'] = total_capacity - data['free_capacity_gb'] = free_space - data['reserved_percentage'] = self.configuration.reserved_percentage - data['provisioned_capacity'] = provisioned_space - data['max_over_subscription_ratio'] = thin_provisioning - - # Add the filtering/goodness functions - data['filter_function'] = self.get_filter_function() - data['goodness_function'] = self.get_goodness_function() - - # Add array metadata counts for filtering and weighing functions - data['total_volumes'] = total_vols - data['total_snapshots'] = 
total_snaps - data['total_hosts'] = total_hosts - data['total_pgroups'] = total_pgroups - - # Add performance stats for filtering and weighing functions - # IOPS - data['writes_per_sec'] = perf_info['writes_per_sec'] - data['reads_per_sec'] = perf_info['reads_per_sec'] - - # Bandwidth - data['input_per_sec'] = perf_info['input_per_sec'] - data['output_per_sec'] = perf_info['output_per_sec'] - - # Latency - data['usec_per_read_op'] = perf_info['usec_per_read_op'] - data['usec_per_write_op'] = perf_info['usec_per_write_op'] - data['queue_depth'] = perf_info['queue_depth'] - - # Replication - data["replication_enabled"] = self._is_replication_enabled - data["replication_type"] = ["async"] - data["replication_count"] = len(self._replication_target_arrays) - data["replication_targets"] = [array._backend_id for array - in self._replication_target_arrays] - self._stats = data - - def _get_provisioned_space(self): - """Sum up provisioned size of all volumes on array""" - volumes = self._get_current_array().list_volumes(pending=True) - return sum(item["size"] for item in volumes), len(volumes) - - def _get_thin_provisioning(self, provisioned_space, used_space): - """Get the current value for the thin provisioning ratio. - - If pure_automatic_max_oversubscription_ratio is True we will calculate - a value, if not we will respect the configuration option for the - max_over_subscription_ratio. - """ - if (self.configuration.pure_automatic_max_oversubscription_ratio and - used_space != 0 and provisioned_space != 0): - # If array is empty we can not calculate a max oversubscription - # ratio. In this case we look to the config option as a starting - # point. Once some volumes are actually created and some data is - # stored on the array a much more accurate number will be - # presented based on current usage. 
- thin_provisioning = provisioned_space / used_space - else: - thin_provisioning = self.configuration.max_over_subscription_ratio - - return thin_provisioning - - @pure_driver_debug_trace - def extend_volume(self, volume, new_size): - """Extend volume to new_size.""" - - # Get current array in case we have failed over via replication. - current_array = self._get_current_array() - - vol_name = self._get_vol_name(volume) - new_size = new_size * units.Gi - current_array.extend_volume(vol_name, new_size) - - def _add_volume_to_consistency_group(self, group_id, vol_name): - pgroup_name = self._get_pgroup_name_from_id(group_id) - current_array = self._get_current_array() - current_array.set_pgroup(pgroup_name, addvollist=[vol_name]) - - @pure_driver_debug_trace - def create_consistencygroup(self, context, group): - """Creates a consistencygroup.""" - - current_array = self._get_current_array() - current_array.create_pgroup(self._get_pgroup_name_from_id(group.id)) - - model_update = {'status': fields.ConsistencyGroupStatus.AVAILABLE} - return model_update - - def _create_cg_from_cgsnap(self, volumes, snapshots): - """Creates a new consistency group from a cgsnapshot. - - The new volumes will be consistent with the snapshot. - """ - for volume, snapshot in zip(volumes, snapshots): - self.create_volume_from_snapshot(volume, snapshot) - - def _create_cg_from_cg(self, group, source_group, volumes, source_vols): - """Creates a new consistency group from an existing cg. - - The new volumes will be in a consistent state, but this requires - taking a new temporary group snapshot and cloning from that. 
- """ - pgroup_name = self._get_pgroup_name_from_id(source_group.id) - tmp_suffix = '%s-tmp' % uuid.uuid4() - tmp_pgsnap_name = '%(pgroup_name)s.%(pgsnap_suffix)s' % { - 'pgroup_name': pgroup_name, - 'pgsnap_suffix': tmp_suffix, - } - LOG.debug('Creating temporary Protection Group snapshot %(snap_name)s ' - 'while cloning Consistency Group %(source_group)s.', - {'snap_name': tmp_pgsnap_name, - 'source_group': source_group.id}) - current_array = self._get_current_array() - current_array.create_pgroup_snapshot(pgroup_name, suffix=tmp_suffix) - try: - for source_vol, cloned_vol in zip(source_vols, volumes): - source_snap_name = self._get_pgroup_vol_snap_name( - pgroup_name, - tmp_suffix, - self._get_vol_name(source_vol) - ) - cloned_vol_name = self._get_vol_name(cloned_vol) - current_array.copy_volume(source_snap_name, cloned_vol_name) - self._add_volume_to_consistency_group( - group.id, - cloned_vol_name - ) - finally: - self._delete_pgsnapshot(tmp_pgsnap_name) - - @pure_driver_debug_trace - def create_consistencygroup_from_src(self, context, group, volumes, - cgsnapshot=None, snapshots=None, - source_cg=None, source_vols=None): - self.create_consistencygroup(context, group) - if cgsnapshot and snapshots: - self._create_cg_from_cgsnap(volumes, - snapshots) - elif source_cg: - self._create_cg_from_cg(group, source_cg, volumes, source_vols) - - return None, None - - @pure_driver_debug_trace - def delete_consistencygroup(self, context, group, volumes): - """Deletes a consistency group.""" - - try: - pgroup_name = self._get_pgroup_name_from_id(group.id) - current_array = self._get_current_array() - current_array.destroy_pgroup(pgroup_name) - if self.configuration.pure_eradicate_on_delete: - current_array.eradicate_pgroup(pgroup_name) - except purestorage.PureHTTPError as err: - with excutils.save_and_reraise_exception() as ctxt: - if (err.code == 400 and - (ERR_MSG_PENDING_ERADICATION in err.text or - ERR_MSG_NOT_EXIST in err.text)): - # Treat these as a "success" case 
since we are trying - # to delete them anyway. - ctxt.reraise = False - LOG.warning("Unable to delete Protection Group: %s", - err.text) - - for volume in volumes: - self.delete_volume(volume) - - return None, None - - @pure_driver_debug_trace - def update_consistencygroup(self, context, group, - add_volumes=None, remove_volumes=None): - - pgroup_name = self._get_pgroup_name_from_id(group.id) - if add_volumes: - addvollist = [self._get_vol_name(vol) for vol in add_volumes] - else: - addvollist = [] - - if remove_volumes: - remvollist = [self._get_vol_name(vol) for vol in remove_volumes] - else: - remvollist = [] - - current_array = self._get_current_array() - current_array.set_pgroup(pgroup_name, addvollist=addvollist, - remvollist=remvollist) - - return None, None, None - - @pure_driver_debug_trace - def create_cgsnapshot(self, context, cgsnapshot, snapshots): - """Creates a cgsnapshot.""" - - cg_id = self._get_group_id_from_snap(cgsnapshot) - pgroup_name = self._get_pgroup_name_from_id(cg_id) - pgsnap_suffix = self._get_pgroup_snap_suffix(cgsnapshot) - current_array = self._get_current_array() - current_array.create_pgroup_snapshot(pgroup_name, suffix=pgsnap_suffix) - - return None, None - - def _delete_pgsnapshot(self, pgsnap_name): - current_array = self._get_current_array() - try: - # FlashArray.destroy_pgroup is also used for deleting - # pgroup snapshots. The underlying REST API is identical. - current_array.destroy_pgroup(pgsnap_name) - if self.configuration.pure_eradicate_on_delete: - current_array.eradicate_pgroup(pgsnap_name) - except purestorage.PureHTTPError as err: - with excutils.save_and_reraise_exception() as ctxt: - if (err.code == 400 and - (ERR_MSG_PENDING_ERADICATION in err.text or - ERR_MSG_NOT_EXIST in err.text)): - # Treat these as a "success" case since we are trying - # to delete them anyway. 
- ctxt.reraise = False - LOG.warning("Unable to delete Protection Group " - "Snapshot: %s", err.text) - - @pure_driver_debug_trace - def delete_cgsnapshot(self, context, cgsnapshot, snapshots): - """Deletes a cgsnapshot.""" - - pgsnap_name = self._get_pgroup_snap_name(cgsnapshot) - self._delete_pgsnapshot(pgsnap_name) - - return None, None - - def _validate_manage_existing_ref(self, existing_ref, is_snap=False): - """Ensure that an existing_ref is valid and return volume info - - If the ref is not valid throw a ManageExistingInvalidReference - exception with an appropriate error. - - Will return volume or snapshot information from the array for - the object specified by existing_ref. - """ - if "name" not in existing_ref or not existing_ref["name"]: - raise exception.ManageExistingInvalidReference( - existing_ref=existing_ref, - reason=_("manage_existing requires a 'name'" - " key to identify an existing volume.")) - - if is_snap: - # Purity snapshot names are prefixed with the source volume name. - ref_vol_name, ref_snap_suffix = existing_ref['name'].split('.') - else: - ref_vol_name = existing_ref['name'] - - current_array = self._get_current_array() - try: - volume_info = current_array.get_volume(ref_vol_name, snap=is_snap) - if volume_info: - if is_snap: - for snap in volume_info: - if snap['name'] == existing_ref['name']: - return snap - else: - return volume_info - except purestorage.PureHTTPError as err: - with excutils.save_and_reraise_exception() as ctxt: - if (err.code == 400 and - ERR_MSG_NOT_EXIST in err.text): - ctxt.reraise = False - - # If volume information was unable to be retrieved we need - # to throw a Invalid Reference exception. 
- raise exception.ManageExistingInvalidReference( - existing_ref=existing_ref, - reason=_("Unable to find Purity ref with name=%s") % ref_vol_name) - - def _add_to_group_if_needed(self, volume, vol_name): - if volume['group_id']: - # If the query blows up just let it raise up the stack, the volume - # should be put into an error state - group = volume_utils.group_get_by_id(volume['group_id']) - if volume_utils.is_group_a_cg_snapshot_type(group): - self._add_volume_to_consistency_group( - volume['group_id'], - vol_name - ) - elif volume['consistencygroup_id']: - self._add_volume_to_consistency_group( - volume['consistencygroup_id'], - vol_name - ) - - def create_group(self, ctxt, group): - """Creates a group. - - :param ctxt: the context of the caller. - :param group: the Group object of the group to be created. - :returns: model_update - """ - if volume_utils.is_group_a_cg_snapshot_type(group): - return self.create_consistencygroup(ctxt, group) - - # If it wasn't a consistency group request ignore it and we'll rely on - # the generic group implementation. - raise NotImplementedError() - - def delete_group(self, ctxt, group, volumes): - """Deletes a group. - - :param ctxt: the context of the caller. - :param group: the Group object of the group to be deleted. - :param volumes: a list of Volume objects in the group. - :returns: model_update, volumes_model_update - """ - if volume_utils.is_group_a_cg_snapshot_type(group): - return self.delete_consistencygroup(ctxt, group, volumes) - - # If it wasn't a consistency group request ignore it and we'll rely on - # the generic group implementation. - raise NotImplementedError() - - def update_group(self, ctxt, group, - add_volumes=None, remove_volumes=None): - """Updates a group. - - :param ctxt: the context of the caller. - :param group: the Group object of the group to be updated. - :param add_volumes: a list of Volume objects to be added. - :param remove_volumes: a list of Volume objects to be removed. 
- :returns: model_update, add_volumes_update, remove_volumes_update - """ - - if volume_utils.is_group_a_cg_snapshot_type(group): - return self.update_consistencygroup(ctxt, - group, - add_volumes, - remove_volumes) - - # If it wasn't a consistency group request ignore it and we'll rely on - # the generic group implementation. - raise NotImplementedError() - - def create_group_from_src(self, ctxt, group, volumes, - group_snapshot=None, snapshots=None, - source_group=None, source_vols=None): - """Creates a group from source. - - :param ctxt: the context of the caller. - :param group: the Group object to be created. - :param volumes: a list of Volume objects in the group. - :param group_snapshot: the GroupSnapshot object as source. - :param snapshots: a list of snapshot objects in group_snapshot. - :param source_group: the Group object as source. - :param source_vols: a list of volume objects in the source_group. - :returns: model_update, volumes_model_update - """ - if volume_utils.is_group_a_cg_snapshot_type(group): - return self.create_consistencygroup_from_src(ctxt, - group, - volumes, - group_snapshot, - snapshots, - source_group, - source_vols) - - # If it wasn't a consistency group request ignore it and we'll rely on - # the generic group implementation. - raise NotImplementedError() - - def create_group_snapshot(self, ctxt, group_snapshot, snapshots): - """Creates a group_snapshot. - - :param ctxt: the context of the caller. - :param group_snapshot: the GroupSnapshot object to be created. - :param snapshots: a list of Snapshot objects in the group_snapshot. - :returns: model_update, snapshots_model_update - """ - if volume_utils.is_group_a_cg_snapshot_type(group_snapshot): - return self.create_cgsnapshot(ctxt, group_snapshot, snapshots) - - # If it wasn't a consistency group request ignore it and we'll rely on - # the generic group implementation. 
- raise NotImplementedError() - - def delete_group_snapshot(self, ctxt, group_snapshot, snapshots): - """Deletes a group_snapshot. - - :param ctxt: the context of the caller. - :param group_snapshot: the GroupSnapshot object to be deleted. - :param snapshots: a list of snapshot objects in the group_snapshot. - :returns: model_update, snapshots_model_update - """ - if volume_utils.is_group_a_cg_snapshot_type(group_snapshot): - return self.delete_cgsnapshot(ctxt, group_snapshot, snapshots) - - # If it wasn't a consistency group request ignore it and we'll rely on - # the generic group implementation. - raise NotImplementedError() - - @pure_driver_debug_trace - def manage_existing(self, volume, existing_ref): - """Brings an existing backend storage object under Cinder management. - - We expect a volume name in the existing_ref that matches one in Purity. - """ - - self._validate_manage_existing_ref(existing_ref) - - ref_vol_name = existing_ref['name'] - current_array = self._get_current_array() - connected_hosts = \ - current_array.list_volume_private_connections(ref_vol_name) - if len(connected_hosts) > 0: - raise exception.ManageExistingInvalidReference( - existing_ref=existing_ref, - reason=_("%(driver)s manage_existing cannot manage a volume " - "connected to hosts. Please disconnect this volume " - "from existing hosts before importing" - ) % {'driver': self.__class__.__name__}) - new_vol_name = self._get_vol_name(volume) - LOG.info("Renaming existing volume %(ref_name)s to %(new_name)s", - {"ref_name": ref_vol_name, "new_name": new_vol_name}) - self._rename_volume_object(ref_vol_name, - new_vol_name, - raise_not_exist=True) - return None - - @pure_driver_debug_trace - def manage_existing_get_size(self, volume, existing_ref): - """Return size of volume to be managed by manage_existing. - - We expect a volume name in the existing_ref that matches one in Purity. 
- """ - - volume_info = self._validate_manage_existing_ref(existing_ref) - size = self._round_bytes_to_gib(volume_info['size']) - - return size - - def _rename_volume_object(self, old_name, new_name, raise_not_exist=False): - """Rename a volume object (could be snapshot) in Purity. - - This will not raise an exception if the object does not exist - """ - current_array = self._get_current_array() - try: - current_array.rename_volume(old_name, new_name) - except purestorage.PureHTTPError as err: - with excutils.save_and_reraise_exception() as ctxt: - if (err.code == 400 and - ERR_MSG_NOT_EXIST in err.text): - ctxt.reraise = raise_not_exist - LOG.warning("Unable to rename %(old_name)s, error " - "message: %(error)s", - {"old_name": old_name, "error": err.text}) - return new_name - - @pure_driver_debug_trace - def unmanage(self, volume): - """Removes the specified volume from Cinder management. - - Does not delete the underlying backend storage object. - - The volume will be renamed with "-unmanaged" as a suffix - """ - - vol_name = self._get_vol_name(volume) - unmanaged_vol_name = vol_name + UNMANAGED_SUFFIX - LOG.info("Renaming existing volume %(ref_name)s to %(new_name)s", - {"ref_name": vol_name, "new_name": unmanaged_vol_name}) - self._rename_volume_object(vol_name, unmanaged_vol_name) - - def _verify_manage_snap_api_requirements(self): - current_array = self._get_current_array() - api_version = current_array.get_rest_version() - if api_version not in MANAGE_SNAP_REQUIRED_API_VERSIONS: - msg = _('Unable to do manage snapshot operations with Purity REST ' - 'API version %(api_version)s, requires ' - '%(required_versions)s.') % { - 'api_version': api_version, - 'required_versions': MANAGE_SNAP_REQUIRED_API_VERSIONS - } - raise exception.PureDriverException(reason=msg) - - def manage_existing_snapshot(self, snapshot, existing_ref): - """Brings an existing backend storage object under Cinder management. 
- - We expect a snapshot name in the existing_ref that matches one in - Purity. - """ - self._verify_manage_snap_api_requirements() - self._validate_manage_existing_ref(existing_ref, is_snap=True) - ref_snap_name = existing_ref['name'] - new_snap_name = self._get_snap_name(snapshot) - LOG.info("Renaming existing snapshot %(ref_name)s to " - "%(new_name)s", {"ref_name": ref_snap_name, - "new_name": new_snap_name}) - self._rename_volume_object(ref_snap_name, - new_snap_name, - raise_not_exist=True) - return None - - def manage_existing_snapshot_get_size(self, snapshot, existing_ref): - """Return size of snapshot to be managed by manage_existing. - - We expect a snapshot name in the existing_ref that matches one in - Purity. - """ - self._verify_manage_snap_api_requirements() - snap_info = self._validate_manage_existing_ref(existing_ref, - is_snap=True) - size = self._round_bytes_to_gib(snap_info['size']) - return size - - def unmanage_snapshot(self, snapshot): - """Removes the specified snapshot from Cinder management. - - Does not delete the underlying backend storage object. - - We expect a snapshot name in the existing_ref that matches one in - Purity. - """ - self._verify_manage_snap_api_requirements() - snap_name = self._get_snap_name(snapshot) - unmanaged_snap_name = snap_name + UNMANAGED_SUFFIX - LOG.info("Renaming existing snapshot %(ref_name)s to " - "%(new_name)s", {"ref_name": snap_name, - "new_name": unmanaged_snap_name}) - self._rename_volume_object(snap_name, unmanaged_snap_name) - - def get_manageable_volumes(self, cinder_volumes, marker, limit, offset, - sort_keys, sort_dirs): - """List volumes on the backend available for management by Cinder. - - Rule out volumes that are attached to a Purity host or that - are already in the list of cinder_volumes. We return references - of the volume names for any others. 
- """ - array = self._get_current_array() - pure_vols = array.list_volumes() - hosts_with_connections = array.list_hosts(all=True) - - # Put together a map of volumes that are connected to hosts - connected_vols = {} - for host in hosts_with_connections: - vol = host.get('vol') - if vol: - connected_vols[vol] = host['name'] - - # Put together a map of existing cinder volumes on the array - # so we can lookup cinder id's by purity volume names - existing_vols = {} - for cinder_vol in cinder_volumes: - existing_vols[self._get_vol_name(cinder_vol)] = cinder_vol.name_id - - manageable_vols = [] - for pure_vol in pure_vols: - vol_name = pure_vol['name'] - cinder_id = existing_vols.get(vol_name) - is_safe = True - reason_not_safe = None - host = connected_vols.get(vol_name) - - if host: - is_safe = False - reason_not_safe = _('Volume connected to host %s.') % host - - if cinder_id: - is_safe = False - reason_not_safe = _('Volume already managed.') - - manageable_vols.append({ - 'reference': {'name': vol_name}, - 'size': self._round_bytes_to_gib(pure_vol['size']), - 'safe_to_manage': is_safe, - 'reason_not_safe': reason_not_safe, - 'cinder_id': cinder_id, - 'extra_info': None, - }) - - return volume_utils.paginate_entries_list( - manageable_vols, marker, limit, offset, sort_keys, sort_dirs) - - def get_manageable_snapshots(self, cinder_snapshots, marker, limit, offset, - sort_keys, sort_dirs): - """List snapshots on the backend available for management by Cinder.""" - array = self._get_current_array() - pure_snapshots = array.list_volumes(snap=True) - - # Put together a map of existing cinder snapshots on the array - # so we can lookup cinder id's by purity snapshot names - existing_snapshots = {} - for cinder_snap in cinder_snapshots: - name = self._get_snap_name(cinder_snap) - existing_snapshots[name] = cinder_snap.id - - manageable_snaps = [] - for pure_snap in pure_snapshots: - snap_name = pure_snap['name'] - cinder_id = existing_snapshots.get(snap_name) - is_safe = 
True - reason_not_safe = None - - if cinder_id: - is_safe = False - reason_not_safe = _("Snapshot already managed.") - - manageable_snaps.append({ - 'reference': {'name': snap_name}, - 'size': self._round_bytes_to_gib(pure_snap['size']), - 'safe_to_manage': is_safe, - 'reason_not_safe': reason_not_safe, - 'cinder_id': cinder_id, - 'extra_info': None, - 'source_reference': {'name': pure_snap['source']}, - }) - - return volume_utils.paginate_entries_list( - manageable_snaps, marker, limit, offset, sort_keys, sort_dirs) - - @staticmethod - def _round_bytes_to_gib(size): - return int(math.ceil(float(size) / units.Gi)) - - def _get_flasharray(self, san_ip, api_token, rest_version=None, - verify_https=None, ssl_cert_path=None): - - array = purestorage.FlashArray(san_ip, - api_token=api_token, - rest_version=rest_version, - verify_https=verify_https, - ssl_cert=ssl_cert_path, - user_agent=self._user_agent) - array_info = array.get() - array.array_name = array_info["array_name"] - array.array_id = array_info["id"] - LOG.debug("connected to %(array_name)s with REST API %(api_version)s", - {"array_name": array.array_name, - "api_version": array._rest_version}) - return array - - @staticmethod - def _client_version_greater_than(version): - module_version = [int(v) for v in purestorage.VERSION.split('.')] - for limit_version, actual_version in zip(version, module_version): - if actual_version > limit_version: - return True - return False - - @staticmethod - def _get_vol_name(volume): - """Return the name of the volume Purity will use.""" - return volume["name"] + "-cinder" - - @staticmethod - def _get_snap_name(snapshot): - """Return the name of the snapshot that Purity will use.""" - return "%s-cinder.%s" % (snapshot["volume_name"], snapshot["name"]) - - @staticmethod - def _get_pgroup_name_from_id(id): - return "consisgroup-%s-cinder" % id - - @staticmethod - def _get_pgroup_snap_suffix(group_snapshot): - return "cgsnapshot-%s-cinder" % group_snapshot['id'] - - @staticmethod 
- def _get_group_id_from_snap(group_snap): - # We don't really care what kind of group it is, if we are calling - # this look for a group_id and fall back to using a consistencygroup_id - id = None - try: - id = group_snap['group_id'] - except AttributeError: - pass - if id is None: - try: - id = group_snap['consistencygroup_id'] - except AttributeError: - pass - return id - - @classmethod - def _get_pgroup_snap_name(cls, group_snapshot): - """Return the name of the pgroup snapshot that Purity will use""" - group_id = cls._get_group_id_from_snap(group_snapshot) - return "%s.%s" % (cls._get_pgroup_name_from_id(group_id), - cls._get_pgroup_snap_suffix(group_snapshot)) - - @staticmethod - def _get_pgroup_vol_snap_name(pg_name, pgsnap_suffix, volume_name): - return "%(pgroup_name)s.%(pgsnap_suffix)s.%(volume_name)s" % { - 'pgroup_name': pg_name, - 'pgsnap_suffix': pgsnap_suffix, - 'volume_name': volume_name, - } - - def _get_pgroup_snap_name_from_snapshot(self, snapshot): - """Return the name of the snapshot that Purity will use.""" - - group_snap = None - if snapshot.group_snapshot: - group_snap = snapshot.group_snapshot - elif snapshot.cgsnapshot: - group_snap = snapshot.cgsnapshot - - pg_vol_snap_name = "%(group_snap)s.%(volume_name)s-cinder" % { - 'group_snap': self._get_pgroup_snap_name(group_snap), - 'volume_name': snapshot.volume_name - } - return pg_vol_snap_name - - @staticmethod - def _generate_purity_host_name(name): - """Return a valid Purity host name based on the name passed in.""" - if len(name) > 23: - name = name[0:23] - name = INVALID_CHARACTERS.sub("-", name) - name = name.lstrip("-") - return "{name}-{uuid}-cinder".format(name=name, uuid=uuid.uuid4().hex) - - @staticmethod - def _connect_host_to_vol(array, host_name, vol_name): - connection = None - try: - connection = array.connect_host(host_name, vol_name) - except purestorage.PureHTTPError as err: - if err.code == 400 and ERR_MSG_HOST_NOT_EXIST in err.text: - LOG.debug('Unable to attach volume to 
host: %s', err.text) - raise exception.PureRetryableException() - with excutils.save_and_reraise_exception() as ctxt: - if (err.code == 400 and - ERR_MSG_ALREADY_EXISTS in err.text): - # Happens if the volume is already connected to the host. - # Treat this as a success. - ctxt.reraise = False - LOG.debug("Volume connection already exists for Purity " - "host with message: %s", err.text) - - # Get the info for the existing connection. - connected_hosts = ( - array.list_volume_private_connections(vol_name)) - for host_info in connected_hosts: - if host_info["host"] == host_name: - connection = host_info - break - if not connection: - raise exception.PureDriverException( - reason=_("Unable to connect or find connection to host")) - - return connection - - def retype(self, context, volume, new_type, diff, host): - """Retype from one volume type to another on the same backend. - - For a Pure Array there is currently no differentiation between types - of volumes other than some being part of a protection group to be - replicated. - """ - previous_vol_replicated = self._is_volume_replicated_type(volume) - - new_vol_replicated = False - if new_type: - specs = new_type.get("extra_specs") - if specs and EXTRA_SPECS_REPL_ENABLED in specs: - replication_capability = specs[EXTRA_SPECS_REPL_ENABLED] - # Do not validate settings, ignore invalid. - new_vol_replicated = (replication_capability == " True") - - if previous_vol_replicated and not new_vol_replicated: - # Remove from protection group. - self._disable_replication(volume) - elif not previous_vol_replicated and new_vol_replicated: - # Add to protection group. 
- self._enable_replication(self._get_current_array(), volume) - - return True, None - - @pure_driver_debug_trace - def _disable_replication(self, volume): - """Disable replication on the given volume.""" - - current_array = self._get_current_array() - LOG.debug("Disabling replication for volume %(id)s residing on " - "array %(backend_id)s.", - {"id": volume["id"], - "backend_id": current_array._backend_id}) - try: - current_array.set_pgroup(self._replication_pg_name, - remvollist=([self._get_vol_name(volume)])) - except purestorage.PureHTTPError as err: - with excutils.save_and_reraise_exception() as ctxt: - if (err.code == 400 and - ERR_MSG_COULD_NOT_BE_FOUND in err.text): - ctxt.reraise = False - LOG.warning("Disable replication on volume failed: " - "already disabled: %s", err.text) - else: - LOG.error("Disable replication on volume failed with " - "message: %s", err.text) - - @pure_driver_debug_trace - def failover_host(self, context, volumes, secondary_id=None, groups=None): - """Failover backend to a secondary array - - This action will not affect the original volumes in any - way and it will stay as is. If a subsequent failover is performed we - will simply overwrite the original (now unmanaged) volumes. - """ - - if secondary_id == 'default': - # We are going back to the 'original' driver config, just put - # our current array back to the primary. 
- if self._failed_over_primary_array: - self._set_current_array(self._failed_over_primary_array) - return secondary_id, [], [] - else: - msg = _('Unable to failback to "default", this can only be ' - 'done after a failover has completed.') - raise exception.InvalidReplicationTarget(message=msg) - - current_array = self._get_current_array() - LOG.debug("Failover replication for array %(primary)s to " - "%(secondary)s.", - {"primary": current_array._backend_id, - "secondary": secondary_id}) - - if secondary_id == current_array._backend_id: - raise exception.InvalidReplicationTarget( - reason=_("Secondary id can not be the same as primary array, " - "backend_id = %(secondary)s.") % - {"secondary": secondary_id} - ) - - secondary_array, pg_snap = self._find_failover_target(secondary_id) - LOG.debug("Starting failover from %(primary)s to %(secondary)s", - {"primary": current_array.array_name, - "secondary": secondary_array.array_name}) - - # NOTE(patrickeast): This currently requires a call with REST API 1.3. - # If we need to, create a temporary FlashArray for this operation. - api_version = secondary_array.get_rest_version() - LOG.debug("Current REST API for array id %(id)s is %(api_version)s", - {"id": secondary_array.array_id, "api_version": api_version}) - if api_version != '1.3': - target_array = self._get_flasharray( - secondary_array._target, - api_token=secondary_array._api_token, - rest_version='1.3', - verify_https=secondary_array._verify_https, - ssl_cert_path=secondary_array._ssl_cert - ) - else: - target_array = secondary_array - - volume_snaps = target_array.get_volume(pg_snap['name'], - snap=True, - pgroup=True) - - # We only care about volumes that are in the list we are given. 
- vol_names = set() - for vol in volumes: - vol_names.add(self._get_vol_name(vol)) - - for snap in volume_snaps: - vol_name = snap['name'].split('.')[-1] - if vol_name in vol_names: - vol_names.remove(vol_name) - LOG.debug('Creating volume %(vol)s from replicated snapshot ' - '%(snap)s', {'vol': vol_name, 'snap': snap['name']}) - secondary_array.copy_volume(snap['name'], - vol_name, - overwrite=True) - else: - LOG.debug('Ignoring unmanaged volume %(vol)s from replicated ' - 'snapshot %(snap)s.', {'vol': vol_name, - 'snap': snap['name']}) - # The only volumes remaining in the vol_names set have been left behind - # on the array and should be considered as being in an error state. - model_updates = [] - for vol in volumes: - if self._get_vol_name(vol) in vol_names: - model_updates.append({ - 'volume_id': vol['id'], - 'updates': { - 'status': 'error', - } - }) - - # After failover we want our current array to be swapped for the - # secondary array we just failed over to. - self._failed_over_primary_array = self._get_current_array() - self._set_current_array(secondary_array) - return secondary_array._backend_id, model_updates, [] - - def _does_pgroup_exist(self, array, pgroup_name): - """Return True/False""" - try: - array.get_pgroup(pgroup_name) - return True - except purestorage.PureHTTPError as err: - with excutils.save_and_reraise_exception() as ctxt: - if err.code == 400 and ERR_MSG_NOT_EXIST in err.text: - ctxt.reraise = False - return False - # Any unexpected exception to be handled by caller. - - @pure_driver_debug_trace - @utils.retry(exception.PureDriverException, - REPL_SETTINGS_PROPAGATE_RETRY_INTERVAL, - REPL_SETTINGS_PROPAGATE_MAX_RETRIES) - def _wait_until_target_group_setting_propagates( - self, target_array, pgroup_name_on_target): - # Wait for pgroup to show up on target array. 
- if self._does_pgroup_exist(target_array, pgroup_name_on_target): - return - else: - raise exception.PureDriverException(message= - _('Protection Group not ' - 'ready.')) - - @pure_driver_debug_trace - @utils.retry(exception.PureDriverException, - REPL_SETTINGS_PROPAGATE_RETRY_INTERVAL, - REPL_SETTINGS_PROPAGATE_MAX_RETRIES) - def _wait_until_source_array_allowed(self, source_array, pgroup_name): - result = source_array.get_pgroup(pgroup_name) - if result["targets"][0]["allowed"]: - return - else: - raise exception.PureDriverException(message=_('Replication not ' - 'allowed yet.')) - - def _get_pgroup_name_on_target(self, source_array_name, pgroup_name): - return "%s:%s" % (source_array_name, pgroup_name) - - @pure_driver_debug_trace - def _setup_replicated_pgroups(self, primary, secondaries, pg_name, - replication_interval, retention_policy): - self._create_protection_group_if_not_exist( - primary, pg_name) - - # Apply retention policies to a protection group. - # These retention policies will be applied on the replicated - # snapshots on the target array. - primary.set_pgroup(pg_name, **retention_policy) - - # Configure replication propagation frequency on a - # protection group. - primary.set_pgroup(pg_name, - replicate_frequency=replication_interval) - for target_array in secondaries: - try: - # Configure PG to replicate to target_array. - primary.set_pgroup(pg_name, - addtargetlist=[target_array.array_name]) - except purestorage.PureHTTPError as err: - with excutils.save_and_reraise_exception() as ctxt: - if err.code == 400 and ( - ERR_MSG_ALREADY_INCLUDES - in err.text): - ctxt.reraise = False - LOG.info("Skipping add target %(target_array)s" - " to protection group %(pgname)s" - " since it's already added.", - {"target_array": target_array.array_name, - "pgname": pg_name}) - - # Wait until "Target Group" setting propagates to target_array. 
- pgroup_name_on_target = self._get_pgroup_name_on_target( - primary.array_name, pg_name) - - for target_array in secondaries: - self._wait_until_target_group_setting_propagates( - target_array, - pgroup_name_on_target) - try: - # Configure the target_array to allow replication from the - # PG on source_array. - target_array.set_pgroup(pgroup_name_on_target, - allowed=True) - except purestorage.PureHTTPError as err: - with excutils.save_and_reraise_exception() as ctxt: - if (err.code == 400 and - ERR_MSG_ALREADY_ALLOWED in err.text): - ctxt.reraise = False - LOG.info("Skipping allow pgroup %(pgname)s on " - "target array %(target_array)s since " - "it is already allowed.", - {"pgname": pg_name, - "target_array": target_array.array_name}) - - # Wait until source array acknowledges previous operation. - self._wait_until_source_array_allowed(primary, pg_name) - # Start replication on the PG. - primary.set_pgroup(pg_name, replicate_enabled=True) - - @pure_driver_debug_trace - def _generate_replication_retention(self): - """Generates replication retention settings in Purity compatible format - - An example of the settings: - target_all_for = 14400 (i.e. 4 hours) - target_per_day = 6 - target_days = 4 - The settings above configure the target array to retain 4 hours of - the most recent snapshots. - After the most recent 4 hours, the target will choose 4 snapshots - per day from the previous 6 days for retention - - :return: a dictionary representing replication retention settings - """ - replication_retention = dict( - target_all_for=self._replication_retention_short_term, - target_per_day=self._replication_retention_long_term_per_day, - target_days=self._replication_retention_long_term - ) - return replication_retention - - @pure_driver_debug_trace - def _get_latest_replicated_pg_snap(self, - target_array, - source_array_name, - pgroup_name): - # Get all protection group snapshots. 
- snap_name = "%s:%s" % (source_array_name, pgroup_name) - LOG.debug("Looking for snap %(snap)s on array id %(array_id)s", - {"snap": snap_name, "array_id": target_array.array_id}) - pg_snaps = target_array.get_pgroup(snap_name, snap=True, transfer=True) - LOG.debug("Retrieved snapshots on target %(pg_snaps)s", - {"pg_snaps": pg_snaps}) - - # Only use snapshots that are replicated completely. - pg_snaps_filtered = [s for s in pg_snaps if s["progress"] == 1] - LOG.debug("Filtered list of snapshots %(pg_snaps_filtered)s", - {"pg_snaps_filtered": pg_snaps_filtered}) - - # Go through the protection group snapshots, latest first .... - # stop when we find required volume snapshot. - pg_snaps_filtered.sort(key=lambda x: x["created"], reverse=True) - LOG.debug("Sorted list of snapshots %(pg_snaps_filtered)s", - {"pg_snaps_filtered": pg_snaps_filtered}) - - pg_snap = pg_snaps_filtered[0] if pg_snaps_filtered else None - LOG.debug("Selecting snapshot %(pg_snap)s for failover.", - {"pg_snap": pg_snap}) - - return pg_snap - - @pure_driver_debug_trace - def _create_protection_group_if_not_exist(self, source_array, pgname): - try: - source_array.create_pgroup(pgname) - except purestorage.PureHTTPError as err: - with excutils.save_and_reraise_exception() as ctxt: - if err.code == 400 and ERR_MSG_ALREADY_EXISTS in err.text: - # Happens if the PG already exists - ctxt.reraise = False - LOG.warning("Skipping creation of PG %s since it " - "already exists.", pgname) - # We assume PG has already been setup with correct - # replication settings. 
- return - if err.code == 400 and ( - ERR_MSG_PENDING_ERADICATION in err.text): - ctxt.reraise = False - LOG.warning("Protection group %s is deleted but not" - " eradicated - will recreate.", pgname) - source_array.eradicate_pgroup(pgname) - source_array.create_pgroup(pgname) - - def _is_volume_replicated_type(self, volume): - ctxt = context.get_admin_context() - replication_flag = False - if volume["volume_type_id"]: - volume_type = volume_types.get_volume_type( - ctxt, volume["volume_type_id"]) - - specs = volume_type.get("extra_specs") - if specs and EXTRA_SPECS_REPL_ENABLED in specs: - replication_capability = specs[EXTRA_SPECS_REPL_ENABLED] - # Do not validate settings, ignore invalid. - replication_flag = (replication_capability == " True") - return replication_flag - - def _find_failover_target(self, secondary): - if not self._replication_target_arrays: - raise exception.PureDriverException( - reason=_("Unable to find failover target, no " - "secondary targets configured.")) - secondary_array = None - pg_snap = None - if secondary: - for array in self._replication_target_arrays: - if array._backend_id == secondary: - secondary_array = array - break - - if not secondary_array: - raise exception.InvalidReplicationTarget( - reason=_("Unable to determine secondary_array from" - " supplied secondary: %(secondary)s.") % - {"secondary": secondary} - ) - pg_snap = self._get_latest_replicated_pg_snap( - secondary_array, - self._get_current_array().array_name, - self._replication_pg_name - ) - else: - LOG.debug('No secondary array id specified, checking all targets.') - for array in self._replication_target_arrays: - try: - secondary_array = array - pg_snap = self._get_latest_replicated_pg_snap( - secondary_array, - self._get_current_array().array_name, - self._replication_pg_name - ) - if pg_snap: - break - except Exception: - LOG.exception('Error finding replicated pg snapshot ' - 'on %(secondary)s.', - {'secondary': array._backend_id}) - - if not secondary_array: - 
raise exception.PureDriverException( - reason=_("Unable to find viable secondary array from" - "configured targets: %(targets)s.") % - {"targets": six.text_type(self._replication_target_arrays)} - ) - - if not pg_snap: - raise exception.PureDriverException( - reason=_("Unable to find viable pg snapshot to use for" - "failover on selected secondary array: %(id)s.") % - {"id": secondary_array._backend_id} - ) - - return secondary_array, pg_snap - - def _get_current_array(self): - return self._array - - def _set_current_array(self, array): - self._array = array - - -@interface.volumedriver -class PureISCSIDriver(PureBaseVolumeDriver, san.SanISCSIDriver): - """OpenStack Volume Driver to support Pure Storage FlashArray. - - This version of the driver enables the use of iSCSI for - the underlying storage connectivity with the FlashArray. - """ - - VERSION = "6.0.0" - - def __init__(self, *args, **kwargs): - execute = kwargs.pop("execute", utils.execute) - super(PureISCSIDriver, self).__init__(execute=execute, *args, **kwargs) - self._storage_protocol = "iSCSI" - - def _get_host(self, array, connector): - """Return dict describing existing Purity host object or None.""" - hosts = array.list_hosts() - for host in hosts: - if connector["initiator"] in host["iqn"]: - return host - return None - - @pure_driver_debug_trace - def initialize_connection(self, volume, connector): - """Allow connection to connector and return connection info.""" - connection = self._connect(volume, connector) - target_ports = self._get_target_iscsi_ports() - multipath = connector.get("multipath", False) - - properties = self._build_connection_properties(connection, - target_ports, - multipath) - - if self.configuration.use_chap_auth: - properties["data"]["auth_method"] = "CHAP" - properties["data"]["auth_username"] = connection["auth_username"] - properties["data"]["auth_password"] = connection["auth_password"] - - initiator_update = connection.get("initiator_update", False) - if initiator_update: 
- properties["initiator_update"] = initiator_update - - return properties - - def _build_connection_properties(self, connection, target_ports, - multipath): - props = { - "driver_volume_type": "iscsi", - "data": { - "target_discovered": False, - "discard": True, - }, - } - - port_iter = iter(target_ports) - - target_luns = [] - target_iqns = [] - target_portals = [] - - for port in port_iter: - target_luns.append(connection["lun"]) - target_iqns.append(port["iqn"]) - target_portals.append(port["portal"]) - - # If we have multiple ports always report them. - if target_luns and target_iqns and target_portals: - props["data"]["target_luns"] = target_luns - props["data"]["target_iqns"] = target_iqns - props["data"]["target_portals"] = target_portals - - return props - - def _get_target_iscsi_ports(self): - """Return list of iSCSI-enabled port descriptions.""" - current_array = self._get_current_array() - ports = current_array.list_ports() - iscsi_ports = [port for port in ports if port["iqn"]] - if not iscsi_ports: - raise exception.PureDriverException( - reason=_("No iSCSI-enabled ports on target array.")) - return iscsi_ports - - @staticmethod - def _generate_chap_secret(): - return volume_utils.generate_password() - - def _get_chap_secret_from_init_data(self, initiator): - data = self.driver_utils.get_driver_initiator_data(initiator) - if data: - for d in data: - if d["key"] == CHAP_SECRET_KEY: - return d["value"] - return None - - def _get_chap_credentials(self, host, initiator): - username = host - password = self._get_chap_secret_from_init_data(initiator) - if not password: - password = self._generate_chap_secret() - success = self.driver_utils.insert_driver_initiator_data( - initiator, CHAP_SECRET_KEY, password) - if not success: - # The only reason the save would have failed is if someone - # else (read: another thread/instance of the driver) set - # one before we did. In that case just do another query. 
- password = self._get_chap_secret_from_init_data(initiator) - - return username, password - - @utils.retry(exception.PureRetryableException, - retries=HOST_CREATE_MAX_RETRIES) - def _connect(self, volume, connector): - """Connect the host and volume; return dict describing connection.""" - iqn = connector["initiator"] - - if self.configuration.use_chap_auth: - (chap_username, chap_password) = \ - self._get_chap_credentials(connector['host'], iqn) - - current_array = self._get_current_array() - vol_name = self._get_vol_name(volume) - host = self._get_host(current_array, connector) - - if host: - host_name = host["name"] - LOG.info("Re-using existing purity host %(host_name)r", - {"host_name": host_name}) - if self.configuration.use_chap_auth: - if not GENERATED_NAME.match(host_name): - LOG.error("Purity host %(host_name)s is not managed " - "by Cinder and can't have CHAP credentials " - "modified. Remove IQN %(iqn)s from the host " - "to resolve this issue.", - {"host_name": host_name, - "iqn": connector["initiator"]}) - raise exception.PureDriverException( - reason=_("Unable to re-use a host that is not " - "managed by Cinder with use_chap_auth=True,")) - elif chap_username is None or chap_password is None: - LOG.error("Purity host %(host_name)s is managed by " - "Cinder but CHAP credentials could not be " - "retrieved from the Cinder database.", - {"host_name": host_name}) - raise exception.PureDriverException( - reason=_("Unable to re-use host with unknown CHAP " - "credentials configured.")) - else: - host_name = self._generate_purity_host_name(connector["host"]) - LOG.info("Creating host object %(host_name)r with IQN:" - " %(iqn)s.", {"host_name": host_name, "iqn": iqn}) - try: - current_array.create_host(host_name, iqnlist=[iqn]) - except purestorage.PureHTTPError as err: - if (err.code == 400 and - (ERR_MSG_ALREADY_EXISTS in err.text or - ERR_MSG_ALREADY_IN_USE in err.text)): - # If someone created it before we could just retry, we will - # pick up the new 
host. - LOG.debug('Unable to create host: %s', err.text) - raise exception.PureRetryableException() - - if self.configuration.use_chap_auth: - try: - current_array.set_host(host_name, - host_user=chap_username, - host_password=chap_password) - except purestorage.PureHTTPError as err: - if (err.code == 400 and - ERR_MSG_HOST_NOT_EXIST in err.text): - # If the host disappeared out from under us that's ok, - # we will just retry and snag a new host. - LOG.debug('Unable to set CHAP info: %s', err.text) - raise exception.PureRetryableException() - - connection = self._connect_host_to_vol(current_array, - host_name, - vol_name) - - if self.configuration.use_chap_auth: - connection["auth_username"] = chap_username - connection["auth_password"] = chap_password - - return connection - - -@interface.volumedriver -class PureFCDriver(PureBaseVolumeDriver, driver.FibreChannelDriver): - """OpenStack Volume Driver to support Pure Storage FlashArray. - - This version of the driver enables the use of Fibre Channel for - the underlying storage connectivity with the FlashArray. It fully - supports the Cinder Fibre Channel Zone Manager. 
- """ - - VERSION = "4.0.0" - - def __init__(self, *args, **kwargs): - execute = kwargs.pop("execute", utils.execute) - super(PureFCDriver, self).__init__(execute=execute, *args, **kwargs) - self._storage_protocol = "FC" - self._lookup_service = fczm_utils.create_lookup_service() - - def _get_host(self, array, connector): - """Return dict describing existing Purity host object or None.""" - hosts = array.list_hosts() - for host in hosts: - for wwn in connector["wwpns"]: - if wwn.lower() in str(host["wwn"]).lower(): - return host - - @staticmethod - def _get_array_wwns(array): - """Return list of wwns from the array""" - ports = array.list_ports() - return [port["wwn"] for port in ports if port["wwn"]] - - @fczm_utils.add_fc_zone - @pure_driver_debug_trace - def initialize_connection(self, volume, connector): - """Allow connection to connector and return connection info.""" - current_array = self._get_current_array() - connection = self._connect(volume, connector) - target_wwns = self._get_array_wwns(current_array) - init_targ_map = self._build_initiator_target_map(target_wwns, - connector) - properties = { - "driver_volume_type": "fibre_channel", - "data": { - 'target_discovered': True, - "target_lun": connection["lun"], - "target_wwn": target_wwns, - 'initiator_target_map': init_targ_map, - "discard": True, - } - } - - return properties - - @utils.retry(exception.PureRetryableException, - retries=HOST_CREATE_MAX_RETRIES) - def _connect(self, volume, connector): - """Connect the host and volume; return dict describing connection.""" - wwns = connector["wwpns"] - - current_array = self._get_current_array() - vol_name = self._get_vol_name(volume) - host = self._get_host(current_array, connector) - - if host: - host_name = host["name"] - LOG.info("Re-using existing purity host %(host_name)r", - {"host_name": host_name}) - else: - host_name = self._generate_purity_host_name(connector["host"]) - LOG.info("Creating host object %(host_name)r with WWN:" - " %(wwn)s.", 
{"host_name": host_name, "wwn": wwns}) - try: - current_array.create_host(host_name, wwnlist=wwns) - except purestorage.PureHTTPError as err: - if (err.code == 400 and - (ERR_MSG_ALREADY_EXISTS in err.text or - ERR_MSG_ALREADY_IN_USE in err.text)): - # If someone created it before we could just retry, we will - # pick up the new host. - LOG.debug('Unable to create host: %s', err.text) - raise exception.PureRetryableException() - - return self._connect_host_to_vol(current_array, host_name, vol_name) - - def _build_initiator_target_map(self, target_wwns, connector): - """Build the target_wwns and the initiator target map.""" - init_targ_map = {} - - if self._lookup_service: - # use FC san lookup to determine which NSPs to use - # for the new VLUN. - dev_map = self._lookup_service.get_device_mapping_from_network( - connector['wwpns'], - target_wwns) - - for fabric_name in dev_map: - fabric = dev_map[fabric_name] - for initiator in fabric['initiator_port_wwn_list']: - if initiator not in init_targ_map: - init_targ_map[initiator] = [] - init_targ_map[initiator] += fabric['target_port_wwn_list'] - init_targ_map[initiator] = list(set( - init_targ_map[initiator])) - else: - init_targ_map = dict.fromkeys(connector["wwpns"], target_wwns) - - return init_targ_map - - @fczm_utils.remove_fc_zone - @pure_driver_debug_trace - def terminate_connection(self, volume, connector, **kwargs): - """Terminate connection.""" - current_array = self._get_current_array() - - no_more_connections = self._disconnect(current_array, volume, - connector, **kwargs) - - properties = {"driver_volume_type": "fibre_channel", "data": {}} - - if no_more_connections: - target_wwns = self._get_array_wwns(current_array) - init_targ_map = self._build_initiator_target_map(target_wwns, - connector) - properties["data"] = {"target_wwn": target_wwns, - "initiator_target_map": init_targ_map} - - return properties diff --git a/cinder/volume/drivers/qnap.py b/cinder/volume/drivers/qnap.py deleted file mode 100644 
index 4edbb48e7..000000000 --- a/cinder/volume/drivers/qnap.py +++ /dev/null @@ -1,1571 +0,0 @@ -# Copyright (c) 2016 QNAP Systems, Inc. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -""" -Volume driver for QNAP Storage. -This driver supports QNAP Storage for iSCSI. -""" -import base64 -import eventlet -import functools -import re -import ssl -import time -try: - import xml.etree.cElementTree as ET -except ImportError: - import xml.etree.ElementTree as ET - -from oslo_config import cfg -from oslo_log import log as logging -from oslo_utils import timeutils -from oslo_utils import units -import six -from six.moves import http_client -from six.moves import urllib - -from cinder import exception -from cinder.i18n import _ -from cinder import interface -from cinder.volume import configuration -from cinder.volume.drivers.san import san - -LOG = logging.getLogger(__name__) - -qnap_opts = [ - cfg.URIOpt('qnap_management_url', - help='The URL to management QNAP Storage'), - cfg.StrOpt('qnap_poolname', - help='The pool name in the QNAP Storage'), - cfg.StrOpt('qnap_storage_protocol', - default='iscsi', - help='Communication protocol to access QNAP storage'), -] - -CONF = cfg.CONF -CONF.register_opts(qnap_opts, group=configuration.SHARED_CONF_GROUP) - - -@interface.volumedriver -class QnapISCSIDriver(san.SanISCSIDriver): - """OpenStack driver to enable QNAP Storage. 
- - Version history: - 1.0.0 - Initial driver (Only iSCSI) - """ - - # ThirdPartySystems wiki page - CI_WIKI_NAME = "QNAP_CI" - - # TODO(smcginnis) Either remove this if CI requirement are met, or - # remove this driver in the Queens release per normal deprecation - SUPPORTED = False - - VERSION = '1.0.0' - - TIME_INTERVAL = 3 - - def __init__(self, *args, **kwargs): - """Initialize QnapISCSIDriver.""" - super(QnapISCSIDriver, self).__init__(*args, **kwargs) - self.api_executor = None - self.group_stats = {} - self.configuration.append_config_values(qnap_opts) - - def _check_config(self): - """Ensure that the flags we care about are set.""" - LOG.debug('in _check_config') - required_config = ['qnap_management_url', - 'san_login', - 'san_password', - 'qnap_poolname', - 'qnap_storage_protocol'] - - for attr in required_config: - if not getattr(self.configuration, attr, None): - raise exception.InvalidConfigurationValue( - reason=_('%s is not set.') % attr) - - def do_setup(self, context): - """Setup the QNAP Cinder volume driver.""" - self._check_config() - self.ctxt = context - LOG.debug('context: %s', context) - - # Setup API Executor - try: - self.api_executor = self.creat_api_executor() - except Exception: - LOG.error('Failed to create HTTP client. 
' - 'Check ip, port, username, password' - ' and make sure the array version is compatible') - msg = _('Failed to create HTTP client.') - raise exception.VolumeDriverException(message=msg) - - def check_for_setup_error(self): - """Check the status of setup.""" - pass - - def creat_api_executor(self): - """Create api executor by nas model.""" - self.api_executor = QnapAPIExecutor( - username=self.configuration.san_login, - password=self.configuration.san_password, - management_url=self.configuration.qnap_management_url) - - nas_model_name, internal_model_name, fw_version = ( - self.api_executor.get_basic_info( - self.configuration.qnap_management_url)) - - pattern = re.compile(r"^([A-Z]+)-?[A-Z]{0,2}(\d+)\d{2}(U|[a-z]*)") - matches = pattern.match(nas_model_name) - - if not matches: - return None - model_type = matches.group(1) - - ts_model_types = [ - "TS", "SS", "IS", "TVS", "TDS", "TBS" - ] - tes_model_types = [ - "TES" - ] - es_model_types = [ - "ES" - ] - - if model_type in ts_model_types: - if (fw_version.startswith("4.2") or fw_version.startswith("4.3")): - LOG.debug('Create TS API Executor') - # modify the pool name to pool index - self.configuration.qnap_poolname = ( - self._get_ts_model_pool_id( - self.configuration.qnap_poolname)) - - return (QnapAPIExecutorTS( - username=self.configuration.san_login, - password=self.configuration.san_password, - management_url=self.configuration.qnap_management_url)) - elif model_type in tes_model_types: - if 'TS' in internal_model_name: - if (fw_version.startswith("4.2") or - fw_version.startswith("4.3")): - LOG.debug('Create TS API Executor') - # modify the pool name to poole index - self.configuration.qnap_poolname = ( - self._get_ts_model_pool_id( - self.configuration.qnap_poolname)) - return (QnapAPIExecutorTS( - username=self.configuration.san_login, - password=self.configuration.san_password, - management_url=self.configuration.qnap_management_url)) - - if (fw_version.startswith("1.1.2") or - 
fw_version.startswith("1.1.3")): - LOG.debug('Create TES API Executor') - return (QnapAPIExecutorTES( - username=self.configuration.san_login, - password=self.configuration.san_password, - management_url=self.configuration.qnap_management_url)) - elif model_type in es_model_types: - if (fw_version.startswith("1.1.2") or - fw_version.startswith("1.1.3")): - LOG.debug('Create ES API Executor') - return (QnapAPIExecutor( - username=self.configuration.san_login, - password=self.configuration.san_password, - management_url=self.configuration.qnap_management_url)) - - msg = _('Model not support') - raise exception.VolumeDriverException(message=msg) - - def _get_ts_model_pool_id(self, pool_name): - """Modify the pool name to poole index.""" - pattern = re.compile(r"^(\d+)+|^Storage Pool (\d+)+") - matches = pattern.match(pool_name) - LOG.debug('matches.group(1): %s', matches.group(1)) - LOG.debug('matches.group(2): %s', matches.group(2)) - if matches.group(1): - return matches.group(1) - else: - return matches.group(2) - - def _gen_random_name(self): - return "cinder-{0}".format(timeutils. - utcnow(). - strftime('%Y%m%d%H%M%S%f')) - - def _get_volume_metadata(self, volume): - volume_metadata = {} - if 'volume_metadata' in volume: - for metadata in volume['volume_metadata']: - volume_metadata[metadata['key']] = metadata['value'] - return volume_metadata - - def _gen_lun_name(self): - create_lun_name = '' - while True: - create_lun_name = self._gen_random_name() - # If lunname with the name exists, need to change to - # a different name - created_lun = self.api_executor.get_lun_info( - LUNName=create_lun_name) - if created_lun is None: - break - return create_lun_name - - def create_volume(self, volume): - """Create a new volume.""" - start_time = time.time() - LOG.debug('in create_volume') - LOG.debug('volume: %s', volume.__dict__) - reserve = self.configuration.san_thin_provision - - # User could create two volume with the same name on horizon. 
- # Therefore, We should not use displayname to create lun on nas. - create_lun_name = self._gen_lun_name() - - create_lun_index = self.api_executor.create_lun( - volume, - self.configuration.qnap_poolname, - create_lun_name, - reserve) - - max_wait_sec = 600 - try_times = 0 - lun_naa = "" - while True: - created_lun = self.api_executor.get_lun_info( - LUNIndex=create_lun_index) - if created_lun.find('LUNNAA') is not None: - lun_naa = created_lun.find('LUNNAA').text - - try_times = try_times + 3 - eventlet.sleep(self.TIME_INTERVAL) - if(try_times > max_wait_sec or lun_naa is not None): - break - - LOG.debug('LUNNAA: %s', lun_naa) - _metadata = self._get_volume_metadata(volume) - _metadata['LUNNAA'] = lun_naa - _metadata['LunName'] = create_lun_name - - elapsed_time = time.time() - start_time - LOG.debug('create_volume elapsed_time: %s', elapsed_time) - - return {'metadata': _metadata} - - def delete_volume(self, volume): - """Delete the specified volume.""" - start_time = time.time() - LOG.debug('volume: %s', volume.__dict__) - lun_naa = self._get_lun_naa_from_volume_metadata(volume) - if lun_naa == '': - LOG.debug('Volume %s does not exist.', volume.id) - return - - del_lun = self.api_executor.get_lun_info(LUNNAA=lun_naa) - if del_lun is None: - LOG.debug('Volume %s does not exist.', lun_naa) - return - - lun_index = del_lun.find('LUNIndex').text - LOG.debug('LUNIndex: %s', lun_index) - - # if lun is mapping at target, the delete action will fail - if del_lun.find('LUNStatus').text == '2': - target_index = (del_lun.find('LUNTargetList') - .find('row').find('targetIndex').text) - LOG.debug('target_index: %s', target_index) - self.api_executor.disable_lun(lun_index, target_index) - self.api_executor.unmap_lun(lun_index, target_index) - - is_lun_busy = False - while True: - is_lun_busy = self.api_executor.delete_lun(lun_index) - if not is_lun_busy: - break - - elapsed_time = time.time() - start_time - LOG.debug('delete_volume elapsed_time: %s', elapsed_time) - - def 
_get_lun_naa_from_volume_metadata(self, volume): - lun_naa = '' - for metadata in volume['volume_metadata']: - if metadata['key'] == 'LUNNAA': - lun_naa = metadata['value'] - break - return lun_naa - - def _extend_lun(self, volume, lun_naa): - LOG.debug('volume: %s', volume.__dict__) - if lun_naa == '': - lun_naa = self._get_lun_naa_from_volume_metadata(volume) - - LOG.debug('lun_naa: %s', lun_naa) - selected_lun = self.api_executor.get_lun_info( - LUNNAA=lun_naa) - lun_index = selected_lun.find('LUNIndex').text - LOG.debug('LUNIndex: %s', lun_index) - lun_name = selected_lun.find('LUNName').text - LOG.debug('LUNName: %s', lun_name) - lun_thin_allocate = selected_lun.find('LUNThinAllocate').text - LOG.debug('LUNThinAllocate: %s', lun_thin_allocate) - lun_path = '' - if selected_lun.find('LUNPath') is not None: - lun_path = selected_lun.find('LUNPath').text - LOG.debug('LUNPath: %s', lun_path) - lun_status = selected_lun.find('LUNStatus').text - LOG.debug('LUNStatus: %s', lun_status) - - lun = {'LUNName': lun_name, - 'LUNCapacity': volume['size'], - 'LUNIndex': lun_index, - 'LUNThinAllocate': lun_thin_allocate, - 'LUNPath': lun_path, - 'LUNStatus': lun_status} - self.api_executor.edit_lun(lun) - - def _create_snapshot_name(self, lun_index): - create_snapshot_name = '' - while True: - # If snapshot with the name exists, need to change to - # a different name - create_snapshot_name = self._gen_random_name() - snapshot = self.api_executor.get_snapshot_info( - lun_index=lun_index, snapshot_name=create_snapshot_name) - if snapshot is None: - break - return create_snapshot_name - - def create_cloned_volume(self, volume, src_vref): - """Create a clone of the specified volume.""" - LOG.debug('Entering create_cloned_volume...') - LOG.debug('volume: %s', volume.__dict__) - LOG.debug('src_vref: %s', src_vref.__dict__) - LOG.debug('volume_metadata: %s', volume['volume_metadata']) - src_lun_naa = self._get_lun_naa_from_volume_metadata(src_vref) - # Below is to clone a volume 
from a snapshot in the snapshot manager - src_lun = self.api_executor.get_lun_info( - LUNNAA=src_lun_naa) - lun_index = src_lun.find('LUNIndex').text - LOG.debug('LUNIndex: %s', lun_index) - - # User could create two snapshot with the same name on horizon. - # Therefore, we should not use displayname to create snapshot on nas. - create_snapshot_name = self._create_snapshot_name(lun_index) - - self.api_executor.create_snapshot_api(lun_index, create_snapshot_name) - created_snapshot = self.api_executor.get_snapshot_info( - lun_index=lun_index, snapshot_name=create_snapshot_name) - snapshot_id = created_snapshot.find('snapshot_id').text - LOG.debug('snapshot_id: %s', snapshot_id) - - # User could create two volume with the same name on horizon. - # Therefore, We should not use displayname to create lun on nas. - while True: - cloned_lun_name = self._gen_random_name() - # If lunname with the name exists, need to change to - # a different name - cloned_lun = self.api_executor.get_lun_info( - LUNName=cloned_lun_name) - - if cloned_lun is None: - break - - self.api_executor.clone_snapshot(snapshot_id, cloned_lun_name) - - max_wait_sec = 600 - try_times = 0 - lun_naa = "" - while True: - created_lun = self.api_executor.get_lun_info( - LUNName=cloned_lun_name) - if created_lun.find('LUNNAA') is not None: - lun_naa = created_lun.find('LUNNAA').text - - try_times = try_times + 3 - eventlet.sleep(self.TIME_INTERVAL) - if(try_times > max_wait_sec or lun_naa is not None): - break - - LOG.debug('LUNNAA: %s', lun_naa) - if (volume['size'] > src_vref['size']): - self._extend_lun(volume, lun_naa) - - _metadata = self._get_volume_metadata(volume) - _metadata['LUNNAA'] = lun_naa - _metadata['LunName'] = cloned_lun_name - return {'metadata': _metadata} - - def create_snapshot(self, snapshot): - """Create a snapshot.""" - LOG.debug('snapshot: %s', snapshot.__dict__) - LOG.debug('snapshot id: %s', snapshot['id']) - - # Below is to create snapshot in the snapshot manager - 
LOG.debug('volume_metadata: %s', snapshot.volume['metadata']) - volume_metadata = snapshot.volume['metadata'] - LOG.debug('lun_naa: %s', volume_metadata['LUNNAA']) - lun_naa = volume_metadata['LUNNAA'] - src_lun = self.api_executor.get_lun_info(LUNNAA=lun_naa) - lun_index = src_lun.find('LUNIndex').text - LOG.debug('LUNIndex: %s', lun_index) - - # User could create two snapshot with the same name on horizon. - # Therefore, We should not use displayname to create snapshot on nas. - create_snapshot_name = self._create_snapshot_name(lun_index) - LOG.debug('create_snapshot_name: %s', create_snapshot_name) - - self.api_executor.create_snapshot_api(lun_index, create_snapshot_name) - max_wait_sec = 600 - try_times = 0 - snapshot_id = "" - while True: - created_snapshot = self.api_executor.get_snapshot_info( - lun_index=lun_index, snapshot_name=create_snapshot_name) - if created_snapshot is not None: - snapshot_id = created_snapshot.find('snapshot_id').text - - try_times = try_times + 3 - eventlet.sleep(self.TIME_INTERVAL) - if(try_times > max_wait_sec or created_snapshot is not None): - break - - LOG.debug('created_snapshot: %s', created_snapshot) - LOG.debug('snapshot_id: %s', snapshot_id) - - _metadata = snapshot['metadata'] - _metadata['snapshot_id'] = snapshot_id - _metadata['SnapshotName'] = create_snapshot_name - return {'metadata': _metadata} - - def delete_snapshot(self, snapshot): - """Delete a snapshot.""" - LOG.debug('snapshot: %s', snapshot.__dict__) - - # Below is to delete snapshot in the snapshot manager - snap_metadata = snapshot['metadata'] - if 'snapshot_id' not in snap_metadata: - return - LOG.debug('snapshot_id: %s', snap_metadata['snapshot_id']) - snapshot_id = snap_metadata['snapshot_id'] - - self.api_executor.api_delete_snapshot(snapshot_id) - - def create_volume_from_snapshot(self, volume, snapshot): - """Create a volume from a snapshot.""" - LOG.debug('in create_volume_from_snapshot') - LOG.debug('volume: %s', volume.__dict__) - 
LOG.debug('snapshot: %s', snapshot.__dict__) - # Below is to clone a volume from a snapshot in the snapshot manager - snap_metadata = snapshot['metadata'] - if 'snapshot_id' not in snap_metadata: - LOG.debug('Metadata of the snapshot is invalid') - msg = _('Metadata of the snapshot is invalid') - raise exception.VolumeDriverException(message=msg) - LOG.debug('snapshot_id: %s', snap_metadata['snapshot_id']) - snapshot_id = snap_metadata['snapshot_id'] - - # User could create two volume with the same name on horizon. - # Therefore, We should not use displayname to create lun on nas. - create_lun_name = self._gen_lun_name() - - self.api_executor.clone_snapshot( - snapshot_id, create_lun_name) - - max_wait_sec = 600 - try_times = 0 - lun_naa = "" - while True: - created_lun = self.api_executor.get_lun_info( - LUNName=create_lun_name) - if created_lun.find('LUNNAA') is not None: - lun_naa = created_lun.find('LUNNAA').text - - try_times = try_times + 3 - eventlet.sleep(self.TIME_INTERVAL) - if(try_times > max_wait_sec or lun_naa is not None): - break - - if (volume['size'] > snapshot['volume_size']): - self._extend_lun(volume, lun_naa) - - _metadata = self._get_volume_metadata(volume) - _metadata['LUNNAA'] = lun_naa - _metadata['LunName'] = create_lun_name - return {'metadata': _metadata} - - def get_volume_stats(self, refresh=False): - """Get volume stats. 
This is more of getting group stats.""" - LOG.debug('in get_volume_stats') - - if refresh: - backend_name = (self.configuration.safe_get( - 'volume_backend_name') or - self.__class__.__name__) - LOG.debug('backend_name=%(backend_name)s', - {'backend_name': backend_name}) - - selected_pool = self.api_executor.get_specific_poolinfo( - self.configuration.qnap_poolname) - capacity_bytes = int(selected_pool.find('capacity_bytes').text) - LOG.debug('capacity_bytes: %s GB', capacity_bytes / units.Gi) - freesize_bytes = int(selected_pool.find('freesize_bytes').text) - LOG.debug('freesize_bytes: %s GB', freesize_bytes / units.Gi) - provisioned_bytes = int(selected_pool.find('allocated_bytes').text) - driver_protocol = self.configuration.qnap_storage_protocol - LOG.debug( - 'provisioned_bytes: %s GB', provisioned_bytes / units.Gi) - self.group_stats = {'volume_backend_name': backend_name, - 'vendor_name': 'QNAP', - 'driver_version': self.VERSION, - 'storage_protocol': driver_protocol} - # single pool now, need support multiple pools in the future - single_pool = dict( - pool_name=self.configuration.qnap_poolname, - total_capacity_gb=capacity_bytes / units.Gi, - free_capacity_gb=freesize_bytes / units.Gi, - provisioned_capacity_gb=provisioned_bytes / units.Gi, - reserved_percentage=self.configuration.reserved_percentage, - QoS_support=False) - self.group_stats['pools'] = [single_pool] - - return self.group_stats - - def extend_volume(self, volume, new_size): - """Extend an existing volume.""" - LOG.debug('Entering extend_volume volume=%(vol)s ' - 'new_size=%(size)s', - {'vol': volume['display_name'], 'size': new_size}) - - volume['size'] = new_size - self._extend_lun(volume, '') - - def initialize_connection(self, volume, connector): - """Create a target with initiator iqn to attach a volume.""" - start_time = time.time() - LOG.debug('in initialize_connection') - LOG.debug('volume: %s', volume.__dict__) - LOG.debug('connector: %s', connector) - - lun_status = 
self.enum('createing', 'unmapped', 'mapped') - - ret = self.api_executor.get_iscsi_portal_info() - root = ET.fromstring(ret['data']) - iscsi_port = root.find('iSCSIPortal').find('servicePort').text - LOG.debug('iscsiPort: %s', iscsi_port) - target_iqn_prefix = root.find( - 'iSCSIPortal').find('targetIQNPrefix').text - LOG.debug('targetIQNPrefix: %s', target_iqn_prefix) - target_iqn_postfix = (root.find('iSCSIPortal'). - find('targetIQNPostfix').text) - LOG.debug('target_iqn_postfix: %s', target_iqn_postfix) - - lun_naa = self._get_lun_naa_from_volume_metadata(volume) - if lun_naa == '': - msg = (_("Volume %s does not exist.") % volume.id) - LOG.error(msg) - raise exception.VolumeDriverException(message=msg) - - LOG.debug('volume[name]: %s', volume['name']) - LOG.debug('volume[display_name]: %s', volume['display_name']) - - selected_lun = self.api_executor.get_lun_info(LUNNAA=lun_naa) - lun_index = selected_lun.find('LUNIndex').text - LOG.debug('LUNIndex: %s', lun_index) - - lun_owner = '' - lun_slot_id = '' - if selected_lun.find('lun_owner') is not None: - lun_owner = selected_lun.find('lun_owner').text - LOG.debug('lun_owner: %s', lun_owner) - lun_slot_id = '0' if (lun_owner == 'SCA') else '1' - LOG.debug('lun_slot_id: %s', lun_slot_id) - - ret = self.api_executor.get_all_iscsi_portal_setting() - root = ET.fromstring(ret['data']) - - target_index = '' - target_iqn = '' - - # find the targets have acl with connector['initiator'] - target_with_initiator_list = [] - target_acl_tree = root.find('targetACL') - target_acl_list = target_acl_tree.findall('row') - tmp_target_iqn = '' - for targetACL in target_acl_list: - tmp_target_iqn = targetACL.find('targetIQN').text - # If lun and the targetiqn in different controller, - # skip the targetiqn, in case lun in sca map to target of scb - LOG.debug('lun_slot_id: %s', lun_slot_id) - LOG.debug('tmp_target_iqn[-1]: %s', tmp_target_iqn[-1]) - if (lun_slot_id != ''): - if (lun_slot_id != tmp_target_iqn[-1]): - LOG.debug('skip 
the targetiqn') - continue - - target_init_info_list = targetACL.findall('targetInitInfo') - for targetInitInfo in target_init_info_list: - if(targetInitInfo.find('initiatorIQN').text == - connector['initiator']): - target_with_initiator_list.append( - targetACL.find('targetIndex').text) - - # find the target in target_with_initiator_list with ready status - target_tree = root.find('iSCSITargetList') - target_list = target_tree.findall('targetInfo') - for target_with_initiator in target_with_initiator_list: - for target in target_list: - if(target_with_initiator == target.find('targetIndex').text): - if int(target.find('targetStatus').text) >= 0: - target_index = target_with_initiator - target_iqn = target.find('targetIQN').text - - # create a new target if no target has ACL connector['initiator'] - LOG.debug('exist target_index: %s', target_index) - if not target_index: - target_name = self._gen_random_name() - LOG.debug('target_name: %s', target_name) - target_index = self.api_executor.create_target( - target_name, lun_owner) - LOG.debug('targetIndex: %s', target_index) - target_info = self.api_executor.get_target_info(target_index) - target_iqn = target_info.find('targetIQN').text - LOG.debug('target_iqn: %s', target_iqn) - - # TS NAS have to remove default ACL - default_acl = target_iqn_prefix[:target_iqn_prefix.find(":") + 1] - default_acl = default_acl + "all:iscsi.default.ffffff" - LOG.debug('default_acl: %s', default_acl) - self.api_executor.remove_target_init(target_iqn, default_acl) - # add ACL - self.api_executor.add_target_init( - target_iqn, connector['initiator']) - - LOG.debug('LUNStatus: %s', selected_lun.find('LUNStatus').text) - # lun does not map to any target - if selected_lun.find('LUNStatus').text == str(lun_status.unmapped): - self.api_executor.map_lun(lun_index, target_index) - - properties = {} - properties['target_discovered'] = True - properties['target_portal'] = (self.configuration.iscsi_ip_address + - ':' + iscsi_port) - - 
properties['target_iqn'] = target_iqn - LOG.debug('properties[target_iqn]: %s', properties['target_iqn']) - lun_naa = self._get_lun_naa_from_volume_metadata(volume) - LOG.debug('LUNNAA: %s', lun_naa) - # LUNNumber of lun will be updated after map lun to target, so here - # get lnu info again - mapped_lun = self.api_executor.get_lun_info(LUNNAA=lun_naa) - target_lun_id = int(mapped_lun.find('LUNTargetList').find( - 'row').find('LUNNumber').text) - LOG.debug('target_lun_id: %s', target_lun_id) - properties['target_lun'] = target_lun_id - properties['volume_id'] = volume['id'] # used by xen currently - - """Below are settings for multipath""" - target_iqns = [] - eth_list = self.api_executor.get_ethernet_ip(type='data') - target_portals = [] - target_portals.append( - self.configuration.iscsi_ip_address + ':' + iscsi_port) - target_iqns.append(target_iqn) - for eth in eth_list: - if eth == self.configuration.iscsi_ip_address: - continue - target_portals.append(eth + ':' + iscsi_port) - target_iqns.append(target_iqn) - - properties['target_portals'] = target_portals - properties['target_iqns'] = target_iqns - properties['target_luns'] = [target_lun_id] * len(target_portals) - LOG.debug('properties: %s', properties) - - elapsed_time = time.time() - start_time - LOG.debug('initialize_connection elapsed_time: %s', elapsed_time) - - return { - 'driver_volume_type': 'iscsi', - 'data': properties, - } - - def enum(self, *sequential, **named): - """Enum method.""" - enums = dict(zip(sequential, range(len(sequential))), **named) - return type('Enum', (), enums) - - def terminate_connection(self, volume, connector, **kwargs): - """Driver entry point to unattach a volume from an instance.""" - start_time = time.time() - LOG.debug('in terminate_connection') - LOG.debug('volume: %s', volume.__dict__) - LOG.debug('connector: %s', connector) - - # get lun index - lun_naa = self._get_lun_naa_from_volume_metadata(volume) - LOG.debug('lun_naa: %s', lun_naa) - selected_lun = 
self.api_executor.get_lun_info( - LUNNAA=lun_naa) - lun_index = selected_lun.find('LUNIndex').text - LOG.debug('LUNIndex: %s', lun_index) - - lun_status = self.enum('createing', 'unmapped', 'mapped') - - LOG.debug('LUNStatus: %s', selected_lun.find('LUNStatus').text) - LOG.debug('lun_status.mapped: %s', six.text_type(lun_status.mapped)) - # lun does not map to any target - if (selected_lun.find('LUNStatus').text) != ( - six.text_type(lun_status.mapped)): - return - - target_index = (selected_lun.find('LUNTargetList') - .find('row').find('targetIndex').text) - LOG.debug('target_index: %s', target_index) - - self.api_executor.disable_lun(lun_index, target_index) - self.api_executor.unmap_lun(lun_index, target_index) - - elapsed_time = time.time() - start_time - LOG.debug('terminate_connection elapsed_time : %s', elapsed_time) - - def update_migrated_volume( - self, context, volume, new_volume, original_volume_status): - """Return model update for migrated volume.""" - LOG.debug('volume: %s', volume.__dict__) - LOG.debug('new_volume: %s', new_volume.__dict__) - LOG.debug('original_volume_status: %s', original_volume_status) - - _metadata = self._get_volume_metadata(new_volume) - - # metadata will not be swap after migration wiht liberty version - # , and the metadata of new volume is diifferent with the metadata - # of original volume. 
Therefore, we need to update the migrated volume - if not hasattr(new_volume, '_orig_metadata'): - model_update = {'metadata': _metadata} - return model_update - - -def _connection_checker(func): - """Decorator to check session has expired or not.""" - @functools.wraps(func) - def inner_connection_checker(self, *args, **kwargs): - LOG.debug('in _connection_checker') - for attempts in range(5): - try: - return func(self, *args, **kwargs) - except exception.VolumeBackendAPIException as e: - pattern = re.compile( - r".*Session id expired$") - matches = pattern.match(six.text_type(e)) - if matches: - if attempts < 5: - LOG.debug('Session might have expired.' - ' Trying to relogin') - self._login() - continue - - LOG.error('Re-throwing Exception %s', e) - raise - return inner_connection_checker - - -class QnapAPIExecutor(object): - """Makes QNAP API calls for ES NAS.""" - - def __init__(self, *args, **kwargs): - """Init function.""" - self.sid = None - self.username = kwargs['username'] - self.password = kwargs['password'] - self.ip, self.port, self.ssl = ( - self._parse_management_url(kwargs['management_url'])) - self._login() - - def _parse_management_url(self, management_url): - pattern = re.compile(r"(http|https)\:\/\/(\S+)\:(\d+)") - matches = pattern.match(management_url) - if matches.group(1) == 'http': - management_ssl = False - else: - management_ssl = True - management_ip = matches.group(2) - management_port = matches.group(3) - return management_ip, management_port, management_ssl - - def get_basic_info(self, management_url): - """Get the basic information of NAS.""" - LOG.debug('in get_basic_info') - management_ip, management_port, management_ssl = ( - self._parse_management_url(management_url)) - connection = None - if management_ssl: - if hasattr(ssl, '_create_unverified_context'): - context = ssl._create_unverified_context() - connection = http_client.HTTPSConnection(management_ip, - port=management_port, - context=context) - else: - connection = 
http_client.HTTPSConnection(management_ip, - port=management_port) - else: - connection = ( - http_client.HTTPConnection(management_ip, management_port)) - - connection.request('GET', '/cgi-bin/authLogin.cgi') - response = connection.getresponse() - data = response.read() - LOG.debug('response data: %s', data) - - root = ET.fromstring(data) - - nas_model_name = root.find('model/displayModelName').text - internal_model_name = root.find('model/internalModelName').text - fw_version = root.find('firmware/version').text - - return nas_model_name, internal_model_name, fw_version - - def _execute_and_get_response_details(self, nas_ip, url, post_parm=None): - """Will prepare response after executing an http request.""" - LOG.debug('port: %(port)s, ssl: %(ssl)s', - {'port': self.port, 'ssl': self.ssl}) - - res_details = {} - - # Prepare the connection - if self.ssl: - if hasattr(ssl, '_create_unverified_context'): - context = ssl._create_unverified_context() - connection = http_client.HTTPSConnection(nas_ip, - port=self.port, - context=context) - else: - connection = http_client.HTTPSConnection( - nas_ip, port=self.port) - else: - connection = http_client.HTTPConnection(nas_ip, self.port) - - # Make the connection - if post_parm is None: - connection.request('GET', url) - else: - headers = { - "Content-Type": "application/x-www-form-urlencoded", - "charset": "utf-8"} - connection.request('POST', url, post_parm, headers) - - # Extract the response as the connection was successful - start_time = time.time() - response = connection.getresponse() - elapsed_time = time.time() - start_time - LOG.debug('cgi elapsed_time: %s', elapsed_time) - # Read the response - data = response.read() - LOG.debug('response data: %s', data) - # Extract http error msg if any - error_details = None - res_details['data'] = data - res_details['error'] = error_details - res_details['http_status'] = response.status - - connection.close() - return res_details - - def execute_login(self): - """Login and 
return sid.""" - params = {} - params['user'] = self.username - params['pwd'] = base64.b64encode(self.password.encode("utf-8")) - params['serviceKey'] = '1' - - sanitized_params = {} - - for key in params: - value = params[key] - if value is not None: - sanitized_params[key] = six.text_type(value) - - sanitized_params = urllib.parse.urlencode(sanitized_params) - url = ('/cgi-bin/authLogin.cgi?') - - res_details = self._execute_and_get_response_details( - self.ip, url, sanitized_params) - root = ET.fromstring(res_details['data']) - session_id = root.find('authSid').text - return session_id - - def _login(self): - """Execute Https Login API.""" - self.sid = self.execute_login() - LOG.debug('sid: %s', self.sid) - - def _get_res_details(self, url, **kwargs): - sanitized_params = {} - - for key, value in kwargs.items(): - LOG.debug('%(key)s = %(val)s', - {'key': key, 'val': value}) - if value is not None: - sanitized_params[key] = six.text_type(value) - - sanitized_params = urllib.parse.urlencode(sanitized_params) - LOG.debug('sanitized_params: %s', sanitized_params) - url = url + sanitized_params - LOG.debug('url: %s', url) - - res_details = self._execute_and_get_response_details(self.ip, url) - - return res_details - - @_connection_checker - def create_lun(self, volume, pool_name, create_lun_name, reserve): - """Create lun.""" - lun_thin_allocate = '' - if reserve: - lun_thin_allocate = '1' - else: - lun_thin_allocate = '0' - - res_details = self._get_res_details( - '/cgi-bin/disk/iscsi_lun_setting.cgi?', - func='add_lun', - FileIO='no', - LUNThinAllocate=lun_thin_allocate, - LUNName=create_lun_name, - LUNPath=create_lun_name, - poolID=pool_name, - lv_ifssd='no', - LUNCapacity=volume['size'], - lv_threshold='80', - sid=self.sid) - - root = ET.fromstring(res_details['data']) - - if root.find('authPassed').text == '0': - raise exception.VolumeBackendAPIException( - data=_('Session id expired')) - if root.find('result').text < '0': - raise 
exception.VolumeBackendAPIException( - data=_('Create volume %s failed') % volume['display_name']) - - return root.find('result').text - - @_connection_checker - def delete_lun(self, vol_id, *args, **kwargs): - """Execute delete lun API.""" - LOG.debug('Deleting volume id %s', vol_id) - res_details = self._get_res_details( - '/cgi-bin/disk/iscsi_lun_setting.cgi?', - func='remove_lun', - run_background='1', - ha_sync='1', - LUNIndex=vol_id, - sid=self.sid) - - data_set_is_busy = "-205041" - root = ET.fromstring(res_details['data']) - if root.find('authPassed').text == '0': - raise exception.VolumeBackendAPIException( - data=_('Session id expired')) - # dataset is busy, retry to delete - if root.find('result').text == data_set_is_busy: - return True - if root.find('result').text < '0': - msg = (_('Volume %s delete failed') % vol_id) - raise exception.VolumeBackendAPIException(data=msg) - - return False - - @_connection_checker - def get_specific_poolinfo(self, pool_id): - """Execute deleteInitiatorGrp API.""" - res_details = self._get_res_details( - '/cgi-bin/disk/disk_manage.cgi?', - store='poolInfo', - func='extra_get', - poolID=pool_id, - Pool_Info='1', - sid=self.sid) - - root = ET.fromstring(res_details['data']) - if root.find('authPassed').text == '0': - raise exception.VolumeBackendAPIException( - data=_('Session id expired')) - if root.find('result').text < '0': - raise exception.VolumeBackendAPIException( - data=_('get_specific_poolinfo failed')) - - pool_list = root.find('Pool_Index') - pool_info_tree = pool_list.findall('row') - for pool in pool_info_tree: - if pool_id == pool.find('poolID').text: - LOG.debug('poolID: %s', pool.find('poolID').text) - return pool - - @_connection_checker - def create_target(self, target_name, controller_name): - """Create target on nas and return target index.""" - res_details = self._get_res_details( - '/cgi-bin/disk/iscsi_target_setting.cgi?', - func='add_target', - targetName=target_name, - targetAlias=target_name, - 
bTargetDataDigest='0', - bTargetHeaderDigest='0', - bTargetClusterEnable='1', - controller_name=controller_name, - sid=self.sid) - - root = ET.fromstring(res_details['data']) - if root.find('authPassed').text == '0': - raise exception.VolumeBackendAPIException( - data=_('Session id expired')) - if root.find('result').text < '0': - raise exception.VolumeBackendAPIException( - data=_('Create target failed')) - - root = ET.fromstring(res_details['data']) - target_index = root.find('result').text - return target_index - - @_connection_checker - def add_target_init(self, target_iqn, init_iqn): - """Add target acl.""" - LOG.debug('targetIqn = %(tgt)s, initIqn = %(init)s', - {'tgt': target_iqn, 'init': init_iqn}) - res_details = self._get_res_details( - '/cgi-bin/disk/iscsi_target_setting.cgi?', - func='add_init', - targetIQN=target_iqn, - initiatorIQN=init_iqn, - initiatorAlias=init_iqn, - bCHAPEnable='0', - CHAPUserName='', - CHAPPasswd='', - bMutualCHAPEnable='0', - mutualCHAPUserName='', - mutualCHAPPasswd='', - ha_sync='1', - sid=self.sid) - - root = ET.fromstring(res_details['data']) - if root.find('authPassed').text == '0': - raise exception.VolumeBackendAPIException( - data=_('Session id expired')) - if root.find('result').text < '0': - raise exception.VolumeBackendAPIException( - data=_('Add target acl failed')) - - def remove_target_init(self, target_iqn, init_iqn): - """Remote target acl.""" - pass - - @_connection_checker - def map_lun(self, lun_index, target_index): - """Map lun to sepecific target.""" - LOG.debug('LUNIndex: %(lun)s, targetIndex: %(tgt)s', - {'lun': lun_index, 'tgt': target_index}) - res_details = self._get_res_details( - '/cgi-bin/disk/iscsi_target_setting.cgi?', - func='add_lun', - LUNIndex=lun_index, - targetIndex=target_index, - sid=self.sid) - - root = ET.fromstring(res_details['data']) - if root.find('authPassed').text == '0': - raise exception.VolumeBackendAPIException( - data=_('Session id expired')) - if root.find('result').text < 
'0': - raise exception.VolumeBackendAPIException(data=_( - "Map lun %(lun_index)s to target %(target_index)s failed") % - {'lun_index': six.text_type(lun_index), - 'target_index': six.text_type(target_index)}) - - @_connection_checker - def disable_lun(self, lun_index, target_index): - """Disable lun from sepecific target.""" - res_details = self._get_res_details( - '/cgi-bin/disk/iscsi_target_setting.cgi?', - func='edit_lun', - LUNIndex=lun_index, - targetIndex=target_index, - LUNEnable=0, - sid=self.sid) - - root = ET.fromstring(res_details['data']) - if root.find('authPassed').text == '0': - raise exception.VolumeBackendAPIException( - data=_('Session id expired')) - if root.find('result').text < '0': - raise exception.VolumeBackendAPIException(data=_( - 'Disable lun %(lun_index)s from target %(target_index)s failed' - ) % {'lun_index': lun_index, 'target_index': target_index}) - - @_connection_checker - def unmap_lun(self, lun_index, target_index): - """Unmap lun to sepecific target.""" - res_details = self._get_res_details( - '/cgi-bin/disk/iscsi_target_setting.cgi?', - func='remove_lun', - LUNIndex=lun_index, - targetIndex=target_index, - sid=self.sid) - - root = ET.fromstring(res_details['data']) - if root.find('authPassed').text == '0': - raise exception.VolumeBackendAPIException( - data=_('Session id expired')) - if root.find('result').text < '0': - raise exception.VolumeBackendAPIException(data=_( - 'Unmap lun %(lun_index)s from target %(target_index)s failed') - % {'lun_index': lun_index, 'target_index': target_index}) - - @_connection_checker - def get_iscsi_portal_info(self): - """Get iscsi portal info.""" - res_details = self._get_res_details( - '/cgi-bin/disk/iscsi_portal_setting.cgi?', - func='extra_get', - iSCSI_portal='1', - sid=self.sid) - - root = ET.fromstring(res_details['data']) - if root.find('authPassed').text == '0': - raise exception.VolumeBackendAPIException( - data=_('Session id expired')) - else: - return res_details - - 
@_connection_checker - def get_lun_info(self, **kwargs): - """Execute get_lun_info API.""" - for key, value in kwargs.items(): - LOG.debug('%(key)s = %(val)s', - {'key': key, 'val': value}) - res_details = self._get_res_details( - '/cgi-bin/disk/iscsi_portal_setting.cgi?', - func='extra_get', - lunList='1', - sid=self.sid) - - root = ET.fromstring(res_details['data']) - if root.find('authPassed').text == '0': - raise exception.VolumeBackendAPIException( - data=_('Session id expired')) - - if (('LUNIndex' in kwargs) or ('LUNName' in kwargs) or - ('LUNNAA' in kwargs)): - - lun_list = root.find('iSCSILUNList') - lun_info_tree = lun_list.findall('LUNInfo') - for lun in lun_info_tree: - if ('LUNIndex' in kwargs): - if (kwargs['LUNIndex'] == lun.find('LUNIndex').text): - LOG.debug('LUNIndex:%s', - lun.find('LUNIndex').text) - return lun - elif ('LUNName' in kwargs): - if (kwargs['LUNName'] == lun.find('LUNName').text): - LOG.debug('LUNName:%s', lun.find('LUNName').text) - return lun - elif ('LUNNAA' in kwargs): - if (kwargs['LUNNAA'] == lun.find('LUNNAA').text): - LOG.debug('LUNNAA:%s', lun.find('LUNNAA').text) - return lun - - return None - - @_connection_checker - def get_snapshot_info(self, **kwargs): - """Execute get_snapshot_info API.""" - for key, value in kwargs.items(): - LOG.debug('%(key)s = %(val)s', - {'key': key, 'val': value}) - res_details = self._get_res_details( - '/cgi-bin/disk/snapshot.cgi?', - func='extra_get', - LUNIndex=kwargs['lun_index'], - snapshot_list='1', - snap_start='0', - snap_count='100', - sid=self.sid) - - root = ET.fromstring(res_details['data']) - if root.find('authPassed').text == '0': - raise exception.VolumeBackendAPIException( - data=_('Session id expired')) - if root.find('result').text < '0': - raise exception.VolumeBackendAPIException( - data=_('Unexpected response from QNAP API')) - - snapshot_list = root.find('SnapshotList') - if snapshot_list is None: - return None - snapshot_tree = snapshot_list.findall('row') - for snapshot 
in snapshot_tree: - if (kwargs['snapshot_name'] == - snapshot.find('snapshot_name').text): - LOG.debug('snapshot_name:%s', kwargs['snapshot_name']) - return snapshot - - return None - - @_connection_checker - def create_snapshot_api(self, lun_id, snapshot_name): - """Execute CGI to create snapshot from source lun NAA.""" - res_details = self._get_res_details( - '/cgi-bin/disk/snapshot.cgi?', - func='create_snapshot', - lunID=lun_id, - snapshot_name=snapshot_name, - expire_min='0', - vital='1', - snapshot_type='0', - sid=self.sid) - - root = ET.fromstring(res_details['data']) - if root.find('authPassed').text == '0': - raise exception.VolumeBackendAPIException( - data=_('Session id expired')) - if root.find('result').text < '0': - raise exception.VolumeBackendAPIException( - data=_('create snapshot failed')) - - @_connection_checker - def api_delete_snapshot(self, snapshot_id): - """Execute CGI to delete snapshot from source lun NAA.""" - res_details = self._get_res_details( - '/cgi-bin/disk/snapshot.cgi?', - func='del_snapshots', - snapshotID=snapshot_id, - sid=self.sid) - - root = ET.fromstring(res_details['data']) - if root.find('authPassed').text == '0': - raise exception.VolumeBackendAPIException( - data=_('Session id expired')) - # snapshot not exist - if root.find('result').text == '-206021': - return - # lun not exist - if root.find('result').text == '-200005': - return - if root.find('result').text < '0': - raise exception.VolumeBackendAPIException( - data=_('delete snapshot %s failed') % snapshot_id) - - @_connection_checker - def clone_snapshot(self, snapshot_id, new_lunname): - """Execute CGI to clone snapshot as unmap lun.""" - res_details = self._get_res_details( - '/cgi-bin/disk/snapshot.cgi?', - func='clone_qsnapshot', - by_lun='1', - snapshotID=snapshot_id, - new_name=new_lunname, - sid=self.sid) - - root = ET.fromstring(res_details['data']) - if root.find('authPassed').text == '0': - raise exception.VolumeBackendAPIException( - data=_('Session id 
expired')) - if root.find('result').text < '0': - raise exception.VolumeBackendAPIException(data=_( - 'Clone lun %(lunname)s from snapshot %(snapshot_id)s failed' - ) % {'lunname': new_lunname, 'snapshot_id': snapshot_id}) - - @_connection_checker - def edit_lun(self, lun): - """Extend lun.""" - LOG.debug( - 'LUNName:%(name)s, LUNCapacity:%(cap)s, LUNIndex:%(id)s'), ( - {'name': lun['LUNName'], - 'cap': lun['LUNCapacity'], - 'id': lun['LUNIndex']}) - res_details = self._get_res_details( - '/cgi-bin/disk/iscsi_lun_setting.cgi?', - func='edit_lun', - LUNName=lun['LUNName'], - LUNCapacity=lun['LUNCapacity'], - LUNIndex=lun['LUNIndex'], - LUNThinAllocate=lun['LUNThinAllocate'], - LUNPath=lun['LUNPath'], - LUNStatus=lun['LUNStatus'], - sid=self.sid) - - root = ET.fromstring(res_details['data']) - if root.find('authPassed').text == '0': - raise exception.VolumeBackendAPIException( - data=_('Session id expired')) - if root.find('result').text < '0': - raise exception.VolumeBackendAPIException( - data=_('Extend lun %s failed') % lun['LUNIndex']) - - @_connection_checker - def get_all_iscsi_portal_setting(self): - """Execute get_all_iscsi_portal_setting API.""" - LOG.debug('in get_all_iscsi_portal_setting') - res_details = self._get_res_details( - '/cgi-bin/disk/iscsi_portal_setting.cgi?', - func='get_all', - sid=self.sid) - - return res_details - - @_connection_checker - def get_ethernet_ip(self, **kwargs): - """Execute get_ethernet_ip API.""" - LOG.debug('in get_ethernet_ip') - res_details = self._get_res_details( - '/cgi-bin/sys/sysRequest.cgi?', - subfunc='net_setting', - sid=self.sid) - - root = ET.fromstring(res_details['data']) - if root.find('authPassed').text == '0': - raise exception.VolumeBackendAPIException( - data=_('Session id expired')) - - if ('type' in kwargs): - return_ip = [] - ip_list = root.find('func').find('ownContent') - ip_list_tree = ip_list.findall('IPInfo') - for IP in ip_list_tree: - ipv4 = (IP.find('IP').find('IP1').text + '.' 
+ - IP.find('IP').find('IP2').text + '.' + - IP.find('IP').find('IP3').text + '.' + - IP.find('IP').find('IP4').text) - LOG.debug('ipv4 = %s', ipv4) - if ((kwargs['type'] == 'data') and - (IP.find('isManagePort').text != '1') and - (IP.find('status').text == '1')): - return_ip.append(ipv4) - elif ((kwargs['type'] == 'manage') and - (IP.find('isManagePort').text == '1') and - (IP.find('status').text == '1')): - return_ip.append(ipv4) - elif ((kwargs['type'] == 'all') and - (IP.find('status').text == '1')): - return_ip.append(ipv4) - LOG.debug('return_ip = %s', return_ip) - - return return_ip - - @_connection_checker - def get_target_info(self, target_index): - """Get target info.""" - LOG.debug('target_index: %s', target_index) - res_details = self._get_res_details( - '/cgi-bin/disk/iscsi_portal_setting.cgi?', - func='extra_get', - targetInfo=1, - targetIndex=target_index, - sid=self.sid) - - root = ET.fromstring(res_details['data']) - if root.find('authPassed').text == '0': - raise exception.VolumeBackendAPIException( - data=_('Session id expired')) - if root.find('result').text < '0': - raise exception.VolumeBackendAPIException( - data=_('Get target info failed')) - - target_list = root.find('targetInfo') - target_tree = target_list.findall('row') - for target in target_tree: - if target_index == target.find('targetIndex').text: - LOG.debug('targetIQN: %s', - target.find('targetIQN').text) - return target - - -class QnapAPIExecutorTS(QnapAPIExecutor): - """Makes QNAP API calls for TS NAS.""" - - @_connection_checker - def remove_target_init(self, target_iqn, init_iqn): - """Remove target acl.""" - LOG.debug('targetIqn = %(tgt)s, initIqn = %(init)s', - {'tgt': target_iqn, 'init': init_iqn}) - res_details = self._get_res_details( - '/cgi-bin/disk/iscsi_target_setting.cgi?', - func='remove_init', - targetIQN=target_iqn, - initiatorIQN=init_iqn, - ha_sync='1', - sid=self.sid) - - root = ET.fromstring(res_details['data']) - if root.find('authPassed').text == '0': - 
raise exception.VolumeBackendAPIException( - data=_('Session id expired')) - if root.find('result').text < '0': - raise exception.VolumeBackendAPIException( - data=_('Remove target acl failed')) - - @_connection_checker - def get_target_info(self, target_index): - """Get nas target info.""" - LOG.debug('targetIndex: %s', target_index) - res_details = self._get_res_details( - '/cgi-bin/disk/iscsi_portal_setting.cgi?', - func='extra_get', - targetInfo=1, - targetIndex=target_index, - ha_sync='1', - sid=self.sid) - - root = ET.fromstring(res_details['data']) - if root.find('authPassed').text == '0': - raise exception.VolumeBackendAPIException( - data=_('Session id expired')) - if root.find('result').text < '0': - raise exception.VolumeBackendAPIException( - data=_('Get target info failed')) - - target_list = root.find('targetInfo') - target_tree = target_list.findall('row') - for target in target_tree: - if target_index == target.find('targetIndex').text: - LOG.debug('targetIQN: %s', - target.find('targetIQN').text) - return target - - @_connection_checker - def get_ethernet_ip(self, **kwargs): - """Execute get_ethernet_ip API.""" - LOG.debug('in get_ethernet_ip') - res_details = self._get_res_details( - '/cgi-bin/sys/sysRequest.cgi?', - subfunc='net_setting', - sid=self.sid) - - root = ET.fromstring(res_details['data']) - if root.find('authPassed').text == '0': - raise exception.VolumeBackendAPIException( - data=_('Session id expired')) - - if ('type' in kwargs): - return_ip = [] - ip_list = root.find('func').find('ownContent') - ip_list_tree = ip_list.findall('IPInfo') - for IP in ip_list_tree: - ipv4 = (IP.find('IP').find('IP1').text + '.' + - IP.find('IP').find('IP2').text + '.' + - IP.find('IP').find('IP3').text + '.' 
+ - IP.find('IP').find('IP4').text) - LOG.debug('ipv4 = %s', ipv4) - if (IP.find('status').text == '1'): - return_ip.append(ipv4) - LOG.debug('return_ip = %s', return_ip) - - return return_ip - - @_connection_checker - def get_snapshot_info(self, **kwargs): - """Execute get_snapshot_info API.""" - for key, value in kwargs.items(): - LOG.debug('%(key)s = %(val)s', - {'key': key, 'val': value}) - LOG.debug('in get_ethernet_ip') - res_details = self._get_res_details( - '/cgi-bin/disk/snapshot.cgi?', - func='extra_get', - LUNIndex=kwargs['lun_index'], - smb_snapshot_list='1', - smb_snapshot='1', - snapshot_list='1', - sid=self.sid) - - root = ET.fromstring(res_details['data']) - if root.find('authPassed').text == '0': - raise exception.VolumeBackendAPIException( - data=_('Session id expired')) - if root.find('result').text < '0': - raise exception.VolumeBackendAPIException( - data=_('Unexpected response from QNAP API')) - - snapshot_list = root.find('SnapshotList') - if snapshot_list is None: - return None - snapshot_tree = snapshot_list.findall('row') - for snapshot in snapshot_tree: - if (kwargs['snapshot_name'] == - snapshot.find('snapshot_name').text): - LOG.debug('snapshot_name:%s', kwargs['snapshot_name']) - return snapshot - - return None - - @_connection_checker - def create_target(self, target_name, controller_name): - """Create target on nas and return target index.""" - res_details = self._get_res_details( - '/cgi-bin/disk/iscsi_target_setting.cgi?', - func='add_target', - targetName=target_name, - targetAlias=target_name, - bTargetDataDigest='0', - bTargetHeaderDigest='0', - bTargetClusterEnable='1', - sid=self.sid) - - root = ET.fromstring(res_details['data']) - if root.find('authPassed').text == '0': - raise exception.VolumeBackendAPIException( - data=_('Session id expired')) - if root.find('result').text < '0': - raise exception.VolumeBackendAPIException( - data=_('Create target failed')) - - root = ET.fromstring(res_details['data']) - target_index = 
root.find('result').text - return target_index - - -class QnapAPIExecutorTES(QnapAPIExecutor): - """Makes QNAP API calls for TES NAS.""" - - @_connection_checker - def get_ethernet_ip(self, **kwargs): - """Execute get_ethernet_ip API.""" - LOG.debug('in get_ethernet_ip') - res_details = self._get_res_details( - '/cgi-bin/sys/sysRequest.cgi?', - subfunc='net_setting', - sid=self.sid) - - root = ET.fromstring(res_details['data']) - if root.find('authPassed').text == '0': - raise exception.VolumeBackendAPIException( - data=_('Session id expired')) - - if ('type' in kwargs): - return_ip = [] - ip_list = root.find('func').find('ownContent') - ip_list_tree = ip_list.findall('IPInfo') - for IP in ip_list_tree: - ipv4 = (IP.find('IP').find('IP1').text + '.' + - IP.find('IP').find('IP2').text + '.' + - IP.find('IP').find('IP3').text + '.' + - IP.find('IP').find('IP4').text) - LOG.debug('ipv4 = %s', ipv4) - if (IP.find('status').text == '1'): - return_ip.append(ipv4) - LOG.debug('return_ip = %s', return_ip) - - return return_ip diff --git a/cinder/volume/drivers/quobyte.py b/cinder/volume/drivers/quobyte.py deleted file mode 100644 index bf6f23113..000000000 --- a/cinder/volume/drivers/quobyte.py +++ /dev/null @@ -1,508 +0,0 @@ -# Copyright (c) 2014 Quobyte Inc. -# Copyright (c) 2013 Red Hat, Inc. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -import errno -import os -import psutil - -from oslo_concurrency import processutils -from oslo_config import cfg -from oslo_log import log as logging -from oslo_utils import fileutils - -from cinder import compute -from cinder import exception -from cinder.i18n import _ -from cinder.image import image_utils -from cinder import interface -from cinder import utils -from cinder.volume import configuration -from cinder.volume.drivers import remotefs as remotefs_drv - -VERSION = '1.1.5' - -LOG = logging.getLogger(__name__) - -volume_opts = [ - cfg.StrOpt('quobyte_volume_url', - help=('Quobyte URL to the Quobyte volume e.g.,' - ' quobyte://, /')), - cfg.StrOpt('quobyte_client_cfg', - help=('Path to a Quobyte Client configuration file.')), - cfg.BoolOpt('quobyte_sparsed_volumes', - default=True, - help=('Create volumes as sparse files which take no space.' - ' If set to False, volume is created as regular file.' - 'In such case volume creation takes a lot of time.')), - cfg.BoolOpt('quobyte_qcow2_volumes', - default=True, - help=('Create volumes as QCOW2 files rather than raw files.')), - cfg.StrOpt('quobyte_mount_point_base', - default='$state_path/mnt', - help=('Base dir containing the mount point' - ' for the Quobyte volume.')), -] - -CONF = cfg.CONF -CONF.register_opts(volume_opts, group=configuration.SHARED_CONF_GROUP) - - -@interface.volumedriver -class QuobyteDriver(remotefs_drv.RemoteFSSnapDriverDistributed): - """Cinder driver for Quobyte USP. - - Volumes are stored as files on the mounted Quobyte volume. The hypervisor - will expose them as block devices. - - Unlike other similar drivers, this driver uses exactly one Quobyte volume - because Quobyte USP is a distributed storage system. To add or remove - capacity, administrators can add or remove storage servers to/from the - volume. - - For different types of volumes e.g., SSD vs. rotating disks, - use multiple backends in Cinder. 
- - Note: To be compliant with the inherited RemoteFSSnapDriver, Quobyte - volumes are also referred to as shares. - - Version history: - 1.0 - Initial driver. - 1.1 - Adds optional insecure NAS settings - 1.1.1 - Removes getfattr calls from driver - 1.1.2 - Fixes a bug in the creation of cloned volumes - 1.1.3 - Explicitely mounts Quobyte volumes w/o xattrs - 1.1.4 - Fixes capability to configure redundancy in quobyte_volume_url - 1.1.5 - Enables extension of volumes with snapshots - - """ - - driver_volume_type = 'quobyte' - driver_prefix = 'quobyte' - volume_backend_name = 'Quobyte' - VERSION = VERSION - - # ThirdPartySystems wiki page - CI_WIKI_NAME = "Quobyte_CI" - - def __init__(self, execute=processutils.execute, *args, **kwargs): - super(QuobyteDriver, self).__init__(*args, **kwargs) - self.configuration.append_config_values(volume_opts) - - # Used to manage snapshots which are currently attached to a VM. - self._nova = None - - def do_setup(self, context): - """Any initialization the volume driver does while starting.""" - super(QuobyteDriver, self).do_setup(context) - - self.set_nas_security_options(is_new_cinder_install=False) - self.shares = {} # address : options - self._nova = compute.API() - - def check_for_setup_error(self): - if not self.configuration.quobyte_volume_url: - msg = (_("There's no Quobyte volume configured (%s). 
Example:" - " quobyte:///") % - 'quobyte_volume_url') - LOG.warning(msg) - raise exception.VolumeDriverException(msg) - - # Check if mount.quobyte is installed - try: - self._execute('mount.quobyte', check_exit_code=False, - run_as_root=False) - except OSError as exc: - if exc.errno == errno.ENOENT: - raise exception.VolumeDriverException( - 'mount.quobyte is not installed') - else: - raise - - def set_nas_security_options(self, is_new_cinder_install): - self._execute_as_root = False - - LOG.debug("nas_secure_file_* settings are %(ops)s and %(perm)s", - {'ops': self.configuration.nas_secure_file_operations, - 'perm': self.configuration.nas_secure_file_permissions} - ) - - if self.configuration.nas_secure_file_operations == 'auto': - """Note (kaisers): All previous Quobyte driver versions ran with - secure settings hardcoded to 'True'. Therefore the default 'auto' - setting can safely be mapped to the same, secure, setting. - """ - LOG.debug("Mapping 'auto' value to 'true' for" - " nas_secure_file_operations.") - self.configuration.nas_secure_file_operations = 'true' - - if self.configuration.nas_secure_file_permissions == 'auto': - """Note (kaisers): All previous Quobyte driver versions ran with - secure settings hardcoded to 'True'. Therefore the default 'auto' - setting can safely be mapped to the same, secure, setting. - """ - LOG.debug("Mapping 'auto' value to 'true' for" - " nas_secure_file_permissions.") - self.configuration.nas_secure_file_permissions = 'true' - - if self.configuration.nas_secure_file_operations == 'false': - LOG.warning("The NAS file operations will be run as " - "root, allowing root level access at the storage " - "backend.") - self._execute_as_root = True - else: - LOG.info("The NAS file operations will be run as" - " non privileged user in secure mode. 
Please" - " ensure your libvirtd settings have been configured" - " accordingly (see section 'OpenStack' in the Quobyte" - " Manual.") - - if self.configuration.nas_secure_file_permissions == 'false': - LOG.warning("The NAS file permissions mode will be 666 " - "(allowing other/world read & write access).") - - def _qemu_img_info(self, path, volume_name): - return super(QuobyteDriver, self)._qemu_img_info_base( - path, volume_name, self.configuration.quobyte_mount_point_base) - - @utils.synchronized('quobyte', external=False) - def create_cloned_volume(self, volume, src_vref): - """Creates a clone of the specified volume.""" - return self._create_cloned_volume(volume, src_vref) - - @utils.synchronized('quobyte', external=False) - def create_volume(self, volume): - return super(QuobyteDriver, self).create_volume(volume) - - @utils.synchronized('quobyte', external=False) - def create_volume_from_snapshot(self, volume, snapshot): - return self._create_volume_from_snapshot(volume, snapshot) - - def _copy_volume_from_snapshot(self, snapshot, volume, volume_size): - """Copy data from snapshot to destination volume. - - This is done with a qemu-img convert to raw/qcow2 from the snapshot - qcow2. - """ - - LOG.debug("snapshot: %(snap)s, volume: %(vol)s, ", - {'snap': snapshot.id, - 'vol': volume.id, - 'size': volume_size}) - - info_path = self._local_path_volume_info(snapshot.volume) - snap_info = self._read_info_file(info_path) - vol_path = self._local_volume_dir(snapshot.volume) - forward_file = snap_info[snapshot.id] - forward_path = os.path.join(vol_path, forward_file) - - # Find the file which backs this file, which represents the point - # when this snapshot was created. 
- img_info = self._qemu_img_info(forward_path, - snapshot.volume.name) - path_to_snap_img = os.path.join(vol_path, img_info.backing_file) - - path_to_new_vol = self._local_path_volume(volume) - - LOG.debug("will copy from snapshot at %s", path_to_snap_img) - - if self.configuration.quobyte_qcow2_volumes: - out_format = 'qcow2' - else: - out_format = 'raw' - - image_utils.convert_image(path_to_snap_img, - path_to_new_vol, - out_format, - run_as_root=self._execute_as_root) - - self._set_rw_permissions_for_all(path_to_new_vol) - - @utils.synchronized('quobyte', external=False) - def delete_volume(self, volume): - """Deletes a logical volume.""" - - if not volume.provider_location: - LOG.warning('Volume %s does not have provider_location ' - 'specified, skipping', volume.name) - return - - self._ensure_share_mounted(volume.provider_location) - - volume_dir = self._local_volume_dir(volume) - mounted_path = os.path.join(volume_dir, - self.get_active_image_from_info(volume)) - - self._execute('rm', '-f', mounted_path, - run_as_root=self._execute_as_root) - - # If an exception (e.g. 
timeout) occurred during delete_snapshot, the - # base volume may linger around, so just delete it if it exists - base_volume_path = self._local_path_volume(volume) - fileutils.delete_if_exists(base_volume_path) - - info_path = self._local_path_volume_info(volume) - fileutils.delete_if_exists(info_path) - - @utils.synchronized('quobyte', external=False) - def create_snapshot(self, snapshot): - """Apply locking to the create snapshot operation.""" - - return self._create_snapshot(snapshot) - - @utils.synchronized('quobyte', external=False) - def delete_snapshot(self, snapshot): - """Apply locking to the delete snapshot operation.""" - self._delete_snapshot(snapshot) - - @utils.synchronized('quobyte', external=False) - def initialize_connection(self, volume, connector): - """Allow connection to connector and return connection info.""" - - # Find active qcow2 file - active_file = self.get_active_image_from_info(volume) - path = '%s/%s/%s' % (self.configuration.quobyte_mount_point_base, - self._get_hash_str(volume.provider_location), - active_file) - - data = {'export': volume.provider_location, - 'name': active_file} - if volume.provider_location in self.shares: - data['options'] = self.shares[volume.provider_location] - - # Test file for raw vs. 
qcow2 format - info = self._qemu_img_info(path, volume.name) - data['format'] = info.file_format - if data['format'] not in ['raw', 'qcow2']: - msg = _('%s must be a valid raw or qcow2 image.') % path - raise exception.InvalidVolume(msg) - - return { - 'driver_volume_type': 'quobyte', - 'data': data, - 'mount_point_base': self.configuration.quobyte_mount_point_base - } - - @utils.synchronized('quobyte', external=False) - def copy_volume_to_image(self, context, volume, image_service, image_meta): - self._copy_volume_to_image(context, volume, image_service, - image_meta) - - @utils.synchronized('quobyte', external=False) - def extend_volume(self, volume, size_gb): - volume_path = self.local_path(volume) - - info = self._qemu_img_info(volume_path, volume.name) - backing_fmt = info.file_format - - if backing_fmt not in ['raw', 'qcow2']: - msg = _('Unrecognized backing format: %s') - raise exception.InvalidVolume(msg % backing_fmt) - - # qemu-img can resize both raw and qcow2 files - active_path = os.path.join( - self._get_mount_point_for_share(volume.provider_location), - self.get_active_image_from_info(volume)) - image_utils.resize_image(active_path, size_gb) - - def _do_create_volume(self, volume): - """Create a volume on given Quobyte volume. - - :param volume: volume reference - """ - volume_path = self.local_path(volume) - volume_size = volume.size - - if self.configuration.quobyte_qcow2_volumes: - self._create_qcow2_file(volume_path, volume_size) - else: - if self.configuration.quobyte_sparsed_volumes: - self._create_sparsed_file(volume_path, volume_size) - else: - self._create_regular_file(volume_path, volume_size) - - self._set_rw_permissions_for_all(volume_path) - - def _load_shares_config(self, share_file=None): - """Put 'quobyte_volume_url' into the 'shares' list. - - :param share_file: string, Not used because the user has to specify - the Quobyte volume directly. 
- """ - self.shares = {} - - url = self.configuration.quobyte_volume_url - - # Strip quobyte:// from the URL - protocol = self.driver_volume_type + "://" - if url.startswith(protocol): - url = url[len(protocol):] - - self.shares[url] = None # None = No extra mount options. - - LOG.debug("Quobyte Volume URL set to: %s", self.shares) - - def _ensure_share_mounted(self, quobyte_volume): - """Mount Quobyte volume. - - :param quobyte_volume: string - """ - mount_path = self._get_mount_point_for_share(quobyte_volume) - self._mount_quobyte(quobyte_volume, mount_path, ensure=True) - - @utils.synchronized('quobyte_ensure', external=False) - def _ensure_shares_mounted(self): - """Mount the Quobyte volume. - - Used for example by RemoteFsDriver._update_volume_stats - """ - self._mounted_shares = [] - - self._load_shares_config() - - for share in self.shares.keys(): - try: - self._ensure_share_mounted(share) - self._mounted_shares.append(share) - except Exception as exc: - LOG.warning('Exception during mounting %s', exc) - - LOG.debug('Available shares %s', self._mounted_shares) - - def _find_share(self, volume): - """Returns the mounted Quobyte volume. - - Multiple shares are not supported because the virtualization of - multiple storage devices is taken care of at the level of Quobyte USP. - - For different types of volumes e.g., SSD vs. rotating disks, use - multiple backends in Cinder. - - :param volume: the volume to be created. - """ - - if not self._mounted_shares: - raise exception.NotFound() - - assert len(self._mounted_shares) == 1, 'There must be exactly' \ - ' one Quobyte volume.' - target_volume = self._mounted_shares[0] - - LOG.debug('Selected %s as target Quobyte volume.', target_volume) - - return target_volume - - def _get_mount_point_for_share(self, quobyte_volume): - """Return mount point for Quobyte volume. 
- - :param quobyte_volume: Example: storage-host/openstack-volumes - """ - return os.path.join(self.configuration.quobyte_mount_point_base, - self._get_hash_str(quobyte_volume)) - - # open() wrapper to mock reading from /proc/mount. - @staticmethod - def read_proc_mount(): # pragma: no cover - return open('/proc/mounts') - - def _mount_quobyte(self, quobyte_volume, mount_path, ensure=False): - """Mount Quobyte volume to mount path.""" - mounted = False - for l in QuobyteDriver.read_proc_mount(): - if l.split()[1] == mount_path: - mounted = True - break - - if mounted: - try: - os.stat(mount_path) - except OSError as exc: - if exc.errno == errno.ENOTCONN: - mounted = False - try: - LOG.info('Fixing previous mount %s which was not' - ' unmounted correctly.', mount_path) - self._execute('umount.quobyte', mount_path, - run_as_root=self._execute_as_root) - except processutils.ProcessExecutionError as exc: - LOG.warning("Failed to unmount previous mount: " - "%s", exc) - else: - # TODO(quobyte): Extend exc analysis in here? 
- LOG.warning("Unknown error occurred while checking " - "mount point: %s Trying to continue.", - exc) - - if not mounted: - if not os.path.isdir(mount_path): - self._execute('mkdir', '-p', mount_path) - - command = ['mount.quobyte', '--disable-xattrs', - quobyte_volume, mount_path] - if self.configuration.quobyte_client_cfg: - command.extend(['-c', self.configuration.quobyte_client_cfg]) - - try: - LOG.info('Mounting volume: %s ...', quobyte_volume) - self._execute(*command, run_as_root=self._execute_as_root) - LOG.info('Mounting volume: %s succeeded', quobyte_volume) - mounted = True - except processutils.ProcessExecutionError as exc: - if ensure and 'already mounted' in exc.stderr: - LOG.warning("%s is already mounted", quobyte_volume) - else: - raise - - if mounted: - self._validate_volume(mount_path) - - def _validate_volume(self, mount_path): - """Runs a number of tests on the expect Quobyte mount""" - partitions = psutil.disk_partitions(all=True) - for p in partitions: - if mount_path == p.mountpoint: - if p.device.startswith("quobyte@"): - try: - statresult = os.stat(mount_path) - if statresult.st_size == 0: - # client looks healthy - if not os.access(mount_path, - os.W_OK | os.X_OK): - LOG.warning("Volume is not writable. " - "Please broaden the file" - " permissions." - " Mount: %s", - mount_path) - return # we're happy here - else: - msg = (_("The mount %(mount_path)s is not a " - "valid Quobyte volume. Stale mount?") - % {'mount_path': mount_path}) - raise exception.VolumeDriverException(msg) - except Exception as exc: - msg = (_("The mount %(mount_path)s is not a valid" - " Quobyte volume. Error: %(exc)s . 
" - " Possibly a Quobyte client crash?") - % {'mount_path': mount_path, 'exc': exc}) - raise exception.VolumeDriverException(msg) - else: - msg = (_("The mount %(mount_path)s is not a valid" - " Quobyte volume according to partition list.") - % {'mount_path': mount_path}) - raise exception.VolumeDriverException(msg) - msg = (_("No matching Quobyte mount entry for %(mount_path)s" - " could be found for validation in partition list.") - % {'mount_path': mount_path}) - raise exception.VolumeDriverException(msg) diff --git a/cinder/volume/drivers/rbd.py b/cinder/volume/drivers/rbd.py deleted file mode 100644 index 82964e3c6..000000000 --- a/cinder/volume/drivers/rbd.py +++ /dev/null @@ -1,1398 +0,0 @@ -# Copyright 2013 OpenStack Foundation -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
-"""RADOS Block Device Driver""" - -from __future__ import absolute_import -import json -import math -import os -import tempfile - -from eventlet import tpool -from oslo_config import cfg -from oslo_log import log as logging -from oslo_utils import fileutils -from oslo_utils import units -import six -from six.moves import urllib - -from cinder import exception -from cinder.i18n import _ -from cinder.image import image_utils -from cinder import interface -from cinder.objects import fields -from cinder import utils -from cinder.volume import configuration -from cinder.volume import driver - -try: - import rados - import rbd -except ImportError: - rados = None - rbd = None - - -LOG = logging.getLogger(__name__) - -RBD_OPTS = [ - cfg.StrOpt('rbd_cluster_name', - default='ceph', - help='The name of ceph cluster'), - cfg.StrOpt('rbd_pool', - default='rbd', - help='The RADOS pool where rbd volumes are stored'), - cfg.StrOpt('rbd_user', - help='The RADOS client name for accessing rbd volumes ' - '- only set when using cephx authentication'), - cfg.StrOpt('rbd_ceph_conf', - default='', # default determined by librados - help='Path to the ceph configuration file'), - cfg.StrOpt('rbd_keyring_conf', - default='', - help='Path to the ceph keyring file'), - cfg.BoolOpt('rbd_flatten_volume_from_snapshot', - default=False, - help='Flatten volumes created from snapshots to remove ' - 'dependency from volume to snapshot'), - cfg.StrOpt('rbd_secret_uuid', - help='The libvirt uuid of the secret for the rbd_user ' - 'volumes'), - cfg.IntOpt('rbd_max_clone_depth', - default=5, - help='Maximum number of nested volume clones that are ' - 'taken before a flatten occurs. Set to 0 to disable ' - 'cloning.'), - cfg.IntOpt('rbd_store_chunk_size', default=4, - help='Volumes will be chunked into objects of this size ' - '(in megabytes).'), - cfg.IntOpt('rados_connect_timeout', default=-1, - help='Timeout value (in seconds) used when connecting to ' - 'ceph cluster. 
If value < 0, no timeout is set and ' - 'default librados value is used.'), - cfg.IntOpt('rados_connection_retries', default=3, - help='Number of retries if connection to ceph cluster ' - 'failed.'), - cfg.IntOpt('rados_connection_interval', default=5, - help='Interval value (in seconds) between connection ' - 'retries to ceph cluster.'), - cfg.IntOpt('replication_connect_timeout', default=5, - help='Timeout value (in seconds) used when connecting to ' - 'ceph cluster to do a demotion/promotion of volumes. ' - 'If value < 0, no timeout is set and default librados ' - 'value is used.'), -] - -CONF = cfg.CONF -CONF.register_opts(RBD_OPTS, group=configuration.SHARED_CONF_GROUP) - -EXTRA_SPECS_REPL_ENABLED = "replication_enabled" - - -class RBDVolumeProxy(object): - """Context manager for dealing with an existing rbd volume. - - This handles connecting to rados and opening an ioctx automatically, and - otherwise acts like a librbd Image object. - - The underlying librados client and ioctx can be accessed as the attributes - 'client' and 'ioctx'. 
- """ - def __init__(self, driver, name, pool=None, snapshot=None, - read_only=False, remote=None, timeout=None): - client, ioctx = driver._connect_to_rados(pool, remote, timeout) - if snapshot is not None: - snapshot = utils.convert_str(snapshot) - - try: - self.volume = driver.rbd.Image(ioctx, - utils.convert_str(name), - snapshot=snapshot, - read_only=read_only) - self.volume = tpool.Proxy(self.volume) - except driver.rbd.Error: - LOG.exception("error opening rbd image %s", name) - driver._disconnect_from_rados(client, ioctx) - raise - self.driver = driver - self.client = client - self.ioctx = ioctx - - def __enter__(self): - return self - - def __exit__(self, type_, value, traceback): - try: - self.volume.close() - finally: - self.driver._disconnect_from_rados(self.client, self.ioctx) - - def __getattr__(self, attrib): - return getattr(self.volume, attrib) - - -class RADOSClient(object): - """Context manager to simplify error handling for connecting to ceph.""" - def __init__(self, driver, pool=None): - self.driver = driver - self.cluster, self.ioctx = driver._connect_to_rados(pool) - - def __enter__(self): - return self - - def __exit__(self, type_, value, traceback): - self.driver._disconnect_from_rados(self.cluster, self.ioctx) - - @property - def features(self): - features = self.cluster.conf_get('rbd_default_features') - if ((features is None) or (int(features) == 0)): - features = self.driver.rbd.RBD_FEATURE_LAYERING - return int(features) - - -@interface.volumedriver -class RBDDriver(driver.CloneableImageVD, - driver.MigrateVD, driver.ManageableVD, driver.BaseVD): - """Implements RADOS block device (RBD) volume commands.""" - - VERSION = '1.2.0' - - # ThirdPartySystems wiki page - CI_WIKI_NAME = "Cinder_Jenkins" - - SYSCONFDIR = '/etc/ceph/' - - def __init__(self, active_backend_id=None, *args, **kwargs): - super(RBDDriver, self).__init__(*args, **kwargs) - self.configuration.append_config_values(RBD_OPTS) - self._stats = {} - # allow overrides for 
testing - self.rados = kwargs.get('rados', rados) - self.rbd = kwargs.get('rbd', rbd) - - # All string args used with librbd must be None or utf-8 otherwise - # librbd will break. - for attr in ['rbd_cluster_name', 'rbd_user', - 'rbd_ceph_conf', 'rbd_pool']: - val = getattr(self.configuration, attr) - if val is not None: - setattr(self.configuration, attr, utils.convert_str(val)) - - self._backend_name = (self.configuration.volume_backend_name or - self.__class__.__name__) - self._active_backend_id = active_backend_id - self._active_config = {} - self._is_replication_enabled = False - self._replication_targets = [] - self._target_names = [] - - def _get_target_config(self, target_id): - """Get a replication target from known replication targets.""" - for target in self._replication_targets: - if target['name'] == target_id: - return target - if not target_id or target_id == 'default': - return { - 'name': self.configuration.rbd_cluster_name, - 'conf': self.configuration.rbd_ceph_conf, - 'user': self.configuration.rbd_user - } - raise exception.InvalidReplicationTarget( - reason=_('RBD: Unknown failover target host %s.') % target_id) - - def do_setup(self, context): - """Performs initialization steps that could raise exceptions.""" - self._do_setup_replication() - self._active_config = self._get_target_config(self._active_backend_id) - - def _do_setup_replication(self): - replication_devices = self.configuration.safe_get( - 'replication_device') - if replication_devices: - self._parse_replication_configs(replication_devices) - self._is_replication_enabled = True - self._target_names.append('default') - - def _parse_replication_configs(self, replication_devices): - for replication_device in replication_devices: - if 'backend_id' not in replication_device: - msg = _('Missing backend_id in replication_device ' - 'configuration.') - raise exception.InvalidConfigurationValue(msg) - - name = replication_device['backend_id'] - conf = replication_device.get('conf', - 
self.SYSCONFDIR + name + '.conf') - user = replication_device.get( - 'user', self.configuration.rbd_user or 'cinder') - # Pool has to be the same in all clusters - replication_target = {'name': name, - 'conf': utils.convert_str(conf), - 'user': utils.convert_str(user)} - LOG.info('Adding replication target: %s.', name) - self._replication_targets.append(replication_target) - self._target_names.append(name) - - def _get_config_tuple(self, remote=None): - if not remote: - remote = self._active_config - return (remote.get('name'), remote.get('conf'), remote.get('user')) - - def check_for_setup_error(self): - """Returns an error if prerequisites aren't met.""" - if rados is None: - msg = _('rados and rbd python libraries not found') - raise exception.VolumeBackendAPIException(data=msg) - - for attr in ['rbd_cluster_name', 'rbd_pool']: - val = getattr(self.configuration, attr) - if not val: - raise exception.InvalidConfigurationValue(option=attr, - value=val) - # NOTE: Checking connection to ceph - # RADOSClient __init__ method invokes _connect_to_rados - # so no need to check for self.rados.Error here. 
- with RADOSClient(self): - pass - - def RBDProxy(self): - return tpool.Proxy(self.rbd.RBD()) - - def _ceph_args(self): - args = [] - - name, conf, user = self._get_config_tuple() - - if user: - args.extend(['--id', user]) - if name: - args.extend(['--cluster', name]) - if conf: - args.extend(['--conf', conf]) - - return args - - def _connect_to_rados(self, pool=None, remote=None, timeout=None): - @utils.retry(exception.VolumeBackendAPIException, - self.configuration.rados_connection_interval, - self.configuration.rados_connection_retries) - def _do_conn(pool, remote, timeout): - name, conf, user = self._get_config_tuple(remote) - - if pool is not None: - pool = utils.convert_str(pool) - else: - pool = self.configuration.rbd_pool - - if timeout is None: - timeout = self.configuration.rados_connect_timeout - - LOG.debug("connecting to %(name)s (timeout=%(timeout)s).", - {'name': name, 'timeout': timeout}) - - client = self.rados.Rados(rados_id=user, - clustername=name, - conffile=conf) - - try: - if timeout >= 0: - timeout = six.text_type(timeout) - client.conf_set('rados_osd_op_timeout', timeout) - client.conf_set('rados_mon_op_timeout', timeout) - client.conf_set('client_mount_timeout', timeout) - - client.connect() - ioctx = client.open_ioctx(pool) - return client, ioctx - except self.rados.Error: - msg = _("Error connecting to ceph cluster.") - LOG.exception(msg) - client.shutdown() - raise exception.VolumeBackendAPIException(data=msg) - - return _do_conn(pool, remote, timeout) - - def _disconnect_from_rados(self, client, ioctx): - # closing an ioctx cannot raise an exception - ioctx.close() - client.shutdown() - - def _get_backup_snaps(self, rbd_image): - """Get list of any backup snapshots that exist on this volume. - - There should only ever be one but accept all since they need to be - deleted before the volume can be. - """ - # NOTE(dosaboy): we do the import here otherwise we get import conflict - # issues between the rbd driver and the ceph backup driver. 
These - # issues only seem to occur when NOT using them together and are - # triggered when the ceph backup driver imports the rbd volume driver. - from cinder.backup.drivers import ceph - return ceph.CephBackupDriver.get_backup_snaps(rbd_image) - - def _get_mon_addrs(self): - args = ['ceph', 'mon', 'dump', '--format=json'] - args.extend(self._ceph_args()) - out, _ = self._execute(*args) - lines = out.split('\n') - if lines[0].startswith('dumped monmap epoch'): - lines = lines[1:] - monmap = json.loads('\n'.join(lines)) - addrs = [mon['addr'] for mon in monmap['mons']] - hosts = [] - ports = [] - for addr in addrs: - host_port = addr[:addr.rindex('/')] - host, port = host_port.rsplit(':', 1) - hosts.append(host.strip('[]')) - ports.append(port) - return hosts, ports - - def _iterate_cb(self, offset, length, exists): - if exists: - self._total_usage += length - - def _get_usage_info(self): - with RADOSClient(self) as client: - for t in self.RBDProxy().list(client.ioctx): - if t.startswith('volume'): - # Only check for "volume" to allow some flexibility with - # non-default volume_name_template settings. Template - # must start with "volume". 
- with RBDVolumeProxy(self, t, read_only=True) as v: - v.diff_iterate(0, v.size(), None, self._iterate_cb) - - def _update_volume_stats(self): - stats = { - 'vendor_name': 'Open Source', - 'driver_version': self.VERSION, - 'storage_protocol': 'ceph', - 'total_capacity_gb': 'unknown', - 'free_capacity_gb': 'unknown', - 'provisioned_capacity_gb': 0, - 'reserved_percentage': ( - self.configuration.safe_get('reserved_percentage')), - 'multiattach': False, - 'thin_provisioning_support': True, - 'max_over_subscription_ratio': ( - self.configuration.safe_get('max_over_subscription_ratio')) - - } - backend_name = self.configuration.safe_get('volume_backend_name') - stats['volume_backend_name'] = backend_name or 'RBD' - - stats['replication_enabled'] = self._is_replication_enabled - if self._is_replication_enabled: - stats['replication_targets'] = self._target_names - - try: - with RADOSClient(self) as client: - ret, outbuf, _outs = client.cluster.mon_command( - '{"prefix":"df", "format":"json"}', '') - if ret != 0: - LOG.warning('Unable to get rados pool stats.') - else: - outbuf = json.loads(outbuf) - pool_stats = [pool for pool in outbuf['pools'] if - pool['name'] == - self.configuration.rbd_pool][0]['stats'] - stats['free_capacity_gb'] = round((float( - pool_stats['max_avail']) / units.Gi), 2) - used_capacity_gb = float( - pool_stats['bytes_used']) / units.Gi - stats['total_capacity_gb'] = round( - (stats['free_capacity_gb'] + used_capacity_gb), 2) - - self._total_usage = 0 - self._get_usage_info() - total_usage_gb = math.ceil(float(self._total_usage) / units.Gi) - stats['provisioned_capacity_gb'] = total_usage_gb - except self.rados.Error: - # just log and return unknown capacities - LOG.exception('error refreshing volume stats') - self._stats = stats - - def get_volume_stats(self, refresh=False): - """Return the current state of the volume service. - - If 'refresh' is True, run the update first. 
- """ - if refresh: - self._update_volume_stats() - return self._stats - - def _get_clone_depth(self, client, volume_name, depth=0): - """Returns the number of ancestral clones of the given volume.""" - parent_volume = self.rbd.Image(client.ioctx, volume_name) - try: - _pool, parent, _snap = self._get_clone_info(parent_volume, - volume_name) - finally: - parent_volume.close() - - if not parent: - return depth - - # If clone depth was reached, flatten should have occurred so if it has - # been exceeded then something has gone wrong. - if depth > self.configuration.rbd_max_clone_depth: - raise Exception(_("clone depth exceeds limit of %s") % - (self.configuration.rbd_max_clone_depth)) - - return self._get_clone_depth(client, parent, depth + 1) - - def _extend_if_required(self, volume, src_vref): - """Extends a volume if required - - In case src_vref size is smaller than the size if the requested - new volume call _resize(). - """ - if volume.size != src_vref.size: - LOG.debug("resize volume '%(dst_vol)s' from %(src_size)d to " - "%(dst_size)d", - {'dst_vol': volume.name, 'src_size': src_vref.size, - 'dst_size': volume.size}) - self._resize(volume) - - def create_cloned_volume(self, volume, src_vref): - """Create a cloned volume from another volume. - - Since we are cloning from a volume and not a snapshot, we must first - create a snapshot of the source volume. - - The user has the option to limit how long a volume's clone chain can be - by setting rbd_max_clone_depth. If a clone is made of another clone - and that clone has rbd_max_clone_depth clones behind it, the source - volume will be flattened. 
- """ - src_name = utils.convert_str(src_vref.name) - dest_name = utils.convert_str(volume.name) - flatten_parent = False - - # Do full copy if requested - if self.configuration.rbd_max_clone_depth <= 0: - with RBDVolumeProxy(self, src_name, read_only=True) as vol: - vol.copy(vol.ioctx, dest_name) - self._extend_if_required(volume, src_vref) - return - - # Otherwise do COW clone. - with RADOSClient(self) as client: - depth = self._get_clone_depth(client, src_name) - # If source volume is a clone and rbd_max_clone_depth reached, - # flatten the source before cloning. Zero rbd_max_clone_depth - # means infinite is allowed. - if depth == self.configuration.rbd_max_clone_depth: - LOG.debug("maximum clone depth (%d) has been reached - " - "flattening source volume", - self.configuration.rbd_max_clone_depth) - flatten_parent = True - - src_volume = self.rbd.Image(client.ioctx, src_name) - try: - # First flatten source volume if required. - if flatten_parent: - _pool, parent, snap = self._get_clone_info(src_volume, - src_name) - # Flatten source volume - LOG.debug("flattening source volume %s", src_name) - src_volume.flatten() - # Delete parent clone snap - parent_volume = self.rbd.Image(client.ioctx, parent) - try: - parent_volume.unprotect_snap(snap) - parent_volume.remove_snap(snap) - finally: - parent_volume.close() - - # Create new snapshot of source volume - clone_snap = "%s.clone_snap" % dest_name - LOG.debug("creating snapshot='%s'", clone_snap) - src_volume.create_snap(clone_snap) - src_volume.protect_snap(clone_snap) - except Exception: - # Only close if exception since we still need it. 
- src_volume.close() - raise - - # Now clone source volume snapshot - try: - LOG.debug("cloning '%(src_vol)s@%(src_snap)s' to " - "'%(dest)s'", - {'src_vol': src_name, 'src_snap': clone_snap, - 'dest': dest_name}) - self.RBDProxy().clone(client.ioctx, src_name, clone_snap, - client.ioctx, dest_name, - features=client.features) - except Exception: - src_volume.unprotect_snap(clone_snap) - src_volume.remove_snap(clone_snap) - src_volume.close() - raise - - try: - volume_update = self._enable_replication_if_needed(volume) - except Exception: - self.RBDProxy().remove(client.ioctx, dest_name) - err_msg = (_('Failed to enable image replication')) - raise exception.ReplicationError(reason=err_msg, - volume_id=volume.id) - finally: - src_volume.close() - - self._extend_if_required(volume, src_vref) - - LOG.debug("clone created successfully") - return volume_update - - def _enable_replication(self, volume): - """Enable replication for a volume. - - Returns required volume update. - """ - vol_name = utils.convert_str(volume.name) - with RBDVolumeProxy(self, vol_name) as image: - had_journaling = image.features() & self.rbd.RBD_FEATURE_JOURNALING - if not had_journaling: - image.update_features(self.rbd.RBD_FEATURE_JOURNALING, True) - image.mirror_image_enable() - - driver_data = self._dumps({'had_journaling': bool(had_journaling)}) - return {'replication_status': fields.ReplicationStatus.ENABLED, - 'replication_driver_data': driver_data} - - def _is_replicated_type(self, volume_type): - # We do a safe attribute get because volume_type could be None - specs = getattr(volume_type, 'extra_specs', {}) - return specs.get(EXTRA_SPECS_REPL_ENABLED) == " True" - - def _enable_replication_if_needed(self, volume): - if self._is_replicated_type(volume.volume_type): - return self._enable_replication(volume) - if self._is_replication_enabled: - return {'replication_status': fields.ReplicationStatus.DISABLED} - return None - - def create_volume(self, volume): - """Creates a logical 
volume.""" - - if volume.encryption_key_id: - message = _("Encryption is not yet supported.") - raise exception.VolumeDriverException(message=message) - - size = int(volume.size) * units.Gi - - LOG.debug("creating volume '%s'", volume.name) - - chunk_size = self.configuration.rbd_store_chunk_size * units.Mi - order = int(math.log(chunk_size, 2)) - vol_name = utils.convert_str(volume.name) - - with RADOSClient(self) as client: - self.RBDProxy().create(client.ioctx, - vol_name, - size, - order, - old_format=False, - features=client.features) - - try: - volume_update = self._enable_replication_if_needed(volume) - except Exception: - self.RBDProxy().remove(client.ioctx, vol_name) - err_msg = (_('Failed to enable image replication')) - raise exception.ReplicationError(reason=err_msg, - volume_id=volume.id) - return volume_update - - def _flatten(self, pool, volume_name): - LOG.debug('flattening %(pool)s/%(img)s', - dict(pool=pool, img=volume_name)) - with RBDVolumeProxy(self, volume_name, pool) as vol: - vol.flatten() - - def _clone(self, volume, src_pool, src_image, src_snap): - LOG.debug('cloning %(pool)s/%(img)s@%(snap)s to %(dst)s', - dict(pool=src_pool, img=src_image, snap=src_snap, - dst=volume.name)) - - chunk_size = self.configuration.rbd_store_chunk_size * units.Mi - order = int(math.log(chunk_size, 2)) - vol_name = utils.convert_str(volume.name) - - with RADOSClient(self, src_pool) as src_client: - with RADOSClient(self) as dest_client: - self.RBDProxy().clone(src_client.ioctx, - utils.convert_str(src_image), - utils.convert_str(src_snap), - dest_client.ioctx, - vol_name, - features=src_client.features, - order=order) - - try: - volume_update = self._enable_replication_if_needed(volume) - except Exception: - self.RBDProxy().remove(dest_client.ioctx, vol_name) - err_msg = (_('Failed to enable image replication')) - raise exception.ReplicationError(reason=err_msg, - volume_id=volume.id) - return volume_update or {} - - def _resize(self, volume, **kwargs): - size 
= kwargs.get('size', None) - if not size: - size = int(volume.size) * units.Gi - - with RBDVolumeProxy(self, volume.name) as vol: - vol.resize(size) - - def create_volume_from_snapshot(self, volume, snapshot): - """Creates a volume from a snapshot.""" - volume_update = self._clone(volume, self.configuration.rbd_pool, - snapshot.volume_name, snapshot.name) - if self.configuration.rbd_flatten_volume_from_snapshot: - self._flatten(self.configuration.rbd_pool, volume.name) - if int(volume.size): - self._resize(volume) - return volume_update - - def _delete_backup_snaps(self, rbd_image): - backup_snaps = self._get_backup_snaps(rbd_image) - if backup_snaps: - for snap in backup_snaps: - rbd_image.remove_snap(snap['name']) - else: - LOG.debug("volume has no backup snaps") - - def _get_clone_info(self, volume, volume_name, snap=None): - """If volume is a clone, return its parent info. - - Returns a tuple of (pool, parent, snap). A snapshot may optionally be - provided for the case where a cloned volume has been flattened but it's - snapshot still depends on the parent. - """ - try: - if snap: - volume.set_snap(snap) - pool, parent, parent_snap = tuple(volume.parent_info()) - if snap: - volume.set_snap(None) - # Strip the tag off the end of the volume name since it will not be - # in the snap name. - if volume_name.endswith('.deleted'): - volume_name = volume_name[:-len('.deleted')] - # Now check the snap name matches. - if parent_snap == "%s.clone_snap" % volume_name: - return pool, parent, parent_snap - except self.rbd.ImageNotFound: - LOG.debug("Volume %s is not a clone.", volume_name) - volume.set_snap(None) - - return (None, None, None) - - def _get_children_info(self, volume, snap): - """List children for the given snapshot of a volume(image). - - Returns a list of (pool, image). 
- """ - - children_list = [] - - if snap: - volume.set_snap(snap) - children_list = volume.list_children() - volume.set_snap(None) - - return children_list - - def _delete_clone_parent_refs(self, client, parent_name, parent_snap): - """Walk back up the clone chain and delete references. - - Deletes references i.e. deleted parent volumes and snapshots. - """ - parent_rbd = self.rbd.Image(client.ioctx, parent_name) - parent_has_snaps = False - try: - # Check for grandparent - _pool, g_parent, g_parent_snap = self._get_clone_info(parent_rbd, - parent_name, - parent_snap) - - LOG.debug("deleting parent snapshot %s", parent_snap) - parent_rbd.unprotect_snap(parent_snap) - parent_rbd.remove_snap(parent_snap) - - parent_has_snaps = bool(list(parent_rbd.list_snaps())) - finally: - parent_rbd.close() - - # If parent has been deleted in Cinder, delete the silent reference and - # keep walking up the chain if it is itself a clone. - if (not parent_has_snaps) and parent_name.endswith('.deleted'): - LOG.debug("deleting parent %s", parent_name) - self.RBDProxy().remove(client.ioctx, parent_name) - - # Now move up to grandparent if there is one - if g_parent: - self._delete_clone_parent_refs(client, g_parent, g_parent_snap) - - def delete_volume(self, volume): - """Deletes a logical volume.""" - # NOTE(dosaboy): this was broken by commit cbe1d5f. Ensure names are - # utf-8 otherwise librbd will barf. - volume_name = utils.convert_str(volume.name) - with RADOSClient(self) as client: - try: - rbd_image = self.rbd.Image(client.ioctx, volume_name) - except self.rbd.ImageNotFound: - LOG.info("volume %s no longer exists in backend", - volume_name) - return - - clone_snap = None - parent = None - - # Ensure any backup snapshots are deleted - self._delete_backup_snaps(rbd_image) - - # If the volume has non-clone snapshots this delete is expected to - # raise VolumeIsBusy so do so straight away. 
- try: - snaps = rbd_image.list_snaps() - for snap in snaps: - if snap['name'].endswith('.clone_snap'): - LOG.debug("volume has clone snapshot(s)") - # We grab one of these and use it when fetching parent - # info in case the volume has been flattened. - clone_snap = snap['name'] - break - - raise exception.VolumeIsBusy(volume_name=volume_name) - - # Determine if this volume is itself a clone - _pool, parent, parent_snap = self._get_clone_info(rbd_image, - volume_name, - clone_snap) - finally: - rbd_image.close() - - @utils.retry(self.rbd.ImageBusy, - self.configuration.rados_connection_interval, - self.configuration.rados_connection_retries) - def _try_remove_volume(client, volume_name): - self.RBDProxy().remove(client.ioctx, volume_name) - - if clone_snap is None: - LOG.debug("deleting rbd volume %s", volume_name) - try: - _try_remove_volume(client, volume_name) - except self.rbd.ImageBusy: - msg = (_("ImageBusy error raised while deleting rbd " - "volume. This may have been caused by a " - "connection from a client that has crashed and, " - "if so, may be resolved by retrying the delete " - "after 30 seconds has elapsed.")) - LOG.warning(msg) - # Now raise this so that volume stays available so that we - # delete can be retried. - raise exception.VolumeIsBusy(msg, volume_name=volume_name) - except self.rbd.ImageNotFound: - LOG.info("RBD volume %s not found, allowing delete " - "operation to proceed.", volume_name) - return - - # If it is a clone, walk back up the parent chain deleting - # references. - if parent: - LOG.debug("volume is a clone so cleaning references") - self._delete_clone_parent_refs(client, parent, parent_snap) - else: - # If the volume has copy-on-write clones we will not be able to - # delete it. Instead we will keep it as a silent volume which - # will be deleted when it's snapshot and clones are deleted. 
- new_name = "%s.deleted" % (volume_name) - self.RBDProxy().rename(client.ioctx, volume_name, new_name) - - def create_snapshot(self, snapshot): - """Creates an rbd snapshot.""" - with RBDVolumeProxy(self, snapshot.volume_name) as volume: - snap = utils.convert_str(snapshot.name) - volume.create_snap(snap) - volume.protect_snap(snap) - - def delete_snapshot(self, snapshot): - """Deletes an rbd snapshot.""" - # NOTE(dosaboy): this was broken by commit cbe1d5f. Ensure names are - # utf-8 otherwise librbd will barf. - volume_name = utils.convert_str(snapshot.volume_name) - snap_name = utils.convert_str(snapshot.name) - - with RBDVolumeProxy(self, volume_name) as volume: - try: - volume.unprotect_snap(snap_name) - except self.rbd.InvalidArgument: - LOG.info( - "InvalidArgument: Unable to unprotect snapshot %s.", - snap_name) - except self.rbd.ImageNotFound: - LOG.info( - "ImageNotFound: Unable to unprotect snapshot %s.", - snap_name) - except self.rbd.ImageBusy: - children_list = self._get_children_info(volume, snap_name) - - if children_list: - for (pool, image) in children_list: - LOG.info('Image %(pool)s/%(image)s is dependent ' - 'on the snapshot %(snap)s.', - {'pool': pool, - 'image': image, - 'snap': snap_name}) - - raise exception.SnapshotIsBusy(snapshot_name=snap_name) - try: - volume.remove_snap(snap_name) - except self.rbd.ImageNotFound: - LOG.info("Snapshot %s does not exist in backend.", - snap_name) - - def _disable_replication(self, volume): - """Disable replication on the given volume.""" - vol_name = utils.convert_str(volume.name) - with RBDVolumeProxy(self, vol_name) as image: - image.mirror_image_disable(False) - driver_data = json.loads(volume.replication_driver_data) - # If we didn't have journaling enabled when we enabled replication - # we must remove journaling since it we added it for the - # replication - if not driver_data['had_journaling']: - image.update_features(self.rbd.RBD_FEATURE_JOURNALING, False) - return {'replication_status': 
fields.ReplicationStatus.DISABLED, - 'replication_driver_data': None} - - def retype(self, context, volume, new_type, diff, host): - """Retype from one volume type to another on the same backend.""" - old_vol_replicated = self._is_replicated_type(volume.volume_type) - new_vol_replicated = self._is_replicated_type(new_type) - - if old_vol_replicated and not new_vol_replicated: - try: - return True, self._disable_replication(volume) - except Exception: - err_msg = (_('Failed to disable image replication')) - raise exception.ReplicationError(reason=err_msg, - volume_id=volume.id) - elif not old_vol_replicated and new_vol_replicated: - try: - return True, self._enable_replication(volume) - except Exception: - err_msg = (_('Failed to enable image replication')) - raise exception.ReplicationError(reason=err_msg, - volume_id=volume.id) - - if not new_vol_replicated and self._is_replication_enabled: - update = {'replication_status': fields.ReplicationStatus.DISABLED} - else: - update = None - return True, update - - def _dumps(self, obj): - return json.dumps(obj, separators=(',', ':')) - - def _exec_on_volume(self, volume_name, remote, operation, *args, **kwargs): - @utils.retry(rbd.ImageBusy, - self.configuration.rados_connection_interval, - self.configuration.rados_connection_retries) - def _do_exec(): - timeout = self.configuration.replication_connect_timeout - with RBDVolumeProxy(self, volume_name, self.configuration.rbd_pool, - remote=remote, timeout=timeout) as rbd_image: - return getattr(rbd_image, operation)(*args, **kwargs) - return _do_exec() - - def _failover_volume(self, volume, remote, is_demoted, replication_status): - """Process failover for a volume. - - There are 3 different cases that will return different update values - for the volume: - - - Volume has replication enabled and failover succeeded: Set - replication status to failed-over. 
- - Volume has replication enabled and failover fails: Set status to - error, replication status to failover-error, and store previous - status in previous_status field. - - Volume replication is disabled: Set status to error, and store - status in previous_status field. - """ - # Failover is allowed when volume has it enabled or it has already - # failed over, because we may want to do a second failover. - if self._is_replicated_type(volume.volume_type): - vol_name = utils.convert_str(volume.name) - try: - self._exec_on_volume(vol_name, remote, - 'mirror_image_promote', not is_demoted) - - return {'volume_id': volume.id, - 'updates': {'replication_status': replication_status}} - except Exception as e: - replication_status = fields.ReplicationStatus.FAILOVER_ERROR - LOG.error('Failed to failover volume %(volume)s with ' - 'error: %(error)s.', - {'volume': volume.name, 'error': e}) - else: - replication_status = fields.ReplicationStatus.NOT_CAPABLE - LOG.debug('Skipping failover for non replicated volume ' - '%(volume)s with status: %(status)s', - {'volume': volume.name, 'status': volume.status}) - - # Failover did not happen - error_result = { - 'volume_id': volume.id, - 'updates': { - 'status': 'error', - 'previous_status': volume.status, - 'replication_status': replication_status - } - } - - return error_result - - def _demote_volumes(self, volumes, until_failure=True): - """Try to demote volumes on the current primary cluster.""" - result = [] - try_demoting = True - for volume in volumes: - demoted = False - if try_demoting and self._is_replicated_type(volume.volume_type): - vol_name = utils.convert_str(volume.name) - try: - self._exec_on_volume(vol_name, self._active_config, - 'mirror_image_demote') - demoted = True - except Exception as e: - LOG.debug('Failed to demote %(volume)s with error: ' - '%(error)s.', - {'volume': volume.name, 'error': e}) - try_demoting = not until_failure - result.append(demoted) - return result - - def 
_get_failover_target_config(self, secondary_id=None): - if not secondary_id: - # In auto mode exclude failback and active - candidates = set(self._target_names).difference( - ('default', self._active_backend_id)) - if not candidates: - raise exception.InvalidReplicationTarget( - reason=_('RBD: No available failover target host.')) - secondary_id = candidates.pop() - return secondary_id, self._get_target_config(secondary_id) - - def failover_host(self, context, volumes, secondary_id=None, groups=None): - """Failover to replication target.""" - LOG.info('RBD driver failover started.') - if not self._is_replication_enabled: - raise exception.UnableToFailOver( - reason=_('RBD: Replication is not enabled.')) - - if secondary_id == 'default': - replication_status = fields.ReplicationStatus.ENABLED - else: - replication_status = fields.ReplicationStatus.FAILED_OVER - - secondary_id, remote = self._get_failover_target_config(secondary_id) - - # Try to demote the volumes first - demotion_results = self._demote_volumes(volumes) - # Do the failover taking into consideration if they have been demoted - updates = [self._failover_volume(volume, remote, is_demoted, - replication_status) - for volume, is_demoted in zip(volumes, demotion_results)] - self._active_backend_id = secondary_id - self._active_config = remote - LOG.info('RBD driver failover completed.') - return secondary_id, updates, [] - - def ensure_export(self, context, volume): - """Synchronously recreates an export for a logical volume.""" - pass - - def create_export(self, context, volume, connector): - """Exports the volume.""" - pass - - def remove_export(self, context, volume): - """Removes an export for a logical volume.""" - pass - - def _get_keyring_contents(self): - # NOTE(danpawlik) If keyring is not provided in Cinder configuration, - # os-brick library will take keyring from default path. 
- keyring_file = self.configuration.rbd_keyring_conf - keyring_data = None - try: - if os.path.isfile(keyring_file): - with open(keyring_file, 'r') as k_file: - keyring_data = k_file.read() - except IOError: - LOG.debug('Cannot read RBD keyring file: %s.', keyring_file) - - return keyring_data - - def initialize_connection(self, volume, connector): - hosts, ports = self._get_mon_addrs() - data = { - 'driver_volume_type': 'rbd', - 'data': { - 'name': '%s/%s' % (self.configuration.rbd_pool, - volume.name), - 'hosts': hosts, - 'ports': ports, - 'cluster_name': self.configuration.rbd_cluster_name, - 'auth_enabled': (self.configuration.rbd_user is not None), - 'auth_username': self.configuration.rbd_user, - 'secret_type': 'ceph', - 'secret_uuid': self.configuration.rbd_secret_uuid, - 'volume_id': volume.id, - "discard": True, - 'keyring': self._get_keyring_contents(), - } - } - LOG.debug('connection data: %s', data) - return data - - def terminate_connection(self, volume, connector, **kwargs): - pass - - def _parse_location(self, location): - prefix = 'rbd://' - if not location.startswith(prefix): - reason = _('Not stored in rbd') - raise exception.ImageUnacceptable(image_id=location, reason=reason) - pieces = [urllib.parse.unquote(loc) - for loc in location[len(prefix):].split('/')] - if any(map(lambda p: p == '', pieces)): - reason = _('Blank components') - raise exception.ImageUnacceptable(image_id=location, reason=reason) - if len(pieces) != 4: - reason = _('Not an rbd snapshot') - raise exception.ImageUnacceptable(image_id=location, reason=reason) - return pieces - - def _get_fsid(self): - with RADOSClient(self) as client: - return client.cluster.get_fsid() - - def _is_cloneable(self, image_location, image_meta): - try: - fsid, pool, image, snapshot = self._parse_location(image_location) - except exception.ImageUnacceptable as e: - LOG.debug('not cloneable: %s.', e) - return False - - if self._get_fsid() != fsid: - LOG.debug('%s is in a different ceph cluster.', 
image_location) - return False - - if image_meta['disk_format'] != 'raw': - LOG.debug("rbd image clone requires image format to be " - "'raw' but image %(image)s is '%(format)s'", - {"image": image_location, - "format": image_meta['disk_format']}) - return False - - # check that we can read the image - try: - with RBDVolumeProxy(self, image, - pool=pool, - snapshot=snapshot, - read_only=True): - return True - except self.rbd.Error as e: - LOG.debug('Unable to open image %(loc)s: %(err)s.', - dict(loc=image_location, err=e)) - return False - - def clone_image(self, context, volume, - image_location, image_meta, - image_service): - if image_location: - # Note: image_location[0] is glance image direct_url. - # image_location[1] contains the list of all locations (including - # direct_url) or None if show_multiple_locations is False in - # glance configuration. - if image_location[1]: - url_locations = [location['url'] for - location in image_location[1]] - else: - url_locations = [image_location[0]] - - # iterate all locations to look for a cloneable one. 
- for url_location in url_locations: - if url_location and self._is_cloneable( - url_location, image_meta): - _prefix, pool, image, snapshot = \ - self._parse_location(url_location) - volume_update = self._clone(volume, pool, image, snapshot) - volume_update['provider_location'] = None - self._resize(volume) - return volume_update, True - return ({}, False) - - def _image_conversion_dir(self): - tmpdir = (CONF.image_conversion_dir or - tempfile.gettempdir()) - - # ensure temporary directory exists - if not os.path.exists(tmpdir): - os.makedirs(tmpdir) - - return tmpdir - - def copy_image_to_volume(self, context, volume, image_service, image_id): - - tmp_dir = self._image_conversion_dir() - - with tempfile.NamedTemporaryFile(dir=tmp_dir) as tmp: - image_utils.fetch_to_raw(context, image_service, image_id, - tmp.name, - self.configuration.volume_dd_blocksize, - size=volume.size) - - self.delete_volume(volume) - - chunk_size = self.configuration.rbd_store_chunk_size * units.Mi - order = int(math.log(chunk_size, 2)) - # keep using the command line import instead of librbd since it - # detects zeroes to preserve sparseness in the image - args = ['rbd', 'import', - '--pool', self.configuration.rbd_pool, - '--order', order, - tmp.name, volume.name, - '--new-format'] - args.extend(self._ceph_args()) - self._try_execute(*args) - self._resize(volume) - # We may need to re-enable replication because we have deleted the - # original image and created a new one using the command line import. 
- try: - self._enable_replication_if_needed(volume) - except Exception: - err_msg = (_('Failed to enable image replication')) - raise exception.ReplicationError(reason=err_msg, - volume_id=volume.id) - - def copy_volume_to_image(self, context, volume, image_service, image_meta): - tmp_dir = self._image_conversion_dir() - tmp_file = os.path.join(tmp_dir, - volume.name + '-' + image_meta['id']) - with fileutils.remove_path_on_error(tmp_file): - args = ['rbd', 'export', - '--pool', self.configuration.rbd_pool, - volume.name, tmp_file] - args.extend(self._ceph_args()) - self._try_execute(*args) - image_utils.upload_volume(context, image_service, - image_meta, tmp_file) - os.unlink(tmp_file) - - def extend_volume(self, volume, new_size): - """Extend an existing volume.""" - old_size = volume.size - - try: - size = int(new_size) * units.Gi - self._resize(volume, size=size) - except Exception: - msg = _('Failed to Extend Volume ' - '%(volname)s') % {'volname': volume.name} - LOG.error(msg) - raise exception.VolumeBackendAPIException(data=msg) - - LOG.debug("Extend volume from %(old_size)s GB to %(new_size)s GB.", - {'old_size': old_size, 'new_size': new_size}) - - def manage_existing(self, volume, existing_ref): - """Manages an existing image. - - Renames the image name to match the expected name for the volume. - Error checking done by manage_existing_get_size is not repeated. - - :param volume: - volume ref info to be set - :param existing_ref: - existing_ref is a dictionary of the form: - {'source-name': } - """ - # Raise an exception if we didn't find a suitable rbd image. - with RADOSClient(self) as client: - rbd_name = existing_ref['source-name'] - self.RBDProxy().rename(client.ioctx, - utils.convert_str(rbd_name), - utils.convert_str(volume.name)) - - def manage_existing_get_size(self, volume, existing_ref): - """Return size of an existing image for manage_existing. 
- - :param volume: - volume ref info to be set - :param existing_ref: - existing_ref is a dictionary of the form: - {'source-name': } - """ - - # Check that the reference is valid - if 'source-name' not in existing_ref: - reason = _('Reference must contain source-name element.') - raise exception.ManageExistingInvalidReference( - existing_ref=existing_ref, reason=reason) - - rbd_name = utils.convert_str(existing_ref['source-name']) - - with RADOSClient(self) as client: - # Raise an exception if we didn't find a suitable rbd image. - try: - rbd_image = self.rbd.Image(client.ioctx, rbd_name) - except self.rbd.ImageNotFound: - kwargs = {'existing_ref': rbd_name, - 'reason': 'Specified rbd image does not exist.'} - raise exception.ManageExistingInvalidReference(**kwargs) - - image_size = rbd_image.size() - rbd_image.close() - - # RBD image size is returned in bytes. Attempt to parse - # size as a float and round up to the next integer. - try: - convert_size = int(math.ceil(float(image_size) / units.Gi)) - return convert_size - except ValueError: - exception_message = (_("Failed to manage existing volume " - "%(name)s, because reported size " - "%(size)s was not a floating-point" - " number.") - % {'name': rbd_name, - 'size': image_size}) - raise exception.VolumeBackendAPIException( - data=exception_message) - - def unmanage(self, volume): - pass - - def update_migrated_volume(self, ctxt, volume, new_volume, - original_volume_status): - """Return model update from RBD for migrated volume. - - This method should rename the back-end volume name(id) on the - destination host back to its original name(id) on the source host. 
- - :param ctxt: The context used to run the method update_migrated_volume - :param volume: The original volume that was migrated to this backend - :param new_volume: The migration volume object that was created on - this backend as part of the migration process - :param original_volume_status: The status of the original volume - :returns: model_update to update DB with any needed changes - """ - name_id = None - provider_location = None - - existing_name = CONF.volume_name_template % new_volume.id - wanted_name = CONF.volume_name_template % volume.id - with RADOSClient(self) as client: - try: - self.RBDProxy().rename(client.ioctx, - utils.convert_str(existing_name), - utils.convert_str(wanted_name)) - except self.rbd.ImageNotFound: - LOG.error('Unable to rename the logical volume ' - 'for volume %s.', volume.id) - # If the rename fails, _name_id should be set to the new - # volume id and provider_location should be set to the - # one from the new volume as well. - name_id = new_volume._name_id or new_volume.id - provider_location = new_volume['provider_location'] - return {'_name_id': name_id, 'provider_location': provider_location} - - def migrate_volume(self, context, volume, host): - return (False, None) - - def manage_existing_snapshot_get_size(self, snapshot, existing_ref): - """Return size of an existing image for manage_existing. 
- - :param snapshot: - snapshot ref info to be set - :param existing_ref: - existing_ref is a dictionary of the form: - {'source-name': } - """ - # Check that the reference is valid - if not isinstance(existing_ref, dict): - existing_ref = {"source-name": existing_ref} - if 'source-name' not in existing_ref: - reason = _('Reference must contain source-name element.') - raise exception.ManageExistingInvalidReference( - existing_ref=existing_ref, reason=reason) - - volume_name = utils.convert_str(snapshot.volume_name) - snapshot_name = utils.convert_str(existing_ref['source-name']) - - with RADOSClient(self) as client: - # Raise an exception if we didn't find a suitable rbd image. - try: - rbd_snapshot = self.rbd.Image(client.ioctx, volume_name, - snapshot=snapshot_name) - except self.rbd.ImageNotFound: - kwargs = {'existing_ref': snapshot_name, - 'reason': 'Specified snapshot does not exist.'} - raise exception.ManageExistingInvalidReference(**kwargs) - - snapshot_size = rbd_snapshot.size() - rbd_snapshot.close() - - # RBD image size is returned in bytes. Attempt to parse - # size as a float and round up to the next integer. - try: - convert_size = int(math.ceil(float(snapshot_size) / units.Gi)) - return convert_size - except ValueError: - exception_message = (_("Failed to manage existing snapshot " - "%(name)s, because reported size " - "%(size)s was not a floating-point" - " number.") - % {'name': snapshot_name, - 'size': snapshot_size}) - raise exception.VolumeBackendAPIException( - data=exception_message) - - def manage_existing_snapshot(self, snapshot, existing_ref): - """Manages an existing snapshot. - - Renames the snapshot name to match the expected name for the snapshot. - Error checking done by manage_existing_get_size is not repeated. 
- - :param snapshot: - snapshot ref info to be set - :param existing_ref: - existing_ref is a dictionary of the form: - {'source-name': } - """ - if not isinstance(existing_ref, dict): - existing_ref = {"source-name": existing_ref} - volume_name = utils.convert_str(snapshot.volume_name) - with RBDVolumeProxy(self, volume_name) as volume: - snapshot_name = existing_ref['source-name'] - volume.rename_snap(utils.convert_str(snapshot_name), - utils.convert_str(snapshot.name)) diff --git a/cinder/volume/drivers/reduxio/__init__.py b/cinder/volume/drivers/reduxio/__init__.py deleted file mode 100644 index e69de29bb..000000000 diff --git a/cinder/volume/drivers/reduxio/rdx_cli_api.py b/cinder/volume/drivers/reduxio/rdx_cli_api.py deleted file mode 100644 index 208569196..000000000 --- a/cinder/volume/drivers/reduxio/rdx_cli_api.py +++ /dev/null @@ -1,543 +0,0 @@ -# Copyright (c) 2016 Reduxio Systems -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
-"""Reduxio CLI intrface class for Reduxio Cinder Driver.""" -import datetime -import json - -import eventlet -from oslo_log import log as logging -import paramiko -import six - -from cinder import exception -from cinder.i18n import _ -from cinder import utils - - -CONNECTION_RETRY_NUM = 5 - -VOLUMES = "volumes" -HOSTS = "hosts" -HG_DIR = "hostgroups" -NEW_COMMAND = "new" -UPDATE_COMMAND = "update" -LS_COMMAND = "ls" -DELETE_COMMAND = "delete" -LIST_ASSIGN_CMD = "list-assignments" -CLI_DATE_FORMAT = "%m-%Y-%d %H:%M:%S" -CONNECT_LOCK_NAME = "reduxio_cli_Lock" -CLI_CONNECTION_RETRY_SLEEP = 5 -CLI_SSH_CMD_TIMEOUT = 20 -CLI_CONNECT_TIMEOUT = 50 - -LOG = logging.getLogger(__name__) - - -class RdxApiCmd(object): - """A Builder class for Reduxio CLI Command.""" - - def __init__(self, cmd_prefix, argument=None, flags=None, - boolean_flags=None, force=None): - """Initialize a command object.""" - if isinstance(cmd_prefix, list): - cmd_prefix = map(lambda x: x.strip(), cmd_prefix) - self.cmd = " ".join(cmd_prefix) - else: - self.cmd = cmd_prefix - - self.arg = None - self.flags = {} - self.booleanFlags = {} - - if argument is not None: - self.set_argument(argument) - - if flags is not None: - if isinstance(flags, list): - for flag in flags: - self.add_flag(flag[0], flag[1]) - else: - for key in flags: - self.add_flag(key, flags[key]) - - if boolean_flags is not None: - for flag in boolean_flags: - self.add_boolean_flag(flag) - - if force: - self.force_command() - - def set_argument(self, value): - """Set a command argument.""" - self.arg = value - - def add_flag(self, name, value): - """Set a flag and its value.""" - if value is not None: - self.flags[name.strip()] = value - - def add_boolean_flag(self, name): - """Set a boolean flag.""" - if name is not None: - self.booleanFlags[name.strip()] = True - - def build(self): - """Return the command line which represents the command object.""" - argument_str = "" if self.arg is None else self.arg - flags_str = "" - - for key in 
sorted(self.flags): - flags_str += (" -%(flag)s \"%(value)s\"" % - {"flag": key, "value": self.flags[key]}) - - for booleanFlag in sorted(self.booleanFlags): - flags_str += " -%s" % booleanFlag - - return ("%(cmd)s %(arg)s%(flag)s" % - {"cmd": self.cmd, "arg": argument_str, "flag": flags_str}) - - def force_command(self): - """Add a force flag.""" - self.add_boolean_flag("force") - - def set_json_output(self): - """Add a json output flag.""" - self.add_flag("output", "json") - - def __str__(self): - """Override toString.""" - return self.build() - - def __repr__(self): - return self.__str__() - - def __eq__(self, other): - """Compare commands based on their str command representations.""" - if isinstance(other, self.__class__): - return six.text_type(self).strip() == six.text_type(other).strip() - else: - return False - - -class ReduxioAPI(object): - def __init__(self, host, user, password): - """Get credentials and connects to Reduxio CLI.""" - self.host = host - self.user = user - self.password = password - self.ssh = None # type: paramiko.SSHClient - self._connect() - - def _reconnect_if_needed(self): - if not self.connected: - self._connect() - - def _connect(self): - self.connected = False - self.ssh = paramiko.SSHClient() - self.ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy()) - try: - self.ssh.connect(self.host, username=self.user, - password=self.password, - timeout=CLI_CONNECT_TIMEOUT) - self.connected = True - except paramiko.ssh_exception.AuthenticationException: - raise exception.RdxAPIConnectionException(_( - "Authentication Error. Check login credentials")) - except Exception: - LOG.exception("Exception in connecting to Reduxio CLI") - raise exception.RdxAPIConnectionException(_( - "Failed to create ssh connection to Reduxio." - " Please check network connection or Reduxio hostname/IP.")) - - @utils.synchronized(CONNECT_LOCK_NAME, external=True) - def _run_cmd(self, cmd): - """Run the command and returns a dictionary of the response. 
- - On failure, the function retries the command. After retry threshold - the function throws an error. - """ - cmd.set_json_output() - LOG.info("Running cmd: %s", cmd) - success = False - for x in range(1, CONNECTION_RETRY_NUM): - try: - self._reconnect_if_needed() - stdin, stdout, stderr = self.ssh.exec_command( # nosec - # command input from authorized users on command line - command=six.text_type(cmd), timeout=CLI_SSH_CMD_TIMEOUT) - success = True - break - except Exception: - LOG.exception("Error in running Reduxio CLI command") - LOG.error( - "retrying(%(cur)s/%(overall)s)", - {'cur': x, 'overall': CONNECTION_RETRY_NUM} - ) - self.connected = False - eventlet.sleep(CLI_CONNECTION_RETRY_SLEEP) - - if not success: - raise exception.RdxAPIConnectionException(_( - "Failed to connect to Redxuio CLI." - " Check your username, password or Reduxio Hostname/IP")) - - str_out = stdout.read() - # Python 2.7/3.4 compatibility with the decode method - if hasattr(str_out, "decode"): - data = json.loads(str_out.decode("utf8")) - else: - data = json.loads(str_out) - - if stdout.channel.recv_exit_status() != 0: - LOG.error("Failed running cli command: %s", data["msg"]) - raise exception.RdxAPICommandException(data["msg"]) - - LOG.debug("Command output is: %s", str_out) - - return data["data"] - - @staticmethod - def _utc_to_cli_date(utc_date): - if utc_date is None: - return None - date = datetime.datetime.fromtimestamp(utc_date) - return date.strftime(CLI_DATE_FORMAT) - - # Volumes - - def create_volume(self, name, size, description=None, historypolicy=None, - blocksize=None): - """Create a new volume.""" - cmd = RdxApiCmd(cmd_prefix=[VOLUMES, NEW_COMMAND]) - - cmd.set_argument(name) - cmd.add_flag("size", size) - cmd.add_flag("description", description) - cmd.add_flag("policy", historypolicy) - cmd.add_flag("blocksize", blocksize) - - self._run_cmd(cmd) - - def list_volumes(self): - """List all volumes.""" - return self._run_cmd(RdxApiCmd(cmd_prefix=[VOLUMES, LS_COMMAND]))[ 
- "volumes"] - - def list_clones(self, name): - """List all clones of a volume.""" - cmd = RdxApiCmd(cmd_prefix=[VOLUMES, "list-clones"]) - - cmd.set_argument(name) - - return self._run_cmd(cmd) - - def find_volume_by_name(self, name): - """Get a single volume by its name.""" - cmd = RdxApiCmd(cmd_prefix=[LS_COMMAND, VOLUMES + "/" + name]) - - return self._run_cmd(cmd)["volumes"][0] - - def find_volume_by_wwid(self, wwid): - """Get a single volume by its WWN.""" - cmd = RdxApiCmd(cmd_prefix=[VOLUMES, "find-by-wwid"]) - - cmd.set_argument(wwid) - - return self._run_cmd(cmd) - - def delete_volume(self, name): - """Delete a volume.""" - cmd = RdxApiCmd(cmd_prefix=[VOLUMES, DELETE_COMMAND]) - - cmd.set_argument(name) - cmd.force_command() - - return self._run_cmd(cmd) - - def update_volume(self, name, new_name=None, description=None, size=None, - history_policy=None): - """Update volume's properties. None value keeps the current value.""" - cmd = RdxApiCmd(cmd_prefix=[VOLUMES, UPDATE_COMMAND]) - - cmd.set_argument(name) - cmd.add_flag("size", size) - cmd.add_flag("new-name", new_name) - cmd.add_flag("policy", history_policy) - cmd.add_flag("size", size) - cmd.add_flag("description", description) - - self._run_cmd(cmd) - - def revert_volume(self, name, utc_date=None, bookmark_name=None): - """Revert a volume to a specific date or by a bookmark.""" - cmd = RdxApiCmd(cmd_prefix=[VOLUMES, "revert"]) - - cmd.set_argument(name) - cmd.add_flag("timestamp", ReduxioAPI._utc_to_cli_date(utc_date)) - cmd.add_flag("bookmark", bookmark_name) - cmd.force_command() - - return self._run_cmd(cmd) - - def clone_volume(self, parent_name, clone_name, utc_date=None, - str_date=None, bookmark_name=None, description=None): - """Clone a volume our of an existing volume.""" - cmd = RdxApiCmd(cmd_prefix=[VOLUMES, "clone"]) - - cmd.set_argument(parent_name) - cmd.add_flag("name", clone_name) - if str_date is not None: - cmd.add_flag("timestamp", str_date) - else: - cmd.add_flag("timestamp", 
ReduxioAPI._utc_to_cli_date(utc_date)) - cmd.add_flag("bookmark", bookmark_name) - cmd.add_flag("description", description) - - self._run_cmd(cmd) - - def list_vol_bookmarks(self, vol): - """List all bookmarks of a volume.""" - cmd = RdxApiCmd(cmd_prefix=[VOLUMES, "list-bookmarks"]) - - cmd.set_argument(vol) - - return self._run_cmd(cmd) - - def add_vol_bookmark(self, vol, bm_name, utc_date=None, str_date=None, - bm_type=None): - """Create a new bookmark for a given volume.""" - cmd = RdxApiCmd(cmd_prefix=[VOLUMES, "bookmark"]) - - cmd.set_argument(vol) - cmd.add_flag("name", bm_name) - if str_date is not None: - cmd.add_flag("timestamp", str_date) - else: - cmd.add_flag("timestamp", ReduxioAPI._utc_to_cli_date(utc_date)) - cmd.add_flag("type", bm_type) - - return self._run_cmd(cmd) - - def delete_vol_bookmark(self, vol, bm_name): - """Delete a volume's bookmark.""" - cmd = RdxApiCmd(cmd_prefix=[VOLUMES, "delete-bookmark"]) - - cmd.set_argument(vol) - cmd.add_flag("name", bm_name) - - return self._run_cmd(cmd) - - # Hosts - - def list_hosts(self): - """List all hosts.""" - return self._run_cmd(RdxApiCmd(cmd_prefix=[HOSTS, LS_COMMAND]))[ - "hosts"] - - def create_host(self, name, iscsi_name, description=None, user_chap=None, - pwd_chap=None): - """Create a new host.""" - cmd = RdxApiCmd(cmd_prefix=[HOSTS, NEW_COMMAND]) - - cmd.set_argument(name) - cmd.add_flag("iscsi-name", iscsi_name) - cmd.add_flag("description", description) - cmd.add_flag("user-chap", user_chap) - cmd.add_flag("pwd-chap", pwd_chap) - - return self._run_cmd(cmd) - - def delete_host(self, name): - """Delete an existing host.""" - cmd = RdxApiCmd(cmd_prefix=[HOSTS, DELETE_COMMAND]) - - cmd.set_argument(name) - cmd.force_command() - - return self._run_cmd(cmd) - - def update_host(self, name, new_name=None, description=None, - user_chap=None, pwd_chap=None): - """Update host's attributes.""" - cmd = RdxApiCmd(cmd_prefix=[HOSTS, UPDATE_COMMAND]) - - cmd.set_argument(name) - cmd.add_flag("new-name", 
new_name) - cmd.add_flag("user-chap", user_chap) - cmd.add_flag("pwd-chap", pwd_chap) - cmd.add_flag("description", description) - - return self._run_cmd(cmd) - - # HostGroups - - def list_hostgroups(self): - """List all hostgroups.""" - return self._run_cmd(RdxApiCmd(cmd_prefix=[HG_DIR, LS_COMMAND]))[ - "hostgroups"] - - def create_hostgroup(self, name, description=None): - """Create a new hostgroup.""" - cmd = RdxApiCmd(cmd_prefix=[HG_DIR, NEW_COMMAND]) - - cmd.set_argument(name) - cmd.add_flag("description", description) - - return self._run_cmd(cmd) - - def delete_hostgroup(self, name): - """Delete an existing hostgroup.""" - cmd = RdxApiCmd(cmd_prefix=[HG_DIR, DELETE_COMMAND]) - - cmd.set_argument(name) - cmd.force_command() - - return self._run_cmd(cmd) - - def update_hostgroup(self, name, new_name=None, description=None): - """Update an existing hostgroup's attributes.""" - cmd = RdxApiCmd(cmd_prefix=[HG_DIR, UPDATE_COMMAND]) - - cmd.set_argument(name) - cmd.add_flag("new-name", new_name) - cmd.add_flag("description", description) - - return self._run_cmd(cmd) - - def list_hosts_in_hostgroup(self, name): - """List all hosts that are part of the given hostgroup.""" - cmd = RdxApiCmd(cmd_prefix=[HG_DIR, "list-hosts"]) - cmd.set_argument(name) - - return self._run_cmd(cmd) - - def add_host_to_hostgroup(self, name, host_name): - """Join a host to a hostgroup.""" - cmd = RdxApiCmd(cmd_prefix=[HG_DIR, "add-host"]) - cmd.set_argument(name) - cmd.add_flag("host", host_name) - - return self._run_cmd(cmd) - - def remove_host_from_hostgroup(self, name, host_name): - """Remove a host from a hostgroup.""" - cmd = RdxApiCmd(cmd_prefix=[HG_DIR, "remove-host"]) - cmd.set_argument(name) - cmd.add_flag("host", host_name) - - return self._run_cmd(cmd) - - def add_hg_bookmark(self, hg_name, bm_name, utc_date=None, str_date=None, - bm_type=None): - """Bookmark all volumes that are assigned to the hostgroup.""" - cmd = RdxApiCmd(cmd_prefix=[HG_DIR, "add-bookmark"]) - - 
cmd.set_argument(hg_name) - cmd.add_flag("name", bm_name) - if str_date is not None: - cmd.add_flag("timestamp", str_date) - else: - cmd.add_flag("timestamp", ReduxioAPI._utc_to_cli_date(utc_date)) - cmd.add_flag("type", bm_type) - - return self._run_cmd(cmd) - - # Assignments - - def assign(self, vol_name, host_name=None, hostgroup_name=None, lun=None): - """Create an assignment between a volume to host/hostgroup.""" - cmd = RdxApiCmd(cmd_prefix=[VOLUMES, "assign"]) - - cmd.set_argument(vol_name) - cmd.add_flag("host", host_name) - cmd.add_flag("group", hostgroup_name) - cmd.add_flag("lun", lun) - - return self._run_cmd(cmd) - - def unassign(self, vol_name, host_name=None, hostgroup_name=None): - """Unassign a volume from a host/hostgroup.""" - cmd = RdxApiCmd(cmd_prefix=[VOLUMES, "unassign"]) - - cmd.set_argument(vol_name) - cmd.add_flag("host", host_name) - cmd.add_flag("group", hostgroup_name) - - return self._run_cmd(cmd) - - def list_assignments(self, vol=None, host=None, hg=None): - """List all assignments for a given volume/host/hostgroup.""" - cmd = RdxApiCmd(cmd_prefix=[VOLUMES, LIST_ASSIGN_CMD]) - if vol is not None: - cmd.set_argument(vol) - elif host is not None: - cmd = RdxApiCmd(cmd_prefix=[HOSTS, LIST_ASSIGN_CMD]) - cmd.set_argument(host) - elif host is not None: - cmd = RdxApiCmd(cmd_prefix=[HG_DIR, LIST_ASSIGN_CMD]) - cmd.set_argument(hg) - - return self._run_cmd(cmd) - - def get_single_assignment(self, vol, host, raise_on_non_exists=True): - """Get a single assignment details between a host and a volume.""" - for assign in self.list_assignments(vol=vol): - if assign["host"] == host: - return assign - if raise_on_non_exists: - raise exception.RdxAPICommandException(_( - "No such assignment vol:%(vol)s, host:%(host)s") % - {'vol': vol, 'host': host} - ) - else: - return None - - # Settings - - def get_settings(self): - """List all Reduxio settings.""" - cli_hash = self._run_cmd( - RdxApiCmd(cmd_prefix=["settings", LS_COMMAND])) - return 
self._translate_settings_to_hash(cli_hash) - - @staticmethod - def _translate_settings_to_hash(cli_hash): - new_hash = {} - for key, value in cli_hash.items(): - if key == "directories": - continue - if key == "email_recipient_list": - continue - - new_hash[key] = {} - for inter_hash in value: - if "Name" in inter_hash: - new_hash[key][inter_hash["Name"]] = inter_hash["value"] - else: - new_hash[key][inter_hash["name"]] = inter_hash["value"] - return new_hash - - # Statistics - - def get_savings_ratio(self): - """Get current savings ratio.""" - return self._run_cmd(RdxApiCmd(cmd_prefix=["system", "status"]))[0][ - "savings-ratio"] - - def get_current_space_usage(self): - """Get current space usage.""" - cmd = RdxApiCmd(cmd_prefix=["statistics", "space-usage"]) - return self._run_cmd(cmd)[0] diff --git a/cinder/volume/drivers/reduxio/rdx_iscsi_driver.py b/cinder/volume/drivers/reduxio/rdx_iscsi_driver.py deleted file mode 100644 index 0f0373332..000000000 --- a/cinder/volume/drivers/reduxio/rdx_iscsi_driver.py +++ /dev/null @@ -1,505 +0,0 @@ -# Copyright (c) 2016 Reduxio Systems -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
-"""ISCSI Volume driver for Reduxio.""" -import random -import string - -from oslo_log import log as logging -from oslo_utils import units -import six - -from cinder import exception -from cinder.i18n import _ -import cinder.interface as cinder_interface -from cinder import utils as cinder_utils -from cinder.volume.drivers.reduxio import rdx_cli_api -from cinder.volume.drivers.san import san - - -# Constants -REDUXIO_NAME_PREFIX_NUMERIC_REPLACEMENT = "a" -REDUXIO_CLI_HOST_RAND_LENGTH = 12 -REDUXIO_CLI_HOST_PREFIX = 'openstack-' -REDUXIO_STORAGE_PROTOCOL = 'iSCSI' -REDUXIO_VENDOR_NAME = 'Reduxio' -AGENT_TYPE_KEY = "agent-type" -AGENT_TYPE_OPENSTACK = "openstack" -EXTERNAL_VOL_ID_KEY = "external-vol-id" -METADATA_KEY = "metadata" -BACKDATE_META_FIELD = "backdate" -RDX_CLI_MAX_VOL_LENGTH = 31 -DRIVER_VERSION = '1.0.1' -HX550_INITIAL_PHYSICAL_CAPACITY = 32 * 1024 -HX550_CAPACITY_LIMIT = 200 * 1024 - -LOG = logging.getLogger(__name__) - - -@cinder_interface.volumedriver -class ReduxioISCSIDriver(san.SanISCSIDriver): - """OpenStack driver to support Reduxio storage systems. - - Version history: - 1.0.0 - Initial version - volume management, snapshots, BackDating(TM). - 1.0.1 - Capacity stats, fixed error handling for volume deletions. 
- """ - VERSION = '1.0.1' - CI_WIKI_NAME = "Reduxio_HX550_CI" - - # TODO(smcginnis) Remove driver in Queens if CI issues haven't been fixed - SUPPORTED = False - - def __init__(self, *args, **kwargs): - """Initialize Reduxio ISCSI Driver.""" - LOG.info("Initializing Reduxio ISCSI Driver") - super(ReduxioISCSIDriver, self).__init__(*args, **kwargs) - self.rdxApi = None # type: rdx_cli_api.ReduxioAPI - self._stats = {} - - def _check_config(self): - """Ensure that the flags we care about are set.""" - required_config = ['san_ip', 'san_login', 'san_password'] - for attr in required_config: - if not getattr(self.configuration, attr, None): - raise exception.InvalidInput(reason=_('%s is not set.') % attr) - - def do_setup(self, context): - """Set up the driver.""" - self._check_config() - self.rdxApi = rdx_cli_api.ReduxioAPI( - user=self.configuration.san_login, - password=self.configuration.san_password, - host=self.configuration.san_ip) - - # Reduxio entities names (which are also ids) are restricted to at most - # 31 chars. The following function maps cinder unique id to reduxio name. - # Reduxio name also cannot begin with a number, so we replace this number - # with a constant letter. The probability of a uuid conflict is still low. - def _cinder_id_to_rdx(self, cinder_id): - normalized = cinder_id.replace("-", "")[:RDX_CLI_MAX_VOL_LENGTH] - if normalized[0].isalpha(): - return normalized - else: - return REDUXIO_NAME_PREFIX_NUMERIC_REPLACEMENT + normalized[1:] - - # We use Reduxio volume description to represent metadata regarding - # the cinder agent, in order to avoid multi managing the same volume - # from multiple cinder volume. 
- def _create_vol_managed_description(self, volume): - return AGENT_TYPE_OPENSTACK + "_" + volume["name"] - - # This function parses the cli volume description and returns a dictionary - # containing the managed data (agent, cinder_volume_id) - def _get_managed_info(self, cli_vol): - try: - splited = cli_vol["description"].split("_") - if len(splited) == 0: - return {AGENT_TYPE_KEY: None} - return {AGENT_TYPE_KEY: splited[0], - EXTERNAL_VOL_ID_KEY: splited[1]} - except Exception: - return {AGENT_TYPE_KEY: None} - - def _get_existing_volume_ref_name(self, existing_ref): - """Return the volume name of an existing ref.""" - if 'source-name' in existing_ref: - vol_name = existing_ref['source-name'] - else: - reason = _("Reference must contain source-name.") - raise exception.ManageExistingInvalidReference( - existing_ref=existing_ref, - reason=reason) - - return vol_name - - @cinder_utils.trace - def create_volume(self, volume): - """Create a new volume.""" - LOG.info( - "Creating a new volume(%(name)s) with size(%(size)s)", - {'name': volume["name"], 'size': volume["size"]}) - vol_name = self._cinder_id_to_rdx(volume["id"]) - self.rdxApi.create_volume( - name=vol_name, - size=volume["size"], - description=self._create_vol_managed_description(volume) - ) - - @cinder_utils.trace - def manage_existing(self, volume, external_ref): - """Create a new Cinder volume out of an existing Reduxio volume.""" - LOG.info("Manage existing volume(%(cinder_vol)s) " - "from Reduxio Volume(%(rdx_vol)s)", - {'cinder_vol': volume["id"], - 'rdx_vol': external_ref["source-name"]}) - # Get the volume name from the external reference - target_vol_name = self._get_existing_volume_ref_name(external_ref) - - # Get vol info from the volume name obtained from the reference - cli_vol = self.rdxApi.find_volume_by_name(target_vol_name) - managed_info = self._get_managed_info(cli_vol) - - # Check if volume is already managed by OpenStack - if managed_info[AGENT_TYPE_KEY] == AGENT_TYPE_OPENSTACK: - 
raise exception.ManageExistingAlreadyManaged( - volume_ref=volume['id']) - - # If agent-type is not None then raise exception - if not managed_info[AGENT_TYPE_KEY] is None: - msg = _('Volume should have agent-type set as None.') - raise exception.InvalidVolume(reason=msg) - - new_vol_name = self._cinder_id_to_rdx(volume['id']) - - # edit the volume - self.rdxApi.update_volume( - target_vol_name, - new_name=new_vol_name, - description=self._create_vol_managed_description(volume) - ) - - @cinder_utils.trace - def manage_existing_get_size(self, volume, external_ref): - """Return size of an existing volume.""" - target_vol_name = self._get_existing_volume_ref_name(external_ref) - cli_vol = self.rdxApi.find_volume_by_name(target_vol_name) - - return int(cli_vol['size'] / units.Gi) - - @cinder_utils.trace - def unmanage(self, volume): - """Remove the specified volume from Cinder management.""" - LOG.info("Unmanaging volume(%s)", volume["id"]) - vol_name = self._cinder_id_to_rdx(volume['id']) - cli_vol = self.rdxApi.find_volume_by_name(vol_name) - managed_info = self._get_managed_info(cli_vol) - - if managed_info['agent-type'] != AGENT_TYPE_OPENSTACK: - msg = _('Only volumes managed by OpenStack can be unmanaged.') - raise exception.InvalidVolume(reason=msg) - - # update the agent-type to None - self.rdxApi.update_volume(name=vol_name, description="") - - @cinder_utils.trace - def delete_volume(self, volume): - """Delete the specified volume.""" - LOG.info("Deleting volume(%s)", volume["id"]) - try: - self.rdxApi.delete_volume( - name=self._cinder_id_to_rdx(volume["id"])) - except exception.RdxAPICommandException as e: - if "No such volume" not in six.text_type(e): - raise - - @cinder_utils.trace - def create_volume_from_snapshot(self, volume, snapshot): - """Clone volume from snapshot. - - Extend the volume if the size of the volume is more than the snapshot. 
- """ - LOG.info( - "cloning new volume(%(new_vol)s) from snapshot(%(snapshot)s)," - " src volume(%(src_vol)s)", - {'new_vol': volume["name"], - 'snapshot': snapshot["name"], - 'src_vol': snapshot["volume_name"]} - ) - - parent_name = self._cinder_id_to_rdx(snapshot["volume_id"]) - clone_name = self._cinder_id_to_rdx(volume["id"]) - bookmark_name = self._cinder_id_to_rdx(snapshot["id"]) - - self.rdxApi.clone_volume( - parent_name=parent_name, - clone_name=clone_name, - bookmark_name=bookmark_name, - description=self._create_vol_managed_description(volume) - ) - - if volume['size'] > snapshot['volume_size']: - self.rdxApi.update_volume(name=clone_name, size=volume["size"]) - - @cinder_utils.trace - def create_cloned_volume(self, volume, src_vref): - """Clone volume from existing cinder volume. - - :param volume: The clone volume object. - If the volume 'metadata' field contains a 'backdate' key - (If using Cinder CLI, should be provided by --meta flag), - then we create a clone from the specified time. - The 'backdate' metadata value should be in the format of - Reduxio CLI date: mm/dd/yyyy-hh:mm:ss. - for example: '02/17/2015-11:39:00. - Note: Different timezones might be configured - for Reduxio and OpenStack. - The specified date must be related to Reduxio time settings. - - If meta key 'backdate' was not specified, - then we create a clone from the volume's current state. 
- :param src_vref: The source volume to clone from - :return: None - """ - LOG.info("cloning new volume(%(clone)s) from src(%(src)s)", - {'clone': volume['name'], 'src': src_vref['name']}) - parent_name = self._cinder_id_to_rdx(src_vref["id"]) - clone_name = self._cinder_id_to_rdx(volume["id"]) - description = self._create_vol_managed_description(volume) - if BACKDATE_META_FIELD in volume[METADATA_KEY]: - LOG.info("Cloning from backdate %s", - volume[METADATA_KEY][BACKDATE_META_FIELD]) - - self.rdxApi.clone_volume( - parent_name=parent_name, - clone_name=clone_name, - description=description, - str_date=volume[METADATA_KEY][BACKDATE_META_FIELD] - ) - else: - LOG.info("Cloning from now") - self.rdxApi.clone_volume( - parent_name=parent_name, - clone_name=clone_name, - description=description - ) - - if src_vref['size'] < volume['size']: - self.rdxApi.update_volume(name=clone_name, size=volume["size"]) - - @cinder_utils.trace - def create_snapshot(self, snapshot): - """Create a snapshot from an existing Cinder volume. - - We use Reduxio manual bookmark to represent a snapshot. - - :param snapshot: The snapshot object. - If the snapshot 'metadata' field contains a 'backdate' key - (If using Cinder CLI, should be provided by --meta flag), - then we create a snapshot from the specified time. - The 'backdate' metadata value should be in the format of - Reduxio CLI date: mm/dd/yyyy-hh:mm:ss. - for example: '02/17/2015-11:39:00'. - Note: Different timezones might be configured - for Reduxio and OpenStack. - The specified date must be related to Reduxio time settings. - - If meta key 'backdate' was not specified, then we create a snapshot - from the volume's current state. 
- :return: None - """ - LOG.info( - "Creating snapshot(%(snap)s) from volume(%(vol)s)", - {'snap': snapshot['name'], 'vol': snapshot['volume_name']}) - cli_vol_name = self._cinder_id_to_rdx(snapshot['volume_id']) - cli_bookmark_name = self._cinder_id_to_rdx(snapshot['id']) - bookmark_type = "manual" - if BACKDATE_META_FIELD in snapshot[METADATA_KEY]: - self.rdxApi.add_vol_bookmark(vol=cli_vol_name, - bm_name=cli_bookmark_name, - bm_type=bookmark_type, - str_date=snapshot[METADATA_KEY][ - BACKDATE_META_FIELD] - ) - else: - self.rdxApi.add_vol_bookmark(vol=cli_vol_name, - bm_name=cli_bookmark_name, - bm_type=bookmark_type) - - @cinder_utils.trace - def delete_snapshot(self, snapshot): - """Delete a snapshot.""" - LOG.info("Deleting snapshot(%(snap)s) from volume(%(vol)s)", - {'snap': snapshot['name'], 'vol': snapshot['volume_name']}) - - volume_name = self._cinder_id_to_rdx(snapshot['volume_id']) - bookmark_name = self._cinder_id_to_rdx(snapshot['id']) - try: - self.rdxApi.delete_vol_bookmark(vol=volume_name, - bm_name=bookmark_name) - except exception.RdxAPICommandException as e: - if "No such bookmark" not in six.text_type(e): - raise - - @cinder_utils.trace - def get_volume_stats(self, refresh=False): - """Get Reduxio Storage attributes.""" - if refresh: - backend_name = self.configuration.safe_get( - 'volume_backend_name') or self.__class__.__name__ - ratio = self.rdxApi.get_savings_ratio() - total = HX550_INITIAL_PHYSICAL_CAPACITY * ratio - - if total > HX550_CAPACITY_LIMIT: - total = HX550_CAPACITY_LIMIT - - current_space_usage = self.rdxApi.get_current_space_usage() - physical_used = current_space_usage["physical_total"] / units.Gi - free = (HX550_INITIAL_PHYSICAL_CAPACITY - physical_used) * ratio - - if free > HX550_CAPACITY_LIMIT: - free = HX550_CAPACITY_LIMIT - - self._stats = { - 'volume_backend_name': backend_name, - 'vendor_name': REDUXIO_VENDOR_NAME, - 'driver_version': DRIVER_VERSION, - 'storage_protocol': REDUXIO_STORAGE_PROTOCOL, - 
'consistencygroup_support': False, - 'pools': [{ - "pool_name": backend_name, - "total_capacity_gb": total, - "free_capacity_gb": free, - "reserved_percentage": - self.configuration.reserved_percentage, - "QoS_support": False, - 'multiattach': False - }]} - - return self._stats - - @cinder_utils.trace - def extend_volume(self, volume, new_size): - """Extend an existing volume.""" - volume_name = self._cinder_id_to_rdx(volume['id']) - self.rdxApi.update_volume(volume_name, size=new_size) - - @cinder_utils.trace - def _generate_initiator_name(self): - """Generates random host name for reduxio cli.""" - char_set = string.ascii_lowercase - rand_str = ''.join( - random.sample(char_set, REDUXIO_CLI_HOST_RAND_LENGTH)) - return "%s%s" % (REDUXIO_CLI_HOST_PREFIX, rand_str) - - @cinder_utils.trace - def _get_target_portal(self, settings, controller, port): - network = "iscsi_network%s" % port - iscsi_port = six.text_type( - settings["network_configuration"]["iscsi_target_tcp_port"]) - controller_port_key = ("controller_%(controller)s_port_%(port)s" - % {"controller": controller, "port": port}) - return settings[network][controller_port_key] + ":" + iscsi_port - - @cinder_utils.trace - def initialize_connection(self, volume, connector): - """Driver entry point to attach a volume to an instance.""" - LOG.info( - "Assigning volume(%(vol)s) with initiator(%(initiator)s)", - {'vol': volume['name'], 'initiator': connector['initiator']}) - - initiator_iqn = connector['initiator'] - vol_rdx_name = self._cinder_id_to_rdx(volume["id"]) - initiator_name = None - found = False - - # Get existing cli initiator name by its iqn, or create a new one - # if it doesnt exist - for host in self.rdxApi.list_hosts(): - if host["iscsi_name"] == initiator_iqn: - LOG.info("initiator exists in Reduxio") - found = True - initiator_name = host["name"] - break - if not found: - LOG.info("Initiator doesn't exist in Reduxio, Creating it") - initiator_name = self._generate_initiator_name() - 
self.rdxApi.create_host(name=initiator_name, - iscsi_name=initiator_iqn) - - existing_assignment = self.rdxApi.get_single_assignment( - vol=vol_rdx_name, host=initiator_name, raise_on_non_exists=False) - - if existing_assignment is None: - # Create assignment between the host and the volume - LOG.info("Creating assignment") - self.rdxApi.assign(vol_rdx_name, host_name=initiator_name) - else: - LOG.debug("Assignment already exists") - - # Query cli settings in order to fill requested output - settings = self.rdxApi.get_settings() - - target_iqn = settings["network_configuration"]["iscsi_target_iqn"] - target_portal = self._get_target_portal(settings, 1, 1) - - if existing_assignment is None: - target_lun = self.rdxApi.get_single_assignment( - vol=vol_rdx_name, - host=initiator_name)["lun"] - else: - target_lun = existing_assignment["lun"] - - properties = { - 'driver_volume_type': 'iscsi', - 'data': { - 'target_discovered': False, - 'discard': False, - 'volume_id': volume['id'], - 'target_iqn': target_iqn, - 'target_portal': target_portal, - 'target_lun': target_lun, - } - } - - # if iscsi_network2 is not available, - # than multipath is disabled (ReduxioVE) - connector_multipath = connector.get("multipath", False) - rdx_multipath = "iscsi_network2" in settings - if rdx_multipath and connector_multipath: - target_portal2 = self._get_target_portal(settings, 2, 1) - target_portal3 = self._get_target_portal(settings, 1, 2) - target_portal4 = self._get_target_portal(settings, 2, 2) - - properties['data']['target_portals'] = [ - target_portal, - target_portal2, - target_portal3, - target_portal4 - ] - # Reduxio is a single iqn storage - properties['data']['target_iqns'] = [target_iqn] * 4 - # Lun num is the same for each path - properties['data']['target_luns'] = [target_lun] * 4 - - LOG.info("Assignment complete. 
Assignment details: %s", - properties) - - return properties - - @cinder_utils.trace - def terminate_connection(self, volume, connector, **kwargs): - """Driver entry point to unattach a volume from an instance.""" - iqn = connector['initiator'] - LOG.info("Deleting assignment volume(%(vol)s) with " - "initiator(%(initiator)s)", - {'vol': volume['name'], 'initiator': iqn}) - - for cli_host in self.rdxApi.list_hosts(): - if cli_host["iscsi_name"] == iqn: - try: - self.rdxApi.unassign( - self._cinder_id_to_rdx(volume["id"]), - host_name=cli_host["name"] - ) - except exception.RdxAPICommandException as e: - error_msg = six.text_type(e) - if "No such assignment" not in error_msg: - raise - else: - LOG.debug("Assignment doesn't exist") - return - - LOG.warning("Did not find matching reduxio host for initiator %s", - iqn) diff --git a/cinder/volume/drivers/remotefs.py b/cinder/volume/drivers/remotefs.py deleted file mode 100644 index ba43893cf..000000000 --- a/cinder/volume/drivers/remotefs.py +++ /dev/null @@ -1,1712 +0,0 @@ -# Copyright (c) 2012 NetApp, Inc. -# Copyright (c) 2014 Red Hat, Inc. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -import collections -import hashlib -import inspect -import json -import os -import re -import shutil -import tempfile -import time - -from oslo_config import cfg -from oslo_log import log as logging -from oslo_utils import units -import six - -from cinder import compute -from cinder import coordination -from cinder import db -from cinder import exception -from cinder.i18n import _ -from cinder.image import image_utils -from cinder.objects import fields -from cinder import utils -from cinder.volume import configuration -from cinder.volume import driver -from cinder.volume import utils as volume_utils - -LOG = logging.getLogger(__name__) - - -nas_opts = [ - cfg.StrOpt('nas_host', - default='', - help='IP address or Hostname of NAS system.', - deprecated_name='nas_ip'), - cfg.StrOpt('nas_login', - default='admin', - help='User name to connect to NAS system.'), - cfg.StrOpt('nas_password', - default='', - help='Password to connect to NAS system.', - secret=True), - cfg.PortOpt('nas_ssh_port', - default=22, - help='SSH port to use to connect to NAS system.'), - cfg.StrOpt('nas_private_key', - default='', - help='Filename of private key to use for SSH authentication.'), - cfg.StrOpt('nas_secure_file_operations', - default='auto', - help=('Allow network-attached storage systems to operate in a ' - 'secure environment where root level access is not ' - 'permitted. If set to False, access is as the root user ' - 'and insecure. If set to True, access is not as root. ' - 'If set to auto, a check is done to determine if this is ' - 'a new installation: True is used if so, otherwise ' - 'False. Default is auto.')), - cfg.StrOpt('nas_secure_file_permissions', - default='auto', - help=('Set more secure file permissions on network-attached ' - 'storage volume files to restrict broad other/world ' - 'access. If set to False, volumes are created with open ' - 'permissions. If set to True, volumes are created with ' - 'permissions for the cinder user and group (660). 
If ' - 'set to auto, a check is done to determine if ' - 'this is a new installation: True is used if so, ' - 'otherwise False. Default is auto.')), - cfg.StrOpt('nas_share_path', - default='', - help=('Path to the share to use for storing Cinder volumes. ' - 'For example: "/srv/export1" for an NFS server export ' - 'available at 10.0.5.10:/srv/export1 .')), - cfg.StrOpt('nas_mount_options', - help=('Options used to mount the storage backend file system ' - 'where Cinder volumes are stored.')), -] - -volume_opts = [ - cfg.StrOpt('nas_volume_prov_type', - default='thin', - choices=['thin', 'thick'], - help=('Provisioning type that will be used when ' - 'creating volumes.')), -] - -CONF = cfg.CONF -CONF.register_opts(nas_opts, group=configuration.SHARED_CONF_GROUP) -CONF.register_opts(volume_opts, group=configuration.SHARED_CONF_GROUP) - - -# TODO(bluex): remove when drivers stop using it -def locked_volume_id_operation(f, external=False): - """Lock decorator for volume operations. - - Takes a named lock prior to executing the operation. The lock is named - with the id of the volume. This lock can be used by driver methods - to prevent conflicts with other operations modifying the same volume. - - May be applied to methods that take a 'volume' or 'snapshot' argument. 
- """ - - def lvo_inner1(inst, *args, **kwargs): - lock_tag = inst.driver_prefix - call_args = inspect.getcallargs(f, inst, *args, **kwargs) - - if call_args.get('volume'): - volume_id = call_args['volume'].id - elif call_args.get('snapshot'): - volume_id = call_args['snapshot'].volume.id - else: - err_msg = _('The decorated method must accept either a volume or ' - 'a snapshot object') - raise exception.VolumeBackendAPIException(data=err_msg) - - @utils.synchronized('%s-%s' % (lock_tag, volume_id), - external=external) - def lvo_inner2(): - return f(inst, *args, **kwargs) - return lvo_inner2() - return lvo_inner1 - - -class RemoteFSDriver(driver.BaseVD): - """Common base for drivers that work like NFS.""" - - driver_volume_type = None - driver_prefix = 'remotefs' - volume_backend_name = None - vendor_name = 'Open Source' - SHARE_FORMAT_REGEX = r'.+:/.+' - - def __init__(self, *args, **kwargs): - super(RemoteFSDriver, self).__init__(*args, **kwargs) - self.shares = {} - self._mounted_shares = [] - self._execute_as_root = True - self._is_voldb_empty_at_startup = kwargs.pop('is_vol_db_empty', None) - self._supports_encryption = False - - # We let the drivers inheriting this specify - # whether thin provisioning is supported or not. - self._thin_provisioning_support = False - - if self.configuration: - self.configuration.append_config_values(nas_opts) - self.configuration.append_config_values(volume_opts) - - def check_for_setup_error(self): - """Just to override parent behavior.""" - pass - - @utils.trace - def initialize_connection(self, volume, connector): - """Allow connection to connector and return connection info. 
- - :param volume: volume reference - :param connector: connector reference - """ - data = {'export': volume.provider_location, - 'name': volume.name} - if volume.provider_location in self.shares: - data['options'] = self.shares[volume.provider_location] - return { - 'driver_volume_type': self.driver_volume_type, - 'data': data, - 'mount_point_base': self._get_mount_point_base() - } - - def do_setup(self, context): - """Any initialization the volume driver does while starting.""" - super(RemoteFSDriver, self).do_setup(context) - - # Validate the settings for our secure file options. - self.configuration.nas_secure_file_permissions = \ - self.configuration.nas_secure_file_permissions.lower() - self.configuration.nas_secure_file_operations = \ - self.configuration.nas_secure_file_operations.lower() - valid_secure_opts = ['auto', 'true', 'false'] - secure_options = {'nas_secure_file_permissions': - self.configuration.nas_secure_file_permissions, - 'nas_secure_file_operations': - self.configuration.nas_secure_file_operations} - - LOG.debug('NAS config: %s', secure_options) - for opt_name, opt_value in secure_options.items(): - if opt_value not in valid_secure_opts: - err_parms = {'name': opt_name, 'value': opt_value} - msg = _("NAS config '%(name)s=%(value)s' invalid. Must be " - "'auto', 'true', or 'false'") % err_parms - LOG.error(msg) - raise exception.InvalidConfigurationValue(msg) - - def _get_provisioned_capacity(self): - """Returns the provisioned capacity. - - Get the sum of sizes of volumes, snapshots and any other - files on the mountpoint. - """ - provisioned_size = 0.0 - for share in self.shares.keys(): - mount_path = self._get_mount_point_for_share(share) - out, _ = self._execute('du', '--bytes', mount_path, - run_as_root=self._execute_as_root) - provisioned_size += int(out.split()[0]) - return round(provisioned_size / units.Gi, 2) - - def _get_mount_point_base(self): - """Returns the mount point base for the remote fs. 
- - This method facilitates returning mount point base - for the specific remote fs. Override this method - in the respective driver to return the entry to be - used while attach/detach using brick in cinder. - If not overridden then it returns None without - raising exception to continue working for cases - when not used with brick. - """ - LOG.debug("Driver specific implementation needs to return" - " mount_point_base.") - return None - - @staticmethod - def _validate_state(current_state, - acceptable_states, - obj_description='volume', - invalid_exc=exception.InvalidVolume): - if current_state not in acceptable_states: - message = _('Invalid %(obj_description)s state. ' - 'Acceptable states for this operation: ' - '%(acceptable_states)s. ' - 'Current %(obj_description)s state: ' - '%(current_state)s.') - raise invalid_exc( - message=message % - dict(obj_description=obj_description, - acceptable_states=acceptable_states, - current_state=current_state)) - - @utils.trace - def create_volume(self, volume): - """Creates a volume. - - :param volume: volume reference - :returns: provider_location update dict for database - """ - - if volume.encryption_key_id and not self._supports_encryption: - message = _("Encryption is not yet supported.") - raise exception.VolumeDriverException(message=message) - - LOG.debug('Creating volume %(vol)s', {'vol': volume.id}) - self._ensure_shares_mounted() - - volume.provider_location = self._find_share(volume) - - LOG.info('casted to %s', volume.provider_location) - - self._do_create_volume(volume) - - return {'provider_location': volume.provider_location} - - def _do_create_volume(self, volume): - """Create a volume on given remote share. - - :param volume: volume reference - """ - volume_path = self.local_path(volume) - volume_size = volume.size - - if getattr(self.configuration, - self.driver_prefix + '_qcow2_volumes', False): - # QCOW2 volumes are inherently sparse, so this setting - # will override the _sparsed_volumes setting. 
- self._create_qcow2_file(volume_path, volume_size) - elif getattr(self.configuration, - self.driver_prefix + '_sparsed_volumes', False): - self._create_sparsed_file(volume_path, volume_size) - else: - self._create_regular_file(volume_path, volume_size) - - self._set_rw_permissions(volume_path) - - def _ensure_shares_mounted(self): - """Look for remote shares in the flags and mount them locally.""" - mounted_shares = [] - - self._load_shares_config(getattr(self.configuration, - self.driver_prefix + - '_shares_config')) - - for share in self.shares.keys(): - try: - self._ensure_share_mounted(share) - mounted_shares.append(share) - except Exception as exc: - LOG.error('Exception during mounting %s', exc) - - self._mounted_shares = mounted_shares - - LOG.debug('Available shares %s', self._mounted_shares) - - @utils.trace - def delete_volume(self, volume): - """Deletes a logical volume. - - :param volume: volume reference - """ - - LOG.debug('Deleting volume %(vol)s, provider_location: %(loc)s', - {'vol': volume.id, 'loc': volume.provider_location}) - if not volume.provider_location: - LOG.warning('Volume %s does not have ' - 'provider_location specified, ' - 'skipping', volume.name) - return - - self._ensure_share_mounted(volume.provider_location) - - mounted_path = self.local_path(volume) - - self._delete(mounted_path) - - def ensure_export(self, ctx, volume): - """Synchronously recreates an export for a logical volume.""" - self._ensure_share_mounted(volume.provider_location) - - def create_export(self, ctx, volume, connector): - """Exports the volume. - - Can optionally return a dictionary of changes - to the volume object to be persisted. - """ - pass - - def remove_export(self, ctx, volume): - """Removes an export for a logical volume.""" - pass - - def delete_snapshot(self, snapshot): - """Delete snapshot. - - Do nothing for this driver, but allow manager to handle deletion - of snapshot in error state. 
- """ - pass - - def _delete(self, path): - # Note(lpetrut): this method is needed in order to provide - # interoperability with Windows as it will be overridden. - self._execute('rm', '-f', path, run_as_root=self._execute_as_root) - - def _create_sparsed_file(self, path, size): - """Creates a sparse file of a given size in GiB.""" - self._execute('truncate', '-s', '%sG' % size, - path, run_as_root=self._execute_as_root) - - def _create_regular_file(self, path, size): - """Creates a regular file of given size in GiB.""" - - block_size_mb = 1 - block_count = size * units.Gi // (block_size_mb * units.Mi) - - self._execute('dd', 'if=/dev/zero', 'of=%s' % path, - 'bs=%dM' % block_size_mb, - 'count=%d' % block_count, - run_as_root=self._execute_as_root) - - def _create_qcow2_file(self, path, size_gb): - """Creates a QCOW2 file of a given size in GiB.""" - - self._execute('qemu-img', 'create', '-f', 'qcow2', - '-o', 'preallocation=metadata', - path, str(size_gb * units.Gi), - run_as_root=self._execute_as_root) - - def _set_rw_permissions(self, path): - """Sets access permissions for given NFS path. - - Volume file permissions are set based upon the value of - secure_file_permissions: 'true' sets secure access permissions and - 'false' sets more open (insecure) access permissions. - - :param path: the volume file path. 
- """ - if self.configuration.nas_secure_file_permissions == 'true': - permissions = '660' - LOG.debug('File path %(path)s is being set with permissions: ' - '%(permissions)s', - {'path': path, 'permissions': permissions}) - else: - permissions = 'ugo+rw' - LOG.warning('%(path)s is being set with open permissions: ' - '%(perm)s', {'path': path, 'perm': permissions}) - - self._execute('chmod', permissions, path, - run_as_root=self._execute_as_root) - - def _set_rw_permissions_for_all(self, path): - """Sets 666 permissions for the path.""" - self._execute('chmod', 'ugo+rw', path, - run_as_root=self._execute_as_root) - - def _set_rw_permissions_for_owner(self, path): - """Sets read-write permissions to the owner for the path.""" - self._execute('chmod', 'u+rw', path, - run_as_root=self._execute_as_root) - - def local_path(self, volume): - """Get volume path (mounted locally fs path) for given volume. - - :param volume: volume reference - """ - remotefs_share = volume.provider_location - return os.path.join(self._get_mount_point_for_share(remotefs_share), - volume.name) - - def copy_image_to_volume(self, context, volume, image_service, image_id): - """Fetch the image from image_service and write it to the volume.""" - - image_utils.fetch_to_raw(context, - image_service, - image_id, - self.local_path(volume), - self.configuration.volume_dd_blocksize, - size=volume.size, - run_as_root=self._execute_as_root) - - # NOTE (leseb): Set the virtual size of the image - # the raw conversion overwrote the destination file - # (which had the correct size) - # with the fetched glance image size, - # thus the initial 'size' parameter is not honored - # this sets the size to the one asked in the first place by the user - # and then verify the final virtual size - image_utils.resize_image(self.local_path(volume), volume.size, - run_as_root=self._execute_as_root) - - data = image_utils.qemu_img_info(self.local_path(volume), - run_as_root=self._execute_as_root) - virt_size = 
data.virtual_size // units.Gi - if virt_size != volume.size: - raise exception.ImageUnacceptable( - image_id=image_id, - reason=(_("Expected volume size was %d") % volume.size) - + (_(" but size is now %d") % virt_size)) - - def copy_volume_to_image(self, context, volume, image_service, image_meta): - """Copy the volume to the specified image.""" - image_utils.upload_volume(context, - image_service, - image_meta, - self.local_path(volume), - run_as_root=self._execute_as_root) - - def _read_config_file(self, config_file): - # Returns list of lines in file - with open(config_file) as f: - return f.readlines() - - def _load_shares_config(self, share_file=None): - self.shares = {} - - if all((self.configuration.nas_host, - self.configuration.nas_share_path)): - LOG.debug('Using nas_host and nas_share_path configuration.') - - nas_host = self.configuration.nas_host - nas_share_path = self.configuration.nas_share_path - - share_address = '%s:%s' % (nas_host, nas_share_path) - - if not re.match(self.SHARE_FORMAT_REGEX, share_address): - msg = (_("Share %s ignored due to invalid format. Must " - "be of form address:/export. 
Please check the " - "nas_host and nas_share_path settings."), - share_address) - raise exception.InvalidConfigurationValue(msg) - - self.shares[share_address] = self.configuration.nas_mount_options - - elif share_file is not None: - LOG.debug('Loading shares from %s.', share_file) - - for share in self._read_config_file(share_file): - # A configuration line may be either: - # host:/vol_name - # or - # host:/vol_name -o options=123,rw --other - if not share.strip(): - # Skip blank or whitespace-only lines - continue - if share.startswith('#'): - continue - - share_info = share.split(' ', 1) - # results in share_info = - # [ 'address:/vol', '-o options=123,rw --other' ] - - share_address = share_info[0].strip() - # Replace \040 with a space, to support paths with spaces - share_address = share_address.replace("\\040", " ") - share_opts = None - if len(share_info) > 1: - share_opts = share_info[1].strip() - - if not re.match(self.SHARE_FORMAT_REGEX, share_address): - LOG.error("Share %s ignored due to invalid format. " - "Must be of form address:/export.", - share_address) - continue - - self.shares[share_address] = share_opts - - LOG.debug("shares loaded: %s", self.shares) - - def _get_mount_point_for_share(self, path): - raise NotImplementedError() - - def terminate_connection(self, volume, connector, **kwargs): - """Disallow connection from connector.""" - pass - - def get_volume_stats(self, refresh=False): - """Get volume stats. - - If 'refresh' is True, update the stats first. 
- """ - if refresh or not self._stats: - self._update_volume_stats() - - return self._stats - - def _update_volume_stats(self): - """Retrieve stats info from volume group.""" - - data = {} - backend_name = self.configuration.safe_get('volume_backend_name') - data['volume_backend_name'] = backend_name or self.volume_backend_name - data['vendor_name'] = 'Open Source' - data['driver_version'] = self.get_version() - data['storage_protocol'] = self.driver_volume_type - - self._ensure_shares_mounted() - - global_capacity = 0 - global_free = 0 - for share in self._mounted_shares: - capacity, free, used = self._get_capacity_info(share) - global_capacity += capacity - global_free += free - - data['total_capacity_gb'] = global_capacity / float(units.Gi) - data['free_capacity_gb'] = global_free / float(units.Gi) - data['reserved_percentage'] = self.configuration.reserved_percentage - data['QoS_support'] = False - self._stats = data - - def _get_capacity_info(self, share): - raise NotImplementedError() - - def _find_share(self, volume): - raise NotImplementedError() - - def _ensure_share_mounted(self, share): - raise NotImplementedError() - - def secure_file_operations_enabled(self): - """Determine if driver is operating in Secure File Operations mode. - - The Cinder Volume driver needs to query if this driver is operating - in a secure file mode; check our nas_secure_file_operations flag. - """ - if self.configuration.nas_secure_file_operations == 'true': - return True - return False - - def set_nas_security_options(self, is_new_cinder_install): - """Determine the setting to use for Secure NAS options. - - This method must be overridden by child wishing to use secure - NAS file operations. This base method will set the NAS security - options to false. 
- """ - doc_html = ("http://docs.openstack.org/admin-guide" - "/blockstorage_nfs_backend.html") - self.configuration.nas_secure_file_operations = 'false' - LOG.warning("The NAS file operations will be run as root: " - "allowing root level access at the storage backend. " - "This is considered an insecure NAS environment. " - "Please see %s for information on a secure NAS " - "configuration.", - doc_html) - self.configuration.nas_secure_file_permissions = 'false' - LOG.warning("The NAS file permissions mode will be 666 (allowing " - "other/world read & write access). This is considered " - "an insecure NAS environment. Please see %s for " - "information on a secure NFS configuration.", - doc_html) - - def _determine_nas_security_option_setting(self, nas_option, mount_point, - is_new_cinder_install): - """Determine NAS security option setting when 'auto' is assigned. - - This method determines the final 'true'/'false' setting of an NAS - security option when the default value of 'auto' has been detected. - If the nas option isn't 'auto' then its current value is used. - - :param nas_option: The NAS security option value loaded from config. - :param mount_point: Mount where indicator file is written. - :param is_new_cinder_install: boolean for new Cinder installation. - :return string: 'true' or 'false' for new option setting. - """ - if nas_option == 'auto': - # For auto detection, we first check to see if we have been - # through this process before by checking for the existence of - # the Cinder secure environment indicator file. - file_name = '.cinderSecureEnvIndicator' - file_path = os.path.join(mount_point, file_name) - if os.path.isfile(file_path): - nas_option = 'true' - LOG.info('Cinder secure environment ' - 'indicator file exists.') - else: - # The indicator file does not exist. If it is a new - # installation, set to 'true' and create the indicator file. 
- if is_new_cinder_install: - nas_option = 'true' - try: - with open(file_path, 'w') as fh: - fh.write('Detector file for Cinder secure ' - 'environment usage.\n') - fh.write('Do not delete this file.\n') - - # Set the permissions on our special marker file to - # protect from accidental removal (owner write only). - self._execute('chmod', '640', file_path, - run_as_root=self._execute_as_root) - LOG.info('New Cinder secure environment indicator' - ' file created at path %s.', file_path) - except IOError as err: - LOG.error('Failed to created Cinder secure ' - 'environment indicator file: %s', - err) - else: - # For existing installs, we default to 'false'. The - # admin can always set the option at the driver config. - nas_option = 'false' - - return nas_option - - -class RemoteFSSnapDriverBase(RemoteFSDriver): - """Base class for remotefs drivers implementing qcow2 snapshots. - - Driver must implement: - _local_volume_dir(self, volume) - """ - - _VALID_IMAGE_EXTENSIONS = [] - # The following flag may be overriden by the concrete drivers in order - # to avoid using temporary volume snapshots when creating volume clones, - # when possible. 
- - _always_use_temp_snap_when_cloning = True - - def __init__(self, *args, **kwargs): - self._remotefsclient = None - self.base = None - self._nova = None - super(RemoteFSSnapDriverBase, self).__init__(*args, **kwargs) - - def do_setup(self, context): - super(RemoteFSSnapDriverBase, self).do_setup(context) - - self._nova = compute.API() - - def _local_volume_dir(self, volume): - share = volume.provider_location - local_dir = self._get_mount_point_for_share(share) - return local_dir - - def _local_path_volume(self, volume): - path_to_disk = os.path.join( - self._local_volume_dir(volume), - volume.name) - - return path_to_disk - - def _get_new_snap_path(self, snapshot): - vol_path = self.local_path(snapshot.volume) - snap_path = '%s.%s' % (vol_path, snapshot.id) - return snap_path - - def _local_path_volume_info(self, volume): - return '%s%s' % (self.local_path(volume), '.info') - - def _read_file(self, filename): - """This method is to make it easier to stub out code for testing. - - Returns a string representing the contents of the file. - """ - - with open(filename, 'r') as f: - return f.read() - - def _write_info_file(self, info_path, snap_info): - if 'active' not in snap_info.keys(): - msg = _("'active' must be present when writing snap_info.") - raise exception.RemoteFSException(msg) - - with open(info_path, 'w') as f: - json.dump(snap_info, f, indent=1, sort_keys=True) - - def _qemu_img_info_base(self, path, volume_name, basedir, - run_as_root=False): - """Sanitize image_utils' qemu_img_info. - - This code expects to deal only with relative filenames. - """ - - run_as_root = run_as_root or self._execute_as_root - - info = image_utils.qemu_img_info(path, - run_as_root=run_as_root) - if info.image: - info.image = os.path.basename(info.image) - if info.backing_file: - if self._VALID_IMAGE_EXTENSIONS: - valid_ext = r'(\.(%s))?' 
% '|'.join( - self._VALID_IMAGE_EXTENSIONS) - else: - valid_ext = '' - - backing_file_template = \ - "(%(basedir)s/[0-9a-f]+/)?%" \ - "(volname)s(.(tmp-snap-)?[0-9a-f-]+)?%(valid_ext)s$" % { - 'basedir': basedir, - 'volname': volume_name, - 'valid_ext': valid_ext, - } - if not re.match(backing_file_template, info.backing_file, - re.IGNORECASE): - msg = _("File %(path)s has invalid backing file " - "%(bfile)s, aborting.") % {'path': path, - 'bfile': info.backing_file} - raise exception.RemoteFSException(msg) - - info.backing_file = os.path.basename(info.backing_file) - - return info - - def _qemu_img_info(self, path, volume_name): - raise NotImplementedError() - - def _img_commit(self, path): - # TODO(eharney): this is not using the correct permissions for - # NFS snapshots - # It needs to run as root for volumes attached to instances, but - # does not when in secure mode. - self._execute('qemu-img', 'commit', path, - run_as_root=self._execute_as_root) - self._delete(path) - - def _rebase_img(self, image, backing_file, volume_format): - # qemu-img create must run as root, because it reads from the - # backing file, which will be owned by qemu:qemu if attached to an - # instance. - # TODO(erlon): Sanity check this. - self._execute('qemu-img', 'rebase', '-u', '-b', backing_file, image, - '-F', volume_format, run_as_root=self._execute_as_root) - - def _read_info_file(self, info_path, empty_if_missing=False): - """Return dict of snapshot information. 
- - :param: info_path: path to file - :param: empty_if_missing: True=return empty dict if no file - """ - - if not os.path.exists(info_path): - if empty_if_missing is True: - return {} - - return json.loads(self._read_file(info_path)) - - def _get_higher_image_path(self, snapshot): - volume = snapshot.volume - info_path = self._local_path_volume_info(volume) - snap_info = self._read_info_file(info_path) - - snapshot_file = snap_info[snapshot.id] - active_file = self.get_active_image_from_info(volume) - active_file_path = os.path.join(self._local_volume_dir(volume), - active_file) - backing_chain = self._get_backing_chain_for_path( - volume, active_file_path) - higher_file = next((os.path.basename(f['filename']) - for f in backing_chain - if utils.paths_normcase_equal( - f.get('backing-filename', ''), - snapshot_file)), - None) - return higher_file - - def _get_backing_chain_for_path(self, volume, path): - """Returns list of dicts containing backing-chain information. - - Includes 'filename', and 'backing-filename' for each - applicable entry. - - Consider converting this to use --backing-chain and --output=json - when environment supports qemu-img 1.5.0. - - :param volume: volume reference - :param path: path to image file at top of chain - - """ - - output = [] - - info = self._qemu_img_info(path, volume.name) - new_info = {} - new_info['filename'] = os.path.basename(path) - new_info['backing-filename'] = info.backing_file - - output.append(new_info) - - while new_info['backing-filename']: - filename = new_info['backing-filename'] - path = os.path.join(self._local_volume_dir(volume), filename) - info = self._qemu_img_info(path, volume.name) - backing_filename = info.backing_file - new_info = {} - new_info['filename'] = filename - new_info['backing-filename'] = backing_filename - - output.append(new_info) - - return output - - def _get_hash_str(self, base_str): - """Return a string that represents hash of base_str. - - Returns string in a hex format. 
- """ - if isinstance(base_str, six.text_type): - base_str = base_str.encode('utf-8') - return hashlib.md5(base_str).hexdigest() - - def _get_mount_point_for_share(self, share): - """Return mount point for share. - - :param share: example 172.18.194.100:/var/fs - """ - return self._remotefsclient.get_mount_point(share) - - def _get_available_capacity(self, share): - """Calculate available space on the share. - - :param share: example 172.18.194.100:/var/fs - """ - mount_point = self._get_mount_point_for_share(share) - - out, _ = self._execute('df', '--portability', '--block-size', '1', - mount_point, - run_as_root=self._execute_as_root) - out = out.splitlines()[1] - - size = int(out.split()[1]) - available = int(out.split()[3]) - - return available, size - - def _get_capacity_info(self, remotefs_share): - available, size = self._get_available_capacity(remotefs_share) - return size, available, size - available - - def _get_mount_point_base(self): - return self.base - - def _ensure_share_writable(self, path): - """Ensure that the Cinder user can write to the share. - - If not, raise an exception. - - :param path: path to test - :raises: RemoteFSException - :returns: None - """ - - prefix = '.cinder-write-test-' + str(os.getpid()) + '-' - - try: - tempfile.NamedTemporaryFile(prefix=prefix, dir=path) - except OSError: - msg = _('Share at %(dir)s is not writable by the ' - 'Cinder volume service. 
Snapshot operations will not be ' - 'supported.') % {'dir': path} - raise exception.RemoteFSException(msg) - - def _copy_volume_to_image(self, context, volume, image_service, - image_meta): - """Copy the volume to the specified image.""" - - # If snapshots exist, flatten to a temporary image, and upload it - - active_file = self.get_active_image_from_info(volume) - active_file_path = os.path.join(self._local_volume_dir(volume), - active_file) - info = self._qemu_img_info(active_file_path, volume.name) - backing_file = info.backing_file - - root_file_fmt = info.file_format - - tmp_params = { - 'prefix': '%s.temp_image.%s' % (volume.id, image_meta['id']), - 'suffix': '.img' - } - with image_utils.temporary_file(**tmp_params) as temp_path: - if backing_file or (root_file_fmt != 'raw'): - # Convert due to snapshots - # or volume data not being stored in raw format - # (upload_volume assumes raw format input) - image_utils.convert_image(active_file_path, temp_path, 'raw', - run_as_root=self._execute_as_root) - upload_path = temp_path - else: - upload_path = active_file_path - - image_utils.upload_volume(context, - image_service, - image_meta, - upload_path, - run_as_root=self._execute_as_root) - - def get_active_image_from_info(self, volume): - """Returns filename of the active image from the info file.""" - - info_file = self._local_path_volume_info(volume) - - snap_info = self._read_info_file(info_file, empty_if_missing=True) - - if not snap_info: - # No info file = no snapshots exist - vol_path = os.path.basename(self.local_path(volume)) - return vol_path - - return snap_info['active'] - - def _local_path_active_image(self, volume): - active_fname = self.get_active_image_from_info(volume) - vol_dir = self._local_volume_dir(volume) - - active_fpath = os.path.join(vol_dir, active_fname) - return active_fpath - - def _snapshots_exist(self, volume): - if not volume.provider_location: - return False - - active_fpath = self._local_path_active_image(volume) - base_vol_path 
= self.local_path(volume) - - return not utils.paths_normcase_equal(active_fpath, base_vol_path) - - def _create_cloned_volume(self, volume, src_vref): - LOG.info('Cloning volume %(src)s to volume %(dst)s', - {'src': src_vref.id, - 'dst': volume.id}) - - acceptable_states = ['available', 'backing-up', 'downloading'] - self._validate_state(src_vref.status, - acceptable_states, - obj_description='source volume') - - volume_name = CONF.volume_name_template % volume.id - - # Create fake volume and snapshot objects - vol_attrs = ['provider_location', 'size', 'id', 'name', 'status', - 'volume_type', 'metadata'] - Volume = collections.namedtuple('Volume', vol_attrs) - - volume_info = Volume(provider_location=src_vref.provider_location, - size=src_vref.size, - id=volume.id, - name=volume_name, - status=src_vref.status, - volume_type=src_vref.volume_type, - metadata=src_vref.metadata) - - if (self._always_use_temp_snap_when_cloning or - self._snapshots_exist(src_vref)): - snap_attrs = ['volume_name', 'volume_size', 'name', - 'volume_id', 'id', 'volume'] - Snapshot = collections.namedtuple('Snapshot', snap_attrs) - - temp_snapshot = Snapshot(volume_name=volume_name, - volume_size=src_vref.size, - name='clone-snap-%s' % src_vref.id, - volume_id=src_vref.id, - id='tmp-snap-%s' % src_vref.id, - volume=src_vref) - - self._create_snapshot(temp_snapshot) - try: - self._copy_volume_from_snapshot(temp_snapshot, - volume_info, - volume.size) - - finally: - self._delete_snapshot(temp_snapshot) - else: - self._copy_volume_image(self.local_path(src_vref), - self.local_path(volume_info)) - self._extend_volume(volume_info, volume.size) - - return {'provider_location': src_vref.provider_location} - - def _copy_volume_image(self, src_path, dest_path): - shutil.copyfile(src_path, dest_path) - self._set_rw_permissions(dest_path) - - def _delete_stale_snapshot(self, snapshot): - info_path = self._local_path_volume_info(snapshot.volume) - snap_info = self._read_info_file(info_path) - - 
snapshot_file = snap_info[snapshot.id] - active_file = self.get_active_image_from_info(snapshot.volume) - snapshot_path = os.path.join( - self._local_volume_dir(snapshot.volume), snapshot_file) - if utils.paths_normcase_equal(snapshot_file, active_file): - return - - LOG.info('Deleting stale snapshot: %s', snapshot.id) - self._delete(snapshot_path) - del(snap_info[snapshot.id]) - self._write_info_file(info_path, snap_info) - - def _delete_snapshot(self, snapshot): - """Delete a snapshot. - - If volume status is 'available', delete snapshot here in Cinder - using qemu-img. - - If volume status is 'in-use', calculate what qcow2 files need to - merge, and call to Nova to perform this operation. - - :raises: InvalidVolume if status not acceptable - :raises: RemoteFSException(msg) if operation fails - :returns: None - - """ - - LOG.debug('Deleting %(type)s snapshot %(snap)s of volume %(vol)s', - {'snap': snapshot.id, 'vol': snapshot.volume.id, - 'type': ('online' if snapshot.volume.status == 'in-use' - else 'offline')}) - - volume_status = snapshot.volume.status - acceptable_states = ['available', 'in-use', 'backing-up', 'deleting', - 'downloading'] - self._validate_state(volume_status, acceptable_states) - - vol_path = self._local_volume_dir(snapshot.volume) - self._ensure_share_writable(vol_path) - - # Determine the true snapshot file for this snapshot - # based on the .info file - info_path = self._local_path_volume_info(snapshot.volume) - snap_info = self._read_info_file(info_path, empty_if_missing=True) - - if snapshot.id not in snap_info: - # If snapshot info file is present, but snapshot record does not - # exist, do not attempt to delete. - # (This happens, for example, if snapshot_create failed due to lack - # of permission to write to the share.) 
- LOG.info('Snapshot record for %s is not present, allowing ' - 'snapshot_delete to proceed.', snapshot.id) - return - - snapshot_file = snap_info[snapshot.id] - LOG.debug('snapshot_file for this snap is: %s', snapshot_file) - snapshot_path = os.path.join( - self._local_volume_dir(snapshot.volume), - snapshot_file) - - snapshot_path_img_info = self._qemu_img_info( - snapshot_path, - snapshot.volume.name) - - base_file = snapshot_path_img_info.backing_file - if base_file is None: - # There should always be at least the original volume - # file as base. - LOG.warning('No backing file found for %s, allowing ' - 'snapshot to be deleted.', snapshot_path) - - # Snapshot may be stale, so just delete it and update the - # info file instead of blocking - return self._delete_stale_snapshot(snapshot) - - base_path = os.path.join(vol_path, base_file) - base_file_img_info = self._qemu_img_info(base_path, - snapshot.volume.name) - - # Find what file has this as its backing file - active_file = self.get_active_image_from_info(snapshot.volume) - - if volume_status == 'in-use': - # Online delete - context = snapshot._context - - new_base_file = base_file_img_info.backing_file - - base_id = None - for key, value in snap_info.items(): - if utils.paths_normcase_equal(value, - base_file) and key != 'active': - base_id = key - break - if base_id is None: - # This means we are deleting the oldest snapshot - LOG.debug('No %(base_id)s found for %(file)s', - {'base_id': 'base_id', 'file': snapshot_file}) - - online_delete_info = { - 'active_file': active_file, - 'snapshot_file': snapshot_file, - 'base_file': base_file, - 'base_id': base_id, - 'new_base_file': new_base_file - } - - return self._delete_snapshot_online(context, - snapshot, - online_delete_info) - - if utils.paths_normcase_equal(snapshot_file, active_file): - # There is no top file - # T0 | T1 | - # base | snapshot_file | None - # (guaranteed to| (being deleted, | - # exist) | committed down) | - - 
self._img_commit(snapshot_path) - # Active file has changed - snap_info['active'] = base_file - else: - # T0 | T1 | T2 | T3 - # base | snapshot_file | higher_file | highest_file - # (guaranteed to | (being deleted, | (guaranteed to | (may exist) - # exist, not | committed down) | exist, needs | - # used here) | | ptr update) | - - # This file is guaranteed to exist since we aren't operating on - # the active file. - higher_file = self._get_higher_image_path(snapshot) - if higher_file is None: - msg = _('No file found with %s as backing file.') %\ - snapshot_file - raise exception.RemoteFSException(msg) - - higher_id = next((i for i in snap_info - if utils.paths_normcase_equal(snap_info[i], - higher_file) - and i != 'active'), - None) - if higher_id is None: - msg = _('No snap found with %s as backing file.') %\ - higher_file - raise exception.RemoteFSException(msg) - - self._img_commit(snapshot_path) - - higher_file_path = os.path.join(vol_path, higher_file) - base_file_fmt = base_file_img_info.file_format - self._rebase_img(higher_file_path, base_file, base_file_fmt) - - # Remove snapshot_file from info - del(snap_info[snapshot.id]) - self._write_info_file(info_path, snap_info) - - def _create_volume_from_snapshot(self, volume, snapshot): - """Creates a volume from a snapshot. - - Snapshot must not be the active snapshot. (offline) - """ - - LOG.debug('Creating volume %(vol)s from snapshot %(snap)s', - {'vol': volume.id, 'snap': snapshot.id}) - - if snapshot.status != 'available': - msg = _('Snapshot status must be "available" to clone. 
' - 'But is: %(status)s') % {'status': snapshot.status} - - raise exception.InvalidSnapshot(msg) - - self._ensure_shares_mounted() - - volume.provider_location = self._find_share(volume) - - self._do_create_volume(volume) - - self._copy_volume_from_snapshot(snapshot, - volume, - volume.size) - - return {'provider_location': volume.provider_location} - - def _copy_volume_from_snapshot(self, snapshot, volume, volume_size): - raise NotImplementedError() - - def _do_create_snapshot(self, snapshot, backing_filename, - new_snap_path): - """Create a QCOW2 file backed by another file. - - :param snapshot: snapshot reference - :param backing_filename: filename of file that will back the - new qcow2 file - :param new_snap_path: filename of new qcow2 file - """ - backing_path_full_path = os.path.join( - self._local_volume_dir(snapshot.volume), - backing_filename) - - info = self._qemu_img_info(backing_path_full_path, - snapshot.volume.name) - backing_fmt = info.file_format - - command = ['qemu-img', 'create', '-f', 'qcow2', '-o', - 'backing_file=%s,backing_fmt=%s' % - (backing_path_full_path, backing_fmt), - new_snap_path, - "%dG" % snapshot.volume.size] - self._execute(*command, run_as_root=self._execute_as_root) - - command = ['qemu-img', 'rebase', '-u', - '-b', backing_filename, - '-F', backing_fmt, - new_snap_path] - - # qemu-img rebase must run as root for the same reasons as above - self._execute(*command, run_as_root=self._execute_as_root) - - self._set_rw_permissions(new_snap_path) - - # if in secure mode, chown new file - if self.secure_file_operations_enabled(): - ref_file = backing_path_full_path - log_msg = 'Setting permissions: %(file)s -> %(user)s:%(group)s' % { - 'file': ref_file, 'user': os.stat(ref_file).st_uid, - 'group': os.stat(ref_file).st_gid} - LOG.debug(log_msg) - command = ['chown', - '--reference=%s' % ref_file, - new_snap_path] - self._execute(*command, run_as_root=self._execute_as_root) - - def _create_snapshot(self, snapshot): - """Create a 
snapshot. - - If volume is attached, call to Nova to create snapshot, providing a - qcow2 file. Cinder creates and deletes qcow2 files, but Nova is - responsible for transitioning the VM between them and handling live - transfers of data between files as required. - - If volume is detached, create locally with qemu-img. Cinder handles - manipulation of qcow2 files. - - A file named volume-.info is stored with the volume - data and is a JSON table which contains a mapping between - Cinder snapshot UUIDs and filenames, as these associations - will change as snapshots are deleted. - - - Basic snapshot operation: - - 1. Initial volume file: - volume-1234 - - 2. Snapshot created: - volume-1234 <- volume-1234.aaaa - - volume-1234.aaaa becomes the new "active" disk image. - If the volume is not attached, this filename will be used to - attach the volume to a VM at volume-attach time. - If the volume is attached, the VM will switch to this file as - part of the snapshot process. - - Note that volume-1234.aaaa represents changes after snapshot - 'aaaa' was created. So the data for snapshot 'aaaa' is actually - in the backing file(s) of volume-1234.aaaa. - - This file has a qcow2 header recording the fact that volume-1234 is - its backing file. Delta changes since the snapshot was created are - stored in this file, and the backing file (volume-1234) does not - change. - - info file: { 'active': 'volume-1234.aaaa', - 'aaaa': 'volume-1234.aaaa' } - - 3. Second snapshot created: - volume-1234 <- volume-1234.aaaa <- volume-1234.bbbb - - volume-1234.bbbb now becomes the "active" disk image, recording - changes made to the volume. - - info file: { 'active': 'volume-1234.bbbb', (* changed!) - 'aaaa': 'volume-1234.aaaa', - 'bbbb': 'volume-1234.bbbb' } (* added!) - - 4. Snapshot deletion when volume is attached ('in-use' state): - - * When first snapshot is deleted, Cinder calls Nova for online - snapshot deletion. 
Nova deletes snapshot with id "aaaa" and - makes snapshot with id "bbbb" point to the base image. - Snapshot with id "bbbb" is the active image. - - volume-1234 <- volume-1234.bbbb - - info file: { 'active': 'volume-1234.bbbb', - 'bbbb': 'volume-1234.bbbb' - } - - * When second snapshot is deleted, Cinder calls Nova for online - snapshot deletion. Nova deletes snapshot with id "bbbb" by - pulling volume-1234's data into volume-1234.bbbb. This - (logically) removes snapshot with id "bbbb" and the active - file remains the same. - - volume-1234.bbbb - - info file: { 'active': 'volume-1234.bbbb' } - - TODO (deepakcs): Change this once Nova supports blockCommit for - in-use volumes. - - 5. Snapshot deletion when volume is detached ('available' state): - - * When first snapshot is deleted, Cinder does the snapshot - deletion. volume-1234.aaaa is removed from the snapshot chain. - The data from it is merged into its parent. - - volume-1234.bbbb is rebased, having volume-1234 as its new - parent. - - volume-1234 <- volume-1234.bbbb - - info file: { 'active': 'volume-1234.bbbb', - 'bbbb': 'volume-1234.bbbb' - } - - * When second snapshot is deleted, Cinder does the snapshot - deletion. volume-1234.aaaa is removed from the snapshot chain. - The base image, volume-1234 becomes the active image for this - volume again. - - volume-1234 - - info file: { 'active': 'volume-1234' } (* changed!) - """ - - LOG.debug('Creating %(type)s snapshot %(snap)s of volume %(vol)s', - {'snap': snapshot.id, 'vol': snapshot.volume.id, - 'type': ('online' if snapshot.volume.status == 'in-use' - else 'offline')}) - - status = snapshot.volume.status - - acceptable_states = ['available', 'in-use', 'backing-up'] - if snapshot.id.startswith('tmp-snap-'): - # This is an internal volume snapshot. In order to support - # image caching, we'll allow creating/deleting such snapshots - # while having volumes in 'downloading' state. 
- acceptable_states.append('downloading') - - self._validate_state(status, acceptable_states) - - info_path = self._local_path_volume_info(snapshot.volume) - snap_info = self._read_info_file(info_path, empty_if_missing=True) - backing_filename = self.get_active_image_from_info( - snapshot.volume) - new_snap_path = self._get_new_snap_path(snapshot) - - if status == 'in-use': - self._create_snapshot_online(snapshot, - backing_filename, - new_snap_path) - else: - self._do_create_snapshot(snapshot, - backing_filename, - new_snap_path) - - snap_info['active'] = os.path.basename(new_snap_path) - snap_info[snapshot.id] = os.path.basename(new_snap_path) - self._write_info_file(info_path, snap_info) - - def _create_snapshot_online(self, snapshot, backing_filename, - new_snap_path): - # Perform online snapshot via Nova - context = snapshot._context - - self._do_create_snapshot(snapshot, - backing_filename, - new_snap_path) - - connection_info = { - 'type': 'qcow2', - 'new_file': os.path.basename(new_snap_path), - 'snapshot_id': snapshot.id - } - - try: - result = self._nova.create_volume_snapshot( - context, - snapshot.volume_id, - connection_info) - LOG.debug('nova call result: %s', result) - except Exception: - LOG.exception('Call to Nova to create snapshot failed') - raise - - # Loop and wait for result - # Nova will call Cinderclient to update the status in the database - # An update of progress = '90%' means that Nova is done - seconds_elapsed = 0 - increment = 1 - timeout = 600 - while True: - s = db.snapshot_get(context, snapshot.id) - - LOG.debug('Status of snapshot %(id)s is now %(status)s', - {'id': snapshot['id'], - 'status': s['status']}) - - if s['status'] == fields.SnapshotStatus.CREATING: - if s['progress'] == '90%': - # Nova tasks completed successfully - break - - time.sleep(increment) - seconds_elapsed += increment - elif s['status'] == fields.SnapshotStatus.ERROR: - - msg = _('Nova returned "error" status ' - 'while creating snapshot.') - raise 
exception.RemoteFSException(msg) - - elif (s['status'] == fields.SnapshotStatus.DELETING or - s['status'] == fields.SnapshotStatus.ERROR_DELETING): - msg = _('Snapshot %(id)s has been asked to be deleted while ' - 'waiting for it to become available. Perhaps a ' - 'concurrent request was made.') % {'id': - snapshot.id} - raise exception.RemoteFSConcurrentRequest(msg) - - if 10 < seconds_elapsed <= 20: - increment = 2 - elif 20 < seconds_elapsed <= 60: - increment = 5 - elif 60 < seconds_elapsed: - increment = 10 - - if seconds_elapsed > timeout: - msg = _('Timed out while waiting for Nova update ' - 'for creation of snapshot %s.') % snapshot.id - raise exception.RemoteFSException(msg) - - def _delete_snapshot_online(self, context, snapshot, info): - # Update info over the course of this method - # active file never changes - info_path = self._local_path_volume_info(snapshot.volume) - snap_info = self._read_info_file(info_path) - - if utils.paths_normcase_equal(info['active_file'], - info['snapshot_file']): - # blockRebase/Pull base into active - # info['base'] => snapshot_file - - file_to_delete = info['base_file'] - if info['base_id'] is None: - # Passing base=none to blockRebase ensures that - # libvirt blanks out the qcow2 backing file pointer - new_base = None - else: - new_base = info['new_base_file'] - snap_info[info['base_id']] = info['snapshot_file'] - - delete_info = {'file_to_merge': new_base, - 'merge_target_file': None, # current - 'type': 'qcow2', - 'volume_id': snapshot.volume.id} - - del(snap_info[snapshot.id]) - else: - # blockCommit snapshot into base - # info['base'] <= snapshot_file - # delete record of snapshot - file_to_delete = info['snapshot_file'] - - delete_info = {'file_to_merge': info['snapshot_file'], - 'merge_target_file': info['base_file'], - 'type': 'qcow2', - 'volume_id': snapshot.volume.id} - - del(snap_info[snapshot.id]) - - self._nova_assisted_vol_snap_delete(context, snapshot, delete_info) - - # Write info file updated above - 
self._write_info_file(info_path, snap_info) - - # Delete stale file - path_to_delete = os.path.join( - self._local_volume_dir(snapshot.volume), file_to_delete) - self._delete(path_to_delete) - - def _nova_assisted_vol_snap_delete(self, context, snapshot, delete_info): - try: - self._nova.delete_volume_snapshot( - context, - snapshot.id, - delete_info) - except Exception: - LOG.exception('Call to Nova delete snapshot failed') - raise - - # Loop and wait for result - # Nova will call Cinderclient to update the status in the database - # An update of progress = '90%' means that Nova is done - seconds_elapsed = 0 - increment = 1 - timeout = 7200 - while True: - s = db.snapshot_get(context, snapshot.id) - - if s['status'] == fields.SnapshotStatus.DELETING: - if s['progress'] == '90%': - # Nova tasks completed successfully - break - else: - LOG.debug('status of snapshot %s is still "deleting"... ' - 'waiting', snapshot.id) - time.sleep(increment) - seconds_elapsed += increment - else: - msg = _('Unable to delete snapshot %(id)s, ' - 'status: %(status)s.') % {'id': snapshot.id, - 'status': s['status']} - raise exception.RemoteFSException(msg) - - if 10 < seconds_elapsed <= 20: - increment = 2 - elif 20 < seconds_elapsed <= 60: - increment = 5 - elif 60 < seconds_elapsed: - increment = 10 - - if seconds_elapsed > timeout: - msg = _('Timed out while waiting for Nova update ' - 'for deletion of snapshot %(id)s.') %\ - {'id': snapshot.id} - raise exception.RemoteFSException(msg) - - def _extend_volume(self, volume, size_gb): - raise NotImplementedError() - - -class RemoteFSSnapDriver(RemoteFSSnapDriverBase): - @locked_volume_id_operation - def create_snapshot(self, snapshot): - """Apply locking to the create snapshot operation.""" - - return self._create_snapshot(snapshot) - - @locked_volume_id_operation - def delete_snapshot(self, snapshot): - """Apply locking to the delete snapshot operation.""" - - return self._delete_snapshot(snapshot) - - @locked_volume_id_operation - 
def create_volume_from_snapshot(self, volume, snapshot): - return self._create_volume_from_snapshot(volume, snapshot) - - @locked_volume_id_operation - def create_cloned_volume(self, volume, src_vref): - """Creates a clone of the specified volume.""" - - return self._create_cloned_volume(volume, src_vref) - - @locked_volume_id_operation - def copy_volume_to_image(self, context, volume, image_service, image_meta): - """Copy the volume to the specified image.""" - - return self._copy_volume_to_image(context, volume, image_service, - image_meta) - - @locked_volume_id_operation - def extend_volume(self, volume, size_gb): - return self._extend_volume(volume, size_gb) - - -class RemoteFSSnapDriverDistributed(RemoteFSSnapDriverBase): - @coordination.synchronized('{self.driver_prefix}-{snapshot.volume.id}') - def create_snapshot(self, snapshot): - """Apply locking to the create snapshot operation.""" - - return self._create_snapshot(snapshot) - - @coordination.synchronized('{self.driver_prefix}-{snapshot.volume.id}') - def delete_snapshot(self, snapshot): - """Apply locking to the delete snapshot operation.""" - - return self._delete_snapshot(snapshot) - - @coordination.synchronized('{self.driver_prefix}-{volume.id}') - def create_volume_from_snapshot(self, volume, snapshot): - return self._create_volume_from_snapshot(volume, snapshot) - - @coordination.synchronized('{self.driver_prefix}-{volume.id}') - def create_cloned_volume(self, volume, src_vref): - """Creates a clone of the specified volume.""" - - return self._create_cloned_volume(volume, src_vref) - - @coordination.synchronized('{self.driver_prefix}-{volume.id}') - def copy_volume_to_image(self, context, volume, image_service, image_meta): - """Copy the volume to the specified image.""" - - return self._copy_volume_to_image(context, volume, image_service, - image_meta) - - @coordination.synchronized('{self.driver_prefix}-{volume.id}') - def extend_volume(self, volume, size_gb): - return self._extend_volume(volume, 
size_gb) - - -class RemoteFSPoolMixin(object): - """Drivers inheriting this will report each share as a pool.""" - - def _find_share(self, volume): - # We let the scheduler choose a pool for us. - pool_name = self._get_pool_name_from_volume(volume) - share = self._get_share_from_pool_name(pool_name) - return share - - def _get_pool_name_from_volume(self, volume): - pool_name = volume_utils.extract_host(volume['host'], - level='pool') - return pool_name - - def _get_pool_name_from_share(self, share): - raise NotImplementedError() - - def _get_share_from_pool_name(self, pool_name): - # To be implemented by drivers using pools. - raise NotImplementedError() - - def _update_volume_stats(self): - data = {} - pools = [] - backend_name = self.configuration.safe_get('volume_backend_name') - data['volume_backend_name'] = backend_name or self.volume_backend_name - data['vendor_name'] = self.vendor_name - data['driver_version'] = self.get_version() - data['storage_protocol'] = self.driver_volume_type - - self._ensure_shares_mounted() - - for share in self._mounted_shares: - (share_capacity, - share_free, - share_used) = self._get_capacity_info(share) - - pool = {'pool_name': self._get_pool_name_from_share(share), - 'total_capacity_gb': share_capacity / float(units.Gi), - 'free_capacity_gb': share_free / float(units.Gi), - 'provisioned_capacity_gb': share_used / float(units.Gi), - 'allocated_capacity_gb': ( - share_capacity - share_free) / float(units.Gi), - 'reserved_percentage': ( - self.configuration.reserved_percentage), - 'max_over_subscription_ratio': ( - self.configuration.max_over_subscription_ratio), - 'thin_provisioning_support': ( - self._thin_provisioning_support), - 'QoS_support': False, - } - - pools.append(pool) - - data['total_capacity_gb'] = 0 - data['free_capacity_gb'] = 0 - data['pools'] = pools - - self._stats = data diff --git a/cinder/volume/drivers/san/__init__.py b/cinder/volume/drivers/san/__init__.py deleted file mode 100644 index 408bce579..000000000 
--- a/cinder/volume/drivers/san/__init__.py +++ /dev/null @@ -1,25 +0,0 @@ -# Copyright (c) 2012 OpenStack Foundation -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -""" -:mod:`cinder.volume.drivers.san` -- Cinder San Drivers -===================================================== - -.. automodule:: cinder.volume.drivers.san - :platform: Unix - :synopsis: Module containing all the Cinder San drivers. -""" - -# Adding imports for backwards compatibility in loading volume_driver. -from cinder.volume.drivers.san.san import SanISCSIDriver # noqa diff --git a/cinder/volume/drivers/san/hp/__init__.py b/cinder/volume/drivers/san/hp/__init__.py deleted file mode 100644 index e69de29bb..000000000 diff --git a/cinder/volume/drivers/san/hp/hpmsa_client.py b/cinder/volume/drivers/san/hp/hpmsa_client.py deleted file mode 100644 index bec7bfd44..000000000 --- a/cinder/volume/drivers/san/hp/hpmsa_client.py +++ /dev/null @@ -1,24 +0,0 @@ -# Copyright 2014 Objectif Libre -# Copyright 2015 DotHill Systems -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the -# License for the specific language governing permissions and limitations -# under the License. -# - -from cinder.volume.drivers.dothill import dothill_client - - -class HPMSAClient(dothill_client.DotHillClient): - - def __init__(self, host, login, password, protocol, ssl_verify): - super(HPMSAClient, self).__init__(host, login, password, - protocol, ssl_verify) diff --git a/cinder/volume/drivers/san/hp/hpmsa_common.py b/cinder/volume/drivers/san/hp/hpmsa_common.py deleted file mode 100644 index 7bc100944..000000000 --- a/cinder/volume/drivers/san/hp/hpmsa_common.py +++ /dev/null @@ -1,72 +0,0 @@ -# Copyright 2014 Objectif Libre -# Copyright 2015 DotHill Systems -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
-# - -from oslo_config import cfg - -from cinder.volume import configuration -from cinder.volume.drivers.dothill import dothill_common -from cinder.volume.drivers.san.hp import hpmsa_client - -common_opts = [ - cfg.StrOpt('hpmsa_backend_name', - default='A', - help="Pool or Vdisk name to use for volume creation."), - cfg.StrOpt('hpmsa_backend_type', - choices=['linear', 'virtual'], - default='virtual', - help="linear (for Vdisk) or virtual (for Pool)."), - cfg.StrOpt('hpmsa_api_protocol', - choices=['http', 'https'], - default='https', - help="HPMSA API interface protocol."), - cfg.BoolOpt('hpmsa_verify_certificate', - default=False, - help="Whether to verify HPMSA array SSL certificate."), - cfg.StrOpt('hpmsa_verify_certificate_path', - help="HPMSA array SSL certificate path."), - -] - -iscsi_opts = [ - cfg.ListOpt('hpmsa_iscsi_ips', - default=[], - help="List of comma-separated target iSCSI IP addresses."), -] - -CONF = cfg.CONF -CONF.register_opts(common_opts, group=configuration.SHARED_CONF_GROUP) -CONF.register_opts(iscsi_opts, group=configuration.SHARED_CONF_GROUP) - - -class HPMSACommon(dothill_common.DotHillCommon): - VERSION = "1.6" - - def __init__(self, config): - self.config = config - self.vendor_name = "HPMSA" - self.backend_name = self.config.hpmsa_backend_name - self.backend_type = self.config.hpmsa_backend_type - self.api_protocol = self.config.hpmsa_api_protocol - ssl_verify = False - if (self.api_protocol == 'https' and - self.config.hpmsa_verify_certificate): - ssl_verify = self.config.hpmsa_verify_certificate_path or True - - self.client = hpmsa_client.HPMSAClient(self.config.san_ip, - self.config.san_login, - self.config.san_password, - self.api_protocol, - ssl_verify) diff --git a/cinder/volume/drivers/san/hp/hpmsa_fc.py b/cinder/volume/drivers/san/hp/hpmsa_fc.py deleted file mode 100644 index 008a5ae51..000000000 --- a/cinder/volume/drivers/san/hp/hpmsa_fc.py +++ /dev/null @@ -1,44 +0,0 @@ -# Copyright 2014 Objectif Libre -# Copyright 2015 
Dot Hill Systems Corp. -# Copyright 2016 Seagate Technology or one of its affiliates -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -# - -from cinder import interface -from cinder.volume.drivers.dothill import dothill_fc -from cinder.volume.drivers.san.hp import hpmsa_common - - -@interface.volumedriver -class HPMSAFCDriver(dothill_fc.DotHillFCDriver): - """OpenStack Fibre Channel cinder drivers for HPMSA arrays. - - Version history: - 1.0 - Inheriting from DotHill cinder drivers. - 1.6 - Add management path redundancy and reduce load placed - on management controller. - """ - - VERSION = "1.6" - - CI_WIKI_NAME = "Vedams-HPMSA_FCISCSIDriver_CI" - - SUPPORTED = True - - def __init__(self, *args, **kwargs): - super(HPMSAFCDriver, self).__init__(*args, **kwargs) - self.configuration.append_config_values(hpmsa_common.common_opts) - - def _init_common(self): - return hpmsa_common.HPMSACommon(self.configuration) diff --git a/cinder/volume/drivers/san/hp/hpmsa_iscsi.py b/cinder/volume/drivers/san/hp/hpmsa_iscsi.py deleted file mode 100644 index 098223c32..000000000 --- a/cinder/volume/drivers/san/hp/hpmsa_iscsi.py +++ /dev/null @@ -1,46 +0,0 @@ -# Copyright 2014 Objectif Libre -# Copyright 2015 Dot Hill Systems Corp. -# Copyright 2016 Seagate Technology or one of its affiliates -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. 
You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -# - -from cinder import interface -from cinder.volume.drivers.dothill import dothill_iscsi -from cinder.volume.drivers.san.hp import hpmsa_common - - -@interface.volumedriver -class HPMSAISCSIDriver(dothill_iscsi.DotHillISCSIDriver): - """OpenStack iSCSI cinder drivers for HPMSA arrays. - - Version history: - 1.0 - Inheriting from DotHill cinder drivers. - 1.6 - Add management path redundancy and reduce load placed - on management controller. - """ - - VERSION = "1.6" - - CI_WIKI_NAME = "Vedams-HPMSA_FCISCSIDriver_CI" - - SUPPORTED = True - - def __init__(self, *args, **kwargs): - super(HPMSAISCSIDriver, self).__init__(*args, **kwargs) - self.configuration.append_config_values(hpmsa_common.common_opts) - self.configuration.append_config_values(hpmsa_common.iscsi_opts) - self.iscsi_ips = self.configuration.hpmsa_iscsi_ips - - def _init_common(self): - return hpmsa_common.HPMSACommon(self.configuration) diff --git a/cinder/volume/drivers/san/san.py b/cinder/volume/drivers/san/san.py deleted file mode 100644 index a5de68831..000000000 --- a/cinder/volume/drivers/san/san.py +++ /dev/null @@ -1,184 +0,0 @@ -# Copyright 2011 Justin Santa Barbara -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. 
You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -""" -Default Driver for san-stored volumes. - -The unique thing about a SAN is that we don't expect that we can run the volume -controller on the SAN hardware. We expect to access it over SSH or some API. -""" - -import random - -from eventlet import greenthread -from oslo_concurrency import processutils -from oslo_config import cfg -from oslo_log import log as logging -from oslo_utils import excutils - -from cinder import exception -from cinder.i18n import _ -from cinder import ssh_utils -from cinder import utils -from cinder.volume import configuration -from cinder.volume import driver - -LOG = logging.getLogger(__name__) - -san_opts = [ - cfg.BoolOpt('san_thin_provision', - default=True, - help='Use thin provisioning for SAN volumes?'), - cfg.StrOpt('san_ip', - default='', - help='IP address of SAN controller'), - cfg.StrOpt('san_login', - default='admin', - help='Username for SAN controller'), - cfg.StrOpt('san_password', - default='', - help='Password for SAN controller', - secret=True), - cfg.StrOpt('san_private_key', - default='', - help='Filename of private key to use for SSH authentication'), - cfg.StrOpt('san_clustername', - default='', - help='Cluster name to use for creating volumes'), - cfg.PortOpt('san_ssh_port', - default=22, - help='SSH port to use with SAN'), - cfg.BoolOpt('san_is_local', - default=False, - help='Execute commands locally instead of over SSH; ' - 'use if the volume service is running on the SAN device'), - cfg.IntOpt('ssh_conn_timeout', - default=30, - help="SSH connection timeout in seconds"), - 
cfg.IntOpt('ssh_min_pool_conn', - default=1, - help='Minimum ssh connections in the pool'), - cfg.IntOpt('ssh_max_pool_conn', - default=5, - help='Maximum ssh connections in the pool'), -] - -CONF = cfg.CONF -CONF.register_opts(san_opts, group=configuration.SHARED_CONF_GROUP) - - -class SanDriver(driver.BaseVD): - """Base class for SAN-style storage volumes - - A SAN-style storage value is 'different' because the volume controller - probably won't run on it, so we need to access is over SSH or another - remote protocol. - """ - - def __init__(self, *args, **kwargs): - execute = kwargs.pop('execute', self.san_execute) - super(SanDriver, self).__init__(execute=execute, - *args, **kwargs) - self.configuration.append_config_values(san_opts) - self.run_local = self.configuration.san_is_local - self.sshpool = None - - def san_execute(self, *cmd, **kwargs): - if self.run_local: - return utils.execute(*cmd, **kwargs) - else: - check_exit_code = kwargs.pop('check_exit_code', None) - return self._run_ssh(cmd, check_exit_code) - - def _run_ssh(self, cmd_list, check_exit_code=True, attempts=1): - utils.check_ssh_injection(cmd_list) - command = ' '. 
join(cmd_list) - - if not self.sshpool: - password = self.configuration.san_password - privatekey = self.configuration.san_private_key - min_size = self.configuration.ssh_min_pool_conn - max_size = self.configuration.ssh_max_pool_conn - self.sshpool = ssh_utils.SSHPool( - self.configuration.san_ip, - self.configuration.san_ssh_port, - self.configuration.ssh_conn_timeout, - self.configuration.san_login, - password=password, - privatekey=privatekey, - min_size=min_size, - max_size=max_size) - last_exception = None - try: - with self.sshpool.item() as ssh: - while attempts > 0: - attempts -= 1 - try: - return processutils.ssh_execute( - ssh, - command, - check_exit_code=check_exit_code) - except Exception as e: - LOG.error(e) - last_exception = e - greenthread.sleep(random.randint(20, 500) / 100.0) - try: - raise processutils.ProcessExecutionError( - exit_code=last_exception.exit_code, - stdout=last_exception.stdout, - stderr=last_exception.stderr, - cmd=last_exception.cmd) - except AttributeError: - raise processutils.ProcessExecutionError( - exit_code=-1, - stdout="", - stderr="Error running SSH command", - cmd=command) - - except Exception: - with excutils.save_and_reraise_exception(): - LOG.error("Error running SSH command: %s", command) - - def ensure_export(self, context, volume): - """Synchronously recreates an export for a logical volume.""" - pass - - def create_export(self, context, volume, connector): - """Exports the volume.""" - pass - - def remove_export(self, context, volume): - """Removes an export for a logical volume.""" - pass - - def check_for_setup_error(self): - """Returns an error if prerequisites aren't met.""" - if not self.run_local: - if not (self.configuration.san_password or - self.configuration.san_private_key): - raise exception.InvalidInput( - reason=_('Specify san_password or san_private_key')) - - # The san_ip must always be set, because we use it for the target - if not self.configuration.san_ip: - raise 
exception.InvalidInput(reason=_("san_ip must be set")) - - -class SanISCSIDriver(SanDriver, driver.ISCSIDriver): - def __init__(self, *args, **kwargs): - super(SanISCSIDriver, self).__init__(*args, **kwargs) - - def _build_iscsi_target_name(self, volume): - return "%s%s" % (self.configuration.iscsi_target_prefix, - volume['name']) diff --git a/cinder/volume/drivers/sheepdog.py b/cinder/volume/drivers/sheepdog.py deleted file mode 100644 index a2b4bc513..000000000 --- a/cinder/volume/drivers/sheepdog.py +++ /dev/null @@ -1,641 +0,0 @@ -# Copyright 2012 OpenStack Foundation -# Copyright (c) 2013 Zelin.io -# Copyright (C) 2015 Nippon Telegraph and Telephone Corporation. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -""" -SheepDog Volume Driver. 
- -""" -import errno -import eventlet -import io -import random -import re - -from oslo_concurrency import processutils -from oslo_config import cfg -from oslo_log import log as logging -from oslo_utils import excutils -from oslo_utils import units - -from cinder import exception -from cinder.i18n import _ -from cinder.image import image_utils -from cinder import interface -from cinder import utils -from cinder.volume import configuration -from cinder.volume import driver - - -LOG = logging.getLogger(__name__) - -sheepdog_opts = [ - cfg.StrOpt('sheepdog_store_address', - default='127.0.0.1', - help=('IP address of sheep daemon.')), - cfg.PortOpt('sheepdog_store_port', - default=7000, - help=('Port of sheep daemon.')) -] - -CONF = cfg.CONF -CONF.import_opt("image_conversion_dir", "cinder.image.image_utils") -CONF.register_opts(sheepdog_opts, group=configuration.SHARED_CONF_GROUP) - - -class SheepdogClient(object): - """Sheepdog command executor.""" - - QEMU_SHEEPDOG_PREFIX = 'sheepdog:' - DOG_RESP_CONNECTION_ERROR = 'failed to connect to' - DOG_RESP_CLUSTER_RUNNING = 'Cluster status: running' - DOG_RESP_CLUSTER_NOT_FORMATTED = ('Cluster status: ' - 'Waiting for cluster to be formatted') - DOG_RESP_CLUSTER_WAITING = ('Cluster status: ' - 'Waiting for other nodes to join cluster') - DOG_RESP_VDI_ALREADY_EXISTS = ': VDI exists already' - DOG_RESP_VDI_NOT_FOUND = ': No VDI found' - DOG_RESP_VDI_SHRINK_NOT_SUPPORT = 'Shrinking VDIs is not implemented' - DOG_RESP_VDI_SIZE_TOO_LARGE = 'New VDI size is too large' - DOG_RESP_SNAPSHOT_VDI_NOT_FOUND = ': No VDI found' - DOG_RESP_SNAPSHOT_NOT_FOUND = ': Failed to find requested tag' - DOG_RESP_SNAPSHOT_EXISTED = 'tag (%(snapname)s) is existed' - QEMU_IMG_RESP_CONNECTION_ERROR = ('Failed to connect socket: ' - 'Connection refused') - QEMU_IMG_RESP_ALREADY_EXISTS = ': VDI exists already' - QEMU_IMG_RESP_SNAPSHOT_NOT_FOUND = 'Failed to find the requested tag' - QEMU_IMG_RESP_VDI_NOT_FOUND = 'No vdi found' - 
QEMU_IMG_RESP_SIZE_TOO_LARGE = 'An image is too large.' - - def __init__(self, node_list, port): - self.node_list = node_list - self.port = port - - def get_addr(self): - """Get a random node in sheepdog cluster.""" - return self.node_list[random.randint(0, len(self.node_list) - 1)] - - def local_path(self, volume): - """Return a sheepdog location path.""" - return "sheepdog:%(addr)s:%(port)s:%(name)s" % { - 'addr': self.get_addr(), - 'port': self.port, - 'name': volume['name']} - - def _run_dog(self, command, subcommand, *params): - """Execute dog command wrapper.""" - addr = self.get_addr() - cmd = ('env', 'LC_ALL=C', 'LANG=C', 'dog', command, subcommand, - '-a', addr, '-p', self.port) + params - try: - (_stdout, _stderr) = utils.execute(*cmd) - if _stderr.startswith(self.DOG_RESP_CONNECTION_ERROR): - # NOTE(tishizaki) - # Dog command does not return error_code although - # dog command cannot connect to sheep process. - # That is a Sheepdog's bug. - # To avoid a Sheepdog's bug, now we need to check stderr. - # If Sheepdog has been fixed, this check logic is needed - # by old Sheepdog users. - reason = (_('Failed to connect to sheep daemon. ' - 'addr: %(addr)s, port: %(port)s'), - {'addr': addr, 'port': self.port}) - raise exception.SheepdogError(reason=reason) - return (_stdout, _stderr) - except OSError as e: - with excutils.save_and_reraise_exception(): - if e.errno == errno.ENOENT: - msg = 'Sheepdog is not installed. OSError: command is %s.' - else: - msg = 'OSError: command is %s.' - LOG.error(msg, cmd) - except processutils.ProcessExecutionError as e: - _stderr = e.stderr - if _stderr.startswith(self.DOG_RESP_CONNECTION_ERROR): - reason = (_('Failed to connect to sheep daemon. 
' - 'addr: %(addr)s, port: %(port)s'), - {'addr': addr, 'port': self.port}) - raise exception.SheepdogError(reason=reason) - raise exception.SheepdogCmdError( - cmd=e.cmd, - exit_code=e.exit_code, - stdout=e.stdout.replace('\n', '\\n'), - stderr=e.stderr.replace('\n', '\\n')) - - def _run_qemu_img(self, command, *params): - """Executes qemu-img command wrapper.""" - addr = self.get_addr() - cmd = ['env', 'LC_ALL=C', 'LANG=C', 'qemu-img', command] - for param in params: - if param.startswith(self.QEMU_SHEEPDOG_PREFIX): - # replace 'sheepdog:vdiname[:snapshotname]' to - # 'sheepdog:addr:port:vdiname[:snapshotname]' - param = param.replace(self.QEMU_SHEEPDOG_PREFIX, - '%(prefix)s%(addr)s:%(port)s:' % - {'prefix': self.QEMU_SHEEPDOG_PREFIX, - 'addr': addr, 'port': self.port}, - 1) - cmd.append(param) - try: - return utils.execute(*cmd) - except OSError as e: - with excutils.save_and_reraise_exception(): - if e.errno == errno.ENOENT: - msg = ('Qemu-img is not installed. OSError: command is ' - '%(cmd)s.') - else: - msg = 'OSError: command is %(cmd)s.' - LOG.error(msg, {'cmd': tuple(cmd)}) - except processutils.ProcessExecutionError as e: - _stderr = e.stderr - if self.QEMU_IMG_RESP_CONNECTION_ERROR in _stderr: - reason = (_('Failed to connect to sheep daemon. ' - 'addr: %(addr)s, port: %(port)s'), - {'addr': addr, 'port': self.port}) - raise exception.SheepdogError(reason=reason) - raise exception.SheepdogCmdError( - cmd=e.cmd, - exit_code=e.exit_code, - stdout=e.stdout.replace('\n', '\\n'), - stderr=e.stderr.replace('\n', '\\n')) - - def check_cluster_status(self): - try: - (_stdout, _stderr) = self._run_dog('cluster', 'info') - except exception.SheepdogCmdError as e: - cmd = e.kwargs['cmd'] - with excutils.save_and_reraise_exception(): - LOG.error('Failed to check cluster status.' 
- '(command: %s)', cmd) - - if _stdout.startswith(self.DOG_RESP_CLUSTER_RUNNING): - LOG.debug('Sheepdog cluster is running.') - return - - reason = _('Invalid sheepdog cluster status.') - if _stdout.startswith(self.DOG_RESP_CLUSTER_NOT_FORMATTED): - reason = _('Cluster is not formatted. ' - 'You should probably perform "dog cluster format".') - elif _stdout.startswith(self.DOG_RESP_CLUSTER_WAITING): - reason = _('Waiting for all nodes to join cluster. ' - 'Ensure all sheep daemons are running.') - raise exception.SheepdogError(reason=reason) - - def create(self, vdiname, size): - try: - self._run_dog('vdi', 'create', vdiname, '%sG' % size) - except exception.SheepdogCmdError as e: - _stderr = e.kwargs['stderr'] - with excutils.save_and_reraise_exception(): - if _stderr.rstrip('\\n').endswith( - self.DOG_RESP_VDI_ALREADY_EXISTS): - LOG.error('Volume already exists. %s', vdiname) - else: - LOG.error('Failed to create volume. %s', vdiname) - - def delete(self, vdiname): - try: - (_stdout, _stderr) = self._run_dog('vdi', 'delete', vdiname) - if _stderr.rstrip().endswith(self.DOG_RESP_VDI_NOT_FOUND): - LOG.warning('Volume not found. %s', vdiname) - except exception.SheepdogCmdError as e: - _stderr = e.kwargs['stderr'] - with excutils.save_and_reraise_exception(): - LOG.error('Failed to delete volume. %s', vdiname) - - def create_snapshot(self, vdiname, snapname): - try: - self._run_dog('vdi', 'snapshot', '-s', snapname, vdiname) - except exception.SheepdogCmdError as e: - cmd = e.kwargs['cmd'] - _stderr = e.kwargs['stderr'] - with excutils.save_and_reraise_exception(): - if _stderr.rstrip('\\n').endswith( - self.DOG_RESP_SNAPSHOT_VDI_NOT_FOUND): - LOG.error('Volume "%s" not found. Please check the ' - 'results of "dog vdi list".', - vdiname) - elif _stderr.rstrip('\\n').endswith( - self.DOG_RESP_SNAPSHOT_EXISTED % - {'snapname': snapname}): - LOG.error('Snapshot "%s" already exists.', snapname) - else: - LOG.error('Failed to create snapshot. 
(command: %s)', - cmd) - - def delete_snapshot(self, vdiname, snapname): - try: - (_stdout, _stderr) = self._run_dog('vdi', 'delete', '-s', - snapname, vdiname) - if _stderr.rstrip().endswith(self.DOG_RESP_SNAPSHOT_NOT_FOUND): - LOG.warning('Snapshot "%s" not found.', snapname) - elif _stderr.rstrip().endswith(self.DOG_RESP_VDI_NOT_FOUND): - LOG.warning('Volume "%s" not found.', vdiname) - except exception.SheepdogCmdError as e: - cmd = e.kwargs['cmd'] - _stderr = e.kwargs['stderr'] - with excutils.save_and_reraise_exception(): - LOG.error('Failed to delete snapshot. (command: %s)', - cmd) - - def clone(self, src_vdiname, src_snapname, dst_vdiname, size): - try: - self._run_qemu_img('create', '-b', - 'sheepdog:%(src_vdiname)s:%(src_snapname)s' % - {'src_vdiname': src_vdiname, - 'src_snapname': src_snapname}, - 'sheepdog:%s' % dst_vdiname, '%sG' % size) - except exception.SheepdogCmdError as e: - cmd = e.kwargs['cmd'] - _stderr = e.kwargs['stderr'] - with excutils.save_and_reraise_exception(): - if self.QEMU_IMG_RESP_ALREADY_EXISTS in _stderr: - LOG.error('Clone volume "%s" already exists. ' - 'Please check the results of "dog vdi list".', - dst_vdiname) - elif self.QEMU_IMG_RESP_VDI_NOT_FOUND in _stderr: - LOG.error('Src Volume "%s" not found. ' - 'Please check the results of "dog vdi list".', - src_vdiname) - elif self.QEMU_IMG_RESP_SNAPSHOT_NOT_FOUND in _stderr: - LOG.error('Snapshot "%s" not found. 
' - 'Please check the results of "dog vdi list".', - src_snapname) - elif self.QEMU_IMG_RESP_SIZE_TOO_LARGE in _stderr: - LOG.error('Volume size "%sG" is too large.', size) - else: - LOG.error('Failed to clone volume.(command: %s)', cmd) - - def resize(self, vdiname, size): - size = int(size) * units.Gi - try: - (_stdout, _stderr) = self._run_dog('vdi', 'resize', vdiname, size) - except exception.SheepdogCmdError as e: - _stderr = e.kwargs['stderr'] - with excutils.save_and_reraise_exception(): - if _stderr.rstrip('\\n').endswith( - self.DOG_RESP_VDI_NOT_FOUND): - LOG.error('Failed to resize vdi. vdi not found. %s', - vdiname) - elif _stderr.startswith(self.DOG_RESP_VDI_SHRINK_NOT_SUPPORT): - LOG.error('Failed to resize vdi. ' - 'Shrinking vdi not supported. ' - 'vdi: %(vdiname)s new size: %(size)s', - {'vdiname': vdiname, 'size': size}) - elif _stderr.startswith(self.DOG_RESP_VDI_SIZE_TOO_LARGE): - LOG.error('Failed to resize vdi. ' - 'Too large volume size. ' - 'vdi: %(vdiname)s new size: %(size)s', - {'vdiname': vdiname, 'size': size}) - else: - LOG.error('Failed to resize vdi. ' - 'vdi: %(vdiname)s new size: %(size)s', - {'vdiname': vdiname, 'size': size}) - - def get_volume_stats(self): - try: - (_stdout, _stderr) = self._run_dog('node', 'info', '-r') - except exception.SheepdogCmdError as e: - with excutils.save_and_reraise_exception(): - LOG.error('Failed to get volume status. %s', e) - return _stdout - - def get_vdi_info(self, vdiname): - # Get info of the specified vdi. - try: - (_stdout, _stderr) = self._run_dog('vdi', 'list', vdiname, '-r') - except exception.SheepdogCmdError as e: - with excutils.save_and_reraise_exception(): - LOG.error('Failed to get vdi info. %s', e) - return _stdout - - def update_node_list(self): - try: - (_stdout, _stderr) = self._run_dog('node', 'list', '-r') - except exception.SheepdogCmdError as e: - with excutils.save_and_reraise_exception(): - LOG.error('Failed to get node list. 
%s', e) - node_list = [] - stdout = _stdout.strip('\n') - for line in stdout.split('\n'): - node_list.append(line.split()[1].split(':')[0]) - self.node_list = node_list - - -class SheepdogIOWrapper(io.RawIOBase): - """File-like object with Sheepdog backend.""" - - def __init__(self, addr, port, volume, snapshot_name=None): - self._addr = addr - self._port = port - self._vdiname = volume['name'] - self._snapshot_name = snapshot_name - self._offset = 0 - # SheepdogIOWrapper instance becomes invalid if a write error occurs. - self._valid = True - - def _execute(self, cmd, data=None): - try: - # NOTE(yamada-h): processutils.execute causes busy waiting - # under eventlet. - # To avoid wasting CPU resources, it should not be used for - # the command which takes long time to execute. - # For workaround, we replace a subprocess module with - # the original one while only executing a read/write command. - _processutils_subprocess = processutils.subprocess - processutils.subprocess = eventlet.patcher.original('subprocess') - return processutils.execute(*cmd, process_input=data)[0] - except (processutils.ProcessExecutionError, OSError): - self._valid = False - msg = _('Sheepdog I/O Error, command was: "%s".') % ' '.join(cmd) - raise exception.VolumeDriverException(message=msg) - finally: - processutils.subprocess = _processutils_subprocess - - def read(self, length=None): - if not self._valid: - msg = _('An error occurred while reading volume "%s".' - ) % self._vdiname - raise exception.VolumeDriverException(message=msg) - - cmd = ['dog', 'vdi', 'read', '-a', self._addr, '-p', self._port] - if self._snapshot_name: - cmd.extend(('-s', self._snapshot_name)) - cmd.extend((self._vdiname, self._offset)) - if length: - cmd.append(length) - data = self._execute(cmd) - self._offset += len(data) - return data - - def write(self, data): - if not self._valid: - msg = _('An error occurred while writing to volume "%s".' 
- ) % self._vdiname - raise exception.VolumeDriverException(message=msg) - - length = len(data) - cmd = ('dog', 'vdi', 'write', '-a', self._addr, '-p', self._port, - self._vdiname, self._offset, length) - self._execute(cmd, data) - self._offset += length - return length - - def seek(self, offset, whence=0): - if not self._valid: - msg = _('An error occurred while seeking for volume "%s".' - ) % self._vdiname - raise exception.VolumeDriverException(message=msg) - - if whence == 0: - # SEEK_SET or 0 - start of the stream (the default); - # offset should be zero or positive - new_offset = offset - elif whence == 1: - # SEEK_CUR or 1 - current stream position; offset may be negative - new_offset = self._offset + offset - else: - # SEEK_END or 2 - end of the stream; offset is usually negative - # TODO(yamada-h): Support SEEK_END - raise IOError(_("Invalid argument - whence=%s not supported.") % - whence) - - if new_offset < 0: - raise IOError(_("Invalid argument - negative seek offset.")) - - self._offset = new_offset - - def tell(self): - return self._offset - - def flush(self): - pass - - def fileno(self): - """Sheepdog does not have support for fileno so we raise IOError. 
- - Raising IOError is recommended way to notify caller that interface is - not supported - see http://docs.python.org/2/library/io.html#io.IOBase - """ - raise IOError(_("fileno is not supported by SheepdogIOWrapper")) - - -@interface.volumedriver -class SheepdogDriver(driver.VolumeDriver): - """Executes commands relating to Sheepdog Volumes.""" - - VERSION = "1.0.0" - - # ThirdPartySystems wiki page - CI_WIKI_NAME = "Cinder_Jenkins" - - def __init__(self, *args, **kwargs): - super(SheepdogDriver, self).__init__(*args, **kwargs) - self.configuration.append_config_values(sheepdog_opts) - addr = self.configuration.sheepdog_store_address - self.port = self.configuration.sheepdog_store_port - self.stats_pattern = re.compile(r'[\w\s%]*Total\s(\d+)\s(\d+)*') - self._stats = {} - self.node_list = [addr] - self.client = SheepdogClient(self.node_list, self.port) - - def check_for_setup_error(self): - """Check cluster status and update node list.""" - self.client.check_cluster_status() - self.client.update_node_list() - - def _is_cloneable(self, image_location, image_meta): - """Check the image can be clone or not.""" - if image_location is None: - return False - - prefix = 'sheepdog://' - if not image_location.startswith(prefix): - LOG.debug("Image is not stored in sheepdog.") - return False - - if image_meta['disk_format'] != 'raw': - LOG.debug("Image clone requires image format to be " - "'raw' but image %s(%s) is '%s'.", - image_location, - image_meta['id'], - image_meta['disk_format']) - return False - - # check whether volume is stored in sheepdog - # The image location would be like - # "sheepdog://192.168.10.2:7000:Alice" - (ip, port, name) = image_location[len(prefix):].split(":", 2) - - stdout = self.client.get_vdi_info(name) - # Dog command return 0 and has a null output if the volume not exists - if stdout: - return True - else: - LOG.debug("Can not find vdi %(image)s, is not cloneable", - {'image': name}) - return False - - def clone_image(self, context, 
volume, - image_location, image_meta, - image_service): - """Create a volume efficiently from an existing image.""" - image_location = image_location[0] if image_location else None - if not self._is_cloneable(image_location, image_meta): - return {}, False - - volume_ref = {'name': image_meta['id'], 'size': image_meta['size']} - self.create_cloned_volume(volume, volume_ref) - self.client.resize(volume.name, volume.size) - - vol_path = self.client.local_path(volume) - return {'provider_location': vol_path}, True - - def create_cloned_volume(self, volume, src_vref): - """Clone a sheepdog volume from another volume.""" - snapshot_name = 'tmp-snap-%s-%s' % (src_vref['name'], volume.id) - snapshot = { - 'name': snapshot_name, - 'volume_name': src_vref['name'], - 'volume_size': src_vref['size'], - } - - self.client.create_snapshot(snapshot['volume_name'], snapshot_name) - - try: - self.client.clone(snapshot['volume_name'], snapshot_name, - volume.name, volume.size) - except Exception: - with excutils.save_and_reraise_exception(): - LOG.error('Failed to create cloned volume %s.', - volume.name) - finally: - # Delete temp Snapshot - self.client.delete_snapshot(snapshot['volume_name'], snapshot_name) - - def create_volume(self, volume): - """Create a sheepdog volume.""" - self.client.create(volume.name, volume.size) - - def create_volume_from_snapshot(self, volume, snapshot): - """Create a sheepdog volume from a snapshot.""" - self.client.clone(snapshot.volume_name, snapshot.name, - volume.name, volume.size) - - def delete_volume(self, volume): - """Delete a logical volume.""" - self.client.delete(volume.name) - - def copy_image_to_volume(self, context, volume, image_service, image_id): - with image_utils.temporary_file() as tmp: - # (wenhao): we don't need to convert to raw for sheepdog. - image_utils.fetch_verify_image(context, image_service, - image_id, tmp) - - # remove the image created by import before this function. 
- # see volume/drivers/manager.py:_create_volume - self.client.delete(volume.name) - # convert and store into sheepdog - image_utils.convert_image(tmp, self.client.local_path(volume), - 'raw') - self.client.resize(volume.name, volume.size) - - def copy_volume_to_image(self, context, volume, image_service, image_meta): - """Copy the volume to the specified image.""" - image_id = image_meta['id'] - with image_utils.temporary_file() as tmp: - # image_utils.convert_image doesn't support "sheepdog:" source, - # so we use the qemu-img directly. - # Sheepdog volume is always raw-formatted. - cmd = ('qemu-img', - 'convert', - '-f', 'raw', - '-t', 'none', - '-O', 'raw', - self.client.local_path(volume), - tmp) - self._try_execute(*cmd) - - with open(tmp, 'rb') as image_file: - image_service.update(context, image_id, {}, image_file) - - def create_snapshot(self, snapshot): - """Create a sheepdog snapshot.""" - self.client.create_snapshot(snapshot.volume_name, snapshot.name) - - def delete_snapshot(self, snapshot): - """Delete a sheepdog snapshot.""" - self.client.delete_snapshot(snapshot.volume_name, snapshot.name) - - def ensure_export(self, context, volume): - """Safely and synchronously recreate an export for a logical volume.""" - pass - - def create_export(self, context, volume, connector): - """Export a volume.""" - pass - - def remove_export(self, context, volume): - """Remove an export for a logical volume.""" - pass - - def initialize_connection(self, volume, connector): - return { - 'driver_volume_type': 'sheepdog', - 'data': { - 'name': volume['name'], - 'hosts': [self.client.get_addr()], - 'ports': ["%d" % self.port], - } - } - - def terminate_connection(self, volume, connector, **kwargs): - pass - - def _update_volume_stats(self): - stats = {} - - backend_name = "sheepdog" - if self.configuration: - backend_name = self.configuration.safe_get('volume_backend_name') - stats["volume_backend_name"] = backend_name or 'sheepdog' - stats['vendor_name'] = 'Open Source' 
- stats['driver_version'] = self.VERSION - stats['storage_protocol'] = 'sheepdog' - stats['total_capacity_gb'] = 'unknown' - stats['free_capacity_gb'] = 'unknown' - stats['reserved_percentage'] = 0 - stats['QoS_support'] = False - - stdout = self.client.get_volume_stats() - m = self.stats_pattern.match(stdout) - total = float(m.group(1)) - used = float(m.group(2)) - stats['total_capacity_gb'] = total / units.Gi - stats['free_capacity_gb'] = (total - used) / units.Gi - - self._stats = stats - - def get_volume_stats(self, refresh=False): - if refresh: - self._update_volume_stats() - return self._stats - - def extend_volume(self, volume, new_size): - """Extend an Existing Volume.""" - self.client.resize(volume.name, new_size) - LOG.debug('Extend volume from %(old_size)s GB to %(new_size)s GB.', - {'old_size': volume.size, 'new_size': new_size}) diff --git a/cinder/volume/drivers/solidfire.py b/cinder/volume/drivers/solidfire.py deleted file mode 100644 index d985f2147..000000000 --- a/cinder/volume/drivers/solidfire.py +++ /dev/null @@ -1,2197 +0,0 @@ -# All Rights Reserved. -# Copyright 2013 SolidFire Inc - -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -import inspect -import json -import math -import random -import re -import socket -import string -import time -import warnings - -from oslo_config import cfg -from oslo_log import log as logging -from oslo_utils import excutils -from oslo_utils import timeutils -from oslo_utils import units -import requests -from requests.packages.urllib3 import exceptions -import six - -from cinder import context -from cinder import exception -from cinder.i18n import _ -from cinder.image import image_utils -from cinder import interface -from cinder.objects import fields -from cinder import utils -from cinder.volume import configuration -from cinder.volume.drivers.san import san -from cinder.volume import qos_specs -from cinder.volume.targets import iscsi as iscsi_driver -from cinder.volume import utils as vol_utils -from cinder.volume import volume_types - -LOG = logging.getLogger(__name__) - -sf_opts = [ - cfg.BoolOpt('sf_emulate_512', - default=True, - help='Set 512 byte emulation on volume creation; '), - - cfg.BoolOpt('sf_allow_tenant_qos', - default=False, - help='Allow tenants to specify QOS on create'), - - cfg.StrOpt('sf_account_prefix', - help='Create SolidFire accounts with this prefix. Any string ' - 'can be used here, but the string \"hostname\" is special ' - 'and will create a prefix using the cinder node hostname ' - '(previous default behavior). The default is NO prefix.'), - - cfg.StrOpt('sf_volume_prefix', - default='UUID-', - help='Create SolidFire volumes with this prefix. Volume names ' - 'are of the form . 
' - 'The default is to use a prefix of \'UUID-\'.'), - - cfg.StrOpt('sf_template_account_name', - default='openstack-vtemplate', - help='Account name on the SolidFire Cluster to use as owner of ' - 'template/cache volumes (created if does not exist).'), - - cfg.BoolOpt('sf_allow_template_caching', - default=True, - help='Create an internal cache of copy of images when ' - 'a bootable volume is created to eliminate fetch from ' - 'glance and qemu-conversion on subsequent calls.'), - - cfg.StrOpt('sf_svip', - help='Overrides default cluster SVIP with the one specified. ' - 'This is required or deployments that have implemented ' - 'the use of VLANs for iSCSI networks in their cloud.'), - - cfg.BoolOpt('sf_enable_volume_mapping', - default=True, - help='Create an internal mapping of volume IDs and account. ' - 'Optimizes lookups and performance at the expense of ' - 'memory, very large deployments may want to consider ' - 'setting to False.'), - - cfg.PortOpt('sf_api_port', - default=443, - help='SolidFire API port. 
Useful if the device api is behind ' - 'a proxy on a different port.'), - - cfg.BoolOpt('sf_enable_vag', - default=False, - help='Utilize volume access groups on a per-tenant basis.')] - -CONF = cfg.CONF -CONF.register_opts(sf_opts, group=configuration.SHARED_CONF_GROUP) - -# SolidFire API Error Constants -xExceededLimit = 'xExceededLimit' -xAlreadyInVolumeAccessGroup = 'xAlreadyInVolumeAccessGroup' -xVolumeAccessGroupIDDoesNotExist = 'xVolumeAccessGroupIDDoesNotExist' -xNotInVolumeAccessGroup = 'xNotInVolumeAccessGroup' - - -def retry(exc_tuple, tries=5, delay=1, backoff=2): - def retry_dec(f): - @six.wraps(f) - def func_retry(*args, **kwargs): - _tries, _delay = tries, delay - while _tries > 1: - try: - return f(*args, **kwargs) - except exc_tuple: - time.sleep(_delay) - _tries -= 1 - _delay *= backoff - LOG.debug('Retrying %(args)s, %(tries)s attempts ' - 'remaining...', - {'args': args, 'tries': _tries}) - # NOTE(jdg): Don't log the params passed here - # some cmds like createAccount will have sensitive - # info in the params, grab only the second tuple - # which should be the Method - msg = (_('Retry count exceeded for command: %s') % - (args[1],)) - LOG.error(msg) - raise exception.SolidFireAPIException(message=msg) - return func_retry - return retry_dec - - -@interface.volumedriver -class SolidFireDriver(san.SanISCSIDriver): - """OpenStack driver to enable SolidFire cluster. 
- - Version history: - 1.0 - Initial driver - 1.1 - Refactor, clone support, qos by type and minor bug fixes - 1.2 - Add xfr and retype support - 1.2.1 - Add export/import support - 1.2.2 - Catch VolumeNotFound on accept xfr - 2.0.0 - Move from httplib to requests - 2.0.1 - Implement SolidFire Snapshots - 2.0.2 - Implement secondary account - 2.0.3 - Implement cluster pairing - 2.0.4 - Implement volume replication - 2.0.5 - Try and deal with the stupid retry/clear issues from objects - and tflow - 2.0.6 - Add a lock decorator around the clone_image method - 2.0.7 - Add scaled IOPS - 2.0.8 - Add active status filter to get volume ops - 2.0.9 - Always purge on delete volume - 2.0.10 - Add response to debug on retryable errors - """ - - VERSION = '2.0.10' - - # ThirdPartySystems wiki page - CI_WIKI_NAME = "NetApp_SolidFire_CI" - - driver_prefix = 'solidfire' - - sf_qos_dict = {'slow': {'minIOPS': 100, - 'maxIOPS': 200, - 'burstIOPS': 200}, - 'medium': {'minIOPS': 200, - 'maxIOPS': 400, - 'burstIOPS': 400}, - 'fast': {'minIOPS': 500, - 'maxIOPS': 1000, - 'burstIOPS': 1000}, - 'performant': {'minIOPS': 2000, - 'maxIOPS': 4000, - 'burstIOPS': 4000}, - 'off': None} - - sf_qos_keys = ['minIOPS', 'maxIOPS', 'burstIOPS'] - sf_scale_qos_keys = ['scaledIOPS', 'scaleMin', 'scaleMax', 'scaleBurst'] - sf_iops_lim_min = {'minIOPS': 100, 'maxIOPS': 100, 'burstIOPS': 100} - sf_iops_lim_max = {'minIOPS': 15000, - 'maxIOPS': 200000, - 'burstIOPS': 200000} - cluster_stats = {} - retry_exc_tuple = (exception.SolidFireRetryableException, - requests.exceptions.ConnectionError) - retryable_errors = ['xDBVersionMismatch', - 'xMaxSnapshotsPerVolumeExceeded', - 'xMaxClonesPerVolumeExceeded', - 'xMaxSnapshotsPerNodeExceeded', - 'xMaxClonesPerNodeExceeded', - 'xSliceNotRegistered', - 'xNotReadyForIO'] - - def __init__(self, *args, **kwargs): - super(SolidFireDriver, self).__init__(*args, **kwargs) - self.failed_over_id = kwargs.get('active_backend_id', None) - self.active_cluster_info = {} - 
self.configuration.append_config_values(sf_opts) - self.template_account_id = None - self.max_volumes_per_account = 1990 - self.volume_map = {} - self.cluster_pairs = [] - self.replication_enabled = False - self.failed_over = False - self.target_driver = SolidFireISCSI(solidfire_driver=self, - configuration=self.configuration) - if self.failed_over_id: - remote_info = self._get_remote_info_by_id(self.failed_over_id) - if remote_info: - self._set_active_cluster_info(remote_info['endpoint']) - else: - LOG.error('Failed to initialize SolidFire driver to ' - 'a remote cluster specified at id: %s', - self.failed_over_id) - else: - self._set_active_cluster_info() - - try: - self._update_cluster_status() - except exception.SolidFireAPIException: - pass - - if self.configuration.sf_allow_template_caching: - account = self.configuration.sf_template_account_name - self.template_account_id = self._create_template_account(account) - - if not self.failed_over_id: - self._set_cluster_pairs() - - def locked_image_id_operation(f, external=False): - def lvo_inner1(inst, *args, **kwargs): - lock_tag = inst.driver_prefix - call_args = inspect.getcallargs(f, inst, *args, **kwargs) - - if call_args.get('image_meta'): - image_id = call_args['image_meta']['id'] - else: - err_msg = _('The decorated method must accept image_meta.') - raise exception.VolumeBackendAPIException(data=err_msg) - - @utils.synchronized('%s-%s' % (lock_tag, image_id), - external=external) - def lvo_inner2(): - return f(inst, *args, **kwargs) - return lvo_inner2() - return lvo_inner1 - - def locked_source_id_operation(f, external=False): - def lvo_inner1(inst, *args, **kwargs): - lock_tag = inst.driver_prefix - call_args = inspect.getcallargs(f, inst, *args, **kwargs) - src_arg = call_args.get('source', None) - if src_arg and src_arg.get('id', None): - source_id = call_args['source']['id'] - else: - err_msg = _('The decorated method must accept src_uuid.') - raise 
exception.VolumeBackendAPIException(message=err_msg) - - @utils.synchronized('%s-%s' % (lock_tag, source_id), - external=external) - def lvo_inner2(): - return f(inst, *args, **kwargs) - return lvo_inner2() - return lvo_inner1 - - def __getattr__(self, attr): - if hasattr(self.target_driver, attr): - return getattr(self.target_driver, attr) - else: - msg = _('Attribute: %s not found.') % attr - raise NotImplementedError(msg) - - def _get_remote_info_by_id(self, backend_id): - remote_info = None - for rd in self.configuration.get('replication_device', []): - if rd.get('backend_id', None) == backend_id: - remote_endpoint = self._build_endpoint_info(**rd) - remote_info = self._get_remote_cluster_info(remote_endpoint) - remote_info['endpoint'] = remote_endpoint - if not remote_info['endpoint']['svip']: - remote_info['endpoint']['svip'] = ( - remote_info['svip'] + ':3260') - return remote_info - - def _create_remote_pairing(self, remote_device): - try: - pairing_info = self._issue_api_request('StartClusterPairing', - {}, version='8.0')['result'] - pair_id = self._issue_api_request( - 'CompleteClusterPairing', - {'clusterPairingKey': pairing_info['clusterPairingKey']}, - version='8.0', - endpoint=remote_device['endpoint'])['result']['clusterPairID'] - except exception.SolidFireAPIException as ex: - if 'xPairingAlreadExists' in ex.msg: - LOG.debug('Pairing already exists during init.') - else: - with excutils.save_and_reraise_exception(): - LOG.error('Cluster pairing failed: %s', ex.msg) - LOG.debug(('Initialized Cluster pair with ID: %s'), pair_id) - remote_device['clusterPairID'] = pair_id - return pair_id - - def _get_remote_cluster_info(self, remote_endpoint): - return self._issue_api_request( - 'GetClusterInfo', - {}, - endpoint=remote_endpoint)['result']['clusterInfo'] - - def _set_cluster_pairs(self): - if not self.configuration.get('replication_device', None): - self.replication = False - return - - existing_pairs = self._issue_api_request( - 'ListClusterPairs', - 
{}, - version='8.0')['result']['clusterPairs'] - - remote_pair = {} - for rd in self.configuration.get('replication_device', []): - remote_endpoint = self._build_endpoint_info(**rd) - remote_info = self._get_remote_cluster_info(remote_endpoint) - remote_info['endpoint'] = remote_endpoint - if not remote_info['endpoint']['svip']: - remote_info['endpoint']['svip'] = remote_info['svip'] + ':3260' - - for ep in existing_pairs: - if rd['backend_id'] == ep['mvip']: - remote_pair = ep - LOG.debug("Found remote pair: %s", remote_pair) - remote_info['clusterPairID'] = ep['clusterPairID'] - break - - if not remote_pair: - # NOTE(jdg): create_remote_pairing sets the - # clusterPairID in remote_info for us - self._create_remote_pairing(remote_info) - self.cluster_pairs.append(remote_info) - LOG.debug("Setting replication_enabled to True.") - self.replication_enabled = True - - def _set_active_cluster_info(self, endpoint=None): - if not endpoint: - self.active_cluster_info['endpoint'] = self._build_endpoint_info() - else: - self.active_cluster_info['endpoint'] = endpoint - - for k, v in self._issue_api_request( - 'GetClusterInfo', - {})['result']['clusterInfo'].items(): - self.active_cluster_info[k] = v - - # Add a couple extra things that are handy for us - self.active_cluster_info['clusterAPIVersion'] = ( - self._issue_api_request('GetClusterVersionInfo', - {})['result']['clusterAPIVersion']) - if self.configuration.get('sf_svip', None): - self.active_cluster_info['svip'] = ( - self.configuration.get('sf_svip')) - - def _create_provider_id_string(self, - resource_id, - account_or_vol_id): - # NOTE(jdg): We use the same format, but in the case - # of snapshots, we don't have an account id, we instead - # swap that with the parent volume id - return "%s %s %s" % (resource_id, - account_or_vol_id, - self.active_cluster_info['uuid']) - - def _init_snapshot_mappings(self, srefs): - updates = [] - sf_snaps = self._issue_api_request( - 'ListSnapshots', {}, 
version='6.0')['result']['snapshots'] - for s in srefs: - seek_name = '%s%s' % (self.configuration.sf_volume_prefix, s['id']) - sfsnap = next( - (ss for ss in sf_snaps if ss['name'] == seek_name), None) - if sfsnap: - id_string = self._create_provider_id_string( - sfsnap['snapshotID'], - sfsnap['volumeID']) - if s.get('provider_id') != id_string: - updates.append( - {'id': s['id'], - 'provider_id': id_string}) - return updates - - def _init_volume_mappings(self, vrefs): - updates = [] - sf_vols = self._issue_api_request('ListActiveVolumes', - {})['result']['volumes'] - self.volume_map = {} - for v in vrefs: - seek_name = '%s%s' % (self.configuration.sf_volume_prefix, v['id']) - sfvol = next( - (sv for sv in sf_vols if sv['name'] == seek_name), None) - if sfvol: - if v.get('provider_id', 'nil') != sfvol['volumeID']: - updates.append( - {'id': v['id'], - 'provider_id': self._create_provider_id_string( - sfvol['volumeID'], sfvol['accountID'])}) - - return updates - - def update_provider_info(self, vrefs, snaprefs): - volume_updates = self._init_volume_mappings(vrefs) - snapshot_updates = self._init_snapshot_mappings(snaprefs) - return (volume_updates, snapshot_updates) - - def _create_template_account(self, account_name): - # We raise an API exception if the account doesn't exist - - # We need to take account_prefix settings into consideration - # This just uses the same method to do template account create - # as we use for any other OpenStack account - account_name = self._get_sf_account_name(account_name) - try: - id = self._issue_api_request( - 'GetAccountByName', - {'username': account_name})['result']['account']['accountID'] - except exception.SolidFireAPIException: - chap_secret = self._generate_random_string(12) - params = {'username': account_name, - 'initiatorSecret': chap_secret, - 'targetSecret': chap_secret, - 'attributes': {}} - id = self._issue_api_request('AddAccount', - params)['result']['accountID'] - return id - - def _build_endpoint_info(self, 
**kwargs): - endpoint = {} - - endpoint['mvip'] = ( - kwargs.get('mvip', self.configuration.san_ip)) - endpoint['login'] = ( - kwargs.get('login', self.configuration.san_login)) - endpoint['passwd'] = ( - kwargs.get('passwd', self.configuration.san_password)) - endpoint['port'] = ( - kwargs.get('port', self.configuration.sf_api_port)) - endpoint['url'] = 'https://%s:%s' % (endpoint['mvip'], - endpoint['port']) - endpoint['svip'] = kwargs.get('svip', self.configuration.sf_svip) - if not endpoint.get('mvip', None) and kwargs.get('backend_id', None): - endpoint['mvip'] = kwargs.get('backend_id') - return endpoint - - @retry(retry_exc_tuple, tries=6) - def _issue_api_request(self, method, params, version='1.0', endpoint=None): - if params is None: - params = {} - if endpoint is None: - endpoint = self.active_cluster_info['endpoint'] - - payload = {'method': method, 'params': params} - url = '%s/json-rpc/%s/' % (endpoint['url'], version) - with warnings.catch_warnings(): - warnings.simplefilter("ignore", exceptions.InsecureRequestWarning) - req = requests.post(url, - data=json.dumps(payload), - auth=(endpoint['login'], endpoint['passwd']), - verify=False, - timeout=30) - response = req.json() - req.close() - if (('error' in response) and - (response['error']['name'] in self.retryable_errors)): - msg = ('Retryable error (%s) encountered during ' - 'SolidFire API call.' 
% response['error']['name']) - LOG.debug(msg) - LOG.debug("API response: %s", response) - raise exception.SolidFireRetryableException(message=msg) - - if 'error' in response: - msg = _('API response: %s') % response - raise exception.SolidFireAPIException(msg) - - return response - - def _get_active_volumes_by_sfaccount(self, account_id, endpoint=None): - return [v for v in self._get_volumes_by_sfaccount(account_id, endpoint) - if v['status'] == "active"] - - def _get_volumes_by_sfaccount(self, account_id, endpoint=None): - """Get all volumes on cluster for specified account.""" - params = {'accountID': account_id} - return self._issue_api_request( - 'ListVolumesForAccount', - params, - endpoint=endpoint)['result']['volumes'] - - def _get_sfaccount_by_name(self, sf_account_name, endpoint=None): - """Get SolidFire account object by name.""" - sfaccount = None - params = {'username': sf_account_name} - try: - data = self._issue_api_request('GetAccountByName', - params, - endpoint=endpoint) - if 'result' in data and 'account' in data['result']: - LOG.debug('Found solidfire account: %s', sf_account_name) - sfaccount = data['result']['account'] - except exception.SolidFireAPIException as ex: - if 'xUnknownAccount' in ex.msg: - return sfaccount - else: - raise - return sfaccount - - def _get_sf_account_name(self, project_id): - """Build the SolidFire account name to use.""" - prefix = self.configuration.sf_account_prefix or '' - if prefix == 'hostname': - prefix = socket.gethostname() - return '%s%s%s' % (prefix, '-' if prefix else '', project_id) - - def _get_sfaccount(self, project_id): - sf_account_name = self._get_sf_account_name(project_id) - sfaccount = self._get_sfaccount_by_name(sf_account_name) - if sfaccount is None: - raise exception.SolidFireAccountNotFound( - account_name=sf_account_name) - - return sfaccount - - def _create_sfaccount(self, project_id): - """Create account on SolidFire device if it doesn't already exist. 
- - We're first going to check if the account already exists, if it does - just return it. If not, then create it. - - """ - - sf_account_name = self._get_sf_account_name(project_id) - sfaccount = self._get_sfaccount_by_name(sf_account_name) - if sfaccount is None: - LOG.debug('solidfire account: %s does not exist, create it...', - sf_account_name) - chap_secret = self._generate_random_string(12) - params = {'username': sf_account_name, - 'initiatorSecret': chap_secret, - 'targetSecret': chap_secret, - 'attributes': {}} - self._issue_api_request('AddAccount', params) - sfaccount = self._get_sfaccount_by_name(sf_account_name) - - return sfaccount - - def _generate_random_string(self, length): - """Generates random_string to use for CHAP password.""" - - char_set = string.ascii_uppercase + string.digits - return ''.join(random.sample(char_set, length)) - - def _get_model_info(self, sfaccount, sf_volume_id, endpoint=None): - """Gets the connection info for specified account and volume.""" - if endpoint: - iscsi_portal = endpoint['svip'] - else: - iscsi_portal = self.active_cluster_info['svip'] - - if ':' not in iscsi_portal: - iscsi_portal += ':3260' - - chap_secret = sfaccount['targetSecret'] - - found_volume = False - iteration_count = 0 - while not found_volume and iteration_count < 600: - volume_list = self._get_volumes_by_sfaccount( - sfaccount['accountID'], endpoint=endpoint) - iqn = None - for v in volume_list: - if v['volumeID'] == sf_volume_id: - iqn = v['iqn'] - found_volume = True - break - if not found_volume: - time.sleep(2) - iteration_count += 1 - - if not found_volume: - LOG.error('Failed to retrieve volume SolidFire-' - 'ID: %s in get_by_account!', sf_volume_id) - raise exception.VolumeNotFound(volume_id=sf_volume_id) - - model_update = {} - # NOTE(john-griffith): SF volumes are always at lun 0 - model_update['provider_location'] = ('%s %s %s' - % (iscsi_portal, iqn, 0)) - model_update['provider_auth'] = ('CHAP %s %s' - % (sfaccount['username'], - 
chap_secret)) - if not self.configuration.sf_emulate_512: - model_update['provider_geometry'] = ('%s %s' % (4096, 4096)) - model_update['provider_id'] = ( - self._create_provider_id_string(sf_volume_id, - sfaccount['accountID'])) - return model_update - - def _snapshot_discovery(self, src_uuid, params, vref): - # NOTE(jdg): First check the SF snapshots - # if we don't find a snap by the given name, just move on to check - # volumes. This may be a running system that was updated from - # before we did snapshots, so need to check both - is_clone = False - sf_vol = None - snap_name = '%s%s' % (self.configuration.sf_volume_prefix, src_uuid) - snaps = self._get_sf_snapshots() - snap = next((s for s in snaps if s["name"] == snap_name), None) - if snap: - params['snapshotID'] = int(snap['snapshotID']) - params['volumeID'] = int(snap['volumeID']) - params['newSize'] = int(vref['size'] * units.Gi) - else: - sf_vol = self._get_sf_volume(src_uuid) - if sf_vol is None: - raise exception.VolumeNotFound(volume_id=src_uuid) - params['volumeID'] = int(sf_vol['volumeID']) - params['newSize'] = int(vref['size'] * units.Gi) - is_clone = True - return params, is_clone, sf_vol - - def _do_clone_volume(self, src_uuid, - vref, sf_src_snap=None): - """Create a clone of an existing volume or snapshot.""" - attributes = {} - sf_account = self._get_create_account(vref['project_id']) - params = {'name': '%(prefix)s%(id)s' % - {'prefix': self.configuration.sf_volume_prefix, - 'id': vref['id']}, - 'newAccountID': sf_account['accountID']} - - is_clone = False - sf_vol = None - if sf_src_snap: - # In some scenarios we are passed the snapshot information that we - # are supposed to clone. 
- params['snapshotID'] = sf_src_snap['snapshotID'] - params['volumeID'] = sf_src_snap['volumeID'] - params['newSize'] = int(vref['size'] * units.Gi) - else: - params, is_clone, sf_vol = self._snapshot_discovery(src_uuid, - params, - vref) - data = self._issue_api_request('CloneVolume', params, version='6.0') - if (('result' not in data) or ('volumeID' not in data['result'])): - msg = _("API response: %s") % data - raise exception.SolidFireAPIException(msg) - - sf_volume_id = data['result']['volumeID'] - - # NOTE(jdg): all attributes are copied via clone, need to do an update - # to set any that were provided - qos = self._retrieve_qos_setting(vref) - params = {'volumeID': sf_volume_id} - if qos: - params['qos'] = qos - create_time = vref['created_at'].isoformat() - attributes = {'uuid': vref['id'], - 'is_clone': 'True', - 'src_uuid': src_uuid, - 'created_at': create_time} - - params['attributes'] = attributes - data = self._issue_api_request('ModifyVolume', params) - - model_update = self._get_model_info(sf_account, sf_volume_id) - if model_update is None: - mesg = _('Failed to get model update from clone') - raise exception.SolidFireAPIException(mesg) - - # Increment the usage count, just for data collection - # We're only doing this for clones, not create_from snaps - if is_clone: - data = self._update_attributes(sf_vol) - return (data, sf_account, model_update) - - def _update_attributes(self, sf_vol): - cloned_count = sf_vol['attributes'].get('cloned_count', 0) - cloned_count += 1 - attributes = sf_vol['attributes'] - attributes['cloned_count'] = cloned_count - - params = {'volumeID': int(sf_vol['volumeID'])} - params['attributes'] = attributes - return self._issue_api_request('ModifyVolume', params) - - def _do_volume_create(self, sf_account, params, endpoint=None): - params['accountID'] = sf_account['accountID'] - sf_volid = self._issue_api_request( - 'CreateVolume', params, endpoint=endpoint)['result']['volumeID'] - return self._get_model_info(sf_account, 
sf_volid, endpoint=endpoint) - - def _do_snapshot_create(self, params): - model_update = {} - snapshot_id = self._issue_api_request( - 'CreateSnapshot', params, version='6.0')['result']['snapshotID'] - snaps = self._get_sf_snapshots() - snap = ( - next((s for s in snaps if int(s["snapshotID"]) == - int(snapshot_id)), None)) - model_update['provider_id'] = ( - self._create_provider_id_string(snap['snapshotID'], - snap['volumeID'])) - return model_update - - def _set_qos_presets(self, volume): - qos = {} - valid_presets = self.sf_qos_dict.keys() - - # First look to see if they included a preset - presets = [i.value for i in volume.get('volume_metadata') - if i.key == 'sf-qos' and i.value in valid_presets] - if len(presets) > 0: - if len(presets) > 1: - LOG.warning('More than one valid preset was ' - 'detected, using %s', presets[0]) - qos = self.sf_qos_dict[presets[0]] - else: - # look for explicit settings - for i in volume.get('volume_metadata'): - if i.key in self.sf_qos_keys: - qos[i.key] = int(i.value) - return qos - - def _set_qos_by_volume_type(self, ctxt, type_id, vol_size): - qos = {} - scale_qos = {} - volume_type = volume_types.get_volume_type(ctxt, type_id) - qos_specs_id = volume_type.get('qos_specs_id') - specs = volume_type.get('extra_specs') - - # NOTE(jdg): We prefer the qos_specs association - # and over-ride any existing - # extra-specs settings if present - if qos_specs_id is not None: - kvs = qos_specs.get_qos_specs(ctxt, qos_specs_id)['specs'] - else: - kvs = specs - - for key, value in kvs.items(): - if ':' in key: - fields = key.split(':') - key = fields[1] - if key in self.sf_qos_keys: - qos[key] = int(value) - if key in self.sf_scale_qos_keys: - scale_qos[key] = value - - # look for the 'scaledIOPS' key and scale QoS if set - if 'scaledIOPS' in scale_qos: - scale_qos.pop('scaledIOPS') - for key, value in scale_qos.items(): - if key == 'scaleMin': - qos['minIOPS'] = (qos['minIOPS'] + - (int(value) * (vol_size - 1))) - elif key == 'scaleMax': 
- qos['maxIOPS'] = (qos['maxIOPS'] + - (int(value) * (vol_size - 1))) - elif key == 'scaleBurst': - qos['burstIOPS'] = (qos['burstIOPS'] + - (int(value) * (vol_size - 1))) - # Cap the IOPS values at their limits - capped = False - for key, value in qos.items(): - if value > self.sf_iops_lim_max[key]: - qos[key] = self.sf_iops_lim_max[key] - capped = True - if value < self.sf_iops_lim_min[key]: - qos[key] = self.sf_iops_lim_min[key] - capped = True - if capped: - LOG.debug("A SolidFire QoS value was capped at the defined limits") - # Check that minIOPS <= maxIOPS <= burstIOPS - if (qos.get('minIOPS', 0) > qos.get('maxIOPS', 0) or - qos.get('maxIOPS', 0) > qos.get('burstIOPS', 0)): - msg = (_("Scaled QoS error. Must be minIOPS <= maxIOPS <= " - "burstIOPS. Currently: Min: %(min)s, Max: " - "%(max)s, Burst: %(burst)s.") % - {"min": qos['minIOPS'], - "max": qos['maxIOPS'], - "burst": qos['burstIOPS']}) - raise exception.InvalidQoSSpecs(reason=msg) - return qos - - def _get_sf_volume(self, uuid, params=None): - if params: - vols = [v for v in self._issue_api_request( - 'ListVolumesForAccount', params)['result']['volumes'] if - v['status'] == "active"] - else: - vols = self._issue_api_request( - 'ListActiveVolumes', params)['result']['volumes'] - - found_count = 0 - sf_volref = None - for v in vols: - # NOTE(jdg): In the case of "name" we can't - # update that on manage/import, so we use - # the uuid attribute - meta = v.get('attributes') - alt_id = '' - if meta: - alt_id = meta.get('uuid', '') - - if uuid in v['name'] or uuid in alt_id: - found_count += 1 - sf_volref = v - LOG.debug("Mapped SolidFire volumeID %(volume_id)s " - "to cinder ID %(uuid)s.", - {'volume_id': v['volumeID'], 'uuid': uuid}) - - if found_count == 0: - # NOTE(jdg): Previously we would raise here, but there are cases - # where this might be a cleanup for a failed delete. 
- # Until we get better states we'll just log an error - LOG.error("Volume %s, not found on SF Cluster.", uuid) - - if found_count > 1: - LOG.error("Found %(count)s volumes mapped to id: %(uuid)s.", - {'count': found_count, - 'uuid': uuid}) - raise exception.DuplicateSfVolumeNames(vol_name=uuid) - - return sf_volref - - def _get_sf_snapshots(self, sf_volid=None): - params = {} - if sf_volid: - params = {'volumeID': sf_volid} - return self._issue_api_request( - 'ListSnapshots', params, version='6.0')['result']['snapshots'] - - def _create_image_volume(self, context, - image_meta, image_service, - image_id): - with image_utils.TemporaryImages.fetch(image_service, - context, - image_id) as tmp_image: - data = image_utils.qemu_img_info(tmp_image) - fmt = data.file_format - if fmt is None: - raise exception.ImageUnacceptable( - reason=_("'qemu-img info' parsing failed."), - image_id=image_id) - - backing_file = data.backing_file - if backing_file is not None: - raise exception.ImageUnacceptable( - image_id=image_id, - reason=_("fmt=%(fmt)s backed by:%(backing_file)s") - % {'fmt': fmt, 'backing_file': backing_file, }) - - virtual_size = int(math.ceil(float(data.virtual_size) / units.Gi)) - attributes = {} - attributes['image_info'] = {} - attributes['image_info']['image_updated_at'] = ( - image_meta['updated_at'].isoformat()) - attributes['image_info']['image_name'] = ( - image_meta['name']) - attributes['image_info']['image_created_at'] = ( - image_meta['created_at'].isoformat()) - attributes['image_info']['image_id'] = image_meta['id'] - params = {'name': 'OpenStackIMG-%s' % image_id, - 'accountID': self.template_account_id, - 'sliceCount': 1, - 'totalSize': int(virtual_size * units.Gi), - 'enable512e': self.configuration.sf_emulate_512, - 'attributes': attributes, - 'qos': {}} - - sf_account = self._issue_api_request( - 'GetAccountByID', - {'accountID': self.template_account_id})['result']['account'] - template_vol = self._do_volume_create(sf_account, params) - - tvol 
= {} - tvol['id'] = image_id - tvol['provider_location'] = template_vol['provider_location'] - tvol['provider_auth'] = template_vol['provider_auth'] - - try: - connector = {'multipath': False} - conn = self.initialize_connection(tvol, connector) - attach_info = super(SolidFireDriver, self)._connect_device( - conn) - properties = 'na' - image_utils.convert_image(tmp_image, - attach_info['device']['path'], - 'raw', - run_as_root=True) - data = image_utils.qemu_img_info(attach_info['device']['path'], - run_as_root=True) - if data.file_format != 'raw': - raise exception.ImageUnacceptable( - image_id=image_id, - reason=_("Converted to %(vol_format)s, but format is " - "now %(file_format)s") % {'vol_format': 'raw', - 'file_format': data. - file_format}) - except Exception as exc: - vol = self._get_sf_volume(image_id) - LOG.error('Failed image conversion during ' - 'cache creation: %s', - exc) - LOG.debug('Removing SolidFire Cache Volume (SF ID): %s', - vol['volumeID']) - self._detach_volume(context, attach_info, tvol, properties) - self._issue_api_request('DeleteVolume', params) - self._issue_api_request('PurgeDeletedVolume', params) - return - - self._detach_volume(context, attach_info, tvol, properties) - sf_vol = self._get_sf_volume(image_id, params) - LOG.debug('Successfully created SolidFire Image Template ' - 'for image-id: %s', image_id) - return sf_vol - - def _verify_image_volume(self, context, image_meta, image_service): - # This method just verifies that IF we have a cache volume that - # it's still up to date and current WRT the image in Glance - # ie an image-update hasn't occurred since we grabbed it - - # If it's out of date, just delete it and we'll create a new one - # Any other case we don't care and just return without doing anything - - params = {'accountID': self.template_account_id} - sf_vol = self._get_sf_volume(image_meta['id'], params) - if not sf_vol: - self._create_image_volume(context, - image_meta, - image_service, - image_meta['id']) - 
return - - if sf_vol['attributes']['image_info']['image_updated_at'] != ( - image_meta['updated_at'].isoformat()): - params = {'accountID': self.template_account_id} - params['volumeID'] = sf_vol['volumeID'] - self._issue_api_request('DeleteVolume', params) - self._issue_api_request('PurgeDeletedVolume', params) - self._create_image_volume(context, - image_meta, - image_service, - image_meta['id']) - - def _get_sfaccounts_for_tenant(self, cinder_project_id): - accounts = self._issue_api_request( - 'ListAccounts', {})['result']['accounts'] - - # Note(jdg): On SF we map account-name to OpenStack's tenant ID - # we use tenantID in here to get secondaries that might exist - # Also: we expect this to be sorted, so we get the primary first - # in the list - return sorted([acc for acc in accounts if - cinder_project_id in acc['username']]) - - def _get_all_active_volumes(self, cinder_uuid=None): - params = {} - volumes = self._issue_api_request('ListActiveVolumes', - params)['result']['volumes'] - if cinder_uuid: - vols = ([v for v in volumes if - cinder_uuid in v.name]) - else: - vols = [v for v in volumes] - - return vols - - def _get_all_deleted_volumes(self, cinder_uuid=None): - params = {} - vols = self._issue_api_request('ListDeletedVolumes', - params)['result']['volumes'] - if cinder_uuid: - deleted_vols = ([v for v in vols if - cinder_uuid in v['name']]) - else: - deleted_vols = [v for v in vols] - return deleted_vols - - def _get_account_create_availability(self, accounts): - # we'll check both the primary and the secondary - # if it exists and return whichever one has count - # available. - for acc in accounts: - if self._get_volumes_for_account( - acc['accountID']) > self.max_volumes_per_account: - return acc - if len(accounts) == 1: - sfaccount = self._create_sfaccount(accounts[0]['name'] + '_') - return sfaccount - return None - - def _get_create_account(self, proj_id): - # Retrieve SolidFire accountID to be used for creating volumes. 
- sf_accounts = self._get_sfaccounts_for_tenant(proj_id) - if not sf_accounts: - sf_account = self._create_sfaccount(proj_id) - else: - # Check availability for creates - sf_account = self._get_account_create_availability(sf_accounts) - if not sf_account: - msg = _('Volumes/account exceeded on both primary and ' - 'secondary SolidFire accounts.') - raise exception.SolidFireDriverException(msg) - return sf_account - - def _get_volumes_for_account(self, sf_account_id, cinder_uuid=None): - # ListVolumesForAccount gives both Active and Deleted - # we require the solidfire accountID, uuid of volume - # is optional - vols = self._get_active_volumes_by_sfaccount(sf_account_id) - if cinder_uuid: - vlist = [v for v in vols if - cinder_uuid in v['name']] - else: - vlist = [v for v in vols] - vlist = sorted(vlist, key=lambda k: k['volumeID']) - return vlist - - def _create_vag(self, iqn, vol_id=None): - """Create a volume access group(vag). - - Returns the vag_id. - """ - vag_name = re.sub('[^0-9a-zA-Z]+', '-', iqn) - params = {'name': vag_name, - 'initiators': [iqn], - 'volumes': [vol_id], - 'attributes': {'openstack': True}} - try: - result = self._issue_api_request('CreateVolumeAccessGroup', - params, - version='7.0') - return result['result']['volumeAccessGroupID'] - except exception.SolidFireAPIException as error: - if xExceededLimit in error.msg: - if iqn in error.msg: - # Initiator double registered. - return self._safe_create_vag(iqn, vol_id) - else: - # VAG limit reached. Purge and start over. - self._purge_vags() - return self._safe_create_vag(iqn, vol_id) - else: - raise - - def _safe_create_vag(self, iqn, vol_id=None): - # Potential race condition with simultaneous volume attaches to the - # same host. To help avoid this, VAG creation makes a best attempt at - # finding and using an existing VAG. 
- - vags = self._get_vags_by_name(iqn) - if vags: - # Filter through the vags and find the one with matching initiator - vag = next((v for v in vags if iqn in v['initiators']), None) - if vag: - return vag['volumeAccessGroupID'] - else: - # No matches, use the first result, add initiator IQN. - vag_id = vags[0]['volumeAccessGroupID'] - return self._add_initiator_to_vag(iqn, vag_id) - return self._create_vag(iqn, vol_id) - - def _base_get_vags(self): - params = {} - vags = self._issue_api_request( - 'ListVolumeAccessGroups', - params, - version='7.0')['result']['volumeAccessGroups'] - return vags - - def _get_vags_by_name(self, iqn): - """Retrieve SolidFire volume access group objects by name. - - Returns an array of vags with a matching name value. - Returns an empty array if there are no matches. - """ - vags = self._base_get_vags() - vag_name = re.sub('[^0-9a-zA-Z]+', '-', iqn) - matching_vags = [vag for vag in vags if vag['name'] == vag_name] - return matching_vags - - def _add_initiator_to_vag(self, iqn, vag_id): - # Added a vag_id return as there is a chance that we might have to - # create a new VAG if our target VAG is deleted underneath us. - params = {"initiators": [iqn], - "volumeAccessGroupID": vag_id} - try: - self._issue_api_request('AddInitiatorsToVolumeAccessGroup', - params, - version='7.0') - return vag_id - except exception.SolidFireAPIException as error: - if xAlreadyInVolumeAccessGroup in error.msg: - return vag_id - elif xVolumeAccessGroupIDDoesNotExist in error.msg: - # No locking means sometimes a VAG can be removed by a parallel - # volume detach against the same host. - return self._safe_create_vag(iqn) - else: - raise - - def _add_volume_to_vag(self, vol_id, iqn, vag_id): - # Added a vag_id return to be consistent with add_initiator_to_vag. It - # isn't necessary but may be helpful in the future. 
- params = {"volumeAccessGroupID": vag_id, - "volumes": [vol_id]} - try: - self._issue_api_request('AddVolumesToVolumeAccessGroup', - params, - version='7.0') - return vag_id - - except exception.SolidFireAPIException as error: - if xAlreadyInVolumeAccessGroup in error.msg: - return vag_id - elif xVolumeAccessGroupIDDoesNotExist in error.msg: - return self._safe_create_vag(iqn, vol_id) - else: - raise - - def _remove_volume_from_vag(self, vol_id, vag_id): - params = {"volumeAccessGroupID": vag_id, - "volumes": [vol_id]} - try: - self._issue_api_request('RemoveVolumesFromVolumeAccessGroup', - params, - version='7.0') - except exception.SolidFireAPIException as error: - if xNotInVolumeAccessGroup in error.msg: - pass - elif xVolumeAccessGroupIDDoesNotExist in error.msg: - pass - else: - raise - - def _remove_volume_from_vags(self, vol_id): - # Due to all sorts of uncertainty around multiattach, on volume - # deletion we make a best attempt at removing the vol_id from VAGs. - vags = self._base_get_vags() - targets = [v for v in vags if vol_id in v['volumes']] - for vag in targets: - self._remove_volume_from_vag(vol_id, vag['volumeAccessGroupID']) - - def _remove_vag(self, vag_id): - params = {"volumeAccessGroupID": vag_id} - try: - self._issue_api_request('DeleteVolumeAccessGroup', - params, - version='7.0') - except exception.SolidFireAPIException as error: - if xVolumeAccessGroupIDDoesNotExist not in error.msg: - raise - - def _purge_vags(self, limit=10): - # Purge up to limit number of VAGs that have no active volumes, - # initiators, and an OpenStack attribute. Purge oldest VAGs first. 
- vags = self._base_get_vags() - targets = [v for v in vags if v['volumes'] == [] and - v['initiators'] == [] and - v['deletedVolumes'] == [] and - v['attributes'].get('openstack')] - sorted_targets = sorted(targets, - key=lambda k: k['volumeAccessGroupID']) - for vag in sorted_targets[:limit]: - self._remove_vag(vag['volumeAccessGroupID']) - - @locked_image_id_operation - def clone_image(self, context, - volume, image_location, - image_meta, image_service): - """Clone an existing image volume.""" - public = False - # Check out pre-requisites: - # Is template caching enabled? - if not self.configuration.sf_allow_template_caching: - return None, False - - # NOTE(jdg): Glance V2 moved from is_public to visibility - # so we check both, as we don't necessarily know or want - # to care which we're using. Will need to look at - # future handling of things like shared and community - # but for now, it's owner or public and that's it - visibility = image_meta.get('visibility', None) - if visibility and visibility == 'public': - public = True - elif image_meta.get('is_public', False): - public = True - else: - if image_meta['owner'] == volume['project_id']: - public = True - if not public: - LOG.warning("Requested image is not " - "accessible by current Tenant.") - return None, False - - try: - self._verify_image_volume(context, - image_meta, - image_service) - except exception.SolidFireAPIException: - return None, False - - # Ok, should be good to go now, try it again - (data, sfaccount, model) = self._do_clone_volume(image_meta['id'], - volume) - return model, True - - def _retrieve_qos_setting(self, volume): - qos = {} - if (self.configuration.sf_allow_tenant_qos and - volume.get('volume_metadata')is not None): - qos = self._set_qos_presets(volume) - - ctxt = context.get_admin_context() - type_id = volume.get('volume_type_id', None) - if type_id is not None: - qos = self._set_qos_by_volume_type(ctxt, type_id, - volume.get('size')) - return qos - - def create_volume(self, 
volume): - """Create volume on SolidFire device. - - The account is where CHAP settings are derived from, volume is - created and exported. Note that the new volume is immediately ready - for use. - - One caveat here is that an existing user account must be specified - in the API call to create a new volume. We use a set algorithm to - determine account info based on passed in cinder volume object. First - we check to see if the account already exists (and use it), or if it - does not already exist, we'll go ahead and create it. - - """ - slice_count = 1 - attributes = {} - - sf_account = self._get_create_account(volume['project_id']) - qos = self._retrieve_qos_setting(volume) - - create_time = volume['created_at'].isoformat() - attributes = {'uuid': volume['id'], - 'is_clone': 'False', - 'created_at': create_time} - - vname = '%s%s' % (self.configuration.sf_volume_prefix, volume['id']) - params = {'name': vname, - 'accountID': sf_account['accountID'], - 'sliceCount': slice_count, - 'totalSize': int(volume['size'] * units.Gi), - 'enable512e': self.configuration.sf_emulate_512, - 'attributes': attributes, - 'qos': qos} - - # NOTE(jdg): Check if we're a migration tgt, if so - # use the old volume-id here for the SF Name - migration_status = volume.get('migration_status', None) - if migration_status and 'target' in migration_status: - k, v = migration_status.split(':') - vname = '%s%s' % (self.configuration.sf_volume_prefix, v) - params['name'] = vname - params['attributes']['migration_uuid'] = volume['id'] - params['attributes']['uuid'] = v - - model_update = self._do_volume_create(sf_account, params) - try: - rep_settings = self._retrieve_replication_settings(volume) - if self.replication_enabled and rep_settings: - volume['volumeID'] = ( - int(model_update['provider_id'].split()[0])) - self._replicate_volume(volume, params, - sf_account, rep_settings) - except exception.SolidFireAPIException: - # NOTE(jdg): Something went wrong after the source create, due to - # 
the way TFLOW works and it's insistence on retrying the same - # command over and over coupled with the fact that the introduction - # of objects now sets host to None on failures we'll end up with an - # orphaned volume on the backend for every one of these segments - # that fail, for n-retries. Sad Sad Panda!! We'll just do it - # ourselves until we can get a general fix in Cinder further up the - # line - with excutils.save_and_reraise_exception(): - sf_volid = int(model_update['provider_id'].split()[0]) - self._issue_api_request('DeleteVolume', {'volumeID': sf_volid}) - self._issue_api_request('PurgeDeletedVolume', - {'volumeID': sf_volid}) - return model_update - - def _retrieve_replication_settings(self, volume): - rep_data = {} - ctxt = context.get_admin_context() - type_id = volume.get('volume_type_id', None) - if type_id is not None: - rep_data = self._set_rep_by_volume_type(ctxt, type_id) - return rep_data - - def _set_rep_by_volume_type(self, ctxt, type_id): - rep_opts = {} - type_ref = volume_types.get_volume_type(ctxt, type_id) - specs = type_ref.get('extra_specs') - - if specs.get('replication', 'disabled').lower() == 'enabled': - rep_opts['targets'] = specs.get( - 'solidfire:replication_targets', self.cluster_pairs[0]) - return rep_opts - - def _replicate_volume(self, volume, src_params, - parent_sfaccount, rep_info): - params = {} - - # TODO(jdg): Right now we just go to first pair, - # need to add parsing of rep_info eventually - # in other words "rep_info" is not used yet! 
- tgt_endpoint = self.cluster_pairs[0]['endpoint'] - LOG.debug("Replicating volume on remote cluster: %s", tgt_endpoint) - params['attributes'] = src_params['attributes'] - params['username'] = self._get_sf_account_name(volume['project_id']) - try: - params['initiatorSecret'] = parent_sfaccount['initiatorSecret'] - params['targetSecret'] = parent_sfaccount['targetSecret'] - self._issue_api_request( - 'AddAccount', - params, - endpoint=tgt_endpoint)['result']['accountID'] - except exception.SolidFireAPIException as ex: - if 'xDuplicateUsername' not in ex.msg: - raise - - remote_account = ( - self._get_sfaccount_by_name(params['username'], - endpoint=tgt_endpoint)) - - # Create the volume on the remote cluster w/same params as original - params = src_params - params['accountID'] = remote_account['accountID'] - LOG.debug("Create remote volume on: %(endpoint)s with account: " - "%(account)s", - {'endpoint': tgt_endpoint['url'], 'account': remote_account}) - model_update = self._do_volume_create( - remote_account, params, endpoint=tgt_endpoint) - - tgt_sfid = int(model_update['provider_id'].split()[0]) - params = {'volumeID': tgt_sfid, 'access': 'replicationTarget'} - self._issue_api_request('ModifyVolume', - params, - '8.0', - endpoint=tgt_endpoint) - - # Enable volume pairing - LOG.debug("Start volume pairing on volume ID: %s", - volume['volumeID']) - params = {'volumeID': volume['volumeID']} - rep_key = self._issue_api_request('StartVolumePairing', - params, - '8.0')['result']['volumePairingKey'] - params = {'volumeID': tgt_sfid, - 'volumePairingKey': rep_key} - LOG.debug("Issue CompleteVolumePairing request on remote: " - "%(endpoint)s, %(parameters)s", - {'endpoint': tgt_endpoint['url'], 'parameters': params}) - self._issue_api_request('CompleteVolumePairing', - params, - '8.0', - endpoint=tgt_endpoint) - LOG.debug("Completed volume pairing.") - return model_update - - @locked_source_id_operation - def create_cloned_volume(self, volume, source): - """Create a clone 
of an existing volume.""" - (_data, _sfaccount, model) = self._do_clone_volume( - source['id'], - volume) - - return model - - def delete_volume(self, volume): - """Delete SolidFire Volume from device. - - SolidFire allows multiple volumes with same name, - volumeID is what's guaranteed unique. - - """ - sf_vol = None - accounts = self._get_sfaccounts_for_tenant(volume['project_id']) - if accounts is None: - LOG.error("Account for Volume ID %s was not found on " - "the SolidFire Cluster while attempting " - "delete_volume operation!", volume['id']) - LOG.error("This usually means the volume was never " - "successfully created.") - return - - for acc in accounts: - vols = self._get_volumes_for_account(acc['accountID'], - volume['name_id']) - if vols: - sf_vol = vols[0] - break - - if sf_vol is not None: - for vp in sf_vol.get('volumePairs', []): - LOG.debug("Deleting paired volume on remote cluster...") - pair_id = vp['clusterPairID'] - for cluster in self.cluster_pairs: - if cluster['clusterPairID'] == pair_id: - params = {'volumeID': vp['remoteVolumeID']} - LOG.debug("Issue Delete request on cluster: " - "%(remote)s with params: %(parameters)s", - {'remote': cluster['endpoint']['url'], - 'parameters': params}) - self._issue_api_request('DeleteVolume', params, - endpoint=cluster['endpoint']) - self._issue_api_request('PurgeDeletedVolume', params, - endpoint=cluster['endpoint']) - - if sf_vol['status'] == 'active': - params = {'volumeID': sf_vol['volumeID']} - self._issue_api_request('DeleteVolume', params) - self._issue_api_request('PurgeDeletedVolume', params) - if volume.get('multiattach'): - self._remove_volume_from_vags(sf_vol['volumeID']) - else: - LOG.error("Volume ID %s was not found on " - "the SolidFire Cluster while attempting " - "delete_volume operation!", volume['id']) - - def delete_snapshot(self, snapshot): - """Delete the specified snapshot from the SolidFire cluster.""" - sf_snap_name = '%s%s' % (self.configuration.sf_volume_prefix, - 
snapshot['id']) - accounts = self._get_sfaccounts_for_tenant(snapshot['project_id']) - snap = None - for acct in accounts: - params = {'accountID': acct['accountID']} - sf_vol = self._get_sf_volume(snapshot['volume_id'], params) - if sf_vol: - sf_snaps = self._get_sf_snapshots(sf_vol['volumeID']) - snap = next((s for s in sf_snaps if s["name"] == sf_snap_name), - None) - if snap: - params = {'snapshotID': snap['snapshotID']} - self._issue_api_request('DeleteSnapshot', - params, - version='6.0') - return - # Make sure it's not "old style" using clones as snaps - LOG.debug("Snapshot not found, checking old style clones.") - self.delete_volume(snapshot) - - def create_snapshot(self, snapshot): - sfaccount = self._get_sfaccount(snapshot['project_id']) - if sfaccount is None: - LOG.error("Account for Volume ID %s was not found on " - "the SolidFire Cluster while attempting " - "create_snapshot operation!", snapshot['volume_id']) - - params = {'accountID': sfaccount['accountID']} - sf_vol = self._get_sf_volume(snapshot['volume_id'], params) - - if sf_vol is None: - raise exception.VolumeNotFound(volume_id=snapshot['volume_id']) - params = {'volumeID': sf_vol['volumeID'], - 'name': '%s%s' % (self.configuration.sf_volume_prefix, - snapshot['id'])} - return self._do_snapshot_create(params) - - @locked_source_id_operation - def create_volume_from_snapshot(self, volume, source): - """Create a volume from the specified snapshot.""" - if source.get('group_snapshot_id'): - # We're creating a volume from a snapshot that resulted from a - # consistency group snapshot. Because of the way that SolidFire - # creates cgsnaps, we have to search for the correct snapshot. 
- group_snapshot_id = source.get('group_snapshot_id') - snapshot_id = source.get('volume_id') - sf_name = self.configuration.sf_volume_prefix + group_snapshot_id - sf_group_snap = self._get_group_snapshot_by_name(sf_name) - return self._create_clone_from_sf_snapshot(snapshot_id, - group_snapshot_id, - sf_group_snap, - volume) - - (_data, _sfaccount, model) = self._do_clone_volume( - source['id'], - volume) - - return model - - # Consistency group helpers - def _sf_create_group_snapshot(self, name, sf_volumes): - # Group snapshot is our version of a consistency group snapshot. - vol_ids = [vol['volumeID'] for vol in sf_volumes] - params = {'name': name, - 'volumes': vol_ids} - snapshot_id = self._issue_api_request('CreateGroupSnapshot', - params, - version='7.0') - return snapshot_id['result'] - - def _group_snapshot_creator(self, gsnap_name, src_vol_ids): - # Common helper that takes in an array of OpenStack Volume UUIDs and - # creates a SolidFire group snapshot with them. - vol_names = [self.configuration.sf_volume_prefix + vol_id - for vol_id in src_vol_ids] - active_sf_vols = self._get_all_active_volumes() - target_vols = [vol for vol in active_sf_vols - if vol['name'] in vol_names] - if len(src_vol_ids) != len(target_vols): - msg = (_("Retrieved a different amount of SolidFire volumes for " - "the provided Cinder volumes. Retrieved: %(ret)s " - "Desired: %(des)s") % {"ret": len(target_vols), - "des": len(src_vol_ids)}) - raise exception.SolidFireDriverException(msg) - - result = self._sf_create_group_snapshot(gsnap_name, target_vols) - return result - - def _create_temp_group_snapshot(self, source_cg, source_vols): - # Take a temporary snapshot to create the volumes for a new - # consistency group. 
- gsnap_name = ("%(prefix)s%(id)s-tmp" % - {"prefix": self.configuration.sf_volume_prefix, - "id": source_cg['id']}) - vol_ids = [vol['id'] for vol in source_vols] - self._group_snapshot_creator(gsnap_name, vol_ids) - return gsnap_name - - def _list_group_snapshots(self): - result = self._issue_api_request('ListGroupSnapshots', - {}, - version='7.0') - return result['result']['groupSnapshots'] - - def _get_group_snapshot_by_name(self, name): - target_snaps = self._list_group_snapshots() - target = next((snap for snap in target_snaps - if snap['name'] == name), None) - return target - - def _delete_group_snapshot(self, gsnapid): - params = {'groupSnapshotID': gsnapid} - self._issue_api_request('DeleteGroupSnapshot', - params, - version='7.0') - - def _delete_cgsnapshot_by_name(self, snap_name): - # Common function used to find and delete a snapshot. - target = self._get_group_snapshot_by_name(snap_name) - if not target: - msg = _("Failed to find group snapshot named: %s") % snap_name - raise exception.SolidFireDriverException(msg) - self._delete_group_snapshot(target['groupSnapshotID']) - - def _find_linked_snapshot(self, target_uuid, group_snap): - # Because group snapshots name each individual snapshot the group - # snapshot name, we have to trawl through the SolidFire snapshots to - # find the SolidFire snapshot from the group that is linked with the - # SolidFire volumeID that is linked to the Cinder snapshot source - # volume. - source_vol = self._get_sf_volume(target_uuid) - target_snap = next((sn for sn in group_snap['members'] - if sn['volumeID'] == source_vol['volumeID']), None) - return target_snap - - def _create_clone_from_sf_snapshot(self, target_uuid, src_uuid, - sf_group_snap, vol): - # Find the correct SolidFire backing snapshot. 
- sf_src_snap = self._find_linked_snapshot(target_uuid, - sf_group_snap) - _data, _sfaccount, model = self._do_clone_volume(src_uuid, - vol, - sf_src_snap) - model['id'] = vol['id'] - model['status'] = 'available' - return model - - def _map_sf_volumes(self, cinder_volumes, endpoint=None): - """Get a list of SolidFire volumes. - - Creates a list of SolidFire volumes based - on matching a list of cinder volume ID's, - also adds an 'cinder_id' key to match cinder. - """ - vols = self._issue_api_request( - 'ListActiveVolumes', {}, - endpoint=endpoint)['result']['volumes'] - vlist = ( - [sfvol for sfvol in vols for cv in cinder_volumes if cv['id'] in - sfvol['name']]) - for v in vlist: - v['cinder_id'] = v['name'].split( - self.configuration.sf_volume_prefix)[1] - return vlist - - # Generic Volume Groups. - def create_group(self, ctxt, group): - # SolidFire does not have the concept of volume groups. We're going to - # play along with the group song and dance. There will be a lot of - # no-ops because of this. - if vol_utils.is_group_a_cg_snapshot_type(group): - return {'status': fields.GroupStatus.AVAILABLE} - - # Blatantly ripping off this pattern from other drivers. - raise NotImplementedError() - - def create_group_from_src(self, ctxt, group, volumes, group_snapshots=None, - snapshots=None, source_group=None, - source_vols=None): - # At this point this is just a pass-through. - if vol_utils.is_group_a_cg_snapshot_type(group): - return self._create_consistencygroup_from_src( - ctxt, - group, - volumes, - group_snapshots, - snapshots, - source_group, - source_vols) - - # Default implementation handles other scenarios. - raise NotImplementedError() - - def create_group_snapshot(self, ctxt, group_snapshot, snapshots): - # This is a pass-through to the old consistency group stuff. - if vol_utils.is_group_a_cg_snapshot_type(group_snapshot): - return self._create_cgsnapshot(ctxt, group_snapshot, snapshots) - - # Default implementation handles other scenarios. 
- raise NotImplementedError() - - def delete_group(self, ctxt, group, volumes): - # Delete a volume group. SolidFire does not track volume groups, - # however we do need to actually remove the member volumes of the - # group. Right now only consistent volume groups are supported. - if vol_utils.is_group_a_cg_snapshot_type(group): - return self._delete_consistencygroup(ctxt, group, volumes) - - # Default implementation handles other scenarios. - raise NotImplementedError() - - def update_group(self, ctxt, group, add_volumes=None, remove_volumes=None): - # Regarding consistency groups SolidFire does not track volumes, so - # this is a no-op. In the future with replicated volume groups this - # might actually do something. - if vol_utils.is_group_a_cg_snapshot_type(group): - return self._update_consistencygroup(ctxt, - group, - add_volumes, - remove_volumes) - - # Default implementation handles other scenarios. - raise NotImplementedError() - - def _create_consistencygroup_from_src(self, ctxt, group, volumes, - cgsnapshot, snapshots, - source_cg, source_vols): - if cgsnapshot and snapshots: - sf_name = self.configuration.sf_volume_prefix + cgsnapshot['id'] - sf_group_snap = self._get_group_snapshot_by_name(sf_name) - - # Go about creating volumes from provided snaps. - vol_models = [] - for vol, snap in zip(volumes, snapshots): - vol_models.append(self._create_clone_from_sf_snapshot( - snap['volume_id'], - snap['id'], - sf_group_snap, - vol)) - return ({'status': fields.GroupStatus.AVAILABLE}, - vol_models) - - elif source_cg and source_vols: - # Create temporary group snapshot. - gsnap_name = self._create_temp_group_snapshot(source_cg, - source_vols) - try: - sf_group_snap = self._get_group_snapshot_by_name(gsnap_name) - # For each temporary snapshot clone the volume. 
- vol_models = [] - for vol in volumes: - vol_models.append(self._create_clone_from_sf_snapshot( - vol['source_volid'], - vol['source_volid'], - sf_group_snap, - vol)) - finally: - self._delete_cgsnapshot_by_name(gsnap_name) - return {'status': fields.GroupStatus.AVAILABLE}, vol_models - - def _create_cgsnapshot(self, ctxt, cgsnapshot, snapshots): - vol_ids = [snapshot['volume_id'] for snapshot in snapshots] - vol_names = [self.configuration.sf_volume_prefix + vol_id - for vol_id in vol_ids] - active_sf_vols = self._get_all_active_volumes() - target_vols = [vol for vol in active_sf_vols - if vol['name'] in vol_names] - if len(snapshots) != len(target_vols): - msg = (_("Retrieved a different amount of SolidFire volumes for " - "the provided Cinder snapshots. Retrieved: %(ret)s " - "Desired: %(des)s") % {"ret": len(target_vols), - "des": len(snapshots)}) - raise exception.SolidFireDriverException(msg) - snap_name = self.configuration.sf_volume_prefix + cgsnapshot['id'] - self._sf_create_group_snapshot(snap_name, target_vols) - return None, None - - def _update_consistencygroup(self, context, group, - add_volumes=None, remove_volumes=None): - # Similar to create_consistencygroup, SolidFire's lack of a consistency - # group object means there is nothing to update on the cluster. - return None, None, None - - def _delete_cgsnapshot(self, ctxt, cgsnapshot, snapshots): - snap_name = self.configuration.sf_volume_prefix + cgsnapshot['id'] - self._delete_cgsnapshot_by_name(snap_name) - return None, None - - def _delete_consistencygroup(self, ctxt, group, volumes): - # TODO(chris_morrell): exception handling and return correctly updated - # volume_models. - for vol in volumes: - self.delete_volume(vol) - - return None, None - - def get_volume_stats(self, refresh=False): - """Get volume status. - - If 'refresh' is True, run update first. 
- The name is a bit misleading as - the majority of the data here is cluster - data - """ - if refresh: - try: - self._update_cluster_status() - except exception.SolidFireAPIException: - pass - - return self.cluster_stats - - def extend_volume(self, volume, new_size): - """Extend an existing volume.""" - sfaccount = self._get_sfaccount(volume['project_id']) - params = {'accountID': sfaccount['accountID']} - - sf_vol = self._get_sf_volume(volume['id'], params) - - if sf_vol is None: - LOG.error("Volume ID %s was not found on " - "the SolidFire Cluster while attempting " - "extend_volume operation!", volume['id']) - raise exception.VolumeNotFound(volume_id=volume['id']) - - params = { - 'volumeID': sf_vol['volumeID'], - 'totalSize': int(new_size * units.Gi) - } - self._issue_api_request('ModifyVolume', - params, version='5.0') - - def _update_cluster_status(self): - """Retrieve status info for the Cluster.""" - params = {} - - data = {} - backend_name = self.configuration.safe_get('volume_backend_name') - data["volume_backend_name"] = backend_name or self.__class__.__name__ - data["vendor_name"] = 'SolidFire Inc' - data["driver_version"] = self.VERSION - data["storage_protocol"] = 'iSCSI' - data['consistencygroup_support'] = True - data['consistent_group_snapshot_enabled'] = True - data['replication_enabled'] = self.replication_enabled - if self.replication_enabled: - data['replication'] = 'enabled' - data['active_cluster_mvip'] = self.active_cluster_info['mvip'] - data['reserved_percentage'] = self.configuration.reserved_percentage - data['QoS_support'] = True - - try: - results = self._issue_api_request('GetClusterCapacity', params) - except exception.SolidFireAPIException: - data['total_capacity_gb'] = 0 - data['free_capacity_gb'] = 0 - self.cluster_stats = data - return - - results = results['result']['clusterCapacity'] - free_capacity = ( - results['maxProvisionedSpace'] - results['usedSpace']) - - data['total_capacity_gb'] = ( - 
float(results['maxProvisionedSpace'] / units.Gi)) - - data['free_capacity_gb'] = float(free_capacity / units.Gi) - data['compression_percent'] = ( - results['compressionPercent']) - data['deduplicaton_percent'] = ( - results['deDuplicationPercent']) - data['thin_provision_percent'] = ( - results['thinProvisioningPercent']) - self.cluster_stats = data - - def initialize_connection(self, volume, connector): - """Initialize the connection and return connection info. - - Optionally checks and utilizes volume access groups. - """ - properties = self._sf_initialize_connection(volume, connector) - properties['data']['discard'] = True - return properties - - def attach_volume(self, context, volume, - instance_uuid, host_name, - mountpoint): - - sfaccount = self._get_sfaccount(volume['project_id']) - params = {'accountID': sfaccount['accountID']} - - sf_vol = self._get_sf_volume(volume['id'], params) - if sf_vol is None: - LOG.error("Volume ID %s was not found on " - "the SolidFire Cluster while attempting " - "attach_volume operation!", volume['id']) - raise exception.VolumeNotFound(volume_id=volume['id']) - - attributes = sf_vol['attributes'] - attributes['attach_time'] = volume.get('attach_time', None) - attributes['attached_to'] = instance_uuid - params = { - 'volumeID': sf_vol['volumeID'], - 'attributes': attributes - } - - self._issue_api_request('ModifyVolume', params) - - def terminate_connection(self, volume, properties, force): - return self._sf_terminate_connection(volume, - properties, - force) - - def detach_volume(self, context, volume, attachment=None): - sfaccount = self._get_sfaccount(volume['project_id']) - params = {'accountID': sfaccount['accountID']} - - sf_vol = self._get_sf_volume(volume['id'], params) - if sf_vol is None: - LOG.error("Volume ID %s was not found on " - "the SolidFire Cluster while attempting " - "detach_volume operation!", volume['id']) - raise exception.VolumeNotFound(volume_id=volume['id']) - - attributes = sf_vol['attributes'] - 
attributes['attach_time'] = None - attributes['attached_to'] = None - params = { - 'volumeID': sf_vol['volumeID'], - 'attributes': attributes - } - - self._issue_api_request('ModifyVolume', params) - - def accept_transfer(self, context, volume, - new_user, new_project): - - sfaccount = self._get_sfaccount(volume['project_id']) - params = {'accountID': sfaccount['accountID']} - sf_vol = self._get_sf_volume(volume['id'], params) - if sf_vol is None: - LOG.error("Volume ID %s was not found on " - "the SolidFire Cluster while attempting " - "accept_transfer operation!", volume['id']) - raise exception.VolumeNotFound(volume_id=volume['id']) - if new_project != volume['project_id']: - # do a create_sfaccount here as this tenant - # may not exist on the cluster yet - sfaccount = self._create_sfaccount(new_project) - - params = { - 'volumeID': sf_vol['volumeID'], - 'accountID': sfaccount['accountID'] - } - self._issue_api_request('ModifyVolume', - params, version='5.0') - - volume['project_id'] = new_project - volume['user_id'] = new_user - return self.target_driver.ensure_export(context, volume, None) - - def retype(self, ctxt, volume, new_type, diff, host): - """Convert the volume to be of the new type. - - Returns a boolean indicating whether the retype occurred. - - :param ctxt: Context - :param volume: A dictionary describing the volume to migrate - :param new_type: A dictionary describing the volume type to convert to - :param diff: A dictionary with the difference between the two types - :param host: A dictionary describing the host to migrate to, where - host['host'] is its name, and host['capabilities'] is a - dictionary of its reported capabilities (Not Used). 
- - """ - qos = {} - attributes = {} - - sfaccount = self._get_sfaccount(volume['project_id']) - params = {'accountID': sfaccount['accountID']} - sf_vol = self._get_sf_volume(volume['id'], params) - - if sf_vol is None: - raise exception.VolumeNotFound(volume_id=volume['id']) - - attributes = sf_vol['attributes'] - attributes['retyped_at'] = timeutils.utcnow().isoformat() - params = {'volumeID': sf_vol['volumeID']} - qos = self._set_qos_by_volume_type(ctxt, new_type['id'], - volume.get('size')) - - if qos: - params['qos'] = qos - - self._issue_api_request('ModifyVolume', params) - return True - - def manage_existing(self, volume, external_ref): - """Manages an existing SolidFire Volume (import to Cinder). - - Renames the Volume to match the expected name for the volume. - Also need to consider things like QoS, Emulation, account/tenant. - """ - sfid = external_ref.get('source-id', None) - sfname = external_ref.get('name', None) - if sfid is None: - raise exception.SolidFireAPIException(_("Manage existing volume " - "requires 'source-id'.")) - - # First get the volume on the SF cluster (MUST be active) - params = {'startVolumeID': sfid, - 'limit': 1} - vols = self._issue_api_request( - 'ListActiveVolumes', params)['result']['volumes'] - - sf_ref = vols[0] - sfaccount = self._create_sfaccount(volume['project_id']) - - attributes = {} - qos = self._retrieve_qos_setting(volume) - - import_time = volume['created_at'].isoformat() - attributes = {'uuid': volume['id'], - 'is_clone': 'False', - 'os_imported_at': import_time, - 'old_name': sfname} - - params = {'name': volume['name'], - 'volumeID': sf_ref['volumeID'], - 'accountID': sfaccount['accountID'], - 'enable512e': self.configuration.sf_emulate_512, - 'attributes': attributes, - 'qos': qos} - - self._issue_api_request('ModifyVolume', - params, version='5.0') - - return self._get_model_info(sfaccount, sf_ref['volumeID']) - - def manage_existing_get_size(self, volume, external_ref): - """Return size of an existing LV 
for manage_existing. - - existing_ref is a dictionary of the form: - {'name': } - """ - sfid = external_ref.get('source-id', None) - if sfid is None: - raise exception.SolidFireAPIException(_("Manage existing get size " - "requires 'id'.")) - - params = {'startVolumeID': int(sfid), - 'limit': 1} - vols = self._issue_api_request( - 'ListActiveVolumes', params)['result']['volumes'] - return int(math.ceil(float(vols[0]['totalSize']) / units.Gi)) - - def unmanage(self, volume): - """Mark SolidFire Volume as unmanaged (export from Cinder).""" - sfaccount = self._get_sfaccount(volume['project_id']) - if sfaccount is None: - LOG.error("Account for Volume ID %s was not found on " - "the SolidFire Cluster while attempting " - "unmanage operation!", volume['id']) - raise exception.SolidFireAPIException(_("Failed to find account " - "for volume.")) - - params = {'accountID': sfaccount['accountID']} - sf_vol = self._get_sf_volume(volume['id'], params) - if sf_vol is None: - raise exception.VolumeNotFound(volume_id=volume['id']) - - export_time = timeutils.utcnow().isoformat() - attributes = sf_vol['attributes'] - attributes['os_exported_at'] = export_time - params = {'volumeID': int(sf_vol['volumeID']), - 'attributes': attributes} - - self._issue_api_request('ModifyVolume', - params, version='5.0') - - def _failover_volume(self, remote_vol, remote): - """Modify remote volume to R/W mode.""" - self._issue_api_request( - 'RemoveVolumePair', - {'volumeID': remote_vol['volumeID']}, - endpoint=remote['endpoint'], version='7.0') - - params = {'volumeID': remote_vol['volumeID'], - 'access': 'readWrite'} - self._issue_api_request('ModifyVolume', params, - endpoint=remote['endpoint']) - - def failover_host(self, context, volumes, secondary_id=None, groups=None): - """Failover to replication target.""" - volume_updates = [] - remote = None - - if secondary_id: - for rc in self.cluster_pairs: - if rc['mvip'] == secondary_id: - remote = rc - break - if not remote: - LOG.error("SolidFire 
driver received failover_host " - "but was unable to find specified replication " - "pair with id: %s.", secondary_id) - raise exception.InvalidReplicationTarget - else: - remote = self.cluster_pairs[0] - - if not remote or not self.replication_enabled: - LOG.error("SolidFire driver received failover_host " - "request, however replication is NOT " - "enabled, or there are no available " - "targets to fail-over to.") - raise exception.UnableToFailOver(reason=_("Failover requested " - "on non replicated " - "backend.")) - - remote_vols = self._map_sf_volumes(volumes, - endpoint=remote['endpoint']) - primary_vols = self._map_sf_volumes(volumes) - for v in volumes: - remote_vlist = [sfv for sfv in remote_vols - if sfv['cinder_id'] == v['id']] - - if len(remote_vlist) > 0: - remote_vol = remote_vlist[0] - self._failover_volume(remote_vol, remote) - primary_vol = [sfv for sfv in primary_vols if - sfv['cinder_id'] == v['id']][0] - if len(primary_vol['volumePairs']) > 0: - self._issue_api_request( - 'RemoveVolumePair', - {'volumeID': primary_vol['volumeID']}, - version='7.0') - iqn = remote_vol['iqn'] - volume_updates.append( - {'volume_id': v['id'], - 'updates': { - 'provider_location': ('%s %s %s' % - (remote['endpoint']['svip'], - iqn, - 0)), - 'replication_status': 'failed-over'}}) - else: - volume_updates.append({'volume_id': v['id'], - 'updates': {'status': 'error', }}) - - # FIXME(jdg): This introduces a problem for us, up until now our driver - # has been pretty much stateless and has allowed customers to run - # active/active HA c-vol services with SolidFire. 
The introduction of - # the active_cluster and failed_over attributes is going to break that - # but for now that's going to be the trade off of using replciation - self.active_cluster_info = remote - self.failed_over = True - return remote['mvip'], volume_updates, [] - - def freeze_backend(self, context): - """Freeze backend notification.""" - pass - - def thaw_backend(self, context): - """Thaw backend notification.""" - pass - - -class SolidFireISCSI(iscsi_driver.SanISCSITarget): - def __init__(self, *args, **kwargs): - super(SolidFireISCSI, self).__init__(*args, **kwargs) - self.sf_driver = kwargs.get('solidfire_driver') - - def __getattr__(self, attr): - if hasattr(self.sf_driver, attr): - return getattr(self.sf_driver, attr) - else: - msg = _('Attribute: %s not found.') % attr - raise NotImplementedError(msg) - - def _do_iscsi_export(self, volume): - sfaccount = self._get_sfaccount(volume['project_id']) - model_update = {} - model_update['provider_auth'] = ('CHAP %s %s' - % (sfaccount['username'], - sfaccount['targetSecret'])) - - return model_update - - def create_export(self, context, volume, volume_path): - return self._do_iscsi_export(volume) - - def ensure_export(self, context, volume, volume_path): - try: - return self._do_iscsi_export(volume) - except exception.SolidFireAPIException: - return None - - # Following are abc's that we make sure are caught and - # paid attention to. In our case we don't use them - # so just stub them out here. - def remove_export(self, context, volume): - pass - - def terminate_connection(self, volume, connector, **kwargs): - pass - - def _sf_initialize_connection(self, volume, connector): - """Initialize the connection and return connection info. - - Optionally checks and utilizes volume access groups. 
- """ - if self.configuration.sf_enable_vag: - iqn = connector['initiator'] - provider_id = volume['provider_id'] - vol_id = int(provider_id.split()[0]) - - # safe_create_vag may opt to reuse vs create a vag, so we need to - # add our vol_id. - vag_id = self._safe_create_vag(iqn, vol_id) - self._add_volume_to_vag(vol_id, iqn, vag_id) - - # Continue along with default behavior - return super(SolidFireISCSI, self).initialize_connection(volume, - connector) - - def _sf_terminate_connection(self, volume, properties, force): - """Terminate the volume connection. - - Optionally remove volume from volume access group. - If the VAG is empty then the VAG is also removed. - """ - if self.configuration.sf_enable_vag: - iqn = properties['initiator'] - vag = self._get_vags_by_name(iqn) - provider_id = volume['provider_id'] - vol_id = int(provider_id.split()[0]) - - if vag and not volume['multiattach']: - # Multiattach causes problems with removing volumes from VAGs. - # Compromise solution for now is to remove multiattach volumes - # from VAGs during volume deletion. - vag = vag[0] - vag_id = vag['volumeAccessGroupID'] - if [vol_id] == vag['volumes']: - self._remove_vag(vag_id) - elif vol_id in vag['volumes']: - self._remove_volume_from_vag(vol_id, vag_id) - - return super(SolidFireISCSI, self).terminate_connection(volume, - properties, - force=force) diff --git a/cinder/volume/drivers/synology/__init__.py b/cinder/volume/drivers/synology/__init__.py deleted file mode 100644 index e69de29bb..000000000 diff --git a/cinder/volume/drivers/synology/synology_common.py b/cinder/volume/drivers/synology/synology_common.py deleted file mode 100644 index 67a0ae9d7..000000000 --- a/cinder/volume/drivers/synology/synology_common.py +++ /dev/null @@ -1,1309 +0,0 @@ -# Copyright (c) 2016 Synology Inc. All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. 
You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - - -import base64 -import functools -import hashlib -import json -import math -from os import urandom -from random import randint -import string - -from cryptography.hazmat.backends import default_backend -from cryptography.hazmat.primitives.asymmetric import padding -from cryptography.hazmat.primitives.asymmetric import rsa -from cryptography.hazmat.primitives.ciphers import algorithms -from cryptography.hazmat.primitives.ciphers import Cipher -from cryptography.hazmat.primitives.ciphers import modes -from cryptography.hazmat.primitives import hashes -import eventlet -from oslo_config import cfg -from oslo_log import log as logging -from oslo_utils import excutils -from oslo_utils import units -import requests -from six.moves import urllib -from six import string_types - -from cinder import exception -from cinder.i18n import _ -from cinder.objects import snapshot -from cinder.objects import volume -from cinder import utils -from cinder.volume import configuration -from cinder.volume import utils as volutils - - -cinder_opts = [ - cfg.StrOpt('synology_pool_name', - default='', - help='Volume on Synology storage to be used for creating lun.'), - cfg.PortOpt('synology_admin_port', - default=5000, - help='Management port for Synology storage.'), - cfg.StrOpt('synology_username', - default='admin', - help='Administrator of Synology storage.'), - cfg.StrOpt('synology_password', - default='', - help='Password of administrator for logging in ' - 'Synology storage.', - secret=True), - cfg.BoolOpt('synology_ssl_verify', - default=True, - help='Do certificate 
validation or not if ' - '$driver_use_ssl is True'), - cfg.StrOpt('synology_one_time_pass', - default=None, - help='One time password of administrator for logging in ' - 'Synology storage if OTP is enabled.', - secret=True), - cfg.StrOpt('synology_device_id', - default=None, - help='Device id for skip one time password check for ' - 'logging in Synology storage if OTP is enabled.'), -] - -LOG = logging.getLogger(__name__) - -CONF = cfg.CONF -CONF.register_opts(cinder_opts, group=configuration.SHARED_CONF_GROUP) - - -class AESCipher(object): - """Encrypt with OpenSSL-compatible way""" - - SALT_MAGIC = 'Salted__' - - def __init__(self, password, key_length=32): - self._bs = 16 - self._salt = urandom(self._bs - len(self.SALT_MAGIC)) - - self._key, self._iv = self._derive_key_and_iv(password, - self._salt, - key_length, - self._bs) - - def _pad(self, s): - bs = self._bs - return s + (bs - len(s) % bs) * chr(bs - len(s) % bs) - - def _derive_key_and_iv(self, password, salt, key_length, iv_length): - d = d_i = '' - while len(d) < key_length + iv_length: - md5_str = d_i + password + salt - d_i = hashlib.md5(md5_str).digest() - d += d_i - return d[:key_length], d[key_length:key_length + iv_length] - - def encrypt(self, text): - cipher = Cipher( - algorithms.AES(self._key), - modes.CBC(self._iv), - backend = default_backend() - ) - encryptor = cipher.encryptor() - ciphertext = encryptor.update(self._pad(text)) + encryptor.finalize() - - return "%s%s%s" % (self.SALT_MAGIC, self._salt, ciphertext) - - -class Session(object): - def __init__(self, - host, - port, - username, - password, - https=False, - ssl_verify=True, - one_time_pass=None, - device_id=None): - self._proto = 'https' if https else 'http' - self._host = host - self._port = port - self._sess = 'dsm' - self._https = https - self._url_prefix = self._proto + '://' + host + ':' + str(port) - self._url = self._url_prefix + '/webapi/auth.cgi' - self._ssl_verify = ssl_verify - self._sid = None - self._did = device_id - 
- data = {'api': 'SYNO.API.Auth', - 'method': 'login', - 'version': 6} - - params = {'account': username, - 'passwd': password, - 'session': self._sess, - 'format': 'sid'} - - if one_time_pass: - if device_id: - params.update(device_id=device_id) - else: - params.update(otp_code=one_time_pass, - enable_device_token='yes') - - if not https: - params = self._encrypt_params(params) - - data.update(params) - - resp = requests.post(self._url, - data=data, - verify=self._ssl_verify) - result = resp.json() - - if result and result['success']: - self._sid = result['data']['sid'] - if one_time_pass and not device_id: - self._did = result['data']['did'] - else: - raise exception.SynoAuthError(reason=_('Login failed.')) - - def _random_AES_passphrase(self, length): - available = ('0123456789' - 'abcdefghijklmnopqrstuvwxyz' - 'ABCDEFGHIJKLMNOPQRSTUVWXYZ' - '~!@#$%^&*()_+-/') - key = '' - - while length > 0: - key += available[randint(0, len(available) - 1)] - length -= 1 - - return key - - def _get_enc_info(self): - url = self.url_prefix() + '/webapi/encryption.cgi' - data = {"api": "SYNO.API.Encryption", - "method": "getinfo", - "version": 1, - "format": "module"} - - resp = requests.post(url, data=data, verify=self._ssl_verify) - result = resp.json() - - return result["data"] - - def _encrypt_RSA(self, modulus, passphrase, text): - key = rsa.generate_private_key( - key_size = modulus, - public_exponent = passphrase, - backend = default_backend() - ) - public_key = key.public_key() - - ciphertext = public_key.encrypt( - text, - padding.PKCS1v15( - mgf = padding.PKCS1v15(algorithm = hashes.SHA1()), - algorithm = hashes.SHA1() - ) - ) - - return ciphertext - - def _encrypt_AES(self, passphrase, text): - cipher = AESCipher(passphrase) - - return cipher.encrypt(text) - - def _encrypt_params(self, params): - enc_info = self._get_enc_info() - public_key = enc_info["public_key"] - cipher_key = enc_info["cipherkey"] - cipher_token = enc_info["ciphertoken"] - server_time = 
enc_info["server_time"] - random_passphrase = self._random_AES_passphrase(501) - - params[cipher_token] = server_time - - encrypted_passphrase = self._encrypt_RSA(string.atol(public_key, 16), - string.atol("10001", 16), - random_passphrase) - - encrypted_params = self._encrypt_AES(random_passphrase, - urllib.parse.urlencode(params)) - - enc_params = {"rsa": base64.b64encode(encrypted_passphrase), - "aes": base64.b64encode(encrypted_params)} - - return {cipher_key: json.dumps(enc_params)} - - def sid(self): - return self._sid - - def did(self): - return self._did - - def url_prefix(self): - return self._url_prefix - - def query(self, api): - url = self._url_prefix + '/webapi/query.cgi' - data = {'api': 'SYNO.API.Info', - 'version': 1, - 'method': 'query', - 'query': api} - - resp = requests.post(url, - data=data, - verify=self._ssl_verify) - result = resp.json() - - if 'success' in result and result['success']: - return result['data'][api] - else: - return None - - def __del__(self): - if not hasattr(self, '_sid'): - return - - data = {'api': 'SYNO.API.Auth', - 'version': 1, - 'method': 'logout', - 'session': self._sess, - '_sid': self._sid} - - requests.post(self._url, data=data, verify=self._ssl_verify) - - -def _connection_checker(func): - """Decorator to check session has expired or not.""" - @functools.wraps(func) - def inner_connection_checker(self, *args, **kwargs): - LOG.debug('in _connection_checker') - for attempts in range(2): - try: - return func(self, *args, **kwargs) - except exception.SynoAuthError as e: - if attempts < 1: - LOG.debug('Session might have expired.' 
- ' Trying to relogin') - self.new_session() - continue - else: - LOG.error('Try to renew session: [%s]', e) - raise - return inner_connection_checker - - -class APIRequest(object): - def __init__(self, - host, - port, - username, - password, - https=False, - ssl_verify=True, - one_time_pass=None, - device_id=None): - self._host = host - self._port = port - self._username = username - self._password = password - self._https = https - self._ssl_verify = ssl_verify - self._one_time_pass = one_time_pass - self._device_id = device_id - - self.new_session() - - def new_session(self): - self.__session = Session(self._host, - self._port, - self._username, - self._password, - self._https, - self._ssl_verify, - self._one_time_pass, - self._device_id) - if not self._device_id: - self._device_id = self.__session.did() - - def _start(self, api, version): - apiInfo = self.__session.query(api) - self._jsonFormat = apiInfo['requestFormat'] == 'JSON' - if (apiInfo and (apiInfo['minVersion'] <= version) - and (apiInfo['maxVersion'] >= version)): - return apiInfo['path'] - else: - raise exception.APIException(service=api) - - def _encode_param(self, params): - # Json encode - if self._jsonFormat: - for key, value in params.items(): - params[key] = json.dumps(value) - # url encode - return urllib.parse.urlencode(params) - - @utils.synchronized('Synology') - @_connection_checker - def request(self, api, method, version, **params): - cgi_path = self._start(api, version) - s = self.__session - url = s.url_prefix() + '/webapi/' + cgi_path - data = {'api': api, - 'version': version, - 'method': method, - '_sid': s.sid() - } - - data.update(params) - - LOG.debug('[%s]', url) - LOG.debug('%s', json.dumps(data, indent=4)) - - # Send HTTP Post Request - resp = requests.post(url, - data=self._encode_param(data), - verify=self._ssl_verify) - - http_status = resp.status_code - result = resp.json() - - LOG.debug('%s', json.dumps(result, indent=4)) - - # Check for status code - if (200 != 
http_status): - result = {'http_status': http_status} - elif 'success' not in result: - reason = _("'success' not found") - raise exception.MalformedResponse(cmd=json.dumps(data, indent=4), - reason=reason) - - if ('error' in result and 'code' in result["error"] - and result['error']['code'] == 105): - raise exception.SynoAuthError(reason=_('Session might have ' - 'expired.')) - - return result - - -class SynoCommon(object): - """Manage Cinder volumes on Synology storage""" - - TARGET_NAME_PREFIX = 'Cinder-Target-' - CINDER_LUN = 'CINDER' - METADATA_DS_SNAPSHOT_UUID = 'ds_snapshot_UUID' - - def __init__(self, config, driver_type): - if not config.safe_get('iscsi_ip_address'): - raise exception.InvalidConfigurationValue( - option='iscsi_ip_address', - value='') - if not config.safe_get('synology_pool_name'): - raise exception.InvalidConfigurationValue( - option='synology_pool_name', - value='') - - self.config = config - self.vendor_name = 'Synology' - self.driver_type = driver_type - self.volume_backend_name = self._get_backend_name() - self.iscsi_port = self.config.safe_get('iscsi_port') - - api = APIRequest(self.config.iscsi_ip_address, - self.config.synology_admin_port, - self.config.synology_username, - self.config.synology_password, - self.config.safe_get('driver_use_ssl'), - self.config.safe_get('synology_ssl_verify'), - self.config.safe_get('synology_one_time_pass'), - self.config.safe_get('synology_device_id'),) - self.synoexec = api.request - self.host_uuid = self._get_node_uuid() - - def _get_node_uuid(self): - try: - out = self.exec_webapi('SYNO.Core.ISCSI.Node', - 'list', - 1) - - self.check_response(out) - - except Exception: - with excutils.save_and_reraise_exception(): - LOG.exception('Failed to _get_node_uuid.') - - if (not self.check_value_valid(out, ['data', 'nodes'], list) - or 0 >= len(out['data']['nodes']) - or not self.check_value_valid(out['data']['nodes'][0], - ['uuid'], - string_types)): - msg = _('Failed to _get_node_uuid.') - raise 
exception.VolumeDriverException(message=msg) - - return out['data']['nodes'][0]['uuid'] - - def _get_pool_info(self): - pool_name = self.config.synology_pool_name - if not pool_name: - raise exception.InvalidConfigurationValue(option='pool_name', - value='') - try: - out = self.exec_webapi('SYNO.Core.Storage.Volume', - 'get', - 1, - volume_path='/' + pool_name) - - self.check_response(out) - - except Exception: - with excutils.save_and_reraise_exception(): - LOG.exception('Failed to _get_pool_status.') - - if not self.check_value_valid(out, ['data', 'volume'], object): - raise exception.MalformedResponse(cmd='_get_pool_info', - reason=_('no data found')) - - return out['data']['volume'] - - def _get_pool_size(self): - info = self._get_pool_info() - - if 'size_free_byte' not in info or 'size_total_byte' not in info: - raise exception.MalformedResponse(cmd='_get_pool_size', - reason=_('size not found')) - - free_capacity_gb = int(int(info['size_free_byte']) / units.Gi) - total_capacity_gb = int(int(info['size_total_byte']) / units.Gi) - other_user_data_gb = int(math.ceil((float(info['size_total_byte']) - - float(info['size_free_byte']) - - float(info['eppool_used_byte'])) / - units.Gi)) - - return free_capacity_gb, total_capacity_gb, other_user_data_gb - - def _get_pool_lun_provisioned_size(self): - pool_name = self.config.synology_pool_name - if not pool_name: - raise exception.InvalidConfigurationValue(option='pool_name', - value=pool_name) - try: - out = self.exec_webapi('SYNO.Core.ISCSI.LUN', - 'list', - 1, - location='/' + pool_name) - - self.check_response(out) - except Exception: - with excutils.save_and_reraise_exception(): - LOG.exception('Failed to _get_pool_lun_provisioned_size.') - - if not self.check_value_valid(out, ['data', 'luns'], list): - raise exception.MalformedResponse( - cmd='_get_pool_lun_provisioned_size', - reason=_('no data found')) - - size = 0 - for lun in out['data']['luns']: - size += lun['size'] - - return int(math.ceil(float(size) / 
units.Gi)) - - def _get_lun_info(self, lun_name, additional=None): - if not lun_name: - err = _('Param [lun_name] is invalid.') - raise exception.InvalidParameterValue(err=err) - - params = {'uuid': lun_name} - if additional is not None: - params['additional'] = additional - - try: - out = self.exec_webapi('SYNO.Core.ISCSI.LUN', - 'get', - 1, - **params) - - self.check_response(out, uuid=lun_name) - except Exception: - with excutils.save_and_reraise_exception(): - LOG.exception('Failed to _get_lun_info. [%s]', lun_name) - - if not self.check_value_valid(out, ['data', 'lun'], object): - raise exception.MalformedResponse(cmd='_get_lun_info', - reason=_('lun info not found')) - - return out['data']['lun'] - - def _get_lun_uuid(self, lun_name): - if not lun_name: - err = _('Param [lun_name] is invalid.') - raise exception.InvalidParameterValue(err=err) - - try: - lun_info = self._get_lun_info(lun_name) - except Exception: - with excutils.save_and_reraise_exception(): - LOG.exception('Failed to _get_lun_uuid. [%s]', lun_name) - - if not self.check_value_valid(lun_info, ['uuid'], string_types): - raise exception.MalformedResponse(cmd='_get_lun_uuid', - reason=_('uuid not found')) - - return lun_info['uuid'] - - def _get_lun_status(self, lun_name): - if not lun_name: - err = _('Param [lun_name] is invalid.') - raise exception.InvalidParameterValue(err=err) - - try: - lun_info = self._get_lun_info(lun_name, - ['status', 'is_action_locked']) - except Exception: - with excutils.save_and_reraise_exception(): - LOG.exception('Failed to _get_lun_status. 
[%s]', lun_name) - - if not self.check_value_valid(lun_info, ['status'], string_types): - raise exception.MalformedResponse(cmd='_get_lun_status', - reason=_('status not found')) - if not self.check_value_valid(lun_info, ['is_action_locked'], bool): - raise exception.MalformedResponse(cmd='_get_lun_status', - reason=_('action_locked ' - 'not found')) - - return lun_info['status'], lun_info['is_action_locked'] - - def _get_snapshot_info(self, snapshot_uuid, additional=None): - if not snapshot_uuid: - err = _('Param [snapshot_uuid] is invalid.') - raise exception.InvalidParameterValue(err=err) - - params = {'snapshot_uuid': snapshot_uuid} - if additional is not None: - params['additional'] = additional - - try: - out = self.exec_webapi('SYNO.Core.ISCSI.LUN', - 'get_snapshot', - 1, - **params) - - self.check_response(out, snapshot_id=snapshot_uuid) - except Exception: - with excutils.save_and_reraise_exception(): - LOG.exception('Failed to _get_snapshot_info. [%s]', - snapshot_uuid) - - if not self.check_value_valid(out, ['data', 'snapshot'], object): - raise exception.MalformedResponse(cmd='_get_snapshot_info', - reason=_('snapshot info not ' - 'found')) - - return out['data']['snapshot'] - - def _get_snapshot_status(self, snapshot_uuid): - if not snapshot_uuid: - err = _('Param [snapshot_uuid] is invalid.') - raise exception.InvalidParameterValue(err=err) - - try: - snapshot_info = self._get_snapshot_info(snapshot_uuid, - ['status', - 'is_action_locked']) - except Exception: - with excutils.save_and_reraise_exception(): - LOG.exception('Failed to _get_snapshot_info. 
[%s]', - snapshot_uuid) - - if not self.check_value_valid(snapshot_info, ['status'], string_types): - raise exception.MalformedResponse(cmd='_get_snapshot_status', - reason=_('status not found')) - if not self.check_value_valid(snapshot_info, - ['is_action_locked'], - bool): - raise exception.MalformedResponse(cmd='_get_snapshot_status', - reason=_('action_locked ' - 'not found')) - - return snapshot_info['status'], snapshot_info['is_action_locked'] - - def _get_metadata_value(self, obj, key): - if key not in obj['metadata']: - if isinstance(obj, volume.Volume): - raise exception.VolumeMetadataNotFound( - volume_id=obj['id'], - metadata_key=key) - elif isinstance(obj, snapshot.Snapshot): - raise exception.SnapshotMetadataNotFound( - snapshot_id=obj['id'], - metadata_key=key) - else: - raise exception.MetadataAbsent() - - return obj['metadata'][key] - - def _get_backend_name(self): - return self.config.safe_get('volume_backend_name') or 'Synology' - - def _target_create(self, identifier): - if not identifier: - err = _('Param [identifier] is invalid.') - raise exception.InvalidParameterValue(err=err) - - # 0 for no auth, 1 for single chap, 2 for mutual chap - auth_type = 0 - chap_username = '' - chap_password = '' - provider_auth = '' - if self.config.safe_get('use_chap_auth') and self.config.use_chap_auth: - auth_type = 1 - chap_username = (self.config.safe_get('chap_username') or - volutils.generate_username(12)) - chap_password = (self.config.safe_get('chap_password') or - volutils.generate_password()) - provider_auth = ' '.join(('CHAP', chap_username, chap_password)) - - trg_prefix = self.config.safe_get('iscsi_target_prefix') - trg_name = (self.TARGET_NAME_PREFIX + '%s') % identifier - iqn = trg_prefix + trg_name - - try: - out = self.exec_webapi('SYNO.Core.ISCSI.Target', - 'create', - 1, - name=trg_name, - iqn=iqn, - auth_type=auth_type, - user=chap_username, - password=chap_password, - max_sessions=0) - - self.check_response(out) - - except Exception: - with 
excutils.save_and_reraise_exception(): - LOG.exception('Failed to _target_create. [%s]', - identifier) - - if not self.check_value_valid(out, ['data', 'target_id']): - msg = _('Failed to get target_id of target [%s]') % trg_name - raise exception.VolumeDriverException(message=msg) - - trg_id = out['data']['target_id'] - - return iqn, trg_id, provider_auth - - def _target_delete(self, trg_id): - if 0 > trg_id: - err = _('trg_id is invalid: %d.') % trg_id - raise exception.InvalidParameterValue(err=err) - - try: - out = self.exec_webapi('SYNO.Core.ISCSI.Target', - 'delete', - 1, - target_id=('%d' % trg_id)) - - self.check_response(out) - - except Exception: - with excutils.save_and_reraise_exception(): - LOG.exception('Failed to _target_delete. [%d]', trg_id) - - # is_map True for map, False for ummap - def _lun_map_unmap_target(self, volume_name, is_map, trg_id): - if 0 > trg_id: - err = _('trg_id is invalid: %d.') % trg_id - raise exception.InvalidParameterValue(err=err) - - try: - lun_uuid = self._get_lun_uuid(volume_name) - out = self.exec_webapi('SYNO.Core.ISCSI.LUN', - 'map_target' if is_map else 'unmap_target', - 1, - uuid=lun_uuid, - target_ids=['%d' % trg_id]) - - self.check_response(out) - - except Exception: - with excutils.save_and_reraise_exception(): - LOG.exception('Failed to _lun_map_unmap_target. 
' - '[%(action)s][%(vol)s].', - {'action': ('map_target' if is_map - else 'unmap_target'), - 'vol': volume_name}) - - def _lun_map_target(self, volume_name, trg_id): - self._lun_map_unmap_target(volume_name, True, trg_id) - - def _lun_unmap_target(self, volume_name, trg_id): - self._lun_map_unmap_target(volume_name, False, trg_id) - - def _modify_lun_name(self, name, new_name): - try: - out = self.exec_webapi('SYNO.Core.ISCSI.LUN', - 'set', - 1, - uuid=name, - new_name=new_name) - - self.check_response(out) - - except Exception: - with excutils.save_and_reraise_exception(): - LOG.exception('Failed to _modify_lun_name [%s].', name) - - def _check_lun_status_normal(self, volume_name): - status = '' - try: - while True: - status, locked = self._get_lun_status(volume_name) - if not locked: - break - eventlet.sleep(2) - except Exception: - with excutils.save_and_reraise_exception(): - LOG.exception('Failed to get lun status. [%s]', - volume_name) - - LOG.debug('Lun [%(vol)s], status [%(status)s].', - {'vol': volume_name, - 'status': status}) - return status == 'normal' - - def _check_snapshot_status_healthy(self, snapshot_uuid): - status = '' - try: - while True: - status, locked = self._get_snapshot_status(snapshot_uuid) - if not locked: - break - eventlet.sleep(2) - except Exception: - with excutils.save_and_reraise_exception(): - LOG.exception('Failed to get snapshot status. 
[%s]', - snapshot_uuid) - - LOG.debug('Lun [%(snapshot)s], status [%(status)s].', - {'snapshot': snapshot_uuid, - 'status': status}) - return status == 'Healthy' - - def _check_storage_response(self, out, **kwargs): - data = 'internal error' - exc = exception.VolumeBackendAPIException(data=data) - message = 'Internal error' - - return (message, exc) - - def _check_iscsi_response(self, out, **kwargs): - LUN_BAD_LUN_UUID = 18990505 - LUN_NO_SUCH_SNAPSHOT = 18990532 - - if not self.check_value_valid(out, ['error', 'code'], int): - raise exception.MalformedResponse(cmd='_check_iscsi_response', - reason=_('no error code found')) - - code = out['error']['code'] - exc = None - message = '' - - if code == LUN_BAD_LUN_UUID: - exc = exception.SynoLUNNotExist(**kwargs) - message = 'Bad LUN UUID' - elif code == LUN_NO_SUCH_SNAPSHOT: - exc = exception.SnapshotNotFound(**kwargs) - message = 'No such snapshot' - else: - data = 'internal error' - exc = exception.VolumeBackendAPIException(data=data) - message = 'Internal error' - - message = '%s [%d]' % (message, code) - - return (message, exc) - - def _check_ds_pool_status(self): - pool_info = self._get_pool_info() - if not self.check_value_valid(pool_info, ['readonly'], bool): - raise exception.MalformedResponse(cmd='_check_ds_pool_status', - reason=_('no readonly found')) - - if pool_info['readonly']: - message = (_('pool [%s] is not writable') % - self.config.synology_pool_name) - raise exception.VolumeDriverException(message=message) - - def _check_ds_version(self): - try: - out = self.exec_webapi('SYNO.Core.System', - 'info', - 1, - type='firmware') - - self.check_response(out) - except Exception: - with excutils.save_and_reraise_exception(): - LOG.exception('Failed to _check_ds_version') - - if not self.check_value_valid(out, - ['data', 'firmware_ver'], - string_types): - raise exception.MalformedResponse(cmd='_check_ds_version', - reason=_('data not found')) - firmware_version = out['data']['firmware_ver'] - - # e.g. 
'DSM 6.1-7610', 'DSM 6.0.1-7370', 'DSM 6.0-7321 update 3' - version = firmware_version.split()[1].split('-')[0] - versions = version.split('.') - major, minor, hotfix = (versions[0], - versions[1], - versions[2] if len(versions) is 3 else '0') - - major, minor, hotfix = (int(major), int(minor), int(hotfix)) - - if (6 > major) or (major is 6 and minor is 0 and hotfix < 2): - m = (_('DS version %s is not supperted') % - firmware_version) - raise exception.VolumeDriverException(message=m) - - def _check_ds_ability(self): - try: - out = self.exec_webapi('SYNO.Core.System', - 'info', - 1, - type='define') - - self.check_response(out) - except Exception: - with excutils.save_and_reraise_exception(): - LOG.exception('Failed to _check_ds_ability') - - if not self.check_value_valid(out, ['data'], dict): - raise exception.MalformedResponse(cmd='_check_ds_ability', - reason=_('data not found')) - define = out['data'] - - if 'usbstation' in define and define['usbstation'] == 'yes': - m = _('usbstation is not supported') - raise exception.VolumeDriverException(message=m) - - if ('support_storage_mgr' not in define - or define['support_storage_mgr'] != 'yes'): - m = _('Storage Manager is not supported in DS') - raise exception.VolumeDriverException(message=m) - - if ('support_iscsi_target' not in define - or define['support_iscsi_target'] != 'yes'): - m = _('iSCSI target feature is not supported in DS') - raise exception.VolumeDriverException(message=m) - - if ('support_vaai' not in define - or define['support_vaai'] != 'yes'): - m = _('VAAI feature is not supported in DS') - raise exception.VolumeDriverException(message=m) - - if ('supportsnapshot' not in define - or define['supportsnapshot'] != 'yes'): - m = _('Snapshot feature is not supported in DS') - raise exception.VolumeDriverException(message=m) - - def check_response(self, out, **kwargs): - if out['success']: - return - - data = 'internal error' - exc = exception.VolumeBackendAPIException(data=data) - message = 
'Internal error' - - api = out['api_info']['api'] - - if (api.startswith('SYNO.Core.ISCSI.')): - message, exc = self._check_iscsi_response(out, **kwargs) - elif (api.startswith('SYNO.Core.Storage.')): - message, exc = self._check_storage_response(out, **kwargs) - - LOG.exception('%(message)s', {'message': message}) - - raise exc - - def exec_webapi(self, api, method, version, **kwargs): - result = self.synoexec(api, method, version, **kwargs) - - if 'http_status' in result and 200 != result['http_status']: - raise exception.SynoAPIHTTPError(code=result['http_status']) - - result['api_info'] = {'api': api, - 'method': method, - 'version': version} - return result - - def check_value_valid(self, obj, key_array, value_type=None): - curr_obj = obj - for key in key_array: - if key not in curr_obj: - LOG.error('key [%(key)s] is not in %(obj)s', - {'key': key, - 'obj': curr_obj}) - return False - curr_obj = curr_obj[key] - - if value_type and not isinstance(curr_obj, value_type): - LOG.error('[%(obj)s] is %(type)s, not %(value_type)s', - {'obj': curr_obj, - 'type': type(curr_obj), - 'value_type': value_type}) - return False - - return True - - def get_ip(self): - return self.config.iscsi_ip_address - - def get_provider_location(self, iqn, trg_id): - portals = ['%(ip)s:%(port)d' % {'ip': self.get_ip(), - 'port': self.iscsi_port}] - sec_ips = self.config.safe_get('iscsi_secondary_ip_addresses') - for ip in sec_ips: - portals.append('%(ip)s:%(port)d' % - {'ip': ip, - 'port': self.iscsi_port}) - - return '%s,%d %s 0' % ( - ';'.join(portals), - trg_id, - iqn) - - def is_lun_mapped(self, lun_name): - if not lun_name: - err = _('Param [lun_name] is invalid.') - raise exception.InvalidParameterValue(err=err) - - try: - lun_info = self._get_lun_info(lun_name, ['is_mapped']) - except Exception: - with excutils.save_and_reraise_exception(): - LOG.exception('Failed to _is_lun_mapped. 
[%s]', lun_name) - - if not self.check_value_valid(lun_info, ['is_mapped'], bool): - raise exception.MalformedResponse(cmd='_is_lun_mapped', - reason=_('is_mapped not found')) - - return lun_info['is_mapped'] - - def check_for_setup_error(self): - self._check_ds_pool_status() - self._check_ds_version() - self._check_ds_ability() - - def update_volume_stats(self): - """Update volume statistics. - - Three kinds of data are stored on the Synology backend pool: - 1. Thin volumes (LUNs on the pool), - 2. Thick volumes (LUNs on the pool), - 3. Other user data. - - other_user_data_gb is the size of the 3rd one. - lun_provisioned_gb is the summation of all thin/thick volume - provisioned size. - - Only thin type is available for Cinder volumes. - """ - - free_gb, total_gb, other_user_data_gb = self._get_pool_size() - lun_provisioned_gb = self._get_pool_lun_provisioned_size() - - data = {} - data['volume_backend_name'] = self.volume_backend_name - data['vendor_name'] = self.vendor_name - data['storage_protocol'] = self.config.iscsi_protocol - data['consistencygroup_support'] = False - data['QoS_support'] = False - data['thin_provisioning_support'] = True - data['thick_provisioning_support'] = False - data['reserved_percentage'] = self.config.reserved_percentage - - data['free_capacity_gb'] = free_gb - data['total_capacity_gb'] = total_gb - data['provisioned_capacity_gb'] = (lun_provisioned_gb + - other_user_data_gb) - data['max_over_subscription_ratio'] = (self.config. 
- max_over_subscription_ratio) - - data['iscsi_ip_address'] = self.config.iscsi_ip_address - data['pool_name'] = self.config.synology_pool_name - data['backend_info'] = ('%s:%s:%s' % - (self.vendor_name, - self.driver_type, - self.host_uuid)) - - return data - - def create_volume(self, volume): - try: - out = self.exec_webapi('SYNO.Core.ISCSI.LUN', - 'create', - 1, - name=volume['name'], - type=self.CINDER_LUN, - location=('/' + - self.config.synology_pool_name), - size=volume['size'] * units.Gi) - - self.check_response(out) - - except Exception: - with excutils.save_and_reraise_exception(): - LOG.exception('Failed to create_volume. [%s]', - volume['name']) - - if not self._check_lun_status_normal(volume['name']): - message = _('Lun [%s] status is not normal') % volume['name'] - raise exception.VolumeDriverException(message=message) - - def delete_volume(self, volume): - try: - lun_uuid = self._get_lun_uuid(volume['name']) - out = self.exec_webapi('SYNO.Core.ISCSI.LUN', - 'delete', - 1, - uuid=lun_uuid) - - self.check_response(out) - - except exception.SynoLUNNotExist: - LOG.warning('LUN does not exist') - except Exception: - with excutils.save_and_reraise_exception(): - LOG.exception('Failed to delete_volume. [%s]', - volume['name']) - - def create_cloned_volume(self, volume, src_vref): - try: - src_lun_uuid = self._get_lun_uuid(src_vref['name']) - out = self.exec_webapi('SYNO.Core.ISCSI.LUN', - 'clone', - 1, - src_lun_uuid=src_lun_uuid, - dst_lun_name=volume['name'], - is_same_pool=True, - clone_type='CINDER') - self.check_response(out) - - except Exception: - with excutils.save_and_reraise_exception(): - LOG.exception('Failed to create_cloned_volume. 
[%s]', - volume['name']) - - if not self._check_lun_status_normal(volume['name']): - message = _('Lun [%s] status is not normal.') % volume['name'] - raise exception.VolumeDriverException(message=message) - - if src_vref['size'] < volume['size']: - self.extend_volume(volume, volume['size']) - - def extend_volume(self, volume, new_size): - try: - lun_uuid = self._get_lun_uuid(volume['name']) - out = self.exec_webapi('SYNO.Core.ISCSI.LUN', - 'set', - 1, - uuid=lun_uuid, - new_size=new_size * units.Gi) - - self.check_response(out) - - except Exception as e: - LOG.exception('Failed to extend_volume. [%s]', - volume['name']) - raise exception.ExtendVolumeError(reason=e.msg) - - def update_migrated_volume(self, volume, new_volume): - try: - self._modify_lun_name(new_volume['name'], volume['name']) - except Exception: - reason = _('Failed to _modify_lun_name [%s].') % new_volume['name'] - raise exception.VolumeMigrationFailed(reason=reason) - - return {'_name_id': None} - - def create_snapshot(self, snapshot): - desc = '(Cinder) ' + (snapshot['id'] or '') - - try: - resp = self.exec_webapi('SYNO.Core.ISCSI.LUN', - 'take_snapshot', - 1, - src_lun_uuid=snapshot['volume']['name'], - is_app_consistent=False, - is_locked=False, - taken_by='Cinder', - description=desc) - - self.check_response(resp) - - except Exception: - with excutils.save_and_reraise_exception(): - LOG.exception('Failed to create_snapshot. 
[%s]', - snapshot['volume']['name']) - - if not self.check_value_valid(resp, - ['data', 'snapshot_uuid'], - string_types): - raise exception.MalformedResponse(cmd='create_snapshot', - reason=_('uuid not found')) - - snapshot_uuid = resp['data']['snapshot_uuid'] - if not self._check_snapshot_status_healthy(snapshot_uuid): - message = (_('Volume [%(vol)s] snapshot [%(snapshot)s] status ' - 'is not healthy.') % - {'vol': snapshot['volume']['name'], - 'snapshot': snapshot_uuid}) - raise exception.VolumeDriverException(message=message) - - metadata = snapshot['metadata'] - metadata.update({ - self.METADATA_DS_SNAPSHOT_UUID: snapshot_uuid - }) - - return {'metadata': metadata} - - def delete_snapshot(self, snapshot): - try: - ds_snapshot_uuid = (self._get_metadata_value - (snapshot, self.METADATA_DS_SNAPSHOT_UUID)) - - out = self.exec_webapi('SYNO.Core.ISCSI.LUN', - 'delete_snapshot', - 1, - snapshot_uuid=ds_snapshot_uuid, - deleted_by='Cinder') - - self.check_response(out, snapshot_id=snapshot['id']) - - except (exception.SnapshotNotFound, - exception.SnapshotMetadataNotFound): - return - except Exception: - with excutils.save_and_reraise_exception(): - LOG.exception('Failed to delete_snapshot. [%s]', - snapshot['id']) - - def create_volume_from_snapshot(self, volume, snapshot): - try: - ds_snapshot_uuid = (self._get_metadata_value - (snapshot, self.METADATA_DS_SNAPSHOT_UUID)) - - out = self.exec_webapi('SYNO.Core.ISCSI.LUN', - 'clone_snapshot', - 1, - src_lun_uuid=snapshot['volume']['name'], - snapshot_uuid=ds_snapshot_uuid, - cloned_lun_name=volume['name'], - clone_type='CINDER') - - self.check_response(out) - - except exception.SnapshotMetadataNotFound: - with excutils.save_and_reraise_exception(): - LOG.exception('Failed to get snapshot UUID. [%s]', - snapshot['id']) - except Exception: - with excutils.save_and_reraise_exception(): - LOG.exception('Failed to create_volume_from_snapshot. 
[%s]', - snapshot['id']) - - if not self._check_lun_status_normal(volume['name']): - message = (_('Volume [%(vol)s] snapshot [%(snapshot)s] status ' - 'is not healthy.') % - {'vol': snapshot['volume']['name'], - 'snapshot': ds_snapshot_uuid}) - raise exception.VolumeDriverException(message=message) - - if snapshot['volume_size'] < volume['size']: - self.extend_volume(volume, volume['size']) - - def get_iqn_and_trgid(self, location): - if not location: - err = _('Param [location] is invalid.') - raise exception.InvalidParameterValue(err=err) - - result = location.split(' ') - if len(result) < 2: - raise exception.InvalidInput(reason=location) - - data = result[0].split(',') - if len(data) < 2: - raise exception.InvalidInput(reason=location) - - iqn = result[1] - trg_id = data[1] - - return iqn, int(trg_id, 10) - - def get_iscsi_properties(self, volume): - if not volume['provider_location']: - err = _("Param volume['provider_location'] is invalid.") - raise exception.InvalidParameterValue(err=err) - - iqn, trg_id = self.get_iqn_and_trgid(volume['provider_location']) - - iscsi_properties = { - 'target_discovered': False, - 'target_iqn': iqn, - 'target_portal': '%(ip)s:%(port)d' % {'ip': self.get_ip(), - 'port': self.iscsi_port}, - 'volume_id': volume['id'], - 'access_mode': 'rw', - 'discard': False - } - ips = self.config.safe_get('iscsi_secondary_ip_addresses') - if ips: - target_portals = [iscsi_properties['target_portal']] - for ip in ips: - target_portals.append('%(ip)s:%(port)d' % - {'ip': ip, - 'port': self.iscsi_port}) - iscsi_properties.update(target_portals=target_portals) - count = len(target_portals) - iscsi_properties.update(target_iqns= - [iscsi_properties['target_iqn']] * count) - iscsi_properties.update(target_lun=0) - iscsi_properties.update(target_luns= - [iscsi_properties['target_lun']] * count) - - if 'provider_auth' in volume: - auth = volume['provider_auth'] - if auth: - try: - (auth_method, auth_username, auth_password) = auth.split() - 
iscsi_properties['auth_method'] = auth_method - iscsi_properties['auth_username'] = auth_username - iscsi_properties['auth_password'] = auth_password - except Exception: - LOG.error('Invalid provider_auth: %s', auth) - - return iscsi_properties - - def create_iscsi_export(self, volume_name, identifier): - iqn, trg_id, provider_auth = self._target_create(identifier) - self._lun_map_target(volume_name, trg_id) - - return iqn, trg_id, provider_auth - - def remove_iscsi_export(self, volume_name, trg_id): - self._lun_unmap_target(volume_name, trg_id) - self._target_delete(trg_id) diff --git a/cinder/volume/drivers/synology/synology_iscsi.py b/cinder/volume/drivers/synology/synology_iscsi.py deleted file mode 100644 index 0b7a5e1da..000000000 --- a/cinder/volume/drivers/synology/synology_iscsi.py +++ /dev/null @@ -1,171 +0,0 @@ -# Copyright (c) 2016 Synology Inc. All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from oslo_log import log as logging -from oslo_utils import excutils - -from cinder import exception -from cinder import interface -from cinder.volume import driver -from cinder.volume.drivers.synology import synology_common as common - -LOG = logging.getLogger(__name__) - - -@interface.volumedriver -class SynoISCSIDriver(driver.ISCSIDriver): - """OpenStack Cinder drivers for Synology storage. - - Version history: - 1.0.0 - Initial driver. 
Provide Cinder minimum features - """ - # ThirdPartySystems wiki page - CI_WIKI_NAME = 'Synology_DSM_CI' - VERSION = '1.0.0' - - # TODO(smcginnis) Remove driver in Queens if CI issues not addressed - SUPPORTED = False - - def __init__(self, *args, **kwargs): - super(SynoISCSIDriver, self).__init__(*args, **kwargs) - - self.common = None - self.configuration.append_config_values(common.cinder_opts) - self.stats = {} - - def do_setup(self, context): - self.common = common.SynoCommon(self.configuration, 'iscsi') - - def check_for_setup_error(self): - self.common.check_for_setup_error() - - def create_volume(self, volume): - """Creates a logical volume.""" - - self.common.create_volume(volume) - - def delete_volume(self, volume): - """Deletes a logical volume.""" - - self.common.delete_volume(volume) - - def create_cloned_volume(self, volume, src_vref): - """Creates a clone of the specified volume.""" - - self.common.create_cloned_volume(volume, src_vref) - - def extend_volume(self, volume, new_size): - """Extend an existing volume's size.""" - - if volume['size'] >= new_size: - LOG.error('New size is smaller than original size. ' - 'New: [%(new)d] Old: [%(old)d]', - {'new': new_size, - 'old': volume['size']}) - return - - self.common.extend_volume(volume, new_size) - - def create_volume_from_snapshot(self, volume, snapshot): - """Creates a volume from a snapshot.""" - - self.common.create_volume_from_snapshot(volume, snapshot) - - def update_migrated_volume(self, ctxt, volume, new_volume, status): - """Return model update for migrated volume.""" - - return self.common.update_migrated_volume(volume, new_volume) - - def create_snapshot(self, snapshot): - """Creates a snapshot.""" - - return self.common.create_snapshot(snapshot) - - def delete_snapshot(self, snapshot): - """Deletes a snapshot.""" - - self.common.delete_snapshot(snapshot) - - def get_volume_stats(self, refresh=False): - """Get volume status. - - If 'refresh' is True, run update the stats first. 
- """ - - try: - if refresh or not self.stats: - self.stats = self.common.update_volume_stats() - self.stats['driver_version'] = self.VERSION - except Exception: - with excutils.save_and_reraise_exception(): - LOG.exception('Failed to get_volume_stats.') - - return self.stats - - def ensure_export(self, context, volume): - pass - - def create_export(self, context, volume, connector): - model_update = {} - - try: - if self.common.is_lun_mapped(volume['name']): - return model_update - iqn, trg_id, provider_auth = (self.common.create_iscsi_export - (volume['name'], volume['id'])) - except Exception as e: - LOG.exception('Failed to remove_export.') - raise exception.ExportFailure(reason=e) - - model_update['provider_location'] = (self.common.get_provider_location - (iqn, trg_id)) - model_update['provider_auth'] = provider_auth - - return model_update - - def remove_export(self, context, volume): - try: - if not self.common.is_lun_mapped(volume['name']): - return - except exception.SynoLUNNotExist: - LOG.warning("Volume not exist") - return - - try: - _, trg_id = (self.common.get_iqn_and_trgid - (volume['provider_location'])) - self.common.remove_iscsi_export(volume['name'], trg_id) - except Exception as e: - LOG.exception('Failed to remove_export.') - raise exception.RemoveExportException(volume=volume, - reason=e.msg) - - def initialize_connection(self, volume, connector): - LOG.debug('iSCSI initiator: %s', connector['initiator']) - - try: - iscsi_properties = self.common.get_iscsi_properties(volume) - except Exception: - with excutils.save_and_reraise_exception(): - LOG.exception('Failed to initialize_connection.') - - volume_type = self.configuration.safe_get('iscsi_protocol') or 'iscsi' - - return { - 'driver_volume_type': volume_type, - 'data': iscsi_properties - } - - def terminate_connection(self, volume, connector, **kwargs): - pass diff --git a/cinder/volume/drivers/tegile.py b/cinder/volume/drivers/tegile.py deleted file mode 100644 index d435dfd6d..000000000 
--- a/cinder/volume/drivers/tegile.py +++ /dev/null @@ -1,662 +0,0 @@ -# Copyright (c) 2015 by Tegile Systems, Inc. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -""" -Volume driver for Tegile storage. -""" - -import ast -import json -import requests - -from oslo_config import cfg -from oslo_log import log as logging -from oslo_utils import units -import six - -from cinder import exception -from cinder.i18n import _ -from cinder import interface -from cinder import utils -from cinder.volume import configuration -from cinder.volume import driver -from cinder.volume.drivers.san import san -from cinder.volume import utils as volume_utils -from cinder.zonemanager import utils as fczm_utils - -LOG = logging.getLogger(__name__) -default_api_service = 'openstack' -TEGILE_API_PATH = 'zebi/api' -TEGILE_DEFAULT_BLOCK_SIZE = '32KB' -TEGILE_LOCAL_CONTAINER_NAME = 'Local' -DEBUG_LOGGING = False - -tegile_opts = [ - cfg.StrOpt('tegile_default_pool', - help='Create volumes in this pool'), - cfg.StrOpt('tegile_default_project', - help='Create volumes in this project')] - -CONF = cfg.CONF -CONF.register_opts(tegile_opts, group=configuration.SHARED_CONF_GROUP) - - -def debugger(func): - """Returns a wrapper that wraps func. 
- - The wrapper will log the entry and exit points of the function - """ - - def wrapper(*args, **kwds): - if DEBUG_LOGGING: - LOG.debug('Entering %(classname)s.%(funcname)s', - {'classname': args[0].__class__.__name__, - 'funcname': func.__name__}) - LOG.debug('Arguments: %(args)s, %(kwds)s', - {'args': args[1:], - 'kwds': kwds}) - f_result = func(*args, **kwds) - if DEBUG_LOGGING: - LOG.debug('Exiting %(classname)s.%(funcname)s', - {'classname': args[0].__class__.__name__, - 'funcname': func.__name__}) - LOG.debug('Results: %(result)s', - {'result': f_result}) - return f_result - - return wrapper - - -class TegileAPIExecutor(object): - def __init__(self, classname, hostname, username, password): - self._classname = classname - self._hostname = hostname - self._username = username - self._password = password - - @debugger - @utils.retry(exceptions=(requests.ConnectionError, requests.Timeout)) - def send_api_request(self, method, params=None, - request_type='post', - api_service=default_api_service, - fine_logging=DEBUG_LOGGING): - if params is not None: - params = json.dumps(params) - - url = 'https://%s/%s/%s/%s' % (self._hostname, - TEGILE_API_PATH, - api_service, - method) - if fine_logging: - LOG.debug('TegileAPIExecutor(%(classname)s) method: %(method)s, ' - 'url: %(url)s', {'classname': self._classname, - 'method': method, - 'url': url}) - if request_type == 'post': - if fine_logging: - LOG.debug('TegileAPIExecutor(%(classname)s) ' - 'method: %(method)s, payload: %(payload)s', - {'classname': self._classname, - 'method': method, - 'payload': params}) - req = requests.post(url, - data=params, - auth=(self._username, self._password), - verify=False) - else: - req = requests.get(url, - auth=(self._username, self._password), - verify=False) - - if fine_logging: - LOG.debug('TegileAPIExecutor(%(classname)s) method: %(method)s, ' - 'return code: %(retcode)s', - {'classname': self._classname, - 'method': method, - 'retcode': req}) - try: - response = req.json() - 
if fine_logging: - LOG.debug('TegileAPIExecutor(%(classname)s) ' - 'method: %(method)s, response: %(response)s', - {'classname': self._classname, - 'method': method, - 'response': response}) - except ValueError: - response = '' - req.close() - - if req.status_code != 200: - msg = _('API response: %(response)s') % {'response': response} - raise exception.TegileAPIException(msg) - - return response - - -class TegileIntelliFlashVolumeDriver(san.SanDriver): - """Tegile IntelliFlash Volume Driver.""" - - VENDOR = 'Tegile Systems Inc.' - VERSION = '1.0.0' - REQUIRED_OPTIONS = ['san_ip', 'san_login', - 'san_password', 'tegile_default_pool'] - SNAPSHOT_PREFIX = 'Manual-V-' - - # ThirdPartySystems wiki page - CI_WIKI_NAME = "Tegile_Storage_CI" - - _api_executor = None - - def __init__(self, *args, **kwargs): - self._context = None - super(TegileIntelliFlashVolumeDriver, self).__init__(*args, **kwargs) - self.configuration.append_config_values(tegile_opts) - self._protocol = 'iSCSI' # defaults to iscsi - hostname = getattr(self.configuration, 'san_ip') - username = getattr(self.configuration, 'san_login') - password = getattr(self.configuration, 'san_password') - self._default_pool = getattr(self.configuration, 'tegile_default_pool') - self._default_project = ( - getattr(self.configuration, 'tegile_default_project') or - 'openstack') - self._api_executor = TegileAPIExecutor(self.__class__.__name__, - hostname, - username, - password) - - @debugger - def do_setup(self, context): - super(TegileIntelliFlashVolumeDriver, self).do_setup(context) - self._context = context - self._check_ops(self.REQUIRED_OPTIONS, self.configuration) - - @debugger - def create_volume(self, volume): - pool = volume_utils.extract_host(volume['host'], level='pool', - default_pool_name=self._default_pool) - tegile_volume = {'blockSize': TEGILE_DEFAULT_BLOCK_SIZE, - 'datasetPath': '%s/%s/%s' % - (pool, - TEGILE_LOCAL_CONTAINER_NAME, - self._default_project), - 'local': 'true', - 'name': volume['name'], - 
'poolName': '%s' % pool, - 'projectName': '%s' % self._default_project, - 'protocol': self._protocol, - 'thinProvision': 'true', - 'volSize': volume['size'] * units.Gi} - params = list() - params.append(tegile_volume) - params.append(True) - - self._api_executor.send_api_request(method='createVolume', - params=params) - - LOG.info("Created volume %(volname)s, volume id %(volid)s.", - {'volname': volume['name'], 'volid': volume['id']}) - - return self.get_additional_info(volume, pool, self._default_project) - - @debugger - def delete_volume(self, volume): - """Deletes a snapshot.""" - params = list() - pool, project, volume_name = self._get_pool_project_volume_name(volume) - params.append('%s/%s/%s/%s' % (pool, - TEGILE_LOCAL_CONTAINER_NAME, - project, - volume_name)) - params.append(True) - params.append(False) - - self._api_executor.send_api_request('deleteVolume', params) - - @debugger - def create_snapshot(self, snapshot): - """Creates a snapshot.""" - snap_name = snapshot['name'] - display_list = [getattr(snapshot, 'display_name', ''), - getattr(snapshot, 'display_description', '')] - snap_description = ':'.join(filter(None, display_list)) - # Limit to 254 characters - snap_description = snap_description[:254] - - pool, project, volume_name = self._get_pool_project_volume_name( - snapshot['volume']) - - volume = {'blockSize': TEGILE_DEFAULT_BLOCK_SIZE, - 'datasetPath': '%s/%s/%s' % - (pool, - TEGILE_LOCAL_CONTAINER_NAME, - project), - 'local': 'true', - 'name': volume_name, - 'poolName': '%s' % pool, - 'projectName': '%s' % project, - 'protocol': self._protocol, - 'thinProvision': 'true', - 'volSize': snapshot['volume']['size'] * units.Gi} - params = list() - params.append(volume) - params.append(snap_name) - params.append(False) - - LOG.info('Creating snapshot for volume_name=%(vol)s' - ' snap_name=%(name)s snap_description=%(desc)s', - {'vol': volume_name, - 'name': snap_name, - 'desc': snap_description}) - - 
self._api_executor.send_api_request('createVolumeSnapshot', params) - - @debugger - def delete_snapshot(self, snapshot): - """Deletes a snapshot.""" - params = list() - pool, project, volume_name = self._get_pool_project_volume_name( - snapshot['volume']) - params.append('%s/%s/%s/%s@%s%s' % (pool, - TEGILE_LOCAL_CONTAINER_NAME, - project, - volume_name, - self.SNAPSHOT_PREFIX, - snapshot['name'])) - params.append(False) - - self._api_executor.send_api_request('deleteVolumeSnapshot', params) - - @debugger - def create_volume_from_snapshot(self, volume, snapshot): - """Creates a volume from snapshot.""" - params = list() - pool, project, volume_name = self._get_pool_project_volume_name( - snapshot['volume']) - - params.append('%s/%s/%s/%s@%s%s' % (pool, - TEGILE_LOCAL_CONTAINER_NAME, - project, - volume_name, - self.SNAPSHOT_PREFIX, - snapshot['name'])) - params.append(volume['name']) - params.append(True) - params.append(True) - - self._api_executor.send_api_request('cloneVolumeSnapshot', params) - return self.get_additional_info(volume, pool, project) - - @debugger - def create_cloned_volume(self, volume, src_vref): - """Creates a clone of the specified volume.""" - pool, project, volume_name = self._get_pool_project_volume_name( - src_vref) - data_set_path = '%s/%s/%s' % (pool, - TEGILE_LOCAL_CONTAINER_NAME, - project) - source_volume = {'blockSize': TEGILE_DEFAULT_BLOCK_SIZE, - 'datasetPath': data_set_path, - 'local': 'true', - 'name': volume_name, - 'poolName': '%s' % pool, - 'projectName': '%s' % project, - 'protocol': self._protocol, - 'thinProvision': 'true', - 'volSize': src_vref['size'] * units.Gi} - - dest_volume = {'blockSize': TEGILE_DEFAULT_BLOCK_SIZE, - 'datasetPath': data_set_path, - # clone can reside only in the source project - 'local': 'true', - 'name': volume['name'], - 'poolName': '%s' % pool, - 'projectName': '%s' % project, - 'protocol': self._protocol, - 'thinProvision': 'true', - 'volSize': volume['size'] * units.Gi} - - params = list() - 
params.append(source_volume) - params.append(dest_volume) - - self._api_executor.send_api_request(method='createClonedVolume', - params=params) - return self.get_additional_info(volume, pool, project) - - @debugger - def get_volume_stats(self, refresh=False): - """Get volume status. - - If 'refresh' is True, run update first. - The name is a bit misleading as - the majority of the data here is cluster - data - """ - if refresh: - try: - self._update_volume_stats() - except Exception: - pass - - return self._stats - - @debugger - def _update_volume_stats(self): - """Retrieves stats info from volume group.""" - - try: - data = self._api_executor.send_api_request(method='getArrayStats', - request_type='get', - fine_logging=False) - # fixing values coming back here as String to float - data['total_capacity_gb'] = float(data.get('total_capacity_gb', 0)) - data['free_capacity_gb'] = float(data.get('free_capacity_gb', 0)) - for pool in data.get('pools', []): - pool['total_capacity_gb'] = float( - pool.get('total_capacity_gb', 0)) - pool['free_capacity_gb'] = float( - pool.get('free_capacity_gb', 0)) - pool['allocated_capacity_gb'] = float( - pool.get('allocated_capacity_gb', 0)) - - data['volume_backend_name'] = getattr(self.configuration, - 'volume_backend_name') - data['vendor_name'] = self.VENDOR - data['driver_version'] = self.VERSION - data['storage_protocol'] = self._protocol - - self._stats = data - except Exception as e: - LOG.warning('TegileIntelliFlashVolumeDriver(%(clsname)s) ' - '_update_volume_stats failed: %(error)s', - {'clsname': self.__class__.__name__, - 'error': e}) - - @debugger - def get_pool(self, volume): - """Returns pool name where volume resides. - - :param volume: The volume hosted by the driver. - :return: Name of the pool where given volume is hosted. 
- """ - pool = volume_utils.extract_host(volume['host'], level='pool', - default_pool_name=self._default_pool) - return pool - - @debugger - def extend_volume(self, volume, new_size): - params = list() - pool, project, volume_name = self._get_pool_project_volume_name(volume) - params.append('%s/%s/%s/%s' % (pool, - TEGILE_LOCAL_CONTAINER_NAME, - project, - volume_name)) - vol_size = six.text_type(new_size) - params.append(vol_size) - params.append('GB') - self._api_executor.send_api_request(method='resizeVolume', - params=params) - - @debugger - def manage_existing(self, volume, existing_ref): - volume['name_id'] = existing_ref['name'] - pool, project, volume_name = self._get_pool_project_volume_name(volume) - additional_info = self.get_additional_info(volume, pool, project) - additional_info['_name_id'] = existing_ref['name'], - return additional_info - - @debugger - def manage_existing_get_size(self, volume, existing_ref): - params = list() - pool, project, volume_name = self._get_pool_project_volume_name(volume) - params.append('%s/%s/%s/%s' % (pool, - TEGILE_LOCAL_CONTAINER_NAME, - project, - existing_ref['name'])) - volume_size = self._api_executor.send_api_request( - method='getVolumeSizeinGB', - params=params) - - return volume_size - - @debugger - def _get_pool_project_volume_name(self, volume): - pool = volume_utils.extract_host(volume['host'], level='pool', - default_pool_name=self._default_pool) - try: - project = volume['metadata']['project'] - except (AttributeError, TypeError, KeyError): - project = self._default_project - - if volume['_name_id'] is not None: - volume_name = volume['_name_id'] - else: - volume_name = volume['name'] - - return pool, project, volume_name - - @debugger - def get_additional_info(self, volume, pool, project): - try: - metadata = self._get_volume_metadata(volume) - except Exception: - metadata = dict() - metadata['pool'] = pool - metadata['project'] = project - return {'metadata': metadata} - - @debugger - def 
_get_volume_metadata(self, volume): - volume_metadata = {} - if 'volume_metadata' in volume: - for metadata in volume['volume_metadata']: - volume_metadata[metadata['key']] = metadata['value'] - if 'metadata' in volume: - metadata = volume['metadata'] - for key in metadata: - volume_metadata[key] = metadata[key] - return volume_metadata - - @debugger - def _check_ops(self, required_ops, configuration): - """Ensures that the options we care about are set.""" - for attr in required_ops: - if not getattr(configuration, attr, None): - raise exception.InvalidInput(reason=_('%(attr)s is not ' - 'set.') % {'attr': attr}) - - -@interface.volumedriver -class TegileISCSIDriver(TegileIntelliFlashVolumeDriver, san.SanISCSIDriver): - """Tegile ISCSI Driver.""" - - def __init__(self, *args, **kwargs): - super(TegileISCSIDriver, self).__init__(*args, **kwargs) - self._protocol = 'iSCSI' - - @debugger - def do_setup(self, context): - super(TegileISCSIDriver, self).do_setup(context) - - @debugger - def initialize_connection(self, volume, connector): - """Driver entry point to attach a volume to an instance.""" - - if getattr(self.configuration, 'use_chap_auth', False): - chap_username = getattr(self.configuration, 'chap_username', '') - chap_password = getattr(self.configuration, 'chap_password', '') - else: - chap_username = '' - chap_password = '' - - if volume['provider_location'] is None: - params = list() - pool, project, volume_name = ( - self._get_pool_project_volume_name(volume)) - params.append('%s/%s/%s/%s' % (pool, - TEGILE_LOCAL_CONTAINER_NAME, - project, - volume_name)) - initiator_info = { - 'initiatorName': connector['initiator'], - 'chapUserName': chap_username, - 'chapSecret': chap_password - } - params.append(initiator_info) - mapping_info = self._api_executor.send_api_request( - method='getISCSIMappingForVolume', - params=params) - target_portal = mapping_info['target_portal'] - target_iqn = mapping_info['target_iqn'] - target_lun = mapping_info['target_lun'] - 
else: - (target_portal, target_iqn, target_lun) = ( - volume['provider_location'].split()) - - connection_data = dict() - connection_data['target_portal'] = target_portal - connection_data['target_iqn'] = target_iqn - connection_data['target_lun'] = int(target_lun) - connection_data['target_discovered'] = False, - connection_data['volume_id'] = volume['id'], - connection_data['discard'] = False - if getattr(self.configuration, 'use_chap_auth', False): - connection_data['auth_method'] = 'CHAP' - connection_data['auth_username'] = chap_username - connection_data['auth_password'] = chap_password - return { - 'driver_volume_type': 'iscsi', - 'data': connection_data - } - - @debugger - def terminate_connection(self, volume, connector, **kwargs): - pass - - @debugger - def create_export(self, context, volume, connector): - """Driver entry point to get the export info for a new volume.""" - params = list() - pool, project, volume_name = self._get_pool_project_volume_name(volume) - params.append('%s/%s/%s/%s' % (pool, - TEGILE_LOCAL_CONTAINER_NAME, - project, - volume_name)) - if getattr(self.configuration, 'use_chap_auth', False): - chap_username = getattr(self.configuration, 'chap_username', '') - chap_password = getattr(self.configuration, 'chap_password', '') - else: - chap_username = '' - chap_password = '' - - initiator_info = { - 'initiatorName': connector['initiator'], - 'chapUserName': chap_username, - 'chapSecret': chap_password - } - params.append(initiator_info) - mapping_info = self._api_executor.send_api_request( - method='getISCSIMappingForVolume', - params=params) - target_portal = mapping_info['target_portal'] - target_iqn = mapping_info['target_iqn'] - target_lun = int(mapping_info['target_lun']) - - provider_location = '%s %s %s' % (target_portal, - target_iqn, - target_lun) - if getattr(self.configuration, 'use_chap_auth', False): - provider_auth = ('CHAP %s %s' % (chap_username, - chap_password)) - else: - provider_auth = None - return ( - 
{'provider_location': provider_location, - 'provider_auth': provider_auth}) - - -@interface.volumedriver -class TegileFCDriver(TegileIntelliFlashVolumeDriver, - driver.FibreChannelDriver): - """Tegile FC driver.""" - - def __init__(self, *args, **kwargs): - super(TegileFCDriver, self).__init__(*args, **kwargs) - self._protocol = 'FC' - - @debugger - def do_setup(self, context): - super(TegileFCDriver, self).do_setup(context) - - @fczm_utils.add_fc_zone - @debugger - def initialize_connection(self, volume, connector): - """Initializes the connection and returns connection info.""" - - params = list() - pool, project, volume_name = self._get_pool_project_volume_name(volume) - params.append('%s/%s/%s/%s' % (pool, - TEGILE_LOCAL_CONTAINER_NAME, - project, - volume_name)) - wwpns = connector['wwpns'] - - connectors = ','.join(wwpns) - - params.append(connectors) - target_info = self._api_executor.send_api_request( - method='getFCPortsForVolume', - params=params) - initiator_target_map = target_info['initiator_target_map'] - connection_data = { - 'driver_volume_type': 'fibre_channel', - 'data': { - 'encrypted': False, - 'target_discovered': False, - 'target_lun': int(target_info['target_lun']), - 'target_wwn': ast.literal_eval(target_info['target_wwn']), - 'initiator_target_map': ast.literal_eval(initiator_target_map) - } - } - - return connection_data - - @fczm_utils.remove_fc_zone - @debugger - def terminate_connection(self, volume, connector, force=False, **kwargs): - - params = list() - pool, project, volume_name = self._get_pool_project_volume_name(volume) - params.append('%s/%s/%s/%s' % (pool, - TEGILE_LOCAL_CONTAINER_NAME, - project, - volume_name)) - wwpns = connector['wwpns'] - - connectors = ','.join(wwpns) - - params.append(connectors) - target_info = self._api_executor.send_api_request( - method='getFCPortsForVolume', - params=params) - initiator_target_map = target_info['initiator_target_map'] - - connection_data = { - 'data': { - 'target_wwn': 
ast.literal_eval(target_info['target_wwn']), - 'initiator_target_map': ast.literal_eval(initiator_target_map) - } - } - - return connection_data diff --git a/cinder/volume/drivers/tintri.py b/cinder/volume/drivers/tintri.py deleted file mode 100644 index 63bf80e9b..000000000 --- a/cinder/volume/drivers/tintri.py +++ /dev/null @@ -1,968 +0,0 @@ -# Copyright (c) 2015 Tintri. All rights reserved. -# Copyright (c) 2012 NetApp, Inc. All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -""" -Volume driver for Tintri storage. 
-""" - -import datetime -import json -import math -import os -import re -import socket - -from oslo_config import cfg -from oslo_log import log as logging -from oslo_service import loopingcall -from oslo_utils import units -import requests -from six.moves import urllib - -from cinder import exception -from cinder.i18n import _ -from cinder.image import image_utils -from cinder import interface -from cinder import utils -from cinder.volume import configuration -from cinder.volume import driver -from cinder.volume.drivers import nfs - -LOG = logging.getLogger(__name__) -default_api_version = 'v310' -img_prefix = 'image-' -tintri_path = '/tintri/' - - -tintri_opts = [ - cfg.StrOpt('tintri_server_hostname', - help='The hostname (or IP address) for the storage system'), - cfg.StrOpt('tintri_server_username', - help='User name for the storage system'), - cfg.StrOpt('tintri_server_password', - help='Password for the storage system', - secret=True), - cfg.StrOpt('tintri_api_version', - default=default_api_version, - help='API version for the storage system'), - cfg.IntOpt('tintri_image_cache_expiry_days', - default=30, - help='Delete unused image snapshots older than mentioned days'), - cfg.StrOpt('tintri_image_shares_config', - help='Path to image nfs shares file'), -] - -CONF = cfg.CONF -CONF.register_opts(tintri_opts, group=configuration.SHARED_CONF_GROUP) - - -@interface.volumedriver -class TintriDriver(driver.ManageableVD, - driver.CloneableImageVD, - nfs.NfsDriver): - """Base class for Tintri driver. - - Version History - - .. 
code-block:: none - - 2.1.0.1 - Liberty driver - 2.2.0.1 - Mitaka driver - -- Retype - -- Image cache clean up - -- Direct image clone fix - """ - - VENDOR = 'Tintri' - VERSION = '2.2.0.1' - # ThirdPartySystems wiki page - CI_WIKI_NAME = "Tintri_CI" - - REQUIRED_OPTIONS = ['tintri_server_hostname', 'tintri_server_username', - 'tintri_server_password'] - - def __init__(self, *args, **kwargs): - self._execute = None - self._context = None - super(TintriDriver, self).__init__(*args, **kwargs) - self._execute_as_root = True - self.configuration.append_config_values(tintri_opts) - self.cache_cleanup = False - self._mounted_image_shares = [] - - def do_setup(self, context): - self._image_shares_config = getattr(self.configuration, - 'tintri_image_shares_config') - super(TintriDriver, self).do_setup(context) - self._context = context - self._check_ops(self.REQUIRED_OPTIONS, self.configuration) - self._hostname = getattr(self.configuration, 'tintri_server_hostname') - self._username = getattr(self.configuration, 'tintri_server_username') - self._password = getattr(self.configuration, 'tintri_server_password') - self._api_version = getattr(self.configuration, 'tintri_api_version') - self._image_cache_expiry = getattr(self.configuration, - 'tintri_image_cache_expiry_days') - - def get_pool(self, volume): - """Returns pool name where volume resides. - - :param volume: The volume hosted by the driver. - :return: Name of the pool where given volume is hosted. 
- """ - return volume['provider_location'] - - def _get_client(self): - """Returns a Tintri REST client connection.""" - return TClient(self._hostname, self._username, self._password, - self._api_version) - - def create_snapshot(self, snapshot): - """Creates a snapshot.""" - (__, path) = self._get_export_ip_path(snapshot.volume_id) - volume_path = '%s/%s' % (path, snapshot.volume_name) - volume_path = '%(path)s/%(volume_name)s' % { - 'path': path, - 'volume_name': snapshot.volume_name, - } - model_update = {} - with self._get_client() as c: - provider_id = c.create_snapshot(volume_path, - snapshot.volume.display_name or - snapshot.volume_name, - snapshot.volume_id, - snapshot.display_name or - snapshot.name) - snapshot.provider_id = provider_id - # Store Tintri snapshot ID as snapshot provider_id - model_update['provider_id'] = provider_id - - return model_update - - def delete_snapshot(self, snapshot): - """Deletes a snapshot.""" - if snapshot.provider_id: - with self._get_client() as c: - c.delete_snapshot(snapshot.provider_id) - else: - LOG.info('Snapshot %s not found', snapshot.name) - - def _check_ops(self, required_ops, configuration): - """Ensures that the options we care about are set.""" - for op in required_ops: - if not getattr(configuration, op): - LOG.error('Configuration value %s is not set.', op) - raise exception.InvalidConfigurationValue(option=op, - value=None) - - def create_volume_from_snapshot(self, volume, snapshot): - """Creates a volume from snapshot.""" - vol_size = volume.size - snap_size = snapshot.volume_size - - self._clone_snapshot(snapshot.provider_id, volume.name, - snapshot.volume_id) - share = self._get_provider_location(snapshot.volume_id) - volume['provider_location'] = share - path = self.local_path(volume) - - self._set_rw_permissions(path) - if vol_size != snap_size: - try: - self.extend_volume(volume, vol_size) - except Exception: - LOG.error('Resizing %s failed. 
Cleaning volume.', - volume.name) - self._delete_file(path) - raise - - return {'provider_location': volume['provider_location']} - - def _clone_snapshot(self, snapshot_id, clone_name, volume_id, share=None): - """Clones volume from snapshot.""" - (host, path) = self._get_export_ip_path(volume_id, share) - clone_path = '%s/%s-d' % (path, clone_name) - with self._get_client() as c: - c.clone_volume(snapshot_id, clone_path) - - self._move_cloned_volume(clone_name, volume_id, share) - - def _move_cloned_volume(self, clone_name, volume_id, share=None): - local_path = self._get_local_path(volume_id, share) - source_path = os.path.join(local_path, clone_name + '-d') - if self._is_volume_present(source_path): - source_file = os.listdir(source_path)[0] - source = os.path.join(source_path, source_file) - target = os.path.join(local_path, clone_name) - moved = self._move_file(source, target) - self._execute('rm', '-rf', source_path, - run_as_root=self._execute_as_root) - if not moved: - msg = (_('Failed to move volume %s.') % source) - raise exception.VolumeDriverException(msg) - else: - raise exception.VolumeDriverException( - _('Volume %s not found.') % source_path) - - def _clone_volume_to_volume(self, volume_name, clone_name, - volume_display_name, volume_id, - share=None, dst=None, image_id=None): - """Creates volume snapshot then clones volume.""" - (__, path) = self._get_export_ip_path(volume_id, share) - volume_path = '%s/%s' % (path, volume_name) - if dst: - (___, dst_path) = self._get_export_ip_path(None, dst) - clone_path = '%s/%s-d' % (dst_path, clone_name) - else: - clone_path = '%s/%s-d' % (path, clone_name) - with self._get_client() as c: - if share and image_id: - snapshot_id = self._create_image_snapshot(volume_name, - share, - image_id, - volume_display_name) - else: - snapshot_id = c.create_snapshot( - volume_path, volume_display_name, volume_id, volume_name, - deletion_policy='DELETE_ON_ZERO_CLONE_REFERENCES') - c.clone_volume(snapshot_id, clone_path) - - 
self._move_cloned_volume(clone_name, volume_id, dst or share) - - @utils.synchronized('cache_cleanup') - def _initiate_image_cache_cleanup(self): - if self.cache_cleanup: - LOG.debug('Image cache cleanup in progress.') - return - else: - self.cache_cleanup = True - timer = loopingcall.FixedIntervalLoopingCall( - self._cleanup_cache) - timer.start(interval=None) - return timer - - def _cleanup_cache(self): - LOG.debug('Cache cleanup: starting.') - try: - # Cleanup used cached image snapshots 30 days and older - t = datetime.datetime.utcnow() - datetime.timedelta( - days=self._image_cache_expiry) - date = t.strftime("%Y-%m-%dT%H:%M:%S") - with self._get_client() as c: - # Get eligible snapshots to clean - image_snaps = c.get_image_snapshots_to_date(date) - if image_snaps: - for snap in image_snaps: - uuid = snap['uuid']['uuid'] - LOG.debug( - 'Cache cleanup: deleting image snapshot %s', uuid) - try: - c.delete_snapshot(uuid) - except Exception: - LOG.exception('Unexpected exception during ' - 'cache cleanup of snapshot %s', - uuid) - else: - LOG.debug('Cache cleanup: nothing to clean') - finally: - self.cache_cleanup = False - LOG.debug('Cache cleanup: finished') - raise loopingcall.LoopingCallDone() - - def _update_volume_stats(self): - """Retrieves stats info from volume group.""" - - data = {} - backend_name = self.configuration.safe_get('volume_backend_name') - data['volume_backend_name'] = backend_name or self.VENDOR - data['vendor_name'] = self.VENDOR - data['driver_version'] = self.get_version() - data['storage_protocol'] = self.driver_volume_type - - self._ensure_shares_mounted() - self._initiate_image_cache_cleanup() - pools = [] - for share in self._mounted_shares: - pool = dict() - capacity, free, used = self._get_capacity_info(share) - pool['pool_name'] = share - pool['total_capacity_gb'] = capacity / float(units.Gi) - pool['free_capacity_gb'] = free / float(units.Gi) - pool['reserved_percentage'] = 0 - pool['QoS_support'] = True - pools.append(pool) - 
data['pools'] = pools - - self._stats = data - - def _get_provider_location(self, volume_id): - """Returns provider location for given volume.""" - volume = self.db.volume_get(self._context, volume_id) - return volume.provider_location - - def _get_host_ip(self, volume_id): - """Returns IP address for the given volume.""" - return self._get_provider_location(volume_id).split(':')[0] - - def _get_export_path(self, volume_id): - """Returns NFS export path for the given volume.""" - return self._get_provider_location(volume_id).split(':')[1] - - def _resolve_hostname(self, hostname): - """Resolves host name to IP address.""" - res = socket.getaddrinfo(hostname, None)[0] - family, socktype, proto, canonname, sockaddr = res - return sockaddr[0] - - def _is_volume_present(self, volume_path): - """Checks if volume exists.""" - try: - self._execute('ls', volume_path, - run_as_root=self._execute_as_root) - except Exception: - return False - return True - - def _get_volume_path(self, nfs_share, volume_name): - """Gets local volume path for given volume name on given nfs share.""" - return os.path.join(self._get_mount_point_for_share(nfs_share), - volume_name) - - def create_cloned_volume(self, volume, src_vref): - """Creates a clone of the specified volume.""" - vol_size = volume.size - src_vol_size = src_vref.size - self._clone_volume_to_volume(src_vref.name, volume.name, - src_vref.display_name, - src_vref.id) - - share = self._get_provider_location(src_vref.id) - volume['provider_location'] = share - path = self.local_path(volume) - - self._set_rw_permissions(path) - if vol_size != src_vol_size: - try: - self.extend_volume(volume, vol_size) - except Exception: - LOG.error('Resizing %s failed. 
Cleaning volume.', - volume.name) - self._delete_file(path) - raise - - return {'provider_location': volume['provider_location']} - - def copy_image_to_volume(self, context, volume, image_service, image_id): - """Fetches the image from image_service and write it to the volume.""" - super(TintriDriver, self).copy_image_to_volume( - context, volume, image_service, image_id) - LOG.info('Copied image to volume %s using regular download.', - volume['name']) - self._create_image_snapshot(volume['name'], - volume['provider_location'], image_id, - img_prefix + image_id) - - def _create_image_snapshot(self, volume_name, share, image_id, image_name): - """Creates an image snapshot.""" - snapshot_name = img_prefix + image_id - LOG.info('Creating image snapshot %s', snapshot_name) - (host, path) = self._get_export_ip_path(None, share) - volume_path = '%s/%s' % (path, volume_name) - - @utils.synchronized(snapshot_name, external=True) - def _do_snapshot(): - with self._get_client() as c: - snapshot_id = c.get_snapshot(image_id) - if not snapshot_id: - snapshot_id = c.create_snapshot(volume_path, image_name, - image_id, snapshot_name) - return snapshot_id - - try: - return _do_snapshot() - except Exception as e: - LOG.warning('Exception while creating image %(image_id)s ' - 'snapshot. 
Exception: %(exc)s', - {'image_id': image_id, 'exc': e}) - - def _find_image_snapshot(self, image_id): - """Finds image snapshot.""" - with self._get_client() as c: - return c.get_snapshot(image_id) - - def _clone_image_snapshot(self, snapshot_id, dst, share): - """Clones volume from image snapshot.""" - file_path = self._get_volume_path(share, dst) - if not os.path.exists(file_path): - LOG.info('Cloning from snapshot to destination %s', dst) - self._clone_snapshot(snapshot_id, dst, volume_id=None, - share=share) - - def _delete_file(self, path): - """Deletes file from disk and return result as boolean.""" - try: - LOG.debug('Deleting file at path %s', path) - cmd = ['rm', '-f', path] - self._execute(*cmd, run_as_root=self._execute_as_root) - return True - except Exception as e: - LOG.warning('Exception during deleting %s', e) - return False - - def _move_file(self, source_path, dest_path): - """Moves source to destination.""" - - @utils.synchronized(dest_path, external=True) - def _do_move(src, dst): - if os.path.exists(dst): - LOG.warning('Destination %s already exists.', dst) - return False - self._execute('mv', src, dst, run_as_root=self._execute_as_root) - return True - - try: - return _do_move(source_path, dest_path) - except Exception as e: - LOG.warning('Exception moving file %(src)s. Message: %(e)s', - {'src': source_path, 'e': e}) - return False - - def clone_image(self, context, volume, - image_location, image_meta, - image_service): - """Creates a volume efficiently from an existing image. - - image_location is a string whose format depends on the - image service backend in use. The driver should use it - to determine whether cloning is possible. - - Returns a dict of volume properties eg. provider_location, - boolean indicating whether cloning occurred. 
- """ - image_name = image_meta['name'] - image_id = image_meta['id'] - if 'properties' in image_meta: - provider_location = image_meta['properties'].get( - 'provider_location') - if provider_location: - image_location = (provider_location, None) - cloned = False - post_clone = False - try: - snapshot_id = self._find_image_snapshot(image_id) - if snapshot_id: - cloned = self._clone_from_snapshot(volume, image_id, - snapshot_id) - else: - cloned = self._direct_clone(volume, image_location, - image_id, image_name) - if cloned: - post_clone = self._post_clone_image(volume) - except Exception as e: - LOG.info('Image cloning unsuccessful for image ' - '%(image_id)s. Message: %(msg)s', - {'image_id': image_id, 'msg': e}) - vol_path = self.local_path(volume) - volume['provider_location'] = None - if os.path.exists(vol_path): - self._delete_file(vol_path) - finally: - cloned = cloned and post_clone - share = volume['provider_location'] if cloned else None - bootable = True if cloned else False - return {'provider_location': share, 'bootable': bootable}, cloned - - def _clone_from_snapshot(self, volume, image_id, snapshot_id): - """Clones a copy from image snapshot.""" - cloned = False - LOG.info('Cloning image %s from snapshot.', image_id) - for share in self._mounted_shares: - # Repeat tries in other shares if failed in some - LOG.debug('Image share: %s', share) - if (share and - self._is_share_vol_compatible(volume, share)): - try: - self._clone_image_snapshot(snapshot_id, volume['name'], - share) - cloned = True - volume['provider_location'] = share - break - except Exception: - LOG.warning('Unexpected exception during ' - 'image cloning in share %s', share) - return cloned - - def _direct_clone(self, volume, image_location, image_id, image_name): - """Clones directly in nfs share.""" - LOG.info('Checking image clone %s from glance share.', image_id) - cloned = False - image_location = self._get_image_nfs_url(image_location) - share = 
self._is_cloneable_share(image_location) - run_as_root = self._execute_as_root - - dst_share = None - for dst in self._mounted_shares: - if dst and self._is_share_vol_compatible(volume, dst): - dst_share = dst - LOG.debug('Image dst share: %s', dst) - break - if not dst_share: - return cloned - - LOG.debug('Share is cloneable %s', dst_share) - volume['provider_location'] = dst_share - (__, ___, img_file) = image_location.rpartition('/') - dir_path = self._get_mount_point_for_share(share) - dst_path = self._get_mount_point_for_share(dst_share) - img_path = '%s/%s' % (dir_path, img_file) - img_info = image_utils.qemu_img_info(img_path, - run_as_root=run_as_root) - if img_info.file_format == 'raw': - LOG.debug('Image is raw %s', image_id) - self._clone_volume_to_volume( - img_file, volume['name'], image_name, - volume_id=None, share=share, dst=dst_share, image_id=image_id) - cloned = True - else: - LOG.info('Image will locally be converted to raw %s', - image_id) - dst = '%s/%s' % (dst_path, volume['name']) - image_utils.convert_image(img_path, dst, 'raw', - run_as_root=run_as_root) - data = image_utils.qemu_img_info(dst, run_as_root=run_as_root) - if data.file_format != "raw": - raise exception.InvalidResults( - _('Converted to raw, but ' - 'format is now %s') % data.file_format) - else: - cloned = True - self._create_image_snapshot( - volume['name'], volume['provider_location'], - image_id, image_name) - return cloned - - def _post_clone_image(self, volume): - """Performs operations post image cloning.""" - LOG.info('Performing post clone for %s', volume['name']) - vol_path = self.local_path(volume) - self._set_rw_permissions(vol_path) - self._resize_image_file(vol_path, volume['size']) - return True - - def _resize_image_file(self, path, new_size): - """Resizes the image file on share to new size.""" - LOG.debug('Checking file for resize.') - if self._is_file_size_equal(path, new_size): - return - else: - LOG.info('Resizing file to %sG', new_size) - 
image_utils.resize_image(path, new_size, - run_as_root=self._execute_as_root) - if self._is_file_size_equal(path, new_size): - return - else: - raise exception.InvalidResults( - _('Resizing image file failed.')) - - def _is_cloneable_share(self, image_location): - """Finds if the image at location is cloneable.""" - conn, dr = self._check_nfs_path(image_location) - return self._is_share_in_use(conn, dr) - - def _check_nfs_path(self, image_location): - """Checks if the nfs path format is matched. - - WebNFS url format with relative-path is supported. - Accepting all characters in path-names and checking against - the mounted shares which will contain only allowed path segments. - Returns connection and dir details. - """ - conn, dr = None, None - if image_location: - nfs_loc_pattern = \ - r'^nfs://(([\w\-\.]+:[\d]+|[\w\-\.]+)(/[^/].*)*(/[^/\\\\]+))$' - matched = re.match(nfs_loc_pattern, image_location) - if not matched: - LOG.debug('Image location not in the expected format %s', - image_location) - else: - conn = matched.group(2) - dr = matched.group(3) or '/' - return conn, dr - - def _is_share_in_use(self, conn, dr): - """Checks if share is cinder mounted and returns it.""" - try: - if conn: - host = conn.split(':')[0] - ip = self._resolve_hostname(host) - for sh in self._mounted_shares + self._mounted_image_shares: - sh_ip = self._resolve_hostname(sh.split(':')[0]) - sh_exp = sh.split(':')[1] - if sh_ip == ip and sh_exp == dr: - LOG.debug('Found share match %s', sh) - return sh - except Exception: - LOG.warning('Unexpected exception while listing used share.') - - def _get_image_nfs_url(self, image_location): - """Gets direct url for nfs backend. - - It creates direct url from image_location - which is a tuple with direct_url and locations. - Returns url with nfs scheme if nfs store else returns url. - It needs to be verified by backend before use. 
- """ - - direct_url, locations = image_location - if not direct_url and not locations: - raise exception.NotFound(_('Image location not present.')) - - # Locations will be always a list of one until - # bp multiple-image-locations is introduced - if not locations: - return direct_url - location = locations[0] - url = location['url'] - if not location['metadata']: - return url - location_type = location['metadata'].get('type') - if not location_type or location_type.lower() != "nfs": - return url - share_location = location['metadata'].get('share_location') - mount_point = location['metadata'].get('mount_point') - if not share_location or not mount_point: - return url - url_parse = urllib.parse.urlparse(url) - abs_path = os.path.join(url_parse.netloc, url_parse.path) - rel_path = os.path.relpath(abs_path, mount_point) - direct_url = "%s/%s" % (share_location, rel_path) - return direct_url - - def _is_share_vol_compatible(self, volume, share): - """Checks if share is compatible with volume to host it.""" - return self._is_share_eligible(share, volume['size']) - - def _can_share_hold_size(self, share, size): - """Checks if volume can hold image with size.""" - _tot_size, tot_available, _tot_allocated = self._get_capacity_info( - share) - if tot_available < size: - msg = _('Container size smaller than required file size.') - raise exception.VolumeDriverException(msg) - - def _get_export_ip_path(self, volume_id=None, share=None): - """Returns export ip and path. - - One of volume id or share is used to return the values. - """ - - if volume_id: - host_ip = self._get_host_ip(volume_id) - export_path = self._get_export_path(volume_id) - elif share: - host_ip = share.split(':')[0] - export_path = share.split(':')[1] - else: - raise exception.InvalidInput( - reason=_('A volume ID or share was not specified.')) - return host_ip, export_path - - def _get_local_path(self, volume_id=None, share=None): - """Returns local path. 
- - One of volume id or share is used to return the values. - """ - - if volume_id: - local_path = self._get_mount_point_for_share( - self._get_provider_location(volume_id)) - elif share: - local_path = self._get_mount_point_for_share(share) - else: - raise exception.InvalidInput( - reason=_('A volume ID or share was not specified.')) - return local_path - - def manage_existing(self, volume, existing_ref): - """Brings an existing backend storage object under Cinder management. - - existing_ref is passed straight through from the API request's - manage_existing_ref value, and it is up to the driver how this should - be interpreted. It should be sufficient to identify a storage object - that the driver should somehow associate with the newly-created cinder - volume structure. - - :param volume: Cinder volume to manage - :param existing_ref: Driver-specific information used to identify a - volume - """ - nfs_share, nfs_mount, volume_name = self._get_share_mount(existing_ref) - - LOG.debug('Managing volume %(vol)s with ref %(ref)s', - {'vol': volume['id'], 'ref': existing_ref}) - if volume_name != volume['name']: - src = os.path.join(nfs_mount, volume_name) - dst = os.path.join(nfs_mount, volume['name']) - if not self._move_file(src, dst): - msg = (_('Failed to manage volume %s.') % - existing_ref['source-name']) - raise exception.VolumeDriverException(msg) - self._set_rw_permissions(dst) - - LOG.info('Manage volume %s', volume['name']) - return {'provider_location': nfs_share} - - def manage_existing_get_size(self, volume, existing_ref): - """Returns size of volume to be managed by manage_existing. - - When calculating the size, round up to the next GB. 
- - :param volume: Cinder volume to manage - :param existing_ref: Driver-specific information used to identify a - volume - """ - nfs_share, nfs_mount, volume_name = self._get_share_mount(existing_ref) - - try: - volume_path = os.path.join(nfs_mount, volume_name) - vol_size = int(math.ceil(float(utils.get_file_size(volume_path)) / - units.Gi)) - except OSError: - msg = (_('Failed to get size of volume %s') % - existing_ref['source-name']) - raise exception.VolumeDriverException(msg) - - return vol_size - - def unmanage(self, volume): - """Removes the specified volume from Cinder management. - - Does not delete the underlying backend storage object. - - :param volume: Cinder volume to unmanage - """ - volume_path = self.local_path(volume) - LOG.info('Unmanage volume %s', volume_path) - - def _convert_volume_share(self, volume_share): - """Converts the share name to IP address.""" - share_split = volume_share.rsplit(':', 1) - return self._resolve_hostname(share_split[0]) + ':' + share_split[1] - - def _get_share_mount(self, vol_ref): - """Get the NFS share, NFS mount, and volume path from reference. 
- - :param vol_ref: Driver-specific information used to identify a volume - :return: NFS Share, NFS mount, volume path - """ - if 'source-name' not in vol_ref or not vol_ref['source-name']: - msg = _('Volume reference must contain source-name element.') - raise exception.ManageExistingInvalidReference( - existing_ref=vol_ref, reason=msg) - - volume_share = self._convert_volume_share(vol_ref['source-name']) - for nfs_share in self._mounted_shares: - share = self._convert_volume_share(nfs_share) - (__, match, volume_name) = volume_share.partition(share) - if match == share: - volume_name = volume_name.lstrip('/') - nfs_mount = self._get_mount_point_for_share(nfs_share) - volume_path = os.path.join(nfs_mount, volume_name) - if os.path.isfile(volume_path): - LOG.debug('Found volume %(path)s on share %(share)s', - {'path': volume_path, 'share': nfs_share}) - return nfs_share, nfs_mount, volume_name - else: - LOG.debug('Volume ref %(ref)s not on share %(share)s', - {'ref': vol_ref, 'share': nfs_share}) - - raise exception.ManageExistingInvalidReference( - existing_ref=vol_ref, reason=_('Volume not found.')) - - def retype(self, context, volume, new_type, diff, host): - """Retype from one volume type to another. - - At this point Tintri VMstore does not differentiate between - volume types on the same array. This is a no-op for us. 
- """ - return True, None - - def _ensure_shares_mounted(self): - # Mount image shares, we do not need to store these mounts - # in _mounted_shares - mounted_image_shares = [] - if self._image_shares_config: - self._load_shares_config(self._image_shares_config) - for share in self.shares.keys(): - try: - self._ensure_share_mounted(share) - mounted_image_shares.append(share) - except Exception: - LOG.exception('Exception during mounting.') - self._mounted_image_shares = mounted_image_shares - - # Mount Cinder shares - super(TintriDriver, self)._ensure_shares_mounted() - - -class TClient(object): - """REST client for Tintri storage.""" - - def __init__(self, hostname, username, password, - api_version=default_api_version): - """Initializes a connection to Tintri server.""" - self.api_url = 'https://' + hostname + '/api' - self.api_version = api_version - self.session_id = self.login(username, password) - self.headers = {'content-type': 'application/json', - 'cookie': 'JSESSIONID=' + self.session_id} - - def __enter__(self): - return self - - def __exit__(self, type, value, traceback): - self.logout() - - def get(self, api): - return self.get_query(api, None) - - def get_query(self, api, query): - url = self.api_url + api - - return requests.get(url, headers=self.headers, - params=query, verify=False) - - def delete(self, api): - url = self.api_url + api - - return requests.delete(url, headers=self.headers, verify=False) - - def put(self, api, payload): - url = self.api_url + api - - return requests.put(url, data=json.dumps(payload), - headers=self.headers, verify=False) - - def post(self, api, payload): - url = self.api_url + api - - return requests.post(url, data=json.dumps(payload), - headers=self.headers, verify=False) - - def login(self, username, password): - # Payload, header and URL for login - headers = {'content-type': 'application/json', - 'Tintri-Api-Client': - 'Tintri-Cinder-Driver-%s' % TintriDriver.VERSION} - payload = {'username': username, - 
'password': password, - 'typeId': 'com.tintri.api.rest.vcommon.dto.rbac.' - 'RestApiCredentials'} - url = self.api_url + '/' + self.api_version + '/session/login' - - r = requests.post(url, data=json.dumps(payload), - headers=headers, verify=False) - - if r.status_code != 200: - msg = _('Failed to login for user %s.') % username - raise exception.VolumeDriverException(msg) - - return r.cookies['JSESSIONID'] - - def logout(self): - url = self.api_url + '/' + self.api_version + '/session/logout' - - requests.get(url, headers=self.headers, verify=False) - - @staticmethod - def _remove_prefix(volume_path, prefix): - if volume_path.startswith(prefix): - return volume_path[len(prefix):] - else: - return volume_path - - def create_snapshot(self, volume_path, volume_name, volume_id, - snapshot_name, deletion_policy=None): - """Creates a volume snapshot.""" - request = {'typeId': 'com.tintri.api.rest.' + self.api_version + - '.dto.domain.beans.cinder.CinderSnapshotSpec', - 'file': TClient._remove_prefix(volume_path, tintri_path), - 'vmName': volume_name or snapshot_name, - 'description': snapshot_name + ' (' + volume_id + ')', - 'vmTintriUuid': volume_id, - 'instanceId': volume_id, - 'snapshotCreator': 'Cinder', - 'deletionPolicy': deletion_policy, - } - - payload = '/' + self.api_version + '/cinder/snapshot' - r = self.post(payload, request) - if r.status_code != 200: - msg = _('Failed to create snapshot for volume %s.') % volume_path - raise exception.VolumeDriverException(msg) - - return r.json()[0] - - def get_snapshot(self, volume_id): - """Gets a volume snapshot.""" - filter = {'vmUuid': volume_id} - - payload = '/' + self.api_version + '/snapshot' - r = self.get_query(payload, filter) - if r.status_code != 200: - msg = _('Failed to get snapshot for volume %s.') % volume_id - raise exception.VolumeDriverException(msg) - - if int(r.json()['filteredTotal']) > 0: - return r.json()['items'][0]['uuid']['uuid'] - - def get_image_snapshots_to_date(self, date): - filter = 
{'sortedBy': 'createTime', - 'target': 'SNAPSHOT', - 'consistency': 'CRASH_CONSISTENT', - 'hasClone': 'No', - 'type': 'CINDER_GENERATED_SNAPSHOT', - 'contain': 'image-', - 'limit': '100', - 'page': '1', - 'sortOrder': 'DESC', - 'since': '1970-01-01T00:00:00', - 'until': date, - } - payload = '/' + self.api_version + '/snapshot' - r = self.get_query(payload, filter) - if r.status_code != 200: - msg = _('Failed to get image snapshots.') - raise exception.VolumeDriverException(msg) - return r.json()['items'] - - def delete_snapshot(self, snapshot_uuid): - """Deletes a snapshot.""" - url = '/' + self.api_version + '/snapshot/' - self.delete(url + snapshot_uuid) - - def clone_volume(self, snapshot_uuid, volume_path): - """Clones a volume from snapshot.""" - request = {'typeId': 'com.tintri.api.rest.' + self.api_version + - '.dto.domain.beans.cinder.CinderCloneSpec', - 'destinationPaths': - [TClient._remove_prefix(volume_path, tintri_path)], - 'tintriSnapshotUuid': snapshot_uuid, - } - - url = '/' + self.api_version + '/cinder/clone' - r = self.post(url, request) - if r.status_code != 200 and r.status_code != 204: - msg = _('Failed to clone volume from snapshot %s.') % snapshot_uuid - raise exception.VolumeDriverException(msg) diff --git a/cinder/volume/drivers/veritas/__init__.py b/cinder/volume/drivers/veritas/__init__.py deleted file mode 100644 index e69de29bb..000000000 diff --git a/cinder/volume/drivers/veritas/hs_constants.py b/cinder/volume/drivers/veritas/hs_constants.py deleted file mode 100644 index cf3c330b9..000000000 --- a/cinder/volume/drivers/veritas/hs_constants.py +++ /dev/null @@ -1,56 +0,0 @@ -# Copyright (c) 2017 Veritas Technologies LLC. All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. 
You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -""" -Error Codes -""" - -EX_BAD_PARAM = 10 -EX_BAD_MESSAGE = 106 -MSG_SUCCESS = 0 -MSG_ERROR = 1 - -""" -Constants -""" -HS_VHOST = "/" -ACK_YES = 1 -ACK_NO = 0 -BLK_YES = 1 -BLK_NO = 0 -EXCH_DIRECT = "direct" -EXCH_FANOUT = "fanout" -EXCH_TOPIC = "topic" - -MSG_REQUEST = 1 -MSG_RESPONSE = 2 -MSG_TOKEN = "token" -MSG_OWNER = "owner" -MSG_TYPE = "type" -MSG_ERROR = "err_code" -MSG_ACK = "ack" -MSG_BLK = "blocking" -MSG_BLK_INFO = "blocking_info" -MSG_BLK_NAME = "name" -MSG_BLK_BINDKEY = "bindkey" -MSG_BLK_TYPE = "type" -MSG_PAYLOAD = "payload" - -# HyperScale Controller Exchange -HS_CONTROLLER_EXCH = 'hyperscale-controller' -HS_RPC_EXCH = 'hyperscale-recv' -HS_DATANODE_EXCH = 'hyperscale-datanode' -HS_COMPUTE_EXCH = 'hyperscale-storage' - -SNAP_RESTORE_RF = 3 diff --git a/cinder/volume/drivers/veritas/utils.py b/cinder/volume/drivers/veritas/utils.py deleted file mode 100644 index df89959d7..000000000 --- a/cinder/volume/drivers/veritas/utils.py +++ /dev/null @@ -1,377 +0,0 @@ -# Copyright (c) 2017 Veritas Technologies LLC. All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the -# License for the specific language governing permissions and limitations -# under the License. - -import json -import uuid - -from oslo_concurrency import processutils as putils -from oslo_log import log as logging -from oslo_utils import excutils -import six - -from cinder import exception -from cinder import utils -from cinder.volume.drivers.veritas import hs_constants as constants - -LOG = logging.getLogger(__name__) - - -def _populate_message_body(kwargs): - message_body = {} - # Build message body from kwargs - for key, value in kwargs.items(): - if value is not None: - message_body[key] = value - - return message_body - - -def generate_routingkey(): - return six.text_type(uuid.uuid1()) - - -def get_guid_with_curly_brackets(guid): - return "{%s}" % guid if guid else guid - - -def get_hyperscale_image_id(): - return "{%s}" % uuid.uuid1() - - -def get_hyperscale_version(): - - version = None - cmd_err = None - try: - cmd_arg = {'operation': 'version'} - # create a json for cmd argument - cmdarg_json = json.dumps(cmd_arg) - - # call hscli for version - (cmd_out, cmd_err) = hsexecute(cmdarg_json) - - # cmd_err should be None in case of successful execution of cmd - if not cmd_err: - processed_output = process_cmd_out(cmd_out) - version = processed_output.get('payload') - else: - LOG.error("Error %s in getting hyperscale version", - cmd_err) - raise exception.ErrorInHyperScaleVersion(cmd_err=cmd_err) - except (exception.UnableToExecuteHyperScaleCmd, - exception.UnableToProcessHyperScaleCmdOutput): - LOG.error("Exception in running the command for version", - exc_info=True) - raise exception.UnableToExecuteHyperScaleCmd(message="version") - - return version - - -def get_datanode_id(): - - dnid = None - cmd_out = None - cmd_err = None - try: - cmd_arg = {'operation': 'get_datanode_id'} - # create a json for cmd argument - cmdarg_json = json.dumps(cmd_arg) - - # call hscli for get_datanode_id - (cmd_out, cmd_err) = hsexecute(cmdarg_json) - - # cmd_err should 
be None in case of successful execution of cmd - if not cmd_err: - processed_output = process_cmd_out(cmd_out) - dnid = processed_output.get('payload') - else: - LOG.error("Error %s in getting datanode hypervisor id", - cmd_err) - raise exception.UnableToExecuteHyperScaleCmd( - message=cmdarg_json) - except exception.UnableToExecuteHyperScaleCmd: - with excutils.save_and_reraise_exception(): - LOG.debug("Unable to execute get_datanode_id", exc_info=True) - - except exception.UnableToProcessHyperScaleCmdOutput: - with excutils.save_and_reraise_exception(): - LOG.debug("Unable to process get_datanode_id output", - exc_info=True) - return dnid - - -def episodic_snap(meta): - - cmd_out = None - cmd_err = None - out_meta = None - try: - cmd_arg = {} - cmd_arg['operation'] = 'episodic_snap' - cmd_arg['metadata'] = meta - # create a json for cmd argument - cmdarg_json = json.dumps(cmd_arg) - - # call hscli for episodic_snap - (cmd_out, cmd_err) = hsexecute(cmdarg_json) - - # cmd_err should be None in case of successful execution of cmd - if not cmd_err: - processed_output = process_cmd_out(cmd_out) - out_meta = processed_output.get('payload') - else: - LOG.error("Error %s in processing episodic_snap", - cmd_err) - raise exception.UnableToExecuteHyperScaleCmd( - message=cmdarg_json) - except exception.UnableToExecuteHyperScaleCmd: - with excutils.save_and_reraise_exception(): - LOG.debug("Unable to execute episodic_snap", exc_info=True) - - except exception.UnableToProcessHyperScaleCmdOutput: - with excutils.save_and_reraise_exception(): - LOG.debug("Unable to process episodic_snap output", - exc_info=True) - return out_meta - - -def get_image_path(image_id, op_type='image'): - - cmd_out = None - cmd_err = None - image_path = None - try: - cmd_arg = {} - if op_type == 'image': - cmd_arg['operation'] = 'get_image_path' - elif op_type == 'volume': - cmd_arg['operation'] = 'get_volume_path' - cmd_arg['image_id'] = image_id - # create a json for cmd argument - cmdarg_json = 
json.dumps(cmd_arg) - - # call hscli for get_image_path - (cmd_out, cmd_err) = hsexecute(cmdarg_json) - - # cmd_err should be None in case of successful execution of cmd - if not cmd_err: - processed_output = process_cmd_out(cmd_out) - image_path = processed_output.get('payload') - else: - LOG.error("Error %s in processing get_image_path", - cmd_err) - raise exception.UnableToExecuteHyperScaleCmd( - message=cmdarg_json) - except exception.UnableToExecuteHyperScaleCmd: - with excutils.save_and_reraise_exception(): - LOG.debug("Unable to execute get_image_path", exc_info=True) - - except exception.UnableToProcessHyperScaleCmdOutput: - with excutils.save_and_reraise_exception(): - LOG.debug("Unable to process get_image_path output", - exc_info=True) - return image_path - - -def update_image(image_path, volume_id, hs_img_id): - cmd_out = None - cmd_err = None - output = None - try: - cmd_arg = {} - cmd_arg['operation'] = 'update_image' - cmd_arg['image_path'] = image_path - cmd_arg['volume_id'] = volume_id - cmd_arg['hs_image_id'] = hs_img_id - # create a json for cmd argument - cmdarg_json = json.dumps(cmd_arg) - - (cmd_out, cmd_err) = hsexecute(cmdarg_json) - - # cmd_err should be None in case of successful execution of cmd - if not cmd_err: - output = process_cmd_out(cmd_out) - else: - LOG.error("Error %s in execution of update_image", - cmd_err) - raise exception.UnableToExecuteHyperScaleCmd( - message=cmdarg_json) - except exception.UnableToExecuteHyperScaleCmd: - with excutils.save_and_reraise_exception(): - LOG.debug("Unable to execute update_image", exc_info=True) - - except exception.UnableToProcessHyperScaleCmdOutput: - with excutils.save_and_reraise_exception(): - LOG.debug("Unable to process update_image output", - exc_info=True) - return output - - -def hsexecute(cmdarg_json): - - cmd_out = None - cmd_err = None - try: - # call hyperscale cli - (cmd_out, cmd_err) = utils.execute("hscli", - cmdarg_json, - run_as_root=True) - except 
(putils.UnknownArgumentError, putils.ProcessExecutionError, - exception.ErrorInParsingArguments, OSError): - LOG.error("Exception in running the command for %s", - cmdarg_json, - exc_info=True) - raise exception.UnableToExecuteHyperScaleCmd(message=cmdarg_json) - - except Exception: - LOG.error("Internal exception in cmd for %s", cmdarg_json, - exc_info=True) - raise exception.UnableToExecuteHyperScaleCmd(message=cmdarg_json) - - return (cmd_out, cmd_err) - - -def process_cmd_out(cmd_out): - """Process the cmd output.""" - - output = None - - try: - # get the python object from the cmd_out - output = json.loads(cmd_out) - error_code = output.get('err_code') - if error_code: - error_message = output.get('err_msg') - operation = output.get('token') - LOG.error("Failed to perform %(operation)s with error code" - " %(err_code)s, error message is %(err_msg)s", - {"operation": operation, - "err_code": error_code, - "err_msg": error_message}) - except ValueError: - raise exception.UnableToProcessHyperScaleCmdOutput(cmd_out=cmd_out) - - return output - - -def check_for_setup_error(): - return True - - -def get_configuration(persona): - """Get required configuration from controller.""" - - msg_body = {'persona': persona} - configuration = None - try: - cmd_out, cmd_error = message_controller( - constants.HS_CONTROLLER_EXCH, - 'hyperscale.controller.get.configuration', - **msg_body) - LOG.debug("Response Message from Controller: %s", cmd_out) - payload = cmd_out.get('payload') - configuration = payload.get('config_data') - - except (exception.ErrorInSendingMsg, - exception.UnableToExecuteHyperScaleCmd, - exception.UnableToProcessHyperScaleCmdOutput): - LOG.exception("Failed to get configuration from controller") - raise exception.ErrorInFetchingConfiguration(persona=persona) - - return configuration - - -def _send_message(exchange, routing_key, message_token, **kwargs): - """Send message to specified node.""" - - cmd_out = None - cmd_err = None - processed_output = None - 
msg = None - try: - LOG.debug("Sending message: %s", message_token) - - # Build message from kwargs - message_body = _populate_message_body(kwargs) - cmd_arg = {} - cmd_arg["operation"] = "message" - cmd_arg["msg_body"] = message_body - cmd_arg["msg_token"] = message_token - # exchange name - cmd_arg["exchange_name"] = exchange - # routing key - cmd_arg["routing_key"] = routing_key - # create a json for cmd argument - cmdarg_json = json.dumps(cmd_arg) - - (cmd_out, cmd_err) = hsexecute(cmdarg_json) - - # cmd_err should be none in case of successful execution of cmd - if cmd_err: - LOG.debug("Sending message failed. Error %s", cmd_err) - raise exception.ErrorInSendingMsg(cmd_err=cmd_err) - else: - processed_output = process_cmd_out(cmd_out) - - except exception.UnableToExecuteHyperScaleCmd: - with excutils.save_and_reraise_exception(): - msg = ("Unable to execute HyperScale command for %(cmd)s" - " to exchange %(exch)s with key %(rt_key)s") - LOG.debug(msg, {"cmd": message_token, - "exch": exchange, - "rt_key": routing_key}, - exc_info=True) - - except exception.UnableToProcessHyperScaleCmdOutput: - with excutils.save_and_reraise_exception(): - msg = ("Unable to process msg %(message)s" - " to exchange %(exch)s with key %(rt_key)s") - LOG.debug(msg, {"message": message_token, - "exch": exchange, - "rt_key": routing_key}) - - return (processed_output, cmd_err) - - -def message_compute_plane(routing_key, message_token, **kwargs): - """Send message to compute plane.""" - - LOG.debug("Sending message to compute plane") - - return _send_message(constants.HS_COMPUTE_EXCH, - routing_key, - message_token, - **kwargs) - - -def message_data_plane(routing_key, message_token, **kwargs): - """Send message to data node.""" - - LOG.debug("Sending message to data plane") - - return _send_message(constants.HS_DATANODE_EXCH, - routing_key, - message_token, - **kwargs) - - -def message_controller(routing_key, message_token, **kwargs): - """Send message to controller.""" - - 
LOG.debug("Sending message to controller") - - return _send_message(constants.HS_CONTROLLER_EXCH, - routing_key, - message_token, - **kwargs) diff --git a/cinder/volume/drivers/veritas/vrtshyperscale.py b/cinder/volume/drivers/veritas/vrtshyperscale.py deleted file mode 100644 index 4ce370a47..000000000 --- a/cinder/volume/drivers/veritas/vrtshyperscale.py +++ /dev/null @@ -1,1001 +0,0 @@ -# Copyright (c) 2017 Veritas Technologies LLC. All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
-""" -Cinder Driver for HyperScale -""" - -import os - -from oslo_config import cfg -from oslo_log import log as logging -from oslo_utils import excutils -import six - -from cinder import exception -from cinder.i18n import _ -from cinder.image import image_utils -from cinder import interface -from cinder import utils -from cinder.volume import driver -from cinder.volume.drivers.veritas import hs_constants as constants -from cinder.volume.drivers.veritas import utils as util - -CONF = cfg.CONF -LOG = logging.getLogger(__name__) -TYPE_EPISODIC_SNAP = '0' -TYPE_USER_SNAP = '1' -TYPE_WORKFLOW_SNAP = '2' - -BLOCK_SIZE = 8 -MAX_REPLICAS = 2 -DEFAULT_REPLICAS = 1 -POOL_NAME = '{30c39970-ad80-4950-5490-8431abfaaaf0}' -HYPERSCALE_VERSION = '1.0.0' -PROVIDER_LOCATION_MNT = "/hyperscale" -PROVIDER_LOCATION = 'hyperscale-sv:' + PROVIDER_LOCATION_MNT - - -@interface.volumedriver -class HyperScaleDriver(driver.VolumeDriver): - - VERSION = '1.0' - # ThirdPartySytems wiki page - CI_WIKI_NAME = "Veritas_HyperScale_CI" - - def __init__(self, *args, **kwargs): - """Initialization""" - - super(HyperScaleDriver, self).__init__(*args, **kwargs) - - self.compute_map = {} - self.vsa_map = {} - self.compute_meta_map = {} - self.vsa_compute_map = {} - self.old_total = 0 - self.old_free = 0 - self.my_dnid = None - - @staticmethod - def _fetch_config_for_controller(): - return HyperScaleDriver._fetch_config_information( - persona='controller') - - @staticmethod - def _fetch_config_for_compute(): - return HyperScaleDriver._fetch_config_information( - persona='compute') - - @staticmethod - def _fetch_config_for_datanode(): - return HyperScaleDriver._fetch_config_information( - persona='datanode') - - @staticmethod - def _fetch_config_information(persona): - # Get hyperscale config information for persona - configuration = util.get_configuration(persona) - return configuration - - @utils.trace_method - def check_for_setup_error(self): - # check if HyperScale has been installed correctly - try: - 
version = util.get_hyperscale_version() - - if version != HYPERSCALE_VERSION: - raise exception.VolumeBackendAPIException( - data=(_("Unsupported version: %s") % version)) - except (exception.ErrorInHyperScaleVersion, - exception.UnableToExecuteHyperScaleCmd): - err_msg = _('Exception in getting HyperScale version') - LOG.exception(err_msg) - raise exception.VolumeBackendAPIException(data=err_msg) - - def _get_replicas(self, volume, metadata): - """Get the replicas.""" - try: - ref_targets = self._get_volume_metadata_value(metadata, - 'reflection_targets') - if ref_targets is not None: - replicas = MAX_REPLICAS - else: - replicas = DEFAULT_REPLICAS - - except Exception: - LOG.exception("Exception in getting reflection targets") - replicas = DEFAULT_REPLICAS - - LOG.debug("Number of replicas: %s", replicas) - return replicas - - @utils.trace_method - def do_setup(self, context): - """Any initialization the volume driver does while starting.""" - super(HyperScaleDriver, self).do_setup(context) - - try: - # Get computes info - computes = HyperScaleDriver._fetch_config_for_compute() - if computes is None: - computes = {} - - for compute in computes.keys(): - if 'disabled' in computes[compute].keys(): - disabled = computes[compute]['disabled'] - if disabled == "1": - continue - vsa_ip = computes[compute]['vsa_ip'] - vsa_isolated_ip = computes[compute]['vsa_isolated_ip'] - vsa_section_header = computes[compute]['vsa_section_header'] - compute_name = computes[compute]['compute_name'] - self.compute_map[vsa_ip] = vsa_isolated_ip - self.vsa_map[vsa_ip] = vsa_section_header - self.compute_meta_map[compute_name] = vsa_ip - self.vsa_compute_map[vsa_ip] = compute_name - - # Get controller info - cntr_info = HyperScaleDriver._fetch_config_for_controller() - if cntr_info is None: - cntr_info = {} - - # Get data node info - self.my_dnid = util.get_datanode_id() - datanodes = HyperScaleDriver._fetch_config_for_datanode() - if datanodes is None: - datanodes = {} - - for key, value 
in datanodes.items(): - if self.my_dnid == value['hypervisor_id']: - self.datanode_hostname = value['datanode_name'] - self.datanode_ip = value['data_ip'] - self.dn_routing_key = value['hypervisor_id'] - - LOG.debug("In init compute_map %s", self.compute_map) - LOG.debug("In init vsa_map %s", self.vsa_map) - LOG.debug("In init compute_meta_map %s", self.compute_meta_map) - - except (exception.UnableToProcessHyperScaleCmdOutput, - exception.ErrorInFetchingConfiguration): - err_msg = _("Unable to initialise the Veritas cinder driver") - LOG.exception(err_msg) - raise exception.VolumeBackendAPIException(data=err_msg) - - except Exception: - err_msg = _("Internal error occurred") - LOG.exception(err_msg) - raise exception.VolumeBackendAPIException(data=err_msg) - - @utils.trace_method - def create_cloned_volume(self, volume, src_vref): - """Creates a clone of the specified volume.""" - - LOG.debug("Clone volume") - model_update = {} - try: - LOG.debug("Clone new volume %(t_id)s from source volume %(s_id)s", - {"t_id": volume['id'], "s_id": src_vref['id']}) - # 1. Make a call to DN - # Check if current_dn_owner is set. 
- - rt_key = None - # Get metadata for volume - metadata = self._get_volume_metadata(src_vref) - rt_key = self._get_volume_metadata_value(metadata, - 'current_dn_owner') - if rt_key is None: - rt_key = self.dn_routing_key - - util.message_data_plane( - rt_key, - 'hyperscale.storage.dm.volume.clone', - pool_name=POOL_NAME, - display_name=util.get_guid_with_curly_brackets( - volume['id']), - version_name=util.get_guid_with_curly_brackets( - src_vref['id']), - volume_raw_size=volume['size'], - volume_qos=1, - parent_volume_guid=util.get_guid_with_curly_brackets( - src_vref['id']), - user_id=util.get_guid_with_curly_brackets( - volume['user_id']), - project_id=util.get_guid_with_curly_brackets( - volume['project_id']), - volume_guid=util.get_guid_with_curly_brackets( - volume['id'])) - - LOG.debug("Volume cloned successfully on data node") - - # Get metadata for volume - volume_metadata = self._get_volume_metadata(volume) - parent_cur_dn = self._get_volume_metadata_value(metadata, - 'current_dn_ip') - - metadata_update = {} - metadata_update['Primary_datanode_ip'] = parent_cur_dn - metadata_update['current_dn_owner'] = rt_key - metadata_update['current_dn_ip'] = parent_cur_dn - metadata_update['source_volid'] = src_vref['id'] - metadata_update['size'] = src_vref['size'] - - # 2. Choose a potential replica here. - # The actual decision to have potential replica is made in NOVA. 
- rt_key, rt_dn_ip = self._select_rt(volume, - volume_metadata, - only_select=True) - - if rt_key and rt_dn_ip: - metadata_update['Potential_secondary_key'] = rt_key - metadata_update['Potential_secondary_ip'] = rt_dn_ip - - except (exception.UnableToExecuteHyperScaleCmd, - exception.UnableToProcessHyperScaleCmdOutput): - with excutils.save_and_reraise_exception(): - LOG.exception('Exception in clone volume', exc_info=True) - except exception.InvalidMetadataType: - with excutils.save_and_reraise_exception(): - LOG.exception('Exception updating metadata in clone' - ' volume', exc_info=True) - - volume_metadata.update(metadata_update) - volume['provider_location'] = PROVIDER_LOCATION - model_update = {'provider_location': volume['provider_location'], - 'metadata': volume_metadata} - - return model_update - - def _get_datanodes_info(self): - # Get hyperscale datanode config information from controller - - msg_body = {} - data = None - - try: - cmd_out, cmd_error = util.message_controller( - constants.HS_CONTROLLER_EXCH, - 'hyperscale.controller.get.membership', - **msg_body) - LOG.debug("Response Message from Controller: %s", - cmd_out) - payload = cmd_out.get('payload') - data = payload.get('of_membership') - - except (exception.UnableToExecuteHyperScaleCmd, - exception.UnableToProcessHyperScaleCmdOutput): - with excutils.save_and_reraise_exception(): - LOG.exception("Failed to get datanode config " - "information from controller") - - return data - - def _select_rt(self, volume, metadata, only_select=False): - - # For the boot vdisk(first vdisk) of the instance, choose any - # reflection target other than this. For the data disks, - # retain the reflection target. - # It will be passed by the caller after reading it from instance - # metadata. 
- - LOG.debug("_select_rt ") - rt_key = self._get_volume_metadata_value(metadata, - 'Secondary_datanode_key') - rt_dn_ip = self._get_volume_metadata_value(metadata, - 'Secondary_datanode_ip') - current_dn_ip = self._get_volume_metadata_value(metadata, - 'current_dn_ip') - - if current_dn_ip is not None and rt_dn_ip == current_dn_ip: - return None, None - - if rt_key is not None and rt_dn_ip is not None: - return rt_key, rt_dn_ip - - rt_key = 'NA' - rt_dn_ip = 'NA' - datanodes = self._get_datanodes_info() - LOG.debug("Data nodes: %s", datanodes) - - for key, value in datanodes.items(): - if value['personality'] == 'datanode': - if self.my_dnid != value['hypervisor_id']: - LOG.debug("reflection target hypervisor_id: %s", - value['hypervisor_id']) - LOG.debug("my hypervisor_id: %s", self.my_dnid) - rt_dn_ip = value['data_ip'] - rt_key = value['hypervisor_id'] - - if only_select: - return rt_key, rt_dn_ip - - return rt_key, rt_dn_ip - - def _create_replica(self, volume, metadata): - """Create vdisk on peer data node.""" - - try: - reflection_target_ip = None - rt_routing_key, reflection_target_ip = ( - self._select_rt(volume, metadata)) - LOG.debug("_create_replica %(rt_key)s %(rt_ip)s", - {"rt_key": rt_routing_key, - "rt_ip": reflection_target_ip}) - - metadata_update = {} - metadata_update['Secondary_datanode_key'] = rt_routing_key - metadata_update['Secondary_datanode_ip'] = reflection_target_ip - - if rt_routing_key is None or rt_routing_key == 'NA': - return False, None, metadata_update - - instance_id = self._get_volume_metadata_value(metadata, - 'InstanceId') - - util.message_data_plane( - rt_routing_key, - 'hyperscale.storage.dm.volume.create', - pool_name=POOL_NAME, - volume_guid=util.get_guid_with_curly_brackets( - volume['id']), - display_name=util.get_guid_with_curly_brackets( - volume['id']), - volume_raw_size=volume['size'], - vm_id=util.get_guid_with_curly_brackets( - six.text_type(instance_id)), - is_reflection_source=0, - dn_reflection_factor=1, - 
reflection_src_ip=self.datanode_ip, - user_id=util.get_guid_with_curly_brackets( - volume['user_id']), - project_id=util.get_guid_with_curly_brackets( - volume['project_id']), - volume_qos=1) - # Failure handling TBD. - ret = True - LOG.debug("Create volume sent to reflection target data node") - - except (exception.VolumeNotFound, - exception.UnableToProcessHyperScaleCmdOutput, - exception.ErrorInSendingMsg): - LOG.error("Exception in creating replica", exc_info = True) - metadata_update['Secondary_datanode_key'] = 'NA' - metadata_update['Secondary_datanode_ip'] = 'NA' - metadata_update['DN_Resiliency'] = 'degraded' - ret = False - return ret, reflection_target_ip, metadata_update - - def _get_volume_details_for_create_volume(self, - reflection_target_ip, - volume, - metadata): - - instance_id = self._get_volume_metadata_value(metadata, - 'InstanceId') - volume_details = {} - volume_details['pool_name'] = POOL_NAME - volume_details['volume_guid'] = ( - util.get_guid_with_curly_brackets(volume['id'])) - volume_details['display_name'] = ( - util.get_guid_with_curly_brackets(volume['id'])) - volume_details['volume_raw_size'] = volume['size'] - volume_details['vm_id'] = util.get_guid_with_curly_brackets( - six.text_type(instance_id)) - volume_details['user_id'] = util.get_guid_with_curly_brackets( - volume['user_id']) - volume_details['project_id'] = ( - util.get_guid_with_curly_brackets(volume['project_id'])) - volume_details['volume_qos'] = 1 - volume_details['dn_reflection_factor'] = 0 - - if reflection_target_ip is not None: - volume_details['is_reflection_source'] = 1 - volume_details['dn_reflection_factor'] = 1 - volume_details['reflection_target_ip'] = reflection_target_ip - - return volume_details - - def _get_volume_metadata(self, volume): - volume_metadata = {} - if 'volume_metadata' in volume: - for metadata in volume['volume_metadata']: - volume_metadata[metadata['key']] = metadata['value'] - return volume_metadata - - def _get_volume_metadata_value(self, 
metadata, metadata_key): - metadata_value = None - if metadata: - metadata_value = metadata.get(metadata_key) - - LOG.debug("Volume metadata key %(m_key)s, value %(m_val)s", - {"m_key": metadata_key, "m_val": metadata_value}) - return metadata_value - - @utils.trace_method - def create_volume(self, volume): - """Creates a hyperscale volume.""" - - model_update = {} - metadata_update = {} - reflection_target_ip = None - LOG.debug("Create volume") - try: - volume_metadata = self._get_volume_metadata(volume) - - # 1. Check how many replicas needs to be created. - replicas = self._get_replicas(volume, volume_metadata) - if replicas > 1: - # 2. Create replica on peer datanode. - LOG.debug("Create volume message sent to peer data node") - ret, reflection_target_ip, metadata_update = ( - self._create_replica(volume, volume_metadata)) - if ret is False: - metadata_update['DN_Resiliency'] = 'degraded' - # Do not fail volume creation, just create one replica. - reflection_target_ip = None - - # 3. Get volume details based on reflection factor - # for volume - volume_details = self._get_volume_details_for_create_volume( - reflection_target_ip, volume, volume_metadata) - - # 4. 
Send create volume to data node with volume details - util.message_data_plane( - self.dn_routing_key, - 'hyperscale.storage.dm.volume.create', - **volume_details) - LOG.debug("Create volume message sent to data node") - - volume_metadata['Primary_datanode_ip'] = self.datanode_ip - volume_metadata['current_dn_owner'] = self.dn_routing_key - volume_metadata['current_dn_ip'] = self.datanode_ip - volume_metadata['hs_image_id'] = util.get_hyperscale_image_id() - volume_metadata.update(metadata_update) - - volume['provider_location'] = PROVIDER_LOCATION - model_update = {'provider_location': volume['provider_location'], - 'metadata': volume_metadata} - - except (exception.UnableToProcessHyperScaleCmdOutput, - exception.ErrorInSendingMsg): - with excutils.save_and_reraise_exception(): - LOG.exception('Unable to create hyperscale volume') - - return model_update - - @utils.trace_method - def delete_volume(self, volume): - """Deletes a volume.""" - - LOG.debug("Delete volume with id %s", volume['id']) - # 1. Check for provider location - if not volume['provider_location']: - LOG.warning('Volume %s does not have provider_location specified', - volume['name']) - raise exception.VolumeMetadataNotFound( - volume_id=volume['id'], - metadata_key='provider_location') - - # 2. Message data plane for volume deletion - message_body = {'display_name': volume['name']} - - # if Secondary_datanode_key is present, - # delete the replica from secondary datanode. 
- rt_key = None - - # Get metadata for volume - metadata = self._get_volume_metadata(volume) - - rt_key = self._get_volume_metadata_value(metadata, - 'Secondary_datanode_key') - rt_dn_ip = self._get_volume_metadata_value(metadata, - 'Secondary_datanode_ip') - current_dn_ip = self._get_volume_metadata_value(metadata, - 'current_dn_ip') - if current_dn_ip is not None and rt_dn_ip == current_dn_ip: - rt_key = None - - # Send Delete Volume to Data Node - try: - if rt_key is not None: - util.message_data_plane( - rt_key, - 'hyperscale.storage.dm.volume.delete', - **message_body) - - util.message_data_plane( - self.dn_routing_key, - 'hyperscale.storage.dm.volume.delete', - **message_body) - - except (exception.UnableToProcessHyperScaleCmdOutput, - exception.ErrorInSendingMsg): - LOG.error('Exception while deleting volume', exc_info=True) - raise exception.VolumeIsBusy(volume_name=volume['name']) - - @utils.trace_method - def create_snapshot(self, snapshot): - """Create a snapshot.""" - - LOG.debug("Create Snapshot %s", snapshot['volume_id']) - workflow_id = None - last_in_eds_seq = None - model_update = {} - rt_key = None - - # Get metadata for volume - snapshot_volume = snapshot.get('volume') - metadata = snapshot_volume['metadata'] - rt_key = self._get_volume_metadata_value(metadata, - 'current_dn_owner') - if rt_key is None: - rt_key = self.dn_routing_key - - # Check for episodic based on metadata key - workflow_snap = 0 - - meta = snapshot.get('metadata') - LOG.debug('Snapshot metatadata %s', meta) - if 'SNAPSHOT-COOKIE' in meta.keys(): - snapsize = meta['SIZE'] - - # Call DataNode for episodic snapshots - LOG.debug('Calling Data Node for episodic snapshots') - message_body = {} - message_body['snapshot_id'] = ( - util.get_guid_with_curly_brackets(snapshot['id'])) - message_body['volume_guid'] = ( - util.get_guid_with_curly_brackets( - snapshot['volume_id'])) - message_body['snapshot_cookie'] = meta['SNAPSHOT-COOKIE'] - - try: - # send message to data node - 
util.message_data_plane( - rt_key, - 'hyperscale.storage.dm.volume.snapshot.update', - **message_body) - - # Update size via cinder api - if snapsize is not None: - model_update['volume_size'] = snapsize.value - - # Set the episodic type metatdata for filtering purpose - meta['TYPE'] = TYPE_EPISODIC_SNAP - meta['status'] = 'available' - meta['datanode_ip'] = self.datanode_ip - - except (exception.VolumeNotFound, - exception.UnableToExecuteHyperScaleCmd, - exception.UnableToProcessHyperScaleCmdOutput): - with excutils.save_and_reraise_exception(): - LOG.exception('Exception in create snapshot') - - model_update['metadata'] = meta - return model_update - - else: - out_meta = util.episodic_snap(meta) - if out_meta.get('update'): - meta['TYPE'] = out_meta.get('TYPE') - meta['status'] = out_meta.get('status') - meta['datanode_ip'] = self.datanode_ip - model_update['metadata'] = meta - return model_update - - if 'workflow_id' in meta.keys(): - workflow_snap = 1 - workflow_id = meta['workflow_id'] - - if 'monitor_snap' in meta.keys(): - if int(meta['monitor_snap']) == constants.SNAP_RESTORE_RF: - last_in_eds_seq = 0 - else: - last_in_eds_seq = 1 - - # If code falls through here then it mean its user initiated snapshots - try: - # Get metadata for volume - vsa_routing_key = None - snapshot_volume = snapshot.get('volume') - metadata = snapshot_volume['metadata'] - LOG.debug('Calling Compute Node for user initiated snapshots') - vsa_ip = self._get_volume_metadata_value(metadata, - 'acting_vdisk_owner') - if vsa_ip is None: - vsa_ip = self._get_volume_metadata_value(metadata, 'vsa_ip') - - LOG.debug("Create snap on compute vsa %s", vsa_ip) - if vsa_ip: - vsa_routing_key = vsa_ip.replace('.', '') - - message_body = {} - # Set the parent volume id - message_body['vdisk_id_str'] = ( - util.get_guid_with_curly_brackets( - snapshot['volume_id'])) - # Set the snapshot details - message_body['snapshot_id_str'] = ( - util.get_guid_with_curly_brackets(snapshot['id'])) - 
message_body['snapshot_name'] = snapshot['name'] - - if workflow_snap == 1: - message_body['workflow_snapshot'] = 1 - else: - message_body['user_initiated'] = 1 - - if last_in_eds_seq is not None: - message_body['last_in_eds_seq'] = last_in_eds_seq - - # send message to compute node - util.message_compute_plane( - vsa_routing_key, - 'hyperscale.storage.nfs.volume.snapshot.create', - **message_body) - - # Set the snapshot type to either workflow or user initiated - # snapshot in metatdata for filtering purpose - if workflow_snap: - LOG.debug('__help request for WORKFLOW snapshot') - meta['TYPE'] = TYPE_WORKFLOW_SNAP - meta['status'] = 'creating' - meta['datanode_ip'] = self.datanode_ip - else: - LOG.debug('__help request for MANUAL snapshot') - meta['TYPE'] = TYPE_USER_SNAP - meta['status'] = 'creating' - meta['datanode_ip'] = self.datanode_ip - - if workflow_id is not None: - message_body = {} - message_body['workflow_id'] = workflow_id - message_body['skip_upto_sentinel'] = ( - 'hyperscale.vdisk.failback.snapmark_sentinel') - - # send message to controller node - util.message_controller( - constants.HS_CONTROLLER_EXCH, - 'hyperscale.controller.execute.workflow', - **message_body) - - except (exception.VolumeNotFound, - exception.UnableToExecuteHyperScaleCmd, - exception.UnableToProcessHyperScaleCmdOutput): - with excutils.save_and_reraise_exception(): - LOG.exception('Exception in create snapshot') - - model_update['metadata'] = meta - return model_update - - @utils.trace_method - def delete_snapshot(self, snapshot): - """Deletes a snapshot.""" - - meta = snapshot.get('metadata') - if 'force' in meta.keys(): - LOG.debug("Found force flag for snapshot metadata." 
- " Not sending call to datanode ") - LOG.debug('snapshot metadata %s', meta) - return - - if 'is_busy' in meta.keys(): - LOG.warning("Snapshot %s is being used, skipping delete", - snapshot['id']) - raise exception.SnapshotIsBusy(snapshot_name=snapshot['id']) - else: - LOG.warning("Snapshot %s is being deleted," - " is_busy key not present", snapshot['id']) - - message_body = {} - message_body['volume_guid'] = ( - util.get_guid_with_curly_brackets(snapshot['volume_id'])) - message_body['snapshot_id'] = ( - util.get_guid_with_curly_brackets(snapshot['id'])) - - # HyperScale snapshots whether Episodic or User initiated, all resides - # in the data plane. - # Hence delete snapshot operation will go to datanode - rt_key = None - - # Get metadata for volume - snapshot_volume = snapshot.get('volume') - metadata = snapshot_volume['metadata'] - rt_key = self._get_volume_metadata_value(metadata, - 'current_dn_owner') - if rt_key is None: - rt_key = self.dn_routing_key - - try: - # send message to data node - util.message_data_plane( - rt_key, - 'hyperscale.storage.dm.version.delete', - **message_body) - - except (exception.UnableToExecuteHyperScaleCmd, - exception.UnableToProcessHyperScaleCmdOutput): - with excutils.save_and_reraise_exception(): - LOG.exception('Exception in delete snapshot') - - @utils.trace_method - def create_volume_from_snapshot(self, volume, snapshot): - """Create volume from snapshot.""" - - LOG.debug("Create volume from snapshot") - model_update = {} - try: - LOG.debug("Clone new volume %(t_id)s from snapshot with id" - " %(s_id)s", {"t_id": volume['id'], - "s_id": volume['snapshot_id']}) - # 1. Make a call to DN - # Check if current_dn_owner is set. 
- # Route the snapshot creation request to current_dn_owner - - rt_key = None - - # Get metadata for volume - snap_vol = snapshot['volume'] - metadata = snap_vol['metadata'] - rt_key = self._get_volume_metadata_value(metadata, - 'current_dn_owner') - if rt_key is None: - rt_key = self.dn_routing_key - - util.message_data_plane( - rt_key, - 'hyperscale.storage.dm.volume.clone.create', - pool_name=POOL_NAME, - display_name=util.get_guid_with_curly_brackets( - volume['id']), - version_name=util.get_guid_with_curly_brackets( - volume['snapshot_id']), - volume_raw_size=volume['size'], - volume_qos=1, - parent_volume_guid=util.get_guid_with_curly_brackets( - snapshot['volume_id']), - user_id=util.get_guid_with_curly_brackets( - volume['user_id']), - project_id=util.get_guid_with_curly_brackets( - volume['project_id']), - volume_guid=util.get_guid_with_curly_brackets( - volume['id'])) - - LOG.debug("Volume created successfully on data node") - - # Get metadata for volume - volume_metadata = self._get_volume_metadata(volume) - parent_cur_dn = self._get_volume_metadata_value(metadata, - 'current_dn_ip') - - metadata_update = {} - metadata_update['snapshot_id'] = snapshot['id'] - metadata_update['parent_volume_guid'] = ( - util.get_guid_with_curly_brackets( - snapshot['volume_id'])) - metadata_update['Primary_datanode_ip'] = parent_cur_dn - metadata_update['current_dn_owner'] = rt_key - metadata_update['current_dn_ip'] = parent_cur_dn - - # 2. Choose a potential replica here. - # The actual decision to have potential replica is made in NOVA. 
- rt_key, rt_dn_ip = self._select_rt(volume, - volume_metadata, - only_select=True) - - if rt_key and rt_dn_ip: - metadata_update['Potential_secondary_key'] = rt_key - metadata_update['Potential_secondary_ip'] = rt_dn_ip - - except (exception.UnableToExecuteHyperScaleCmd, - exception.UnableToProcessHyperScaleCmdOutput): - with excutils.save_and_reraise_exception(): - LOG.exception('Exception in creating volume from snapshot') - except exception.InvalidMetadataType: - with excutils.save_and_reraise_exception(): - LOG.exception('Exception updating metadata in create' - ' volume from snapshot') - - volume_metadata.update(metadata_update) - - volume['provider_location'] = PROVIDER_LOCATION - model_update = {'provider_location': volume['provider_location'], - 'metadata': volume_metadata} - - return model_update - - @utils.trace_method - def get_volume_stats(self, refresh=False): - """Get volume status.""" - - # If 'refresh' is True, run update the stats first. - - LOG.debug("Get volume status") - - self._stats = self._fetch_volume_status() - new_total = self._stats['total_capacity_gb'] - new_free = self._stats['free_capacity_gb'] - - if self.old_total != new_total or self.old_free != new_free: - self.old_total = new_total - self.old_free = new_free - - message_body = {'hostname': self.datanode_hostname, - 'is_admin': 1, - 'total': new_total, - 'free': new_free} - try: - cmd_out, cmd_error = util.message_controller( - constants.HS_CONTROLLER_EXCH, - 'hyperscale.controller.set.datanode.storage.stats', - **message_body) - LOG.debug("Response Message from Controller: %s", - cmd_out) - - except (exception.UnableToExecuteHyperScaleCmd, - exception.UnableToProcessHyperScaleCmdOutput): - with excutils.save_and_reraise_exception(): - LOG.exception('Exception during fetch stats') - - return self._stats - - @utils.trace_method - def extend_volume(self, volume, size_gb): - """Extend volume.""" - - LOG.debug("Extend volume") - try: - message_body = {} - message_body['volume_guid'] = 
( - util.get_guid_with_curly_brackets(volume['id'])) - message_body['new_size'] = size_gb - - # Send Extend Volume message to Data Node - util.message_data_plane( - self.dn_routing_key, - 'hyperscale.storage.dm.volume.extend', - **message_body) - - except (exception.UnableToProcessHyperScaleCmdOutput, - exception.ErrorInSendingMsg): - msg = _('Exception in extend volume %s') % volume['name'] - LOG.exception(msg) - raise exception.VolumeDriverException(message=msg) - - def _fetch_volume_status(self): - """Retrieve Volume Stats from Datanode.""" - - LOG.debug("Request Volume Stats from Datanode") - - data = {} - data["volume_backend_name"] = 'Veritas_HyperScale' - data["vendor_name"] = 'Veritas Technologies LLC' - data["driver_version"] = self.VERSION - data["storage_protocol"] = 'nfs' - data['total_capacity_gb'] = 0.0 - data['free_capacity_gb'] = 0.0 - data['reserved_percentage'] = self.configuration.reserved_percentage - data['QoS_support'] = False - - try: - message_body = {} - # send message to data node - cmd_out, cmd_error = util.message_data_plane( - self.dn_routing_key, - 'hyperscale.storage.dm.discover.stats', - **message_body) - - LOG.debug("Response Message from Datanode: %s", cmd_out) - payload = cmd_out.get('payload') - if 'stats' in payload.keys(): - if 'total_capacity' in payload.get( - 'stats')[0].keys(): - total_capacity = payload.get( - 'stats')[0]['total_capacity'] - - if 'free_capacity' in payload.get( - 'stats')[0].keys(): - free_capacity = payload.get( - 'stats')[0]['free_capacity'] - - if total_capacity is not None: - data['total_capacity_gb'] = float(total_capacity) - data['free_capacity_gb'] = float(free_capacity) - - except (exception.UnableToExecuteHyperScaleCmd, - exception.UnableToProcessHyperScaleCmdOutput): - with excutils.save_and_reraise_exception(): - LOG.exception('Exception during fetch stats') - - return data - - @utils.trace_method - def initialize_connection(self, volume, connector): - """Allow connection to connector and return 
connection info.""" - data = {'export': volume['provider_location'], - 'name': volume['name']} - return { - 'driver_volume_type': 'veritas_hyperscale', - 'data': data - } - - def terminate_connection(self, volume, connector, **kwargs): - """Disallow connection from connector.""" - pass - - def ensure_export(self, ctx, volume): - """Synchronously recreates an export for a logical volume.""" - pass - - def create_export(self, ctx, volume, connector): - - # Exports the volume. Can optionally return a Dictionary of changes - # to the volume object to be persisted.""" - pass - - def remove_export(self, ctx, volume): - """Removes an export for a logical volume.""" - pass - - @utils.trace_method - def copy_image_to_volume(self, context, volume, image_service, image_id): - """Fetch the image from image_service and write it to the volume.""" - - LOG.debug("copy_image_to_volume volume: %(vol)s " - "image service: %(service)s image id: %(id)s.", - {'vol': volume, - 'service': six.text_type(image_service), - 'id': six.text_type(image_id)}) - - path = util.get_image_path(image_id) - try: - # Skip image creation if file already exists - if not os.path.isfile(path): - image_utils.fetch_to_raw(context, - image_service, - image_id, - path, - BLOCK_SIZE, - size=volume['size']) - metadata = self._get_volume_metadata(volume) - hs_img_id = self._get_volume_metadata_value(metadata, - 'hs_image_id') - util.update_image(path, volume['id'], hs_img_id) - - except (exception.UnableToExecuteHyperScaleCmd, - exception.UnableToProcessHyperScaleCmdOutput): - with excutils.save_and_reraise_exception(): - LOG.exception('Failed to copy_image_to_volume') - - @utils.trace_method - def copy_volume_to_image(self, context, volume, image_service, image_meta): - """Copy the volume to the specified image.""" - - LOG.debug("copy_volume_to_image volume: %(vol)s" - " image service:%(service)s image meta: %(meta)s.", - {'vol': volume, - 'service': six.text_type(image_service), - 'meta': 
six.text_type(image_meta)}) - - try: - metadata = self._get_volume_metadata(volume) - hs_img_id = self._get_volume_metadata_value(metadata, - 'hs_image_id') - path = util.get_image_path(hs_img_id, 'volume') - image_utils.upload_volume(context, - image_service, - image_meta, - path) - - except (exception.UnableToExecuteHyperScaleCmd, - exception.UnableToProcessHyperScaleCmdOutput): - with excutils.save_and_reraise_exception(): - LOG.exception('Failed to copy_volume_to_image') diff --git a/cinder/volume/drivers/veritas_cnfs.py b/cinder/volume/drivers/veritas_cnfs.py deleted file mode 100644 index f08782d83..000000000 --- a/cinder/volume/drivers/veritas_cnfs.py +++ /dev/null @@ -1,180 +0,0 @@ -# Copyright (c) 2017 Veritas Technologies LLC -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import os - -from oslo_log import log as logging -from oslo_utils import excutils - -from cinder import exception -from cinder.i18n import _ -from cinder import interface -from cinder.volume.drivers import nfs - -LOG = logging.getLogger(__name__) - - -@interface.volumedriver -class VeritasCNFSDriver(nfs.NfsDriver): - - """Veritas Clustered NFS based cinder driver - - 1.0.0 - Initial driver implementations for Kilo. - 1.0.1 - Liberty release driver not implemented. - Place holder for Liberty release in case we - need to support. - 1.0.2 - cinder.interface.volumedriver decorator. 
- Mitaka/Newton/Okata Release - 1.0.3 - Seperate create_cloned_volume() and - create_volume_from_snapshot () functionality. - Pike Release - Executes commands relating to Volumes. - """ - - VERSION = "1.0.3" - # ThirdPartySytems wiki page - CI_WIKI_NAME = "Veritas_Access_CI" - DRIVER_VOLUME_TYPE = 'nfs' - - def __init__(self, *args, **kwargs): - self._execute = None - self._context = None - super(VeritasCNFSDriver, self).__init__(*args, **kwargs) - - def do_setup(self, context): - self._context = context - super(VeritasCNFSDriver, self).do_setup(context) - opts = self.configuration.nfs_mount_options - if not opts or opts.find('vers=3') == -1 or ( - opts.find('nfsvers=3')) == -1: - msg = _("NFS is not configured to use NFSv3") - LOG.error(msg) - raise exception.VolumeBackendAPIException(data=msg) - - def create_volume_from_snapshot(self, volume, snapshot): - """Creates a volume from snapshot.""" - LOG.debug('VeritasNFSDriver create_volume_from_snapshot called ' - 'volume_id = %(volume)s and snapshot_id = %(snapshot)s', - {'volume': volume.id, 'snapshot': snapshot.id}) - snap_name = snapshot.name - vol_size = volume.size - snap_size = snapshot.volume_size - self._do_clone_volume(snapshot, snap_name, volume) - volume.provider_location = snapshot.provider_location - - if vol_size != snap_size: - try: - self.extend_volume(volume, vol_size) - except exception.ExtendVolumeError as ex: - with excutils.save_and_reraise_exception(): - LOG.error('Failed to extend Volume: %s', ex.msg) - path = self.local_path(volume) - self._delete_file(path) - return {'provider_location': volume.provider_location} - - def _get_vol_by_id(self, volid): - vol = self.db.volume_get(self._context, volid) - return vol - - def _delete_file(self, path): - """Deletes file from disk and return result as boolean.""" - try: - LOG.debug('Deleting file at path %s', path) - self._execute('rm', '-f', path, run_as_root=True) - except OSError as ex: - LOG.warning('Exception during deleting %s', ex.strerror) - - 
def create_snapshot(self, snapshot): - """Create a snapshot of the volume.""" - src_vol_id = snapshot.volume_id - src_vol_name = snapshot.volume_name - src_vol = self._get_vol_by_id(src_vol_id) - self._do_clone_volume(src_vol, src_vol_name, snapshot) - snapshot.provider_location = src_vol.provider_location - LOG.debug("VeritasNFSDriver create_snapshot %r", - snapshot.provider_location) - return {'provider_location': snapshot.provider_location} - - def delete_snapshot(self, snapshot): - """Delete a snapshot.""" - if not snapshot.provider_location: - LOG.warning('Snapshot %s does not have provider_location ' - 'specified, skipping', snapshot.name) - return - self._ensure_share_mounted(snapshot.provider_location) - snap_path = self.local_path(snapshot) - self._delete_file(snap_path) - - def create_cloned_volume(self, volume, src_vref): - """Create a clone of the volume.""" - - LOG.debug('VeritasNFSDriver create_cloned_volume called ' - 'volume_id = %(volume)s and src_vol_id = %(src_vol_id)s', - {'volume': volume.id, 'src_vol_id': src_vref.id}) - src_vol_name = src_vref.name - vol_size = volume.size - src_vol_size = src_vref.size - self._do_clone_volume(src_vref, src_vol_name, volume) - volume.provider_location = src_vref.provider_location - - if vol_size != src_vol_size: - try: - self.extend_volume(volume, vol_size) - except exception.ExtendVolumeError as ex: - with excutils.save_and_reraise_exception(): - LOG.error('Failed to extend Volume: %s', ex.msg) - path = self.local_path(volume) - self._delete_file(path) - return {'provider_location': volume.provider_location} - - def _get_local_volume_path(self, provider_loc, vol_name): - mnt_path = self._get_mount_point_for_share(provider_loc) - vol_path = os.path.join(mnt_path, vol_name) - return vol_path - - def _do_clone_volume(self, src_vol, src_vol_name, tgt_vol): - cnfs_share = src_vol.provider_location - tgt_vol_name = tgt_vol.name - tgt_vol_path = self._get_local_volume_path(cnfs_share, tgt_vol_name) - src_vol_path = 
self._get_local_volume_path(cnfs_share, src_vol_name) - tgt_vol_path_spl = tgt_vol_path + "::snap:vxfs:" - self._execute('ln', src_vol_path, tgt_vol_path_spl, run_as_root=True) - LOG.debug("VeritasNFSDriver: do_clone_volume %(src_vol_path)s " - "%(tgt_vol_path)s %(tgt_vol_path_spl)s", - {'src_vol_path': src_vol_path, - 'tgt_vol_path_spl': tgt_vol_path_spl, - 'tgt_vol_path': tgt_vol_path}) - if not os.path.exists(tgt_vol_path): - self._execute('rm', '-f', tgt_vol_path_spl, run_as_root=True) - msg = _("Filesnap over NFS is not supported, " - "removing the ::snap:vxfs: file") - LOG.error(msg) - raise exception.NfsException(msg) - - def extend_volume(self, volume, size): - """Extend the volume to new size""" - path = self.local_path(volume) - self._execute('truncate', '-s', '%sG' % size, path, run_as_root=True) - LOG.debug("VeritasNFSDriver: extend_volume volume_id = %s", volume.id) - - def _update_volume_stats(self): - super(VeritasCNFSDriver, self)._update_volume_stats() - backend_name = self.configuration.safe_get('volume_backend_name') - res_percentage = self.configuration.safe_get('reserved_percentage') - self._stats["volume_backend_name"] = backend_name or 'VeritasCNFS' - self._stats["vendor_name"] = 'Veritas' - self._stats["reserved_percentage"] = res_percentage or 0 - self._stats["driver_version"] = self.VERSION - self._stats["storage_protocol"] = self.DRIVER_VOLUME_TYPE diff --git a/cinder/volume/drivers/violin/__init__.py b/cinder/volume/drivers/violin/__init__.py deleted file mode 100644 index e69de29bb..000000000 diff --git a/cinder/volume/drivers/violin/v7000_common.py b/cinder/volume/drivers/violin/v7000_common.py deleted file mode 100644 index c2d4b17cb..000000000 --- a/cinder/volume/drivers/violin/v7000_common.py +++ /dev/null @@ -1,1091 +0,0 @@ -# Copyright 2015 Violin Memory, Inc. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. 
You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -""" -Violin Memory 7000 Series All-Flash Array Common Driver for OpenStack Cinder - -Provides common (ie., non-protocol specific) management functions for -V7000 series flash arrays. - -Backend array communication is handled via VMEM's python library -called 'vmemclient'. - -NOTE: this driver file requires the use of synchronization points for -certain types of backend operations, and as a result may not work -properly in an active-active HA configuration. See OpenStack Cinder -driver documentation for more information. -""" - -import math -import re -import six -import socket -import time - -from oslo_config import cfg -from oslo_log import log as logging -from oslo_service import loopingcall -from oslo_utils import units - -from cinder import context -from cinder.db.sqlalchemy import api -from cinder import exception -from cinder.i18n import _ -from cinder import utils -from cinder.volume import configuration -from cinder.volume import volume_types - - -LOG = logging.getLogger(__name__) - -try: - import vmemclient -except ImportError: - vmemclient = None -else: - LOG.info("Running with vmemclient version: %s", - vmemclient.__version__) - - -CONCERTO_SUPPORTED_VERSION_PATTERNS = ['Version 7.[0-9].?[0-9]?'] -CONCERTO_DEFAULT_PRIORITY = 'medium' -CONCERTO_DEFAULT_SRA_POLICY = 'preserveAll' -CONCERTO_DEFAULT_SRA_ENABLE_EXPANSION = True -CONCERTO_DEFAULT_SRA_EXPANSION_THRESHOLD = 50 -CONCERTO_DEFAULT_SRA_EXPANSION_INCREMENT = '1024MB' -CONCERTO_DEFAULT_SRA_EXPANSION_MAX_SIZE = None -CONCERTO_DEFAULT_SRA_ENABLE_SHRINK = False 
-CONCERTO_DEFAULT_POLICY_MAX_SNAPSHOTS = 1000 -CONCERTO_DEFAULT_POLICY_RETENTION_MODE = 'All' -CONCERTO_LUN_TYPE_THICK = 'THICK' -LUN_ALLOC_SZ = 10 - -violin_opts = [ - cfg.IntOpt('violin_request_timeout', - default=300, - help='Global backend request timeout, in seconds.'), - cfg.ListOpt('violin_dedup_only_pools', - default=[], - help='Storage pools to be used to setup dedup luns only.' - '(Comma separated list)'), - cfg.ListOpt('violin_dedup_capable_pools', - default=[], - help='Storage pools capable of dedup and other luns.' - '(Comma separated list)'), - cfg.StrOpt('violin_pool_allocation_method', - default='random', - choices=['random', 'largest', 'smallest'], - help='Method of choosing a storage pool for a lun.'), - cfg.ListOpt('violin_iscsi_target_ips', - default=[], - help='Target iSCSI addresses to use.' - '(Comma separated list)'), -] - -CONF = cfg.CONF -CONF.register_opts(violin_opts, group=configuration.SHARED_CONF_GROUP) - - -class V7000Common(object): - """Contains common code for the Violin V7000 drivers.""" - - def __init__(self, config): - self.vmem_mg = None - self.container = "" - self.config = config - - def do_setup(self, context): - """Any initialization the driver does while starting.""" - if not self.config.san_ip: - raise exception.InvalidInput( - reason=_('Gateway VIP is not set')) - - if vmemclient: - self.vmem_mg = vmemclient.open(self.config.san_ip, - self.config.san_login, - self.config.san_password, - keepalive=True) - - if self.vmem_mg is None: - msg = _('Failed to connect to array') - raise exception.VolumeBackendAPIException(data=msg) - - if self.vmem_mg.utility.is_external_head: - # With an external storage pool configuration is a must - if (self.config.violin_dedup_only_pools == [] and - self.config.violin_dedup_capable_pools == []): - - LOG.warning("Storage pools not configured.") - raise exception.InvalidInput( - reason=_('Storage pool configuration is ' - 'mandatory for external head')) - - def check_for_setup_error(self): - 
"""Returns an error if prerequisites aren't met.""" - if vmemclient is None: - msg = _('vmemclient python library not found') - raise exception.VolumeBackendAPIException(data=msg) - - LOG.info("CONCERTO version: %s", self.vmem_mg.version) - - if not self._is_supported_vmos_version(self.vmem_mg.version): - msg = _('CONCERTO version is not supported') - raise exception.ViolinInvalidBackendConfig(reason=msg) - - @utils.synchronized('vmem-lun') - def _create_lun(self, volume): - """Creates a new lun. - - :param volume: volume object provided by the Manager - """ - spec_dict = {} - selected_pool = {} - - size_mb = volume['size'] * units.Ki - full_size_mb = size_mb - - LOG.debug("Creating LUN %(name)s, %(size)s MB.", - {'name': volume['name'], 'size': size_mb}) - - spec_dict = self._process_extra_specs(volume) - - try: - selected_pool = self._get_storage_pool( - volume, - size_mb, - spec_dict['pool_type'], - "create_lun") - - except exception.ViolinResourceNotFound: - raise - except Exception: - msg = _('No suitable storage pool found') - LOG.exception(msg) - raise exception.ViolinBackendErr(message=msg) - - try: - # Note: In the following create_lun command for setting up a dedup - # or thin lun the size_mb parameter is ignored and 10% of the - # full_size_mb specified is the size actually allocated to - # the lun. full_size_mb is the size the lun is allowed to - # grow. On the other hand, if it is a thick lun, the - # full_size_mb is ignored and size_mb is the actual - # allocated size of the lun. 
- - self._send_cmd(self.vmem_mg.lun.create_lun, - "Create resource successfully.", - volume['id'], - spec_dict['size_mb'], - selected_pool['dedup'], - selected_pool['thin'], - full_size_mb, - storage_pool_id=selected_pool['storage_pool_id']) - - except exception.ViolinBackendErrExists: - LOG.debug("Lun %s already exists, continuing.", volume['id']) - - except Exception: - LOG.exception("Lun create for %s failed!", volume['id']) - raise - - @utils.synchronized('vmem-lun') - def _delete_lun(self, volume): - """Deletes a lun. - - :param volume: volume object provided by the Manager - """ - success_msgs = ['Delete resource successfully', ''] - - LOG.debug("Deleting lun %s.", volume['id']) - - # If the LUN has ever had a snapshot, it has an SRA and - # policy that must be deleted first. - self._delete_lun_snapshot_bookkeeping(volume['id']) - - try: - self._send_cmd(self.vmem_mg.lun.delete_lun, - success_msgs, volume['id']) - - except vmemclient.core.error.NoMatchingObjectIdError: - LOG.debug("Lun %s already deleted, continuing.", volume['id']) - - except exception.ViolinBackendErrExists: - LOG.exception("Lun %s has dependent snapshots, " - "skipping lun deletion.", volume['id']) - raise exception.VolumeIsBusy(volume_name=volume['id']) - - except Exception: - LOG.exception("Lun delete for %s failed!", volume['id']) - raise - - def _extend_lun(self, volume, new_size): - """Extend an existing volume's size. - - :param volume: volume object provided by the Manager - :param new_size: new size in GB to be applied - """ - v = self.vmem_mg - - typeid = volume['volume_type_id'] - - if typeid and not self.vmem_mg.utility.is_external_head: - spec_value = self._get_volume_type_extra_spec(volume, "dedup") - if spec_value and spec_value.lower() == "true": - # A Dedup lun's size cannot be modified in Concerto. 
- msg = _('Dedup luns cannot be extended') - raise exception.VolumeDriverException(message=msg) - - size_mb = volume['size'] * units.Ki - new_size_mb = new_size * units.Ki - - # Concerto lun extend requires number of MB to increase size by, - # not the final size value. - # - delta_mb = new_size_mb - size_mb - - LOG.debug("Extending lun %(id)s, from %(size)s to %(new_size)s MB.", - {'id': volume['id'], 'size': size_mb, - 'new_size': new_size_mb}) - - try: - self._send_cmd(v.lun.extend_lun, - "Expand resource successfully", - volume['id'], delta_mb) - - except Exception: - LOG.exception("LUN extend failed!") - raise - - def _create_lun_snapshot(self, snapshot): - """Create a new cinder snapshot on a volume. - - This maps onto a Concerto 'timemark', but we must always first - ensure that a snapshot resource area (SRA) exists, and that a - snapshot policy exists. - - :param snapshot: cinder snapshot object provided by the Manager - - :raises: - VolumeBackendAPIException: If SRA could not be created, or - snapshot policy could not be created - RequestRetryTimeout: If backend could not complete the request - within the allotted timeout. - ViolinBackendErr: If backend reports an error during the - create snapshot phase. 
- """ - - cinder_volume_id = snapshot['volume_id'] - cinder_snapshot_id = snapshot['id'] - - LOG.debug("Creating LUN snapshot %(snap_id)s on volume " - "%(vol_id)s %(dpy_name)s.", - {'snap_id': cinder_snapshot_id, - 'vol_id': cinder_volume_id, - 'dpy_name': snapshot['display_name']}) - - self._ensure_snapshot_resource_area(cinder_volume_id) - - self._ensure_snapshot_policy(cinder_volume_id) - - try: - self._send_cmd( - self.vmem_mg.snapshot.create_lun_snapshot, - "Create TimeMark successfully", - lun=cinder_volume_id, - comment=self._compress_snapshot_id(cinder_snapshot_id), - priority=CONCERTO_DEFAULT_PRIORITY, - enable_notification=False) - except Exception: - LOG.exception("Lun create snapshot for " - "volume %(vol)s snapshot %(snap)s failed!", - {'vol': cinder_volume_id, - 'snap': cinder_snapshot_id}) - raise - - def _delete_lun_snapshot(self, snapshot): - """Delete the specified cinder snapshot. - - :param snapshot: cinder snapshot object provided by the Manager - - :raises: - RequestRetryTimeout: If backend could not complete the request - within the allotted timeout. - ViolinBackendErr: If backend reports an error during the - delete snapshot phase. - """ - LOG.debug("Deleting snapshot %(snap_id)s on volume " - "%(vol_id)s %(dpy_name)s", - {'snap_id': snapshot['id'], - 'vol_id': snapshot['volume_id'], - 'dpy_name': snapshot['display_name']}) - - return self._wait_run_delete_lun_snapshot(snapshot) - - def _create_volume_from_snapshot(self, snapshot, volume): - """Create a new cinder volume from a given snapshot of a lun - - This maps onto a Concerto 'copy snapshot to lun'. Concerto - creates the lun and then copies the snapshot into it. 
- - :param snapshot: cinder snapshot object provided by the Manager - :param volume: cinder volume to be created - """ - cinder_volume_id = volume['id'] - cinder_snapshot_id = snapshot['id'] - size_mb = volume['size'] * units.Ki - result = None - spec_dict = {} - - LOG.debug("Copying snapshot %(snap_id)s onto volume %(vol_id)s " - "%(dpy_name)s", - {'snap_id': cinder_snapshot_id, - 'vol_id': cinder_volume_id, - 'dpy_name': snapshot['display_name']}) - - source_lun_info = self.vmem_mg.lun.get_lun_info(snapshot['volume_id']) - self._validate_lun_type_for_copy(source_lun_info['subType']) - - spec_dict = self._process_extra_specs(volume) - try: - selected_pool = self._get_storage_pool(volume, - size_mb, - spec_dict['pool_type'], - "create_lun") - - except exception.ViolinResourceNotFound: - raise - except Exception: - msg = _('No suitable storage pool found') - LOG.exception(msg) - raise exception.ViolinBackendErr(message=msg) - - try: - result = self.vmem_mg.lun.copy_snapshot_to_new_lun( - source_lun=snapshot['volume_id'], - source_snapshot_comment=self._compress_snapshot_id( - cinder_snapshot_id), - destination=cinder_volume_id, - storage_pool_id=selected_pool['storage_pool_id']) - - if not result['success']: - self._check_error_code(result) - - except Exception: - LOG.exception("Copy snapshot to volume for " - "snapshot %(snap)s volume %(vol)s failed!", - {'snap': cinder_snapshot_id, - 'vol': cinder_volume_id}) - raise - - # get the destination lun info and extract virtualdeviceid - info = self.vmem_mg.lun.get_lun_info(object_id=result['object_id']) - - self._wait_for_lun_or_snap_copy( - snapshot['volume_id'], dest_vdev_id=info['virtualDeviceID']) - - def _create_lun_from_lun(self, src_vol, dest_vol): - """Copy the contents of a lun to a new lun (i.e., full clone). 
- - :param src_vol: cinder volume to clone - :param dest_vol: cinder volume to be created - """ - size_mb = dest_vol['size'] * units.Ki - src_vol_mb = src_vol['size'] * units.Ki - result = None - spec_dict = {} - - LOG.debug("Copying lun %(src_vol_id)s onto lun %(dest_vol_id)s.", - {'src_vol_id': src_vol['id'], - 'dest_vol_id': dest_vol['id']}) - - try: - source_lun_info = self.vmem_mg.lun.get_lun_info(src_vol['id']) - self._validate_lun_type_for_copy(source_lun_info['subType']) - - # in order to do a full clone the source lun must have a - # snapshot resource - self._ensure_snapshot_resource_area(src_vol['id']) - - spec_dict = self._process_extra_specs(dest_vol) - selected_pool = self._get_storage_pool(dest_vol, - size_mb, - spec_dict['pool_type'], - None) - - result = self.vmem_mg.lun.copy_lun_to_new_lun( - source=src_vol['id'], destination=dest_vol['id'], - storage_pool_id=selected_pool['storage_pool_id']) - - if not result['success']: - self._check_error_code(result) - - except Exception: - LOG.exception("Create new lun from lun for source " - "%(src)s => destination %(dest)s failed!", - {'src': src_vol['id'], 'dest': dest_vol['id']}) - raise - - self._wait_for_lun_or_snap_copy( - src_vol['id'], dest_obj_id=result['object_id']) - - # extend the copied lun if requested size is larger then original - if size_mb > src_vol_mb: - # dest_vol size has to be set to reflect what it is currently - dest_vol_size = dest_vol['size'] - dest_vol['size'] = src_vol['size'] - self._extend_lun(dest_vol, dest_vol_size) - dest_vol['size'] = dest_vol_size - - def _send_cmd(self, request_func, success_msgs, *args, **kwargs): - """Run an XG request function, and retry as needed. - - The request will be retried until it returns a success - message, a failure message, or the global request timeout is - hit. - - This wrapper is meant to deal with backend requests that can - fail for any variety of reasons, for instance, when the system - is already busy handling other LUN requests. 
If there is no - space left, or other "fatal" errors are returned (see - _fatal_error_code() for a list of all known error conditions). - - :param request_func: XG api method to call - :param success_msgs: Success messages expected from the backend - :param *args: argument array to be passed to the request_func - :param **kwargs: argument dictionary to be passed to request_func - :returns: the response dict from the last XG call - """ - resp = {} - start = time.time() - done = False - - if isinstance(success_msgs, six.string_types): - success_msgs = [success_msgs] - - while not done: - if time.time() - start >= self.config.violin_request_timeout: - raise exception.ViolinRequestRetryTimeout( - timeout=self.config.violin_request_timeout) - - resp = request_func(*args, **kwargs) - - if not resp['msg']: - # XG requests will return None for a message if no message - # string is passed in the raw response - resp['msg'] = '' - - for msg in success_msgs: - if resp['success'] and msg in resp['msg']: - done = True - break - - if not resp['success']: - self._check_error_code(resp) - done = True - break - - return resp - - def _send_cmd_and_verify(self, request_func, verify_func, - request_success_msgs='', rargs=None, vargs=None): - """Run an XG request function, retry if needed, and verify success. - - If the verification fails, then retry the request/verify cycle - until both functions are successful, the request function - returns a failure message, or the global request timeout is - hit. - - This wrapper is meant to deal with backend requests that can - fail for any variety of reasons, for instance, when the system - is already busy handling other LUN requests. It is also smart - enough to give up if clustering is down (eg no HA available), - there is no space left, or other "fatal" errors are returned - (see _fatal_error_code() for a list of all known error - conditions). 
- - :param request_func: XG api method to call - :param verify_func: function call to verify request was completed - :param request_success_msg: Success message expected for request_func - :param *rargs: argument array to be passed to request_func - :param *vargs: argument array to be passed to verify_func - :returns: the response dict from the last XG call - """ - resp = {} - start = time.time() - request_needed = True - verify_needed = True - - if isinstance(request_success_msgs, six.string_types): - request_success_msgs = [request_success_msgs] - - rargs = rargs if rargs else [] - vargs = vargs if vargs else [] - - while request_needed or verify_needed: - if time.time() - start >= self.config.violin_request_timeout: - raise exception.ViolinRequestRetryTimeout( - timeout=self.config.violin_request_timeout) - - if request_needed: - resp = request_func(*rargs) - - if not resp['msg']: - # XG requests will return None for a message if no message - # string is passed in the raw response - resp['msg'] = '' - - for msg in request_success_msgs: - if resp['success'] and msg in resp['msg']: - request_needed = False - break - - if not resp['success']: - self._check_error_code(resp) - request_needed = False - - elif verify_needed: - success = verify_func(*vargs) - if success: - # XG verify func was completed - verify_needed = False - - return resp - - def _ensure_snapshot_resource_area(self, volume_id): - """Make sure concerto snapshot resource area exists on volume. - - :param volume_id: Cinder volume ID corresponding to the backend LUN - - :raises: - VolumeBackendAPIException: if cinder volume does not exist - on backnd, or SRA could not be created. 
- """ - ctxt = context.get_admin_context() - volume = api.volume_get(ctxt, volume_id) - spec_dict = {} - - if not volume: - msg = (_("Failed to ensure snapshot resource area, could not " - "locate volume for id %s") % volume_id) - raise exception.VolumeBackendAPIException(data=msg) - - if not self.vmem_mg.snapshot.lun_has_a_snapshot_resource( - lun=volume_id): - # Per Concerto documentation, the SRA size should be computed - # as follows - # Size-of-original-LUN Reserve for SRA - # < 500MB 100% - # 500MB to 2G 50% - # >= 2G 20% - # Note: cinder volume.size is in GB, vmemclient wants MB. - lun_size_mb = volume['size'] * units.Ki - if lun_size_mb < 500: - snap_size_mb = lun_size_mb - elif lun_size_mb < 2000: - snap_size_mb = 0.5 * lun_size_mb - else: - snap_size_mb = 0.2 * lun_size_mb - - snap_size_mb = int(math.ceil(snap_size_mb)) - - spec_dict = self._process_extra_specs(volume) - - try: - selected_pool = self._get_storage_pool( - volume, - snap_size_mb, - spec_dict['pool_type'], - None) - - LOG.debug("Creating SRA of %(ssmb)sMB for lun of %(lsmb)sMB " - "on %(vol_id)s", - {'ssmb': snap_size_mb, - 'lsmb': lun_size_mb, - 'vol_id': volume_id}) - - except exception.ViolinResourceNotFound: - raise - except Exception: - msg = _('No suitable storage pool found') - LOG.exception(msg) - raise exception.ViolinBackendErr(message=msg) - - res = self.vmem_mg.snapshot.create_snapshot_resource( - lun=volume_id, - size=snap_size_mb, - enable_notification=False, - policy=CONCERTO_DEFAULT_SRA_POLICY, - enable_expansion=CONCERTO_DEFAULT_SRA_ENABLE_EXPANSION, - expansion_threshold=CONCERTO_DEFAULT_SRA_EXPANSION_THRESHOLD, - expansion_increment=CONCERTO_DEFAULT_SRA_EXPANSION_INCREMENT, - expansion_max_size=CONCERTO_DEFAULT_SRA_EXPANSION_MAX_SIZE, - enable_shrink=CONCERTO_DEFAULT_SRA_ENABLE_SHRINK, - storage_pool_id=selected_pool['storage_pool_id']) - - if (not res['success']): - msg = (_("Failed to create snapshot resource area on " - "volume %(vol)s: %(res)s.") % - {'vol': volume_id, 
'res': res['msg']}) - raise exception.VolumeBackendAPIException(data=msg) - - def _ensure_snapshot_policy(self, volume_id): - """Ensure concerto snapshot policy exists on cinder volume. - - A snapshot policy is required by concerto in order to create snapshots. - - :param volume_id: Cinder volume ID corresponding to the backend LUN - - :raises: - VolumeBackendAPIException: when snapshot policy cannot be created. - """ - if not self.vmem_mg.snapshot.lun_has_a_snapshot_policy( - lun=volume_id): - - res = self.vmem_mg.snapshot.create_snapshot_policy( - lun=volume_id, - max_snapshots=CONCERTO_DEFAULT_POLICY_MAX_SNAPSHOTS, - enable_replication=False, - enable_snapshot_schedule=False, - enable_cdp=False, - retention_mode=CONCERTO_DEFAULT_POLICY_RETENTION_MODE) - - if not res['success']: - msg = (_( - "Failed to create snapshot policy on " - "volume %(vol)s: %(res)s.") % - {'vol': volume_id, 'res': res['msg']}) - raise exception.VolumeBackendAPIException(data=msg) - - def _delete_lun_snapshot_bookkeeping(self, volume_id): - """Clear residual snapshot support resources from LUN. - - :raises: - VolumeBackendAPIException: If snapshots still exist on the LUN. 
- """ - - # Make absolutely sure there are no snapshots present - try: - snaps = self.vmem_mg.snapshot.get_snapshots(volume_id) - if len(snaps) > 0: - msg = (_("Cannot delete LUN %s while snapshots exist.") % - volume_id) - raise exception.VolumeBackendAPIException(data=msg) - except vmemclient.core.error.NoMatchingObjectIdError: - pass - except vmemclient.core.error.MissingParameterError: - pass - - try: - res = self.vmem_mg.snapshot.delete_snapshot_policy( - lun=volume_id) - if not res['success']: - if 'TimeMark is disabled' in res['msg']: - LOG.debug("Verified no snapshot policy is on volume %s.", - volume_id) - else: - msg = (_("Unable to delete snapshot policy on " - "volume %s.") % volume_id) - raise exception.VolumeBackendAPIException(data=msg) - else: - LOG.debug("Deleted snapshot policy on volume " - "%(vol)s, result %(res)s.", - {'vol': volume_id, 'res': res}) - except vmemclient.core.error.NoMatchingObjectIdError: - LOG.debug("Verified no snapshot policy present on volume %s.", - volume_id) - pass - - try: - res = self.vmem_mg.snapshot.delete_snapshot_resource( - lun=volume_id) - LOG.debug("Deleted snapshot resource area on " - "volume %(vol)s, result %(res)s.", - {'vol': volume_id, 'res': res}) - except vmemclient.core.error.NoMatchingObjectIdError: - LOG.debug("Verified no snapshot resource area present on " - "volume %s.", volume_id) - pass - - def _compress_snapshot_id(self, cinder_snap_id): - """Compress cinder snapshot ID so it fits in backend. - - Compresses to fit in 32-chars. - """ - return ''.join(six.text_type(cinder_snap_id).split('-')) - - def _get_snapshot_from_lun_snapshots( - self, cinder_volume_id, cinder_snap_id): - """Locate backend snapshot dict associated with cinder snapshot id. - - :returns: Cinder snapshot dictionary if found, None otherwise. 
- """ - - try: - snaps = self.vmem_mg.snapshot.get_snapshots(cinder_volume_id) - except vmemclient.core.error.NoMatchingObjectIdError: - return None - - key = self._compress_snapshot_id(cinder_snap_id) - - for s in snaps: - if s['comment'] == key: - # Remap return dict to its uncompressed form - s['comment'] = cinder_snap_id - return s - - def _wait_for_lun_or_snap_copy(self, src_vol_id, dest_vdev_id=None, - dest_obj_id=None): - """Poll to see when a lun or snap copy to a lun is complete. - - :param src_vol_id: cinder volume ID of source volume - :param dest_vdev_id: virtual device ID of destination, for snap copy - :param dest_obj_id: lun object ID of destination, for lun copy - :returns: True if successful, False otherwise - """ - wait_id = None - wait_func = None - - if dest_vdev_id: - wait_id = dest_vdev_id - wait_func = self.vmem_mg.snapshot.get_snapshot_copy_status - elif dest_obj_id: - wait_id = dest_obj_id - wait_func = self.vmem_mg.lun.get_lun_copy_status - else: - return False - - def _loop_func(): - LOG.debug("Entering _wait_for_lun_or_snap_copy loop: " - "vdev=%s, objid=%s", dest_vdev_id, dest_obj_id) - - target_id, mb_copied, percent = wait_func(src_vol_id) - - if target_id is None: - # pre-copy transient result - LOG.debug("lun or snap copy prepping.") - pass - elif target_id != wait_id: - # the copy is complete, another lun is being copied - LOG.debug("lun or snap copy complete.") - raise loopingcall.LoopingCallDone(retvalue=True) - elif mb_copied is not None: - # copy is in progress - LOG.debug("MB copied: %(copied)d, percent done: %(percent)d.", - {'copied': mb_copied, 'percent': percent}) - pass - elif percent == 0: - # copy has just started - LOG.debug("lun or snap copy started.") - pass - elif percent == 100: - # copy is complete - LOG.debug("lun or snap copy complete.") - raise loopingcall.LoopingCallDone(retvalue=True) - else: - # unexpected case - LOG.debug("unexpected case (%(id)s, %(bytes)s, %(percent)s)", - {'id': target_id, - 'bytes': 
mb_copied, - 'percent': six.text_type(percent)}) - raise loopingcall.LoopingCallDone(retvalue=False) - - timer = loopingcall.FixedIntervalLoopingCall(_loop_func) - success = timer.start(interval=1).wait() - - return success - - def _is_supported_vmos_version(self, version_string): - """Check a version string for compatibility with OpenStack. - - Compare a version string against the global regex of versions - compatible with OpenStack. - - :param version_string: array's gateway version string - :returns: True if supported, false if not - """ - for pattern in CONCERTO_SUPPORTED_VERSION_PATTERNS: - if re.match(pattern, version_string): - return True - return False - - def _check_error_code(self, response): - """Raise an exception when backend returns certain errors. - - Error codes returned from the backend have to be examined - individually. Not all of them are fatal. For example, lun attach - failing becase the client is already attached is not a fatal error. - - :param response: a response dict result from the vmemclient request - """ - if "Error: 0x9001003c" in response['msg']: - # This error indicates a duplicate attempt to attach lun, - # non-fatal error - pass - elif "Error: 0x9002002b" in response['msg']: - # lun unexport failed - lun is not exported to any clients, - # non-fatal error - pass - elif "Error: 0x09010023" in response['msg']: - # lun delete failed - dependent snapshot copy in progress, - # fatal error - raise exception.ViolinBackendErr(message=response['msg']) - elif "Error: 0x09010048" in response['msg']: - # lun delete failed - dependent snapshots still exist, - # fatal error - raise exception.ViolinBackendErr(message=response['msg']) - elif "Error: 0x90010022" in response['msg']: - # lun create failed - lun with same name already exists, - # fatal error - raise exception.ViolinBackendErrExists() - elif "Error: 0x90010089" in response['msg']: - # lun export failed - lun is still being created as copy, - # fatal error - raise 
exception.ViolinBackendErr(message=response['msg']) - else: - # assume any other error is fatal - raise exception.ViolinBackendErr(message=response['msg']) - - def _get_volume_type_extra_spec(self, volume, spec_key): - """Parse data stored in a volume_type's extra_specs table. - - :param volume: volume object containing volume_type to query - :param spec_key: the metadata key to search for - :returns: string value associated with spec_key - """ - spec_value = None - ctxt = context.get_admin_context() - typeid = volume['volume_type_id'] - if typeid: - volume_type = volume_types.get_volume_type(ctxt, typeid) - volume_specs = volume_type.get('extra_specs') - for key, val in volume_specs.items(): - - # Strip the prefix "capabilities" - if ':' in key: - scope = key.split(':') - key = scope[1] - if key == spec_key: - spec_value = val - break - - return spec_value - - def _get_violin_extra_spec(self, volume, spec_key): - """Parse volume_type's extra_specs table for a violin-specific key. - - :param volume: volume object containing volume_type to query - :param spec_key: the metadata key to search for - :returns: string value associated with spec_key - """ - spec_value = None - ctxt = context.get_admin_context() - typeid = volume['volume_type_id'] - if typeid: - volume_type = volume_types.get_volume_type(ctxt, typeid) - volume_specs = volume_type.get('extra_specs') - for key, val in volume_specs.items(): - - # Strip the prefix "violin" - if ':' in key: - scope = key.split(':') - key = scope[1] - if scope[0] == "violin" and key == spec_key: - spec_value = val - break - return spec_value - - def _get_storage_pool(self, volume, size_in_mb, pool_type, usage): - # User-specified pool takes precedence over others - - pool = None - typeid = volume.get('volume_type_id') - if typeid: - # Extract the storage_pool name if one is specified - pool = self._get_violin_extra_spec(volume, "storage_pool") - - # Select a storage pool - selected_pool = self.vmem_mg.pool.select_storage_pool( - 
size_in_mb, - pool_type, - pool, - self.config.violin_dedup_only_pools, - self.config.violin_dedup_capable_pools, - self.config.violin_pool_allocation_method, - usage) - if selected_pool is None: - # Backend has not provided a suitable storage pool - msg = _("Backend does not have a suitable storage pool.") - raise exception.ViolinResourceNotFound(message=msg) - - LOG.debug("Storage pool returned is %s", - selected_pool['storage_pool']) - - return selected_pool - - def _process_extra_specs(self, volume): - spec_dict = {} - thin_lun = False - thick_lun = False - dedup = False - size_mb = volume['size'] * units.Ki - full_size_mb = size_mb - - if self.config.san_thin_provision: - thin_lun = True - # Set the actual allocation size for thin lun - # default here is 10% - size_mb = int(math.ceil(float(size_mb) / LUN_ALLOC_SZ)) - - typeid = volume.get('volume_type_id') - if typeid: - # extra_specs with thin specified overrides san_thin_provision - spec_value = self._get_volume_type_extra_spec(volume, "thin") - if not thin_lun and spec_value and spec_value.lower() == "true": - thin_lun = True - # Set the actual allocation size for thin lun - # default here is 10% - size_mb = int(math.ceil(float(size_mb) / LUN_ALLOC_SZ)) - - # Set thick lun before checking for dedup, - # since dedup is always thin - if not thin_lun: - thick_lun = True - - spec_value = self._get_volume_type_extra_spec(volume, "dedup") - if spec_value and spec_value.lower() == "true": - dedup = True - # A dedup lun is always a thin lun - thin_lun = True - thick_lun = False - # Set the actual allocation size for thin lun - # default here is 10%. 
The actual allocation may - # different, depending on other factors - size_mb = int(math.ceil(float(full_size_mb) / LUN_ALLOC_SZ)) - - if dedup: - spec_dict['pool_type'] = "dedup" - elif thin_lun: - spec_dict['pool_type'] = "thin" - else: - spec_dict['pool_type'] = "thick" - thick_lun = True - - spec_dict['size_mb'] = size_mb - spec_dict['thick'] = thick_lun - spec_dict['thin'] = thin_lun - spec_dict['dedup'] = dedup - - return spec_dict - - def _get_volume_stats(self, san_ip): - """Gathers array stats and converts them to GB values.""" - free_gb = 0 - total_gb = 0 - - owner = socket.getfqdn(san_ip) - # Store DNS lookups to prevent asking the same question repeatedly - owner_lookup = {san_ip: owner} - pools = self.vmem_mg.pool.get_storage_pools( - verify=True, - include_full_info=True, - ) - - for short_info, full_info in pools: - mod = '' - pool_free_mb = 0 - pool_total_mb = 0 - for dev in full_info.get('physicaldevices', []): - if dev['owner'] not in owner_lookup: - owner_lookup[dev['owner']] = socket.getfqdn(dev['owner']) - if owner_lookup[dev['owner']] == owner: - pool_free_mb += dev['availsize_mb'] - pool_total_mb += dev['size_mb'] - elif not mod: - mod = ' *' - LOG.debug('pool %(pool)s: %(avail)s / %(total)s MB free %(mod)s', - {'pool': short_info['name'], 'avail': pool_free_mb, - 'total': pool_total_mb, 'mod': mod}) - free_gb += int(pool_free_mb / units.Ki) - total_gb += int(pool_total_mb / units.Ki) - - data = { - 'vendor_name': 'Violin Memory, Inc.', - 'reserved_percentage': self.config.reserved_percentage, - 'QoS_support': False, - 'free_capacity_gb': free_gb, - 'total_capacity_gb': total_gb, - 'consistencygroup_support': False, - } - - return data - - def _wait_run_delete_lun_snapshot(self, snapshot): - """Run and wait for LUN snapshot to complete. 
- - :param snapshot -- cinder snapshot object provided by the Manager - """ - cinder_volume_id = snapshot['volume_id'] - cinder_snapshot_id = snapshot['id'] - - comment = self._compress_snapshot_id(cinder_snapshot_id) - oid = self.vmem_mg.snapshot.snapshot_comment_to_object_id( - cinder_volume_id, comment) - - def _loop_func(): - LOG.debug("Entering _wait_run_delete_lun_snapshot loop: " - "vol=%(vol)s, snap_id=%(snap_id)s, oid=%(oid)s", - {'vol': cinder_volume_id, - 'oid': oid, - 'snap_id': cinder_snapshot_id}) - - try: - ans = self.vmem_mg.snapshot.delete_lun_snapshot( - snapshot_object_id=oid) - except Exception: - msg = (_("Failed to delete snapshot " - "%(snap)s of volume %(vol)s") % - {'snap': cinder_snapshot_id, 'vol': cinder_volume_id}) - raise exception.ViolinBackendErr(msg) - - if ans['success']: - LOG.debug("Delete snapshot %(snap_id)s of %(vol)s: " - "success", {'vol': cinder_volume_id, - 'snap_id': cinder_snapshot_id}) - raise loopingcall.LoopingCallDone(retvalue=True) - else: - LOG.warning("Delete snapshot %(snap)s of %(vol)s " - "encountered temporary error: %(msg)s", - {'snap': cinder_snapshot_id, - 'vol': cinder_volume_id, - 'msg': ans['msg']}) - - timer = loopingcall.FixedIntervalLoopingCall(_loop_func) - success = timer.start(interval=1).wait() - - return success - - def _validate_lun_type_for_copy(self, lun_type): - """Make sure volume type is thick. - - :param lun_type: Cinder volume type - - :raises: - VolumeBackendAPIException: if volume type is not thick, - copying the lun is not possible. - """ - if lun_type != CONCERTO_LUN_TYPE_THICK: - msg = _('Lun copy currently only supported for thick luns') - LOG.error(msg) - raise exception.ViolinBackendErr(message=msg) diff --git a/cinder/volume/drivers/violin/v7000_fcp.py b/cinder/volume/drivers/violin/v7000_fcp.py deleted file mode 100644 index e987b6cfd..000000000 --- a/cinder/volume/drivers/violin/v7000_fcp.py +++ /dev/null @@ -1,395 +0,0 @@ -# Copyright 2015 Violin Memory, Inc. 
-# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -""" -Violin 7000 Series All-Flash Array Volume Driver - -Provides fibre channel specific LUN services for V7000 series flash -arrays. - -This driver requires Concerto v7.0.0 or newer software on the array. - -You will need to install the Violin Memory REST client library: -sudo pip install vmemclient - -Set the following in the cinder.conf file to enable the VMEM V7000 -Fibre Channel Driver along with the required flags: - -volume_driver=cinder.volume.drivers.violin.v7000_fcp.V7000FCDriver - -NOTE: this driver file requires the use of synchronization points for -certain types of backend operations, and as a result may not work -properly in an active-active HA configuration. See OpenStack Cinder -driver documentation for more information. -""" - -from oslo_log import log as logging - -from cinder import exception -from cinder.i18n import _ -from cinder import interface -from cinder import utils -from cinder.volume import driver -from cinder.volume.drivers.san import san -from cinder.volume.drivers.violin import v7000_common -from cinder.zonemanager import utils as fczm_utils - -import socket - -LOG = logging.getLogger(__name__) - - -@interface.volumedriver -class V7000FCPDriver(driver.FibreChannelDriver): - """Executes commands relating to fibre channel based Violin Memory arrays. 
- - Version history: - 1.0 - Initial driver - """ - - VERSION = '1.0' - - # ThirdPartySystems wiki page - CI_WIKI_NAME = "Violin_Memory_CI" - - # TODO(smcginnis) Either remove this if CI requirements are met, or - # remove this driver in the Queens release per normal deprecation - SUPPORTED = False - - def __init__(self, *args, **kwargs): - super(V7000FCPDriver, self).__init__(*args, **kwargs) - self.gateway_fc_wwns = [] - self.stats = {} - self.configuration.append_config_values(v7000_common.violin_opts) - self.configuration.append_config_values(san.san_opts) - self.common = v7000_common.V7000Common(self.configuration) - self.lookup_service = fczm_utils.create_lookup_service() - - LOG.info("Initialized driver %(name)s version: %(vers)s", - {'name': self.__class__.__name__, 'vers': self.VERSION}) - - def do_setup(self, context): - """Any initialization the driver does while starting.""" - super(V7000FCPDriver, self).do_setup(context) - - self.common.do_setup(context) - self.gateway_fc_wwns = self._get_active_fc_targets() - - # Register the client with the storage array - fc_version = self.VERSION + "-FCP" - self.common.vmem_mg.utility.set_managed_by_openstack_version( - fc_version) - - def check_for_setup_error(self): - """Returns an error if prerequisites aren't met.""" - self.common.check_for_setup_error() - if len(self.gateway_fc_wwns) == 0: - raise exception.ViolinInvalidBackendConfig( - reason=_('No FCP targets found')) - - def create_volume(self, volume): - """Creates a volume.""" - self.common._create_lun(volume) - - def create_volume_from_snapshot(self, volume, snapshot): - """Creates a volume from a snapshot.""" - self.common._create_volume_from_snapshot(snapshot, volume) - - def create_cloned_volume(self, volume, src_vref): - """Creates a clone of the specified volume.""" - self.common._create_lun_from_lun(src_vref, volume) - - def delete_volume(self, volume): - """Deletes a volume.""" - self.common._delete_lun(volume) - - def extend_volume(self, volume, 
new_size): - """Extend an existing volume's size.""" - self.common._extend_lun(volume, new_size) - - def create_snapshot(self, snapshot): - """Creates a snapshot.""" - self.common._create_lun_snapshot(snapshot) - - def delete_snapshot(self, snapshot): - """Deletes a snapshot.""" - self.common._delete_lun_snapshot(snapshot) - - def ensure_export(self, context, volume): - """Synchronously checks and re-exports volumes at cinder start time.""" - pass - - def create_export(self, context, volume, connector): - """Exports the volume.""" - pass - - def remove_export(self, context, volume): - """Removes an export for a logical volume.""" - pass - - @fczm_utils.add_fc_zone - def initialize_connection(self, volume, connector): - """Allow connection to connector and return connection info.""" - - LOG.debug("Initialize_connection: initiator - %(initiator)s host - " - "%(host)s wwpns - %(wwpns)s", - {'initiator': connector['initiator'], - 'host': connector['host'], - 'wwpns': connector['wwpns']}) - - self.common.vmem_mg.client.create_client( - name=connector['host'], proto='FC', fc_wwns=connector['wwpns']) - - lun_id = self._export_lun(volume, connector) - - target_wwns, init_targ_map = self._build_initiator_target_map( - connector) - - properties = {} - properties['target_discovered'] = True - properties['target_wwn'] = target_wwns - properties['target_lun'] = lun_id - properties['initiator_target_map'] = init_targ_map - - LOG.debug("Return FC data for zone addition: %(properties)s.", - {'properties': properties}) - - return {'driver_volume_type': 'fibre_channel', 'data': properties} - - @fczm_utils.remove_fc_zone - def terminate_connection(self, volume, connector, **kwargs): - """Terminates the connection (target<-->initiator).""" - - self._unexport_lun(volume, connector) - - properties = {} - - if not self._is_initiator_connected_to_array(connector): - target_wwns, init_targ_map = self._build_initiator_target_map( - connector) - properties['target_wwn'] = target_wwns - 
properties['initiator_target_map'] = init_targ_map - - LOG.debug("Return FC data for zone deletion: %(properties)s.", - {'properties': properties}) - - return {'driver_volume_type': 'fibre_channel', 'data': properties} - - def get_volume_stats(self, refresh=False): - """Get volume stats. - - If 'refresh' is True, update the stats first. - """ - if refresh or not self.stats: - self._update_volume_stats() - return self.stats - - @utils.synchronized('vmem-export') - def _export_lun(self, volume, connector=None): - """Generates the export configuration for the given volume. - - :param volume: volume object provided by the Manager - :param connector: connector object provided by the Manager - :returns: the LUN ID assigned by the backend - """ - lun_id = '' - v = self.common.vmem_mg - - if not connector: - raise exception.ViolinInvalidBackendConfig( - reason=_('No initiators found, cannot proceed')) - - LOG.debug("Exporting lun %(vol_id)s - initiator wwpns %(i_wwpns)s " - "- target wwpns %(t_wwpns)s.", - {'vol_id': volume['id'], 'i_wwpns': connector['wwpns'], - 't_wwpns': self.gateway_fc_wwns}) - - try: - lun_id = self.common._send_cmd_and_verify( - v.lun.assign_lun_to_client, - self._is_lun_id_ready, - "Assign SAN client successfully", - [volume['id'], connector['host'], - "ReadWrite"], - [volume['id'], connector['host']]) - - except exception.ViolinBackendErr: - LOG.exception("Backend returned err for lun export.") - raise - - except Exception: - raise exception.ViolinInvalidBackendConfig( - reason=_('LUN export failed!')) - - lun_id = self._get_lun_id(volume['id'], connector['host']) - LOG.info("Exported lun %(vol_id)s on lun_id %(lun_id)s.", - {'vol_id': volume['id'], 'lun_id': lun_id}) - - return lun_id - - @utils.synchronized('vmem-export') - def _unexport_lun(self, volume, connector=None): - """Removes the export configuration for the given volume. 
- - :param volume: volume object provided by the Manager - """ - v = self.common.vmem_mg - - LOG.info("Unexporting lun %s.", volume['id']) - - try: - self.common._send_cmd(v.lun.unassign_client_lun, - "Unassign SAN client successfully", - volume['id'], connector['host'], True) - - except exception.ViolinBackendErr: - LOG.exception("Backend returned err for lun export.") - raise - - except Exception: - LOG.exception("LUN unexport failed!") - raise - - def _update_volume_stats(self): - """Gathers array stats and converts them to GB values.""" - data = {} - total_gb = 0 - free_gb = 0 - v = self.common.vmem_mg.basic - array_name_triple = socket.gethostbyaddr(self.configuration.san_ip) - array_name = array_name_triple[0] - - phy_devices = v.get("/batch/physicalresource/physicaldevice") - - all_devices = [x for x in phy_devices['data']['physical_devices']] - - for x in all_devices: - if socket.getfqdn(x['owner']) == array_name: - total_gb += x['size_mb'] // 1024 - free_gb += x['availsize_mb'] // 1024 - - backend_name = self.configuration.volume_backend_name - data['volume_backend_name'] = backend_name or self.__class__.__name__ - data['vendor_name'] = 'Violin Memory, Inc.' - data['driver_version'] = self.VERSION - data['storage_protocol'] = 'fibre_channel' - data['reserved_percentage'] = 0 - data['QoS_support'] = False - data['total_capacity_gb'] = total_gb - data['free_capacity_gb'] = free_gb - for i in data: - LOG.debug("stat update: %(name)s=%(data)s", - {'name': i, 'data': data[i]}) - - self.stats = data - - def _get_active_fc_targets(self): - """Get a list of gateway WWNs that can be used as FCP targets. 
- - :param mg_conn: active XG connection to one of the gateways - :returns: list of WWNs in openstack format - """ - v = self.common.vmem_mg - active_gw_fcp_wwns = [] - - fc_info = v.adapter.get_fc_info() - for x in fc_info.values(): - active_gw_fcp_wwns.append(x[0]) - - return active_gw_fcp_wwns - - def _get_lun_id(self, volume_name, client_name): - """Get the lun ID for an exported volume. - - If the lun is successfully assigned (exported) to a client, the - client info has the lun_id. - - :param volume_name: name of volume to query for lun ID - :param client_name: name of client associated with the volume - :returns: integer value of lun ID - """ - v = self.common.vmem_mg - lun_id = None - - client_info = v.client.get_client_info(client_name) - - for x in client_info['FibreChannelDevices']: - if volume_name == x['name']: - lun_id = x['lun'] - break - - if lun_id: - lun_id = int(lun_id) - - return lun_id - - def _is_lun_id_ready(self, volume_name, client_name): - """Get the lun ID for an exported volume. - - If the lun is successfully assigned (exported) to a client, the - client info has the lun_id. 
- - :param volume_name: name of volume to query for lun ID - :param client_name: name of client associated with the volume - :returns: Returns True if lun is ready, False otherwise - """ - - lun_id = -1 - lun_id = self._get_lun_id(volume_name, client_name) - if lun_id is None: - return False - else: - return True - - def _build_initiator_target_map(self, connector): - """Build the target_wwns and the initiator target map.""" - target_wwns = [] - init_targ_map = {} - - if self.lookup_service: - dev_map = self.lookup_service.get_device_mapping_from_network( - connector['wwpns'], self.gateway_fc_wwns) - - for fabric_name in dev_map: - fabric = dev_map[fabric_name] - target_wwns += fabric['target_port_wwn_list'] - for initiator in fabric['initiator_port_wwn_list']: - if initiator not in init_targ_map: - init_targ_map[initiator] = [] - init_targ_map[initiator] += fabric['target_port_wwn_list'] - init_targ_map[initiator] = list( - set(init_targ_map[initiator])) - - target_wwns = list(set(target_wwns)) - - else: - initiator_wwns = connector['wwpns'] - target_wwns = self.gateway_fc_wwns - for initiator in initiator_wwns: - init_targ_map[initiator] = target_wwns - - return target_wwns, init_targ_map - - def _is_initiator_connected_to_array(self, connector): - """Check if any initiator wwns still have active sessions.""" - v = self.common.vmem_mg - - client = v.client.get_client_info(connector['host']) - - if len(client['FibreChannelDevices']): - # each entry in the FibreChannelDevices array is a dict - # describing an active lun assignment - return True - return False diff --git a/cinder/volume/drivers/violin/v7000_iscsi.py b/cinder/volume/drivers/violin/v7000_iscsi.py deleted file mode 100644 index f64455ddb..000000000 --- a/cinder/volume/drivers/violin/v7000_iscsi.py +++ /dev/null @@ -1,352 +0,0 @@ -# Copyright 2016 Violin Memory, Inc. -# All Rights Reserved. 
-# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -""" -Violin 7000 Series All-Flash Array iSCSI Volume Driver - -Provides ISCSI specific LUN services for V7000 series flash arrays. - -This driver requires Concerto v7.5.4 or newer software on the array. - -You will need to install the python VMEM REST client: -sudo pip install vmemclient - -Set the following in the cinder.conf file to enable the VMEM V7000 -ISCSI Driver along with the required flags: - -volume_driver=cinder.volume.drivers.violin.v7000_iscsi.V7000ISCSIDriver -""" - -import random -import uuid - -from oslo_log import log as logging - -from cinder import exception -from cinder.i18n import _ -from cinder import interface -from cinder.volume import driver -from cinder.volume.drivers.san import san -from cinder.volume.drivers.violin import v7000_common - -LOG = logging.getLogger(__name__) - - -@interface.volumedriver -class V7000ISCSIDriver(driver.ISCSIDriver): - """Executes commands relating to iscsi based Violin Memory arrays. 
- - Version history: - 1.0 - Initial driver - """ - - VERSION = '1.0' - - # ThirdPartySystems wiki page - CI_WIKI_NAME = "Violin_Memory_CI" - - # TODO(smcginnis) Either remove this if CI requirements are met, or - # remove this driver in the Queens release per normal deprecation - SUPPORTED = False - - def __init__(self, *args, **kwargs): - super(V7000ISCSIDriver, self).__init__(*args, **kwargs) - self.stats = {} - self.gateway_iscsi_ip_addresses = [] - self.configuration.append_config_values(v7000_common.violin_opts) - self.configuration.append_config_values(san.san_opts) - self.common = v7000_common.V7000Common(self.configuration) - - LOG.info("Initialized driver %(name)s version: %(vers)s", - {'name': self.__class__.__name__, 'vers': self.VERSION}) - - def do_setup(self, context): - """Any initialization the driver does while starting.""" - super(V7000ISCSIDriver, self).do_setup(context) - - self.common.do_setup(context) - - # Register the client with the storage array - iscsi_version = self.VERSION + "-ISCSI" - self.common.vmem_mg.utility.set_managed_by_openstack_version( - iscsi_version, protocol="iSCSI") - - # Getting iscsi IPs from the array is incredibly expensive, - # so only do it once. 
- if not self.configuration.violin_iscsi_target_ips: - LOG.warning("iSCSI target ip addresses not configured.") - self.gateway_iscsi_ip_addresses = ( - self.common.vmem_mg.utility.get_iscsi_interfaces()) - else: - self.gateway_iscsi_ip_addresses = ( - self.configuration.violin_iscsi_target_ips) - - def check_for_setup_error(self): - """Returns an error if prerequisites aren't met.""" - self.common.check_for_setup_error() - if len(self.gateway_iscsi_ip_addresses) == 0: - msg = _('No iSCSI IPs configured on SAN gateway') - raise exception.ViolinInvalidBackendConfig(reason=msg) - - def create_volume(self, volume): - """Creates a volume.""" - self.common._create_lun(volume) - - def create_volume_from_snapshot(self, volume, snapshot): - """Creates a volume from a snapshot.""" - self.common._create_volume_from_snapshot(snapshot, volume) - - def create_cloned_volume(self, volume, src_vref): - """Creates a clone of the specified volume.""" - self.common._create_lun_from_lun(src_vref, volume) - - def delete_volume(self, volume): - """Deletes a volume.""" - self.common._delete_lun(volume) - - def extend_volume(self, volume, new_size): - """Extend an existing volume's size.""" - self.common._extend_lun(volume, new_size) - - def create_snapshot(self, snapshot): - """Creates a snapshot.""" - self.common._create_lun_snapshot(snapshot) - - def delete_snapshot(self, snapshot): - """Deletes a snapshot.""" - self.common._delete_lun_snapshot(snapshot) - - def ensure_export(self, context, volume): - """Synchronously checks and re-exports volumes at cinder start time.""" - pass - - def create_export(self, context, volume, connector): - """Exports the volume.""" - pass - - def remove_export(self, context, volume): - """Removes an export for a logical volume.""" - pass - - def initialize_connection(self, volume, connector): - """Allow connection to connector and return connection info.""" - resp = {} - - LOG.debug("Initialize_connection: initiator - %(initiator)s host - " - "%(host)s ip 
- %(ip)s", - {'initiator': connector['initiator'], - 'host': connector['host'], - 'ip': connector['ip']}) - - iqn = self._get_iqn(connector) - - # pick a random single target to give the connector since - # there is no multipathing support - tgt = self.gateway_iscsi_ip_addresses[random.randint( - 0, len(self.gateway_iscsi_ip_addresses) - 1)] - - resp = self.common.vmem_mg.client.create_client( - name=connector['host'], proto='iSCSI', - iscsi_iqns=connector['initiator']) - - # raise if we failed for any reason other than a 'client - # already exists' error code - if not resp['success'] and 'Error: 0x900100cd' not in resp['msg']: - msg = _("Failed to create iscsi client") - raise exception.ViolinBackendErr(message=msg) - - resp = self.common.vmem_mg.client.create_iscsi_target( - name=iqn, client_name=connector['host'], - ip=self.gateway_iscsi_ip_addresses, access_mode='ReadWrite') - - # same here, raise for any failure other than a 'target - # already exists' error code - if not resp['success'] and 'Error: 0x09024309' not in resp['msg']: - msg = _("Failed to create iscsi target") - raise exception.ViolinBackendErr(message=msg) - - lun_id = self._export_lun(volume, iqn, connector) - - properties = {} - properties['target_discovered'] = False - properties['target_iqn'] = iqn - properties['target_portal'] = '%s:%s' % (tgt, '3260') - properties['target_lun'] = lun_id - properties['volume_id'] = volume['id'] - - LOG.debug("Return ISCSI data: %(properties)s.", - {'properties': properties}) - - return {'driver_volume_type': 'iscsi', 'data': properties} - - def terminate_connection(self, volume, connector, **kwargs): - """Terminates the connection (target<-->initiator).""" - iqn = self._get_iqn(connector) - self._unexport_lun(volume, iqn, connector) - - def get_volume_stats(self, refresh=False): - """Get volume stats. - - If 'refresh' is True, update the stats first. 
- """ - if refresh or not self.stats: - self._update_volume_stats() - return self.stats - - def _update_volume_stats(self): - """Gathers array stats and converts them to GB values.""" - data = self.common._get_volume_stats(self.configuration.san_ip) - - backend_name = self.configuration.volume_backend_name - data['volume_backend_name'] = backend_name or self.__class__.__name__ - data['driver_version'] = self.VERSION - data['storage_protocol'] = 'iSCSI' - for i in data: - LOG.debug("stat update: %(name)s=%(data)s", - {'name': i, 'data': data[i]}) - - self.stats = data - - def _export_lun(self, volume, target, connector): - """Generates the export configuration for the given volume. - - :param volume: volume object provided by the Manager - :param connector: connector object provided by the Manager - :returns: the LUN ID assigned by the backend - """ - lun_id = '' - v = self.common.vmem_mg - - LOG.debug("Exporting lun %(vol_id)s - initiator iqns %(i_iqns)s " - "- target iqns %(t_iqns)s.", - {'vol_id': volume['id'], 'i_iqns': connector['initiator'], - 't_iqns': self.gateway_iscsi_ip_addresses}) - - try: - lun_id = self.common._send_cmd_and_verify( - v.lun.assign_lun_to_iscsi_target, - self._is_lun_id_ready, - "Assign device successfully", - [volume['id'], target], - [volume['id'], connector['host']]) - - except exception.ViolinBackendErr: - LOG.exception("Backend returned error for lun export.") - raise - - except Exception: - raise exception.ViolinInvalidBackendConfig( - reason=_('LUN export failed!')) - - lun_id = self._get_lun_id(volume['id'], connector['host']) - LOG.info("Exported lun %(vol_id)s on lun_id %(lun_id)s.", - {'vol_id': volume['id'], 'lun_id': lun_id}) - - return lun_id - - def _unexport_lun(self, volume, target, connector): - """Removes the export configuration for the given volume. 
- - The equivalent CLI command is "no lun export container - name " - - Arguments: - volume -- volume object provided by the Manager - """ - v = self.common.vmem_mg - - LOG.info("Unexporting lun %(vol)s host is %(host)s.", - {'vol': volume['id'], 'host': connector['host']}) - - try: - self.common._send_cmd(v.lun.unassign_lun_from_iscsi_target, - "Unassign device successfully", - volume['id'], target, True) - - except exception.ViolinBackendErrNotFound: - LOG.info("Lun %s already unexported, continuing...", - volume['id']) - - except Exception: - LOG.exception("LUN unexport failed!") - msg = _("LUN unexport failed") - raise exception.ViolinBackendErr(message=msg) - - def _is_lun_id_ready(self, volume_name, client_name): - """Get the lun ID for an exported volume. - - If the lun is successfully assigned (exported) to a client, the - client info has the lun_id. - - Note: The structure returned for iscsi is different from the - one returned for FC. Therefore this function is here instead of - common. - - Arguments: - volume_name -- name of volume to query for lun ID - client_name -- name of client associated with the volume - - Returns: - lun_id -- Returns True or False - """ - - lun_id = -1 - lun_id = self._get_lun_id(volume_name, client_name) - - if lun_id is None: - return False - else: - return True - - def _get_lun_id(self, volume_name, client_name): - """Get the lun ID for an exported volume. - - If the lun is successfully assigned (exported) to a client, the - client info has the lun_id. - - Note: The structure returned for iscsi is different from the - one returned for FC. Therefore this function is here instead of - common. 
- - Arguments: - volume_name -- name of volume to query for lun ID - client_name -- name of client associated with the volume - - Returns: - lun_id -- integer value of lun ID - """ - v = self.common.vmem_mg - lun_id = None - - client_info = v.client.get_client_info(client_name) - - for x in client_info['ISCSIDevices']: - if volume_name == x['name']: - lun_id = x['lun'] - break - - if lun_id: - lun_id = int(lun_id) - - return lun_id - - def _get_iqn(self, connector): - # The vmemclient connection properties list hostname field may - # change depending on failover cluster config. Use a UUID - # from the backend's IP address to avoid a potential naming - # issue. - host_uuid = uuid.uuid3(uuid.NAMESPACE_DNS, self.configuration.san_ip) - return "%s%s.%s" % (self.configuration.iscsi_target_prefix, - connector['host'], host_uuid) diff --git a/cinder/volume/drivers/vmware/__init__.py b/cinder/volume/drivers/vmware/__init__.py deleted file mode 100644 index e69de29bb..000000000 diff --git a/cinder/volume/drivers/vmware/datastore.py b/cinder/volume/drivers/vmware/datastore.py deleted file mode 100644 index 33cb3cb56..000000000 --- a/cinder/volume/drivers/vmware/datastore.py +++ /dev/null @@ -1,307 +0,0 @@ -# Copyright (c) 2014 VMware, Inc. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -""" -Classes and utility methods for datastore selection. 
-""" - -import random - -from oslo_log import log as logging -from oslo_vmware import pbm -from oslo_vmware import vim_util - -from cinder.volume.drivers.vmware import exceptions as vmdk_exceptions - - -LOG = logging.getLogger(__name__) - - -class DatastoreType(object): - """Supported datastore types.""" - - NFS = "nfs" - VMFS = "vmfs" - VSAN = "vsan" - VVOL = "vvol" - - _ALL_TYPES = {NFS, VMFS, VSAN, VVOL} - - @staticmethod - def get_all_types(): - return DatastoreType._ALL_TYPES - - -class DatastoreSelector(object): - """Class for selecting datastores which satisfy input requirements.""" - - HARD_AFFINITY_DS_TYPE = "hardAffinityDatastoreTypes" - HARD_ANTI_AFFINITY_DS = "hardAntiAffinityDatastores" - SIZE_BYTES = "sizeBytes" - PROFILE_NAME = "storageProfileName" - - # TODO(vbala) Remove dependency on volumeops. - def __init__(self, vops, session, max_objects): - self._vops = vops - self._session = session - self._max_objects = max_objects - - def get_profile_id(self, profile_name): - """Get vCenter profile ID for the given profile name. 
- - :param profile_name: profile name - :return: vCenter profile ID - :raises ProfileNotFoundException: - """ - profile_id = pbm.get_profile_id_by_name(self._session, profile_name) - if profile_id is None: - LOG.error("Storage profile: %s cannot be found in vCenter.", - profile_name) - raise vmdk_exceptions.ProfileNotFoundException( - storage_profile=profile_name) - LOG.debug("Storage profile: %(name)s resolved to vCenter profile ID: " - "%(id)s.", - {'name': profile_name, - 'id': profile_id}) - return profile_id - - def _filter_by_profile(self, datastores, profile_id): - """Filter out input datastores that do not match the given profile.""" - cf = self._session.pbm.client.factory - hubs = pbm.convert_datastores_to_hubs(cf, datastores) - hubs = pbm.filter_hubs_by_profile(self._session, hubs, profile_id) - hub_ids = [hub.hubId for hub in hubs] - return {k: v for k, v in datastores.items() if k.value in hub_ids} - - def _filter_datastores(self, - datastores, - size_bytes, - profile_id, - hard_anti_affinity_ds, - hard_affinity_ds_types, - valid_host_refs=None): - - if not datastores: - return - - def _is_valid_ds_type(summary): - ds_type = summary.type.lower() - return (ds_type in DatastoreType.get_all_types() and - (hard_affinity_ds_types is None or - ds_type in hard_affinity_ds_types)) - - def _is_ds_usable(summary): - return summary.accessible and not self._vops._in_maintenance( - summary) - - valid_host_refs = valid_host_refs or [] - valid_hosts = [host_ref.value for host_ref in valid_host_refs] - - def _is_ds_accessible_to_valid_host(host_mounts): - for host_mount in host_mounts: - if host_mount.key.value in valid_hosts: - return True - - def _is_ds_valid(ds_ref, ds_props): - summary = ds_props.get('summary') - host_mounts = ds_props.get('host') - if (summary is None or host_mounts is None): - return False - - if (hard_anti_affinity_ds and - ds_ref.value in hard_anti_affinity_ds): - return False - - if summary.freeSpace < size_bytes: - return False - - if 
(valid_hosts and - not _is_ds_accessible_to_valid_host(host_mounts)): - return False - - return _is_valid_ds_type(summary) and _is_ds_usable(summary) - - datastores = {k: v for k, v in datastores.items() - if _is_ds_valid(k, v)} - - if datastores and profile_id: - datastores = self._filter_by_profile(datastores, profile_id) - - return datastores - - def _get_object_properties(self, obj_content): - props = {} - if hasattr(obj_content, 'propSet'): - prop_set = obj_content.propSet - if prop_set: - props = {prop.name: prop.val for prop in prop_set} - return props - - def _get_datastores(self): - datastores = {} - retrieve_result = self._session.invoke_api( - vim_util, - 'get_objects', - self._session.vim, - 'Datastore', - self._max_objects, - properties_to_collect=['host', 'summary']) - - while retrieve_result: - if retrieve_result.objects: - for obj_content in retrieve_result.objects: - props = self._get_object_properties(obj_content) - if ('host' in props and - hasattr(props['host'], 'DatastoreHostMount')): - props['host'] = props['host'].DatastoreHostMount - datastores[obj_content.obj] = props - retrieve_result = self._session.invoke_api(vim_util, - 'continue_retrieval', - self._session.vim, - retrieve_result) - - return datastores - - def _get_host_properties(self, host_ref): - retrieve_result = self._session.invoke_api(vim_util, - 'get_object_properties', - self._session.vim, - host_ref, - ['runtime', 'parent']) - - if retrieve_result: - return self._get_object_properties(retrieve_result[0]) - - def _get_resource_pool(self, cluster_ref): - return self._session.invoke_api(vim_util, - 'get_object_property', - self._session.vim, - cluster_ref, - 'resourcePool') - - def _select_best_datastore(self, datastores, valid_host_refs=None): - - if not datastores: - return - - def _sort_key(ds_props): - host = ds_props.get('host') - summary = ds_props.get('summary') - space_utilization = (1.0 - - (summary.freeSpace / float(summary.capacity))) - return (-len(host), 
space_utilization) - - host_prop_map = {} - - def _is_host_usable(host_ref): - props = host_prop_map.get(host_ref.value) - if props is None: - props = self._get_host_properties(host_ref) - host_prop_map[host_ref.value] = props - - runtime = props.get('runtime') - parent = props.get('parent') - if runtime and parent: - return (runtime.connectionState == 'connected' and - not runtime.inMaintenanceMode) - else: - return False - - valid_host_refs = valid_host_refs or [] - valid_hosts = [host_ref.value for host_ref in valid_host_refs] - - def _select_host(host_mounts): - random.shuffle(host_mounts) - for host_mount in host_mounts: - if valid_hosts and host_mount.key.value not in valid_hosts: - continue - if (self._vops._is_usable(host_mount.mountInfo) and - _is_host_usable(host_mount.key)): - return host_mount.key - - sorted_ds_props = sorted(datastores.values(), key=_sort_key) - for ds_props in sorted_ds_props: - host_ref = _select_host(ds_props['host']) - if host_ref: - rp = self._get_resource_pool( - host_prop_map[host_ref.value]['parent']) - return (host_ref, rp, ds_props['summary']) - - def select_datastore(self, req, hosts=None): - """Selects a datastore satisfying the given requirements. - - A datastore which is connected to maximum number of hosts is - selected. Ties if any are broken based on space utilization-- - datastore with least space utilization is preferred. It returns - the selected datastore's summary along with a host and resource - pool where the volume can be created. 
- - :param req: selection requirements - :param hosts: list of hosts to consider - :return: (host, resourcePool, summary) - """ - LOG.debug("Using requirements: %s for datastore selection.", req) - - hard_affinity_ds_types = req.get( - DatastoreSelector.HARD_AFFINITY_DS_TYPE) - hard_anti_affinity_datastores = req.get( - DatastoreSelector.HARD_ANTI_AFFINITY_DS) - size_bytes = req[DatastoreSelector.SIZE_BYTES] - profile_name = req.get(DatastoreSelector.PROFILE_NAME) - - profile_id = None - if profile_name is not None: - profile_id = self.get_profile_id(profile_name) - - datastores = self._get_datastores() - datastores = self._filter_datastores(datastores, - size_bytes, - profile_id, - hard_anti_affinity_datastores, - hard_affinity_ds_types, - valid_host_refs=hosts) - res = self._select_best_datastore(datastores, valid_host_refs=hosts) - LOG.debug("Selected (host, resourcepool, datastore): %s", res) - return res - - def is_datastore_compliant(self, datastore, profile_name): - """Check if the datastore is compliant with given profile. - - :param datastore: datastore to check the compliance - :param profile_name: profile to check the compliance against - :return: True if the datastore is compliant; False otherwise - :raises ProfileNotFoundException: - """ - LOG.debug("Checking datastore: %(datastore)s compliance against " - "profile: %(profile)s.", - {'datastore': datastore, - 'profile': profile_name}) - if profile_name is None: - # Any datastore is trivially compliant with a None profile. - return True - - profile_id = self.get_profile_id(profile_name) - # _filter_by_profile expects a map of datastore references to its - # properties. It only uses the properties to construct a map of - # filtered datastores to its properties. Here we don't care about - # the datastore property, so pass it as None. 
- is_compliant = bool(self._filter_by_profile({datastore: None}, - profile_id)) - LOG.debug("Compliance is %(is_compliant)s for datastore: " - "%(datastore)s against profile: %(profile)s.", - {'is_compliant': is_compliant, - 'datastore': datastore, - 'profile': profile_name}) - return is_compliant diff --git a/cinder/volume/drivers/vmware/exceptions.py b/cinder/volume/drivers/vmware/exceptions.py deleted file mode 100644 index 5917b1df0..000000000 --- a/cinder/volume/drivers/vmware/exceptions.py +++ /dev/null @@ -1,57 +0,0 @@ -# Copyright (c) 2015 VMware, Inc. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -""" -Exception definitions. 
-""" - -from oslo_vmware import exceptions - -from cinder.i18n import _ - - -class InvalidAdapterTypeException(exceptions.VMwareDriverException): - """Thrown when the disk adapter type is invalid.""" - msg_fmt = _("Invalid disk adapter type: %(invalid_type)s.") - - -class InvalidDiskTypeException(exceptions.VMwareDriverException): - """Thrown when the disk type is invalid.""" - msg_fmt = _("Invalid disk type: %(disk_type)s.") - - -class VirtualDiskNotFoundException(exceptions.VMwareDriverException): - """Thrown when virtual disk is not found.""" - msg_fmt = _("There is no virtual disk device.") - - -class ProfileNotFoundException(exceptions.VMwareDriverException): - """Thrown when the given storage profile cannot be found.""" - msg_fmt = _("Storage profile: %(storage_profile)s not found.") - - -class NoValidDatastoreException(exceptions.VMwareDriverException): - """Thrown when there are no valid datastores.""" - msg_fmt = _("There are no valid datastores.") - - -class ClusterNotFoundException(exceptions.VMwareDriverException): - """Thrown when the given cluster cannot be found.""" - msg_fmt = _("Compute cluster: %(cluster)s not found.") - - -class NoValidHostException(exceptions.VMwareDriverException): - """Thrown when there are no valid ESX hosts.""" - msg_fmt = _("There are no valid ESX hosts.") diff --git a/cinder/volume/drivers/vmware/vmdk.py b/cinder/volume/drivers/vmware/vmdk.py deleted file mode 100644 index efae3756c..000000000 --- a/cinder/volume/drivers/vmware/vmdk.py +++ /dev/null @@ -1,1912 +0,0 @@ -# Copyright (c) 2013 VMware, Inc. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. 
You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -""" -Volume driver for VMware vCenter managed datastores. - -The volumes created by this driver are backed by VMDK (Virtual Machine -Disk) files stored in datastores. For ease of managing the VMDKs, the -driver creates a virtual machine for each of the volumes. This virtual -machine is never powered on and is often referred as the shadow VM. -""" - -import math - -from oslo_config import cfg -from oslo_log import log as logging -from oslo_utils import excutils -from oslo_utils import units -from oslo_utils import uuidutils -from oslo_utils import versionutils -from oslo_vmware import api -from oslo_vmware import exceptions -from oslo_vmware import image_transfer -from oslo_vmware import pbm -from oslo_vmware import vim_util - -from cinder import exception -from cinder.i18n import _ -from cinder import interface -from cinder.volume import configuration -from cinder.volume import driver -from cinder.volume.drivers.vmware import datastore as hub -from cinder.volume.drivers.vmware import exceptions as vmdk_exceptions -from cinder.volume.drivers.vmware import volumeops -from cinder.volume import volume_types - -LOG = logging.getLogger(__name__) - -THIN_VMDK_TYPE = 'thin' -THICK_VMDK_TYPE = 'thick' -EAGER_ZEROED_THICK_VMDK_TYPE = 'eagerZeroedThick' - -CREATE_PARAM_ADAPTER_TYPE = 'adapter_type' -CREATE_PARAM_DISK_LESS = 'disk_less' -CREATE_PARAM_BACKING_NAME = 'name' -CREATE_PARAM_DISK_SIZE = 'disk_size' -CREATE_PARAM_TEMP_BACKING = 'temp_backing' - -TMP_IMAGES_DATASTORE_FOLDER_PATH = "cinder_temp/" - -EXTRA_CONFIG_VOLUME_ID_KEY = "cinder.volume.id" - 
-vmdk_opts = [
-    cfg.StrOpt('vmware_host_ip',
-               help='IP address for connecting to VMware vCenter server.'),
-    cfg.PortOpt('vmware_host_port',
-                default=443,
-                help='Port number for connecting to VMware vCenter server.'),
-    cfg.StrOpt('vmware_host_username',
-               help='Username for authenticating with VMware vCenter '
-                    'server.'),
-    cfg.StrOpt('vmware_host_password',
-               help='Password for authenticating with VMware vCenter '
-                    'server.',
-               secret=True),
-    cfg.StrOpt('vmware_wsdl_location',
-               help='Optional VIM service WSDL Location '
-                    'e.g http://<server>/vimService.wsdl. Optional over-ride '
-                    'to default location for bug work-arounds.'),
-    cfg.IntOpt('vmware_api_retry_count',
-               default=10,
-               help='Number of times VMware vCenter server API must be '
-                    'retried upon connection related issues.'),
-    cfg.FloatOpt('vmware_task_poll_interval',
-                 default=2.0,
-                 help='The interval (in seconds) for polling remote tasks '
-                      'invoked on VMware vCenter server.'),
-    cfg.StrOpt('vmware_volume_folder',
-               default='Volumes',
-               help='Name of the vCenter inventory folder that will '
-                    'contain Cinder volumes. This folder will be created '
-                    'under "OpenStack/<project_folder>", where project_folder '
-                    'is of format "Project (<project_id>)".'),
-    cfg.IntOpt('vmware_image_transfer_timeout_secs',
-               default=7200,
-               help='Timeout in seconds for VMDK volume transfer between '
-                    'Cinder and Glance.'),
-    cfg.IntOpt('vmware_max_objects_retrieval',
-               default=100,
-               help='Max number of objects to be retrieved per batch. '
-                    'Query results will be obtained in batches from the '
-                    'server and not in one shot. Server may still limit the '
-                    'count to something less than the configured value.'),
-    cfg.StrOpt('vmware_host_version',
-               help='Optional string specifying the VMware vCenter server '
-                    'version. '
-                    'The driver attempts to retrieve the version from VMware '
-                    'vCenter server. 
Set this configuration only if you want ' - 'to override the vCenter server version.'), - cfg.StrOpt('vmware_tmp_dir', - default='/tmp', - help='Directory where virtual disks are stored during volume ' - 'backup and restore.'), - cfg.StrOpt('vmware_ca_file', - help='CA bundle file to use in verifying the vCenter server ' - 'certificate.'), - cfg.BoolOpt('vmware_insecure', - default=False, - help='If true, the vCenter server certificate is not ' - 'verified. If false, then the default CA truststore is ' - 'used for verification. This option is ignored if ' - '"vmware_ca_file" is set.'), - cfg.MultiStrOpt('vmware_cluster_name', - help='Name of a vCenter compute cluster where volumes ' - 'should be created.'), - cfg.IntOpt('vmware_connection_pool_size', - default=10, - help='Maximum number of connections in http connection pool.'), -] - -CONF = cfg.CONF -CONF.register_opts(vmdk_opts, group=configuration.SHARED_CONF_GROUP) - - -def _get_volume_type_extra_spec(type_id, spec_key, possible_values=None, - default_value=None): - """Get extra spec value. - - If the spec value is not present in the input possible_values, then - default_value will be returned. - If the type_id is None, then default_value is returned. - - The caller must not consider scope and the implementation adds/removes - scope. The scope used here is 'vmware' e.g. key 'vmware:vmdk_type' and - so the caller must pass vmdk_type as an input ignoring the scope. 
-
-    :param type_id: Volume type ID
-    :param spec_key: Extra spec key
-    :param possible_values: Permitted values for the extra spec if known
-    :param default_value: Default value for the extra spec in case of an
-                          invalid value or if the entry does not exist
-    :return: extra spec value
-    """
-    if not type_id:
-        return default_value
-
-    spec_key = ('vmware:%s') % spec_key
-    spec_value = volume_types.get_volume_type_extra_specs(type_id,
-                                                          spec_key)
-    if not spec_value:
-        LOG.debug("Returning default spec value: %s.", default_value)
-        return default_value
-
-    if possible_values is None:
-        return spec_value
-
-    if spec_value in possible_values:
-        LOG.debug("Returning spec value %s", spec_value)
-        return spec_value
-
-    LOG.debug("Invalid spec value: %s specified.", spec_value)
-
-
-class ImageDiskType(object):
-    """Supported disk types in images."""
-
-    PREALLOCATED = "preallocated"
-    SPARSE = "sparse"
-    STREAM_OPTIMIZED = "streamOptimized"
-    THIN = "thin"
-
-    @staticmethod
-    def is_valid(extra_spec_disk_type):
-        """Check if the given disk type in extra_spec is valid.
-
-        :param extra_spec_disk_type: disk type to check
-        :return: True if valid
-        """
-        return extra_spec_disk_type in [ImageDiskType.PREALLOCATED,
-                                        ImageDiskType.SPARSE,
-                                        ImageDiskType.STREAM_OPTIMIZED,
-                                        ImageDiskType.THIN]
-
-    @staticmethod
-    def validate(extra_spec_disk_type):
-        """Validate the given disk type in extra_spec.
-
-        This method throws ImageUnacceptable if the disk type is not a
-        supported one. 
- - :param extra_spec_disk_type: disk type - :raises: ImageUnacceptable - """ - if not ImageDiskType.is_valid(extra_spec_disk_type): - raise exception.ImageUnacceptable(_("Invalid disk type: %s.") % - extra_spec_disk_type) - - -@interface.volumedriver -class VMwareVcVmdkDriver(driver.VolumeDriver): - """Manage volumes on VMware vCenter server.""" - - # 1.0 - initial version of driver - # 1.1.0 - selection of datastore based on number of host mounts - # 1.2.0 - storage profile volume types based placement of volumes - # 1.3.0 - support for volume backup/restore - # 1.4.0 - support for volume retype - # 1.5.0 - restrict volume placement to specific vCenter clusters - # 1.6.0 - support for manage existing - # 1.7.0 - new config option 'vmware_connection_pool_size' - # 1.7.1 - enforce vCenter server version 5.5 - VERSION = '1.7.1' - - # ThirdPartySystems wiki page - CI_WIKI_NAME = "VMware_CI" - - # Minimum supported vCenter version. - MIN_SUPPORTED_VC_VERSION = '5.5' - NEXT_MIN_SUPPORTED_VC_VERSION = '5.5' - - # PBM is enabled only for vCenter versions 5.5 and above - PBM_ENABLED_VC_VERSION = '5.5' - - def __init__(self, *args, **kwargs): - super(VMwareVcVmdkDriver, self).__init__(*args, **kwargs) - - self.configuration.append_config_values(vmdk_opts) - self._session = None - self._stats = None - self._volumeops = None - self._storage_policy_enabled = False - self._ds_sel = None - self._clusters = None - self._dc_cache = {} - - @property - def volumeops(self): - if not self._volumeops: - max_objects = self.configuration.vmware_max_objects_retrieval - self._volumeops = volumeops.VMwareVolumeOps(self.session, - max_objects) - return self._volumeops - - @property - def ds_sel(self): - if not self._ds_sel: - max_objects = self.configuration.vmware_max_objects_retrieval - self._ds_sel = hub.DatastoreSelector(self.volumeops, - self.session, - max_objects) - return self._ds_sel - - def _validate_params(self): - # Throw error if required parameters are not set. 
- required_params = ['vmware_host_ip', - 'vmware_host_username', - 'vmware_host_password'] - for param in required_params: - if not getattr(self.configuration, param, None): - reason = _("%s not set.") % param - raise exception.InvalidInput(reason=reason) - - def check_for_setup_error(self): - pass - - def get_volume_stats(self, refresh=False): - """Obtain status of the volume service. - - :param refresh: Whether to get refreshed information - """ - - if not self._stats: - backend_name = self.configuration.safe_get('volume_backend_name') - if not backend_name: - backend_name = self.__class__.__name__ - data = {'volume_backend_name': backend_name, - 'vendor_name': 'VMware', - 'driver_version': self.VERSION, - 'storage_protocol': 'vmdk', - 'reserved_percentage': 0, - 'total_capacity_gb': 'unknown', - 'free_capacity_gb': 'unknown'} - self._stats = data - return self._stats - - def _verify_volume_creation(self, volume): - """Verify that the volume can be created. - - Verify the vmdk type and storage profile if the volume is associated - with a volume type. - - :param volume: Volume object - """ - # validate disk type - self._get_disk_type(volume) - - # validate storage profile - profile_name = self._get_storage_profile(volume) - if profile_name: - self.ds_sel.get_profile_id(profile_name) - - LOG.debug("Verified disk type and storage profile of volume: %s.", - volume.name) - - def create_volume(self, volume): - """Creates a volume. - - We do not create any backing. We do it only the first time - it is being attached to a virtual machine. - - :param volume: Volume object - """ - self._verify_volume_creation(volume) - - def _delete_volume(self, volume): - """Delete the volume backing if it is present. 
- - :param volume: Volume object - """ - backing = self.volumeops.get_backing(volume['name']) - if not backing: - LOG.info("Backing not available, no operation " - "to be performed.") - return - self.volumeops.delete_backing(backing) - - def delete_volume(self, volume): - """Deletes volume backing. - - :param volume: Volume object - """ - self._delete_volume(volume) - - def _get_extra_spec_storage_profile(self, type_id): - """Get storage profile name in the given volume type's extra spec. - - If there is no storage profile in the extra spec, default is None. - """ - return _get_volume_type_extra_spec(type_id, 'storage_profile') - - def _get_storage_profile(self, volume): - """Get storage profile associated with the given volume's volume_type. - - :param volume: Volume whose storage profile should be queried - :return: String value of storage profile if volume type is associated - and contains storage_profile extra_spec option; None otherwise - """ - return self._get_extra_spec_storage_profile(volume['volume_type_id']) - - @staticmethod - def _get_extra_spec_disk_type(type_id): - """Get disk type from the given volume type's extra spec. - - If there is no disk type option, default is THIN_VMDK_TYPE. - """ - disk_type = _get_volume_type_extra_spec(type_id, - 'vmdk_type', - default_value=THIN_VMDK_TYPE) - volumeops.VirtualDiskType.validate(disk_type) - return disk_type - - @staticmethod - def _get_disk_type(volume): - """Get disk type from the given volume's volume type. 
-
-        :param volume: Volume object
-        :return: Disk type
-        """
-        return VMwareVcVmdkDriver._get_extra_spec_disk_type(
-            volume['volume_type_id'])
-
-    def _get_storage_profile_id(self, volume):
-        storage_profile = self._get_storage_profile(volume)
-        profile_id = None
-        if self._storage_policy_enabled and storage_profile:
-            profile = pbm.get_profile_id_by_name(self.session, storage_profile)
-            if profile:
-                profile_id = profile.uniqueId
-        return profile_id
-
-    def _get_extra_config(self, volume):
-        return {EXTRA_CONFIG_VOLUME_ID_KEY: volume['id'],
-                volumeops.BACKING_UUID_KEY: volume['id']}
-
-    def _create_backing(self, volume, host=None, create_params=None):
-        """Create volume backing under the given host.
-
-        If host is unspecified, any suitable host is selected.
-
-        :param volume: Volume object
-        :param host: Reference of the host
-        :param create_params: Dictionary specifying optional parameters for
-                              backing VM creation
-        :return: Reference to the created backing
-        """
-        create_params = create_params or {}
-        (host_ref, resource_pool, folder,
-         summary) = self._select_ds_for_volume(volume, host)
-
-        # check if a storage profile needs to be associated with the backing VM
-        profile_id = self._get_storage_profile_id(volume)
-
-        # Use volume name as the default backing name.
-        backing_name = create_params.get(CREATE_PARAM_BACKING_NAME,
-                                         volume['name'])
-
-        extra_config = self._get_extra_config(volume)
-        # We shouldn't set backing UUID to volume UUID for temporary backing.
- if create_params.get(CREATE_PARAM_TEMP_BACKING): - del extra_config[volumeops.BACKING_UUID_KEY] - - # default is a backing with single disk - disk_less = create_params.get(CREATE_PARAM_DISK_LESS, False) - if disk_less: - # create a disk-less backing-- disk can be added later; for e.g., - # by copying an image - return self.volumeops.create_backing_disk_less( - backing_name, - folder, - resource_pool, - host_ref, - summary.name, - profileId=profile_id, - extra_config=extra_config) - - # create a backing with single disk - disk_type = VMwareVcVmdkDriver._get_disk_type(volume) - size_kb = volume['size'] * units.Mi - adapter_type = create_params.get(CREATE_PARAM_ADAPTER_TYPE, - 'lsiLogic') - backing = self.volumeops.create_backing(backing_name, - size_kb, - disk_type, - folder, - resource_pool, - host_ref, - summary.name, - profileId=profile_id, - adapter_type=adapter_type, - extra_config=extra_config) - - self.volumeops.update_backing_disk_uuid(backing, volume['id']) - return backing - - def _get_hosts(self, clusters): - hosts = [] - if clusters: - for cluster in clusters: - cluster_hosts = self.volumeops.get_cluster_hosts(cluster) - hosts.extend(cluster_hosts) - return hosts - - def _select_datastore(self, req, host=None): - """Selects datastore satisfying the given requirements. 
- - :return: (host, resource_pool, summary) - """ - hosts = None - if host: - hosts = [host] - elif self._clusters: - hosts = self._get_hosts(self._clusters) - if not hosts: - LOG.error("There are no valid hosts available in " - "configured cluster(s): %s.", self._clusters) - raise vmdk_exceptions.NoValidHostException() - - best_candidate = self.ds_sel.select_datastore(req, hosts=hosts) - if not best_candidate: - LOG.error("There is no valid datastore satisfying " - "requirements: %s.", req) - raise vmdk_exceptions.NoValidDatastoreException() - - return best_candidate - - def _get_dc(self, resource_pool): - dc = self._dc_cache.get(resource_pool.value) - if not dc: - dc = self.volumeops.get_dc(resource_pool) - self._dc_cache[resource_pool.value] = dc - return dc - - def _select_ds_for_volume(self, volume, host=None, create_params=None): - """Select datastore that can accommodate the given volume's backing. - - Returns the selected datastore summary along with a compute host and - its resource pool and folder where the volume can be created - :return: (host, resource_pool, folder, summary) - """ - # Form requirements for datastore selection. - create_params = create_params or {} - size = create_params.get(CREATE_PARAM_DISK_SIZE, volume['size']) - - req = {} - req[hub.DatastoreSelector.SIZE_BYTES] = size * units.Gi - req[hub.DatastoreSelector.PROFILE_NAME] = self._get_storage_profile( - volume) - - (host_ref, resource_pool, summary) = self._select_datastore(req, host) - dc = self._get_dc(resource_pool) - folder = self._get_volume_group_folder(dc, volume['project_id']) - - return (host_ref, resource_pool, folder, summary) - - def _get_connection_info(self, volume, backing, connector): - connection_info = {'driver_volume_type': 'vmdk'} - connection_info['data'] = { - 'volume': backing.value, - 'volume_id': volume.id, - 'name': volume.name, - } - - # vmdk connector in os-brick needs additional connection info. 
- if 'platform' in connector and 'os_type' in connector: - connection_info['data']['vmdk_size'] = volume['size'] * units.Gi - - vmdk_path = self.volumeops.get_vmdk_path(backing) - connection_info['data']['vmdk_path'] = vmdk_path - - datastore = self.volumeops.get_datastore(backing) - connection_info['data']['datastore'] = datastore.value - - datacenter = self.volumeops.get_dc(backing) - connection_info['data']['datacenter'] = datacenter.value - - config = self.configuration - vmdk_connector_config = { - 'vmware_host_ip': config.vmware_host_ip, - 'vmware_host_port': config.vmware_host_port, - 'vmware_host_username': config.vmware_host_username, - 'vmware_host_password': config.vmware_host_password, - 'vmware_api_retry_count': config.vmware_api_retry_count, - 'vmware_task_poll_interval': config.vmware_task_poll_interval, - 'vmware_ca_file': config.vmware_ca_file, - 'vmware_insecure': config.vmware_insecure, - 'vmware_tmp_dir': config.vmware_tmp_dir, - 'vmware_image_transfer_timeout_secs': - config.vmware_image_transfer_timeout_secs, - } - connection_info['data']['config'] = vmdk_connector_config - - LOG.debug("Returning connection_info (volume: '%(volume)s', volume_id:" - " '%(volume_id)s') for connector: %(connector)s.", - {'volume': connection_info['data']['volume'], - 'volume_id': volume.id, - 'connector': connector}) - - return connection_info - - def _initialize_connection(self, volume, connector): - """Get information of volume's backing. - - If the volume does not have a backing yet. It will be created. 
- - :param volume: Volume object - :param connector: Connector information - :return: Return connection information - """ - backing = self.volumeops.get_backing(volume.name) - if 'instance' in connector: - # The instance exists - instance = vim_util.get_moref(connector['instance'], - 'VirtualMachine') - LOG.debug("The instance: %s for which initialize connection " - "is called, exists.", instance) - # Get host managing the instance - host = self.volumeops.get_host(instance) - if not backing: - # Create a backing in case it does not exist under the - # host managing the instance. - LOG.info("There is no backing for the volume: %s. " - "Need to create one.", volume.name) - backing = self._create_backing(volume, host) - else: - # Relocate volume is necessary - self._relocate_backing(volume, backing, host) - else: - # The instance does not exist - LOG.debug("The instance for which initialize connection " - "is called, does not exist.") - if not backing: - # Create a backing in case it does not exist. It is a bad use - # case to boot from an empty volume. - LOG.warning("Trying to boot from an empty volume: %s.", - volume.name) - # Create backing - backing = self._create_backing(volume) - - return self._get_connection_info(volume, backing, connector) - - def initialize_connection(self, volume, connector): - """Allow connection to connector and return connection info. - - The implementation returns the following information: - - .. 
code-block:: default - - { - 'driver_volume_type': 'vmdk', - 'data': {'volume': $VOLUME_MOREF_VALUE, - 'volume_id': $VOLUME_ID - } - } - - :param volume: Volume object - :param connector: Connector information - :return: Return connection information - """ - return self._initialize_connection(volume, connector) - - def terminate_connection(self, volume, connector, force=False, **kwargs): - pass - - def create_export(self, context, volume, connector): - pass - - def ensure_export(self, context, volume): - pass - - def remove_export(self, context, volume): - pass - - def _create_snapshot(self, snapshot): - """Creates a snapshot. - - If the volume does not have a backing then simply pass, else create - a snapshot. - Snapshot of only available volume is supported. - - :param snapshot: Snapshot object - """ - - volume = snapshot['volume'] - if volume['status'] != 'available': - msg = _("Snapshot of volume not supported in " - "state: %s.") % volume['status'] - LOG.error(msg) - raise exception.InvalidVolume(msg) - backing = self.volumeops.get_backing(snapshot['volume_name']) - if not backing: - LOG.info("There is no backing, so will not create " - "snapshot: %s.", snapshot['name']) - return - self.volumeops.create_snapshot(backing, snapshot['name'], - snapshot['display_description']) - LOG.info("Successfully created snapshot: %s.", snapshot['name']) - - def create_snapshot(self, snapshot): - """Creates a snapshot. - - :param snapshot: Snapshot object - """ - self._create_snapshot(snapshot) - - def _delete_snapshot(self, snapshot): - """Delete snapshot. - - If the volume does not have a backing or the snapshot does not exist - then simply pass, else delete the snapshot. Snapshot deletion of only - available volume is supported. 
- - :param snapshot: Snapshot object - """ - backing = self.volumeops.get_backing(snapshot.volume_name) - if not backing: - LOG.debug("Backing does not exist for volume.", - resource=snapshot.volume) - elif not self.volumeops.get_snapshot(backing, snapshot.name): - LOG.debug("Snapshot does not exist in backend.", resource=snapshot) - elif self._in_use(snapshot.volume): - msg = _("Delete snapshot of volume not supported in " - "state: %s.") % snapshot.volume.status - LOG.error(msg) - raise exception.InvalidSnapshot(reason=msg) - else: - self.volumeops.delete_snapshot(backing, snapshot.name) - - def delete_snapshot(self, snapshot): - """Delete snapshot. - - :param snapshot: Snapshot object - """ - self._delete_snapshot(snapshot) - - def _get_ds_name_folder_path(self, backing): - """Get datastore name and folder path of the given backing. - - :param backing: Reference to the backing entity - :return: datastore name and folder path of the backing - """ - vmdk_ds_file_path = self.volumeops.get_path_name(backing) - (datastore_name, - folder_path, _) = volumeops.split_datastore_path(vmdk_ds_file_path) - return (datastore_name, folder_path) - - @staticmethod - def _validate_disk_format(disk_format): - """Verify vmdk as disk format. - - :param disk_format: Disk format of the image - """ - if disk_format and disk_format.lower() != 'vmdk': - msg = _("Cannot create image of disk format: %s. 
Only vmdk " - "disk format is accepted.") % disk_format - LOG.error(msg) - raise exception.ImageUnacceptable(msg) - - def _copy_image(self, context, dc_ref, image_service, image_id, - image_size_in_bytes, ds_name, upload_file_path): - """Copy image (flat extent or sparse vmdk) to datastore.""" - - timeout = self.configuration.vmware_image_transfer_timeout_secs - host_ip = self.configuration.vmware_host_ip - port = self.configuration.vmware_host_port - ca_file = self.configuration.vmware_ca_file - insecure = self.configuration.vmware_insecure - cookies = self.session.vim.client.options.transport.cookiejar - dc_name = self.volumeops.get_entity_name(dc_ref) - - LOG.debug("Copying image: %(image_id)s to %(path)s.", - {'image_id': image_id, - 'path': upload_file_path}) - - # ca_file is used for verifying vCenter certificate if it is set. - # If ca_file is unset and insecure is False, the default CA truststore - # is used for verification. We should pass cacerts=True in this - # case. If ca_file is unset and insecure is True, there is no - # certificate verification, and we should pass cacerts=False. 
- cacerts = ca_file if ca_file else not insecure - image_transfer.download_flat_image(context, - timeout, - image_service, - image_id, - image_size=image_size_in_bytes, - host=host_ip, - port=port, - data_center_name=dc_name, - datastore_name=ds_name, - cookies=cookies, - file_path=upload_file_path, - cacerts=cacerts) - LOG.debug("Image: %(image_id)s copied to %(path)s.", - {'image_id': image_id, - 'path': upload_file_path}) - - def _delete_temp_disk(self, descriptor_ds_file_path, dc_ref): - """Deletes a temporary virtual disk.""" - - LOG.debug("Deleting temporary disk: %s.", descriptor_ds_file_path) - try: - self.volumeops.delete_vmdk_file( - descriptor_ds_file_path, dc_ref) - except exceptions.VimException: - LOG.warning("Error occurred while deleting temporary disk: %s.", - descriptor_ds_file_path, exc_info=True) - - def _copy_temp_virtual_disk(self, src_dc_ref, src_path, dest_dc_ref, - dest_path): - """Clones a temporary virtual disk and deletes it finally.""" - - try: - self.volumeops.copy_vmdk_file( - src_dc_ref, src_path.get_descriptor_ds_file_path(), - dest_path.get_descriptor_ds_file_path(), dest_dc_ref) - except exceptions.VimException: - with excutils.save_and_reraise_exception(): - LOG.exception("Error occurred while copying %(src)s to " - "%(dst)s.", - {'src': src_path.get_descriptor_ds_file_path(), - 'dst': dest_path.get_descriptor_ds_file_path()}) - finally: - # Delete temporary disk. - self._delete_temp_disk(src_path.get_descriptor_ds_file_path(), - src_dc_ref) - - def _get_temp_image_folder(self, image_size_in_bytes): - """Get datastore folder for downloading temporary images.""" - # Form requirements for datastore selection. - req = {} - req[hub.DatastoreSelector.SIZE_BYTES] = image_size_in_bytes - # vSAN/VVOL datastores don't support virtual disk with - # flat extent; skip such datastores. 
- req[hub.DatastoreSelector.HARD_AFFINITY_DS_TYPE] = ( - hub.DatastoreType.get_all_types() - - {hub.DatastoreType.VSAN, hub.DatastoreType.VVOL}) - - # Select datastore satisfying the requirements. - (host_ref, _resource_pool, summary) = self._select_datastore(req) - - ds_name = summary.name - dc_ref = self.volumeops.get_dc(host_ref) - - # Create temporary datastore folder. - folder_path = TMP_IMAGES_DATASTORE_FOLDER_PATH - self.volumeops.create_datastore_folder(ds_name, folder_path, dc_ref) - - return (dc_ref, ds_name, folder_path) - - def _create_virtual_disk_from_sparse_image( - self, context, image_service, image_id, image_size_in_bytes, - dc_ref, ds_name, folder_path, disk_name): - """Creates a flat extent virtual disk from sparse vmdk image.""" - - # Upload the image to a temporary virtual disk. - src_disk_name = uuidutils.generate_uuid() - src_path = volumeops.MonolithicSparseVirtualDiskPath(ds_name, - folder_path, - src_disk_name) - - LOG.debug("Creating temporary virtual disk: %(path)s from sparse vmdk " - "image: %(image_id)s.", - {'path': src_path.get_descriptor_ds_file_path(), - 'image_id': image_id}) - self._copy_image(context, dc_ref, image_service, image_id, - image_size_in_bytes, ds_name, - src_path.get_descriptor_file_path()) - - # Copy sparse disk to create a flat extent virtual disk. - dest_path = volumeops.FlatExtentVirtualDiskPath(ds_name, - folder_path, - disk_name) - self._copy_temp_virtual_disk(dc_ref, src_path, dc_ref, dest_path) - LOG.debug("Created virtual disk: %s from sparse vmdk image.", - dest_path.get_descriptor_ds_file_path()) - return dest_path - - def _create_virtual_disk_from_preallocated_image( - self, context, image_service, image_id, image_size_in_bytes, - dest_dc_ref, dest_ds_name, dest_folder_path, dest_disk_name, - adapter_type): - """Creates virtual disk from an image which is a flat extent.""" - - # Upload the image and use it as a flat extent to create a virtual - # disk. 
First, find the datastore folder to download the image. - (dc_ref, ds_name, - folder_path) = self._get_temp_image_folder(image_size_in_bytes) - - # pylint: disable=E1101 - if ds_name == dest_ds_name and dc_ref.value == dest_dc_ref.value: - # Temporary image folder and destination path are on the same - # datastore. We can directly download the image to the destination - # folder to save one virtual disk copy. - path = volumeops.FlatExtentVirtualDiskPath(dest_ds_name, - dest_folder_path, - dest_disk_name) - dest_path = path - else: - # Use the image to create a temporary virtual disk which is then - # copied to the destination folder. - disk_name = uuidutils.generate_uuid() - path = volumeops.FlatExtentVirtualDiskPath(ds_name, - folder_path, - disk_name) - dest_path = volumeops.FlatExtentVirtualDiskPath(dest_ds_name, - dest_folder_path, - dest_disk_name) - - LOG.debug("Creating virtual disk: %(path)s from (flat extent) image: " - "%(image_id)s.", - {'path': path.get_descriptor_ds_file_path(), - 'image_id': image_id}) - - # We first create a descriptor with desired settings. - self.volumeops.create_flat_extent_virtual_disk_descriptor( - dc_ref, path, image_size_in_bytes // units.Ki, adapter_type, - EAGER_ZEROED_THICK_VMDK_TYPE) - # Upload the image and use it as the flat extent. - try: - self._copy_image(context, dc_ref, image_service, image_id, - image_size_in_bytes, ds_name, - path.get_flat_extent_file_path()) - except Exception: - # Delete the descriptor. 
- with excutils.save_and_reraise_exception(): - LOG.exception("Error occurred while copying image: " - "%(image_id)s to %(path)s.", - {'path': path.get_descriptor_ds_file_path(), - 'image_id': image_id}) - LOG.debug("Deleting descriptor: %s.", - path.get_descriptor_ds_file_path()) - try: - self.volumeops.delete_file( - path.get_descriptor_ds_file_path(), dc_ref) - except exceptions.VimException: - LOG.warning("Error occurred while deleting " - "descriptor: %s.", - path.get_descriptor_ds_file_path(), - exc_info=True) - - if dest_path != path: - # Copy temporary disk to given destination. - self._copy_temp_virtual_disk(dc_ref, path, dest_dc_ref, dest_path) - - LOG.debug("Created virtual disk: %s from flat extent image.", - dest_path.get_descriptor_ds_file_path()) - return dest_path - - def _check_disk_conversion(self, image_disk_type, extra_spec_disk_type): - """Check if disk type conversion is needed.""" - - if image_disk_type == ImageDiskType.SPARSE: - # We cannot reliably determine the destination disk type of a - # virtual disk copied from a sparse image. - return True - # Virtual disk created from flat extent is always of type - # eagerZeroedThick. - return not (volumeops.VirtualDiskType.get_virtual_disk_type( - extra_spec_disk_type) == - volumeops.VirtualDiskType.EAGER_ZEROED_THICK) - - def _delete_temp_backing(self, backing): - """Deletes temporary backing.""" - - LOG.debug("Deleting backing: %s.", backing) - try: - self.volumeops.delete_backing(backing) - except exceptions.VimException: - LOG.warning("Error occurred while deleting backing: %s.", - backing, exc_info=True) - - def _create_volume_from_non_stream_optimized_image( - self, context, volume, image_service, image_id, - image_size_in_bytes, adapter_type, image_disk_type): - """Creates backing VM from non-streamOptimized image. - - First, we create a disk-less backing. Then we create a virtual disk - using the image which is then attached to the backing VM. 
Finally, the - backing VM is cloned if disk type conversion is required. - """ - # We should use the disk type in volume type for backing's virtual - # disk. - disk_type = VMwareVcVmdkDriver._get_disk_type(volume) - - # First, create a disk-less backing. - create_params = {CREATE_PARAM_DISK_LESS: True} - - disk_conversion = self._check_disk_conversion(image_disk_type, - disk_type) - if disk_conversion: - # The initial backing is a temporary one and used as the source - # for clone operation. - disk_name = uuidutils.generate_uuid() - create_params[CREATE_PARAM_BACKING_NAME] = disk_name - create_params[CREATE_PARAM_TEMP_BACKING] = True - else: - disk_name = volume['name'] - - LOG.debug("Creating disk-less backing for volume: %(id)s with params: " - "%(param)s.", - {'id': volume['id'], - 'param': create_params}) - backing = self._create_backing(volume, create_params=create_params) - - try: - # Find the backing's datacenter, host, datastore and folder. - (ds_name, folder_path) = self._get_ds_name_folder_path(backing) - host = self.volumeops.get_host(backing) - dc_ref = self.volumeops.get_dc(host) - - vmdk_path = None - attached = False - - # Create flat extent virtual disk from the image. - if image_disk_type == ImageDiskType.SPARSE: - # Monolithic sparse image has embedded descriptor. - vmdk_path = self._create_virtual_disk_from_sparse_image( - context, image_service, image_id, image_size_in_bytes, - dc_ref, ds_name, folder_path, disk_name) - else: - # The image is just a flat extent. - vmdk_path = self._create_virtual_disk_from_preallocated_image( - context, image_service, image_id, image_size_in_bytes, - dc_ref, ds_name, folder_path, disk_name, adapter_type) - - # Attach the virtual disk to the backing. 
- LOG.debug("Attaching virtual disk: %(path)s to backing: " - "%(backing)s.", - {'path': vmdk_path.get_descriptor_ds_file_path(), - 'backing': backing}) - - profile_id = self._get_storage_profile_id(volume) - self.volumeops.attach_disk_to_backing( - backing, - image_size_in_bytes // units.Ki, disk_type, - adapter_type, - profile_id, - vmdk_path.get_descriptor_ds_file_path()) - attached = True - - if disk_conversion: - # Clone the temporary backing for disk type conversion. - (host, rp, folder, summary) = self._select_ds_for_volume( - volume) - datastore = summary.datastore - LOG.debug("Cloning temporary backing: %s for disk type " - "conversion.", backing) - extra_config = self._get_extra_config(volume) - clone = self.volumeops.clone_backing(volume['name'], - backing, - None, - volumeops.FULL_CLONE_TYPE, - datastore, - disk_type=disk_type, - host=host, - resource_pool=rp, - extra_config=extra_config, - folder=folder) - self._delete_temp_backing(backing) - backing = clone - - self.volumeops.update_backing_disk_uuid(backing, volume['id']) - except Exception: - # Delete backing and virtual disk created from image. - with excutils.save_and_reraise_exception(): - LOG.exception("Error occurred while creating " - "volume: %(id)s" - " from image: %(image_id)s.", - {'id': volume['id'], - 'image_id': image_id}) - self._delete_temp_backing(backing) - # Delete virtual disk if exists and unattached. - if vmdk_path is not None and not attached: - self._delete_temp_disk( - vmdk_path.get_descriptor_ds_file_path(), dc_ref) - - def _fetch_stream_optimized_image(self, context, volume, image_service, - image_id, image_size, adapter_type): - """Creates volume from image using HttpNfc VM import. - - Uses Nfc API to download the VMDK file from Glance. Nfc creates the - backing VM that wraps the VMDK in the vCenter inventory. - This method assumes glance image is VMDK disk format and its - vmware_disktype is 'streamOptimized'. 
- """ - try: - # find host in which to create the volume - (_host, rp, folder, summary) = self._select_ds_for_volume(volume) - except exceptions.VimException as excep: - err_msg = (_("Exception in _select_ds_for_volume: " - "%s."), excep) - raise exception.VolumeBackendAPIException(data=err_msg) - - size_gb = volume['size'] - LOG.debug("Selected datastore %(ds)s for new volume of size " - "%(size)s GB.", {'ds': summary.name, 'size': size_gb}) - - # prepare create spec for backing vm - profile_id = self._get_storage_profile_id(volume) - disk_type = VMwareVcVmdkDriver._get_disk_type(volume) - - # The size of stream optimized glance image is often suspect, - # so better let vCenter figure out the disk capacity during import. - dummy_disk_size = 0 - extra_config = self._get_extra_config(volume) - vm_create_spec = self.volumeops.get_create_spec( - volume['name'], - dummy_disk_size, - disk_type, - summary.name, - profile_id=profile_id, - adapter_type=adapter_type, - extra_config=extra_config) - # convert vm_create_spec to vm_import_spec - cf = self.session.vim.client.factory - vm_import_spec = cf.create('ns0:VirtualMachineImportSpec') - vm_import_spec.configSpec = vm_create_spec - - try: - # fetching image from glance will also create the backing - timeout = self.configuration.vmware_image_transfer_timeout_secs - host_ip = self.configuration.vmware_host_ip - port = self.configuration.vmware_host_port - LOG.debug("Fetching glance image: %(id)s to server: %(host)s.", - {'id': image_id, 'host': host_ip}) - backing = image_transfer.download_stream_optimized_image( - context, - timeout, - image_service, - image_id, - session=self.session, - host=host_ip, - port=port, - resource_pool=rp, - vm_folder=folder, - vm_import_spec=vm_import_spec, - image_size=image_size) - self.volumeops.update_backing_disk_uuid(backing, volume['id']) - except (exceptions.VimException, - exceptions.VMwareDriverException): - with excutils.save_and_reraise_exception(): - LOG.exception("Error occurred 
while copying image: %(id)s " - "to volume: %(vol)s.", - {'id': image_id, 'vol': volume['name']}) - backing = self.volumeops.get_backing(volume['name']) - if backing: - # delete the backing - self.volumeops.delete_backing(backing) - - LOG.info("Done copying image: %(id)s to volume: %(vol)s.", - {'id': image_id, 'vol': volume['name']}) - - def _extend_backing(self, backing, new_size_in_gb): - """Extend volume backing's virtual disk. - - :param backing: volume backing - :param new_size_in_gb: new size of virtual disk - """ - root_vmdk_path = self.volumeops.get_vmdk_path(backing) - datacenter = self.volumeops.get_dc(backing) - self.volumeops.extend_virtual_disk(new_size_in_gb, root_vmdk_path, - datacenter) - - def copy_image_to_volume(self, context, volume, image_service, image_id): - """Creates volume from image. - - This method only supports Glance image of VMDK disk format. - Uses flat vmdk file copy for "sparse" and "preallocated" disk types - Uses HttpNfc import API for "streamOptimized" disk types. This API - creates a backing VM that wraps the VMDK in the vCenter inventory. - - :param context: context - :param volume: Volume object - :param image_service: Glance image service - :param image_id: Glance image id - """ - LOG.debug("Copy glance image: %s to create new volume.", image_id) - - # Verify glance image is vmdk disk format - metadata = image_service.show(context, image_id) - VMwareVcVmdkDriver._validate_disk_format(metadata['disk_format']) - - # Validate container format; only 'bare' and 'ova' are supported. 
- container_format = metadata.get('container_format') - if (container_format and container_format not in ['bare', 'ova']): - msg = _("Container format: %s is unsupported, only 'bare' and " - "'ova' are supported.") % container_format - LOG.error(msg) - raise exception.ImageUnacceptable(image_id=image_id, reason=msg) - - # Get the disk type, adapter type and size of vmdk image - image_disk_type = ImageDiskType.PREALLOCATED - image_adapter_type = volumeops.VirtualDiskAdapterType.LSI_LOGIC - image_size_in_bytes = metadata['size'] - properties = metadata['properties'] - if properties: - if 'vmware_disktype' in properties: - image_disk_type = properties['vmware_disktype'] - if 'vmware_adaptertype' in properties: - image_adapter_type = properties['vmware_adaptertype'] - - try: - # validate disk and adapter types in image meta-data - volumeops.VirtualDiskAdapterType.validate(image_adapter_type) - ImageDiskType.validate(image_disk_type) - - if image_disk_type == ImageDiskType.STREAM_OPTIMIZED: - self._fetch_stream_optimized_image(context, volume, - image_service, image_id, - image_size_in_bytes, - image_adapter_type) - else: - self._create_volume_from_non_stream_optimized_image( - context, volume, image_service, image_id, - image_size_in_bytes, image_adapter_type, image_disk_type) - except (exceptions.VimException, - exceptions.VMwareDriverException): - with excutils.save_and_reraise_exception(): - LOG.exception("Error occurred while copying image: %(id)s " - "to volume: %(vol)s.", - {'id': image_id, 'vol': volume['name']}) - - LOG.debug("Volume: %(id)s created from image: %(image_id)s.", - {'id': volume['id'], - 'image_id': image_id}) - - # If the user-specified volume size is greater than backing's - # current disk size, we should extend the disk. 
- volume_size = volume['size'] * units.Gi - backing = self.volumeops.get_backing(volume['name']) - disk_size = self.volumeops.get_disk_size(backing) - if volume_size > disk_size: - LOG.debug("Extending volume: %(name)s since the user specified " - "volume size (bytes): %(vol_size)d is greater than " - "backing's current disk size (bytes): %(disk_size)d.", - {'name': volume['name'], - 'vol_size': volume_size, - 'disk_size': disk_size}) - self._extend_backing(backing, volume['size']) - # TODO(vbala): handle volume_size < disk_size case. - - def copy_volume_to_image(self, context, volume, image_service, image_meta): - """Creates glance image from volume. - - Upload of only available volume is supported. The uploaded glance image - has a vmdk disk type of "streamOptimized" that can only be downloaded - using the HttpNfc API. - Steps followed are: - 1. Get the name of the vmdk file which the volume points to right - now. Can be a chain of snapshots, so we need to know the last in the - chain. - 2. Use Nfc APIs to upload the contents of the vmdk file to glance. 
- """ - - # if volume is attached raise exception - if self._in_use(volume): - msg = _("Upload to glance of attached volume is not supported.") - LOG.error(msg) - raise exception.InvalidVolume(msg) - - # validate disk format is vmdk - LOG.debug("Copy Volume: %s to new image.", volume['name']) - VMwareVcVmdkDriver._validate_disk_format(image_meta['disk_format']) - - # get backing vm of volume and its vmdk path - backing = self.volumeops.get_backing(volume['name']) - if not backing: - LOG.info("Backing not found, creating for volume: %s", - volume['name']) - backing = self._create_backing(volume) - vmdk_file_path = self.volumeops.get_vmdk_path(backing) - - # Upload image from vmdk - timeout = self.configuration.vmware_image_transfer_timeout_secs - host_ip = self.configuration.vmware_host_ip - port = self.configuration.vmware_host_port - - image_transfer.upload_image(context, - timeout, - image_service, - image_meta['id'], - volume['project_id'], - session=self.session, - host=host_ip, - port=port, - vm=backing, - vmdk_file_path=vmdk_file_path, - vmdk_size=volume['size'] * units.Gi, - image_name=image_meta['name'], - image_version=1) - LOG.info("Done copying volume %(vol)s to a new image %(img)s", - {'vol': volume['name'], 'img': image_meta['name']}) - - def _in_use(self, volume): - """Check if the given volume is in use.""" - return (volume['volume_attachment'] and - len(volume['volume_attachment']) > 0) - - def retype(self, ctxt, volume, new_type, diff, host): - """Convert the volume to be of the new type. - - The retype is performed only if the volume is not in use. Retype is NOP - if the backing doesn't exist. If disk type conversion is needed, the - volume is cloned. If disk type conversion is needed and the volume - contains snapshots, the backing is relocated instead of cloning. The - backing is also relocated if the current datastore is not compliant - with the new storage profile (if any). Finally, the storage profile of - the backing VM is updated. 
- - :param ctxt: Context - :param volume: A dictionary describing the volume to retype - :param new_type: A dictionary describing the volume type to convert to - :param diff: A dictionary with the difference between the two types - :param host: A dictionary describing the host to migrate to, where - host['host'] is its name, and host['capabilities'] is a - dictionary of its reported capabilities (unused) - :returns: True if the retype occurred; False otherwise. - """ - # Can't attempt retype if the volume is in use. - if self._in_use(volume): - LOG.warning("Volume: %s is in use, can't retype.", - volume['name']) - return False - - # If the backing doesn't exist, retype is NOP. - backing = self.volumeops.get_backing(volume['name']) - if backing is None: - LOG.debug("Backing for volume: %s doesn't exist; retype is NOP.", - volume['name']) - return True - - # Check whether we need disk type conversion. - disk_type = VMwareVcVmdkDriver._get_disk_type(volume) - new_disk_type = VMwareVcVmdkDriver._get_extra_spec_disk_type( - new_type['id']) - need_disk_type_conversion = disk_type != new_disk_type - - # Check whether we need to relocate the backing. If the backing - # contains snapshots, relocate is the only way to achieve disk type - # conversion. - need_relocate = (need_disk_type_conversion and - self.volumeops.snapshot_exists(backing)) - - datastore = self.volumeops.get_datastore(backing) - - # Check whether we need to change the storage profile. - need_profile_change = False - is_compliant = True - new_profile = None - if self._storage_policy_enabled: - profile = self._get_storage_profile(volume) - new_profile = self._get_extra_spec_storage_profile(new_type['id']) - need_profile_change = profile != new_profile - # The current datastore may be compliant with the new profile. 
- is_compliant = self.ds_sel.is_datastore_compliant(datastore, - new_profile) - - # No need to relocate or clone if there is no disk type conversion and - # the current datastore is compliant with the new profile or storage - # policy is disabled. - if not need_disk_type_conversion and is_compliant: - LOG.debug("Backing: %(backing)s for volume: %(name)s doesn't need " - "disk type conversion.", - {'backing': backing, - 'name': volume['name']}) - if self._storage_policy_enabled: - LOG.debug("Backing: %(backing)s for volume: %(name)s is " - "compliant with the new profile: %(new_profile)s.", - {'backing': backing, - 'name': volume['name'], - 'new_profile': new_profile}) - else: - # Set requirements for datastore selection. - req = {} - req[hub.DatastoreSelector.SIZE_BYTES] = (volume['size'] * - units.Gi) - - if need_relocate: - LOG.debug("Backing: %s should be relocated.", backing) - req[hub.DatastoreSelector.HARD_ANTI_AFFINITY_DS] = ( - [datastore.value]) - - if new_profile: - req[hub.DatastoreSelector.PROFILE_NAME] = new_profile - - # Select datastore satisfying the requirements. - try: - best_candidate = self._select_datastore(req) - except vmdk_exceptions.NoValidDatastoreException: - # No candidate datastores; can't retype. - LOG.warning("There are no datastores matching new " - "requirements; can't retype volume: %s.", - volume['name']) - return False - - (host, rp, summary) = best_candidate - dc = self._get_dc(rp) - folder = self._get_volume_group_folder(dc, volume['project_id']) - new_datastore = summary.datastore - if datastore.value != new_datastore.value: - # Datastore changed; relocate the backing. - LOG.debug("Backing: %s needs to be relocated for retype.", - backing) - self.volumeops.relocate_backing( - backing, new_datastore, rp, host, new_disk_type) - self.volumeops.move_backing_to_folder(backing, folder) - elif need_disk_type_conversion: - # Same datastore, but clone is needed for disk type conversion. 
- LOG.debug("Backing: %s needs to be cloned for retype.", - backing) - - new_backing = None - renamed = False - tmp_name = uuidutils.generate_uuid() - try: - self.volumeops.rename_backing(backing, tmp_name) - renamed = True - - new_backing = self.volumeops.clone_backing( - volume['name'], backing, None, - volumeops.FULL_CLONE_TYPE, datastore, - disk_type=new_disk_type, host=host, - resource_pool=rp, folder=folder) - self._delete_temp_backing(backing) - backing = new_backing - self.volumeops.update_backing_uuid(backing, volume['id']) - self.volumeops.update_backing_disk_uuid(backing, - volume['id']) - except exceptions.VimException: - with excutils.save_and_reraise_exception(): - LOG.exception("Error occurred while cloning backing: " - "%s during retype.", - backing) - if renamed and not new_backing: - LOG.debug("Undo rename of backing: %(backing)s; " - "changing name from %(new_name)s to " - "%(old_name)s.", - {'backing': backing, - 'new_name': tmp_name, - 'old_name': volume['name']}) - try: - self.volumeops.rename_backing(backing, - volume['name']) - except exceptions.VimException: - LOG.warning("Changing backing: " - "%(backing)s name from " - "%(new_name)s to %(old_name)s " - "failed.", - {'backing': backing, - 'new_name': tmp_name, - 'old_name': volume['name']}) - - # Update the backing's storage profile if needed. - if need_profile_change: - LOG.debug("Backing: %(backing)s needs a profile change to:" - " %(profile)s.", - {'backing': backing, - 'profile': new_profile}) - profile_id = None - if new_profile is not None: - profile_id = self.ds_sel.get_profile_id(new_profile) - self.volumeops.change_backing_profile(backing, profile_id) - - # Retype is done. - LOG.debug("Volume: %s retype is done.", volume['name']) - return True - - def extend_volume(self, volume, new_size): - """Extend volume to new size. - - Extends the volume backing's virtual disk to new size. First, try to - extend in place on the same datastore. 
If that fails due to - insufficient disk space, then try to relocate the volume to a different - datastore that can accommodate the backing with new size and retry - extend. - - :param volume: dictionary describing the existing 'available' volume - :param new_size: new size in GB to extend this volume to - """ - vol_name = volume['name'] - backing = self.volumeops.get_backing(vol_name) - if not backing: - LOG.info("There is no backing for volume: %s; no need to " - "extend the virtual disk.", vol_name) - return - - # try extending vmdk in place - try: - self._extend_backing(backing, new_size) - LOG.info("Successfully extended volume: %(vol)s to size: " - "%(size)s GB.", - {'vol': vol_name, 'size': new_size}) - return - except exceptions.NoDiskSpaceException: - LOG.warning("Unable to extend volume: %(vol)s to size: " - "%(size)s on current datastore due to insufficient" - " space.", - {'vol': vol_name, 'size': new_size}) - - # Insufficient disk space; relocate the volume to a different datastore - # and retry extend. - LOG.info("Relocating volume: %s to a different datastore due to " - "insufficient disk space on current datastore.", - vol_name) - try: - create_params = {CREATE_PARAM_DISK_SIZE: new_size} - (host, rp, folder, summary) = self._select_ds_for_volume( - volume, create_params=create_params) - self.volumeops.relocate_backing(backing, summary.datastore, rp, - host) - self.volumeops.move_backing_to_folder(backing, folder) - self._extend_backing(backing, new_size) - except exceptions.VMwareDriverException: - with excutils.save_and_reraise_exception(): - LOG.error("Failed to extend volume: %(vol)s to size: " - "%(size)s GB.", - {'vol': vol_name, 'size': new_size}) - - LOG.info("Successfully extended volume: %(vol)s to size: " - "%(size)s GB.", - {'vol': vol_name, 'size': new_size}) - - def _get_disk_device(self, vmdk_path, vm_inv_path): - # Get the VM that corresponds to the given inventory path. 
- vm = self.volumeops.get_entity_by_inventory_path(vm_inv_path) - if vm: - # Get the disk device that corresponds to the given vmdk path. - disk_device = self.volumeops.get_disk_device(vm, vmdk_path) - if disk_device: - return (vm, disk_device) - - def _get_existing(self, existing_ref): - src_name = existing_ref.get('source-name') - if not src_name: - raise exception.InvalidInput( - reason=_("source-name cannot be empty.")) - - # source-name format: vmdk_path@vm_inventory_path - parts = src_name.split('@') - if len(parts) != 2: - raise exception.InvalidInput( - reason=_("source-name format should be: " - "'vmdk_path@vm_inventory_path'.")) - - (vmdk_path, vm_inv_path) = parts - existing = self._get_disk_device(vmdk_path, vm_inv_path) - if not existing: - reason = _("%s does not exist.") % src_name - raise exception.ManageExistingInvalidReference( - existing_ref=existing_ref, reason=reason) - - return existing - - def manage_existing_get_size(self, volume, existing_ref): - """Return size of the volume to be managed by manage_existing. - - When calculating the size, round up to the next GB. - - :param volume: Cinder volume to manage - :param existing_ref: Driver-specific information used to identify a - volume - """ - (_vm, disk) = self._get_existing(existing_ref) - return int(math.ceil(disk.capacityInKB * units.Ki / float(units.Gi))) - - def manage_existing(self, volume, existing_ref): - """Brings an existing virtual disk under Cinder management. - - Detaches the virtual disk identified by existing_ref and attaches - it to a volume backing. - - :param volume: Cinder volume to manage - :param existing_ref: Driver-specific information used to identify a - volume - """ - (vm, disk) = self._get_existing(existing_ref) - - # Create a backing for the volume. - create_params = {CREATE_PARAM_DISK_LESS: True} - backing = self._create_backing(volume, create_params=create_params) - - # Detach the disk to be managed from the source VM. 
- self.volumeops.detach_disk_from_backing(vm, disk) - - # Move the disk to the datastore folder of volume backing. - src_dc = self.volumeops.get_dc(vm) - dest_dc = self.volumeops.get_dc(backing) - (ds_name, folder_path) = self._get_ds_name_folder_path(backing) - dest_path = volumeops.VirtualDiskPath( - ds_name, folder_path, volume['name']) - self.volumeops.move_vmdk_file(src_dc, - disk.backing.fileName, - dest_path.get_descriptor_ds_file_path(), - dest_dc_ref=dest_dc) - - # Attach the disk to be managed to volume backing. - profile_id = self._get_storage_profile_id(volume) - self.volumeops.attach_disk_to_backing( - backing, - disk.capacityInKB, - VMwareVcVmdkDriver._get_disk_type(volume), - 'lsiLogic', - profile_id, - dest_path.get_descriptor_ds_file_path()) - self.volumeops.update_backing_disk_uuid(backing, volume['id']) - - def unmanage(self, volume): - backing = self.volumeops.get_backing(volume['name']) - if backing: - extra_config = self._get_extra_config(volume) - for key in extra_config: - extra_config[key] = '' - self.volumeops.update_backing_extra_config(backing, extra_config) - - @property - def session(self): - if not self._session: - ip = self.configuration.vmware_host_ip - port = self.configuration.vmware_host_port - username = self.configuration.vmware_host_username - password = self.configuration.vmware_host_password - api_retry_count = self.configuration.vmware_api_retry_count - task_poll_interval = self.configuration.vmware_task_poll_interval - wsdl_loc = self.configuration.safe_get('vmware_wsdl_location') - pbm_wsdl = self.pbm_wsdl if hasattr(self, 'pbm_wsdl') else None - ca_file = self.configuration.vmware_ca_file - insecure = self.configuration.vmware_insecure - pool_size = self.configuration.vmware_connection_pool_size - self._session = api.VMwareAPISession(ip, username, - password, api_retry_count, - task_poll_interval, - wsdl_loc=wsdl_loc, - pbm_wsdl_loc=pbm_wsdl, - port=port, - cacert=ca_file, - insecure=insecure, - pool_size=pool_size, - 
op_id_prefix='c-vol') - return self._session - - def _get_vc_version(self): - """Connect to vCenter server and fetch version. - - Can be over-ridden by setting 'vmware_host_version' config. - :returns: vCenter version as a LooseVersion object - """ - version_str = self.configuration.vmware_host_version - if version_str: - LOG.info("Using overridden vmware_host_version from config: %s", - version_str) - else: - version_str = vim_util.get_vc_version(self.session) - LOG.info("Fetched vCenter server version: %s", version_str) - return version_str - - def _validate_vcenter_version(self, vc_version): - if not versionutils.is_compatible( - self.MIN_SUPPORTED_VC_VERSION, vc_version, same_major=False): - msg = _('Running Cinder with a VMware vCenter version less than ' - '%s is not allowed.') % self.MIN_SUPPORTED_VC_VERSION - LOG.error(msg) - raise exceptions.VMwareDriverException(message=msg) - elif not versionutils.is_compatible(self.NEXT_MIN_SUPPORTED_VC_VERSION, - vc_version, - same_major=False): - LOG.warning('Running Cinder with a VMware vCenter version ' - 'less than %(ver)s is deprecated. The minimum ' - 'required version of vCenter server will be raised' - ' to %(ver)s in a future release.', - {'ver': self.NEXT_MIN_SUPPORTED_VC_VERSION}) - - def do_setup(self, context): - """Any initialization the volume driver does while starting.""" - self._validate_params() - - # Validate vCenter version. - self._vc_version = self._get_vc_version() - self._validate_vcenter_version(self._vc_version) - - # Enable pbm only if vCenter version is 5.5+. 
- if (self._vc_version and - versionutils.is_compatible(self.PBM_ENABLED_VC_VERSION, - self._vc_version, - same_major=False)): - self.pbm_wsdl = pbm.get_pbm_wsdl_location(self._vc_version) - if not self.pbm_wsdl: - LOG.error("Not able to configure PBM for vCenter server: %s", - self._vc_version) - raise exceptions.VMwareDriverException() - self._storage_policy_enabled = True - # Destroy current session so that it is recreated with pbm enabled - self._session = None - - # recreate session and initialize volumeops and ds_sel - # TODO(vbala) remove properties: session, volumeops and ds_sel - max_objects = self.configuration.vmware_max_objects_retrieval - self._volumeops = volumeops.VMwareVolumeOps(self.session, max_objects) - self._ds_sel = hub.DatastoreSelector( - self.volumeops, self.session, max_objects) - - # Get clusters to be used for backing VM creation. - cluster_names = self.configuration.vmware_cluster_name - if cluster_names: - self._clusters = self.volumeops.get_cluster_refs( - cluster_names).values() - LOG.info("Using compute cluster(s): %s.", cluster_names) - - LOG.info("Successfully setup driver: %(driver)s for server: " - "%(ip)s.", {'driver': self.__class__.__name__, - 'ip': self.configuration.vmware_host_ip}) - - def _get_volume_group_folder(self, datacenter, project_id): - """Get inventory folder for organizing volume backings. - - The inventory folder for organizing volume backings has the following - hierarchy: - /OpenStack/Project ()/ - - where volume_folder is the vmdk driver config option - "vmware_volume_folder". 
- - :param datacenter: Reference to the datacenter - :param project_id: OpenStack project ID - :return: Reference to the inventory folder - """ - volume_folder_name = self.configuration.vmware_volume_folder - project_folder_name = "Project (%s)" % project_id - folder_names = ['OpenStack', project_folder_name, volume_folder_name] - return self.volumeops.create_vm_inventory_folder(datacenter, - folder_names) - - def _relocate_backing(self, volume, backing, host): - """Relocate volume backing to a datastore accessible to the given host. - - The backing is not relocated if the current datastore is already - accessible to the host and compliant with the backing's storage - profile. - - :param volume: Volume to be relocated - :param backing: Reference to the backing - :param host: Reference to the host - """ - # Check if the current datastore is visible to the host managing - # the instance and compliant with the storage profile. - datastore = self.volumeops.get_datastore(backing) - backing_profile = None - if self._storage_policy_enabled: - backing_profile = self._get_storage_profile(volume) - if (self.volumeops.is_datastore_accessible(datastore, host) and - self.ds_sel.is_datastore_compliant(datastore, - backing_profile)): - LOG.debug("Datastore: %(datastore)s of backing: %(backing)s is " - "already accessible to instance's host: %(host)s.", - {'backing': backing, - 'datastore': datastore, - 'host': host}) - if backing_profile: - LOG.debug("Backing: %(backing)s is compliant with " - "storage profile: %(profile)s.", - {'backing': backing, - 'profile': backing_profile}) - return - - # We need to relocate the backing to an accessible and profile - # compliant datastore. - req = {} - req[hub.DatastoreSelector.SIZE_BYTES] = (volume['size'] * - units.Gi) - req[hub.DatastoreSelector.PROFILE_NAME] = backing_profile - - # Select datastore satisfying the requirements. 
- (host, resource_pool, summary) = self._select_datastore(req, host) - dc = self._get_dc(resource_pool) - folder = self._get_volume_group_folder(dc, volume['project_id']) - - self.volumeops.relocate_backing(backing, summary.datastore, - resource_pool, host) - self.volumeops.move_backing_to_folder(backing, folder) - - @staticmethod - def _get_clone_type(volume): - """Get clone type from volume type. - - :param volume: Volume object - :return: Clone type from the extra spec if present, else return - default 'full' clone type - """ - clone_type = _get_volume_type_extra_spec( - volume['volume_type_id'], - 'clone_type', - default_value=volumeops.FULL_CLONE_TYPE) - - if (clone_type != volumeops.FULL_CLONE_TYPE - and clone_type != volumeops.LINKED_CLONE_TYPE): - msg = (_("Clone type '%(clone_type)s' is invalid; valid values" - " are: '%(full_clone)s' and '%(linked_clone)s'.") % - {'clone_type': clone_type, - 'full_clone': volumeops.FULL_CLONE_TYPE, - 'linked_clone': volumeops.LINKED_CLONE_TYPE}) - LOG.error(msg) - raise exception.Invalid(message=msg) - - return clone_type - - def _clone_backing(self, volume, backing, snapshot, clone_type, src_vsize): - """Clone the backing. - - :param volume: New Volume object - :param backing: Reference to the backing entity - :param snapshot: Reference to the snapshot entity - :param clone_type: type of the clone - :param src_vsize: the size of the source volume - """ - if (clone_type == volumeops.LINKED_CLONE_TYPE and - volume.size > src_vsize): - # Volume extend will fail if the volume is a linked clone of - # another volume. Use full clone if extend is needed after cloning. 
- clone_type = volumeops.FULL_CLONE_TYPE - LOG.debug("Linked cloning not possible for creating volume " - "since volume needs to be extended after cloning.", - resource=volume) - - datastore = None - host = None - rp = None - folder = None - if not clone_type == volumeops.LINKED_CLONE_TYPE: - # Pick a datastore where to create the full clone under any host - (host, rp, folder, summary) = self._select_ds_for_volume(volume) - datastore = summary.datastore - extra_config = self._get_extra_config(volume) - clone = self.volumeops.clone_backing(volume['name'], backing, - snapshot, clone_type, datastore, - host=host, resource_pool=rp, - extra_config=extra_config, - folder=folder) - - # vCenter 6.0+ does not allow changing the UUID of delta disk created - # during linked cloning; skip setting UUID for vCenter 6.0+. - if (clone_type == volumeops.LINKED_CLONE_TYPE and - versionutils.is_compatible( - '6.0', self._vc_version, same_major=False)): - LOG.debug("Not setting vmdk UUID for volume: %s.", volume['id']) - else: - self.volumeops.update_backing_disk_uuid(clone, volume['id']) - - # If the volume size specified by the user is greater than - # the size of the source volume, the newly created volume will - # allocate the capacity to the size of the source volume in the backend - # VMDK datastore, though the volume information indicates it has a - # capacity of the volume size. If the volume size is greater, - # we need to extend/resize the capacity of the vmdk virtual disk from - # the size of the source volume to the volume size. - if volume['size'] > src_vsize: - self._extend_backing(clone, volume['size']) - LOG.info("Successfully created clone: %s.", clone) - - def _create_volume_from_snapshot(self, volume, snapshot): - """Creates a volume from a snapshot. - - If the snapshot does not exist or source volume's backing does not - exist, then pass. 
- - :param volume: New Volume object - :param snapshot: Reference to snapshot entity - """ - self._verify_volume_creation(volume) - backing = self.volumeops.get_backing(snapshot['volume_name']) - if not backing: - LOG.info("There is no backing for the snapshotted volume: " - "%(snap)s. Not creating any backing for the " - "volume: %(vol)s.", - {'snap': snapshot['name'], 'vol': volume['name']}) - return - snapshot_moref = self.volumeops.get_snapshot(backing, - snapshot['name']) - if not snapshot_moref: - LOG.info("There is no snapshot point for the snapshotted " - "volume: %(snap)s. Not creating any backing for " - "the volume: %(vol)s.", - {'snap': snapshot['name'], 'vol': volume['name']}) - return - clone_type = VMwareVcVmdkDriver._get_clone_type(volume) - self._clone_backing(volume, backing, snapshot_moref, clone_type, - snapshot['volume_size']) - - def create_volume_from_snapshot(self, volume, snapshot): - """Creates a volume from a snapshot. - - :param volume: New Volume object - :param snapshot: Reference to snapshot entity - """ - self._create_volume_from_snapshot(volume, snapshot) - - def _create_cloned_volume(self, volume, src_vref): - """Creates volume clone. - - If source volume's backing does not exist, then pass. - Linked clone of attached volume is not supported. - - :param volume: New Volume object - :param src_vref: Source Volume object - """ - self._verify_volume_creation(volume) - backing = self.volumeops.get_backing(src_vref['name']) - if not backing: - LOG.info("There is no backing for the source volume: %(src)s. 
" - "Not creating any backing for volume: %(vol)s.", - {'src': src_vref['name'], 'vol': volume['name']}) - return - clone_type = VMwareVcVmdkDriver._get_clone_type(volume) - snapshot = None - if clone_type == volumeops.LINKED_CLONE_TYPE: - if src_vref['status'] != 'available': - msg = _("Linked clone of source volume not supported " - "in state: %s.") % src_vref['status'] - LOG.error(msg) - raise exception.InvalidVolume(msg) - # To create a linked clone, we create a temporary snapshot of the - # source volume, and then create the clone off the temporary - # snapshot. - snap_name = 'temp-snapshot-%s' % volume['id'] - snapshot = self.volumeops.create_snapshot(backing, snap_name, None) - try: - self._clone_backing(volume, backing, snapshot, clone_type, - src_vref['size']) - finally: - if snapshot: - # Delete temporary snapshot. - try: - self.volumeops.delete_snapshot(backing, snap_name) - except exceptions.VimException: - LOG.debug("Unable to delete temporary snapshot: %s of " - "volume backing.", snap_name, resource=volume, - exc_info=True) - - def create_cloned_volume(self, volume, src_vref): - """Creates volume clone. - - :param volume: New Volume object - :param src_vref: Source Volume object - """ - self._create_cloned_volume(volume, src_vref) - - def accept_transfer(self, context, volume, new_user, new_project): - """Accept the transfer of a volume for a new user/project.""" - backing = self.volumeops.get_backing(volume.name) - if backing: - dc = self.volumeops.get_dc(backing) - new_folder = self._get_volume_group_folder(dc, new_project) - self.volumeops.move_backing_to_folder(backing, new_folder) diff --git a/cinder/volume/drivers/vmware/volumeops.py b/cinder/volume/drivers/vmware/volumeops.py deleted file mode 100644 index fe55ad976..000000000 --- a/cinder/volume/drivers/vmware/volumeops.py +++ /dev/null @@ -1,1619 +0,0 @@ -# Copyright (c) 2013 VMware, Inc. -# All Rights Reserved. 
-# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -""" -Implements operations on volumes residing on VMware datastores. -""" - - -from oslo_log import log as logging -from oslo_utils import units -from oslo_vmware import exceptions -from oslo_vmware import vim_util -from six.moves import urllib - -from cinder.i18n import _ -from cinder.volume.drivers.vmware import exceptions as vmdk_exceptions - - -LOG = logging.getLogger(__name__) -LINKED_CLONE_TYPE = 'linked' -FULL_CLONE_TYPE = 'full' - -BACKING_UUID_KEY = 'instanceUuid' - - -def split_datastore_path(datastore_path): - """Split the datastore path to components. - - return the datastore name, relative folder path and the file name - - E.g. 
datastore_path = [datastore1] my_volume/my_volume.vmdk, returns - (datastore1, my_volume/, my_volume.vmdk) - - :param datastore_path: Datastore path of a file - :return: Parsed datastore name, relative folder path and file name - """ - splits = datastore_path.split('[', 1)[1].split(']', 1) - datastore_name = None - folder_path = None - file_name = None - if len(splits) == 1: - datastore_name = splits[0] - else: - datastore_name, path = splits - # Path will be of form my_volume/my_volume.vmdk - # we need into my_volumes/ and my_volume.vmdk - splits = path.split('/') - file_name = splits[len(splits) - 1] - folder_path = path[:-len(file_name)] - - return (datastore_name.strip(), folder_path.strip(), file_name.strip()) - - -class VirtualDiskPath(object): - """Class representing paths of files comprising a virtual disk.""" - - def __init__(self, ds_name, folder_path, disk_name): - """Creates path object for the given disk. - - :param ds_name: name of the datastore where disk is stored - :param folder_path: absolute path of the folder containing the disk - :param disk_name: name of the virtual disk - """ - self._descriptor_file_path = "%s%s.vmdk" % (folder_path, disk_name) - self._descriptor_ds_file_path = self.get_datastore_file_path( - ds_name, self._descriptor_file_path) - - def get_datastore_file_path(self, ds_name, file_path): - """Get datastore path corresponding to the given file path. 
- - :param ds_name: name of the datastore containing the file represented - by the given file path - :param file_path: absolute path of the file - :return: datastore file path - """ - return "[%s] %s" % (ds_name, file_path) - - def get_descriptor_file_path(self): - """Get absolute file path of the virtual disk descriptor.""" - return self._descriptor_file_path - - def get_descriptor_ds_file_path(self): - """Get datastore file path of the virtual disk descriptor.""" - return self._descriptor_ds_file_path - - -class FlatExtentVirtualDiskPath(VirtualDiskPath): - """Paths of files in a non-monolithic disk with a single flat extent.""" - - def __init__(self, ds_name, folder_path, disk_name): - """Creates path object for the given disk. - - :param ds_name: name of the datastore where disk is stored - :param folder_path: absolute path of the folder containing the disk - :param disk_name: name of the virtual disk - """ - super(FlatExtentVirtualDiskPath, self).__init__( - ds_name, folder_path, disk_name) - self._flat_extent_file_path = "%s%s-flat.vmdk" % (folder_path, - disk_name) - self._flat_extent_ds_file_path = self.get_datastore_file_path( - ds_name, self._flat_extent_file_path) - - def get_flat_extent_file_path(self): - """Get absolute file path of the flat extent.""" - return self._flat_extent_file_path - - def get_flat_extent_ds_file_path(self): - """Get datastore file path of the flat extent.""" - return self._flat_extent_ds_file_path - - -class MonolithicSparseVirtualDiskPath(VirtualDiskPath): - """Paths of file comprising a monolithic sparse disk.""" - pass - - -class VirtualDiskType(object): - """Supported virtual disk types.""" - - EAGER_ZEROED_THICK = "eagerZeroedThick" - PREALLOCATED = "preallocated" - THIN = "thin" - - # thick in extra_spec means lazy-zeroed thick disk - EXTRA_SPEC_DISK_TYPE_DICT = {'eagerZeroedThick': EAGER_ZEROED_THICK, - 'thick': PREALLOCATED, - 'thin': THIN - } - - @staticmethod - def is_valid(extra_spec_disk_type): - """Check if the 
given disk type in extra_spec is valid. - - :param extra_spec_disk_type: disk type in extra_spec - :return: True if valid - """ - return (extra_spec_disk_type in - VirtualDiskType.EXTRA_SPEC_DISK_TYPE_DICT) - - @staticmethod - def validate(extra_spec_disk_type): - """Validate the given disk type in extra_spec. - - This method throws an instance of InvalidDiskTypeException if the given - disk type is invalid. - - :param extra_spec_disk_type: disk type in extra_spec - :raises: InvalidDiskTypeException - """ - if not VirtualDiskType.is_valid(extra_spec_disk_type): - raise vmdk_exceptions.InvalidDiskTypeException( - disk_type=extra_spec_disk_type) - - @staticmethod - def get_virtual_disk_type(extra_spec_disk_type): - """Return disk type corresponding to the extra_spec disk type. - - :param extra_spec_disk_type: disk type in extra_spec - :return: virtual disk type - :raises: InvalidDiskTypeException - """ - VirtualDiskType.validate(extra_spec_disk_type) - return (VirtualDiskType.EXTRA_SPEC_DISK_TYPE_DICT[ - extra_spec_disk_type]) - - -class VirtualDiskAdapterType(object): - """Supported virtual disk adapter types.""" - - LSI_LOGIC = "lsiLogic" - BUS_LOGIC = "busLogic" - LSI_LOGIC_SAS = "lsiLogicsas" - PARA_VIRTUAL = "paraVirtual" - IDE = "ide" - - @staticmethod - def is_valid(adapter_type): - """Check if the given adapter type is valid. - - :param adapter_type: adapter type to check - :return: True if valid - """ - return adapter_type in [VirtualDiskAdapterType.LSI_LOGIC, - VirtualDiskAdapterType.BUS_LOGIC, - VirtualDiskAdapterType.LSI_LOGIC_SAS, - VirtualDiskAdapterType.PARA_VIRTUAL, - VirtualDiskAdapterType.IDE] - - @staticmethod - def validate(extra_spec_adapter_type): - """Validate the given adapter type in extra_spec. - - This method throws an instance of InvalidAdapterTypeException if the - given adapter type is invalid. 
- - :param extra_spec_adapter_type: adapter type in extra_spec - :raises: InvalidAdapterTypeException - """ - if not VirtualDiskAdapterType.is_valid(extra_spec_adapter_type): - raise vmdk_exceptions.InvalidAdapterTypeException( - invalid_type=extra_spec_adapter_type) - - @staticmethod - def get_adapter_type(extra_spec_adapter): - """Get the adapter type to be used in VirtualDiskSpec. - - :param extra_spec_adapter: adapter type in the extra_spec - :return: adapter type to be used in VirtualDiskSpec - """ - VirtualDiskAdapterType.validate(extra_spec_adapter) - # We set the adapter type as lsiLogic for lsiLogicsas/paraVirtual - # since it is not supported by VirtualDiskManager APIs. This won't - # be a problem because we attach the virtual disk to the correct - # controller type and the disk adapter type is always resolved using - # its controller key. - if (extra_spec_adapter == VirtualDiskAdapterType.LSI_LOGIC_SAS or - extra_spec_adapter == VirtualDiskAdapterType.PARA_VIRTUAL): - return VirtualDiskAdapterType.LSI_LOGIC - else: - return extra_spec_adapter - - -class ControllerType(object): - """Encapsulate various controller types.""" - - LSI_LOGIC = 'VirtualLsiLogicController' - BUS_LOGIC = 'VirtualBusLogicController' - LSI_LOGIC_SAS = 'VirtualLsiLogicSASController' - PARA_VIRTUAL = 'ParaVirtualSCSIController' - IDE = 'VirtualIDEController' - - CONTROLLER_TYPE_DICT = { - VirtualDiskAdapterType.LSI_LOGIC: LSI_LOGIC, - VirtualDiskAdapterType.BUS_LOGIC: BUS_LOGIC, - VirtualDiskAdapterType.LSI_LOGIC_SAS: LSI_LOGIC_SAS, - VirtualDiskAdapterType.PARA_VIRTUAL: PARA_VIRTUAL, - VirtualDiskAdapterType.IDE: IDE} - - @staticmethod - def get_controller_type(adapter_type): - """Get the disk controller type based on the given adapter type. 
- - :param adapter_type: disk adapter type - :return: controller type corresponding to the given adapter type - :raises: InvalidAdapterTypeException - """ - if adapter_type in ControllerType.CONTROLLER_TYPE_DICT: - return ControllerType.CONTROLLER_TYPE_DICT[adapter_type] - raise vmdk_exceptions.InvalidAdapterTypeException( - invalid_type=adapter_type) - - @staticmethod - def is_scsi_controller(controller_type): - """Check if the given controller is a SCSI controller. - - :param controller_type: controller type - :return: True if the controller is a SCSI controller - """ - return controller_type in [ControllerType.LSI_LOGIC, - ControllerType.BUS_LOGIC, - ControllerType.LSI_LOGIC_SAS, - ControllerType.PARA_VIRTUAL] - - -class VMwareVolumeOps(object): - """Manages volume operations.""" - - def __init__(self, session, max_objects): - self._session = session - self._max_objects = max_objects - self._folder_cache = {} - - def get_backing(self, name): - """Get the backing based on name. - - :param name: Name of the backing - :return: Managed object reference to the backing - """ - - retrieve_result = self._session.invoke_api(vim_util, 'get_objects', - self._session.vim, - 'VirtualMachine', - self._max_objects) - while retrieve_result: - vms = retrieve_result.objects - for vm in vms: - if vm.propSet[0].val == name: - # We got the result, so cancel further retrieval. - self.cancel_retrieval(retrieve_result) - return vm.obj - # Result not obtained, continue retrieving results. - retrieve_result = self.continue_retrieval(retrieve_result) - - LOG.debug("Did not find any backing with name: %s", name) - - def delete_backing(self, backing): - """Delete the backing. 
- - :param backing: Managed object reference to the backing - """ - LOG.debug("Deleting the VM backing: %s.", backing) - task = self._session.invoke_api(self._session.vim, 'Destroy_Task', - backing) - LOG.debug("Initiated deletion of VM backing: %s.", backing) - self._session.wait_for_task(task) - LOG.info("Deleted the VM backing: %s.", backing) - - # TODO(kartikaditya) Keep the methods not specific to volume in - # a different file - def get_host(self, instance): - """Get host under which instance is present. - - :param instance: Managed object reference of the instance VM - :return: Host managing the instance VM - """ - return self._session.invoke_api(vim_util, 'get_object_property', - self._session.vim, instance, - 'runtime.host') - - def get_hosts(self): - """Get all host from the inventory. - - :return: All the hosts from the inventory - """ - return self._session.invoke_api(vim_util, 'get_objects', - self._session.vim, - 'HostSystem', self._max_objects) - - def continue_retrieval(self, retrieve_result): - """Continue retrieval of results if necessary. - - :param retrieve_result: Result from RetrievePropertiesEx - """ - - return self._session.invoke_api(vim_util, 'continue_retrieval', - self._session.vim, retrieve_result) - - def cancel_retrieval(self, retrieve_result): - """Cancel retrieval of results if necessary. - - :param retrieve_result: Result from RetrievePropertiesEx - """ - - self._session.invoke_api(vim_util, 'cancel_retrieval', - self._session.vim, retrieve_result) - - # TODO(vbala): move this method to datastore module - def _is_usable(self, mount_info): - """Check if a datastore is usable as per the given mount info. - - The datastore is considered to be usable for a host only if it is - writable, mounted and accessible. 
- - :param mount_info: Host mount information - :return: True if datastore is usable - """ - writable = mount_info.accessMode == 'readWrite' - # If mounted attribute is not set, then default is True - mounted = getattr(mount_info, 'mounted', True) - # If accessible attribute is not set, then default is False - accessible = getattr(mount_info, 'accessible', False) - - return writable and mounted and accessible - - def get_connected_hosts(self, datastore): - """Get all the hosts to which the datastore is connected and usable. - - The datastore is considered to be usable for a host only if it is - writable, mounted and accessible. - - :param datastore: Reference to the datastore entity - :return: List of managed object references of all connected - hosts - """ - summary = self.get_summary(datastore) - if not summary.accessible: - return [] - - host_mounts = self._session.invoke_api(vim_util, 'get_object_property', - self._session.vim, datastore, - 'host') - if not hasattr(host_mounts, 'DatastoreHostMount'): - return [] - - connected_hosts = [] - for host_mount in host_mounts.DatastoreHostMount: - if self._is_usable(host_mount.mountInfo): - connected_hosts.append(host_mount.key.value) - - return connected_hosts - - def is_datastore_accessible(self, datastore, host): - """Check if the datastore is accessible to the given host. - - :param datastore: datastore reference - :return: True if the datastore is accessible - """ - hosts = self.get_connected_hosts(datastore) - return host.value in hosts - - # TODO(vbala): move this method to datastore module - def _in_maintenance(self, summary): - """Check if a datastore is entering maintenance or in maintenance. 
- - :param summary: Summary information about the datastore - :return: True if the datastore is entering maintenance or in - maintenance - """ - if hasattr(summary, 'maintenanceMode'): - return summary.maintenanceMode in ['enteringMaintenance', - 'inMaintenance'] - return False - - def _get_parent(self, child, parent_type): - """Get immediate parent of given type via 'parent' property. - - :param child: Child entity reference - :param parent_type: Entity type of the parent - :return: Immediate parent of specific type up the hierarchy via - 'parent' property - """ - - if not child: - return None - if child._type == parent_type: - return child - parent = self._session.invoke_api(vim_util, 'get_object_property', - self._session.vim, child, 'parent') - return self._get_parent(parent, parent_type) - - def get_dc(self, child): - """Get parent datacenter up the hierarchy via 'parent' property. - - :param child: Reference of the child entity - :return: Parent Datacenter of the param child entity - """ - return self._get_parent(child, 'Datacenter') - - def get_vmfolder(self, datacenter): - """Get the vmFolder. 
- - :param datacenter: Reference to the datacenter entity - :return: vmFolder property of the datacenter - """ - return self._session.invoke_api(vim_util, 'get_object_property', - self._session.vim, datacenter, - 'vmFolder') - - def _get_child_folder(self, parent_folder, child_folder_name): - LOG.debug("Finding child folder: %s.", child_folder_name) - # Get list of child entities for the parent folder - prop_val = self._session.invoke_api(vim_util, 'get_object_property', - self._session.vim, parent_folder, - 'childEntity') - - if prop_val and hasattr(prop_val, 'ManagedObjectReference'): - child_entities = prop_val.ManagedObjectReference - - # Return if the child folder with input name is already present - for child_entity in child_entities: - if child_entity._type != 'Folder': - continue - child_entity_name = self.get_entity_name(child_entity) - if (child_entity_name - and (urllib.parse.unquote(child_entity_name) - == child_folder_name)): - return child_entity - - def create_folder(self, parent_folder, child_folder_name): - """Creates child folder under the given parent folder. - - :param parent_folder: Reference to the parent folder - :param child_folder_name: Name of the child folder - :return: Reference to the child folder - """ - LOG.debug("Creating folder: %(child_folder_name)s under parent " - "folder: %(parent_folder)s.", - {'child_folder_name': child_folder_name, - 'parent_folder': parent_folder}) - - try: - child_folder = self._session.invoke_api(self._session.vim, - 'CreateFolder', - parent_folder, - name=child_folder_name) - LOG.debug("Created child folder: %s.", child_folder) - except exceptions.DuplicateName: - # Another thread is trying to create the same folder, ignore - # the exception. 
- LOG.debug('Folder: %s already exists.', child_folder_name) - child_folder = self._get_child_folder(parent_folder, - child_folder_name) - return child_folder - - def create_vm_inventory_folder(self, datacenter, path_comp): - """Create and return a VM inventory folder. - - This method caches references to inventory folders returned. - - :param datacenter: Reference to datacenter - :param path_comp: Path components as a list - """ - LOG.debug("Creating inventory folder: %(path_comp)s under VM folder " - "of datacenter: %(datacenter)s.", - {'path_comp': path_comp, - 'datacenter': datacenter}) - path = "/" + datacenter.value - parent = self._folder_cache.get(path) - if not parent: - parent = self.get_vmfolder(datacenter) - self._folder_cache[path] = parent - - folder = None - for folder_name in path_comp: - path = "/".join([path, folder_name]) - folder = self._folder_cache.get(path) - if not folder: - folder = self.create_folder(parent, folder_name) - self._folder_cache[path] = folder - parent = folder - - LOG.debug("Inventory folder for path: %(path)s is %(folder)s.", - {'path': path, - 'folder': folder}) - return folder - - def extend_virtual_disk(self, requested_size_in_gb, path, dc_ref, - eager_zero=False): - """Extend the virtual disk to the requested size. - - :param requested_size_in_gb: Size of the volume in GB - :param path: Datastore path of the virtual disk to extend - :param dc_ref: Reference to datacenter - :param eager_zero: Boolean determining if the free space - is zeroed out - """ - LOG.debug("Extending virtual disk: %(path)s to %(size)s GB.", - {'path': path, 'size': requested_size_in_gb}) - diskMgr = self._session.vim.service_content.virtualDiskManager - - # VMWare API needs the capacity unit to be in KB, so convert the - # capacity unit from GB to KB. 
- size_in_kb = requested_size_in_gb * units.Mi - task = self._session.invoke_api(self._session.vim, - "ExtendVirtualDisk_Task", - diskMgr, - name=path, - datacenter=dc_ref, - newCapacityKb=size_in_kb, - eagerZero=eager_zero) - self._session.wait_for_task(task) - LOG.info("Successfully extended virtual disk: %(path)s to " - "%(size)s GB.", - {'path': path, 'size': requested_size_in_gb}) - - def _create_controller_config_spec(self, adapter_type): - """Returns config spec for adding a disk controller.""" - cf = self._session.vim.client.factory - - controller_type = ControllerType.get_controller_type(adapter_type) - controller_device = cf.create('ns0:%s' % controller_type) - controller_device.key = -100 - controller_device.busNumber = 0 - if ControllerType.is_scsi_controller(controller_type): - controller_device.sharedBus = 'noSharing' - - controller_spec = cf.create('ns0:VirtualDeviceConfigSpec') - controller_spec.operation = 'add' - controller_spec.device = controller_device - return controller_spec - - def _create_disk_backing(self, disk_type, vmdk_ds_file_path): - """Creates file backing for virtual disk.""" - cf = self._session.vim.client.factory - disk_device_bkng = cf.create('ns0:VirtualDiskFlatVer2BackingInfo') - - if disk_type == VirtualDiskType.EAGER_ZEROED_THICK: - disk_device_bkng.eagerlyScrub = True - elif disk_type == VirtualDiskType.THIN: - disk_device_bkng.thinProvisioned = True - - disk_device_bkng.fileName = vmdk_ds_file_path or '' - disk_device_bkng.diskMode = 'persistent' - - return disk_device_bkng - - def _create_virtual_disk_config_spec(self, size_kb, disk_type, - controller_key, profile_id, - vmdk_ds_file_path): - """Returns config spec for adding a virtual disk.""" - cf = self._session.vim.client.factory - - disk_device = cf.create('ns0:VirtualDisk') - # disk size should be at least 1024KB - disk_device.capacityInKB = max(units.Ki, int(size_kb)) - if controller_key < 0: - disk_device.key = controller_key - 1 - else: - disk_device.key = -101 - 
disk_device.unitNumber = 0 - disk_device.controllerKey = controller_key - disk_device.backing = self._create_disk_backing(disk_type, - vmdk_ds_file_path) - - disk_spec = cf.create('ns0:VirtualDeviceConfigSpec') - disk_spec.operation = 'add' - if vmdk_ds_file_path is None: - disk_spec.fileOperation = 'create' - disk_spec.device = disk_device - if profile_id: - disk_profile = cf.create('ns0:VirtualMachineDefinedProfileSpec') - disk_profile.profileId = profile_id - disk_spec.profile = [disk_profile] - - return disk_spec - - def _create_specs_for_disk_add(self, size_kb, disk_type, adapter_type, - profile_id, vmdk_ds_file_path=None): - """Create controller and disk config specs for adding a new disk. - - :param size_kb: disk size in KB - :param disk_type: disk provisioning type - :param adapter_type: disk adapter type - :param profile_id: storage policy profile identification - :param vmdk_ds_file_path: Optional datastore file path of an existing - virtual disk. If specified, file backing is - not created for the virtual disk. - :return: list containing controller and disk config specs - """ - controller_spec = None - if adapter_type == 'ide': - # For IDE disks, use one of the default IDE controllers (with keys - # 200 and 201) created as part of backing VM creation. 
- controller_key = 200 - else: - controller_spec = self._create_controller_config_spec(adapter_type) - controller_key = controller_spec.device.key - - disk_spec = self._create_virtual_disk_config_spec(size_kb, - disk_type, - controller_key, - profile_id, - vmdk_ds_file_path) - specs = [disk_spec] - if controller_spec is not None: - specs.append(controller_spec) - return specs - - def _get_extra_config_option_values(self, extra_config): - - cf = self._session.vim.client.factory - option_values = [] - - for key, value in extra_config.items(): - opt = cf.create('ns0:OptionValue') - opt.key = key - opt.value = value - option_values.append(opt) - - return option_values - - def _get_create_spec_disk_less(self, name, ds_name, profileId=None, - extra_config=None): - """Return spec for creating disk-less backing. - - :param name: Name of the backing - :param ds_name: Datastore name where the disk is to be provisioned - :param profileId: Storage profile ID for the backing - :param extra_config: Key-value pairs to be written to backing's - extra-config - :return: Spec for creation - """ - cf = self._session.vim.client.factory - vm_file_info = cf.create('ns0:VirtualMachineFileInfo') - vm_file_info.vmPathName = '[%s]' % ds_name - - create_spec = cf.create('ns0:VirtualMachineConfigSpec') - create_spec.name = name - create_spec.guestId = 'otherGuest' - create_spec.numCPUs = 1 - create_spec.memoryMB = 128 - create_spec.files = vm_file_info - # Set the hardware version to a compatible version supported by - # vSphere 5.0. This will ensure that the backing VM can be migrated - # without any incompatibility issues in a mixed cluster of ESX hosts - # with versions 5.0 or above. 
- create_spec.version = "vmx-08" - - if profileId: - vmProfile = cf.create('ns0:VirtualMachineDefinedProfileSpec') - vmProfile.profileId = profileId - create_spec.vmProfile = [vmProfile] - - if extra_config: - if BACKING_UUID_KEY in extra_config: - create_spec.instanceUuid = extra_config.pop(BACKING_UUID_KEY) - create_spec.extraConfig = self._get_extra_config_option_values( - extra_config) - - return create_spec - - def get_create_spec(self, name, size_kb, disk_type, ds_name, - profile_id=None, adapter_type='lsiLogic', - extra_config=None): - """Return spec for creating backing with a single disk. - - :param name: name of the backing - :param size_kb: disk size in KB - :param disk_type: disk provisioning type - :param ds_name: datastore name where the disk is to be provisioned - :param profile_id: storage policy profile identification - :param adapter_type: disk adapter type - :param extra_config: key-value pairs to be written to backing's - extra-config - :return: spec for creation - """ - create_spec = self._get_create_spec_disk_less( - name, ds_name, profileId=profile_id, extra_config=extra_config) - create_spec.deviceChange = self._create_specs_for_disk_add( - size_kb, disk_type, adapter_type, profile_id) - return create_spec - - def _create_backing_int(self, folder, resource_pool, host, create_spec): - """Helper for create backing methods.""" - LOG.debug("Creating volume backing with spec: %s.", create_spec) - task = self._session.invoke_api(self._session.vim, 'CreateVM_Task', - folder, config=create_spec, - pool=resource_pool, host=host) - task_info = self._session.wait_for_task(task) - backing = task_info.result - LOG.info("Successfully created volume backing: %s.", backing) - return backing - - def create_backing(self, name, size_kb, disk_type, folder, resource_pool, - host, ds_name, profileId=None, adapter_type='lsiLogic', - extra_config=None): - """Create backing for the volume. - - Creates a VM with one VMDK based on the given inputs. 
- - :param name: Name of the backing - :param size_kb: Size in KB of the backing - :param disk_type: VMDK type for the disk - :param folder: Folder, where to create the backing under - :param resource_pool: Resource pool reference - :param host: Host reference - :param ds_name: Datastore name where the disk is to be provisioned - :param profileId: Storage profile ID to be associated with backing - :param adapter_type: Disk adapter type - :param extra_config: Key-value pairs to be written to backing's - extra-config - :return: Reference to the created backing entity - """ - LOG.debug("Creating volume backing with name: %(name)s " - "disk_type: %(disk_type)s size_kb: %(size_kb)s " - "adapter_type: %(adapter_type)s profileId: %(profile)s at " - "folder: %(folder)s resource_pool: %(resource_pool)s " - "host: %(host)s datastore_name: %(ds_name)s.", - {'name': name, 'disk_type': disk_type, 'size_kb': size_kb, - 'folder': folder, 'resource_pool': resource_pool, - 'ds_name': ds_name, 'profile': profileId, 'host': host, - 'adapter_type': adapter_type}) - - create_spec = self.get_create_spec( - name, size_kb, disk_type, ds_name, profile_id=profileId, - adapter_type=adapter_type, extra_config=extra_config) - return self._create_backing_int(folder, resource_pool, host, - create_spec) - - def create_backing_disk_less(self, name, folder, resource_pool, - host, ds_name, profileId=None, - extra_config=None): - """Create disk-less volume backing. - - This type of backing is useful for creating volume from image. The - downloaded image from the image service can be copied to a virtual - disk of desired provisioning type and added to the backing VM. 
- - :param name: Name of the backing - :param folder: Folder where the backing is created - :param resource_pool: Resource pool reference - :param host: Host reference - :param ds_name: Name of the datastore used for VM storage - :param profileId: Storage profile ID to be associated with backing - :param extra_config: Key-value pairs to be written to backing's - extra-config - :return: Reference to the created backing entity - """ - LOG.debug("Creating disk-less volume backing with name: %(name)s " - "profileId: %(profile)s at folder: %(folder)s " - "resource pool: %(resource_pool)s host: %(host)s " - "datastore_name: %(ds_name)s.", - {'name': name, 'profile': profileId, 'folder': folder, - 'resource_pool': resource_pool, 'host': host, - 'ds_name': ds_name}) - - create_spec = self._get_create_spec_disk_less( - name, ds_name, profileId=profileId, extra_config=extra_config) - return self._create_backing_int(folder, resource_pool, host, - create_spec) - - def get_datastore(self, backing): - """Get datastore where the backing resides. - - :param backing: Reference to the backing - :return: Datastore reference to which the backing belongs - """ - return self._session.invoke_api(vim_util, 'get_object_property', - self._session.vim, backing, - 'datastore').ManagedObjectReference[0] - - def get_summary(self, datastore): - """Get datastore summary. 
- - :param datastore: Reference to the datastore - :return: 'summary' property of the datastore - """ - return self._session.invoke_api(vim_util, 'get_object_property', - self._session.vim, datastore, - 'summary') - - def _create_relocate_spec_disk_locator(self, datastore, disk_type, - disk_device): - """Creates spec for disk type conversion during relocate.""" - cf = self._session.vim.client.factory - disk_locator = cf.create("ns0:VirtualMachineRelocateSpecDiskLocator") - disk_locator.datastore = datastore - disk_locator.diskId = disk_device.key - disk_locator.diskBackingInfo = self._create_disk_backing(disk_type, - None) - return disk_locator - - def _get_relocate_spec(self, datastore, resource_pool, host, - disk_move_type, disk_type=None, disk_device=None): - """Return spec for relocating volume backing. - - :param datastore: Reference to the datastore - :param resource_pool: Reference to the resource pool - :param host: Reference to the host - :param disk_move_type: Disk move type option - :param disk_type: Destination disk type - :param disk_device: Virtual device corresponding to the disk - :return: Spec for relocation - """ - cf = self._session.vim.client.factory - relocate_spec = cf.create('ns0:VirtualMachineRelocateSpec') - relocate_spec.datastore = datastore - relocate_spec.pool = resource_pool - relocate_spec.host = host - relocate_spec.diskMoveType = disk_move_type - - if disk_type is not None and disk_device is not None: - disk_locator = self._create_relocate_spec_disk_locator(datastore, - disk_type, - disk_device) - relocate_spec.disk = [disk_locator] - - LOG.debug("Spec for relocating the backing: %s.", relocate_spec) - return relocate_spec - - def relocate_backing( - self, backing, datastore, resource_pool, host, disk_type=None): - """Relocates backing to the input datastore and resource pool. - - The implementation uses moveAllDiskBackingsAndAllowSharing disk move - type. 
- - :param backing: Reference to the backing - :param datastore: Reference to the datastore - :param resource_pool: Reference to the resource pool - :param host: Reference to the host - :param disk_type: destination disk type - """ - LOG.debug("Relocating backing: %(backing)s to datastore: %(ds)s " - "and resource pool: %(rp)s with destination disk type: " - "%(disk_type)s.", - {'backing': backing, - 'ds': datastore, - 'rp': resource_pool, - 'disk_type': disk_type}) - - # Relocate the volume backing - disk_move_type = 'moveAllDiskBackingsAndAllowSharing' - - disk_device = None - if disk_type is not None: - disk_device = self._get_disk_device(backing) - - relocate_spec = self._get_relocate_spec(datastore, resource_pool, host, - disk_move_type, disk_type, - disk_device) - - task = self._session.invoke_api(self._session.vim, 'RelocateVM_Task', - backing, spec=relocate_spec) - LOG.debug("Initiated relocation of volume backing: %s.", backing) - self._session.wait_for_task(task) - LOG.info("Successfully relocated volume backing: %(backing)s " - "to datastore: %(ds)s and resource pool: %(rp)s.", - {'backing': backing, 'ds': datastore, 'rp': resource_pool}) - - def move_backing_to_folder(self, backing, folder): - """Move the volume backing to the folder. 
- - :param backing: Reference to the backing - :param folder: Reference to the folder - """ - LOG.debug("Moving backing: %(backing)s to folder: %(fol)s.", - {'backing': backing, 'fol': folder}) - task = self._session.invoke_api(self._session.vim, - 'MoveIntoFolder_Task', folder, - list=[backing]) - LOG.debug("Initiated move of volume backing: %(backing)s into the " - "folder: %(fol)s.", {'backing': backing, 'fol': folder}) - self._session.wait_for_task(task) - LOG.info("Successfully moved volume " - "backing: %(backing)s into the " - "folder: %(fol)s.", {'backing': backing, 'fol': folder}) - - def create_snapshot(self, backing, name, description, quiesce=False): - """Create snapshot of the backing with given name and description. - - :param backing: Reference to the backing entity - :param name: Snapshot name - :param description: Snapshot description - :param quiesce: Whether to quiesce the backing when taking snapshot - :return: Created snapshot entity reference - """ - LOG.debug("Snapshoting backing: %(backing)s with name: %(name)s.", - {'backing': backing, 'name': name}) - task = self._session.invoke_api(self._session.vim, - 'CreateSnapshot_Task', - backing, name=name, - description=description, - memory=False, quiesce=quiesce) - LOG.debug("Initiated snapshot of volume backing: %(backing)s " - "named: %(name)s.", {'backing': backing, 'name': name}) - task_info = self._session.wait_for_task(task) - snapshot = task_info.result - LOG.info("Successfully created snapshot: %(snap)s for volume " - "backing: %(backing)s.", - {'snap': snapshot, 'backing': backing}) - return snapshot - - @staticmethod - def _get_snapshot_from_tree(name, root): - """Get snapshot by name from the snapshot tree root. 
- - :param name: Snapshot name - :param root: Current root node in the snapshot tree - :return: None in the snapshot tree with given snapshot name - """ - if not root: - return None - if root.name == name: - return root.snapshot - if (not hasattr(root, 'childSnapshotList') or - not root.childSnapshotList): - # When root does not have children, the childSnapshotList attr - # is missing sometime. Adding an additional check. - return None - for node in root.childSnapshotList: - snapshot = VMwareVolumeOps._get_snapshot_from_tree(name, node) - if snapshot: - return snapshot - - def get_snapshot(self, backing, name): - """Get snapshot of the backing with given name. - - :param backing: Reference to the backing entity - :param name: Snapshot name - :return: Snapshot entity of the backing with given name - """ - snapshot = self._session.invoke_api(vim_util, 'get_object_property', - self._session.vim, backing, - 'snapshot') - if not snapshot or not snapshot.rootSnapshotList: - return None - for root in snapshot.rootSnapshotList: - return VMwareVolumeOps._get_snapshot_from_tree(name, root) - - def snapshot_exists(self, backing): - """Check if the given backing contains snapshots.""" - snapshot = self._session.invoke_api(vim_util, 'get_object_property', - self._session.vim, backing, - 'snapshot') - if snapshot is None or snapshot.rootSnapshotList is None: - return False - return len(snapshot.rootSnapshotList) != 0 - - def delete_snapshot(self, backing, name): - """Delete a given snapshot from volume backing. - - :param backing: Reference to the backing entity - :param name: Snapshot name - """ - LOG.debug("Deleting the snapshot: %(name)s from backing: " - "%(backing)s.", - {'name': name, 'backing': backing}) - snapshot = self.get_snapshot(backing, name) - if not snapshot: - LOG.info("Did not find the snapshot: %(name)s for backing: " - "%(backing)s. 
Need not delete anything.", - {'name': name, 'backing': backing}) - return - task = self._session.invoke_api(self._session.vim, - 'RemoveSnapshot_Task', - snapshot, removeChildren=False) - LOG.debug("Initiated snapshot: %(name)s deletion for backing: " - "%(backing)s.", - {'name': name, 'backing': backing}) - self._session.wait_for_task(task) - LOG.info("Successfully deleted snapshot: %(name)s of backing: " - "%(backing)s.", {'backing': backing, 'name': name}) - - def _get_folder(self, backing): - """Get parent folder of the backing. - - :param backing: Reference to the backing entity - :return: Reference to parent folder of the backing entity - """ - return self._get_parent(backing, 'Folder') - - def _get_clone_spec(self, datastore, disk_move_type, snapshot, backing, - disk_type, host=None, resource_pool=None, - extra_config=None): - """Get the clone spec. - - :param datastore: Reference to datastore - :param disk_move_type: Disk move type - :param snapshot: Reference to snapshot - :param backing: Source backing VM - :param disk_type: Disk type of clone - :param host: Target host - :param resource_pool: Target resource pool - :param extra_config: Key-value pairs to be written to backing's - extra-config - :return: Clone spec - """ - if disk_type is not None: - disk_device = self._get_disk_device(backing) - else: - disk_device = None - - relocate_spec = self._get_relocate_spec(datastore, resource_pool, host, - disk_move_type, disk_type, - disk_device) - cf = self._session.vim.client.factory - clone_spec = cf.create('ns0:VirtualMachineCloneSpec') - clone_spec.location = relocate_spec - clone_spec.powerOn = False - clone_spec.template = False - clone_spec.snapshot = snapshot - - if extra_config: - config_spec = cf.create('ns0:VirtualMachineConfigSpec') - if BACKING_UUID_KEY in extra_config: - config_spec.instanceUuid = extra_config.pop(BACKING_UUID_KEY) - config_spec.extraConfig = self._get_extra_config_option_values( - extra_config) - clone_spec.config = config_spec 
- - LOG.debug("Spec for cloning the backing: %s.", clone_spec) - return clone_spec - - def clone_backing(self, name, backing, snapshot, clone_type, datastore, - disk_type=None, host=None, resource_pool=None, - extra_config=None, folder=None): - """Clone backing. - - If the clone_type is 'full', then a full clone of the source volume - backing will be created. Else, if it is 'linked', then a linked clone - of the source volume backing will be created. - - :param name: Name for the clone - :param backing: Reference to the backing entity - :param snapshot: Snapshot point from which the clone should be done - :param clone_type: Whether a full clone or linked clone is to be made - :param datastore: Reference to the datastore entity - :param disk_type: Disk type of the clone - :param host: Target host - :param resource_pool: Target resource pool - :param extra_config: Key-value pairs to be written to backing's - extra-config - :param folder: The location of the clone - """ - LOG.debug("Creating a clone of backing: %(back)s, named: %(name)s, " - "clone type: %(type)s from snapshot: %(snap)s on " - "resource pool: %(resource_pool)s, host: %(host)s, " - "datastore: %(ds)s with disk type: %(disk_type)s.", - {'back': backing, 'name': name, 'type': clone_type, - 'snap': snapshot, 'ds': datastore, 'disk_type': disk_type, - 'host': host, 'resource_pool': resource_pool}) - - if folder is None: - # Use source folder as the location of the clone. 
- folder = self._get_folder(backing) - - if clone_type == LINKED_CLONE_TYPE: - disk_move_type = 'createNewChildDiskBacking' - else: - disk_move_type = 'moveAllDiskBackingsAndDisallowSharing' - clone_spec = self._get_clone_spec( - datastore, disk_move_type, snapshot, backing, disk_type, host=host, - resource_pool=resource_pool, extra_config=extra_config) - task = self._session.invoke_api(self._session.vim, 'CloneVM_Task', - backing, folder=folder, name=name, - spec=clone_spec) - LOG.debug("Initiated clone of backing: %s.", name) - task_info = self._session.wait_for_task(task) - new_backing = task_info.result - LOG.info("Successfully created clone: %s.", new_backing) - return new_backing - - def _reconfigure_backing(self, backing, reconfig_spec): - """Reconfigure backing VM with the given spec.""" - LOG.debug("Reconfiguring backing VM: %(backing)s with spec: %(spec)s.", - {'backing': backing, - 'spec': reconfig_spec}) - reconfig_task = self._session.invoke_api(self._session.vim, - "ReconfigVM_Task", - backing, - spec=reconfig_spec) - LOG.debug("Task: %s created for reconfiguring backing VM.", - reconfig_task) - self._session.wait_for_task(reconfig_task) - - def attach_disk_to_backing(self, backing, size_in_kb, disk_type, - adapter_type, profile_id, vmdk_ds_file_path): - """Attach an existing virtual disk to the backing VM. 
- - :param backing: reference to the backing VM - :param size_in_kb: disk size in KB - :param disk_type: virtual disk type - :param adapter_type: disk adapter type - :param profile_id: storage policy profile identification - :param vmdk_ds_file_path: datastore file path of the virtual disk to - be attached - """ - LOG.debug("Reconfiguring backing VM: %(backing)s to add new disk: " - "%(path)s with size (KB): %(size)d and adapter type: " - "%(adapter_type)s.", - {'backing': backing, - 'path': vmdk_ds_file_path, - 'size': size_in_kb, - 'adapter_type': adapter_type}) - cf = self._session.vim.client.factory - reconfig_spec = cf.create('ns0:VirtualMachineConfigSpec') - specs = self._create_specs_for_disk_add( - size_in_kb, - disk_type, - adapter_type, - profile_id, - vmdk_ds_file_path=vmdk_ds_file_path) - reconfig_spec.deviceChange = specs - self._reconfigure_backing(backing, reconfig_spec) - LOG.debug("Backing VM: %s reconfigured with new disk.", backing) - - def _create_spec_for_disk_remove(self, disk_device): - cf = self._session.vim.client.factory - disk_spec = cf.create('ns0:VirtualDeviceConfigSpec') - disk_spec.operation = 'remove' - disk_spec.device = disk_device - return disk_spec - - def detach_disk_from_backing(self, backing, disk_device): - """Detach the given disk from backing.""" - - LOG.debug("Reconfiguring backing VM: %(backing)s to remove disk: " - "%(disk_device)s.", - {'backing': backing, 'disk_device': disk_device}) - - cf = self._session.vim.client.factory - reconfig_spec = cf.create('ns0:VirtualMachineConfigSpec') - spec = self._create_spec_for_disk_remove(disk_device) - reconfig_spec.deviceChange = [spec] - self._reconfigure_backing(backing, reconfig_spec) - - def rename_backing(self, backing, new_name): - """Rename backing VM. 
- - :param backing: VM to be renamed - :param new_name: new VM name - """ - LOG.info("Renaming backing VM: %(backing)s to %(new_name)s.", - {'backing': backing, - 'new_name': new_name}) - rename_task = self._session.invoke_api(self._session.vim, - "Rename_Task", - backing, - newName=new_name) - LOG.debug("Task: %s created for renaming VM.", rename_task) - self._session.wait_for_task(rename_task) - LOG.info("Backing VM: %(backing)s renamed to %(new_name)s.", - {'backing': backing, - 'new_name': new_name}) - - def change_backing_profile(self, backing, profile_id): - """Change storage profile of the backing VM. - - The current profile is removed if the new profile is None. - """ - LOG.debug("Reconfiguring backing VM: %(backing)s to change profile to:" - " %(profile)s.", - {'backing': backing, - 'profile': profile_id}) - cf = self._session.vim.client.factory - - if profile_id is None: - vm_profile = cf.create('ns0:VirtualMachineEmptyProfileSpec') - else: - vm_profile = cf.create('ns0:VirtualMachineDefinedProfileSpec') - vm_profile.profileId = profile_id.uniqueId - - reconfig_spec = cf.create('ns0:VirtualMachineConfigSpec') - reconfig_spec.vmProfile = [vm_profile] - - disk_device = self._get_disk_device(backing) - disk_spec = cf.create('ns0:VirtualDeviceConfigSpec') - disk_spec.device = disk_device - disk_spec.operation = 'edit' - disk_spec.profile = [vm_profile] - reconfig_spec.deviceChange = [disk_spec] - - self._reconfigure_backing(backing, reconfig_spec) - LOG.debug("Backing VM: %(backing)s reconfigured with new profile: " - "%(profile)s.", - {'backing': backing, - 'profile': profile_id}) - - def update_backing_disk_uuid(self, backing, disk_uuid): - """Update backing VM's disk UUID. 
- - :param backing: Reference to backing VM - :param disk_uuid: New disk UUID - """ - LOG.debug("Reconfiguring backing VM: %(backing)s to change disk UUID " - "to: %(disk_uuid)s.", - {'backing': backing, - 'disk_uuid': disk_uuid}) - - disk_device = self._get_disk_device(backing) - disk_device.backing.uuid = disk_uuid - - cf = self._session.vim.client.factory - disk_spec = cf.create('ns0:VirtualDeviceConfigSpec') - disk_spec.device = disk_device - disk_spec.operation = 'edit' - - reconfig_spec = cf.create('ns0:VirtualMachineConfigSpec') - reconfig_spec.deviceChange = [disk_spec] - self._reconfigure_backing(backing, reconfig_spec) - - LOG.debug("Backing VM: %(backing)s reconfigured with new disk UUID: " - "%(disk_uuid)s.", - {'backing': backing, - 'disk_uuid': disk_uuid}) - - def update_backing_extra_config(self, backing, extra_config): - cf = self._session.vim.client.factory - reconfig_spec = cf.create('ns0:VirtualMachineConfigSpec') - if BACKING_UUID_KEY in extra_config: - reconfig_spec.instanceUuid = extra_config.pop(BACKING_UUID_KEY) - reconfig_spec.extraConfig = self._get_extra_config_option_values( - extra_config) - self._reconfigure_backing(backing, reconfig_spec) - LOG.debug("Backing: %(backing)s reconfigured with extra config: " - "%(extra_config)s.", - {'backing': backing, - 'extra_config': extra_config}) - - def update_backing_uuid(self, backing, uuid): - cf = self._session.vim.client.factory - reconfig_spec = cf.create('ns0:VirtualMachineConfigSpec') - reconfig_spec.instanceUuid = uuid - self._reconfigure_backing(backing, reconfig_spec) - LOG.debug("Backing: %(backing)s reconfigured with uuid: %(uuid)s.", - {'backing': backing, - 'uuid': uuid}) - - def delete_file(self, file_path, datacenter=None): - """Delete file or folder on the datastore. 
- - :param file_path: Datastore path of the file or folder - """ - LOG.debug("Deleting file: %(file)s under datacenter: %(dc)s.", - {'file': file_path, 'dc': datacenter}) - fileManager = self._session.vim.service_content.fileManager - task = self._session.invoke_api(self._session.vim, - 'DeleteDatastoreFile_Task', - fileManager, - name=file_path, - datacenter=datacenter) - LOG.debug("Initiated deletion via task: %s.", task) - self._session.wait_for_task(task) - LOG.info("Successfully deleted file: %s.", file_path) - - def create_datastore_folder(self, ds_name, folder_path, datacenter): - """Creates a datastore folder. - - This method returns silently if the folder already exists. - - :param ds_name: datastore name - :param folder_path: path of folder to create - :param datacenter: datacenter of target datastore - """ - fileManager = self._session.vim.service_content.fileManager - ds_folder_path = "[%s] %s" % (ds_name, folder_path) - LOG.debug("Creating datastore folder: %s.", ds_folder_path) - try: - self._session.invoke_api(self._session.vim, - 'MakeDirectory', - fileManager, - name=ds_folder_path, - datacenter=datacenter) - LOG.info("Created datastore folder: %s.", folder_path) - except exceptions.FileAlreadyExistsException: - LOG.debug("Datastore folder: %s already exists.", folder_path) - - def get_path_name(self, backing): - """Get path name of the backing. - - :param backing: Reference to the backing entity - :return: Path name of the backing - """ - return self._session.invoke_api(vim_util, 'get_object_property', - self._session.vim, backing, - 'config.files').vmPathName - - def get_entity_name(self, entity): - """Get name of the managed entity. 
- - :param entity: Reference to the entity - :return: Name of the managed entity - """ - return self._session.invoke_api(vim_util, 'get_object_property', - self._session.vim, entity, 'name') - - def _get_disk_device(self, backing): - """Get the virtual device corresponding to disk.""" - hardware_devices = self._session.invoke_api(vim_util, - 'get_object_property', - self._session.vim, - backing, - 'config.hardware.device') - if hardware_devices.__class__.__name__ == "ArrayOfVirtualDevice": - hardware_devices = hardware_devices.VirtualDevice - for device in hardware_devices: - if device.__class__.__name__ == "VirtualDisk": - return device - - LOG.error("Virtual disk device of backing: %s not found.", backing) - raise vmdk_exceptions.VirtualDiskNotFoundException() - - def get_vmdk_path(self, backing): - """Get the vmdk file name of the backing. - - The vmdk file path of the backing returned is of the form: - "[datastore1] my_folder/my_vm.vmdk" - - :param backing: Reference to the backing - :return: VMDK file path of the backing - """ - disk_device = self._get_disk_device(backing) - backing = disk_device.backing - if backing.__class__.__name__ != "VirtualDiskFlatVer2BackingInfo": - msg = _("Invalid disk backing: %s.") % backing.__class__.__name__ - LOG.error(msg) - raise AssertionError(msg) - return backing.fileName - - def get_disk_size(self, backing): - """Get disk size of the backing. 
- - :param backing: backing VM reference - :return: disk size in bytes - """ - disk_device = self._get_disk_device(backing) - return disk_device.capacityInKB * units.Ki - - def _get_virtual_disk_create_spec(self, size_in_kb, adapter_type, - disk_type): - """Return spec for file-backed virtual disk creation.""" - cf = self._session.vim.client.factory - spec = cf.create('ns0:FileBackedVirtualDiskSpec') - spec.capacityKb = size_in_kb - spec.adapterType = VirtualDiskAdapterType.get_adapter_type( - adapter_type) - spec.diskType = VirtualDiskType.get_virtual_disk_type(disk_type) - return spec - - def create_virtual_disk(self, dc_ref, vmdk_ds_file_path, size_in_kb, - adapter_type='busLogic', disk_type='preallocated'): - """Create virtual disk with the given settings. - - :param dc_ref: datacenter reference - :param vmdk_ds_file_path: datastore file path of the virtual disk - :param size_in_kb: disk size in KB - :param adapter_type: disk adapter type - :param disk_type: vmdk type - """ - virtual_disk_spec = self._get_virtual_disk_create_spec(size_in_kb, - adapter_type, - disk_type) - LOG.debug("Creating virtual disk with spec: %s.", virtual_disk_spec) - disk_manager = self._session.vim.service_content.virtualDiskManager - task = self._session.invoke_api(self._session.vim, - 'CreateVirtualDisk_Task', - disk_manager, - name=vmdk_ds_file_path, - datacenter=dc_ref, - spec=virtual_disk_spec) - LOG.debug("Task: %s created for virtual disk creation.", task) - self._session.wait_for_task(task) - LOG.debug("Created virtual disk with spec: %s.", virtual_disk_spec) - - def create_flat_extent_virtual_disk_descriptor( - self, dc_ref, path, size_in_kb, adapter_type, disk_type): - """Create descriptor for a single flat extent virtual disk. - - To create the descriptor, we create a virtual disk and delete its flat - extent. 
- - :param dc_ref: reference to the datacenter - :param path: descriptor datastore file path - :param size_in_kb: size of the virtual disk in KB - :param adapter_type: virtual disk adapter type - :param disk_type: type of the virtual disk - """ - LOG.debug("Creating descriptor: %(path)s with size (KB): %(size)s, " - "adapter_type: %(adapter_type)s and disk_type: " - "%(disk_type)s.", - {'path': path.get_descriptor_ds_file_path(), - 'size': size_in_kb, - 'adapter_type': adapter_type, - 'disk_type': disk_type - }) - self.create_virtual_disk(dc_ref, path.get_descriptor_ds_file_path(), - size_in_kb, adapter_type, disk_type) - self.delete_file(path.get_flat_extent_ds_file_path(), dc_ref) - LOG.debug("Created descriptor: %s.", - path.get_descriptor_ds_file_path()) - - def copy_vmdk_file(self, src_dc_ref, src_vmdk_file_path, - dest_vmdk_file_path, dest_dc_ref=None): - """Copy contents of the src vmdk file to dest vmdk file. - - :param src_dc_ref: Reference to datacenter containing src datastore - :param src_vmdk_file_path: Source vmdk file path - :param dest_vmdk_file_path: Destination vmdk file path - :param dest_dc_ref: Reference to datacenter of dest datastore. - If unspecified, source datacenter is used. 
- """ - LOG.debug('Copying disk: %(src)s to %(dest)s.', - {'src': src_vmdk_file_path, - 'dest': dest_vmdk_file_path}) - - dest_dc_ref = dest_dc_ref or src_dc_ref - diskMgr = self._session.vim.service_content.virtualDiskManager - task = self._session.invoke_api(self._session.vim, - 'CopyVirtualDisk_Task', - diskMgr, - sourceName=src_vmdk_file_path, - sourceDatacenter=src_dc_ref, - destName=dest_vmdk_file_path, - destDatacenter=dest_dc_ref, - force=True) - - LOG.debug("Initiated copying disk data via task: %s.", task) - self._session.wait_for_task(task) - LOG.info("Successfully copied disk at: %(src)s to: %(dest)s.", - {'src': src_vmdk_file_path, 'dest': dest_vmdk_file_path}) - - def move_vmdk_file(self, src_dc_ref, src_vmdk_file_path, - dest_vmdk_file_path, dest_dc_ref=None): - """Move the given vmdk file to another datastore location. - - :param src_dc_ref: Reference to datacenter containing src datastore - :param src_vmdk_file_path: Source vmdk file path - :param dest_vmdk_file_path: Destination vmdk file path - :param dest_dc_ref: Reference to datacenter of dest datastore. - If unspecified, source datacenter is used. - """ - LOG.debug('Moving disk: %(src)s to %(dest)s.', - {'src': src_vmdk_file_path, 'dest': dest_vmdk_file_path}) - - dest_dc_ref = dest_dc_ref or src_dc_ref - diskMgr = self._session.vim.service_content.virtualDiskManager - task = self._session.invoke_api(self._session.vim, - 'MoveVirtualDisk_Task', - diskMgr, - sourceName=src_vmdk_file_path, - sourceDatacenter=src_dc_ref, - destName=dest_vmdk_file_path, - destDatacenter=dest_dc_ref, - force=True) - - self._session.wait_for_task(task) - - def delete_vmdk_file(self, vmdk_file_path, dc_ref): - """Delete given vmdk files. 
- - :param vmdk_file_path: VMDK file path to be deleted - :param dc_ref: Reference to datacenter that contains this VMDK file - """ - LOG.debug("Deleting vmdk file: %s.", vmdk_file_path) - diskMgr = self._session.vim.service_content.virtualDiskManager - task = self._session.invoke_api(self._session.vim, - 'DeleteVirtualDisk_Task', - diskMgr, - name=vmdk_file_path, - datacenter=dc_ref) - LOG.debug("Initiated deleting vmdk file via task: %s.", task) - self._session.wait_for_task(task) - LOG.info("Deleted vmdk file: %s.", vmdk_file_path) - - def _get_all_clusters(self): - clusters = {} - retrieve_result = self._session.invoke_api(vim_util, 'get_objects', - self._session.vim, - 'ClusterComputeResource', - self._max_objects) - while retrieve_result: - if retrieve_result.objects: - for cluster in retrieve_result.objects: - name = urllib.parse.unquote(cluster.propSet[0].val) - clusters[name] = cluster.obj - retrieve_result = self.continue_retrieval(retrieve_result) - return clusters - - def get_cluster_refs(self, names): - """Get references to given clusters. - - :param names: list of cluster names - :return: Dictionary of cluster names to references - """ - clusters_ref = {} - clusters = self._get_all_clusters() - for name in names: - if name not in clusters: - LOG.error("Compute cluster: %s not found.", name) - raise vmdk_exceptions.ClusterNotFoundException(cluster=name) - clusters_ref[name] = clusters[name] - - return clusters_ref - - def get_cluster_hosts(self, cluster): - """Get hosts in the given cluster. - - :param cluster: cluster reference - :return: references to hosts in the cluster - """ - hosts = self._session.invoke_api(vim_util, - 'get_object_property', - self._session.vim, - cluster, - 'host') - - host_refs = [] - if hosts and hosts.ManagedObjectReference: - host_refs.extend(hosts.ManagedObjectReference) - - return host_refs - - def get_entity_by_inventory_path(self, path): - """Returns the managed object identified by the given inventory path. 
- - :param path: Inventory path - :return: Reference to the managed object - """ - return self._session.invoke_api( - self._session.vim, - "FindByInventoryPath", - self._session.vim.service_content.searchIndex, - inventoryPath=path) - - def _get_disk_devices(self, vm): - disk_devices = [] - hardware_devices = self._session.invoke_api(vim_util, - 'get_object_property', - self._session.vim, - vm, - 'config.hardware.device') - - if hardware_devices.__class__.__name__ == "ArrayOfVirtualDevice": - hardware_devices = hardware_devices.VirtualDevice - - for device in hardware_devices: - if device.__class__.__name__ == "VirtualDisk": - disk_devices.append(device) - - return disk_devices - - def get_disk_device(self, vm, vmdk_path): - """Get the disk device of the VM which corresponds to the given path. - - :param vm: VM reference - :param vmdk_path: Datastore path of virtual disk - :return: Matching disk device - """ - disk_devices = self._get_disk_devices(vm) - - for disk_device in disk_devices: - backing = disk_device.backing - if (backing.__class__.__name__ == "VirtualDiskFlatVer2BackingInfo" - and backing.fileName == vmdk_path): - return disk_device diff --git a/cinder/volume/drivers/vzstorage.py b/cinder/volume/drivers/vzstorage.py deleted file mode 100644 index d7b3680c1..000000000 --- a/cinder/volume/drivers/vzstorage.py +++ /dev/null @@ -1,728 +0,0 @@ -# Copyright (c) 2015 Parallels IP Holdings GmbH -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -import collections -import errno -import json -import os -import re - -from os_brick.remotefs import remotefs -from oslo_concurrency import processutils as putils -from oslo_config import cfg -from oslo_log import log as logging -from oslo_utils import imageutils -from oslo_utils import units - -from cinder import exception -from cinder.i18n import _ -from cinder.image import image_utils -from cinder import interface -from cinder import utils -from cinder.volume import configuration -from cinder.volume.drivers import remotefs as remotefs_drv - -VERSION = '1.0' - -LOG = logging.getLogger(__name__) - -vzstorage_opts = [ - cfg.StrOpt('vzstorage_shares_config', - default='/etc/cinder/vzstorage_shares', - help='File with the list of available vzstorage shares.'), - cfg.BoolOpt('vzstorage_sparsed_volumes', - default=True, - help=('Create volumes as sparsed files which take no space ' - 'rather than regular files when using raw format, ' - 'in which case volume creation takes lot of time.')), - cfg.FloatOpt('vzstorage_used_ratio', - default=0.95, - help=('Percent of ACTUAL usage of the underlying volume ' - 'before no new volumes can be allocated to the volume ' - 'destination.')), - cfg.StrOpt('vzstorage_mount_point_base', - default='$state_path/mnt', - help=('Base dir containing mount points for ' - 'vzstorage shares.')), - cfg.ListOpt('vzstorage_mount_options', - help=('Mount options passed to the vzstorage client. 
' - 'See section of the pstorage-mount man page ' - 'for details.')), - cfg.StrOpt('vzstorage_default_volume_format', - default='raw', - help=('Default format that will be used when creating volumes ' - 'if no volume format is specified.')), -] - -CONF = cfg.CONF -CONF.register_opts(vzstorage_opts, group=configuration.SHARED_CONF_GROUP) - -PLOOP_BASE_DELTA_NAME = 'root.hds' -DISK_FORMAT_RAW = 'raw' -DISK_FORMAT_QCOW2 = 'qcow2' -DISK_FORMAT_PLOOP = 'ploop' - -# Due to the inconsistency in qemu-img format convention -# it calls ploop disk format "parallels". -# Convert it here to properly name it in Cinder -# and, hence, in Nova and Libvirt -FROM_QEMU_FORMAT_MAP = {k: k for k in image_utils.VALID_DISK_FORMATS} -FROM_QEMU_FORMAT_MAP['parallels'] = DISK_FORMAT_PLOOP -TO_QEMU_FORMAT_MAP = {v: k for k, v in FROM_QEMU_FORMAT_MAP.items()} - - -def _to_qemu_format(fmt): - """Convert from Qemu format name - - param fmt: Qemu format name - """ - return TO_QEMU_FORMAT_MAP[fmt] - - -def _from_qemu_format(fmt): - """Convert to Qemu format name - - param fmt: conventional format name - """ - return FROM_QEMU_FORMAT_MAP[fmt] - - -class PloopDevice(object): - """Setup a ploop device for ploop image - - This class is for mounting ploop devices using with statement: - with PloopDevice('/vzt/private/my-ct/harddisk.hdd') as dev_path: - # do something - - :param path: A path to ploop harddisk dir - :param snapshot_id: Snapshot id to mount - :param execute: execute helper - """ - - def __init__(self, path, snapshot_id=None, read_only=True, - execute=putils.execute): - self.path = path - self.snapshot_id = snapshot_id - self.read_only = read_only - self.execute = execute - - def __enter__(self): - self.dd_path = os.path.join(self.path, 'DiskDescriptor.xml') - cmd = ['ploop', 'mount', self.dd_path] - - if self.snapshot_id: - cmd.append('-u') - cmd.append(self.snapshot_id) - - if self.read_only: - cmd.append('-r') - - out, err = self.execute(*cmd, run_as_root=True) - - m = 
re.search(r'dev=(\S+)', out) - if not m: - raise Exception('Invalid output from ploop mount: %s' % out) - - self.ploop_dev = m.group(1) - - return self.ploop_dev - - def _umount(self): - self.execute('ploop', 'umount', self.dd_path, run_as_root=True) - - def __exit__(self, type, value, traceback): - self._umount() - - -@interface.volumedriver -class VZStorageDriver(remotefs_drv.RemoteFSSnapDriver): - """Cinder driver for Virtuozzo Storage. - - Creates volumes as files on the mounted vzstorage cluster. - - Version history: - 1.0 - Initial driver. - """ - driver_volume_type = 'vzstorage' - driver_prefix = 'vzstorage' - volume_backend_name = 'Virtuozzo_Storage' - VERSION = VERSION - # ThirdPartySystems wiki page - CI_WIKI_NAME = "Virtuozzo_Storage_CI" - - # TODO(smcginnis) Remove driver if CI not fixed by Queens - SUPPORTED = False - - SHARE_FORMAT_REGEX = r'(?:(\S+):\/)?([a-zA-Z0-9_-]+)(?::(\S+))?' - - def __init__(self, execute=putils.execute, *args, **kwargs): - self._remotefsclient = None - super(VZStorageDriver, self).__init__(*args, **kwargs) - self.configuration.append_config_values(vzstorage_opts) - self._execute_as_root = False - root_helper = utils.get_root_helper() - # base bound to instance is used in RemoteFsConnector. 
- self.base = self.configuration.vzstorage_mount_point_base - opts = self.configuration.vzstorage_mount_options - - self._remotefsclient = remotefs.RemoteFsClient( - 'vzstorage', root_helper, execute=execute, - vzstorage_mount_point_base=self.base, - vzstorage_mount_options=opts) - - def _qemu_img_info(self, path, volume_name): - qemu_img_cache = path + ".qemu_img_info" - is_cache_outdated = True - if os.path.isdir(path): - # Ploop disks stored along with metadata xml as directories - # qemu-img should explore base data file inside - path = os.path.join(path, PLOOP_BASE_DELTA_NAME) - if os.path.isfile(qemu_img_cache): - info_tm = os.stat(qemu_img_cache).st_mtime - snap_tm = os.stat(path).st_mtime - if info_tm >= snap_tm: - is_cache_outdated = False - if is_cache_outdated: - LOG.debug("Cached qemu-img info %s not present or outdated," - " refresh", qemu_img_cache) - ret = super(VZStorageDriver, self)._qemu_img_info_base( - path, volume_name, - self.configuration.vzstorage_mount_point_base) - ret.file_format = _from_qemu_format(ret.file_format) - # We need only backing_file and file_format - d = {'file_format': ret.file_format, - 'backing_file': ret.backing_file} - with open(qemu_img_cache, "w") as f: - json.dump(d, f) - else: - ret = imageutils.QemuImgInfo() - with open(qemu_img_cache, "r") as f: - cached_data = json.load(f) - ret.file_format = cached_data['file_format'] - ret.backing_file = cached_data['backing_file'] - return ret - - @remotefs_drv.locked_volume_id_operation - def initialize_connection(self, volume, connector): - """Allow connection to connector and return connection info. 
- - :param volume: volume reference - :param connector: connector reference - """ - # Find active image - active_file = self.get_active_image_from_info(volume) - - data = {'export': volume.provider_location, - 'format': self.get_volume_format(volume), - 'name': active_file, - } - - return { - 'driver_volume_type': self.driver_volume_type, - 'data': data, - 'mount_point_base': self._get_mount_point_base(), - } - - def do_setup(self, context): - """Any initialization the volume driver does while starting.""" - super(VZStorageDriver, self).do_setup(context) - - config = self.configuration.vzstorage_shares_config - if not os.path.exists(config): - msg = (_("VzStorage config file at %(config)s doesn't exist.") % - {'config': config}) - LOG.error(msg) - raise exception.VzStorageException(msg) - - if not os.path.isabs(self.base): - msg = _("Invalid mount point base: %s.") % self.base - LOG.error(msg) - raise exception.VzStorageException(msg) - - used_ratio = self.configuration.vzstorage_used_ratio - if not ((used_ratio > 0) and (used_ratio <= 1)): - msg = _("VzStorage config 'vzstorage_used_ratio' invalid. " - "Must be > 0 and <= 1.0: %s.") % used_ratio - LOG.error(msg) - raise exception.VzStorageException(msg) - - self.shares = {} - - # Check if mount.fuse.pstorage is installed on this system; - # note that we don't need to be root to see if the package - # is installed. - package = 'mount.fuse.pstorage' - try: - self._execute(package, check_exit_code=False, - run_as_root=False) - except OSError as exc: - if exc.errno == errno.ENOENT: - msg = _('%s is not installed.') % package - raise exception.VzStorageException(msg) - else: - raise - - self.configuration.nas_secure_file_operations = 'true' - self.configuration.nas_secure_file_permissions = 'true' - - def _ensure_share_mounted(self, share): - m = re.search(self.SHARE_FORMAT_REGEX, share) - if not m: - msg = (_("Invalid Virtuozzo Storage share specification: %r. 
" - "Must be: [MDS1[,MDS2],...:/][:PASSWORD].") - % share) - raise exception.VzStorageException(msg) - cluster_name = m.group(2) - - # set up logging to non-default path, so that it will - # be possible to mount the same cluster to another mount - # point by hand with default options. - mnt_flags = ['-l', '/var/log/pstorage/%s-cinder.log.gz' % cluster_name] - if self.shares.get(share) is not None: - extra_flags = json.loads(self.shares[share]) - mnt_flags.extend(extra_flags) - self._remotefsclient.mount(share, mnt_flags) - - def _find_share(self, volume): - """Choose VzStorage share among available ones for given volume size. - - For instances with more than one share that meets the criteria, the - first suitable share will be selected. - - :param volume: the volume to be created. - """ - - if not self._mounted_shares: - raise exception.VzStorageNoSharesMounted() - - for share in self._mounted_shares: - if self._is_share_eligible(share, volume.size): - break - else: - raise exception.VzStorageNoSuitableShareFound( - volume_size=volume.size) - - LOG.debug('Selected %s as target VzStorage share.', share) - - return share - - def _is_share_eligible(self, vz_share, volume_size_in_gib): - """Verifies VzStorage share is eligible to host volume with given size. 
- - :param vz_share: vzstorage share - :param volume_size_in_gib: int size in GB - """ - - used_ratio = self.configuration.vzstorage_used_ratio - volume_size = volume_size_in_gib * units.Gi - - total_size, available, allocated = self._get_capacity_info(vz_share) - - if (allocated + volume_size) // total_size > used_ratio: - LOG.debug('_is_share_eligible: %s is above ' - 'vzstorage_used_ratio.', vz_share) - return False - - return True - - def choose_volume_format(self, volume): - vol_type = volume.volume_type - if vol_type: - extra_specs = vol_type.extra_specs or {} - else: - extra_specs = {} - - extra_specs.update(volume.metadata or {}) - - return (extra_specs.get('volume_format') or - self.configuration.vzstorage_default_volume_format) - - def get_volume_format(self, volume): - active_file = self.get_active_image_from_info(volume) - active_file_path = os.path.join(self._local_volume_dir(volume), - active_file) - img_info = self._qemu_img_info(active_file_path, volume.name) - return img_info.file_format - - def _create_ploop(self, volume_path, volume_size): - os.mkdir(volume_path) - try: - self._execute('ploop', 'init', '-s', '%sG' % volume_size, - os.path.join(volume_path, PLOOP_BASE_DELTA_NAME), - run_as_root=True) - except putils.ProcessExecutionError: - os.rmdir(volume_path) - raise - - def _do_create_volume(self, volume): - """Create a volume on given vzstorage share. 
- - :param volume: volume reference - """ - volume_format = self.choose_volume_format(volume) - volume_path = self.local_path(volume) - volume_size = volume.size - - LOG.debug("Creating new volume at %s.", volume_path) - - if os.path.exists(volume_path): - msg = _('File already exists at %s.') % volume_path - LOG.error(msg) - raise exception.InvalidVolume(reason=msg) - - if volume_format == DISK_FORMAT_PLOOP: - self._create_ploop(volume_path, volume_size) - elif volume_format == DISK_FORMAT_QCOW2: - self._create_qcow2_file(volume_path, volume_size) - elif self.configuration.vzstorage_sparsed_volumes: - self._create_sparsed_file(volume_path, volume_size) - else: - self._create_regular_file(volume_path, volume_size) - - info_path = self._local_path_volume_info(volume) - snap_info = {'active': os.path.basename(volume_path)} - self._write_info_file(info_path, snap_info) - - # Query qemu-img info to cache the output - self._qemu_img_info(volume_path, volume.name) - - def _delete(self, path): - self._execute('rm', '-rf', path, run_as_root=True) - - @remotefs_drv.locked_volume_id_operation - def extend_volume(self, volume, size_gb): - LOG.info('Extending volume %s.', volume.id) - volume_format = self.get_volume_format(volume) - self._extend_volume(volume, size_gb, volume_format) - - def _extend_volume(self, volume, size_gb, volume_format): - volume_path = self.local_path(volume) - - self._check_extend_volume_support(volume, size_gb) - LOG.info('Resizing file to %sG...', size_gb) - - self._do_extend_volume(volume_path, size_gb, volume_format) - - def _do_extend_volume(self, volume_path, size_gb, volume_format): - if volume_format == DISK_FORMAT_PLOOP: - self._execute('ploop', 'resize', '-s', - '%dG' % size_gb, - os.path.join(volume_path, 'DiskDescriptor.xml'), - run_as_root=True) - else: - image_utils.resize_image(volume_path, size_gb) - if not self._is_file_size_equal(volume_path, size_gb): - raise exception.ExtendVolumeError( - reason='Resizing image file failed.') - - 
def _check_extend_volume_support(self, volume, size_gb): - volume_path = self.local_path(volume) - active_file = self.get_active_image_from_info(volume) - active_file_path = os.path.join(self._local_volume_dir(volume), - active_file) - - if active_file_path != volume_path: - msg = _('Extend volume is only supported for this ' - 'driver when no snapshots exist.') - raise exception.InvalidVolume(msg) - - extend_by = int(size_gb) - volume.size - if not self._is_share_eligible(volume.provider_location, - extend_by): - raise exception.ExtendVolumeError(reason='Insufficient space to ' - 'extend volume %s to %sG.' - % (volume.id, size_gb)) - - def _is_file_size_equal(self, path, size): - """Checks if file size at path is equal to size.""" - data = image_utils.qemu_img_info(path) - virt_size = data.virtual_size / units.Gi - return virt_size == size - - def _recreate_ploop_desc(self, image_dir, image_file): - self._delete(os.path.join(image_dir, 'DiskDescriptor.xml')) - - self._execute('ploop', 'restore-descriptor', image_dir, image_file) - - def copy_image_to_volume(self, context, volume, image_service, image_id): - """Fetch the image from image_service and write it to the volume.""" - volume_format = self.get_volume_format(volume) - image_path = self.local_path(volume) - if volume_format == DISK_FORMAT_PLOOP: - image_path = os.path.join(image_path, PLOOP_BASE_DELTA_NAME) - - image_utils.fetch_to_volume_format( - context, image_service, image_id, - image_path, _to_qemu_format(volume_format), - self.configuration.volume_dd_blocksize) - - if volume_format == DISK_FORMAT_PLOOP: - self._recreate_ploop_desc(self.local_path(volume), image_path) - - self._do_extend_volume(self.local_path(volume), - volume.size, - volume_format) - # Query qemu-img info to cache the output - self._qemu_img_info(self.local_path(volume), volume.name) - - def _copy_volume_from_snapshot(self, snapshot, volume, volume_size): - """Copy data from snapshot to destination volume. 
- - This is done with a qemu-img convert to raw/qcow2 from the snapshot - qcow2. - """ - - info_path = self._local_path_volume_info(snapshot.volume) - snap_info = self._read_info_file(info_path) - vol_dir = self._local_volume_dir(snapshot.volume) - out_format = self.choose_volume_format(volume) - volume_format = self.get_volume_format(snapshot.volume) - volume_path = self.local_path(volume) - - if volume_format in (DISK_FORMAT_QCOW2, DISK_FORMAT_RAW): - forward_file = snap_info[snapshot.id] - forward_path = os.path.join(vol_dir, forward_file) - - # Find the file which backs this file, which represents the point - # when this snapshot was created. - img_info = self._qemu_img_info(forward_path, - snapshot.volume.name) - path_to_snap_img = os.path.join(vol_dir, img_info.backing_file) - - LOG.debug("_copy_volume_from_snapshot: will copy " - "from snapshot at %s.", path_to_snap_img) - - image_utils.convert_image(path_to_snap_img, - volume_path, - _to_qemu_format(out_format)) - elif volume_format == DISK_FORMAT_PLOOP: - with PloopDevice(self.local_path(snapshot.volume), - snapshot.id, - execute=self._execute) as dev: - base_file = os.path.join(volume_path, 'root.hds') - image_utils.convert_image(dev, - base_file, - _to_qemu_format(out_format)) - else: - msg = _("Unsupported volume format %s") % volume_format - raise exception.InvalidVolume(msg) - - self._extend_volume(volume, volume_size, out_format) - # Query qemu-img info to cache the output - img_info = self._qemu_img_info(volume_path, volume.name) - - @remotefs_drv.locked_volume_id_operation - def delete_volume(self, volume): - """Deletes a logical volume.""" - if not volume.provider_location: - msg = (_('Volume %s does not have provider_location ' - 'specified, skipping.') % volume.name) - LOG.error(msg) - return - - self._ensure_share_mounted(volume.provider_location) - volume_dir = self._local_volume_dir(volume) - mounted_path = os.path.join(volume_dir, - self.get_active_image_from_info(volume)) - if 
os.path.exists(mounted_path): - self._delete(mounted_path) - self._delete(mounted_path + ".qemu_img_info") - else: - LOG.info("Skipping deletion of volume %s " - "as it does not exist.", mounted_path) - - info_path = self._local_path_volume_info(volume) - self._delete(info_path) - - def _get_desc_path(self, volume): - return os.path.join(self.local_path(volume), 'DiskDescriptor.xml') - - def _create_snapshot_ploop(self, snapshot): - status = snapshot.volume.status - if status != 'available': - msg = (_('Volume status must be available for ' - 'snapshot %(id)s. (is %(status)s)') % - {'id': snapshot.id, 'status': status}) - raise exception.InvalidVolume(msg) - - info_path = self._local_path_volume_info(snapshot.volume) - snap_info = self._read_info_file(info_path) - self._execute('ploop', 'snapshot', '-u', '{%s}' % snapshot.id, - self._get_desc_path(snapshot.volume), - run_as_root=True) - snap_file = os.path.join('volume-%s' % snapshot.volume.id, snapshot.id) - snap_info[snapshot.id] = snap_file - self._write_info_file(info_path, snap_info) - - def _delete_snapshot_ploop(self, snapshot): - status = snapshot.volume.status - if status != 'available': - msg = (_('Volume status must be available for ' - 'snapshot %(id)s. 
(is %(status)s)') % - {'id': snapshot.id, 'status': status}) - raise exception.InvalidVolume(msg) - - info_path = self._local_path_volume_info(snapshot.volume) - snap_info = self._read_info_file(info_path) - self._execute('ploop', 'snapshot-delete', '-u', '{%s}' % snapshot.id, - self._get_desc_path(snapshot.volume), - run_as_root=True) - snap_info.pop(snapshot.id, None) - self._write_info_file(info_path, snap_info) - - @remotefs_drv.locked_volume_id_operation - def create_snapshot(self, snapshot): - volume_format = self.get_volume_format(snapshot.volume) - if volume_format == DISK_FORMAT_PLOOP: - self._create_snapshot_ploop(snapshot) - else: - super(VZStorageDriver, self)._create_snapshot(snapshot) - - def _do_create_snapshot(self, snapshot, backing_filename, - new_snap_path): - super(VZStorageDriver, self)._do_create_snapshot(snapshot, - backing_filename, - new_snap_path) - # Cache qemu-img info for created snapshot - self._qemu_img_info(new_snap_path, snapshot.volume.name) - - def _delete_snapshot_qcow2(self, snapshot): - info_path = self._local_path_volume_info(snapshot.volume) - snap_info = self._read_info_file(info_path, empty_if_missing=True) - if snapshot.id not in snap_info: - LOG.warning("Snapshot %s doesn't exist in snap_info", - snapshot.id) - return - - snap_file = os.path.join(self._local_volume_dir(snapshot.volume), - snap_info[snapshot.id]) - active_file = os.path.join(self._local_volume_dir(snapshot.volume), - snap_info['active']) - higher_file = self._get_higher_image_path(snapshot) - if higher_file: - higher_file = os.path.join(self._local_volume_dir(snapshot.volume), - higher_file) - elif active_file != snap_file: - msg = (_("Expected higher file exists for snapshot %s") % - snapshot.id) - raise exception.VzStorageException(msg) - - img_info = self._qemu_img_info(snap_file, snapshot.volume.name) - base_file = os.path.join(self._local_volume_dir(snapshot.volume), - img_info.backing_file) - - super(VZStorageDriver, self)._delete_snapshot(snapshot) 
- - def _qemu_info_cache(fn): - return fn + ".qemu_img_info" - - def _update_backing_file(info_src, info_dst): - with open(info_src, 'r') as fs, open(info_dst, 'r') as fd: - src = json.load(fs) - dst = json.load(fd) - dst['backing_file'] = src['backing_file'] - with open(info_dst, 'w') as fdw: - json.dump(dst, fdw) - - if snap_file != active_file: - # mv snap_file.info higher_file.info - _update_backing_file( - _qemu_info_cache(snap_file), - _qemu_info_cache(higher_file)) - self._delete(_qemu_info_cache(snap_file)) - elif snapshot.volume.status == 'in-use': - # mv base_file.info snap_file.info - _update_backing_file( - _qemu_info_cache(base_file), - _qemu_info_cache(snap_file)) - self._delete(_qemu_info_cache(base_file)) - else: - # rm snap_file.info - self._delete(_qemu_info_cache(snap_file)) - - @remotefs_drv.locked_volume_id_operation - def delete_snapshot(self, snapshot): - volume_format = self.get_volume_format(snapshot.volume) - if volume_format == DISK_FORMAT_PLOOP: - self._delete_snapshot_ploop(snapshot) - else: - self._delete_snapshot_qcow2(snapshot) - - def _copy_volume_to_image(self, context, volume, image_service, - image_meta): - """Copy the volume to the specified image.""" - - volume_format = self.get_volume_format(volume) - if volume_format == DISK_FORMAT_PLOOP: - with PloopDevice(self.local_path(volume), - execute=self._execute) as dev: - image_utils.upload_volume(context, - image_service, - image_meta, - dev, - volume_format='raw') - else: - super(VZStorageDriver, self)._copy_volume_to_image(context, volume, - image_service, - image_meta) - - def _create_cloned_volume(self, volume, src_vref): - LOG.info('Cloning volume %(src)s to volume %(dst)s', - {'src': src_vref.id, - 'dst': volume.id}) - - if src_vref.status != 'available': - msg = _("Volume status must be 'available'.") - raise exception.InvalidVolume(msg) - - volume_name = CONF.volume_name_template % volume.id - - # Create fake snapshot object - snap_attrs = ['volume_name', 'size', 
'volume_size', 'name', - 'volume_id', 'id', 'volume'] - Snapshot = collections.namedtuple('Snapshot', snap_attrs) - - temp_snapshot = Snapshot(id=src_vref.id, - volume_name=volume_name, - size=src_vref.size, - volume_size=src_vref.size, - name='clone-snap-%s' % src_vref.id, - volume_id=src_vref.id, - volume=volume) - - self._create_snapshot_ploop(temp_snapshot) - try: - volume.provider_location = src_vref.provider_location - info_path = self._local_path_volume_info(volume) - snap_info = {'active': 'volume-%s' % volume.id} - self._write_info_file(info_path, snap_info) - self._copy_volume_from_snapshot(temp_snapshot, - volume, - volume.size) - - finally: - self.delete_snapshot(temp_snapshot) - - return {'provider_location': src_vref.provider_location} - - @remotefs_drv.locked_volume_id_operation - def create_cloned_volume(self, volume, src_vref): - """Creates a clone of the specified volume.""" - volume_format = self.get_volume_format(src_vref) - if volume_format == DISK_FORMAT_PLOOP: - self._create_cloned_volume(volume, src_vref) - else: - super(VZStorageDriver, self)._create_cloned_volume(volume, - src_vref) diff --git a/cinder/volume/drivers/windows/__init__.py b/cinder/volume/drivers/windows/__init__.py deleted file mode 100644 index e69de29bb..000000000 diff --git a/cinder/volume/drivers/windows/constants.py b/cinder/volume/drivers/windows/constants.py deleted file mode 100644 index 457f6bfe2..000000000 --- a/cinder/volume/drivers/windows/constants.py +++ /dev/null @@ -1,17 +0,0 @@ -# Copyright 2014 Cloudbase Solutions Srl -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. 
You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -VHD_TYPE_FIXED = 2 -VHD_TYPE_DYNAMIC = 3 -VHD_TYPE_DIFFERENCING = 4 diff --git a/cinder/volume/drivers/windows/smbfs.py b/cinder/volume/drivers/windows/smbfs.py deleted file mode 100644 index 70693cca4..000000000 --- a/cinder/volume/drivers/windows/smbfs.py +++ /dev/null @@ -1,562 +0,0 @@ -# Copyright (c) 2014 Cloudbase Solutions SRL -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -import os -import sys - -from os_brick.remotefs import windows_remotefs as remotefs_brick -from os_win import utilsfactory -from oslo_config import cfg -from oslo_log import log as logging -from oslo_utils import fileutils -from oslo_utils import units - -from cinder import context -from cinder import coordination -from cinder import exception -from cinder.i18n import _ -from cinder.image import image_utils -from cinder import interface -from cinder import utils -from cinder.volume import configuration -from cinder.volume.drivers import remotefs as remotefs_drv - -VERSION = '1.1.0' - -LOG = logging.getLogger(__name__) - -volume_opts = [ - cfg.StrOpt('smbfs_shares_config', - default=r'C:\OpenStack\smbfs_shares.txt', - help='File with the list of available smbfs shares.'), - cfg.StrOpt('smbfs_allocation_info_file_path', - default=r'C:\OpenStack\allocation_data.txt', - help=('The path of the automatically generated file containing ' - 'information about volume disk space allocation.'), - deprecated_for_removal=True, - deprecated_since="11.0.0", - deprecated_reason="This allocation file is no longer used."), - cfg.StrOpt('smbfs_default_volume_format', - default='vhd', - choices=['vhd', 'vhdx'], - help=('Default format that will be used when creating volumes ' - 'if no volume format is specified.')), - cfg.BoolOpt('smbfs_sparsed_volumes', - default=True, - help=('Create volumes as sparsed files which take no space ' - 'rather than regular files when using raw format, ' - 'in which case volume creation takes lot of time.')), - cfg.FloatOpt('smbfs_used_ratio', - default=0.95, - help=('Percent of ACTUAL usage of the underlying volume ' - 'before no new volumes can be allocated to the volume ' - 'destination.')), - cfg.FloatOpt('smbfs_oversub_ratio', - default=1.0, - help=('This will compare the allocated to available space on ' - 'the volume destination. 
If the ratio exceeds this ' - 'number, the destination will no longer be valid.')), - cfg.StrOpt('smbfs_mount_point_base', - default=r'C:\OpenStack\_mnt', - help=('Base dir containing mount points for smbfs shares.')), - cfg.DictOpt('smbfs_pool_mappings', - default={}, - help=('Mappings between share locations and pool names. ' - 'If not specified, the share names will be used as ' - 'pool names. Example: ' - '//addr/share:pool_name,//addr/share2:pool_name2')), -] - -CONF = cfg.CONF -CONF.register_opts(volume_opts, group=configuration.SHARED_CONF_GROUP) - - -@interface.volumedriver -class WindowsSmbfsDriver(remotefs_drv.RemoteFSPoolMixin, - remotefs_drv.RemoteFSSnapDriverDistributed): - VERSION = VERSION - - driver_volume_type = 'smbfs' - driver_prefix = 'smbfs' - volume_backend_name = 'Generic_SMBFS' - SHARE_FORMAT_REGEX = r'//.+/.+' - VERSION = VERSION - - _DISK_FORMAT_VHD = 'vhd' - _DISK_FORMAT_VHD_LEGACY = 'vpc' - _DISK_FORMAT_VHDX = 'vhdx' - - # ThirdPartySystems wiki page - CI_WIKI_NAME = "Microsoft_iSCSI_CI" - - _MINIMUM_QEMU_IMG_VERSION = '1.6' - - _SUPPORTED_IMAGE_FORMATS = [_DISK_FORMAT_VHD, _DISK_FORMAT_VHDX] - _VALID_IMAGE_EXTENSIONS = _SUPPORTED_IMAGE_FORMATS - - _always_use_temp_snap_when_cloning = False - - def __init__(self, *args, **kwargs): - self._remotefsclient = None - super(WindowsSmbfsDriver, self).__init__(*args, **kwargs) - - self.configuration.append_config_values(volume_opts) - - self.base = getattr(self.configuration, - 'smbfs_mount_point_base') - self._remotefsclient = remotefs_brick.WindowsRemoteFsClient( - 'cifs', root_helper=None, smbfs_mount_point_base=self.base, - local_path_for_loopback=True) - - self._vhdutils = utilsfactory.get_vhdutils() - self._pathutils = utilsfactory.get_pathutils() - self._smbutils = utilsfactory.get_smbutils() - self._diskutils = utilsfactory.get_diskutils() - - def do_setup(self, context): - self._check_os_platform() - super(WindowsSmbfsDriver, self).do_setup(context) - - 
image_utils.check_qemu_img_version(self._MINIMUM_QEMU_IMG_VERSION) - - config = self.configuration.smbfs_shares_config - if not config: - msg = (_("SMBFS config file not set (smbfs_shares_config).")) - LOG.error(msg) - raise exception.SmbfsException(msg) - if not os.path.exists(config): - msg = (_("SMBFS config file at %(config)s doesn't exist.") % - {'config': config}) - LOG.error(msg) - raise exception.SmbfsException(msg) - if not os.path.isabs(self.base): - msg = _("Invalid mount point base: %s") % self.base - LOG.error(msg) - raise exception.SmbfsException(msg) - if not self.configuration.smbfs_oversub_ratio > 0: - msg = _( - "SMBFS config 'smbfs_oversub_ratio' invalid. Must be > 0: " - "%s") % self.configuration.smbfs_oversub_ratio - - LOG.error(msg) - raise exception.SmbfsException(msg) - - if not 0 < self.configuration.smbfs_used_ratio <= 1: - msg = _("SMBFS config 'smbfs_used_ratio' invalid. Must be > 0 " - "and <= 1.0: %s") % self.configuration.smbfs_used_ratio - LOG.error(msg) - raise exception.SmbfsException(msg) - - self.shares = {} # address : options - self._ensure_shares_mounted() - self._setup_pool_mappings() - - def _setup_pool_mappings(self): - self._pool_mappings = self.configuration.smbfs_pool_mappings - - pools = list(self._pool_mappings.values()) - duplicate_pools = set([pool for pool in pools - if pools.count(pool) > 1]) - if duplicate_pools: - msg = _("Found multiple mappings for pools %(pools)s. 
" - "Requested pool mappings: %(pool_mappings)s") - raise exception.SmbfsException( - msg % dict(pools=duplicate_pools, - pool_mappings=self._pool_mappings)) - - shares_missing_mappings = ( - set(self.shares).difference(set(self._pool_mappings))) - for share in shares_missing_mappings: - msg = ("No pool name was requested for share %(share)s " - "Using the share name instead.") - LOG.warning(msg, dict(share=share)) - - self._pool_mappings[share] = self._get_share_name(share) - - @coordination.synchronized('{self.driver_prefix}-{volume.id}') - def initialize_connection(self, volume, connector): - """Allow connection to connector and return connection info. - - :param volume: volume reference - :param connector: connector reference - """ - # Find active image - active_file = self.get_active_image_from_info(volume) - fmt = self.get_volume_format(volume) - - data = {'export': volume.provider_location, - 'format': fmt, - 'name': active_file} - if volume.provider_location in self.shares: - data['options'] = self.shares[volume.provider_location] - return { - 'driver_volume_type': self.driver_volume_type, - 'data': data, - 'mount_point_base': self._get_mount_point_base() - } - - def _check_os_platform(self): - if sys.platform != 'win32': - _msg = _("This system platform (%s) is not supported. This " - "driver supports only Win32 platforms.") % sys.platform - raise exception.SmbfsException(_msg) - - def _get_total_allocated(self, smbfs_share): - pool_name = self._get_pool_name_from_share(smbfs_share) - host = "#".join([self.host, pool_name]) - - vol_sz_sum = self.db.volume_data_get_for_host( - context=context.get_admin_context(), - host=host)[1] - return float(vol_sz_sum * units.Gi) - - def local_path(self, volume): - """Get volume path (mounted locally fs path) for given volume. 
- - :param volume: volume reference - """ - volume_path_template = self._get_local_volume_path_template(volume) - volume_path = self._lookup_local_volume_path(volume_path_template) - if volume_path: - return volume_path - - # The image does not exist, so retrieve the volume format - # in order to build the path. - fmt = self.get_volume_format(volume) - volume_path = volume_path_template + '.' + fmt - return volume_path - - def _get_local_volume_path_template(self, volume): - local_dir = self._local_volume_dir(volume) - local_path_template = os.path.join(local_dir, volume.name) - return local_path_template - - def _lookup_local_volume_path(self, volume_path_template): - for ext in self._SUPPORTED_IMAGE_FORMATS: - volume_path = (volume_path_template + '.' + ext - if ext else volume_path_template) - if os.path.exists(volume_path): - return volume_path - - def _get_new_snap_path(self, snapshot): - vol_path = self.local_path(snapshot.volume) - snap_path, ext = os.path.splitext(vol_path) - snap_path += '.' + snapshot.id + ext - return snap_path - - def get_volume_format(self, volume, qemu_format=False): - volume_path_template = self._get_local_volume_path_template(volume) - volume_path = self._lookup_local_volume_path(volume_path_template) - - if volume_path: - ext = os.path.splitext(volume_path)[1].strip('.').lower() - if ext in self._SUPPORTED_IMAGE_FORMATS: - volume_format = ext - else: - # Hyper-V relies on file extensions so we're enforcing them. 
- raise exception.SmbfsException( - _("Invalid image file extension: %s") % ext) - else: - volume_format = ( - self._get_volume_format_spec(volume) or - self.configuration.smbfs_default_volume_format) - - if qemu_format and volume_format == self._DISK_FORMAT_VHD: - volume_format = self._DISK_FORMAT_VHD_LEGACY - elif volume_format == self._DISK_FORMAT_VHD_LEGACY: - volume_format = self._DISK_FORMAT_VHD - - return volume_format - - def _get_volume_format_spec(self, volume): - vol_type = volume.volume_type - extra_specs = {} - if vol_type and vol_type.extra_specs: - extra_specs = vol_type.extra_specs - - extra_specs.update(volume.metadata or {}) - - return (extra_specs.get('volume_format') or - extra_specs.get('smbfs:volume_format') or - self.configuration.smbfs_default_volume_format) - - @coordination.synchronized('{self.driver_prefix}-{volume.id}') - def create_volume(self, volume): - return super(WindowsSmbfsDriver, self).create_volume(volume) - - def _do_create_volume(self, volume): - volume_path = self.local_path(volume) - volume_format = self.get_volume_format(volume) - volume_size_bytes = volume.size * units.Gi - - if os.path.exists(volume_path): - err_msg = _('File already exists at: %s') % volume_path - raise exception.InvalidVolume(err_msg) - - if volume_format not in self._SUPPORTED_IMAGE_FORMATS: - err_msg = _("Unsupported volume format: %s ") % volume_format - raise exception.InvalidVolume(err_msg) - - self._vhdutils.create_dynamic_vhd(volume_path, volume_size_bytes) - - def _ensure_share_mounted(self, smbfs_share): - mnt_flags = None - if self.shares.get(smbfs_share) is not None: - mnt_flags = self.shares[smbfs_share] - self._remotefsclient.mount(smbfs_share, mnt_flags) - - @coordination.synchronized('{self.driver_prefix}-{volume.id}') - def delete_volume(self, volume): - """Deletes a logical volume.""" - if not volume.provider_location: - LOG.warning('Volume %s does not have provider_location ' - 'specified, skipping.', volume.name) - return - - 
self._ensure_share_mounted(volume.provider_location) - volume_dir = self._local_volume_dir(volume) - mounted_path = os.path.join(volume_dir, - self.get_active_image_from_info(volume)) - if os.path.exists(mounted_path): - self._delete(mounted_path) - else: - LOG.debug("Skipping deletion of volume %s as it does not exist.", - mounted_path) - - info_path = self._local_path_volume_info(volume) - self._delete(info_path) - - def _delete(self, path): - fileutils.delete_if_exists(path) - - def _get_capacity_info(self, smbfs_share): - """Calculate available space on the SMBFS share. - - :param smbfs_share: example //172.18.194.100/var/smbfs - """ - mount_point = self._get_mount_point_for_share(smbfs_share) - total_size, total_available = self._diskutils.get_disk_capacity( - mount_point) - total_allocated = self._get_total_allocated(smbfs_share) - return_value = [total_size, total_available, total_allocated] - LOG.info('Smb share %(share)s Total size %(size)s ' - 'Total allocated %(allocated)s', - {'share': smbfs_share, 'size': total_size, - 'allocated': total_allocated}) - return [float(x) for x in return_value] - - def _img_commit(self, snapshot_path): - self._vhdutils.merge_vhd(snapshot_path) - - def _rebase_img(self, image, backing_file, volume_format): - # Relative path names are not supported in this case. - image_dir = os.path.dirname(image) - backing_file_path = os.path.join(image_dir, backing_file) - self._vhdutils.reconnect_parent_vhd(image, backing_file_path) - - def _qemu_img_info(self, path, volume_name=None): - # This code expects to deal only with relative filenames. - # As this method is needed by the upper class and qemu-img does - # not fully support vhdx images, for the moment we'll use Win32 API - # for retrieving image information. 
- parent_path = self._vhdutils.get_vhd_parent_path(path) - file_format = os.path.splitext(path)[1][1:].lower() - - if parent_path: - backing_file_name = os.path.split(parent_path)[1].lower() - else: - backing_file_name = None - - class ImageInfo(object): - def __init__(self, image, backing_file): - self.image = image - self.backing_file = backing_file - self.file_format = file_format - - return ImageInfo(os.path.basename(path), - backing_file_name) - - def _do_create_snapshot(self, snapshot, backing_file, new_snap_path): - if snapshot.volume.status == 'in-use': - LOG.debug("Snapshot is in-use. Performing Nova " - "assisted creation.") - return - - backing_file_full_path = os.path.join( - self._local_volume_dir(snapshot.volume), - backing_file) - self._vhdutils.create_differencing_vhd(new_snap_path, - backing_file_full_path) - - def _extend_volume(self, volume, size_gb): - self._check_extend_volume_support(volume, size_gb) - - volume_path = self._local_path_active_image(volume) - - LOG.info('Resizing file %(volume_path)s to %(size_gb)sGB.', - dict(volume_path=volume_path, size_gb=size_gb)) - - self._vhdutils.resize_vhd(volume_path, size_gb * units.Gi, - is_file_max_size=False) - - def _delete_snapshot(self, snapshot): - # NOTE(lpetrut): We're slightly diverging from the super class - # workflow. The reason is that we cannot query in-use vhd/x images, - # nor can we add or remove images from a vhd/x chain in this case. 
- volume_status = snapshot.volume.status - if volume_status != 'in-use': - return super(WindowsSmbfsDriver, self)._delete_snapshot(snapshot) - - info_path = self._local_path_volume_info(snapshot.volume) - snap_info = self._read_info_file(info_path, empty_if_missing=True) - - if snapshot.id not in snap_info: - LOG.info('Snapshot record for %s is not present, allowing ' - 'snapshot_delete to proceed.', snapshot.id) - return - - file_to_merge = snap_info[snapshot.id] - delete_info = {'file_to_merge': file_to_merge, - 'volume_id': snapshot.volume.id} - self._nova_assisted_vol_snap_delete( - snapshot._context, snapshot, delete_info) - - # At this point, the image file should no longer be in use, so we - # may safely query it so that we can update the 'active' image - # reference, if needed. - merged_img_path = os.path.join( - self._local_volume_dir(snapshot.volume), - file_to_merge) - if utils.paths_normcase_equal(snap_info['active'], file_to_merge): - new_active_file_path = self._vhdutils.get_vhd_parent_path( - merged_img_path).lower() - snap_info['active'] = os.path.basename(new_active_file_path) - - self._delete(merged_img_path) - - # TODO(lpetrut): drop snapshot info file usage. - del(snap_info[snapshot.id]) - self._write_info_file(info_path, snap_info) - - def _check_extend_volume_support(self, volume, size_gb): - snapshots_exist = self._snapshots_exist(volume) - fmt = self.get_volume_format(volume) - - if snapshots_exist and fmt == self._DISK_FORMAT_VHD: - msg = _('Extending volumes backed by VHD images is not supported ' - 'when snapshots exist. 
Please use VHDX images.') - raise exception.InvalidVolume(msg) - - @coordination.synchronized('{self.driver_prefix}-{volume.id}') - def copy_volume_to_image(self, context, volume, image_service, image_meta): - """Copy the volume to the specified image.""" - - # If snapshots exist, flatten to a temporary image, and upload it - - active_file = self.get_active_image_from_info(volume) - active_file_path = os.path.join(self._local_volume_dir(volume), - active_file) - backing_file = self._vhdutils.get_vhd_parent_path(active_file_path) - root_file_fmt = self.get_volume_format(volume) - - temp_path = None - - try: - if backing_file: - temp_file_name = '%s.temp_image.%s.%s' % ( - volume.id, - image_meta['id'], - root_file_fmt) - temp_path = os.path.join(self._local_volume_dir(volume), - temp_file_name) - - self._vhdutils.convert_vhd(active_file_path, temp_path) - upload_path = temp_path - else: - upload_path = active_file_path - - image_utils.upload_volume(context, - image_service, - image_meta, - upload_path, - root_file_fmt) - finally: - if temp_path: - self._delete(temp_path) - - def copy_image_to_volume(self, context, volume, image_service, image_id): - """Fetch the image from image_service and write it to the volume.""" - volume_path = self.local_path(volume) - volume_format = self.get_volume_format(volume, qemu_format=True) - self._delete(volume_path) - - image_utils.fetch_to_volume_format( - context, image_service, image_id, - volume_path, volume_format, - self.configuration.volume_dd_blocksize) - - self._vhdutils.resize_vhd(self.local_path(volume), - volume.size * units.Gi, - is_file_max_size=False) - - def _copy_volume_from_snapshot(self, snapshot, volume, volume_size): - """Copy data from snapshot to destination volume.""" - - LOG.debug("snapshot: %(snap)s, volume: %(vol)s, " - "volume_size: %(size)s", - {'snap': snapshot.id, - 'vol': volume.id, - 'size': snapshot.volume_size}) - - info_path = self._local_path_volume_info(snapshot.volume) - snap_info = 
self._read_info_file(info_path) - vol_dir = self._local_volume_dir(snapshot.volume) - - forward_file = snap_info[snapshot.id] - forward_path = os.path.join(vol_dir, forward_file) - - # Find the file which backs this file, which represents the point - # when this snapshot was created. - img_info = self._qemu_img_info(forward_path) - snapshot_path = os.path.join(vol_dir, img_info.backing_file) - - volume_path = self.local_path(volume) - self._delete(volume_path) - self._vhdutils.convert_vhd(snapshot_path, - volume_path) - self._vhdutils.resize_vhd(volume_path, volume_size * units.Gi, - is_file_max_size=False) - - def _copy_volume_image(self, src_path, dest_path): - self._pathutils.copy(src_path, dest_path) - - def _get_share_name(self, share): - return share.replace('/', '\\').lstrip('\\').split('\\', 1)[1] - - def _get_pool_name_from_share(self, share): - return self._pool_mappings[share] - - def _get_share_from_pool_name(self, pool_name): - mappings = {pool: share - for share, pool in self._pool_mappings.items()} - share = mappings.get(pool_name) - - if not share: - msg = _("Could not find any share for pool %(pool_name)s. " - "Pool mappings: %(pool_mappings)s.") - raise exception.SmbfsException( - msg % dict(pool_name=pool_name, - pool_mappings=self._pool_mappings)) - return share diff --git a/cinder/volume/drivers/windows/windows.py b/cinder/volume/drivers/windows/windows.py deleted file mode 100644 index 2226fd40e..000000000 --- a/cinder/volume/drivers/windows/windows.py +++ /dev/null @@ -1,345 +0,0 @@ -# Copyright 2012 Pedro Navarro Perez -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. 
You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -""" -Volume driver for Windows Server 2012 - -This driver requires ISCSI target role installed - -""" - -import contextlib -import os - -from os_win import utilsfactory -from oslo_config import cfg -from oslo_log import log as logging -from oslo_utils import fileutils -from oslo_utils import units -from oslo_utils import uuidutils - -from cinder import exception -from cinder.image import image_utils -from cinder.volume import configuration -from cinder.volume import driver -from cinder.volume import utils - -LOG = logging.getLogger(__name__) - -windows_opts = [ - cfg.StrOpt('windows_iscsi_lun_path', - default=r'C:\iSCSIVirtualDisks', - help='Path to store VHD backed volumes'), -] - -CONF = cfg.CONF -CONF.register_opts(windows_opts, group=configuration.SHARED_CONF_GROUP) - - -class WindowsDriver(driver.ISCSIDriver): - """Executes volume driver commands on Windows Storage server.""" - - VERSION = '1.0.0' - - # ThirdPartySystems wiki page - CI_WIKI_NAME = "Microsoft_iSCSI_CI" - - def __init__(self, *args, **kwargs): - super(WindowsDriver, self).__init__(*args, **kwargs) - self.configuration = kwargs.get('configuration', None) - if self.configuration: - self.configuration.append_config_values(windows_opts) - - self._vhdutils = utilsfactory.get_vhdutils() - self._tgt_utils = utilsfactory.get_iscsi_target_utils() - self._hostutils = utilsfactory.get_hostutils() - - def do_setup(self, context): - """Setup the Windows Volume driver. - - Called one time by the manager after the driver is loaded. 
- Validate the flags we care about - """ - fileutils.ensure_tree(self.configuration.windows_iscsi_lun_path) - fileutils.ensure_tree(CONF.image_conversion_dir) - - def check_for_setup_error(self): - """Check that the driver is working and can communicate.""" - self._get_portals() - - def _get_portals(self): - available_portals = set(self._tgt_utils.get_portal_locations( - available_only=True, - fail_if_none_found=True)) - LOG.debug("Available iSCSI portals: %s", available_portals) - - iscsi_port = self.configuration.iscsi_port - iscsi_ips = ([self.configuration.iscsi_ip_address] + - self.configuration.iscsi_secondary_ip_addresses) - requested_portals = {':'.join([iscsi_ip, str(iscsi_port)]) - for iscsi_ip in iscsi_ips} - - unavailable_portals = requested_portals - available_portals - if unavailable_portals: - LOG.warning("The following iSCSI portals were requested but " - "are not available: %s.", unavailable_portals) - - selected_portals = requested_portals & available_portals - if not selected_portals: - err_msg = "None of the configured iSCSI portals are available." - raise exception.VolumeDriverException(err_msg) - - return list(selected_portals) - - def _get_host_information(self, volume, multipath=False): - """Getting the portal and port information.""" - target_name = self._get_target_name(volume) - - available_portals = self._get_portals() - properties = self._tgt_utils.get_target_information(target_name) - - # Note(lpetrut): the WT_Host CHAPSecret field cannot be accessed - # for security reasons. 
- auth = volume.provider_auth - if auth: - (auth_method, auth_username, auth_secret) = auth.split() - properties['auth_method'] = auth_method - properties['auth_username'] = auth_username - properties['auth_password'] = auth_secret - - properties['target_portal'] = available_portals[0] - properties['target_discovered'] = False - properties['target_lun'] = 0 - properties['volume_id'] = volume.id - - if multipath: - properties['target_portals'] = available_portals - properties['target_iqns'] = [properties['target_iqn'] - for portal in available_portals] - properties['target_luns'] = [properties['target_lun'] - for portal in available_portals] - - return properties - - def initialize_connection(self, volume, connector): - """Driver entry point to attach a volume to an instance.""" - initiator_name = connector['initiator'] - target_name = volume.provider_location - - self._tgt_utils.associate_initiator_with_iscsi_target(initiator_name, - target_name) - - properties = self._get_host_information(volume, - connector.get('multipath')) - - return { - 'driver_volume_type': 'iscsi', - 'data': properties, - } - - def terminate_connection(self, volume, connector, **kwargs): - """Driver entry point to unattach a volume from an instance. - - Unmask the LUN on the storage system so the given initiator can no - longer access it. 
- """ - initiator_name = connector['initiator'] - target_name = volume.provider_location - self._tgt_utils.deassociate_initiator(initiator_name, target_name) - - def create_volume(self, volume): - """Driver entry point for creating a new volume.""" - vhd_path = self.local_path(volume) - vol_name = volume.name - vol_size_mb = volume.size * 1024 - - self._tgt_utils.create_wt_disk(vhd_path, vol_name, - size_mb=vol_size_mb) - - def local_path(self, volume, disk_format=None): - base_vhd_folder = self.configuration.windows_iscsi_lun_path - if not disk_format: - disk_format = self._tgt_utils.get_supported_disk_format() - - disk_fname = "%s.%s" % (volume.name, disk_format) - return os.path.join(base_vhd_folder, disk_fname) - - def delete_volume(self, volume): - """Driver entry point for destroying existing volumes.""" - vol_name = volume.name - vhd_path = self.local_path(volume) - - self._tgt_utils.remove_wt_disk(vol_name) - fileutils.delete_if_exists(vhd_path) - - def create_snapshot(self, snapshot): - """Driver entry point for creating a snapshot.""" - # Getting WT_Snapshot class - vol_name = snapshot.volume_name - snapshot_name = snapshot.name - - self._tgt_utils.create_snapshot(vol_name, snapshot_name) - - def create_volume_from_snapshot(self, volume, snapshot): - """Driver entry point for exporting snapshots as volumes.""" - snapshot_name = snapshot.name - vol_name = volume.name - vhd_path = self.local_path(volume) - - self._tgt_utils.export_snapshot(snapshot_name, vhd_path) - self._tgt_utils.import_wt_disk(vhd_path, vol_name) - - def delete_snapshot(self, snapshot): - """Driver entry point for deleting a snapshot.""" - snapshot_name = snapshot.name - self._tgt_utils.delete_snapshot(snapshot_name) - - def ensure_export(self, context, volume): - # iSCSI targets exported by WinTarget persist after host reboot. 
- pass - - def _get_target_name(self, volume): - return "%s%s" % (self.configuration.iscsi_target_prefix, - volume.name) - - def create_export(self, context, volume, connector): - """Driver entry point to get the export info for a new volume.""" - target_name = self._get_target_name(volume) - updates = {} - - if not self._tgt_utils.iscsi_target_exists(target_name): - self._tgt_utils.create_iscsi_target(target_name) - updates['provider_location'] = target_name - - if self.configuration.use_chap_auth: - chap_username = (self.configuration.chap_username or - utils.generate_username()) - chap_password = (self.configuration.chap_password or - utils.generate_password()) - - self._tgt_utils.set_chap_credentials(target_name, - chap_username, - chap_password) - - updates['provider_auth'] = ' '.join(('CHAP', - chap_username, - chap_password)) - - # This operation is idempotent - self._tgt_utils.add_disk_to_target(volume.name, target_name) - - return updates - - def remove_export(self, context, volume): - """Driver entry point to remove an export for a volume.""" - target_name = self._get_target_name(volume) - self._tgt_utils.delete_iscsi_target(target_name) - - def copy_image_to_volume(self, context, volume, image_service, image_id): - """Fetch the image from image_service and create a volume using it.""" - # Convert to VHD and file back to VHD - vhd_type = self._tgt_utils.get_supported_vhd_type() - with image_utils.temporary_file(suffix='.vhd') as tmp: - volume_path = self.local_path(volume) - image_utils.fetch_to_vhd(context, image_service, image_id, tmp, - self.configuration.volume_dd_blocksize) - # The vhd must be disabled and deleted before being replaced with - # the desired image. 
- self._tgt_utils.change_wt_disk_status(volume.name, - enabled=False) - os.unlink(volume_path) - self._vhdutils.convert_vhd(tmp, volume_path, - vhd_type) - self._vhdutils.resize_vhd(volume_path, - volume.size << 30, - is_file_max_size=False) - self._tgt_utils.change_wt_disk_status(volume.name, - enabled=True) - - @contextlib.contextmanager - def _temporary_snapshot(self, volume_name): - try: - snap_uuid = uuidutils.generate_uuid() - snapshot_name = '%s-tmp-snapshot-%s' % (volume_name, snap_uuid) - self._tgt_utils.create_snapshot(volume_name, snapshot_name) - yield snapshot_name - finally: - self._tgt_utils.delete_snapshot(snapshot_name) - - def copy_volume_to_image(self, context, volume, image_service, image_meta): - """Copy the volume to the specified image.""" - disk_format = self._tgt_utils.get_supported_disk_format() - temp_vhd_path = os.path.join(CONF.image_conversion_dir, - str(image_meta['id']) + '.' + disk_format) - - try: - with self._temporary_snapshot(volume.name) as tmp_snap_name: - # qemu-img cannot access VSS snapshots, for which reason it - # must be exported first. 
- self._tgt_utils.export_snapshot(tmp_snap_name, temp_vhd_path) - image_utils.upload_volume(context, image_service, image_meta, - temp_vhd_path, 'vhd') - finally: - fileutils.delete_if_exists(temp_vhd_path) - - def create_cloned_volume(self, volume, src_vref): - """Creates a clone of the specified volume.""" - src_vol_name = src_vref.name - vol_name = volume.name - vol_size = volume.size - - new_vhd_path = self.local_path(volume) - - with self._temporary_snapshot(src_vol_name) as tmp_snap_name: - self._tgt_utils.export_snapshot(tmp_snap_name, new_vhd_path) - self._vhdutils.resize_vhd(new_vhd_path, vol_size << 30, - is_file_max_size=False) - - self._tgt_utils.import_wt_disk(new_vhd_path, vol_name) - - def _get_capacity_info(self): - drive = os.path.splitdrive( - self.configuration.windows_iscsi_lun_path)[0] - (size, free_space) = self._hostutils.get_volume_info(drive) - - total_gb = size / units.Gi - free_gb = free_space / units.Gi - return (total_gb, free_gb) - - def _update_volume_stats(self): - """Retrieve stats info for Windows device.""" - LOG.debug("Updating volume stats") - total_gb, free_gb = self._get_capacity_info() - - data = {} - backend_name = self.configuration.safe_get('volume_backend_name') - data["volume_backend_name"] = backend_name or self.__class__.__name__ - data["vendor_name"] = 'Microsoft' - data["driver_version"] = self.VERSION - data["storage_protocol"] = 'iSCSI' - data['total_capacity_gb'] = total_gb - data['free_capacity_gb'] = free_gb - data['reserved_percentage'] = self.configuration.reserved_percentage - data['QoS_support'] = False - - self._stats = data - - def extend_volume(self, volume, new_size): - """Extend an Existing Volume.""" - old_size = volume.size - LOG.debug("Extend volume from %(old_size)s GB to %(new_size)s GB.", - {'old_size': old_size, 'new_size': new_size}) - additional_size_mb = (new_size - old_size) * 1024 - - self._tgt_utils.extend_wt_disk(volume.name, additional_size_mb) diff --git a/cinder/volume/drivers/xio.py 
b/cinder/volume/drivers/xio.py deleted file mode 100644 index 1b46e966f..000000000 --- a/cinder/volume/drivers/xio.py +++ /dev/null @@ -1,1633 +0,0 @@ -# Copyright (c) 2014 X-IO. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from lxml import etree -from oslo_config import cfg -from oslo_log import log as logging -from oslo_serialization import base64 -from oslo_service import loopingcall -from six.moves import urllib - -from cinder import context -from cinder import exception -from cinder import interface -from cinder.volume import configuration -from cinder.volume import driver -from cinder.volume.drivers.san import san -from cinder.volume import qos_specs -from cinder.volume import volume_types -from cinder.zonemanager import utils as fczm_utils - -XIO_OPTS = [ - cfg.IntOpt('ise_storage_pool', default=1, - help='Default storage pool for volumes.'), - cfg.IntOpt('ise_raid', default=1, - help='Raid level for ISE volumes.'), - cfg.IntOpt('ise_connection_retries', default=5, - help='Number of retries (per port) when establishing ' - 'connection to ISE management port.'), - cfg.IntOpt('ise_retry_interval', default=1, - help='Interval (secs) between retries.'), - cfg.IntOpt('ise_completion_retries', default=30, - help='Number on retries to get completion status after ' - 'issuing a command to ISE.'), -] - - -CONF = cfg.CONF -CONF.register_opts(XIO_OPTS, group=configuration.SHARED_CONF_GROUP) - -LOG = logging.getLogger(__name__) - 
-OPERATIONAL_STATUS = 'OPERATIONAL' -PREPARED_STATUS = 'PREPARED' -INVALID_STATUS = 'VALID' -NOTFOUND_STATUS = 'NOT FOUND' - - -# Raise exception for X-IO driver -def RaiseXIODriverException(): - raise exception.XIODriverException() - - -class XIOISEDriver(driver.VolumeDriver): - - VERSION = '1.1.4' - - # Version Changes - # 1.0.0 Base driver - # 1.1.0 QoS, affinity, retype and thin support - # 1.1.1 Fix retry loop (Bug 1429283) - # 1.1.2 Fix host object deletion (Bug 1433450). - # 1.1.3 Wait for volume/snapshot to be deleted. - # 1.1.4 Force target_lun to be int (Bug 1549048) - - # ThirdPartySystems wiki page - CI_WIKI_NAME = "X-IO_technologies_CI" - - # TODO(smcginnis) Remove driver in Queens if CI is not fixed - SUPPORTED = False - - def __init__(self, *args, **kwargs): - super(XIOISEDriver, self).__init__() - LOG.debug("XIOISEDriver __init__ called.") - self.configuration = kwargs.get('configuration', None) - self.ise_primary_ip = '' - self.ise_secondary_ip = '' - self.newquery = 1 - self.ise_globalid = None - self._vol_stats = {} - - def do_setup(self, context): - LOG.debug("XIOISEDriver do_setup called.") - self._get_ise_globalid() - - def check_for_setup_error(self): - LOG.debug("XIOISEDriver check_for_setup_error called.") - # The san_ip must always be set - if self.configuration.san_ip == "": - LOG.error("san ip must be configured!") - RaiseXIODriverException() - # The san_login must always be set - if self.configuration.san_login == "": - LOG.error("san_login must be configured!") - RaiseXIODriverException() - # The san_password must always be set - if self.configuration.san_password == "": - LOG.error("san_password must be configured!") - RaiseXIODriverException() - return - - def _get_version(self): - """Return driver version.""" - return self.VERSION - - def _send_query(self): - """Do initial query to populate ISE global id.""" - body = '' - url = '/query' - resp = self._connect('GET', url, body) - status = resp['status'] - if status != 200: - # 
unsuccessful - this is fatal as we need the global id - # to build REST requests. - LOG.error("Array query failed - No response (%d)!", status) - RaiseXIODriverException() - # Successfully fetched QUERY info. Parse out globalid along with - # ipaddress for Controller 1 and Controller 2. We assign primary - # ipaddress to use based on controller rank - xml_tree = etree.fromstring(resp['content']) - # first check that the ISE is running a supported FW version - support = {} - support['thin'] = False - support['clones'] = False - support['thin-clones'] = False - self.configuration.ise_affinity = False - self.configuration.ise_qos = False - capabilities = xml_tree.find('capabilities') - if capabilities is None: - LOG.error("Array query failed. No capabilities in response!") - RaiseXIODriverException() - for node in capabilities: - if node.tag != 'capability': - continue - capability = node - if capability.attrib['value'] == '49003': - self.configuration.ise_affinity = True - elif capability.attrib['value'] == '49004': - self.configuration.ise_qos = True - elif capability.attrib['value'] == '49005': - support['thin'] = True - elif capability.attrib['value'] == '49006': - support['clones'] = True - elif capability.attrib['value'] == '49007': - support['thin-clones'] = True - # Make sure ISE support necessary features - if not support['clones']: - LOG.error("ISE FW version is not compatible with OpenStack!") - RaiseXIODriverException() - # set up thin provisioning support - self.configuration.san_thin_provision = support['thin-clones'] - # Fill in global id, primary and secondary ip addresses - globalid = xml_tree.find('globalid') - if globalid is None: - LOG.error("Array query failed. No global id in XML response!") - RaiseXIODriverException() - self.ise_globalid = globalid.text - controllers = xml_tree.find('controllers') - if controllers is None: - LOG.error("Array query failed. 
No controllers in response!") - RaiseXIODriverException() - for node in controllers: - if node.tag != 'controller': - continue - # found a controller node - controller = node - ipaddress = controller.find('ipaddress') - ranktag = controller.find('rank') - if ipaddress is None: - continue - # found an ipaddress tag - # make sure rank tag is present - if ranktag is None: - continue - rank = ranktag.attrib['value'] - # make sure rank value is present - if rank is None: - continue - if rank == '1': - # rank 1 means primary (xo) - self.ise_primary_ip = ipaddress.text - LOG.debug('Setting primary IP to: %s.', - self.ise_primary_ip) - elif rank == '0': - # rank 0 means secondary (nxo) - self.ise_secondary_ip = ipaddress.text - LOG.debug('Setting secondary IP to: %s.', - self.ise_secondary_ip) - # clear out new query request flag on successful fetch of QUERY info. - self.newquery = 0 - return support - - def _get_ise_globalid(self): - """Return ISE globalid.""" - if self.ise_globalid is None or self.newquery == 1: - # this call will populate globalid - self._send_query() - if self.ise_globalid is None: - LOG.error("ISE globalid not set!") - RaiseXIODriverException() - return self.ise_globalid - - def _get_ise_primary_ip(self): - """Return Primary IP address to REST API.""" - if self.ise_primary_ip == '': - # Primary IP is set to ISE IP passed in from cinder.conf - self.ise_primary_ip = self.configuration.san_ip - if self.ise_primary_ip == '': - # No IP - fatal. 
- LOG.error("Primary IP must be set!") - RaiseXIODriverException() - return self.ise_primary_ip - - def _get_ise_secondary_ip(self): - """Return Secondary IP address to REST API.""" - if self.ise_secondary_ip != '': - return self.ise_secondary_ip - - def _get_uri_prefix(self): - """Returns prefix in form of http(s)://1.2.3.4""" - prefix = '' - # figure out if http or https should be used - if self.configuration.driver_use_ssl: - prefix = 'https://' - else: - prefix = 'http://' - # add the IP address - prefix += self._get_ise_primary_ip() - return prefix - - def _opener(self, method, url, body, header): - """Wrapper to handle connection""" - response = {} - response['status'] = 0 - response['content'] = '' - response['location'] = '' - # send the request - req = urllib.request.Request(url, body, header) - # Override method to allow GET, PUT, POST, DELETE - req.get_method = lambda: method - try: - # IP addr formed from code and cinder.conf so URL can be trusted - resp = urllib.request.urlopen(req) # nosec - except urllib.error.HTTPError as err: - # HTTP error. Return HTTP status and content and let caller - # handle retries. - response['status'] = err.code - response['content'] = err.read() - except urllib.error.URLError as err: - # Connection failure. Return a status of 0 to indicate error. - response['status'] = 0 - else: - # Successful. Return status code, content, - # and location header, if present. - response['status'] = resp.getcode() - response['content'] = resp.read() - response['location'] = \ - resp.info().getheader('Content-Location', '') - return response - - def _help_call_method(self, args, retry_count): - """Helper function used for prepare clone and delete REST calls.""" - # This function calls request method and URL and checks the response. - # Certain cases allows for retries, while success and fatal status - # will fall out and tell parent to break out of loop. 
- # initialize remaining to one less than retries - remaining = retry_count - resp = self._send_cmd(args['method'], args['url'], args['arglist']) - status = resp['status'] - if (status == 400): - reason = '' - if 'content' in resp: - reason = etree.fromstring(resp['content']) - if reason is not None: - reason = reason.text.upper() - if INVALID_STATUS in reason: - # Request failed with an invalid state. This can be because - # source volume is in a temporary unavailable state. - LOG.debug('REST call failed with invalid state: ' - '%(method)s - %(status)d - %(reason)s', - {'method': args['method'], - 'status': status, 'reason': reason}) - # Let parent check retry eligibility based on remaining retries - remaining -= 1 - else: - # Fatal error. Set remaining to 0 to make caller exit loop. - remaining = 0 - else: - # set remaining to 0 to make caller exit loop - # original waiter will handle the difference between success and - # fatal error based on resp['status']. - remaining = 0 - return (remaining, resp) - - def _help_call_opener(self, args, retry_count): - """Helper function to call _opener.""" - # This function calls _opener func and checks the response. - # If response is 0 it will decrement the remaining retry count. - # On successful connection it will set remaining to 0 to signal - # parent to break out of loop. - remaining = retry_count - response = self._opener(args['method'], args['url'], - args['body'], args['header']) - if response['status'] != 0: - # We are done - remaining = 0 - else: - # Let parent check retry eligibility based on remaining retries. - remaining -= 1 - # Return remaining and response - return (remaining, response) - - def _help_wait_for_status(self, args, retry_count): - """Helper function to wait for specified volume status""" - # This function calls _get_volume_info and checks the response. - # If the status strings do not match the specified status it will - # return the remaining retry count decremented by one. 
- # On successful match it will set remaining to 0 to signal - # parent to break out of loop. - remaining = retry_count - info = self._get_volume_info(args['name']) - status = args['status_string'] - if (status in info['string'] or status in info['details']): - remaining = 0 - else: - # Let parent check retry eligibility based on remaining retries. - remaining -= 1 - # return remaining and volume info - return (remaining, info) - - def _wait_for_completion(self, help_func, args, retry_count): - """Helper function to wait for completion of passed function""" - # Helper call loop function. - def _call_loop(loop_args): - remaining = loop_args['retries'] - args = loop_args['args'] - LOG.debug("In call loop (%(remaining)d) %(args)s", - {'remaining': remaining, 'args': args}) - (remaining, response) = loop_args['func'](args, remaining) - if remaining == 0: - # We are done - let our caller handle response - raise loopingcall.LoopingCallDone(response) - loop_args['retries'] = remaining - - # Setup retries, interval and call wait function. - loop_args = {} - loop_args['retries'] = retry_count - loop_args['func'] = help_func - loop_args['args'] = args - interval = self.configuration.ise_retry_interval - timer = loopingcall.FixedIntervalLoopingCall(_call_loop, loop_args) - return timer.start(interval).wait() - - def _connect(self, method, uri, body=''): - """Set up URL and HTML and call _opener to make request""" - url = '' - # see if we need to add prefix - # this call will force primary ip to be filled in as well - prefix = self._get_uri_prefix() - if prefix not in uri: - url = prefix - url += uri - # set up headers for XML and Auth - header = {'Content-Type': 'application/xml; charset=utf-8'} - auth_key = ('%s:%s' - % (self.configuration.san_login, - self.configuration.san_password)) - auth_key = base64.encode_as_text(auth_key) - header['Authorization'] = 'Basic %s' % auth_key - # We allow 5 retries on each IP address. 
If connection to primary - # fails, secondary will be tried. If connection to secondary is - # successful, the request flag for a new QUERY will be set. The QUERY - # will be sent on next connection attempt to figure out which - # controller is primary in case it has changed. - LOG.debug("Connect: %(method)s %(url)s %(body)s", - {'method': method, 'url': url, 'body': body}) - using_secondary = 0 - response = {} - response['status'] = 0 - response['location'] = '' - response['content'] = '' - primary_ip = self._get_ise_primary_ip() - secondary_ip = self._get_ise_secondary_ip() - # This will first try connecting to primary IP and then secondary IP. - args = {} - args['method'] = method - args['url'] = url - args['body'] = body - args['header'] = header - retries = self.configuration.ise_connection_retries - while True: - response = self._wait_for_completion(self._help_call_opener, - args, retries) - if response['status'] != 0: - # Connection succeeded. Request new query on next connection - # attempt if we used secondary ip to sort out who should be - # primary going forward - self.newquery = using_secondary - return response - # connection failed - check if we have any retries left - if using_secondary == 0: - # connection on primary ip failed - # try secondary ip - if secondary_ip is '': - # if secondary is not setup yet, then assert - # connection on primary and secondary ip failed - LOG.error("Connection to %s failed and no secondary!", - primary_ip) - RaiseXIODriverException() - # swap primary for secondary ip in URL - url = url.replace(primary_ip, secondary_ip) - LOG.debug('Trying secondary IP URL: %s', url) - using_secondary = 1 - continue - # connection failed on both IPs - break out of the loop - break - # connection on primary and secondary ip failed - LOG.error("Could not connect to %(primary)s or %(secondary)s!", - {'primary': primary_ip, 'secondary': secondary_ip}) - RaiseXIODriverException() - - def _param_string(self, params): - """Turn (name, value) 
pairs into single param string""" - param_str = [] - for name, value in params.items(): - if value != '': - param_str.append("%s=%s" % (name, value)) - return '&'.join(param_str) - - def _send_cmd(self, method, url, params=None): - """Prepare HTTP request and call _connect""" - params = params or {} - # Add params to appropriate field based on method - if method in ('GET', 'PUT'): - if params: - url += '?' + self._param_string(params) - body = '' - elif method == 'POST': - body = self._param_string(params) - else: - # method like 'DELETE' - body = '' - # ISE REST API is mostly synchronous but has some asynchronous - # streaks. Add retries to work around design of ISE REST API that - # does not allow certain operations to be in process concurrently. - # This is only an issue if lots of CREATE/DELETE/SNAPSHOT/CLONE ops - # are issued in short order. - return self._connect(method, url, body) - - def find_target_chap(self): - """Return target CHAP settings""" - chap = {} - chap['chap_user'] = '' - chap['chap_passwd'] = '' - url = '/storage/arrays/%s/ionetworks' % (self._get_ise_globalid()) - resp = self._send_cmd('GET', url) - status = resp['status'] - if status != 200: - LOG.warning("IOnetworks GET failed (%d)", status) - return chap - # Got a good response. Parse out CHAP info. First check if CHAP is - # enabled and if so parse out username and password. - root = etree.fromstring(resp['content']) - for element in root.iter(): - if element.tag != 'chap': - continue - chapin = element.find('chapin') - if chapin is None: - continue - if chapin.attrib['value'] != '1': - continue - # CHAP is enabled. 
Store username / pw - chap_user = chapin.find('username') - if chap_user is not None: - chap['chap_user'] = chap_user.text - chap_passwd = chapin.find('password') - if chap_passwd is not None: - chap['chap_passwd'] = chap_passwd.text - break - return chap - - def find_target_iqn(self, iscsi_ip): - """Find Target IQN string""" - url = '/storage/arrays/%s/controllers' % (self._get_ise_globalid()) - resp = self._send_cmd('GET', url) - status = resp['status'] - if status != 200: - # Not good. Throw an exception. - LOG.error("Controller GET failed (%d)", status) - RaiseXIODriverException() - # Good response. Parse out IQN that matches iscsi_ip_address - # passed in from cinder.conf. IQN is 'hidden' in globalid field. - root = etree.fromstring(resp['content']) - for element in root.iter(): - if element.tag != 'ioport': - continue - ipaddrs = element.find('ipaddresses') - if ipaddrs is None: - continue - for ipaddr in ipaddrs.iter(): - # Look for match with iscsi_ip_address - if ipaddr is None or ipaddr.text != iscsi_ip: - continue - endpoint = element.find('endpoint') - if endpoint is None: - continue - global_id = endpoint.find('globalid') - if global_id is None: - continue - target_iqn = global_id.text - if target_iqn != '': - return target_iqn - # Did not find a matching IQN. Upsetting. - LOG.error("Failed to get IQN!") - RaiseXIODriverException() - - def find_target_wwns(self): - """Return target WWN""" - # Let's look for WWNs - target_wwns = [] - target = '' - url = '/storage/arrays/%s/controllers' % (self._get_ise_globalid()) - resp = self._send_cmd('GET', url) - status = resp['status'] - if status != 200: - # Not good. Throw an exception. - LOG.error("Controller GET failed (%d)", status) - RaiseXIODriverException() - # Good response. Parse out globalid (WWN) of endpoint that matches - # protocol and type (array). 
- controllers = etree.fromstring(resp['content']) - for controller in controllers.iter(): - if controller.tag != 'controller': - continue - fcports = controller.find('fcports') - if fcports is None: - continue - for fcport in fcports: - if fcport.tag != 'fcport': - continue - wwn_tag = fcport.find('wwn') - if wwn_tag is None: - continue - target = wwn_tag.text - target_wwns.append(target) - return target_wwns - - def _find_target_lun(self, location): - """Return LUN for allocation specified in location string""" - resp = self._send_cmd('GET', location) - status = resp['status'] - if status != 200: - # Not good. Throw an exception. - LOG.error("Failed to get allocation information (%d)!", - status) - RaiseXIODriverException() - # Good response. Parse out LUN. - xml_tree = etree.fromstring(resp['content']) - allocation = xml_tree.find('allocation') - if allocation is not None: - luntag = allocation.find('lun') - if luntag is not None: - return luntag.text - # Did not find LUN. Throw an exception. - LOG.error("Failed to get LUN information!") - RaiseXIODriverException() - - def _get_volume_info(self, vol_name): - """Return status of ISE volume""" - vol_info = {} - vol_info['value'] = '' - vol_info['string'] = NOTFOUND_STATUS - vol_info['details'] = '' - vol_info['location'] = '' - vol_info['size'] = '' - # Attempt to collect status value, string and details. Also pick up - # location string from response. Location is used in REST calls - # DELETE/SNAPSHOT/CLONE. - # We ask for specific volume, so response should only contain one - # volume entry. - url = '/storage/arrays/%s/volumes' % (self._get_ise_globalid()) - resp = self._send_cmd('GET', url, {'name': vol_name}) - if resp['status'] != 200: - LOG.warning("Could not get status for %(name)s (%(status)d).", - {'name': vol_name, 'status': resp['status']}) - return vol_info - # Good response. Parse down to Volume tag in list of one. 
- root = etree.fromstring(resp['content']) - volume_node = root.find('volume') - if volume_node is None: - LOG.warning("No volume node in XML content.") - return vol_info - # Location can be found as an attribute in the volume node tag. - vol_info['location'] = volume_node.attrib['self'] - # Find status tag - status = volume_node.find('status') - if status is None: - LOG.warning("No status payload for volume %s.", vol_name) - return vol_info - # Fill in value and string from status tag attributes. - vol_info['value'] = status.attrib['value'] - vol_info['string'] = status.attrib['string'].upper() - # Detailed status has it's own list of tags. - details = status.find('details') - if details is not None: - detail = details.find('detail') - if detail is not None: - vol_info['details'] = detail.text.upper() - # Get volume size - size_tag = volume_node.find('size') - if size_tag is not None: - vol_info['size'] = size_tag.text - # Return value, string, details and location. - return vol_info - - def _alloc_location(self, volume, hostname, delete=0): - """Find location string for allocation. Also delete alloc per reqst""" - location = '' - url = '/storage/arrays/%s/allocations' % (self._get_ise_globalid()) - resp = self._send_cmd('GET', url, {'name': volume['name'], - 'hostname': hostname}) - if resp['status'] != 200: - LOG.error("Could not GET allocation information (%d)!", - resp['status']) - RaiseXIODriverException() - # Good response. Find the allocation based on volume name. 
- allocation_tree = etree.fromstring(resp['content']) - for allocation in allocation_tree.iter(): - if allocation.tag != 'allocation': - continue - # verify volume name match - volume_tag = allocation.find('volume') - if volume_tag is None: - continue - volumename_tag = volume_tag.find('volumename') - if volumename_tag is None: - continue - volumename = volumename_tag.text - if volumename != volume['name']: - continue - # verified volume name match - # find endpoints list - endpoints = allocation.find('endpoints') - if endpoints is None: - continue - # Found endpoints list. Found matching host if hostname specified, - # otherwise any host is a go. This is used by the caller to - # delete all allocations (presentations) to a volume. - for endpoint in endpoints.iter(): - if hostname != '': - hname_tag = endpoint.find('hostname') - if hname_tag is None: - continue - if hname_tag.text.upper() != hostname.upper(): - continue - # Found hostname match. Location string is an attribute in - # allocation tag. - location = allocation.attrib['self'] - # Delete allocation if requested. - if delete == 1: - self._send_cmd('DELETE', location) - location = '' - break - else: - return location - return location - - def _present_volume(self, volume, hostname, lun): - """Present volume to host at specified LUN""" - # Set up params with volume name, host name and target lun, if - # specified. - target_lun = lun - params = {'volumename': volume['name'], - 'hostname': hostname} - # Fill in LUN if specified. - if target_lun != '': - params['lun'] = target_lun - # Issue POST call to allocation. 
- url = '/storage/arrays/%s/allocations' % (self._get_ise_globalid()) - resp = self._send_cmd('POST', url, params) - status = resp['status'] - if status == 201: - LOG.info("Volume %s presented.", volume['name']) - elif status == 409: - LOG.warning("Volume %(name)s already presented (%(status)d)!", - {'name': volume['name'], 'status': status}) - else: - LOG.error("Failed to present volume %(name)s (%(status)d)!", - {'name': volume['name'], 'status': status}) - RaiseXIODriverException() - # Fetch LUN. In theory the LUN should be what caller requested. - # We try to use shortcut as location comes back in Location header. - # Make sure shortcut of using location header worked, if not ask - # for it explicitly. - location = resp['location'] - if location == '': - location = self._alloc_location(volume, hostname) - # Find target LUN - if location != '': - target_lun = self._find_target_lun(location) - # Success. Return target LUN. - LOG.debug("Volume %(volume)s presented: %(host)s %(lun)s", - {'volume': volume['name'], 'host': hostname, - 'lun': target_lun}) - return target_lun - - def find_allocations(self, hostname): - """Find allocations for specified host""" - alloc_cnt = 0 - url = '/storage/arrays/%s/allocations' % (self._get_ise_globalid()) - resp = self._send_cmd('GET', url, {'hostname': hostname}) - status = resp['status'] - if status != 200: - LOG.error("Failed to get allocation information: " - "%(host)s (%(status)d)!", - {'host': hostname, 'status': status}) - RaiseXIODriverException() - # Good response. Count the number of allocations. - allocation_tree = etree.fromstring(resp['content']) - for allocation in allocation_tree.iter(): - if allocation.tag != 'allocation': - continue - alloc_cnt += 1 - return alloc_cnt - - def _find_host(self, endpoints): - """Check if host entry exists on ISE based on endpoint (IQN, WWNs)""" - # FC host might have more than one endpoint. ISCSI has only one. 
- # Check if endpoints is a list, if so use first entry in list for - # host search. - if type(endpoints) is list: - for endpoint in endpoints: - ep = endpoint - break - else: - ep = endpoints - # Got single end point. Now make REST API call to fetch all hosts - LOG.debug("find_host: Looking for host %s.", ep) - host = {} - host['name'] = '' - host['type'] = '' - host['locator'] = '' - params = {} - url = '/storage/arrays/%s/hosts' % (self._get_ise_globalid()) - resp = self._send_cmd('GET', url, params) - status = resp['status'] - if resp['status'] != 200: - LOG.error("Could not find any hosts (%s)", status) - RaiseXIODriverException() - # Good response. Try to match up a host based on end point string. - host_tree = etree.fromstring(resp['content']) - for host_node in host_tree.iter(): - if host_node.tag != 'host': - continue - # Found a host tag. Check if end point matches. - endpoints_node = host_node.find('endpoints') - if endpoints_node is None: - continue - for endpoint_node in endpoints_node.iter(): - if endpoint_node.tag != 'endpoint': - continue - gid = endpoint_node.find('globalid') - if gid is None: - continue - if gid.text.upper() != ep.upper(): - continue - # We have a match. Fill in host name, type and locator - host['locator'] = host_node.attrib['self'] - type_tag = host_node.find('type') - if type_tag is not None: - host['type'] = type_tag.text - name_tag = host_node.find('name') - if name_tag is not None: - host['name'] = name_tag.text - break - # This will be filled in or '' based on findings above. - return host - - def _create_host(self, hostname, endpoints): - """Create host entry on ISE for connector""" - # Create endpoint list for REST call. - endpoint_str = '' - if type(endpoints) is list: - ep_str = [] - ec = 0 - for endpoint in endpoints: - if ec == 0: - ep_str.append("%s" % (endpoint)) - else: - ep_str.append("endpoint=%s" % (endpoint)) - ec += 1 - endpoint_str = '&'.join(ep_str) - else: - endpoint_str = endpoints - # Log host creation. 
- LOG.debug("Create host %(host)s; %(endpoint)s", - {'host': hostname, 'endpoint': endpoint_str}) - # Issue REST call to create host entry of OpenStack type. - params = {'name': hostname, 'endpoint': endpoint_str, - 'os': 'openstack'} - url = '/storage/arrays/%s/hosts' % (self._get_ise_globalid()) - resp = self._send_cmd('POST', url, params) - status = resp['status'] - if status != 201 and status != 409: - LOG.error("POST for host create failed (%s)!", status) - RaiseXIODriverException() - # Successfully created host entry. Return host name. - return hostname - - def _create_clone(self, volume, clone, clone_type): - """Create clone worker function""" - # This function is called for both snapshot and clone - # clone_type specifies what type is being processed - # Creating snapshots and clones is a two step process on current ISE - # FW. First snapshot/clone is prepared and then created. - volume_name = '' - if clone_type == 'snapshot': - volume_name = volume['volume_name'] - elif clone_type == 'clone': - volume_name = volume['name'] - args = {} - # Make sure source volume is ready. This is another case where - # we have to work around asynchronous behavior in ISE REST API. 
- args['name'] = volume_name - args['status_string'] = OPERATIONAL_STATUS - retries = self.configuration.ise_completion_retries - vol_info = self._wait_for_completion(self._help_wait_for_status, - args, retries) - if vol_info['value'] == '0': - LOG.debug('Source volume %s ready.', volume_name) - else: - LOG.error("Source volume %s not ready!", volume_name) - RaiseXIODriverException() - # Prepare snapshot - # get extra_specs and qos specs from source volume - # these functions fill in default values for entries used below - ctxt = context.get_admin_context() - type_id = volume['volume_type_id'] - extra_specs = self._get_extra_specs(ctxt, type_id) - LOG.debug("Volume %(volume_name)s extra_specs %(extra_specs)s", - {'volume_name': volume['name'], 'extra_specs': extra_specs}) - qos = self._get_qos_specs(ctxt, type_id) - # Wait until snapshot/clone is prepared. - args['method'] = 'POST' - args['url'] = vol_info['location'] - args['status'] = 202 - args['arglist'] = {'name': clone['name'], - 'type': clone_type, - 'affinity': extra_specs['affinity'], - 'IOPSmin': qos['minIOPS'], - 'IOPSmax': qos['maxIOPS'], - 'IOPSburst': qos['burstIOPS']} - retries = self.configuration.ise_completion_retries - resp = self._wait_for_completion(self._help_call_method, - args, retries) - if resp['status'] != 202: - # clone prepare failed - bummer - LOG.error("Prepare clone failed for %s.", clone['name']) - RaiseXIODriverException() - # clone prepare request accepted - # make sure not to continue until clone prepared - args['name'] = clone['name'] - args['status_string'] = PREPARED_STATUS - retries = self.configuration.ise_completion_retries - clone_info = self._wait_for_completion(self._help_wait_for_status, - args, retries) - if PREPARED_STATUS in clone_info['details']: - LOG.debug('Clone %s prepared.', clone['name']) - else: - LOG.error("Clone %s not in prepared state!", clone['name']) - RaiseXIODriverException() - # Clone prepared, now commit the create - resp = self._send_cmd('PUT', 
clone_info['location'], - {clone_type: 'true'}) - if resp['status'] != 201: - LOG.error("Commit clone failed: %(name)s (%(status)d)!", - {'name': clone['name'], 'status': resp['status']}) - RaiseXIODriverException() - # Clone create request accepted. Make sure not to return until clone - # operational. - args['name'] = clone['name'] - args['status_string'] = OPERATIONAL_STATUS - retries = self.configuration.ise_completion_retries - clone_info = self._wait_for_completion(self._help_wait_for_status, - args, retries) - if OPERATIONAL_STATUS in clone_info['string']: - LOG.info("Clone %s created.", clone['name']) - else: - LOG.error("Commit failed for %s!", clone['name']) - RaiseXIODriverException() - return - - def _fill_in_available_capacity(self, node, pool): - """Fill in free capacity info for pool.""" - available = node.find('available') - if available is None: - pool['free_capacity_gb'] = 0 - return pool - pool['free_capacity_gb'] = int(available.get('total')) - # Fill in separate RAID level cap - byred = available.find('byredundancy') - if byred is None: - return pool - raid = byred.find('raid-0') - if raid is not None: - pool['free_capacity_gb_raid_0'] = int(raid.text) - raid = byred.find('raid-1') - if raid is not None: - pool['free_capacity_gb_raid_1'] = int(raid.text) - raid = byred.find('raid-5') - if raid is not None: - pool['free_capacity_gb_raid_5'] = int(raid.text) - raid = byred.find('raid-6') - if raid is not None: - pool['free_capacity_gb_raid_6'] = int(raid.text) - return pool - - def _fill_in_used_capacity(self, node, pool): - """Fill in used capacity info for pool.""" - used = node.find('used') - if used is None: - pool['allocated_capacity_gb'] = 0 - return pool - pool['allocated_capacity_gb'] = int(used.get('total')) - # Fill in separate RAID level cap - byred = used.find('byredundancy') - if byred is None: - return pool - raid = byred.find('raid-0') - if raid is not None: - pool['allocated_capacity_gb_raid_0'] = int(raid.text) - raid = 
byred.find('raid-1') - if raid is not None: - pool['allocated_capacity_gb_raid_1'] = int(raid.text) - raid = byred.find('raid-5') - if raid is not None: - pool['allocated_capacity_gb_raid_5'] = int(raid.text) - raid = byred.find('raid-6') - if raid is not None: - pool['allocated_capacity_gb_raid_6'] = int(raid.text) - return pool - - def _get_pools(self): - """Return information about all pools on ISE""" - pools = [] - pool = {} - vol_cnt = 0 - url = '/storage/pools' - resp = self._send_cmd('GET', url) - status = resp['status'] - if status != 200: - # Request failed. Return what we have, which isn't much. - LOG.warning("Could not get pool information (%s)!", status) - return (pools, vol_cnt) - # Parse out available (free) and used. Add them up to get total. - xml_tree = etree.fromstring(resp['content']) - for child in xml_tree: - if child.tag != 'pool': - continue - # Fill in ise pool name - tag = child.find('name') - if tag is not None: - pool['pool_ise_name'] = tag.text - # Fill in globalid - tag = child.find('globalid') - if tag is not None: - pool['globalid'] = tag.text - # Fill in pool name - tag = child.find('id') - if tag is not None: - pool['pool_name'] = tag.text - # Fill in pool status - tag = child.find('status') - if tag is not None: - pool['status'] = tag.attrib['string'] - details = tag.find('details') - if details is not None: - detail = details.find('detail') - if detail is not None: - pool['status_details'] = detail.text - # Fill in available capacity - pool = self._fill_in_available_capacity(child, pool) - # Fill in allocated capacity - pool = self._fill_in_used_capacity(child, pool) - # Fill in media health and type - media = child.find('media') - if media is not None: - medium = media.find('medium') - if medium is not None: - health = medium.find('health') - if health is not None: - pool['health'] = int(health.text) - tier = medium.find('tier') - if tier is not None: - pool['media'] = tier.attrib['string'] - cap = child.find('IOPSmincap') - if 
cap is not None: - pool['minIOPS_capacity'] = cap.text - cap = child.find('IOPSmaxcap') - if cap is not None: - pool['maxIOPS_capacity'] = cap.text - cap = child.find('IOPSburstcap') - if cap is not None: - pool['burstIOPS_capacity'] = cap.text - pool['total_capacity_gb'] = (int(pool['free_capacity_gb'] + - pool['allocated_capacity_gb'])) - pool['QoS_support'] = self.configuration.ise_qos - pool['reserved_percentage'] = 0 - pools.append(pool) - # count volumes - volumes = child.find('volumes') - if volumes is not None: - vol_cnt += len(volumes) - return (pools, vol_cnt) - - def _update_volume_stats(self): - """Update storage information""" - self._send_query() - data = {} - data["vendor_name"] = 'X-IO' - data["driver_version"] = self._get_version() - if self.configuration.volume_backend_name: - backend_name = self.configuration.volume_backend_name - else: - backend_name = self.__class__.__name__ - data["volume_backend_name"] = backend_name - data['reserved_percentage'] = 0 - # Get total and free capacity. 
- (pools, vol_cnt) = self._get_pools() - total_cap = 0 - free_cap = 0 - # fill in global capability support - # capacity - for pool in pools: - total_cap += int(pool['total_capacity_gb']) - free_cap += int(pool['free_capacity_gb']) - data['total_capacity_gb'] = int(total_cap) - data['free_capacity_gb'] = int(free_cap) - # QoS - data['QoS_support'] = self.configuration.ise_qos - # Volume affinity - data['affinity'] = self.configuration.ise_affinity - # Thin provisioning - data['thin'] = self.configuration.san_thin_provision - data['pools'] = pools - data['active_volumes'] = int(vol_cnt) - return data - - def get_volume_stats(self, refresh=False): - """Get volume stats.""" - if refresh: - self._vol_stats = self._update_volume_stats() - LOG.debug("ISE get_volume_stats (total, free): %(total)s, %(free)s", - {'total': self._vol_stats['total_capacity_gb'], - 'free': self._vol_stats['free_capacity_gb']}) - return self._vol_stats - - def _get_extra_specs(self, ctxt, type_id): - """Get extra specs from volume type.""" - specs = {} - specs['affinity'] = '' - specs['alloctype'] = '' - specs['pool'] = self.configuration.ise_storage_pool - specs['raid'] = self.configuration.ise_raid - if type_id is not None: - volume_type = volume_types.get_volume_type(ctxt, type_id) - extra_specs = volume_type.get('extra_specs') - # Parse out RAID, pool and affinity values - for key, value in extra_specs.items(): - subkey = '' - if ':' in key: - fields = key.split(':') - key = fields[0] - subkey = fields[1] - if key.upper() == 'Feature'.upper(): - if subkey.upper() == 'Raid'.upper(): - specs['raid'] = value - elif subkey.upper() == 'Pool'.upper(): - specs['pool'] = value - elif key.upper() == 'Affinity'.upper(): - # Only fill this in if ISE FW supports volume affinity - if self.configuration.ise_affinity: - if subkey.upper() == 'Type'.upper(): - specs['affinity'] = value - elif key.upper() == 'Alloc'.upper(): - # Only fill this in if ISE FW supports thin provisioning - if 
self.configuration.san_thin_provision: - if subkey.upper() == 'Type'.upper(): - specs['alloctype'] = value - return specs - - def _get_qos_specs(self, ctxt, type_id): - """Get QoS specs from volume type.""" - specs = {} - specs['minIOPS'] = '' - specs['maxIOPS'] = '' - specs['burstIOPS'] = '' - if type_id is not None: - volume_type = volume_types.get_volume_type(ctxt, type_id) - qos_specs_id = volume_type.get('qos_specs_id') - if qos_specs_id is not None: - kvs = qos_specs.get_qos_specs(ctxt, qos_specs_id)['specs'] - else: - kvs = volume_type.get('extra_specs') - # Parse out min, max and burst values - for key, value in kvs.items(): - if ':' in key: - fields = key.split(':') - key = fields[1] - if key.upper() == 'minIOPS'.upper(): - specs['minIOPS'] = value - elif key.upper() == 'maxIOPS'.upper(): - specs['maxIOPS'] = value - elif key.upper() == 'burstIOPS'.upper(): - specs['burstIOPS'] = value - return specs - - def create_volume(self, volume): - """Create requested volume""" - LOG.debug("X-IO create_volume called.") - # get extra_specs and qos based on volume type - # these functions fill in default values for entries used below - ctxt = context.get_admin_context() - type_id = volume['volume_type_id'] - extra_specs = self._get_extra_specs(ctxt, type_id) - LOG.debug("Volume %(volume_name)s extra_specs %(extra_specs)s", - {'volume_name': volume['name'], 'extra_specs': extra_specs}) - qos = self._get_qos_specs(ctxt, type_id) - # Make create call - url = '/storage/arrays/%s/volumes' % (self._get_ise_globalid()) - resp = self._send_cmd('POST', url, - {'name': volume['name'], - 'size': volume['size'], - 'pool': extra_specs['pool'], - 'redundancy': extra_specs['raid'], - 'affinity': extra_specs['affinity'], - 'alloctype': extra_specs['alloctype'], - 'IOPSmin': qos['minIOPS'], - 'IOPSmax': qos['maxIOPS'], - 'IOPSburst': qos['burstIOPS']}) - if resp['status'] != 201: - LOG.error("Failed to create volume: %(name)s (%(status)s)", - {'name': volume['name'], 'status': 
resp['status']}) - RaiseXIODriverException() - # Good response. Make sure volume is in operational state before - # returning. Volume creation completes asynchronously. - args = {} - args['name'] = volume['name'] - args['status_string'] = OPERATIONAL_STATUS - retries = self.configuration.ise_completion_retries - vol_info = self._wait_for_completion(self._help_wait_for_status, - args, retries) - if OPERATIONAL_STATUS in vol_info['string']: - # Ready. - LOG.info("Volume %s created", volume['name']) - else: - LOG.error("Failed to create volume %s.", volume['name']) - RaiseXIODriverException() - return - - def create_cloned_volume(self, volume, src_vref): - """Create clone""" - LOG.debug("X-IO create_cloned_volume called.") - self._create_clone(src_vref, volume, 'clone') - - def create_snapshot(self, snapshot): - """Create snapshot""" - LOG.debug("X-IO create_snapshot called.") - # Creating a snapshot uses same interface as clone operation on - # ISE. Clone type ('snapshot' or 'clone') tells the ISE what kind - # of operation is requested. - self._create_clone(snapshot, snapshot, 'snapshot') - - def create_volume_from_snapshot(self, volume, snapshot): - """Create volume from snapshot""" - LOG.debug("X-IO create_volume_from_snapshot called.") - # ISE snapshots are just like a volume so this is a clone operation. - self._create_clone(snapshot, volume, 'clone') - - def _delete_volume(self, volume): - """Delete specified volume""" - # First unpresent volume from all hosts. - self._alloc_location(volume, '', 1) - # Get volume status. Location string for volume comes back - # in response. Used for DELETE call below. - vol_info = self._get_volume_info(volume['name']) - if vol_info['location'] == '': - LOG.warning("%s not found!", volume['name']) - return - # Make DELETE call. 
- args = {} - args['method'] = 'DELETE' - args['url'] = vol_info['location'] - args['arglist'] = {} - args['status'] = 204 - retries = self.configuration.ise_completion_retries - resp = self._wait_for_completion(self._help_call_method, args, retries) - if resp['status'] != 204: - LOG.warning("DELETE call failed for %s!", volume['name']) - return - # DELETE call successful, now wait for completion. - # We do that by waiting for the REST call to return Volume Not Found. - args['method'] = '' - args['url'] = '' - args['name'] = volume['name'] - args['status_string'] = NOTFOUND_STATUS - retries = self.configuration.ise_completion_retries - vol_info = self._wait_for_completion(self._help_wait_for_status, - args, retries) - if NOTFOUND_STATUS in vol_info['string']: - # Volume no longer present on the backend. - LOG.info("Successfully deleted %s.", volume['name']) - return - # If we come here it means the volume is still present - # on the backend. - LOG.error("Timed out deleting %s!", volume['name']) - return - - def delete_volume(self, volume): - """Delete specified volume""" - LOG.debug("X-IO delete_volume called.") - self._delete_volume(volume) - - def delete_snapshot(self, snapshot): - """Delete snapshot""" - LOG.debug("X-IO delete_snapshot called.") - # Delete snapshot and delete volume is identical to ISE. - self._delete_volume(snapshot) - - def _modify_volume(self, volume, new_attributes): - # Get volume status. Location string for volume comes back - # in response. Used for PUT call below. - vol_info = self._get_volume_info(volume['name']) - if vol_info['location'] == '': - LOG.error("modify volume: %s does not exist!", volume['name']) - RaiseXIODriverException() - # Make modify volume REST call using PUT. - # Location from above is used as identifier. 
- resp = self._send_cmd('PUT', vol_info['location'], new_attributes) - status = resp['status'] - if status == 201: - LOG.debug("Volume %s modified.", volume['name']) - return True - LOG.error("Modify volume PUT failed: %(name)s (%(status)d).", - {'name': volume['name'], 'status': status}) - RaiseXIODriverException() - - def extend_volume(self, volume, new_size): - """Extend volume to new size.""" - LOG.debug("extend_volume called") - ret = self._modify_volume(volume, {'size': new_size}) - if ret is True: - LOG.info("volume %(name)s extended to %(size)d.", - {'name': volume['name'], 'size': new_size}) - return - - def retype(self, ctxt, volume, new_type, diff, host): - """Convert the volume to be of the new type.""" - LOG.debug("X-IO retype called") - qos = self._get_qos_specs(ctxt, new_type['id']) - ret = self._modify_volume(volume, {'IOPSmin': qos['minIOPS'], - 'IOPSmax': qos['maxIOPS'], - 'IOPSburst': qos['burstIOPS']}) - if ret is True: - LOG.info("Volume %s retyped.", volume['name']) - return True - - def manage_existing(self, volume, ise_volume_ref): - """Convert an existing ISE volume to a Cinder volume.""" - LOG.debug("X-IO manage_existing called") - if 'source-name' not in ise_volume_ref: - LOG.error("manage_existing: No source-name in ref!") - RaiseXIODriverException() - # copy the source-name to 'name' for modify volume use - ise_volume_ref['name'] = ise_volume_ref['source-name'] - ctxt = context.get_admin_context() - qos = self._get_qos_specs(ctxt, volume['volume_type_id']) - ret = self._modify_volume(ise_volume_ref, - {'name': volume['name'], - 'IOPSmin': qos['minIOPS'], - 'IOPSmax': qos['maxIOPS'], - 'IOPSburst': qos['burstIOPS']}) - if ret is True: - LOG.info("Volume %s converted.", ise_volume_ref['name']) - return ret - - def manage_existing_get_size(self, volume, ise_volume_ref): - """Get size of an existing ISE volume.""" - LOG.debug("X-IO manage_existing_get_size called") - if 'source-name' not in ise_volume_ref: - 
LOG.error("manage_existing_get_size: No source-name in ref!") - RaiseXIODriverException() - ref_name = ise_volume_ref['source-name'] - # get volume status including size - vol_info = self._get_volume_info(ref_name) - if vol_info['location'] == '': - LOG.error("manage_existing_get_size: %s does not exist!", - ref_name) - RaiseXIODriverException() - return int(vol_info['size']) - - def unmanage(self, volume): - """Remove Cinder management from ISE volume""" - LOG.debug("X-IO unmanage called") - vol_info = self._get_volume_info(volume['name']) - if vol_info['location'] == '': - LOG.error("unmanage: Volume %s does not exist!", - volume['name']) - RaiseXIODriverException() - # This is a noop. ISE does not store any Cinder specific information. - - def ise_present(self, volume, hostname_in, endpoints): - """Set up presentation for volume and specified connector""" - LOG.debug("X-IO ise_present called.") - # Create host entry on ISE if necessary. - # Check to see if host entry already exists. - # Create if not found - host = self._find_host(endpoints) - if host['name'] == '': - # host not found, so create new host entry - # Use host name if filled in. If blank, ISE will make up a name. - self._create_host(hostname_in, endpoints) - host = self._find_host(endpoints) - if host['name'] == '': - # host still not found, this is fatal. - LOG.error("Host could not be found!") - RaiseXIODriverException() - elif host['type'].upper() != 'OPENSTACK': - # Make sure host type is marked as OpenStack host - params = {'os': 'openstack'} - resp = self._send_cmd('PUT', host['locator'], params) - status = resp['status'] - if status != 201 and status != 409: - LOG.error("Host PUT failed (%s).", status) - RaiseXIODriverException() - # We have a host object. - target_lun = '' - # Present volume to host. - target_lun = self._present_volume(volume, host['name'], target_lun) - # Fill in target information. 
- data = {} - data['target_lun'] = int(target_lun) - data['volume_id'] = volume['id'] - return data - - def ise_unpresent(self, volume, endpoints): - """Delete presentation between volume and connector""" - LOG.debug("X-IO ise_unpresent called.") - # Delete allocation uses host name. Go find it based on endpoints. - host = self._find_host(endpoints) - if host['name'] != '': - # Delete allocation based on hostname and volume. - self._alloc_location(volume, host['name'], 1) - return host['name'] - - def create_export(self, context, volume): - LOG.debug("X-IO create_export called.") - - def ensure_export(self, context, volume): - LOG.debug("X-IO ensure_export called.") - - def remove_export(self, context, volume): - LOG.debug("X-IO remove_export called.") - - def local_path(self, volume): - LOG.debug("X-IO local_path called.") - - def delete_host(self, endpoints): - """Delete ISE host object""" - host = self._find_host(endpoints) - if host['locator'] != '': - # Delete host - self._send_cmd('DELETE', host['locator']) - LOG.debug("X-IO: host %s deleted", host['name']) - - -# Protocol specific classes for entry. They are wrappers around base class -# above and every external API resuslts in a call to common function in base -# class. -@interface.volumedriver -class XIOISEISCSIDriver(driver.ISCSIDriver): - - """Requires ISE Running FW version 3.1.0 or higher""" - - # ThirdPartySystems wiki page - CI_WIKI_NAME = 'X-IO_technologies_CI' - VERSION = XIOISEDriver.VERSION - - def __init__(self, *args, **kwargs): - super(XIOISEISCSIDriver, self).__init__(*args, **kwargs) - self.configuration.append_config_values(XIO_OPTS) - self.configuration.append_config_values(san.san_opts) - - # The iscsi_ip_address must always be set. 
- if self.configuration.iscsi_ip_address == '': - LOG.error("iscsi_ip_address must be set!") - RaiseXIODriverException() - # Setup common driver - self.driver = XIOISEDriver(configuration=self.configuration) - - def do_setup(self, context): - return self.driver.do_setup(context) - - def check_for_setup_error(self): - return self.driver.check_for_setup_error() - - def local_path(self, volume): - return self.driver.local_path(volume) - - def get_volume_stats(self, refresh=False): - data = self.driver.get_volume_stats(refresh) - data["storage_protocol"] = 'iSCSI' - return data - - def create_volume(self, volume): - self.driver.create_volume(volume) - # Volume created successfully. Fill in CHAP information. - model_update = {} - chap = self.driver.find_target_chap() - if chap['chap_user'] != '': - model_update['provider_auth'] = 'CHAP %s %s' % \ - (chap['chap_user'], chap['chap_passwd']) - else: - model_update['provider_auth'] = '' - return model_update - - def create_cloned_volume(self, volume, src_vref): - return self.driver.create_cloned_volume(volume, src_vref) - - def create_volume_from_snapshot(self, volume, snapshot): - return self.driver.create_volume_from_snapshot(volume, snapshot) - - def delete_volume(self, volume): - return self.driver.delete_volume(volume) - - def extend_volume(self, volume, new_size): - return self.driver.extend_volume(volume, new_size) - - def retype(self, ctxt, volume, new_type, diff, host): - return self.driver.retype(ctxt, volume, new_type, diff, host) - - def manage_existing(self, volume, ise_volume_ref): - ret = self.driver.manage_existing(volume, ise_volume_ref) - if ret is True: - # Volume converted successfully. Fill in CHAP information. 
- model_update = {} - chap = {} - chap = self.driver.find_target_chap() - if chap['chap_user'] != '': - model_update['provider_auth'] = 'CHAP %s %s' % \ - (chap['chap_user'], chap['chap_passwd']) - else: - model_update['provider_auth'] = '' - return model_update - - def manage_existing_get_size(self, volume, ise_volume_ref): - return self.driver.manage_existing_get_size(volume, ise_volume_ref) - - def unmanage(self, volume): - return self.driver.unmanage(volume) - - def initialize_connection(self, volume, connector): - hostname = '' - if 'host' in connector: - hostname = connector['host'] - data = self.driver.ise_present(volume, hostname, - connector['initiator']) - # find IP for target - data['target_portal'] = \ - '%s:3260' % (self.configuration.iscsi_ip_address) - # set IQN for target - data['target_discovered'] = False - data['target_iqn'] = \ - self.driver.find_target_iqn(self.configuration.iscsi_ip_address) - # Fill in authentication method (CHAP) - if 'provider_auth' in volume: - auth = volume['provider_auth'] - if auth: - (auth_method, auth_username, auth_secret) = auth.split() - data['auth_method'] = auth_method - data['auth_username'] = auth_username - data['auth_password'] = auth_secret - return {'driver_volume_type': 'iscsi', - 'data': data} - - def terminate_connection(self, volume, connector, **kwargs): - hostname = self.driver.ise_unpresent(volume, connector['initiator']) - alloc_cnt = 0 - if hostname != '': - alloc_cnt = self.driver.find_allocations(hostname) - if alloc_cnt == 0: - # delete host object - self.driver.delete_host(connector['initiator']) - - def create_snapshot(self, snapshot): - return self.driver.create_snapshot(snapshot) - - def delete_snapshot(self, snapshot): - return self.driver.delete_snapshot(snapshot) - - def create_export(self, context, volume, connector): - return self.driver.create_export(context, volume) - - def ensure_export(self, context, volume): - return self.driver.ensure_export(context, volume) - - def 
remove_export(self, context, volume): - return self.driver.remove_export(context, volume) - - -@interface.volumedriver -class XIOISEFCDriver(driver.FibreChannelDriver): - - """Requires ISE Running FW version 2.8.0 or higher""" - - # ThirdPartySystems wiki page - CI_WIKI_NAME = 'X-IO_technologies_CI' - VERSION = XIOISEDriver.VERSION - - def __init__(self, *args, **kwargs): - super(XIOISEFCDriver, self).__init__(*args, **kwargs) - self.configuration.append_config_values(XIO_OPTS) - self.configuration.append_config_values(san.san_opts) - self.driver = XIOISEDriver(configuration=self.configuration) - - def do_setup(self, context): - return self.driver.do_setup(context) - - def check_for_setup_error(self): - return self.driver.check_for_setup_error() - - def local_path(self, volume): - return self.driver.local_path(volume) - - def get_volume_stats(self, refresh=False): - data = self.driver.get_volume_stats(refresh) - data["storage_protocol"] = 'fibre_channel' - return data - - def create_volume(self, volume): - return self.driver.create_volume(volume) - - def create_cloned_volume(self, volume, src_vref): - return self.driver.create_cloned_volume(volume, src_vref) - - def create_volume_from_snapshot(self, volume, snapshot): - return self.driver.create_volume_from_snapshot(volume, snapshot) - - def delete_volume(self, volume): - return self.driver.delete_volume(volume) - - def extend_volume(self, volume, new_size): - return self.driver.extend_volume(volume, new_size) - - def retype(self, ctxt, volume, new_type, diff, host): - return self.driver.retype(ctxt, volume, new_type, diff, host) - - def manage_existing(self, volume, ise_volume_ref): - return self.driver.manage_existing(volume, ise_volume_ref) - - def manage_existing_get_size(self, volume, ise_volume_ref): - return self.driver.manage_existing_get_size(volume, ise_volume_ref) - - def unmanage(self, volume): - return self.driver.unmanage(volume) - - @fczm_utils.add_fc_zone - def initialize_connection(self, volume, 
connector): - hostname = '' - if 'host' in connector: - hostname = connector['host'] - data = self.driver.ise_present(volume, hostname, connector['wwpns']) - data['target_discovered'] = True - # set wwns for target - target_wwns = self.driver.find_target_wwns() - data['target_wwn'] = target_wwns - # build target initiator map - target_map = {} - for initiator in connector['wwpns']: - target_map[initiator] = target_wwns - data['initiator_target_map'] = target_map - return {'driver_volume_type': 'fibre_channel', - 'data': data} - - @fczm_utils.remove_fc_zone - def terminate_connection(self, volume, connector, **kwargs): - # now we are ready to tell ISE to delete presentations - hostname = self.driver.ise_unpresent(volume, connector['wwpns']) - # set target_wwn and initiator_target_map only if host - # has no more presentations - data = {} - alloc_cnt = 0 - if hostname != '': - alloc_cnt = self.driver.find_allocations(hostname) - if alloc_cnt == 0: - target_wwns = self.driver.find_target_wwns() - data['target_wwn'] = target_wwns - # build target initiator map - target_map = {} - for initiator in connector['wwpns']: - target_map[initiator] = target_wwns - data['initiator_target_map'] = target_map - # delete host object - self.driver.delete_host(connector['wwpns']) - - return {'driver_volume_type': 'fibre_channel', - 'data': data} - - def create_snapshot(self, snapshot): - return self.driver.create_snapshot(snapshot) - - def delete_snapshot(self, snapshot): - return self.driver.delete_snapshot(snapshot) - - def create_export(self, context, volume, connector): - return self.driver.create_export(context, volume) - - def ensure_export(self, context, volume): - return self.driver.ensure_export(context, volume) - - def remove_export(self, context, volume): - return self.driver.remove_export(context, volume) diff --git a/cinder/volume/drivers/zadara.py b/cinder/volume/drivers/zadara.py deleted file mode 100644 index 3b021ac2e..000000000 --- a/cinder/volume/drivers/zadara.py 
+++ /dev/null @@ -1,704 +0,0 @@ -# Copyright (c) 2016 Zadara Storage, Inc. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -""" -Volume driver for Zadara Virtual Private Storage Array (VPSA). - -This driver requires VPSA with API version 15.07 or higher. -""" - -from lxml import etree -from oslo_config import cfg -from oslo_log import log as logging -import requests -import six - -from cinder import exception -from cinder.i18n import _ -from cinder import interface -from cinder.volume import configuration -from cinder.volume import driver - -LOG = logging.getLogger(__name__) - -zadara_opts = [ - cfg.BoolOpt('zadara_use_iser', - default=True, - help='VPSA - Use ISER instead of iSCSI'), - cfg.StrOpt('zadara_vpsa_host', - default=None, - help='VPSA - Management Host name or IP address'), - cfg.PortOpt('zadara_vpsa_port', - default=None, - help='VPSA - Port number'), - cfg.BoolOpt('zadara_vpsa_use_ssl', - default=False, - help='VPSA - Use SSL connection'), - cfg.BoolOpt('zadara_ssl_cert_verify', - default=True, - help='If set to True the http client will validate the SSL ' - 'certificate of the VPSA endpoint.'), - cfg.StrOpt('zadara_user', - default=None, - help='VPSA - Username'), - cfg.StrOpt('zadara_password', - default=None, - help='VPSA - Password', - secret=True), - cfg.StrOpt('zadara_vpsa_poolname', - default=None, - help='VPSA - Storage Pool assigned for volumes'), - cfg.BoolOpt('zadara_vol_encrypt', - default=False, - help='VPSA - Default 
encryption policy for volumes'), - cfg.StrOpt('zadara_vol_name_template', - default='OS_%s', - help='VPSA - Default template for VPSA volume names'), - cfg.BoolOpt('zadara_default_snap_policy', - default=False, - help="VPSA - Attach snapshot policy for volumes")] - -CONF = cfg.CONF -CONF.register_opts(zadara_opts, group=configuration.SHARED_CONF_GROUP) - - -class ZadaraVPSAConnection(object): - """Executes volume driver commands on VPSA.""" - - def __init__(self, conf): - self.conf = conf - self.access_key = None - - self.ensure_connection() - - def _generate_vpsa_cmd(self, cmd, **kwargs): - """Generate command to be sent to VPSA.""" - - def _joined_params(params): - param_str = [] - for k, v in params.items(): - param_str.append("%s=%s" % (k, v)) - return '&'.join(param_str) - - # Dictionary of applicable VPSA commands in the following format: - # 'command': (method, API_URL, {optional parameters}) - vpsa_commands = { - 'login': ('POST', - '/api/users/login.xml', - {'user': self.conf.zadara_user, - 'password': self.conf.zadara_password}), - - # Volume operations - 'create_volume': ('POST', - '/api/volumes.xml', - {'name': kwargs.get('name'), - 'capacity': kwargs.get('size'), - 'pool': self.conf.zadara_vpsa_poolname, - 'thin': 'YES', - 'crypt': 'YES' - if self.conf.zadara_vol_encrypt else 'NO', - 'attachpolicies': 'NO' - if not self.conf.zadara_default_snap_policy - else 'YES'}), - 'delete_volume': ('DELETE', - '/api/volumes/%s.xml' % kwargs.get('vpsa_vol'), - {'force': 'YES'}), - 'expand_volume': ('POST', - '/api/volumes/%s/expand.xml' - % kwargs.get('vpsa_vol'), - {'capacity': kwargs.get('size')}), - - # Snapshot operations - # Snapshot request is triggered for a single volume though the - # API call implies that snapshot is triggered for CG (legacy API). 
- 'create_snapshot': ('POST', - '/api/consistency_groups/%s/snapshots.xml' - % kwargs.get('cg_name'), - {'display_name': kwargs.get('snap_name')}), - 'delete_snapshot': ('DELETE', - '/api/snapshots/%s.xml' - % kwargs.get('snap_id'), - {}), - - 'create_clone_from_snap': ('POST', - '/api/consistency_groups/%s/clone.xml' - % kwargs.get('cg_name'), - {'name': kwargs.get('name'), - 'snapshot': kwargs.get('snap_id')}), - - 'create_clone': ('POST', - '/api/consistency_groups/%s/clone.xml' - % kwargs.get('cg_name'), - {'name': kwargs.get('name')}), - - # Server operations - 'create_server': ('POST', - '/api/servers.xml', - {'display_name': kwargs.get('initiator'), - 'iqn': kwargs.get('initiator')}), - - # Attach/Detach operations - 'attach_volume': ('POST', - '/api/servers/%s/volumes.xml' - % kwargs.get('vpsa_srv'), - {'volume_name[]': kwargs.get('vpsa_vol'), - 'force': 'NO'}), - 'detach_volume': ('POST', - '/api/volumes/%s/detach.xml' - % kwargs.get('vpsa_vol'), - {'server_name[]': kwargs.get('vpsa_srv'), - 'force': 'NO'}), - - # Get operations - 'list_volumes': ('GET', - '/api/volumes.xml', - {}), - 'list_pools': ('GET', - '/api/pools.xml', - {}), - 'list_controllers': ('GET', - '/api/vcontrollers.xml', - {}), - 'list_servers': ('GET', - '/api/servers.xml', - {}), - 'list_vol_attachments': ('GET', - '/api/volumes/%s/servers.xml' - % kwargs.get('vpsa_vol'), - {}), - 'list_vol_snapshots': ('GET', - '/api/consistency_groups/%s/snapshots.xml' - % kwargs.get('cg_name'), - {})} - - if cmd not in vpsa_commands: - raise exception.UnknownCmd(cmd=cmd) - else: - (method, url, params) = vpsa_commands[cmd] - - if method == 'GET': - # For GET commands add parameters to the URL - params.update(dict(access_key=self.access_key, - page=1, start=0, limit=0)) - url += '?' + _joined_params(params) - body = '' - - elif method == 'DELETE': - # For DELETE commands add parameters to the URL - params.update(dict(access_key=self.access_key)) - url += '?' 
+ _joined_params(params) - body = '' - - elif method == 'POST': - if self.access_key: - params.update(dict(access_key=self.access_key)) - body = _joined_params(params) - - else: - msg = (_('Method %(method)s is not defined') % - {'method': method}) - LOG.error(msg) - raise AssertionError(msg) - - return (method, url, body) - - def ensure_connection(self, cmd=None): - """Retrieve access key for VPSA connection.""" - - if self.access_key or cmd == 'login': - return - - cmd = 'login' - xml_tree = self.send_cmd(cmd) - user = xml_tree.find('user') - if user is None: - raise (exception.MalformedResponse(cmd=cmd, - reason=_('no "user" field'))) - access_key = user.findtext('access-key') - if access_key is None: - raise (exception.MalformedResponse(cmd=cmd, - reason=_('no "access-key" field'))) - self.access_key = access_key - - def send_cmd(self, cmd, **kwargs): - """Send command to VPSA Controller.""" - - self.ensure_connection(cmd) - - (method, url, body) = self._generate_vpsa_cmd(cmd, **kwargs) - LOG.debug('Invoking %(cmd)s using %(method)s request.', - {'cmd': cmd, 'method': method}) - - host = self.conf.zadara_vpsa_host - port = int(self.conf.zadara_vpsa_port) - - protocol = "https" if self.conf.zadara_vpsa_use_ssl else "http" - if protocol == "https": - if not self.conf.zadara_ssl_cert_verify: - verify = False - else: - cert = ((self.conf.driver_ssl_cert_path) or None) - verify = cert if cert else True - else: - verify = False - - if port: - api_url = "%s://%s:%d%s" % (protocol, host, port, url) - else: - api_url = "%s://%s%s" % (protocol, host, url) - - try: - response = requests.request(method, api_url, data=body, - verify=verify) - except requests.exceptions.RequestException as e: - message = (_('Exception: %s') % six.text_type(e)) - raise exception.VolumeDriverException(message=message) - - if response.status_code != 200: - raise exception.BadHTTPResponseStatus(status=response.status_code) - - data = response.content - xml_tree = etree.fromstring(data) - status 
= xml_tree.findtext('status') - if status != '0': - raise exception.FailedCmdWithDump(status=status, data=data) - - if method in ['POST', 'DELETE']: - LOG.debug('Operation completed with status code %(status)s', - {'status': status}) - return xml_tree - - -@interface.volumedriver -class ZadaraVPSAISCSIDriver(driver.ISCSIDriver): - """Zadara VPSA iSCSI/iSER volume driver. - - Version history: - 15.07 - Initial driver - 16.05 - Move from httplib to requests - """ - - VERSION = '16.05' - - # ThirdPartySystems wiki page - CI_WIKI_NAME = "ZadaraStorage_VPSA_CI" - - def __init__(self, *args, **kwargs): - super(ZadaraVPSAISCSIDriver, self).__init__(*args, **kwargs) - self.configuration.append_config_values(zadara_opts) - - def do_setup(self, context): - """Any initialization the volume driver does while starting. - - Establishes initial connection with VPSA and retrieves access_key. - """ - self.vpsa = ZadaraVPSAConnection(self.configuration) - - def check_for_setup_error(self): - """Returns an error (exception) if prerequisites aren't met.""" - self.vpsa.ensure_connection() - - def local_path(self, volume): - """Return local path to existing local volume.""" - raise NotImplementedError() - - def _xml_parse_helper(self, xml_tree, first_level, search_tuple, - first=True): - """Helper for parsing VPSA's XML output. - - Returns single item if first==True or list for multiple selection. - If second argument in search_tuple is None - returns all items with - appropriate key. 
- """ - - objects = xml_tree.find(first_level) - if objects is None: - return None - - result_list = [] - (key, value) = search_tuple - for object in objects.getchildren(): - found_value = object.findtext(key) - if found_value and (found_value == value or value is None): - if first: - return object - else: - result_list.append(object) - return result_list if result_list else None - - def _get_vpsa_volume_name_and_size(self, name): - """Return VPSA's name & size for the volume.""" - xml_tree = self.vpsa.send_cmd('list_volumes') - volume = self._xml_parse_helper(xml_tree, 'volumes', - ('display-name', name)) - if volume is not None: - return (volume.findtext('name'), - int(volume.findtext('virtual-capacity'))) - - return (None, None) - - def _get_vpsa_volume_name(self, name): - """Return VPSA's name for the volume.""" - (vol_name, size) = self._get_vpsa_volume_name_and_size(name) - return vol_name - - def _get_volume_cg_name(self, name): - """Return name of the consistency group for the volume. - - cg-name is a volume uniqe identifier (legacy attribute) - and not consistency group as it may imply. 
- """ - xml_tree = self.vpsa.send_cmd('list_volumes') - volume = self._xml_parse_helper(xml_tree, 'volumes', - ('display-name', name)) - if volume is not None: - return volume.findtext('cg-name') - - return None - - def _get_snap_id(self, cg_name, snap_name): - """Return snapshot ID for particular volume.""" - xml_tree = self.vpsa.send_cmd('list_vol_snapshots', - cg_name=cg_name) - snap = self._xml_parse_helper(xml_tree, 'snapshots', - ('display-name', snap_name)) - if snap is not None: - return snap.findtext('name') - - return None - - def _get_pool_capacity(self, pool_name): - """Return pool's total and available capacities.""" - xml_tree = self.vpsa.send_cmd('list_pools') - pool = self._xml_parse_helper(xml_tree, 'pools', - ('name', pool_name)) - if pool is not None: - total = int(pool.findtext('capacity')) - free = int(float(pool.findtext('available-capacity'))) - LOG.debug('Pool %(name)s: %(total)sGB total, %(free)sGB free', - {'name': pool_name, 'total': total, 'free': free}) - return (total, free) - - return ('unknown', 'unknown') - - def _get_active_controller_details(self): - """Return details of VPSA's active controller.""" - xml_tree = self.vpsa.send_cmd('list_controllers') - ctrl = self._xml_parse_helper(xml_tree, 'vcontrollers', - ('state', 'active')) - if ctrl is not None: - return dict(target=ctrl.findtext('target'), - ip=ctrl.findtext('iscsi-ip'), - chap_user=ctrl.findtext('vpsa-chap-user'), - chap_passwd=ctrl.findtext('vpsa-chap-secret')) - return None - - def _get_server_name(self, initiator): - """Return VPSA's name for server object with given IQN.""" - xml_tree = self.vpsa.send_cmd('list_servers') - server = self._xml_parse_helper(xml_tree, 'servers', - ('iqn', initiator)) - if server is not None: - return server.findtext('name') - return None - - def _create_vpsa_server(self, initiator): - """Create server object within VPSA (if doesn't exist).""" - vpsa_srv = self._get_server_name(initiator) - if not vpsa_srv: - xml_tree = 
self.vpsa.send_cmd('create_server', initiator=initiator) - vpsa_srv = xml_tree.findtext('server-name') - return vpsa_srv - - def create_volume(self, volume): - """Create volume.""" - self.vpsa.send_cmd( - 'create_volume', - name=self.configuration.zadara_vol_name_template % volume['name'], - size=volume['size']) - - def delete_volume(self, volume): - """Delete volume. - - Return ok if doesn't exist. Auto detach from all servers. - """ - # Get volume name - name = self.configuration.zadara_vol_name_template % volume['name'] - vpsa_vol = self._get_vpsa_volume_name(name) - if not vpsa_vol: - LOG.warning('Volume %s could not be found. ' - 'It might be already deleted', name) - return - - # Check attachment info and detach from all - xml_tree = self.vpsa.send_cmd('list_vol_attachments', - vpsa_vol=vpsa_vol) - servers = self._xml_parse_helper(xml_tree, 'servers', - ('iqn', None), first=False) - if servers: - for server in servers: - vpsa_srv = server.findtext('name') - if vpsa_srv: - self.vpsa.send_cmd('detach_volume', - vpsa_srv=vpsa_srv, - vpsa_vol=vpsa_vol) - - # Delete volume - self.vpsa.send_cmd('delete_volume', vpsa_vol=vpsa_vol) - - def create_snapshot(self, snapshot): - """Creates a snapshot.""" - - LOG.debug('Create snapshot: %s', snapshot['name']) - - # Retrieve the CG name for the base volume - volume_name = (self.configuration.zadara_vol_name_template - % snapshot['volume_name']) - cg_name = self._get_volume_cg_name(volume_name) - if not cg_name: - msg = _('Volume %(name)s not found') % {'name': volume_name} - LOG.error(msg) - raise exception.VolumeDriverException(message=msg) - - self.vpsa.send_cmd('create_snapshot', - cg_name=cg_name, - snap_name=snapshot['name']) - - def delete_snapshot(self, snapshot): - """Deletes a snapshot.""" - - LOG.debug('Delete snapshot: %s', snapshot['name']) - - # Retrieve the CG name for the base volume - volume_name = (self.configuration.zadara_vol_name_template - % snapshot['volume_name']) - cg_name = 
self._get_volume_cg_name(volume_name) - if not cg_name: - # If the volume isn't present, then don't attempt to delete - LOG.warning('snapshot: original volume %s not found, ' - 'skipping delete operation', - volume_name) - return - - snap_id = self._get_snap_id(cg_name, snapshot['name']) - if not snap_id: - # If the snapshot isn't present, then don't attempt to delete - LOG.warning('snapshot: snapshot %s not found, ' - 'skipping delete operation', snapshot['name']) - return - - self.vpsa.send_cmd('delete_snapshot', - snap_id=snap_id) - - def create_volume_from_snapshot(self, volume, snapshot): - """Creates a volume from a snapshot.""" - - LOG.debug('Creating volume from snapshot: %s', snapshot['name']) - - # Retrieve the CG name for the base volume - volume_name = (self.configuration.zadara_vol_name_template - % snapshot['volume_name']) - cg_name = self._get_volume_cg_name(volume_name) - if not cg_name: - LOG.error('Volume %(name)s not found', {'name': volume_name}) - raise exception.VolumeNotFound(volume_id=volume['id']) - - snap_id = self._get_snap_id(cg_name, snapshot['name']) - if not snap_id: - LOG.error('Snapshot %(name)s not found', - {'name': snapshot['name']}) - raise exception.SnapshotNotFound(snapshot_id=snapshot['id']) - - self.vpsa.send_cmd('create_clone_from_snap', - cg_name=cg_name, - name=self.configuration.zadara_vol_name_template - % volume['name'], - snap_id=snap_id) - - if (volume['size'] > snapshot['volume_size']): - self.extend_volume(volume, volume['size']) - - def create_cloned_volume(self, volume, src_vref): - """Creates a clone of the specified volume.""" - - LOG.debug('Creating clone of volume: %s', src_vref['name']) - - # Retrieve the CG name for the base volume - volume_name = (self.configuration.zadara_vol_name_template - % src_vref['name']) - cg_name = self._get_volume_cg_name(volume_name) - if not cg_name: - LOG.error('Volume %(name)s not found', {'name': volume_name}) - raise exception.VolumeNotFound(volume_id=volume['id']) - - 
self.vpsa.send_cmd('create_clone', - cg_name=cg_name, - name=self.configuration.zadara_vol_name_template - % volume['name']) - - if (volume['size'] > src_vref['size']): - self.extend_volume(volume, volume['size']) - - def extend_volume(self, volume, new_size): - """Extend an existing volume.""" - # Get volume name - name = self.configuration.zadara_vol_name_template % volume['name'] - (vpsa_vol, size) = self._get_vpsa_volume_name_and_size(name) - if not vpsa_vol: - msg = (_('Volume %(name)s could not be found. ' - 'It might be already deleted') % {'name': name}) - LOG.error(msg) - raise exception.ZadaraVolumeNotFound(reason=msg) - - if new_size < size: - raise exception.InvalidInput( - reason=_('%(new_size)s < current size %(size)s') % - {'new_size': new_size, 'size': size}) - - expand_size = new_size - size - self.vpsa.send_cmd('expand_volume', - vpsa_vol=vpsa_vol, - size=expand_size) - - def create_export(self, context, volume, vg=None): - """Irrelevant for VPSA volumes. Export created during attachment.""" - pass - - def ensure_export(self, context, volume): - """Irrelevant for VPSA volumes. Export created during attachment.""" - pass - - def remove_export(self, context, volume): - """Irrelevant for VPSA volumes. Export removed during detach.""" - pass - - def initialize_connection(self, volume, connector): - """Attach volume to initiator/host. - - During this call VPSA exposes volume to particular Initiator. It also - creates a 'server' entity for Initiator (if it was not created before) - - All necessary connection information is returned, including auth data. - Connection data (target, LUN) is not stored in the DB. 
- """ - - # Get/Create server name for IQN - initiator_name = connector['initiator'] - vpsa_srv = self._create_vpsa_server(initiator_name) - if not vpsa_srv: - raise exception.ZadaraServerCreateFailure(name=initiator_name) - - # Get volume name - name = self.configuration.zadara_vol_name_template % volume['name'] - vpsa_vol = self._get_vpsa_volume_name(name) - if not vpsa_vol: - raise exception.VolumeNotFound(volume_id=volume['id']) - - # Get Active controller details - ctrl = self._get_active_controller_details() - if not ctrl: - raise exception.ZadaraVPSANoActiveController() - - xml_tree = self.vpsa.send_cmd('list_vol_attachments', - vpsa_vol=vpsa_vol) - attach = self._xml_parse_helper(xml_tree, 'servers', - ('name', vpsa_srv)) - # Attach volume to server - if attach is None: - self.vpsa.send_cmd('attach_volume', - vpsa_srv=vpsa_srv, - vpsa_vol=vpsa_vol) - # Get connection info - xml_tree = self.vpsa.send_cmd('list_vol_attachments', - vpsa_vol=vpsa_vol) - server = self._xml_parse_helper(xml_tree, 'servers', - ('iqn', initiator_name)) - if server is None: - raise exception.ZadaraAttachmentsNotFound(name=name) - target = server.findtext('target') - lun = int(server.findtext('lun')) - if target is None or lun is None: - raise exception.ZadaraInvalidAttachmentInfo( - name=name, - reason=_('target=%(target)s, lun=%(lun)s') % - {'target': target, 'lun': lun}) - - properties = {} - properties['target_discovered'] = False - properties['target_portal'] = '%s:%s' % (ctrl['ip'], '3260') - properties['target_iqn'] = target - properties['target_lun'] = lun - properties['volume_id'] = volume['id'] - properties['auth_method'] = 'CHAP' - properties['auth_username'] = ctrl['chap_user'] - properties['auth_password'] = ctrl['chap_passwd'] - - LOG.debug('Attach properties: %(properties)s', - {'properties': properties}) - return {'driver_volume_type': - ('iser' if (self.configuration.safe_get('zadara_use_iser')) - else 'iscsi'), 'data': properties} - - def terminate_connection(self, 
volume, connector, **kwargs): - """Detach volume from the initiator.""" - # Get server name for IQN - initiator_name = connector['initiator'] - vpsa_srv = self._get_server_name(initiator_name) - if not vpsa_srv: - raise exception.ZadaraServerNotFound(name=initiator_name) - - # Get volume name - name = self.configuration.zadara_vol_name_template % volume['name'] - vpsa_vol = self._get_vpsa_volume_name(name) - if not vpsa_vol: - raise exception.VolumeNotFound(volume_id=volume['id']) - - # Detach volume from server - self.vpsa.send_cmd('detach_volume', - vpsa_srv=vpsa_srv, - vpsa_vol=vpsa_vol) - - def get_volume_stats(self, refresh=False): - """Get volume stats. - - If 'refresh' is True, run update the stats first. - """ - - if refresh: - self._update_volume_stats() - - return self._stats - - def _update_volume_stats(self): - """Retrieve stats info from volume group.""" - - LOG.debug("Updating volume stats") - data = {} - backend_name = self.configuration.safe_get('volume_backend_name') - storage_protocol = ('iSER' if - (self.configuration.safe_get('zadara_use_iser')) - else 'iSCSI') - data["volume_backend_name"] = backend_name or self.__class__.__name__ - data["vendor_name"] = 'Zadara Storage' - data["driver_version"] = self.VERSION - data["storage_protocol"] = storage_protocol - data['reserved_percentage'] = self.configuration.reserved_percentage - data['QoS_support'] = False - - (total, free) = self._get_pool_capacity(self.configuration. 
- zadara_vpsa_poolname) - data['total_capacity_gb'] = total - data['free_capacity_gb'] = free - - self._stats = data diff --git a/cinder/volume/drivers/zfssa/__init__.py b/cinder/volume/drivers/zfssa/__init__.py deleted file mode 100644 index e69de29bb..000000000 diff --git a/cinder/volume/drivers/zfssa/restclient.py b/cinder/volume/drivers/zfssa/restclient.py deleted file mode 100644 index d55e02acf..000000000 --- a/cinder/volume/drivers/zfssa/restclient.py +++ /dev/null @@ -1,376 +0,0 @@ -# Copyright (c) 2014, 2015, Oracle and/or its affiliates. All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
-""" -ZFS Storage Appliance REST API Client Programmatic Interface -""" - -import json -import ssl -import time - -from oslo_log import log -import six -from six.moves import http_client -from six.moves import urllib - -LOG = log.getLogger(__name__) - - -class Status(object): - """Result HTTP Status""" - - def __init__(self): - pass - - #: Request return OK - OK = http_client.OK - - #: New resource created successfully - CREATED = http_client.CREATED - - #: Command accepted - ACCEPTED = http_client.ACCEPTED - - #: Command returned OK but no data will be returned - NO_CONTENT = http_client.NO_CONTENT - - #: Bad Request - BAD_REQUEST = http_client.BAD_REQUEST - - #: User is not authorized - UNAUTHORIZED = http_client.UNAUTHORIZED - - #: The request is not allowed - FORBIDDEN = http_client.FORBIDDEN - - #: The requested resource was not found - NOT_FOUND = http_client.NOT_FOUND - - #: The request is not allowed - NOT_ALLOWED = http_client.METHOD_NOT_ALLOWED - - #: Request timed out - TIMEOUT = http_client.REQUEST_TIMEOUT - - #: Invalid request - CONFLICT = http_client.CONFLICT - - #: Service Unavailable - BUSY = http_client.SERVICE_UNAVAILABLE - - -class RestResult(object): - """Result from a REST API operation""" - def __init__(self, response=None, err=None): - """Initialize a RestResult containing the results from a REST call. 
- - :param response: HTTP response - """ - self.response = response - self.error = err - self.data = "" - self.status = 0 - if self.response: - self.status = self.response.getcode() - result = self.response.read() - while result: - self.data += result - result = self.response.read() - - if self.error: - self.status = self.error.code - self.data = http_client.responses[self.status] - - LOG.debug('Response code: %s', self.status) - LOG.debug('Response data: %s', self.data) - - def get_header(self, name): - """Get an HTTP header with the given name from the results - - :param name: HTTP header name - :return: The header value or None if no value is found - """ - if self.response is None: - return None - info = self.response.info() - return info.getheader(name) - - -class RestClientError(Exception): - """Exception for ZFS REST API client errors""" - def __init__(self, status, name="ERR_INTERNAL", message=None): - - """Create a REST Response exception - - :param status: HTTP response status - :param name: The name of the REST API error type - :param message: Descriptive error message returned from REST call - """ - super(RestClientError, self).__init__(message) - self.code = status - self.name = name - self.msg = message - if status in http_client.responses: - self.msg = http_client.responses[status] - - def __str__(self): - return "%d %s %s" % (self.code, self.name, self.msg) - - -class RestClientURL(object): - """ZFSSA urllib client""" - def __init__(self, url, **kwargs): - """Initialize a REST client. - - :param url: The ZFSSA REST API URL - :key session: HTTP Cookie value of x-auth-session obtained from a - normal BUI login. - :key timeout: Time in seconds to wait for command to complete. 
- (Default is 60 seconds) - """ - self.url = url - self.local = kwargs.get("local", False) - self.base_path = kwargs.get("base_path", "/api") - self.timeout = kwargs.get("timeout", 60) - self.headers = None - if kwargs.get('session'): - self.headers['x-auth-session'] = kwargs.get('session') - - self.headers = {"content-type": "application/json"} - self.do_logout = False - self.auth_str = None - - def _path(self, path, base_path=None): - """build rest url path""" - if path.startswith("http://") or path.startswith("https://"): - return path - if base_path is None: - base_path = self.base_path - if not path.startswith(base_path) and not ( - self.local and ("/api" + path).startswith(base_path)): - path = "%s%s" % (base_path, path) - if self.local and path.startswith("/api"): - path = path[4:] - return self.url + path - - def _authorize(self): - """Performs authorization setting x-auth-session""" - self.headers['authorization'] = 'Basic %s' % self.auth_str - if 'x-auth-session' in self.headers: - del self.headers['x-auth-session'] - - try: - result = self.post("/access/v1") - del self.headers['authorization'] - if result.status == http_client.CREATED: - self.headers['x-auth-session'] = \ - result.get_header('x-auth-session') - self.do_logout = True - LOG.info('ZFSSA version: %s', - result.get_header('x-zfssa-version')) - - elif result.status == http_client.NOT_FOUND: - raise RestClientError(result.status, name="ERR_RESTError", - message="REST Not Available: \ - Please Upgrade") - - except RestClientError: - del self.headers['authorization'] - raise - - def login(self, auth_str): - """Login to an appliance using a user name and password. - - Start a session like what is done logging into the BUI. This is not a - requirement to run REST commands, since the protocol is stateless. - What is does is set up a cookie session so that some server side - caching can be done. If login is used remember to call logout when - finished. 
- - :param auth_str: Authorization string (base64) - """ - self.auth_str = auth_str - self._authorize() - - def logout(self): - """Logout of an appliance""" - result = None - try: - result = self.delete("/access/v1", base_path="/api") - except RestClientError: - pass - - self.headers.clear() - self.do_logout = False - return result - - def islogin(self): - """return if client is login""" - return self.do_logout - - @staticmethod - def mkpath(*args, **kwargs): - """Make a path?query string for making a REST request - - :cmd_params args: The path part - :cmd_params kwargs: The query part - """ - buf = six.StringIO() - query = "?" - for arg in args: - buf.write("/") - buf.write(arg) - for k in kwargs: - buf.write(query) - if query == "?": - query = "&" - buf.write(k) - buf.write("=") - buf.write(kwargs[k]) - return buf.getvalue() - - def request(self, path, request, body=None, **kwargs): - """Make an HTTP request and return the results - - :param path: Path used with the initialized URL to make a request - :param request: HTTP request type (GET, POST, PUT, DELETE) - :param body: HTTP body of request - :key accept: Set HTTP 'Accept' header with this value - :key base_path: Override the base_path for this request - :key content: Set HTTP 'Content-Type' header with this value - """ - out_hdrs = dict.copy(self.headers) - if kwargs.get("accept"): - out_hdrs['accept'] = kwargs.get("accept") - - if body: - if isinstance(body, dict): - body = str(json.dumps(body)) - - if body and len(body): - out_hdrs['content-length'] = len(body) - - zfssaurl = self._path(path, kwargs.get("base_path")) - req = urllib.request.Request(zfssaurl, body, out_hdrs) - req.get_method = lambda: request - maxreqretries = kwargs.get("maxreqretries", 10) - retry = 0 - response = None - - LOG.debug('Request: %(request)s %(url)s', - {'request': request, 'url': zfssaurl}) - LOG.debug('Out headers: %s', out_hdrs) - if body and body != '': - LOG.debug('Body: %s', body) - - context = None - if hasattr(ssl, 
'_create_unverified_context'): - context = ssl._create_unverified_context() - else: - context = None - - while retry < maxreqretries: - try: - if context: - # only schemes that can be used will be http or https if it - # is given in the path variable, or the path will begin - # with the REST API location meaning invalid or unwanted - # schemes cannot be used - response = urllib.request.urlopen(req, # nosec - timeout=self.timeout, - context=context) - else: - response = urllib.request.urlopen(req, # nosec : see above - timeout=self.timeout) - except urllib.error.HTTPError as err: - if err.code == http_client.NOT_FOUND: - LOG.debug('REST Not Found: %s', err.code) - else: - LOG.error('REST Not Available: %s', err.code) - - if err.code == http_client.SERVICE_UNAVAILABLE and \ - retry < maxreqretries: - retry += 1 - time.sleep(1) - LOG.error('Server Busy retry request: %s', retry) - continue - if (err.code == http_client.UNAUTHORIZED or - err.code == http_client.INTERNAL_SERVER_ERROR) and \ - '/access/v1' not in zfssaurl: - try: - LOG.error('Authorizing request: %(zfssaurl)s ' - 'retry: %(retry)d.', - {'zfssaurl': zfssaurl, 'retry': retry}) - self._authorize() - req.add_header('x-auth-session', - self.headers['x-auth-session']) - except RestClientError: - pass - retry += 1 - time.sleep(1) - continue - - return RestResult(err=err) - - except urllib.error.URLError as err: - LOG.error('URLError: %s', err.reason) - raise RestClientError(-1, name="ERR_URLError", - message=err.reason) - - break - - if (response and - (response.getcode() == http_client.SERVICE_UNAVAILABLE and - retry >= maxreqretries)): - raise RestClientError(response.getcode(), name="ERR_HTTPError", - message="REST Not Available: Disabled") - - return RestResult(response=response) - - def get(self, path, **kwargs): - """Make an HTTP GET request - - :param path: Path to resource. 
- """ - return self.request(path, "GET", **kwargs) - - def post(self, path, body="", **kwargs): - """Make an HTTP POST request - - :param path: Path to resource. - :param body: Post data content - """ - return self.request(path, "POST", body, **kwargs) - - def put(self, path, body="", **kwargs): - """Make an HTTP PUT request - - :param path: Path to resource. - :param body: Put data content - """ - return self.request(path, "PUT", body, **kwargs) - - def delete(self, path, **kwargs): - """Make an HTTP DELETE request - - :param path: Path to resource that will be deleted. - """ - return self.request(path, "DELETE", **kwargs) - - def head(self, path, **kwargs): - """Make an HTTP HEAD request - - :param path: Path to resource. - """ - return self.request(path, "HEAD", **kwargs) diff --git a/cinder/volume/drivers/zfssa/webdavclient.py b/cinder/volume/drivers/zfssa/webdavclient.py deleted file mode 100644 index 9b04960f3..000000000 --- a/cinder/volume/drivers/zfssa/webdavclient.py +++ /dev/null @@ -1,155 +0,0 @@ -# Copyright (c) 2014, 2015, Oracle and/or its affiliates. All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -""" -ZFS Storage Appliance WebDAV Client -""" - -import time - -from oslo_log import log -from six.moves import http_client -from six.moves import urllib - -from cinder import exception -from cinder.i18n import _ - -LOG = log.getLogger(__name__) - -bad_gateway_err = _('Check the state of the http service. 
Also ensure that ' - 'the https port number is the same as the one specified ' - 'in cinder.conf.') - -WebDAVHTTPErrors = { - http_client.UNAUTHORIZED: _('User not authorized to perform WebDAV ' - 'operations.'), - http_client.BAD_GATEWAY: bad_gateway_err, - http_client.FORBIDDEN: _('Check access permissions for the ZFS share ' - 'assigned to this driver.'), - http_client.NOT_FOUND: _('The source volume for this WebDAV operation not ' - 'found.'), - http_client.INSUFFICIENT_STORAGE: _('Not enough storage space in the ZFS ' - 'share to perform this operation.') -} - -WebDAVErrors = { - 'BadStatusLine': _('http service may have been abruptly disabled or put ' - 'to maintenance state in the middle of this ' - 'operation.'), - 'Bad_Gateway': bad_gateway_err -} - -propertyupdate_data = """ - - - - prop_val - - - """ - - -class ZFSSAWebDAVClient(object): - def __init__(self, url, auth_str, **kwargs): - """Initialize WebDAV Client""" - self.https_path = url - self.auth_str = auth_str - - def _lookup_error(self, error): - msg = '' - if error in http_client.responses: - msg = http_client.responses[error] - - if error in WebDAVHTTPErrors: - msg = WebDAVHTTPErrors[error] - elif error in WebDAVErrors: - msg = WebDAVErrors[error] - - return msg - - def build_data(self, data, propname, value): - res = data.replace('prop_name', propname) - res = res.replace('prop_val', value) - return res - - def set_file_prop(self, filename, propname, propval): - data = self.build_data(propertyupdate_data, propname, propval) - return self.request(src_file=filename, data=data, method='PROPPATCH') - - def request(self, src_file="", dst_file="", method="", maxretries=10, - data=""): - retry = 0 - src_url = self.https_path + "/" + src_file - dst_url = self.https_path + "/" + dst_file - request = urllib.request.Request(url=src_url, data=data) - - if dst_file != "": - request.add_header('Destination', dst_url) - if method == "PROPPATCH": - request.add_header('Translate', 'F') - - 
request.add_header("Authorization", "Basic %s" % self.auth_str) - - request.get_method = lambda: method - - LOG.debug('Sending WebDAV request:%(method)s %(src)s %(des)s', - {'method': method, 'src': src_url, 'des': dst_url}) - - while retry < maxretries: - try: - # URLs are prepended with self.https_path which is safe - # meaning that the URL will either be safe or nonexistant - response = urllib.request.urlopen( # nosec - request, timeout=None) - except urllib.error.HTTPError as err: - LOG.error('WebDAV returned with %(code)s error during ' - '%(method)s call.', - {'code': err.code, 'method': method}) - - if err.code == http_client.INTERNAL_SERVER_ERROR: - LOG.error('WebDAV operation failed with error code: ' - '%(code)s reason: %(reason)s Retry attempt ' - '%(retry)s in progress.', - {'code': err.code, - 'reason': err.reason, - 'retry': retry}) - if retry < maxretries: - retry += 1 - time.sleep(1) - continue - - msg = self._lookup_error(err.code) - raise exception.WebDAVClientError(msg=msg, code=err.code, - src=src_file, dst=dst_file, - method=method) - - except http_client.BadStatusLine as err: - msg = self._lookup_error('BadStatusLine') - code = 'http_client.BadStatusLine' - raise exception.WebDAVClientError(msg=msg, - code=code, - src=src_file, dst=dst_file, - method=method) - - except urllib.error.URLError as err: - reason = '' - if getattr(err, 'reason'): - reason = err.reason - - msg = self._lookup_error('Bad_Gateway') - raise exception.WebDAVClientError(msg=msg, - code=reason, src=src_file, - dst=dst_file, method=method) - - break - return response diff --git a/cinder/volume/drivers/zfssa/zfssaiscsi.py b/cinder/volume/drivers/zfssa/zfssaiscsi.py deleted file mode 100644 index 6367935fb..000000000 --- a/cinder/volume/drivers/zfssa/zfssaiscsi.py +++ /dev/null @@ -1,1246 +0,0 @@ -# Copyright (c) 2014, 2017, Oracle and/or its affiliates. All rights reserved. 
-# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -""" -ZFS Storage Appliance Cinder Volume Driver -""" -import ast -import math - -from oslo_config import cfg -from oslo_log import log -from oslo_serialization import base64 -from oslo_utils import excutils -from oslo_utils import units -import six - -from cinder import exception -from cinder.i18n import _ -from cinder import interface -from cinder import utils -from cinder.volume import configuration -from cinder.volume import driver -from cinder.volume.drivers.san import san -from cinder.volume.drivers.zfssa import zfssarest -from cinder.volume import volume_types - -import taskflow.engines -from taskflow.patterns import linear_flow as lf -from taskflow import task - -CONF = cfg.CONF -LOG = log.getLogger(__name__) - -ZFSSA_OPTS = [ - cfg.StrOpt('zfssa_pool', - help='Storage pool name.'), - cfg.StrOpt('zfssa_project', - help='Project name.'), - cfg.StrOpt('zfssa_lun_volblocksize', default='8k', - choices=['512', '1k', '2k', '4k', '8k', '16k', '32k', '64k', - '128k'], - help='Block size.'), - cfg.BoolOpt('zfssa_lun_sparse', default=False, - help='Flag to enable sparse (thin-provisioned): True, False.'), - cfg.StrOpt('zfssa_lun_compression', default='off', - choices=['off', 'lzjb', 'gzip-2', 'gzip', 'gzip-9'], - help='Data compression.'), - cfg.StrOpt('zfssa_lun_logbias', default='latency', - choices=['latency', 'throughput'], - help='Synchronous write bias.'), - cfg.StrOpt('zfssa_initiator_group', default='', - 
help='iSCSI initiator group.'), - cfg.StrOpt('zfssa_initiator', default='', - help='iSCSI initiator IQNs. (comma separated)'), - cfg.StrOpt('zfssa_initiator_user', default='', - help='iSCSI initiator CHAP user (name).'), - cfg.StrOpt('zfssa_initiator_password', default='', - help='Secret of the iSCSI initiator CHAP user.', secret=True), - cfg.StrOpt('zfssa_initiator_config', default='', - help='iSCSI initiators configuration.'), - cfg.StrOpt('zfssa_target_group', default='tgt-grp', - help='iSCSI target group name.'), - cfg.StrOpt('zfssa_target_user', default='', - help='iSCSI target CHAP user (name).'), - cfg.StrOpt('zfssa_target_password', default='', secret=True, - help='Secret of the iSCSI target CHAP user.'), - cfg.StrOpt('zfssa_target_portal', - help='iSCSI target portal (Data-IP:Port, w.x.y.z:3260).'), - cfg.StrOpt('zfssa_target_interfaces', - help='Network interfaces of iSCSI targets. (comma separated)'), - cfg.IntOpt('zfssa_rest_timeout', - help='REST connection timeout. (seconds)'), - cfg.StrOpt('zfssa_replication_ip', default='', - help='IP address used for replication data. (maybe the same as ' - 'data ip)'), - cfg.BoolOpt('zfssa_enable_local_cache', default=True, - help='Flag to enable local caching: True, False.'), - cfg.StrOpt('zfssa_cache_project', default='os-cinder-cache', - help='Name of ZFSSA project where cache volumes are stored.'), - cfg.StrOpt('zfssa_manage_policy', default='loose', - choices=['loose', 'strict'], - help='Driver policy for volume manage.') -] - -CONF.register_opts(ZFSSA_OPTS, group=configuration.SHARED_CONF_GROUP) - -ZFSSA_LUN_SPECS = { - 'zfssa:volblocksize', - 'zfssa:sparse', - 'zfssa:compression', - 'zfssa:logbias', -} - - -def factory_zfssa(): - return zfssarest.ZFSSAApi() - - -@interface.volumedriver -class ZFSSAISCSIDriver(driver.ISCSIDriver): - """ZFSSA Cinder iSCSI volume driver. - - Version history: - - .. code-block:: none - - 1.0.1: - Backend enabled volume migration. - Local cache feature. 
- 1.0.2: - Volume manage/unmanage support. - 1.0.3: - Fix multi-connect to enable live-migration (LP#1565051). - """ - VERSION = '1.0.3' - protocol = 'iSCSI' - - # ThirdPartySystems wiki page - CI_WIKI_NAME = "Oracle_ZFSSA_CI" - - def __init__(self, *args, **kwargs): - super(ZFSSAISCSIDriver, self).__init__(*args, **kwargs) - self.configuration.append_config_values(ZFSSA_OPTS) - self.configuration.append_config_values(san.san_opts) - self.zfssa = None - self.tgt_zfssa = None - self._stats = None - self.tgtiqn = None - - def _get_target_alias(self): - """return target alias.""" - return self.configuration.zfssa_target_group - - def do_setup(self, context): - """Setup - create multiple elements. - - Project, initiators, initiatorgroup, target and targetgroup. - """ - lcfg = self.configuration - LOG.info('Connecting to host: %s.', lcfg.san_ip) - self.zfssa = factory_zfssa() - self.tgt_zfssa = factory_zfssa() - self.zfssa.set_host(lcfg.san_ip, timeout=lcfg.zfssa_rest_timeout) - auth_str = '%s:%s' % (lcfg.san_login, lcfg.san_password) - auth_str = base64.encode_as_text(auth_str) - self.zfssa.login(auth_str) - - self.zfssa.create_project(lcfg.zfssa_pool, lcfg.zfssa_project, - compression=lcfg.zfssa_lun_compression, - logbias=lcfg.zfssa_lun_logbias) - - schemas = [ - {'property': 'cinder_managed', - 'description': 'Managed by Cinder', - 'type': 'Boolean'}] - - if lcfg.zfssa_enable_local_cache: - self.zfssa.create_project(lcfg.zfssa_pool, - lcfg.zfssa_cache_project, - compression=lcfg.zfssa_lun_compression, - logbias=lcfg.zfssa_lun_logbias) - schemas.extend([ - {'property': 'image_id', - 'description': 'OpenStack image ID', - 'type': 'String'}, - {'property': 'updated_at', - 'description': 'Most recent updated time of image', - 'type': 'String'}]) - - self.zfssa.create_schemas(schemas) - - if (lcfg.zfssa_initiator_config != ''): - initiator_config = ast.literal_eval(lcfg.zfssa_initiator_config) - for initiator_group in initiator_config: - zfssa_initiator_group = 
initiator_group - for zfssa_initiator in initiator_config[zfssa_initiator_group]: - self.zfssa.create_initiator( - zfssa_initiator['iqn'], - zfssa_initiator_group + '-' + zfssa_initiator['iqn'], - chapuser=zfssa_initiator['user'], - chapsecret=zfssa_initiator['password']) - - if (zfssa_initiator_group != 'default'): - self.zfssa.add_to_initiatorgroup( - zfssa_initiator['iqn'], - zfssa_initiator_group) - else: - LOG.warning('zfssa_initiator_config not found. ' - 'Using deprecated configuration options.') - - if not lcfg.zfssa_initiator_group: - LOG.error('zfssa_initiator_group cannot be empty. ' - 'Explicitly set the value "default" to use ' - 'the default initiator group.') - raise exception.InvalidConfigurationValue( - value='', option='zfssa_initiator_group') - - if (not lcfg.zfssa_initiator and - lcfg.zfssa_initiator_group != 'default'): - LOG.error('zfssa_initiator cannot be empty when ' - 'creating a zfssa_initiator_group.') - raise exception.InvalidConfigurationValue( - value='', option='zfssa_initiator') - - if lcfg.zfssa_initiator != '': - if lcfg.zfssa_initiator_group == 'default': - LOG.warning('zfssa_initiator: %(ini)s wont be used on ' - 'zfssa_initiator_group= %(inigrp)s.', - {'ini': lcfg.zfssa_initiator, - 'inigrp': lcfg.zfssa_initiator_group}) - - # Setup initiator and initiator group - else: - for initiator in lcfg.zfssa_initiator.split(','): - initiator = initiator.strip() - self.zfssa.create_initiator( - initiator, - lcfg.zfssa_initiator_group + '-' + initiator, - chapuser=lcfg.zfssa_initiator_user, - chapsecret=lcfg.zfssa_initiator_password) - self.zfssa.add_to_initiatorgroup( - initiator, lcfg.zfssa_initiator_group) - - # Parse interfaces - interfaces = [] - for intrface in lcfg.zfssa_target_interfaces.split(','): - if intrface == '': - continue - interfaces.append(intrface) - - # Setup target and target group - iqn = self.zfssa.create_target( - self._get_target_alias(), - interfaces, - tchapuser=lcfg.zfssa_target_user, - 
tchapsecret=lcfg.zfssa_target_password) - - self.zfssa.add_to_targetgroup(iqn, lcfg.zfssa_target_group) - - if lcfg.zfssa_manage_policy not in ("loose", "strict"): - err_msg = (_("zfssa_manage_policy property needs to be set to " - "'strict' or 'loose'. Current value is: %s.") % - lcfg.zfssa_manage_policy) - LOG.error(err_msg) - raise exception.InvalidInput(reason=err_msg) - - # Lookup the zfssa_target_portal DNS name to an IP address - host, port = lcfg.zfssa_target_portal.split(':') - host_ip_addr = utils.resolve_hostname(host) - self.zfssa_target_portal = host_ip_addr + ':' + port - - def check_for_setup_error(self): - """Check that driver can login. - - Check also pool, project, initiators, initiatorgroup, target and - targetgroup. - """ - lcfg = self.configuration - - self.zfssa.verify_pool(lcfg.zfssa_pool) - self.zfssa.verify_project(lcfg.zfssa_pool, lcfg.zfssa_project) - - if (lcfg.zfssa_initiator_config != ''): - initiator_config = ast.literal_eval(lcfg.zfssa_initiator_config) - for initiator_group in initiator_config: - zfssa_initiator_group = initiator_group - for zfssa_initiator in initiator_config[zfssa_initiator_group]: - self.zfssa.verify_initiator(zfssa_initiator['iqn']) - else: - if (lcfg.zfssa_initiator != '' and - lcfg.zfssa_initiator_group != '' and - lcfg.zfssa_initiator_group != 'default'): - for initiator in lcfg.zfssa_initiator.split(','): - self.zfssa.verify_initiator(initiator) - - self.zfssa.verify_target(self._get_target_alias()) - - def _get_provider_info(self): - """Return provider information.""" - lcfg = self.configuration - - if self.tgtiqn is None: - self.tgtiqn = self.zfssa.get_target(self._get_target_alias()) - - loc = "%s %s" % (self.zfssa_target_portal, self.tgtiqn) - LOG.debug('_get_provider_info: provider_location: %s', loc) - provider = {'provider_location': loc} - if lcfg.zfssa_target_user != '' and lcfg.zfssa_target_password != '': - provider['provider_auth'] = ('CHAP %s %s' % - (lcfg.zfssa_target_user, - 
lcfg.zfssa_target_password)) - - return provider - - def create_volume(self, volume): - """Create a volume on ZFSSA.""" - LOG.debug('zfssa.create_volume: volume=' + volume['name']) - lcfg = self.configuration - volsize = str(volume['size']) + 'g' - specs = self._get_voltype_specs(volume) - specs.update({'custom:cinder_managed': True}) - self.zfssa.create_lun(lcfg.zfssa_pool, - lcfg.zfssa_project, - volume['name'], - volsize, - lcfg.zfssa_target_group, - specs) - - @utils.trace - def delete_volume(self, volume): - """Deletes a volume with the given volume['name'].""" - lcfg = self.configuration - - try: - lun2del = self.zfssa.get_lun(lcfg.zfssa_pool, - lcfg.zfssa_project, - volume['name']) - except exception.VolumeNotFound: - # Sometimes a volume exists in cinder for which there is no - # corresponding LUN (e.g. LUN create failed). In this case, - # allow deletion to complete (without doing anything on the - # ZFSSA). Any other exception should be passed up. - LOG.warning('No LUN found on ZFSSA corresponding to volume ' - 'ID %s.', volume['id']) - return - - # Delete clone temp snapshot. see create_cloned_volume() - if 'origin' in lun2del and 'id' in volume: - if lun2del['nodestroy']: - self.zfssa.set_lun_props(lcfg.zfssa_pool, - lcfg.zfssa_project, - volume['name'], - nodestroy=False) - - tmpsnap = 'tmp-snapshot-%s' % volume['id'] - if lun2del['origin']['snapshot'] == tmpsnap: - self.zfssa.delete_snapshot(lcfg.zfssa_pool, - lcfg.zfssa_project, - lun2del['origin']['share'], - lun2del['origin']['snapshot']) - return - - self.zfssa.delete_lun(pool=lcfg.zfssa_pool, - project=lcfg.zfssa_project, - lun=volume['name']) - - if ('origin' in lun2del and - lun2del['origin']['project'] == lcfg.zfssa_cache_project): - self._check_origin(lun2del, volume['name']) - - def create_snapshot(self, snapshot): - """Creates a snapshot of a volume. 
- - Snapshot name: snapshot['name'] - Volume name: snapshot['volume_name'] - """ - LOG.debug('zfssa.create_snapshot: snapshot=%s', snapshot['name']) - lcfg = self.configuration - self.zfssa.create_snapshot(lcfg.zfssa_pool, - lcfg.zfssa_project, - snapshot['volume_name'], - snapshot['name']) - - def delete_snapshot(self, snapshot): - """Deletes a snapshot.""" - LOG.debug('zfssa.delete_snapshot: snapshot=%s', snapshot['name']) - lcfg = self.configuration - numclones = self.zfssa.num_clones(lcfg.zfssa_pool, - lcfg.zfssa_project, - snapshot['volume_name'], - snapshot['name']) - if numclones > 0: - LOG.error('Snapshot %s: has clones', snapshot['name']) - raise exception.SnapshotIsBusy(snapshot_name=snapshot['name']) - - self.zfssa.delete_snapshot(lcfg.zfssa_pool, - lcfg.zfssa_project, - snapshot['volume_name'], - snapshot['name']) - - def create_volume_from_snapshot(self, volume, snapshot): - """Creates a volume from a snapshot - clone a snapshot.""" - LOG.debug('zfssa.create_volume_from_snapshot: volume=%s', - volume['name']) - LOG.debug('zfssa.create_volume_from_snapshot: snapshot=%s', - snapshot['name']) - - lcfg = self.configuration - - parent_lun = self.zfssa.get_lun(lcfg.zfssa_pool, - lcfg.zfssa_project, - snapshot['volume_name']) - parent_size = parent_lun['size'] - - child_size = volume['size'] * units.Gi - - if child_size < parent_size: - exception_msg = (_('Error clone [%(clone_id)s] ' - 'size [%(clone_size)d] cannot ' - 'be smaller than parent volume ' - '[%(parent_id)s] size ' - '[%(parent_size)d]') - % {'parent_id': snapshot['volume_name'], - 'parent_size': parent_size / units.Gi, - 'clone_id': volume['name'], - 'clone_size': volume['size']}) - LOG.error(exception_msg) - raise exception.InvalidInput(reason=exception_msg) - - specs = self._get_voltype_specs(volume) - specs.update({'custom:cinder_managed': True}) - - self.zfssa.clone_snapshot(lcfg.zfssa_pool, - lcfg.zfssa_project, - snapshot['volume_name'], - snapshot['name'], - lcfg.zfssa_project, - 
volume['name'], - specs) - - if child_size > parent_size: - LOG.debug('zfssa.create_volume_from_snapshot: ' - 'Parent size [%(parent_size)d], ' - 'Child size [%(child_size)d] - resizing', - {'parent_size': parent_size, 'child_size': child_size}) - self.zfssa.set_lun_props(lcfg.zfssa_pool, - lcfg.zfssa_project, - volume['name'], - volsize=child_size) - - def _update_volume_status(self): - """Retrieve status info from volume group.""" - LOG.debug("Updating volume status") - self._stats = None - data = {} - backend_name = self.configuration.safe_get('volume_backend_name') - data["volume_backend_name"] = backend_name or self.__class__.__name__ - data["vendor_name"] = 'Oracle' - data["driver_version"] = self.VERSION - data["storage_protocol"] = self.protocol - - lcfg = self.configuration - (avail, total) = self.zfssa.get_project_stats(lcfg.zfssa_pool, - lcfg.zfssa_project) - if avail is None or total is None: - return - - host = lcfg.san_ip - pool = lcfg.zfssa_pool - project = lcfg.zfssa_project - auth_str = '%s:%s' % (lcfg.san_login, lcfg.san_password) - auth_str = base64.encode_as_text(auth_str) - zfssa_tgt_group = lcfg.zfssa_target_group - repl_ip = lcfg.zfssa_replication_ip - - data['location_info'] = "%s:%s:%s:%s:%s:%s" % (host, auth_str, pool, - project, - zfssa_tgt_group, - repl_ip) - - data['total_capacity_gb'] = int(total) / units.Gi - data['free_capacity_gb'] = int(avail) / units.Gi - data['reserved_percentage'] = 0 - data['QoS_support'] = False - - pool_details = self.zfssa.get_pool_details(lcfg.zfssa_pool) - data['zfssa_poolprofile'] = pool_details['profile'] - data['zfssa_volblocksize'] = lcfg.zfssa_lun_volblocksize - data['zfssa_sparse'] = six.text_type(lcfg.zfssa_lun_sparse) - data['zfssa_compression'] = lcfg.zfssa_lun_compression - data['zfssa_logbias'] = lcfg.zfssa_lun_logbias - - self._stats = data - - def get_volume_stats(self, refresh=False): - """Get volume status. - - If 'refresh' is True, run update the stats first. 
- """ - if refresh: - self._update_volume_status() - return self._stats - - def create_export(self, context, volume, connector): - pass - - def remove_export(self, context, volume): - pass - - def ensure_export(self, context, volume): - pass - - def extend_volume(self, volume, new_size): - """Driver entry point to extent volume size.""" - LOG.debug('extend_volume: volume name: %s', volume['name']) - lcfg = self.configuration - self.zfssa.set_lun_props(lcfg.zfssa_pool, - lcfg.zfssa_project, - volume['name'], - volsize=new_size * units.Gi) - - def create_cloned_volume(self, volume, src_vref): - """Create a clone of the specified volume.""" - zfssa_snapshot = {'volume_name': src_vref['name'], - 'name': 'tmp-snapshot-%s' % volume['id']} - self.create_snapshot(zfssa_snapshot) - try: - self.create_volume_from_snapshot(volume, zfssa_snapshot) - except exception.VolumeBackendAPIException: - LOG.error('Clone Volume: %(volume)s failed from source volume: ' - '%(src_vref)s', - {'volume': volume['name'], - 'src_vref': src_vref['name']}) - # Cleanup snapshot - self.delete_snapshot(zfssa_snapshot) - - @utils.synchronized('zfssaiscsi', external=True) - def clone_image(self, context, volume, - image_location, image_meta, - image_service): - """Create a volume efficiently from an existing image. - - Verify the image ID being used: - - (1) If there is no existing cache volume, create one and transfer - image data to it. Take a snapshot. - - (2) If a cache volume already exists, verify if it is either alternated - or updated. If so try to remove it, raise exception if removal fails. - Create a new cache volume as in (1). - - Clone a volume from the cache volume and returns it to Cinder. - - A file lock is placed on this method to prevent: - - (a) a race condition when a cache volume has been verified, but then - gets deleted before it is cloned. - - (b) failure of subsequent clone_image requests if the first request is - still pending. 
- """ - LOG.debug('Cloning image %(image)s to volume %(volume)s', - {'image': image_meta['id'], 'volume': volume['name']}) - lcfg = self.configuration - if not lcfg.zfssa_enable_local_cache: - return None, False - - cachevol_size = image_meta['size'] - if 'virtual_size' in image_meta and image_meta['virtual_size']: - cachevol_size = image_meta['virtual_size'] - - cachevol_size_gb = int(math.ceil(float(cachevol_size) / units.Gi)) - - # Make sure the volume is big enough since cloning adds extra metadata. - # Having it as X Gi can cause creation failures. - if cachevol_size % units.Gi == 0: - cachevol_size_gb += 1 - - if cachevol_size_gb > volume['size']: - exception_msg = ('Image size %(img_size)dGB is larger ' - 'than volume size %(vol_size)dGB.', - {'img_size': cachevol_size_gb, - 'vol_size': volume['size']}) - LOG.error(exception_msg) - return None, False - - specs = self._get_voltype_specs(volume) - specs.update({'custom:cinder_managed': True}) - cachevol_props = {'size': cachevol_size_gb} - - try: - cache_vol, cache_snap = self._verify_cache_volume(context, - image_meta, - image_service, - specs, - cachevol_props) - # A cache volume and a snapshot should be ready by now - # Create a clone from the cache volume - self.zfssa.clone_snapshot(lcfg.zfssa_pool, - lcfg.zfssa_cache_project, - cache_vol, - cache_snap, - lcfg.zfssa_project, - volume['name'], - specs) - if cachevol_size_gb < volume['size']: - self.extend_volume(volume, volume['size']) - except exception.VolumeBackendAPIException as exc: - exception_msg = ('Cannot clone image %(image)s to ' - 'volume %(volume)s. Error: %(error)s.', - {'volume': volume['name'], - 'image': image_meta['id'], - 'error': exc.msg}) - LOG.error(exception_msg) - return None, False - - return None, True - - def _verify_cache_volume(self, context, img_meta, - img_service, specs, cachevol_props): - """Verify if we have a cache volume that we want. - - If we don't, create one. 
- If we do, check if it's been updated: - * If so, delete it and recreate a new volume - * If not, we are good. - - If it's out of date, delete it and create a new one. - After the function returns, there should be a cache volume available, - ready for cloning. - """ - lcfg = self.configuration - cachevol_name = 'os-cache-vol-%s' % img_meta['id'] - cachesnap_name = 'image-%s' % img_meta['id'] - cachevol_meta = { - 'cache_name': cachevol_name, - 'snap_name': cachesnap_name, - } - cachevol_props.update(cachevol_meta) - cache_vol, cache_snap = None, None - updated_at = six.text_type(img_meta['updated_at'].isoformat()) - LOG.debug('Verifying cache volume: %s', cachevol_name) - - try: - cache_vol = self.zfssa.get_lun(lcfg.zfssa_pool, - lcfg.zfssa_cache_project, - cachevol_name) - if (not cache_vol.get('updated_at', None) or - not cache_vol.get('image_id', None)): - exc_msg = (_('Cache volume %s does not have required ' - 'properties') % cachevol_name) - LOG.error(exc_msg) - raise exception.VolumeBackendAPIException(data=exc_msg) - - cache_snap = self.zfssa.get_lun_snapshot(lcfg.zfssa_pool, - lcfg.zfssa_cache_project, - cachevol_name, - cachesnap_name) - except exception.VolumeNotFound: - # There is no existing cache volume, create one: - return self._create_cache_volume(context, - img_meta, - img_service, - specs, - cachevol_props) - except exception.SnapshotNotFound: - exception_msg = (_('Cache volume %(cache_vol)s ' - 'does not have snapshot %(cache_snap)s.'), - {'cache_vol': cachevol_name, - 'cache_snap': cachesnap_name}) - LOG.error(exception_msg) - raise exception.VolumeBackendAPIException(data=exception_msg) - - # A cache volume does exist, check if it's updated: - if ((cache_vol['updated_at'] != updated_at) or - (cache_vol['image_id'] != img_meta['id'])): - # The cache volume is updated, but has clones: - if cache_snap['numclones'] > 0: - exception_msg = (_('Cannot delete ' - 'cache volume: %(cachevol_name)s. 
' - 'It was updated at %(updated_at)s ' - 'and currently has %(numclones)s ' - 'volume instances.'), - {'cachevol_name': cachevol_name, - 'updated_at': updated_at, - 'numclones': cache_snap['numclones']}) - LOG.error(exception_msg) - raise exception.VolumeBackendAPIException(data=exception_msg) - - # The cache volume is updated, but has no clone, so we delete it - # and re-create a new one: - self.zfssa.delete_lun(lcfg.zfssa_pool, - lcfg.zfssa_cache_project, - cachevol_name) - return self._create_cache_volume(context, - img_meta, - img_service, - specs, - cachevol_props) - - return cachevol_name, cachesnap_name - - def _create_cache_volume(self, context, img_meta, - img_service, specs, cachevol_props): - """Create a cache volume from an image. - - Returns names of the cache volume and its snapshot. - """ - lcfg = self.configuration - cachevol_size = int(cachevol_props['size']) - lunsize = "%sg" % six.text_type(cachevol_size) - lun_props = { - 'custom:image_id': img_meta['id'], - 'custom:updated_at': ( - six.text_type(img_meta['updated_at'].isoformat())), - } - lun_props.update(specs) - - cache_vol = { - 'name': cachevol_props['cache_name'], - 'id': img_meta['id'], - 'size': cachevol_size, - } - LOG.debug('Creating cache volume %s.', cache_vol['name']) - - try: - self.zfssa.create_lun(lcfg.zfssa_pool, - lcfg.zfssa_cache_project, - cache_vol['name'], - lunsize, - lcfg.zfssa_target_group, - lun_props) - super(ZFSSAISCSIDriver, self).copy_image_to_volume(context, - cache_vol, - img_service, - img_meta['id']) - self.zfssa.create_snapshot(lcfg.zfssa_pool, - lcfg.zfssa_cache_project, - cache_vol['name'], - cachevol_props['snap_name']) - except Exception as exc: - exc_msg = (_('Fail to create cache volume %(volume)s. 
' - 'Error: %(err)s'), - {'volume': cache_vol['name'], - 'err': six.text_type(exc)}) - LOG.error(exc_msg) - self.zfssa.delete_lun(lcfg.zfssa_pool, - lcfg.zfssa_cache_project, - cache_vol['name']) - raise exception.VolumeBackendAPIException(data=exc_msg) - - return cachevol_props['cache_name'], cachevol_props['snap_name'] - - def local_path(self, volume): - """Not implemented.""" - pass - - @utils.trace - def initialize_connection(self, volume, connector): - """Driver entry point to setup a connection for a volume.""" - lcfg = self.configuration - init_groups = self.zfssa.get_initiator_initiatorgroup( - connector['initiator']) - if not init_groups: - if lcfg.zfssa_initiator_group == 'default': - init_groups.append('default') - else: - exception_msg = (_('Failed to find iSCSI initiator group ' - 'containing %(initiator)s.') - % {'initiator': connector['initiator']}) - LOG.error(exception_msg) - raise exception.VolumeBackendAPIException(data=exception_msg) - if ((lcfg.zfssa_enable_local_cache is True) and - (volume['name'].startswith('os-cache-vol-'))): - project = lcfg.zfssa_cache_project - else: - project = lcfg.zfssa_project - - lun = self.zfssa.get_lun(lcfg.zfssa_pool, project, volume['name']) - - # Construct a set (to avoid duplicates) of initiator groups by - # combining the list to which the LUN is already presented with - # the list for the new connector. 
- new_init_groups = set(lun['initiatorgroup'] + init_groups) - self.zfssa.set_lun_initiatorgroup(lcfg.zfssa_pool, - project, - volume['name'], - sorted(list(new_init_groups))) - - iscsi_properties = {} - provider = self._get_provider_info() - - (target_portal, target_iqn) = provider['provider_location'].split() - iscsi_properties['target_discovered'] = False - iscsi_properties['target_portal'] = target_portal - iscsi_properties['target_iqn'] = target_iqn - - # Get LUN again to discover new initiator group mapping - lun = self.zfssa.get_lun(lcfg.zfssa_pool, project, volume['name']) - - # Construct a mapping of LU number to initiator group. - lu_map = dict(zip(lun['initiatorgroup'], lun['number'])) - - # When an initiator is a member of multiple groups, and a LUN is - # presented to all of them, the same LU number is assigned to all of - # them, so we can use the first initator group containing the - # initiator to lookup the right LU number in our mapping - iscsi_properties['target_lun'] = int(lu_map[init_groups[0]]) - - iscsi_properties['volume_id'] = volume['id'] - - if 'provider_auth' in provider: - (auth_method, auth_username, auth_password) = provider[ - 'provider_auth'].split() - iscsi_properties['auth_method'] = auth_method - iscsi_properties['auth_username'] = auth_username - iscsi_properties['auth_password'] = auth_password - - return { - 'driver_volume_type': 'iscsi', - 'data': iscsi_properties - } - - @utils.trace - def terminate_connection(self, volume, connector, **kwargs): - """Driver entry point to terminate a connection for a volume.""" - lcfg = self.configuration - project = lcfg.zfssa_project - pool = lcfg.zfssa_pool - - # If connector is None, assume that we're expected to disconnect - # the volume from all initiators - if connector is None: - new_init_groups = [] - else: - connector_init_groups = self.zfssa.get_initiator_initiatorgroup( - connector['initiator']) - if ((lcfg.zfssa_enable_local_cache is True) and - 
(volume['name'].startswith('os-cache-vol-'))): - project = lcfg.zfssa_cache_project - lun = self.zfssa.get_lun(pool, project, volume['name']) - # Construct the new set of initiator groups, starting with the list - # that the volume is currently connected to, then removing those - # associated with the connector that we're detaching from - new_init_groups = set(lun['initiatorgroup']) - new_init_groups -= set(connector_init_groups) - - self.zfssa.set_lun_initiatorgroup(pool, - project, - volume['name'], - sorted(list(new_init_groups))) - - def _get_voltype_specs(self, volume): - """Get specs suitable for volume creation.""" - vtype = volume.get('volume_type_id', None) - extra_specs = None - if vtype: - extra_specs = volume_types.get_volume_type_extra_specs(vtype) - - return self._get_specs(extra_specs) - - def _get_specs(self, xspecs): - """Return a dict with extra specs and/or config values.""" - result = {} - for spc in ZFSSA_LUN_SPECS: - val = None - prop = spc.split(':')[1] - cfg = 'zfssa_lun_' + prop - if xspecs: - val = xspecs.pop(spc, None) - - if val is None: - val = self.configuration.safe_get(cfg) - - if val is not None and val != '': - result.update({prop: val}) - - return result - - def migrate_volume(self, ctxt, volume, host): - LOG.debug('Attempting ZFSSA enabled volume migration. volume: %(id)s, ' - 'host: %(host)s, status=%(status)s.', - {'id': volume['id'], - 'host': host, - 'status': volume['status']}) - - lcfg = self.configuration - default_ret = (False, None) - - if volume['status'] != "available": - LOG.debug('Only available volumes can be migrated using backend ' - 'assisted migration. Defaulting to generic migration.') - return default_ret - - if (host['capabilities']['vendor_name'] != 'Oracle' or - host['capabilities']['storage_protocol'] != self.protocol): - LOG.debug('Source and destination drivers need to be Oracle iSCSI ' - 'to use backend assisted migration. 
Defaulting to ' - 'generic migration.') - return default_ret - - if 'location_info' not in host['capabilities']: - LOG.debug('Could not find location_info in capabilities reported ' - 'by the destination driver. Defaulting to generic ' - 'migration.') - return default_ret - - loc_info = host['capabilities']['location_info'] - - try: - (tgt_host, auth_str, tgt_pool, tgt_project, tgt_tgtgroup, - tgt_repl_ip) = loc_info.split(':') - except ValueError: - LOG.error("Location info needed for backend enabled volume " - "migration not in correct format: %s. Continuing " - "with generic volume migration.", loc_info) - return default_ret - - if tgt_repl_ip == '': - LOG.error("zfssa_replication_ip not set in cinder.conf. " - "zfssa_replication_ip is needed for backend enabled " - "volume migration. Continuing with generic volume " - "migration.") - return default_ret - - src_pool = lcfg.zfssa_pool - src_project = lcfg.zfssa_project - - try: - LOG.info('Connecting to target host: %s for backend enabled ' - 'migration.', tgt_host) - self.tgt_zfssa.set_host(tgt_host) - self.tgt_zfssa.login(auth_str) - - # Verify that the replication service is online - try: - self.zfssa.verify_service('replication') - self.tgt_zfssa.verify_service('replication') - except exception.VolumeBackendAPIException: - return default_ret - - # ensure that a target group by the same name exists on the target - # system also, if not, use default migration. 
- lun = self.zfssa.get_lun(src_pool, src_project, volume['name']) - - if lun['targetgroup'] != tgt_tgtgroup: - return default_ret - - tgt_asn = self.tgt_zfssa.get_asn() - src_asn = self.zfssa.get_asn() - - # verify on the source system that the destination has been - # registered as a replication target - tgts = self.zfssa.get_replication_targets() - targets = [] - for target in tgts['targets']: - if target['asn'] == tgt_asn: - targets.append(target) - - if targets == []: - LOG.debug('Target host: %(host)s for volume migration ' - 'not configured as a replication target ' - 'for volume: %(vol)s.', - {'host': tgt_repl_ip, - 'vol': volume['name']}) - return default_ret - - # Multiple ips from the same appliance may be configured - # as different targets - for target in targets: - if target['address'] == tgt_repl_ip + ':216': - break - - if target['address'] != tgt_repl_ip + ':216': - LOG.debug('Target with replication ip: %s not configured on ' - 'the source appliance for backend enabled volume ' - 'migration. Proceeding with default migration.', - tgt_repl_ip) - return default_ret - - flow = lf.Flow('zfssa_volume_migration').add( - MigrateVolumeInit(), - MigrateVolumeCreateAction(provides='action_id'), - MigrateVolumeSendReplUpdate(), - MigrateVolumeSeverRepl(), - MigrateVolumeMoveVol(), - MigrateVolumeCleanUp() - ) - taskflow.engines.run(flow, - store={'driver': self, - 'tgt_zfssa': self.tgt_zfssa, - 'tgt_pool': tgt_pool, - 'tgt_project': tgt_project, - 'volume': volume, - 'tgt_asn': tgt_asn, - 'src_zfssa': self.zfssa, - 'src_asn': src_asn, - 'src_pool': src_pool, - 'src_project': src_project, - 'target': target}) - - return(True, None) - - except Exception: - LOG.error("Error migrating volume: %s", volume['name']) - raise - - def update_migrated_volume(self, ctxt, volume, new_volume, - original_volume_status): - """Return model update for migrated volume. 
- - :param volume: The original volume that was migrated to this backend - :param new_volume: The migration volume object that was created on - this backend as part of the migration process - :param original_volume_status: The status of the original volume - :returns: model_update to update DB with any needed changes - """ - - lcfg = self.configuration - original_name = CONF.volume_name_template % volume['id'] - current_name = CONF.volume_name_template % new_volume['id'] - - LOG.debug('Renaming migrated volume: %(cur)s to %(org)s', - {'cur': current_name, - 'org': original_name}) - self.zfssa.set_lun_props(lcfg.zfssa_pool, lcfg.zfssa_project, - current_name, name=original_name) - return {'_name_id': None} - - @utils.synchronized('zfssaiscsi', external=True) - def _check_origin(self, lun, volname): - """Verify the cache volume of a bootable volume. - - If the cache no longer has clone, it will be deleted. - There is a small lag between the time a clone is deleted and the number - of clones being updated accordingly. There is also a race condition - when multiple volumes (clones of a cache volume) are deleted at once, - leading to the number of clones reported incorrectly. The file lock is - here to avoid such issues. - """ - lcfg = self.configuration - cache = lun['origin'] - numclones = -1 - if (cache['snapshot'].startswith('image-') and - cache['share'].startswith('os-cache-vol')): - try: - numclones = self.zfssa.num_clones(lcfg.zfssa_pool, - lcfg.zfssa_cache_project, - cache['share'], - cache['snapshot']) - except Exception: - LOG.debug('Cache volume is already deleted.') - return - - LOG.debug('Checking cache volume %(name)s, numclones = %(clones)d', - {'name': cache['share'], 'clones': numclones}) - - # Sometimes numclones still hold old values even when all clones - # have been deleted. 
So we handle this situation separately here: - if numclones == 1: - try: - self.zfssa.get_lun(lcfg.zfssa_pool, - lcfg.zfssa_project, - volname) - # The volume does exist, so return - return - except exception.VolumeNotFound: - # The volume is already deleted - numclones = 0 - - if numclones == 0: - try: - self.zfssa.delete_lun(lcfg.zfssa_pool, - lcfg.zfssa_cache_project, - cache['share']) - except exception.VolumeBackendAPIException: - LOG.warning("Volume %s exists but can't be deleted.", - cache['share']) - - def manage_existing(self, volume, existing_ref): - """Manage an existing volume in the ZFSSA backend. - - :param volume: Reference to the new volume. - :param existing_ref: Reference to the existing volume to be managed. - """ - lcfg = self.configuration - - existing_vol = self._get_existing_vol(existing_ref) - self._verify_volume_to_manage(existing_vol) - - new_vol_name = volume['name'] - - try: - self.zfssa.set_lun_props(lcfg.zfssa_pool, - lcfg.zfssa_project, - existing_vol['name'], - name=new_vol_name, - schema={"custom:cinder_managed": True}) - except exception.VolumeBackendAPIException: - with excutils.save_and_reraise_exception(): - LOG.error("Failed to rename volume %(existing)s to " - "%(new)s. Volume manage failed.", - {'existing': existing_vol['name'], - 'new': new_vol_name}) - return None - - def manage_existing_get_size(self, volume, existing_ref): - """Return size of the volume to be managed by manage_existing.""" - existing_vol = self._get_existing_vol(existing_ref) - - size = existing_vol['size'] - return int(math.ceil(float(size) / units.Gi)) - - def unmanage(self, volume): - """Remove an existing volume from cinder management. - - :param volume: Reference to the volume to be unmanaged. 
- """ - lcfg = self.configuration - new_name = 'unmanaged-' + volume['name'] - try: - self.zfssa.set_lun_props(lcfg.zfssa_pool, - lcfg.zfssa_project, - volume['name'], - name=new_name, - schema={"custom:cinder_managed": False}) - except exception.VolumeBackendAPIException: - with excutils.save_and_reraise_exception(): - LOG.error("Failed to rename volume %(existing)s to " - "%(new)s. Volume unmanage failed.", - {'existing': volume['name'], - 'new': new_name}) - return None - - def _verify_volume_to_manage(self, volume): - lcfg = self.configuration - if lcfg.zfssa_manage_policy == 'loose': - return - - vol_name = volume['name'] - - if 'cinder_managed' not in volume: - err_msg = (_("Unknown if the volume: %s to be managed is " - "already being managed by Cinder. Aborting manage " - "volume. Please add 'cinder_managed' custom schema " - "property to the volume and set its value to False. " - "Alternatively, set the value of cinder config " - "policy 'zfssa_manage_policy' to 'loose' to " - "remove this restriction.") % vol_name) - LOG.error(err_msg) - raise exception.InvalidInput(reason=err_msg) - - if volume['cinder_managed'] is True: - msg = (_("Volume: %s is already being managed by Cinder.") - % vol_name) - LOG.error(msg) - raise exception.ManageExistingAlreadyManaged(volume_ref=vol_name) - - def _get_existing_vol(self, existing_ref): - lcfg = self.configuration - if 'source-name' not in existing_ref: - msg = (_("Reference to volume: %s to be managed must contain " - "source-name.") % existing_ref) - raise exception.ManageExistingInvalidReference( - existing_ref=existing_ref, reason=msg) - try: - existing_vol = self.zfssa.get_lun(lcfg.zfssa_pool, - lcfg.zfssa_project, - existing_ref['source-name']) - except exception.VolumeNotFound: - err_msg = (_("Volume %s doesn't exist on the ZFSSA " - "backend.") % existing_vol['name']) - LOG.error(err_msg) - raise exception.InvalidInput(reason=err_msg) - return existing_vol - - -class MigrateVolumeInit(task.Task): - def 
execute(self, src_zfssa, volume, src_pool, src_project): - LOG.debug('Setting inherit flag on source backend to False.') - src_zfssa.edit_inherit_replication_flag(src_pool, src_project, - volume['name'], set=False) - - def revert(self, src_zfssa, volume, src_pool, src_project, **kwargs): - LOG.debug('Rollback: Setting inherit flag on source appliance to ' - 'True.') - src_zfssa.edit_inherit_replication_flag(src_pool, src_project, - volume['name'], set=True) - - -class MigrateVolumeCreateAction(task.Task): - def execute(self, src_zfssa, volume, src_pool, src_project, target, - tgt_pool): - LOG.debug('Creating replication action on source appliance.') - action_id = src_zfssa.create_replication_action(src_pool, - src_project, - target['label'], - tgt_pool, - volume['name']) - - self._action_id = action_id - return action_id - - def revert(self, src_zfssa, **kwargs): - if hasattr(self, '_action_id'): - LOG.debug('Rollback: deleting replication action on source ' - 'appliance.') - src_zfssa.delete_replication_action(self._action_id) - - -class MigrateVolumeSendReplUpdate(task.Task): - def execute(self, src_zfssa, action_id): - LOG.debug('Sending replication update from source appliance.') - src_zfssa.send_repl_update(action_id) - LOG.debug('Deleting replication action on source appliance.') - src_zfssa.delete_replication_action(action_id) - self._action_deleted = True - - -class MigrateVolumeSeverRepl(task.Task): - def execute(self, tgt_zfssa, src_asn, action_id, driver): - source = tgt_zfssa.get_replication_source(src_asn) - if not source: - err = (_('Source with host ip/name: %s not found on the ' - 'target appliance for backend enabled volume ' - 'migration, proceeding with default migration.'), - driver.configuration.san_ip) - LOG.error(err) - raise exception.VolumeBackendAPIException(data=err) - LOG.debug('Severing replication package on destination appliance.') - tgt_zfssa.sever_replication(action_id, source['name'], - project=action_id) - - -class 
MigrateVolumeMoveVol(task.Task): - def execute(self, tgt_zfssa, tgt_pool, tgt_project, action_id, volume): - LOG.debug('Moving LUN to destination project on destination ' - 'appliance.') - tgt_zfssa.move_volume(tgt_pool, action_id, volume['name'], tgt_project) - LOG.debug('Deleting temporary project on destination appliance.') - tgt_zfssa.delete_project(tgt_pool, action_id) - self._project_deleted = True - - def revert(self, tgt_zfssa, tgt_pool, tgt_project, action_id, volume, - **kwargs): - if not hasattr(self, '_project_deleted'): - LOG.debug('Rollback: deleting temporary project on destination ' - 'appliance.') - tgt_zfssa.delete_project(tgt_pool, action_id) - - -class MigrateVolumeCleanUp(task.Task): - def execute(self, driver, volume, tgt_zfssa): - LOG.debug('Finally, delete source volume on source appliance.') - driver.delete_volume(volume) - tgt_zfssa.logout() diff --git a/cinder/volume/drivers/zfssa/zfssanfs.py b/cinder/volume/drivers/zfssa/zfssanfs.py deleted file mode 100644 index 8e7d46d90..000000000 --- a/cinder/volume/drivers/zfssa/zfssanfs.py +++ /dev/null @@ -1,782 +0,0 @@ -# Copyright (c) 2014, 2016, Oracle and/or its affiliates. All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
-""" -ZFS Storage Appliance NFS Cinder Volume Driver -""" -import datetime as dt -import errno -import math -import os - -from oslo_config import cfg -from oslo_log import log -from oslo_serialization import base64 -from oslo_utils import excutils -from oslo_utils import units -import six - -from cinder import exception -from cinder.i18n import _ -from cinder.image import image_utils -from cinder import interface -from cinder.objects.volume import Volume -from cinder import utils -from cinder.volume import configuration -from cinder.volume.drivers import nfs -from cinder.volume.drivers.san import san -from cinder.volume.drivers.zfssa import zfssarest - - -ZFSSA_OPTS = [ - cfg.StrOpt('zfssa_data_ip', - help='Data path IP address'), - cfg.StrOpt('zfssa_https_port', default='443', - help='HTTPS port number'), - cfg.StrOpt('zfssa_nfs_mount_options', default='', - help='Options to be passed while mounting share over nfs'), - cfg.StrOpt('zfssa_nfs_pool', default='', - help='Storage pool name.'), - cfg.StrOpt('zfssa_nfs_project', default='NFSProject', - help='Project name.'), - cfg.StrOpt('zfssa_nfs_share', default='nfs_share', - help='Share name.'), - cfg.StrOpt('zfssa_nfs_share_compression', default='off', - choices=['off', 'lzjb', 'gzip-2', 'gzip', 'gzip-9'], - help='Data compression.'), - cfg.StrOpt('zfssa_nfs_share_logbias', default='latency', - choices=['latency', 'throughput'], - help='Synchronous write bias-latency, throughput.'), - cfg.IntOpt('zfssa_rest_timeout', - help='REST connection timeout. 
(seconds)'), - cfg.BoolOpt('zfssa_enable_local_cache', default=True, - help='Flag to enable local caching: True, False.'), - cfg.StrOpt('zfssa_cache_directory', default='os-cinder-cache', - help='Name of directory inside zfssa_nfs_share where cache ' - 'volumes are stored.'), - cfg.StrOpt('zfssa_manage_policy', default='loose', - choices=['loose', 'strict'], - help='Driver policy for volume manage.') -] - -LOG = log.getLogger(__name__) - -CONF = cfg.CONF -CONF.register_opts(ZFSSA_OPTS, group=configuration.SHARED_CONF_GROUP) - - -def factory_zfssa(): - return zfssarest.ZFSSANfsApi() - - -@interface.volumedriver -class ZFSSANFSDriver(nfs.NfsDriver): - """ZFSSA Cinder NFS volume driver. - - Version history: - - .. code-block:: none - - 1.0.1: - Backend enabled volume migration. - Local cache feature. - 1.0.2: - Volume manage/unmanage support. - """ - VERSION = '1.0.2' - volume_backend_name = 'ZFSSA_NFS' - protocol = driver_prefix = driver_volume_type = 'nfs' - - # ThirdPartySystems wiki page - CI_WIKI_NAME = "Oracle_ZFSSA_CI" - - def __init__(self, *args, **kwargs): - super(ZFSSANFSDriver, self).__init__(*args, **kwargs) - self.configuration.append_config_values(ZFSSA_OPTS) - self.configuration.append_config_values(san.san_opts) - self.zfssa = None - self._stats = None - - def do_setup(self, context): - if not self.configuration.max_over_subscription_ratio > 0: - msg = _("Config 'max_over_subscription_ratio' invalid. 
Must be > " - "0: %s") % self.configuration.max_over_subscription_ratio - LOG.error(msg) - raise exception.NfsException(msg) - - packages = ('mount.nfs', '/usr/sbin/mount') - for package in packages: - try: - self._execute(package, check_exit_code=False, run_as_root=True) - break - except OSError as exc: - if exc.errno != errno.ENOENT: - raise - LOG.error('%s is not installed.', package) - else: - msg = utils.build_or_str(packages, '%s needs to be installed.') - raise exception.NfsException(msg) - - lcfg = self.configuration - LOG.info('Connecting to host: %s.', lcfg.san_ip) - - host = lcfg.san_ip - user = lcfg.san_login - password = lcfg.san_password - https_port = lcfg.zfssa_https_port - - credentials = ['san_ip', 'san_login', 'san_password', 'zfssa_data_ip'] - - for cred in credentials: - if not getattr(lcfg, cred, None): - exception_msg = _('%s not set in cinder.conf') % cred - LOG.error(exception_msg) - raise exception.CinderException(exception_msg) - - self.zfssa = factory_zfssa() - self.zfssa.set_host(host, timeout=lcfg.zfssa_rest_timeout) - - auth_str = base64.encode_as_text('%s:%s' % (user, password)) - self.zfssa.login(auth_str) - - self.zfssa.create_project(lcfg.zfssa_nfs_pool, lcfg.zfssa_nfs_project, - compression=lcfg.zfssa_nfs_share_compression, - logbias=lcfg.zfssa_nfs_share_logbias) - - share_args = { - 'sharedav': 'rw', - 'sharenfs': 'rw', - 'root_permissions': '777', - 'compression': lcfg.zfssa_nfs_share_compression, - 'logbias': lcfg.zfssa_nfs_share_logbias - } - - self.zfssa.create_share(lcfg.zfssa_nfs_pool, lcfg.zfssa_nfs_project, - lcfg.zfssa_nfs_share, share_args) - - share_details = self.zfssa.get_share(lcfg.zfssa_nfs_pool, - lcfg.zfssa_nfs_project, - lcfg.zfssa_nfs_share) - - mountpoint = share_details['mountpoint'] - - self.mount_path = lcfg.zfssa_data_ip + ':' + mountpoint - https_path = 'https://' + lcfg.zfssa_data_ip + ':' + https_port + \ - '/shares' + mountpoint - - LOG.debug('NFS mount path: %s', self.mount_path) - LOG.debug('WebDAV 
path to the share: %s', https_path) - - self.shares = {} - mnt_opts = self.configuration.zfssa_nfs_mount_options - self.shares[self.mount_path] = mnt_opts if len(mnt_opts) > 1 else None - - # Initialize the WebDAV client - self.zfssa.set_webdav(https_path, auth_str) - - # Edit http service so that WebDAV requests are always authenticated - args = {'https_port': https_port, - 'require_login': True} - - self.zfssa.modify_service('http', args) - self.zfssa.enable_service('http') - - if lcfg.zfssa_enable_local_cache: - LOG.debug('Creating local cache directory %s.', - lcfg.zfssa_cache_directory) - self.zfssa.create_directory(lcfg.zfssa_cache_directory) - - def _ensure_shares_mounted(self): - try: - self._ensure_share_mounted(self.mount_path) - except Exception as exc: - LOG.error('Exception during mounting %s.', exc) - - self._mounted_shares = [self.mount_path] - LOG.debug('Available shares %s', self._mounted_shares) - - def check_for_setup_error(self): - """Check that driver can login. - - Check also for properly configured pool, project and share - Check that the http and nfs services are enabled - """ - lcfg = self.configuration - - self.zfssa.verify_pool(lcfg.zfssa_nfs_pool) - self.zfssa.verify_project(lcfg.zfssa_nfs_pool, lcfg.zfssa_nfs_project) - self.zfssa.verify_share(lcfg.zfssa_nfs_pool, lcfg.zfssa_nfs_project, - lcfg.zfssa_nfs_share) - self.zfssa.verify_service('http') - self.zfssa.verify_service('nfs') - - def create_volume(self, volume): - ret = super(ZFSSANFSDriver, self).create_volume(volume) - self.zfssa.set_file_props(volume.name, {'cinder_managed': 'True'}) - return ret - - def create_snapshot(self, snapshot): - """Creates a snapshot of a volume.""" - LOG.info('Creating snapshot: %s', snapshot['name']) - lcfg = self.configuration - snap_name = self._create_snapshot_name() - self.zfssa.create_snapshot(lcfg.zfssa_nfs_pool, lcfg.zfssa_nfs_project, - lcfg.zfssa_nfs_share, snap_name) - - src_file = snap_name + '/' + snapshot['volume_name'] - - try: - 
self.zfssa.create_snapshot_of_volume_file(src_file=src_file, - dst_file= - snapshot['name']) - except Exception: - with excutils.save_and_reraise_exception(): - LOG.debug('Error thrown during snapshot: %s creation', - snapshot['name']) - finally: - self.zfssa.delete_snapshot(lcfg.zfssa_nfs_pool, - lcfg.zfssa_nfs_project, - lcfg.zfssa_nfs_share, snap_name) - - def delete_snapshot(self, snapshot): - """Deletes a snapshot.""" - LOG.info('Deleting snapshot: %s', snapshot['name']) - self.zfssa.delete_snapshot_of_volume_file(src_file=snapshot['name']) - - def create_volume_from_snapshot(self, volume, snapshot, method='COPY'): - LOG.info('Creatng volume from snapshot. volume: %s', - volume['name']) - LOG.info('Source Snapshot: %s', snapshot['name']) - - self._ensure_shares_mounted() - self.zfssa.create_volume_from_snapshot_file(src_file=snapshot['name'], - dst_file=volume['name'], - method=method) - - volume['provider_location'] = self.mount_path - - if volume['size'] != snapshot['volume_size']: - try: - self.extend_volume(volume, volume['size']) - except Exception: - vol_path = self.local_path(volume) - with excutils.save_and_reraise_exception(): - LOG.error('Error in extending volume size: Volume: ' - '%(volume)s Vol_Size: %(vol_size)d with ' - 'Snapshot: %(snapshot)s Snap_Size: ' - '%(snap_size)d', - {'volume': volume['name'], - 'vol_size': volume['size'], - 'snapshot': snapshot['name'], - 'snap_size': snapshot['volume_size']}) - self._execute('rm', '-f', vol_path, run_as_root=True) - - volume_origin = {'origin': snapshot['volume_name'], - 'cinder_managed': 'True'} - self.zfssa.set_file_props(volume['name'], volume_origin) - - return {'provider_location': volume['provider_location']} - - def create_cloned_volume(self, volume, src_vref): - """Creates a snapshot and then clones the snapshot into a volume.""" - LOG.info('new cloned volume: %s', volume['name']) - LOG.info('source volume for cloning: %s', src_vref['name']) - - snapshot = {'volume_name': src_vref['name'], - 
'volume_id': src_vref['id'], - 'volume_size': src_vref['size'], - 'name': self._create_snapshot_name()} - - self.create_snapshot(snapshot) - return self.create_volume_from_snapshot(volume, snapshot, - method='MOVE') - - def delete_volume(self, volume): - LOG.debug('Deleting volume %s.', volume.name) - lcfg = self.configuration - try: - vol_props = self.zfssa.get_volume(volume.name) - except exception.VolumeNotFound: - return - super(ZFSSANFSDriver, self).delete_volume(volume) - - if vol_props['origin'].startswith(lcfg.zfssa_cache_directory): - LOG.info('Checking origin %(origin)s of volume %(volume)s.', - {'origin': vol_props['origin'], - 'volume': volume.name}) - self._check_origin(vol_props['origin']) - - @utils.synchronized('zfssanfs', external=True) - def clone_image(self, context, volume, - image_location, image_meta, - image_service): - """Create a volume efficiently from an existing image. - - Verify the image ID being used: - - (1) If there is no existing cache volume, create one and transfer - image data to it. Take a snapshot. - - (2) If a cache volume already exists, verify if it is either alternated - or updated. If so try to remove it, raise exception if removal fails. - Create a new cache volume as in (1). - - Clone a volume from the cache volume and returns it to Cinder. - - A file lock is placed on this method to prevent: - (a) a race condition when a cache volume has been verified, but then - gets deleted before it is cloned. - - (b) failure of subsequent clone_image requests if the first request is - still pending. 
- """ - LOG.debug('Cloning image %(image)s to volume %(volume)s', - {'image': image_meta['id'], 'volume': volume['name']}) - lcfg = self.configuration - cachevol_size = 0 - if not lcfg.zfssa_enable_local_cache: - return None, False - - with image_utils.TemporaryImages.fetch( - image_service, context, image_meta['id']) as tmp_image: - info = image_utils.qemu_img_info(tmp_image) - cachevol_size = int(math.ceil(float(info.virtual_size) / units.Gi)) - - if cachevol_size > volume['size']: - exception_msg = ('Image size %(img_size)dGB is larger ' - 'than volume size %(vol_size)dGB.', - {'img_size': cachevol_size, - 'vol_size': volume['size']}) - LOG.error(exception_msg) - return None, False - - updated_at = six.text_type(image_meta['updated_at'].isoformat()) - cachevol_props = { - 'id': image_meta['id'], - 'size': cachevol_size, - 'updated_at': updated_at, - 'image_id': image_meta['id'], - } - - try: - cachevol_name = self._verify_cache_volume(context, - image_meta, - image_service, - cachevol_props) - # A cache volume should be ready by now - # Create a clone from the cache volume - cache_vol = { - 'name': cachevol_name, - 'size': cachevol_size, - 'id': image_meta['id'], - } - clone_vol = self.create_cloned_volume(volume, cache_vol) - self._update_origin(volume['name'], cachevol_name) - except exception.VolumeBackendAPIException as exc: - exception_msg = ('Cannot clone image %(image)s to ' - 'volume %(volume)s. Error: %(error)s.', - {'volume': volume['name'], - 'image': image_meta['id'], - 'error': exc.msg}) - LOG.error(exception_msg) - return None, False - - return clone_vol, True - - def _verify_cache_volume(self, context, img_meta, - img_service, cachevol_props): - """Verify if we have a cache volume that we want. - - If we don't, create one. - If we do, check if it's been updated: - * If so, delete it and recreate a new volume - * If not, we are good. - - If it's out of date, delete it and create a new one. 
- - After the function returns, there should be a cache volume available, - ready for cloning. - """ - lcfg = self.configuration - cache_dir = '%s/' % lcfg.zfssa_cache_directory - cache_vol_obj = Volume() - cache_vol_obj.provider_location = self.mount_path + '/' + cache_dir - cache_vol_obj._name_id = cachevol_props['id'] - cachevol_name = cache_dir + cache_vol_obj.name - - LOG.debug('Verifying cache volume %s:', cachevol_name) - - try: - cache_vol = self.zfssa.get_volume(cachevol_name) - except exception.VolumeNotFound: - # There is no existing cache volume, create one: - LOG.debug('Cache volume not found. Creating one...') - return self._create_cache_volume(context, - img_meta, - img_service, - cachevol_props) - - # A cache volume does exist, check if it's updated: - if ((cache_vol['updated_at'] != cachevol_props['updated_at']) or - (cache_vol['image_id'] != cachevol_props['image_id'])): - if cache_vol['numclones'] > 0: - # The cache volume is updated, but has clones - exception_msg = (_('Cannot delete ' - 'cache volume: %(cachevol_name)s. ' - 'It was updated at %(updated_at)s ' - 'and currently has %(numclones)d ' - 'volume instances.'), - {'cachevol_name': cachevol_name, - 'updated_at': cachevol_props['updated_at'], - 'numclones': cache_vol['numclones']}) - LOG.error(exception_msg) - raise exception.VolumeBackendAPIException(data=exception_msg) - - # The cache volume is updated, but has no clone, so we delete it - # and re-create a new one: - super(ZFSSANFSDriver, self).delete_volume(cache_vol_obj) - return self._create_cache_volume(context, - img_meta, - img_service, - cachevol_props) - - return cachevol_name - - def _create_cache_volume(self, context, img_meta, - img_service, cachevol_props): - """Create a cache volume from an image. - - Returns name of the cache volume. 
- """ - lcfg = self.configuration - cache_dir = '%s/' % lcfg.zfssa_cache_directory - cache_vol = Volume() - cache_vol.provider_location = self.mount_path - cache_vol._name_id = cachevol_props['id'] - cache_vol.size = cachevol_props['size'] - cache_vol_name = cache_dir + cache_vol.name - - LOG.debug('Creating cache volume %s', cache_vol_name) - try: - self.create_volume(cache_vol) - LOG.debug('Copying image data:') - super(ZFSSANFSDriver, self).copy_image_to_volume(context, - cache_vol, - img_service, - img_meta['id']) - self.zfssa.webdavclient.request(src_file=cache_vol.name, - dst_file=cache_vol_name, - method='MOVE') - - except Exception as exc: - exc_msg = (_('Fail to create cache volume %(volume)s. ' - 'Error: %(err)s'), - {'volume': cache_vol_name, - 'err': six.text_type(exc)}) - LOG.error(exc_msg) - self.zfssa.delete_file(cache_vol_name) - raise exception.VolumeBackendAPIException(data=exc_msg) - - cachevol_meta = { - 'updated_at': cachevol_props['updated_at'], - 'image_id': cachevol_props['image_id'], - } - cachevol_meta.update({'numclones': '0'}) - self.zfssa.set_file_props(cache_vol_name, cachevol_meta) - return cache_vol_name - - def _create_snapshot_name(self): - """Creates a snapshot name from the date and time.""" - - return ('cinder-zfssa-nfs-snapshot-%s' % - dt.datetime.utcnow().isoformat()) - - def _get_share_capacity_info(self): - """Get available and used capacity info for the NFS share.""" - lcfg = self.configuration - share_details = self.zfssa.get_share(lcfg.zfssa_nfs_pool, - lcfg.zfssa_nfs_project, - lcfg.zfssa_nfs_share) - - free = share_details['space_available'] - used = share_details['space_total'] - return free, used - - @utils.synchronized('zfssanfs', external=True) - def _check_origin(self, origin): - """Verify the cache volume of a bootable volume. - - If the cache no longer has clone, it will be deleted. 
- """ - try: - cachevol_props = self.zfssa.get_volume(origin) - except exception.VolumeNotFound: - LOG.debug('Origin %s does not exist', origin) - return - - numclones = cachevol_props['numclones'] - LOG.debug('Number of clones: %d', numclones) - if numclones <= 1: - # This cache vol does not have any other clone - self.zfssa.delete_file(origin) - else: - cachevol_props = {'numclones': six.text_type(numclones - 1)} - self.zfssa.set_file_props(origin, cachevol_props) - - def _update_origin(self, vol_name, cachevol_name): - """Update WebDAV property of a volume. - - WebDAV properties are used to keep track of: - (1) The number of clones of a cache volume. - (2) The cache volume name (origin) of a bootable volume. - - To avoid race conditions when multiple volumes are created and needed - to be updated, a file lock is used to ensure that the properties are - updated properly. - """ - volume_origin = {'origin': cachevol_name} - self.zfssa.set_file_props(vol_name, volume_origin) - - cache_props = self.zfssa.get_volume(cachevol_name) - cache_props.update({'numclones': - six.text_type(cache_props['numclones'] + 1)}) - self.zfssa.set_file_props(cachevol_name, cache_props) - - def _update_volume_stats(self): - """Get volume stats from zfssa""" - self._ensure_shares_mounted() - data = {} - lcfg = self.configuration - backend_name = self.configuration.safe_get('volume_backend_name') - data['volume_backend_name'] = backend_name or self.__class__.__name__ - data['vendor_name'] = 'Oracle' - data['driver_version'] = self.VERSION - data['storage_protocol'] = self.protocol - - asn = self.zfssa.get_asn() - data['location_info'] = '%s:%s' % (asn, lcfg.zfssa_nfs_share) - - free, used = self._get_share_capacity_info() - capacity = float(free) + float(used) - ratio_used = used / capacity - - data['QoS_support'] = False - data['reserved_percentage'] = 0 - - used_percentage_limit = 100 - self.configuration.reserved_percentage - used_ratio_limit = used_percentage_limit / 100.0 - if 
(ratio_used > used_ratio_limit or - ratio_used >= self.configuration.max_over_subscription_ratio): - data['reserved_percentage'] = 100 - - data['total_capacity_gb'] = float(capacity) / units.Gi - data['free_capacity_gb'] = float(free) / units.Gi - - share_details = self.zfssa.get_share(lcfg.zfssa_nfs_pool, - lcfg.zfssa_nfs_project, - lcfg.zfssa_nfs_share) - pool_details = self.zfssa.get_pool_details(lcfg.zfssa_nfs_pool) - - data['zfssa_compression'] = share_details['compression'] - data['zfssa_encryption'] = share_details['encryption'] - data['zfssa_logbias'] = share_details['logbias'] - data['zfssa_poolprofile'] = pool_details['profile'] - data['zfssa_sparse'] = six.text_type(lcfg.nfs_sparsed_volumes) - - self._stats = data - - def migrate_volume(self, ctxt, volume, host): - LOG.debug('Attempting ZFSSA enabled volume migration. volume: %(id)s, ' - 'host: %(host)s, status=%(status)s', - {'id': volume['id'], - 'host': host, - 'status': volume['status']}) - - lcfg = self.configuration - default_ret = (False, None) - - if volume['status'] != "available": - LOG.debug('Only available volumes can be migrated using backend ' - 'assisted migration. Defaulting to generic migration.') - return default_ret - - if (host['capabilities']['vendor_name'] != 'Oracle' or - host['capabilities']['storage_protocol'] != self.protocol): - LOG.debug('Source and destination drivers need to be Oracle iSCSI ' - 'to use backend assisted migration. Defaulting to ' - 'generic migration.') - return default_ret - - if 'location_info' not in host['capabilities']: - LOG.debug('Could not find location_info in capabilities reported ' - 'by the destination driver. Defaulting to generic ' - 'migration.') - return default_ret - - loc_info = host['capabilities']['location_info'] - - try: - (tgt_asn, tgt_share) = loc_info.split(':') - except ValueError: - LOG.error("Location info needed for backend enabled volume " - "migration not in correct format: %s. 
Continuing " - "with generic volume migration.", loc_info) - return default_ret - - src_asn = self.zfssa.get_asn() - - if tgt_asn == src_asn and lcfg.zfssa_nfs_share == tgt_share: - LOG.info('Source and destination ZFSSA shares are the same. ' - 'Do nothing. volume: %s', volume['name']) - return (True, None) - - return (False, None) - - def update_migrated_volume(self, ctxt, volume, new_volume, - original_volume_status): - """Return model update for migrated volume. - - :param volume: The original volume that was migrated to this backend - :param new_volume: The migration volume object that was created on - this backend as part of the migration process - :param original_volume_status: The status of the original volume - :returns: model_update to update DB with any needed changes - """ - - original_name = CONF.volume_name_template % volume['id'] - current_name = CONF.volume_name_template % new_volume['id'] - - LOG.debug('Renaming migrated volume: %(cur)s to %(org)s.', - {'cur': current_name, - 'org': original_name}) - self.zfssa.create_volume_from_snapshot_file(src_file=current_name, - dst_file=original_name, - method='MOVE') - provider_location = new_volume['provider_location'] - return {'_name_id': None, 'provider_location': provider_location} - - def manage_existing(self, volume, existing_ref): - """Manage an existing volume in the ZFSSA backend. - - :param volume: Reference to the new volume. - :param existing_ref: Reference to the existing volume to be managed. 
- """ - existing_vol_name = self._get_existing_vol_name(existing_ref) - try: - vol_props = self.zfssa.get_volume(existing_vol_name) - except exception.VolumeNotFound: - err_msg = (_("Volume %s doesn't exist on the ZFSSA backend.") % - existing_vol_name) - LOG.error(err_msg) - raise exception.InvalidInput(reason=err_msg) - - self._verify_volume_to_manage(existing_vol_name, vol_props) - - try: - self.zfssa.rename_volume(existing_vol_name, volume['name']) - except Exception: - LOG.error("Failed to rename volume %(existing)s to %(new)s. " - "Volume manage failed.", - {'existing': existing_vol_name, - 'new': volume['name']}) - raise - - try: - self.zfssa.set_file_props(volume['name'], - {'cinder_managed': 'True'}) - except Exception: - self.zfssa.rename_volume(volume['name'], existing_vol_name) - LOG.error("Failed to set properties for volume %(existing)s. " - "Volume manage failed.", - {'existing': volume['name']}) - raise - - return {'provider_location': self.mount_path} - - def manage_existing_get_size(self, volume, existing_ref): - """Return size of the volume to be managed by manage_existing.""" - existing_vol_name = self._get_existing_vol_name(existing_ref) - - # The ZFSSA NFS driver only has one mounted share. - local_share_mount = self._get_mount_point_for_share( - self._mounted_shares[0]) - local_vol_path = os.path.join(local_share_mount, existing_vol_name) - - try: - if os.path.isfile(local_vol_path): - size = int(math.ceil(float( - utils.get_file_size(local_vol_path)) / units.Gi)) - except (OSError, ValueError): - err_msg = (_("Failed to get size of existing volume: %(vol)s. 
" - "Volume Manage failed."), {'vol': existing_vol_name}) - LOG.error(err_msg) - raise exception.VolumeBackendAPIException(data=err_msg) - - LOG.debug("Size volume: %(vol)s to be migrated is: %(size)s.", - {'vol': existing_vol_name, 'size': size}) - - return size - - def _verify_volume_to_manage(self, name, vol_props): - lcfg = self.configuration - - if lcfg.zfssa_manage_policy != 'strict': - return - - if vol_props['cinder_managed'] == "": - err_msg = (_("Unknown if the volume: %s to be managed is " - "already being managed by Cinder. Aborting manage " - "volume. Please add 'cinder_managed' custom schema " - "property to the volume and set its value to False. " - "Alternatively, Set the value of cinder config " - "policy 'zfssa_manage_policy' to 'loose' to " - "remove this restriction.") % name) - LOG.error(err_msg) - raise exception.InvalidInput(reason=err_msg) - - if vol_props['cinder_managed'] == 'True': - msg = (_("Volume: %s is already being managed by Cinder.") % name) - LOG.error(msg) - raise exception.ManageExistingAlreadyManaged(volume_ref=name) - - def unmanage(self, volume): - """Remove an existing volume from cinder management. - - :param volume: Reference to the volume to be unmanaged. - """ - new_name = 'unmanaged-' + volume['name'] - try: - self.zfssa.rename_volume(volume['name'], new_name) - except Exception: - LOG.error("Failed to rename volume %(existing)s to %(new)s. " - "Volume unmanage failed.", - {'existing': volume['name'], - 'new': new_name}) - raise - - try: - self.zfssa.set_file_props(new_name, {'cinder_managed': 'False'}) - except Exception: - self.zfssa.rename_volume(new_name, volume['name']) - LOG.error("Failed to set properties for volume %(existing)s. 
" - "Volume unmanage failed.", - {'existing': volume['name']}) - raise - - def _get_existing_vol_name(self, existing_ref): - if 'source-name' not in existing_ref: - msg = _("Reference to volume to be managed must contain " - "source-name.") - raise exception.ManageExistingInvalidReference( - existing_ref=existing_ref, reason=msg) - return existing_ref['source-name'] diff --git a/cinder/volume/drivers/zfssa/zfssarest.py b/cinder/volume/drivers/zfssa/zfssarest.py deleted file mode 100644 index e300a6f5d..000000000 --- a/cinder/volume/drivers/zfssa/zfssarest.py +++ /dev/null @@ -1,1347 +0,0 @@ -# Copyright (c) 2014, 2016, Oracle and/or its affiliates. All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
-""" -ZFS Storage Appliance Proxy -""" -import json - -from oslo_log import log -from oslo_service import loopingcall - -from cinder import exception -from cinder.i18n import _ -from cinder.volume.drivers.zfssa import restclient -from cinder.volume.drivers.zfssa import webdavclient - -LOG = log.getLogger(__name__) - - -def factory_restclient(url, **kwargs): - return restclient.RestClientURL(url, **kwargs) - - -class ZFSSAApi(object): - """ZFSSA API proxy class""" - - def __init__(self): - self.host = None - self.url = None - self.rclient = None - - def __del__(self): - if self.rclient and self.rclient.islogin(): - self.rclient.logout() - - def _is_pool_owned(self, pdata): - """Returns True if the pool's owner is the same as the host.""" - svc = '/api/system/v1/version' - ret = self.rclient.get(svc) - if ret.status != restclient.Status.OK: - exception_msg = (_('Error getting version: ' - 'svc: %(svc)s.' - 'Return code: %(ret.status)d ' - 'Message: %(ret.data)s.') - % {'svc': svc, - 'ret.status': ret.status, - 'ret.data': ret.data}) - LOG.error(exception_msg) - raise exception.VolumeBackendAPIException(data=exception_msg) - - vdata = json.loads(ret.data) - return vdata['version']['asn'] == pdata['pool']['asn'] and \ - vdata['version']['nodename'] == pdata['pool']['owner'] - - def get_pool_details(self, pool): - """Get properties of a pool.""" - svc = '/api/storage/v1/pools/%s' % pool - ret = self.rclient.get(svc) - if ret.status != restclient.Status.OK: - exception_msg = (_('Error Getting Pool Stats: ' - 'Pool: %(pool)s ' - 'Return code: %(status)d ' - 'Message: %(data)s.') - % {'pool': pool, - 'status': ret.status, - 'data': ret.data}) - LOG.error(exception_msg) - raise exception.VolumeBackendAPIException(data=exception_msg) - - val = json.loads(ret.data) - - if not self._is_pool_owned(val): - exception_msg = (_('Error Pool ownership: ' - 'Pool %(pool)s is not owned ' - 'by %(host)s.') - % {'pool': pool, - 'host': self.host}) - LOG.error(exception_msg) - raise 
exception.InvalidInput(reason=exception_msg) - return val['pool'] - - def set_host(self, host, timeout=None): - self.host = host - self.url = "https://" + self.host + ":215" - self.rclient = factory_restclient(self.url, timeout=timeout) - - def login(self, auth_str): - """Login to the appliance""" - if self.rclient and not self.rclient.islogin(): - self.rclient.login(auth_str) - - def logout(self): - self.rclient.logout() - - def verify_service(self, service, status='online'): - """Checks whether a service is online or not""" - svc = '/api/service/v1/services/' + service - ret = self.rclient.get(svc) - - if ret.status != restclient.Status.OK: - exception_msg = (_('Error Verifying ' - 'Service: %(service)s ' - 'Return code: %(ret.status)d ' - 'Message: %(ret.data)s.') - % {'service': service, - 'ret.status': ret.status, - 'ret.data': ret.data}) - - LOG.error(exception_msg) - raise exception.VolumeBackendAPIException(data=exception_msg) - - data = json.loads(ret.data)['service'] - - if data[''] != status: - exception_msg = (_('%(service)s Service is not %(status)s ' - 'on storage appliance: %(host)s') - % {'service': service, - 'status': status, - 'host': self.host}) - LOG.error(exception_msg) - raise exception.VolumeBackendAPIException(data=exception_msg) - - def get_asn(self): - """Returns appliance asn.""" - svc = '/api/system/v1/version' - ret = self.rclient.get(svc) - if ret.status != restclient.Status.OK: - exception_msg = (_('Error getting appliance version details. 
' - 'Return code: %(ret.status)d ' - 'Message: %(ret.data)s .') - % {'ret.status': ret.status, - 'ret.data': ret.data}) - LOG.error(exception_msg) - raise exception.VolumeBackendAPIException(data=exception_msg) - - val = json.loads(ret.data) - return val['version']['asn'] - - def get_replication_targets(self): - """Returns all replication targets configured on the appliance.""" - svc = '/api/storage/v1/replication/targets' - ret = self.rclient.get(svc) - if ret.status != restclient.Status.OK: - exception_msg = (_('Error getting replication target details. ' - 'Return code: %(ret.status)d ' - 'Message: %(ret.data)s .') - % {'ret.status': ret.status, - 'ret.data': ret.data}) - LOG.error(exception_msg) - raise exception.VolumeBackendAPIException(data=exception_msg) - - val = json.loads(ret.data) - return val - - def edit_inherit_replication_flag(self, pool, project, volume, set=True): - """Edit the inherit replication flag for volume.""" - svc = ('/api/storage/v1/pools/%(pool)s/projects/%(project)s' - '/filesystems/%(volume)s/replication' - % {'pool': pool, - 'project': project, - 'volume': volume}) - arg = {'inherited': set} - ret = self.rclient.put(svc, arg) - - if ret.status != restclient.Status.ACCEPTED: - exception_msg = (_('Error setting replication inheritance ' - 'to %(set)s ' - 'for volume: %(vol)s ' - 'project %(project)s ' - 'Return code: %(ret.status)d ' - 'Message: %(ret.data)s .') - % {'set': set, - 'project': project, - 'vol': volume, - 'ret.status': ret.status, - 'ret.data': ret.data}) - LOG.error(exception_msg) - raise exception.VolumeBackendAPIException(data=exception_msg) - - def create_replication_action(self, host_pool, host_project, tgt_name, - tgt_pool, volume): - """Create a replication action.""" - arg = {'pool': host_pool, - 'project': host_project, - 'target_pool': tgt_pool, - 'target': tgt_name} - - if volume is not None: - arg.update({'share': volume}) - - svc = '/api/storage/v1/replication/actions' - ret = self.rclient.post(svc, arg) - if 
ret.status != restclient.Status.CREATED: - exception_msg = (_('Error Creating replication action on: ' - 'pool: %(pool)s ' - 'Project: %(proj)s ' - 'volume: %(vol)s ' - 'for target: %(tgt)s and pool: %(tgt_pool)s' - 'Return code: %(ret.status)d ' - 'Message: %(ret.data)s .') - % {'pool': host_pool, - 'proj': host_project, - 'vol': volume, - 'tgt': tgt_name, - 'tgt_pool': tgt_pool, - 'ret.status': ret.status, - 'ret.data': ret.data}) - LOG.error(exception_msg) - raise exception.VolumeBackendAPIException(data=exception_msg) - - val = json.loads(ret.data) - return val['action']['id'] - - def delete_replication_action(self, action_id): - """Delete a replication action.""" - svc = '/api/storage/v1/replication/actions/%s' % action_id - ret = self.rclient.delete(svc) - if ret.status != restclient.Status.NO_CONTENT: - exception_msg = (_('Error Deleting ' - 'replication action: %(id)s ' - 'Return code: %(ret.status)d ' - 'Message: %(ret.data)s.') - % {'id': action_id, - 'ret.status': ret.status, - 'ret.data': ret.data}) - LOG.error(exception_msg) - raise exception.VolumeBackendAPIException(data=exception_msg) - - def send_repl_update(self, action_id): - """Send replication update - - Send replication update to the target appliance and then wait for - it to complete. - """ - - svc = '/api/storage/v1/replication/actions/%s/sendupdate' % action_id - ret = self.rclient.put(svc) - if ret.status != restclient.Status.ACCEPTED: - exception_msg = (_('Error sending replication update ' - 'for action id: %(id)s . ' - 'Return code: %(ret.status)d ' - 'Message: %(ret.data)s .') - % {'id': action_id, - 'ret.status': ret.status, - 'ret.data': ret.data}) - LOG.error(exception_msg) - raise exception.VolumeBackendAPIException(data=exception_msg) - - def _loop_func(): - svc = '/api/storage/v1/replication/actions/%s' % action_id - ret = self.rclient.get(svc) - if ret.status != restclient.Status.OK: - exception_msg = (_('Error getting replication action: %(id)s. 
' - 'Return code: %(ret.status)d ' - 'Message: %(ret.data)s .') - % {'id': action_id, - 'ret.status': ret.status, - 'ret.data': ret.data}) - LOG.error(exception_msg) - raise exception.VolumeBackendAPIException(data=exception_msg) - - val = json.loads(ret.data) - if val['action']['last_result'] == 'success': - raise loopingcall.LoopingCallDone() - elif (val['action']['last_result'] == '' and - val['action']['state'] == 'sending'): - pass - else: - exception_msg = (_('Error sending replication update. ' - 'Returned error: %(err)s. ' - 'Action: %(id)s.') - % {'err': val['action']['last_result'], - 'id': action_id}) - LOG.error(exception_msg) - raise exception.VolumeBackendAPIException(data=exception_msg) - - timer = loopingcall.FixedIntervalLoopingCall(_loop_func) - timer.start(interval=5).wait() - - def get_replication_source(self, asn): - """Return the replication source json which has a matching asn.""" - svc = "/api/storage/v1/replication/sources" - ret = self.rclient.get(svc) - if ret.status != restclient.Status.OK: - exception_msg = (_('Error getting replication source details. ' - 'Return code: %(ret.status)d ' - 'Message: %(ret.data)s .') - % {'ret.status': ret.status, - 'ret.data': ret.data}) - LOG.error(exception_msg) - raise exception.VolumeBackendAPIException(data=exception_msg) - - val = json.loads(ret.data) - - for source in val['sources']: - if source['asn'] == asn: - return source - return None - - def sever_replication(self, package, src_name, project=None): - """Sever Replication at the destination. 
- - This method will sever the package and move the volume to a project, - if project name is not passed in then the package name is selected - as the project name - """ - - svc = ('/api/storage/v1/replication/sources/%(src)s/packages/%(pkg)s' - '/sever' % {'src': src_name, 'pkg': package}) - - if not project: - project = package - - arg = {'projname': project} - ret = self.rclient.put(svc, arg) - - if ret.status != restclient.Status.ACCEPTED: - exception_msg = (_('Error severing the package: %(package)s ' - 'from source: %(src)s ' - 'Return code: %(ret.status)d ' - 'Message: %(ret.data)s .') - % {'package': package, - 'src': src_name, - 'ret.status': ret.status, - 'ret.data': ret.data}) - LOG.error(exception_msg) - raise exception.VolumeBackendAPIException(data=exception_msg) - - def move_volume(self, pool, project, volume, tgt_project): - """Move a LUN from one project to another within the same pool.""" - svc = ('/api/storage/v1/pools/%(pool)s/projects/%(project)s' - '/filesystems/%(volume)s' % {'pool': pool, - 'project': project, - 'volume': volume}) - - arg = {'project': tgt_project} - - ret = self.rclient.put(svc, arg) - if ret.status != restclient.Status.ACCEPTED: - exception_msg = (_('Error moving volume: %(vol)s ' - 'from source project: %(src)s ' - 'to target project: %(tgt)s ' - 'Return code: %(ret.status)d ' - 'Message: %(ret.data)s .') - % {'vol': volume, - 'src': project, - 'tgt': tgt_project, - 'ret.status': ret.status, - 'ret.data': ret.data}) - LOG.error(exception_msg) - raise exception.VolumeBackendAPIException(data=exception_msg) - - def delete_project(self, pool, project): - """Delete a project.""" - svc = ('/api/storage/v1/pools/%(pool)s/projects/%(project)s' % - {'pool': pool, - 'project': project}) - ret = self.rclient.delete(svc) - if ret.status != restclient.Status.NO_CONTENT: - exception_msg = (_('Error Deleting ' - 'project: %(project)s ' - 'on pool: %(pool)s ' - 'Return code: %(ret.status)d ' - 'Message: %(ret.data)s.') - % {'project': 
project, - 'pool': pool, - 'ret.status': ret.status, - 'ret.data': ret.data}) - LOG.error(exception_msg) - raise exception.VolumeBackendAPIException(data=exception_msg) - - def get_project_stats(self, pool, project): - """Get project stats. - - Get available space and total space of a project - returns (avail, total). - """ - svc = '/api/storage/v1/pools/%s/projects/%s' % (pool, project) - ret = self.rclient.get(svc) - if ret.status != restclient.Status.OK: - exception_msg = (_('Error Getting Project Stats: ' - 'Pool: %(pool)s ' - 'Project: %(project)s ' - 'Return code: %(ret.status)d ' - 'Message: %(ret.data)s.') - % {'pool': pool, - 'project': project, - 'ret.status': ret.status, - 'ret.data': ret.data}) - LOG.error(exception_msg) - raise exception.VolumeBackendAPIException(data=exception_msg) - - val = json.loads(ret.data) - avail = val['project']['space_available'] - total = avail + val['project']['space_total'] - - return avail, total - - def create_project(self, pool, project, compression=None, logbias=None): - """Create a project on a pool. - - Check first whether the pool exists. 
- """ - self.verify_pool(pool) - svc = '/api/storage/v1/pools/' + pool + '/projects/' + project - ret = self.rclient.get(svc) - if ret.status != restclient.Status.OK: - svc = '/api/storage/v1/pools/' + pool + '/projects' - arg = { - 'name': project - } - if compression and compression != '': - arg.update({'compression': compression}) - if logbias and logbias != '': - arg.update({'logbias': logbias}) - - ret = self.rclient.post(svc, arg) - if ret.status != restclient.Status.CREATED: - exception_msg = (_('Error Creating Project: ' - '%(project)s on ' - 'Pool: %(pool)s ' - 'Return code: %(ret.status)d ' - 'Message: %(ret.data)s .') - % {'project': project, - 'pool': pool, - 'ret.status': ret.status, - 'ret.data': ret.data}) - LOG.error(exception_msg) - raise exception.VolumeBackendAPIException(data=exception_msg) - - def create_initiator(self, initiator, alias, chapuser=None, - chapsecret=None): - """Create an iSCSI initiator.""" - - svc = '/api/san/v1/iscsi/initiators/alias=' + alias - ret = self.rclient.get(svc) - if ret.status != restclient.Status.OK: - svc = '/api/san/v1/iscsi/initiators' - arg = { - 'initiator': initiator, - 'alias': alias - } - if chapuser and chapuser != '' and chapsecret and chapsecret != '': - arg.update({'chapuser': chapuser, - 'chapsecret': chapsecret}) - - ret = self.rclient.post(svc, arg) - if ret.status != restclient.Status.CREATED: - exception_msg = (_('Error Creating Initiator: ' - '%(initiator)s on ' - 'Alias: %(alias)s ' - 'Return code: %(ret.status)d ' - 'Message: %(ret.data)s .') - % {'initiator': initiator, - 'alias': alias, - 'ret.status': ret.status, - 'ret.data': ret.data}) - LOG.error(exception_msg) - raise exception.VolumeBackendAPIException(data=exception_msg) - - def add_to_initiatorgroup(self, initiator, initiatorgroup): - """Add an iSCSI initiator to initiatorgroup""" - svc = '/api/san/v1/iscsi/initiator-groups/' + initiatorgroup - ret = self.rclient.get(svc) - if ret.status != restclient.Status.OK: - svc = 
'/api/san/v1/iscsi/initiator-groups' - arg = { - 'name': initiatorgroup, - 'initiators': [initiator] - } - ret = self.rclient.post(svc, arg) - if ret.status != restclient.Status.CREATED: - exception_msg = (_('Error Adding Initiator: ' - '%(initiator)s on group' - 'InitiatorGroup: %(initiatorgroup)s ' - 'Return code: %(ret.status)d ' - 'Message: %(ret.data)s .') - % {'initiator': initiator, - 'initiatorgroup': initiatorgroup, - 'ret.status': ret.status, - 'ret.data': ret.data}) - LOG.error(exception_msg) - raise exception.VolumeBackendAPIException(data=exception_msg) - else: - val = json.loads(ret.data) - inits = val['group']['initiators'] - if inits is None: - exception_msg = (_('Error Getting Initiators: ' - 'InitiatorGroup: %(initiatorgroup)s ' - 'Return code: %(ret.status)d ' - 'Message: %(ret.data)s .') - % {'initiatorgroup': initiatorgroup, - 'ret.status': ret.status, - 'ret.data': ret.data}) - LOG.error(exception_msg) - raise exception.VolumeBackendAPIException(data=exception_msg) - - if initiator in inits: - return - - inits.append(initiator) - svc = '/api/san/v1/iscsi/initiator-groups/' + initiatorgroup - arg = { - 'initiators': inits - } - ret = self.rclient.put(svc, arg) - if ret.status != restclient.Status.ACCEPTED: - exception_msg = (_('Error Adding Initiator: ' - '%(initiator)s on group' - 'InitiatorGroup: %(initiatorgroup)s ' - 'Return code: %(ret.status)d ' - 'Message: %(ret.data)s .') - % {'initiator': initiator, - 'initiatorgroup': initiatorgroup, - 'ret.status': ret.status, - 'ret.data': ret.data}) - LOG.error(exception_msg) - raise exception.VolumeBackendAPIException(data=exception_msg) - - def create_target(self, alias, interfaces=None, tchapuser=None, - tchapsecret=None): - """Create an iSCSI target. 
- - :param interfaces: an array with network interfaces - :param tchapuser, tchapsecret: target's chapuser and chapsecret - :returns: target iqn - """ - svc = '/api/san/v1/iscsi/targets/alias=' + alias - ret = self.rclient.get(svc) - if ret.status != restclient.Status.OK: - svc = '/api/san/v1/iscsi/targets' - arg = { - 'alias': alias - } - - if tchapuser and tchapuser != '' and tchapsecret and \ - tchapsecret != '': - arg.update({'targetchapuser': tchapuser, - 'targetchapsecret': tchapsecret, - 'auth': 'chap'}) - - if interfaces is not None and len(interfaces) > 0: - arg.update({'interfaces': interfaces}) - - ret = self.rclient.post(svc, arg) - if ret.status != restclient.Status.CREATED: - exception_msg = (_('Error Creating Target: ' - '%(alias)s' - 'Return code: %(ret.status)d ' - 'Message: %(ret.data)s .') - % {'alias': alias, - 'ret.status': ret.status, - 'ret.data': ret.data}) - LOG.error(exception_msg) - raise exception.VolumeBackendAPIException(data=exception_msg) - - val = json.loads(ret.data) - return val['target']['iqn'] - - def get_target(self, alias): - """Get an iSCSI target iqn.""" - svc = '/api/san/v1/iscsi/targets/alias=' + alias - ret = self.rclient.get(svc) - if ret.status != restclient.Status.OK: - exception_msg = (_('Error Getting Target: ' - '%(alias)s' - 'Return code: %(ret.status)d ' - 'Message: %(ret.data)s .') - % {'alias': alias, - 'ret.status': ret.status, - 'ret.data': ret.data}) - LOG.error(exception_msg) - raise exception.VolumeBackendAPIException(data=exception_msg) - - val = json.loads(ret.data) - return val['target']['iqn'] - - def add_to_targetgroup(self, iqn, targetgroup): - """Add an iSCSI target to targetgroup.""" - svc = '/api/san/v1/iscsi/target-groups/' + targetgroup - ret = self.rclient.get(svc) - if ret.status != restclient.Status.OK: - svccrt = '/api/san/v1/iscsi/target-groups' - arg = { - 'name': targetgroup, - 'targets': [iqn] - } - - ret = self.rclient.post(svccrt, arg) - if ret.status != restclient.Status.CREATED: - 
exception_msg = (_('Error Creating TargetGroup: ' - '%(targetgroup)s with' - 'IQN: %(iqn)s' - 'Return code: %(ret.status)d ' - 'Message: %(ret.data)s .') - % {'targetgroup': targetgroup, - 'iqn': iqn, - 'ret.status': ret.status, - 'ret.data': ret.data}) - LOG.error(exception_msg) - raise exception.VolumeBackendAPIException(data=exception_msg) - - return - - arg = { - 'targets': [iqn] - } - - ret = self.rclient.put(svc, arg) - if ret.status != restclient.Status.ACCEPTED: - exception_msg = (_('Error Adding to TargetGroup: ' - '%(targetgroup)s with' - 'IQN: %(iqn)s' - 'Return code: %(ret.status)d ' - 'Message: %(ret.data)s.') - % {'targetgroup': targetgroup, - 'iqn': iqn, - 'ret.status': ret.status, - 'ret.data': ret.data}) - LOG.error(exception_msg) - raise exception.VolumeBackendAPIException(data=exception_msg) - - def verify_pool(self, pool): - """Checks whether pool exists.""" - svc = '/api/storage/v1/pools/' + pool - ret = self.rclient.get(svc) - if ret.status != restclient.Status.OK: - exception_msg = (_('Error Verifying Pool: ' - '%(pool)s ' - 'Return code: %(ret.status)d ' - 'Message: %(ret.data)s.') - % {'pool': pool, - 'ret.status': ret.status, - 'ret.data': ret.data}) - LOG.error(exception_msg) - raise exception.VolumeBackendAPIException(data=exception_msg) - - def verify_project(self, pool, project): - """Checks whether project exists.""" - svc = '/api/storage/v1/pools/' + pool + '/projects/' + project - ret = self.rclient.get(svc) - if ret.status != restclient.Status.OK: - exception_msg = (_('Error Verifying ' - 'Project: %(project)s on ' - 'Pool: %(pool)s ' - 'Return code: %(ret.status)d ' - 'Message: %(ret.data)s.') - % {'project': project, - 'pool': pool, - 'ret.status': ret.status, - 'ret.data': ret.data}) - LOG.error(exception_msg) - raise exception.VolumeBackendAPIException(data=exception_msg) - - def verify_initiator(self, iqn): - """Check whether initiator iqn exists.""" - svc = '/api/san/v1/iscsi/initiators/' + iqn - ret = self.rclient.get(svc) - 
if ret.status != restclient.Status.OK: - exception_msg = (_('Error Verifying ' - 'Initiator: %(iqn)s ' - 'Return code: %(ret.status)d ' - 'Message: %(ret.data)s.') - % {'initiator': iqn, - 'ret.status': ret.status, - 'ret.data': ret.data}) - LOG.error(exception_msg) - raise exception.VolumeBackendAPIException(data=exception_msg) - - def verify_target(self, alias): - """Check whether target alias exists.""" - svc = '/api/san/v1/iscsi/targets/alias=' + alias - ret = self.rclient.get(svc) - if ret.status != restclient.Status.OK: - exception_msg = (_('Error Verifying ' - 'Target: %(alias)s ' - 'Return code: %(ret.status)d ' - 'Message: %(ret.data)s.') - % {'alias': alias, - 'ret.status': ret.status, - 'ret.data': ret.data}) - LOG.error(exception_msg) - raise exception.VolumeBackendAPIException(data=exception_msg) - - def create_lun(self, pool, project, lun, volsize, targetgroup, specs): - """Create a LUN. - - specs - contains volume properties (e.g blocksize, compression). - """ - svc = '/api/storage/v1/pools/' + pool + '/projects/' + \ - project + '/luns' - arg = { - 'name': lun, - 'volsize': volsize, - 'targetgroup': targetgroup, - 'initiatorgroup': 'com.sun.ms.vss.hg.maskAll' - } - if specs: - arg.update(specs) - - ret = self.rclient.post(svc, arg) - if ret.status != restclient.Status.CREATED: - exception_msg = (_('Error Creating ' - 'Volume: %(lun)s ' - 'Size: %(size)s ' - 'Return code: %(ret.status)d ' - 'Message: %(ret.data)s.') - % {'lun': lun, - 'size': volsize, - 'ret.status': ret.status, - 'ret.data': ret.data}) - LOG.error(exception_msg) - raise exception.VolumeBackendAPIException(data=exception_msg) - - val = json.loads(ret.data) - return val - - def get_lun(self, pool, project, lun): - """return iscsi lun properties.""" - svc = '/api/storage/v1/pools/' + pool + '/projects/' + \ - project + "/luns/" + lun - ret = self.rclient.get(svc) - if ret.status == restclient.Status.NOT_FOUND: - # Sometimes a volume exists in cinder for which there is no - # 
corresponding LUN (e.g. LUN create failed). In this case, - # allow deletion to complete (without doing anything on the - # ZFSSA). Any other exception should be passed up. - LOG.warning('LUN with name %(lun)s not found in project ' - '%(project)s, pool %(pool)s.', - {'lun': lun, - 'project': project, - 'pool': pool}) - raise exception.VolumeNotFound(volume_id=lun) - elif ret.status != restclient.Status.OK: - exception_msg = (_('Error Getting ' - 'Volume: %(lun)s on ' - 'Pool: %(pool)s ' - 'Project: %(project)s ' - 'Return code: %(ret.status)d ' - 'Message: %(ret.data)s.') - % {'lun': lun, - 'pool': pool, - 'project': project, - 'ret.status': ret.status, - 'ret.data': ret.data}) - LOG.error(exception_msg) - raise exception.VolumeBackendAPIException(data=exception_msg) - - val = json.loads(ret.data) - - # For backward-compatibility with 2013.1.2.x, convert initiatorgroup - # and number to lists if they're not already - def _listify(item): - return item if isinstance(item, list) else [item] - - initiatorgroup = _listify(val['lun']['initiatorgroup']) - number = _listify(val['lun']['assignednumber']) - - # Hide special maskAll value when LUN is not currently presented to - # any initiatorgroups: - if 'com.sun.ms.vss.hg.maskAll' in initiatorgroup: - initiatorgroup = [] - number = [] - - ret = { - 'name': val['lun']['name'], - 'guid': val['lun']['lunguid'], - 'number': number, - 'initiatorgroup': initiatorgroup, - 'size': val['lun']['volsize'], - 'nodestroy': val['lun']['nodestroy'], - 'targetgroup': val['lun']['targetgroup'] - } - if 'origin' in val['lun']: - ret.update({'origin': val['lun']['origin']}) - if 'custom:image_id' in val['lun']: - ret.update({'image_id': val['lun']['custom:image_id']}) - ret.update({'updated_at': val['lun']['custom:updated_at']}) - if 'custom:cinder_managed' in val['lun']: - ret.update({'cinder_managed': val['lun']['custom:cinder_managed']}) - - return ret - - def get_lun_snapshot(self, pool, project, lun, snapshot): - """Return iscsi lun 
snapshot properties.""" - svc = ('/api/storage/v1/pools/' + pool + '/projects/' + - project + '/luns/' + lun + '/snapshots/' + snapshot) - - ret = self.rclient.get(svc) - if ret.status != restclient.Status.OK: - exception_msg = ('Error Getting ' - 'Snapshot: %(snapshot)s of ' - 'Volume: %(lun)s in ' - 'Pool: %(pool)s, ' - 'Project: %(project)s ' - 'Return code: %(ret.status)d, ' - 'Message: %(ret.data)s.', - {'snapshot': snapshot, - 'lun': lun, - 'pool': pool, - 'project': project, - 'ret.status': ret.status, - 'ret.data': ret.data}) - LOG.error(exception_msg) - raise exception.SnapshotNotFound(snapshot_id=snapshot) - - val = json.loads(ret.data)['snapshot'] - ret = { - 'name': val['name'], - 'numclones': val['numclones'], - } - return ret - - def set_lun_initiatorgroup(self, pool, project, lun, initiatorgroup): - """Set the initiatorgroup property of a LUN.""" - - # For backward-compatibility with 2013.1.2.x, set initiatorgroup - # to a single string if there's only one item in the list. - # Live-migration won't work, but existing functionality should still - # work. If the list is empty, substitute the special "maskAll" value. - if len(initiatorgroup) == 0: - initiatorgroup = 'com.sun.ms.vss.hg.maskAll' - elif len(initiatorgroup) == 1: - initiatorgroup = initiatorgroup[0] - - svc = '/api/storage/v1/pools/' + pool + '/projects/' + \ - project + '/luns/' + lun - arg = { - 'initiatorgroup': initiatorgroup - } - - LOG.debug('Setting LUN initiatorgroup. 
pool=%(pool)s, ' - 'project=%(project)s, lun=%(lun)s, ' - 'initiatorgroup=%(initiatorgroup)s', - {'project': project, - 'pool': pool, - 'lun': lun, - 'initiatorgroup': initiatorgroup}) - - ret = self.rclient.put(svc, arg) - if ret.status != restclient.Status.ACCEPTED: - LOG.error('Error Setting Volume: %(lun)s to InitiatorGroup: ' - '%(initiatorgroup)s Pool: %(pool)s Project: ' - '%(project)s Return code: %(ret.status)d Message: ' - '%(ret.data)s.', - {'lun': lun, - 'initiatorgroup': initiatorgroup, - 'pool': pool, - 'project': project, - 'ret.status': ret.status, - 'ret.data': ret.data}) - - def delete_lun(self, pool, project, lun): - """delete iscsi lun.""" - svc = '/api/storage/v1/pools/' + pool + '/projects/' + \ - project + '/luns/' + lun - - ret = self.rclient.delete(svc) - if ret.status != restclient.Status.NO_CONTENT: - exception_msg = (_('Error Deleting Volume: %(lun)s from ' - 'Pool: %(pool)s, Project: %(project)s. ' - 'Return code: %(ret.status)d, ' - 'Message: %(ret.data)s.'), - {'lun': lun, - 'pool': pool, - 'project': project, - 'ret.status': ret.status, - 'ret.data': ret.data}) - LOG.error(exception_msg) - if ret.status == restclient.Status.FORBIDDEN: - # This means that the lun exists but it can't be deleted: - raise exception.VolumeBackendAPIException(data=exception_msg) - - def create_snapshot(self, pool, project, lun, snapshot): - """create snapshot.""" - svc = '/api/storage/v1/pools/' + pool + '/projects/' + \ - project + '/luns/' + lun + '/snapshots' - arg = { - 'name': snapshot - } - - ret = self.rclient.post(svc, arg) - if ret.status != restclient.Status.CREATED: - exception_msg = (_('Error Creating ' - 'Snapshot: %(snapshot)s on' - 'Volume: %(lun)s to ' - 'Pool: %(pool)s ' - 'Project: %(project)s ' - 'Return code: %(ret.status)d ' - 'Message: %(ret.data)s.'), - {'snapshot': snapshot, - 'lun': lun, - 'pool': pool, - 'project': project, - 'ret.status': ret.status, - 'ret.data': ret.data}) - LOG.error(exception_msg) - raise 
exception.VolumeBackendAPIException(data=exception_msg) - - def delete_snapshot(self, pool, project, lun, snapshot): - """delete snapshot.""" - svc = '/api/storage/v1/pools/' + pool + '/projects/' + \ - project + '/luns/' + lun + '/snapshots/' + snapshot - - ret = self.rclient.delete(svc) - if ret.status != restclient.Status.NO_CONTENT: - exception_msg = (_('Error Deleting ' - 'Snapshot: %(snapshot)s on ' - 'Volume: %(lun)s to ' - 'Pool: %(pool)s ' - 'Project: %(project)s ' - 'Return code: %(ret.status)d ' - 'Message: %(ret.data)s.') - % {'snapshot': snapshot, - 'lun': lun, - 'pool': pool, - 'project': project, - 'ret.status': ret.status, - 'ret.data': ret.data}) - LOG.error(exception_msg) - raise exception.VolumeBackendAPIException(data=exception_msg) - - def clone_snapshot(self, pool, project, lun, snapshot, clone_proj, clone, - specs): - """clone 'snapshot' to a lun named 'clone' in project 'clone_proj'.""" - svc = '/api/storage/v1/pools/' + pool + '/projects/' + \ - project + '/luns/' + lun + '/snapshots/' + snapshot + '/clone' - arg = { - 'project': clone_proj, - 'share': clone, - 'nodestroy': True - } - if specs: - arg.update(specs) - # API fails if volblocksize is specified when cloning - arg.pop('volblocksize', '') - - ret = self.rclient.put(svc, arg) - if ret.status != restclient.Status.CREATED: - exception_msg = (_('Error Cloning ' - 'Snapshot: %(snapshot)s on ' - 'Volume: %(lun)s of ' - 'Pool: %(pool)s ' - 'Project: %(project)s ' - 'Clone project: %(clone_proj)s ' - 'Return code: %(ret.status)d ' - 'Message: %(ret.data)s.') - % {'snapshot': snapshot, - 'lun': lun, - 'pool': pool, - 'project': project, - 'clone_proj': clone_proj, - 'ret.status': ret.status, - 'ret.data': ret.data}) - LOG.error(exception_msg) - raise exception.VolumeBackendAPIException(data=exception_msg) - - def set_lun_props(self, pool, project, lun, **kargs): - """set lun properties.""" - svc = '/api/storage/v1/pools/' + pool + '/projects/' + \ - project + '/luns/' + lun - if kargs is 
None: - return - - if 'schema' in kargs: - kargs.update(kargs.pop('schema')) - - ret = self.rclient.put(svc, kargs) - if ret.status != restclient.Status.ACCEPTED: - exception_msg = (_('Error Setting props ' - 'Props: %(props)s on ' - 'Volume: %(lun)s of ' - 'Pool: %(pool)s ' - 'Project: %(project)s ' - 'Return code: %(ret.status)d ' - 'Message: %(ret.data)s.') - % {'props': kargs, - 'lun': lun, - 'pool': pool, - 'project': project, - 'ret.status': ret.status, - 'ret.data': ret.data}) - LOG.error(exception_msg) - raise exception.VolumeBackendAPIException(data=exception_msg) - - def num_clones(self, pool, project, lun, snapshot): - """Checks whether snapshot has clones or not.""" - svc = '/api/storage/v1/pools/' + pool + '/projects/' + \ - project + '/luns/' + lun + '/snapshots/' + snapshot - - ret = self.rclient.get(svc) - if ret.status != restclient.Status.OK: - exception_msg = (_('Error Getting ' - 'Snapshot: %(snapshot)s on ' - 'Volume: %(lun)s to ' - 'Pool: %(pool)s ' - 'Project: %(project)s ' - 'Return code: %(ret.status)d ' - 'Message: %(ret.data)s.') - % {'snapshot': snapshot, - 'lun': lun, - 'pool': pool, - 'project': project, - 'ret.status': ret.status, - 'ret.data': ret.data}) - LOG.error(exception_msg) - raise exception.VolumeBackendAPIException(data=exception_msg) - - val = json.loads(ret.data) - return val['snapshot']['numclones'] - - def get_initiator_initiatorgroup(self, initiator): - """Returns the initiator group of the initiator.""" - groups = [] - svc = "/api/san/v1/iscsi/initiator-groups" - ret = self.rclient.get(svc) - if ret.status != restclient.Status.OK: - msg = _('Error getting initiator groups.') - LOG.error(msg) - raise exception.VolumeBackendAPIException(data=msg) - val = json.loads(ret.data) - for initiator_group in val['groups']: - if initiator in initiator_group['initiators']: - groups.append(initiator_group["name"]) - return groups - - def create_schema(self, schema): - """Create a custom ZFSSA schema.""" - base = 
'/api/storage/v1/schema' - - svc = "%(base)s/%(prop)s" % {'base': base, 'prop': schema['property']} - ret = self.rclient.get(svc) - if ret.status == restclient.Status.OK: - LOG.warning('Property %s already exists.', schema['property']) - return - - ret = self.rclient.post(base, schema) - if ret.status != restclient.Status.CREATED: - exception_msg = (_('Error Creating ' - 'Property: %(property)s ' - 'Type: %(type)s ' - 'Description: %(description)s ' - 'Return code: %(ret.status)d ' - 'Message: %(ret.data)s.') - % {'property': schema['property'], - 'type': schema['type'], - 'description': schema['description'], - 'ret.status': ret.status, - 'ret.data': ret.data}) - LOG.error(exception_msg) - raise exception.VolumeBackendAPIException(data=exception_msg) - - def create_schemas(self, schemas): - """Create multiple custom ZFSSA schemas.""" - ret = [] - for schema in schemas: - res = self.create_schema(schema) - ret.append(res) - return ret - - -class ZFSSANfsApi(ZFSSAApi): - """ZFSSA API proxy class for NFS driver""" - projects_path = '/api/storage/v1/pools/%s/projects' - project_path = projects_path + '/%s' - - shares_path = project_path + '/filesystems' - share_path = shares_path + '/%s' - share_snapshots_path = share_path + '/snapshots' - share_snapshot_path = share_snapshots_path + '/%s' - - services_path = '/api/service/v1/services/' - - def __init__(self, *args, **kwargs): - super(ZFSSANfsApi, self).__init__(*args, **kwargs) - self.webdavclient = None - - def set_webdav(self, https_path, auth_str): - self.webdavclient = webdavclient.ZFSSAWebDAVClient(https_path, - auth_str) - - def verify_share(self, pool, project, share): - """Checks whether the share exists""" - svc = self.share_path % (pool, project, share) - ret = self.rclient.get(svc) - if ret.status != restclient.Status.OK: - exception_msg = (_('Error Verifying ' - 'share: %(share)s on ' - 'Project: %(project)s and ' - 'Pool: %(pool)s ' - 'Return code: %(ret.status)d ' - 'Message: %(ret.data)s.') - % 
{'share': share, - 'project': project, - 'pool': pool, - 'ret.status': ret.status, - 'ret.data': ret.data}) - LOG.error(exception_msg) - raise exception.VolumeBackendAPIException(data=exception_msg) - - def create_snapshot(self, pool, project, share, snapshot): - """create snapshot of a share""" - svc = self.share_snapshots_path % (pool, project, share) - - arg = { - 'name': snapshot - } - - ret = self.rclient.post(svc, arg) - if ret.status != restclient.Status.CREATED: - exception_msg = (_('Error Creating ' - 'Snapshot: %(snapshot)s on' - 'share: %(share)s to ' - 'Pool: %(pool)s ' - 'Project: %(project)s ' - 'Return code: %(ret.status)d ' - 'Message: %(ret.data)s.') - % {'snapshot': snapshot, - 'share': share, - 'pool': pool, - 'project': project, - 'ret.status': ret.status, - 'ret.data': ret.data}) - LOG.error(exception_msg) - raise exception.VolumeBackendAPIException(data=exception_msg) - - def delete_snapshot(self, pool, project, share, snapshot): - """delete snapshot of a share""" - svc = self.share_snapshot_path % (pool, project, share, snapshot) - - ret = self.rclient.delete(svc) - if ret.status != restclient.Status.NO_CONTENT: - exception_msg = (_('Error Deleting ' - 'Snapshot: %(snapshot)s on ' - 'Share: %(share)s to ' - 'Pool: %(pool)s ' - 'Project: %(project)s ' - 'Return code: %(ret.status)d ' - 'Message: %(ret.data)s.') - % {'snapshot': snapshot, - 'share': share, - 'pool': pool, - 'project': project, - 'ret.status': ret.status, - 'ret.data': ret.data}) - LOG.error(exception_msg) - raise exception.VolumeBackendAPIException(data=exception_msg) - - def create_snapshot_of_volume_file(self, src_file="", dst_file=""): - src_file = '.zfs/snapshot/' + src_file - return self.webdavclient.request(src_file=src_file, dst_file=dst_file, - method='COPY') - - def delete_snapshot_of_volume_file(self, src_file=""): - return self.webdavclient.request(src_file=src_file, method='DELETE') - - def create_volume_from_snapshot_file(self, src_file="", dst_file="", - 
method='COPY'): - return self.webdavclient.request(src_file=src_file, dst_file=dst_file, - method=method) - - def _change_service_state(self, service, state=''): - svc = self.services_path + service + '/' + state - ret = self.rclient.put(svc) - if ret.status != restclient.Status.ACCEPTED: - exception_msg = (_('Error Verifying ' - 'Service: %(service)s ' - 'Return code: %(ret.status)d ' - 'Message: %(ret.data)s.') - % {'service': service, - 'ret.status': ret.status, - 'ret.data': ret.data}) - - LOG.error(exception_msg) - raise exception.VolumeBackendAPIException(data=exception_msg) - data = json.loads(ret.data)['service'] - LOG.debug('%(service)s service state: %(data)s', - {'service': service, 'data': data}) - - status = 'online' if state == 'enable' else 'disabled' - - if data[''] != status: - exception_msg = (_('%(service)s Service is not %(status)s ' - 'on storage appliance: %(host)s') - % {'service': service, - 'status': status, - 'host': self.host}) - LOG.error(exception_msg) - raise exception.VolumeBackendAPIException(data=exception_msg) - - def enable_service(self, service): - self._change_service_state(service, state='enable') - self.verify_service(service) - - def disable_service(self, service): - self._change_service_state(service, state='disable') - self.verify_service(service, status='offline') - - def modify_service(self, service, edit_args=None): - """Edit service properties""" - if edit_args is None: - edit_args = {} - - svc = self.services_path + service - - ret = self.rclient.put(svc, edit_args) - - if ret.status != restclient.Status.ACCEPTED: - exception_msg = (_('Error modifying ' - 'Service: %(service)s ' - 'Return code: %(ret.status)d ' - 'Message: %(ret.data)s.') - % {'service': service, - 'ret.status': ret.status, - 'ret.data': ret.data}) - - LOG.error(exception_msg) - raise exception.VolumeBackendAPIException(data=exception_msg) - data = json.loads(ret.data)['service'] - LOG.debug('Modify %(service)s service ' - 'return data: %(data)s', - 
{'service': service, - 'data': data}) - - def create_share(self, pool, project, share, args): - """Create a share in the specified pool and project""" - svc = self.share_path % (pool, project, share) - ret = self.rclient.get(svc) - if ret.status != restclient.Status.OK: - svc = self.shares_path % (pool, project) - args.update({'name': share}) - ret = self.rclient.post(svc, args) - if ret.status != restclient.Status.CREATED: - exception_msg = (_('Error Creating ' - 'Share: %(name)s ' - 'Return code: %(ret.status)d ' - 'Message: %(ret.data)s.') - % {'name': share, - 'ret.status': ret.status, - 'ret.data': ret.data}) - LOG.error(exception_msg) - raise exception.VolumeBackendAPIException(data=exception_msg) - else: - LOG.debug('Editing properties of a pre-existing share') - ret = self.rclient.put(svc, args) - if ret.status != restclient.Status.ACCEPTED: - exception_msg = (_('Error editing share: ' - '%(share)s on ' - 'Pool: %(pool)s ' - 'Return code: %(ret.status)d ' - 'Message: %(ret.data)s .') - % {'share': share, - 'pool': pool, - 'ret.status': ret.status, - 'ret.data': ret.data}) - LOG.error(exception_msg) - raise exception.VolumeBackendAPIException(data=exception_msg) - - def get_share(self, pool, project, share): - """return share properties""" - svc = self.share_path % (pool, project, share) - ret = self.rclient.get(svc) - if ret.status != restclient.Status.OK: - exception_msg = (_('Error Getting ' - 'Share: %(share)s on ' - 'Pool: %(pool)s ' - 'Project: %(project)s ' - 'Return code: %(ret.status)d ' - 'Message: %(ret.data)s.') - % {'share': share, - 'pool': pool, - 'project': project, - 'ret.status': ret.status, - 'ret.data': ret.data}) - LOG.error(exception_msg) - raise exception.VolumeBackendAPIException(data=exception_msg) - - val = json.loads(ret.data) - return val['filesystem'] - - def get_volume(self, volume): - LOG.debug('Getting volume %s.', volume) - try: - resp = self.webdavclient.request(src_file=volume, - method='PROPFIND') - except Exception: - 
raise exception.VolumeNotFound(volume_id=volume) - - resp = resp.read() - numclones = self._parse_prop(resp, 'numclones') - result = { - 'numclones': int(numclones) if numclones != '' else 0, - 'updated_at': self._parse_prop(resp, 'updated_at'), - 'image_id': self._parse_prop(resp, 'image_id'), - 'origin': self._parse_prop(resp, 'origin'), - 'cinder_managed': self._parse_prop(resp, 'cinder_managed'), - } - return result - - def delete_file(self, filename): - try: - self.webdavclient.request(src_file=filename, method='DELETE') - except Exception: - exception_msg = ('Cannot delete file %s.', filename) - LOG.error(exception_msg) - - def set_file_props(self, file, specs): - """Set custom properties to a file.""" - for key in specs: - self.webdavclient.set_file_prop(file, key, specs[key]) - - def _parse_prop(self, response, prop): - """Parse a property value from the WebDAV response.""" - propval = "" - for line in response.split("\n"): - if prop in line: - try: - propval = line[(line.index('>') + 1):line.index(' 0: - thostinfo = ret['data']['tHostInfo'] - for hostindex in range(0, int(sdwhostnum)): - initiator_name = thostinfo[hostindex]['ucHostName'] - host_in_grp = { - 'ucInitName': initiator_name, - 'cMapGrpName': map_group_name} - ret = self._call_method('DelHostFromGrp', host_in_grp) - if ret['returncode'] == zte_pub.ZTE_ERR_GROUP_NOT_EXIST: - continue - if ret['returncode'] not in [zte_pub.ZTE_SUCCESS, - zte_pub.ZTE_ERR_HOST_NOT_EXIST]: - msg = _('delete host from group failed. ') - raise exception.VolumeDriverException(message=msg) - - ret = self._call_method( - 'GetHost', {"cHostAlias": initiator_name}) - if ret['returncode'] != zte_pub.ZTE_SUCCESS: - err_msg = (_('_map_delete_host:get host info failed. 
' - 'host name:%(name)s with Return code: ' - '%(ret)s.') % - {'name': initiator_name, - 'ret': ret['returncode']}) - raise exception.VolumeBackendAPIException(data=err_msg) - - return_data = ret['data'] - portnum = return_data['sdwPortNum'] - for portindex in range(0, int(portnum)): - port_host_info = {} - port_info = return_data['tPort'] - port_name = port_info[portindex]['cPortName'] - port_host_info['cPortName'] = port_name - port_host_info['cHostAlias'] = initiator_name - - ret = self._call_method('DelPortFromHost', port_host_info) - if ret['returncode'] != zte_pub.ZTE_SUCCESS: - err_msg = (_('delete port from host failed. ' - 'host name:%(name)s, port name:%(port)s ' - 'with Return code: %(ret)s.') % - {'name': initiator_name, - 'port': port_name, - 'ret': ret['returncode']}) - raise exception.VolumeBackendAPIException(data=err_msg) - - ret = self._call_method( - 'DelHost', {"cHostAlias": initiator_name}) - if (ret['returncode'] not - in [zte_pub.ZTE_SUCCESS, - zte_pub.ZTE_ERR_HOSTNAME_NOT_EXIST]): - err_msg = (_('_map_delete_host: delete host failed. ' - 'host name:%(name)s with Return code: ' - '%(ret)s') % - {'name': initiator_name, - 'ret': ret['returncode']}) - raise exception.VolumeBackendAPIException(data=err_msg) diff --git a/cinder/volume/drivers/zte/zte_pub.py b/cinder/volume/drivers/zte/zte_pub.py deleted file mode 100644 index dc9747dfe..000000000 --- a/cinder/volume/drivers/zte/zte_pub.py +++ /dev/null @@ -1,59 +0,0 @@ -# Copyright 2016 ZTE Corporation. All rights reserved -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the -# License for the specific language governing permissions and limitations -# under the License. -""" -Constants used in ZTE Volume driver. -""" - -ZTE_HOST_GROUP_NAME_PREFIX = 'HostGroup_' -ZTE_VOL_AND_SNAP_NAME_PREFIX = 'OpenStack_' -ZTE_VOL_NAME_PREFIX_NEW = 'OpenCos_' -ZTE_CLONE_SUFFIX = 'c' -ZTE_GROUP_MAX_LUN = 255 -ZTE_SUCCESS = 0 -ZTE_VOLUME_IN_GROUP = 16917029 -ZTE_WEB_LOGIN_TYPE = 5 -ZTE_ERR_GROUP_NOT_EXIST = 16916997 -ZTE_ERR_GROUP_EXIST = 16916994 -ZTE_ERR_HOSTNAME_EXIST = 16918785 -ZTE_ERR_HOSTNAME_NOT_EXIST = 16918786 -ZTE_ERR_PORT_EXIST_OLD = 16918787 -ZTE_ERR_PORT_EXIST = 16918798 -ZTE_ERR_PORT_EXIST_INOTHER = 16918799 -ZTE_ERR_HOST_NOT_EXIST = 16917004 -ZTE_ERR_HOST_EXIST_OLD = 16917002 -ZTE_ERR_HOST_EXIST = 16917015 -ZTE_ERR_HOST_EXIST_INOTHER = 16917016 -ZTE_ERR_VOLUME_NOT_EXIST = 17108999 -ZTE_ERR_OBJECT_EXIST = 16917159 -ZTE_ERR_CLONE_OR_SNAP_NOT_EXIST = 16917040 -ZTE_ERR_VOL_EXISTS = 16917000 -ZTE_ERR_LUNDEV_NOT_EXIST = 16973826 -ZTE_ERR_VAS_OBJECT_NOT_EXIST = 17436681 -ZTE_VOLUME_TASK_NOT_FINISHED = 17436959 -ZTE_ERR_SNAP_EXIST_CLONE = 16917163 -ZTE_SESSION_EXIST = 1495 -ZTE_STATUS_OK = 1 - -ZTE_VOLUME = 0 -ZTE_SNAPSHOT = 1 - -ZTE_LUNID_NULL = -1 - -ZTE_VOLUME_ALLOCATION_RATIO = 20 -ZTE_VOLUME_SNAPSHOT_PERCENT = 50 -ZTE_DEFAULT_TIMEOUT = 720 - -ZTE_SNAPSHOT_MODE_READ_ONLY = 0 -ZTE_SNAPSHOT_MODE_RW = 1 diff --git a/cinder/volume/flows/__init__.py b/cinder/volume/flows/__init__.py deleted file mode 100644 index e69de29bb..000000000 diff --git a/cinder/volume/flows/api/__init__.py b/cinder/volume/flows/api/__init__.py deleted file mode 100644 index e69de29bb..000000000 diff --git a/cinder/volume/flows/api/create_volume.py b/cinder/volume/flows/api/create_volume.py deleted file mode 100644 index 15777fbde..000000000 --- a/cinder/volume/flows/api/create_volume.py +++ /dev/null @@ -1,860 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. 
You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - - -from oslo_config import cfg -from oslo_log import log as logging -from oslo_utils import timeutils -from oslo_utils import units -import six -import taskflow.engines -from taskflow.patterns import linear_flow -from taskflow.types import failure as ft - -from cinder import exception -from cinder import flow_utils -from cinder.i18n import _ -from cinder import objects -from cinder.objects import fields -from cinder import policy -from cinder import quota -from cinder import quota_utils -from cinder import utils -from cinder.volume.flows import common -from cinder.volume import utils as vol_utils -from cinder.volume import volume_types - -LOG = logging.getLogger(__name__) - -ACTION = 'volume:create' -CONF = cfg.CONF -GB = units.Gi -QUOTAS = quota.QUOTAS - -# Only in these 'sources' status can we attempt to create a volume from a -# source volume or a source snapshot, other status states we can not create -# from, 'error' being the common example. -SNAPSHOT_PROCEED_STATUS = (fields.SnapshotStatus.AVAILABLE,) -SRC_VOL_PROCEED_STATUS = ('available', 'in-use',) -REPLICA_PROCEED_STATUS = ('active', 'active-stopped',) -CG_PROCEED_STATUS = ('available', 'creating',) -CGSNAPSHOT_PROCEED_STATUS = ('available',) -GROUP_PROCEED_STATUS = ('available', 'creating',) - - -class ExtractVolumeRequestTask(flow_utils.CinderTask): - """Processes an api request values into a validated set of values. 
- - This tasks responsibility is to take in a set of inputs that will form - a potential volume request and validates those values against a set of - conditions and/or translates those values into a valid set and then returns - the validated/translated values for use by other tasks. - - Reversion strategy: N/A - """ - - # This task will produce the following outputs (said outputs can be - # saved to durable storage in the future so that the flow can be - # reconstructed elsewhere and continued). - default_provides = set(['availability_zone', 'size', 'snapshot_id', - 'source_volid', 'volume_type', 'volume_type_id', - 'encryption_key_id', 'source_replicaid', - 'consistencygroup_id', 'cgsnapshot_id', - 'qos_specs', 'group_id']) - - def __init__(self, image_service, availability_zones, **kwargs): - super(ExtractVolumeRequestTask, self).__init__(addons=[ACTION], - **kwargs) - self.image_service = image_service - self.availability_zones = availability_zones - - @staticmethod - def _extract_resource(resource, allowed_vals, exc, resource_name, - props=('status',)): - """Extracts the resource id from the provided resource. - - This method validates the input resource dict and checks that the - properties which names are passed in `props` argument match - corresponding lists in `allowed` argument. In case of mismatch - exception of type exc is raised. - - :param resource: Resource dict. - :param allowed_vals: Tuple of allowed values lists. - :param exc: Exception type to raise. - :param resource_name: Name of resource - used to construct log message. - :param props: Tuple of resource properties names to validate. - :return: Id of a resource. 
- """ - - resource_id = None - if resource: - for prop, allowed_states in zip(props, allowed_vals): - if resource[prop] not in allowed_states: - msg = _("Originating %(res)s %(prop)s must be one of " - "'%(vals)s' values") - msg = msg % {'res': resource_name, - 'prop': prop, - 'vals': ', '.join(allowed_states)} - # TODO(harlowja): what happens if the status changes after - # this initial resource status check occurs??? Seems like - # someone could delete the resource after this check passes - # but before the volume is officially created? - raise exc(reason=msg) - resource_id = resource['id'] - return resource_id - - def _extract_consistencygroup(self, consistencygroup): - return self._extract_resource(consistencygroup, (CG_PROCEED_STATUS,), - exception.InvalidConsistencyGroup, - 'consistencygroup') - - def _extract_group(self, group): - return self._extract_resource(group, (GROUP_PROCEED_STATUS,), - exception.InvalidGroup, - 'group') - - def _extract_cgsnapshot(self, cgsnapshot): - return self._extract_resource(cgsnapshot, (CGSNAPSHOT_PROCEED_STATUS,), - exception.InvalidCgSnapshot, - 'CGSNAPSHOT') - - def _extract_snapshot(self, snapshot): - return self._extract_resource(snapshot, (SNAPSHOT_PROCEED_STATUS,), - exception.InvalidSnapshot, 'snapshot') - - def _extract_source_volume(self, source_volume): - return self._extract_resource(source_volume, (SRC_VOL_PROCEED_STATUS,), - exception.InvalidVolume, 'source volume') - - def _extract_source_replica(self, source_replica): - return self._extract_resource(source_replica, (SRC_VOL_PROCEED_STATUS, - REPLICA_PROCEED_STATUS), - exception.InvalidVolume, - 'replica', ('status', - 'replication_status')) - - @staticmethod - def _extract_size(size, source_volume, snapshot): - """Extracts and validates the volume size. - - This function will validate or when not provided fill in the provided - size variable from the source_volume or snapshot and then does - validation on the size that is found and returns said validated size. 
- """ - - def validate_snap_size(size): - if snapshot and size < snapshot.volume_size: - msg = _("Volume size '%(size)s'GB cannot be smaller than" - " the snapshot size %(snap_size)sGB. " - "They must be >= original snapshot size.") - msg = msg % {'size': size, - 'snap_size': snapshot.volume_size} - raise exception.InvalidInput(reason=msg) - - def validate_source_size(size): - if source_volume and size < source_volume['size']: - msg = _("Volume size '%(size)s'GB cannot be smaller than " - "original volume size %(source_size)sGB. " - "They must be >= original volume size.") - msg = msg % {'size': size, - 'source_size': source_volume['size']} - raise exception.InvalidInput(reason=msg) - - def validate_int(size): - if not isinstance(size, six.integer_types) or size <= 0: - msg = _("Volume size '%(size)s' must be an integer and" - " greater than 0") % {'size': size} - raise exception.InvalidInput(reason=msg) - - # Figure out which validation functions we should be applying - # on the size value that we extract. - validator_functors = [validate_int] - if source_volume: - validator_functors.append(validate_source_size) - elif snapshot: - validator_functors.append(validate_snap_size) - - # If the size is not provided then try to provide it. - if not size and source_volume: - size = source_volume['size'] - elif not size and snapshot: - size = snapshot.volume_size - - size = utils.as_int(size) - LOG.debug("Validating volume size '%(size)s' using %(functors)s", - {'size': size, - 'functors': ", ".join([common.make_pretty_name(func) - for func in validator_functors])}) - for func in validator_functors: - func(size) - return size - - def _get_image_metadata(self, context, image_id, size): - """Checks image existence and validates the image metadata. 
- - Returns: image metadata or None - """ - - # Check image existence - if image_id is None: - return None - - # NOTE(harlowja): this should raise an error if the image does not - # exist, this is expected as it signals that the image_id is missing. - image_meta = self.image_service.show(context, image_id) - - # check whether image is active - if image_meta['status'] != 'active': - msg = _('Image %(image_id)s is not active.')\ - % {'image_id': image_id} - raise exception.InvalidInput(reason=msg) - - # Check image size is not larger than volume size. - image_size = utils.as_int(image_meta['size'], quiet=False) - image_size_in_gb = (image_size + GB - 1) // GB - if image_size_in_gb > size: - msg = _('Size of specified image %(image_size)sGB' - ' is larger than volume size %(volume_size)sGB.') - msg = msg % {'image_size': image_size_in_gb, 'volume_size': size} - raise exception.InvalidInput(reason=msg) - - # Check image min_disk requirement is met for the particular volume - min_disk = image_meta.get('min_disk', 0) - if size < min_disk: - msg = _('Volume size %(volume_size)sGB cannot be smaller' - ' than the image minDisk size %(min_disk)sGB.') - msg = msg % {'volume_size': size, 'min_disk': min_disk} - raise exception.InvalidInput(reason=msg) - - return image_meta - - def _get_image_volume_type(self, context, image_id): - """Get cinder_img_volume_type property from the image metadata.""" - - # Check image existence - if image_id is None: - return None - - image_meta = self.image_service.show(context, image_id) - - # check whether image is active - if image_meta['status'] != 'active': - msg = (_('Image %(image_id)s is not active.') % - {'image_id': image_id}) - raise exception.InvalidInput(reason=msg) - - # Retrieve 'cinder_img_volume_type' property from glance image - # metadata. 
- image_volume_type = "cinder_img_volume_type" - properties = image_meta.get('properties') - if properties: - try: - img_vol_type = properties.get(image_volume_type) - if img_vol_type is None: - return None - volume_type = volume_types.get_volume_type_by_name( - context, - img_vol_type) - except exception.VolumeTypeNotFoundByName: - LOG.warning("Failed to retrieve volume_type from image " - "metadata. '%(img_vol_type)s' doesn't match " - "any volume types.", - {'img_vol_type': img_vol_type}) - return None - - LOG.debug("Retrieved volume_type from glance image metadata. " - "image_id: %(image_id)s, " - "image property: %(image_volume_type)s, " - "volume_type: %(volume_type)s.", - {'image_id': image_id, - 'image_volume_type': image_volume_type, - 'volume_type': volume_type}) - return volume_type - - def _extract_availability_zone(self, availability_zone, snapshot, - source_volume, group): - """Extracts and returns a validated availability zone. - - This function will extract the availability zone (if not provided) from - the snapshot or source_volume and then performs a set of validation - checks on the provided or extracted availability zone and then returns - the validated availability zone. - """ - - # If the volume will be created in a group, it should be placed in - # in same availability zone as the group. - if group: - try: - availability_zone = group['availability_zone'] - except (TypeError, KeyError): - pass - - # Try to extract the availability zone from the corresponding snapshot - # or source volume if either is valid so that we can be in the same - # availability zone as the source. 
- if availability_zone is None: - if snapshot: - try: - availability_zone = snapshot['volume']['availability_zone'] - except (TypeError, KeyError): - pass - if source_volume and availability_zone is None: - try: - availability_zone = source_volume['availability_zone'] - except (TypeError, KeyError): - pass - - if availability_zone is None: - if CONF.default_availability_zone: - availability_zone = CONF.default_availability_zone - else: - # For backwards compatibility use the storage_availability_zone - availability_zone = CONF.storage_availability_zone - - if availability_zone not in self.availability_zones: - if CONF.allow_availability_zone_fallback: - original_az = availability_zone - availability_zone = ( - CONF.default_availability_zone or - CONF.storage_availability_zone) - LOG.warning("Availability zone '%(s_az)s' " - "not found, falling back to " - "'%(s_fallback_az)s'.", - {'s_az': original_az, - 's_fallback_az': availability_zone}) - else: - msg = _("Availability zone '%(s_az)s' is invalid.") - msg = msg % {'s_az': availability_zone} - raise exception.InvalidInput(reason=msg) - - # If the configuration only allows cloning to the same availability - # zone then we need to enforce that. 
- if CONF.cloned_volume_same_az: - snap_az = None - try: - snap_az = snapshot['volume']['availability_zone'] - except (TypeError, KeyError): - pass - if snap_az and snap_az != availability_zone: - msg = _("Volume must be in the same " - "availability zone as the snapshot") - raise exception.InvalidInput(reason=msg) - source_vol_az = None - try: - source_vol_az = source_volume['availability_zone'] - except (TypeError, KeyError): - pass - if source_vol_az and source_vol_az != availability_zone: - msg = _("Volume must be in the same " - "availability zone as the source volume") - raise exception.InvalidInput(reason=msg) - - return availability_zone - - def _get_encryption_key_id(self, key_manager, context, volume_type_id, - snapshot, source_volume, - image_metadata): - encryption_key_id = None - if volume_types.is_encrypted(context, volume_type_id): - if snapshot is not None: # creating from snapshot - encryption_key_id = snapshot['encryption_key_id'] - elif source_volume is not None: # cloning volume - encryption_key_id = source_volume['encryption_key_id'] - elif image_metadata is not None: - # creating from image - encryption_key_id = image_metadata.get( - 'cinder_encryption_key_id') - - # NOTE(joel-coffman): References to the encryption key should *not* - # be copied because the key is deleted when the volume is deleted. - # Clone the existing key and associate a separate -- but - # identical -- key with each volume. - if encryption_key_id is not None: - encryption_key_id = key_manager.store( - context, key_manager.get(context, encryption_key_id)) - else: - volume_type_encryption = ( - volume_types.get_volume_type_encryption(context, - volume_type_id)) - cipher = volume_type_encryption.cipher - length = volume_type_encryption.key_size - - # NOTE(kaitlin-farr): dm-crypt expects the cipher in a - # hyphenated format (aes-xts-plain64). The algorithm needs - # to be parsed out to pass to the key manager (aes). 
- algorithm = cipher.split('-')[0] if cipher else None - encryption_key_id = key_manager.create_key(context, - algorithm=algorithm, - length=length) - - return encryption_key_id - - def _get_volume_type_id(self, volume_type, source_volume, snapshot): - if not volume_type and source_volume: - return source_volume['volume_type_id'] - elif snapshot is not None: - if volume_type: - current_volume_type_id = volume_type.get('id') - if current_volume_type_id != snapshot['volume_type_id']: - LOG.warning("Volume type will be changed to " - "be the same as the source volume.") - return snapshot['volume_type_id'] - else: - return volume_type.get('id') - - def execute(self, context, size, snapshot, image_id, source_volume, - availability_zone, volume_type, metadata, key_manager, - source_replica, consistencygroup, cgsnapshot, group): - - utils.check_exclusive_options(snapshot=snapshot, - imageRef=image_id, - source_volume=source_volume) - policy.enforce_action(context, ACTION) - - # TODO(harlowja): what guarantee is there that the snapshot or source - # volume will remain available after we do this initial verification?? - snapshot_id = self._extract_snapshot(snapshot) - source_volid = self._extract_source_volume(source_volume) - source_replicaid = self._extract_source_replica(source_replica) - size = self._extract_size(size, source_volume, snapshot) - consistencygroup_id = self._extract_consistencygroup(consistencygroup) - cgsnapshot_id = self._extract_cgsnapshot(cgsnapshot) - group_id = self._extract_group(group) - - image_meta = self._get_image_metadata(context, - image_id, - size) - - availability_zone = self._extract_availability_zone(availability_zone, - snapshot, - source_volume, - group) - - # TODO(joel-coffman): This special handling of snapshots to ensure that - # their volume type matches the source volume is too convoluted. 
We - # should copy encryption metadata from the encrypted volume type to the - # volume upon creation and propagate that information to each snapshot. - # This strategy avoids any dependency upon the encrypted volume type. - def_vol_type = volume_types.get_default_volume_type() - if not volume_type and not source_volume and not snapshot: - image_volume_type = self._get_image_volume_type(context, image_id) - volume_type = (image_volume_type if image_volume_type else - def_vol_type) - - # When creating a clone of a replica (replication test), we can't - # use the volume type of the replica, therefore, we use the default. - # NOTE(ronenkat): this assumes the default type is not replicated. - if source_replicaid: - volume_type = def_vol_type - - volume_type_id = self._get_volume_type_id(volume_type, - source_volume, snapshot) - - encryption_key_id = self._get_encryption_key_id( - key_manager, - context, - volume_type_id, - snapshot, - source_volume, - image_meta) - - specs = {} - if volume_type_id: - qos_specs = volume_types.get_volume_type_qos_specs(volume_type_id) - if qos_specs['qos_specs']: - specs = qos_specs['qos_specs'].get('specs', {}) - - # Determine default replication status - extra_specs = volume_types.get_volume_type_extra_specs( - volume_type_id) - if not specs: - # to make sure we don't pass empty dict - specs = None - extra_specs = None - - if vol_utils.is_replicated_spec(extra_specs): - replication_status = fields.ReplicationStatus.ENABLED - else: - replication_status = fields.ReplicationStatus.DISABLED - - return { - 'size': size, - 'snapshot_id': snapshot_id, - 'source_volid': source_volid, - 'availability_zone': availability_zone, - 'volume_type': volume_type, - 'volume_type_id': volume_type_id, - 'encryption_key_id': encryption_key_id, - 'qos_specs': specs, - 'source_replicaid': source_replicaid, - 'consistencygroup_id': consistencygroup_id, - 'cgsnapshot_id': cgsnapshot_id, - 'group_id': group_id, - 'replication_status': replication_status, - } - 
- -class EntryCreateTask(flow_utils.CinderTask): - """Creates an entry for the given volume creation in the database. - - Reversion strategy: remove the volume_id created from the database. - """ - - default_provides = set(['volume_properties', 'volume_id', 'volume']) - - def __init__(self): - requires = ['availability_zone', 'description', 'metadata', - 'name', 'reservations', 'size', 'snapshot_id', - 'source_volid', 'volume_type_id', 'encryption_key_id', - 'source_replicaid', 'consistencygroup_id', - 'cgsnapshot_id', 'multiattach', 'qos_specs', - 'group_id', ] - super(EntryCreateTask, self).__init__(addons=[ACTION], - requires=requires) - - def execute(self, context, optional_args, **kwargs): - """Creates a database entry for the given inputs and returns details. - - Accesses the database and creates a new entry for the to be created - volume using the given volume properties which are extracted from the - input kwargs (and associated requirements this task needs). These - requirements should be previously satisfied and validated by a - pre-cursor task. - """ - - src_volid = kwargs.get('source_volid') - src_vol = None - if src_volid is not None: - src_vol = objects.Volume.get_by_id(context, src_volid) - bootable = False - if src_vol is not None: - bootable = src_vol.bootable - - volume_properties = { - 'size': kwargs.pop('size'), - 'user_id': context.user_id, - 'project_id': context.project_id, - 'status': 'creating', - 'attach_status': fields.VolumeAttachStatus.DETACHED, - 'encryption_key_id': kwargs.pop('encryption_key_id'), - # Rename these to the internal name. - 'display_description': kwargs.pop('description'), - 'display_name': kwargs.pop('name'), - 'multiattach': kwargs.pop('multiattach'), - 'bootable': bootable, - } - - # Merge in the other required arguments which should provide the rest - # of the volume property fields (if applicable). 
- volume_properties.update(kwargs) - volume = objects.Volume(context=context, **volume_properties) - volume.create() - - # FIXME(dulek): We're passing this volume_properties dict through RPC - # in request_spec. This shouldn't be needed, most data is replicated - # in both volume and other places. We should make Newton read data - # from just one correct place and leave just compatibility code. - # - # Right now - let's move it to versioned objects to be able to make - # non-backward compatible changes. - - volume_properties = objects.VolumeProperties(**volume_properties) - - return { - 'volume_id': volume['id'], - 'volume_properties': volume_properties, - # NOTE(harlowja): it appears like further usage of this volume - # result actually depend on it being a sqlalchemy object and not - # just a plain dictionary so that's why we are storing this here. - # - # In the future where this task results can be serialized and - # restored automatically for continued running we will need to - # resolve the serialization & recreation of this object since raw - # sqlalchemy objects can't be serialized. - 'volume': volume, - } - - def revert(self, context, result, optional_args, **kwargs): - if isinstance(result, ft.Failure): - # We never produced a result and therefore can't destroy anything. - return - - if optional_args['is_quota_committed']: - # If quota got committed we shouldn't rollback as the volume has - # already been created and the quota has already been absorbed. - return - - volume = result['volume'] - try: - volume.destroy() - except exception.CinderException: - # We are already reverting, therefore we should silence this - # exception since a second exception being active will be bad. - # - # NOTE(harlowja): Being unable to destroy a volume is pretty - # bad though!! - LOG.exception("Failed destroying volume entry %s", volume.id) - - -class QuotaReserveTask(flow_utils.CinderTask): - """Reserves a single volume with the given size & the given volume type. 
- - Reversion strategy: rollback the quota reservation. - - Warning Warning: if the process that is running this reserve and commit - process fails (or is killed before the quota is rolled back or committed - it does appear like the quota will never be rolled back). This makes - software upgrades hard (inflight operations will need to be stopped or - allowed to complete before the upgrade can occur). *In the future* when - taskflow has persistence built-in this should be easier to correct via - an automated or manual process. - """ - - default_provides = set(['reservations']) - - def __init__(self): - super(QuotaReserveTask, self).__init__(addons=[ACTION]) - - def execute(self, context, size, volume_type_id, optional_args): - try: - values = {'per_volume_gigabytes': size} - QUOTAS.limit_check(context, project_id=context.project_id, - **values) - except exception.OverQuota as e: - quotas = e.kwargs['quotas'] - raise exception.VolumeSizeExceedsLimit( - size=size, limit=quotas['per_volume_gigabytes']) - - try: - reserve_opts = {'volumes': 1, 'gigabytes': size} - QUOTAS.add_volume_type_opts(context, reserve_opts, volume_type_id) - reservations = QUOTAS.reserve(context, **reserve_opts) - return { - 'reservations': reservations, - } - except exception.OverQuota as e: - quota_utils.process_reserve_over_quota(context, e, - resource='volumes', - size=size) - - def revert(self, context, result, optional_args, **kwargs): - # We never produced a result and therefore can't destroy anything. - if isinstance(result, ft.Failure): - return - - if optional_args['is_quota_committed']: - # The reservations have already been committed and can not be - # rolled back at this point. - return - # We actually produced an output that we can revert so lets attempt - # to use said output to rollback the reservation. 
- reservations = result['reservations'] - try: - QUOTAS.rollback(context, reservations) - except exception.CinderException: - # We are already reverting, therefore we should silence this - # exception since a second exception being active will be bad. - LOG.exception("Failed rolling back quota for" - " %s reservations", reservations) - - -class QuotaCommitTask(flow_utils.CinderTask): - """Commits the reservation. - - Reversion strategy: N/A (the rollback will be handled by the task that did - the initial reservation (see: QuotaReserveTask). - - Warning Warning: if the process that is running this reserve and commit - process fails (or is killed before the quota is rolled back or committed - it does appear like the quota will never be rolled back). This makes - software upgrades hard (inflight operations will need to be stopped or - allowed to complete before the upgrade can occur). *In the future* when - taskflow has persistence built-in this should be easier to correct via - an automated or manual process. - """ - - def __init__(self): - super(QuotaCommitTask, self).__init__(addons=[ACTION]) - - def execute(self, context, reservations, volume_properties, - optional_args): - QUOTAS.commit(context, reservations) - # updating is_quota_committed attribute of optional_args dictionary - optional_args['is_quota_committed'] = True - return {'volume_properties': volume_properties} - - def revert(self, context, result, **kwargs): - # We never produced a result and therefore can't destroy anything. 
- if isinstance(result, ft.Failure): - return - - volume = result['volume_properties'] - try: - reserve_opts = {'volumes': -1, 'gigabytes': -volume['size']} - QUOTAS.add_volume_type_opts(context, - reserve_opts, - volume['volume_type_id']) - reservations = QUOTAS.reserve(context, - project_id=context.project_id, - **reserve_opts) - if reservations: - QUOTAS.commit(context, reservations, - project_id=context.project_id) - except Exception: - LOG.exception("Failed to update quota for deleting " - "volume: %s", volume['id']) - - -class VolumeCastTask(flow_utils.CinderTask): - """Performs a volume create cast to the scheduler or to the volume manager. - - This will signal a transition of the api workflow to another child and/or - related workflow on another component. - - Reversion strategy: rollback source volume status and error out newly - created volume. - """ - - def __init__(self, scheduler_rpcapi, volume_rpcapi, db): - requires = ['image_id', 'scheduler_hints', 'snapshot_id', - 'source_volid', 'volume_id', 'volume', 'volume_type', - 'volume_properties', 'source_replicaid', - 'consistencygroup_id', 'cgsnapshot_id', 'group_id', ] - super(VolumeCastTask, self).__init__(addons=[ACTION], - requires=requires) - self.volume_rpcapi = volume_rpcapi - self.scheduler_rpcapi = scheduler_rpcapi - self.db = db - - def _cast_create_volume(self, context, request_spec, filter_properties): - source_volume_ref = None - source_volid = (request_spec['source_volid'] or - request_spec['source_replicaid']) - volume = request_spec['volume'] - snapshot_id = request_spec['snapshot_id'] - image_id = request_spec['image_id'] - cgroup_id = request_spec['consistencygroup_id'] - cgsnapshot_id = request_spec['cgsnapshot_id'] - group_id = request_spec['group_id'] - if cgroup_id: - # If cgroup_id existed, we should cast volume to the scheduler - # to choose a proper pool whose backend is same as CG's backend. 
- cgroup = objects.ConsistencyGroup.get_by_id(context, cgroup_id) - request_spec['CG_backend'] = vol_utils.extract_host(cgroup.host) - elif group_id: - # If group_id exists, we should cast volume to the scheduler - # to choose a proper pool whose backend is same as group's backend. - group = objects.Group.get_by_id(context, group_id) - # FIXME(wanghao): group_backend got added before request_spec was - # converted to versioned objects. We should make sure that this - # will be handled by object version translations once we add - # RequestSpec object. - request_spec['group_backend'] = vol_utils.extract_host(group.host) - elif snapshot_id and CONF.snapshot_same_host: - # NOTE(Rongze Zhu): A simple solution for bug 1008866. - # - # If snapshot_id is set and CONF.snapshot_same_host is True, make - # the call create volume directly to the volume host where the - # snapshot resides instead of passing it through the scheduler, so - # snapshot can be copied to the new volume. - snapshot = objects.Snapshot.get_by_id(context, snapshot_id) - source_volume_ref = snapshot.volume - elif source_volid: - source_volume_ref = objects.Volume.get_by_id(context, source_volid) - - if not source_volume_ref: - # Cast to the scheduler and let it handle whatever is needed - # to select the target host for this volume. - self.scheduler_rpcapi.create_volume( - context, - volume, - snapshot_id=snapshot_id, - image_id=image_id, - request_spec=request_spec, - filter_properties=filter_properties) - else: - # Bypass the scheduler and send the request directly to the volume - # manager. 
- volume.host = source_volume_ref.host - volume.cluster_name = source_volume_ref.cluster_name - volume.scheduled_at = timeutils.utcnow() - volume.save() - if not cgsnapshot_id: - self.volume_rpcapi.create_volume( - context, - volume, - request_spec, - filter_properties, - allow_reschedule=False) - - def execute(self, context, **kwargs): - scheduler_hints = kwargs.pop('scheduler_hints', None) - db_vt = kwargs.pop('volume_type') - kwargs['volume_type'] = None - if db_vt: - kwargs['volume_type'] = objects.VolumeType() - objects.VolumeType()._from_db_object(context, - kwargs['volume_type'], db_vt) - request_spec = objects.RequestSpec(**kwargs) - filter_properties = {} - if scheduler_hints: - filter_properties['scheduler_hints'] = scheduler_hints - self._cast_create_volume(context, request_spec, filter_properties) - - def revert(self, context, result, flow_failures, volume, **kwargs): - if isinstance(result, ft.Failure): - return - - # Restore the source volume status and set the volume to error status. - common.restore_source_status(context, self.db, kwargs) - common.error_out(volume) - LOG.error("Volume %s: create failed", volume.id) - exc_info = False - if all(flow_failures[-1].exc_info): - exc_info = flow_failures[-1].exc_info - LOG.error('Unexpected build error:', exc_info=exc_info) - - -def get_flow(db_api, image_service_api, availability_zones, create_what, - scheduler_rpcapi=None, volume_rpcapi=None): - """Constructs and returns the api entrypoint flow. - - This flow will do the following: - - 1. Inject keys & values for dependent tasks. - 2. Extracts and validates the input keys & values. - 3. Reserves the quota (reverts quota on any failures). - 4. Creates the database entry. - 5. Commits the quota. - 6. Casts to volume manager or scheduler for further processing. 
- """ - - flow_name = ACTION.replace(":", "_") + "_api" - api_flow = linear_flow.Flow(flow_name) - - api_flow.add(ExtractVolumeRequestTask( - image_service_api, - availability_zones, - rebind={'size': 'raw_size', - 'availability_zone': 'raw_availability_zone', - 'volume_type': 'raw_volume_type'})) - api_flow.add(QuotaReserveTask(), - EntryCreateTask(), - QuotaCommitTask()) - - if scheduler_rpcapi and volume_rpcapi: - # This will cast it out to either the scheduler or volume manager via - # the rpc apis provided. - api_flow.add(VolumeCastTask(scheduler_rpcapi, volume_rpcapi, db_api)) - - # Now load (but do not run) the flow using the provided initial data. - return taskflow.engines.load(api_flow, store=create_what) diff --git a/cinder/volume/flows/api/manage_existing.py b/cinder/volume/flows/api/manage_existing.py deleted file mode 100644 index 9c7cb5437..000000000 --- a/cinder/volume/flows/api/manage_existing.py +++ /dev/null @@ -1,145 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- - -from oslo_log import log as logging -import taskflow.engines -from taskflow.patterns import linear_flow -from taskflow.types import failure as ft - -from cinder import exception -from cinder import flow_utils -from cinder import objects -from cinder.objects import fields -from cinder.volume.flows import common - -LOG = logging.getLogger(__name__) - -ACTION = 'volume:manage_existing' - - -class EntryCreateTask(flow_utils.CinderTask): - """Creates an entry for the given volume creation in the database. - - Reversion strategy: remove the volume_id created from the database. - """ - default_provides = set(['volume_properties', 'volume']) - - def __init__(self, db): - requires = ['availability_zone', 'description', 'metadata', - 'name', 'host', 'cluster_name', 'bootable', 'volume_type', - 'ref'] - super(EntryCreateTask, self).__init__(addons=[ACTION], - requires=requires) - self.db = db - - def execute(self, context, **kwargs): - """Creates a database entry for the given inputs and returns details. - - Accesses the database and creates a new entry for the to be created - volume using the given volume properties which are extracted from the - input kwargs. - """ - volume_type = kwargs.pop('volume_type') - volume_type_id = volume_type['id'] if volume_type else None - - volume_properties = { - 'size': 0, - 'user_id': context.user_id, - 'project_id': context.project_id, - 'status': 'managing', - 'attach_status': fields.VolumeAttachStatus.DETACHED, - # Rename these to the internal name. 
- 'display_description': kwargs.pop('description'), - 'display_name': kwargs.pop('name'), - 'host': kwargs.pop('host'), - 'cluster_name': kwargs.pop('cluster_name'), - 'availability_zone': kwargs.pop('availability_zone'), - 'volume_type_id': volume_type_id, - 'metadata': kwargs.pop('metadata') or {}, - 'bootable': kwargs.pop('bootable'), - } - - volume = objects.Volume(context=context, **volume_properties) - volume.create() - - return { - 'volume_properties': volume_properties, - 'volume': volume, - } - - def revert(self, context, result, optional_args=None, **kwargs): - # We never produced a result and therefore can't destroy anything. - if isinstance(result, ft.Failure): - return - - vol_id = result['volume_id'] - try: - self.db.volume_destroy(context.elevated(), vol_id) - except exception.CinderException: - LOG.exception("Failed destroying volume entry: %s.", vol_id) - - -class ManageCastTask(flow_utils.CinderTask): - """Performs a volume manage cast to the scheduler and to the volume manager. - - This which will signal a transition of the api workflow to another child - and/or related workflow. - """ - - def __init__(self, scheduler_rpcapi, db): - requires = ['volume', 'volume_properties', 'volume_type', 'ref'] - super(ManageCastTask, self).__init__(addons=[ACTION], - requires=requires) - self.scheduler_rpcapi = scheduler_rpcapi - self.db = db - - def execute(self, context, volume, **kwargs): - request_spec = kwargs.copy() - request_spec['volume_id'] = volume.id - - # Call the scheduler to ensure that the host exists and that it can - # accept the volume - self.scheduler_rpcapi.manage_existing(context, volume, - request_spec=request_spec) - - def revert(self, context, result, flow_failures, volume, **kwargs): - # Restore the source volume status and set the volume to error status. 
- common.error_out(volume, status='error_managing') - LOG.error("Volume %s: manage failed.", volume.id) - exc_info = False - if all(flow_failures[-1].exc_info): - exc_info = flow_failures[-1].exc_info - LOG.error('Unexpected build error:', exc_info=exc_info) - - -def get_flow(scheduler_rpcapi, db_api, create_what): - """Constructs and returns the api entrypoint flow. - - This flow will do the following: - - 1. Inject keys & values for dependent tasks. - 2. Extracts and validates the input keys & values. - 3. Creates the database entry. - 4. Casts to volume manager and scheduler for further processing. - """ - - flow_name = ACTION.replace(":", "_") + "_api" - api_flow = linear_flow.Flow(flow_name) - - # This will cast it out to either the scheduler or volume manager via - # the rpc apis provided. - api_flow.add(EntryCreateTask(db_api), - ManageCastTask(scheduler_rpcapi, db_api)) - - # Now load (but do not run) the flow using the provided initial data. - return taskflow.engines.load(api_flow, store=create_what) diff --git a/cinder/volume/flows/common.py b/cinder/volume/flows/common.py deleted file mode 100644 index d9fee1755..000000000 --- a/cinder/volume/flows/common.py +++ /dev/null @@ -1,92 +0,0 @@ -# Copyright (C) 2013 Yahoo! Inc. All Rights Reserved. -# Copyright (c) 2013 OpenStack Foundation -# Copyright 2010 United States Government as represented by the -# Administrator of the National Aeronautics and Space Administration. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the -# License for the specific language governing permissions and limitations -# under the License. - -from oslo_log import log as logging -import six - -from cinder import exception - -LOG = logging.getLogger(__name__) - -# When a volume errors out we have the ability to save a piece of the exception -# that caused said failure, but we don't want to save the whole message since -# that could be very large, just save up to this number of characters. -REASON_LENGTH = 128 - - -def make_pretty_name(method): - """Makes a pretty name for a function/method.""" - meth_pieces = [method.__name__] - # If its an instance method attempt to tack on the class name - if hasattr(method, '__self__') and method.__self__ is not None: - try: - meth_pieces.insert(0, method.__self__.__class__.__name__) - except AttributeError: - pass - return ".".join(meth_pieces) - - -def restore_source_status(context, db, volume_spec): - # NOTE(harlowja): Only if the type of the volume that was being created is - # the source volume type should we try to reset the source volume status - # back to its original value. - if not volume_spec or volume_spec.get('type') != 'source_vol': - return - source_volid = volume_spec['source_volid'] - source_status = volume_spec['source_volstatus'] - try: - LOG.debug('Restoring source %(source_volid)s status to %(status)s', - {'status': source_status, 'source_volid': source_volid}) - db.volume_update(context, source_volid, {'status': source_status}) - except exception.CinderException: - # NOTE(harlowja): Don't let this cause further exceptions since this is - # a non-critical failure. 
- LOG.exception("Failed setting source " - "volume %(source_volid)s back to" - " its initial %(source_status)s status", - {'source_status': source_status, - 'source_volid': source_volid}) - - -def _clean_reason(reason): - if reason is None: - return 'Unknown reason' - reason = six.text_type(reason) - if len(reason) <= REASON_LENGTH: - return reason - else: - return reason[0:REASON_LENGTH] + '...' - - -def error_out(resource, reason=None, status='error'): - """Sets status to error for any persistent OVO.""" - reason = _clean_reason(reason) - try: - LOG.debug('Setting %(object_type)s %(object_id)s to error due to: ' - '%(reason)s', {'object_type': resource.obj_name(), - 'object_id': resource.id, - 'reason': reason}) - resource.status = status - resource.save() - except Exception: - # Don't let this cause further exceptions. - LOG.exception("Failed setting %(object_type)s %(object_id)s to " - " %(status)s status.", - {'object_type': resource.obj_name(), - 'object_id': resource.id, - 'status': status}) diff --git a/cinder/volume/flows/manager/__init__.py b/cinder/volume/flows/manager/__init__.py deleted file mode 100644 index e69de29bb..000000000 diff --git a/cinder/volume/flows/manager/create_volume.py b/cinder/volume/flows/manager/create_volume.py deleted file mode 100644 index 3cc8de31a..000000000 --- a/cinder/volume/flows/manager/create_volume.py +++ /dev/null @@ -1,1082 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -import os -import traceback - -from oslo_concurrency import processutils -from oslo_config import cfg -from oslo_log import log as logging -from oslo_utils import excutils -from oslo_utils import timeutils -import taskflow.engines -from taskflow.patterns import linear_flow -from taskflow.types import failure as ft - -from cinder import context as cinder_context -from cinder import coordination -from cinder import exception -from cinder import flow_utils -from cinder.i18n import _ -from cinder.image import glance -from cinder.image import image_utils -from cinder.message import api as message_api -from cinder.message import message_field -from cinder import objects -from cinder.objects import consistencygroup -from cinder import utils -from cinder.volume.flows import common -from cinder.volume import utils as volume_utils - -LOG = logging.getLogger(__name__) - -ACTION = 'volume:create' -CONF = cfg.CONF - -# These attributes we will attempt to save for the volume if they exist -# in the source image metadata. -IMAGE_ATTRIBUTES = ( - 'checksum', - 'container_format', - 'disk_format', - 'min_disk', - 'min_ram', - 'size', -) - - -class OnFailureRescheduleTask(flow_utils.CinderTask): - """Triggers a rescheduling request to be sent when reverting occurs. - - If rescheduling doesn't occur this task errors out the volume. - - Reversion strategy: Triggers the rescheduling mechanism whereby a cast gets - sent to the scheduler rpc api to allow for an attempt X of Y for scheduling - this volume elsewhere. 
- """ - - def __init__(self, reschedule_context, db, driver, scheduler_rpcapi, - do_reschedule): - requires = ['filter_properties', 'request_spec', 'volume', - 'context'] - super(OnFailureRescheduleTask, self).__init__(addons=[ACTION], - requires=requires) - self.do_reschedule = do_reschedule - self.scheduler_rpcapi = scheduler_rpcapi - self.db = db - self.driver = driver - self.reschedule_context = reschedule_context - # These exception types will trigger the volume to be set into error - # status rather than being rescheduled. - self.no_reschedule_types = [ - # Image copying happens after volume creation so rescheduling due - # to copy failure will mean the same volume will be created at - # another place when it still exists locally. - exception.ImageCopyFailure, - # Metadata updates happen after the volume has been created so if - # they fail, rescheduling will likely attempt to create the volume - # on another machine when it still exists locally. - exception.MetadataCopyFailure, - exception.MetadataCreateFailure, - exception.MetadataUpdateFailure, - # The volume/snapshot has been removed from the database, that - # can not be fixed by rescheduling. - exception.VolumeNotFound, - exception.SnapshotNotFound, - exception.VolumeTypeNotFound, - exception.ImageUnacceptable, - exception.ImageTooBig, - ] - - def execute(self, **kwargs): - pass - - def _pre_reschedule(self, volume): - """Actions that happen before the rescheduling attempt occur here.""" - - try: - # Update volume's timestamp and host. - # - # NOTE(harlowja): this is awkward to be done here, shouldn't - # this happen at the scheduler itself and not before it gets - # sent to the scheduler? (since what happens if it never gets - # there??). It's almost like we need a status of 'on-the-way-to - # scheduler' in the future. - # We don't need to update the volume's status to creating, since - # we haven't changed it to error. 
- update = { - 'scheduled_at': timeutils.utcnow(), - 'host': None, - } - LOG.debug("Updating volume %(volume_id)s with %(update)s.", - {'update': update, 'volume_id': volume.id}) - volume.update(update) - volume.save() - except exception.CinderException: - # Don't let updating the state cause the rescheduling to fail. - LOG.exception("Volume %s: update volume state failed.", - volume.id) - - def _reschedule(self, context, cause, request_spec, filter_properties, - volume): - """Actions that happen during the rescheduling attempt occur here.""" - - create_volume = self.scheduler_rpcapi.create_volume - if not filter_properties: - filter_properties = {} - if 'retry' not in filter_properties: - filter_properties['retry'] = {} - - retry_info = filter_properties['retry'] - num_attempts = retry_info.get('num_attempts', 0) - request_spec['volume_id'] = volume.id - - LOG.debug("Volume %(volume_id)s: re-scheduling %(method)s " - "attempt %(num)d due to %(reason)s", - {'volume_id': volume.id, - 'method': common.make_pretty_name(create_volume), - 'num': num_attempts, - 'reason': cause.exception_str}) - - if all(cause.exc_info): - # Stringify to avoid circular ref problem in json serialization - retry_info['exc'] = traceback.format_exception(*cause.exc_info) - - return create_volume(context, volume, request_spec=request_spec, - filter_properties=filter_properties) - - def _post_reschedule(self, volume): - """Actions that happen after the rescheduling attempt occur here.""" - - LOG.debug("Volume %s: re-scheduled", volume.id) - - # NOTE(dulek): Here we should be sure that rescheduling occurred and - # host field will be erased. Just in case volume was already created at - # the backend, we attempt to delete it. - try: - self.driver.delete_volume(volume) - except Exception: - # Most likely the volume weren't created at the backend. We can - # safely ignore this. 
- pass - - def revert(self, context, result, flow_failures, volume, **kwargs): - # NOTE(dulek): Revert is occurring and manager need to know if - # rescheduling happened. We're returning boolean flag that will - # indicate that. It which will be available in flow engine store - # through get_revert_result method. - - # If do not want to be rescheduled, just set the volume's status to - # error and return. - if not self.do_reschedule: - common.error_out(volume) - LOG.error("Volume %s: create failed", volume.id) - return False - - # Check if we have a cause which can tell us not to reschedule and - # set the volume's status to error. - for failure in flow_failures.values(): - if failure.check(*self.no_reschedule_types): - common.error_out(volume) - LOG.error("Volume %s: create failed", volume.id) - return False - - # Use a different context when rescheduling. - if self.reschedule_context: - cause = list(flow_failures.values())[0] - context = self.reschedule_context - try: - self._pre_reschedule(volume) - self._reschedule(context, cause, volume=volume, **kwargs) - self._post_reschedule(volume) - return True - except exception.CinderException: - LOG.exception("Volume %s: rescheduling failed", volume.id) - - return False - - -class ExtractVolumeRefTask(flow_utils.CinderTask): - """Extracts volume reference for given volume id.""" - - default_provides = 'refreshed' - - def __init__(self, db, host, set_error=True): - super(ExtractVolumeRefTask, self).__init__(addons=[ACTION]) - self.db = db - self.host = host - self.set_error = set_error - - def execute(self, context, volume): - # NOTE(harlowja): this will fetch the volume from the database, if - # the volume has been deleted before we got here then this should fail. - # - # In the future we might want to have a lock on the volume_id so that - # the volume can not be deleted while its still being created? 
- volume.refresh() - return volume - - def revert(self, context, volume, result, **kwargs): - if isinstance(result, ft.Failure) or not self.set_error: - return - - reason = _('Volume create failed while extracting volume ref.') - common.error_out(volume, reason) - LOG.error("Volume %s: create failed", volume.id) - - -class ExtractVolumeSpecTask(flow_utils.CinderTask): - """Extracts a spec of a volume to be created into a common structure. - - This task extracts and organizes the input requirements into a common - and easier to analyze structure for later tasks to use. It will also - attach the underlying database volume reference which can be used by - other tasks to reference for further details about the volume to be. - - Reversion strategy: N/A - """ - - default_provides = 'volume_spec' - - def __init__(self, db): - requires = ['volume', 'request_spec'] - super(ExtractVolumeSpecTask, self).__init__(addons=[ACTION], - requires=requires) - self.db = db - - def execute(self, context, volume, request_spec): - get_remote_image_service = glance.get_remote_image_service - - volume_name = volume.name - volume_size = utils.as_int(volume.size, quiet=False) - - # Create a dictionary that will represent the volume to be so that - # later tasks can easily switch between the different types and create - # the volume according to the volume types specifications (which are - # represented in this dictionary). - specs = { - 'status': volume.status, - 'type': 'raw', # This will have the type of the volume to be - # created, which should be one of [raw, snap, - # source_vol, image] - 'volume_id': volume.id, - 'volume_name': volume_name, - 'volume_size': volume_size, - } - - if volume.snapshot_id: - # We are making a snapshot based volume instead of a raw volume. - specs.update({ - 'type': 'snap', - 'snapshot_id': volume.snapshot_id, - }) - elif volume.source_volid: - # We are making a source based volume instead of a raw volume. 
- # - # NOTE(harlowja): This will likely fail if the source volume - # disappeared by the time this call occurred. - source_volid = volume.source_volid - source_volume_ref = objects.Volume.get_by_id(context, - source_volid) - specs.update({ - 'source_volid': source_volid, - # This is captured incase we have to revert and we want to set - # back the source volume status to its original status. This - # may or may not be sketchy to do?? - 'source_volstatus': source_volume_ref.status, - 'type': 'source_vol', - }) - elif request_spec.get('source_replicaid'): - # We are making a clone based on the replica. - # - # NOTE(harlowja): This will likely fail if the replica - # disappeared by the time this call occurred. - source_volid = request_spec['source_replicaid'] - source_volume_ref = objects.Volume.get_by_id(context, - source_volid) - specs.update({ - 'source_replicaid': source_volid, - 'source_replicastatus': source_volume_ref.status, - 'type': 'source_replica', - }) - elif request_spec.get('image_id'): - # We are making an image based volume instead of a raw volume. - image_href = request_spec['image_id'] - image_service, image_id = get_remote_image_service(context, - image_href) - specs.update({ - 'type': 'image', - 'image_id': image_id, - 'image_location': image_service.get_location(context, - image_id), - 'image_meta': image_service.show(context, image_id), - # Instead of refetching the image service later just save it. - # - # NOTE(harlowja): if we have to later recover this tasks output - # on another 'node' that this object won't be able to be - # serialized, so we will have to recreate this object on - # demand in the future. - 'image_service': image_service, - }) - - return specs - - def revert(self, context, result, **kwargs): - if isinstance(result, ft.Failure): - return - volume_spec = result.get('volume_spec') - # Restore the source volume status and set the volume to error status. 
- common.restore_source_status(context, self.db, volume_spec) - - -class NotifyVolumeActionTask(flow_utils.CinderTask): - """Performs a notification about the given volume when called. - - Reversion strategy: N/A - """ - - def __init__(self, db, event_suffix): - super(NotifyVolumeActionTask, self).__init__(addons=[ACTION, - event_suffix]) - self.db = db - self.event_suffix = event_suffix - - def execute(self, context, volume): - try: - volume_utils.notify_about_volume_usage(context, volume, - self.event_suffix, - host=volume.host) - except exception.CinderException: - # If notification sending of volume database entry reading fails - # then we shouldn't error out the whole workflow since this is - # not always information that must be sent for volumes to operate - LOG.exception("Failed notifying about the volume" - " action %(event)s for volume %(volume_id)s", - {'event': self.event_suffix, 'volume_id': volume.id}) - - -class CreateVolumeFromSpecTask(flow_utils.CinderTask): - """Creates a volume from a provided specification. - - Reversion strategy: N/A - """ - - def __init__(self, manager, db, driver, image_volume_cache=None): - super(CreateVolumeFromSpecTask, self).__init__(addons=[ACTION]) - self.manager = manager - self.db = db - self.driver = driver - self.image_volume_cache = image_volume_cache - self.message = message_api.API() - - def _handle_bootable_volume_glance_meta(self, context, volume, - **kwargs): - """Enable bootable flag and properly handle glance metadata. - - Caller should provide one and only one of snapshot_id,source_volid - and image_id. If an image_id specified, an image_meta should also be - provided, otherwise will be treated as an empty dictionary. 
- """ - - log_template = _("Copying metadata from %(src_type)s %(src_id)s to " - "%(vol_id)s.") - exception_template = _("Failed updating volume %(vol_id)s metadata" - " using the provided %(src_type)s" - " %(src_id)s metadata") - src_type = None - src_id = None - self._enable_bootable_flag(context, volume) - try: - if kwargs.get('snapshot_id'): - src_type = 'snapshot' - src_id = kwargs['snapshot_id'] - snapshot_id = src_id - LOG.debug(log_template, {'src_type': src_type, - 'src_id': src_id, - 'vol_id': volume.id}) - self.db.volume_glance_metadata_copy_to_volume( - context, volume.id, snapshot_id) - elif kwargs.get('source_volid'): - src_type = 'source volume' - src_id = kwargs['source_volid'] - source_volid = src_id - LOG.debug(log_template, {'src_type': src_type, - 'src_id': src_id, - 'vol_id': volume.id}) - self.db.volume_glance_metadata_copy_from_volume_to_volume( - context, - source_volid, - volume.id) - elif kwargs.get('source_replicaid'): - src_type = 'source replica' - src_id = kwargs['source_replicaid'] - source_replicaid = src_id - LOG.debug(log_template, {'src_type': src_type, - 'src_id': src_id, - 'vol_id': volume.id}) - self.db.volume_glance_metadata_copy_from_volume_to_volume( - context, - source_replicaid, - volume.id) - elif kwargs.get('image_id'): - src_type = 'image' - src_id = kwargs['image_id'] - image_id = src_id - image_meta = kwargs.get('image_meta', {}) - LOG.debug(log_template, {'src_type': src_type, - 'src_id': src_id, - 'vol_id': volume.id}) - self._capture_volume_image_metadata(context, volume.id, - image_id, image_meta) - except exception.GlanceMetadataNotFound: - # If volume is not created from image, No glance metadata - # would be available for that volume in - # volume glance metadata table - pass - except exception.CinderException as ex: - LOG.exception(exception_template, {'src_type': src_type, - 'src_id': src_id, - 'vol_id': volume.id}) - raise exception.MetadataCopyFailure(reason=ex) - - def _create_from_snapshot(self, context, 
volume, snapshot_id, - **kwargs): - snapshot = objects.Snapshot.get_by_id(context, snapshot_id) - model_update = self.driver.create_volume_from_snapshot(volume, - snapshot) - self._cleanup_cg_in_volume(volume) - # NOTE(harlowja): Subtasks would be useful here since after this - # point the volume has already been created and further failures - # will not destroy the volume (although they could in the future). - make_bootable = False - try: - originating_vref = objects.Volume.get_by_id(context, - snapshot.volume_id) - make_bootable = originating_vref.bootable - except exception.CinderException as ex: - LOG.exception("Failed fetching snapshot %(snapshot_id)s bootable" - " flag using the provided glance snapshot " - "%(snapshot_ref_id)s volume reference", - {'snapshot_id': snapshot_id, - 'snapshot_ref_id': snapshot.volume_id}) - raise exception.MetadataUpdateFailure(reason=ex) - if make_bootable: - self._handle_bootable_volume_glance_meta(context, volume, - snapshot_id=snapshot_id) - return model_update - - def _enable_bootable_flag(self, context, volume): - try: - LOG.debug('Marking volume %s as bootable.', volume.id) - volume.bootable = True - volume.save() - except exception.CinderException as ex: - LOG.exception("Failed updating volume %(volume_id)s bootable " - "flag to true", {'volume_id': volume.id}) - raise exception.MetadataUpdateFailure(reason=ex) - - def _create_from_source_volume(self, context, volume, source_volid, - **kwargs): - # NOTE(harlowja): if the source volume has disappeared this will be our - # detection of that since this database call should fail. - # - # NOTE(harlowja): likely this is not the best place for this to happen - # and we should have proper locks on the source volume while actions - # that use the source volume are underway. 
- srcvol_ref = objects.Volume.get_by_id(context, source_volid) - model_update = self.driver.create_cloned_volume(volume, srcvol_ref) - self._cleanup_cg_in_volume(volume) - # NOTE(harlowja): Subtasks would be useful here since after this - # point the volume has already been created and further failures - # will not destroy the volume (although they could in the future). - if srcvol_ref.bootable: - self._handle_bootable_volume_glance_meta( - context, volume, source_volid=srcvol_ref.id) - return model_update - - def _create_from_source_replica(self, context, volume, source_replicaid, - **kwargs): - # NOTE(harlowja): if the source volume has disappeared this will be our - # detection of that since this database call should fail. - # - # NOTE(harlowja): likely this is not the best place for this to happen - # and we should have proper locks on the source volume while actions - # that use the source volume are underway. - srcvol_ref = objects.Volume.get_by_id(context, source_replicaid) - model_update = self.driver.create_replica_test_volume(volume, - srcvol_ref) - self._cleanup_cg_in_volume(volume) - # NOTE(harlowja): Subtasks would be useful here since after this - # point the volume has already been created and further failures - # will not destroy the volume (although they could in the future). 
- if srcvol_ref.bootable: - self._handle_bootable_volume_glance_meta( - context, - volume, - source_replicaid=source_replicaid) - return model_update - - def _copy_image_to_volume(self, context, volume, - image_meta, image_location, image_service): - - image_id = image_meta['id'] - """Downloads Glance image to the specified volume.""" - LOG.debug("Attempting download of %(image_id)s (%(image_location)s)" - " to volume %(volume_id)s.", - {'image_id': image_id, 'volume_id': volume.id, - 'image_location': image_location}) - try: - image_properties = image_meta.get('properties', {}) - image_encryption_key = image_properties.get( - 'cinder_encryption_key_id') - - if volume.encryption_key_id and image_encryption_key: - # If the image provided an encryption key, we have - # already cloned it to the volume's key in - # _get_encryption_key_id, so we can do a direct copy. - self.driver.copy_image_to_volume( - context, volume, image_service, image_id) - elif volume.encryption_key_id: - # Creating an encrypted volume from a normal, unencrypted, - # image. 
- self.driver.copy_image_to_encrypted_volume( - context, volume, image_service, image_id) - else: - self.driver.copy_image_to_volume( - context, volume, image_service, image_id) - except processutils.ProcessExecutionError as ex: - LOG.exception("Failed to copy image %(image_id)s to volume: " - "%(volume_id)s", - {'volume_id': volume.id, 'image_id': image_id}) - raise exception.ImageCopyFailure(reason=ex.stderr) - except exception.ImageUnacceptable as ex: - LOG.exception("Failed to copy image to volume: %(volume_id)s", - {'volume_id': volume.id}) - raise exception.ImageUnacceptable(ex) - except exception.ImageTooBig as ex: - LOG.exception("Failed to copy image %(image_id)s to volume: " - "%(volume_id)s", - {'volume_id': volume.id, 'image_id': image_id}) - excutils.save_and_reraise_exception() - except Exception as ex: - LOG.exception("Failed to copy image %(image_id)s to " - "volume: %(volume_id)s", - {'volume_id': volume.id, 'image_id': image_id}) - if not isinstance(ex, exception.ImageCopyFailure): - raise exception.ImageCopyFailure(reason=ex) - else: - raise - - LOG.debug("Downloaded image %(image_id)s (%(image_location)s)" - " to volume %(volume_id)s successfully.", - {'image_id': image_id, 'volume_id': volume.id, - 'image_location': image_location}) - - def _capture_volume_image_metadata(self, context, volume_id, - image_id, image_meta): - - # Save some base attributes into the volume metadata - base_metadata = { - 'image_id': image_id, - } - name = image_meta.get('name', None) - if name: - base_metadata['image_name'] = name - - # Save some more attributes into the volume metadata from the image - # metadata - for key in IMAGE_ATTRIBUTES: - if key not in image_meta: - continue - value = image_meta.get(key, None) - if value is not None: - base_metadata[key] = value - - # Save all the image metadata properties into the volume metadata - property_metadata = {} - image_properties = image_meta.get('properties', {}) - for (key, value) in image_properties.items(): - 
if value is not None: - property_metadata[key] = value - - volume_metadata = dict(property_metadata) - volume_metadata.update(base_metadata) - LOG.debug("Creating volume glance metadata for volume %(volume_id)s" - " backed by image %(image_id)s with: %(vol_metadata)s.", - {'volume_id': volume_id, 'image_id': image_id, - 'vol_metadata': volume_metadata}) - self.db.volume_glance_metadata_bulk_create(context, volume_id, - volume_metadata) - - def _clone_image_volume(self, context, volume, image_location, image_meta): - """Create a volume efficiently from an existing image. - - Returns a dict of volume properties eg. provider_location, - boolean indicating whether cloning occurred - """ - # NOTE (lixiaoy1): currently can't create volume from source vol with - # different encryptions, so just return. - if not image_location or volume.encryption_key_id: - return None, False - - if (image_meta.get('container_format') != 'bare' or - image_meta.get('disk_format') != 'raw'): - LOG.info("Requested image %(id)s is not in raw format.", - {'id': image_meta.get('id')}) - return None, False - - image_volume = None - direct_url, locations = image_location - urls = set([direct_url] + [loc.get('url') for loc in locations or []]) - image_volume_ids = [url[9:] for url in urls - if url and url.startswith('cinder://')] - image_volumes = self.db.volume_get_all_by_host( - context, volume['host'], filters={'id': image_volume_ids}) - - for image_volume in image_volumes: - # For the case image volume is stored in the service tenant, - # image_owner volume metadata should also be checked. 
- image_owner = None - volume_metadata = image_volume.get('volume_metadata') or {} - for m in volume_metadata: - if m['key'] == 'image_owner': - image_owner = m['value'] - if (image_meta['owner'] != volume['project_id'] and - image_meta['owner'] != image_owner): - LOG.info("Skipping image volume %(id)s because " - "it is not accessible by current Tenant.", - {'id': image_volume.id}) - continue - - LOG.info("Will clone a volume from the image volume " - "%(id)s.", {'id': image_volume.id}) - break - else: - LOG.debug("No accessible image volume for image %(id)s found.", - {'id': image_meta['id']}) - return None, False - - try: - ret = self.driver.create_cloned_volume(volume, image_volume) - self._cleanup_cg_in_volume(volume) - return ret, True - except (NotImplementedError, exception.CinderException): - LOG.exception('Failed to clone image volume %(id)s.', - {'id': image_volume['id']}) - return None, False - - def _create_from_image_download(self, context, volume, image_location, - image_meta, image_service): - # TODO(harlowja): what needs to be rolled back in the clone if this - # volume create fails?? Likely this should be a subflow or broken - # out task in the future. That will bring up the question of how - # do we make said subflow/task which is only triggered in the - # clone image 'path' resumable and revertable in the correct - # manner. 
- model_update = self.driver.create_volume(volume) or {} - self._cleanup_cg_in_volume(volume) - model_update['status'] = 'downloading' - try: - volume.update(model_update) - volume.save() - except exception.CinderException: - LOG.exception("Failed updating volume %(volume_id)s with " - "%(updates)s", - {'volume_id': volume.id, - 'updates': model_update}) - try: - self._copy_image_to_volume(context, volume, image_meta, - image_location, image_service) - except exception.ImageTooBig: - with excutils.save_and_reraise_exception(): - LOG.exception("Failed to copy image to volume " - "%(volume_id)s due to insufficient space", - {'volume_id': volume.id}) - return model_update - - def _create_from_image_cache(self, context, internal_context, volume, - image_id, image_meta): - """Attempt to create the volume using the image cache. - - Best case this will simply clone the existing volume in the cache. - Worst case the image is out of date and will be evicted. In that case - a clone will not be created and the image must be downloaded again. - """ - LOG.debug('Attempting to retrieve cache entry for image = ' - '%(image_id)s on host %(host)s.', - {'image_id': image_id, 'host': volume.host}) - # Currently can't create volume from source vol with different - # encryptions, so just return - if volume.encryption_key_id: - return None, False - - try: - cache_entry = self.image_volume_cache.get_entry(internal_context, - volume, - image_id, - image_meta) - if cache_entry: - LOG.debug('Creating from source image-volume %(volume_id)s', - {'volume_id': cache_entry['volume_id']}) - model_update = self._create_from_source_volume( - context, - volume, - cache_entry['volume_id'] - ) - return model_update, True - except NotImplementedError: - LOG.warning('Backend does not support creating image-volume ' - 'clone. 
Image will be downloaded from Glance.') - except exception.CinderException as e: - LOG.warning('Failed to create volume from image-volume cache, ' - 'image will be downloaded from Glance. Error: ' - '%(exception)s', {'exception': e}) - return None, False - - @coordination.synchronized('{image_id}') - def _create_from_image_cache_or_download(self, context, volume, - image_location, image_id, - image_meta, image_service): - # Try and use the image cache. - should_create_cache_entry = False - cloned = False - model_update = None - if self.image_volume_cache: - internal_context = cinder_context.get_internal_tenant_context() - if not internal_context: - LOG.info('Unable to get Cinder internal context, will ' - 'not use image-volume cache.') - else: - model_update, cloned = self._create_from_image_cache( - context, - internal_context, - volume, - image_id, - image_meta - ) - # Don't cache encrypted volume. - if not cloned and not volume.encryption_key_id: - should_create_cache_entry = True - - # Fall back to default behavior of creating volume, - # download the image data and copy it into the volume. - original_size = volume.size - backend_name = volume_utils.extract_host(volume.service_topic_queue) - try: - if not cloned: - try: - with image_utils.TemporaryImages.fetch( - image_service, context, image_id, - backend_name) as tmp_image: - # Try to create the volume as the minimal size, - # then we can extend once the image has been - # downloaded. 
- data = image_utils.qemu_img_info(tmp_image) - - virtual_size = image_utils.check_virtual_size( - data.virtual_size, volume.size, image_id) - - if should_create_cache_entry: - if virtual_size and virtual_size != original_size: - volume.size = virtual_size - volume.save() - model_update = self._create_from_image_download( - context, - volume, - image_location, - image_meta, - image_service - ) - except exception.ImageTooBig as e: - with excutils.save_and_reraise_exception(): - self.message.create( - context, - message_field.Action.COPY_IMAGE_TO_VOLUME, - resource_uuid=volume.id, - detail= - message_field.Detail.NOT_ENOUGH_SPACE_FOR_IMAGE, - exception=e) - - if should_create_cache_entry: - # Update the newly created volume db entry before we clone it - # for the image-volume creation. - if model_update: - volume.update(model_update) - volume.save() - self.manager._create_image_cache_volume_entry(internal_context, - volume, - image_id, - image_meta) - finally: - # If we created the volume as the minimal size, extend it back to - # what was originally requested. If an exception has occurred or - # extending it back failed, we still need to put this back before - # letting it be raised further up the stack. - if volume.size != original_size: - try: - self.driver.extend_volume(volume, original_size) - finally: - volume.size = original_size - volume.save() - - return model_update - - def _create_from_image(self, context, volume, - image_location, image_id, image_meta, - image_service, **kwargs): - LOG.debug("Cloning %(volume_id)s from image %(image_id)s " - " at location %(image_location)s.", - {'volume_id': volume.id, - 'image_location': image_location, 'image_id': image_id}) - - # NOTE(e0ne): check for free space in image_conversion_dir before - # image downloading. 
- if (CONF.image_conversion_dir and not - os.path.exists(CONF.image_conversion_dir)): - os.makedirs(CONF.image_conversion_dir) - try: - image_utils.check_available_space(CONF.image_conversion_dir, - image_meta['size'], image_id) - except exception.ImageTooBig as err: - with excutils.save_and_reraise_exception(): - self.message.create( - context, - message_field.Action.COPY_IMAGE_TO_VOLUME, - resource_uuid=volume.id, - detail=message_field.Detail.NOT_ENOUGH_SPACE_FOR_IMAGE, - exception=err) - - virtual_size = image_meta.get('virtual_size') - if virtual_size: - virtual_size = image_utils.check_virtual_size(virtual_size, - volume.size, - image_id) - - # Create the volume from an image. - # - # First see if the driver can clone the image directly. - # - # NOTE (singn): two params need to be returned - # dict containing provider_location for cloned volume - # and clone status. - # NOTE (lixiaoy1): Currently all images are raw data, we can't - # use clone_image to copy data if new volume is encrypted. - volume_is_encrypted = volume.encryption_key_id is not None - cloned = False - model_update = None - if not volume_is_encrypted: - model_update, cloned = self.driver.clone_image(context, - volume, - image_location, - image_meta, - image_service) - - # Try and clone the image if we have it set as a glance location. - if not cloned and 'cinder' in CONF.allowed_direct_url_schemes: - model_update, cloned = self._clone_image_volume(context, - volume, - image_location, - image_meta) - - # Try and use the image cache, and download if not cached. 
- if not cloned: - model_update = self._create_from_image_cache_or_download( - context, - volume, - image_location, - image_id, - image_meta, - image_service) - - self._handle_bootable_volume_glance_meta(context, volume, - image_id=image_id, - image_meta=image_meta) - return model_update - - def _create_raw_volume(self, volume, **kwargs): - ret = self.driver.create_volume(volume) - self._cleanup_cg_in_volume(volume) - return ret - - def execute(self, context, volume, volume_spec): - volume_spec = dict(volume_spec) - volume_id = volume_spec.pop('volume_id', None) - if not volume_id: - volume_id = volume.id - - # we can't do anything if the driver didn't init - if not self.driver.initialized: - driver_name = self.driver.__class__.__name__ - LOG.error("Unable to create volume. " - "Volume driver %s not initialized", driver_name) - raise exception.DriverNotInitialized() - - # NOTE(xyang): Populate consistencygroup_id and consistencygroup - # fields before passing to the driver. This is to support backward - # compatibility of consistencygroup. 
- if volume.group_id: - volume.consistencygroup_id = volume.group_id - cg = consistencygroup.ConsistencyGroup() - cg.from_group(volume.group) - volume.consistencygroup = cg - - create_type = volume_spec.pop('type', None) - LOG.info("Volume %(volume_id)s: being created as %(create_type)s " - "with specification: %(volume_spec)s", - {'volume_spec': volume_spec, 'volume_id': volume_id, - 'create_type': create_type}) - if create_type == 'raw': - model_update = self._create_raw_volume(volume, **volume_spec) - elif create_type == 'snap': - model_update = self._create_from_snapshot(context, volume, - **volume_spec) - elif create_type == 'source_vol': - model_update = self._create_from_source_volume( - context, volume, **volume_spec) - elif create_type == 'source_replica': - model_update = self._create_from_source_replica( - context, volume, **volume_spec) - elif create_type == 'image': - model_update = self._create_from_image(context, - volume, - **volume_spec) - else: - raise exception.VolumeTypeNotFound(volume_type_id=create_type) - - # Persist any model information provided on creation. - try: - if model_update: - with volume.obj_as_admin(): - volume.update(model_update) - volume.save() - except exception.CinderException: - # If somehow the update failed we want to ensure that the - # failure is logged (but not try rescheduling since the volume at - # this point has been created). - LOG.exception("Failed updating model of volume %(volume_id)s " - "with creation provided model %(model)s", - {'volume_id': volume_id, 'model': model_update}) - raise - - def _cleanup_cg_in_volume(self, volume): - # NOTE(xyang): Cannot have both group_id and consistencygroup_id. - # consistencygroup_id needs to be removed to avoid DB reference - # error because there isn't an entry in the consistencygroups table. 
- if (('group_id' in volume and volume.group_id) and - ('consistencygroup_id' in volume and - volume.consistencygroup_id)): - volume.consistencygroup_id = None - if 'consistencygroup' in volume: - volume.consistencygroup = None - - -class CreateVolumeOnFinishTask(NotifyVolumeActionTask): - """On successful volume creation this will perform final volume actions. - - When a volume is created successfully it is expected that MQ notifications - and database updates will occur to 'signal' to others that the volume is - now ready for usage. This task does those notifications and updates in a - reliable manner (not re-raising exceptions if said actions can not be - triggered). - - Reversion strategy: N/A - """ - - def __init__(self, db, event_suffix): - super(CreateVolumeOnFinishTask, self).__init__(db, event_suffix) - self.status_translation = { - 'migration_target_creating': 'migration_target', - } - - def execute(self, context, volume, volume_spec): - new_status = self.status_translation.get(volume_spec.get('status'), - 'available') - update = { - 'status': new_status, - 'launched_at': timeutils.utcnow(), - } - try: - # TODO(harlowja): is it acceptable to only log if this fails?? - # or are there other side-effects that this will cause if the - # status isn't updated correctly (aka it will likely be stuck in - # 'creating' if this fails)?? - volume.update(update) - volume.save() - # Now use the parent to notify. - super(CreateVolumeOnFinishTask, self).execute(context, volume) - except exception.CinderException: - LOG.exception("Failed updating volume %(volume_id)s with " - "%(update)s", {'volume_id': volume.id, - 'update': update}) - # Even if the update fails, the volume is ready. 
- LOG.info("Volume %(volume_name)s (%(volume_id)s): " - "created successfully", - {'volume_name': volume_spec['volume_name'], - 'volume_id': volume.id}) - - -def get_flow(context, manager, db, driver, scheduler_rpcapi, host, volume, - allow_reschedule, reschedule_context, request_spec, - filter_properties, image_volume_cache=None): - - """Constructs and returns the manager entrypoint flow. - - This flow will do the following: - - 1. Determines if rescheduling is enabled (ahead of time). - 2. Inject keys & values for dependent tasks. - 3. Selects 1 of 2 activated only on *failure* tasks (one to update the db - status & notify or one to update the db status & notify & *reschedule*). - 4. Extracts a volume specification from the provided inputs. - 5. Notifies that the volume has started to be created. - 6. Creates a volume from the extracted volume specification. - 7. Attaches a on-success *only* task that notifies that the volume creation - has ended and performs further database status updates. - """ - - flow_name = ACTION.replace(":", "_") + "_manager" - volume_flow = linear_flow.Flow(flow_name) - - # This injects the initial starting flow values into the workflow so that - # the dependency order of the tasks provides/requires can be correctly - # determined. - create_what = { - 'context': context, - 'filter_properties': filter_properties, - 'request_spec': request_spec, - 'volume': volume, - } - - volume_flow.add(ExtractVolumeRefTask(db, host, set_error=False)) - - retry = filter_properties.get('retry', None) - - # Always add OnFailureRescheduleTask and we handle the change of volume's - # status when reverting the flow. Meanwhile, no need to revert process of - # ExtractVolumeRefTask. 
- do_reschedule = allow_reschedule and request_spec and retry - volume_flow.add(OnFailureRescheduleTask(reschedule_context, db, driver, - scheduler_rpcapi, do_reschedule)) - - LOG.debug("Volume reschedule parameters: %(allow)s " - "retry: %(retry)s", {'allow': allow_reschedule, 'retry': retry}) - - volume_flow.add(ExtractVolumeSpecTask(db), - NotifyVolumeActionTask(db, "create.start"), - CreateVolumeFromSpecTask(manager, - db, - driver, - image_volume_cache), - CreateVolumeOnFinishTask(db, "create.end")) - - # Now load (but do not run) the flow using the provided initial data. - return taskflow.engines.load(volume_flow, store=create_what) diff --git a/cinder/volume/flows/manager/manage_existing.py b/cinder/volume/flows/manager/manage_existing.py deleted file mode 100644 index d55f9cec0..000000000 --- a/cinder/volume/flows/manager/manage_existing.py +++ /dev/null @@ -1,133 +0,0 @@ -# Copyright 2014 IBM Corp. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -from oslo_log import log as logging -from oslo_utils import excutils -import taskflow.engines -from taskflow.patterns import linear_flow - -from cinder import exception -from cinder import flow_utils -from cinder.i18n import _ -from cinder.volume.flows.api import create_volume as create_api -from cinder.volume.flows import common as flow_common -from cinder.volume.flows.manager import create_volume as create_mgr - -LOG = logging.getLogger(__name__) - -ACTION = 'volume:manage_existing' - - -class PrepareForQuotaReservationTask(flow_utils.CinderTask): - """Gets the volume size from the driver.""" - - default_provides = set(['size', 'volume_type_id', 'volume_properties', - 'volume_spec']) - - def __init__(self, db, driver): - super(PrepareForQuotaReservationTask, self).__init__(addons=[ACTION]) - self.db = db - self.driver = driver - - def execute(self, context, volume, manage_existing_ref): - driver_name = self.driver.__class__.__name__ - if not self.driver.initialized: - LOG.error("Unable to manage existing volume. 
" - "Volume driver %s not initialized.", driver_name) - flow_common.error_out(volume, _("Volume driver %s not " - "initialized.") % driver_name, - status='error_managing') - raise exception.DriverNotInitialized() - - size = 0 - try: - size = self.driver.manage_existing_get_size(volume, - manage_existing_ref) - except Exception: - with excutils.save_and_reraise_exception(): - reason = _("Volume driver %s get exception.") % driver_name - flow_common.error_out(volume, reason, - status='error_managing') - - return {'size': size, - 'volume_type_id': volume.volume_type_id, - 'volume_properties': volume, - 'volume_spec': {'status': volume.status, - 'volume_name': volume.name, - 'volume_id': volume.id}} - - def revert(self, context, result, flow_failures, volume, **kwargs): - reason = _('Volume manage failed.') - flow_common.error_out(volume, reason=reason, - status='error_managing') - LOG.error("Volume %s: manage failed.", volume.id) - - -class ManageExistingTask(flow_utils.CinderTask): - """Brings an existing volume under Cinder management.""" - - default_provides = set(['volume']) - - def __init__(self, db, driver): - super(ManageExistingTask, self).__init__(addons=[ACTION]) - self.db = db - self.driver = driver - - def execute(self, context, volume, manage_existing_ref, size): - model_update = self.driver.manage_existing(volume, - manage_existing_ref) - - if not model_update: - model_update = {} - model_update.update({'size': size}) - try: - volume.update(model_update) - volume.save() - except exception.CinderException: - LOG.exception("Failed updating model of volume %(volume_id)s" - " with creation provided model %(model)s", - {'volume_id': volume.id, - 'model': model_update}) - raise - - return {'volume': volume} - - -def get_flow(context, db, driver, host, volume, ref): - """Constructs and returns the manager entrypoint flow.""" - - flow_name = ACTION.replace(":", "_") + "_manager" - volume_flow = linear_flow.Flow(flow_name) - - # This injects the initial starting 
flow values into the workflow so that - # the dependency order of the tasks provides/requires can be correctly - # determined. - create_what = { - 'context': context, - 'volume': volume, - 'manage_existing_ref': ref, - 'optional_args': {'is_quota_committed': False}, - } - - volume_flow.add(create_mgr.NotifyVolumeActionTask(db, - "manage_existing.start"), - PrepareForQuotaReservationTask(db, driver), - create_api.QuotaReserveTask(), - ManageExistingTask(db, driver), - create_api.QuotaCommitTask(), - create_mgr.CreateVolumeOnFinishTask(db, - "manage_existing.end")) - - # Now load (but do not run) the flow using the provided initial data. - return taskflow.engines.load(volume_flow, store=create_what) diff --git a/cinder/volume/flows/manager/manage_existing_snapshot.py b/cinder/volume/flows/manager/manage_existing_snapshot.py deleted file mode 100644 index 574fa5037..000000000 --- a/cinder/volume/flows/manager/manage_existing_snapshot.py +++ /dev/null @@ -1,342 +0,0 @@ -# Copyright (c) 2015 Huawei Technologies Co., Ltd. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -from oslo_config import cfg -from oslo_log import log as logging -import taskflow.engines -from taskflow.patterns import linear_flow -from taskflow.types import failure as ft - -from cinder import exception -from cinder import flow_utils -from cinder.i18n import _ -from cinder import objects -from cinder.objects import fields -from cinder import quota -from cinder import quota_utils -from cinder.volume.flows import common as flow_common -from cinder.volume import utils as volume_utils - - -CONF = cfg.CONF -LOG = logging.getLogger(__name__) -QUOTAS = quota.QUOTAS - -ACTION = 'snapshot:manage_existing' - - -class ExtractSnapshotRefTask(flow_utils.CinderTask): - """Extracts snapshot reference for given snapshot id.""" - - default_provides = 'snapshot_ref' - - def __init__(self, db): - super(ExtractSnapshotRefTask, self).__init__(addons=[ACTION]) - self.db = db - - def execute(self, context, snapshot_id): - # NOTE(wanghao): this will fetch the snapshot from the database, if - # the snapshot has been deleted before we got here then this should - # fail. - # - # In the future we might want to have a lock on the snapshot_id so that - # the snapshot can not be deleted while its still being created? - snapshot_ref = objects.Snapshot.get_by_id(context, snapshot_id) - LOG.debug("ExtractSnapshotRefTask return" - " snapshot_ref: %s", snapshot_ref) - return snapshot_ref - - def revert(self, context, snapshot_id, result, **kwargs): - if isinstance(result, ft.Failure): - return - - flow_common.error_out(result) - LOG.error("Snapshot %s: create failed", result.id) - - -class NotifySnapshotActionTask(flow_utils.CinderTask): - """Performs a notification about the given snapshot when called. 
- - Reversion strategy: N/A - """ - - def __init__(self, db, event_suffix, host): - super(NotifySnapshotActionTask, self).__init__(addons=[ACTION, - event_suffix]) - self.db = db - self.event_suffix = event_suffix - self.host = host - - def execute(self, context, snapshot_ref): - snapshot_id = snapshot_ref['id'] - try: - volume_utils.notify_about_snapshot_usage(context, snapshot_ref, - self.event_suffix, - host=self.host) - except exception.CinderException: - # If notification sending of snapshot database entry reading fails - # then we shouldn't error out the whole workflow since this is - # not always information that must be sent for snapshots to operate - LOG.exception("Failed notifying about the snapshot " - "action %(event)s for snapshot %(snp_id)s.", - {'event': self.event_suffix, - 'snp_id': snapshot_id}) - - -class PrepareForQuotaReservationTask(flow_utils.CinderTask): - """Gets the snapshot size from the driver.""" - - default_provides = set(['size', 'snapshot_properties']) - - def __init__(self, db, driver): - super(PrepareForQuotaReservationTask, self).__init__(addons=[ACTION]) - self.db = db - self.driver = driver - - def execute(self, context, snapshot_ref, manage_existing_ref): - if not self.driver.initialized: - driver_name = (self.driver.configuration. - safe_get('volume_backend_name')) - LOG.error("Unable to manage existing snapshot. " - "Volume driver %s not initialized.", driver_name) - flow_common.error_out(snapshot_ref, reason=_("Volume driver %s " - "not initialized.") % - driver_name) - raise exception.DriverNotInitialized() - - size = self.driver.manage_existing_snapshot_get_size( - snapshot=snapshot_ref, - existing_ref=manage_existing_ref) - - return {'size': size, - 'snapshot_properties': snapshot_ref} - - -class QuotaReserveTask(flow_utils.CinderTask): - """Reserves a single snapshot with the given size. - - Reversion strategy: rollback the quota reservation. 
- - Warning Warning: if the process that is running this reserve and commit - process fails (or is killed before the quota is rolled back or committed - it does appear like the quota will never be rolled back). This makes - software upgrades hard (inflight operations will need to be stopped or - allowed to complete before the upgrade can occur). *In the future* when - taskflow has persistence built-in this should be easier to correct via - an automated or manual process. - """ - - default_provides = set(['reservations']) - - def __init__(self): - super(QuotaReserveTask, self).__init__(addons=[ACTION]) - - def execute(self, context, size, snapshot_ref, optional_args): - try: - if CONF.no_snapshot_gb_quota: - reserve_opts = {'snapshots': 1} - else: - reserve_opts = {'snapshots': 1, 'gigabytes': size} - volume = objects.Volume.get_by_id(context, snapshot_ref.volume_id) - QUOTAS.add_volume_type_opts(context, - reserve_opts, - volume.volume_type_id) - reservations = QUOTAS.reserve(context, **reserve_opts) - return { - 'reservations': reservations, - } - except exception.OverQuota as e: - quota_utils.process_reserve_over_quota( - context, e, - resource='snapshots', - size=size) - - def revert(self, context, result, optional_args, **kwargs): - # We never produced a result and therefore can't destroy anything. - if isinstance(result, ft.Failure): - return - - if optional_args['is_quota_committed']: - # The reservations have already been committed and can not be - # rolled back at this point. - return - # We actually produced an output that we can revert so lets attempt - # to use said output to rollback the reservation. - reservations = result['reservations'] - try: - QUOTAS.rollback(context, reservations) - except exception.CinderException: - # We are already reverting, therefore we should silence this - # exception since a second exception being active will be bad. 
- LOG.exception("Failed rolling back quota for" - " %s reservations.", reservations) - - -class QuotaCommitTask(flow_utils.CinderTask): - """Commits the reservation. - - Reversion strategy: N/A (the rollback will be handled by the task that did - the initial reservation (see: QuotaReserveTask). - - Warning Warning: if the process that is running this reserve and commit - process fails (or is killed before the quota is rolled back or committed - it does appear like the quota will never be rolled back). This makes - software upgrades hard (inflight operations will need to be stopped or - allowed to complete before the upgrade can occur). *In the future* when - taskflow has persistence built-in this should be easier to correct via - an automated or manual process. - """ - - def __init__(self): - super(QuotaCommitTask, self).__init__(addons=[ACTION]) - - def execute(self, context, reservations, snapshot_properties, - optional_args): - QUOTAS.commit(context, reservations) - # updating is_quota_committed attribute of optional_args dictionary - optional_args['is_quota_committed'] = True - return {'snapshot_properties': snapshot_properties} - - def revert(self, context, result, **kwargs): - # We never produced a result and therefore can't destroy anything. 
- if isinstance(result, ft.Failure): - return - snapshot = result['snapshot_properties'] - try: - reserve_opts = {'snapshots': -1, - 'gigabytes': -snapshot['volume_size']} - reservations = QUOTAS.reserve(context, - project_id=context.project_id, - **reserve_opts) - if reservations: - QUOTAS.commit(context, reservations, - project_id=context.project_id) - except Exception: - LOG.exception("Failed to update quota while deleting " - "snapshots: %s", snapshot['id']) - - -class ManageExistingTask(flow_utils.CinderTask): - """Brings an existing snapshot under Cinder management.""" - - default_provides = set(['snapshot', 'new_status']) - - def __init__(self, db, driver): - super(ManageExistingTask, self).__init__(addons=[ACTION]) - self.db = db - self.driver = driver - - def execute(self, context, snapshot_ref, manage_existing_ref, size): - model_update = self.driver.manage_existing_snapshot( - snapshot=snapshot_ref, - existing_ref=manage_existing_ref) - if not model_update: - model_update = {} - model_update['volume_size'] = size - try: - snapshot_object = objects.Snapshot.get_by_id(context, - snapshot_ref['id']) - snapshot_object.update(model_update) - snapshot_object.save() - except exception.CinderException: - LOG.exception("Failed updating model of snapshot " - "%(snapshot_id)s with creation provided model " - "%(model)s.", - {'snapshot_id': snapshot_ref['id'], - 'model': model_update}) - raise - - return {'snapshot': snapshot_ref, - 'new_status': fields.SnapshotStatus.AVAILABLE} - - -class CreateSnapshotOnFinishTask(NotifySnapshotActionTask): - """Perform final snapshot actions. - - When a snapshot is created successfully it is expected that MQ - notifications and database updates will occur to 'signal' to others that - the snapshot is now ready for usage. This task does those notifications and - updates in a reliable manner (not re-raising exceptions if said actions can - not be triggered). 
- - Reversion strategy: N/A - """ - - def execute(self, context, snapshot, new_status): - LOG.debug("Begin to call CreateSnapshotOnFinishTask execute.") - snapshot_id = snapshot['id'] - LOG.debug("New status: %s", new_status) - update = { - 'status': new_status - } - try: - # TODO(harlowja): is it acceptable to only log if this fails?? - # or are there other side-effects that this will cause if the - # status isn't updated correctly (aka it will likely be stuck in - # 'building' if this fails)?? - snapshot_object = objects.Snapshot.get_by_id(context, - snapshot_id) - snapshot_object.update(update) - snapshot_object.save() - # Now use the parent to notify. - super(CreateSnapshotOnFinishTask, self).execute(context, snapshot) - except exception.CinderException: - LOG.exception("Failed updating snapshot %(snapshot_id)s with " - "%(update)s.", {'snapshot_id': snapshot_id, - 'update': update}) - # Even if the update fails, the snapshot is ready. - LOG.info("Snapshot %s created successfully.", snapshot_id) - - -def get_flow(context, db, driver, host, snapshot_id, ref): - """Constructs and returns the manager entry point flow.""" - - LOG.debug("Input parameters: context=%(context)s, db=%(db)s," - "driver=%(driver)s, host=%(host)s, " - "snapshot_id=(snapshot_id)s, ref=%(ref)s.", - {'context': context, - 'db': db, - 'driver': driver, - 'host': host, - 'snapshot_id': snapshot_id, - 'ref': ref} - ) - flow_name = ACTION.replace(":", "_") + "_manager" - snapshot_flow = linear_flow.Flow(flow_name) - - # This injects the initial starting flow values into the workflow so that - # the dependency order of the tasks provides/requires can be correctly - # determined. 
- create_what = { - 'context': context, - 'snapshot_id': snapshot_id, - 'manage_existing_ref': ref, - 'optional_args': {'is_quota_committed': False} - } - - notify_start_msg = "manage_existing_snapshot.start" - notify_end_msg = "manage_existing_snapshot.end" - snapshot_flow.add(ExtractSnapshotRefTask(db), - NotifySnapshotActionTask(db, notify_start_msg, - host=host), - PrepareForQuotaReservationTask(db, driver), - QuotaReserveTask(), - ManageExistingTask(db, driver), - QuotaCommitTask(), - CreateSnapshotOnFinishTask(db, notify_end_msg, - host=host)) - LOG.debug("Begin to return taskflow.engines." - "load(snapshot_flow,store=create_what).") - # Now load (but do not run) the flow using the provided initial data. - return taskflow.engines.load(snapshot_flow, store=create_what) diff --git a/cinder/volume/group_types.py b/cinder/volume/group_types.py deleted file mode 100644 index 56c226c90..000000000 --- a/cinder/volume/group_types.py +++ /dev/null @@ -1,210 +0,0 @@ -# Copyright (c) 2016 EMC Corporation -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -"""Built-in group type properties.""" - - -from oslo_config import cfg -from oslo_db import exception as db_exc -from oslo_log import log as logging -import six -import webob - -from cinder import context -from cinder import db -from cinder import exception -from cinder.i18n import _ - -CONF = cfg.CONF -LOG = logging.getLogger(__name__) -DEFAULT_CGSNAPSHOT_TYPE = "default_cgsnapshot_type" - - -def create(context, - name, - group_specs=None, - is_public=True, - projects=None, - description=None): - """Creates group types.""" - group_specs = group_specs or {} - projects = projects or [] - elevated = context if context.is_admin else context.elevated() - try: - type_ref = db.group_type_create(elevated, - dict(name=name, - group_specs=group_specs, - is_public=is_public, - description=description), - projects=projects) - except db_exc.DBError: - LOG.exception('DB error:') - raise exception.GroupTypeCreateFailed(name=name, - group_specs=group_specs) - return type_ref - - -def update(context, id, name, description, is_public=None): - """Update group type by id.""" - if id is None: - msg = _("id cannot be None") - raise exception.InvalidGroupType(reason=msg) - elevated = context if context.is_admin else context.elevated() - try: - db.group_type_update(elevated, id, - dict(name=name, description=description, - is_public=is_public)) - except db_exc.DBError: - LOG.exception('DB error:') - raise exception.GroupTypeUpdateFailed(id=id) - - -def destroy(context, id): - """Marks group types as deleted.""" - if id is None: - msg = _("id cannot be None") - raise exception.InvalidGroupType(reason=msg) - else: - elevated = context if context.is_admin else context.elevated() - try: - db.group_type_destroy(elevated, id) - except exception.GroupTypeInUse as e: - msg = _('Target group type is still in use. 
%s') % six.text_type(e) - raise webob.exc.HTTPBadRequest(explanation=msg) - - -def get_all_group_types(context, inactive=0, filters=None, marker=None, - limit=None, sort_keys=None, sort_dirs=None, - offset=None, list_result=False): - """Get all non-deleted group_types. - - Pass true as argument if you want deleted group types returned also. - - """ - grp_types = db.group_type_get_all(context, inactive, filters=filters, - marker=marker, limit=limit, - sort_keys=sort_keys, - sort_dirs=sort_dirs, offset=offset, - list_result=list_result) - return grp_types - - -def get_group_type(ctxt, id, expected_fields=None): - """Retrieves single group type by id.""" - if id is None: - msg = _("id cannot be None") - raise exception.InvalidGroupType(reason=msg) - - if ctxt is None: - ctxt = context.get_admin_context() - - return db.group_type_get(ctxt, id, expected_fields=expected_fields) - - -def get_group_type_by_name(context, name): - """Retrieves single group type by name.""" - if name is None: - msg = _("name cannot be None") - raise exception.InvalidGroupType(reason=msg) - - return db.group_type_get_by_name(context, name) - - -def get_default_group_type(): - """Get the default group type.""" - name = CONF.default_group_type - grp_type = {} - - if name is not None: - ctxt = context.get_admin_context() - try: - grp_type = get_group_type_by_name(ctxt, name) - except exception.GroupTypeNotFoundByName: - # Couldn't find group type with the name in default_group_type - # flag, record this issue and move on - LOG.exception('Default group type is not found. ' - 'Please check default_group_type config.') - - return grp_type - - -def get_default_cgsnapshot_type(): - """Get the default group type for migrating cgsnapshots. - - Get the default group type for migrating consistencygroups to - groups and cgsnapshots to group_snapshots. 
- """ - - grp_type = {} - - ctxt = context.get_admin_context() - try: - grp_type = get_group_type_by_name(ctxt, DEFAULT_CGSNAPSHOT_TYPE) - except exception.GroupTypeNotFoundByName: - # Couldn't find DEFAULT_CGSNAPSHOT_TYPE group type. - # Record this issue and move on. - LOG.exception('Default cgsnapshot type %s is not found.', - DEFAULT_CGSNAPSHOT_TYPE) - - return grp_type - - -def is_default_cgsnapshot_type(group_type_id): - cgsnap_type = get_default_cgsnapshot_type() - return group_type_id == cgsnap_type['id'] - - -def get_group_type_specs(group_type_id, key=False): - group_type = get_group_type(context.get_admin_context(), - group_type_id) - group_specs = group_type['group_specs'] - if key: - if group_specs.get(key): - return group_specs.get(key) - else: - return False - else: - return group_specs - - -def is_public_group_type(context, group_type_id): - """Return is_public boolean value of group type""" - group_type = db.group_type_get(context, group_type_id) - return group_type['is_public'] - - -def add_group_type_access(context, group_type_id, project_id): - """Add access to group type for project_id.""" - if group_type_id is None: - msg = _("group_type_id cannot be None") - raise exception.InvalidGroupType(reason=msg) - elevated = context if context.is_admin else context.elevated() - if is_public_group_type(elevated, group_type_id): - msg = _("Type access modification is not applicable to public group " - "type.") - raise exception.InvalidGroupType(reason=msg) - return db.group_type_access_add(elevated, group_type_id, project_id) - - -def remove_group_type_access(context, group_type_id, project_id): - """Remove access to group type for project_id.""" - if group_type_id is None: - msg = _("group_type_id cannot be None") - raise exception.InvalidGroupType(reason=msg) - elevated = context if context.is_admin else context.elevated() - if is_public_group_type(elevated, group_type_id): - msg = _("Type access modification is not applicable to public group " - 
"type.") - raise exception.InvalidGroupType(reason=msg) - return db.group_type_access_remove(elevated, group_type_id, project_id) diff --git a/cinder/volume/manager.py b/cinder/volume/manager.py deleted file mode 100644 index 8110280c9..000000000 --- a/cinder/volume/manager.py +++ /dev/null @@ -1,4808 +0,0 @@ -# Copyright 2010 United States Government as represented by the -# Administrator of the National Aeronautics and Space Administration. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -""" -Volume manager manages creating, attaching, detaching, and persistent storage. - -Persistent storage volumes keep their state independent of instances. You can -attach to an instance, terminate the instance, spawn a new instance (even -one from a different image) and re-attach the volume with the same data -intact. - -**Related Flags** - -:volume_manager: The module name of a class derived from - :class:`manager.Manager` (default: - :class:`cinder.volume.manager.Manager`). -:volume_driver: Used by :class:`Manager`. Defaults to - :class:`cinder.volume.drivers.lvm.LVMVolumeDriver`. 
-:volume_group: Name of the group that will contain exported volumes (default: - `cinder-volumes`) -:num_shell_tries: Number of times to attempt to run commands (default: 3) - -""" - - -import requests -import time - -from oslo_config import cfg -from oslo_log import log as logging -import oslo_messaging as messaging -from oslo_serialization import jsonutils -from oslo_service import periodic_task -from oslo_utils import excutils -from oslo_utils import importutils -from oslo_utils import timeutils -from oslo_utils import units -from oslo_utils import uuidutils -profiler = importutils.try_import('osprofiler.profiler') -import six -from taskflow import exceptions as tfe - -from cinder.common import constants -from cinder import compute -from cinder import context -from cinder import coordination -from cinder import db -from cinder import exception -from cinder import flow_utils -from cinder.i18n import _ -from cinder.image import cache as image_cache -from cinder.image import glance -from cinder.image import image_utils -from cinder import keymgr as key_manager -from cinder import manager -from cinder.message import api as message_api -from cinder.message import message_field -from cinder import objects -from cinder.objects import cgsnapshot -from cinder.objects import consistencygroup -from cinder.objects import fields -from cinder import quota -from cinder import utils -from cinder import volume as cinder_volume -from cinder.volume import configuration as config -from cinder.volume.flows.manager import create_volume -from cinder.volume.flows.manager import manage_existing -from cinder.volume.flows.manager import manage_existing_snapshot -from cinder.volume import group_types -from cinder.volume import rpcapi as volume_rpcapi -from cinder.volume import utils as vol_utils -from cinder.volume import volume_types - -LOG = logging.getLogger(__name__) - -QUOTAS = quota.QUOTAS -CGQUOTAS = quota.CGQUOTAS -GROUP_QUOTAS = quota.GROUP_QUOTAS -VALID_REMOVE_VOL_FROM_CG_STATUS 
= ( - 'available', - 'in-use', - 'error', - 'error_deleting') -VALID_REMOVE_VOL_FROM_GROUP_STATUS = ( - 'available', - 'in-use', - 'error', - 'error_deleting') -VALID_ADD_VOL_TO_CG_STATUS = ( - 'available', - 'in-use') -VALID_ADD_VOL_TO_GROUP_STATUS = ( - 'available', - 'in-use') -VALID_CREATE_CG_SRC_SNAP_STATUS = (fields.SnapshotStatus.AVAILABLE,) -VALID_CREATE_GROUP_SRC_SNAP_STATUS = (fields.SnapshotStatus.AVAILABLE,) -VALID_CREATE_CG_SRC_CG_STATUS = ('available',) -VALID_CREATE_GROUP_SRC_GROUP_STATUS = ('available',) -VA_LIST = objects.VolumeAttachmentList - -volume_manager_opts = [ - cfg.IntOpt('migration_create_volume_timeout_secs', - default=300, - help='Timeout for creating the volume to migrate to ' - 'when performing volume migration (seconds)'), - cfg.BoolOpt('volume_service_inithost_offload', - default=False, - help='Offload pending volume delete during ' - 'volume service startup'), -] - -volume_backend_opts = [ - cfg.StrOpt('volume_driver', - default='cinder.volume.drivers.lvm.LVMVolumeDriver', - help='Driver to use for volume creation'), - cfg.StrOpt('zoning_mode', - help='FC Zoning mode configured'), - cfg.StrOpt('extra_capabilities', - default='{}', - help='User defined capabilities, a JSON formatted string ' - 'specifying key/value pairs. The key/value pairs can ' - 'be used by the CapabilitiesFilter to select between ' - 'backends when requests specify volume types. 
For ' - 'example, specifying a service level or the geographical ' - 'location of a backend, then creating a volume type to ' - 'allow the user to select by these different ' - 'properties.'), - cfg.BoolOpt('suppress_requests_ssl_warnings', - default=False, - help='Suppress requests library SSL certificate warnings.'), -] - -CONF = cfg.CONF -CONF.register_opts(volume_manager_opts) -CONF.register_opts(volume_backend_opts, group=config.SHARED_CONF_GROUP) - -MAPPING = { - 'cinder.volume.drivers.hds.nfs.HDSNFSDriver': - 'cinder.volume.drivers.hitachi.hnas_nfs.HNASNFSDriver', - 'cinder.volume.drivers.hitachi.hnas_nfs.HDSNFSDriver': - 'cinder.volume.drivers.hitachi.hnas_nfs.HNASNFSDriver', - 'cinder.volume.drivers.ibm.xiv_ds8k': - 'cinder.volume.drivers.ibm.ibm_storage', - 'cinder.volume.drivers.emc.scaleio': - 'cinder.volume.drivers.dell_emc.scaleio.driver', - 'cinder.volume.drivers.emc.vnx.driver.EMCVNXDriver': - 'cinder.volume.drivers.dell_emc.vnx.driver.VNXDriver', - 'cinder.volume.drivers.emc.xtremio.XtremIOISCSIDriver': - 'cinder.volume.drivers.dell_emc.xtremio.XtremIOISCSIDriver', - 'cinder.volume.drivers.emc.xtremio.XtremIOFibreChannelDriver': - 'cinder.volume.drivers.dell_emc.xtremio.XtremIOFCDriver', - 'cinder.volume.drivers.datera.DateraDriver': - 'cinder.volume.drivers.datera.datera_iscsi.DateraDriver', - 'cinder.volume.drivers.emc.emc_vmax_iscsi.EMCVMAXISCSIDriver': - 'cinder.volume.drivers.dell_emc.vmax.iscsi.VMAXISCSIDriver', - 'cinder.volume.drivers.emc.emc_vmax_fc.EMCVMAXFCDriver': - 'cinder.volume.drivers.dell_emc.vmax.fc.VMAXFCDriver', - 'cinder.volume.drivers.eqlx.DellEQLSanISCSIDriver': - 'cinder.volume.drivers.dell_emc.ps.PSSeriesISCSIDriver', - 'cinder.volume.drivers.dell.dell_storagecenter_iscsi.' - 'DellStorageCenterISCSIDriver': - 'cinder.volume.drivers.dell_emc.sc.storagecenter_iscsi.' - 'SCISCSIDriver', - 'cinder.volume.drivers.dell.dell_storagecenter_fc.' - 'DellStorageCenterFCDriver': - 'cinder.volume.drivers.dell_emc.sc.storagecenter_fc.' 
- 'SCFCDriver', -} - - -class VolumeManager(manager.CleanableManager, - manager.SchedulerDependentManager): - """Manages attachable block storage devices.""" - - RPC_API_VERSION = volume_rpcapi.VolumeAPI.RPC_API_VERSION - - FAILBACK_SENTINEL = 'default' - - target = messaging.Target(version=RPC_API_VERSION) - - # On cloning a volume, we shouldn't copy volume_type, consistencygroup - # and volume_attachment, because the db sets that according to [field]_id, - # which we do copy. We also skip some other values that are set during - # creation of Volume object. - _VOLUME_CLONE_SKIP_PROPERTIES = { - 'id', '_name_id', 'name_id', 'name', 'status', - 'attach_status', 'migration_status', 'volume_type', - 'consistencygroup', 'volume_attachment', 'group'} - - def __init__(self, volume_driver=None, service_name=None, - *args, **kwargs): - """Load the driver from the one specified in args, or from flags.""" - # update_service_capabilities needs service_name to be volume - super(VolumeManager, self).__init__(service_name='volume', - *args, **kwargs) - # NOTE(dulek): service_name=None means we're running in unit tests. 
- service_name = service_name or 'backend_defaults' - self.configuration = config.Configuration(volume_backend_opts, - config_group=service_name) - self.stats = {} - - if not volume_driver: - # Get from configuration, which will get the default - # if its not using the multi backend - volume_driver = self.configuration.volume_driver - if volume_driver in MAPPING: - LOG.warning("Driver path %s is deprecated, update your " - "configuration to the new path.", volume_driver) - volume_driver = MAPPING[volume_driver] - - vol_db_empty = self._set_voldb_empty_at_startup_indicator( - context.get_admin_context()) - LOG.debug("Cinder Volume DB check: vol_db_empty=%s", vol_db_empty) - - # We pass the current setting for service.active_backend_id to - # the driver on init, in case there was a restart or something - curr_active_backend_id = None - svc_host = vol_utils.extract_host(self.host, 'backend') - try: - service = objects.Service.get_by_args( - context.get_admin_context(), - svc_host, - constants.VOLUME_BINARY) - except exception.ServiceNotFound: - # NOTE(jdg): This is to solve problems with unit tests - LOG.info("Service not found for updating " - "active_backend_id, assuming default " - "for driver init.") - else: - curr_active_backend_id = service.active_backend_id - - if self.configuration.suppress_requests_ssl_warnings: - LOG.warning("Suppressing requests library SSL Warnings") - requests.packages.urllib3.disable_warnings( - requests.packages.urllib3.exceptions.InsecureRequestWarning) - requests.packages.urllib3.disable_warnings( - requests.packages.urllib3.exceptions.InsecurePlatformWarning) - - self.key_manager = key_manager.API(CONF) - self.driver = importutils.import_object( - volume_driver, - configuration=self.configuration, - db=self.db, - host=self.host, - cluster_name=self.cluster, - is_vol_db_empty=vol_db_empty, - active_backend_id=curr_active_backend_id) - - if self.cluster and not self.driver.SUPPORTS_ACTIVE_ACTIVE: - msg = _('Active-Active configuration 
is not currently supported ' - 'by driver %s.') % volume_driver - LOG.error(msg) - raise exception.VolumeDriverException(message=msg) - - self.message_api = message_api.API() - - if CONF.profiler.enabled and profiler is not None: - self.driver = profiler.trace_cls("driver")(self.driver) - try: - self.extra_capabilities = jsonutils.loads( - self.driver.configuration.extra_capabilities) - except AttributeError: - self.extra_capabilities = {} - except Exception: - with excutils.save_and_reraise_exception(): - LOG.error("Invalid JSON: %s", - self.driver.configuration.extra_capabilities) - - # Check if a per-backend AZ has been specified - backend_zone = self.driver.configuration.safe_get( - 'backend_availability_zone') - if backend_zone: - self.availability_zone = backend_zone - - if self.driver.configuration.safe_get( - 'image_volume_cache_enabled'): - - max_cache_size = self.driver.configuration.safe_get( - 'image_volume_cache_max_size_gb') - max_cache_entries = self.driver.configuration.safe_get( - 'image_volume_cache_max_count') - - self.image_volume_cache = image_cache.ImageVolumeCache( - self.db, - cinder_volume.API(), - max_cache_size, - max_cache_entries - ) - LOG.info('Image-volume cache enabled for host %(host)s.', - {'host': self.host}) - else: - LOG.info('Image-volume cache disabled for host %(host)s.', - {'host': self.host}) - self.image_volume_cache = None - - def _count_allocated_capacity(self, ctxt, volume): - pool = vol_utils.extract_host(volume['host'], 'pool') - if pool is None: - # No pool name encoded in host, so this is a legacy - # volume created before pool is introduced, ask - # driver to provide pool info if it has such - # knowledge and update the DB. 
- try: - pool = self.driver.get_pool(volume) - except Exception: - LOG.exception('Fetch volume pool name failed.', - resource=volume) - return - - if pool: - new_host = vol_utils.append_host(volume['host'], - pool) - self.db.volume_update(ctxt, volume['id'], - {'host': new_host}) - else: - # Otherwise, put them into a special fixed pool with - # volume_backend_name being the pool name, if - # volume_backend_name is None, use default pool name. - # This is only for counting purpose, doesn't update DB. - pool = (self.driver.configuration.safe_get( - 'volume_backend_name') or vol_utils.extract_host( - volume['host'], 'pool', True)) - try: - pool_stat = self.stats['pools'][pool] - except KeyError: - # First volume in the pool - self.stats['pools'][pool] = dict( - allocated_capacity_gb=0) - pool_stat = self.stats['pools'][pool] - pool_sum = pool_stat['allocated_capacity_gb'] - pool_sum += volume['size'] - - self.stats['pools'][pool]['allocated_capacity_gb'] = pool_sum - self.stats['allocated_capacity_gb'] += volume['size'] - - def _set_voldb_empty_at_startup_indicator(self, ctxt): - """Determine if the Cinder volume DB is empty. - - A check of the volume DB is done to determine whether it is empty or - not at this point. - - :param ctxt: our working context - """ - vol_entries = self.db.volume_get_all(ctxt, None, 1, filters=None) - - if len(vol_entries) == 0: - LOG.info("Determined volume DB was empty at startup.") - return True - else: - LOG.info("Determined volume DB was not empty at startup.") - return False - - def _sync_provider_info(self, ctxt, volumes, snapshots): - # NOTE(jdg): For now this just updates provider_id, we can add more - # items to the update if they're relevant but we need to be safe in - # what we allow and add a list of allowed keys. 
Things that make sense - # are provider_*, replication_status etc - - updates, snapshot_updates = self.driver.update_provider_info( - volumes, snapshots) - - if updates: - for volume in volumes: - # NOTE(JDG): Make sure returned item is in this hosts volumes - update = ( - [updt for updt in updates if updt['id'] == - volume['id']]) - if update: - update = update[0] - self.db.volume_update( - ctxt, - update['id'], - {'provider_id': update['provider_id']}) - - # NOTE(jdg): snapshots are slighty harder, because - # we do not have a host column and of course no get - # all by host, so we use a get_all and bounce our - # response off of it - if snapshot_updates: - cinder_snaps = self.db.snapshot_get_all(ctxt) - for snap in cinder_snaps: - # NOTE(jdg): For now we only update those that have no entry - if not snap.get('provider_id', None): - update = ( - [updt for updt in snapshot_updates if updt['id'] == - snap['id']][0]) - if update: - self.db.snapshot_update( - ctxt, - update['id'], - {'provider_id': update['provider_id']}) - - def _include_resources_in_cluster(self, ctxt): - - LOG.info('Including all resources from host %(host)s in cluster ' - '%(cluster)s.', - {'host': self.host, 'cluster': self.cluster}) - num_vols = objects.VolumeList.include_in_cluster( - ctxt, self.cluster, host=self.host) - num_cgs = objects.ConsistencyGroupList.include_in_cluster( - ctxt, self.cluster, host=self.host) - num_cache = db.image_volume_cache_include_in_cluster( - ctxt, self.cluster, host=self.host) - LOG.info('%(num_vols)s volumes, %(num_cgs)s consistency groups, ' - 'and %(num_cache)s image volume caches from host ' - '%(host)s have been included in cluster %(cluster)s.', - {'num_vols': num_vols, 'num_cgs': num_cgs, - 'host': self.host, 'cluster': self.cluster, - 'num_cache': num_cache}) - - def init_host(self, added_to_cluster=None, **kwargs): - """Perform any required initialization.""" - ctxt = context.get_admin_context() - if not self.driver.supported: - 
utils.log_unsupported_driver_warning(self.driver) - - if not self.configuration.enable_unsupported_driver: - LOG.error("Unsupported drivers are disabled." - " You can re-enable by adding " - "enable_unsupported_driver=True to the " - "driver section in cinder.conf", - resource={'type': 'driver', - 'id': self.__class__.__name__}) - return - - # If we have just added this host to a cluster we have to include all - # our resources in that cluster. - if added_to_cluster: - self._include_resources_in_cluster(ctxt) - - LOG.info("Starting volume driver %(driver_name)s (%(version)s)", - {'driver_name': self.driver.__class__.__name__, - 'version': self.driver.get_version()}) - try: - self.driver.do_setup(ctxt) - self.driver.check_for_setup_error() - except Exception: - LOG.exception("Failed to initialize driver.", - resource={'type': 'driver', - 'id': self.__class__.__name__}) - # we don't want to continue since we failed - # to initialize the driver correctly. - return - - # Initialize backend capabilities list - self.driver.init_capabilities() - - volumes = self._get_my_volumes(ctxt) - snapshots = self._get_my_snapshots(ctxt) - self._sync_provider_info(ctxt, volumes, snapshots) - # FIXME volume count for exporting is wrong - - self.stats['pools'] = {} - self.stats.update({'allocated_capacity_gb': 0}) - - try: - for volume in volumes: - # available volume should also be counted into allocated - if volume['status'] in ['in-use', 'available']: - # calculate allocated capacity for driver - self._count_allocated_capacity(ctxt, volume) - - try: - if volume['status'] in ['in-use']: - self.driver.ensure_export(ctxt, volume) - except Exception: - LOG.exception("Failed to re-export volume, " - "setting to ERROR.", - resource=volume) - volume.conditional_update({'status': 'error'}, - {'status': 'in-use'}) - # All other cleanups are processed by parent class CleanableManager - - except Exception: - LOG.exception("Error during re-export on driver init.", - resource=volume) - return - 
- self.driver.set_throttle() - - # at this point the driver is considered initialized. - # NOTE(jdg): Careful though because that doesn't mean - # that an entry exists in the service table - self.driver.set_initialized() - - # Keep the image tmp file clean when init host. - backend_name = vol_utils.extract_host(self.service_topic_queue) - image_utils.cleanup_temporary_file(backend_name) - - # collect and publish service capabilities - self.publish_service_capabilities(ctxt) - LOG.info("Driver initialization completed successfully.", - resource={'type': 'driver', - 'id': self.driver.__class__.__name__}) - - # Make sure to call CleanableManager to do the cleanup - super(VolumeManager, self).init_host(added_to_cluster=added_to_cluster, - **kwargs) - - def init_host_with_rpc(self): - LOG.info("Initializing RPC dependent components of volume " - "driver %(driver_name)s (%(version)s)", - {'driver_name': self.driver.__class__.__name__, - 'version': self.driver.get_version()}) - - try: - # Make sure the driver is initialized first - utils.log_unsupported_driver_warning(self.driver) - utils.require_driver_initialized(self.driver) - except exception.DriverNotInitialized: - LOG.error("Cannot complete RPC initialization because " - "driver isn't initialized properly.", - resource={'type': 'driver', - 'id': self.driver.__class__.__name__}) - return - - stats = self.driver.get_volume_stats(refresh=True) - svc_host = vol_utils.extract_host(self.host, 'backend') - try: - service = objects.Service.get_by_args( - context.get_admin_context(), - svc_host, - constants.VOLUME_BINARY) - except exception.ServiceNotFound: - with excutils.save_and_reraise_exception(): - LOG.error("Service not found for updating replication_status.") - - if service.replication_status != ( - fields.ReplicationStatus.FAILED_OVER): - if stats and stats.get('replication_enabled', False): - service.replication_status = fields.ReplicationStatus.ENABLED - else: - service.replication_status = 
fields.ReplicationStatus.DISABLED - - service.save() - LOG.info("Driver post RPC initialization completed successfully.", - resource={'type': 'driver', - 'id': self.driver.__class__.__name__}) - - def _do_cleanup(self, ctxt, vo_resource): - if isinstance(vo_resource, objects.Volume): - if vo_resource.status == 'downloading': - self.driver.clear_download(ctxt, vo_resource) - - elif vo_resource.status == 'uploading': - # Set volume status to available or in-use. - self.db.volume_update_status_based_on_attachment( - ctxt, vo_resource.id) - - elif vo_resource.status == 'deleting': - if CONF.volume_service_inithost_offload: - # Offload all the pending volume delete operations to the - # threadpool to prevent the main volume service thread - # from being blocked. - self._add_to_threadpool(self.delete_volume, ctxt, - vo_resource, cascade=True) - else: - # By default, delete volumes sequentially - self.delete_volume(ctxt, vo_resource, cascade=True) - # We signal that we take care of cleaning the worker ourselves - # (with set_workers decorator in delete_volume method) so - # do_cleanup method doesn't need to remove it. - return True - - # For Volume creating and downloading and for Snapshot downloading - # statuses we have to set status to error - if vo_resource.status in ('creating', 'downloading'): - vo_resource.status = 'error' - vo_resource.save() - - def is_working(self): - """Return if Manager is ready to accept requests. - - This is to inform Service class that in case of volume driver - initialization failure the manager is actually down and not ready to - accept any requests. 
- """ - return self.driver.initialized - - def _set_resource_host(self, resource): - """Set the host field on the DB to our own when we are clustered.""" - if (resource.is_clustered and - not vol_utils.hosts_are_equivalent(resource.host, self.host)): - pool = vol_utils.extract_host(resource.host, 'pool') - resource.host = vol_utils.append_host(self.host, pool) - resource.save() - - @objects.Volume.set_workers - def create_volume(self, context, volume, request_spec=None, - filter_properties=None, allow_reschedule=True): - """Creates the volume.""" - # Log about unsupported drivers - utils.log_unsupported_driver_warning(self.driver) - - # Make sure the host in the DB matches our own when clustered - self._set_resource_host(volume) - - context_elevated = context.elevated() - if filter_properties is None: - filter_properties = {} - - if request_spec is None: - request_spec = objects.RequestSpec() - - try: - # NOTE(flaper87): Driver initialization is - # verified by the task itself. - flow_engine = create_volume.get_flow( - context_elevated, - self, - self.db, - self.driver, - self.scheduler_rpcapi, - self.host, - volume, - allow_reschedule, - context, - request_spec, - filter_properties, - image_volume_cache=self.image_volume_cache, - ) - except Exception: - msg = _("Create manager volume flow failed.") - LOG.exception(msg, resource={'type': 'volume', 'id': volume.id}) - raise exception.CinderException(msg) - - snapshot_id = request_spec.get('snapshot_id') - source_volid = request_spec.get('source_volid') - source_replicaid = request_spec.get('source_replicaid') - - if snapshot_id is not None: - # Make sure the snapshot is not deleted until we are done with it. - locked_action = "%s-%s" % (snapshot_id, 'delete_snapshot') - elif source_volid is not None: - # Make sure the volume is not deleted until we are done with it. 
- locked_action = "%s-%s" % (source_volid, 'delete_volume') - elif source_replicaid is not None: - # Make sure the volume is not deleted until we are done with it. - locked_action = "%s-%s" % (source_replicaid, 'delete_volume') - else: - locked_action = None - - def _run_flow(): - # This code executes create volume flow. If something goes wrong, - # flow reverts all job that was done and reraises an exception. - # Otherwise, all data that was generated by flow becomes available - # in flow engine's storage. - with flow_utils.DynamicLogListener(flow_engine, logger=LOG): - flow_engine.run() - - # NOTE(dulek): Flag to indicate if volume was rescheduled. Used to - # decide if allocated_capacity should be incremented. - rescheduled = False - - try: - if locked_action is None: - _run_flow() - else: - with coordination.COORDINATOR.get_lock(locked_action): - _run_flow() - finally: - try: - flow_engine.storage.fetch('refreshed') - except tfe.NotFound: - # If there's no vol_ref, then flow is reverted. Lets check out - # if rescheduling occurred. - try: - rescheduled = flow_engine.storage.get_revert_result( - create_volume.OnFailureRescheduleTask.make_name( - [create_volume.ACTION])) - except tfe.NotFound: - pass - - if not rescheduled: - # NOTE(dulek): Volume wasn't rescheduled so we need to update - # volume stats as these are decremented on delete. 
- self._update_allocated_capacity(volume) - - LOG.info("Created volume successfully.", resource=volume) - return volume.id - - def _check_is_our_resource(self, resource): - if resource.host: - res_backend = vol_utils.extract_host(resource.service_topic_queue) - backend = vol_utils.extract_host(self.service_topic_queue) - if res_backend != backend: - msg = (_('Invalid %(resource)s: %(resource)s %(id)s is not ' - 'local to %(backend)s.') % - {'resource': resource.obj_name, 'id': resource.id, - 'backend': backend}) - raise exception.Invalid(msg) - - @coordination.synchronized('{volume.id}-{f_name}') - @objects.Volume.set_workers - def delete_volume(self, context, volume, unmanage_only=False, - cascade=False): - """Deletes and unexports volume. - - 1. Delete a volume(normal case) - Delete a volume and update quotas. - - 2. Delete a migration volume - If deleting the volume in a migration, we want to skip - quotas but we need database updates for the volume. - """ - - context = context.elevated() - - try: - volume.refresh() - except exception.VolumeNotFound: - # NOTE(thingee): It could be possible for a volume to - # be deleted when resuming deletes from init_host(). - LOG.debug("Attempted delete of non-existent volume: %s", volume.id) - return - - if context.project_id != volume.project_id: - project_id = volume.project_id - else: - project_id = context.project_id - - if volume['attach_status'] == fields.VolumeAttachStatus.ATTACHED: - # Volume is still attached, need to detach first - raise exception.VolumeAttached(volume_id=volume.id) - self._check_is_our_resource(volume) - - if unmanage_only and cascade: - # This could be done, but is ruled out for now just - # for simplicity. - raise exception.Invalid( - reason=_("Unmanage and cascade delete options " - "are mutually exclusive.")) - - # The status 'deleting' is not included, because it only applies to - # the source volume to be deleted after a migration. No quota - # needs to be handled for it. 
- is_migrating = volume.migration_status not in (None, 'error', - 'success') - is_migrating_dest = (is_migrating and - volume.migration_status.startswith( - 'target:')) - notification = "delete.start" - if unmanage_only: - notification = "unmanage.start" - self._notify_about_volume_usage(context, volume, notification) - try: - # NOTE(flaper87): Verify the driver is enabled - # before going forward. The exception will be caught - # and the volume status updated. - utils.require_driver_initialized(self.driver) - - self.driver.remove_export(context, volume) - if unmanage_only: - self.driver.unmanage(volume) - elif cascade: - LOG.debug('Performing cascade delete.') - snapshots = objects.SnapshotList.get_all_for_volume(context, - volume.id) - for s in snapshots: - if s.status != fields.SnapshotStatus.DELETING: - self._clear_db(context, is_migrating_dest, volume, - 'error_deleting') - - msg = (_("Snapshot %(id)s was found in state " - "%(state)s rather than 'deleting' during " - "cascade delete.") % {'id': s.id, - 'state': s.status}) - raise exception.InvalidSnapshot(reason=msg) - - self.delete_snapshot(context, s) - - LOG.debug('Snapshots deleted, issuing volume delete') - self.driver.delete_volume(volume) - else: - self.driver.delete_volume(volume) - except exception.VolumeIsBusy: - LOG.error("Unable to delete busy volume.", - resource=volume) - # If this is a destination volume, we have to clear the database - # record to avoid user confusion. - self._clear_db(context, is_migrating_dest, volume, - 'available') - return - except Exception: - with excutils.save_and_reraise_exception(): - # If this is a destination volume, we have to clear the - # database record to avoid user confusion. - new_status = 'error_deleting' - if unmanage_only is True: - new_status = 'error_unmanaging' - - self._clear_db(context, is_migrating_dest, volume, - new_status) - - # If deleting source/destination volume in a migration, we should - # skip quotas. 
- if not is_migrating: - # Get reservations - try: - reservations = None - if volume.status != 'error_managing_deleting': - reserve_opts = {'volumes': -1, - 'gigabytes': -volume.size} - QUOTAS.add_volume_type_opts(context, - reserve_opts, - volume.volume_type_id) - reservations = QUOTAS.reserve(context, - project_id=project_id, - **reserve_opts) - except Exception: - LOG.exception("Failed to update usages deleting volume.", - resource=volume) - - # Delete glance metadata if it exists - self.db.volume_glance_metadata_delete_by_volume(context, volume.id) - - volume.destroy() - - # If deleting source/destination volume in a migration, we should - # skip quotas. - if not is_migrating: - notification = "delete.end" - if unmanage_only: - notification = "unmanage.end" - self._notify_about_volume_usage(context, volume, notification) - - # Commit the reservations - if reservations: - QUOTAS.commit(context, reservations, project_id=project_id) - - pool = vol_utils.extract_host(volume.host, 'pool') - if pool is None: - # Legacy volume, put them into default pool - pool = self.driver.configuration.safe_get( - 'volume_backend_name') or vol_utils.extract_host( - volume.host, 'pool', True) - size = volume.size - - try: - self.stats['pools'][pool]['allocated_capacity_gb'] -= size - except KeyError: - self.stats['pools'][pool] = dict( - allocated_capacity_gb=-size) - - self.publish_service_capabilities(context) - - msg = "Deleted volume successfully." - if unmanage_only: - msg = "Unmanaged volume successfully." - LOG.info(msg, resource=volume) - - def _clear_db(self, context, is_migrating_dest, volume_ref, status): - # This method is called when driver.unmanage() or - # driver.delete_volume() fails in delete_volume(), so it is already - # in the exception handling part. 
- if is_migrating_dest: - volume_ref.destroy() - LOG.error("Unable to delete the destination volume " - "during volume migration, (NOTE: database " - "record needs to be deleted).", resource=volume_ref) - else: - volume_ref.status = status - volume_ref.save() - - def _revert_to_snapshot_generic(self, ctxt, volume, snapshot): - """Generic way to revert volume to a snapshot. - - the framework will use the generic way to implement the revert - to snapshot feature: - 1. create a temporary volume from snapshot - 2. mount two volumes to host - 3. copy data from temporary volume to original volume - 4. detach and destroy temporary volume - """ - temp_vol = None - - try: - v_options = {'display_name': '[revert] temporary volume created ' - 'from snapshot %s' % snapshot.id} - ctxt = context.get_internal_tenant_context() or ctxt - temp_vol = self.driver._create_temp_volume_from_snapshot( - ctxt, volume, snapshot, volume_options=v_options) - self._copy_volume_data(ctxt, temp_vol, volume) - self.driver.delete_volume(temp_vol) - temp_vol.destroy() - except Exception: - with excutils.save_and_reraise_exception(): - LOG.exception( - "Failed to use snapshot %(snapshot)s to create " - "a temporary volume and copy data to volume " - " %(volume)s.", - {'snapshot': snapshot.id, - 'volume': volume.id}) - if temp_vol and temp_vol.status == 'available': - self.driver.delete_volume(temp_vol) - temp_vol.destroy() - - def _revert_to_snapshot(self, context, volume, snapshot): - """Use driver or generic method to rollback volume.""" - - self._notify_about_volume_usage(context, volume, "revert.start") - self._notify_about_snapshot_usage(context, snapshot, "revert.start") - try: - self.driver.revert_to_snapshot(context, volume, snapshot) - except (NotImplementedError, AttributeError): - LOG.info("Driver's 'revert_to_snapshot' is not found. 
" - "Try to use copy-snapshot-to-volume method.") - self._revert_to_snapshot_generic(context, volume, snapshot) - self._notify_about_volume_usage(context, volume, "revert.end") - self._notify_about_snapshot_usage(context, snapshot, "revert.end") - - def _create_backup_snapshot(self, context, volume): - kwargs = { - 'volume_id': volume.id, - 'user_id': context.user_id, - 'project_id': context.project_id, - 'status': fields.SnapshotStatus.CREATING, - 'progress': '0%', - 'volume_size': volume.size, - 'display_name': '[revert] volume %s backup snapshot' % volume.id, - 'display_description': 'This is only used for backup when ' - 'reverting. If the reverting process ' - 'failed, you can restore you data by ' - 'creating new volume with this snapshot.', - 'volume_type_id': volume.volume_type_id, - 'encryption_key_id': volume.encryption_key_id, - 'metadata': {} - } - snapshot = objects.Snapshot(context=context, **kwargs) - snapshot.create() - self.create_snapshot(context, snapshot) - return snapshot - - def revert_to_snapshot(self, context, volume, snapshot): - """Revert a volume to a snapshot. - - The process of reverting to snapshot consists of several steps: - 1. create a snapshot for backup (in case of data loss) - 2.1. use driver's specific logic to revert volume - 2.2. try the generic way to revert volume if driver's method is missing - 3. delete the backup snapshot - """ - backup_snapshot = None - try: - LOG.info("Start to perform revert to snapshot process.") - # Create a snapshot which can be used to restore the volume - # data by hand if revert process failed. 
- backup_snapshot = self._create_backup_snapshot(context, volume) - self._revert_to_snapshot(context, volume, snapshot) - except Exception as error: - with excutils.save_and_reraise_exception(): - self._notify_about_volume_usage(context, volume, - "revert.end") - self._notify_about_snapshot_usage(context, snapshot, - "revert.end") - msg = ('Volume %(v_id)s revert to ' - 'snapshot %(s_id)s failed with %(error)s.') - msg_args = {'v_id': volume.id, - 's_id': snapshot.id, - 'error': six.text_type(error)} - v_res = volume.update_single_status_where( - 'error', - 'reverting') - if not v_res: - msg_args = {"id": volume.id, - "status": 'error'} - msg += ("Failed to reset volume %(id)s " - "status to %(status)s.") % msg_args - - s_res = snapshot.update_single_status_where( - fields.SnapshotStatus.AVAILABLE, - fields.SnapshotStatus.RESTORING) - if not s_res: - msg_args = {"id": snapshot.id, - "status": - fields.SnapshotStatus.ERROR} - msg += ("Failed to reset snapshot %(id)s " - "status to %(status)s." 
% msg_args) - LOG.exception(msg, msg_args) - - v_res = volume.update_single_status_where( - 'available', 'reverting') - if not v_res: - msg_args = {"id": volume.id, - "status": 'available'} - msg = _("Revert finished, but failed to reset " - "volume %(id)s status to %(status)s, " - "please manually reset it.") % msg_args - raise exception.BadResetResourceStatus(message=msg) - - s_res = snapshot.update_single_status_where( - fields.SnapshotStatus.AVAILABLE, - fields.SnapshotStatus.RESTORING) - if not s_res: - msg_args = {"id": snapshot.id, - "status": - fields.SnapshotStatus.AVAILABLE} - msg = _("Revert finished, but failed to reset " - "snapshot %(id)s status to %(status)s, " - "please manually reset it.") % msg_args - raise exception.BadResetResourceStatus(message=msg) - if backup_snapshot: - self.delete_snapshot(context, - backup_snapshot, handle_quota=False) - msg = ('Volume %(v_id)s reverted to snapshot %(snap_id)s ' - 'successfully.') - msg_args = {'v_id': volume.id, 'snap_id': snapshot.id} - LOG.info(msg, msg_args) - - @objects.Snapshot.set_workers - def create_snapshot(self, context, snapshot): - """Creates and exports the snapshot.""" - context = context.elevated() - - self._notify_about_snapshot_usage( - context, snapshot, "create.start") - - try: - # NOTE(flaper87): Verify the driver is enabled - # before going forward. The exception will be caught - # and the snapshot status updated. - utils.require_driver_initialized(self.driver) - - # Pass context so that drivers that want to use it, can, - # but it is not a requirement for all drivers. 
- snapshot.context = context - - model_update = self.driver.create_snapshot(snapshot) - if model_update: - snapshot.update(model_update) - snapshot.save() - - except Exception: - with excutils.save_and_reraise_exception(): - snapshot.status = fields.SnapshotStatus.ERROR - snapshot.save() - - vol_ref = self.db.volume_get(context, snapshot.volume_id) - if vol_ref.bootable: - try: - self.db.volume_glance_metadata_copy_to_snapshot( - context, snapshot.id, snapshot.volume_id) - except exception.GlanceMetadataNotFound: - # If volume is not created from image, No glance metadata - # would be available for that volume in - # volume glance metadata table - pass - except exception.CinderException as ex: - LOG.exception("Failed updating snapshot" - " metadata using the provided volumes" - " %(volume_id)s metadata", - {'volume_id': snapshot.volume_id}, - resource=snapshot) - snapshot.status = fields.SnapshotStatus.ERROR - snapshot.save() - raise exception.MetadataCopyFailure(reason=six.text_type(ex)) - - snapshot.status = fields.SnapshotStatus.AVAILABLE - snapshot.progress = '100%' - snapshot.save() - - self._notify_about_snapshot_usage(context, snapshot, "create.end") - LOG.info("Create snapshot completed successfully", - resource=snapshot) - return snapshot.id - - @coordination.synchronized('{snapshot.id}-{f_name}') - def delete_snapshot(self, context, snapshot, - unmanage_only=False, handle_quota=True): - """Deletes and unexports snapshot.""" - context = context.elevated() - snapshot._context = context - project_id = snapshot.project_id - - self._notify_about_snapshot_usage( - context, snapshot, "delete.start") - - try: - # NOTE(flaper87): Verify the driver is enabled - # before going forward. The exception will be caught - # and the snapshot status updated. - utils.require_driver_initialized(self.driver) - - # Pass context so that drivers that want to use it, can, - # but it is not a requirement for all drivers. 
- snapshot.context = context - snapshot.save() - - if unmanage_only: - self.driver.unmanage_snapshot(snapshot) - else: - self.driver.delete_snapshot(snapshot) - except exception.SnapshotIsBusy: - LOG.error("Delete snapshot failed, due to snapshot busy.", - resource=snapshot) - snapshot.status = fields.SnapshotStatus.AVAILABLE - snapshot.save() - return - except Exception: - with excutils.save_and_reraise_exception(): - snapshot.status = fields.SnapshotStatus.ERROR_DELETING - snapshot.save() - - # Get reservations - reservations = None - try: - if handle_quota: - if CONF.no_snapshot_gb_quota: - reserve_opts = {'snapshots': -1} - else: - reserve_opts = { - 'snapshots': -1, - 'gigabytes': -snapshot.volume_size, - } - volume_ref = self.db.volume_get(context, snapshot.volume_id) - QUOTAS.add_volume_type_opts(context, - reserve_opts, - volume_ref.get('volume_type_id')) - reservations = QUOTAS.reserve(context, - project_id=project_id, - **reserve_opts) - except Exception: - reservations = None - LOG.exception("Update snapshot usages failed.", - resource=snapshot) - self.db.volume_glance_metadata_delete_by_snapshot(context, snapshot.id) - snapshot.destroy() - self._notify_about_snapshot_usage(context, snapshot, "delete.end") - - # Commit the reservations - if reservations: - QUOTAS.commit(context, reservations, project_id=project_id) - - msg = "Delete snapshot completed successfully." - if unmanage_only: - msg = "Unmanage snapshot completed successfully." - LOG.info(msg, resource=snapshot) - - @coordination.synchronized('{volume_id}') - def attach_volume(self, context, volume_id, instance_uuid, host_name, - mountpoint, mode, volume=None): - """Updates db to show volume is attached.""" - # FIXME(lixiaoy1): Remove this in v4.0 of RPC API. - if volume is None: - # For older clients, mimic the old behavior and look - # up the volume by its volume_id. - volume = objects.Volume.get_by_id(context, volume_id) - # Get admin_metadata. This needs admin context. 
- with volume.obj_as_admin(): - volume_metadata = volume.admin_metadata - # check the volume status before attaching - if volume.status == 'attaching': - if (volume_metadata.get('attached_mode') and - volume_metadata.get('attached_mode') != mode): - raise exception.InvalidVolume( - reason=_("being attached by different mode")) - - host_name_sanitized = utils.sanitize_hostname( - host_name) if host_name else None - if instance_uuid: - attachments = ( - VA_LIST.get_all_by_instance_uuid( - context, instance_uuid)) - else: - attachments = ( - VA_LIST.get_all_by_host( - context, host_name_sanitized)) - if attachments: - # check if volume<->instance mapping is already tracked in DB - for attachment in attachments: - if attachment['volume_id'] == volume_id: - volume.status = 'in-use' - volume.save() - return attachment - - if (volume.status == 'in-use' and not volume.multiattach - and not volume.migration_status): - raise exception.InvalidVolume( - reason=_("volume is already attached")) - - self._notify_about_volume_usage(context, volume, - "attach.start") - - attachment = volume.begin_attach(mode) - - if instance_uuid and not uuidutils.is_uuid_like(instance_uuid): - attachment.attach_status = ( - fields.VolumeAttachStatus.ERROR_ATTACHING) - attachment.save() - raise exception.InvalidUUID(uuid=instance_uuid) - - try: - if volume_metadata.get('readonly') == 'True' and mode != 'ro': - raise exception.InvalidVolumeAttachMode(mode=mode, - volume_id=volume.id) - # NOTE(flaper87): Verify the driver is enabled - # before going forward. The exception will be caught - # and the volume status updated. 
- utils.require_driver_initialized(self.driver) - - LOG.info('Attaching volume %(volume_id)s to instance ' - '%(instance)s at mountpoint %(mount)s on host ' - '%(host)s.', - {'volume_id': volume_id, 'instance': instance_uuid, - 'mount': mountpoint, 'host': host_name_sanitized}, - resource=volume) - self.driver.attach_volume(context, - volume, - instance_uuid, - host_name_sanitized, - mountpoint) - except Exception as excep: - with excutils.save_and_reraise_exception(): - self.message_api.create( - context, - message_field.Action.ATTACH_VOLUME, - resource_uuid=volume_id, - exception=excep) - attachment.attach_status = ( - fields.VolumeAttachStatus.ERROR_ATTACHING) - attachment.save() - - volume = attachment.finish_attach( - instance_uuid, - host_name_sanitized, - mountpoint, - mode) - - self._notify_about_volume_usage(context, volume, "attach.end") - LOG.info("Attach volume completed successfully.", - resource=volume) - return attachment - - @coordination.synchronized('{volume_id}-{f_name}') - def detach_volume(self, context, volume_id, attachment_id=None, - volume=None): - """Updates db to show volume is detached.""" - # TODO(vish): refactor this into a more general "unreserve" - # FIXME(lixiaoy1): Remove this in v4.0 of RPC API. - if volume is None: - # For older clients, mimic the old behavior and look up the volume - # by its volume_id. - volume = objects.Volume.get_by_id(context, volume_id) - - if attachment_id: - try: - attachment = objects.VolumeAttachment.get_by_id(context, - attachment_id) - except exception.VolumeAttachmentNotFound: - LOG.info("Volume detach called, but volume not attached.", - resource=volume) - # We need to make sure the volume status is set to the correct - # status. It could be in detaching status now, and we don't - # want to leave it there. 
- volume.finish_detach(attachment_id) - return - else: - # We can try and degrade gracefully here by trying to detach - # a volume without the attachment_id here if the volume only has - # one attachment. This is for backwards compatibility. - attachments = volume.volume_attachment - if len(attachments) > 1: - # There are more than 1 attachments for this volume - # we have to have an attachment id. - msg = _("Detach volume failed: More than one attachment, " - "but no attachment_id provided.") - LOG.error(msg, resource=volume) - raise exception.InvalidVolume(reason=msg) - elif len(attachments) == 1: - attachment = attachments[0] - else: - # there aren't any attachments for this volume. - # so set the status to available and move on. - LOG.info("Volume detach called, but volume not attached.", - resource=volume) - volume.status = 'available' - volume.attach_status = fields.VolumeAttachStatus.DETACHED - volume.save() - return - - self._notify_about_volume_usage(context, volume, "detach.start") - try: - # NOTE(flaper87): Verify the driver is enabled - # before going forward. The exception will be caught - # and the volume status updated. - utils.require_driver_initialized(self.driver) - - LOG.info('Detaching volume %(volume_id)s from instance ' - '%(instance)s.', - {'volume_id': volume_id, - 'instance': attachment.get('instance_uuid')}, - resource=volume) - self.driver.detach_volume(context, volume, attachment) - except Exception: - with excutils.save_and_reraise_exception(): - self.db.volume_attachment_update( - context, attachment.get('id'), { - 'attach_status': - fields.VolumeAttachStatus.ERROR_DETACHING}) - - # NOTE(jdg): We used to do an ensure export here to - # catch upgrades while volumes were attached (E->F) - # this was necessary to convert in-use volumes from - # int ID's to UUID's. 
Don't need this any longer - - # We're going to remove the export here - # (delete the iscsi target) - try: - utils.require_driver_initialized(self.driver) - self.driver.remove_export(context.elevated(), volume) - except exception.DriverNotInitialized: - with excutils.save_and_reraise_exception(): - LOG.exception("Detach volume failed, due to " - "uninitialized driver.", - resource=volume) - except Exception as ex: - LOG.exception("Detach volume failed, due to " - "remove-export failure.", - resource=volume) - raise exception.RemoveExportException(volume=volume_id, - reason=six.text_type(ex)) - - volume.finish_detach(attachment.id) - self._notify_about_volume_usage(context, volume, "detach.end") - LOG.info("Detach volume completed successfully.", resource=volume) - - def _create_image_cache_volume_entry(self, ctx, volume_ref, - image_id, image_meta): - """Create a new image-volume and cache entry for it. - - This assumes that the image has already been downloaded and stored - in the volume described by the volume_ref. - """ - image_volume = None - try: - if not self.image_volume_cache.ensure_space(ctx, volume_ref): - LOG.warning('Unable to ensure space for image-volume in' - ' cache. Will skip creating entry for image' - ' %(image)s on %(service)s.', - {'image': image_id, - 'service': volume_ref.service_topic_queue}) - return - - image_volume = self._clone_image_volume(ctx, - volume_ref, - image_meta) - if not image_volume: - LOG.warning('Unable to clone image_volume for image ' - '%(image_id)s will not create cache entry.', - {'image_id': image_id}) - return - - self.image_volume_cache.create_cache_entry( - ctx, - image_volume, - image_id, - image_meta - ) - except exception.CinderException as e: - LOG.warning('Failed to create new image-volume cache entry.' 
- ' Error: %(exception)s', {'exception': e}) - if image_volume: - self.delete_volume(ctx, image_volume) - - def _clone_image_volume(self, ctx, volume, image_meta): - volume_type_id = volume.get('volume_type_id') - reserve_opts = {'volumes': 1, 'gigabytes': volume.size} - QUOTAS.add_volume_type_opts(ctx, reserve_opts, volume_type_id) - reservations = QUOTAS.reserve(ctx, **reserve_opts) - try: - new_vol_values = {k: volume[k] for k in set(volume.keys()) - - self._VOLUME_CLONE_SKIP_PROPERTIES} - new_vol_values['volume_type_id'] = volume_type_id - new_vol_values['attach_status'] = ( - fields.VolumeAttachStatus.DETACHED) - new_vol_values['status'] = 'creating' - new_vol_values['project_id'] = ctx.project_id - new_vol_values['display_name'] = 'image-%s' % image_meta['id'] - new_vol_values['source_volid'] = volume.id - - LOG.debug('Creating image volume entry: %s.', new_vol_values) - image_volume = objects.Volume(context=ctx, **new_vol_values) - image_volume.create() - except Exception as ex: - LOG.exception('Create clone_image_volume: %(volume_id)s' - 'for image %(image_id)s, ' - 'failed (Exception: %(except)s)', - {'volume_id': volume.id, - 'image_id': image_meta['id'], - 'except': ex}) - QUOTAS.rollback(ctx, reservations) - return - - QUOTAS.commit(ctx, reservations, - project_id=new_vol_values['project_id']) - - try: - self.create_volume(ctx, image_volume, allow_reschedule=False) - image_volume = objects.Volume.get_by_id(ctx, image_volume.id) - if image_volume.status != 'available': - raise exception.InvalidVolume(_('Volume is not available.')) - - self.db.volume_admin_metadata_update(ctx.elevated(), - image_volume.id, - {'readonly': 'True'}, - False) - return image_volume - except exception.CinderException: - LOG.exception('Failed to clone volume %(volume_id)s for ' - 'image %(image_id)s.', - {'volume_id': volume.id, - 'image_id': image_meta['id']}) - try: - self.delete_volume(ctx, image_volume) - except exception.CinderException: - LOG.exception('Could not delete 
the image volume %(id)s.', - {'id': volume.id}) - return - - def _clone_image_volume_and_add_location(self, ctx, volume, image_service, - image_meta): - """Create a cloned volume and register its location to the image.""" - if (image_meta['disk_format'] != 'raw' or - image_meta['container_format'] != 'bare'): - return False - - image_volume_context = ctx - if self.driver.configuration.image_upload_use_internal_tenant: - internal_ctx = context.get_internal_tenant_context() - if internal_ctx: - image_volume_context = internal_ctx - - image_volume = self._clone_image_volume(image_volume_context, - volume, - image_meta) - if not image_volume: - return False - - # The image_owner metadata should be set before uri is added to - # the image so glance cinder store can check its owner. - image_volume_meta = {'image_owner': ctx.project_id} - self.db.volume_metadata_update(image_volume_context, - image_volume.id, - image_volume_meta, - False) - - uri = 'cinder://%s' % image_volume.id - image_registered = None - try: - image_registered = image_service.add_location( - ctx, image_meta['id'], uri, {}) - except (exception.NotAuthorized, exception.Invalid, - exception.NotFound): - LOG.exception('Failed to register image volume location ' - '%(uri)s.', {'uri': uri}) - - if not image_registered: - LOG.warning('Registration of image volume URI %(uri)s ' - 'to image %(image_id)s failed.', - {'uri': uri, 'image_id': image_meta['id']}) - try: - self.delete_volume(image_volume_context, image_volume) - except exception.CinderException: - LOG.exception('Could not delete failed image volume ' - '%(id)s.', {'id': image_volume.id}) - return False - - image_volume_meta['glance_image_id'] = image_meta['id'] - self.db.volume_metadata_update(image_volume_context, - image_volume.id, - image_volume_meta, - False) - return True - - def copy_volume_to_image(self, context, volume_id, image_meta): - """Uploads the specified volume to Glance. 
- - image_meta is a dictionary containing the following keys: - 'id', 'container_format', 'disk_format' - - """ - payload = {'volume_id': volume_id, 'image_id': image_meta['id']} - image_service = None - try: - volume = objects.Volume.get_by_id(context, volume_id) - - # NOTE(flaper87): Verify the driver is enabled - # before going forward. The exception will be caught - # and the volume status updated. - utils.require_driver_initialized(self.driver) - - image_service, image_id = \ - glance.get_remote_image_service(context, image_meta['id']) - if (self.driver.configuration.image_upload_use_cinder_backend - and self._clone_image_volume_and_add_location( - context, volume, image_service, image_meta)): - LOG.debug("Registered image volume location to glance " - "image-id: %(image_id)s.", - {'image_id': image_meta['id']}, - resource=volume) - else: - self.driver.copy_volume_to_image(context, volume, - image_service, image_meta) - LOG.debug("Uploaded volume to glance image-id: %(image_id)s.", - {'image_id': image_meta['id']}, - resource=volume) - except Exception as error: - LOG.error("Upload volume to image encountered an error " - "(image-id: %(image_id)s).", - {'image_id': image_meta['id']}, - resource=volume) - self.message_api.create( - context, - message_field.Action.COPY_VOLUME_TO_IMAGE, - resource_uuid=volume_id, - exception=error, - detail=message_field.Detail.FAILED_TO_UPLOAD_VOLUME) - if image_service is not None: - # Deletes the image if it is in queued or saving state - self._delete_image(context, image_meta['id'], image_service) - with excutils.save_and_reraise_exception(): - payload['message'] = six.text_type(error) - finally: - self.db.volume_update_status_based_on_attachment(context, - volume_id) - LOG.info("Copy volume to image completed successfully.", - resource=volume) - - def _delete_image(self, context, image_id, image_service): - """Deletes an image stuck in queued or saving state.""" - try: - image_meta = image_service.show(context, image_id) - 
image_status = image_meta.get('status') - if image_status == 'queued' or image_status == 'saving': - LOG.warning("Deleting image in unexpected status: " - "%(image_status)s.", - {'image_status': image_status}, - resource={'type': 'image', 'id': image_id}) - image_service.delete(context, image_id) - except Exception: - LOG.warning("Image delete encountered an error.", - exc_info=True, resource={'type': 'image', - 'id': image_id}) - - def _parse_connection_options(self, context, volume, conn_info): - # Add qos_specs to connection info - typeid = volume.volume_type_id - specs = None - if typeid: - res = volume_types.get_volume_type_qos_specs(typeid) - qos = res['qos_specs'] - # only pass qos_specs that is designated to be consumed by - # front-end, or both front-end and back-end. - if qos and qos.get('consumer') in ['front-end', 'both']: - specs = qos.get('specs') - - if specs is not None: - # Compute fixed IOPS values for per-GB keys - if 'write_iops_sec_per_gb' in specs: - specs['write_iops_sec'] = ( - int(specs['write_iops_sec_per_gb']) * int(volume.size)) - specs.pop('write_iops_sec_per_gb') - - if 'read_iops_sec_per_gb' in specs: - specs['read_iops_sec'] = ( - int(specs['read_iops_sec_per_gb']) * int(volume.size)) - specs.pop('read_iops_sec_per_gb') - - if 'total_iops_sec_per_gb' in specs: - specs['total_iops_sec'] = ( - int(specs['total_iops_sec_per_gb']) * int(volume.size)) - specs.pop('total_iops_sec_per_gb') - - qos_spec = dict(qos_specs=specs) - conn_info['data'].update(qos_spec) - - # Add access_mode to connection info - volume_metadata = volume.admin_metadata - access_mode = volume_metadata.get('attached_mode') - if access_mode is None: - # NOTE(zhiyan): client didn't call 'os-attach' before - access_mode = ('ro' - if volume_metadata.get('readonly') == 'True' - else 'rw') - conn_info['data']['access_mode'] = access_mode - - # Add encrypted flag to connection_info if not set in the driver. 
- if conn_info['data'].get('encrypted') is None: - encrypted = bool(volume.encryption_key_id) - conn_info['data']['encrypted'] = encrypted - - # Add discard flag to connection_info if not set in the driver and - # configured to be reported. - if conn_info['data'].get('discard') is None: - discard_supported = (self.driver.configuration - .safe_get('report_discard_supported')) - if discard_supported: - conn_info['data']['discard'] = True - - return conn_info - - def initialize_connection(self, context, volume, connector): - """Prepare volume for connection from host represented by connector. - - This method calls the driver initialize_connection and returns - it to the caller. The connector parameter is a dictionary with - information about the host that will connect to the volume in the - following format:: - - { - 'ip': ip, - 'initiator': initiator, - } - - ip: the ip address of the connecting machine - - initiator: the iscsi initiator name of the connecting machine. - This can be None if the connecting machine does not support iscsi - connections. - - driver is responsible for doing any necessary security setup and - returning a connection_info dictionary in the following format:: - - { - 'driver_volume_type': driver_volume_type, - 'data': data, - } - - driver_volume_type: a string to identify the type of volume. This - can be used by the calling code to determine the - strategy for connecting to the volume. This could - be 'iscsi', 'rbd', 'sheepdog', etc. - - data: this is the data that the calling code will use to connect - to the volume. Keep in mind that this will be serialized to - json in various places, so it should not contain any non-json - data types. - """ - # NOTE(flaper87): Verify the driver is enabled - # before going forward. The exception will be caught - # and the volume status updated. 
- - # TODO(jdg): Add deprecation warning - utils.require_driver_initialized(self.driver) - try: - self.driver.validate_connector(connector) - except exception.InvalidConnectorException as err: - raise exception.InvalidInput(reason=six.text_type(err)) - except Exception as err: - err_msg = (_("Validate volume connection failed " - "(error: %(err)s).") % {'err': six.text_type(err)}) - LOG.exception(err_msg, resource=volume) - raise exception.VolumeBackendAPIException(data=err_msg) - - try: - model_update = self.driver.create_export(context.elevated(), - volume, connector) - except exception.CinderException as ex: - msg = _("Create export of volume failed (%s)") % ex.msg - LOG.exception(msg, resource=volume) - raise exception.VolumeBackendAPIException(data=msg) - - try: - if model_update: - volume.update(model_update) - volume.save() - except Exception as ex: - LOG.exception("Model update failed.", resource=volume) - try: - self.driver.remove_export(context.elevated(), volume) - except Exception: - LOG.exception('Could not remove export after DB model failed.') - raise exception.ExportFailure(reason=six.text_type(ex)) - - try: - conn_info = self.driver.initialize_connection(volume, connector) - except Exception as err: - err_msg = (_("Driver initialize connection failed " - "(error: %(err)s).") % {'err': six.text_type(err)}) - LOG.exception(err_msg, resource=volume) - - self.driver.remove_export(context.elevated(), volume) - - raise exception.VolumeBackendAPIException(data=err_msg) - - conn_info = self._parse_connection_options(context, volume, conn_info) - LOG.info("Initialize volume connection completed successfully.", - resource=volume) - return conn_info - - def initialize_connection_snapshot(self, ctxt, snapshot_id, connector): - utils.require_driver_initialized(self.driver) - snapshot = objects.Snapshot.get_by_id(ctxt, snapshot_id) - try: - self.driver.validate_connector(connector) - except exception.InvalidConnectorException as err: - raise 
exception.InvalidInput(reason=six.text_type(err)) - except Exception as err: - err_msg = (_("Validate snapshot connection failed " - "(error: %(err)s).") % {'err': six.text_type(err)}) - LOG.exception(err_msg, resource=snapshot) - raise exception.VolumeBackendAPIException(data=err_msg) - - model_update = None - try: - LOG.debug("Snapshot %s: creating export.", snapshot.id) - model_update = self.driver.create_export_snapshot( - ctxt.elevated(), snapshot, connector) - if model_update: - snapshot.provider_location = model_update.get( - 'provider_location', None) - snapshot.provider_auth = model_update.get( - 'provider_auth', None) - snapshot.save() - except exception.CinderException as ex: - msg = _("Create export of snapshot failed (%s)") % ex.msg - LOG.exception(msg, resource=snapshot) - raise exception.VolumeBackendAPIException(data=msg) - - try: - if model_update: - snapshot.update(model_update) - snapshot.save() - except exception.CinderException as ex: - LOG.exception("Model update failed.", resource=snapshot) - raise exception.ExportFailure(reason=six.text_type(ex)) - - try: - conn = self.driver.initialize_connection_snapshot(snapshot, - connector) - except Exception as err: - try: - err_msg = (_('Unable to fetch connection information from ' - 'backend: %(err)s') % - {'err': six.text_type(err)}) - LOG.error(err_msg) - LOG.debug("Cleaning up failed connect initialization.") - self.driver.remove_export_snapshot(ctxt.elevated(), snapshot) - except Exception as ex: - ex_msg = (_('Error encountered during cleanup ' - 'of a failed attach: %(ex)s') % - {'ex': six.text_type(ex)}) - LOG.error(ex_msg) - raise exception.VolumeBackendAPIException(data=ex_msg) - raise exception.VolumeBackendAPIException(data=err_msg) - - LOG.info("Initialize snapshot connection completed successfully.", - resource=snapshot) - return conn - - def terminate_connection(self, context, volume_id, connector, force=False): - """Cleanup connection from host represented by connector. 
- - The format of connector is the same as for initialize_connection. - """ - # NOTE(flaper87): Verify the driver is enabled - # before going forward. The exception will be caught - # and the volume status updated. - utils.require_driver_initialized(self.driver) - - volume_ref = self.db.volume_get(context, volume_id) - try: - self.driver.terminate_connection(volume_ref, connector, - force=force) - except Exception as err: - err_msg = (_('Terminate volume connection failed: %(err)s') - % {'err': six.text_type(err)}) - LOG.exception(err_msg, resource=volume_ref) - raise exception.VolumeBackendAPIException(data=err_msg) - LOG.info("Terminate volume connection completed successfully.", - resource=volume_ref) - - def terminate_connection_snapshot(self, ctxt, snapshot_id, - connector, force=False): - utils.require_driver_initialized(self.driver) - - snapshot = objects.Snapshot.get_by_id(ctxt, snapshot_id) - try: - self.driver.terminate_connection_snapshot(snapshot, connector, - force=force) - except Exception as err: - err_msg = (_('Terminate snapshot connection failed: %(err)s') - % {'err': six.text_type(err)}) - LOG.exception(err_msg, resource=snapshot) - raise exception.VolumeBackendAPIException(data=err_msg) - LOG.info("Terminate snapshot connection completed successfully.", - resource=snapshot) - - def remove_export(self, context, volume_id): - """Removes an export for a volume.""" - utils.require_driver_initialized(self.driver) - volume_ref = self.db.volume_get(context, volume_id) - try: - self.driver.remove_export(context, volume_ref) - except Exception: - msg = _("Remove volume export failed.") - LOG.exception(msg, resource=volume_ref) - raise exception.VolumeBackendAPIException(data=msg) - - LOG.info("Remove volume export completed successfully.", - resource=volume_ref) - - def remove_export_snapshot(self, ctxt, snapshot_id): - """Removes an export for a snapshot.""" - utils.require_driver_initialized(self.driver) - snapshot = objects.Snapshot.get_by_id(ctxt, 
snapshot_id) - try: - self.driver.remove_export_snapshot(ctxt, snapshot) - except Exception: - msg = _("Remove snapshot export failed.") - LOG.exception(msg, resource=snapshot) - raise exception.VolumeBackendAPIException(data=msg) - - LOG.info("Remove snapshot export completed successfully.", - resource=snapshot) - - def accept_transfer(self, context, volume_id, new_user, new_project): - # NOTE(flaper87): Verify the driver is enabled - # before going forward. The exception will be caught - # and the volume status updated. - utils.require_driver_initialized(self.driver) - - # NOTE(jdg): need elevated context as we haven't "given" the vol - # yet - volume_ref = self.db.volume_get(context.elevated(), volume_id) - - # NOTE(jdg): Some drivers tie provider info (CHAP) to tenant - # for those that do allow them to return updated model info - model_update = self.driver.accept_transfer(context, - volume_ref, - new_user, - new_project) - - if model_update: - try: - self.db.volume_update(context.elevated(), - volume_id, - model_update) - except exception.CinderException: - with excutils.save_and_reraise_exception(): - LOG.exception("Update volume model for " - "transfer operation failed.", - resource=volume_ref) - self.db.volume_update(context.elevated(), - volume_id, - {'status': 'error'}) - - LOG.info("Transfer volume completed successfully.", - resource=volume_ref) - return model_update - - def _connect_device(self, conn): - use_multipath = self.configuration.use_multipath_for_image_xfer - device_scan_attempts = self.configuration.num_volume_device_scan_tries - protocol = conn['driver_volume_type'] - connector = utils.brick_get_connector( - protocol, - use_multipath=use_multipath, - device_scan_attempts=device_scan_attempts, - conn=conn) - vol_handle = connector.connect_volume(conn['data']) - - root_access = True - - if not connector.check_valid_device(vol_handle['path'], root_access): - if isinstance(vol_handle['path'], six.string_types): - raise 
exception.DeviceUnavailable( - path=vol_handle['path'], - reason=(_("Unable to access the backend storage via the " - "path %(path)s.") % - {'path': vol_handle['path']})) - else: - raise exception.DeviceUnavailable( - path=None, - reason=(_("Unable to access the backend storage via file " - "handle."))) - - return {'conn': conn, 'device': vol_handle, 'connector': connector} - - def _attach_volume(self, ctxt, volume, properties, remote=False, - attach_encryptor=False): - status = volume['status'] - - if remote: - rpcapi = volume_rpcapi.VolumeAPI() - try: - conn = rpcapi.initialize_connection(ctxt, volume, properties) - except Exception: - with excutils.save_and_reraise_exception(): - LOG.error("Failed to attach volume %(vol)s.", - {'vol': volume['id']}) - self.db.volume_update(ctxt, volume['id'], - {'status': status}) - else: - conn = self.initialize_connection(ctxt, volume, properties) - - attach_info = self._connect_device(conn) - try: - if attach_encryptor and ( - volume_types.is_encrypted(ctxt, - volume.volume_type_id)): - encryption = self.db.volume_encryption_metadata_get( - ctxt.elevated(), volume.id) - if encryption: - utils.brick_attach_volume_encryptor(ctxt, - attach_info, - encryption) - except Exception: - with excutils.save_and_reraise_exception(): - LOG.error("Failed to attach volume encryptor" - " %(vol)s.", {'vol': volume['id']}) - self._detach_volume(ctxt, attach_info, volume, properties) - return attach_info - - def _detach_volume(self, ctxt, attach_info, volume, properties, - force=False, remote=False, - attach_encryptor=False): - connector = attach_info['connector'] - if attach_encryptor and ( - volume_types.is_encrypted(ctxt, - volume.volume_type_id)): - encryption = self.db.volume_encryption_metadata_get( - ctxt.elevated(), volume.id) - if encryption: - utils.brick_detach_volume_encryptor(attach_info, encryption) - connector.disconnect_volume(attach_info['conn']['data'], - attach_info['device']) - - if remote: - rpcapi = 
volume_rpcapi.VolumeAPI() - rpcapi.terminate_connection(ctxt, volume, properties, force=force) - rpcapi.remove_export(ctxt, volume) - else: - try: - self.terminate_connection(ctxt, volume['id'], properties, - force=force) - self.remove_export(ctxt, volume['id']) - except Exception as err: - with excutils.save_and_reraise_exception(): - LOG.error('Unable to terminate volume connection: ' - '%(err)s.', {'err': err}) - - def _copy_volume_data(self, ctxt, src_vol, dest_vol, remote=None): - """Copy data from src_vol to dest_vol.""" - - LOG.debug('copy_data_between_volumes %(src)s -> %(dest)s.', - {'src': src_vol['name'], 'dest': dest_vol['name']}) - attach_encryptor = False - # If the encryption method or key is changed, we have to - # copy data through dm-crypt. - if volume_types.volume_types_encryption_changed( - ctxt, - src_vol.volume_type_id, - dest_vol.volume_type_id): - attach_encryptor = True - properties = utils.brick_get_connector_properties() - - dest_remote = remote in ['dest', 'both'] - dest_attach_info = self._attach_volume( - ctxt, dest_vol, properties, - remote=dest_remote, - attach_encryptor=attach_encryptor) - - try: - src_remote = remote in ['src', 'both'] - src_attach_info = self._attach_volume( - ctxt, src_vol, properties, - remote=src_remote, - attach_encryptor=attach_encryptor) - except Exception: - with excutils.save_and_reraise_exception(): - LOG.error("Failed to attach source volume for copy.") - self._detach_volume(ctxt, dest_attach_info, dest_vol, - properties, remote=dest_remote, - attach_encryptor=attach_encryptor) - - # Check the backend capabilities of migration destination host. 
- rpcapi = volume_rpcapi.VolumeAPI() - capabilities = rpcapi.get_capabilities(ctxt, - dest_vol.service_topic_queue, - False) - sparse_copy_volume = bool(capabilities and - capabilities.get('sparse_copy_volume', - False)) - - copy_error = True - try: - size_in_mb = int(src_vol['size']) * units.Ki # vol size is in GB - vol_utils.copy_volume(src_attach_info['device']['path'], - dest_attach_info['device']['path'], - size_in_mb, - self.configuration.volume_dd_blocksize, - sparse=sparse_copy_volume) - copy_error = False - except Exception: - with excutils.save_and_reraise_exception(): - LOG.error("Failed to copy volume %(src)s to %(dest)s.", - {'src': src_vol['id'], 'dest': dest_vol['id']}) - finally: - try: - self._detach_volume(ctxt, dest_attach_info, dest_vol, - properties, force=copy_error, - remote=dest_remote, - attach_encryptor=attach_encryptor) - finally: - self._detach_volume(ctxt, src_attach_info, src_vol, - properties, force=copy_error, - remote=src_remote, - attach_encryptor=attach_encryptor) - - def _migrate_volume_generic(self, ctxt, volume, backend, new_type_id): - rpcapi = volume_rpcapi.VolumeAPI() - - # Create new volume on remote host - tmp_skip = {'snapshot_id', 'source_volid'} - skip = self._VOLUME_CLONE_SKIP_PROPERTIES | tmp_skip | {'host', - 'cluster_name'} - new_vol_values = {k: volume[k] for k in set(volume.keys()) - skip} - if new_type_id: - new_vol_values['volume_type_id'] = new_type_id - if volume_types.volume_types_encryption_changed( - ctxt, volume.volume_type_id, new_type_id): - encryption_key_id = vol_utils.create_encryption_key( - ctxt, self.key_manager, new_type_id) - new_vol_values['encryption_key_id'] = encryption_key_id - - new_volume = objects.Volume( - context=ctxt, - host=backend['host'], - cluster_name=backend.get('cluster_name'), - status='creating', - attach_status=fields.VolumeAttachStatus.DETACHED, - migration_status='target:%s' % volume['id'], - **new_vol_values - ) - new_volume.create() - rpcapi.create_volume(ctxt, 
new_volume, None, None, - allow_reschedule=False) - - # Wait for new_volume to become ready - starttime = time.time() - deadline = starttime + CONF.migration_create_volume_timeout_secs - # TODO(thangp): Replace get_by_id with refresh when it is available - new_volume = objects.Volume.get_by_id(ctxt, new_volume.id) - tries = 0 - while new_volume.status != 'available': - tries += 1 - now = time.time() - if new_volume.status == 'error': - msg = _("failed to create new_volume on destination") - self._clean_temporary_volume(ctxt, volume, - new_volume, - clean_db_only=True) - raise exception.VolumeMigrationFailed(reason=msg) - elif now > deadline: - msg = _("timeout creating new_volume on destination") - self._clean_temporary_volume(ctxt, volume, - new_volume, - clean_db_only=True) - raise exception.VolumeMigrationFailed(reason=msg) - else: - time.sleep(tries ** 2) - # TODO(thangp): Replace get_by_id with refresh when it is - # available - new_volume = objects.Volume.get_by_id(ctxt, new_volume.id) - - # Set skipped value to avoid calling - # function except for _create_raw_volume - tmp_skipped_values = {k: volume[k] for k in tmp_skip if volume.get(k)} - if tmp_skipped_values: - new_volume.update(tmp_skipped_values) - new_volume.save() - - # Copy the source volume to the destination volume - try: - attachments = volume.volume_attachment - if not attachments: - # Pre- and post-copy driver-specific actions - self.driver.before_volume_copy(ctxt, volume, new_volume, - remote='dest') - self._copy_volume_data(ctxt, volume, new_volume, remote='dest') - self.driver.after_volume_copy(ctxt, volume, new_volume, - remote='dest') - - # The above call is synchronous so we complete the migration - self.migrate_volume_completion(ctxt, volume, new_volume, - error=False) - else: - nova_api = compute.API() - # This is an async call to Nova, which will call the completion - # when it's done - for attachment in attachments: - instance_uuid = attachment['instance_uuid'] - 
nova_api.update_server_volume(ctxt, instance_uuid, - volume.id, - new_volume.id) - except Exception: - with excutils.save_and_reraise_exception(): - LOG.exception( - "Failed to copy volume %(vol1)s to %(vol2)s", { - 'vol1': volume.id, 'vol2': new_volume.id}) - self._clean_temporary_volume(ctxt, volume, - new_volume) - - def _clean_temporary_volume(self, ctxt, volume, new_volume, - clean_db_only=False): - # If we're in the migrating phase, we need to cleanup - # destination volume because source volume is remaining - if volume.migration_status == 'migrating': - try: - if clean_db_only: - # The temporary volume is not created, only DB data - # is created - new_volume.destroy() - else: - # The temporary volume is already created - rpcapi = volume_rpcapi.VolumeAPI() - rpcapi.delete_volume(ctxt, new_volume) - except exception.VolumeNotFound: - LOG.info("Couldn't find the temporary volume " - "%(vol)s in the database. There is no need " - "to clean up this volume.", - {'vol': new_volume.id}) - else: - # If we're in the completing phase don't delete the - # destination because we may have already deleted the - # source! But the migration_status in database should - # be cleared to handle volume after migration failure - try: - new_volume.migration_status = None - new_volume.save() - except exception.VolumeNotFound: - LOG.info("Couldn't find destination volume " - "%(vol)s in the database. The entry might be " - "successfully deleted during migration " - "completion phase.", - {'vol': new_volume.id}) - - LOG.warning("Failed to migrate volume. The destination " - "volume %(vol)s is not deleted since the " - "source volume may have been deleted.", - {'vol': new_volume.id}) - - def migrate_volume_completion(self, ctxt, volume, new_volume, error=False): - try: - # NOTE(flaper87): Verify the driver is enabled - # before going forward. The exception will be caught - # and the migration status updated. 
- utils.require_driver_initialized(self.driver) - except exception.DriverNotInitialized: - with excutils.save_and_reraise_exception(): - volume.migration_status = 'error' - volume.save() - - # NOTE(jdg): Things get a little hairy in here and we do a lot of - # things based on volume previous-status and current-status. At some - # point this should all be reworked but for now we need to maintain - # backward compatability and NOT change the API so we're going to try - # and make this work best we can - - LOG.debug("migrate_volume_completion: completing migration for " - "volume %(vol1)s (temporary volume %(vol2)s", - {'vol1': volume.id, 'vol2': new_volume.id}) - rpcapi = volume_rpcapi.VolumeAPI() - - orig_volume_status = volume.previous_status - - if error: - LOG.info("migrate_volume_completion is cleaning up an error " - "for volume %(vol1)s (temporary volume %(vol2)s", - {'vol1': volume['id'], 'vol2': new_volume.id}) - rpcapi.delete_volume(ctxt, new_volume) - updates = {'migration_status': 'error', - 'status': orig_volume_status} - volume.update(updates) - volume.save() - return volume.id - - volume.migration_status = 'completing' - volume.save() - - volume_attachments = [] - - # NOTE(jdg): With new attach flow, we deleted the attachment, so the - # original volume should now be listed as available, we still need to - # do the magic swappy thing of name.id etc but we're done with the - # original attachment record - - # In the "old flow" at this point the orig_volume_status will be in-use - # and the current status will be retyping. This is sort of a - # misleading deal, because Nova has already called terminate - # connection - - # New Attach Flow, Nova has gone ahead and deleted the attachemnt, this - # is the source/original volume, we've already migrated the data, we're - # basically done with it at this point. 
We don't need to issue the - # detach to toggle the status - if orig_volume_status == 'in-use' and volume.status != 'available': - for attachment in volume.volume_attachment: - # Save the attachments the volume currently have - volume_attachments.append(attachment) - try: - self.detach_volume(ctxt, volume.id, attachment.id) - except Exception as ex: - LOG.error("Detach migration source volume " - "%(volume.id)s from attachment " - "%(attachment.id)s failed: %(err)s", - {'err': ex, - 'volume.id': volume.id, - 'attachment.id': attachment.id}, - resource=volume) - - # Give driver (new_volume) a chance to update things as needed - # after a successful migration. - # Note this needs to go through rpc to the host of the new volume - # the current host and driver object is for the "existing" volume. - rpcapi.update_migrated_volume(ctxt, volume, new_volume, - orig_volume_status) - volume.refresh() - new_volume.refresh() - - # Swap src and dest DB records so we can continue using the src id and - # asynchronously delete the destination id - updated_new = volume.finish_volume_migration(new_volume) - updates = {'status': orig_volume_status, - 'previous_status': volume.status, - 'migration_status': 'success'} - - # NOTE(jdg): With new attachment API's nova will delete the - # attachment for the source volume for us before calling the - # migration-completion, now we just need to do the swapping on the - # volume record, but don't jack with the attachments other than - # updating volume_id - - # In the old flow at this point the volumes are in attaching and - # deleting status (dest/new is deleting, but we've done our magic - # swappy thing so it's a bit confusing, but it does unwind properly - # when you step through it) - - # In the new flow we simlified this and we don't need it, instead of - # doing a bunch of swapping we just do attachment-create/delete on the - # nova side, and then here we just do the ID swaps that are necessary - # to maintain the old beahvior - - # 
Restore the attachments for old flow use-case - if orig_volume_status == 'in-use' and volume.status in ['available', - 'reserved', - 'attaching']: - for attachment in volume_attachments: - LOG.debug('Re-attaching: %s', attachment) - # This is just a db state toggle, the volume is actually - # already attach and in-use, new attachment flow won't allow - # this - rpcapi.attach_volume(ctxt, volume, - attachment.instance_uuid, - attachment.attached_host, - attachment.mountpoint, - 'rw') - # At this point we now have done almost all of our swapping and - # state-changes. The target volume is now marked back to - # "in-use" the destination/worker volume is now in deleting - # state and the next steps will finish the deletion steps - volume.update(updates) - volume.save() - - # Asynchronous deletion of the source volume in the back-end (now - # pointed by the target volume id) - try: - rpcapi.delete_volume(ctxt, updated_new) - except Exception as ex: - LOG.error('Failed to request async delete of migration source ' - 'vol %(vol)s: %(err)s', - {'vol': volume.id, 'err': ex}) - - # For the new flow this is realy the key part. We just use the - # attachments to the worker/destination volumes that we created and - # used for the libvirt migration and we'll just swap their volume_id - # entries to coorespond with the volume.id swap we did - for attachment in VA_LIST.get_all_by_volume_id(ctxt, updated_new.id): - attachment.volume_id = volume.id - attachment.save() - - # Phewww.. that was easy! Once we get to a point where the old attach - # flow can go away we really should rewrite all of this. - LOG.info("Complete-Migrate volume completed successfully.", - resource=volume) - return volume.id - - def migrate_volume(self, ctxt, volume, host, force_host_copy=False, - new_type_id=None): - """Migrate the volume to the specified host (called on source host).""" - try: - # NOTE(flaper87): Verify the driver is enabled - # before going forward. 
The exception will be caught - # and the migration status updated. - utils.require_driver_initialized(self.driver) - except exception.DriverNotInitialized: - with excutils.save_and_reraise_exception(): - volume.migration_status = 'error' - volume.save() - - model_update = None - moved = False - - status_update = None - if volume.status in ('retyping', 'maintenance'): - status_update = {'status': volume.previous_status} - - volume.migration_status = 'migrating' - volume.save() - if not force_host_copy and new_type_id is None: - try: - LOG.debug("Issue driver.migrate_volume.", resource=volume) - moved, model_update = self.driver.migrate_volume(ctxt, - volume, - host) - if moved: - updates = {'host': host['host'], - 'cluster_name': host.get('cluster_name'), - 'migration_status': 'success', - 'previous_status': volume.status} - if status_update: - updates.update(status_update) - if model_update: - updates.update(model_update) - volume.update(updates) - volume.save() - except Exception: - with excutils.save_and_reraise_exception(): - updates = {'migration_status': 'error'} - if status_update: - updates.update(status_update) - volume.update(updates) - volume.save() - if not moved: - try: - self._migrate_volume_generic(ctxt, volume, host, new_type_id) - except Exception: - with excutils.save_and_reraise_exception(): - updates = {'migration_status': 'error'} - if status_update: - updates.update(status_update) - volume.update(updates) - volume.save() - LOG.info("Migrate volume completed successfully.", - resource=volume) - - @periodic_task.periodic_task - def _report_driver_status(self, context): - if not self.driver.initialized: - if self.driver.configuration.config_group is None: - config_group = '' - else: - config_group = ('(config name %s)' % - self.driver.configuration.config_group) - - LOG.warning("Update driver status failed: %(config_group)s " - "is uninitialized.", - {'config_group': config_group}, - resource={'type': 'driver', - 'id': 
self.driver.__class__.__name__}) - else: - volume_stats = self.driver.get_volume_stats(refresh=True) - if self.extra_capabilities: - volume_stats.update(self.extra_capabilities) - if volume_stats: - - # NOTE(xyang): If driver reports replication_status to be - # 'error' in volume_stats, get model updates from driver - # and update db - if volume_stats.get('replication_status') == ( - fields.ReplicationStatus.ERROR): - filters = self._get_cluster_or_host_filters() - groups = objects.GroupList.get_all_replicated( - context, filters=filters) - group_model_updates, volume_model_updates = ( - self.driver.get_replication_error_status(context, - groups)) - for grp_update in group_model_updates: - try: - grp_obj = objects.Group.get_by_id( - context, grp_update['group_id']) - grp_obj.update(grp_update) - grp_obj.save() - except exception.GroupNotFound: - # Group may be deleted already. Log a warning - # and continue. - LOG.warning("Group %(grp)s not found while " - "updating driver status.", - {'grp': grp_update['group_id']}, - resource={ - 'type': 'group', - 'id': grp_update['group_id']}) - for vol_update in volume_model_updates: - try: - vol_obj = objects.Volume.get_by_id( - context, vol_update['volume_id']) - vol_obj.update(vol_update) - vol_obj.save() - except exception.VolumeNotFound: - # Volume may be deleted already. Log a warning - # and continue. - LOG.warning("Volume %(vol)s not found while " - "updating driver status.", - {'vol': vol_update['volume_id']}, - resource={ - 'type': 'volume', - 'id': vol_update['volume_id']}) - - # Append volume stats with 'allocated_capacity_gb' - self._append_volume_stats(volume_stats) - - # Append filter and goodness function if needed - volume_stats = ( - self._append_filter_goodness_functions(volume_stats)) - - # queue it to be sent to the Schedulers. 
- self.update_service_capabilities(volume_stats) - - def _append_volume_stats(self, vol_stats): - pools = vol_stats.get('pools', None) - if pools and isinstance(pools, list): - for pool in pools: - pool_name = pool['pool_name'] - try: - pool_stats = self.stats['pools'][pool_name] - except KeyError: - # Pool not found in volume manager - pool_stats = dict(allocated_capacity_gb=0) - - pool.update(pool_stats) - - def _append_filter_goodness_functions(self, volume_stats): - """Returns volume_stats updated as needed.""" - - # Append filter_function if needed - if 'filter_function' not in volume_stats: - volume_stats['filter_function'] = ( - self.driver.get_filter_function()) - - # Append goodness_function if needed - if 'goodness_function' not in volume_stats: - volume_stats['goodness_function'] = ( - self.driver.get_goodness_function()) - - return volume_stats - - def publish_service_capabilities(self, context): - """Collect driver status and then publish.""" - self._report_driver_status(context) - self._publish_service_capabilities(context) - - def _notify_about_volume_usage(self, - context, - volume, - event_suffix, - extra_usage_info=None): - vol_utils.notify_about_volume_usage( - context, volume, event_suffix, - extra_usage_info=extra_usage_info, host=self.host) - - def _notify_about_snapshot_usage(self, - context, - snapshot, - event_suffix, - extra_usage_info=None): - vol_utils.notify_about_snapshot_usage( - context, snapshot, event_suffix, - extra_usage_info=extra_usage_info, host=self.host) - - def _notify_about_group_usage(self, - context, - group, - event_suffix, - volumes=None, - extra_usage_info=None): - vol_utils.notify_about_group_usage( - context, group, event_suffix, - extra_usage_info=extra_usage_info, host=self.host) - - if not volumes: - volumes = self.db.volume_get_all_by_generic_group( - context, group.id) - if volumes: - for volume in volumes: - vol_utils.notify_about_volume_usage( - context, volume, event_suffix, - 
extra_usage_info=extra_usage_info, host=self.host) - - def _notify_about_group_snapshot_usage(self, - context, - group_snapshot, - event_suffix, - snapshots=None, - extra_usage_info=None): - vol_utils.notify_about_group_snapshot_usage( - context, group_snapshot, event_suffix, - extra_usage_info=extra_usage_info, host=self.host) - - if not snapshots: - snapshots = objects.SnapshotList.get_all_for_group_snapshot( - context, group_snapshot.id) - if snapshots: - for snapshot in snapshots: - vol_utils.notify_about_snapshot_usage( - context, snapshot, event_suffix, - extra_usage_info=extra_usage_info, host=self.host) - - def extend_volume(self, context, volume, new_size, reservations): - try: - # NOTE(flaper87): Verify the driver is enabled - # before going forward. The exception will be caught - # and the volume status updated. - utils.require_driver_initialized(self.driver) - except exception.DriverNotInitialized: - with excutils.save_and_reraise_exception(): - volume.status = 'error_extending' - volume.save() - - project_id = volume.project_id - size_increase = (int(new_size)) - volume.size - self._notify_about_volume_usage(context, volume, "resize.start") - try: - self.driver.extend_volume(volume, new_size) - except Exception: - LOG.exception("Extend volume failed.", - resource=volume) - try: - self.db.volume_update(context, volume.id, - {'status': 'error_extending'}) - raise exception.CinderException(_("Volume %s: Error trying " - "to extend volume") % - volume.id) - finally: - QUOTAS.rollback(context, reservations, project_id=project_id) - return - - QUOTAS.commit(context, reservations, project_id=project_id) - - attachments = volume.volume_attachment - if not attachments: - orig_volume_status = 'available' - else: - orig_volume_status = 'in-use' - - volume.update({'size': int(new_size), 'status': orig_volume_status}) - volume.save() - - if orig_volume_status == 'in-use': - nova_api = compute.API() - instance_uuids = [attachment.instance_uuid - for attachment in 
attachments] - nova_api.extend_volume(context, instance_uuids, volume.id) - - pool = vol_utils.extract_host(volume.host, 'pool') - if pool is None: - # Legacy volume, put them into default pool - pool = self.driver.configuration.safe_get( - 'volume_backend_name') or vol_utils.extract_host( - volume.host, 'pool', True) - - try: - self.stats['pools'][pool]['allocated_capacity_gb'] += size_increase - except KeyError: - self.stats['pools'][pool] = dict( - allocated_capacity_gb=size_increase) - - self._notify_about_volume_usage( - context, volume, "resize.end", - extra_usage_info={'size': int(new_size)}) - LOG.info("Extend volume completed successfully.", - resource=volume) - - def _is_our_backend(self, host, cluster_name): - return ((not cluster_name and - vol_utils.hosts_are_equivalent(self.driver.host, host)) or - (cluster_name and - vol_utils.hosts_are_equivalent(self.driver.cluster_name, - cluster_name))) - - def retype(self, context, volume, new_type_id, host, - migration_policy='never', reservations=None, - old_reservations=None): - - def _retype_error(context, volume, old_reservations, - new_reservations, status_update): - try: - volume.update(status_update) - volume.save() - finally: - QUOTAS.rollback(context, old_reservations) - QUOTAS.rollback(context, new_reservations) - - status_update = {'status': volume.previous_status} - if context.project_id != volume.project_id: - project_id = volume.project_id - else: - project_id = context.project_id - - try: - # NOTE(flaper87): Verify the driver is enabled - # before going forward. The exception will be caught - # and the volume status updated. - utils.require_driver_initialized(self.driver) - except exception.DriverNotInitialized: - with excutils.save_and_reraise_exception(): - # NOTE(flaper87): Other exceptions in this method don't - # set the volume status to error. Should that be done - # here? Setting the volume back to it's original status - # for now. 
- volume.update(status_update) - volume.save() - - # If old_reservations has been passed in from the API, we should - # skip quotas. - # TODO(ntpttr): These reservation checks are left in to be backwards - # compatible with Liberty and can be removed in N. - if not old_reservations: - # Get old reservations - try: - reserve_opts = {'volumes': -1, 'gigabytes': -volume.size} - QUOTAS.add_volume_type_opts(context, - reserve_opts, - volume.volume_type_id) - # NOTE(wanghao): We don't need to reserve volumes and gigabytes - # quota for retyping operation since they didn't changed, just - # reserve volume_type and type gigabytes is fine. - reserve_opts.pop('volumes') - reserve_opts.pop('gigabytes') - old_reservations = QUOTAS.reserve(context, - project_id=project_id, - **reserve_opts) - except Exception: - volume.update(status_update) - volume.save() - msg = _("Failed to update quota usage while retyping volume.") - LOG.exception(msg, resource=volume) - raise exception.CinderException(msg) - - # We already got the new reservations - new_reservations = reservations - - # If volume types have the same contents, no need to do anything. - # Use the admin contex to be able to access volume extra_specs - retyped = False - diff, all_equal = volume_types.volume_types_diff( - context.elevated(), volume.volume_type_id, new_type_id) - if all_equal: - retyped = True - - # Call driver to try and change the type - retype_model_update = None - - # NOTE(jdg): Check to see if the destination host or cluster (depending - # if it's the volume is in a clustered backend or not) is the same as - # the current. If it's not don't call the driver.retype method, - # otherwise drivers that implement retype may report success, but it's - # invalid in the case of a migrate. 
- - # We assume that those that support pools do this internally - # so we strip off the pools designation - - if (not retyped and - not diff.get('encryption') and - self._is_our_backend(host['host'], host.get('cluster_name'))): - try: - new_type = volume_types.get_volume_type(context.elevated(), - new_type_id) - with volume.obj_as_admin(): - ret = self.driver.retype(context, - volume, - new_type, - diff, - host) - # Check if the driver retype provided a model update or - # just a retype indication - if type(ret) == tuple: - retyped, retype_model_update = ret - else: - retyped = ret - - if retyped: - LOG.info("Volume %s: retyped successfully.", volume.id) - except Exception: - retyped = False - LOG.exception("Volume %s: driver error when trying to " - "retype, falling back to generic " - "mechanism.", volume.id) - - # We could not change the type, so we need to migrate the volume, where - # the destination volume will be of the new type - if not retyped: - if migration_policy == 'never': - _retype_error(context, volume, old_reservations, - new_reservations, status_update) - msg = _("Retype requires migration but is not allowed.") - raise exception.VolumeMigrationFailed(reason=msg) - - snaps = objects.SnapshotList.get_all_for_volume(context, - volume.id) - if snaps: - _retype_error(context, volume, old_reservations, - new_reservations, status_update) - msg = _("Volume must not have snapshots.") - LOG.error(msg) - raise exception.InvalidVolume(reason=msg) - - # Don't allow volume with replicas to be migrated - rep_status = volume.replication_status - if rep_status is not None and rep_status != 'disabled': - _retype_error(context, volume, old_reservations, - new_reservations, status_update) - msg = _("Volume must not be replicated.") - LOG.error(msg) - raise exception.InvalidVolume(reason=msg) - - volume.migration_status = 'starting' - volume.save() - - try: - self.migrate_volume(context, volume, host, - new_type_id=new_type_id) - except Exception: - with 
excutils.save_and_reraise_exception(): - _retype_error(context, volume, old_reservations, - new_reservations, status_update) - else: - model_update = {'volume_type_id': new_type_id, - 'host': host['host'], - 'cluster_name': host.get('cluster_name'), - 'status': status_update['status']} - if retype_model_update: - model_update.update(retype_model_update) - self._set_replication_status(diff, model_update) - volume.update(model_update) - volume.save() - - if old_reservations: - QUOTAS.commit(context, old_reservations, project_id=project_id) - if new_reservations: - QUOTAS.commit(context, new_reservations, project_id=project_id) - self._notify_about_volume_usage( - context, volume, "retype", - extra_usage_info={'volume_type': new_type_id}) - self.publish_service_capabilities(context) - LOG.info("Retype volume completed successfully.", - resource=volume) - - @staticmethod - def _set_replication_status(diff, model_update): - """Update replication_status in model_update if it has changed.""" - if not diff or model_update.get('replication_status'): - return - - diff_specs = diff.get('extra_specs', {}) - replication_diff = diff_specs.get('replication_enabled') - - if replication_diff: - is_replicated = vol_utils.is_replicated_str(replication_diff[1]) - if is_replicated: - replication_status = fields.ReplicationStatus.ENABLED - else: - replication_status = fields.ReplicationStatus.DISABLED - model_update['replication_status'] = replication_status - - def manage_existing(self, ctxt, volume, ref=None): - vol_ref = self._run_manage_existing_flow_engine( - ctxt, volume, ref) - - self._update_stats_for_managed(vol_ref) - - LOG.info("Manage existing volume completed successfully.", - resource=vol_ref) - return vol_ref.id - - def _update_stats_for_managed(self, volume_reference): - # Update volume stats - pool = vol_utils.extract_host(volume_reference.host, 'pool') - if pool is None: - # Legacy volume, put them into default pool - pool = self.driver.configuration.safe_get( - 
'volume_backend_name') or vol_utils.extract_host( - volume_reference.host, 'pool', True) - - try: - self.stats['pools'][pool]['allocated_capacity_gb'] \ - += volume_reference.size - except KeyError: - self.stats['pools'][pool] = dict( - allocated_capacity_gb=volume_reference.size) - - def _run_manage_existing_flow_engine(self, ctxt, volume, ref): - try: - flow_engine = manage_existing.get_flow( - ctxt, - self.db, - self.driver, - self.host, - volume, - ref, - ) - except Exception: - msg = _("Failed to create manage_existing flow.") - LOG.exception(msg, resource={'type': 'volume', 'id': volume.id}) - raise exception.CinderException(msg) - - with flow_utils.DynamicLogListener(flow_engine, logger=LOG): - flow_engine.run() - - # Fetch created volume from storage - vol_ref = flow_engine.storage.fetch('volume') - - return vol_ref - - def _get_cluster_or_host_filters(self): - if self.cluster: - filters = {'cluster_name': self.cluster} - else: - filters = {'host': self.host} - return filters - - def _get_my_resources(self, ctxt, ovo_class_list): - filters = self._get_cluster_or_host_filters() - return getattr(ovo_class_list, 'get_all')(ctxt, filters=filters) - - def _get_my_volumes(self, ctxt): - return self._get_my_resources(ctxt, objects.VolumeList) - - def _get_my_snapshots(self, ctxt): - return self._get_my_resources(ctxt, objects.SnapshotList) - - def get_manageable_volumes(self, ctxt, marker, limit, offset, sort_keys, - sort_dirs, want_objects=False): - try: - utils.require_driver_initialized(self.driver) - except exception.DriverNotInitialized: - with excutils.save_and_reraise_exception(): - LOG.exception("Listing manageable volumes failed, due " - "to uninitialized driver.") - - cinder_volumes = self._get_my_volumes(ctxt) - try: - driver_entries = self.driver.get_manageable_volumes( - cinder_volumes, marker, limit, offset, sort_keys, sort_dirs) - if want_objects: - driver_entries = (objects.ManageableVolumeList. 
- from_primitives(ctxt, driver_entries)) - except Exception: - with excutils.save_and_reraise_exception(): - LOG.exception("Listing manageable volumes failed, due " - "to driver error.") - return driver_entries - - def create_group(self, context, group): - """Creates the group.""" - context = context.elevated() - - # Make sure the host in the DB matches our own when clustered - self._set_resource_host(group) - - status = fields.GroupStatus.AVAILABLE - model_update = None - - self._notify_about_group_usage(context, group, "create.start") - - try: - utils.require_driver_initialized(self.driver) - - LOG.info("Group %s: creating", group.name) - - try: - model_update = self.driver.create_group(context, group) - except NotImplementedError: - cgsnap_type = group_types.get_default_cgsnapshot_type() - if group.group_type_id != cgsnap_type['id']: - model_update = self._create_group_generic(context, group) - else: - cg, __ = self._convert_group_to_cg(group, []) - model_update = self.driver.create_consistencygroup( - context, cg) - - if model_update: - if (model_update['status'] == - fields.GroupStatus.ERROR): - msg = (_('Create group failed.')) - LOG.error(msg, - resource={'type': 'group', - 'id': group.id}) - raise exception.VolumeDriverException(message=msg) - else: - group.update(model_update) - group.save() - except Exception: - with excutils.save_and_reraise_exception(): - group.status = fields.GroupStatus.ERROR - group.save() - LOG.error("Group %s: create failed", - group.name) - - group.status = status - group.created_at = timeutils.utcnow() - group.save() - LOG.info("Group %s: created successfully", group.name) - - self._notify_about_group_usage(context, group, "create.end") - - LOG.info("Create group completed successfully.", - resource={'type': 'group', - 'id': group.id}) - return group - - def create_group_from_src(self, context, group, - group_snapshot=None, source_group=None): - """Creates the group from source. 
- - The source can be a group snapshot or a source group. - """ - source_name = None - snapshots = None - source_vols = None - try: - volumes = objects.VolumeList.get_all_by_generic_group(context, - group.id) - if group_snapshot: - try: - # Check if group_snapshot still exists - group_snapshot = objects.GroupSnapshot.get_by_id( - context, group_snapshot.id) - except exception.GroupSnapshotNotFound: - LOG.error("Create group from snapshot-%(snap)s failed: " - "SnapshotNotFound.", - {'snap': group_snapshot.id}, - resource={'type': 'group', - 'id': group.id}) - raise - - source_name = _("snapshot-%s") % group_snapshot.id - snapshots = objects.SnapshotList.get_all_for_group_snapshot( - context, group_snapshot.id) - for snap in snapshots: - if (snap.status not in - VALID_CREATE_GROUP_SRC_SNAP_STATUS): - msg = (_("Cannot create group " - "%(group)s because snapshot %(snap)s is " - "not in a valid state. Valid states are: " - "%(valid)s.") % - {'group': group.id, - 'snap': snap['id'], - 'valid': VALID_CREATE_GROUP_SRC_SNAP_STATUS}) - raise exception.InvalidGroup(reason=msg) - - if source_group: - try: - source_group = objects.Group.get_by_id( - context, source_group.id) - except exception.GroupNotFound: - LOG.error("Create group " - "from source group-%(group)s failed: " - "GroupNotFound.", - {'group': source_group.id}, - resource={'type': 'group', - 'id': group.id}) - raise - - source_name = _("group-%s") % source_group.id - source_vols = objects.VolumeList.get_all_by_generic_group( - context, source_group.id) - for source_vol in source_vols: - if (source_vol.status not in - VALID_CREATE_GROUP_SRC_GROUP_STATUS): - msg = (_("Cannot create group " - "%(group)s because source volume " - "%(source_vol)s is not in a valid " - "state. 
Valid states are: " - "%(valid)s.") % - {'group': group.id, - 'source_vol': source_vol.id, - 'valid': VALID_CREATE_GROUP_SRC_GROUP_STATUS}) - raise exception.InvalidGroup(reason=msg) - - # Sort source snapshots so that they are in the same order as their - # corresponding target volumes. - sorted_snapshots = None - if group_snapshot and snapshots: - sorted_snapshots = self._sort_snapshots(volumes, snapshots) - - # Sort source volumes so that they are in the same order as their - # corresponding target volumes. - sorted_source_vols = None - if source_group and source_vols: - sorted_source_vols = self._sort_source_vols(volumes, - source_vols) - - self._notify_about_group_usage( - context, group, "create.start") - - utils.require_driver_initialized(self.driver) - - try: - model_update, volumes_model_update = ( - self.driver.create_group_from_src( - context, group, volumes, group_snapshot, - sorted_snapshots, source_group, sorted_source_vols)) - except NotImplementedError: - cgsnap_type = group_types.get_default_cgsnapshot_type() - if group.group_type_id != cgsnap_type['id']: - model_update, volumes_model_update = ( - self._create_group_from_src_generic( - context, group, volumes, group_snapshot, - sorted_snapshots, source_group, - sorted_source_vols)) - else: - cg, volumes = self._convert_group_to_cg( - group, volumes) - cgsnapshot, sorted_snapshots = ( - self._convert_group_snapshot_to_cgsnapshot( - group_snapshot, sorted_snapshots, context)) - source_cg, sorted_source_vols = ( - self._convert_group_to_cg(source_group, - sorted_source_vols)) - model_update, volumes_model_update = ( - self.driver.create_consistencygroup_from_src( - context, cg, volumes, cgsnapshot, - sorted_snapshots, source_cg, sorted_source_vols)) - self._remove_cgsnapshot_id_from_snapshots(sorted_snapshots) - self._remove_consistencygroup_id_from_volumes(volumes) - self._remove_consistencygroup_id_from_volumes( - sorted_source_vols) - - if volumes_model_update: - for update in volumes_model_update: 
- self.db.volume_update(context, update['id'], update) - - if model_update: - group.update(model_update) - group.save() - - except Exception: - with excutils.save_and_reraise_exception(): - group.status = fields.GroupStatus.ERROR - group.save() - LOG.error("Create group " - "from source %(source)s failed.", - {'source': source_name}, - resource={'type': 'group', - 'id': group.id}) - # Update volume status to 'error' as well. - self._remove_consistencygroup_id_from_volumes(volumes) - for vol in volumes: - vol.status = 'error' - vol.save() - - now = timeutils.utcnow() - status = 'available' - for vol in volumes: - update = {'status': status, 'created_at': now} - self._update_volume_from_src(context, vol, update, group=group) - self._update_allocated_capacity(vol) - - group.status = status - group.created_at = now - group.save() - - self._notify_about_group_usage( - context, group, "create.end") - LOG.info("Create group " - "from source-%(source)s completed successfully.", - {'source': source_name}, - resource={'type': 'group', - 'id': group.id}) - return group - - def _create_group_from_src_generic(self, context, group, volumes, - group_snapshot=None, snapshots=None, - source_group=None, source_vols=None): - """Creates a group from source. - - :param context: the context of the caller. - :param group: the Group object to be created. - :param volumes: a list of volume objects in the group. - :param group_snapshot: the GroupSnapshot object as source. - :param snapshots: a list of snapshot objects in group_snapshot. - :param source_group: the Group object as source. - :param source_vols: a list of volume objects in the source_group. 
- :returns: model_update, volumes_model_update - """ - model_update = {'status': 'available'} - volumes_model_update = [] - for vol in volumes: - if snapshots: - for snapshot in snapshots: - if vol.snapshot_id == snapshot.id: - vol_model_update = {'id': vol.id} - try: - driver_update = ( - self.driver.create_volume_from_snapshot( - vol, snapshot)) - if driver_update: - driver_update.pop('id', None) - vol_model_update.update(driver_update) - if 'status' not in vol_model_update: - vol_model_update['status'] = 'available' - except Exception: - vol_model_update['status'] = 'error' - model_update['status'] = 'error' - volumes_model_update.append(vol_model_update) - break - elif source_vols: - for source_vol in source_vols: - if vol.source_volid == source_vol.id: - vol_model_update = {'id': vol.id} - try: - driver_update = self.driver.create_cloned_volume( - vol, source_vol) - if driver_update: - driver_update.pop('id', None) - vol_model_update.update(driver_update) - if 'status' not in vol_model_update: - vol_model_update['status'] = 'available' - except Exception: - vol_model_update['status'] = 'error' - model_update['status'] = 'error' - volumes_model_update.append(vol_model_update) - break - - return model_update, volumes_model_update - - def _sort_snapshots(self, volumes, snapshots): - # Sort source snapshots so that they are in the same order as their - # corresponding target volumes. Each source snapshot in the snapshots - # list should have a corresponding target volume in the volumes list. 
- if not volumes or not snapshots or len(volumes) != len(snapshots): - msg = _("Input volumes or snapshots are invalid.") - LOG.error(msg) - raise exception.InvalidInput(reason=msg) - - sorted_snapshots = [] - for vol in volumes: - found_snaps = [snap for snap in snapshots - if snap['id'] == vol['snapshot_id']] - if not found_snaps: - LOG.error("Source snapshot cannot be found for target " - "volume %(volume_id)s.", - {'volume_id': vol['id']}) - raise exception.SnapshotNotFound( - snapshot_id=vol['snapshot_id']) - sorted_snapshots.extend(found_snaps) - - return sorted_snapshots - - def _sort_source_vols(self, volumes, source_vols): - # Sort source volumes so that they are in the same order as their - # corresponding target volumes. Each source volume in the source_vols - # list should have a corresponding target volume in the volumes list. - if not volumes or not source_vols or len(volumes) != len(source_vols): - msg = _("Input volumes or source volumes are invalid.") - LOG.error(msg) - raise exception.InvalidInput(reason=msg) - - sorted_source_vols = [] - for vol in volumes: - found_source_vols = [source_vol for source_vol in source_vols - if source_vol['id'] == vol['source_volid']] - if not found_source_vols: - LOG.error("Source volumes cannot be found for target " - "volume %(volume_id)s.", - {'volume_id': vol['id']}) - raise exception.VolumeNotFound( - volume_id=vol['source_volid']) - sorted_source_vols.extend(found_source_vols) - - return sorted_source_vols - - def _update_volume_from_src(self, context, vol, update, group=None): - try: - snapshot_id = vol.get('snapshot_id') - source_volid = vol.get('source_volid') - if snapshot_id: - snapshot = objects.Snapshot.get_by_id(context, snapshot_id) - orig_vref = self.db.volume_get(context, - snapshot.volume_id) - if orig_vref.bootable: - update['bootable'] = True - self.db.volume_glance_metadata_copy_to_volume( - context, vol['id'], snapshot_id) - if source_volid: - source_vol = objects.Volume.get_by_id(context, 
source_volid) - if source_vol.bootable: - update['bootable'] = True - self.db.volume_glance_metadata_copy_from_volume_to_volume( - context, source_volid, vol['id']) - if source_vol.multiattach: - update['multiattach'] = True - - except exception.SnapshotNotFound: - LOG.error("Source snapshot %(snapshot_id)s cannot be found.", - {'snapshot_id': vol['snapshot_id']}) - self.db.volume_update(context, vol['id'], - {'status': 'error'}) - if group: - group.status = fields.GroupStatus.ERROR - group.save() - raise - except exception.VolumeNotFound: - LOG.error("The source volume %(volume_id)s " - "cannot be found.", - {'volume_id': snapshot.volume_id}) - self.db.volume_update(context, vol['id'], - {'status': 'error'}) - if group: - group.status = fields.GroupStatus.ERROR - group.save() - raise - except exception.CinderException as ex: - LOG.error("Failed to update %(volume_id)s" - " metadata using the provided snapshot" - " %(snapshot_id)s metadata.", - {'volume_id': vol['id'], - 'snapshot_id': vol['snapshot_id']}) - self.db.volume_update(context, vol['id'], - {'status': 'error'}) - if group: - group.status = fields.GroupStatus.ERROR - group.save() - raise exception.MetadataCopyFailure(reason=six.text_type(ex)) - - self.db.volume_update(context, vol['id'], update) - - def _update_allocated_capacity(self, vol): - # Update allocated capacity in volume stats - pool = vol_utils.extract_host(vol['host'], 'pool') - if pool is None: - # Legacy volume, put them into default pool - pool = self.driver.configuration.safe_get( - 'volume_backend_name') or vol_utils.extract_host( - vol['host'], 'pool', True) - - try: - self.stats['pools'][pool]['allocated_capacity_gb'] += ( - vol['size']) - except KeyError: - self.stats['pools'][pool] = dict( - allocated_capacity_gb=vol['size']) - - def delete_group(self, context, group): - """Deletes group and the volumes in the group.""" - context = context.elevated() - project_id = group.project_id - - if context.project_id != group.project_id: - 
project_id = group.project_id - else: - project_id = context.project_id - - volumes = objects.VolumeList.get_all_by_generic_group( - context, group.id) - - for vol_obj in volumes: - if vol_obj.attach_status == "attached": - # Volume is still attached, need to detach first - raise exception.VolumeAttached(volume_id=vol_obj.id) - self._check_is_our_resource(vol_obj) - - self._notify_about_group_usage( - context, group, "delete.start") - - volumes_model_update = None - model_update = None - try: - utils.require_driver_initialized(self.driver) - - try: - model_update, volumes_model_update = ( - self.driver.delete_group(context, group, volumes)) - except NotImplementedError: - cgsnap_type = group_types.get_default_cgsnapshot_type() - if group.group_type_id != cgsnap_type['id']: - model_update, volumes_model_update = ( - self._delete_group_generic(context, group, volumes)) - else: - cg, volumes = self._convert_group_to_cg( - group, volumes) - model_update, volumes_model_update = ( - self.driver.delete_consistencygroup(context, cg, - volumes)) - self._remove_consistencygroup_id_from_volumes(volumes) - - if volumes_model_update: - for update in volumes_model_update: - # If we failed to delete a volume, make sure the - # status for the group is set to error as well - if (update['status'] in ['error_deleting', 'error'] - and model_update['status'] not in - ['error_deleting', 'error']): - model_update['status'] = update['status'] - self.db.volumes_update(context, volumes_model_update) - - if model_update: - if model_update['status'] in ['error_deleting', 'error']: - msg = (_('Delete group failed.')) - LOG.error(msg, - resource={'type': 'group', - 'id': group.id}) - raise exception.VolumeDriverException(message=msg) - else: - group.update(model_update) - group.save() - - except Exception: - with excutils.save_and_reraise_exception(): - group.status = fields.GroupStatus.ERROR - group.save() - # Update volume status to 'error' if driver returns - # None for volumes_model_update. 
- if not volumes_model_update: - self._remove_consistencygroup_id_from_volumes(volumes) - for vol_obj in volumes: - vol_obj.status = 'error' - vol_obj.save() - - # Get reservations for group - try: - reserve_opts = {'groups': -1} - grpreservations = GROUP_QUOTAS.reserve(context, - project_id=project_id, - **reserve_opts) - except Exception: - grpreservations = None - LOG.exception("Delete group " - "failed to update usages.", - resource={'type': 'group', - 'id': group.id}) - - for vol in volumes: - # Get reservations for volume - try: - reserve_opts = {'volumes': -1, - 'gigabytes': -vol.size} - QUOTAS.add_volume_type_opts(context, - reserve_opts, - vol.volume_type_id) - reservations = QUOTAS.reserve(context, - project_id=project_id, - **reserve_opts) - except Exception: - reservations = None - LOG.exception("Delete group " - "failed to update usages.", - resource={'type': 'group', - 'id': group.id}) - - # Delete glance metadata if it exists - self.db.volume_glance_metadata_delete_by_volume(context, vol.id) - - vol.destroy() - - # Commit the reservations - if reservations: - QUOTAS.commit(context, reservations, project_id=project_id) - - self.stats['allocated_capacity_gb'] -= vol.size - - if grpreservations: - GROUP_QUOTAS.commit(context, grpreservations, - project_id=project_id) - - group.destroy() - self._notify_about_group_usage( - context, group, "delete.end") - self.publish_service_capabilities(context) - LOG.info("Delete group " - "completed successfully.", - resource={'type': 'group', - 'id': group.id}) - - def _convert_group_to_cg(self, group, volumes): - if not group: - return None, None - cg = consistencygroup.ConsistencyGroup() - cg.from_group(group) - for vol in volumes: - vol.consistencygroup_id = vol.group_id - vol.consistencygroup = cg - - return cg, volumes - - def _remove_consistencygroup_id_from_volumes(self, volumes): - if not volumes: - return - for vol in volumes: - vol.consistencygroup_id = None - vol.consistencygroup = None - - def 
_convert_group_snapshot_to_cgsnapshot(self, group_snapshot, snapshots, - ctxt): - if not group_snapshot: - return None, None - cgsnap = cgsnapshot.CGSnapshot() - cgsnap.from_group_snapshot(group_snapshot) - - # Populate consistencygroup object - grp = objects.Group.get_by_id(ctxt, group_snapshot.group_id) - cg, __ = self._convert_group_to_cg(grp, []) - cgsnap.consistencygroup = cg - - for snap in snapshots: - snap.cgsnapshot_id = snap.group_snapshot_id - snap.cgsnapshot = cgsnap - - return cgsnap, snapshots - - def _remove_cgsnapshot_id_from_snapshots(self, snapshots): - if not snapshots: - return - for snap in snapshots: - snap.cgsnapshot_id = None - snap.cgsnapshot = None - - def _create_group_generic(self, context, group): - """Creates a group.""" - # A group entry is already created in db. Just returns a status here. - model_update = {'status': fields.GroupStatus.AVAILABLE, - 'created_at': timeutils.utcnow()} - return model_update - - def _delete_group_generic(self, context, group, volumes): - """Deletes a group and volumes in the group.""" - model_update = {'status': group.status} - volume_model_updates = [] - for volume_ref in volumes: - volume_model_update = {'id': volume_ref.id} - try: - self.driver.remove_export(context, volume_ref) - self.driver.delete_volume(volume_ref) - volume_model_update['status'] = 'deleted' - except exception.VolumeIsBusy: - volume_model_update['status'] = 'available' - except Exception: - volume_model_update['status'] = 'error' - model_update['status'] = fields.GroupStatus.ERROR - volume_model_updates.append(volume_model_update) - - return model_update, volume_model_updates - - def _update_group_generic(self, context, group, - add_volumes=None, remove_volumes=None): - """Updates a group.""" - # NOTE(xyang): The volume manager adds/removes the volume to/from the - # group in the database. This default implementation does not do - # anything in the backend storage. 
- return None, None, None - - def _collect_volumes_for_group(self, context, group, volumes, add=True): - if add: - valid_status = VALID_ADD_VOL_TO_GROUP_STATUS - else: - valid_status = VALID_REMOVE_VOL_FROM_GROUP_STATUS - volumes_ref = [] - if not volumes: - return volumes_ref - for add_vol in volumes.split(','): - try: - add_vol_ref = objects.Volume.get_by_id(context, add_vol) - except exception.VolumeNotFound: - LOG.error("Update group " - "failed to %(op)s volume-%(volume_id)s: " - "VolumeNotFound.", - {'volume_id': add_vol_ref.id, - 'op': 'add' if add else 'remove'}, - resource={'type': 'group', - 'id': group.id}) - raise - if add_vol_ref.status not in valid_status: - msg = (_("Can not %(op)s volume %(volume_id)s to " - "group %(group_id)s because volume is in an invalid " - "state: %(status)s. Valid states are: %(valid)s.") % - {'volume_id': add_vol_ref.id, - 'group_id': group.id, - 'status': add_vol_ref.status, - 'valid': valid_status, - 'op': 'add' if add else 'remove'}) - raise exception.InvalidVolume(reason=msg) - if add: - self._check_is_our_resource(add_vol_ref) - volumes_ref.append(add_vol_ref) - return volumes_ref - - def update_group(self, context, group, - add_volumes=None, remove_volumes=None): - """Updates group. - - Update group by adding volumes to the group, - or removing volumes from the group. 
- """ - - add_volumes_ref = self._collect_volumes_for_group(context, - group, - add_volumes, - add=True) - remove_volumes_ref = self._collect_volumes_for_group(context, - group, - remove_volumes, - add=False) - self._notify_about_group_usage( - context, group, "update.start") - - try: - utils.require_driver_initialized(self.driver) - - try: - model_update, add_volumes_update, remove_volumes_update = ( - self.driver.update_group( - context, group, - add_volumes=add_volumes_ref, - remove_volumes=remove_volumes_ref)) - except NotImplementedError: - cgsnap_type = group_types.get_default_cgsnapshot_type() - if group.group_type_id != cgsnap_type['id']: - model_update, add_volumes_update, remove_volumes_update = ( - self._update_group_generic( - context, group, - add_volumes=add_volumes_ref, - remove_volumes=remove_volumes_ref)) - else: - cg, remove_volumes_ref = self._convert_group_to_cg( - group, remove_volumes_ref) - model_update, add_volumes_update, remove_volumes_update = ( - self.driver.update_consistencygroup( - context, cg, - add_volumes=add_volumes_ref, - remove_volumes=remove_volumes_ref)) - self._remove_consistencygroup_id_from_volumes( - remove_volumes_ref) - - volumes_to_update = [] - if add_volumes_update: - volumes_to_update.extend(add_volumes_update) - if remove_volumes_update: - volumes_to_update.extend(remove_volumes_update) - self.db.volumes_update(context, volumes_to_update) - - if model_update: - if model_update['status'] in ( - [fields.GroupStatus.ERROR]): - msg = (_('Error occurred when updating group ' - '%s.') % group.id) - LOG.error(msg) - raise exception.VolumeDriverException(message=msg) - group.update(model_update) - group.save() - - except Exception as e: - with excutils.save_and_reraise_exception(): - if isinstance(e, exception.VolumeDriverException): - LOG.error("Error occurred in the volume driver when " - "updating group %(group_id)s.", - {'group_id': group.id}) - else: - LOG.error("Failed to update group %(group_id)s.", - {'group_id': 
group.id}) - group.status = fields.GroupStatus.ERROR - group.save() - for add_vol in add_volumes_ref: - add_vol.status = 'error' - add_vol.save() - for rem_vol in remove_volumes_ref: - if isinstance(e, exception.VolumeDriverException): - rem_vol.consistencygroup_id = None - rem_vol.consistencygroup = None - rem_vol.status = 'error' - rem_vol.save() - - for add_vol in add_volumes_ref: - add_vol.group_id = group.id - add_vol.save() - for rem_vol in remove_volumes_ref: - rem_vol.group_id = None - rem_vol.save() - group.status = fields.GroupStatus.AVAILABLE - group.save() - - self._notify_about_group_usage( - context, group, "update.end") - LOG.info("Update group completed successfully.", - resource={'type': 'group', - 'id': group.id}) - - def create_group_snapshot(self, context, group_snapshot): - """Creates the group_snapshot.""" - caller_context = context - context = context.elevated() - - LOG.info("GroupSnapshot %s: creating.", group_snapshot.id) - - snapshots = objects.SnapshotList.get_all_for_group_snapshot( - context, group_snapshot.id) - - self._notify_about_group_snapshot_usage( - context, group_snapshot, "create.start") - - snapshots_model_update = None - model_update = None - try: - utils.require_driver_initialized(self.driver) - - LOG.debug("Group snapshot %(grp_snap_id)s: creating.", - {'grp_snap_id': group_snapshot.id}) - - # Pass context so that drivers that want to use it, can, - # but it is not a requirement for all drivers. 
- group_snapshot.context = caller_context - for snapshot in snapshots: - snapshot.context = caller_context - - try: - model_update, snapshots_model_update = ( - self.driver.create_group_snapshot(context, group_snapshot, - snapshots)) - except NotImplementedError: - cgsnap_type = group_types.get_default_cgsnapshot_type() - if group_snapshot.group_type_id != cgsnap_type['id']: - model_update, snapshots_model_update = ( - self._create_group_snapshot_generic( - context, group_snapshot, snapshots)) - else: - cgsnapshot, snapshots = ( - self._convert_group_snapshot_to_cgsnapshot( - group_snapshot, snapshots, context)) - model_update, snapshots_model_update = ( - self.driver.create_cgsnapshot(context, cgsnapshot, - snapshots)) - self._remove_cgsnapshot_id_from_snapshots(snapshots) - if snapshots_model_update: - for snap_model in snapshots_model_update: - # Update db for snapshot. - # NOTE(xyang): snapshots is a list of snapshot objects. - # snapshots_model_update should be a list of dicts. - snap_id = snap_model.pop('id') - snap_obj = objects.Snapshot.get_by_id(context, snap_id) - snap_obj.update(snap_model) - snap_obj.save() - if (snap_model['status'] in [ - fields.SnapshotStatus.ERROR_DELETING, - fields.SnapshotStatus.ERROR] and - model_update['status'] not in - [fields.GroupSnapshotStatus.ERROR_DELETING, - fields.GroupSnapshotStatus.ERROR]): - model_update['status'] = snap_model['status'] - - if model_update: - if model_update['status'] == fields.GroupSnapshotStatus.ERROR: - msg = (_('Error occurred when creating group_snapshot ' - '%s.') % group_snapshot.id) - LOG.error(msg) - raise exception.VolumeDriverException(message=msg) - - group_snapshot.update(model_update) - group_snapshot.save() - - except exception.CinderException: - with excutils.save_and_reraise_exception(): - group_snapshot.status = fields.GroupSnapshotStatus.ERROR - group_snapshot.save() - # Update snapshot status to 'error' if driver returns - # None for snapshots_model_update. 
- self._remove_cgsnapshot_id_from_snapshots(snapshots) - if not snapshots_model_update: - for snapshot in snapshots: - snapshot.status = fields.SnapshotStatus.ERROR - snapshot.save() - - for snapshot in snapshots: - volume_id = snapshot.volume_id - snapshot_id = snapshot.id - vol_obj = objects.Volume.get_by_id(context, volume_id) - if vol_obj.bootable: - try: - self.db.volume_glance_metadata_copy_to_snapshot( - context, snapshot_id, volume_id) - except exception.GlanceMetadataNotFound: - # If volume is not created from image, No glance metadata - # would be available for that volume in - # volume glance metadata table - pass - except exception.CinderException as ex: - LOG.error("Failed updating %(snapshot_id)s" - " metadata using the provided volumes" - " %(volume_id)s metadata.", - {'volume_id': volume_id, - 'snapshot_id': snapshot_id}) - snapshot.status = fields.SnapshotStatus.ERROR - snapshot.save() - raise exception.MetadataCopyFailure( - reason=six.text_type(ex)) - - snapshot.status = fields.SnapshotStatus.AVAILABLE - snapshot.progress = '100%' - snapshot.save() - - group_snapshot.status = fields.GroupSnapshotStatus.AVAILABLE - group_snapshot.save() - - LOG.info("group_snapshot %s: created successfully", - group_snapshot.id) - self._notify_about_group_snapshot_usage( - context, group_snapshot, "create.end") - return group_snapshot - - def _create_group_snapshot_generic(self, context, group_snapshot, - snapshots): - """Creates a group_snapshot.""" - model_update = {'status': 'available'} - snapshot_model_updates = [] - for snapshot in snapshots: - snapshot_model_update = {'id': snapshot.id} - try: - driver_update = self.driver.create_snapshot(snapshot) - if driver_update: - driver_update.pop('id', None) - snapshot_model_update.update(driver_update) - if 'status' not in snapshot_model_update: - snapshot_model_update['status'] = ( - fields.SnapshotStatus.AVAILABLE) - except Exception: - snapshot_model_update['status'] = ( - fields.SnapshotStatus.ERROR) - 
model_update['status'] = 'error' - snapshot_model_updates.append(snapshot_model_update) - - return model_update, snapshot_model_updates - - def _delete_group_snapshot_generic(self, context, group_snapshot, - snapshots): - """Deletes a group_snapshot.""" - model_update = {'status': group_snapshot.status} - snapshot_model_updates = [] - for snapshot in snapshots: - snapshot_model_update = {'id': snapshot.id} - try: - self.driver.delete_snapshot(snapshot) - snapshot_model_update['status'] = ( - fields.SnapshotStatus.DELETED) - except exception.SnapshotIsBusy: - snapshot_model_update['status'] = ( - fields.SnapshotStatus.AVAILABLE) - except Exception: - snapshot_model_update['status'] = ( - fields.SnapshotStatus.ERROR) - model_update['status'] = 'error' - snapshot_model_updates.append(snapshot_model_update) - - return model_update, snapshot_model_updates - - def delete_group_snapshot(self, context, group_snapshot): - """Deletes group_snapshot.""" - caller_context = context - context = context.elevated() - project_id = group_snapshot.project_id - - LOG.info("group_snapshot %s: deleting", group_snapshot.id) - - snapshots = objects.SnapshotList.get_all_for_group_snapshot( - context, group_snapshot.id) - - self._notify_about_group_snapshot_usage( - context, group_snapshot, "delete.start") - - snapshots_model_update = None - model_update = None - try: - utils.require_driver_initialized(self.driver) - - LOG.debug("group_snapshot %(grp_snap_id)s: deleting", - {'grp_snap_id': group_snapshot.id}) - - # Pass context so that drivers that want to use it, can, - # but it is not a requirement for all drivers. 
- group_snapshot.context = caller_context - for snapshot in snapshots: - snapshot.context = caller_context - - try: - model_update, snapshots_model_update = ( - self.driver.delete_group_snapshot(context, group_snapshot, - snapshots)) - except NotImplementedError: - cgsnap_type = group_types.get_default_cgsnapshot_type() - if group_snapshot.group_type_id != cgsnap_type['id']: - model_update, snapshots_model_update = ( - self._delete_group_snapshot_generic( - context, group_snapshot, snapshots)) - else: - cgsnapshot, snapshots = ( - self._convert_group_snapshot_to_cgsnapshot( - group_snapshot, snapshots, context)) - model_update, snapshots_model_update = ( - self.driver.delete_cgsnapshot(context, cgsnapshot, - snapshots)) - self._remove_cgsnapshot_id_from_snapshots(snapshots) - - if snapshots_model_update: - for snap_model in snapshots_model_update: - # NOTE(xyang): snapshots is a list of snapshot objects. - # snapshots_model_update should be a list of dicts. - snap = next((item for item in snapshots if - item.id == snap_model['id']), None) - if snap: - snap_model.pop('id') - snap.update(snap_model) - snap.save() - - if (snap_model['status'] in - [fields.SnapshotStatus.ERROR_DELETING, - fields.SnapshotStatus.ERROR] and - model_update['status'] not in - ['error_deleting', 'error']): - model_update['status'] = snap_model['status'] - - if model_update: - if model_update['status'] in ['error_deleting', 'error']: - msg = (_('Error occurred when deleting group_snapshot ' - '%s.') % group_snapshot.id) - LOG.error(msg) - raise exception.VolumeDriverException(message=msg) - else: - group_snapshot.update(model_update) - group_snapshot.save() - - except exception.CinderException: - with excutils.save_and_reraise_exception(): - group_snapshot.status = fields.GroupSnapshotStatus.ERROR - group_snapshot.save() - # Update snapshot status to 'error' if driver returns - # None for snapshots_model_update. 
- if not snapshots_model_update: - self._remove_cgsnapshot_id_from_snapshots(snapshots) - for snapshot in snapshots: - snapshot.status = fields.SnapshotStatus.ERROR - snapshot.save() - - for snapshot in snapshots: - # Get reservations - try: - if CONF.no_snapshot_gb_quota: - reserve_opts = {'snapshots': -1} - else: - reserve_opts = { - 'snapshots': -1, - 'gigabytes': -snapshot.volume_size, - } - volume_ref = objects.Volume.get_by_id(context, - snapshot.volume_id) - QUOTAS.add_volume_type_opts(context, - reserve_opts, - volume_ref.volume_type_id) - reservations = QUOTAS.reserve(context, - project_id=project_id, - **reserve_opts) - - except Exception: - reservations = None - LOG.exception("Failed to update usages deleting snapshot") - - self.db.volume_glance_metadata_delete_by_snapshot(context, - snapshot.id) - snapshot.destroy() - - # Commit the reservations - if reservations: - QUOTAS.commit(context, reservations, project_id=project_id) - - group_snapshot.destroy() - LOG.info("group_snapshot %s: deleted successfully", - group_snapshot.id) - self._notify_about_group_snapshot_usage(context, group_snapshot, - "delete.end", - snapshots) - - def update_migrated_volume(self, ctxt, volume, new_volume, volume_status): - """Finalize migration process on backend device.""" - model_update = None - model_update_default = {'_name_id': new_volume.name_id, - 'provider_location': - new_volume.provider_location} - try: - model_update = self.driver.update_migrated_volume(ctxt, - volume, - new_volume, - volume_status) - except NotImplementedError: - # If update_migrated_volume is not implemented for the driver, - # _name_id and provider_location will be set with the values - # from new_volume. - model_update = model_update_default - if model_update: - model_update_default.update(model_update) - # Swap keys that were changed in the source so we keep their values - # in the temporary volume's DB record. 
- # Need to convert 'metadata' and 'admin_metadata' since - # they are not keys of volume, their corresponding keys are - # 'volume_metadata' and 'volume_admin_metadata'. - model_update_new = dict() - for key in model_update: - if key == 'metadata': - if volume.get('volume_metadata'): - model_update_new[key] = { - metadata['key']: metadata['value'] - for metadata in volume.volume_metadata} - elif key == 'admin_metadata': - model_update_new[key] = { - metadata['key']: metadata['value'] - for metadata in volume.volume_admin_metadata} - else: - model_update_new[key] = volume[key] - with new_volume.obj_as_admin(): - new_volume.update(model_update_new) - new_volume.save() - with volume.obj_as_admin(): - volume.update(model_update_default) - volume.save() - - # Replication V2.1 and a/a method - def failover(self, context, secondary_backend_id=None): - """Failover a backend to a secondary replication target. - - Instructs a replication capable/configured backend to failover - to one of it's secondary replication targets. host=None is - an acceetable input, and leaves it to the driver to failover - to the only configured target, or to choose a target on it's - own. All of the hosts volumes will be passed on to the driver - in order for it to determine the replicated volumes on the host, - if needed. - - :param context: security context - :param secondary_backend_id: Specifies backend_id to fail over to - """ - updates = {} - repl_status = fields.ReplicationStatus - - svc_host = vol_utils.extract_host(self.host, 'backend') - service = objects.Service.get_by_args(context, svc_host, - constants.VOLUME_BINARY) - - # TODO(geguileo): We should optimize these updates by doing them - # directly on the DB with just 3 queries, one to change the volumes - # another to change all the snapshots, and another to get replicated - # volumes. 
- - # Change non replicated volumes and their snapshots to error if we are - # failing over, leave them as they are for failback - volumes = self._get_my_volumes(context) - - replicated_vols = [] - for volume in volumes: - if volume.replication_status not in (repl_status.DISABLED, - repl_status.NOT_CAPABLE): - replicated_vols.append(volume) - elif secondary_backend_id != self.FAILBACK_SENTINEL: - volume.previous_status = volume.status - volume.status = 'error' - volume.replication_status = repl_status.NOT_CAPABLE - volume.save() - - for snapshot in volume.snapshots: - snapshot.status = fields.SnapshotStatus.ERROR - snapshot.save() - - volume_update_list = None - group_update_list = None - try: - # For non clustered we can call v2.1 failover_host, but for - # clustered we call a/a failover method. We know a/a method - # exists because BaseVD class wouldn't have started if it didn't. - failover = getattr(self.driver, - 'failover' if service.is_clustered - else 'failover_host') - # expected form of volume_update_list: - # [{volume_id: , updates: {'provider_id': xxxx....}}, - # {volume_id: , updates: {'provider_id': xxxx....}}] - # It includes volumes in replication groups and those not in them - # expected form of group_update_list: - # [{group_id: , updates: {'xxxx': xxxx....}}, - # {group_id: , updates: {'xxxx': xxxx....}}] - filters = self._get_cluster_or_host_filters() - groups = objects.GroupList.get_all_replicated(context, - filters=filters) - active_backend_id, volume_update_list, group_update_list = ( - failover(context, - replicated_vols, - secondary_id=secondary_backend_id, - groups=groups)) - try: - update_data = {u['volume_id']: u['updates'] - for u in volume_update_list} - except KeyError: - msg = "Update list, doesn't include volume_id" - raise exception.ProgrammingError(reason=msg) - try: - update_group_data = {g['group_id']: g['updates'] - for g in group_update_list} - except KeyError: - msg = "Update list, doesn't include group_id" - raise 
exception.ProgrammingError(reason=msg) - except Exception as exc: - # NOTE(jdg): Drivers need to be aware if they fail during - # a failover sequence, we're expecting them to cleanup - # and make sure the driver state is such that the original - # backend is still set as primary as per driver memory - - # We don't want to log the exception trace invalid replication - # target - if isinstance(exc, exception.InvalidReplicationTarget): - log_method = LOG.error - # Preserve the replication_status: Status should be failed over - # if we were failing back or if we were failing over from one - # secondary to another secondary. In both cases - # active_backend_id will be set. - if service.active_backend_id: - updates['replication_status'] = repl_status.FAILED_OVER - else: - updates['replication_status'] = repl_status.ENABLED - else: - log_method = LOG.exception - updates.update(disabled=True, - replication_status=repl_status.FAILOVER_ERROR) - - log_method("Error encountered during failover on host: %(host)s " - "to %(backend_id)s: %(error)s", - {'host': self.host, 'backend_id': secondary_backend_id, - 'error': exc}) - # We dump the update list for manual recovery - LOG.error('Failed update_list is: %s', volume_update_list) - self.finish_failover(context, service, updates) - return - - if secondary_backend_id == "default": - updates['replication_status'] = repl_status.ENABLED - updates['active_backend_id'] = '' - updates['disabled'] = service.frozen - updates['disabled_reason'] = 'frozen' if service.frozen else '' - else: - updates['replication_status'] = repl_status.FAILED_OVER - updates['active_backend_id'] = active_backend_id - updates['disabled'] = True - updates['disabled_reason'] = 'failed-over' - - self.finish_failover(context, service, updates) - - for volume in replicated_vols: - update = update_data.get(volume.id, {}) - if update.get('status', '') == 'error': - update['replication_status'] = repl_status.FAILOVER_ERROR - elif update.get('replication_status') in 
(None, - repl_status.FAILED_OVER): - update['replication_status'] = updates['replication_status'] - - if update['replication_status'] == repl_status.FAILOVER_ERROR: - update.setdefault('status', 'error') - # Set all volume snapshots to error - for snapshot in volume.snapshots: - snapshot.status = fields.SnapshotStatus.ERROR - snapshot.save() - if 'status' in update: - update['previous_status'] = volume.status - volume.update(update) - volume.save() - - for grp in groups: - update = update_group_data.get(grp.id, {}) - if update.get('status', '') == 'error': - update['replication_status'] = repl_status.FAILOVER_ERROR - elif update.get('replication_status') in (None, - repl_status.FAILED_OVER): - update['replication_status'] = updates['replication_status'] - - if update['replication_status'] == repl_status.FAILOVER_ERROR: - update.setdefault('status', 'error') - grp.update(update) - grp.save() - - LOG.info("Failed over to replication target successfully.") - - # TODO(geguileo): In P - remove this - failover_host = failover - - def finish_failover(self, context, service, updates): - """Completion of the failover locally or via RPC.""" - # If the service is clustered, broadcast the service changes to all - # volume services, including this one. - if service.is_clustered: - # We have to update the cluster with the same data, and we do it - # before broadcasting the failover_completed RPC call to prevent - # races with services that may be starting.. - for key, value in updates.items(): - setattr(service.cluster, key, value) - service.cluster.save() - rpcapi = volume_rpcapi.VolumeAPI() - rpcapi.failover_completed(context, service, updates) - else: - service.update(updates) - service.save() - - def failover_completed(self, context, updates): - """Finalize failover of this backend. - - When a service is clustered and replicated the failover has 2 stages, - one that does the failover of the volumes and another that finalizes - the failover of the services themselves. 
- - This method takes care of the last part and is called from the service - doing the failover of the volumes after finished processing the - volumes. - """ - svc_host = vol_utils.extract_host(self.host, 'backend') - service = objects.Service.get_by_args(context, svc_host, - constants.VOLUME_BINARY) - service.update(updates) - try: - self.driver.failover_completed(context, service.active_backend_id) - except Exception: - msg = _('Driver reported error during replication failover ' - 'completion.') - LOG.exception(msg) - service.disabled = True - service.disabled_reason = msg - service.replication_status = ( - fields.ReplicationStatus.ERROR) - service.save() - - def freeze_host(self, context): - """Freeze management plane on this backend. - - Basically puts the control/management plane into a - Read Only state. We should handle this in the scheduler, - however this is provided to let the driver know in case it - needs/wants to do something specific on the backend. - - :param context: security context - """ - # TODO(jdg): Return from driver? or catch? - # Update status column in service entry - try: - self.driver.freeze_backend(context) - except exception.VolumeDriverException: - # NOTE(jdg): In the case of freeze, we don't really - # need the backend's consent or anything, we'll just - # disable the service, so we can just log this and - # go about our business - LOG.warning('Error encountered on Cinder backend during ' - 'freeze operation, service is frozen, however ' - 'notification to driver has failed.') - svc_host = vol_utils.extract_host(self.host, 'backend') - - service = objects.Service.get_by_args( - context, - svc_host, - constants.VOLUME_BINARY) - service.disabled = True - service.disabled_reason = "frozen" - service.save() - LOG.info("Set backend status to frozen successfully.") - return True - - def thaw_host(self, context): - """UnFreeze management plane on this backend. - - Basically puts the control/management plane back into - a normal state. 
We should handle this in the scheduler, - however this is provided to let the driver know in case it - needs/wants to do something specific on the backend. - - :param context: security context - """ - - # TODO(jdg): Return from driver? or catch? - # Update status column in service entry - try: - self.driver.thaw_backend(context) - except exception.VolumeDriverException: - # NOTE(jdg): Thaw actually matters, if this call - # to the backend fails, we're stuck and can't re-enable - LOG.error('Error encountered on Cinder backend during ' - 'thaw operation, service will remain frozen.') - return False - svc_host = vol_utils.extract_host(self.host, 'backend') - - service = objects.Service.get_by_args( - context, - svc_host, - constants.VOLUME_BINARY) - service.disabled = False - service.disabled_reason = "" - service.save() - LOG.info("Thawed backend successfully.") - return True - - def manage_existing_snapshot(self, ctxt, snapshot, ref=None): - LOG.debug('manage_existing_snapshot: managing %s.', ref) - try: - flow_engine = manage_existing_snapshot.get_flow( - ctxt, - self.db, - self.driver, - self.host, - snapshot.id, - ref) - except Exception: - LOG.exception("Failed to create manage_existing flow: " - "%(object_type)s %(object_id)s.", - {'object_type': 'snapshot', - 'object_id': snapshot.id}) - raise exception.CinderException( - _("Failed to create manage existing flow.")) - - with flow_utils.DynamicLogListener(flow_engine, logger=LOG): - flow_engine.run() - return snapshot.id - - def get_manageable_snapshots(self, ctxt, marker, limit, offset, - sort_keys, sort_dirs, want_objects=False): - try: - utils.require_driver_initialized(self.driver) - except exception.DriverNotInitialized: - with excutils.save_and_reraise_exception(): - LOG.exception("Listing manageable snapshots failed, due " - "to uninitialized driver.") - - cinder_snapshots = self._get_my_snapshots(ctxt) - try: - driver_entries = self.driver.get_manageable_snapshots( - cinder_snapshots, marker, limit, 
offset, sort_keys, sort_dirs) - if want_objects: - driver_entries = (objects.ManageableSnapshotList. - from_primitives(ctxt, driver_entries)) - except Exception: - with excutils.save_and_reraise_exception(): - LOG.exception("Listing manageable snapshots failed, due " - "to driver error.") - return driver_entries - - def get_capabilities(self, context, discover): - """Get capabilities of backend storage.""" - if discover: - self.driver.init_capabilities() - capabilities = self.driver.capabilities - LOG.debug("Obtained capabilities list: %s.", capabilities) - return capabilities - - def get_backup_device(self, ctxt, backup, want_objects=False): - (backup_device, is_snapshot) = ( - self.driver.get_backup_device(ctxt, backup)) - secure_enabled = self.driver.secure_file_operations_enabled() - backup_device_dict = {'backup_device': backup_device, - 'secure_enabled': secure_enabled, - 'is_snapshot': is_snapshot, } - # TODO(sborkows): from_primitive method will be removed in O, so there - # is a need to clean here then. 
- return (objects.BackupDeviceInfo.from_primitive(backup_device_dict, - ctxt) - if want_objects else backup_device_dict) - - def secure_file_operations_enabled(self, ctxt, volume): - secure_enabled = self.driver.secure_file_operations_enabled() - return secure_enabled - - def _connection_create(self, ctxt, volume, attachment, connector): - try: - self.driver.validate_connector(connector) - except exception.InvalidConnectorException as err: - raise exception.InvalidInput(reason=six.text_type(err)) - except Exception as err: - err_msg = (_("Validate volume connection failed " - "(error: %(err)s).") % {'err': six.text_type(err)}) - LOG.error(err_msg, resource=volume) - raise exception.VolumeBackendAPIException(data=err_msg) - - try: - model_update = self.driver.create_export(ctxt.elevated(), - volume, connector) - except exception.CinderException as ex: - err_msg = (_("Create export for volume failed (%s).") % ex.msg) - LOG.exception(err_msg, resource=volume) - raise exception.VolumeBackendAPIException(data=err_msg) - - try: - if model_update: - volume.update(model_update) - volume.save() - except exception.CinderException as ex: - LOG.exception("Model update failed.", resource=volume) - raise exception.ExportFailure(reason=six.text_type(ex)) - - try: - conn_info = self.driver.initialize_connection(volume, connector) - except Exception as err: - err_msg = (_("Driver initialize connection failed " - "(error: %(err)s).") % {'err': six.text_type(err)}) - LOG.exception(err_msg, resource=volume) - self.driver.remove_export(ctxt.elevated(), volume) - raise exception.VolumeBackendAPIException(data=err_msg) - conn_info = self._parse_connection_options(ctxt, volume, conn_info) - - # NOTE(jdg): Get rid of the nested dict (data key) - conn_data = conn_info.pop('data', {}) - connection_info = conn_data.copy() - connection_info.update(conn_info) - values = {'volume_id': volume.id, - 'attach_status': 'attaching', } - - self.db.volume_attachment_update(ctxt, attachment.id, values) - 
self.db.attachment_specs_update_or_create( - ctxt, - attachment.id, - connector) - - connection_info['attachment_id'] = attachment.id - return connection_info - - def attachment_update(self, - context, - vref, - connector, - attachment_id): - """Update/Finalize an attachment. - - This call updates a valid attachment record to associate with a volume - and provide the caller with the proper connection info. Note that - this call requires an `attachment_ref`. It's expected that prior to - this call that the volume and an attachment UUID has been reserved. - - param: vref: Volume object to create attachment for - param: connector: Connector object to use for attachment creation - param: attachment_ref: ID of the attachment record to update - """ - - mode = connector.get('mode', 'rw') - self._notify_about_volume_usage(context, vref, 'attach.start') - attachment_ref = objects.VolumeAttachment.get_by_id(context, - attachment_id) - connection_info = self._connection_create(context, - vref, - attachment_ref, - connector) - # FIXME(jdg): get rid of this admin_meta option here, the only thing - # it does is enforce that a volume is R/O, that should be done via a - # type and not *more* metadata - volume_metadata = self.db.volume_admin_metadata_update( - context.elevated(), - attachment_ref.volume_id, - {'attached_mode': mode}, False) - - try: - if volume_metadata.get('readonly') == 'True' and mode != 'ro': - raise exception.InvalidVolumeAttachMode(mode=mode, - volume_id=vref.id) - utils.require_driver_initialized(self.driver) - self.driver.attach_volume(context, - vref, - attachment_ref.instance_uuid, - connector.get('host', ''), - connector.get('mountpoint', 'na')) - except Exception as err: - self.message_api.create( - context, message_field.Action.UPDATE_ATTACHMENT, - resource_uuid=vref.id, - exception=err) - with excutils.save_and_reraise_exception(): - self.db.volume_attachment_update( - context, attachment_ref.id, - {'attach_status': - 
fields.VolumeAttachStatus.ERROR_ATTACHING}) - - self.db.volume_attached(context.elevated(), - attachment_ref.id, - attachment_ref.instance_uuid, - connector.get('host', ''), - connector.get('mountpoint', 'na'), - mode) - vref.refresh() - self._notify_about_volume_usage(context, vref, "attach.end") - LOG.info("Attach volume completed successfully.", - resource=vref) - attachment_ref = objects.VolumeAttachment.get_by_id(context, - attachment_id) - return connection_info - - def _connection_terminate(self, context, volume, - attachment, force=False): - """Remove a volume connection, but leave attachment.""" - utils.require_driver_initialized(self.driver) - - # TODO(jdg): Add an object method to cover this - connector = self.db.attachment_specs_get( - context, - attachment.id) - - try: - shared_connections = self.driver.terminate_connection(volume, - connector, - force=force) - if not isinstance(shared_connections, bool): - shared_connections = False - - except Exception as err: - err_msg = (_('Terminate volume connection failed: %(err)s') - % {'err': six.text_type(err)}) - LOG.exception(err_msg, resource=volume) - raise exception.VolumeBackendAPIException(data=err_msg) - LOG.info("Terminate volume connection completed successfully.", - resource=volume) - # NOTE(jdg): Return True/False if there are other outstanding - # attachments that share this connection. If True should signify - # caller to preserve the actual host connection (work should be - # done in the brick connector as it has the knowledge of what's - # going on here. - return shared_connections - - def attachment_delete(self, context, attachment_id, vref): - """Delete/Detach the specified attachment. - - Notifies the backend device that we're detaching the specified - attachment instance. 
- - param: vref: Volume object associated with the attachment - param: attachment: Attachment reference object to remove - - NOTE if the attachment reference is None, we remove all existing - attachments for the specified volume object. - """ - attachment_ref = objects.VolumeAttachment.get_by_id(context, - attachment_id) - if not attachment_ref: - for attachment in VA_LIST.get_all_by_volume_id(context, vref.id): - self._do_attachment_delete(context, vref, attachment) - else: - self._do_attachment_delete(context, vref, attachment_ref) - - def _do_attachment_delete(self, context, vref, attachment): - utils.require_driver_initialized(self.driver) - self._notify_about_volume_usage(context, vref, "detach.start") - has_shared_connection = self._connection_terminate(context, - vref, - attachment) - try: - LOG.debug('Deleting attachment %(attachment_id)s.', - {'attachment_id': attachment.id}, - resource=vref) - self.driver.detach_volume(context, vref, attachment) - if not has_shared_connection: - self.driver.remove_export(context.elevated(), vref) - except Exception: - # FIXME(jdg): Obviously our volume object is going to need some - # changes to deal with multi-attach and figuring out how to - # represent a single failed attach out of multiple attachments - - # TODO(jdg): object method here - self.db.volume_attachment_update( - context, attachment.get('id'), - {'attach_status': 'error_detaching'}) - else: - self.db.volume_detached(context.elevated(), vref.id, - attachment.get('id')) - self.db.volume_admin_metadata_delete(context.elevated(), - vref.id, - 'attached_mode') - self._notify_about_volume_usage(context, vref, "detach.end") - - # Replication group API (Tiramisu) - def enable_replication(self, ctxt, group): - """Enable replication.""" - group.refresh() - if group.replication_status != fields.ReplicationStatus.ENABLING: - msg = _("Replication status in group %s is not " - "enabling. 
Cannot enable replication.") % group.id - LOG.error(msg) - raise exception.InvalidGroup(reason=msg) - - volumes = group.volumes - for vol in volumes: - vol.refresh() - if vol.replication_status != fields.ReplicationStatus.ENABLING: - msg = _("Replication status in volume %s is not " - "enabling. Cannot enable replication.") % vol.id - LOG.error(msg) - raise exception.InvalidVolume(reason=msg) - - self._notify_about_group_usage( - ctxt, group, "enable_replication.start") - - volumes_model_update = None - model_update = None - try: - utils.require_driver_initialized(self.driver) - - model_update, volumes_model_update = ( - self.driver.enable_replication(ctxt, group, volumes)) - - if volumes_model_update: - for update in volumes_model_update: - vol_obj = objects.Volume.get_by_id(ctxt, update['id']) - vol_obj.update(update) - vol_obj.save() - # If we failed to enable a volume, make sure the status - # for the group is set to error as well - if (update.get('replication_status') == - fields.ReplicationStatus.ERROR and - model_update.get('replication_status') != - fields.ReplicationStatus.ERROR): - model_update['replication_status'] = update.get( - 'replication_status') - - if model_update: - if (model_update.get('replication_status') == - fields.ReplicationStatus.ERROR): - msg = _('Enable replication failed.') - LOG.error(msg, - resource={'type': 'group', - 'id': group.id}) - raise exception.VolumeDriverException(message=msg) - else: - group.update(model_update) - group.save() - - except exception.CinderException as ex: - group.status = fields.GroupStatus.ERROR - group.replication_status = fields.ReplicationStatus.ERROR - group.save() - # Update volume status to 'error' if driver returns - # None for volumes_model_update. 
- if not volumes_model_update: - for vol in volumes: - vol.status = 'error' - vol.replication_status = fields.ReplicationStatus.ERROR - vol.save() - err_msg = _("Enable replication group failed: " - "%s.") % six.text_type(ex) - raise exception.ReplicationGroupError(reason=err_msg, - group_id=group.id) - - for vol in volumes: - vol.replication_status = fields.ReplicationStatus.ENABLED - vol.save() - group.replication_status = fields.ReplicationStatus.ENABLED - group.save() - - self._notify_about_group_usage( - ctxt, group, "enable_replication.end", volumes) - LOG.info("Enable replication completed successfully.", - resource={'type': 'group', - 'id': group.id}) - - # Replication group API (Tiramisu) - def disable_replication(self, ctxt, group): - """Disable replication.""" - group.refresh() - if group.replication_status != fields.ReplicationStatus.DISABLING: - msg = _("Replication status in group %s is not " - "disabling. Cannot disable replication.") % group.id - LOG.error(msg) - raise exception.InvalidGroup(reason=msg) - - volumes = group.volumes - for vol in volumes: - vol.refresh() - if (vol.replication_status != - fields.ReplicationStatus.DISABLING): - msg = _("Replication status in volume %s is not " - "disabling. 
Cannot disable replication.") % vol.id - LOG.error(msg) - raise exception.InvalidVolume(reason=msg) - - self._notify_about_group_usage( - ctxt, group, "disable_replication.start") - - volumes_model_update = None - model_update = None - try: - utils.require_driver_initialized(self.driver) - - model_update, volumes_model_update = ( - self.driver.disable_replication(ctxt, group, volumes)) - - if volumes_model_update: - for update in volumes_model_update: - vol_obj = objects.Volume.get_by_id(ctxt, update['id']) - vol_obj.update(update) - vol_obj.save() - # If we failed to enable a volume, make sure the status - # for the group is set to error as well - if (update.get('replication_status') == - fields.ReplicationStatus.ERROR and - model_update.get('replication_status') != - fields.ReplicationStatus.ERROR): - model_update['replication_status'] = update.get( - 'replication_status') - - if model_update: - if (model_update.get('replication_status') == - fields.ReplicationStatus.ERROR): - msg = _('Disable replication failed.') - LOG.error(msg, - resource={'type': 'group', - 'id': group.id}) - raise exception.VolumeDriverException(message=msg) - else: - group.update(model_update) - group.save() - - except exception.CinderException as ex: - group.status = fields.GroupStatus.ERROR - group.replication_status = fields.ReplicationStatus.ERROR - group.save() - # Update volume status to 'error' if driver returns - # None for volumes_model_update. 
- if not volumes_model_update: - for vol in volumes: - vol.status = 'error' - vol.replication_status = fields.ReplicationStatus.ERROR - vol.save() - err_msg = _("Disable replication group failed: " - "%s.") % six.text_type(ex) - raise exception.ReplicationGroupError(reason=err_msg, - group_id=group.id) - - for vol in volumes: - vol.replication_status = fields.ReplicationStatus.DISABLED - vol.save() - group.replication_status = fields.ReplicationStatus.DISABLED - group.save() - - self._notify_about_group_usage( - ctxt, group, "disable_replication.end", volumes) - LOG.info("Disable replication completed successfully.", - resource={'type': 'group', - 'id': group.id}) - - # Replication group API (Tiramisu) - def failover_replication(self, ctxt, group, allow_attached_volume=False, - secondary_backend_id=None): - """Failover replication.""" - group.refresh() - if group.replication_status != fields.ReplicationStatus.FAILING_OVER: - msg = _("Replication status in group %s is not " - "failing-over. Cannot failover replication.") % group.id - LOG.error(msg) - raise exception.InvalidGroup(reason=msg) - - volumes = group.volumes - for vol in volumes: - vol.refresh() - if vol.status == 'in-use' and not allow_attached_volume: - msg = _("Volume %s is attached but allow_attached_volume flag " - "is False. Cannot failover replication.") % vol.id - LOG.error(msg) - raise exception.InvalidVolume(reason=msg) - if (vol.replication_status != - fields.ReplicationStatus.FAILING_OVER): - msg = _("Replication status in volume %s is not " - "failing-over. 
Cannot failover replication.") % vol.id - LOG.error(msg) - raise exception.InvalidVolume(reason=msg) - - self._notify_about_group_usage( - ctxt, group, "failover_replication.start") - - volumes_model_update = None - model_update = None - try: - utils.require_driver_initialized(self.driver) - - model_update, volumes_model_update = ( - self.driver.failover_replication( - ctxt, group, volumes, secondary_backend_id)) - - if volumes_model_update: - for update in volumes_model_update: - vol_obj = objects.Volume.get_by_id(ctxt, update['id']) - vol_obj.update(update) - vol_obj.save() - # If we failed to enable a volume, make sure the status - # for the group is set to error as well - if (update.get('replication_status') == - fields.ReplicationStatus.ERROR and - model_update.get('replication_status') != - fields.ReplicationStatus.ERROR): - model_update['replication_status'] = update.get( - 'replication_status') - - if model_update: - if (model_update.get('replication_status') == - fields.ReplicationStatus.ERROR): - msg = _('Failover replication failed.') - LOG.error(msg, - resource={'type': 'group', - 'id': group.id}) - raise exception.VolumeDriverException(message=msg) - else: - group.update(model_update) - group.save() - - except exception.CinderException as ex: - group.status = fields.GroupStatus.ERROR - group.replication_status = fields.ReplicationStatus.ERROR - group.save() - # Update volume status to 'error' if driver returns - # None for volumes_model_update. 
- if not volumes_model_update: - for vol in volumes: - vol.status = 'error' - vol.replication_status = fields.ReplicationStatus.ERROR - vol.save() - err_msg = _("Failover replication group failed: " - "%s.") % six.text_type(ex) - raise exception.ReplicationGroupError(reason=err_msg, - group_id=group.id) - - for vol in volumes: - if secondary_backend_id == "default": - vol.replication_status = fields.ReplicationStatus.ENABLED - else: - vol.replication_status = ( - fields.ReplicationStatus.FAILED_OVER) - vol.save() - if secondary_backend_id == "default": - group.replication_status = fields.ReplicationStatus.ENABLED - else: - group.replication_status = fields.ReplicationStatus.FAILED_OVER - group.save() - - self._notify_about_group_usage( - ctxt, group, "failover_replication.end", volumes) - LOG.info("Failover replication completed successfully.", - resource={'type': 'group', - 'id': group.id}) - - def list_replication_targets(self, ctxt, group): - """Provide a means to obtain replication targets for a group. - - This method is used to find the replication_device config - info. 'backend_id' is a required key in 'replication_device'. - - Response Example for admin: - { - 'replication_targets': [ - { - 'backend_id': 'vendor-id-1', - 'unique_key': 'val1', - ...... - }, - { - 'backend_id': 'vendor-id-2', - 'unique_key': 'val2', - ...... 
- } - ] - } - - Response example for non-admin: - { - 'replication_targets': [ - { - 'backend_id': 'vendor-id-1' - }, - { - 'backend_id': 'vendor-id-2' - } - ] - } - - """ - - replication_targets = [] - try: - group = objects.Group.get_by_id(ctxt, group.id) - if self.configuration.replication_device: - if ctxt.is_admin: - for rep_dev in self.configuration.replication_device: - keys = rep_dev.keys() - dev = {} - for k in keys: - dev[k] = rep_dev[k] - replication_targets.append(dev) - else: - for rep_dev in self.configuration.replication_device: - dev = rep_dev.get('backend_id') - if dev: - replication_targets.append({'backend_id': dev}) - - except exception.GroupNotFound: - err_msg = (_("Get replication targets failed. Group %s not " - "found.") % group.id) - LOG.exception(err_msg) - raise exception.VolumeBackendAPIException(data=err_msg) - - return {'replication_targets': replication_targets} diff --git a/cinder/volume/qos_specs.py b/cinder/volume/qos_specs.py deleted file mode 100644 index 33be69c29..000000000 --- a/cinder/volume/qos_specs.py +++ /dev/null @@ -1,235 +0,0 @@ -# Copyright (c) 2013 eBay Inc. -# Copyright (c) 2013 OpenStack Foundation -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -"""The QoS Specs Implementation""" - - -from oslo_db import exception as db_exc -from oslo_log import log as logging - -from cinder import context -from cinder import db -from cinder import exception -from cinder.i18n import _ -from cinder import objects -from cinder.volume import volume_types - - -LOG = logging.getLogger(__name__) - -CONTROL_LOCATION = ['front-end', 'back-end', 'both'] - - -def create(context, name, specs=None): - """Creates qos_specs. - - :param specs dictionary that contains specifications for QoS - e.g. {'consumer': 'front-end', - 'total_iops_sec': 1000, - 'total_bytes_sec': 1024000} - """ - consumer = specs.get('consumer') - if consumer: - # If we need to modify specs, copy so we don't cause unintended - # consequences for the caller - specs = specs.copy() - del specs['consumer'] - - values = dict(name=name, consumer=consumer, specs=specs) - - LOG.debug("Dict for qos_specs: %s", values) - qos_spec = objects.QualityOfServiceSpecs(context, **values) - qos_spec.create() - return qos_spec - - -def update(context, qos_specs_id, specs): - """Update qos specs. - - :param specs: dictionary that contains key/value pairs for updating - existing specs. - e.g. {'consumer': 'front-end', - 'total_iops_sec': 500, - 'total_bytes_sec': 512000,} - """ - LOG.debug('qos_specs.update(): specs %s', specs) - - try: - qos_spec = objects.QualityOfServiceSpecs.get_by_id(context, - qos_specs_id) - - if 'consumer' in specs: - qos_spec.consumer = specs['consumer'] - # If we need to modify specs, copy so we don't cause unintended - # consequences for the caller - specs = specs.copy() - del specs['consumer'] - - # Update any values in specs dict - qos_spec.specs.update(specs) - - qos_spec.save() - except db_exc.DBError: - LOG.exception('DB error:') - raise exception.QoSSpecsUpdateFailed(specs_id=qos_specs_id, - qos_specs=specs) - - return qos_spec - - -def delete(context, qos_specs_id, force=False): - """Marks qos specs as deleted. 
- - 'force' parameter is a flag to determine whether should destroy - should continue when there were entities associated with the qos specs. - force=True indicates caller would like to mark qos specs as deleted - even if there was entities associate with target qos specs. - Trying to delete a qos specs still associated with entities will - cause QoSSpecsInUse exception if force=False (default). - """ - if qos_specs_id is None: - msg = _("id cannot be None") - raise exception.InvalidQoSSpecs(reason=msg) - - qos_spec = objects.QualityOfServiceSpecs.get_by_id( - context, qos_specs_id) - - qos_spec.destroy(force) - - -def delete_keys(context, qos_specs_id, keys): - """Marks specified key of target qos specs as deleted.""" - if qos_specs_id is None: - msg = _("id cannot be None") - raise exception.InvalidQoSSpecs(reason=msg) - - qos_spec = objects.QualityOfServiceSpecs.get_by_id(context, qos_specs_id) - - # Previous behavior continued to delete keys until it hit first unset one, - # so for now will mimic that. 
In the future it would be useful to have all - # or nothing deletion of keys (or at least delete all set keys), - # especially since order of keys from CLI to API is not preserved currently - try: - for key in keys: - try: - del qos_spec.specs[key] - except KeyError: - raise exception.QoSSpecsKeyNotFound( - specs_key=key, specs_id=qos_specs_id) - finally: - qos_spec.save() - - -def get_associations(context, qos_specs_id): - """Get all associations of given qos specs.""" - try: - types = objects.VolumeTypeList.get_all_types_for_qos(context, - qos_specs_id) - except db_exc.DBError: - LOG.exception('DB error:') - msg = _('Failed to get all associations of ' - 'qos specs %s') % qos_specs_id - LOG.warning(msg) - raise exception.CinderException(message=msg) - - result = [] - for vol_type in types: - result.append({ - 'association_type': 'volume_type', - 'name': vol_type.name, - 'id': vol_type.id - }) - - return result - - -def associate_qos_with_type(context, specs_id, type_id): - """Associate qos_specs with volume type. - - Associate target qos specs with specific volume type. - - :param specs_id: qos specs ID to associate with - :param type_id: volume type ID to associate with - :raises VolumeTypeNotFound: if volume type doesn't exist - :raises QoSSpecsNotFound: if qos specs doesn't exist - :raises InvalidVolumeType: if volume type is already associated - with qos specs other than given one. 
- :raises QoSSpecsAssociateFailed: if there was general DB error - """ - try: - get_qos_specs(context, specs_id) - res = volume_types.get_volume_type_qos_specs(type_id) - if res.get('qos_specs', None): - if res['qos_specs'].get('id') != specs_id: - msg = (_("Type %(type_id)s is already associated with another " - "qos specs: %(qos_specs_id)s") % - {'type_id': type_id, - 'qos_specs_id': res['qos_specs']['id']}) - raise exception.InvalidVolumeType(reason=msg) - else: - db.qos_specs_associate(context, specs_id, type_id) - except db_exc.DBError: - LOG.exception('DB error:') - LOG.warning('Failed to associate qos specs ' - '%(id)s with type: %(vol_type_id)s', - dict(id=specs_id, vol_type_id=type_id)) - raise exception.QoSSpecsAssociateFailed(specs_id=specs_id, - type_id=type_id) - - -def disassociate_qos_specs(context, specs_id, type_id): - """Disassociate qos_specs from volume type.""" - try: - get_qos_specs(context, specs_id) - db.qos_specs_disassociate(context, specs_id, type_id) - except db_exc.DBError: - LOG.exception('DB error:') - LOG.warning('Failed to disassociate qos specs ' - '%(id)s with type: %(vol_type_id)s', - dict(id=specs_id, vol_type_id=type_id)) - raise exception.QoSSpecsDisassociateFailed(specs_id=specs_id, - type_id=type_id) - - -def disassociate_all(context, specs_id): - """Disassociate qos_specs from all entities.""" - try: - get_qos_specs(context, specs_id) - db.qos_specs_disassociate_all(context, specs_id) - except db_exc.DBError: - LOG.exception('DB error:') - LOG.warning('Failed to disassociate qos specs %s.', specs_id) - raise exception.QoSSpecsDisassociateFailed(specs_id=specs_id, - type_id=None) - - -def get_all_specs(context, filters=None, marker=None, limit=None, offset=None, - sort_keys=None, sort_dirs=None): - """Get all non-deleted qos specs.""" - return objects.QualityOfServiceSpecsList.get_all( - context, filters=filters, marker=marker, limit=limit, offset=offset, - sort_keys=sort_keys, sort_dirs=sort_dirs) - - -def 
get_qos_specs(ctxt, spec_id): - """Retrieves single qos specs by id.""" - if spec_id is None: - msg = _("id cannot be None") - raise exception.InvalidQoSSpecs(reason=msg) - - if ctxt is None: - ctxt = context.get_admin_context() - - return objects.QualityOfServiceSpecs.get_by_id(ctxt, spec_id) diff --git a/cinder/volume/rpcapi.py b/cinder/volume/rpcapi.py deleted file mode 100644 index 443bab6da..000000000 --- a/cinder/volume/rpcapi.py +++ /dev/null @@ -1,497 +0,0 @@ -# Copyright 2012, Intel, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - - -from cinder.common import constants -from cinder import objects -from cinder import quota -from cinder import rpc -from cinder.volume import utils - - -QUOTAS = quota.QUOTAS - - -class VolumeAPI(rpc.RPCAPI): - """Client side of the volume rpc API. - - API version history: - - .. code-block:: none - - 1.0 - Initial version. - 1.1 - Adds clone volume option to create_volume. - 1.2 - Add publish_service_capabilities() method. - 1.3 - Pass all image metadata (not just ID) in copy_volume_to_image. - 1.4 - Add request_spec, filter_properties and - allow_reschedule arguments to create_volume(). - 1.5 - Add accept_transfer. - 1.6 - Add extend_volume. - 1.7 - Adds host_name parameter to attach_volume() - to allow attaching to host rather than instance. - 1.8 - Add migrate_volume, rename_volume. - 1.9 - Add new_user and new_project to accept_transfer. - 1.10 - Add migrate_volume_completion, remove rename_volume. 
- 1.11 - Adds mode parameter to attach_volume() - to support volume read-only attaching. - 1.12 - Adds retype. - 1.13 - Adds create_export. - 1.14 - Adds reservation parameter to extend_volume(). - 1.15 - Adds manage_existing and unmanage_only flag to delete_volume. - 1.16 - Removes create_export. - 1.17 - Add replica option to create_volume, promote_replica and - sync_replica. - 1.18 - Adds create_consistencygroup, delete_consistencygroup, - create_cgsnapshot, and delete_cgsnapshot. Also adds - the consistencygroup_id parameter in create_volume. - 1.19 - Adds update_migrated_volume - 1.20 - Adds support for sending objects over RPC in create_snapshot() - and delete_snapshot() - 1.21 - Adds update_consistencygroup. - 1.22 - Adds create_consistencygroup_from_src. - 1.23 - Adds attachment_id to detach_volume. - 1.24 - Removed duplicated parameters: snapshot_id, image_id, - source_volid, source_replicaid, consistencygroup_id and - cgsnapshot_id from create_volume. All off them are already - passed either in request_spec or available in the DB. - 1.25 - Add source_cg to create_consistencygroup_from_src. - 1.26 - Adds support for sending objects over RPC in - create_consistencygroup(), create_consistencygroup_from_src(), - update_consistencygroup() and delete_consistencygroup(). - 1.27 - Adds support for replication V2 - 1.28 - Adds manage_existing_snapshot - 1.29 - Adds get_capabilities. - 1.30 - Adds remove_export - 1.31 - Updated: create_consistencygroup_from_src(), create_cgsnapshot() - and delete_cgsnapshot() to cast method only with necessary - args. Forwarding CGSnapshot object instead of CGSnapshot_id. - 1.32 - Adds support for sending objects over RPC in create_volume(). - 1.33 - Adds support for sending objects over RPC in delete_volume(). - 1.34 - Adds support for sending objects over RPC in retype(). - 1.35 - Adds support for sending objects over RPC in extend_volume(). 
- 1.36 - Adds support for sending objects over RPC in migrate_volume(), - migrate_volume_completion(), and update_migrated_volume(). - 1.37 - Adds old_reservations parameter to retype to support quota - checks in the API. - 1.38 - Scaling backup service, add get_backup_device() and - secure_file_operations_enabled() - 1.39 - Update replication methods to reflect new backend rep strategy - 1.40 - Add cascade option to delete_volume(). - - ... Mitaka supports messaging version 1.40. Any changes to existing - methods in 1.x after that point should be done so that they can handle - the version_cap being set to 1.40. - - 2.0 - Remove 1.x compatibility - 2.1 - Add get_manageable_volumes() and get_manageable_snapshots(). - 2.2 - Adds support for sending objects over RPC in manage_existing(). - 2.3 - Adds support for sending objects over RPC in - initialize_connection(). - 2.4 - Sends request_spec as object in create_volume(). - 2.5 - Adds create_group, delete_group, and update_group - 2.6 - Adds create_group_snapshot, delete_group_snapshot, and - create_group_from_src(). - - ... Newton supports messaging version 2.6. Any changes to existing - methods in 2.x after that point should be done so that they can handle - the version_cap being set to 2.6. - - 3.0 - Drop 2.x compatibility - 3.1 - Remove promote_replica and reenable_replication. This is - non-backward compatible, but the user-facing API was removed - back in Mitaka when introducing cheesecake replication. - 3.2 - Adds support for sending objects over RPC in - get_backup_device(). - 3.3 - Adds support for sending objects over RPC in attach_volume(). - 3.4 - Adds support for sending objects over RPC in detach_volume(). - 3.5 - Adds support for cluster in retype and migrate_volume - 3.6 - Switch to use oslo.messaging topics to indicate backends instead - of @backend suffixes in server names. - 3.7 - Adds do_cleanup method to do volume cleanups from other nodes - that we were doing in init_host. 
- 3.8 - Make failover_host cluster aware and add failover_completed. - 3.9 - Adds new attach/detach methods - 3.10 - Returning objects instead of raw dictionaries in - get_manageable_volumes & get_manageable_snapshots - 3.11 - Removes create_consistencygroup, delete_consistencygroup, - create_cgsnapshot, delete_cgsnapshot, update_consistencygroup, - and create_consistencygroup_from_src. - 3.12 - Adds set_log_levels and get_log_levels - 3.13 - Add initialize_connection_snapshot, - terminate_connection_snapshot, and remove_export_snapshot. - 3.14 - Adds enable_replication, disable_replication, - failover_replication, and list_replication_targets. - 3.15 - Add revert_to_snapshot method - """ - - RPC_API_VERSION = '3.15' - RPC_DEFAULT_VERSION = '3.0' - TOPIC = constants.VOLUME_TOPIC - BINARY = 'cinder-volume' - - def _get_cctxt(self, host=None, version=None, **kwargs): - if host: - server = utils.extract_host(host) - - # TODO(dulek): If we're pinned before 3.6, we should send stuff the - # old way - addressing server=host@backend, topic=cinder-volume. - # Otherwise we're addressing server=host, - # topic=cinder-volume.host@backend. This conditional can go away - # when we stop supporting 3.x. 
- if self.client.can_send_version('3.6'): - kwargs['topic'] = '%(topic)s.%(host)s' % {'topic': self.TOPIC, - 'host': server} - server = utils.extract_host(server, 'host') - kwargs['server'] = server - - return super(VolumeAPI, self)._get_cctxt(version=version, **kwargs) - - def create_volume(self, ctxt, volume, request_spec, filter_properties, - allow_reschedule=True): - cctxt = self._get_cctxt(volume.service_topic_queue) - cctxt.cast(ctxt, 'create_volume', - request_spec=request_spec, - filter_properties=filter_properties, - allow_reschedule=allow_reschedule, - volume=volume) - - @rpc.assert_min_rpc_version('3.15') - def revert_to_snapshot(self, ctxt, volume, snapshot): - version = self._compat_ver('3.15') - cctxt = self._get_cctxt(volume.host, version) - cctxt.cast(ctxt, 'revert_to_snapshot', volume=volume, - snapshot=snapshot) - - def delete_volume(self, ctxt, volume, unmanage_only=False, cascade=False): - volume.create_worker() - cctxt = self._get_cctxt(volume.service_topic_queue) - msg_args = { - 'volume': volume, 'unmanage_only': unmanage_only, - 'cascade': cascade, - } - - cctxt.cast(ctxt, 'delete_volume', **msg_args) - - def create_snapshot(self, ctxt, volume, snapshot): - snapshot.create_worker() - cctxt = self._get_cctxt(volume.service_topic_queue) - cctxt.cast(ctxt, 'create_snapshot', snapshot=snapshot) - - def delete_snapshot(self, ctxt, snapshot, unmanage_only=False): - cctxt = self._get_cctxt(snapshot.service_topic_queue) - cctxt.cast(ctxt, 'delete_snapshot', snapshot=snapshot, - unmanage_only=unmanage_only) - - def attach_volume(self, ctxt, volume, instance_uuid, host_name, - mountpoint, mode): - msg_args = {'volume_id': volume.id, - 'instance_uuid': instance_uuid, - 'host_name': host_name, - 'mountpoint': mountpoint, - 'mode': mode, - 'volume': volume} - cctxt = self._get_cctxt(volume.service_topic_queue, ('3.3', '3.0')) - if not cctxt.can_send_version('3.3'): - msg_args.pop('volume') - return cctxt.call(ctxt, 'attach_volume', **msg_args) - - def 
detach_volume(self, ctxt, volume, attachment_id): - msg_args = {'volume_id': volume.id, - 'attachment_id': attachment_id, - 'volume': volume} - cctxt = self._get_cctxt(volume.service_topic_queue, ('3.4', '3.0')) - if not self.client.can_send_version('3.4'): - msg_args.pop('volume') - return cctxt.call(ctxt, 'detach_volume', **msg_args) - - def copy_volume_to_image(self, ctxt, volume, image_meta): - cctxt = self._get_cctxt(volume.service_topic_queue) - cctxt.cast(ctxt, 'copy_volume_to_image', volume_id=volume['id'], - image_meta=image_meta) - - def initialize_connection(self, ctxt, volume, connector): - cctxt = self._get_cctxt(volume.service_topic_queue) - return cctxt.call(ctxt, 'initialize_connection', connector=connector, - volume=volume) - - def terminate_connection(self, ctxt, volume, connector, force=False): - cctxt = self._get_cctxt(volume.service_topic_queue) - return cctxt.call(ctxt, 'terminate_connection', volume_id=volume['id'], - connector=connector, force=force) - - def remove_export(self, ctxt, volume): - cctxt = self._get_cctxt(volume.service_topic_queue) - cctxt.cast(ctxt, 'remove_export', volume_id=volume['id']) - - def publish_service_capabilities(self, ctxt): - cctxt = self._get_cctxt(fanout=True) - cctxt.cast(ctxt, 'publish_service_capabilities') - - def accept_transfer(self, ctxt, volume, new_user, new_project): - cctxt = self._get_cctxt(volume.service_topic_queue) - return cctxt.call(ctxt, 'accept_transfer', volume_id=volume['id'], - new_user=new_user, new_project=new_project) - - def extend_volume(self, ctxt, volume, new_size, reservations): - cctxt = self._get_cctxt(volume.service_topic_queue) - cctxt.cast(ctxt, 'extend_volume', volume=volume, new_size=new_size, - reservations=reservations) - - def migrate_volume(self, ctxt, volume, dest_backend, force_host_copy): - backend_p = {'host': dest_backend.host, - 'cluster_name': dest_backend.cluster_name, - 'capabilities': dest_backend.capabilities} - - version = '3.5' - if not 
self.client.can_send_version(version): - version = '3.0' - del backend_p['cluster_name'] - - cctxt = self._get_cctxt(volume.service_topic_queue, version) - cctxt.cast(ctxt, 'migrate_volume', volume=volume, host=backend_p, - force_host_copy=force_host_copy) - - def migrate_volume_completion(self, ctxt, volume, new_volume, error): - cctxt = self._get_cctxt(volume.service_topic_queue) - return cctxt.call(ctxt, 'migrate_volume_completion', volume=volume, - new_volume=new_volume, error=error,) - - def retype(self, ctxt, volume, new_type_id, dest_backend, - migration_policy='never', reservations=None, - old_reservations=None): - backend_p = {'host': dest_backend.host, - 'cluster_name': dest_backend.cluster_name, - 'capabilities': dest_backend.capabilities} - version = '3.5' - if not self.client.can_send_version(version): - version = '3.0' - del backend_p['cluster_name'] - - cctxt = self._get_cctxt(volume.service_topic_queue, version) - cctxt.cast(ctxt, 'retype', volume=volume, new_type_id=new_type_id, - host=backend_p, migration_policy=migration_policy, - reservations=reservations, - old_reservations=old_reservations) - - def manage_existing(self, ctxt, volume, ref): - cctxt = self._get_cctxt(volume.service_topic_queue) - cctxt.cast(ctxt, 'manage_existing', ref=ref, volume=volume) - - def update_migrated_volume(self, ctxt, volume, new_volume, - original_volume_status): - cctxt = self._get_cctxt(new_volume['host']) - cctxt.call(ctxt, 'update_migrated_volume', - volume=volume, - new_volume=new_volume, - volume_status=original_volume_status) - - def freeze_host(self, ctxt, service): - """Set backend host to frozen.""" - cctxt = self._get_cctxt(service.service_topic_queue) - return cctxt.call(ctxt, 'freeze_host') - - def thaw_host(self, ctxt, service): - """Clear the frozen setting on a backend host.""" - cctxt = self._get_cctxt(service.service_topic_queue) - return cctxt.call(ctxt, 'thaw_host') - - def failover(self, ctxt, service, secondary_backend_id=None): - """Failover 
host to the specified backend_id (secondary). """ - version = '3.8' - method = 'failover' - if not self.client.can_send_version(version): - version = '3.0' - method = 'failover_host' - cctxt = self._get_cctxt(service.service_topic_queue, version) - cctxt.cast(ctxt, method, secondary_backend_id=secondary_backend_id) - - def failover_completed(self, ctxt, service, updates): - """Complete failover on all services of the cluster.""" - cctxt = self._get_cctxt(service.service_topic_queue, '3.8', - fanout=True) - cctxt.cast(ctxt, 'failover_completed', updates=updates) - - def manage_existing_snapshot(self, ctxt, snapshot, ref, backend): - cctxt = self._get_cctxt(backend) - cctxt.cast(ctxt, 'manage_existing_snapshot', - snapshot=snapshot, - ref=ref) - - def get_capabilities(self, ctxt, backend_id, discover): - cctxt = self._get_cctxt(backend_id) - return cctxt.call(ctxt, 'get_capabilities', discover=discover) - - def get_backup_device(self, ctxt, backup, volume): - cctxt = self._get_cctxt(volume.service_topic_queue, ('3.2', '3.0')) - if cctxt.can_send_version('3.2'): - backup_obj = cctxt.call(ctxt, 'get_backup_device', backup=backup, - want_objects=True) - else: - backup_dict = cctxt.call(ctxt, 'get_backup_device', backup=backup) - backup_obj = objects.BackupDeviceInfo.from_primitive(backup_dict, - ctxt) - return backup_obj - - def secure_file_operations_enabled(self, ctxt, volume): - cctxt = self._get_cctxt(volume.service_topic_queue) - return cctxt.call(ctxt, 'secure_file_operations_enabled', - volume=volume) - - def get_manageable_volumes(self, ctxt, service, marker, limit, offset, - sort_keys, sort_dirs): - version = ('3.10', '3.0') - cctxt = self._get_cctxt(service.service_topic_queue, version=version) - - msg_args = {'marker': marker, - 'limit': limit, - 'offset': offset, - 'sort_keys': sort_keys, - 'sort_dirs': sort_dirs, - } - - if cctxt.can_send_version('3.10'): - msg_args['want_objects'] = True - - return cctxt.call(ctxt, 'get_manageable_volumes', **msg_args) - - 
def get_manageable_snapshots(self, ctxt, service, marker, limit, offset, - sort_keys, sort_dirs): - version = ('3.10', '3.0') - cctxt = self._get_cctxt(service.service_topic_queue, version=version) - - msg_args = {'marker': marker, - 'limit': limit, - 'offset': offset, - 'sort_keys': sort_keys, - 'sort_dirs': sort_dirs, - } - - if cctxt.can_send_version('3.10'): - msg_args['want_objects'] = True - - return cctxt.call(ctxt, 'get_manageable_snapshots', **msg_args) - - def create_group(self, ctxt, group): - cctxt = self._get_cctxt(group.service_topic_queue) - cctxt.cast(ctxt, 'create_group', group=group) - - def delete_group(self, ctxt, group): - cctxt = self._get_cctxt(group.service_topic_queue) - cctxt.cast(ctxt, 'delete_group', group=group) - - def update_group(self, ctxt, group, add_volumes=None, remove_volumes=None): - cctxt = self._get_cctxt(group.service_topic_queue) - cctxt.cast(ctxt, 'update_group', group=group, add_volumes=add_volumes, - remove_volumes=remove_volumes) - - def create_group_from_src(self, ctxt, group, group_snapshot=None, - source_group=None): - cctxt = self._get_cctxt(group.service_topic_queue) - cctxt.cast(ctxt, 'create_group_from_src', group=group, - group_snapshot=group_snapshot, source_group=source_group) - - def create_group_snapshot(self, ctxt, group_snapshot): - cctxt = self._get_cctxt(group_snapshot.service_topic_queue) - cctxt.cast(ctxt, 'create_group_snapshot', - group_snapshot=group_snapshot) - - def delete_group_snapshot(self, ctxt, group_snapshot): - cctxt = self._get_cctxt(group_snapshot.service_topic_queue) - cctxt.cast(ctxt, 'delete_group_snapshot', - group_snapshot=group_snapshot) - - @rpc.assert_min_rpc_version('3.13') - def initialize_connection_snapshot(self, ctxt, snapshot, connector): - cctxt = self._get_cctxt(snapshot.service_topic_queue, version='3.13') - return cctxt.call(ctxt, 'initialize_connection_snapshot', - snapshot_id=snapshot.id, - connector=connector) - - @rpc.assert_min_rpc_version('3.13') - def 
terminate_connection_snapshot(self, ctxt, snapshot, connector, - force=False): - cctxt = self._get_cctxt(snapshot.service_topic_queue, version='3.13') - return cctxt.call(ctxt, 'terminate_connection_snapshot', - snapshot_id=snapshot.id, - connector=connector, force=force) - - @rpc.assert_min_rpc_version('3.13') - def remove_export_snapshot(self, ctxt, snapshot): - cctxt = self._get_cctxt(snapshot.service_topic_queue, version='3.13') - cctxt.cast(ctxt, 'remove_export_snapshot', snapshot_id=snapshot.id) - - @rpc.assert_min_rpc_version('3.9') - def attachment_update(self, ctxt, vref, connector, attachment_id): - version = self._compat_ver('3.9') - cctxt = self._get_cctxt(vref.host, version=version) - return cctxt.call(ctxt, - 'attachment_update', - vref=vref, - connector=connector, - attachment_id=attachment_id) - - @rpc.assert_min_rpc_version('3.9') - def attachment_delete(self, ctxt, attachment_id, vref): - version = self._compat_ver('3.9') - cctxt = self._get_cctxt(vref.host, version=version) - return cctxt.call(ctxt, - 'attachment_delete', - attachment_id=attachment_id, - vref=vref) - - @rpc.assert_min_rpc_version('3.7') - def do_cleanup(self, ctxt, cleanup_request): - """Perform this service/cluster resource cleanup as requested.""" - destination = cleanup_request.service_topic_queue - cctxt = self._get_cctxt(destination, '3.7') - # NOTE(geguileo): This call goes to do_cleanup code in - # cinder.manager.CleanableManager unless in the future we overwrite it - # in cinder.volume.manager - cctxt.cast(ctxt, 'do_cleanup', cleanup_request=cleanup_request) - - @rpc.assert_min_rpc_version('3.12') - def set_log_levels(self, context, service, log_request): - cctxt = self._get_cctxt(host=service.host, version='3.12') - cctxt.cast(context, 'set_log_levels', log_request=log_request) - - @rpc.assert_min_rpc_version('3.12') - def get_log_levels(self, context, service, log_request): - cctxt = self._get_cctxt(host=service.host, version='3.12') - return cctxt.call(context, 
'get_log_levels', log_request=log_request) - - @rpc.assert_min_rpc_version('3.14') - def enable_replication(self, ctxt, group): - cctxt = self._get_cctxt(group.host, version='3.14') - cctxt.cast(ctxt, 'enable_replication', - group=group) - - @rpc.assert_min_rpc_version('3.14') - def disable_replication(self, ctxt, group): - cctxt = self._get_cctxt(group.host, version='3.14') - cctxt.cast(ctxt, 'disable_replication', - group=group) - - @rpc.assert_min_rpc_version('3.14') - def failover_replication(self, ctxt, group, allow_attached_volume=False, - secondary_backend_id=None): - cctxt = self._get_cctxt(group.host, version='3.14') - cctxt.cast(ctxt, 'failover_replication', - group=group, allow_attached_volume=allow_attached_volume, - secondary_backend_id=secondary_backend_id) - - @rpc.assert_min_rpc_version('3.14') - def list_replication_targets(self, ctxt, group): - cctxt = self._get_cctxt(group.host, version='3.14') - return cctxt.call(ctxt, 'list_replication_targets', - group=group) diff --git a/cinder/volume/targets/__init__.py b/cinder/volume/targets/__init__.py deleted file mode 100644 index e69de29bb..000000000 diff --git a/cinder/volume/targets/cxt.py b/cinder/volume/targets/cxt.py deleted file mode 100644 index 44d7701ff..000000000 --- a/cinder/volume/targets/cxt.py +++ /dev/null @@ -1,252 +0,0 @@ -# Copyright 2015 Chelsio Communications Inc. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- - -import os - -from oslo_concurrency import processutils as putils -from oslo_log import log as logging -from oslo_utils import fileutils -from oslo_utils import netutils - -from cinder import exception -from cinder import utils -from cinder.volume.targets import iscsi - -LOG = logging.getLogger(__name__) - - -class CxtAdm(iscsi.ISCSITarget): - """Chiscsi target configuration for block storage devices. - - This includes things like create targets, attach, detach - etc. - """ - - TARGET_FMT = """ - target: - TargetName=%s - TargetDevice=%s - PortalGroup=1@%s - """ - TARGET_FMT_WITH_CHAP = """ - target: - TargetName=%s - TargetDevice=%s - PortalGroup=1@%s - AuthMethod=CHAP - Auth_CHAP_Policy=Oneway - Auth_CHAP_Initiator=%s - """ - - cxt_subdir = 'cxt' - - def __init__(self, *args, **kwargs): - super(CxtAdm, self).__init__(*args, **kwargs) - self.volumes_dir = self.configuration.safe_get('volumes_dir') - self.volumes_dir = os.path.join(self.volumes_dir, self.cxt_subdir) - self.config = self.configuration.safe_get('chiscsi_conf') - - def _get_volumes_dir(self): - return self.volumes_dir - - def _get_target(self, iqn): - # We can use target=iqn here, but iscsictl has no --brief mode, and - # this way we save on a lot of unnecessary parsing - (out, err) = utils.execute('iscsictl', - '-c', - 'target=ALL', - run_as_root=True) - lines = out.split('\n') - for line in lines: - if iqn in line: - parsed = line.split() - tid = parsed[2] - return tid[3:].rstrip(',') - - return None - - def _get_iscsi_target(self, context, vol_id): - return 0 - - def _get_target_and_lun(self, context, volume): - lun = 0 # For chiscsi dev starts at lun 0 - iscsi_target = 1 - return iscsi_target, lun - - @staticmethod - def _get_portal(ip, port=None): - # ipv6 addresses use [ip]:port format, ipv4 use ip:port - portal_port = ':%d' % port if port else '' - - if netutils.is_valid_ipv4(ip): - portal_ip = ip - else: - portal_ip = '[' + ip + ']' - - return portal_ip + portal_port - - def 
create_iscsi_target(self, name, tid, lun, path, - chap_auth=None, **kwargs): - - (out, err) = utils.execute('iscsictl', - '-c', - 'target=ALL', - run_as_root=True) - LOG.debug("Targets prior to update: %s", out) - volumes_dir = self._get_volumes_dir() - fileutils.ensure_tree(volumes_dir) - - vol_id = name.split(':')[1] - - cfg_port = kwargs.get('portals_port') - cfg_ips = kwargs.get('portals_ips') - - portals = ','.join(map(lambda ip: self._get_portal(ip, cfg_port), - cfg_ips)) - - if chap_auth is None: - volume_conf = self.TARGET_FMT % (name, path, portals) - else: - volume_conf = self.TARGET_FMT_WITH_CHAP % (name, - path, portals, - '"%s":"%s"' % chap_auth) - LOG.debug('Creating iscsi_target for: %s', vol_id) - volume_path = os.path.join(volumes_dir, vol_id) - - if os.path.exists(volume_path): - LOG.warning('Persistence file already exists for volume, ' - 'found file at: %s', volume_path) - utils.robust_file_write(volumes_dir, vol_id, volume_conf) - LOG.debug('Created volume path %(vp)s,\n' - 'content: %(vc)s', - {'vp': volume_path, 'vc': volume_conf}) - - old_persist_file = None - old_name = kwargs.get('old_name', None) - if old_name: - LOG.debug('Detected old persistence file for volume ' - '%(vol)s at %(old_name)s', - {'vol': vol_id, 'old_name': old_name}) - old_persist_file = os.path.join(volumes_dir, old_name) - - try: - # With the persistent tgts we create them - # by creating the entry in the persist file - # and then doing an update to get the target - # created. 
- (out, err) = utils.execute('iscsictl', '-S', 'target=%s' % name, - '-f', volume_path, - '-x', self.config, - run_as_root=True) - except putils.ProcessExecutionError as e: - LOG.error("Failed to create iscsi target for volume " - "id:%(vol_id)s: %(e)s", - {'vol_id': vol_id, 'e': e}) - - # Don't forget to remove the persistent file we created - os.unlink(volume_path) - raise exception.ISCSITargetCreateFailed(volume_id=vol_id) - finally: - LOG.debug("StdOut from iscsictl -S: %s", out) - LOG.debug("StdErr from iscsictl -S: %s", err) - - # Grab targets list for debug - (out, err) = utils.execute('iscsictl', - '-c', - 'target=ALL', - run_as_root=True) - LOG.debug("Targets after update: %s", out) - - iqn = '%s%s' % (self.iscsi_target_prefix, vol_id) - tid = self._get_target(iqn) - if tid is None: - LOG.error("Failed to create iscsi target for volume " - "id:%(vol_id)s. Please verify your configuration " - "in %(volumes_dir)s'", - {'vol_id': vol_id, - 'volumes_dir': volumes_dir, }) - raise exception.NotFound() - - if old_persist_file is not None and os.path.exists(old_persist_file): - os.unlink(old_persist_file) - - return tid - - def remove_iscsi_target(self, tid, lun, vol_id, vol_name, **kwargs): - LOG.info('Removing iscsi_target for: %s', vol_id) - vol_uuid_file = vol_name - volume_path = os.path.join(self._get_volumes_dir(), vol_uuid_file) - if not os.path.exists(volume_path): - LOG.warning('Volume path %s does not exist, ' - 'nothing to remove.', volume_path) - return - - if os.path.isfile(volume_path): - iqn = '%s%s' % (self.iscsi_target_prefix, - vol_uuid_file) - else: - raise exception.ISCSITargetRemoveFailed(volume_id=vol_id) - - target_exists = False - try: - (out, err) = utils.execute('iscsictl', - '-c', - 'target=%s' % iqn, - run_as_root=True) - LOG.debug("StdOut from iscsictl -c: %s", out) - LOG.debug("StdErr from iscsictl -c: %s", err) - except putils.ProcessExecutionError as e: - if "NOT found" in e.stdout: - LOG.info("No iscsi target present for volume " 
- "id:%(vol_id)s: %(e)s", - {'vol_id': vol_id, 'e': e}) - return - else: - raise exception.ISCSITargetRemoveFailed(volume_id=vol_id) - else: - target_exists = True - - try: - utils.execute('iscsictl', - '-s', - 'target=%s' % iqn, - run_as_root=True) - except putils.ProcessExecutionError as e: - # There exists a race condition where multiple calls to - # remove_iscsi_target come in simultaneously. If we can poll - # for a target successfully but it is gone before we can remove - # it, fail silently - if "is not found" in e.stderr and target_exists: - LOG.info("No iscsi target present for volume " - "id:%(vol_id)s: %(e)s", - {'vol_id': vol_id, 'e': e}) - return - else: - LOG.error("Failed to remove iscsi target for volume " - "id:%(vol_id)s: %(e)s", - {'vol_id': vol_id, 'e': e}) - raise exception.ISCSITargetRemoveFailed(volume_id=vol_id) - - # Carried over from tgt - # NOTE(jdg): This *should* be there still but incase - # it's not we don't care, so just ignore it if was - # somehow deleted between entry of this method - # and here - if os.path.exists(volume_path): - os.unlink(volume_path) - else: - LOG.debug('Volume path %s not found at end, ' - 'of remove_iscsi_target.', volume_path) diff --git a/cinder/volume/targets/driver.py b/cinder/volume/targets/driver.py deleted file mode 100644 index fbee7f613..000000000 --- a/cinder/volume/targets/driver.py +++ /dev/null @@ -1,69 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -import abc - -from oslo_config import cfg -import six - -CONF = cfg.CONF - - -@six.add_metaclass(abc.ABCMeta) -class Target(object): - """Target object for block storage devices. - - Base class for target object, where target - is data transport mechanism (target) specific calls. - This includes things like create targets, attach, detach - etc. - - Base class here does nothing more than set an executor and db as - well as force implementation of required methods. - - """ - - def __init__(self, *args, **kwargs): - self.db = kwargs.get('db') - self.configuration = kwargs.get('configuration') - self._root_helper = kwargs.get('root_helper', - 'sudo cinder-rootwrap %s' % - CONF.rootwrap_config) - - @abc.abstractmethod - def ensure_export(self, context, volume, volume_path): - """Synchronously recreates an export for a volume.""" - pass - - @abc.abstractmethod - def create_export(self, context, volume, volume_path): - """Exports a Target/Volume. - - Can optionally return a Dict of changes to - the volume object to be persisted. - """ - pass - - @abc.abstractmethod - def remove_export(self, context, volume): - """Removes an export for a Target/Volume.""" - pass - - @abc.abstractmethod - def initialize_connection(self, volume, connector): - """Allow connection to connector and return connection info.""" - pass - - @abc.abstractmethod - def terminate_connection(self, volume, connector, **kwargs): - """Disallow connection from connector.""" - pass diff --git a/cinder/volume/targets/fake.py b/cinder/volume/targets/fake.py deleted file mode 100644 index 97841bbac..000000000 --- a/cinder/volume/targets/fake.py +++ /dev/null @@ -1,33 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. 
You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from cinder.volume.targets import iscsi - - -class FakeTarget(iscsi.ISCSITarget): - VERSION = '0.1' - - def _get_target_and_lun(self, context, volume): - return(0, 0) - - def create_iscsi_target(self, name, tid, lun, path, - chap_auth, **kwargs): - pass - - def remove_iscsi_target(self, tid, lun, vol_id, vol_name, **kwargs): - pass - - def _get_iscsi_target(self, context, vol_id): - pass - - def _get_target(self, iqn): - pass diff --git a/cinder/volume/targets/iet.py b/cinder/volume/targets/iet.py deleted file mode 100644 index b98cffe64..000000000 --- a/cinder/volume/targets/iet.py +++ /dev/null @@ -1,280 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -import os -import re -import stat - -from oslo_concurrency import processutils as putils -from oslo_log import log as logging - -from cinder import exception -from cinder import utils -from cinder.volume.targets import iscsi - -LOG = logging.getLogger(__name__) - - -class IetAdm(iscsi.ISCSITarget): - VERSION = '0.1' - - def __init__(self, *args, **kwargs): - super(IetAdm, self).__init__(*args, **kwargs) - self.iet_conf = self.configuration.safe_get('iet_conf') - self.iscsi_iotype = self.configuration.safe_get('iscsi_iotype') - self.auth_type = 'IncomingUser' - self.iet_sessions = '/proc/net/iet/session' - - def _get_target(self, iqn): - - # Find existing iSCSI target session from /proc/net/iet/session - # - # tid:2 name:iqn.2010-10.org:volume-222 - # sid:562950561399296 initiator:iqn.1994-05.com:5a6894679665 - # cid:0 ip:192.168.122.1 state:active hd:none dd:none - # tid:1 name:iqn.2010-10.org:volume-111 - # sid:281475567911424 initiator:iqn.1994-05.com:5a6894679665 - # cid:0 ip:192.168.122.1 state:active hd:none dd:none - - iscsi_target = 0 - try: - with open(self.iet_sessions, 'r') as f: - sessions = f.read() - except Exception: - LOG.exception("Failed to open iet session list for %s", iqn) - raise - - session_list = re.split('^tid:(?m)', sessions)[1:] - for ses in session_list: - m = re.match(r'(\d+) name:(\S+)\s+', ses) - if m and iqn in m.group(2): - return m.group(1) - - return iscsi_target - - def _get_iscsi_target(self, context, vol_id): - pass - - def _get_target_and_lun(self, context, volume): - - # For ietadm dev starts at lun 0 - lun = 0 - - # Using 0, ietadm tries to search empty tid for creating - # new iSCSI target - iscsi_target = 0 - - # Find existing iSCSI target based on iqn - iqn = '%svolume-%s' % (self.iscsi_target_prefix, volume['id']) - iscsi_target = self._get_target(iqn) - - return iscsi_target, lun - - def create_iscsi_target(self, name, tid, lun, path, - chap_auth=None, **kwargs): - - config_auth = None - vol_id = name.split(':')[1] - 
- # Check the target is already existing. - tmp_tid = self._get_target(name) - - # Create a new iSCSI target. If a target already exists, - # the command returns 234, but we ignore it. - try: - self._new_target(name, tid) - tid = self._get_target(name) - self._new_logicalunit(tid, lun, path) - - if chap_auth is not None: - (username, password) = chap_auth - config_auth = ' '.join((self.auth_type,) + chap_auth) - self._new_auth(tid, self.auth_type, username, password) - except putils.ProcessExecutionError: - LOG.exception("Failed to create iscsi target for volume " - "id:%s", vol_id) - raise exception.ISCSITargetCreateFailed(volume_id=vol_id) - - # Update config file only if new scsi target is created. - if not tmp_tid: - self.update_config_file(name, tid, path, config_auth) - - return tid - - def update_config_file(self, name, tid, path, config_auth): - - conf_file = self.iet_conf - vol_id = name.split(':')[1] - - # If config file does not exist, create a blank conf file and - # add configuration for the volume on the new file. 
- if not os.path.exists(conf_file): - try: - utils.execute("truncate", conf_file, "--size=0", - run_as_root=True) - except putils.ProcessExecutionError: - LOG.exception("Failed to create %(conf)s for volume " - "id:%(vol_id)s", - {'conf': conf_file, 'vol_id': vol_id}) - raise exception.ISCSITargetCreateFailed(volume_id=vol_id) - - try: - volume_conf = """ - Target %s - %s - Lun 0 Path=%s,Type=%s - """ % (name, config_auth, path, self._iotype(path)) - - with utils.temporary_chown(conf_file): - with open(conf_file, 'a+') as f: - f.write(volume_conf) - except Exception: - LOG.exception("Failed to update %(conf)s for volume " - "id:%(vol_id)s", - {'conf': conf_file, 'vol_id': vol_id}) - raise exception.ISCSITargetCreateFailed(volume_id=vol_id) - - def remove_iscsi_target(self, tid, lun, vol_id, vol_name, **kwargs): - LOG.info("Removing iscsi_target for volume: %s", vol_id) - - try: - self._delete_logicalunit(tid, lun) - session_info = self._find_sid_cid_for_target(tid, vol_name, vol_id) - if session_info: - sid, cid = session_info - self._force_delete_target(tid, sid, cid) - - self._delete_target(tid) - except putils.ProcessExecutionError: - LOG.exception("Failed to remove iscsi target for volume " - "id:%s", vol_id) - raise exception.ISCSITargetRemoveFailed(volume_id=vol_id) - - vol_uuid_file = vol_name - conf_file = self.iet_conf - if os.path.exists(conf_file): - try: - with utils.temporary_chown(conf_file): - with open(conf_file, 'r+') as iet_conf_text: - full_txt = iet_conf_text.readlines() - new_iet_conf_txt = [] - count = 0 - for line in full_txt: - if count > 0: - count -= 1 - continue - elif vol_uuid_file in line: - count = 2 - continue - else: - new_iet_conf_txt.append(line) - - iet_conf_text.seek(0) - iet_conf_text.truncate(0) - iet_conf_text.writelines(new_iet_conf_txt) - except Exception: - LOG.exception("Failed to update %(conf)s for volume id " - "%(vol_id)s after removing iscsi target", - {'conf': conf_file, 'vol_id': vol_id}) - raise 
exception.ISCSITargetRemoveFailed(volume_id=vol_id) - else: - LOG.warning("Failed to update %(conf)s for volume id " - "%(vol_id)s after removing iscsi target. " - "%(conf)s does not exist.", - {'conf': conf_file, 'vol_id': vol_id}) - - def _find_sid_cid_for_target(self, tid, name, vol_id): - """Find sid, cid for existing iscsi target""" - - try: - with open(self.iet_sessions, 'r') as f: - sessions = f.read() - except Exception as e: - LOG.info("Failed to open iet session list for " - "%(vol_id)s: %(e)s", - {'vol_id': vol_id, 'e': e}) - return None - - session_list = re.split('^tid:(?m)', sessions)[1:] - for ses in session_list: - m = re.match(r'(\d+) name:(\S+)\s+sid:(\d+).+\s+cid:(\d+)', ses) - if m and tid in m.group(1) and name in m.group(2): - return m.group(3), m.group(4) - - def _is_block(self, path): - mode = os.stat(path).st_mode - return stat.S_ISBLK(mode) - - def _iotype(self, path): - if self.iscsi_iotype == 'auto': - return 'blockio' if self._is_block(path) else 'fileio' - else: - return self.iscsi_iotype - - def _new_target(self, name, tid): - """Create new scsi target using specified parameters. - - If the target already exists, ietadm returns - 'Invalid argument' and error code '234'. - This should be ignored for ensure export case. - """ - utils.execute('ietadm', '--op', 'new', - '--tid=%s' % tid, - '--params', 'Name=%s' % name, - run_as_root=True, check_exit_code=[0, 234]) - - def _delete_target(self, tid): - utils.execute('ietadm', '--op', 'delete', - '--tid=%s' % tid, - run_as_root=True) - - def _force_delete_target(self, tid, sid, cid): - utils.execute('ietadm', '--op', 'delete', - '--tid=%s' % tid, - '--sid=%s' % sid, - '--cid=%s' % cid, - run_as_root=True) - - def show_target(self, tid, iqn=None): - utils.execute('ietadm', '--op', 'show', - '--tid=%s' % tid, - run_as_root=True) - - def _new_logicalunit(self, tid, lun, path): - """Attach a new volume to scsi target as a logical unit. 
- - If a logical unit exists on the specified target lun, - ietadm returns 'File exists' and error code '239'. - This should be ignored for ensure export case. - """ - - utils.execute('ietadm', '--op', 'new', - '--tid=%s' % tid, - '--lun=%d' % lun, - '--params', - 'Path=%s,Type=%s' % (path, self._iotype(path)), - run_as_root=True, check_exit_code=[0, 239]) - - def _delete_logicalunit(self, tid, lun): - utils.execute('ietadm', '--op', 'delete', - '--tid=%s' % tid, - '--lun=%d' % lun, - run_as_root=True) - - def _new_auth(self, tid, type, username, password): - utils.execute('ietadm', '--op', 'new', - '--tid=%s' % tid, - '--user', - '--params=%s=%s,Password=%s' % (type, - username, - password), - run_as_root=True) diff --git a/cinder/volume/targets/iscsi.py b/cinder/volume/targets/iscsi.py deleted file mode 100644 index da6184b83..000000000 --- a/cinder/volume/targets/iscsi.py +++ /dev/null @@ -1,400 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import abc - -from oslo_concurrency import processutils -from oslo_log import log as logging - -from cinder import exception -from cinder.i18n import _ -from cinder import utils -from cinder.volume.targets import driver -from cinder.volume import utils as vutils - -LOG = logging.getLogger(__name__) - - -class ISCSITarget(driver.Target): - """Target object for block storage devices. - - Base class for target object, where target - is data transport mechanism (target) specific calls. 
- This includes things like create targets, attach, detach - etc. - """ - - def __init__(self, *args, **kwargs): - super(ISCSITarget, self).__init__(*args, **kwargs) - self.iscsi_target_prefix = \ - self.configuration.safe_get('iscsi_target_prefix') - self.iscsi_protocol = \ - self.configuration.safe_get('iscsi_protocol') - self.protocol = 'iSCSI' - self.volumes_dir = self.configuration.safe_get('volumes_dir') - - def _get_iscsi_properties(self, volume, multipath=False): - """Gets iscsi configuration - - We ideally get saved information in the volume entity, but fall back - to discovery if need be. Discovery may be completely removed in the - future. - - The properties are: - - :target_discovered: boolean indicating whether discovery was used - - :target_iqn: the IQN of the iSCSI target - - :target_portal: the portal of the iSCSI target - - :target_lun: the lun of the iSCSI target - - :volume_id: the uuid of the volume - - :auth_method:, :auth_username:, :auth_password: - - the authentication details. Right now, either auth_method is not - present meaning no authentication, or auth_method == `CHAP` - meaning use CHAP with the specified credentials. - - :discard: boolean indicating if discard is supported - - In some of drivers that support multiple connections (for multipath - and for single path with failover on connection failure), it returns - :target_iqns, :target_portals, :target_luns, which contain lists of - multiple values. The main portal information is also returned in - :target_iqn, :target_portal, :target_lun for backward compatibility. - - Note that some of drivers don't return :target_portals even if they - support multipath. Then the connector should use sendtargets discovery - to find the other portals if it supports multipath. 
- """ - - properties = {} - - location = volume['provider_location'] - - if location: - # provider_location is the same format as iSCSI discovery output - properties['target_discovered'] = False - else: - location = self._do_iscsi_discovery(volume) - - if not location: - msg = (_("Could not find iSCSI export for volume %s") % - (volume['name'])) - raise exception.InvalidVolume(reason=msg) - - LOG.debug("ISCSI Discovery: Found %s", location) - properties['target_discovered'] = True - - results = location.split(" ") - portals = results[0].split(",")[0].split(";") - iqn = results[1] - nr_portals = len(portals) - try: - lun = int(results[2]) - except (IndexError, ValueError): - # NOTE(jdg): The following is carried over from the existing - # code. The trick here is that different targets use different - # default lun numbers, the base driver with tgtadm uses 1 - # others like LIO use 0. - if (self.configuration.volume_driver == - 'cinder.volume.drivers.lvm.ThinLVMVolumeDriver' and - self.configuration.iscsi_helper == 'tgtadm'): - lun = 1 - else: - lun = 0 - - if nr_portals > 1 or multipath: - properties['target_portals'] = portals - properties['target_iqns'] = [iqn] * nr_portals - properties['target_luns'] = [lun] * nr_portals - properties['target_portal'] = portals[0] - properties['target_iqn'] = iqn - properties['target_lun'] = lun - - properties['volume_id'] = volume['id'] - - auth = volume['provider_auth'] - if auth: - (auth_method, auth_username, auth_secret) = auth.split() - - properties['auth_method'] = auth_method - properties['auth_username'] = auth_username - properties['auth_password'] = auth_secret - - geometry = volume.get('provider_geometry', None) - if geometry: - (physical_block_size, logical_block_size) = geometry.split() - properties['physical_block_size'] = physical_block_size - properties['logical_block_size'] = logical_block_size - - encryption_key_id = volume.get('encryption_key_id', None) - properties['encrypted'] = encryption_key_id is not None 
- - return properties - - def _iscsi_authentication(self, chap, name, password): - return "%s %s %s" % (chap, name, password) - - def _do_iscsi_discovery(self, volume): - # TODO(justinsb): Deprecate discovery and use stored info - # NOTE(justinsb): Discovery won't work with CHAP-secured targets (?) - LOG.warning("ISCSI provider_location not stored, using discovery") - - volume_id = volume['id'] - - try: - # NOTE(griff) We're doing the split straight away which should be - # safe since using '@' in hostname is considered invalid - - (out, _err) = utils.execute('iscsiadm', '-m', 'discovery', - '-t', 'sendtargets', '-p', - volume['host'].split('@')[0], - run_as_root=True) - except processutils.ProcessExecutionError as ex: - LOG.error("ISCSI discovery attempt failed for: %s", - volume['host'].split('@')[0]) - LOG.debug("Error from iscsiadm -m discovery: %s", ex.stderr) - return None - - for target in out.splitlines(): - if (self.configuration.safe_get('iscsi_ip_address') in target - and volume_id in target): - return target - return None - - def _get_portals_config(self): - # Prepare portals configuration - portals_ips = ([self.configuration.iscsi_ip_address] - + self.configuration.iscsi_secondary_ip_addresses or []) - - return {'portals_ips': portals_ips, - 'portals_port': self.configuration.iscsi_port} - - def create_export(self, context, volume, volume_path): - """Creates an export for a logical volume.""" - # 'iscsi_name': 'iqn.2010-10.org.openstack:volume-00000001' - iscsi_name = "%s%s" % (self.configuration.iscsi_target_prefix, - volume['name']) - iscsi_target, lun = self._get_target_and_lun(context, volume) - - # Verify we haven't setup a CHAP creds file already - # if DNE no big deal, we'll just create it - chap_auth = self._get_target_chap_auth(context, volume) - if not chap_auth: - chap_auth = (vutils.generate_username(), - vutils.generate_password()) - - # Get portals ips and port - portals_config = self._get_portals_config() - - # NOTE(jdg): For TgtAdm case 
iscsi_name is the ONLY param we need - # should clean this all up at some point in the future - tid = self.create_iscsi_target(iscsi_name, - iscsi_target, - lun, - volume_path, - chap_auth, - **portals_config) - data = {} - data['location'] = self._iscsi_location( - self.configuration.iscsi_ip_address, tid, iscsi_name, lun, - self.configuration.iscsi_secondary_ip_addresses) - LOG.debug('Set provider_location to: %s', data['location']) - data['auth'] = self._iscsi_authentication( - 'CHAP', *chap_auth) - return data - - def remove_export(self, context, volume): - try: - iscsi_target, lun = self._get_target_and_lun(context, volume) - except exception.NotFound: - LOG.info("Skipping remove_export. No iscsi_target " - "provisioned for volume: %s", volume['id']) - return - try: - - # NOTE: provider_location may be unset if the volume hasn't - # been exported - location = volume['provider_location'].split(' ') - iqn = location[1] - - # ietadm show will exit with an error - # this export has already been removed - self.show_target(iscsi_target, iqn=iqn) - - except Exception: - LOG.info("Skipping remove_export. 
No iscsi_target " - "is presently exported for volume: %s", volume['id']) - return - - # NOTE: For TgtAdm case volume['id'] is the ONLY param we need - self.remove_iscsi_target(iscsi_target, lun, volume['id'], - volume['name']) - - def ensure_export(self, context, volume, volume_path): - """Recreates an export for a logical volume.""" - iscsi_name = "%s%s" % (self.configuration.iscsi_target_prefix, - volume['name']) - - chap_auth = self._get_target_chap_auth(context, volume) - - # Get portals ips and port - portals_config = self._get_portals_config() - - iscsi_target, lun = self._get_target_and_lun(context, volume) - self.create_iscsi_target( - iscsi_name, iscsi_target, lun, volume_path, - chap_auth, check_exit_code=False, - old_name=None, **portals_config) - - def initialize_connection(self, volume, connector): - """Initializes the connection and returns connection info. - - The iscsi driver returns a driver_volume_type of 'iscsi'. - The format of the driver data is defined in _get_iscsi_properties. 
- Example return value:: - - { - 'driver_volume_type': 'iscsi' - 'data': { - 'target_discovered': True, - 'target_iqn': 'iqn.2010-10.org.openstack:volume-00000001', - 'target_portal': '127.0.0.0.1:3260', - 'volume_id': '9a0d35d0-175a-11e4-8c21-0800200c9a66', - 'discard': False, - } - } - """ - - iscsi_properties = self._get_iscsi_properties(volume, - connector.get( - 'multipath')) - return { - 'driver_volume_type': self.iscsi_protocol, - 'data': iscsi_properties - } - - def terminate_connection(self, volume, connector, **kwargs): - pass - - def validate_connector(self, connector): - # NOTE(jdg): api passes in connector which is initiator info - if 'initiator' not in connector: - err_msg = ('The volume driver requires the iSCSI initiator ' - 'name in the connector.') - LOG.error(err_msg) - raise exception.InvalidConnectorException(missing='initiator') - return True - - def _iscsi_location(self, ip, target, iqn, lun=None, ip_secondary=None): - ip_secondary = ip_secondary or [] - port = self.configuration.iscsi_port - portals = map(lambda x: "%s:%s" % (x, port), [ip] + ip_secondary) - return ("%(portals)s,%(target)s %(iqn)s %(lun)s" - % ({'portals': ";".join(portals), - 'target': target, 'iqn': iqn, 'lun': lun})) - - def show_target(self, iscsi_target, iqn, **kwargs): - if iqn is None: - raise exception.InvalidParameterValue( - err=_('valid iqn needed for show_target')) - - tid = self._get_target(iqn) - if tid is None: - raise exception.NotFound() - - def _get_target_chap_auth(self, context, volume): - """Get the current chap auth username and password.""" - try: - # Query DB to get latest state of volume - volume_info = self.db.volume_get(context, volume['id']) - # 'provider_auth': 'CHAP user_id password' - if volume_info['provider_auth']: - return tuple(volume_info['provider_auth'].split(' ', 3)[1:]) - except exception.NotFound: - LOG.debug('Failed to get CHAP auth from DB for %s.', volume['id']) - - @abc.abstractmethod - def _get_target_and_lun(self, context, 
volume): - """Get iscsi target and lun.""" - pass - - @abc.abstractmethod - def create_iscsi_target(self, name, tid, lun, path, - chap_auth, **kwargs): - pass - - @abc.abstractmethod - def remove_iscsi_target(self, tid, lun, vol_id, vol_name, **kwargs): - pass - - @abc.abstractmethod - def _get_iscsi_target(self, context, vol_id): - pass - - @abc.abstractmethod - def _get_target(self, iqn): - pass - - -class SanISCSITarget(ISCSITarget): - """iSCSI target for san devices. - - San devices are slightly different, they don't need to implement - all of the same things that we need to implement locally fro LVM - and local block devices when we create and manage our own targets. - - """ - @abc.abstractmethod - def create_export(self, context, volume, volume_path): - pass - - @abc.abstractmethod - def remove_export(self, context, volume): - pass - - @abc.abstractmethod - def ensure_export(self, context, volume, volume_path): - pass - - @abc.abstractmethod - def terminate_connection(self, volume, connector, **kwargs): - pass - - # NOTE(jdg): Items needed for local iSCSI target drivers, - # but NOT sans Stub them out here to make abc happy - - # Use care when looking at these to make sure something - # that's inheritted isn't dependent on one of - # these. - def _get_target_and_lun(self, context, volume): - pass - - def _get_target_chap_auth(self, context, volume): - pass - - def create_iscsi_target(self, name, tid, lun, path, - chap_auth, **kwargs): - pass - - def remove_iscsi_target(self, tid, lun, vol_id, vol_name, **kwargs): - pass - - def _get_iscsi_target(self, context, vol_id): - pass - - def _get_target(self, iqn): - pass diff --git a/cinder/volume/targets/lio.py b/cinder/volume/targets/lio.py deleted file mode 100644 index 37d32698c..000000000 --- a/cinder/volume/targets/lio.py +++ /dev/null @@ -1,220 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. 
You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from oslo_concurrency import processutils as putils -from oslo_log import log as logging - -from cinder import exception -from cinder import utils -from cinder.volume.targets import iscsi - - -LOG = logging.getLogger(__name__) - - -class LioAdm(iscsi.ISCSITarget): - """iSCSI target administration for LIO using python-rtslib.""" - def __init__(self, *args, **kwargs): - super(LioAdm, self).__init__(*args, **kwargs) - - # FIXME(jdg): modify executor to use the cinder-rtstool - self.iscsi_target_prefix =\ - self.configuration.safe_get('iscsi_target_prefix') - - self._verify_rtstool() - - def _verify_rtstool(self): - try: - # This call doesn't need locking - utils.execute('cinder-rtstool', 'verify') - except (OSError, putils.ProcessExecutionError): - LOG.error('cinder-rtstool is not installed correctly') - raise - - @staticmethod - @utils.synchronized('lioadm', external=True) - def _execute(*args, **kwargs): - """Locked execution to prevent racing issues. 
- - Racing issues are derived from a bug in RTSLib: - https://github.com/agrover/rtslib-fb/issues/36 - """ - return utils.execute(*args, **kwargs) - - def _get_target(self, iqn): - (out, err) = self._execute('cinder-rtstool', - 'get-targets', - run_as_root=True) - lines = out.split('\n') - for line in lines: - if iqn in line: - return line - - return None - - def _get_targets(self): - (out, err) = self._execute('cinder-rtstool', - 'get-targets', - run_as_root=True) - return out - - def _get_iscsi_target(self, context, vol_id): - return 0 - - def _get_target_and_lun(self, context, volume): - lun = 0 # For lio, the lun starts at lun 0. - iscsi_target = 0 # NOTE: Not used by lio. - return iscsi_target, lun - - def _persist_configuration(self, vol_id): - try: - self._execute('cinder-rtstool', 'save', run_as_root=True) - - # On persistence failure we don't raise an exception, as target has - # been successfully created. - except putils.ProcessExecutionError: - LOG.warning("Failed to save iscsi LIO configuration when " - "modifying volume id: %(vol_id)s.", - {'vol_id': vol_id}) - - def _restore_configuration(self): - try: - self._execute('cinder-rtstool', 'restore', run_as_root=True) - - # On persistence failure we don't raise an exception, as target has - # been successfully created. 
- except putils.ProcessExecutionError: - LOG.warning("Failed to restore iscsi LIO configuration.") - - def create_iscsi_target(self, name, tid, lun, path, - chap_auth=None, **kwargs): - # tid and lun are not used - - vol_id = name.split(':')[1] - - LOG.info('Creating iscsi_target for volume: %s', vol_id) - - chap_auth_userid = "" - chap_auth_password = "" - if chap_auth is not None: - (chap_auth_userid, chap_auth_password) = chap_auth - - optional_args = [] - if 'portals_port' in kwargs: - optional_args.append('-p%s' % kwargs['portals_port']) - - if 'portals_ips' in kwargs: - optional_args.append('-a' + ','.join(kwargs['portals_ips'])) - - try: - command_args = ['cinder-rtstool', - 'create', - path, - name, - chap_auth_userid, - chap_auth_password, - self.iscsi_protocol == 'iser'] + optional_args - self._execute(*command_args, run_as_root=True) - except putils.ProcessExecutionError: - LOG.exception("Failed to create iscsi target for volume " - "id:%s.", vol_id) - - raise exception.ISCSITargetCreateFailed(volume_id=vol_id) - - iqn = '%s%s' % (self.iscsi_target_prefix, vol_id) - tid = self._get_target(iqn) - if tid is None: - LOG.error("Failed to create iscsi target for volume id:%s.", - vol_id) - raise exception.NotFound() - - # We make changes persistent - self._persist_configuration(vol_id) - - return tid - - def remove_iscsi_target(self, tid, lun, vol_id, vol_name, **kwargs): - LOG.info('Removing iscsi_target: %s', vol_id) - vol_uuid_name = vol_name - iqn = '%s%s' % (self.iscsi_target_prefix, vol_uuid_name) - - try: - self._execute('cinder-rtstool', - 'delete', - iqn, - run_as_root=True) - except putils.ProcessExecutionError: - LOG.exception("Failed to remove iscsi target for volume id:%s.", - vol_id) - raise exception.ISCSITargetRemoveFailed(volume_id=vol_id) - - # We make changes persistent - self._persist_configuration(vol_id) - - def initialize_connection(self, volume, connector): - volume_iqn = volume['provider_location'].split(' ')[1] - - (auth_method, 
auth_user, auth_pass) = \ - volume['provider_auth'].split(' ', 3) - - # Add initiator iqns to target ACL - try: - self._execute('cinder-rtstool', 'add-initiator', - volume_iqn, - auth_user, - auth_pass, - connector['initiator'], - run_as_root=True) - except putils.ProcessExecutionError: - LOG.exception("Failed to add initiator iqn %s to target", - connector['initiator']) - raise exception.ISCSITargetAttachFailed( - volume_id=volume['id']) - - # We make changes persistent - self._persist_configuration(volume['id']) - - return super(LioAdm, self).initialize_connection(volume, connector) - - def terminate_connection(self, volume, connector, **kwargs): - if volume['provider_location'] is None: - LOG.debug('No provider_location for volume %s.', - volume['id']) - return - - volume_iqn = volume['provider_location'].split(' ')[1] - - # Delete initiator iqns from target ACL - try: - self._execute('cinder-rtstool', 'delete-initiator', - volume_iqn, - connector['initiator'], - run_as_root=True) - except putils.ProcessExecutionError: - LOG.exception( - "Failed to delete initiator iqn %s from target.", - connector['initiator']) - raise exception.ISCSITargetDetachFailed(volume_id=volume['id']) - - # We make changes persistent - self._persist_configuration(volume['id']) - - def ensure_export(self, context, volume, volume_path): - """Recreate exports for logical volumes.""" - - # Restore saved configuration file if no target exists. - if not self._get_targets(): - LOG.info('Restoring iSCSI target from configuration file') - self._restore_configuration() - return - - LOG.info("Skipping ensure_export. Found existing iSCSI target.") diff --git a/cinder/volume/targets/scst.py b/cinder/volume/targets/scst.py deleted file mode 100644 index b72ac0571..000000000 --- a/cinder/volume/targets/scst.py +++ /dev/null @@ -1,394 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. 
You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from oslo_concurrency import processutils as putils -from oslo_log import log as logging - -from cinder import exception -from cinder.i18n import _ -from cinder import utils -from cinder.volume.targets import iscsi -from cinder.volume import utils as vutils - -LOG = logging.getLogger(__name__) - - -class SCSTAdm(iscsi.ISCSITarget): - - def __init__(self, *args, **kwargs): - super(SCSTAdm, self).__init__(*args, **kwargs) - self.volumes_dir = self.configuration.safe_get('volumes_dir') - self.iscsi_target_prefix = self.configuration.safe_get( - 'iscsi_target_prefix') - self.target_name = self.configuration.safe_get('scst_target_iqn_name') - self.target_driver = self.configuration.safe_get('scst_target_driver') - self.chap_username = self.configuration.safe_get('chap_username') - self.chap_password = self.configuration.safe_get('chap_password') - self.initiator_iqn = None - self.remove_initiator_iqn = None - - def scst_execute(self, *args): - return utils.execute('scstadmin', *args, run_as_root=True) - - def validate_connector(self, connector): - # iSCSI drivers require the initiator information - if 'initiator' not in connector: - err_msg = _('The volume driver requires the iSCSI initiator ' - 'name in the connector.') - LOG.error(err_msg) - raise exception.VolumeBackendAPIException(data=err_msg) - self.initiator_iqn = connector['initiator'] - - def terminate_connection(self, volume, connector, **kwargs): - self.remove_initiator_iqn = connector['initiator'] - - def _get_target(self, iqn): - (out, _err) = self.scst_execute('-list_target') - if iqn in out: - 
return self._target_attribute(iqn) - return None - - def _target_attribute(self, iqn): - (out, _err) = self.scst_execute('-list_tgt_attr', iqn, - '-driver', self.target_driver) - lines = out.split('\n') - for line in lines: - if "rel_tgt_id" in line: - parsed = line.split() - return parsed[1] - - def _get_group(self): - scst_group = "%s%s" % (self.initiator_iqn, self.target_name) - (out, _err) = self.scst_execute('-list_group') - if scst_group in out: - return out - return None - - def _get_luns_info(self): - scst_group = "%s%s" % (self.initiator_iqn, self.target_name) - (out, _err) = self.scst_execute('-list_group', scst_group, - '-driver', self.target_driver, - '-target', self.target_name) - - first = "Assigned LUNs:" - last = "Assigned Initiators:" - start = out.index(first) + len(first) - end = out.index(last, start) - out = out[start:end] - - luns = [] - for line in out.strip().split("\n")[2:]: - luns.append(int(line.strip().split(" ")[0])) - luns = sorted(set(luns)) - return luns - - def _get_target_and_lun(self, context, volume): - iscsi_target = 0 - if not self.target_name or not self._get_group(): - lun = 1 - return iscsi_target, lun - - luns = self._get_luns_info() - if (not luns) or (luns[0] != 1): - lun = 1 - return iscsi_target, lun - else: - for lun in luns: - if (luns[-1] == lun) or (luns[lun - 1] + 1 != luns[lun]): - return iscsi_target, (lun + 1) - - def create_iscsi_target(self, name, vol_id, tid, lun, path, - chap_auth=None): - scst_group = "%s%s" % (self.initiator_iqn, self.target_name) - vol_name = path.split("/")[3] - try: - (out, _err) = self.scst_execute('-noprompt', - '-set_drv_attr', - self.target_driver, - '-attributes', - 'enabled=1') - LOG.debug('StdOut from set driver attribute: %s', out) - except putils.ProcessExecutionError as e: - LOG.error("Failed to set attribute for enable target driver %s", e) - raise exception.ISCSITargetHelperCommandFailed( - error_message="Failed to enable SCST Target driver.") - - if self._get_target(name) 
is None: - try: - (out, _err) = self.scst_execute('-add_target', name, - '-driver', self.target_driver) - LOG.debug("StdOut from scstadmin create target: %s", out) - except putils.ProcessExecutionError as e: - LOG.error("Failed to create iscsi target for volume " - "id:%(vol_id)s: %(e)s", {'vol_id': name, 'e': e}) - raise exception.ISCSITargetCreateFailed(volume_id=vol_name) - try: - (out, _err) = self.scst_execute('-enable_target', name, - '-driver', self.target_driver) - LOG.debug("StdOut from scstadmin enable target: %s", out) - except putils.ProcessExecutionError as e: - LOG.error("Failed to set 'enable' attribute for " - "SCST target %s", e) - raise exception.ISCSITargetHelperCommandFailed( - error_mesage="Failed to enable SCST Target.") - if chap_auth and self.target_name: - try: - chap_string = self._iscsi_authentication('IncomingUser=', - *chap_auth) - (out, _err) = self.scst_execute('-noprompt', - '-set_tgt_attr', name, - '-driver', - self.target_driver, - '-attributes', - chap_string) - LOG.debug("StdOut from scstadmin set target attribute:" - " %s.", out) - except putils.ProcessExecutionError: - msg = _("Failed to set attribute 'Incoming user' for " - "SCST target.") - LOG.exception(msg) - raise exception.ISCSITargetHelperCommandFailed( - error_mesage=msg) - - if self.target_name: - if self._get_group() is None: - try: - (out, _err) = self.scst_execute('-add_group', scst_group, - '-driver', - self.target_driver, - '-target', name) - LOG.debug("StdOut from scstadmin create group: %s", out) - except putils.ProcessExecutionError as e: - LOG.error("Failed to create group to SCST target %s", e) - raise exception.ISCSITargetHelperCommandFailed( - error_message="Failed to create group to SCST target.") - try: - (out, _err) = self.scst_execute('-add_init', - self.initiator_iqn, - '-driver', self.target_driver, - '-target', name, - '-group', scst_group) - LOG.debug("StdOut from scstadmin add initiator: %s", out) - except putils.ProcessExecutionError as e: - 
LOG.error("Failed to add initiator to group " - " for SCST target %s", e) - raise exception.ISCSITargetHelperCommandFailed( - error_message="Failed to add Initiator to group for " - "SCST target.") - - tid = self._get_target(name) - if self.target_name is None: - disk_id = "disk%s" % tid - else: - disk_id = "%s%s" % (lun, vol_id.split('-')[-1]) - - try: - self.scst_execute('-open_dev', disk_id, - '-handler', 'vdisk_fileio', - '-attributes', 'filename=%s' % path) - except putils.ProcessExecutionError as e: - LOG.error("Failed to add device to handler %s", e) - raise exception.ISCSITargetHelperCommandFailed( - error_message="Failed to add device to SCST handler.") - - try: - if self.target_name: - self.scst_execute('-add_lun', lun, - '-driver', self.target_driver, - '-target', name, - '-device', disk_id, - '-group', scst_group) - else: - self.scst_execute('-add_lun', lun, - '-driver', self.target_driver, - '-target', name, - '-device', disk_id) - except putils.ProcessExecutionError as e: - LOG.error("Failed to add lun to SCST target " - "id:%(vol_id)s: %(e)s", {'vol_id': name, 'e': e}) - raise exception.ISCSITargetHelperCommandFailed( - error_message="Failed to add LUN to SCST Target for " - "volume " + vol_name) - - # SCST uses /etc/scst.conf as the default configuration when it - # starts - try: - self.scst_execute('-write_config', '/etc/scst.conf') - except putils.ProcessExecutionError as e: - LOG.error("Failed to write in /etc/scst.conf.") - raise exception.ISCSITargetHelperCommandFailed( - error_message="Failed to write in /etc/scst.conf.") - - return tid - - def _iscsi_location(self, ip, target, iqn, lun=None): - return "%s:%s,%s %s %s" % (ip, self.configuration.iscsi_port, - target, iqn, lun) - - def _get_iscsi_name(self, volume): - if self.target_name is None: - return "%s%s" % (self.configuration.iscsi_target_prefix, - volume['name']) - else: - return self.target_name - - def _get_iscsi_target(self, context, vol_id): - # FIXME(jdg): Need to implement abc 
method - pass - - def _get_target_chap_auth(self, context, volume): - # FIXME(jdg): Need to implement abc method - - iscsi_name = self._get_iscsi_name(volume) - - if self._get_target(iscsi_name) is None: - return None - (out, _err) = self.scst_execute('-list_tgt_attr', iscsi_name, - '-driver', self.target_driver) - first = "KEY" - last = "Dynamic attributes" - start = out.index(first) + len(first) - end = out.index(last, start) - out = out[start:end] - out = out.split("\n")[2] - if "IncomingUser" in out: - out = out.split(" ") - out = [a for a in out if a != ""] - return (out[1], out[2]) - else: - return None - - def ensure_export(self, context, volume, volume_path): - iscsi_target, lun = self._get_target_and_lun(context, volume) - iscsi_name = self._get_iscsi_name(volume) - - if self.chap_username and self.chap_password: - chap_auth = (self.chap_username, self.chap_password) - else: - chap_auth = self._get_target_chap_auth(context, volume) - - self.create_iscsi_target(iscsi_name, volume['id'], iscsi_target, - lun, volume_path, chap_auth) - - def create_export(self, context, volume, volume_path): - """Creates an export for a logical volume.""" - iscsi_target, lun = self._get_target_and_lun(context, volume) - iscsi_name = self._get_iscsi_name(volume) - - if self.chap_username and self.chap_password: - chap_auth = (self.chap_username, self.chap_password) - else: - chap_auth = self._get_target_chap_auth(context, volume) - if not chap_auth: - chap_auth = (vutils.generate_username(), - vutils.generate_password()) - tid = self.create_iscsi_target(iscsi_name, volume['id'], iscsi_target, - lun, volume_path, chap_auth) - - data = {} - data['location'] = self._iscsi_location( - self.configuration.iscsi_ip_address, tid, iscsi_name, lun) - LOG.debug('Set provider_location to: %s', data['location']) - data['auth'] = self._iscsi_authentication( - 'CHAP', *chap_auth) - return data - - def remove_export(self, context, volume): - try: - location = 
volume['provider_location'].split(' ') - iqn = location[1] - iscsi_target = self._get_target(iqn) - self.show_target(iscsi_target, iqn) - - except Exception: - LOG.error("Skipping remove_export. No iscsi_target is" - "presently exported for volume: %s", volume['id']) - return - vol = self.db.volume_get(context, volume['id']) - lun = "".join(vol['provider_location'].split(" ")[-1:]) - - self.remove_iscsi_target(iscsi_target, lun, - volume['id'], volume['name']) - - def remove_iscsi_target(self, tid, lun, vol_id, vol_name, **kwargs): - disk_id = "%s%s" % (lun, vol_id.split('-')[-1]) - vol_uuid_file = vol_name - if self.target_name is None: - iqn = '%s%s' % (self.iscsi_target_prefix, vol_uuid_file) - else: - iqn = self.target_name - - if self.target_name is None: - try: - self.scst_execute('-noprompt', - '-rem_target', iqn, - '-driver', 'iscsi') - except putils.ProcessExecutionError as e: - LOG.error("Failed to remove iscsi target for volume " - "id:%(vol_id)s: %(e)s", {'vol_id': vol_id, 'e': e}) - raise exception.ISCSITargetRemoveFailed(volume_id=vol_id) - try: - self.scst_execute('-noprompt', - '-close_dev', "disk%s" % tid, - '-handler', 'vdisk_fileio') - except putils.ProcessExecutionError as e: - LOG.error("Failed to close disk device %s", e) - raise exception.ISCSITargetHelperCommandFailed( - error_message="Failed to close disk device for " - "SCST handler.") - - if self._get_target(iqn): - try: - self.scst_execute('-noprompt', - '-rem_target', iqn, - '-driver', self.target_driver) - except putils.ProcessExecutionError as e: - LOG.error("Failed to remove iscsi target for " - "volume id:%(vol_id)s: %(e)s", - {'vol_id': vol_id, 'e': e}) - raise exception.ISCSITargetRemoveFailed(volume_id=vol_id) - else: - if not int(lun) in self._get_luns_info(): - raise exception.ISCSITargetRemoveFailed(volume_id=vol_id) - try: - scst_group = "%s%s" % (self.remove_initiator_iqn, - self.target_name) - self.scst_execute('-noprompt', '-rem_lun', lun, - '-driver', self.target_driver, 
- '-target', iqn, '-group', - scst_group) - except putils.ProcessExecutionError as e: - LOG.error("Failed to remove LUN %s", e) - raise exception.ISCSITargetHelperCommandFailed( - error_message="Failed to remove LUN for SCST Target.") - - try: - self.scst_execute('-noprompt', - '-close_dev', disk_id, - '-handler', 'vdisk_fileio') - except putils.ProcessExecutionError as e: - LOG.error("Failed to close disk device %s", e) - raise exception.ISCSITargetHelperCommandFailed( - error_message="Failed to close disk device for " - "SCST handler.") - - self.scst_execute('-write_config', '/etc/scst.conf') - - def show_target(self, tid, iqn): - if iqn is None: - raise exception.InvalidParameterValue( - err=_('valid iqn needed for show_target')) - - tid = self._get_target(iqn) - if tid is None: - raise exception.ISCSITargetHelperCommandFailed( - error_message="Target not found") diff --git a/cinder/volume/targets/tgt.py b/cinder/volume/targets/tgt.py deleted file mode 100644 index b54a7a83c..000000000 --- a/cinder/volume/targets/tgt.py +++ /dev/null @@ -1,315 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -import os -import textwrap -import time - -from oslo_concurrency import processutils as putils -from oslo_log import log as logging -from oslo_utils import fileutils - -from cinder import exception -from cinder import utils -from cinder.volume.targets import iscsi - -LOG = logging.getLogger(__name__) - - -class TgtAdm(iscsi.ISCSITarget): - """Target object for block storage devices. - - Base class for target object, where target - is data transport mechanism (target) specific calls. - This includes things like create targets, attach, detach - etc. - """ - - VOLUME_CONF = textwrap.dedent(""" - - backing-store %(path)s - driver %(driver)s - %(chap_auth)s - %(target_flags)s - write-cache %(write_cache)s - - """) - - def _get_target(self, iqn): - (out, err) = utils.execute('tgt-admin', '--show', run_as_root=True) - lines = out.split('\n') - for line in lines: - if iqn in line: - parsed = line.split() - tid = parsed[1] - return tid[:-1] - - return None - - def _verify_backing_lun(self, iqn, tid): - backing_lun = True - capture = False - target_info = [] - - (out, err) = utils.execute('tgt-admin', '--show', run_as_root=True) - lines = out.split('\n') - - for line in lines: - if iqn in line and "Target %s" % tid in line: - capture = True - if capture: - target_info.append(line) - if iqn not in line and 'Target ' in line: - capture = False - - if ' LUN: 1' not in target_info: - backing_lun = False - - return backing_lun - - def _recreate_backing_lun(self, iqn, tid, name, path): - LOG.warning('Attempting recreate of backing lun...') - - # Since we think the most common case of this is a dev busy - # (create vol from snapshot) we're going to add a sleep here - # this will hopefully give things enough time to stabilize - # how long should we wait?? 
I have no idea, let's go big - # and error on the side of caution - time.sleep(10) - - (out, err) = (None, None) - try: - (out, err) = utils.execute('tgtadm', '--lld', 'iscsi', - '--op', 'new', '--mode', - 'logicalunit', '--tid', - tid, '--lun', '1', '-b', - path, run_as_root=True) - except putils.ProcessExecutionError as e: - LOG.error("Failed recovery attempt to create " - "iscsi backing lun for Volume " - "ID:%(vol_id)s: %(e)s", - {'vol_id': name, 'e': e}) - finally: - LOG.debug('StdOut from recreate backing lun: %s', out) - LOG.debug('StdErr from recreate backing lun: %s', err) - - def _get_iscsi_target(self, context, vol_id): - return 0 - - def _get_target_and_lun(self, context, volume): - lun = 1 # For tgtadm the controller is lun 0, dev starts at lun 1 - iscsi_target = 0 # NOTE(jdg): Not used by tgtadm - return iscsi_target, lun - - @utils.retry(putils.ProcessExecutionError) - def _do_tgt_update(self, name): - (out, err) = utils.execute('tgt-admin', '--update', name, - run_as_root=True) - LOG.debug("StdOut from tgt-admin --update: %s", out) - LOG.debug("StdErr from tgt-admin --update: %s", err) - - @utils.retry(exception.NotFound) - def create_iscsi_target(self, name, tid, lun, path, - chap_auth=None, **kwargs): - - # Note(jdg) tid and lun aren't used by TgtAdm but remain for - # compatibility - - # NOTE(jdg): Remove this when we get to the bottom of bug: #1398078 - # for now, since we intermittently hit target already exists we're - # adding some debug info to try and pinpoint what's going on - (out, err) = utils.execute('tgtadm', - '--lld', - 'iscsi', - '--op', - 'show', - '--mode', - 'target', - run_as_root=True) - LOG.debug("Targets prior to update: %s", out) - fileutils.ensure_tree(self.volumes_dir) - - vol_id = name.split(':')[1] - write_cache = self.configuration.get('iscsi_write_cache', 'on') - driver = self.iscsi_protocol - chap_str = '' - - if chap_auth is not None: - chap_str = 'incominguser %s %s' % chap_auth - - target_flags = 
self.configuration.get('iscsi_target_flags', '') - if target_flags: - target_flags = 'bsoflags ' + target_flags - - volume_conf = self.VOLUME_CONF % { - 'name': name, 'path': path, 'driver': driver, - 'chap_auth': chap_str, 'target_flags': target_flags, - 'write_cache': write_cache} - - LOG.debug('Creating iscsi_target for Volume ID: %s', vol_id) - volumes_dir = self.volumes_dir - volume_path = os.path.join(volumes_dir, vol_id) - - if os.path.exists(volume_path): - LOG.debug(('Persistence file already exists for volume, ' - 'found file at: %s'), volume_path) - utils.robust_file_write(volumes_dir, vol_id, volume_conf) - LOG.debug(('Created volume path %(vp)s,\n' - 'content: %(vc)s'), - {'vp': volume_path, 'vc': volume_conf}) - - old_persist_file = None - old_name = kwargs.get('old_name', None) - if old_name is not None: - LOG.debug('Detected old persistence file for volume ' - '%(vol)s at %(old_name)s', - {'vol': vol_id, 'old_name': old_name}) - old_persist_file = os.path.join(volumes_dir, old_name) - - try: - # With the persistent tgts we create them - # by creating the entry in the persist file - # and then doing an update to get the target - # created. - - self._do_tgt_update(name) - except putils.ProcessExecutionError as e: - if "target already exists" in e.stderr: - # Adding the additional Warning message below for a clear - # ER marker (Ref bug: #1398078). 
- LOG.warning('Could not create target because ' - 'it already exists for volume: %s', vol_id) - LOG.debug('Exception was: %s', e) - - else: - LOG.error("Failed to create iscsi target for Volume " - "ID: %(vol_id)s: %(e)s", - {'vol_id': vol_id, 'e': e}) - - # Don't forget to remove the persistent file we created - os.unlink(volume_path) - raise exception.ISCSITargetCreateFailed(volume_id=vol_id) - - # Grab targets list for debug - # Consider adding a check for lun 0 and 1 for tgtadm - # before considering this as valid - (out, err) = utils.execute('tgtadm', - '--lld', - 'iscsi', - '--op', - 'show', - '--mode', - 'target', - run_as_root=True) - LOG.debug("Targets after update: %s", out) - - iqn = '%s%s' % (self.iscsi_target_prefix, vol_id) - tid = self._get_target(iqn) - if tid is None: - LOG.warning("Failed to create iscsi target for Volume " - "ID: %(vol_id)s. It could be caused by problem " - "with concurrency. " - "Also please ensure your tgtd config " - "file contains 'include %(volumes_dir)s/*'", - {'vol_id': vol_id, - 'volumes_dir': volumes_dir, }) - raise exception.NotFound() - - # NOTE(jdg): Sometimes we have some issues with the backing lun - # not being created, believe this is due to a device busy - # or something related, so we're going to add some code - # here that verifies the backing lun (lun 1) was created - # and we'll try and recreate it if it's not there - if not self._verify_backing_lun(iqn, tid): - try: - self._recreate_backing_lun(iqn, tid, name, path) - except putils.ProcessExecutionError: - os.unlink(volume_path) - raise exception.ISCSITargetCreateFailed(volume_id=vol_id) - - # Finally check once more and if no go, fail and punt - if not self._verify_backing_lun(iqn, tid): - os.unlink(volume_path) - raise exception.ISCSITargetCreateFailed(volume_id=vol_id) - - if old_persist_file is not None: - fileutils.delete_if_exists(old_persist_file) - - return tid - - def remove_iscsi_target(self, tid, lun, vol_id, vol_name, **kwargs): - 
LOG.info('Removing iscsi_target for Volume ID: %s', vol_id) - vol_uuid_file = vol_name - volume_path = os.path.join(self.volumes_dir, vol_uuid_file) - if not os.path.exists(volume_path): - LOG.warning('Volume path %s does not exist, ' - 'nothing to remove.', volume_path) - return - - if os.path.isfile(volume_path): - iqn = '%s%s' % (self.iscsi_target_prefix, - vol_uuid_file) - else: - raise exception.ISCSITargetRemoveFailed(volume_id=vol_id) - try: - # NOTE(vish): --force is a workaround for bug: - # https://bugs.launchpad.net/cinder/+bug/1159948 - utils.execute('tgt-admin', - '--force', - '--delete', - iqn, - run_as_root=True) - except putils.ProcessExecutionError as e: - non_fatal_errors = ("can't find the target", - "access control rule does not exist") - - if any(error in e.stderr for error in non_fatal_errors): - LOG.warning("Failed target removal because target or " - "ACL's couldn't be found for iqn: %s.", iqn) - else: - LOG.error("Failed to remove iscsi target for Volume " - "ID: %(vol_id)s: %(e)s", - {'vol_id': vol_id, 'e': e}) - raise exception.ISCSITargetRemoveFailed(volume_id=vol_id) - # NOTE(jdg): There's a bug in some versions of tgt that - # will sometimes fail silently when using the force flag - # https://bugs.launchpad.net/ubuntu/+source/tgt/+bug/1305343 - # For now work-around by checking if the target was deleted, - # if it wasn't, try again without the force. 
- - # This will NOT do any good for the case of mutliple sessions - # which the force was aded for but it will however address - # the cases pointed out in bug: - # https://bugs.launchpad.net/cinder/+bug/1304122 - if self._get_target(iqn): - try: - LOG.warning('Silent failure of target removal ' - 'detected, retry....') - utils.execute('tgt-admin', - '--delete', - iqn, - run_as_root=True) - except putils.ProcessExecutionError as e: - LOG.error("Failed to remove iscsi target for Volume " - "ID: %(vol_id)s: %(e)s", - {'vol_id': vol_id, 'e': e}) - raise exception.ISCSITargetRemoveFailed(volume_id=vol_id) - - # NOTE(jdg): This *should* be there still but incase - # it's not we don't care, so just ignore it if was - # somehow deleted between entry of this method - # and here - if os.path.exists(volume_path): - os.unlink(volume_path) - else: - LOG.debug('Volume path %s not found at end, ' - 'of remove_iscsi_target.', volume_path) diff --git a/cinder/volume/throttling.py b/cinder/volume/throttling.py deleted file mode 100644 index 39cbbeb99..000000000 --- a/cinder/volume/throttling.py +++ /dev/null @@ -1,130 +0,0 @@ -# Copyright (c) 2015 Hitachi Data Systems, Inc. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -"""Volume copy throttling helpers.""" - - -import contextlib - -from oslo_concurrency import processutils -from oslo_log import log as logging - -from cinder import exception -from cinder import utils - - -LOG = logging.getLogger(__name__) - - -class Throttle(object): - """Base class for throttling disk I/O bandwidth""" - - DEFAULT = None - - @staticmethod - def set_default(throttle): - Throttle.DEFAULT = throttle - - @staticmethod - def get_default(): - return Throttle.DEFAULT or Throttle() - - def __init__(self, prefix=None): - self.prefix = prefix or [] - - @contextlib.contextmanager - def subcommand(self, srcpath, dstpath): - """Sub-command that reads from srcpath and writes to dstpath. - - Throttle disk I/O bandwidth used by a sub-command, such as 'dd', - that reads from srcpath and writes to dstpath. The sub-command - must be executed with the generated prefix command. - """ - yield {'prefix': self.prefix} - - -class BlkioCgroup(Throttle): - """Throttle disk I/O bandwidth using blkio cgroups.""" - - def __init__(self, bps_limit, cgroup_name): - self.bps_limit = bps_limit - self.cgroup = cgroup_name - self.srcdevs = {} - self.dstdevs = {} - - try: - utils.execute('cgcreate', '-g', 'blkio:%s' % self.cgroup, - run_as_root=True) - except processutils.ProcessExecutionError: - LOG.error('Failed to create blkio cgroup \'%(name)s\'.', - {'name': cgroup_name}) - raise - - def _get_device_number(self, path): - try: - return utils.get_blkdev_major_minor(path) - except exception.Error as e: - LOG.error('Failed to get device number for throttling: ' - '%(error)s', {'error': e}) - - def _limit_bps(self, rw, dev, bps): - try: - utils.execute('cgset', '-r', 'blkio.throttle.%s_bps_device=%s %d' - % (rw, dev, bps), self.cgroup, run_as_root=True) - except processutils.ProcessExecutionError: - LOG.warning('Failed to setup blkio cgroup to throttle the ' - 'device \'%(device)s\'.', {'device': dev}) - - def _set_limits(self, rw, devs): - total = sum(devs.values()) - for dev in 
sorted(devs): - self._limit_bps(rw, dev, self.bps_limit * devs[dev] / total) - - @utils.synchronized('BlkioCgroup') - def _inc_device(self, srcdev, dstdev): - if srcdev: - self.srcdevs[srcdev] = self.srcdevs.get(srcdev, 0) + 1 - self._set_limits('read', self.srcdevs) - if dstdev: - self.dstdevs[dstdev] = self.dstdevs.get(dstdev, 0) + 1 - self._set_limits('write', self.dstdevs) - - @utils.synchronized('BlkioCgroup') - def _dec_device(self, srcdev, dstdev): - if srcdev: - self.srcdevs[srcdev] -= 1 - if self.srcdevs[srcdev] == 0: - del self.srcdevs[srcdev] - self._set_limits('read', self.srcdevs) - if dstdev: - self.dstdevs[dstdev] -= 1 - if self.dstdevs[dstdev] == 0: - del self.dstdevs[dstdev] - self._set_limits('write', self.dstdevs) - - @contextlib.contextmanager - def subcommand(self, srcpath, dstpath): - srcdev = self._get_device_number(srcpath) - dstdev = self._get_device_number(dstpath) - - if srcdev is None and dstdev is None: - yield {'prefix': []} - return - - self._inc_device(srcdev, dstdev) - try: - yield {'prefix': ['cgexec', '-g', 'blkio:%s' % self.cgroup]} - finally: - self._dec_device(srcdev, dstdev) diff --git a/cinder/volume/utils.py b/cinder/volume/utils.py deleted file mode 100644 index 3f556a5c1..000000000 --- a/cinder/volume/utils.py +++ /dev/null @@ -1,937 +0,0 @@ -# Copyright (c) 2012 OpenStack Foundation -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -"""Volume-related Utilities and helpers.""" - - -import ast -import functools -import json -import math -import operator -from os import urandom -import re -import time -import uuid - -import eventlet -from eventlet import tpool -from oslo_concurrency import processutils -from oslo_config import cfg -from oslo_log import log as logging -from oslo_utils import strutils -from oslo_utils import timeutils -from oslo_utils import units -from random import shuffle -import six -from six.moves import range - -from cinder.brick.local_dev import lvm as brick_lvm -from cinder import context -from cinder import db -from cinder import exception -from cinder.i18n import _ -from cinder import objects -from cinder import rpc -from cinder import utils -from cinder.volume import group_types -from cinder.volume import throttling -from cinder.volume import volume_types - - -CONF = cfg.CONF - -LOG = logging.getLogger(__name__) - - -def null_safe_str(s): - return str(s) if s else '' - - -def _usage_from_volume(context, volume_ref, **kw): - now = timeutils.utcnow() - launched_at = volume_ref['launched_at'] or now - created_at = volume_ref['created_at'] or now - volume_status = volume_ref['status'] - if volume_status == 'error_managing_deleting': - volume_status = 'deleting' - usage_info = dict( - tenant_id=volume_ref['project_id'], - host=volume_ref['host'], - user_id=volume_ref['user_id'], - availability_zone=volume_ref['availability_zone'], - volume_id=volume_ref['id'], - volume_type=volume_ref['volume_type_id'], - display_name=volume_ref['display_name'], - launched_at=launched_at.isoformat(), - created_at=created_at.isoformat(), - status=volume_status, - snapshot_id=volume_ref['snapshot_id'], - size=volume_ref['size'], - replication_status=volume_ref['replication_status'], - replication_extended_status=volume_ref['replication_extended_status'], - replication_driver_data=volume_ref['replication_driver_data'], - metadata=volume_ref.get('volume_metadata'),) - - usage_info.update(kw) - 
try: - attachments = db.volume_attachment_get_all_by_volume_id( - context, volume_ref['id']) - usage_info['volume_attachment'] = attachments - - glance_meta = db.volume_glance_metadata_get(context, volume_ref['id']) - if glance_meta: - usage_info['glance_metadata'] = glance_meta - except exception.GlanceMetadataNotFound: - pass - except exception.VolumeNotFound: - LOG.debug("Can not find volume %s at notify usage", volume_ref['id']) - - return usage_info - - -def _usage_from_backup(backup, **kw): - num_dependent_backups = backup.num_dependent_backups - usage_info = dict(tenant_id=backup.project_id, - user_id=backup.user_id, - availability_zone=backup.availability_zone, - backup_id=backup.id, - host=backup.host, - display_name=backup.display_name, - created_at=str(backup.created_at), - status=backup.status, - volume_id=backup.volume_id, - size=backup.size, - service_metadata=backup.service_metadata, - service=backup.service, - fail_reason=backup.fail_reason, - parent_id=backup.parent_id, - num_dependent_backups=num_dependent_backups, - snapshot_id=backup.snapshot_id, - ) - - usage_info.update(kw) - return usage_info - - -@utils.if_notifications_enabled -def notify_about_volume_usage(context, volume, event_suffix, - extra_usage_info=None, host=None): - if not host: - host = CONF.host - - if not extra_usage_info: - extra_usage_info = {} - - usage_info = _usage_from_volume(context, volume, **extra_usage_info) - - rpc.get_notifier("volume", host).info(context, 'volume.%s' % event_suffix, - usage_info) - - -@utils.if_notifications_enabled -def notify_about_backup_usage(context, backup, event_suffix, - extra_usage_info=None, - host=None): - if not host: - host = CONF.host - - if not extra_usage_info: - extra_usage_info = {} - - usage_info = _usage_from_backup(backup, **extra_usage_info) - - rpc.get_notifier("backup", host).info(context, 'backup.%s' % event_suffix, - usage_info) - - -def _usage_from_snapshot(snapshot, context, **extra_usage_info): - # (niedbalski) a 
snapshot might be related to a deleted - # volume, if that's the case, the volume information is still - # required for filling the usage_info, so we enforce to read - # the volume data even if the volume has been deleted. - context.read_deleted = "yes" - volume = db.volume_get(context, snapshot.volume_id) - usage_info = { - 'tenant_id': snapshot.project_id, - 'user_id': snapshot.user_id, - 'availability_zone': volume['availability_zone'], - 'volume_id': snapshot.volume_id, - 'volume_size': snapshot.volume_size, - 'snapshot_id': snapshot.id, - 'display_name': snapshot.display_name, - 'created_at': str(snapshot.created_at), - 'status': snapshot.status, - 'deleted': null_safe_str(snapshot.deleted), - 'metadata': null_safe_str(snapshot.metadata), - } - - usage_info.update(extra_usage_info) - return usage_info - - -@utils.if_notifications_enabled -def notify_about_snapshot_usage(context, snapshot, event_suffix, - extra_usage_info=None, host=None): - if not host: - host = CONF.host - - if not extra_usage_info: - extra_usage_info = {} - - usage_info = _usage_from_snapshot(snapshot, context, **extra_usage_info) - - rpc.get_notifier('snapshot', host).info(context, - 'snapshot.%s' % event_suffix, - usage_info) - - -def _usage_from_capacity(capacity, **extra_usage_info): - - capacity_info = { - 'name_to_id': capacity['name_to_id'], - 'total': capacity['total'], - 'free': capacity['free'], - 'allocated': capacity['allocated'], - 'provisioned': capacity['provisioned'], - 'virtual_free': capacity['virtual_free'], - 'reported_at': capacity['reported_at'] - } - - capacity_info.update(extra_usage_info) - return capacity_info - - -@utils.if_notifications_enabled -def notify_about_capacity_usage(context, capacity, suffix, - extra_usage_info=None, host=None): - if not host: - host = CONF.host - - if not extra_usage_info: - extra_usage_info = {} - - usage_info = _usage_from_capacity(capacity, **extra_usage_info) - - rpc.get_notifier('capacity', host).info(context, - 'capacity.%s' % 
suffix, - usage_info) - - -@utils.if_notifications_enabled -def notify_about_replication_usage(context, volume, suffix, - extra_usage_info=None, host=None): - if not host: - host = CONF.host - - if not extra_usage_info: - extra_usage_info = {} - - usage_info = _usage_from_volume(context, volume, - **extra_usage_info) - - rpc.get_notifier('replication', host).info(context, - 'replication.%s' % suffix, - usage_info) - - -@utils.if_notifications_enabled -def notify_about_replication_error(context, volume, suffix, - extra_error_info=None, host=None): - if not host: - host = CONF.host - - if not extra_error_info: - extra_error_info = {} - - usage_info = _usage_from_volume(context, volume, - **extra_error_info) - - rpc.get_notifier('replication', host).error(context, - 'replication.%s' % suffix, - usage_info) - - -def _usage_from_consistencygroup(group_ref, **kw): - usage_info = dict(tenant_id=group_ref.project_id, - user_id=group_ref.user_id, - availability_zone=group_ref.availability_zone, - consistencygroup_id=group_ref.id, - name=group_ref.name, - created_at=group_ref.created_at.isoformat(), - status=group_ref.status) - - usage_info.update(kw) - return usage_info - - -@utils.if_notifications_enabled -def notify_about_consistencygroup_usage(context, group, event_suffix, - extra_usage_info=None, host=None): - if not host: - host = CONF.host - - if not extra_usage_info: - extra_usage_info = {} - - usage_info = _usage_from_consistencygroup(group, - **extra_usage_info) - - rpc.get_notifier("consistencygroup", host).info( - context, - 'consistencygroup.%s' % event_suffix, - usage_info) - - -def _usage_from_group(group_ref, **kw): - usage_info = dict(tenant_id=group_ref.project_id, - user_id=group_ref.user_id, - availability_zone=group_ref.availability_zone, - group_id=group_ref.id, - group_type=group_ref.group_type_id, - name=group_ref.name, - created_at=group_ref.created_at.isoformat(), - status=group_ref.status) - - usage_info.update(kw) - return usage_info - - 
-@utils.if_notifications_enabled -def notify_about_group_usage(context, group, event_suffix, - extra_usage_info=None, host=None): - if not host: - host = CONF.host - - if not extra_usage_info: - extra_usage_info = {} - - usage_info = _usage_from_group(group, - **extra_usage_info) - - rpc.get_notifier("group", host).info( - context, - 'group.%s' % event_suffix, - usage_info) - - -def _usage_from_cgsnapshot(cgsnapshot, **kw): - usage_info = dict( - tenant_id=cgsnapshot.project_id, - user_id=cgsnapshot.user_id, - cgsnapshot_id=cgsnapshot.id, - name=cgsnapshot.name, - consistencygroup_id=cgsnapshot.consistencygroup_id, - created_at=cgsnapshot.created_at.isoformat(), - status=cgsnapshot.status) - - usage_info.update(kw) - return usage_info - - -def _usage_from_group_snapshot(group_snapshot, **kw): - usage_info = dict( - tenant_id=group_snapshot.project_id, - user_id=group_snapshot.user_id, - group_snapshot_id=group_snapshot.id, - name=group_snapshot.name, - group_id=group_snapshot.group_id, - group_type=group_snapshot.group_type_id, - created_at=group_snapshot.created_at.isoformat(), - status=group_snapshot.status) - - usage_info.update(kw) - return usage_info - - -@utils.if_notifications_enabled -def notify_about_cgsnapshot_usage(context, cgsnapshot, event_suffix, - extra_usage_info=None, host=None): - if not host: - host = CONF.host - - if not extra_usage_info: - extra_usage_info = {} - - usage_info = _usage_from_cgsnapshot(cgsnapshot, - **extra_usage_info) - - rpc.get_notifier("cgsnapshot", host).info( - context, - 'cgsnapshot.%s' % event_suffix, - usage_info) - - -@utils.if_notifications_enabled -def notify_about_group_snapshot_usage(context, group_snapshot, event_suffix, - extra_usage_info=None, host=None): - if not host: - host = CONF.host - - if not extra_usage_info: - extra_usage_info = {} - - usage_info = _usage_from_group_snapshot(group_snapshot, - **extra_usage_info) - - rpc.get_notifier("group_snapshot", host).info( - context, - 'group_snapshot.%s' % 
event_suffix, - usage_info) - - -def _check_blocksize(blocksize): - - # Check if volume_dd_blocksize is valid - try: - # Rule out zero-sized/negative/float dd blocksize which - # cannot be caught by strutils - if blocksize.startswith(('-', '0')) or '.' in blocksize: - raise ValueError - strutils.string_to_bytes('%sB' % blocksize) - except ValueError: - LOG.warning("Incorrect value error: %(blocksize)s, " - "it may indicate that \'volume_dd_blocksize\' " - "was configured incorrectly. Fall back to default.", - {'blocksize': blocksize}) - # Fall back to default blocksize - CONF.clear_override('volume_dd_blocksize') - blocksize = CONF.volume_dd_blocksize - - return blocksize - - -def check_for_odirect_support(src, dest, flag='oflag=direct'): - - # Check whether O_DIRECT is supported - try: - # iflag=direct and if=/dev/zero combination does not work - # error: dd: failed to open '/dev/zero': Invalid argument - if (src == '/dev/zero' and flag == 'iflag=direct'): - return False - else: - utils.execute('dd', 'count=0', 'if=%s' % src, - 'of=%s' % dest, - flag, run_as_root=True) - return True - except processutils.ProcessExecutionError: - return False - - -def _copy_volume_with_path(prefix, srcstr, deststr, size_in_m, blocksize, - sync=False, execute=utils.execute, ionice=None, - sparse=False): - cmd = prefix[:] - - if ionice: - cmd.extend(('ionice', ionice)) - - blocksize = _check_blocksize(blocksize) - size_in_bytes = size_in_m * units.Mi - - cmd.extend(('dd', 'if=%s' % srcstr, 'of=%s' % deststr, - 'count=%d' % size_in_bytes, 'bs=%s' % blocksize)) - - # Use O_DIRECT to avoid thrashing the system buffer cache - odirect = check_for_odirect_support(srcstr, deststr, 'iflag=direct') - - cmd.append('iflag=count_bytes,direct' if odirect else 'iflag=count_bytes') - - if check_for_odirect_support(srcstr, deststr, 'oflag=direct'): - cmd.append('oflag=direct') - odirect = True - - # If the volume is being unprovisioned then - # request the data is persisted before returning, - # so 
that it's not discarded from the cache. - conv = [] - if sync and not odirect: - conv.append('fdatasync') - if sparse: - conv.append('sparse') - if conv: - conv_options = 'conv=' + ",".join(conv) - cmd.append(conv_options) - - # Perform the copy - start_time = timeutils.utcnow() - execute(*cmd, run_as_root=True) - duration = timeutils.delta_seconds(start_time, timeutils.utcnow()) - - # NOTE(jdg): use a default of 1, mostly for unit test, but in - # some incredible event this is 0 (cirros image?) don't barf - if duration < 1: - duration = 1 - mbps = (size_in_m / duration) - LOG.debug("Volume copy details: src %(src)s, dest %(dest)s, " - "size %(sz).2f MB, duration %(duration).2f sec", - {"src": srcstr, - "dest": deststr, - "sz": size_in_m, - "duration": duration}) - LOG.info("Volume copy %(size_in_m).2f MB at %(mbps).2f MB/s", - {'size_in_m': size_in_m, 'mbps': mbps}) - - -def _open_volume_with_path(path, mode): - try: - with utils.temporary_chown(path): - handle = open(path, mode) - return handle - except Exception: - LOG.error("Failed to open volume from %(path)s.", {'path': path}) - - -def _transfer_data(src, dest, length, chunk_size): - """Transfer data between files (Python IO objects).""" - - chunks = int(math.ceil(length / chunk_size)) - remaining_length = length - - LOG.debug("%(chunks)s chunks of %(bytes)s bytes to be transferred.", - {'chunks': chunks, 'bytes': chunk_size}) - - for chunk in range(0, chunks): - before = time.time() - data = tpool.execute(src.read, min(chunk_size, remaining_length)) - - # If we have reached end of source, discard any extraneous bytes from - # destination volume if trim is enabled and stop writing. 
- if data == b'': - break - - tpool.execute(dest.write, data) - remaining_length -= len(data) - delta = (time.time() - before) - rate = (chunk_size / delta) / units.Ki - LOG.debug("Transferred chunk %(chunk)s of %(chunks)s (%(rate)dK/s).", - {'chunk': chunk + 1, 'chunks': chunks, 'rate': rate}) - - # yield to any other pending operations - eventlet.sleep(0) - - tpool.execute(dest.flush) - - -def _copy_volume_with_file(src, dest, size_in_m): - src_handle = src - if isinstance(src, six.string_types): - src_handle = _open_volume_with_path(src, 'rb') - - dest_handle = dest - if isinstance(dest, six.string_types): - dest_handle = _open_volume_with_path(dest, 'wb') - - if not src_handle: - raise exception.DeviceUnavailable( - _("Failed to copy volume, source device unavailable.")) - - if not dest_handle: - raise exception.DeviceUnavailable( - _("Failed to copy volume, destination device unavailable.")) - - start_time = timeutils.utcnow() - - _transfer_data(src_handle, dest_handle, size_in_m * units.Mi, units.Mi * 4) - - duration = max(1, timeutils.delta_seconds(start_time, timeutils.utcnow())) - - if isinstance(src, six.string_types): - src_handle.close() - if isinstance(dest, six.string_types): - dest_handle.close() - - mbps = (size_in_m / duration) - LOG.info("Volume copy completed (%(size_in_m).2f MB at " - "%(mbps).2f MB/s).", - {'size_in_m': size_in_m, 'mbps': mbps}) - - -def copy_volume(src, dest, size_in_m, blocksize, sync=False, - execute=utils.execute, ionice=None, throttle=None, - sparse=False): - """Copy data from the source volume to the destination volume. - - The parameters 'src' and 'dest' are both typically of type str, which - represents the path to each volume on the filesystem. Connectors can - optionally return a volume handle of type RawIOBase for volumes that are - not available on the local filesystem for open/close operations. 
- - If either 'src' or 'dest' are not of type str, then they are assumed to be - of type RawIOBase or any derivative that supports file operations such as - read and write. In this case, the handles are treated as file handles - instead of file paths and, at present moment, throttling is unavailable. - """ - - if (isinstance(src, six.string_types) and - isinstance(dest, six.string_types)): - if not throttle: - throttle = throttling.Throttle.get_default() - with throttle.subcommand(src, dest) as throttle_cmd: - _copy_volume_with_path(throttle_cmd['prefix'], src, dest, - size_in_m, blocksize, sync=sync, - execute=execute, ionice=ionice, - sparse=sparse) - else: - _copy_volume_with_file(src, dest, size_in_m) - - -def clear_volume(volume_size, volume_path, volume_clear=None, - volume_clear_size=None, volume_clear_ionice=None, - throttle=None): - """Unprovision old volumes to prevent data leaking between users.""" - if volume_clear is None: - volume_clear = CONF.volume_clear - - if volume_clear_size is None: - volume_clear_size = CONF.volume_clear_size - - if volume_clear_size == 0: - volume_clear_size = volume_size - - if volume_clear_ionice is None: - volume_clear_ionice = CONF.volume_clear_ionice - - LOG.info("Performing secure delete on volume: %s", volume_path) - - # We pass sparse=False explicitly here so that zero blocks are not - # skipped in order to clear the volume. 
- if volume_clear == 'zero': - return copy_volume('/dev/zero', volume_path, volume_clear_size, - CONF.volume_dd_blocksize, - sync=True, execute=utils.execute, - ionice=volume_clear_ionice, - throttle=throttle, sparse=False) - else: - raise exception.InvalidConfigurationValue( - option='volume_clear', - value=volume_clear) - - -def supports_thin_provisioning(): - return brick_lvm.LVM.supports_thin_provisioning( - utils.get_root_helper()) - - -def get_all_physical_volumes(vg_name=None): - return brick_lvm.LVM.get_all_physical_volumes( - utils.get_root_helper(), - vg_name) - - -def get_all_volume_groups(vg_name=None): - return brick_lvm.LVM.get_all_volume_groups( - utils.get_root_helper(), - vg_name) - -# Default symbols to use for passwords. Avoids visually confusing characters. -# ~6 bits per symbol -DEFAULT_PASSWORD_SYMBOLS = ('23456789', # Removed: 0,1 - 'ABCDEFGHJKLMNPQRSTUVWXYZ', # Removed: I, O - 'abcdefghijkmnopqrstuvwxyz') # Removed: l - - -def generate_password(length=16, symbolgroups=DEFAULT_PASSWORD_SYMBOLS): - """Generate a random password from the supplied symbol groups. - - At least one symbol from each group will be included. Unpredictable - results if length is less than the number of symbol groups. - - Believed to be reasonably secure (with a reasonable password length!) - - """ - # NOTE(jerdfelt): Some password policies require at least one character - # from each group of symbols, so start off with one random character - # from each symbol group - - bytes = 1 # Number of random bytes to generate for each choice - - password = [s[ord(urandom(bytes)) % len(s)] - for s in symbolgroups] - # If length < len(symbolgroups), the leading characters will only - # be from the first length groups. Try our best to not be predictable - # by shuffling and then truncating. 
- shuffle(password) - password = password[:length] - length -= len(password) - - # then fill with random characters from all symbol groups - symbols = ''.join(symbolgroups) - password.extend( - [symbols[ord(urandom(bytes)) % len(symbols)] - for _i in range(length)]) - - # finally shuffle to ensure first x characters aren't from a - # predictable group - shuffle(password) - - return ''.join(password) - - -def generate_username(length=20, symbolgroups=DEFAULT_PASSWORD_SYMBOLS): - # Use the same implementation as the password generation. - return generate_password(length, symbolgroups) - - -DEFAULT_POOL_NAME = '_pool0' - - -def extract_host(host, level='backend', default_pool_name=False): - """Extract Host, Backend or Pool information from host string. - - :param host: String for host, which could include host@backend#pool info - :param level: Indicate which level of information should be extracted - from host string. Level can be 'host', 'backend' or 'pool', - default value is 'backend' - :param default_pool_name: this flag specify what to do if level == 'pool' - and there is no 'pool' info encoded in host - string. default_pool_name=True will return - DEFAULT_POOL_NAME, otherwise we return None. - Default value of this parameter is False. 
- :return: expected information, string or None - :raises: exception.InvalidVolume - - For example: - host = 'HostA@BackendB#PoolC' - ret = extract_host(host, 'host') - # ret is 'HostA' - ret = extract_host(host, 'backend') - # ret is 'HostA@BackendB' - ret = extract_host(host, 'pool') - # ret is 'PoolC' - - host = 'HostX@BackendY' - ret = extract_host(host, 'pool') - # ret is None - ret = extract_host(host, 'pool', True) - # ret is '_pool0' - """ - - if host is None: - msg = _("volume is not assigned to a host") - raise exception.InvalidVolume(reason=msg) - - if level == 'host': - # make sure pool is not included - hst = host.split('#')[0] - return hst.split('@')[0] - elif level == 'backend': - return host.split('#')[0] - elif level == 'pool': - lst = host.split('#') - if len(lst) == 2: - return lst[1] - elif default_pool_name is True: - return DEFAULT_POOL_NAME - else: - return None - - -def append_host(host, pool): - """Encode pool into host info.""" - if not host or not pool: - return host - - new_host = "#".join([host, pool]) - return new_host - - -def matching_backend_name(src_volume_type, volume_type): - if src_volume_type.get('volume_backend_name') and \ - volume_type.get('volume_backend_name'): - return src_volume_type.get('volume_backend_name') == \ - volume_type.get('volume_backend_name') - else: - return False - - -def hosts_are_equivalent(host_1, host_2): - # In case host_1 or host_2 are None - if not (host_1 and host_2): - return host_1 == host_2 - return extract_host(host_1) == extract_host(host_2) - - -def read_proc_mounts(): - """Read the /proc/mounts file. - - It's a dummy function but it eases the writing of unit tests as mocking - __builtin__open() for a specific file only is not trivial. 
- """ - with open('/proc/mounts') as mounts: - return mounts.readlines() - - -def extract_id_from_volume_name(vol_name): - regex = re.compile( - CONF.volume_name_template.replace('%s', '(?P.+)')) - match = regex.match(vol_name) - return match.group('uuid') if match else None - - -def check_already_managed_volume(vol_id): - """Check cinder db for already managed volume. - - :param vol_id: volume id parameter - :returns: bool -- return True, if db entry with specified - volume id exists, otherwise return False - """ - try: - return (vol_id and isinstance(vol_id, six.string_types) and - uuid.UUID(vol_id, version=4) and - objects.Volume.exists(context.get_admin_context(), vol_id)) - except ValueError: - return False - - -def extract_id_from_snapshot_name(snap_name): - """Return a snapshot's ID from its name on the backend.""" - regex = re.compile( - CONF.snapshot_name_template.replace('%s', '(?P.+)')) - match = regex.match(snap_name) - return match.group('uuid') if match else None - - -def paginate_entries_list(entries, marker, limit, offset, sort_keys, - sort_dirs): - """Paginate a list of entries. - - :param entries: list of dictionaries - :marker: The last element previously returned - :limit: The maximum number of items to return - :offset: The number of items to skip from the marker or from the first - element. 
- :sort_keys: A list of keys in the dictionaries to sort by - :sort_dirs: A list of sort directions, where each is either 'asc' or 'dec' - """ - comparers = [(operator.itemgetter(key.strip()), multiplier) - for (key, multiplier) in zip(sort_keys, sort_dirs)] - - def comparer(left, right): - for fn, d in comparers: - left_val = fn(left) - right_val = fn(right) - if isinstance(left_val, dict): - left_val = sorted(left_val.values())[0] - if isinstance(right_val, dict): - right_val = sorted(right_val.values())[0] - if left_val == right_val: - continue - if d == 'asc': - return -1 if left_val < right_val else 1 - else: - return -1 if left_val > right_val else 1 - else: - return 0 - sorted_entries = sorted(entries, key=functools.cmp_to_key(comparer)) - - start_index = 0 - if offset is None: - offset = 0 - if marker: - if not isinstance(marker, dict): - try: - marker = json.loads(marker) - except ValueError: - msg = _('marker %s can not be analysed, please use json like ' - 'format') % marker - raise exception.InvalidInput(reason=msg) - start_index = -1 - for i, entry in enumerate(sorted_entries): - if entry['reference'] == marker: - start_index = i + 1 - break - if start_index < 0: - msg = _('marker not found: %s') % marker - raise exception.InvalidInput(reason=msg) - range_end = start_index + limit - return sorted_entries[start_index + offset:range_end + offset] - - -def convert_config_string_to_dict(config_string): - """Convert config file replication string to a dict. - - The only supported form is as follows: - "{'key-1'='val-1' 'key-2'='val-2'...}" - - :param config_string: Properly formatted string to convert to dict. 
- :response: dict of string values - """ - - resultant_dict = {} - - try: - st = config_string.replace("=", ":") - st = st.replace(" ", ", ") - resultant_dict = ast.literal_eval(st) - except Exception: - LOG.warning("Error encountered translating config_string: " - "%(config_string)s to dict", - {'config_string': config_string}) - - return resultant_dict - - -def create_encryption_key(context, key_manager, volume_type_id): - encryption_key_id = None - if volume_types.is_encrypted(context, volume_type_id): - volume_type_encryption = ( - volume_types.get_volume_type_encryption(context, - volume_type_id)) - cipher = volume_type_encryption.cipher - length = volume_type_encryption.key_size - algorithm = cipher.split('-')[0] if cipher else None - encryption_key_id = key_manager.create_key( - context, - algorithm=algorithm, - length=length) - return encryption_key_id - - -def is_replicated_str(str): - spec = (str or '').split() - return (len(spec) == 2 and - spec[0] == '' and strutils.bool_from_string(spec[1])) - - -def is_replicated_spec(extra_specs): - return (extra_specs and - is_replicated_str(extra_specs.get('replication_enabled'))) - - -def group_get_by_id(group_id): - ctxt = context.get_admin_context() - group = db.group_get(ctxt, group_id) - return group - - -def is_group_a_cg_snapshot_type(group_or_snap): - LOG.debug("Checking if %s is a consistent snapshot group", - group_or_snap) - if group_or_snap["group_type_id"] is not None: - spec = group_types.get_group_type_specs( - group_or_snap["group_type_id"], - key="consistent_group_snapshot_enabled" - ) - return spec == " True" - return False - - -def is_group_a_type(group, key): - if group.group_type_id is not None: - spec = group_types.get_group_type_specs( - group.group_type_id, key=key - ) - return spec == " True" - return False diff --git a/cinder/volume/volume_types.py b/cinder/volume/volume_types.py deleted file mode 100644 index 5915357b4..000000000 --- a/cinder/volume/volume_types.py +++ /dev/null @@ -1,377 
+0,0 @@ -# Copyright (c) 2011 Zadara Storage Inc. -# Copyright (c) 2011 OpenStack Foundation -# Copyright 2010 United States Government as represented by the -# Administrator of the National Aeronautics and Space Administration. -# Copyright (c) 2010 Citrix Systems, Inc. -# Copyright 2011 Ken Pepple -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -"""Built-in volume type properties.""" - - -from oslo_config import cfg -from oslo_db import exception as db_exc -from oslo_log import log as logging -from oslo_utils import uuidutils - -from cinder import context -from cinder import db -from cinder import exception -from cinder.i18n import _ -from cinder import quota -from cinder import rpc -from cinder import utils - -CONF = cfg.CONF -LOG = logging.getLogger(__name__) -QUOTAS = quota.QUOTAS -ENCRYPTION_IGNORED_FIELDS = ['volume_type_id', 'created_at', 'updated_at', - 'deleted_at'] - - -def create(context, - name, - extra_specs=None, - is_public=True, - projects=None, - description=None): - """Creates volume types.""" - extra_specs = extra_specs or {} - projects = projects or [] - elevated = context if context.is_admin else context.elevated() - try: - type_ref = db.volume_type_create(elevated, - dict(name=name, - extra_specs=extra_specs, - is_public=is_public, - description=description), - projects=projects) - except db_exc.DBError: - LOG.exception('DB error:') - raise exception.VolumeTypeCreateFailed(name=name, - extra_specs=extra_specs) - return type_ref - - -def 
update(context, id, name, description, is_public=None): - """Update volume type by id.""" - if id is None: - msg = _("id cannot be None") - raise exception.InvalidVolumeType(reason=msg) - elevated = context if context.is_admin else context.elevated() - old_volume_type = get_volume_type(elevated, id) - try: - db.volume_type_update(elevated, id, - dict(name=name, description=description, - is_public=is_public)) - # Rename resource in quota if volume type name is changed. - if name: - old_type_name = old_volume_type.get('name') - if old_type_name != name: - QUOTAS.update_quota_resource(elevated, - old_type_name, - name) - except db_exc.DBError: - LOG.exception('DB error:') - raise exception.VolumeTypeUpdateFailed(id=id) - - -def destroy(context, id): - """Marks volume types as deleted.""" - if id is None: - msg = _("id cannot be None") - raise exception.InvalidVolumeType(reason=msg) - elevated = context if context.is_admin else context.elevated() - return db.volume_type_destroy(elevated, id) - - -def get_all_types(context, inactive=0, filters=None, marker=None, - limit=None, sort_keys=None, sort_dirs=None, - offset=None, list_result=False): - """Get all non-deleted volume_types. - - Pass true as argument if you want deleted volume types returned also. 
- - """ - vol_types = db.volume_type_get_all(context, inactive, filters=filters, - marker=marker, limit=limit, - sort_keys=sort_keys, - sort_dirs=sort_dirs, offset=offset, - list_result=list_result) - return vol_types - - -def get_all_types_by_group(context, group_id): - """Get all volume_types in a group.""" - vol_types = db.volume_type_get_all_by_group(context, group_id) - return vol_types - - -def get_volume_type(ctxt, id, expected_fields=None): - """Retrieves single volume type by id.""" - if id is None: - msg = _("id cannot be None") - raise exception.InvalidVolumeType(reason=msg) - - if ctxt is None: - ctxt = context.get_admin_context() - - return db.volume_type_get(ctxt, id, expected_fields=expected_fields) - - -def get_by_name_or_id(context, identity): - """Retrieves volume type by id or name""" - if not uuidutils.is_uuid_like(identity): - return get_volume_type_by_name(context, identity) - return get_volume_type(context, identity) - - -def get_volume_type_by_name(context, name): - """Retrieves single volume type by name.""" - if name is None: - msg = _("name cannot be None") - raise exception.InvalidVolumeType(reason=msg) - - return db.volume_type_get_by_name(context, name) - - -def get_default_volume_type(): - """Get the default volume type.""" - name = CONF.default_volume_type - vol_type = {} - - if name is not None: - ctxt = context.get_admin_context() - try: - vol_type = get_volume_type_by_name(ctxt, name) - except exception.VolumeTypeNotFoundByName: - # Couldn't find volume type with the name in default_volume_type - # flag, record this issue and move on - # TODO(zhiteng) consider add notification to warn admin - LOG.exception('Default volume type is not found. 
' - 'Please check default_volume_type config:') - - return vol_type - - -def get_volume_type_extra_specs(volume_type_id, key=False): - volume_type = get_volume_type(context.get_admin_context(), - volume_type_id) - extra_specs = volume_type['extra_specs'] - if key: - if extra_specs.get(key): - return extra_specs.get(key) - else: - return False - else: - return extra_specs - - -def is_public_volume_type(context, volume_type_id): - """Return is_public boolean value of volume type""" - volume_type = db.volume_type_get(context, volume_type_id) - return volume_type['is_public'] - - -@utils.if_notifications_enabled -def notify_about_volume_type_access_usage(context, - volume_type_id, - project_id, - event_suffix, - host=None): - """Notify about successful usage type-access-(add/remove) command. - - :param context: security context - :param volume_type_id: volume type uuid - :param project_id: tenant uuid - :param event_suffix: name of called operation access-(add/remove) - :param host: hostname - """ - notifier_info = {'volume_type_id': volume_type_id, - 'project_id': project_id} - - if not host: - host = CONF.host - - notifier = rpc.get_notifier("volume_type_project", host) - notifier.info(context, - 'volume_type_project.%s' % event_suffix, - notifier_info) - - -def add_volume_type_access(context, volume_type_id, project_id): - """Add access to volume type for project_id.""" - if volume_type_id is None: - msg = _("volume_type_id cannot be None") - raise exception.InvalidVolumeType(reason=msg) - elevated = context if context.is_admin else context.elevated() - if is_public_volume_type(elevated, volume_type_id): - msg = _("Type access modification is not applicable to public volume " - "type.") - raise exception.InvalidVolumeType(reason=msg) - - db.volume_type_access_add(elevated, volume_type_id, project_id) - - notify_about_volume_type_access_usage(context, - volume_type_id, - project_id, - 'access.add') - - -def remove_volume_type_access(context, volume_type_id, 
project_id): - """Remove access to volume type for project_id.""" - if volume_type_id is None: - msg = _("volume_type_id cannot be None") - raise exception.InvalidVolumeType(reason=msg) - elevated = context if context.is_admin else context.elevated() - if is_public_volume_type(elevated, volume_type_id): - msg = _("Type access modification is not applicable to public volume " - "type.") - raise exception.InvalidVolumeType(reason=msg) - - db.volume_type_access_remove(elevated, volume_type_id, project_id) - - notify_about_volume_type_access_usage(context, - volume_type_id, - project_id, - 'access.remove') - - -def is_encrypted(context, volume_type_id): - return get_volume_type_encryption(context, volume_type_id) is not None - - -def get_volume_type_encryption(context, volume_type_id): - if volume_type_id is None: - return None - - encryption = db.volume_type_encryption_get(context, volume_type_id) - return encryption - - -def get_volume_type_qos_specs(volume_type_id): - """Get all qos specs for given volume type.""" - ctxt = context.get_admin_context() - res = db.volume_type_qos_specs_get(ctxt, - volume_type_id) - return res - - -def volume_types_diff(context, vol_type_id1, vol_type_id2): - """Returns a 'diff' of two volume types and whether they are equal. - - Returns a tuple of (diff, equal), where 'equal' is a boolean indicating - whether there is any difference, and 'diff' is a dictionary with the - following format: - - .. 
code-block:: default - - { - 'extra_specs': {'key1': (value_in_1st_vol_type, - value_in_2nd_vol_type), - 'key2': (value_in_1st_vol_type, - value_in_2nd_vol_type), - {...}} - 'qos_specs': {'key1': (value_in_1st_vol_type, - value_in_2nd_vol_type), - 'key2': (value_in_1st_vol_type, - value_in_2nd_vol_type), - {...}} - 'encryption': {'cipher': (value_in_1st_vol_type, - value_in_2nd_vol_type), - {'key_size': (value_in_1st_vol_type, - value_in_2nd_vol_type), - {...}} - } - """ - def _fix_qos_specs(qos_specs): - if qos_specs: - qos_specs.pop('id', None) - qos_specs.pop('name', None) - qos_specs.update(qos_specs.pop('specs', {})) - - def _fix_encryption_specs(encryption): - if encryption: - encryption = dict(encryption) - for param in ENCRYPTION_IGNORED_FIELDS: - encryption.pop(param, None) - return encryption - - def _dict_diff(dict1, dict2): - res = {} - equal = True - if dict1 is None: - dict1 = {} - if dict2 is None: - dict2 = {} - for k, v in dict1.items(): - res[k] = (v, dict2.get(k)) - if k not in dict2 or res[k][0] != res[k][1]: - equal = False - for k, v in dict2.items(): - res[k] = (dict1.get(k), v) - if k not in dict1 or res[k][0] != res[k][1]: - equal = False - return (res, equal) - - all_equal = True - diff = {} - vol_type_data = [] - for vol_type_id in (vol_type_id1, vol_type_id2): - if vol_type_id is None: - specs = {'extra_specs': None, - 'qos_specs': None, - 'encryption': None} - else: - specs = {} - vol_type = get_volume_type(context, vol_type_id) - specs['extra_specs'] = vol_type.get('extra_specs') - qos_specs = get_volume_type_qos_specs(vol_type_id) - specs['qos_specs'] = qos_specs.get('qos_specs') - _fix_qos_specs(specs['qos_specs']) - specs['encryption'] = get_volume_type_encryption(context, - vol_type_id) - specs['encryption'] = _fix_encryption_specs(specs['encryption']) - vol_type_data.append(specs) - - diff['extra_specs'], equal = _dict_diff(vol_type_data[0]['extra_specs'], - vol_type_data[1]['extra_specs']) - if not equal: - all_equal = False - 
diff['qos_specs'], equal = _dict_diff(vol_type_data[0]['qos_specs'], - vol_type_data[1]['qos_specs']) - if not equal: - all_equal = False - diff['encryption'], equal = _dict_diff(vol_type_data[0]['encryption'], - vol_type_data[1]['encryption']) - if not equal: - all_equal = False - - return (diff, all_equal) - - -def volume_types_encryption_changed(context, vol_type_id1, vol_type_id2): - """Return whether encryptions of two volume types are same.""" - def _get_encryption(enc): - enc = dict(enc) - for param in ENCRYPTION_IGNORED_FIELDS: - enc.pop(param, None) - return enc - - enc1 = get_volume_type_encryption(context, vol_type_id1) - enc2 = get_volume_type_encryption(context, vol_type_id2) - - enc1_filtered = _get_encryption(enc1) if enc1 else None - enc2_filtered = _get_encryption(enc2) if enc2 else None - return enc1_filtered != enc2_filtered diff --git a/cinder/wsgi/__init__.py b/cinder/wsgi/__init__.py deleted file mode 100644 index e69de29bb..000000000 diff --git a/cinder/wsgi/common.py b/cinder/wsgi/common.py deleted file mode 100644 index 9c30d5b13..000000000 --- a/cinder/wsgi/common.py +++ /dev/null @@ -1,152 +0,0 @@ -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -"""Utility methods for working with WSGI servers.""" - -import webob.dec -import webob.exc - -from cinder.i18n import _ - - -class Request(webob.Request): - pass - - -class Application(object): - """Base WSGI application wrapper. 
Subclasses need to implement __call__.""" - - @classmethod - def factory(cls, global_config, **local_config): - """Used for paste app factories in paste.deploy config files. - - Any local configuration (that is, values under the [app:APPNAME] - section of the paste config) will be passed into the `__init__` method - as kwargs. - - A hypothetical configuration would look like: - - [app:wadl] - latest_version = 1.3 - paste.app_factory = cinder.api.fancy_api:Wadl.factory - - which would result in a call to the `Wadl` class as - - import cinder.api.fancy_api - fancy_api.Wadl(latest_version='1.3') - - You could of course re-implement the `factory` method in subclasses, - but using the kwarg passing it shouldn't be necessary. - - """ - return cls(**local_config) - - def __call__(self, environ, start_response): - r"""Subclasses will probably want to implement __call__ like this: - - @webob.dec.wsgify(RequestClass=Request) - def __call__(self, req): - # Any of the following objects work as responses: - - # Option 1: simple string - res = 'message\n' - - # Option 2: a nicely formatted HTTP exception page - res = exc.HTTPForbidden(explanation='Nice try') - - # Option 3: a webob Response object (in case you need to play with - # headers, or you want to be treated like an iterable) - res = Response(); - res.app_iter = open('somefile') - - # Option 4: any wsgi app to be run next - res = self.application - - # Option 5: you can get a Response object for a wsgi app, too, to - # play with headers etc - res = req.get_response(self.application) - - # You can then just return your response... - return res - # ... or set req.response and return None. - req.response = res - - See the end of http://pythonpaste.org/webob/modules/dec.html - for more info. - - """ - raise NotImplementedError(_('You must implement __call__')) - - -class Middleware(Application): - """Base WSGI middleware. - - These classes require an application to be - initialized that will be called next. 
By default the middleware will - simply call its wrapped app, or you can override __call__ to customize its - behavior. - - """ - - @classmethod - def factory(cls, global_config, **local_config): - """Used for paste app factories in paste.deploy config files. - - Any local configuration (that is, values under the [filter:APPNAME] - section of the paste config) will be passed into the `__init__` method - as kwargs. - - A hypothetical configuration would look like: - - [filter:analytics] - redis_host = 127.0.0.1 - paste.filter_factory = cinder.api.analytics:Analytics.factory - - which would result in a call to the `Analytics` class as - - import cinder.api.analytics - analytics.Analytics(app_from_paste, redis_host='127.0.0.1') - - You could of course re-implement the `factory` method in subclasses, - but using the kwarg passing it shouldn't be necessary. - - """ - def _factory(app): - return cls(app, **local_config) - return _factory - - def __init__(self, application): - self.application = application - - def process_request(self, req): - """Called on each request. - - If this returns None, the next application down the stack will be - executed. If it returns a response then that response will be returned - and execution will stop here. - - """ - return None - - def process_response(self, response): - """Do whatever you'd like to the response.""" - return response - - @webob.dec.wsgify(RequestClass=Request) - def __call__(self, req): - response = self.process_request(req) - if response: - return response - response = req.get_response(self.application) - return self.process_response(response) diff --git a/cinder/wsgi/eventlet_server.py b/cinder/wsgi/eventlet_server.py deleted file mode 100644 index 85ac4badb..000000000 --- a/cinder/wsgi/eventlet_server.py +++ /dev/null @@ -1,57 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. 
You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -"""Methods for working with eventlet WSGI servers.""" - -from __future__ import print_function - -import socket - -from oslo_config import cfg -from oslo_service import wsgi -from oslo_utils import netutils - - -socket_opts = [ - cfg.BoolOpt('tcp_keepalive', - default=True, - help="Sets the value of TCP_KEEPALIVE (True/False) for each " - "server socket."), - cfg.IntOpt('tcp_keepalive_interval', - help="Sets the value of TCP_KEEPINTVL in seconds for each " - "server socket. Not supported on OS X."), - cfg.IntOpt('tcp_keepalive_count', - help="Sets the value of TCP_KEEPCNT for each " - "server socket. Not supported on OS X."), -] - - -CONF = cfg.CONF -CONF.register_opts(socket_opts) - - -class Server(wsgi.Server): - """Server class to manage a WSGI server, serving a WSGI application.""" - - def _set_socket_opts(self, _socket): - _socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1) - - # NOTE(praneshp): Call set_tcp_keepalive in oslo to set - # tcp keepalive parameters. Sockets can hang around forever - # without keepalive - netutils.set_tcp_keepalive(_socket, - self.conf.tcp_keepalive, - self.conf.tcp_keepidle, - self.conf.tcp_keepalive_count, - self.conf.tcp_keepalive_interval) - - return _socket diff --git a/cinder/wsgi/wsgi.py b/cinder/wsgi/wsgi.py deleted file mode 100644 index 7b2d71d71..000000000 --- a/cinder/wsgi/wsgi.py +++ /dev/null @@ -1,46 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -"""Cinder OS API WSGI application.""" - - -import sys -import warnings - -from cinder import objects - -warnings.simplefilter('once', DeprecationWarning) - -from oslo_config import cfg -from oslo_log import log as logging -from oslo_service import wsgi - -from cinder import i18n -i18n.enable_lazy() - -# Need to register global_opts -from cinder.common import config -from cinder import rpc -from cinder import version - -CONF = cfg.CONF - - -def initialize_application(): - objects.register_all() - CONF(sys.argv[1:], project='cinder', - version=version.version_string()) - logging.setup(CONF, "cinder") - config.set_middleware_defaults() - - rpc.init(CONF) - return wsgi.Loader(CONF).load_app(name='osapi_volume') diff --git a/cinder/zonemanager/__init__.py b/cinder/zonemanager/__init__.py deleted file mode 100644 index e69de29bb..000000000 diff --git a/cinder/zonemanager/drivers/__init__.py b/cinder/zonemanager/drivers/__init__.py deleted file mode 100644 index e69de29bb..000000000 diff --git a/cinder/zonemanager/drivers/brocade/__init__.py b/cinder/zonemanager/drivers/brocade/__init__.py deleted file mode 100644 index e69de29bb..000000000 diff --git a/cinder/zonemanager/drivers/brocade/brcd_fabric_opts.py b/cinder/zonemanager/drivers/brocade/brcd_fabric_opts.py deleted file mode 100644 index 46e98dfbc..000000000 --- a/cinder/zonemanager/drivers/brocade/brcd_fabric_opts.py +++ /dev/null @@ -1,68 +0,0 @@ -# (c) Copyright 2014 Brocade Communications Systems Inc. -# All Rights Reserved. 
-# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -# -from oslo_config import cfg -from oslo_log import log as logging - -from cinder.volume import configuration - -brcd_zone_opts = [ - cfg.StrOpt('fc_southbound_protocol', - default='HTTP', - choices=('SSH', 'HTTP', 'HTTPS'), - help='South bound connector for the fabric.'), - cfg.StrOpt('fc_fabric_address', - default='', - help='Management IP of fabric.'), - cfg.StrOpt('fc_fabric_user', - default='', - help='Fabric user ID.'), - cfg.StrOpt('fc_fabric_password', - default='', - help='Password for user.', - secret=True), - cfg.PortOpt('fc_fabric_port', - default=22, - help='Connecting port'), - cfg.StrOpt('fc_fabric_ssh_cert_path', - default='', - help='Local SSH certificate Path.'), - cfg.StrOpt('zoning_policy', - default='initiator-target', - help='Overridden zoning policy.'), - cfg.BoolOpt('zone_activate', - default=True, - help='Overridden zoning activation state.'), - cfg.StrOpt('zone_name_prefix', - default='openstack', - help='Overridden zone name prefix.'), - cfg.StrOpt('fc_virtual_fabric_id', - default=None, - help='Virtual Fabric ID.') -] - -CONF = cfg.CONF -CONF.register_opts(brcd_zone_opts, group='BRCD_FABRIC_EXAMPLE') -LOG = logging.getLogger(__name__) - - -def load_fabric_configurations(fabric_names): - fabric_configs = {} - for fabric_name in fabric_names: - config = configuration.Configuration(brcd_zone_opts, fabric_name) - LOG.debug("Loaded FC fabric config %(fabricname)s", - {'fabricname': fabric_name}) - 
fabric_configs[fabric_name] = config - return fabric_configs diff --git a/cinder/zonemanager/drivers/brocade/brcd_fc_san_lookup_service.py b/cinder/zonemanager/drivers/brocade/brcd_fc_san_lookup_service.py deleted file mode 100644 index 65119575a..000000000 --- a/cinder/zonemanager/drivers/brocade/brcd_fc_san_lookup_service.py +++ /dev/null @@ -1,190 +0,0 @@ -# (c) Copyright 2016 Brocade Communications Systems Inc. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -# - -from oslo_log import log as logging -from oslo_utils import excutils -from oslo_utils import importutils - -from cinder import exception -from cinder.i18n import _ -from cinder.zonemanager.drivers.brocade import brcd_fabric_opts as fabric_opts -from cinder.zonemanager import fc_san_lookup_service as fc_service -from cinder.zonemanager import utils as fczm_utils - -LOG = logging.getLogger(__name__) - - -class BrcdFCSanLookupService(fc_service.FCSanLookupService): - """The SAN lookup service that talks to Brocade switches. 
- - Version History: - 1.0.0 - Initial version - 1.1 - Add support to use config option for switch southbound protocol - - """ - - VERSION = "1.1" - - def __init__(self, **kwargs): - """Initializing the client.""" - super(BrcdFCSanLookupService, self).__init__(**kwargs) - self.configuration = kwargs.get('configuration', None) - self.create_configuration() - - def create_configuration(self): - """Configuration specific to SAN context values.""" - config = self.configuration - - fabric_names = [x.strip() for x in config.fc_fabric_names.split(',')] - LOG.debug('Fabric Names: %s', fabric_names) - - # There can be more than one SAN in the network and we need to - # get credentials for each for SAN context lookup later. - if len(fabric_names) > 0: - self.fabric_configs = fabric_opts.load_fabric_configurations( - fabric_names) - - def get_device_mapping_from_network(self, - initiator_wwn_list, - target_wwn_list): - """Provides the initiator/target map for available SAN contexts. - - Looks up nameserver of each fc SAN configured to find logged in devices - and returns a map of initiator and target port WWNs for each fabric. - - :param initiator_wwn_list: List of initiator port WWN - :param target_wwn_list: List of target port WWN - :returns: List -- device wwn map in following format - - .. code-block:: default - - { - : { - 'initiator_port_wwn_list': - ('200000051e55a100', '200000051e55a121'..) - 'target_port_wwn_list': - ('100000051e55a100', '100000051e55a121'..) 
- } - } - - :raises Exception: when connection to fabric is failed - """ - device_map = {} - formatted_target_list = [] - formatted_initiator_list = [] - fabric_map = {} - fabric_names = self.configuration.fc_fabric_names - fabrics = None - if not fabric_names: - raise exception.InvalidParameterValue( - err=_("Missing Fibre Channel SAN configuration " - "param - fc_fabric_names")) - - fabrics = [x.strip() for x in fabric_names.split(',')] - LOG.debug("FC Fabric List: %s", fabrics) - if fabrics: - for t in target_wwn_list: - formatted_target_list.append(fczm_utils.get_formatted_wwn(t)) - - for i in initiator_wwn_list: - formatted_initiator_list.append(fczm_utils. - get_formatted_wwn(i)) - - for fabric_name in fabrics: - fabric_ip = self.fabric_configs[fabric_name].safe_get( - 'fc_fabric_address') - - # Get name server data from fabric and find the targets - # logged in - nsinfo = '' - try: - LOG.debug("Getting name server data for " - "fabric %s", fabric_ip) - conn = self._get_southbound_client(fabric_name) - nsinfo = conn.get_nameserver_info() - except exception.FCSanLookupServiceException: - with excutils.save_and_reraise_exception(): - LOG.error("Failed collecting name server info from" - " fabric %s", fabric_ip) - except Exception as e: - msg = _("SSH connection failed " - "for %(fabric)s with error: %(err)s" - ) % {'fabric': fabric_ip, 'err': e} - LOG.error(msg) - raise exception.FCSanLookupServiceException(message=msg) - - LOG.debug("Lookup service:nsinfo-%s", nsinfo) - LOG.debug("Lookup service:initiator list from " - "caller-%s", formatted_initiator_list) - LOG.debug("Lookup service:target list from " - "caller-%s", formatted_target_list) - visible_targets = [x for x in nsinfo - if x in formatted_target_list] - visible_initiators = [x for x in nsinfo - if x in formatted_initiator_list] - - if visible_targets: - LOG.debug("Filtered targets is: %s", visible_targets) - # getting rid of the : before returning - for idx, elem in enumerate(visible_targets): - elem 
= str(elem).replace(':', '') - visible_targets[idx] = elem - else: - LOG.debug("No targets are in the nameserver for SAN %s", - fabric_name) - - if visible_initiators: - # getting rid of the : before returning ~sk - for idx, elem in enumerate(visible_initiators): - elem = str(elem).replace(':', '') - visible_initiators[idx] = elem - else: - LOG.debug("No initiators are in the nameserver " - "for SAN %s", fabric_name) - - fabric_map = { - 'initiator_port_wwn_list': visible_initiators, - 'target_port_wwn_list': visible_targets - } - device_map[fabric_name] = fabric_map - LOG.debug("Device map for SAN context: %s", device_map) - return device_map - - def _get_southbound_client(self, fabric): - """Implementation to get SouthBound Connector. - - South bound connector will be - dynamically selected based on the configuration - - :param fabric: fabric information - """ - fabric_info = self.fabric_configs[fabric] - fc_ip = fabric_info.safe_get('fc_fabric_address') - sb_connector = fabric_info.safe_get('fc_southbound_protocol') - if sb_connector is None: - sb_connector = self.configuration.brcd_sb_connector - try: - conn_factory = importutils.import_object( - "cinder.zonemanager.drivers.brocade." - "brcd_fc_zone_connector_factory." - "BrcdFCZoneFactory") - client = conn_factory.get_connector(fabric_info, - sb_connector.upper()) - except Exception: - msg = _("Failed to create south bound connector for %s.") % fc_ip - LOG.exception(msg) - raise exception.FCZoneDriverException(msg) - return client diff --git a/cinder/zonemanager/drivers/brocade/brcd_fc_zone_client_cli.py b/cinder/zonemanager/drivers/brocade/brcd_fc_zone_client_cli.py deleted file mode 100644 index 14f88f6c3..000000000 --- a/cinder/zonemanager/drivers/brocade/brcd_fc_zone_client_cli.py +++ /dev/null @@ -1,624 +0,0 @@ -# (c) Copyright 2016 Brocade Communications Systems Inc. -# All Rights Reserved. 
-# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -# - - -""" -Script to push the zone configuration to brocade SAN switches. -""" - -import random -import re - -from eventlet import greenthread -from oslo_concurrency import processutils -from oslo_log import log as logging -from oslo_utils import excutils -import six - -from cinder import exception -from cinder.i18n import _ -from cinder import ssh_utils -from cinder import utils -import cinder.zonemanager.drivers.brocade.fc_zone_constants as zone_constant - -LOG = logging.getLogger(__name__) - - -class BrcdFCZoneClientCLI(object): - switch_ip = None - switch_port = '22' - switch_user = 'admin' - switch_pwd = 'none' - switch_key = 'none' - patrn = re.compile(r'[;\s]+') - - def __init__(self, ipaddress, username, - password, port, key): - """Initializing the client.""" - self.switch_ip = ipaddress - self.switch_port = port - self.switch_user = username - self.switch_pwd = password - self.switch_key = key - self.sshpool = None - - def get_active_zone_set(self): - """Return the active zone configuration. - - Return active zoneset from fabric. When none of the configurations - are active then it will return empty map. - - :returns: Map -- active zone set map in the following format - - .. 
code-block:: python - - { - 'zones': - {'openstack50060b0000c26604201900051ee8e329': - ['50060b0000c26604', '201900051ee8e329'] - }, - 'active_zone_config': 'OpenStack_Cfg' - } - """ - zone_set = {} - zone = {} - zone_member = None - zone_name = None - switch_data = None - zone_set_name = None - try: - switch_data = self._get_switch_info( - [zone_constant.GET_ACTIVE_ZONE_CFG]) - except exception.BrocadeZoningCliException: - with excutils.save_and_reraise_exception(): - LOG.error("Failed getting active zone set " - "from fabric %s", self.switch_ip) - try: - for line in switch_data: - line_split = re.split('\\t', line) - if len(line_split) > 2: - line_split = [x.replace( - '\n', '') for x in line_split] - line_split = [x.replace( - ' ', - '') for x in line_split] - if zone_constant.CFG_ZONESET in line_split: - zone_set_name = line_split[1] - continue - if line_split[1]: - zone_name = line_split[1] - zone[zone_name] = list() - if line_split[2]: - zone_member = line_split[2] - zone_member_list = zone.get(zone_name) - zone_member_list.append(zone_member) - zone_set[zone_constant.CFG_ZONES] = zone - zone_set[zone_constant.ACTIVE_ZONE_CONFIG] = zone_set_name - except Exception: - # In case of parsing error here, it should be malformed cli output. - msg = _("Malformed zone configuration: (switch=%(switch)s " - "zone_config=%(zone_config)s)." - ) % {'switch': self.switch_ip, - 'zone_config': switch_data} - LOG.exception(msg) - raise exception.FCZoneDriverException(reason=msg) - switch_data = None - return zone_set - - def add_zones(self, zones, activate, active_zone_set=None): - """Add zone configuration. - - This method will add the zone configuration passed by user. - - :param zones: zone names mapped to members. Zone members - are colon separated but case-insensitive - - .. 
code-block:: python - - { zonename1:[zonememeber1, zonemember2,...], - zonename2:[zonemember1, zonemember2,...]...} - - e.g: - - { - 'openstack50060b0000c26604201900051ee8e329': - ['50:06:0b:00:00:c2:66:04', - '20:19:00:05:1e:e8:e3:29'] - } - - :param activate: True/False - :param active_zone_set: active zone set dict retrieved from - get_active_zone_set method - - """ - LOG.debug("Add Zones - Zones passed: %s", zones) - cfg_name = None - iterator_count = 0 - zone_with_sep = '' - if not active_zone_set: - active_zone_set = self.get_active_zone_set() - LOG.debug("Active zone set: %s", active_zone_set) - zone_list = active_zone_set[zone_constant.CFG_ZONES] - LOG.debug("zone list: %s", zone_list) - for zone in zones.keys(): - zone_members_with_sep = ';'.join(str(member) for - member in zones[zone]) - LOG.debug("Forming command for create zone") - cmd = 'zonecreate "%(zone)s", "%(zone_members_with_sep)s"' % { - 'zone': zone, - 'zone_members_with_sep': zone_members_with_sep} - LOG.debug("Creating zone, cmd to run %s", cmd) - self.apply_zone_change(cmd.split()) - if(iterator_count > 0): - zone_with_sep += ';' - iterator_count += 1 - zone_with_sep += zone - if not zone_with_sep: - return - try: - # If zone_list exists, there are active zones, - # so add new zone to existing active config. - # Otherwise, create the zone config. 
- if zone_list: - cfg_name = active_zone_set[zone_constant.ACTIVE_ZONE_CONFIG] - else: - cfg_name = None - cmd = None - if not cfg_name: - cfg_name = zone_constant.OPENSTACK_CFG_NAME - cmd = 'cfgcreate "%(zoneset)s", "%(zones)s"' \ - % {'zoneset': cfg_name, 'zones': zone_with_sep} - else: - cmd = 'cfgadd "%(zoneset)s", "%(zones)s"' \ - % {'zoneset': cfg_name, 'zones': zone_with_sep} - LOG.debug("Zone config cmd to run %s", cmd) - self.apply_zone_change(cmd.split()) - if activate: - self.activate_zoneset(cfg_name) - else: - self._cfg_save() - except Exception as e: - self._cfg_trans_abort() - msg = _("Creating and activating zone set failed: " - "(Zone set=%(cfg_name)s error=%(err)s)." - ) % {'cfg_name': cfg_name, 'err': six.text_type(e)} - LOG.error(msg) - raise exception.BrocadeZoningCliException(reason=msg) - - def update_zones(self, zones, activate, operation, active_zone_set=None): - """Update the zone configuration. - - This method will update the zone configuration passed by user. - - :param zones: zone names mapped to members. Zone members - are colon separated but case-insensitive - - .. 
code-block:: python - - { zonename1:[zonememeber1, zonemember2,...], - zonename2:[zonemember1, zonemember2,...]...} - - e.g: - - { - 'openstack50060b0000c26604201900051ee8e329': - ['50:06:0b:00:00:c2:66:04', - '20:19:00:05:1e:e8:e3:29'] - } - - :param activate: True/False - :param operation: zone add or zone remove - :param active_zone_set: active zone set dict retrieved from - get_active_zone_set method - - """ - LOG.debug("Update Zones - Zones passed: %s", zones) - cfg_name = None - iterator_count = 0 - zone_with_sep = '' - if not active_zone_set: - active_zone_set = self.get_active_zone_set() - LOG.debug("Active zone set: %s", active_zone_set) - zone_list = active_zone_set[zone_constant.CFG_ZONES] - LOG.debug("Active zone list: %s", zone_list) - for zone in zones.keys(): - zone_members_with_sep = ';'.join(str(member) for - member in zones[zone]) - cmd = '%(operation)s "%(zone)s", "%(zone_members_with_sep)s"' % { - 'operation': operation, - 'zone': zone, - 'zone_members_with_sep': zone_members_with_sep} - LOG.debug("Updating zone, cmd to run %s", cmd) - self.apply_zone_change(cmd.split()) - if(iterator_count > 0): - zone_with_sep += ';' - iterator_count += 1 - zone_with_sep += zone - if not zone_with_sep: - return - try: - cfg_name = active_zone_set[zone_constant.ACTIVE_ZONE_CONFIG] - if activate: - self.activate_zoneset(cfg_name) - else: - self._cfg_save() - except Exception as e: - self._cfg_trans_abort() - msg = _("Activating zone set failed: " - "(Zone set=%(cfg_name)s error=%(err)s)." - ) % {'cfg_name': cfg_name, 'err': six.text_type(e)} - LOG.error(msg) - raise exception.BrocadeZoningCliException(reason=msg) - - def activate_zoneset(self, cfgname): - """Method to Activate the zone config. 
Param cfgname - ZonesetName.""" - cmd_list = [zone_constant.ACTIVATE_ZONESET, cfgname] - return self._ssh_execute(cmd_list, True, 1) - - def deactivate_zoneset(self): - """Method to deActivate the zone config.""" - return self._ssh_execute([zone_constant.DEACTIVATE_ZONESET], True, 1) - - def delete_zones(self, zone_names, activate, active_zone_set=None): - """Delete zones from fabric. - - Method to delete the active zone config zones - - :param zone_names: zoneNames separated by semicolon - :param activate: True/False - :param active_zone_set: the active zone set dict retrieved - from get_active_zone_set method - """ - active_zoneset_name = None - zone_list = [] - if not active_zone_set: - active_zone_set = self.get_active_zone_set() - active_zoneset_name = active_zone_set[ - zone_constant.ACTIVE_ZONE_CONFIG] - zone_list = active_zone_set[zone_constant.CFG_ZONES] - zones = self.patrn.split(''.join(zone_names)) - cmd = None - try: - if len(zones) == len(zone_list): - self.deactivate_zoneset() - cmd = 'cfgdelete "%(active_zoneset_name)s"' \ - % {'active_zoneset_name': active_zoneset_name} - # Active zoneset is being deleted, hence reset activate flag - activate = False - else: - cmd = 'cfgremove "%(active_zoneset_name)s", "%(zone_names)s"' \ - % {'active_zoneset_name': active_zoneset_name, - 'zone_names': zone_names - } - LOG.debug("Delete zones: Config cmd to run: %s", cmd) - self.apply_zone_change(cmd.split()) - for zone in zones: - self._zone_delete(zone) - if activate: - self.activate_zoneset(active_zoneset_name) - else: - self._cfg_save() - except Exception as e: - msg = _("Deleting zones failed: (command=%(cmd)s error=%(err)s)." - ) % {'cmd': cmd, 'err': six.text_type(e)} - LOG.error(msg) - self._cfg_trans_abort() - raise exception.BrocadeZoningCliException(reason=msg) - - def get_nameserver_info(self): - """Get name server data from fabric. 
- - This method will return the connected node port wwn list(local - and remote) for the given switch fabric - """ - cli_output = None - return_list = [] - try: - cmd = '%(nsshow)s;%(nscamshow)s' % { - 'nsshow': zone_constant.NS_SHOW, - 'nscamshow': zone_constant.NS_CAM_SHOW} - cli_output = self._get_switch_info([cmd]) - except exception.BrocadeZoningCliException: - with excutils.save_and_reraise_exception(): - LOG.error("Failed collecting nsshow " - "info for fabric %s", self.switch_ip) - if (cli_output): - return_list = self._parse_ns_output(cli_output) - cli_output = None - return return_list - - def _cfg_save(self): - self._ssh_execute([zone_constant.CFG_SAVE], True, 1) - - def _zone_delete(self, zone_name): - cmd = 'zonedelete "%(zone_name)s"' % {'zone_name': zone_name} - self.apply_zone_change(cmd.split()) - - def _cfg_trans_abort(self): - is_abortable = self._is_trans_abortable() - if(is_abortable): - self.apply_zone_change([zone_constant.CFG_ZONE_TRANS_ABORT]) - - def _is_trans_abortable(self): - is_abortable = False - stdout, stderr = None, None - stdout, stderr = self._run_ssh( - [zone_constant.CFG_SHOW_TRANS], True, 1) - output = stdout.splitlines() - is_abortable = False - for line in output: - if(zone_constant.TRANS_ABORTABLE in line): - is_abortable = True - break - if stderr: - msg = _("Error while checking transaction status: %s") % stderr - raise exception.BrocadeZoningCliException(reason=msg) - else: - return is_abortable - - def apply_zone_change(self, cmd_list): - """Execute zoning cli with no status update. - - Executes CLI commands such as addZone where status return is - not expected. 
- """ - stdout, stderr = None, None - LOG.debug("Executing command via ssh: %s", cmd_list) - stdout, stderr = self._run_ssh(cmd_list, True, 1) - # no output expected, so output means there is an error - if stdout: - msg = _("Error while running zoning CLI: (command=%(cmd)s " - "error=%(err)s).") % {'cmd': cmd_list, 'err': stdout} - LOG.error(msg) - self._cfg_trans_abort() - raise exception.BrocadeZoningCliException(reason=msg) - - def is_supported_firmware(self): - """Check firmware version is v6.4 or higher. - - This API checks if the firmware version per the plug-in support level. - This only checks major and minor version. - """ - cmd = ['version'] - firmware = 0 - try: - stdout, stderr = self._execute_shell_cmd(cmd) - if (stdout): - for line in stdout: - if 'Fabric OS: v' in line: - LOG.debug("Firmware version string: %s", line) - ver = line.split('Fabric OS: v')[1].split('.') - if (ver): - firmware = int(ver[0] + ver[1]) - return firmware > 63 - else: - LOG.error("No CLI output for firmware version check") - return False - except processutils.ProcessExecutionError as e: - msg = _("Error while getting data via ssh: (command=%(cmd)s " - "error=%(err)s).") % {'cmd': cmd, 'err': six.text_type(e)} - LOG.error(msg) - raise exception.BrocadeZoningCliException(reason=msg) - - def _get_switch_info(self, cmd_list): - stdout, stderr, sw_data = None, None, None - try: - stdout, stderr = self._run_ssh(cmd_list, True, 1) - if (stdout): - sw_data = stdout.splitlines() - return sw_data - except processutils.ProcessExecutionError as e: - msg = _("Error while getting data via ssh: (command=%(cmd)s " - "error=%(err)s).") % {'cmd': cmd_list, - 'err': six.text_type(e)} - LOG.error(msg) - raise exception.BrocadeZoningCliException(reason=msg) - - def _parse_ns_output(self, switch_data): - """Parses name server data. 
- - Parses nameserver raw data and adds the device port wwns to the list - - :returns: List -- list of device port wwn from ns info - """ - return_list = [] - for line in switch_data: - if not(" NL " in line or " N " in line): - continue - linesplit = line.split(';') - if len(linesplit) > 2: - node_port_wwn = linesplit[2] - return_list.append(node_port_wwn) - else: - msg = _("Malformed nameserver string: %s") % line - LOG.error(msg) - raise exception.InvalidParameterValue(err=msg) - return return_list - - def _run_ssh(self, cmd_list, check_exit_code=True, attempts=1): - # TODO(skolathur): Need to implement ssh_injection check - # currently, the check will fail for zonecreate command - # as zone members are separated by ';'which is a danger char - command = ' '. join(cmd_list) - - if not self.sshpool: - self.sshpool = ssh_utils.SSHPool(self.switch_ip, - self.switch_port, - None, - self.switch_user, - self.switch_pwd, - self.switch_key, - min_size=1, - max_size=5) - last_exception = None - try: - with self.sshpool.item() as ssh: - while attempts > 0: - attempts -= 1 - try: - return processutils.ssh_execute( - ssh, - command, - check_exit_code=check_exit_code) - except Exception as e: - LOG.exception('Error executing SSH command.') - last_exception = e - greenthread.sleep(random.randint(20, 500) / 100.0) - try: - raise processutils.ProcessExecutionError( - exit_code=last_exception.exit_code, - stdout=last_exception.stdout, - stderr=last_exception.stderr, - cmd=last_exception.cmd) - except AttributeError: - raise processutils.ProcessExecutionError( - exit_code=-1, - stdout="", - stderr="Error running SSH command", - cmd=command) - except Exception: - with excutils.save_and_reraise_exception(): - LOG.error("Error running SSH command: %s", command) - - def _ssh_execute(self, cmd_list, check_exit_code=True, attempts=1): - """Execute cli with status update. - - Executes CLI commands such as cfgsave where status return is expected. 
- """ - utils.check_ssh_injection(cmd_list) - command = ' '. join(cmd_list) - - if not self.sshpool: - self.sshpool = ssh_utils.SSHPool(self.switch_ip, - self.switch_port, - None, - self.switch_user, - self.switch_pwd, - self.switch_key, - min_size=1, - max_size=5) - stdin, stdout, stderr = None, None, None - LOG.debug("Executing command via ssh: %s", command) - last_exception = None - try: - with self.sshpool.item() as ssh: - while attempts > 0: - attempts -= 1 - try: - stdin, stdout, stderr = ssh.exec_command(command) - stdin.write("%s\n" % zone_constant.YES) - channel = stdout.channel - exit_status = channel.recv_exit_status() - LOG.debug("Exit Status from ssh: %s", exit_status) - # exit_status == -1 if no exit code was returned - if exit_status != -1: - LOG.debug('Result was %s', exit_status) - if check_exit_code and exit_status != 0: - raise processutils.ProcessExecutionError( - exit_code=exit_status, - stdout=stdout, - stderr=stderr, - cmd=command) - else: - return True - else: - return True - except Exception as e: - LOG.exception('Error executing SSH command.') - last_exception = e - greenthread.sleep(random.randint(20, 500) / 100.0) - LOG.debug("Handling error case after " - "SSH: %s", last_exception) - try: - raise processutils.ProcessExecutionError( - exit_code=last_exception.exit_code, - stdout=last_exception.stdout, - stderr=last_exception.stderr, - cmd=last_exception.cmd) - except AttributeError: - raise processutils.ProcessExecutionError( - exit_code=-1, - stdout="", - stderr="Error running SSH command", - cmd=command) - except Exception as e: - with excutils.save_and_reraise_exception(): - LOG.error("Error executing command via ssh: %s", e) - finally: - if stdin: - stdin.flush() - stdin.close() - if stdout: - stdout.close() - if stderr: - stderr.close() - - def _execute_shell_cmd(self, cmd): - """Run command over shell for older firmware versions. - - Invokes shell and issue the command and return the output. 
- This is primarily used for issuing read commands when we are not sure - if the firmware supports exec_command. - """ - utils.check_ssh_injection(cmd) - command = ' '. join(cmd) - stdout, stderr = None, None - if not self.sshpool: - self.sshpool = ssh_utils.SSHPool(self.switch_ip, - self.switch_port, - None, - self.switch_user, - self.switch_pwd, - self.switch_key, - min_size=1, - max_size=5) - with self.sshpool.item() as ssh: - LOG.debug('Running cmd (SSH): %s', command) - channel = ssh.invoke_shell() - stdin_stream = channel.makefile('wb') - stdout_stream = channel.makefile('rb') - stderr_stream = channel.makefile('rb') - stdin_stream.write('''%s -exit -''' % command) - stdin_stream.flush() - stdout = stdout_stream.readlines() - stderr = stderr_stream.readlines() - stdin_stream.close() - stdout_stream.close() - stderr_stream.close() - - exit_status = channel.recv_exit_status() - # exit_status == -1 if no exit code was returned - if exit_status != -1: - LOG.debug('Result was %s', exit_status) - if exit_status != 0: - LOG.debug("command %s failed", command) - raise processutils.ProcessExecutionError( - exit_code=exit_status, - stdout=stdout, - stderr=stderr, - cmd=command) - try: - channel.close() - except Exception: - LOG.exception('Error closing channel.') - LOG.debug("_execute_cmd: stdout to return: %s", stdout) - LOG.debug("_execute_cmd: stderr to return: %s", stderr) - return (stdout, stderr) - - def cleanup(self): - self.sshpool = None diff --git a/cinder/zonemanager/drivers/brocade/brcd_fc_zone_connector_factory.py b/cinder/zonemanager/drivers/brocade/brcd_fc_zone_connector_factory.py deleted file mode 100644 index a6e7a8a66..000000000 --- a/cinder/zonemanager/drivers/brocade/brcd_fc_zone_connector_factory.py +++ /dev/null @@ -1,85 +0,0 @@ -# (c) Copyright 2015 Brocade Communications Systems Inc. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. 
You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -# - -""" -Brocade Zone Connector Factory is responsible to dynamically create the -connection object based on the configuration -""" - -from oslo_log import log as logging -from oslo_utils import importutils - -from cinder.zonemanager.drivers.brocade import fc_zone_constants - -LOG = logging.getLogger(__name__) - - -class BrcdFCZoneFactory(object): - - def __init__(self): - self.sb_conn_map = {} - - def get_connector(self, fabric, sb_connector): - """Returns Device Connector. - - Factory method to create and return - correct SB connector object based on the protocol - """ - - fabric_ip = fabric.safe_get('fc_fabric_address') - client = self.sb_conn_map.get(fabric_ip) - - if not client: - - fabric_user = fabric.safe_get('fc_fabric_user') - fabric_pwd = fabric.safe_get('fc_fabric_password') - fabric_port = fabric.safe_get('fc_fabric_port') - fc_vfid = fabric.safe_get('fc_virtual_fabric_id') - fabric_ssh_cert_path = fabric.safe_get('fc_fabric_ssh_cert_path') - - LOG.debug("Client not found. Creating connection client for" - " %(ip)s with %(connector)s protocol " - "for the user %(user)s at port %(port)s.", - {'ip': fabric_ip, - 'connector': sb_connector, - 'user': fabric_user, - 'port': fabric_port, - 'vf_id': fc_vfid}) - - if sb_connector.lower() in (fc_zone_constants.HTTP, - fc_zone_constants.HTTPS): - client = importutils.import_object( - "cinder.zonemanager.drivers.brocade." 
- "brcd_http_fc_zone_client.BrcdHTTPFCZoneClient", - ipaddress=fabric_ip, - username=fabric_user, - password=fabric_pwd, - port=fabric_port, - vfid=fc_vfid, - protocol=sb_connector - ) - else: - client = importutils.import_object( - "cinder.zonemanager.drivers.brocade." - "brcd_fc_zone_client_cli.BrcdFCZoneClientCLI", - ipaddress=fabric_ip, - username=fabric_user, - password=fabric_pwd, - key=fabric_ssh_cert_path, - port=fabric_port - ) - self.sb_conn_map.update({fabric_ip: client}) - return client diff --git a/cinder/zonemanager/drivers/brocade/brcd_fc_zone_driver.py b/cinder/zonemanager/drivers/brocade/brcd_fc_zone_driver.py deleted file mode 100644 index 69c9edf5f..000000000 --- a/cinder/zonemanager/drivers/brocade/brcd_fc_zone_driver.py +++ /dev/null @@ -1,497 +0,0 @@ -# (c) Copyright 2016 Brocade Communications Systems Inc. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -# - - -""" -Brocade Zone Driver is responsible to manage access control using FC zoning -for Brocade FC fabrics. -This is a concrete implementation of FCZoneDriver interface implementing -add_connection and delete_connection interfaces. - -**Related Flags** - -:zone_activate: Used by: class: 'FCZoneDriver'. Defaults to True -:zone_name_prefix: Used by: class: 'FCZoneDriver'. 
Defaults to 'openstack' -""" - - -from oslo_concurrency import lockutils -from oslo_config import cfg -from oslo_log import log as logging -from oslo_utils import excutils -from oslo_utils import importutils -import six -import string - -from cinder import exception -from cinder.i18n import _ -from cinder import interface -from cinder.zonemanager.drivers.brocade import brcd_fabric_opts as fabric_opts -from cinder.zonemanager.drivers.brocade import fc_zone_constants -from cinder.zonemanager.drivers import driver_utils -from cinder.zonemanager.drivers import fc_zone_driver -from cinder.zonemanager import utils - -LOG = logging.getLogger(__name__) - -SUPPORTED_CHARS = string.ascii_letters + string.digits + '_' -brcd_opts = [ - cfg.StrOpt('brcd_sb_connector', - default=fc_zone_constants.HTTP.upper(), - help='South bound connector for zoning operation'), -] - -CONF = cfg.CONF -CONF.register_opts(brcd_opts, group='fc-zone-manager') - - -@interface.fczmdriver -class BrcdFCZoneDriver(fc_zone_driver.FCZoneDriver): - """Brocade FC zone driver implementation. - - OpenStack Fibre Channel zone driver to manage FC zoning in - Brocade SAN fabrics. - - Version history: - 1.0 - Initial Brocade FC zone driver - 1.1 - Implements performance enhancements - 1.2 - Added support for friendly zone name - 1.3 - Added HTTP connector support - 1.4 - Adds support to zone in Virtual Fabrics - 1.5 - Initiator zoning updates through zoneadd/zoneremove - """ - - VERSION = "1.5" - - # ThirdPartySystems wiki page - CI_WIKI_NAME = "Brocade_OpenStack_CI" - - def __init__(self, **kwargs): - super(BrcdFCZoneDriver, self).__init__(**kwargs) - self.sb_conn_map = {} - self.configuration = kwargs.get('configuration', None) - if self.configuration: - self.configuration.append_config_values(brcd_opts) - # Adding a hack to handle parameters from super classes - # in case configured with multiple back ends. 
- fabric_names = self.configuration.safe_get('fc_fabric_names') - base_san_opts = [] - if not fabric_names: - base_san_opts.append( - cfg.StrOpt('fc_fabric_names', - help='Comma separated list of fibre channel ' - 'fabric names. This list of names is used to' - ' retrieve other SAN credentials for connecting' - ' to each SAN fabric' - )) - if len(base_san_opts) > 0: - CONF.register_opts(base_san_opts) - self.configuration.append_config_values(base_san_opts) - - fc_fabric_names = self.configuration.fc_fabric_names - fabric_names = [x.strip() for x in fc_fabric_names.split(',')] - - # There can be more than one SAN in the network and we need to - # get credentials for each SAN. - if fabric_names: - self.fabric_configs = fabric_opts.load_fabric_configurations( - fabric_names) - - @lockutils.synchronized('brcd', 'fcfabric-', True) - def add_connection(self, fabric, initiator_target_map, host_name=None, - storage_system=None): - """Concrete implementation of add_connection. - - Based on zoning policy and state of each I-T pair, list of zone - members are created and pushed to the fabric to add zones. The - new zones created or zones updated are activated based on isActivate - flag set in cinder.conf returned by volume driver after attach - operation. 
- - :param fabric: Fabric name from cinder.conf file - :param initiator_target_map: Mapping of initiator to list of targets - """ - LOG.info("BrcdFCZoneDriver - Add connection for fabric " - "%(fabric)s for I-T map: %(i_t_map)s", - {'fabric': fabric, - 'i_t_map': initiator_target_map}) - zoning_policy = self.configuration.zoning_policy - zoning_policy_fab = self.fabric_configs[fabric].safe_get( - 'zoning_policy') - zone_name_prefix = self.fabric_configs[fabric].safe_get( - 'zone_name_prefix') - zone_activate = self.fabric_configs[fabric].safe_get( - 'zone_activate') - if zoning_policy_fab: - zoning_policy = zoning_policy_fab - LOG.info("Zoning policy for Fabric %(policy)s", - {'policy': zoning_policy}) - if (zoning_policy != 'initiator' - and zoning_policy != 'initiator-target'): - LOG.info("Zoning policy is not valid, " - "no zoning will be performed.") - return - - client = self._get_southbound_client(fabric) - cfgmap_from_fabric = self._get_active_zone_set(client) - - zone_names = [] - if cfgmap_from_fabric.get('zones'): - zone_names = cfgmap_from_fabric['zones'].keys() - # based on zoning policy, create zone member list and - # push changes to fabric. - for initiator_key in initiator_target_map.keys(): - zone_map = {} - zone_update_map = {} - initiator = initiator_key.lower() - target_list = initiator_target_map[initiator_key] - if zoning_policy == 'initiator-target': - for target in target_list: - zone_members = [utils.get_formatted_wwn(initiator), - utils.get_formatted_wwn(target)] - zone_name = driver_utils.get_friendly_zone_name( - zoning_policy, - initiator, - target, - host_name, - storage_system, - zone_name_prefix, - SUPPORTED_CHARS) - if (len(cfgmap_from_fabric) == 0 or ( - zone_name not in zone_names)): - zone_map[zone_name] = zone_members - else: - # This is I-T zoning, skip if zone already exists. - LOG.info("Zone exists in I-T mode. 
Skipping " - "zone creation for %(zonename)s", - {'zonename': zone_name}) - elif zoning_policy == 'initiator': - zone_members = [utils.get_formatted_wwn(initiator)] - for target in target_list: - zone_members.append(utils.get_formatted_wwn(target)) - - zone_name = driver_utils.get_friendly_zone_name( - zoning_policy, - initiator, - target, - host_name, - storage_system, - zone_name_prefix, - SUPPORTED_CHARS) - - # If zone exists, then do a zoneadd to update - # the zone members in the existing zone. Otherwise, - # do a zonecreate to create a new zone. - if len(zone_names) > 0 and (zone_name in zone_names): - # Verify that the target WWNs are not already members - # of the existing zone. If so, remove them from the - # list of members to add, otherwise error will be - # returned from the switch. - for t in target_list: - if t in cfgmap_from_fabric['zones'][zone_name]: - zone_members.remove(utils.get_formatted_wwn(t)) - if zone_members: - zone_update_map[zone_name] = zone_members - else: - zone_map[zone_name] = zone_members - - LOG.info("Zone map to create: %(zonemap)s", - {'zonemap': zone_map}) - LOG.info("Zone map to update: %(zone_update_map)s", - {'zone_update_map': zone_update_map}) - - try: - if zone_map: - client.add_zones(zone_map, zone_activate, - cfgmap_from_fabric) - LOG.debug("Zones created successfully: %(zonemap)s", - {'zonemap': zone_map}) - if zone_update_map: - client.update_zones(zone_update_map, zone_activate, - fc_zone_constants.ZONE_ADD, - cfgmap_from_fabric) - LOG.debug("Zones updated successfully: %(updatemap)s", - {'updatemap': zone_update_map}) - except (exception.BrocadeZoningCliException, - exception.BrocadeZoningHttpException) as brocade_ex: - raise exception.FCZoneDriverException(brocade_ex) - except Exception: - msg = _("Failed to add or update zoning configuration.") - LOG.exception(msg) - raise exception.FCZoneDriverException(msg) - finally: - client.cleanup() - - @lockutils.synchronized('brcd', 'fcfabric-', True) - def 
delete_connection(self, fabric, initiator_target_map, host_name=None, - storage_system=None): - """Concrete implementation of delete_connection. - - Based on zoning policy and state of each I-T pair, list of zones - are created for deletion. The zones are either updated deleted based - on the policy and attach/detach state of each I-T pair. - - :param fabric: Fabric name from cinder.conf file - :param initiator_target_map: Mapping of initiator to list of targets - """ - LOG.info("BrcdFCZoneDriver - Delete connection for fabric " - "%(fabric)s for I-T map: %(i_t_map)s", - {'fabric': fabric, - 'i_t_map': initiator_target_map}) - zoning_policy = self.configuration.zoning_policy - zoning_policy_fab = self.fabric_configs[fabric].safe_get( - 'zoning_policy') - zone_name_prefix = self.fabric_configs[fabric].safe_get( - 'zone_name_prefix') - zone_activate = self.fabric_configs[fabric].safe_get( - 'zone_activate') - if zoning_policy_fab: - zoning_policy = zoning_policy_fab - LOG.info("Zoning policy for fabric %(policy)s", - {'policy': zoning_policy}) - conn = self._get_southbound_client(fabric) - cfgmap_from_fabric = self._get_active_zone_set(conn) - - zone_names = [] - if cfgmap_from_fabric.get('zones'): - zone_names = cfgmap_from_fabric['zones'].keys() - - # Based on zoning policy, get zone member list and push changes to - # fabric. This operation could result in an update for zone config - # with new member list or deleting zones from active cfg. - LOG.debug("zone config from Fabric: %(cfgmap)s", - {'cfgmap': cfgmap_from_fabric}) - for initiator_key in initiator_target_map.keys(): - initiator = initiator_key.lower() - formatted_initiator = utils.get_formatted_wwn(initiator) - zone_map = {} - zones_to_delete = [] - t_list = initiator_target_map[initiator_key] - if zoning_policy == 'initiator-target': - # In this case, zone needs to be deleted. 
- for t in t_list: - target = t.lower() - zone_name = driver_utils.get_friendly_zone_name( - zoning_policy, - initiator, - target, - host_name, - storage_system, - zone_name_prefix, - SUPPORTED_CHARS) - LOG.debug("Zone name to delete: %(zonename)s", - {'zonename': zone_name}) - if len(zone_names) > 0 and (zone_name in zone_names): - # delete zone. - LOG.debug("Added zone to delete to list: %(zonename)s", - {'zonename': zone_name}) - zones_to_delete.append(zone_name) - - elif zoning_policy == 'initiator': - zone_members = [formatted_initiator] - for t in t_list: - target = t.lower() - zone_members.append(utils.get_formatted_wwn(target)) - - zone_name = driver_utils.get_friendly_zone_name( - zoning_policy, - initiator, - target, - host_name, - storage_system, - zone_name_prefix, - SUPPORTED_CHARS) - - if (zone_names and (zone_name in zone_names)): - # Check to see if there are other zone members - # in the zone besides the initiator and - # the targets being removed. - filtered_members = filter( - lambda x: x not in zone_members, - cfgmap_from_fabric['zones'][zone_name]) - - # If there are other zone members, proceed with - # zone update to remove the targets. Otherwise, - # delete the zone. - if filtered_members: - zone_members.remove(formatted_initiator) - # Verify that the zone members in target list - # are listed in zone definition. If not, remove - # the zone members from the list of members - # to remove, otherwise switch will return error. 
- zm_list = cfgmap_from_fabric['zones'][zone_name] - for t in t_list: - formatted_target = utils.get_formatted_wwn(t) - if formatted_target not in zm_list: - zone_members.remove(formatted_target) - if zone_members: - LOG.debug("Zone members to remove: " - "%(members)s", {'members': zone_members}) - zone_map[zone_name] = zone_members - else: - zones_to_delete.append(zone_name) - else: - LOG.warning("Zoning policy not recognized: %(policy)s", - {'policy': zoning_policy}) - LOG.debug("Zone map to update: %(zonemap)s", - {'zonemap': zone_map}) - LOG.debug("Zone list to delete: %(zones)s", - {'zones': zones_to_delete}) - try: - # Update zone membership. - if zone_map: - conn.update_zones(zone_map, zone_activate, - fc_zone_constants.ZONE_REMOVE, - cfgmap_from_fabric) - # Delete zones - if zones_to_delete: - zone_name_string = '' - num_zones = len(zones_to_delete) - for i in range(0, num_zones): - if i == 0: - zone_name_string = ( - '%s%s' % ( - zone_name_string, zones_to_delete[i])) - else: - zone_name_string = '%s;%s' % ( - zone_name_string, zones_to_delete[i]) - - conn.delete_zones( - zone_name_string, zone_activate, - cfgmap_from_fabric) - except (exception.BrocadeZoningCliException, - exception.BrocadeZoningHttpException) as brocade_ex: - raise exception.FCZoneDriverException(brocade_ex) - except Exception: - msg = _("Failed to update or delete zoning " - "configuration.") - LOG.exception(msg) - raise exception.FCZoneDriverException(msg) - finally: - conn.cleanup() - - def get_san_context(self, target_wwn_list): - """Lookup SAN context for visible end devices. - - Look up each SAN configured and return a map of SAN (fabric IP) to - list of target WWNs visible to the fabric. 
- """ - formatted_target_list = [] - fabric_map = {} - fc_fabric_names = self.configuration.fc_fabric_names - fabrics = [x.strip() for x in fc_fabric_names.split(',')] - LOG.debug("Fabric List: %(fabrics)s", {'fabrics': fabrics}) - LOG.debug("Target WWN list: %(targetwwns)s", - {'targetwwns': target_wwn_list}) - if len(fabrics) > 0: - for t in target_wwn_list: - formatted_target_list.append(utils.get_formatted_wwn(t)) - LOG.debug("Formatted target WWN list: %(targetlist)s", - {'targetlist': formatted_target_list}) - for fabric_name in fabrics: - conn = self._get_southbound_client(fabric_name) - - # Get name server data from fabric and get the targets - # logged in. - nsinfo = None - try: - nsinfo = conn.get_nameserver_info() - LOG.debug("Name server info from fabric: %(nsinfo)s", - {'nsinfo': nsinfo}) - except (exception.BrocadeZoningCliException, - exception.BrocadeZoningHttpException): - if not conn.is_supported_firmware(): - msg = _("Unsupported firmware on switch %s. Make sure " - "switch is running firmware v6.4 or higher" - ) % conn.switch_ip - LOG.exception(msg) - raise exception.FCZoneDriverException(msg) - with excutils.save_and_reraise_exception(): - LOG.exception("Error getting name server info.") - except Exception: - msg = _("Failed to get name server info.") - LOG.exception(msg) - raise exception.FCZoneDriverException(msg) - finally: - conn.cleanup() - visible_targets = filter( - lambda x: x in formatted_target_list, - nsinfo) - - if visible_targets: - LOG.info("Filtered targets for SAN is: %(targets)s", - {'targets': visible_targets}) - # getting rid of the ':' before returning - for idx, elem in enumerate(visible_targets): - visible_targets[idx] = str( - visible_targets[idx]).replace(':', '') - fabric_map[fabric_name] = visible_targets - else: - LOG.debug("No targets found in the nameserver " - "for fabric: %(fabric)s", - {'fabric': fabric_name}) - LOG.debug("Return SAN context output: %(fabricmap)s", - {'fabricmap': fabric_map}) - return fabric_map 
- - def _get_active_zone_set(self, conn): - cfgmap = None - try: - cfgmap = conn.get_active_zone_set() - except (exception.BrocadeZoningCliException, - exception.BrocadeZoningHttpException): - if not conn.is_supported_firmware(): - msg = _("Unsupported firmware on switch %s. Make sure " - "switch is running firmware v6.4 or higher" - ) % conn.switch_ip - LOG.error(msg) - raise exception.FCZoneDriverException(msg) - with excutils.save_and_reraise_exception(): - LOG.exception("Error getting name server info.") - except Exception as e: - msg = (_("Failed to retrieve active zoning configuration %s") - % six.text_type(e)) - LOG.error(msg) - raise exception.FCZoneDriverException(msg) - LOG.debug("Active zone set from fabric: %(cfgmap)s", - {'cfgmap': cfgmap}) - return cfgmap - - def _get_southbound_client(self, fabric): - """Implementation to get SouthBound Connector. - - South bound connector will be - dynamically selected based on the configuration - - :param fabric: fabric information - """ - fabric_info = self.fabric_configs[fabric] - fc_ip = fabric_info.safe_get('fc_fabric_address') - sb_connector = fabric_info.safe_get('fc_southbound_protocol') - if sb_connector is None: - sb_connector = self.configuration.brcd_sb_connector - try: - conn_factory = importutils.import_object( - "cinder.zonemanager.drivers.brocade." - "brcd_fc_zone_connector_factory." - "BrcdFCZoneFactory") - client = conn_factory.get_connector(fabric_info, - sb_connector.upper()) - except Exception: - msg = _("Failed to create south bound connector for %s.") % fc_ip - LOG.exception(msg) - raise exception.FCZoneDriverException(msg) - return client diff --git a/cinder/zonemanager/drivers/brocade/brcd_http_fc_zone_client.py b/cinder/zonemanager/drivers/brocade/brcd_http_fc_zone_client.py deleted file mode 100644 index ea53350dd..000000000 --- a/cinder/zonemanager/drivers/brocade/brcd_http_fc_zone_client.py +++ /dev/null @@ -1,967 +0,0 @@ -# (c) Copyright 2016 Brocade Communications Systems Inc. 
-# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -# -""" -Brocade south bound connector to communicate with switch using -HTTP or HTTPS protocol. -""" - -from oslo_log import log as logging -from oslo_serialization import base64 -from oslo_utils import encodeutils -import requests -import six -import time - -from cinder import exception -from cinder.i18n import _ -import cinder.zonemanager.drivers.brocade.fc_zone_constants as zone_constant - - -LOG = logging.getLogger(__name__) - - -class BrcdHTTPFCZoneClient(object): - - def __init__(self, ipaddress, username, - password, port, vfid, protocol): - """Initializing the client with the parameters passed. - - Creates authentication token and authenticate with switch - to ensure the credentials are correct and change the VF context. - - :param ipaddress: IP Address of the device. - :param username: User id to login. - :param password: User password. - :param port: Device Communication port - :param vfid: Virtual Fabric ID. - :param protocol: Communication Protocol. 
- """ - self.switch_ip = ipaddress - self.switch_user = username - self.switch_pwd = password - self.protocol = protocol - self.vfid = vfid - self.cfgs = {} - self.zones = {} - self.alias = {} - self.qlps = {} - self.ifas = {} - self.active_cfg = '' - self.parsed_raw_zoneinfo = "" - self.random_no = '' - self.session = None - - # Create and assign the authentication header based on the credentials - self.auth_header = self.create_auth_token() - - # Authenticate with the switch - # If authenticated successfully, save the auth status and - # create auth header for future communication with the device. - self.is_auth, self.auth_header = self.authenticate() - self.check_change_vf_context() - - def connect(self, requestType, requestURL, payload='', header=None): - """Connect to the switch using HTTP/HTTPS protocol. - - :param requestType: Connection Request method - :param requestURL: Connection URL - :param payload: Data to send with POST request - :param header: Request Headers - - :returns: HTTP response data - :raises BrocadeZoningHttpException: - """ - try: - if header is None: - header = {} - header.update({"User-Agent": "OpenStack Zone Driver"}) - - # Ensure only one connection is made throughout the life cycle - protocol = zone_constant.HTTP - if self.protocol == zone_constant.PROTOCOL_HTTPS: - protocol = zone_constant.HTTPS - if self.session is None: - self.session = requests.Session() - adapter = requests.adapters.HTTPAdapter(pool_connections=1, - pool_maxsize=1) - self.session.mount(protocol + '://', adapter) - url = protocol + "://" + self.switch_ip + requestURL - response = None - if requestType == zone_constant.GET_METHOD: - response = self.session.get(url, - headers=(header), - verify=False) - elif requestType == zone_constant.POST_METHOD: - response = self.session.post(url, - payload, - headers=(header), - verify=False) - - # Throw exception when response status is not OK - if response.status_code != zone_constant.STATUS_OK: - msg = _("Error while 
querying page %(url)s on the switch, " - "reason %(error)s.") % {'url': url, - 'error': response.reason} - raise exception.BrocadeZoningHttpException(msg) - else: - return response.text - except requests.exceptions.ConnectionError as e: - msg = (_("Error while connecting the switch %(switch_id)s " - "with protocol %(protocol)s. Error: %(error)s.") - % {'switch_id': self.switch_ip, - 'protocol': self.protocol, - 'error': six.text_type(e)}) - LOG.error(msg) - raise exception.BrocadeZoningHttpException(reason=msg) - except exception.BrocadeZoningHttpException as ex: - msg = (_("Unexpected status code from the switch %(switch_id)s " - "with protocol %(protocol)s for url %(page)s. " - "Error: %(error)s") - % {'switch_id': self.switch_ip, - 'protocol': self.protocol, - 'page': requestURL, - 'error': six.text_type(ex)}) - LOG.error(msg) - raise exception.BrocadeZoningHttpException(reason=msg) - - def create_auth_token(self): - """Create the authentication token. - - Creates the authentication token to use in the authentication header - return authentication header (Base64(username:password:random no)). 
- - :returns: Authentication Header - :raises BrocadeZoningHttpException: - """ - try: - # Send GET request to secinfo.html to get random number - response = self.connect(zone_constant.GET_METHOD, - zone_constant.SECINFO_PAGE) - parsed_data = self.get_parsed_data(response, - zone_constant.SECINFO_BEGIN, - zone_constant.SECINFO_END) - - # Extract the random no from secinfo.html response - self.random_no = self.get_nvp_value(parsed_data, - zone_constant.RANDOM) - # Form the authentication string - auth_string = (self.switch_user + ":" + self.switch_pwd + - ":" + self.random_no) - auth_token = base64.encode_as_text(auth_string).strip() - auth_header = (zone_constant.AUTH_STRING + - auth_token) # Build the proper header - except Exception as e: - msg = (_("Error while creating authentication token: %s") - % six.text_type(e)) - LOG.error(msg) - raise exception.BrocadeZoningHttpException(reason=msg) - return auth_header - - def authenticate(self): - """Authenticate with the switch. - - Returns authentication status with modified authentication - header (Base64(username:xxx:random no)). 
- - :returns: Authentication status - :raises BrocadeZoningHttpException: - """ - headers = {zone_constant.AUTH_HEADER: self.auth_header} - try: - # GET Request to authenticate.html to verify the credentials - response = self.connect(zone_constant.GET_METHOD, - zone_constant.AUTHEN_PAGE, - header=headers) - parsed_data = self.get_parsed_data(response, - zone_constant.AUTHEN_BEGIN, - zone_constant.AUTHEN_END) - isauthenticated = self.get_nvp_value( - parsed_data, zone_constant.AUTHENTICATED) - if isauthenticated == "yes": - # Replace password in the authentication string with xxx - auth_string = (self.switch_user + - ":" + "xxx" + ":" + self.random_no) - auth_token = base64.encode_as_text(auth_string).strip() - auth_header = zone_constant.AUTH_STRING + auth_token - return True, auth_header - else: - auth_error_code = self.get_nvp_value(parsed_data, "errCode") - msg = (_("Authentication failed, verify the switch " - "credentials, error code %s.") % auth_error_code) - LOG.error(msg) - raise exception.BrocadeZoningHttpException(reason=msg) - except Exception as e: - msg = (_("Error while authenticating with switch: %s.") - % six.text_type(e)) - LOG.error(msg) - raise exception.BrocadeZoningHttpException(reason=msg) - - def get_session_info(self): - """Get the session information from the switch - - :returns: Connection status information. - """ - try: - headers = {zone_constant.AUTH_HEADER: self.auth_header} - # GET request to session.html - response = self.connect(zone_constant.GET_METHOD, - zone_constant.SESSION_PAGE_ACTION, - header=headers) - except Exception as e: - msg = (_("Error while getting session information %s.") - % six.text_type(e)) - LOG.error(msg) - raise exception.BrocadeZoningHttpException(reason=msg) - return response - - def get_parsed_data(self, data, delim1, delim2): - """Return the sub string between the delimiters. 
- - :param data: String to manipulate - :param delim1: Delimiter 1 - :param delim2: Delimiter 2 - :returns: substring between the delimiters - """ - try: - start = data.index(delim1) - start = start + len(delim1) - end = data.index(delim2) - return data[start:end] - except ValueError as e: - msg = (_("Error while parsing the data: %s.") % six.text_type(e)) - LOG.error(msg) - raise exception.BrocadeZoningHttpException(reason=msg) - - def get_nvp_value(self, data, keyname): - """Get the value for the key passed. - - :param data: NVP to manipulate - :param keyname: Key name - :returns: value for the NVP - """ - try: - start = data.index(keyname) - start = start + len(keyname) - temp = data[start:] - end = temp.index("\n") - return (temp[:end].lstrip('= ')) - except ValueError as e: - msg = (_("Error while getting nvp value: %s.") % six.text_type(e)) - LOG.error(msg) - raise exception.BrocadeZoningHttpException(reason=msg) - - def get_managable_vf_list(self, session_info): - """List of VFIDs that can be managed. - - :param session_info: Session information from the switch - :returns: manageable VF list - :raises BrocadeZoningHttpException: - """ - try: - # Check the value of manageableLFList NVP, - # throw exception as not supported if the nvp not available - vf_list = self.get_nvp_value(session_info, - zone_constant.MANAGEABLE_VF) - if vf_list: - vf_list = vf_list.split(",") # convert the string to list - except exception.BrocadeZoningHttpException as e: - msg = (_("Error while checking whether " - "VF is available for management %s.") % six.text_type(e)) - LOG.error(msg) - raise exception.BrocadeZoningHttpException(reason=msg) - return vf_list[:-1] - - def change_vf_context(self, vfid, session_data): - """Change the VF context in the session. - - :param vfid: VFID to which context should be changed. 
- :param session_data: Session information from the switch - :raises BrocadeZoningHttpException: - """ - try: - managable_vf_list = self.get_managable_vf_list(session_data) - LOG.debug("Manageable VF IDs are %(vflist)s.", - {'vflist': managable_vf_list}) - # proceed changing the VF context - # if VF id can be managed if not throw exception - if vfid in managable_vf_list: - headers = {zone_constant.AUTH_HEADER: self.auth_header} - data = zone_constant.CHANGE_VF.format(vfid=vfid) - response = self.connect(zone_constant.POST_METHOD, - zone_constant.SESSION_PAGE, - data, - headers) - parsed_info = self.get_parsed_data(response, - zone_constant.SESSION_BEGIN, - zone_constant.SESSION_END) - session_LF_Id = self.get_nvp_value(parsed_info, - zone_constant.SESSION_LF_ID) - if session_LF_Id == vfid: - LOG.info("VF context is changed in the session.") - else: - msg = _("Cannot change VF context in the session.") - LOG.error(msg) - raise exception.BrocadeZoningHttpException(reason=msg) - - else: - msg = (_("Cannot change VF context, " - "specified VF is not available " - "in the manageable VF list %(vf_list)s.") - % {'vf_list': managable_vf_list}) - LOG.error(msg) - raise exception.BrocadeZoningHttpException(reason=msg) - except exception.BrocadeZoningHttpException as e: - msg = (_("Error while changing VF context %s.") % six.text_type(e)) - LOG.error(msg) - raise exception.BrocadeZoningHttpException(reason=msg) - - def get_zone_info(self): - """Parse all the zone information and store it in the dictionary.""" - - try: - self.cfgs = {} - self.zones = {} - self.active_cfg = '' - self.alias = {} - self.qlps = {} - self.ifas = {} - headers = {zone_constant.AUTH_HEADER: self.auth_header} - # GET request to gzoneinfo.htm - response = self.connect(zone_constant.GET_METHOD, - zone_constant.ZONE_PAGE, - header=headers) - # get the zone string from the response - self.parsed_raw_zoneinfo = self.get_parsed_data( - response, - zone_constant.ZONEINFO_BEGIN, - 
zone_constant.ZONEINFO_END).strip("\n") - LOG.debug("Original zone string from the switch: %(zoneinfo)s", - {'zoneinfo': self.parsed_raw_zoneinfo}) - # convert the zone string to list - zoneinfo = self.parsed_raw_zoneinfo.split() - i = 0 - while i < len(zoneinfo): - info = zoneinfo[i] - # check for the cfg delimiter - if zone_constant.CFG_DELIM in info: - # extract the cfg name - cfg_name = info.lstrip(zone_constant.CFG_DELIM) - # update the dict as - # self.cfgs={cfg_name:zone_name1;zone_name2} - self.cfgs.update({cfg_name: zoneinfo[i + 1]}) - i = i + 2 - # check for the zone delimiter - elif zone_constant.ZONE_DELIM in info: - # extract the zone name - zone_name = info.lstrip(zone_constant.ZONE_DELIM) - # update the dict as - # self.zones={zone_name:members1;members2} - self.zones.update({zone_name: zoneinfo[i + 1]}) - i = i + 2 - elif zone_constant.ALIAS_DELIM in info: - alias_name = info.lstrip(zone_constant.ALIAS_DELIM) - # update the dict as - # self.alias={alias_name:members1;members2} - self.alias.update({alias_name: zoneinfo[i + 1]}) - i = i + 2 - # check for quickloop zones - elif zone_constant.QLP_DELIM in info: - qlp_name = info.lstrip(zone_constant.QLP_DELIM) - # update the map as self.qlps={qlp_name:members1;members2} - self.qlps.update({qlp_name: zoneinfo[i + 1]}) - i = i + 2 - # check for fabric assist zones - elif zone_constant.IFA_DELIM in info: - ifa_name = info.lstrip(zone_constant.IFA_DELIM) - # update the map as self.ifas={ifa_name:members1;members2} - self.ifas.update({ifa_name: zoneinfo[i + 1]}) - i = i + 2 - elif zone_constant.ACTIVE_CFG_DELIM in info: - # update the string self.active_cfg=cfg_name - self.active_cfg = info.lstrip( - zone_constant.ACTIVE_CFG_DELIM) - if self.active_cfg == zone_constant.DEFAULT_CFG: - self.active_cfg = "" - i = i + 2 - else: - i = i + 1 - except Exception as e: - msg = (_("Error while changing VF context %s.") % six.text_type(e)) - LOG.error(msg) - raise exception.BrocadeZoningHttpException(reason=msg) - - 
def is_supported_firmware(self): - """Check firmware version is v6.4 or higher. - - This API checks if the firmware version per the plug-in support level. - This only checks major and minor version. - - :returns: True if firmware is supported else False. - :raises BrocadeZoningHttpException: - """ - - isfwsupported = False - - try: - headers = {zone_constant.AUTH_HEADER: self.auth_header} - # GET request to switch.html - response = self.connect(zone_constant.GET_METHOD, - zone_constant.SWITCH_PAGE, - header=headers) - parsed_data = self.get_parsed_data(response, - zone_constant.SWITCHINFO_BEGIN, - zone_constant.SWITCHINFO_END) - - # get the firmware version nvp value - fwVersion = self.get_nvp_value( - parsed_data, - zone_constant.FIRMWARE_VERSION).lstrip('v') - - ver = fwVersion.split(".") - LOG.debug("Firmware version: %(version)s.", {'version': ver}) - if int(ver[0] + ver[1]) > 63: - isfwsupported = True - - except Exception as e: - msg = (_("Error while checking the firmware version %s.") - % six.text_type(e)) - LOG.error(msg) - raise exception.BrocadeZoningHttpException(reason=msg) - return isfwsupported - - def get_active_zone_set(self): - """Return the active zone configuration. - - Return active zoneset from fabric. When none of the configurations - are active then it will return empty map. - - :returns: Map -- active zone set map in the following format - - .. 
code-block:: python - - { - 'zones': - {'openstack50060b0000c26604201900051ee8e329': - ['50060b0000c26604', '201900051ee8e329'] - }, - 'active_zone_config': 'OpenStack_Cfg' - } - - :raises BrocadeZoningHttpException: - """ - active_zone_set = {} - zones_map = {} - try: - self.get_zone_info() # get the zone information of the switch - if self.active_cfg != '': - # get the zones list of the active_Cfg - zones_list = self.cfgs[self.active_cfg].split(";") - for n in zones_list: - # build the zones map - zones_map.update( - {n: self.zones[n].split(";")}) - # Format map in the correct format - active_zone_set = { - "active_zone_config": self.active_cfg, "zones": zones_map} - return active_zone_set - except Exception as e: - msg = (_("Failed getting active zone set from fabric %s.") - % six.text_type(e)) - LOG.error(msg) - raise exception.BrocadeZoningHttpException(reason=msg) - - def add_zones(self, add_zones_info, activate, active_zone_set=None): - """Add zone configuration. - - This method will add the zone configuration passed by user. - - :param add_zones_info: Zone names mapped to members. Zone members - are colon separated but case-insensitive - - .. code-block:: python - - { zonename1:[zonememeber1,zonemember2,...], - zonename2:[zonemember1, zonemember2,...]...} - - e.g: - - { - 'openstack50060b0000c26604201900051ee8e329': - ['50:06:0b:00:00:c2:66:04', '20:19:00:05:1e:e8:e3:29'] - } - - :param activate: True will activate the zone config. 
- :param active_zone_set: Active zone set dict retrieved from - get_active_zone_set method - :raises BrocadeZoningHttpException: - """ - LOG.debug("Add zones - zones passed: %(zones)s.", - {'zones': add_zones_info}) - cfg_name = zone_constant.CFG_NAME - cfgs = self.cfgs - zones = self.zones - alias = self.alias - qlps = self.qlps - ifas = self.ifas - active_cfg = self.active_cfg - # update the active_cfg, zones and cfgs map with new information - zones, cfgs, active_cfg = self.add_zones_cfgs(cfgs, - zones, - add_zones_info, - active_cfg, - cfg_name) - # Build the zonestring with updated maps - data = self.form_zone_string(cfgs, - active_cfg, - zones, - alias, - qlps, - ifas, - activate) - LOG.debug("Add zones: final zone string after applying " - "to the switch: %(zonestring)s", {'zonestring': data}) - # Post the zone data to the switch - error_code, error_msg = self.post_zone_data(data) - if error_code != "0": - msg = (_("Applying the zones and cfgs to the switch failed " - "(error code=%(err_code)s error msg=%(err_msg)s.") - % {'err_code': error_code, 'err_msg': error_msg}) - - LOG.error(msg) - raise exception.BrocadeZoningHttpException(reason=msg) - - def update_zones(self, zone_info, activate, operation, - active_zone_set=None): - """Update zone configuration. - - This method will update the zone configuration passed by user. - - :param zone_info: Zone names mapped to members. Zone members - are colon separated but case-insensitive - - .. code-block:: python - - { zonename1:[zonememeber1,zonemember2,...], - zonename2:[zonemember1, zonemember2,...]...} - - e.g: - - { - 'openstack50060b0000c26604201900051ee8e329': - ['50:06:0b:00:00:c2:66:04', '20:19:00:05:1e:e8:e3:29'] - } - - :param activate: True will activate the zone config. 
- :param operation: ZONE_ADD or ZONE_REMOVE - :param active_zone_set: Active zone set dict retrieved from - get_active_zone_set method - :raises BrocadeZoningHttpException: - """ - LOG.debug("Update zones - zones passed: %(zones)s.", - {'zones': zone_info}) - cfgs = self.cfgs - zones = self.zones - alias = self.alias - qlps = self.qlps - ifas = self.ifas - active_cfg = self.active_cfg - # update the zones with new information - zones = self._update_zones(zones, zone_info, operation) - # Build the zonestring with updated maps - data = self.form_zone_string(cfgs, - active_cfg, - zones, - alias, - qlps, - ifas, - activate) - LOG.debug("Update zones: final zone string after applying " - "to the switch: %(zonestring)s", {'zonestring': data}) - # Post the zone data to the switch - error_code, error_msg = self.post_zone_data(data) - if error_code != "0": - msg = (_("Applying the zones and cfgs to the switch failed " - "(error code=%(err_code)s error msg=%(err_msg)s.") - % {'err_code': error_code, 'err_msg': error_msg}) - - LOG.error(msg) - raise exception.BrocadeZoningHttpException(reason=msg) - - def form_zone_string(self, cfgs, active_cfg, - zones, alias, qlps, ifas, activate): - """Build the zone string in the required format. - - :param cfgs: cfgs map - :param active_cfg: Active cfg string - :param zones: zones map - :param alias: alias map - :param qlps: qlps map - :param ifas: ifas map - :param activate: True will activate config. 
- :returns: zonestring in the required format - :raises BrocadeZoningHttpException: - """ - try: - zoneString = zone_constant.ZONE_STRING_PREFIX - - # based on the activate save only will be changed - saveonly = "false" if activate is True else "true" - - # Form the zone string based on the dictionary of each items - for cfg in sorted(cfgs.keys()): - zoneString += (zone_constant.CFG_DELIM + - cfg + " " + cfgs.get(cfg) + " ") - for zone in sorted(zones.keys()): - zoneString += (zone_constant.ZONE_DELIM + - zone + " " + zones.get(zone) + " ") - for al in sorted(alias.keys()): - zoneString += (zone_constant.ALIAS_DELIM + - al + " " + alias.get(al) + " ") - for qlp in sorted(qlps.keys()): - zoneString += (zone_constant.QLP_DELIM + - qlp + " " + qlps.get(qlp) + " ") - for ifa in sorted(ifas.keys()): - zoneString += (zone_constant.IFA_DELIM + - ifa + " " + ifas.get(ifa) + " ") - # append the active_cfg string only if it is not null and activate - # is true - if active_cfg != "" and activate: - zoneString += (zone_constant.ACTIVE_CFG_DELIM + - active_cfg + " null ") - # Build the final zone string - zoneString += zone_constant.ZONE_END_DELIM + saveonly - except Exception as e: - msg = (_("Exception while forming the zone string: %s.") - % six.text_type(e)) - LOG.error(msg) - raise exception.BrocadeZoningHttpException(reason=msg) - # Reconstruct the zoneString to type base string for OpenSSL - return encodeutils.safe_encode(zoneString) - - def add_zones_cfgs(self, cfgs, zones, add_zones_info, - active_cfg, cfg_name): - """Add the zones and cfgs map based on the new zones info. 
- - This method will return the updated zones,cfgs and active_cfg - - :param cfgs: Existing cfgs map - :param active_cfg: Existing Active cfg string - :param zones: Existing zones map - :param add_zones_info: Zones map to add - :param active_cfg: Existing active cfg - :param cfg_name: New cfg name - :returns: updated zones, zone configs map, and active_cfg - """ - cfg_string = "" - delimiter = "" - zones_in_active_cfg = "" - try: - if active_cfg: - zones_in_active_cfg = cfgs.get(active_cfg) - for zone_name, members in add_zones_info.items(): - # if new zone is not active_cfg, build the cfg string with the - # new zones - if zone_name not in zones_in_active_cfg: - cfg_string += delimiter + zone_name - delimiter = ";" - # add a new zone with the members - zones.update({zone_name: ";".join(members)}) - # update cfg string - if active_cfg: - if cfg_string: - # update the existing active cfg map with cfgs string - cfgs.update( - {active_cfg: cfg_string + ";" + cfgs.get(active_cfg)}) - else: - # create new cfg and update that cfgs map with the new cfg - active_cfg = cfg_name - cfgs.update({cfg_name: cfg_string}) - except Exception as e: - msg = (_("Error while updating the new zones and cfgs " - "in the zone string. Error %(description)s.") - % {'description': six.text_type(e)}) - LOG.error(msg) - raise exception.BrocadeZoningHttpException(reason=msg) - return zones, cfgs, active_cfg - - def _update_zones(self, zones, updated_zones_info, operation): - """Update the zones based on the updated zones info. 
- - This method will return the updated zones - - :param zones: Existing zones map - :param updated_zones_info: Zones map to update - :param operation: ZONE_ADD or ZONE_REMOVE - :returns: updated zones - """ - try: - for zone_name in updated_zones_info: - members = updated_zones_info[zone_name] - # update the zone string - # if zone name already exists and dont have the new members - # already - current_members = zones.get(zone_name).split(";") - if operation == zone_constant.ZONE_ADD: - new_members = set(members).difference(set(current_members)) - if new_members: - # update the existing zone with new members - zones.update({zone_name: (";".join(new_members) + - ";" + zones.get(zone_name))}) - else: - new_members = set(current_members).difference(set(members)) - if new_members: - zones.pop(zone_name) - zones.update({zone_name: ";".join(new_members)}) - except Exception as e: - msg = (_("Error while updating the zones " - "in the zone string. Error %(description)s.") - % {'description': six.text_type(e)}) - LOG.error(msg) - raise exception.BrocadeZoningHttpException(reason=msg) - return zones - - def is_vf_enabled(self): - """To check whether VF is enabled or not. - - :returns: boolean to indicate VF enabled and session information - """ - session_info = self.get_session_info() - parsed_data = self.get_parsed_data(session_info, - zone_constant.SESSION_BEGIN, - zone_constant.SESSION_END) - try: - is_vf_enabled = bool(self.get_nvp_value( - parsed_data, zone_constant.VF_ENABLED)) - except exception.BrocadeZoningHttpException: - is_vf_enabled = False - parsed_data = None - return is_vf_enabled, parsed_data - - def get_nameserver_info(self): - """Get name server data from fabric. - - Return the connected node port wwn list(local - and remote) for the given switch fabric. - - :returns: name server information. 
- """ - nsinfo = [] - headers = {zone_constant.AUTH_HEADER: self.auth_header} - response = self.connect(zone_constant.GET_METHOD, - zone_constant.NS_PAGE, - header=headers) # GET request to nsinfo.html - parsed_raw_zoneinfo = self.get_parsed_data( - response, - zone_constant.NSINFO_BEGIN, - zone_constant.NSINFO_END).strip("\t\n\r") - # build the name server information in the correct format - for line in parsed_raw_zoneinfo.splitlines(): - start_index = line.find(zone_constant.NS_DELIM) + 7 - if start_index != -1: - nsinfo.extend([line[start_index:start_index + 23].strip()]) - return nsinfo - - def delete_zones_cfgs( - self, cfgs, zones, - delete_zones_info, active_cfg): - """Delete the zones and cfgs map based on the new zones info. - - Return the updated zones, cfgs and active_cfg after deleting the - required items. - - :param cfgs: Existing cfgs map - :param active_cfg: Existing Active cfg string - :param zones: Existing zones map - :param delete_zones_info: Zones map to add - :param active_cfg: Existing active cfg - :returns: updated zones, zone config sets, and active zone config - :raises BrocadeZoningHttpException: - """ - try: - delete_zones_info = delete_zones_info.split(";") - for zone in delete_zones_info: - # remove the zones from the zone map - zones.pop(zone) - # iterated all the cfgs, but need to check since in SSH only - # active cfg is iterated - for k, v in cfgs.items(): - v = v.split(";") - if zone in v: - # remove the zone from the cfg string - v.remove(zone) - # if all the zones are removed, remove the cfg from the - # cfg map - if not v: - cfgs.pop(k) - # update the original cfg with the updated string - else: - cfgs[k] = ";".join(v) - - # if all the zones are removed in the active_cfg, update it with - # empty string - if active_cfg not in cfgs: - active_cfg = "" - except KeyError as e: - msg = (_("Error while removing the zones and cfgs " - "in the zone string: %(description)s.") - % {'description': six.text_type(e)}) - LOG.error(msg) - 
raise exception.BrocadeZoningHttpException(reason=msg) - return zones, cfgs, active_cfg - - def delete_zones(self, delete_zones_info, activate, active_zone_set=None): - """Delete zones from fabric. - - Deletes zones in the active zone config. - - :param zone_names: zoneNames separated by semicolon - :param activate: True/False - :param active_zone_set: the active zone set dict retrieved - from get_active_zone_set method - """ - cfgs = self.cfgs - zones = self.zones - alias = self.alias - qlps = self.qlps - ifas = self.ifas - active_cfg = self.active_cfg - # update the active_cfg, zones and cfgs map with required information - # being removed - zones, cfgs, active_cfg = self.delete_zones_cfgs( - cfgs, - zones, - delete_zones_info, - active_cfg) - # Build the zonestring with updated maps - data = self.form_zone_string(cfgs, - active_cfg, - zones, - alias, - qlps, - ifas, - activate) - LOG.debug("Delete zones: final zone string after applying " - "to the switch: %(zonestring)s", {'zonestring': data}) - error_code, error_msg = self.post_zone_data(data) - if error_code != "0": - msg = (_("Applying the zones and cfgs to the switch failed " - "(error code=%(err_code)s error msg=%(err_msg)s.") - % {'err_code': error_code, 'err_msg': error_msg}) - LOG.error(msg) - raise exception.BrocadeZoningHttpException(reason=msg) - - def post_zone_data(self, data): - """Send POST request to the switch with the payload. 
- - :param data: payload to be sent to switch - """ - - status = "progress" - parsed_data_txn = "" - headers = {zone_constant.AUTH_HEADER: self.auth_header} - - LOG.debug("Requesting the switch with posting the zone string.") - # POST request to gzoneinfo with zonestring as payload - response = self.connect(zone_constant.POST_METHOD, - zone_constant.ZONE_PAGE, - data, - headers) - parsed_data = self.get_parsed_data(response, - zone_constant.ZONE_TX_BEGIN, - zone_constant.ZONE_TX_END) - transID = self.get_nvp_value(parsed_data, - zone_constant.ZONE_TX_ID) - transURL = zone_constant.ZONE_TRAN_STATUS.format(txnId=transID) - timeout = 360 - sleep_time = 3 - time_elapsed = 0 - while(status != "done"): - txn_response = self.connect( - zone_constant.GET_METHOD, transURL, "", headers) - parsed_data_txn = self.get_parsed_data(txn_response, - zone_constant.ZONE_TX_BEGIN, - zone_constant.ZONE_TX_END) - status = self.get_nvp_value(parsed_data_txn, - zone_constant.ZONE_TX_STATUS) - time.sleep(sleep_time) - time_elapsed += sleep_time - if time_elapsed > timeout: - break - if status != "done": - errorCode = -1 - errorMessage = ("Timed out, waiting for zone transaction on " - "the switch to complete") - else: - errorCode = self.get_nvp_value(parsed_data_txn, - zone_constant.ZONE_ERROR_CODE) - errorMessage = self.get_nvp_value(parsed_data_txn, - zone_constant.ZONE_ERROR_MSG) - return errorCode, errorMessage - - def check_change_vf_context(self): - """Check whether VF related configurations is valid and proceed.""" - vf_enabled, session_data = self.is_vf_enabled() - # VF enabled will be false if vf is disable or not supported - LOG.debug("VF enabled on switch: %(vfenabled)s.", - {'vfenabled': vf_enabled}) - # Change the VF context in the session - if vf_enabled: - if self.vfid is None: - msg = _("No VF ID is defined in the configuration file.") - LOG.error(msg) - raise exception.BrocadeZoningHttpException(reason=msg) - elif self.vfid != 128: - self.change_vf_context(self.vfid, 
session_data) - else: - if self.vfid is not None: - msg = _("VF is not enabled.") - LOG.error(msg) - raise exception.BrocadeZoningHttpException(reason=msg) - - def _disconnect(self): - """Disconnect from the switch using HTTP/HTTPS protocol. - - :raises BrocadeZoningHttpException: - """ - try: - headers = {zone_constant.AUTH_HEADER: self.auth_header} - response = self.connect(zone_constant.GET_METHOD, - zone_constant.LOGOUT_PAGE, - header=headers) - return response - except requests.exceptions.ConnectionError as e: - msg = (_("Error while connecting the switch %(switch_id)s " - "with protocol %(protocol)s. Error: %(error)s.") - % {'switch_id': self.switch_ip, - 'protocol': self.protocol, - 'error': six.text_type(e)}) - LOG.error(msg) - raise exception.BrocadeZoningHttpException(reason=msg) - except exception.BrocadeZoningHttpException as ex: - msg = (_("Unexpected status code from the switch %(switch_id)s " - "with protocol %(protocol)s for url %(page)s. " - "Error: %(error)s") - % {'switch_id': self.switch_ip, - 'protocol': self.protocol, - 'page': zone_constant.LOGOUT_PAGE, - 'error': six.text_type(ex)}) - LOG.error(msg) - raise exception.BrocadeZoningHttpException(reason=msg) - - def cleanup(self): - """Close session.""" - self._disconnect() - self.session.close() diff --git a/cinder/zonemanager/drivers/brocade/fc_zone_constants.py b/cinder/zonemanager/drivers/brocade/fc_zone_constants.py deleted file mode 100644 index 8af6333ea..000000000 --- a/cinder/zonemanager/drivers/brocade/fc_zone_constants.py +++ /dev/null @@ -1,104 +0,0 @@ -# (c) Copyright 2016 Brocade Communications Systems Inc. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. 
You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -# - -""" -Common constants used by Brocade FC Zone Driver. -""" -YES = 'y' -ACTIVE_ZONE_CONFIG = 'active_zone_config' -CFG_ZONESET = 'cfg:' -CFG_ZONES = 'zones' -OPENSTACK_CFG_NAME = 'OpenStack_Cfg' -SUCCESS = 'Success' -TRANS_ABORTABLE = 'It is abortable' - -""" -CLI Commands for FC zoning operations. -""" -GET_ACTIVE_ZONE_CFG = 'cfgactvshow' -ZONE_CREATE = 'zonecreate ' -ZONESET_CREATE = 'cfgcreate ' -CFG_SAVE = 'cfgsave' -CFG_ADD = 'cfgadd ' -ACTIVATE_ZONESET = 'cfgenable ' -DEACTIVATE_ZONESET = 'cfgdisable' -CFG_DELETE = 'cfgdelete ' -CFG_REMOVE = 'cfgremove ' -ZONE_DELETE = 'zonedelete ' -ZONE_ADD = 'zoneadd ' -ZONE_REMOVE = 'zoneremove ' -CFG_SHOW_TRANS = 'cfgtransshow' -CFG_ZONE_TRANS_ABORT = 'cfgtransabort' -NS_SHOW = 'nsshow' -NS_CAM_SHOW = 'nscamshow' - -""" -HTTPS connector constants -""" -AUTH_HEADER = "Authorization" -PROTOCOL_HTTPS = "HTTPS" -STATUS_OK = 200 -SECINFO_PAGE = "/secinfo.html" -AUTHEN_PAGE = "/authenticate.html" -GET_METHOD = "GET" -POST_METHOD = "POST" -SECINFO_BEGIN = "--BEGIN SECINFO" -SECINFO_END = "--END SECINFO" -RANDOM = "RANDOM" -AUTH_STRING = "Custom_Basic " # Trailing space is required, do not remove -AUTHEN_BEGIN = "--BEGIN AUTHENTICATE" -AUTHEN_END = "--END AUTHENTICATE" -AUTHENTICATED = "authenticated" -SESSION_PAGE_ACTION = "/session.html?action=query" -SESSION_BEGIN = "--BEGIN SESSION" -SESSION_END = "--END SESSION" -SESSION_PAGE = "/session.html" -LOGOUT_PAGE = "/logout.html" -ZONEINFO_BEGIN = "--BEGIN ZONE INFO" -ZONEINFO_END = "--END ZONE INFO" -SWITCH_PAGE = "/switch.html" -SWITCHINFO_BEGIN = "--BEGIN SWITCH 
INFORMATION" -SWITCHINFO_END = "--END SWITCH INFORMATION" -FIRMWARE_VERSION = "swFWVersion" -VF_ENABLED = "vfEnabled" -MANAGEABLE_VF = "manageableLFList" -CHANGE_VF = ("Session=--BEGIN SESSION\n\taction=apply\n\tLFId= {vfid} " - "\b\t--END SESSION") -ZONE_TRAN_STATUS = "/gzoneinfo.htm?txnId={txnId}" -CFG_DELIM = "\x01" -ZONE_DELIM = "\x02" -ALIAS_DELIM = "\x03" -QLP_DELIM = "\x04" -ZONE_END_DELIM = "\x05&saveonly=" -IFA_DELIM = "\x06" -ACTIVE_CFG_DELIM = "\x07" -DEFAULT_CFG = "d__efault__Cfg" -NS_PAGE = "/nsinfo.htm" -NSINFO_BEGIN = "--BEGIN NS INFO" -NSINFO_END = "--END NS INFO" -NS_DELIM = ";N ;" -ZONE_TX_BEGIN = "--BEGIN ZONE_TXN_INFO" -ZONE_TX_END = "--END ZONE_TXN_INFO" -ZONE_ERROR_CODE = "errorCode" -ZONE_PAGE = "/gzoneinfo.htm" -CFG_NAME = "openstack_cfg" -ZONE_STRING_PREFIX = "zonecfginfo=" -ZONE_ERROR_MSG = "errorMessage" -ZONE_TX_ID = "txnId" -ZONE_TX_STATUS = "status" -SESSION_LF_ID = "sessionLFId" -HTTP = "http" -HTTPS = "https" diff --git a/cinder/zonemanager/drivers/cisco/__init__.py b/cinder/zonemanager/drivers/cisco/__init__.py deleted file mode 100644 index e69de29bb..000000000 diff --git a/cinder/zonemanager/drivers/cisco/cisco_fabric_opts.py b/cinder/zonemanager/drivers/cisco/cisco_fabric_opts.py deleted file mode 100644 index 524dcc1e9..000000000 --- a/cinder/zonemanager/drivers/cisco/cisco_fabric_opts.py +++ /dev/null @@ -1,56 +0,0 @@ -# (c) Copyright 2014 Cisco Systems Inc. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
-# -from oslo_config import cfg - -from cinder.volume import configuration - -cisco_zone_opts = [ - cfg.StrOpt('cisco_fc_fabric_address', - default='', - help='Management IP of fabric'), - cfg.StrOpt('cisco_fc_fabric_user', - default='', - help='Fabric user ID'), - cfg.StrOpt('cisco_fc_fabric_password', - default='', - help='Password for user', - secret=True), - cfg.PortOpt('cisco_fc_fabric_port', - default=22, - help='Connecting port'), - cfg.StrOpt('cisco_zoning_policy', - default='initiator-target', - help='overridden zoning policy'), - cfg.BoolOpt('cisco_zone_activate', - default=True, - help='overridden zoning activation state'), - cfg.StrOpt('cisco_zone_name_prefix', - help='overridden zone name prefix'), - cfg.StrOpt('cisco_zoning_vsan', - help='VSAN of the Fabric'), -] - -CONF = cfg.CONF -CONF.register_opts(cisco_zone_opts, group='CISCO_FABRIC_EXAMPLE') - - -def load_fabric_configurations(fabric_names): - fabric_configs = {} - for fabric_name in fabric_names: - config = configuration.Configuration(cisco_zone_opts, fabric_name) - fabric_configs[fabric_name] = config - - return fabric_configs diff --git a/cinder/zonemanager/drivers/cisco/cisco_fc_san_lookup_service.py b/cinder/zonemanager/drivers/cisco/cisco_fc_san_lookup_service.py deleted file mode 100644 index 66daecdf0..000000000 --- a/cinder/zonemanager/drivers/cisco/cisco_fc_san_lookup_service.py +++ /dev/null @@ -1,358 +0,0 @@ -# (c) Copyright 2014 Cisco Systems Inc. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the -# License for the specific language governing permissions and limitations -# under the License. -# - - -import random - -from eventlet import greenthread -from oslo_concurrency import processutils -from oslo_log import log as logging -from oslo_utils import excutils -import six - -from cinder import exception -from cinder.i18n import _ -from cinder import ssh_utils -from cinder import utils -from cinder.zonemanager.drivers.cisco import cisco_fabric_opts as fabric_opts -import cinder.zonemanager.drivers.cisco.fc_zone_constants as zone_constant -from cinder.zonemanager import fc_san_lookup_service as fc_service -from cinder.zonemanager import utils as zm_utils - -LOG = logging.getLogger(__name__) - - -class CiscoFCSanLookupService(fc_service.FCSanLookupService): - """The SAN lookup service that talks to Cisco switches. - - Version History: - 1.0.0 - Initial version - - """ - - VERSION = "1.0.0" - - def __init__(self, **kwargs): - """Initializing the client.""" - super(CiscoFCSanLookupService, self).__init__(**kwargs) - self.configuration = kwargs.get('configuration', None) - self.create_configuration() - - self.switch_user = "" - self.switch_port = "" - self.switch_pwd = "" - self.switch_ip = "" - self.sshpool = None - - def create_configuration(self): - """Configuration specific to SAN context values.""" - config = self.configuration - - fabric_names = [x.strip() for x in config.fc_fabric_names.split(',')] - LOG.debug('Fabric Names: %s', fabric_names) - - # There can be more than one SAN in the network and we need to - # get credentials for each for SAN context lookup later. - # Cisco Zonesets require VSANs - if fabric_names: - self.fabric_configs = fabric_opts.load_fabric_configurations( - fabric_names) - - def get_device_mapping_from_network(self, - initiator_wwn_list, - target_wwn_list): - """Provides the initiator/target map for available SAN contexts. 
- - Looks up fcns database of each fc SAN configured to find logged in - devices and returns a map of initiator and target port WWNs for each - fabric. - - :param initiator_wwn_list: List of initiator port WWN - :param target_wwn_list: List of target port WWN - :returns: List -- device wwn map in following format - - .. code-block:: python - - { - : { - 'initiator_port_wwn_list': - ('200000051e55a100', '200000051e55a121'..) - 'target_port_wwn_list': - ('100000051e55a100', '100000051e55a121'..) - } - } - - :raises Exception: when connection to fabric is failed - """ - device_map = {} - formatted_target_list = [] - formatted_initiator_list = [] - fabric_map = {} - fabric_names = self.configuration.fc_fabric_names - - if not fabric_names: - raise exception.InvalidParameterValue( - err=_("Missing Fibre Channel SAN configuration " - "param - fc_fabric_names")) - - fabrics = [x.strip() for x in fabric_names.split(',')] - - LOG.debug("FC Fabric List: %s", fabrics) - if fabrics: - for t in target_wwn_list: - formatted_target_list.append(zm_utils.get_formatted_wwn(t)) - - for i in initiator_wwn_list: - formatted_initiator_list.append(zm_utils.get_formatted_wwn(i)) - - for fabric_name in fabrics: - self.switch_ip = self.fabric_configs[fabric_name].safe_get( - 'cisco_fc_fabric_address') - self.switch_user = self.fabric_configs[fabric_name].safe_get( - 'cisco_fc_fabric_user') - self.switch_pwd = self.fabric_configs[fabric_name].safe_get( - 'cisco_fc_fabric_password') - self.switch_port = self.fabric_configs[fabric_name].safe_get( - 'cisco_fc_fabric_port') - zoning_vsan = self.fabric_configs[fabric_name].safe_get( - 'cisco_zoning_vsan') - - # Get name server data from fabric and find the targets - # logged in - nsinfo = '' - LOG.debug("show fcns database for vsan %s", zoning_vsan) - nsinfo = self.get_nameserver_info(zoning_vsan) - - LOG.debug("Lookup service:fcnsdatabase-%s", nsinfo) - LOG.debug("Lookup service:initiator list from caller-%s", - formatted_initiator_list) - 
LOG.debug("Lookup service:target list from caller-%s", - formatted_target_list) - visible_targets = [x for x in nsinfo - if x in formatted_target_list] - visible_initiators = [x for x in nsinfo - if x in formatted_initiator_list] - - if visible_targets: - LOG.debug("Filtered targets is: %s", visible_targets) - # getting rid of the : before returning - for idx, elem in enumerate(visible_targets): - elem = str(elem).replace(':', '') - visible_targets[idx] = elem - else: - LOG.debug("No targets are in the fcns database" - " for vsan %s", zoning_vsan) - - if visible_initiators: - # getting rid of the : before returning ~sk - for idx, elem in enumerate(visible_initiators): - elem = str(elem).replace(':', '') - visible_initiators[idx] = elem - else: - LOG.debug("No initiators are in the fcns database" - " for vsan %s", zoning_vsan) - - fabric_map = {'initiator_port_wwn_list': visible_initiators, - 'target_port_wwn_list': visible_targets - } - device_map[zoning_vsan] = fabric_map - LOG.debug("Device map for SAN context: %s", device_map) - return device_map - - def get_nameserver_info(self, fabric_vsan): - """Get fcns database info from fabric. 
- - This method will return the connected node port wwn list(local - and remote) for the given switch fabric - """ - cli_output = None - nsinfo_list = [] - try: - cmd = ([zone_constant.FCNS_SHOW, fabric_vsan, ' | no-more']) - cli_output = self._get_switch_info(cmd) - except exception.FCSanLookupServiceException: - with excutils.save_and_reraise_exception(): - LOG.error("Failed collecting show fcns database for fabric") - if cli_output: - nsinfo_list = self._parse_ns_output(cli_output) - - LOG.debug("Connector returning fcns info-%s", nsinfo_list) - return nsinfo_list - - def _get_switch_info(self, cmd_list): - stdout, stderr, sw_data = None, None, None - try: - stdout, stderr = self._run_ssh(cmd_list, True, 1) - LOG.debug("CLI output from ssh - output: %s", stdout) - if (stdout): - sw_data = stdout.splitlines() - return sw_data - except processutils.ProcessExecutionError as e: - msg = _("Error while getting data via ssh: (command=%(cmd)s " - "error=%(err)s).") % {'cmd': cmd_list, - 'err': six.text_type(e)} - LOG.error(msg) - raise exception.CiscoZoningCliException(reason=msg) - - def _parse_ns_output(self, switch_data): - """Parses name server data. 
- - Parses nameserver raw data and adds the device port wwns to the list - - :returns: list of device port wwn from ns info - """ - nsinfo_list = [] - for line in switch_data: - if not(" N " in line): - continue - linesplit = line.split() - if len(linesplit) > 2: - node_port_wwn = linesplit[2] - nsinfo_list.append(node_port_wwn) - else: - msg = _("Malformed fcns output string: %s") % line - LOG.error(msg) - raise exception.InvalidParameterValue(err=msg) - return nsinfo_list - - def _run_ssh(self, cmd_list, check_exit_code=True, attempts=1): - - command = ' '.join(cmd_list) - - if not self.sshpool: - self.sshpool = ssh_utils.SSHPool(self.switch_ip, - self.switch_port, - None, - self.switch_user, - self.switch_pwd, - min_size=1, - max_size=5) - last_exception = None - try: - with self.sshpool.item() as ssh: - while attempts > 0: - attempts -= 1 - try: - return processutils.ssh_execute( - ssh, - command, - check_exit_code=check_exit_code) - except Exception as e: - msg = _("Exception: %s") % six.text_type(e) - LOG.error(msg) - last_exception = e - greenthread.sleep(random.randint(20, 500) / 100.0) - try: - raise processutils.ProcessExecutionError( - exit_code=last_exception.exit_code, - stdout=last_exception.stdout, - stderr=last_exception.stderr, - cmd=last_exception.cmd) - except AttributeError: - raise processutils.ProcessExecutionError( - exit_code=-1, - stdout="", - stderr="Error running SSH command", - cmd=command) - except Exception: - with excutils.save_and_reraise_exception(): - LOG.error("Error running SSH command: %s", command) - - def _ssh_execute(self, cmd_list, check_exit_code=True, attempts=1): - """Execute cli with status update. - - Executes CLI commands where status return is expected. - - cmd_list is a list of commands, where each command is itself - a list of parameters. We use utils.check_ssh_injection to check each - command, but then join then with " ; " to form a single command. 
- """ - - # Check that each command is secure - for cmd in cmd_list: - utils.check_ssh_injection(cmd) - - # Combine into a single command. - command = ' ; '.join(map(lambda x: ' '.join(x), cmd_list)) - - if not self.sshpool: - self.sshpool = ssh_utils.SSHPool(self.switch_ip, - self.switch_port, - None, - self.switch_user, - self.switch_pwd, - min_size=1, - max_size=5) - stdin, stdout, stderr = None, None, None - LOG.debug("Executing command via ssh: %s", command) - last_exception = None - try: - with self.sshpool.item() as ssh: - while attempts > 0: - attempts -= 1 - try: - stdin, stdout, stderr = ssh.exec_command(command) - greenthread.sleep(random.randint(20, 500) / 100.0) - channel = stdout.channel - exit_status = channel.recv_exit_status() - LOG.debug("Exit Status from ssh:%s", exit_status) - # exit_status == -1 if no exit code was returned - if exit_status != -1: - LOG.debug('Result was %s', exit_status) - if check_exit_code and exit_status != 0: - raise processutils.ProcessExecutionError( - exit_code=exit_status, - stdout=stdout, - stderr=stderr, - cmd=command) - else: - return True - else: - return True - except Exception as e: - msg = _("Exception: %s") % six.text_type(e) - LOG.error(msg) - last_exception = e - greenthread.sleep(random.randint(20, 500) / 100.0) - LOG.debug("Handling error case after SSH:%s", last_exception) - try: - raise processutils.ProcessExecutionError( - exit_code=last_exception.exit_code, - stdout=last_exception.stdout, - stderr=last_exception.stderr, - cmd=last_exception.cmd) - except AttributeError: - raise processutils.ProcessExecutionError( - exit_code=-1, - stdout="", - stderr="Error running SSH command", - cmd=command) - except Exception as e: - with excutils.save_and_reraise_exception(): - msg = (_("Error executing command via ssh: %s") % - six.text_type(e)) - LOG.error(msg) - finally: - if stdin: - stdin.flush() - stdin.close() - if stdout: - stdout.close() - if stderr: - stderr.close() - - def cleanup(self): - self.sshpool = 
None diff --git a/cinder/zonemanager/drivers/cisco/cisco_fc_zone_client_cli.py b/cinder/zonemanager/drivers/cisco/cisco_fc_zone_client_cli.py deleted file mode 100644 index 2afecd8b7..000000000 --- a/cinder/zonemanager/drivers/cisco/cisco_fc_zone_client_cli.py +++ /dev/null @@ -1,524 +0,0 @@ -# (c) Copyright 2014 Cisco Systems Inc. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -# - - -""" -Script to push the zone configuration to Cisco SAN switches. -""" -import random -import re - -from eventlet import greenthread -from oslo_concurrency import processutils -from oslo_log import log as logging -from oslo_utils import excutils -import six - -from cinder import exception -from cinder.i18n import _ -from cinder import ssh_utils -from cinder import utils -import cinder.zonemanager.drivers.cisco.fc_zone_constants as ZoneConstant - -LOG = logging.getLogger(__name__) - - -class CiscoFCZoneClientCLI(object): - """Cisco FC zone client cli implementation. - - OpenStack Fibre Channel zone client cli connector - to manage FC zoning in Cisco SAN fabrics. 
- - Version history: - 1.0 - Initial Cisco FC zone client cli - """ - - switch_ip = None - switch_port = '22' - switch_user = 'admin' - switch_pwd = 'none' - - def __init__(self, ipaddress, username, password, port, vsan): - """initializing the client.""" - self.switch_ip = ipaddress - self.switch_port = port - self.switch_user = username - self.switch_pwd = password - self.fabric_vsan = vsan - self.sshpool = None - - def get_active_zone_set(self): - """Return the active zone configuration. - - Return active zoneset from fabric. When none of the configurations - are active then it will return empty map. - - :returns: Map -- active zone set map in the following format - - .. code-block:: python - - { - 'zones': - {'openstack50060b0000c26604201900051ee8e329': - ['50060b0000c26604', '201900051ee8e329'] - }, - 'active_zone_config': 'OpenStack_Cfg' - } - """ - zone_set = {} - zone = {} - zone_member = None - zone_name = None - switch_data = None - zone_set_name = None - try: - switch_data = self._get_switch_info( - [ZoneConstant.GET_ACTIVE_ZONE_CFG, self.fabric_vsan, - ' | no-more']) - except exception.CiscoZoningCliException: - with excutils.save_and_reraise_exception(): - LOG.error("Failed getting active zone set " - "from fabric %s", self.switch_ip) - try: - for line in switch_data: - # Split on non-word characters, - line_split = re.split(r'[\s\[\]]+', line) - if ZoneConstant.CFG_ZONESET in line_split: - # zoneset name [name] vsan [vsan] - zone_set_name = \ - line_split[line_split.index(ZoneConstant.CFG_ZONESET) - + 2] - continue - if ZoneConstant.CFG_ZONE in line_split: - # zone name [name] vsan [vsan] - zone_name = \ - line_split[line_split.index(ZoneConstant.CFG_ZONE) + 2] - zone[zone_name] = list() - continue - if ZoneConstant.CFG_ZONE_MEMBER in line_split: - # Examples: - # pwwn c0:50:76:05:15:9f:00:12 - # * fcid 0x1e01c0 [pwwn 50:05:07:68:02:20:48:04] [V7K_N1P2] - zone_member = \ - line_split[ - line_split.index(ZoneConstant.CFG_ZONE_MEMBER) + 1] - 
zone_member_list = zone.get(zone_name) - zone_member_list.append(zone_member) - - zone_set[ZoneConstant.CFG_ZONES] = zone - zone_set[ZoneConstant.ACTIVE_ZONE_CONFIG] = zone_set_name - except Exception as ex: - # In case of parsing error here, it should be malformed cli output. - msg = _("Malformed zone configuration: (switch=%(switch)s " - "zone_config=%(zone_config)s)." - ) % {'switch': self.switch_ip, - 'zone_config': switch_data} - LOG.error(msg) - exc_msg = _("Exception: %s") % six.text_type(ex) - LOG.error(exc_msg) - raise exception.FCZoneDriverException(reason=msg) - - return zone_set - - def add_zones(self, zones, activate, fabric_vsan, active_zone_set, - zone_status): - """Add zone configuration. - - This method will add the zone configuration passed by user. - - :param zones: Zone names mapped to members and VSANs - Zone members are colon separated but case-insensitive - - .. code-block:: python - - { zonename1:[zonememeber1,zonemember2,...], - zonename2:[zonemember1, zonemember2,...]...} - - e.g: - - { - 'openstack50060b0000c26604201900051ee8e329': - ['50:06:0b:00:00:c2:66:04', '20:19:00:05:1e:e8:e3:29'] - } - - :param activate: True will activate the zone config. 
- :param fabric_vsan: - :param active_zone_set: Active zone set dict retrieved from - get_active_zone_set method - :param zone_status: Status of the zone - :raises CiscoZoningCliException: - """ - LOG.debug("Add Zones - Zones passed: %s", zones) - - LOG.debug("Active zone set: %s", active_zone_set) - zone_list = active_zone_set[ZoneConstant.CFG_ZONES] - LOG.debug("zone list: %s", zone_list) - LOG.debug("zone status: %s", zone_status) - - cfg_name = active_zone_set[ZoneConstant.ACTIVE_ZONE_CONFIG] - - zone_cmds = [['conf'], - ['zoneset', 'name', cfg_name, 'vsan', fabric_vsan]] - - for zone in zones.keys(): - zone_cmds.append(['zone', 'name', zone]) - - for member in zones[zone]: - zone_cmds.append(['member', 'pwwn', member]) - - zone_cmds.append(['end']) - - try: - LOG.debug("Add zones: Config cmd to run: %s", zone_cmds) - self._ssh_execute(zone_cmds, True, 1) - - if activate: - self.activate_zoneset(cfg_name, fabric_vsan, zone_status) - self._cfg_save() - except Exception as e: - - msg = _("Creating and activating zone set failed: " - "(Zone set=%(zoneset)s error=%(err)s)." - ) % {'zoneset': cfg_name, 'err': six.text_type(e)} - LOG.error(msg) - raise exception.CiscoZoningCliException(reason=msg) - - def update_zones(self, zones, activate, fabric_vsan, operation, - active_zone_set, zone_status): - """Update the zone configuration. - - This method will update the zone configuration passed by user. - - :param zones: zone names mapped to members. Zone members - are colon separated but case-insensitive - - .. code-block:: python - - { zonename1:[zonememeber1, zonemember2,...], - zonename2:[zonemember1, zonemember2,...]...} - - e.g: - - { - 'openstack50060b0000c26604201900051ee8e329': - ['50:06:0b:00:00:c2:66:04', - '20:19:00:05:1e:e8:e3:29'] - } - - :param activate: True will activate the zone config. 
- :param operation: zone add or zone remove - :param fabric_vsan: Virtual San # - :param active_zone_set: Active zone set dict retrieved from - get_active_zone_set method - :param zone_status: Status of the zone - :raises CiscoZoningCliException: - """ - - LOG.debug("Update Zones - Operation: %(op)s - Zones " - "passed: %(zones)s", - {'op': operation, 'zones': zones}) - - cfg_name = active_zone_set[ZoneConstant.ACTIVE_ZONE_CONFIG] - - zone_cmds = [['conf'], - ['zoneset', 'name', cfg_name, 'vsan', fabric_vsan]] - zone_mod_cmd = [] - if operation == ZoneConstant.ZONE_ADD: - zone_mod_cmd = ['member', 'pwwn'] - elif operation == ZoneConstant.ZONE_REMOVE: - zone_mod_cmd = ['no', 'member', 'pwwn'] - - for zone, zone_members in zones.items(): - zone_cmds.append(['zone', 'name', zone]) - for member in zone_members: - zone_cmds.append(zone_mod_cmd + [member]) - zone_cmds.append(['end']) - - try: - LOG.debug("Update zones: Config cmd to run: %s", zone_cmds) - self._ssh_execute(zone_cmds, True, 1) - - if activate: - self.activate_zoneset(cfg_name, fabric_vsan, zone_status) - self._cfg_save() - except Exception as e: - - msg = (_("Updating and activating zone set failed: " - "(Zone set=%(zoneset)s error=%(err)s).") - % {'zoneset': cfg_name, 'err': six.text_type(e)}) - LOG.error(msg) - raise exception.CiscoZoningCliException(reason=msg) - - def activate_zoneset(self, cfgname, fabric_vsan, zone_status): - """Method to Activate the zone config. 
Param cfgname - ZonesetName.""" - - LOG.debug("zone status: %s", zone_status) - - cmd_list = [['conf'], - ['zoneset', 'activate', 'name', cfgname, 'vsan', - self.fabric_vsan]] - if zone_status['mode'] == 'enhanced': - cmd_list.append(['zone', 'commit', 'vsan', fabric_vsan]) - - cmd_list.append(['end']) - - return self._ssh_execute(cmd_list, True, 1) - - def get_zoning_status(self): - """Return the zoning mode and session for a zoneset.""" - zone_status = {} - - try: - switch_data = self._get_switch_info( - [ZoneConstant.GET_ZONE_STATUS, self.fabric_vsan]) - except exception.CiscoZoningCliException: - with excutils.save_and_reraise_exception(): - LOG.error("Failed getting zone status " - "from fabric %s", self.switch_ip) - try: - for line in switch_data: - # Split on non-word characters, - line_split = re.split(r'[\s\[\]]+', line) - if 'mode:' in line_split: - # mode: - zone_status['mode'] = line_split[line_split.index('mode:') - + 1] - continue - if 'session:' in line_split: - # session: - zone_status['session'] = \ - line_split[line_split.index('session:') + 1] - continue - except Exception as ex: - # In case of parsing error here, it should be malformed cli output. - msg = _("Malformed zone status: (switch=%(switch)s " - "zone_config=%(zone_config)s)." - ) % {'switch': self.switch_ip, - 'zone_status': switch_data} - LOG.error(msg) - exc_msg = _("Exception: %s") % six.text_type(ex) - LOG.error(exc_msg) - raise exception.FCZoneDriverException(reason=msg) - - return zone_status - - def delete_zones(self, zone_names, activate, fabric_vsan, active_zone_set, - zone_status): - """Delete zones from fabric. 
- - Method to delete the active zone config zones - - params zone_names: zoneNames separated by semicolon - params activate: True/False - """ - - LOG.debug("zone_names %s", zone_names) - active_zoneset_name = active_zone_set[ZoneConstant.ACTIVE_ZONE_CONFIG] - - cmds = [['conf'], - ['zoneset', 'name', active_zoneset_name, 'vsan', - fabric_vsan]] - - try: - for zone in set(zone_names.split(';')): - cmds.append(['no', 'zone', 'name', zone]) - - cmds.append(['end']) - - LOG.debug("Delete zones: Config cmd to run: %s", cmds) - self._ssh_execute(cmds, True, 1) - - if activate: - self.activate_zoneset(active_zoneset_name, fabric_vsan, - zone_status) - self._cfg_save() - - except Exception as e: - msg = _("Deleting zones failed: (command=%(cmd)s error=%(err)s)." - ) % {'cmd': cmds, 'err': six.text_type(e)} - LOG.error(msg) - raise exception.CiscoZoningCliException(reason=msg) - - def get_nameserver_info(self): - """Get name server data from fabric. - - This method will return the connected node port wwn list(local - and remote) for the given switch fabric - - show fcns database - """ - cli_output = None - return_list = [] - try: - cli_output = self._get_switch_info([ZoneConstant.FCNS_SHOW, - self.fabric_vsan]) - except exception.CiscoZoningCliException: - with excutils.save_and_reraise_exception(): - LOG.error("Failed collecting fcns database " - "info for fabric %s", self.switch_ip) - - if (cli_output): - return_list = self._parse_ns_output(cli_output) - - LOG.info("Connector returning fcnsinfo-%s", return_list) - - return return_list - - @utils.retry(processutils.ProcessExecutionError, retries=5) - def _cfg_save(self): - cmd = ['copy', 'running-config', 'startup-config'] - self._run_ssh(cmd, True) - - def _get_switch_info(self, cmd_list): - stdout, stderr, sw_data = None, None, None - try: - stdout, stderr = self._run_ssh(cmd_list, True) - LOG.debug("CLI output from ssh - output: %s", stdout) - if (stdout): - sw_data = stdout.splitlines() - return sw_data - except 
processutils.ProcessExecutionError as e: - msg = _("Error while getting data via ssh: (command=%(cmd)s " - "error=%(err)s).") % {'cmd': cmd_list, - 'err': six.text_type(e)} - LOG.error(msg) - raise exception.CiscoZoningCliException(reason=msg) - - def _parse_ns_output(self, switch_data): - """Parses name server data. - - Parses nameserver raw data and adds the device port wwns to the list - - :returns: List -- list of device port wwn from ns info - """ - return_list = [] - for line in switch_data: - if not(" N " in line): - continue - linesplit = line.split() - if len(linesplit) > 2: - node_port_wwn = linesplit[2] - return_list.append(node_port_wwn) - else: - msg = _("Malformed show fcns database string: %s") % line - LOG.error(msg) - raise exception.InvalidParameterValue(err=msg) - return return_list - - def _run_ssh(self, cmd_list, check_exit_code=True): - - command = ' '.join(cmd_list) - - if not self.sshpool: - self.sshpool = ssh_utils.SSHPool(self.switch_ip, - self.switch_port, - None, - self.switch_user, - self.switch_pwd, - min_size=1, - max_size=5) - try: - with self.sshpool.item() as ssh: - return processutils.ssh_execute( - ssh, - command, - check_exit_code=check_exit_code) - - except Exception: - with excutils.save_and_reraise_exception(): - LOG.warning("Error running SSH command: %s", command) - - def _ssh_execute(self, cmd_list, check_exit_code=True, attempts=1): - """Execute cli with status update. - - Executes CLI commands where status return is expected. - - cmd_list is a list of commands, where each command is itself - a list of parameters. We use utils.check_ssh_injection to check each - command, but then join then with " ; " to form a single command. - """ - - # Check that each command is secure - for cmd in cmd_list: - utils.check_ssh_injection(cmd) - - # Combine into a single command. 
- command = ' ; '.join(map(lambda x: ' '.join(x), cmd_list)) - - if not self.sshpool: - self.sshpool = ssh_utils.SSHPool(self.switch_ip, - self.switch_port, - None, - self.switch_user, - self.switch_pwd, - min_size=1, - max_size=5) - stdin, stdout, stderr = None, None, None - LOG.debug("Executing command via ssh: %s", command) - last_exception = None - try: - with self.sshpool.item() as ssh: - while attempts > 0: - attempts -= 1 - try: - stdin, stdout, stderr = ssh.exec_command(command) - channel = stdout.channel - exit_status = channel.recv_exit_status() - LOG.debug("Exit Status from ssh: %s", exit_status) - # exit_status == -1 if no exit code was returned - if exit_status != -1: - LOG.debug('Result was %s', exit_status) - if check_exit_code and exit_status != 0: - raise processutils.ProcessExecutionError( - exit_code=exit_status, - stdout=stdout, - stderr=stderr, - cmd=command) - else: - return True - else: - return True - except Exception as e: - LOG.exception('Error executing SSH command.') - last_exception = e - greenthread.sleep(random.randint(20, 500) / 100.0) - LOG.debug("Handling error case after SSH: %s", last_exception) - try: - raise processutils.ProcessExecutionError( - exit_code=last_exception.exit_code, - stdout=last_exception.stdout, - stderr=last_exception.stderr, - cmd=last_exception.cmd) - except AttributeError: - raise processutils.ProcessExecutionError( - exit_code=-1, - stdout="", - stderr="Error running SSH command", - cmd=command) - except Exception: - with excutils.save_and_reraise_exception(): - LOG.exception("Error executing command via ssh.") - finally: - if stdin: - stdin.flush() - stdin.close() - if stdout: - stdout.close() - if stderr: - stderr.close() - - def cleanup(self): - self.sshpool = None diff --git a/cinder/zonemanager/drivers/cisco/cisco_fc_zone_driver.py b/cinder/zonemanager/drivers/cisco/cisco_fc_zone_driver.py deleted file mode 100644 index 993cbe900..000000000 --- 
a/cinder/zonemanager/drivers/cisco/cisco_fc_zone_driver.py +++ /dev/null @@ -1,550 +0,0 @@ -# (c) Copyright 2014 Cisco Systems Inc. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -# - - -""" -Cisco Zone Driver is responsible to manage access control using FC zoning -for Cisco FC fabrics. -This is a concrete implementation of FCZoneDriver interface implementing -add_connection and delete_connection interfaces. - -**Related Flags** - -:zone_activate: Used by: class: 'FCZoneDriver'. Defaults to True -:zone_name_prefix: Used by: class: 'FCZoneDriver'. 
Defaults to 'openstack' -""" - -from oslo_concurrency import lockutils -from oslo_config import cfg -from oslo_log import log as logging -from oslo_utils import excutils -from oslo_utils import importutils -import six -import string - -from cinder import exception -from cinder.i18n import _ -from cinder import interface -from cinder.zonemanager.drivers.cisco import cisco_fabric_opts as fabric_opts -from cinder.zonemanager.drivers.cisco import fc_zone_constants as ZoneConstant -from cinder.zonemanager.drivers import driver_utils -from cinder.zonemanager.drivers import fc_zone_driver -from cinder.zonemanager import utils as zm_utils - -LOG = logging.getLogger(__name__) - -SUPPORTED_CHARS = string.ascii_letters + string.digits + '$' + '-' + '^' + '_' -cisco_opts = [ - cfg.StrOpt('cisco_sb_connector', - default='cinder.zonemanager.drivers.cisco' - '.cisco_fc_zone_client_cli.CiscoFCZoneClientCLI', - help='Southbound connector for zoning operation'), -] - -CONF = cfg.CONF -CONF.register_opts(cisco_opts, group='fc-zone-manager') - - -@interface.fczmdriver -class CiscoFCZoneDriver(fc_zone_driver.FCZoneDriver): - """Cisco FC zone driver implementation. - - OpenStack Fibre Channel zone driver to manage FC zoning in - Cisco SAN fabrics. - - Version history: - 1.0 - Initial Cisco FC zone driver - 1.1 - Added friendly zone name support - """ - - VERSION = "1.1.0" - - # ThirdPartySystems wiki name - CI_WIKI_NAME = "Cisco_ZM_CI" - - def __init__(self, **kwargs): - super(CiscoFCZoneDriver, self).__init__(**kwargs) - self.configuration = kwargs.get('configuration', None) - if self.configuration: - self.configuration.append_config_values(cisco_opts) - - # Adding a hack to handle parameters from super classes - # in case configured with multi backends. 
- fabric_names = self.configuration.safe_get('fc_fabric_names') - activate = self.configuration.safe_get('cisco_zone_activate') - prefix = self.configuration.safe_get('cisco_zone_name_prefix') - base_san_opts = [] - if not fabric_names: - base_san_opts.append( - cfg.StrOpt('fc_fabric_names', - help='Comma separated list of fibre channel ' - 'fabric names. This list of names is used to' - ' retrieve other SAN credentials for connecting' - ' to each SAN fabric' - )) - if not activate: - base_san_opts.append( - cfg.BoolOpt('cisco_zone_activate', - default=True, - help='Indicates whether zone should ' - 'be activated or not')) - if not prefix: - base_san_opts.append( - cfg.StrOpt('cisco_zone_name_prefix', - default="openstack", - help="A prefix to be used when naming zone")) - if len(base_san_opts) > 0: - CONF.register_opts(base_san_opts) - self.configuration.append_config_values(base_san_opts) - fabric_names = [x.strip() for x in self. - configuration.fc_fabric_names.split(',')] - - # There can be more than one SAN in the network and we need to - # get credentials for each SAN. - if fabric_names: - self.fabric_configs = fabric_opts.load_fabric_configurations( - fabric_names) - - @lockutils.synchronized('cisco', 'fcfabric-', True) - def add_connection(self, fabric, initiator_target_map, host_name=None, - storage_system=None): - """Concrete implementation of add_connection. - - Based on zoning policy and state of each I-T pair, list of zone - members are created and pushed to the fabric to add zones. The - new zones created or zones updated are activated based on isActivate - flag set in cinder.conf returned by volume driver after attach - operation. 
- - :param fabric: Fabric name from cinder.conf file - :param initiator_target_map: Mapping of initiator to list of targets - """ - - LOG.debug("Add connection for Fabric: %s", fabric) - LOG.info("CiscoFCZoneDriver - Add connection " - "for I-T map: %s", initiator_target_map) - fabric_ip = self.fabric_configs[fabric].safe_get( - 'cisco_fc_fabric_address') - fabric_user = self.fabric_configs[fabric].safe_get( - 'cisco_fc_fabric_user') - fabric_pwd = self.fabric_configs[fabric].safe_get( - 'cisco_fc_fabric_password') - fabric_port = self.fabric_configs[fabric].safe_get( - 'cisco_fc_fabric_port') - zoning_policy = self.configuration.zoning_policy - zoning_policy_fab = self.fabric_configs[fabric].safe_get( - 'cisco_zoning_policy') - if zoning_policy_fab: - zoning_policy = zoning_policy_fab - - zoning_vsan = self.fabric_configs[fabric].safe_get('cisco_zoning_vsan') - - LOG.info("Zoning policy for Fabric %s", zoning_policy) - - statusmap_from_fabric = self.get_zoning_status( - fabric_ip, fabric_user, fabric_pwd, fabric_port, zoning_vsan) - - if statusmap_from_fabric.get('session') == 'none': - - cfgmap_from_fabric = self.get_active_zone_set( - fabric_ip, fabric_user, fabric_pwd, fabric_port, zoning_vsan) - zone_names = [] - if cfgmap_from_fabric.get('zones'): - zone_names = cfgmap_from_fabric['zones'].keys() - # based on zoning policy, create zone member list and - # push changes to fabric. 
- for initiator_key in initiator_target_map.keys(): - zone_map = {} - zone_update_map = {} - initiator = initiator_key.lower() - t_list = initiator_target_map[initiator_key] - if zoning_policy == 'initiator-target': - for t in t_list: - target = t.lower() - zone_members = [ - zm_utils.get_formatted_wwn(initiator), - zm_utils.get_formatted_wwn(target)] - zone_name = ( - driver_utils.get_friendly_zone_name( - zoning_policy, - initiator, - target, - host_name, - storage_system, - self.configuration.cisco_zone_name_prefix, - SUPPORTED_CHARS)) - if (len(cfgmap_from_fabric) == 0 or ( - zone_name not in zone_names)): - zone_map[zone_name] = zone_members - else: - # This is I-T zoning, skip if zone exists. - LOG.info("Zone exists in I-T mode. " - "Skipping zone creation %s", - zone_name) - elif zoning_policy == 'initiator': - zone_members = [ - zm_utils.get_formatted_wwn(initiator)] - for t in t_list: - target = t.lower() - zone_members.append( - zm_utils.get_formatted_wwn(target)) - zone_name = ( - driver_utils.get_friendly_zone_name( - zoning_policy, - initiator, - target, - host_name, - storage_system, - self.configuration.cisco_zone_name_prefix, - SUPPORTED_CHARS)) - - # If zone exists, then perform a update_zone and add - # new members into existing zone. 
- if zone_name and (zone_name in zone_names): - zone_members = filter( - lambda x: x not in - cfgmap_from_fabric['zones'][zone_name], - zone_members) - if zone_members: - zone_update_map[zone_name] = zone_members - else: - zone_map[zone_name] = zone_members - else: - msg = _("Zoning Policy: %s, not" - " recognized") % zoning_policy - LOG.error(msg) - raise exception.FCZoneDriverException(msg) - - LOG.info("Zone map to add: %(zone_map)s", - {'zone_map': zone_map}) - LOG.info("Zone map to update add: %(zone_update_map)s", - {'zone_update_map': zone_update_map}) - if zone_map or zone_update_map: - conn = None - try: - conn = importutils.import_object( - self.configuration.cisco_sb_connector, - ipaddress=fabric_ip, - username=fabric_user, - password=fabric_pwd, - port=fabric_port, - vsan=zoning_vsan) - if zone_map: - conn.add_zones( - zone_map, - self.configuration.cisco_zone_activate, - zoning_vsan, cfgmap_from_fabric, - statusmap_from_fabric) - if zone_update_map: - conn.update_zones( - zone_update_map, - self.configuration.cisco_zone_activate, - zoning_vsan, ZoneConstant.ZONE_ADD, - cfgmap_from_fabric, - statusmap_from_fabric) - conn.cleanup() - except exception.CiscoZoningCliException as cisco_ex: - msg = _("Exception: %s") % six.text_type(cisco_ex) - raise exception.FCZoneDriverException(msg) - except Exception: - msg = _("Failed to add zoning configuration.") - LOG.exception(msg) - raise exception.FCZoneDriverException(msg) - LOG.debug("Zones added successfully: %s", zone_map) - else: - LOG.debug("Zones already exist - Initiator Target Map: %s", - initiator_target_map) - else: - LOG.debug("Zoning session exists VSAN: %s", zoning_vsan) - - @lockutils.synchronized('cisco', 'fcfabric-', True) - def delete_connection(self, fabric, initiator_target_map, host_name=None, - storage_system=None): - """Concrete implementation of delete_connection. - - Based on zoning policy and state of each I-T pair, list of zones - are created for deletion. 
The zones are either updated deleted based - on the policy and attach/detach state of each I-T pair. - - :param fabric: Fabric name from cinder.conf file - :param initiator_target_map: Mapping of initiator to list of targets - """ - LOG.debug("Delete connection for fabric: %s", fabric) - LOG.info("CiscoFCZoneDriver - Delete connection for I-T map: %s", - initiator_target_map) - fabric_ip = self.fabric_configs[fabric].safe_get( - 'cisco_fc_fabric_address') - fabric_user = self.fabric_configs[fabric].safe_get( - 'cisco_fc_fabric_user') - fabric_pwd = self.fabric_configs[fabric].safe_get( - 'cisco_fc_fabric_password') - fabric_port = self.fabric_configs[fabric].safe_get( - 'cisco_fc_fabric_port') - zoning_policy = self.configuration.zoning_policy - zoning_policy_fab = self.fabric_configs[fabric].safe_get( - 'cisco_zoning_policy') - - if zoning_policy_fab: - zoning_policy = zoning_policy_fab - - zoning_vsan = self.fabric_configs[fabric].safe_get('cisco_zoning_vsan') - - LOG.info("Zoning policy for fabric %s", zoning_policy) - - statusmap_from_fabric = self.get_zoning_status( - fabric_ip, fabric_user, fabric_pwd, fabric_port, zoning_vsan) - - if statusmap_from_fabric.get('session') == 'none': - cfgmap_from_fabric = self.get_active_zone_set( - fabric_ip, fabric_user, fabric_pwd, fabric_port, zoning_vsan) - - zone_names = [] - if cfgmap_from_fabric.get('zones'): - zone_names = cfgmap_from_fabric['zones'].keys() - - # Based on zoning policy, get zone member list and push - # changes to fabric. This operation could result in an update - # for zone config with new member list or deleting zones from - # active cfg. 
- - LOG.debug("zone config from Fabric: %s", cfgmap_from_fabric) - for initiator_key in initiator_target_map.keys(): - initiator = initiator_key.lower() - formatted_initiator = zm_utils.get_formatted_wwn(initiator) - zone_update_map = {} - zones_to_delete = [] - t_list = initiator_target_map[initiator_key] - if zoning_policy == 'initiator-target': - # In this case, zone needs to be deleted. - for t in t_list: - target = t.lower() - zone_name = ( - driver_utils.get_friendly_zone_name( - zoning_policy, - initiator, - target, - host_name, - storage_system, - self.configuration.cisco_zone_name_prefix, - SUPPORTED_CHARS)) - LOG.debug("Zone name to del: %s", zone_name) - if (len(zone_names) > 0 and (zone_name in zone_names)): - # delete zone. - LOG.debug("Added zone to delete to list: %s", - zone_name) - zones_to_delete.append(zone_name) - - elif zoning_policy == 'initiator': - zone_members = [formatted_initiator] - for t in t_list: - target = t.lower() - zone_members.append( - zm_utils.get_formatted_wwn(target)) - - zone_name = driver_utils.get_friendly_zone_name( - zoning_policy, - initiator, - target, - host_name, - storage_system, - self.configuration.cisco_zone_name_prefix, - SUPPORTED_CHARS) - # Check if there are zone members leftover after removal - if (zone_names and (zone_name in zone_names)): - filtered_members = filter( - lambda x: x not in zone_members, - cfgmap_from_fabric['zones'][zone_name]) - - # The assumption here is that initiator is always - # there in the zone as it is 'initiator' policy. - # If filtered list is empty, we remove that zone. 
- # If there are other members leftover, then perform - # update_zone to remove targets - LOG.debug("Zone delete - I mode: filtered targets: %s", - filtered_members) - if filtered_members: - remove_members = filter( - lambda x: x in - cfgmap_from_fabric['zones'][zone_name], - zone_members) - if remove_members: - # Do not want to remove the initiator - remove_members.remove(formatted_initiator) - LOG.debug("Zone members to remove: %s", - remove_members) - zone_update_map[zone_name] = remove_members - LOG.debug("Filtered zone Map to update: %s", - zone_update_map) - else: - zones_to_delete.append(zone_name) - else: - LOG.info("Zoning Policy: %s, not recognized", - zoning_policy) - LOG.debug("Zone map to remove update: %s", zone_update_map) - LOG.debug("Final Zone list to delete: %s", zones_to_delete) - conn = None - try: - conn = importutils.import_object( - self.configuration.cisco_sb_connector, - ipaddress=fabric_ip, - username=fabric_user, - password=fabric_pwd, - port=fabric_port, - vsan=zoning_vsan) - # Update zone membership. - if zone_update_map: - conn.update_zones( - zone_update_map, - self.configuration.cisco_zone_activate, - zoning_vsan, ZoneConstant.ZONE_REMOVE, - cfgmap_from_fabric, statusmap_from_fabric) - # Delete zones ~sk. - if zones_to_delete: - zone_name_string = '' - num_zones = len(zones_to_delete) - for i in range(0, num_zones): - if i == 0: - zone_name_string = ('%s%s' % ( - zone_name_string, - zones_to_delete[i])) - else: - zone_name_string = ('%s%s%s' % ( - zone_name_string, ';', - zones_to_delete[i])) - - conn.delete_zones(zone_name_string, - self.configuration. 
- cisco_zone_activate, - zoning_vsan, cfgmap_from_fabric, - statusmap_from_fabric) - conn.cleanup() - except Exception: - msg = _("Failed to update or delete zoning configuration") - LOG.exception(msg) - raise exception.FCZoneDriverException(msg) - LOG.debug("Zones deleted successfully: %s", zone_update_map) - else: - LOG.debug("Zoning session exists VSAN: %s", zoning_vsan) - - def get_san_context(self, target_wwn_list): - """Lookup SAN context for visible end devices. - - Look up each SAN configured and return a map of SAN (fabric IP) to - list of target WWNs visible to the fabric. - """ - formatted_target_list = [] - fabric_map = {} - fabrics = [x.strip() for x in self. - configuration.fc_fabric_names.split(',')] - LOG.debug("Fabric List: %s", fabrics) - LOG.debug("Target wwn List: %s", target_wwn_list) - if len(fabrics) > 0: - for t in target_wwn_list: - formatted_target_list.append( - zm_utils.get_formatted_wwn(t.lower())) - LOG.debug("Formatted Target wwn List: %s", formatted_target_list) - for fabric_name in fabrics: - fabric_ip = self.fabric_configs[fabric_name].safe_get( - 'cisco_fc_fabric_address') - fabric_user = self.fabric_configs[fabric_name].safe_get( - 'cisco_fc_fabric_user') - fabric_pwd = self.fabric_configs[fabric_name].safe_get( - 'cisco_fc_fabric_password') - fabric_port = self.fabric_configs[fabric_name].safe_get( - 'cisco_fc_fabric_port') - zoning_vsan = self.fabric_configs[fabric_name].safe_get( - 'cisco_zoning_vsan') - - # Get name server data from fabric and get the targets - # logged in. 
- nsinfo = None - try: - conn = importutils.import_object( - self.configuration.cisco_sb_connector, - ipaddress=fabric_ip, - username=fabric_user, - password=fabric_pwd, port=fabric_port, - vsan=zoning_vsan) - nsinfo = conn.get_nameserver_info() - LOG.debug("show fcns database info from fabric: %s", - nsinfo) - conn.cleanup() - except exception.CiscoZoningCliException: - with excutils.save_and_reraise_exception(): - LOG.exception("Error getting show fcns database info.") - except Exception: - msg = _("Failed to get show fcns database info.") - LOG.exception(msg) - raise exception.FCZoneDriverException(msg) - visible_targets = filter( - lambda x: x in formatted_target_list, nsinfo) - - if visible_targets: - LOG.info("Filtered targets for SAN is: %s", - {fabric_name: visible_targets}) - # getting rid of the ':' before returning - for idx, elem in enumerate(visible_targets): - visible_targets[idx] = six.text_type( - visible_targets[idx]).replace(':', '') - fabric_map[fabric_name] = visible_targets - else: - LOG.debug("No targets are in the fcns info for SAN %s", - fabric_name) - LOG.debug("Return SAN context output: %s", fabric_map) - return fabric_map - - def get_active_zone_set(self, fabric_ip, - fabric_user, fabric_pwd, fabric_port, - zoning_vsan): - """Gets active zoneset config for vsan.""" - cfgmap = {} - conn = None - try: - LOG.debug("Southbound connector: %s", - self.configuration.cisco_sb_connector) - conn = importutils.import_object( - self.configuration.cisco_sb_connector, - ipaddress=fabric_ip, username=fabric_user, - password=fabric_pwd, port=fabric_port, vsan=zoning_vsan) - cfgmap = conn.get_active_zone_set() - conn.cleanup() - except Exception: - msg = _("Failed to access active zoning configuration.") - LOG.exception(msg) - raise exception.FCZoneDriverException(msg) - LOG.debug("Active zone set from fabric: %s", cfgmap) - return cfgmap - - def get_zoning_status(self, fabric_ip, fabric_user, fabric_pwd, - fabric_port, zoning_vsan): - """Gets zoneset 
status and mode.""" - statusmap = {} - conn = None - try: - LOG.debug("Southbound connector: %s", - self.configuration.cisco_sb_connector) - conn = importutils.import_object( - self.configuration.cisco_sb_connector, - ipaddress=fabric_ip, username=fabric_user, - password=fabric_pwd, port=fabric_port, vsan=zoning_vsan) - statusmap = conn.get_zoning_status() - conn.cleanup() - except Exception: - msg = _("Failed to access zoneset status:%s") - LOG.exception(msg) - raise exception.FCZoneDriverException(msg) - LOG.debug("Zoneset status from fabric: %s", statusmap) - return statusmap diff --git a/cinder/zonemanager/drivers/cisco/fc_zone_constants.py b/cinder/zonemanager/drivers/cisco/fc_zone_constants.py deleted file mode 100644 index 6182f80ed..000000000 --- a/cinder/zonemanager/drivers/cisco/fc_zone_constants.py +++ /dev/null @@ -1,33 +0,0 @@ -# (c) Copyright 2014 Cisco Systems Inc. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -# - - -""" -Common constants used by Cisco FC Zone Driver. -""" -ACTIVE_ZONE_CONFIG = 'active_zone_config' -CFG_ZONESET = 'zoneset' -CFG_ZONE = 'zone' -CFG_ZONE_MEMBER = 'pwwn' -CFG_ZONES = 'zones' -ZONE_ADD = 'zoneadd' -ZONE_REMOVE = 'zoneremove' -""" -CLI Commands for FC zoning operations. 
-""" -GET_ACTIVE_ZONE_CFG = 'show zoneset active vsan ' -FCNS_SHOW = 'show fcns database vsan ' -GET_ZONE_STATUS = 'show zone status vsan ' diff --git a/cinder/zonemanager/drivers/driver_utils.py b/cinder/zonemanager/drivers/driver_utils.py deleted file mode 100644 index 2927ed4b5..000000000 --- a/cinder/zonemanager/drivers/driver_utils.py +++ /dev/null @@ -1,78 +0,0 @@ -# (c) Copyright 2014 Brocade Communications Systems Inc. -# All Rights Reserved. -# -# Copyright 2014 OpenStack Foundation -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -# - -import re - -from oslo_log import log - -LOG = log.getLogger(__name__) - - -def get_friendly_zone_name(zoning_policy, initiator, target, - host_name, storage_system, zone_name_prefix, - supported_chars): - """Utility function implementation of _get_friendly_zone_name. - - Get friendly zone name is used to form the zone name - based on the details provided by the caller - - :param zoning_policy: determines the zoning policy is either - initiator-target or initiator - :param initiator: initiator WWN - :param target: target WWN - :param host_name: Host name returned from Volume Driver - :param storage_system: Storage name returned from Volume Driver - :param zone_name_prefix: user defined zone prefix configured - in cinder.conf - :param supported_chars: Supported character set of FC switch vendor. - Example: `abc123_-$`. These are defined in - the FC zone drivers. 
- """ - if host_name is None: - host_name = '' - if storage_system is None: - storage_system = '' - if zoning_policy == 'initiator-target': - host_name = host_name[:14] - storage_system = storage_system[:14] - if len(host_name) > 0 and len(storage_system) > 0: - zone_name = (host_name + "_" - + initiator.replace(':', '') + "_" - + storage_system + "_" - + target.replace(':', '')) - else: - zone_name = (zone_name_prefix - + initiator.replace(':', '') - + target.replace(':', '')) - LOG.info("Zone name created using prefix because either " - "host name or storage system is none.") - else: - host_name = host_name[:47] - if len(host_name) > 0: - zone_name = (host_name + "_" - + initiator.replace(':', '')) - else: - zone_name = (zone_name_prefix - + initiator.replace(':', '')) - LOG.info("Zone name created using prefix because host " - "name is none.") - - LOG.info("Friendly zone name after forming: %(zonename)s", - {'zonename': zone_name}) - zone_name = re.sub('[^%s]' % supported_chars, '', zone_name) - return zone_name diff --git a/cinder/zonemanager/drivers/fc_zone_driver.py b/cinder/zonemanager/drivers/fc_zone_driver.py deleted file mode 100644 index db8a8841d..000000000 --- a/cinder/zonemanager/drivers/fc_zone_driver.py +++ /dev/null @@ -1,56 +0,0 @@ -# (c) Copyright 2014 Brocade Communications Systems Inc. -# All Rights Reserved. -# -# Copyright 2014 OpenStack Foundation -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
-# - -""" -Base Zone Driver is responsible to manage access control using FC zoning -Vendor specific implementations should extend this class to provide -concrete implementation for add_connection and delete_connection -interfaces. - -**Related Flags** - -:zoning_policy: Used by: class: 'FCZoneDriver'. Defaults to 'none' -:zone_driver: Used by: class: 'FCZoneDriver'. Defaults to 'none' - -""" - -from oslo_log import log as logging - -from cinder.interface import fczm_driver -from cinder.zonemanager import fc_common - -LOG = logging.getLogger(__name__) - - -class FCZoneDriver( - fc_common.FCCommon, fczm_driver.FibreChannelZoneManagerDriver): - """Interface to manage Connection control during attach/detach.""" - - # If a driver hasn't maintained their CI system, this will get set - # to False, which prevents the driver from starting. - # Add enable_unsupported_driver = True in cinder.conf to get the - # unsupported driver started. - SUPPORTED = True - - def __init__(self, **kwargs): - super(FCZoneDriver, self).__init__(**kwargs) - LOG.debug("Initializing FCZoneDriver") - - @property - def supported(self): - return self.SUPPORTED diff --git a/cinder/zonemanager/fc_common.py b/cinder/zonemanager/fc_common.py deleted file mode 100644 index 61b415da2..000000000 --- a/cinder/zonemanager/fc_common.py +++ /dev/null @@ -1,29 +0,0 @@ -# (c) Copyright 2014 Brocade Communications Systems Inc. -# All Rights Reserved. -# -# Copyright 2014 OpenStack Foundation -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the -# License for the specific language governing permissions and limitations -# under the License. -# - - -class FCCommon(object): - """Common interface for FC operations.""" - - VERSION = "1.0" - - def __init__(self, **kwargs): - pass - - def get_version(self): - return self.VERSION diff --git a/cinder/zonemanager/fc_san_lookup_service.py b/cinder/zonemanager/fc_san_lookup_service.py deleted file mode 100644 index d77a8f0c2..000000000 --- a/cinder/zonemanager/fc_san_lookup_service.py +++ /dev/null @@ -1,97 +0,0 @@ -# (c) Copyright 2014 Brocade Communications Systems Inc. -# All Rights Reserved. -# -# Copyright 2014 OpenStack Foundation -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -# -""" -Base Lookup Service for name server lookup to find the initiator to target port -mapping for available SAN contexts. -Vendor specific lookup classes are expected to implement the interfaces -defined in this class. - -""" - -from oslo_log import log as logging -from oslo_utils import importutils - -from cinder import exception -from cinder.i18n import _ -from cinder.volume import configuration as config -from cinder.zonemanager import fc_common -from cinder.zonemanager import fc_zone_manager - - -LOG = logging.getLogger(__name__) - - -class FCSanLookupService(fc_common.FCCommon): - """Base Lookup Service. - - Base Lookup Service for name server lookup to find the initiator to - target port mapping for available SAN contexts. 
- - """ - - lookup_service = None - - def __init__(self, **kwargs): - super(FCSanLookupService, self).__init__(**kwargs) - - opts = fc_zone_manager.zone_manager_opts - self.configuration = config.Configuration(opts, 'fc-zone-manager') - - def get_device_mapping_from_network(self, initiator_list, target_list): - """Get device mapping from FC network. - - Gets a filtered list of initiator ports and target ports for each SAN - available. - :param initiator_list: list of initiator port WWN - :param target_list: list of target port WWN - :returns: device wwn map in following format - - .. code-block:: python - - { - : { - 'initiator_port_wwn_list': - ('200000051E55A100', '200000051E55A121'..) - 'target_port_wwn_list': - ('100000051E55A100', '100000051E55A121'..) - } - } - - :raises Exception: when a lookup service implementation is not - specified in cinder.conf:fc_san_lookup_service - """ - # Initialize vendor specific implementation of FCZoneDriver - if (self.configuration.fc_san_lookup_service): - lookup_service = self.configuration.fc_san_lookup_service - LOG.debug("Lookup service to invoke: " - "%s", lookup_service) - self.lookup_service = importutils.import_object( - lookup_service, configuration=self.configuration) - else: - msg = _("Lookup service not configured. 
Config option for " - "fc_san_lookup_service needs to specify a concrete " - "implementation of the lookup service.") - LOG.error(msg) - raise exception.FCSanLookupServiceException(msg) - try: - device_map = self.lookup_service.get_device_mapping_from_network( - initiator_list, target_list) - except Exception as e: - LOG.exception('Unable to get device mapping from network.') - raise exception.FCSanLookupServiceException(e) - return device_map diff --git a/cinder/zonemanager/fc_zone_manager.py b/cinder/zonemanager/fc_zone_manager.py deleted file mode 100644 index e161ec9ac..000000000 --- a/cinder/zonemanager/fc_zone_manager.py +++ /dev/null @@ -1,367 +0,0 @@ -# (c) Copyright 2014 Brocade Communications Systems Inc. -# All Rights Reserved. -# -# Copyright 2014 OpenStack Foundation -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -# -""" -ZoneManager is responsible to manage access control using FC zoning -when zoning mode is set as 'fabric'. -ZoneManager provides interfaces to add connection and remove connection -for given initiator and target list associated with a FC volume attach and -detach operation. - -**Related Flags** - -:zone_driver: Used by:class:`ZoneManager`. - Defaults to - `cinder.zonemanager.drivers.brocade.brcd_fc_zone_driver.BrcdFCZoneDriver` -:zoning_policy: Used by: class: 'ZoneManager'. 
Defaults to 'none' - -""" - -from oslo_config import cfg -from oslo_log import log as logging -from oslo_utils import importutils -import six - -from cinder import exception -from cinder.i18n import _ -from cinder.volume import configuration as config -from cinder.zonemanager import fc_common -import cinder.zonemanager.fczm_constants as zone_constant - - -LOG = logging.getLogger(__name__) - -zone_manager_opts = [ - cfg.StrOpt('zone_driver', - default='cinder.zonemanager.drivers.brocade.brcd_fc_zone_driver' - '.BrcdFCZoneDriver', - help='FC Zone Driver responsible for zone management'), - cfg.StrOpt('zoning_policy', - default='initiator-target', - help='Zoning policy configured by user; valid values include ' - '"initiator-target" or "initiator"'), - cfg.StrOpt('fc_fabric_names', - help='Comma separated list of Fibre Channel fabric names.' - ' This list of names is used to retrieve other SAN credentials' - ' for connecting to each SAN fabric'), - cfg.StrOpt('fc_san_lookup_service', - default='cinder.zonemanager.drivers.brocade' - '.brcd_fc_san_lookup_service.BrcdFCSanLookupService', - help='FC SAN Lookup Service'), - cfg.BoolOpt('enable_unsupported_driver', - default=False, - help="Set this to True when you want to allow an unsupported " - "zone manager driver to start. Drivers that haven't " - "maintained a working CI system and testing are marked " - "as unsupported until CI is working again. This also " - "marks a driver as deprecated and may be removed in the " - "next release."), - -] - -CONF = cfg.CONF -CONF.register_opts(zone_manager_opts, group='fc-zone-manager') - - -class ZoneManager(fc_common.FCCommon): - - """Manages Connection control during attach/detach. 
- - Version History: - 1.0 - Initial version - 1.0.1 - Added __new__ for singleton - 1.0.2 - Added friendly zone name - - """ - - VERSION = "1.0.2" - driver = None - _initialized = False - fabric_names = [] - - def __new__(class_, *args, **kwargs): - if not hasattr(class_, "_instance"): - class_._instance = object.__new__(class_) - return class_._instance - - def __init__(self, **kwargs): - """Load the driver from the one specified in args, or from flags.""" - super(ZoneManager, self).__init__(**kwargs) - - self.configuration = config.Configuration(zone_manager_opts, - 'fc-zone-manager') - self.set_initialized(False) - self._build_driver() - - def _build_driver(self): - zone_driver = self.configuration.zone_driver - LOG.debug("Zone driver from config: %(driver)s", - {'driver': zone_driver}) - - zm_config = config.Configuration(zone_manager_opts, 'fc-zone-manager') - # Initialize vendor specific implementation of FCZoneDriver - self.driver = importutils.import_object( - zone_driver, - configuration=zm_config) - - if not self.driver.supported: - self._log_unsupported_driver_warning() - - if not self.configuration.enable_unsupported_driver: - LOG.error("Unsupported drivers are disabled." 
- " You can re-enable by adding " - "enable_unsupported_driver=True to the " - "fc-zone-manager section in cinder.conf", - resource={'type': 'zone_manager', - 'id': self.__class__.__name__}) - return - - self.set_initialized(True) - - @property - def initialized(self): - return self._initialized - - def set_initialized(self, value=True): - self._initialized = value - - def _require_initialized(self): - """Verifies that the zone manager has been properly initialized.""" - if not self.initialized: - LOG.error("Fibre Channel Zone Manager is not initialized.") - raise exception.ZoneManagerNotInitialized() - else: - self._log_unsupported_driver_warning() - - def _log_unsupported_driver_warning(self): - """Annoy the log about unsupported fczm drivers.""" - if not self.driver.supported: - LOG.warning("Zone Manager driver (%(driver_name)s %(version)s)" - " is currently unsupported and may be removed in " - "the next release of OpenStack. Use at your own " - "risk.", - {'driver_name': self.driver.__class__.__name__, - 'version': self.driver.get_version()}, - resource={'type': 'zone_manager', - 'id': self.driver.__class__.__name__}) - - def get_zoning_state_ref_count(self, initiator_wwn, target_wwn): - """Zone management state check. - - Performs state check for given I-T pair to return the current count of - active attach for the pair. - """ - # TODO(sk): ref count state management - count = 0 - # check the state for I-T pair - return count - - def add_connection(self, conn_info): - """Add connection control. - - Adds connection control for the given initiator target map. - initiator_target_map - each initiator WWN mapped to a list of one - or more target WWN: - - .. 
code-block:: python - - e.g.: - { - '10008c7cff523b01': ['20240002ac000a50', '20240002ac000a40'] - } - """ - connected_fabric = None - host_name = None - storage_system = None - - try: - # Make sure the driver is loaded and we are initialized - self._log_unsupported_driver_warning() - self._require_initialized() - except exception.ZoneManagerNotInitialized: - LOG.error("Cannot add Fibre Channel Zone because the " - "Zone Manager is not initialized properly.", - resource={'type': 'zone_manager', - 'id': self.__class__.__name__}) - return - - try: - initiator_target_map = ( - conn_info[zone_constant.DATA][zone_constant.IT_MAP]) - - if zone_constant.HOST in conn_info[zone_constant.DATA]: - host_name = conn_info[ - zone_constant.DATA][ - zone_constant.HOST].replace(" ", "_") - - if zone_constant.STORAGE in conn_info[zone_constant.DATA]: - storage_system = ( - conn_info[ - zone_constant.DATA][ - zone_constant.STORAGE].replace(" ", "_")) - - for initiator in initiator_target_map.keys(): - target_list = initiator_target_map[initiator] - LOG.debug("Target list : %(targets)s", - {'targets': target_list}) - - # get SAN context for the target list - fabric_map = self.get_san_context(target_list) - LOG.debug("Fabric map after context lookup: %(fabricmap)s", - {'fabricmap': fabric_map}) - # iterate over each SAN and apply connection control - for fabric in fabric_map.keys(): - connected_fabric = fabric - t_list = fabric_map[fabric] - # get valid I-T map to add connection control - i_t_map = {initiator: t_list} - valid_i_t_map = self.get_valid_initiator_target_map( - i_t_map, True) - LOG.info("Final filtered map for fabric: %(i_t_map)s", - {'i_t_map': valid_i_t_map}) - - # Call driver to add connection control - self.driver.add_connection(fabric, valid_i_t_map, - host_name, storage_system) - - LOG.info("Add connection: finished iterating " - "over all target list") - except Exception as e: - msg = _("Failed adding connection for fabric=%(fabric)s: " - "Error: %(err)s") % 
{'fabric': connected_fabric, - 'err': six.text_type(e)} - LOG.error(msg) - raise exception.ZoneManagerException(reason=msg) - - def delete_connection(self, conn_info): - """Delete connection. - - Updates/deletes connection control for the given initiator target map. - initiator_target_map - each initiator WWN mapped to a list of one - or more target WWN: - - .. code-block:: python - - e.g.: - { - '10008c7cff523b01': ['20240002ac000a50', '20240002ac000a40'] - } - """ - connected_fabric = None - host_name = None - storage_system = None - - try: - # Make sure the driver is loaded and we are initialized - self._log_unsupported_driver_warning() - self._require_initialized() - except exception.ZoneManagerNotInitialized: - LOG.error("Cannot delete fibre channel zone because the " - "Zone Manager is not initialized properly.", - resource={'type': 'zone_manager', - 'id': self.__class__.__name__}) - return - - try: - initiator_target_map = ( - conn_info[zone_constant.DATA][zone_constant.IT_MAP]) - - if zone_constant.HOST in conn_info[zone_constant.DATA]: - host_name = conn_info[zone_constant.DATA][zone_constant.HOST] - - if zone_constant.STORAGE in conn_info[zone_constant.DATA]: - storage_system = ( - conn_info[ - zone_constant.DATA][ - zone_constant.STORAGE].replace(" ", "_")) - - for initiator in initiator_target_map.keys(): - target_list = initiator_target_map[initiator] - LOG.info("Delete connection target list: %(targets)s", - {'targets': target_list}) - - # get SAN context for the target list - fabric_map = self.get_san_context(target_list) - LOG.debug("Delete connection fabric map from SAN " - "context: %(fabricmap)s", {'fabricmap': fabric_map}) - - # iterate over each SAN and apply connection control - for fabric in fabric_map.keys(): - connected_fabric = fabric - t_list = fabric_map[fabric] - # get valid I-T map to add connection control - i_t_map = {initiator: t_list} - valid_i_t_map = self.get_valid_initiator_target_map( - i_t_map, False) - LOG.info("Final 
filtered map for delete connection: " - "%(i_t_map)s", {'i_t_map': valid_i_t_map}) - - # Call driver to delete connection control - if len(valid_i_t_map) > 0: - self.driver.delete_connection(fabric, - valid_i_t_map, - host_name, - storage_system) - - LOG.debug("Delete connection - finished iterating over all" - " target list") - except Exception as e: - msg = _("Failed removing connection for fabric=%(fabric)s: " - "Error: %(err)s") % {'fabric': connected_fabric, - 'err': six.text_type(e)} - LOG.error(msg) - raise exception.ZoneManagerException(reason=msg) - - def get_san_context(self, target_wwn_list): - """SAN lookup for end devices. - - Look up each SAN configured and return a map of SAN (fabric IP) - to list of target WWNs visible to the fabric. - """ - fabric_map = self.driver.get_san_context(target_wwn_list) - LOG.debug("Got SAN context: %(fabricmap)s", {'fabricmap': fabric_map}) - return fabric_map - - def get_valid_initiator_target_map(self, initiator_target_map, - add_control): - """Reference count check for end devices. 
- - Looks up the reference count for each initiator-target pair from the - map and returns a filtered list based on the operation type - add_control - operation type can be true for add connection control - and false for remove connection control - """ - filtered_i_t_map = {} - for initiator in initiator_target_map.keys(): - t_list = initiator_target_map[initiator] - for target in t_list: - count = self.get_zoning_state_ref_count(initiator, target) - if add_control: - if count > 0: - t_list.remove(target) - # update count = count + 1 - else: - if count > 1: - t_list.remove(target) - # update count = count - 1 - if t_list: - filtered_i_t_map[initiator] = t_list - else: - LOG.info("No targets to add or remove connection for " - "initiator: %(init_wwn)s", - {'init_wwn': initiator}) - return filtered_i_t_map diff --git a/cinder/zonemanager/fczm_constants.py b/cinder/zonemanager/fczm_constants.py deleted file mode 100644 index d1af9055f..000000000 --- a/cinder/zonemanager/fczm_constants.py +++ /dev/null @@ -1,22 +0,0 @@ -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -# - -""" -Common constants used by FC Zone Manager. 
-""" -IT_MAP = 'initiator_target_map' -DATA = 'data' -HOST = 'host_name' -STORAGE = 'storage_system' -SYSTEM = 'system' diff --git a/cinder/zonemanager/utils.py b/cinder/zonemanager/utils.py deleted file mode 100644 index 7628491bc..000000000 --- a/cinder/zonemanager/utils.py +++ /dev/null @@ -1,121 +0,0 @@ -# (c) Copyright 2012-2014 Hewlett-Packard Development Company, L.P. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -# -""" -Utility functions related to the Zone Manager. 
- -""" -from oslo_log import log - -from cinder.volume import configuration -from cinder.volume import manager -from cinder.zonemanager import fc_san_lookup_service -from cinder.zonemanager import fc_zone_manager - -LOG = log.getLogger(__name__) - - -def create_zone_manager(): - """If zoning is enabled, build the Zone Manager.""" - config = configuration.Configuration(manager.volume_manager_opts) - LOG.debug("Zoning mode: %s.", config.safe_get('zoning_mode')) - if config.safe_get('zoning_mode') == 'fabric': - LOG.debug("FC Zone Manager enabled.") - zm = fc_zone_manager.ZoneManager() - if zm.initialized: - LOG.info("Using FC Zone Manager %(zm_version)s," - " Driver %(drv_name)s %(drv_version)s.", - {'zm_version': zm.get_version(), - 'drv_name': zm.driver.__class__.__name__, - 'drv_version': zm.driver.get_version()}) - return zm - else: - LOG.debug("FC Zone Manager %(zm_version)s disabled", - {"zm_version": zm.get_version()}) - return None - - else: - LOG.debug("FC Zone Manager not enabled in cinder.conf.") - return None - - -def create_lookup_service(): - config = configuration.Configuration(manager.volume_manager_opts) - LOG.debug("Zoning mode: %s.", config.safe_get('zoning_mode')) - if config.safe_get('zoning_mode') == 'fabric': - LOG.debug("FC Lookup Service enabled.") - lookup = fc_san_lookup_service.FCSanLookupService() - LOG.info("Using FC lookup service %s.", lookup.lookup_service) - return lookup - else: - LOG.debug("FC Lookup Service not enabled in cinder.conf.") - return None - - -def get_formatted_wwn(wwn_str): - """Utility API that formats WWN to insert ':'.""" - if (len(wwn_str) != 16): - return wwn_str.lower() - else: - return (':'.join([wwn_str[i:i + 2] - for i in range(0, len(wwn_str), 2)])).lower() - - -def add_fc_zone(initialize_connection): - """Decorator to add a FC Zone.""" - - def decorator(self, *args, **kwargs): - conn_info = initialize_connection(self, *args, **kwargs) - if not conn_info: - LOG.warning("Driver didn't return connection info, 
" - "can't add zone.") - return None - - vol_type = conn_info.get('driver_volume_type', None) - if vol_type == 'fibre_channel': - if 'initiator_target_map' in conn_info['data']: - zm = create_zone_manager() - if zm: - LOG.debug("add_fc_zone connection info: %(conninfo)s.", - {'conninfo': conn_info}) - zm.add_connection(conn_info) - - return conn_info - - return decorator - - -def remove_fc_zone(terminate_connection): - """Decorator for FC drivers to remove zone.""" - - def decorator(self, *args, **kwargs): - conn_info = terminate_connection(self, *args, **kwargs) - if not conn_info: - LOG.warning("Driver didn't return connection info from " - "terminate_connection call.") - return None - - vol_type = conn_info.get('driver_volume_type', None) - if vol_type == 'fibre_channel': - if 'initiator_target_map' in conn_info['data']: - zm = create_zone_manager() - if zm: - LOG.debug("remove_fc_zone connection info: %(conninfo)s.", - {'conninfo': conn_info}) - zm.delete_connection(conn_info) - - return conn_info - - return decorator diff --git a/contrib/block-box/.gitignore b/contrib/block-box/.gitignore deleted file mode 100644 index bc556fc14..000000000 --- a/contrib/block-box/.gitignore +++ /dev/null @@ -1 +0,0 @@ -db-data/* diff --git a/contrib/block-box/LICENSE b/contrib/block-box/LICENSE deleted file mode 100644 index 8dada3eda..000000000 --- a/contrib/block-box/LICENSE +++ /dev/null @@ -1,201 +0,0 @@ - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. 
- - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. 
- - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. 
Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of 
the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. 
Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. 
- - END OF TERMS AND CONDITIONS - - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "{}" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. - - Copyright {yyyy} {name of copyright owner} - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. diff --git a/contrib/block-box/Makefile b/contrib/block-box/Makefile deleted file mode 100644 index c0c79fa8d..000000000 --- a/contrib/block-box/Makefile +++ /dev/null @@ -1,17 +0,0 @@ -CINDER_BRANCH ?= master # master, stable/ocata, refs/changes/67/418167/1 -NAME_PREFIX ?= "" -PLATFORM ?= debian # ubuntu, centos -TAG ?= latest - -all: base lvm devbox - -base: - docker build https://git.openstack.org/openstack/loci-cinder.git\#:$(PLATFORM) --tag cinder:$(TAG) --build-arg PROJECT_REF="stable/ocata" - -lvm: - docker build -t cinder-lvm -f ./docker_files/Dockerfile.cinder-lvm . - -devbox: - cp ../../test-requirements.txt ./docker_files/ - docker build -t cinder-devenv -f ./docker_files/Dockerfile.cinder-devenv . 
- rm ./docker_files/test-requirements.txt diff --git a/contrib/block-box/README.CVOL-LVM.md b/contrib/block-box/README.CVOL-LVM.md deleted file mode 100644 index 9b8b74752..000000000 --- a/contrib/block-box/README.CVOL-LVM.md +++ /dev/null @@ -1,49 +0,0 @@ -You'll need to modify how you're doing things to get to the iscsi Target. -Specifically, using a Docker network hoses everything because the IP of the -target is the internal containers IP NOT the IP of the host. - -Setting `network_mode: host` solves this, but that creates a new problem. -Can't use `link` when using network_mode: host. - -Sigh... so; docker run has "add-host=host:IP" that we should be able to find -equivalent in compose. We just need to define a network and assign IP's to the -other containers, then this should work. - -Compose looks like this: - extra_hosts: - - "hostname:1.1.1.1" - - "anotherhost:2.2.2.2" - -This just adds entries to /etc/hosts for you. Kinda handy - -So, if we create a network and assign IP's to the supporting cast (rabbit, -mariadb api etc etc) we can then just use this to make them accessible instead -of using `link` - -OHHHH! Add `ipc_mode: host`, shared memory; may speed things up a bit? - -Finally... 
for reference; The docker run command for this looks something -like: - `docker run -it \ - -v /dev/:/dev/ \ - -v /run/:/run/:shared -v - /etc/localtime:/etc/localtime:ro \ - --net host \ - --privileged cinder_debian \ - bash` - -### https://wiki.debian.org/LVM -vim /etc/lvm/lvm.conf - # disable udev_ stuff -/usr/sbin/tgtd -tgtadm --lld iscsi --op show --mode target -tgtadm --lld iscsi --op new --mode target --tid 1 -T iqn.2001-04.com.example:storage.disk2.amiens.sys1.xyz -tgtadm --lld iscsi --mode logicalunit --op new --tid 1 --lun 1 -b /dev/vg-group/lv -tgtadm --lld iscsi --op bind --mode target --tid 1 -I ALL - -## Notes here: https://wiki.debian.org/SAN/iSCSI/open-iscsi - -### Initiator side -iscsiadm -m discovery -t sendtargets -p -iscsiadm -m node --targetname= --login - diff --git a/contrib/block-box/README.md b/contrib/block-box/README.md deleted file mode 100644 index ceaafd6c3..000000000 --- a/contrib/block-box/README.md +++ /dev/null @@ -1,212 +0,0 @@ -# block-box -Standalone Cinder Containerized using Docker Compose - -## Cinder -Provides Block Storage as a service as part of the OpenStack Project. -This project deploys Cinder in containers using docker-compose and -also enabled the use of Cinder's noauth option which eliminates the -need for keystone. One could also easily add keystone into the -compose file along with an init script to set up endpoints. - -## LOCI (Lightweight Open Compute Initiative) -The block-box uses OpenStack Loci to build a base Cinder image to use -for each service. The examples use Debian as the base OS, but you can -choose between Debian, CentOS and Ubuntu. - -We're currently using Cinder's noauth option, but this pattern provides -flexibility to add a Keystone service if desired. - -## To build -Start by building the required images. This repo includes a Makefile to -enable building of openstack/loci images of Cinder. 
The -Makefile includes variables to select between platform (debian, ubuntu or -centos) and also allows which branch of each project to build the image from. -This includes master, stable/xyz as well as patch versions. Additional -variables are provided and can be passed to make using the `-e` option to -control things like naming and image tags. See the Makefile for more info. - -If you're going to utilize an external storage device (ie not using LVM), all -you need to build is the base Cinder image. Set the variable in the Makefile -to choose the Cinder Branch you'd like to use and Platform then simply run: - -```make base``` - -You can also build an image to run LVM (**NOTE**: This is dependent on the base cinder image): - -```make lvm``` - -Finally the last image is a devenv image that will mount the cinder repo you've -checked out into a container and includes test-requirements. - -```make base``` - -For more information and options, check out the openstack/loci page -on [github](https://github.com/openstack/loci). - -**NOTE** The loci project is moving fairly quickly, and it may or may not -continue to be a straight forward light weight method of building container -Images. The build has been known to now work at times, and if it becomes -bloated or burdensome it's easy to swap in another image builder (or write your -own even). - -This will result in some base images that we'll use: - -1. cinder (openstack/loci image) -2. cinder-lvm (special cinder image with LVM config) -3. cinder-devenv (provides a Cinder development env container) - -### cinder -Creates a base image with cinder installed via source. This base image is -enough to run all of the services including api, scheduler and volume with -the exception of cinder-volume with the LVM driver which needs some extra -packages installed like LVM2 and iSCSI target driver. - -Each Cinder service has an executable entrypoint at /usr/local/bin. 
- -**NOTE** If you choose to build images from something other than the default Debian -base, you'll need to modify the Dockerfile for this image as well. - -### cinder-lvm -This is a special image that is built from the base cinder image and adds the -necessary packages for LVM and iSCSI. - -### cinder-devenv -You might want to generate a conf file, or if you're like me, use Docker to do -some of your Cinder development. You can run this container which has all of -the current development packages and python test-requirements for Cinder. - -You can pass in your current source directory from your local machine using -v -in your run command, here's a trivial example that generates a sample config -file. Note we don't use tox because we're already in an isolated environment. - -```shell -docker run -it -v /home/jgriffith/src/cinder:/cinder \ - cinder-devenv \ - bash -c "cd cinder && oslo-config-generator \ - --config-file=cinder/config/cinder-config-generator.conf" -``` - -Keep in mind the command will execute and then exit, the result is written to -the cinder directory specified in the -v argument. In this example for -instance the result would be a newly generated cinder.conf.sample file in -/home/jgriffith/src/cinder/etc/cinder - -## Accessing via cinderclient -You can of course build a cinderclient container with a `cinder` entrypoint and -use that for access, but in order to take advantage of things like the -local-attach extension, you'll need to install the client tools on the host. - -The current release version in pypi doesn't include noauth -support, so you'll need to install from source, but that's not hard: - -```shell -sudo pip install pytz -sudo pip install git+https://github.com/openstack/python-cinderclient -sudo pip install git+https://github.com/openstack/python-brick-cinderclient-ext -``` -Before using, you must specify these env variables at least, -``OS_AUTH_TYPE``, ``CINDER_ENDPOINT``, ``OS_PROJECT_ID``, ``OS_USERNAME``. 
-You can utilize our sample file ``cinder.rc``, then you can use client -to communicate with your containerized cinder deployment with noauth!! - - -Remember, to perform local-attach/local-detach of volumes you'll need to use -sudo. To preserve your env variables don't forget to use `sudo -E cinder xxxxx` - -## To run -docker-compose up -d - -Don't forget to modify the `etc-cinder/cinder.conf` file as needed for your -specific driver. We'll be adding support for the LVM driver and LIO Tgts -shortly, but for now you won't have much luck without using an external -device (no worries, there are over 80 to choose from). - -**Note**: If you use ``cinder-lvm`` image, you must guarantee the required -volume group which is specified in the ``cinder.conf`` already exists in -the host environment before starting the service. - -## Adding your own driver -We don't do multi-backend in this type of environment; instead we just add -another container running the backend we want. We can easily add to the base -service we've create using additional compose files. - -The file `docker-compose-add-vol-service.yml` provides an example additional -compose file that will create another cinder-volume service configured to run -the SolidFire backend. - -After launching the main compose file: -```shell -docker-compose up -d -``` - -Once the services are initialized and the database is synchronized, you can add -another backend by running: -```shell -docker-compose -f ./docker-compose-add-vol-service.yml up -d -``` - -Note that things like network settings and ports are IMPORTANT here!! - -## Access using the cinderclient container - -You can use your own cinderclient and openrc, or use the provided cinderclient -container. You'll need to make sure and specify to use the same network -that was used by compose. 
- -```shell -docker run -it -e OS_AUTH_TYPE=noauth \ - -e CINDERCLIENT_BYPASS_URL=http://cinder-api:8776/v3 \ - -e OS_PROJECT_ID=foo \ - -e OS_VOLUME_API_VERSION=3.27 \ - --network blockbox_default cinderclient list -``` - -# Or without docker-compose -That's ok, you can always just run the commands yourself using docker run: -```shell - -# We set passwords and db creation in the docker-entrypoint-initdb.d script -docker run -d -p 3306:3306 \ - -v ~/block-box/docker-entrypoint-initdb.d:/docker-entrypoint-initdb.d \ - --name mariadb \ - --hostname mariadb \ - -e MYSQL_ROOT_PASSWORD=password \ - mariadb - -# Make sure the environment vars match the startup script for your database host -docker run -d -p 5000:5000 \ - -p 35357:35357 \ - --link mariadb \ - --name keystone \ - --hostname keystone \ - -e OS_PASSWORD=password \ - -e DEMO_PASSWORD=password \ - -e DB_HOST=mariadb \ - -e DB_PASSWORD=password \ - keystone - -docker run -d -p 5672:5672 --name rabbitmq --hostname rabbitmq rabbitmq - -docker run -d -p 8776:8776 \ - --link mariadb \ - --link rabbitmq \ - --name cinder-api \ - --hostname cinder-api \ - -v ~/block-box/etc-cinder:/etc/cinder \ - -v ~/block-box/init-scripts:/init-scripts - cinder_debian sh /init-scripts/cinder-api.sh - -docker run -d --name cinder-scheduler \ - --hostname cinder-scheduler \ - --link mariadb \ - --link rabbitmq \ - -v ~/block-box/etc-cinder:/etc/cinder \ - cinder_debian cinder-scheduler - -docker run -d --name cinder-volume \ - --hostname cinder-volume \ - --link mariadb \ - --link rabbitmq \ - -v ~/block-box/etc-cinder:/etc/cinder \ - cinder-debian cinder-volume -``` diff --git a/contrib/block-box/cinder.rc b/contrib/block-box/cinder.rc deleted file mode 100644 index bc1446ad1..000000000 --- a/contrib/block-box/cinder.rc +++ /dev/null @@ -1,5 +0,0 @@ -export OS_AUTH_SYSTEM=noauth -export CINDER_ENDPOINT=http://127.0.0.1:8776/v3 -export OS_PROJECT_ID=myproject -export OS_USERNAME=bubba -export OS_VOLUME_API_VERSION=3.27 diff --git 
a/contrib/block-box/contrib/cinderflex b/contrib/block-box/contrib/cinderflex deleted file mode 100644 index 020d81934..000000000 --- a/contrib/block-box/contrib/cinderflex +++ /dev/null @@ -1,208 +0,0 @@ -#!/bin/bash - -# Copyright 2015 The Kubernetes Authors. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -# Based on this excellent blog post by Ivan - http://blog.e0ne.info/post/Attach-Cinder-Volume-to-the-Ironic-Instance-without-Nova.aspx - -export OS_PROJECT_DOMAIN_ID=default -export OS_USER_DOMAIN_ID=default -export OS_PROJECT_NAME=admin -export OS_PASSWORD=secret -export OS_AUTH_URL=http://10.0.0.2/identity -export OS_USERNAME=admin -export PATH=$PATH:/lib/udev - -# Notes: -# - Please install "jq" package before using this driver. -usage() { - err "Invalid usage. 
Usage: " - err "\t$0 init" - err "\t$0 attach " - err "\t$0 detach " - err "\t$0 waitforattach " - err "\t$0 mountdevice " - err "\t$0 unmountdevice " - err "\t$0 getvolumename " - err "\t$0 isattached " - exit 1 -} - -err() { - echo -ne $* 1>&2 -} - -log() { - echo -ne $* >&1 -} - -ismounted() { - MOUNT=`findmnt -n ${MNTPATH} 2>/dev/null | cut -d' ' -f1` - if [ "${MOUNT}" == "${MNTPATH}" ]; then - echo "1" - else - echo "0" - fi -} - -getdevice() { - VOLUMEID=$(echo ${JSON_PARAMS} | jq -r '.volumeID') - DMDEV="/dev/disk/by-path/$(ls -1 /dev/disk/by-path/ | grep $VOLUMEID)" - echo ${DMDEV} -} - -attach() { - JSON_PARAMS=$1 - SIZE=$(echo $1 | jq -r '.size') - - VOLUMEID=$(echo ${JSON_PARAMS} | jq -r '.volumeID') - sudo -E PATH=$PATH cinder local-attach $VOLUMEID >> /tmp/cinder2.log 2>&1 - - DMDEV=$(getdevice) - if [ ! -b "${DMDEV}" ]; then - err "{\"status\": \"Failure\", \"message\": \"Volume ${VOLUMEID} does not exist\"}" - exit 1 - fi - log "{\"status\": \"Success\", \"device\":\"${DMDEV}\"}" - exit 0 -} - -detach() { - log "{\"status\": \"Success\"}" - exit 0 -} - -waitforattach() { - #shift - #attach $* - log "{\"status\": \"Success\"}" - exit 0 - -} - -domountdevice() { - MNTPATH=$1 - JSON_PARAMS=$2 - DMDEV=$(getdevice) - FSTYPE=$(echo $JSON_PARAMS|jq -r '.["kubernetes.io/fsType"]') - - if [ ! -b "${DMDEV}" ]; then - err "{\"status\": \"Failure\", \"message\": \"${DMDEV} does not exist\"}" - exit 1 - fi - - if [ $(ismounted) -eq 1 ] ; then - log "{\"status\": \"Success\"}" - exit 0 - fi - - VOLFSTYPE=`blkid -o udev ${DMDEV} 2>/dev/null|grep "ID_FS_TYPE"|cut -d"=" -f2` - if [ "${VOLFSTYPE}" == "" ]; then - mkfs -t ${FSTYPE} ${DMDEV} >/dev/null 2>&1 - if [ $? -ne 0 ]; then - err "{ \"status\": \"Failure\", \"message\": \"Failed to create fs ${FSTYPE} on device ${DMDEV}\"}" - exit 1 - fi - fi - - mkdir -p ${MNTPATH} &> /dev/null - - mount ${DMDEV} ${MNTPATH} &> /dev/null - if [ $? 
-ne 0 ]; then - err "{ \"status\": \"Failure\", \"message\": \"Failed to mount device ${DMDEV} at ${MNTPATH}\"}" - exit 1 - fi - log "{\"status\": \"Success\"}" - exit 0 -} - -unmountdevice() { - MNTPATH=$1 - if [ ! -d ${MNTPATH} ]; then - log "{\"status\": \"Success\"}" - exit 0 - fi - - if [ $(ismounted) -eq 0 ] ; then - log "{\"status\": \"Success\"}" - exit 0 - fi - - umount ${MNTPATH} &> /dev/null - if [ $? -ne 0 ]; then - err "{ \"status\": \"Failed\", \"message\": \"Failed to unmount volume at ${MNTPATH}\"}" - exit 1 - fi - - $VOLUMEID=$(echo ${MNTPATH} | sed -r "s/^.*volume-(.*)-lun.*/\1/") - sudo -E PATH=$PATH cinder local-detach $VOLUMEID >> /tmp/cinder2.log 2>&1 - - log "{\"status\": \"Success\"}" - exit 0 -} - -getvolumename() { - JSON_PARAMS=$1 - UUID=$(echo ${JSON_PARAMS} | jq -r '.volumeID') - - log "{\"status\": \"Success\", \"volumeName\":\"${UUID}\"}" - exit 0 -} - -isattached() { - log "{\"status\": \"Success\", \"attached\":true}" - exit 0 -} - -op=$1 - -if [ "$op" = "init" ]; then - log "{\"status\": \"Success\"}" - exit 0 -fi - -if [ $# -lt 2 ]; then - usage -fi - -shift - -case "$op" in - attach) - attach $* - ;; - detach) - detach $* - ;; - waitforattach) - waitforattach $* - ;; - mountdevice) - domountdevice $* - ;; - unmountdevice) - unmountdevice $* - ;; - getvolumename) - getvolumename $* - ;; - isattached) - isattached $* - ;; - *) - err "{ \"status\": \"Not supported\" }" - exit 1 -esac - -exit 1 diff --git a/contrib/block-box/docker-compose-add-vol-service.yml b/contrib/block-box/docker-compose-add-vol-service.yml deleted file mode 100644 index 74e1a3bc9..000000000 --- a/contrib/block-box/docker-compose-add-vol-service.yml +++ /dev/null @@ -1,28 +0,0 @@ -version: "2" -services: - cinder-volume-sf: - image: cinder - hostname: cinder-volume-sf - privileged: true - volumes: - - ./etc-cinder:/etc/cinder - - /dev/:/dev/ - - /run/:/run/:shared - - /etc/localtime:/etc/localtime:ro - - /lib/modules:/lib/modules:ro - extra_hosts: - - 
"rabbitmq:172.49.49.5" - - "mariadb:172.49.49.6" - networks: - cindernet: - ipv4_address: 172.49.49.10 - command: bash -c "cinder-volume -d --config-file /etc/cinder/cinder.conf.sf" - -networks: - cindernet: - driver: bridge - ipam: - driver: default - config: - - - subnet: 172.49.49.0/24 diff --git a/contrib/block-box/docker-compose.yml b/contrib/block-box/docker-compose.yml deleted file mode 100644 index f7dba4a3c..000000000 --- a/contrib/block-box/docker-compose.yml +++ /dev/null @@ -1,86 +0,0 @@ -version: "2" -services: - rabbitmq: - image: rabbitmq - ports: - - "5672:5672" - hostname: rabbitmq - networks: - cindernet: - ipv4_address: 172.49.49.5 - mariadb: - image: mariadb - hostname: mariadb - environment: - - MYSQL_ROOT_PASSWORD=password - ports: - - 3306:3306 - networks: - cindernet: - ipv4_address: 172.49.49.6 - volumes: - - ./db-data:/var/lib/mysql - - ./docker-entrypoint-initdb.d:/docker-entrypoint-initdb.d - cinder-api: - image: cinder - hostname: cinder-api - volumes: - - ./etc-cinder:/etc/cinder - - ./init-scripts:/init-scripts - network_mode: "host" - ports: - - 8776:8776 - depends_on: - - mariadb - extra_hosts: - - "rabbitmq:172.49.49.5" - - "mariadb:172.49.49.6" - environment: - - INIT_DB=true - command: sh /init-scripts/cinder-api.sh - cinder-scheduler: - image: cinder - hostname: cinder-scheduler - volumes: - - ./etc-cinder:/etc/cinder - depends_on: - - mariadb - - rabbitmq - - cinder-api - extra_hosts: - - "rabbitmq:172.49.49.5" - - "mariadb:172.49.49.6" - network_mode: "host" - depends_on: - - cinder-api - restart: on-failure - command: cinder-scheduler - cinder-volume: - image: cinder-lvm - hostname: cinder-lvm - privileged: true - volumes: - - ./etc-cinder:/etc/cinder - - /dev/:/dev/ - - /run/:/run/:shared - - /etc/localtime:/etc/localtime:ro - - /lib/modules:/lib/modules:ro - depends_on: - - cinder-scheduler - ports: - - 3260:3260 - extra_hosts: - - "rabbitmq:172.49.49.5" - - "mariadb:172.49.49.6" - network_mode: "host" - restart: on-failure 
- command: bash -c "sleep 5 && /usr/sbin/tgtd && cinder-volume -d" - -networks: - cindernet: - driver: bridge - ipam: - driver: default - config: - - - subnet: 172.49.49.0/24 diff --git a/contrib/block-box/docker-entrypoint-initdb.d/script.sql b/contrib/block-box/docker-entrypoint-initdb.d/script.sql deleted file mode 100755 index 3b0f123fd..000000000 --- a/contrib/block-box/docker-entrypoint-initdb.d/script.sql +++ /dev/null @@ -1,5 +0,0 @@ -CREATE DATABASE cinder; -GRANT ALL PRIVILEGES ON cinder.* TO 'cinder'@'localhost' \ - IDENTIFIED BY 'password'; -GRANT ALL PRIVILEGES ON cinder.* TO 'cinder'@'%' \ -IDENTIFIED BY 'password'; diff --git a/contrib/block-box/docker_files/Dockerfile.cinder-devenv b/contrib/block-box/docker_files/Dockerfile.cinder-devenv deleted file mode 100644 index b811732d0..000000000 --- a/contrib/block-box/docker_files/Dockerfile.cinder-devenv +++ /dev/null @@ -1,12 +0,0 @@ -FROM python -COPY test-requirements.txt /test-requirements.txt -RUN apt-get update \ - && apt-get install -y \ - libssl-dev \ - git-core \ - libmysqlclient-dev \ - libpq-dev \ - libffi-dev \ - libxslt-dev \ - && pip install -r test-requirements.txt \ - && rm -rf /var/lib/apt/lists/* diff --git a/contrib/block-box/docker_files/Dockerfile.cinder-lvm b/contrib/block-box/docker_files/Dockerfile.cinder-lvm deleted file mode 100644 index 737cff2ff..000000000 --- a/contrib/block-box/docker_files/Dockerfile.cinder-lvm +++ /dev/null @@ -1,11 +0,0 @@ -FROM cinder - -RUN DEBIAN_FRONTEND=noninteractive apt-get update \ -&& apt-get install -y --no-install-recommends \ -vim lvm2 tgt \ -&& rm -rf /var/lib/apt -RUN sed -i -e 's/udev_sync = 1/udev_sync = 0/g' /etc/lvm/lvm.conf \ - && sed -i -e 's/udev_rules = 1/udev_rules = 0/g' /etc/lvm/lvm.conf \ - && sed -i -e 's/use_lvmetad = 0/use_lvmetad =1/g' /etc/lvm/lvm.conf \ - && echo "include /var/lib/cinder/volumes/*" >> /etc/tgt/targets.conf -CMD /usr/sbin/tgtd diff --git a/contrib/block-box/docker_files/README 
b/contrib/block-box/docker_files/README deleted file mode 100644 index b1a7d5f3e..000000000 --- a/contrib/block-box/docker_files/README +++ /dev/null @@ -1,6 +0,0 @@ -Need to turn off udev_ rules - s/udev_sync = 1/udev_sync = 0/ - s/udev_rules = 1/udev_rules = 0/ - -Need to turn ON use_lvmetad - s/use_lvmetad = 0/use_lvmetad = 1/ diff --git a/contrib/block-box/docker_files/lvm.conf b/contrib/block-box/docker_files/lvm.conf deleted file mode 100644 index ffe26de0f..000000000 --- a/contrib/block-box/docker_files/lvm.conf +++ /dev/null @@ -1,2095 +0,0 @@ -# This is an example configuration file for the LVM2 system. -# It contains the default settings that would be used if there was no -# /etc/lvm/lvm.conf file. -# -# Refer to 'man lvm.conf' for further information including the file layout. -# -# Refer to 'man lvm.conf' for information about how settings configured in -# this file are combined with built-in values and command line options to -# arrive at the final values used by LVM. -# -# Refer to 'man lvmconfig' for information about displaying the built-in -# and configured values used by LVM. -# -# If a default value is set in this file (not commented out), then a -# new version of LVM using this file will continue using that value, -# even if the new version of LVM changes the built-in default value. -# -# To put this file in a different directory and override /etc/lvm set -# the environment variable LVM_SYSTEM_DIR before running the tools. -# -# N.B. Take care that each setting only appears once if uncommenting -# example settings in this file. - - -# Configuration section config. -# How LVM configuration settings are handled. -config { - - # Configuration option config/checks. - # If enabled, any LVM configuration mismatch is reported. - # This implies checking that the configuration key is understood by - # LVM and that the value of the key is the proper type. 
If disabled, - # any configuration mismatch is ignored and the default value is used - # without any warning (a message about the configuration key not being - # found is issued in verbose mode only). - checks = 1 - - # Configuration option config/abort_on_errors. - # Abort the LVM process if a configuration mismatch is found. - abort_on_errors = 0 - - # Configuration option config/profile_dir. - # Directory where LVM looks for configuration profiles. - profile_dir = "/etc/lvm/profile" -} - -# Configuration section devices. -# How LVM uses block devices. -devices { - - # Configuration option devices/dir. - # Directory in which to create volume group device nodes. - # Commands also accept this as a prefix on volume group names. - # This configuration option is advanced. - dir = "/dev" - - # Configuration option devices/scan. - # Directories containing device nodes to use with LVM. - # This configuration option is advanced. - scan = [ "/dev" ] - - # Configuration option devices/obtain_device_list_from_udev. - # Obtain the list of available devices from udev. - # This avoids opening or using any inapplicable non-block devices or - # subdirectories found in the udev directory. Any device node or - # symlink not managed by udev in the udev directory is ignored. This - # setting applies only to the udev-managed device directory; other - # directories will be scanned fully. LVM needs to be compiled with - # udev support for this setting to apply. - obtain_device_list_from_udev = 1 - - # Configuration option devices/external_device_info_source. - # Select an external device information source. - # Some information may already be available in the system and LVM can - # use this information to determine the exact type or use of devices it - # processes. Using an existing external device information source can - # speed up device processing as LVM does not need to run its own native - # routines to acquire this information. 
For example, this information - # is used to drive LVM filtering like MD component detection, multipath - # component detection, partition detection and others. - # - # Accepted values: - # none - # No external device information source is used. - # udev - # Reuse existing udev database records. Applicable only if LVM is - # compiled with udev support. - # - external_device_info_source = "none" - - # Configuration option devices/preferred_names. - # Select which path name to display for a block device. - # If multiple path names exist for a block device, and LVM needs to - # display a name for the device, the path names are matched against - # each item in this list of regular expressions. The first match is - # used. Try to avoid using undescriptive /dev/dm-N names, if present. - # If no preferred name matches, or if preferred_names are not defined, - # the following built-in preferences are applied in order until one - # produces a preferred name: - # Prefer names with path prefixes in the order of: - # /dev/mapper, /dev/disk, /dev/dm-*, /dev/block. - # Prefer the name with the least number of slashes. - # Prefer a name that is a symlink. - # Prefer the path with least value in lexicographical order. - # - # Example - # preferred_names = [ "^/dev/mpath/", "^/dev/mapper/mpath", "^/dev/[hs]d" ] - # - # This configuration option does not have a default value defined. - - # Configuration option devices/filter. - # Limit the block devices that are used by LVM commands. - # This is a list of regular expressions used to accept or reject block - # device path names. Each regex is delimited by a vertical bar '|' - # (or any character) and is preceded by 'a' to accept the path, or - # by 'r' to reject the path. The first regex in the list to match the - # path is used, producing the 'a' or 'r' result for the device. - # When multiple path names exist for a block device, if any path name - # matches an 'a' pattern before an 'r' pattern, then the device is - # accepted. 
If all the path names match an 'r' pattern first, then the - # device is rejected. Unmatching path names do not affect the accept - # or reject decision. If no path names for a device match a pattern, - # then the device is accepted. Be careful mixing 'a' and 'r' patterns, - # as the combination might produce unexpected results (test changes.) - # Run vgscan after changing the filter to regenerate the cache. - # See the use_lvmetad comment for a special case regarding filters. - # - # Example - # Accept every block device: - # filter = [ "a|.*/|" ] - # Reject the cdrom drive: - # filter = [ "r|/dev/cdrom|" ] - # Work with just loopback devices, e.g. for testing: - # filter = [ "a|loop|", "r|.*|" ] - # Accept all loop devices and ide drives except hdc: - # filter = [ "a|loop|", "r|/dev/hdc|", "a|/dev/ide|", "r|.*|" ] - # Use anchors to be very specific: - # filter = [ "a|^/dev/hda8$|", "r|.*/|" ] - # - # This configuration option has an automatic default value. - # filter = [ "a|.*/|" ] - - # Configuration option devices/global_filter. - # Limit the block devices that are used by LVM system components. - # Because devices/filter may be overridden from the command line, it is - # not suitable for system-wide device filtering, e.g. udev and lvmetad. - # Use global_filter to hide devices from these LVM system components. - # The syntax is the same as devices/filter. Devices rejected by - # global_filter are not opened by LVM. - # This configuration option has an automatic default value. - # global_filter = [ "a|.*/|" ] - - # Configuration option devices/cache_dir. - # Directory in which to store the device cache file. - # The results of filtering are cached on disk to avoid rescanning dud - # devices (which can take a very long time). By default this cache is - # stored in a file named .cache. It is safe to delete this file; the - # tools regenerate it. 
If obtain_device_list_from_udev is enabled, the - # list of devices is obtained from udev and any existing .cache file - # is removed. - cache_dir = "/run/lvm" - - # Configuration option devices/cache_file_prefix. - # A prefix used before the .cache file name. See devices/cache_dir. - cache_file_prefix = "" - - # Configuration option devices/write_cache_state. - # Enable/disable writing the cache file. See devices/cache_dir. - write_cache_state = 1 - - # Configuration option devices/types. - # List of additional acceptable block device types. - # These are of device type names from /proc/devices, followed by the - # maximum number of partitions. - # - # Example - # types = [ "fd", 16 ] - # - # This configuration option is advanced. - # This configuration option does not have a default value defined. - - # Configuration option devices/sysfs_scan. - # Restrict device scanning to block devices appearing in sysfs. - # This is a quick way of filtering out block devices that are not - # present on the system. sysfs must be part of the kernel and mounted.) - sysfs_scan = 1 - - # Configuration option devices/multipath_component_detection. - # Ignore devices that are components of DM multipath devices. - multipath_component_detection = 1 - - # Configuration option devices/md_component_detection. - # Ignore devices that are components of software RAID (md) devices. - md_component_detection = 1 - - # Configuration option devices/fw_raid_component_detection. - # Ignore devices that are components of firmware RAID devices. - # LVM must use an external_device_info_source other than none for this - # detection to execute. - fw_raid_component_detection = 0 - - # Configuration option devices/md_chunk_alignment. - # Align PV data blocks with md device's stripe-width. - # This applies if a PV is placed directly on an md device. - md_chunk_alignment = 1 - - # Configuration option devices/default_data_alignment. - # Default alignment of the start of a PV data area in MB. 
- # If set to 0, a value of 64KiB will be used. - # Set to 1 for 1MiB, 2 for 2MiB, etc. - # This configuration option has an automatic default value. - # default_data_alignment = 1 - - # Configuration option devices/data_alignment_detection. - # Detect PV data alignment based on sysfs device information. - # The start of a PV data area will be a multiple of minimum_io_size or - # optimal_io_size exposed in sysfs. minimum_io_size is the smallest - # request the device can perform without incurring a read-modify-write - # penalty, e.g. MD chunk size. optimal_io_size is the device's - # preferred unit of receiving I/O, e.g. MD stripe width. - # minimum_io_size is used if optimal_io_size is undefined (0). - # If md_chunk_alignment is enabled, that detects the optimal_io_size. - # This setting takes precedence over md_chunk_alignment. - data_alignment_detection = 1 - - # Configuration option devices/data_alignment. - # Alignment of the start of a PV data area in KiB. - # If a PV is placed directly on an md device and md_chunk_alignment or - # data_alignment_detection are enabled, then this setting is ignored. - # Otherwise, md_chunk_alignment and data_alignment_detection are - # disabled if this is set. Set to 0 to use the default alignment or the - # page size, if larger. - data_alignment = 0 - - # Configuration option devices/data_alignment_offset_detection. - # Detect PV data alignment offset based on sysfs device information. - # The start of a PV aligned data area will be shifted by the - # alignment_offset exposed in sysfs. This offset is often 0, but may - # be non-zero. Certain 4KiB sector drives that compensate for windows - # partitioning will have an alignment_offset of 3584 bytes (sector 7 - # is the lowest aligned logical block, the 4KiB sectors start at - # LBA -1, and consequently sector 63 is aligned on a 4KiB boundary). - # pvcreate --dataalignmentoffset will skip this detection. 
- data_alignment_offset_detection = 1 - - # Configuration option devices/ignore_suspended_devices. - # Ignore DM devices that have I/O suspended while scanning devices. - # Otherwise, LVM waits for a suspended device to become accessible. - # This should only be needed in recovery situations. - ignore_suspended_devices = 0 - - # Configuration option devices/ignore_lvm_mirrors. - # Do not scan 'mirror' LVs to avoid possible deadlocks. - # This avoids possible deadlocks when using the 'mirror' segment type. - # This setting determines whether LVs using the 'mirror' segment type - # are scanned for LVM labels. This affects the ability of mirrors to - # be used as physical volumes. If this setting is enabled, it is - # impossible to create VGs on top of mirror LVs, i.e. to stack VGs on - # mirror LVs. If this setting is disabled, allowing mirror LVs to be - # scanned, it may cause LVM processes and I/O to the mirror to become - # blocked. This is due to the way that the mirror segment type handles - # failures. In order for the hang to occur, an LVM command must be run - # just after a failure and before the automatic LVM repair process - # takes place, or there must be failures in multiple mirrors in the - # same VG at the same time with write failures occurring moments before - # a scan of the mirror's labels. The 'mirror' scanning problems do not - # apply to LVM RAID types like 'raid1' which handle failures in a - # different way, making them a better choice for VG stacking. - ignore_lvm_mirrors = 1 - - # Configuration option devices/disable_after_error_count. - # Number of I/O errors after which a device is skipped. - # During each LVM operation, errors received from each device are - # counted. If the counter of a device exceeds the limit set here, - # no further I/O is sent to that device for the remainder of the - # operation. Setting this to 0 disables the counters altogether. 
- disable_after_error_count = 0 - - # Configuration option devices/require_restorefile_with_uuid. - # Allow use of pvcreate --uuid without requiring --restorefile. - require_restorefile_with_uuid = 1 - - # Configuration option devices/pv_min_size. - # Minimum size in KiB of block devices which can be used as PVs. - # In a clustered environment all nodes must use the same value. - # Any value smaller than 512KiB is ignored. The previous built-in - # value was 512. - pv_min_size = 2048 - - # Configuration option devices/issue_discards. - # Issue discards to PVs that are no longer used by an LV. - # Discards are sent to an LV's underlying physical volumes when the LV - # is no longer using the physical volumes' space, e.g. lvremove, - # lvreduce. Discards inform the storage that a region is no longer - # used. Storage that supports discards advertise the protocol-specific - # way discards should be issued by the kernel (TRIM, UNMAP, or - # WRITE SAME with UNMAP bit set). Not all storage will support or - # benefit from discards, but SSDs and thinly provisioned LUNs - # generally do. If enabled, discards will only be issued if both the - # storage and kernel provide support. - issue_discards = 0 - - # Configuration option devices/allow_changes_with_duplicate_pvs. - # Allow VG modification while a PV appears on multiple devices. - # When a PV appears on multiple devices, LVM attempts to choose the - # best device to use for the PV. If the devices represent the same - # underlying storage, the choice has minimal consequence. If the - # devices represent different underlying storage, the wrong choice - # can result in data loss if the VG is modified. Disabling this - # setting is the safest option because it prevents modifying a VG - # or activating LVs in it while a PV appears on multiple devices. - # Enabling this setting allows the VG to be used as usual even with - # uncertain devices. - allow_changes_with_duplicate_pvs = 0 -} - -# Configuration section allocation. 
-# How LVM selects space and applies properties to LVs. -allocation { - - # Configuration option allocation/cling_tag_list. - # Advise LVM which PVs to use when searching for new space. - # When searching for free space to extend an LV, the 'cling' allocation - # policy will choose space on the same PVs as the last segment of the - # existing LV. If there is insufficient space and a list of tags is - # defined here, it will check whether any of them are attached to the - # PVs concerned and then seek to match those PV tags between existing - # extents and new extents. - # - # Example - # Use the special tag "@*" as a wildcard to match any PV tag: - # cling_tag_list = [ "@*" ] - # LVs are mirrored between two sites within a single VG, and - # PVs are tagged with either @site1 or @site2 to indicate where - # they are situated: - # cling_tag_list = [ "@site1", "@site2" ] - # - # This configuration option does not have a default value defined. - - # Configuration option allocation/maximise_cling. - # Use a previous allocation algorithm. - # Changes made in version 2.02.85 extended the reach of the 'cling' - # policies to detect more situations where data can be grouped onto - # the same disks. This setting can be used to disable the changes - # and revert to the previous algorithm. - maximise_cling = 1 - - # Configuration option allocation/use_blkid_wiping. - # Use blkid to detect existing signatures on new PVs and LVs. - # The blkid library can detect more signatures than the native LVM - # detection code, but may take longer. LVM needs to be compiled with - # blkid wiping support for this setting to apply. LVM native detection - # code is currently able to recognize: MD device signatures, - # swap signature, and LUKS signatures. To see the list of signatures - # recognized by blkid, check the output of the 'blkid -k' command. - use_blkid_wiping = 1 - - # Configuration option allocation/wipe_signatures_when_zeroing_new_lvs. 
- # Look for and erase any signatures while zeroing a new LV. - # The --wipesignatures option overrides this setting. - # Zeroing is controlled by the -Z/--zero option, and if not specified, - # zeroing is used by default if possible. Zeroing simply overwrites the - # first 4KiB of a new LV with zeroes and does no signature detection or - # wiping. Signature wiping goes beyond zeroing and detects exact types - # and positions of signatures within the whole LV. It provides a - # cleaner LV after creation as all known signatures are wiped. The LV - # is not claimed incorrectly by other tools because of old signatures - # from previous use. The number of signatures that LVM can detect - # depends on the detection code that is selected (see - # use_blkid_wiping.) Wiping each detected signature must be confirmed. - # When this setting is disabled, signatures on new LVs are not detected - # or erased unless the --wipesignatures option is used directly. - wipe_signatures_when_zeroing_new_lvs = 1 - - # Configuration option allocation/mirror_logs_require_separate_pvs. - # Mirror logs and images will always use different PVs. - # The default setting changed in version 2.02.85. - mirror_logs_require_separate_pvs = 0 - - # Configuration option allocation/raid_stripe_all_devices. - # Stripe across all PVs when RAID stripes are not specified. - # If enabled, all PVs in the VG or on the command line are used for raid0/4/5/6/10 - # when the command does not specify the number of stripes to use. - # This was the default behaviour until release 2.02.162. - # This configuration option has an automatic default value. - # raid_stripe_all_devices = 0 - - # Configuration option allocation/cache_pool_metadata_require_separate_pvs. - # Cache pool metadata and data will always use different PVs. - cache_pool_metadata_require_separate_pvs = 0 - - # Configuration option allocation/cache_mode. - # The default cache mode used for new cache. 
- # - # Accepted values: - # writethrough - # Data blocks are immediately written from the cache to disk. - # writeback - # Data blocks are written from the cache back to disk after some - # delay to improve performance. - # - # This setting replaces allocation/cache_pool_cachemode. - # This configuration option has an automatic default value. - # cache_mode = "writethrough" - - # Configuration option allocation/cache_policy. - # The default cache policy used for new cache volume. - # Since kernel 4.2 the default policy is smq (Stochastic multique), - # otherwise the older mq (Multiqueue) policy is selected. - # This configuration option does not have a default value defined. - - # Configuration section allocation/cache_settings. - # Settings for the cache policy. - # See documentation for individual cache policies for more info. - # This configuration section has an automatic default value. - # cache_settings { - # } - - # Configuration option allocation/cache_pool_chunk_size. - # The minimal chunk size in KiB for cache pool volumes. - # Using a chunk_size that is too large can result in wasteful use of - # the cache, where small reads and writes can cause large sections of - # an LV to be mapped into the cache. However, choosing a chunk_size - # that is too small can result in more overhead trying to manage the - # numerous chunks that become mapped into the cache. The former is - # more of a problem than the latter in most cases, so the default is - # on the smaller end of the spectrum. Supported values range from - # 32KiB to 1GiB in multiples of 32. - # This configuration option does not have a default value defined. - - # Configuration option allocation/cache_pool_max_chunks. - # The maximum number of chunks in a cache pool. - # For cache target v1.9 the recommended maximumm is 1000000 chunks. - # Using cache pool with more chunks may degrade cache performance. - # This configuration option does not have a default value defined. 
- - # Configuration option allocation/thin_pool_metadata_require_separate_pvs. - # Thin pool metdata and data will always use different PVs. - thin_pool_metadata_require_separate_pvs = 0 - - # Configuration option allocation/thin_pool_zero. - # Thin pool data chunks are zeroed before they are first used. - # Zeroing with a larger thin pool chunk size reduces performance. - # This configuration option has an automatic default value. - # thin_pool_zero = 1 - - # Configuration option allocation/thin_pool_discards. - # The discards behaviour of thin pool volumes. - # - # Accepted values: - # ignore - # nopassdown - # passdown - # - # This configuration option has an automatic default value. - # thin_pool_discards = "passdown" - - # Configuration option allocation/thin_pool_chunk_size_policy. - # The chunk size calculation policy for thin pool volumes. - # - # Accepted values: - # generic - # If thin_pool_chunk_size is defined, use it. Otherwise, calculate - # the chunk size based on estimation and device hints exposed in - # sysfs - the minimum_io_size. The chunk size is always at least - # 64KiB. - # performance - # If thin_pool_chunk_size is defined, use it. Otherwise, calculate - # the chunk size for performance based on device hints exposed in - # sysfs - the optimal_io_size. The chunk size is always at least - # 512KiB. - # - # This configuration option has an automatic default value. - # thin_pool_chunk_size_policy = "generic" - - # Configuration option allocation/thin_pool_chunk_size. - # The minimal chunk size in KiB for thin pool volumes. - # Larger chunk sizes may improve performance for plain thin volumes, - # however using them for snapshot volumes is less efficient, as it - # consumes more space and takes extra time for copying. When unset, - # lvm tries to estimate chunk size starting from 64KiB. Supported - # values are in the range 64KiB to 1GiB. - # This configuration option does not have a default value defined. 
- - # Configuration option allocation/physical_extent_size. - # Default physical extent size in KiB to use for new VGs. - # This configuration option has an automatic default value. - # physical_extent_size = 4096 -} - -# Configuration section log. -# How LVM log information is reported. -log { - - # Configuration option log/report_command_log. - # Enable or disable LVM log reporting. - # If enabled, LVM will collect a log of operations, messages, - # per-object return codes with object identification and associated - # error numbers (errnos) during LVM command processing. Then the - # log is either reported solely or in addition to any existing - # reports, depending on LVM command used. If it is a reporting command - # (e.g. pvs, vgs, lvs, lvm fullreport), then the log is reported in - # addition to any existing reports. Otherwise, there's only log report - # on output. For all applicable LVM commands, you can request that - # the output has only log report by using --logonly command line - # option. Use log/command_log_cols and log/command_log_sort settings - # to define fields to display and sort fields for the log report. - # You can also use log/command_log_selection to define selection - # criteria used each time the log is reported. - # This configuration option has an automatic default value. - # report_command_log = 0 - - # Configuration option log/command_log_sort. - # List of columns to sort by when reporting command log. - # See --logonly --configreport log -o help - # for the list of possible fields. - # This configuration option has an automatic default value. - # command_log_sort = "log_seq_num" - - # Configuration option log/command_log_cols. - # List of columns to report when reporting command log. - # See --logonly --configreport log -o help - # for the list of possible fields. - # This configuration option has an automatic default value. 
- # command_log_cols = "log_seq_num,log_type,log_context,log_object_type,log_object_name,log_object_id,log_object_group,log_object_group_id,log_message,log_errno,log_ret_code" - - # Configuration option log/command_log_selection. - # Selection criteria used when reporting command log. - # You can define selection criteria that are applied each - # time log is reported. This way, it is possible to control the - # amount of log that is displayed on output and you can select - # only parts of the log that are important for you. To define - # selection criteria, use fields from log report. See also - # --logonly --configreport log -S help for the - # list of possible fields and selection operators. You can also - # define selection criteria for log report on command line directly - # using --configreport log -S - # which has precedence over log/command_log_selection setting. - # For more information about selection criteria in general, see - # lvm(8) man page. - # This configuration option has an automatic default value. - # command_log_selection = "!(log_type=status && message=success)" - - # Configuration option log/verbose. - # Controls the messages sent to stdout or stderr. - verbose = 0 - - # Configuration option log/silent. - # Suppress all non-essential messages from stdout. - # This has the same effect as -qq. When enabled, the following commands - # still produce output: dumpconfig, lvdisplay, lvmdiskscan, lvs, pvck, - # pvdisplay, pvs, version, vgcfgrestore -l, vgdisplay, vgs. - # Non-essential messages are shifted from log level 4 to log level 5 - # for syslog and lvm2_log_fn purposes. - # Any 'yes' or 'no' questions not overridden by other arguments are - # suppressed and default to 'no'. - silent = 0 - - # Configuration option log/syslog. - # Send log messages through syslog. - syslog = 1 - - # Configuration option log/file. - # Write error and debug log messages to a file specified here. - # This configuration option does not have a default value defined. 
- - # Configuration option log/overwrite. - # Overwrite the log file each time the program is run. - overwrite = 0 - - # Configuration option log/level. - # The level of log messages that are sent to the log file or syslog. - # There are 6 syslog-like log levels currently in use: 2 to 7 inclusive. - # 7 is the most verbose (LOG_DEBUG). - level = 0 - - # Configuration option log/indent. - # Indent messages according to their severity. - indent = 1 - - # Configuration option log/command_names. - # Display the command name on each line of output. - command_names = 0 - - # Configuration option log/prefix. - # A prefix to use before the log message text. - # (After the command name, if selected). - # Two spaces allows you to see/grep the severity of each message. - # To make the messages look similar to the original LVM tools use: - # indent = 0, command_names = 1, prefix = " -- " - prefix = " " - - # Configuration option log/activation. - # Log messages during activation. - # Don't use this in low memory situations (can deadlock). - activation = 0 - - # Configuration option log/debug_classes. - # Select log messages by class. - # Some debugging messages are assigned to a class and only appear in - # debug output if the class is listed here. Classes currently - # available: memory, devices, activation, allocation, lvmetad, - # metadata, cache, locking, lvmpolld. Use "all" to see everything. - debug_classes = [ "memory", "devices", "activation", "allocation", "lvmetad", "metadata", "cache", "locking", "lvmpolld", "dbus" ] -} - -# Configuration section backup. -# How LVM metadata is backed up and archived. -# In LVM, a 'backup' is a copy of the metadata for the current system, -# and an 'archive' contains old metadata configurations. They are -# stored in a human readable text format. -backup { - - # Configuration option backup/backup. - # Maintain a backup of the current metadata configuration. - # Think very hard before turning this off! 
- backup = 1 - - # Configuration option backup/backup_dir. - # Location of the metadata backup files. - # Remember to back up this directory regularly! - backup_dir = "/etc/lvm/backup" - - # Configuration option backup/archive. - # Maintain an archive of old metadata configurations. - # Think very hard before turning this off. - archive = 1 - - # Configuration option backup/archive_dir. - # Location of the metdata archive files. - # Remember to back up this directory regularly! - archive_dir = "/etc/lvm/archive" - - # Configuration option backup/retain_min. - # Minimum number of archives to keep. - retain_min = 10 - - # Configuration option backup/retain_days. - # Minimum number of days to keep archive files. - retain_days = 30 -} - -# Configuration section shell. -# Settings for running LVM in shell (readline) mode. -shell { - - # Configuration option shell/history_size. - # Number of lines of history to store in ~/.lvm_history. - history_size = 100 -} - -# Configuration section global. -# Miscellaneous global LVM settings. -global { - - # Configuration option global/umask. - # The file creation mask for any files and directories created. - # Interpreted as octal if the first digit is zero. - umask = 077 - - # Configuration option global/test. - # No on-disk metadata changes will be made in test mode. - # Equivalent to having the -t option on every command. - test = 0 - - # Configuration option global/units. - # Default value for --units argument. - units = "h" - - # Configuration option global/si_unit_consistency. - # Distinguish between powers of 1024 and 1000 bytes. - # The LVM commands distinguish between powers of 1024 bytes, - # e.g. KiB, MiB, GiB, and powers of 1000 bytes, e.g. KB, MB, GB. - # If scripts depend on the old behaviour, disable this setting - # temporarily until they are updated. - si_unit_consistency = 1 - - # Configuration option global/suffix. - # Display unit suffix for sizes. 
- # This setting has no effect if the units are in human-readable form - # (global/units = "h") in which case the suffix is always displayed. - suffix = 1 - - # Configuration option global/activation. - # Enable/disable communication with the kernel device-mapper. - # Disable to use the tools to manipulate LVM metadata without - # activating any logical volumes. If the device-mapper driver - # is not present in the kernel, disabling this should suppress - # the error messages. - activation = 1 - - # Configuration option global/fallback_to_lvm1. - # Try running LVM1 tools if LVM cannot communicate with DM. - # This option only applies to 2.4 kernels and is provided to help - # switch between device-mapper kernels and LVM1 kernels. The LVM1 - # tools need to be installed with .lvm1 suffices, e.g. vgscan.lvm1. - # They will stop working once the lvm2 on-disk metadata format is used. - # This configuration option has an automatic default value. - # fallback_to_lvm1 = 0 - - # Configuration option global/format. - # The default metadata format that commands should use. - # The -M 1|2 option overrides this setting. - # - # Accepted values: - # lvm1 - # lvm2 - # - # This configuration option has an automatic default value. - # format = "lvm2" - - # Configuration option global/format_libraries. - # Shared libraries that process different metadata formats. - # If support for LVM1 metadata was compiled as a shared library use - # format_libraries = "liblvm2format1.so" - # This configuration option does not have a default value defined. - - # Configuration option global/segment_libraries. - # This configuration option does not have a default value defined. - - # Configuration option global/proc. - # Location of proc filesystem. - # This configuration option is advanced. - proc = "/proc" - - # Configuration option global/etc. - # Location of /etc system configuration directory. - etc = "/etc" - - # Configuration option global/locking_type. - # Type of locking to use. 
- # - # Accepted values: - # 0 - # Turns off locking. Warning: this risks metadata corruption if - # commands run concurrently. - # 1 - # LVM uses local file-based locking, the standard mode. - # 2 - # LVM uses the external shared library locking_library. - # 3 - # LVM uses built-in clustered locking with clvmd. - # This is incompatible with lvmetad. If use_lvmetad is enabled, - # LVM prints a warning and disables lvmetad use. - # 4 - # LVM uses read-only locking which forbids any operations that - # might change metadata. - # 5 - # Offers dummy locking for tools that do not need any locks. - # You should not need to set this directly; the tools will select - # when to use it instead of the configured locking_type. - # Do not use lvmetad or the kernel device-mapper driver with this - # locking type. It is used by the --readonly option that offers - # read-only access to Volume Group metadata that cannot be locked - # safely because it belongs to an inaccessible domain and might be - # in use, for example a virtual machine image or a disk that is - # shared by a clustered machine. - # - locking_type = 1 - - # Configuration option global/wait_for_locks. - # When disabled, fail if a lock request would block. - wait_for_locks = 1 - - # Configuration option global/fallback_to_clustered_locking. - # Attempt to use built-in cluster locking if locking_type 2 fails. - # If using external locking (type 2) and initialisation fails, with - # this enabled, an attempt will be made to use the built-in clustered - # locking. Disable this if using a customised locking_library. - fallback_to_clustered_locking = 1 - - # Configuration option global/fallback_to_local_locking. - # Use locking_type 1 (local) if locking_type 2 or 3 fail. - # If an attempt to initialise type 2 or type 3 locking failed, perhaps - # because cluster components such as clvmd are not running, with this - # enabled, an attempt will be made to use local file-based locking - # (type 1). 
If this succeeds, only commands against local VGs will - # proceed. VGs marked as clustered will be ignored. - fallback_to_local_locking = 1 - - # Configuration option global/locking_dir. - # Directory to use for LVM command file locks. - # Local non-LV directory that holds file-based locks while commands are - # in progress. A directory like /tmp that may get wiped on reboot is OK. - locking_dir = "/run/lock/lvm" - - # Configuration option global/prioritise_write_locks. - # Allow quicker VG write access during high volume read access. - # When there are competing read-only and read-write access requests for - # a volume group's metadata, instead of always granting the read-only - # requests immediately, delay them to allow the read-write requests to - # be serviced. Without this setting, write access may be stalled by a - # high volume of read-only requests. This option only affects - # locking_type 1 viz. local file-based locking. - prioritise_write_locks = 1 - - # Configuration option global/library_dir. - # Search this directory first for shared libraries. - # This configuration option does not have a default value defined. - - # Configuration option global/locking_library. - # The external locking library to use for locking_type 2. - # This configuration option has an automatic default value. - # locking_library = "liblvm2clusterlock.so" - - # Configuration option global/abort_on_internal_errors. - # Abort a command that encounters an internal error. - # Treat any internal errors as fatal errors, aborting the process that - # encountered the internal error. Please only enable for debugging. - abort_on_internal_errors = 0 - - # Configuration option global/detect_internal_vg_cache_corruption. - # Internal verification of VG structures. - # Check if CRC matches when a parsed VG is used multiple times. This - # is useful to catch unexpected changes to cached VG structures. - # Please only enable for debugging. 
- detect_internal_vg_cache_corruption = 0 - - # Configuration option global/metadata_read_only. - # No operations that change on-disk metadata are permitted. - # Additionally, read-only commands that encounter metadata in need of - # repair will still be allowed to proceed exactly as if the repair had - # been performed (except for the unchanged vg_seqno). Inappropriate - # use could mess up your system, so seek advice first! - metadata_read_only = 0 - - # Configuration option global/mirror_segtype_default. - # The segment type used by the short mirroring option -m. - # The --type mirror|raid1 option overrides this setting. - # - # Accepted values: - # mirror - # The original RAID1 implementation from LVM/DM. It is - # characterized by a flexible log solution (core, disk, mirrored), - # and by the necessity to block I/O while handling a failure. - # There is an inherent race in the dmeventd failure handling logic - # with snapshots of devices using this type of RAID1 that in the - # worst case could cause a deadlock. (Also see - # devices/ignore_lvm_mirrors.) - # raid1 - # This is a newer RAID1 implementation using the MD RAID1 - # personality through device-mapper. It is characterized by a - # lack of log options. (A log is always allocated for every - # device and they are placed on the same device as the image, - # so no separate devices are required.) This mirror - # implementation does not require I/O to be blocked while - # handling a failure. This mirror implementation is not - # cluster-aware and cannot be used in a shared (active/active) - # fashion in a cluster. - # - mirror_segtype_default = "raid1" - - # Configuration option global/raid10_segtype_default. - # The segment type used by the -i -m combination. - # The --type raid10|mirror option overrides this setting. - # The --stripes/-i and --mirrors/-m options can both be specified - # during the creation of a logical volume to use both striping and - # mirroring for the LV. 
There are two different implementations. - # - # Accepted values: - # raid10 - # LVM uses MD's RAID10 personality through DM. This is the - # preferred option. - # mirror - # LVM layers the 'mirror' and 'stripe' segment types. The layering - # is done by creating a mirror LV on top of striped sub-LVs, - # effectively creating a RAID 0+1 array. The layering is suboptimal - # in terms of providing redundancy and performance. - # - raid10_segtype_default = "raid10" - - # Configuration option global/sparse_segtype_default. - # The segment type used by the -V -L combination. - # The --type snapshot|thin option overrides this setting. - # The combination of -V and -L options creates a sparse LV. There are - # two different implementations. - # - # Accepted values: - # snapshot - # The original snapshot implementation from LVM/DM. It uses an old - # snapshot that mixes data and metadata within a single COW - # storage volume and performs poorly when the size of stored data - # passes hundreds of MB. - # thin - # A newer implementation that uses thin provisioning. It has a - # bigger minimal chunk size (64KiB) and uses a separate volume for - # metadata. It has better performance, especially when more data - # is used. It also supports full snapshots. - # - sparse_segtype_default = "thin" - - # Configuration option global/lvdisplay_shows_full_device_path. - # Enable this to reinstate the previous lvdisplay name format. - # The default format for displaying LV names in lvdisplay was changed - # in version 2.02.89 to show the LV name and path separately. - # Previously this was always shown as /dev/vgname/lvname even when that - # was never a valid path in the /dev filesystem. - # This configuration option has an automatic default value. - # lvdisplay_shows_full_device_path = 0 - - # Configuration option global/use_lvmetad. - # Use lvmetad to cache metadata and reduce disk scanning. 
- # When enabled (and running), lvmetad provides LVM commands with VG - # metadata and PV state. LVM commands then avoid reading this - # information from disks which can be slow. When disabled (or not - # running), LVM commands fall back to scanning disks to obtain VG - # metadata. lvmetad is kept updated via udev rules which must be set - # up for LVM to work correctly. (The udev rules should be installed - # by default.) Without a proper udev setup, changes in the system's - # block device configuration will be unknown to LVM, and ignored - # until a manual 'pvscan --cache' is run. If lvmetad was running - # while use_lvmetad was disabled, it must be stopped, use_lvmetad - # enabled, and then started. When using lvmetad, LV activation is - # switched to an automatic, event-based mode. In this mode, LVs are - # activated based on incoming udev events that inform lvmetad when - # PVs appear on the system. When a VG is complete (all PVs present), - # it is auto-activated. The auto_activation_volume_list setting - # controls which LVs are auto-activated (all by default.) - # When lvmetad is updated (automatically by udev events, or directly - # by pvscan --cache), devices/filter is ignored and all devices are - # scanned by default. lvmetad always keeps unfiltered information - # which is provided to LVM commands. Each LVM command then filters - # based on devices/filter. This does not apply to other, non-regexp, - # filtering settings: component filters such as multipath and MD - # are checked during pvscan --cache. To filter a device and prevent - # scanning from the LVM system entirely, including lvmetad, use - # devices/global_filter. - use_lvmetad = 1 - - # Configuration option global/lvmetad_update_wait_time. - # The number of seconds a command will wait for lvmetad update to finish. - # After waiting for this period, a command will not use lvmetad, and - # will revert to disk scanning. - # This configuration option has an automatic default value. 
- # lvmetad_update_wait_time = 10 - - # Configuration option global/use_lvmlockd. - # Use lvmlockd for locking among hosts using LVM on shared storage. - # Applicable only if LVM is compiled with lockd support in which - # case there is also lvmlockd(8) man page available for more - # information. - use_lvmlockd = 0 - - # Configuration option global/lvmlockd_lock_retries. - # Retry lvmlockd lock requests this many times. - # Applicable only if LVM is compiled with lockd support - # This configuration option has an automatic default value. - # lvmlockd_lock_retries = 3 - - # Configuration option global/sanlock_lv_extend. - # Size in MiB to extend the internal LV holding sanlock locks. - # The internal LV holds locks for each LV in the VG, and after enough - # LVs have been created, the internal LV needs to be extended. lvcreate - # will automatically extend the internal LV when needed by the amount - # specified here. Setting this to 0 disables the automatic extension - # and can cause lvcreate to fail. Applicable only if LVM is compiled - # with lockd support - # This configuration option has an automatic default value. - # sanlock_lv_extend = 256 - - # Configuration option global/thin_check_executable. - # The full path to the thin_check command. - # LVM uses this command to check that a thin metadata device is in a - # usable state. When a thin pool is activated and after it is - # deactivated, this command is run. Activation will only proceed if - # the command has an exit status of 0. Set to "" to skip this check. - # (Not recommended.) Also see thin_check_options. - # (See package device-mapper-persistent-data or thin-provisioning-tools) - # This configuration option has an automatic default value. - # thin_check_executable = "/usr/sbin/thin_check" - - # Configuration option global/thin_dump_executable. - # The full path to the thin_dump command. - # LVM uses this command to dump thin pool metadata. 
- # (See package device-mapper-persistent-data or thin-provisioning-tools) - # This configuration option has an automatic default value. - # thin_dump_executable = "/usr/sbin/thin_dump" - - # Configuration option global/thin_repair_executable. - # The full path to the thin_repair command. - # LVM uses this command to repair a thin metadata device if it is in - # an unusable state. Also see thin_repair_options. - # (See package device-mapper-persistent-data or thin-provisioning-tools) - # This configuration option has an automatic default value. - # thin_repair_executable = "/usr/sbin/thin_repair" - - # Configuration option global/thin_check_options. - # List of options passed to the thin_check command. - # With thin_check version 2.1 or newer you can add the option - # --ignore-non-fatal-errors to let it pass through ignorable errors - # and fix them later. With thin_check version 3.2 or newer you should - # include the option --clear-needs-check-flag. - # This configuration option has an automatic default value. - # thin_check_options = [ "-q", "--clear-needs-check-flag" ] - - # Configuration option global/thin_repair_options. - # List of options passed to the thin_repair command. - # This configuration option has an automatic default value. - # thin_repair_options = [ "" ] - - # Configuration option global/thin_disabled_features. - # Features to not use in the thin driver. - # This can be helpful for testing, or to avoid using a feature that is - # causing problems. Features include: block_size, discards, - # discards_non_power_2, external_origin, metadata_resize, - # external_origin_extend, error_if_no_space. - # - # Example - # thin_disabled_features = [ "discards", "block_size" ] - # - # This configuration option does not have a default value defined. - - # Configuration option global/cache_disabled_features. - # Features to not use in the cache driver. - # This can be helpful for testing, or to avoid using a feature that is - # causing problems. 
Features include: policy_mq, policy_smq. - # - # Example - # cache_disabled_features = [ "policy_smq" ] - # - # This configuration option does not have a default value defined. - - # Configuration option global/cache_check_executable. - # The full path to the cache_check command. - # LVM uses this command to check that a cache metadata device is in a - # usable state. When a cached LV is activated and after it is - # deactivated, this command is run. Activation will only proceed if the - # command has an exit status of 0. Set to "" to skip this check. - # (Not recommended.) Also see cache_check_options. - # (See package device-mapper-persistent-data or thin-provisioning-tools) - # This configuration option has an automatic default value. - # cache_check_executable = "/usr/sbin/cache_check" - - # Configuration option global/cache_dump_executable. - # The full path to the cache_dump command. - # LVM uses this command to dump cache pool metadata. - # (See package device-mapper-persistent-data or thin-provisioning-tools) - # This configuration option has an automatic default value. - # cache_dump_executable = "/usr/sbin/cache_dump" - - # Configuration option global/cache_repair_executable. - # The full path to the cache_repair command. - # LVM uses this command to repair a cache metadata device if it is in - # an unusable state. Also see cache_repair_options. - # (See package device-mapper-persistent-data or thin-provisioning-tools) - # This configuration option has an automatic default value. - # cache_repair_executable = "/usr/sbin/cache_repair" - - # Configuration option global/cache_check_options. - # List of options passed to the cache_check command. - # With cache_check version 5.0 or newer you should include the option - # --clear-needs-check-flag. - # This configuration option has an automatic default value. - # cache_check_options = [ "-q", "--clear-needs-check-flag" ] - - # Configuration option global/cache_repair_options. 
- # List of options passed to the cache_repair command. - # This configuration option has an automatic default value. - # cache_repair_options = [ "" ] - - # Configuration option global/system_id_source. - # The method LVM uses to set the local system ID. - # Volume Groups can also be given a system ID (by vgcreate, vgchange, - # or vgimport.) A VG on shared storage devices is accessible only to - # the host with a matching system ID. See 'man lvmsystemid' for - # information on limitations and correct usage. - # - # Accepted values: - # none - # The host has no system ID. - # lvmlocal - # Obtain the system ID from the system_id setting in the 'local' - # section of an lvm configuration file, e.g. lvmlocal.conf. - # uname - # Set the system ID from the hostname (uname) of the system. - # System IDs beginning localhost are not permitted. - # machineid - # Use the contents of the machine-id file to set the system ID. - # Some systems create this file at installation time. - # See 'man machine-id' and global/etc. - # file - # Use the contents of another file (system_id_file) to set the - # system ID. - # - system_id_source = "none" - - # Configuration option global/system_id_file. - # The full path to the file containing a system ID. - # This is used when system_id_source is set to 'file'. - # Comments starting with the character # are ignored. - # This configuration option does not have a default value defined. - - # Configuration option global/use_lvmpolld. - # Use lvmpolld to supervise long running LVM commands. - # When enabled, control of long running LVM commands is transferred - # from the original LVM command to the lvmpolld daemon. This allows - # the operation to continue independent of the original LVM command. - # After lvmpolld takes over, the LVM command displays the progress - # of the ongoing operation. lvmpolld itself runs LVM commands to - # manage the progress of ongoing operations. 
lvmpolld can be used as - # a native systemd service, which allows it to be started on demand, - # and to use its own control group. When this option is disabled, LVM - # commands will supervise long running operations by forking themselves. - # Applicable only if LVM is compiled with lvmpolld support. - use_lvmpolld = 1 - - # Configuration option global/notify_dbus. - # Enable D-Bus notification from LVM commands. - # When enabled, an LVM command that changes PVs, changes VG metadata, - # or changes the activation state of an LV will send a notification. - notify_dbus = 1 -} - -# Configuration section activation. -activation { - - # Configuration option activation/checks. - # Perform internal checks of libdevmapper operations. - # Useful for debugging problems with activation. Some of the checks may - # be expensive, so it's best to use this only when there seems to be a - # problem. - checks = 0 - - # Configuration option activation/udev_sync. - # Use udev notifications to synchronize udev and LVM. - # The --nodevsync option overrides this setting. - # When disabled, LVM commands will not wait for notifications from - # udev, but continue irrespective of any possible udev processing in - # the background. Only use this if udev is not running or has rules - # that ignore the devices LVM creates. If enabled when udev is not - # running, and LVM processes are waiting for udev, run the command - # 'dmsetup udevcomplete_all' to wake them up. - udev_sync = 0 - - # Configuration option activation/udev_rules. - # Use udev rules to manage LV device nodes and symlinks. - # When disabled, LVM will manage the device nodes and symlinks for - # active LVs itself. Manual intervention may be required if this - # setting is changed while LVs are active. - udev_rules = 0 - - # Configuration option activation/verify_udev_operations. - # Use extra checks in LVM to verify udev operations. 
- # This enables additional checks (and if necessary, repairs) on entries - # in the device directory after udev has completed processing its - # events. Useful for diagnosing problems with LVM/udev interactions. - verify_udev_operations = 0 - - # Configuration option activation/retry_deactivation. - # Retry failed LV deactivation. - # If LV deactivation fails, LVM will retry for a few seconds before - # failing. This may happen because a process run from a quick udev rule - # temporarily opened the device. - retry_deactivation = 1 - - # Configuration option activation/missing_stripe_filler. - # Method to fill missing stripes when activating an incomplete LV. - # Using 'error' will make inaccessible parts of the device return I/O - # errors on access. You can instead use a device path, in which case, - # that device will be used in place of missing stripes. Using anything - # other than 'error' with mirrored or snapshotted volumes is likely to - # result in data corruption. - # This configuration option is advanced. - missing_stripe_filler = "error" - - # Configuration option activation/use_linear_target. - # Use the linear target to optimize single stripe LVs. - # When disabled, the striped target is used. The linear target is an - # optimised version of the striped target that only handles a single - # stripe. - use_linear_target = 1 - - # Configuration option activation/reserved_stack. - # Stack size in KiB to reserve for use while devices are suspended. - # Insufficient reserve risks I/O deadlock during device suspension. - reserved_stack = 64 - - # Configuration option activation/reserved_memory. - # Memory size in KiB to reserve for use while devices are suspended. - # Insufficient reserve risks I/O deadlock during device suspension. - reserved_memory = 8192 - - # Configuration option activation/process_priority. - # Nice value used while devices are suspended. - # Use a high priority so that LVs are suspended - # for the shortest possible time. 
- process_priority = -18 - - # Configuration option activation/volume_list. - # Only LVs selected by this list are activated. - # If this list is defined, an LV is only activated if it matches an - # entry in this list. If this list is undefined, it imposes no limits - # on LV activation (all are allowed). - # - # Accepted values: - # vgname - # The VG name is matched exactly and selects all LVs in the VG. - # vgname/lvname - # The VG name and LV name are matched exactly and selects the LV. - # @tag - # Selects an LV if the specified tag matches a tag set on the LV - # or VG. - # @* - # Selects an LV if a tag defined on the host is also set on the LV - # or VG. See tags/hosttags. If any host tags exist but volume_list - # is not defined, a default single-entry list containing '@*' - # is assumed. - # - # Example - # volume_list = [ "vg1", "vg2/lvol1", "@tag1", "@*" ] - # - # This configuration option does not have a default value defined. - - # Configuration option activation/auto_activation_volume_list. - # Only LVs selected by this list are auto-activated. - # This list works like volume_list, but it is used only by - # auto-activation commands. It does not apply to direct activation - # commands. If this list is defined, an LV is only auto-activated - # if it matches an entry in this list. If this list is undefined, it - # imposes no limits on LV auto-activation (all are allowed.) If this - # list is defined and empty, i.e. "[]", then no LVs are selected for - # auto-activation. An LV that is selected by this list for - # auto-activation, must also be selected by volume_list (if defined) - # before it is activated. Auto-activation is an activation command that - # includes the 'a' argument: --activate ay or -a ay. The 'a' (auto) - # argument for auto-activation is meant to be used by activation - # commands that are run automatically by the system, as opposed to LVM - # commands run directly by a user. 
A user may also use the 'a' flag - # directly to perform auto-activation. Also see pvscan(8) for more - # information about auto-activation. - # - # Accepted values: - # vgname - # The VG name is matched exactly and selects all LVs in the VG. - # vgname/lvname - # The VG name and LV name are matched exactly and selects the LV. - # @tag - # Selects an LV if the specified tag matches a tag set on the LV - # or VG. - # @* - # Selects an LV if a tag defined on the host is also set on the LV - # or VG. See tags/hosttags. If any host tags exist but volume_list - # is not defined, a default single-entry list containing '@*' - # is assumed. - # - # Example - # auto_activation_volume_list = [ "vg1", "vg2/lvol1", "@tag1", "@*" ] - # - # This configuration option does not have a default value defined. - - # Configuration option activation/read_only_volume_list. - # LVs in this list are activated in read-only mode. - # If this list is defined, each LV that is to be activated is checked - # against this list, and if it matches, it is activated in read-only - # mode. This overrides the permission setting stored in the metadata, - # e.g. from --permission rw. - # - # Accepted values: - # vgname - # The VG name is matched exactly and selects all LVs in the VG. - # vgname/lvname - # The VG name and LV name are matched exactly and selects the LV. - # @tag - # Selects an LV if the specified tag matches a tag set on the LV - # or VG. - # @* - # Selects an LV if a tag defined on the host is also set on the LV - # or VG. See tags/hosttags. If any host tags exist but volume_list - # is not defined, a default single-entry list containing '@*' - # is assumed. - # - # Example - # read_only_volume_list = [ "vg1", "vg2/lvol1", "@tag1", "@*" ] - # - # This configuration option does not have a default value defined. - - # Configuration option activation/raid_region_size. - # Size in KiB of each raid or mirror synchronization region. 
- # For raid or mirror segment types, this is the amount of data that is - # copied at once when initializing, or moved at once by pvmove. - raid_region_size = 512 - - # Configuration option activation/error_when_full. - # Return errors if a thin pool runs out of space. - # The --errorwhenfull option overrides this setting. - # When enabled, writes to thin LVs immediately return an error if the - # thin pool is out of data space. When disabled, writes to thin LVs - # are queued if the thin pool is out of space, and processed when the - # thin pool data space is extended. New thin pools are assigned the - # behavior defined here. - # This configuration option has an automatic default value. - # error_when_full = 0 - - # Configuration option activation/readahead. - # Setting to use when there is no readahead setting in metadata. - # - # Accepted values: - # none - # Disable readahead. - # auto - # Use default value chosen by kernel. - # - readahead = "auto" - - # Configuration option activation/raid_fault_policy. - # Defines how a device failure in a RAID LV is handled. - # This includes LVs that have the following segment types: - # raid1, raid4, raid5*, and raid6*. - # If a device in the LV fails, the policy determines the steps - # performed by dmeventd automatically, and the steps performed by the - # manual command lvconvert --repair --use-policies. - # Automatic handling requires dmeventd to be monitoring the LV. - # - # Accepted values: - # warn - # Use the system log to warn the user that a device in the RAID LV - # has failed. It is left to the user to run lvconvert --repair - # manually to remove or replace the failed device. As long as the - # number of failed devices does not exceed the redundancy of the LV - # (1 device for raid4/5, 2 for raid6), the LV will remain usable. - # allocate - # Attempt to use any extra physical volumes in the VG as spares and - # replace faulty devices. 
- # - raid_fault_policy = "warn" - - # Configuration option activation/mirror_image_fault_policy. - # Defines how a device failure in a 'mirror' LV is handled. - # An LV with the 'mirror' segment type is composed of mirror images - # (copies) and a mirror log. A disk log ensures that a mirror LV does - # not need to be re-synced (all copies made the same) every time a - # machine reboots or crashes. If a device in the LV fails, this policy - # determines the steps performed by dmeventd automatically, and the steps - # performed by the manual command lvconvert --repair --use-policies. - # Automatic handling requires dmeventd to be monitoring the LV. - # - # Accepted values: - # remove - # Simply remove the faulty device and run without it. If the log - # device fails, the mirror would convert to using an in-memory log. - # This means the mirror will not remember its sync status across - # crashes/reboots and the entire mirror will be re-synced. If a - # mirror image fails, the mirror will convert to a non-mirrored - # device if there is only one remaining good copy. - # allocate - # Remove the faulty device and try to allocate space on a new - # device to be a replacement for the failed device. Using this - # policy for the log is fast and maintains the ability to remember - # sync state through crashes/reboots. Using this policy for a - # mirror device is slow, as it requires the mirror to resynchronize - # the devices, but it will preserve the mirror characteristic of - # the device. This policy acts like 'remove' if no suitable device - # and space can be allocated for the replacement. - # allocate_anywhere - # Not yet implemented. Useful to place the log device temporarily - # on the same physical volume as one of the mirror images. This - # policy is not recommended for mirror devices since it would break - # the redundant nature of the mirror. This policy acts like - # 'remove' if no suitable device and space can be allocated for the - # replacement. 
- # - mirror_image_fault_policy = "remove" - - # Configuration option activation/mirror_log_fault_policy. - # Defines how a device failure in a 'mirror' log LV is handled. - # The mirror_image_fault_policy description for mirrored LVs also - # applies to mirrored log LVs. - mirror_log_fault_policy = "allocate" - - # Configuration option activation/snapshot_autoextend_threshold. - # Auto-extend a snapshot when its usage exceeds this percent. - # Setting this to 100 disables automatic extension. - # The minimum value is 50 (a smaller value is treated as 50.) - # Also see snapshot_autoextend_percent. - # Automatic extension requires dmeventd to be monitoring the LV. - # - # Example - # Using 70% autoextend threshold and 20% autoextend size, when a 1G - # snapshot exceeds 700M, it is extended to 1.2G, and when it exceeds - # 840M, it is extended to 1.44G: - # snapshot_autoextend_threshold = 70 - # - snapshot_autoextend_threshold = 100 - - # Configuration option activation/snapshot_autoextend_percent. - # Auto-extending a snapshot adds this percent extra space. - # The amount of additional space added to a snapshot is this - # percent of its current size. - # - # Example - # Using 70% autoextend threshold and 20% autoextend size, when a 1G - # snapshot exceeds 700M, it is extended to 1.2G, and when it exceeds - # 840M, it is extended to 1.44G: - # snapshot_autoextend_percent = 20 - # - snapshot_autoextend_percent = 20 - - # Configuration option activation/thin_pool_autoextend_threshold. - # Auto-extend a thin pool when its usage exceeds this percent. - # Setting this to 100 disables automatic extension. - # The minimum value is 50 (a smaller value is treated as 50.) - # Also see thin_pool_autoextend_percent. - # Automatic extension requires dmeventd to be monitoring the LV. 
- # - # Example - # Using 70% autoextend threshold and 20% autoextend size, when a 1G - # thin pool exceeds 700M, it is extended to 1.2G, and when it exceeds - # 840M, it is extended to 1.44G: - # thin_pool_autoextend_threshold = 70 - # - thin_pool_autoextend_threshold = 100 - - # Configuration option activation/thin_pool_autoextend_percent. - # Auto-extending a thin pool adds this percent extra space. - # The amount of additional space added to a thin pool is this - # percent of its current size. - # - # Example - # Using 70% autoextend threshold and 20% autoextend size, when a 1G - # thin pool exceeds 700M, it is extended to 1.2G, and when it exceeds - # 840M, it is extended to 1.44G: - # thin_pool_autoextend_percent = 20 - # - thin_pool_autoextend_percent = 20 - - # Configuration option activation/mlock_filter. - # Do not mlock these memory areas. - # While activating devices, I/O to devices being (re)configured is - # suspended. As a precaution against deadlocks, LVM pins memory it is - # using so it is not paged out, and will not require I/O to reread. - # Groups of pages that are known not to be accessed during activation - # do not need to be pinned into memory. Each string listed in this - # setting is compared against each line in /proc/self/maps, and the - # pages corresponding to lines that match are not pinned. On some - # systems, locale-archive was found to make up over 80% of the memory - # used by the process. - # - # Example - # mlock_filter = [ "locale/locale-archive", "gconv/gconv-modules.cache" ] - # - # This configuration option is advanced. - # This configuration option does not have a default value defined. - - # Configuration option activation/use_mlockall. - # Use the old behavior of mlockall to pin all memory. - # Prior to version 2.02.62, LVM used mlockall() to pin the whole - # process's memory while activating devices. - use_mlockall = 0 - - # Configuration option activation/monitoring. - # Monitor LVs that are activated. 
- # The --ignoremonitoring option overrides this setting. - # When enabled, LVM will ask dmeventd to monitor activated LVs. - monitoring = 1 - - # Configuration option activation/polling_interval. - # Check pvmove or lvconvert progress at this interval (seconds). - # When pvmove or lvconvert must wait for the kernel to finish - # synchronising or merging data, they check and report progress at - # intervals of this number of seconds. If this is set to 0 and there - # is only one thing to wait for, there are no progress reports, but - # the process is awoken immediately once the operation is complete. - polling_interval = 15 - - # Configuration option activation/auto_set_activation_skip. - # Set the activation skip flag on new thin snapshot LVs. - # The --setactivationskip option overrides this setting. - # An LV can have a persistent 'activation skip' flag. The flag causes - # the LV to be skipped during normal activation. The lvchange/vgchange - # -K option is required to activate LVs that have the activation skip - # flag set. When this setting is enabled, the activation skip flag is - # set on new thin snapshot LVs. - # This configuration option has an automatic default value. - # auto_set_activation_skip = 1 - - # Configuration option activation/activation_mode. - # How LVs with missing devices are activated. - # The --activationmode option overrides this setting. - # - # Accepted values: - # complete - # Only allow activation of an LV if all of the Physical Volumes it - # uses are present. Other PVs in the Volume Group may be missing. - # degraded - # Like complete, but additionally RAID LVs of segment type raid1, - # raid4, raid5, raid6 and raid10 will be activated if there is no - # data loss, i.e. they have sufficient redundancy to present the - # entire addressable range of the Logical Volume. - # partial - # Allows the activation of any LV even if a missing or failed PV - # could cause data loss with a portion of the LV inaccessible. 
- # This setting should not normally be used, but may sometimes - # assist with data recovery. - # - activation_mode = "degraded" - - # Configuration option activation/lock_start_list. - # Locking is started only for VGs selected by this list. - # The rules are the same as those for volume_list. - # This configuration option does not have a default value defined. - - # Configuration option activation/auto_lock_start_list. - # Locking is auto-started only for VGs selected by this list. - # The rules are the same as those for auto_activation_volume_list. - # This configuration option does not have a default value defined. -} - -# Configuration section metadata. -# This configuration section has an automatic default value. -# metadata { - - # Configuration option metadata/check_pv_device_sizes. - # Check device sizes are not smaller than corresponding PV sizes. - # If device size is less than corresponding PV size found in metadata, - # there is always a risk of data loss. If this option is set, then LVM - # issues a warning message each time it finds that the device size is - # less than corresponding PV size. You should not disable this unless - # you are absolutely sure about what you are doing! - # This configuration option is advanced. - # This configuration option has an automatic default value. - # check_pv_device_sizes = 1 - - # Configuration option metadata/record_lvs_history. - # When enabled, LVM keeps history records about removed LVs in - # metadata. The information that is recorded in metadata for - # historical LVs is reduced when compared to original - # information kept in metadata for live LVs. Currently, this - # feature is supported for thin and thin snapshot LVs only. - # This configuration option has an automatic default value. - # record_lvs_history = 0 - - # Configuration option metadata/lvs_history_retention_time. - # Retention time in seconds after which a record about individual - # historical logical volume is automatically destroyed. 
- # A value of 0 disables this feature. - # This configuration option has an automatic default value. - # lvs_history_retention_time = 0 - - # Configuration option metadata/pvmetadatacopies. - # Number of copies of metadata to store on each PV. - # The --pvmetadatacopies option overrides this setting. - # - # Accepted values: - # 2 - # Two copies of the VG metadata are stored on the PV, one at the - # front of the PV, and one at the end. - # 1 - # One copy of VG metadata is stored at the front of the PV. - # 0 - # No copies of VG metadata are stored on the PV. This may be - # useful for VGs containing large numbers of PVs. - # - # This configuration option is advanced. - # This configuration option has an automatic default value. - # pvmetadatacopies = 1 - - # Configuration option metadata/vgmetadatacopies. - # Number of copies of metadata to maintain for each VG. - # The --vgmetadatacopies option overrides this setting. - # If set to a non-zero value, LVM automatically chooses which of the - # available metadata areas to use to achieve the requested number of - # copies of the VG metadata. If you set a value larger than the the - # total number of metadata areas available, then metadata is stored in - # them all. The value 0 (unmanaged) disables this automatic management - # and allows you to control which metadata areas are used at the - # individual PV level using pvchange --metadataignore y|n. - # This configuration option has an automatic default value. - # vgmetadatacopies = 0 - - # Configuration option metadata/pvmetadatasize. - # Approximate number of sectors to use for each metadata copy. - # VGs with large numbers of PVs or LVs, or VGs containing complex LV - # structures, may need additional space for VG metadata. The metadata - # areas are treated as circular buffers, so unused space becomes filled - # with an archive of the most recent previous versions of the metadata. - # This configuration option has an automatic default value. 
- # pvmetadatasize = 255 - - # Configuration option metadata/pvmetadataignore. - # Ignore metadata areas on a new PV. - # The --metadataignore option overrides this setting. - # If metadata areas on a PV are ignored, LVM will not store metadata - # in them. - # This configuration option is advanced. - # This configuration option has an automatic default value. - # pvmetadataignore = 0 - - # Configuration option metadata/stripesize. - # This configuration option is advanced. - # This configuration option has an automatic default value. - # stripesize = 64 - - # Configuration option metadata/dirs. - # Directories holding live copies of text format metadata. - # These directories must not be on logical volumes! - # It's possible to use LVM with a couple of directories here, - # preferably on different (non-LV) filesystems, and with no other - # on-disk metadata (pvmetadatacopies = 0). Or this can be in addition - # to on-disk metadata areas. The feature was originally added to - # simplify testing and is not supported under low memory situations - - # the machine could lock up. Never edit any files in these directories - # by hand unless you are absolutely sure you know what you are doing! - # Use the supplied toolset to make changes (e.g. vgcfgrestore). - # - # Example - # dirs = [ "/etc/lvm/metadata", "/mnt/disk2/lvm/metadata2" ] - # - # This configuration option is advanced. - # This configuration option does not have a default value defined. -# } - -# Configuration section report. -# LVM report command output formatting. -# This configuration section has an automatic default value. -# report { - - # Configuration option report/output_format. - # Format of LVM command's report output. - # If there is more than one report per command, then the format - # is applied for all reports. You can also change output format - # directly on command line using --reportformat option which - # has precedence over log/output_format setting. 
- # Accepted values: - # basic - # Original format with columns and rows. If there is more than - # one report per command, each report is prefixed with report's - # name for identification. - # json - # JSON format. - # This configuration option has an automatic default value. - # output_format = "basic" - - # Configuration option report/compact_output. - # Do not print empty values for all report fields. - # If enabled, all fields that don't have a value set for any of the - # rows reported are skipped and not printed. Compact output is - # applicable only if report/buffered is enabled. If you need to - # compact only specified fields, use compact_output=0 and define - # report/compact_output_cols configuration setting instead. - # This configuration option has an automatic default value. - # compact_output = 0 - - # Configuration option report/compact_output_cols. - # Do not print empty values for specified report fields. - # If defined, specified fields that don't have a value set for any - # of the rows reported are skipped and not printed. Compact output - # is applicable only if report/buffered is enabled. If you need to - # compact all fields, use compact_output=1 instead in which case - # the compact_output_cols setting is then ignored. - # This configuration option has an automatic default value. - # compact_output_cols = "" - - # Configuration option report/aligned. - # Align columns in report output. - # This configuration option has an automatic default value. - # aligned = 1 - - # Configuration option report/buffered. - # Buffer report output. - # When buffered reporting is used, the report's content is appended - # incrementally to include each object being reported until the report - # is flushed to output which normally happens at the end of command - # execution. Otherwise, if buffering is not used, each object is - # reported as soon as its processing is finished. - # This configuration option has an automatic default value. 
- # buffered = 1 - - # Configuration option report/headings. - # Show headings for columns on report. - # This configuration option has an automatic default value. - # headings = 1 - - # Configuration option report/separator. - # A separator to use on report after each field. - # This configuration option has an automatic default value. - # separator = " " - - # Configuration option report/list_item_separator. - # A separator to use for list items when reported. - # This configuration option has an automatic default value. - # list_item_separator = "," - - # Configuration option report/prefixes. - # Use a field name prefix for each field reported. - # This configuration option has an automatic default value. - # prefixes = 0 - - # Configuration option report/quoted. - # Quote field values when using field name prefixes. - # This configuration option has an automatic default value. - # quoted = 1 - - # Configuration option report/columns_as_rows. - # Output each column as a row. - # If set, this also implies report/prefixes=1. - # This configuration option has an automatic default value. - # columns_as_rows = 0 - - # Configuration option report/binary_values_as_numeric. - # Use binary values 0 or 1 instead of descriptive literal values. - # For columns that have exactly two valid values to report - # (not counting the 'unknown' value which denotes that the - # value could not be determined). - # This configuration option has an automatic default value. - # binary_values_as_numeric = 0 - - # Configuration option report/time_format. - # Set time format for fields reporting time values. - # Format specification is a string which may contain special character - # sequences and ordinary character sequences. Ordinary character - # sequences are copied verbatim. Each special character sequence is - # introduced by the '%' character and such sequence is then - # substituted with a value as described below. 
- # - # Accepted values: - # %a - # The abbreviated name of the day of the week according to the - # current locale. - # %A - # The full name of the day of the week according to the current - # locale. - # %b - # The abbreviated month name according to the current locale. - # %B - # The full month name according to the current locale. - # %c - # The preferred date and time representation for the current - # locale (alt E) - # %C - # The century number (year/100) as a 2-digit integer. (alt E) - # %d - # The day of the month as a decimal number (range 01 to 31). - # (alt O) - # %D - # Equivalent to %m/%d/%y. (For Americans only. Americans should - # note that in other countries%d/%m/%y is rather common. This - # means that in international context this format is ambiguous and - # should not be used. - # %e - # Like %d, the day of the month as a decimal number, but a leading - # zero is replaced by a space. (alt O) - # %E - # Modifier: use alternative local-dependent representation if - # available. - # %F - # Equivalent to %Y-%m-%d (the ISO 8601 date format). - # %G - # The ISO 8601 week-based year with century as adecimal number. - # The 4-digit year corresponding to the ISO week number (see %V). - # This has the same format and value as %Y, except that if the - # ISO week number belongs to the previous or next year, that year - # is used instead. - # %g - # Like %G, but without century, that is, with a 2-digit year - # (00-99). - # %h - # Equivalent to %b. - # %H - # The hour as a decimal number using a 24-hour clock - # (range 00 to 23). (alt O) - # %I - # The hour as a decimal number using a 12-hour clock - # (range 01 to 12). (alt O) - # %j - # The day of the year as a decimal number (range 001 to 366). - # %k - # The hour (24-hour clock) as a decimal number (range 0 to 23); - # single digits are preceded by a blank. (See also %H.) - # %l - # The hour (12-hour clock) as a decimal number (range 1 to 12); - # single digits are preceded by a blank. (See also %I.) 
- # %m - # The month as a decimal number (range 01 to 12). (alt O) - # %M - # The minute as a decimal number (range 00 to 59). (alt O) - # %O - # Modifier: use alternative numeric symbols. - # %p - # Either "AM" or "PM" according to the given time value, - # or the corresponding strings for the current locale. Noon is - # treated as "PM" and midnight as "AM". - # %P - # Like %p but in lowercase: "am" or "pm" or a corresponding - # string for the current locale. - # %r - # The time in a.m. or p.m. notation. In the POSIX locale this is - # equivalent to %I:%M:%S %p. - # %R - # The time in 24-hour notation (%H:%M). For a version including - # the seconds, see %T below. - # %s - # The number of seconds since the Epoch, - # 1970-01-01 00:00:00 +0000 (UTC) - # %S - # The second as a decimal number (range 00 to 60). (The range is - # up to 60 to allow for occasional leap seconds.) (alt O) - # %t - # A tab character. - # %T - # The time in 24-hour notation (%H:%M:%S). - # %u - # The day of the week as a decimal, range 1 to 7, Monday being 1. - # See also %w. (alt O) - # %U - # The week number of the current year as a decimal number, - # range 00 to 53, starting with the first Sunday as the first - # day of week 01. See also %V and %W. (alt O) - # %V - # The ISO 8601 week number of the current year as a decimal number, - # range 01 to 53, where week 1 is the first week that has at least - # 4 days in the new year. See also %U and %W. (alt O) - # %w - # The day of the week as a decimal, range 0 to 6, Sunday being 0. - # See also %u. (alt O) - # %W - # The week number of the current year as a decimal number, - # range 00 to 53, starting with the first Monday as the first day - # of week 01. (alt O) - # %x - # The preferred date representation for the current locale without - # the time. (alt E) - # %X - # The preferred time representation for the current locale without - # the date. (alt E) - # %y - # The year as a decimal number without a century (range 00 to 99). 
- # (alt E, alt O) - # %Y - # The year as a decimal number including the century. (alt E) - # %z - # The +hhmm or -hhmm numeric timezone (that is, the hour and minute - # offset from UTC). - # %Z - # The timezone name or abbreviation. - # %% - # A literal '%' character. - # - # This configuration option has an automatic default value. - # time_format = "%Y-%m-%d %T %z" - - # Configuration option report/devtypes_sort. - # List of columns to sort by when reporting 'lvm devtypes' command. - # See 'lvm devtypes -o help' for the list of possible fields. - # This configuration option has an automatic default value. - # devtypes_sort = "devtype_name" - - # Configuration option report/devtypes_cols. - # List of columns to report for 'lvm devtypes' command. - # See 'lvm devtypes -o help' for the list of possible fields. - # This configuration option has an automatic default value. - # devtypes_cols = "devtype_name,devtype_max_partitions,devtype_description" - - # Configuration option report/devtypes_cols_verbose. - # List of columns to report for 'lvm devtypes' command in verbose mode. - # See 'lvm devtypes -o help' for the list of possible fields. - # This configuration option has an automatic default value. - # devtypes_cols_verbose = "devtype_name,devtype_max_partitions,devtype_description" - - # Configuration option report/lvs_sort. - # List of columns to sort by when reporting 'lvs' command. - # See 'lvs -o help' for the list of possible fields. - # This configuration option has an automatic default value. - # lvs_sort = "vg_name,lv_name" - - # Configuration option report/lvs_cols. - # List of columns to report for 'lvs' command. - # See 'lvs -o help' for the list of possible fields. - # This configuration option has an automatic default value. - # lvs_cols = "lv_name,vg_name,lv_attr,lv_size,pool_lv,origin,data_percent,metadata_percent,move_pv,mirror_log,copy_percent,convert_lv" - - # Configuration option report/lvs_cols_verbose. 
- # List of columns to report for 'lvs' command in verbose mode. - # See 'lvs -o help' for the list of possible fields. - # This configuration option has an automatic default value. - # lvs_cols_verbose = "lv_name,vg_name,seg_count,lv_attr,lv_size,lv_major,lv_minor,lv_kernel_major,lv_kernel_minor,pool_lv,origin,data_percent,metadata_percent,move_pv,copy_percent,mirror_log,convert_lv,lv_uuid,lv_profile" - - # Configuration option report/vgs_sort. - # List of columns to sort by when reporting 'vgs' command. - # See 'vgs -o help' for the list of possible fields. - # This configuration option has an automatic default value. - # vgs_sort = "vg_name" - - # Configuration option report/vgs_cols. - # List of columns to report for 'vgs' command. - # See 'vgs -o help' for the list of possible fields. - # This configuration option has an automatic default value. - # vgs_cols = "vg_name,pv_count,lv_count,snap_count,vg_attr,vg_size,vg_free" - - # Configuration option report/vgs_cols_verbose. - # List of columns to report for 'vgs' command in verbose mode. - # See 'vgs -o help' for the list of possible fields. - # This configuration option has an automatic default value. - # vgs_cols_verbose = "vg_name,vg_attr,vg_extent_size,pv_count,lv_count,snap_count,vg_size,vg_free,vg_uuid,vg_profile" - - # Configuration option report/pvs_sort. - # List of columns to sort by when reporting 'pvs' command. - # See 'pvs -o help' for the list of possible fields. - # This configuration option has an automatic default value. - # pvs_sort = "pv_name" - - # Configuration option report/pvs_cols. - # List of columns to report for 'pvs' command. - # See 'pvs -o help' for the list of possible fields. - # This configuration option has an automatic default value. - # pvs_cols = "pv_name,vg_name,pv_fmt,pv_attr,pv_size,pv_free" - - # Configuration option report/pvs_cols_verbose. - # List of columns to report for 'pvs' command in verbose mode. - # See 'pvs -o help' for the list of possible fields. 
- # This configuration option has an automatic default value. - # pvs_cols_verbose = "pv_name,vg_name,pv_fmt,pv_attr,pv_size,pv_free,dev_size,pv_uuid" - - # Configuration option report/segs_sort. - # List of columns to sort by when reporting 'lvs --segments' command. - # See 'lvs --segments -o help' for the list of possible fields. - # This configuration option has an automatic default value. - # segs_sort = "vg_name,lv_name,seg_start" - - # Configuration option report/segs_cols. - # List of columns to report for 'lvs --segments' command. - # See 'lvs --segments -o help' for the list of possible fields. - # This configuration option has an automatic default value. - # segs_cols = "lv_name,vg_name,lv_attr,stripes,segtype,seg_size" - - # Configuration option report/segs_cols_verbose. - # List of columns to report for 'lvs --segments' command in verbose mode. - # See 'lvs --segments -o help' for the list of possible fields. - # This configuration option has an automatic default value. - # segs_cols_verbose = "lv_name,vg_name,lv_attr,seg_start,seg_size,stripes,segtype,stripesize,chunksize" - - # Configuration option report/pvsegs_sort. - # List of columns to sort by when reporting 'pvs --segments' command. - # See 'pvs --segments -o help' for the list of possible fields. - # This configuration option has an automatic default value. - # pvsegs_sort = "pv_name,pvseg_start" - - # Configuration option report/pvsegs_cols. - # List of columns to sort by when reporting 'pvs --segments' command. - # See 'pvs --segments -o help' for the list of possible fields. - # This configuration option has an automatic default value. - # pvsegs_cols = "pv_name,vg_name,pv_fmt,pv_attr,pv_size,pv_free,pvseg_start,pvseg_size" - - # Configuration option report/pvsegs_cols_verbose. - # List of columns to sort by when reporting 'pvs --segments' command in verbose mode. - # See 'pvs --segments -o help' for the list of possible fields. - # This configuration option has an automatic default value. 
- # pvsegs_cols_verbose = "pv_name,vg_name,pv_fmt,pv_attr,pv_size,pv_free,pvseg_start,pvseg_size,lv_name,seg_start_pe,segtype,seg_pe_ranges" - - # Configuration option report/vgs_cols_full. - # List of columns to report for lvm fullreport's 'vgs' subreport. - # See 'vgs -o help' for the list of possible fields. - # This configuration option has an automatic default value. - # vgs_cols_full = "vg_all" - - # Configuration option report/pvs_cols_full. - # List of columns to report for lvm fullreport's 'vgs' subreport. - # See 'pvs -o help' for the list of possible fields. - # This configuration option has an automatic default value. - # pvs_cols_full = "pv_all" - - # Configuration option report/lvs_cols_full. - # List of columns to report for lvm fullreport's 'lvs' subreport. - # See 'lvs -o help' for the list of possible fields. - # This configuration option has an automatic default value. - # lvs_cols_full = "lv_all" - - # Configuration option report/pvsegs_cols_full. - # List of columns to report for lvm fullreport's 'pvseg' subreport. - # See 'pvs --segments -o help' for the list of possible fields. - # This configuration option has an automatic default value. - # pvsegs_cols_full = "pvseg_all,pv_uuid,lv_uuid" - - # Configuration option report/segs_cols_full. - # List of columns to report for lvm fullreport's 'seg' subreport. - # See 'lvs --segments -o help' for the list of possible fields. - # This configuration option has an automatic default value. - # segs_cols_full = "seg_all,lv_uuid" - - # Configuration option report/vgs_sort_full. - # List of columns to sort by when reporting lvm fullreport's 'vgs' subreport. - # See 'vgs -o help' for the list of possible fields. - # This configuration option has an automatic default value. - # vgs_sort_full = "vg_name" - - # Configuration option report/pvs_sort_full. - # List of columns to sort by when reporting lvm fullreport's 'vgs' subreport. - # See 'pvs -o help' for the list of possible fields. 
- # This configuration option has an automatic default value. - # pvs_sort_full = "pv_name" - - # Configuration option report/lvs_sort_full. - # List of columns to sort by when reporting lvm fullreport's 'lvs' subreport. - # See 'lvs -o help' for the list of possible fields. - # This configuration option has an automatic default value. - # lvs_sort_full = "vg_name,lv_name" - - # Configuration option report/pvsegs_sort_full. - # List of columns to sort by when reporting for lvm fullreport's 'pvseg' subreport. - # See 'pvs --segments -o help' for the list of possible fields. - # This configuration option has an automatic default value. - # pvsegs_sort_full = "pv_uuid,pvseg_start" - - # Configuration option report/segs_sort_full. - # List of columns to sort by when reporting lvm fullreport's 'seg' subreport. - # See 'lvs --segments -o help' for the list of possible fields. - # This configuration option has an automatic default value. - # segs_sort_full = "lv_uuid,seg_start" - - # Configuration option report/mark_hidden_devices. - # Use brackets [] to mark hidden devices. - # This configuration option has an automatic default value. - # mark_hidden_devices = 1 - - # Configuration option report/two_word_unknown_device. - # Use the two words 'unknown device' in place of '[unknown]'. - # This is displayed when the device for a PV is not known. - # This configuration option has an automatic default value. - # two_word_unknown_device = 0 -# } - -# Configuration section dmeventd. -# Settings for the LVM event daemon. -dmeventd { - - # Configuration option dmeventd/mirror_library. - # The library dmeventd uses when monitoring a mirror device. - # libdevmapper-event-lvm2mirror.so attempts to recover from - # failures. It removes failed devices from a volume group and - # reconfigures a mirror as necessary. If no mirror library is - # provided, mirrors are not monitored through dmeventd. 
- mirror_library = "libdevmapper-event-lvm2mirror.so" - - # Configuration option dmeventd/raid_library. - # This configuration option has an automatic default value. - # raid_library = "libdevmapper-event-lvm2raid.so" - - # Configuration option dmeventd/snapshot_library. - # The library dmeventd uses when monitoring a snapshot device. - # libdevmapper-event-lvm2snapshot.so monitors the filling of snapshots - # and emits a warning through syslog when the usage exceeds 80%. The - # warning is repeated when 85%, 90% and 95% of the snapshot is filled. - snapshot_library = "libdevmapper-event-lvm2snapshot.so" - - # Configuration option dmeventd/thin_library. - # The library dmeventd uses when monitoring a thin device. - # libdevmapper-event-lvm2thin.so monitors the filling of a pool - # and emits a warning through syslog when the usage exceeds 80%. The - # warning is repeated when 85%, 90% and 95% of the pool is filled. - thin_library = "libdevmapper-event-lvm2thin.so" - - # Configuration option dmeventd/executable. - # The full path to the dmeventd binary. - # This configuration option has an automatic default value. - # executable = "/sbin/dmeventd" -} - -# Configuration section tags. -# Host tag settings. -# This configuration section has an automatic default value. -# tags { - - # Configuration option tags/hosttags. - # Create a host tag using the machine name. - # The machine name is nodename returned by uname(2). - # This configuration option has an automatic default value. - # hosttags = 0 - - # Configuration section tags/. - # Replace this subsection name with a custom tag name. - # Multiple subsections like this can be created. The '@' prefix for - # tags is optional. This subsection can contain host_list, which is a - # list of machine names. If the name of the local machine is found in - # host_list, then the name of this subsection is used as a tag and is - # applied to the local machine as a 'host tag'. 
If this subsection is - # empty (has no host_list), then the subsection name is always applied - # as a 'host tag'. - # - # Example - # The host tag foo is given to all hosts, and the host tag - # bar is given to the hosts named machine1 and machine2. - # tags { foo { } bar { host_list = [ "machine1", "machine2" ] } } - # - # This configuration section has variable name. - # This configuration section has an automatic default value. - # tag { - - # Configuration option tags//host_list. - # A list of machine names. - # These machine names are compared to the nodename returned - # by uname(2). If the local machine name matches an entry in - # this list, the name of the subsection is applied to the - # machine as a 'host tag'. - # This configuration option does not have a default value defined. - # } -# } diff --git a/contrib/block-box/etc-cinder/api-paste.ini b/contrib/block-box/etc-cinder/api-paste.ini deleted file mode 100644 index a761f53d0..000000000 --- a/contrib/block-box/etc-cinder/api-paste.ini +++ /dev/null @@ -1,75 +0,0 @@ -############# -# OpenStack # -############# - -[composite:osapi_volume] -use = call:cinder.api:root_app_factory -/: apiversions -/v1: openstack_volume_api_v1 -/v2: openstack_volume_api_v2 -/v3: openstack_volume_api_v3 - -[composite:openstack_volume_api_v1] -use = call:cinder.api.middleware.auth:pipeline_factory -noauth = cors http_proxy_to_wsgi request_id faultwrap sizelimit osprofiler noauth apiv1 -keystone = cors http_proxy_to_wsgi request_id faultwrap sizelimit osprofiler authtoken keystonecontext apiv1 -keystone_nolimit = cors http_proxy_to_wsgi request_id faultwrap sizelimit osprofiler authtoken keystonecontext apiv1 - -[composite:openstack_volume_api_v2] -use = call:cinder.api.middleware.auth:pipeline_factory -noauth = cors http_proxy_to_wsgi request_id faultwrap sizelimit osprofiler noauth apiv2 -keystone = cors http_proxy_to_wsgi request_id faultwrap sizelimit osprofiler authtoken keystonecontext apiv2 -keystone_nolimit = cors 
http_proxy_to_wsgi request_id faultwrap sizelimit osprofiler authtoken keystonecontext apiv2 - -[composite:openstack_volume_api_v3] -use = call:cinder.api.middleware.auth:pipeline_factory -noauth = cors http_proxy_to_wsgi request_id faultwrap sizelimit osprofiler noauth apiv3 -keystone = cors http_proxy_to_wsgi request_id faultwrap sizelimit osprofiler authtoken keystonecontext apiv3 -keystone_nolimit = cors http_proxy_to_wsgi request_id faultwrap sizelimit osprofiler authtoken keystonecontext apiv3 - -[filter:request_id] -paste.filter_factory = oslo_middleware.request_id:RequestId.factory - -[filter:http_proxy_to_wsgi] -paste.filter_factory = oslo_middleware.http_proxy_to_wsgi:HTTPProxyToWSGI.factory - -[filter:cors] -paste.filter_factory = oslo_middleware.cors:filter_factory -oslo_config_project = cinder - -[filter:faultwrap] -paste.filter_factory = cinder.api.middleware.fault:FaultWrapper.factory - -[filter:osprofiler] -paste.filter_factory = osprofiler.web:WsgiMiddleware.factory - -[filter:noauth] -paste.filter_factory = cinder.api.middleware.auth:NoAuthMiddleware.factory - -[filter:sizelimit] -paste.filter_factory = oslo_middleware.sizelimit:RequestBodySizeLimiter.factory - -[app:apiv1] -paste.app_factory = cinder.api.v1.router:APIRouter.factory - -[app:apiv2] -paste.app_factory = cinder.api.v2.router:APIRouter.factory - -[app:apiv3] -paste.app_factory = cinder.api.v3.router:APIRouter.factory - -[pipeline:apiversions] -pipeline = cors http_proxy_to_wsgi faultwrap osvolumeversionapp - -[app:osvolumeversionapp] -paste.app_factory = cinder.api.versions:Versions.factory - -########## -# Shared # -########## - -[filter:keystonecontext] -paste.filter_factory = cinder.api.middleware.auth:CinderKeystoneContext.factory - -[filter:authtoken] -paste.filter_factory = keystonemiddleware.auth_token:filter_factory diff --git a/contrib/block-box/etc-cinder/cinder.conf b/contrib/block-box/etc-cinder/cinder.conf deleted file mode 100644 index 1f6db3ea9..000000000 --- 
a/contrib/block-box/etc-cinder/cinder.conf +++ /dev/null @@ -1,23 +0,0 @@ -[DEFAULT] -rootwrap_config = /etc/cinder/rootwrap.conf -api_paste_confg = /etc/cinder/api-paste.ini -volume_name_template = volume-%s -volume_group = cinder-volumes -verbose = True -auth_strategy = noauth -state_path = /var/lib/cinder -lock_path = /var/lock/cinder -volumes_dir = /var/lib/cinder/volumes -rabbit_host = rabbitmq -enabled_backends = lvm - -[database] -connection = mysql+pymysql://cinder:password@mariadb/cinder?charset=utf8 - -[lvm] -lvm_type = thin -volume_driver = cinder.volume.drivers.lvm.LVMVolumeDriver -volume_group = cinder-volumes -iscsi_protocol = iscsi -iscsi_helper = tgtadm -volume_backend_name=lvm diff --git a/contrib/block-box/etc-cinder/cinder.conf.keystone b/contrib/block-box/etc-cinder/cinder.conf.keystone deleted file mode 100644 index 23bf7a8c5..000000000 --- a/contrib/block-box/etc-cinder/cinder.conf.keystone +++ /dev/null @@ -1,35 +0,0 @@ -[DEFAULT] -rootwrap_config = /etc/cinder/rootwrap.conf -api_paste_confg = /etc/cinder/api-paste.ini -iscsi_helper = tgtadm -volume_name_template = volume-%s -volume_group = cinder-volumes -verbose = True -auth_strategy = keystone -state_path = /var/lib/cinder -lock_path = /var/lock/cinder -volumes_dir = /var/lib/cinder/volumes -rabbit_host = rabbitmq -enabled_backends = solidfire - -[database] -connection = mysql+pymysql://cinder:password@mariadb/cinder?charset=utf8 - -[keystone_authtoken] -auth_uri = http://keystone:5000 -auth_url = http://keystone:35357 -memcached_servers = keystone:11211 -auth_type = password -project_domain_name = default -user_domain_name = default -project_name = service -username = cinder -password = password - -[solidfire] -volume_driver=cinder.volume.drivers.solidfire.SolidFireDriver -san_ip=10.117.36.101 -san_login=admin -san_password=admin -volume_backend_name=solidfire -sf_allow_template_caching=false diff --git a/contrib/block-box/etc-cinder/cinder.conf.sf 
b/contrib/block-box/etc-cinder/cinder.conf.sf deleted file mode 100644 index d4397f1fa..000000000 --- a/contrib/block-box/etc-cinder/cinder.conf.sf +++ /dev/null @@ -1,24 +0,0 @@ -[DEFAULT] -rootwrap_config = /etc/cinder/rootwrap.conf -api_paste_confg = /etc/cinder/api-paste.ini -iscsi_helper = tgtadm -volume_name_template = volume-%s -volume_group = cinder-volumes -verbose = True -auth_strategy = noauth -state_path = /var/lib/cinder -lock_path = /var/lock/cinder -volumes_dir = /var/lib/cinder/volumes -rabbit_host = rabbitmq -enabled_backends = solidfire - -[database] -connection = mysql+pymysql://cinder:password@mariadb/cinder?charset=utf8 - -[solidfire] -volume_driver=cinder.volume.drivers.solidfire.SolidFireDriver -san_ip=10.117.36.101 -san_login=admin -san_password=admin -volume_backend_name=solidfire -sf_allow_template_caching=false diff --git a/contrib/block-box/etc-cinder/cinder.conf.solidfire b/contrib/block-box/etc-cinder/cinder.conf.solidfire deleted file mode 100644 index 2cbe4392b..000000000 --- a/contrib/block-box/etc-cinder/cinder.conf.solidfire +++ /dev/null @@ -1,23 +0,0 @@ -[DEFAULT] -rootwrap_config = /etc/cinder/rootwrap.conf -api_paste_confg = /etc/cinder/api-paste.ini -volume_name_template = volume-%s -volume_group = cinder-volumes -verbose = True -auth_strategy = noauth -state_path = /var/lib/cinder -lock_path = /var/lock/cinder -volumes_dir = /var/lib/cinder/volumes -rabbit_host = rabbitmq -enabled_backends = solidfire - -[database] -connection = mysql+pymysql://cinder:password@mariadb/cinder?charset=utf8 - -[solidfire] -volume_driver=cinder.volume.drivers.solidfire.SolidFireDriver -san_ip=10.117.36.51 -san_login=admin -san_password=admin -volume_backend_name=solidfire -sf_allow_template_caching=false diff --git a/contrib/block-box/etc-cinder/logging.conf b/contrib/block-box/etc-cinder/logging.conf deleted file mode 100644 index 476425b8b..000000000 --- a/contrib/block-box/etc-cinder/logging.conf +++ /dev/null @@ -1,76 +0,0 @@ -[loggers] 
-keys = root, cinder - -[handlers] -keys = stderr, stdout, watchedfile, syslog, null - -[formatters] -keys = legacycinder, default - -[logger_root] -level = WARNING -handlers = null - -[logger_cinder] -level = INFO -handlers = stderr -qualname = cinder - -[logger_amqplib] -level = WARNING -handlers = stderr -qualname = amqplib - -[logger_sqlalchemy] -level = WARNING -handlers = stderr -qualname = sqlalchemy -# "level = INFO" logs SQL queries. -# "level = DEBUG" logs SQL queries and results. -# "level = WARNING" logs neither. (Recommended for production systems.) - -[logger_boto] -level = WARNING -handlers = stderr -qualname = boto - -[logger_suds] -level = INFO -handlers = stderr -qualname = suds - -[logger_eventletwsgi] -level = WARNING -handlers = stderr -qualname = eventlet.wsgi.server - -[handler_stderr] -class = StreamHandler -args = (sys.stderr,) -formatter = legacycinder - -[handler_stdout] -class = StreamHandler -args = (sys.stdout,) -formatter = legacycinder - -[handler_watchedfile] -class = handlers.WatchedFileHandler -args = ('cinder.log',) -formatter = legacycinder - -[handler_syslog] -class = handlers.SysLogHandler -args = ('/dev/log', handlers.SysLogHandler.LOG_USER) -formatter = legacycinder - -[handler_null] -class = cinder.log.NullHandler -formatter = default -args = () - -[formatter_legacycinder] -class = cinder.log.LegacyCinderFormatter - -[formatter_default] -format = %(message)s diff --git a/contrib/block-box/etc-cinder/policy.json b/contrib/block-box/etc-cinder/policy.json deleted file mode 100644 index 6a651220e..000000000 --- a/contrib/block-box/etc-cinder/policy.json +++ /dev/null @@ -1,141 +0,0 @@ -{ - "admin_or_owner": "is_admin:True or (role:admin and is_admin_project:True) or project_id:%(project_id)s", - "default": "rule:admin_or_owner", - - "admin_api": "is_admin:True or (role:admin and is_admin_project:True)", - - "volume:create": "", - "volume:create_from_image": "", - "volume:delete": "rule:admin_or_owner", - "volume:force_delete": 
"rule:admin_api", - "volume:get": "rule:admin_or_owner", - "volume:get_all": "rule:admin_or_owner", - "volume:get_volume_metadata": "rule:admin_or_owner", - "volume:create_volume_metadata": "rule:admin_or_owner", - "volume:delete_volume_metadata": "rule:admin_or_owner", - "volume:update_volume_metadata": "rule:admin_or_owner", - "volume:get_volume_admin_metadata": "rule:admin_api", - "volume:update_volume_admin_metadata": "rule:admin_api", - "volume:get_snapshot": "rule:admin_or_owner", - "volume:get_all_snapshots": "rule:admin_or_owner", - "volume:create_snapshot": "rule:admin_or_owner", - "volume:delete_snapshot": "rule:admin_or_owner", - "volume:update_snapshot": "rule:admin_or_owner", - "volume:get_snapshot_metadata": "rule:admin_or_owner", - "volume:delete_snapshot_metadata": "rule:admin_or_owner", - "volume:update_snapshot_metadata": "rule:admin_or_owner", - "volume:extend": "rule:admin_or_owner", - "volume:update_readonly_flag": "rule:admin_or_owner", - "volume:retype": "rule:admin_or_owner", - "volume:update": "rule:admin_or_owner", - - "volume_extension:types_manage": "rule:admin_api", - "volume_extension:types_extra_specs": "rule:admin_api", - "volume_extension:access_types_qos_specs_id": "rule:admin_api", - "volume_extension:access_types_extra_specs": "rule:admin_api", - "volume_extension:volume_type_access": "rule:admin_or_owner", - "volume_extension:volume_type_access:addProjectAccess": "rule:admin_api", - "volume_extension:volume_type_access:removeProjectAccess": "rule:admin_api", - "volume_extension:volume_type_encryption": "rule:admin_api", - "volume_extension:volume_encryption_metadata": "rule:admin_or_owner", - "volume_extension:extended_snapshot_attributes": "rule:admin_or_owner", - "volume_extension:volume_image_metadata": "rule:admin_or_owner", - - "volume_extension:quotas:show": "", - "volume_extension:quotas:update": "rule:admin_api", - "volume_extension:quotas:delete": "rule:admin_api", - "volume_extension:quota_classes": "rule:admin_api", - 
"volume_extension:quota_classes:validate_setup_for_nested_quota_use": "rule:admin_api", - - "volume_extension:volume_admin_actions:reset_status": "rule:admin_api", - "volume_extension:snapshot_admin_actions:reset_status": "rule:admin_api", - "volume_extension:backup_admin_actions:reset_status": "rule:admin_api", - "volume_extension:volume_admin_actions:force_delete": "rule:admin_api", - "volume_extension:volume_admin_actions:force_detach": "rule:admin_api", - "volume_extension:snapshot_admin_actions:force_delete": "rule:admin_api", - "volume_extension:backup_admin_actions:force_delete": "rule:admin_api", - "volume_extension:volume_admin_actions:migrate_volume": "rule:admin_api", - "volume_extension:volume_admin_actions:migrate_volume_completion": "rule:admin_api", - - "volume_extension:volume_actions:upload_public": "rule:admin_api", - "volume_extension:volume_actions:upload_image": "rule:admin_or_owner", - - "volume_extension:volume_host_attribute": "rule:admin_api", - "volume_extension:volume_tenant_attribute": "rule:admin_or_owner", - "volume_extension:volume_mig_status_attribute": "rule:admin_api", - "volume_extension:hosts": "rule:admin_api", - "volume_extension:services:index": "rule:admin_api", - "volume_extension:services:update" : "rule:admin_api", - - "volume_extension:volume_manage": "rule:admin_api", - "volume_extension:volume_unmanage": "rule:admin_api", - "volume_extension:list_manageable": "rule:admin_api", - - "volume_extension:capabilities": "rule:admin_api", - - "volume:create_transfer": "rule:admin_or_owner", - "volume:accept_transfer": "", - "volume:delete_transfer": "rule:admin_or_owner", - "volume:get_transfer": "rule:admin_or_owner", - "volume:get_all_transfers": "rule:admin_or_owner", - - "volume:failover_host": "rule:admin_api", - "volume:freeze_host": "rule:admin_api", - "volume:thaw_host": "rule:admin_api", - - "backup:create" : "", - "backup:delete": "rule:admin_or_owner", - "backup:get": "rule:admin_or_owner", - "backup:get_all": 
"rule:admin_or_owner", - "backup:restore": "rule:admin_or_owner", - "backup:backup-import": "rule:admin_api", - "backup:backup-export": "rule:admin_api", - "backup:update": "rule:admin_or_owner", - "backup:backup_project_attribute": "rule:admin_api", - - "snapshot_extension:snapshot_actions:update_snapshot_status": "", - "snapshot_extension:snapshot_manage": "rule:admin_api", - "snapshot_extension:snapshot_unmanage": "rule:admin_api", - "snapshot_extension:list_manageable": "rule:admin_api", - - "consistencygroup:create" : "group:nobody", - "consistencygroup:delete": "group:nobody", - "consistencygroup:update": "group:nobody", - "consistencygroup:get": "group:nobody", - "consistencygroup:get_all": "group:nobody", - - "consistencygroup:create_cgsnapshot" : "group:nobody", - "consistencygroup:delete_cgsnapshot": "group:nobody", - "consistencygroup:get_cgsnapshot": "group:nobody", - "consistencygroup:get_all_cgsnapshots": "group:nobody", - - "group:group_types_manage": "rule:admin_api", - "group:group_types_specs": "rule:admin_api", - "group:access_group_types_specs": "rule:admin_api", - "group:group_type_access": "rule:admin_or_owner", - - "group:create" : "", - "group:delete": "rule:admin_or_owner", - "group:update": "rule:admin_or_owner", - "group:get": "rule:admin_or_owner", - "group:get_all": "rule:admin_or_owner", - - "group:create_group_snapshot": "", - "group:delete_group_snapshot": "rule:admin_or_owner", - "group:update_group_snapshot": "rule:admin_or_owner", - "group:get_group_snapshot": "rule:admin_or_owner", - "group:get_all_group_snapshots": "rule:admin_or_owner", - "group:reset_group_snapshot_status":"rule:admin_api", - "group:reset_status":"rule:admin_api", - - "scheduler_extension:scheduler_stats:get_pools" : "rule:admin_api", - "message:delete": "rule:admin_or_owner", - "message:get": "rule:admin_or_owner", - "message:get_all": "rule:admin_or_owner", - - "clusters:get": "rule:admin_api", - "clusters:get_all": "rule:admin_api", - "clusters:update": 
"rule:admin_api", - - "workers:cleanup": "rule:admin_api" -} diff --git a/contrib/block-box/etc-cinder/rootwrap.conf b/contrib/block-box/etc-cinder/rootwrap.conf deleted file mode 100644 index 4d280eae5..000000000 --- a/contrib/block-box/etc-cinder/rootwrap.conf +++ /dev/null @@ -1,27 +0,0 @@ -# Configuration for cinder-rootwrap -# This file should be owned by (and only-writeable by) the root user - -[DEFAULT] -# List of directories to load filter definitions from (separated by ','). -# These directories MUST all be only writeable by root ! -filters_path=/etc/cinder/rootwrap.d,/usr/share/cinder/rootwrap - -# List of directories to search executables in, in case filters do not -# explicitely specify a full path (separated by ',') -# If not specified, defaults to system PATH environment variable. -# These directories MUST all be only writeable by root ! -exec_dirs=/sbin,/usr/sbin,/bin,/usr/bin,/usr/local/bin,/usr/local/sbin - -# Enable logging to syslog -# Default value is False -use_syslog=False - -# Which syslog facility to use. -# Valid values include auth, authpriv, syslog, local0, local1... -# Default value is 'syslog' -syslog_log_facility=syslog - -# Which messages to log. -# INFO means log all usage -# ERROR means only log unsuccessful attempts -syslog_log_level=ERROR diff --git a/contrib/block-box/init-scripts/cinder-api.sh b/contrib/block-box/init-scripts/cinder-api.sh deleted file mode 100644 index a26df0093..000000000 --- a/contrib/block-box/init-scripts/cinder-api.sh +++ /dev/null @@ -1,7 +0,0 @@ -#!/bin/bash -INIT_DB=${INIT_DB:-true} - -if [ "$INIT_DB" = "true" ]; then -/bin/sh -c "cinder-manage db sync" -fi -cinder-api -d diff --git a/contrib/block-box/test-requirements.txt b/contrib/block-box/test-requirements.txt deleted file mode 100644 index 339f80aeb..000000000 --- a/contrib/block-box/test-requirements.txt +++ /dev/null @@ -1,27 +0,0 @@ -# The order of packages is significant, because pip processes them in the order -# of appearance. 
Changing the order has an impact on the overall integration -# process, which may cause wedges in the gate later. - -# Install bounded pep8/pyflakes first, then let flake8 install -hacking!=0.13.0,<0.14,>=0.12.0 # Apache-2.0 - -coverage>=4.0 # Apache-2.0 -ddt>=1.0.1 # MIT -fixtures>=3.0.0 # Apache-2.0/BSD -mock>=2.0 # BSD -os-api-ref>=1.0.0 # Apache-2.0 -oslotest>=1.10.0 # Apache-2.0 -sphinx>=1.5.1 # BSD -PyMySQL>=0.7.6 # MIT License -psycopg2>=2.5 # LGPL/ZPL -python-subunit>=0.0.18 # Apache-2.0/BSD -testtools>=1.4.0 # MIT -testresources>=0.2.4 # Apache-2.0/BSD -testscenarios>=0.4 # Apache-2.0/BSD -oslosphinx>=4.7.0 # Apache-2.0 -oslo.versionedobjects[fixtures]>=1.17.0 # Apache-2.0 -os-testr>=0.8.0 # Apache-2.0 -tempest>=14.0.0 # Apache-2.0 -bandit>=1.1.0 # Apache-2.0 -reno>=1.8.0 # Apache-2.0 -doc8 # Apache-2.0 diff --git a/doc/.gitignore b/doc/.gitignore deleted file mode 100644 index 3bdf75a34..000000000 --- a/doc/.gitignore +++ /dev/null @@ -1,3 +0,0 @@ -_build/* -source/contributor/api/* -.autogenerated diff --git a/doc/README.rst b/doc/README.rst deleted file mode 100644 index 9c698bcbe..000000000 --- a/doc/README.rst +++ /dev/null @@ -1,33 +0,0 @@ -======================= -Cinder Development Docs -======================= - -Files under this directory tree are used for generating the documentation -for the Cinder source code. - -Developer documentation is built to: -http://docs.openstack.org/developer/cinder/ - -Tools -===== - -Sphinx - The Python Sphinx package is used to generate the documentation output. - Information on Sphinx, including formatting information for RST source - files, can be found in the - `Sphinx online documentation `_. - -Graphviz - Some of the diagrams are generated using the ``dot`` language - from Graphviz. See the `Graphviz documentation `_ - for Graphviz and dot language usage information. - - -Building Documentation -====================== - -Doc builds are performed using tox with the ``docs`` target:: - - % cd .. 
- % tox -e docs - diff --git a/doc/ext/__init__.py b/doc/ext/__init__.py deleted file mode 100644 index e69de29bb..000000000 diff --git a/doc/ext/cinder_autodoc.py b/doc/ext/cinder_autodoc.py deleted file mode 100644 index 1d49fbbe5..000000000 --- a/doc/ext/cinder_autodoc.py +++ /dev/null @@ -1,26 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from __future__ import print_function - -import gettext -import os - -gettext.install('cinder') - -from cinder import utils - - -def setup(app): - print("**Autodocumenting from %s" % os.path.abspath(os.curdir)) - rv = utils.execute('./doc/generate_autodoc_index.sh') - print(rv[0]) diff --git a/doc/ext/cinder_driverlist.py b/doc/ext/cinder_driverlist.py deleted file mode 100644 index 5a7a61926..000000000 --- a/doc/ext/cinder_driverlist.py +++ /dev/null @@ -1,24 +0,0 @@ -# Copyright 2016 Dell Inc. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
-# - -from cinder import utils - - -def setup(app): - print('** Generating driver list...') - rv = utils.execute('./tools/generate_driver_list.py', ['docs']) - print(rv[0]) - diff --git a/doc/find_autodoc_modules.sh b/doc/find_autodoc_modules.sh deleted file mode 100755 index fb7e451a0..000000000 --- a/doc/find_autodoc_modules.sh +++ /dev/null @@ -1,20 +0,0 @@ -#!/bin/bash - -CINDER_DIR='cinder/' # include trailing slash -DOCS_DIR='source' - -modules='' -for x in `find ${CINDER_DIR} -name '*.py' | grep -v cinder/tests`; do - if [ `basename ${x} .py` == "__init__" ] ; then - continue - fi - relative=cinder.`echo ${x} | sed -e 's$^'${CINDER_DIR}'$$' -e 's/.py$//' -e 's$/$.$g'` - modules="${modules} ${relative}" -done - -for mod in ${modules} ; do - if [ ! -f "${DOCS_DIR}/${mod}.rst" ]; - then - echo ${mod} - fi -done diff --git a/doc/generate_autodoc_index.sh b/doc/generate_autodoc_index.sh deleted file mode 100755 index 335f72cfa..000000000 --- a/doc/generate_autodoc_index.sh +++ /dev/null @@ -1,46 +0,0 @@ -#!/bin/sh - -SOURCEDIR=doc/source/contributor/api - -if [ ! -d ${SOURCEDIR} ] ; then - mkdir -p ${SOURCEDIR} -fi - -for x in `./doc/find_autodoc_modules.sh`; -do - echo "Generating ${SOURCEDIR}/${x}.rst" - echo "${SOURCEDIR}/${x}.rst" >> .autogenerated - heading="The :mod:\`${x}\` Module" - # Figure out how long the heading is - # and make sure to emit that many '=' under - # it to avoid heading format errors - # in Sphinx. - heading_len=$(echo "$heading" | wc -c) - underline=$(head -c $heading_len < /dev/zero | tr '\0' '=') - ( cat < ${SOURCEDIR}/${x}.rst - -done - -if [ ! 
-f ${SOURCEDIR}/autoindex.rst ] ; then - - cat > ${SOURCEDIR}/autoindex.rst <> ${SOURCEDIR}/autoindex.rst - done - - echo ${SOURCEDIR}/autoindex.rst >> .autogenerated -fi diff --git a/doc/source/_ga/layout.html b/doc/source/_ga/layout.html deleted file mode 100644 index f29e90968..000000000 --- a/doc/source/_ga/layout.html +++ /dev/null @@ -1,17 +0,0 @@ -{% extends "!layout.html" %} - -{% block footer %} -{{ super() }} - - -{% endblock %} - diff --git a/doc/source/_static/.placeholder b/doc/source/_static/.placeholder deleted file mode 100644 index e69de29bb..000000000 diff --git a/doc/source/admin/README.rst b/doc/source/admin/README.rst deleted file mode 100644 index 5debfdd7e..000000000 --- a/doc/source/admin/README.rst +++ /dev/null @@ -1,16 +0,0 @@ -=================================== -Cinder Administration Documentation -=================================== - -Introduction: -------------- - -This directory is intended to hold any documentation that relates to -how to run or operate Cinder. Previously, this content was in the -admin-guide section of openstack-manuals. - -The full spec for organization of documentation may be seen in the -`OS Manuals Migration Spec -`. - - diff --git a/doc/source/admin/blockstorage-api-throughput.rst b/doc/source/admin/blockstorage-api-throughput.rst deleted file mode 100644 index 06c7ca996..000000000 --- a/doc/source/admin/blockstorage-api-throughput.rst +++ /dev/null @@ -1,34 +0,0 @@ -============================================= -Increase Block Storage API service throughput -============================================= - -By default, the Block Storage API service runs in one process. This -limits the number of API requests that the Block Storage service can -process at any given time. In a production environment, you should -increase the Block Storage API throughput by allowing the Block Storage -API service to run in as many processes as the machine capacity allows. - -.. 
note:: - - The Block Storage API service is named ``openstack-cinder-api`` on - the following distributions: CentOS, Fedora, openSUSE, Red Hat - Enterprise Linux, and SUSE Linux Enterprise. In Ubuntu and Debian - distributions, the Block Storage API service is named ``cinder-api``. - -To do so, use the Block Storage API service option ``osapi_volume_workers``. -This option allows you to specify the number of API service workers -(or OS processes) to launch for the Block Storage API service. - -To configure this option, open the ``/etc/cinder/cinder.conf`` -configuration file and set the ``osapi_volume_workers`` configuration -key to the number of CPU cores/threads on a machine. - -On distributions that include ``openstack-config``, you can configure -this by running the following command instead: - -.. code-block:: console - - # openstack-config --set /etc/cinder/cinder.conf \ - DEFAULT osapi_volume_workers CORES - -Replace ``CORES`` with the number of CPU cores/threads on a machine. diff --git a/doc/source/admin/blockstorage-backup-disks.rst b/doc/source/admin/blockstorage-backup-disks.rst deleted file mode 100644 index a41695501..000000000 --- a/doc/source/admin/blockstorage-backup-disks.rst +++ /dev/null @@ -1,266 +0,0 @@ -=================================== -Back up Block Storage service disks -=================================== - -While you can use the LVM snapshot to create snapshots, you can also use -it to back up your volumes. By using LVM snapshot, you reduce the size -of the backup; only existing data is backed up instead of the entire -volume. - -To back up a volume, you must create a snapshot of it. An LVM snapshot -is the exact copy of a logical volume, which contains data in a frozen -state. This prevents data corruption because data cannot be manipulated -during the volume creation process. Remember that the volumes created -through an :command:`openstack volume create` command exist in an LVM -logical volume. 
- -You must also make sure that the operating system is not using the -volume and that all data has been flushed on the guest file systems. -This usually means that those file systems have to be unmounted during -the snapshot creation. They can be mounted again as soon as the logical -volume snapshot has been created. - -Before you create the snapshot you must have enough space to save it. -As a precaution, you should have at least twice as much space as the -potential snapshot size. If insufficient space is available, the snapshot -might become corrupted. - -For this example assume that a 100 GB volume named ``volume-00000001`` -was created for an instance while only 4 GB are used. This example uses -these commands to back up only those 4 GB: - -* :command:`lvm2` command. Directly manipulates the volumes. - -* :command:`kpartx` command. Discovers the partition table created inside the - instance. - -* :command:`tar` command. Creates a minimum-sized backup. - -* :command:`sha1sum` command. Calculates the backup checksum to check its - consistency. - -You can apply this process to volumes of any size. - -**To back up Block Storage service disks** - -#. Create a snapshot of a used volume - - * Use this command to list all volumes - - .. code-block:: console - - # lvdisplay - - * Create the snapshot; you can do this while the volume is attached - to an instance: - - .. code-block:: console - - # lvcreate --size 10G --snapshot --name volume-00000001-snapshot \ - /dev/cinder-volumes/volume-00000001 - - Use the ``--snapshot`` configuration option to tell LVM that you want a - snapshot of an already existing volume. The command includes the size - of the space reserved for the snapshot volume, the name of the snapshot, - and the path of an already existing volume. Generally, this path - is ``/dev/cinder-volumes/VOLUME_NAME``. - - The size does not have to be the same as the volume of the snapshot. 
- The ``--size`` parameter defines the space that LVM reserves - for the snapshot volume. As a precaution, the size should be the same - as that of the original volume, even if the whole space is not - currently used by the snapshot. - - * Run the :command:`lvdisplay` command again to verify the snapshot: - - .. code-block:: console - - --- Logical volume --- - LV Name /dev/cinder-volumes/volume-00000001 - VG Name cinder-volumes - LV UUID gI8hta-p21U-IW2q-hRN1-nTzN-UC2G-dKbdKr - LV Write Access read/write - LV snapshot status source of - /dev/cinder-volumes/volume-00000026-snap [active] - LV Status available - # open 1 - LV Size 15,00 GiB - Current LE 3840 - Segments 1 - Allocation inherit - Read ahead sectors auto - - currently set to 256 - Block device 251:13 - - --- Logical volume --- - LV Name /dev/cinder-volumes/volume-00000001-snap - VG Name cinder-volumes - LV UUID HlW3Ep-g5I8-KGQb-IRvi-IRYU-lIKe-wE9zYr - LV Write Access read/write - LV snapshot status active destination for /dev/cinder-volumes/volume-00000026 - LV Status available - # open 0 - LV Size 15,00 GiB - Current LE 3840 - COW-table size 10,00 GiB - COW-table LE 2560 - Allocated to snapshot 0,00% - Snapshot chunk size 4,00 KiB - Segments 1 - Allocation inherit - Read ahead sectors auto - - currently set to 256 - Block device 251:14 - -#. Partition table discovery - - * To exploit the snapshot with the :command:`tar` command, mount - your partition on the Block Storage service server. - - The :command:`kpartx` utility discovers and maps table partitions. - You can use it to view partitions that are created inside the - instance. Without using the partitions created inside instances, - you cannot see its content and create efficient backups. - - .. code-block:: console - - # kpartx -av /dev/cinder-volumes/volume-00000001-snapshot - - .. note:: - - On a Debian-based distribution, you can use the - :command:`apt-get install kpartx` command to install - :command:`kpartx`. 
- - If the tools successfully find and map the partition table, - no errors are returned. - - * To check the partition table map, run this command: - - .. code-block:: console - - $ ls /dev/mapper/nova* - - You can see the ``cinder--volumes-volume--00000001--snapshot1`` - partition. - - If you created more than one partition on that volume, you see - several partitions; for example: - ``cinder--volumes-volume--00000001--snapshot2``, - ``cinder--volumes-volume--00000001--snapshot3``, and so on. - - * Mount your partition - - .. code-block:: console - - # mount /dev/mapper/cinder--volumes-volume--volume--00000001--snapshot1 /mnt - - If the partition mounts successfully, no errors are returned. - - You can directly access the data inside the instance. If a message - prompts you for a partition or you cannot mount it, determine whether - enough space was allocated for the snapshot or the :command:`kpartx` - command failed to discover the partition table. - - Allocate more space to the snapshot and try the process again. - -#. Use the :command:`tar` command to create archives - - Create a backup of the volume: - - .. code-block:: console - - $ tar --exclude="lost+found" --exclude="some/data/to/exclude" -czf \ - volume-00000001.tar.gz -C /mnt/ /backup/destination - - This command creates a ``tar.gz`` file that contains the data, - *and data only*. This ensures that you do not waste space by backing - up empty sectors. - -#. Checksum calculation I - - You should always have the checksum for your backup files. When you - transfer the same file over the network, you can run a checksum - calculation to ensure that your file was not corrupted during its - transfer. The checksum is a unique ID for a file. If the checksums are - different, the file is corrupted. - - Run this command to run a checksum for your file and save the result - to a file: - - .. code-block:: console - - $ sha1sum volume-00000001.tar.gz > volume-00000001.checksum - - .. 
note:: - - Use the :command:`sha1sum` command carefully because the time it - takes to complete the calculation is directly proportional to the - size of the file. - - Depending on your CPU, the process might take a long time for - files larger than around 4 to 6 GB. - -#. After work cleaning - - Now that you have an efficient and consistent backup, use this command - to clean up the file system: - - * Unmount the volume. - - .. code-block:: console - - $ umount /mnt - - * Delete the partition table. - - .. code-block:: console - - $ kpartx -dv /dev/cinder-volumes/volume-00000001-snapshot - - * Remove the snapshot. - - .. code-block:: console - - $ lvremove -f /dev/cinder-volumes/volume-00000001-snapshot - - Repeat these steps for all your volumes. - -#. Automate your backups - - Because more and more volumes might be allocated to your Block Storage - service, you might want to automate your backups. - The `SCR_5005_V01_NUAC-OPENSTACK-EBS-volumes-backup.sh`_ script assists - you with this task. The script performs the operations from the previous - example, but also provides a mail report and runs the backup based on - the ``backups_retention_days`` setting. - - Launch this script from the server that runs the Block Storage service. - - This example shows a mail report: - - .. code-block:: console - - Backup Start Time - 07/10 at 01:00:01 - Current retention - 7 days - - The backup volume is mounted. Proceed... - Removing old backups... : /BACKUPS/EBS-VOL/volume-00000019/volume-00000019_28_09_2011.tar.gz - /BACKUPS/EBS-VOL/volume-00000019 - 0 h 1 m and 21 seconds. Size - 3,5G - - The backup volume is mounted. Proceed... - Removing old backups... : /BACKUPS/EBS-VOL/volume-0000001a/volume-0000001a_28_09_2011.tar.gz - /BACKUPS/EBS-VOL/volume-0000001a - 0 h 4 m and 15 seconds. 
Size - 6,9G - --------------------------------------- - Total backups size - 267G - Used space : 35% - Total execution time - 1 h 75 m and 35 seconds - - The script also enables you to SSH to your instances and run a - :command:`mysqldump` command into them. To make this work, enable - the connection to the Compute project keys. If you do not want to - run the :command:`mysqldump` command, you can add - ``enable_mysql_dump=0`` to the script to turn off this functionality. - - -.. Links -.. _`SCR_5005_V01_NUAC-OPENSTACK-EBS-volumes-backup.sh`: https://github.com/Razique/BashStuff/blob/master/SYSTEMS/OpenStack/SCR_5005_V01_NUAC-OPENSTACK-EBS-volumes-backup.sh diff --git a/doc/source/admin/blockstorage-boot-from-volume.rst b/doc/source/admin/blockstorage-boot-from-volume.rst deleted file mode 100644 index ca58dfe95..000000000 --- a/doc/source/admin/blockstorage-boot-from-volume.rst +++ /dev/null @@ -1,10 +0,0 @@ -================ -Boot from volume -================ - -In some cases, you can store and run instances from inside volumes. -For information, see the `Launch an instance from a volume`_ section -in the `OpenStack End User Guide`_. - -.. _`Launch an instance from a volume`: https://docs.openstack.org/user-guide/cli-nova-launch-instance-from-volume.html -.. _`OpenStack End User Guide`: https://docs.openstack.org/user-guide/ diff --git a/doc/source/admin/blockstorage-consistency-groups.rst b/doc/source/admin/blockstorage-consistency-groups.rst deleted file mode 100644 index 00f19f898..000000000 --- a/doc/source/admin/blockstorage-consistency-groups.rst +++ /dev/null @@ -1,355 +0,0 @@ -================== -Consistency groups -================== - -Consistency group support is available in OpenStack Block Storage. The -support is added for creating snapshots of consistency groups. This -feature leverages the storage level consistency technology. 
It allows -snapshots of multiple volumes in the same consistency group to be taken -at the same point-in-time to ensure data consistency. The consistency -group operations can be performed using the Block Storage command line. - -.. note:: - - Only Block Storage V2 API supports consistency groups. You can - specify ``--os-volume-api-version 2`` when using Block Storage - command line for consistency group operations. - -Before using consistency groups, make sure the Block Storage driver that -you are running has consistency group support by reading the Block -Storage manual or consulting the driver maintainer. There are a small -number of drivers that have implemented this feature. The default LVM -driver does not support consistency groups yet because the consistency -technology is not available at the storage level. - -Before using consistency groups, you must change policies for the -consistency group APIs in the ``/etc/cinder/policy.json`` file. -By default, the consistency group APIs are disabled. -Enable them before running consistency group operations. - -Here are existing policy entries for consistency groups: - -.. code-block:: json - - { - "consistencygroup:create": "group:nobody" - "consistencygroup:delete": "group:nobody", - "consistencygroup:update": "group:nobody", - "consistencygroup:get": "group:nobody", - "consistencygroup:get_all": "group:nobody", - "consistencygroup:create_cgsnapshot" : "group:nobody", - "consistencygroup:delete_cgsnapshot": "group:nobody", - "consistencygroup:get_cgsnapshot": "group:nobody", - "consistencygroup:get_all_cgsnapshots": "group:nobody", - } - -Remove ``group:nobody`` to enable these APIs: - -.. 
code-block:: json - - { - "consistencygroup:create": "", - "consistencygroup:delete": "", - "consistencygroup:update": "", - "consistencygroup:get": "", - "consistencygroup:get_all": "", - "consistencygroup:create_cgsnapshot" : "", - "consistencygroup:delete_cgsnapshot": "", - "consistencygroup:get_cgsnapshot": "", - "consistencygroup:get_all_cgsnapshots": "", - } - - -Restart Block Storage API service after changing policies. - -The following consistency group operations are supported: - -- Create a consistency group, given volume types. - - .. note:: - - A consistency group can support more than one volume type. The - scheduler is responsible for finding a back end that can support - all given volume types. - - A consistency group can only contain volumes hosted by the same - back end. - - A consistency group is empty upon its creation. Volumes need to - be created and added to it later. - -- Show a consistency group. - -- List consistency groups. - -- Create a volume and add it to a consistency group, given volume type - and consistency group id. - -- Create a snapshot for a consistency group. - -- Show a snapshot of a consistency group. - -- List consistency group snapshots. - -- Delete a snapshot of a consistency group. - -- Delete a consistency group. - -- Modify a consistency group. - -- Create a consistency group from the snapshot of another consistency - group. - -- Create a consistency group from a source consistency group. - -The following operations are not allowed if a volume is in a consistency -group: - -- Volume migration. - -- Volume retype. - -- Volume deletion. - - .. note:: - - A consistency group has to be deleted as a whole with all the - volumes. - -The following operations are not allowed if a volume snapshot is in a -consistency group snapshot: - -- Volume snapshot deletion. - - .. note:: - - A consistency group snapshot has to be deleted as a whole with - all the volume snapshots. 
- -The details of consistency group operations are shown in the following. - -.. note:: - - Currently, no OpenStack client command is available to run in - place of the cinder consistency group creation commands. Use the - cinder commands detailed in the following examples. - -**Create a consistency group**: - -.. code-block:: console - - cinder consisgroup-create - [--name name] - [--description description] - [--availability-zone availability-zone] - volume-types - -.. note:: - - The parameter ``volume-types`` is required. It can be a list of - names or UUIDs of volume types separated by commas without spaces in - between. For example, ``volumetype1,volumetype2,volumetype3.``. - -.. code-block:: console - - $ cinder consisgroup-create --name bronzeCG2 volume_type_1 - - +-------------------+--------------------------------------+ - | Property | Value | - +-------------------+--------------------------------------+ - | availability_zone | nova | - | created_at | 2014-12-29T12:59:08.000000 | - | description | None | - | id | 1de80c27-3b2f-47a6-91a7-e867cbe36462 | - | name | bronzeCG2 | - | status | creating | - +-------------------+--------------------------------------+ - -**Show a consistency group**: - -.. code-block:: console - - $ cinder consisgroup-show 1de80c27-3b2f-47a6-91a7-e867cbe36462 - - +-------------------+--------------------------------------+ - | Property | Value | - +-------------------+--------------------------------------+ - | availability_zone | nova | - | created_at | 2014-12-29T12:59:08.000000 | - | description | None | - | id | 2a6b2bda-1f43-42ce-9de8-249fa5cbae9a | - | name | bronzeCG2 | - | status | available | - | volume_types | volume_type_1 | - +-------------------+--------------------------------------+ - -**List consistency groups**: - -.. 
code-block:: console - - $ cinder consisgroup-list - - +--------------------------------------+-----------+-----------+ - | ID | Status | Name | - +--------------------------------------+-----------+-----------+ - | 1de80c27-3b2f-47a6-91a7-e867cbe36462 | available | bronzeCG2 | - | 3a2b3c42-b612-479a-91eb-1ed45b7f2ad5 | error | bronzeCG | - +--------------------------------------+-----------+-----------+ - -**Create a volume and add it to a consistency group**: - -.. note:: - - When creating a volume and adding it to a consistency group, a - volume type and a consistency group id must be provided. This is - because a consistency group can support more than one volume type. - -.. code-block:: console - - $ openstack volume create --type volume_type_1 --consistency-group \ - 1de80c27-3b2f-47a6-91a7-e867cbe36462 --size 1 cgBronzeVol - - +---------------------------------------+--------------------------------------+ - | Field | Value | - +---------------------------------------+--------------------------------------+ - | attachments | [] | - | availability_zone | nova | - | bootable | false | - | consistencygroup_id | 1de80c27-3b2f-47a6-91a7-e867cbe36462 | - | created_at | 2014-12-29T13:16:47.000000 | - | description | None | - | encrypted | False | - | id | 5e6d1386-4592-489f-a56b-9394a81145fe | - | metadata | {} | - | name | cgBronzeVol | - | os-vol-host-attr:host | server-1@backend-1#pool-1 | - | os-vol-mig-status-attr:migstat | None | - | os-vol-mig-status-attr:name_id | None | - | os-vol-tenant-attr:tenant_id | 1349b21da2a046d8aa5379f0ed447bed | - | os-volume-replication:driver_data | None | - | os-volume-replication:extended_status | None | - | replication_status | disabled | - | size | 1 | - | snapshot_id | None | - | source_volid | None | - | status | creating | - | user_id | 93bdea12d3e04c4b86f9a9f172359859 | - | volume_type | volume_type_1 | - +---------------------------------------+--------------------------------------+ - -**Create a snapshot for a 
consistency group**: - -.. code-block:: console - - $ cinder cgsnapshot-create 1de80c27-3b2f-47a6-91a7-e867cbe36462 - - +---------------------+--------------------------------------+ - | Property | Value | - +---------------------+--------------------------------------+ - | consistencygroup_id | 1de80c27-3b2f-47a6-91a7-e867cbe36462 | - | created_at | 2014-12-29T13:19:44.000000 | - | description | None | - | id | d4aff465-f50c-40b3-b088-83feb9b349e9 | - | name | None | - | status | creating | - +---------------------+-------------------------------------+ - -**Show a snapshot of a consistency group**: - -.. code-block:: console - - $ cinder cgsnapshot-show d4aff465-f50c-40b3-b088-83feb9b349e9 - -**List consistency group snapshots**: - -.. code-block:: console - - $ cinder cgsnapshot-list - - +--------------------------------------+--------+----------+ - | ID | Status | Name | - +--------------------------------------+--------+----------+ - | 6d9dfb7d-079a-471e-b75a-6e9185ba0c38 | available | None | - | aa129f4d-d37c-4b97-9e2d-7efffda29de0 | available | None | - | bb5b5d82-f380-4a32-b469-3ba2e299712c | available | None | - | d4aff465-f50c-40b3-b088-83feb9b349e9 | available | None | - +--------------------------------------+--------+----------+ - -**Delete a snapshot of a consistency group**: - -.. code-block:: console - - $ cinder cgsnapshot-delete d4aff465-f50c-40b3-b088-83feb9b349e9 - -**Delete a consistency group**: - -.. note:: - - The force flag is needed when there are volumes in the consistency - group: - - .. code-block:: console - - $ cinder consisgroup-delete --force 1de80c27-3b2f-47a6-91a7-e867cbe36462 - -**Modify a consistency group**: - -.. code-block:: console - - cinder consisgroup-update - [--name NAME] - [--description DESCRIPTION] - [--add-volumes UUID1,UUID2,......] - [--remove-volumes UUID3,UUID4,......] - CG - -The parameter ``CG`` is required. It can be a name or UUID of a consistency -group. UUID1,UUID2,...... 
are UUIDs of one or more volumes to be added -to the consistency group, separated by commas. Default is None. -UUID3,UUID4,...... are UUIDs of one or more volumes to be removed from -the consistency group, separated by commas. Default is None. - -.. code-block:: console - - $ cinder consisgroup-update --name 'new name' \ - --description 'new description' \ - --add-volumes 0b3923f5-95a4-4596-a536-914c2c84e2db,1c02528b-3781-4e32-929c-618d81f52cf3 \ - --remove-volumes 8c0f6ae4-efb1-458f-a8fc-9da2afcc5fb1,a245423f-bb99-4f94-8c8c-02806f9246d8 \ - 1de80c27-3b2f-47a6-91a7-e867cbe36462 - -**Create a consistency group from the snapshot of another consistency -group**: - -.. code-block:: console - - $ cinder consisgroup-create-from-src - [--cgsnapshot CGSNAPSHOT] - [--name NAME] - [--description DESCRIPTION] - -The parameter ``CGSNAPSHOT`` is a name or UUID of a snapshot of a -consistency group: - -.. code-block:: console - - $ cinder consisgroup-create-from-src \ - --cgsnapshot 6d9dfb7d-079a-471e-b75a-6e9185ba0c38 \ - --name 'new cg' --description 'new cg from cgsnapshot' - -**Create a consistency group from a source consistency group**: - -.. code-block:: console - - $ cinder consisgroup-create-from-src - [--source-cg SOURCECG] - [--name NAME] - [--description DESCRIPTION] - -The parameter ``SOURCECG`` is a name or UUID of a source -consistency group: - -.. code-block:: console - - $ cinder consisgroup-create-from-src \ - --source-cg 6d9dfb7d-079a-471e-b75a-6e9185ba0c38 \ - --name 'new cg' --description 'new cloned cg' diff --git a/doc/source/admin/blockstorage-driver-filter-weighing.rst b/doc/source/admin/blockstorage-driver-filter-weighing.rst deleted file mode 100644 index f045b5576..000000000 --- a/doc/source/admin/blockstorage-driver-filter-weighing.rst +++ /dev/null @@ -1,373 +0,0 @@ -.. 
_filter_weigh_scheduler: - -========================================================== -Configure and use driver filter and weighing for scheduler -========================================================== - -OpenStack Block Storage enables you to choose a volume back end based on -back-end specific properties by using the DriverFilter and -GoodnessWeigher for the scheduler. The driver filter and weigher -scheduling can help ensure that the scheduler chooses the best back end -based on requested volume properties as well as various back-end -specific properties. - -What is driver filter and weigher and when to use it -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -The driver filter and weigher gives you the ability to more finely -control how the OpenStack Block Storage scheduler chooses the best back -end to use when handling a volume request. One example scenario where -using the driver filter and weigher can be if a back end that utilizes -thin-provisioning is used. The default filters use the ``free capacity`` -property to determine the best back end, but that is not always perfect. -If a back end has the ability to provide a more accurate back-end -specific value you can use that as part of the weighing. Another example -of when the driver filter and weigher can prove useful is if a back end -exists where there is a hard limit of 1000 volumes. The maximum volume -size is 500 GB. Once 75% of the total space is occupied the performance -of the back end degrades. The driver filter and weigher can provide a -way for these limits to be checked for. - -Enable driver filter and weighing -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -To enable the driver filter, set the ``scheduler_default_filters`` option in -the ``cinder.conf`` file to ``DriverFilter`` or add it to the list if -other filters are already present. 
- -To enable the goodness filter as a weigher, set the -``scheduler_default_weighers`` option in the ``cinder.conf`` file to -``GoodnessWeigher`` or add it to the list if other weighers are already -present. - -You can choose to use the ``DriverFilter`` without the -``GoodnessWeigher`` or vice-versa. The filter and weigher working -together, however, create the most benefits when helping the scheduler -choose an ideal back end. - -.. important:: - - The support for the ``DriverFilter`` and ``GoodnessWeigher`` is - optional for back ends. If you are using a back end that does not - support the filter and weigher functionality you may not get the - full benefit. - -Example ``cinder.conf`` configuration file: - -.. code-block:: ini - - scheduler_default_filters = DriverFilter - scheduler_default_weighers = GoodnessWeigher - -.. note:: - - It is useful to use the other filters and weighers available in - OpenStack in combination with these custom ones. For example, the - ``CapacityFilter`` and ``CapacityWeigher`` can be combined with - these. - -Defining your own filter and goodness functions -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -You can define your own filter and goodness functions through the use of -various properties that OpenStack Block Storage has exposed. Properties -exposed include information about the volume request being made, -``volume_type`` settings, and back-end specific information about drivers. -All of these allow for a lot of control over how the ideal back end for -a volume request will be decided. - -The ``filter_function`` option is a string defining an equation that -will determine whether a back end should be considered as a potential -candidate in the scheduler. - -The ``goodness_function`` option is a string defining an equation that -will rate the quality of the potential host (0 to 100, 0 lowest, 100 -highest). - -.. 
important:: - - The drive filter and weigher will use default values for filter and - goodness functions for each back end if you do not define them - yourself. If complete control is desired then a filter and goodness - function should be defined for each of the back ends in - the ``cinder.conf`` file. - - -Supported operations in filter and goodness functions -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -Below is a table of all the operations currently usable in custom filter -and goodness functions created by you: - -+--------------------------------+-------------------------+ -| Operations | Type | -+================================+=========================+ -| +, -, \*, /, ^ | standard math | -+--------------------------------+-------------------------+ -| not, and, or, &, \|, ! | logic | -+--------------------------------+-------------------------+ -| >, >=, <, <=, ==, <>, != | equality | -+--------------------------------+-------------------------+ -| +, - | sign | -+--------------------------------+-------------------------+ -| x ? a : b | ternary | -+--------------------------------+-------------------------+ -| abs(x), max(x, y), min(x, y) | math helper functions | -+--------------------------------+-------------------------+ - -.. caution:: - - Syntax errors you define in filter or goodness strings - are thrown at a volume request time. - -Available properties when creating custom functions -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -There are various properties that can be used in either the -``filter_function`` or the ``goodness_function`` strings. The properties allow -access to volume info, qos settings, extra specs, and so on. 
- -The following properties and their sub-properties are currently -available for use: - -Host stats for a back end -------------------------- -host - The host's name - -volume\_backend\_name - The volume back end name - -vendor\_name - The vendor name - -driver\_version - The driver version - -storage\_protocol - The storage protocol - -QoS\_support - Boolean signifying whether QoS is supported - -total\_capacity\_gb - The total capacity in GB - -allocated\_capacity\_gb - The allocated capacity in GB - -reserved\_percentage - The reserved storage percentage - -Capabilities specific to a back end ------------------------------------ - -These properties are determined by the specific back end -you are creating filter and goodness functions for. Some back ends -may not have any properties available here. - -Requested volume properties ---------------------------- - -status - Status for the requested volume - -volume\_type\_id - The volume type ID - -display\_name - The display name of the volume - -volume\_metadata - Any metadata the volume has - -reservations - Any reservations the volume has - -user\_id - The volume's user ID - -attach\_status - The attach status for the volume - -display\_description - The volume's display description - -id - The volume's ID - -replication\_status - The volume's replication status - -snapshot\_id - The volume's snapshot ID - -encryption\_key\_id - The volume's encryption key ID - -source\_volid - The source volume ID - -volume\_admin\_metadata - Any admin metadata for this volume - -source\_replicaid - The source replication ID - -consistencygroup\_id - The consistency group ID - -size - The size of the volume in GB - -metadata - General metadata - -The property most used from here will most likely be the ``size`` sub-property. - -Extra specs for the requested volume type -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -View the available properties for volume types by running: - -.. 
code-block:: console - - $ cinder extra-specs-list - -Current QoS specs for the requested volume type -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -View the available properties for volume types by running: - -.. code-block:: console - - $ openstack volume qos list - -In order to access these properties in a custom string use the following -format: - -``.`` - -Driver filter and weigher usage examples -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -Below are examples for using the filter and weigher separately, -together, and using driver-specific properties. - -Example ``cinder.conf`` file configuration for customizing the filter -function: - -.. code-block:: ini - - [default] - scheduler_default_filters = DriverFilter - enabled_backends = lvm-1, lvm-2 - - [lvm-1] - volume_driver = cinder.volume.drivers.lvm.LVMVolumeDriver - volume_backend_name = sample_LVM01 - filter_function = "volume.size < 10" - - [lvm-2] - volume_driver = cinder.volume.drivers.lvm.LVMVolumeDriver - volume_backend_name = sample_LVM02 - filter_function = "volume.size >= 10" - -The above example will filter volumes to different back ends depending -on the size of the requested volume. Default OpenStack Block Storage -scheduler weighing is done. Volumes with a size less than 10 GB are sent -to lvm-1 and volumes with a size greater than or equal to 10 GB are sent -to lvm-2. - -Example ``cinder.conf`` file configuration for customizing the goodness -function: - -.. code-block:: ini - - [default] - scheduler_default_weighers = GoodnessWeigher - enabled_backends = lvm-1, lvm-2 - - [lvm-1] - volume_driver = cinder.volume.drivers.lvm.LVMVolumeDriver - volume_backend_name = sample_LVM01 - goodness_function = "(volume.size < 5) ? 100 : 50" - - [lvm-2] - volume_driver = cinder.volume.drivers.lvm.LVMVolumeDriver - volume_backend_name = sample_LVM02 - goodness_function = "(volume.size >= 5) ? 
100 : 25" - -The above example will determine the goodness rating of a back end based -off of the requested volume's size. Default OpenStack Block Storage -scheduler filtering is done. The example shows how the ternary if -statement can be used in a filter or goodness function. If a requested -volume is of size 10 GB then lvm-1 is rated as 50 and lvm-2 is rated as -100. In this case lvm-2 wins. If a requested volume is of size 3 GB then -lvm-1 is rated 100 and lvm-2 is rated 25. In this case lvm-1 would win. - -Example ``cinder.conf`` file configuration for customizing both the -filter and goodness functions: - -.. code-block:: ini - - [default] - scheduler_default_filters = DriverFilter - scheduler_default_weighers = GoodnessWeigher - enabled_backends = lvm-1, lvm-2 - - [lvm-1] - volume_driver = cinder.volume.drivers.lvm.LVMVolumeDriver - volume_backend_name = sample_LVM01 - filter_function = "stats.total_capacity_gb < 500" - goodness_function = "(volume.size < 25) ? 100 : 50" - - [lvm-2] - volume_driver = cinder.volume.drivers.lvm.LVMVolumeDriver - volume_backend_name = sample_LVM02 - filter_function = "stats.total_capacity_gb >= 500" - goodness_function = "(volume.size >= 25) ? 100 : 75" - -The above example combines the techniques from the first two examples. -The best back end is now decided based off of the total capacity of the -back end and the requested volume's size. - -Example ``cinder.conf`` file configuration for accessing driver specific -properties: - -.. code-block:: ini - - [default] - scheduler_default_filters = DriverFilter - scheduler_default_weighers = GoodnessWeigher - enabled_backends = lvm-1,lvm-2,lvm-3 - - [lvm-1] - volume_group = stack-volumes-lvmdriver-1 - volume_driver = cinder.volume.drivers.lvm.LVMVolumeDriver - volume_backend_name = lvmdriver-1 - filter_function = "volume.size < 5" - goodness_function = "(capabilities.total_volumes < 3) ? 
[lvm-2]
-    volume_group = stack-volumes-lvmdriver-2
-    volume_driver = cinder.volume.drivers.lvm.LVMVolumeDriver
-    volume_backend_name = lvmdriver-2
-    filter_function = "volume.size < 5"
-    goodness_function = "(capabilities.total_volumes < 8) ? 100 : 50"
-
-    [lvm-3]
-    volume_group = stack-volumes-lvmdriver-3
-    volume_driver = cinder.volume.drivers.lvm.LVMVolumeDriver
-    volume_backend_name = lvmdriver-3
-    goodness_function = "55"
-
-The above is an example of how back-end specific properties can be used
-in the filter and goodness functions. In this example the LVM driver's
-``total_volumes`` capability is being used to determine which host gets
-used during a volume request. In the above example, lvm-1 and lvm-2 will
-handle volume requests for all volumes with a size less than 5 GB. The
-lvm-1 host will have priority until it contains three or more volumes.
-After that lvm-2 will have priority until it contains eight or more
-volumes. The lvm-3 will collect all volumes greater or equal to 5 GB as
-well as all volumes once lvm-1 and lvm-2 lose priority.
For example, provision type, QOS that will be used to - define a volume at creation time. - * ``Capabilities``: What the current deployed back end in Cinder is able - to do. These correspond to extra specs. - -Usage of cinder client -~~~~~~~~~~~~~~~~~~~~~~ - -When an administrator wants to define new volume types for their -OpenStack cloud, the administrator would fetch a list of ``capabilities`` -for a particular back end using the cinder client. - -First, get a list of the services: - -.. code-block:: console - - $ openstack volume service list - +------------------+-------------------+------+---------+-------+----------------------------+ - | Binary | Host | Zone | Status | State | Updated At | - +------------------+-------------------+------+---------+-------+----------------------------+ - | cinder-scheduler | controller | nova | enabled | up | 2016-10-24T13:53:35.000000 | - | cinder-volume | block1@ABC-driver | nova | enabled | up | 2016-10-24T13:53:35.000000 | - +------------------+-------------------+------+---------+-------+----------------------------+ - -With one of the listed hosts, pass that to ``get-capabilities``, then -the administrator can obtain volume stats and also back end ``capabilities`` -as listed below. - -.. 
code-block:: console - - $ cinder get-capabilities block1@ABC-driver - +---------------------+----------------------------------------------+ - | Volume stats | Value | - +---------------------+----------------------------------------------+ - | description | None | - | display_name | Capabilities of Cinder Vendor ABC driver | - | driver_version | 2.0.0 | - | namespace | OS::Storage::Capabilities::block1@ABC-driver | - | pool_name | None | - | replication_targets | [] | - | storage_protocol | iSCSI | - | vendor_name | Vendor ABC | - | visibility | pool | - | volume_backend_name | ABC-driver | - +---------------------+----------------------------------------------+ - +----------------------+-----------------------------------------------------+ - | Backend properties | Value | - +----------------------+-----------------------------------------------------+ - | compression | {u'type':u'boolean', u'title':u'Compression', ...} | - | ABC:compression_type | {u'enum':u'['lossy', 'lossless', 'special']', ...} | - | qos | {u'type':u'boolean', u'title':u'QoS', ...} | - | replication | {u'type':u'boolean', u'title':u'Replication', ...} | - | thin_provisioning | {u'type':u'boolean', u'title':u'Thin Provisioning'} | - | ABC:minIOPS | {u'type':u'integer', u'title':u'Minimum IOPS QoS',} | - | ABC:maxIOPS | {u'type':u'integer', u'title':u'Maximum IOPS QoS',} | - | ABC:burstIOPS | {u'type':u'integer', u'title':u'Burst IOPS QoS',..} | - +----------------------+-----------------------------------------------------+ - -Disable a service -~~~~~~~~~~~~~~~~~ - -When an administrator wants to disable a service, identify the Binary -and the Host of the service. Use the :command:` openstack volume service set` -command combined with the Binary and Host to disable the service: - -#. Determine the binary and host of the service you want to remove - initially. - - .. 
code-block:: console - - $ openstack volume service list - +------------------+----------------------+------+---------+-------+----------------------------+ - | Binary | Host | Zone | Status | State | Updated At | - +------------------+----------------------+------+---------+-------+----------------------------+ - | cinder-scheduler | devstack | nova | enabled | up | 2016-10-24T13:53:35.000000 | - | cinder-volume | devstack@lvmdriver-1 | nova | enabled | up | 2016-10-24T13:53:35.000000 | - +------------------+----------------------+------+---------+-------+----------------------------+ - -#. Disable the service using the Binary and Host name, placing the Host - before the Binary name. - - .. code-block:: console - - $ openstack volume service set --disable HOST_NAME BINARY_NAME - -#. Remove the service from the database. - - .. code-block:: console - - $ cinder-manage service remove BINARY_NAME HOST_NAME - -Usage of REST API -~~~~~~~~~~~~~~~~~ - -New endpoint to ``get capabilities`` list for specific storage back end -is also available. For more details, refer to the Block Storage API reference. - -API request: - -.. code-block:: console - - GET /v2/{tenant_id}/capabilities/{hostname} - -Example of return value: - -.. 
code-block:: json - - { - "namespace": "OS::Storage::Capabilities::block1@ABC-driver", - "volume_backend_name": "ABC-driver", - "pool_name": "pool", - "driver_version": "2.0.0", - "storage_protocol": "iSCSI", - "display_name": "Capabilities of Cinder Vendor ABC driver", - "description": "None", - "visibility": "public", - "properties": { - "thin_provisioning": { - "title": "Thin Provisioning", - "description": "Sets thin provisioning.", - "type": "boolean" - }, - "compression": { - "title": "Compression", - "description": "Enables compression.", - "type": "boolean" - }, - "ABC:compression_type": { - "title": "Compression type", - "description": "Specifies compression type.", - "type": "string", - "enum": [ - "lossy", "lossless", "special" - ] - }, - "replication": { - "title": "Replication", - "description": "Enables replication.", - "type": "boolean" - }, - "qos": { - "title": "QoS", - "description": "Enables QoS.", - "type": "boolean" - }, - "ABC:minIOPS": { - "title": "Minimum IOPS QoS", - "description": "Sets minimum IOPS if QoS is enabled.", - "type": "integer" - }, - "ABC:maxIOPS": { - "title": "Maximum IOPS QoS", - "description": "Sets maximum IOPS if QoS is enabled.", - "type": "integer" - }, - "ABC:burstIOPS": { - "title": "Burst IOPS QoS", - "description": "Sets burst IOPS if QoS is enabled.", - "type": "integer" - }, - } - } - -Usage of volume type access extension -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -Some volume types should be restricted only. For example, test volume types -where you are testing a new technology or ultra high performance volumes -(for special cases) where you do not want most users to be able to select -these volumes. An administrator/operator can then define private volume types -using cinder client. -Volume type access extension adds the ability to manage volume type access. -Volume types are public by default. Private volume types can be created by -setting the ``--private`` parameter at creation time. 
Access to a -private volume type can be controlled by adding or removing a project from it. -Private volume types without projects are only visible by users with the -admin role/context. - -Create a public volume type by setting ``--public`` parameter: - -.. code-block:: console - - $ openstack volume type create vol_Type1 --description test1 --public - +-------------+--------------------------------------+ - | Field | Value | - +-------------+--------------------------------------+ - | description | test1 | - | id | b7dbed9e-de78-49f8-a840-651ae7308592 | - | is_public | True | - | name | vol_Type1 | - +-------------+--------------------------------------+ - -Create a private volume type by setting ``--private`` parameter: - -.. code-block:: console - - $ openstack volume type create vol_Type2 --description test2 --private - +-------------+--------------------------------------+ - | Field | Value | - +-------------+--------------------------------------+ - | description | test2 | - | id | 154baa73-d2c4-462f-8258-a2df251b0d39 | - | is_public | False | - | name | vol_Type2 | - +-------------+--------------------------------------+ - -Get a list of the volume types: - -.. code-block:: console - - $ openstack volume type list - +--------------------------------------+-------------+ - | ID | Name | - +--------------------------------------+-------------+ - | 0a948c84-bad5-4fba-88a2-c062006e4f6b | vol_Type1 | - | 87e5be6f-9491-4ea5-9906-9ac56494bb91 | lvmdriver-1 | - | fd508846-213f-4a07-aaf2-40518fb9a23f | vol_Type2 | - +--------------------------------------+-------------+ - -Get a list of the projects: - -.. 
code-block:: console - - $ openstack project list - +----------------------------------+--------------------+ - | ID | Name | - +----------------------------------+--------------------+ - | 4105ead90a854100ab6b121266707f2b | alt_demo | - | 4a22a545cedd4fcfa9836eb75e558277 | admin | - | 71f9cdb1a3ab4b8e8d07d347a2e146bb | service | - | c4860af62ffe465e99ed1bc08ef6082e | demo | - | e4b648ba5108415cb9e75bff65fa8068 | invisible_to_admin | - +----------------------------------+--------------------+ - -Add volume type access for the given demo project, using its project-id: - -.. code-block:: console - - $ openstack volume type set --project c4860af62ffe465e99ed1bc08ef6082e \ - vol_Type2 - -List the access information about the given volume type: - -.. code-block:: console - - $ openstack volume type show vol_Type2 - +--------------------+--------------------------------------+ - | Field | Value | - +--------------------+--------------------------------------+ - | access_project_ids | c4860af62ffe465e99ed1bc08ef6082e | - | description | | - | id | fd508846-213f-4a07-aaf2-40518fb9a23f | - | is_public | False | - | name | vol_Type2 | - | properties | | - | qos_specs_id | None | - +--------------------+--------------------------------------+ - -Remove volume type access for the given project: - -.. 
code-block:: console - - $ openstack volume type unset --project c4860af62ffe465e99ed1bc08ef6082e \ - vol_Type2 - $ openstack volume type show vol_Type2 - +--------------------+--------------------------------------+ - | Field | Value | - +--------------------+--------------------------------------+ - | access_project_ids | | - | description | | - | id | fd508846-213f-4a07-aaf2-40518fb9a23f | - | is_public | False | - | name | vol_Type2 | - | properties | | - | qos_specs_id | None | - +--------------------+--------------------------------------+ diff --git a/doc/source/admin/blockstorage-glusterfs-backend.rst b/doc/source/admin/blockstorage-glusterfs-backend.rst deleted file mode 100644 index 0b37f0c82..000000000 --- a/doc/source/admin/blockstorage-glusterfs-backend.rst +++ /dev/null @@ -1,206 +0,0 @@ -============================== -Configure a GlusterFS back end -============================== - -This section explains how to configure OpenStack Block Storage to use -GlusterFS as a back end. You must be able to access the GlusterFS shares -from the server that hosts the ``cinder`` volume service. - -.. note:: - - The GlusterFS volume driver, which was deprecated in the Newton release, - has been removed in the Ocata release. - -.. note:: - - The cinder volume service is named ``openstack-cinder-volume`` on the - following distributions: - - * CentOS - - * Fedora - - * openSUSE - - * Red Hat Enterprise Linux - - * SUSE Linux Enterprise - - In Ubuntu and Debian distributions, the ``cinder`` volume service is - named ``cinder-volume``. - -Mounting GlusterFS volumes requires utilities and libraries from the -``glusterfs-fuse`` package. This package must be installed on all systems -that will access volumes backed by GlusterFS. - -.. note:: - - The utilities and libraries required for mounting GlusterFS volumes on - Ubuntu and Debian distributions are available from the ``glusterfs-client`` - package instead. 
- -For information on how to install and configure GlusterFS, refer to the -`GlusterFS Documentation`_ page. - -**Configure GlusterFS for OpenStack Block Storage** - -The GlusterFS server must also be configured accordingly in order to allow -OpenStack Block Storage to use GlusterFS shares: - -#. Log in as ``root`` to the GlusterFS server. - -#. Set each Gluster volume to use the same UID and GID as the ``cinder`` user: - - .. code-block:: console - - # gluster volume set VOL_NAME storage.owner-uid CINDER_UID - # gluster volume set VOL_NAME storage.owner-gid CINDER_GID - - - Where: - - * VOL_NAME is the Gluster volume name. - - * CINDER_UID is the UID of the ``cinder`` user. - - * CINDER_GID is the GID of the ``cinder`` user. - - .. note:: - - The default UID and GID of the ``cinder`` user is 165 on - most distributions. - -#. Configure each Gluster volume to accept ``libgfapi`` connections. - To do this, set each Gluster volume to allow insecure ports: - - .. code-block:: console - - # gluster volume set VOL_NAME server.allow-insecure on - -#. Enable client connections from unprivileged ports. To do this, - add the following line to ``/etc/glusterfs/glusterd.vol``: - - .. code-block:: bash - - option rpc-auth-allow-insecure on - -#. Restart the ``glusterd`` service: - - .. code-block:: console - - # service glusterd restart - - -**Configure Block Storage to use a GlusterFS back end** - -After you configure the GlusterFS service, complete these steps: - -#. Log in as ``root`` to the system hosting the Block Storage service. - -#. Create a text file named ``glusterfs`` in ``/etc/cinder/`` directory. - -#. Add an entry to ``/etc/cinder/glusterfs`` for each GlusterFS - share that OpenStack Block Storage should use for back end storage. - Each entry should be a separate line, and should use the following - format: - - .. code-block:: bash - - HOST:/VOL_NAME - - - Where: - - * HOST is the IP address or host name of the Red Hat Storage server. 
- - * VOL_NAME is the name of an existing and accessible volume on the - GlusterFS server. - - | - - Optionally, if your environment requires additional mount options for - a share, you can add them to the share's entry: - - .. code-block:: yaml - - HOST:/VOL_NAME -o OPTIONS - - Replace OPTIONS with a comma-separated list of mount options. - -#. Set ``/etc/cinder/glusterfs`` to be owned by the root user - and the ``cinder`` group: - - .. code-block:: console - - # chown root:cinder /etc/cinder/glusterfs - -#. Set ``/etc/cinder/glusterfs`` to be readable by members of - the ``cinder`` group: - - .. code-block:: console - - # chmod 0640 /etc/cinder/glusterfs - -#. Configure OpenStack Block Storage to use the ``/etc/cinder/glusterfs`` - file created earlier. To do so, open the ``/etc/cinder/cinder.conf`` - configuration file and set the ``glusterfs_shares_config`` configuration - key to ``/etc/cinder/glusterfs``. - - On distributions that include openstack-config, you can configure this - by running the following command instead: - - .. code-block:: console - - # openstack-config --set /etc/cinder/cinder.conf \ - DEFAULT glusterfs_shares_config /etc/cinder/glusterfs - - The following distributions include ``openstack-config``: - - * CentOS - - * Fedora - - * openSUSE - - * Red Hat Enterprise Linux - - * SUSE Linux Enterprise - - | - -#. Configure OpenStack Block Storage to use the correct volume driver, - namely ``cinder.volume.drivers.glusterfs.GlusterfsDriver``. To do so, - open the ``/etc/cinder/cinder.conf`` configuration file and set - the ``volume_driver`` configuration key to - ``cinder.volume.drivers.glusterfs.GlusterfsDriver``. - - On distributions that include ``openstack-config``, you can configure - this by running the following command instead: - - .. code-block:: console - - # openstack-config --set /etc/cinder/cinder.conf \ - DEFAULT volume_driver cinder.volume.drivers.glusterfs.GlusterfsDriver - -#. 
You can now restart the service to apply the configuration. - - -OpenStack Block Storage is now configured to use a GlusterFS back end. - -.. warning:: - - If a client host has SELinux enabled, the ``virt_use_fusefs`` boolean - should also be enabled if the host requires access to GlusterFS volumes - on an instance. To enable this Boolean, run the following command as - the ``root`` user: - - .. code-block:: console - - # setsebool -P virt_use_fusefs on - - This command also makes the Boolean persistent across reboots. Run - this command on all client hosts that require access to GlusterFS - volumes on an instance. This includes all compute nodes. - -.. Links -.. _`GlusterFS Documentation`: https://gluster.readthedocs.io/en/latest/ diff --git a/doc/source/admin/blockstorage-glusterfs-removal.rst b/doc/source/admin/blockstorage-glusterfs-removal.rst deleted file mode 100644 index e2ab957b9..000000000 --- a/doc/source/admin/blockstorage-glusterfs-removal.rst +++ /dev/null @@ -1,24 +0,0 @@ -.. _glusterfs_removal: - -=============================================== -Gracefully remove a GlusterFS volume from usage -=============================================== - -Configuring the ``cinder`` volume service to use GlusterFS involves creating a -shares file (for example, ``/etc/cinder/glusterfs``). This shares file -lists each GlusterFS volume (with its corresponding storage server) that -the ``cinder`` volume service can use for back end storage. - -To remove a GlusterFS volume from usage as a back end, delete the volume's -corresponding entry from the shares file. After doing so, restart the Block -Storage services. - -Restarting the Block Storage services will prevent the ``cinder`` volume -service from exporting the deleted GlusterFS volume. This will prevent any -instances from mounting the volume from that point onwards. - -However, the removed GlusterFS volume might still be mounted on an instance -at this point. 
Typically, this is the case when the volume was already -mounted while its entry was deleted from the shares file. -Whenever this occurs, you will have to unmount the volume as normal after -the Block Storage services are restarted. diff --git a/doc/source/admin/blockstorage-groups.rst b/doc/source/admin/blockstorage-groups.rst deleted file mode 100644 index f40058e82..000000000 --- a/doc/source/admin/blockstorage-groups.rst +++ /dev/null @@ -1,380 +0,0 @@ -===================== -Generic volume groups -===================== - -Generic volume group support is available in OpenStack Block Storage (cinder) -since the Newton release. The support is added for creating group types and -group specs, creating groups of volumes, and creating snapshots of groups. -The group operations can be performed using the Block Storage command line. - -A group type is a type for a group just like a volume type for a volume. -A group type can also have associated group specs similar to extra specs -for a volume type. - -In cinder, there is a group construct called `consistency group`. Consistency -groups only support consistent group snapshots and only a small number of -drivers can support it. The following is a list of drivers that support -consistency groups and the release when the support was added: - -- Juno: EMC VNX - -- Kilo: EMC VMAX, IBM (GPFS, Storwize, SVC, and XIV), ProphetStor, Pure - -- Liberty: Dell Storage Center, EMC XtremIO, HPE 3Par and LeftHand - -- Mitaka: EMC ScaleIO, NetApp Data ONTAP and E-Series, SolidFire - -- Newton: CoprHD, FalconStor, Huawei - -Consistency group cannot be extended easily to serve other purposes. A tenant -may want to put volumes used in the same application together in a group so -that it is easier to manage them together, and this group of volumes may or -may not support consistent group snapshot. Generic volume group is introduced -to solve this problem. 
There is a plan to migrate existing consistency group operations to use
generic volume group operations in future releases. More information can be
found in the `Cinder specs <https://specs.openstack.org/openstack/cinder-specs/>`_.
- -The following operations are not allowed if a volume snapshot is in a -group snapshot: - -- Volume snapshot deletion. - - .. note:: - - A group snapshot has to be deleted as a whole with all the volume - snapshots. - -The details of group type operations are shown in the following. The minimum -microversion to support group type and group specs is 3.11: - -**Create a group type**: - -.. code-block:: console - - cinder --os-volume-api-version 3.11 group-type-create - [--description DESCRIPTION] - [--is-public IS_PUBLIC] - NAME - -.. note:: - - The parameter ``NAME`` is required. The - ``--is-public IS_PUBLIC`` determines whether the group type is - accessible to the public. It is ``True`` by default. By default, the - policy on privileges for creating a group type is admin-only. - -**Show a group type**: - -.. code-block:: console - - cinder --os-volume-api-version 3.11 group-type-show - GROUP_TYPE - -.. note:: - - The parameter ``GROUP_TYPE`` is the name or UUID of a group type. - -**List group types**: - -.. code-block:: console - - cinder --os-volume-api-version 3.11 group-type-list - -.. note:: - - Only admin can see private group types. - -**Update a group type**: - -.. code-block:: console - - cinder --os-volume-api-version 3.11 group-type-update - [--name NAME] - [--description DESCRIPTION] - [--is-public IS_PUBLIC] - GROUP_TYPE_ID - -.. note:: - - The parameter ``GROUP_TYPE_ID`` is the UUID of a group type. By default, - the policy on privileges for updating a group type is admin-only. - -**Delete group type or types**: - -.. code-block:: console - - cinder --os-volume-api-version 3.11 group-type-delete - GROUP_TYPE [GROUP_TYPE ...] - -.. note:: - - The parameter ``GROUP_TYPE`` is name or UUID of the group type or - group types to be deleted. By default, the policy on privileges for - deleting a group type is admin-only. - -**Set or unset group spec for a group type**: - -.. 
code-block:: console - - cinder --os-volume-api-version 3.11 group-type-key - GROUP_TYPE ACTION KEY=VALUE [KEY=VALUE ...] - -.. note:: - - The parameter ``GROUP_TYPE`` is the name or UUID of a group type. Valid - values for the parameter ``ACTION`` are ``set`` or ``unset``. - ``KEY=VALUE`` is the group specs key and value pair to set or unset. - For unset, specify only the key. By default, the policy on privileges - for setting or unsetting group specs key is admin-only. - -**List group types and group specs**: - -.. code-block:: console - - cinder --os-volume-api-version 3.11 group-specs-list - -.. note:: - - By default, the policy on privileges for seeing group specs is admin-only. - -The details of group operations are shown in the following. The minimum -microversion to support groups operations is 3.13. - -**Create a group**: - -.. code-block:: console - - cinder --os-volume-api-version 3.13 group-create - [--name NAME] - [--description DESCRIPTION] - [--availability-zone AVAILABILITY_ZONE] - GROUP_TYPE VOLUME_TYPES - -.. note:: - - The parameters ``GROUP_TYPE`` and ``VOLUME_TYPES`` are required. - ``GROUP_TYPE`` is the name or UUID of a group type. ``VOLUME_TYPES`` - can be a list of names or UUIDs of volume types separated by commas - without spaces in between. For example, - ``volumetype1,volumetype2,volumetype3.``. - -**Show a group**: - -.. code-block:: console - - cinder --os-volume-api-version 3.13 group-show - GROUP - -.. note:: - - The parameter ``GROUP`` is the name or UUID of a group. - -**List groups**: - -.. code-block:: console - - cinder --os-volume-api-version 3.13 group-list - [--all-tenants [<0|1>]] - -.. note:: - - ``--all-tenants`` specifies whether to list groups for all tenants. - Only admin can use this option. - -**Create a volume and add it to a group**: - -.. code-block:: console - - cinder --os-volume-api-version 3.13 create - --volume-type VOLUME_TYPE - --group-id GROUP_ID SIZE - -.. 
note:: - - When creating a volume and adding it to a group, the parameters - ``VOLUME_TYPE`` and ``GROUP_ID`` must be provided. This is because a group - can support more than one volume type. - -**Delete a group**: - -.. code-block:: console - - cinder --os-volume-api-version 3.13 group-delete - [--delete-volumes] - GROUP [GROUP ...] - -.. note:: - - ``--delete-volumes`` allows or disallows groups to be deleted - if they are not empty. If the group is empty, it can be deleted without - ``--delete-volumes``. If the group is not empty, the flag is - required for it to be deleted. When the flag is specified, the group - and all volumes in the group will be deleted. - -**Modify a group**: - -.. code-block:: console - - cinder --os-volume-api-version 3.13 group-update - [--name NAME] - [--description DESCRIPTION] - [--add-volumes UUID1,UUID2,......] - [--remove-volumes UUID3,UUID4,......] - GROUP - -.. note:: - - The parameter ``UUID1,UUID2,......`` is the UUID of one or more volumes - to be added to the group, separated by commas. Similarly the parameter - ``UUID3,UUID4,......`` is the UUID of one or more volumes to be removed - from the group, separated by commas. - -The details of group snapshots operations are shown in the following. The -minimum microversion to support group snapshots operations is 3.14. - -**Create a snapshot for a group**: - -.. code-block:: console - - cinder --os-volume-api-version 3.14 group-snapshot-create - [--name NAME] - [--description DESCRIPTION] - GROUP - -.. note:: - - The parameter ``GROUP`` is the name or UUID of a group. - -**Show a group snapshot**: - -.. code-block:: console - - cinder --os-volume-api-version 3.14 group-snapshot-show - GROUP_SNAPSHOT - -.. note:: - - The parameter ``GROUP_SNAPSHOT`` is the name or UUID of a group snapshot. - -**List group snapshots**: - -.. code-block:: console - - cinder --os-volume-api-version 3.14 group-snapshot-list - [--all-tenants [<0|1>]] - [--status STATUS] - [--group-id GROUP_ID] - -.. 
note:: - - ``--all-tenants`` specifies whether to list group snapshots for - all tenants. Only admin can use this option. ``--status STATUS`` - filters results by a status. ``--group-id GROUP_ID`` filters - results by a group id. - -**Delete group snapshot**: - -.. code-block:: console - - cinder --os-volume-api-version 3.14 group-snapshot-delete - GROUP_SNAPSHOT [GROUP_SNAPSHOT ...] - -.. note:: - - The parameter ``GROUP_SNAPSHOT`` specifies the name or UUID of one or more - group snapshots to be deleted. - -**Create a group from a group snapshot or a source group**: - -.. code-block:: console - - $ cinder --os-volume-api-version 3.14 group-create-from-src - [--group-snapshot GROUP_SNAPSHOT] - [--source-group SOURCE_GROUP] - [--name NAME] - [--description DESCRIPTION] - -.. note:: - - The parameter ``GROUP_SNAPSHOT`` is a name or UUID of a group snapshot. - The parameter ``SOURCE_GROUP`` is a name or UUID of a source group. - Either ``GROUP_SNAPSHOT`` or ``SOURCE_GROUP`` must be specified, but not - both. diff --git a/doc/source/admin/blockstorage-image-volume-cache.rst b/doc/source/admin/blockstorage-image-volume-cache.rst deleted file mode 100644 index 589ccd923..000000000 --- a/doc/source/admin/blockstorage-image-volume-cache.rst +++ /dev/null @@ -1,117 +0,0 @@ -.. _image_volume_cache: - - -================== -Image-Volume cache -================== - -OpenStack Block Storage has an optional Image cache which can dramatically -improve the performance of creating a volume from an image. The improvement -depends on many factors, primarily how quickly the configured back end can -clone a volume. - -When a volume is first created from an image, a new cached image-volume -will be created that is owned by the Block Storage Internal Tenant. Subsequent -requests to create volumes from that image will clone the cached version -instead of downloading the image contents and copying data to the volume. 
- -The cache itself is configurable per back end and will contain the most -recently used images. - -Configure the Internal Tenant -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -The Image-Volume cache requires that the Internal Tenant be configured for -the Block Storage services. This project will own the cached image-volumes so -they can be managed like normal users including tools like volume quotas. This -protects normal users from having to see the cached image-volumes, but does -not make them globally hidden. - -To enable the Block Storage services to have access to an Internal Tenant, set -the following options in the ``cinder.conf`` file: - -.. code-block:: ini - - cinder_internal_tenant_project_id = PROJECT_ID - cinder_internal_tenant_user_id = USER_ID - -An example ``cinder.conf`` configuration file: - -.. code-block:: ini - - cinder_internal_tenant_project_id = b7455b8974bb4064ad247c8f375eae6c - cinder_internal_tenant_user_id = f46924c112a14c80ab0a24a613d95eef - -.. note:: - - The actual user and project that are configured for the Internal Tenant do - not require any special privileges. They can be the Block Storage service - project or can be any normal project and user. - -Configure the Image-Volume cache -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -To enable the Image-Volume cache, set the following configuration option in -the ``cinder.conf`` file: - -.. code-block:: ini - - image_volume_cache_enabled = True - -.. note:: - - If you use Ceph as a back end, set the following configuration option in - the ``cinder.conf`` file: - - .. code-block:: ini - - [ceph] - image_volume_cache_enabled = True - -This can be scoped per back end definition or in the default options. - -There are optional configuration settings that can limit the size of the cache. -These can also be scoped per back end or in the default options in -the ``cinder.conf`` file: - -.. 
code-block:: ini - - image_volume_cache_max_size_gb = SIZE_GB - image_volume_cache_max_count = MAX_COUNT - -By default they will be set to 0, which means unlimited. - -For example, a configuration which would limit the max size to 200 GB and 50 -cache entries will be configured as: - -.. code-block:: ini - - image_volume_cache_max_size_gb = 200 - image_volume_cache_max_count = 50 - -Notifications -~~~~~~~~~~~~~ - -Cache actions will trigger Telemetry messages. There are several that will be -sent. - -- ``image_volume_cache.miss`` - A volume is being created from an image which - was not found in the cache. Typically this will mean a new cache entry would - be created for it. - -- ``image_volume_cache.hit`` - A volume is being created from an image which - was found in the cache and the fast path can be taken. - -- ``image_volume_cache.evict`` - A cached image-volume has been deleted from - the cache. - - -Managing cached Image-Volumes -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -In normal usage there should be no need for manual intervention with the cache. -The entries and their backing Image-Volumes are managed automatically. - -If needed, you can delete these volumes manually to clear the cache. -By using the standard volume deletion APIs, the Block Storage service will -clean up correctly. diff --git a/doc/source/admin/blockstorage-lio-iscsi-support.rst b/doc/source/admin/blockstorage-lio-iscsi-support.rst deleted file mode 100644 index b2d525ff9..000000000 --- a/doc/source/admin/blockstorage-lio-iscsi-support.rst +++ /dev/null @@ -1,12 +0,0 @@ -===================== -Use LIO iSCSI support -===================== - -The default mode for the ``iscsi_helper`` tool is ``tgtadm``. -To use LIO iSCSI, install the ``python-rtslib`` package, and set -``iscsi_helper=lioadm`` in the ``cinder.conf`` file. - -Once configured, you can use the :command:`cinder-rtstool` command to -manage the volumes. 
This command enables you to create, delete, and -verify volumes and determine targets and add iSCSI initiators to the -system. diff --git a/doc/source/admin/blockstorage-manage-volumes.rst b/doc/source/admin/blockstorage-manage-volumes.rst deleted file mode 100644 index f2d2974be..000000000 --- a/doc/source/admin/blockstorage-manage-volumes.rst +++ /dev/null @@ -1,82 +0,0 @@ -============== -Manage volumes -============== - -The default OpenStack Block Storage service implementation is an -iSCSI solution that uses :term:`Logical Volume Manager (LVM)` for Linux. - -.. note:: - - The OpenStack Block Storage service is not a shared storage - solution like a Network Attached Storage (NAS) of NFS volumes - where you can attach a volume to multiple servers. With the - OpenStack Block Storage service, you can attach a volume to only - one instance at a time. - - The OpenStack Block Storage service also provides drivers that - enable you to use several vendors' back-end storage devices in - addition to the base LVM implementation. These storage devices can - also be used instead of the base LVM installation. - -This high-level procedure shows you how to create and attach a volume -to a server instance. - -**To create and attach a volume to an instance** - -#. Configure the OpenStack Compute and the OpenStack Block Storage - services through the ``/etc/cinder/cinder.conf`` file. -#. Use the :command:`openstack volume create` command to create a volume. - This command creates an LV into the volume group (VG) ``cinder-volumes``. -#. Use the :command:`openstack server add volume` command to attach the - volume to an instance. This command creates a unique :term:`IQN ` that is exposed to the compute node. - - * The compute node, which runs the instance, now has an active - iSCSI session and new local storage (usually a ``/dev/sdX`` - disk). - * Libvirt uses that local storage as storage for the instance. The - instance gets a new disk (usually a ``/dev/vdX`` disk). 
- -For this particular walkthrough, one cloud controller runs -``nova-api``, ``nova-scheduler``, ``nova-objectstore``, -``nova-network`` and ``cinder-*`` services. Two additional compute -nodes run ``nova-compute``. The walkthrough uses a custom -partitioning scheme that carves out 60 GB of space and labels it as -LVM. The network uses the ``FlatManager`` and ``NetworkManager`` -settings for OpenStack Compute. - -The network mode does not interfere with OpenStack Block Storage -operations, but you must set up networking for Block Storage to work. -For details, see :ref:`networking`. - -To set up Compute to use volumes, ensure that Block Storage is -installed along with ``lvm2``. This guide describes how to -troubleshoot your installation and back up your Compute volumes. - -.. toctree:: - - blockstorage-boot-from-volume.rst - blockstorage-nfs-backend.rst - blockstorage-glusterfs-backend.rst - blockstorage-multi-backend.rst - blockstorage-backup-disks.rst - blockstorage-volume-migration.rst - blockstorage-glusterfs-removal.rst - blockstorage-volume-backups.rst - blockstorage-volume-backups-export-import.rst - blockstorage-lio-iscsi-support.rst - blockstorage-volume-number-weigher.rst - blockstorage-consistency-groups.rst - blockstorage-driver-filter-weighing.rst - blockstorage-ratelimit-volume-copy-bandwidth.rst - blockstorage-over-subscription.rst - blockstorage-image-volume-cache.rst - blockstorage-volume-backed-image.rst - blockstorage-get-capabilities.rst - blockstorage-groups.rst - -.. note:: - - To enable the use of encrypted volumes, see the setup instructions in - `Create an encrypted volume type - `_. diff --git a/doc/source/admin/blockstorage-multi-backend.rst b/doc/source/admin/blockstorage-multi-backend.rst deleted file mode 100644 index a09b4d1e7..000000000 --- a/doc/source/admin/blockstorage-multi-backend.rst +++ /dev/null @@ -1,185 +0,0 @@ -.. 
_multi_backend: - -==================================== -Configure multiple-storage back ends -==================================== - -When you configure multiple-storage back ends, you can create several -back-end storage solutions that serve the same OpenStack Compute -configuration and one ``cinder-volume`` is launched for each back-end -storage or back-end storage pool. - -In a multiple-storage back-end configuration, each back end has a name -(``volume_backend_name``). Several back ends can have the same name. -In that case, the scheduler properly decides which back end the volume -has to be created in. - -The name of the back end is declared as an extra-specification of a -volume type (such as, ``volume_backend_name=LVM``). When a volume -is created, the scheduler chooses an appropriate back end to handle the -request, according to the volume type specified by the user. - -Enable multiple-storage back ends -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -To enable a multiple-storage back ends, you must set the -`enabled_backends` flag in the ``cinder.conf`` file. -This flag defines the names (separated by a comma) of the configuration -groups for the different back ends: one name is associated to one -configuration group for a back end (such as, ``[lvmdriver-1]``). - -.. note:: - - The configuration group name is not related to the ``volume_backend_name``. - -.. note:: - - After setting the ``enabled_backends`` flag on an existing cinder - service, and restarting the Block Storage services, the original ``host`` - service is replaced with a new host service. The new service appears - with a name like ``host@backend``. Use: - - .. code-block:: console - - $ cinder-manage volume update_host --currenthost CURRENTHOST --newhost CURRENTHOST@BACKEND - - to convert current block devices to the new host name. - -The options for a configuration group must be defined in the group -(or default options are used). 
All the standard Block Storage -configuration options (``volume_group``, ``volume_driver``, and so on) -might be used in a configuration group. Configuration values in -the ``[DEFAULT]`` configuration group are not used. - -These examples show three back ends: - -.. code-block:: ini - - enabled_backends=lvmdriver-1,lvmdriver-2,lvmdriver-3 - [lvmdriver-1] - volume_group=cinder-volumes-1 - volume_driver=cinder.volume.drivers.lvm.LVMVolumeDriver - volume_backend_name=LVM - [lvmdriver-2] - volume_group=cinder-volumes-2 - volume_driver=cinder.volume.drivers.lvm.LVMVolumeDriver - volume_backend_name=LVM - [lvmdriver-3] - volume_group=cinder-volumes-3 - volume_driver=cinder.volume.drivers.lvm.LVMVolumeDriver - volume_backend_name=LVM_b - -In this configuration, ``lvmdriver-1`` and ``lvmdriver-2`` have the same -``volume_backend_name``. If a volume creation requests the ``LVM`` -back end name, the scheduler uses the capacity filter scheduler to choose -the most suitable driver, which is either ``lvmdriver-1`` or ``lvmdriver-2``. -The capacity filter scheduler is enabled by default. The next section -provides more information. In addition, this example presents a -``lvmdriver-3`` back end. - -.. note:: - - For Fiber Channel drivers that support multipath, the configuration group - requires the ``use_multipath_for_image_xfer=true`` option. In - the example below, you can see details for HPE 3PAR and EMC Fiber - Channel drivers. - -.. code-block:: ini - - [3par] - use_multipath_for_image_xfer = true - volume_driver = cinder.volume.drivers.hpe.hpe_3par_fc.HPE3PARFCDriver - volume_backend_name = 3parfc - - [emc] - use_multipath_for_image_xfer = true - volume_driver = cinder.volume.drivers.emc.emc_smis_fc.EMCSMISFCDriver - volume_backend_name = emcfc - -Configure Block Storage scheduler multi back end -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -You must enable the `filter_scheduler` option to use -multiple-storage back ends. The filter scheduler: - -#. 
Filters the available back ends. By default, ``AvailabilityZoneFilter``, - ``CapacityFilter`` and ``CapabilitiesFilter`` are enabled. - -#. Weights the previously filtered back ends. By default, the - `CapacityWeigher` option is enabled. When this option is - enabled, the filter scheduler assigns the highest weight to back - ends with the most available capacity. - -The scheduler uses filters and weights to pick the best back end to -handle the request. The scheduler uses volume types to explicitly create -volumes on specific back ends. For more information about filter and weighing, -see :ref:`filter_weigh_scheduler`. - - -Volume type -~~~~~~~~~~~ - -Before using it, a volume type has to be declared to Block Storage. -This can be done by the following command: - -.. code-block:: console - - $ openstack --os-username admin --os-tenant-name admin volume type create lvm - -Then, an extra-specification has to be created to link the volume -type to a back end name. Run this command: - -.. code-block:: console - - $ openstack --os-username admin --os-tenant-name admin volume type set lvm \ - --property volume_backend_name=LVM_iSCSI - -This example creates a ``lvm`` volume type with -``volume_backend_name=LVM_iSCSI`` as extra-specifications. - -Create another volume type: - -.. code-block:: console - - $ openstack --os-username admin --os-tenant-name admin volume type create lvm_gold - - $ openstack --os-username admin --os-tenant-name admin volume type set lvm_gold \ - --property volume_backend_name=LVM_iSCSI_b - -This second volume type is named ``lvm_gold`` and has ``LVM_iSCSI_b`` as -back end name. - -.. note:: - - To list the extra-specifications, use this command: - - .. code-block:: console - - $ openstack --os-username admin --os-tenant-name admin volume type list --long - -.. 
note:: - - If a volume type points to a ``volume_backend_name`` that does not - exist in the Block Storage configuration, the ``filter_scheduler`` - returns an error that it cannot find a valid host with the suitable - back end. - -Usage -~~~~~ - -When you create a volume, you must specify the volume type. -The extra-specifications of the volume type are used to determine which -back end has to be used. - -.. code-block:: console - - $ openstack volume create --size 1 --type lvm test_multi_backend - -Considering the ``cinder.conf`` described previously, the scheduler -creates this volume on ``lvmdriver-1`` or ``lvmdriver-2``. - -.. code-block:: console - - $ openstack volume create --size 1 --type lvm_gold test_multi_backend - -This second volume is created on ``lvmdriver-3``. diff --git a/doc/source/admin/blockstorage-nfs-backend.rst b/doc/source/admin/blockstorage-nfs-backend.rst deleted file mode 100644 index 63c03e4f8..000000000 --- a/doc/source/admin/blockstorage-nfs-backend.rst +++ /dev/null @@ -1,162 +0,0 @@ -================================= -Configure an NFS storage back end -================================= - -This section explains how to configure OpenStack Block Storage to use -NFS storage. You must be able to access the NFS shares from the server -that hosts the ``cinder`` volume service. - -.. note:: - - The ``cinder`` volume service is named ``openstack-cinder-volume`` - on the following distributions: - - * CentOS - - * Fedora - - * openSUSE - - * Red Hat Enterprise Linux - - * SUSE Linux Enterprise - - In Ubuntu and Debian distributions, the ``cinder`` volume service is - named ``cinder-volume``. - -**Configure Block Storage to use an NFS storage back end** - -#. Log in as ``root`` to the system hosting the ``cinder`` volume - service. - -#. Create a text file named ``nfsshares`` in the ``/etc/cinder/`` directory. - -#. 
Add an entry to ``/etc/cinder/nfsshares`` for each NFS share - that the ``cinder`` volume service should use for back end storage. - Each entry should be a separate line, and should use the following - format: - - .. code-block:: bash - - HOST:SHARE - - - Where: - - * HOST is the IP address or host name of the NFS server. - - * SHARE is the absolute path to an existing and accessible NFS share. - - | - -#. Set ``/etc/cinder/nfsshares`` to be owned by the ``root`` user and - the ``cinder`` group: - - .. code-block:: console - - # chown root:cinder /etc/cinder/nfsshares - -#. Set ``/etc/cinder/nfsshares`` to be readable by members of the - cinder group: - - .. code-block:: console - - # chmod 0640 /etc/cinder/nfsshares - -#. Configure the ``cinder`` volume service to use the - ``/etc/cinder/nfsshares`` file created earlier. To do so, open - the ``/etc/cinder/cinder.conf`` configuration file and set - the ``nfs_shares_config`` configuration key - to ``/etc/cinder/nfsshares``. - - On distributions that include ``openstack-config``, you can configure - this by running the following command instead: - - .. code-block:: console - - # openstack-config --set /etc/cinder/cinder.conf \ - DEFAULT nfs_shares_config /etc/cinder/nfsshares - - The following distributions include openstack-config: - - * CentOS - - * Fedora - - * openSUSE - - * Red Hat Enterprise Linux - - * SUSE Linux Enterprise - - -#. Optionally, provide any additional NFS mount options required in - your environment in the ``nfs_mount_options`` configuration key - of ``/etc/cinder/cinder.conf``. If your NFS shares do not - require any additional mount options (or if you are unsure), - skip this step. - - On distributions that include ``openstack-config``, you can - configure this by running the following command instead: - - .. 
code-block:: console - - # openstack-config --set /etc/cinder/cinder.conf \ - DEFAULT nfs_mount_options OPTIONS - - Replace OPTIONS with the mount options to be used when accessing - NFS shares. See the manual page for NFS for more information on - available mount options (:command:`man nfs`). - -#. Configure the ``cinder`` volume service to use the correct volume - driver, namely ``cinder.volume.drivers.nfs.NfsDriver``. To do so, - open the ``/etc/cinder/cinder.conf`` configuration file and - set the volume_driver configuration key - to ``cinder.volume.drivers.nfs.NfsDriver``. - - On distributions that include ``openstack-config``, you can configure - this by running the following command instead: - - .. code-block:: console - - # openstack-config --set /etc/cinder/cinder.conf \ - DEFAULT volume_driver cinder.volume.drivers.nfs.NfsDriver - -#. You can now restart the service to apply the configuration. - - .. note:: - - The ``nfs_sparsed_volumes`` configuration key determines whether - volumes are created as sparse files and grown as needed or fully - allocated up front. The default and recommended value is ``true``, - which ensures volumes are initially created as sparse files. - - Setting ``nfs_sparsed_volumes`` to ``false`` will result in - volumes being fully allocated at the time of creation. This leads - to increased delays in volume creation. - - However, should you choose to set ``nfs_sparsed_volumes`` to - ``false``, you can do so directly in ``/etc/cinder/cinder.conf``. - - On distributions that include ``openstack-config``, you can - configure this by running the following command instead: - - .. code-block:: console - - # openstack-config --set /etc/cinder/cinder.conf \ - DEFAULT nfs_sparsed_volumes false - - .. warning:: - - If a client host has SELinux enabled, the ``virt_use_nfs`` - boolean should also be enabled if the host requires access to - NFS volumes on an instance. 
To enable this boolean, run the - following command as the ``root`` user: - - .. code-block:: console - - # setsebool -P virt_use_nfs on - - This command also makes the boolean persistent across reboots. - Run this command on all client hosts that require access to NFS - volumes on an instance. This includes all compute nodes. diff --git a/doc/source/admin/blockstorage-over-subscription.rst b/doc/source/admin/blockstorage-over-subscription.rst deleted file mode 100644 index 5606e499f..000000000 --- a/doc/source/admin/blockstorage-over-subscription.rst +++ /dev/null @@ -1,140 +0,0 @@ -.. _over_subscription: - -===================================== -Oversubscription in thin provisioning -===================================== - -OpenStack Block Storage enables you to choose a volume back end based on -virtual capacities for thin provisioning using the oversubscription ratio. - -A reference implementation is provided for the default LVM driver. The -illustration below uses the LVM driver as an example. - -Configure oversubscription settings -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -To support oversubscription in thin provisioning, a flag -``max_over_subscription_ratio`` is introduced into ``cinder.conf``. -This is a float representation of the oversubscription ratio when thin -provisioning is involved. Default ratio is 20.0, meaning provisioned -capacity can be 20 times of the total physical capacity. A ratio of 10.5 -means provisioned capacity can be 10.5 times of the total physical capacity. -A ratio of 1.0 means provisioned capacity cannot exceed the total physical -capacity. A ratio lower than 1.0 is ignored and the default value is used -instead. - -.. note:: - - ``max_over_subscription_ratio`` can be configured for each back end when - multiple-storage back ends are enabled. It is provided as a reference - implementation and is used by the LVM driver. However, it is not a - requirement for a driver to use this option from ``cinder.conf``. 
- - ``max_over_subscription_ratio`` is for configuring a back end. For a - driver that supports multiple pools per back end, it can report this - ratio for each pool. The LVM driver does not support multiple pools. - -The existing ``reserved_percentage`` flag is used to prevent over provisioning. -This flag represents the percentage of the back-end capacity that is reserved. - -.. note:: - - There is a change on how ``reserved_percentage`` is used. It was measured - against the free capacity in the past. Now it is measured against the total - capacity. - -Capabilities -~~~~~~~~~~~~ - -Drivers can report the following capabilities for a back end or a pool: - -.. code-block:: ini - - thin_provisioning_support = True(or False) - thick_provisioning_support = True(or False) - provisioned_capacity_gb = PROVISIONED_CAPACITY - max_over_subscription_ratio = MAX_RATIO - -Where ``PROVISIONED_CAPACITY`` is the apparent allocated space indicating -how much capacity has been provisioned and ``MAX_RATIO`` is the maximum -oversubscription ratio. For the LVM driver, it is -``max_over_subscription_ratio`` in ``cinder.conf``. - -Two capabilities are added here to allow a back end or pool to claim support -for thin provisioning, or thick provisioning, or both. - -The LVM driver reports ``thin_provisioning_support=True`` and -``thick_provisioning_support=False`` if the ``lvm_type`` flag in -``cinder.conf`` is ``thin``. Otherwise it reports -``thin_provisioning_support=False`` and ``thick_provisioning_support=True``. - -Volume type extra specs -~~~~~~~~~~~~~~~~~~~~~~~ - -If volume type is provided as part of the volume creation request, it can -have the following extra specs defined: - -.. code-block:: python - - 'capabilities:thin_provisioning_support': ' True' or ' False' - 'capabilities:thick_provisioning_support': ' True' or ' False' - -.. note:: - - ``capabilities`` scope key before ``thin_provisioning_support`` and - ``thick_provisioning_support`` is not required. 
So the following works too: - -.. code-block:: python - - 'thin_provisioning_support': ' True' or ' False' - 'thick_provisioning_support': ' True' or ' False' - -The above extra specs are used by the scheduler to find a back end that -supports thin provisioning, thick provisioning, or both to match the needs -of a specific volume type. - -Volume replication extra specs -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -OpenStack Block Storage has the ability to create volume replicas. -Administrators can define a storage policy that includes -replication by adjusting the cinder volume driver. Volume replication -for OpenStack Block Storage helps safeguard OpenStack environments from -data loss during disaster recovery. - -To enable replication when creating volume types, configure the cinder -volume with ``capabilities:replication=" True"``. - -Each volume created with the replication capability set to ``True`` -generates a copy of the volume on a storage back end. - -One use case for replication involves an OpenStack cloud environment -installed across two data centers located nearby each other. The -distance between the two data centers in this use case is the length of -a city. - -At each data center, a cinder host supports the Block Storage service. -Both data centers include storage back ends. - -Depending on the storage requirements, there can be one or two cinder -hosts. The administrator accesses the -``/etc/cinder/cinder.conf`` configuration file and sets -``capabilities:replication=" True"``. - -If one data center experiences a service failure, administrators -can redeploy the VM. The VM will run using a replicated, backed up -volume on a host in the second data center. - -Capacity filter -~~~~~~~~~~~~~~~ - -In the capacity filter, ``max_over_subscription_ratio`` is used when -choosing a back end if ``thin_provisioning_support`` is True and -``max_over_subscription_ratio`` is greater than 1.0. 
- -Capacity weigher -~~~~~~~~~~~~~~~~ - -In the capacity weigher, virtual free capacity is used for ranking if -``thin_provisioning_support`` is True. Otherwise, real free capacity -will be used as before. diff --git a/doc/source/admin/blockstorage-ratelimit-volume-copy-bandwidth.rst b/doc/source/admin/blockstorage-ratelimit-volume-copy-bandwidth.rst deleted file mode 100644 index 91416a866..000000000 --- a/doc/source/admin/blockstorage-ratelimit-volume-copy-bandwidth.rst +++ /dev/null @@ -1,46 +0,0 @@ -.. _ratelimit_volume_copy_bandwidth: - -================================ -Rate-limit volume copy bandwidth -================================ - -When you create a new volume from an image or an existing volume, or -when you upload a volume image to the Image service, large data copy -may stress disk and network bandwidth. To mitigate slowdown of data -access from the instances, OpenStack Block Storage supports rate-limiting -of volume data copy bandwidth. - -Configure volume copy bandwidth limit -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -To configure the volume copy bandwidth limit, set the -``volume_copy_bps_limit`` option in the configuration groups for each -back end in the ``cinder.conf`` file. This option takes an integer value of the -maximum bandwidth allowed for volume data copy, in bytes per second. If -this option is set to ``0``, the rate-limit is disabled. - -While multiple volume data copy operations are running in the same back -end, the specified bandwidth is divided among the copies. - -Example ``cinder.conf`` configuration file to limit volume copy bandwidth -of ``lvmdriver-1`` up to 100 MiB/s: - -.. code-block:: ini - - [lvmdriver-1] - volume_group=cinder-volumes-1 - volume_driver=cinder.volume.drivers.lvm.LVMVolumeDriver - volume_backend_name=LVM - volume_copy_bps_limit=104857600 - -.. note:: - - This feature requires libcgroup to set up blkio cgroup for disk I/O - bandwidth limit. 
The libcgroup is provided by the cgroup-bin package - in Debian and Ubuntu, or by the libcgroup-tools package in Fedora, - Red Hat Enterprise Linux, CentOS, openSUSE, and SUSE Linux Enterprise. - -.. note:: - - Some back ends which use remote file systems such as NFS are not - supported by this feature. diff --git a/doc/source/admin/blockstorage-troubleshoot.rst b/doc/source/admin/blockstorage-troubleshoot.rst deleted file mode 100644 index b16e055bb..000000000 --- a/doc/source/admin/blockstorage-troubleshoot.rst +++ /dev/null @@ -1,22 +0,0 @@ -============================== -Troubleshoot your installation -============================== - -This section provides useful tips to help you troubleshoot your Block -Storage installation. - -.. toctree:: - :maxdepth: 1 - - ts-cinder-config.rst - ts-multipath-warn.rst - ts-eql-volume-size.rst - ts-vol-attach-miss-sg-scan.rst - ts-HTTP-bad-req-in-cinder-vol-log.rst - ts-duplicate-3par-host.rst - ts-failed-attach-vol-after-detach.rst - ts-failed-attach-vol-no-sysfsutils.rst - ts-failed-connect-vol-FC-SAN.rst - ts-no-emulator-x86-64.rst - ts-non-existent-host.rst - ts-non-existent-vlun.rst diff --git a/doc/source/admin/blockstorage-volume-backed-image.rst b/doc/source/admin/blockstorage-volume-backed-image.rst deleted file mode 100644 index 0833c7dc8..000000000 --- a/doc/source/admin/blockstorage-volume-backed-image.rst +++ /dev/null @@ -1,90 +0,0 @@ -.. _volume_backed_image: - - -=================== -Volume-backed image -=================== - -OpenStack Block Storage can quickly create a volume from an image that refers -to a volume storing image data (Image-Volume). Compared to the other stores -such as file and swift, creating a volume from a Volume-backed image performs -better when the block storage driver supports efficient volume cloning. - -If the image is set to public in the Image service, the volume data can be -shared among projects. 
- -Configure the Volume-backed image -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -Volume-backed image feature requires locations information from the cinder -store of the Image service. To enable the Image service to use the cinder -store, add ``cinder`` to the ``stores`` option in the ``glance_store`` section -of the ``glance-api.conf`` file: - -.. code-block:: ini - - stores = file, http, swift, cinder - -To expose locations information, set the following options in the ``DEFAULT`` -section of the ``glance-api.conf`` file: - -.. code-block:: ini - - show_multiple_locations = True - -To enable the Block Storage services to create a new volume by cloning Image- -Volume, set the following options in the ``DEFAULT`` section of the -``cinder.conf`` file. For example: - -.. code-block:: ini - - glance_api_version = 2 - allowed_direct_url_schemes = cinder - -To enable the :command:`openstack image create --volume ` command to -create an image that refers an ``Image-Volume``, set the following options in -each back-end section of the ``cinder.conf`` file: - -.. code-block:: ini - - image_upload_use_cinder_backend = True - -By default, the :command:`openstack image create --volume ` command -creates the Image-Volume in the current project. To store the Image-Volume into -the internal project, set the following options in each back-end section of the -``cinder.conf`` file: - -.. code-block:: ini - - image_upload_use_internal_tenant = True - -To make the Image-Volume in the internal project accessible from the Image -service, set the following options in the ``glance_store`` section of -the ``glance-api.conf`` file: - -- ``cinder_store_auth_address`` -- ``cinder_store_user_name`` -- ``cinder_store_password`` -- ``cinder_store_project_name`` - -Creating a Volume-backed image -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -To register an existing volume as a new Volume-backed image, use the following -commands: - -.. 
code-block:: console - - $ openstack image create --disk-format raw --container-format bare IMAGE_NAME - - $ glance location-add --url cinder:// - -If the ``image_upload_use_cinder_backend`` option is enabled, the following -command creates a new Image-Volume by cloning the specified volume and then -registers its location to a new image. The disk format and the container format -must be raw and bare (default). Otherwise, the image is uploaded to the default -store of the Image service. - -.. code-block:: console - - $ openstack image create --volume SOURCE_VOLUME IMAGE_NAME diff --git a/doc/source/admin/blockstorage-volume-backups-export-import.rst b/doc/source/admin/blockstorage-volume-backups-export-import.rst deleted file mode 100644 index 6516fd25a..000000000 --- a/doc/source/admin/blockstorage-volume-backups-export-import.rst +++ /dev/null @@ -1,58 +0,0 @@ -.. _volume_backups_export_import: - -================================= -Export and import backup metadata -================================= - - -A volume backup can only be restored on the same Block Storage service. This -is because restoring a volume from a backup requires metadata available on -the database used by the Block Storage service. - -.. note:: - - For information about how to back up and restore a volume, see - the section called :ref:`volume_backups`. - -You can, however, export the metadata of a volume backup. To do so, run -this command as an OpenStack ``admin`` user (presumably, after creating -a volume backup): - -.. code-block:: console - - $ cinder backup-export BACKUP_ID - -Where ``BACKUP_ID`` is the volume backup's ID. This command should return the -backup's corresponding database information as encoded string metadata. - -Exporting and storing this encoded string metadata allows you to completely -restore the backup, even in the event of a catastrophic database failure. 
-This will preclude the need to back up the entire Block Storage database, -particularly if you only need to keep complete backups of a small subset -of volumes. - -If you have placed encryption on your volumes, the encryption will still be -in place when you restore the volume if a UUID encryption key is specified -when creating volumes. Using backup metadata support, UUID keys set up for -a volume (or volumes) will remain valid when you restore a backed-up volume. -The restored volume will remain encrypted, and will be accessible with your -credentials. - -In addition, having a volume backup and its backup metadata also provides -volume portability. Specifically, backing up a volume and exporting its -metadata will allow you to restore the volume on a completely different Block -Storage database, or even on a different cloud service. To do so, first -import the backup metadata to the Block Storage database and then restore -the backup. - -To import backup metadata, run the following command as an OpenStack -``admin``: - -.. code-block:: console - - $ cinder backup-import METADATA - -Where ``METADATA`` is the backup metadata exported earlier. - -Once you have imported the backup metadata into a Block Storage database, -restore the volume (see the section called :ref:`volume_backups`). diff --git a/doc/source/admin/blockstorage-volume-backups.rst b/doc/source/admin/blockstorage-volume-backups.rst deleted file mode 100644 index e2a32a27e..000000000 --- a/doc/source/admin/blockstorage-volume-backups.rst +++ /dev/null @@ -1,175 +0,0 @@ -.. _volume_backups: - -========================================= -Back up and restore volumes and snapshots -========================================= - -The ``openstack`` command-line interface provides the tools for creating a -volume backup. You can restore a volume from a backup as long as the -backup's associated database information (or backup metadata) is intact -in the Block Storage database. 
- -Run this command to create a backup of a volume: - -.. code-block:: console - - $ openstack volume backup create [--incremental] [--force] VOLUME - -Where ``VOLUME`` is the name or ID of the volume, ``incremental`` is -a flag that indicates whether an incremental backup should be performed, -and ``force`` is a flag that allows or disallows backup of a volume -when the volume is attached to an instance. - -Without the ``incremental`` flag, a full backup is created by default. -With the ``incremental`` flag, an incremental backup is created. - -Without the ``force`` flag, the volume will be backed up only if its -status is ``available``. With the ``force`` flag, the volume will be -backed up whether its status is ``available`` or ``in-use``. A volume -is ``in-use`` when it is attached to an instance. The backup of an -``in-use`` volume means your data is crash consistent. The ``force`` -flag is False by default. - -.. note:: - - The ``incremental`` and ``force`` flags are only available for block - storage API v2. You have to specify ``[--os-volume-api-version 2]`` in the - ``cinder`` command-line interface to use this parameter. - -.. note:: - - The ``force`` flag is new in OpenStack Liberty. - -The incremental backup is based on a parent backup which is an existing -backup with the latest timestamp. The parent backup can be a full backup -or an incremental backup depending on the timestamp. - - -.. note:: - - The first backup of a volume has to be a full backup. Attempting to do - an incremental backup without any existing backups will fail. - There is an ``is_incremental`` flag that indicates whether a backup is - incremental when showing details on the backup. - Another flag, ``has_dependent_backups``, returned when showing backup - details, will indicate whether the backup has dependent backups. - If it is ``true``, attempting to delete this backup will fail. 
- -A new configuration option ``backup_swift_block_size`` is introduced into -``cinder.conf`` for the default Swift backup driver. This is the size, in -bytes, at which changes are tracked for incremental backups. The existing -``backup_swift_object_size`` option, the size in bytes of Swift backup -objects, has to be a multiple of ``backup_swift_block_size``. The default -is 32768 for ``backup_swift_block_size``, and the default is 52428800 for -``backup_swift_object_size``. - -The configuration option ``backup_swift_enable_progress_timer`` in -``cinder.conf`` is used when backing up the volume to the Object Storage -back end. This option enables or disables the timer. It is enabled by default -to send the periodic progress notifications to the Telemetry service. - -This command also returns a backup ID. Use this backup ID when restoring -the volume: - -.. code-block:: console - - $ openstack volume backup restore BACKUP_ID VOLUME_ID - -When restoring from a full backup, it is a full restore. - -When restoring from an incremental backup, a list of backups is built based -on the IDs of the parent backups. A full restore is performed based on the -full backup first, then restore is done based on the incremental backup, -laying on top of it in order. - -You can view a backup list with the :command:`openstack volume backup list` -command. Optional arguments to clarify the status of your backups -include ``--name``, ``--status``, and -``--volume`` to filter through backups by the specified name, -status, or volume-id. Search with ``--all-projects`` for details of the -projects associated with the listed backups. - -Because volume backups are dependent on the Block Storage database, you must -also back up your Block Storage database regularly to ensure data recovery. - -.. note:: - - Alternatively, you can export and save the metadata of selected volume - backups. Doing so precludes the need to back up the entire Block Storage - database. 
This is useful if you need only a small subset of volumes to - survive a catastrophic database failure. - - If you specify a UUID encryption key when setting up the volume - specifications, the backup metadata ensures that the key will remain valid - when you back up and restore the volume. - - For more information about how to export and import volume backup metadata, - see the section called :ref:`volume_backups_export_import`. - -By default, the swift object store is used for the backup repository. - -If instead you want to use an NFS export as the backup repository, add the -following configuration options to the ``[DEFAULT]`` section of the -``cinder.conf`` file and restart the Block Storage services: - -.. code-block:: ini - - backup_driver = cinder.backup.drivers.nfs - backup_share = HOST:EXPORT_PATH - -For the ``backup_share`` option, replace ``HOST`` with the DNS resolvable -host name or the IP address of the storage server for the NFS share, and -``EXPORT_PATH`` with the path to that share. If your environment requires -that non-default mount options be specified for the share, set these as -follows: - -.. code-block:: ini - - backup_mount_options = MOUNT_OPTIONS - -``MOUNT_OPTIONS`` is a comma-separated string of NFS mount options as detailed -in the NFS man page. - -There are several other options whose default values may be overridden as -appropriate for your environment: - -.. code-block:: ini - - backup_compression_algorithm = zlib - backup_sha_block_size_bytes = 32768 - backup_file_size = 1999994880 - -The option ``backup_compression_algorithm`` can be set to ``bz2`` or ``None``. -The latter can be a useful setting when the server providing the share for the -backup repository itself performs deduplication or compression on the backup -data. - -The option ``backup_file_size`` must be a multiple of -``backup_sha_block_size_bytes``. It is effectively the maximum file size to be -used, given your environment, to hold backup data. 
Volumes larger than this -will be stored in multiple files in the backup repository. The -``backup_sha_block_size_bytes`` option determines the size of blocks from the -cinder volume being backed up on which digital signatures are calculated in -order to enable incremental backup capability. - -You also have the option of resetting the state of a backup. When creating or -restoring a backup, sometimes it may get stuck in the creating or restoring -states due to problems like the database or rabbitmq being down. In situations -like these resetting the state of the backup can restore it to a functional -status. - -Run this command to restore the state of a backup: - -.. code-block:: console - - $ cinder backup-reset-state [--state STATE] BACKUP_ID-1 BACKUP_ID-2 ... - -Run this command to create a backup of a snapshot: - -.. code-block:: console - - $ openstack volume backup create [--incremental] [--force] \ - [--snapshot SNAPSHOT_ID] VOLUME - -Where ``VOLUME`` is the name or ID of the volume, ``SNAPSHOT_ID`` is the ID of -the volume's snapshot. diff --git a/doc/source/admin/blockstorage-volume-migration.rst b/doc/source/admin/blockstorage-volume-migration.rst deleted file mode 100644 index 265faed92..000000000 --- a/doc/source/admin/blockstorage-volume-migration.rst +++ /dev/null @@ -1,208 +0,0 @@ -.. _volume_migration.rst: - -=============== -Migrate volumes -=============== - -OpenStack has the ability to migrate volumes between back ends which support -its volume-type. Migrating a volume transparently moves its data from the -current back end for the volume to a new one. This is an administrator -function, and can be used for functions including storage evacuation (for -maintenance or decommissioning), or manual optimizations (for example, -performance, reliability, or cost). - -These workflows are possible for a migration: - -#. If the storage can migrate the volume on its own, it is given the - opportunity to do so. 
This allows the Block Storage driver to enable - optimizations that the storage might be able to perform. If the back end - is not able to perform the migration, the Block Storage uses one of two - generic flows, as follows. - -#. If the volume is not attached, the Block Storage service creates a volume - and copies the data from the original to the new volume. - - .. note:: - - While most back ends support this function, not all do. See the `driver - documentation `__ - in the OpenStack Configuration Reference for more details. - -#. If the volume is attached to a VM instance, the Block Storage creates a - volume, and calls Compute to copy the data from the original to the new - volume. Currently this is supported only by the Compute libvirt driver. - -As an example, this scenario shows two LVM back ends and migrates an attached -volume from one to the other. This scenario uses the third migration flow. - -First, list the available back ends: - -.. code-block:: console - - # cinder get-pools - +----------+----------------------------------------------------+ - | Property | Value | - +----------+----------------------------------------------------+ - | name | server1@lvmstorage-1#lvmstorage-1 | - +----------+----------------------------------------------------+ - +----------+----------------------------------------------------+ - | Property | Value | - +----------+----------------------------------------------------+ - | name | server2@lvmstorage-2#lvmstorage-2 | - +----------+----------------------------------------------------+ - -.. note:: - - Only Block Storage V2 API supports :command:`cinder get-pools`. - -You can also get available back ends like following: - -.. code-block:: console - - # cinder-manage host list - server1@lvmstorage-1 zone1 - server2@lvmstorage-2 zone1 - -But it needs to add pool name in the end. For example, -``server1@lvmstorage-1#zone1``. 
- -Next, as the admin user, you can see the current status of the volume -(replace the example ID with your own): - -.. code-block:: console - - $ openstack volume show 6088f80a-f116-4331-ad48-9afb0dfb196c - - +--------------------------------+--------------------------------------+ - | Field | Value | - +--------------------------------+--------------------------------------+ - | attachments | [] | - | availability_zone | zone1 | - | bootable | false | - | consistencygroup_id | None | - | created_at | 2013-09-01T14:53:22.000000 | - | description | test | - | encrypted | False | - | id | 6088f80a-f116-4331-ad48-9afb0dfb196c | - | migration_status | None | - | multiattach | False | - | name | test | - | os-vol-host-attr:host | server1@lvmstorage-1#lvmstorage-1 | - | os-vol-mig-status-attr:migstat | None | - | os-vol-mig-status-attr:name_id | None | - | os-vol-tenant-attr:tenant_id | d88310717a8e4ebcae84ed075f82c51e | - | properties | readonly='False' | - | replication_status | disabled | - | size | 1 | - | snapshot_id | None | - | source_volid | None | - | status | in-use | - | type | None | - | updated_at | 2016-07-31T07:22:19.000000 | - | user_id | d8e5e5727f3a4ce1886ac8ecec058e83 | - +--------------------------------+--------------------------------------+ - -Note these attributes: - -* ``os-vol-host-attr:host`` - the volume's current back end. -* ``os-vol-mig-status-attr:migstat`` - the status of this volume's migration - (None means that a migration is not currently in progress). -* ``os-vol-mig-status-attr:name_id`` - the volume ID that this volume's name - on the back end is based on. Before a volume is ever migrated, its name on - the back end storage may be based on the volume's ID (see the - ``volume_name_template`` configuration parameter). For example, if - ``volume_name_template`` is kept as the default value (``volume-%s``), your - first LVM back end has a logical volume named - ``volume-6088f80a-f116-4331-ad48-9afb0dfb196c``. 
During the course of a - migration, if you create a volume and copy over the data, the volume get - the new name but keeps its original ID. This is exposed by the ``name_id`` - attribute. - - .. note:: - - If you plan to decommission a block storage node, you must stop the - ``cinder`` volume service on the node after performing the migration. - - On nodes that run CentOS, Fedora, openSUSE, Red Hat Enterprise Linux, - or SUSE Linux Enterprise, run: - - .. code-block:: console - - # service openstack-cinder-volume stop - # chkconfig openstack-cinder-volume off - - On nodes that run Ubuntu or Debian, run: - - .. code-block:: console - - # service cinder-volume stop - # chkconfig cinder-volume off - - Stopping the cinder volume service will prevent volumes from being - allocated to the node. - -Migrate this volume to the second LVM back end: - -.. code-block:: console - - $ cinder migrate 6088f80a-f116-4331-ad48-9afb0dfb196c \ - server2@lvmstorage-2#lvmstorage-2 - - Request to migrate volume 6088f80a-f116-4331-ad48-9afb0dfb196c has been - accepted. - -You can use the :command:`openstack volume show` command to see the status of -the migration. While migrating, the ``migstat`` attribute shows states such as -``migrating`` or ``completing``. On error, ``migstat`` is set to None and the -host attribute shows the original ``host``. On success, in this example, the -output looks like: - -.. 
code-block:: console - - $ openstack volume show 6088f80a-f116-4331-ad48-9afb0dfb196c - - +--------------------------------+--------------------------------------+ - | Field | Value | - +--------------------------------+--------------------------------------+ - | attachments | [] | - | availability_zone | zone1 | - | bootable | false | - | consistencygroup_id | None | - | created_at | 2013-09-01T14:53:22.000000 | - | description | test | - | encrypted | False | - | id | 6088f80a-f116-4331-ad48-9afb0dfb196c | - | migration_status | None | - | multiattach | False | - | name | test | - | os-vol-host-attr:host | server2@lvmstorage-2#lvmstorage-2 | - | os-vol-mig-status-attr:migstat | completing | - | os-vol-mig-status-attr:name_id | None | - | os-vol-tenant-attr:tenant_id | d88310717a8e4ebcae84ed075f82c51e | - | properties | readonly='False' | - | replication_status | disabled | - | size | 1 | - | snapshot_id | None | - | source_volid | None | - | status | in-use | - | type | None | - | updated_at | 2017-02-22T02:35:03.000000 | - | user_id | d8e5e5727f3a4ce1886ac8ecec058e83 | - +--------------------------------+--------------------------------------+ - -Note that ``migstat`` is None, host is the new host, and ``name_id`` holds the -ID of the volume created by the migration. If you look at the second LVM back -end, you find the logical volume -``volume-133d1f56-9ffc-4f57-8798-d5217d851862``. - -.. note:: - - The migration is not visible to non-admin users (for example, through the - volume ``status``). However, some operations are not allowed while a - migration is taking place, such as attaching/detaching a volume and - deleting a volume. If a user performs such an action during a migration, - an error is returned. - -.. note:: - - Migrating volumes that have snapshots are currently not allowed. 
diff --git a/doc/source/admin/blockstorage-volume-number-weigher.rst b/doc/source/admin/blockstorage-volume-number-weigher.rst deleted file mode 100644 index e0934b45e..000000000 --- a/doc/source/admin/blockstorage-volume-number-weigher.rst +++ /dev/null @@ -1,88 +0,0 @@ -.. _volume_number_weigher: - -======================================= -Configure and use volume number weigher -======================================= - -OpenStack Block Storage enables you to choose a volume back end according -to ``free_capacity`` and ``allocated_capacity``. The volume number weigher -feature lets the scheduler choose a volume back end based on its volume -number in the volume back end. This can provide another means to improve -the volume back ends' I/O balance and the volumes' I/O performance. - -Enable volume number weigher -~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -To enable a volume number weigher, set the -``scheduler_default_weighers`` to ``VolumeNumberWeigher`` flag in the -``cinder.conf`` file to define ``VolumeNumberWeigher`` -as the selected weigher. - -Configure multiple-storage back ends -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -To configure ``VolumeNumberWeigher``, use ``LVMVolumeDriver`` -as the volume driver. - -This configuration defines two LVM volume groups: ``stack-volumes`` with -10 GB capacity and ``stack-volumes-1`` with 60 GB capacity. -This example configuration defines two back ends: - -.. code-block:: ini - - scheduler_default_weighers=VolumeNumberWeigher - enabled_backends=lvmdriver-1,lvmdriver-2 - [lvmdriver-1] - volume_group=stack-volumes - volume_driver=cinder.volume.drivers.lvm.LVMVolumeDriver - volume_backend_name=LVM - - [lvmdriver-2] - volume_group=stack-volumes-1 - volume_driver=cinder.volume.drivers.lvm.LVMVolumeDriver - volume_backend_name=LVM - -Volume type -~~~~~~~~~~~ - -Define a volume type in Block Storage: - -.. 
code-block:: console - - $ openstack volume type create lvm - -Create an extra specification that links the volume type to a back-end name: - -.. code-block:: console - - $ openstack volume type set lvm --property volume_backend_name=LVM - -This example creates an lvm volume type with -``volume_backend_name=LVM`` as extra specifications. - -Usage -~~~~~ - -To create six 1-GB volumes, run the -:command:`openstack volume create --size 1 --type lvm volume1` command -six times: - -.. code-block:: console - - $ openstack volume create --size 1 --type lvm volume1 - -This command creates three volumes in ``stack-volumes`` and -three volumes in ``stack-volumes-1``. - -List the available volumes: - -.. code-block:: console - - # lvs - LV VG Attr LSize Pool Origin Data% Move Log Copy% Convert - volume-3814f055-5294-4796-b5e6-1b7816806e5d stack-volumes -wi-a---- 1.00g - volume-72cf5e79-99d2-4d23-b84e-1c35d3a293be stack-volumes -wi-a---- 1.00g - volume-96832554-0273-4e9d-902b-ad421dfb39d1 stack-volumes -wi-a---- 1.00g - volume-169386ef-3d3e-4a90-8439-58ceb46889d9 stack-volumes-1 -wi-a---- 1.00g - volume-460b0bbb-d8a0-4bc3-9882-a129a5fe8652 stack-volumes-1 -wi-a---- 1.00g - volume-9a08413b-0dbc-47c9-afb8-41032ab05a41 stack-volumes-1 -wi-a---- 1.00g diff --git a/doc/source/admin/blockstorage.rst b/doc/source/admin/blockstorage.rst deleted file mode 100644 index ac577454c..000000000 --- a/doc/source/admin/blockstorage.rst +++ /dev/null @@ -1,32 +0,0 @@ -.. _block_storage: - -============= -Block Storage -============= - -The OpenStack Block Storage service works through the interaction of -a series of daemon processes named ``cinder-*`` that reside -persistently on the host machine or machines. You can run all the -binaries from a single node, or spread across multiple nodes. You can -also run them on the same node as other OpenStack services. - -To administer the OpenStack Block Storage service, it is helpful to -understand a number of concepts. 
You must make certain choices when -you configure the Block Storage service in OpenStack. The bulk of the -options come down to two choices - single node or multi-node install. -You can read a longer discussion about `Storage Decisions`_ in the -`OpenStack Operations Guide`_. - -OpenStack Block Storage enables you to add extra block-level storage -to your OpenStack Compute instances. This service is similar to the -Amazon EC2 Elastic Block Storage (EBS) offering. - -.. toctree:: - :maxdepth: 1 - - blockstorage-api-throughput.rst - blockstorage-manage-volumes.rst - blockstorage-troubleshoot.rst - -.. _`Storage Decisions`: https://docs.openstack.org/ops-guide/arch-storage.html -.. _`OpenStack Operations Guide`: https://docs.openstack.org/ops-guide/ diff --git a/doc/source/admin/ts-HTTP-bad-req-in-cinder-vol-log.rst b/doc/source/admin/ts-HTTP-bad-req-in-cinder-vol-log.rst deleted file mode 100644 index 454b74da8..000000000 --- a/doc/source/admin/ts-HTTP-bad-req-in-cinder-vol-log.rst +++ /dev/null @@ -1,46 +0,0 @@ -===================================== -HTTP bad request in cinder volume log -===================================== - -Problem -~~~~~~~ - -These errors appear in the ``cinder-volume.log`` file: - -.. code-block:: console - - 2013-05-03 15:16:33 INFO [cinder.volume.manager] Updating volume status - 2013-05-03 15:16:33 DEBUG [hp3parclient.http] - REQ: curl -i https://10.10.22.241:8080/api/v1/cpgs -X GET -H "X-Hp3Par-Wsapi-Sessionkey: 48dc-b69ed2e5 - f259c58e26df9a4c85df110c-8d1e8451" -H "Accept: application/json" -H "User-Agent: python-3parclient" - - 2013-05-03 15:16:33 DEBUG [hp3parclient.http] RESP:{'content-length': 311, 'content-type': 'text/plain', - 'status': '400'} - - 2013-05-03 15:16:33 DEBUG [hp3parclient.http] RESP BODY:Second simultaneous read on fileno 13 detected. - Unless you really know what you're doing, make sure that only one greenthread can read any particular socket. - Consider using a pools.Pool. 
If you do know what you're doing and want to disable this error, - call eventlet.debug.hub_multiple_reader_prevention(False) - - 2013-05-03 15:16:33 ERROR [cinder.manager] Error during VolumeManager._report_driver_status: Bad request (HTTP 400) - Traceback (most recent call last): - File "/usr/lib/python2.7/dist-packages/cinder/manager.py", line 167, in periodic_tasks task(self, context) - File "/usr/lib/python2.7/dist-packages/cinder/volume/manager.py", line 690, in _report_driver_status volume_stats = - self.driver.get_volume_stats(refresh=True) - File "/usr/lib/python2.7/dist-packages/cinder/volume/drivers/san/hp/hp_3par_fc.py", line 77, in get_volume_stats stats = - self.common.get_volume_stats(refresh, self.client) - File "/usr/lib/python2.7/dist-packages/cinder/volume/drivers/san/hp/hp_3par_common.py", line 421, in get_volume_stats cpg = - client.getCPG(self.config.hp3par_cpg) - File "/usr/lib/python2.7/dist-packages/hp3parclient/client.py", line 231, in getCPG cpgs = self.getCPGs() - File "/usr/lib/python2.7/dist-packages/hp3parclient/client.py", line 217, in getCPGs response, body = self.http.get('/cpgs') - File "/usr/lib/python2.7/dist-packages/hp3parclient/http.py", line 255, in get return self._cs_request(url, 'GET', **kwargs) - File "/usr/lib/python2.7/dist-packages/hp3parclient/http.py", line 224, in _cs_request **kwargs) - File "/usr/lib/python2.7/dist-packages/hp3parclient/http.py", line 198, in _time_request resp, body = self.request(url, method, **kwargs) - File "/usr/lib/python2.7/dist-packages/hp3parclient/http.py", line 192, in request raise exceptions.from_response(resp, body) - HTTPBadRequest: Bad request (HTTP 400) - -Solution -~~~~~~~~ - -You need to update your copy of the ``hp_3par_fc.py`` driver which -contains the synchronization code. 
diff --git a/doc/source/admin/ts-cinder-config.rst b/doc/source/admin/ts-cinder-config.rst deleted file mode 100644 index 502e5c61e..000000000 --- a/doc/source/admin/ts-cinder-config.rst +++ /dev/null @@ -1,200 +0,0 @@ -============================================ -Troubleshoot the Block Storage configuration -============================================ - -Most Block Storage errors are caused by incorrect volume configurations -that result in volume creation failures. To resolve these failures, -review these logs: - -- ``cinder-api`` log (``/var/log/cinder/api.log``) - -- ``cinder-volume`` log (``/var/log/cinder/volume.log``) - -The ``cinder-api`` log is useful for determining if you have endpoint or -connectivity issues. If you send a request to create a volume and it -fails, review the ``cinder-api`` log to determine whether the request made -it to the Block Storage service. If the request is logged and you see no -errors or tracebacks, check the ``cinder-volume`` log for errors or -tracebacks. - -.. note:: - - Create commands are listed in the ``cinder-api`` log. - -These entries in the ``cinder.openstack.common.log`` file can be used to -assist in troubleshooting your Block Storage configuration. - -.. code-block:: console - - # Print debugging output (set logging level to DEBUG instead - # of default WARNING level). (boolean value) - # debug=false - - # Log output to standard error (boolean value) - # use_stderr=true - - # Default file mode used when creating log files (string - # value) - # logfile_mode=0644 - - # format string to use for log messages with context (string - # value) - # logging_context_format_string=%(asctime)s.%(msecs)03d %(levelname)s - # %(name)s [%(request_id)s %(user)s %(tenant)s] %(instance)s%(message)s - - # format string to use for log mes #logging_default_format_string=%(asctime)s. 
- # %(msecs)03d %(process)d %(levelname)s %(name)s [-] %(instance)s%(message)s - - # data to append to log format when level is DEBUG (string - # value) - # logging_debug_format_suffix=%(funcName)s %(pathname)s:%(lineno)d - - # prefix each line of exception output with this format - # (string value) - # logging_exception_prefix=%(asctime)s.%(msecs)03d %(process)d TRACE %(name)s - # %(instance)s - - # list of logger=LEVEL pairs (list value) - # default_log_levels=amqplib=WARN,sqlalchemy=WARN,boto=WARN,suds=INFO, - # keystone=INFO,eventlet.wsgi.server=WARNsages without context - # (string value) - - # If an instance is passed with the log message, format it - # like this (string value) - # instance_format="[instance: %(uuid)s]" - - # If an instance UUID is passed with the log message, format - # it like this (string value) - #instance_uuid_format="[instance: %(uuid)s] " - - # Format string for %%(asctime)s in log records. Default: - # %(default)s (string value) - # log_date_format=%Y-%m-%d %H:%M:%S - - # (Optional) Name of log file to output to. If not set, - # logging will go to stdout. (string value) - # log_file= - - # (Optional) The directory to keep log files in (will be - # prepended to --log-file) (string value) - # log_dir= - # instance_uuid_format="[instance: %(uuid)s]" - - # If this option is specified, the logging configuration file - # specified is used and overrides any other logging options - # specified. Please see the Python logging module - # documentation for details on logging configuration files. - # (string value) - # Use syslog for logging. (boolean value) - # use_syslog=false - - # syslog facility to receive log lines (string value) - # syslog_log_facility=LOG_USER - # log_config= - -These common issues might occur during configuration, and the following -potential solutions describe how to address the issues. 
- -Issues with ``state_path`` and ``volumes_dir`` settings -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -Problem -------- - -The OpenStack Block Storage uses ``tgtd`` as the default iSCSI helper -and implements persistent targets. This means that in the case of a -``tgt`` restart, or even a node reboot, your existing volumes on that -node will be restored automatically with their original :term:`IQN `. - -By default, Block Storage uses a ``state_path`` variable, which if -installing with Yum or APT should be set to ``/var/lib/cinder/``. -The next part is the ``volumes_dir`` variable, by default this appends -a ``volumes`` directory to the ``state_path``. The result is a -file-tree: ``/var/lib/cinder/volumes/``. - -Solution --------- - -In order to ensure nodes are restored to their original IQN, -the iSCSI target information needs to be stored in a file on creation -that can be queried in case of restart of the ``tgt daemon``. While the -installer should handle all this, it can go wrong. - -If you have trouble creating volumes and this directory does not exist -you should see an error message in the ``cinder-volume`` log indicating -that the ``volumes_dir`` does not exist, and it should provide -information about which path it was looking for. - -The persistent tgt include file -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -Problem -------- - -The Block Storage service may have issues locating the persistent -``tgt include`` file. Along with the ``volumes_dir`` option, the -iSCSI target driver also needs to be configured to look in the correct -place for the persistent ``tgt include `` file. This is an entry -in the ``/etc/tgt/conf.d`` file that should have been set during the -OpenStack installation. - -Solution --------- - -If issues occur, verify that you have a ``/etc/tgt/conf.d/cinder.conf`` -file. If the file is not present, create it with: - -.. 
code-block:: console - - # echo 'include /var/lib/cinder/volumes/ *' >> /etc/tgt/conf.d/cinder.conf - -No sign of attach call in the ``cinder-api`` log -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -Problem -------- - -The attach call is unavailable, or not appearing in the ``cinder-api`` log. - -Solution --------- - -Adjust the ``nova.conf`` file, and make sure that your ``nova.conf`` -has this entry: - -.. code-block:: ini - - volume_api_class=nova.volume.cinder.API - -Failed to create iscsi target error in the ``cinder-volume.log`` file -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -Problem -------- - -.. code-block:: console - - 2013-03-12 01:35:43 1248 TRACE cinder.openstack.common.rpc.amqp \ - ISCSITargetCreateFailed: \ - Failed to create iscsi target for volume \ - volume-137641b2-af72-4a2f-b243-65fdccd38780. - -You might see this error in ``cinder-volume.log`` after trying to -create a volume that is 1 GB. - -Solution --------- - -To fix this issue, change the content of the ``/etc/tgt/targets.conf`` -file from ``include /etc/tgt/conf.d/*.conf`` to -``include /etc/tgt/conf.d/cinder_tgt.conf``, as follows: - -.. code-block:: shell - - include /etc/tgt/conf.d/cinder_tgt.conf - include /etc/tgt/conf.d/cinder.conf - default-driver iscsi - -Restart ``tgt`` and ``cinder-*`` services, so they pick up the new -configuration. diff --git a/doc/source/admin/ts-duplicate-3par-host.rst b/doc/source/admin/ts-duplicate-3par-host.rst deleted file mode 100644 index 8ff1af3e8..000000000 --- a/doc/source/admin/ts-duplicate-3par-host.rst +++ /dev/null @@ -1,27 +0,0 @@ -=================== -Duplicate 3PAR host -=================== - -Problem -~~~~~~~ - -This error may be caused by a volume being exported outside of OpenStack -using a host name different from the system name that OpenStack expects. -This error could be displayed with the :term:`IQN ` if the host was exported using iSCSI: - -.. 
code-block:: console - - Duplicate3PARHost: 3PAR Host already exists: Host wwn 50014380242B9750 \ - already used by host cld4b5ubuntuW(id = 68. The hostname must be called\ - 'cld4b5ubuntu'. - -Solution -~~~~~~~~ - -Change the 3PAR host name to match the one that OpenStack expects. The -3PAR host constructed by the driver uses just the local host name, not -the fully qualified domain name (FQDN) of the compute host. For example, -if the FQDN was *myhost.example.com*, just *myhost* would be used as the -3PAR host name. IP addresses are not allowed as host names on the 3PAR -storage server. diff --git a/doc/source/admin/ts-eql-volume-size.rst b/doc/source/admin/ts-eql-volume-size.rst deleted file mode 100644 index f0eb7987d..000000000 --- a/doc/source/admin/ts-eql-volume-size.rst +++ /dev/null @@ -1,223 +0,0 @@ -======================================================================== -Addressing discrepancies in reported volume sizes for EqualLogic storage -======================================================================== - -Problem -~~~~~~~ - -There is a discrepancy between both the actual volume size in EqualLogic -(EQL) storage and the image size in the Image service, with what is -reported to OpenStack database. This could lead to confusion -if a user is creating volumes from an image that was uploaded from an EQL -volume (through the Image service). The image size is slightly larger -than the target volume size; this is because EQL size reporting accounts -for additional storage used by EQL for internal volume metadata. - -To reproduce the issue follow the steps in the following procedure. - -This procedure assumes that the EQL array is provisioned, and that -appropriate configuration settings have been included in -``/etc/cinder/cinder.conf`` to connect to the EQL array. - -Create a new volume. Note the ID and size of the volume. In the -following example, the ID and size are -``74cf9c04-4543-47ae-a937-a9b7c6c921e7`` and ``1``, respectively: - -.. 
code-block:: console - - $ openstack volume create volume1 --size 1 - - +---------------------+--------------------------------------+ - | Field | Value | - +---------------------+--------------------------------------+ - | attachments | [] | - | availability_zone | nova | - | bootable | false | - | consistencygroup_id | None | - | created_at | 2016-12-06T11:33:30.957318 | - | description | None | - | encrypted | False | - | id | 74cf9c04-4543-47ae-a937-a9b7c6c921e7 | - | migration_status | None | - | multiattach | False | - | name | volume1 | - | properties | | - | replication_status | disabled | - | size | 1 | - | snapshot_id | None | - | source_volid | None | - | status | creating | - | type | iscsi | - | updated_at | None | - | user_id | c36cec73b0e44876a4478b1e6cd749bb | - +---------------------+--------------------------------------+ - -Verify the volume size on the EQL array by using its command-line -interface. - -The actual size (``VolReserve``) is 1.01 GB. The EQL Group Manager -should also report a volume size of 1.01 GB: - -.. code-block:: console - - eql> volume select volume-74cf9c04-4543-47ae-a937-a9b7c6c921e7 - eql (volume_volume-74cf9c04-4543-47ae-a937-a9b7c6c921e7)> show - _______________________________ Volume Information ________________________________ - Name: volume-74cf9c04-4543-47ae-a937-a9b7c6c921e7 - Size: 1GB - VolReserve: 1.01GB - VolReservelnUse: 0MB - ReplReservelnUse: 0MB - iSCSI Alias: volume-74cf9c04-4543-47ae-a937-a9b7c6c921e7 - iSCSI Name: iqn.2001-05.com.equallogic:0-8a0906-19f91850c-067000000b4532cl-volume-74cf9c04-4543-47ae-a937-a9b7c6c921e7 - ActualMembers: 1 - Snap-Warn: 10% - Snap-Depletion: delete-oldest - Description: - Snap-Reserve: 100% - Snap-Reserve-Avail: 100% (1.01GB) - Permission: read-write - DesiredStatus: online - Status: online - Connections: O - Snapshots: O - Bind: - Type: not-replicated - ReplicationReserveSpace: 0MB - -Create a new image from this volume: - -.. 
code-block:: console - - $ openstack image create --volume volume1 \ - --disk-format raw --container-format bare image_from_volume1 - - +---------------------+--------------------------------------+ - | Field | Value | - +---------------------+--------------------------------------+ - | container_format | bare | - | disk_format | raw | - | display_description | None | - | id | 850fd393-a968-4259-9c65-6b495cba5209 | - | image_id | 3020a21d-ba37-4495-8899-07fc201161b9 | - | image_name | image_from_volume1 | - | is_public | False | - | protected | False | - | size | 1 | - | status | uploading | - | updated_at | 2016-12-05T12:43:56.000000 | - | volume_type | iscsi | - +---------------------+--------------------------------------+ - -When you uploaded the volume in the previous step, the Image service -reported the volume's size as ``1`` (GB). However, when using -:command:`openstack image show` to show the image, the displayed size is -1085276160 bytes, or roughly 1.01 GB: - -+------------------+--------------------------------------+ -| Property | Value | -+------------------+--------------------------------------+ -| checksum | cd573cfaace07e7949bc0c46028904ff | -| container_format | bare | -| created_at | 2016-12-06T11:39:06Z | -| disk_format | raw | -| id | 3020a21d-ba37-4495-8899-07fc201161b9 | -| min_disk | 0 | -| min_ram | 0 | -| name | image_from_volume1 | -| owner | 5669caad86a04256994cdf755df4d3c1 | -| protected | False | -| size | 1085276160 | -| status | active | -| tags | [] | -| updated_at | 2016-12-06T11:39:24Z | -| virtual_size | None | -| visibility | private | -+------------------+--------------------------------------+ - - - -Create a new volume using the previous image (``image_id 3020a21d-ba37-4495 --8899-07fc201161b9`` in this example) as -the source. Set the target volume size to 1 GB; this is the size -reported by the ``cinder`` tool when you uploaded the volume to the -Image service: - -.. 
code-block:: console - - $ openstack volume create volume2 --size 1 --image 3020a21d-ba37-4495-8899-07fc201161b9 - ERROR: Invalid input received: Size of specified image 2 is larger - than volume size 1. (HTTP 400) (Request-ID: req-4b9369c0-dec5-4e16-a114-c0cdl6bSd210) - -The attempt to create a new volume based on the size reported by the -``cinder`` tool will then fail. - -Solution -~~~~~~~~ - -To work around this problem, increase the target size of the new image -to the next whole number. In the problem example, you created a 1 GB -volume to be used as volume-backed image, so a new volume using this -volume-backed image should use a size of 2 GB: - -.. code-block:: console - - $ openstack volume create volume2 --size 1 --image 3020a21d-ba37-4495-8899-07fc201161b9 - +---------------------+--------------------------------------+ - | Field | Value | - +---------------------+--------------------------------------+ - | attachments | [] | - | availability_zone | nova | - | bootable | false | - | consistencygroup_id | None | - | created_at | 2016-12-06T11:49:06.031768 | - | description | None | - | encrypted | False | - | id | a70d6305-f861-4382-84d8-c43128be0013 | - | migration_status | None | - | multiattach | False | - | name | volume2 | - | properties | | - | replication_status | disabled | - | size | 1 | - | snapshot_id | None | - | source_volid | None | - | status | creating | - | type | iscsi | - | updated_at | None | - | user_id | c36cec73b0e44876a4478b1e6cd749bb | - +---------------------+--------------------------------------+ - -.. note:: - - The dashboard suggests a suitable size when you create a new volume - based on a volume-backed image. - -You can then check this new volume into the EQL array: - -.. 
code-block:: console - - eql> volume select volume-64e8eb18-d23f-437b-bcac-b352afa6843a - eql (volume_volume-61e8eb18-d23f-437b-bcac-b352afa6843a)> show - ______________________________ Volume Information _______________________________ - Name: volume-64e8eb18-d23f-437b-bcac-b352afa6843a - Size: 2GB - VolReserve: 2.01GB - VolReserveInUse: 1.01GB - ReplReserveInUse: 0MB - iSCSI Alias: volume-64e8eb18-d23f-437b-bcac-b352afa6843a - iSCSI Name: iqn.2001-05.com.equallogic:0-8a0906-e3091850e-eae000000b7S32cl-volume-64e8eb18-d23f-437b-bcac-b3S2afa6Bl3a - ActualMembers: 1 - Snap-Warn: 10% - Snap-Depletion: delete-oldest - Description: - Snap-Reserve: 100% - Snap-Reserve-Avail: 100% (2GB) - Permission: read-write - DesiredStatus: online - Status: online - Connections: 1 - Snapshots: O - Bind: - Type: not-replicated - ReplicationReserveSpace: 0MB diff --git a/doc/source/admin/ts-failed-attach-vol-after-detach.rst b/doc/source/admin/ts-failed-attach-vol-after-detach.rst deleted file mode 100644 index 6ed58960c..000000000 --- a/doc/source/admin/ts-failed-attach-vol-after-detach.rst +++ /dev/null @@ -1,35 +0,0 @@ -======================================= -Failed to attach volume after detaching -======================================= - -Problem -~~~~~~~ - -Failed to attach a volume after detaching the same volume. - -Solution -~~~~~~~~ - -You must change the device name on the :command:`nova-attach` command. The VM -might not clean up after a :command:`nova-detach` command runs. This example -shows how the :command:`nova-attach` command fails when you use the ``vdb``, -``vdc``, or ``vdd`` device names: - -.. code-block:: console - - # ls -al /dev/disk/by-path/ - total 0 - drwxr-xr-x 2 root root 200 2012-08-29 17:33 . - drwxr-xr-x 5 root root 100 2012-08-29 17:33 .. 
- lrwxrwxrwx 1 root root 9 2012-08-29 17:33 pci-0000:00:04.0-virtio-pci-virtio0 -> ../../vda - lrwxrwxrwx 1 root root 10 2012-08-29 17:33 pci-0000:00:04.0-virtio-pci-virtio0-part1 -> ../../vda1 - lrwxrwxrwx 1 root root 10 2012-08-29 17:33 pci-0000:00:04.0-virtio-pci-virtio0-part2 -> ../../vda2 - lrwxrwxrwx 1 root root 10 2012-08-29 17:33 pci-0000:00:04.0-virtio-pci-virtio0-part5 -> ../../vda5 - lrwxrwxrwx 1 root root 9 2012-08-29 17:33 pci-0000:00:06.0-virtio-pci-virtio2 -> ../../vdb - lrwxrwxrwx 1 root root 9 2012-08-29 17:33 pci-0000:00:08.0-virtio-pci-virtio3 -> ../../vdc - lrwxrwxrwx 1 root root 9 2012-08-29 17:33 pci-0000:00:09.0-virtio-pci-virtio4 -> ../../vdd - lrwxrwxrwx 1 root root 10 2012-08-29 17:33 pci-0000:00:09.0-virtio-pci-virtio4-part1 -> ../../vdd1 - -You might also have this problem after attaching and detaching the same -volume from the same VM with the same mount point multiple times. In -this case, restart the KVM host. diff --git a/doc/source/admin/ts-failed-attach-vol-no-sysfsutils.rst b/doc/source/admin/ts-failed-attach-vol-no-sysfsutils.rst deleted file mode 100644 index 1f9354f08..000000000 --- a/doc/source/admin/ts-failed-attach-vol-no-sysfsutils.rst +++ /dev/null @@ -1,30 +0,0 @@ -================================================= -Failed to attach volume, systool is not installed -================================================= - -Problem -~~~~~~~ - -This warning and error occurs if you do not have the required -``sysfsutils`` package installed on the compute node: - -.. 
code-block:: console - - WARNING nova.virt.libvirt.utils [req-1200f887-c82b-4e7c-a891-fac2e3735dbb\ - admin admin|req-1200f887-c82b-4e7c-a891-fac2e3735dbb admin admin] systool\ - is not installed - ERROR nova.compute.manager [req-1200f887-c82b-4e7c-a891-fac2e3735dbb admin\ - admin|req-1200f887-c82b-4e7c-a891-fac2e3735dbb admin admin] - [instance: df834b5a-8c3f-477a-be9b-47c97626555c|instance: df834b5a-8c3f-47\ - 7a-be9b-47c97626555c] - Failed to attach volume 13d5c633-903a-4764-a5a0-3336945b1db1 at /dev/vdk. - -Solution -~~~~~~~~ - -Run the following command on the compute node to install the -``sysfsutils`` packages: - -.. code-block:: console - - # apt-get install sysfsutils diff --git a/doc/source/admin/ts-failed-connect-vol-FC-SAN.rst b/doc/source/admin/ts-failed-connect-vol-FC-SAN.rst deleted file mode 100644 index 2ec994262..000000000 --- a/doc/source/admin/ts-failed-connect-vol-FC-SAN.rst +++ /dev/null @@ -1,29 +0,0 @@ -================================== -Failed to connect volume in FC SAN -================================== - -Problem -~~~~~~~ - -The compute node failed to connect to a volume in a Fibre Channel (FC) SAN -configuration. The WWN may not be zoned correctly in your FC SAN that -links the compute host to the storage array: - -.. 
code-block:: console - - ERROR nova.compute.manager [req-2ddd5297-e405-44ab-aed3-152cd2cfb8c2 admin\ - demo|req-2ddd5297-e405-44ab-aed3-152cd2cfb8c2 admin demo] [instance: 60ebd\ - 6c7-c1e3-4bf0-8ef0-f07aa4c3d5f3|instance: 60ebd6c7-c1e3-4bf0-8ef0-f07aa4c3\ - d5f3] - Failed to connect to volume 6f6a6a9c-dfcf-4c8d-b1a8-4445ff883200 while\ - attaching at /dev/vdjTRACE nova.compute.manager [instance: 60ebd6c7-c1e3-4\ - bf0-8ef0-f07aa4c3d5f3|instance: 60ebd6c7-c1e3-4bf0-8ef0-f07aa4c3d5f3] - Traceback (most recent call last):…f07aa4c3d5f3\] ClientException: The\ - server has either erred or is incapable of performing the requested\ - operation.(HTTP 500)(Request-ID: req-71e5132b-21aa-46ee-b3cc-19b5b4ab2f00) - -Solution -~~~~~~~~ - -The network administrator must configure the FC SAN fabric by correctly -zoning the WWN (port names) from your compute node HBAs. diff --git a/doc/source/admin/ts-multipath-warn.rst b/doc/source/admin/ts-multipath-warn.rst deleted file mode 100644 index ca8a747d4..000000000 --- a/doc/source/admin/ts-multipath-warn.rst +++ /dev/null @@ -1,30 +0,0 @@ -========================== -Multipath call failed exit -========================== - -Problem -~~~~~~~ - -Multipath call failed exit. This warning occurs in the Compute log -if you do not have the optional ``multipath-tools`` package installed -on the compute node. This is an optional package and the volume -attachment does work without the multipath tools installed. -If the ``multipath-tools`` package is installed on the compute node, -it is used to perform the volume attachment. -The IDs in your message are unique to your system. - -.. code-block:: console - - WARNING nova.storage.linuxscsi [req-cac861e3-8b29-4143-8f1b-705d0084e571 - admin admin|req-cac861e3-8b29-4143-8f1b-705d0084e571 admin admin] - Multipath call failed exit (96) - -Solution -~~~~~~~~ - -Run the following command on the compute node to install the -``multipath-tools`` packages. - -.. 
code-block:: console - - # apt-get install multipath-tools diff --git a/doc/source/admin/ts-no-emulator-x86-64.rst b/doc/source/admin/ts-no-emulator-x86-64.rst deleted file mode 100644 index b45ae73dc..000000000 --- a/doc/source/admin/ts-no-emulator-x86-64.rst +++ /dev/null @@ -1,19 +0,0 @@ -========================================= -Cannot find suitable emulator for x86_64 -========================================= - -Problem -~~~~~~~ - -When you attempt to create a VM, the error shows the VM is in the -``BUILD`` then ``ERROR`` state. - -Solution -~~~~~~~~ - -On the KVM host, run :command:`cat /proc/cpuinfo`. Make sure the ``vmx`` or -``svm`` flags are set. - -Follow the instructions in the `Enable KVM -`__ section in the OpenStack Configuration Reference to enable hardware -virtualization support in your BIOS. diff --git a/doc/source/admin/ts-non-existent-host.rst b/doc/source/admin/ts-non-existent-host.rst deleted file mode 100644 index f25cdbd2a..000000000 --- a/doc/source/admin/ts-non-existent-host.rst +++ /dev/null @@ -1,25 +0,0 @@ -================= -Non-existent host -================= - -Problem -~~~~~~~ - -This error could be caused by a volume being exported outside of -OpenStack using a host name different from the system name that -OpenStack expects. This error could be displayed with the :term:`IQN ` if the host was exported using iSCSI. - -.. code-block:: console - - 2013-04-19 04:02:02.336 2814 ERROR cinder.openstack.common.rpc.common [-] Returning exception Not found (HTTP 404) - NON_EXISTENT_HOST - HOST '10' was not found to caller. - -Solution -~~~~~~~~ - -Host names constructed by the driver use just the local host name, not -the fully qualified domain name (FQDN) of the Compute host. For example, -if the FQDN was **myhost.example.com**, just **myhost** would be used as the -3PAR host name. IP addresses are not allowed as host names on the 3PAR -storage server. 
diff --git a/doc/source/admin/ts-non-existent-vlun.rst b/doc/source/admin/ts-non-existent-vlun.rst deleted file mode 100644 index f2d937792..000000000 --- a/doc/source/admin/ts-non-existent-vlun.rst +++ /dev/null @@ -1,22 +0,0 @@ -================= -Non-existent VLUN -================= - -Problem -~~~~~~~ - -This error occurs if the 3PAR host exists with the correct host name -that the OpenStack Block Storage drivers expect but the volume was -created in a different domain. - -.. code-block:: console - - HTTPNotFound: Not found (HTTP 404) NON_EXISTENT_VLUN - VLUN 'osv-DqT7CE3mSrWi4gZJmHAP-Q' was not found. - - -Solution -~~~~~~~~ - -The ``hpe3par_domain`` configuration items either need to be updated to -use the domain the 3PAR host currently resides in, or the 3PAR host -needs to be moved to the domain that the volume was created in. diff --git a/doc/source/admin/ts-vol-attach-miss-sg-scan.rst b/doc/source/admin/ts-vol-attach-miss-sg-scan.rst deleted file mode 100644 index e1d9a516b..000000000 --- a/doc/source/admin/ts-vol-attach-miss-sg-scan.rst +++ /dev/null @@ -1,28 +0,0 @@ -======================================== -Failed to Attach Volume, Missing sg_scan -======================================== - -Problem -~~~~~~~ - -Failed to attach volume to an instance, ``sg_scan`` file not found. This -error occurs when the sg3-utils package is not installed on the compute node. -The IDs in your message are unique to your system: - -.. 
code-block:: console - - ERROR nova.compute.manager [req-cf2679fd-dd9e-4909-807f-48fe9bda3642 admin admin|req-cf2679fd-dd9e-4909-807f-48fe9bda3642 admin admin] - [instance: 7d7c92e0-49fa-4a8e-87c7-73f22a9585d5|instance: 7d7c92e0-49fa-4a8e-87c7-73f22a9585d5] - Failed to attach volume 4cc104c4-ac92-4bd6-9b95-c6686746414a at /dev/vdcTRACE nova.compute.manager - [instance: 7d7c92e0-49fa-4a8e-87c7-73f22a9585d5|instance: 7d7c92e0-49fa-4a8e-87c7-73f22a9585d5] - Stdout: '/usr/local/bin/nova-rootwrap: Executable not found: /usr/bin/sg_scan' - - -Solution -~~~~~~~~ - -Run this command on the compute node to install the ``sg3-utils`` package: - -.. code-block:: console - - # apt-get install sg3-utils diff --git a/doc/source/cli/README.rst b/doc/source/cli/README.rst deleted file mode 100644 index 26bbd57dd..000000000 --- a/doc/source/cli/README.rst +++ /dev/null @@ -1,17 +0,0 @@ -========================== -Cinder CLI Documentation -========================== - -Introduction: -------------- - -This directory is intended to hold any documentation that relates to -Cinder's Command Line Interface. Note that this directory is intended for -basic descriptions of the commands supported, similar to what you would find -with a 'man page'. Tutorials or step-by-step guides should go into -'doc/source/admin' or 'doc/source/user' depending on the target audience. - -The full spec for organization of documentation may be seen in the -`OS Manuals Migration Spec -`. - diff --git a/doc/source/cli/cli-cinder-quotas.rst b/doc/source/cli/cli-cinder-quotas.rst deleted file mode 100644 index 842f64246..000000000 --- a/doc/source/cli/cli-cinder-quotas.rst +++ /dev/null @@ -1,232 +0,0 @@ -=================================== -Manage Block Storage service quotas -=================================== - -As an administrative user, you can update the OpenStack Block -Storage service quotas for a project. You can also update the quota -defaults for a new project. 
- -**Block Storage quotas** - -=================== ============================================= - Property name Defines the number of -=================== ============================================= - gigabytes Volume gigabytes allowed for each project. - snapshots Volume snapshots allowed for each project. - volumes Volumes allowed for each project. -=================== ============================================= - -View Block Storage quotas -~~~~~~~~~~~~~~~~~~~~~~~~~ - -Administrative users can view Block Storage service quotas. - -#. Obtain the project ID: - - .. code-block:: console - - $ project_id=$(openstack project show -f value -c id PROJECT_NAME) - -#. List the default quotas for a project: - - .. code-block:: console - - $ openstack quota show --default $OS_TENANT_ID - +-----------------------+-------+ - | Field | Value | - +-----------------------+-------+ - | backup-gigabytes | 1000 | - | backups | 10 | - | cores | 20 | - | fixed-ips | -1 | - | floating-ips | 50 | - | gigabytes | 1000 | - | gigabytes_lvmdriver-1 | -1 | - | health_monitors | None | - | injected-file-size | 10240 | - | injected-files | 5 | - | injected-path-size | 255 | - | instances | 10 | - | key-pairs | 100 | - | l7_policies | None | - | listeners | None | - | load_balancers | None | - | location | None | - | name | None | - | networks | 10 | - | per-volume-gigabytes | -1 | - | pools | None | - | ports | 50 | - | project | None | - | project_id | None | - | properties | 128 | - | ram | 51200 | - | rbac_policies | 10 | - | routers | 10 | - | secgroup-rules | 100 | - | secgroups | 10 | - | server-group-members | 10 | - | server-groups | 10 | - | snapshots | 10 | - | snapshots_lvmdriver-1 | -1 | - | subnet_pools | -1 | - | subnets | 10 | - | volumes | 10 | - | volumes_lvmdriver-1 | -1 | - +-----------------------+-------+ - -.. note:: - - Listing default quotas with the OpenStack command line client will - provide all quotas for storage and network services. 
Previously, the - :command:`cinder quota-defaults` command would list only storage - quotas. You can use `PROJECT_ID` or `$OS_TENANT_NAME` arguments to - show Block Storage service quotas. If the `PROJECT_ID` argument returns - errors in locating resources, use `$OS_TENANT_NAME`. - -#. View Block Storage service quotas for a project: - - .. code-block:: console - - $ openstack quota show $OS_TENANT_ID - +-----------------------+-------+ - | Field | Value | - +-----------------------+-------+ - | backup-gigabytes | 1000 | - | backups | 10 | - | cores | 20 | - | fixed-ips | -1 | - | floating-ips | 50 | - | gigabytes | 1000 | - | gigabytes_lvmdriver-1 | -1 | - | health_monitors | None | - | injected-file-size | 10240 | - | injected-files | 5 | - | injected-path-size | 255 | - | instances | 10 | - | key-pairs | 100 | - | l7_policies | None | - | listeners | None | - | load_balancers | None | - | location | None | - | name | None | - | networks | 10 | - | per-volume-gigabytes | -1 | - | pools | None | - | ports | 50 | - | project | None | - | project_id | None | - | properties | 128 | - | ram | 51200 | - | rbac_policies | 10 | - | routers | 10 | - | secgroup-rules | 100 | - | secgroups | 10 | - | server-group-members | 10 | - | server-groups | 10 | - | snapshots | 10 | - | snapshots_lvmdriver-1 | -1 | - | subnet_pools | -1 | - | subnets | 10 | - | volumes | 10 | - | volumes_lvmdriver-1 | -1 | - +-----------------------+-------+ - - -#. Show the current usage of a per-project quota: - - .. 
code-block:: console - - $ cinder quota-usage $project_id - +-----------------------+--------+----------+-------+ - | Type | In_use | Reserved | Limit | - +-----------------------+--------+----------+-------+ - | backup_gigabytes | 0 | 0 | 1000 | - | backups | 0 | 0 | 10 | - | gigabytes | 0 | 0 | 1000 | - | gigabytes_lvmdriver-1 | 0 | 0 | -1 | - | per_volume_gigabytes | 0 | 0 | -1 | - | snapshots | 0 | 0 | 10 | - | snapshots_lvmdriver-1 | 0 | 0 | -1 | - | volumes | 0 | 0 | 10 | - | volumes_lvmdriver-1 | 0 | 0 | -1 | - +-----------------------+--------+----------+-------+ - - -Edit and update Block Storage service quotas -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -Administrative users can edit and update Block Storage -service quotas. - -#. To update a default value for a new project, - update the property in the :guilabel:`cinder.quota` - section of the ``/etc/cinder/cinder.conf`` file. - For more information, see the `Block Storage service - `_ - in OpenStack Configuration Reference. - -#. To update Block Storage service quotas for an existing project - - .. code-block:: console - - $ openstack quota set --QUOTA_NAME QUOTA_VALUE PROJECT_ID - - Replace ``QUOTA_NAME`` with the quota that is to be updated, - ``QUOTA_VALUE`` with the required new value. Use the :command:`openstack quota show` - command with ``PROJECT_ID``, which is the required project ID. - - For example: - - .. 
code-block:: console - - $ openstack quota set --volumes 15 $project_id - $ openstack quota show $project_id - +-----------------------+----------------------------------+ - | Field | Value | - +-----------------------+----------------------------------+ - | backup-gigabytes | 1000 | - | backups | 10 | - | cores | 20 | - | fixed-ips | -1 | - | floating-ips | 29 | - | gigabytes | 1000 | - | gigabytes_lvmdriver-1 | -1 | - | health_monitors | None | - | injected-file-size | 10240 | - | injected-files | 5 | - | injected-path-size | 255 | - | instances | 10 | - | key-pairs | 100 | - | l7_policies | None | - | listeners | None | - | load_balancers | None | - | location | None | - | name | None | - | networks | 10 | - | per-volume-gigabytes | -1 | - | pools | None | - | ports | 50 | - | project | e436339c7f9c476cb3120cf3b9667377 | - | project_id | None | - | properties | 128 | - | ram | 51200 | - | rbac_policies | 10 | - | routers | 10 | - | secgroup-rules | 100 | - | secgroups | 10 | - | server-group-members | 10 | - | server-groups | 10 | - | snapshots | 10 | - | snapshots_lvmdriver-1 | -1 | - | subnet_pools | -1 | - | subnets | 10 | - | volumes | 15 | - | volumes_lvmdriver-1 | -1 | - +-----------------------+----------------------------------+ - -#. To clear per-project quota limits: - - .. code-block:: console - - $ cinder quota-delete PROJECT_ID diff --git a/doc/source/cli/cli-cinder-scheduling.rst b/doc/source/cli/cli-cinder-scheduling.rst deleted file mode 100644 index d33dd7c99..000000000 --- a/doc/source/cli/cli-cinder-scheduling.rst +++ /dev/null @@ -1,58 +0,0 @@ -=============================== -Manage Block Storage scheduling -=============================== - -As an administrative user, you have some control over which volume -back end your volumes reside on. You can specify affinity or -anti-affinity between two volumes. 
Affinity between volumes means -that they are stored on the same back end, whereas anti-affinity -means that they are stored on different back ends. - -For information on how to set up multiple back ends for Cinder, -refer to :ref:`multi_backend`. - -Example Usages -~~~~~~~~~~~~~~ - -#. Create a new volume on the same back end as Volume_A: - - .. code-block:: console - - $ openstack volume create --hint same_host=Volume_A-UUID \ - --size SIZE VOLUME_NAME - -#. Create a new volume on a different back end than Volume_A: - - .. code-block:: console - - $ openstack volume create --hint different_host=Volume_A-UUID \ - --size SIZE VOLUME_NAME - -#. Create a new volume on the same back end as Volume_A and Volume_B: - - .. code-block:: console - - $ openstack volume create --hint same_host=Volume_A-UUID \ - --hint same_host=Volume_B-UUID --size SIZE VOLUME_NAME - - Or: - - .. code-block:: console - - $ openstack volume create --hint same_host="[Volume_A-UUID, \ - Volume_B-UUID]" --size SIZE VOLUME_NAME - -#. Create a new volume on a different back end than both Volume_A and - Volume_B: - - .. code-block:: console - - $ openstack volume create --hint different_host=Volume_A-UUID \ - --hint different_host=Volume_B-UUID --size SIZE VOLUME_NAME - - Or: - - .. code-block:: console - - $ openstack volume create --hint different_host="[Volume_A-UUID, \ - Volume_B-UUID]" --size SIZE VOLUME_NAME diff --git a/doc/source/cli/cli-manage-volumes.rst b/doc/source/cli/cli-manage-volumes.rst deleted file mode 100644 index 14793d82e..000000000 --- a/doc/source/cli/cli-manage-volumes.rst +++ /dev/null @@ -1,677 +0,0 @@ -.. _volume: - -============== -Manage volumes -============== - -A volume is a detachable block storage device, similar to a USB hard -drive. You can attach a volume to only one instance. Use the ``openstack`` -client commands to create and manage volumes. 
- -Migrate a volume -~~~~~~~~~~~~~~~~ - -As an administrator, you can migrate a volume with its data from one -location to another in a manner that is transparent to users and -workloads. You can migrate only detached volumes with no snapshots. - -Possible use cases for data migration include: - -* Bring down a physical storage device for maintenance without - disrupting workloads. - -* Modify the properties of a volume. - -* Free up space in a thinly-provisioned back end. - -Migrate a volume with the :command:`openstack volume migrate` command, as shown -in the following example: - -.. code-block:: console - - $ openstack volume migrate [-h] --host [--force-host-copy] - [--lock-volume | --unlock-volume] - - -In this example, ``--force-host-copy`` forces the generic -host-based migration mechanism and bypasses any driver optimizations. -``--lock-volume | --unlock-volume`` applies to the available volume. -To determine whether the termination of volume migration caused by other -commands. ``--lock-volume`` locks the volume state and does not allow the -migration to be aborted. - -.. note:: - - If the volume has snapshots, the specified host destination cannot accept - the volume. If the user is not an administrator, the migration fails. - -Create a volume -~~~~~~~~~~~~~~~ - -This example creates a ``my-new-volume`` volume based on an image. - -#. List images, and note the ID of the image that you want to use for your - volume: - - .. code-block:: console - - $ openstack image list - +--------------------------------------+---------------------------------+ - | ID | Name | - +--------------------------------------+---------------------------------+ - | 8bf4dc2a-bf78-4dd1-aefa-f3347cf638c8 | cirros-0.3.5-x86_64-uec | - | 9ff9bb2e-3a1d-4d98-acb5-b1d3225aca6c | cirros-0.3.5-x86_64-uec-kernel | - | 4b227119-68a1-4b28-8505-f94c6ea4c6dc | cirros-0.3.5-x86_64-uec-ramdisk | - +--------------------------------------+---------------------------------+ - - -#. 
List the availability zones, and note the ID of the availability zone in - which you want to create your volume: - - .. code-block:: console - - $ openstack availability zone list - +------+-----------+ - | Name | Status | - +------+-----------+ - | nova | available | - +------+-----------+ - -#. Create a volume with 8 gibibytes (GiB) of space, and specify the - availability zone and image: - - .. code-block:: console - - $ openstack volume create --image 8bf4dc2a-bf78-4dd1-aefa-f3347cf638c8 \ - --size 8 --availability-zone nova my-new-volume - - +------------------------------+--------------------------------------+ - | Property | Value | - +------------------------------+--------------------------------------+ - | attachments | [] | - | availability_zone | nova | - | bootable | false | - | consistencygroup_id | None | - | created_at | 2016-09-23T07:52:42.000000 | - | description | None | - | encrypted | False | - | id | bab4b0e0-ce3d-4d57-bf57-3c51319f5202 | - | metadata | {} | - | multiattach | False | - | name | my-new-volume | - | os-vol-tenant-attr:tenant_id | 3f670abbe9b34ca5b81db6e7b540b8d8 | - | replication_status | disabled | - | size | 8 | - | snapshot_id | None | - | source_volid | None | - | status | creating | - | updated_at | None | - | user_id | fe19e3a9f63f4a14bd4697789247bbc5 | - | volume_type | lvmdriver-1 | - +------------------------------+--------------------------------------+ - -#. To verify that your volume was created successfully, list the available - volumes: - - .. 
code-block:: console - - $ openstack volume list - +--------------------------------------+---------------+-----------+------+-------------+ - | ID | DisplayName | Status | Size | Attached to | - +--------------------------------------+---------------+-----------+------+-------------+ - | bab4b0e0-ce3d-4d57-bf57-3c51319f5202 | my-new-volume | available | 8 | | - +--------------------------------------+---------------+-----------+------+-------------+ - - - If your volume was created successfully, its status is ``available``. If - its status is ``error``, you might have exceeded your quota. - -.. _Create_a_volume_from_specified_volume_type: - -Create a volume from specified volume type -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -Cinder supports these three ways to specify ``volume type`` during -volume creation. - -#. volume_type -#. cinder_img_volume_type (via glance image metadata) -#. default_volume_type (via cinder.conf) - -.. _volume_type: - -volume_type ------------ - -User can specify `volume type` when creating a volume. - -.. code-block:: console - - $ openstack volume create -h -f {json,shell,table,value,yaml} - -c COLUMN --max-width - --noindent --prefix PREFIX --size - --type --image - --snapshot --source - --description --user - --project - --availability-zone - --property - - - -.. _cinder_img_volume_type: - -cinder_img_volume_type ----------------------- - -If glance image has ``cinder_img_volume_type`` property, Cinder uses this -parameter to specify ``volume type`` when creating a volume. - -Choose glance image which has ``cinder_img_volume_type`` property and create -a volume from the image. - -.. 
code-block:: console - - $ openstack image list - +----------------------------------+---------------------------------+--------+ - | ID | Name | Status | - +----------------------------------+---------------------------------+--------+ - | 376bd633-c9c9-4c5d-a588-342f4f66 | cirros-0.3.5-x86_64-uec | active | - | d086 | | | - | 2c20fce7-2e68-45ee-ba8d- | cirros-0.3.5-x86_64-uec-ramdisk | active | - | beba27a91ab5 | | | - | a5752de4-9faf-4c47-acbc- | cirros-0.3.5-x86_64-uec-kernel | active | - | 78a5efa7cc6e | | | - +----------------------------------+---------------------------------+--------+ - - - $ openstack image show 376bd633-c9c9-4c5d-a588-342f4f66d086 - +------------------+-----------------------------------------------------------+ - | Field | Value | - +------------------+-----------------------------------------------------------+ - | checksum | eb9139e4942121f22bbc2afc0400b2a4 | - | container_format | ami | - | created_at | 2016-10-13T03:28:55Z | - | disk_format | ami | - | file | /v2/images/376bd633-c9c9-4c5d-a588-342f4f66d086/file | - | id | 376bd633-c9c9-4c5d-a588-342f4f66d086 | - | min_disk | 0 | - | min_ram | 0 | - | name | cirros-0.3.5-x86_64-uec | - | owner | 88ba456e3a884c318394737765e0ef4d | - | properties | kernel_id='a5752de4-9faf-4c47-acbc-78a5efa7cc6e', | - | | ramdisk_id='2c20fce7-2e68-45ee-ba8d-beba27a91ab5' | - | protected | False | - | schema | /v2/schemas/image | - | size | 25165824 | - | status | active | - | tags | | - | updated_at | 2016-10-13T03:28:55Z | - | virtual_size | None | - | visibility | public | - +------------------+-----------------------------------------------------------+ - - $ openstack volume create --image 376bd633-c9c9-4c5d-a588-342f4f66d086 \ - --size 1 --availability-zone nova test - +---------------------+--------------------------------------+ - | Field | Value | - +---------------------+--------------------------------------+ - | attachments | [] | - | availability_zone | nova | - | bootable | false | - | 
consistencygroup_id | None | - | created_at | 2016-10-13T06:29:53.688599 | - | description | None | - | encrypted | False | - | id | e6e6a72d-cda7-442c-830f-f306ea6a03d5 | - | multiattach | False | - | name | test | - | properties | | - | replication_status | disabled | - | size | 1 | - | snapshot_id | None | - | source_volid | None | - | status | creating | - | type | lvmdriver-1 | - | updated_at | None | - | user_id | 33fdc37314914796883706b33e587d51 | - +---------------------+--------------------------------------+ - -.. _default_volume_type: - -default_volume_type -------------------- - -If above parameters are not set, Cinder uses default_volume_type which is -defined in cinder.conf during volume creation. - -Example cinder.conf file configuration. - -.. code-block:: console - - [default] - default_volume_type = lvmdriver-1 - -.. _Attach_a_volume_to_an_instance: - -Attach a volume to an instance -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -#. Attach your volume to a server, specifying the server ID and the volume - ID: - - .. code-block:: console - - $ openstack server add volume 84c6e57d-a6b1-44b6-81eb-fcb36afd31b5 \ - 573e024d-5235-49ce-8332-be1576d323f8 --device /dev/vdb - -#. Show information for your volume: - - .. code-block:: console - - $ openstack volume show 573e024d-5235-49ce-8332-be1576d323f8 - - The output shows that the volume is attached to the server with ID - ``84c6e57d-a6b1-44b6-81eb-fcb36afd31b5``, is in the nova availability - zone, and is bootable. - - .. code-block:: console - - +------------------------------+-----------------------------------------------+ - | Field | Value | - +------------------------------+-----------------------------------------------+ - | attachments | [{u'device': u'/dev/vdb', | - | | u'server_id': u'84c6e57d-a | - | | u'id': u'573e024d-... | - | | u'volume_id': u'573e024d... 
| - | availability_zone | nova | - | bootable | true | - | consistencygroup_id | None | - | created_at | 2016-10-13T06:08:07.000000 | - | description | None | - | encrypted | False | - | id | 573e024d-5235-49ce-8332-be1576d323f8 | - | multiattach | False | - | name | my-new-volume | - | os-vol-tenant-attr:tenant_id | 7ef070d3fee24bdfae054c17ad742e28 | - | properties | | - | replication_status | disabled | - | size | 8 | - | snapshot_id | None | - | source_volid | None | - | status | in-use | - | type | lvmdriver-1 | - | updated_at | 2016-10-13T06:08:11.000000 | - | user_id | 33fdc37314914796883706b33e587d51 | - | volume_image_metadata |{u'kernel_id': u'df430cc2..., | - | | u'image_id': u'397e713c..., | - | | u'ramdisk_id': u'3cf852bd..., | - | |u'image_name': u'cirros-0.3.5-x86_64-uec'} | - +------------------------------+-----------------------------------------------+ - - - -.. _Resize_a_volume: - -Resize a volume -~~~~~~~~~~~~~~~ - -#. To resize your volume, you must first detach it from the server. - To detach the volume from your server, pass the server ID and volume ID - to the following command: - - .. code-block:: console - - $ openstack server remove volume 84c6e57d-a6b1-44b6-81eb-fcb36afd31b5 573e024d-5235-49ce-8332-be1576d323f8 - - This command does not provide any output. - -#. List volumes: - - .. code-block:: console - - $ openstack volume list - +----------------+-----------------+-----------+------+-------------+ - | ID | Display Name | Status | Size | Attached to | - +----------------+-----------------+-----------+------+-------------+ - | 573e024d-52... | my-new-volume | available | 8 | | - | bd7cf584-45... | my-bootable-vol | available | 8 | | - +----------------+-----------------+-----------+------+-------------+ - - Note that the volume is now available. - -#. Resize the volume by passing the volume ID and the new size (a value - greater than the old one) as parameters: - - .. 
code-block:: console - - $ openstack volume set 573e024d-5235-49ce-8332-be1576d323f8 --size 10 - - This command does not provide any output. - - .. note:: - - When extending an LVM volume with a snapshot, the volume will be - deactivated. The reactivation is automatic unless - ``auto_activation_volume_list`` is defined in ``lvm.conf``. See - ``lvm.conf`` for more information. - -Delete a volume -~~~~~~~~~~~~~~~ - -#. To delete your volume, you must first detach it from the server. - To detach the volume from your server and check for the list of existing - volumes, see steps 1 and 2 in Resize_a_volume_. - - Delete the volume using either the volume name or ID: - - .. code-block:: console - - $ openstack volume delete my-new-volume - - This command does not provide any output. - -#. List the volumes again, and note that the status of your volume is - ``deleting``: - - .. code-block:: console - - $ openstack volume list - +----------------+-----------------+-----------+------+-------------+ - | ID | Display Name | Status | Size | Attached to | - +----------------+-----------------+-----------+------+-------------+ - | 573e024d-52... | my-new-volume | deleting | 8 | | - | bd7cf584-45... | my-bootable-vol | available | 8 | | - +----------------+-----------------+-----------+------+-------------+ - - When the volume is fully deleted, it disappears from the list of - volumes: - - .. code-block:: console - - $ openstack volume list - +----------------+-----------------+-----------+------+-------------+ - | ID | Display Name | Status | Size | Attached to | - +----------------+-----------------+-----------+------+-------------+ - | bd7cf584-45... | my-bootable-vol | available | 8 | | - +----------------+-----------------+-----------+------+-------------+ - -Transfer a volume -~~~~~~~~~~~~~~~~~ - -You can transfer a volume from one owner to another by using the -:command:`openstack volume transfer request create` command. 
The volume -donor, or original owner, creates a transfer request and sends the created -transfer ID and authorization key to the volume recipient. The volume -recipient, or new owner, accepts the transfer by using the ID and key. - -.. note:: - - The procedure for volume transfer is intended for projects (both the - volume donor and recipient) within the same cloud. - -Use cases include: - -* Create a custom bootable volume or a volume with a large data set and - transfer it to a customer. - -* For bulk import of data to the cloud, the data ingress system creates - a new Block Storage volume, copies data from the physical device, and - transfers device ownership to the end user. - -Create a volume transfer request --------------------------------- - -#. While logged in as the volume donor, list the available volumes: - - .. code-block:: console - - $ openstack volume list - +-----------------+-----------------+-----------+------+-------------+ - | ID | Display Name | Status | Size | Attached to | - +-----------------+-----------------+-----------+------+-------------+ - | 72bfce9f-cac... | None | error | 1 | | - | a1cdace0-08e... | None | available | 1 | | - +-----------------+-----------------+-----------+------+-------------+ - - -#. As the volume donor, request a volume transfer authorization code for a - specific volume: - - .. code-block:: console - - $ openstack volume transfer request create - - - Name or ID of volume to transfer. - - The volume must be in an ``available`` state or the request will be - denied. If the transfer request is valid in the database (that is, it - has not expired or been deleted), the volume is placed in an - ``awaiting-transfer`` state. For example: - - .. code-block:: console - - $ openstack volume transfer request create a1cdace0-08e4-4dc7-b9dc-457e9bcfe25f - - The output shows the volume transfer ID in the ``id`` row and the - authorization key. - - .. 
code-block:: console - - +------------+--------------------------------------+ - | Field | Value | - +------------+--------------------------------------+ - | auth_key | 0a59e53630f051e2 | - | created_at | 2016-11-03T11:49:40.346181 | - | id | 34e29364-142b-4c7b-8d98-88f765bf176f | - | name | None | - | volume_id | a1cdace0-08e4-4dc7-b9dc-457e9bcfe25f | - +------------+--------------------------------------+ - - .. note:: - - Optionally, you can specify a name for the transfer by using the - ``--name transferName`` parameter. - - .. note:: - - While the ``auth_key`` property is visible in the output of - ``openstack volume transfer request create VOLUME_ID``, it will not be - available in subsequent ``openstack volume transfer request show TRANSFER_ID`` - command. - -#. Send the volume transfer ID and authorization key to the new owner (for - example, by email). - -#. View pending transfers: - - .. code-block:: console - - $ openstack volume transfer request list - +--------------------------------------+--------------------------------------+------+ - | ID | Volume | Name | - +--------------------------------------+--------------------------------------+------+ - | 6e4e9aa4-bed5-4f94-8f76-df43232f44dc | a1cdace0-08e4-4dc7-b9dc-457e9bcfe25f | None | - +--------------------------------------+--------------------------------------+------+ - -#. After the volume recipient, or new owner, accepts the transfer, you can - see that the transfer is no longer available: - - .. code-block:: console - - $ openstack volume transfer request list - +----+-----------+------+ - | ID | Volume ID | Name | - +----+-----------+------+ - +----+-----------+------+ - -Accept a volume transfer request --------------------------------- - -#. As the volume recipient, you must first obtain the transfer ID and - authorization key from the original owner. - -#. Accept the request: - - .. 
code-block:: console - - $ openstack volume transfer request accept transferID authKey - - For example: - - .. code-block:: console - - $ openstack volume transfer request accept 6e4e9aa4-bed5-4f94-8f76-df43232f44dc b2c8e585cbc68a80 - +-----------+--------------------------------------+ - | Property | Value | - +-----------+--------------------------------------+ - | id | 6e4e9aa4-bed5-4f94-8f76-df43232f44dc | - | name | None | - | volume_id | a1cdace0-08e4-4dc7-b9dc-457e9bcfe25f | - +-----------+--------------------------------------+ - - .. note:: - - If you do not have a sufficient quota for the transfer, the transfer - is refused. - -Delete a volume transfer ------------------------- - -#. List available volumes and their statuses: - - .. code-block:: console - - $ openstack volume list - +-----------------+-----------------+-----------------+------+-------------+ - | ID | Display Name | Status | Size | Attached to | - +-----------------+-----------------+-----------------+------+-------------+ - | 72bfce9f-cac... | None | error | 1 | | - | a1cdace0-08e... | None |awaiting-transfer| 1 | | - +-----------------+-----------------+-----------------+------+-------------+ - - -#. Find the matching transfer ID: - - .. code-block:: console - - $ openstack volume transfer request list - +--------------------------------------+--------------------------------------+------+ - | ID | VolumeID | Name | - +--------------------------------------+--------------------------------------+------+ - | a6da6888-7cdf-4291-9c08-8c1f22426b8a | a1cdace0-08e4-4dc7-b9dc-457e9bcfe25f | None | - +--------------------------------------+--------------------------------------+------+ - -#. Delete the volume: - - .. code-block:: console - - $ openstack volume transfer request delete - - - Name or ID of transfer to delete. - - For example: - - .. code-block:: console - - $ openstack volume transfer request delete a6da6888-7cdf-4291-9c08-8c1f22426b8a - -#. 
Verify that transfer list is now empty and that the volume is again - available for transfer: - - .. code-block:: console - - $ openstack volume transfer request list - +----+-----------+------+ - | ID | Volume ID | Name | - +----+-----------+------+ - +----+-----------+------+ - - .. code-block:: console - - $ openstack volume list - +-----------------+-----------+--------------+------+-------------+----------+-------------+ - | ID | Status | Display Name | Size | Volume Type | Bootable | Attached to | - +-----------------+-----------+--------------+------+-------------+----------+-------------+ - | 72bfce9f-ca... | error | None | 1 | None | false | | - | a1cdace0-08... | available | None | 1 | None | false | | - +-----------------+-----------+--------------+------+-------------+----------+-------------+ - -Manage and unmanage a snapshot -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -A snapshot is a point in time version of a volume. As an administrator, -you can manage and unmanage snapshots. - -Manage a snapshot ------------------ - -Manage a snapshot with the :command:`openstack volume snapshot set` command: - -.. code-block:: console - - $ openstack volume snapshot set [-h] - [--name ] - [--description ] - [--no-property] - [--property ] - [--state ] - - -The arguments to be passed are: - -``--name `` - New snapshot name - -``--description `` - New snapshot description - -``--no-property`` - Remove all properties from (specify both - --no-property and --property to remove the current - properties before setting new properties.) - -``--property `` - Property to add or modify for this snapshot (repeat option to set - multiple properties) - -``--state `` - New snapshot state. (“available”, “error”, “creating”, “deleting”, - or “error_deleting”) - (admin only) (This option simply changes the state of the snapshot in the - database with no regard to actual status, exercise caution when using) - -```` - Snapshot to modify (name or ID) - -.. 
code-block:: console - - $ openstack volume snapshot set my-snapshot-id - -Unmanage a snapshot -------------------- - -Unmanage a snapshot with the :command:`openstack volume snapshot unset` -command: - -.. code-block:: console - - $ openstack volume snapshot unset [-h] - [--property ] - - -The arguments to be passed are: - -``--property `` - Property to remove from snapshot (repeat option to remove multiple properties) - -```` - Snapshot to modify (name or ID). - -The following example unmanages the ``my-snapshot-id`` image: - -.. code-block:: console - - $ openstack volume snapshot unset my-snapshot-id diff --git a/doc/source/cli/cli-set-quotas.rst b/doc/source/cli/cli-set-quotas.rst deleted file mode 100644 index 305ea9dbe..000000000 --- a/doc/source/cli/cli-set-quotas.rst +++ /dev/null @@ -1,59 +0,0 @@ -.. _manage-quotas: - -============= -Manage quotas -============= - -To prevent system capacities from being exhausted without -notification, you can set up quotas. Quotas are operational -limits. For example, the number of gigabytes allowed for each -project can be controlled so that cloud resources are optimized. -Quotas can be enforced at both the project -and the project-user level. - -Using the command-line interface, you can manage quotas for -the OpenStack Compute service, the OpenStack Block Storage service, -and the OpenStack Networking service. - -The cloud operator typically changes default values because a -project requires more than ten volumes or 1 TB on a compute -node. - -.. note:: - - To view all projects, run: - - .. 
code-block:: console - - $ openstack project list - +----------------------------------+----------+ - | ID | Name | - +----------------------------------+----------+ - | e66d97ac1b704897853412fc8450f7b9 | admin | - | bf4a37b885fe46bd86e999e50adad1d3 | services | - | 21bd1c7c95234fd28f589b60903606fa | tenant01 | - | f599c5cd1cba4125ae3d7caed08e288c | tenant02 | - +----------------------------------+----------+ - - To display all current users for a project, run: - - .. code-block:: console - - $ openstack user list --project PROJECT_NAME - +----------------------------------+--------+ - | ID | Name | - +----------------------------------+--------+ - | ea30aa434ab24a139b0e85125ec8a217 | demo00 | - | 4f8113c1d838467cad0c2f337b3dfded | demo01 | - +----------------------------------+--------+ - -Use :samp:`openstack quota show {PROJECT_NAME}` to list all quotas for a -project. - -Use :samp:`openstack quota set {PROJECT_NAME} {--parameters}` to set quota -values. - -.. toctree:: - :maxdepth: 2 - - cli-cinder-quotas.rst diff --git a/doc/source/common/glossary.rst b/doc/source/common/glossary.rst deleted file mode 100644 index 77308ab0a..000000000 --- a/doc/source/common/glossary.rst +++ /dev/null @@ -1,24 +0,0 @@ -======== -Glossary -======== - -This glossary offers a list of terms and definitions to define a -vocabulary for Cinder concepts. - -.. glossary:: - - Logical Volume Manager (LVM) - - Provides a method of allocating space on mass-storage - devices that is more flexible than conventional partitioning - schemes. - - iSCSI Qualified Name (IQN) - - IQN is the format most commonly used for iSCSI names, which uniquely - identify nodes in an iSCSI network. - All IQNs follow the pattern iqn.yyyy-mm.domain:identifier, where - 'yyyy-mm' is the year and month in which the domain was registered, - 'domain' is the reversed domain name of the issuing organization, and - 'identifier' is an optional string which makes each IQN under the same - domain unique. 
For example, 'iqn.2015-10.org.openstack.408ae959bce1'. diff --git a/doc/source/conf.py b/doc/source/conf.py deleted file mode 100644 index 960c97cf9..000000000 --- a/doc/source/conf.py +++ /dev/null @@ -1,270 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -# This file is execfile()d with the current directory set -# to its containing dir. -# -# Note that not all possible configuration values are present in this -# autogenerated file. -# -# All configuration values have a default; values that are commented out -# serve to show the default. - -import eventlet -import os -import sys - -from cinder import objects - -# NOTE(dims): monkey patch subprocess to prevent failures in latest eventlet -# See https://github.com/eventlet/eventlet/issues/398 -try: - eventlet.monkey_patch(subprocess=True) -except TypeError: - pass - -# NOTE(geguileo): Sphinx will fail to generate the documentation if we are -# using decorators from any OVO in cinder.objects, because the OVOs are only -# added to the cinder.objects namespace when the CLI programs are run. So we -# need to run it here as well to avoid failures like: -# AttributeError: 'module' object has no attribute 'Volume' -objects.register_all() - -# If extensions (or modules to document with autodoc) are in another directory, -# add these directories to sys.path here. If the directory is relative to the -# documentation root, use os.path.abspath to make it absolute, like shown here. 
-sys.path.insert(0, os.path.abspath('../../')) -sys.path.insert(0, os.path.abspath('../')) -sys.path.insert(0, os.path.abspath('./')) - -# -- General configuration ---------------------------------------------------- - -# Add any Sphinx extension module names here, as strings. -# They can be extensions coming with Sphinx (named 'sphinx.ext.*') -# or your custom ones. - -extensions = ['sphinx.ext.autodoc', - 'sphinx.ext.coverage', - 'sphinx.ext.ifconfig', - 'sphinx.ext.graphviz', - 'openstackdocstheme', - 'stevedore.sphinxext', - 'oslo_config.sphinxconfiggen', - 'ext.cinder_driverlist', - ] - -config_generator_config_file = ( - '../../cinder/config/cinder-config-generator.conf') -sample_config_basename = '_static/cinder' - -# autodoc generation is a bit aggressive and a nuisance -# when doing heavy text edit cycles. Execute "export SPHINX_DEBUG=1" -# in your terminal to disable -if not os.getenv('SPHINX_DEBUG'): - extensions += ['ext.cinder_autodoc'] - -todo_include_todos = True - -# Add any paths that contain templates here, relative to this directory. -# Changing the path so that the Hudson build output contains GA code -# and the source docs do not contain the code so local, offline sphinx builds -# are "clean." -templates_path = [] -if os.getenv('HUDSON_PUBLISH_DOCS'): - templates_path = ['_ga', '_templates'] -else: - templates_path = ['_templates'] - -# The suffix of source filenames. -source_suffix = '.rst' - -# The encoding of source files. -# source_encoding = 'utf-8' - -# The master toctree document. -master_doc = 'index' - -# General information about the project. -repository_name = 'openstack/cinder' -bug_project = 'cinder' -bug_tag = 'doc' -project = u'Cinder' -copyright = u'2010-present, OpenStack Foundation' - -# The version info for the project you're documenting, acts as replacement for -# |version| and |release|, also used in various other places throughout the -# built documents. 
-# -from cinder.version import version_info -# The full version, including alpha/beta/rc tags. -release = version_info.release_string() -# The short X.Y version. -version = version_info.version_string() - -# The language for content autogenerated by Sphinx. Refer to documentation -# for a list of supported languages. -# language = None - -# There are two options for replacing |today|: either, you set today to some -# non-false value, then it is used: -# today = '' -# Else, today_fmt is used as the format for a strftime call. -# today_fmt = '%B %d, %Y' - -# List of documents that shouldn't be included in the build. -unused_docs = [ - 'api_ext/rst_extension_template', - 'installer', -] - -# List of directories, relative to source directory, that shouldn't be searched -# for source files. -exclude_trees = [] - -# The reST default role (used for this markup: `text`) to use -# for all documents. -# default_role = None - -# If true, '()' will be appended to :func: etc. cross-reference text. -# add_function_parentheses = True - -# If true, the current module name will be prepended to all description -# unit titles (such as .. function::). -add_module_names = False - -# If true, sectionauthor and moduleauthor directives will be shown in the -# output. They are ignored by default. -show_authors = False - -# The name of the Pygments (syntax highlighting) style to use. -pygments_style = 'sphinx' - -# A list of ignored prefixes for module index sorting. -modindex_common_prefix = ['cinder.'] - -# -- Options for man page output ---------------------------------------------- - -# Grouping the document tree for man pages. -# List of tuples 'sourcefile', 'target', u'title', u'Authors name', 'manual' - -man_pages = [ - ('man/cinder-manage', 'cinder-manage', u'Cloud controller fabric', - [u'OpenStack'], 1) -] - -# -- Options for HTML output -------------------------------------------------- - -# The theme to use for HTML and HTML Help pages. 
Major themes that come with -# Sphinx are currently 'default' and 'sphinxdoc'. -# html_theme_path = ["."] -html_theme = 'openstackdocs' - -# Theme options are theme-specific and customize the look and feel of a theme -# further. For a list of options available for each theme, see the -# documentation. -# html_theme_options = {} - -# Add any paths that contain custom themes here, relative to this directory. -# html_theme_path = [] - -# The name for this set of Sphinx documents. If None, it defaults to -# " v documentation". -# html_title = None - -# A shorter title for the navigation bar. Default is the same as html_title. -# html_short_title = None - -# The name of an image file (relative to this directory) to place at the top -# of the sidebar. -# html_logo = None - -# The name of an image file (within the static path) to use as favicon of the -# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32 -# pixels large. -# html_favicon = None - -# Add any paths that contain custom static files (such as style sheets) here, -# relative to this directory. They are copied after the builtin static files, -# so a file named "default.css" will overwrite the builtin "default.css". -html_static_path = ['_static'] - -# If not '', a 'Last updated on:' timestamp is inserted at every page bottom, -# using the given strftime format. -html_last_updated_fmt = '%Y-%m-%d %H:%M' - -# If true, SmartyPants will be used to convert quotes and dashes to -# typographically correct entities. -# html_use_smartypants = True - -# Custom sidebar templates, maps document names to template names. -# html_sidebars = {} - -# Additional templates that should be rendered to pages, maps page names to -# template names. -# html_additional_pages = {} - -# If false, no module index is generated. -# html_use_modindex = True - -# If false, no index is generated. -# html_use_index = True - -# If true, the index is split into individual pages for each letter. 
-# html_split_index = False - -# If true, links to the reST sources are added to the pages. -# html_show_sourcelink = True - -# If true, an OpenSearch description file will be output, and all pages will -# contain a tag referring to it. The value of this option must be the -# base URL from which the finished HTML is served. -# html_use_opensearch = '' - -# If nonempty, this is the file name suffix for HTML files (e.g. ".xhtml"). -# html_file_suffix = '' - -# Output file base name for HTML help builder. -htmlhelp_basename = 'cinderdoc' - - -# -- Options for LaTeX output ------------------------------------------------- - -# The paper size ('letter' or 'a4'). -# latex_paper_size = 'letter' - -# The font size ('10pt', '11pt' or '12pt'). -# latex_font_size = '10pt' - -# Grouping the document tree into LaTeX files. List of tuples -# (source start file, target name, title, author, documentclass -# [howto/manual]). -latex_documents = [ - ('index', 'Cinder.tex', u'Cinder Documentation', - u'Anso Labs, LLC', 'manual'), -] - -# The name of an image file (relative to this directory) to place at the top of -# the title page. -# latex_logo = None - -# For "manual" documents, if this is true, then toplevel headings are parts, -# not chapters. -# latex_use_parts = False - -# Additional stuff for the LaTeX preamble. -# latex_preamble = '' - -# Documents to append as an appendix to all manuals. -# latex_appendices = [] - -# If false, no module index is generated. -# latex_use_modindex = True diff --git a/doc/source/configuration/README.rst b/doc/source/configuration/README.rst deleted file mode 100644 index e759b64ef..000000000 --- a/doc/source/configuration/README.rst +++ /dev/null @@ -1,17 +0,0 @@ -================================== -Cinder Configuration Documentation -================================== - -Introduction: -------------- - -This directory is intended to hold any documentation that relates to -how to configure Cinder. 
It is intended that some of this content -be automatically generated in the future. At the moment, however, it -is not. Changes to configuration options for Cinder or its drivers -needs to be put under this directory. - -The full spec for organization of documentation may be seen in the -`OS Manuals Migration Spec -`. - diff --git a/doc/source/configuration/block-storage.rst b/doc/source/configuration/block-storage.rst deleted file mode 100644 index 93c1d6dd0..000000000 --- a/doc/source/configuration/block-storage.rst +++ /dev/null @@ -1,27 +0,0 @@ -=================================== -Block Storage Service Configuration -=================================== - -.. toctree:: - :maxdepth: 1 - - block-storage/block-storage-overview.rst - block-storage/volume-drivers.rst - block-storage/backup-drivers.rst - block-storage/schedulers.rst - block-storage/logs.rst - block-storage/fc-zoning.rst - block-storage/nested-quota.rst - block-storage/volume-encryption.rst - block-storage/config-options.rst - block-storage/samples/index.rst - tables/conf-changes/cinder.rst - -.. note:: - - The common configurations for shared service and libraries, - such as database connections and RPC messaging, - are described at :doc:`common-configurations`. - -The Block Storage service works with many different storage -drivers that you can configure by using these instructions. diff --git a/doc/source/configuration/block-storage/backup-drivers.rst b/doc/source/configuration/block-storage/backup-drivers.rst deleted file mode 100644 index 19c9780e9..000000000 --- a/doc/source/configuration/block-storage/backup-drivers.rst +++ /dev/null @@ -1,24 +0,0 @@ -============== -Backup drivers -============== - -.. sort by the drivers by open source software -.. and the drivers for proprietary components - -.. 
toctree:: - - backup/ceph-backup-driver.rst - backup/glusterfs-backup-driver.rst - backup/nfs-backup-driver.rst - backup/posix-backup-driver.rst - backup/swift-backup-driver.rst - backup/gcs-backup-driver.rst - backup/tsm-backup-driver.rst - -This section describes how to configure the cinder-backup service and -its drivers. - -The volume drivers are included with the `Block Storage repository -`_. To set a backup -driver, use the ``backup_driver`` flag. By default there is no backup -driver enabled. diff --git a/doc/source/configuration/block-storage/backup/ceph-backup-driver.rst b/doc/source/configuration/block-storage/backup/ceph-backup-driver.rst deleted file mode 100644 index 44fa87d7c..000000000 --- a/doc/source/configuration/block-storage/backup/ceph-backup-driver.rst +++ /dev/null @@ -1,56 +0,0 @@ -================== -Ceph backup driver -================== - -The Ceph backup driver backs up volumes of any type to a Ceph back-end -store. The driver can also detect whether the volume to be backed up is -a Ceph RBD volume, and if so, it tries to perform incremental and -differential backups. - -For source Ceph RBD volumes, you can perform backups within the same -Ceph pool (not recommended). You can also perform backups between -different Ceph pools and between different Ceph clusters. - -At the time of writing, differential backup support in Ceph/librbd was -quite new. This driver attempts a differential backup in the first -instance. If the differential backup fails, the driver falls back to -full backup/copy. - -If incremental backups are used, multiple backups of the same volume are -stored as snapshots so that minimal space is consumed in the backup -store. It takes far less time to restore a volume than to take a full -copy. - -.. note:: - - Block Storage enables you to: - - - Restore to a new volume, which is the default and recommended - action. - - - Restore to the original volume from which the backup was taken. 
- The restore action takes a full copy because this is the safest - action. - -To enable the Ceph backup driver, include the following option in the -``cinder.conf`` file: - -.. code-block:: ini - - backup_driver = cinder.backup.drivers.ceph - -The following configuration options are available for the Ceph backup -driver. - -.. include:: ../../tables/cinder-backups_ceph.rst - -This example shows the default options for the Ceph backup driver. - -.. code-block:: ini - - backup_ceph_conf=/etc/ceph/ceph.conf - backup_ceph_user = cinder-backup - backup_ceph_chunk_size = 134217728 - backup_ceph_pool = backups - backup_ceph_stripe_unit = 0 - backup_ceph_stripe_count = 0 diff --git a/doc/source/configuration/block-storage/backup/gcs-backup-driver.rst b/doc/source/configuration/block-storage/backup/gcs-backup-driver.rst deleted file mode 100644 index ca6b96962..000000000 --- a/doc/source/configuration/block-storage/backup/gcs-backup-driver.rst +++ /dev/null @@ -1,18 +0,0 @@ -======================================= -Google Cloud Storage backup driver -======================================= - -The Google Cloud Storage (GCS) backup driver backs up volumes of any type to -Google Cloud Storage. - -To enable the GCS backup driver, include the following option in the -``cinder.conf`` file: - -.. code-block:: ini - - backup_driver = cinder.backup.drivers.google - -The following configuration options are available for the GCS backup -driver. - -.. include:: ../../tables/cinder-backups_gcs.rst diff --git a/doc/source/configuration/block-storage/backup/glusterfs-backup-driver.rst b/doc/source/configuration/block-storage/backup/glusterfs-backup-driver.rst deleted file mode 100644 index 9980702d6..000000000 --- a/doc/source/configuration/block-storage/backup/glusterfs-backup-driver.rst +++ /dev/null @@ -1,17 +0,0 @@ -======================= -GlusterFS backup driver -======================= - -The GlusterFS backup driver backs up volumes of any type to GlusterFS. 
- -To enable the GlusterFS backup driver, include the following option in the -``cinder.conf`` file: - -.. code-block:: ini - - backup_driver = cinder.backup.drivers.glusterfs - -The following configuration options are available for the GlusterFS backup -driver. - -.. include:: ../../tables/cinder-backups_glusterfs.rst diff --git a/doc/source/configuration/block-storage/backup/nfs-backup-driver.rst b/doc/source/configuration/block-storage/backup/nfs-backup-driver.rst deleted file mode 100644 index bd6c19273..000000000 --- a/doc/source/configuration/block-storage/backup/nfs-backup-driver.rst +++ /dev/null @@ -1,18 +0,0 @@ -================= -NFS backup driver -================= - -The backup driver for the NFS back end backs up volumes of any type to -an NFS exported backup repository. - -To enable the NFS backup driver, include the following option in the -``[DEFAULT]`` section of the ``cinder.conf`` file: - -.. code-block:: ini - - backup_driver = cinder.backup.drivers.nfs - -The following configuration options are available for the NFS back-end -backup driver. - -.. include:: ../../tables/cinder-backups_nfs.rst diff --git a/doc/source/configuration/block-storage/backup/posix-backup-driver.rst b/doc/source/configuration/block-storage/backup/posix-backup-driver.rst deleted file mode 100644 index 18dfd0b28..000000000 --- a/doc/source/configuration/block-storage/backup/posix-backup-driver.rst +++ /dev/null @@ -1,18 +0,0 @@ -================================ -POSIX file systems backup driver -================================ - -The POSIX file systems backup driver backs up volumes of any type to -POSIX file systems. - -To enable the POSIX file systems backup driver, include the following -option in the ``cinder.conf`` file: - -.. code-block:: ini - - backup_driver = cinder.backup.drivers.posix - -The following configuration options are available for the POSIX -file systems backup driver. - -.. 
include:: ../../tables/cinder-backups_posix.rst diff --git a/doc/source/configuration/block-storage/backup/swift-backup-driver.rst b/doc/source/configuration/block-storage/backup/swift-backup-driver.rst deleted file mode 100644 index 2a1cc03cb..000000000 --- a/doc/source/configuration/block-storage/backup/swift-backup-driver.rst +++ /dev/null @@ -1,52 +0,0 @@ -=================== -Swift backup driver -=================== - -The backup driver for the swift back end performs a volume backup to an -object storage system. - -To enable the swift backup driver, include the following option in the -``cinder.conf`` file: - -.. code-block:: ini - - backup_driver = cinder.backup.drivers.swift - -The following configuration options are available for the Swift back-end -backup driver. - -.. include:: ../../tables/cinder-backups_swift.rst - -To enable the swift backup driver for 1.0, 2.0, or 3.0 authentication version, -specify ``1``, ``2``, or ``3`` correspondingly. For example: - -.. code-block:: ini - - backup_swift_auth_version = 2 - -In addition, the 2.0 authentication system requires the definition of the -``backup_swift_tenant`` setting: - -.. code-block:: ini - - backup_swift_tenant = - -This example shows the default options for the Swift back-end backup -driver. - -.. 
code-block:: ini - - backup_swift_url = http://localhost:8080/v1/AUTH_ - backup_swift_auth_url = http://localhost:5000/v3 - backup_swift_auth = per_user - backup_swift_auth_version = 1 - backup_swift_user = - backup_swift_user_domain = - backup_swift_key = - backup_swift_container = volumebackups - backup_swift_object_size = 52428800 - backup_swift_project = - backup_swift_project_domain = - backup_swift_retry_attempts = 3 - backup_swift_retry_backoff = 2 - backup_compression_algorithm = zlib diff --git a/doc/source/configuration/block-storage/backup/tsm-backup-driver.rst b/doc/source/configuration/block-storage/backup/tsm-backup-driver.rst deleted file mode 100644 index 1ab1294cf..000000000 --- a/doc/source/configuration/block-storage/backup/tsm-backup-driver.rst +++ /dev/null @@ -1,31 +0,0 @@ -======================================== -IBM Tivoli Storage Manager backup driver -======================================== - -The IBM Tivoli Storage Manager (TSM) backup driver enables performing -volume backups to a TSM server. - -The TSM client should be installed and configured on the machine running -the cinder-backup service. See the IBM Tivoli Storage Manager -Backup-Archive Client Installation and User's Guide for details on -installing the TSM client. - -To enable the IBM TSM backup driver, include the following option in -``cinder.conf``: - -.. code-block:: ini - - backup_driver = cinder.backup.drivers.tsm - -The following configuration options are available for the TSM backup -driver. - -.. include:: ../../tables/cinder-backups_tsm.rst - -This example shows the default options for the TSM backup driver. - -.. 
code-block:: ini - - backup_tsm_volume_prefix = backup - backup_tsm_password = password - backup_tsm_compression = True diff --git a/doc/source/configuration/block-storage/block-storage-overview.rst b/doc/source/configuration/block-storage/block-storage-overview.rst deleted file mode 100644 index d06609ec7..000000000 --- a/doc/source/configuration/block-storage/block-storage-overview.rst +++ /dev/null @@ -1,89 +0,0 @@ -========================================= -Introduction to the Block Storage service -========================================= - -The Block Storage service provides persistent block storage -resources that Compute instances can consume. This includes -secondary attached storage similar to the Amazon Elastic Block Storage -(EBS) offering. In addition, you can write images to a Block Storage -device for Compute to use as a bootable persistent instance. - -The Block Storage service differs slightly from the Amazon EBS offering. -The Block Storage service does not provide a shared storage solution -like NFS. With the Block Storage service, you can attach a device to -only one instance. - -The Block Storage service provides: - -- ``cinder-api`` - a WSGI app that authenticates and routes requests - throughout the Block Storage service. It supports the OpenStack APIs - only, although there is a translation that can be done through - Compute's EC2 interface, which calls in to the Block Storage client. - -- ``cinder-scheduler`` - schedules and routes requests to the appropriate - volume service. Depending upon your configuration, this may be simple - round-robin scheduling to the running volume services, or it can be - more sophisticated through the use of the Filter Scheduler. The - Filter Scheduler is the default and enables filters on things like - Capacity, Availability Zone, Volume Types, and Capabilities as well - as custom filters. - -- ``cinder-volume`` - manages Block Storage devices, specifically the - back-end devices themselves. 
- -- ``cinder-backup`` - provides a means to back up a Block Storage volume to - OpenStack Object Storage (swift). - -The Block Storage service contains the following components: - -- **Back-end Storage Devices** - the Block Storage service requires some - form of back-end storage that the service is built on. The default - implementation is to use LVM on a local volume group named - "cinder-volumes." In addition to the base driver implementation, the - Block Storage service also provides the means to add support for - other storage devices to be utilized such as external Raid Arrays or - other storage appliances. These back-end storage devices may have - custom block sizes when using KVM or QEMU as the hypervisor. - -- **Users and Tenants (Projects)** - the Block Storage service can be - used by many different cloud computing consumers or customers - (tenants on a shared system), using role-based access assignments. - Roles control the actions that a user is allowed to perform. In the - default configuration, most actions do not require a particular role, - but this can be configured by the system administrator in the - appropriate ``policy.json`` file that maintains the rules. A user's - access to particular volumes is limited by tenant, but the user name - and password are assigned per user. Key pairs granting access to a - volume are enabled per user, but quotas to control resource - consumption across available hardware resources are per tenant. - - For tenants, quota controls are available to limit: - - - The number of volumes that can be created. - - - The number of snapshots that can be created. - - - The total number of GBs allowed per tenant (shared between - snapshots and volumes). - - You can revise the default quota values with the Block Storage CLI, - so the limits placed by quotas are editable by admin users. 
- -- **Volumes, Snapshots, and Backups** - the basic resources offered by - the Block Storage service are volumes and snapshots which are derived - from volumes and volume backups: - - - **Volumes** - allocated block storage resources that can be - attached to instances as secondary storage or they can be used as - the root store to boot instances. Volumes are persistent R/W block - storage devices most commonly attached to the compute node through - iSCSI. - - - **Snapshots** - a read-only point in time copy of a volume. The - snapshot can be created from a volume that is currently in use - (through the use of ``--force True``) or in an available state. - The snapshot can then be used to create a new volume through - create from snapshot. - - - **Backups** - an archived copy of a volume currently stored in - Object Storage (swift). diff --git a/doc/source/configuration/block-storage/config-options.rst b/doc/source/configuration/block-storage/config-options.rst deleted file mode 100644 index ff2e5269d..000000000 --- a/doc/source/configuration/block-storage/config-options.rst +++ /dev/null @@ -1,35 +0,0 @@ -================== -Additional options -================== - -These options can also be set in the ``cinder.conf`` file. - -.. include:: ../tables/cinder-api.rst -.. include:: ../tables/cinder-auth.rst -.. include:: ../tables/cinder-backups.rst -.. include:: ../tables/cinder-block-device.rst -.. include:: ../tables/cinder-common.rst -.. include:: ../tables/cinder-compute.rst -.. include:: ../tables/cinder-coordination.rst -.. include:: ../tables/cinder-debug.rst -.. include:: ../tables/cinder-drbd.rst -.. include:: ../tables/cinder-emc.rst -.. include:: ../tables/cinder-eternus.rst -.. include:: ../tables/cinder-flashsystem.rst -.. include:: ../tables/cinder-hgst.rst -.. include:: ../tables/cinder-hpelefthand.rst -.. include:: ../tables/cinder-hpexp.rst -.. include:: ../tables/cinder-huawei.rst -.. include:: ../tables/cinder-hyperv.rst -.. 
include:: ../tables/cinder-images.rst -.. include:: ../tables/cinder-nas.rst -.. include:: ../tables/cinder-profiler.rst -.. include:: ../tables/cinder-pure.rst -.. include:: ../tables/cinder-quota.rst -.. include:: ../tables/cinder-redis.rst -.. include:: ../tables/cinder-san.rst -.. include:: ../tables/cinder-scheduler.rst -.. include:: ../tables/cinder-scst.rst -.. include:: ../tables/cinder-storage.rst -.. include:: ../tables/cinder-tegile.rst -.. include:: ../tables/cinder-zones.rst diff --git a/doc/source/configuration/block-storage/drivers/blockbridge-eps-driver.rst b/doc/source/configuration/block-storage/drivers/blockbridge-eps-driver.rst deleted file mode 100644 index 399486cf7..000000000 --- a/doc/source/configuration/block-storage/drivers/blockbridge-eps-driver.rst +++ /dev/null @@ -1,244 +0,0 @@ -=============== -Blockbridge EPS -=============== - -Introduction -~~~~~~~~~~~~ - -Blockbridge is software that transforms commodity infrastructure into -secure multi-tenant storage that operates as a programmable service. It -provides automatic encryption, secure deletion, quality of service (QoS), -replication, and programmable security capabilities on your choice of -hardware. Blockbridge uses micro-segmentation to provide isolation that allows -you to concurrently operate OpenStack, Docker, and bare-metal workflows on -shared resources. When used with OpenStack, isolated management domains are -dynamically created on a per-project basis. All volumes and clones, within and -between projects, are automatically cryptographically isolated and implement -secure deletion. - -Architecture reference -~~~~~~~~~~~~~~~~~~~~~~ - -**Blockbridge architecture** - -.. figure:: ../../figures/bb-cinder-fig1.png - :width: 100% - - -Control paths -------------- - -The Blockbridge driver is packaged with the core distribution of -OpenStack. Operationally, it executes in the context of the Block -Storage service. 
The driver communicates with an OpenStack-specific API -provided by the Blockbridge EPS platform. Blockbridge optionally -communicates with Identity, Compute, and Block Storage -services. - -Block storage API ------------------ - -Blockbridge is API driven software-defined storage. The system -implements a native HTTP API that is tailored to the specific needs of -OpenStack. Each Block Storage service operation maps to a single -back-end API request that provides ACID semantics. The API is -specifically designed to reduce, if not eliminate, the possibility of -inconsistencies between the Block Storage service and external storage -infrastructure in the event of hardware, software or data center -failure. - -Extended management -------------------- - -OpenStack users may utilize Blockbridge interfaces to manage -replication, auditing, statistics, and performance information on a -per-project and per-volume basis. In addition, they can manage low-level -data security functions including verification of data authenticity and -encryption key delegation. Native integration with the Identity Service -allows tenants to use a single set of credentials. Integration with -Block storage and Compute services provides dynamic metadata mapping -when using Blockbridge management APIs and tools. - -Attribute-based provisioning ----------------------------- - -Blockbridge organizes resources using descriptive identifiers called -*attributes*. Attributes are assigned by administrators of the -infrastructure. They are used to describe the characteristics of storage -in an application-friendly way. Applications construct queries that -describe storage provisioning constraints and the Blockbridge storage -stack assembles the resources as described. - -Any given instance of a Blockbridge volume driver specifies a *query* -for resources. For example, a query could specify -``'+ssd +10.0.0.0 +6nines -production iops.reserve=1000 -capacity.reserve=30%'``. 
This query is satisfied by selecting SSD -resources, accessible on the 10.0.0.0 network, with high resiliency, for -non-production workloads, with guaranteed IOPS of 1000 and a storage -reservation for 30% of the volume capacity specified at create time. -Queries and parameters are completely administrator defined: they -reflect the layout, resource, and organizational goals of a specific -deployment. - -Supported operations -~~~~~~~~~~~~~~~~~~~~ - -- Create, delete, clone, attach, and detach volumes -- Create and delete volume snapshots -- Create a volume from a snapshot -- Copy an image to a volume -- Copy a volume to an image -- Extend a volume -- Get volume statistics - -Supported protocols -~~~~~~~~~~~~~~~~~~~ - -Blockbridge provides iSCSI access to storage. A unique iSCSI data fabric -is programmatically assembled when a volume is attached to an instance. -A fabric is disassembled when a volume is detached from an instance. -Each volume is an isolated SCSI device that supports persistent -reservations. - -Configuration steps -~~~~~~~~~~~~~~~~~~~ - -.. _cg_create_an_authentication_token: - -Create an authentication token ------------------------------- - -Whenever possible, avoid using password-based authentication. Even if -you have created a role-restricted administrative user via Blockbridge, -token-based authentication is preferred. You can generate persistent -authentication tokens using the Blockbridge command-line tool as -follows: - -.. code-block:: console - - $ bb -H bb-mn authorization create --notes "OpenStack" --restrict none - Authenticating to https://bb-mn/api - - Enter user or access token: system - Password for system: - Authenticated; token expires in 3599 seconds. 
- - == Authorization: ATH4762894C40626410 - notes OpenStack - serial ATH4762894C40626410 - account system (ACT0762594C40626440) - user system (USR1B62094C40626440) - enabled yes - created at 2015-10-24 22:08:48 +0000 - access type online - token suffix xaKUy3gw - restrict none - - == Access Token - access token 1/elvMWilMvcLAajl...3ms3U1u2KzfaMw6W8xaKUy3gw - - *** Remember to record your access token! - -Create volume type ------------------- - -Before configuring and enabling the Blockbridge volume driver, register -an OpenStack volume type and associate it with a -``volume_backend_name``. In this example, a volume type, 'Production', -is associated with the ``volume_backend_name`` 'blockbridge\_prod': - -.. code-block:: console - - $ openstack volume type create Production - $ openstack volume type set --property volume_backend_name=blockbridge_prod Production - -Specify volume driver ---------------------- - -Configure the Blockbridge volume driver in ``/etc/cinder/cinder.conf``. -Your ``volume_backend_name`` must match the value specified in the -:command:`openstack volume type set` command in the previous step. - -.. code-block:: ini - - volume_driver = cinder.volume.drivers.blockbridge.BlockbridgeISCSIDriver - volume_backend_name = blockbridge_prod - -Specify API endpoint and authentication ---------------------------------------- - -Configure the API endpoint and authentication. The following example -uses an authentication token. You must create your own as described in -:ref:`cg_create_an_authentication_token`. - -.. code-block:: ini - - blockbridge_api_host = [ip or dns of management cluster] - blockbridge_auth_token = 1/elvMWilMvcLAajl...3ms3U1u2KzfaMw6W8xaKUy3gw - -Specify resource query ----------------------- - -By default, a single pool is configured (implied) with a default -resource query of ``'+openstack'``. 
Within Blockbridge, datastore -resources that advertise the 'openstack' attribute will be selected to -fulfill OpenStack provisioning requests. If you prefer a more specific -query, define a custom pool configuration. - -.. code-block:: ini - - blockbridge_pools = Production: +production +qos iops.reserve=5000 - -Pools support storage systems that offer multiple classes of service. -You may wish to configure multiple pools to implement more sophisticated -scheduling capabilities. - -Configuration options -~~~~~~~~~~~~~~~~~~~~~ - -.. include:: ../../tables/cinder-blockbridge.rst - -.. _cg_configuration_example: - -Configuration example -~~~~~~~~~~~~~~~~~~~~~ - -``cinder.conf`` example file - -.. code-block:: ini - - [Default] - enabled_backends = bb_devel bb_prod - - [bb_prod] - volume_driver = cinder.volume.drivers.blockbridge.BlockbridgeISCSIDriver - volume_backend_name = blockbridge_prod - blockbridge_api_host = [ip or dns of management cluster] - blockbridge_auth_token = 1/elvMWilMvcLAajl...3ms3U1u2KzfaMw6W8xaKUy3gw - blockbridge_pools = Production: +production +qos iops.reserve=5000 - - [bb_devel] - volume_driver = cinder.volume.drivers.blockbridge.BlockbridgeISCSIDriver - volume_backend_name = blockbridge_devel - blockbridge_api_host = [ip or dns of management cluster] - blockbridge_auth_token = 1/elvMWilMvcLAajl...3ms3U1u2KzfaMw6W8xaKUy3gw - blockbridge_pools = Development: +development - -Multiple volume types -~~~~~~~~~~~~~~~~~~~~~ - -Volume *types* are exposed to tenants, *pools* are not. To offer -multiple classes of storage to OpenStack tenants, you should define -multiple volume types. Simply repeat the process above for each desired -type. Be sure to specify a unique ``volume_backend_name`` and pool -configuration for each type. The -:ref:`cinder.conf ` example included with -this documentation illustrates configuration of multiple types. 
- -Testing resources -~~~~~~~~~~~~~~~~~ - -Blockbridge is freely available for testing purposes and deploys in -seconds as a Docker container. This is the same container used to run -continuous integration for OpenStack. For more information visit -`www.blockbridge.io `__. diff --git a/doc/source/configuration/block-storage/drivers/ceph-rbd-volume-driver.rst b/doc/source/configuration/block-storage/drivers/ceph-rbd-volume-driver.rst deleted file mode 100644 index ef7517ead..000000000 --- a/doc/source/configuration/block-storage/drivers/ceph-rbd-volume-driver.rst +++ /dev/null @@ -1,109 +0,0 @@ -============================= -Ceph RADOS Block Device (RBD) -============================= - -If you use KVM or QEMU as your hypervisor, you can configure the Compute -service to use `Ceph RADOS block devices -(RBD) `__ for volumes. - -Ceph is a massively scalable, open source, distributed storage system. -It is comprised of an object store, block store, and a POSIX-compliant -distributed file system. The platform can auto-scale to the exabyte -level and beyond. It runs on commodity hardware, is self-healing and -self-managing, and has no single point of failure. Ceph is in the Linux -kernel and is integrated with the OpenStack cloud operating system. Due -to its open-source nature, you can install and use this portable storage -platform in public or private clouds. - -.. figure:: ../../figures/ceph-architecture.png - - Ceph architecture - -RADOS -~~~~~ - -Ceph is based on Reliable Autonomic Distributed Object Store (RADOS). -RADOS distributes objects across the storage cluster and replicates -objects for fault tolerance. RADOS contains the following major -components: - -*Object Storage Device (OSD) Daemon* - The storage daemon for the RADOS service, which interacts with the - OSD (physical or logical storage unit for your data). - You must run this daemon on each server in your cluster. For each - OSD, you can have an associated hard drive disk. 
For performance - purposes, pool your hard drive disk with raid arrays, logical volume - management (LVM), or B-tree file system (Btrfs) pooling. By default, - the following pools are created: data, metadata, and RBD. - -*Meta-Data Server (MDS)* - Stores metadata. MDSs build a POSIX file - system on top of objects for Ceph clients. However, if you do not use - the Ceph file system, you do not need a metadata server. - -*Monitor (MON)* - A lightweight daemon that handles all communications - with external applications and clients. It also provides a consensus - for distributed decision making in a Ceph/RADOS cluster. For - instance, when you mount a Ceph shared on a client, you point to the - address of a MON server. It checks the state and the consistency of - the data. In an ideal setup, you must run at least three ``ceph-mon`` - daemons on separate servers. - -Ceph developers recommend XFS for production deployments, Btrfs for -testing, development, and any non-critical deployments. Btrfs has the -correct feature set and roadmap to serve Ceph in the long-term, but XFS -and ext4 provide the necessary stability for today’s deployments. - -.. note:: - - If using Btrfs, ensure that you use the correct version (see `Ceph - Dependencies `__). - - For more information about usable file systems, see - `ceph.com/ceph-storage/file-system/ `__. - -Ways to store, use, and expose data -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -To store and access your data, you can use the following storage -systems: - -*RADOS* - Use as an object, default storage mechanism. - -*RBD* - Use as a block device. The Linux kernel RBD (RADOS block - device) driver allows striping a Linux block device over multiple - distributed object store data objects. It is compatible with the KVM - RBD image. - -*CephFS* - Use as a file, POSIX-compliant file system. 
- -Ceph exposes RADOS; you can access it through the following interfaces: - -*RADOS Gateway* - OpenStack Object Storage and Amazon-S3 compatible - RESTful interface (see `RADOS_Gateway - `__). - -*librados* - and its related C/C++ bindings - -*RBD and QEMU-RBD* - Linux kernel and QEMU block devices that stripe - data across multiple objects. - -Driver options -~~~~~~~~~~~~~~ - -The following table contains the configuration options supported by the -Ceph RADOS Block Device driver. - -.. note:: - - The ``volume_tmp_dir`` option has been deprecated and replaced by - ``image_conversion_dir``. - -.. include:: ../../tables/cinder-storage_ceph.rst diff --git a/doc/source/configuration/block-storage/drivers/cloudbyte-driver.rst b/doc/source/configuration/block-storage/drivers/cloudbyte-driver.rst deleted file mode 100644 index da453a085..000000000 --- a/doc/source/configuration/block-storage/drivers/cloudbyte-driver.rst +++ /dev/null @@ -1,8 +0,0 @@ -======================= -CloudByte volume driver -======================= - -CloudByte Block Storage driver configuration -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -.. include:: ../../tables/cinder-cloudbyte.rst diff --git a/doc/source/configuration/block-storage/drivers/coho-data-driver.rst b/doc/source/configuration/block-storage/drivers/coho-data-driver.rst deleted file mode 100644 index 4fad81f6c..000000000 --- a/doc/source/configuration/block-storage/drivers/coho-data-driver.rst +++ /dev/null @@ -1,93 +0,0 @@ -======================= -Coho Data volume driver -======================= - -The Coho DataStream Scale-Out Storage allows your Block Storage service to -scale seamlessly. The architecture consists of commodity storage servers -with SDN ToR switches. Leveraging an SDN OpenFlow controller allows you -to scale storage horizontally, while avoiding storage and network bottlenecks -by intelligent load-balancing and parallelized workloads. 
High-performance -PCIe NVMe flash, paired with traditional hard disk drives (HDD) or solid-state -drives (SSD), delivers low-latency performance even with highly mixed workloads -in large scale environment. - -Coho Data's storage features include real-time instance level -granularity performance and capacity reporting via API or UI, and -single-IP storage endpoint access. - -Supported operations -~~~~~~~~~~~~~~~~~~~~ - -* Create, delete, attach, detach, retype, clone, and extend volumes. -* Create, list, and delete volume snapshots. -* Create a volume from a snapshot. -* Copy a volume to an image. -* Copy an image to a volume. -* Create a thin provisioned volume. -* Get volume statistics. - -Coho Data QoS support -~~~~~~~~~~~~~~~~~~~~~ - -QoS support for the Coho Data driver includes the ability to set the -following capabilities in the OpenStack Block Storage API -``cinder.api.contrib.qos_specs_manage`` QoS specs extension module: - -* **maxIOPS** - The maximum number of IOPS allowed for this volume. - -* **maxMBS** - The maximum throughput allowed for this volume. - -The QoS keys above must be created and associated with a volume type. -For information about how to set the key-value pairs and associate -them with a volume type, see the `volume qos -`_ -section in the OpenStackClient command list. - -.. note:: - - If you change a volume type with QoS to a new volume type - without QoS, the QoS configuration settings will be removed. - -System requirements -~~~~~~~~~~~~~~~~~~~ - -* NFS client on the Block storage controller. - -Coho Data Block Storage driver configuration -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -#. Create cinder volume type. - - .. code-block:: console - - $ openstack volume type create coho-1 - -#. Edit the OpenStack Block Storage service configuration file. - The following sample, ``/etc/cinder/cinder.conf``, configuration lists the - relevant settings for a typical Block Storage service using a single - Coho Data storage: - - .. 
code-block:: ini - - [DEFAULT] - enabled_backends = coho-1 - default_volume_type = coho-1 - - [coho-1] - volume_driver = cinder.volume.drivers.coho.CohoDriver - volume_backend_name = coho-1 - nfs_shares_config = /etc/cinder/coho_shares - nas_secure_file_operations = 'false' - -#. Add your list of Coho Datastream NFS addresses to the file you specified - with the ``nfs_shares_config`` option. For example, if the value of this - option was set to ``/etc/cinder/coho_shares``, then: - - .. code-block:: console - - $ cat /etc/cinder/coho_shares - :/ - -#. Restart the ``cinder-volume`` service to enable Coho Data driver. - -.. include:: ../../tables/cinder-coho.rst diff --git a/doc/source/configuration/block-storage/drivers/coprhd-driver.rst b/doc/source/configuration/block-storage/drivers/coprhd-driver.rst deleted file mode 100644 index a1e7ad788..000000000 --- a/doc/source/configuration/block-storage/drivers/coprhd-driver.rst +++ /dev/null @@ -1,318 +0,0 @@ -===================================== -CoprHD FC, iSCSI, and ScaleIO drivers -===================================== - -CoprHD is an open source software-defined storage controller and API platform. -It enables policy-based management and cloud automation of storage resources -for block, object and file storage providers. -For more details, see `CoprHD `_. - -EMC ViPR Controller is the commercial offering of CoprHD. These same volume -drivers can also be considered as EMC ViPR Controller Block Storage drivers. - - -System requirements -~~~~~~~~~~~~~~~~~~~ - -CoprHD version 3.0 is required. Refer to the CoprHD documentation for -installation and configuration instructions. - -If you are using these drivers to integrate with EMC ViPR Controller, use -EMC ViPR Controller 3.0. - - -Supported operations -~~~~~~~~~~~~~~~~~~~~ - -The following operations are supported: - -- Create, delete, attach, detach, retype, clone, and extend volumes. -- Create, list, and delete volume snapshots. -- Create a volume from a snapshot. 
-- Copy a volume to an image. -- Copy an image to a volume. -- Clone a volume. -- Extend a volume. -- Retype a volume. -- Get volume statistics. -- Create, delete, and update consistency groups. -- Create and delete consistency group snapshots. - - -Driver options -~~~~~~~~~~~~~~ - -The following table contains the configuration options specific to the -CoprHD volume driver. - -.. include:: ../../tables/cinder-coprhd.rst - - -Preparation -~~~~~~~~~~~ - -This involves setting up the CoprHD environment first and then configuring -the CoprHD Block Storage driver. - -CoprHD ------- - -The CoprHD environment must meet specific configuration requirements to -support the OpenStack Block Storage driver. - -- CoprHD users must be assigned a Tenant Administrator role or a Project - Administrator role for the Project being used. CoprHD roles are configured - by CoprHD Security Administrators. Consult the CoprHD documentation for - details. - -- A CorprHD system administrator must execute the following configurations - using the CoprHD UI, CoprHD API, or CoprHD CLI: - - - Create CoprHD virtual array - - Create CoprHD virtual storage pool - - Virtual Array designated for iSCSI driver must have an IP network created - with appropriate IP storage ports - - Designated tenant for use - - Designated project for use - -.. note:: Use each back end to manage one virtual array and one virtual - storage pool. However, the user can have multiple instances of - CoprHD Block Storage driver, sharing the same virtual array and virtual - storage pool. - -- A typical CoprHD virtual storage pool will have the following values - specified: - - - Storage Type: Block - - Provisioning Type: Thin - - Protocol: iSCSI/Fibre Channel(FC)/ScaleIO - - Multi-Volume Consistency: DISABLED OR ENABLED - - Maximum Native Snapshots: A value greater than 0 allows the OpenStack user - to take Snapshots - - -CoprHD drivers - Single back end --------------------------------- - -**cinder.conf** - -#. 
Modify ``/etc/cinder/cinder.conf`` by adding the following lines, - substituting values for your environment: - - .. code-block:: ini - - [coprhd-iscsi] - volume_driver = cinder.volume.drivers.coprhd.iscsi.EMCCoprHDISCSIDriver - volume_backend_name = coprhd-iscsi - coprhd_hostname = - coprhd_port = 4443 - coprhd_username = - coprhd_password = - coprhd_tenant = - coprhd_project = - coprhd_varray = - coprhd_emulate_snapshot = True or False, True if the CoprHD vpool has VMAX or VPLEX as the backing storage - -#. If you use the ScaleIO back end, add the following lines: - - .. code-block:: ini - - coprhd_scaleio_rest_gateway_host = - coprhd_scaleio_rest_gateway_port = 443 - coprhd_scaleio_rest_server_username = - coprhd_scaleio_rest_server_password = - scaleio_verify_server_certificate = True or False - scaleio_server_certificate_path = - -#. Specify the driver using the ``enabled_backends`` parameter:: - - enabled_backends = coprhd-iscsi - - .. note:: To utilize the Fibre Channel driver, replace the - ``volume_driver`` line above with:: - - volume_driver = cinder.volume.drivers.coprhd.fc.EMCCoprHDFCDriver - - .. note:: To utilize the ScaleIO driver, replace the ``volume_driver`` line - above with:: - - volume_driver = cinder.volume.drivers.coprhd.fc.EMCCoprHDScaleIODriver - - .. note:: Set ``coprhd_emulate_snapshot`` to True if the CoprHD vpool has - VMAX or VPLEX as the back-end storage. For these type of back-end - storages, when a user tries to create a snapshot, an actual volume - gets created in the back end. - -#. Modify the ``rpc_response_timeout`` value in ``/etc/cinder/cinder.conf`` to - at least 5 minutes. If this entry does not already exist within the - ``cinder.conf`` file, add it in the ``[DEFAULT]`` section: - - .. code-block:: ini - - [DEFAULT] - # ... - rpc_response_timeout = 300 - -#. Now, restart the ``cinder-volume`` service. - -**Volume type creation and extra specs** - -#. Create OpenStack volume types: - - .. 
code-block:: console - - $ openstack volume type create - -#. Map the OpenStack volume type to the CoprHD virtual pool: - - .. code-block:: console - - $ openstack volume type set --property CoprHD:VPOOL= - -#. Map the volume type created to appropriate back-end driver: - - .. code-block:: console - - $ openstack volume type set --property volume_backend_name= - - -CoprHD drivers - Multiple back-ends ------------------------------------ - -**cinder.conf** - -#. Add or modify the following entries if you are planning to use multiple - back-end drivers: - - .. code-block:: ini - - enabled_backends = coprhddriver-iscsi,coprhddriver-fc,coprhddriver-scaleio - -#. Add the following at the end of the file: - - .. code-block:: ini - - [coprhddriver-iscsi] - volume_driver = cinder.volume.drivers.coprhd.iscsi.EMCCoprHDISCSIDriver - volume_backend_name = EMCCoprHDISCSIDriver - coprhd_hostname = - coprhd_port = 4443 - coprhd_username = - coprhd_password = - coprhd_tenant = - coprhd_project = - coprhd_varray = - - - [coprhddriver-fc] - volume_driver = cinder.volume.drivers.coprhd.fc.EMCCoprHDFCDriver - volume_backend_name = EMCCoprHDFCDriver - coprhd_hostname = - coprhd_port = 4443 - coprhd_username = - coprhd_password = - coprhd_tenant = - coprhd_project = - coprhd_varray = - - - [coprhddriver-scaleio] - volume_driver = cinder.volume.drivers.coprhd.scaleio.EMCCoprHDScaleIODriver - volume_backend_name = EMCCoprHDScaleIODriver - coprhd_hostname = - coprhd_port = 4443 - coprhd_username = - coprhd_password = - coprhd_tenant = - coprhd_project = - coprhd_varray = - coprhd_scaleio_rest_gateway_host = - coprhd_scaleio_rest_gateway_port = 443 - coprhd_scaleio_rest_server_username = - coprhd_scaleio_rest_server_password = - scaleio_verify_server_certificate = True or False - scaleio_server_certificate_path = - - -#. Restart the ``cinder-volume`` service. 
- - -**Volume type creation and extra specs** - -Setup the ``volume-types`` and ``volume-type`` to ``volume-backend`` -association: - -.. code-block:: console - - $ openstack volume type create "CoprHD High Performance ISCSI" - $ openstack volume type set "CoprHD High Performance ISCSI" --property CoprHD:VPOOL="High Performance ISCSI" - $ openstack volume type set "CoprHD High Performance ISCSI" --property volume_backend_name= EMCCoprHDISCSIDriver - - $ openstack volume type create "CoprHD High Performance FC" - $ openstack volume type set "CoprHD High Performance FC" --property CoprHD:VPOOL="High Performance FC" - $ openstack volume type set "CoprHD High Performance FC" --property volume_backend_name= EMCCoprHDFCDriver - - $ openstack volume type create "CoprHD performance SIO" - $ openstack volume type set "CoprHD performance SIO" --property CoprHD:VPOOL="Scaled Perf" - $ openstack volume type set "CoprHD performance SIO" --property volume_backend_name= EMCCoprHDScaleIODriver - - -ISCSI driver notes -~~~~~~~~~~~~~~~~~~ - -* The compute host must be added to the CoprHD along with its ISCSI - initiator. -* The ISCSI initiator must be associated with IP network on the CoprHD. - - -FC driver notes -~~~~~~~~~~~~~~~ - -* The compute host must be attached to a VSAN or fabric discovered - by CoprHD. -* There is no need to perform any SAN zoning operations. CoprHD will perform - the necessary operations automatically as part of the provisioning process. - - -ScaleIO driver notes -~~~~~~~~~~~~~~~~~~~~ - -* Install the ScaleIO SDC on the compute host. -* The compute host must be added as the SDC to the ScaleIO MDS - using the below commands:: - - /opt/emc/scaleio/sdc/bin/drv_cfg --add_mdm --ip List of MDM IPs - (starting with primary MDM and separated by comma) - Example: - /opt/emc/scaleio/sdc/bin/drv_cfg --add_mdm --ip - 10.247.78.45,10.247.78.46,10.247.78.47 - -This step has to be repeated whenever the SDC (compute host in this case) -is rebooted. 
- - -Consistency group configuration -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -To enable the support of consistency group and consistency group snapshot -operations, use a text editor to edit the file ``/etc/cinder/policy.json`` and -change the values of the below fields as specified. Upon editing the file, -restart the ``c-api`` service:: - - "consistencygroup:create" : "", - "consistencygroup:delete": "", - "consistencygroup:get": "", - "consistencygroup:get_all": "", - "consistencygroup:update": "", - "consistencygroup:create_cgsnapshot" : "", - "consistencygroup:delete_cgsnapshot": "", - "consistencygroup:get_cgsnapshot": "", - "consistencygroup:get_all_cgsnapshots": "", - - -Names of resources in back-end storage -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -All the resources like volume, consistency group, snapshot, and consistency -group snapshot will use the display name in OpenStack for naming in the -back-end storage. diff --git a/doc/source/configuration/block-storage/drivers/datera-volume-driver.rst b/doc/source/configuration/block-storage/drivers/datera-volume-driver.rst deleted file mode 100644 index b32eabd3b..000000000 --- a/doc/source/configuration/block-storage/drivers/datera-volume-driver.rst +++ /dev/null @@ -1,170 +0,0 @@ -============== -Datera drivers -============== - -Datera iSCSI driver -------------------- - -The Datera Elastic Data Fabric (EDF) is a scale-out storage software that -turns standard, commodity hardware into a RESTful API-driven, intent-based -policy controlled storage fabric for large-scale clouds. The Datera EDF -integrates seamlessly with the Block Storage service. It provides storage -through the iSCSI block protocol framework over the iSCSI block protocol. -Datera supports all of the Block Storage services. - -System requirements, prerequisites, and recommendations -------------------------------------------------------- - -Prerequisites -~~~~~~~~~~~~~ - -* Must be running compatible versions of OpenStack and Datera EDF. 
- Please visit `here `_ to determine the - correct version. - -* All nodes must have access to Datera EDF through the iSCSI block protocol. - -* All nodes accessing the Datera EDF must have the following packages - installed: - - * Linux I/O (LIO) - * open-iscsi - * open-iscsi-utils - * wget - -.. include:: ../../tables/cinder-datera.rst - - - -Configuring the Datera volume driver -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -Modify the ``/etc/cinder/cinder.conf`` file for Block Storage service. - -* Enable the Datera volume driver: - -.. code-block:: ini - - [DEFAULT] - # ... - enabled_backends = datera - # ... - -* Optional. Designate Datera as the default back-end: - -.. code-block:: ini - - default_volume_type = datera - -* Create a new section for the Datera back-end definition. The ``san_ip`` can - be either the Datera Management Network VIP or one of the Datera iSCSI - Access Network VIPs depending on the network segregation requirements: - -.. code-block:: ini - - volume_driver = cinder.volume.drivers.datera.DateraDriver - san_ip = # The OOB Management IP of the cluster - san_login = admin # Your cluster admin login - san_password = password # Your cluster admin password - san_is_local = true - datera_num_replicas = 3 # Number of replicas to use for volume - -Enable the Datera volume driver -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -* Verify the OpenStack control node can reach the Datera ``san_ip``: - -.. code-block:: bash - - $ ping -c 4 - -* Start the Block Storage service on all nodes running the ``cinder-volume`` - services: - -.. 
code-block:: bash - - $ service cinder-volume restart - -QoS support for the Datera drivers includes the ability to set the -following capabilities in QoS Specs - -* **read_iops_max** -- must be positive integer - -* **write_iops_max** -- must be positive integer - -* **total_iops_max** -- must be positive integer - -* **read_bandwidth_max** -- in KB per second, must be positive integer - -* **write_bandwidth_max** -- in KB per second, must be positive integer - -* **total_bandwidth_max** -- in KB per second, must be positive integer - -.. code-block:: bash - - # Create qos spec - $ openstack volume qos create --property total_iops_max=1000 total_bandwidth_max=2000 DateraBronze - - # Associate qos-spec with volume type - $ openstack volume qos associate DateraBronze VOLUME_TYPE - - # Add additional qos values or update existing ones - $ openstack volume qos set --property read_bandwidth_max=500 DateraBronze - -Supported operations -~~~~~~~~~~~~~~~~~~~~ - -* Create, delete, attach, detach, manage, unmanage, and list volumes. - -* Create, list, and delete volume snapshots. - -* Create a volume from a snapshot. - -* Copy an image to a volume. - -* Copy a volume to an image. - -* Clone a volume. - -* Extend a volume. - -* Support for naming convention changes. - -Configuring multipathing -~~~~~~~~~~~~~~~~~~~~~~~~ - -The following configuration is for 3.X Linux kernels, some parameters in -different Linux distributions may be different. Make the following changes -in the ``multipath.conf`` file: - -.. 
code-block:: text - - defaults { - checker_timer 5 - } - devices { - device { - vendor "DATERA" - product "IBLOCK" - getuid_callout "/lib/udev/scsi_id --whitelisted – - replace-whitespace --page=0x80 --device=/dev/%n" - path_grouping_policy group_by_prio - path_checker tur - prio alua - path_selector "queue-length 0" - hardware_handler "1 alua" - failback 5 - } - } - blacklist { - device { - vendor ".*" - product ".*" - } - } - blacklist_exceptions { - device { - vendor "DATERA.*" - product "IBLOCK.*" - } - } - diff --git a/doc/source/configuration/block-storage/drivers/dell-emc-scaleio-driver.rst b/doc/source/configuration/block-storage/drivers/dell-emc-scaleio-driver.rst deleted file mode 100644 index 1e4cf6840..000000000 --- a/doc/source/configuration/block-storage/drivers/dell-emc-scaleio-driver.rst +++ /dev/null @@ -1,319 +0,0 @@ -===================================== -Dell EMC ScaleIO Block Storage driver -===================================== - -ScaleIO is a software-only solution that uses existing servers' local -disks and LAN to create a virtual SAN that has all of the benefits of -external storage, but at a fraction of the cost and complexity. Using the -driver, Block Storage hosts can connect to a ScaleIO Storage -cluster. - -This section explains how to configure and connect the block storage -nodes to a ScaleIO storage cluster. - -Support matrix -~~~~~~~~~~~~~~ - -.. list-table:: - :widths: 10 25 - :header-rows: 1 - - * - ScaleIO version - - Supported Linux operating systems - * - 2.0 - - CentOS 6.x, CentOS 7.x, SLES 11 SP3, SLES 12, Ubuntu 14.04, Ubuntu 16.04 - -Deployment prerequisites -~~~~~~~~~~~~~~~~~~~~~~~~ - -* ScaleIO Gateway must be installed and accessible in the network. - For installation steps, refer to the Preparing the installation Manager - and the Gateway section in ScaleIO Deployment Guide. See - :ref:`scale_io_docs`. - -* ScaleIO Data Client (SDC) must be installed on all OpenStack nodes. - -.. 
note:: Ubuntu users must follow the specific instructions in the ScaleIO - deployment guide for Ubuntu environments. See the Deploying on - Ubuntu servers section in ScaleIO Deployment Guide. See - :ref:`scale_io_docs`. - -.. _scale_io_docs: - -Official documentation ----------------------- - -To find the ScaleIO documentation: - -#. Go to the `ScaleIO product documentation page `_. - -#. From the left-side panel, select the relevant version. - -#. Search for "ScaleIO 2.0 Deployment Guide". - -Supported operations -~~~~~~~~~~~~~~~~~~~~ - -* Create, delete, clone, attach, detach, manage, and unmanage volumes - -* Create, delete, manage, and unmanage volume snapshots - -* Create a volume from a snapshot - -* Copy an image to a volume - -* Copy a volume to an image - -* Extend a volume - -* Get volume statistics - -* Create, list, update, and delete consistency groups - -* Create, list, update, and delete consistency group snapshots - -ScaleIO QoS support -~~~~~~~~~~~~~~~~~~~~ - -QoS support for the ScaleIO driver includes the ability to set the -following capabilities in the Block Storage API -``cinder.api.contrib.qos_specs_manage`` QoS specs extension module: - -* ``maxIOPS`` - -* ``maxIOPSperGB`` - -* ``maxBWS`` - -* ``maxBWSperGB`` - -The QoS keys above must be created and associated with a volume type. -For information about how to set the key-value pairs and associate -them with a volume type, run the following commands: - -.. code-block:: console - - $ openstack help volume qos - -``maxIOPS`` - The QoS I/O rate limit. If not set, the I/O rate will be unlimited. - The setting must be larger than 10. - -``maxIOPSperGB`` - The QoS I/O rate limit. - The limit will be calculated by the specified value multiplied by - the volume size. - The setting must be larger than 10. - -``maxBWS`` - The QoS I/O bandwidth rate limit in KBs. If not set, the I/O - bandwidth rate will be unlimited. The setting must be a multiple of 1024. 
- -``maxBWSperGB`` - The QoS I/O bandwidth rate limit in KBs. - The limit will be calculated by the specified value multiplied by - the volume size. - The setting must be a multiple of 1024. - -The driver always chooses the minimum between the QoS keys value -and the relevant calculated value of ``maxIOPSperGB`` or ``maxBWSperGB``. - -Since the limits are per SDC, they will be applied after the volume -is attached to an instance, and thus to a compute node/SDC. - -ScaleIO thin provisioning support -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -The Block Storage driver supports creation of thin-provisioned and -thick-provisioned volumes. -The provisioning type settings can be added as an extra specification -of the volume type, as follows: - -.. code-block:: ini - - provisioning:type = thin\thick - -The old specification: ``sio:provisioning_type`` is deprecated. - -Oversubscription ----------------- - -Configure the oversubscription ratio by adding the following parameter -under the separate section for ScaleIO: - -.. code-block:: ini - - sio_max_over_subscription_ratio = OVER_SUBSCRIPTION_RATIO - -.. note:: - - The default value for ``sio_max_over_subscription_ratio`` - is 10.0. - -Oversubscription is calculated correctly by the Block Storage service -only if the extra specification ``provisioning:type`` -appears in the volume type regardless to the default provisioning type. -Maximum oversubscription value supported for ScaleIO is 10.0. - -Default provisioning type -------------------------- - -If provisioning type settings are not specified in the volume type, -the default value is set according to the ``san_thin_provision`` -option in the configuration file. The default provisioning type -will be ``thin`` if the option is not specified in the configuration -file. To set the default provisioning type ``thick``, set -the ``san_thin_provision`` option to ``false`` -in the configuration file, as follows: - -.. 
code-block:: ini - - san_thin_provision = false - -The configuration file is usually located in -``/etc/cinder/cinder.conf``. -For a configuration example, see: -:ref:`cinder.conf `. - -ScaleIO Block Storage driver configuration -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -Edit the ``cinder.conf`` file by adding the configuration below under -a new section (for example, ``[scaleio]``) and change the ``enabled_backends`` -setting (in the ``[DEFAULT]`` section) to include this new back end. -The configuration file is usually located at -``/etc/cinder/cinder.conf``. - -For a configuration example, refer to the example -:ref:`cinder.conf ` . - -ScaleIO driver name ------------------- - -Configure the driver name by adding the following parameter: - -.. code-block:: ini - - volume_driver = cinder.volume.drivers.dell_emc.scaleio.driver.ScaleIODriver - -ScaleIO MDM server IP --------------------- - -The ScaleIO Meta Data Manager monitors and maintains the available -resources and permissions. - -To retrieve the MDM server IP address, use the :command:`drv_cfg --query_mdms` -command. - -Configure the MDM server IP address by adding the following parameter: - -.. code-block:: ini - - san_ip = ScaleIO GATEWAY IP - -ScaleIO Protection Domain name ------------------------------- - -ScaleIO allows multiple Protection Domains (groups of SDSs that provide -backup for each other). - -To retrieve the available Protection Domains, use the command -:command:`scli --query_all` and search for the Protection -Domains section. - -Configure the Protection Domain for newly created volumes by adding the -following parameter: - -.. code-block:: ini - - sio_protection_domain_name = ScaleIO Protection Domain - -ScaleIO Storage Pool name ------------------------- - -A ScaleIO Storage Pool is a set of physical devices in a Protection -Domain. - -To retrieve the available Storage Pools, use the command -:command:`scli --query_all` and search for available Storage Pools. 
- -Configure the Storage Pool for newly created volumes by adding the -following parameter: - -.. code-block:: ini - - sio_storage_pool_name = ScaleIO Storage Pool - -ScaleIO Storage Pools ---------------------- - -Multiple Storage Pools and Protection Domains can be listed for use by -the virtual machines. - -To retrieve the available Storage Pools, use the command -:command:`scli --query_all` and search for available Storage Pools. - -Configure the available Storage Pools by adding the following parameter: - -.. code-block:: ini - - sio_storage_pools = Comma-separated list of protection domain:storage pool name - -ScaleIO user credentials ------------------------- - -Block Storage requires a ScaleIO user with administrative -privileges. ScaleIO recommends creating a dedicated OpenStack user -account that has an administrative user role. - -Refer to the ScaleIO User Guide for details on user account management. - -Configure the user credentials by adding the following parameters: - -.. code-block:: ini - - san_login = ScaleIO username - - san_password = ScaleIO password - -Multiple back ends -~~~~~~~~~~~~~~~~~~ - -Configuring multiple storage back ends allows you to create several back-end -storage solutions that serve the same Compute resources. - -When a volume is created, the scheduler selects the appropriate back end -to handle the request, according to the specified volume type. - -.. _cg_configuration_example_emc: - -Configuration example -~~~~~~~~~~~~~~~~~~~~~ - -**cinder.conf example file** - -You can update the ``cinder.conf`` file by editing the necessary -parameters as follows: - -.. 
code-block:: ini - - [Default] - enabled_backends = scaleio - - [scaleio] - volume_driver = cinder.volume.drivers.dell_emc.scaleio.driver.ScaleIODriver - volume_backend_name = scaleio - san_ip = GATEWAY_IP - sio_protection_domain_name = Default_domain - sio_storage_pool_name = Default_pool - sio_storage_pools = Domain1:Pool1,Domain2:Pool2 - san_login = SIO_USER - san_password = SIO_PASSWD - san_thin_provision = false - -Configuration options -~~~~~~~~~~~~~~~~~~~~~ - -The ScaleIO driver supports these configuration options: - -.. include:: ../../tables/cinder-emc_sio.rst diff --git a/doc/source/configuration/block-storage/drivers/dell-emc-unity-driver.rst b/doc/source/configuration/block-storage/drivers/dell-emc-unity-driver.rst deleted file mode 100644 index fc1d02c0d..000000000 --- a/doc/source/configuration/block-storage/drivers/dell-emc-unity-driver.rst +++ /dev/null @@ -1,339 +0,0 @@ -===================== -Dell EMC Unity driver -===================== - -Unity driver has been integrated in the OpenStack Block Storage project since -the Ocata release. The driver is built on the top of Block Storage framework -and a Dell EMC distributed Python package -`storops `_. - -Prerequisites -~~~~~~~~~~~~~ - -+-------------------+----------------+ -| Software | Version | -+===================+================+ -| Unity OE | 4.1.X | -+-------------------+----------------+ -| OpenStack | Ocata | -+-------------------+----------------+ -| storops | 0.4.2 or newer | -+-------------------+----------------+ - - -Supported operations -~~~~~~~~~~~~~~~~~~~~ - -- Create, delete, attach, and detach volumes. -- Create, list, and delete volume snapshots. -- Create a volume from a snapshot. -- Copy an image to a volume. -- Create an image from a volume. -- Clone a volume. -- Extend a volume. -- Migrate a volume. -- Get volume statistics. -- Efficient non-disruptive volume backup. - -Driver configuration -~~~~~~~~~~~~~~~~~~~~ - -.. 
note:: The following instructions should all be performed on Block Storage - nodes. - -#. Install `storops` from pypi: - - .. code-block:: console - - # pip install storops - - -#. Add the following content into ``/etc/cinder/cinder.conf``: - - .. code-block:: ini - - [DEFAULT] - enabled_backends = unity - - [unity] - # Storage protocol - storage_protocol = iSCSI - # Unisphere IP - san_ip = - # Unisphere username and password - san_login = - san_password = - # Volume driver name - volume_driver = cinder.volume.drivers.dell_emc.unity.Driver - # backend's name - volume_backend_name = Storage_ISCSI_01 - - .. note:: These are minimal options for Unity driver, for more options, - see `Driver options`_. - - -.. note:: (**Optional**) If you require multipath based data access, perform - below steps on both Block Storage and Compute nodes. - - -#. Install ``sysfsutils``, ``sg3-utils`` and ``multipath-tools``: - - .. code-block:: console - - # apt-get install multipath-tools sg3-utils sysfsutils - - -#. (Required for FC driver in case `Auto-zoning Support`_ is disabled) Zone the - FC ports of Compute nodes with Unity FC target ports. - - -#. Enable Unity storage optimized multipath configuration: - - Add the following content into ``/etc/multipath.conf`` - - .. 
code-block:: vim - - blacklist { - # Skip the files under /dev that are definitely not FC/iSCSI devices - # Different system may need different customization - devnode "^(ram|raw|loop|fd|md|dm-|sr|scd|st)[0-9]*" - devnode "^hd[a-z][0-9]*" - devnode "^cciss!c[0-9]d[0-9]*[p[0-9]*]" - - # Skip LUNZ device from VNX/Unity - device { - vendor "DGC" - product "LUNZ" - } - } - - defaults { - user_friendly_names no - flush_on_last_del yes - } - - devices { - # Device attributed for EMC CLARiiON and VNX/Unity series ALUA - device { - vendor "DGC" - product ".*" - product_blacklist "LUNZ" - path_grouping_policy group_by_prio - path_selector "round-robin 0" - path_checker emc_clariion - features "0" - no_path_retry 12 - hardware_handler "1 alua" - prio alua - failback immediate - } - } - - -#. Restart the multipath service: - - .. code-block:: console - - # service multipath-tools restart - - -#. Enable multipath for image transfer in ``/etc/cinder/cinder.conf``. - - .. code-block:: ini - - use_multipath_for_image_xfer = True - - Restart the ``cinder-volume`` service to load the change. - -#. Enable multipath for volume attach/detach in ``/etc/nova/nova.conf``. - - .. code-block:: ini - - [libvirt] - ... - volume_use_multipath = True - ... - -#. Restart the ``nova-compute`` service. - -Driver options -~~~~~~~~~~~~~~ - -.. include:: ../../tables/cinder-dell_emc_unity.rst - -FC or iSCSI ports option ------------------------- - -Specify the list of FC or iSCSI ports to be used to perform the IO. Wild card -character is supported. -For iSCSI ports, use the following format: - -.. code-block:: ini - - unity_io_ports = spa_eth2, spb_eth2, *_eth3 - -For FC ports, use the following format: - -.. code-block:: ini - - unity_io_ports = spa_iom_0_fc0, spb_iom_0_fc0, *_iom_0_fc1 - -List the port ID with the :command:`uemcli` command: - -.. code-block:: console - - $ uemcli /net/port/eth show -output csv - ... - "spa_eth2","SP A Ethernet Port 2","spa","file, net, iscsi", ... 
- "spb_eth2","SP B Ethernet Port 2","spb","file, net, iscsi", ... - ... - - $ uemcli /net/port/fc show -output csv - ... - "spa_iom_0_fc0","SP A I/O Module 0 FC Port 0","spa", ... - "spb_iom_0_fc0","SP B I/O Module 0 FC Port 0","spb", ... - ... - -Live migration integration -~~~~~~~~~~~~~~~~~~~~~~~~~~ - -It is suggested to have multipath configured on Compute nodes for robust data -access in VM instances live migration scenario. Once ``user_friendly_names no`` -is set in defaults section of ``/etc/multipath.conf``, Compute nodes will use -the WWID as the alias for the multipath devices. - -To enable multipath in live migration: - -.. note:: Make sure `Driver configuration`_ steps are performed before - following steps. - -#. Set multipath in ``/etc/nova/nova.conf``: - - .. code-block:: ini - - [libvirt] - ... - volume_use_multipath = True - ... - - Restart `nova-compute` service. - - -#. Set ``user_friendly_names no`` in ``/etc/multipath.conf`` - - .. code-block:: text - - ... - defaults { - user_friendly_names no - } - ... - -#. Restart the ``multipath-tools`` service. - - -Thin and thick provisioning -~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -Only thin volume provisioning is supported in Unity volume driver. - - -QoS support -~~~~~~~~~~~ - -Unity driver supports ``maxBWS`` and ``maxIOPS`` specs for the back-end -consumer type. ``maxBWS`` represents the ``Maximum IO/S`` absolute limit, -``maxIOPS`` represents the ``Maximum Bandwidth (KBPS)`` absolute limit on the -Unity respectively. - - -Auto-zoning support -~~~~~~~~~~~~~~~~~~~ - -Unity volume driver supports auto-zoning, and share the same configuration -guide for other vendors. Refer to :ref:`fc_zone_manager` -for detailed configuration steps. - -Solution for LUNZ device -~~~~~~~~~~~~~~~~~~~~~~~~ - -The EMC host team also found LUNZ on all of the hosts, EMC best practice is to -present a LUN with HLU 0 to clear any LUNZ devices as they can cause issues on -the host. See KB `LUNZ Device `_. 
- -To workaround this issue, Unity driver creates a `Dummy LUN` (if not present), -and adds it to each host to occupy the `HLU 0` during volume attachment. - -.. note:: This `Dummy LUN` is shared among all hosts connected to the Unity. - -Efficient non-disruptive volume backup -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -The default implementation in Block Storage for non-disruptive volume backup is -not efficient since a cloned volume will be created during backup. - -An effective approach to backups is to create a snapshot for the volume and -connect this snapshot to the Block Storage host for volume backup. - -Troubleshooting -~~~~~~~~~~~~~~~ - -To troubleshoot a failure in OpenStack deployment, the best way is to -enable verbose and debug log, at the same time, leverage the build-in -`Return request ID to caller -`_ -to track specific Block Storage command logs. - - -#. Enable verbose log, set following in ``/etc/cinder/cinder.conf`` and restart - all Block Storage services: - - .. code-block:: ini - - [DEFAULT] - - ... - - debug = True - verbose = True - - ... - - - If other projects (usually Compute) are also involved, set `debug` - and ``verbose`` to ``True``. - -#. use ``--debug`` to trigger any problematic Block Storage operation: - - .. code-block:: console - - # cinder --debug create --name unity_vol1 100 - - - You will see the request ID from the console, for example: - - .. 
code-block:: console - - DEBUG:keystoneauth:REQ: curl -g -i -X POST - http://192.168.1.9:8776/v2/e50d22bdb5a34078a8bfe7be89324078/volumes -H - "User-Agent: python-cinderclient" -H "Content-Type: application/json" -H - "Accept: application/json" -H "X-Auth-Token: - {SHA1}bf4a85ad64302b67a39ad7c6f695a9630f39ab0e" -d '{"volume": {"status": - "creating", "user_id": null, "name": "unity_vol1", "imageRef": null, - "availability_zone": null, "description": null, "multiattach": false, - "attach_status": "detached", "volume_type": null, "metadata": {}, - "consistencygroup_id": null, "source_volid": null, "snapshot_id": null, - "project_id": null, "source_replica": null, "size": 10}}' - DEBUG:keystoneauth:RESP: [202] X-Compute-Request-Id: - req-3a459e0e-871a-49f9-9796-b63cc48b5015 Content-Type: application/json - Content-Length: 804 X-Openstack-Request-Id: - req-3a459e0e-871a-49f9-9796-b63cc48b5015 Date: Mon, 12 Dec 2016 09:31:44 GMT - Connection: keep-alive - -#. Use commands like ``grep``, ``awk`` to find the error related to the Block - Storage operations. - - .. code-block:: console - - # grep "req-3a459e0e-871a-49f9-9796-b63cc48b5015" cinder-volume.log - diff --git a/doc/source/configuration/block-storage/drivers/dell-equallogic-driver.rst b/doc/source/configuration/block-storage/drivers/dell-equallogic-driver.rst deleted file mode 100644 index 15167852c..000000000 --- a/doc/source/configuration/block-storage/drivers/dell-equallogic-driver.rst +++ /dev/null @@ -1,160 +0,0 @@ -============================= -Dell EqualLogic volume driver -============================= - -The Dell EqualLogic volume driver interacts with configured EqualLogic -arrays and supports various operations. - -Supported operations -~~~~~~~~~~~~~~~~~~~~ - -- Create, delete, attach, and detach volumes. -- Create, list, and delete volume snapshots. -- Clone a volume. 
- -Configuration -~~~~~~~~~~~~~ - -The OpenStack Block Storage service supports: - -- Multiple instances of Dell EqualLogic Groups or Dell EqualLogic Group - Storage Pools and multiple pools on a single array. - -- Multiple instances of Dell EqualLogic Groups or Dell EqualLogic Group - Storage Pools or multiple pools on a single array. - -The Dell EqualLogic volume driver's ability to access the EqualLogic Group is -dependent upon the generic block storage driver's SSH settings in the -``/etc/cinder/cinder.conf`` file (see -:ref:`block-storage-sample-configuration-file` for reference). - -.. include:: ../../tables/cinder-eqlx.rst - -Default (single-instance) configuration ---------------------------------------- - -The following sample ``/etc/cinder/cinder.conf`` configuration lists the -relevant settings for a typical Block Storage service using a single -Dell EqualLogic Group: - -.. code-block:: ini - - [DEFAULT] - # Required settings - - volume_driver = cinder.volume.drivers.dell_emc.ps.PSSeriesISCSIDriver - san_ip = IP_EQLX - san_login = SAN_UNAME - san_password = SAN_PW - eqlx_group_name = EQLX_GROUP - eqlx_pool = EQLX_POOL - - # Optional settings - - san_thin_provision = true|false - use_chap_auth = true|false - chap_username = EQLX_UNAME - chap_password = EQLX_PW - eqlx_cli_max_retries = 5 - san_ssh_port = 22 - ssh_conn_timeout = 30 - san_private_key = SAN_KEY_PATH - ssh_min_pool_conn = 1 - ssh_max_pool_conn = 5 - -In this example, replace the following variables accordingly: - -IP_EQLX - The IP address used to reach the Dell EqualLogic Group through SSH. - This field has no default value. - -SAN_UNAME - The user name to login to the Group manager via SSH at the - ``san_ip``. Default user name is ``grpadmin``. - -SAN_PW - The corresponding password of SAN_UNAME. Not used when - ``san_private_key`` is set. Default password is ``password``. - -EQLX_GROUP - The group to be used for a pool where the Block Storage service will - create volumes and snapshots. 
Default group is ``group-0``. - -EQLX_POOL - The pool where the Block Storage service will create volumes and - snapshots. Default pool is ``default``. This option cannot be used - for multiple pools utilized by the Block Storage service on a single - Dell EqualLogic Group. - -EQLX_UNAME - The CHAP login account for each volume in a pool, if - ``use_chap_auth`` is set to ``true``. Default account name is - ``chapadmin``. - -EQLX_PW - The corresponding password of EQLX_UNAME. The default password is - randomly generated in hexadecimal, so you must set this password - manually. - -SAN_KEY_PATH (optional) - The filename of the private key used for SSH authentication. This - provides password-less login to the EqualLogic Group. Not used when - ``san_password`` is set. There is no default value. - -In addition, enable thin provisioning for SAN volumes using the default -``san_thin_provision = true`` setting. - -Multiple back-end configuration -------------------------------- - -The following example shows the typical configuration for a Block -Storage service that uses two Dell EqualLogic back ends: - -.. code-block:: ini - - enabled_backends = backend1,backend2 - san_ssh_port = 22 - ssh_conn_timeout = 30 - san_thin_provision = true - - [backend1] - volume_driver = cinder.volume.drivers.dell_emc.ps.PSSeriesISCSIDriver - volume_backend_name = backend1 - san_ip = IP_EQLX1 - san_login = SAN_UNAME - san_password = SAN_PW - eqlx_group_name = EQLX_GROUP - eqlx_pool = EQLX_POOL - - [backend2] - volume_driver = cinder.volume.drivers.dell_emc.ps.PSSeriesISCSIDriver - volume_backend_name = backend2 - san_ip = IP_EQLX2 - san_login = SAN_UNAME - san_password = SAN_PW - eqlx_group_name = EQLX_GROUP - eqlx_pool = EQLX_POOL - -In this example: - -- Thin provisioning for SAN volumes is enabled - (``san_thin_provision = true``). This is recommended when setting up - Dell EqualLogic back ends. 
- -- Each Dell EqualLogic back-end configuration (``[backend1]`` and - ``[backend2]``) has the same required settings as a single back-end - configuration, with the addition of ``volume_backend_name``. - -- The ``san_ssh_port`` option is set to its default value, 22. This - option sets the port used for SSH. - -- The ``ssh_conn_timeout`` option is also set to its default value, 30. - This option sets the timeout in seconds for CLI commands over SSH. - -- The ``IP_EQLX1`` and ``IP_EQLX2`` refer to the IP addresses used to - reach the Dell EqualLogic Group of ``backend1`` and ``backend2`` - through SSH, respectively. - -For information on configuring multiple back ends, see `Configure a -multiple-storage back -end `__. diff --git a/doc/source/configuration/block-storage/drivers/dell-storagecenter-driver.rst b/doc/source/configuration/block-storage/drivers/dell-storagecenter-driver.rst deleted file mode 100644 index 1d8861ef3..000000000 --- a/doc/source/configuration/block-storage/drivers/dell-storagecenter-driver.rst +++ /dev/null @@ -1,361 +0,0 @@ -=================================================== -Dell Storage Center Fibre Channel and iSCSI drivers -=================================================== - -The Dell Storage Center volume driver interacts with configured Storage -Center arrays. - -The Dell Storage Center driver manages Storage Center arrays through -the Dell Storage Manager (DSM). DSM connection settings and Storage -Center options are defined in the ``cinder.conf`` file. - -Prerequisite: Dell Storage Manager 2015 R1 or later must be used. - -Supported operations -~~~~~~~~~~~~~~~~~~~~ - -The Dell Storage Center volume driver provides the following Cinder -volume operations: - -- Create, delete, attach (map), and detach (unmap) volumes. -- Create, list, and delete volume snapshots. -- Create a volume from a snapshot. -- Copy an image to a volume. -- Copy a volume to an image. -- Clone a volume. -- Extend a volume. 
-- Create, delete, list and update a consistency group. -- Create, delete, and list consistency group snapshots. -- Manage an existing volume. -- Failover-host for replicated back ends. -- Create a replication using Live Volume. - -Extra spec options -~~~~~~~~~~~~~~~~~~ - -Volume type extra specs can be used to enable a variety of Dell Storage -Center options. Selecting Storage Profiles, Replay Profiles, enabling -replication, replication options including Live Volume and Active Replay -replication. - -Storage Profiles control how Storage Center manages volume data. For a -given volume, the selected Storage Profile dictates which disk tier -accepts initial writes, as well as how data progression moves data -between tiers to balance performance and cost. Predefined Storage -Profiles are the most effective way to manage data in Storage Center. - -By default, if no Storage Profile is specified in the volume extra -specs, the default Storage Profile for the user account configured for -the Block Storage driver is used. The extra spec key -``storagetype:storageprofile`` with the value of the name of the Storage -Profile on the Storage Center can be set to allow to use Storage -Profiles other than the default. - -For ease of use from the command line, spaces in Storage Profile names -are ignored. As an example, here is how to define two volume types using -the ``High Priority`` and ``Low Priority`` Storage Profiles: - -.. code-block:: console - - $ openstack volume type create "GoldVolumeType" - $ openstack volume type set --property storagetype:storageprofile=highpriority "GoldVolumeType" - $ openstack volume type create "BronzeVolumeType" - $ openstack volume type set --property storagetype:storageprofile=lowpriority "BronzeVolumeType" - -Replay Profiles control how often the Storage Center takes a replay of a -given volume and how long those replays are kept. 
The default profile is -the ``daily`` profile that sets the replay to occur once a day and to -persist for one week. - -The extra spec key ``storagetype:replayprofiles`` with the value of the -name of the Replay Profile or profiles on the Storage Center can be set -to allow to use Replay Profiles other than the default ``daily`` profile. - -As an example, here is how to define a volume type using the ``hourly`` -Replay Profile and another specifying both ``hourly`` and the default -``daily`` profile: - -.. code-block:: console - - $ openstack volume type create "HourlyType" - $ openstack volume type set --property storagetype:replayprofile=hourly "HourlyType" - $ openstack volume type create "HourlyAndDailyType" - $ openstack volume type set --property storagetype:replayprofiles=hourly,daily "HourlyAndDailyType" - -Note the comma separated string for the ``HourlyAndDailyType``. - -Replication for a given volume type is enabled via the extra spec -``replication_enabled``. - -To create a volume type that specifies only replication enabled back ends: - -.. code-block:: console - - $ openstack volume type create "ReplicationType" - $ openstack volume type set --property replication_enabled=' True' "ReplicationType" - -Extra specs can be used to configure replication. In addition to the Replay -Profiles above, ``replication:activereplay`` can be set to enable replication -of the volume's active replay. And the replication type can be changed to -synchronous via the ``replication_type`` extra spec can be set. - -To create a volume type that enables replication of the active replay: - -.. code-block:: console - - $ openstack volume type create "ReplicationType" - $ openstack volume type key --property replication_enabled=' True' "ReplicationType" - $ openstack volume type key --property replication:activereplay=' True' "ReplicationType" - -To create a volume type that enables synchronous replication : - -.. 
code-block:: console - - $ openstack volume type create "ReplicationType" - $ openstack volume type key --property replication_enabled=' True' "ReplicationType" - $ openstack volume type key --property replication_type=' sync' "ReplicationType" - -To create a volume type that enables replication using Live Volume: - -.. code-block:: console - - $ openstack volume type create "ReplicationType" - $ openstack volume type key --property replication_enabled=' True' "ReplicationType" - $ openstack volume type key --property replication:livevolume=' True' "ReplicationType" - -If QOS options are enabled on the Storage Center they can be enabled via extra -specs. The name of the Volume QOS can be specified via the -``storagetype:volumeqos`` extra spec. Likewise the name of the Group QOS to -use can be specified via the ``storagetype:groupqos`` extra spec. Volumes -created with these extra specs set will be added to the specified QOS groups. - -To create a volume type that sets both Volume and Group QOS: - -.. code-block:: console - - $ openstack volume type create "StorageCenterQOS" - $ openstack volume type key --property 'storagetype:volumeqos'='unlimited' "StorageCenterQOS" - $ openstack volume type key --property 'storagetype:groupqos'='limited' "StorageCenterQOS" - -Data reduction profiles can be specified in the -``storagetype:datareductionprofile`` extra spec. Available options are None, -Compression, and Deduplication. Note that not all options are available on -every Storage Center. - -To create volume types that support no compression, compression, and -deduplication and compression respectively: - -.. 
code-block:: console - - $ openstack volume type create "NoCompressionType" - $ openstack volume type key --property 'storagetype:datareductionprofile'='None' "NoCompressionType" - $ openstack volume type create "CompressedType" - $ openstack volume type key --property 'storagetype:datareductionprofile'='Compression' "CompressedType" - $ openstack volume type create "DedupType" - $ openstack volume type key --property 'storagetype:datareductionprofile'='Deduplication' "DedupType" - -Note: The default is no compression. - -iSCSI configuration -~~~~~~~~~~~~~~~~~~~ - -Use the following instructions to update the configuration file for iSCSI: - -.. code-block:: ini - - default_volume_type = delliscsi - enabled_backends = delliscsi - - [delliscsi] - # Name to give this storage back-end - volume_backend_name = delliscsi - # The iSCSI driver to load - volume_driver = cinder.volume.drivers.dell.dell_storagecenter_iscsi.DellStorageCenterISCSIDriver - # IP address of DSM - san_ip = 172.23.8.101 - # DSM user name - san_login = Admin - # DSM password - san_password = secret - # The Storage Center serial number to use - dell_sc_ssn = 64702 - - # ==Optional settings== - - # The DSM API port - dell_sc_api_port = 3033 - # Server folder to place new server definitions - dell_sc_server_folder = devstacksrv - # Volume folder to place created volumes - dell_sc_volume_folder = devstackvol/Cinder - -Fibre Channel configuration -~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -Use the following instructions to update the configuration file for fibre -channel: - -.. 
code-block:: ini - - default_volume_type = dellfc - enabled_backends = dellfc - - [dellfc] - # Name to give this storage back-end - volume_backend_name = dellfc - # The FC driver to load - volume_driver = cinder.volume.drivers.dell.dell_storagecenter_fc.DellStorageCenterFCDriver - # IP address of the DSM - san_ip = 172.23.8.101 - # DSM user name - san_login = Admin - # DSM password - san_password = secret - # The Storage Center serial number to use - dell_sc_ssn = 64702 - - # ==Optional settings== - - # The DSM API port - dell_sc_api_port = 3033 - # Server folder to place new server definitions - dell_sc_server_folder = devstacksrv - # Volume folder to place created volumes - dell_sc_volume_folder = devstackvol/Cinder - -Dual DSM -~~~~~~~~ - -It is possible to specify a secondary DSM to use in case the primary DSM fails. - -Configuration is done through the cinder.conf. Both DSMs have to be -configured to manage the same set of Storage Centers for this backend. That -means the dell_sc_ssn and any Storage Centers used for replication or Live -Volume. - -Add network and credential information to the backend to enable Dual DSM. - -.. code-block:: ini - - [dell] - # The IP address and port of the secondary DSM. - secondary_san_ip = 192.168.0.102 - secondary_sc_api_port = 3033 - # Specify credentials for the secondary DSM. - secondary_san_login = Admin - secondary_san_password = secret - -The driver will use the primary until a failure. At that point it will attempt -to use the secondary. It will continue to use the secondary until the volume -service is restarted or the secondary fails at which point it will attempt to -use the primary. - -Replication configuration -~~~~~~~~~~~~~~~~~~~~~~~~~ - -Add the following to the back-end specification to specify another Storage -Center to replicate to. - -.. 
code-block:: ini - - [dell] - replication_device = target_device_id: 65495, qosnode: cinderqos - -The ``target_device_id`` is the SSN of the remote Storage Center and the -``qosnode`` is the QoS Node setup between the two Storage Centers. - -Note that more than one ``replication_device`` line can be added. This will -slow things down, however. - -A volume is only replicated if the volume is of a volume-type that has -the extra spec ``replication_enabled`` set to `` True``. - -Replication notes -~~~~~~~~~~~~~~~~~ - -This driver supports both standard replication and Live Volume (if supported -and licensed). The main difference is that a VM attached to a Live Volume is -mapped to both Storage Centers. In the case of a failure of the primary, Live -Volume still requires a failover-host to move control of the volume to the -second controller. - -Existing mappings should work and not require the instance to be remapped but -it might need to be rebooted. - -Live Volume is more resource intensive than replication. One should be sure -to plan accordingly. - -Failback -~~~~~~~~ - -The failover-host command is designed for the case where the primary system is -not coming back. If it has been executed and the primary has been restored it -is possible to attempt a failback. - -Simply specify default as the backend_id. - -.. code-block:: console - - $ cinder failover-host cinder@delliscsi --backend_id default - -Non trivial heavy lifting is done by this command. It attempts to recover as -best it can but if things have diverged too far it can only do so much. It is -also a one time only command so do not reboot or restart the service in the -middle of it. - -Failover and failback are significant operations under OpenStack Cinder. Be -sure to consult with support before attempting. - -Server type configuration -~~~~~~~~~~~~~~~~~~~~~~~~~ - -This option allows one to set a default Server OS type to use when creating -a server definition on the Dell Storage Center. 
- -When attaching a volume to a node the Dell Storage Center driver creates a -server definition on the storage array. This definition includes a Server OS -type. The type used by the Dell Storage Center cinder driver is -"Red Hat Linux 6.x". This is a modern operating system definition that supports -all the features of an OpenStack node. - -Add the following to the back-end specification to specify the Server OS to use -when creating a server definition. The server type used must come from the drop -down list in the DSM. - -.. code-block:: ini - - [dell] - dell_server_os = 'Red Hat Linux 7.x' - -Note that this server definition is created once. Changing this setting after -the fact will not change an existing definition. The selected Server OS does -not have to match the actual OS used on the node. - -Excluding a domain -~~~~~~~~~~~~~~~~~~ - -This option excludes a Storage Center ISCSI fault domain from the ISCSI -properties returned by the initialize_connection call. This only applies to -the ISCSI driver. - -Add the excluded_domain_ip option into the backend config for each fault domain -to be excluded. This option takes the specified Target IPv4 Address listed -under the fault domain. Older versions of DSM (EM) may list this as the Well -Known IP Address. - -Add the following to the back-end specification to exclude the domains at -172.20.25.15 and 172.20.26.15. - -.. code-block:: ini - - [dell] - excluded_domain_ip=172.20.25.15 - excluded_domain_ip=172.20.26.15 - -Driver options -~~~~~~~~~~~~~~ - -The following table contains the configuration options specific to the -Dell Storage Center volume driver. - -.. 
include:: ../../tables/cinder-dellsc.rst diff --git a/doc/source/configuration/block-storage/drivers/dothill-driver.rst b/doc/source/configuration/block-storage/drivers/dothill-driver.rst deleted file mode 100644 index bb5e43709..000000000 --- a/doc/source/configuration/block-storage/drivers/dothill-driver.rst +++ /dev/null @@ -1,168 +0,0 @@ -=================================================== -Dot Hill AssuredSAN Fibre Channel and iSCSI drivers -=================================================== - -The ``DotHillFCDriver`` and ``DotHillISCSIDriver`` volume drivers allow -Dot Hill arrays to be used for block storage in OpenStack deployments. - -System requirements -~~~~~~~~~~~~~~~~~~~ - -To use the Dot Hill drivers, the following are required: - -- Dot Hill AssuredSAN array with: - - - iSCSI or FC host interfaces - - G22x firmware or later - - Appropriate licenses for the snapshot and copy volume features - -- Network connectivity between the OpenStack host and the array - management interfaces - -- HTTPS or HTTP must be enabled on the array - -Supported operations -~~~~~~~~~~~~~~~~~~~~~ - -- Create, delete, attach, and detach volumes. -- Create, list, and delete volume snapshots. -- Create a volume from a snapshot. -- Copy an image to a volume. -- Copy a volume to an image. -- Clone a volume. -- Extend a volume. -- Migrate a volume with back-end assistance. -- Retype a volume. -- Manage and unmanage a volume. - -Configuring the array -~~~~~~~~~~~~~~~~~~~~~ - -#. Verify that the array can be managed via an HTTPS connection. HTTP can - also be used if ``dothill_api_protocol=http`` is placed into the - appropriate sections of the ``cinder.conf`` file. - - Confirm that virtual pools A and B are present if you plan to use - virtual pools for OpenStack storage. 
- - If you plan to use vdisks instead of virtual pools, create or identify - one or more vdisks to be used for OpenStack storage; typically this will - mean creating or setting aside one disk group for each of the A and B - controllers. - -#. Edit the ``cinder.conf`` file to define a storage back-end entry for - each storage pool on the array that will be managed by OpenStack. Each - entry consists of a unique section name, surrounded by square brackets, - followed by options specified in ``key=value`` format. - - - The ``dothill_backend_name`` value specifies the name of the storage - pool or vdisk on the array. - - - The ``volume_backend_name`` option value can be a unique value, if - you wish to be able to assign volumes to a specific storage pool on - the array, or a name that is shared among multiple storage pools to - let the volume scheduler choose where new volumes are allocated. - - - The rest of the options will be repeated for each storage pool in a - given array: the appropriate Cinder driver name; IP address or - hostname of the array management interface; the username and password - of an array user account with ``manage`` privileges; and the iSCSI IP - addresses for the array if using the iSCSI transport protocol. - - In the examples below, two back ends are defined, one for pool A and one - for pool B, and a common ``volume_backend_name`` is used so that a - single volume type definition can be used to allocate volumes from both - pools. - - - **iSCSI example back-end entries** - - .. 
code-block:: ini - - [pool-a] - dothill_backend_name = A - volume_backend_name = dothill-array - volume_driver = cinder.volume.drivers.dothill.dothill_iscsi.DotHillISCSIDriver - san_ip = 10.1.2.3 - san_login = manage - san_password = !manage - dothill_iscsi_ips = 10.2.3.4,10.2.3.5 - - [pool-b] - dothill_backend_name = B - volume_backend_name = dothill-array - volume_driver = cinder.volume.drivers.dothill.dothill_iscsi.DotHillISCSIDriver - san_ip = 10.1.2.3 - san_login = manage - san_password = !manage - dothill_iscsi_ips = 10.2.3.4,10.2.3.5 - - **Fibre Channel example back-end entries** - - .. code-block:: ini - - [pool-a] - dothill_backend_name = A - volume_backend_name = dothill-array - volume_driver = cinder.volume.drivers.dothill.dothill_fc.DotHillFCDriver - san_ip = 10.1.2.3 - san_login = manage - san_password = !manage - - [pool-b] - dothill_backend_name = B - volume_backend_name = dothill-array - volume_driver = cinder.volume.drivers.dothill.dothill_fc.DotHillFCDriver - san_ip = 10.1.2.3 - san_login = manage - san_password = !manage - -#. If any ``volume_backend_name`` value refers to a vdisk rather than a - virtual pool, add an additional statement - ``dothill_backend_type = linear`` to that back-end entry. - -#. If HTTPS is not enabled in the array, include - ``dothill_api_protocol = http`` in each of the back-end definitions. - -#. If HTTPS is enabled, you can enable certificate verification with the - option ``dothill_verify_certificate=True``. You may also use the - ``dothill_verify_certificate_path`` parameter to specify the path to a - CA\_BUNDLE file containing CAs other than those in the default list. - -#. Modify the ``[DEFAULT]`` section of the ``cinder.conf`` file to add an - ``enabled_backends`` parameter specifying the back-end entries you added, - and a ``default_volume_type`` parameter specifying the name of a volume - type that you will create in the next step. - - **Example of [DEFAULT] section changes** - - .. 
code-block:: ini - - [DEFAULT] - # ... - enabled_backends = pool-a,pool-b - default_volume_type = dothill - # ... - -#. Create a new volume type for each distinct ``volume_backend_name`` value - that you added to cinder.conf. The example below assumes that the same - ``volume_backend_name=dothill-array`` option was specified in all of the - entries, and specifies that the volume type ``dothill`` can be used to - allocate volumes from any of them. - - **Example of creating a volume type** - - .. code-block:: console - - $ openstack volume type create dothill - $ openstack volume type set --property volume_backend_name=dothill-array dothill - -#. After modifying ``cinder.conf``, restart the ``cinder-volume`` service. - -Driver-specific options -~~~~~~~~~~~~~~~~~~~~~~~ - -The following table contains the configuration options that are specific -to the Dot Hill drivers. - -.. include:: ../../tables/cinder-dothill.rst diff --git a/doc/source/configuration/block-storage/drivers/emc-vmax-driver.rst b/doc/source/configuration/block-storage/drivers/emc-vmax-driver.rst deleted file mode 100644 index 040e01e38..000000000 --- a/doc/source/configuration/block-storage/drivers/emc-vmax-driver.rst +++ /dev/null @@ -1,1614 +0,0 @@ -================================== -Dell EMC VMAX iSCSI and FC drivers -================================== - -The Dell EMC VMAX drivers, ``VMAXISCSIDriver`` and ``VMAXFCDriver``, support -the use of Dell EMC VMAX storage arrays with Block Storage. They both provide -equivalent functions and differ only in support for their respective host -attachment methods. - -The drivers perform volume operations by communicating with the back-end VMAX -storage. It uses a CIM client in Python called ``PyWBEM`` to perform CIM -operations over HTTP. - -The EMC CIM Object Manager (ECOM) is packaged with the EMC SMI-S provider. It -is a CIM server that enables CIM clients to perform CIM operations over HTTP by -using SMI-S in the back end for VMAX storage operations. 
- -The Dell EMC SMI-S Provider supports the SNIA Storage Management Initiative -(SMI), an ANSI standard for storage management. It supports the VMAX storage -system. - -System requirements -~~~~~~~~~~~~~~~~~~~ - -The Cinder driver supports the VMAX-3 series. - -For VMAX-3 series, Solutions Enabler 8.3.0.11 or later is required. This -is SSL only. Refer to section below ``SSL support``. - -When installing Solutions Enabler, make sure you explicitly add the SMI-S -component. - -You can download Solutions Enabler from the Dell EMC's support web site -(login is required). See the ``Solutions Enabler 8.3.0 Installation and -Configuration Guide`` at ``support.emc.com``. - -Ensure that there is only one SMI-S (ECOM) server active on the same VMAX -array. - - -Required VMAX software suites for OpenStack -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -There are five Software Suites available for the VMAX All Flash and Hybrid: - -- Base Suite -- Advanced Suite -- Local Replication Suite -- Remote Replication Suite -- Total Productivity Pack - -OpenStack requires the Advanced Suite and the Local Replication Suite -or the Total Productivity Pack (it includes the Advanced Suite and the -Local Replication Suite) for the VMAX All Flash and Hybrid. - -Each are licensed separately. For further details on how to get the -relevant license(s), reference eLicensing Support below. - - -eLicensing support -~~~~~~~~~~~~~~~~~~ - -To activate your entitlements and obtain your VMAX license files, visit the -Service Center on ``_, as directed on your License -Authorization Code (LAC) letter emailed to you. - -- For help with missing or incorrect entitlements after activation - (that is, expected functionality remains unavailable because it is not - licensed), contact your EMC account representative or authorized reseller. - -- For help with any errors applying license files through Solutions Enabler, - contact the Dell EMC Customer Support Center. 
- -- If you are missing a LAC letter or require further instructions on - activating your licenses through the Online Support site, contact EMC's - worldwide Licensing team at ``licensing@emc.com`` or call: - - North America, Latin America, APJK, Australia, New Zealand: SVC4EMC - (800-782-4362) and follow the voice prompts. - - EMEA: +353 (0) 21 4879862 and follow the voice prompts. - - -Supported operations -~~~~~~~~~~~~~~~~~~~~ - -VMAX drivers support these operations: - -- Create, list, delete, attach, and detach volumes -- Create, list, and delete volume snapshots -- Copy an image to a volume -- Copy a volume to an image -- Clone a volume -- Extend a volume -- Retype a volume (Host and storage assisted volume migration) -- Create a volume from a snapshot -- Create and delete consistency group -- Create and delete consistency group snapshot -- Modify consistency group (add and remove volumes) -- Create consistency group from source -- Create and delete generic volume group -- Create and delete generic volume group snapshot -- Modify generic volume group (add and remove volumes) -- Create generic volume group from source - -VMAX drivers also support the following features: - -- Dynamic masking view creation -- Dynamic determination of the target iSCSI IP address -- iSCSI multipath support -- Oversubscription -- Live Migration -- Attach and detach snapshots -- Volume replication - -VMAX All Flash and Hybrid: - -- Service Level support -- SnapVX support -- All Flash support -- Compression support - -.. note:: - - VMAX All Flash arrays with Solutions Enabler 8.3.0.11 or later have - compression enabled by default when associated with Diamond Service Level. - This means volumes added to any newly created storage groups will be - compressed. - -Setup VMAX drivers -~~~~~~~~~~~~~~~~~~ - -.. 
table:: **Pywbem Versions** - - +------------+-----------------------------------+ - | Pywbem | Ubuntu14.04(LTS),Ubuntu16.04(LTS),| - | Version | Red Hat Enterprise Linux, CentOS | - | | and Fedora | - +============+=================+=================+ - | | Python2 | Python3 | - + +-------+---------+-------+---------+ - | | pip | Native | pip | Native | - +------------+-------+---------+-------+---------+ - | 0.9.0 | No | N/A | Yes | N/A | - +------------+-------+---------+-------+---------+ - | 0.8.4 | No | N/A | Yes | N/A | - +------------+-------+---------+-------+---------+ - | 0.7.0 | No | Yes | No | Yes | - +------------+-------+---------+-------+---------+ - -.. note:: - - On Python2, use the updated distro version, for example: - - .. code-block:: console - - # apt-get install python-pywbem - -.. note:: - - On Python3, use the official pywbem version (V0.9.0 or v0.8.4). - -#. Install the ``python-pywbem`` package for your distribution. - - - On Ubuntu: - - .. code-block:: console - - # apt-get install python-pywbem - - - On openSUSE: - - .. code-block:: console - - # zypper install python-pywbem - - - On Red Hat Enterprise Linux, CentOS, and Fedora: - - .. code-block:: console - - # yum install pywbem - - .. note:: - - A potential issue can exist with the ``python-pywbem`` dependency package, - especially M2crypto. To troubleshot and resolve these types of issues, - follow these steps. - - - On Ubuntu: - - .. code-block:: console - - # apt-get remove --purge -y python-m2crypto - # pip uninstall pywbem - # apt-get install python-pywbem - - - On openSUSE: - - .. code-block:: console - - # zypper remove --clean-deps python-m2crypto - # pip uninstall pywbem - # zypper install python-pywbem - - - On Red Hat Enterprise Linux, CentOS, and Fedora: - - .. code-block:: console - - # yum remove python-m2crypto - # sudo pip uninstall pywbem - # yum install pywbem - -#. Install iSCSI Utilities (for iSCSI drivers only). - - #. 
Download and configure the Cinder node as an iSCSI initiator. - #. Install the ``open-iscsi`` package. - - - On Ubuntu: - - .. code-block:: console - - # apt-get install open-iscsi - - - On openSUSE: - - .. code-block:: console - - # zypper install open-iscsi - - - On Red Hat Enterprise Linux, CentOS, and Fedora: - - .. code-block:: console - - # yum install scsi-target-utils.x86_64 - - #. Enable the iSCSI driver to start automatically. - -#. Download Solutions Enabler from ``support.emc.com`` and install it. - Make sure you install the SMIS component. A [Y]es response installs the - ``SMISPROVIDER`` component. - - .. code-block:: console - - Install EMC Solutions Enabler SMIS Component ? [N]:Y - - You can install Solutions Enabler on a non-OpenStack host. Supported - platforms include different flavors of Windows, Red Hat, and SUSE Linux. - Solutions Enabler can be installed on a physical server or a VM hosted by - an ESX server. Note that the supported hypervisor for a VM running - Solutions Enabler is ESX only. See the ``Solutions Enabler 8.3.0 - Installation and Configuration Guide`` on ``support.emc.com`` for more - details. - - .. note:: - - You must discover storage arrays on the ECOM before you can use - the VMAX drivers. Follow instructions in ``Solutions Enabler 8.3.0 - Installation and Configuration Guide`` on ``support.emc.com`` for more - details. - - The ECOM server is usually installed at ``/opt/emc/ECIM/ECOM/bin`` on Linux - and ``C:\Program Files\EMC\ECIM\ECOM\bin`` on Windows. After you install and - configure the ECOM, go to that directory and type ``TestSmiProvider.exe`` - for windows and ``./TestSmiProvider`` for linux - - Use ``addsys`` in ``TestSmiProvider`` to add an array. Use ``dv`` and examine - the output after the array is added. In advance of ``TestSmiProvider``, - arrays need to be discovered on the Solutions Enabler by using the - :command:`symcfg discover` command. 
Make sure that the arrays are recognized by the - SMI-S server before using the EMC VMAX drivers. - -#. Configure Block Storage - - Add the following entries to ``/etc/cinder/cinder.conf``: - - .. code-block:: ini - - enabled_backends = CONF_GROUP_ISCSI, CONF_GROUP_FC - - [CONF_GROUP_ISCSI] - volume_driver = cinder.volume.drivers.dell_emc.vmax.iscsi.VMAXISCSIDriver - cinder_emc_config_file = /etc/cinder/cinder_emc_config_CONF_GROUP_ISCSI.xml - volume_backend_name = ISCSI_backend - - [CONF_GROUP_FC] - volume_driver = cinder.volume.drivers.dell_emc.vmax.fc.EMCVMAXFCDriver - cinder_emc_config_file = /etc/cinder/cinder_emc_config_CONF_GROUP_FC.xml - volume_backend_name = FC_backend - - In this example, two back-end configuration groups are enabled: - ``CONF_GROUP_ISCSI`` and ``CONF_GROUP_FC``. Each configuration group has a - section describing unique parameters for connections, drivers, the - ``volume_backend_name``, and the name of the EMC-specific configuration file - containing additional settings. Note that the file name is in the format - ``/etc/cinder/cinder_emc_config_[confGroup].xml``. - - Once the ``cinder.conf`` and EMC-specific configuration files have been - created, :command:`openstack` commands need to be issued in order to create and - associate OpenStack volume types with the declared ``volume_backend_names``: - - .. code-block:: console - - $ openstack volume type create VMAX_ISCSI - $ openstack volume type set --property volume_backend_name=ISCSI_backend VMAX_ISCSI - $ openstack volume type create VMAX_FC - $ openstack volume type set --property volume_backend_name=FC_backend VMAX_FC - - By issuing these commands, the Block Storage volume type ``VMAX_ISCSI`` is - associated with the ``ISCSI_backend``, and the type ``VMAX_FC`` is - associated with the ``FC_backend``. - - - Create the ``/etc/cinder/cinder_emc_config_CONF_GROUP_ISCSI.xml`` file. - You do not need to restart the service for this change. 
- - Add the following lines to the XML file: - - VMAX All Flash and Hybrid - .. code-block:: xml - - - - 1.1.1.1 - 00 - user1 - password1 - - OS-PORTGROUP1-PG - OS-PORTGROUP2-PG - - 111111111111 - SRP_1 - Diamond - OLTP - - - Where: - -.. note:: - - VMAX Hybrid supports Optimized, Diamond, Platinum, Gold, Silver, Bronze, and - NONE service levels. VMAX All Flash supports Diamond and NONE. Both - support DSS_REP, DSS, OLTP_REP, OLTP, and NONE workloads. - -``EcomServerIp`` - IP address of the ECOM server which is packaged with SMI-S. - -``EcomServerPort`` - Port number of the ECOM server which is packaged with SMI-S. - -``EcomUserName`` and ``EcomPassword`` - Credentials for the ECOM server. - -``PortGroups`` - Supplies the names of VMAX port groups that have been pre-configured to - expose volumes managed by this backend. Each supplied port group should - have sufficient number and distribution of ports (across directors and - switches) as to ensure adequate bandwidth and failure protection for the - volume connections. PortGroups can contain one or more port groups of - either iSCSI or FC ports. When a dynamic masking view is created by the - VMAX driver, the port group is chosen randomly from the PortGroup list, to - evenly distribute load across the set of groups provided. Make sure that - the PortGroups set contains either all FC or all iSCSI port groups (for a - given back end), as appropriate for the configured driver (iSCSI or FC). - -``Array`` - Unique VMAX array serial number. - -``Pool`` - Unique pool name within a given array. For back ends not using FAST - automated tiering, the pool is a single pool that has been created by the - administrator. For back ends exposing FAST policy automated tiering, the - pool is the bind pool to be used with the FAST policy. - -``ServiceLevel`` - VMAX All Flash and Hybrid only. The Service Level manages the underlying - storage to provide expected performance. 
Omitting the ``ServiceLevel`` - tag means that non FAST storage groups will be created instead - (storage groups not associated with any service level). - -``Workload`` - VMAX All Flash and Hybrid only. When a workload type is added, the latency - range is reduced due to the added information. Omitting the ``Workload`` - tag means the latency range will be the widest for its SLO type. - -FC Zoning with VMAX -~~~~~~~~~~~~~~~~~~~ - -Zone Manager is required when there is a fabric between the host and array. -This is necessary for larger configurations where pre-zoning would be too -complex and open-zoning would raise security concerns. - -iSCSI with VMAX -~~~~~~~~~~~~~~~ - -- Make sure the ``iscsi-initiator-utils`` package is installed on all Compute - nodes. - -.. note:: - - You can only ping the VMAX iSCSI target ports when there is a valid masking - view. An attach operation creates this masking view. - -VMAX masking view and group naming info -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -Masking view names ------------------- - -Masking views are dynamically created by the VMAX FC and iSCSI drivers using -the following naming conventions. ``[protocol]`` is either ``I`` for volumes -attached over iSCSI or ``F`` for volumes attached over Fiber Channel. - -VMAX All Flash and Hybrid - -.. code-block:: text - - OS-[shortHostName]-[SRP]-[SLO]-[workload]-[protocol]-MV - -Initiator group names ---------------------- - -For each host that is attached to VMAX volumes using the drivers, an initiator -group is created or re-used (per attachment type). All initiators of the -appropriate type known for that host are included in the group. At each new -attach volume operation, the VMAX driver retrieves the initiators (either WWNNs -or IQNs) from OpenStack and adds or updates the contents of the Initiator Group -as required. Names are of the following format. ``[protocol]`` is either ``I`` -for volumes attached over iSCSI or ``F`` for volumes attached over Fiber -Channel. - -.. 
code-block:: text - - OS-[shortHostName]-[protocol]-IG - -.. note:: - - Hosts attaching to OpenStack managed VMAX storage cannot also attach to - storage on the same VMAX that are not managed by OpenStack. - -FA port groups --------------- - -VMAX array FA ports to be used in a new masking view are chosen from the list -provided in the EMC configuration file. - -Storage group names -------------------- - -As volumes are attached to a host, they are either added to an existing storage -group (if it exists) or a new storage group is created and the volume is then -added. Storage groups contain volumes created from a pool (either single-pool -or FAST-controlled), attached to a single host, over a single connection type -(iSCSI or FC). ``[protocol]`` is either ``I`` for volumes attached over iSCSI -or ``F`` for volumes attached over Fiber Channel. - -VMAX All Flash and Hybrid - -.. code-block:: text - - OS-[shortHostName]-[SRP]-[SLO]-[Workload]-[protocol]-SG - - -Interval and Retries --------------------- - -By default, ``Interval`` and ``Retries`` are ``10`` seconds and ``60`` -retries respectively. These determine how long (``Interval``) and how many -times (``Retries``) a user is willing to wait for a single SMIS call, -``10*60=300seconds``. Depending on usage, these may need to be overriden by -the user in the XML file. For example, if performance is a factor, then the -``Interval`` should be decreased to check the job status more frequently, -and if multiple concurrent provisioning requests are issued then ``Retries`` -should be increased so calls will not timeout prematurely. - -In the example below, the driver checks every 5 seconds for the status of the -job. It will continue checking for 120 retries before it times out. - -Add the following lines to the XML file: - - VMAX All Flash and Hybrid - - .. 
code-block:: xml - - - - 1.1.1.1 - 00 - user1 - password1 - - OS-PORTGROUP1-PG - OS-PORTGROUP2-PG - - 111111111111 - SRP_1 - 5 - 120 - - -SSL support -~~~~~~~~~~~ - -.. note:: - The ECOM component in Solutions Enabler enforces SSL in 8.3.0.1 or later. - By default, this port is 5989. - -#. Get the CA certificate of the ECOM server. This pulls the CA cert file and - saves it as .pem file. The ECOM server IP address or hostname is ``my_ecom_host``. - The sample name of the .pem file is ``ca_cert.pem``: - - .. code-block:: console - - # openssl s_client -showcerts -connect my_ecom_host:5989 /dev/null|openssl x509 -outform PEM >ca_cert.pem - -#. Copy the pem file to the system certificate directory: - - .. code-block:: console - - # cp ca_cert.pem /usr/share/ca-certificates/ca_cert.crt - -#. Update CA certificate database with the following commands: - - .. code-block:: console - - # sudo dpkg-reconfigure ca-certificates - - .. note:: - Check that the new ``ca_cert.crt`` will activate by selecting - :guilabel:`ask` on the dialog. If it is not enabled for activation, use the - down and up keys to select, and the space key to enable or disable. - - .. code-block:: console - - # sudo update-ca-certificates - -#. Update :file:`/etc/cinder/cinder.conf` to reflect SSL functionality by - adding the following to the back end block. ``my_location`` is the location - of the .pem file generated in step one: - - .. code-block:: ini - - driver_ssl_cert_verify = False - driver_use_ssl = True - - If you skip steps two and three, you must add the location of you .pem file. - - .. code-block:: ini - - driver_ssl_cert_verify = False - driver_use_ssl = True - driver_ssl_cert_path = /my_location/ca_cert.pem - -#. Update EcomServerIp to ECOM host name and EcomServerPort to secure port - (5989 by default) in :file:`/etc/cinder/cinder_emc_config_.xml`. 
- - -Oversubscription support -~~~~~~~~~~~~~~~~~~~~~~~~ - -Oversubscription support requires the ``/etc/cinder/cinder.conf`` to be -updated with two additional tags ``max_over_subscription_ratio`` and -``reserved_percentage``. In the sample below, the value of 2.0 for -``max_over_subscription_ratio`` means that the pools are oversubscribed by a -factor of 2, or 200% oversubscribed. The ``reserved_percentage`` is the high -water mark whereby the physical remaining space cannot be exceeded. -For example, if there is only 4% of physical space left and the reserve -percentage is 5, the free space will equate to zero. This is a safety -mechanism to prevent a scenario where a provisioning request fails due to -insufficient raw space. - -The parameters ``max_over_subscription_ratio`` and ``reserved_percentage`` are -optional. - -To set these parameters, go to the configuration group of the volume type in -:file:`/etc/cinder/cinder.conf`. - -.. code-block:: ini - - [VMAX_ISCSI_SILVER] - cinder_emc_config_file = /etc/cinder/cinder_emc_config_VMAX_ISCSI_SILVER.xml - volume_driver = cinder.volume.drivers.dell_emc.vmax.iscsi.VMAXISCSIDriver - volume_backend_name = VMAX_ISCSI_SILVER - max_over_subscription_ratio = 2.0 - reserved_percentage = 10 - -For the second iteration of oversubscription, take into account the -EMCMaxSubscriptionPercent property on the pool. This value is the highest -that a pool can be oversubscribed. - -Scenario 1 ---------- - -``EMCMaxSubscriptionPercent`` is 200 and the user defined -``max_over_subscription_ratio`` is 2.5, the latter is ignored. -Oversubscription is 200%. - -Scenario 2 ---------- - -``EMCMaxSubscriptionPercent`` is 200 and the user defined -``max_over_subscription_ratio`` is 1.5, 1.5 equates to 150% and is less than -the value set on the pool. Oversubscription is 150%. - -Scenario 3 ---------- - -``EMCMaxSubscriptionPercent`` is 0. This means there is no upper limit on the -pool. The user defined ``max_over_subscription_ratio`` is 1.5. 
-Oversubscription is 150%. - -Scenario 4 ----------- - -``EMCMaxSubscriptionPercent`` is 0. ``max_over_subscription_ratio`` is not -set by the user. We recommend to default to upper limit, this is 150%. - -.. note:: - If FAST is set and multiple pools are associated with a FAST policy, - then the same rules apply. The difference is, the TotalManagedSpace and - EMCSubscribedCapacity for each pool associated with the FAST policy are - aggregated. - -Scenario 5 ----------- - -``EMCMaxSubscriptionPercent`` is 200 on one pool. It is 300 on another pool. -The user defined ``max_over_subscription_ratio`` is 2.5. Oversubscription is -200% on the first pool and 250% on the other. - -QoS (Quality of Service) support -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -Quality of service(QoS) has traditionally been associated with network -bandwidth usage. Network administrators set limitations on certain networks -in terms of bandwidth usage for clients. This enables them to provide a -tiered level of service based on cost. The cinder QoS offers similar -functionality based on volume type setting limits on host storage bandwidth -per service offering. Each volume type is tied to specific QoS attributes -that are unique to each storage vendor. The VMAX plugin offers limits via -the following attributes: - -- By I/O limit per second (IOPS) -- By limiting throughput per second (MB/S) -- Dynamic distribution -- The VMAX offers modification of QoS at the Storage Group level - -USE CASE 1 - Default values ---------------------------- - -Prerequisites - VMAX - -- Host I/O Limit (MB/Sec) - No Limit -- Host I/O Limit (IO/Sec) - No Limit -- Set Dynamic Distribution - N/A - -.. 
table:: **Prerequisites - Block Storage (cinder) back end (storage group)** - - +-------------------+--------+ - | Key | Value | - +===================+========+ - | maxIOPS | 4000 | - +-------------------+--------+ - | maxMBPS | 4000 | - +-------------------+--------+ - | DistributionType | Always | - +-------------------+--------+ - -#. Create QoS Specs with the prerequisite values above: - - .. code-block:: console - - $ openstack volume qos create --property maxIOPS=4000 maxMBPS=4000 DistributionType=Always SILVER - -#. Associate QoS specs with specified volume type: - - .. code-block:: console - - $ openstack volume qos associate SILVER VOLUME_TYPE - -#. Create volume with the volume type indicated above: - - .. code-block:: console - - $ openstack volume create --size 1 --type VOLUME_TYPE TEST_VOLUME - -**Outcome - VMAX (storage group)** - -- Host I/O Limit (MB/Sec) - 4000 -- Host I/O Limit (IO/Sec) - 4000 -- Set Dynamic Distribution - Always - -**Outcome - Block Storage (cinder)** - -Volume is created against volume type and QoS is enforced with the parameters -above. - -USE CASE 2 - Preset limits --------------------------- - -Prerequisites - VMAX - -- Host I/O Limit (MB/Sec) - 2000 -- Host I/O Limit (IO/Sec) - 2000 -- Set Dynamic Distribution - Never - -.. table:: **Prerequisites - Block Storage (cinder) back end (storage group)** - - +-------------------+--------+ - | Key | Value | - +===================+========+ - | maxIOPS | 4000 | - +-------------------+--------+ - | maxMBPS | 4000 | - +-------------------+--------+ - | DistributionType | Always | - +-------------------+--------+ - -#. Create QoS specifications with the prerequisite values above: - - .. code-block:: console - - $ openstack volume qos create --property maxIOPS=4000 maxMBPS=4000 DistributionType=Always SILVER - -#. Associate QoS specifications with specified volume type: - - .. code-block:: console - - $ openstack volume qos associate SILVER VOLUME_TYPE - -#. 
Create volume with the volume type indicated above: - - .. code-block:: console - - $ openstack volume create --size 1 --type VOLUME_TYPE TEST_VOLUME - -**Outcome - VMAX (storage group)** - -- Host I/O Limit (MB/Sec) - 4000 -- Host I/O Limit (IO/Sec) - 4000 -- Set Dynamic Distribution - Always - -**Outcome - Block Storage (cinder)** - -Volume is created against volume type and QoS is enforced with the parameters -above. - - -USE CASE 3 - Preset limits --------------------------- - -Prerequisites - VMAX - -- Host I/O Limit (MB/Sec) - No Limit -- Host I/O Limit (IO/Sec) - No Limit -- Set Dynamic Distribution - N/A - -.. table:: **Prerequisites - Block Storage (cinder) back end (storage group)** - - +-------------------+--------+ - | Key | Value | - +===================+========+ - | DistributionType | Always | - +-------------------+--------+ - -#. Create QoS specifications with the prerequisite values above: - - .. code-block:: console - - $ openstack volume qos create --property DistributionType=Always SILVER - -#. Associate QoS specifications with specified volume type: - - .. code-block:: console - - $ openstack volume qos associate SILVER VOLUME_TYPE - -#. Create volume with the volume type indicated above: - - .. code-block:: console - - $ openstack volume create --size 1 --type VOLUME_TYPE TEST_VOLUME - -**Outcome - VMAX (storage group)** - -- Host I/O Limit (MB/Sec) - No Limit -- Host I/O Limit (IO/Sec) - No Limit -- Set Dynamic Distribution - N/A - -**Outcome - Block Storage (cinder)** - -Volume is created against volume type and there is no QoS change. - -USE CASE 4 - Preset limits --------------------------- - -Prerequisites - VMAX - -- Host I/O Limit (MB/Sec) - No Limit -- Host I/O Limit (IO/Sec) - No Limit -- Set Dynamic Distribution - N/A - -.. 
table:: **Prerequisites - Block Storage (cinder) back end (storage group)** - - +-------------------+-----------+ - | Key | Value | - +===================+===========+ - | DistributionType | OnFailure | - +-------------------+-----------+ - -#. Create QoS specifications with the prerequisite values above: - - .. code-block:: console - - $ openstack volume qos create --property DistributionType=OnFailure SILVER - -#. Associate QoS specifications with specified volume type: - - .. code-block:: console - - $ openstack volume qos associate SILVER VOLUME_TYPE - - -#. Create volume with the volume type indicated above: - - .. code-block:: console - - $ openstack volume create --size 1 --type VOLUME_TYPE TEST_VOLUME - -**Outcome - VMAX (storage group)** - -- Host I/O Limit (MB/Sec) - No Limit -- Host I/O Limit (IO/Sec) - No Limit -- Set Dynamic Distribution - N/A - -**Outcome - Block Storage (cinder)** - -Volume is created against volume type and there is no QoS change. - -iSCSI multipathing support -~~~~~~~~~~~~~~~~~~~~~~~~~~ - -- Install open-iscsi on all nodes on your system -- Do not install EMC PowerPath as they cannot co-exist with native multipath - software -- Multipath tools must be installed on all nova compute nodes - -On Ubuntu: - -.. code-block:: console - - # apt-get install open-iscsi #ensure iSCSI is installed - # apt-get install multipath-tools #multipath modules - # apt-get install sysfsutils sg3-utils #file system utilities - # apt-get install scsitools #SCSI tools - -On openSUSE and SUSE Linux Enterprise Server: - -.. code-block:: console - - # zypper install open-iscsi #ensure iSCSI is installed - # zypper install multipath-tools #multipath modules - # zypper install sysfsutils sg3-utils #file system utilities - # zypper install scsitools #SCSI tools - -On Red Hat Enterprise Linux and CentOS: - -.. 
code-block:: console - - # yum install iscsi-initiator-utils #ensure iSCSI is installed - # yum install device-mapper-multipath #multipath modules - # yum install sysfsutils sg3-utils #file system utilities - # yum install scsitools #SCSI tools - - -Multipath configuration file ----------------------------- - -The multipath configuration file may be edited for better management and -performance. Log in as a privileged user and make the following changes to -:file:`/etc/multipath.conf` on the Compute (nova) node(s). - -.. code-block:: vim - - devices { - # Device attributed for EMC VMAX - device { - vendor "EMC" - product "SYMMETRIX" - path_grouping_policy multibus - getuid_callout "/lib/udev/scsi_id --page=pre-spc3-83 --whitelisted --device=/dev/%n" - path_selector "round-robin 0" - path_checker tur - features "0" - hardware_handler "0" - prio const - rr_weight uniform - no_path_retry 6 - rr_min_io 1000 - rr_min_io_rq 1 - } - } - -You may need to reboot the host after installing the MPIO tools or restart -iSCSI and multipath services. - -On Ubuntu: - -.. code-block:: console - - # service open-iscsi restart - # service multipath-tools restart - -On openSUSE, SUSE Linux Enterprise Server, Red Hat Enterprise Linux, and -CentOS: - -.. code-block:: console - - # systemctl restart open-iscsi - # systemctl restart multipath-tools - -.. code-block:: console - - $ lsblk - NAME MAJ:MIN RM SIZE RO TYPE MOUNTPOINT - sda 8:0 0 1G 0 disk - ..360000970000196701868533030303235 (dm-6) 252:6 0 1G 0 mpath - sdb 8:16 0 1G 0 disk - ..360000970000196701868533030303235 (dm-6) 252:6 0 1G 0 mpath - vda 253:0 0 1T 0 disk - -OpenStack configurations ------------------------- - -On Compute (nova) node, add the following flag in the ``[libvirt]`` section of -:file:`/etc/nova/nova.conf`: - -.. code-block:: ini - - iscsi_use_multipath = True - -On cinder controller node, set the multipath flag to true in -:file:`/etc/cinder/cinder.conf`: - -.. 
code-block:: ini - - use_multipath_for_image_xfer = True - -Restart ``nova-compute`` and ``cinder-volume`` services after the change. - -Verify you have multiple initiators available on the compute node for I/O -------------------------------------------------------------------------- - -#. Create a 3GB VMAX volume. -#. Create an instance from image out of native LVM storage or from VMAX - storage, for example, from a bootable volume -#. Attach the 3GB volume to the new instance: - - .. code-block:: console - - $ multipath -ll - mpath102 (360000970000196700531533030383039) dm-3 EMC,SYMMETRIX - size=3G features='1 queue_if_no_path' hwhandler='0' wp=rw - '-+- policy='round-robin 0' prio=1 status=active - 33:0:0:1 sdb 8:16 active ready running - '- 34:0:0:1 sdc 8:32 active ready running - -#. Use the ``lsblk`` command to see the multipath device: - - .. code-block:: console - - $ lsblk - NAME MAJ:MIN RM SIZE RO TYPE MOUNTPOINT - sdb 8:0 0 3G 0 disk - ..360000970000196700531533030383039 (dm-6) 252:6 0 3G 0 mpath - sdc 8:16 0 3G 0 disk - ..360000970000196700531533030383039 (dm-6) 252:6 0 3G 0 mpath - vda - -Consistency group support -~~~~~~~~~~~~~~~~~~~~~~~~~ - -Consistency Groups operations are performed through the CLI using v2 of -the cinder API. - -:file:`/etc/cinder/policy.json` may need to be updated to enable new API calls -for Consistency groups. - -.. note:: - Even though the terminology is 'Consistency Group' in OpenStack, a Storage - Group is created on the VMAX, and should not be confused with a VMAX - Consistency Group which is an SRDF feature. The Storage Group is not - associated with any Service Level. - -Operations ----------- - -* Create a Consistency Group: - - .. code-block:: console - - cinder --os-volume-api-version 2 consisgroup-create [--name ] - [--description ] [--availability-zone ] - - - .. code-block:: console - - $ cinder --os-volume-api-version 2 consisgroup-create --name bronzeCG2 volume_type_1 - -* List Consistency Groups: - - .. 
code-block:: console - - cinder consisgroup-list [--all-tenants [<0|1>]] - - .. code-block:: console - - $ cinder consisgroup-list - -* Show a Consistency Group: - - .. code-block:: console - - cinder consisgroup-show - - .. code-block:: console - - $ cinder consisgroup-show 38a604b7-06eb-4202-8651-dbf2610a0827 - -* Update a consistency Group: - - .. code-block:: console - - cinder consisgroup-update [--name ] [--description ] - [--add-volumes ] [--remove-volumes ] - - - Change name: - - .. code-block:: console - - $ cinder consisgroup-update --name updated_name 38a604b7-06eb-4202-8651-dbf2610a0827 - - Add volume(s) to a Consistency Group: - - .. code-block:: console - - $ cinder consisgroup-update --add-volumes af1ae89b-564b-4c7f-92d9-c54a2243a5fe 38a604b7-06eb-4202-8651-dbf2610a0827 - - Delete volume(s) from a Consistency Group: - - .. code-block:: console - - $ cinder consisgroup-update --remove-volumes af1ae89b-564b-4c7f-92d9-c54a2243a5fe 38a604b7-06eb-4202-8651-dbf2610a0827 - -* Create a snapshot of a Consistency Group: - - .. code-block:: console - - cinder cgsnapshot-create [--name ] [--description ] - - - .. code-block:: console - - $ cinder cgsnapshot-create 618d962d-2917-4cca-a3ee-9699373e6625 - -* Delete a snapshot of a Consistency Group: - - .. code-block:: console - - cinder cgsnapshot-delete [ ...] - - .. code-block:: console - - $ cinder cgsnapshot-delete 618d962d-2917-4cca-a3ee-9699373e6625 - -* Delete a Consistency Group: - - .. code-block:: console - - cinder consisgroup-delete [--force] [ ...] - - .. code-block:: console - - $ cinder consisgroup-delete --force 618d962d-2917-4cca-a3ee-9699373e6625 - -* Create a Consistency group from source: - - .. code-block:: console - - cinder consisgroup-create-from-src [--cgsnapshot ] - [--source-cg ] [--name ] [--description ] - - .. code-block:: console - - $ cinder consisgroup-create-from-src --source-cg 25dae184-1f25-412b-b8d7-9a25698fdb6d - - .. 
code-block:: console - - $ cinder consisgroup-create-from-src --cgsnapshot 618d962d-2917-4cca-a3ee-9699373e6625 - -* You can also create a volume in a consistency group in one step: - - .. code-block:: console - - $ openstack volume create [--consistency-group <consistency-group>] - [--description <description>] [--type <volume-type>] - [--availability-zone <availability-zone>] [--size <size>] - - .. code-block:: console - - $ openstack volume create --type volume_type_1 --consistency-group \ - 1de80c27-3b2f-47a6-91a7-e867cbe36462 --size 1 cgBronzeVol - - -Workload Planner (WLP) -~~~~~~~~~~~~~~~~~~~~~~ - -VMAX Hybrid allows you to manage application storage by using Service Level -Objectives (SLO) using policy based automation rather than the tiering in the -VMAX2. The VMAX Hybrid comes with up to 6 SLO policies defined. Each has a -set of workload characteristics that determine the drive types and mixes -which will be used for the SLO. All storage in the VMAX Array is virtually -provisioned, and all of the pools are created in containers called Storage -Resource Pools (SRP). Typically there is only one SRP, however there can be -more. Therefore, it is the same pool we will provision to but we can provide -different SLO/Workload combinations. - -The SLO capacity is retrieved by interfacing with Unisphere Workload Planner -(WLP). If you do not set up this relationship then the capacity retrieved is -that of the entire SRP. This can cause issues as it can never be an accurate -representation of what storage is available for any given SLO and Workload -combination. - -Enabling WLP on Unisphere -------------------------- - -#. To enable WLP on Unisphere, click on the - :menuselection:`array-->Performance-->Settings`. -#. Set both the :guilabel:`Real Time` and the :guilabel:`Root Cause Analysis`. -#. Click :guilabel:`Register`. - -..
note:: - - This should be set up ahead of time (allowing for several hours of data - collection), so that the Unisphere for VMAX Performance Analyzer can - collect rated metrics for each of the supported element types. - -Using TestSmiProvider to add statistics access point ----------------------------------------------------- - -After enabling WLP you must then enable SMI-S to gain access to the WLP data: - -#. Connect to the SMI-S Provider using TestSmiProvider. -#. Navigate to the :guilabel:`Active` menu. -#. Type ``reg`` and enter the noted responses to the questions: - - .. code-block:: console - - (EMCProvider:5989) ? reg - Current list of statistics Access Points: ? - Note: The current list will be empty if there are no existing Access Points. - Add Statistics Access Point {y|n} [n]: y - HostID [l2se0060.lss.emc.com]: ? - Note: Enter the Unisphere for VMAX location using a fully qualified Host ID. - Port [8443]: ? - Note: The Port default is the Unisphere for VMAX default secure port. If the secure port - is different for your Unisphere for VMAX setup, adjust this value accordingly. - User [smc]: ? - Note: Enter the Unisphere for VMAX username. - Password [smc]: ? - Note: Enter the Unisphere for VMAX password. - -#. Type ``reg`` again to view the current list: - - .. code-block:: console - - (EMCProvider:5988) ? reg - Current list of statistics Access Points: - HostIDs: - l2se0060.lss.emc.com - PortNumbers: - 8443 - Users: - smc - Add Statistics Access Point {y|n} [n]: n - - -Attach and detach snapshots -~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -``Attach snapshot`` and ``Detach snapshot`` are used internally by -non-disruptive backup and backup snapshot. As of the Newton release, -it is possible to back up a volume, but not possible to directly back up -a snapshot. Volume back up functionality has been available ever since backups -were introduced into the Cinder service. 
The ability to back up a volume -directly is valuable because you can back up a volume in one step. Users can -take snapshots from the volumes as a way to protect their data. These snapshots -reside on the storage backend itself. Providing a way -to backup snapshots directly allows users to protect the snapshots -taken from the volumes on a backup device, separately from the storage -backend. - -There are users who have taken many snapshots and would like a way to protect -these snapshots. The functionality to backup snapshots provides another layer -of data protection. - -Please refer to `backup and restore volumes and -snapshots ` -for more information. - -Enable attach and detach snapshot functionality ----------------------------------------------- - -#. Ensure that the ``cinder-backup`` service is running. -#. The backup driver for the swift back end performs a volume backup to an - object storage system. To enable the swift backup driver, include the - following option in the ``cinder.conf`` file: - - .. code-block:: yaml - - backup_driver = cinder.backup.drivers.swift - -#. In order to force the volume to run attach and detach on the snapshot - and not the volume you need to put the following key-value pair in the - ``[DEFAULT]`` section of the ``cinder.conf``: - - .. code-block:: console - - backup_use_same_host = True - -.. note:: - - You may need to increase the message queue timeout value which is 60 by - default in the ``[DEFAULT]`` section of the ``cinder.conf``. This is - necessary because the snapshot may take more than this time. - - .. code-block:: console - - rpc_response_timeout = 240 - -Use case 1 - Create a volume backup when the volume is in-use -------------------------------------------------------------- - -#. Create a bootable volume and launch it so the volume status is in-use. -#. Create a backup of the volume, where ``VOLUME`` - is the volume name or volume ``ID``.
This will initiate a snapshot attach - and a snapshot detach on a temporary snapshot: - - .. code-block:: console - - openstack backup create --force VOLUME - -#. For example: - - .. code-block:: console - - openstack backup create --force cba1ca83-b857-421a-87c3-df81eb9ea8ab - -Use case 2 - Restore a backup of a volume ------------------------------------------ - -#. Restore the backup from Use case 1, where ``BACKUP_ID`` is the identifier of - the backup from Use case 1. - - .. code-block:: console - - openstack backup restore BACKUP_ID - -#. For example: - - .. code-block:: console - - openstack backup restore ec7e17ec-ae3c-4495-9ee6-7f45c9a89572 - -Once complete, launch the back up as an instance, and it should be a -bootable volume. - -Use case 3 - Create a backup of a snapshot ------------------------------------------- - -#. Create a volume. -#. Create a snapshot of the volume. -#. Create a backup of the snapshot, where ``VOLUME`` is the volume name or - volume ID, ``SNAPSHOT_ID`` is the ID of the volume's snapshot. This will - initiate a snapshot attach and a snapshot detach on the snapshot. - - .. code-block:: console - - openstack backup create [--snapshot SNAPSHOT_ID] VOLUME - -#. For example: - - .. code-block:: console - - openstack backup create --snapshot 6ab440c2-80ef-4f16-ac37-2d9db938732c 9fedfc4a-5f25-4fa1-8d8d-d5bec91f72e0 - -Use case 4 - Restore backup of a snapshot ------------------------------------------ - -#. Restore the backup where ``BACKUP_ID`` is the identifier of the backup from - Use case 3. - - .. code-block:: console - - openstack backup restore BACKUP_ID - -#. For example: - - .. code-block:: console - - openstack backup restore ec7e17ec-ae3c-4495-9ee6-7f45c9a89572 - - -All Flash compression support -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -On an All Flash array, the creation of any storage group has a compressed -attribute by default.
Setting compression on a storage group does not mean -that all the devices will be immediately compressed. It means that for all -incoming writes compression will be considered. Setting compression ``off`` on -a storage group does not mean that all the devices will be uncompressed. -It means all the writes to compressed tracks will make these tracks -uncompressed. - -.. note:: - - This feature is only applicable for All Flash arrays, 250F, 450F or 850F. - -Use case 1 - Compression disabled create, attach, detach, and delete volume ---------------------------------------------------------------------------- - -#. Create a new volume type called ``VMAX_COMPRESSION_DISABLED``. -#. Set an extra spec ``volume_backend_name``. -#. Set a new extra spec ``storagetype:disablecompression = True``. -#. Create a new volume. -#. Check in Unisphere or symcli to see if the volume - exists in storage group ``OS----CD-SG``, and - compression is disabled on that storage group. -#. Attach the volume to an instance. Check in Unisphere or symcli to see if the - volume exists in storage group - ``OS-----CD-SG``, and - compression is disabled on that storage group. -#. Detach volume from instance. Check in Unisphere or symcli to see if the - volume exists in storage group ``OS----CD-SG``, - and compression is disabled on that storage group. -#. Delete the volume. If this was the last volume in the - ``OS----CD-SG`` storage group, - it should also be deleted. - - -Use case 2 - Compression disabled create, delete snapshot and delete volume ---------------------------------------------------------------------------- - -#. Repeat steps 1-5 of Use case 1. -#. Create a snapshot. The volume should now exist in - ``OS----CD-SG``. -#. Delete the snapshot. The volume should be removed from - ``OS----CD-SG``. -#. Delete the volume. If this volume is the last volume in - ``OS----CD-SG``, it should also be deleted. 
- -Use case 3 - Retype from compression disabled to compression enabled --------------------------------------------------------------------- - -#. Repeat steps 1-4 of Use case 1. -#. Create a new volume type. For example ``VMAX_COMPRESSION_ENABLED``. -#. Set extra spec ``volume_backend_name`` as before. -#. Set the new extra spec's compression as - ``storagetype:disablecompression = False`` or DO NOT set this extra spec. -#. Retype from volume type ``VMAX_COMPRESSION_DISABLED`` to - ``VMAX_COMPRESSION_ENABLED``. -#. Check in Unisphere or symcli to see if the volume exists in storage group - ``OS----SG``, and compression is enabled on - that storage group. - -.. note:: - If extra spec ``storagetype:disablecompression`` is set on a hybrid, it is - ignored because compression is not a feature on a VMAX3 hybrid. - - -Volume replication support -~~~~~~~~~~~~~~~~~~~~~~~~~~ - -Configure the source and target arrays --------------------------------------- - -#. Configure a synchronous SRDF group between the chosen source and target - arrays for the VMAX cinder driver to use. The source array must correspond - with the ```` entry in the VMAX XML file. -#. Select both the director and the ports for the SRDF emulation to use on - both sides. Bear in mind that network topology is important when choosing - director endpoints. Currently, the only supported mode is `Synchronous`. - - .. note:: - For full failover functionality, the source and target VMAX arrays must be - discovered and managed by the same SMI-S/ECOM server, locally connected - for example. This SMI-S/ ECOM server cannot be embedded - it can be - installed on a physical server or a VM hosted by an ESX server only. - - .. note:: - With both arrays being managed by the one SMI-S server, it is the cloud - storage administrators responsibility to account for a DR scenario where the - management (SMI-S) server goes down as well as the primary array. 
In that - event, the details and credentials of a back-up SMI-S server can be passed - in to the XML file, and the VMAX cinder driver can be rebooted. It would be - advisable to have the SMI-S server at a third location (separate from both - arrays) if possible. - - .. note:: - If the source and target arrays are not managed by the same management - server (that is, the target array is remotely connected to server), in the - event of a full disaster scenario (for example, the primary array is - completely lost and all connectivity to it is gone), the SMI-S server - would no longer be able to contact the target array. In this scenario, - the volumes would be automatically failed over to the target array, but - administrator intervention would be required to either; configure the - target (remote) array as local to the current SMI-S server, or enter - the details to the XML file of a second SMI-S server, which is locally - connected to the target array, and restart the cinder volume service. - -#. Enable replication in ``/etc/cinder/cinder.conf``. - To enable the replication functionality in VMAX cinder driver, it is - necessary to create a replication volume-type. The corresponding - back-end stanza in the ``cinder.conf`` for this volume-type must then - include a ``replication_device`` parameter. This parameter defines a - single replication target array and takes the form of a list of key - value pairs. - - .. code-block:: console - - enabled_backends = VMAX_FC_REPLICATION - [VMAX_FC_REPLICATION] - volume_driver = cinder.volume.drivers.emc.emc_vmax_FC.EMCVMAXFCDriver - cinder_emc_config_file = /etc/cinder/cinder_emc_config_VMAX_FC_REPLICATION.xml - volume_backend_name = VMAX_FC_REPLICATION - replication_device = target_device_id:000197811111, remote_port_group:os-failover-pg, remote_pool:SRP_1, rdf_group_label: 28_11_07, allow_extend:False - - * ``target_device_id`` is a unique VMAX array serial number of the target - array. 
For full failover functionality, the source and target VMAX arrays - must be discovered and managed by the same SMI-S/ECOM server. - That is, locally connected. Follow the instructions in the SMI-S release - notes. - - * ``remote_port_group`` is the name of a VMAX port group that has been - pre-configured to expose volumes managed by this backend in the event - of a failover. Make sure that this portgroup contains either all FC or - all iSCSI port groups (for a given back end), as appropriate for the - configured driver (iSCSI or FC). - * ``remote_pool`` is the unique pool name for the given target array. - * ``rdf_group_label`` is the name of a VMAX SRDF group (Synchronous) that - has been pre-configured between the source and target arrays. - * ``allow_extend`` is a flag for allowing the extension of replicated volumes. - To extend a volume in an SRDF relationship, this relationship must first be - broken, both the source and target volumes are then independently extended, - and then the replication relationship is re-established. As the SRDF link - must be severed, due caution should be exercised when performing this - operation. If not explicitly set, this flag defaults to ``False``. - - .. note:: - Service Level and Workload: An attempt will be made to create a storage - group on the target array with the same service level and workload combination - as the primary. However, if this combination is unavailable on the target - (for example, in a situation where the source array is a Hybrid, the target array - is an All Flash, and an All Flash incompatible SLO like Bronze is - configured), no SLO will be applied. - - .. note:: - The VMAX cinder drivers can support a single replication target per - back-end, that is we do not support Concurrent SRDF or Cascaded SRDF. - Ensure there is only a single ``.replication_device.`` entry per - back-end stanza. - -#. Create a ``replication-enabled`` volume type. 
Once the - ``replication_device`` parameter has been entered in the VMAX - backend entry in the ``cinder.conf``, a corresponding volume type - needs to be created with the ``replication_enabled`` property set. See - above ``Setup VMAX drivers`` for details. - - .. code-block:: console - - $ openstack volume type set --property replication_enabled='<is> True' VMAX_FC_REPLICATION - - -Volume replication interoperability with other features -------------------------------------------------------- - -Most features are supported, except for the following: - -* There is no OpenStack Consistency Group or Generic Volume Group support - for replication-enabled VMAX volumes. - -* Storage-assisted retype operations on replication-enabled VMAX volumes - (moving from a non-replicated type to a replicated-type and vice-versa. - Moving to another SLO/workload combination, for example) are not supported. - -* The image volume cache functionality is supported (enabled by setting - ``image_volume_cache_enabled = True``), but one of two actions must be taken - when creating the cached volume: - - * The first boot volume created on a backend (which will trigger the - cached volume to be created) should be the smallest necessary size. - For example, if the minimum size disk to hold an image is 5GB, create - the first boot volume as 5GB. - * Alternatively, ensure that the ``allow_extend`` option in the - ``replication_device`` parameter is set to ``True``. - - This is because the initial boot volume is created at the minimum required - size for the requested image, and then extended to the user specified size. - - -Failover host -------------- - -In the event of a disaster, or where there is required downtime, upgrade -of the primary array for example, the administrator can issue the failover -host command to failover to the configured target: - -..
code-block:: console - - $ cinder failover-host cinder_host@VMAX_FC_REPLICATION#Diamond+SRP_1+000192800111 - -If the primary array becomes available again, you can initiate a failback -using the same command and specifying ``--backend_id default``: - -.. code-block:: console - - $ cinder failover-host \ - cinder_host@VMAX_FC_REPLICATION#Diamond+SRP_1+000192800111 \ - --backend_id default - - -Volume retype - storage assisted volume migration -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -Volume retype with storage assisted migration is supported now for -VMAX3 arrays. Cinder requires that for storage assisted migration, a -volume cannot be retyped across backends. For using storage assisted volume -retype, follow these steps: - -#. Add the parameter ``multi_pool_support`` to the configuration group in the - ``/etc/cinder/cinder.conf`` file and set it to ``True``. - - .. code-block:: console - - [CONF_GROUP_FC] - volume_driver = cinder.volume.drivers.dell_emc.vmax.fc.EMCVMAXFCDriver - cinder_emc_config_file = /etc/cinder/cinder_emc_config_CONF_GROUP_FC.xml - volume_backend_name = FC_backend - multi_pool_support = True - -#. Configure a single backend per SRP for the ``VMAX`` (Only VMAX3 arrays). - This is different from the regular configuration where one backend is - configured per service level. - -#. Create the ``/etc/cinder/cinder_emc_config_CONF_GROUP_FC.xml`` and add - the following lines to the XML for VMAX All Flash and Hybrid. - - .. code-block:: console - - - - 1.1.1.1 - 00 - user1 - password1 - - OS-PORTGROUP1-PG - OS-PORTGROUP2-PG - - 111111111111 - SRP_1 - - - .. note:: - There is no need to specify the Service Level and Workload in the XML - file. A single XML file corresponding to the backend is sufficient - instead of creating one each for the desired Service Level and Workload - combination. - -#. 
Once the backend is configured in the ``cinder.conf`` file and the VMAX - specific configuration XML created, restart the cinder volume service for - the changes to take place. - -#. Run the command ``cinder get-pools --detail`` to query for the pool - information. This should list all the available Service Level and Workload - combinations available for the SRP as pools belonging to the same backend. - -#. Use the following examples of OpenStack commands to create various volume - types. The below example demonstrates creating a volume type for Diamond - Service Level and OLTP workload. - - .. code-block:: console - - $ openstack volume type create VMAX_FC_DIAMOND_OLTP - $ openstack volume type set --property volume_backend_name=FC_backend VMAX_FC_DIAMOND_OLTP - $ openstack volume type set --property pool_name=Diamond+OLTP+SRP_1+111111111111 - - .. note:: - Create as many volume types as the number of Service Level and Workload - (available) combinations which you are going to use for provisioning - volumes. The ``pool_name`` is the additional property which has to be set - and is of the format: ``+++``. - This can be obtained from the output of the ``cinder get-pools --detail``. - -#. For migrating a volume from one Service Level or Workload combination to - another, use volume retype with the migration-policy to on-demand. The - target volume type should have the same ``volume_backend_name`` configured - and should have the desired ``pool_name`` to which you are trying to retype - to. - - .. 
code-block:: console - - $ cinder retype --migration-policy on-demand diff --git a/doc/source/configuration/block-storage/drivers/emc-vnx-driver.rst b/doc/source/configuration/block-storage/drivers/emc-vnx-driver.rst deleted file mode 100644 index 855c836c8..000000000 --- a/doc/source/configuration/block-storage/drivers/emc-vnx-driver.rst +++ /dev/null @@ -1,1121 +0,0 @@ -=================== -Dell EMC VNX driver -=================== - -EMC VNX driver interacts with configured VNX array. It supports -both iSCSI and FC protocol. - -The VNX cinder driver performs the volume operations by -executing Navisphere CLI (NaviSecCLI) which is a command-line interface used -for management, diagnostics, and reporting functions for VNX. It also -supports both iSCSI and FC protocol. - - -System requirements -~~~~~~~~~~~~~~~~~~~ - -- VNX Operational Environment for Block version 5.32 or higher. -- VNX Snapshot and Thin Provisioning license should be activated for VNX. -- Python library ``storops`` to interact with VNX. -- Navisphere CLI v7.32 or higher is installed along with the driver. - -Supported operations -~~~~~~~~~~~~~~~~~~~~ - -- Create, delete, attach, and detach volumes. -- Create, list, and delete volume snapshots. -- Create a volume from a snapshot. -- Copy an image to a volume. -- Clone a volume. -- Extend a volume. -- Migrate a volume. -- Retype a volume. -- Get volume statistics. -- Create and delete consistency groups. -- Create, list, and delete consistency group snapshots. -- Modify consistency groups. -- Efficient non-disruptive volume backup. -- Create a cloned consistency group. -- Create a consistency group from consistency group snapshots. -- Replication v2.1 support. -- Generic Group support. - -Preparation -~~~~~~~~~~~ - -This section contains instructions to prepare the Block Storage nodes to -use the EMC VNX driver. You should install the Navisphere CLI and ensure you -have correct zoning configurations. 
 - -Install Navisphere CLI ---------------------- - -Navisphere CLI needs to be installed on all Block Storage nodes within -an OpenStack deployment. You need to download different versions for -different platforms: - -- For Ubuntu x64, DEB is available at `EMC OpenStack - Github `_. - -- For all other variants of Linux, Navisphere CLI is available at - `Downloads for VNX2 - Series `_ or - `Downloads for VNX1 - Series `_. - -Install Python library storops ------------------------------- - -``storops`` is a Python library that interacts with VNX array through -Navisphere CLI. -Use the following command to install the ``storops`` library: - -.. code-block:: console - - $ pip install storops - - -Check array software --------------------- - -Make sure you have the following software installed for certain features: - -+--------------------------------------------+---------------------+ -| Feature | Software Required | -+============================================+=====================+ -| All | ThinProvisioning | -+--------------------------------------------+---------------------+ -| All | VNXSnapshots | -+--------------------------------------------+---------------------+ -| FAST cache support | FASTCache | -+--------------------------------------------+---------------------+ -| Create volume with type ``compressed`` | Compression | -+--------------------------------------------+---------------------+ -| Create volume with type ``deduplicated`` | Deduplication | -+--------------------------------------------+---------------------+ - -**Required software** - -You can check the status of your array software in the :guilabel:`Software` -page of :guilabel:`Storage System Properties`. Here is what it looks like: - -.. figure:: ../../figures/emc-enabler.png - -Network configuration ---------------------- - -For the FC Driver, FC zoning is properly configured between the hosts and -the VNX. Check :ref:`register-fc-port-with-vnx` for reference. 
- -For the iSCSI Driver, make sure your VNX iSCSI port is accessible by -your hosts. Check :ref:`register-iscsi-port-with-vnx` for reference. - -You can use ``initiator_auto_registration = True`` configuration to avoid -registering the ports manually. Check the detail of the configuration in -:ref:`emc-vnx-conf` for reference. - -If you are trying to setup multipath, refer to :ref:`multipath-setup`. - - -.. _emc-vnx-conf: - -Back-end configuration -~~~~~~~~~~~~~~~~~~~~~~ - - -Make the following changes in the ``/etc/cinder/cinder.conf`` file. - -Minimum configuration ---------------------- - -Here is a sample of minimum back-end configuration. See the following sections -for the detail of each option. -Set ``storage_protocol = iscsi`` if iSCSI protocol is used. - -.. code-block:: ini - - [DEFAULT] - enabled_backends = vnx_array1 - - [vnx_array1] - san_ip = 10.10.72.41 - san_login = sysadmin - san_password = sysadmin - naviseccli_path = /opt/Navisphere/bin/naviseccli - volume_driver = cinder.volume.drivers.dell_emc.vnx.driver.VNXDriver - initiator_auto_registration = True - storage_protocol = fc - -Multiple back-end configuration -------------------------------- -Here is a sample of a minimum back-end configuration. See following sections -for the detail of each option. -Set ``storage_protocol = iscsi`` if iSCSI protocol is used. - -.. 
code-block:: ini - - [DEFAULT] - enabled_backends = backendA, backendB - - [backendA] - storage_vnx_pool_names = Pool_01_SAS, Pool_02_FLASH - san_ip = 10.10.72.41 - storage_vnx_security_file_dir = /etc/secfile/array1 - naviseccli_path = /opt/Navisphere/bin/naviseccli - volume_driver = cinder.volume.drivers.dell_emc.vnx.driver.VNXDriver - initiator_auto_registration = True - storage_protocol = fc - - [backendB] - storage_vnx_pool_names = Pool_02_SAS - san_ip = 10.10.26.101 - san_login = username - san_password = password - naviseccli_path = /opt/Navisphere/bin/naviseccli - volume_driver = cinder.volume.drivers.dell_emc.vnx.driver.VNXDriver - initiator_auto_registration = True - storage_protocol = fc - -The value of the option ``storage_protocol`` can be either ``fc`` or ``iscsi``, -which is case insensitive. - -For more details on multiple back ends, see `Configure multiple-storage -back ends `_ - -Required configurations ------------------------ - -**IP of the VNX Storage Processors** - -Specify SP A or SP B IP to connect: - -.. code-block:: ini - - san_ip = - -**VNX login credentials** - -There are two ways to specify the credentials. - -- Use plain text username and password. - - Supply for plain username and password: - - .. code-block:: ini - - san_login = - san_password = - storage_vnx_authentication_type = global - - Valid values for ``storage_vnx_authentication_type`` are: ``global`` - (default), ``local``, and ``ldap``. - -- Use Security file. - - This approach avoids the plain text password in your cinder - configuration file. Supply a security file as below: - - .. code-block:: ini - - storage_vnx_security_file_dir = - -Check Unisphere CLI user guide or :ref:`authenticate-by-security-file` -for how to create a security file. - -**Path to your Unisphere CLI** - -Specify the absolute path to your naviseccli: - -.. 
code-block:: ini - - naviseccli_path = /opt/Navisphere/bin/naviseccli - -**Driver's storage protocol** - -- For the FC Driver, add the following option: - - .. code-block:: ini - - volume_driver = cinder.volume.drivers.dell_emc.vnx.driver.VNXDriver - storage_protocol = fc - -- For iSCSI Driver, add the following option: - - .. code-block:: ini - - volume_driver = cinder.volume.drivers.dell_emc.vnx.driver.VNXDriver - storage_protocol = iscsi - -Optional configurations -~~~~~~~~~~~~~~~~~~~~~~~ - -VNX pool names --------------- - -Specify the list of pools to be managed, separated by commas. They should -already exist in VNX. - -.. code-block:: ini - - storage_vnx_pool_names = pool 1, pool 2 - -If this value is not specified, all pools of the array will be used. - -**Initiator auto registration** - -When ``initiator_auto_registration`` is set to ``True``, the driver will -automatically register initiators to all working target ports of the VNX array -during volume attaching (The driver will skip those initiators that have -already been registered) if the option ``io_port_list`` is not specified in -the ``cinder.conf`` file. - -If the user wants to register the initiators with some specific ports but not -register with the other ports, this functionality should be disabled. - -When a comma-separated list is given to ``io_port_list``, the driver will only -register the initiator to the ports specified in the list and only return -target port(s) which belong to the target ports in the ``io_port_list`` instead -of all target ports. - -- Example for FC ports: - - .. code-block:: ini - - io_port_list = a-1,B-3 - - ``a`` or ``B`` is *Storage Processor*, number ``1`` and ``3`` are - *Port ID*. - -- Example for iSCSI ports: - - .. code-block:: ini - - io_port_list = a-1-0,B-3-0 - - ``a`` or ``B`` is *Storage Processor*, the first numbers ``1`` and ``3`` are - *Port ID* and the second number ``0`` is *Virtual Port ID* - -.. 
note::
-
-   - Rather than de-registered, the registered ports will be simply
-     bypassed whether they are in ``io_port_list`` or not.
-
-   - The driver will raise an exception if ports in ``io_port_list``
-     do not exist in VNX during startup.
-
-Force delete volumes in storage group
--------------------------------------
-
-Some ``available`` volumes may remain in storage group on the VNX array due to
-some OpenStack timeout issue. But the VNX array does not allow the user to
-delete the volumes which are in storage group. Option
-``force_delete_lun_in_storagegroup`` is introduced to allow the user to delete
-the ``available`` volumes in this tricky situation.
-
-When ``force_delete_lun_in_storagegroup`` is set to ``True`` in the back-end
-section, the driver will move the volumes out of the storage groups and then
-delete them if the user tries to delete the volumes that remain in the storage
-group on the VNX array.
-
-The default value of ``force_delete_lun_in_storagegroup`` is ``False``.
-
-Over subscription in thin provisioning
---------------------------------------
-
-Over subscription allows the sum of all volumes' capacity (provisioned
-capacity) to be larger than the pool's total capacity.
-
-``max_over_subscription_ratio`` in the back-end section is the ratio of
-provisioned capacity over total capacity.
-
-The default value of ``max_over_subscription_ratio`` is 20.0, which means
-the provisioned capacity can be 20 times of the total capacity.
-If the value of this ratio is set larger than 1.0, the provisioned
-capacity can exceed the total capacity.
-
-Storage group automatic deletion
---------------------------------
-
-For volume attaching, the driver has a storage group on VNX for each compute
-node hosting the vm instances which are going to consume VNX Block Storage
-(using compute node's host name as storage group's name). All the volumes
-attached to the VM instances in a compute node will be put into the storage
-group. 
If ``destroy_empty_storage_group`` is set to ``True``, the driver will
-remove the empty storage group after its last volume is detached. For data
-safety, it is not suggested to set ``destroy_empty_storage_group=True`` unless
-the VNX is exclusively managed by one Block Storage node because consistent
-``lock_path`` is required for operation synchronization for this behavior.
-
-Initiator auto deregistration
------------------------------
-
-Enabling storage group automatic deletion is the precondition of this function.
-If ``initiator_auto_deregistration`` is set to ``True``, the driver will
-deregister all FC and iSCSI initiators of the host after its storage group is
-deleted.
-
-FC SAN auto zoning
-------------------
-
-The EMC VNX driver supports FC SAN auto zoning when ``ZoneManager`` is
-configured and ``zoning_mode`` is set to ``fabric`` in ``cinder.conf``.
-For ZoneManager configuration, refer to :doc:`../fc-zoning`.
-
-Volume number threshold
------------------------
-
-In VNX, there is a limitation on the number of pool volumes that can be created
-in the system. When the limitation is reached, no more pool volumes can be
-created even if there is remaining capacity in the storage pool. In other
-words, if the scheduler dispatches a volume creation request to a back end that
-has free capacity but reaches the volume limitation, the creation fails.
-
-The default value of ``check_max_pool_luns_threshold`` is ``False``. When
-``check_max_pool_luns_threshold=True``, the pool-based back end will check the
-limit and will report 0 free capacity to the scheduler if the limit is reached.
-So the scheduler will be able to skip this kind of pool-based back end that
-runs out of the pool volume number.
-
-iSCSI initiators
-----------------
-
-``iscsi_initiators`` is a dictionary of IP addresses of the iSCSI
-initiator ports on OpenStack compute and block storage nodes which want to
-connect to VNX via iSCSI. 
If this option is configured, the driver will -leverage this information to find an accessible iSCSI target portal for the -initiator when attaching volume. Otherwise, the iSCSI target portal will be -chosen in a relative random way. - -.. note:: - - This option is only valid for iSCSI driver. - -Here is an example. VNX will connect ``host1`` with ``10.0.0.1`` and -``10.0.0.2``. And it will connect ``host2`` with ``10.0.0.3``. - -The key name (``host1`` in the example) should be the output of -:command:`hostname` command. - -.. code-block:: ini - - iscsi_initiators = {"host1":["10.0.0.1", "10.0.0.2"],"host2":["10.0.0.3"]} - -Default timeout ---------------- - -Specify the timeout in minutes for operations like LUN migration, LUN creation, -etc. For example, LUN migration is a typical long running operation, which -depends on the LUN size and the load of the array. An upper bound in the -specific deployment can be set to avoid unnecessary long wait. - -The default value for this option is ``infinite``. - -.. code-block:: ini - - default_timeout = 60 - -Max LUNs per storage group --------------------------- - -The ``max_luns_per_storage_group`` specify the maximum number of LUNs in a -storage group. Default value is 255. It is also the maximum value supported by -VNX. - -Ignore pool full threshold --------------------------- - -If ``ignore_pool_full_threshold`` is set to ``True``, driver will force LUN -creation even if the full threshold of pool is reached. Default to ``False``. - -Extra spec options -~~~~~~~~~~~~~~~~~~ - -Extra specs are used in volume types created in Block Storage as the preferred -property of the volume. - -The Block Storage scheduler will use extra specs to find the suitable back end -for the volume and the Block Storage driver will create the volume based on the -properties specified by the extra spec. - -Use the following command to create a volume type: - -.. 
code-block:: console - - $ openstack volume type create demoVolumeType - -Use the following command to update the extra spec of a volume type: - -.. code-block:: console - - $ openstack volume type set --property provisioning:type=thin thick_provisioning_support=' True' demoVolumeType - -The following sections describe the VNX extra keys. - -Provisioning type ------------------ - -- Key: ``provisioning:type`` - -- Possible Values: - - - ``thick`` - - Volume is fully provisioned. - - Run the following commands to create a ``thick`` volume type: - - .. code-block:: console - - $ openstack volume type create ThickVolumeType - $ openstack volume type set --property provisioning:type=thick thick_provisioning_support=' True' ThickVolumeType - - - ``thin`` - - Volume is virtually provisioned. - - Run the following commands to create a ``thin`` volume type: - - .. code-block:: console - - $ openstack volume type create ThinVolumeType - $ openstack volume type set --property provisioning:type=thin thin_provisioning_support=' True' ThinVolumeType - - - ``deduplicated`` - - Volume is ``thin`` and deduplication is enabled. The administrator shall - go to VNX to configure the system level deduplication settings. To - create a deduplicated volume, the VNX Deduplication license must be - activated on VNX, and specify ``deduplication_support=True`` to let Block - Storage scheduler find the proper volume back end. - - Run the following commands to create a ``deduplicated`` volume type: - - .. code-block:: console - - $ openstack volume type create DeduplicatedVolumeType - $ openstack volume type set --property provisioning:type=deduplicated deduplicated_support=' True' DeduplicatedVolumeType - - - ``compressed`` - - Volume is ``thin`` and compression is enabled. The administrator shall go - to the VNX to configure the system level compression settings. 
To create - a compressed volume, the VNX Compression license must be activated on - VNX, and use ``compression_support=True`` to let Block Storage scheduler - find a volume back end. VNX does not support creating snapshots on a - compressed volume. - - Run the following commands to create a ``compressed`` volume type: - - .. code-block:: console - - $ openstack volume type create CompressedVolumeType - $ openstack volume type set --property provisioning:type=compressed compression_support=' True' CompressedVolumeType - -- Default: ``thick`` - -.. note:: - - ``provisioning:type`` replaces the old spec key ``storagetype:provisioning``. - The latter one is obsolete since the *Mitaka* release. - -Storage tiering support ------------------------ - -- Key: ``storagetype:tiering`` -- Possible values: - - - ``StartHighThenAuto`` - - ``Auto`` - - ``HighestAvailable`` - - ``LowestAvailable`` - - ``NoMovement`` - -- Default: ``StartHighThenAuto`` - -VNX supports fully automated storage tiering which requires the FAST license -activated on the VNX. The OpenStack administrator can use the extra spec key -``storagetype:tiering`` to set the tiering policy of a volume and use the key -``fast_support=' True'`` to let Block Storage scheduler find a volume back -end which manages a VNX with FAST license activated. Here are the five -supported values for the extra spec key ``storagetype:tiering``: - -Run the following commands to create a volume type with tiering policy: - -.. code-block:: console - - $ openstack volume type create ThinVolumeOnAutoTier - $ openstack volume type set --property provisioning:type=thin storagetype:tiering=Auto fast_support=' True' ThinVolumeOnAutoTier - -.. note:: - - The tiering policy cannot be applied to a deduplicated volume. Tiering - policy of the deduplicated LUN align with the settings of the pool. 
- -FAST cache support ------------------- - -- Key: ``fast_cache_enabled`` - -- Possible values: - - - ``True`` - - - ``False`` - -- Default: ``False`` - -VNX has FAST Cache feature which requires the FAST Cache license activated on -the VNX. Volume will be created on the backend with FAST cache enabled when -`` True`` is specified. - -Pool name ---------- - -- Key: ``pool_name`` - -- Possible values: name of the storage pool managed by cinder - -- Default: None - -If the user wants to create a volume on a certain storage pool in a back end -that manages multiple pools, a volume type with a extra spec specified storage -pool should be created first, then the user can use this volume type to create -the volume. - -Run the following commands to create the volume type: - -.. code-block:: console - - $ openstack volume type create HighPerf - $ openstack volume type set --property pool_name=Pool_02_SASFLASH volume_backend_name=vnx_41 HighPerf - -Obsolete extra specs --------------------- - -.. note:: - - *DO NOT* use the following obsolete extra spec keys: - - - ``storagetype:provisioning`` - - ``storagetype:pool`` - - -Advanced features -~~~~~~~~~~~~~~~~~ - -Snap copy ---------- - -- Metadata Key: ``snapcopy`` -- Possible Values: - - - ``True`` or ``true`` - - ``False`` or ``false`` - -- Default: `False` - -VNX driver supports snap copy which accelerates the process for -creating a copied volume. - -By default, the driver will use `asynchronous migration support`_, which will -start a VNX migration session. When snap copy is used, driver creates a -snapshot and mounts it as a volume for the 2 kinds of operations which will be -instant even for large volumes. - -To enable this functionality, append ``--metadata snapcopy=True`` -when creating cloned volume or creating volume from snapshot. - -.. code-block:: console - - $ cinder create --source-volid --name "cloned_volume" --metadata snapcopy=True - -Or - -.. 
code-block:: console - - $ cinder create --snapshot-id --name "vol_from_snapshot" --metadata snapcopy=True - - -The newly created volume is a snap copy instead of -a full copy. If a full copy is needed, retype or migrate can be used -to convert the snap-copy volume to a full-copy volume which may be -time-consuming. - -You can determine whether the volume is a snap-copy volume or not by -showing its metadata. If the ``snapcopy`` in metadata is ``True`` or ``true``, -the volume is a snap-copy volume. Otherwise, it is a full-copy volume. - -.. code-block:: console - - $ cinder metadata-show - -**Constraints** - -- The number of snap-copy volumes created from a single source volume is - limited to 255 at one point in time. -- The source volume which has snap-copy volume can not be deleted or migrated. -- snapcopy volume will be change to full-copy volume after host-assisted or - storage-assisted migration. -- snapcopy volume can not be added to consisgroup because of VNX limitation. - -Efficient non-disruptive volume backup --------------------------------------- - -The default implementation in Block Storage for non-disruptive volume backup is -not efficient since a cloned volume will be created during backup. - -The approach of efficient backup is to create a snapshot for the volume and -connect this snapshot (a mount point in VNX) to the Block Storage host for -volume backup. This eliminates migration time involved in volume clone. - -**Constraints** - -- Backup creation for a snap-copy volume is not allowed if the volume - status is ``in-use`` since snapshot cannot be taken from this volume. - -Configurable migration rate ---------------------------- - -VNX cinder driver is leveraging the LUN migration from the VNX. LUN migration -is involved in cloning, migrating, retyping, and creating volume from snapshot. -When admin set ``migrate_rate`` in volume's ``metadata``, VNX driver can start -migration with specified rate. 
The available values for the ``migrate_rate`` -are ``high``, ``asap``, ``low`` and ``medium``. - -The following is an example to set ``migrate_rate`` to ``asap``: - -.. code-block:: console - - $ cinder metadata set migrate_rate=asap - -After set, any cinder volume operations involving VNX LUN migration will -take the value as the migration rate. To restore the migration rate to -default, unset the metadata as following: - -.. code-block:: console - - $ cinder metadata unset migrate_rate - -.. note:: - - Do not use the ``asap`` migration rate when the system is in production, as the normal - host I/O may be interrupted. Use asap only when the system is offline - (free of any host-level I/O). - -Replication v2.1 support ------------------------- - -Cinder introduces Replication v2.1 support in Mitaka, it supports -fail-over and fail-back replication for specific back end. In VNX cinder -driver, **MirrorView** is used to set up replication for the volume. - -To enable this feature, you need to set configuration in ``cinder.conf`` as -below: - -.. code-block:: ini - - replication_device = backend_id:, - san_ip:192.168.1.2, - san_login:admin, - san_password:admin, - naviseccli_path:/opt/Navisphere/bin/naviseccli, - storage_vnx_authentication_type:global, - storage_vnx_security_file_dir: - -Currently, only synchronized mode **MirrorView** is supported, and one volume -can only have 1 secondary storage system. Therefore, you can have only one -``replication_device`` presented in driver configuration section. - -To create a replication enabled volume, you need to create a volume type: - -.. code-block:: console - - $ openstack volume type create replication-type - $ openstack volume type set --property replication_enabled=" True" replication-type - -And then create volume with above volume type: - -.. 
code-block:: console - - $ openstack volume create replication-volume --type replication-type --size 1 - -**Supported operations** - -- Create volume -- Create cloned volume -- Create volume from snapshot -- Fail-over volume: - - .. code-block:: console - - $ cinder failover-host --backend_id - -- Fail-back volume: - - .. code-block:: console - - $ cinder failover-host --backend_id default - -**Requirements** - -- 2 VNX systems must be in same domain. -- For iSCSI MirrorView, user needs to setup iSCSI connection before enable - replication in Cinder. -- For FC MirrorView, user needs to zone specific FC ports from 2 - VNX system together. -- MirrorView Sync enabler( **MirrorView/S** ) installed on both systems. -- Write intent log enabled on both VNX systems. - -For more information on how to configure, please refer to: `MirrorView-Knowledgebook:-Releases-30-–-33 `_ - -Asynchronous migration support ------------------------------- - -VNX Cinder driver now supports asynchronous migration during volume cloning. - -The driver now using asynchronous migration when creating a volume from source -as the default cloning method. The driver will return immediately after the -migration session starts on the VNX, which dramatically reduces the time before -a volume is available for use. - -To disable this feature, user can add ``--metadata async_migrate=False`` when -creating new volume from source. - - -Best practice -~~~~~~~~~~~~~ - -.. _multipath-setup: - -Multipath setup ---------------- - -Enabling multipath volume access is recommended for robust data access. -The major configuration includes: - -#. Install ``multipath-tools``, ``sysfsutils`` and ``sg3-utils`` on the - nodes hosting compute and ``cinder-volume`` services. Check - the operating system manual for the system distribution for specific - installation steps. For Red Hat based distributions, they should be - ``device-mapper-multipath``, ``sysfsutils`` and ``sg3_utils``. - -#. 
Specify ``use_multipath_for_image_xfer=true`` in the ``cinder.conf`` file - for each FC/iSCSI back end. - -#. Specify ``iscsi_use_multipath=True`` in ``libvirt`` section of the - ``nova.conf`` file. This option is valid for both iSCSI and FC driver. - -For multipath-tools, here is an EMC recommended sample of -``/etc/multipath.conf`` file. - -``user_friendly_names`` is not specified in the configuration and thus -it will take the default value ``no``. It is not recommended to set it -to ``yes`` because it may fail operations such as VM live migration. - -.. code-block:: vim - - blacklist { - # Skip the files under /dev that are definitely not FC/iSCSI devices - # Different system may need different customization - devnode "^(ram|raw|loop|fd|md|dm-|sr|scd|st)[0-9]*" - devnode "^hd[a-z][0-9]*" - devnode "^cciss!c[0-9]d[0-9]*[p[0-9]*]" - - # Skip LUNZ device from VNX - device { - vendor "DGC" - product "LUNZ" - } - } - - defaults { - user_friendly_names no - flush_on_last_del yes - } - - devices { - # Device attributed for EMC CLARiiON and VNX series ALUA - device { - vendor "DGC" - product ".*" - product_blacklist "LUNZ" - path_grouping_policy group_by_prio - path_selector "round-robin 0" - path_checker emc_clariion - features "1 queue_if_no_path" - hardware_handler "1 alua" - prio alua - failback immediate - } - } - -.. note:: - - When multipath is used in OpenStack, multipath faulty devices may - come out in Nova-Compute nodes due to different issues (`Bug - 1336683 `_ is a - typical example). - -A solution to completely avoid faulty devices has not been found yet. -``faulty_device_cleanup.py`` mitigates this issue when VNX iSCSI storage is -used. Cloud administrators can deploy the script in all Nova-Compute nodes and -use a CRON job to run the script on each Nova-Compute node periodically so that -faulty devices will not stay too long. Refer to: `VNX faulty device -cleanup `_ for -detailed usage and the script. 
-
-Restrictions and limitations
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-
-iSCSI port cache
-----------------
-
-EMC VNX iSCSI driver caches the iSCSI ports information, so that the user
-should restart the ``cinder-volume`` service or wait for seconds (which is
-configured by ``periodic_interval`` in the ``cinder.conf`` file) before any
-volume attachment operation after changing the iSCSI port configurations.
-Otherwise the attachment may fail because the old iSCSI port configurations
-were used.
-
-No extending for volume with snapshots
---------------------------------------
-
-VNX does not support extending the thick volume which has a snapshot. If the
-user tries to extend a volume which has a snapshot, the status of the volume
-would change to ``error_extending``.
-
-Limitations for deploying cinder on compute node
--------------------------------------------------
-
-It is not recommended to deploy the driver on a compute node if ``cinder
-upload-to-image --force True`` is used against an in-use volume. Otherwise,
-``cinder upload-to-image --force True`` will terminate the data access of the
-vm instance to the volume.
-
-Storage group with host names in VNX
-------------------------------------
-
-When the driver notices that there is no existing storage group that has the
-host name as the storage group name, it will create the storage group and also
-add the compute node's or Block Storage node's registered initiators into the
-storage group.
-
-If the driver notices that the storage group already exists, it will assume
-that the registered initiators have also been put into it and skip the
-operations above for better performance.
-
-It is recommended that the storage administrator does not create the storage
-group manually and instead relies on the driver for the preparation. 
If the -storage administrator needs to create the storage group manually for some -special requirements, the correct registered initiators should be put into the -storage group as well (otherwise the following volume attaching operations will -fail). - -EMC storage-assisted volume migration -------------------------------------- - -EMC VNX driver supports storage-assisted volume migration, when the user starts -migrating with ``cinder migrate --force-host-copy False `` or -``cinder migrate ``, cinder will try to leverage the VNX's -native volume migration functionality. - -In following scenarios, VNX storage-assisted volume migration will not be -triggered: - -- ``in-use`` volume migration between back ends with different storage - protocol, for example, FC and iSCSI. -- Volume is to be migrated across arrays. - -Appendix -~~~~~~~~ - -.. _authenticate-by-security-file: - -Authenticate by security file ------------------------------ - -VNX credentials are necessary when the driver connects to the VNX system. -Credentials in ``global``, ``local`` and ``ldap`` scopes are supported. There -are two approaches to provide the credentials. - -The recommended one is using the Navisphere CLI security file to provide the -credentials which can get rid of providing the plain text credentials in the -configuration file. Following is the instruction on how to do this. - -#. Find out the Linux user id of the ``cinder-volume`` processes. Assuming the - ``cinder-volume`` service is running by the account ``cinder``. - -#. Run ``su`` as root user. - -#. In ``/etc/passwd`` file, change - ``cinder:x:113:120::/var/lib/cinder:/bin/false`` - to ``cinder:x:113:120::/var/lib/cinder:/bin/bash`` (This temporary change is - to make step 4 work.) - -#. Save the credentials on behalf of ``cinder`` user to a security file - (assuming the array credentials are ``admin/admin`` in ``global`` scope). 
In - the command below, the ``-secfilepath`` switch is used to specify the - location to save the security file. - - .. code-block:: console - - # su -l cinder -c \ - '/opt/Navisphere/bin/naviseccli -AddUserSecurity -user admin -password admin -scope 0 -secfilepath ' - -#. Change ``cinder:x:113:120::/var/lib/cinder:/bin/bash`` back to - ``cinder:x:113:120::/var/lib/cinder:/bin/false`` in ``/etc/passwd`` file. - -#. Remove the credentials options ``san_login``, ``san_password`` and - ``storage_vnx_authentication_type`` from ``cinder.conf`` file. (normally - it is ``/etc/cinder/cinder.conf`` file). Add option - ``storage_vnx_security_file_dir`` and set its value to the directory path of - your security file generated in the above step. Omit this option if - ``-secfilepath`` is not used in the above step. - -#. Restart the ``cinder-volume`` service to validate the change. - - -.. _register-fc-port-with-vnx: - -Register FC port with VNX -------------------------- - -This configuration is only required when ``initiator_auto_registration=False``. - -To access VNX storage, the Compute nodes should be registered on VNX first if -initiator auto registration is not enabled. - -To perform ``Copy Image to Volume`` and ``Copy Volume to Image`` operations, -the nodes running the ``cinder-volume`` service (Block Storage nodes) must be -registered with the VNX as well. - -The steps mentioned below are for the compute nodes. Follow the same -steps for the Block Storage nodes also (The steps can be skipped if initiator -auto registration is enabled). - -#. Assume ``20:00:00:24:FF:48:BA:C2:21:00:00:24:FF:48:BA:C2`` is the WWN of a - FC initiator port name of the compute node whose host name and IP are - ``myhost1`` and ``10.10.61.1``. Register - ``20:00:00:24:FF:48:BA:C2:21:00:00:24:FF:48:BA:C2`` in Unisphere: - -#. Log in to :guilabel:`Unisphere`, go to - :menuselection:`FNM0000000000 > Hosts > Initiators`. - -#. 
Refresh and wait until the initiator - ``20:00:00:24:FF:48:BA:C2:21:00:00:24:FF:48:BA:C2`` with SP Port ``A-1`` - appears. - -#. Click the :guilabel:`Register` button, select :guilabel:`CLARiiON/VNX` - and enter the host name (which is the output of the :command:`hostname` - command) and IP address: - - - Hostname: ``myhost1`` - - - IP: ``10.10.61.1`` - - - Click :guilabel:`Register`. - -#. Then host ``10.10.61.1`` will appear under - :menuselection:`Hosts > Host List` as well. - -#. Register the ``wwn`` with more ports if needed. - -.. _register-iscsi-port-with-vnx: - -Register iSCSI port with VNX ----------------------------- - -This configuration is only required when ``initiator_auto_registration=False``. - -To access VNX storage, the compute nodes should be registered on VNX first if -initiator auto registration is not enabled. - -To perform ``Copy Image to Volume`` and ``Copy Volume to Image`` operations, -the nodes running the ``cinder-volume`` service (Block Storage nodes) must be -registered with the VNX as well. - -The steps mentioned below are for the compute nodes. Follow the -same steps for the Block Storage nodes also (The steps can be skipped if -initiator auto registration is enabled). - -#. On the compute node with IP address ``10.10.61.1`` and host name ``myhost1``, - execute the following commands (assuming ``10.10.61.35`` is the iSCSI - target): - - #. Start the iSCSI initiator service on the node: - - .. code-block:: console - - # /etc/init.d/open-iscsi start - - #. Discover the iSCSI target portals on VNX: - - .. code-block:: console - - # iscsiadm -m discovery -t st -p 10.10.61.35 - - #. Change directory to ``/etc/iscsi`` : - - .. code-block:: console - - # cd /etc/iscsi - - #. Find out the ``iqn`` of the node: - - .. code-block:: console - - # more initiatorname.iscsi - -#. Log in to :guilabel:`VNX` from the compute node using the target - corresponding to the SPA port: - - .. 
code-block:: console - - # iscsiadm -m node -T iqn.1992-04.com.emc:cx.apm01234567890.a0 -p 10.10.61.35 -l - -#. Assume ``iqn.1993-08.org.debian:01:1a2b3c4d5f6g`` is the initiator name of - the compute node. Register ``iqn.1993-08.org.debian:01:1a2b3c4d5f6g`` in - Unisphere: - - #. Log in to :guilabel:`Unisphere`, go to - :menuselection:`FNM0000000000 > Hosts > Initiators`. - - #. Refresh and wait until the initiator - ``iqn.1993-08.org.debian:01:1a2b3c4d5f6g`` with SP Port ``A-8v0`` - appears. - - #. Click the :guilabel:`Register` button, select :guilabel:`CLARiiON/VNX` - and enter the host name - (which is the output of the :command:`hostname` command) and IP address: - - - Hostname: ``myhost1`` - - - IP: ``10.10.61.1`` - - - Click :guilabel:`Register`. - - #. Then host ``10.10.61.1`` will appear under - :menuselection:`Hosts > Host List` as well. - -#. Log out :guilabel:`iSCSI` on the node: - - .. code-block:: console - - # iscsiadm -m node -u - -#. Log in to :guilabel:`VNX` from the compute node using the target - corresponding to the SPB port: - - .. code-block:: console - - # iscsiadm -m node -T iqn.1992-04.com.emc:cx.apm01234567890.b8 -p 10.10.61.36 -l - -#. In ``Unisphere``, register the initiator with the SPB port. - -#. Log out :guilabel:`iSCSI` on the node: - - .. code-block:: console - - # iscsiadm -m node -u - -#. Register the ``iqn`` with more ports if needed. diff --git a/doc/source/configuration/block-storage/drivers/emc-xtremio-driver.rst b/doc/source/configuration/block-storage/drivers/emc-xtremio-driver.rst deleted file mode 100644 index bb394f9d4..000000000 --- a/doc/source/configuration/block-storage/drivers/emc-xtremio-driver.rst +++ /dev/null @@ -1,251 +0,0 @@ -============================================== -EMC XtremIO Block Storage driver configuration -============================================== - -The high performance XtremIO All Flash Array (AFA) offers Block Storage -services to OpenStack. 
Using the driver, OpenStack Block Storage hosts -can connect to an XtremIO Storage cluster. - -This section explains how to configure and connect the block -storage nodes to an XtremIO storage cluster. - -Support matrix -~~~~~~~~~~~~~~ - -XtremIO version 4.x is supported. - -Supported operations -~~~~~~~~~~~~~~~~~~~~ - -- Create, delete, clone, attach, and detach volumes. - -- Create and delete volume snapshots. - -- Create a volume from a snapshot. - -- Copy an image to a volume. - -- Copy a volume to an image. - -- Extend a volume. - -- Manage and unmanage a volume. - -- Manage and unmanage a snapshot. - -- Get volume statistics. - -- Create, modify, delete, and list consistency groups. - -- Create, modify, delete, and list snapshots of consistency groups. - -- Create consistency group from consistency group or consistency group - snapshot. - -- Volume Migration (host assisted) - -XtremIO Block Storage driver configuration -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -Edit the ``cinder.conf`` file by adding the configuration below under -the [DEFAULT] section of the file in case of a single back end or -under a separate section in case of multiple back ends (for example -[XTREMIO]). The configuration file is usually located under the -following path ``/etc/cinder/cinder.conf``. - -.. include:: ../../tables/cinder-emc_xtremio.rst - -For a configuration example, refer to the configuration -:ref:`emc_extremio_configuration_example`. - -XtremIO driver name -------------------- - -Configure the driver name by setting the following parameter in the -``cinder.conf`` file: - -- For iSCSI: - - .. code-block:: ini - - volume_driver = cinder.volume.drivers.emc.xtremio.XtremIOISCSIDriver - -- For Fibre Channel: - - .. code-block:: ini - - volume_driver = cinder.volume.drivers.emc.xtremio.XtremIOFibreChannelDriver - -XtremIO management server (XMS) IP ----------------------------------- - -To retrieve the management IP, use the :command:`show-xms` CLI command. 
- -Configure the management IP by adding the following parameter: - -.. code-block:: ini - - san_ip = XMS Management IP - -XtremIO cluster name --------------------- - -In XtremIO version 4.0, a single XMS can manage multiple cluster back ends. In -such setups, the administrator is required to specify the cluster name (in -addition to the XMS IP). Each cluster must be defined as a separate back end. - -To retrieve the cluster name, run the :command:`show-clusters` CLI command. - -Configure the cluster name by adding the following parameter: - -.. code-block:: ini - - xtremio_cluster_name = Cluster-Name - -.. note:: - - When a single cluster is managed in XtremIO version 4.0, the cluster name is - not required. - -XtremIO user credentials ------------------------- - -OpenStack Block Storage requires an XtremIO XMS user with administrative -privileges. XtremIO recommends creating a dedicated OpenStack user account that -holds an administrative user role. - -Refer to the XtremIO User Guide for details on user account management. - -Create an XMS account using either the XMS GUI or the -:command:`add-user-account` CLI command. - -Configure the user credentials by adding the following parameters: - -.. code-block:: ini - - san_login = XMS username - san_password = XMS username password - -Multiple back ends -~~~~~~~~~~~~~~~~~~ - -Configuring multiple storage back ends enables you to create several back-end -storage solutions that serve the same OpenStack Compute resources. - -When a volume is created, the scheduler selects the appropriate back end to -handle the request, according to the specified volume type. - -Setting thin provisioning and multipathing parameters -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -To support thin provisioning and multipathing in the XtremIO Array, the -following parameters from the Nova and Cinder configuration files should be -modified as follows: - -- Thin Provisioning - - All XtremIO volumes are thin provisioned. 
The default value of 20 should be - maintained for the ``max_over_subscription_ratio`` parameter. - - The ``use_cow_images`` parameter in the ``nova.conf`` file should be set to - ``False`` as follows: - - .. code-block:: ini - - use_cow_images = False - -- Multipathing - - The ``use_multipath_for_image_xfer`` parameter in the ``cinder.conf`` file - should be set to ``True`` as follows: - - .. code-block:: ini - - use_multipath_for_image_xfer = True - - -Image service optimization -~~~~~~~~~~~~~~~~~~~~~~~~~~ - -Limit the number of copies (XtremIO snapshots) taken from each image cache. - -.. code-block:: ini - - xtremio_volumes_per_glance_cache = 100 - -The default value is ``100``. A value of ``0`` ignores the limit and defers to -the array maximum as the effective limit. - -SSL certification -~~~~~~~~~~~~~~~~~ - -To enable SSL certificate validation, modify the following option in the -``cinder.conf`` file: - -.. code-block:: ini - - driver_ssl_cert_verify = true - -By default, SSL certificate validation is disabled. - -To specify a non-default path to ``CA_Bundle`` file or directory with -certificates of trusted CAs: - - -.. code-block:: ini - - driver_ssl_cert_path = Certificate path - -Configuring CHAP -~~~~~~~~~~~~~~~~ - -The XtremIO Block Storage driver supports CHAP initiator authentication and -discovery. - -If CHAP initiator authentication is required, set the CHAP -Authentication mode to initiator. - -To set the CHAP initiator mode using CLI, run the following XMCLI command: - -.. code-block:: console - - $ modify-chap chap-authentication-mode=initiator - -If CHAP initiator discovery is required, set the CHAP discovery mode to -initiator. - -To set the CHAP initiator discovery mode using CLI, run the following XMCLI -command: - -.. code-block:: console - - $ modify-chap chap-discovery-mode=initiator - -The CHAP initiator modes can also be set via the XMS GUI. - -Refer to XtremIO User Guide for details on CHAP configuration via GUI and CLI. 
- -The CHAP initiator authentication and discovery credentials (username and -password) are generated automatically by the Block Storage driver. Therefore, -there is no need to configure the initial CHAP credentials manually in XMS. - -.. _emc_extremio_configuration_example: - -Configuration example -~~~~~~~~~~~~~~~~~~~~~ - -You can update the ``cinder.conf`` file by editing the necessary parameters as -follows: - -.. code-block:: ini - - [Default] - enabled_backends = XtremIO - - [XtremIO] - volume_driver = cinder.volume.drivers.emc.xtremio.XtremIOFibreChannelDriver - san_ip = XMS_IP - xtremio_cluster_name = Cluster01 - san_login = XMS_USER - san_password = XMS_PASSWD - volume_backend_name = XtremIOAFA diff --git a/doc/source/configuration/block-storage/drivers/falconstor-fss-driver.rst b/doc/source/configuration/block-storage/drivers/falconstor-fss-driver.rst deleted file mode 100644 index 92aa9b524..000000000 --- a/doc/source/configuration/block-storage/drivers/falconstor-fss-driver.rst +++ /dev/null @@ -1,117 +0,0 @@ -======================================================= -FalconStor FSS Storage Fibre Channel and iSCSI drivers -======================================================= - -The ``FSSISCSIDriver`` and ``FSSFCDriver`` drivers run volume operations -by communicating with the FalconStor FSS storage system over HTTP. - -System requirements -~~~~~~~~~~~~~~~~~~~ - -To use the FalconStor FSS drivers, the following are required: - -- FalconStor FSS storage with: - - - iSCSI or FC host interfaces - - - FSS-8.00-8865 or later - -Supported operations -~~~~~~~~~~~~~~~~~~~~ - -The FalconStor volume driver provides the following Cinder -volume operations: - -* Create, delete, attach, and detach volumes. - -* Create, list, and delete volume snapshots. - -* Create a volume from a snapshot. - -* Clone a volume. - -* Extend a volume. - -* Get volume statistics. - -* Create and delete consistency group. - -* Create and delete consistency group snapshots. 
- -* Modify consistency groups. - -* Manage and unmanage a volume. - -iSCSI configuration -~~~~~~~~~~~~~~~~~~~ - -Use the following instructions to update the configuration file for iSCSI: - -.. code-block:: ini - - default_volume_type = FSS - enabled_backends = FSS - - [FSS] - - # IP address of FSS server - san_ip = 172.23.0.1 - # FSS server user name - san_login = Admin - # FSS server password - san_password = secret - # FSS server storage pool id list - fss_pools=P:2,O:3 - # Name to give this storage back-end - volume_backend_name = FSSISCSIDriver - # The iSCSI driver to load - volume_driver = cinder.volume.drivers.falconstor.iscsi.FSSISCSIDriver - - - # ==Optional settings== - - # Enable FSS log message - fss_debug = true - # Enable FSS thin provision - san_thin_provision=true - -Fibre Channel configuration -~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -Use the following instructions to update the configuration file for fibre -channel: - -.. code-block:: ini - - default_volume_type = FSSFC - enabled_backends = FSSFC - - [FSSFC] - # IP address of FSS server - san_ip = 172.23.0.2 - # FSS server user name - san_login = Admin - # FSS server password - san_password = secret - # FSS server storage pool id list - fss_pools=A:1 - # Name to give this storage back-end - volume_backend_name = FSSFCDriver - # The FC driver to load - volume_driver = cinder.volume.drivers.falconstor.fc.FSSFCDriver - - - # ==Optional settings== - - # Enable FSS log message - fss_debug = true - # Enable FSS thin provision - san_thin_provision=true - -Driver options -~~~~~~~~~~~~~~ - -The following table contains the configuration options specific to the -FalconStor FSS storage volume driver. - -.. 
include:: ../../tables/cinder-falconstor.rst diff --git a/doc/source/configuration/block-storage/drivers/fujitsu-eternus-dx-driver.rst b/doc/source/configuration/block-storage/drivers/fujitsu-eternus-dx-driver.rst deleted file mode 100644 index db1e6a468..000000000 --- a/doc/source/configuration/block-storage/drivers/fujitsu-eternus-dx-driver.rst +++ /dev/null @@ -1,225 +0,0 @@ -========================= -Fujitsu ETERNUS DX driver -========================= - -Fujitsu ETERNUS DX driver provides FC and iSCSI support for -ETERNUS DX S3 series. - -The driver performs volume operations by communicating with -ETERNUS DX. It uses a CIM client in Python called PyWBEM -to perform CIM operations over HTTP. - -You can specify RAID Group and Thin Provisioning Pool (TPP) -in ETERNUS DX as a storage pool. - -System requirements -~~~~~~~~~~~~~~~~~~~ - -Supported storages: - -* ETERNUS DX60 S3 -* ETERNUS DX100 S3/DX200 S3 -* ETERNUS DX500 S3/DX600 S3 -* ETERNUS DX8700 S3/DX8900 S3 -* ETERNUS DX200F - -Requirements: - -* Firmware version V10L30 or later is required. -* The multipath environment with ETERNUS Multipath Driver is unsupported. -* An Advanced Copy Feature license is required - to create a snapshot and a clone. - -Supported operations -~~~~~~~~~~~~~~~~~~~~ - -* Create, delete, attach, and detach volumes. -* Create, list, and delete volume snapshots. -* Create a volume from a snapshot. -* Copy an image to a volume. -* Copy a volume to an image. -* Clone a volume. -* Extend a volume. (\*1) -* Get volume statistics. - -(\*1): It is executable only when you use TPP as a storage pool. - -Preparation -~~~~~~~~~~~ - -Package installation --------------------- - -Install the ``python-pywbem`` package for your distribution. - -ETERNUS DX setup ----------------- - -Perform the following steps using ETERNUS Web GUI or ETERNUS CLI. - -.. note:: - * These following operations require an account that has the ``Admin`` role. 
- * For detailed operations, refer to ETERNUS Web GUI User's Guide or - ETERNUS CLI User's Guide for ETERNUS DX S3 series. - -#. Create an account for communication with cinder controller. - -#. Enable the SMI-S of ETERNUS DX. - -#. Register an Advanced Copy Feature license and configure copy table size. - -#. Create a storage pool for volumes. - -#. (Optional) If you want to create snapshots - on a different storage pool for volumes, - create a storage pool for snapshots. - -#. Create Snap Data Pool Volume (SDPV) to enable Snap Data Pool (SDP) for - ``create a snapshot``. - -#. Configure storage ports used for OpenStack. - - - Set those storage ports to CA mode. - - Enable the host-affinity settings of those storage ports. - - (ETERNUS CLI command for enabling host-affinity settings): - - .. code-block:: console - - CLI> set fc-parameters -host-affinity enable -port - CLI> set iscsi-parameters -host-affinity enable -port - -#. Ensure LAN connection between cinder controller and MNT port of ETERNUS DX - and SAN connection between Compute nodes and CA ports of ETERNUS DX. - -Configuration -~~~~~~~~~~~~~ - -#. Add the following entries to ``/etc/cinder/cinder.conf``: - - FC entries: - - .. code-block:: ini - - volume_driver = cinder.volume.drivers.fujitsu.eternus_dx_fc.FJDXFCDriver - cinder_eternus_config_file = /etc/cinder/eternus_dx.xml - - iSCSI entries: - - .. code-block:: ini - - volume_driver = cinder.volume.drivers.fujitsu.eternus_dx_iscsi.FJDXISCSIDriver - cinder_eternus_config_file = /etc/cinder/eternus_dx.xml - - If there is no description about ``cinder_eternus_config_file``, - then the parameter is set to default value - ``/etc/cinder/cinder_fujitsu_eternus_dx.xml``. - -#. Create a driver configuration file. - - Create a driver configuration file in the file path specified - as ``cinder_eternus_config_file`` in ``cinder.conf``, - and add parameters to the file as below: - - FC configuration: - - .. 
code-block:: xml - - - - 0.0.0.0 - 5988 - smisuser - smispassword - raid5_0001 - raid5_0001 - - - iSCSI configuration: - - .. code-block:: xml - - - - 0.0.0.0 - 5988 - smisuser - smispassword - raid5_0001 - raid5_0001 - 1.1.1.1 - 1.1.1.2 - 1.1.1.3 - 1.1.1.4 - - - Where: - - ``EternusIP`` - IP address for the SMI-S connection of the ETERNUS DX. - - Enter the IP address of the MNT port of the ETERNUS DX. - - ``EternusPort`` - Port number for the SMI-S connection port of the ETERNUS DX. - - ``EternusUser`` - User name for the SMI-S connection of the ETERNUS DX. - - ``EternusPassword`` - Password for the SMI-S connection of the ETERNUS DX. - - ``EternusPool`` - Storage pool name for volumes. - - Enter RAID Group name or TPP name in the ETERNUS DX. - - ``EternusSnapPool`` - Storage pool name for snapshots. - - Enter RAID Group name in the ETERNUS DX. - - ``EternusISCSIIP`` (Multiple setting allowed) - iSCSI connection IP address of the ETERNUS DX. - - .. note:: - - * For ``EternusSnapPool``, you can specify only RAID Group name - and cannot specify TPP name. - * You can specify the same RAID Group name for ``EternusPool`` and ``EternusSnapPool`` - if you create volumes and snapshots on the same storage pool. - -Configuration example -~~~~~~~~~~~~~~~~~~~~~ - -#. Edit ``cinder.conf``: - - .. code-block:: ini - - [DEFAULT] - enabled_backends = DXFC, DXISCSI - - [DXFC] - volume_driver = cinder.volume.drivers.fujitsu.eternus_dx_fc.FJDXFCDriver - cinder_eternus_config_file = /etc/cinder/fc.xml - volume_backend_name = FC - - [DXISCSI] - volume_driver = cinder.volume.drivers.fujitsu.eternus_dx_iscsi.FJDXISCSIDriver - cinder_eternus_config_file = /etc/cinder/iscsi.xml - volume_backend_name = ISCSI - -#. Create the driver configuration files ``fc.xml`` and ``iscsi.xml``. - -#. Create a volume type and set extra specs to the type: - - .. 
code-block:: console - - $ openstack volume type create DX_FC - $ openstack volume type set --property volume_backend_name=FC DX_FC - $ openstack volume type create DX_ISCSI - $ openstack volume type set --property volume_backend_name=ISCSI DX_ISCSI - - By issuing these commands, - the volume type ``DX_FC`` is associated with the ``FC``, - and the type ``DX_ISCSI`` is associated with the ``ISCSI``. diff --git a/doc/source/configuration/block-storage/drivers/hds-hnas-driver.rst b/doc/source/configuration/block-storage/drivers/hds-hnas-driver.rst deleted file mode 100644 index fca6ff30a..000000000 --- a/doc/source/configuration/block-storage/drivers/hds-hnas-driver.rst +++ /dev/null @@ -1,548 +0,0 @@ -========================================== -Hitachi NAS Platform NFS driver -========================================== - -This OpenStack Block Storage volume driver provides NFS support -for `Hitachi NAS Platform (HNAS) `_ Models 3080, 3090, 4040, 4060, 4080, and 4100 -with NAS OS 12.2 or higher. - -Supported operations -~~~~~~~~~~~~~~~~~~~~ - -The NFS driver supports these operations: - -* Create, delete, attach, and detach volumes. -* Create, list, and delete volume snapshots. -* Create a volume from a snapshot. -* Copy an image to a volume. -* Copy a volume to an image. -* Clone a volume. -* Extend a volume. -* Get volume statistics. -* Manage and unmanage a volume. -* Manage and unmanage snapshots (`HNAS NFS only`). -* List manageable volumes and snapshots (`HNAS NFS only`). - -HNAS storage requirements -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -Before using NFS services, use the HNAS configuration and management -GUI (SMU) or SSC CLI to configure HNAS to work with the drivers. Additionally: - -1. General: - -* It is mandatory to have at least ``1 storage pool, 1 EVS and 1 file - system`` to be able to run any of the HNAS drivers. -* HNAS drivers consider the space allocated to the file systems to - provide the reports to cinder. 
So, when creating a file system, make sure - it has enough space to fit your needs. -* The file system used should not be created as a ``replication target`` and - should be mounted. -* It is possible to configure HNAS drivers to use distinct EVSs and file - systems, but ``all compute nodes and controllers`` in the cloud must have - access to the EVSs. - -2. For NFS: - -* Create NFS exports, choose a path for them (it must be different from - ``/``) and set the :guilabel: `Show snapshots` option to ``hide and - disable access``. -* For each export used, set the option ``norootsquash`` in the share - ``Access configuration`` so Block Storage services can change the - permissions of its volumes. For example, ``"* (rw, norootsquash)"``. -* Make sure that all computes and controllers have R/W access to the - shares used by cinder HNAS driver. -* In order to use the hardware accelerated features of HNAS NFS, we - recommend setting ``max-nfs-version`` to 3. Refer to Hitachi NAS Platform - command line reference to see how to configure this option. - -Block Storage host requirements -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -The HNAS drivers are supported for Red Hat Enterprise Linux OpenStack -Platform, SUSE OpenStack Cloud, and Ubuntu OpenStack. -The following packages must be installed in all compute, controller and -storage (if any) nodes: - -* ``nfs-utils`` for Red Hat Enterprise Linux OpenStack Platform -* ``nfs-client`` for SUSE OpenStack Cloud -* ``nfs-common``, ``libc6-i386`` for Ubuntu OpenStack - -Package installation --------------------- - -If you are installing the driver from an RPM or DEB package, -follow the steps below: - -#. Install the dependencies: - - In Red Hat: - - .. code-block:: console - - # yum install nfs-utils nfs-utils-lib - - Or in Ubuntu: - - .. code-block:: console - - # apt-get install nfs-common - - Or in SUSE: - - .. 
code-block:: console - - # zypper install nfs-client - - If you are using Ubuntu 12.04, you also need to install ``libc6-i386`` - - .. code-block:: console - - # apt-get install libc6-i386 - -#. Configure the driver as described in the :ref:`hnas-driver-configuration` - section. - -#. Restart all Block Storage services (volume, scheduler, and backup). - -.. _hnas-driver-configuration: - -Driver configuration -~~~~~~~~~~~~~~~~~~~~ - -HNAS supports a variety of storage options and file system capabilities, -which are selected through the definition of volume types combined with the -use of multiple back ends and multiple services. Each back end can configure -up to ``4 service pools``, which can be mapped to cinder volume types. - -The configuration for the driver is read from the back-end sections of the -``cinder.conf``. Each back-end section must have the appropriate configurations -to communicate with your HNAS back end, such as the IP address of the HNAS EVS -that is hosting your data, HNAS SSH access credentials, the configuration of -each of the services in that back end, and so on. You can find examples of such -configurations in the :ref:`configuration_example` section. - -.. note:: - HNAS cinder drivers still support the XML configuration the - same way it was in the older versions, but we recommend configuring the - HNAS cinder drivers only through the ``cinder.conf`` file, - since the XML configuration file from previous versions is being - deprecated as of Newton Release. - -.. note:: - We do not recommend the use of the same NFS export for different back ends. - If possible, configure each back end to - use a different NFS export/file system. - -The following is the definition of each configuration option that can be used -in a HNAS back-end section in the ``cinder.conf`` file: - -.. 
list-table:: **Configuration options in cinder.conf** - :header-rows: 1 - :widths: 25, 10, 15, 50 - - * - Option - - Type - - Default - - Description - * - ``volume_backend_name`` - - Optional - - N/A - - A name that identifies the back end and can be used as an extra-spec to - redirect the volumes to the referenced back end. - * - ``volume_driver`` - - Required - - N/A - - The python module path to the HNAS volume driver python class. When - installing through the rpm or deb packages, you should configure this - to `cinder.volume.drivers.hitachi.hnas_nfs.HNASNFSDriver`. - * - ``nfs_shares_config`` - - Required (only for NFS) - - /etc/cinder/nfs_shares - - Path to the ``nfs_shares`` file. This is required by the base cinder - generic NFS driver and therefore also required by the HNAS NFS driver. - This file should list, one per line, every NFS share being used by the - back end. For example, all the values found in the configuration keys - hnas_svcX_hdp in the HNAS NFS back-end sections. - * - ``hnas_mgmt_ip0`` - - Required - - N/A - - HNAS management IP address. Should be the IP address of the `Admin` - EVS. It is also the IP through which you access the web SMU - administration frontend of HNAS. - * - ``hnas_username`` - - Required - - N/A - - HNAS SSH username - * - ``hds_hnas_nfs_config_file`` - - Optional (deprecated) - - /opt/hds/hnas/cinder_nfs_conf.xml - - Path to the deprecated XML configuration file (only required if using - the XML file) - * - ``hnas_cluster_admin_ip0`` - - Optional (required only for HNAS multi-farm setups) - - N/A - - The IP of the HNAS farm admin. If your SMU controls more than one - system or cluster, this option must be set with the IP of the desired - node. This is different for HNAS multi-cluster setups, which - does not require this option to be set. - * - ``hnas_ssh_private_key`` - - Optional - - N/A - - Path to the SSH private key used to authenticate to the HNAS SMU. Only - required if you do not want to set `hnas_password`. 
- * - ``hnas_ssh_port`` - - Optional - - 22 - - Port on which HNAS is listening for SSH connections - * - ``hnas_password`` - - Required (unless hnas_ssh_private_key is provided) - - N/A - - HNAS password - * - ``hnas_svcX_hdp`` [1]_ - - Required (at least 1) - - N/A - - HDP (export) where the volumes will be created. Use - exports paths to configure this. - * - ``hnas_svcX_pool_name`` - - Required - - N/A - - A `unique string` that is used to refer to this pool within the - context of cinder. You can tell cinder to put volumes of a specific - volume type into this back end, within this pool. See the - ``Service Labels`` and :ref:`configuration_example` sections - for more details. - -.. [1] - Replace X with a number from 0 to 3 (keep the sequence when configuring - the driver) - -Service labels -~~~~~~~~~~~~~~ - -HNAS driver supports differentiated types of service using the service labels. -It is possible to create up to 4 types of them for each back end. (For example -gold, platinum, silver, ssd, and so on). - -After creating the services in the ``cinder.conf`` configuration file, you -need to configure one cinder ``volume_type`` per service. Each ``volume_type`` -must have the metadata service_label with the same name configured in the -``hnas_svcX_pool_name option`` of that service. See the -:ref:`configuration_example` section for more details. If the ``volume_type`` -is not set, cinder selects the service pool with the largest available free -space, or uses other criteria configured in the scheduler filters. - -.. code-block:: console - - $ openstack volume type create default - $ openstack volume type set --property service_label=default default - $ openstack volume type create platinum-tier - $ openstack volume type set --property service_label=platinum platinum-tier - -Multi-backend configuration -~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -You can deploy multiple OpenStack HNAS Driver instances (back ends) that each -controls a separate HNAS or a single HNAS. 
If you use multiple cinder -back ends, remember that each cinder back end can host up to 4 services. Each -back-end section must have the appropriate configurations to communicate with -your HNAS back end, such as the IP address of the HNAS EVS that is hosting -your data, HNAS SSH access credentials, the configuration of each of the -services in that back end, and so on. You can find examples of such -configurations in the :ref:`configuration_example` section. - -If you want the volumes from a volume_type to be casted into a specific -back end, you must configure an extra_spec in the ``volume_type`` with the -value of the ``volume_backend_name`` option from that back end. - -For multiple NFS back ends configuration, each back end should have a -separated ``nfs_shares_config`` and also a separated ``nfs_shares file`` -defined (For example, ``nfs_shares1``, ``nfs_shares2``) with the desired -shares listed in separated lines. - -SSH configuration -~~~~~~~~~~~~~~~~~ - -.. note:: - As of the Newton OpenStack release, the user can no longer run the - driver using a locally installed instance of the :command:`SSC` utility - package. Instead, all communications with the HNAS back end are handled - through :command:`SSH`. - -You can use your username and password to authenticate the Block Storage node -to the HNAS back end. In order to do that, simply configure ``hnas_username`` -and ``hnas_password`` in your back end section within the ``cinder.conf`` -file. - -For example: - -.. code-block:: ini - - [hnas-backend] - # ... - hnas_username = supervisor - hnas_password = supervisor - -Alternatively, the HNAS cinder driver also supports SSH authentication -through public key. To configure that: - -#. If you do not have a pair of public keys already generated, create it in - the Block Storage node (leave the pass-phrase empty): - - .. code-block:: console - - $ mkdir -p /opt/hitachi/ssh - $ ssh-keygen -f /opt/hds/ssh/hnaskey - -#. 
Change the owner of the key to cinder (or the user the volume service will - be run as): - - .. code-block:: console - - # chown -R cinder.cinder /opt/hitachi/ssh - -#. Create the directory ``ssh_keys`` in the SMU server: - - .. code-block:: console - - $ ssh [manager|supervisor]@ 'mkdir -p /var/opt/mercury-main/home/[manager|supervisor]/ssh_keys/' - -#. Copy the public key to the ``ssh_keys`` directory: - - .. code-block:: console - - $ scp /opt/hitachi/ssh/hnaskey.pub [manager|supervisor]@:/var/opt/mercury-main/home/[manager|supervisor]/ssh_keys/ - -#. Access the SMU server: - - .. code-block:: console - - $ ssh [manager|supervisor]@ - -#. Run the command to register the SSH keys: - - .. code-block:: console - - $ ssh-register-public-key -u [manager|supervisor] -f ssh_keys/hnaskey.pub - -#. Check the communication with HNAS in the Block Storage node: - - For multi-farm HNAS: - - .. code-block:: console - - $ ssh -i /opt/hitachi/ssh/hnaskey [manager|supervisor]@ 'ssc df -a' - - Or, for Single-node/Multi-Cluster: - - .. code-block:: console - - $ ssh -i /opt/hitachi/ssh/hnaskey [manager|supervisor]@ 'ssc localhost df -a' - -#. Configure your backend section in ``cinder.conf`` to use your public key: - - .. code-block:: ini - - [hnas-backend] - # ... - hnas_ssh_private_key = /opt/hitachi/ssh/hnaskey - -Managing volumes -~~~~~~~~~~~~~~~~ - -If there are some existing volumes on HNAS that you want to import to cinder, -it is possible to use the manage volume feature to do this. The manage action -on an existing volume is very similar to a volume creation. It creates a -volume entry on cinder database, but instead of creating a new volume in the -back end, it only adds a link to an existing volume. - -.. note:: - It is an admin only feature and you have to be logged as an user - with admin rights to be able to use this. - -#. Under the :menuselection:`System > Volumes` tab, - choose the option :guilabel:`Manage Volume`. - -#. 
Fill the fields :guilabel:`Identifier`, :guilabel:`Host`, - :guilabel:`Volume Name`, and :guilabel:`Volume Type` with volume - information to be managed: - - * :guilabel:`Identifier`: ip:/type/volume_name (*For example:* - 172.24.44.34:/silver/volume-test) - * :guilabel:`Host`: `host@backend-name#pool_name` (*For example:* - `ubuntu@hnas-nfs#test_silver`) - * :guilabel:`Volume Name`: volume_name (*For example:* volume-test) - * :guilabel:`Volume Type`: choose a type of volume (*For example:* silver) - -By CLI: - -.. code-block:: console - - $ cinder manage [--id-type ][--name ][--description ] - [--volume-type ][--availability-zone ] - [--metadata [ [ ...]]][--bootable] - -Example: - -.. code-block:: console - - $ cinder manage --name volume-test --volume-type silver - ubuntu@hnas-nfs#test_silver 172.24.44.34:/silver/volume-test - -Managing snapshots -~~~~~~~~~~~~~~~~~~ - -The manage snapshots feature works very similarly to the manage volumes -feature, currently supported on HNAS cinder drivers. So, if you have a volume -already managed by cinder which has snapshots that are not managed by cinder, -it is possible to use manage snapshots to import these snapshots and link them -with their original volume. - -.. note:: - For HNAS NFS cinder driver, the snapshots of volumes are clones of volumes - that were created using :command:`file-clone-create`, not the HNAS - :command:`snapshot-\*` feature. Check the HNAS users - documentation to have details about those 2 features. - -Currently, the manage snapshots function does not support importing snapshots -(generally created by storage's :command:`file-clone` operation) -``without parent volumes`` or when the parent volume is ``in-use``. In this -case, the ``manage volumes`` should be used to import the snapshot as a normal -cinder volume. - -Also, it is an admin only feature and you have to be logged as a user with -admin rights to be able to use this. - -.. 
note:: - Although there is a verification to prevent importing snapshots using - non-related volumes as parents, it is possible to manage a snapshot using - any related cloned volume. So, when managing a snapshot, it is extremely - important to make sure that you are using the correct parent volume. - -.. code-block:: console - - $ cinder snapshot-manage - -* :guilabel:`Identifier`: evs_ip:/export_name/snapshot_name - (*For example:* 172.24.44.34:/export1/snapshot-test) - -* :guilabel:`Volume`: Parent volume ID (*For example:* - 061028c0-60cf-499f-99e2-2cd6afea081f) - -Example: - -.. code-block:: console - - $ cinder snapshot-manage 061028c0-60cf-499f-99e2-2cd6afea081f 172.24.44.34:/export1/snapshot-test - -.. note:: - This feature is currently available only for HNAS NFS Driver. - -.. _configuration_example: - -Configuration example -~~~~~~~~~~~~~~~~~~~~~ - -Below are configuration examples for NFS backend: - -#. HNAS NFS Driver - - #. For HNAS NFS driver, create this section in your ``cinder.conf`` file: - - .. code-block:: ini - - [hnas-nfs] - volume_driver = cinder.volume.drivers.hitachi.hnas_nfs.HNASNFSDriver - nfs_shares_config = /home/cinder/nfs_shares - volume_backend_name = hnas_nfs_backend - hnas_username = supervisor - hnas_password = supervisor - hnas_mgmt_ip0 = 172.24.44.15 - - hnas_svc0_pool_name = nfs_gold - hnas_svc0_hdp = 172.24.49.21:/gold_export - - hnas_svc1_pool_name = nfs_platinum - hnas_svc1_hdp = 172.24.49.21:/silver_platinum - - hnas_svc2_pool_name = nfs_silver - hnas_svc2_hdp = 172.24.49.22:/silver_export - - hnas_svc3_pool_name = nfs_bronze - hnas_svc3_hdp = 172.24.49.23:/bronze_export - - #. Add it to the ``enabled_backends`` list, under the ``DEFAULT`` section - of your ``cinder.conf`` file: - - .. code-block:: ini - - [DEFAULT] - enabled_backends = hnas-nfs - - #. Add the configured exports to the ``nfs_shares`` file: - - .. 
code-block:: vim - - 172.24.49.21:/gold_export - 172.24.49.21:/silver_platinum - 172.24.49.22:/silver_export - 172.24.49.23:/bronze_export - - #. Register a volume type with cinder and associate it with - this backend: - - .. code-block:: console - - $ openstack volume type create hnas_nfs_gold - $ openstack volume type set --property volume_backend_name=hnas_nfs_backend \ - service_label=nfs_gold hnas_nfs_gold - $ openstack volume type create hnas_nfs_platinum - $ openstack volume type set --property volume_backend_name=hnas_nfs_backend \ - service_label=nfs_platinum hnas_nfs_platinum - $ openstack volume type create hnas_nfs_silver - $ openstack volume type set --property volume_backend_name=hnas_nfs_backend \ - service_label=nfs_silver hnas_nfs_silver - $ openstack volume type create hnas_nfs_bronze - $ openstack volume type set --property volume_backend_name=hnas_nfs_backend \ - service_label=nfs_bronze hnas_nfs_bronze - -Additional notes and limitations -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -* The ``get_volume_stats()`` function always provides the available - capacity based on the combined sum of all the HDPs that are used in - these services labels. - -* After changing the configuration on the storage node, the Block Storage - driver must be restarted. - -* On Red Hat, if the system is configured to use SELinux, you need to - set ``virt_use_nfs = on`` for NFS driver work properly. - - .. code-block:: console - - # setsebool -P virt_use_nfs on - -* It is not possible to manage a volume if there is a slash (``/``) or - a colon (``:``) in the volume name. - -* File system ``auto-expansion``: Although supported, we do not recommend using - file systems with auto-expansion setting enabled because the scheduler uses - the file system capacity reported by the driver to determine if new volumes - can be created. 
For instance, in a setup with a file system that can expand - to 200GB but is at 100GB capacity, with 10GB free, the scheduler will not - allow a 15GB volume to be created. In this case, manual expansion would - have to be triggered by an administrator. We recommend always creating the - file system at the ``maximum capacity`` or periodically expanding the file - system manually. - -* The ``hnas_svcX_pool_name`` option must be unique for a given back end. It - is still possible to use the deprecated form ``hnas_svcX_volume_type``, but - this support will be removed in a future release. - -* SSC simultaneous connections limit: In very busy environments, if 2 or - more volume hosts are configured to use the same storage, some requests - (create, delete and so on) can have some attempts failed and re-tried ( - ``5 attempts`` by default) due to an HNAS connection limitation ( - ``max of 5`` simultaneous connections). diff --git a/doc/source/configuration/block-storage/drivers/hitachi-storage-volume-driver.rst b/doc/source/configuration/block-storage/drivers/hitachi-storage-volume-driver.rst deleted file mode 100644 index 5116ec15f..000000000 --- a/doc/source/configuration/block-storage/drivers/hitachi-storage-volume-driver.rst +++ /dev/null @@ -1,169 +0,0 @@ -============================= -Hitachi storage volume driver -============================= - -Hitachi storage volume driver provides iSCSI and Fibre Channel -support for Hitachi storages. - -System requirements -~~~~~~~~~~~~~~~~~~~ - -Supported storages: - -* Hitachi Virtual Storage Platform G1000 (VSP G1000) -* Hitachi Virtual Storage Platform (VSP) -* Hitachi Unified Storage VM (HUS VM) -* Hitachi Unified Storage 100 Family (HUS 100 Family) - -Required software: - -* RAID Manager Ver 01-32-03/01 or later for VSP G1000/VSP/HUS VM -* Hitachi Storage Navigator Modular 2 (HSNM2) Ver 27.50 or later - for HUS 100 Family - - .. note:: - - HSNM2 needs to be installed under ``/usr/stonavm``. 
- -Required licenses: - -* Hitachi In-System Replication Software for VSP G1000/VSP/HUS VM -* (Mandatory) ShadowImage in-system replication for HUS 100 Family -* (Optional) Copy-on-Write Snapshot for HUS 100 Family - -Additionally, the ``pexpect`` package is required. - -Supported operations -~~~~~~~~~~~~~~~~~~~~ - -* Create, delete, attach, and detach volumes. -* Create, list, and delete volume snapshots. -* Manage and unmanage volume snapshots. -* Create a volume from a snapshot. -* Copy a volume to an image. -* Copy an image to a volume. -* Clone a volume. -* Extend a volume. -* Get volume statistics. - -Configuration -~~~~~~~~~~~~~ - -Set up Hitachi storage ----------------------- - -You need to specify settings as described below. For details about each step, -see the user's guide of the storage device. Use a storage administrative -software such as ``Storage Navigator`` to set up the storage device so that -LDEVs and host groups can be created and deleted, and LDEVs can be connected -to the server and can be asynchronously copied. - -#. Create a Dynamic Provisioning pool. - -#. Connect the ports at the storage to the controller node and compute nodes. - -#. For VSP G1000/VSP/HUS VM, set ``port security`` to ``enable`` for the - ports at the storage. - -#. For HUS 100 Family, set ``Host Group security`` or - ``iSCSI target security`` to ``ON`` for the ports at the storage. - -#. For the ports at the storage, create host groups (iSCSI targets) whose - names begin with HBSD- for the controller node and each compute node. - Then register a WWN (initiator IQN) for each of the controller node and - compute nodes. - -#. For VSP G1000/VSP/HUS VM, perform the following: - - * Create a storage device account belonging to the Administrator User - Group. (To use multiple storage devices, create the same account name - for all the target storage devices, and specify the same resource - group and permissions.) 
- * Create a command device (In-Band), and set user authentication to ``ON``. - * Register the created command device to the host group for the controller - node. - * To use the Thin Image function, create a pool for Thin Image. - -#. For HUS 100 Family, perform the following: - - * Use the :command:`auunitaddauto` command to register the - unit name and controller of the storage device to HSNM2. - * When connecting via iSCSI, if you are using CHAP certification, specify - the same user and password as that used for the storage port. - -Set up Hitachi Gigabit Fibre Channel adaptor --------------------------------------------- - -Change a parameter of the hfcldd driver and update the ``initram`` file -if Hitachi Gigabit Fibre Channel adaptor is used: - -.. code-block:: console - - # /opt/hitachi/drivers/hba/hfcmgr -E hfc_rport_lu_scan 1 - # dracut -f initramfs-KERNEL_VERSION.img KERNEL_VERSION - # reboot - -Set up Hitachi storage volume driver ------------------------------------- - -#. Create a directory: - - .. code-block:: console - - # mkdir /var/lock/hbsd - # chown cinder:cinder /var/lock/hbsd - -#. Create ``volume type`` and ``volume key``. - - This example shows that HUS100_SAMPLE is created as ``volume type`` - and hus100_backend is registered as ``volume key``: - - .. code-block:: console - - $ openstack volume type create HUS100_SAMPLE - $ openstack volume type set --property volume_backend_name=hus100_backend HUS100_SAMPLE - -#. Specify any identical ``volume type`` name and ``volume key``. - - To confirm the created ``volume type``, please execute the following - command: - - .. code-block:: console - - $ openstack volume type list --long - -#. Edit the ``/etc/cinder/cinder.conf`` file as follows. - - If you use Fibre Channel: - - .. code-block:: ini - - volume_driver = cinder.volume.drivers.hitachi.hbsd_fc.HBSDFCDriver - - If you use iSCSI: - - .. 
code-block:: ini - - volume_driver = cinder.volume.drivers.hitachi.hbsd_iscsi.HBSDISCSIDriver - - Also, set ``volume_backend_name`` created by :command:`openstack volume type set` - command: - - .. code-block:: ini - - volume_backend_name = hus100_backend - - This table shows configuration options for Hitachi storage volume driver. - - .. include:: ../../tables/cinder-hitachi-hbsd.rst - -#. Restart the Block Storage service. - - When the startup is done, "MSGID0003-I: The storage backend can be used." - is output into ``/var/log/cinder/volume.log`` as follows: - - .. code-block:: console - - 2014-09-01 10:34:14.169 28734 WARNING cinder.volume.drivers.hitachi. - hbsd_common [req-a0bb70b5-7c3f-422a-a29e-6a55d6508135 None None] - MSGID0003-I: The storage backend can be used. (config_group: hus100_backend) diff --git a/doc/source/configuration/block-storage/drivers/hp-msa-driver.rst b/doc/source/configuration/block-storage/drivers/hp-msa-driver.rst deleted file mode 100644 index bb348d2f9..000000000 --- a/doc/source/configuration/block-storage/drivers/hp-msa-driver.rst +++ /dev/null @@ -1,165 +0,0 @@ -====================================== -HP MSA Fibre Channel and iSCSI drivers -====================================== - -The ``HPMSAFCDriver`` and ``HPMSAISCSIDriver`` Cinder drivers allow HP MSA -2040 or 1040 arrays to be used for Block Storage in OpenStack deployments. - -System requirements -~~~~~~~~~~~~~~~~~~~ - -To use the HP MSA drivers, the following are required: - -- HP MSA 2040 or 1040 array with: - - - iSCSI or FC host interfaces - - G22x firmware or later - -- Network connectivity between the OpenStack host and the array management - interfaces - -- HTTPS or HTTP must be enabled on the array - -Supported operations -~~~~~~~~~~~~~~~~~~~~ - -- Create, delete, attach, and detach volumes. -- Create, list, and delete volume snapshots. -- Create a volume from a snapshot. -- Copy an image to a volume. -- Copy a volume to an image. -- Clone a volume. 
-- Extend a volume. -- Migrate a volume with back-end assistance. -- Retype a volume. -- Manage and unmanage a volume. - -Configuring the array -~~~~~~~~~~~~~~~~~~~~~ - -#. Verify that the array can be managed via an HTTPS connection. HTTP can also - be used if ``hpmsa_api_protocol=http`` is placed into the appropriate - sections of the ``cinder.conf`` file. - - Confirm that virtual pools A and B are present if you plan to use virtual - pools for OpenStack storage. - - If you plan to use vdisks instead of virtual pools, create or identify one - or more vdisks to be used for OpenStack storage; typically this will mean - creating or setting aside one disk group for each of the A and B - controllers. - -#. Edit the ``cinder.conf`` file to define a storage back end entry for each - storage pool on the array that will be managed by OpenStack. Each entry - consists of a unique section name, surrounded by square brackets, followed - by options specified in a ``key=value`` format. - - * The ``hpmsa_backend_name`` value specifies the name of the storage pool - or vdisk on the array. - - * The ``volume_backend_name`` option value can be a unique value, if you - wish to be able to assign volumes to a specific storage pool on the - array, or a name that is shared among multiple storage pools to let the - volume scheduler choose where new volumes are allocated. - - * The rest of the options will be repeated for each storage pool in a given - array: the appropriate Cinder driver name; IP address or host name of the - array management interface; the username and password of an array user - account with ``manage`` privileges; and the iSCSI IP addresses for the - array if using the iSCSI transport protocol. - - In the examples below, two back ends are defined, one for pool A and one for - pool B, and a common ``volume_backend_name`` is used so that a single - volume type definition can be used to allocate volumes from both pools. - - **iSCSI example back-end entries** - - .. 
code-block:: ini - - [pool-a] - hpmsa_backend_name = A - volume_backend_name = hpmsa-array - volume_driver = cinder.volume.drivers.san.hp.hpmsa_iscsi.HPMSAISCSIDriver - san_ip = 10.1.2.3 - san_login = manage - san_password = !manage - hpmsa_iscsi_ips = 10.2.3.4,10.2.3.5 - - [pool-b] - hpmsa_backend_name = B - volume_backend_name = hpmsa-array - volume_driver = cinder.volume.drivers.san.hp.hpmsa_iscsi.HPMSAISCSIDriver - san_ip = 10.1.2.3 - san_login = manage - san_password = !manage - hpmsa_iscsi_ips = 10.2.3.4,10.2.3.5 - - **Fibre Channel example back-end entries** - - .. code-block:: ini - - [pool-a] - hpmsa_backend_name = A - volume_backend_name = hpmsa-array - volume_driver = cinder.volume.drivers.san.hp.hpmsa_fc.HPMSAFCDriver - san_ip = 10.1.2.3 - san_login = manage - san_password = !manage - - [pool-b] - hpmsa_backend_name = B - volume_backend_name = hpmsa-array - volume_driver = cinder.volume.drivers.san.hp.hpmsa_fc.HPMSAFCDriver - san_ip = 10.1.2.3 - san_login = manage - san_password = !manage - -#. If any ``volume_backend_name`` value refers to a vdisk rather than a - virtual pool, add an additional statement ``hpmsa_backend_type = linear`` - to that back end entry. - -#. If HTTPS is not enabled in the array, include ``hpmsa_api_protocol = http`` - in each of the back-end definitions. - -#. If HTTPS is enabled, you can enable certificate verification with the option - ``hpmsa_verify_certificate=True``. You may also use the - ``hpmsa_verify_certificate_path`` parameter to specify the path to a - CA\_BUNDLE file containing CAs other than those in the default list. - -#. Modify the ``[DEFAULT]`` section of the ``cinder.conf`` file to add an - ``enabled_back-ends`` parameter specifying the backend entries you added, - and a ``default_volume_type`` parameter specifying the name of a volume type - that you will create in the next step. - - **Example of [DEFAULT] section changes** - - .. 
code-block:: ini - - [DEFAULT] - enabled_backends = pool-a,pool-b - default_volume_type = hpmsa - - -#. Create a new volume type for each distinct ``volume_backend_name`` value - that you added in the ``cinder.conf`` file. The example below assumes that - the same ``volume_backend_name=hpmsa-array`` option was specified in all - of the entries, and specifies that the volume type ``hpmsa`` can be used to - allocate volumes from any of them. - - **Example of creating a volume type** - - .. code-block:: console - - $ openstack volume type create hpmsa - $ openstack volume type set --property volume_backend_name=hpmsa-array hpmsa - -#. After modifying the ``cinder.conf`` file, restart the ``cinder-volume`` - service. - -Driver-specific options -~~~~~~~~~~~~~~~~~~~~~~~ - -The following table contains the configuration options that are specific to -the HP MSA drivers. - -.. include:: ../../tables/cinder-hpmsa.rst diff --git a/doc/source/configuration/block-storage/drivers/hpe-3par-driver.rst b/doc/source/configuration/block-storage/drivers/hpe-3par-driver.rst deleted file mode 100644 index 5fb875f37..000000000 --- a/doc/source/configuration/block-storage/drivers/hpe-3par-driver.rst +++ /dev/null @@ -1,384 +0,0 @@ -======================================== -HPE 3PAR Fibre Channel and iSCSI drivers -======================================== - -The ``HPE3PARFCDriver`` and ``HPE3PARISCSIDriver`` drivers, which are based on -the Block Storage service (Cinder) plug-in architecture, run volume operations -by communicating with the HPE 3PAR storage system over HTTP, HTTPS, and SSH -connections. The HTTP and HTTPS communications use ``python-3parclient``, -which is part of the Python standard library. - -For information about how to manage HPE 3PAR storage systems, see the HPE 3PAR -user documentation. 
- -System requirements -~~~~~~~~~~~~~~~~~~~ - -To use the HPE 3PAR drivers, install the following software and components on -the HPE 3PAR storage system: - -* HPE 3PAR Operating System software version 3.1.3 MU1 or higher. - - * Deduplication provisioning requires SSD disks and HPE 3PAR Operating - System software version 3.2.1 MU1 or higher. - - * Enabling Flash Cache Policy requires the following: - - * Array must contain SSD disks. - - * HPE 3PAR Operating System software version 3.2.1 MU2 or higher. - - * python-3parclient version 4.2.0 or newer. - - * Array must have the Adaptive Flash Cache license installed. - - * Flash Cache must be enabled on the array with the CLI command - :command:`createflashcache SIZE`, where size must be in 16 GB increments. - For example, :command:`createflashcache 128g` will create 128 GB of Flash - Cache for each node pair in the array. - - * The Dynamic Optimization license is required to support any feature that - results in a volume changing provisioning type or CPG. This may apply to - the volume :command:`migrate`, :command:`retype` and :command:`manage` - commands. - - * The Virtual Copy License is required to support any feature that involves - volume snapshots. This applies to the volume :command:`snapshot-*` - commands. - -* HPE 3PAR drivers will now check the licenses installed on the array and - disable driver capabilities based on available licenses. This will apply to - thin provisioning, QoS support and volume replication. - -* HPE 3PAR Web Services API Server must be enabled and running. - -* One Common Provisioning Group (CPG). - -* Additionally, you must install the ``python-3parclient`` version 4.2.0 or - newer from the Python standard library on the system with the enabled Block - Storage service volume drivers. - -Supported operations -~~~~~~~~~~~~~~~~~~~~ - -* Create, delete, attach, and detach volumes. - -* Create, list, and delete volume snapshots. - -* Create a volume from a snapshot. 
- -* Copy an image to a volume. - -* Copy a volume to an image. - -* Clone a volume. - -* Extend a volume. - -* Migrate a volume with back-end assistance. - -* Retype a volume. - -* Manage and unmanage a volume. - -* Manage and unmanage a snapshot. - -* Replicate host volumes. - -* Fail-over host volumes. - -* Fail-back host volumes. - -* Create, delete, update, snapshot, and clone consistency groups. - -* Create and delete consistency group snapshots. - -* Create a consistency group from a consistency group snapshot or another - group. - -Volume type support for both HPE 3PAR drivers includes the ability to set the -following capabilities in the OpenStack Block Storage API -``cinder.api.contrib.types_extra_specs`` volume type extra specs extension -module: - -* ``hpe3par:snap_cpg`` - -* ``hpe3par:provisioning`` - -* ``hpe3par:persona`` - -* ``hpe3par:vvs`` - -* ``hpe3par:flash_cache`` - -To work with the default filter scheduler, the key values are case sensitive -and scoped with ``hpe3par:``. For information about how to set the key-value -pairs and associate them with a volume type, run the following command: - -.. code-block:: console - - $ openstack help volume type - -.. note:: - - Volumes that are cloned only support the extra specs keys cpg, snap_cpg, - provisioning and vvs. The others are ignored. In addition the comments - section of the cloned volume in the HPE 3PAR StoreServ storage array is - not populated. - -If volume types are not used or a particular key is not set for a volume type, -the following defaults are used: - -* ``hpe3par:cpg`` - Defaults to the ``hpe3par_cpg`` setting in the - ``cinder.conf`` file. - -* ``hpe3par:snap_cpg`` - Defaults to the ``hpe3par_snap`` setting in - the ``cinder.conf`` file. If ``hpe3par_snap`` is not set, it defaults - to the ``hpe3par_cpg`` setting. - -* ``hpe3par:provisioning`` - Defaults to ``thin`` provisioning, the valid - values are ``thin``, ``full``, and ``dedup``. 
- -* ``hpe3par:persona`` - Defaults to the ``2 - Generic-ALUA`` persona. The - valid values are: - - * ``1 - Generic`` - * ``2 - Generic-ALUA`` - * ``3 - Generic-legacy`` - * ``4 - HPUX-legacy`` - * ``5 - AIX-legacy`` - * ``6 - EGENERA`` - * ``7 - ONTAP-legacy`` - * ``8 - VMware`` - * ``9 - OpenVMS`` - * ``10 - HPUX`` - * ``11 - WindowsServer`` - -* ``hpe3par:flash_cache`` - Defaults to ``false``, the valid values are - ``true`` and ``false``. - -QoS support for both HPE 3PAR drivers includes the ability to set the -following capabilities in the OpenStack Block Storage API -``cinder.api.contrib.qos_specs_manage`` qos specs extension module: - -* ``minBWS`` - -* ``maxBWS`` - -* ``minIOPS`` - -* ``maxIOPS`` - -* ``latency`` - -* ``priority`` - -The qos keys above no longer require to be scoped but must be created and -associated to a volume type. For information about how to set the key-value -pairs and associate them with a volume type, run the following commands: - -.. code-block:: console - - $ openstack help volume qos - -The following keys require that the HPE 3PAR StoreServ storage array has a -Priority Optimization license installed. - -``hpe3par:vvs`` - The virtual volume set name that has been predefined by the Administrator - with quality of service (QoS) rules associated to it. If you specify - extra_specs ``hpe3par:vvs``, the qos_specs ``minIOPS``, ``maxIOPS``, - ``minBWS``, and ``maxBWS`` settings are ignored. - -``minBWS`` - The QoS I/O issue bandwidth minimum goal in MBs. If not set, the I/O issue - bandwidth rate has no minimum goal. - -``maxBWS`` - The QoS I/O issue bandwidth rate limit in MBs. If not set, the I/O issue - bandwidth rate has no limit. - -``minIOPS`` - The QoS I/O issue count minimum goal. If not set, the I/O issue count has no - minimum goal. - -``maxIOPS`` - The QoS I/O issue count rate limit. If not set, the I/O issue count rate has - no limit. - -``latency`` - The latency goal in milliseconds. 
- -``priority`` - The priority of the QoS rule over other rules. If not set, the priority is - ``normal``, valid values are ``low``, ``normal`` and ``high``. - -.. note:: - - Since the Icehouse release, minIOPS and maxIOPS must be used together to - set I/O limits. Similarly, minBWS and maxBWS must be used together. If only - one is set the other will be set to the same value. - -The following key requires that the HPE 3PAR StoreServ storage array has an -Adaptive Flash Cache license installed. - -* ``hpe3par:flash_cache`` - The flash-cache policy, which can be turned on and - off by setting the value to ``true`` or ``false``. - -LDAP and AD authentication is now supported in the HPE 3PAR driver. - -The 3PAR back end must be properly configured for LDAP and AD authentication -prior to configuring the volume driver. For details on setting up LDAP with -3PAR, see the 3PAR user guide. - -Once configured, ``hpe3par_username`` and ``hpe3par_password`` parameters in -``cinder.conf`` can be used with LDAP and AD credentials. - -Enable the HPE 3PAR Fibre Channel and iSCSI drivers -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -The ``HPE3PARFCDriver`` and ``HPE3PARISCSIDriver`` are installed with the -OpenStack software. - -#. Install the ``python-3parclient`` Python package on the OpenStack Block - Storage system. - - .. code-block:: console - - $ pip install 'python-3parclient>=4.0,<5.0' - - -#. Verify that the HPE 3PAR Web Services API server is enabled and running on - the HPE 3PAR storage system. - - a. Log onto the HP 3PAR storage system with administrator access. - - .. code-block:: console - - $ ssh 3paradm@ - - b. View the current state of the Web Services API Server. - - .. code-block:: console - - $ showwsapi - -Service- -State- -HTTP_State- HTTP_Port -HTTPS_State- HTTPS_Port -Version- - Enabled Active Enabled 8008 Enabled 8080 1.1 - - c. If the Web Services API Server is disabled, start it. - - .. code-block:: console - - $ startwsapi - -#. 
If the HTTP or HTTPS state is disabled, enable one of them. - - .. code-block:: console - - $ setwsapi -http enable - - or - - .. code-block:: console - - $ setwsapi -https enable - - .. note:: - - To stop the Web Services API Server, use the :command:`stopwsapi` command. For - other options run the :command:`setwsapi –h` command. - -#. If you are not using an existing CPG, create a CPG on the HPE 3PAR storage - system to be used as the default location for creating volumes. - -#. Make the following changes in the ``/etc/cinder/cinder.conf`` file. - - .. code-block:: ini - - # 3PAR WS API Server URL - hpe3par_api_url=https://10.10.0.141:8080/api/v1 - - # 3PAR username with the 'edit' role - hpe3par_username=edit3par - - # 3PAR password for the user specified in hpe3par_username - hpe3par_password=3parpass - - # 3PAR CPG to use for volume creation - hpe3par_cpg=OpenStackCPG_RAID5_NL - - # IP address of SAN controller for SSH access to the array - san_ip=10.10.22.241 - - # Username for SAN controller for SSH access to the array - san_login=3paradm - - # Password for SAN controller for SSH access to the array - san_password=3parpass - - # FIBRE CHANNEL(uncomment the next line to enable the FC driver) - # volume_driver=cinder.volume.drivers.hpe.hpe_3par_fc.HPE3PARFCDriver - - # iSCSI (uncomment the next line to enable the iSCSI driver and - # hpe3par_iscsi_ips or iscsi_ip_address) - #volume_driver=cinder.volume.drivers.hpe.hpe_3par_iscsi.HPE3PARISCSIDriver - - # iSCSI multiple port configuration - # hpe3par_iscsi_ips=10.10.220.253:3261,10.10.222.234 - - # Still available for single port iSCSI configuration - #iscsi_ip_address=10.10.220.253 - - - # Enable HTTP debugging to 3PAR - hpe3par_debug=False - - # Enable CHAP authentication for iSCSI connections. - hpe3par_iscsi_chap_enabled=false - - # The CPG to use for Snapshots for volumes. If empty hpe3par_cpg will be - # used. - hpe3par_snap_cpg=OpenStackSNAP_CPG - - # Time in hours to retain a snapshot. 
You can't delete it before this - # expires. - hpe3par_snapshot_retention=48 - - # Time in hours when a snapshot expires and is deleted. This must be - # larger than retention. - hpe3par_snapshot_expiration=72 - - # The ratio of oversubscription when thin provisioned volumes are - # involved. Default ratio is 20.0, this means that a provisioned - # capacity can be 20 times of the total physical capacity. - max_over_subscription_ratio=20.0 - - # This flag represents the percentage of reserved back-end capacity. - reserved_percentage=15 - - .. note:: - - You can enable only one driver on each cinder instance unless you enable - multiple back-end support. See the Cinder multiple back-end support - instructions to enable this feature. - - .. note:: - - You can configure one or more iSCSI addresses by using the - ``hpe3par_iscsi_ips`` option. Separate multiple IP addresses with a - comma (``,``). When you configure multiple addresses, the driver selects - the iSCSI port with the fewest active volumes at attach time. The 3PAR - array does not allow the default port 3260 to be changed, so IP ports - need not be specified. - -#. Save the changes to the ``cinder.conf`` file and restart the cinder-volume - service. - -The HPE 3PAR Fibre Channel and iSCSI drivers are now enabled on your -OpenStack system. If you experience problems, review the Block Storage -service log files for errors. - -The following table contains all the configuration options supported by -the HPE 3PAR Fibre Channel and iSCSI drivers. - -.. 
include:: ../../tables/cinder-hpe3par.rst diff --git a/doc/source/configuration/block-storage/drivers/hpe-lefthand-driver.rst b/doc/source/configuration/block-storage/drivers/hpe-lefthand-driver.rst deleted file mode 100644 index 0026246b1..000000000 --- a/doc/source/configuration/block-storage/drivers/hpe-lefthand-driver.rst +++ /dev/null @@ -1,216 +0,0 @@ -================================ -HPE LeftHand/StoreVirtual driver -================================ - -The ``HPELeftHandISCSIDriver`` is based on the Block Storage service plug-in -architecture. Volume operations are run by communicating with the HPE -LeftHand/StoreVirtual system over HTTPS, or SSH connections. HTTPS -communications use the ``python-lefthandclient``, which is part of the Python -standard library. - -The ``HPELeftHandISCSIDriver`` can be configured to run using a REST client to -communicate with the array. For performance improvements and new functionality -the ``python-lefthandclient`` must be downloaded, and HP LeftHand/StoreVirtual -Operating System software version 11.5 or higher is required on the array. To -configure the driver in standard mode, see -`HPE LeftHand/StoreVirtual REST driver`_. - -For information about how to manage HPE LeftHand/StoreVirtual storage systems, -see the HPE LeftHand/StoreVirtual user documentation. - -HPE LeftHand/StoreVirtual REST driver -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -This section describes how to configure the HPE LeftHand/StoreVirtual Block -Storage driver. - -System requirements -------------------- - -To use the HPE LeftHand/StoreVirtual driver, do the following: - -* Install LeftHand/StoreVirtual Operating System software version 11.5 or - higher on the HPE LeftHand/StoreVirtual storage system. - -* Create a cluster group. - -* Install the ``python-lefthandclient`` version 2.1.0 from the Python Package - Index on the system with the enabled Block Storage service - volume drivers. 
- -Supported operations --------------------- - -* Create, delete, attach, and detach volumes. - -* Create, list, and delete volume snapshots. - -* Create a volume from a snapshot. - -* Copy an image to a volume. - -* Copy a volume to an image. - -* Clone a volume. - -* Extend a volume. - -* Get volume statistics. - -* Migrate a volume with back-end assistance. - -* Retype a volume. - -* Manage and unmanage a volume. - -* Manage and unmanage a snapshot. - -* Replicate host volumes. - -* Fail-over host volumes. - -* Fail-back host volumes. - -* Create, delete, update, and snapshot consistency groups. - -When you use back end assisted volume migration, both source and destination -clusters must be in the same HPE LeftHand/StoreVirtual management group. -The HPE LeftHand/StoreVirtual array will use native LeftHand APIs to migrate -the volume. The volume cannot be attached or have snapshots to migrate. - -Volume type support for the driver includes the ability to set the -following capabilities in the Block Storage API -``cinder.api.contrib.types_extra_specs`` volume type extra specs -extension module. - -* ``hpelh:provisioning`` - -* ``hpelh:ao`` - -* ``hpelh:data_pl`` - -To work with the default filter scheduler, the key-value pairs are -case-sensitive and scoped with ``hpelh:``. For information about how to set -the key-value pairs and associate them with a volume type, run the following -command: - -.. code-block:: console - - $ openstack help volume type - -* The following keys require the HPE LeftHand/StoreVirtual storage - array be configured for: - - ``hpelh:ao`` - The HPE LeftHand/StoreVirtual storage array must be configured for - Adaptive Optimization. - - ``hpelh:data_pl`` - The HPE LeftHand/StoreVirtual storage array must be able to support the - Data Protection level specified by the extra spec. 
- -* If volume types are not used or a particular key is not set for a volume - type, the following defaults are used: - - ``hpelh:provisioning`` - Defaults to ``thin`` provisioning, the valid values are, ``thin`` and - ``full`` - - ``hpelh:ao`` - Defaults to ``true``, the valid values are, ``true`` and ``false``. - - ``hpelh:data_pl`` - Defaults to ``r-0``, Network RAID-0 (None), the valid values are, - - * ``r-0``, Network RAID-0 (None) - - * ``r-5``, Network RAID-5 (Single Parity) - - * ``r-10-2``, Network RAID-10 (2-Way Mirror) - - * ``r-10-3``, Network RAID-10 (3-Way Mirror) - - * ``r-10-4``, Network RAID-10 (4-Way Mirror) - - * ``r-6``, Network RAID-6 (Dual Parity) - -Enable the HPE LeftHand/StoreVirtual iSCSI driver -------------------------------------------------- - -The ``HPELeftHandISCSIDriver`` is installed with the OpenStack software. - -#. Install the ``python-lefthandclient`` Python package on the OpenStack Block - Storage system. - - .. code-block:: console - - $ pip install 'python-lefthandclient>=2.1,<3.0' - -#. If you are not using an existing cluster, create a cluster on the HPE - LeftHand storage system to be used as the cluster for creating volumes. - -#. Make the following changes in the ``/etc/cinder/cinder.conf`` file: - - .. code-block:: ini - - # LeftHand WS API Server URL - hpelefthand_api_url=https://10.10.0.141:8081/lhos - - # LeftHand Super user username - hpelefthand_username=lhuser - - # LeftHand Super user password - hpelefthand_password=lhpass - - # LeftHand cluster to use for volume creation - hpelefthand_clustername=ClusterLefthand - - # LeftHand iSCSI driver - volume_driver=cinder.volume.drivers.hpe.hpe_lefthand_iscsi.HPELeftHandISCSIDriver - - # Should CHAPS authentication be used (default=false) - hpelefthand_iscsi_chap_enabled=false - - # Enable HTTP debugging to LeftHand (default=false) - hpelefthand_debug=false - - # The ratio of oversubscription when thin provisioned volumes are - # involved. 
Default ratio is 20.0, this means that a provisioned capacity - # can be 20 times of the total physical capacity. - max_over_subscription_ratio=20.0 - - # This flag represents the percentage of reserved back-end capacity. - reserved_percentage=15 - - You can enable only one driver on each cinder instance unless you enable - multiple back end support. See the Cinder multiple back end support - instructions to enable this feature. - - If the ``hpelefthand_iscsi_chap_enabled`` is set to ``true``, the driver - will associate randomly-generated CHAP secrets with all hosts on the HPE - LeftHand/StoreVirtual system. OpenStack Compute nodes use these secrets - when creating iSCSI connections. - - .. important:: - - CHAP secrets are passed from OpenStack Block Storage to Compute in clear - text. This communication should be secured to ensure that CHAP secrets - are not discovered. - - .. note:: - - CHAP secrets are added to existing hosts as well as newly-created ones. - If the CHAP option is enabled, hosts will not be able to access the - storage without the generated secrets. - -#. Save the changes to the ``cinder.conf`` file and restart the - ``cinder-volume`` service. - -The HPE LeftHand/StoreVirtual driver is now enabled on your OpenStack system. -If you experience problems, review the Block Storage service log files for -errors. - -.. note:: - Previous versions implement a HPE LeftHand/StoreVirtual CLIQ driver that - enable the Block Storage service driver configuration in legacy mode. This - is removed from Mitaka onwards. 
diff --git a/doc/source/configuration/block-storage/drivers/huawei-storage-driver.rst b/doc/source/configuration/block-storage/drivers/huawei-storage-driver.rst deleted file mode 100644 index 53dd380c6..000000000 --- a/doc/source/configuration/block-storage/drivers/huawei-storage-driver.rst +++ /dev/null @@ -1,516 +0,0 @@ -==================== -Huawei volume driver -==================== - -Huawei volume driver can be used to provide functions such as the logical -volume and snapshot for virtual machines (VMs) in the OpenStack Block Storage -driver that supports iSCSI and Fibre Channel protocols. - -Version mappings -~~~~~~~~~~~~~~~~ - -The following table describes the version mappings among the Block Storage -driver, Huawei storage system and OpenStack: - -.. list-table:: **Version mappings among the Block Storage driver and Huawei - storage system** - :widths: 30 35 - :header-rows: 1 - - * - Description - - Storage System Version - * - Create, delete, expand, attach, detach, manage and unmanage volumes - - Create volumes with assigned storage pools - - Create volumes with assigned disk types - - Create, delete and update a consistency group - - Copy an image to a volume - - Copy a volume to an image - - Auto Zoning - - SmartThin - - Volume Migration - - Replication V2.1 - - Create, delete, manage, unmanage and backup snapshots - - Create and delete a cgsnapshot - - OceanStor T series V2R2 C00/C20/C30 - - OceanStor V3 V3R1C10/C20 V3R2C10 V3R3C00/C10/C20 - - OceanStor 2200V3 V300R005C00 - - OceanStor 2600V3 V300R005C00 - - OceanStor 18500/18800 V1R1C00/C20/C30 V3R3C00 - - OceanStor Dorado V300R001C00 - - OceanStor V3 V300R006C00 - - OceanStor 2200V3 V300R006C00 - - OceanStor 2600V3 V300R006C00 - * - Clone a volume - - Create volume from snapshot - - Retype - - SmartQoS - - SmartTier - - SmartCache - - Thick - - OceanStor T series V2R2 C00/C20/C30 - - OceanStor V3 V3R1C10/C20 V3R2C10 V3R3C00/C10/C20 - - OceanStor 2200V3 V300R005C00 - - OceanStor 2600V3 V300R005C00 - 
- OceanStor 18500/18800V1R1C00/C20/C30 - - OceanStor V3 V300R006C00 - - OceanStor 2200V3 V300R006C00 - - OceanStor 2600V3 V300R006C00 - * - SmartPartition - - OceanStor T series V2R2 C00/C20/C30 - - OceanStor V3 V3R1C10/C20 V3R2C10 V3R3C00/C10/C20 - - OceanStor 2600V3 V300R005C00 - - OceanStor 18500/18800V1R1C00/C20/C30 - - OceanStor V3 V300R006C00 - - OceanStor 2600V3 V300R006C00 - * - Hypermetro - - Hypermetro consistency group - - OceanStor V3 V3R3C00/C10/C20 - - OceanStor 2600V3 V3R5C00 - - OceanStor 18500/18800 V3R3C00 - - OceanStor Dorado V300R001C00 - - OceanStor V3 V300R006C00 - - OceanStor 2600V3 V300R006C00 - -Volume driver configuration -~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -This section describes how to configure the Huawei volume driver for either -iSCSI storage or Fibre Channel storage. - -**Pre-requisites** - -When creating a volume from image, install the ``multipath`` tool and add the -following configuration keys in the ``[DEFAULT]`` configuration group of -the ``/etc/cinder/cinder.conf`` file: - -.. code-block:: ini - - use_multipath_for_image_xfer = True - enforce_multipath_for_image_xfer = True - -To configure the volume driver, follow the steps below: - -#. In ``/etc/cinder``, create a Huawei-customized driver configuration file. - The file format is XML. -#. Change the name of the driver configuration file based on the site - requirements, for example, ``cinder_huawei_conf.xml``. -#. Configure parameters in the driver configuration file. - - Each product has its own value for the ``Product`` parameter under the - ``Storage`` xml block. The full xml file with the appropriate ``Product`` - parameter is as below: - - .. code-block:: xml - - - - - PRODUCT - PROTOCOL - xxxxxxxx - xxxxxxxx - https://x.x.x.x:8088/deviceManager/rest/ - - - xxx - xxx - - xxx - - - x.x.x.x - - - - - - The corresponding ``Product`` values for each product are as below: - - - * **For T series V2** - - .. code-block:: xml - - TV2 - - * **For V3** - - .. 
code-block:: xml - - V3 - - * **For OceanStor 18000 series** - - .. code-block:: xml - - 18000 - - * **For OceanStor Dorado series** - - .. code-block:: xml - - Dorado - - The ``Protocol`` value to be used is ``iSCSI`` for iSCSI and ``FC`` for - Fibre Channel as shown below: - - .. code-block:: xml - - # For iSCSI - iSCSI - - # For Fibre channel - FC - - .. note:: - - For details about the parameters in the configuration file, see the - `Configuration file parameters`_ section. - -#. Configure the ``cinder.conf`` file. - - In the ``[default]`` block of ``/etc/cinder/cinder.conf``, - enable the ``VOLUME_BACKEND``: - - .. code-block:: ini - - enabled_backends = VOLUME_BACKEND - - - Add a new block ``[VOLUME_BACKEND]``, and add the following contents: - - .. code-block:: ini - - [VOLUME_BACKEND] - volume_driver = VOLUME_DRIVER - cinder_huawei_conf_file = /etc/cinder/cinder_huawei_conf.xml - volume_backend_name = Huawei_Storage - - * ``volume_driver`` indicates the loaded driver. - - * ``cinder_huawei_conf_file`` indicates the specified Huawei-customized - configuration file. - - * ``volume_backend_name`` indicates the name of the backend. - - Add information about remote devices in ``/etc/cinder/cinder.conf`` - in target backend block for ``Hypermetro``. - - .. code-block:: ini - - [VOLUME_BACKEND] - volume_driver = VOLUME_DRIVER - cinder_huawei_conf_file = /etc/cinder/cinder_huawei_conf.xml - volume_backend_name = Huawei_Storage - metro_san_user = xxx - metro_san_password = xxx - metro_domain_name = xxx - metro_san_address = https://x.x.x.x:8088/deviceManager/rest/ - metro_storage_pools = xxx - - Add information about remote devices in ``/etc/cinder/cinder.conf`` - in target backend block for ``Replication``. - - .. 
code-block:: ini - - [VOLUME_BACKEND] - volume_driver = VOLUME_DRIVER - cinder_huawei_conf_file = /etc/cinder/cinder_huawei_conf.xml - volume_backend_name = Huawei_Storage - replication_device = - backend_id: xxx, - storage_pool :xxx, - san_address: https://x.x.x.x:8088/deviceManager/rest/, - san_user: xxx, - san_passowrd: xxx, - iscsi_default_target_ip: x.x.x.x - - .. note:: - - By default, the value for ``Hypermetro`` and ``Replication`` is - ``None``. For details about the parameters in the configuration file, - see the `Configuration file parameters`_ section. - - The ``volume-driver`` value for every product is as below: - - .. code-block:: ini - - # For iSCSI - volume_driver = cinder.volume.drivers.huawei.huawei_driver.HuaweiISCSIDriver - - # For FC - volume_driver = cinder.volume.drivers.huawei.huawei_driver.HuaweiFCDriver - -#. Run the :command:`service cinder-volume restart` command to restart the - Block Storage service. - -Configuring iSCSI Multipathing ------------------------------- - -To configure iSCSI Multipathing, follow the steps below: - -#. Add the port group settings in the Huawei-customized driver configuration - file and configure the port group name needed by an initiator. - - .. code-block:: xml - - - x.x.x.x - - - -#. Enable the multipathing switch of the Compute service module. - - Add ``volume_use_multipath = True`` in ``[libvirt]`` of - ``/etc/nova/nova.conf``. - -#. Run the :command:`service nova-compute restart` command to restart the - ``nova-compute`` service. - -Configuring FC Multipathing ------------------------------- - -To configure FC Multipathing, follow the steps below: - -#. Enable the multipathing switch of the Compute service module. - - Add ``volume_use_multipath = True`` in ``[libvirt]`` of - ``/etc/nova/nova.conf``. - -#. Run the :command:`service nova-compute restart` command to restart the - ``nova-compute`` service. 
- -Configuring CHAP and ALUA -------------------------- - -On a public network, any application server whose IP address resides on the -same network segment as that of the storage systems iSCSI host port can access -the storage system and perform read and write operations in it. This poses -risks to the data security of the storage system. To ensure the storage -systems access security, you can configure ``CHAP`` authentication to control -application servers access to the storage system. - -Adjust the driver configuration file as follows: - -.. code-block:: xml - - - -``ALUA`` indicates a multipathing mode. 0 indicates that ``ALUA`` is disabled. -1 indicates that ``ALUA`` is enabled. ``CHAPinfo`` indicates the user name and -password authenticated by ``CHAP``. The format is ``mmuser; mm-user@storage``. -The user name and password are separated by semicolons (``;``). - -Configuring multiple storage ----------------------------- - -Multiple storage systems configuration example: - -.. code-block:: ini - - enabled_backends = v3_fc, 18000_fc - [v3_fc] - volume_driver = cinder.volume.drivers.huawei.huawei_driver.HuaweiFCDriver - cinder_huawei_conf_file = /etc/cinder/cinder_huawei_conf_v3_fc.xml - volume_backend_name = huawei_v3_fc - [18000_fc] - volume_driver = cinder.volume.drivers.huawei.huawei_driver.HuaweiFCDriver - cinder_huawei_conf_file = /etc/cinder/cinder_huawei_conf_18000_fc.xml - volume_backend_name = huawei_18000_fc - -Configuration file parameters ------------------------------ - -This section describes mandatory and optional configuration file parameters -of the Huawei volume driver. - -.. list-table:: **Mandatory parameters** - :widths: 10 10 50 10 - :header-rows: 1 - - * - Parameter - - Default value - - Description - - Applicable to - * - Product - - ``-`` - - Type of a storage product. Possible values are ``TV2``, ``18000`` and - ``V3``. - - All - * - Protocol - - ``-`` - - Type of a connection protocol. 
The possible value is either ``'iSCSI'`` - or ``'FC'``. - - All - * - RestURL - - ``-`` - - Access address of the REST interface, - ``https://x.x.x.x/devicemanager/rest/``. The value ``x.x.x.x`` indicates - the management IP address. OceanStor 18000 uses the preceding setting, - and V2 and V3 requires you to add port number ``8088``, for example, - ``https://x.x.x.x:8088/deviceManager/rest/``. If you need to configure - multiple RestURL, separate them by semicolons (;). - - All - * - UserName - - ``-`` - - User name of a storage administrator. - - All - * - UserPassword - - ``-`` - - Password of a storage administrator. - - All - * - StoragePool - - ``-`` - - Name of a storage pool to be used. If you need to configure multiple - storage pools, separate them by semicolons (``;``). - - All - -.. note:: - - The value of ``StoragePool`` cannot contain Chinese characters. - -.. list-table:: **Optional parameters** - :widths: 20 10 50 15 - :header-rows: 1 - - * - Parameter - - Default value - - Description - - Applicable to - * - LUNType - - Thick - - Type of the LUNs to be created. The value can be ``Thick`` or ``Thin``. Dorado series only support ``Thin`` LUNs. - - All - * - WriteType - - 1 - - Cache write type, possible values are: ``1`` (write back), ``2`` - (write through), and ``3`` (mandatory write back). - - All - * - LUNcopyWaitInterval - - 5 - - After LUN copy is enabled, the plug-in frequently queries the copy - progress. You can set a value to specify the query interval. - - All - * - Timeout - - 432000 - - Timeout interval for waiting LUN copy of a storage device to complete. - The unit is second. - - All - * - Initiator Name - - ``-`` - - Name of a compute node initiator. - - All - * - Initiator TargetIP - - ``-`` - - IP address of the iSCSI port provided for compute nodes. - - All - * - Initiator TargetPortGroup - - ``-`` - - IP address of the iSCSI target port that is provided for compute - nodes. 
- - All - * - DefaultTargetIP - - ``-`` - - Default IP address of the iSCSI target port that is provided for - compute nodes. - - All - * - OSType - - Linux - - Operating system of the Nova compute node's host. - - All - * - HostIP - - ``-`` - - IP address of the Nova compute node's host. - - All - * - metro_san_user - - ``-`` - - User name of a storage administrator of hypermetro remote device. - - V3R3/2600 V3R5/18000 V3R3 - * - metro_san_password - - ``-`` - - Password of a storage administrator of hypermetro remote device. - - V3R3/2600 V3R5/18000 V3R3 - * - metro_domain_name - - ``-`` - - Hypermetro domain name configured on ISM. - - V3R3/2600 V3R5/18000 V3R3 - * - metro_san_address - - ``-`` - - Access address of the REST interface, https://x.x.x.x/devicemanager/rest/. The value x.x.x.x indicates the management IP address. - - V3R3/2600 V3R5/18000 V3R3 - * - metro_storage_pools - - ``-`` - - Remote storage pool for hypermetro. - - V3R3/2600 V3R5/18000 V3R3 - * - backend_id - - ``-`` - - Target device ID. - - All - * - storage_pool - - ``-`` - - Pool name of target backend when failover for replication. - - All - * - san_address - - ``-`` - - Access address of the REST interface, https://x.x.x.x/devicemanager/rest/. The value x.x.x.x indicates the management IP address. - - All - * - san_user - - ``-`` - - User name of a storage administrator of replication remote device. - - All - * - san_password - - ``-`` - - Password of a storage administrator of replication remote device. - - All - * - iscsi_default_target_ip - - ``-`` - - Remote transacton port IP. - - All -.. important:: - - The ``Initiator Name``, ``Initiator TargetIP``, and - ``Initiator TargetPortGroup`` are ``ISCSI`` parameters and therefore not - applicable to ``FC``. 
diff --git a/doc/source/configuration/block-storage/drivers/ibm-flashsystem-volume-driver.rst b/doc/source/configuration/block-storage/drivers/ibm-flashsystem-volume-driver.rst deleted file mode 100644 index a2c096866..000000000 --- a/doc/source/configuration/block-storage/drivers/ibm-flashsystem-volume-driver.rst +++ /dev/null @@ -1,242 +0,0 @@ -============================= -IBM FlashSystem volume driver -============================= - -The volume driver for FlashSystem provides OpenStack Block Storage hosts -with access to IBM FlashSystems. - -Configure FlashSystem -~~~~~~~~~~~~~~~~~~~~~ - -Configure storage array ------------------------ - -The volume driver requires a pre-defined array. You must create an -array on the FlashSystem before using the volume driver. An existing array -can also be used and existing data will not be deleted. - -.. note:: - - FlashSystem can only create one array, so no configuration option is - needed for the IBM FlashSystem driver to assign it. - -Configure user authentication for the driver --------------------------------------------- - -The driver requires access to the FlashSystem management interface using -SSH. It should be provided with the FlashSystem management IP using the -``san_ip`` flag, and the management port should be provided by the -``san_ssh_port`` flag. By default, the port value is configured to be -port 22 (SSH). - -.. note:: - - Make sure the compute node running the ``cinder-volume`` driver has SSH - network access to the storage system. - -Using password authentication, assign a password to the user on the -FlashSystem. For more detail, see the driver configuration flags -for the user and password here: :ref:`config_fc_flags` -or :ref:`config_iscsi_flags`. - -IBM FlashSystem FC driver -~~~~~~~~~~~~~~~~~~~~~~~~~ - -Data Path configuration ------------------------ - -Using Fiber Channel (FC), each FlashSystem node should have at least one -WWPN port configured. 
If the ``flashsystem_multipath_enabled`` flag is -set to ``True`` in the Block Storage service configuration file, the driver -uses all available WWPNs to attach the volume to the instance. If the flag is -not set, the driver uses the WWPN associated with the volume's preferred node -(if available). Otherwise, it uses the first available WWPN of the system. The -driver obtains the WWPNs directly from the storage system. You do not need to -provide these WWPNs to the driver. - -.. note:: - - Using FC, ensure that the block storage hosts have FC connectivity - to the FlashSystem. - -.. _config_fc_flags: - -Enable IBM FlashSystem FC driver --------------------------------- - -Set the volume driver to the FlashSystem driver by setting the -``volume_driver`` option in the ``cinder.conf`` configuration file, -as follows: - -.. code-block:: ini - - volume_driver = cinder.volume.drivers.ibm.flashsystem_fc.FlashSystemFCDriver - -To enable the IBM FlashSystem FC driver, configure the following options in the -``cinder.conf`` configuration file: - -.. list-table:: List of configuration flags for IBM FlashSystem FC driver - :header-rows: 1 - - * - Flag name - - Type - - Default - - Description - * - ``san_ip`` - - Required - - - - Management IP or host name - * - ``san_ssh_port`` - - Optional - - 22 - - Management port - * - ``san_login`` - - Required - - - - Management login user name - * - ``san_password`` - - Required - - - - Management login password - * - ``flashsystem_connection_protocol`` - - Required - - - - Connection protocol should be set to ``FC`` - * - ``flashsystem_multipath_enabled`` - - Required - - - - Enable multipath for FC connections - * - ``flashsystem_multihost_enabled`` - - Optional - - ``True`` - - Enable mapping vdisks to multiple hosts [1]_ - -.. [1] - This option allows the driver to map a vdisk to more than one host at - a time. 
This scenario occurs during migration of a virtual machine - with an attached volume; the volume is simultaneously mapped to both - the source and destination compute hosts. If your deployment does not - require attaching vdisks to multiple hosts, setting this flag to - ``False`` will provide added safety. - -IBM FlashSystem iSCSI driver -~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -Network configuration ---------------------- - -Using iSCSI, each FlashSystem node should have at least one iSCSI port -configured. iSCSI IP addresses of IBM FlashSystem can be obtained by -FlashSystem GUI or CLI. For more information, see the -appropriate IBM Redbook for the FlashSystem. - -.. note:: - - Using iSCSI, ensure that the compute nodes have iSCSI network access - to the IBM FlashSystem. - -.. _config_iscsi_flags: - -Enable IBM FlashSystem iSCSI driver ------------------------------------ - -Set the volume driver to the FlashSystem driver by setting the -``volume_driver`` option in the ``cinder.conf`` configuration file, as -follows: - -.. code-block:: ini - - volume_driver = cinder.volume.drivers.ibm.flashsystem_iscsi.FlashSystemISCSIDriver - -To enable IBM FlashSystem iSCSI driver, configure the following options -in the ``cinder.conf`` configuration file: - - -.. 
list-table:: List of configuration flags for IBM FlashSystem iSCSI driver - :header-rows: 1 - - * - Flag name - - Type - - Default - - Description - * - ``san_ip`` - - Required - - - - Management IP or host name - * - ``san_ssh_port`` - - Optional - - 22 - - Management port - * - ``san_login`` - - Required - - - - Management login user name - * - ``san_password`` - - Required - - - - Management login password - * - ``flashsystem_connection_protocol`` - - Required - - - - Connection protocol should be set to ``iSCSI`` - * - ``flashsystem_multihost_enabled`` - - Optional - - ``True`` - - Enable mapping vdisks to multiple hosts [2]_ - * - ``iscsi_ip_address`` - - Required - - - - Set to one of the iSCSI IP addresses obtained by FlashSystem GUI or CLI [3]_ - * - ``flashsystem_iscsi_portid`` - - Required - - - - Set to the id of the ``iscsi_ip_address`` obtained by FlashSystem GUI or CLI [4]_ - -.. [2] - This option allows the driver to map a vdisk to more than one host at - a time. This scenario occurs during migration of a virtual machine - with an attached volume; the volume is simultaneously mapped to both - the source and destination compute hosts. If your deployment does not - require attaching vdisks to multiple hosts, setting this flag to - ``False`` will provide added safety. - -.. [3] - On the cluster of the FlashSystem, the ``iscsi_ip_address`` column is the - seventh column ``IP_address`` of the output of ``lsportip``. - -.. [4] - On the cluster of the FlashSystem, port ID column is the first - column ``id`` of the output of ``lsportip``, - not the sixth column ``port_id``. - -Limitations and known issues -~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -IBM FlashSystem only works when: - -.. code-block:: ini - - open_access_enabled=off - -Supported operations -~~~~~~~~~~~~~~~~~~~~ - -These operations are supported: - -- Create, delete, attach, and detach volumes. - -- Create, list, and delete volume snapshots. - -- Create a volume from a snapshot. 
- -- Copy an image to a volume. - -- Copy a volume to an image. - -- Clone a volume. - -- Extend a volume. - -- Get volume statistics. - -- Manage and unmanage a volume. diff --git a/doc/source/configuration/block-storage/drivers/ibm-gpfs-volume-driver.rst b/doc/source/configuration/block-storage/drivers/ibm-gpfs-volume-driver.rst deleted file mode 100644 index a1708616a..000000000 --- a/doc/source/configuration/block-storage/drivers/ibm-gpfs-volume-driver.rst +++ /dev/null @@ -1,228 +0,0 @@ -================================ -IBM Spectrum Scale volume driver -================================ -IBM Spectrum Scale is a flexible software-defined storage that can be -deployed as high performance file storage or a cost optimized -large-scale content repository. IBM Spectrum Scale, previously known as -IBM General Parallel File System (GPFS), is designed to scale performance -and capacity with no bottlenecks. IBM Spectrum Scale is a cluster file system -that provides concurrent access to file systems from multiple nodes. The -storage provided by these nodes can be direct attached, network attached, -SAN attached, or a combination of these methods. Spectrum Scale provides -many features beyond common data access, including data replication, -policy based storage management, and space efficient file snapshot and -clone operations. - -How the Spectrum Scale volume driver works -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -The Spectrum Scale volume driver, named ``gpfs.py``, enables the use of -Spectrum Scale in a fashion similar to that of the NFS driver. With -the Spectrum Scale driver, instances do not actually access a storage -device at the block level. Instead, volume backing files are created -in a Spectrum Scale file system and mapped to instances, which emulate -a block device. - -.. note:: - - Spectrum Scale must be installed and cluster has to be created on the - storage nodes in the OpenStack environment. 
A file system must also be - created and mounted on these nodes before configuring the cinder service - to use Spectrum Scale storage.For more details, please refer to - `Spectrum Scale product documentation `_. - -Optionally, the Image service can be configured to store glance images -in a Spectrum Scale file system. When a Block Storage volume is created -from an image, if both image data and volume data reside in the same -Spectrum Scale file system, the data from image file is moved efficiently -to the volume file using copy-on-write optimization strategy. - -Supported operations -~~~~~~~~~~~~~~~~~~~~ -- Create, delete, attach, and detach volumes. -- Create, delete volume snapshots. -- Create a volume from a snapshot. -- Create cloned volumes. -- Extend a volume. -- Migrate a volume. -- Retype a volume. -- Create, delete consistency groups. -- Create, delete consistency group snapshots. -- Copy an image to a volume. -- Copy a volume to an image. -- Backup and restore volumes. - -Driver configurations -~~~~~~~~~~~~~~~~~~~~~ - -The Spectrum Scale volume driver supports three modes of deployment. - -Mode 1 – Pervasive Spectrum Scale Client ----------------------------------------- - -When Spectrum Scale is running on compute nodes as well as on the cinder node. -For example, Spectrum Scale filesystem is available to both Compute and -Block Storage services as a local filesystem. - -To use Spectrum Scale driver in this deployment mode, set the ``volume_driver`` -in the ``cinder.conf`` as: - -.. code-block:: ini - - volume_driver = cinder.volume.drivers.ibm.gpfs.GPFSDriver - -The following table contains the configuration options supported by the -Spectrum Scale driver in this deployment mode. - -.. include:: ../../tables/cinder-ibm_gpfs.rst - -.. note:: - - The ``gpfs_images_share_mode`` flag is only valid if the Image - Service is configured to use Spectrum Scale with the - ``gpfs_images_dir`` flag. 
When the value of this flag is - ``copy_on_write``, the paths specified by the ``gpfs_mount_point_base`` - and ``gpfs_images_dir`` flags must both reside in the same GPFS - file system and in the same GPFS file set. - -Mode 2 – Remote Spectrum Scale Driver with Local Compute Access ---------------------------------------------------------------- - -When Spectrum Scale is running on compute nodes, but not on the Block Storage -node. For example, Spectrum Scale filesystem is only available to Compute -service as Local filesystem where as Block Storage service accesses Spectrum -Scale remotely. In this case, ``cinder-volume`` service running Spectrum Scale -driver access storage system over SSH and creates volume backing files to make -them available on the compute nodes. This mode is typically deployed when the -cinder and glance services are running inside a Linux container. The container -host should have Spectrum Scale client running and GPFS filesystem mount path -should be bind mounted into the Linux containers. - -.. note:: - - Note that the user IDs present in the containers should match as that in the - host machines. For example, the containers running cinder and glance - services should be priviledged containers. - -To use Spectrum Scale driver in this deployment mode, set the ``volume_driver`` -in the ``cinder.conf`` as: - -.. code-block:: ini - - volume_driver = cinder.volume.drivers.ibm.gpfs.GPFSRemoteDriver - -The following table contains the configuration options supported by the -Spectrum Scale driver in this deployment mode. - -.. include:: ../../tables/cinder-ibm_gpfs_remote.rst - -.. note:: - - The ``gpfs_images_share_mode`` flag is only valid if the Image - Service is configured to use Spectrum Scale with the - ``gpfs_images_dir`` flag. 
When the value of this flag is - ``copy_on_write``, the paths specified by the ``gpfs_mount_point_base`` - and ``gpfs_images_dir`` flags must both reside in the same GPFS - file system and in the same GPFS file set. - -Mode 3 – Remote Spectrum Scale Access -------------------------------------- - -When both Compute and Block Storage nodes are not running Spectrum Scale -software and do not have access to Spectrum Scale file system directly as -local filesystem. In this case, we create an NFS export on the volume path -and make it available on the cinder node and on compute nodes. - -Optionally, if one wants to use the copy-on-write optimization to create -bootable volumes from glance images, one need to also export the glance -images path and mount it on the nodes where glance and cinder services -are running. The cinder and glance services will access the GPFS -filesystem through NFS. - -To use Spectrum Scale driver in this deployment mode, set the ``volume_driver`` -in the ``cinder.conf`` as: - -.. code-block:: ini - - volume_driver = cinder.volume.drivers.ibm.gpfs.GPFSNFSDriver - -The following table contains the configuration options supported by the -Spectrum Scale driver in this deployment mode. - -.. include:: ../../tables/cinder-ibm_gpfs_nfs.rst - -Additionally, all the options of the base NFS driver are applicable -for GPFSNFSDriver. The above table lists the basic configuration -options which are needed for initialization of the driver. - -.. note:: - - The ``gpfs_images_share_mode`` flag is only valid if the Image - Service is configured to use Spectrum Scale with the - ``gpfs_images_dir`` flag. When the value of this flag is - ``copy_on_write``, the paths specified by the ``gpfs_mount_point_base`` - and ``gpfs_images_dir`` flags must both reside in the same GPFS - file system and in the same GPFS file set. 
- - -Volume creation options -~~~~~~~~~~~~~~~~~~~~~~~ - -It is possible to specify additional volume configuration options on a -per-volume basis by specifying volume metadata. The volume is created -using the specified options. Changing the metadata after the volume is -created has no effect. The following table lists the volume creation -options supported by the GPFS volume driver. - -.. list-table:: **Volume Create Options for Spectrum Scale Volume Drivers** - :widths: 10 25 - :header-rows: 1 - - * - Metadata Item Name - - Description - * - fstype - - Specifies whether to create a file system or a swap area on the new volume. If fstype=swap is specified, the mkswap command is used to create a swap area. Otherwise the mkfs command is passed the specified file system type, for example ext3, ext4 or ntfs. - * - fslabel - - Sets the file system label for the file system specified by fstype option. This value is only used if fstype is specified. - * - data_pool_name - - Specifies the GPFS storage pool to which the volume is to be assigned. Note: The GPFS storage pool must already have been created. - * - replicas - - Specifies how many copies of the volume file to create. Valid values are 1, 2, and, for Spectrum Scale V3.5.0.7 and later, 3. This value cannot be greater than the value of the MaxDataReplicasattribute of the file system. - * - dio - - Enables or disables the Direct I/O caching policy for the volume file. Valid values are yes and no. - * - write_affinity_depth - - Specifies the allocation policy to be used for the volume file. Note: This option only works if allow-write-affinity is set for the GPFS data pool. - * - block_group_factor - - Specifies how many blocks are laid out sequentially in the volume file to behave as a single large block. Note: This option only works if allow-write-affinity is set for the GPFS data pool. 
- * - write_affinity_failure_group - - Specifies the range of nodes (in GPFS shared nothing architecture) where replicas of blocks in the volume file are to be written. See Spectrum Scale documentation for more details about this option. - -This example shows the creation of a 50GB volume with an ``ext4`` file -system labeled ``newfs`` and direct IO enabled: - -.. code-block:: console - - $ openstack volume create --property fstype=ext4 fslabel=newfs dio=yes \ - --size 50 VOLUME - -Note that if the metadata for the volume is changed later, the changes -do not reflect in the backend. User will have to manually change the -volume attributes corresponding to metadata on Spectrum Scale filesystem. - -Operational notes for GPFS driver -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -Volume snapshots are implemented using the GPFS file clone feature. -Whenever a new snapshot is created, the snapshot file is efficiently -created as a read-only clone parent of the volume, and the volume file -uses copy-on-write optimization strategy to minimize data movement. - -Similarly when a new volume is created from a snapshot or from an -existing volume, the same approach is taken. The same approach is also -used when a new volume is created from an Image service image, if the -source image is in raw format, and ``gpfs_images_share_mode`` is set to -``copy_on_write``. - -The Spectrum Scale driver supports encrypted volume back end feature. -To encrypt a volume at rest, specify the extra specification -``gpfs_encryption_rest = True``. 
diff --git a/doc/source/configuration/block-storage/drivers/ibm-storage-volume-driver.rst b/doc/source/configuration/block-storage/drivers/ibm-storage-volume-driver.rst deleted file mode 100644 index fe188e843..000000000 --- a/doc/source/configuration/block-storage/drivers/ibm-storage-volume-driver.rst +++ /dev/null @@ -1,172 +0,0 @@ -================================ -IBM Storage Driver for OpenStack -================================ - -Introduction -~~~~~~~~~~~~ -The IBM Storage Driver for OpenStack is a software component of the -OpenStack cloud environment that enables utilization of storage -resources provided by supported IBM storage systems. - -The driver was validated on the following storage systems: - -* IBM DS8000 Family -* IBM FlashSystem A9000 -* IBM FlashSystem A9000R -* IBM Spectrum Accelerate -* IBM XIV Storage System - -After the driver is configured on the OpenStack cinder nodes, storage volumes -can be allocated by the cinder nodes to the nova nodes. Virtual machines on -the nova nodes can then utilize these storage resources. - -.. note:: - Unless stated otherwise, all references to XIV storage - system in this guide relate all members of the Spectrum Accelerate - Family (XIV, Spectrum Accelerate and FlashSystem A9000/A9000R). - -Concept diagram ---------------- -This figure illustrates how an IBM storage system is connected -to the OpenStack cloud environment and provides storage resources when the -IBM Storage Driver for OpenStack is configured on the OpenStack cinder nodes. -The OpenStack cloud is connected to the IBM storage system over Fibre -Channel or iSCSI (DS8000 systems support only Fibre Channel connections). -Remote cloud users can issue requests for storage resources from the -OpenStack cloud. These requests are transparently handled by the IBM Storage -Driver, which communicates with the IBM storage system and controls the -storage volumes on it. The IBM storage resources are then provided to the -nova nodes in the OpenStack cloud. 
- -.. figure:: ../../figures/ibm-storage-nova-concept.png - -Configuration -~~~~~~~~~~~~~ - -Configure the driver manually by changing the ``cinder.conf`` file as -follows: - - -.. code-block:: ini - - volume_driver = cinder.volume.drivers.ibm.ibm_storage.IBMStorageDriver - -.. include:: ../../tables/cinder-ibm_storage.rst - - - -Security -~~~~~~~~ - -The following information provides an overview of security for the IBM -Storage Driver for OpenStack. - -Avoiding man-in-the-middle attacks ----------------------------------- - -When using a Spectrum Accelerate Family product, you can prevent -man-in-the-middle (MITM) attacks by following these rules: - -* Upgrade to IBM XIV storage system version 11.3 or later. - -* If working in a secure mode, do not work insecurely against another storage - system in the same environment. - -* Validate the storage certificate. If you are using an XIV-provided - certificate, use the CA file that was provided with your storage system - (``XIV-CA.pem``). The certificate files should be copied to one of the - following directories: - - * ``/etc/ssl/certs`` - * ``/etc/ssl/certs/xiv`` - * ``/etc/pki`` - * ``/etc/pki/xiv`` - - If you are using your own certificates, copy them to the same directories - with the prefix ``XIV`` and in the ``.pem`` format. - For example: ``XIV-my_cert.pem``. - -* To prevent the CVE-2014-3566 MITM attack, follow the OpenStack - community - `directions `_. - -Troubleshooting -~~~~~~~~~~~~~~~ - -Refer to this information to troubleshoot technical problems that you -might encounter when using the IBM Storage Driver for OpenStack. - -Checking the cinder node log files ----------------------------------- - -The cinder log files record operation information that might be useful -for troubleshooting. - -To achieve optimal and clear logging of events, activate the verbose -logging level in the ``cinder.conf`` file, located in the ``/etc/cinder`` -folder. 
Add the following line in the file, save the file, and then -restart the ``cinder-volume`` service: - -.. code-block:: console - - verbose = True - debug = True - -To turn off the verbose logging level, change ``True`` to ``False``, -save the file, and then restart the ``cinder-volume`` service. - -Check the log files on a periodic basis to ensure that the IBM -Storage Driver is functioning properly: - -#. Log into the cinder node. -#. Go to the ``/var/log/cinder`` folder -#. Open the activity log file named ``cinder-volume.log`` or ``volume.log``. - The IBM Storage Driver writes to this log file using the - ``[IBM DS8K STORAGE]`` or ``[IBM XIV STORAGE]`` prefix (depending on - the relevant storage system) for each event that it records in the file. - - -Best practices -~~~~~~~~~~~~~~ - -This section contains the general guidance and best practices. - -Working with multi-tenancy --------------------------- -The XIV storage systems, running microcode version 11.5 or later, Spectrum -Accelerate and FlashSystem A9000/A9000R can employ multi-tenancy. - -In order to use multi-tenancy with the IBM Storage Driver for OpenStack: - -* For each storage system, verify that all predefined storage pools are - in the same domain or, that all are not in a domain. - -* Use either storage administrator or domain administrator user's - credentials, as long as the credentials grant a full access to the relevant - pool. -* If the user is a domain administrator, the storage system domain - access policy can be ``CLOSED`` (``domain_policy: access=CLOSED``). - Otherwise, verify that the storage system domain access policy is - ``OPEN`` (``domain_policy: access=OPEN``). -* If the user is not a domain administrator, the host management policy - of the storage system domain can be ``BASIC`` (``domain_policy: - host_management=BASIC``). Otherwise, verify that the storage - system domain host management policy is ``EXTENDED`` - (``domain_policy: host_management=EXTENDED``). 
- -Working with IBM Real-time Compression™ ---------------------------------------- -XIV storage systems running microcode version 11.6 or later, -Spectrum Accelerate and FlashSystem A9000/A9000R can employ IBM -Real-time Compression™. - -Follow these guidelines when working with compressed storage -resources using the IBM Storage Driver for OpenStack: - -* Compression mode cannot be changed for storage volumes, using - the IBM Storage Driver for OpenStack. The volumes are created - according to the default compression mode of the pool. For example, - any volume created in a compressed pool will be compressed as well. - -* The minimum size for a compressed storage volume is 87 GB. - diff --git a/doc/source/configuration/block-storage/drivers/ibm-storwize-svc-driver.rst b/doc/source/configuration/block-storage/drivers/ibm-storwize-svc-driver.rst deleted file mode 100644 index 8c65c67c6..000000000 --- a/doc/source/configuration/block-storage/drivers/ibm-storwize-svc-driver.rst +++ /dev/null @@ -1,499 +0,0 @@ -========================================= -IBM Storwize family and SVC volume driver -========================================= - -The volume management driver for Storwize family and SAN Volume -Controller (SVC) provides OpenStack Compute instances with access to IBM -Storwize family or SVC storage systems. - -Supported operations -~~~~~~~~~~~~~~~~~~~~ - -Storwize/SVC driver supports the following Block Storage service volume -operations: - -- Create, list, delete, attach (map), and detach (unmap) volumes. -- Create, list, and delete volume snapshots. -- Copy an image to a volume. -- Copy a volume to an image. -- Clone a volume. -- Extend a volume. -- Retype a volume. -- Create a volume from a snapshot. -- Create, list, and delete consistency group. -- Create, list, and delete consistency group snapshot. -- Modify consistency group (add or remove volumes). -- Create consistency group from source (source can be a CG or CG snapshot) -- Manage an existing volume. 
-- Failover-host for replicated back ends. -- Failback-host for replicated back ends. - -Configure the Storwize family and SVC system -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -Network configuration ---------------------- - -The Storwize family or SVC system must be configured for iSCSI, Fibre -Channel, or both. - -If using iSCSI, each Storwize family or SVC node should have at least -one iSCSI IP address. The IBM Storwize/SVC driver uses an iSCSI IP -address associated with the volume's preferred node (if available) to -attach the volume to the instance, otherwise it uses the first available -iSCSI IP address of the system. The driver obtains the iSCSI IP address -directly from the storage system. You do not need to provide these iSCSI -IP addresses directly to the driver. - -.. note:: - - If using iSCSI, ensure that the compute nodes have iSCSI network - access to the Storwize family or SVC system. - -If using Fibre Channel (FC), each Storwize family or SVC node should -have at least one WWPN port configured. The driver uses all available -WWPNs to attach the volume to the instance. The driver obtains the -WWPNs directly from the storage system. You do not need to provide -these WWPNs directly to the driver. - -.. note:: - - If using FC, ensure that the compute nodes have FC connectivity to - the Storwize family or SVC system. - -iSCSI CHAP authentication -------------------------- - -If using iSCSI for data access and the -``storwize_svc_iscsi_chap_enabled`` is set to ``True``, the driver will -associate randomly-generated CHAP secrets with all hosts on the Storwize -family system. The compute nodes use these secrets when creating -iSCSI connections. - -.. warning:: - - CHAP secrets are added to existing hosts as well as newly-created - ones. If the CHAP option is enabled, hosts will not be able to - access the storage without the generated secrets. - -.. note:: - - Not all OpenStack Compute drivers support CHAP authentication. 
- Please check compatibility before using. - -.. note:: - - CHAP secrets are passed from OpenStack Block Storage to Compute in - clear text. This communication should be secured to ensure that CHAP - secrets are not discovered. - -Configure storage pools ------------------------ - -The IBM Storwize/SVC driver can allocate volumes in multiple pools. -The pools should be created in advance and be provided to the driver -using the ``storwize_svc_volpool_name`` configuration flag in the form -of a comma-separated list. -For the complete list of configuration flags, see :ref:`config_flags`. - -Configure user authentication for the driver --------------------------------------------- - -The driver requires access to the Storwize family or SVC system -management interface. The driver communicates with the management using -SSH. The driver should be provided with the Storwize family or SVC -management IP using the ``san_ip`` flag, and the management port should -be provided by the ``san_ssh_port`` flag. By default, the port value is -configured to be port 22 (SSH). Also, you can set the secondary -management IP using the ``storwize_san_secondary_ip`` flag. - -.. note:: - - Make sure the compute node running the cinder-volume management - driver has SSH network access to the storage system. - -To allow the driver to communicate with the Storwize family or SVC -system, you must provide the driver with a user on the storage system. -The driver has two authentication methods: password-based authentication -and SSH key pair authentication. The user should have an Administrator -role. It is suggested to create a new user for the management driver. -Please consult with your storage and security administrator regarding -the preferred authentication method and how passwords or SSH keys should -be stored in a secure manner. - -.. 
note:: - - When creating a new user on the Storwize or SVC system, make sure - the user belongs to the Administrator group or to another group that - has an Administrator role. - -If using password authentication, assign a password to the user on the -Storwize or SVC system. The driver configuration flags for the user and -password are ``san_login`` and ``san_password``, respectively. - -If you are using the SSH key pair authentication, create SSH private and -public keys using the instructions below or by any other method. -Associate the public key with the user by uploading the public key: -select the :guilabel:`choose file` option in the Storwize family or SVC -management GUI under :guilabel:`SSH public key`. Alternatively, you may -associate the SSH public key using the command-line interface; details can -be found in the Storwize and SVC documentation. The private key should be -provided to the driver using the ``san_private_key`` configuration flag. - -Create a SSH key pair with OpenSSH ----------------------------------- - -You can create an SSH key pair using OpenSSH, by running: - -.. code-block:: console - - $ ssh-keygen -t rsa - -The command prompts for a file to save the key pair. For example, if you -select ``key`` as the filename, two files are created: ``key`` and -``key.pub``. The ``key`` file holds the private SSH key and ``key.pub`` -holds the public SSH key. - -The command also prompts for a pass phrase, which should be empty. - -The private key file should be provided to the driver using the -``san_private_key`` configuration flag. The public key should be -uploaded to the Storwize family or SVC system using the storage -management GUI or command-line interface. - -.. note:: - - Ensure that Cinder has read permissions on the private key file. 
- -Configure the Storwize family and SVC driver -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -Enable the Storwize family and SVC driver ------------------------------------------ - -Set the volume driver to the Storwize family and SVC driver by setting -the ``volume_driver`` option in the ``cinder.conf`` file as follows: - -iSCSI: - -.. code-block:: ini - - [svc1234] - volume_driver = cinder.volume.drivers.ibm.storwize_svc.storwize_svc_iscsi.StorwizeSVCISCSIDriver - san_ip = 1.2.3.4 - san_login = superuser - san_password = passw0rd - storwize_svc_volpool_name = cinder_pool1 - volume_backend_name = svc1234 - -FC: - -.. code-block:: ini - - [svc1234] - volume_driver = cinder.volume.drivers.ibm.storwize_svc.storwize_svc_fc.StorwizeSVCFCDriver - san_ip = 1.2.3.4 - san_login = superuser - san_password = passw0rd - storwize_svc_volpool_name = cinder_pool1 - volume_backend_name = svc1234 - -Replication configuration -------------------------- - -Add the following to the back-end specification to specify another storage -to replicate to: - -.. code-block:: ini - - replication_device = backend_id:rep_svc, - san_ip:1.2.3.5, - san_login:superuser, - san_password:passw0rd, - pool_name:cinder_pool1 - -The ``backend_id`` is a unique name of the remote storage, the ``san_ip``, -``san_login``, and ``san_password`` is authentication information for the -remote storage. The ``pool_name`` is the pool name for the replication -target volume. - -.. note:: - - Only one ``replication_device`` can be configured for one back end - storage since only one replication target is supported now. - -.. _config_flags: - -Storwize family and SVC driver options in cinder.conf ------------------------------------------------------ - -The following options specify default values for all volumes. Some can -be over-ridden using volume types, which are described below. - -.. 
include:: ../../tables/cinder-storwize.rst - -Note the following: - -* The authentication requires either a password (``san_password``) or - SSH private key (``san_private_key``). One must be specified. If - both are specified, the driver uses only the SSH private key. - -* The driver creates thin-provisioned volumes by default. The - ``storwize_svc_vol_rsize`` flag defines the initial physical - allocation percentage for thin-provisioned volumes, or if set to - ``-1``, the driver creates full allocated volumes. More details about - the available options are available in the Storwize family and SVC - documentation. - - -Placement with volume types ---------------------------- - -The IBM Storwize/SVC driver exposes capabilities that can be added to -the ``extra specs`` of volume types, and used by the filter -scheduler to determine placement of new volumes. Make sure to prefix -these keys with ``capabilities:`` to indicate that the scheduler should -use them. The following ``extra specs`` are supported: - -- ``capabilities:volume_back-end_name`` - Specify a specific back-end - where the volume should be created. The back-end name is a - concatenation of the name of the IBM Storwize/SVC storage system as - shown in ``lssystem``, an underscore, and the name of the pool (mdisk - group). For example: - - .. code-block:: ini - - capabilities:volume_back-end_name=myV7000_openstackpool - -- ``capabilities:compression_support`` - Specify a back-end according to - compression support. A value of ``True`` should be used to request a - back-end that supports compression, and a value of ``False`` will - request a back-end that does not support compression. If you do not - have constraints on compression support, do not set this key. Note - that specifying ``True`` does not enable compression; it only - requests that the volume be placed on a back-end that supports - compression. Example syntax: - - .. 
code-block:: ini - - capabilities:compression_support=' True' - -- ``capabilities:easytier_support`` - Similar semantics as the - ``compression_support`` key, but for specifying according to support - of the Easy Tier feature. Example syntax: - - .. code-block:: ini - - capabilities:easytier_support=' True' - -- ``capabilities:storage_protocol`` - Specifies the connection protocol - used to attach volumes of this type to instances. Legal values are - ``iSCSI`` and ``FC``. This ``extra specs`` value is used for both placement - and setting the protocol used for this volume. In the example syntax, - note ```` is used as opposed to ```` which is used in the - previous examples. - - .. code-block:: ini - - capabilities:storage_protocol=' FC' - -Configure per-volume creation options -------------------------------------- - -Volume types can also be used to pass options to the IBM Storwize/SVC -driver, which over-ride the default values set in the configuration -file. Contrary to the previous examples where the ``capabilities`` scope -was used to pass parameters to the Cinder scheduler, options can be -passed to the IBM Storwize/SVC driver with the ``drivers`` scope. - -The following ``extra specs`` keys are supported by the IBM Storwize/SVC -driver: - -- rsize -- warning -- autoexpand -- grainsize -- compression -- easytier -- multipath -- iogrp - -These keys have the same semantics as their counterparts in the -configuration file. They are set similarly; for example, ``rsize=2`` or -``compression=False``. - -Example: Volume types ---------------------- - -In the following example, we create a volume type to specify a -controller that supports iSCSI and compression, to use iSCSI when -attaching the volume, and to enable compression: - -.. 
code-block:: console - - $ openstack volume type create compressed - $ openstack volume type set --property capabilities:storage_protocol=' iSCSI' capabilities:compression_support=' True' drivers:compression=True - -We can then create a 50GB volume using this type: - -.. code-block:: console - - $ openstack volume create "compressed volume" --type compressed --size 50 - -In the following example, create a volume type that enables -synchronous replication (metro mirror): - -.. code-block:: console - - $ openstack volume type create ReplicationType - $ openstack volume type set --property replication_type=" metro" \ - --property replication_enabled=' True' --property volume_backend_name=svc234 ReplicationType - -Volume types can be used, for example, to provide users with different - -- performance levels (such as, allocating entirely on an HDD tier, - using Easy Tier for an HDD-SDD mix, or allocating entirely on an SSD - tier) - -- resiliency levels (such as, allocating volumes in pools with - different RAID levels) - -- features (such as, enabling/disabling Real-time Compression, - replication volume creation) - -QOS ---- - -The Storwize driver provides QOS support for storage volumes by -controlling the I/O amount. QOS is enabled by editing the -``etc/cinder/cinder.conf`` file and setting the -``storwize_svc_allow_tenant_qos`` to ``True``. - -There are three ways to set the Storwize ``IOThrotting`` parameter for -storage volumes: - -- Add the ``qos:IOThrottling`` key into a QOS specification and - associate it with a volume type. - -- Add the ``qos:IOThrottling`` key into an extra specification with a - volume type. - -- Add the ``qos:IOThrottling`` key to the storage volume metadata. - -.. note:: - - If you are changing a volume type with QOS to a new volume type - without QOS, the QOS configuration settings will be removed. 
- -Operational notes for the Storwize family and SVC driver -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -Migrate volumes ---------------- - -In the context of OpenStack Block Storage's volume migration feature, -the IBM Storwize/SVC driver enables the storage's virtualization -technology. When migrating a volume from one pool to another, the volume -will appear in the destination pool almost immediately, while the -storage moves the data in the background. - -.. note:: - - To enable this feature, both pools involved in a given volume - migration must have the same values for ``extent_size``. If the - pools have different values for ``extent_size``, the data will still - be moved directly between the pools (not host-side copy), but the - operation will be synchronous. - -Extend volumes --------------- - -The IBM Storwize/SVC driver allows for extending a volume's size, but -only for volumes without snapshots. - -Snapshots and clones --------------------- - -Snapshots are implemented using FlashCopy with no background copy -(space-efficient). Volume clones (volumes created from existing volumes) -are implemented with FlashCopy, but with background copy enabled. This -means that volume clones are independent, full copies. While this -background copy is taking place, attempting to delete or extend the -source volume will result in that operation waiting for the copy to -complete. - -Volume retype -------------- - -The IBM Storwize/SVC driver enables you to modify volume types. When you -modify volume types, you can also change these extra specs properties: - -- rsize - -- warning - -- autoexpand - -- grainsize - -- compression - -- easytier - -- iogrp - -- nofmtdisk - -.. note:: - - When you change the ``rsize``, ``grainsize`` or ``compression`` - properties, volume copies are asynchronously synchronized on the - array. - -.. note:: - - To change the ``iogrp`` property, IBM Storwize/SVC firmware version - 6.4.0 or later is required. 
- -Replication operation ---------------------- - -A volume is only replicated if the volume is created with a volume-type -that has the extra spec ``replication_enabled`` set to `` True``. Two -types of replication are supported now, async (global mirror) and -sync (metro mirror). It can be specified by a volume-type that has the -extra spec ``replication_type`` set to `` global`` or -``replication_type`` set to `` metro``. If no ``replication_type`` is -specified, global mirror will be created for replication. - -.. note:: - - It is better to establish the partnership relationship between - the replication source storage and the replication target - storage manually on the storage back end before replication - volume creation. - -The ``failover-host`` command is designed for the case where the primary -storage is down. - -.. code-block:: console - - $ cinder failover-host cinder@svciscsi --backend_id target_svc_id - -If a failover command has been executed and the primary storage has -been restored, it is possible to do a failback by simply specifying -default as the ``backend_id``: - -.. code-block:: console - - $ cinder failover-host cinder@svciscsi --backend_id default - -.. note:: - - Before you perform a failback operation, synchronize the data - from the replication target volume to the primary one on the - storage back end manually, and do the failback only after the - synchronization is done since the synchronization may take a long time. - If the synchronization is not done manually, Storwize Block Storage - service driver will perform the synchronization and do the failback - after the synchronization is finished. 
diff --git a/doc/source/configuration/block-storage/drivers/infinidat-volume-driver.rst b/doc/source/configuration/block-storage/drivers/infinidat-volume-driver.rst deleted file mode 100644 index 6051b920b..000000000 --- a/doc/source/configuration/block-storage/drivers/infinidat-volume-driver.rst +++ /dev/null @@ -1,182 +0,0 @@ -======================================== -INFINIDAT InfiniBox Block Storage driver -======================================== - -The INFINIDAT Block Storage volume driver provides iSCSI and Fibre Channel -support for INFINIDAT InfiniBox storage systems. - -This section explains how to configure the INFINIDAT driver. - -Supported operations -~~~~~~~~~~~~~~~~~~~~ - -* Create, delete, attach, and detach volumes. -* Create, list, and delete volume snapshots. -* Create a volume from a snapshot. -* Copy a volume to an image. -* Copy an image to a volume. -* Clone a volume. -* Extend a volume. -* Get volume statistics. -* Create, modify, delete, and list consistency groups. -* Create, modify, delete, and list snapshots of consistency groups. -* Create consistency group from consistency group or consistency group - snapshot. - -External package installation -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -The driver requires the ``infinisdk`` package for communicating with -InfiniBox systems. Install the package from PyPI using the following command: - -.. code-block:: console - - $ pip install infinisdk - -Setting up the storage array -~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -Create a storage pool object on the InfiniBox array in advance. -The storage pool will contain volumes managed by OpenStack. -Refer to the InfiniBox manuals for details on pool management. - -Driver configuration -~~~~~~~~~~~~~~~~~~~~ - -Edit the ``cinder.conf`` file, which is usually located under the following -path ``/etc/cinder/cinder.conf``. - -* Add a section for the INFINIDAT driver back end. 
- -* Under the ``[DEFAULT]`` section, set the ``enabled_backends`` parameter with - the name of the new back-end section. - -Configure the driver back-end section with the parameters below. - -* Configure the driver name by setting the following parameter: - - .. code-block:: ini - - volume_driver = cinder.volume.drivers.infinidat.InfiniboxVolumeDriver - -* Configure the management IP of the InfiniBox array by adding the following - parameter: - - .. code-block:: ini - - san_ip = InfiniBox management IP - -* Configure user credentials. - - The driver requires an InfiniBox user with administrative privileges. - We recommend creating a dedicated OpenStack user account - that holds an administrative user role. - Refer to the InfiniBox manuals for details on user account management. - Configure the user credentials by adding the following parameters: - - .. code-block:: ini - - san_login = infinibox_username - san_password = infinibox_password - -* Configure the name of the InfiniBox pool by adding the following parameter: - - .. code-block:: ini - - infinidat_pool_name = Pool defined in InfiniBox - -* The back-end name is an identifier for the back end. - We recommend using the same name as the name of the section. - Configure the back-end name by adding the following parameter: - - .. code-block:: ini - - volume_backend_name = back-end name - -* Thin provisioning. - - The INFINIDAT driver supports creating thin or thick provisioned volumes. - Configure thin or thick provisioning by adding the following parameter: - - .. code-block:: ini - - san_thin_provision = true/false - - This parameter defaults to ``true``. - -* Configure the connectivity protocol. - - The InfiniBox driver supports connection to the InfiniBox system in both - the fibre channel and iSCSI protocols. - Configure the desired protocol by adding the following parameter: - - .. code-block:: ini - - infinidat_storage_protocol = iscsi/fc - - This parameter defaults to ``fc``. 
- -* Configure iSCSI netspaces. - - When using the iSCSI protocol to connect to InfiniBox systems, you must - configure one or more iSCSI network spaces in the InfiniBox storage array. - Refer to the InfiniBox manuals for details on network space management. - Configure the names of the iSCSI network spaces to connect to by adding - the following parameter: - - .. code-block:: ini - - infinidat_iscsi_netspaces = iscsi_netspace - - Multiple network spaces can be specified by a comma separated string. - - This parameter is ignored when using the FC protocol. - -* Configure CHAP - - InfiniBox supports CHAP authentication when using the iSCSI protocol. To - enable CHAP authentication, add the following parameter: - - .. code-block:: ini - - use_chap_auth = true - - To manually define the username and password, add the following parameters: - - .. code-block:: ini - - chap_username = username - chap_password = password - - If the CHAP username or password are not defined, they will be - auto-generated by the driver. - - The CHAP parameters are ignored when using the FC protocol. - - -Configuration example -~~~~~~~~~~~~~~~~~~~~~ - -.. code-block:: ini - - [DEFAULT] - enabled_backends = infinidat-pool-a - - [infinidat-pool-a] - volume_driver = cinder.volume.drivers.infinidat.InfiniboxVolumeDriver - volume_backend_name = infinidat-pool-a - san_ip = 10.1.2.3 - san_login = openstackuser - san_password = openstackpass - san_thin_provision = true - infinidat_pool_name = pool-a - infinidat_storage_protocol = iscsi - infinidat_iscsi_netspaces = default_iscsi_space - -Driver-specific options -~~~~~~~~~~~~~~~~~~~~~~~ - -The following table contains the configuration options that are specific -to the INFINIDAT driver. - -.. 
include:: ../../tables/cinder-infinidat.rst diff --git a/doc/source/configuration/block-storage/drivers/infortrend-volume-driver.rst b/doc/source/configuration/block-storage/drivers/infortrend-volume-driver.rst deleted file mode 100644 index 5a5a66b3b..000000000 --- a/doc/source/configuration/block-storage/drivers/infortrend-volume-driver.rst +++ /dev/null @@ -1,130 +0,0 @@ -======================== -Infortrend volume driver -======================== - -The `Infortrend `__ volume driver is a Block Storage driver -providing iSCSI and Fibre Channel support for Infortrend storages. - -Supported operations -~~~~~~~~~~~~~~~~~~~~ - -The Infortrend volume driver supports the following volume operations: - -* Create, delete, attach, and detach volumes. -* Create and delete a snapshot. -* Create a volume from a snapshot. -* Copy an image to a volume. -* Copy a volume to an image. -* Clone a volume. -* Extend a volume -* Retype a volume. -* Manage and unmanage a volume. -* Migrate a volume with back-end assistance. -* Live migrate an instance with volumes hosted on an Infortrend backend. - -System requirements -~~~~~~~~~~~~~~~~~~~ - -To use the Infortrend volume driver, the following settings are required: - -Set up Infortrend storage -------------------------- - -* Create logical volumes in advance. -* Host side setting ``Peripheral device type`` should be - ``No Device Present (Type=0x7f)``. - -Set up cinder-volume node -------------------------- - -* Install Oracle Java 7 or later. - -* Download the Infortrend storage CLI from the - `release page `__, - and assign it to the default path ``/opt/bin/Infortrend/``. - -Driver configuration -~~~~~~~~~~~~~~~~~~~~ - -On ``cinder-volume`` nodes, set the following in your -``/etc/cinder/cinder.conf``, and use the following options to configure it: - -Driver options --------------- - -.. include:: ../../tables/cinder-infortrend.rst - -iSCSI configuration example ---------------------------- - -.. 
code-block:: ini - - [DEFAULT] - default_volume_type = IFT-ISCSI - enabled_backends = IFT-ISCSI - - [IFT-ISCSI] - volume_driver = cinder.volume.drivers.infortrend.infortrend_iscsi_cli.InfortrendCLIISCSIDriver - volume_backend_name = IFT-ISCSI - infortrend_pools_name = POOL-1,POOL-2 - san_ip = MANAGEMENT_PORT_IP - infortrend_slots_a_channels_id = 0,1,2,3 - infortrend_slots_b_channels_id = 0,1,2,3 - -Fibre Channel configuration example ------------------------------------ - -.. code-block:: ini - - [DEFAULT] - default_volume_type = IFT-FC - enabled_backends = IFT-FC - - [IFT-FC] - volume_driver = cinder.volume.drivers.infortrend.infortrend_fc_cli.InfortrendCLIFCDriver - volume_backend_name = IFT-FC - infortrend_pools_name = POOL-1,POOL-2,POOL-3 - san_ip = MANAGEMENT_PORT_IP - infortrend_slots_a_channels_id = 4,5 - -Multipath configuration ------------------------ - -* Enable multipath for image transfer in ``/etc/cinder/cinder.conf``. - - .. code-block:: ini - - use_multipath_for_image_xfer = True - - Restart the ``cinder-volume`` service. - -* Enable multipath for volume attach and detach in ``/etc/nova/nova.conf``. - - .. code-block:: ini - - [libvirt] - ... - volume_use_multipath = True - ... - - Restart the ``nova-compute`` service. - -Extra spec usage ----------------- - -* ``infortrend:provisioning`` - Defaults to ``full`` provisioning, - the valid values are thin and full. - -* ``infortrend:tiering`` - Defaults to use ``all`` tiering, - the valid values are subsets of 0, 1, 2, 3. - - If multi-pools are configured in ``cinder.conf``, - it can be specified for each pool, separated by semicolon. - - For example: - - ``infortrend:provisioning``: ``POOL-1:thin; POOL-2:full`` - - ``infortrend:tiering``: ``POOL-1:all; POOL-2:0; POOL-3:0,1,3`` - -For more details, see `Infortrend documents `_. 
diff --git a/doc/source/configuration/block-storage/drivers/itri-disco-driver.rst b/doc/source/configuration/block-storage/drivers/itri-disco-driver.rst deleted file mode 100644 index f3fe66b53..000000000 --- a/doc/source/configuration/block-storage/drivers/itri-disco-driver.rst +++ /dev/null @@ -1,24 +0,0 @@ -======================== -ITRI DISCO volume driver -======================== - -Supported operations -~~~~~~~~~~~~~~~~~~~~ - -The DISCO driver supports the following features: - -* Volume create and delete -* Volume attach and detach -* Snapshot create and delete -* Create volume from snapshot -* Get volume stats -* Copy image to volume -* Copy volume to image -* Clone volume -* Extend volume -* Manage and unmanage volume - -Configuration options -~~~~~~~~~~~~~~~~~~~~~ - -.. include:: ../../tables/cinder-disco.rst diff --git a/doc/source/configuration/block-storage/drivers/kaminario-driver.rst b/doc/source/configuration/block-storage/drivers/kaminario-driver.rst deleted file mode 100644 index 160436db1..000000000 --- a/doc/source/configuration/block-storage/drivers/kaminario-driver.rst +++ /dev/null @@ -1,273 +0,0 @@ -======================================================== -Kaminario K2 all-flash array iSCSI and FC volume drivers -======================================================== - -Kaminario's K2 all-flash array leverages a unique software-defined -architecture that delivers highly valued predictable performance, scalability -and cost-efficiency. - -Kaminario's K2 all-flash iSCSI and FC arrays can be used in -OpenStack Block Storage for providing block storage using -``KaminarioISCSIDriver`` class and ``KaminarioFCDriver`` class respectively. - -This documentation explains how to configure and connect the block storage -nodes to one or more K2 all-flash arrays. 
- -Driver requirements -~~~~~~~~~~~~~~~~~~~ - -- Kaminario's K2 all-flash iSCSI and/or FC array - -- K2 REST API version >= 2.2.0 - -- K2 version 5.8 or later is supported - -- ``krest`` python library (version 1.3.1 or later) should be installed on the - Block Storage node using :command:`sudo pip install krest` - -- The Block Storage Node should also have a data path to the K2 array - for the following operations: - - - Create a volume from snapshot - - Clone a volume - - Copy volume to image - - Copy image to volume - - Retype 'dedup without replication'<->'nodedup without replication' - -Supported operations -~~~~~~~~~~~~~~~~~~~~~ - -- Create, delete, attach, and detach volumes. -- Create and delete volume snapshots. -- Create a volume from a snapshot. -- Copy an image to a volume. -- Copy a volume to an image. -- Clone a volume. -- Extend a volume. -- Retype a volume. -- Manage and unmanage a volume. -- Replicate volume with failover and failback support to K2 array. - -Limitations and known issues -~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -If your OpenStack deployment is not set up to use multipath, the network -connectivity of the K2 all-flash array will use a single physical port. - -This may significantly limit the following benefits provided by K2: - -- available bandwidth -- high-availability -- non-disruptive upgrade - -The following steps are required to set up multipath access on the -Compute and the Block Storage nodes: - -#. Install multipath software on both Compute and Block Storage nodes. - - For example: - - .. code-block:: console - - # apt-get install sg3-utils multipath-tools - -#. In the ``[libvirt]`` section of the ``nova.conf`` configuration file, - specify ``iscsi_use_multipath=True``. This option is valid for both iSCSI - and FC drivers. - - Additional resources: Kaminario Host Configuration Guide - for Linux (for configuring multipath) - -#. Restart the compute service for the changes to take effect. - - ..
code-block:: console - - # service nova-compute restart - - -Configure single Kaminario iSCSI/FC back end -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -This section details the steps required to configure the Kaminario -Cinder Driver for single FC or iSCSI backend. - -#. In the ``cinder.conf`` configuration file under the ``[DEFAULT]`` - section, set the ``scheduler_default_filters`` parameter: - - .. code-block:: ini - - [DEFAULT] - scheduler_default_filters = DriverFilter,CapabilitiesFilter - - See following links for more information: - ``_ - ``_ - -#. Under the ``[DEFAULT]`` section, set the enabled_backends parameter - with the iSCSI or FC back-end group - - .. code-block:: ini - - [DEFAULT] - # For iSCSI - enabled_backends = kaminario-iscsi-1 - - # For FC - # enabled_backends = kaminario-fc-1 - -#. Add a back-end group section for back-end group specified - in the enabled_backends parameter - -#. In the newly created back-end group section, set the - following configuration options: - - .. code-block:: ini - - [kaminario-iscsi-1] - # Management IP of Kaminario K2 All-Flash iSCSI/FC array - san_ip = 10.0.0.10 - # Management username of Kaminario K2 All-Flash iSCSI/FC array - san_login = username - # Management password of Kaminario K2 All-Flash iSCSI/FC array - san_password = password - # Enable Kaminario K2 iSCSI/FC driver - volume_driver = cinder.volume.drivers.kaminario.kaminario_iscsi.KaminarioISCSIDriver - # volume_driver = cinder.volume.drivers.kaminario.kaminario_fc.KaminarioFCDriver - - # Backend name - # volume_backend_name = kaminario_fc_1 - volume_backend_name = kaminario_iscsi_1 - - # K2 driver calculates max_oversubscription_ratio on setting below - # option as True. 
Default value is False - # auto_calc_max_oversubscription_ratio = False - - # Set a limit on total number of volumes to be created on K2 array, for example: - # filter_function = "capabilities.total_volumes < 250" - - # For replication, replication_device must be set and the replication peer must be configured - # on the primary and the secondary K2 arrays - # Syntax: - # replication_device = backend_id:,login:,password:,rpo: - # where: - # s-array-ip is the secondary K2 array IP - # rpo must be either 60(1 min) or multiple of 300(5 min) - # Example: - # replication_device = backend_id:10.0.0.50,login:kaminario,password:kaminario,rpo:300 - - # Suppress requests library SSL certificate warnings on setting this option as True - # Default value is 'False' - # suppress_requests_ssl_warnings = False - -#. Restart the Block Storage services for the changes to take effect: - - .. code-block:: console - - # service cinder-api restart - # service cinder-scheduler restart - # service cinder-volume restart - -Setting multiple Kaminario iSCSI/FC back ends -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -The following steps are required to configure multiple K2 iSCSI/FC backends: - -#. In the :file:`cinder.conf` file under the [DEFAULT] section, - set the enabled_backends parameter with the comma-separated - iSCSI/FC back-end groups. - - .. code-block:: ini - - [DEFAULT] - enabled_backends = kaminario-iscsi-1, kaminario-iscsi-2, kaminario-iscsi-3 - -#. Add a back-end group section for each back-end group specified - in the enabled_backends parameter - -#. For each back-end group section, enter the configuration options as - described in the above section - ``Configure single Kaminario iSCSI/FC back end`` - - See `Configure multiple-storage back ends - `__ - for additional information. - -#. Restart the cinder volume service for the changes to take effect. - - .. 
code-block:: console - - # service cinder-volume restart - -Creating volume types -~~~~~~~~~~~~~~~~~~~~~ - -Create volume types for supporting volume creation on -the multiple K2 iSCSI/FC backends. -Set following extras-specs in the volume types: - -- volume_backend_name : Set value of this spec according to the - value of ``volume_backend_name`` in the back-end group sections. - If only this spec is set, then dedup Kaminario cinder volumes will be - created without replication support - - .. code-block:: console - - $ openstack volume type create kaminario_iscsi_dedup_noreplication - $ openstack volume type set --property volume_backend_name=kaminario_iscsi_1 \ - kaminario_iscsi_dedup_noreplication - -- kaminario:thin_prov_type : Set this spec in the volume type for creating - nodedup Kaminario cinder volumes. If this spec is not set, dedup Kaminario - cinder volumes will be created. - -- kaminario:replication : Set this spec in the volume type for creating - replication supported Kaminario cinder volumes. If this spec is not set, - then Kaminario cinder volumes will be created without replication support. - - .. 
code-block:: console - - $ openstack volume type create kaminario_iscsi_dedup_replication - $ openstack volume type set --property volume_backend_name=kaminario_iscsi_1 \ - kaminario:replication=enabled kaminario_iscsi_dedup_replication - - $ openstack volume type create kaminario_iscsi_nodedup_replication - $ openstack volume type set --property volume_backend_name=kaminario_iscsi_1 \ - kaminario:replication=enabled kaminario:thin_prov_type=nodedup \ - kaminario_iscsi_nodedup_replication - - $ openstack volume type create kaminario_iscsi_nodedup_noreplication - $ openstack volume type set --property volume_backend_name=kaminario_iscsi_1 \ - kaminario:thin_prov_type=nodedup kaminario_iscsi_nodedup_noreplication - -Supported retype cases -~~~~~~~~~~~~~~~~~~~~~~ -The following are the supported retypes for Kaminario cinder volumes: - -- Nodedup-noreplication <--> Nodedup-replication - - .. code-block:: console - - $ cinder retype volume-id new-type - -- Dedup-noreplication <--> Dedup-replication - - .. code-block:: console - - $ cinder retype volume-id new-type - -- Dedup-noreplication <--> Nodedup-noreplication - - .. code-block:: console - - $ cinder retype --migration-policy on-demand volume-id new-type - -For non-supported cases, try combinations of the -:command:`cinder retype` command. - -Driver options -~~~~~~~~~~~~~~ - -The following table contains the configuration options that are specific -to the Kaminario K2 FC and iSCSI Block Storage drivers. - -.. 
include:: ../../tables/cinder-kaminario.rst diff --git a/doc/source/configuration/block-storage/drivers/lenovo-driver.rst b/doc/source/configuration/block-storage/drivers/lenovo-driver.rst deleted file mode 100644 index 8bfe3fbf9..000000000 --- a/doc/source/configuration/block-storage/drivers/lenovo-driver.rst +++ /dev/null @@ -1,159 +0,0 @@ -====================================== -Lenovo Fibre Channel and iSCSI drivers -====================================== - -The ``LenovoFCDriver`` and ``LenovoISCSIDriver`` Cinder drivers allow -Lenovo S3200 or S2200 arrays to be used for block storage in OpenStack -deployments. - -System requirements -~~~~~~~~~~~~~~~~~~~ - -To use the Lenovo drivers, the following are required: - -- Lenovo S3200 or S2200 array with: - - - iSCSI or FC host interfaces - - G22x firmware or later - -- Network connectivity between the OpenStack host and the array - management interfaces - -- HTTPS or HTTP must be enabled on the array - -Supported operations -~~~~~~~~~~~~~~~~~~~~ - -- Create, delete, attach, and detach volumes. -- Create, list, and delete volume snapshots. -- Create a volume from a snapshot. -- Copy an image to a volume. -- Copy a volume to an image. -- Clone a volume. -- Extend a volume. -- Migrate a volume with back-end assistance. -- Retype a volume. -- Manage and unmanage a volume. - -Configuring the array -~~~~~~~~~~~~~~~~~~~~~ - -#. Verify that the array can be managed using an HTTPS connection. HTTP can - also be used if ``lenovo_api_protocol=http`` is placed into the - appropriate sections of the ``cinder.conf`` file. - - Confirm that virtual pools A and B are present if you plan to use - virtual pools for OpenStack storage. - -#. Edit the ``cinder.conf`` file to define a storage back-end entry for - each storage pool on the array that will be managed by OpenStack. Each - entry consists of a unique section name, surrounded by square brackets, - followed by options specified in ``key=value`` format. 
- - - The ``lenovo_backend_name`` value specifies the name of the storage - pool on the array. - - - The ``volume_backend_name`` option value can be a unique value, if - you wish to be able to assign volumes to a specific storage pool on - the array, or a name that's shared among multiple storage pools to - let the volume scheduler choose where new volumes are allocated. - - - The rest of the options will be repeated for each storage pool in a - given array: the appropriate Cinder driver name; IP address or - host name of the array management interface; the username and password - of an array user account with ``manage`` privileges; and the iSCSI IP - addresses for the array if using the iSCSI transport protocol. - - In the examples below, two back ends are defined, one for pool A and one - for pool B, and a common ``volume_backend_name`` is used so that a - single volume type definition can be used to allocate volumes from both - pools. - - **Example: iSCSI example back-end entries** - - .. code-block:: ini - - [pool-a] - lenovo_backend_name = A - volume_backend_name = lenovo-array - volume_driver = cinder.volume.drivers.lenovo.lenovo_iscsi.LenovoISCSIDriver - san_ip = 10.1.2.3 - san_login = manage - san_password = !manage - lenovo_iscsi_ips = 10.2.3.4,10.2.3.5 - - [pool-b] - lenovo_backend_name = B - volume_backend_name = lenovo-array - volume_driver = cinder.volume.drivers.lenovo.lenovo_iscsi.LenovoISCSIDriver - san_ip = 10.1.2.3 - san_login = manage - san_password = !manage - lenovo_iscsi_ips = 10.2.3.4,10.2.3.5 - - **Example: Fibre Channel example back-end entries** - - .. 
code-block:: ini - - [pool-a] - lenovo_backend_name = A - volume_backend_name = lenovo-array - volume_driver = cinder.volume.drivers.lenovo.lenovo_fc.LenovoFCDriver - san_ip = 10.1.2.3 - san_login = manage - san_password = !manage - - [pool-b] - lenovo_backend_name = B - volume_backend_name = lenovo-array - volume_driver = cinder.volume.drivers.lenovo.lenovo_fc.LenovoFCDriver - san_ip = 10.1.2.3 - san_login = manage - san_password = !manage - -#. If HTTPS is not enabled in the array, include - ``lenovo_api_protocol = http`` in each of the back-end definitions. - -#. If HTTPS is enabled, you can enable certificate verification with the - option ``lenovo_verify_certificate=True``. You may also use the - ``lenovo_verify_certificate_path`` parameter to specify the path to a - CA_BUNDLE file containing CAs other than those in the default list. - -#. Modify the ``[DEFAULT]`` section of the ``cinder.conf`` file to add an - ``enabled_backends`` parameter specifying the back-end entries you added, - and a ``default_volume_type`` parameter specifying the name of a volume - type that you will create in the next step. - - **Example: [DEFAULT] section changes** - - .. code-block:: ini - - [DEFAULT] - # ... - enabled_backends = pool-a,pool-b - default_volume_type = lenovo - -#. Create a new volume type for each distinct ``volume_backend_name`` value - that you added to the ``cinder.conf`` file. The example below - assumes that the same ``volume_backend_name=lenovo-array`` - option was specified in all of the - entries, and specifies that the volume type ``lenovo`` can be used to - allocate volumes from any of them. - - **Example: Creating a volume type** - - .. code-block:: console - - $ openstack volume type create lenovo - $ openstack volume type set --property volume_backend_name=lenovo-array lenovo - -#. After modifying the ``cinder.conf`` file, - restart the ``cinder-volume`` service. 
- -Driver-specific options -~~~~~~~~~~~~~~~~~~~~~~~ - -The following table contains the configuration options that are specific -to the Lenovo drivers. - -.. include:: ../../tables/cinder-lenovo.rst diff --git a/doc/source/configuration/block-storage/drivers/lvm-volume-driver.rst b/doc/source/configuration/block-storage/drivers/lvm-volume-driver.rst deleted file mode 100644 index 31a655f96..000000000 --- a/doc/source/configuration/block-storage/drivers/lvm-volume-driver.rst +++ /dev/null @@ -1,43 +0,0 @@ -=== -LVM -=== - -The default volume back end uses local volumes managed by LVM. - -This driver supports different transport protocols to attach volumes, -currently iSCSI and iSER. - -Set the following in your ``cinder.conf`` configuration file, and use -the following options to configure for iSCSI transport: - -.. code-block:: ini - - volume_driver = cinder.volume.drivers.lvm.LVMVolumeDriver - iscsi_protocol = iscsi - -Use the following options to configure for the iSER transport: - -.. code-block:: ini - - volume_driver = cinder.volume.drivers.lvm.LVMVolumeDriver - iscsi_protocol = iser - -.. include:: ../../tables/cinder-lvm.rst - -.. caution:: - - When extending an existing volume which has a linked snapshot, the related - logical volume is deactivated. This logical volume is automatically - reactivated unless ``auto_activation_volume_list`` is defined in LVM - configuration file ``lvm.conf``. See the ``lvm.conf`` file for more - information. - - If auto activated volumes are restricted, then include the cinder volume - group into this list: - - .. code-block:: ini - - auto_activation_volume_list = [ "existingVG", "cinder-volumes" ] - - This note does not apply for thinly provisioned volumes - because they do not need to be deactivated. 
diff --git a/doc/source/configuration/block-storage/drivers/nec-storage-m-series-driver.rst b/doc/source/configuration/block-storage/drivers/nec-storage-m-series-driver.rst deleted file mode 100644 index d38eb2cad..000000000 --- a/doc/source/configuration/block-storage/drivers/nec-storage-m-series-driver.rst +++ /dev/null @@ -1,293 +0,0 @@ -=========================== -NEC Storage M series driver -=========================== - -NEC Storage M series are dual-controller disk arrays which support -online maintenance. -This driver supports both iSCSI and Fibre Channel. - -System requirements -~~~~~~~~~~~~~~~~~~~ -Supported models: - -- NEC Storage M110, M310, M510 and M710 (SSD/HDD hybrid) -- NEC Storage M310F and M710F (all flash) - -Requirements: - -- Storage control software (firmware) revision 0950 or later -- NEC Storage DynamicDataReplication license -- (Optional) NEC Storage IO Load Manager license for QoS - - -Supported operations -~~~~~~~~~~~~~~~~~~~~ - - -- Create, delete, attach, and detach volumes. -- Create, list, and delete volume snapshots. -- Create a volume from a snapshot. -- Copy an image to a volume. -- Clone a volume. -- Extend a volume. -- Get volume statistics. - - -Preparation -~~~~~~~~~~~ - -Below is the minimum preparation for a disk array. -For details of each command, see the NEC Storage Manager Command Reference -(IS052). - -- Common (iSCSI and Fibre Channel) - - #. Initial setup - - * Set IP addresses for management and BMC with the network configuration - tool. - * Enter license keys. (iSMcfg licenserelease) - #. Create pools - - * Create pools for volumes. (iSMcfg poolbind) - * Create pools for snapshots. (iSMcfg poolbind) - #. Create system volumes - - * Create a Replication Reserved Volume (RSV) in one of the pools. - (iSMcfg ldbind) - * Create Snapshot Reserve Areas (SRAs) in each snapshot pool. - (iSMcfg srabind) - #. (Optional) Register SSH public key - - -- iSCSI only - - #. Set IP addresses of each iSCSI port. (iSMcfg setiscsiport) - #.
Create a LD Set with setting multi-target mode on. (iSMcfg addldset) - #. Register initiator names of each node. (iSMcfg addldsetinitiator) - - -- Fibre Channel only - - #. Start access control. (iSMcfg startacc) - #. Create a LD Set. (iSMcfg addldset) - #. Register WWPNs of each node. (iSMcfg addldsetpath) - - -Configuration -~~~~~~~~~~~~~ - - -Set the following in your ``cinder.conf``, and use the following options -to configure it. - -If you use Fibre Channel: - -.. code-block:: ini - - [Storage1] - volume_driver = cinder.volume.drivers.nec.volume.MStorageFCDriver - -.. end - - -If you use iSCSI: - -.. code-block:: ini - - [Storage1] - volume_driver = cinder.volume.drivers.nec.volume.MStorageISCSIDriver - -.. end - -Also, set ``volume_backend_name``. - -.. code-block:: ini - - [DEFAULT] - volume_backend_name = Storage1 - -.. end - - -This table shows configuration options for NEC Storage M series driver. - -.. include:: ../../tables/cinder-nec_m.rst - - - -Required options ----------------- - - -- ``nec_ismcli_fip`` - FIP address of M-Series Storage. - -- ``nec_ismcli_user`` - User name for M-Series Storage iSMCLI. - -- ``nec_ismcli_password`` - Password for M-Series Storage iSMCLI. - -- ``nec_ismcli_privkey`` - RSA secret key file name for iSMCLI (for public key authentication only). - Encrypted RSA secret key file cannot be specified. - -- ``nec_diskarray_name`` - Diskarray name of M-Series Storage. - This parameter must be specified to configure multiple groups - (multi back end) by using the same storage device (storage - device that has the same ``nec_ismcli_fip``). Specify the disk - array name targeted by the relevant config-group for this - parameter. - -- ``nec_backup_pools`` - Specify a pool number where snapshots are created. - - -Timeout configuration ---------------------- - - -- ``rpc_response_timeout`` - Set the timeout value in seconds. 
If three or more volumes can be created - at the same time, the reference value is 30 seconds multiplied by the - number of volumes created at the same time. - Also, Specify nova parameters below in ``nova.conf`` file. - - .. code-block:: ini - - [DEFAULT] - block_device_allocate_retries = 120 - block_device_allocate_retries_interval = 10 - - .. end - - -- ``timeout server (HAProxy configuration)`` - In addition, you need to edit the following value in the HAProxy - configuration file (``/etc/haproxy/haproxy.cfg``) in an environment where - HAProxy is used. - - .. code-block:: ini - - timeout server = 600 #Specify a value greater than rpc_response_timeout. - - .. end - - Run the :command:`service haproxy reload` command after editing the - value to reload the HAProxy settings. - - .. note:: - - The OpenStack environment set up using Red Hat OpenStack Platform - Director may be set to use HAProxy. - - -Configuration example for /etc/cinder/cinder.conf -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -When using one config-group ---------------------------- - -- When using ``nec_ismcli_password`` to authenticate iSMCLI - (Password authentication): - - .. code-block:: ini - - [DEFAULT] - enabled_backends = Storage1 - - [Storage1] - volume_driver = cinder.volume.drivers.nec.volume.MStorageISCSIDriver - volume_backend_name = Storage1 - nec_ismcli_fip = 192.168.1.10 - nec_ismcli_user = sysadmin - nec_ismcli_password = sys123 - nec_pools = 0 - nec_backup_pools = 1 - - .. end - - -- When using ``nec_ismcli_privkey`` to authenticate iSMCLI - (Public key authentication): - - .. code-block:: ini - - [DEFAULT] - enabled_backends = Storage1 - - [Storage1] - volume_driver = cinder.volume.drivers.nec.volume.MStorageISCSIDriver - volume_backend_name = Storage1 - nec_ismcli_fip = 192.168.1.10 - nec_ismcli_user = sysadmin - nec_ismcli_privkey = /etc/cinder/id_rsa - nec_pools = 0 - nec_backup_pools = 1 - - .. 
end - - -When using multi config-group (multi-backend) ---------------------------------------------- - -- Four config-groups (backends) - - Storage1, Storage2, Storage3, Storage4 - -- Two disk arrays - - 200000255C3A21CC(192.168.1.10) - Example for using config-group, Storage1 and Storage2 - - 2000000991000316(192.168.1.20) - Example for using config-group, Storage3 and Storage4 - - .. code-block:: ini - - [DEFAULT] - enabled_backends = Storage1,Storage2,Storage3,Storage4 - - [Storage1] - volume_driver = cinder.volume.drivers.nec.volume.MStorageISCSIDriver - volume_backend_name = Gold - nec_ismcli_fip = 192.168.1.10 - nec_ismcli_user = sysadmin - nec_ismcli_password = sys123 - nec_pools = 0 - nec_backup_pools = 2 - nec_diskarray_name = 200000255C3A21CC - - [Storage2] - volume_driver = cinder.volume.drivers.nec.volume.MStorageISCSIDriver - volume_backend_name = Silver - nec_ismcli_fip = 192.168.1.10 - nec_ismcli_user = sysadmin - nec_ismcli_password = sys123 - nec_pools = 1 - nec_backup_pools = 3 - nec_diskarray_name = 200000255C3A21CC - - [Storage3] - volume_driver = cinder.volume.drivers.nec.volume.MStorageISCSIDriver - volume_backend_name = Gold - nec_ismcli_fip = 192.168.1.20 - nec_ismcli_user = sysadmin - nec_ismcli_password = sys123 - nec_pools = 0 - nec_backup_pools = 2 - nec_diskarray_name = 2000000991000316 - - [Storage4] - volume_driver = cinder.volume.drivers.nec.volume.MStorageISCSIDriver - volume_backend_name = Silver - nec_ismcli_fip = 192.168.1.20 - nec_ismcli_user = sysadmin - nec_ismcli_password = sys123 - nec_pools = 1 - nec_backup_pools = 3 - nec_diskarray_name = 2000000991000316 - - .. 
end diff --git a/doc/source/configuration/block-storage/drivers/netapp-volume-driver.rst b/doc/source/configuration/block-storage/drivers/netapp-volume-driver.rst deleted file mode 100644 index 8c9313783..000000000 --- a/doc/source/configuration/block-storage/drivers/netapp-volume-driver.rst +++ /dev/null @@ -1,592 +0,0 @@ -===================== -NetApp unified driver -===================== - -The NetApp unified driver is a Block Storage driver that supports -multiple storage families and protocols. A storage family corresponds to -storage systems built on different NetApp technologies such as clustered -Data ONTAP, Data ONTAP operating in 7-Mode, and E-Series. The storage -protocol refers to the protocol used to initiate data storage and access -operations on those storage systems like iSCSI and NFS. The NetApp -unified driver can be configured to provision and manage OpenStack -volumes on a given storage family using a specified storage protocol. -Also, the NetApp unified driver supports over subscription or over -provisioning when thin provisioned Block Storage volumes are in use -on an E-Series backend. The OpenStack volumes can then be used for -accessing and storing data using the storage protocol on the storage -family system. The NetApp unified driver is an extensible interface -that can support new storage families and protocols. - -.. important:: - - The NetApp unified driver in cinder currently provides integration for - two major generations of the ONTAP operating system: the current - clustered ONTAP and the legacy 7-mode. NetApp’s full support for - 7-mode ended in August of 2015 and the current limited support period - will end in February of 2017. - - The 7-mode components of the cinder NetApp unified driver have now been - marked deprecated and will be removed in the Queens release. This will - apply to all three protocols currently supported in this driver: iSCSI, - FC and NFS. - -.. 
note:: - - With the Juno release of OpenStack, Block Storage has - introduced the concept of storage pools, in which a single - Block Storage back end may present one or more logical - storage resource pools from which Block Storage will - select a storage location when provisioning volumes. - - In releases prior to Juno, the NetApp unified driver contained some - scheduling logic that determined which NetApp storage container - (namely, a FlexVol volume for Data ONTAP, or a dynamic disk pool for - E-Series) that a new Block Storage volume would be placed into. - - With the introduction of pools, all scheduling logic is performed - completely within the Block Storage scheduler, as each - NetApp storage container is directly exposed to the Block - Storage scheduler as a storage pool. Previously, the NetApp - unified driver presented an aggregated view to the scheduler and - made a final placement decision as to which NetApp storage container - the Block Storage volume would be provisioned into. - -NetApp clustered Data ONTAP storage family -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -The NetApp clustered Data ONTAP storage family represents a -configuration group which provides Compute instances access to -clustered Data ONTAP storage systems. At present it can be configured in -Block Storage to work with iSCSI and NFS storage protocols. - -NetApp iSCSI configuration for clustered Data ONTAP ---------------------------------------------------- - -The NetApp iSCSI configuration for clustered Data ONTAP is an interface -from OpenStack to clustered Data ONTAP storage systems. It provisions -and manages the SAN block storage entity, which is a NetApp LUN that -can be accessed using the iSCSI protocol. - -The iSCSI configuration for clustered Data ONTAP is a direct interface -from Block Storage to the clustered Data ONTAP instance and as -such does not require additional management software to achieve the -desired functionality. 
It uses NetApp APIs to interact with the -clustered Data ONTAP instance. - -**Configuration options** - -Configure the volume driver, storage family, and storage protocol to the -NetApp unified driver, clustered Data ONTAP, and iSCSI respectively by -setting the ``volume_driver``, ``netapp_storage_family`` and -``netapp_storage_protocol`` options in the ``cinder.conf`` file as follows: - -.. code-block:: ini - - volume_driver = cinder.volume.drivers.netapp.common.NetAppDriver - netapp_storage_family = ontap_cluster - netapp_storage_protocol = iscsi - netapp_vserver = openstack-vserver - netapp_server_hostname = myhostname - netapp_server_port = port - netapp_login = username - netapp_password = password - -.. note:: - - To use the iSCSI protocol, you must override the default value of - ``netapp_storage_protocol`` with ``iscsi``. - -.. include:: ../../tables/cinder-netapp_cdot_iscsi.rst - -.. note:: - - If you specify an account in the ``netapp_login`` that only has - virtual storage server (Vserver) administration privileges (rather - than cluster-wide administration privileges), some advanced features - of the NetApp unified driver will not work and you may see warnings - in the Block Storage logs. - -.. note:: - - The driver supports iSCSI CHAP uni-directional authentication. - To enable it, set the ``use_chap_auth`` option to ``True``. - -.. tip:: - - For more information on these options and other deployment and - operational scenarios, visit the `NetApp OpenStack Deployment and - Operations - Guide `__. - -NetApp NFS configuration for clustered Data ONTAP -------------------------------------------------- - -The NetApp NFS configuration for clustered Data ONTAP is an interface from -OpenStack to a clustered Data ONTAP system for provisioning and managing -OpenStack volumes on NFS exports provided by the clustered Data ONTAP system -that are accessed using the NFS protocol. 
- -The NFS configuration for clustered Data ONTAP is a direct interface from -Block Storage to the clustered Data ONTAP instance and as such does -not require any additional management software to achieve the desired -functionality. It uses NetApp APIs to interact with the clustered Data ONTAP -instance. - -**Configuration options** - -Configure the volume driver, storage family, and storage protocol to NetApp -unified driver, clustered Data ONTAP, and NFS respectively by setting the -``volume_driver``, ``netapp_storage_family``, and ``netapp_storage_protocol`` -options in the ``cinder.conf`` file as follows: - -.. code-block:: ini - - volume_driver = cinder.volume.drivers.netapp.common.NetAppDriver - netapp_storage_family = ontap_cluster - netapp_storage_protocol = nfs - netapp_vserver = openstack-vserver - netapp_server_hostname = myhostname - netapp_server_port = port - netapp_login = username - netapp_password = password - nfs_shares_config = /etc/cinder/nfs_shares - -.. include:: ../../tables/cinder-netapp_cdot_nfs.rst - -.. note:: - - Additional NetApp NFS configuration options are shared with the - generic NFS driver. These options can be found here: - :ref:`cinder-storage_nfs`. - -.. note:: - - If you specify an account in the ``netapp_login`` that only has - virtual storage server (Vserver) administration privileges (rather - than cluster-wide administration privileges), some advanced features - of the NetApp unified driver will not work and you may see warnings - in the Block Storage logs. - -NetApp NFS Copy Offload client ------------------------------- - -A feature was added in the Icehouse release of the NetApp unified driver that -enables Image service images to be efficiently copied to a destination Block -Storage volume. When the Block Storage and Image service are configured to use -the NetApp NFS Copy Offload client, a controller-side copy will be attempted -before reverting to downloading the image from the Image service. 
This improves -image provisioning times while reducing the consumption of bandwidth and CPU -cycles on the host(s) running the Image and Block Storage services. This is due -to the copy operation being performed completely within the storage cluster. - -The NetApp NFS Copy Offload client can be used in either of the following -scenarios: - -- The Image service is configured to store images in an NFS share that is - exported from a NetApp FlexVol volume *and* the destination for the new Block - Storage volume will be on an NFS share exported from a different FlexVol - volume than the one used by the Image service. Both FlexVols must be located - within the same cluster. - -- The source image from the Image service has already been cached in an NFS - image cache within a Block Storage back end. The cached image resides on a - different FlexVol volume than the destination for the new Block Storage - volume. Both FlexVols must be located within the same cluster. - -To use this feature, you must configure the Image service, as follows: - -- Set the ``default_store`` configuration option to ``file``. - -- Set the ``filesystem_store_datadir`` configuration option to the path - to the Image service NFS export. - -- Set the ``show_image_direct_url`` configuration option to ``True``. - -- Set the ``show_multiple_locations`` configuration option to ``True``. - -- Set the ``filesystem_store_metadata_file`` configuration option to a metadata - file. The metadata file should contain a JSON object that contains the - correct information about the NFS export used by the Image service. - -To use this feature, you must configure the Block Storage service, as follows: - -- Set the ``netapp_copyoffload_tool_path`` configuration option to the path to - the NetApp Copy Offload binary. - -- Set the ``glance_api_version`` configuration option to ``2``. - - .. important:: - - This feature requires that: - - - The storage system must have Data ONTAP v8.2 or greater installed. 
- - - The vStorage feature must be enabled on each storage virtual machine - (SVM, also known as a Vserver) that is permitted to interact with the - copy offload client. - - - To configure the copy offload workflow, enable NFS v4.0 or greater and - export it from the SVM. - -.. tip:: - - To download the NetApp copy offload binary to be utilized in conjunction - with the ``netapp_copyoffload_tool_path`` configuration option, please visit - the Utility Toolchest page at the `NetApp Support portal - `__ - (login is required). - -.. tip:: - - For more information on these options and other deployment and operational - scenarios, visit the `NetApp OpenStack Deployment and Operations Guide - `__. - -NetApp-supported extra specs for clustered Data ONTAP ------------------------------------------------------ - -Extra specs enable vendors to specify extra filter criteria. -The Block Storage scheduler uses the specs when the scheduler determines -which volume node should fulfill a volume provisioning request. -When you use the NetApp unified driver with a clustered Data ONTAP -storage system, you can leverage extra specs with Block Storage -volume types to ensure that Block Storage volumes are created -on storage back ends that have certain properties. -An example of this is when you configure QoS, mirroring, -or compression for a storage back end. - -Extra specs are associated with Block Storage volume types. -When users request volumes of a particular volume type, the volumes -are created on storage back ends that meet the list of requirements. -An example of this is the back ends that have the available space or -extra specs. Use the specs in the following table to configure volumes. -Define Block Storage volume types by using the :command:`openstack volume -type set` command. - -.. 
include:: ../../tables/manual/cinder-netapp_cdot_extraspecs.rst - - -NetApp Data ONTAP operating in 7-Mode storage family -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -The NetApp Data ONTAP operating in 7-Mode storage family represents a -configuration group which provides Compute instances access to 7-Mode -storage systems. At present it can be configured in Block Storage to -work with iSCSI and NFS storage protocols. - -NetApp iSCSI configuration for Data ONTAP operating in 7-Mode -------------------------------------------------------------- - -The NetApp iSCSI configuration for Data ONTAP operating in 7-Mode is an -interface from OpenStack to Data ONTAP operating in 7-Mode storage systems for -provisioning and managing the SAN block storage entity, that is, a LUN which -can be accessed using iSCSI protocol. - -The iSCSI configuration for Data ONTAP operating in 7-Mode is a direct -interface from OpenStack to Data ONTAP operating in 7-Mode storage system and -it does not require additional management software to achieve the desired -functionality. It uses NetApp ONTAPI to interact with the Data ONTAP operating -in 7-Mode storage system. - -**Configuration options** - -Configure the volume driver, storage family and storage protocol to the NetApp -unified driver, Data ONTAP operating in 7-Mode, and iSCSI respectively by -setting the ``volume_driver``, ``netapp_storage_family`` and -``netapp_storage_protocol`` options in the ``cinder.conf`` file as follows: - -.. code-block:: ini - - volume_driver = cinder.volume.drivers.netapp.common.NetAppDriver - netapp_storage_family = ontap_7mode - netapp_storage_protocol = iscsi - netapp_server_hostname = myhostname - netapp_server_port = 80 - netapp_login = username - netapp_password = password - -.. note:: - - To use the iSCSI protocol, you must override the default value of - ``netapp_storage_protocol`` with ``iscsi``. - -.. include:: ../../tables/cinder-netapp_7mode_iscsi.rst - -.. 
note:: - - The driver supports iSCSI CHAP uni-directional authentication. - To enable it, set the ``use_chap_auth`` option to ``True``. - -.. tip:: - - For more information on these options and other deployment and - operational scenarios, visit the `NetApp OpenStack Deployment and - Operations - Guide `__. - -NetApp NFS configuration for Data ONTAP operating in 7-Mode ------------------------------------------------------------ - -The NetApp NFS configuration for Data ONTAP operating in 7-Mode is an interface -from OpenStack to Data ONTAP operating in 7-Mode storage system for -provisioning and managing OpenStack volumes on NFS exports provided by the Data -ONTAP operating in 7-Mode storage system which can then be accessed using NFS -protocol. - -The NFS configuration for Data ONTAP operating in 7-Mode is a direct interface -from Block Storage to the Data ONTAP operating in 7-Mode instance and -as such does not require any additional management software to achieve the -desired functionality. It uses NetApp ONTAPI to interact with the Data ONTAP -operating in 7-Mode storage system. - - -.. important:: - Support for 7-mode configuration has been deprecated in the Ocata release - and will be removed in the Queens release of OpenStack. - -**Configuration options** - -Configure the volume driver, storage family, and storage protocol to the NetApp -unified driver, Data ONTAP operating in 7-Mode, and NFS respectively by setting -the ``volume_driver``, ``netapp_storage_family`` and -``netapp_storage_protocol`` options in the ``cinder.conf`` file as follows: - -.. code-block:: ini - - volume_driver = cinder.volume.drivers.netapp.common.NetAppDriver - netapp_storage_family = ontap_7mode - netapp_storage_protocol = nfs - netapp_server_hostname = myhostname - netapp_server_port = 80 - netapp_login = username - netapp_password = password - nfs_shares_config = /etc/cinder/nfs_shares - -.. include:: ../../tables/cinder-netapp_7mode_nfs.rst - -.. 
note:: - - Additional NetApp NFS configuration options are shared with the - generic NFS driver. For a description of these, see - :ref:`cinder-storage_nfs`. - -.. tip:: - - For more information on these options and other deployment and - operational scenarios, visit the `NetApp OpenStack Deployment and - Operations - Guide `__. - -NetApp E-Series storage family -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -The NetApp E-Series storage family represents a configuration group which -provides OpenStack compute instances access to E-Series storage systems. At -present it can be configured in Block Storage to work with the iSCSI -storage protocol. - -NetApp iSCSI configuration for E-Series ---------------------------------------- - -The NetApp iSCSI configuration for E-Series is an interface from OpenStack to -E-Series storage systems. It provisions and manages the SAN block storage -entity, which is a NetApp LUN which can be accessed using the iSCSI protocol. - -The iSCSI configuration for E-Series is an interface from Block -Storage to the E-Series proxy instance and as such requires the deployment of -the proxy instance in order to achieve the desired functionality. The driver -uses REST APIs to interact with the E-Series proxy instance, which in turn -interacts directly with the E-Series controllers. - -The use of multipath and DM-MP are required when using the Block -Storage driver for E-Series. In order for Block Storage and OpenStack -Compute to take advantage of multiple paths, the following configuration -options must be correctly configured: - -- The ``use_multipath_for_image_xfer`` option should be set to ``True`` in the - ``cinder.conf`` file within the driver-specific stanza (for example, - ``[myDriver]``). - -- The ``iscsi_use_multipath`` option should be set to ``True`` in the - ``nova.conf`` file within the ``[libvirt]`` stanza. 
- -**Configuration options** - -Configure the volume driver, storage family, and storage protocol to the -NetApp unified driver, E-Series, and iSCSI respectively by setting the -``volume_driver``, ``netapp_storage_family`` and -``netapp_storage_protocol`` options in the ``cinder.conf`` file as follows: - -.. code-block:: ini - - volume_driver = cinder.volume.drivers.netapp.common.NetAppDriver - netapp_storage_family = eseries - netapp_storage_protocol = iscsi - netapp_server_hostname = myhostname - netapp_server_port = 80 - netapp_login = username - netapp_password = password - netapp_controller_ips = 1.2.3.4,5.6.7.8 - netapp_sa_password = arrayPassword - netapp_storage_pools = pool1,pool2 - use_multipath_for_image_xfer = True - -.. note:: - - To use the E-Series driver, you must override the default value of - ``netapp_storage_family`` with ``eseries``. - - To use the iSCSI protocol, you must override the default value of - ``netapp_storage_protocol`` with ``iscsi``. - -.. include:: ../../tables/cinder-netapp_eseries_iscsi.rst - -.. tip:: - - For more information on these options and other deployment and - operational scenarios, visit the `NetApp OpenStack Deployment and - Operations - Guide `__. - -NetApp-supported extra specs for E-Series ------------------------------------------ - -Extra specs enable vendors to specify extra filter criteria. -The Block Storage scheduler uses the specs when the scheduler determines -which volume node should fulfill a volume provisioning request. -When you use the NetApp unified driver with an E-Series storage system, -you can leverage extra specs with Block Storage volume types to ensure -that Block Storage volumes are created on storage back ends that have -certain properties. An example of this is when you configure thin -provisioning for a storage back end. - -Extra specs are associated with Block Storage volume types. 
-When users request volumes of a particular volume type, the volumes are -created on storage back ends that meet the list of requirements. -An example of this is the back ends that have the available space or -extra specs. Use the specs in the following table to configure volumes. -Define Block Storage volume types by using the :command:`openstack volume -type set` command. - -.. list-table:: Description of extra specs options for NetApp Unified Driver with E-Series - :header-rows: 1 - - * - Extra spec - - Type - - Description - * - ``netapp_thin_provisioned`` - - Boolean - - Limit the candidate volume list to only the ones that support thin - provisioning on the storage controller. - -Upgrading prior NetApp drivers to the NetApp unified driver -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -NetApp introduced a new unified block storage driver in Havana for configuring -different storage families and storage protocols. This requires defining an -upgrade path for NetApp drivers which existed in releases prior to Havana. This -section covers the upgrade configuration for NetApp drivers to the new unified -configuration and a list of deprecated NetApp drivers. - -Upgraded NetApp drivers ------------------------ - -This section describes how to update Block Storage configuration from -a pre-Havana release to the unified driver format. - -- NetApp iSCSI direct driver for Clustered Data ONTAP in Grizzly (or earlier): - - .. code-block:: ini - - volume_driver = cinder.volume.drivers.netapp.iscsi.NetAppDirectCmodeISCSIDriver - - NetApp unified driver configuration: - - .. code-block:: ini - - volume_driver = cinder.volume.drivers.netapp.common.NetAppDriver - netapp_storage_family = ontap_cluster - netapp_storage_protocol = iscsi - -- NetApp NFS direct driver for Clustered Data ONTAP in Grizzly (or - earlier): - - .. 
code-block:: ini - - volume_driver = cinder.volume.drivers.netapp.nfs.NetAppDirectCmodeNfsDriver - - NetApp unified driver configuration: - - .. code-block:: ini - - volume_driver = cinder.volume.drivers.netapp.common.NetAppDriver - netapp_storage_family = ontap_cluster - netapp_storage_protocol = nfs - -- NetApp iSCSI direct driver for Data ONTAP operating in 7-Mode storage - controller in Grizzly (or earlier): - - .. code-block:: ini - - volume_driver = cinder.volume.drivers.netapp.iscsi.NetAppDirect7modeISCSIDriver - - NetApp unified driver configuration: - - .. code-block:: ini - - volume_driver = cinder.volume.drivers.netapp.common.NetAppDriver - netapp_storage_family = ontap_7mode - netapp_storage_protocol = iscsi - -- NetApp NFS direct driver for Data ONTAP operating in 7-Mode storage - controller in Grizzly (or earlier): - - .. code-block:: ini - - volume_driver = cinder.volume.drivers.netapp.nfs.NetAppDirect7modeNfsDriver - - NetApp unified driver configuration: - - .. code-block:: ini - - volume_driver = cinder.volume.drivers.netapp.common.NetAppDriver - netapp_storage_family = ontap_7mode - netapp_storage_protocol = nfs - -Deprecated NetApp drivers -------------------------- - -This section lists the NetApp drivers in earlier releases that are -deprecated in Havana. - -- NetApp iSCSI driver for clustered Data ONTAP: - - .. code-block:: ini - - volume_driver = cinder.volume.drivers.netapp.iscsi.NetAppCmodeISCSIDriver - -- NetApp NFS driver for clustered Data ONTAP: - - .. code-block:: ini - - volume_driver = cinder.volume.drivers.netapp.nfs.NetAppCmodeNfsDriver - -- NetApp iSCSI driver for Data ONTAP operating in 7-Mode storage - controller: - - .. code-block:: ini - - volume_driver = cinder.volume.drivers.netapp.iscsi.NetAppISCSIDriver - -- NetApp NFS driver for Data ONTAP operating in 7-Mode storage - controller: - - .. code-block:: ini - - volume_driver = cinder.volume.drivers.netapp.nfs.NetAppNFSDriver - -.. 
note:: - - For support information on deprecated NetApp drivers in the Havana - release, visit the `NetApp OpenStack Deployment and Operations - Guide `__. diff --git a/doc/source/configuration/block-storage/drivers/nexentaedge-driver.rst b/doc/source/configuration/block-storage/drivers/nexentaedge-driver.rst deleted file mode 100644 index 8bd72217d..000000000 --- a/doc/source/configuration/block-storage/drivers/nexentaedge-driver.rst +++ /dev/null @@ -1,159 +0,0 @@ -=============================== -NexentaEdge NBD & iSCSI drivers -=============================== - -NexentaEdge is designed from the ground-up to deliver high performance Block -and Object storage services and limitless scalability to next generation -OpenStack clouds, petabyte scale active archives and Big Data applications. -NexentaEdge runs on shared nothing clusters of industry standard Linux -servers, and builds on Nexenta IP and patent pending Cloud Copy On Write (CCOW) -technology to break new ground in terms of reliability, functionality and cost -efficiency. - -For user documentation, see the -`Nexenta Documentation Center `_. - - -iSCSI driver -~~~~~~~~~~~~ - -The NexentaEdge cluster must be installed and configured according to the -relevant Nexenta documentation. A cluster, tenant, bucket must be pre-created, -as well as an iSCSI service on the NexentaEdge gateway node. - -The NexentaEdge iSCSI driver is selected using the normal procedures for one -or multiple back-end volume drivers. - -You must configure these items for each NexentaEdge cluster that the iSCSI -volume driver controls: - -#. Make the following changes on the storage node ``/etc/cinder/cinder.conf`` - file. - - .. 
code-block:: ini - - # Enable Nexenta iSCSI driver - volume_driver = cinder.volume.drivers.nexenta.nexentaedge.iscsi.NexentaEdgeISCSIDriver - - # Specify the ip address for Rest API (string value) - nexenta_rest_address = MANAGEMENT-NODE-IP - - # Port for Rest API (integer value) - nexenta_rest_port=8080 - - # Protocol used for Rest calls (string value, default=http) - nexenta_rest_protocol = http - - # Username for NexentaEdge Rest (string value) - nexenta_user=USERNAME - - # Password for NexentaEdge Rest (string value) - nexenta_password=PASSWORD - - # Path to bucket containing iSCSI LUNs (string value) - nexenta_lun_container = CLUSTER/TENANT/BUCKET - - # Name of pre-created iSCSI service (string value) - nexenta_iscsi_service = SERVICE-NAME - - # IP address of the gateway node attached to iSCSI service above or - # virtual IP address if an iSCSI Storage Service Group is configured in - # HA mode (string value) - nexenta_client_address = GATEWAY-NODE-IP - - -#. Save the changes to the ``/etc/cinder/cinder.conf`` file and - restart the ``cinder-volume`` service. - -Supported operations --------------------- - -* Create, delete, attach, and detach volumes. - -* Create, list, and delete volume snapshots. - -* Create a volume from a snapshot. - -* Copy an image to a volume. - -* Copy a volume to an image. - -* Clone a volume. - -* Extend a volume. - - -NBD driver -~~~~~~~~~~ - -As an alternative to using iSCSI, Amazon S3, or OpenStack Swift protocols, -NexentaEdge can provide access to cluster storage via a Network Block Device -(NBD) interface. - -The NexentaEdge cluster must be installed and configured according to the -relevant Nexenta documentation. A cluster, tenant, bucket must be pre-created. -The driver requires NexentaEdge Service to run on Hypervisor (Nova) node. -The node must sit on Replicast Network and only runs NexentaEdge service, does -not require physical disks. 
- -You must configure these items for each NexentaEdge cluster that the NBD -volume driver controls: - -#. Make the following changes on storage node ``/etc/cinder/cinder.conf`` - file. - - .. code-block:: ini - - # Enable Nexenta NBD driver - volume_driver = cinder.volume.drivers.nexenta.nexentaedge.nbd.NexentaEdgeNBDDriver - - # Specify the ip address for Rest API (string value) - nexenta_rest_address = MANAGEMENT-NODE-IP - - # Port for Rest API (integer value) - nexenta_rest_port = 8080 - - # Protocol used for Rest calls (string value, default=http) - nexenta_rest_protocol = http - - # Username for NexentaEdge Rest (string value) - nexenta_rest_user = USERNAME - - # Password for NexentaEdge Rest (string value) - nexenta_rest_password = PASSWORD - - # Path to bucket containing iSCSI LUNs (string value) - nexenta_lun_container = CLUSTER/TENANT/BUCKET - - # Path to directory to store symbolic links to block devices - # (string value, default=/dev/disk/by-path) - nexenta_nbd_symlinks_dir = /PATH/TO/SYMBOLIC/LINKS - - -#. Save the changes to the ``/etc/cinder/cinder.conf`` file and - restart the ``cinder-volume`` service. - -Supported operations --------------------- - -* Create, delete, attach, and detach volumes. - -* Create, list, and delete volume snapshots. - -* Create a volume from a snapshot. - -* Copy an image to a volume. - -* Copy a volume to an image. - -* Clone a volume. - -* Extend a volume. - - -Driver options -~~~~~~~~~~~~~~ - -Nexenta Driver supports these options: - -.. 
include:: ../../tables/cinder-nexenta_edge.rst diff --git a/doc/source/configuration/block-storage/drivers/nexentastor4-driver.rst b/doc/source/configuration/block-storage/drivers/nexentastor4-driver.rst deleted file mode 100644 index ccd7cf5e2..000000000 --- a/doc/source/configuration/block-storage/drivers/nexentastor4-driver.rst +++ /dev/null @@ -1,141 +0,0 @@ -===================================== -NexentaStor 4.x NFS and iSCSI drivers -===================================== - -NexentaStor is an Open Source-driven Software-Defined Storage (OpenSDS) -platform delivering unified file (NFS and SMB) and block (FC and iSCSI) -storage services, runs on industry standard hardware, scales from tens of -terabytes to petabyte configurations, and includes all data management -functionality by default. - -For NexentaStor 4.x user documentation, visit -https://nexenta.com/products/downloads/nexentastor. - -Supported operations -~~~~~~~~~~~~~~~~~~~~ - -* Create, delete, attach, and detach volumes. - -* Create, list, and delete volume snapshots. - -* Create a volume from a snapshot. - -* Copy an image to a volume. - -* Copy a volume to an image. - -* Clone a volume. - -* Extend a volume. - -* Migrate a volume. - -* Change volume type. - -Nexenta iSCSI driver -~~~~~~~~~~~~~~~~~~~~ - -The Nexenta iSCSI driver allows you to use a NexentaStor appliance to store -Compute volumes. Every Compute volume is represented by a single zvol in a -predefined Nexenta namespace. The Nexenta iSCSI volume driver should work with -all versions of NexentaStor. - -The NexentaStor appliance must be installed and configured according to the -relevant Nexenta documentation. A volume and an enclosing namespace must be -created for all iSCSI volumes to be accessed through the volume driver. This -should be done as specified in the release-specific NexentaStor documentation. - -The NexentaStor Appliance iSCSI driver is selected using the normal procedures -for one or multiple backend volume drivers. 
- -You must configure these items for each NexentaStor appliance that the iSCSI -volume driver controls: - -#. Make the following changes on the volume node ``/etc/cinder/cinder.conf`` - file. - - .. code-block:: ini - - # Enable Nexenta iSCSI driver - volume_driver=cinder.volume.drivers.nexenta.iscsi.NexentaISCSIDriver - - # IP address of NexentaStor host (string value) - nexenta_host=HOST-IP - - # Username for NexentaStor REST (string value) - nexenta_user=USERNAME - - # Port for Rest API (integer value) - nexenta_rest_port=8457 - - # Password for NexentaStor REST (string value) - nexenta_password=PASSWORD - - # Volume on NexentaStor appliance (string value) - nexenta_volume=volume_name - - -.. note:: - - nexenta_volume represents a zpool which is called volume on NS appliance. It must be pre-created before enabling the driver. - - -#. Save the changes to the ``/etc/cinder/cinder.conf`` file and - restart the ``cinder-volume`` service. - - - -Nexenta NFS driver -~~~~~~~~~~~~~~~~~~ -The Nexenta NFS driver allows you to use NexentaStor appliance to store -Compute volumes via NFS. Every Compute volume is represented by a single -NFS file within a shared directory. - -While the NFS protocols standardize file access for users, they do not -standardize administrative actions such as taking snapshots or replicating -file systems. The OpenStack Volume Drivers bring a common interface to these -operations. The Nexenta NFS driver implements these standard actions using -the ZFS management plane that is already deployed on NexentaStor appliances. - -The Nexenta NFS volume driver should work with all versions of NexentaStor. -The NexentaStor appliance must be installed and configured according to the -relevant Nexenta documentation. A single-parent file system must be created -for all virtual disk directories supported for OpenStack. This directory must -be created and exported on each NexentaStor appliance. 
This should be done as -specified in the release-specific NexentaStor documentation. - -You must configure these items for each NexentaStor appliance that the NFS -volume driver controls: - -#. Make the following changes on the volume node ``/etc/cinder/cinder.conf`` - file. - - .. code-block:: ini - - # Enable Nexenta NFS driver - volume_driver=cinder.volume.drivers.nexenta.nfs.NexentaNfsDriver - - # Path to shares config file - nexenta_shares_config=/home/ubuntu/shares.cfg - - .. note:: - - Add your list of Nexenta NFS servers to the file you specified with the - ``nexenta_shares_config`` option. For example, this is how this file should look: - - .. code-block:: bash - - 192.168.1.200:/volumes/VOLUME_NAME/NFS_SHARE http://USER:PASSWORD@192.168.1.200:8457 - 192.168.1.201:/volumes/VOLUME_NAME/NFS_SHARE http://USER:PASSWORD@192.168.1.201:8457 - 192.168.1.202:/volumes/VOLUME_NAME/NFS_SHARE http://USER:PASSWORD@192.168.1.202:8457 - -Each line in this file represents an NFS share. The first part of the line is -the NFS share URL, the second part is the connection URL to the NexentaStor -Appliance. - -Driver options -~~~~~~~~~~~~~~ - -Nexenta Driver supports these options: - -.. include:: ../../tables/cinder-nexenta.rst diff --git a/doc/source/configuration/block-storage/drivers/nexentastor5-driver.rst b/doc/source/configuration/block-storage/drivers/nexentastor5-driver.rst deleted file mode 100644 index 30802aab8..000000000 --- a/doc/source/configuration/block-storage/drivers/nexentastor5-driver.rst +++ /dev/null @@ -1,153 +0,0 @@ -===================================== -NexentaStor 5.x NFS and iSCSI drivers -===================================== - -NexentaStor is an Open Source-driven Software-Defined Storage (OpenSDS) -platform delivering unified file (NFS and SMB) and block (FC and iSCSI) -storage services. 
NexentaStor runs on industry standard hardware, scales from -tens of terabytes to petabyte configurations, and includes all data management -functionality by default. - -For user documentation, see the -`Nexenta Documentation Center `__. - -Supported operations -~~~~~~~~~~~~~~~~~~~~ - -* Create, delete, attach, and detach volumes. - -* Create, list, and delete volume snapshots. - -* Create a volume from a snapshot. - -* Copy an image to a volume. - -* Copy a volume to an image. - -* Clone a volume. - -* Extend a volume. - -* Migrate a volume. - -* Change volume type. - -iSCSI driver -~~~~~~~~~~~~ - -The NexentaStor appliance must be installed and configured according to the -relevant Nexenta documentation. A pool and an enclosing namespace must be -created for all iSCSI volumes to be accessed through the volume driver. This -should be done as specified in the release-specific NexentaStor documentation. - -The NexentaStor Appliance iSCSI driver is selected using the normal procedures -for one or multiple back-end volume drivers. - - -You must configure these items for each NexentaStor appliance that the iSCSI -volume driver controls: - -#. Make the following changes on the volume node ``/etc/cinder/cinder.conf`` - file. - - .. code-block:: ini - - # Enable Nexenta iSCSI driver - volume_driver=cinder.volume.drivers.nexenta.ns5.iscsi.NexentaISCSIDriver - - # IP address of NexentaStor host (string value) - nexenta_host=HOST-IP - - # Port for Rest API (integer value) - nexenta_rest_port=8080 - - # Username for NexentaStor Rest (string value) - nexenta_user=USERNAME - - # Password for NexentaStor Rest (string value) - nexenta_password=PASSWORD - - # Pool on NexentaStor appliance (string value) - nexenta_volume=volume_name - - # Name of a parent Volume group where cinder created zvols will reside (string value) - nexenta_volume_group = iscsi - - .. note:: - - nexenta_volume represents a zpool, which is called pool on NS 5.x appliance. 
- - It must be pre-created before enabling the driver. - - Volume group does not need to be pre-created, the driver will create it if it does not exist. - -#. Save the changes to the ``/etc/cinder/cinder.conf`` file and - restart the ``cinder-volume`` service. - -NFS driver -~~~~~~~~~~ -The Nexenta NFS driver allows you to use NexentaStor appliance to store -Compute volumes via NFS. Every Compute volume is represented by a single -NFS file within a shared directory. - -While the NFS protocols standardize file access for users, they do not -standardize administrative actions such as taking snapshots or replicating -file systems. The OpenStack Volume Drivers bring a common interface to these -operations. The Nexenta NFS driver implements these standard actions using the -ZFS management plane that is already deployed on NexentaStor appliances. - -The NexentaStor appliance must be installed and configured according to the -relevant Nexenta documentation. A single-parent file system must be created -for all virtual disk directories supported for OpenStack. -Create and export the directory on each NexentaStor appliance. - -You must configure these items for each NexentaStor appliance that the NFS -volume driver controls: - -#. Make the following changes on the volume node ``/etc/cinder/cinder.conf`` - file. - - .. code-block:: ini - - # Enable Nexenta NFS driver - volume_driver=cinder.volume.drivers.nexenta.ns5.nfs.NexentaNfsDriver - - # IP address or Hostname of NexentaStor host (string value) - nas_host=HOST-IP - - # Port for Rest API (integer value) - nexenta_rest_port=8080 - - # Path to parent filesystem (string value) - nas_share_path=POOL/FILESYSTEM - - # Specify NFS version - nas_mount_options=vers=4 - -#. Create filesystem on appliance and share via NFS. For example: - - .. 
code-block:: vim - - "securityContexts": [ - {"readWriteList": [{"allow": true, "etype": "fqnip", "entity": "1.1.1.1"}], - "root": [{"allow": true, "etype": "fqnip", "entity": "1.1.1.1"}], - "securityModes": ["sys"]}] - -#. Create ACL for the filesystem. For example: - - .. code-block:: json - - {"type": "allow", - "principal": "everyone@", - "permissions": ["list_directory","read_data","add_file","write_data", - "add_subdirectory","append_data","read_xattr","write_xattr","execute", - "delete_child","read_attributes","write_attributes","delete","read_acl", - "write_acl","write_owner","synchronize"], - "flags": ["file_inherit","dir_inherit"]} - - -Driver options -~~~~~~~~~~~~~~ - -Nexenta Driver supports these options: - -.. include:: ../../tables/cinder-nexenta5.rst diff --git a/doc/source/configuration/block-storage/drivers/nfs-volume-driver.rst b/doc/source/configuration/block-storage/drivers/nfs-volume-driver.rst deleted file mode 100644 index 4d99eb842..000000000 --- a/doc/source/configuration/block-storage/drivers/nfs-volume-driver.rst +++ /dev/null @@ -1,157 +0,0 @@ -========== -NFS driver -========== - -The Network File System (NFS) is a distributed file system protocol -originally developed by Sun Microsystems in 1984. An NFS server -``exports`` one or more of its file systems, known as ``shares``. -An NFS client can mount these exported shares on its own file system. -You can perform file actions on this mounted remote file system as -if the file system were local. - -How the NFS driver works -~~~~~~~~~~~~~~~~~~~~~~~~ - -The NFS driver, and other drivers based on it, work quite differently -than a traditional block storage driver. - -The NFS driver does not actually allow an instance to access a storage -device at the block level. Instead, files are created on an NFS share -and mapped to instances, which emulates a block device. -This works in a similar way to QEMU, which stores instances in the -``/var/lib/nova/instances`` directory. 
- -Enable the NFS driver and related options -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -To use Cinder with the NFS driver, first set the ``volume_driver`` -in the ``cinder.conf`` configuration file: - -.. code-block:: ini - - volume_driver=cinder.volume.drivers.nfs.NfsDriver - -The following table contains the options supported by the NFS driver. - -.. include:: ../../tables/cinder-storage_nfs.rst - -.. note:: - - As of the Icehouse release, the NFS driver (and other drivers based - off it) will attempt to mount shares using version 4.1 of the NFS - protocol (including pNFS). If the mount attempt is unsuccessful due - to a lack of client or server support, a subsequent mount attempt - that requests the default behavior of the :command:`mount.nfs` command - will be performed. On most distributions, the default behavior is to - attempt mounting first with NFS v4.0, then silently fall back to NFS - v3.0 if necessary. If the ``nfs_mount_options`` configuration option - contains a request for a specific version of NFS to be used, or if - specific options are specified in the shares configuration file - specified by the ``nfs_shares_config`` configuration option, the - mount will be attempted as requested with no subsequent attempts. - -How to use the NFS driver -~~~~~~~~~~~~~~~~~~~~~~~~~ - -Creating an NFS server is outside the scope of this document. - -Configure with one NFS server ------------------------------ - -This example assumes access to the following NFS server and mount point: - -* 192.168.1.200:/storage - -This example demonstrates the usage of this driver with one NFS server. - -Set the ``nas_host`` option to the IP address or host name of your NFS -server, and the ``nas_share_path`` option to the NFS export path: - -.. code-block:: ini - - nas_host = 192.168.1.200 - nas_share_path = /storage - -Configure with multiple NFS servers ------------------------------------ - -.. 
note:: - - You can use the multiple NFS servers with `cinder multi back ends - `_ feature. - Configure the :ref:`enabled_backends ` option with - multiple values, and use the ``nas_host`` and ``nas_share`` options - for each back end as described above. - -The below example is another method to use multiple NFS servers, -and demonstrates the usage of this driver with multiple NFS servers. -Multiple servers are not required. One is usually enough. - -This example assumes access to the following NFS servers and mount points: - -* 192.168.1.200:/storage -* 192.168.1.201:/storage -* 192.168.1.202:/storage - -#. Add your list of NFS servers to the file you specified with the - ``nfs_shares_config`` option. For example, if the value of this option - was set to ``/etc/cinder/shares.txt`` file, then: - - .. code-block:: console - - # cat /etc/cinder/shares.txt - 192.168.1.200:/storage - 192.168.1.201:/storage - 192.168.1.202:/storage - - Comments are allowed in this file. They begin with a ``#``. - -#. Configure the ``nfs_mount_point_base`` option. This is a directory - where ``cinder-volume`` mounts all NFS shares stored in the ``shares.txt`` - file. For this example, ``/var/lib/cinder/nfs`` is used. You can, - of course, use the default value of ``$state_path/mnt``. - -#. Start the ``cinder-volume`` service. ``/var/lib/cinder/nfs`` should - now contain a directory for each NFS share specified in the ``shares.txt`` - file. The name of each directory is a hashed name: - - .. code-block:: console - - # ls /var/lib/cinder/nfs/ - ... - 46c5db75dc3a3a50a10bfd1a456a9f3f - ... - -#. You can now create volumes as you normally would: - - .. code-block:: console - - $ openstack volume create --size 5 MYVOLUME - # ls /var/lib/cinder/nfs/46c5db75dc3a3a50a10bfd1a456a9f3f - volume-a8862558-e6d6-4648-b5df-bb84f31c8935 - -This volume can also be attached and deleted just like other volumes. -However, snapshotting is **not** supported. 
- -NFS driver notes -~~~~~~~~~~~~~~~~ - -* ``cinder-volume`` manages the mounting of the NFS shares as well as - volume creation on the shares. Keep this in mind when planning your - OpenStack architecture. If you have one master NFS server, it might - make sense to only have one ``cinder-volume`` service to handle all - requests to that NFS server. However, if that single server is unable - to handle all requests, more than one ``cinder-volume`` service is - needed as well as potentially more than one NFS server. - -* Because data is stored in a file and not actually on a block storage - device, you might not see the same IO performance as you would with - a traditional block storage driver. Please test accordingly. - -* Despite possible IO performance loss, having volume data stored in - a file might be beneficial. For example, backing up volumes can be - as easy as copying the volume files. - -.. note:: - - Regular IO flushing and syncing still stands. diff --git a/doc/source/configuration/block-storage/drivers/nimble-volume-driver.rst b/doc/source/configuration/block-storage/drivers/nimble-volume-driver.rst deleted file mode 100644 index 1c5763b20..000000000 --- a/doc/source/configuration/block-storage/drivers/nimble-volume-driver.rst +++ /dev/null @@ -1,134 +0,0 @@ -============================ -Nimble Storage volume driver -============================ - -Nimble Storage fully integrates with the OpenStack platform through -the Nimble Cinder driver, allowing a host to configure and manage Nimble -Storage array features through Block Storage interfaces. - -Support for iSCSI storage protocol is available with NimbleISCSIDriver -Volume Driver class and Fibre Channel with NimbleFCDriver. - -Support for the Liberty release and above is available from Nimble OS -2.3.8 or later. 
- -Supported operations -~~~~~~~~~~~~~~~~~~~~ - -* Create, delete, clone, attach, and detach volumes -* Create and delete volume snapshots -* Create a volume from a snapshot -* Copy an image to a volume -* Copy a volume to an image -* Extend a volume -* Get volume statistics -* Manage and unmanage a volume -* Enable encryption and default performance policy for a volume-type - extra-specs -* Force backup of an in-use volume. - -Nimble Storage driver configuration -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -Update the file ``/etc/cinder/cinder.conf`` with the given configuration. - -In case of a basic (single back-end) configuration, add the parameters -within the ``[default]`` section as follows. - -.. code-block:: ini - - [default] - san_ip = NIMBLE_MGMT_IP - san_login = NIMBLE_USER - san_password = NIMBLE_PASSWORD - use_multipath_for_image_xfer = True - volume_driver = NIMBLE_VOLUME_DRIVER - -In case of multiple back-end configuration, for example, configuration -which supports multiple Nimble Storage arrays or a single Nimble Storage -array with arrays from other vendors, use the following parameters. - -.. code-block:: ini - - [default] - enabled_backends = Nimble-Cinder - - [Nimble-Cinder] - san_ip = NIMBLE_MGMT_IP - san_login = NIMBLE_USER - san_password = NIMBLE_PASSWORD - use_multipath_for_image_xfer = True - volume_driver = NIMBLE_VOLUME_DRIVER - volume_backend_name = NIMBLE_BACKEND_NAME - -In case of multiple back-end configuration, Nimble Storage volume type -is created and associated with a back-end name as follows. - -.. note:: - - Single back-end configuration users do not need to create the volume type. - -.. code-block:: console - - $ openstack volume type create NIMBLE_VOLUME_TYPE - $ openstack volume type set --property volume_backend_name=NIMBLE_BACKEND_NAME NIMBLE_VOLUME_TYPE - -This section explains the variables used above: - -NIMBLE_MGMT_IP - Management IP address of Nimble Storage array/group. 
- -NIMBLE_USER - Nimble Storage account login with minimum ``power user`` (admin) privilege - if RBAC is used. - -NIMBLE_PASSWORD - Password of the admin account for nimble array. - -NIMBLE_VOLUME_DRIVER - Use either cinder.volume.drivers.nimble.NimbleISCSIDriver for iSCSI or - cinder.volume.drivers.nimble.NimbleFCDriver for Fibre Channel. - -NIMBLE_BACKEND_NAME - A volume back-end name which is specified in the ``cinder.conf`` file. - This is also used while assigning a back-end name to the Nimble volume type. - -NIMBLE_VOLUME_TYPE - The Nimble volume-type which is created from the CLI and associated with - ``NIMBLE_BACKEND_NAME``. - - .. note:: - - Restart the ``cinder-api``, ``cinder-scheduler``, and ``cinder-volume`` - services after updating the ``cinder.conf`` file. - -Nimble driver extra spec options -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -The Nimble volume driver also supports the following extra spec options: - -'nimble:encryption'='yes' - Used to enable encryption for a volume-type. - -'nimble:perfpol-name'=PERF_POL_NAME - PERF_POL_NAME is the name of a performance policy which exists on the - Nimble array and should be enabled for every volume in a volume type. - -'nimble:multi-initiator'='true' - Used to enable multi-initiator access for a volume-type. - -These extra-specs can be enabled by using the following command: - -.. code-block:: console - - $ openstack volume type set --property KEY=VALUE VOLUME_TYPE - -``VOLUME_TYPE`` is the Nimble volume type and ``KEY`` and ``VALUE`` are -the options mentioned above. - -Configuration options -~~~~~~~~~~~~~~~~~~~~~ - -The Nimble storage driver supports these configuration options: - -.. 
include:: ../../tables/cinder-nimble.rst diff --git a/doc/source/configuration/block-storage/drivers/prophetstor-dpl-driver.rst b/doc/source/configuration/block-storage/drivers/prophetstor-dpl-driver.rst deleted file mode 100644 index 3f6d21261..000000000 --- a/doc/source/configuration/block-storage/drivers/prophetstor-dpl-driver.rst +++ /dev/null @@ -1,104 +0,0 @@ -=========================================== -ProphetStor Fibre Channel and iSCSI drivers -=========================================== - -ProphetStor Fibre Channel and iSCSI drivers add support for -ProphetStor Flexvisor through the Block Storage service. -ProphetStor Flexvisor enables commodity x86 hardware as software-defined -storage leveraging well-proven ZFS for disk management to provide -enterprise grade storage services such as snapshots, data protection -with different RAID levels, replication, and deduplication. - -The ``DPLFCDriver`` and ``DPLISCSIDriver`` drivers run volume operations -by communicating with the ProphetStor storage system over HTTPS. - -Supported operations -~~~~~~~~~~~~~~~~~~~~ - -* Create, delete, attach, and detach volumes. - -* Create, list, and delete volume snapshots. - -* Create a volume from a snapshot. - -* Copy an image to a volume. - -* Copy a volume to an image. - -* Clone a volume. - -* Extend a volume. - -Enable the Fibre Channel or iSCSI drivers -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -The ``DPLFCDriver`` and ``DPLISCSIDriver`` are installed with the OpenStack -software. - -#. Query storage pool id to configure ``dpl_pool`` of the ``cinder.conf`` - file. - - a. Log on to the storage system with administrator access. - - .. code-block:: console - - $ ssh root@STORAGE_IP_ADDRESS - - b. View the current usable pool id. - - .. code-block:: console - - $ flvcli show pool list - - d5bd40b58ea84e9da09dcf25a01fdc07 : default_pool_dc07 - - c. Use ``d5bd40b58ea84e9da09dcf25a01fdc07`` to configure the ``dpl_pool`` of - ``/etc/cinder/cinder.conf`` file. - - .. 
note:: - - Other management commands can be referenced with the help command - :command:`flvcli -h`. - -#. Make the following changes on the volume node ``/etc/cinder/cinder.conf`` - file. - - .. code-block:: ini - - # IP address of SAN controller (string value) - san_ip=STORAGE IP ADDRESS - - # Username for SAN controller (string value) - san_login=USERNAME - - # Password for SAN controller (string value) - san_password=PASSWORD - - # Use thin provisioning for SAN volumes? (boolean value) - san_thin_provision=true - - # The port that the iSCSI daemon is listening on. (integer value) - iscsi_port=3260 - - # DPL pool uuid in which DPL volumes are stored. (string value) - dpl_pool=d5bd40b58ea84e9da09dcf25a01fdc07 - - # DPL port number. (integer value) - dpl_port=8357 - - # Uncomment one of the next two option to enable Fibre channel or iSCSI - # FIBRE CHANNEL(uncomment the next line to enable the FC driver) - #volume_driver=cinder.volume.drivers.prophetstor.dpl_fc.DPLFCDriver - # iSCSI (uncomment the next line to enable the iSCSI driver) - #volume_driver=cinder.volume.drivers.prophetstor.dpl_iscsi.DPLISCSIDriver - -#. Save the changes to the ``/etc/cinder/cinder.conf`` file and - restart the ``cinder-volume`` service. - -The ProphetStor Fibre Channel or iSCSI drivers are now enabled on your -OpenStack system. If you experience problems, review the Block Storage -service log files for errors. - -The following table contains the options supported by the ProphetStor -storage driver. - -.. 
include:: ../../tables/cinder-prophetstor_dpl.rst diff --git a/doc/source/configuration/block-storage/drivers/pure-storage-driver.rst b/doc/source/configuration/block-storage/drivers/pure-storage-driver.rst deleted file mode 100644 index 6e39fd171..000000000 --- a/doc/source/configuration/block-storage/drivers/pure-storage-driver.rst +++ /dev/null @@ -1,319 +0,0 @@ -=================================================== -Pure Storage iSCSI and Fibre Channel volume drivers -=================================================== - -The Pure Storage FlashArray volume drivers for OpenStack Block Storage -interact with configured Pure Storage arrays and support various -operations. - -Support for iSCSI storage protocol is available with the PureISCSIDriver -Volume Driver class, and Fibre Channel with PureFCDriver. - -All drivers are compatible with Purity FlashArrays that support the REST -API version 1.2, 1.3, or 1.4 (Purity 4.0.0 and newer). - -Limitations and known issues -~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -If you do not set up the nodes hosting instances to use multipathing, -all network connectivity will use a single physical port on the array. -In addition to significantly limiting the available bandwidth, this -means you do not have the high-availability and non-disruptive upgrade -benefits provided by FlashArray. Multipathing must be used to take advantage -of these benefits. - -Supported operations -~~~~~~~~~~~~~~~~~~~~ - -* Create, delete, attach, detach, retype, clone, and extend volumes. - -* Create a volume from snapshot. - -* Create, list, and delete volume snapshots. - -* Create, list, update, and delete consistency groups. - -* Create, list, and delete consistency group snapshots. - -* Manage and unmanage a volume. - -* Manage and unmanage a snapshot. - -* Get volume statistics. - -* Create a thin provisioned volume. - -* Replicate volumes to remote Pure Storage array(s). 
- -Configure OpenStack and Purity -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -You need to configure both your Purity array and your OpenStack cluster. - -.. note:: - - These instructions assume that the ``cinder-api`` and ``cinder-scheduler`` - services are installed and configured in your OpenStack cluster. - -Configure the OpenStack Block Storage service ---------------------------------------------- - -In these steps, you will edit the ``cinder.conf`` file to configure the -OpenStack Block Storage service to enable multipathing and to use the -Pure Storage FlashArray as back-end storage. - -#. Install Pure Storage PyPI module. - A requirement for the Pure Storage driver is the installation of the - Pure Storage Python SDK version 1.4.0 or later from PyPI. - - .. code-block:: console - - $ pip install purestorage - -#. Retrieve an API token from Purity. - The OpenStack Block Storage service configuration requires an API token - from Purity. Actions performed by the volume driver use this token for - authorization. Also, Purity logs the volume driver's actions as being - performed by the user who owns this API token. - - If you created a Purity user account that is dedicated to managing your - OpenStack Block Storage volumes, copy the API token from that user - account. - - Use the appropriate create or list command below to display and copy the - Purity API token: - - * To create a new API token: - - .. code-block:: console - - $ pureadmin create --api-token USER - - The following is an example output: - - .. code-block:: console - - $ pureadmin create --api-token pureuser - Name API Token Created - pureuser 902fdca3-7e3f-d2e4-d6a6-24c2285fe1d9 2014-08-04 14:50:30 - - * To list an existing API token: - - .. code-block:: console - - $ pureadmin list --api-token --expose USER - - The following is an example output: - - .. 
code-block:: console - - $ pureadmin list --api-token --expose pureuser - Name API Token Created - pureuser 902fdca3-7e3f-d2e4-d6a6-24c2285fe1d9 2014-08-04 14:50:30 - -#. Copy the API token retrieved (``902fdca3-7e3f-d2e4-d6a6-24c2285fe1d9`` from - the examples above) to use in the next step. - -#. Edit the OpenStack Block Storage service configuration file. - The following sample ``/etc/cinder/cinder.conf`` configuration lists the - relevant settings for a typical Block Storage service using a single - Pure Storage array: - - .. code-block:: ini - - [DEFAULT] - enabled_backends = puredriver-1 - default_volume_type = puredriver-1 - - [puredriver-1] - volume_backend_name = puredriver-1 - volume_driver = PURE_VOLUME_DRIVER - san_ip = IP_PURE_MGMT - pure_api_token = PURE_API_TOKEN - use_multipath_for_image_xfer = True - - Replace the following variables accordingly: - - PURE_VOLUME_DRIVER - Use either ``cinder.volume.drivers.pure.PureISCSIDriver`` for iSCSI or - ``cinder.volume.drivers.pure.PureFCDriver`` for Fibre Channel - connectivity. - - IP_PURE_MGMT - The IP address of the Pure Storage array's management interface or a - domain name that resolves to that IP address. - - PURE_API_TOKEN - The Purity Authorization token that the volume driver uses to - perform volume management on the Pure Storage array. - -.. note:: - - The volume driver automatically creates Purity host objects for - initiators as needed. If CHAP authentication is enabled via the - ``use_chap_auth`` setting, you must ensure there are no manually - created host objects with IQN's that will be used by the OpenStack - Block Storage service. The driver will only modify credentials on hosts that - it manages. - -.. note:: - - If using the PureFCDriver it is recommended to use the OpenStack - Block Storage Fibre Channel Zone Manager. 
- -Volume auto-eradication -~~~~~~~~~~~~~~~~~~~~~~~ - -To enable auto-eradication of deleted volumes, snapshots, and consistency -groups on deletion, modify the following option in the ``cinder.conf`` file: - -.. code-block:: ini - - pure_eradicate_on_delete = true - -By default, auto-eradication is disabled and all deleted volumes, snapshots, -and consistency groups are retained on the Pure Storage array in a recoverable -state for 24 hours from time of deletion. - -SSL certification -~~~~~~~~~~~~~~~~~ - -To enable SSL certificate validation, modify the following option in the -``cinder.conf`` file: - -.. code-block:: ini - - driver_ssl_cert_verify = true - -By default, SSL certificate validation is disabled. - -To specify a non-default path to ``CA_Bundle`` file or directory with -certificates of trusted CAs: - - -.. code-block:: ini - - driver_ssl_cert_path = Certificate path - -.. note:: - - This requires the use of Pure Storage Python SDK > 1.4.0. - -Replication configuration -~~~~~~~~~~~~~~~~~~~~~~~~~ - -Add the following to the back-end specification to specify another Flash -Array to replicate to: - -.. code-block:: ini - - [puredriver-1] - replication_device = backend_id:PURE2_NAME,san_ip:IP_PURE2_MGMT,api_token:PURE2_API_TOKEN - -Where ``PURE2_NAME`` is the name of the remote Pure Storage system, -``IP_PURE2_MGMT`` is the management IP address of the remote array, -and ``PURE2_API_TOKEN`` is the Purity Authorization token -of the remote array. - -Note that more than one ``replication_device`` line can be added to allow for -multi-target device replication. - -A volume is only replicated if the volume is of a volume-type that has -the extra spec ``replication_enabled`` set to `` True``. - -To create a volume type that specifies replication to remote back ends: - -.. 
code-block:: console - - $ openstack volume type create ReplicationType - $ openstack volume type set --property replication_enabled=' True' ReplicationType - -The following table contains the optional configuration parameters available -for replication configuration with the Pure Storage array. - -==================================================== ============= ====== -Option Description Default -==================================================== ============= ====== -``pure_replica_interval_default`` Snapshot - replication - interval in - seconds. ``900`` -``pure_replica_retention_short_term_default`` Retain all - snapshots on - target for - this time - (in seconds). ``14400`` -``pure_replica_retention_long_term_per_day_default`` Retain how - many - snapshots - for each - day. ``3`` -``pure_replica_retention_long_term_default`` Retain - snapshots - per day - on target - for this - time (in - days). ``7`` -==================================================== ============= ====== - - -.. note:: - - ``replication-failover`` is only supported from the primary array to any of the - multiple secondary arrays, but subsequent ``replication-failover`` is only - supported back to the original primary array. - -Automatic thin-provisioning/oversubscription ratio -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -To enable this feature where we calculate the array oversubscription ratio as -(total provisioned/actual used), add the following option in the -``cinder.conf`` file: - -.. code-block:: ini - - [puredriver-1] - pure_automatic_max_oversubscription_ratio = True - -By default, this is disabled and we honor the hard-coded configuration option -``max_over_subscription_ratio``. - -.. note:: - - Arrays with very good data reduction rates (compression/data deduplication/thin provisioning) - can get *very* large oversubscription rates applied. 
- -Scheduling metrics -~~~~~~~~~~~~~~~~~~ - -A large number of metrics are reported by the volume driver which can be useful -in implementing more control over volume placement in multi-backend -environments using the driver filter and weigher methods. - -Metrics reported include, but are not limited to: - -.. code-block:: text - - total_capacity_gb - free_capacity_gb - provisioned_capacity - total_volumes - total_snapshots - total_hosts - total_pgroups - writes_per_sec - reads_per_sec - input_per_sec - output_per_sec - usec_per_read_op - usec_per_write_op - queue_depth - -.. note:: - - All total metrics include non-OpenStack managed objects on the array. - -In conjunction with QOS extra-specs, you can create very complex algorithms to -manage volume placement. More detailed documentation on this is available in -other external documentation. diff --git a/doc/source/configuration/block-storage/drivers/quobyte-driver.rst b/doc/source/configuration/block-storage/drivers/quobyte-driver.rst deleted file mode 100644 index f0280a8b1..000000000 --- a/doc/source/configuration/block-storage/drivers/quobyte-driver.rst +++ /dev/null @@ -1,61 +0,0 @@ -============== -Quobyte driver -============== - -The `Quobyte `__ volume driver enables storing Block -Storage service volumes on a Quobyte storage back end. Block Storage service -back ends are mapped to Quobyte volumes and individual Block Storage service -volumes are stored as files on a Quobyte volume. Selection of the appropriate -Quobyte volume is done by the aforementioned back end configuration that -specifies the Quobyte volume explicitly. - -.. note:: - - Note the dual use of the term ``volume`` in the context of Block Storage - service volumes and in the context of Quobyte volumes. - -For more information see `the Quobyte support webpage -`__. 
- -Supported operations -~~~~~~~~~~~~~~~~~~~~ - -The Quobyte volume driver supports the following volume operations: - -- Create, delete, attach, and detach volumes - -- Secure NAS operation (Starting with Mitaka release secure NAS operation is - optional but still default) - -- Create and delete a snapshot - -- Create a volume from a snapshot - -- Extend a volume - -- Clone a volume - -- Copy a volume to image - -- Generic volume migration (no back end optimization) - -.. note:: - - When running VM instances off Quobyte volumes, ensure that the `Quobyte - Compute service driver `__ - has been configured in your OpenStack cloud. - -Configuration -~~~~~~~~~~~~~ - -To activate the Quobyte volume driver, configure the corresponding -``volume_driver`` parameter: - -.. code-block:: ini - - volume_driver = cinder.volume.drivers.quobyte.QuobyteDriver - -The following table contains the configuration options supported by the -Quobyte driver: - -.. include:: ../../tables/cinder-quobyte.rst diff --git a/doc/source/configuration/block-storage/drivers/scality-sofs-driver.rst b/doc/source/configuration/block-storage/drivers/scality-sofs-driver.rst deleted file mode 100644 index 2acb1be6e..000000000 --- a/doc/source/configuration/block-storage/drivers/scality-sofs-driver.rst +++ /dev/null @@ -1,68 +0,0 @@ -=================== -Scality SOFS driver -=================== - -The Scality SOFS volume driver interacts with configured sfused mounts. - -The Scality SOFS driver manages volumes as sparse files stored on a -Scality Ring through sfused. Ring connection settings and sfused options -are defined in the ``cinder.conf`` file and the configuration file -pointed to by the ``scality_sofs_config`` option, typically -``/etc/sfused.conf``. - -Supported operations -~~~~~~~~~~~~~~~~~~~~ - -The Scality SOFS volume driver provides the following Block Storage -volume operations: - -- Create, delete, attach (map), and detach (unmap) volumes. - -- Create, list, and delete volume snapshots. 
- -- Create a volume from a snapshot. - -- Copy an image to a volume. - -- Copy a volume to an image. - -- Clone a volume. - -- Extend a volume. - -- Backup a volume. - -- Restore backup to new or existing volume. - -Configuration -~~~~~~~~~~~~~ - -Use the following instructions to update the ``cinder.conf`` -configuration file: - -.. code-block:: ini - - [DEFAULT] - enabled_backends = scality-1 - - [scality-1] - volume_driver = cinder.volume.drivers.scality.ScalityDriver - volume_backend_name = scality-1 - - scality_sofs_config = /etc/sfused.conf - scality_sofs_mount_point = /cinder - scality_sofs_volume_dir = cinder/volumes - -Compute configuration -~~~~~~~~~~~~~~~~~~~~~ - -Use the following instructions to update the ``nova.conf`` configuration -file: - -.. code-block:: ini - - [libvirt] - scality_sofs_mount_point = /cinder - scality_sofs_config = /etc/sfused.conf - -.. include:: ../../tables/cinder-scality.rst diff --git a/doc/source/configuration/block-storage/drivers/sheepdog-driver.rst b/doc/source/configuration/block-storage/drivers/sheepdog-driver.rst deleted file mode 100644 index 775b090f2..000000000 --- a/doc/source/configuration/block-storage/drivers/sheepdog-driver.rst +++ /dev/null @@ -1,48 +0,0 @@ -=============== -Sheepdog driver -=============== - -Sheepdog is an open-source distributed storage system that provides a -virtual storage pool utilizing internal disk of commodity servers. - -Sheepdog scales to several hundred nodes, and has powerful virtual disk -management features like snapshotting, cloning, rollback, and thin -provisioning. - -More information can be found on `Sheepdog -Project `__. - -This driver enables the use of Sheepdog through Qemu/KVM. - -Supported operations -~~~~~~~~~~~~~~~~~~~~ - -Sheepdog driver supports these operations: - -- Create, delete, attach, and detach volumes. - -- Create, list, and delete volume snapshots. - -- Create a volume from a snapshot. - -- Copy an image to a volume. - -- Copy a volume to an image. 
- -- Clone a volume. - -- Extend a volume. - -Configuration -~~~~~~~~~~~~~ - -Set the following option in the ``cinder.conf`` file: - -.. code-block:: ini - - volume_driver = cinder.volume.drivers.sheepdog.SheepdogDriver - -The following table contains the configuration options supported by the -Sheepdog driver: - -.. include:: ../../tables/cinder-sheepdog.rst diff --git a/doc/source/configuration/block-storage/drivers/smbfs-volume-driver.rst b/doc/source/configuration/block-storage/drivers/smbfs-volume-driver.rst deleted file mode 100644 index e0f129742..000000000 --- a/doc/source/configuration/block-storage/drivers/smbfs-volume-driver.rst +++ /dev/null @@ -1,17 +0,0 @@ -============== -SambaFS driver -============== - -There is a volume back-end for Samba filesystems. Set the following in -your ``cinder.conf`` file, and use the following options to configure it. - -.. note:: - - The SambaFS driver requires ``qemu-img`` version 1.7 or higher on Linux - nodes, and ``qemu-img`` version 1.6 or higher on Windows nodes. - -.. code-block:: ini - - volume_driver = cinder.volume.drivers.smbfs.SmbfsDriver - -.. include:: ../../tables/cinder-smbfs.rst diff --git a/doc/source/configuration/block-storage/drivers/solidfire-volume-driver.rst b/doc/source/configuration/block-storage/drivers/solidfire-volume-driver.rst deleted file mode 100644 index 7ddaa870d..000000000 --- a/doc/source/configuration/block-storage/drivers/solidfire-volume-driver.rst +++ /dev/null @@ -1,104 +0,0 @@ -========= -SolidFire -========= - -The SolidFire Cluster is a high performance all SSD iSCSI storage device that -provides massive scale out capability and extreme fault tolerance. A key -feature of the SolidFire cluster is the ability to set and modify during -operation specific QoS levels on a volume for volume basis. The SolidFire -cluster offers this along with de-duplication, compression, and an architecture -that takes full advantage of SSDs. 
- -To configure the use of a SolidFire cluster with Block Storage, modify your -``cinder.conf`` file as follows: - -.. code-block:: ini - - volume_driver = cinder.volume.drivers.solidfire.SolidFireDriver - san_ip = 172.17.1.182 # the address of your MVIP - san_login = sfadmin # your cluster admin login - san_password = sfpassword # your cluster admin password - sf_account_prefix = '' # prefix for tenant account creation on solidfire cluster - -.. warning:: - - Older versions of the SolidFire driver (prior to Icehouse) created a unique - account prefixed with ``$cinder-volume-service-hostname-$tenant-id`` on the - SolidFire cluster for each tenant. Unfortunately, this account formation - resulted in issues for High Availability (HA) installations and - installations where the ``cinder-volume`` service can move to a new node. - The current default implementation does not experience this issue as no - prefix is used. For installations created on a prior release, the OLD - default behavior can be configured by using the keyword ``hostname`` in - sf_account_prefix. - -.. note:: - - The SolidFire driver creates names for volumes on the back end using the - format UUID-. This works well, but there is a possibility of a - UUID collision for customers running multiple clouds against the same - cluster. In Mitaka the ability was added to eliminate the possibility of - collisions by introducing the **sf_volume_prefix** configuration variable. - On the SolidFire cluster each volume will be labeled with the prefix, - providing the ability to configure unique volume names for each cloud. - The default prefix is 'UUID-'. - - Changing the setting on an existing deployment will result in the existing - volumes being inaccessible. To introduce this change to an existing - deployment it is recommended to add the Cluster as if it were a second - backend and disable new deployments to the current back end. - -.. 
include:: ../../tables/cinder-solidfire.rst - -Supported operations -~~~~~~~~~~~~~~~~~~~~ - -* Create, delete, attach, and detach volumes. -* Create, list, and delete volume snapshots. -* Create a volume from a snapshot. -* Copy an image to a volume. -* Copy a volume to an image. -* Clone a volume. -* Extend a volume. -* Retype a volume. -* Manage and unmanage a volume. -* Consistency group snapshots. - -QoS support for the SolidFire drivers includes the ability to set the -following capabilities in the OpenStack Block Storage API -``cinder.api.contrib.qos_specs_manage`` qos specs extension module: - -* **minIOPS** - The minimum number of IOPS guaranteed for this volume. - Default = 100. - -* **maxIOPS** - The maximum number of IOPS allowed for this volume. - Default = 15,000. - -* **burstIOPS** - The maximum number of IOPS allowed over a short period of - time. Default = 15,000. - -* **scaledIOPS** - The presence of this key is a flag indicating that the - above IOPS should be scaled by the following scale values. It is recommended - to set the value of scaledIOPS to True, but any value will work. The - absence of this key implies false. - -* **scaleMin** - The amount to scale the minIOPS by for every 1GB of - additional volume size. The value must be an integer. - -* **scaleMax** - The amount to scale the maxIOPS by for every 1GB of additional - volume size. The value must be an integer. - -* **scaleBurst** - The amount to scale the burstIOPS by for every 1GB of - additional volume size. The value must be an integer. - -The QoS keys above no longer require to be scoped but must be created and -associated to a volume type. For information about how to set the key-value -pairs and associate them with a volume type, see the `volume qos -`_ -section in the OpenStackClient command list. - -.. note:: - - When using scaledIOPS, the scale values must be chosen such that the - constraint minIOPS <= maxIOPS <= burstIOPS is always true. 
The driver will - enforce this constraint. diff --git a/doc/source/configuration/block-storage/drivers/synology-dsm-driver.rst b/doc/source/configuration/block-storage/drivers/synology-dsm-driver.rst deleted file mode 100755 index 72e1c6a0d..000000000 --- a/doc/source/configuration/block-storage/drivers/synology-dsm-driver.rst +++ /dev/null @@ -1,124 +0,0 @@ -========================== -Synology DSM volume driver -========================== - -The ``SynoISCSIDriver`` volume driver allows Synology NAS to be used for Block -Storage (cinder) in OpenStack deployments. Information on OpenStack Block -Storage volumes is available in the DSM Storage Manager. - -System requirements -~~~~~~~~~~~~~~~~~~~ - -The Synology driver has the following requirements: - -* DSM version 6.0.2 or later. - -* Your Synology NAS model must support advanced file LUN, iSCSI Target, and - snapshot features. Refer to the `Support List for applied models - `_. - -.. note:: - - The DSM driver is available in the OpenStack Newton release. - - -Supported operations -~~~~~~~~~~~~~~~~~~~~ - -* Create, delete, clone, attach, and detach volumes. - -* Create and delete volume snapshots. - -* Create a volume from a snapshot. - -* Copy an image to a volume. - -* Copy a volume to an image. - -* Extend a volume. - -* Get volume statistics. - -Driver configuration -~~~~~~~~~~~~~~~~~~~~ - -Edit the ``/etc/cinder/cinder.conf`` file on your volume driver host. - -Synology driver uses a volume in Synology NAS as the back end of Block Storage. -Every time you create a new Block Storage volume, the system will create an -advanced file LUN in your Synology volume to be used for this new Block Storage -volume. - -The following example shows how to use different Synology NAS servers as the -back end. If you want to use all volumes on your Synology NAS, add another -section with the volume number to differentiate between volumes within the same -Synology NAS. - -.. 
code-block:: ini - - [default] - enabled_backends = ds1515pV1, ds1515pV2, rs3017xsV1, others - - [ds1515pV1] - # configuration for volume 1 in DS1515+ - - [ds1515pV2] - # configuration for volume 2 in DS1515+ - - [rs3017xsV1] - # configuration for volume 1 in RS3017xs - -Each section indicates the volume number and the way in which the connection is -established. Below is an example of a basic configuration: - -.. code-block:: ini - - [Your_Section_Name] - - # Required settings - volume_driver = cinder.volume.drivers.synology.synology_iscsi.SynoISCSIDriver - iscsi_protocol = iscsi - iscsi_ip_address = DS_IP - synology_admin_port = DS_PORT - synology_username = DS_USER - synology_password = DS_PW - synology_pool_name = DS_VOLUME - - # Optional settings - volume_backend_name = VOLUME_BACKEND_NAME - iscsi_secondary_ip_addresses = IP_ADDRESSES - driver_use_ssl = True - use_chap_auth = True - chap_username = CHAP_USER_NAME - chap_password = CHAP_PASSWORD - -``DS_PORT`` - This is the port for DSM management. The default value for DSM is 5000 - (HTTP) and 5001 (HTTPS). To use HTTPS connections, you must set - ``driver_use_ssl = True``. - -``DS_IP`` - This is the IP address of your Synology NAS. - -``DS_USER`` - This is the account of any DSM administrator. - -``DS_PW`` - This is the password for ``DS_USER``. - -``DS_VOLUME`` - This is the volume you want to use as the storage pool for the Block - Storage service. The format is ``volume[0-9]+``, and the number is the same - as the volume number in DSM. - -.. note:: - - If you set ``driver_use_ssl`` as ``True``, ``synology_admin_port`` must be - an HTTPS port. - -Configuration options -~~~~~~~~~~~~~~~~~~~~~ - -The Synology DSM driver supports the following configuration options: - -.. 
include:: ../../tables/cinder-synology.rst diff --git a/doc/source/configuration/block-storage/drivers/tintri-volume-driver.rst b/doc/source/configuration/block-storage/drivers/tintri-volume-driver.rst deleted file mode 100644 index 453a82abf..000000000 --- a/doc/source/configuration/block-storage/drivers/tintri-volume-driver.rst +++ /dev/null @@ -1,81 +0,0 @@ -====== -Tintri -====== - -Tintri VMstore is a smart storage that sees, learns, and adapts for cloud and -virtualization. The Tintri Block Storage driver interacts with configured -VMstore running Tintri OS 4.0 and above. It supports various operations using -Tintri REST APIs and NFS protocol. - -To configure the use of a Tintri VMstore with Block Storage, perform the -following actions: - -#. Edit the ``etc/cinder/cinder.conf`` file and set the - ``cinder.volume.drivers.tintri`` options: - - .. code-block:: ini - - volume_driver=cinder.volume.drivers.tintri.TintriDriver - # Mount options passed to the nfs client. See section of the - # nfs man page for details. (string value) - nfs_mount_options = vers=3,lookupcache=pos - - # - # Options defined in cinder.volume.drivers.tintri - # - - # The hostname (or IP address) for the storage system (string - # value) - tintri_server_hostname = {Tintri VMstore Management IP} - - # User name for the storage system (string value) - tintri_server_username = {username} - - # Password for the storage system (string value) - tintri_server_password = {password} - - # API version for the storage system (string value) - # tintri_api_version = v310 - - # Following options needed for NFS configuration - # File with the list of available nfs shares (string value) - # nfs_shares_config = /etc/cinder/nfs_shares - - # Tintri driver will clean up unused image snapshots. With the following - # option, users can configure how long unused image snapshots are - # retained. 
Default retention policy is 30 days - # tintri_image_cache_expiry_days = 30 - - # Path to NFS shares file storing images. - # Users can store Glance images in the NFS share of the same VMstore - # mentioned in the following file. These images need to have additional - # metadata ``provider_location`` configured in Glance, which should point - # to the NFS share path of the image. - # This option will enable Tintri driver to directly clone from Glance - # image stored on same VMstore (rather than downloading image - # from Glance) - # tintri_image_shares_config = - # - # For example: - # Glance image metadata - # provider_location => - # nfs:///tintri/glance/84829294-c48b-4e16-a878-8b2581efd505 - -#. Edit the ``/etc/nova/nova.conf`` file and set the ``nfs_mount_options``: - - .. code-block:: ini - - [libvirt] - nfs_mount_options = vers=3 - -#. Edit the ``/etc/cinder/nfs_shares`` file and add the Tintri VMstore mount - points associated with the configured VMstore management IP in the - ``cinder.conf`` file: - - .. code-block:: bash - - {vmstore_data_ip}:/tintri/{submount1} - {vmstore_data_ip}:/tintri/{submount2} - - -.. include:: ../../tables/cinder-tintri.rst diff --git a/doc/source/configuration/block-storage/drivers/violin-v7000-driver.rst b/doc/source/configuration/block-storage/drivers/violin-v7000-driver.rst deleted file mode 100644 index 69df6af00..000000000 --- a/doc/source/configuration/block-storage/drivers/violin-v7000-driver.rst +++ /dev/null @@ -1,107 +0,0 @@ -=========================================== -Violin Memory 7000 Series FSP volume driver -=========================================== - -The OpenStack V7000 driver package from Violin Memory adds Block Storage -service support for Violin 7300 Flash Storage Platforms (FSPs) and 7700 FSP -controllers. - -The driver package release can be used with any OpenStack Liberty deployment -for all 7300 FSPs and 7700 FSP controllers running Concerto 7.5.3 and later -using Fibre Channel HBAs. 
- -System requirements -~~~~~~~~~~~~~~~~~~~ - -To use the Violin driver, the following are required: - -- Violin 7300/7700 series FSP with: - - - Concerto OS version 7.5.3 or later - - - Fibre channel host interfaces - -- The Violin block storage driver: This driver implements the block storage API - calls. The driver is included with the OpenStack Liberty release. - -- The vmemclient library: This is the Violin Array Communications library to - the Flash Storage Platform through a REST-like interface. The client can be - installed using the python 'pip' installer tool. Further information on - vmemclient can be found on `PyPI - `__. - - .. code-block:: console - - pip install vmemclient - -Supported operations -~~~~~~~~~~~~~~~~~~~~ - -- Create, delete, attach, and detach volumes. - -- Create, list, and delete volume snapshots. - -- Create a volume from a snapshot. - -- Copy an image to a volume. - -- Copy a volume to an image. - -- Clone a volume. - -- Extend a volume. - -.. note:: - - Listed operations are supported for thick, thin, and dedup luns, - with the exception of cloning. Cloning operations are supported only - on thick luns. - -Driver configuration -~~~~~~~~~~~~~~~~~~~~ - -Once the array is configured as per the installation guide, it is simply a -matter of editing the cinder configuration file to add or modify the -parameters. The driver currently only supports fibre channel configuration. - -Fibre channel configuration ---------------------------- - -Set the following in your ``cinder.conf`` configuration file, replacing the -variables using the guide in the following section: - -.. 
code-block:: ini - - volume_driver = cinder.volume.drivers.violin.v7000_fcp.V7000FCPDriver - volume_backend_name = vmem_violinfsp - extra_capabilities = VMEM_CAPABILITIES - san_ip = VMEM_MGMT_IP - san_login = VMEM_USER_NAME - san_password = VMEM_PASSWORD - use_multipath_for_image_xfer = true - -Configuration parameters ------------------------- - -Description of configuration value placeholders: - -VMEM_CAPABILITIES - User defined capabilities, a JSON formatted string specifying key-value - pairs (string value). The ones particularly supported are - ``dedup`` and ``thin``. Only these two capabilities are listed here in - ``cinder.conf`` file, indicating this backend be selected for creating - luns which have a volume type associated with them that have ``dedup`` - or ``thin`` extra_specs specified. For example, if the FSP is configured - to support dedup luns, set the associated driver capabilities - to: {"dedup":"True","thin":"True"}. - -VMEM_MGMT_IP - External IP address or host name of the Violin 7300 Memory Gateway. This - can be an IP address or host name. - -VMEM_USER_NAME - Log-in user name for the Violin 7300 Memory Gateway or 7700 FSP controller. - This user must have administrative rights on the array or controller. - -VMEM_PASSWORD - Log-in user's password. diff --git a/doc/source/configuration/block-storage/drivers/vmware-vmdk-driver.rst b/doc/source/configuration/block-storage/drivers/vmware-vmdk-driver.rst deleted file mode 100644 index 58b5e9a69..000000000 --- a/doc/source/configuration/block-storage/drivers/vmware-vmdk-driver.rst +++ /dev/null @@ -1,347 +0,0 @@ -.. _block_storage_vmdk_driver: - -================== -VMware VMDK driver -================== - -Use the VMware VMDK driver to enable management of the OpenStack Block Storage -volumes on vCenter-managed data stores. Volumes are backed by VMDK files on -data stores that use any VMware-compatible storage technology such as NFS, -iSCSI, FiberChannel, and vSAN. - -.. 
note:: - - The VMware VMDK driver requires vCenter version 5.1 at minimum. - -Functional context -~~~~~~~~~~~~~~~~~~ - -The VMware VMDK driver connects to vCenter, through which it can dynamically -access all the data stores visible from the ESX hosts in the managed cluster. - -When you create a volume, the VMDK driver creates a VMDK file on demand. The -VMDK file creation completes only when the volume is subsequently attached to -an instance. The reason for this requirement is that data stores visible to the -instance determine where to place the volume. Before the service creates the -VMDK file, attach a volume to the target instance. - -The running vSphere VM is automatically reconfigured to attach the VMDK file as -an extra disk. Once attached, you can log in to the running vSphere VM to -rescan and discover this extra disk. - -With the update to ESX version 6.0, the VMDK driver now supports NFS version -4.1. - -Configuration -~~~~~~~~~~~~~ - -The recommended volume driver for OpenStack Block Storage is the VMware vCenter -VMDK driver. When you configure the driver, you must match it with the -appropriate OpenStack Compute driver from VMware and both drivers must point to -the same server. - -In the ``nova.conf`` file, use this option to define the Compute driver: - -.. code-block:: ini - - compute_driver = vmwareapi.VMwareVCDriver - -In the ``cinder.conf`` file, use this option to define the volume -driver: - -.. code-block:: ini - - volume_driver = cinder.volume.drivers.vmware.vmdk.VMwareVcVmdkDriver - -The following table lists various options that the drivers support for the -OpenStack Block Storage configuration (``cinder.conf``): - -.. include:: ../../tables/cinder-vmware.rst - -VMDK disk type -~~~~~~~~~~~~~~ - -The VMware VMDK drivers support the creation of VMDK disk file types ``thin``, -``lazyZeroedThick`` (sometimes called thick or flat), or ``eagerZeroedThick``. - -A thin virtual disk is allocated and zeroed on demand as the space is used. 
-Unused space on a Thin disk is available to other users. - -A lazy zeroed thick virtual disk will have all space allocated at disk -creation. This reserves the entire disk space, so it is not available to other -users at any time. - -An eager zeroed thick virtual disk is similar to a lazy zeroed thick disk, in -that the entire disk is allocated at creation. However, in this type, any -previous data will be wiped clean on the disk before the write. This can mean -that the disk will take longer to create, but can also prevent issues with -stale data on physical media. - -Use the ``vmware:vmdk_type`` extra spec key with the appropriate value to -specify the VMDK disk file type. This table shows the mapping between the extra -spec entry and the VMDK disk file type: - -.. list-table:: Extra spec entry to VMDK disk file type mapping - :header-rows: 1 - - * - Disk file type - - Extra spec key - - Extra spec value - * - thin - - ``vmware:vmdk_type`` - - ``thin`` - * - lazyZeroedThick - - ``vmware:vmdk_type`` - - ``thick`` - * - eagerZeroedThick - - ``vmware:vmdk_type`` - - ``eagerZeroedThick`` - -If you do not specify a ``vmdk_type`` extra spec entry, the disk file type will -default to ``thin``. - -The following example shows how to create a ``lazyZeroedThick`` VMDK volume by -using the appropriate ``vmdk_type``: - -.. code-block:: console - - $ openstack volume type create THICK_VOLUME - $ openstack volume type set --property vmware:vmdk_type=thick THICK_VOLUME - $ openstack volume create --size 1 --type THICK_VOLUME VOLUME1 - -Clone type -~~~~~~~~~~ - -With the VMware VMDK drivers, you can create a volume from another -source volume or a snapshot point. The VMware vCenter VMDK driver -supports the ``full`` and ``linked/fast`` clone types. Use the -``vmware:clone_type`` extra spec key to specify the clone type. The -following table captures the mapping for clone types: - -.. 
list-table:: Extra spec entry to clone type mapping - :header-rows: 1 - - * - Clone type - - Extra spec key - - Extra spec value - * - full - - ``vmware:clone_type`` - - ``full`` - * - linked/fast - - ``vmware:clone_type`` - - ``linked`` - -If you do not specify the clone type, the default is ``full``. - -The following example shows linked cloning from a source volume, which is -created from an image: - -.. code-block:: console - - $ openstack volume type create FAST_CLONE - $ openstack volume type set --property vmware:clone_type=linked FAST_CLONE - $ openstack volume create --size 1 --type FAST_CLONE --image MYIMAGE SOURCE_VOL - $ openstack volume create --size 1 --source SOURCE_VOL DEST_VOL - -Use vCenter storage policies to specify back-end data stores -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -This section describes how to configure back-end data stores using storage -policies. In vCenter 5.5 and greater, you can create one or more storage -policies and expose them as a Block Storage volume-type to a vmdk volume. The -storage policies are exposed to the vmdk driver through the extra spec property -with the ``vmware:storage_profile`` key. - -For example, assume a storage policy in vCenter named ``gold_policy.`` and a -Block Storage volume type named ``vol1`` with the extra spec key -``vmware:storage_profile`` set to the value ``gold_policy``. Any Block Storage -volume creation that uses the ``vol1`` volume type places the volume only in -data stores that match the ``gold_policy`` storage policy. - -The Block Storage back-end configuration for vSphere data stores is -automatically determined based on the vCenter configuration. If you configure a -connection to connect to vCenter version 5.5 or later in the ``cinder.conf`` -file, the use of storage policies to configure back-end data stores is -automatically supported. - -.. 
note:: - - You must configure any data stores that you configure for the Block - Storage service for the Compute service. - -**To configure back-end data stores by using storage policies** - -#. In vCenter, tag the data stores to be used for the back end. - - OpenStack also supports policies that are created by using vendor-specific - capabilities; for example vSAN-specific storage policies. - - .. note:: - - The tag value serves as the policy. For details, see :ref:`vmware-spbm`. - -#. Set the extra spec key ``vmware:storage_profile`` in the desired Block - Storage volume types to the policy name that you created in the previous - step. - -#. Optionally, for the ``vmware_host_version`` parameter, enter the version - number of your vSphere platform. For example, ``5.5``. - - This setting overrides the default location for the corresponding WSDL file. - Among other scenarios, you can use this setting to prevent WSDL error - messages during the development phase or to work with a newer version of - vCenter. - -#. Complete the other vCenter configuration parameters as appropriate. - -.. note:: - - Any volume that is created without an associated policy (that is to say, - without an associated volume type that specifies ``vmware:storage_profile`` - extra spec), there is no policy-based placement for that volume. - -Supported operations -~~~~~~~~~~~~~~~~~~~~ - -The VMware vCenter VMDK driver supports these operations: - -- Create, delete, attach, and detach volumes. - - .. note:: - - When a volume is attached to an instance, a reconfigure operation is - performed on the instance to add the volume's VMDK to it. The user must - manually rescan and mount the device from within the guest operating - system. - -- Create, list, and delete volume snapshots. - - .. note:: - - Allowed only if volume is not attached to an instance. - -- Create a volume from a snapshot. - - .. 
note:: - - The vmdk UUID in vCenter will not be set to the volume UUID if the - vCenter version is 6.0 or above and the extra spec key ``vmware:clone_type`` - in the destination volume type is set to ``linked``. - -- Copy an image to a volume. - - .. note:: - - Only images in ``vmdk`` disk format with ``bare`` container format are - supported. The ``vmware_disktype`` property of the image can be - ``preallocated``, ``sparse``, ``streamOptimized`` or ``thin``. - -- Copy a volume to an image. - - .. note:: - - - Allowed only if the volume is not attached to an instance. - - This operation creates a ``streamOptimized`` disk image. - -- Clone a volume. - - .. note:: - - - Supported only if the source volume is not attached to an instance. - - The vmdk UUID in vCenter will not be set to the volume UUID if the - vCenter version is 6.0 or above and the extra spec key ``vmware:clone_type`` - in the destination volume type is set to ``linked``. - -- Backup a volume. - - .. note:: - - This operation creates a backup of the volume in ``streamOptimized`` - disk format. - -- Restore backup to new or existing volume. - - .. note:: - - Supported only if the existing volume doesn't contain snapshots. - -- Change the type of a volume. - - .. note:: - - This operation is supported only if the volume state is ``available``. - -- Extend a volume. - - -.. _vmware-spbm: - -Storage policy-based configuration in vCenter -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -You can configure Storage Policy-Based Management (SPBM) profiles for vCenter -data stores supporting the Compute, Image service, and Block Storage components -of an OpenStack implementation. - -In a vSphere OpenStack deployment, SPBM enables you to delegate several data -stores for storage, which reduces the risk of running out of storage space. The -policy logic selects the data store based on accessibility and available -storage space. 
- -Prerequisites -~~~~~~~~~~~~~ - -- Determine the data stores to be used by the SPBM policy. - -- Determine the tag that identifies the data stores in the OpenStack component - configuration. - -- Create separate policies or sets of data stores for separate - OpenStack components. - -Create storage policies in vCenter -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -#. In vCenter, create the tag that identifies the data stores: - - #. From the :guilabel:`Home` screen, click :guilabel:`Tags`. - - #. Specify a name for the tag. - - #. Specify a tag category. For example, ``spbm-cinder``. - -#. Apply the tag to the data stores to be used by the SPBM policy. - - .. note:: - - For details about creating tags in vSphere, see the `vSphere - documentation - `__. - -#. In vCenter, create a tag-based storage policy that uses one or more tags to - identify a set of data stores. - - .. note:: - - For details about creating storage policies in vSphere, see the `vSphere - documentation - `__. - -Data store selection -~~~~~~~~~~~~~~~~~~~~ - -If storage policy is enabled, the driver initially selects all the data stores -that match the associated storage policy. - -If two or more data stores match the storage policy, the driver chooses a data -store that is connected to the maximum number of hosts. - -In case of ties, the driver chooses the data store with lowest space -utilization, where space utilization is defined by the -``(1-freespace/totalspace)`` meters. - -These actions reduce the number of volume migrations while attaching the volume -to instances. - -The volume must be migrated if the ESX host for the instance cannot access the -data store that contains the volume. 
diff --git a/doc/source/configuration/block-storage/drivers/vzstorage-driver.rst b/doc/source/configuration/block-storage/drivers/vzstorage-driver.rst deleted file mode 100644 index 79411666c..000000000 --- a/doc/source/configuration/block-storage/drivers/vzstorage-driver.rst +++ /dev/null @@ -1,14 +0,0 @@ -======================== -Virtuozzo Storage driver -======================== - -The Virtuozzo Storage driver is a fault-tolerant distributed storage -system that is optimized for virtualization workloads. -Set the following in your ``cinder.conf`` file, and use the following -options to configure it. - -.. code-block:: ini - - volume_driver = cinder.volume.drivers.vzstorage.VZStorageDriver - -.. include:: ../../tables/cinder-vzstorage.rst diff --git a/doc/source/configuration/block-storage/drivers/windows-iscsi-volume-driver.rst b/doc/source/configuration/block-storage/drivers/windows-iscsi-volume-driver.rst deleted file mode 100644 index 709bdd399..000000000 --- a/doc/source/configuration/block-storage/drivers/windows-iscsi-volume-driver.rst +++ /dev/null @@ -1,122 +0,0 @@ -=========================== -Windows iSCSI volume driver -=========================== - -Windows Server 2012 and Windows Storage Server 2012 offer an integrated iSCSI -Target service that can be used with OpenStack Block Storage in your stack. -Being entirely a software solution, consider it in particular for mid-sized -networks where the costs of a SAN might be excessive. - -The Windows Block Storage driver works with OpenStack Compute on any -hypervisor. It includes snapshotting support and the ``boot from volume`` -feature. - -This driver creates volumes backed by fixed-type VHD images on Windows Server -2012 and dynamic-type VHDX on Windows Server 2012 R2, stored locally on a -user-specified path. The system uses those images as iSCSI disks and exports -them through iSCSI targets. Each volume has its own iSCSI target. 
- -This driver has been tested with Windows Server 2012 and Windows Server 2012 R2 -using the Server and Storage Server distributions. - -Install the ``cinder-volume`` service as well as the required Python components -directly onto the Windows node. - -You may install and configure ``cinder-volume`` and its dependencies manually -using the following guide or you may use the ``Cinder Volume Installer``, -presented below. - -Installing using the OpenStack cinder volume installer -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -In case you want to avoid all the manual setup, you can use Cloudbase -Solutions' installer. You can find it at -https://www.cloudbase.it/downloads/CinderVolumeSetup_Beta.msi. It installs an -independent Python environment, in order to avoid conflicts with existing -applications, and dynamically generates a ``cinder.conf`` file based on the -parameters provided by you. - -``cinder-volume`` will be configured to run as a Windows Service, which can -be restarted using: - -.. code-block:: console - - PS C:\> net stop cinder-volume ; net start cinder-volume - -The installer can also be used in unattended mode. More details about how to -use the installer and its features can be found at https://www.cloudbase.it. - -Windows Server configuration -~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -The required service in order to run ``cinder-volume`` on Windows is -``wintarget``. This will require the iSCSI Target Server Windows feature -to be installed. You can install it by running the following command: - -.. code-block:: console - - PS C:\> Add-WindowsFeature FS-iSCSITarget-Server - -.. note:: - - The Windows Server installation requires at least 16 GB of disk space. The - volumes hosted by this node need the extra space. - -For ``cinder-volume`` to work properly, you must configure NTP as explained -in :ref:`configure-ntp-windows`. - -Next, install the requirements as described in :ref:`windows-requirements`. 
- -Getting the code -~~~~~~~~~~~~~~~~ - -Git can be used to download the necessary source code. The installer to run Git -on Windows can be downloaded here: - -https://git-for-windows.github.io/ - -Once installed, run the following to clone the OpenStack Block Storage code: - -.. code-block:: console - - PS C:\> git.exe clone https://git.openstack.org/openstack/cinder - -Configure cinder-volume -~~~~~~~~~~~~~~~~~~~~~~~ - -The ``cinder.conf`` file may be placed in ``C:\etc\cinder``. Below is a -configuration sample for using the Windows iSCSI Driver: - -.. code-block:: ini - - [DEFAULT] - auth_strategy = keystone - volume_name_template = volume-%s - volume_driver = cinder.volume.drivers.windows.WindowsDriver - glance_api_servers = IP_ADDRESS:9292 - rabbit_host = IP_ADDRESS - rabbit_port = 5672 - sql_connection = mysql+pymysql://root:Passw0rd@IP_ADDRESS/cinder - windows_iscsi_lun_path = C:\iSCSIVirtualDisks - rabbit_password = Passw0rd - logdir = C:\OpenStack\Log\ - image_conversion_dir = C:\ImageConversionDir - debug = True - -The following table contains a reference to the only driver specific -option that will be used by the Block Storage Windows driver: - -.. include:: ../../tables/cinder-windows.rst - -Run cinder-volume ------------------ - -After configuring ``cinder-volume`` using the ``cinder.conf`` file, you may -use the following commands to install and run the service (note that you -must replace the variables with the proper paths): - -.. 
code-block:: console - - PS C:\> python $CinderClonePath\setup.py install - PS C:\> cmd /c C:\python27\python.exe "c:\python27\Scripts\cinder-volume" --config-file $CinderConfPath diff --git a/doc/source/configuration/block-storage/drivers/xio-volume-driver.rst b/doc/source/configuration/block-storage/drivers/xio-volume-driver.rst deleted file mode 100644 index 32ebb7e00..000000000 --- a/doc/source/configuration/block-storage/drivers/xio-volume-driver.rst +++ /dev/null @@ -1,122 +0,0 @@ -================== -X-IO volume driver -================== - -The X-IO volume driver for OpenStack Block Storage enables ISE products to be -managed by OpenStack Block Storage nodes. This driver can be configured to work -with iSCSI and Fibre Channel storage protocols. The X-IO volume driver allows -the cloud operator to take advantage of ISE features like quality of -service (QoS) and Continuous Adaptive Data Placement (CADP). It also supports -creating thin volumes and specifying volume media affinity. - -Requirements -~~~~~~~~~~~~ - -ISE FW 2.8.0 or ISE FW 3.1.0 is required for OpenStack Block Storage -support. The X-IO volume driver will not work with older ISE FW. - -Supported operations -~~~~~~~~~~~~~~~~~~~~ - -- Create, delete, attach, detach, retype, clone, and extend volumes. -- Create a volume from snapshot. -- Create, list, and delete volume snapshots. -- Manage and unmanage a volume. -- Get volume statistics. -- Create a thin provisioned volume. -- Create volumes with QoS specifications. - -Configure X-IO Volume driver -~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -To configure the use of an ISE product with OpenStack Block Storage, modify -your ``cinder.conf`` file as follows. Be careful to use the one that matches -the storage protocol in use: - -Fibre Channel -------------- - -.. 
code-block:: ini - - volume_driver = cinder.volume.drivers.xio.XIOISEFCDriver - san_ip = 1.2.3.4 # the address of your ISE REST management interface - san_login = administrator # your ISE management admin login - san_password = password # your ISE management admin password - -iSCSI ------ - -.. code-block:: ini - - volume_driver = cinder.volume.drivers.xio.XIOISEISCSIDriver - san_ip = 1.2.3.4 # the address of your ISE REST management interface - san_login = administrator # your ISE management admin login - san_password = password # your ISE management admin password - iscsi_ip_address = ionet_ip # ip address to one ISE port connected to the IONET - -Optional configuration parameters ---------------------------------- - -.. include:: ../../tables/cinder-xio.rst - -Multipath ---------- - -The X-IO ISE supports a multipath configuration, but multipath must be enabled -on the compute node (see *ISE Storage Blade Best Practices Guide*). -For more information, see `X-IO Document Library -`__. - -Volume types ------------- - -OpenStack Block Storage uses volume types to help the administrator specify -attributes for volumes. These attributes are called extra-specs. The X-IO -volume driver support the following extra-specs. - -.. list-table:: Extra specs - :header-rows: 1 - - * - Extra-specs name - - Valid values - - Description - * - ``Feature:Raid`` - - 1, 5 - - RAID level for volume. - * - ``Feature:Pool`` - - 1 - n (n being number of pools on ISE) - - Pool to create volume in. - * - ``Affinity:Type`` - - cadp, flash, hdd - - Volume media affinity type. - * - ``Alloc:Type`` - - 0 (thick), 1 (thin) - - Allocation type for volume. Thick or thin. - * - ``QoS:minIOPS`` - - n (value less than maxIOPS) - - Minimum IOPS setting for volume. - * - ``QoS:maxIOPS`` - - n (value bigger than minIOPS) - - Maximum IOPS setting for volume. - * - ``QoS:burstIOPS`` - - n (value bigger than minIOPS) - - Burst IOPS setting for volume. 
- -Examples --------- - -Create a volume type called xio1-flash for volumes that should reside on ssd -storage: - -.. code-block:: console - - $ openstack volume type create xio1-flash - $ openstack volume type set --property Affinity:Type=flash xio1-flash - -Create a volume type called xio1 and set QoS min and max: - -.. code-block:: console - - $ openstack volume type create xio1 - $ openstack volume type set --property QoS:minIOPS=20 xio1 - $ openstack volume type set --property QoS:maxIOPS=5000 xio1 diff --git a/doc/source/configuration/block-storage/drivers/zadara-volume-driver.rst b/doc/source/configuration/block-storage/drivers/zadara-volume-driver.rst deleted file mode 100644 index 8c134c034..000000000 --- a/doc/source/configuration/block-storage/drivers/zadara-volume-driver.rst +++ /dev/null @@ -1,80 +0,0 @@ -================================= -Zadara Storage VPSA volume driver -================================= - -Zadara Storage, Virtual Private Storage Array (VPSA) is the first software -defined, Enterprise-Storage-as-a-Service. It is an elastic and private block -and file storage system which, provides enterprise-grade data protection and -data management storage services. - -The ``ZadaraVPSAISCSIDriver`` volume driver allows the Zadara Storage VPSA -to be used as a volume back end storage in OpenStack deployments. - -System requirements -~~~~~~~~~~~~~~~~~~~ - -To use Zadara Storage VPSA Volume Driver you will require: - -- Zadara Storage VPSA version 15.07 and above - -- iSCSI or iSER host interfaces - -Supported operations -~~~~~~~~~~~~~~~~~~~~~ - -- Create, delete, attach, and detach volumes -- Create, list, and delete volume snapshots -- Create a volume from a snapshot -- Copy an image to a volume -- Copy a volume to an image -- Clone a volume -- Extend a volume -- Migrate a volume with back end assistance - -Configuration -~~~~~~~~~~~~~ - -#. Create a VPSA pool(s) or make sure you have an existing pool(s) that will - be used for volume services. 
The VPSA pool(s) will be identified by its ID - (pool-xxxxxxxx). For further details, see the - `VPSA's user guide `_. - -#. Adjust the ``cinder.conf`` configuration file to define the volume driver - name along with a storage back end entry for each VPSA pool that will be - managed by the block storage service. - Each back end entry requires a unique section name, surrounded by square - brackets (or parentheses), followed by options in ``key=value`` format. - -.. note:: - - Restart cinder-volume service after modifying ``cinder.conf``. - - -Sample minimum back end configuration - -.. code-block:: ini - - [DEFAULT] - enabled_backends = vpsa - - [vpsa] - zadara_vpsa_host = 172.31.250.10 - zadara_vpsa_port = 80 - zadara_user = vpsauser - zadara_password = mysecretpassword - zadara_use_iser = false - zadara_vpsa_poolname = pool-00000001 - volume_driver = cinder.volume.drivers.zadara.ZadaraVPSAISCSIDriver - volume_backend_name = vpsa - -Driver-specific options -~~~~~~~~~~~~~~~~~~~~~~~ - -This section contains the configuration options that are specific -to the Zadara Storage VPSA driver. - -.. include:: ../../tables/cinder-zadara.rst - -.. note:: - - By design, all volumes created within the VPSA are thin provisioned. diff --git a/doc/source/configuration/block-storage/drivers/zfssa-iscsi-driver.rst b/doc/source/configuration/block-storage/drivers/zfssa-iscsi-driver.rst deleted file mode 100644 index 71877521b..000000000 --- a/doc/source/configuration/block-storage/drivers/zfssa-iscsi-driver.rst +++ /dev/null @@ -1,265 +0,0 @@ -========================================= -Oracle ZFS Storage Appliance iSCSI driver -========================================= - -Oracle ZFS Storage Appliances (ZFSSAs) provide advanced software to -protect data, speed tuning and troubleshooting, and deliver high -performance and high availability. Through the Oracle ZFSSA iSCSI -Driver, OpenStack Block Storage can use an Oracle ZFSSA as a block -storage resource. 
The driver enables you to create iSCSI volumes that an -OpenStack Block Storage server can allocate to any virtual machine -running on a compute host. - -Requirements -~~~~~~~~~~~~ - -The Oracle ZFSSA iSCSI Driver, version ``1.0.0`` and later, supports -ZFSSA software release ``2013.1.2.0`` and later. - -Supported operations -~~~~~~~~~~~~~~~~~~~~ - -- Create, delete, attach, detach, manage, and unmanage volumes. -- Create and delete snapshots. -- Create volume from snapshot. -- Extend a volume. -- Attach and detach volumes. -- Get volume stats. -- Clone volumes. -- Migrate a volume. -- Local cache of a bootable volume. - -Configuration -~~~~~~~~~~~~~ - -#. Enable RESTful service on the ZFSSA Storage Appliance. - -#. Create a new user on the appliance with the following authorizations: - - .. code-block:: bash - - scope=stmf - allow_configure=true - scope=nas - allow_clone=true, allow_createProject=true, allow_createShare=true, allow_changeSpaceProps=true, allow_changeGeneralProps=true, allow_destroy=true, allow_rollback=true, allow_takeSnap=true - scope=schema - allow_modify=true - - You can create a role with authorizations as follows: - - .. 
code-block:: console - - zfssa:> configuration roles - zfssa:configuration roles> role OpenStackRole - zfssa:configuration roles OpenStackRole (uncommitted)> set description="OpenStack Cinder Driver" - zfssa:configuration roles OpenStackRole (uncommitted)> commit - zfssa:configuration roles> select OpenStackRole - zfssa:configuration roles OpenStackRole> authorizations create - zfssa:configuration roles OpenStackRole auth (uncommitted)> set scope=stmf - zfssa:configuration roles OpenStackRole auth (uncommitted)> set allow_configure=true - zfssa:configuration roles OpenStackRole auth (uncommitted)> commit - zfssa:configuration roles OpenStackRole> authorizations create - zfssa:configuration roles OpenStackRole auth (uncommitted)> set scope=nas - zfssa:configuration roles OpenStackRole auth (uncommitted)> set allow_clone=true - zfssa:configuration roles OpenStackRole auth (uncommitted)> set allow_createProject=true - zfssa:configuration roles OpenStackRole auth (uncommitted)> set allow_createShare=true - zfssa:configuration roles OpenStackRole auth (uncommitted)> set allow_changeSpaceProps=true - zfssa:configuration roles OpenStackRole auth (uncommitted)> set allow_changeGeneralProps=true - zfssa:configuration roles OpenStackRole auth (uncommitted)> set allow_destroy=true - zfssa:configuration roles OpenStackRole auth (uncommitted)> set allow_rollback=true - zfssa:configuration roles OpenStackRole auth (uncommitted)> set allow_takeSnap=true - zfssa:configuration roles OpenStackRole auth (uncommitted)> commit - - You can create a user with a specific role as follows: - - .. code-block:: console - - zfssa:> configuration users - zfssa:configuration users> user cinder - zfssa:configuration users cinder (uncommitted)> set fullname="OpenStack Cinder Driver" - zfssa:configuration users cinder (uncommitted)> set initial_password=12345 - zfssa:configuration users cinder (uncommitted)> commit - zfssa:configuration users> select cinder set roles=OpenStackRole - - .. 
note:: - - You can also run this `workflow - `__ - to automate the above tasks. - Refer to `Oracle documentation - `__ - on how to download, view, and execute a workflow. - -#. Ensure that the ZFSSA iSCSI service is online. If the ZFSSA iSCSI service is - not online, enable the service by using the BUI, CLI or REST API in the - appliance. - - .. code-block:: console - - zfssa:> configuration services iscsi - zfssa:configuration services iscsi> enable - zfssa:configuration services iscsi> show - Properties: - = online - ... - - Define the following required properties in the ``cinder.conf`` file: - - .. code-block:: ini - - volume_driver = cinder.volume.drivers.zfssa.zfssaiscsi.ZFSSAISCSIDriver - san_ip = myhost - san_login = username - san_password = password - zfssa_pool = mypool - zfssa_project = myproject - zfssa_initiator_group = default - zfssa_target_portal = w.x.y.z:3260 - zfssa_target_interfaces = e1000g0 - - Optionally, you can define additional properties. - - Target interfaces can be seen as follows in the CLI: - - .. code-block:: console - - zfssa:> configuration net interfaces - zfssa:configuration net interfaces> show - Interfaces: - INTERFACE STATE CLASS LINKS ADDRS LABEL - e1000g0 up ip e1000g0 1.10.20.30/24 Untitled Interface - ... - - .. note:: - - Do not use management interfaces for ``zfssa_target_interfaces``. - -#. Configure the cluster: - - If a cluster is used as the cinder storage resource, the following - verifications are required on your Oracle ZFS Storage Appliance: - - - Verify that both the pool and the network interface are of type - singleton and are not locked to the current controller. This - approach ensures that the pool and the interface used for data - always belong to the active controller, regardless of the current - state of the cluster. - - - Verify that the management IP, data IP and storage pool belong to - the same head. - - .. 
note:: - - Most configuration settings, including service properties, users, roles, - and iSCSI initiator definitions are replicated on both heads - automatically. If the driver modifies any of these settings, they will be - modified automatically on both heads. - - .. note:: - - A short service interruption occurs during failback or takeover, - but once the process is complete, the ``cinder-volume`` service should be able - to access the pool through the data IP. - -ZFSSA assisted volume migration -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -The ZFSSA iSCSI driver supports storage assisted volume migration -starting in the Liberty release. This feature uses remote replication -feature on the ZFSSA. Volumes can be migrated between two backends -configured not only to the same ZFSSA but also between two separate -ZFSSAs altogether. - -The following conditions must be met in order to use ZFSSA assisted -volume migration: - -- Both the source and target backends are configured to ZFSSAs. - -- Remote replication service on the source and target appliance is enabled. - -- The ZFSSA to which the target backend is configured should be configured as a - target in the remote replication service of the ZFSSA configured to the - source backend. The remote replication target needs to be configured even - when the source and the destination for volume migration are the same ZFSSA. - Define ``zfssa_replication_ip`` in the ``cinder.conf`` file of the source - backend as the IP address used to register the target ZFSSA in the remote - replication service of the source ZFSSA. - -- The name of the iSCSI target group(``zfssa_target_group``) on the source and - the destination ZFSSA is the same. - -- The volume is not attached and is in available state. - -If any of the above conditions are not met, the driver will proceed with -generic volume migration. 
- -The ZFSSA user on the source and target appliances will need to have -additional role authorizations for assisted volume migration to work. In -scope nas, set ``allow_rrtarget`` and ``allow_rrsource`` to ``true``. - -.. code-block:: console - - zfssa:configuration roles OpenStackRole auth (uncommitted)> set scope=nas - zfssa:configuration roles OpenStackRole auth (uncommitted)> set allow_rrtarget=true - zfssa:configuration roles OpenStackRole auth (uncommitted)> set allow_rrsource=true - -ZFSSA local cache -~~~~~~~~~~~~~~~~~ - -The local cache feature enables ZFSSA drivers to serve the usage of bootable -volumes significantly better. With the feature, the first bootable volume -created from an image is cached, so that subsequent volumes can be created -directly from the cache, instead of having image data transferred over the -network multiple times. - -The following conditions must be met in order to use ZFSSA local cache feature: - -- A storage pool needs to be configured. - -- REST and iSCSI services need to be turned on. - -- On an OpenStack controller, ``cinder.conf`` needs to contain necessary - properties used to configure and set up the ZFSSA iSCSI driver, including the - following new properties: - - - ``zfssa_enable_local_cache``: (True/False) To enable/disable the feature. - - - ``zfssa_cache_project``: The ZFSSA project name where cache volumes are - stored. - -Every cache volume has two additional properties stored as ZFSSA custom -schema. It is important that the schema are not altered outside of Block -Storage when the driver is in use: - -- ``image_id``: stores the image id as in Image service. - -- ``updated_at``: stores the most current timestamp when the image is updated - in Image service. - -Supported extra specs -~~~~~~~~~~~~~~~~~~~~~ - -Extra specs provide the OpenStack storage admin the flexibility to create -volumes with different characteristics from the ones specified in the -``cinder.conf`` file. 
The admin will specify the volume properties as keys -at volume type creation. When a user requests a volume of this volume type, -the volume will be created with the properties specified as extra specs. - -The following extra specs scoped keys are supported by the driver: - -- ``zfssa:volblocksize`` - -- ``zfssa:sparse`` - -- ``zfssa:compression`` - -- ``zfssa:logbias`` - -Volume types can be created using the :command:`openstack volume type create` -command. -Extra spec keys can be added using :command:`openstack volume type set` -command. - -Driver options -~~~~~~~~~~~~~~ - -The Oracle ZFSSA iSCSI Driver supports these options: - -.. include:: ../../tables/cinder-zfssa-iscsi.rst diff --git a/doc/source/configuration/block-storage/drivers/zfssa-nfs-driver.rst b/doc/source/configuration/block-storage/drivers/zfssa-nfs-driver.rst deleted file mode 100644 index 90d926428..000000000 --- a/doc/source/configuration/block-storage/drivers/zfssa-nfs-driver.rst +++ /dev/null @@ -1,297 +0,0 @@ -======================================= -Oracle ZFS Storage Appliance NFS driver -======================================= - -The Oracle ZFS Storage Appliance (ZFSSA) NFS driver enables the ZFSSA to -be used seamlessly as a block storage resource. The driver enables you -to to create volumes on a ZFS share that is NFS mounted. - -Requirements -~~~~~~~~~~~~ - -Oracle ZFS Storage Appliance Software version ``2013.1.2.0`` or later. - -Supported operations -~~~~~~~~~~~~~~~~~~~~ - -- Create, delete, attach, detach, manage, and unmanage volumes. - -- Create and delete snapshots. - -- Create a volume from a snapshot. - -- Extend a volume. - -- Copy an image to a volume. - -- Copy a volume to an image. - -- Clone a volume. - -- Volume migration. - -- Local cache of a bootable volume - -Appliance configuration -~~~~~~~~~~~~~~~~~~~~~~~ - -Appliance configuration using the command-line interface (CLI) is -described below. 
To access the CLI, ensure SSH remote access is enabled, -which is the default. You can also perform configuration using the -browser user interface (BUI) or the RESTful API. Please refer to the -`Oracle ZFS Storage Appliance -documentation `__ -for details on how to configure the Oracle ZFS Storage Appliance using -the BUI, CLI, and RESTful API. - -#. Log in to the Oracle ZFS Storage Appliance CLI and enable the REST - service. REST service needs to stay online for this driver to function. - - .. code-block:: console - - zfssa:>configuration services rest enable - -#. Create a new storage pool on the appliance if you do not want to use an - existing one. This storage pool is named ``'mypool'`` for the sake of this - documentation. - -#. Create a new project and share in the storage pool (``mypool``) if you do - not want to use existing ones. This driver will create a project and share - by the names specified in the ``cinder.conf`` file, if a project and share - by that name does not already exist in the storage pool (``mypool``). - The project and share are named ``NFSProject`` and ``nfs_share``' in the - sample ``cinder.conf`` file as entries below. - -#. To perform driver operations, create a role with the following - authorizations: - - .. code-block:: bash - - scope=svc - allow_administer=true, allow_restart=true, allow_configure=true - scope=nas - pool=pool_name, project=project_name, share=share_name, allow_clone=true, allow_createProject=true, allow_createShare=true, allow_changeSpaceProps=true, allow_changeGeneralProps=true, allow_destroy=true, allow_rollback=true, allow_takeSnap=true, allow_changeAccessProps=true, allow_changeProtocolProps=true - - The following examples show how to create a role with authorizations. - - .. 
code-block:: console - - zfssa:> configuration roles - zfssa:configuration roles> role OpenStackRole - zfssa:configuration roles OpenStackRole (uncommitted)> set description="OpenStack NFS Cinder Driver" - zfssa:configuration roles OpenStackRole (uncommitted)> commit - zfssa:configuration roles> select OpenStackRole - zfssa:configuration roles OpenStackRole> authorizations create - zfssa:configuration roles OpenStackRole auth (uncommitted)> set scope=svc - zfssa:configuration roles OpenStackRole auth (uncommitted)> set allow_administer=true - zfssa:configuration roles OpenStackRole auth (uncommitted)> set allow_restart=true - zfssa:configuration roles OpenStackRole auth (uncommitted)> set allow_configure=true - zfssa:configuration roles OpenStackRole auth (uncommitted)> commit - - - .. code-block:: console - - zfssa:> configuration roles OpenStackRole authorizations> set scope=nas - - The following properties need to be set when the scope of this role needs to - be limited to a pool (``mypool``), a project (``NFSProject``) and a share - (``nfs_share``) created in the steps above. This will prevent the user - assigned to this role from being used to modify other pools, projects and - shares. - - .. code-block:: console - - zfssa:configuration roles OpenStackRole auth (uncommitted)> set pool=mypool - zfssa:configuration roles OpenStackRole auth (uncommitted)> set project=NFSProject - zfssa:configuration roles OpenStackRole auth (uncommitted)> set share=nfs_share - -#. The following properties only need to be set when a share and project has - not been created following the steps above and wish to allow the driver to - create them for you. - - .. code-block:: console - - zfssa:configuration roles OpenStackRole auth (uncommitted)> set allow_createProject=true - zfssa:configuration roles OpenStackRole auth (uncommitted)> set allow_createShare=true - - .. 
code-block:: console - - zfssa:configuration roles OpenStackRole auth (uncommitted)> set allow_clone=true - zfssa:configuration roles OpenStackRole auth (uncommitted)> set allow_changeSpaceProps=true - zfssa:configuration roles OpenStackRole auth (uncommitted)> set allow_destroy=true - zfssa:configuration roles OpenStackRole auth (uncommitted)> set allow_rollback=true - zfssa:configuration roles OpenStackRole auth (uncommitted)> set allow_takeSnap=true - zfssa:configuration roles OpenStackRole auth (uncommitted)> set allow_changeAccessProps=true - zfssa:configuration roles OpenStackRole auth (uncommitted)> set allow_changeProtocolProps=true - zfssa:configuration roles OpenStackRole auth (uncommitted)> commit - -#. Create a new user or modify an existing one and assign the new role to - the user. - - The following example shows how to create a new user and assign the new - role to the user. - - .. code-block:: console - - zfssa:> configuration users - zfssa:configuration users> user cinder - zfssa:configuration users cinder (uncommitted)> set fullname="OpenStack Cinder Driver" - zfssa:configuration users cinder (uncommitted)> set initial_password=12345 - zfssa:configuration users cinder (uncommitted)> commit - zfssa:configuration users> select cinder set roles=OpenStackRole - -#. Ensure that NFS and HTTP services on the appliance are online. Note the - HTTPS port number for later entry in the cinder service configuration file - (``cinder.conf``). This driver uses WebDAV over HTTPS to create snapshots - and clones of volumes, and therefore needs to have the HTTP service online. - - The following example illustrates enabling the services and showing their - properties. - - .. code-block:: console - - zfssa:> configuration services nfs - zfssa:configuration services nfs> enable - zfssa:configuration services nfs> show - Properties: - = online - ... - - .. 
code-block:: console - - zfssa:configuration services http> enable - zfssa:configuration services http> show - Properties: - = online - require_login = true - protocols = http/https - listen_port = 80 - https_port = 443 - - .. note:: - - You can also run this `workflow - `__ - to automate the above tasks. - Refer to `Oracle documentation - `__ - on how to download, view, and execute a workflow. - -#. Create a network interface to be used exclusively for data. An existing - network interface may also be used. The following example illustrates how to - make a network interface for data traffic flow only. - - .. note:: - - For better performance and reliability, it is recommended to configure a - separate subnet exclusively for data traffic in your cloud environment. - - .. code-block:: console - - zfssa:> configuration net interfaces - zfssa:configuration net interfaces> select igbx - zfssa:configuration net interfaces igbx> set admin=false - zfssa:configuration net interfaces igbx> commit - -#. For clustered controller systems, the following verification is required in - addition to the above steps. Skip this step if a standalone system is used. - - .. code-block:: console - - zfssa:> configuration cluster resources list - - Verify that both the newly created pool and the network interface are of - type ``singleton`` and are not locked to the current controller. This - approach ensures that the pool and the interface used for data always belong - to the active controller, regardless of the current state of the cluster. - Verify that both the network interface used for management and data, and the - storage pool belong to the same head. - - .. note:: - - There will be a short service interruption during failback/takeover, but - once the process is complete, the driver should be able to access the - ZFSSA for data as well as for management. - -Cinder service configuration -~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -#. 
Define the following required properties in the ``cinder.conf`` - configuration file: - - .. code-block:: ini - - volume_driver = cinder.volume.drivers.zfssa.zfssanfs.ZFSSANFSDriver - san_ip = myhost - san_login = username - san_password = password - zfssa_data_ip = mydata - zfssa_nfs_pool = mypool - - .. note:: - - Management interface ``san_ip`` can be used instead of ``zfssa_data_ip``, - but it is not recommended. - -#. You can also define the following additional properties in the - ``cinder.conf`` configuration file: - - .. code:: ini - - zfssa_nfs_project = NFSProject - zfssa_nfs_share = nfs_share - zfssa_nfs_mount_options = - zfssa_nfs_share_compression = off - zfssa_nfs_share_logbias = latency - zfssa_https_port = 443 - - .. note:: - - The driver does not use the file specified in the ``nfs_shares_config`` - option. - -ZFSSA local cache -~~~~~~~~~~~~~~~~~ - -The local cache feature enables ZFSSA drivers to serve the usage of -bootable volumes significantly better. With the feature, the first -bootable volume created from an image is cached, so that subsequent -volumes can be created directly from the cache, instead of having image -data transferred over the network multiple times. - -The following conditions must be met in order to use ZFSSA local cache -feature: - -- A storage pool needs to be configured. - -- REST and NFS services need to be turned on. - -- On an OpenStack controller, ``cinder.conf`` needs to contain - necessary properties used to configure and set up the ZFSSA NFS - driver, including the following new properties: - - zfssa_enable_local_cache - (True/False) To enable/disable the feature. - - zfssa_cache_directory - The directory name inside zfssa_nfs_share where cache volumes - are stored. - -Every cache volume has two additional properties stored as WebDAV -properties. It is important that they are not altered outside of Block -Storage when the driver is in use: - -image_id - stores the image id as in Image service. 
- -updated_at - stores the most current timestamp when the image is - updated in Image service. - -Driver options -~~~~~~~~~~~~~~ - -The Oracle ZFS Storage Appliance NFS driver supports these options: - -.. include:: ../../tables/cinder-zfssa-nfs.rst - -This driver shares additional NFS configuration options with the generic -NFS driver. For a description of these, see :ref:`cinder-storage_nfs`. diff --git a/doc/source/configuration/block-storage/drivers/zte-storage-driver.rst b/doc/source/configuration/block-storage/drivers/zte-storage-driver.rst deleted file mode 100644 index 898122a5f..000000000 --- a/doc/source/configuration/block-storage/drivers/zte-storage-driver.rst +++ /dev/null @@ -1,158 +0,0 @@ -================== -ZTE volume drivers -================== - -The ZTE volume drivers allow ZTE KS3200 or KU5200 arrays -to be used for Block Storage in OpenStack deployments. - -System requirements -~~~~~~~~~~~~~~~~~~~ - -To use the ZTE drivers, the following prerequisites: - -- ZTE KS3200 or KU5200 array with: - - - iSCSI or FC interfaces - - 30B2 firmware or later - -- Network connectivity between the OpenStack host and the array - management interfaces - -- HTTPS or HTTP must be enabled on the array - -Supported operations -~~~~~~~~~~~~~~~~~~~~ - -- Create, delete, attach, and detach volumes. -- Create, list, and delete volume snapshots. -- Create a volume from a snapshot. -- Copy an image to a volume. -- Copy a volume to an image. -- Clone a volume. -- Extend a volume. -- Migrate a volume with back-end assistance. -- Retype a volume. -- Manage and unmanage a volume. - -Configuring the array -~~~~~~~~~~~~~~~~~~~~~ - -#. Verify that the array can be managed using an HTTPS connection. HTTP can - also be used if ``zte_api_protocol=http`` is placed into the - appropriate sections of the ``cinder.conf`` file. - - Confirm that virtual pools A and B are present if you plan to use - virtual pools for OpenStack storage. - -#. 
Edit the ``cinder.conf`` file to define a storage back-end entry for - each storage pool on the array that will be managed by OpenStack. Each - entry consists of a unique section name, surrounded by square brackets, - followed by options specified in ``key=value`` format. - - - The ``zte_backend_name`` value specifies the name of the storage - pool on the array. - - - The ``volume_backend_name`` option value can be a unique value, if - you wish to be able to assign volumes to a specific storage pool on - the array, or a name that is shared among multiple storage pools to - let the volume scheduler choose where new volumes are allocated. - - - The rest of the options will be repeated for each storage pool in a - given array: the appropriate cinder driver name, IP address or - host name of the array management interface; the username and password - of an array user account with ``manage`` privileges; and the iSCSI IP - addresses for the array if using the iSCSI transport protocol. - - In the examples below, two back ends are defined, one for pool A and one - for pool B, and a common ``volume_backend_name``. Use this for a - single volume type definition can be used to allocate volumes from both - pools. - - **Example: iSCSI back-end entries** - - .. code-block:: ini - - [pool-a] - zte_backend_name = A - volume_backend_name = zte-array - volume_driver = cinder.volume.drivers.zte.zte_iscsi.ZTEISCSIDriver - san_ip = 10.1.2.3 - san_login = manage - san_password = !manage - zte_iscsi_ips = 10.2.3.4,10.2.3.5 - - [pool-b] - zte_backend_name = B - volume_backend_name = zte-array - volume_driver = cinder.volume.drivers.zte.zte_iscsi.ZTEISCSIDriver - san_ip = 10.1.2.3 - san_login = manage - san_password = !manage - zte_iscsi_ips = 10.2.3.4,10.2.3.5 - - **Example: Fibre Channel back end entries** - - .. 
code-block:: ini - - [pool-a] - zte_backend_name = A - volume_backend_name = zte-array - volume_driver = cinder.volume.drivers.zte.zte_fc.ZTEFCDriver - san_ip = 10.1.2.3 - san_login = manage - san_password = !manage - - [pool-b] - zte_backend_name = B - volume_backend_name = zte-array - volume_driver = cinder.volume.drivers.zte.zte_fc.ZTEFCDriver - san_ip = 10.1.2.3 - san_login = manage - san_password = !manage - -#. If HTTPS is not enabled in the array, include - ``zte_api_protocol = http`` in each of the back-end definitions. - -#. If HTTPS is enabled, you can enable certificate verification with the - option ``zte_verify_certificate=True``. You may also use the - ``zte_verify_certificate_path`` parameter to specify the path to a - ``CA_BUNDLE`` file containing CAs other than those in the default list. - -#. Modify the ``[DEFAULT]`` section of the ``cinder.conf`` file to add an - ``enabled_backends`` parameter specifying the back-end entries you added, - and a ``default_volume_type`` parameter specifying the name of a volume - type that you will create in the next step. - - **Example: [DEFAULT] section changes** - - .. code-block:: ini - - [DEFAULT] - # ... - enabled_backends = pool-a,pool-b - default_volume_type = zte - -#. Create a new volume type for each distinct ``volume_backend_name`` value - that you added to the ``cinder.conf`` file. The example below - assumes that the same ``volume_backend_name=zte-array`` - option was specified in all of the - entries, and specifies that the volume type ``zte`` can be used to - allocate volumes from any of them. - - **Example: Creating a volume type** - - .. code-block:: console - - $ openstack volume type create zte - $ openstack volume type set --property volume_backend_name=zte-array zte - -#. After modifying the ``cinder.conf`` file, - restart the ``cinder-volume`` service. 
- -Driver-specific options -~~~~~~~~~~~~~~~~~~~~~~~ - -The following table contains the configuration options that are specific -to the ZTE drivers. - -.. include:: ../../tables/cinder-zte.rst diff --git a/doc/source/configuration/block-storage/fc-zoning.rst b/doc/source/configuration/block-storage/fc-zoning.rst deleted file mode 100644 index 28085e367..000000000 --- a/doc/source/configuration/block-storage/fc-zoning.rst +++ /dev/null @@ -1,126 +0,0 @@ - -.. _fc_zone_manager: - -========================== -Fibre Channel Zone Manager -========================== - -The Fibre Channel Zone Manager allows FC SAN Zone/Access control -management in conjunction with Fibre Channel block storage. The -configuration of Fibre Channel Zone Manager and various zone drivers are -described in this section. - -Configure Block Storage to use Fibre Channel Zone Manager -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -If Block Storage is configured to use a Fibre Channel volume driver that -supports Zone Manager, update ``cinder.conf`` to add the following -configuration options to enable Fibre Channel Zone Manager. - -Make the following changes in the ``/etc/cinder/cinder.conf`` file. - -.. include:: ../tables/cinder-zoning.rst - -To use different Fibre Channel Zone Drivers, use the parameters -described in this section. - -.. note:: - - When multi backend configuration is used, provide the - ``zoning_mode`` configuration option as part of the volume driver - configuration where ``volume_driver`` option is specified. - -.. note:: - - Default value of ``zoning_mode`` is ``None`` and this needs to be - changed to ``fabric`` to allow fabric zoning. - -.. note:: - - ``zoning_policy`` can be configured as ``initiator-target`` or - ``initiator`` - -Brocade Fibre Channel Zone Driver -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -Brocade Fibre Channel Zone Driver performs zoning operations -through HTTP, HTTPS, or SSH. 
- -Set the following options in the ``cinder.conf`` configuration file. - -.. include:: ../tables/cinder-zoning_manager_brcd.rst - -Configure SAN fabric parameters in the form of fabric groups as -described in the example below: - -.. include:: ../tables/cinder-zoning_fabric_brcd.rst - -.. note:: - - Define a fabric group for each fabric using the fabric names used in - ``fc_fabric_names`` configuration option as group name. - -.. note:: - - To define a fabric group for a switch which has Virtual Fabrics - enabled, include the ``fc_virtual_fabric_id`` configuration option - and ``fc_southbound_protocol`` configuration option set to ``HTTP`` - or ``HTTPS`` in the fabric group. Zoning on VF enabled fabric using - ``SSH`` southbound protocol is not supported. - -System requirements -------------------- - -Brocade Fibre Channel Zone Driver requires firmware version FOS v6.4 or -higher. - -As a best practice for zone management, use a user account with -``zoneadmin`` role. Users with ``admin`` role (including the default -``admin`` user account) are limited to a maximum of two concurrent SSH -sessions. - -For information about how to manage Brocade Fibre Channel switches, see -the Brocade Fabric OS user documentation. - -Cisco Fibre Channel Zone Driver -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -Cisco Fibre Channel Zone Driver automates the zoning operations through -SSH. Configure Cisco Zone Driver, Cisco Southbound connector, FC SAN -lookup service and Fabric name. - -Set the following options in the ``cinder.conf`` configuration file. - -.. code-block:: ini - - [fc-zone-manager] - zone_driver = cinder.zonemanager.drivers.cisco.cisco_fc_zone_driver.CiscoFCZoneDriver - fc_san_lookup_service = cinder.zonemanager.drivers.cisco.cisco_fc_san_lookup_service.CiscoFCSanLookupService - fc_fabric_names = CISCO_FABRIC_EXAMPLE - cisco_sb_connector = cinder.zonemanager.drivers.cisco.cisco_fc_zone_client_cli.CiscoFCZoneClientCLI - -.. 
include:: ../tables/cinder-zoning_manager_cisco.rst - -Configure SAN fabric parameters in the form of fabric groups as -described in the example below: - -.. include:: ../tables/cinder-zoning_fabric_cisco.rst - -.. note:: - - Define a fabric group for each fabric using the fabric names used in - ``fc_fabric_names`` configuration option as group name. - - The Cisco Fibre Channel Zone Driver supports basic and enhanced - zoning modes.The zoning VSAN must exist with an active zone set name - which is same as the ``fc_fabric_names`` option. - -System requirements -------------------- - -Cisco MDS 9000 Family Switches. - -Cisco MDS NX-OS Release 6.2(9) or later. - -For information about how to manage Cisco Fibre Channel switches, see -the Cisco MDS 9000 user documentation. diff --git a/doc/source/configuration/block-storage/logs.rst b/doc/source/configuration/block-storage/logs.rst deleted file mode 100644 index 921ede346..000000000 --- a/doc/source/configuration/block-storage/logs.rst +++ /dev/null @@ -1,28 +0,0 @@ -=============================== -Log files used by Block Storage -=============================== - -The corresponding log file of each Block Storage service is stored in -the ``/var/log/cinder/`` directory of the host on which each service -runs. - -.. 
list-table:: **Log files used by Block Storage services** - :header-rows: 1 - :widths: 10 20 10 - - * - Log file - - Service/interface (for CentOS, Fedora, openSUSE, Red Hat Enterprise Linux, and SUSE Linux Enterprise) - - Service/interface (for Ubuntu and Debian) - * - api.log - - openstack-cinder-api - - cinder-api - * - cinder-manage.log - - cinder-manage - - cinder-manage - * - scheduler.log - - openstack-cinder-scheduler - - cinder-scheduler - * - volume.log - - openstack-cinder-volume - - cinder-volume - diff --git a/doc/source/configuration/block-storage/nested-quota.rst b/doc/source/configuration/block-storage/nested-quota.rst deleted file mode 100644 index 9fcdabdaa..000000000 --- a/doc/source/configuration/block-storage/nested-quota.rst +++ /dev/null @@ -1,165 +0,0 @@ -============= -Nested quotas -============= - -Nested quota is a change in how OpenStack services (such as Block Storage and -Compute) handle their quota resources by being hierarchy-aware. The main -reason for this change is to fully appreciate the hierarchical multi-tenancy -concept, which was introduced in keystone in the Kilo release. - -Once you have a project hierarchy created in keystone, nested quotas let you -define how much of a project's quota you want to give to its subprojects. In -that way, hierarchical projects can have hierarchical quotas (also known as -nested quotas). - -Projects and subprojects have similar behaviors, but they differ from each -other when it comes to default quota values. The default quota value for -resources in a subproject is 0, so that when a subproject is created it will -not consume all of its parent's quota. - -In order to keep track of how much of each quota was allocated to a -subproject, a column ``allocated`` was added to the quotas table. This column -is updated after every delete and update quota operation. - -This example shows you how to use nested quotas. - -.. 
note:: - - Assume that you have created a project hierarchy in keystone, such as - follows: - - .. code-block:: console - - +-----------+ - | | - | A | - | / \ | - | B C | - | / | - | D | - +-----------+ - -Getting default quotas -~~~~~~~~~~~~~~~~~~~~~~ - -#. Get the quota for root projects. - - Use the :command:`openstack quota show` command and specify: - - - The ``PROJECT`` of the relevant project. In this case, the name of - project A. - - .. code-block:: console - - $ openstack quota show PROJECT - +----------------------+-------+ - | Field | Value | - +----------------------+-------+ - | ... | ... | - | backup_gigabytes | 1000 | - | backups | 10 | - | gigabytes | 1000 | - | per_volume_gigabytes | -1 | - | snapshots | 10 | - | volumes | 10 | - +----------------------+-------+ - - .. note:: - - This command returns the default values for resources. - This is because the quotas for this project were not explicitly set. - -#. Get the quota for subprojects. - - In this case, use the same :command:`openstack quota show` command and - specify: - - - The ``PROJECT`` of the relevant project. In this case the name of - project B, which is a child of A. - - .. code-block:: console - - $ openstack quota show PROJECT - +----------------------+-------+ - | Field | Value | - +----------------------+-------+ - | ... | ... | - | backup_gigabytes | 0 | - | backups | 0 | - | gigabytes | 0 | - | per_volume_gigabytes | 0 | - | snapshots | 0 | - | volumes | 0 | - +----------------------+-------+ - - .. note:: - - In this case, 0 was the value returned as the quota for all the - resources. This is because project B is a subproject of A, thus, - the default quota value is 0, so that it will not consume all the - quota of its parent project. - -Setting the quotas for subprojects -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -Now that the projects were created, assume that the admin of project B wants -to use it. 
First of all, you need to set the quota limit of the project, -because as a subproject it does not have quotas allocated by default. - -In this example, when all of the parent project is allocated to its -subprojects the user will not be able to create more resources in the parent -project. - -#. Update the quota of B. - - Use the :command:`openstack quota set` command and specify: - - - The ``PROJECT`` of the relevant project. - In this case the name of project B. - - - The ``--volumes`` option, followed by the number to which you wish to - increase the volumes quota. - - .. code-block:: console - - $ openstack quota set --volumes 10 PROJECT - +----------------------+-------+ - | Property | Value | - +----------------------+-------+ - | ... | ... | - | backup_gigabytes | 0 | - | backups | 0 | - | gigabytes | 0 | - | per_volume_gigabytes | 0 | - | snapshots | 0 | - | volumes | 10 | - +----------------------+-------+ - - .. note:: - - The volumes resource quota is updated. - -#. Try to create a volume in project A. - - Use the :command:`openstack volume create` command and specify: - - - The ``SIZE`` of the volume that will be created; - - - The ``NAME`` of the volume. - - .. code-block:: console - - $ openstack volume create --size SIZE NAME - VolumeLimitExceeded: Maximum number of volumes allowed (10) exceeded for quota 'volumes'. (HTTP 413) (Request-ID: req-f6f7cc89-998e-4a82-803d-c73c8ee2016c) - - .. note:: - - As the entirety of project A's volumes quota has been assigned to - project B, it is treated as if all of the quota has been used. This - is true even when project B has not created any volumes. - -See `cinder nested quota spec -`_ -and `hierarchical multi-tenancy spec -`_ -for details. 
diff --git a/doc/source/configuration/block-storage/samples/api-paste.ini.rst b/doc/source/configuration/block-storage/samples/api-paste.ini.rst deleted file mode 100644 index 77d20479b..000000000 --- a/doc/source/configuration/block-storage/samples/api-paste.ini.rst +++ /dev/null @@ -1,10 +0,0 @@ -============= -api-paste.ini -============= - -Use the ``api-paste.ini`` file to configure the Block Storage API -service. - -.. remote-code-block:: none - - https://git.openstack.org/cgit/openstack/cinder/plain/etc/cinder/api-paste.ini?h=stable/ocata diff --git a/doc/source/configuration/block-storage/samples/cinder.conf.rst b/doc/source/configuration/block-storage/samples/cinder.conf.rst deleted file mode 100644 index 6791ab2eb..000000000 --- a/doc/source/configuration/block-storage/samples/cinder.conf.rst +++ /dev/null @@ -1,15 +0,0 @@ -=========== -cinder.conf -=========== - -The ``cinder.conf`` file is installed in ``/etc/cinder`` by default. -When you manually install the Block Storage service, the options in the -``cinder.conf`` file are set to default values. - -The ``cinder.conf`` file contains most of the options needed to configure -the Block Storage service. You can generate the latest configuration file -by using the tox provided by the Block Storage service. Here is a sample -configuration file: - -.. literalinclude:: ../../samples/cinder.conf.sample - :language: ini diff --git a/doc/source/configuration/block-storage/samples/index.rst b/doc/source/configuration/block-storage/samples/index.rst deleted file mode 100644 index 0b759114f..000000000 --- a/doc/source/configuration/block-storage/samples/index.rst +++ /dev/null @@ -1,15 +0,0 @@ -.. _block-storage-sample-configuration-file: - -================================================ -Block Storage service sample configuration files -================================================ - -All the files in this section can be found in ``/etc/cinder``. - -.. 
toctree:: - :maxdepth: 2 - - cinder.conf.rst - api-paste.ini.rst - policy.json.rst - rootwrap.conf.rst diff --git a/doc/source/configuration/block-storage/samples/policy.json.rst b/doc/source/configuration/block-storage/samples/policy.json.rst deleted file mode 100644 index bef8f0a8c..000000000 --- a/doc/source/configuration/block-storage/samples/policy.json.rst +++ /dev/null @@ -1,10 +0,0 @@ -=========== -policy.json -=========== - -The ``policy.json`` file defines additional access controls that apply -to the Block Storage service. - -.. remote-code-block:: none - - https://git.openstack.org/cgit/openstack/cinder/plain/etc/cinder/policy.json?h=stable/ocata diff --git a/doc/source/configuration/block-storage/samples/rootwrap.conf.rst b/doc/source/configuration/block-storage/samples/rootwrap.conf.rst deleted file mode 100644 index e819693ce..000000000 --- a/doc/source/configuration/block-storage/samples/rootwrap.conf.rst +++ /dev/null @@ -1,11 +0,0 @@ -============= -rootwrap.conf -============= - -The ``rootwrap.conf`` file defines configuration values used by the -``rootwrap`` script when the Block Storage service must escalate its -privileges to those of the root user. - -.. remote-code-block:: ini - - https://git.openstack.org/cgit/openstack/cinder/plain/etc/cinder/rootwrap.conf?h=stable/ocata diff --git a/doc/source/configuration/block-storage/schedulers.rst b/doc/source/configuration/block-storage/schedulers.rst deleted file mode 100644 index 31f112809..000000000 --- a/doc/source/configuration/block-storage/schedulers.rst +++ /dev/null @@ -1,11 +0,0 @@ -======================== -Block Storage schedulers -======================== - -Block Storage service uses the ``cinder-scheduler`` service -to determine how to dispatch block storage requests. - -For more information, see `Cinder Scheduler Filters -`_ -and `Cinder Scheduler Weights -`_. 
diff --git a/doc/source/configuration/block-storage/volume-drivers.rst b/doc/source/configuration/block-storage/volume-drivers.rst deleted file mode 100644 index 1a787c1a7..000000000 --- a/doc/source/configuration/block-storage/volume-drivers.rst +++ /dev/null @@ -1,78 +0,0 @@ -============== -Volume drivers -============== - -.. sort by the drivers by open source software -.. and the drivers for proprietary components - -.. toctree:: - :maxdepth: 1 - - drivers/ceph-rbd-volume-driver.rst - drivers/lvm-volume-driver.rst - drivers/nfs-volume-driver.rst - drivers/sheepdog-driver.rst - drivers/smbfs-volume-driver.rst - drivers/blockbridge-eps-driver.rst - drivers/cloudbyte-driver.rst - drivers/coho-data-driver.rst - drivers/coprhd-driver.rst - drivers/datera-volume-driver.rst - drivers/dell-emc-scaleio-driver.rst - drivers/dell-emc-unity-driver.rst - drivers/dell-equallogic-driver.rst - drivers/dell-storagecenter-driver.rst - drivers/dothill-driver.rst - drivers/emc-vmax-driver.rst - drivers/emc-vnx-driver.rst - drivers/emc-xtremio-driver.rst - drivers/falconstor-fss-driver.rst - drivers/fujitsu-eternus-dx-driver.rst - drivers/hds-hnas-driver.rst - drivers/hitachi-storage-volume-driver.rst - drivers/hpe-3par-driver.rst - drivers/hpe-lefthand-driver.rst - drivers/hp-msa-driver.rst - drivers/huawei-storage-driver.rst - drivers/ibm-gpfs-volume-driver.rst - drivers/ibm-storwize-svc-driver.rst - drivers/ibm-storage-volume-driver.rst - drivers/ibm-flashsystem-volume-driver.rst - drivers/infinidat-volume-driver.rst - drivers/infortrend-volume-driver.rst - drivers/itri-disco-driver.rst - drivers/kaminario-driver.rst - drivers/lenovo-driver.rst - drivers/nec-storage-m-series-driver.rst - drivers/netapp-volume-driver.rst - drivers/nimble-volume-driver.rst - drivers/nexentastor4-driver.rst - drivers/nexentastor5-driver.rst - drivers/nexentaedge-driver.rst - drivers/prophetstor-dpl-driver.rst - drivers/pure-storage-driver.rst - drivers/quobyte-driver.rst - 
drivers/scality-sofs-driver.rst - drivers/solidfire-volume-driver.rst - drivers/synology-dsm-driver.rst - drivers/tintri-volume-driver.rst - drivers/violin-v7000-driver.rst - drivers/vzstorage-driver.rst - drivers/vmware-vmdk-driver.rst - drivers/windows-iscsi-volume-driver.rst - drivers/xio-volume-driver.rst - drivers/zadara-volume-driver.rst - drivers/zfssa-iscsi-driver.rst - drivers/zfssa-nfs-driver.rst - drivers/zte-storage-driver.rst - -To use different volume drivers for the cinder-volume service, use the -parameters described in these sections. - -The volume drivers are included in the `Block Storage repository -`_. To set a volume -driver, use the ``volume_driver`` flag. The default is: - -.. code-block:: ini - - volume_driver = cinder.volume.drivers.lvm.LVMVolumeDriver diff --git a/doc/source/configuration/block-storage/volume-encryption.rst b/doc/source/configuration/block-storage/volume-encryption.rst deleted file mode 100644 index 2eef5df7a..000000000 --- a/doc/source/configuration/block-storage/volume-encryption.rst +++ /dev/null @@ -1,213 +0,0 @@ -============================================== -Volume encryption supported by the key manager -============================================== - -We recommend the Key management service (barbican) for storing -encryption keys used by the OpenStack volume encryption feature. It can -be enabled by updating ``cinder.conf`` and ``nova.conf``. - -Initial configuration -~~~~~~~~~~~~~~~~~~~~~ - -Configuration changes need to be made to any nodes running the -``cinder-api`` or ``nova-compute`` server. - -Steps to update ``cinder-api`` servers: - -#. Edit the ``/etc/cinder/cinder.conf`` file to use Key management service - as follows: - - * Look for the ``[key_manager]`` section. - - * Enter a new line directly below ``[key_manager]`` with the following: - - .. code-block:: ini - - api_class = castellan.key_manager.barbican_key_manager.BarbicanKeyManager - -#. Restart ``cinder-api``. 
- -Update ``nova-compute`` servers: - -#. Ensure the ``cryptsetup`` utility is installed, and install - the ``python-barbicanclient`` Python package. - -#. Set up the Key Manager service by editing ``/etc/nova/nova.conf``: - - .. code-block:: ini - - [key_manager] - api_class = castellan.key_manager.barbican_key_manager.BarbicanKeyManager - - .. note:: - - Use a '#' prefix to comment out the line in this section that - begins with 'fixed_key'. - -#. Restart ``nova-compute``. - - -Key management access control -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -Special privileges can be assigned on behalf of an end user to allow -them to manage their own encryption keys, which are required when -creating the encrypted volumes. The Barbican `Default Policy -`_ -for access control specifies that only users with an ``admin`` or -``creator`` role can create keys. The policy is very flexible and -can be modified. - -To assign the ``creator`` role, the admin must know the user ID, -project ID, and creator role ID. See `Assign a role -`_ -for more information. An admin can list existing roles and associated -IDs using the ``openstack role list`` command. If the creator -role does not exist, the admin can `create the role -`_. - - -Create an encrypted volume type -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -Block Storage volume type assignment provides scheduling to a specific -back-end, and can be used to specify actionable information for a -back-end storage device. - -This example creates a volume type called LUKS and provides -configuration information for the storage system to encrypt or decrypt -the volume. - -#. Source your admin credentials: - - .. code-block:: console - - $ . admin-openrc.sh - -#. Create the volume type, marking the volume type as encrypted and providing - the necessary details. Use ``--encryption-control-location`` to specify - where encryption is performed: ``front-end`` (default) or ``back-end``. - - .. 
code-block:: console - - $ openstack volume type create --encryption-provider nova.volume.encryptors.luks.LuksEncryptor \ - --encryption-cipher aes-xts-plain64 --encryption-key-size 256 --encryption-control-location front-end LUKS - - +-------------+----------------------------------------------------------------+ - | Field | Value | - +-------------+----------------------------------------------------------------+ - | description | None | - | encryption | cipher='aes-xts-plain64', control_location='front-end', | - | | encryption_id='8584c43f-1666-43d1-a348-45cfcef72898', | - | | key_size='256', | - | | provider='nova.volume.encryptors.luks.LuksEncryptor' | - | id | b9a8cff5-2f60-40d1-8562-d33f3bf18312 | - | is_public | True | - | name | LUKS | - +-------------+----------------------------------------------------------------+ - -The OpenStack dashboard (horizon) supports creating the encrypted -volume type as of the Kilo release. For instructions, see -`Create an encrypted volume type -`_. - -Create an encrypted volume -~~~~~~~~~~~~~~~~~~~~~~~~~~ - -Use the OpenStack dashboard (horizon), or :command:`openstack volume -create` command to create volumes just as you normally would. For an -encrypted volume, pass the ``--type LUKS`` flag, which specifies that the -volume type will be ``LUKS`` (Linux Unified Key Setup). If that argument is -left out, the default volume type, ``unencrypted``, is used. - -#. Source your admin credentials: - - .. code-block:: console - - $ . admin-openrc.sh - -#. Create an unencrypted 1 GB test volume: - - .. code-block:: console - - - $ openstack volume create --size 1 'unencrypted volume' - - -#. Create an encrypted 1 GB test volume: - - .. code-block:: console - - $ openstack volume create --size 1 --type LUKS 'encrypted volume' - -Notice the encrypted parameter; it will show ``True`` or ``False``. -The option ``volume_type`` is also shown for easy review. 
- -Non-admin users need the ``creator`` role to store secrets in Barbican -and to create encrypted volumes. As an administrator, you can give a user -the creator role in the following way: - -.. code-block:: console - - $ openstack role add --project PROJECT --user USER creator - -For details, see the -`Barbican Access Control page -`_. - -.. note:: - - Due to the issue that some of the volume drivers do not set - ``encrypted`` flag, attaching of encrypted volumes to a virtual - guest will fail, because OpenStack Compute service will not run - encryption providers. - -Testing volume encryption -~~~~~~~~~~~~~~~~~~~~~~~~~ - -This is a simple test scenario to help validate your encryption. It -assumes an LVM based Block Storage server. - -Perform these steps after completing the volume encryption setup and -creating the volume-type for LUKS as described in the preceding -sections. - -#. Create a VM: - - .. code-block:: console - - $ openstack server create --image cirros-0.3.1-x86_64-disk --flavor m1.tiny TESTVM - -#. Create two volumes, one encrypted and one not encrypted then attach them - to your VM: - - .. code-block:: console - - $ openstack volume create --size 1 'unencrypted volume' - $ openstack volume create --size 1 --type LUKS 'encrypted volume' - $ openstack volume list - $ openstack server add volume --device /dev/vdb TESTVM 'unencrypted volume' - $ openstack server add volume --device /dev/vdc TESTVM 'encrypted volume' - -#. On the VM, send some text to the newly attached volumes and synchronize - them: - - .. code-block:: console - - # echo "Hello, world (unencrypted /dev/vdb)" >> /dev/vdb - # echo "Hello, world (encrypted /dev/vdc)" >> /dev/vdc - # sync && sleep 2 - # sync && sleep 2 - -#. On the system hosting cinder volume services, synchronize to flush the - I/O cache then test to see if your strings can be found: - - .. 
code-block:: console - - # sync && sleep 2 - # sync && sleep 2 - # strings /dev/stack-volumes/volume-* | grep "Hello" - Hello, world (unencrypted /dev/vdb) - -In the above example you see that the search returns the string -written to the unencrypted volume, but not the encrypted one. diff --git a/doc/source/configuration/figures/bb-cinder-fig1.png b/doc/source/configuration/figures/bb-cinder-fig1.png deleted file mode 100644 index 022d3652a..000000000 Binary files a/doc/source/configuration/figures/bb-cinder-fig1.png and /dev/null differ diff --git a/doc/source/configuration/figures/ceph-architecture.png b/doc/source/configuration/figures/ceph-architecture.png deleted file mode 100644 index ec4081185..000000000 Binary files a/doc/source/configuration/figures/ceph-architecture.png and /dev/null differ diff --git a/doc/source/configuration/figures/emc-enabler.png b/doc/source/configuration/figures/emc-enabler.png deleted file mode 100644 index b969b8171..000000000 Binary files a/doc/source/configuration/figures/emc-enabler.png and /dev/null differ diff --git a/doc/source/configuration/figures/ibm-storage-nova-concept.png b/doc/source/configuration/figures/ibm-storage-nova-concept.png deleted file mode 100644 index 75e336d48..000000000 Binary files a/doc/source/configuration/figures/ibm-storage-nova-concept.png and /dev/null differ diff --git a/doc/source/configuration/tables/cinder-api.rst b/doc/source/configuration/tables/cinder-api.rst deleted file mode 100644 index a05c41945..000000000 --- a/doc/source/configuration/tables/cinder-api.rst +++ /dev/null @@ -1,90 +0,0 @@ -.. - Warning: Do not edit this file. It is automatically generated from the - software project's code and your changes will be overwritten. - - The tool to generate this file lives in openstack-doc-tools repository. 
- - Please make any changes needed in the code, then run the - autogenerate-config-doc tool from the openstack-doc-tools repository, or - ask for help on the documentation mailing list, IRC channel or meeting. - -.. _cinder-api: - -.. list-table:: Description of API configuration options - :header-rows: 1 - :class: config-ref-table - - * - Configuration option = Default value - - Description - * - **[DEFAULT]** - - - * - ``api_rate_limit`` = ``True`` - - (Boolean) Enables or disables rate limit of the API. - * - ``az_cache_duration`` = ``3600`` - - (Integer) Cache volume availability zones in memory for the provided duration in seconds - * - ``backend_host`` = ``None`` - - (String) Backend override of host value. - * - ``default_timeout`` = ``31536000`` - - (Integer) Default timeout for CLI operations in minutes. For example, LUN migration is a typical long running operation, which depends on the LUN size and the load of the array. An upper bound in the specific deployment can be set to avoid unnecessary long wait. By default, it is 365 days long. - * - ``enable_v1_api`` = ``False`` - - (Boolean) DEPRECATED: Deploy v1 of the Cinder API. - * - ``enable_v2_api`` = ``True`` - - (Boolean) DEPRECATED: Deploy v2 of the Cinder API. - * - ``enable_v3_api`` = ``True`` - - (Boolean) Deploy v3 of the Cinder API. - * - ``extra_capabilities`` = ``{}`` - - (String) User defined capabilities, a JSON formatted string specifying key/value pairs. The key/value pairs can be used by the CapabilitiesFilter to select between backends when requests specify volume types. For example, specifying a service level or the geographical location of a backend, then creating a volume type to allow the user to select by these different properties. - * - ``ignore_pool_full_threshold`` = ``False`` - - (Boolean) Force LUN creation even if the full threshold of pool is reached. By default, the value is False. 
- * - ``management_ips`` = - - (String) List of Management IP addresses (separated by commas) - * - ``message_ttl`` = ``2592000`` - - (Integer) message minimum life in seconds. - * - ``osapi_max_limit`` = ``1000`` - - (Integer) The maximum number of items that a collection resource returns in a single response - * - ``osapi_volume_base_URL`` = ``None`` - - (String) Base URL that will be presented to users in links to the OpenStack Volume API - * - ``osapi_volume_ext_list`` = - - (List) Specify list of extensions to load when using osapi_volume_extension option with cinder.api.contrib.select_extensions - * - ``osapi_volume_extension`` = ``['cinder.api.contrib.standard_extensions']`` - - (Multi-valued) osapi volume extension to load - * - ``osapi_volume_listen`` = ``0.0.0.0`` - - (String) IP address on which OpenStack Volume API listens - * - ``osapi_volume_listen_port`` = ``8776`` - - (Port number) Port on which OpenStack Volume API listens - * - ``osapi_volume_use_ssl`` = ``False`` - - (Boolean) Wraps the socket in a SSL context if True is set. A certificate file and key file must be specified. - * - ``osapi_volume_workers`` = ``None`` - - (Integer) Number of workers for OpenStack Volume API service. The default is equal to the number of CPUs available. - * - ``per_volume_size_limit`` = ``-1`` - - (Integer) Max size allowed per volume, in gigabytes - * - ``public_endpoint`` = ``None`` - - (String) Public url to use for versions endpoint. The default is None, which will use the request's host_url attribute to populate the URL base. If Cinder is operating behind a proxy, you will want to change this to represent the proxy's URL. - * - ``query_volume_filters`` = ``name, status, metadata, availability_zone, bootable, group_id`` - - (List) Volume filter options which non-admin user could use to query volumes. 
Default values are: ['name', 'status', 'metadata', 'availability_zone' ,'bootable', 'group_id'] - * - ``transfer_api_class`` = ``cinder.transfer.api.API`` - - (String) The full class name of the volume transfer API class - * - ``volume_api_class`` = ``cinder.volume.api.API`` - - (String) The full class name of the volume API class to use - * - ``volume_name_prefix`` = ``openstack-`` - - (String) Prefix before volume name to differentiate DISCO volume created through openstack and the other ones - * - ``volume_name_template`` = ``volume-%s`` - - (String) Template string to be used to generate volume names - * - ``volume_number_multiplier`` = ``-1.0`` - - (Floating point) Multiplier used for weighing volume number. Negative numbers mean to spread vs stack. - * - ``volume_transfer_key_length`` = ``16`` - - (Integer) The number of characters in the autogenerated auth key. - * - ``volume_transfer_salt_length`` = ``8`` - - (Integer) The number of characters in the salt. - * - **[oslo_middleware]** - - - * - ``enable_proxy_headers_parsing`` = ``False`` - - (Boolean) Whether the application is behind a proxy or not. This determines if the middleware should parse the headers or not. - * - ``max_request_body_size`` = ``114688`` - - (Integer) The maximum body size for each request, in bytes. - * - ``secure_proxy_ssl_header`` = ``X-Forwarded-Proto`` - - (String) DEPRECATED: The HTTP Header that will be used to determine what the original request protocol scheme was, even if it was hidden by a SSL termination proxy. - * - **[oslo_versionedobjects]** - - - * - ``fatal_exception_format_errors`` = ``False`` - - (Boolean) Make exception message format errors fatal diff --git a/doc/source/configuration/tables/cinder-auth.rst b/doc/source/configuration/tables/cinder-auth.rst deleted file mode 100644 index d5d6f5d9f..000000000 --- a/doc/source/configuration/tables/cinder-auth.rst +++ /dev/null @@ -1,22 +0,0 @@ -.. - Warning: Do not edit this file. 
It is automatically generated from the - software project's code and your changes will be overwritten. - - The tool to generate this file lives in openstack-doc-tools repository. - - Please make any changes needed in the code, then run the - autogenerate-config-doc tool from the openstack-doc-tools repository, or - ask for help on the documentation mailing list, IRC channel or meeting. - -.. _cinder-auth: - -.. list-table:: Description of authorization configuration options - :header-rows: 1 - :class: config-ref-table - - * - Configuration option = Default value - - Description - * - **[DEFAULT]** - - - * - ``auth_strategy`` = ``keystone`` - - (String) The strategy to use for auth. Supports noauth or keystone. diff --git a/doc/source/configuration/tables/cinder-backups.rst b/doc/source/configuration/tables/cinder-backups.rst deleted file mode 100644 index 75cc3d6e1..000000000 --- a/doc/source/configuration/tables/cinder-backups.rst +++ /dev/null @@ -1,48 +0,0 @@ -.. - Warning: Do not edit this file. It is automatically generated from the - software project's code and your changes will be overwritten. - - The tool to generate this file lives in openstack-doc-tools repository. - - Please make any changes needed in the code, then run the - autogenerate-config-doc tool from the openstack-doc-tools repository, or - ask for help on the documentation mailing list, IRC channel or meeting. - -.. _cinder-backups: - -.. list-table:: Description of backups configuration options - :header-rows: 1 - :class: config-ref-table - - * - Configuration option = Default value - - Description - * - **[DEFAULT]** - - - * - ``backup_api_class`` = ``cinder.backup.api.API`` - - (String) The full class name of the volume backup API class - * - ``backup_compression_algorithm`` = ``zlib`` - - (String) Compression algorithm (None to disable) - * - ``backup_driver`` = ``cinder.backup.drivers.swift`` - - (String) Driver to use for backups. 
- * - ``backup_manager`` = ``cinder.backup.manager.BackupManager`` - - (String) Full class name for the Manager for volume backup - * - ``backup_metadata_version`` = ``2`` - - (Integer) Backup metadata version to be used when backing up volume metadata. If this number is bumped, make sure the service doing the restore supports the new version. - * - ``backup_name_template`` = ``backup-%s`` - - (String) Template string to be used to generate backup names - * - ``backup_object_number_per_notification`` = ``10`` - - (Integer) The number of chunks or objects, for which one Ceilometer notification will be sent - * - ``backup_service_inithost_offload`` = ``True`` - - (Boolean) Offload pending backup delete during backup service startup. If false, the backup service will remain down until all pending backups are deleted. - * - ``backup_timer_interval`` = ``120`` - - (Integer) Interval, in seconds, between two progress notifications reporting the backup status - * - ``backup_use_same_host`` = ``False`` - - (Boolean) Backup services use same backend. - * - ``backup_use_temp_snapshot`` = ``False`` - - (Boolean) If this is set to True, the backup_use_temp_snapshot path will be used during the backup. Otherwise, it will use backup_use_temp_volume path. - * - ``snapshot_check_timeout`` = ``3600`` - - (Integer) How long we check whether a snapshot is finished before we give up - * - ``snapshot_name_template`` = ``snapshot-%s`` - - (String) Template string to be used to generate snapshot names - * - ``snapshot_same_host`` = ``True`` - - (Boolean) Create volume from snapshot at the host where snapshot resides diff --git a/doc/source/configuration/tables/cinder-backups_ceph.rst b/doc/source/configuration/tables/cinder-backups_ceph.rst deleted file mode 100644 index c28f75cf5..000000000 --- a/doc/source/configuration/tables/cinder-backups_ceph.rst +++ /dev/null @@ -1,34 +0,0 @@ -.. - Warning: Do not edit this file. 
It is automatically generated from the - software project's code and your changes will be overwritten. - - The tool to generate this file lives in openstack-doc-tools repository. - - Please make any changes needed in the code, then run the - autogenerate-config-doc tool from the openstack-doc-tools repository, or - ask for help on the documentation mailing list, IRC channel or meeting. - -.. _cinder-backups_ceph: - -.. list-table:: Description of Ceph backup driver configuration options - :header-rows: 1 - :class: config-ref-table - - * - Configuration option = Default value - - Description - * - **[DEFAULT]** - - - * - ``backup_ceph_chunk_size`` = ``134217728`` - - (Integer) The chunk size, in bytes, that a backup is broken into before transfer to the Ceph object store. - * - ``backup_ceph_conf`` = ``/etc/ceph/ceph.conf`` - - (String) Ceph configuration file to use. - * - ``backup_ceph_pool`` = ``backups`` - - (String) The Ceph pool where volume backups are stored. - * - ``backup_ceph_stripe_count`` = ``0`` - - (Integer) RBD stripe count to use when creating a backup image. - * - ``backup_ceph_stripe_unit`` = ``0`` - - (Integer) RBD stripe unit to use when creating a backup image. - * - ``backup_ceph_user`` = ``cinder`` - - (String) The Ceph user to connect with. Default here is to use the same user as for Cinder volumes. If not using cephx this should be set to None. - * - ``restore_discard_excess_bytes`` = ``True`` - - (Boolean) If True, always discard excess bytes when restoring volumes i.e. pad with zeroes. diff --git a/doc/source/configuration/tables/cinder-backups_gcs.rst b/doc/source/configuration/tables/cinder-backups_gcs.rst deleted file mode 100644 index 84ffbf006..000000000 --- a/doc/source/configuration/tables/cinder-backups_gcs.rst +++ /dev/null @@ -1,48 +0,0 @@ -.. - Warning: Do not edit this file. It is automatically generated from the - software project's code and your changes will be overwritten. 
- - The tool to generate this file lives in openstack-doc-tools repository. - - Please make any changes needed in the code, then run the - autogenerate-config-doc tool from the openstack-doc-tools repository, or - ask for help on the documentation mailing list, IRC channel or meeting. - -.. _cinder-backups_gcs: - -.. list-table:: Description of GCS backup driver configuration options - :header-rows: 1 - :class: config-ref-table - - * - Configuration option = Default value - - Description - * - **[DEFAULT]** - - - * - ``backup_gcs_block_size`` = ``32768`` - - (Integer) The size in bytes that changes are tracked for incremental backups. backup_gcs_object_size has to be multiple of backup_gcs_block_size. - * - ``backup_gcs_bucket`` = ``None`` - - (String) The GCS bucket to use. - * - ``backup_gcs_bucket_location`` = ``US`` - - (String) Location of GCS bucket. - * - ``backup_gcs_credential_file`` = ``None`` - - (String) Absolute path of GCS service account credential file. - * - ``backup_gcs_enable_progress_timer`` = ``True`` - - (Boolean) Enable or Disable the timer to send the periodic progress notifications to Ceilometer when backing up the volume to the GCS backend storage. The default value is True to enable the timer. - * - ``backup_gcs_num_retries`` = ``3`` - - (Integer) Number of times to retry. - * - ``backup_gcs_object_size`` = ``52428800`` - - (Integer) The size in bytes of GCS backup objects. - * - ``backup_gcs_project_id`` = ``None`` - - (String) Owner project id for GCS bucket. - * - ``backup_gcs_proxy_url`` = ``None`` - - (URI) URL for http proxy access. - * - ``backup_gcs_reader_chunk_size`` = ``2097152`` - - (Integer) GCS object will be downloaded in chunks of bytes. - * - ``backup_gcs_retry_error_codes`` = ``429`` - - (List) List of GCS error codes. - * - ``backup_gcs_storage_class`` = ``NEARLINE`` - - (String) Storage class of GCS bucket. - * - ``backup_gcs_user_agent`` = ``gcscinder`` - - (String) Http user-agent string for gcs api. 
- * - ``backup_gcs_writer_chunk_size`` = ``2097152`` - - (Integer) GCS object will be uploaded in chunks of bytes. Pass in a value of -1 if the file is to be uploaded as a single chunk. diff --git a/doc/source/configuration/tables/cinder-backups_glusterfs.rst b/doc/source/configuration/tables/cinder-backups_glusterfs.rst deleted file mode 100644 index dbae37b09..000000000 --- a/doc/source/configuration/tables/cinder-backups_glusterfs.rst +++ /dev/null @@ -1,24 +0,0 @@ -.. - Warning: Do not edit this file. It is automatically generated from the - software project's code and your changes will be overwritten. - - The tool to generate this file lives in openstack-doc-tools repository. - - Please make any changes needed in the code, then run the - autogenerate-config-doc tool from the openstack-doc-tools repository, or - ask for help on the documentation mailing list, IRC channel or meeting. - -.. _cinder-backups_glusterfs: - -.. list-table:: Description of GlusterFS backup driver configuration options - :header-rows: 1 - :class: config-ref-table - - * - Configuration option = Default value - - Description - * - **[DEFAULT]** - - - * - ``glusterfs_backup_mount_point`` = ``$state_path/backup_mount`` - - (String) Base dir containing mount point for gluster share. - * - ``glusterfs_backup_share`` = ``None`` - - (String) GlusterFS share in : format. Eg: 1.2.3.4:backup_vol diff --git a/doc/source/configuration/tables/cinder-backups_nfs.rst b/doc/source/configuration/tables/cinder-backups_nfs.rst deleted file mode 100644 index 53436a95f..000000000 --- a/doc/source/configuration/tables/cinder-backups_nfs.rst +++ /dev/null @@ -1,34 +0,0 @@ -.. - Warning: Do not edit this file. It is automatically generated from the - software project's code and your changes will be overwritten. - - The tool to generate this file lives in openstack-doc-tools repository. 
- - Please make any changes needed in the code, then run the - autogenerate-config-doc tool from the openstack-doc-tools repository, or - ask for help on the documentation mailing list, IRC channel or meeting. - -.. _cinder-backups_nfs: - -.. list-table:: Description of NFS backup driver configuration options - :header-rows: 1 - :class: config-ref-table - - * - Configuration option = Default value - - Description - * - **[DEFAULT]** - - - * - ``backup_container`` = ``None`` - - (String) Custom directory to use for backups. - * - ``backup_enable_progress_timer`` = ``True`` - - (Boolean) Enable or Disable the timer to send the periodic progress notifications to Ceilometer when backing up the volume to the backend storage. The default value is True to enable the timer. - * - ``backup_file_size`` = ``1999994880`` - - (Integer) The maximum size in bytes of the files used to hold backups. If the volume being backed up exceeds this size, then it will be backed up into multiple files. backup_file_size must be a multiple of backup_sha_block_size_bytes. - * - ``backup_mount_options`` = ``None`` - - (String) Mount options passed to the NFS client. See NFS man page for details. - * - ``backup_mount_point_base`` = ``$state_path/backup_mount`` - - (String) Base dir containing mount point for NFS share. - * - ``backup_sha_block_size_bytes`` = ``32768`` - - (Integer) The size in bytes that changes are tracked for incremental backups. backup_file_size has to be a multiple of backup_sha_block_size_bytes. - * - ``backup_share`` = ``None`` - - (String) NFS share in hostname:path, ipv4addr:path, or "[ipv6addr]:path" format. diff --git a/doc/source/configuration/tables/cinder-backups_posix.rst b/doc/source/configuration/tables/cinder-backups_posix.rst deleted file mode 100644 index c6113a7d3..000000000 --- a/doc/source/configuration/tables/cinder-backups_posix.rst +++ /dev/null @@ -1,30 +0,0 @@ -.. - Warning: Do not edit this file. 
It is automatically generated from the - software project's code and your changes will be overwritten. - - The tool to generate this file lives in openstack-doc-tools repository. - - Please make any changes needed in the code, then run the - autogenerate-config-doc tool from the openstack-doc-tools repository, or - ask for help on the documentation mailing list, IRC channel or meeting. - -.. _cinder-backups_posix: - -.. list-table:: Description of POSIX backup driver configuration options - :header-rows: 1 - :class: config-ref-table - - * - Configuration option = Default value - - Description - * - **[DEFAULT]** - - - * - ``backup_container`` = ``None`` - - (String) Custom directory to use for backups. - * - ``backup_enable_progress_timer`` = ``True`` - - (Boolean) Enable or Disable the timer to send the periodic progress notifications to Ceilometer when backing up the volume to the backend storage. The default value is True to enable the timer. - * - ``backup_file_size`` = ``1999994880`` - - (Integer) The maximum size in bytes of the files used to hold backups. If the volume being backed up exceeds this size, then it will be backed up into multiple files. backup_file_size must be a multiple of backup_sha_block_size_bytes. - * - ``backup_posix_path`` = ``$state_path/backup`` - - (String) Path specifying where to store backups. - * - ``backup_sha_block_size_bytes`` = ``32768`` - - (Integer) The size in bytes that changes are tracked for incremental backups. backup_file_size has to be a multiple of backup_sha_block_size_bytes. diff --git a/doc/source/configuration/tables/cinder-backups_swift.rst b/doc/source/configuration/tables/cinder-backups_swift.rst deleted file mode 100644 index 8439b4445..000000000 --- a/doc/source/configuration/tables/cinder-backups_swift.rst +++ /dev/null @@ -1,56 +0,0 @@ -.. - Warning: Do not edit this file. It is automatically generated from the - software project's code and your changes will be overwritten. 
- - The tool to generate this file lives in openstack-doc-tools repository. - - Please make any changes needed in the code, then run the - autogenerate-config-doc tool from the openstack-doc-tools repository, or - ask for help on the documentation mailing list, IRC channel or meeting. - -.. _cinder-backups_swift: - -.. list-table:: Description of Swift backup driver configuration options - :header-rows: 1 - :class: config-ref-table - - * - Configuration option = Default value - - Description - * - **[DEFAULT]** - - - * - ``backup_swift_auth`` = ``per_user`` - - (String) Swift authentication mechanism - * - ``backup_swift_auth_version`` = ``1`` - - (String) Swift authentication version. Specify "1" for auth 1.0, or "2" for auth 2.0 or "3" for auth 3.0 - * - ``backup_swift_block_size`` = ``32768`` - - (Integer) The size in bytes that changes are tracked for incremental backups. backup_swift_object_size has to be multiple of backup_swift_block_size. - * - ``backup_swift_ca_cert_file`` = ``None`` - - (String) Location of the CA certificate file to use for swift client requests. - * - ``backup_swift_container`` = ``volumebackups`` - - (String) The default Swift container to use - * - ``backup_swift_enable_progress_timer`` = ``True`` - - (Boolean) Enable or Disable the timer to send the periodic progress notifications to Ceilometer when backing up the volume to the Swift backend storage. The default value is True to enable the timer. - * - ``backup_swift_key`` = ``None`` - - (String) Swift key for authentication - * - ``backup_swift_object_size`` = ``52428800`` - - (Integer) The size in bytes of Swift backup objects - * - ``backup_swift_project`` = ``None`` - - (String) Swift project/account name. Required when connecting to an auth 3.0 system - * - ``backup_swift_project_domain`` = ``None`` - - (String) Swift project domain name. 
Required when connecting to an auth 3.0 system - * - ``backup_swift_retry_attempts`` = ``3`` - - (Integer) The number of retries to make for Swift operations - * - ``backup_swift_retry_backoff`` = ``2`` - - (Integer) The backoff time in seconds between Swift retries - * - ``backup_swift_tenant`` = ``None`` - - (String) Swift tenant/account name. Required when connecting to an auth 2.0 system - * - ``backup_swift_url`` = ``None`` - - (URI) The URL of the Swift endpoint - * - ``backup_swift_user`` = ``None`` - - (String) Swift user name - * - ``backup_swift_user_domain`` = ``None`` - - (String) Swift user domain name. Required when connecting to an auth 3.0 system - * - ``keystone_catalog_info`` = ``identity:Identity Service:publicURL`` - - (String) Info to match when looking for keystone in the service catalog. Format is: separated values of the form: :: - Only used if backup_swift_auth_url is unset - * - ``swift_catalog_info`` = ``object-store:swift:publicURL`` - - (String) Info to match when looking for swift in the service catalog. Format is: separated values of the form: :: - Only used if backup_swift_url is unset diff --git a/doc/source/configuration/tables/cinder-backups_tsm.rst b/doc/source/configuration/tables/cinder-backups_tsm.rst deleted file mode 100644 index a3cd05228..000000000 --- a/doc/source/configuration/tables/cinder-backups_tsm.rst +++ /dev/null @@ -1,26 +0,0 @@ -.. - Warning: Do not edit this file. It is automatically generated from the - software project's code and your changes will be overwritten. - - The tool to generate this file lives in openstack-doc-tools repository. - - Please make any changes needed in the code, then run the - autogenerate-config-doc tool from the openstack-doc-tools repository, or - ask for help on the documentation mailing list, IRC channel or meeting. - -.. _cinder-backups_tsm: - -.. 
list-table:: Description of IBM Tivoli Storage Manager backup driver configuration options - :header-rows: 1 - :class: config-ref-table - - * - Configuration option = Default value - - Description - * - **[DEFAULT]** - - - * - ``backup_tsm_compression`` = ``True`` - - (Boolean) Enable or Disable compression for backups - * - ``backup_tsm_password`` = ``password`` - - (String) TSM password for the running username - * - ``backup_tsm_volume_prefix`` = ``backup`` - - (String) Volume prefix for the backup id when backing up to TSM diff --git a/doc/source/configuration/tables/cinder-block-device.rst b/doc/source/configuration/tables/cinder-block-device.rst deleted file mode 100644 index dcbf53d02..000000000 --- a/doc/source/configuration/tables/cinder-block-device.rst +++ /dev/null @@ -1,22 +0,0 @@ -.. - Warning: Do not edit this file. It is automatically generated from the - software project's code and your changes will be overwritten. - - The tool to generate this file lives in openstack-doc-tools repository. - - Please make any changes needed in the code, then run the - autogenerate-config-doc tool from the openstack-doc-tools repository, or - ask for help on the documentation mailing list, IRC channel or meeting. - -.. _cinder-block-device: - -.. list-table:: Description of block device configuration options - :header-rows: 1 - :class: config-ref-table - - * - Configuration option = Default value - - Description - * - **[DEFAULT]** - - - * - ``available_devices`` = - - (List) List of all available devices diff --git a/doc/source/configuration/tables/cinder-blockbridge.rst b/doc/source/configuration/tables/cinder-blockbridge.rst deleted file mode 100644 index f828eab76..000000000 --- a/doc/source/configuration/tables/cinder-blockbridge.rst +++ /dev/null @@ -1,36 +0,0 @@ -.. - Warning: Do not edit this file. It is automatically generated from the - software project's code and your changes will be overwritten. 
- - The tool to generate this file lives in openstack-doc-tools repository. - - Please make any changes needed in the code, then run the - autogenerate-config-doc tool from the openstack-doc-tools repository, or - ask for help on the documentation mailing list, IRC channel or meeting. - -.. _cinder-blockbridge: - -.. list-table:: Description of BlockBridge EPS volume driver configuration options - :header-rows: 1 - :class: config-ref-table - - * - Configuration option = Default value - - Description - * - **[DEFAULT]** - - - * - ``blockbridge_api_host`` = ``None`` - - (String) IP address/hostname of Blockbridge API. - * - ``blockbridge_api_port`` = ``None`` - - (Integer) Override HTTPS port to connect to Blockbridge API server. - * - ``blockbridge_auth_password`` = ``None`` - - (String) Blockbridge API password (for auth scheme 'password') - * - ``blockbridge_auth_scheme`` = ``token`` - - (String) Blockbridge API authentication scheme (token or password) - * - ``blockbridge_auth_token`` = ``None`` - - (String) Blockbridge API token (for auth scheme 'token') - * - ``blockbridge_auth_user`` = ``None`` - - (String) Blockbridge API user (for auth scheme 'password') - * - ``blockbridge_default_pool`` = ``None`` - - (String) Default pool name if unspecified. - * - ``blockbridge_pools`` = ``{'OpenStack': '+openstack'}`` - - (Dict) Defines the set of exposed pools and their associated backend query strings diff --git a/doc/source/configuration/tables/cinder-cloudbyte.rst b/doc/source/configuration/tables/cinder-cloudbyte.rst deleted file mode 100644 index 74d7e87a6..000000000 --- a/doc/source/configuration/tables/cinder-cloudbyte.rst +++ /dev/null @@ -1,44 +0,0 @@ -.. - Warning: Do not edit this file. It is automatically generated from the - software project's code and your changes will be overwritten. - - The tool to generate this file lives in openstack-doc-tools repository. 
- - Please make any changes needed in the code, then run the - autogenerate-config-doc tool from the openstack-doc-tools repository, or - ask for help on the documentation mailing list, IRC channel or meeting. - -.. _cinder-cloudbyte: - -.. list-table:: Description of CloudByte volume driver configuration options - :header-rows: 1 - :class: config-ref-table - - * - Configuration option = Default value - - Description - * - **[DEFAULT]** - - - * - ``cb_account_name`` = ``None`` - - (String) CloudByte storage specific account name. This maps to a project name in OpenStack. - * - ``cb_add_qosgroup`` = ``{'latency': '15', 'iops': '10', 'graceallowed': 'false', 'iopscontrol': 'true', 'memlimit': '0', 'throughput': '0', 'tpcontrol': 'false', 'networkspeed': '0'}`` - - (Dict) These values will be used for CloudByte storage's addQos API call. - * - ``cb_apikey`` = ``None`` - - (String) Driver will use this API key to authenticate against the CloudByte storage's management interface. - * - ``cb_auth_group`` = ``None`` - - (String) This corresponds to the discovery authentication group in CloudByte storage. Chap users are added to this group. Driver uses the first user found for this group. Default value is None. - * - ``cb_confirm_volume_create_retries`` = ``3`` - - (Integer) Will confirm a successful volume creation in CloudByte storage by making this many number of attempts. - * - ``cb_confirm_volume_create_retry_interval`` = ``5`` - - (Integer) A retry value in seconds. Will be used by the driver to check if volume creation was successful in CloudByte storage. - * - ``cb_confirm_volume_delete_retries`` = ``3`` - - (Integer) Will confirm a successful volume deletion in CloudByte storage by making this many number of attempts. - * - ``cb_confirm_volume_delete_retry_interval`` = ``5`` - - (Integer) A retry value in seconds. Will be used by the driver to check if volume deletion was successful in CloudByte storage. 
- * - ``cb_create_volume`` = ``{'compression': 'off', 'deduplication': 'off', 'blocklength': '512B', 'sync': 'always', 'protocoltype': 'ISCSI', 'recordsize': '16k'}`` - - (Dict) These values will be used for CloudByte storage's createVolume API call. - * - ``cb_tsm_name`` = ``None`` - - (String) This corresponds to the name of Tenant Storage Machine (TSM) in CloudByte storage. A volume will be created in this TSM. - * - ``cb_update_file_system`` = ``compression, sync, noofcopies, readonly`` - - (List) These values will be used for CloudByte storage's updateFileSystem API call. - * - ``cb_update_qos_group`` = ``iops, latency, graceallowed`` - - (List) These values will be used for CloudByte storage's updateQosGroup API call. diff --git a/doc/source/configuration/tables/cinder-coho.rst b/doc/source/configuration/tables/cinder-coho.rst deleted file mode 100644 index d15da09d1..000000000 --- a/doc/source/configuration/tables/cinder-coho.rst +++ /dev/null @@ -1,22 +0,0 @@ -.. - Warning: Do not edit this file. It is automatically generated from the - software project's code and your changes will be overwritten. - - The tool to generate this file lives in openstack-doc-tools repository. - - Please make any changes needed in the code, then run the - autogenerate-config-doc tool from the openstack-doc-tools repository, or - ask for help on the documentation mailing list, IRC channel or meeting. - -.. _cinder-coho: - -.. list-table:: Description of Coho volume driver configuration options - :header-rows: 1 - :class: config-ref-table - - * - Configuration option = Default value - - Description - * - **[DEFAULT]** - - - * - ``coho_rpc_port`` = ``2049`` - - (Integer) RPC port to connect to Coho Data MicroArray diff --git a/doc/source/configuration/tables/cinder-common.rst b/doc/source/configuration/tables/cinder-common.rst deleted file mode 100644 index 6f77c02f3..000000000 --- a/doc/source/configuration/tables/cinder-common.rst +++ /dev/null @@ -1,162 +0,0 @@ -.. 
- Warning: Do not edit this file. It is automatically generated from the - software project's code and your changes will be overwritten. - - The tool to generate this file lives in openstack-doc-tools repository. - - Please make any changes needed in the code, then run the - autogenerate-config-doc tool from the openstack-doc-tools repository, or - ask for help on the documentation mailing list, IRC channel or meeting. - -.. _cinder-common: - -.. list-table:: Description of common configuration options - :header-rows: 1 - :class: config-ref-table - - * - Configuration option = Default value - - Description - * - **[DEFAULT]** - - - * - ``allow_availability_zone_fallback`` = ``False`` - - (Boolean) If the requested Cinder availability zone is unavailable, fall back to the value of default_availability_zone, then storage_availability_zone, instead of failing. - * - ``chap`` = ``disabled`` - - (String) CHAP authentication mode, effective only for iscsi (disabled|enabled) - * - ``chap_password`` = - - (String) Password for specified CHAP account name. - * - ``chap_username`` = - - (String) CHAP user name. - * - ``chiscsi_conf`` = ``/etc/chelsio-iscsi/chiscsi.conf`` - - (String) Chiscsi (CXT) global defaults configuration file - * - ``cinder_internal_tenant_project_id`` = ``None`` - - (String) ID of the project which will be used as the Cinder internal tenant. - * - ``cinder_internal_tenant_user_id`` = ``None`` - - (String) ID of the user to be used in volume operations as the Cinder internal tenant. - * - ``cluster`` = ``None`` - - (String) Name of this cluster. Used to group volume hosts that share the same backend configurations to work in HA Active-Active mode. Active-Active is not yet supported. 
- * - ``compute_api_class`` = ``cinder.compute.nova.API`` - - (String) The full class name of the compute API class to use - * - ``connection_type`` = ``iscsi`` - - (String) Connection type to the IBM Storage Array - * - ``consistencygroup_api_class`` = ``cinder.consistencygroup.api.API`` - - (String) The full class name of the consistencygroup API class - * - ``default_availability_zone`` = ``None`` - - (String) Default availability zone for new volumes. If not set, the storage_availability_zone option value is used as the default for new volumes. - * - ``default_group_type`` = ``None`` - - (String) Default group type to use - * - ``default_volume_type`` = ``None`` - - (String) Default volume type to use - * - ``driver_client_cert`` = ``None`` - - (String) The path to the client certificate for verification, if the driver supports it. - * - ``driver_client_cert_key`` = ``None`` - - (String) The path to the client certificate key for verification, if the driver supports it. - * - ``driver_data_namespace`` = ``None`` - - (String) Namespace for driver private data values to be saved in. - * - ``driver_ssl_cert_path`` = ``None`` - - (String) Can be used to specify a non default path to a CA_BUNDLE file or directory with certificates of trusted CAs, which will be used to validate the backend - * - ``driver_ssl_cert_verify`` = ``False`` - - (Boolean) If set to True the http client will validate the SSL certificate of the backend endpoint. - * - ``enable_force_upload`` = ``False`` - - (Boolean) Enables the Force option on upload_to_image. This enables running upload_volume on in-use volumes for backends that support it. - * - ``enable_new_services`` = ``True`` - - (Boolean) Services to be added to the available pool on create - * - ``enable_unsupported_driver`` = ``False`` - - (Boolean) Set this to True when you want to allow an unsupported driver to start. 
Drivers that haven't maintained a working CI system and testing are marked as unsupported until CI is working again. This also marks a driver as deprecated and may be removed in the next release. - * - ``end_time`` = ``None`` - - (String) If this option is specified then the end time specified is used instead of the end time of the last completed audit period. - * - ``enforce_multipath_for_image_xfer`` = ``False`` - - (Boolean) If this is set to True, attachment of volumes for image transfer will be aborted when multipathd is not running. Otherwise, it will fallback to single path. - * - ``executor_thread_pool_size`` = ``64`` - - (Integer) Size of executor thread pool. - * - ``fatal_exception_format_errors`` = ``False`` - - (Boolean) Make exception message format errors fatal. - * - ``group_api_class`` = ``cinder.group.api.API`` - - (String) The full class name of the group API class - * - ``host`` = ``localhost`` - - (String) Name of this node. This can be an opaque identifier. It is not necessarily a host name, FQDN, or IP address. - * - ``iet_conf`` = ``/etc/iet/ietd.conf`` - - (String) IET configuration file - * - ``iscsi_secondary_ip_addresses`` = - - (List) The list of secondary IP addresses of the iSCSI daemon - * - ``max_over_subscription_ratio`` = ``20.0`` - - (Floating point) Float representation of the over subscription ratio when thin provisioning is involved. Default ratio is 20.0, meaning provisioned capacity can be 20 times of the total physical capacity. If the ratio is 10.5, it means provisioned capacity can be 10.5 times of the total physical capacity. A ratio of 1.0 means provisioned capacity cannot exceed the total physical capacity. The ratio has to be a minimum of 1.0. 
- * - ``monkey_patch`` = ``False`` - - (Boolean) Enable monkey patching - * - ``monkey_patch_modules`` = - - (List) List of modules/decorators to monkey patch - * - ``my_ip`` = ``10.0.0.1`` - - (String) IP address of this host - * - ``no_snapshot_gb_quota`` = ``False`` - - (Boolean) Whether snapshots count against gigabyte quota - * - ``num_shell_tries`` = ``3`` - - (Integer) Number of times to attempt to run flakey shell commands - * - ``os_privileged_user_auth_url`` = ``None`` - - (URI) Auth URL associated with the OpenStack privileged account. - * - ``os_privileged_user_name`` = ``None`` - - (String) OpenStack privileged account username. Used for requests to other services (such as Nova) that require an account with special rights. - * - ``os_privileged_user_password`` = ``None`` - - (String) Password associated with the OpenStack privileged account. - * - ``os_privileged_user_tenant`` = ``None`` - - (String) Tenant name associated with the OpenStack privileged account. - * - ``periodic_fuzzy_delay`` = ``60`` - - (Integer) Range, in seconds, to randomly delay when starting the periodic task scheduler to reduce stampeding. (Disable by setting to 0) - * - ``periodic_interval`` = ``60`` - - (Integer) Interval, in seconds, between running periodic tasks - * - ``replication_device`` = ``None`` - - (Unknown) Multi opt of dictionaries to represent a replication target device. This option may be specified multiple times in a single config section to specify multiple replication target devices. Each entry takes the standard dict config form: replication_device = target_device_id:,key1:value1,key2:value2... - * - ``report_discard_supported`` = ``False`` - - (Boolean) Report to clients of Cinder that the backend supports discard (aka. trim/unmap). This will not actually change the behavior of the backend or the client directly, it will only notify that it can be used. 
- * - ``report_interval`` = ``10`` - - (Integer) Interval, in seconds, between nodes reporting state to datastore - * - ``reserved_percentage`` = ``0`` - - (Integer) The percentage of backend capacity is reserved - * - ``rootwrap_config`` = ``/etc/cinder/rootwrap.conf`` - - (String) Path to the rootwrap configuration file to use for running commands as root - * - ``send_actions`` = ``False`` - - (Boolean) Send the volume and snapshot create and delete notifications generated in the specified period. - * - ``service_down_time`` = ``60`` - - (Integer) Maximum time since last check-in for a service to be considered up - * - ``ssh_hosts_key_file`` = ``$state_path/ssh_known_hosts`` - - (String) File containing SSH host keys for the systems with which Cinder needs to communicate. OPTIONAL: Default=$state_path/ssh_known_hosts - * - ``start_time`` = ``None`` - - (String) If this option is specified then the start time specified is used instead of the start time of the last completed audit period. - * - ``state_path`` = ``/var/lib/cinder`` - - (String) Top-level directory for maintaining cinder's state - * - ``storage_availability_zone`` = ``nova`` - - (String) Availability zone of this node - * - ``storage_protocol`` = ``iscsi`` - - (String) Protocol for transferring data between host and storage back-end. - * - ``strict_ssh_host_key_policy`` = ``False`` - - (Boolean) Option to enable strict host key checking. When set to "True" Cinder will only connect to systems with a host key present in the configured "ssh_hosts_key_file". When set to "False" the host key will be saved upon first connection and used for subsequent connections. Default=False - * - ``suppress_requests_ssl_warnings`` = ``False`` - - (Boolean) Suppress requests library SSL certificate warnings. - * - ``tcp_keepalive`` = ``True`` - - (Boolean) Sets the value of TCP_KEEPALIVE (True/False) for each server socket. 
- * - ``tcp_keepalive_count`` = ``None`` - - (Integer) Sets the value of TCP_KEEPCNT for each server socket. Not supported on OS X. - * - ``tcp_keepalive_interval`` = ``None`` - - (Integer) Sets the value of TCP_KEEPINTVL in seconds for each server socket. Not supported on OS X. - * - ``until_refresh`` = ``0`` - - (Integer) Count of reservations until usage is refreshed - * - ``use_chap_auth`` = ``False`` - - (Boolean) Option to enable/disable CHAP authentication for targets. - * - ``use_forwarded_for`` = ``False`` - - (Boolean) Treat X-Forwarded-For as the canonical remote address. Only enable this if you have a sanitizing proxy. - * - **[healthcheck]** - - - * - ``backends`` = - - (List) Additional backends that can perform health checks and report that information back as part of a request. - * - ``detailed`` = ``False`` - - (Boolean) Show more detailed information as part of the response - * - ``disable_by_file_path`` = ``None`` - - (String) Check the presence of a file to determine if an application is running on a port. Used by DisableByFileHealthcheck plugin. - * - ``disable_by_file_paths`` = - - (List) Check the presence of a file based on a port to determine if an application is running on a port. Expects a "port:path" list of strings. Used by DisableByFilesPortsHealthcheck plugin. - * - ``path`` = ``/healthcheck`` - - (String) DEPRECATED: The path to respond to healthcheck requests on. - * - **[key_manager]** - - - * - ``api_class`` = ``castellan.key_manager.barbican_key_manager.BarbicanKeyManager`` - - (String) The full class name of the key manager API class - * - ``fixed_key`` = ``None`` - - (String) Fixed key returned by key manager, specified in hex diff --git a/doc/source/configuration/tables/cinder-compute.rst b/doc/source/configuration/tables/cinder-compute.rst deleted file mode 100644 index 99e411fc1..000000000 --- a/doc/source/configuration/tables/cinder-compute.rst +++ /dev/null @@ -1,34 +0,0 @@ -.. - Warning: Do not edit this file. 
It is automatically generated from the - software project's code and your changes will be overwritten. - - The tool to generate this file lives in openstack-doc-tools repository. - - Please make any changes needed in the code, then run the - autogenerate-config-doc tool from the openstack-doc-tools repository, or - ask for help on the documentation mailing list, IRC channel or meeting. - -.. _cinder-compute: - -.. list-table:: Description of Compute configuration options - :header-rows: 1 - :class: config-ref-table - - * - Configuration option = Default value - - Description - * - **[DEFAULT]** - - - * - ``nova_api_insecure`` = ``False`` - - (Boolean) Allow to perform insecure SSL requests to nova - * - ``nova_ca_certificates_file`` = ``None`` - - (String) Location of ca certificates file to use for nova client requests. - * - ``nova_catalog_admin_info`` = ``compute:Compute Service:adminURL`` - - (String) Same as nova_catalog_info, but for admin endpoint. - * - ``nova_catalog_info`` = ``compute:Compute Service:publicURL`` - - (String) Match this value when searching for nova in the service catalog. Format is: separated values of the form: :: - * - ``nova_endpoint_admin_template`` = ``None`` - - (String) Same as nova_endpoint_template, but for admin endpoint. - * - ``nova_endpoint_template`` = ``None`` - - (String) Override service catalog lookup with template for nova endpoint e.g. http://localhost:8774/v2/%(project_id)s - * - ``os_region_name`` = ``None`` - - (String) Region name of this node diff --git a/doc/source/configuration/tables/cinder-coordination.rst b/doc/source/configuration/tables/cinder-coordination.rst deleted file mode 100644 index 0b5eb8d38..000000000 --- a/doc/source/configuration/tables/cinder-coordination.rst +++ /dev/null @@ -1,28 +0,0 @@ -.. - Warning: Do not edit this file. It is automatically generated from the - software project's code and your changes will be overwritten. 
- - The tool to generate this file lives in openstack-doc-tools repository. - - Please make any changes needed in the code, then run the - autogenerate-config-doc tool from the openstack-doc-tools repository, or - ask for help on the documentation mailing list, IRC channel or meeting. - -.. _cinder-coordination: - -.. list-table:: Description of Coordination configuration options - :header-rows: 1 - :class: config-ref-table - - * - Configuration option = Default value - - Description - * - **[coordination]** - - - * - ``backend_url`` = ``file://$state_path`` - - (String) The backend URL to use for distributed coordination. - * - ``heartbeat`` = ``1.0`` - - (Floating point) Number of seconds between heartbeats for distributed coordination. - * - ``initial_reconnect_backoff`` = ``0.1`` - - (Floating point) Initial number of seconds to wait after failed reconnection. - * - ``max_reconnect_backoff`` = ``60.0`` - - (Floating point) Maximum number of seconds between sequential reconnection retries. diff --git a/doc/source/configuration/tables/cinder-coprhd.rst b/doc/source/configuration/tables/cinder-coprhd.rst deleted file mode 100644 index d1f6ab9ca..000000000 --- a/doc/source/configuration/tables/cinder-coprhd.rst +++ /dev/null @@ -1,48 +0,0 @@ -.. - Warning: Do not edit this file. It is automatically generated from the - software project's code and your changes will be overwritten. - - The tool to generate this file lives in openstack-doc-tools repository. - - Please make any changes needed in the code, then run the - autogenerate-config-doc tool from the openstack-doc-tools repository, or - ask for help on the documentation mailing list, IRC channel or meeting. - -.. _cinder-coprhd: - -.. 
list-table:: Description of Coprhd volume driver configuration options - :header-rows: 1 - :class: config-ref-table - - * - Configuration option = Default value - - Description - * - **[DEFAULT]** - - - * - ``coprhd_emulate_snapshot`` = ``False`` - - (Boolean) True | False to indicate if the storage array in CoprHD is VMAX or VPLEX - * - ``coprhd_hostname`` = ``None`` - - (String) Hostname for the CoprHD Instance - * - ``coprhd_password`` = ``None`` - - (String) Password for accessing the CoprHD Instance - * - ``coprhd_port`` = ``4443`` - - (Port number) Port for the CoprHD Instance - * - ``coprhd_project`` = ``None`` - - (String) Project to utilize within the CoprHD Instance - * - ``coprhd_scaleio_rest_gateway_host`` = ``None`` - - (String) Rest Gateway IP or FQDN for Scaleio - * - ``coprhd_scaleio_rest_gateway_port`` = ``4984`` - - (Port number) Rest Gateway Port for Scaleio - * - ``coprhd_scaleio_rest_server_password`` = ``None`` - - (String) Rest Gateway Password - * - ``coprhd_scaleio_rest_server_username`` = ``None`` - - (String) Username for Rest Gateway - * - ``coprhd_tenant`` = ``None`` - - (String) Tenant to utilize within the CoprHD Instance - * - ``coprhd_username`` = ``None`` - - (String) Username for accessing the CoprHD Instance - * - ``coprhd_varray`` = ``None`` - - (String) Virtual Array to utilize within the CoprHD Instance - * - ``scaleio_server_certificate_path`` = ``None`` - - (String) Server certificate path - * - ``scaleio_verify_server_certificate`` = ``False`` - - (Boolean) verify server certificate diff --git a/doc/source/configuration/tables/cinder-datera.rst b/doc/source/configuration/tables/cinder-datera.rst deleted file mode 100644 index 5e5f03439..000000000 --- a/doc/source/configuration/tables/cinder-datera.rst +++ /dev/null @@ -1,34 +0,0 @@ -.. - Warning: Do not edit this file. It is automatically generated from the - software project's code and your changes will be overwritten. 
- - The tool to generate this file lives in openstack-doc-tools repository. - - Please make any changes needed in the code, then run the - autogenerate-config-doc tool from the openstack-doc-tools repository, or - ask for help on the documentation mailing list, IRC channel or meeting. - -.. _cinder-datera: - -.. list-table:: Description of Datera volume driver configuration options - :header-rows: 1 - :class: config-ref-table - - * - Configuration option = Default value - - Description - * - **[DEFAULT]** - - - * - ``datera_503_interval`` = ``5`` - - (Integer) Interval between 503 retries - * - ``datera_503_timeout`` = ``120`` - - (Integer) Timeout for HTTP 503 retry messages - * - ``datera_api_port`` = ``7717`` - - (String) Datera API port. - * - ``datera_api_version`` = ``2`` - - (String) DEPRECATED: Datera API version. - * - ``datera_debug`` = ``False`` - - (Boolean) True to set function arg and return logging - * - ``datera_debug_replica_count_override`` = ``False`` - - (Boolean) ONLY FOR DEBUG/TESTING PURPOSES True to set replica_count to 1 - * - ``datera_tenant_id`` = ``None`` - - (String) If set to 'Map' --> OpenStack project ID will be mapped implicitly to Datera tenant ID If set to 'None' --> Datera tenant ID will not be used during volume provisioning If set to anything else --> Datera tenant ID will be the provided value diff --git a/doc/source/configuration/tables/cinder-debug.rst b/doc/source/configuration/tables/cinder-debug.rst deleted file mode 100644 index 97ff1c9e1..000000000 --- a/doc/source/configuration/tables/cinder-debug.rst +++ /dev/null @@ -1,22 +0,0 @@ -.. - Warning: Do not edit this file. It is automatically generated from the - software project's code and your changes will be overwritten. - - The tool to generate this file lives in openstack-doc-tools repository. 
- - Please make any changes needed in the code, then run the - autogenerate-config-doc tool from the openstack-doc-tools repository, or - ask for help on the documentation mailing list, IRC channel or meeting. - -.. _cinder-debug: - -.. list-table:: Description of logging configuration options - :header-rows: 1 - :class: config-ref-table - - * - Configuration option = Default value - - Description - * - **[DEFAULT]** - - - * - ``trace_flags`` = ``None`` - - (List) List of options that control which trace info is written to the DEBUG log level to assist developers. Valid values are method and api. diff --git a/doc/source/configuration/tables/cinder-dell_emc_unity.rst b/doc/source/configuration/tables/cinder-dell_emc_unity.rst deleted file mode 100644 index 14495561d..000000000 --- a/doc/source/configuration/tables/cinder-dell_emc_unity.rst +++ /dev/null @@ -1,24 +0,0 @@ -.. - Warning: Do not edit this file. It is automatically generated from the - software project's code and your changes will be overwritten. - - The tool to generate this file lives in openstack-doc-tools repository. - - Please make any changes needed in the code, then run the - autogenerate-config-doc tool from the openstack-doc-tools repository, or - ask for help on the documentation mailing list, IRC channel or meeting. - -.. _cinder-dell_emc_unity: - -.. list-table:: Description of Dell EMC Unity volume driver configuration options - :header-rows: 1 - :class: config-ref-table - - * - Configuration option = Default value - - Description - * - **[DEFAULT]** - - - * - ``unity_io_ports`` = ``None`` - - (List) A comma-separated list of iSCSI or FC ports to be used. Each port can be Unix-style glob expressions. - * - ``unity_storage_pool_names`` = ``None`` - - (List) A comma-separated list of storage pool names to be used. 
diff --git a/doc/source/configuration/tables/cinder-dellsc.rst b/doc/source/configuration/tables/cinder-dellsc.rst deleted file mode 100644 index 9d9ca732b..000000000 --- a/doc/source/configuration/tables/cinder-dellsc.rst +++ /dev/null @@ -1,42 +0,0 @@ -.. - Warning: Do not edit this file. It is automatically generated from the - software project's code and your changes will be overwritten. - - The tool to generate this file lives in openstack-doc-tools repository. - - Please make any changes needed in the code, then run the - autogenerate-config-doc tool from the openstack-doc-tools repository, or - ask for help on the documentation mailing list, IRC channel or meeting. - -.. _cinder-dellsc: - -.. list-table:: Description of Dell Storage Center volume driver configuration options - :header-rows: 1 - :class: config-ref-table - - * - Configuration option = Default value - - Description - * - **[DEFAULT]** - - - * - ``dell_sc_api_port`` = ``3033`` - - (Port number) Dell API port - * - ``dell_sc_server_folder`` = ``openstack`` - - (String) Name of the server folder to use on the Storage Center - * - ``dell_sc_ssn`` = ``64702`` - - (Integer) Storage Center System Serial Number - * - ``dell_sc_verify_cert`` = ``False`` - - (Boolean) Enable HTTPS SC certificate verification - * - ``dell_sc_volume_folder`` = ``openstack`` - - (String) Name of the volume folder to use on the Storage Center - * - ``dell_server_os`` = ``Red Hat Linux 6.x`` - - (String) Server OS type to use when creating a new server on the Storage Center. - * - ``excluded_domain_ip`` = ``None`` - - (Unknown) Domain IP to be excluded from iSCSI returns. 
- * - ``secondary_san_ip`` = - - (String) IP address of secondary DSM controller - * - ``secondary_san_login`` = ``Admin`` - - (String) Secondary DSM user name - * - ``secondary_san_password`` = - - (String) Secondary DSM user password - * - ``secondary_sc_api_port`` = ``3033`` - - (Port number) Secondary Dell API port diff --git a/doc/source/configuration/tables/cinder-disco.rst b/doc/source/configuration/tables/cinder-disco.rst deleted file mode 100644 index c4c0bb231..000000000 --- a/doc/source/configuration/tables/cinder-disco.rst +++ /dev/null @@ -1,40 +0,0 @@ -.. - Warning: Do not edit this file. It is automatically generated from the - software project's code and your changes will be overwritten. - - The tool to generate this file lives in openstack-doc-tools repository. - - Please make any changes needed in the code, then run the - autogenerate-config-doc tool from the openstack-doc-tools repository, or - ask for help on the documentation mailing list, IRC channel or meeting. - -.. _cinder-disco: - -.. list-table:: Description of Disco volume driver configuration options - :header-rows: 1 - :class: config-ref-table - - * - Configuration option = Default value - - Description - * - **[DEFAULT]** - - - * - ``choice_client`` = ``None`` - - (String) Use soap client or rest client for communicating with DISCO. Possible values are "soap" or "rest". - * - ``clone_check_timeout`` = ``3600`` - - (Integer) How long we check whether a clone is finished before we give up - * - ``clone_volume_timeout`` = ``680`` - - (Integer) Create clone volume timeout. 
- * - ``disco_client`` = ``127.0.0.1`` - - (IP) The IP of DMS client socket server - * - ``disco_client_port`` = ``9898`` - - (Port number) The port to connect DMS client socket server - * - ``disco_src_api_port`` = ``8080`` - - (Port number) The port of DISCO source API - * - ``disco_wsdl_path`` = ``/etc/cinder/DISCOService.wsdl`` - - (String) DEPRECATED: Path to the wsdl file to communicate with DISCO request manager - * - ``rest_ip`` = ``None`` - - (IP) The IP address of the REST server - * - ``restore_check_timeout`` = ``3600`` - - (Integer) How long we check whether a restore is finished before we give up - * - ``retry_interval`` = ``1`` - - (Integer) How long we wait before retrying to get an item detail diff --git a/doc/source/configuration/tables/cinder-dothill.rst b/doc/source/configuration/tables/cinder-dothill.rst deleted file mode 100644 index b796abc91..000000000 --- a/doc/source/configuration/tables/cinder-dothill.rst +++ /dev/null @@ -1,32 +0,0 @@ -.. - Warning: Do not edit this file. It is automatically generated from the - software project's code and your changes will be overwritten. - - The tool to generate this file lives in openstack-doc-tools repository. - - Please make any changes needed in the code, then run the - autogenerate-config-doc tool from the openstack-doc-tools repository, or - ask for help on the documentation mailing list, IRC channel or meeting. - -.. _cinder-dothill: - -.. list-table:: Description of Dot Hill volume driver configuration options - :header-rows: 1 - :class: config-ref-table - - * - Configuration option = Default value - - Description - * - **[DEFAULT]** - - - * - ``dothill_api_protocol`` = ``https`` - - (String) DotHill API interface protocol. - * - ``dothill_backend_name`` = ``A`` - - (String) Pool or Vdisk name to use for volume creation. - * - ``dothill_backend_type`` = ``virtual`` - - (String) linear (for Vdisk) or virtual (for Pool). 
- * - ``dothill_iscsi_ips`` = - - (List) List of comma-separated target iSCSI IP addresses. - * - ``dothill_verify_certificate`` = ``False`` - - (Boolean) Whether to verify DotHill array SSL certificate. - * - ``dothill_verify_certificate_path`` = ``None`` - - (String) DotHill array SSL certificate path. diff --git a/doc/source/configuration/tables/cinder-drbd.rst b/doc/source/configuration/tables/cinder-drbd.rst deleted file mode 100644 index 221044da0..000000000 --- a/doc/source/configuration/tables/cinder-drbd.rst +++ /dev/null @@ -1,42 +0,0 @@ -.. - Warning: Do not edit this file. It is automatically generated from the - software project's code and your changes will be overwritten. - - The tool to generate this file lives in openstack-doc-tools repository. - - Please make any changes needed in the code, then run the - autogenerate-config-doc tool from the openstack-doc-tools repository, or - ask for help on the documentation mailing list, IRC channel or meeting. - -.. _cinder-drbd: - -.. list-table:: Description of DRBD configuration options - :header-rows: 1 - :class: config-ref-table - - * - Configuration option = Default value - - Description - * - **[DEFAULT]** - - - * - ``drbdmanage_devs_on_controller`` = ``True`` - - (Boolean) If set, the c-vol node will receive a usable /dev/drbdX device, even if the actual data is stored on other nodes only. This is useful for debugging, maintenance, and to be able to do the iSCSI export from the c-vol node. - * - ``drbdmanage_disk_options`` = ``{"c-min-rate": "4M"}`` - - (String) Disk options to set on new resources. See http://www.drbd.org/en/doc/users-guide-90/re-drbdconf for all the details. - * - ``drbdmanage_net_options`` = ``{"connect-int": "4", "allow-two-primaries": "yes", "ko-count": "30", "max-buffers": "20000", "ping-timeout": "100"}`` - - (String) Net options to set on new resources. See http://www.drbd.org/en/doc/users-guide-90/re-drbdconf for all the details. 
- * - ``drbdmanage_redundancy`` = ``1`` - - (Integer) Number of nodes that should replicate the data. - * - ``drbdmanage_resize_plugin`` = ``drbdmanage.plugins.plugins.wait_for.WaitForVolumeSize`` - - (String) Volume resize completion wait plugin. - * - ``drbdmanage_resize_policy`` = ``{"timeout": "60"}`` - - (String) Volume resize completion wait policy. - * - ``drbdmanage_resource_options`` = ``{"auto-promote-timeout": "300"}`` - - (String) Resource options to set on new resources. See http://www.drbd.org/en/doc/users-guide-90/re-drbdconf for all the details. - * - ``drbdmanage_resource_plugin`` = ``drbdmanage.plugins.plugins.wait_for.WaitForResource`` - - (String) Resource deployment completion wait plugin. - * - ``drbdmanage_resource_policy`` = ``{"ratio": "0.51", "timeout": "60"}`` - - (String) Resource deployment completion wait policy. - * - ``drbdmanage_snapshot_plugin`` = ``drbdmanage.plugins.plugins.wait_for.WaitForSnapshot`` - - (String) Snapshot completion wait plugin. - * - ``drbdmanage_snapshot_policy`` = ``{"count": "1", "timeout": "60"}`` - - (String) Snapshot completion wait policy. diff --git a/doc/source/configuration/tables/cinder-emc.rst b/doc/source/configuration/tables/cinder-emc.rst deleted file mode 100644 index 3bbd385e7..000000000 --- a/doc/source/configuration/tables/cinder-emc.rst +++ /dev/null @@ -1,48 +0,0 @@ -.. - Warning: Do not edit this file. It is automatically generated from the - software project's code and your changes will be overwritten. - - The tool to generate this file lives in openstack-doc-tools repository. - - Please make any changes needed in the code, then run the - autogenerate-config-doc tool from the openstack-doc-tools repository, or - ask for help on the documentation mailing list, IRC channel or meeting. - -.. _cinder-emc: - -.. 
list-table:: Description of EMC configuration options - :header-rows: 1 - :class: config-ref-table - - * - Configuration option = Default value - - Description - * - **[DEFAULT]** - - - * - ``check_max_pool_luns_threshold`` = ``False`` - - (Boolean) Report free_capacity_gb as 0 when the limit to maximum number of pool LUNs is reached. By default, the value is False. - * - ``cinder_emc_config_file`` = ``/etc/cinder/cinder_emc_config.xml`` - - (String) Use this file for cinder emc plugin config data - * - ``destroy_empty_storage_group`` = ``False`` - - (Boolean) To destroy storage group when the last LUN is removed from it. By default, the value is False. - * - ``force_delete_lun_in_storagegroup`` = ``False`` - - (Boolean) Delete a LUN even if it is in Storage Groups. By default, the value is False. - * - ``initiator_auto_deregistration`` = ``False`` - - (Boolean) Automatically deregister initiators after the related storage group is destroyed. By default, the value is False. - * - ``initiator_auto_registration`` = ``False`` - - (Boolean) Automatically register initiators. By default, the value is False. - * - ``io_port_list`` = ``None`` - - (List) Comma separated iSCSI or FC ports to be used in Nova or Cinder. - * - ``iscsi_initiators`` = ``None`` - - (String) Mapping between hostname and its iSCSI initiator IP addresses. - * - ``max_luns_per_storage_group`` = ``255`` - - (Integer) Default max number of LUNs in a storage group. By default, the value is 255. - * - ``multi_pool_support`` = ``False`` - - (String) Use this value to specify multi-pool support for VMAX3 - * - ``naviseccli_path`` = ``None`` - - (String) Naviseccli Path. - * - ``storage_vnx_authentication_type`` = ``global`` - - (String) VNX authentication scope type. By default, the value is global. - * - ``storage_vnx_pool_names`` = ``None`` - - (List) Comma-separated list of storage pool names to be used. 
- * - ``storage_vnx_security_file_dir`` = ``None`` - - (String) Directory path that contains the VNX security file. Make sure the security file is generated first. diff --git a/doc/source/configuration/tables/cinder-emc_sio.rst b/doc/source/configuration/tables/cinder-emc_sio.rst deleted file mode 100644 index b38554e63..000000000 --- a/doc/source/configuration/tables/cinder-emc_sio.rst +++ /dev/null @@ -1,42 +0,0 @@ -.. - Warning: Do not edit this file. It is automatically generated from the - software project's code and your changes will be overwritten. - - The tool to generate this file lives in openstack-doc-tools repository. - - Please make any changes needed in the code, then run the - autogenerate-config-doc tool from the openstack-doc-tools repository, or - ask for help on the documentation mailing list, IRC channel or meeting. - -.. _cinder-emc_sio: - -.. list-table:: Description of EMC SIO volume driver configuration options - :header-rows: 1 - :class: config-ref-table - - * - Configuration option = Default value - - Description - * - **[DEFAULT]** - - - * - ``sio_max_over_subscription_ratio`` = ``10.0`` - - (Floating point) max_over_subscription_ratio setting for the ScaleIO driver. This replaces the general max_over_subscription_ratio which has no effect in this driver.Maximum value allowed for ScaleIO is 10.0. - * - ``sio_protection_domain_id`` = ``None`` - - (String) Protection Domain ID. - * - ``sio_protection_domain_name`` = ``None`` - - (String) Protection Domain name. - * - ``sio_rest_server_port`` = ``443`` - - (String) REST server port. - * - ``sio_round_volume_capacity`` = ``True`` - - (Boolean) Round up volume capacity. - * - ``sio_server_certificate_path`` = ``None`` - - (String) Server certificate path. - * - ``sio_storage_pool_id`` = ``None`` - - (String) Storage Pool ID. - * - ``sio_storage_pool_name`` = ``None`` - - (String) Storage Pool name. - * - ``sio_storage_pools`` = ``None`` - - (String) Storage Pools. 
- * - ``sio_unmap_volume_before_deletion`` = ``False`` - - (Boolean) Unmap volume before deletion. - * - ``sio_verify_server_certificate`` = ``False`` - - (Boolean) Verify server certificate. diff --git a/doc/source/configuration/tables/cinder-emc_xtremio.rst b/doc/source/configuration/tables/cinder-emc_xtremio.rst deleted file mode 100644 index 651a97160..000000000 --- a/doc/source/configuration/tables/cinder-emc_xtremio.rst +++ /dev/null @@ -1,28 +0,0 @@ -.. - Warning: Do not edit this file. It is automatically generated from the - software project's code and your changes will be overwritten. - - The tool to generate this file lives in openstack-doc-tools repository. - - Please make any changes needed in the code, then run the - autogenerate-config-doc tool from the openstack-doc-tools repository, or - ask for help on the documentation mailing list, IRC channel or meeting. - -.. _cinder-emc_xtremio: - -.. list-table:: Description of EMC XtremIO volume driver configuration options - :header-rows: 1 - :class: config-ref-table - - * - Configuration option = Default value - - Description - * - **[DEFAULT]** - - - * - ``xtremio_array_busy_retry_count`` = ``5`` - - (Integer) Number of retries in case array is busy - * - ``xtremio_array_busy_retry_interval`` = ``5`` - - (Integer) Interval between retries in case array is busy - * - ``xtremio_cluster_name`` = - - (String) XMS cluster id in multi-cluster environment - * - ``xtremio_volumes_per_glance_cache`` = ``100`` - - (Integer) Number of volumes created from each cached glance image diff --git a/doc/source/configuration/tables/cinder-eqlx.rst b/doc/source/configuration/tables/cinder-eqlx.rst deleted file mode 100644 index 21d71fa4d..000000000 --- a/doc/source/configuration/tables/cinder-eqlx.rst +++ /dev/null @@ -1,26 +0,0 @@ -.. - Warning: Do not edit this file. It is automatically generated from the - software project's code and your changes will be overwritten. 
- - The tool to generate this file lives in openstack-doc-tools repository. - - Please make any changes needed in the code, then run the - autogenerate-config-doc tool from the openstack-doc-tools repository, or - ask for help on the documentation mailing list, IRC channel or meeting. - -.. _cinder-eqlx: - -.. list-table:: Description of Dell EqualLogic volume driver configuration options - :header-rows: 1 - :class: config-ref-table - - * - Configuration option = Default value - - Description - * - **[DEFAULT]** - - - * - ``eqlx_cli_max_retries`` = ``5`` - - (Integer) Maximum retry count for reconnection. Default is 5. - * - ``eqlx_group_name`` = ``group-0`` - - (String) Group name to use for creating volumes. Defaults to "group-0". - * - ``eqlx_pool`` = ``default`` - - (String) Pool in which volumes will be created. Defaults to "default". diff --git a/doc/source/configuration/tables/cinder-eternus.rst b/doc/source/configuration/tables/cinder-eternus.rst deleted file mode 100644 index 6c3faeb37..000000000 --- a/doc/source/configuration/tables/cinder-eternus.rst +++ /dev/null @@ -1,22 +0,0 @@ -.. - Warning: Do not edit this file. It is automatically generated from the - software project's code and your changes will be overwritten. - - The tool to generate this file lives in openstack-doc-tools repository. - - Please make any changes needed in the code, then run the - autogenerate-config-doc tool from the openstack-doc-tools repository, or - ask for help on the documentation mailing list, IRC channel or meeting. - -.. _cinder-eternus: - -.. 
list-table:: Description of Eternus volume driver configuration options - :header-rows: 1 - :class: config-ref-table - - * - Configuration option = Default value - - Description - * - **[DEFAULT]** - - - * - ``cinder_eternus_config_file`` = ``/etc/cinder/cinder_fujitsu_eternus_dx.xml`` - - (String) config file for cinder eternus_dx volume driver diff --git a/doc/source/configuration/tables/cinder-falconstor.rst b/doc/source/configuration/tables/cinder-falconstor.rst deleted file mode 100644 index d131ef897..000000000 --- a/doc/source/configuration/tables/cinder-falconstor.rst +++ /dev/null @@ -1,30 +0,0 @@ -.. - Warning: Do not edit this file. It is automatically generated from the - software project's code and your changes will be overwritten. - - The tool to generate this file lives in openstack-doc-tools repository. - - Please make any changes needed in the code, then run the - autogenerate-config-doc tool from the openstack-doc-tools repository, or - ask for help on the documentation mailing list, IRC channel or meeting. - -.. _cinder-falconstor: - -.. list-table:: Description of Falconstor volume driver configuration options - :header-rows: 1 - :class: config-ref-table - - * - Configuration option = Default value - - Description - * - **[DEFAULT]** - - - * - ``additional_retry_list`` = - - (String) FSS additional retry list, separate by ; - * - ``fss_debug`` = ``False`` - - (Boolean) Enable HTTP debugging to FSS - * - ``fss_pools`` = ``{}`` - - (Dict) FSS pool ID list in which FalconStor volumes are stored. If you have only one pool, use ``A:``. You can also have up to two storage pools, P for primary and O for all supporting devices. The usage is ``P:,O:`` - * - ``fss_san_secondary_ip`` = - - (String) Specifies FSS secondary management IP to be used if san_ip is invalid or becomes inaccessible. - * - ``san_thin_provision`` = - - (Boolean) Enable FSS thin provision. 
 diff --git a/doc/source/configuration/tables/cinder-flashsystem.rst b/doc/source/configuration/tables/cinder-flashsystem.rst deleted file mode 100644 index ad6c141e4..000000000 --- a/doc/source/configuration/tables/cinder-flashsystem.rst +++ /dev/null @@ -1,28 +0,0 @@ -.. - Warning: Do not edit this file. It is automatically generated from the - software project's code and your changes will be overwritten. - - The tool to generate this file lives in openstack-doc-tools repository. - - Please make any changes needed in the code, then run the - autogenerate-config-doc tool from the openstack-doc-tools repository, or - ask for help on the documentation mailing list, IRC channel or meeting. - -.. _cinder-flashsystem: - -.. list-table:: Description of IBM FlashSystem volume driver configuration options - :header-rows: 1 - :class: config-ref-table - - * - Configuration option = Default value - - Description - * - **[DEFAULT]** - - - * - ``flashsystem_connection_protocol`` = ``FC`` - - (String) Connection protocol should be FC. (Default is FC.) - * - ``flashsystem_iscsi_portid`` = ``0`` - - (Integer) Default iSCSI Port ID of FlashSystem. (Default port is 0.) - * - ``flashsystem_multihostmap_enabled`` = ``True`` - - (Boolean) Allows vdisk to multi host mapping. (Default is True) - * - ``flashsystem_multipath_enabled`` = ``False`` - - (Boolean) DEPRECATED: This option no longer has any effect. It is deprecated and will be removed in the next release. diff --git a/doc/source/configuration/tables/cinder-fusionio.rst b/doc/source/configuration/tables/cinder-fusionio.rst deleted file mode 100644 index e1e7229e1..000000000 --- a/doc/source/configuration/tables/cinder-fusionio.rst +++ /dev/null @@ -1,30 +0,0 @@ -.. - Warning: Do not edit this file. It is automatically generated from the - software project's code and your changes will be overwritten. - - The tool to generate this file lives in openstack-doc-tools repository. 
- - Please make any changes needed in the code, then run the - autogenerate-config-doc tool from the openstack-doc-tools repository, or - ask for help on the documentation mailing list, IRC channel or meeting. - -.. _cinder-fusionio: - -.. list-table:: Description of Fusion-io driver configuration options - :header-rows: 1 - :class: config-ref-table - - * - Configuration option = Default value - - Description - * - **[DEFAULT]** - - - * - ``dsware_isthin`` = ``False`` - - (Boolean) The flag of thin storage allocation. - * - ``dsware_manager`` = - - (String) Fusionstorage manager ip addr for cinder-volume. - * - ``fusionstorageagent`` = - - (String) Fusionstorage agent ip addr range. - * - ``pool_id_filter`` = - - (List) Pool id permit to use. - * - ``pool_type`` = ``default`` - - (String) Pool type, like sata-2copy. diff --git a/doc/source/configuration/tables/cinder-hgst.rst b/doc/source/configuration/tables/cinder-hgst.rst deleted file mode 100644 index 49ff3288b..000000000 --- a/doc/source/configuration/tables/cinder-hgst.rst +++ /dev/null @@ -1,32 +0,0 @@ -.. - Warning: Do not edit this file. It is automatically generated from the - software project's code and your changes will be overwritten. - - The tool to generate this file lives in openstack-doc-tools repository. - - Please make any changes needed in the code, then run the - autogenerate-config-doc tool from the openstack-doc-tools repository, or - ask for help on the documentation mailing list, IRC channel or meeting. - -.. _cinder-hgst: - -.. 
list-table:: Description of HGST volume driver configuration options - :header-rows: 1 - :class: config-ref-table - - * - Configuration option = Default value - - Description - * - **[DEFAULT]** - - - * - ``hgst_net`` = ``Net 1 (IPv4)`` - - (String) Space network name to use for data transfer - * - ``hgst_redundancy`` = ``0`` - - (String) Should spaces be redundantly stored (1/0) - * - ``hgst_space_group`` = ``disk`` - - (String) Group to own created spaces - * - ``hgst_space_mode`` = ``0600`` - - (String) UNIX mode for created spaces - * - ``hgst_space_user`` = ``root`` - - (String) User to own created spaces - * - ``hgst_storage_servers`` = ``os:gbd0`` - - (String) Comma separated list of Space storage servers:devices. ex: os1_stor:gbd0,os2_stor:gbd0 diff --git a/doc/source/configuration/tables/cinder-hitachi-hbsd.rst b/doc/source/configuration/tables/cinder-hitachi-hbsd.rst deleted file mode 100644 index 544f51843..000000000 --- a/doc/source/configuration/tables/cinder-hitachi-hbsd.rst +++ /dev/null @@ -1,64 +0,0 @@ -.. - Warning: Do not edit this file. It is automatically generated from the - software project's code and your changes will be overwritten. - - The tool to generate this file lives in openstack-doc-tools repository. - - Please make any changes needed in the code, then run the - autogenerate-config-doc tool from the openstack-doc-tools repository, or - ask for help on the documentation mailing list, IRC channel or meeting. - -.. _cinder-hitachi-hbsd: - -.. 
list-table:: Description of Hitachi storage volume driver configuration options - :header-rows: 1 - :class: config-ref-table - - * - Configuration option = Default value - - Description - * - **[DEFAULT]** - - - * - ``hitachi_add_chap_user`` = ``False`` - - (Boolean) Add CHAP user - * - ``hitachi_async_copy_check_interval`` = ``10`` - - (Integer) Interval to check copy asynchronously - * - ``hitachi_auth_method`` = ``None`` - - (String) iSCSI authentication method - * - ``hitachi_auth_password`` = ``HBSD-CHAP-password`` - - (String) iSCSI authentication password - * - ``hitachi_auth_user`` = ``HBSD-CHAP-user`` - - (String) iSCSI authentication username - * - ``hitachi_copy_check_interval`` = ``3`` - - (Integer) Interval to check copy - * - ``hitachi_copy_speed`` = ``3`` - - (Integer) Copy speed of storage system - * - ``hitachi_default_copy_method`` = ``FULL`` - - (String) Default copy method of storage system - * - ``hitachi_group_range`` = ``None`` - - (String) Range of group number - * - ``hitachi_group_request`` = ``False`` - - (Boolean) Request for creating HostGroup or iSCSI Target - * - ``hitachi_horcm_add_conf`` = ``True`` - - (Boolean) Add to HORCM configuration - * - ``hitachi_horcm_numbers`` = ``200,201`` - - (String) Instance numbers for HORCM - * - ``hitachi_horcm_password`` = ``None`` - - (String) Password of storage system for HORCM - * - ``hitachi_horcm_resource_lock_timeout`` = ``600`` - - (Integer) Timeout until a resource lock is released, in seconds. The value must be between 0 and 7200. 
- * - ``hitachi_horcm_user`` = ``None`` - - (String) Username of storage system for HORCM - * - ``hitachi_ldev_range`` = ``None`` - - (String) Range of logical device of storage system - * - ``hitachi_pool_id`` = ``None`` - - (Integer) Pool ID of storage system - * - ``hitachi_serial_number`` = ``None`` - - (String) Serial number of storage system - * - ``hitachi_target_ports`` = ``None`` - - (String) Control port names for HostGroup or iSCSI Target - * - ``hitachi_thin_pool_id`` = ``None`` - - (Integer) Thin pool ID of storage system - * - ``hitachi_unit_name`` = ``None`` - - (String) Name of an array unit - * - ``hitachi_zoning_request`` = ``False`` - - (Boolean) Request for FC Zone creating HostGroup diff --git a/doc/source/configuration/tables/cinder-hitachi-hnas.rst b/doc/source/configuration/tables/cinder-hitachi-hnas.rst deleted file mode 100644 index 30d173e40..000000000 --- a/doc/source/configuration/tables/cinder-hitachi-hnas.rst +++ /dev/null @@ -1,64 +0,0 @@ -.. - Warning: Do not edit this file. It is automatically generated from the - software project's code and your changes will be overwritten. - - The tool to generate this file lives in openstack-doc-tools repository. - - Please make any changes needed in the code, then run the - autogenerate-config-doc tool from the openstack-doc-tools repository, or - ask for help on the documentation mailing list, IRC channel or meeting. - -.. _cinder-hitachi-hnas: - -.. list-table:: Description of Hitachi HNAS iSCSI and NFS driver configuration options - :header-rows: 1 - :class: config-ref-table - - * - Configuration option = Default value - - Description - * - **[DEFAULT]** - - - * - ``hds_hnas_iscsi_config_file`` = ``/opt/hds/hnas/cinder_iscsi_conf.xml`` - - (String) DEPRECATED: Legacy configuration file for HNAS iSCSI Cinder plugin. 
This is not needed if you fill all configuration on cinder.conf - * - ``hds_hnas_nfs_config_file`` = ``/opt/hds/hnas/cinder_nfs_conf.xml`` - - (String) DEPRECATED: Legacy configuration file for HNAS NFS Cinder plugin. This is not needed if you fill all configuration on cinder.conf - * - ``hnas_chap_enabled`` = ``True`` - - (Boolean) Whether the chap authentication is enabled in the iSCSI target or not. - * - ``hnas_cluster_admin_ip0`` = ``None`` - - (String) The IP of the HNAS cluster admin. Required only for HNAS multi-cluster setups. - * - ``hnas_mgmt_ip0`` = ``None`` - - (IP) Management IP address of HNAS. This can be any IP in the admin address on HNAS or the SMU IP. - * - ``hnas_password`` = ``None`` - - (String) HNAS password. - * - ``hnas_ssc_cmd`` = ``ssc`` - - (String) Command to communicate to HNAS. - * - ``hnas_ssh_port`` = ``22`` - - (Port number) Port to be used for SSH authentication. - * - ``hnas_ssh_private_key`` = ``None`` - - (String) Path to the SSH private key used to authenticate in HNAS SMU. - * - ``hnas_svc0_hdp`` = ``None`` - - (String) Service 0 HDP - * - ``hnas_svc0_iscsi_ip`` = ``None`` - - (IP) Service 0 iSCSI IP - * - ``hnas_svc0_pool_name`` = ``None`` - - (String) Service 0 pool name - * - ``hnas_svc1_hdp`` = ``None`` - - (String) Service 1 HDP - * - ``hnas_svc1_iscsi_ip`` = ``None`` - - (IP) Service 1 iSCSI IP - * - ``hnas_svc1_pool_name`` = ``None`` - - (String) Service 1 pool name - * - ``hnas_svc2_hdp`` = ``None`` - - (String) Service 2 HDP - * - ``hnas_svc2_iscsi_ip`` = ``None`` - - (IP) Service 2 iSCSI IP - * - ``hnas_svc2_pool_name`` = ``None`` - - (String) Service 2 pool name - * - ``hnas_svc3_hdp`` = ``None`` - - (String) Service 3 HDP - * - ``hnas_svc3_iscsi_ip`` = ``None`` - - (IP) Service 3 iSCSI IP - * - ``hnas_svc3_pool_name`` = ``None`` - - (String) Service 3 pool name: - * - ``hnas_username`` = ``None`` - - (String) HNAS username. 
diff --git a/doc/source/configuration/tables/cinder-hitachi-vsp.rst b/doc/source/configuration/tables/cinder-hitachi-vsp.rst deleted file mode 100644 index 648a79129..000000000 --- a/doc/source/configuration/tables/cinder-hitachi-vsp.rst +++ /dev/null @@ -1,60 +0,0 @@ -.. - Warning: Do not edit this file. It is automatically generated from the - software project's code and your changes will be overwritten. - - The tool to generate this file lives in openstack-doc-tools repository. - - Please make any changes needed in the code, then run the - autogenerate-config-doc tool from the openstack-doc-tools repository, or - ask for help on the documentation mailing list, IRC channel or meeting. - -.. _cinder-hitachi-vsp: - -.. list-table:: Description of HORCM interface module for Hitachi VSP driver configuration options - :header-rows: 1 - :class: config-ref-table - - * - Configuration option = Default value - - Description - * - **[DEFAULT]** - - - * - ``vsp_async_copy_check_interval`` = ``10`` - - (Integer) Interval in seconds at which volume pair synchronization status is checked when volume pairs are deleted. - * - ``vsp_auth_password`` = ``None`` - - (String) Password corresponding to vsp_auth_user. - * - ``vsp_auth_user`` = ``None`` - - (String) Name of the user used for CHAP authentication performed in communication between hosts and iSCSI targets on the storage ports. - * - ``vsp_compute_target_ports`` = ``None`` - - (List) IDs of the storage ports used to attach volumes to compute nodes. To specify multiple ports, connect them by commas (e.g. CL1-A,CL2-A). - * - ``vsp_copy_check_interval`` = ``3`` - - (Integer) Interval in seconds at which volume pair synchronization status is checked when volume pairs are created. - * - ``vsp_copy_speed`` = ``3`` - - (Integer) Speed at which data is copied by Shadow Image. 1 or 2 indicates low speed, 3 indicates middle speed, and a value between 4 and 15 indicates high speed. 
- * - ``vsp_default_copy_method`` = ``FULL`` - - (String) Method of volume copy. FULL indicates full data copy by Shadow Image and THIN indicates differential data copy by Thin Image. - * - ``vsp_group_request`` = ``False`` - - (Boolean) If True, the driver will create host groups or iSCSI targets on storage ports as needed. - * - ``vsp_horcm_add_conf`` = ``True`` - - (Boolean) If True, the driver will create or update the Command Control Interface configuration file as needed. - * - ``vsp_horcm_numbers`` = ``200, 201`` - - (List) Command Control Interface instance numbers in the format of 'xxx,yyy'. The second one is for Shadow Image operation and the first one is for other purposes. - * - ``vsp_horcm_pair_target_ports`` = ``None`` - - (List) IDs of the storage ports used to copy volumes by Shadow Image or Thin Image. To specify multiple ports, connect them by commas (e.g. CL1-A,CL2-A). - * - ``vsp_horcm_password`` = ``None`` - - (String) Password corresponding to vsp_horcm_user. - * - ``vsp_horcm_user`` = ``None`` - - (String) Name of the user on the storage system. - * - ``vsp_ldev_range`` = ``None`` - - (String) Range of the LDEV numbers in the format of 'xxxx-yyyy' that can be used by the driver. Values can be in decimal format (e.g. 1000) or in colon-separated hexadecimal format (e.g. 00:03:E8). - * - ``vsp_pool`` = ``None`` - - (String) Pool number or pool name of the DP pool. - * - ``vsp_storage_id`` = ``None`` - - (String) Product number of the storage system. - * - ``vsp_target_ports`` = ``None`` - - (List) IDs of the storage ports used to attach volumes to the controller node. To specify multiple ports, connect them by commas (e.g. CL1-A,CL2-A). - * - ``vsp_thin_pool`` = ``None`` - - (String) Pool number or pool name of the Thin Image pool. - * - ``vsp_use_chap_auth`` = ``False`` - - (Boolean) If True, CHAP authentication will be applied to communication between hosts and any of the iSCSI targets on the storage ports. 
- * - ``vsp_zoning_request`` = ``False`` - - (Boolean) If True, the driver will configure FC zoning between the server and the storage system provided that FC zoning manager is enabled. diff --git a/doc/source/configuration/tables/cinder-hpe3par.rst b/doc/source/configuration/tables/cinder-hpe3par.rst deleted file mode 100644 index 418663ccd..000000000 --- a/doc/source/configuration/tables/cinder-hpe3par.rst +++ /dev/null @@ -1,40 +0,0 @@ -.. - Warning: Do not edit this file. It is automatically generated from the - software project's code and your changes will be overwritten. - - The tool to generate this file lives in openstack-doc-tools repository. - - Please make any changes needed in the code, then run the - autogenerate-config-doc tool from the openstack-doc-tools repository, or - ask for help on the documentation mailing list, IRC channel or meeting. - -.. _cinder-hpe3par: - -.. list-table:: Description of HPE 3PAR Fibre Channel and iSCSI drivers configuration options - :header-rows: 1 - :class: config-ref-table - - * - Configuration option = Default value - - Description - * - **[DEFAULT]** - - - * - ``hpe3par_api_url`` = - - (String) 3PAR WSAPI Server Url like https://<3par ip>:8080/api/v1 - * - ``hpe3par_cpg`` = ``OpenStack`` - - (List) List of the CPG(s) to use for volume creation - * - ``hpe3par_cpg_snap`` = - - (String) The CPG to use for Snapshots for volumes. If empty the userCPG will be used. - * - ``hpe3par_debug`` = ``False`` - - (Boolean) Enable HTTP debugging to 3PAR - * - ``hpe3par_iscsi_chap_enabled`` = ``False`` - - (Boolean) Enable CHAP authentication for iSCSI connections. - * - ``hpe3par_iscsi_ips`` = - - (List) List of target iSCSI addresses to use. - * - ``hpe3par_password`` = - - (String) 3PAR password for the user specified in hpe3par_username - * - ``hpe3par_snapshot_expiration`` = - - (String) The time in hours when a snapshot expires and is deleted. 
This must be larger than the value of ``hpe3par_snapshot_retention`` - * - ``hpe3par_snapshot_retention`` = - - (String) The time in hours to retain a snapshot. You can't delete it before this expires. - * - ``hpe3par_username`` = - - (String) 3PAR username with the 'edit' role diff --git a/doc/source/configuration/tables/cinder-hpelefthand.rst b/doc/source/configuration/tables/cinder-hpelefthand.rst deleted file mode 100644 index 07e87e6bf..000000000 --- a/doc/source/configuration/tables/cinder-hpelefthand.rst +++ /dev/null @@ -1,34 +0,0 @@ -.. - Warning: Do not edit this file. It is automatically generated from the - software project's code and your changes will be overwritten. - - The tool to generate this file lives in openstack-doc-tools repository. - - Please make any changes needed in the code, then run the - autogenerate-config-doc tool from the openstack-doc-tools repository, or - ask for help on the documentation mailing list, IRC channel or meeting. - -.. _cinder-hpelefthand: - -.. list-table:: Description of HPE LeftHand/StoreVirtual driver configuration options - :header-rows: 1 - :class: config-ref-table - - * - Configuration option = Default value - - Description - * - **[DEFAULT]** - - - * - ``hpelefthand_api_url`` = ``None`` - - (URI) HPE LeftHand WSAPI Server Url like https://<LeftHand ip>:8081/lhos - * - ``hpelefthand_clustername`` = ``None`` - - (String) HPE LeftHand cluster name - * - ``hpelefthand_debug`` = ``False`` - - (Boolean) Enable HTTP debugging to LeftHand - * - ``hpelefthand_iscsi_chap_enabled`` = ``False`` - - (Boolean) Configure CHAP authentication for iSCSI connections (Default: Disabled) - * - ``hpelefthand_password`` = ``None`` - - (String) HPE LeftHand Super user password - * - ``hpelefthand_ssh_port`` = ``16022`` - - (Port number) Port number of SSH service. 
- * - ``hpelefthand_username`` = ``None`` - - (String) HPE LeftHand Super user username diff --git a/doc/source/configuration/tables/cinder-hpexp.rst b/doc/source/configuration/tables/cinder-hpexp.rst deleted file mode 100644 index 319105eb1..000000000 --- a/doc/source/configuration/tables/cinder-hpexp.rst +++ /dev/null @@ -1,56 +0,0 @@ -.. - Warning: Do not edit this file. It is automatically generated from the - software project's code and your changes will be overwritten. - - The tool to generate this file lives in openstack-doc-tools repository. - - Please make any changes needed in the code, then run the - autogenerate-config-doc tool from the openstack-doc-tools repository, or - ask for help on the documentation mailing list, IRC channel or meeting. - -.. _cinder-hpexp: - -.. list-table:: Description of HPE XP volume driver configuration options - :header-rows: 1 - :class: config-ref-table - - * - Configuration option = Default value - - Description - * - **[DEFAULT]** - - - * - ``hpexp_async_copy_check_interval`` = ``10`` - - (Integer) Interval to check copy asynchronously - * - ``hpexp_compute_target_ports`` = ``None`` - - (List) Target port names of compute node for host group or iSCSI target - * - ``hpexp_copy_check_interval`` = ``3`` - - (Integer) Interval to check copy - * - ``hpexp_copy_speed`` = ``3`` - - (Integer) Copy speed of storage system - * - ``hpexp_default_copy_method`` = ``FULL`` - - (String) Default copy method of storage system. There are two valid values: "FULL" specifies that a full copy; "THIN" specifies that a thin copy. 
Default value is "FULL" - * - ``hpexp_group_request`` = ``False`` - - (Boolean) Request for creating host group or iSCSI target - * - ``hpexp_horcm_add_conf`` = ``True`` - - (Boolean) Add to HORCM configuration - * - ``hpexp_horcm_name_only_discovery`` = ``False`` - - (Boolean) Only discover a specific name of host group or iSCSI target - * - ``hpexp_horcm_numbers`` = ``200, 201`` - - (List) Instance numbers for HORCM - * - ``hpexp_horcm_resource_name`` = ``meta_resource`` - - (String) Resource group name of storage system for HORCM - * - ``hpexp_horcm_user`` = ``None`` - - (String) Username of storage system for HORCM - * - ``hpexp_ldev_range`` = ``None`` - - (String) Logical device range of storage system - * - ``hpexp_pool`` = ``None`` - - (String) Pool of storage system - * - ``hpexp_storage_cli`` = ``None`` - - (String) Type of storage command line interface - * - ``hpexp_storage_id`` = ``None`` - - (String) ID of storage system - * - ``hpexp_target_ports`` = ``None`` - - (List) Target port names for host group or iSCSI target - * - ``hpexp_thin_pool`` = ``None`` - - (String) Thin pool of storage system - * - ``hpexp_zoning_request`` = ``False`` - - (Boolean) Request for FC Zone creating host group diff --git a/doc/source/configuration/tables/cinder-hpmsa.rst b/doc/source/configuration/tables/cinder-hpmsa.rst deleted file mode 100644 index 08362f8dc..000000000 --- a/doc/source/configuration/tables/cinder-hpmsa.rst +++ /dev/null @@ -1,32 +0,0 @@ -.. - Warning: Do not edit this file. It is automatically generated from the - software project's code and your changes will be overwritten. - - The tool to generate this file lives in openstack-doc-tools repository. - - Please make any changes needed in the code, then run the - autogenerate-config-doc tool from the openstack-doc-tools repository, or - ask for help on the documentation mailing list, IRC channel or meeting. - -.. _cinder-hpmsa: - -.. 
list-table:: Description of HP MSA volume driver configuration options - :header-rows: 1 - :class: config-ref-table - - * - Configuration option = Default value - - Description - * - **[DEFAULT]** - - - * - ``hpmsa_api_protocol`` = ``https`` - - (String) HPMSA API interface protocol. - * - ``hpmsa_backend_name`` = ``A`` - - (String) Pool or Vdisk name to use for volume creation. - * - ``hpmsa_backend_type`` = ``virtual`` - - (String) linear (for Vdisk) or virtual (for Pool). - * - ``hpmsa_iscsi_ips`` = - - (List) List of comma-separated target iSCSI IP addresses. - * - ``hpmsa_verify_certificate`` = ``False`` - - (Boolean) Whether to verify HPMSA array SSL certificate. - * - ``hpmsa_verify_certificate_path`` = ``None`` - - (String) HPMSA array SSL certificate path. diff --git a/doc/source/configuration/tables/cinder-huawei.rst b/doc/source/configuration/tables/cinder-huawei.rst deleted file mode 100644 index 41c37311b..000000000 --- a/doc/source/configuration/tables/cinder-huawei.rst +++ /dev/null @@ -1,34 +0,0 @@ -.. - Warning: Do not edit this file. It is automatically generated from the - software project's code and your changes will be overwritten. - - The tool to generate this file lives in openstack-doc-tools repository. - - Please make any changes needed in the code, then run the - autogenerate-config-doc tool from the openstack-doc-tools repository, or - ask for help on the documentation mailing list, IRC channel or meeting. - -.. _cinder-huawei: - -.. list-table:: Description of Huawei storage driver configuration options - :header-rows: 1 - :class: config-ref-table - - * - Configuration option = Default value - - Description - * - **[DEFAULT]** - - - * - ``cinder_huawei_conf_file`` = ``/etc/cinder/cinder_huawei_conf.xml`` - - (String) The configuration file for the Cinder Huawei driver. - * - ``hypermetro_devices`` = ``None`` - - (String) The remote device hypermetro will use. 
- * - ``metro_domain_name`` = ``None`` - - (String) The remote metro device domain name. - * - ``metro_san_address`` = ``None`` - - (String) The remote metro device request url. - * - ``metro_san_password`` = ``None`` - - (String) The remote metro device san password. - * - ``metro_san_user`` = ``None`` - - (String) The remote metro device san user. - * - ``metro_storage_pools`` = ``None`` - - (String) The remote metro device pool names. diff --git a/doc/source/configuration/tables/cinder-hyperv.rst b/doc/source/configuration/tables/cinder-hyperv.rst deleted file mode 100644 index 62bd65c3c..000000000 --- a/doc/source/configuration/tables/cinder-hyperv.rst +++ /dev/null @@ -1,22 +0,0 @@ -.. - Warning: Do not edit this file. It is automatically generated from the - software project's code and your changes will be overwritten. - - The tool to generate this file lives in openstack-doc-tools repository. - - Please make any changes needed in the code, then run the - autogenerate-config-doc tool from the openstack-doc-tools repository, or - ask for help on the documentation mailing list, IRC channel or meeting. - -.. _cinder-hyperv: - -.. list-table:: Description of HyperV volume driver configuration options - :header-rows: 1 - :class: config-ref-table - - * - Configuration option = Default value - - Description - * - **[hyperv]** - - - * - ``force_volumeutils_v1`` = ``False`` - - (Boolean) DEPRECATED: Force V1 volume utility class diff --git a/doc/source/configuration/tables/cinder-ibm_gpfs.rst b/doc/source/configuration/tables/cinder-ibm_gpfs.rst deleted file mode 100644 index 4bc9b581a..000000000 --- a/doc/source/configuration/tables/cinder-ibm_gpfs.rst +++ /dev/null @@ -1,45 +0,0 @@ -.. - Warning: Do not edit this file. It is automatically generated from the - software project's code and your changes will be overwritten. - - The tool to generate this file lives in openstack-doc-tools repository. 
- - Please make any changes needed in the code, then run the - autogenerate-config-doc tool from the openstack-doc-tools repository, or - ask for help on the documentation mailing list, IRC channel or meeting. - -.. _cinder-ibm_gpfs: - -.. list-table:: Description of Spectrum Scale volume driver configuration options - :header-rows: 1 - :class: config-ref-table - - * - Configuration option = Default value - - Description - - * - **[DEFAULT]** - - - - * - ``gpfs_images_dir`` = ``None`` - - - (String) Specifies the path of the Image service repository in GPFS. Leave undefined if not storing images in GPFS. - - * - ``gpfs_images_share_mode`` = ``None`` - - - (String) Specifies the type of image copy to be used. Set this when the Image service repository also uses GPFS so that image files can be transferred efficiently from the Image service to the Block Storage service. There are two valid values: "copy" specifies that a full copy of the image is made; "copy_on_write" specifies that copy-on-write optimization strategy is used and unmodified blocks of the image file are shared efficiently. - - * - ``gpfs_max_clone_depth`` = ``0`` - - - (Integer) Specifies an upper limit on the number of indirections required to reach a specific block due to snapshots or clones. A lengthy chain of copy-on-write snapshots or clones can have a negative impact on performance, but improves space utilization. 0 indicates unlimited clone depth. - - * - ``gpfs_mount_point_base`` = ``None`` - - - (String) Specifies the path of the GPFS directory where Block Storage volume and snapshot files are stored. - - * - ``gpfs_sparse_volumes`` = ``True`` - - - (Boolean) Specifies that volumes are created as sparse files which initially consume no space. If set to False, the volume is created as a fully allocated file, in which case, creation may take a significantly longer time. - - * - ``gpfs_storage_pool`` = ``system`` - - - (String) Specifies the storage pool that volumes are assigned to. 
By default, the system storage pool is used. diff --git a/doc/source/configuration/tables/cinder-ibm_gpfs_nfs.rst b/doc/source/configuration/tables/cinder-ibm_gpfs_nfs.rst deleted file mode 100644 index 8a9361145..000000000 --- a/doc/source/configuration/tables/cinder-ibm_gpfs_nfs.rst +++ /dev/null @@ -1,73 +0,0 @@ -.. - Warning: Do not edit this file. It is automatically generated from the - software project's code and your changes will be overwritten. - - The tool to generate this file lives in openstack-doc-tools repository. - - Please make any changes needed in the code, then run the - autogenerate-config-doc tool from the openstack-doc-tools repository, or - ask for help on the documentation mailing list, IRC channel or meeting. - -.. _cinder-ibm_gpfs_nfs: - -.. list-table:: Description of Spectrum Scale NFS volume driver configuration options - :header-rows: 1 - :class: config-ref-table - - * - Configuration option = Default value - - Description - - * - **[DEFAULT]** - - - - * - ``gpfs_images_dir`` = ``None`` - - - (String) Specifies the path of the Image service repository in GPFS. Leave undefined if not storing images in GPFS. - - * - ``gpfs_images_share_mode`` = ``None`` - - - (String) Specifies the type of image copy to be used. Set this when the Image service repository also uses GPFS so that image files can be transferred efficiently from the Image service to the Block Storage service. There are two valid values: "copy" specifies that a full copy of the image is made; "copy_on_write" specifies that copy-on-write optimization strategy is used and unmodified blocks of the image file are shared efficiently. - - * - ``gpfs_max_clone_depth`` = ``0`` - - - (Integer) Specifies an upper limit on the number of indirections required to reach a specific block due to snapshots or clones. A lengthy chain of copy-on-write snapshots or clones can have a negative impact on performance, but improves space utilization. 0 indicates unlimited clone depth. 
- - * - ``gpfs_mount_point_base`` = ``None`` - - - (String) Specifies the path of the GPFS directory where Block Storage volume and snapshot files are stored. - - * - ``gpfs_sparse_volumes`` = ``True`` - - - (Boolean) Specifies that volumes are created as sparse files which initially consume no space. If set to False, the volume is created as a fully allocated file, in which case, creation may take a significantly longer time. - - * - ``gpfs_storage_pool`` = ``system`` - - - (String) Specifies the storage pool that volumes are assigned to. By default, the system storage pool is used. - - * - ``nas_host`` = - - - (String) IP address or Hostname of NAS system. - - * - ``nas_login`` = ``admin`` - - - (String) User name to connect to NAS system. - - * - ``nas_password`` = - - - (String) Password to connect to NAS system. - - * - ``nas_private_key`` = - - - (String) Filename of private key to use for SSH authentication. - - * - ``nas_ssh_port`` = ``22`` - - - (Port number) SSH port to use to connect to NAS system. - - * - ``nfs_mount_point_base`` = ``$state_path/mnt`` - - - (String) Base dir containing mount points for NFS shares. - - * - ``nfs_shares_config`` = ``/etc/cinder/nfs_shares`` - - - (String) File with the list of available NFS shares. diff --git a/doc/source/configuration/tables/cinder-ibm_gpfs_remote.rst b/doc/source/configuration/tables/cinder-ibm_gpfs_remote.rst deleted file mode 100644 index fa49f2907..000000000 --- a/doc/source/configuration/tables/cinder-ibm_gpfs_remote.rst +++ /dev/null @@ -1,73 +0,0 @@ -.. - Warning: Do not edit this file. It is automatically generated from the - software project's code and your changes will be overwritten. - - The tool to generate this file lives in openstack-doc-tools repository. - - Please make any changes needed in the code, then run the - autogenerate-config-doc tool from the openstack-doc-tools repository, or - ask for help on the documentation mailing list, IRC channel or meeting. - -.. 
_cinder-ibm_gpfs_remote: - -.. list-table:: Description of Spectrum Scale Remote volume driver configuration options - :header-rows: 1 - :class: config-ref-table - - * - Configuration option = Default value - - Description - - * - **[DEFAULT]** - - - - * - ``gpfs_hosts`` = - - - (List) Comma-separated list of IP address or hostnames of GPFS nodes. - - * - ``gpfs_hosts_key_file`` = ``$state_path/ssh_known_hosts`` - - - (String) File containing SSH host keys for the gpfs nodes with which driver needs to communicate. Default=$state_path/ssh_known_hosts - - * - ``gpfs_images_dir`` = ``None`` - - - (String) Specifies the path of the Image service repository in GPFS. Leave undefined if not storing images in GPFS. - - * - ``gpfs_images_share_mode`` = ``None`` - - - (String) Specifies the type of image copy to be used. Set this when the Image service repository also uses GPFS so that image files can be transferred efficiently from the Image service to the Block Storage service. There are two valid values: "copy" specifies that a full copy of the image is made; "copy_on_write" specifies that copy-on-write optimization strategy is used and unmodified blocks of the image file are shared efficiently. - - * - ``gpfs_max_clone_depth`` = ``0`` - - - (Integer) Specifies an upper limit on the number of indirections required to reach a specific block due to snapshots or clones. A lengthy chain of copy-on-write snapshots or clones can have a negative impact on performance, but improves space utilization. 0 indicates unlimited clone depth. - - * - ``gpfs_mount_point_base`` = ``None`` - - - (String) Specifies the path of the GPFS directory where Block Storage volume and snapshot files are stored. - - * - ``gpfs_private_key`` = - - - (String) Filename of private key to use for SSH authentication. - - * - ``gpfs_sparse_volumes`` = ``True`` - - - (Boolean) Specifies that volumes are created as sparse files which initially consume no space. 
If set to False, the volume is created as a fully allocated file, in which case, creation may take a significantly longer time. - - * - ``gpfs_ssh_port`` = ``22`` - - - (Port number) SSH port to use. - - * - ``gpfs_storage_pool`` = ``system`` - - - (String) Specifies the storage pool that volumes are assigned to. By default, the system storage pool is used. - - * - ``gpfs_strict_host_key_policy`` = ``False`` - - - (Boolean) Option to enable strict gpfs host key checking while connecting to gpfs nodes. Default=False - - * - ``gpfs_user_login`` = ``root`` - - - (String) Username for GPFS nodes. - - * - ``gpfs_user_password`` = - - - (String) Password for GPFS node user. diff --git a/doc/source/configuration/tables/cinder-ibm_storage.rst b/doc/source/configuration/tables/cinder-ibm_storage.rst deleted file mode 100644 index e1f80af09..000000000 --- a/doc/source/configuration/tables/cinder-ibm_storage.rst +++ /dev/null @@ -1,36 +0,0 @@ -.. - Warning: Do not edit this file. It is automatically generated from the - software project's code and your changes will be overwritten. - - The tool to generate this file lives in openstack-doc-tools repository. - - Please make any changes needed in the code, then run the - autogenerate-config-doc tool from the openstack-doc-tools repository, or - ask for help on the documentation mailing list, IRC channel or meeting. - -.. _cinder-ibm_storage: - -.. list-table:: Description of IBM Storage driver configuration options - :header-rows: 1 - :class: config-ref-table - - * - Configuration option = Default value - - Description - * - **[DEFAULT]** - - - * - ``ds8k_devadd_unitadd_mapping`` = - - (String) Mapping between IODevice address and unit address. - * - ``ds8k_host_type`` = ``auto`` - - (String) Set to zLinux if your OpenStack version is prior to Liberty and you're connecting to zLinux systems. Otherwise set to auto. 
Valid values for this parameter are: 'auto', 'AMDLinuxRHEL', 'AMDLinuxSuse', 'AppleOSX', 'Fujitsu', 'Hp', 'HpTru64', 'HpVms', 'LinuxDT', 'LinuxRF', 'LinuxRHEL', 'LinuxSuse', 'Novell', 'SGI', 'SVC', 'SanFsAIX', 'SanFsLinux', 'Sun', 'VMWare', 'Win2000', 'Win2003', 'Win2008', 'Win2012', 'iLinux', 'nSeries', 'pLinux', 'pSeries', 'pSeriesPowerswap', 'zLinux', 'iSeries'. - * - ``ds8k_ssid_prefix`` = ``FF`` - - (String) Set the first two digits of SSID - * - ``proxy`` = ``cinder.volume.drivers.ibm.ibm_storage.proxy.IBMStorageProxy`` - - (String) Proxy driver that connects to the IBM Storage Array - * - ``san_clustername`` = - - (String) Cluster name to use for creating volumes - * - ``san_ip`` = - - (String) IP address of SAN controller - * - ``san_login`` = ``admin`` - - (String) Username for SAN controller - * - ``san_password`` = - - (String) Password for SAN controller diff --git a/doc/source/configuration/tables/cinder-images.rst b/doc/source/configuration/tables/cinder-images.rst deleted file mode 100644 index 62bc2d32a..000000000 --- a/doc/source/configuration/tables/cinder-images.rst +++ /dev/null @@ -1,54 +0,0 @@ -.. - Warning: Do not edit this file. It is automatically generated from the - software project's code and your changes will be overwritten. - - The tool to generate this file lives in openstack-doc-tools repository. - - Please make any changes needed in the code, then run the - autogenerate-config-doc tool from the openstack-doc-tools repository, or - ask for help on the documentation mailing list, IRC channel or meeting. - -.. _cinder-images: - -.. list-table:: Description of images configuration options - :header-rows: 1 - :class: config-ref-table - - * - Configuration option = Default value - - Description - * - **[DEFAULT]** - - - * - ``allowed_direct_url_schemes`` = - - (List) A list of url schemes that can be downloaded directly via the direct_url. Currently supported schemes: [file, cinder]. 
- * - ``glance_api_insecure`` = ``False`` - - (Boolean) Allow to perform insecure SSL (https) requests to glance (https will be used but cert validation will not be performed). - * - ``glance_api_servers`` = ``None`` - - (List) A list of the URLs of glance API servers available to cinder ([http[s]://][hostname|ip]:port). If protocol is not specified it defaults to http. - * - ``glance_api_ssl_compression`` = ``False`` - - (Boolean) Enables or disables negotiation of SSL layer compression. In some cases disabling compression can improve data throughput, such as when high network bandwidth is available and you use compressed image formats like qcow2. - * - ``glance_api_version`` = ``1`` - - (Integer) Version of the glance API to use - * - ``glance_ca_certificates_file`` = ``None`` - - (String) Location of ca certificates file to use for glance client requests. - * - ``glance_catalog_info`` = ``image:glance:publicURL`` - - (String) Info to match when looking for glance in the service catalog. Format is: separated values of the form: <service_type>:<service_name>:<endpoint_type> - Only used if glance_api_servers are not provided. - * - ``glance_core_properties`` = ``checksum, container_format, disk_format, image_name, image_id, min_disk, min_ram, name, size`` - - (List) Default core properties of image - * - ``glance_num_retries`` = ``0`` - - (Integer) Number of retries when downloading an image from glance - * - ``glance_request_timeout`` = ``None`` - - (Integer) http/https timeout value for glance operations. If no value (None) is supplied here, the glanceclient default value is used. - * - ``image_conversion_dir`` = ``$state_path/conversion`` - - (String) Directory used for temporary storage during image conversion - * - ``image_upload_use_cinder_backend`` = ``False`` - - (Boolean) If set to True, upload-to-image in raw format will create a cloned volume and register its location to the image service, instead of uploading the volume content. 
The cinder backend and locations support must be enabled in the image service, and glance_api_version must be set to 2. - * - ``image_upload_use_internal_tenant`` = ``False`` - - (Boolean) If set to True, the image volume created by upload-to-image will be placed in the internal tenant. Otherwise, the image volume is created in the current context's tenant. - * - ``image_volume_cache_enabled`` = ``False`` - - (Boolean) Enable the image volume cache for this backend. - * - ``image_volume_cache_max_count`` = ``0`` - - (Integer) Max number of entries allowed in the image volume cache. 0 => unlimited. - * - ``image_volume_cache_max_size_gb`` = ``0`` - - (Integer) Max size of the image volume cache for this backend in GB. 0 => unlimited. - * - ``use_multipath_for_image_xfer`` = ``False`` - - (Boolean) Do we attach/detach volumes in cinder using multipath for volume to image and image to volume transfers? diff --git a/doc/source/configuration/tables/cinder-infinidat.rst b/doc/source/configuration/tables/cinder-infinidat.rst deleted file mode 100644 index c5529b281..000000000 --- a/doc/source/configuration/tables/cinder-infinidat.rst +++ /dev/null @@ -1,22 +0,0 @@ -.. - Warning: Do not edit this file. It is automatically generated from the - software project's code and your changes will be overwritten. - - The tool to generate this file lives in openstack-doc-tools repository. - - Please make any changes needed in the code, then run the - autogenerate-config-doc tool from the openstack-doc-tools repository, or - ask for help on the documentation mailing list, IRC channel or meeting. - -.. _cinder-infinidat: - -.. 
list-table:: Description of INFINIDAT InfiniBox volume driver configuration options - :header-rows: 1 - :class: config-ref-table - - * - Configuration option = Default value - - Description - * - **[DEFAULT]** - - - * - ``infinidat_pool_name`` = ``None`` - - (String) Name of the pool from which volumes are allocated diff --git a/doc/source/configuration/tables/cinder-infortrend.rst b/doc/source/configuration/tables/cinder-infortrend.rst deleted file mode 100644 index 7319c573a..000000000 --- a/doc/source/configuration/tables/cinder-infortrend.rst +++ /dev/null @@ -1,36 +0,0 @@ -.. - Warning: Do not edit this file. It is automatically generated from the - software project's code and your changes will be overwritten. - - The tool to generate this file lives in openstack-doc-tools repository. - - Please make any changes needed in the code, then run the - autogenerate-config-doc tool from the openstack-doc-tools repository, or - ask for help on the documentation mailing list, IRC channel or meeting. - -.. _cinder-infortrend: - -.. list-table:: Description of Infortrend volume driver configuration options - :header-rows: 1 - :class: config-ref-table - - * - Configuration option = Default value - - Description - * - **[DEFAULT]** - - - * - ``infortrend_cli_max_retries`` = ``5`` - - (Integer) Maximum retry time for cli. Default is 5. - * - ``infortrend_cli_path`` = ``/opt/bin/Infortrend/raidcmd_ESDS10.jar`` - - (String) The Infortrend CLI absolute path. By default, it is at /opt/bin/Infortrend/raidcmd_ESDS10.jar - * - ``infortrend_cli_timeout`` = ``30`` - - (Integer) Default timeout for CLI copy operations in minutes. Support: migrate volume, create cloned volume and create volume from snapshot. By Default, it is 30 minutes. - * - ``infortrend_pools_name`` = - - (String) Infortrend raid pool name list. It is separated with comma. - * - ``infortrend_provisioning`` = ``full`` - - (String) Let the volume use specific provisioning. By default, it is the full provisioning. 
The supported options are full or thin. - * - ``infortrend_slots_a_channels_id`` = ``0,1,2,3,4,5,6,7`` - - (String) Infortrend raid channel ID list on Slot A for OpenStack usage. It is separated with comma. By default, it is the channel 0~7. - * - ``infortrend_slots_b_channels_id`` = ``0,1,2,3,4,5,6,7`` - - (String) Infortrend raid channel ID list on Slot B for OpenStack usage. It is separated with comma. By default, it is the channel 0~7. - * - ``infortrend_tiering`` = ``0`` - - (String) Let the volume use specific tiering level. By default, it is the level 0. The supported levels are 0,2,3,4. diff --git a/doc/source/configuration/tables/cinder-kaminario.rst b/doc/source/configuration/tables/cinder-kaminario.rst deleted file mode 100644 index 321584152..000000000 --- a/doc/source/configuration/tables/cinder-kaminario.rst +++ /dev/null @@ -1,22 +0,0 @@ -.. - Warning: Do not edit this file. It is automatically generated from the - software project's code and your changes will be overwritten. - - The tool to generate this file lives in openstack-doc-tools repository. - - Please make any changes needed in the code, then run the - autogenerate-config-doc tool from the openstack-doc-tools repository, or - ask for help on the documentation mailing list, IRC channel or meeting. - -.. _cinder-kaminario: - -.. list-table:: Description of Kaminario volume driver configuration options - :header-rows: 1 - :class: config-ref-table - - * - Configuration option = Default value - - Description - * - **[DEFAULT]** - - - * - ``auto_calc_max_oversubscription_ratio`` = ``False`` - - (Boolean) K2 driver will calculate max_oversubscription_ratio on setting this option as True. diff --git a/doc/source/configuration/tables/cinder-lenovo.rst b/doc/source/configuration/tables/cinder-lenovo.rst deleted file mode 100644 index f67b0a154..000000000 --- a/doc/source/configuration/tables/cinder-lenovo.rst +++ /dev/null @@ -1,32 +0,0 @@ -.. - Warning: Do not edit this file. 
It is automatically generated from the - software project's code and your changes will be overwritten. - - The tool to generate this file lives in openstack-doc-tools repository. - - Please make any changes needed in the code, then run the - autogenerate-config-doc tool from the openstack-doc-tools repository, or - ask for help on the documentation mailing list, IRC channel or meeting. - -.. _cinder-lenovo: - -.. list-table:: Description of Lenovo volume driver configuration options - :header-rows: 1 - :class: config-ref-table - - * - Configuration option = Default value - - Description - * - **[DEFAULT]** - - - * - ``lenovo_api_protocol`` = ``https`` - - (String) Lenovo api interface protocol. - * - ``lenovo_backend_name`` = ``A`` - - (String) Pool or Vdisk name to use for volume creation. - * - ``lenovo_backend_type`` = ``virtual`` - - (String) linear (for VDisk) or virtual (for Pool). - * - ``lenovo_iscsi_ips`` = - - (List) List of comma-separated target iSCSI IP addresses. - * - ``lenovo_verify_certificate`` = ``False`` - - (Boolean) Whether to verify Lenovo array SSL certificate. - * - ``lenovo_verify_certificate_path`` = ``None`` - - (String) Lenovo array SSL certificate path. diff --git a/doc/source/configuration/tables/cinder-lvm.rst b/doc/source/configuration/tables/cinder-lvm.rst deleted file mode 100644 index f0fa97545..000000000 --- a/doc/source/configuration/tables/cinder-lvm.rst +++ /dev/null @@ -1,32 +0,0 @@ -.. - Warning: Do not edit this file. It is automatically generated from the - software project's code and your changes will be overwritten. - - The tool to generate this file lives in openstack-doc-tools repository. - - Please make any changes needed in the code, then run the - autogenerate-config-doc tool from the openstack-doc-tools repository, or - ask for help on the documentation mailing list, IRC channel or meeting. - -.. _cinder-lvm: - -.. 
list-table:: Description of LVM configuration options - :header-rows: 1 - :class: config-ref-table - - * - Configuration option = Default value - - Description - * - **[DEFAULT]** - - - * - ``lvm_conf_file`` = ``/etc/cinder/lvm.conf`` - - (String) LVM conf file to use for the LVM driver in Cinder; this setting is ignored if the specified file does not exist (You can also specify 'None' to not use a conf file even if one exists). - * - ``lvm_max_over_subscription_ratio`` = ``1.0`` - - (Floating point) max_over_subscription_ratio setting for the LVM driver. If set, this takes precedence over the general max_over_subscription_ratio option. If None, the general option is used. - * - ``lvm_mirrors`` = ``0`` - - (Integer) If >0, create LVs with multiple mirrors. Note that this requires lvm_mirrors + 2 PVs with available space - * - ``lvm_suppress_fd_warnings`` = ``False`` - - (Boolean) Suppress leaked file descriptor warnings in LVM commands. - * - ``lvm_type`` = ``default`` - - (String) Type of LVM volumes to deploy; (default, thin, or auto). Auto defaults to thin if thin is supported. - * - ``volume_group`` = ``cinder-volumes`` - - (String) Name for the VG that will contain exported volumes diff --git a/doc/source/configuration/tables/cinder-nas.rst b/doc/source/configuration/tables/cinder-nas.rst deleted file mode 100644 index 4dcfa2da5..000000000 --- a/doc/source/configuration/tables/cinder-nas.rst +++ /dev/null @@ -1,40 +0,0 @@ -.. - Warning: Do not edit this file. It is automatically generated from the - software project's code and your changes will be overwritten. - - The tool to generate this file lives in openstack-doc-tools repository. - - Please make any changes needed in the code, then run the - autogenerate-config-doc tool from the openstack-doc-tools repository, or - ask for help on the documentation mailing list, IRC channel or meeting. - -.. _cinder-nas: - -.. 
list-table:: Description of NAS configuration options - :header-rows: 1 - :class: config-ref-table - - * - Configuration option = Default value - - Description - * - **[DEFAULT]** - - - * - ``nas_host`` = - - (String) IP address or Hostname of NAS system. - * - ``nas_login`` = ``admin`` - - (String) User name to connect to NAS system. - * - ``nas_mount_options`` = ``None`` - - (String) Options used to mount the storage backend file system where Cinder volumes are stored. - * - ``nas_password`` = - - (String) Password to connect to NAS system. - * - ``nas_private_key`` = - - (String) Filename of private key to use for SSH authentication. - * - ``nas_secure_file_operations`` = ``auto`` - - (String) Allow network-attached storage systems to operate in a secure environment where root level access is not permitted. If set to False, access is as the root user and insecure. If set to True, access is not as root. If set to auto, a check is done to determine if this is a new installation: True is used if so, otherwise False. Default is auto. - * - ``nas_secure_file_permissions`` = ``auto`` - - (String) Set more secure file permissions on network-attached storage volume files to restrict broad other/world access. If set to False, volumes are created with open permissions. If set to True, volumes are created with permissions for the cinder user and group (660). If set to auto, a check is done to determine if this is a new installation: True is used if so, otherwise False. Default is auto. - * - ``nas_share_path`` = - - (String) Path to the share to use for storing Cinder volumes. For example: "/srv/export1" for an NFS server export available at 10.0.5.10:/srv/export1 . - * - ``nas_ssh_port`` = ``22`` - - (Port number) SSH port to use to connect to NAS system. - * - ``nas_volume_prov_type`` = ``thin`` - - (String) Provisioning type that will be used when creating volumes. 
diff --git a/doc/source/configuration/tables/cinder-nec_m.rst b/doc/source/configuration/tables/cinder-nec_m.rst deleted file mode 100644 index 8cc4183f2..000000000 --- a/doc/source/configuration/tables/cinder-nec_m.rst +++ /dev/null @@ -1,58 +0,0 @@ -.. - Warning: Do not edit this file. It is automatically generated from the - software project's code and your changes will be overwritten. - - The tool to generate this file lives in openstack-doc-tools repository. - - Please make any changes needed in the code, then run the - autogenerate-config-doc tool from the openstack-doc-tools repository, or - ask for help on the documentation mailing list, IRC channel or meeting. - -.. _cinder-nec_m: - -.. list-table:: Description of NEC Storage M series driver configuration options - :header-rows: 1 - :class: config-ref-table - - * - Configuration option = Default value - - Description - * - **[DEFAULT]** - - - * - ``nec_actual_free_capacity`` = ``False`` - - (Boolean) Return actual free capacity. - * - ``nec_backend_max_ld_count`` = ``1024`` - - (Integer) Maximum number of managing sessions. - * - ``nec_backup_ldname_format`` = ``LX:%s`` - - (String) M-Series Storage LD name format for snapshots. - * - ``nec_backup_pools`` = - - (List) M-Series Storage backup pool number to be used. - * - ``nec_diskarray_name`` = - - (String) Diskarray name of M-Series Storage. - * - ``nec_iscsi_portals_per_cont`` = ``1`` - - (Integer) Number of iSCSI portals. - * - ``nec_ismcli_fip`` = ``None`` - - (IP) FIP address of M-Series Storage iSMCLI. - * - ``nec_ismcli_password`` = - - (String) Password for M-Series Storage iSMCLI. - * - ``nec_ismcli_privkey`` = - - (String) Filename of RSA private key for M-Series Storage iSMCLI. - * - ``nec_ismcli_user`` = - - (String) User name for M-Series Storage iSMCLI. - * - ``nec_ismview_alloptimize`` = ``False`` - - (Boolean) Use legacy iSMCLI command with optimization. 
- * - ``nec_ismview_dir`` = ``/tmp/nec/cinder`` - - (String) Output path of iSMview file. - * - ``nec_ldname_format`` = ``LX:%s`` - - (String) M-Series Storage LD name format for volumes. - * - ``nec_ldset`` = - - (String) M-Series Storage LD Set name for Compute Node. - * - ``nec_ldset_for_controller_node`` = - - (String) M-Series Storage LD Set name for Controller Node. - * - ``nec_pools`` = - - (List) M-Series Storage pool numbers list to be used. - * - ``nec_queryconfig_view`` = ``False`` - - (Boolean) Use legacy iSMCLI command. - * - ``nec_ssh_pool_port_number`` = ``22`` - - (Integer) Port number of ssh pool. - * - ``nec_unpairthread_timeout`` = ``3600`` - - (Integer) Timeout value of Unpairthread. diff --git a/doc/source/configuration/tables/cinder-netapp_7mode_iscsi.rst b/doc/source/configuration/tables/cinder-netapp_7mode_iscsi.rst deleted file mode 100644 index f1f3c79fd..000000000 --- a/doc/source/configuration/tables/cinder-netapp_7mode_iscsi.rst +++ /dev/null @@ -1,46 +0,0 @@ -.. - Warning: Do not edit this file. It is automatically generated from the - software project's code and your changes will be overwritten. - - The tool to generate this file lives in openstack-doc-tools repository. - - Please make any changes needed in the code, then run the - autogenerate-config-doc tool from the openstack-doc-tools repository, or - ask for help on the documentation mailing list, IRC channel or meeting. - -.. _cinder-netapp_7mode_iscsi: - -.. list-table:: Description of NetApp 7-Mode iSCSI driver configuration options - :header-rows: 1 - :class: config-ref-table - - * - Configuration option = Default value - - Description - * - **[DEFAULT]** - - - * - ``netapp_login`` = ``None`` - - (String) Administrative user account name used to access the storage system or proxy server. - * - ``netapp_partner_backend_name`` = ``None`` - - (String) The name of the config.conf stanza for a Data ONTAP (7-mode) HA partner. 
This option is only used by the driver when connecting to an instance with a storage family of Data ONTAP operating in 7-Mode, and it is required if the storage protocol selected is FC. - * - ``netapp_password`` = ``None`` - - (String) Password for the administrative user account specified in the netapp_login option. - * - ``netapp_pool_name_search_pattern`` = ``(.+)`` - - (String) This option is used to restrict provisioning to the specified pools. Specify the value of this option to be a regular expression which will be applied to the names of objects from the storage backend which represent pools in Cinder. This option is only utilized when the storage protocol is configured to use iSCSI or FC. - * - ``netapp_replication_aggregate_map`` = ``None`` - - (Unknown) Multi opt of dictionaries to represent the aggregate mapping between source and destination back ends when using whole back end replication. For every source aggregate associated with a cinder pool (NetApp FlexVol), you would need to specify the destination aggregate on the replication target device. A replication target device is configured with the configuration option replication_device. Specify this option as many times as you have replication devices. Each entry takes the standard dict config form: netapp_replication_aggregate_map = backend_id:,src_aggr_name1:dest_aggr_name1,src_aggr_name2:dest_aggr_name2,... - * - ``netapp_server_hostname`` = ``None`` - - (String) The hostname (or IP address) for the storage system or proxy server. - * - ``netapp_server_port`` = ``None`` - - (Integer) The TCP port to use for communication with the storage system or proxy server. If not specified, Data ONTAP drivers will use 80 for HTTP and 443 for HTTPS; E-Series will use 8080 for HTTP and 8443 for HTTPS. 
- * - ``netapp_size_multiplier`` = ``1.2`` - - (Floating point) The quantity to be multiplied by the requested volume size to ensure enough space is available on the virtual storage server (Vserver) to fulfill the volume creation request. Note: this option is deprecated and will be removed in favor of "reserved_percentage" in the Mitaka release. - * - ``netapp_snapmirror_quiesce_timeout`` = ``3600`` - - (Integer) The maximum time in seconds to wait for existing SnapMirror transfers to complete before aborting during a failover. - * - ``netapp_storage_family`` = ``ontap_cluster`` - - (String) The storage family type used on the storage system; valid values are ontap_7mode for using Data ONTAP operating in 7-Mode, ontap_cluster for using clustered Data ONTAP, or eseries for using E-Series. - * - ``netapp_storage_protocol`` = ``None`` - - (String) The storage protocol to be used on the data path with the storage system. - * - ``netapp_transport_type`` = ``http`` - - (String) The transport protocol used when communicating with the storage system or proxy server. - * - ``netapp_vfiler`` = ``None`` - - (String) The vFiler unit on which provisioning of block storage volumes will be done. This option is only used by the driver when connecting to an instance with a storage family of Data ONTAP operating in 7-Mode. Only use this option when utilizing the MultiStore feature on the NetApp storage system. diff --git a/doc/source/configuration/tables/cinder-netapp_7mode_nfs.rst b/doc/source/configuration/tables/cinder-netapp_7mode_nfs.rst deleted file mode 100644 index d9a59ac4a..000000000 --- a/doc/source/configuration/tables/cinder-netapp_7mode_nfs.rst +++ /dev/null @@ -1,50 +0,0 @@ -.. - Warning: Do not edit this file. It is automatically generated from the - software project's code and your changes will be overwritten. - - The tool to generate this file lives in openstack-doc-tools repository. 
- - Please make any changes needed in the code, then run the - autogenerate-config-doc tool from the openstack-doc-tools repository, or - ask for help on the documentation mailing list, IRC channel or meeting. - -.. _cinder-netapp_7mode_nfs: - -.. list-table:: Description of NetApp 7-Mode NFS driver configuration options - :header-rows: 1 - :class: config-ref-table - - * - Configuration option = Default value - - Description - * - **[DEFAULT]** - - - * - ``expiry_thres_minutes`` = ``720`` - - (Integer) This option specifies the threshold for last access time for images in the NFS image cache. When a cache cleaning cycle begins, images in the cache that have not been accessed in the last M minutes, where M is the value of this parameter, will be deleted from the cache to create free space on the NFS share. - * - ``netapp_login`` = ``None`` - - (String) Administrative user account name used to access the storage system or proxy server. - * - ``netapp_partner_backend_name`` = ``None`` - - (String) The name of the config.conf stanza for a Data ONTAP (7-mode) HA partner. This option is only used by the driver when connecting to an instance with a storage family of Data ONTAP operating in 7-Mode, and it is required if the storage protocol selected is FC. - * - ``netapp_password`` = ``None`` - - (String) Password for the administrative user account specified in the netapp_login option. - * - ``netapp_pool_name_search_pattern`` = ``(.+)`` - - (String) This option is used to restrict provisioning to the specified pools. Specify the value of this option to be a regular expression which will be applied to the names of objects from the storage backend which represent pools in Cinder. This option is only utilized when the storage protocol is configured to use iSCSI or FC. - * - ``netapp_replication_aggregate_map`` = ``None`` - - (Unknown) Multi opt of dictionaries to represent the aggregate mapping between source and destination back ends when using whole back end replication. 
For every source aggregate associated with a cinder pool (NetApp FlexVol), you would need to specify the destination aggregate on the replication target device. A replication target device is configured with the configuration option replication_device. Specify this option as many times as you have replication devices. Each entry takes the standard dict config form: netapp_replication_aggregate_map = backend_id:,src_aggr_name1:dest_aggr_name1,src_aggr_name2:dest_aggr_name2,... - * - ``netapp_server_hostname`` = ``None`` - - (String) The hostname (or IP address) for the storage system or proxy server. - * - ``netapp_server_port`` = ``None`` - - (Integer) The TCP port to use for communication with the storage system or proxy server. If not specified, Data ONTAP drivers will use 80 for HTTP and 443 for HTTPS; E-Series will use 8080 for HTTP and 8443 for HTTPS. - * - ``netapp_snapmirror_quiesce_timeout`` = ``3600`` - - (Integer) The maximum time in seconds to wait for existing SnapMirror transfers to complete before aborting during a failover. - * - ``netapp_storage_family`` = ``ontap_cluster`` - - (String) The storage family type used on the storage system; valid values are ontap_7mode for using Data ONTAP operating in 7-Mode, ontap_cluster for using clustered Data ONTAP, or eseries for using E-Series. - * - ``netapp_storage_protocol`` = ``None`` - - (String) The storage protocol to be used on the data path with the storage system. - * - ``netapp_transport_type`` = ``http`` - - (String) The transport protocol used when communicating with the storage system or proxy server. - * - ``netapp_vfiler`` = ``None`` - - (String) The vFiler unit on which provisioning of block storage volumes will be done. This option is only used by the driver when connecting to an instance with a storage family of Data ONTAP operating in 7-Mode. Only use this option when utilizing the MultiStore feature on the NetApp storage system. 
- * - ``thres_avl_size_perc_start`` = ``20`` - - (Integer) If the percentage of available space for an NFS share has dropped below the value specified by this option, the NFS image cache will be cleaned. - * - ``thres_avl_size_perc_stop`` = ``60`` - - (Integer) When the percentage of available space on an NFS share has reached the percentage specified by this option, the driver will stop clearing files from the NFS image cache that have not been accessed in the last M minutes, where M is the value of the expiry_thres_minutes configuration option. diff --git a/doc/source/configuration/tables/cinder-netapp_cdot_iscsi.rst b/doc/source/configuration/tables/cinder-netapp_cdot_iscsi.rst deleted file mode 100644 index 84d563f94..000000000 --- a/doc/source/configuration/tables/cinder-netapp_cdot_iscsi.rst +++ /dev/null @@ -1,50 +0,0 @@ -.. - Warning: Do not edit this file. It is automatically generated from the - software project's code and your changes will be overwritten. - - The tool to generate this file lives in openstack-doc-tools repository. - - Please make any changes needed in the code, then run the - autogenerate-config-doc tool from the openstack-doc-tools repository, or - ask for help on the documentation mailing list, IRC channel or meeting. - -.. _cinder-netapp_cdot_iscsi: - -.. list-table:: Description of NetApp cDOT iSCSI driver configuration options - :header-rows: 1 - :class: config-ref-table - - * - Configuration option = Default value - - Description - * - **[DEFAULT]** - - - * - ``netapp_login`` = ``None`` - - (String) Administrative user account name used to access the storage system or proxy server. - * - ``netapp_lun_ostype`` = ``None`` - - (String) This option defines the type of operating system that will access a LUN exported from Data ONTAP; it is assigned to the LUN at the time it is created. - * - ``netapp_lun_space_reservation`` = ``enabled`` - - (String) This option determines if storage space is reserved for LUN allocation. 
If enabled, LUNs are thick provisioned. If space reservation is disabled, storage space is allocated on demand. - * - ``netapp_partner_backend_name`` = ``None`` - - (String) The name of the config.conf stanza for a Data ONTAP (7-mode) HA partner. This option is only used by the driver when connecting to an instance with a storage family of Data ONTAP operating in 7-Mode, and it is required if the storage protocol selected is FC. - * - ``netapp_password`` = ``None`` - - (String) Password for the administrative user account specified in the netapp_login option. - * - ``netapp_pool_name_search_pattern`` = ``(.+)`` - - (String) This option is used to restrict provisioning to the specified pools. Specify the value of this option to be a regular expression which will be applied to the names of objects from the storage backend which represent pools in Cinder. This option is only utilized when the storage protocol is configured to use iSCSI or FC. - * - ``netapp_replication_aggregate_map`` = ``None`` - - (Unknown) Multi opt of dictionaries to represent the aggregate mapping between source and destination back ends when using whole back end replication. For every source aggregate associated with a cinder pool (NetApp FlexVol), you would need to specify the destination aggregate on the replication target device. A replication target device is configured with the configuration option replication_device. Specify this option as many times as you have replication devices. Each entry takes the standard dict config form: netapp_replication_aggregate_map = backend_id:,src_aggr_name1:dest_aggr_name1,src_aggr_name2:dest_aggr_name2,... - * - ``netapp_server_hostname`` = ``None`` - - (String) The hostname (or IP address) for the storage system or proxy server. - * - ``netapp_server_port`` = ``None`` - - (Integer) The TCP port to use for communication with the storage system or proxy server. 
If not specified, Data ONTAP drivers will use 80 for HTTP and 443 for HTTPS; E-Series will use 8080 for HTTP and 8443 for HTTPS. - * - ``netapp_size_multiplier`` = ``1.2`` - - (Floating point) The quantity to be multiplied by the requested volume size to ensure enough space is available on the virtual storage server (Vserver) to fulfill the volume creation request. Note: this option is deprecated and will be removed in favor of "reserved_percentage" in the Mitaka release. - * - ``netapp_snapmirror_quiesce_timeout`` = ``3600`` - - (Integer) The maximum time in seconds to wait for existing SnapMirror transfers to complete before aborting during a failover. - * - ``netapp_storage_family`` = ``ontap_cluster`` - - (String) The storage family type used on the storage system; valid values are ontap_7mode for using Data ONTAP operating in 7-Mode, ontap_cluster for using clustered Data ONTAP, or eseries for using E-Series. - * - ``netapp_storage_protocol`` = ``None`` - - (String) The storage protocol to be used on the data path with the storage system. - * - ``netapp_transport_type`` = ``http`` - - (String) The transport protocol used when communicating with the storage system or proxy server. - * - ``netapp_vserver`` = ``None`` - - (String) This option specifies the virtual storage server (Vserver) name on the storage cluster on which provisioning of block storage volumes should occur. diff --git a/doc/source/configuration/tables/cinder-netapp_cdot_nfs.rst b/doc/source/configuration/tables/cinder-netapp_cdot_nfs.rst deleted file mode 100644 index e7e1e145c..000000000 --- a/doc/source/configuration/tables/cinder-netapp_cdot_nfs.rst +++ /dev/null @@ -1,58 +0,0 @@ -.. - Warning: Do not edit this file. It is automatically generated from the - software project's code and your changes will be overwritten. - - The tool to generate this file lives in openstack-doc-tools repository. 
- - Please make any changes needed in the code, then run the - autogenerate-config-doc tool from the openstack-doc-tools repository, or - ask for help on the documentation mailing list, IRC channel or meeting. - -.. _cinder-netapp_cdot_nfs: - -.. list-table:: Description of NetApp cDOT NFS driver configuration options - :header-rows: 1 - :class: config-ref-table - - * - Configuration option = Default value - - Description - * - **[DEFAULT]** - - - * - ``expiry_thres_minutes`` = ``720`` - - (Integer) This option specifies the threshold for last access time for images in the NFS image cache. When a cache cleaning cycle begins, images in the cache that have not been accessed in the last M minutes, where M is the value of this parameter, will be deleted from the cache to create free space on the NFS share. - * - ``netapp_copyoffload_tool_path`` = ``None`` - - (String) This option specifies the path of the NetApp copy offload tool binary. Ensure that the binary has execute permissions set which allow the effective user of the cinder-volume process to execute the file. - * - ``netapp_host_type`` = ``None`` - - (String) This option defines the type of operating system for all initiators that can access a LUN. This information is used when mapping LUNs to individual hosts or groups of hosts. - * - ``netapp_host_type`` = ``None`` - - (String) This option defines the type of operating system for all initiators that can access a LUN. This information is used when mapping LUNs to individual hosts or groups of hosts. - * - ``netapp_login`` = ``None`` - - (String) Administrative user account name used to access the storage system or proxy server. - * - ``netapp_lun_ostype`` = ``None`` - - (String) This option defines the type of operating system that will access a LUN exported from Data ONTAP; it is assigned to the LUN at the time it is created. - * - ``netapp_partner_backend_name`` = ``None`` - - (String) The name of the config.conf stanza for a Data ONTAP (7-mode) HA partner. 
This option is only used by the driver when connecting to an instance with a storage family of Data ONTAP operating in 7-Mode, and it is required if the storage protocol selected is FC. - * - ``netapp_password`` = ``None`` - - (String) Password for the administrative user account specified in the netapp_login option. - * - ``netapp_pool_name_search_pattern`` = ``(.+)`` - - (String) This option is used to restrict provisioning to the specified pools. Specify the value of this option to be a regular expression which will be applied to the names of objects from the storage backend which represent pools in Cinder. This option is only utilized when the storage protocol is configured to use iSCSI or FC. - * - ``netapp_replication_aggregate_map`` = ``None`` - - (Unknown) Multi opt of dictionaries to represent the aggregate mapping between source and destination back ends when using whole back end replication. For every source aggregate associated with a cinder pool (NetApp FlexVol), you would need to specify the destination aggregate on the replication target device. A replication target device is configured with the configuration option replication_device. Specify this option as many times as you have replication devices. Each entry takes the standard dict config form: netapp_replication_aggregate_map = backend_id:,src_aggr_name1:dest_aggr_name1,src_aggr_name2:dest_aggr_name2,... - * - ``netapp_server_hostname`` = ``None`` - - (String) The hostname (or IP address) for the storage system or proxy server. - * - ``netapp_server_port`` = ``None`` - - (Integer) The TCP port to use for communication with the storage system or proxy server. If not specified, Data ONTAP drivers will use 80 for HTTP and 443 for HTTPS; E-Series will use 8080 for HTTP and 8443 for HTTPS. - * - ``netapp_snapmirror_quiesce_timeout`` = ``3600`` - - (Integer) The maximum time in seconds to wait for existing SnapMirror transfers to complete before aborting during a failover. 
- * - ``netapp_storage_family`` = ``ontap_cluster`` - - (String) The storage family type used on the storage system; valid values are ontap_7mode for using Data ONTAP operating in 7-Mode, ontap_cluster for using clustered Data ONTAP, or eseries for using E-Series. - * - ``netapp_storage_protocol`` = ``None`` - - (String) The storage protocol to be used on the data path with the storage system. - * - ``netapp_transport_type`` = ``http`` - - (String) The transport protocol used when communicating with the storage system or proxy server. - * - ``netapp_vserver`` = ``None`` - - (String) This option specifies the virtual storage server (Vserver) name on the storage cluster on which provisioning of block storage volumes should occur. - * - ``thres_avl_size_perc_start`` = ``20`` - - (Integer) If the percentage of available space for an NFS share has dropped below the value specified by this option, the NFS image cache will be cleaned. - * - ``thres_avl_size_perc_stop`` = ``60`` - - (Integer) When the percentage of available space on an NFS share has reached the percentage specified by this option, the driver will stop clearing files from the NFS image cache that have not been accessed in the last M minutes, where M is the value of the expiry_thres_minutes configuration option. diff --git a/doc/source/configuration/tables/cinder-netapp_eseries_iscsi.rst b/doc/source/configuration/tables/cinder-netapp_eseries_iscsi.rst deleted file mode 100644 index 5325df62b..000000000 --- a/doc/source/configuration/tables/cinder-netapp_eseries_iscsi.rst +++ /dev/null @@ -1,50 +0,0 @@ -.. - Warning: Do not edit this file. It is automatically generated from the - software project's code and your changes will be overwritten. - - The tool to generate this file lives in openstack-doc-tools repository. 
- - Please make any changes needed in the code, then run the - autogenerate-config-doc tool from the openstack-doc-tools repository, or - ask for help on the documentation mailing list, IRC channel or meeting. - -.. _cinder-netapp_eseries_iscsi: - -.. list-table:: Description of NetApp E-Series driver configuration options - :header-rows: 1 - :class: config-ref-table - - * - Configuration option = Default value - - Description - * - **[DEFAULT]** - - - * - ``netapp_controller_ips`` = ``None`` - - (String) This option is only utilized when the storage family is configured to eseries. This option is used to restrict provisioning to the specified controllers. Specify the value of this option to be a comma separated list of controller hostnames or IP addresses to be used for provisioning. - * - ``netapp_enable_multiattach`` = ``False`` - - (Boolean) This option specifies whether the driver should allow operations that require multiple attachments to a volume. An example would be live migration of servers that have volumes attached. When enabled, this backend is limited to 256 total volumes in order to guarantee volumes can be accessed by more than one host. - * - ``netapp_host_type`` = ``None`` - - (String) This option defines the type of operating system for all initiators that can access a LUN. This information is used when mapping LUNs to individual hosts or groups of hosts. - * - ``netapp_login`` = ``None`` - - (String) Administrative user account name used to access the storage system or proxy server. - * - ``netapp_partner_backend_name`` = ``None`` - - (String) The name of the config.conf stanza for a Data ONTAP (7-mode) HA partner. This option is only used by the driver when connecting to an instance with a storage family of Data ONTAP operating in 7-Mode, and it is required if the storage protocol selected is FC. - * - ``netapp_password`` = ``None`` - - (String) Password for the administrative user account specified in the netapp_login option. 
- * - ``netapp_pool_name_search_pattern`` = ``(.+)`` - - (String) This option is used to restrict provisioning to the specified pools. Specify the value of this option to be a regular expression which will be applied to the names of objects from the storage backend which represent pools in Cinder. This option is only utilized when the storage protocol is configured to use iSCSI or FC. - * - ``netapp_replication_aggregate_map`` = ``None`` - - (Unknown) Multi opt of dictionaries to represent the aggregate mapping between source and destination back ends when using whole back end replication. For every source aggregate associated with a cinder pool (NetApp FlexVol), you would need to specify the destination aggregate on the replication target device. A replication target device is configured with the configuration option replication_device. Specify this option as many times as you have replication devices. Each entry takes the standard dict config form: netapp_replication_aggregate_map = backend_id:,src_aggr_name1:dest_aggr_name1,src_aggr_name2:dest_aggr_name2,... - * - ``netapp_sa_password`` = ``None`` - - (String) Password for the NetApp E-Series storage array. - * - ``netapp_server_hostname`` = ``None`` - - (String) The hostname (or IP address) for the storage system or proxy server. - * - ``netapp_server_port`` = ``None`` - - (Integer) The TCP port to use for communication with the storage system or proxy server. If not specified, Data ONTAP drivers will use 80 for HTTP and 443 for HTTPS; E-Series will use 8080 for HTTP and 8443 for HTTPS. - * - ``netapp_snapmirror_quiesce_timeout`` = ``3600`` - - (Integer) The maximum time in seconds to wait for existing SnapMirror transfers to complete before aborting during a failover. 
- * - ``netapp_storage_family`` = ``ontap_cluster`` - - (String) The storage family type used on the storage system; valid values are ontap_7mode for using Data ONTAP operating in 7-Mode, ontap_cluster for using clustered Data ONTAP, or eseries for using E-Series. - * - ``netapp_transport_type`` = ``http`` - - (String) The transport protocol used when communicating with the storage system or proxy server. - * - ``netapp_webservice_path`` = ``/devmgr/v2`` - - (String) This option is used to specify the path to the E-Series proxy application on a proxy server. The value is combined with the value of the netapp_transport_type, netapp_server_hostname, and netapp_server_port options to create the URL used by the driver to connect to the proxy application. diff --git a/doc/source/configuration/tables/cinder-nexenta.rst b/doc/source/configuration/tables/cinder-nexenta.rst deleted file mode 100644 index 4f7ed8512..000000000 --- a/doc/source/configuration/tables/cinder-nexenta.rst +++ /dev/null @@ -1,70 +0,0 @@ -.. - Warning: Do not edit this file. It is automatically generated from the - software project's code and your changes will be overwritten. - - The tool to generate this file lives in openstack-doc-tools repository. - - Please make any changes needed in the code, then run the - autogenerate-config-doc tool from the openstack-doc-tools repository, or - ask for help on the documentation mailing list, IRC channel or meeting. - -.. _cinder-nexenta: - -.. 
list-table:: Description of Nexenta driver configuration options - :header-rows: 1 - :class: config-ref-table - - * - Configuration option = Default value - - Description - * - **[DEFAULT]** - - - * - ``nexenta_blocksize`` = ``4096`` - - (Integer) Block size for datasets - * - ``nexenta_chunksize`` = ``32768`` - - (Integer) NexentaEdge iSCSI LUN object chunk size - * - ``nexenta_client_address`` = - - (String) NexentaEdge iSCSI Gateway client address for non-VIP service - * - ``nexenta_dataset_compression`` = ``on`` - - (String) Compression value for new ZFS folders. - * - ``nexenta_dataset_dedup`` = ``off`` - - (String) Deduplication value for new ZFS folders. - * - ``nexenta_dataset_description`` = - - (String) Human-readable description for the folder. - * - ``nexenta_host`` = - - (String) IP address of Nexenta SA - * - ``nexenta_iscsi_target_portal_port`` = ``3260`` - - (Integer) Nexenta target portal port - * - ``nexenta_mount_point_base`` = ``$state_path/mnt`` - - (String) Base directory that contains NFS share mount points - * - ``nexenta_nbd_symlinks_dir`` = ``/dev/disk/by-path`` - - (String) NexentaEdge logical path of directory to store symbolic links to NBDs - * - ``nexenta_nms_cache_volroot`` = ``True`` - - (Boolean) If set True cache NexentaStor appliance volroot option value. - * - ``nexenta_password`` = ``nexenta`` - - (String) Password to connect to Nexenta SA - * - ``nexenta_rest_port`` = ``0`` - - (Integer) HTTP(S) port to connect to Nexenta REST API server. If it is equal zero, 8443 for HTTPS and 8080 for HTTP is used - * - ``nexenta_rest_protocol`` = ``auto`` - - (String) Use http or https for REST connection (default auto) - * - ``nexenta_rrmgr_compression`` = ``0`` - - (Integer) Enable stream compression, level 1..9. 1 - gives best speed; 9 - gives best compression. - * - ``nexenta_rrmgr_connections`` = ``2`` - - (Integer) Number of TCP connections. - * - ``nexenta_rrmgr_tcp_buf_size`` = ``4096`` - - (Integer) TCP Buffer size in KiloBytes. 
- * - ``nexenta_shares_config`` = ``/etc/cinder/nfs_shares`` - - (String) File with the list of available nfs shares - * - ``nexenta_sparse`` = ``False`` - - (Boolean) Enables or disables the creation of sparse datasets - * - ``nexenta_sparsed_volumes`` = ``True`` - - (Boolean) Enables or disables the creation of volumes as sparsed files that take no space. If disabled (False), volume is created as a regular file, which takes a long time. - * - ``nexenta_target_group_prefix`` = ``cinder/`` - - (String) Prefix for iSCSI target groups on SA - * - ``nexenta_target_prefix`` = ``iqn.1986-03.com.sun:02:cinder-`` - - (String) IQN prefix for iSCSI targets - * - ``nexenta_use_https`` = ``True`` - - (Boolean) Use secure HTTP for REST connection (default True) - * - ``nexenta_user`` = ``admin`` - - (String) User name to connect to Nexenta SA - * - ``nexenta_volume`` = ``cinder`` - - (String) SA Pool that holds all volumes diff --git a/doc/source/configuration/tables/cinder-nexenta5.rst b/doc/source/configuration/tables/cinder-nexenta5.rst deleted file mode 100644 index 24cdea383..000000000 --- a/doc/source/configuration/tables/cinder-nexenta5.rst +++ /dev/null @@ -1,50 +0,0 @@ -.. - Warning: Do not edit this file. It is automatically generated from the - software project's code and your changes will be overwritten. - - The tool to generate this file lives in openstack-doc-tools repository. - - Please make any changes needed in the code, then run the - autogenerate-config-doc tool from the openstack-doc-tools repository, or - ask for help on the documentation mailing list, IRC channel or meeting. - -.. _cinder-nexenta5: - -.. list-table:: Description of NexentaStor 5 driver configuration options - :header-rows: 1 - :class: config-ref-table - - * - Configuration option = Default value - - Description - * - **[DEFAULT]** - - - * - ``nexenta_dataset_compression`` = ``on`` - - (String) Compression value for new ZFS folders. 
- * - ``nexenta_dataset_dedup`` = ``off`` - - (String) Deduplication value for new ZFS folders. - * - ``nexenta_dataset_description`` = - - (String) Human-readable description for the folder. - * - ``nexenta_host`` = - - (String) IP address of Nexenta SA - * - ``nexenta_iscsi_target_portal_port`` = ``3260`` - - (Integer) Nexenta target portal port - * - ``nexenta_mount_point_base`` = ``$state_path/mnt`` - - (String) Base directory that contains NFS share mount points - * - ``nexenta_ns5_blocksize`` = ``32`` - - (Integer) Block size for datasets - * - ``nexenta_rest_port`` = ``0`` - - (Integer) HTTP(S) port to connect to Nexenta REST API server. If it is equal zero, 8443 for HTTPS and 8080 for HTTP is used - * - ``nexenta_rest_protocol`` = ``auto`` - - (String) Use http or https for REST connection (default auto) - * - ``nexenta_sparse`` = ``False`` - - (Boolean) Enables or disables the creation of sparse datasets - * - ``nexenta_sparsed_volumes`` = ``True`` - - (Boolean) Enables or disables the creation of volumes as sparsed files that take no space. If disabled (False), volume is created as a regular file, which takes a long time. - * - ``nexenta_use_https`` = ``True`` - - (Boolean) Use secure HTTP for REST connection (default True) - * - ``nexenta_user`` = ``admin`` - - (String) User name to connect to Nexenta SA - * - ``nexenta_volume`` = ``cinder`` - - (String) SA Pool that holds all volumes - * - ``nexenta_volume_group`` = ``iscsi`` - - (String) Volume group for ns5 diff --git a/doc/source/configuration/tables/cinder-nexenta_edge.rst b/doc/source/configuration/tables/cinder-nexenta_edge.rst deleted file mode 100644 index 6aeb82799..000000000 --- a/doc/source/configuration/tables/cinder-nexenta_edge.rst +++ /dev/null @@ -1,42 +0,0 @@ -.. - Warning: Do not edit this file. It is automatically generated from the - software project's code and your changes will be overwritten. - - The tool to generate this file lives in openstack-doc-tools repository. 
- - Please make any changes needed in the code, then run the - autogenerate-config-doc tool from the openstack-doc-tools repository, or - ask for help on the documentation mailing list, IRC channel or meeting. - -.. _cinder-nexenta_edge: - -.. list-table:: Description of NexentaEdge driver configuration options - :header-rows: 1 - :class: config-ref-table - - * - Configuration option = Default value - - Description - * - **[DEFAULT]** - - - * - ``nexenta_blocksize`` = ``4096`` - - (Integer) Block size for datasets - * - ``nexenta_chunksize`` = ``32768`` - - (Integer) NexentaEdge iSCSI LUN object chunk size - * - ``nexenta_client_address`` = - - (String) NexentaEdge iSCSI Gateway client address for non-VIP service - * - ``nexenta_iscsi_service`` = - - (String) NexentaEdge iSCSI service name - * - ``nexenta_iscsi_target_portal_port`` = ``3260`` - - (Integer) Nexenta target portal port - * - ``nexenta_lun_container`` = - - (String) NexentaEdge logical path of bucket for LUNs - * - ``nexenta_rest_address`` = - - (String) IP address of NexentaEdge management REST API endpoint - * - ``nexenta_rest_password`` = ``nexenta`` - - (String) Password to connect to NexentaEdge - * - ``nexenta_rest_port`` = ``0`` - - (Integer) HTTP(S) port to connect to Nexenta REST API server. If it is equal zero, 8443 for HTTPS and 8080 for HTTP is used - * - ``nexenta_rest_protocol`` = ``auto`` - - (String) Use http or https for REST connection (default auto) - * - ``nexenta_rest_user`` = ``admin`` - - (String) User name to connect to NexentaEdge diff --git a/doc/source/configuration/tables/cinder-nimble.rst b/doc/source/configuration/tables/cinder-nimble.rst deleted file mode 100644 index 6c97a7445..000000000 --- a/doc/source/configuration/tables/cinder-nimble.rst +++ /dev/null @@ -1,28 +0,0 @@ -.. - Warning: Do not edit this file. It is automatically generated from the - software project's code and your changes will be overwritten. 
- - The tool to generate this file lives in openstack-doc-tools repository. - - Please make any changes needed in the code, then run the - autogenerate-config-doc tool from the openstack-doc-tools repository, or - ask for help on the documentation mailing list, IRC channel or meeting. - -.. _cinder-nimble: - -.. list-table:: Description of Nimble driver configuration options - :header-rows: 1 - :class: config-ref-table - - * - Configuration option = Default value - - Description - * - **[DEFAULT]** - - - * - ``nimble_pool_name`` = ``default`` - - (String) Nimble Controller pool name - * - ``nimble_subnet_label`` = ``*`` - - (String) Nimble Subnet Label - * - ``nimble_verify_cert_path`` = ``None`` - - (String) Path to Nimble Array SSL certificate - * - ``nimble_verify_certificate`` = ``False`` - - (String) Whether to verify Nimble SSL Certificate diff --git a/doc/source/configuration/tables/cinder-osbrick.rst b/doc/source/configuration/tables/cinder-osbrick.rst deleted file mode 100644 index 0f6bd8a09..000000000 --- a/doc/source/configuration/tables/cinder-osbrick.rst +++ /dev/null @@ -1,28 +0,0 @@ -.. - Warning: Do not edit this file. It is automatically generated from the - software project's code and your changes will be overwritten. - - The tool to generate this file lives in openstack-doc-tools repository. - - Please make any changes needed in the code, then run the - autogenerate-config-doc tool from the openstack-doc-tools repository, or - ask for help on the documentation mailing list, IRC channel or meeting. - -.. _cinder-osbrick: - -.. list-table:: Description of os-brick configuration options - :header-rows: 1 - :class: config-ref-table - - * - Configuration option = Default value - - Description - * - **[privsep_osbrick]** - - - * - ``capabilities`` = ``[]`` - - (Unknown) List of Linux capabilities retained by the privsep daemon. - * - ``group`` = ``None`` - - (String) Group that the privsep daemon should run as. 
- * - ``helper_command`` = ``None`` - - (String) Command to invoke to start the privsep daemon if not using the "fork" method. If not specified, a default is generated using "sudo privsep-helper" and arguments designed to recreate the current configuration. This command must accept suitable --privsep_context and --privsep_sock_path arguments. - * - ``user`` = ``None`` - - (String) User that the privsep daemon should run as. diff --git a/doc/source/configuration/tables/cinder-profiler.rst b/doc/source/configuration/tables/cinder-profiler.rst deleted file mode 100644 index 05b5965d0..000000000 --- a/doc/source/configuration/tables/cinder-profiler.rst +++ /dev/null @@ -1,60 +0,0 @@ -.. - Warning: Do not edit this file. It is automatically generated from the - software project's code and your changes will be overwritten. - - The tool to generate this file lives in openstack-doc-tools repository. - - Please make any changes needed in the code, then run the - autogenerate-config-doc tool from the openstack-doc-tools repository, or - ask for help on the documentation mailing list, IRC channel or meeting. - -.. _cinder-profiler: - -.. list-table:: Description of profiler configuration options - :header-rows: 1 - :class: config-ref-table - - * - Configuration option = Default value - - Description - * - **[profiler]** - - - * - ``connection_string`` = ``messaging://`` - - (String) Connection string for a notifier backend. Default value is messaging:// which sets the notifier to oslo_messaging. - - Examples of possible values: - - * messaging://: use oslo_messaging driver for sending notifications. - - * mongodb://127.0.0.1:27017 : use mongodb driver for sending notifications. - - * elasticsearch://127.0.0.1:9200 : use elasticsearch driver for sending notifications. - * - ``enabled`` = ``False`` - - (Boolean) Enables the profiling for all services on this node. Default value is False (fully disable the profiling feature). 
- - Possible values: - - * True: Enables the feature - - * False: Disables the feature. The profiling cannot be started via this project operations. If the profiling is triggered by another project, this project part will be empty. - * - ``es_doc_type`` = ``notification`` - - (String) Document type for notification indexing in elasticsearch. - * - ``es_scroll_size`` = ``10000`` - - (Integer) Elasticsearch splits large requests in batches. This parameter defines maximum size of each batch (for example: es_scroll_size=10000). - * - ``es_scroll_time`` = ``2m`` - - (String) This parameter is a time value parameter (for example: es_scroll_time=2m), indicating for how long the nodes that participate in the search will maintain relevant resources in order to continue and support it. - * - ``hmac_keys`` = ``SECRET_KEY`` - - (String) Secret key(s) to use for encrypting context data for performance profiling. This string value should have the following format: [,,...], where each key is some random string. A user who triggers the profiling via the REST API has to set one of these keys in the headers of the REST API call to include profiling results of this node for this particular project. - - Both "enabled" flag and "hmac_keys" config options should be set to enable profiling. Also, to generate correct profiling information across all services at least one key needs to be consistent between OpenStack projects. This ensures it can be used from client side to generate the trace, containing information from all possible resources. - * - ``sentinel_service_name`` = ``mymaster`` - - (String) Redissentinel uses a service name to identify a master redis service. This parameter defines the name (for example: sentinal_service_name=mymaster). - * - ``socket_timeout`` = ``0.1`` - - (Floating point) Redissentinel provides a timeout option on the connections. This parameter defines that timeout (for example: socket_timeout=0.1). 
- * - ``trace_sqlalchemy`` = ``False`` - - (Boolean) Enables SQL requests profiling in services. Default value is False (SQL requests won't be traced). - - Possible values: - - * True: Enables SQL requests profiling. Each SQL query will be part of the trace and can the be analyzed by how much time was spent for that. - - * False: Disables SQL requests profiling. The spent time is only shown on a higher level of operations. Single SQL queries cannot be analyzed this way. diff --git a/doc/source/configuration/tables/cinder-prophetstor_dpl.rst b/doc/source/configuration/tables/cinder-prophetstor_dpl.rst deleted file mode 100644 index 02f2903ec..000000000 --- a/doc/source/configuration/tables/cinder-prophetstor_dpl.rst +++ /dev/null @@ -1,34 +0,0 @@ -.. - Warning: Do not edit this file. It is automatically generated from the - software project's code and your changes will be overwritten. - - The tool to generate this file lives in openstack-doc-tools repository. - - Please make any changes needed in the code, then run the - autogenerate-config-doc tool from the openstack-doc-tools repository, or - ask for help on the documentation mailing list, IRC channel or meeting. - -.. _cinder-prophetstor_dpl: - -.. list-table:: Description of ProphetStor Fibre Channel and iSCSi drivers configuration options - :header-rows: 1 - :class: config-ref-table - - * - Configuration option = Default value - - Description - * - **[DEFAULT]** - - - * - ``dpl_pool`` = - - (String) DPL pool uuid in which DPL volumes are stored. - * - ``dpl_port`` = ``8357`` - - (Port number) DPL port number. - * - ``iscsi_port`` = ``3260`` - - (Port number) The port that the iSCSI daemon is listening on - * - ``san_ip`` = - - (String) IP address of SAN controller - * - ``san_login`` = ``admin`` - - (String) Username for SAN controller - * - ``san_password`` = - - (String) Password for SAN controller - * - ``san_thin_provision`` = ``True`` - - (Boolean) Use thin provisioning for SAN volumes? 
diff --git a/doc/source/configuration/tables/cinder-pure.rst b/doc/source/configuration/tables/cinder-pure.rst deleted file mode 100644 index 5b26a99db..000000000 --- a/doc/source/configuration/tables/cinder-pure.rst +++ /dev/null @@ -1,34 +0,0 @@ -.. - Warning: Do not edit this file. It is automatically generated from the - software project's code and your changes will be overwritten. - - The tool to generate this file lives in openstack-doc-tools repository. - - Please make any changes needed in the code, then run the - autogenerate-config-doc tool from the openstack-doc-tools repository, or - ask for help on the documentation mailing list, IRC channel or meeting. - -.. _cinder-pure: - -.. list-table:: Description of Pure Storage driver configuration options - :header-rows: 1 - :class: config-ref-table - - * - Configuration option = Default value - - Description - * - **[DEFAULT]** - - - * - ``pure_api_token`` = ``None`` - - (String) REST API authorization token. - * - ``pure_automatic_max_oversubscription_ratio`` = ``True`` - - (Boolean) Automatically determine an oversubscription ratio based on the current total data reduction values. If used this calculated value will override the max_over_subscription_ratio config option. - * - ``pure_eradicate_on_delete`` = ``False`` - - (Boolean) When enabled, all Pure volumes, snapshots, and protection groups will be eradicated at the time of deletion in Cinder. Data will NOT be recoverable after a delete with this set to True! When disabled, volumes and snapshots will go into pending eradication state and can be recovered. - * - ``pure_replica_interval_default`` = ``900`` - - (Integer) Snapshot replication interval in seconds. - * - ``pure_replica_retention_long_term_default`` = ``7`` - - (Integer) Retain snapshots per day on target for this time (in days.) - * - ``pure_replica_retention_long_term_per_day_default`` = ``3`` - - (Integer) Retain how many snapshots for each day. 
- * - ``pure_replica_retention_short_term_default`` = ``14400`` - - (Integer) Retain all snapshots on target for this time (in seconds.) diff --git a/doc/source/configuration/tables/cinder-qnap.rst b/doc/source/configuration/tables/cinder-qnap.rst deleted file mode 100644 index 0ac8b3012..000000000 --- a/doc/source/configuration/tables/cinder-qnap.rst +++ /dev/null @@ -1,26 +0,0 @@ -.. - Warning: Do not edit this file. It is automatically generated from the - software project's code and your changes will be overwritten. - - The tool to generate this file lives in openstack-doc-tools repository. - - Please make any changes needed in the code, then run the - autogenerate-config-doc tool from the openstack-doc-tools repository, or - ask for help on the documentation mailing list, IRC channel or meeting. - -.. _cinder-qnap: - -.. list-table:: Description of QNAP storage volume driver configuration options - :header-rows: 1 - :class: config-ref-table - - * - Configuration option = Default value - - Description - * - **[DEFAULT]** - - - * - ``qnap_management_url`` = ``None`` - - (URI) The URL to management QNAP Storage - * - ``qnap_poolname`` = ``None`` - - (String) The pool name in the QNAP Storage - * - ``qnap_storage_protocol`` = ``iscsi`` - - (String) Communication protocol to access QNAP storage diff --git a/doc/source/configuration/tables/cinder-quobyte.rst b/doc/source/configuration/tables/cinder-quobyte.rst deleted file mode 100644 index 613f73968..000000000 --- a/doc/source/configuration/tables/cinder-quobyte.rst +++ /dev/null @@ -1,30 +0,0 @@ -.. - Warning: Do not edit this file. It is automatically generated from the - software project's code and your changes will be overwritten. - - The tool to generate this file lives in openstack-doc-tools repository. - - Please make any changes needed in the code, then run the - autogenerate-config-doc tool from the openstack-doc-tools repository, or - ask for help on the documentation mailing list, IRC channel or meeting. 
- -.. _cinder-quobyte: - -.. list-table:: Description of Quobyte USP volume driver configuration options - :header-rows: 1 - :class: config-ref-table - - * - Configuration option = Default value - - Description - * - **[DEFAULT]** - - - * - ``quobyte_client_cfg`` = ``None`` - - (String) Path to a Quobyte Client configuration file. - * - ``quobyte_mount_point_base`` = ``$state_path/mnt`` - - (String) Base dir containing the mount point for the Quobyte volume. - * - ``quobyte_qcow2_volumes`` = ``True`` - - (Boolean) Create volumes as QCOW2 files rather than raw files. - * - ``quobyte_sparsed_volumes`` = ``True`` - - (Boolean) Create volumes as sparse files which take no space. If set to False, volume is created as regular file.In such case volume creation takes a lot of time. - * - ``quobyte_volume_url`` = ``None`` - - (URI) URL to the Quobyte volume e.g., quobyte:/// diff --git a/doc/source/configuration/tables/cinder-quota.rst b/doc/source/configuration/tables/cinder-quota.rst deleted file mode 100644 index bcf704b92..000000000 --- a/doc/source/configuration/tables/cinder-quota.rst +++ /dev/null @@ -1,42 +0,0 @@ -.. - Warning: Do not edit this file. It is automatically generated from the - software project's code and your changes will be overwritten. - - The tool to generate this file lives in openstack-doc-tools repository. - - Please make any changes needed in the code, then run the - autogenerate-config-doc tool from the openstack-doc-tools repository, or - ask for help on the documentation mailing list, IRC channel or meeting. - -.. _cinder-quota: - -.. 
list-table:: Description of quota configuration options - :header-rows: 1 - :class: config-ref-table - - * - Configuration option = Default value - - Description - * - **[DEFAULT]** - - - * - ``max_age`` = ``0`` - - (Integer) Number of seconds between subsequent usage refreshes - * - ``quota_backup_gigabytes`` = ``1000`` - - (Integer) Total amount of storage, in gigabytes, allowed for backups per project - * - ``quota_backups`` = ``10`` - - (Integer) Number of volume backups allowed per project - * - ``quota_consistencygroups`` = ``10`` - - (Integer) Number of consistencygroups allowed per project - * - ``quota_driver`` = ``cinder.quota.DbQuotaDriver`` - - (String) Default driver to use for quota checks - * - ``quota_gigabytes`` = ``1000`` - - (Integer) Total amount of storage, in gigabytes, allowed for volumes and snapshots per project - * - ``quota_groups`` = ``10`` - - (Integer) Number of groups allowed per project - * - ``quota_snapshots`` = ``10`` - - (Integer) Number of volume snapshots allowed per project - * - ``quota_volumes`` = ``10`` - - (Integer) Number of volumes allowed per project - * - ``reservation_expire`` = ``86400`` - - (Integer) Number of seconds until a reservation expires - * - ``use_default_quota_class`` = ``True`` - - (Boolean) Enables or disables use of default quota class with default quota. diff --git a/doc/source/configuration/tables/cinder-redis.rst b/doc/source/configuration/tables/cinder-redis.rst deleted file mode 100644 index 6f2c303a7..000000000 --- a/doc/source/configuration/tables/cinder-redis.rst +++ /dev/null @@ -1,36 +0,0 @@ -.. - Warning: Do not edit this file. It is automatically generated from the - software project's code and your changes will be overwritten. - - The tool to generate this file lives in openstack-doc-tools repository. 
- - Please make any changes needed in the code, then run the - autogenerate-config-doc tool from the openstack-doc-tools repository, or - ask for help on the documentation mailing list, IRC channel or meeting. - -.. _cinder-redis: - -.. list-table:: Description of Redis configuration options - :header-rows: 1 - :class: config-ref-table - - * - Configuration option = Default value - - Description - * - **[matchmaker_redis]** - - - * - ``check_timeout`` = ``20000`` - - (Integer) Time in ms to wait before the transaction is killed. - * - ``host`` = ``127.0.0.1`` - - (String) DEPRECATED: Host to locate redis. Replaced by [DEFAULT]/transport_url - * - ``password`` = - - (String) DEPRECATED: Password for Redis server (optional). Replaced by [DEFAULT]/transport_url - * - ``port`` = ``6379`` - - (Port number) DEPRECATED: Use this port to connect to redis host. Replaced by [DEFAULT]/transport_url - * - ``sentinel_group_name`` = ``oslo-messaging-zeromq`` - - (String) Redis replica set name. - * - ``sentinel_hosts`` = - - (List) DEPRECATED: List of Redis Sentinel hosts (fault tolerance mode), e.g., [host:port, host1:port ... ] Replaced by [DEFAULT]/transport_url - * - ``socket_timeout`` = ``10000`` - - (Integer) Timeout in ms on blocking socket operations. - * - ``wait_timeout`` = ``2000`` - - (Integer) Time in ms to wait between connection attempts. diff --git a/doc/source/configuration/tables/cinder-san.rst b/doc/source/configuration/tables/cinder-san.rst deleted file mode 100644 index 21e7d0566..000000000 --- a/doc/source/configuration/tables/cinder-san.rst +++ /dev/null @@ -1,42 +0,0 @@ -.. - Warning: Do not edit this file. It is automatically generated from the - software project's code and your changes will be overwritten. - - The tool to generate this file lives in openstack-doc-tools repository. 
- - Please make any changes needed in the code, then run the - autogenerate-config-doc tool from the openstack-doc-tools repository, or - ask for help on the documentation mailing list, IRC channel or meeting. - -.. _cinder-san: - -.. list-table:: Description of SAN configuration options - :header-rows: 1 - :class: config-ref-table - - * - Configuration option = Default value - - Description - * - **[DEFAULT]** - - - * - ``san_clustername`` = - - (String) Cluster name to use for creating volumes - * - ``san_ip`` = - - (String) IP address of SAN controller - * - ``san_is_local`` = ``False`` - - (Boolean) Execute commands locally instead of over SSH; use if the volume service is running on the SAN device - * - ``san_login`` = ``admin`` - - (String) Username for SAN controller - * - ``san_password`` = - - (String) Password for SAN controller - * - ``san_private_key`` = - - (String) Filename of private key to use for SSH authentication - * - ``san_ssh_port`` = ``22`` - - (Port number) SSH port to use with SAN - * - ``san_thin_provision`` = ``True`` - - (Boolean) Use thin provisioning for SAN volumes? - * - ``ssh_conn_timeout`` = ``30`` - - (Integer) SSH connection timeout in seconds - * - ``ssh_max_pool_conn`` = ``5`` - - (Integer) Maximum ssh connections in the pool - * - ``ssh_min_pool_conn`` = ``1`` - - (Integer) Minimum ssh connections in the pool diff --git a/doc/source/configuration/tables/cinder-scality.rst b/doc/source/configuration/tables/cinder-scality.rst deleted file mode 100644 index 923457951..000000000 --- a/doc/source/configuration/tables/cinder-scality.rst +++ /dev/null @@ -1,26 +0,0 @@ -.. - Warning: Do not edit this file. It is automatically generated from the - software project's code and your changes will be overwritten. - - The tool to generate this file lives in openstack-doc-tools repository. 
- - Please make any changes needed in the code, then run the - autogenerate-config-doc tool from the openstack-doc-tools repository, or - ask for help on the documentation mailing list, IRC channel or meeting. - -.. _cinder-scality: - -.. list-table:: Description of Scality SOFS volume driver configuration options - :header-rows: 1 - :class: config-ref-table - - * - Configuration option = Default value - - Description - * - **[DEFAULT]** - - - * - ``scality_sofs_config`` = ``None`` - - (String) Path or URL to Scality SOFS configuration file - * - ``scality_sofs_mount_point`` = ``$state_path/scality`` - - (String) Base dir where Scality SOFS shall be mounted - * - ``scality_sofs_volume_dir`` = ``cinder/volumes`` - - (String) Path from Scality SOFS root to volume dir diff --git a/doc/source/configuration/tables/cinder-scheduler.rst b/doc/source/configuration/tables/cinder-scheduler.rst deleted file mode 100644 index 14c24e6f9..000000000 --- a/doc/source/configuration/tables/cinder-scheduler.rst +++ /dev/null @@ -1,40 +0,0 @@ -.. - Warning: Do not edit this file. It is automatically generated from the - software project's code and your changes will be overwritten. - - The tool to generate this file lives in openstack-doc-tools repository. - - Please make any changes needed in the code, then run the - autogenerate-config-doc tool from the openstack-doc-tools repository, or - ask for help on the documentation mailing list, IRC channel or meeting. - -.. _cinder-scheduler: - -.. list-table:: Description of scheduler configuration options - :header-rows: 1 - :class: config-ref-table - - * - Configuration option = Default value - - Description - * - **[DEFAULT]** - - - * - ``filter_function`` = ``None`` - - (String) String representation for an equation that will be used to filter hosts. Only used when the driver filter is set to be used by the Cinder scheduler. 
- * - ``goodness_function`` = ``None`` - - (String) String representation for an equation that will be used to determine the goodness of a host. Only used when using the goodness weigher is set to be used by the Cinder scheduler. - * - ``scheduler_default_filters`` = ``AvailabilityZoneFilter, CapacityFilter, CapabilitiesFilter`` - - (List) Which filter class names to use for filtering hosts when not specified in the request. - * - ``scheduler_default_weighers`` = ``CapacityWeigher`` - - (List) Which weigher class names to use for weighing hosts. - * - ``scheduler_driver`` = ``cinder.scheduler.filter_scheduler.FilterScheduler`` - - (String) Default scheduler driver to use - * - ``scheduler_host_manager`` = ``cinder.scheduler.host_manager.HostManager`` - - (String) The scheduler host manager class to use - * - ``scheduler_json_config_location`` = - - (String) Absolute path to scheduler configuration JSON file. - * - ``scheduler_manager`` = ``cinder.scheduler.manager.SchedulerManager`` - - (String) Full class name for the Manager for scheduler - * - ``scheduler_max_attempts`` = ``3`` - - (Integer) Maximum number of attempts to schedule a volume - * - ``scheduler_weight_handler`` = ``cinder.scheduler.weights.OrderedHostWeightHandler`` - - (String) Which handler to use for selecting the host/pool after weighing diff --git a/doc/source/configuration/tables/cinder-scst.rst b/doc/source/configuration/tables/cinder-scst.rst deleted file mode 100644 index a23d0bacc..000000000 --- a/doc/source/configuration/tables/cinder-scst.rst +++ /dev/null @@ -1,24 +0,0 @@ -.. - Warning: Do not edit this file. It is automatically generated from the - software project's code and your changes will be overwritten. - - The tool to generate this file lives in openstack-doc-tools repository. 
- - Please make any changes needed in the code, then run the - autogenerate-config-doc tool from the openstack-doc-tools repository, or - ask for help on the documentation mailing list, IRC channel or meeting. - -.. _cinder-scst: - -.. list-table:: Description of SCST volume driver configuration options - :header-rows: 1 - :class: config-ref-table - - * - Configuration option = Default value - - Description - * - **[DEFAULT]** - - - * - ``scst_target_driver`` = ``iscsi`` - - (String) SCST target implementation can choose from multiple SCST target drivers. - * - ``scst_target_iqn_name`` = ``None`` - - (String) Certain ISCSI targets have predefined target names, SCST target driver uses this name. diff --git a/doc/source/configuration/tables/cinder-sheepdog.rst b/doc/source/configuration/tables/cinder-sheepdog.rst deleted file mode 100644 index e4465e96e..000000000 --- a/doc/source/configuration/tables/cinder-sheepdog.rst +++ /dev/null @@ -1,24 +0,0 @@ -.. - Warning: Do not edit this file. It is automatically generated from the - software project's code and your changes will be overwritten. - - The tool to generate this file lives in openstack-doc-tools repository. - - Please make any changes needed in the code, then run the - autogenerate-config-doc tool from the openstack-doc-tools repository, or - ask for help on the documentation mailing list, IRC channel or meeting. - -.. _cinder-sheepdog: - -.. list-table:: Description of Sheepdog driver configuration options - :header-rows: 1 - :class: config-ref-table - - * - Configuration option = Default value - - Description - * - **[DEFAULT]** - - - * - ``sheepdog_store_address`` = ``127.0.0.1`` - - (String) IP address of sheep daemon. - * - ``sheepdog_store_port`` = ``7000`` - - (Port number) Port of sheep daemon. 
diff --git a/doc/source/configuration/tables/cinder-smbfs.rst b/doc/source/configuration/tables/cinder-smbfs.rst deleted file mode 100644 index 00869cd1d..000000000 --- a/doc/source/configuration/tables/cinder-smbfs.rst +++ /dev/null @@ -1,36 +0,0 @@ -.. - Warning: Do not edit this file. It is automatically generated from the - software project's code and your changes will be overwritten. - - The tool to generate this file lives in openstack-doc-tools repository. - - Please make any changes needed in the code, then run the - autogenerate-config-doc tool from the openstack-doc-tools repository, or - ask for help on the documentation mailing list, IRC channel or meeting. - -.. _cinder-smbfs: - -.. list-table:: Description of Samba volume driver configuration options - :header-rows: 1 - :class: config-ref-table - - * - Configuration option = Default value - - Description - * - **[DEFAULT]** - - - * - ``smbfs_allocation_info_file_path`` = ``$state_path/allocation_data`` - - (String) The path of the automatically generated file containing information about volume disk space allocation. - * - ``smbfs_default_volume_format`` = ``qcow2`` - - (String) Default format that will be used when creating volumes if no volume format is specified. - * - ``smbfs_mount_options`` = ``noperm,file_mode=0775,dir_mode=0775`` - - (String) Mount options passed to the smbfs client. See mount.cifs man page for details. - * - ``smbfs_mount_point_base`` = ``$state_path/mnt`` - - (String) Base dir containing mount points for smbfs shares. - * - ``smbfs_oversub_ratio`` = ``1.0`` - - (Floating point) This will compare the allocated to available space on the volume destination. If the ratio exceeds this number, the destination will no longer be valid. - * - ``smbfs_shares_config`` = ``/etc/cinder/smbfs_shares`` - - (String) File with the list of available smbfs shares. 
- * - ``smbfs_sparsed_volumes`` = ``True`` - - (Boolean) Create volumes as sparsed files which take no space rather than regular files when using raw format, in which case volume creation takes lot of time. - * - ``smbfs_used_ratio`` = ``0.95`` - - (Floating point) Percent of ACTUAL usage of the underlying volume before no new volumes can be allocated to the volume destination. diff --git a/doc/source/configuration/tables/cinder-solidfire.rst b/doc/source/configuration/tables/cinder-solidfire.rst deleted file mode 100644 index 1ca9ef68e..000000000 --- a/doc/source/configuration/tables/cinder-solidfire.rst +++ /dev/null @@ -1,40 +0,0 @@ -.. - Warning: Do not edit this file. It is automatically generated from the - software project's code and your changes will be overwritten. - - The tool to generate this file lives in openstack-doc-tools repository. - - Please make any changes needed in the code, then run the - autogenerate-config-doc tool from the openstack-doc-tools repository, or - ask for help on the documentation mailing list, IRC channel or meeting. - -.. _cinder-solidfire: - -.. list-table:: Description of SolidFire driver configuration options - :header-rows: 1 - :class: config-ref-table - - * - Configuration option = Default value - - Description - * - **[DEFAULT]** - - - * - ``sf_account_prefix`` = ``None`` - - (String) Create SolidFire accounts with this prefix. Any string can be used here, but the string "hostname" is special and will create a prefix using the cinder node hostname (previous default behavior). The default is NO prefix. - * - ``sf_allow_template_caching`` = ``True`` - - (Boolean) Create an internal cache of copy of images when a bootable volume is created to eliminate fetch from glance and qemu-conversion on subsequent calls. - * - ``sf_allow_tenant_qos`` = ``False`` - - (Boolean) Allow tenants to specify QOS on create - * - ``sf_api_port`` = ``443`` - - (Port number) SolidFire API port. 
Useful if the device api is behind a proxy on a different port. - * - ``sf_emulate_512`` = ``True`` - - (Boolean) Set 512 byte emulation on volume creation; - * - ``sf_enable_vag`` = ``False`` - - (Boolean) Utilize volume access groups on a per-tenant basis. - * - ``sf_enable_volume_mapping`` = ``True`` - - (Boolean) Create an internal mapping of volume IDs and account. Optimizes lookups and performance at the expense of memory, very large deployments may want to consider setting to False. - * - ``sf_svip`` = ``None`` - - (String) Overrides default cluster SVIP with the one specified. This is required or deployments that have implemented the use of VLANs for iSCSI networks in their cloud. - * - ``sf_template_account_name`` = ``openstack-vtemplate`` - - (String) Account name on the SolidFire Cluster to use as owner of template/cache volumes (created if does not exist). - * - ``sf_volume_prefix`` = ``UUID-`` - - (String) Create SolidFire volumes with this prefix. Volume names are of the form . The default is to use a prefix of 'UUID-'. diff --git a/doc/source/configuration/tables/cinder-storage.rst b/doc/source/configuration/tables/cinder-storage.rst deleted file mode 100644 index edaff6bbd..000000000 --- a/doc/source/configuration/tables/cinder-storage.rst +++ /dev/null @@ -1,80 +0,0 @@ -.. - Warning: Do not edit this file. It is automatically generated from the - software project's code and your changes will be overwritten. - - The tool to generate this file lives in openstack-doc-tools repository. - - Please make any changes needed in the code, then run the - autogenerate-config-doc tool from the openstack-doc-tools repository, or - ask for help on the documentation mailing list, IRC channel or meeting. - -.. _cinder-storage: - -.. 
list-table:: Description of storage configuration options - :header-rows: 1 - :class: config-ref-table - - * - Configuration option = Default value - - Description - * - **[DEFAULT]** - - - * - ``allocated_capacity_weight_multiplier`` = ``-1.0`` - - (Floating point) Multiplier used for weighing allocated capacity. Positive numbers mean to stack vs spread. - * - ``capacity_weight_multiplier`` = ``1.0`` - - (Floating point) Multiplier used for weighing free capacity. Negative numbers mean to stack vs spread. - * - ``enabled_backends`` = ``None`` - - (List) A list of backend names to use. These backend names should be backed by a unique [CONFIG] group with its options - * - ``iscsi_helper`` = ``tgtadm`` - - (String) iSCSI target user-land tool to use. tgtadm is default, use lioadm for LIO iSCSI support, scstadmin for SCST target support, ietadm for iSCSI Enterprise Target, iscsictl for Chelsio iSCSI Target or fake for testing. - * - ``iscsi_iotype`` = ``fileio`` - - (String) Sets the behavior of the iSCSI target to either perform blockio or fileio optionally, auto can be set and Cinder will autodetect type of backing device - * - ``iscsi_ip_address`` = ``$my_ip`` - - (String) The IP address that the iSCSI daemon is listening on - * - ``iscsi_port`` = ``3260`` - - (Port number) The port that the iSCSI daemon is listening on - * - ``iscsi_protocol`` = ``iscsi`` - - (String) Determines the iSCSI protocol for new iSCSI volumes, created with tgtadm or lioadm target helpers. In order to enable RDMA, this parameter should be set with the value "iser". The supported iSCSI protocol values are "iscsi" and "iser". - * - ``iscsi_target_flags`` = - - (String) Sets the target-specific flags for the iSCSI target. Only used for tgtadm to specify backing device flags using bsoflags option. The specified string is passed as is to the underlying tool. 
- * - ``iscsi_target_prefix`` = ``iqn.2010-10.org.openstack:`` - - (String) Prefix for iSCSI volumes - * - ``iscsi_write_cache`` = ``on`` - - (String) Sets the behavior of the iSCSI target to either perform write-back(on) or write-through(off). This parameter is valid if iscsi_helper is set to tgtadm. - * - ``iser_helper`` = ``tgtadm`` - - (String) The name of the iSER target user-land tool to use - * - ``iser_ip_address`` = ``$my_ip`` - - (String) The IP address that the iSER daemon is listening on - * - ``iser_port`` = ``3260`` - - (Port number) The port that the iSER daemon is listening on - * - ``iser_target_prefix`` = ``iqn.2010-10.org.openstack:`` - - (String) Prefix for iSER volumes - * - ``migration_create_volume_timeout_secs`` = ``300`` - - (Integer) Timeout for creating the volume to migrate to when performing volume migration (seconds) - * - ``num_iser_scan_tries`` = ``3`` - - (Integer) The maximum number of times to rescan iSER targetto find volume - * - ``num_volume_device_scan_tries`` = ``3`` - - (Integer) The maximum number of times to rescan targets to find volume - * - ``volume_backend_name`` = ``None`` - - (String) The backend name for a given driver implementation - * - ``volume_clear`` = ``zero`` - - (String) Method used to wipe old volumes - * - ``volume_clear_ionice`` = ``None`` - - (String) The flag to pass to ionice to alter the i/o priority of the process used to zero a volume after deletion, for example "-c3" for idle only priority. - * - ``volume_clear_size`` = ``0`` - - (Integer) Size in MiB to wipe at start of old volumes. 1024 MiBat max. 0 => all - * - ``volume_copy_blkio_cgroup_name`` = ``cinder-volume-copy`` - - (String) The blkio cgroup name to be used to limit bandwidth of volume copy - * - ``volume_copy_bps_limit`` = ``0`` - - (Integer) The upper limit of bandwidth of volume copy. 
0 => unlimited - * - ``volume_dd_blocksize`` = ``1M`` - - (String) The default block size used when copying/clearing volumes - * - ``volume_driver`` = ``cinder.volume.drivers.lvm.LVMVolumeDriver`` - - (String) Driver to use for volume creation - * - ``volume_manager`` = ``cinder.volume.manager.VolumeManager`` - - (String) Full class name for the Manager for volume - * - ``volume_service_inithost_offload`` = ``False`` - - (Boolean) Offload pending volume delete during volume service startup - * - ``volume_usage_audit_period`` = ``month`` - - (String) Time period for which to generate volume usages. The options are hour, day, month, or year. - * - ``volumes_dir`` = ``$state_path/volumes`` - - (String) Volume configuration file storage directory diff --git a/doc/source/configuration/tables/cinder-storage_ceph.rst b/doc/source/configuration/tables/cinder-storage_ceph.rst deleted file mode 100644 index 15529cbce..000000000 --- a/doc/source/configuration/tables/cinder-storage_ceph.rst +++ /dev/null @@ -1,44 +0,0 @@ -.. - Warning: Do not edit this file. It is automatically generated from the - software project's code and your changes will be overwritten. - - The tool to generate this file lives in openstack-doc-tools repository. - - Please make any changes needed in the code, then run the - autogenerate-config-doc tool from the openstack-doc-tools repository, or - ask for help on the documentation mailing list, IRC channel or meeting. - -.. _cinder-storage_ceph: - -.. list-table:: Description of Ceph storage configuration options - :header-rows: 1 - :class: config-ref-table - - * - Configuration option = Default value - - Description - * - **[DEFAULT]** - - - * - ``rados_connect_timeout`` = ``-1`` - - (Integer) Timeout value (in seconds) used when connecting to ceph cluster. If value < 0, no timeout is set and default librados value is used. - * - ``rados_connection_interval`` = ``5`` - - (Integer) Interval value (in seconds) between connection retries to ceph cluster. 
- * - ``rados_connection_retries`` = ``3`` - - (Integer) Number of retries if connection to ceph cluster failed. - * - ``rbd_ceph_conf`` = - - (String) Path to the ceph configuration file - * - ``rbd_cluster_name`` = ``ceph`` - - (String) The name of ceph cluster - * - ``rbd_flatten_volume_from_snapshot`` = ``False`` - - (Boolean) Flatten volumes created from snapshots to remove dependency from volume to snapshot - * - ``rbd_max_clone_depth`` = ``5`` - - (Integer) Maximum number of nested volume clones that are taken before a flatten occurs. Set to 0 to disable cloning. - * - ``rbd_pool`` = ``rbd`` - - (String) The RADOS pool where rbd volumes are stored - * - ``rbd_secret_uuid`` = ``None`` - - (String) The libvirt uuid of the secret for the rbd_user volumes - * - ``rbd_store_chunk_size`` = ``4`` - - (Integer) Volumes will be chunked into objects of this size (in megabytes). - * - ``rbd_user`` = ``None`` - - (String) The RADOS client name for accessing rbd volumes - only set when using cephx authentication - * - ``replication_connect_timeout`` = ``5`` - - (Integer) Timeout value (in seconds) used when connecting to ceph cluster to do a demotion/promotion of volumes. If value < 0, no timeout is set and default librados value is used. diff --git a/doc/source/configuration/tables/cinder-storage_gpfs.rst b/doc/source/configuration/tables/cinder-storage_gpfs.rst deleted file mode 100644 index 838266de6..000000000 --- a/doc/source/configuration/tables/cinder-storage_gpfs.rst +++ /dev/null @@ -1,42 +0,0 @@ -.. - Warning: Do not edit this file. It is automatically generated from the - software project's code and your changes will be overwritten. - - The tool to generate this file lives in openstack-doc-tools repository. - - Please make any changes needed in the code, then run the - autogenerate-config-doc tool from the openstack-doc-tools repository, or - ask for help on the documentation mailing list, IRC channel or meeting. - -.. _cinder-storage_gpfs: - -.. 
list-table:: Description of GPFS storage configuration options - :header-rows: 1 - :class: config-ref-table - - * - Configuration option = Default value - - Description - * - **[DEFAULT]** - - - * - ``gpfs_images_dir`` = ``None`` - - (String) Specifies the path of the Image service repository in GPFS. Leave undefined if not storing images in GPFS. - * - ``gpfs_images_share_mode`` = ``None`` - - (String) Specifies the type of image copy to be used. Set this when the Image service repository also uses GPFS so that image files can be transferred efficiently from the Image service to the Block Storage service. There are two valid values: "copy" specifies that a full copy of the image is made; "copy_on_write" specifies that copy-on-write optimization strategy is used and unmodified blocks of the image file are shared efficiently. - * - ``gpfs_max_clone_depth`` = ``0`` - - (Integer) Specifies an upper limit on the number of indirections required to reach a specific block due to snapshots or clones. A lengthy chain of copy-on-write snapshots or clones can have a negative impact on performance, but improves space utilization. 0 indicates unlimited clone depth. - * - ``gpfs_mount_point_base`` = ``None`` - - (String) Specifies the path of the GPFS directory where Block Storage volume and snapshot files are stored. - * - ``gpfs_sparse_volumes`` = ``True`` - - (Boolean) Specifies that volumes are created as sparse files which initially consume no space. If set to False, the volume is created as a fully allocated file, in which case, creation may take a significantly longer time. - * - ``gpfs_storage_pool`` = ``system`` - - (String) Specifies the storage pool that volumes are assigned to. By default, the system storage pool is used. - * - ``nas_host`` = - - (String) IP address or Hostname of NAS system. - * - ``nas_login`` = ``admin`` - - (String) User name to connect to NAS system. - * - ``nas_password`` = - - (String) Password to connect to NAS system. 
- * - ``nas_private_key`` = - - (String) Filename of private key to use for SSH authentication. - * - ``nas_ssh_port`` = ``22`` - - (Port number) SSH port to use to connect to NAS system. diff --git a/doc/source/configuration/tables/cinder-storage_nfs.rst b/doc/source/configuration/tables/cinder-storage_nfs.rst deleted file mode 100644 index 4f9597a67..000000000 --- a/doc/source/configuration/tables/cinder-storage_nfs.rst +++ /dev/null @@ -1,34 +0,0 @@ -.. - Warning: Do not edit this file. It is automatically generated from the - software project's code and your changes will be overwritten. - - The tool to generate this file lives in openstack-doc-tools repository. - - Please make any changes needed in the code, then run the - autogenerate-config-doc tool from the openstack-doc-tools repository, or - ask for help on the documentation mailing list, IRC channel or meeting. - -.. _cinder-storage_nfs: - -.. list-table:: Description of NFS storage configuration options - :header-rows: 1 - :class: config-ref-table - - * - Configuration option = Default value - - Description - * - **[DEFAULT]** - - - * - ``nfs_mount_attempts`` = ``3`` - - (Integer) The number of attempts to mount NFS shares before raising an error. At least one attempt will be made to mount an NFS share, regardless of the value specified. - * - ``nfs_mount_options`` = ``None`` - - (String) Mount options passed to the NFS client. See section of the NFS man page for details. - * - ``nfs_mount_point_base`` = ``$state_path/mnt`` - - (String) Base dir containing mount points for NFS shares. - * - ``nfs_qcow2_volumes`` = ``False`` - - (Boolean) Create volumes as QCOW2 files rather than raw files. - * - ``nfs_shares_config`` = ``/etc/cinder/nfs_shares`` - - (String) File with the list of available NFS shares. - * - ``nfs_snapshot_support`` = ``False`` - - (Boolean) Enable support for snapshots on the NFS driver. Platforms using libvirt <1.2.7 will encounter issues with this feature. 
- * - ``nfs_sparsed_volumes`` = ``True`` - - (Boolean) Create volumes as sparsed files which take no space. If set to False volume is created as regular file. In such case volume creation takes a lot of time. diff --git a/doc/source/configuration/tables/cinder-storwize.rst b/doc/source/configuration/tables/cinder-storwize.rst deleted file mode 100644 index 64c2b3451..000000000 --- a/doc/source/configuration/tables/cinder-storwize.rst +++ /dev/null @@ -1,64 +0,0 @@ -.. - Warning: Do not edit this file. It is automatically generated from the - software project's code and your changes will be overwritten. - - The tool to generate this file lives in openstack-doc-tools repository. - - Please make any changes needed in the code, then run the - autogenerate-config-doc tool from the openstack-doc-tools repository, or - ask for help on the documentation mailing list, IRC channel or meeting. - -.. _cinder-storwize: - -.. list-table:: Description of IBM Storwise driver configuration options - :header-rows: 1 - :class: config-ref-table - - * - Configuration option = Default value - - Description - * - **[DEFAULT]** - - - * - ``san_ip`` = - - (String) IP address of SAN controller - * - ``san_login`` = ``admin`` - - (String) Username for SAN controller - * - ``san_password`` = - - (String) Password for SAN controller - * - ``san_private_key`` = - - (String) Filename of private key to use for SSH authentication - * - ``san_ssh_port`` = ``22`` - - (Port number) SSH port to use with SAN - * - ``storwize_san_secondary_ip`` = ``None`` - - (String) Specifies secondary management IP or hostname to be used if san_ip is invalid or becomes inaccessible. - * - ``storwize_svc_allow_tenant_qos`` = ``False`` - - (Boolean) Allow tenants to specify QOS on create - * - ``storwize_svc_flashcopy_rate`` = ``50`` - - (Integer) Specifies the Storwize FlashCopy copy rate to be used when creating a full volume copy. The default is rate is 50, and the valid rates are 1-100. 
- * - ``storwize_svc_flashcopy_timeout`` = ``120`` - - (Integer) Maximum number of seconds to wait for FlashCopy to be prepared. - * - ``storwize_svc_iscsi_chap_enabled`` = ``True`` - - (Boolean) Configure CHAP authentication for iSCSI connections (Default: Enabled) - * - ``storwize_svc_multihostmap_enabled`` = ``True`` - - (Boolean) DEPRECATED: This option no longer has any affect. It is deprecated and will be removed in the next release. - * - ``storwize_svc_multipath_enabled`` = ``False`` - - (Boolean) Connect with multipath (FC only; iSCSI multipath is controlled by Nova) - * - ``storwize_svc_stretched_cluster_partner`` = ``None`` - - (String) If operating in stretched cluster mode, specify the name of the pool in which mirrored copies are stored.Example: "pool2" - * - ``storwize_svc_vol_autoexpand`` = ``True`` - - (Boolean) Storage system autoexpand parameter for volumes (True/False) - * - ``storwize_svc_vol_compression`` = ``False`` - - (Boolean) Storage system compression option for volumes - * - ``storwize_svc_vol_easytier`` = ``True`` - - (Boolean) Enable Easy Tier for volumes - * - ``storwize_svc_vol_grainsize`` = ``256`` - - (Integer) Storage system grain size parameter for volumes (32/64/128/256) - * - ``storwize_svc_vol_iogrp`` = ``0`` - - (Integer) The I/O group in which to allocate volumes - * - ``storwize_svc_vol_nofmtdisk`` = ``False`` - - (Boolean) Specifies that the volume not be formatted during creation. - * - ``storwize_svc_vol_rsize`` = ``2`` - - (Integer) Storage system space-efficiency parameter for volumes (percentage) - * - ``storwize_svc_vol_warning`` = ``0`` - - (Integer) Storage system threshold for volume capacity warnings (percentage) - * - ``storwize_svc_volpool_name`` = ``volpool`` - - (List) Comma separated list of storage system storage pools for volumes. 
diff --git a/doc/source/configuration/tables/cinder-swift.rst b/doc/source/configuration/tables/cinder-swift.rst deleted file mode 100644 index 005409f8d..000000000 --- a/doc/source/configuration/tables/cinder-swift.rst +++ /dev/null @@ -1,24 +0,0 @@ -.. - Warning: Do not edit this file. It is automatically generated from the - software project's code and your changes will be overwritten. - - The tool to generate this file lives in openstack-doc-tools repository. - - Please make any changes needed in the code, then run the - autogenerate-config-doc tool from the openstack-doc-tools repository, or - ask for help on the documentation mailing list, IRC channel or meeting. - -.. _cinder-swift: - -.. list-table:: Description of swift configuration options - :header-rows: 1 - :class: config-ref-table - - * - Configuration option = Default value - - Description - * - **[DEFAULT]** - - - * - ``backup_swift_auth_insecure`` = ``False`` - - (Boolean) Bypass verification of server certificate when making SSL connection to Swift. - * - ``backup_swift_auth_url`` = ``None`` - - (URI) The URL of the Keystone endpoint diff --git a/doc/source/configuration/tables/cinder-synology.rst b/doc/source/configuration/tables/cinder-synology.rst deleted file mode 100644 index 04925f6e3..000000000 --- a/doc/source/configuration/tables/cinder-synology.rst +++ /dev/null @@ -1,34 +0,0 @@ -.. - Warning: Do not edit this file. It is automatically generated from the - software project's code and your changes will be overwritten. - - The tool to generate this file lives in openstack-doc-tools repository. - - Please make any changes needed in the code, then run the - autogenerate-config-doc tool from the openstack-doc-tools repository, or - ask for help on the documentation mailing list, IRC channel or meeting. - -.. _cinder-synology: - -.. 
list-table:: Description of Synology volume driver configuration options - :header-rows: 1 - :class: config-ref-table - - * - Configuration option = Default value - - Description - * - **[DEFAULT]** - - - * - ``synology_admin_port`` = ``5000`` - - (Port number) Management port for Synology storage. - * - ``synology_device_id`` = ``None`` - - (String) Device id for skip one time password check for logging in Synology storage if OTP is enabled. - * - ``synology_one_time_pass`` = ``None`` - - (String) One time password of administrator for logging in Synology storage if OTP is enabled. - * - ``synology_password`` = - - (String) Password of administrator for logging in Synology storage. - * - ``synology_pool_name`` = - - (String) Volume on Synology storage to be used for creating lun. - * - ``synology_ssl_verify`` = ``True`` - - (Boolean) Do certificate validation or not if $driver_use_ssl is True - * - ``synology_username`` = ``admin`` - - (String) Administrator of Synology storage. diff --git a/doc/source/configuration/tables/cinder-tegile.rst b/doc/source/configuration/tables/cinder-tegile.rst deleted file mode 100644 index a98feb90c..000000000 --- a/doc/source/configuration/tables/cinder-tegile.rst +++ /dev/null @@ -1,24 +0,0 @@ -.. - Warning: Do not edit this file. It is automatically generated from the - software project's code and your changes will be overwritten. - - The tool to generate this file lives in openstack-doc-tools repository. - - Please make any changes needed in the code, then run the - autogenerate-config-doc tool from the openstack-doc-tools repository, or - ask for help on the documentation mailing list, IRC channel or meeting. - -.. _cinder-tegile: - -.. 
list-table:: Description of Tegile volume driver configuration options - :header-rows: 1 - :class: config-ref-table - - * - Configuration option = Default value - - Description - * - **[DEFAULT]** - - - * - ``tegile_default_pool`` = ``None`` - - (String) Create volumes in this pool - * - ``tegile_default_project`` = ``None`` - - (String) Create volumes in this project diff --git a/doc/source/configuration/tables/cinder-tintri.rst b/doc/source/configuration/tables/cinder-tintri.rst deleted file mode 100644 index 19666b204..000000000 --- a/doc/source/configuration/tables/cinder-tintri.rst +++ /dev/null @@ -1,32 +0,0 @@ -.. - Warning: Do not edit this file. It is automatically generated from the - software project's code and your changes will be overwritten. - - The tool to generate this file lives in openstack-doc-tools repository. - - Please make any changes needed in the code, then run the - autogenerate-config-doc tool from the openstack-doc-tools repository, or - ask for help on the documentation mailing list, IRC channel or meeting. - -.. _cinder-tintri: - -.. 
list-table:: Description of Tintri volume driver configuration options - :header-rows: 1 - :class: config-ref-table - - * - Configuration option = Default value - - Description - * - **[DEFAULT]** - - - * - ``tintri_api_version`` = ``v310`` - - (String) API version for the storage system - * - ``tintri_image_cache_expiry_days`` = ``30`` - - (Integer) Delete unused image snapshots older than mentioned days - * - ``tintri_image_shares_config`` = ``None`` - - (String) Path to image nfs shares file - * - ``tintri_server_hostname`` = ``None`` - - (String) The hostname (or IP address) for the storage system - * - ``tintri_server_password`` = ``None`` - - (String) Password for the storage system - * - ``tintri_server_username`` = ``None`` - - (String) User name for the storage system diff --git a/doc/source/configuration/tables/cinder-violin.rst b/doc/source/configuration/tables/cinder-violin.rst deleted file mode 100644 index 703856e6f..000000000 --- a/doc/source/configuration/tables/cinder-violin.rst +++ /dev/null @@ -1,30 +0,0 @@ -.. - Warning: Do not edit this file. It is automatically generated from the - software project's code and your changes will be overwritten. - - The tool to generate this file lives in openstack-doc-tools repository. - - Please make any changes needed in the code, then run the - autogenerate-config-doc tool from the openstack-doc-tools repository, or - ask for help on the documentation mailing list, IRC channel or meeting. - -.. _cinder-violin: - -.. 
list-table:: Description of Violin volume driver configuration options - :header-rows: 1 - :class: config-ref-table - - * - Configuration option = Default value - - Description - * - **[DEFAULT]** - - - * - ``violin_dedup_capable_pools`` = - - (List) Storage pools capable of dedup and other luns.(Comma separated list) - * - ``violin_dedup_only_pools`` = - - (List) Storage pools to be used to setup dedup luns only.(Comma separated list) - * - ``violin_iscsi_target_ips`` = - - (List) Target iSCSI addresses to use.(Comma separated list) - * - ``violin_pool_allocation_method`` = ``random`` - - (String) Method of choosing a storage pool for a lun. - * - ``violin_request_timeout`` = ``300`` - - (Integer) Global backend request timeout, in seconds. diff --git a/doc/source/configuration/tables/cinder-vmware.rst b/doc/source/configuration/tables/cinder-vmware.rst deleted file mode 100644 index af041a9ec..000000000 --- a/doc/source/configuration/tables/cinder-vmware.rst +++ /dev/null @@ -1,52 +0,0 @@ -.. - Warning: Do not edit this file. It is automatically generated from the - software project's code and your changes will be overwritten. - - The tool to generate this file lives in openstack-doc-tools repository. - - Please make any changes needed in the code, then run the - autogenerate-config-doc tool from the openstack-doc-tools repository, or - ask for help on the documentation mailing list, IRC channel or meeting. - -.. _cinder-vmware: - -.. list-table:: Description of VMware configuration options - :header-rows: 1 - :class: config-ref-table - - * - Configuration option = Default value - - Description - * - **[DEFAULT]** - - - * - ``vmware_api_retry_count`` = ``10`` - - (Integer) Number of times VMware vCenter server API must be retried upon connection related issues. - * - ``vmware_ca_file`` = ``None`` - - (String) CA bundle file to use in verifying the vCenter server certificate. 
- * - ``vmware_cluster_name`` = ``None`` - - (Multi-valued) Name of a vCenter compute cluster where volumes should be created. - * - ``vmware_connection_pool_size`` = ``10`` - - (Integer) Maximum number of connections in http connection pool. - * - ``vmware_host_ip`` = ``None`` - - (String) IP address for connecting to VMware vCenter server. - * - ``vmware_host_password`` = ``None`` - - (String) Password for authenticating with VMware vCenter server. - * - ``vmware_host_port`` = ``443`` - - (Port number) Port number for connecting to VMware vCenter server. - * - ``vmware_host_username`` = ``None`` - - (String) Username for authenticating with VMware vCenter server. - * - ``vmware_host_version`` = ``None`` - - (String) Optional string specifying the VMware vCenter server version. The driver attempts to retrieve the version from VMware vCenter server. Set this configuration only if you want to override the vCenter server version. - * - ``vmware_image_transfer_timeout_secs`` = ``7200`` - - (Integer) Timeout in seconds for VMDK volume transfer between Cinder and Glance. - * - ``vmware_insecure`` = ``False`` - - (Boolean) If true, the vCenter server certificate is not verified. If false, then the default CA truststore is used for verification. This option is ignored if "vmware_ca_file" is set. - * - ``vmware_max_objects_retrieval`` = ``100`` - - (Integer) Max number of objects to be retrieved per batch. Query results will be obtained in batches from the server and not in one shot. Server may still limit the count to something less than the configured value. - * - ``vmware_task_poll_interval`` = ``2.0`` - - (Floating point) The interval (in seconds) for polling remote tasks invoked on VMware vCenter server. - * - ``vmware_tmp_dir`` = ``/tmp`` - - (String) Directory where virtual disks are stored during volume backup and restore. - * - ``vmware_volume_folder`` = ``Volumes`` - - (String) Name of the vCenter inventory folder that will contain Cinder volumes. 
This folder will be created under "OpenStack/", where project_folder is of format "Project ()". - * - ``vmware_wsdl_location`` = ``None`` - - (String) Optional VIM service WSDL Location e.g http:///vimService.wsdl. Optional over-ride to default location for bug work-arounds. diff --git a/doc/source/configuration/tables/cinder-vzstorage.rst b/doc/source/configuration/tables/cinder-vzstorage.rst deleted file mode 100644 index ee11525e0..000000000 --- a/doc/source/configuration/tables/cinder-vzstorage.rst +++ /dev/null @@ -1,32 +0,0 @@ -.. - Warning: Do not edit this file. It is automatically generated from the - software project's code and your changes will be overwritten. - - The tool to generate this file lives in openstack-doc-tools repository. - - Please make any changes needed in the code, then run the - autogenerate-config-doc tool from the openstack-doc-tools repository, or - ask for help on the documentation mailing list, IRC channel or meeting. - -.. _cinder-vzstorage: - -.. list-table:: Description of Virtuozzo Storage volume driver configuration options - :header-rows: 1 - :class: config-ref-table - - * - Configuration option = Default value - - Description - * - **[DEFAULT]** - - - * - ``vzstorage_default_volume_format`` = ``raw`` - - (String) Default format that will be used when creating volumes if no volume format is specified. - * - ``vzstorage_mount_options`` = ``None`` - - (List) Mount options passed to the vzstorage client. See section of the pstorage-mount man page for details. - * - ``vzstorage_mount_point_base`` = ``$state_path/mnt`` - - (String) Base dir containing mount points for vzstorage shares. - * - ``vzstorage_shares_config`` = ``/etc/cinder/vzstorage_shares`` - - (String) File with the list of available vzstorage shares. - * - ``vzstorage_sparsed_volumes`` = ``True`` - - (Boolean) Create volumes as sparsed files which take no space rather than regular files when using raw format, in which case volume creation takes lot of time. 
- * - ``vzstorage_used_ratio`` = ``0.95`` - - (Floating point) Percent of ACTUAL usage of the underlying volume before no new volumes can be allocated to the volume destination. diff --git a/doc/source/configuration/tables/cinder-windows.rst b/doc/source/configuration/tables/cinder-windows.rst deleted file mode 100644 index a263e3e33..000000000 --- a/doc/source/configuration/tables/cinder-windows.rst +++ /dev/null @@ -1,22 +0,0 @@ -.. - Warning: Do not edit this file. It is automatically generated from the - software project's code and your changes will be overwritten. - - The tool to generate this file lives in openstack-doc-tools repository. - - Please make any changes needed in the code, then run the - autogenerate-config-doc tool from the openstack-doc-tools repository, or - ask for help on the documentation mailing list, IRC channel or meeting. - -.. _cinder-windows: - -.. list-table:: Description of Windows configuration options - :header-rows: 1 - :class: config-ref-table - - * - Configuration option = Default value - - Description - * - **[DEFAULT]** - - - * - ``windows_iscsi_lun_path`` = ``C:\iSCSIVirtualDisks`` - - (String) Path to store VHD backed volumes diff --git a/doc/source/configuration/tables/cinder-xio.rst b/doc/source/configuration/tables/cinder-xio.rst deleted file mode 100644 index 0efaaf232..000000000 --- a/doc/source/configuration/tables/cinder-xio.rst +++ /dev/null @@ -1,32 +0,0 @@ -.. - Warning: Do not edit this file. It is automatically generated from the - software project's code and your changes will be overwritten. - - The tool to generate this file lives in openstack-doc-tools repository. - - Please make any changes needed in the code, then run the - autogenerate-config-doc tool from the openstack-doc-tools repository, or - ask for help on the documentation mailing list, IRC channel or meeting. - -.. _cinder-xio: - -.. 
list-table:: Description of X-IO volume driver configuration options - :header-rows: 1 - :class: config-ref-table - - * - Configuration option = Default value - - Description - * - **[DEFAULT]** - - - * - ``driver_use_ssl`` = ``False`` - - (Boolean) Tell driver to use SSL for connection to backend storage if the driver supports it. - * - ``ise_completion_retries`` = ``30`` - - (Integer) Number on retries to get completion status after issuing a command to ISE. - * - ``ise_connection_retries`` = ``5`` - - (Integer) Number of retries (per port) when establishing connection to ISE management port. - * - ``ise_raid`` = ``1`` - - (Integer) Raid level for ISE volumes. - * - ``ise_retry_interval`` = ``1`` - - (Integer) Interval (secs) between retries. - * - ``ise_storage_pool`` = ``1`` - - (Integer) Default storage pool for volumes. diff --git a/doc/source/configuration/tables/cinder-zadara.rst b/doc/source/configuration/tables/cinder-zadara.rst deleted file mode 100644 index 23edf4f1b..000000000 --- a/doc/source/configuration/tables/cinder-zadara.rst +++ /dev/null @@ -1,40 +0,0 @@ -.. - Warning: Do not edit this file. It is automatically generated from the - software project's code and your changes will be overwritten. - - The tool to generate this file lives in openstack-doc-tools repository. - - Please make any changes needed in the code, then run the - autogenerate-config-doc tool from the openstack-doc-tools repository, or - ask for help on the documentation mailing list, IRC channel or meeting. - -.. _cinder-zadara: - -.. 
list-table:: Description of Zadara Storage driver configuration options - :header-rows: 1 - :class: config-ref-table - - * - Configuration option = Default value - - Description - * - **[DEFAULT]** - - - * - ``zadara_default_snap_policy`` = ``False`` - - (Boolean) VPSA - Attach snapshot policy for volumes - * - ``zadara_password`` = ``None`` - - (String) VPSA - Password - * - ``zadara_use_iser`` = ``True`` - - (Boolean) VPSA - Use ISER instead of iSCSI - * - ``zadara_user`` = ``None`` - - (String) VPSA - Username - * - ``zadara_vol_encrypt`` = ``False`` - - (Boolean) VPSA - Default encryption policy for volumes - * - ``zadara_vol_name_template`` = ``OS_%s`` - - (String) VPSA - Default template for VPSA volume names - * - ``zadara_vpsa_host`` = ``None`` - - (String) VPSA - Management Host name or IP address - * - ``zadara_vpsa_poolname`` = ``None`` - - (String) VPSA - Storage Pool assigned for volumes - * - ``zadara_vpsa_port`` = ``None`` - - (Port number) VPSA - Port number - * - ``zadara_vpsa_use_ssl`` = ``False`` - - (Boolean) VPSA - Use SSL connection diff --git a/doc/source/configuration/tables/cinder-zfssa-iscsi.rst b/doc/source/configuration/tables/cinder-zfssa-iscsi.rst deleted file mode 100644 index a6b81d915..000000000 --- a/doc/source/configuration/tables/cinder-zfssa-iscsi.rst +++ /dev/null @@ -1,56 +0,0 @@ -.. - Warning: Do not edit this file. It is automatically generated from the - software project's code and your changes will be overwritten. - - The tool to generate this file lives in openstack-doc-tools repository. - - Please make any changes needed in the code, then run the - autogenerate-config-doc tool from the openstack-doc-tools repository, or - ask for help on the documentation mailing list, IRC channel or meeting. - -.. _cinder-zfssa-iscsi: - -.. 
list-table:: Description of ZFS Storage Appliance iSCSI driver configuration options - :header-rows: 1 - :class: config-ref-table - - * - Configuration option = Default value - - Description - * - **[DEFAULT]** - - - * - ``zfssa_initiator`` = - - (String) iSCSI initiator IQNs. (comma separated) - * - ``zfssa_initiator_config`` = - - (String) iSCSI initiators configuration. - * - ``zfssa_initiator_group`` = - - (String) iSCSI initiator group. - * - ``zfssa_initiator_password`` = - - (String) Secret of the iSCSI initiator CHAP user. - * - ``zfssa_initiator_user`` = - - (String) iSCSI initiator CHAP user (name). - * - ``zfssa_lun_compression`` = ``off`` - - (String) Data compression. - * - ``zfssa_lun_logbias`` = ``latency`` - - (String) Synchronous write bias. - * - ``zfssa_lun_sparse`` = ``False`` - - (Boolean) Flag to enable sparse (thin-provisioned): True, False. - * - ``zfssa_lun_volblocksize`` = ``8k`` - - (String) Block size. - * - ``zfssa_pool`` = ``None`` - - (String) Storage pool name. - * - ``zfssa_project`` = ``None`` - - (String) Project name. - * - ``zfssa_replication_ip`` = - - (String) IP address used for replication data. (maybe the same as data ip) - * - ``zfssa_rest_timeout`` = ``None`` - - (Integer) REST connection timeout. (seconds) - * - ``zfssa_target_group`` = ``tgt-grp`` - - (String) iSCSI target group name. - * - ``zfssa_target_interfaces`` = ``None`` - - (String) Network interfaces of iSCSI targets. (comma separated) - * - ``zfssa_target_password`` = - - (String) Secret of the iSCSI target CHAP user. - * - ``zfssa_target_portal`` = ``None`` - - (String) iSCSI target portal (Data-IP:Port, w.x.y.z:3260). - * - ``zfssa_target_user`` = - - (String) iSCSI target CHAP user (name). diff --git a/doc/source/configuration/tables/cinder-zfssa-nfs.rst b/doc/source/configuration/tables/cinder-zfssa-nfs.rst deleted file mode 100644 index 623e514ac..000000000 --- a/doc/source/configuration/tables/cinder-zfssa-nfs.rst +++ /dev/null @@ -1,46 +0,0 @@ -.. 
- Warning: Do not edit this file. It is automatically generated from the - software project's code and your changes will be overwritten. - - The tool to generate this file lives in openstack-doc-tools repository. - - Please make any changes needed in the code, then run the - autogenerate-config-doc tool from the openstack-doc-tools repository, or - ask for help on the documentation mailing list, IRC channel or meeting. - -.. _cinder-zfssa-nfs: - -.. list-table:: Description of ZFS Storage Appliance NFS driver configuration options - :header-rows: 1 - :class: config-ref-table - - * - Configuration option = Default value - - Description - * - **[DEFAULT]** - - - * - ``zfssa_cache_directory`` = ``os-cinder-cache`` - - (String) Name of directory inside zfssa_nfs_share where cache volumes are stored. - * - ``zfssa_cache_project`` = ``os-cinder-cache`` - - (String) Name of ZFSSA project where cache volumes are stored. - * - ``zfssa_data_ip`` = ``None`` - - (String) Data path IP address - * - ``zfssa_enable_local_cache`` = ``True`` - - (Boolean) Flag to enable local caching: True, False. - * - ``zfssa_https_port`` = ``443`` - - (String) HTTPS port number - * - ``zfssa_manage_policy`` = ``loose`` - - (String) Driver policy for volume manage. - * - ``zfssa_nfs_mount_options`` = - - (String) Options to be passed while mounting share over nfs - * - ``zfssa_nfs_pool`` = - - (String) Storage pool name. - * - ``zfssa_nfs_project`` = ``NFSProject`` - - (String) Project name. - * - ``zfssa_nfs_share`` = ``nfs_share`` - - (String) Share name. - * - ``zfssa_nfs_share_compression`` = ``off`` - - (String) Data compression. - * - ``zfssa_nfs_share_logbias`` = ``latency`` - - (String) Synchronous write bias-latency, throughput. - * - ``zfssa_rest_timeout`` = ``None`` - - (Integer) REST connection timeout. 
(seconds) diff --git a/doc/source/configuration/tables/cinder-zones.rst b/doc/source/configuration/tables/cinder-zones.rst deleted file mode 100644 index 672af8d33..000000000 --- a/doc/source/configuration/tables/cinder-zones.rst +++ /dev/null @@ -1,22 +0,0 @@ -.. - Warning: Do not edit this file. It is automatically generated from the - software project's code and your changes will be overwritten. - - The tool to generate this file lives in openstack-doc-tools repository. - - Please make any changes needed in the code, then run the - autogenerate-config-doc tool from the openstack-doc-tools repository, or - ask for help on the documentation mailing list, IRC channel or meeting. - -.. _cinder-zones: - -.. list-table:: Description of zones configuration options - :header-rows: 1 - :class: config-ref-table - - * - Configuration option = Default value - - Description - * - **[DEFAULT]** - - - * - ``cloned_volume_same_az`` = ``True`` - - (Boolean) Ensure that the new volumes are the same AZ as snapshot or source volume diff --git a/doc/source/configuration/tables/cinder-zoning.rst b/doc/source/configuration/tables/cinder-zoning.rst deleted file mode 100644 index b9d4520bf..000000000 --- a/doc/source/configuration/tables/cinder-zoning.rst +++ /dev/null @@ -1,34 +0,0 @@ -.. - Warning: Do not edit this file. It is automatically generated from the - software project's code and your changes will be overwritten. - - The tool to generate this file lives in openstack-doc-tools repository. - - Please make any changes needed in the code, then run the - autogenerate-config-doc tool from the openstack-doc-tools repository, or - ask for help on the documentation mailing list, IRC channel or meeting. - -.. _cinder-zoning: - -.. 
list-table:: Description of zoning configuration options - :header-rows: 1 - :class: config-ref-table - - * - Configuration option = Default value - - Description - * - **[DEFAULT]** - - - * - ``zoning_mode`` = ``None`` - - (String) FC Zoning mode configured - * - **[fc-zone-manager]** - - - * - ``enable_unsupported_driver`` = ``False`` - - (Boolean) Set this to True when you want to allow an unsupported zone manager driver to start. Drivers that haven't maintained a working CI system and testing are marked as unsupported until CI is working again. This also marks a driver as deprecated and may be removed in the next release. - * - ``fc_fabric_names`` = ``None`` - - (String) Comma separated list of Fibre Channel fabric names. This list of names is used to retrieve other SAN credentials for connecting to each SAN fabric - * - ``fc_san_lookup_service`` = ``cinder.zonemanager.drivers.brocade.brcd_fc_san_lookup_service.BrcdFCSanLookupService`` - - (String) FC SAN Lookup Service - * - ``zone_driver`` = ``cinder.zonemanager.drivers.brocade.brcd_fc_zone_driver.BrcdFCZoneDriver`` - - (String) FC Zone Driver responsible for zone management - * - ``zoning_policy`` = ``initiator-target`` - - (String) Zoning policy configured by user; valid values include "initiator-target" or "initiator" diff --git a/doc/source/configuration/tables/cinder-zoning_fabric_brcd.rst b/doc/source/configuration/tables/cinder-zoning_fabric_brcd.rst deleted file mode 100644 index 79b46d8d2..000000000 --- a/doc/source/configuration/tables/cinder-zoning_fabric_brcd.rst +++ /dev/null @@ -1,42 +0,0 @@ -.. - Warning: Do not edit this file. It is automatically generated from the - software project's code and your changes will be overwritten. - - The tool to generate this file lives in openstack-doc-tools repository. 
- - Please make any changes needed in the code, then run the - autogenerate-config-doc tool from the openstack-doc-tools repository, or - ask for help on the documentation mailing list, IRC channel or meeting. - -.. _cinder-zoning_fabric_brcd: - -.. list-table:: Description of brocade zoning fabrics configuration options - :header-rows: 1 - :class: config-ref-table - - * - Configuration option = Default value - - Description - * - **[BRCD_FABRIC_EXAMPLE]** - - - * - ``fc_fabric_address`` = - - (String) Management IP of fabric. - * - ``fc_fabric_password`` = - - (String) Password for user. - * - ``fc_fabric_port`` = ``22`` - - (Port number) Connecting port - * - ``fc_fabric_ssh_cert_path`` = - - (String) Local SSH certificate Path. - * - ``fc_fabric_user`` = - - (String) Fabric user ID. - * - ``fc_southbound_protocol`` = ``HTTP`` - - (String) South bound connector for the fabric. - * - ``fc_virtual_fabric_id`` = ``None`` - - (String) Virtual Fabric ID. - * - ``principal_switch_wwn`` = ``None`` - - (String) DEPRECATED: Principal switch WWN of the fabric. This option is not used anymore. - * - ``zone_activate`` = ``True`` - - (Boolean) Overridden zoning activation state. - * - ``zone_name_prefix`` = ``openstack`` - - (String) Overridden zone name prefix. - * - ``zoning_policy`` = ``initiator-target`` - - (String) Overridden zoning policy. diff --git a/doc/source/configuration/tables/cinder-zoning_fabric_cisco.rst b/doc/source/configuration/tables/cinder-zoning_fabric_cisco.rst deleted file mode 100644 index 911f66d5c..000000000 --- a/doc/source/configuration/tables/cinder-zoning_fabric_cisco.rst +++ /dev/null @@ -1,36 +0,0 @@ -.. - Warning: Do not edit this file. It is automatically generated from the - software project's code and your changes will be overwritten. - - The tool to generate this file lives in openstack-doc-tools repository. 
- - Please make any changes needed in the code, then run the - autogenerate-config-doc tool from the openstack-doc-tools repository, or - ask for help on the documentation mailing list, IRC channel or meeting. - -.. _cinder-zoning_fabric_cisco: - -.. list-table:: Description of cisco zoning fabrics configuration options - :header-rows: 1 - :class: config-ref-table - - * - Configuration option = Default value - - Description - * - **[CISCO_FABRIC_EXAMPLE]** - - - * - ``cisco_fc_fabric_address`` = - - (String) Management IP of fabric - * - ``cisco_fc_fabric_password`` = - - (String) Password for user - * - ``cisco_fc_fabric_port`` = ``22`` - - (Port number) Connecting port - * - ``cisco_fc_fabric_user`` = - - (String) Fabric user ID - * - ``cisco_zone_activate`` = ``True`` - - (Boolean) overridden zoning activation state - * - ``cisco_zone_name_prefix`` = ``None`` - - (String) overridden zone name prefix - * - ``cisco_zoning_policy`` = ``initiator-target`` - - (String) overridden zoning policy - * - ``cisco_zoning_vsan`` = ``None`` - - (String) VSAN of the Fabric diff --git a/doc/source/configuration/tables/cinder-zoning_manager_brcd.rst b/doc/source/configuration/tables/cinder-zoning_manager_brcd.rst deleted file mode 100644 index 22a883ebd..000000000 --- a/doc/source/configuration/tables/cinder-zoning_manager_brcd.rst +++ /dev/null @@ -1,22 +0,0 @@ -.. - Warning: Do not edit this file. It is automatically generated from the - software project's code and your changes will be overwritten. - - The tool to generate this file lives in openstack-doc-tools repository. - - Please make any changes needed in the code, then run the - autogenerate-config-doc tool from the openstack-doc-tools repository, or - ask for help on the documentation mailing list, IRC channel or meeting. - -.. _cinder-zoning_manager_brcd: - -.. 
list-table:: Description of brocade zoning manager configuration options - :header-rows: 1 - :class: config-ref-table - - * - Configuration option = Default value - - Description - * - **[fc-zone-manager]** - - - * - ``brcd_sb_connector`` = ``HTTP`` - - (String) South bound connector for zoning operation diff --git a/doc/source/configuration/tables/cinder-zoning_manager_cisco.rst b/doc/source/configuration/tables/cinder-zoning_manager_cisco.rst deleted file mode 100644 index fbf0324dc..000000000 --- a/doc/source/configuration/tables/cinder-zoning_manager_cisco.rst +++ /dev/null @@ -1,22 +0,0 @@ -.. - Warning: Do not edit this file. It is automatically generated from the - software project's code and your changes will be overwritten. - - The tool to generate this file lives in openstack-doc-tools repository. - - Please make any changes needed in the code, then run the - autogenerate-config-doc tool from the openstack-doc-tools repository, or - ask for help on the documentation mailing list, IRC channel or meeting. - -.. _cinder-zoning_manager_cisco: - -.. list-table:: Description of cisco zoning manager configuration options - :header-rows: 1 - :class: config-ref-table - - * - Configuration option = Default value - - Description - * - **[fc-zone-manager]** - - - * - ``cisco_sb_connector`` = ``cinder.zonemanager.drivers.cisco.cisco_fc_zone_client_cli.CiscoFCZoneClientCLI`` - - (String) Southbound connector for zoning operation diff --git a/doc/source/configuration/tables/cinder-zte.rst b/doc/source/configuration/tables/cinder-zte.rst deleted file mode 100644 index fa7f0d75d..000000000 --- a/doc/source/configuration/tables/cinder-zte.rst +++ /dev/null @@ -1,52 +0,0 @@ -.. - Warning: Do not edit this file. It is automatically generated from the - software project's code and your changes will be overwritten. - - The tool to generate this file lives in openstack-doc-tools repository. 
- - Please make any changes needed in the code, then run the - autogenerate-config-doc tool from the openstack-doc-tools repository, or - ask for help on the documentation mailing list, IRC channel or meeting. - -.. _cinder-zte: - -.. list-table:: Description of Zte volume driver configuration options - :header-rows: 1 - :class: config-ref-table - - * - Configuration option = Default value - - Description - * - **[DEFAULT]** - - - * - ``zteAheadReadSize`` = ``8`` - - (Integer) Cache readahead size. - * - ``zteCachePolicy`` = ``1`` - - (Integer) Cache policy. 0, Write Back; 1, Write Through. - * - ``zteChunkSize`` = ``4`` - - (Integer) Virtual block size of pool. Unit : KB. Valid value : 4, 8, 16, 32, 64, 128, 256, 512. - * - ``zteControllerIP0`` = ``None`` - - (IP) Main controller IP. - * - ``zteControllerIP1`` = ``None`` - - (IP) Slave controller IP. - * - ``zteLocalIP`` = ``None`` - - (IP) Local IP. - * - ``ztePoolVoAllocatedPolicy`` = ``0`` - - (Integer) Pool volume allocated policy. 0, Auto; 1, High Performance Tier First; 2, Performance Tier First; 3, Capacity Tier First. - * - ``ztePoolVolAlarmStopAllocatedFlag`` = ``0`` - - (Integer) Pool volume alarm stop allocated flag. - * - ``ztePoolVolAlarmThreshold`` = ``0`` - - (Integer) Pool volume alarm threshold. [0, 100] - * - ``ztePoolVolInitAllocatedCapacity`` = ``0`` - - (Integer) Pool volume init allocated Capacity.Unit : KB. - * - ``ztePoolVolIsThin`` = ``False`` - - (Integer) Whether it is a thin volume. - * - ``ztePoolVolMovePolicy`` = ``0`` - - (Integer) Pool volume move policy.0, Auto; 1, Highest Available; 2, Lowest Available; 3, No Relocation. - * - ``zteSSDCacheSwitch`` = ``1`` - - (Integer) SSD cache switch. 0, OFF; 1, ON. - * - ``zteStoragePool`` = - - (List) Pool name list. - * - ``zteUserName`` = ``None`` - - (String) User name. - * - ``zteUserPassword`` = ``None`` - - (String) User password. 
diff --git a/doc/source/configuration/tables/manual/cinder-netapp_cdot_extraspecs.rst b/doc/source/configuration/tables/manual/cinder-netapp_cdot_extraspecs.rst deleted file mode 100644 index dbe600c03..000000000 --- a/doc/source/configuration/tables/manual/cinder-netapp_cdot_extraspecs.rst +++ /dev/null @@ -1,68 +0,0 @@ -.. list-table:: Description of extra specs options for NetApp Unified Driver with Clustered Data ONTAP - :header-rows: 1 - - * - Extra spec - - Type - - Description - * - ``netapp_raid_type`` - - String - - Limit the candidate volume list based on one of the following raid - types: ``raid4, raid_dp``. - * - ``netapp_disk_type`` - - String - - Limit the candidate volume list based on one of the following disk - types: ``ATA, BSAS, EATA, FCAL, FSAS, LUN, MSATA, SAS, SATA, SCSI, XATA, - XSAS, or SSD.`` - * - ``netapp:qos_policy_group`` [1]_ - - String - - Specify the name of a QoS policy group, which defines measurable Service - Level Objectives, that should be applied to the OpenStack Block Storage - volume at the time of volume creation. Ensure that the QoS policy group - object within Data ONTAP should be defined before an OpenStack Block - Storage volume is created, and that the QoS policy group is not - associated with the destination FlexVol volume. - * - ``netapp_mirrored`` - - Boolean - - Limit the candidate volume list to only the ones that are mirrored on - the storage controller. - * - ``netapp_unmirrored`` [2]_ - - Boolean - - Limit the candidate volume list to only the ones that are not mirrored - on the storage controller. - * - ``netapp_dedup`` - - Boolean - - Limit the candidate volume list to only the ones that have deduplication - enabled on the storage controller. - * - ``netapp_nodedup`` - - Boolean - - Limit the candidate volume list to only the ones that have deduplication - disabled on the storage controller. 
- * - ``netapp_compression`` - - Boolean - - Limit the candidate volume list to only the ones that have compression - enabled on the storage controller. - * - ``netapp_nocompression`` - - Boolean - - Limit the candidate volume list to only the ones that have compression - disabled on the storage controller. - * - ``netapp_thin_provisioned`` - - Boolean - - Limit the candidate volume list to only the ones that support thin - provisioning on the storage controller. - * - ``netapp_thick_provisioned`` - - Boolean - - Limit the candidate volume list to only the ones that support thick - provisioning on the storage controller. - -.. [1] - Please note that this extra spec has a colon (``:``) in its name - because it is used by the driver to assign the QoS policy group to - the OpenStack Block Storage volume after it has been provisioned. - -.. [2] - In the Juno release, these negative-assertion extra specs are - formally deprecated by the NetApp unified driver. Instead of using - the deprecated negative-assertion extra specs (for example, - ``netapp_unmirrored``) with a value of ``true``, use the - corresponding positive-assertion extra spec (for example, - ``netapp_mirrored``) with a value of ``false``. diff --git a/doc/source/contributor/README.rst b/doc/source/contributor/README.rst deleted file mode 100644 index 127540cb5..000000000 --- a/doc/source/contributor/README.rst +++ /dev/null @@ -1,17 +0,0 @@ -================================ -Cinder Contributor Documentation -================================ - -Introduction: -------------- - -This directory is intended to hold any documentation that relates to -how to contirbute to Cinder or how the project is managed. Some of this -content was previous under 'developer' in openstack-manuals. The content -of the documentation, however, goes beyond just developers to anyone -contributing to the project, thus the change in naming. 
- -The full spec for organization of documentation may be seen in the -`OS Manuals Migration Spec -`. - diff --git a/doc/source/contributor/addmethod.openstackapi.rst b/doc/source/contributor/addmethod.openstackapi.rst deleted file mode 100644 index 7ca6c5ec3..000000000 --- a/doc/source/contributor/addmethod.openstackapi.rst +++ /dev/null @@ -1,70 +0,0 @@ -.. - Copyright 2010-2011 OpenStack Foundation - All Rights Reserved. - - Licensed under the Apache License, Version 2.0 (the "License"); you may - not use this file except in compliance with the License. You may obtain - a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - License for the specific language governing permissions and limitations - under the License. - -Adding a Method to the OpenStack API -==================================== - -The interface is a mostly RESTful API. REST stands for Representational State Transfer and provides an architecture "style" for distributed systems using HTTP for transport. Figure out a way to express your request and response in terms of resources that are being created, modified, read, or destroyed. - -Routing -------- - -To map URLs to controllers+actions, OpenStack uses the Routes package, a clone of Rails routes for Python implementations. See http://routes.groovie.org/ for more information. - -URLs are mapped to "action" methods on "controller" classes in ``cinder/api/openstack/__init__/ApiRouter.__init__`` . - -See http://routes.groovie.org/manual.html for all syntax, but you'll probably just need these two: - - mapper.connect() lets you map a single URL to a single action on a controller. - - mapper.resource() connects many standard URLs to actions on a controller. 
- -Controllers and actions ------------------------ - -Controllers live in ``cinder/api/openstack``, and inherit from cinder.wsgi.Controller. - -See ``cinder/api/v2/volumes.py`` for an example. - -Action methods take parameters that are sucked out of the URL by mapper.connect() or .resource(). The first two parameters are self and the WebOb request, from which you can get the req.environ, req.body, req.headers, etc. - -Serialization -------------- - -Actions return a dictionary, and wsgi.Controller serializes that to JSON or XML based on the request's content-type. - -Errors ------- - -There will be occasions when you will want to return a REST error response to -the caller and there are multiple valid ways to do this: - -- If you are at the controller level you can use a ``faults.Fault`` instance to - indicate the error. You can either return the ``Fault`` instance as the - result of the action, or raise it, depending on what's more convenient: - ``raise faults.Fault(webob.exc.HTTPBadRequest(explanation=msg))``. - -- If you are raising an exception our WSGI middleware exception handler is - smart enough to recognize webob exceptions as well, so you don't really need - to wrap the exceptions in a ``Fault`` class and you can just let the - middleware add it for you: - ``raise webob.exc.HTTPBadRequest(explanation=msg)``. - -- While most errors require an explicit webob exception there are some Cinder - exceptions (``NotFound`` and ``Invalid``) that are so common that they are - directly handled by the middleware and don't need us to convert them, we can - just raise them at any point in the API service and they will return the - appropriate REST error to the caller. So any ``NotFound`` exception, or - child class, will return a 404 error, and any ``Invalid`` exception, or - child class, will return a 400 error. 
diff --git a/doc/source/contributor/api.apache.rst b/doc/source/contributor/api.apache.rst deleted file mode 100644 index 57145f4dc..000000000 --- a/doc/source/contributor/api.apache.rst +++ /dev/null @@ -1,38 +0,0 @@ -.. - All Rights Reserved. - - Licensed under the Apache License, Version 2.0 (the "License"); you may - not use this file except in compliance with the License. You may obtain - a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - License for the specific language governing permissions and limitations - under the License. - - -Running Cinder API under Apache -=============================== - -Files ------ -Copy the file etc/cinder/api-httpd.conf to the appropriate location for your Apache server, most likely: - -``/etc/httpd/conf.d/cinder_wsgi.conf`` - -Update this file to match your system configuration (for example, some distributions put httpd logs in the apache2 directory and some in the httpd directory). -Create the directory /var/www/cgi-bin/cinder/. You can either hard or soft link the file cinder/wsgi/wsgi.py to be osapi_volume under the /var/www/cgi-bin/cinder/ directory. For a distribution appropriate place, it should probably be copied to: - -``/usr/share/openstack/cinder/httpd/cinder.py`` - -Cinder's primary configuration file (etc/cinder.conf) and the PasteDeploy configuration file (etc/cinder-paste.ini) must be readable to httpd in one of the default locations described in Configuring Cinder. - - -Access Control --------------- - -If you are running with Linux kernel security module enabled (for example SELinux or AppArmor), make sure that the configuration file has the appropriate context to access the linked file. 
- diff --git a/doc/source/contributor/api.rst b/doc/source/contributor/api.rst deleted file mode 100644 index 350f9a4aa..000000000 --- a/doc/source/contributor/api.rst +++ /dev/null @@ -1,97 +0,0 @@ -.. - Copyright 2010-2011 United States Government as represented by the - Administrator of the National Aeronautics and Space Administration. - All Rights Reserved. - - Licensed under the Apache License, Version 2.0 (the "License"); you may - not use this file except in compliance with the License. You may obtain - a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - License for the specific language governing permissions and limitations - under the License. - -API Endpoint -============ - -Cinder has a system for managing multiple APIs on different subdomains. -Currently there is support for the OpenStack API. - -Common Components ------------------ - -The :mod:`cinder.api` Module -~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -.. automodule:: cinder.api - :noindex: - :members: - :undoc-members: - :show-inheritance: - - -Tests ------ - -The :mod:`api` Module -~~~~~~~~~~~~~~~~~~~~~ - -.. automodule:: cinder.tests.unit.api - :noindex: - :members: - :undoc-members: - :show-inheritance: - - -The :mod:`api.fakes` Module -~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -.. automodule:: cinder.tests.unit.api.fakes - :noindex: - :members: - :undoc-members: - :show-inheritance: - - -The :mod:`api.openstack` Module -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -.. automodule:: cinder.tests.unit.api.openstack - :noindex: - :members: - :undoc-members: - :show-inheritance: - - -The :mod:`api.openstack.test_wsgi` Module -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -.. 
automodule:: cinder.tests.unit.api.openstack.test_wsgi - :noindex: - :members: - :undoc-members: - :show-inheritance: - - -The :mod:`test_auth` Module -~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -.. automodule:: cinder.tests.unit.api.middleware.test_auth - :noindex: - :members: - :undoc-members: - :show-inheritance: - - -The :mod:`test_faults` Module -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -.. automodule:: cinder.tests.unit.api.middleware.test_faults - :noindex: - :members: - :undoc-members: - :show-inheritance: diff --git a/doc/source/contributor/api_conditional_updates.rst b/doc/source/contributor/api_conditional_updates.rst deleted file mode 100644 index d240e70f1..000000000 --- a/doc/source/contributor/api_conditional_updates.rst +++ /dev/null @@ -1,499 +0,0 @@ -API Races - Conditional Updates -=============================== - -Background ----------- - -On Cinder API nodes we have to check that requested action can be performed by -checking request arguments and involved resources, and only if everything -matches required criteria we will proceed with the RPC call to any of the other -nodes. - -Checking the conditions must be done in a non racy way to ensure that already -checked requirements don't change while we check remaining conditions. This is -of utter importance, as Cinder uses resource status as a lock to prevent -concurrent operations on a resource. - -An simple example of this would be extending a volume, where we first check the -status: - -.. code:: python - - if volume['status'] != 'available': - -Then update the status: - -.. code:: python - - self.update(context, volume, {'status': 'extending'}) - -And finally make the RPC call: - -.. code:: python - - self.volume_rpcapi.extend_volume(context, volume, new_size, - reservations) - -The problem is that this code would allow races, as other request could -have already changed the volume status between us getting the value and -updating the DB. 
- -There are multiple ways to fix this, such as: - -- Using a Distributed Locking Mechanism -- Using DB isolation level -- Using SQL SELECT ... FOR UPDATE -- USING compare and swap mechanism in SQL query - -Our tests showed that the best alternative was compare and swap and we decided -to call this mechanism "Conditional Update" as it seemed more appropriate. - -Conditional Update ------------------- - -Conditional Update is the mechanism we use in Cinder to prevent races when -updating the DB. In essence it is the SQL equivalent of an ``UPDATE ... FROM -... WHERE;`` clause - -It is implemented as an abstraction layer on top of SQLAlchemy ORM engine in -our DB api layer and exposed for consumption in Cinder's Persistent Versioned -Objects through the ``conditional_update`` method so it can be used from any -Versioned Object instance that has persistence (Volume, Snapshot, Backup...). - -Method signature is: - -.. code:: python - - def conditional_update(self, values, expected_values=None, filters=(), - save_all=False, session=None, reflect_changes=True): - -:values: - Dictionary of key-value pairs with changes that we want to make to the - resource in the DB. - -:expected_values: - Dictionary with conditions that must be met for the update to be executed. - - Condition ``field.id == resource.id`` is implicit and there is no need to add - it to the conditions. - - If no ``expected_values`` argument is provided update will only go through if - no field in the DB has changed. Dirty fields from the Versioned Object are - excluded as we don't know their original value. - -:filters: - Additional SQLAlchemy filters can be provided for more complex conditions. - -:save_all: - By default we will only be updating the DB with values provided in the - ``values`` argument, but we can explicitly say that we also want to save - object's current dirty fields. - -:session: - A SQLAlchemy session can be provided, although it is unlikely to be needed. 
- -:reflect_changes: - On a successful update we will also update Versioned Object instance to - reflect these changes, but we can prevent this instance update passing False - on this argument. - -:Return Value: - We'll return the number of changed rows. So we'll get a 0 value if the - conditional update has not been successful instead of an exception. - -Basic Usage ------------ - -- **Simple match** - - The most basic example is doing a simple match, for example for a ``volume`` - variable that contains a Versioned Object Volume class instance we may want - to change the ``status`` to "deleting" and update the ``terminated_at`` field - with current UTC time only if current ``status`` is "available" and the - volume is not in a consistency group. - - .. code:: python - - values={'status': 'deleting', - 'terminated_at': timeutils.utcnow()} - expected_values = {'status': 'available', - 'consistencygroup_id': None} - - volume.conditional_update(values, expected_values) - -- **Iterable match** - - Conditions can contain not only single values, but also iterables, and the - conditional update mechanism will correctly handle the presence of None - values in the range, unlike SQL ``IN`` clause that doesn't support ``NULL`` - values. - - .. code:: python - - values={'status': 'deleting', - 'terminated_at': timeutils.utcnow()} - expected_values={ - 'status': ('available', 'error', 'error_restoring' 'error_extending'), - 'migration_status': (None, 'deleting', 'error', 'success'), - 'consistencygroup_id': None - } - - volume.conditional_update(values, expected_values) - -- **Exclusion** - - In some cases we'll need to set conditions on what is *not* in the DB record - instead of what is is, for that we will use the exclusion mechanism provided - by the ``Not`` class in all persistent objects. This class accepts single - values as well as iterables. - - .. 
code:: python - - values={'status': 'deleting', - 'terminated_at': timeutils.utcnow()} - expected_values={ - 'attach_status': volume.Not('attached'), - 'status': ('available', 'error', 'error_restoring' 'error_extending'), - 'migration_status': (None, 'deleting', 'error', 'success'), - 'consistencygroup_id': None - } - - volume.conditional_update(values, expected_values) - -- **Filters** - - We can use complex filters in the conditions, but these must be SQLAlchemy - queries/conditions and as the rest of the DB methods must be properly - abstracted from the API. - - Therefore we will create the method in cinder/db/sqlalchemy/api.py: - - .. code:: python - - def volume_has_snapshots_filter(): - return sql.exists().where( - and_(models.Volume.id == models.Snapshot.volume_id, - ~models.Snapshot.deleted)) - - Then expose this filter through the cinder/db/api.py: - - .. code:: python - - def volume_has_snapshots_filter(): - return IMPL.volume_has_snapshots_filter() - - And finally used in the API (notice how we are negating the filter at the - API): - - .. code:: python - - filters = [~db.volume_has_snapshots_filter()] - values={'status': 'deleting', - 'terminated_at': timeutils.utcnow()} - expected_values={ - 'attach_status': volume.Not('attached'), - 'status': ('available', 'error', 'error_restoring' 'error_extending'), - 'migration_status': (None, 'deleting', 'error', 'success'), - 'consistencygroup_id': None - } - - volume.conditional_update(values, expected_values, filters) - -Returning Errors ----------------- - -The most important downside of using conditional updates to remove API races is -the inherent uncertainty of the cause of failure resulting in more generic -error messages. - -When we use the `conditional_update` method we'll use returned value to -determine the success of the operation, as a value of 0 indicates that no rows -have been updated and the conditions were not met. But we don't know which -one, or which ones, were the cause of the failure. 
- -There are 2 approaches to this issue: - -- On failure we go one by one checking the conditions and return the first one - that fails. - -- We return a generic error message indicating all conditions that must be met - for the operation to succeed. - -It was decided that we would go with the second approach, because even though -the first approach was closer to what we already had and would give a better -user experience, it had considerable implications such as: - -- More code was needed to do individual checks making operations considerable - longer and less readable. This was greatly alleviated using helper methods - to return the errors. - -- Higher number of DB queries required to determine failure cause. - -- Since there could be races because DB contents could be changed between the - failed update and the follow up queries that checked the values for the - specific error, a loop would be needed to make sure that either the - conditional update succeeds or one of the condition checks fails. - -- Having such a loop means that a small error in the code could lead to an - endless loop in a production environment. This coding error could be an - incorrect conditional update filter that would always fail or a missing or - incorrect condition that checked for the specific issue to return the error. - -A simple example of a generic error can be found in `begin_detaching` code: - -.. code:: python - - @wrap_check_policy - def begin_detaching(self, context, volume): - # If we are in the middle of a volume migration, we don't want the - # user to see that the volume is 'detaching'. Having - # 'migration_status' set will have the same effect internally. - expected = {'status': 'in-use', - 'attach_status': 'attached', - 'migration_status': self.AVAILABLE_MIGRATION_STATUS} - - result = volume.conditional_update({'status': 'detaching'}, expected) - - if not (result or self._is_volume_migrating(volume)): - msg = _("Unable to detach volume. 
Volume status must be 'in-use' " - "and attach_status must be 'attached' to detach.") - LOG.error(msg) - raise exception.InvalidVolume(reason=msg) - -Building filters on the API ---------------------------- - -SQLAlchemy filters created as mentioned above can create very powerful and -complex conditions, but sometimes we may require a condition that, while more -complex than the basic match and not match on the resource fields, it's still -quite simple. For those cases we can create filters directly on the API using -the ``model`` field provided in Versioned Objects. - -This ``model`` field is a reference to the ORM model that allows us to -reference ORM fields. - -We'll use as an example changing the ``status`` field of a backup to -"restoring" if the backup status is "available" and the volume where we are -going to restore the backup is also in "available" state. - -Joining of tables is implicit when using a model different from the one used -for the Versioned Object instance. - -- **As expected_values** - - Since this is a matching case we can use ``expected_values`` argument to make - the condition: - - .. code:: python - - values = {'status': 'restoring'} - expected_values={'status': 'available', - objects.Volume.model.id: volume.id, - objects.Volume.model.status: 'available'} - -- **As filters** - - We can also use the ``filters`` argument to achieve the same results: - - .. code:: python - - filters = [objects.Volume.model.id == volume.id, - objects.Volume.model.status == 'available'] - -- **Other filters** - - If we are not doing a match for the condition the only available option will - be to use ``filters`` argument. For example if we want to do a check on the - volume size against the backup size: - - .. 
code:: python - - filters = [objects.Volume.model.id == volume.id, - objects.Volume.model.size >= backup.model.size] - -Using DB fields for assignment ------------------------------- - -- **Using non modified fields** - - Similar to the way we use the fields to specify conditions, we can also use - them to set values in the DB. - - For example when we disable a service we want to keep existing ``updated_at`` - field value: - - .. code:: python - - values = {'disabled': True, - 'updated_at': service.model.updated_at} - -- **Using modified field** - - In some cases we may need to use a DB field that we are also updating, for - example when we are updating the ``status`` but we also want to keep the old - value in the ``previous_status`` field. - - .. code:: python - - values = {'status': 'retyping', - 'previous_status': volume.model.status} - - Conditional update mechanism takes into account that MySQL does not follow - SQL language specs and adjusts the query creation accordingly. - -- **Together with filters** - - Using DB fields for assignment together with using them for values can give - us advanced functionality like for example increasing a quota value based on - current value and making sure we don't exceed our quota limits. - - .. code:: python - - values = {'in_use': quota.model.in_use + volume.size} - filters = [quota.model.in_use <= max_usage - volume.size] - -Conditional value setting -------------------------- - -Under certain circumstances you may not know what value should be set in the DB -because it depends on another field or on another condition. For those cases -we can use the ``Case`` class present in our persistent Versioned Objects which -implements the SQL CASE clause. - -The idea is simple, using ``Case`` class we can say which values to set in a -field based on conditions and also set a default value if none of the -conditions are True. - -Conditions must be SQLAlchemy conditions, so we'll need to use fields from the - ``model`` attribute. 
- -For example setting the status to "maintenance" during migration if current -status is "available" and leaving it as it was if it's not can be done using -the following: - -.. code:: python - - values = { - 'status': volume.Case( - [ - (volume.model.status == 'available', 'maintenance') - ], - else_=volume.model.status) - } - -reflect_changes considerations ------------------------------- - -As we've already mentioned ``conditional_update`` method will update Versioned -Object instance with provided values if the row in the DB has been updated, and -in most cases this is OK since we can set the values directly because we are -using simple values, but there are cases where we don't know what value we -should set in the instance, and is in those cases where the default -``reflect_changes`` value of True has performance implications. - -There are 2 cases where Versioned Object ``conditional_update`` method doesn't -know the value it has to set on the Versioned Object instance, and they are -when we use a field for assignment and when we are using the ``Case`` class, -since in both cases the DB is the one deciding the value that will be set. - -In those cases ``conditional_update`` will have to retrieve the value from the -DB using ``get_by_id`` method, and this has a performance impact and therefore -should be avoided when possible. - -So the recommendation is to set ``reflect_changes`` to False when using -``Case`` class or using fields in the ``values`` argument if we don't care -about the stored value. - -Limitations ------------ - -We can only use functionality that works on **all** supported DBs, and that's -why we don't allow multi table updates and will raise ProgrammingError -exception even when the code is running against a DB engine that supports this -functionality. - -This way we make sure that we don't inadvertently add a multi table update that -works on MySQL but will surely fail on PostgreSQL. 
- -MySQL DB engine also has some limitations that we should be aware of when -creating our filters. - -One that is very common is when we are trying to check if there is a row that -matches a specific criteria in the same table that we are updating. For -example, when deleting a Consistency Group we want to check that it is not -being used as the source for a Consistency Group that is in the process of -being created. - -The straightforward way of doing this is using the core exists expression and -use an alias to differentiate general query fields and the exists subquery. -Code would look like this: - -.. code:: python - - def cg_creating_from_src(cg_id): - model = aliased(models.ConsistencyGroup) - return sql.exists().where(and_( - ~model.deleted, - model.status == 'creating', - conditions.append(model.source_cgid == cg_id))) - -While this will work in SQLite and PostgreSQL, it will not work on MySQL and an -error will be raised when the query is executed: "You can't specify target -table 'consistencygroups' for update in FROM clause". - -To solve this we have 2 options: - -- Create a specific query for MySQL engines using an update with a left self - join, which is a feature only available in MySQL. -- Use a trick -using a select subquery- that will work on all DBs. - -Considering that it's always better to have only 1 way of doing things and that -SQLAlchemy doesn't support MySQL's non standard behavior we should generate -these filters using the select subquery method like this: - -.. 
code:: python - - def cg_creating_from_src(cg_id): - subq = sql.select([models.ConsistencyGroup]).where(and_( - ~model.deleted, - model.status == 'creating')).alias('cg2') - - return sql.exists([subq]).where(subq.c.source_cgid == cgid) - - -Considerations for new ORM & Versioned Objects ----------------------------------------------- - -Conditional update mechanism works using generic methods for getting an object -from the DB as well as determining the model for a specific Versioned Object -instance for field binding. - -These generic methods rely on some naming rules for Versioned Object classes, -ORM classes, and get methods, so when we are creating a new ORM class and -adding the matching Versioned Object and access methods we must be careful to -follow these rules or at least specify exceptions if we have a good reason not -to follow these conventions. - -Rules: - -- Versioned Object class name must be the same as the ORM class -- Get method name must be ORM class converted to snake format with postfix - "_get". For example, for ``Volume`` ORM class expected method is - ``volume_get``, and for an imaginary ``MyORMClass`` it would be - ``my_orm_class_get``. -- Get method must receive the ``context`` as the first argument and the ``id`` - as the second one, although it may accept more optional arguments. - -We should avoid diverging from these rules whenever is possible, but there are -cases where this is not possible, for example ``BackupImport`` Versioned Object -that really uses ``Backup`` ORM class. For cases such as this we have a way to -set exceptions both for the generic get method and the model for a Versioned -Object. - -To add exceptions for the get method we have to add a new entry to -``GET_EXCEPTIONS`` dictionary mapping in -``cinder.db.sqlalchemy.api._get_get_method``. 
- -And for determining the model for the Versioned Object we have to add a new -entry to ``VO_TO_MODEL_EXCEPTIONS`` dictionary mapping in -``cinder.db.sqlalchemy.api.get_model_for_versioned_object``. diff --git a/doc/source/contributor/api_microversion_dev.rst b/doc/source/contributor/api_microversion_dev.rst deleted file mode 100644 index e8970559c..000000000 --- a/doc/source/contributor/api_microversion_dev.rst +++ /dev/null @@ -1,320 +0,0 @@ -API Microversions -================= - -Background ----------- - -Cinder uses a framework we called 'API Microversions' for allowing changes -to the API while preserving backward compatibility. The basic idea is -that a user has to explicitly ask for their request to be treated with -a particular version of the API. So breaking changes can be added to -the API without breaking users who don't specifically ask for it. This -is done with an HTTP header ``OpenStack-API-Version`` which -is a monotonically increasing semantic version number starting from -``3.0``. - -Each OpenStack service that uses microversions will share this header, so -the Volume service will need to prefix the semantic version number with the -word ``volume``:: - - OpenStack-API-Version: volume 3.0 - -If a user makes a request without specifying a version, they will get -the ``DEFAULT_API_VERSION`` as defined in -``cinder/api/openstack/api_version_request.py``. This value is currently ``3.0`` -and is expected to remain so for quite a long time. - -The Nova project was the first to implement microversions. For full -details please read Nova's `Kilo spec for microversions -`_ - -When do I need a new Microversion? ----------------------------------- - -A microversion is needed when the contract to the user is -changed. 
The user contract covers many kinds of information such as: - -- the Request - - - the list of resource URLs which exist on the server - - Example: adding a new shares/{ID}/foo which didn't exist in a - previous version of the code - - - the list of query parameters that are valid on URLs - - Example: adding a new parameter ``is_yellow`` servers/{ID}?is_yellow=True - - - the list of query parameter values for non free form fields - - Example: parameter filter_by takes a small set of constants/enums "A", - "B", "C". Adding support for new enum "D". - - - new headers accepted on a request - -- the Response - - - the list of attributes and data structures returned - - Example: adding a new attribute 'locked': True/False to the output - of shares/{ID} - - - the allowed values of non free form fields - - Example: adding a new allowed ``status`` to shares/{ID} - - - the list of status codes allowed for a particular request - - Example: an API previously could return 200, 400, 403, 404 and the - change would make the API now also be allowed to return 409. - - - changing a status code on a particular response - - Example: changing the return code of an API from 501 to 400. - - - new headers returned on a response - -The following flow chart attempts to walk through the process of "do -we need a microversion". - - -.. graphviz:: - - digraph states { - - label="Do I need a microversion?" 
- - silent_fail[shape="diamond", style="", label="Did we silently - fail to do what is asked?"]; - ret_500[shape="diamond", style="", label="Did we return a 500 - before?"]; - new_error[shape="diamond", style="", label="Are we changing what - status code is returned?"]; - new_attr[shape="diamond", style="", label="Did we add or remove an - attribute to a payload?"]; - new_param[shape="diamond", style="", label="Did we add or remove - an accepted query string parameter or value?"]; - new_resource[shape="diamond", style="", label="Did we add or remove a - resource URL?"]; - - - no[shape="box", style=rounded, label="No microversion needed"]; - yes[shape="box", style=rounded, label="Yes, you need a microversion"]; - no2[shape="box", style=rounded, label="No microversion needed, it's - a bug"]; - - silent_fail -> ret_500[label="no"]; - silent_fail -> no2[label="yes"]; - - ret_500 -> no2[label="yes [1]"]; - ret_500 -> new_error[label="no"]; - - new_error -> new_attr[label="no"]; - new_error -> yes[label="yes"]; - - new_attr -> new_param[label="no"]; - new_attr -> yes[label="yes"]; - - new_param -> new_resource[label="no"]; - new_param -> yes[label="yes"]; - - new_resource -> no[label="no"]; - new_resource -> yes[label="yes"]; - - {rank=same; yes new_attr} - {rank=same; no2 ret_500} - {rank=min; silent_fail} - } - - -**Footnotes** - -[1] - When fixing 500 errors that previously caused stack traces, try -to map the new error into the existing set of errors that API call -could previously return (400 if nothing else is appropriate). Changing -the set of allowed status codes from a request is changing the -contract, and should be part of a microversion. - -The reason why we are so strict on contract is that we'd like -application writers to be able to know, for sure, what the contract is -at every microversion in Cinder. If they do not, they will need to write -conditional code in their application to handle ambiguities. - -When in doubt, consider application authors. 
If it would work with no -client side changes on both Cinder versions, you probably don't need a -microversion. If, on the other hand, there is any ambiguity, a -microversion is probably needed. - - -In Code -------- - -In ``cinder/api/openstack/wsgi.py`` we define an ``@api_version`` decorator -which is intended to be used on top-level Controller methods. It is -not appropriate for lower-level methods. Some examples: - -Adding a new API method -~~~~~~~~~~~~~~~~~~~~~~~ - -In the controller class:: - - @wsgi.Controller.api_version("3.4") - def my_api_method(self, req, id): - .... - -This method would only be available if the caller had specified an -``OpenStack-API-Version`` of >= ``3.4``. If they had specified a -lower version (or not specified it and received the default of ``3.1``) -the server would respond with ``HTTP/404``. - -Removing an API method -~~~~~~~~~~~~~~~~~~~~~~ - -In the controller class:: - - @wsgi.Controller.api_version("3.1", "3.4") - def my_api_method(self, req, id): - .... - -This method would only be available if the caller had specified an -``OpenStack-API-Version`` of <= ``3.4``, and >= ``3.1``. If ``3.5`` or later -is specified or if ``3.0`` or earlier (/v2 or /v1 endpoint), the server will -respond with ``HTTP/404`` - -Changing a method's behaviour -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -In the controller class:: - - @wsgi.Controller.api_version("3.1", "3.3") - def my_api_method(self, req, id): - .... method_1 ... - - @my_api_method.api_version("3.4") - def my_api_method(self, req, id): - .... method_2 ... - -If a caller specified ``3.1``, ``3.2`` or ``3.3`` (or received the -default of ``3.1``) they would see the result from ``method_1``, -``3.4`` or later ``method_2``. - -We could use ``wsgi.Controller.api_version`` decorator on the second -``my_api_method`` as well, but then we would have to add ``# noqa`` to that -line to avoid failing flake8's ``F811`` rule. 
So the recommended approach is -to use the ``api_version`` decorator from the first method that is defined, as -illustrated by the example above, and then use ``my_api_method`` decorator for -subsequent api versions of the same method. - -The two methods may be different in any kind of semantics (schema validation, -return values, response codes, etc.). - -A method with only small changes between versions -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -A method may have only small changes between microversions, in which -case you can decorate a private method:: - - @wsgi.Controller.api_version("3.1", "3.4") - def _version_specific_func(self, req, arg1): - pass - - @_version_specific_func.api_version(min_ver="3.5") - def _version_specific_func(self, req, arg1): - pass - - def show(self, req, id): - .... common stuff .... - self._version_specific_func(req, "foo") - .... common stuff .... - -When not using decorators -~~~~~~~~~~~~~~~~~~~~~~~~~ - -When you don't want to use the ``@api_version`` decorator on a method -or you want to change behaviour within a method (say it leads to -simpler or simply a lot less code) you can directly test for the -requested version with a method as long as you have access to the api -request object (commonly called ``req``). Every API method has an -api_version_request object attached to the req object and that can be -used to modify behaviour based on its value:: - - def index(self, req): - - - req_version = req.api_version_request - if req_version.matches("3.1", "3.5"): - ....stuff.... - elif req_version.matches("3.6", "3.10"): - ....other stuff.... - elif req_version > api_version_request.APIVersionRequest("3.10"): - ....more stuff..... - - - -The first argument to the matches method is the minimum acceptable version -and the second is maximum acceptable version. 
A specified version can be null:: - - null_version = APIVersionRequest() - -If the minimum version specified is null then there is no restriction on -the minimum version, and likewise if the maximum version is null there -is no restriction the maximum version. Alternatively a one sided comparison -can be used as in the example above. - -Other necessary changes ------------------------ - -If you are adding a patch which adds a new microversion, it is -necessary to add changes to other places which describe your change: - -* Update ``REST_API_VERSION_HISTORY`` in - ``cinder/api/openstack/api_version_request.py`` - -* Update ``_MAX_API_VERSION`` in - ``cinder/api/openstack/api_version_request.py`` - -* Add a verbose description to - ``cinder/api/openstack/rest_api_version_history.rst``. There should - be enough information that it could be used by the docs team for - release notes. - -* Update the expected versions in affected tests. - -Allocating a microversion -------------------------- - -If you are adding a patch which adds a new microversion, it is -necessary to allocate the next microversion number. Except under -extremely unusual circumstances and this would have been mentioned in -the blueprint for the change, the minor number of ``_MAX_API_VERSION`` -will be incremented. This will also be the new microversion number for -the API change. - -It is possible that multiple microversion patches would be proposed in -parallel and the microversions would conflict between patches. This -will cause a merge conflict. We don't reserve a microversion for each -patch in advance as we don't know the final merge order. Developers -may need over time to rebase their patch calculating a new version -number as above based on the updated value of ``_MAX_API_VERSION``. - -Testing Microversioned API Methods ----------------------------------- - -Unit tests for microversions should be put in cinder/tests/unit/api/v3/ . 
-Since all existing functionality is tested in cinder/tests/unit/api/v2, -these unit tests are not replicated in .../v3, and only new functionality -needs to be place in the .../v3/directory. - -Testing a microversioned API method is very similar to a normal controller -method test, you just need to add the ``OpenStack-API-Version`` -header, for example:: - - req = fakes.HTTPRequest.blank('/testable/url/endpoint') - req.headers['OpenStack-API-Version'] = 'volume 3.6' - req.api_version_request = api_version.APIVersionRequest('3.6') - - controller = controller.TestableController() - - res = controller.index(req) - ... assertions about the response ... - diff --git a/doc/source/contributor/api_microversion_history.rst b/doc/source/contributor/api_microversion_history.rst deleted file mode 100644 index 12e4d8876..000000000 --- a/doc/source/contributor/api_microversion_history.rst +++ /dev/null @@ -1 +0,0 @@ -.. include:: ../../../cinder/api/openstack/rest_api_version_history.rst diff --git a/doc/source/contributor/architecture.rst b/doc/source/contributor/architecture.rst deleted file mode 100644 index 3b86535c7..000000000 --- a/doc/source/contributor/architecture.rst +++ /dev/null @@ -1,54 +0,0 @@ -.. - Copyright 2010-2011 United States Government as represented by the - Administrator of the National Aeronautics and Space Administration. - All Rights Reserved. - - Licensed under the Apache License, Version 2.0 (the "License"); you may - not use this file except in compliance with the License. You may obtain - a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - License for the specific language governing permissions and limitations - under the License. 
- -Cinder System Architecture -========================== - -The Cinder Block Storage Service is intended to be ran on one or more nodes. - -Cinder uses a sql-based central database that is shared by all Cinder services in the system. The amount and depth of the data fits into a sql database quite well. For small deployments this seems like an optimal solution. For larger deployments, and especially if security is a concern, cinder will be moving towards multiple data stores with some kind of aggregation system. - -Components ----------- - -Below you will find a brief explanation of the different components. - -:: - - /- ( LDAP ) - [ Auth Manager ] --- - | \- ( DB ) - | - | - cinderclient | - / \ | /- [ scheduler ] -- [ volume ] -- ( iSCSI ) - [ Web Dashboard ]- -[ api ] -- < AMQP > -- - \ / | \- [ backup ] - novaclient | - | - | - | - < REST > - - -* DB: sql database for data storage. Used by all components (LINKS NOT SHOWN). -* Web Dashboard: potential external component that talks to the api. -* api: component that receives http requests, converts commands and communicates with other components via the queue or http. -* Auth Manager: component responsible for users/projects/and roles. Can backend to DB or LDAP. This is not a separate binary, but rather a python class that is used by most components in the system. -* scheduler: decides which host gets each volume. -* volume: manages dynamically attachable block devices. -* backup: manages backups of block storage devices. diff --git a/doc/source/contributor/attach_detach_conventions.rst b/doc/source/contributor/attach_detach_conventions.rst deleted file mode 100644 index df6076dde..000000000 --- a/doc/source/contributor/attach_detach_conventions.rst +++ /dev/null @@ -1,181 +0,0 @@ -.. - Licensed under the Apache License, Version 2.0 (the "License"); you may - not use this file except in compliance with the License. 
You may obtain - a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - License for the specific language governing permissions and limitations - under the License. - -============================= -Volume Attach/Detach workflow -============================= - -There are six API calls associated with attach/detach of volumes in Cinder -(3 calls for each operation). This can lead to some confusion for developers -trying to work on Cinder. The convention is actually quite simple, although -it may be difficult to decipher from the code. - - -Attach/Detach Operations are multi-part commands -================================================ - -There are three things that happen in the workflow for an attach or detach call. - -1. Update the status of the volume in the DB (ie attaching/detaching) - -- For Attach, this is the cinder.volume.api.reserve method -- For Detach, the analogous call is cinder.volume.api.begin_detaching - -2. Handle the connection operations that need to be done on the Volume - -- For Attach, this is the cinder.volume.api.initialize_connection method -- For Detach, the analogous calls is cinder.volume.api.terminate_connection - -3. Finalize the status of the volume and release the resource - -- For attach, this is the cinder.volume.api.attach method -- For detach, the analogous call is cinder.volume.api.detach - -Attach workflow -=============== - -reserve_volume(self, context, volume) -------------------------------------- - -Probably the most simple call in to Cinder. This method simply checks that -the specified volume is in an “available” state and can be attached. -Any other state results in an Error response notifying Nova that the volume -is NOT available. 
The only valid state for this call to succeed is “available”. - -NOTE: multi-attach will add "in-use" to the above acceptable states. - -If the volume is in fact available, we immediately issue an update to the Cinder -database and mark the status of the volume to “attaching” thereby reserving the -volume so that it won’t be used by another API call anywhere else. - -initialize_connection(self, context, volume, connector) -------------------------------------------------------- - -This is the only attach related API call that should be doing any significant -work. This method is responsible for building and returning all of the info -needed by the caller (Nova) to actually attach the specified volume to the -remote node. This method returns vital information to the caller that includes -things like CHAP credential, iqn and lun information. An example response is -shown here: - -:: - - { - 'driver_volume_type': 'iscsi', - 'data': { - 'auth_password': 'YZ2Hceyh7VySh5HY', - 'target_discovered': False, - 'encrypted': False, - 'qos_specs': None, - 'target_iqn': 'iqn.2010-10.org.openstack:volume-8b1ec3fe-8c57-45ca-a1cf-a481bfc8fce2', - 'target_portal': '11.0.0.8:3260', - 'volume_id': '8b1ec3fe-8c57-45ca-a1cf-a481bfc8fce2', - 'target_lun': 1, - 'access_mode': 'rw', - 'auth_username': 'nE9PY8juynmmZ95F7Xb7', - 'auth_method': 'CHAP' - } - } - -In the process of building this data structure, the Cinder Volume Manager makes a number of -calls to the backend driver, and builds a volume_attachment entry in the database to store -the connection information passed in via the connector object. - -driver.validate_connector -************************* - -Simply verifies that the initiator data is included in the passed in -connector (there are some drivers that utilize pieces of this connector -data, but in the case of the reference, it just verifies it's there). - -driver.create_export -******************** - -This is the target specific, persistent data associated with a volume. 
-This method is responsible for building an actual iSCSI target, and -providing the "location" and "auth" information which will be used to -form the response data in the parent request. -We call this info the model_update and it's used to update vital target -information associated with the volume in the Cinder database. - -driver.initialize_connection -**************************** - -Now that we've actually built a target and persisted the important -bits of information associated with it, we're ready to actually assign -the target to a volume and form the needed info to pass back out -to our caller. This is where we finally put everything together and -form the example data structure response shown earlier. - -This method is sort of deceptive, it does a whole lot of formatting -of the data we've put together in the create_export call, but it doesn't -really offer any new info. It's completely dependent on the information -that was gathered in the create_export call and put into the database. At -this point, all we're doing is taking all the various entries from the database -and putting it together into the desired format/structure. - -The key method call for updating and obtaining all of this info was -done by the create_export call. This formatted data is then passed -back up to the API and returned as the response back out to Nova. - -At this point, we return attach info to the caller that provides everything -needed to make the remote iSCSI connection. - -attach(self, context, volume, instance_uuid, host_name, mount_point, mode) --------------------------------------------------------------------------- - -This is the last call that *should* be pretty simple. The intent is that this -is simply used to finalize the attach process. In other words, we simply -update the status on the Volume in the database, and provide a mechanism to -notify the driver that the attachment has completed successfully. 
- -There's some additional information that has been added to this finalize call -over time like instance_uuid, host_name etc. Some of these are only provided -during the actual attach call and may be desired for some drivers for one -reason or another. - - -Detach workflow -=============== - -begin_detaching(self, context, volume) --------------------------------------- - -Analogous to the Attach workflows ``reserve_volume`` method. -Performs a simple conditional update of Volume status to ``detaching``. - - -terminate_connection(self, context, volume, connector, force=False) -------------------------------------------------------------------- -Analogous to the Attach workflows ``initialize_connection`` method. - -Used to send calls down to drivers/target-drivers to do any sort of cleanup -they might require. - -For most this is a noop, as connections and **iscsi session management is the -responsibility of the initiator**. HOWEVER, there are a number of special -cases here, particularly for target-drivers like LIO that use -access-groups, in those cases they remove the initiator from the access -list during this call which effectively closes sessions from the target -side. - - -detach(self, context, volume, attachment_id) -------------------------------------------------------------------- -The final update to the DB and yet another opportunity to pass something -down to the volume-driver. Initially a simple call-back that now has quite -a bit of cruft built up in the volume-manager. - -For drivers like LVM this again is a noop and just updates the db entry to -mark things as complete and set the volume to available again. - diff --git a/doc/source/contributor/attach_detach_conventions_v2.rst b/doc/source/contributor/attach_detach_conventions_v2.rst deleted file mode 100644 index e5539f12f..000000000 --- a/doc/source/contributor/attach_detach_conventions_v2.rst +++ /dev/null @@ -1,152 +0,0 @@ -.. 
- Licensed under the Apache License, Version 2.0 (the "License"); you may - not use this file except in compliance with the License. You may obtain - a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - License for the specific language governing permissions and limitations - under the License. - -================================== -Volume Attach/Detach workflow - V2 -================================== - -Previously there were six API calls associated with attach/detach of volumes in -Cinder (3 calls for each operation). As the projects grew and the -functionality of *simple* things like attach/detach evolved things have become -a bit vague and we have a number of race issues during the calls that -continually cause us some problems. - -Additionally, the existing code path makes things like multi-attach extremely -difficult to implement due to no real good tracking mechanism of attachment -info. - -To try and improve this we've proposed a new Attachments Object and API. Now -we keep an Attachment record for each attachment that we want to perform as -opposed to trying to infer the information from the Volume Object. - -Attachment Object -================= - -We actually already had a VolumeAttachment Table in the db, however we -weren't really using it, or at least using it efficiently. For V2 of attach -implementation (V3 API) flow we'll use the Attachment Table (object) as -the primary handle for managing attachment(s) for a volume. - -In addition, we also introduce the AttachmentSpecs Table which will store the -connector information for an Attachment so we no longer have the problem of -lost connector info, or trying to reassemble it. 
- -New API and Flow -================ - -attachment-create ------------------ - -``` -cinder --os-volume-api-version 3.27 attachment-create -``` - -The attachment_create call simply creates an empty Attachment record for the -specified Volume with an Instance UUID field set. This is particularly -useful for cases like Nova Boot from Volume where Nova hasn't sent -the job to the actual Compute host yet, but needs to make initial preparations -to reserve the volume for use, so here we can reserve the volume and indicate -that we will be attaching it to in the future. - -Alternatively, the caller may provide a connector in which case the Cinder API -will create the attachment and perform the update on the attachment to set the -connector info and return the connection data needed to make a connection. - -The attachment_create call can be used in one of two ways: - -1. Create an empty Attachment object (reserve). In this case the - attachment_create call requires an instance_uuid and a volume_uuid, - and just creates an empty Attachment object and returns the UUID of - Attachment to the caller. - -2. Create and complete the Attachment process in one call. The reserve process - is only needed in certain cases, in many cases Nova actually has enough - information to do everything in a single call. Also, non-nova consumers - typically don't require the granularity of a separate reserve at all. - - To perform the complete operation, include the connector data in the - attachment_create call and the Cinder API will perform the reserve and - initialize the connection in the single request. - -This full usage of attachment-create would be:: - - usage: cinder --os-volume-api-version 3.27 attachment-create - ... - - Positional arguments: - Name or ID of volume or volumes to attach. - ID of instance attaching to. - - Optional arguments: - --connect Make an active connection using provided connector info (True or False). - --initiator iqn of the initiator attaching to. 
Default=None. - --ip ip of the system attaching to. Default=None. - --host Name of the host attaching to. Default=None. - --platform Platform type. Default=x86_64. - --ostype OS type. Default=linux2. - --multipath Use multipath. Default=False. - --mountpoint Mountpoint volume will be attached at. Default=None. - -Returns the connection information for the attachment:: - - +-------------------+-----------------------------------------------------------------------+ - | Property | Value | - +-------------------+-----------------------------------------------------------------------+ - | access_mode | rw | - | attachment_id | 6ab061ad-5c45-48f3-ad9c-bbd3b6275bf2 | - | auth_method | CHAP | - | auth_password | kystSioDKHSV2j9y | - | auth_username | hxGUgiWvsS4GqAQcfA78 | - | encrypted | False | - | qos_specs | None | - | target_discovered | False | - | target_iqn | iqn.2010-10.org.openstack:volume-23212c97-5ed7-42d7-b433-dbf8fc38ec35 | - | target_lun | 0 | - | target_portal | 192.168.0.9:3260 | - | volume_id | 23212c97-5ed7-42d7-b433-dbf8fc38ec35 | - +-------------------+-----------------------------------------------------------------------+ - -attachment-update ------------------ - -``` -cinder --os-volume-api-version 3.27 attachment-update -``` - -Once we have a reserved volume, this CLI can be used to update an attachment for a cinder volume. -This call is designed to be more of an attachment completion than anything else. -It expects the value of a connector object to notify the driver that the volume is going to be -connected and where it's being connected to. The usage is the following:: - - usage: cinder --os-volume-api-version 3.27 attachment-update - ... - - Positional arguments: - ID of attachment. - - Optional arguments: - --initiator iqn of the initiator attaching to. Default=None. - --ip ip of the system attaching to. Default=None. - --host Name of the host attaching to. Default=None. - --platform Platform type. Default=x86_64. - --ostype OS type. 
Default=linux2. - --multipath Use multipath. Default=False. - --mountpoint Mountpoint volume will be attached at. Default=None. - -attachment-delete ------------------ - -``` -cinder --os-volume-api-version 3.27 attachment-delete -``` - diff --git a/doc/source/contributor/auth.rst b/doc/source/contributor/auth.rst deleted file mode 100644 index 166a8bfd0..000000000 --- a/doc/source/contributor/auth.rst +++ /dev/null @@ -1,244 +0,0 @@ -.. - Copyright 2010-2011 United States Government as represented by the - Administrator of the National Aeronautics and Space Administration. - All Rights Reserved. - - Licensed under the Apache License, Version 2.0 (the "License"); you may - not use this file except in compliance with the License. You may obtain - a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - License for the specific language governing permissions and limitations - under the License. - -.. _auth: - -Authentication and Authorization -================================ - -The :mod:`cinder.api.middleware.auth` Module -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -.. automodule:: cinder.api.middleware.auth - :noindex: - :members: - :undoc-members: - :show-inheritance: - - -The :mod:`cinder.quota` Module -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -.. automodule:: cinder.quota - :noindex: - :members: - :undoc-members: - :show-inheritance: - - -Tests ------ - - -The :mod:`middleware.test_auth` Module -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -.. automodule:: cinder.tests.unit.api.middleware.test_auth - :noindex: - :members: - :undoc-members: - :show-inheritance: - - -The :mod:`test_quota` Module -~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -.. 
automodule:: cinder.tests.unit.test_quota - :noindex: - :members: - :undoc-members: - :show-inheritance: - - -The :mod:`test_quota_utils` Module -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -.. automodule:: cinder.tests.unit.test_quota_utils - :noindex: - :members: - :undoc-members: - :show-inheritance: - - -Legacy Docs ------------ - -Cinder provides RBAC (Role-based access control) of the AWS-type APIs. We define the following roles: - -Roles-Based Access Control of AWS-style APIs using SAML Assertions -“Achieving FIPS 199 Moderate certification of a hybrid cloud environment using CloudAudit and declarative C.I.A. classifications” - - -Introduction ------------- - -We will investigate one method for integrating an AWS-style API with US eAuthentication-compatible federated authentication systems, to achieve access controls and limits based on traditional operational roles. -Additionally, we will look at how combining this approach, with an implementation of the CloudAudit APIs, will allow us to achieve a certification under FIPS 199 Moderate classification for a hybrid cloud environment. - - -Relationship of US eAuth to RBAC --------------------------------- - -Typical implementations of US eAuth authentication systems are structured as follows:: - - [ MS Active Directory or other federated LDAP user store ] - --> backends to… - [ SUN Identity Manager or other SAML Policy Controller ] - --> maps URLs to groups… - [ Apache Policy Agent in front of eAuth-secured Web Application ] - -In more ideal implementations, the remainder of the application-specific account information is stored either in extended schema on the LDAP server itself, via the use of a translucent LDAP proxy, or in an independent datastore keyed off of the UID provided via SAML assertion. - -.. _auth_roles: - - -Roles ------ - -AWS API calls are traditionally secured via Access and Secret Keys, which are used to sign API calls, along with traditional timestamps to prevent replay attacks. 
The APIs can be logically grouped into sets that align with five typical roles: - -* Base User -* System Administrator/Developer (currently have the same permissions) -* Network Administrator -* Project Manager -* Cloud Administrator/IT-Security (currently have the same permissions) - -There is an additional, conceptual end-user that may or may not have API access: - -* (EXTERNAL) End-user / Third-party User - -Basic operations are available to any : - -* Describe Instances -* Describe Images -* Describe Volumes -* Describe Keypairs -* Create Keypair -* Delete Keypair -* Create, Upload, Delete: Buckets and Keys (Object Store) - -System Administrators/Developers/Project Manager: - -* Create, Attach, Delete Volume (Block Store) -* Launch, Reboot, Terminate Instance -* Register/Unregister Machine Image (project-wide) -* Request / Review CloudAudit Scans - -Project Manager: - -* Add and remove other users (currently no api) -* Set roles (currently no api) - -Network Administrator: - -* Change Machine Image properties (public / private) -* Change Firewall Rules, define Security Groups -* Allocate, Associate, Deassociate Public IP addresses - -Cloud Administrator/IT-Security: - -* All permissions - - -Enhancements ------------- - -* SAML Token passing -* REST interfaces -* SOAP interfaces - -Wrapping the SAML token into the API calls. -Then store the UID (fetched via backchannel) into the instance metadata, providing end-to-end auditability of ownership and responsibility, without PII. - - -CloudAudit APIs ---------------- - -* Request formats -* Response formats -* Stateless asynchronous queries - -CloudAudit queries may spawn long-running processes (similar to launching instances, etc.) They need to return a ReservationId in the same fashion, which can be returned in further queries for updates. -RBAC of CloudAudit API calls is critical, since detailed system information is a system vulnerability. 
- - -Type declarations ------------------ -* Data declarations – Volumes and Objects -* System declarations – Instances - -Existing API calls to launch instances specific a single, combined “type” flag. We propose to extend this with three additional type declarations, mapping to the “Confidentiality, Integrity, Availability” classifications of FIPS 199. An example API call would look like:: - - RunInstances type=m1.large number=1 secgroup=default key=mykey confidentiality=low integrity=low availability=low - -These additional parameters would also apply to creation of block storage volumes (along with the existing parameter of ‘size’), and creation of object storage ‘buckets’. (C.I.A. classifications on a bucket would be inherited by the keys within this bucket.) - - -Request Brokering ------------------ - -* Cloud Interop -* IMF Registration / PubSub -* Digital C&A - -Establishing declarative semantics for individual API calls will allow the cloud environment to seamlessly proxy these API calls to external, third-party vendors – when the requested CIA levels match. - -See related work within the Infrastructure 2.0 working group for more information on how the IMF Metadata specification could be utilized to manage registration of these vendors and their C&A credentials. - - -Dirty Cloud - Hybrid Data Centers ---------------------------------- - -* CloudAudit bridge interfaces -* Anything in the ARP table - -A hybrid cloud environment provides dedicated, potentially co-located physical hardware with a network interconnect to the project or users’ cloud virtual network. - -This interconnect is typically a bridged VPN connection. Any machines that can be bridged into a hybrid environment in this fashion (at Layer 2) must implement a minimum version of the CloudAudit spec, such that they can be queried to provide a complete picture of the IT-sec runtime environment. 
- -Network discovery protocols (ARP, CDP) can be applied in this case, and existing protocols (SNMP location data, DNS LOC records) overloaded to provide CloudAudit information. - - -The Details ------------ - -* Preliminary Roles Definitions -* Categorization of available API calls -* SAML assertion vocabulary - - -System limits -------------- - -The following limits need to be defined and enforced: - -* Total number of instances allowed (user / project) -* Total number of instances, per instance type (user / project) -* Total number of volumes (user / project) -* Maximum size of volume -* Cumulative size of all volumes -* Total use of object storage (GB) -* Total number of Public IPs - - -Further Challenges ------------------- - -* Prioritization of users / jobs in shared computing environments -* Incident response planning -* Limit launch of instances to specific security groups based on AMI -* Store AMIs in LDAP for added property control diff --git a/doc/source/contributor/cinder.rst b/doc/source/contributor/cinder.rst deleted file mode 100644 index 02d383eea..000000000 --- a/doc/source/contributor/cinder.rst +++ /dev/null @@ -1,118 +0,0 @@ -.. - Copyright 2010-2011 United States Government as represented by the - Administrator of the National Aeronautics and Space Administration. - All Rights Reserved. - - Licensed under the Apache License, Version 2.0 (the "License"); you may - not use this file except in compliance with the License. You may obtain - a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - License for the specific language governing permissions and limitations - under the License. 
- -Common and Misc Libraries -========================= - -Libraries common throughout Cinder or just ones that haven't been categorized -very well yet. - - -The :mod:`cinder.context` Module --------------------------------- - -.. automodule:: cinder.context - :noindex: - :members: - :undoc-members: - :show-inheritance: - - -The :mod:`cinder.exception` Module ----------------------------------- - -.. automodule:: cinder.exception - :noindex: - :members: - :undoc-members: - :show-inheritance: - - -The :mod:`cinder.common.config` Module --------------------------------------- - -.. automodule:: cinder.common.config - :noindex: - :members: - :undoc-members: - :show-inheritance: - - -The :mod:`cinder.rpc` Module ----------------------------- - -.. automodule:: cinder.rpc - :noindex: - :members: - :undoc-members: - :show-inheritance: - - -The :mod:`cinder.test` Module ------------------------------ - -.. automodule:: cinder.test - :noindex: - :members: - :undoc-members: - :show-inheritance: - - -The :mod:`cinder.utils` Module ------------------------------- - -.. automodule:: cinder.utils - :noindex: - :members: - :undoc-members: - :show-inheritance: - - -The :mod:`cinder.wsgi` Module ------------------------------ - -.. automodule:: cinder.wsgi - :noindex: - :members: - :undoc-members: - :show-inheritance: - - -Tests ------ - - -The :mod:`conf_fixture` Module -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -.. automodule:: cinder.tests.unit.conf_fixture - :noindex: - :members: - :undoc-members: - :show-inheritance: - - -The :mod:`test_rpc` Module -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -.. automodule:: cinder.tests.unit.test_rpc - :noindex: - :members: - :undoc-members: - :show-inheritance: - - diff --git a/doc/source/contributor/database.rst b/doc/source/contributor/database.rst deleted file mode 100644 index 6c3c8ad95..000000000 --- a/doc/source/contributor/database.rst +++ /dev/null @@ -1,54 +0,0 @@ -.. 
- Copyright 2010-2011 United States Government as represented by the - Administrator of the National Aeronautics and Space Administration. - All Rights Reserved. - - Licensed under the Apache License, Version 2.0 (the "License"); you may - not use this file except in compliance with the License. You may obtain - a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - License for the specific language governing permissions and limitations - under the License. - -The Database Layer -================== - -The :mod:`cinder.db.api` Module -------------------------------- - -.. automodule:: cinder.db.api - :noindex: - :members: - :undoc-members: - :show-inheritance: - - -The Sqlalchemy Driver ---------------------- - -The :mod:`cinder.db.sqlalchemy.api` Module -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -.. automodule:: cinder.db.sqlalchemy.api - :noindex: - -The :mod:`cinder.db.sqlalchemy.models` Module -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -.. automodule:: cinder.db.sqlalchemy.models - :noindex: - :members: - :undoc-members: - :show-inheritance: - - -Tests ------ - -Tests are lacking for the db api layer and for the sqlalchemy driver. -Failures in the drivers would be detected in other test cases, though. diff --git a/doc/source/contributor/development.environment.rst b/doc/source/contributor/development.environment.rst deleted file mode 100644 index c0338dc74..000000000 --- a/doc/source/contributor/development.environment.rst +++ /dev/null @@ -1,151 +0,0 @@ -.. - Copyright 2010-2011 United States Government as represented by the - Administrator of the National Aeronautics and Space Administration. - All Rights Reserved. 
- - Licensed under the Apache License, Version 2.0 (the "License"); you may - not use this file except in compliance with the License. You may obtain - a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - License for the specific language governing permissions and limitations - under the License. - -Setting Up a Development Environment -==================================== - -This page describes how to setup a working Python development -environment that can be used in developing cinder on Ubuntu, Fedora or -Mac OS X. These instructions assume you're already familiar with -git. Refer to GettingTheCode_ for additional information. - -.. _GettingTheCode: http://wiki.openstack.org/GettingTheCode - -Following these instructions will allow you to run the cinder unit tests. -Running cinder is currently only supported on Linux, although you can run the -unit tests on Mac OS X. - -Virtual environments --------------------- - -Cinder development uses `virtualenv `__ to track and manage Python -dependencies while in development and testing. This allows you to -install all of the Python package dependencies in a virtual -environment or "virtualenv" (a special subdirectory of your cinder -directory), instead of installing the packages at the system level. - -.. note:: - - Virtualenv is useful for running the unit tests, but is not - typically used for full integration testing or production usage. - -Linux Systems -------------- - -.. note:: - - Feel free to add notes and change according to your experiences or operating system. - -Install the prerequisite packages. 
- -On Ubuntu (tested on 12.04-64 and 14.04-64):: - - sudo apt-get install python-dev libssl-dev python-pip git-core libmysqlclient-dev libpq-dev libffi-dev libxslt-dev - -On Fedora-based distributions e.g., Fedora/RHEL/CentOS/Scientific Linux (tested on CentOS 6.5):: - - sudo yum install python-virtualenv openssl-devel python-pip git gcc libffi-devel libxslt-devel mysql-devel postgresql-devel - -On openSUSE-based distributions (SLES 12, openSUSE 13.1, Factory or Tumbleweed):: - - sudo zypper install gcc git libmysqlclient-devel libopenssl-devel postgresql-devel python-devel python-pip - - -Mac OS X Systems ----------------- - -Install virtualenv:: - - sudo easy_install virtualenv - -Check the version of OpenSSL you have installed:: - - openssl version - -If you have installed OpenSSL 1.0.0a, which can happen when installing a -MacPorts package for OpenSSL, you will see an error when running -``cinder.tests.auth_unittest.AuthTestCase.test_209_can_generate_x509``. - -The stock version of OpenSSL that ships with Mac OS X 10.6 (OpenSSL 0.9.8l) -or Mac OS X 10.7 (OpenSSL 0.9.8r) works fine with cinder. - - -Getting the code ----------------- -Grab the code:: - - git clone https://github.com/openstack/cinder.git - cd cinder - - -Running unit tests ------------------- -The preferred way to run the unit tests is using ``tox``. It executes tests in -isolated environment, by creating separate virtualenv and installing -dependencies from the ``requirements.txt`` and ``test-requirements.txt`` files, -so the only package you install is ``tox`` itself:: - - sudo pip install tox - -Run the unit tests by doing:: - - tox -e py35 - tox -e py27 - -See :doc:`testing` for more details. - -.. 
_virtualenv: - -Manually installing and using the virtualenv --------------------------------------------- - -You can also manually install the virtual environment:: - - tox -e py27 --notest - -or:: - - tox -e py35 --notest - -This will install all of the Python packages listed in the -``requirements.txt`` file into your virtualenv. - -To activate the Cinder virtualenv you can run:: - - $ source .tox/py27/bin/activate - -or:: - - $ source .tox/py35/bin/activate - -To exit your virtualenv, just type:: - - $ deactivate - -Or, if you prefer, you can run commands in the virtualenv on a case by case -basis by running:: - - $ tox -e venv -- - -Contributing Your Work ----------------------- - -Once your work is complete you may wish to contribute it to the project. -Cinder uses the Gerrit code review system. For information on how to submit -your branch to Gerrit, see GerritWorkflow_. - -.. _GerritWorkflow: http://docs.openstack.org/infra/manual/developers.html#development-workflow diff --git a/doc/source/contributor/drivers.rst b/doc/source/contributor/drivers.rst deleted file mode 100644 index 6cd2953b0..000000000 --- a/doc/source/contributor/drivers.rst +++ /dev/null @@ -1,174 +0,0 @@ -.. - Copyright (c) 2013 OpenStack Foundation - All Rights Reserved. - - Licensed under the Apache License, Version 2.0 (the "License"); you may - not use this file except in compliance with the License. You may obtain - a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - License for the specific language governing permissions and limitations - under the License. - -Drivers -======= - -Cinder exposes an API to users to interact with different storage backend -solutions. 
The following are standards across all drivers for Cinder services -to properly interact with a driver. - -Basic attributes ----------------- - -There are some basic attributes that all drivers classes should have: - -* VERSION: Driver version in string format. No naming convention is imposed, - although semantic versioning is recommended. -* CI_WIKI_NAME: Must be the exact name of the `ThirdPartySystems wiki page - `_. This is used by our - tooling system to associate jobs to drivers and track their CI reporting - status correctly. - -The tooling system will also use the name and docstring of the driver class. - -Minimum Features ----------------- - -Minimum features are enforced to avoid having a grid of what features are -supported by which drivers and which releases. Cinder Core requires that all -drivers implement the following minimum features. - -Core Functionality ------------------- - -* Volume Create/Delete -* Volume Attach/Detach -* Snapshot Create/Delete -* Create Volume from Snapshot -* Get Volume Stats -* Copy Image to Volume -* Copy Volume to Image -* Clone Volume -* Extend Volume - -Volume Stats ------------- - -Volume stats are used by the different schedulers for the drivers to provide -a report on their current state of the backend. The following should be -provided by a driver. - -* driver_version -* free_capacity_gb -* storage_protocol -* total_capacity_gb -* vendor_name -* volume_backend_name - -**NOTE:** If the driver is unable to provide a value for free_capacity_gb or -total_capacity_gb, keywords can be provided instead. Please use 'unknown' if -the backend cannot report the value or 'infinite' if the backend has no upper -limit. But, it is recommended to report real values as the Cinder scheduler -assigns lowest weight to any storage backend reporting 'unknown' or 'infinite'. 
- -Feature Enforcement -------------------- - -All concrete driver implementations should use the -``cinder.interface.volumedriver`` decorator on the driver class:: - - @interface.volumedriver - class LVMVolumeDriver(driver.VolumeDriver): - -This will register the driver and allow automated compliance tests to run -against and verify the compliance of the driver against the required interface -to support the `Core Functionality`_ listed above. - -Running ``tox -e compliance`` will verify all registered drivers comply to -this interface. This can be used during development to perform self checks -along the way. Any missing method calls will be identified by the compliance -tests. - -The details for the required volume driver interfaces can be found in the -``cinder/interface/volume_*_driver.py`` source. - -Driver Development Documentations ---------------------------------- - -The LVM driver is our reference for all new driver implementations. The -information below can provide additional documentation for the methods that -volume drivers need to implement. - -Base Driver Interface -````````````````````` -The methods documented below are the minimum required interface for a volume -driver to support. All methods from this interface must be implemented -in order to be an official Cinder volume driver. - -.. automodule:: cinder.interface.volume_driver - :members: - - -Snapshot Interface -`````````````````` -Another required interface for a volume driver to be fully compatible is the -ability to create and manage snapshots. Due to legacy constraints, this -interface is not included in the base driver interface above. - -Work is being done to address those legacy issues. Once that is complete, this -interface will be merged with the base driver interface. - -.. 
automodule:: cinder.interface.volume_snapshot_driver - :members: - - -Manage/Unmanage Support -``````````````````````` -An optional feature a volume backend can support is the ability to manage -existing volumes or unmanage volumes - keep the volume on the storage backend -but no longer manage it through Cinder. - -To support this functionality, volume drivers must implement these methods: - -.. automodule:: cinder.interface.volume_management_driver - :members: - - -Manage/Unmanage Snapshot Support -```````````````````````````````` -In addition to the ability to manage and unmanage volumes, Cinder backend -drivers may also support managing and unmanaging volume snapshots. These -additional methods must be implemented to support these operations. - -.. automodule:: cinder.interface.volume_snapshotmanagement_driver - :members: - - -Volume Consistency Groups -````````````````````````` -Some storage backends support the ability to group volumes and create write -consistent snapshots across the group. In order to support these operations, -the following interface must be implemented by the driver. - -.. automodule:: cinder.interface.volume_consistencygroup_driver - :members: - - -Generic Volume Groups -````````````````````` -The generic volume groups feature provides the ability to manage a group of -volumes together. Because this feature is implemented at the manager level, -every driver gets this feature by default. If a driver wants to override -the default behavior to support additional functionalities such as consistent -group snapshot, the following interface must be implemented by the driver. -Once every driver supporting volume consistency groups has added the -consistent group snapshot capability to generic volume groups, we no longer -need the volume consistency groups interface listed above. - -.. 
automodule:: cinder.interface.volume_group_driver - :members: - diff --git a/doc/source/contributor/fakes.rst b/doc/source/contributor/fakes.rst deleted file mode 100644 index 741bc7b03..000000000 --- a/doc/source/contributor/fakes.rst +++ /dev/null @@ -1,44 +0,0 @@ -.. - Copyright 2010-2011 United States Government as represented by the - Administrator of the National Aeronautics and Space Administration. - All Rights Reserved. - - Licensed under the Apache License, Version 2.0 (the "License"); you may - not use this file except in compliance with the License. You may obtain - a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - License for the specific language governing permissions and limitations - under the License. - -Fake Drivers -============ - -.. todo:: document general info about fakes - -When the real thing isn't available and you have some development to do these -fake implementations of various drivers let you get on with your day. - -The :class:`cinder.tests.unit.test_service.FakeManager` Class -------------------------------------------------------------- - -.. autoclass:: cinder.tests.unit.test_service.FakeManager - :noindex: - :members: - :undoc-members: - :show-inheritance: - - -The :mod:`cinder.tests.unit.api.fakes` Module ---------------------------------------------- - -.. automodule:: cinder.tests.unit.api.fakes - :noindex: - :members: - :undoc-members: - :show-inheritance: - diff --git a/doc/source/contributor/gerrit.rst b/doc/source/contributor/gerrit.rst deleted file mode 100644 index cc1500119..000000000 --- a/doc/source/contributor/gerrit.rst +++ /dev/null @@ -1,16 +0,0 @@ -Code Reviews with Gerrit -======================== - -Cinder uses the `Gerrit`_ tool to review proposed code changes. 
The review site -is http://review.openstack.org. - -Gerrit is a complete replacement for Github pull requests. `All Github pull -requests to the Cinder repository will be ignored`. - -See `Gerrit Workflow Quick Reference`_ for information about how to get -started using Gerrit. See `Development Workflow`_ for more detailed -documentation on how to work with Gerrit. - -.. _Gerrit: http://code.google.com/p/gerrit -.. _Development Workflow: http://docs.openstack.org/infra/manual/developers.html#development-workflow -.. _Gerrit Workflow Quick Reference: http://docs.openstack.org/infra/manual/developers.html#development-workflow diff --git a/doc/source/contributor/gmr.rst b/doc/source/contributor/gmr.rst deleted file mode 100644 index 381eae051..000000000 --- a/doc/source/contributor/gmr.rst +++ /dev/null @@ -1,112 +0,0 @@ -.. - Copyright (c) 2013 OpenStack Foundation - All Rights Reserved. - - Licensed under the Apache License, Version 2.0 (the "License"); you may - not use this file except in compliance with the License. You may obtain - a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - License for the specific language governing permissions and limitations - under the License. - -Guru Meditation Reports -======================= - -Cinder contains a mechanism whereby developers and system administrators can -generate a report about the state of a running Cinder executable. -This report is called a *Guru Meditation Report* (*GMR* for short). - -Generating a GMR ----------------- - -A *GMR* can be generated by sending the *USR2* signal to any Cinder process -with support (see below). -The *GMR* will then output to standard error for that particular process. 
- -For example, suppose that ``cinder-api`` has process id ``8675``, and was run -with ``2>/var/log/cinder/cinder-api-err.log``. -Then, ``kill -USR2 8675`` will trigger the Guru Meditation report to be printed -to ``/var/log/cinder/cinder-api-err.log``. - -There is other way to trigger a generation of report, user should add -a configuration in Cinder's conf file:: - - [oslo_reports] - file_event_handler=['The path to a file to watch for changes to trigger ' - 'the reports, instead of signals. Setting this option ' - 'disables the signal trigger for the reports.'] - file_event_handler_interval=['How many seconds to wait between polls when ' - 'file_event_handler is set, default value ' - 'is 1'] - -a *GMR* can be generated by "touch"ing the file which was specified in -file_event_handler. The *GMR* will then output to standard error for -that particular process. - -For example, suppose that ``cinder-api`` was run with -``2>/var/log/cinder/cinder-api-err.log``, and the file path is -``/tmp/guru_report``. -Then, ``touch /tmp/guru_report`` will trigger the Guru Meditation report to be -printed to ``/var/log/cinder/cinder-api-err.log``. - -Structure of a GMR ------------------- - -The *GMR* is designed to be extensible; any particular executable may add -its own sections. However, the base *GMR* consists of several sections: - -Package - Shows information about the package to which this process belongs, - including version information - -Threads - Shows stack traces and thread ids for each of the threads within this process - -Green Threads - Shows stack traces for each of the green threads within this process - (green threads don't have thread ids) - -Configuration - Lists all the configuration options currently accessible via the CONF object - for the current process - -Adding Support for GMRs to New Executables ------------------------------------------- - -Adding support for a *GMR* to a given executable is fairly easy. 
- -First import the module (currently residing in oslo-incubator), as well as the -Cinder version module: - -.. code-block:: python - - from oslo_reports import guru_meditation_report as gmr - from cinder import version - -Then, register any additional sections (optional): - -.. code-block:: python - - TextGuruMeditation.register_section('Some Special Section', - some_section_generator) - -Finally (under main), before running the "main loop" of the executable -(usually ``service.server(server)`` or something similar), register the *GMR* -hook: - -.. code-block:: python - - TextGuruMeditation.setup_autorun(version) - -Extending the GMR ------------------ - -As mentioned above, additional sections can be added to the GMR for a -particular executable. For more information, see the inline documentation -about oslo.reports: -`oslo.reports `_ diff --git a/doc/source/contributor/groups.rst b/doc/source/contributor/groups.rst deleted file mode 100644 index 1f175bccb..000000000 --- a/doc/source/contributor/groups.rst +++ /dev/null @@ -1,365 +0,0 @@ -Generic Volume Groups -===================== - -Introduction to generic volume groups -------------------------------------- - -Generic volume group support was added in cinder in the Newton release. -There is support for creating group types and group specs, creating -groups of volumes, and creating snapshots of groups. Detailed information -on how to create a group type, a group, and a group snapshot can be found -in `block storage admin guide `_. - -How is generic volume groups different from consistency groups in cinder? -The consistency group feature was introduced in cinder in Juno and are -supported by a few drivers. Currently consistency groups in cinder only -support consistent group snapshot. It cannot be extended easily to serve -other purposes. 
A tenant may want to put volumes used in the same application -together in a group so that it is easier to manage them together, and this -group of volumes may or may not support consistent group snapshot. Generic -volume group is introduced to solve this problem. By decoupling the tight -relationship between the group construct and the consistency concept, -generic volume groups can be extended to support other features in the future. - -Action items for drivers supporting consistency groups ------------------------------------------------------- - -Drivers currently supporting consistency groups are in the following: - -- Juno: EMC VNX - -- Kilo: EMC VMAX, IBM (GPFS, Storwize, SVC, and XIV), ProphetStor, Pure - -- Liberty: Dell Storage Center, EMC XtremIO, HPE 3Par and LeftHand - -- Mitaka: EMC ScaleIO, NetApp Data ONTAP and E-Series, SolidFire - -- Newton: CoprHD, FalconStor, Huawei - -Since the addition of generic volume groups, there is plan to migrate -consistency groups to generic volume groups. A migration command and -changes in CG APIs to support migrating CGs to groups are developed and -merged in Ocata [1][2]. In order to support rolling upgrade, it will take -a couple of releases before consistency groups can be deprecated. - -For drivers planning to add consistency groups support, the new generic -volume group driver interfaces should be implemented instead of the CG -interfaces. - -For drivers already supporting consistency groups, the new generic -volume group driver interfaces should be implemented to include the -CG support. - -For drivers wanting generic volume groups but not consistent group -snapshot support, no code changes are necessary. By default, every -cinder volume driver already supports generic volume groups since -Newton because the support was added to the common code. Testing -should be done for every driver to make sure this feature works properly. 
- -Drivers already supporting CG are expected to add CG support to -generic volume groups by Pike-1. This is a deadline discussed and -agreed upon at the Ocata summit in Barcelona. - -Group Type and Group Specs / Volume Types and Extra Specs ---------------------------------------------------------- - -The driver interfaces for consistency groups and generic volume groups -are very similar. One new concept introduced for generic volume groups -is the group type. Group type is used to categorize a group just like a -volume type is used to describe a volume. Similar to extra specs for -a volume type, group specs are also introduced to be associated with a -group type. Group types allow a user to create different types of groups. - -A group can support multiple volume types and volume types are required -as input parameters when creating a group. In addition to volume types, -a group type is also required when creating a group. - -Group types and volume types are created by the Cloud Administrator. -A tenant uses the group types and volume types to create groups and -volumes. - -A driver can support both consistent group snapshot and a group of -snapshots that do not maintain the write order consistency by using -different group types. In other words, a group supporting consistent -group snapshot is a special type of generic volume group. - -For a group to support consistent group snapshot, the group specs in the -corresponding group type should have the following entry:: - - {'consistent_group_snapshot_enabled': True} - -Similarly, for a volume to be in a group that supports consistent group -snapshots, the volume type extra specs would also have the following entry:: - - {'consistent_group_snapshot_enabled': True} - -By requiring the above entry to be in both group specs and volume type -extra specs, we can make sure the scheduler will choose a backend that -supports the group type and volume types for a group. 
It is up to the driver -to parse the group type info when creating a group, parse the volume type -info when creating a volume, and set things up as requested. - -Capabilities reporting ----------------------- -The following entry is expected to be added to the stats/capabilities update -for drivers supporting consistent group snapshot:: - - stats["consistent_group_snapshot_enabled"] = True - -Driver methods --------------- -The following driver methods should to be implemented for the driver to -support consistent group snapshot: - -- create_group(context, group) - -- delete_group(context, group, volumes) - -- update_group(context, group, add_volumes=None, remove_volumes=None) - -- create_group_from_src(context, group, volumes, - group_snapshot=None, snapshots=None, - source_group=None, source_vols=None) - -- create_group_snapshot(context, group_snapshot, snapshots) - -- delete_group_snapshot(context, group_snapshot, snapshots) - -Here is an example that add CG capability to generic volume groups [3]. -Details of driver interfaces are as follows. - -**create_group** - -This method creates a group. It has context and group object as input -parameters. A group object has volume_types and group_type_id that can be used -by the driver. - -create_group returns model_update. model_update will be in this format: -{'status': xxx, ......}. - -If the status in model_update is 'error', the manager will throw -an exception and it will be caught in the try-except block in the -manager. If the driver throws an exception, the manager will also -catch it in the try-except block. The group status in the db will -be changed to 'error'. - -For a successful operation, the driver can either build the -model_update and return it or return None. The group status will -be set to 'available'. - -**delete_group** - -This method deletes a group. It has context, group object, and a list -of volume objects as input parameters. It returns model_update and -volumes_model_update. 
- -volumes_model_update is a list of volume dictionaries. It has to be built -by the driver. An entry will be in this format: {'id': xxx, 'status': xxx, -......}. model_update will be in this format: {'status': xxx, ......}. -The driver should populate volumes_model_update and model_update -and return them. - -The manager will check volumes_model_update and update db accordingly -for each volume. If the driver successfully deleted some volumes -but failed to delete others, it should set statuses of the volumes -accordingly so that the manager can update db correctly. - -If the status in any entry of volumes_model_update is 'error_deleting' -or 'error', the status in model_update will be set to the same if it -is not already 'error_deleting' or 'error'. - -If the status in model_update is 'error_deleting' or 'error', the -manager will raise an exception and the status of the group will be -set to 'error' in the db. If volumes_model_update is not returned by -the driver, the manager will set the status of every volume in the -group to 'error' in the except block. - -If the driver raises an exception during the operation, it will be -caught by the try-except block in the manager. The statuses of the -group and all volumes in it will be set to 'error'. - -For a successful operation, the driver can either build the -model_update and volumes_model_update and return them or -return None, None. The statuses of the group and all volumes -will be set to 'deleted' after the manager deletes them from db. - -**update_group** - -This method adds existing volumes to a group or removes volumes -from a group. It has context, group object, a list of volume objects -to be added to the group, and a list of a volume objects to be -removed from the group. It returns model_update, add_volumes_update, -and remove_volumes_update. - -model_update is a dictionary that the driver wants the manager -to update upon a successful return. 
If None is returned, the manager -will set the status to 'available'. - -add_volumes_update and remove_volumes_update are lists of dictionaries -that the driver wants the manager to update upon a successful return. -Note that each entry requires a {'id': xxx} so that the correct -volume entry can be updated. If None is returned, the volume will -remain its original status. - -If the driver throws an exception, the status of the group as well as -those of the volumes to be added/removed will be set to 'error'. - -**create_group_from_src** - -This method creates a group from source. The source can be a -group_snapshot or a source group. create_group_from_src has context, -group object, a list of volume objects, group_snapshot object, a list -of snapshot objects, source group object, and a list of source volume -objects as input parameters. It returns model_update and -volumes_model_update. - -volumes_model_update is a list of dictionaries. It has to be built by -the driver. An entry will be in this format: {'id': xxx, 'status': xxx, -......}. model_update will be in this format: {'status': xxx, ......}. - -To be consistent with other volume operations, the manager will -assume the operation is successful if no exception is thrown by -the driver. For a successful operation, the driver can either build -the model_update and volumes_model_update and return them or -return None, None. - -**create_group_snapshot** - -This method creates a group_snapshot. It has context, group_snapshot -object, and a list of snapshot objects as input parameters. It returns -model_update and snapshots_model_update. - -snapshots_model_update is a list of dictionaries. It has to be built by the -driver. An entry will be in this format: {'id': xxx, 'status': xxx, ......}. -model_update will be in this format: {'status': xxx, ......}. The driver -should populate snapshots_model_update and model_update and return them. 
- -The manager will check snapshots_model_update and update db accordingly -for each snapshot. If the driver successfully created some snapshots -but failed to create others, it should set statuses of the snapshots -accordingly so that the manager can update db correctly. - -If the status in any entry of snapshots_model_update is 'error', the -status in model_update will be set to the same if it is not already -'error'. - -If the status in model_update is 'error', the manager will raise an -exception and the status of group_snapshot will be set to 'error' in -the db. If snapshots_model_update is not returned by the driver, the -manager will set the status of every snapshot to 'error' in the except -block. - -If the driver raises an exception during the operation, it will be -caught by the try-except block in the manager and the statuses of -group_snapshot and all snapshots will be set to 'error'. - -For a successful operation, the driver can either build the -model_update and snapshots_model_update and return them or -return None, None. The statuses of group_snapshot and all snapshots -will be set to 'available' at the end of the manager function. - -**delete_group_snapshot** - -This method deletes a group_snapshot. It has context, group_snapshot -object, and a list of snapshot objects. It returns model_update and -snapshots_model_update. - -snapshots_model_update is a list of dictionaries. It has to be built by -the driver. An entry will be in this format: {'id': xxx, 'status': xxx, -......}. model_update will be in this format: {'status': xxx, ......}. -The driver should populate snapshots_model_update and model_update -and return them. - -The manager will check snapshots_model_update and update db accordingly -for each snapshot. If the driver successfully deleted some snapshots -but failed to delete others, it should set statuses of the snapshots -accordingly so that the manager can update db correctly. 
- -If the status in any entry of snapshots_model_update is -'error_deleting' or 'error', the status in model_update will be set to -the same if it is not already 'error_deleting' or 'error'. - -If the status in model_update is 'error_deleting' or 'error', the -manager will raise an exception and the status of group_snapshot will -be set to 'error' in the db. If snapshots_model_update is not returned -by the driver, the manager will set the status of every snapshot to -'error' in the except block. - -If the driver raises an exception during the operation, it will be -caught by the try-except block in the manager and the statuses of -group_snapshot and all snapshots will be set to 'error'. - -For a successful operation, the driver can either build the -model_update and snapshots_model_update and return them or -return None, None. The statuses of group_snapshot and all snapshots -will be set to 'deleted' after the manager deletes them from db. - -Migrate CGs to Generic Volume Groups ------------------------------------- - -This section only affects drivers already supporting CGs by the -Newton release. Drivers planning to add CG support after Newton are -not affected. - -A group type named default_cgsnapshot_type will be created by the -migration script. The following command needs to be run to migrate -migrate data and copy data from consistency groups to groups and -from cgsnapshots to group_snapshots. Migrated consistency groups -and cgsnapshots will be removed from the database:: - - cinder-manage db online_data_migrations - --max_count - --ignore_state - -max_count is optional. Default is 50. -ignore_state is optional. Default is False. - -After running the above migration command to migrate CGs to generic -volume groups, CG and group APIs work as follows: - -* Create CG only creates in the groups table. - -* Modify CG modifies in the CG table if the CG is in the - CG table, otherwise it modifies in the groups table. 
- -* Delete CG deletes from the CG or the groups table - depending on where the CG is. - -* List CG checks both CG and groups tables. - -* List CG Snapshots checks both the CG and the groups - tables. - -* Show CG checks both tables. - -* Show CG Snapshot checks both tables. - -* Create CG Snapshot creates either in the CG or the groups - table depending on where the CG is. - -* Create CG from Source creates in either the CG or the - groups table depending on the source. - -* Create Volume adds the volume either to the CG or the - group. - -* default_cgsnapshot_type is reserved for migrating CGs. - -* Group APIs will only write/read in/from the groups table. - -* Group APIs will not work on groups with default_cgsnapshot_type. - -* Groups with default_cgsnapshot_type can only be operated by - CG APIs. - -* After CG tables are removed, we will allow default_cgsnapshot_type - to be used by group APIs. - -References ----------- -[1] Migration script - https://review.openstack.org/#/c/350350/ -[2] CG APIs changes for migrating CGs - https://review.openstack.org/#/c/401839/ -[3] Example adding CG capability to generic volume groups - https://review.openstack.org/#/c/413927/ diff --git a/doc/source/contributor/i18n.rst b/doc/source/contributor/i18n.rst deleted file mode 100644 index d23ec0400..000000000 --- a/doc/source/contributor/i18n.rst +++ /dev/null @@ -1,64 +0,0 @@ -Internationalization -==================== - -For internationalization guidelines, see the -`oslo.i18n documentation `_. -The information below can be used to get started. - -Cinder uses `gettext `_ so that -user-facing strings such as log messages appear in the appropriate -language in different locales. - -To use gettext, make sure that the strings passed to the logger are wrapped -in a ``_Lx()`` function call. 
For example:: - - LOG.info(_LI("block_device_mapping %s"), block_device_mapping) - -There are a few different _() translation markers, depending on the logging -level of the text: - -- _LI() - Used for INFO level log messages -- _LW() - Used for WARNING level log messages -- _LE() - Used for ERROR level log messages (this includes LOG.exception) -- _() - Used for any exception messages, including strings used for both - logging and exceptions. - -.. note:: - - Starting with the Pike series, OpenStack no longer supports log - translation markers like ``_Lx()``, only ``_()`` should still be used for - exceptions that could be user facing. It is not necessary to add ``_Lx()`` - translation instructions to new code, and the instructions can be removed - from old code. Refer to the email thread `understanding log domain change - `_ - on the openstack-dev mailing list for more details. - -Do not use ``locals()`` for formatting messages because: - -1. It is not as clear as using explicit dicts. -2. It could produce hidden errors during refactoring. -3. Changing the name of a variable causes a change in the message. -4. It creates a lot of otherwise unused variables. - -If you do not follow the project conventions, your code may cause pep8 hacking -check failures. - -For translation to work properly, the top level scripts for Cinder need -to first do the following before any Cinder modules are imported:: - - from cinder import i18n - i18n.enable_lazy() - -Note: this should _only_ be called from top level scripts - no library code -or common modules should call this method. 
- -Any files that use the _() for translation then must have the following -lines:: - - from cinder.i18n import _ - -If the above code is missing, it may result in an error that looks -like:: - - NameError: name '_' is not defined - diff --git a/doc/source/contributor/index.rst b/doc/source/contributor/index.rst deleted file mode 100644 index 8b4c98be0..000000000 --- a/doc/source/contributor/index.rst +++ /dev/null @@ -1,94 +0,0 @@ -.. - Copyright 2010-2011 United States Government as represented by the - Administrator of the National Aeronautics and Space Administration. - All Rights Reserved. - - Licensed under the Apache License, Version 2.0 (the "License"); you may - not use this file except in compliance with the License. You may obtain - a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - License for the specific language governing permissions and limitations - under the License. - -Contributor Guide -================= - -In this section you will find information on Cinder's lower level programming -APIs. - - -Programming HowTos and Tutorials --------------------------------- -.. toctree:: - :maxdepth: 3 - - development.environment - api_microversion_dev - api_conditional_updates - api_microversion_history - testing - addmethod.openstackapi - drivers - gmr - replication - user_messages - migration - api.apache - rolling.upgrades - groups - -Background Concepts for Cinder ------------------------------- -.. toctree:: - :maxdepth: 3 - - architecture - attach_detach_conventions - attach_detach_conventions_v2 - threading - i18n - rpc - -Other Resources ---------------- -.. toctree:: - :maxdepth: 3 - - launchpad - gerrit - jenkins - releasenotes - -API Reference -------------- -.. 
toctree:: - :maxdepth: 3 - - ./api/autoindex - -Module Reference ----------------- -.. toctree:: - :maxdepth: 3 - - services - database - volume - auth - api - scheduler - fakes - cinder - -Indices and tables ------------------- - -* :ref:`genindex` -* :ref:`modindex` -* :ref:`search` - diff --git a/doc/source/contributor/jenkins.rst b/doc/source/contributor/jenkins.rst deleted file mode 100644 index e245899d6..000000000 --- a/doc/source/contributor/jenkins.rst +++ /dev/null @@ -1,44 +0,0 @@ -Continuous Integration with Jenkins -=================================== - -Cinder uses a `Jenkins`_ server to automate development tasks. The Jenkins -front-end is at http://jenkins.openstack.org. You must have an -account on `Launchpad`_ to be able to access the OpenStack Jenkins site. - -Jenkins performs tasks such as: - -`gate-cinder-pep8`_ - Run PEP8 checks on proposed code changes that have been reviewed. - -`gate-cinder-pylint`_ - Run Pylint checks on proposed code changes that have been reviewed. - -`gate-cinder-python27`_ - Run unit tests using python2.7 on proposed code changes that have been reviewed. - -`gate-cinder-python34`_ - Run unit tests using python3.4 on proposed code changes that have been reviewed. - -`cinder-coverage`_ - Calculate test coverage metrics. - -`cinder-docs`_ - Build this documentation and push it to `OpenStack Cinder `_. - -`cinder-merge-release-tags`_ - Merge reviewed code into the git repository. - -`cinder-tarball`_ - Do ``python setup.py sdist`` to create a tarball of the cinder code and upload - it to http://tarballs.openstack.org/cinder - -.. _Jenkins: http://jenkins-ci.org -.. _Launchpad: http://launchpad.net -.. _gate-cinder-pep8: https://jenkins.openstack.org/job/gate-cinder-pep8 -.. _gate-cinder-pylint: https://jenkins.openstack.org/job/gate-cinder-pylint -.. _gate-cinder-python27: https://jenkins.openstack.org/job/gate-cinder-python27 -.. _gate-cinder-python34: https://jenkins.openstack.org/job/gate-cinder-python34 -.. 
_cinder-coverage: https://jenkins.openstack.org/job/cinder-coverage -.. _cinder-docs: https://jenkins.openstack.org/job/cinder-docs -.. _cinder-merge-release-tags: https://jenkins.openstack.org/job/cinder-merge-release-tags -.. _cinder-tarball: https://jenkins.openstack.org/job/cinder-tarball diff --git a/doc/source/contributor/launchpad.rst b/doc/source/contributor/launchpad.rst deleted file mode 100644 index 309059161..000000000 --- a/doc/source/contributor/launchpad.rst +++ /dev/null @@ -1,52 +0,0 @@ -Project hosting with Launchpad -============================== - -`Launchpad`_ hosts the Cinder project. The Cinder project homepage on Launchpad is -http://launchpad.net/cinder. - -Launchpad credentials ---------------------- - -Creating a login on Launchpad is important even if you don't use the Launchpad -site itself, since Launchpad credentials are used for logging in on several -OpenStack-related sites. These sites include: - - * `Wiki`_ - * Gerrit (see :doc:`gerrit`) - * Jenkins (see :doc:`jenkins`) - -Mailing list ------------- - -The mailing list email is ``openstack@lists.openstack.org``. This is a common -mailing list across the OpenStack projects. To participate in the mailing list: - -#. Subscribe to the list at http://lists.openstack.org/cgi-bin/mailman/listinfo/openstack - -The mailing list archives are at http://lists.openstack.org/pipermail/openstack/. - - -Bug tracking ------------- - -Report Cinder bugs at https://bugs.launchpad.net/cinder - -Feature requests (Blueprints) ------------------------------ - -Cinder uses Launchpad Blueprints to track feature requests. Blueprints are at -https://blueprints.launchpad.net/cinder. - -Technical support (Answers) ---------------------------- - -Cinder no longer uses Launchpad Answers to track Cinder technical support questions. - -Note that `Ask OpenStack`_ (which is not hosted on Launchpad) can -be used for technical support requests. - -.. _Launchpad: http://launchpad.net -.. 
_Wiki: http://wiki.openstack.org -.. _Cinder Team: https://launchpad.net/~cinder -.. _OpenStack Team: https://launchpad.net/~openstack -.. _Ask OpenStack: http://ask.openstack.org diff --git a/doc/source/contributor/migration.rst b/doc/source/contributor/migration.rst deleted file mode 100644 index b295c5c11..000000000 --- a/doc/source/contributor/migration.rst +++ /dev/null @@ -1,303 +0,0 @@ -.. - Copyright (c) 2015 OpenStack Foundation - All Rights Reserved. - - Licensed under the Apache License, Version 2.0 (the "License"); you may - not use this file except in compliance with the License. You may obtain - a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - License for the specific language governing permissions and limitations - under the License. - -Migration -========= - -Introduction to volume migration --------------------------------- -Cinder provides the volume migration support within the same deployment, -which means the node of cinder volume service, c-vol node where the -source volume is located, is able to access the c-vol node where -the destination volume is located, and both of them share the same -Cinder API service, scheduler service, message queue service, etc. - -As a general rule migration is possible for volumes in 'available' or -‘in-use’ status, for the driver which has implemented volume migration. -So far, we are confident that migration will succeed for 'available' -volumes, whose drivers implement the migration routines. However, -the migration of 'in-use' volumes is driver dependent. It depends on -different drivers involved in the operation. It may fail depending on -the source or destination driver of the volume. 
- -For example, from RBD to LVM, the migration of 'in-use' volume will -succeed, but from LVM to RBD, it will fail. - -There are two major scenarios, which volume migration supports -in Cinder: - -Scenario 1: Migration between two back-ends with the same volume type, -regardless if they are located on the same c-vol node or not. - -Scenario 2: Migration between two back-ends with different volume types, -regardless if the back-ends are located on the same c-vol node or not. - - -How to do volume migration via CLI ----------------------------------- -Scenario 1 of volume migration is done via the following command from -the CLI:: - - cinder migrate [--force-host-copy []] - [--lock-volume []] - - - Mandatory arguments: - ID of volume to migrate. - Destination host. The format of host is - host@backend#POOL, while 'host' is the host - name of the volume node, 'backend' is the back-end - name and 'POOL' is a logical concept to describe - a set of storage resource, residing in the - back-end. If the back-end does not have specified - pools, 'POOL' needs to be set with the same name - as 'backend'. - - Optional arguments: - --force-host-copy [] - Enables or disables generic host-based force- - migration, which bypasses the driver optimization. - Default=False. - --lock-volume [] - Enables or disables the termination of volume - migration caused by other commands. This option - applies to the available volume. True means it locks - the volume state and does not allow the migration to - be aborted. The volume status will be in maintenance - during the migration. False means it allows the volume - migration to be aborted. The volume status is still in - the original status. Default=False. - -Important note: Currently, error handling for failed migration operations is -under development in Cinder. If we would like the volume migration to finish -without any interruption, please set --lock-volume to True. 
If it is set -to False, we cannot predict what will happen, if other actions like attach, -detach, extend, etc, are issued on the volume during the migration. -It all depends on which stage the volume migration has reached and when the -request of another action comes. - - -Scenario 2 of volume migration can be done via the following command -from the CLI:: - - cinder retype --migration-policy on-demand - - Mandatory arguments: - Name or ID of volume for which to modify type. - New volume type. - -Source volume type and destination volume type must be different and -they must refer to different back-ends. - - -Configurations --------------- -To set up an environment to try the volume migration, we need to -configure at least two different back-ends on the same node of cinder -volume service, c-vol node or two back-ends on two different volume -nodes of cinder volume service, c-vol nodes. Which command to use, -‘cinder migrate’ or ‘cinder retype’, depends on which type of volume -we would like to test. - -**Scenario 1 for migration** - -To configure the environment for Scenario 1 migration, e.g. a -volume is migrated from back-end on Node 1 to back-end - on Node 2, cinder.conf needs to contains the following -entries for the same back-end on both of source and the destination -nodes: - -For Node 1: - ... - [] - volume_driver=xxxx - volume_backend_name= - ... - -For Node 2: - ... - [] - volume_driver=xxxx - volume_backend_name= - ... - -If a volume with a predefined volume type is going to migrate, -the back-end drivers from Node 1 and Node 2 should have the same -value for volume_backend_name, which means should be -the same for Node 1 and Node 2. The volume type can be created -with the extra specs {volume_backend_name: driver-biz}. - -If we are going to migrate a volume with a volume type of none, it -is not necessary to set the same value to volume_backend_name for -both Node 1 and Node 2. 
- -**Scenario 2 for migration** - -To configure the environment for Scenario 2 migration: -For example, a volume is migrated from driver-biz back-end on Node 1 -to driver-net back-end on Node 2, cinder.conf needs to contain -the following entries: - -For Node 1: - ... - [driver-biz] - volume_driver=xxxx - volume_backend_name=driver-biz - ... - -For Node 2: - ... - [driver-net] - volume_driver=xxxx - volume_backend_name=driver-net - ... - -For example, a volume is migrated from driver-biz back-end on Node 1 -to driver-net back-end on the same node, cinder.conf needs to -contain the following entries: - - ... - [driver-biz] - volume_driver=xxxx - volume_backend_name=driver-biz - ... - - ... - [driver-net] - volume_driver=xxxx - volume_backend_name=driver-net - ... - -Two volume types need to be created. One is with the extra specs: -{volume_backend_name: driver-biz}. The other is with the extra specs: -{volume_backend_name: driver-net}. - - -What can be tracked during volume migration -------------------------------------------- -The volume migration is an administrator only action and it may take -a relatively long time to finish. The property ‘migration status’ will -indicate the stage of the migration process for the volume. The -administrator can check the ‘migration status’ via the ‘cinder list’ -or ‘cinder show ’ command. The ‘cinder list’ command presents -a list of all the volumes with some properties displayed, including the -migration status, only to the administrator. However, the migration status -is not included if ‘cinder list’ is issued by an ordinary user. The -‘cinder show ’ will present all the detailed information of a -specific volume, including the migration status, only to the administrator. - -If the migration status of a volume shows ‘starting’, ‘migrating’ or -‘completing’, it means the volume is in the process of a migration.
-If the migration status is ‘success’, it means the migration has finished -and the previous migration of this volume succeeded. If the -migration status is ‘error’, it means the migration has finished and -the previous migration of this volume failed. - - -How to implement volume migration for a back-end driver -------------------------------------------------------- -There are two kinds of implementations for the volume migration currently -in Cinder. - -The first is the generic host-assisted migration, which consists of two -different transfer modes, block-based and file-based. This implementation -is based on the volume attachment to the node of cinder volume service, -c-vol node. Any back-end driver supporting iSCSI will be able to support -the generic host-assisted migration for sure. The back-end driver without -iSCSI supported needs to be tested to decide if it supports this kind of -migration. The block-based transfer mode is done by ‘dd’ command, -applying to drivers like LVM, Storwize, etc, and the file-based transfer -mode is done by file copy, typically applying to the RBD driver. - -The second is the driver specific migration. -Since some storage back-ends have their special commands to copy the volume, -Cinder also provides a way for them to implement in terms of their own -internal commands to migrate. - -If the volume is migrated between two nodes configured with the same -storage back-end, the migration will be optimized by calling the method -migrate_volume in the driver, if the driver provides an implementation for -it to migrate the volume within the same back-end, and will fallback to -the generic host-assisted migration provided in the manager, if no such -implementation is found or this implementation is not applicable for -this migration. - -If your storage driver in Cinder provides iSCSI support, it should -naturally work under the generic host-assisted migration, when ---force-host-copy is set to True from the API request. 
Normally you -do not need to change any code, unless you need to transfer the volume -from your driver via a different way from the block-based transfer -or the file-based transfer. - -If your driver uses a network connection to communicate the block data -itself, you can use file I/O to participate in migration. Please take -the RBD driver as a reference for this implementation. - -If you would like to implement a driver specific volume migration for -your driver, the API method associated with the driver specific migration -is the following admin only method: - - migrate_volume(self, ctxt, volume, host) - -If your driver is taken as the destination back-end for a generic host-assisted -migration and your driver needs to update the volume model after a successful -migration, you need to implement the following method for your driver: - - update_migrated_volume(self, ctxt, volume, new_volume, original_volume_status): - - -Required methods ----------------- -There is one mandatory method that needs to be implemented for -the driver to implement the driver specific volume migration. - -**migrate_volume** - -Used to migrate the volume directly if source and destination are -managed by same storage. - -There is one optional method that could be implemented for -the driver to implement the generic host-assisted migration. - -**update_migrated_volume** - -Used to return the key-value pairs to update the volume model after -a successful migration. The key-value pairs returned are supposed to -be the final values your driver would like to be in the volume model, -if a migration is completed. - -This method can be used in a generally wide range, but the most common -use case covered in this method is to rename the back-end name to the -original volume id in your driver to make sure that the back-end still -keeps the same id or name as it is before the volume migration. For -this use case, there are two important fields: _name_id and -provider_location. 
- -The field _name_id is used to map the cinder volume id and the back-end -id or name. The default value is None, which means the cinder -volume id is the same as the back-end id or name. If they are different, -_name_id is used to save the back-end id or name. - -The field provider_location is used to save the export information, -created by the volume attach. This field is optional, since some drivers -support the export creation and some do not. It is the driver -maintainer's responsibility to decide what this field needs to be. - -If the back-end id or name is renamed successfully, this method can -return {'_name_id': None, 'provider_location': None}. It is the choice -for your driver to implement this method and decide what use cases should -be covered. - diff --git a/doc/source/contributor/releasenotes.rst b/doc/source/contributor/releasenotes.rst deleted file mode 100644 index b74b2a373..000000000 --- a/doc/source/contributor/releasenotes.rst +++ /dev/null @@ -1,64 +0,0 @@ -.. - Copyright 2015 Intel Corporation - All Rights Reserved. - - Licensed under the Apache License, Version 2.0 (the "License"); you may - not use this file except in compliance with the License. You may obtain - a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - License for the specific language governing permissions and limitations - under the License. - -Release notes -============= - -The release notes for a patch should be included in the patch.
- -If the following applies to the patch, a release note is required: - -* Upgrades - - * The deployer needs to take an action when upgrading - * A new config option is added that the deployer should consider changing - from the default - * A configuration option is deprecated or removed - -* Features - - * A new feature or driver is implemented - * Feature is deprecated or removed - * Current behavior is changed - -* Bugs - - * A security bug is fixed - * A long-standing or important bug is fixed - -* APIs - - * REST API changes - -Cinder uses `reno `_ to -generate release notes. Please read the docs for details. In summary, use - -.. code-block:: bash - - $ tox -e venv -- reno new - -Then edit the sample file that was created and push it with your change. - -To see the results: - -.. code-block:: bash - - $ git commit # Commit the change because reno scans git log. - - $ tox -e releasenotes - -Then look at the generated release notes files in releasenotes/build/html in -your favorite browser. diff --git a/doc/source/contributor/replication.rst b/doc/source/contributor/replication.rst deleted file mode 100644 index f9420bb0d..000000000 --- a/doc/source/contributor/replication.rst +++ /dev/null @@ -1,475 +0,0 @@ -Replication -=========== - -For backend devices that offer replication features, Cinder provides a common -mechanism for exposing that functionality on a per volume basis while still -trying to allow flexibility for the varying implementation and requirements of -all the different backend devices. - -There are 2 sides to Cinder's replication feature, the core mechanism and the -driver specific functionality, and in this document we'll only be covering the -driver side of things aimed at helping vendors implement this functionality in -their drivers in a way consistent with all other drivers. 
- -Although we'll be focusing on the driver implementation there will also be some -mentions on deployment configurations to provide a clear picture to developers -and help them avoid implementing custom solutions to solve things that were -meant to be done via the cloud configuration. - -Overview --------- - -As a general rule replication is enabled and configured via the cinder.conf -file under the driver's section, and volume replication is requested through -the use of volume types. - -*NOTE*: Current replication implementation is v2.1 and it's meant to solve a -very specific use case, the "smoking hole" scenario. It's critical that you -read the Use Cases section of the spec here: -https://specs.openstack.org/openstack/cinder-specs/specs/mitaka/cheesecake.html - -From a user's perspective volumes will be created using specific volume types, -even if it is the default volume type, and they will either be replicated or -not, which will be reflected on the ``replication_status`` field of the volume. -So in order to know if a snapshot is replicated we'll have to check its volume. - -After the loss of the primary storage site all operations on the resources will -fail and VMs will no longer have access to the data. It is then when the Cloud -Administrator will issue the ``failover-host`` command to make the -cinder-volume service perform the failover. - -After the failover is completed, the Cinder volume service will start using the -failed-over secondary storage site for all operations and the user will once -again be able to perform actions on all resources that were replicated, while -all other resources will be in error status since they are no longer available. 
- -Storage Device configuration ---------------------------- - -Most storage devices will require configuration changes to enable the -replication functionality, and this configuration process is vendor and storage -device specific so it is not contemplated by the Cinder core replication -functionality. - -It is up to the vendors whether they want to handle this device configuration -in the Cinder driver or as a manual process, but the most common approach is to -avoid including this configuration logic into Cinder and having the Cloud -Administrators do a manual process following a specific guide to enable -replication on the storage device before configuring the cinder volume service. - -Service configuration --------------------- - -The way to enable and configure replication is common to all drivers and it is -done via the ``replication_device`` configuration option that goes in the -driver's specific section in the ``cinder.conf`` configuration file. - -``replication_device`` is a multi dictionary option, that should be specified -for each replication target device the admin wants to configure. - -While it is true that all drivers use the same ``replication_device`` -configuration option this doesn't mean that they will all have the same data, -as there is only one standardized and **REQUIRED** key in the configuration -entry, all others are vendor specific: - -- backend_id: - -Values of ``backend_id`` keys are used to uniquely identify within the driver -each of the secondary sites, although they can be reused on different driver -sections. - -These unique identifiers will be used by the failover mechanism as well as in -the driver initialization process, and the only requirement is that it must -never have the value "default". - -An example driver configuration for a device with multiple replication targets -is shown below:: - - .....
- [driver-biz] - volume_driver=xxxx - volume_backend_name=biz - - [driver-baz] - volume_driver=xxxx - volume_backend_name=baz - - [driver-foo] - volume_driver=xxxx - volume_backend_name=foo - replication_device = backend_id:vendor-id-1,unique_key:val.... - replication_device = backend_id:vendor-id-2,unique_key:val.... - -In this example the result of calling -``self.configuration.safe_get('replication_device')`` within the driver is the -following list:: - - [{backend_id: vendor-id-1, unique_key: val1}, - {backend_id: vendor-id-2, unique_key: val2}] - -It is expected that if a driver is configured with multiple replication -targets, that replicated volumes are actually replicated on **all targets**. - -Besides specific replication device keys defined in the ``replication_device``, -a driver may also have additional normal configuration options in the driver -section related with the replication to allow Cloud Administrators to configure -things like timeouts. - -Capabilities reporting ---------------------- - -There are 2 new replication stats/capability keys that drivers supporting -replication v2.1 should be reporting: ``replication_enabled`` and -``replication_targets``:: - - stats["replication_enabled"] = True|False - stats["replication_targets"] = [...] - -If a driver is behaving correctly we can expect the ``replication_targets`` -field to be populated whenever ``replication_enabled`` is set to ``True``, and -it is expected to either be set to ``[]`` or be missing altogether when -``replication_enabled`` is set to ``False``. - -The purpose of the ``replication_enabled`` field is to be used by the scheduler -in volume types for creation and migrations. - -As for the ``replication_targets`` field it is only provided for informational -purposes so it can be retrieved through the ``get_capabilities`` using the -admin REST API, but it will not be used for validation at the API layer.
That -way Cloud Administrators will be able to know available secondary sites where -they can failover. - -Volume Types / Extra Specs ---------------------------- - -The way to control the creation of volumes on a cloud with backends that have -replication enabled is, like with many other features, through the use of -volume types. - -We won't go into the details of volume type creation, but suffice to say that -you will most likely want to use volume types to discriminate between -replicated and non replicated volumes and be explicit about it so that non -replicated volumes won't end up in a replicated backend. - -Since the driver is reporting the ``replication_enabled`` key, we just need to -require it for replication volume types adding ``replication_enabled=' -True``` and also specifying it for all non replicated volume types -``replication_enabled=' False'``. - -It's up to the driver to parse the volume type info on create and set things up -as requested. While the scoping key can be anything, it's strongly recommended -that all backends utilize the same key (replication) for consistency and to -make things easier for the Cloud Administrator. - -Additional replication parameters can be supplied to the driver using vendor -specific properties through the volume type's extra-specs so they can be used -by the driver at volume creation time, or retype. - -It is up to the driver to parse the volume type info on create and retype to -set things up as requested. A good pattern to get a custom parameter from a -given volume instance is this:: - - extra_specs = getattr(volume.volume_type, 'extra_specs', {}) - custom_param = extra_specs.get('custom_param', 'default_value') - -It may seem convoluted, but we must be careful when retrieving the -``extra_specs`` from the ``volume_type`` field as it could be ``None``. 
- -Vendors should try to avoid obfuscating their custom properties and expose them -using the ``_init_vendor_properties`` method so they can be checked by the -Cloud Administrator using the ``get_capabilities`` REST API. - -*NOTE*: For storage devices doing per backend/pool replication the use of -volume types is also recommended. - -Volume creation ---------------- - -Drivers are expected to honor the replication parameters set in the volume type -during creation, retyping, or migration. - -When implementing the replication feature there are some driver methods that -will most likely need modifications -if they are implemented in the driver -(since some are optional)- to make sure that the backend is replicating volumes -that need to be replicated and not replicating those that don't need to be: - -- ``create_volume`` -- ``create_volume_from_snapshot`` -- ``create_cloned_volume`` -- ``retype`` -- ``clone_image`` -- ``migrate_volume`` - -In these methods the driver will have to check the volume type to see if the -volumes need to be replicated, we could use the same pattern described in the -`Volume Types / Extra Specs`_ section:: - - def _is_replicated(self, volume): - specs = getattr(volume.volume_type, 'extra_specs', {}) - return specs.get('replication_enabled') == ' True' - -But it is **not** the recommended mechanism, and the ``is_replicated`` method -available in volumes and volume types versioned objects instances should be -used instead. - -Drivers are expected to keep the ``replication_status`` field up to date and in -sync with reality, usually as specified in the volume type. To do so in above -mentioned methods' implementation they should use the update model mechanism -provided for each one of those methods. One must be careful since the update -mechanism may be different from one method to another. 
- -What this means is that most of these methods should be returning a -``replication_status`` key with the value set to ``enabled`` in the model -update dictionary if the volume type is enabling replication. There is no need -to return the key with the value of ``disabled`` if it is not enabled since -that is the default value. - -In the case of the ``create_volume``, and ``retype`` method there is no need to -return the ``replication_status`` in the model update since it has already been -set by the scheduler on creation using the extra spec from the volume type. And -on ``migrate_volume`` there is no need either since there is no change to the -``replication_status``. - -*NOTE*: For storage devices doing per backend/pool replication it is not -necessary to check the volume type for the ``replication_enabled`` key since -all created volumes will be replicated, but they are expected to return the -``replication_status`` in all those methods, including the ``create_volume`` -method since the driver may receive a volume creation request without the -replication enabled extra spec and therefore the driver will not have set the -right ``replication_status`` and the driver needs to correct this. - -Besides the ``replication_status`` field that drivers need to update there are -other fields in the database related to the replication mechanism that the -drivers can use: - -- ``replication_extended_status`` -- ``replication_driver_data`` - -These fields are string type fields with a maximum size of 255 characters and -they are available for drivers to use internally as they see fit for their -normal replication operation. So they can be assigned in the model update and -later on used by the driver, for example during the failover. - -To avoid using magic strings drivers must use values defined by the -``ReplicationStatus`` class in ``cinder/objects/fields.py`` file and -these are: - -- ``ERROR``: When setting the replication failed on creation, retype, or - migrate.
This should be accompanied by the volume status ``error``. -- ``ENABLED``: When the volume is being replicated. -- ``DISABLED``: When the volume is not being replicated. -- ``FAILED_OVER``: After a volume has been successfully failed over. -- ``FAILOVER_ERROR``: When there was an error during the failover of this - volume. -- ``NOT_CAPABLE``: When we failed-over but the volume was not replicated. - -The first 3 statuses revolve around the volume creation and the last 3 around -the failover mechanism. - -The only status that should not be used for the volume's ``replication_status`` -is the ``FAILING_OVER`` status. - -Whenever we are referring to values of the ``replication_status`` in this -document we will be referring to the ``ReplicationStatus`` attributes and not a -literal string, so ``ERROR`` means -``cinder.objects.field.ReplicationStatus.ERROR`` and not the string "ERROR". - -Failover --------- - -This is the mechanism used to instruct the cinder volume service to fail over -to a secondary/target device. - -Keep in mind the use case is that the primary backend has died a horrible death -and is no longer valid, so any volumes that were on the primary and were not -being replicated will no longer be available. - -The method definition required from the driver to implement the failback -mechanism is as follows:: - - def failover_host(self, context, volumes, secondary_id=None): - -There are several things that are expected of this method: - -- Promotion of a secondary storage device to primary -- Generating the model updates -- Changing internally to access the secondary storage device for all future - requests. - -If no secondary storage device is provided to the driver via the ``backend_id`` -argument (it is equal to ``None``), then it is up to the driver to choose which -storage device to failover to. 
-    [{
-        'volume_id': volumes[0].id,
-        'updates': {
-            'provider_id': new_provider_id1,
-            ...
-        },
-    },
-    {
-        'volume_id': volumes[1].id,
-        'updates': {
-            'provider_id': new_provider_id2,
-            'replication_status': fields.ReplicationStatus.FAILOVER_ERROR,
-            ...
-        },
-    }]
-argument ``secondary_id`` with the value of ``default``. That is why it was
-forbidden to use the ``default`` on the target configuration in the cinder
-configuration file.
-If the failback of any of the volumes fails the driver must return
-In most cases the driver may not need to do anything, and then it doesn't need
-to define any of these methods as long as it is a child class of the ``BaseVD``
-class that already implements them as noops.
-introducing downtime of control plane services. Operator can simply upgrade
-Cinder service instances one-by-one.
-Please note that a proper upgrade solution should support both
-release-to-release upgrades as well as upgrades of deployments following the
-Cinder master more closely.
-In general incompatible database schema migrations can be tracked to ALTER and
-DROP SQL commands issued either against a column or table. This is
-why a unit test that blocks such migrations was introduced.
-service won't be able to report its status one or two times (and
-:code:`manager` module should be able to tolerate receiving messages in an
-older version).
-After all the services in the deployment are updated, the operator should
-restart all the services or send them a SIGHUP signal to force reload of
-version pins.
-The process of increasing the major version is explained in detail in `Nova's
-version bump is not required in this case is because the actual data doesn't
-downgrade the :code:`request_spec` before sending it over RPC. And this is
-where o.vo come in handy.
-In such case the backporting consists of removing the newly added fields.
For -example if we add 3 new fields to the Group object in version 1.1, then we need -to remove them if backporting to earlier versions:: - - from oslo_utils import versionutils - - def obj_make_compatible(self, primitive, target_version): - super(Group, self).obj_make_compatible(primitive, target_version) - target_version = versionutils.convert_version_to_tuple(target_version) - if target_version < (1, 1): - for key in ('group_snapshot_id', 'source_group_id', - 'group_snapshots'): - primitive.pop(key, None) - -As time goes on we will be adding more and more new fields to our objects, so -we may end up with a long series of if and for statements like in the Volume -object:: - - from oslo_utils import versionutils - - def obj_make_compatible(self, primitive, target_version): - super(Volume, self).obj_make_compatible(primitive, target_version) - target_version = versionutils.convert_version_to_tuple(target_version) - if target_version < (1, 4): - for key in ('cluster', 'cluster_name'): - primitive.pop(key, None) - if target_version < (1, 5): - for key in ('group', 'group_id'): - primitive.pop(key, None) - -So a different pattern would be preferable as it will make the backporting -easier for future additions:: - - from oslo_utils import versionutils - - def obj_make_compatible(self, primitive, target_version): - added_fields = (((1, 4), ('cluster', 'cluster_name')), - ((1, 5), ('group', 'group_id'))) - super(Volume, self).obj_make_compatible(primitive, target_version) - target_version = versionutils.convert_version_to_tuple(target_version) - for version, remove_fields in added_fields: - if target_version < version: - for obj_field in remove_fields: - primitive.pop(obj_field, None) diff --git a/doc/source/contributor/rpc.rst b/doc/source/contributor/rpc.rst deleted file mode 100644 index 1ee6dce66..000000000 --- a/doc/source/contributor/rpc.rst +++ /dev/null @@ -1,154 +0,0 @@ -.. - Copyright (c) 2010 Citrix Systems, Inc. - All Rights Reserved. 
an adapter class which takes care of marshaling and unmarshaling of messages into function calls
a Direct Consumer comes to life if (and only if) a rpc.call operation is executed
- * Topic Consumer: a Topic Consumer comes to life as soon as a Worker is instantiated and exists throughout its life-cycle; this object is used to receive messages from the queue and it invokes the appropriate action as defined by the Worker role. A Topic Consumer connects to the same topic-based exchange either via a shared queue or via a unique exclusive queue. Every Worker has two topic consumers, one that is addressed only during rpc.cast operations (and it connects to a shared queue whose exchange key is 'topic') and the other that is addressed only during rpc.call operations (and it connects to a unique queue whose exchange key is 'topic.host'). - * Direct Publisher: a Direct Publisher comes to life only during rpc.call operations and it is instantiated to return the message required by the request/response operation. The object connects to a direct-based exchange whose identity is dictated by the incoming message. - * Topic Exchange: The Exchange is a routing table that exists in the context of a virtual host (the multi-tenancy mechanism provided by Qpid or RabbitMQ); its type (such as topic vs. direct) determines the routing policy; a message broker node will have only one topic-based exchange for every topic in Cinder. - * Direct Exchange: this is a routing table that is created during rpc.call operations; there are many instances of this kind of exchange throughout the life-cycle of a message broker node, one for each rpc.call invoked. - * Queue Element: A Queue is a message bucket. Messages are kept in the queue until a Consumer (either Topic or Direct Consumer) connects to the queue and fetch it. Queues can be shared or can be exclusive. Queues whose routing key is 'topic' are shared amongst Workers of the same personality. - -.. image:: /images/rpc/rabt.png - :width: 60% - -.. - -RPC Calls ---------- - -The diagram below shows the message flow during an rpc.call operation: - - 1. 
a Topic Publisher is instantiated to send the message request to the queuing system; immediately before the publishing operation, a Direct Consumer is instantiated to wait for the response message. - 2. once the message is dispatched by the exchange, it is fetched by the Topic Consumer dictated by the routing key (such as 'topic.host') and passed to the Worker in charge of the task. - 3. once the task is completed, a Direct Publisher is allocated to send the response message to the queuing system. - 4. once the message is dispatched by the exchange, it is fetched by the Direct Consumer dictated by the routing key (such as 'msg_id') and passed to the Invoker. - -.. image:: /images/rpc/flow1.png - :width: 60% - -.. - -RPC Casts ---------- - -The diagram below the message flow during an rpc.cast operation: - - 1. A Topic Publisher is instantiated to send the message request to the queuing system. - 2. Once the message is dispatched by the exchange, it is fetched by the Topic Consumer dictated by the routing key (such as 'topic') and passed to the Worker in charge of the task. - -.. image:: /images/rpc/flow2.png - :width: 60% - -.. - -AMQP Broker Load ----------------- - -At any given time the load of a message broker node running either Qpid or RabbitMQ is function of the following parameters: - - * Throughput of API calls: the number of API calls (more precisely rpc.call ops) being served by the OpenStack cloud dictates the number of direct-based exchanges, related queues and direct consumers connected to them. - * Number of Workers: there is one queue shared amongst workers with the same personality; however there are as many exclusive queues as the number of workers; the number of workers dictates also the number of routing keys within the topic-based exchange, which is shared amongst all workers. - -The figure below shows the status of a RabbitMQ node after Cinder components' bootstrap in a test environment (phantom is hostname). 
Exchanges and queues being created by Cinder components are: - - * Exchanges - 1. cinder-scheduler_fanout (fanout exchange) - 2. cinder-volume.phantom@lvm_fanout (fanout exchange) - 3. cinder-volume_fanout (fanout exchange) - 4. openstack (topic exchange) - * Queues - 1. cinder-scheduler - 2. cinder-scheduler.phantom - 3. cinder-scheduler_fanout_572c35c0fbf94560b4c49572d5868ea5 - 4. cinder-volume - 5. cinder-volume.phantom@lvm - 6. cinder-volume.phantom@lvm.phantom - 7. cinder-volume.phantom@lvm_fanout_cb3387f7a7684b1c9ee5f2f88325b7d5 - 8. cinder-volume_fanout_9017a1a7f4b44867983dcddfb56531a2 - -.. image:: /images/rpc/state.png - :width: 60% - -.. - -RabbitMQ Gotchas ----------------- - -Cinder uses Kombu to connect to the RabbitMQ environment. Kombu is a Python library that in turn uses AMQPLib, a library that implements the standard AMQP 0.8 at the time of writing. When using Kombu, Invokers and Workers need the following parameters in order to instantiate a Connection object that connects to the RabbitMQ server (please note that most of the following material can be also found in the Kombu documentation; it has been summarized and revised here for sake of clarity): - - * Hostname: The hostname to the AMQP server. - * Userid: A valid username used to authenticate to the server. - * Password: The password used to authenticate to the server. - * Virtual_host: The name of the virtual host to work with. This virtual host must exist on the server, and the user must have access to it. Default is "/". - * Port: The port of the AMQP server. Default is 5672 (amqp). - -The following parameters are default: - - * Insist: insist on connecting to a server. In a configuration with multiple load-sharing servers, the Insist option tells the server that the client is insisting on a connection to the specified server. Default is False. - * Connect_timeout: the timeout in seconds before the client gives up connecting to the server. The default is no timeout. 
- * SSL: use SSL to connect to the server. The default is False. - -More precisely Consumers need the following parameters: - - * Connection: the above mentioned Connection object. - * Queue: name of the queue. - * Exchange: name of the exchange the queue binds to. - * Routing_key: the interpretation of the routing key depends on the value of the exchange_type attribute. - - * Direct exchange: if the routing key property of the message and the routing_key attribute of the queue are identical, then the message is forwarded to the queue. - * Fanout exchange: messages are forwarded to the queues bound the exchange, even if the binding does not have a key. - * Topic exchange: if the routing key property of the message matches the routing key of the key according to a primitive pattern matching scheme, then the message is forwarded to the queue. The message routing key then consists of words separated by dots (".", like domain names), and two special characters are available; star ("*") and hash ("#"). The star matches any word, and the hash matches zero or more words. For example ".stock.#" matches the routing keys "usd.stock" and "eur.stock.db" but not "stock.nasdaq". - - * Durable: this flag determines the durability of both exchanges and queues; durable exchanges and queues remain active when a RabbitMQ server restarts. Non-durable exchanges/queues (transient exchanges/queues) are purged when a server restarts. It is worth noting that AMQP specifies that durable queues cannot bind to transient exchanges. Default is True. - * Auto_delete: if set, the exchange is deleted when all queues have finished using it. Default is False. - * Exclusive: exclusive queues (such as non-shared) may only be consumed from by the current connection. When exclusive is on, this also implies auto_delete. Default is False. - * Exchange_type: AMQP defines several default exchange types (routing algorithms) that covers most of the common messaging use cases. 
- * Auto_ack: acknowledgement is handled automatically once messages are received. By default auto_ack is set to False, and the receiver is required to manually handle acknowledgment. - * No_ack: it disable acknowledgement on the server-side. This is different from auto_ack in that acknowledgement is turned off altogether. This functionality increases performance but at the cost of reliability. Messages can get lost if a client dies before it can deliver them to the application. - * Auto_declare: if this is True and the exchange name is set, the exchange will be automatically declared at instantiation. Auto declare is on by default. - Publishers specify most the parameters of Consumers (such as they do not specify a queue name), but they can also specify the following: - * Delivery_mode: the default delivery mode used for messages. The value is an integer. The following delivery modes are supported by RabbitMQ: - - * 1 or "transient": the message is transient. Which means it is stored in memory only, and is lost if the server dies or restarts. - * 2 or "persistent": the message is persistent. Which means the message is stored both in-memory, and on disk, and therefore preserved if the server dies or restarts. - -The default value is 2 (persistent). During a send operation, Publishers can override the delivery mode of messages so that, for example, transient messages can be sent over a durable queue. diff --git a/doc/source/contributor/scheduler.rst b/doc/source/contributor/scheduler.rst deleted file mode 100644 index f5b33c96b..000000000 --- a/doc/source/contributor/scheduler.rst +++ /dev/null @@ -1,61 +0,0 @@ -.. - Copyright 2010-2011 United States Government as represented by the - Administrator of the National Aeronautics and Space Administration. - All Rights Reserved. - - Licensed under the Apache License, Version 2.0 (the "License"); you may - not use this file except in compliance with the License. 
You may obtain - a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - License for the specific language governing permissions and limitations - under the License. - -Scheduler -========= - -The :mod:`cinder.scheduler.manager` Module ------------------------------------------- - -.. automodule:: cinder.scheduler.manager - :noindex: - :members: - :undoc-members: - :show-inheritance: - - -The :mod:`cinder.scheduler.driver` Module ------------------------------------------ - -.. automodule:: cinder.scheduler.driver - :noindex: - :members: - :undoc-members: - :show-inheritance: - - -The :mod:`cinder.scheduler.filter_scheduler` Driver ---------------------------------------------------- - -.. automodule:: cinder.scheduler.filter_scheduler - :noindex: - :members: - :undoc-members: - :show-inheritance: - - -Tests ------ - -The :mod:`cinder.tests.unit.scheduler` Module -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -.. automodule:: cinder.tests.unit.scheduler - :noindex: - :members: - :undoc-members: - :show-inheritance: diff --git a/doc/source/contributor/services.rst b/doc/source/contributor/services.rst deleted file mode 100644 index e2dd6d2b0..000000000 --- a/doc/source/contributor/services.rst +++ /dev/null @@ -1,55 +0,0 @@ -.. - Copyright 2010-2011 United States Government as represented by the - Administrator of the National Aeronautics and Space Administration. - All Rights Reserved. - - Licensed under the Apache License, Version 2.0 (the "License"); you may - not use this file except in compliance with the License. 
You may obtain - a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - License for the specific language governing permissions and limitations - under the License. - -.. _service_manager_driver: - -Services, Managers and Drivers -============================== - -The responsibilities of Services, Managers, and Drivers, can be a bit confusing to people that are new to cinder. This document attempts to outline the division of responsibilities to make understanding the system a little bit easier. - -Currently, Managers and Drivers are specified by flags and loaded using utils.load_object(). This method allows for them to be implemented as singletons, classes, modules or objects. As long as the path specified by the flag leads to an object (or a callable that returns an object) that responds to getattr, it should work as a manager or driver. - - -The :mod:`cinder.service` Module --------------------------------- - -.. automodule:: cinder.service - :noindex: - :members: - :undoc-members: - :show-inheritance: - - -The :mod:`cinder.manager` Module --------------------------------- - -.. automodule:: cinder.manager - :noindex: - :members: - :undoc-members: - :show-inheritance: - - -Implementation-Specific Drivers -------------------------------- - -A manager will generally load a driver for some of its tasks. The driver is responsible for specific implementation details. Anything running shell commands on a host, or dealing with other non-python code should probably be happening in a driver. - -Drivers should minimize touching the database, although it is currently acceptable for implementation specific data. This may be reconsidered at some point. 
- -It usually makes sense to define an Abstract Base Class for the specific driver (i.e. VolumeDriver), to define the methods that a different driver would need to implement. diff --git a/doc/source/contributor/testing.rst b/doc/source/contributor/testing.rst deleted file mode 100644 index 71509bd82..000000000 --- a/doc/source/contributor/testing.rst +++ /dev/null @@ -1,165 +0,0 @@ -Testing -======= - -Cinder contains a few different test suites in the cinder/tests/ directory. The -different test suites are Unit Tests, Functional Tests, and Tempest Tests. - -Test Types ----------- - - -Unit Tests -~~~~~~~~~~ - -Unit tests are tests for individual methods, with at most a small handful of -modules involved. Mock should be used to remove any external dependencies. - -All significant code changes should have unit test coverage validating the code -happy path and any failure paths. - -Any proposed code change will be automatically rejected by the OpenStack -Jenkins server [#f1]_ if the change causes unit test failures. - -Functional Tests -~~~~~~~~~~~~~~~~ - -Functional tests validate a code path within Cinder. These tests should -validate the interaction of various modules within the project to verify the -code is logically correct. - -Functional tests run with a database present and may start Cinder services to -accept requests. These tests should not need to access an other OpenStack -non-Cinder services. - -Tempest Tests -~~~~~~~~~~~~~ - -The tempest tests in the Cinder tree validate the operational correctness -between Cinder and external components such as Nova, Glance, etc. These are -integration tests driven via public APIs to verify actual end user usage -scenarios. - -Running the tests ------------------ - -There are a number of ways to run tests currently, and there's a combination of -frameworks used depending on what commands you use. The preferred method is to -use tox, which calls ostestr via the tox.ini file. 
- -Unit Tests -~~~~~~~~~~ - -To run all unit tests simply run:: - - tox - -This will create a virtual environment, load all the packages from -test-requirements.txt and run all unit tests as well as run flake8 and hacking -checks against the code. - -You may run individual test targets, for example only py27 tests, by running:: - - tox -e py27 - -Note that you can inspect the tox.ini file to get more details on the available -options and what the test run does by default. - -Functional Tests -~~~~~~~~~~~~~~~~ - -To run all functional tests, run:: - - tox -e functional - -Tempest Tests -~~~~~~~~~~~~~ - -Tempest tests in the Cinder tree are "plugged in" to the normal tempest test -execution. To ensure the Cinder tests are picked up when running tempest, run:: - - cd /opt/stack/tempest - tox -e all-plugin - -More information about tempest can be found in the `Tempest Documentation -`_. - -Database Setup -~~~~~~~~~~~~~~~ - -Some unit and functional tests will use a local database. You can use -``tools/test-setup.sh`` to set up your local system the same way as -it's setup in the CI environment. - -Running a subset of tests using tox ------------------------------------ -One common activity is to just run a single test, you can do this with tox -simply by specifying to just run py27 or py35 tests against a single test:: - - tox -epy27 -- -n cinder.tests.unit.test_volume.AvailabilityZoneTestCase.test_list_availability_zones_cached - -Or all tests in the test_volume.py file:: - - tox -epy27 -- -n cinder.tests.unit.test_volume - -You may also use regular expressions to run any matching tests:: - - tox -epy27 -- -r test_volume - -For more information on these options and how to run tests, please see the -`ostestr documentation `_. - -Gotchas -------- - -**Running Tests from Shared Folders** - -If you are running the unit tests from a shared folder, you may see tests start -to fail or stop completely as a result of Python lockfile issues. 
You -can get around this by manually setting or updating the following line in -``cinder/tests/conf_fixture.py``:: - - CONF['lock_path'].SetDefault('/tmp') - -Note that you may use any location (not just ``/tmp``!) as long as it is not -a shared folder. - -**Running py35 tests** - -You will need to install python3-dev in order to get py35 tests to run. If you -do not have this, you will get the following:: - - netifaces.c:1:20: fatal error: Python.h: No such file or directory - #include - ^ - compilation terminated. - error: command 'x86_64-linux-gnu-gcc' failed with exit status 1 - - ---------------------------------------- - - ERROR: could not install deps [-r/opt/stack/cinder/test-requirements.txt, - oslo.versionedobjects[fixtures]]; v = InvocationError('/opt/stack/cinder/ - .tox/py35/bin/pip install -r/opt/stack/cinder/test-requirements.txt - oslo.versionedobjects[fixtures] (see /opt/stack/cinder/.tox/py35/log/py35-1.log)', 1) - _______________________________________________________________ summary _______________________________________________________________ - ERROR: py35: could not install deps [-r/opt/stack/cinder/test-requirements.txt, - oslo.versionedobjects[fixtures]]; v = InvocationError('/opt/stack/cinder/ - .tox/py35/bin/pip install -r/opt/stack/cinder/test-requirements.txt - oslo.versionedobjects[fixtures] (see /opt/stack/cinder/.tox/py35/log/py35-1.log)', 1) - -To Fix: - -- On Ubuntu/Debian:: - - sudo apt-get install python3-dev - -- On Fedora 21/RHEL7/CentOS7:: - - sudo yum install python3-devel - -- On Fedora 22 and higher:: - - sudo dnf install python3-devel - -.. rubric:: Footnotes - -.. [#f1] See :doc:`jenkins`. 
diff --git a/doc/source/contributor/threading.rst b/doc/source/contributor/threading.rst deleted file mode 100644 index da2b2f535..000000000 --- a/doc/source/contributor/threading.rst +++ /dev/null @@ -1,57 +0,0 @@ -Threading model -=============== - -All OpenStack services use *green thread* model of threading, implemented -through using the Python `eventlet `_ and -`greenlet `_ libraries. - -Green threads use a cooperative model of threading: thread context -switches can only occur when specific eventlet or greenlet library calls are -made (e.g., sleep, certain I/O calls). From the operating system's point of -view, each OpenStack service runs in a single thread. - -The use of green threads reduces the likelihood of race conditions, but does -not completely eliminate them. In some cases, you may need to use the -``@utils.synchronized(...)`` decorator to avoid races. - -In addition, since there is only one operating system thread, a call that -blocks that main thread will block the entire process. - -Yielding the thread in long-running tasks ------------------------------------------ -If a code path takes a long time to execute and does not contain any methods -that trigger an eventlet context switch, the long-running thread will block -any pending threads. - -This scenario can be avoided by adding calls to the eventlet sleep method -in the long-running code path. The sleep call will trigger a context switch -if there are pending threads, and using an argument of 0 will avoid introducing -delays in the case that there is only a single green thread:: - - from eventlet import greenthread - ... - greenthread.sleep(0) - -In current code, time.sleep(0)does the same thing as greenthread.sleep(0) if -time module is patched through eventlet.monkey_patch(). To be explicit, we recommend -contributors use ``greenthread.sleep()`` instead of ``time.sleep()``. 
- -MySQL access and eventlet -------------------------- -There are some MySQL DB API drivers for oslo.db, like `PyMySQL`_, MySQL-python -etc. PyMySQL is the default MySQL DB API driver for oslo.db, and it works well with -eventlet. MySQL-python uses an external C library for accessing the MySQL database. -Since eventlet cannot use monkey-patching to intercept blocking calls in a C library, -queries to the MySQL database using libraries like MySQL-python will block the main -thread of a service. - -The Diablo release contained a thread-pooling implementation that did not -block, but this implementation resulted in a `bug`_ and was removed. - -See this `mailing list thread`_ for a discussion of this issue, including -a discussion of the `impact on performance`_. - -.. _bug: https://bugs.launchpad.net/cinder/+bug/838581 -.. _mailing list thread: https://lists.launchpad.net/openstack/msg08118.html -.. _impact on performance: https://lists.launchpad.net/openstack/msg08217.html -.. _PyMySQL: https://wiki.openstack.org/wiki/PyMySQL_evaluation diff --git a/doc/source/contributor/user_messages.rst b/doc/source/contributor/user_messages.rst deleted file mode 100644 index b5da9227a..000000000 --- a/doc/source/contributor/user_messages.rst +++ /dev/null @@ -1,73 +0,0 @@ -User Messages -============= - -User messages are a way to inform users about the state of asynchronous -operations. One example would be notifying the user of why a volume -provisioning request failed. These messages can be requested via the -/messages API. All user visible messages must be defined in the permitted -messages module in order to prevent sharing sensitive information with users. 
- - -Example message generation:: - - from cinder import context - from cinder.message import api as message_api - from cinder.message import defined_messages - from cinder.message import resource_types - - self.message_api = message_api.API() - - context = context.RequestContext() - project_id = '6c430ede-9476-4128-8838-8d3929ced223' - volume_id = 'f292cc0c-54a7-4b3b-8174-d2ff82d87008' - - self.message_api.create( - context, - defined_messages.EventIds.UNABLE_TO_ALLOCATE, - project_id, - resource_type=resource_types.VOLUME, - resource_uuid=volume_id) - -Will produce the following:: - - GET /v3/6c430ede-9476-4128-8838-8d3929ced223/messages - { - "messages": [ - { - "id": "5429fffa-5c76-4d68-a671-37a8e24f37cf", - "event_id": "000002", - "user_message": "No storage could be allocated for this volume request.", - "message_level": "ERROR", - "resource_type": "VOLUME", - "resource_uuid": "f292cc0c-54a7-4b3b-8174-d2ff82d87008", - "created_at": 2015-08-27T09:49:58-05:00, - "guaranteed_until": 2015-09-27T09:49:58-05:00, - "request_id": "req-936666d2-4c8f-4e41-9ac9-237b43f8b848", - } - ] - } - - - -The Message API Module ----------------------- - -.. automodule:: cinder.message.api - :noindex: - :members: - :undoc-members: - -The Resource Types Module -------------------------- - -.. automodule:: cinder.message.resource_types - :noindex: - -The Permitted Messages Module ------------------------------ - -.. automodule:: cinder.message.defined_messages - :noindex: - :members: - :undoc-members: - :show-inheritance: diff --git a/doc/source/contributor/volume.rst b/doc/source/contributor/volume.rst deleted file mode 100644 index 02685bd12..000000000 --- a/doc/source/contributor/volume.rst +++ /dev/null @@ -1,63 +0,0 @@ -.. - Copyright 2010-2011 United States Government as represented by the - Administrator of the National Aeronautics and Space Administration. - All Rights Reserved. 
- - Licensed under the Apache License, Version 2.0 (the "License"); you may - not use this file except in compliance with the License. You may obtain - a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - License for the specific language governing permissions and limitations - under the License. - -Storage Volumes, Disks -====================== - -.. todo:: rework after iSCSI merge (see 'Old Docs') (todd or vish) - - -The :mod:`cinder.volume.manager` Module ---------------------------------------- - -.. automodule:: cinder.volume.manager - :noindex: - :members: - :undoc-members: - :show-inheritance: - -The :mod:`cinder.volume.driver` Module --------------------------------------- - -.. automodule:: cinder.volume.driver - :noindex: - :members: - :undoc-members: - :show-inheritance: - -Tests ------ - -The :mod:`cinder.tests.unit.volume` Module -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -.. automodule:: cinder.tests.unit.volume - :noindex: - :members: - :undoc-members: - :show-inheritance: - -Old Docs --------- - -Cinder uses iSCSI to export storage volumes from multiple storage nodes. These iSCSI exports are attached (using libvirt) directly to running instances. - -Cinder volumes are exported over the primary system VLAN (usually VLAN 1), and not over individual VLANs. - -The underlying volumes by default are LVM logical volumes, created on demand within a single large volume group. 
- - diff --git a/doc/source/images/rpc/arch.png b/doc/source/images/rpc/arch.png deleted file mode 100644 index 8f7d535b6..000000000 Binary files a/doc/source/images/rpc/arch.png and /dev/null differ diff --git a/doc/source/images/rpc/arch.svg b/doc/source/images/rpc/arch.svg deleted file mode 100644 index 02e4f5d64..000000000 --- a/doc/source/images/rpc/arch.svg +++ /dev/null @@ -1,292 +0,0 @@ - - - - - - - - - - - - - - - - - - - - - Page-1 - - - - Box.8 - Compute - - - - - - - Compute - - Box.2 - Volume Storage - - - - - - - VolumeStorage - - Box - Auth Manager - - - - - - - Auth Manager - - Box.4 - Cloud Controller - - - - - - - CloudController - - Box.3 - API Server - - - - - - - API Server - - Box.6 - Object Store - - - - - - - ObjectStore - - Box.7 - Node Controller - - - - - - - NodeController - - Dynamic connector - - - - Dynamic connector.11 - - - - Dynamic connector.12 - http - - - - - http - - Circle - Cinder-Manage - - - - - - - Cinder-Manage - - Circle.15 - Euca2ools - - - - - - - Euca2ools - - Dynamic connector.16 - - - - Dynamic connector.17 - - - - Sheet.15 - Project User Role Network VPN - - - - ProjectUserRoleNetworkVPN - - Sheet.16 - VM instance Security group Volume Snapshot VM image IP address... 
- - - - VM instanceSecurity groupVolumeSnapshotVM imageIP addressSSH keyAvailability zone - - Box.20 - Network Controller - - - - - - - Network Controller - - Box.5 - Storage Controller - - - - - - - Storage Controller - - Dot & arrow - - - - - - - - - - - - - Dot & arrow.14 - - - - - - - - - - - - - Dynamic connector.13 - - - - Sheet.22 - AMQP - - - - AMQP - - Sheet.23 - AMQP - - - - AMQP - - Sheet.24 - AMQP - - - - AMQP - - Sheet.25 - REST - - - - REST - - Sheet.26 - local method - - - - local method - - Sheet.27 - local method - - - - local method - - Sheet.28 - local method - - - - local method - - diff --git a/doc/source/images/rpc/flow1.png b/doc/source/images/rpc/flow1.png deleted file mode 100644 index ea325ad08..000000000 Binary files a/doc/source/images/rpc/flow1.png and /dev/null differ diff --git a/doc/source/images/rpc/flow1.svg b/doc/source/images/rpc/flow1.svg deleted file mode 100644 index 6d8f7e280..000000000 --- a/doc/source/images/rpc/flow1.svg +++ /dev/null @@ -1,617 +0,0 @@ - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Page-1 - - - Rounded rectangle - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - ATM switch - name: control_exchange (type: topic) - - Sheet.3 - - - - Sheet.4 - - - - Sheet.5 - - - - Sheet.6 - - - - Sheet.7 - - - - Sheet.8 - - - - - - name: control_exchange(type: topic) - - - Sheet.9 - - Rectangle - - - - - - - Rectangle.10 - - - - - - - Rectangle.11 - - - - - - - Rectangle.12 - - - - - - - Rectangle.13 - - - - - - - Rectangle.14 - - - - - - - Rectangle.15 - - - - - - - - Sheet.17 - - Rectangle - - - - - - - Rectangle.10 - - - - - - - Rectangle.11 - - - - - - - Rectangle.12 - - - - - - - Rectangle.13 - - - - - - - Rectangle.14 - - - - - - - Rectangle.15 - - - - - - - - Sheet.25 - - - - Sheet.26 - key: topic - - - - key: topic - - Sheet.27 - key: topic.host - - - - key: topic.host - - Sheet.28 - - - - Rectangle - Topic 
Consumer - - - - - - - Topic Consumer - - Rectangle.30 - Topic Consumer - - - - - - - Topic Consumer - - Sheet.31 - - - - Sheet.32 - - - - Sheet.33 - - - - Rectangle.34 - - - - - - - Rectangle.35 - Direct Publisher - - - - - - - DirectPublisher - - Sheet.36 - Worker (e.g. compute) - - - - Worker(e.g. compute) - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - ATM switch.37 - name: msg_id (type: direct) - - Sheet.38 - - - - Sheet.39 - - - - Sheet.40 - - - - Sheet.41 - - - - Sheet.42 - - - - Sheet.43 - - - - - - name: msg_id(type: direct) - - - Sheet.44 - - Rectangle - - - - - - - Rectangle.10 - - - - - - - Rectangle.11 - - - - - - - Rectangle.12 - - - - - - - Rectangle.13 - - - - - - - Rectangle.14 - - - - - - - Rectangle.15 - - - - - - - - Sheet.52 - key: msg_id - - - - key: msg_id - - Sheet.53 - - - - Sheet.54 - - - - Rectangle.57 - - - - - - - Rectangle.56 - Direct Consumer - - - - - - - DirectConsumer - - Sheet.57 - Invoker (e.g. api) - - - - Invoker(e.g. api) - - Rectangle.55 - Topic Publisher - - - - - - - Topic Publisher - - Sheet.59 - - - - Sheet.60 - - - - Sheet.61 - RabbitMQ Node - - - - RabbitMQ Node - - Sheet.62 - - - - Sheet.64 - rpc.call (topic.host) - - - - rpc.call(topic.host) - - Sheet.63 - - - - Sheet.66 - - - - Sheet.67 - - - - Sheet.68 - - - - diff --git a/doc/source/images/rpc/flow2.png b/doc/source/images/rpc/flow2.png deleted file mode 100644 index 19de2aafd..000000000 Binary files a/doc/source/images/rpc/flow2.png and /dev/null differ diff --git a/doc/source/images/rpc/flow2.svg b/doc/source/images/rpc/flow2.svg deleted file mode 100644 index fe4cdf341..000000000 --- a/doc/source/images/rpc/flow2.svg +++ /dev/null @@ -1,423 +0,0 @@ - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Page-1 - - - Rounded rectangle - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - ATM switch - name: control_exchange (type: topic) - - Sheet.3 - - - - 
Sheet.4 - - - - Sheet.5 - - - - Sheet.6 - - - - Sheet.7 - - - - Sheet.8 - - - - - - name: control_exchange(type: topic) - - - Sheet.9 - - Rectangle - - - - - - - Rectangle.10 - - - - - - - Rectangle.11 - - - - - - - Rectangle.12 - - - - - - - Rectangle.13 - - - - - - - Rectangle.14 - - - - - - - Rectangle.15 - - - - - - - - Sheet.17 - - Rectangle - - - - - - - Rectangle.10 - - - - - - - Rectangle.11 - - - - - - - Rectangle.12 - - - - - - - Rectangle.13 - - - - - - - Rectangle.14 - - - - - - - Rectangle.15 - - - - - - - - Sheet.25 - - - - Sheet.26 - key: topic - - - - key: topic - - Sheet.27 - key: topic.host - - - - key: topic.host - - Sheet.28 - - - - Rectangle - Topic Consumer - - - - - - - Topic Consumer - - Rectangle.30 - Topic Consumer - - - - - - - Topic Consumer - - Sheet.31 - - - - Sheet.32 - - - - Sheet.33 - - - - Rectangle.34 - - - - - - - Sheet.36 - Worker (e.g. compute) - - - - Worker(e.g. compute) - - Rectangle.57 - - - - - - - Sheet.57 - Invoker (e.g. api) - - - - Invoker(e.g. 
api) - - Rectangle.55 - Topic Publisher - - - - - - - Topic Publisher - - Sheet.59 - - - - Sheet.61 - RabbitMQ Node - - - - RabbitMQ Node - - Sheet.62 - - - - Sheet.63 - rpc.cast(topic) - - - - rpc.cast(topic) - - Sheet.64 - - - - Sheet.65 - - - - diff --git a/doc/source/images/rpc/rabt.png b/doc/source/images/rpc/rabt.png deleted file mode 100644 index e3923b9a7..000000000 Binary files a/doc/source/images/rpc/rabt.png and /dev/null differ diff --git a/doc/source/images/rpc/rabt.svg b/doc/source/images/rpc/rabt.svg deleted file mode 100644 index 142a33ce0..000000000 --- a/doc/source/images/rpc/rabt.svg +++ /dev/null @@ -1,581 +0,0 @@ - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Page-1 - - - Rounded rectangle - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - ATM switch - name: control_exchange (type: topic) - - Sheet.3 - - - - Sheet.4 - - - - Sheet.5 - - - - Sheet.6 - - - - Sheet.7 - - - - Sheet.8 - - - - - - name: control_exchange(type: topic) - - - Sheet.17 - - Rectangle - - - - - - - Rectangle.10 - - - - - - - Rectangle.11 - - - - - - - Rectangle.12 - - - - - - - Rectangle.13 - - - - - - - Rectangle.14 - - - - - - - Rectangle.15 - - - - - - - - Sheet.9 - - Rectangle - - - - - - - Rectangle.10 - - - - - - - Rectangle.11 - - - - - - - Rectangle.12 - - - - - - - Rectangle.13 - - - - - - - Rectangle.14 - - - - - - - Rectangle.15 - - - - - - - - Sheet.25 - - - - Sheet.27 - key: topic - - - - key: topic - - Sheet.28 - key: topic.host - - - - key: topic.host - - Sheet.26 - - - - Rectangle - Topic Consumer - - - - - - - Topic Consumer - - Rectangle.30 - Topic Consumer - - - - - - - Topic Consumer - - Sheet.31 - - - - Sheet.32 - - - - Sheet.33 - - - - Rectangle.34 - - - - - - - Rectangle.35 - Direct Publisher - - - - - - - DirectPublisher - - Sheet.36 - Worker (e.g. compute) - - - - Worker(e.g. 
compute) - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - ATM switch.37 - name: msg_id (type: direct) - - Sheet.38 - - - - Sheet.39 - - - - Sheet.40 - - - - Sheet.41 - - - - Sheet.42 - - - - Sheet.43 - - - - - - name: msg_id(type: direct) - - - Sheet.44 - - Rectangle - - - - - - - Rectangle.10 - - - - - - - Rectangle.11 - - - - - - - Rectangle.12 - - - - - - - Rectangle.13 - - - - - - - Rectangle.14 - - - - - - - Rectangle.15 - - - - - - - - Sheet.52 - key: msg_id - - - - key: msg_id - - Sheet.53 - - - - Sheet.54 - - - - Rectangle.57 - - - - - - - Rectangle.58 - Direct Consumer - - - - - - - DirectConsumer - - Sheet.59 - Invoker (e.g. api) - - - - Invoker(e.g. api) - - Rectangle.55 - Topic Publisher - - - - - - - Topic Publisher - - Sheet.56 - - - - Sheet.60 - - - - Sheet.62 - RabbitMQ Node (single virtual host context) - - - - RabbitMQ Node(single virtual host context) - - diff --git a/doc/source/images/rpc/state.png b/doc/source/images/rpc/state.png deleted file mode 100644 index 58f9323f9..000000000 Binary files a/doc/source/images/rpc/state.png and /dev/null differ diff --git a/doc/source/index.rst b/doc/source/index.rst deleted file mode 100644 index 9c73fb68b..000000000 --- a/doc/source/index.rst +++ /dev/null @@ -1,113 +0,0 @@ -.. - Copyright 2010-2012 United States Government as represented by the - Administrator of the National Aeronautics and Space Administration. - All Rights Reserved. - - Licensed under the Apache License, Version 2.0 (the "License"); you may - not use this file except in compliance with the License. You may obtain - a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - License for the specific language governing permissions and limitations - under the License. 
- -Welcome to Cinder's developer documentation! -============================================ - -Cinder is an OpenStack project to provide "block storage as a service". - -* **Component based architecture**: Quickly add new behaviors -* **Highly available**: Scale to very serious workloads -* **Fault-Tolerant**: Isolated processes avoid cascading failures -* **Recoverable**: Failures should be easy to diagnose, debug, and rectify -* **Open Standards**: Be a reference implementation for a community-driven api - -This documentation is generated by the Sphinx toolkit and lives in the source -tree. Additional draft and project documentation on Cinder and other components of OpenStack can -be found on the `OpenStack wiki`_. Cloud administrators, refer to `docs.openstack.org`_. - -.. _`OpenStack wiki`: http://wiki.openstack.org -.. _`docs.openstack.org`: http://docs.openstack.org - - -Installing Cinder -================= - -.. toctree:: - :maxdepth: 2 - - install/index - -Admin Docs -========== -.. toctree:: - :maxdepth: 2 - - admin/blockstorage - -Contributor/Developer Docs -========================== - -.. toctree:: - :maxdepth: 1 - - contributor/index - scheduler-filters - scheduler-weights - upgrade - -Command Line Interface Documentation -==================================== - -.. toctree:: - :maxdepth: 2 - - cli/cli-manage-volumes - cli/cli-set-quotas - cli/cli-cinder-quotas - cli/cli-cinder-scheduling - -Drivers -======= - -Cinder maintains drivers for volume backends, backup targets, and fibre -channel zone manager fabric types. The list of the available drivers can be -found here: - -.. toctree:: - :maxdepth: 1 - - drivers - -API Extensions -============== - -Go to http://api.openstack.org for information about Cinder API extensions. - -Configuration Reference -======================= - -.. 
toctree:: - :maxdepth: 1 - - configuration/block-storage - sample_config - -Indices and tables -================== - -* :ref:`genindex` -* :ref:`modindex` -* :ref:`search` - -Glossary -======== - -.. toctree:: - :maxdepth: 1 - - common/glossary.rst diff --git a/doc/source/install/cinder-backup-install-obs.rst b/doc/source/install/cinder-backup-install-obs.rst deleted file mode 100644 index 8aece5e11..000000000 --- a/doc/source/install/cinder-backup-install-obs.rst +++ /dev/null @@ -1,60 +0,0 @@ -:orphan: - -Install and configure the backup service -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -Optionally, install and configure the backup service. For simplicity, -this configuration uses the Block Storage node and the Object Storage -(swift) driver, thus depending on the -`Object Storage service `_. - -.. note:: - - You must :ref:`install and configure a storage node ` prior - to installing and configuring the backup service. - -Install and configure components --------------------------------- - -.. note:: - - Perform these steps on the Block Storage node. - - -#. Install the packages: - - .. code-block:: console - - # zypper install openstack-cinder-backup - -#. Edit the ``/etc/cinder/cinder.conf`` file - and complete the following actions: - - #. In the ``[DEFAULT]`` section, configure backup options: - - .. path /etc/cinder/cinder.conf - .. code-block:: ini - - [DEFAULT] - # ... - backup_driver = cinder.backup.drivers.swift - backup_swift_url = SWIFT_URL - - - Replace ``SWIFT_URL`` with the URL of the Object Storage service. The - URL can be found by showing the object-store API endpoints: - - .. code-block:: console - - $ openstack catalog show object-store - -Finalize installation ---------------------- - -Start the Block Storage backup service and configure it to -start when the system boots: - -.. 
code-block:: console - - # systemctl enable openstack-cinder-backup.service - # systemctl start openstack-cinder-backup.service diff --git a/doc/source/install/cinder-backup-install-rdo.rst b/doc/source/install/cinder-backup-install-rdo.rst deleted file mode 100644 index 68826ccc4..000000000 --- a/doc/source/install/cinder-backup-install-rdo.rst +++ /dev/null @@ -1,61 +0,0 @@ -:orphan: - -Install and configure the backup service -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -Optionally, install and configure the backup service. For simplicity, -this configuration uses the Block Storage node and the Object Storage -(swift) driver, thus depending on the -`Object Storage service `_. - -.. note:: - - You must :ref:`install and configure a storage node ` prior - to installing and configuring the backup service. - -Install and configure components --------------------------------- - -.. note:: - - Perform these steps on the Block Storage node. - -#. Install the packages: - - .. code-block:: console - - # yum install openstack-cinder - - -#. Edit the ``/etc/cinder/cinder.conf`` file - and complete the following actions: - - #. In the ``[DEFAULT]`` section, configure backup options: - - .. path /etc/cinder/cinder.conf - .. code-block:: ini - - [DEFAULT] - # ... - backup_driver = cinder.backup.drivers.swift - backup_swift_url = SWIFT_URL - - - Replace ``SWIFT_URL`` with the URL of the Object Storage service. The - URL can be found by showing the object-store API endpoints: - - .. code-block:: console - - $ openstack catalog show object-store - - -Finalize installation ---------------------- - -Start the Block Storage backup service and configure it to -start when the system boots: - -.. 
code-block:: console - - # systemctl enable openstack-cinder-backup.service - # systemctl start openstack-cinder-backup.service diff --git a/doc/source/install/cinder-backup-install-ubuntu.rst b/doc/source/install/cinder-backup-install-ubuntu.rst deleted file mode 100644 index 79a093250..000000000 --- a/doc/source/install/cinder-backup-install-ubuntu.rst +++ /dev/null @@ -1,56 +0,0 @@ -:orphan: - -Install and configure the backup service -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -Optionally, install and configure the backup service. For simplicity, -this configuration uses the Block Storage node and the Object Storage -(swift) driver, thus depending on the -`Object Storage service `_. - -.. note:: - - You must :ref:`install and configure a storage node ` prior - to installing and configuring the backup service. - -Install and configure components --------------------------------- - -.. note:: - - Perform these steps on the Block Storage node. - -#. Install the packages: - - .. code-block:: console - - # apt install cinder-backup - -2. Edit the ``/etc/cinder/cinder.conf`` file - and complete the following actions: - - * In the ``[DEFAULT]`` section, configure backup options: - - .. path /etc/cinder/cinder.conf - .. code-block:: ini - - [DEFAULT] - # ... - backup_driver = cinder.backup.drivers.swift - backup_swift_url = SWIFT_URL - - Replace ``SWIFT_URL`` with the URL of the Object Storage service. The - URL can be found by showing the object-store API endpoints: - - .. code-block:: console - - $ openstack catalog show object-store - -Finalize installation ---------------------- - -Restart the Block Storage backup service: - -.. 
code-block:: console - - # service cinder-backup restart diff --git a/doc/source/install/cinder-controller-install-obs.rst b/doc/source/install/cinder-controller-install-obs.rst deleted file mode 100644 index f1b92ee7a..000000000 --- a/doc/source/install/cinder-controller-install-obs.rst +++ /dev/null @@ -1,344 +0,0 @@ -Install and configure controller node -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -This section describes how to install and configure the Block -Storage service, code-named cinder, on the controller node. This -service requires at least one additional storage node that provides -volumes to instances. - -Prerequisites -------------- - -Before you install and configure the Block Storage service, you -must create a database, service credentials, and API endpoints. - -#. To create the database, complete these steps: - - #. Use the database access client to connect to the database - server as the ``root`` user: - - .. code-block:: console - - $ mysql -u root -p - - #. Create the ``cinder`` database: - - .. code-block:: console - - MariaDB [(none)]> CREATE DATABASE cinder; - - #. Grant proper access to the ``cinder`` database: - - .. code-block:: console - - MariaDB [(none)]> GRANT ALL PRIVILEGES ON cinder.* TO 'cinder'@'localhost' \ - IDENTIFIED BY 'CINDER_DBPASS'; - MariaDB [(none)]> GRANT ALL PRIVILEGES ON cinder.* TO 'cinder'@'%' \ - IDENTIFIED BY 'CINDER_DBPASS'; - - Replace ``CINDER_DBPASS`` with a suitable password. - - #. Exit the database access client. - -#. Source the ``admin`` credentials to gain access to admin-only - CLI commands: - - .. code-block:: console - - $ . admin-openrc - -#. To create the service credentials, complete these steps: - - #. Create a ``cinder`` user: - - .. 
code-block:: console - - $ openstack user create --domain default --password-prompt cinder - - User Password: - Repeat User Password: - +---------------------+----------------------------------+ - | Field | Value | - +---------------------+----------------------------------+ - | domain_id | default | - | enabled | True | - | id | 9d7e33de3e1a498390353819bc7d245d | - | name | cinder | - | options | {} | - | password_expires_at | None | - +---------------------+----------------------------------+ - - #. Add the ``admin`` role to the ``cinder`` user: - - .. code-block:: console - - $ openstack role add --project service --user cinder admin - - .. note:: - - This command provides no output. - - #. Create the ``cinderv2`` and ``cinderv3`` service entities: - - .. code-block:: console - - $ openstack service create --name cinderv2 \ - --description "OpenStack Block Storage" volumev2 - - +-------------+----------------------------------+ - | Field | Value | - +-------------+----------------------------------+ - | description | OpenStack Block Storage | - | enabled | True | - | id | eb9fd245bdbc414695952e93f29fe3ac | - | name | cinderv2 | - | type | volumev2 | - +-------------+----------------------------------+ - - .. code-block:: console - - $ openstack service create --name cinderv3 \ - --description "OpenStack Block Storage" volumev3 - - +-------------+----------------------------------+ - | Field | Value | - +-------------+----------------------------------+ - | description | OpenStack Block Storage | - | enabled | True | - | id | ab3bbbef780845a1a283490d281e7fda | - | name | cinderv3 | - | type | volumev3 | - +-------------+----------------------------------+ - -.. note:: - - The Block Storage services require two service entities. - -#. Create the Block Storage service API endpoints: - - .. 
code-block:: console - - $ openstack endpoint create --region RegionOne \ - volumev2 public http://controller:8776/v2/%\(project_id\)s - - +--------------+------------------------------------------+ - | Field | Value | - +--------------+------------------------------------------+ - | enabled | True | - | id | 513e73819e14460fb904163f41ef3759 | - | interface | public | - | region | RegionOne | - | region_id | RegionOne | - | service_id | eb9fd245bdbc414695952e93f29fe3ac | - | service_name | cinderv2 | - | service_type | volumev2 | - | url | http://controller:8776/v2/%(project_id)s | - +--------------+------------------------------------------+ - - $ openstack endpoint create --region RegionOne \ - volumev2 internal http://controller:8776/v2/%\(project_id\)s - - +--------------+------------------------------------------+ - | Field | Value | - +--------------+------------------------------------------+ - | enabled | True | - | id | 6436a8a23d014cfdb69c586eff146a32 | - | interface | internal | - | region | RegionOne | - | region_id | RegionOne | - | service_id | eb9fd245bdbc414695952e93f29fe3ac | - | service_name | cinderv2 | - | service_type | volumev2 | - | url | http://controller:8776/v2/%(project_id)s | - +--------------+------------------------------------------+ - - $ openstack endpoint create --region RegionOne \ - volumev2 admin http://controller:8776/v2/%\(project_id\)s - - +--------------+------------------------------------------+ - | Field | Value | - +--------------+------------------------------------------+ - | enabled | True | - | id | e652cf84dd334f359ae9b045a2c91d96 | - | interface | admin | - | region | RegionOne | - | region_id | RegionOne | - | service_id | eb9fd245bdbc414695952e93f29fe3ac | - | service_name | cinderv2 | - | service_type | volumev2 | - | url | http://controller:8776/v2/%(project_id)s | - +--------------+------------------------------------------+ - - .. 
code-block:: console - - $ openstack endpoint create --region RegionOne \ - volumev3 public http://controller:8776/v3/%\(project_id\)s - - +--------------+------------------------------------------+ - | Field | Value | - +--------------+------------------------------------------+ - | enabled | True | - | id | 03fa2c90153546c295bf30ca86b1344b | - | interface | public | - | region | RegionOne | - | region_id | RegionOne | - | service_id | ab3bbbef780845a1a283490d281e7fda | - | service_name | cinderv3 | - | service_type | volumev3 | - | url | http://controller:8776/v3/%(project_id)s | - +--------------+------------------------------------------+ - - $ openstack endpoint create --region RegionOne \ - volumev3 internal http://controller:8776/v3/%\(project_id\)s - - +--------------+------------------------------------------+ - | Field | Value | - +--------------+------------------------------------------+ - | enabled | True | - | id | 94f684395d1b41068c70e4ecb11364b2 | - | interface | internal | - | region | RegionOne | - | region_id | RegionOne | - | service_id | ab3bbbef780845a1a283490d281e7fda | - | service_name | cinderv3 | - | service_type | volumev3 | - | url | http://controller:8776/v3/%(project_id)s | - +--------------+------------------------------------------+ - - $ openstack endpoint create --region RegionOne \ - volumev3 admin http://controller:8776/v3/%\(project_id\)s - - +--------------+------------------------------------------+ - | Field | Value | - +--------------+------------------------------------------+ - | enabled | True | - | id | 4511c28a0f9840c78bacb25f10f62c98 | - | interface | admin | - | region | RegionOne | - | region_id | RegionOne | - | service_id | ab3bbbef780845a1a283490d281e7fda | - | service_name | cinderv3 | - | service_type | volumev3 | - | url | http://controller:8776/v3/%(project_id)s | - +--------------+------------------------------------------+ - - .. 
note:: - - The Block Storage services require endpoints for each service - entity. - -Install and configure components --------------------------------- - - -#. Install the packages: - - .. code-block:: console - - # zypper install openstack-cinder-api openstack-cinder-scheduler - -#. Edit the ``/etc/cinder/cinder.conf`` file and complete the - following actions: - - #. In the ``[database]`` section, configure database access: - - .. path /etc/cinder/cinder.conf - .. code-block:: ini - - [database] - # ... - connection = mysql+pymysql://cinder:CINDER_DBPASS@controller/cinder - - Replace ``CINDER_DBPASS`` with the password you chose for the - Block Storage database. - - #. In the ``[DEFAULT]`` section, configure ``RabbitMQ`` - message queue access: - - .. path /etc/cinder/cinder.conf - .. code-block:: ini - - [DEFAULT] - # ... - transport_url = rabbit://openstack:RABBIT_PASS@controller - - Replace ``RABBIT_PASS`` with the password you chose for the - ``openstack`` account in ``RabbitMQ``. - - #. In the ``[DEFAULT]`` and ``[keystone_authtoken]`` sections, - configure Identity service access: - - .. path /etc/cinder/cinder.conf - .. code-block:: ini - - [DEFAULT] - # ... - auth_strategy = keystone - - [keystone_authtoken] - # ... - auth_uri = http://controller:5000 - auth_url = http://controller:35357 - memcached_servers = controller:11211 - auth_type = password - project_domain_name = default - user_domain_name = default - project_name = service - username = cinder - password = CINDER_PASS - - Replace ``CINDER_PASS`` with the password you chose for - the ``cinder`` user in the Identity service. - - .. note:: - - Comment out or remove any other options in the - ``[keystone_authtoken]`` section. - - #. In the ``[DEFAULT]`` section, configure the ``my_ip`` option to - use the management interface IP address of the controller node: - - .. path /etc/cinder/cinder.conf - .. code-block:: ini - - [DEFAULT] - # ... - my_ip = 10.0.0.11 - -#. 
In the ``[oslo_concurrency]`` section, configure the lock path: - - .. path /etc/cinder/cinder.conf - .. code-block:: ini - - [oslo_concurrency] - # ... - lock_path = /var/lib/cinder/tmp - -Configure Compute to use Block Storage --------------------------------------- - -#. Edit the ``/etc/nova/nova.conf`` file and add the following - to it: - - .. path /etc/nova/nova.conf - .. code-block:: ini - - [cinder] - os_region_name = RegionOne - -Finalize installation ---------------------- - -#. Restart the Compute API service: - - .. code-block:: console - - # systemctl restart openstack-nova-api.service - -#. Start the Block Storage services and configure them to start when - the system boots: - - .. code-block:: console - - # systemctl enable openstack-cinder-api.service openstack-cinder-scheduler.service - # systemctl start openstack-cinder-api.service openstack-cinder-scheduler.service diff --git a/doc/source/install/cinder-controller-install-rdo.rst b/doc/source/install/cinder-controller-install-rdo.rst deleted file mode 100644 index 2abd63f18..000000000 --- a/doc/source/install/cinder-controller-install-rdo.rst +++ /dev/null @@ -1,354 +0,0 @@ -Install and configure controller node -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -This section describes how to install and configure the Block -Storage service, code-named cinder, on the controller node. This -service requires at least one additional storage node that provides -volumes to instances. - -Prerequisites -------------- - -Before you install and configure the Block Storage service, you -must create a database, service credentials, and API endpoints. - -#. To create the database, complete these steps: - - #. Use the database access client to connect to the database - server as the ``root`` user: - - .. code-block:: console - - $ mysql -u root -p - - #. Create the ``cinder`` database: - - .. code-block:: console - - MariaDB [(none)]> CREATE DATABASE cinder; - - #. Grant proper access to the ``cinder`` database: - - .. 
code-block:: console - - MariaDB [(none)]> GRANT ALL PRIVILEGES ON cinder.* TO 'cinder'@'localhost' \ - IDENTIFIED BY 'CINDER_DBPASS'; - MariaDB [(none)]> GRANT ALL PRIVILEGES ON cinder.* TO 'cinder'@'%' \ - IDENTIFIED BY 'CINDER_DBPASS'; - - Replace ``CINDER_DBPASS`` with a suitable password. - - #. Exit the database access client. - -#. Source the ``admin`` credentials to gain access to admin-only - CLI commands: - - .. code-block:: console - - $ . admin-openrc - -#. To create the service credentials, complete these steps: - - #. Create a ``cinder`` user: - - .. code-block:: console - - $ openstack user create --domain default --password-prompt cinder - - User Password: - Repeat User Password: - +---------------------+----------------------------------+ - | Field | Value | - +---------------------+----------------------------------+ - | domain_id | default | - | enabled | True | - | id | 9d7e33de3e1a498390353819bc7d245d | - | name | cinder | - | options | {} | - | password_expires_at | None | - +---------------------+----------------------------------+ - - #. Add the ``admin`` role to the ``cinder`` user: - - .. code-block:: console - - $ openstack role add --project service --user cinder admin - - .. note:: - - This command provides no output. - - #. Create the ``cinderv2`` and ``cinderv3`` service entities: - - .. code-block:: console - - $ openstack service create --name cinderv2 \ - --description "OpenStack Block Storage" volumev2 - - +-------------+----------------------------------+ - | Field | Value | - +-------------+----------------------------------+ - | description | OpenStack Block Storage | - | enabled | True | - | id | eb9fd245bdbc414695952e93f29fe3ac | - | name | cinderv2 | - | type | volumev2 | - +-------------+----------------------------------+ - - .. 
code-block:: console - - $ openstack service create --name cinderv3 \ - --description "OpenStack Block Storage" volumev3 - - +-------------+----------------------------------+ - | Field | Value | - +-------------+----------------------------------+ - | description | OpenStack Block Storage | - | enabled | True | - | id | ab3bbbef780845a1a283490d281e7fda | - | name | cinderv3 | - | type | volumev3 | - +-------------+----------------------------------+ - - .. note:: - - The Block Storage services require two service entities. - -#. Create the Block Storage service API endpoints: - - .. code-block:: console - - $ openstack endpoint create --region RegionOne \ - volumev2 public http://controller:8776/v2/%\(project_id\)s - - +--------------+------------------------------------------+ - | Field | Value | - +--------------+------------------------------------------+ - | enabled | True | - | id | 513e73819e14460fb904163f41ef3759 | - | interface | public | - | region | RegionOne | - | region_id | RegionOne | - | service_id | eb9fd245bdbc414695952e93f29fe3ac | - | service_name | cinderv2 | - | service_type | volumev2 | - | url | http://controller:8776/v2/%(project_id)s | - +--------------+------------------------------------------+ - - $ openstack endpoint create --region RegionOne \ - volumev2 internal http://controller:8776/v2/%\(project_id\)s - - +--------------+------------------------------------------+ - | Field | Value | - +--------------+------------------------------------------+ - | enabled | True | - | id | 6436a8a23d014cfdb69c586eff146a32 | - | interface | internal | - | region | RegionOne | - | region_id | RegionOne | - | service_id | eb9fd245bdbc414695952e93f29fe3ac | - | service_name | cinderv2 | - | service_type | volumev2 | - | url | http://controller:8776/v2/%(project_id)s | - +--------------+------------------------------------------+ - - $ openstack endpoint create --region RegionOne \ - volumev2 admin http://controller:8776/v2/%\(project_id\)s - - 
+--------------+------------------------------------------+ - | Field | Value | - +--------------+------------------------------------------+ - | enabled | True | - | id | e652cf84dd334f359ae9b045a2c91d96 | - | interface | admin | - | region | RegionOne | - | region_id | RegionOne | - | service_id | eb9fd245bdbc414695952e93f29fe3ac | - | service_name | cinderv2 | - | service_type | volumev2 | - | url | http://controller:8776/v2/%(project_id)s | - +--------------+------------------------------------------+ - - .. code-block:: console - - $ openstack endpoint create --region RegionOne \ - volumev3 public http://controller:8776/v3/%\(project_id\)s - - +--------------+------------------------------------------+ - | Field | Value | - +--------------+------------------------------------------+ - | enabled | True | - | id | 03fa2c90153546c295bf30ca86b1344b | - | interface | public | - | region | RegionOne | - | region_id | RegionOne | - | service_id | ab3bbbef780845a1a283490d281e7fda | - | service_name | cinderv3 | - | service_type | volumev3 | - | url | http://controller:8776/v3/%(project_id)s | - +--------------+------------------------------------------+ - - $ openstack endpoint create --region RegionOne \ - volumev3 internal http://controller:8776/v3/%\(project_id\)s - - +--------------+------------------------------------------+ - | Field | Value | - +--------------+------------------------------------------+ - | enabled | True | - | id | 94f684395d1b41068c70e4ecb11364b2 | - | interface | internal | - | region | RegionOne | - | region_id | RegionOne | - | service_id | ab3bbbef780845a1a283490d281e7fda | - | service_name | cinderv3 | - | service_type | volumev3 | - | url | http://controller:8776/v3/%(project_id)s | - +--------------+------------------------------------------+ - - $ openstack endpoint create --region RegionOne \ - volumev3 admin http://controller:8776/v3/%\(project_id\)s - - +--------------+------------------------------------------+ - | Field | Value | 
- +--------------+------------------------------------------+ - | enabled | True | - | id | 4511c28a0f9840c78bacb25f10f62c98 | - | interface | admin | - | region | RegionOne | - | region_id | RegionOne | - | service_id | ab3bbbef780845a1a283490d281e7fda | - | service_name | cinderv3 | - | service_type | volumev3 | - | url | http://controller:8776/v3/%(project_id)s | - +--------------+------------------------------------------+ - - .. note:: - - The Block Storage services require endpoints for each service - entity. - -Install and configure components --------------------------------- - -#. Install the packages: - - .. code-block:: console - - # yum install openstack-cinder - -#. Edit the ``/etc/cinder/cinder.conf`` file and complete the - following actions: - - #. In the ``[database]`` section, configure database access: - - .. path /etc/cinder/cinder.conf - .. code-block:: ini - - [database] - # ... - connection = mysql+pymysql://cinder:CINDER_DBPASS@controller/cinder - - Replace ``CINDER_DBPASS`` with the password you chose for the - Block Storage database. - - #. In the ``[DEFAULT]`` section, configure ``RabbitMQ`` - message queue access: - - .. path /etc/cinder/cinder.conf - .. code-block:: ini - - [DEFAULT] - # ... - transport_url = rabbit://openstack:RABBIT_PASS@controller - - Replace ``RABBIT_PASS`` with the password you chose for the - ``openstack`` account in ``RabbitMQ``. - - #. In the ``[DEFAULT]`` and ``[keystone_authtoken]`` sections, - configure Identity service access: - - .. path /etc/cinder/cinder.conf - .. code-block:: ini - - [DEFAULT] - # ... - auth_strategy = keystone - - [keystone_authtoken] - # ... 
- auth_uri = http://controller:5000 - auth_url = http://controller:35357 - memcached_servers = controller:11211 - auth_type = password - project_domain_name = default - user_domain_name = default - project_name = service - username = cinder - password = CINDER_PASS - - Replace ``CINDER_PASS`` with the password you chose for - the ``cinder`` user in the Identity service. - - .. note:: - - Comment out or remove any other options in the - ``[keystone_authtoken]`` section. - - #. In the ``[DEFAULT]`` section, configure the ``my_ip`` option to - use the management interface IP address of the controller node: - - .. path /etc/cinder/cinder.conf - .. code-block:: ini - - [DEFAULT] - # ... - my_ip = 10.0.0.11 - -#. In the ``[oslo_concurrency]`` section, configure the lock path: - - .. path /etc/cinder/cinder.conf - .. code-block:: ini - - [oslo_concurrency] - # ... - lock_path = /var/lib/cinder/tmp - -#. Populate the Block Storage database: - - .. code-block:: console - - # su -s /bin/sh -c "cinder-manage db sync" cinder - - .. note:: - - Ignore any deprecation messages in this output. - -Configure Compute to use Block Storage --------------------------------------- - -#. Edit the ``/etc/nova/nova.conf`` file and add the following - to it: - - .. path /etc/nova/nova.conf - .. code-block:: ini - - [cinder] - os_region_name = RegionOne - -Finalize installation ---------------------- - - -#. Restart the Compute API service: - - .. code-block:: console - - # systemctl restart openstack-nova-api.service - -#. Start the Block Storage services and configure them to start when - the system boots: - - .. 
code-block:: console - - # systemctl enable openstack-cinder-api.service openstack-cinder-scheduler.service - # systemctl start openstack-cinder-api.service openstack-cinder-scheduler.service diff --git a/doc/source/install/cinder-controller-install-ubuntu.rst b/doc/source/install/cinder-controller-install-ubuntu.rst deleted file mode 100644 index e6a384842..000000000 --- a/doc/source/install/cinder-controller-install-ubuntu.rst +++ /dev/null @@ -1,353 +0,0 @@ -Install and configure controller node -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -This section describes how to install and configure the Block -Storage service, code-named cinder, on the controller node. This -service requires at least one additional storage node that provides -volumes to instances. - -Prerequisites -------------- - -Before you install and configure the Block Storage service, you -must create a database, service credentials, and API endpoints. - -#. To create the database, complete these steps: - - - #. Use the database access client to connect to the database - server as the ``root`` user: - - .. code-block:: console - - # mysql - - #. Create the ``cinder`` database: - - .. code-block:: console - - MariaDB [(none)]> CREATE DATABASE cinder; - - #. Grant proper access to the ``cinder`` database: - - .. code-block:: console - - MariaDB [(none)]> GRANT ALL PRIVILEGES ON cinder.* TO 'cinder'@'localhost' \ - IDENTIFIED BY 'CINDER_DBPASS'; - MariaDB [(none)]> GRANT ALL PRIVILEGES ON cinder.* TO 'cinder'@'%' \ - IDENTIFIED BY 'CINDER_DBPASS'; - - Replace ``CINDER_DBPASS`` with a suitable password. - - #. Exit the database access client. - -#. Source the ``admin`` credentials to gain access to admin-only - CLI commands: - - .. code-block:: console - - $ . admin-openrc - -#. To create the service credentials, complete these steps: - - #. Create a ``cinder`` user: - - .. 
code-block:: console - - $ openstack user create --domain default --password-prompt cinder - - User Password: - Repeat User Password: - +---------------------+----------------------------------+ - | Field | Value | - +---------------------+----------------------------------+ - | domain_id | default | - | enabled | True | - | id | 9d7e33de3e1a498390353819bc7d245d | - | name | cinder | - | options | {} | - | password_expires_at | None | - +---------------------+----------------------------------+ - - #. Add the ``admin`` role to the ``cinder`` user: - - .. code-block:: console - - $ openstack role add --project service --user cinder admin - - .. note:: - - This command provides no output. - - #. Create the ``cinderv2`` and ``cinderv3`` service entities: - - .. code-block:: console - - $ openstack service create --name cinderv2 \ - --description "OpenStack Block Storage" volumev2 - - +-------------+----------------------------------+ - | Field | Value | - +-------------+----------------------------------+ - | description | OpenStack Block Storage | - | enabled | True | - | id | eb9fd245bdbc414695952e93f29fe3ac | - | name | cinderv2 | - | type | volumev2 | - +-------------+----------------------------------+ - - .. code-block:: console - - $ openstack service create --name cinderv3 \ - --description "OpenStack Block Storage" volumev3 - - +-------------+----------------------------------+ - | Field | Value | - +-------------+----------------------------------+ - | description | OpenStack Block Storage | - | enabled | True | - | id | ab3bbbef780845a1a283490d281e7fda | - | name | cinderv3 | - | type | volumev3 | - +-------------+----------------------------------+ - - .. note:: - - The Block Storage services require two service entities. - -#. Create the Block Storage service API endpoints: - - .. 
code-block:: console - - $ openstack endpoint create --region RegionOne \ - volumev2 public http://controller:8776/v2/%\(project_id\)s - - +--------------+------------------------------------------+ - | Field | Value | - +--------------+------------------------------------------+ - | enabled | True | - | id | 513e73819e14460fb904163f41ef3759 | - | interface | public | - | region | RegionOne | - | region_id | RegionOne | - | service_id | eb9fd245bdbc414695952e93f29fe3ac | - | service_name | cinderv2 | - | service_type | volumev2 | - | url | http://controller:8776/v2/%(project_id)s | - +--------------+------------------------------------------+ - - $ openstack endpoint create --region RegionOne \ - volumev2 internal http://controller:8776/v2/%\(project_id\)s - - +--------------+------------------------------------------+ - | Field | Value | - +--------------+------------------------------------------+ - | enabled | True | - | id | 6436a8a23d014cfdb69c586eff146a32 | - | interface | internal | - | region | RegionOne | - | region_id | RegionOne | - | service_id | eb9fd245bdbc414695952e93f29fe3ac | - | service_name | cinderv2 | - | service_type | volumev2 | - | url | http://controller:8776/v2/%(project_id)s | - +--------------+------------------------------------------+ - - $ openstack endpoint create --region RegionOne \ - volumev2 admin http://controller:8776/v2/%\(project_id\)s - - +--------------+------------------------------------------+ - | Field | Value | - +--------------+------------------------------------------+ - | enabled | True | - | id | e652cf84dd334f359ae9b045a2c91d96 | - | interface | admin | - | region | RegionOne | - | region_id | RegionOne | - | service_id | eb9fd245bdbc414695952e93f29fe3ac | - | service_name | cinderv2 | - | service_type | volumev2 | - | url | http://controller:8776/v2/%(project_id)s | - +--------------+------------------------------------------+ - - .. 
code-block:: console - - $ openstack endpoint create --region RegionOne \ - volumev3 public http://controller:8776/v3/%\(project_id\)s - - +--------------+------------------------------------------+ - | Field | Value | - +--------------+------------------------------------------+ - | enabled | True | - | id | 03fa2c90153546c295bf30ca86b1344b | - | interface | public | - | region | RegionOne | - | region_id | RegionOne | - | service_id | ab3bbbef780845a1a283490d281e7fda | - | service_name | cinderv3 | - | service_type | volumev3 | - | url | http://controller:8776/v3/%(project_id)s | - +--------------+------------------------------------------+ - - $ openstack endpoint create --region RegionOne \ - volumev3 internal http://controller:8776/v3/%\(project_id\)s - - +--------------+------------------------------------------+ - | Field | Value | - +--------------+------------------------------------------+ - | enabled | True | - | id | 94f684395d1b41068c70e4ecb11364b2 | - | interface | internal | - | region | RegionOne | - | region_id | RegionOne | - | service_id | ab3bbbef780845a1a283490d281e7fda | - | service_name | cinderv3 | - | service_type | volumev3 | - | url | http://controller:8776/v3/%(project_id)s | - +--------------+------------------------------------------+ - - $ openstack endpoint create --region RegionOne \ - volumev3 admin http://controller:8776/v3/%\(project_id\)s - - +--------------+------------------------------------------+ - | Field | Value | - +--------------+------------------------------------------+ - | enabled | True | - | id | 4511c28a0f9840c78bacb25f10f62c98 | - | interface | admin | - | region | RegionOne | - | region_id | RegionOne | - | service_id | ab3bbbef780845a1a283490d281e7fda | - | service_name | cinderv3 | - | service_type | volumev3 | - | url | http://controller:8776/v3/%(project_id)s | - +--------------+------------------------------------------+ - - .. 
note:: - - The Block Storage services require endpoints for each service - entity. - -Install and configure components --------------------------------- - -#. Install the packages: - - .. code-block:: console - - # apt install cinder-api cinder-scheduler - -#. Edit the ``/etc/cinder/cinder.conf`` file and complete the - following actions: - - #. In the ``[database]`` section, configure database access: - - .. path /etc/cinder/cinder.conf - .. code-block:: ini - - [database] - # ... - connection = mysql+pymysql://cinder:CINDER_DBPASS@controller/cinder - - Replace ``CINDER_DBPASS`` with the password you chose for the - Block Storage database. - - #. In the ``[DEFAULT]`` section, configure ``RabbitMQ`` - message queue access: - - .. path /etc/cinder/cinder.conf - .. code-block:: ini - - [DEFAULT] - # ... - transport_url = rabbit://openstack:RABBIT_PASS@controller - - Replace ``RABBIT_PASS`` with the password you chose for the - ``openstack`` account in ``RabbitMQ``. - - #. In the ``[DEFAULT]`` and ``[keystone_authtoken]`` sections, - configure Identity service access: - - .. path /etc/cinder/cinder.conf - .. code-block:: ini - - [DEFAULT] - # ... - auth_strategy = keystone - - [keystone_authtoken] - # ... - auth_uri = http://controller:5000 - auth_url = http://controller:35357 - memcached_servers = controller:11211 - auth_type = password - project_domain_name = default - user_domain_name = default - project_name = service - username = cinder - password = CINDER_PASS - - Replace ``CINDER_PASS`` with the password you chose for - the ``cinder`` user in the Identity service. - - .. note:: - - Comment out or remove any other options in the - ``[keystone_authtoken]`` section. - - #. In the ``[DEFAULT]`` section, configure the ``my_ip`` option to - use the management interface IP address of the controller node: - - .. path /etc/cinder/cinder.conf - .. code-block:: ini - - [DEFAULT] - # ... - my_ip = 10.0.0.11 - -#. 
In the ``[oslo_concurrency]`` section, configure the lock path: - - .. path /etc/cinder/cinder.conf - .. code-block:: ini - - [oslo_concurrency] - # ... - lock_path = /var/lib/cinder/tmp - -#. Populate the Block Storage database: - - .. code-block:: console - - # su -s /bin/sh -c "cinder-manage db sync" cinder - - .. note:: - - Ignore any deprecation messages in this output. - -Configure Compute to use Block Storage --------------------------------------- - -#. Edit the ``/etc/nova/nova.conf`` file and add the following - to it: - - .. path /etc/nova/nova.conf - .. code-block:: ini - - [cinder] - os_region_name = RegionOne - -Finalize installation ---------------------- - -#. Restart the Compute API service: - - .. code-block:: console - - # service nova-api restart - -#. Restart the Block Storage services: - - .. code-block:: console - - # service cinder-scheduler restart - # service apache2 restart diff --git a/doc/source/install/cinder-storage-install-obs.rst b/doc/source/install/cinder-storage-install-obs.rst deleted file mode 100644 index 350e1354b..000000000 --- a/doc/source/install/cinder-storage-install-obs.rst +++ /dev/null @@ -1,273 +0,0 @@ -Install and configure a storage node -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -Prerequisites -------------- - -Before you install and configure the Block Storage service on the -storage node, you must prepare the storage device. - -.. note:: - - Perform these steps on the storage node. - -#. Install the supporting utility packages. - -#. Install the LVM packages: - - .. code-block:: console - - # zypper install lvm2 - -#. (Optional) If you intend to use non-raw image types such as QCOW2 - and VMDK, install the QEMU package: - - .. code-block:: console - - # zypper install qemu - - .. note:: - - Some distributions include LVM by default. - -#. Create the LVM physical volume ``/dev/sdb``: - - .. code-block:: console - - # pvcreate /dev/sdb - - Physical volume "/dev/sdb" successfully created - -#. 
Create the LVM volume group ``cinder-volumes``: - - .. code-block:: console - - # vgcreate cinder-volumes /dev/sdb - - Volume group "cinder-volumes" successfully created - - The Block Storage service creates logical volumes in this volume group. - -#. Only instances can access Block Storage volumes. However, the - underlying operating system manages the devices associated with - the volumes. By default, the LVM volume scanning tool scans the - ``/dev`` directory for block storage devices that - contain volumes. If projects use LVM on their volumes, the scanning - tool detects these volumes and attempts to cache them which can cause - a variety of problems with both the underlying operating system - and project volumes. You must reconfigure LVM to scan only the devices - that contain the ``cinder-volumes`` volume group. Edit the - ``/etc/lvm/lvm.conf`` file and complete the following actions: - - * In the ``devices`` section, add a filter that accepts the - ``/dev/sdb`` device and rejects all other devices: - - .. path /etc/lvm/lvm.conf - .. code-block:: bash - - devices { - ... - filter = [ "a/sdb/", "r/.*/"] - - .. end - - Each item in the filter array begins with ``a`` for **accept** or - ``r`` for **reject** and includes a regular expression for the - device name. The array must end with ``r/.*/`` to reject any - remaining devices. You can use the :command:`vgs -vvvv` command - to test filters. - - .. warning:: - - If your storage nodes use LVM on the operating system disk, you - must also add the associated device to the filter. For example, - if the ``/dev/sda`` device contains the operating system: - - .. ignore_path /etc/lvm/lvm.conf - .. code-block:: ini - - filter = [ "a/sda/", "a/sdb/", "r/.*/"] - - .. end - - Similarly, if your compute nodes use LVM on the operating - system disk, you must also modify the filter in the - ``/etc/lvm/lvm.conf`` file on those nodes to include only - the operating system disk. 
For example, if the ``/dev/sda`` - device contains the operating system: - - .. path /etc/openstack-dashboard/local_settings.py - .. code-block:: ini - - filter = [ "a/sda/", "r/.*/"] - - .. end - -Install and configure components --------------------------------- - -#. Install the packages: - - .. code-block:: console - - # zypper install openstack-cinder-volume tgt - -#. Edit the ``/etc/cinder/cinder.conf`` file - and complete the following actions: - - * In the ``[database]`` section, configure database access: - - .. path /etc/cinder/cinder.conf - .. code-block:: ini - - [database] - # ... - connection = mysql+pymysql://cinder:CINDER_DBPASS@controller/cinder - - .. end - - Replace ``CINDER_DBPASS`` with the password you chose for - the Block Storage database. - - * In the ``[DEFAULT]`` section, configure ``RabbitMQ`` - message queue access: - - .. path /etc/cinder/cinder.conf - .. code-block:: ini - - [DEFAULT] - # ... - transport_url = rabbit://openstack:RABBIT_PASS@controller - - .. end - - Replace ``RABBIT_PASS`` with the password you chose for - the ``openstack`` account in ``RabbitMQ``. - - * In the ``[DEFAULT]`` and ``[keystone_authtoken]`` sections, - configure Identity service access: - - .. path /etc/cinder/cinder.conf - .. code-block:: ini - - [DEFAULT] - # ... - auth_strategy = keystone - - [keystone_authtoken] - # ... - auth_uri = http://controller:5000 - auth_url = http://controller:35357 - memcached_servers = controller:11211 - auth_type = password - project_domain_name = default - user_domain_name = default - project_name = service - username = cinder - password = CINDER_PASS - - .. end - - Replace ``CINDER_PASS`` with the password you chose for the - ``cinder`` user in the Identity service. - - .. note:: - - Comment out or remove any other options in the - ``[keystone_authtoken]`` section. - - * In the ``[DEFAULT]`` section, configure the ``my_ip`` option: - - .. path /etc/cinder/cinder.conf - .. code-block:: ini - - [DEFAULT] - # ... 
- my_ip = MANAGEMENT_INTERFACE_IP_ADDRESS - - .. end - - Replace ``MANAGEMENT_INTERFACE_IP_ADDRESS`` with the IP address - of the management network interface on your storage node, - typically 10.0.0.41 for the first node in the - :ref:`example architecture `. - - -* In the ``[lvm]`` section, configure the LVM back end with the - LVM driver, ``cinder-volumes`` volume group, iSCSI protocol, - and appropriate iSCSI service: - - .. path /etc/cinder/cinder.conf - .. code-block:: ini - - [lvm] - # ... - volume_driver = cinder.volume.drivers.lvm.LVMVolumeDriver - volume_group = cinder-volumes - iscsi_protocol = iscsi - iscsi_helper = tgtadm - - .. end - - - - * In the ``[DEFAULT]`` section, enable the LVM back end: - - .. path /etc/cinder/cinder.conf - .. code-block:: ini - - [DEFAULT] - # ... - enabled_backends = lvm - - .. end - - .. note:: - - Back-end names are arbitrary. As an example, this guide - uses the name of the driver as the name of the back end. - - * In the ``[DEFAULT]`` section, configure the location of the - Image service API: - - .. path /etc/cinder/cinder.conf - .. code-block:: ini - - [DEFAULT] - # ... - glance_api_servers = http://controller:9292 - - .. end - - * In the ``[oslo_concurrency]`` section, configure the lock path: - - .. path /etc/cinder/cinder.conf - .. code-block:: ini - - [oslo_concurrency] - # ... - lock_path = /var/lib/cinder/tmp - - .. end - - -3. Create the ``/etc/tgt/conf.d/cinder.conf`` file - with the following data: - - .. code-block:: shell - - include /var/lib/cinder/volumes/* - - .. end - - -Finalize installation ---------------------- - -#. Start the Block Storage volume service including its dependencies - and configure them to start when the system boots: - - .. 
code-block:: console - - # systemctl enable openstack-cinder-volume.service tgtd.service - # systemctl start openstack-cinder-volume.service tgtd.service diff --git a/doc/source/install/cinder-storage-install-rdo.rst b/doc/source/install/cinder-storage-install-rdo.rst deleted file mode 100644 index 4b339f1bf..000000000 --- a/doc/source/install/cinder-storage-install-rdo.rst +++ /dev/null @@ -1,288 +0,0 @@ -Install and configure a storage node -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -Prerequisites -------------- - -Before you install and configure the Block Storage service on the -storage node, you must prepare the storage device. - -.. note:: - - Perform these steps on the storage node. - -#. Install the supporting utility packages: - - - -* Install the LVM packages: - - .. code-block:: console - - # yum install lvm2 - - .. end - -* Start the LVM metadata service and configure it to start when the - system boots: - - .. code-block:: console - - # systemctl enable lvm2-lvmetad.service - # systemctl start lvm2-lvmetad.service - - .. end - - - - .. note:: - - Some distributions include LVM by default. - -#. Create the LVM physical volume ``/dev/sdb``: - - .. code-block:: console - - # pvcreate /dev/sdb - - Physical volume "/dev/sdb" successfully created - - .. end - -#. Create the LVM volume group ``cinder-volumes``: - - .. code-block:: console - - # vgcreate cinder-volumes /dev/sdb - - Volume group "cinder-volumes" successfully created - - .. end - - The Block Storage service creates logical volumes in this volume group. - -#. Only instances can access Block Storage volumes. However, the - underlying operating system manages the devices associated with - the volumes. By default, the LVM volume scanning tool scans the - ``/dev`` directory for block storage devices that - contain volumes. 
If projects use LVM on their volumes, the scanning - tool detects these volumes and attempts to cache them which can cause - a variety of problems with both the underlying operating system - and project volumes. You must reconfigure LVM to scan only the devices - that contain the ``cinder-volumes`` volume group. Edit the - ``/etc/lvm/lvm.conf`` file and complete the following actions: - - * In the ``devices`` section, add a filter that accepts the - ``/dev/sdb`` device and rejects all other devices: - - .. path /etc/lvm/lvm.conf - .. code-block:: bash - - devices { - ... - filter = [ "a/sdb/", "r/.*/"] - - .. end - - Each item in the filter array begins with ``a`` for **accept** or - ``r`` for **reject** and includes a regular expression for the - device name. The array must end with ``r/.*/`` to reject any - remaining devices. You can use the :command:`vgs -vvvv` command - to test filters. - - .. warning:: - - If your storage nodes use LVM on the operating system disk, you - must also add the associated device to the filter. For example, - if the ``/dev/sda`` device contains the operating system: - - .. ignore_path /etc/lvm/lvm.conf - .. code-block:: ini - - filter = [ "a/sda/", "a/sdb/", "r/.*/"] - - .. end - - Similarly, if your compute nodes use LVM on the operating - system disk, you must also modify the filter in the - ``/etc/lvm/lvm.conf`` file on those nodes to include only - the operating system disk. For example, if the ``/dev/sda`` - device contains the operating system: - - .. path /etc/openstack-dashboard/local_settings.py - .. code-block:: ini - - filter = [ "a/sda/", "r/.*/"] - - .. end - -Install and configure components --------------------------------- - - - -#. Install the packages: - - .. code-block:: console - - # yum install openstack-cinder targetcli python-keystone - - .. end - - - -2. Edit the ``/etc/cinder/cinder.conf`` file - and complete the following actions: - - * In the ``[database]`` section, configure database access: - - .. 
path /etc/cinder/cinder.conf - .. code-block:: ini - - [database] - # ... - connection = mysql+pymysql://cinder:CINDER_DBPASS@controller/cinder - - .. end - - Replace ``CINDER_DBPASS`` with the password you chose for - the Block Storage database. - - * In the ``[DEFAULT]`` section, configure ``RabbitMQ`` - message queue access: - - .. path /etc/cinder/cinder.conf - .. code-block:: ini - - [DEFAULT] - # ... - transport_url = rabbit://openstack:RABBIT_PASS@controller - - .. end - - Replace ``RABBIT_PASS`` with the password you chose for - the ``openstack`` account in ``RabbitMQ``. - - * In the ``[DEFAULT]`` and ``[keystone_authtoken]`` sections, - configure Identity service access: - - .. path /etc/cinder/cinder.conf - .. code-block:: ini - - [DEFAULT] - # ... - auth_strategy = keystone - - [keystone_authtoken] - # ... - auth_uri = http://controller:5000 - auth_url = http://controller:35357 - memcached_servers = controller:11211 - auth_type = password - project_domain_name = default - user_domain_name = default - project_name = service - username = cinder - password = CINDER_PASS - - .. end - - Replace ``CINDER_PASS`` with the password you chose for the - ``cinder`` user in the Identity service. - - .. note:: - - Comment out or remove any other options in the - ``[keystone_authtoken]`` section. - - * In the ``[DEFAULT]`` section, configure the ``my_ip`` option: - - .. path /etc/cinder/cinder.conf - .. code-block:: ini - - [DEFAULT] - # ... - my_ip = MANAGEMENT_INTERFACE_IP_ADDRESS - - .. end - - Replace ``MANAGEMENT_INTERFACE_IP_ADDRESS`` with the IP address - of the management network interface on your storage node, - typically 10.0.0.41 for the first node in the - :ref:`example architecture `. - - - -* In the ``[lvm]`` section, configure the LVM back end with the - LVM driver, ``cinder-volumes`` volume group, iSCSI protocol, - and appropriate iSCSI service. If the ``[lvm]`` section does not exist, - create it: - - .. path /etc/cinder/cinder.conf - .. 
code-block:: ini - - [lvm] - volume_driver = cinder.volume.drivers.lvm.LVMVolumeDriver - volume_group = cinder-volumes - iscsi_protocol = iscsi - iscsi_helper = lioadm - - .. end - - - * In the ``[DEFAULT]`` section, enable the LVM back end: - - .. path /etc/cinder/cinder.conf - .. code-block:: ini - - [DEFAULT] - # ... - enabled_backends = lvm - - .. end - - .. note:: - - Back-end names are arbitrary. As an example, this guide - uses the name of the driver as the name of the back end. - - * In the ``[DEFAULT]`` section, configure the location of the - Image service API: - - .. path /etc/cinder/cinder.conf - .. code-block:: ini - - [DEFAULT] - # ... - glance_api_servers = http://controller:9292 - - .. end - - * In the ``[oslo_concurrency]`` section, configure the lock path: - - .. path /etc/cinder/cinder.conf - .. code-block:: ini - - [oslo_concurrency] - # ... - lock_path = /var/lib/cinder/tmp - - .. end - - -Finalize installation ---------------------- - - - -* Start the Block Storage volume service including its dependencies - and configure them to start when the system boots: - - .. code-block:: console - - # systemctl enable openstack-cinder-volume.service target.service - # systemctl start openstack-cinder-volume.service target.service - - .. end - - diff --git a/doc/source/install/cinder-storage-install-ubuntu.rst b/doc/source/install/cinder-storage-install-ubuntu.rst deleted file mode 100644 index 8866c80a7..000000000 --- a/doc/source/install/cinder-storage-install-ubuntu.rst +++ /dev/null @@ -1,275 +0,0 @@ -Install and configure a storage node -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -Prerequisites -------------- - -Before you install and configure the Block Storage service on the -storage node, you must prepare the storage device. - -.. note:: - - Perform these steps on the storage node. - -#. Install the supporting utility packages: - - - - -.. code-block:: console - - # apt install lvm2 - -.. end - - - .. 
note:: - - Some distributions include LVM by default. - -#. Create the LVM physical volume ``/dev/sdb``: - - .. code-block:: console - - # pvcreate /dev/sdb - - Physical volume "/dev/sdb" successfully created - - .. end - -#. Create the LVM volume group ``cinder-volumes``: - - .. code-block:: console - - # vgcreate cinder-volumes /dev/sdb - - Volume group "cinder-volumes" successfully created - - .. end - - The Block Storage service creates logical volumes in this volume group. - -#. Only instances can access Block Storage volumes. However, the - underlying operating system manages the devices associated with - the volumes. By default, the LVM volume scanning tool scans the - ``/dev`` directory for block storage devices that - contain volumes. If projects use LVM on their volumes, the scanning - tool detects these volumes and attempts to cache them which can cause - a variety of problems with both the underlying operating system - and project volumes. You must reconfigure LVM to scan only the devices - that contain the ``cinder-volumes`` volume group. Edit the - ``/etc/lvm/lvm.conf`` file and complete the following actions: - - * In the ``devices`` section, add a filter that accepts the - ``/dev/sdb`` device and rejects all other devices: - - .. path /etc/lvm/lvm.conf - .. code-block:: bash - - devices { - ... - filter = [ "a/sdb/", "r/.*/"] - - .. end - - Each item in the filter array begins with ``a`` for **accept** or - ``r`` for **reject** and includes a regular expression for the - device name. The array must end with ``r/.*/`` to reject any - remaining devices. You can use the :command:`vgs -vvvv` command - to test filters. - - .. warning:: - - If your storage nodes use LVM on the operating system disk, you - must also add the associated device to the filter. For example, - if the ``/dev/sda`` device contains the operating system: - - .. ignore_path /etc/lvm/lvm.conf - .. code-block:: ini - - filter = [ "a/sda/", "a/sdb/", "r/.*/"] - - .. 
end - - Similarly, if your compute nodes use LVM on the operating - system disk, you must also modify the filter in the - ``/etc/lvm/lvm.conf`` file on those nodes to include only - the operating system disk. For example, if the ``/dev/sda`` - device contains the operating system: - - .. path /etc/openstack-dashboard/local_settings.py - .. code-block:: ini - - filter = [ "a/sda/", "r/.*/"] - - .. end - -Install and configure components --------------------------------- - - - - -#. Install the packages: - - .. code-block:: console - - # apt install cinder-volume - - .. end - - -2. Edit the ``/etc/cinder/cinder.conf`` file - and complete the following actions: - - * In the ``[database]`` section, configure database access: - - .. path /etc/cinder/cinder.conf - .. code-block:: ini - - [database] - # ... - connection = mysql+pymysql://cinder:CINDER_DBPASS@controller/cinder - - .. end - - Replace ``CINDER_DBPASS`` with the password you chose for - the Block Storage database. - - * In the ``[DEFAULT]`` section, configure ``RabbitMQ`` - message queue access: - - .. path /etc/cinder/cinder.conf - .. code-block:: ini - - [DEFAULT] - # ... - transport_url = rabbit://openstack:RABBIT_PASS@controller - - .. end - - Replace ``RABBIT_PASS`` with the password you chose for - the ``openstack`` account in ``RabbitMQ``. - - * In the ``[DEFAULT]`` and ``[keystone_authtoken]`` sections, - configure Identity service access: - - .. path /etc/cinder/cinder.conf - .. code-block:: ini - - [DEFAULT] - # ... - auth_strategy = keystone - - [keystone_authtoken] - # ... - auth_uri = http://controller:5000 - auth_url = http://controller:35357 - memcached_servers = controller:11211 - auth_type = password - project_domain_name = default - user_domain_name = default - project_name = service - username = cinder - password = CINDER_PASS - - .. end - - Replace ``CINDER_PASS`` with the password you chose for the - ``cinder`` user in the Identity service. - - .. 
note:: - - Comment out or remove any other options in the - ``[keystone_authtoken]`` section. - - * In the ``[DEFAULT]`` section, configure the ``my_ip`` option: - - .. path /etc/cinder/cinder.conf - .. code-block:: ini - - [DEFAULT] - # ... - my_ip = MANAGEMENT_INTERFACE_IP_ADDRESS - - .. end - - Replace ``MANAGEMENT_INTERFACE_IP_ADDRESS`` with the IP address - of the management network interface on your storage node, - typically 10.0.0.41 for the first node in the - :ref:`example architecture `. - - -* In the ``[lvm]`` section, configure the LVM back end with the - LVM driver, ``cinder-volumes`` volume group, iSCSI protocol, - and appropriate iSCSI service: - - .. path /etc/cinder/cinder.conf - .. code-block:: ini - - [lvm] - # ... - volume_driver = cinder.volume.drivers.lvm.LVMVolumeDriver - volume_group = cinder-volumes - iscsi_protocol = iscsi - iscsi_helper = tgtadm - - .. end - - - - * In the ``[DEFAULT]`` section, enable the LVM back end: - - .. path /etc/cinder/cinder.conf - .. code-block:: ini - - [DEFAULT] - # ... - enabled_backends = lvm - - .. end - - .. note:: - - Back-end names are arbitrary. As an example, this guide - uses the name of the driver as the name of the back end. - - * In the ``[DEFAULT]`` section, configure the location of the - Image service API: - - .. path /etc/cinder/cinder.conf - .. code-block:: ini - - [DEFAULT] - # ... - glance_api_servers = http://controller:9292 - - .. end - - * In the ``[oslo_concurrency]`` section, configure the lock path: - - .. path /etc/cinder/cinder.conf - .. code-block:: ini - - [oslo_concurrency] - # ... - lock_path = /var/lib/cinder/tmp - - .. end - - -Finalize installation ---------------------- - - - - -#. Restart the Block Storage volume service including its dependencies: - - .. code-block:: console - - # service tgt restart - # service cinder-volume restart - - .. 
end - diff --git a/doc/source/install/cinder-verify.rst b/doc/source/install/cinder-verify.rst deleted file mode 100644 index 5b87ce833..000000000 --- a/doc/source/install/cinder-verify.rst +++ /dev/null @@ -1,35 +0,0 @@ -.. _cinder-verify: - -Verify Cinder operation -~~~~~~~~~~~~~~~~~~~~~~~ - -Verify operation of the Block Storage service. - -.. note:: - - Perform these commands on the controller node. - -#. Source the ``admin`` credentials to gain access to - admin-only CLI commands: - - .. code-block:: console - - $ . admin-openrc - - .. end - -#. List service components to verify successful launch of each process: - - .. code-block:: console - - $ openstack volume service list - - +------------------+------------+------+---------+-------+----------------------------+ - | Binary | Host | Zone | Status | State | Updated_at | - +------------------+------------+------+---------+-------+----------------------------+ - | cinder-scheduler | controller | nova | enabled | up | 2016-09-30T02:27:41.000000 | - | cinder-volume | block@lvm | nova | enabled | up | 2016-09-30T02:27:46.000000 | - +------------------+------------+------+---------+-------+----------------------------+ - - - .. end diff --git a/doc/source/install/index-obs.rst b/doc/source/install/index-obs.rst deleted file mode 100644 index 1ef734724..000000000 --- a/doc/source/install/index-obs.rst +++ /dev/null @@ -1,23 +0,0 @@ -=================================================================== -Cinder Installation Tutorial for openSUSE and SUSE Linux Enterprise -=================================================================== - -This section describes how to install and configure storage nodes -for the Block Storage service. For simplicity, this configuration -references one storage node with an empty local block storage device. -The instructions use ``/dev/sdb``, but you can substitute a different -value for your particular node. 
- -The service provisions logical volumes on this device using the -:term:`LVM ` driver and provides them -to instances via :term:`iSCSI ` transport. -You can follow these instructions with minor modifications to horizontally -scale your environment with additional storage nodes. - -.. toctree:: - :maxdepth: 2 - - cinder-storage-install-obs.rst - cinder-controller-install-obs.rst - cinder-backup-install-obs.rst - cinder-verify.rst diff --git a/doc/source/install/index-rdo.rst b/doc/source/install/index-rdo.rst deleted file mode 100644 index 9bbc27b4a..000000000 --- a/doc/source/install/index-rdo.rst +++ /dev/null @@ -1,23 +0,0 @@ -====================================================================== -Cinder Installation Tutorial for Red Hat Enterprise Linux and CentOS -====================================================================== - -This section describes how to install and configure storage nodes -for the Block Storage service. For simplicity, this configuration -references one storage node with an empty local block storage device. -The instructions use ``/dev/sdb``, but you can substitute a different -value for your particular node. - -The service provisions logical volumes on this device using the -:term:`LVM ` driver and provides them -to instances via :term:`iSCSI ` transport. -You can follow these instructions with minor modifications to horizontally -scale your environment with additional storage nodes. - -.. 
toctree:: - :maxdepth: 2 - - cinder-storage-install-rdo.rst - cinder-controller-install-rdo.rst - cinder-backup-install-rdo.rst - cinder-verify.rst diff --git a/doc/source/install/index-ubuntu.rst b/doc/source/install/index-ubuntu.rst deleted file mode 100644 index ca37c0232..000000000 --- a/doc/source/install/index-ubuntu.rst +++ /dev/null @@ -1,23 +0,0 @@ -======================================= -Cinder Installation Tutorial for Ubuntu -======================================= - -This section describes how to install and configure storage nodes -for the Block Storage service. For simplicity, this configuration -references one storage node with an empty local block storage device. -The instructions use ``/dev/sdb``, but you can substitute a different -value for your particular node. - -The service provisions logical volumes on this device using the -:term:`LVM ` driver and provides them -to instances via :term:`iSCSI ` transport. -You can follow these instructions with minor modifications to horizontally -scale your environment with additional storage nodes. - -.. toctree:: - :maxdepth: 2 - - cinder-storage-install-ubuntu.rst - cinder-controller-install-ubuntu.rst - cinder-backup-install-ubuntu.rst - cinder-verify.rst diff --git a/doc/source/install/index.rst b/doc/source/install/index.rst deleted file mode 100644 index a88080c06..000000000 --- a/doc/source/install/index.rst +++ /dev/null @@ -1,25 +0,0 @@ -.. _cinder: - -============================ -Cinder Installation Tutorial -============================ - -The Block Storage service (cinder) provides block storage devices -to guest instances. The method in which the storage is provisioned and -consumed is determined by the Block Storage driver, or drivers -in the case of a multi-backend configuration. There are a variety of -drivers that are available: NAS/SAN, NFS, iSCSI, Ceph, and more. - -The Block Storage API and scheduler services typically run on the controller -nodes. 
Depending upon the drivers used, the volume service can run -on controller nodes, compute nodes, or standalone storage nodes. - -For more information, see the -`Configuration Reference `_. - -.. toctree:: - - index-obs - index-rdo - index-ubuntu - diff --git a/doc/source/man/cinder-manage.rst b/doc/source/man/cinder-manage.rst deleted file mode 100644 index d4f816ab4..000000000 --- a/doc/source/man/cinder-manage.rst +++ /dev/null @@ -1,175 +0,0 @@ -============= -cinder-manage -============= - ------------------------------------------- -Control and manage OpenStack block storage ------------------------------------------- - -:Author: openstack@lists.openstack.org -:Copyright: OpenStack Foundation -:Manual section: 1 -:Manual group: cloud computing - -SYNOPSIS -======== - - cinder-manage [] - -DESCRIPTION -=========== - -:command:`cinder-manage` provides control of cinder database migration, -and provides an interface to get information about the current state -of cinder. -More information about OpenStack Cinder is available at `OpenStack Cinder `_. - -OPTIONS -======= - -The standard pattern for executing a cinder-manage command is: -``cinder-manage []`` - -For example, to obtain a list of the cinder services currently running: -``cinder-manage service list`` - -Run without arguments to see a list of available command categories: -``cinder-manage`` - -Categories are shell, logs, migrate, db, volume, host, service, backup, version, and config. Detailed descriptions are below. - -You can also run with a category argument such as 'db' to see a list of all commands in that category: -``cinder-manage db`` - -These sections describe the available categories and arguments for cinder-manage. - -Cinder Db -~~~~~~~~~ - -``cinder-manage db version`` - - Print the current database version. - -``cinder-manage db sync`` - - Sync the database up to the most recent version. This is the standard way to create the db as well. 
- -``cinder-manage db purge []`` - - Purge database entries that are marked as deleted, that are older than the number of days specified. - -``cinder-manage db online_data_migrations`` - - Perform online data migrations for database upgrade between releases in batches. - - This command interprets the following options when it is invoked: - - --max_count Maximum number of objects to consider. - --ignore_state Force records to migrate even if another operation is - performed on them. This may be dangerous, please refer to - release notes for more information. - -Cinder Logs -~~~~~~~~~~~ - -``cinder-manage logs errors`` - - Displays cinder errors from log files. - -``cinder-manage logs syslog []`` - - Displays cinder the most recent entries from syslog. The optional number argument specifies the number of entries to display (default 10). - -Cinder Shell -~~~~~~~~~~~~ - -``cinder-manage shell bpython`` - - Starts a new bpython shell. - -``cinder-manage shell ipython`` - - Starts a new ipython shell. - -``cinder-manage shell python`` - - Starts a new python shell. - -``cinder-manage shell run`` - - Starts a new shell using python. - -``cinder-manage shell script `` - - Runs the named script from the specified path with flags set. - -Cinder Volume -~~~~~~~~~~~~~ - -``cinder-manage volume delete `` - - Delete a volume without first checking that the volume is available. - -``cinder-manage volume update_host --currenthost --newhost `` - - Updates the host name of all volumes currently associated with a specified host. - -Cinder Host -~~~~~~~~~~~ - -``cinder-manage host list []`` - - Displays a list of all physical hosts and their zone. The optional zone argument allows the list to be filtered on the requested zone. - -Cinder Service -~~~~~~~~~~~~~~ - -``cinder-manage service list`` - - Displays a list of all cinder services and their host, zone, status, state and when the information was last updated. 
- -``cinder-manage service remove `` - - Removes a specified cinder service from a specified host. - -Cinder Backup -~~~~~~~~~~~~~ - -``cinder-manage backup list`` - - Displays a list of all backups (including ones in progress) and the host on which the backup operation is running. - -``cinder-manage backup update_backup_host --currenthost --newhost `` - - Updates the host name of all backups currently associated with a specified host. - -Cinder Version -~~~~~~~~~~~~~~ - -``cinder-manage version list`` - - Displays the codebase version cinder is running upon. - -Cinder Config -~~~~~~~~~~~~~ - -``cinder-manage config list []`` - - Displays the current configuration parameters (options) for Cinder. The optional flag parameter may be used to display the configuration of one parameter. - -FILES -===== - -The cinder.conf file contains configuration information in the form of python-gflags. - -The cinder-manage.log file logs output from cinder-manage. - -SEE ALSO -======== - -* `OpenStack Cinder `__ - -BUGS -==== - -* Cinder is hosted on Launchpad so you can view current bugs at `Bugs : Cinder `__ diff --git a/doc/source/man/generalized_filters.rst b/doc/source/man/generalized_filters.rst deleted file mode 100644 index 57321c52d..000000000 --- a/doc/source/man/generalized_filters.rst +++ /dev/null @@ -1,78 +0,0 @@ -Generalized filters -=================== - -Background ----------- - -Cinder introduced generalized resource filters since Pike, it has the -same purpose as ``query_volume_filters`` option, but it's more convenient -and can be applied to more cinder resources, administrator can control the -allowed filter keys for **non-admin** user by editing the filter -configuration file. Also since this feature, cinder will raise -``400 BadRequest`` if any invalid query filter is specified. - -How do I configure the filter keys? 
------------------------------------ - -``resource_query_filters_file`` is introduced to cinder to represent the -filter config file path, and the config file accepts the valid filter keys -for **non-admin** user with json format: - -.. code-block:: json - - { - "volume": ["name", "status", "metadata"] - } - - -the key ``volume`` (singular) here stands for the resource you want to apply and the value -accepts an list which contains the allowed filters collection, once the configuration -file is changed and API service is restarted, cinder will only recognize this filter -keys, **NOTE**: the default configuration file will include all the filters we already -enabled. - -Which filter keys are supported? --------------------------------- - -Not all the attributes are supported at present, so we add this table below to -indicate which filter keys are valid and can be used in the configuration. - -Since v3.34 we could use '~' to indicate supporting querying resource by inexact match, -for example, if we have a configuration file as below: - -.. code-block:: json - - { - "volume": ["name~"] - } - -User can query volume both by ``name=volume`` and ``name~=volume``, and the volumes -named ``volume123`` and ``a_volume123`` are both valid for second input while neither are -valid for first. The supported APIs are marked with "*" below in the table. 
- -+-----------------+-------------------------------------------------------------------------+ -| API | Valid filter keys | -+=================+=========================================================================+ -| | id, group_id, name, status, bootable, migration_status, metadata, host, | -| list volume* | image_metadata, availability_zone, user_id, volume_type_id, project_id, | -| | size, description, replication_status, multiattach | -+-----------------+-------------------------------------------------------------------------+ -| | id, volume_id, user_id, project_id, status, volume_size, name, | -| list snapshot* | description, volume_type_id, group_snapshot_id, metadata | -+-----------------+-------------------------------------------------------------------------+ -| | id, name, status, container, availability_zone, description, | -| list backup* | volume_id, is_incremental, size, host, parent_id | -+-----------------+-------------------------------------------------------------------------+ -| | id, user_id, status, availability_zone, group_type, name, description, | -| list group* | host | -+-----------------+-------------------------------------------------------------------------+ -| list g-snapshot*| id, name, description, group_id, group_type_id, status | -+-----------------+-------------------------------------------------------------------------+ -| | id, volume_id, instance_id, attach_status, attach_mode, | -| list attachment*| connection_info, mountpoint, attached_host | -+-----------------+-------------------------------------------------------------------------+ -| | id, event_id, resource_uuid, resource_type, request_id, message_level, | -| list message* | project_id | -+-----------------+-------------------------------------------------------------------------+ -| get pools | name, volume_type | -+-----------------+-------------------------------------------------------------------------+ diff --git a/doc/source/reference/README.rst 
b/doc/source/reference/README.rst deleted file mode 100644 index fd0afd4ef..000000000 --- a/doc/source/reference/README.rst +++ /dev/null @@ -1,15 +0,0 @@ -============================== -Cinder Reference Documentation -============================== - -Introduction: -------------- - -This directory is intended to hold any reference documentation for Cinder -that doesn't fit into 'install', 'contributor', 'configuration', 'cli', -'admin', or 'user' categories. - -The full spec for organization of documentation may be seen in the -`OS Manuals Migration Spec -`. - diff --git a/doc/source/sample_config.rst b/doc/source/sample_config.rst deleted file mode 100644 index 3c366492a..000000000 --- a/doc/source/sample_config.rst +++ /dev/null @@ -1,13 +0,0 @@ -============================ -Cinder Configuration Options -============================ - -The following is a sample Cinder configuration for adaptation and use. It is -auto-generated from Cinder when this documentation is built, so if you are -having issues with an option, please compare your version of Cinder with the -version of this documentation. - -The sample configuration can also be viewed in -`file form <_static/cinder.conf.sample>`_. - -.. literalinclude:: _static/cinder.conf.sample diff --git a/doc/source/scheduler-filters.rst b/doc/source/scheduler-filters.rst deleted file mode 100644 index 2bb153ab0..000000000 --- a/doc/source/scheduler-filters.rst +++ /dev/null @@ -1,6 +0,0 @@ -============================== - Cinder Scheduler Filters -============================== - -.. list-plugins:: cinder.scheduler.filters - :detailed: diff --git a/doc/source/scheduler-weights.rst b/doc/source/scheduler-weights.rst deleted file mode 100644 index daeced7e3..000000000 --- a/doc/source/scheduler-weights.rst +++ /dev/null @@ -1,6 +0,0 @@ -========================== - Cinder Scheduler Weights -========================== - -.. 
list-plugins:: cinder.scheduler.weights - :detailed: diff --git a/doc/source/upgrade.rst b/doc/source/upgrade.rst deleted file mode 100644 index 6e7a226bc..000000000 --- a/doc/source/upgrade.rst +++ /dev/null @@ -1,248 +0,0 @@ -Upgrades -======== - -Cinder aims to provide upgrades with minimal downtime. - -This should be achieved for both data and control plane. As Cinder doesn't -interfere with data plane, its upgrade shouldn't affect any volumes being -accessed by virtual machines. - -Keeping the control plane running during an upgrade is more difficult. This -document's goal is to provide preliminaries and a detailed procedure of such -upgrade. - -Concepts --------- - -Here are the key concepts you need to know before reading the section on the -upgrade process: - -RPC version pinning -''''''''''''''''''' - -Through careful RPC versioning, newer services are able to talk to older -services (and vice-versa). The versions are autodetected using information -reported in ``services`` table. In case of receiving ``CappedVersionUnknown`` -or ``ServiceTooOld`` exceptions on service start, you're probably having some -old orphaned records in that table. - -Graceful service shutdown -''''''''''''''''''''''''' - -Many cinder services are python processes listening for messages on a AMQP -queue. When the operator sends SIGTERM signal to the process, it stops getting -new work from its queue, completes any outstanding work and then terminates. -During this process, messages can be left on the queue for when the python -process starts back up. This gives us a way to shutdown a service using older -code, and start up a service using newer code with minimal impact. - -.. note:: - Waiting for completion of long-running operations (e.g. slow volume copy - operation) may take a while. - -.. note:: - This was tested with RabbitMQ messaging backend and may vary with other - backends. 
- -Online Data Migrations -'''''''''''''''''''''' - -To make DB schema migrations less painful to execute, since Liberty, all data -migrations are banned from schema migration scripts. Instead, the migrations -should be done by background process in a manner that doesn't interrupt running -services (you can also execute online data migrations with services turned off -if you're doing a cold upgrade). In Ocata a new ``cinder-manage db -online-data-migrations`` utility was added for that purpose. Before upgrading -Ocata to Pike, you need to run this tool in the background, until it tells you -no more migrations are needed. Note that you won't be able to apply Pike's -schema migrations before completing Ocata's online data migrations. - -API load balancer draining -'''''''''''''''''''''''''' - -When upgrading API nodes, you can make your load balancer only send new -connections to the newer API nodes, allowing for a seamless update of your API -nodes. - -DB prune deleted rows -''''''''''''''''''''' - -Currently resources are soft deleted in the database, so users are able to -track instances in the DB that are created and destroyed in production. -However, most people have a data retention policy, of say 30 days or 90 days -after which they will want to delete those entries. Not deleting those entries -affects DB performance as indices grow very large and data migrations take -longer as there is more data to migrate. To make pruning easier there's a -``cinder-manage db purge `` command that permanently deletes -records older than specified age. - -Versioned object backports -'''''''''''''''''''''''''' - -RPC pinning ensures new services can talk to the older service's method -signatures. But many of the parameters are objects that may well be too new for -the old service to understand. Cinder makes sure to backport an object to a -version that it is pinned to before sending. 
- -Minimal Downtime Upgrade Procedure ----------------------------------- - -Plan your upgrade -''''''''''''''''' - -* Read and ensure you understand the release notes for the next release. - -* Make a backup of your database. Cinder does not support downgrading of the - database. Hence, in case of upgrade failure, restoring database from backup - is the only choice. - -* Note that there's an assumption that live upgrade can be performed only - between subsequent releases. This means that you cannot upgrade Liberty - directly into Newton, you need to upgrade to Mitaka first. - -* To avoid dependency hell it is advised to have your Cinder services deployed - separately in containers or Python venvs. - -* Note that Cinder is basing version detection on what is reported in the - ``services`` table in the DB. Before upgrade make sure you don't have any - orphaned old records there, because these can block starting newer services. - You can clean them up using ``cinder-manage service remove `` - command. - -* Assumed service upgrade order is cinder-api, cinder-scheduler, cinder-volume - and finally cinder-backup. - -Rolling upgrade process -''''''''''''''''''''''' - -To reduce downtime, the services can be upgraded in a rolling fashion. It means -upgrading a few services at a time. To minimise downtime you need to have HA -Cinder deployment, so at the moment a service is upgraded, you'll keep other -service instances running. - -Before maintenance window -""""""""""""""""""""""""" - -* First you should execute required DB schema migrations. To achieve that - without interrupting your existing installation, install new Cinder code in - new venv or a container and run the DB sync (``cinder-manage db sync``). - These schema change operations should have minimal or no effect on - performance, and should not cause any operations to fail. - -* At this point, new columns and tables may exist in the database. 
These - DB schema changes are done in a way that both the N and N+1 release can - perform operations against the same schema. - -During maintenance window -""""""""""""""""""""""""" - -1. cinder-api services should go first. In HA deployment you're typically - running them behind a load balancer (e.g. HAProxy), so you need to take one - service instance out of the balancer, shut it down, upgrade the code and - dependencies, and start the service again. Then you can plug it back into - the load balancer. Cinder's internal mechanisms will make sure that new - c-api will detect that it's running with older versions and will downgrade - any communication. - - .. note:: - - You may want to start another instance of older c-api to handle the load - while you're upgrading your original services. - -2. Then you should repeat first step for all of the cinder-api services. - -3. Next service is cinder-scheduler. It is load-balanced by the message queue, - so the only thing you need to worry about is to shut it down gracefully - (using ``SIGTERM`` signal) to make sure it will finish all the requests - being processed before shutting down. Then you should upgrade the code and - restart the service. - -4. Repeat third step for all of your cinder-scheduler services. - -5. Then you proceed to upgrade cinder-volume services. The problem here is that - due to Active/Passive character of this service, you're unable to run - multiple instances of cinder-volume managing a single volume backend. This - means that there will be a moment when you won't have any cinder-volume in - your deployment and you want that disruption to be as short as possible. - - .. note:: - - The downtime here is non-disruptive as long as it doesn't exceed the - service heartbeat timeout. If you don't exceed that, then - cinder-schedulers will not notice that cinder-volume is gone and the - message queue will take care of queuing any RPC messages until - cinder-volume is back. 
- - To make sure it's achieved, you can either lengthen the timeout by - tweaking ``service_down_time`` value in ``cinder.conf``, or prepare - upgraded cinder-volume on another node and do a very quick switch by - shutting down older service and starting the new one just after that. - - Also note that in case of A/P HA configuration you need to make sure both - primary and secondary c-vol have the same hostname set (you can override - it using ``host`` option in ``cinder.conf``), so both will be listening on - the same message queue and will accept the same messages. - -6. Repeat fifth step for all cinder-volume services. - -7. Now we should proceed with (optional) cinder-backup services. You should - upgrade them in the same manner like cinder-scheduler. - - .. note:: - - Backup operations are time consuming, so shutting down a c-bak service - without interrupting ongoing requests can take time. It may be useful to - disable the service first using ``cinder service-disable`` command, so it - won't accept new requests, and wait a reasonable amount of time until all - the in-progress jobs are completed. Then you can proceed with the upgrade. - To make sure the backup service finished all the ongoing requests, you can - check the service logs. - - .. note:: - - Until Liberty cinder-backup was tightly coupled with cinder-volume service - and needed to coexist on the same physical node. This is not true starting - with Mitaka version. If you're still keeping that coupling, then your - upgrade strategy for cinder-backup should be more similar to how - cinder-volume is upgraded. 
- -After maintenance window -"""""""""""""""""""""""" - -* Once all services are running the new code, double check in the DB that - there are no old orphaned records in ``services`` table (Cinder doesn't - remove the records when service is gone or service hostname is changed, so - you need to take care of that manually; you should be able to distinguish - dead records by looking at when the record was updated). Cinder is basing its - RPC version detection on that, so stale records can prevent you from going - forward. - -* Now all services are upgraded, we need to send the ``SIGHUP`` signal, so - all the services clear any cached service version data. When a new service - starts, it automatically detects which version of the service's RPC protocol - to use, and will downgrade any communication to that version. Be advised - that cinder-api service doesn't handle ``SIGHUP`` so it needs to be - restarted. It's best to restart your cinder-api services as last ones, as - that way you make sure API will fail fast when user requests new features on - a deployment that's not fully upgraded (new features can fail when RPC - messages are backported to lowest common denominator). Order of the rest of - the services shouldn't matter. - -* Now all the services are upgraded, the system is able to use the latest - version of the RPC protocol and able to access all the features of the new - release. - -* At this point, you must also ensure you update the configuration, to stop - using any deprecated features or options, and perform any required work - to transition to alternative features. All the deprecated options should - be supported for one cycle, but should be removed before your next - upgrade is performed. - -* Since Ocata, you also need to run ``cinder-manage db online-data-migrations`` - command to make sure data migrations are applied. 
The tool let's you limit - the impact of the data migrations by using ``--max_number`` option to limit - number of migrations executed in one run. You need to complete all of the - migrations before starting upgrade to the next version (e.g. you need to - complete Ocata's data migrations before proceeding with upgrade to Pike; you - won't be able to execute Pike's DB schema migrations before completing - Ocata's data migrations). diff --git a/doc/source/user/README.rst b/doc/source/user/README.rst deleted file mode 100644 index 27b70598c..000000000 --- a/doc/source/user/README.rst +++ /dev/null @@ -1,16 +0,0 @@ -========================== -Cinder User Documentation -========================== - -Introduction: -------------- - -This directory is intended to hold any documentation that helps Cinder -end-users. This can include concept guides, tutorials, step-by-step guides -for using the CLI, etc. Note that documentation this is focused on -administrative actions should go into 'doc/source/admin'. - -The full spec for organization of documentation may be seen in the -`OS Manuals Migration Spec -`. - diff --git a/driver-requirements.txt b/driver-requirements.txt deleted file mode 100644 index 694d77092..000000000 --- a/driver-requirements.txt +++ /dev/null @@ -1,45 +0,0 @@ -# Document dependencies that are only used if using -# certain drivers. This file is not managed by -# requirements tools. 
- -# DRBD -dbus # MIT -drbdmanage # GPLv3 - -# HPE 3PAR -hpe3parclient>=4.1.0 # Apache-2.0 - -# Kaminario -krest>=1.3.0 # Apache-2.0 - -# Pure Storage -purestorage>=1.6.0 # BSD - -# Dell EMC VMAX, IBM DS8K -pyOpenSSL>=1.0.0 # Apache-2.0 - -# HPE Lefthand -python-lefthandclient>=2.0.0 # Apache-2.0 - -# Fujitsu Eternus DX -pywbem>=0.7.0 # LGPLv2.1+ - -# IBM XIV -pyxcli>=1.1.0 # Apache-2.0 - -# RBD -rados # LGPLv2.1 -rbd # LGPLv2.1 - -# Dell EMC VNX -storops>=0.4.8 # Apache-2.0 - -# Violin -vmemclient>=1.1.8 # Apache-2.0 - -# INFINIDAT -infinisdk # BSD-3 -capacity # BSD -infi.dtypes.wwn # PSF -infi.dtypes.iqn # PSF - diff --git a/etc/cinder/README-cinder.conf.sample b/etc/cinder/README-cinder.conf.sample deleted file mode 100644 index 34daf94b2..000000000 --- a/etc/cinder/README-cinder.conf.sample +++ /dev/null @@ -1,5 +0,0 @@ -The cinder.conf sample file is no longer generated and -maintained in Trunk. To generate your own version of -cinder.conf, use the following command: - -tox -egenconfig diff --git a/etc/cinder/api-httpd.conf b/etc/cinder/api-httpd.conf deleted file mode 100644 index f3555477a..000000000 --- a/etc/cinder/api-httpd.conf +++ /dev/null @@ -1,16 +0,0 @@ -Listen 8776 -LogFormat "%h %l %u %t \"%r\" %>s %b \"%{Referer}i\" \"%{User-agent}i\" %D(us)" cinder_combined - - - WSGIDaemonProcess osapi_volume processes=2 threads=1 user=cinder display-name=%{GROUP} - WSGIProcessGroup osapi_volume - WSGIScriptAlias / /var/www/cgi-bin/cinder/osapi_volume - WSGIApplicationGroup %{GLOBAL} - WSGIPassAuthorization On - = 2.4> - ErrorLogFormat "%{cu}t %M" - - ErrorLog /var/log/apache2/cinder_error.log - CustomLog /var/log/apache2/cinder.log cinder_combined - - diff --git a/etc/cinder/api-paste.ini b/etc/cinder/api-paste.ini deleted file mode 100644 index a761f53d0..000000000 --- a/etc/cinder/api-paste.ini +++ /dev/null @@ -1,75 +0,0 @@ -############# -# OpenStack # -############# - -[composite:osapi_volume] -use = call:cinder.api:root_app_factory -/: apiversions -/v1: 
openstack_volume_api_v1 -/v2: openstack_volume_api_v2 -/v3: openstack_volume_api_v3 - -[composite:openstack_volume_api_v1] -use = call:cinder.api.middleware.auth:pipeline_factory -noauth = cors http_proxy_to_wsgi request_id faultwrap sizelimit osprofiler noauth apiv1 -keystone = cors http_proxy_to_wsgi request_id faultwrap sizelimit osprofiler authtoken keystonecontext apiv1 -keystone_nolimit = cors http_proxy_to_wsgi request_id faultwrap sizelimit osprofiler authtoken keystonecontext apiv1 - -[composite:openstack_volume_api_v2] -use = call:cinder.api.middleware.auth:pipeline_factory -noauth = cors http_proxy_to_wsgi request_id faultwrap sizelimit osprofiler noauth apiv2 -keystone = cors http_proxy_to_wsgi request_id faultwrap sizelimit osprofiler authtoken keystonecontext apiv2 -keystone_nolimit = cors http_proxy_to_wsgi request_id faultwrap sizelimit osprofiler authtoken keystonecontext apiv2 - -[composite:openstack_volume_api_v3] -use = call:cinder.api.middleware.auth:pipeline_factory -noauth = cors http_proxy_to_wsgi request_id faultwrap sizelimit osprofiler noauth apiv3 -keystone = cors http_proxy_to_wsgi request_id faultwrap sizelimit osprofiler authtoken keystonecontext apiv3 -keystone_nolimit = cors http_proxy_to_wsgi request_id faultwrap sizelimit osprofiler authtoken keystonecontext apiv3 - -[filter:request_id] -paste.filter_factory = oslo_middleware.request_id:RequestId.factory - -[filter:http_proxy_to_wsgi] -paste.filter_factory = oslo_middleware.http_proxy_to_wsgi:HTTPProxyToWSGI.factory - -[filter:cors] -paste.filter_factory = oslo_middleware.cors:filter_factory -oslo_config_project = cinder - -[filter:faultwrap] -paste.filter_factory = cinder.api.middleware.fault:FaultWrapper.factory - -[filter:osprofiler] -paste.filter_factory = osprofiler.web:WsgiMiddleware.factory - -[filter:noauth] -paste.filter_factory = cinder.api.middleware.auth:NoAuthMiddleware.factory - -[filter:sizelimit] -paste.filter_factory = 
oslo_middleware.sizelimit:RequestBodySizeLimiter.factory - -[app:apiv1] -paste.app_factory = cinder.api.v1.router:APIRouter.factory - -[app:apiv2] -paste.app_factory = cinder.api.v2.router:APIRouter.factory - -[app:apiv3] -paste.app_factory = cinder.api.v3.router:APIRouter.factory - -[pipeline:apiversions] -pipeline = cors http_proxy_to_wsgi faultwrap osvolumeversionapp - -[app:osvolumeversionapp] -paste.app_factory = cinder.api.versions:Versions.factory - -########## -# Shared # -########## - -[filter:keystonecontext] -paste.filter_factory = cinder.api.middleware.auth:CinderKeystoneContext.factory - -[filter:authtoken] -paste.filter_factory = keystonemiddleware.auth_token:filter_factory diff --git a/etc/cinder/logging_sample.conf b/etc/cinder/logging_sample.conf deleted file mode 100644 index f9a8deb1b..000000000 --- a/etc/cinder/logging_sample.conf +++ /dev/null @@ -1,93 +0,0 @@ -[loggers] -keys = root, cinder, taskflow, cinder_flow_utils - -[handlers] -keys = stderr, stdout, watchedfile, syslog, tasks, null - -[formatters] -keys = context, default - -[logger_root] -level = WARNING -handlers = null - -[logger_cinder] -level = INFO -handlers = stderr -qualname = cinder - -# Both of these are used for tracking what cinder and taskflow is doing with -# regard to flows and tasks (and the activity there-in). -[logger_cinder_flow_utils] -level = INFO -handlers = tasks,stderr -qualname = cinder.flow_utils - -[logger_taskflow] -level = INFO -handlers = tasks -qualname = taskflow - -[logger_amqplib] -level = WARNING -handlers = stderr -qualname = amqplib - -[logger_sqlalchemy] -level = WARNING -handlers = stderr -qualname = sqlalchemy -# "level = INFO" logs SQL queries. -# "level = DEBUG" logs SQL queries and results. -# "level = WARNING" logs neither. (Recommended for production systems.) 
- -[logger_boto] -level = WARNING -handlers = stderr -qualname = boto - -[logger_suds] -level = INFO -handlers = stderr -qualname = suds - -[logger_eventletwsgi] -level = WARNING -handlers = stderr -qualname = eventlet.wsgi.server - -[handler_stderr] -class = StreamHandler -args = (sys.stderr,) -formatter = context - -[handler_stdout] -class = StreamHandler -args = (sys.stdout,) -formatter = context - -[handler_watchedfile] -class = handlers.WatchedFileHandler -args = ('cinder.log',) -formatter = context - -[handler_tasks] -class = handlers.WatchedFileHandler -args = ('tasks.log',) -formatter = context - -[handler_syslog] -class = handlers.SysLogHandler -args = ('/dev/log', handlers.SysLogHandler.LOG_USER) -formatter = context - -[handler_null] -class = logging.NullHandler -formatter = default -args = () - -[formatter_context] -class = oslo_log.formatters.ContextFormatter - -[formatter_default] -format = %(message)s diff --git a/etc/cinder/policy.json b/etc/cinder/policy.json deleted file mode 100644 index c51f564af..000000000 --- a/etc/cinder/policy.json +++ /dev/null @@ -1,162 +0,0 @@ -{ - "admin_or_owner": "is_admin:True or (role:admin and is_admin_project:True) or project_id:%(project_id)s", - "default": "rule:admin_or_owner", - - "admin_api": "is_admin:True or (role:admin and is_admin_project:True)", - - "volume:create": "", - "volume:create_from_image": "", - "volume:delete": "rule:admin_or_owner", - "volume:force_delete": "rule:admin_api", - "volume:get": "rule:admin_or_owner", - "volume:get_all": "rule:admin_or_owner", - "volume:get_volume_metadata": "rule:admin_or_owner", - "volume:create_volume_metadata": "rule:admin_or_owner", - "volume:delete_volume_metadata": "rule:admin_or_owner", - "volume:update_volume_metadata": "rule:admin_or_owner", - "volume:get_volume_admin_metadata": "rule:admin_api", - "volume:update_volume_admin_metadata": "rule:admin_api", - "volume:get_snapshot": "rule:admin_or_owner", - "volume:get_all_snapshots": "rule:admin_or_owner", - 
"volume:create_snapshot": "rule:admin_or_owner", - "volume:delete_snapshot": "rule:admin_or_owner", - "volume:update_snapshot": "rule:admin_or_owner", - "volume:get_snapshot_metadata": "rule:admin_or_owner", - "volume:delete_snapshot_metadata": "rule:admin_or_owner", - "volume:update_snapshot_metadata": "rule:admin_or_owner", - "volume:extend": "rule:admin_or_owner", - "volume:extend_attached_volume": "rule:admin_or_owner", - "volume:update_readonly_flag": "rule:admin_or_owner", - "volume:retype": "rule:admin_or_owner", - "volume:update": "rule:admin_or_owner", - "volume:revert_to_snapshot": "rule:admin_or_owner", - - "volume_extension:types_manage": "rule:admin_api", - "volume_extension:types_extra_specs:create": "rule:admin_api", - "volume_extension:types_extra_specs:delete": "rule:admin_api", - "volume_extension:types_extra_specs:index": "rule:admin_api", - "volume_extension:types_extra_specs:show": "rule:admin_api", - "volume_extension:types_extra_specs:update": "rule:admin_api", - "volume_extension:access_types_qos_specs_id": "rule:admin_api", - "volume_extension:access_types_extra_specs": "rule:admin_api", - "volume_extension:volume_type_access": "rule:admin_or_owner", - "volume_extension:volume_type_access:addProjectAccess": "rule:admin_api", - "volume_extension:volume_type_access:removeProjectAccess": "rule:admin_api", - "volume_extension:volume_type_encryption": "rule:admin_api", - "volume_extension:volume_encryption_metadata": "rule:admin_or_owner", - "volume_extension:extended_snapshot_attributes": "rule:admin_or_owner", - "volume_extension:volume_image_metadata": "rule:admin_or_owner", - - "volume_extension:qos_specs_manage:create": "rule:admin_api", - "volume_extension:qos_specs_manage:get": "rule:admin_api", - "volume_extension:qos_specs_manage:get_all": "rule:admin_api", - "volume_extension:qos_specs_manage:update": "rule:admin_api", - "volume_extension:qos_specs_manage:delete": "rule:admin_api", - - "volume_extension:quotas:show": "", - 
"volume_extension:quotas:update": "rule:admin_api", - "volume_extension:quotas:delete": "rule:admin_api", - "volume_extension:quota_classes": "rule:admin_api", - "volume_extension:quota_classes:validate_setup_for_nested_quota_use": "rule:admin_api", - - "volume_extension:volume_admin_actions:reset_status": "rule:admin_api", - "volume_extension:snapshot_admin_actions:reset_status": "rule:admin_api", - "volume_extension:backup_admin_actions:reset_status": "rule:admin_api", - "volume_extension:volume_admin_actions:force_delete": "rule:admin_api", - "volume_extension:volume_admin_actions:force_detach": "rule:admin_api", - "volume_extension:snapshot_admin_actions:force_delete": "rule:admin_api", - "volume_extension:backup_admin_actions:force_delete": "rule:admin_api", - "volume_extension:volume_admin_actions:migrate_volume": "rule:admin_api", - "volume_extension:volume_admin_actions:migrate_volume_completion": "rule:admin_api", - - "volume_extension:volume_actions:upload_public": "rule:admin_api", - "volume_extension:volume_actions:upload_image": "rule:admin_or_owner", - - "volume_extension:volume_host_attribute": "rule:admin_api", - "volume_extension:volume_tenant_attribute": "rule:admin_or_owner", - "volume_extension:volume_mig_status_attribute": "rule:admin_api", - "volume_extension:hosts": "rule:admin_api", - "volume_extension:services:index": "rule:admin_api", - "volume_extension:services:update" : "rule:admin_api", - - "volume_extension:volume_manage": "rule:admin_api", - "volume_extension:volume_unmanage": "rule:admin_api", - "volume_extension:list_manageable": "rule:admin_api", - - "volume_extension:capabilities": "rule:admin_api", - - "volume:create_transfer": "rule:admin_or_owner", - "volume:accept_transfer": "", - "volume:delete_transfer": "rule:admin_or_owner", - "volume:get_transfer": "rule:admin_or_owner", - "volume:get_all_transfers": "rule:admin_or_owner", - - "volume:failover_host": "rule:admin_api", - "volume:freeze_host": "rule:admin_api", - 
"volume:thaw_host": "rule:admin_api", - - "backup:create" : "", - "backup:delete": "rule:admin_or_owner", - "backup:get": "rule:admin_or_owner", - "backup:get_all": "rule:admin_or_owner", - "backup:restore": "rule:admin_or_owner", - "backup:backup-import": "rule:admin_api", - "backup:backup-export": "rule:admin_api", - "backup:update": "rule:admin_or_owner", - "backup:backup_project_attribute": "rule:admin_api", - - "volume:attachment_create": "", - "volume:attachment_update": "rule:admin_or_owner", - "volume:attachment_delete": "rule:admin_or_owner", - - "snapshot_extension:snapshot_actions:update_snapshot_status": "", - "snapshot_extension:snapshot_manage": "rule:admin_api", - "snapshot_extension:snapshot_unmanage": "rule:admin_api", - "snapshot_extension:list_manageable": "rule:admin_api", - - "consistencygroup:create" : "group:nobody", - "consistencygroup:delete": "group:nobody", - "consistencygroup:update": "group:nobody", - "consistencygroup:get": "group:nobody", - "consistencygroup:get_all": "group:nobody", - - "consistencygroup:create_cgsnapshot" : "group:nobody", - "consistencygroup:delete_cgsnapshot": "group:nobody", - "consistencygroup:get_cgsnapshot": "group:nobody", - "consistencygroup:get_all_cgsnapshots": "group:nobody", - - "group:group_types_manage": "rule:admin_api", - "group:group_types_specs": "rule:admin_api", - "group:access_group_types_specs": "rule:admin_api", - "group:group_type_access": "rule:admin_or_owner", - - "group:create" : "", - "group:delete": "rule:admin_or_owner", - "group:update": "rule:admin_or_owner", - "group:get": "rule:admin_or_owner", - "group:get_all": "rule:admin_or_owner", - - "group:create_group_snapshot": "", - "group:delete_group_snapshot": "rule:admin_or_owner", - "group:update_group_snapshot": "rule:admin_or_owner", - "group:get_group_snapshot": "rule:admin_or_owner", - "group:get_all_group_snapshots": "rule:admin_or_owner", - "group:reset_group_snapshot_status":"rule:admin_api", - 
"group:reset_status":"rule:admin_api", - - "group:enable_replication": "rule:admin_or_owner", - "group:disable_replication": "rule:admin_or_owner", - "group:failover_replication": "rule:admin_or_owner", - "group:list_replication_targets": "rule:admin_or_owner", - - "scheduler_extension:scheduler_stats:get_pools" : "rule:admin_api", - "message:delete": "rule:admin_or_owner", - "message:get": "rule:admin_or_owner", - "message:get_all": "rule:admin_or_owner", - - "clusters:get": "rule:admin_api", - "clusters:get_all": "rule:admin_api", - "clusters:update": "rule:admin_api", - - "workers:cleanup": "rule:admin_api" -} diff --git a/etc/cinder/resource_filters.json b/etc/cinder/resource_filters.json deleted file mode 100644 index 08a6bdd66..000000000 --- a/etc/cinder/resource_filters.json +++ /dev/null @@ -1,12 +0,0 @@ -{ - "volume": ["name", "status", "image_metadata", - "bootable", "migration_status"], - "backup": ["name", "status", "volume_id"], - "snapshot": ["name", "status", "volume_id"], - "group": [], - "group_snapshot": ["status", "group_id"], - "attachment": ["volume_id"], - "message": ["resource_uuid", "resource_type", "event_id", - "request_id", "message_level"], - "pool": ["name", "volume_type"] -} diff --git a/etc/cinder/rootwrap.conf b/etc/cinder/rootwrap.conf deleted file mode 100644 index bf41bbd14..000000000 --- a/etc/cinder/rootwrap.conf +++ /dev/null @@ -1,27 +0,0 @@ -# Configuration for cinder-rootwrap -# This file should be owned by (and only-writeable by) the root user - -[DEFAULT] -# List of directories to load filter definitions from (separated by ','). -# These directories MUST all be only writeable by root ! -filters_path=/etc/cinder/rootwrap.d,/usr/share/cinder/rootwrap - -# List of directories to search executables in, in case filters do not -# explicitely specify a full path (separated by ',') -# If not specified, defaults to system PATH environment variable. -# These directories MUST all be only writeable by root ! 
-exec_dirs=/sbin,/usr/sbin,/bin,/usr/bin,/usr/local/bin,/usr/local/sbin,/usr/lpp/mmfs/bin - -# Enable logging to syslog -# Default value is False -use_syslog=False - -# Which syslog facility to use. -# Valid values include auth, authpriv, syslog, local0, local1... -# Default value is 'syslog' -syslog_log_facility=syslog - -# Which messages to log. -# INFO means log all usage -# ERROR means only log unsuccessful attempts -syslog_log_level=ERROR diff --git a/etc/cinder/rootwrap.d/volume.filters b/etc/cinder/rootwrap.d/volume.filters deleted file mode 100644 index 2bb364a87..000000000 --- a/etc/cinder/rootwrap.d/volume.filters +++ /dev/null @@ -1,233 +0,0 @@ -# cinder-rootwrap command filters for volume nodes -# This file should be owned by (and only-writeable by) the root user - -[Filters] -# cinder/volume/iscsi.py: iscsi_helper '--op' ... -ietadm: CommandFilter, ietadm, root -tgtadm: CommandFilter, tgtadm, root -iscsictl: CommandFilter, iscsictl, root -tgt-admin: CommandFilter, tgt-admin, root -cinder-rtstool: CommandFilter, cinder-rtstool, root -scstadmin: CommandFilter, scstadmin, root - -# HyperScale command to handle cinder operations -hscli: CommandFilter, hscli, root - -# LVM related show commands -pvs: EnvFilter, env, root, LC_ALL=C, pvs -vgs: EnvFilter, env, root, LC_ALL=C, vgs -lvs: EnvFilter, env, root, LC_ALL=C, lvs -lvdisplay: EnvFilter, env, root, LC_ALL=C, lvdisplay - -# -LVM related show commands with suppress fd warnings -pvs2: EnvFilter, env, root, LC_ALL=C, LVM_SUPPRESS_FD_WARNINGS=, pvs -vgs2: EnvFilter, env, root, LC_ALL=C, LVM_SUPPRESS_FD_WARNINGS=, vgs -lvs2: EnvFilter, env, root, LC_ALL=C, LVM_SUPPRESS_FD_WARNINGS=, lvs -lvdisplay2: EnvFilter, env, root, LC_ALL=C, LVM_SUPPRESS_FD_WARNINGS=, lvdisplay - - -# -LVM related show commands conf var -pvs3: EnvFilter, env, root, LC_ALL=C, LVM_SYSTEM_DIR=, pvs -vgs3: EnvFilter, env, root, LC_ALL=C, LVM_SYSTEM_DIR=, vgs -lvs3: EnvFilter, env, root, LC_ALL=C, LVM_SYSTEM_DIR=, lvs -lvdisplay3: EnvFilter, 
env, root, LC_ALL=C, LVM_SYSTEM_DIR=, lvdisplay - -# -LVM conf var with suppress fd_warnings -pvs4: EnvFilter, env, root, LC_ALL=C, LVM_SYSTEM_DIR=, LVM_SUPPRESS_FD_WARNINGS=, pvs -vgs4: EnvFilter, env, root, LC_ALL=C, LVM_SYSTEM_DIR=, LVM_SUPPRESS_FD_WARNINGS=, vgs -lvs4: EnvFilter, env, root, LC_ALL=C, LVM_SYSTEM_DIR=, LVM_SUPPRESS_FD_WARNINGS=, lvs -lvdisplay4: EnvFilter, env, root, LC_ALL=C, LVM_SYSTEM_DIR=, LVM_SUPPRESS_FD_WARNINGS=, lvdisplay - -# os-brick library commands -# os_brick.privileged.run_as_root oslo.privsep context -# This line ties the superuser privs with the config files, context name, -# and (implicitly) the actual python code invoked. -privsep-rootwrap: RegExpFilter, privsep-helper, root, privsep-helper, --config-file, /etc/(?!\.\.).*, --privsep_context, os_brick.privileged.default, --privsep_sock_path, /tmp/.* -# The following and any cinder/brick/* entries should all be obsoleted -# by privsep, and may be removed once the os-brick version requirement -# is updated appropriately. -scsi_id: CommandFilter, /lib/udev/scsi_id, root -drbdadm: CommandFilter, drbdadm, root - -# cinder/brick/local_dev/lvm.py: 'vgcreate', vg_name, pv_list -vgcreate: CommandFilter, vgcreate, root - -# cinder/brick/local_dev/lvm.py: 'lvcreate', '-L', sizestr, '-n', volume_name,.. -# cinder/brick/local_dev/lvm.py: 'lvcreate', '-L', ... -lvcreate: EnvFilter, env, root, LC_ALL=C, lvcreate -lvcreate_lvmconf: EnvFilter, env, root, LVM_SYSTEM_DIR=, LC_ALL=C, lvcreate -lvcreate_fdwarn: EnvFilter, env, root, LC_ALL=C, LVM_SUPPRESS_FD_WARNINGS=, lvcreate -lvcreate_lvmconf_fdwarn: EnvFilter, env, root, LVM_SYSTEM_DIR=, LVM_SUPPRESS_FD_WARNINGS=, LC_ALL=C, lvcreate - -# cinder/volume/driver.py: 'dd', 'if=%s' % srcstr, 'of=%s' % deststr,... -dd: CommandFilter, dd, root - -# cinder/volume/driver.py: 'lvremove', '-f', %s/%s % ... -lvremove: CommandFilter, lvremove, root - -# cinder/volume/driver.py: 'lvrename', '%(vg)s', '%(orig)s' '(new)s'... 
-lvrename: CommandFilter, lvrename, root - -# cinder/brick/local_dev/lvm.py: 'lvextend', '-L' '%(new_size)s', '%(lv_name)s' ... -# cinder/brick/local_dev/lvm.py: 'lvextend', '-L' '%(new_size)s', '%(thin_pool)s' ... -lvextend: EnvFilter, env, root, LC_ALL=C, lvextend -lvextend_lvmconf: EnvFilter, env, root, LVM_SYSTEM_DIR=, LC_ALL=C, lvextend -lvextend_fdwarn: EnvFilter, env, root, LC_ALL=C, LVM_SUPPRESS_FD_WARNINGS=, lvextend -lvextend_lvmconf_fdwarn: EnvFilter, env, root, LVM_SYSTEM_DIR=, LC_ALL=C, LVM_SUPPRESS_FD_WARNINGS=, lvextend - -# cinder/brick/local_dev/lvm.py: 'lvchange -a y -K ' -lvchange: CommandFilter, lvchange, root - -# cinder/brick/local_dev/lvm.py: 'lvconvert', '--merge', snapshot_name -lvconvert: CommandFilter, lvconvert, root - -# cinder/volume/driver.py: 'iscsiadm', '-m', 'discovery', '-t',... -# cinder/volume/driver.py: 'iscsiadm', '-m', 'node', '-T', ... -iscsiadm: CommandFilter, iscsiadm, root - -# cinder/volume/utils.py: utils.temporary_chown(path, 0) -chown: CommandFilter, chown, root - -# cinder/volume/utils.py: copy_volume(..., ionice='...') -ionice_1: ChainingRegExpFilter, ionice, root, ionice, -c[0-3], -n[0-7] -ionice_2: ChainingRegExpFilter, ionice, root, ionice, -c[0-3] - -# cinder/volume/utils.py: setup_blkio_cgroup() -cgcreate: CommandFilter, cgcreate, root -cgset: CommandFilter, cgset, root -cgexec: ChainingRegExpFilter, cgexec, root, cgexec, -g, blkio:\S+ - -# cinder/volume/driver.py -dmsetup: CommandFilter, dmsetup, root -ln: CommandFilter, ln, root - -# cinder/image/image_utils.py -qemu-img: EnvFilter, env, root, LC_ALL=C, qemu-img -qemu-img_convert: CommandFilter, qemu-img, root - -udevadm: CommandFilter, udevadm, root - -# cinder/volume/driver.py: utils.read_file_as_root() -cat: CommandFilter, cat, root - -# cinder/volume/nfs.py -stat: CommandFilter, stat, root -mount: CommandFilter, mount, root -df: CommandFilter, df, root -du: CommandFilter, du, root -truncate: CommandFilter, truncate, root -chmod: CommandFilter, chmod, root 
-rm: CommandFilter, rm, root - -# cinder/volume/drivers/remotefs.py -mkdir: CommandFilter, mkdir, root - -# cinder/volume/drivers/netapp/dataontap/nfs_base.py: -netapp_nfs_find: RegExpFilter, find, root, find, ^[/]*([^/\0]+(/+)?)*$, -maxdepth, \d+, -name, img-cache.*, -amin, \+\d+ -netapp_nfs_touch: CommandFilter, touch, root - -# cinder/volume/drivers/glusterfs.py -chgrp: CommandFilter, chgrp, root -umount: CommandFilter, umount, root - -# cinder/volumes/drivers/hds/hds.py: -hus-cmd: CommandFilter, hus-cmd, root -hus-cmd_local: CommandFilter, /usr/local/bin/hus-cmd, root - -# cinder/volumes/drivers/hds/hnas_backend.py -ssc: CommandFilter, ssc, root - -# cinder/brick/initiator/connector.py: -ls: CommandFilter, ls, root -tee: CommandFilter, tee, root -multipath: CommandFilter, multipath, root -multipathd: CommandFilter, multipathd, root -systool: CommandFilter, systool, root - -# cinder/volume/drivers/block_device.py -blockdev: CommandFilter, blockdev, root - -# cinder/volume/drivers/ibm/gpfs.py -# cinder/volume/drivers/tintri.py -mv: CommandFilter, mv, root - -# cinder/volume/drivers/ibm/gpfs.py -cp: CommandFilter, cp, root -mmgetstate: CommandFilter, mmgetstate, root -mmclone: CommandFilter, mmclone, root -mmlsattr: CommandFilter, mmlsattr, root -mmchattr: CommandFilter, mmchattr, root -mmlsconfig: CommandFilter, mmlsconfig, root -mmlsfs: CommandFilter, mmlsfs, root -mmlspool: CommandFilter, mmlspool, root -mkfs: CommandFilter, mkfs, root -mmcrfileset: CommandFilter, mmcrfileset, root -mmlsfileset: CommandFilter, mmlsfileset, root -mmlinkfileset: CommandFilter, mmlinkfileset, root -mmunlinkfileset: CommandFilter, mmunlinkfileset, root -mmdelfileset: CommandFilter, mmdelfileset, root -mmcrsnapshot: CommandFilter, mmcrsnapshot, root -mmdelsnapshot: CommandFilter, mmdelsnapshot, root - -# cinder/volume/drivers/ibm/gpfs.py -# cinder/volume/drivers/ibm/ibmnas.py -find_maxdepth_inum: RegExpFilter, find, root, find, ^[/]*([^/\0]+(/+)?)*$, -maxdepth, \d+, 
-ignore_readdir_race, -inum, \d+, -print0, -quit - -# cinder/brick/initiator/connector.py: -aoe-revalidate: CommandFilter, aoe-revalidate, root -aoe-discover: CommandFilter, aoe-discover, root -aoe-flush: CommandFilter, aoe-flush, root - -# cinder/brick/initiator/linuxscsi.py: -sg_scan: CommandFilter, sg_scan, root - -#cinder/backup/services/tsm.py -dsmc:CommandFilter,/usr/bin/dsmc,root - -# cinder/volume/drivers/hitachi/hbsd_horcm.py -raidqry: CommandFilter, raidqry, root -raidcom: CommandFilter, raidcom, root -pairsplit: CommandFilter, pairsplit, root -paircreate: CommandFilter, paircreate, root -pairdisplay: CommandFilter, pairdisplay, root -pairevtwait: CommandFilter, pairevtwait, root -horcmstart.sh: CommandFilter, horcmstart.sh, root -horcmshutdown.sh: CommandFilter, horcmshutdown.sh, root -horcmgr: EnvFilter, env, root, HORCMINST=, /etc/horcmgr - -# cinder/volume/drivers/hitachi/hbsd_snm2.py -auman: EnvFilter, env, root, LANG=, STONAVM_HOME=, LD_LIBRARY_PATH=, STONAVM_RSP_PASS=, STONAVM_ACT=, /usr/stonavm/auman -auluref: EnvFilter, env, root, LANG=, STONAVM_HOME=, LD_LIBRARY_PATH=, STONAVM_RSP_PASS=, STONAVM_ACT=, /usr/stonavm/auluref -auhgdef: EnvFilter, env, root, LANG=, STONAVM_HOME=, LD_LIBRARY_PATH=, STONAVM_RSP_PASS=, STONAVM_ACT=, /usr/stonavm/auhgdef -aufibre1: EnvFilter, env, root, LANG=, STONAVM_HOME=, LD_LIBRARY_PATH=, STONAVM_RSP_PASS=, STONAVM_ACT=, /usr/stonavm/aufibre1 -auhgwwn: EnvFilter, env, root, LANG=, STONAVM_HOME=, LD_LIBRARY_PATH=, STONAVM_RSP_PASS=, STONAVM_ACT=, /usr/stonavm/auhgwwn -auhgmap: EnvFilter, env, root, LANG=, STONAVM_HOME=, LD_LIBRARY_PATH=, STONAVM_RSP_PASS=, STONAVM_ACT=, /usr/stonavm/auhgmap -autargetmap: EnvFilter, env, root, LANG=, STONAVM_HOME=, LD_LIBRARY_PATH=, STONAVM_RSP_PASS=, STONAVM_ACT=, /usr/stonavm/autargetmap -aureplicationvvol: EnvFilter, env, root, LANG=, STONAVM_HOME=, LD_LIBRARY_PATH=, STONAVM_RSP_PASS=, STONAVM_ACT=, /usr/stonavm/aureplicationvvol -auluadd: EnvFilter, env, root, LANG=, STONAVM_HOME=, 
LD_LIBRARY_PATH=, STONAVM_RSP_PASS=, STONAVM_ACT=, /usr/stonavm/auluadd -auludel: EnvFilter, env, root, LANG=, STONAVM_HOME=, LD_LIBRARY_PATH=, STONAVM_RSP_PASS=, STONAVM_ACT=, /usr/stonavm/auludel -auluchgsize: EnvFilter, env, root, LANG=, STONAVM_HOME=, LD_LIBRARY_PATH=, STONAVM_RSP_PASS=, STONAVM_ACT=, /usr/stonavm/auluchgsize -auchapuser: EnvFilter, env, root, LANG=, STONAVM_HOME=, LD_LIBRARY_PATH=, STONAVM_RSP_PASS=, STONAVM_ACT=, /usr/stonavm/auchapuser -autargetdef: EnvFilter, env, root, LANG=, STONAVM_HOME=, LD_LIBRARY_PATH=, STONAVM_RSP_PASS=, STONAVM_ACT=, /usr/stonavm/autargetdef -autargetopt: EnvFilter, env, root, LANG=, STONAVM_HOME=, LD_LIBRARY_PATH=, STONAVM_RSP_PASS=, STONAVM_ACT=, /usr/stonavm/autargetopt -autargetini: EnvFilter, env, root, LANG=, STONAVM_HOME=, LD_LIBRARY_PATH=, STONAVM_RSP_PASS=, STONAVM_ACT=, /usr/stonavm/autargetini -auiscsi: EnvFilter, env, root, LANG=, STONAVM_HOME=, LD_LIBRARY_PATH=, STONAVM_RSP_PASS=, STONAVM_ACT=, /usr/stonavm/auiscsi -audppool: EnvFilter, env, root, LANG=, STONAVM_HOME=, LD_LIBRARY_PATH=, STONAVM_RSP_PASS=, STONAVM_ACT=, /usr/stonavm/audppool -aureplicationlocal: EnvFilter, env, root, LANG=, STONAVM_HOME=, LD_LIBRARY_PATH=, STONAVM_RSP_PASS=, STONAVM_ACT=, /usr/stonavm/aureplicationlocal -aureplicationmon: EnvFilter, env, root, LANG=, STONAVM_HOME=, LD_LIBRARY_PATH=, STONAVM_RSP_PASS=, STONAVM_ACT=, /usr/stonavm/aureplicationmon - -# cinder/volume/drivers/hgst.py -vgc-cluster: CommandFilter, vgc-cluster, root - -# cinder/volume/drivers/vzstorage.py -pstorage-mount: CommandFilter, pstorage-mount, root -pstorage: CommandFilter, pstorage, root -ploop: CommandFilter, ploop, root - -# initiator/connector.py: -drv_cfg: CommandFilter, /opt/emc/scaleio/sdc/bin/drv_cfg, root, /opt/emc/scaleio/sdc/bin/drv_cfg, --query_guid - -# cinder/volume/drivers/quobyte.py -mount.quobyte: CommandFilter, mount.quobyte, root -umount.quobyte: CommandFilter, umount.quobyte, root - diff --git a/pylintrc b/pylintrc deleted file mode 
100644 index a5064360f..000000000 --- a/pylintrc +++ /dev/null @@ -1,41 +0,0 @@ -# The format of this file isn't really documented; just use --generate-rcfile - -[Messages Control] -# NOTE(justinsb): We might want to have a 2nd strict pylintrc in future -# C0111: Don't require docstrings on every method -# W0511: TODOs in code comments are fine. -# W0142: *args and **kwargs are fine. -# W0622: Redefining id is fine. -disable=C0111,W0511,W0142,W0622 - -[Basic] -# Variable names can be 1 to 31 characters long, with lowercase and underscores -variable-rgx=[a-z_][a-z0-9_]{0,30}$ - -# Argument names can be 2 to 31 characters long, with lowercase and underscores -argument-rgx=[a-z_][a-z0-9_]{1,30}$ - -# Method names should be at least 3 characters long -# and be lowercased with underscores -method-rgx=([a-z_][a-z0-9_]{2,50}|setUp|tearDown)$ - -# Module names matching cinder-* are ok (files in bin/) -module-rgx=(([a-z_][a-z0-9_]*)|([A-Z][a-zA-Z0-9]+)|(cinder-[a-z0-9_-]+))$ - -# Don't require docstrings on tests. -no-docstring-rgx=((__.*__)|([tT]est.*)|setUp|tearDown)$ - -[Design] -max-public-methods=100 -min-public-methods=0 -max-args=6 - -[Variables] - -dummy-variables-rgx=_ - -[Typecheck] -# Disable warnings on the HTTPSConnection classes because pylint doesn't -# support importing from six.moves yet, see: -# https://bitbucket.org/logilab/pylint/issue/550/ -ignored-classes=HTTPSConnection diff --git a/rally-jobs/README.rst b/rally-jobs/README.rst deleted file mode 100644 index ffc325ca8..000000000 --- a/rally-jobs/README.rst +++ /dev/null @@ -1,35 +0,0 @@ -Rally job related files -======================= - -This directory contains rally tasks and plugins that are run by OpenStack CI. - -Structure ---------- - -* cinder.yaml is a task that will be run in gates against OpenStack deployed - by DevStack. - -* cinder-fake.yaml is a task that will be run in gates against OpenStack - deployed by DevStack with fake cinder driver. 
- -* plugins - directory where you can add rally plugins. Almost everything in - Rally is plugin. Benchmark context, Benchmark scenario, SLA checks, Generic - cleanup resources, .... - -* extra - all files from this directory will be copy pasted to gates, so you - are able to use absolute path in rally tasks. - Files will be in ~/.rally/extra/* - - -Useful links ------------- - -* More about Rally: https://rally.readthedocs.org/en/latest/ - -* Rally release notes: https://rally.readthedocs.org/en/latest/release_notes.html - -* How to add rally-gates: https://rally.readthedocs.org/en/latest/gates.html - -* About plugins: https://rally.readthedocs.org/en/latest/plugins.html - -* Plugin samples: https://github.com/openstack/rally/tree/master/samples/plugins diff --git a/rally-jobs/cinder-fake.yaml b/rally-jobs/cinder-fake.yaml deleted file mode 100644 index c42cc753e..000000000 --- a/rally-jobs/cinder-fake.yaml +++ /dev/null @@ -1,14 +0,0 @@ ---- - CinderVolumes.create_and_list_volume: - - - args: - size: 1 - detailed: True - runner: - type: "constant" - times: 200 - concurrency: 20 - context: - users: - tenants: 1 - users_per_tenant: 1 diff --git a/rally-jobs/cinder.yaml b/rally-jobs/cinder.yaml deleted file mode 100644 index 9b94b8935..000000000 --- a/rally-jobs/cinder.yaml +++ /dev/null @@ -1,501 +0,0 @@ -{% set image_name = "^(cirros.*-disk|TestVM)$" %} ---- - Authenticate.validate_cinder: - - - args: - repetitions: 2 - runner: - type: "constant" - times: 10 - concurrency: 5 - context: - users: - tenants: 3 - users_per_tenant: 5 - sla: - failure_rate: - max: 0 - - Quotas.cinder_update_and_delete: - - - args: - max_quota: 1024 - runner: - type: "constant" - times: 4 - concurrency: 1 - context: - users: - tenants: 3 - users_per_tenant: 2 - api_versions: - cinder: - version: 2 - service_name: cinderv2 - sla: - failure_rate: - max: 0 - - Quotas.cinder_update: - - - args: - max_quota: 1024 - runner: - type: "constant" - times: 10 - concurrency: 2 - context: - users: - 
tenants: 3 - users_per_tenant: 2 - api_versions: - cinder: - version: 2 - service_name: cinderv2 - sla: - failure_rate: - max: 0 - - CinderVolumes.create_and_delete_volume: - - - args: - size: 1 - runner: - type: "constant" - times: 2 - concurrency: 2 - context: - users: - tenants: 2 - users_per_tenant: 2 - api_versions: - cinder: - version: 2 - service_name: cinderv2 - sla: - failure_rate: - max: 0 - - - args: - size: 1 - image: - name: {{image_name}} - runner: - type: "constant" - times: 2 - concurrency: 2 - context: - users: - tenants: 2 - users_per_tenant: 2 - api_versions: - cinder: - version: 2 - service_name: cinderv2 - sla: - failure_rate: - max: 0 - - - args: - size: - min: 1 - max: 3 - runner: - type: "constant" - times: 2 - concurrency: 2 - context: - users: - tenants: 2 - users_per_tenant: 2 - api_versions: - cinder: - version: 2 - service_name: cinderv2 - sla: - failure_rate: - max: 0 - - CinderVolumes.create_and_list_volume: - - - args: - size: 1 - detailed: True - runner: - type: "constant" - times: 2 - concurrency: 2 - context: - users: - tenants: 1 - users_per_tenant: 1 - api_versions: - cinder: - version: 2 - service_name: cinderv2 - sla: - failure_rate: - max: 0 - - - args: - size: - min: 1 - max: 3 - detailed: True - runner: - type: "constant" - times: 2 - concurrency: 2 - context: - users: - tenants: 1 - users_per_tenant: 1 - api_versions: - cinder: - version: 2 - service_name: cinderv2 - sla: - failure_rate: - max: 0 - - - args: - size: 1 - detailed: True - image: - name: {{image_name}} - runner: - type: "constant" - times: 2 - concurrency: 2 - context: - users: - tenants: 1 - users_per_tenant: 1 - api_versions: - cinder: - version: 2 - service_name: cinderv2 - sla: - failure_rate: - max: 0 - - CinderVolumes.list_volumes: - - - args: - detailed: True - runner: - type: "constant" - times: 2 - concurrency: 2 - context: - users: - tenants: 2 - users_per_tenant: 1 - volumes: - size: 1 - volumes_per_tenant: 2 - api_versions: - cinder: - version: 2 
- service_name: cinderv2 - sla: - failure_rate: - max: 0 - - CinderVolumes.create_volume: - - - args: - size: 1 - runner: - type: "constant" - times: 2 - concurrency: 2 - context: - users: - tenants: 2 - users_per_tenant: 2 - api_versions: - cinder: - version: 2 - service_name: cinderv2 - sla: - failure_rate: - max: 0 - - - args: - size: - min: 1 - max: 3 - runner: - type: "constant" - times: 2 - concurrency: 2 - context: - users: - tenants: 2 - users_per_tenant: 2 - api_versions: - cinder: - version: 2 - service_name: cinderv2 - sla: - failure_rate: - max: 0 - - - args: - size: 1 - image: - name: {{image_name}} - runner: - type: "constant" - times: 2 - concurrency: 2 - context: - users: - tenants: 2 - users_per_tenant: 2 - api_versions: - cinder: - version: 2 - service_name: cinderv2 - sla: - failure_rate: - max: 0 - - CinderVolumes.create_and_extend_volume: - - - args: - size: 1 - new_size: 2 - runner: - type: "constant" - times: 2 - concurrency: 2 - context: - users: - tenants: 1 - users_per_tenant: 1 - api_versions: - cinder: - version: 2 - service_name: cinderv2 - sla: - failure_rate: - max: 0 - - - args: - size: - min: 1 - max: 2 - new_size: - min: 3 - max: 4 - runner: - type: "constant" - times: 2 - concurrency: 2 - context: - users: - tenants: 1 - users_per_tenant: 1 - api_versions: - cinder: - version: 2 - service_name: cinderv2 - sla: - failure_rate: - max: 0 - - CinderVolumes.create_and_attach_volume: - - - args: - size: 1 - image: - name: {{image_name}} - flavor: - name: "m1.tiny" - runner: - type: "constant" - times: 2 - concurrency: 2 - context: - users: - tenants: 2 - users_per_tenant: 2 - api_versions: - cinder: - version: 2 - service_name: cinderv2 - sla: - failure_rate: - max: 0 - - CinderVolumes.create_snapshot_and_attach_volume: - - - args: - size: - min: 1 - max: 1 - volume_type: "test" - runner: - type: "constant" - times: 2 - concurrency: 2 - context: - users: - tenants: 2 - users_per_tenant: 2 - servers: - image: - name: {{image_name}} - 
flavor: - name: "m1.tiny" - servers_per_tenant: 1 - volume_types: - - "test" - api_versions: - cinder: - version: 2 - service_name: cinderv2 - sla: - failure_rate: - max: 0 - - - - args: - volume_type: "test" - size: - min: 1 - max: 1 - runner: - type: "constant" - times: 2 - concurrency: 2 - context: - users: - tenants: 2 - users_per_tenant: 2 - servers: - image: - name: {{image_name}} - flavor: - name: "m1.tiny" - servers_per_tenant: 1 - volume_types: - - "test" - api_versions: - cinder: - version: 2 - service_name: cinderv2 - sla: - failure_rate: - max: 0 - - CinderVolumes.create_from_volume_and_delete_volume: - - - args: - size: 1 - runner: - type: "constant" - times: 2 - concurrency: 2 - context: - users: - tenants: 1 - users_per_tenant: 1 - volumes: - size: 1 - api_versions: - cinder: - version: 2 - service_name: cinderv2 - sla: - failure_rate: - max: 0 - - - args: - size: - min: 1 - max: 2 - runner: - type: "constant" - times: 2 - concurrency: 2 - context: - users: - tenants: 1 - users_per_tenant: 1 - volumes: - size: 1 - api_versions: - cinder: - version: 2 - service_name: cinderv2 - sla: - failure_rate: - max: 0 - - CinderVolumes.create_and_delete_snapshot: - - - args: - force: false - runner: - type: "constant" - times: 2 - concurrency: 2 - context: - users: - tenants: 2 - users_per_tenant: 2 - volumes: - size: 1 - api_versions: - cinder: - version: 2 - service_name: cinderv2 - sla: - failure_rate: - max: 0 - - CinderVolumes.create_and_list_snapshots: - - - args: - force: False - detailed: True - runner: - type: "constant" - times: 2 - concurrency: 2 - context: - users: - tenants: 1 - users_per_tenant: 1 - volumes: - size: 1 - api_versions: - cinder: - version: 2 - service_name: cinderv2 - sla: - failure_rate: - max: 0 - - CinderVolumes.create_and_upload_volume_to_image: - - - args: - size: 1 - runner: - type: "constant" - times: 1 - concurrency: 1 - context: - users: - tenants: 1 - users_per_tenant: 1 - api_versions: - cinder: - version: 2 - 
service_name: cinderv2 - sla: - failure_rate: - max: 0 diff --git a/rally-jobs/extra/README.rst b/rally-jobs/extra/README.rst deleted file mode 100644 index 836f35a02..000000000 --- a/rally-jobs/extra/README.rst +++ /dev/null @@ -1,5 +0,0 @@ -Extra files -=========== - -All files from this directory will be copy pasted to gates, so you are able to -use absolute path in rally tasks. Files will be in ~/.rally/extra/* diff --git a/rally-jobs/plugins/README.rst b/rally-jobs/plugins/README.rst deleted file mode 100644 index 3aa58df22..000000000 --- a/rally-jobs/plugins/README.rst +++ /dev/null @@ -1,9 +0,0 @@ -Rally plugins -============= - -All ``*.py`` modules from this directory will be auto-loaded by Rally and all -plugins will be discoverable. There is no need of any extra configuration -and there is no difference between writing them here and in rally code base. - -Note that it is better to push all interesting and useful benchmarks to Rally -code base, this simplifies administration for Operators. diff --git a/rally-jobs/plugins/__init__.py b/rally-jobs/plugins/__init__.py deleted file mode 100644 index e69de29bb..000000000 diff --git a/releasenotes/README.rst b/releasenotes/README.rst deleted file mode 100644 index c85993dfd..000000000 --- a/releasenotes/README.rst +++ /dev/null @@ -1,16 +0,0 @@ -============= -Release notes -============= - -The release notes for a patch should be included in the -patch. The intended audience for release notes include -deployers, administrators and end-users. - -A release note is required if the patch has upgrade or API -impact. It is also required if the patch adds a feature or -fixes a long-standing or security bug. - -Please see -http://docs.openstack.org/developer/cinder/devref/releasenotes.html -for more details. 
- diff --git a/releasenotes/notes/1220b8a67602b8e7-update_rootwrap_volume_filters.yaml b/releasenotes/notes/1220b8a67602b8e7-update_rootwrap_volume_filters.yaml deleted file mode 100644 index e5d2bb749..000000000 --- a/releasenotes/notes/1220b8a67602b8e7-update_rootwrap_volume_filters.yaml +++ /dev/null @@ -1,5 +0,0 @@ ---- -upgrade: - - It is required to copy new rootwrap.d/volume.filters file into /etc/cinder/rootwrap.d directory. -fixes: - - Fixed bug causing snapshot creation to fail on systems with LC_NUMERIC set to locale using ',' as decimal separator. diff --git a/releasenotes/notes/3par-create-cg-from-source-cg-5634dcf9feb813f6.yaml b/releasenotes/notes/3par-create-cg-from-source-cg-5634dcf9feb813f6.yaml deleted file mode 100644 index ff85b2fc2..000000000 --- a/releasenotes/notes/3par-create-cg-from-source-cg-5634dcf9feb813f6.yaml +++ /dev/null @@ -1,4 +0,0 @@ ---- -features: - - Added support for creating a consistency group from a source consistency group - in the HPE 3PAR driver. diff --git a/releasenotes/notes/3par-create-fc-vlun-match-set-type-babcf2cbce1ce317.yaml b/releasenotes/notes/3par-create-fc-vlun-match-set-type-babcf2cbce1ce317.yaml deleted file mode 100644 index b9f3ece88..000000000 --- a/releasenotes/notes/3par-create-fc-vlun-match-set-type-babcf2cbce1ce317.yaml +++ /dev/null @@ -1,5 +0,0 @@ ---- -fixes: - - 3PAR driver creates FC VLUN of match-set type instead of host sees. - With match-set, the host will see the virtual volume on specified - NSP (Node-Slot-Port). This change in vlun type fixes bug 1577993. \ No newline at end of file diff --git a/releasenotes/notes/3par-license-check-51a16b5247675760.yaml b/releasenotes/notes/3par-license-check-51a16b5247675760.yaml deleted file mode 100644 index 51b075ecc..000000000 --- a/releasenotes/notes/3par-license-check-51a16b5247675760.yaml +++ /dev/null @@ -1,3 +0,0 @@ ---- -features: - - Disable standard capabilities based on 3PAR licenses. 
diff --git a/releasenotes/notes/3par-manage-unmanage-snapshot-eb4e504e8782ba43.yaml b/releasenotes/notes/3par-manage-unmanage-snapshot-eb4e504e8782ba43.yaml deleted file mode 100644 index f6e860cbb..000000000 --- a/releasenotes/notes/3par-manage-unmanage-snapshot-eb4e504e8782ba43.yaml +++ /dev/null @@ -1,3 +0,0 @@ ---- -features: - - Added snapshot manage/unmanage support to the HPE 3PAR driver. diff --git a/releasenotes/notes/Dell-SC-Driver-to-dell_emc-folder-e5d6fb1f1cf84149.yaml b/releasenotes/notes/Dell-SC-Driver-to-dell_emc-folder-e5d6fb1f1cf84149.yaml deleted file mode 100644 index 70ad85e87..000000000 --- a/releasenotes/notes/Dell-SC-Driver-to-dell_emc-folder-e5d6fb1f1cf84149.yaml +++ /dev/null @@ -1,9 +0,0 @@ ---- -upgrades: - - The Dell Storage Center driver is moved to the dell_emc directory - and has been rebranded to its current Dell EMC SC name. The - volume_driver entry in cinder.conf needs to be changed to - ``cinder.volume.drivers.dell_emc.sc.storagecenter_fc.SCFCDriver`` - for FC or - ``cinder.volume.drivers.dell_emc.sc.storagecenter_iscsi.SCISCSIDriver`` - for ISCSI. diff --git a/releasenotes/notes/Dell-SC-New-Extra-Specs-1de0d3f1ebc62881.yaml b/releasenotes/notes/Dell-SC-New-Extra-Specs-1de0d3f1ebc62881.yaml deleted file mode 100644 index 37d904af5..000000000 --- a/releasenotes/notes/Dell-SC-New-Extra-Specs-1de0d3f1ebc62881.yaml +++ /dev/null @@ -1,8 +0,0 @@ ---- -features: - - Dell SC - Compression and Dedupe support - added for Storage Centers that support the - options. - - Dell SC - Volume and Group QOS support - added for Storage Centers that support and - have enabled the option. 
diff --git a/releasenotes/notes/Dell-SC-Retype-Limitations-74f4b5f6a94ffe4f.yaml b/releasenotes/notes/Dell-SC-Retype-Limitations-74f4b5f6a94ffe4f.yaml deleted file mode 100644 index 372a38b1d..000000000 --- a/releasenotes/notes/Dell-SC-Retype-Limitations-74f4b5f6a94ffe4f.yaml +++ /dev/null @@ -1,16 +0,0 @@ ---- -issues: - - With the Dell SC Cinder Driver if a volume is retyped - to a new storage profile all volumes created via - snapshots from this volume will also change to the - new storage profile. - - With the Dell SC Cinder Driver retyping from one - replication type to another type (ex. regular - replication to live volume replication) is not - supported. -fixes: - - With the Dell SC Cinder Driver retyping to or from a - replicated type should now work. - - With the Dell SC Cinder Driver retype failed to - return a tuple if it had to return an update to the - volume state. diff --git a/releasenotes/notes/Dell-SC-ServerOS-Config-Option-bd0e018319758e03.yaml b/releasenotes/notes/Dell-SC-ServerOS-Config-Option-bd0e018319758e03.yaml deleted file mode 100644 index 25edcffec..000000000 --- a/releasenotes/notes/Dell-SC-ServerOS-Config-Option-bd0e018319758e03.yaml +++ /dev/null @@ -1,8 +0,0 @@ ---- -features: - - Config option ``dell_server_os`` added to the Dell SC driver. - This option allows the selection of the server type - used when creating a server on the Dell DSM during - initialize connection. This is only used if the - server does not exist. Valid values are from the - Dell DSM create server list. diff --git a/releasenotes/notes/Dell-SC-live-volume-41bacddee199ce83.yaml b/releasenotes/notes/Dell-SC-live-volume-41bacddee199ce83.yaml deleted file mode 100644 index 21b56a6c5..000000000 --- a/releasenotes/notes/Dell-SC-live-volume-41bacddee199ce83.yaml +++ /dev/null @@ -1,4 +0,0 @@ ---- -features: - - Added support for the use of live volume in place of - standard replication in the Dell SC driver. 
\ No newline at end of file diff --git a/releasenotes/notes/Dell-SC-replication-failover_host-failback-a9e9cbbd6a1be6c3.yaml b/releasenotes/notes/Dell-SC-replication-failover_host-failback-a9e9cbbd6a1be6c3.yaml deleted file mode 100644 index f5494e1e8..000000000 --- a/releasenotes/notes/Dell-SC-replication-failover_host-failback-a9e9cbbd6a1be6c3.yaml +++ /dev/null @@ -1,4 +0,0 @@ ---- -features: - - Added replication failback support for the Dell SC driver. - diff --git a/releasenotes/notes/Dell-SC-thaw_backend-b9362d381fabd4c9.yaml b/releasenotes/notes/Dell-SC-thaw_backend-b9362d381fabd4c9.yaml deleted file mode 100644 index a131b6d0a..000000000 --- a/releasenotes/notes/Dell-SC-thaw_backend-b9362d381fabd4c9.yaml +++ /dev/null @@ -1,5 +0,0 @@ ---- -issues: - - Dell SC Cinder driver has limited support in a failed - over state so thaw_backend has been implemented to - reject the thaw call when in such a state. \ No newline at end of file diff --git a/releasenotes/notes/Dell-SC-v2.1-replication-ef6b1d6a4e2795a0.yaml b/releasenotes/notes/Dell-SC-v2.1-replication-ef6b1d6a4e2795a0.yaml deleted file mode 100644 index 7c6b989c5..000000000 --- a/releasenotes/notes/Dell-SC-v2.1-replication-ef6b1d6a4e2795a0.yaml +++ /dev/null @@ -1,3 +0,0 @@ ---- -features: - - Added replication v2.1 support to the Dell Storage Center drivers. diff --git a/releasenotes/notes/Enable-HPE-3PAR-Compression-Feature-90e4de4b64a74a46.yaml b/releasenotes/notes/Enable-HPE-3PAR-Compression-Feature-90e4de4b64a74a46.yaml deleted file mode 100644 index 76f367afc..000000000 --- a/releasenotes/notes/Enable-HPE-3PAR-Compression-Feature-90e4de4b64a74a46.yaml +++ /dev/null @@ -1,8 +0,0 @@ ---- -features: - - HPE 3PAR driver adds following functionalities - Creating thin/dedup compresssed volume. - Retype for tpvv/tdvv volumes to be compressed. - Migration of compressed volumes. - Create compressed volume from compressed volume/snapshot source. - Compression support to create cg from source. 
diff --git a/releasenotes/notes/HPE-3par-Generic-Volume-Group-e048002e1c3469a3.yaml b/releasenotes/notes/HPE-3par-Generic-Volume-Group-e048002e1c3469a3.yaml deleted file mode 100644 index c57b0f8ba..000000000 --- a/releasenotes/notes/HPE-3par-Generic-Volume-Group-e048002e1c3469a3.yaml +++ /dev/null @@ -1,4 +0,0 @@ ---- -features: - - Added consistency group capability to generic volume groups in the - HPE 3PAR driver. diff --git a/releasenotes/notes/Huawei-volume-driver-replication-v2.1-ada5bc3ad62dc633.yaml b/releasenotes/notes/Huawei-volume-driver-replication-v2.1-ada5bc3ad62dc633.yaml deleted file mode 100644 index 95b7626ad..000000000 --- a/releasenotes/notes/Huawei-volume-driver-replication-v2.1-ada5bc3ad62dc633.yaml +++ /dev/null @@ -1,3 +0,0 @@ ---- -features: - - Added v2.1 replication support in Huawei Cinder driver. diff --git a/releasenotes/notes/Lefthand-generic-volume-group-570d07b4786b93c2.yaml b/releasenotes/notes/Lefthand-generic-volume-group-570d07b4786b93c2.yaml deleted file mode 100644 index dfb4e1fa8..000000000 --- a/releasenotes/notes/Lefthand-generic-volume-group-570d07b4786b93c2.yaml +++ /dev/null @@ -1,4 +0,0 @@ ---- -features: - - Add consistent group capability to generic volume groups in Lefthand - driver. diff --git a/releasenotes/notes/NetApp-ONTAP-full-cg-support-cfdc91bf0acf9fe1.yaml b/releasenotes/notes/NetApp-ONTAP-full-cg-support-cfdc91bf0acf9fe1.yaml deleted file mode 100644 index 19374293d..000000000 --- a/releasenotes/notes/NetApp-ONTAP-full-cg-support-cfdc91bf0acf9fe1.yaml +++ /dev/null @@ -1,6 +0,0 @@ ---- -features: - - Added support for creating, deleting, and updating consistency groups for - NetApp 7mode and CDOT backends. - - Added support for taking, deleting, and restoring a cgsnapshot for NetApp - 7mode and CDOT backends. 
diff --git a/releasenotes/notes/SolidFire-generic-volume-group-1b1e55661cd83a43.yaml b/releasenotes/notes/SolidFire-generic-volume-group-1b1e55661cd83a43.yaml deleted file mode 100644 index 5b727646b..000000000 --- a/releasenotes/notes/SolidFire-generic-volume-group-1b1e55661cd83a43.yaml +++ /dev/null @@ -1,3 +0,0 @@ ---- -features: - - Add consistent group capability to generic volume groups in the SolidFire driver. diff --git a/releasenotes/notes/VMEM-6000-drivers-removed-9b6675ff7ae5f960.yaml b/releasenotes/notes/VMEM-6000-drivers-removed-9b6675ff7ae5f960.yaml deleted file mode 100644 index 95a5269b2..000000000 --- a/releasenotes/notes/VMEM-6000-drivers-removed-9b6675ff7ae5f960.yaml +++ /dev/null @@ -1,3 +0,0 @@ ---- -upgrade: - - Violin Memory 6000 array series drivers are removed. diff --git a/releasenotes/notes/ZadaraStorage-13a5fff6f4fa1710.yaml b/releasenotes/notes/ZadaraStorage-13a5fff6f4fa1710.yaml deleted file mode 100644 index 875c5f05d..000000000 --- a/releasenotes/notes/ZadaraStorage-13a5fff6f4fa1710.yaml +++ /dev/null @@ -1,3 +0,0 @@ ---- -features: - - Added volume driver for Zadara Storage VPSA. diff --git a/releasenotes/notes/a7401ead26a7c83b-keystone-url.yaml b/releasenotes/notes/a7401ead26a7c83b-keystone-url.yaml deleted file mode 100644 index 93e66628f..000000000 --- a/releasenotes/notes/a7401ead26a7c83b-keystone-url.yaml +++ /dev/null @@ -1,3 +0,0 @@ ---- -fixes: - - Cinder will now correctly read Keystone's endpoint for quota calls from keystone_authtoken.auth_uri instead of keymgr.encryption_auth_url config option. diff --git a/releasenotes/notes/add-backup-project-attribute-3f57051ef9159b08.yaml b/releasenotes/notes/add-backup-project-attribute-3f57051ef9159b08.yaml deleted file mode 100644 index 6ac453bd7..000000000 --- a/releasenotes/notes/add-backup-project-attribute-3f57051ef9159b08.yaml +++ /dev/null @@ -1,3 +0,0 @@ ---- -features: - - Added ability to query backups by project ID. 
\ No newline at end of file diff --git a/releasenotes/notes/add-cg-capability-to-groups-2eb3e71682a88600.yaml b/releasenotes/notes/add-cg-capability-to-groups-2eb3e71682a88600.yaml deleted file mode 100644 index f0f951803..000000000 --- a/releasenotes/notes/add-cg-capability-to-groups-2eb3e71682a88600.yaml +++ /dev/null @@ -1,14 +0,0 @@ ---- -prelude: > - Drivers supporting consistent group snapshot in generic volume groups - reports "consistent_group_snapshot_enabled = True" instead of - "consistencygroup_support = True". As a result, a spec such as - "consistencygroup_support: ' True'" in either group type or - volume type will cause the scheduler not to choose the backend - that does not report "consistencygroup_support = True". - - In order to create a generic volume group that supports consistent - group snapshot, "consistent_group_snapshot_enable: ' True'" - should be set in the group type specs and volume type extra specs, - and "consistencygroup_support: ' True'" should not be set - in group type spec and volume type extra specs. diff --git a/releasenotes/notes/add-coho-driver-b4472bff3f64aa41.yaml b/releasenotes/notes/add-coho-driver-b4472bff3f64aa41.yaml deleted file mode 100644 index 4a3586158..000000000 --- a/releasenotes/notes/add-coho-driver-b4472bff3f64aa41.yaml +++ /dev/null @@ -1,3 +0,0 @@ ---- -features: - - Added backend driver for Coho Data storage. diff --git a/releasenotes/notes/add-connection-info-to-attachment-84d4dg45uh41db15.yaml b/releasenotes/notes/add-connection-info-to-attachment-84d4dg45uh41db15.yaml deleted file mode 100644 index f140ce344..000000000 --- a/releasenotes/notes/add-connection-info-to-attachment-84d4dg45uh41db15.yaml +++ /dev/null @@ -1,3 +0,0 @@ ---- -features: - - Added attribute ``connection_info`` to attachment object. 
diff --git a/releasenotes/notes/add-del-volumeTypeAccess-b1c8cb14a9d14db3.yaml b/releasenotes/notes/add-del-volumeTypeAccess-b1c8cb14a9d14db3.yaml deleted file mode 100644 index 85cdb9f77..000000000 --- a/releasenotes/notes/add-del-volumeTypeAccess-b1c8cb14a9d14db3.yaml +++ /dev/null @@ -1,4 +0,0 @@ ---- -upgrade: - - Adding or removing volume_type_access from any project during DB migration 62 must not be performed. - - When running PostgreSQL it is required to upgrade and restart all the cinder-api services along with DB migration 62. \ No newline at end of file diff --git a/releasenotes/notes/add-filter-to-group-snapshots-74sd8g138a289dh4.yaml b/releasenotes/notes/add-filter-to-group-snapshots-74sd8g138a289dh4.yaml deleted file mode 100644 index 13c48538b..000000000 --- a/releasenotes/notes/add-filter-to-group-snapshots-74sd8g138a289dh4.yaml +++ /dev/null @@ -1,3 +0,0 @@ ---- -fixes: - - Add filter, sorter and pagination support in group snapshot listings. diff --git a/releasenotes/notes/add-filters-support-to-get_pools-0852e9c0e42fbf98.yaml b/releasenotes/notes/add-filters-support-to-get_pools-0852e9c0e42fbf98.yaml deleted file mode 100644 index d13f70c6c..000000000 --- a/releasenotes/notes/add-filters-support-to-get_pools-0852e9c0e42fbf98.yaml +++ /dev/null @@ -1,3 +0,0 @@ ---- -features: - - Add filters support to get_pools API v3.28. diff --git a/releasenotes/notes/add-google-backup-driver-d1e7ac33d5780b79.yaml b/releasenotes/notes/add-google-backup-driver-d1e7ac33d5780b79.yaml deleted file mode 100644 index 0e71f15ed..000000000 --- a/releasenotes/notes/add-google-backup-driver-d1e7ac33d5780b79.yaml +++ /dev/null @@ -1,3 +0,0 @@ ---- -features: - - Added cinder backup driver for Google Cloud Storage. 
diff --git a/releasenotes/notes/add-io-ports-option-c751d1bd395dd614.yaml b/releasenotes/notes/add-io-ports-option-c751d1bd395dd614.yaml deleted file mode 100644 index beadbe26b..000000000 --- a/releasenotes/notes/add-io-ports-option-c751d1bd395dd614.yaml +++ /dev/null @@ -1,3 +0,0 @@ ---- -features: - - Add support to configure IO ports option in Dell EMC Unity driver. diff --git a/releasenotes/notes/add-like-filter-support-7d4r78d6de3984dv.yaml b/releasenotes/notes/add-like-filter-support-7d4r78d6de3984dv.yaml deleted file mode 100644 index 38453b355..000000000 --- a/releasenotes/notes/add-like-filter-support-7d4r78d6de3984dv.yaml +++ /dev/null @@ -1,12 +0,0 @@ ---- -features: - - | - Added like operator support to filters for the following resources:: - - - volume - - snapshot - - backup - - group - - group-snapshot - - attachment - - message diff --git a/releasenotes/notes/add-periodic-task-to-clean-expired-messages-84f47gxc88hda035.yaml b/releasenotes/notes/add-periodic-task-to-clean-expired-messages-84f47gxc88hda035.yaml deleted file mode 100644 index 64f2451d3..000000000 --- a/releasenotes/notes/add-periodic-task-to-clean-expired-messages-84f47gxc88hda035.yaml +++ /dev/null @@ -1,5 +0,0 @@ ---- -features: - - Added periodic task to clean expired messages in - cinder scheduler, also added a configuration option - ``message_reap_interval`` to handle the interval. diff --git a/releasenotes/notes/add-reset-group-snapshot-status-sd21a31cde5fa035.yaml b/releasenotes/notes/add-reset-group-snapshot-status-sd21a31cde5fa035.yaml deleted file mode 100644 index 5ace44c74..000000000 --- a/releasenotes/notes/add-reset-group-snapshot-status-sd21a31cde5fa035.yaml +++ /dev/null @@ -1,3 +0,0 @@ ---- -features: - - Added reset status API to group snapshot. 
diff --git a/releasenotes/notes/add-reset-group-status-sd21a31cde5fa034.yaml b/releasenotes/notes/add-reset-group-status-sd21a31cde5fa034.yaml deleted file mode 100644 index 08ea59452..000000000 --- a/releasenotes/notes/add-reset-group-status-sd21a31cde5fa034.yaml +++ /dev/null @@ -1,3 +0,0 @@ ---- -features: - - Added reset status API to generic volume group. diff --git a/releasenotes/notes/add-resource-filters-api-8g3dub1700qaye98.yaml b/releasenotes/notes/add-resource-filters-api-8g3dub1700qaye98.yaml deleted file mode 100644 index f51cddab3..000000000 --- a/releasenotes/notes/add-resource-filters-api-8g3dub1700qaye98.yaml +++ /dev/null @@ -1,3 +0,0 @@ ---- -features: - - Added ``resource_filters`` API to retrieve configured resource filters. diff --git a/releasenotes/notes/add-revert-to-snapshot-support-2d21a3dv4f5fa087.yaml b/releasenotes/notes/add-revert-to-snapshot-support-2d21a3dv4f5fa087.yaml deleted file mode 100644 index 2e31441fb..000000000 --- a/releasenotes/notes/add-revert-to-snapshot-support-2d21a3dv4f5fa087.yaml +++ /dev/null @@ -1,3 +0,0 @@ ---- -features: - - Add revert to snapshot API and support in LVM driver. diff --git a/releasenotes/notes/add-stochastic-scheduling-option-99e10eae023fbcca.yaml b/releasenotes/notes/add-stochastic-scheduling-option-99e10eae023fbcca.yaml deleted file mode 100644 index 7d811ce0e..000000000 --- a/releasenotes/notes/add-stochastic-scheduling-option-99e10eae023fbcca.yaml +++ /dev/null @@ -1,11 +0,0 @@ ---- -features: - - Added a new config option ``scheduler_weight_handler``. This is a global - option which specifies how the scheduler should choose from a listed of - weighted pools. By default the existing weigher is used which always - chooses the highest weight. - - Added a new weight handler ``StochasticHostWeightHandler``. This weight - handler chooses pools randomly, where the random probabilities are - proportional to the weights, so higher weighted pools are chosen more - frequently, but not all the time. 
This weight handler spreads new - shares across available pools more fairly. diff --git a/releasenotes/notes/add-suppress-lvm-fd-warnings-option.402bebc03b0a9f00.yaml b/releasenotes/notes/add-suppress-lvm-fd-warnings-option.402bebc03b0a9f00.yaml deleted file mode 100644 index 50acdf89d..000000000 --- a/releasenotes/notes/add-suppress-lvm-fd-warnings-option.402bebc03b0a9f00.yaml +++ /dev/null @@ -1,17 +0,0 @@ ---- -upgrade: - - | - In certain environments (Kubernetes for example) indirect calls to the LVM - commands result in file descriptor leak warning messages which in turn cause - the process_execution method to raise and exception. - - To accommodate these environments, and to maintain backward compatibility - in Newton we add a ``lvm_suppress_fd_warnings`` bool config to the LVM driver. - Setting this to True will append the LVM env vars to include the variable - ``LVM_SUPPRESS_FD_WARNINGS=1``. - - This is made an optional configuration because it only applies to very specific - environments. If we were to make this global that would require a rootwrap/privsep - update that could break compatibility when trying to do rolling upgrades of the - volume service. - diff --git a/releasenotes/notes/add-tegile-driver-b7919c5f30911998.yaml b/releasenotes/notes/add-tegile-driver-b7919c5f30911998.yaml deleted file mode 100644 index 9c1cc2e8c..000000000 --- a/releasenotes/notes/add-tegile-driver-b7919c5f30911998.yaml +++ /dev/null @@ -1,3 +0,0 @@ ---- -features: - - Added driver for Tegile IntelliFlash arrays. diff --git a/releasenotes/notes/add-vmax-replication-490202c15503ae03.yaml b/releasenotes/notes/add-vmax-replication-490202c15503ae03.yaml deleted file mode 100644 index ae747bbed..000000000 --- a/releasenotes/notes/add-vmax-replication-490202c15503ae03.yaml +++ /dev/null @@ -1,3 +0,0 @@ ---- -features: - - Add v2.1 volume replication support in VMAX driver. 
diff --git a/releasenotes/notes/add-volume-type-filter-to_get-pools-c791132540921398.yaml b/releasenotes/notes/add-volume-type-filter-to_get-pools-c791132540921398.yaml deleted file mode 100644 index 8b23c44f8..000000000 --- a/releasenotes/notes/add-volume-type-filter-to_get-pools-c791132540921398.yaml +++ /dev/null @@ -1,3 +0,0 @@ ---- -features: - - Add ``volume-type`` filter to API Get-Pools \ No newline at end of file diff --git a/releasenotes/notes/add-volume-upload-image-options-3a61a31c544fa034.yaml b/releasenotes/notes/add-volume-upload-image-options-3a61a31c544fa034.yaml deleted file mode 100644 index 6c1e83252..000000000 --- a/releasenotes/notes/add-volume-upload-image-options-3a61a31c544fa034.yaml +++ /dev/null @@ -1,4 +0,0 @@ ---- -fixes: - - Added the options ``visibility`` and ``protected`` to - the os-volume_upload_image REST API call. diff --git a/releasenotes/notes/add_ceph_custom_keyring_path-43a3b8c21a1ab3c4.yaml b/releasenotes/notes/add_ceph_custom_keyring_path-43a3b8c21a1ab3c4.yaml deleted file mode 100644 index 157714fb8..000000000 --- a/releasenotes/notes/add_ceph_custom_keyring_path-43a3b8c21a1ab3c4.yaml +++ /dev/null @@ -1,5 +0,0 @@ ---- -features: - - | - Added RBD keyring configuration parameter ``rbd_keyring_conf`` to define - custom path of Ceph keyring file. diff --git a/releasenotes/notes/add_manage_unmanage_itri_disco_driver-1c9ee31cc86b6eda.yaml b/releasenotes/notes/add_manage_unmanage_itri_disco_driver-1c9ee31cc86b6eda.yaml deleted file mode 100644 index 642a950ce..000000000 --- a/releasenotes/notes/add_manage_unmanage_itri_disco_driver-1c9ee31cc86b6eda.yaml +++ /dev/null @@ -1,3 +0,0 @@ ---- -Features: - - Added volume manage/unmanage support for disco driver. 
diff --git a/releasenotes/notes/allow-admin-quota-operations-c1c2236711224023.yaml b/releasenotes/notes/allow-admin-quota-operations-c1c2236711224023.yaml deleted file mode 100644 index 0a36494a2..000000000 --- a/releasenotes/notes/allow-admin-quota-operations-c1c2236711224023.yaml +++ /dev/null @@ -1,4 +0,0 @@ ---- -fixes: - - Projects with the admin role are now allowed to operate - on the quotas of all other projects. diff --git a/releasenotes/notes/allow-huawei-driver-lun-copy-speed-configurable-361a480e7b7e361d.yaml b/releasenotes/notes/allow-huawei-driver-lun-copy-speed-configurable-361a480e7b7e361d.yaml deleted file mode 100644 index b6808ea67..000000000 --- a/releasenotes/notes/allow-huawei-driver-lun-copy-speed-configurable-361a480e7b7e361d.yaml +++ /dev/null @@ -1,9 +0,0 @@ ---- -features: - - Allow users to specify the copy speed while using Huawei - driver to create volume from snapshot or clone volume, by - the new added metadata 'copyspeed'. - For example, user can add --metadata copyspeed=1 when - creating volume from source volume/snapshot. - The valid optional range of copyspeed is [1, 2, 3, 4], - respectively representing LOW, MEDIUM, HIGH and HIGHEST. diff --git a/releasenotes/notes/allow-remove-name-and-description-for-consisgroup-408257a0a18bd530.yaml b/releasenotes/notes/allow-remove-name-and-description-for-consisgroup-408257a0a18bd530.yaml deleted file mode 100644 index bf22398e1..000000000 --- a/releasenotes/notes/allow-remove-name-and-description-for-consisgroup-408257a0a18bd530.yaml +++ /dev/null @@ -1,3 +0,0 @@ ---- -features: - - Allow API user to remove the consistency group name or description information. 
diff --git a/releasenotes/notes/apply-limits-to-qemu-img-29f722a1bf4b91f8.yaml b/releasenotes/notes/apply-limits-to-qemu-img-29f722a1bf4b91f8.yaml deleted file mode 100644 index 1ec4c3e62..000000000 --- a/releasenotes/notes/apply-limits-to-qemu-img-29f722a1bf4b91f8.yaml +++ /dev/null @@ -1,7 +0,0 @@ ---- -security: - - The qemu-img tool now has resource limits applied - which prevent it from using more than 1GB of address - space or more than 2 seconds of CPU time. This provides - protection against denial of service attacks from - maliciously crafted or corrupted disk images. diff --git a/releasenotes/notes/backend-options-ed19e6c63b2b9090.yaml b/releasenotes/notes/backend-options-ed19e6c63b2b9090.yaml deleted file mode 100644 index 55a11324e..000000000 --- a/releasenotes/notes/backend-options-ed19e6c63b2b9090.yaml +++ /dev/null @@ -1,9 +0,0 @@ ---- -fixes: - - | - Cinder stopped supporting single-backend configurations in Ocata. However, - sample ``cinder.conf`` was still generated with driver-related options in - ``[DEFAULT]`` section, where those options had no effect at all. Now all of - driver options are listed in ``[backend_defaults]`` section, that indicates - that those options are effective only in this section and - ``[]`` sections listed in ``enabled_backends``. diff --git a/releasenotes/notes/backup-ceph-driver-journaling-exculsive-lock-features-6b6044138a288a83.yaml b/releasenotes/notes/backup-ceph-driver-journaling-exculsive-lock-features-6b6044138a288a83.yaml deleted file mode 100644 index d62a51dac..000000000 --- a/releasenotes/notes/backup-ceph-driver-journaling-exculsive-lock-features-6b6044138a288a83.yaml +++ /dev/null @@ -1,4 +0,0 @@ ---- -features: - - Added new BoolOpt ``backup_ceph_image_journals`` for enabling the Ceph - image features required to support RBD mirroring of Cinder backup pool. 
diff --git a/releasenotes/notes/backup-snapshot-6e7447db930c31f6.yaml b/releasenotes/notes/backup-snapshot-6e7447db930c31f6.yaml deleted file mode 100644 index 2409a53bc..000000000 --- a/releasenotes/notes/backup-snapshot-6e7447db930c31f6.yaml +++ /dev/null @@ -1,4 +0,0 @@ ---- -features: - - Support for snapshot backup using the optimal path in - Huawei driver. diff --git a/releasenotes/notes/backup-snapshots-2f547c8788bc11e1.yaml b/releasenotes/notes/backup-snapshots-2f547c8788bc11e1.yaml deleted file mode 100644 index 1940317d1..000000000 --- a/releasenotes/notes/backup-snapshots-2f547c8788bc11e1.yaml +++ /dev/null @@ -1,3 +0,0 @@ ---- -features: - - Added ability to backup snapshots. diff --git a/releasenotes/notes/backup-update-d0b0db6a7b1c2a5b.yaml b/releasenotes/notes/backup-update-d0b0db6a7b1c2a5b.yaml deleted file mode 100644 index 3abca4d25..000000000 --- a/releasenotes/notes/backup-update-d0b0db6a7b1c2a5b.yaml +++ /dev/null @@ -1,3 +0,0 @@ ---- -features: - - Added REST API to update backup name and description. diff --git a/releasenotes/notes/backup_driver_init_state-d4834fa927e502ab.yaml b/releasenotes/notes/backup_driver_init_state-d4834fa927e502ab.yaml deleted file mode 100644 index 1adddf28d..000000000 --- a/releasenotes/notes/backup_driver_init_state-d4834fa927e502ab.yaml +++ /dev/null @@ -1,3 +0,0 @@ ---- -fixes: - - Fixed service state reporting when backup manager is unable to initialize one of the backup drivers. diff --git a/releasenotes/notes/balanced-fc-port-selection-fbf6b841fea99156.yaml b/releasenotes/notes/balanced-fc-port-selection-fbf6b841fea99156.yaml deleted file mode 100644 index 0a7d5be50..000000000 --- a/releasenotes/notes/balanced-fc-port-selection-fbf6b841fea99156.yaml +++ /dev/null @@ -1,3 +0,0 @@ ---- -features: - - Support balanced FC port selection for Huawei drivers. 
diff --git a/releasenotes/notes/bdd-pools-stats-afb4398daa9248de.yaml b/releasenotes/notes/bdd-pools-stats-afb4398daa9248de.yaml deleted file mode 100644 index 5612d2e69..000000000 --- a/releasenotes/notes/bdd-pools-stats-afb4398daa9248de.yaml +++ /dev/null @@ -1,3 +0,0 @@ ---- -features: - - Report pools in volume stats for Block Device Driver. diff --git a/releasenotes/notes/bp-datera-cinder-driver-update-2.1-5c6455b45563adc5.yaml b/releasenotes/notes/bp-datera-cinder-driver-update-2.1-5c6455b45563adc5.yaml deleted file mode 100644 index 34e608dfe..000000000 --- a/releasenotes/notes/bp-datera-cinder-driver-update-2.1-5c6455b45563adc5.yaml +++ /dev/null @@ -1,8 +0,0 @@ ---- -features: - - Updating the Datera Elastic DataFabric Storage Driver - to version 2.1. This adds ACL support, Multipath - support and basic IP pool support. -upgrade: - - Changes config option default for ``datera_num_replicas`` - from 1 to 3 diff --git a/releasenotes/notes/bp-open-src-ibm-storage-driver-d17808e52aa4eacb.yaml b/releasenotes/notes/bp-open-src-ibm-storage-driver-d17808e52aa4eacb.yaml deleted file mode 100644 index 30e8cd554..000000000 --- a/releasenotes/notes/bp-open-src-ibm-storage-driver-d17808e52aa4eacb.yaml +++ /dev/null @@ -1,16 +0,0 @@ ---- -features: - - | - The IBM_Storage driver has been open sourced. This means that there is no - more need to download the package from the IBM site. The only requirement - remaining is to install pyxcli, which is available through pypi:: - - ``sudo pip install pyxcli`` - -upgrade: - - | - Previous installations of IBM Storage must be un-installed first and the - new driver should be installed on top. In addition the cinder.conf values - should be updated to reflect the new paths. For example the proxy setting - of ``storage.proxy.IBMStorageProxy`` should be updated to - ``cinder.volume.drivers.ibm.ibm_storage.proxy.IBMStorageProxy``. 
diff --git a/releasenotes/notes/brcd_lookupservice_http_support-f6485b38a1feaa15.yaml b/releasenotes/notes/brcd_lookupservice_http_support-f6485b38a1feaa15.yaml deleted file mode 100644 index fb0c0a6d3..000000000 --- a/releasenotes/notes/brcd_lookupservice_http_support-f6485b38a1feaa15.yaml +++ /dev/null @@ -1,5 +0,0 @@ ---- -features: - - Support for use of ``fc_southbound_protocol`` - configuration setting in the Brocade FC SAN - lookup service. diff --git a/releasenotes/notes/brocade_http_connector-0021e41dfa56e671.yaml b/releasenotes/notes/brocade_http_connector-0021e41dfa56e671.yaml deleted file mode 100644 index 12fb93815..000000000 --- a/releasenotes/notes/brocade_http_connector-0021e41dfa56e671.yaml +++ /dev/null @@ -1,10 +0,0 @@ ---- -features: - - HTTP connector for the Cinder Brocade FC Zone plugin. - This connector allows for communication - between the Brocade FC zone plugin and the switch - to be over HTTP or HTTPs. To make use of this - connector, the user would add a configuration - setting in the fabric block for a Brocade switch - with the name as 'fc_southbound_protocol' with - a value as 'HTTP' or 'HTTPS'. diff --git a/releasenotes/notes/brocade_virtual_fabrics_support-d2d0b95b19457c1d.yaml b/releasenotes/notes/brocade_virtual_fabrics_support-d2d0b95b19457c1d.yaml deleted file mode 100644 index bab9bc7df..000000000 --- a/releasenotes/notes/brocade_virtual_fabrics_support-d2d0b95b19457c1d.yaml +++ /dev/null @@ -1,7 +0,0 @@ ---- -features: - - Support for configuring Fibre Channel zoning on - Brocade switches through Cinder Fibre Channel Zone - Manager and Brocade Fibre Channel zone plugin. - To zone in a Virtual Fabric, set the configuration - option 'fc_virtual_fabric_id' for the fabric. 
diff --git a/releasenotes/notes/bug-1518213-a5bf2ea0d008f329.yaml b/releasenotes/notes/bug-1518213-a5bf2ea0d008f329.yaml deleted file mode 100644 index 923b82f26..000000000 --- a/releasenotes/notes/bug-1518213-a5bf2ea0d008f329.yaml +++ /dev/null @@ -1,3 +0,0 @@ ---- -features: - - Added Keystone v3 support for Swift backup driver in single user mode. diff --git a/releasenotes/notes/bug-1564110-enables-mysql-cluster-support-99012d96e029223a.yaml b/releasenotes/notes/bug-1564110-enables-mysql-cluster-support-99012d96e029223a.yaml deleted file mode 100644 index f09b7b0af..000000000 --- a/releasenotes/notes/bug-1564110-enables-mysql-cluster-support-99012d96e029223a.yaml +++ /dev/null @@ -1,9 +0,0 @@ ---- -features: - - | - Enabled the usage of MySQL Cluster (NDB) with Cinder. This includes the - usage of the boolean mysql_enable_ndb setting from oslo.db 4.24 and - above. This feature allows operators to select from MySQL (InnoDB) or - MySQL Cluster (NDB) as the database storage engine backend. This feature - is intended only for new installations as there is no automated migration - path from InnoDB to MySQL Cluster (NDB). diff --git a/releasenotes/notes/bug-1570845-efdb0206718f4ca4.yaml b/releasenotes/notes/bug-1570845-efdb0206718f4ca4.yaml deleted file mode 100644 index 8ca419bdc..000000000 --- a/releasenotes/notes/bug-1570845-efdb0206718f4ca4.yaml +++ /dev/null @@ -1,4 +0,0 @@ ---- -upgrade: - - The ``backup_service_inithost_offload`` configuration - option now defaults to ``True`` instead of ``False``. 
diff --git a/releasenotes/notes/bug-1612763-report-multiattach-enabled-NetApp-backends-0fbf2cb621e4747d.yaml b/releasenotes/notes/bug-1612763-report-multiattach-enabled-NetApp-backends-0fbf2cb621e4747d.yaml deleted file mode 100644 index 9c6ef506b..000000000 --- a/releasenotes/notes/bug-1612763-report-multiattach-enabled-NetApp-backends-0fbf2cb621e4747d.yaml +++ /dev/null @@ -1,5 +0,0 @@ ---- -fixes: - - Volumes created on NetApp cDOT and 7mode storage systems now - report 'multiattach' capability. They have always supported such a - capability, but not reported it to Cinder. diff --git a/releasenotes/notes/bug-1614095-add-user_id-to-snapshot_show-4884fab825983c3a.yaml b/releasenotes/notes/bug-1614095-add-user_id-to-snapshot_show-4884fab825983c3a.yaml deleted file mode 100644 index 0440a6ea6..000000000 --- a/releasenotes/notes/bug-1614095-add-user_id-to-snapshot_show-4884fab825983c3a.yaml +++ /dev/null @@ -1,3 +0,0 @@ ---- -features: - - Add ``user_id`` field to snapshot list/detail and snapshot show. diff --git a/releasenotes/notes/bug-1615451-NetApp-cDOT-fix-reporting-replication-capability-dca29f39b9fa7651.yaml b/releasenotes/notes/bug-1615451-NetApp-cDOT-fix-reporting-replication-capability-dca29f39b9fa7651.yaml deleted file mode 100644 index 3e23e37bf..000000000 --- a/releasenotes/notes/bug-1615451-NetApp-cDOT-fix-reporting-replication-capability-dca29f39b9fa7651.yaml +++ /dev/null @@ -1,5 +0,0 @@ ---- -fixes: - - NetApp cDOT block and file drivers now report replication capability - at the pool level; and are hence compatible with using the - ``replication_enabled`` extra-spec in volume types. 
diff --git a/releasenotes/notes/bug-1622057-netapp-cdot-fix-replication-status-cheesecake-volumes-804dc8b0b1380e6b.yaml b/releasenotes/notes/bug-1622057-netapp-cdot-fix-replication-status-cheesecake-volumes-804dc8b0b1380e6b.yaml deleted file mode 100644 index 3e860b044..000000000 --- a/releasenotes/notes/bug-1622057-netapp-cdot-fix-replication-status-cheesecake-volumes-804dc8b0b1380e6b.yaml +++ /dev/null @@ -1,5 +0,0 @@ ---- -fixes: - - The NetApp cDOT driver now sets the ``replication_status`` attribute - appropriately on volumes created within replicated backends when using host - level replication. \ No newline at end of file diff --git a/releasenotes/notes/bug-1634203-netapp-cdot-fix-clone-from-nfs-image-cache-2218fb402783bc20.yaml b/releasenotes/notes/bug-1634203-netapp-cdot-fix-clone-from-nfs-image-cache-2218fb402783bc20.yaml deleted file mode 100644 index aefc36b18..000000000 --- a/releasenotes/notes/bug-1634203-netapp-cdot-fix-clone-from-nfs-image-cache-2218fb402783bc20.yaml +++ /dev/null @@ -1,4 +0,0 @@ ---- -fixes: - - Fixed an issue where the NetApp cDOT NFS driver failed to clone new - volumes from the image cache. diff --git a/releasenotes/notes/bug-1660927-netapp-no-copyoffload-77fc3cf4f2cf2335.yaml b/releasenotes/notes/bug-1660927-netapp-no-copyoffload-77fc3cf4f2cf2335.yaml deleted file mode 100644 index f65379f0b..000000000 --- a/releasenotes/notes/bug-1660927-netapp-no-copyoffload-77fc3cf4f2cf2335.yaml +++ /dev/null @@ -1,5 +0,0 @@ ---- -fixes: - - | - Fixed misleading error message when NetApp copyoffload tool is not in place - during image cloning. diff --git a/releasenotes/notes/bug-1667071-dc6407f40a1f7d15.yaml b/releasenotes/notes/bug-1667071-dc6407f40a1f7d15.yaml deleted file mode 100644 index c9b31d399..000000000 --- a/releasenotes/notes/bug-1667071-dc6407f40a1f7d15.yaml +++ /dev/null @@ -1,13 +0,0 @@ ---- -fixes: - - Modifying the extra-specs of an in use Volume Type was something that - we've unintentionally allowed. 
The result is unexpected or unknown - volume behaviors in cases where a type was modified while a volume was - assigned that type. This has been particularly annoying for folks that - have assigned the volume-type to a different/new backend device. - - In case there are customers using this "bug" we add a config option to - retain the bad behavior "allow_inuse_volume_type_modification", with a - default setting of False (Don't allow). Note this config option is being - introduced as deprecated and will be removed in a future release. It's - being provided as a bridge to not break upgrades without notice. diff --git a/releasenotes/notes/bug-1670260-fix-boolean-is_public-d16e1957c0f09d65.yaml b/releasenotes/notes/bug-1670260-fix-boolean-is_public-d16e1957c0f09d65.yaml deleted file mode 100644 index a40fa7cb9..000000000 --- a/releasenotes/notes/bug-1670260-fix-boolean-is_public-d16e1957c0f09d65.yaml +++ /dev/null @@ -1,8 +0,0 @@ ---- -fixes: - - | - Fixed issue where ``create`` and ``update`` api's of ``volume-type`` and - ``group_type`` were returning 500 error if boolean 'is_public' value - passed in the form of string. Now user can pass following valid boolean - values to these api's: - '0', 'f', 'false', 'off', 'n', 'no', '1', 't', 'true', 'on', 'y', 'yes' \ No newline at end of file diff --git a/releasenotes/notes/bug-1671220-4d521be71d0b8aa4.yaml b/releasenotes/notes/bug-1671220-4d521be71d0b8aa4.yaml deleted file mode 100644 index 1de87c099..000000000 --- a/releasenotes/notes/bug-1671220-4d521be71d0b8aa4.yaml +++ /dev/null @@ -1,6 +0,0 @@ ---- -fixes: - - | - Fixed consistency groups API which was always returning groups - scoped to project ID from user context instead of given input - project ID. 
diff --git a/releasenotes/notes/bug-1705375-prohibit-group-deletion-if-groupsnapshot-exists.yaml b/releasenotes/notes/bug-1705375-prohibit-group-deletion-if-groupsnapshot-exists.yaml deleted file mode 100644 index 468a54579..000000000 --- a/releasenotes/notes/bug-1705375-prohibit-group-deletion-if-groupsnapshot-exists.yaml +++ /dev/null @@ -1,3 +0,0 @@ ---- -fixes: - - Prohibit the deletion of group if group snapshot exists. diff --git a/releasenotes/notes/capacity-headroom-4b07701f1df9e5c4.yaml b/releasenotes/notes/capacity-headroom-4b07701f1df9e5c4.yaml deleted file mode 100644 index c9fc5bc34..000000000 --- a/releasenotes/notes/capacity-headroom-4b07701f1df9e5c4.yaml +++ /dev/null @@ -1,5 +0,0 @@ ---- -features: - - Cinder is now collecting capacity data, including - virtual free capacity etc from the backends. A notification - which includes that data is periodically emitted. diff --git a/releasenotes/notes/cg-scheduler-change-180a36b77e8cc26b.yaml b/releasenotes/notes/cg-scheduler-change-180a36b77e8cc26b.yaml deleted file mode 100644 index 345af9406..000000000 --- a/releasenotes/notes/cg-scheduler-change-180a36b77e8cc26b.yaml +++ /dev/null @@ -1,3 +0,0 @@ ---- -fixes: - - Consistency group creation previously scheduled at the pool level. Now it is fixed to schedule at the backend level as designed. diff --git a/releasenotes/notes/cg_api_volume_type-7db1856776e707c7.yaml b/releasenotes/notes/cg_api_volume_type-7db1856776e707c7.yaml deleted file mode 100644 index 3bbfdfa07..000000000 --- a/releasenotes/notes/cg_api_volume_type-7db1856776e707c7.yaml +++ /dev/null @@ -1,3 +0,0 @@ ---- -features: - - The consistency group API now returns volume type IDs. 
diff --git a/releasenotes/notes/check-displayname-displaydescription-123sd5gef91acb12.yaml b/releasenotes/notes/check-displayname-displaydescription-123sd5gef91acb12.yaml deleted file mode 100644 index fe24b8d0d..000000000 --- a/releasenotes/notes/check-displayname-displaydescription-123sd5gef91acb12.yaml +++ /dev/null @@ -1,4 +0,0 @@ ---- -fixes: - - Add 'display_name' and 'display_description' validation - for creating/updating snapshot and volume operations. \ No newline at end of file diff --git a/releasenotes/notes/check-snapshots-when-cascade-deleting-transferred-volume-575ef0b76bd7f334.yaml b/releasenotes/notes/check-snapshots-when-cascade-deleting-transferred-volume-575ef0b76bd7f334.yaml deleted file mode 100644 index 9d3ef60d8..000000000 --- a/releasenotes/notes/check-snapshots-when-cascade-deleting-transferred-volume-575ef0b76bd7f334.yaml +++ /dev/null @@ -1,7 +0,0 @@ ---- -fixes: - - After transferring a volume without snapshots from one user project - to another user project, if the receiving user uses cascade deleting, - it will cause some exceptions in driver and volume will be error_deleting. - Adding additional check to ensure there are no snapshots left in other - project when cascade deleting a tranferred volume. diff --git a/releasenotes/notes/cinder-api-microversions-d2082a095c322ce6.yaml b/releasenotes/notes/cinder-api-microversions-d2082a095c322ce6.yaml deleted file mode 100644 index 0e9fcba26..000000000 --- a/releasenotes/notes/cinder-api-microversions-d2082a095c322ce6.yaml +++ /dev/null @@ -1,3 +0,0 @@ ---- -features: - - Added support for API microversions, as well as /v3 API endpoint. 
diff --git a/releasenotes/notes/cinder-api-middleware-remove-deprecated-option-98912ab7e8b472e8.yaml b/releasenotes/notes/cinder-api-middleware-remove-deprecated-option-98912ab7e8b472e8.yaml deleted file mode 100644 index 91ad09499..000000000 --- a/releasenotes/notes/cinder-api-middleware-remove-deprecated-option-98912ab7e8b472e8.yaml +++ /dev/null @@ -1,3 +0,0 @@ ---- -upgrade: - - Removed deprecated option ``osapi_max_request_body_size``. diff --git a/releasenotes/notes/cinder-coprhd-driver-11ebd149ea8610fd.yaml b/releasenotes/notes/cinder-coprhd-driver-11ebd149ea8610fd.yaml deleted file mode 100644 index cc63218f2..000000000 --- a/releasenotes/notes/cinder-coprhd-driver-11ebd149ea8610fd.yaml +++ /dev/null @@ -1,2 +0,0 @@ -features: - - Added volume backend drivers for CoprHD FC, iSCSI and Scaleio. diff --git a/releasenotes/notes/cinder-manage-db-online-schema-migrations-d1c0d40f26d0f033.yaml b/releasenotes/notes/cinder-manage-db-online-schema-migrations-d1c0d40f26d0f033.yaml deleted file mode 100644 index 71de559c5..000000000 --- a/releasenotes/notes/cinder-manage-db-online-schema-migrations-d1c0d40f26d0f033.yaml +++ /dev/null @@ -1,13 +0,0 @@ ---- -upgrade: - - To get rid of long running DB data migrations that must be run offline, - Cinder will now be able to execute them online, on a live cloud. Before - upgrading from Ocata to Pike, operator needs to perform all the Newton - data migrations. To achieve that he needs to perform ``cinder-manage db - online_data_migrations`` until there are no records to be updated. To limit - DB performance impact migrations can be performed in chunks limited by - ``--max_number`` option. If your intent is to upgrade Cinder in a non-live - manner, you can use ``--ignore_state`` option safely. Please note that - finishing all the Newton data migrations will be enforced by the first - schema migration in Pike, so you won't be able to upgrade to Pike without - that. 
diff --git a/releasenotes/notes/cloudbyte-retype-support-4b9f79f351465279.yaml b/releasenotes/notes/cloudbyte-retype-support-4b9f79f351465279.yaml deleted file mode 100644 index 39dbcf679..000000000 --- a/releasenotes/notes/cloudbyte-retype-support-4b9f79f351465279.yaml +++ /dev/null @@ -1,3 +0,0 @@ ---- -features: - - Retype support added to CloudByte iSCSI driver. diff --git a/releasenotes/notes/cluster_job_distribution-f916dd2e4cce6c1b.yaml b/releasenotes/notes/cluster_job_distribution-f916dd2e4cce6c1b.yaml deleted file mode 100644 index 1de2d544f..000000000 --- a/releasenotes/notes/cluster_job_distribution-f916dd2e4cce6c1b.yaml +++ /dev/null @@ -1,25 +0,0 @@ ---- -prelude: > - Everything in Cinder's release notes related to the High Availability - Active-Active effort -preluded with "HA A-A:"- is work in progress and - should not be used in production until it has been completed and the - appropriate release note has been issued stating its readiness for - production. - -features: - - "HA A-A: Add cluster configuration option to allow grouping hosts that - share the same backend configurations and should work in Active-Active - fashion." - - "HA A-A: Updated manage command to display cluster information on service - listings." - - "HA A-A: Added cluster subcommand in manage command to list, remove, and - rename clusters." - - "HA A-A: Added clusters API endpoints for cluster related operations (index, - detail, show, enable/disable). Index and detail accept filtering by - `name`, `binary`, `disabled`, `num_hosts`, `num_down_hosts`, and up/down - status (`is_up`) as URL parameters. Also added their respective policies." - - "HA A-A: Attach and detach operations are now cluster aware and make full - use of clustered cinder-volume services." - - "HA A-A: Delete volume, delete snapshot, delete consistency group, and - delete consistency group snapshot operations are now cluster aware and make - full use of clustered cinder-volume services." 
diff --git a/releasenotes/notes/consistency_group_manage-d30a2ad8917a7a86.yaml b/releasenotes/notes/consistency_group_manage-d30a2ad8917a7a86.yaml deleted file mode 100644 index c763d5207..000000000 --- a/releasenotes/notes/consistency_group_manage-d30a2ad8917a7a86.yaml +++ /dev/null @@ -1,3 +0,0 @@ ---- -features: - - Added update-host command for consistency groups in cinder-manage. diff --git a/releasenotes/notes/coprhd-generic-volume-group-a1d41d439f94ae19.yaml b/releasenotes/notes/coprhd-generic-volume-group-a1d41d439f94ae19.yaml deleted file mode 100644 index 8ba227155..000000000 --- a/releasenotes/notes/coprhd-generic-volume-group-a1d41d439f94ae19.yaml +++ /dev/null @@ -1,3 +0,0 @@ ---- -features: - - Add consistent group capability to generic volume groups in CoprHD driver. diff --git a/releasenotes/notes/create-update-rules-b46cf9c07c5a3966.yaml b/releasenotes/notes/create-update-rules-b46cf9c07c5a3966.yaml deleted file mode 100644 index 8ccc773d0..000000000 --- a/releasenotes/notes/create-update-rules-b46cf9c07c5a3966.yaml +++ /dev/null @@ -1,6 +0,0 @@ ---- -features: - - Separate create and update rules for volume metadata. -upgrade: - - If policy for update volume metadata is modified in a desired way - it's needed to add a desired rule for create volume metadata. diff --git a/releasenotes/notes/create_volume_from_encrypted_image-9666e1ed7b4eab5f.yaml b/releasenotes/notes/create_volume_from_encrypted_image-9666e1ed7b4eab5f.yaml deleted file mode 100644 index 6473dd94a..000000000 --- a/releasenotes/notes/create_volume_from_encrypted_image-9666e1ed7b4eab5f.yaml +++ /dev/null @@ -1,4 +0,0 @@ ---- -fixes: - - Creating a new volume from an image that was created - from an encrypted Cinder volume now succeeds. 
diff --git a/releasenotes/notes/datera-2.2-driver-update-28b97aa2aaf333b6.yaml b/releasenotes/notes/datera-2.2-driver-update-28b97aa2aaf333b6.yaml deleted file mode 100644 index 76a00c8d0..000000000 --- a/releasenotes/notes/datera-2.2-driver-update-28b97aa2aaf333b6.yaml +++ /dev/null @@ -1,12 +0,0 @@ ---- -features: - - Capabilites List for Datera Volume Drivers - - Extended Volume-Type Support for Datera Volume Drivers - - Naming convention change for Datera Volume Drivers - - Volume Manage/Unmanage support for Datera Volume Drivers - - New BoolOpt ``datera_debug_override_num_replicas`` for Datera Volume Drivers -deprecations: - - IntOpt ``datera_num_replicas`` is changed to a volume type - extra spec option-- ``DF:replica_count`` - - BoolOpt ``datera_acl_allow_all`` is changed to a volume type - extra spec option-- ``DF:acl_allow_all`` diff --git a/releasenotes/notes/datera-2.3-driver-update-12d0221fd4bb9fb0.yaml b/releasenotes/notes/datera-2.3-driver-update-12d0221fd4bb9fb0.yaml deleted file mode 100644 index 800790610..000000000 --- a/releasenotes/notes/datera-2.3-driver-update-12d0221fd4bb9fb0.yaml +++ /dev/null @@ -1,15 +0,0 @@ ---- -features: - - Added Datera EDF API 2.1 support. - - Added Datera Multi-Tenancy Support. - - Added Datera Template Support. - - Broke Datera driver up into modules. -upgrade: - - Datera driver location has changed from cinder.volume.drivers - .datera.DateraDriver to cinder.volume.drivers.datera.datera_iscsi - .DateraDriver. - -deprecations: - - Deprecated datera_api_version option. - - Removed datera_acl_allow_all option. - - Removed datera_num_replicas option. 
diff --git a/releasenotes/notes/datera-2.4.0-driver-update-164bbc77e6b45eb7.yaml b/releasenotes/notes/datera-2.4.0-driver-update-164bbc77e6b45eb7.yaml deleted file mode 100644 index 64c7b4636..000000000 --- a/releasenotes/notes/datera-2.4.0-driver-update-164bbc77e6b45eb7.yaml +++ /dev/null @@ -1,7 +0,0 @@ ---- -features: - - Added ``datera_disable_profiler`` boolean config option. - - Added Cinder fast-retype support to Datera EDF driver. - - Added Volume Placement extra-specs support to Datera EDF driver. - - Fixed ACL multi-attach bug in Datera EDF driver. - - Fixed a few scalability bugs in the Datera EDF driver. diff --git a/releasenotes/notes/datera-driver-v2-update-930468e8259c8e86.yaml b/releasenotes/notes/datera-driver-v2-update-930468e8259c8e86.yaml deleted file mode 100644 index fda5fce1c..000000000 --- a/releasenotes/notes/datera-driver-v2-update-930468e8259c8e86.yaml +++ /dev/null @@ -1,12 +0,0 @@ ---- -features: - - All Datera DataFabric backed volume-types will now use - API version 2 with Datera DataFabric. -upgrade: - - Users of the Datera Cinder driver are now required to use - Datera DataFabric version 1.0+. Versions before 1.0 will - not be able to utilize this new driver since they still - function on v1 of the Datera DataFabric API. -deprecations: - - Config option ``datera_api_token`` has been replaced by - options ``san_login`` and ``san_password``. diff --git a/releasenotes/notes/db-schema-from-kilo-e6e952744531caa2.yaml b/releasenotes/notes/db-schema-from-kilo-e6e952744531caa2.yaml deleted file mode 100644 index 526f2fe9b..000000000 --- a/releasenotes/notes/db-schema-from-kilo-e6e952744531caa2.yaml +++ /dev/null @@ -1,5 +0,0 @@ ---- -upgrade: - - The Cinder database can now only be upgraded from changes since the Kilo - release. In order to upgrade from a version prior to that, you must now - upgrade to at least Kilo first, then to Newton or later. 
diff --git a/releasenotes/notes/db-schema-from-liberty-f5fa57d67441dece.yaml b/releasenotes/notes/db-schema-from-liberty-f5fa57d67441dece.yaml deleted file mode 100644 index 318cb435b..000000000 --- a/releasenotes/notes/db-schema-from-liberty-f5fa57d67441dece.yaml +++ /dev/null @@ -1,6 +0,0 @@ ---- -upgrade: - - The Cinder database can now only be upgraded from changes since the - Liberty release. In order to upgrade from a version prior to that, - you must now upgrade to at least Liberty first, then to Ocata or - later. diff --git a/releasenotes/notes/db-schema-from-mitaka-168ac06161e9ca0d.yaml b/releasenotes/notes/db-schema-from-mitaka-168ac06161e9ca0d.yaml deleted file mode 100644 index 0e2567f62..000000000 --- a/releasenotes/notes/db-schema-from-mitaka-168ac06161e9ca0d.yaml +++ /dev/null @@ -1,5 +0,0 @@ ---- -upgrade: - - The Cinder database can now only be upgraded from changes since the - Mitaka release. In order to upgrade from a version prior to that, - you must now upgrade to at least Mitaka first, then to Pike or later. diff --git a/releasenotes/notes/default-apiv1-disabled-9f6bb0c67b38e670.yaml b/releasenotes/notes/default-apiv1-disabled-9f6bb0c67b38e670.yaml deleted file mode 100644 index 41d85ad88..000000000 --- a/releasenotes/notes/default-apiv1-disabled-9f6bb0c67b38e670.yaml +++ /dev/null @@ -1,6 +0,0 @@ ---- -upgrade: - - The v1 API was deprecated in the Juno release and is - now defaulted to disabled. In order to still use the - v1 API, you must now set ``enable_v1_api`` to ``True`` - in your cinder.conf file. diff --git a/releasenotes/notes/del_volume_with_fc-f024b9f2d6eaca0f.yaml b/releasenotes/notes/del_volume_with_fc-f024b9f2d6eaca0f.yaml deleted file mode 100644 index 0d4766d19..000000000 --- a/releasenotes/notes/del_volume_with_fc-f024b9f2d6eaca0f.yaml +++ /dev/null @@ -1,4 +0,0 @@ ---- -fixes: - - Fixed StorWize/SVC error causing volume deletion to get - stuck in the 'deleting' state when using FlashCopy. 
diff --git a/releasenotes/notes/delete-volume-metadata-keys-3e19694401e13d00.yaml b/releasenotes/notes/delete-volume-metadata-keys-3e19694401e13d00.yaml deleted file mode 100644 index b1743910d..000000000 --- a/releasenotes/notes/delete-volume-metadata-keys-3e19694401e13d00.yaml +++ /dev/null @@ -1,4 +0,0 @@ ---- -features: - - Added using etags in API calls to avoid the lost update problem during - deleting volume metadata. diff --git a/releasenotes/notes/delete-volume-with-snapshots-0b104e212d5d36b1.yaml b/releasenotes/notes/delete-volume-with-snapshots-0b104e212d5d36b1.yaml deleted file mode 100644 index 72c52894c..000000000 --- a/releasenotes/notes/delete-volume-with-snapshots-0b104e212d5d36b1.yaml +++ /dev/null @@ -1,4 +0,0 @@ ---- -features: - - It is now possible to delete a volume and its snapshots - by passing an additional argument to volume delete, "cascade=True". diff --git a/releasenotes/notes/delete_parameters-6f44fece22a7787d.yaml b/releasenotes/notes/delete_parameters-6f44fece22a7787d.yaml deleted file mode 100644 index a94e432de..000000000 --- a/releasenotes/notes/delete_parameters-6f44fece22a7787d.yaml +++ /dev/null @@ -1,13 +0,0 @@ ---- -features: - - The ``force`` boolean parameter has been added to the volume - delete API. It may be used in combination with ``cascade``. - This also means that volume force delete is available - in the base volume API rather than only in the - ``volume_admin_actions`` extension. -upgrade: - - There is a new policy option ``volume:force_delete`` which - controls access to the ability to specify force delete - via the volume delete API. This is separate from the - pre-existing ``volume-admin-actions:force_delete`` policy - check. 
diff --git a/releasenotes/notes/dell-emc-sc-support-generic-groups-98c7452d705b36f9.yaml b/releasenotes/notes/dell-emc-sc-support-generic-groups-98c7452d705b36f9.yaml deleted file mode 100644 index 5066e21df..000000000 --- a/releasenotes/notes/dell-emc-sc-support-generic-groups-98c7452d705b36f9.yaml +++ /dev/null @@ -1,4 +0,0 @@ ---- -features: - - Add consistency group capability to Generic Volume Groups in the - Dell EMC SC driver. diff --git a/releasenotes/notes/dell-emc-unity-driver-72cb901467b23b22.yaml b/releasenotes/notes/dell-emc-unity-driver-72cb901467b23b22.yaml deleted file mode 100644 index 9e43264ac..000000000 --- a/releasenotes/notes/dell-emc-unity-driver-72cb901467b23b22.yaml +++ /dev/null @@ -1,3 +0,0 @@ ---- -features: - - Added backend driver for Dell EMC Unity storage. diff --git a/releasenotes/notes/deprecate-api-v2-9f4543ab2e14b018.yaml b/releasenotes/notes/deprecate-api-v2-9f4543ab2e14b018.yaml deleted file mode 100644 index fa97c8483..000000000 --- a/releasenotes/notes/deprecate-api-v2-9f4543ab2e14b018.yaml +++ /dev/null @@ -1,7 +0,0 @@ ---- -deprecations: - - | - The Cinder v2 API has now been marked as deprecated. All new client code - should use the v3 API. API v3 adds support for microversioned API calls. - If no microversion is requested, the base 3.0 version for the v3 API is - identical to v2. diff --git a/releasenotes/notes/deprecate-backends-in-default-b9784a2333fe22f2.yaml b/releasenotes/notes/deprecate-backends-in-default-b9784a2333fe22f2.yaml deleted file mode 100644 index c7f39d1cb..000000000 --- a/releasenotes/notes/deprecate-backends-in-default-b9784a2333fe22f2.yaml +++ /dev/null @@ -1,10 +0,0 @@ ---- -upgrade: - - Any Volume Drivers configured in the ``DEFAULT`` config stanza should be moved - to their own stanza and enabled via the ``enabled_backends`` config option. - The older style of config with ``DEFAULT`` is deprecated and will be - removed in future releases. 
-deprecations: - - Configuring Volume Drivers in the ``DEFAULT`` config stanza is not going to be - maintained and will be removed in the next release. All backends should use - the ``enabled_backends`` config option with separate stanza's for each. diff --git a/releasenotes/notes/deprecate-block-device-driver-d30232547a31fe1e.yaml b/releasenotes/notes/deprecate-block-device-driver-d30232547a31fe1e.yaml deleted file mode 100644 index dc0f3d909..000000000 --- a/releasenotes/notes/deprecate-block-device-driver-d30232547a31fe1e.yaml +++ /dev/null @@ -1,6 +0,0 @@ ---- -deprecations: - - The block_driver is deprecated as of the Ocata release and will be - removed in the Queens release of Cinder. Instead the LVM driver - with the LIO iSCSI target should be used. For those that desire - higher performance, they should use LVM striping. diff --git a/releasenotes/notes/deprecate-cinder-linux-smb-driver-4aec58f15a963c54.yaml b/releasenotes/notes/deprecate-cinder-linux-smb-driver-4aec58f15a963c54.yaml deleted file mode 100644 index 5f141b3b7..000000000 --- a/releasenotes/notes/deprecate-cinder-linux-smb-driver-4aec58f15a963c54.yaml +++ /dev/null @@ -1,6 +0,0 @@ ---- -deprecations: - - | - The Cinder Linux SMBFS driver is now deprecated and will be removed - during the following release. Deployers are encouraged to use the - Windows SMBFS driver instead. diff --git a/releasenotes/notes/deprecate-xml-api-bf3e4079f1dc5eae.yaml b/releasenotes/notes/deprecate-xml-api-bf3e4079f1dc5eae.yaml deleted file mode 100644 index 8e11ff9d2..000000000 --- a/releasenotes/notes/deprecate-xml-api-bf3e4079f1dc5eae.yaml +++ /dev/null @@ -1,3 +0,0 @@ ---- -deprecations: - - The XML API has been marked deprecated and will be removed in a future release. 
diff --git a/releasenotes/notes/deprecate_osapi_volume_base_url-b6984886a902a562.yaml b/releasenotes/notes/deprecate_osapi_volume_base_url-b6984886a902a562.yaml deleted file mode 100644 index 1c5d365af..000000000 --- a/releasenotes/notes/deprecate_osapi_volume_base_url-b6984886a902a562.yaml +++ /dev/null @@ -1,4 +0,0 @@ ---- -deprecations: - - Instead of using osapi_volume_base_url use public_endpoint. Both do the - same thing. diff --git a/releasenotes/notes/deprecated-ibm-multipath-f06c0e907a6301de.yaml b/releasenotes/notes/deprecated-ibm-multipath-f06c0e907a6301de.yaml deleted file mode 100644 index 95c43bb9e..000000000 --- a/releasenotes/notes/deprecated-ibm-multipath-f06c0e907a6301de.yaml +++ /dev/null @@ -1,3 +0,0 @@ ---- -deprecations: - - Deprecated IBM driver _multipath_enabled config flags. diff --git a/releasenotes/notes/deprecated-nas-ip-fd86a734c92f6fae.yaml b/releasenotes/notes/deprecated-nas-ip-fd86a734c92f6fae.yaml deleted file mode 100644 index 234d81f26..000000000 --- a/releasenotes/notes/deprecated-nas-ip-fd86a734c92f6fae.yaml +++ /dev/null @@ -1,5 +0,0 @@ ---- -deprecations: - - Deprecated the configuration option ``nas_ip``. Use option - ``nas_host`` to indicate the IP address or hostname of the - NAS system. diff --git a/releasenotes/notes/discard-config-option-711a7fbf20685834.yaml b/releasenotes/notes/discard-config-option-711a7fbf20685834.yaml deleted file mode 100644 index 51d7493cc..000000000 --- a/releasenotes/notes/discard-config-option-711a7fbf20685834.yaml +++ /dev/null @@ -1,3 +0,0 @@ ---- -features: - - New config option to enable discard (trim/unmap) support for any backend. diff --git a/releasenotes/notes/disco-cinder-driver-9dac5fb04511de1f.yaml b/releasenotes/notes/disco-cinder-driver-9dac5fb04511de1f.yaml deleted file mode 100644 index 763e61522..000000000 --- a/releasenotes/notes/disco-cinder-driver-9dac5fb04511de1f.yaml +++ /dev/null @@ -1,3 +0,0 @@ ---- -features: - - Added backend driver for DISCO storage. 
diff --git a/releasenotes/notes/disco-options-94fe9eaad5e397a5.yaml b/releasenotes/notes/disco-options-94fe9eaad5e397a5.yaml deleted file mode 100644 index 4aca63a8b..000000000 --- a/releasenotes/notes/disco-options-94fe9eaad5e397a5.yaml +++ /dev/null @@ -1,27 +0,0 @@ ---- -upgrade: - - | - Some of DISCO driver options were incorrectly read from ``[DEFAULT]`` - section in the cinder.conf. Now those are correctly read from - ``[]`` section. This includes following options: - - * ``disco_client`` - * ``disco_client_port`` - * ``rest_ip`` - * ``choice_client`` - * ``disco_src_api_port`` - * ``retry_interval`` - - Also some options are renamed (note that 3 of them were both moved and - renamed): - - * ``rest_ip`` to ``disco_rest_ip`` - * ``choice_client`` to ``disco_choice_client`` - * ``volume_name_prefix`` to ``disco_volume_name_prefix`` - * ``snapshot_check_timeout`` to ``disco_snapshot_check_timeout`` - * ``restore_check_timeout`` to ``disco_restore_check_timeout`` - * ``clone_check_timeout`` to ``disco_clone_check_timeout`` - * ``retry_interval`` to ``disco_retry_interval`` - - Old names and locations are still supported but support will be removed in - the future. diff --git a/releasenotes/notes/dothill-drivers-removed-da00a6b83865271a.yaml b/releasenotes/notes/dothill-drivers-removed-da00a6b83865271a.yaml deleted file mode 100644 index 079929e8f..000000000 --- a/releasenotes/notes/dothill-drivers-removed-da00a6b83865271a.yaml +++ /dev/null @@ -1,3 +0,0 @@ ---- -upgrade: - - Support for Dot Hill AssuredSAN arrays has been removed. diff --git a/releasenotes/notes/downstream_genconfig-e50791497ce87ce3.yaml b/releasenotes/notes/downstream_genconfig-e50791497ce87ce3.yaml deleted file mode 100644 index 5328317e7..000000000 --- a/releasenotes/notes/downstream_genconfig-e50791497ce87ce3.yaml +++ /dev/null @@ -1,3 +0,0 @@ ---- -fixes: - - Removed the need for deployers to run tox for config reference generation. 
diff --git a/releasenotes/notes/drbd-resource-options-88599c0a8fc5b8a3.yaml b/releasenotes/notes/drbd-resource-options-88599c0a8fc5b8a3.yaml deleted file mode 100644 index ac2f7cf97..000000000 --- a/releasenotes/notes/drbd-resource-options-88599c0a8fc5b8a3.yaml +++ /dev/null @@ -1,6 +0,0 @@ ---- -features: - - Configuration options for the DRBD driver that will be - applied to DRBD resources; the default values should - be okay for most installations. - diff --git a/releasenotes/notes/ds8k_specify_pool_lss-5329489c263951ba.yaml b/releasenotes/notes/ds8k_specify_pool_lss-5329489c263951ba.yaml deleted file mode 100644 index 602a9d7f4..000000000 --- a/releasenotes/notes/ds8k_specify_pool_lss-5329489c263951ba.yaml +++ /dev/null @@ -1,5 +0,0 @@ ---- -features: - - DS8K driver adds two new properties into extra-specs so that - user can specify pool or lss or both of them to allocate volume - in their expected area. diff --git a/releasenotes/notes/enforce_min_vmware-a080055111b04692.yaml b/releasenotes/notes/enforce_min_vmware-a080055111b04692.yaml deleted file mode 100644 index 7b4c59b0a..000000000 --- a/releasenotes/notes/enforce_min_vmware-a080055111b04692.yaml +++ /dev/null @@ -1,3 +0,0 @@ ---- -upgrade: - - The VMware VMDK driver now enforces minimum vCenter version of 5.1. diff --git a/releasenotes/notes/eqlx-volume-manage-unmanage-a24ec7f0d9989df3.yaml b/releasenotes/notes/eqlx-volume-manage-unmanage-a24ec7f0d9989df3.yaml deleted file mode 100644 index 252263bbe..000000000 --- a/releasenotes/notes/eqlx-volume-manage-unmanage-a24ec7f0d9989df3.yaml +++ /dev/null @@ -1,3 +0,0 @@ ---- -features: - - Added manage/unmanage volume support for Dell Equallogic driver. 
diff --git a/releasenotes/notes/falconstor-cinder-driver-dcb61441cd7601c5.yaml b/releasenotes/notes/falconstor-cinder-driver-dcb61441cd7601c5.yaml deleted file mode 100644 index 595889209..000000000 --- a/releasenotes/notes/falconstor-cinder-driver-dcb61441cd7601c5.yaml +++ /dev/null @@ -1,4 +0,0 @@ ---- -features: - - Added backend driver for FalconStor FreeStor. - diff --git a/releasenotes/notes/falconstor-extend-driver-to-utilize-multiple-fss-pools-dc6f2bc84432a672.yaml b/releasenotes/notes/falconstor-extend-driver-to-utilize-multiple-fss-pools-dc6f2bc84432a672.yaml deleted file mode 100644 index f46da98a4..000000000 --- a/releasenotes/notes/falconstor-extend-driver-to-utilize-multiple-fss-pools-dc6f2bc84432a672.yaml +++ /dev/null @@ -1,5 +0,0 @@ ---- -features: - - Added ability to specify multiple storage pools in the FalconStor driver. -deprecations: - - The fss_pool option is deprecated. Use fss_pools instead. diff --git a/releasenotes/notes/fix-extend-volume-in-thin-pools-57a3d53be4d47704.yaml b/releasenotes/notes/fix-extend-volume-in-thin-pools-57a3d53be4d47704.yaml deleted file mode 100644 index 77bf1f7ca..000000000 --- a/releasenotes/notes/fix-extend-volume-in-thin-pools-57a3d53be4d47704.yaml +++ /dev/null @@ -1,7 +0,0 @@ ---- -fixes: - - Fixed volume extend issue that allowed a tenant with enough quota to - extend the volume to limits greater than what the volume backend supported. -other: - - Now extend won't work on disabled services because it's going through the - scheduler, unlike how it worked before. 
diff --git a/releasenotes/notes/fix-hnas-clone-with-different-volume-type-b969897cba2610cc.yaml b/releasenotes/notes/fix-hnas-clone-with-different-volume-type-b969897cba2610cc.yaml deleted file mode 100644 index 40dab28d6..000000000 --- a/releasenotes/notes/fix-hnas-clone-with-different-volume-type-b969897cba2610cc.yaml +++ /dev/null @@ -1,5 +0,0 @@ ---- -fixes: - - Fixed HNAS bug that placed a cloned volume in the same pool as its - source, even if the clone had a different pool specification. Driver will - not allow to make clones using a different volume type anymore. \ No newline at end of file diff --git a/releasenotes/notes/fix-hnas-stats-reporting-1335e582e46ff440.yaml b/releasenotes/notes/fix-hnas-stats-reporting-1335e582e46ff440.yaml deleted file mode 100644 index a397fa4ca..000000000 --- a/releasenotes/notes/fix-hnas-stats-reporting-1335e582e46ff440.yaml +++ /dev/null @@ -1,4 +0,0 @@ ---- -fixes: - - Fixed issue where the HNAS driver was not correctly reporting THIN - provisioning and related stats. diff --git a/releasenotes/notes/friendly-zone-names-d5e131d356040de0.yaml b/releasenotes/notes/friendly-zone-names-d5e131d356040de0.yaml deleted file mode 100644 index ffba6562f..000000000 --- a/releasenotes/notes/friendly-zone-names-d5e131d356040de0.yaml +++ /dev/null @@ -1,10 +0,0 @@ ---- -features: - - Cinder FC Zone Manager Friendly Zone Names - This feature adds support for Fibre Channel user - friendly zone names if implemented by the volume driver. - If the volume driver passes the host name and - storage system to the Fibre Channel Zone Manager - in the conn_info structure, the zone manager - will use these names in structuring the zone - name to provide a user friendly zone name. 
diff --git a/releasenotes/notes/fujitsu-eternus-dx-fc-741319960195215c.yaml b/releasenotes/notes/fujitsu-eternus-dx-fc-741319960195215c.yaml deleted file mode 100644 index 2ac289dce..000000000 --- a/releasenotes/notes/fujitsu-eternus-dx-fc-741319960195215c.yaml +++ /dev/null @@ -1,3 +0,0 @@ ---- -features: - - Added backend driver for Fujitsu ETERNUS DX (FC). diff --git a/releasenotes/notes/fujitsu-eternus-dx-iscsi-e796beffb740db89.yaml b/releasenotes/notes/fujitsu-eternus-dx-iscsi-e796beffb740db89.yaml deleted file mode 100644 index 3cbccf6aa..000000000 --- a/releasenotes/notes/fujitsu-eternus-dx-iscsi-e796beffb740db89.yaml +++ /dev/null @@ -1,3 +0,0 @@ ---- -features: - - Added backend driver for Fujitsu ETERNUS DX (iSCSI). diff --git a/releasenotes/notes/fusionstorage-cinder-driver-8f3bca98f6e2065a.yaml b/releasenotes/notes/fusionstorage-cinder-driver-8f3bca98f6e2065a.yaml deleted file mode 100644 index 8cc89cf7d..000000000 --- a/releasenotes/notes/fusionstorage-cinder-driver-8f3bca98f6e2065a.yaml +++ /dev/null @@ -1,3 +0,0 @@ ---- -features: - - Added backend driver for Huawei FusionStorage. diff --git a/releasenotes/notes/general-upgrades-notes-120f022aa5bfa1ea.yaml b/releasenotes/notes/general-upgrades-notes-120f022aa5bfa1ea.yaml deleted file mode 100644 index 1daf74ab0..000000000 --- a/releasenotes/notes/general-upgrades-notes-120f022aa5bfa1ea.yaml +++ /dev/null @@ -1,26 +0,0 @@ ---- -issues: - - Cinder services are now automatically downgrading RPC - messages to be understood by the oldest version of a - service among all the deployment. Disabled and dead - services are also taken into account. It is important to - keep service list up to date, without old, unused - records. This can be done using ``cinder-manage service - remove`` command. Once situation is cleaned up services - should be either restarted or ``SIGHUP`` signal should - be issued to their processes to force them to reload - version pins. 
Please note that cinder-api does not - support ``SIGHUP`` signal. -upgrade: - - If during a *live* upgrade from Liberty a backup service - will be killed while processing a restore request it may - happen that such backup status won't be automatically - cleaned up on the service restart. Such orphaned backups - need to be cleaned up manually. - - When performing a *live* upgrade from Liberty it may - happen that retype calls will reserve additional quota. - As by default quota reservations are invalidated after - 24 hours (config option ``reservation_expire=86400``), - we recommend either decreasing that time or watching for - unused quota reservations manually during the upgrade - process. diff --git a/releasenotes/notes/generalized-resource-filter-hg598uyvuh119008.yaml b/releasenotes/notes/generalized-resource-filter-hg598uyvuh119008.yaml deleted file mode 100644 index b2874c81d..000000000 --- a/releasenotes/notes/generalized-resource-filter-hg598uyvuh119008.yaml +++ /dev/null @@ -1,6 +0,0 @@ ---- -features: - - Added generalized resource filter support in - ``list volume``, ``list backup``, ``list snapshot``, - ``list group``, ``list group-snapshot``, ``list attachment``, - ``list message`` and ``list pools`` APIs. diff --git a/releasenotes/notes/generic-group-quota-manage-support-559629ad07a406f4.yaml b/releasenotes/notes/generic-group-quota-manage-support-559629ad07a406f4.yaml deleted file mode 100644 index cc058ac78..000000000 --- a/releasenotes/notes/generic-group-quota-manage-support-559629ad07a406f4.yaml +++ /dev/null @@ -1,3 +0,0 @@ ---- -features: - - Generic group is added into quota management. 
diff --git a/releasenotes/notes/generic-groups-in-gpfs-00bb093945a02642.yaml b/releasenotes/notes/generic-groups-in-gpfs-00bb093945a02642.yaml deleted file mode 100644 index 9e8ad113e..000000000 --- a/releasenotes/notes/generic-groups-in-gpfs-00bb093945a02642.yaml +++ /dev/null @@ -1,3 +0,0 @@ ---- -features: - - Added consistent group capability to generic volume groups in GPFS driver. diff --git a/releasenotes/notes/generic-groups-in-vnx-cbbe1346e889b5c2.yaml b/releasenotes/notes/generic-groups-in-vnx-cbbe1346e889b5c2.yaml deleted file mode 100644 index 9af2d4123..000000000 --- a/releasenotes/notes/generic-groups-in-vnx-cbbe1346e889b5c2.yaml +++ /dev/null @@ -1,3 +0,0 @@ ---- -features: - - Add consistent group capability to generic volume groups in VNX driver. diff --git a/releasenotes/notes/generic-volume-groups-69f998ce44f42737.yaml b/releasenotes/notes/generic-volume-groups-69f998ce44f42737.yaml deleted file mode 100644 index 63656e83a..000000000 --- a/releasenotes/notes/generic-volume-groups-69f998ce44f42737.yaml +++ /dev/null @@ -1,4 +0,0 @@ ---- -features: - - Introduced generic volume groups and added create/ - delete/update/list/show APIs for groups. diff --git a/releasenotes/notes/glance_v2_upload-939c5693bcc25483.yaml b/releasenotes/notes/glance_v2_upload-939c5693bcc25483.yaml deleted file mode 100644 index a737ff365..000000000 --- a/releasenotes/notes/glance_v2_upload-939c5693bcc25483.yaml +++ /dev/null @@ -1,3 +0,0 @@ ---- -fixes: - - upload-to-image using Image API v2 now correctly handles custom image properties. diff --git a/releasenotes/notes/group-snapshots-36264409bbb8850c.yaml b/releasenotes/notes/group-snapshots-36264409bbb8850c.yaml deleted file mode 100644 index 032ad189c..000000000 --- a/releasenotes/notes/group-snapshots-36264409bbb8850c.yaml +++ /dev/null @@ -1,4 +0,0 @@ ---- -features: - - Added create/delete APIs for group snapshots and - an API to create group from source. 
diff --git a/releasenotes/notes/group-type-group-specs-531e33ee0ae9f822.yaml b/releasenotes/notes/group-type-group-specs-531e33ee0ae9f822.yaml deleted file mode 100644 index 601cddc71..000000000 --- a/releasenotes/notes/group-type-group-specs-531e33ee0ae9f822.yaml +++ /dev/null @@ -1,3 +0,0 @@ ---- -features: - - Added group type and group specs APIs. diff --git a/releasenotes/notes/hbsd-driver-deletion-d81f7c4513f45d7b.yaml b/releasenotes/notes/hbsd-driver-deletion-d81f7c4513f45d7b.yaml deleted file mode 100644 index a05d6a6e2..000000000 --- a/releasenotes/notes/hbsd-driver-deletion-d81f7c4513f45d7b.yaml +++ /dev/null @@ -1,6 +0,0 @@ ---- -deprecations: - - The HBSD (Hitachi Block Storage Driver) volume drivers which supports - Hitachi Storages HUS100 and VSP family are deprecated. Support for HUS110 - family will be no longer provided. Support on VSP will be provided as - hitachi.vsp_* drivers. \ No newline at end of file diff --git a/releasenotes/notes/hitachi-unsupported-drivers-37601e5bfabcdb8f.yaml b/releasenotes/notes/hitachi-unsupported-drivers-37601e5bfabcdb8f.yaml deleted file mode 100644 index 955252089..000000000 --- a/releasenotes/notes/hitachi-unsupported-drivers-37601e5bfabcdb8f.yaml +++ /dev/null @@ -1,5 +0,0 @@ ---- -deprecations: - - The Hitachi Block Storage Driver (HBSD) and VSP driver have been marked - as unsupported and are now deprecated. enable_unsupported_driver will need - to be set to True in cinder.conf to continue to use them. \ No newline at end of file diff --git a/releasenotes/notes/hitachi-vsp-driver-87659bb496bb459b.yaml b/releasenotes/notes/hitachi-vsp-driver-87659bb496bb459b.yaml deleted file mode 100644 index 44d220c9b..000000000 --- a/releasenotes/notes/hitachi-vsp-driver-87659bb496bb459b.yaml +++ /dev/null @@ -1,4 +0,0 @@ ---- -features: - - Added new Hitachi VSP FC Driver. The VSP driver supports all Hitachi VSP - Family and HUSVM. 
diff --git a/releasenotes/notes/hitachi-vsp-iscsi-driver-cac31d7c54d7718d.yaml b/releasenotes/notes/hitachi-vsp-iscsi-driver-cac31d7c54d7718d.yaml deleted file mode 100644 index f2055424b..000000000 --- a/releasenotes/notes/hitachi-vsp-iscsi-driver-cac31d7c54d7718d.yaml +++ /dev/null @@ -1,3 +0,0 @@ ---- -features: - - Adds new Hitachi VSP iSCSI Driver. diff --git a/releasenotes/notes/hitachi-vsp-ports-option-7147289e6529d7fe.yaml b/releasenotes/notes/hitachi-vsp-ports-option-7147289e6529d7fe.yaml deleted file mode 100644 index e09f111d5..000000000 --- a/releasenotes/notes/hitachi-vsp-ports-option-7147289e6529d7fe.yaml +++ /dev/null @@ -1,16 +0,0 @@ ---- -features: - - Hitachi VSP drivers have a new config option - ``vsp_compute_target_ports`` to specify IDs - of the storage ports used to attach volumes - to compute nodes. The default is the value - specified for the existing ``vsp_target_ports`` - option. Either or both of ``vsp_compute_target_ports`` - and ``vsp_target_ports`` must be specified. - - Hitachi VSP drivers have a new config option - ``vsp_horcm_pair_target_ports`` to specify IDs of the - storage ports used to copy volumes by Shadow Image or - Thin Image. The default is the value specified for - the existing ``vsp_target_ports`` option. Either - or both of ``vsp_horcm_pair_target_ports`` and - ``vsp_target_ports`` must be specified. diff --git a/releasenotes/notes/hnas-change-snapshot-names-8153b043eb7e99fc.yaml b/releasenotes/notes/hnas-change-snapshot-names-8153b043eb7e99fc.yaml deleted file mode 100644 index c940e4eba..000000000 --- a/releasenotes/notes/hnas-change-snapshot-names-8153b043eb7e99fc.yaml +++ /dev/null @@ -1,6 +0,0 @@ ---- -deprecations: - - Support for snapshots named in the backend as ``snapshot-`` - is deprecated. Snapshots are now named in the backend as - ``.``. 
- diff --git a/releasenotes/notes/hnas-deprecate-iscsi-driver-cd521b3a2ba948f3.yaml b/releasenotes/notes/hnas-deprecate-iscsi-driver-cd521b3a2ba948f3.yaml deleted file mode 100644 index d870efb6d..000000000 --- a/releasenotes/notes/hnas-deprecate-iscsi-driver-cd521b3a2ba948f3.yaml +++ /dev/null @@ -1,10 +0,0 @@ ---- -upgrade: - - The Hitachi NAS iSCSI driver has been marked as unsupported and is - now deprecated. ``enable_unsupported_drivers`` will need to be set to - ``True`` in cinder.conf to continue to use it. -deprecations: - - The Hitachi NAS iSCSI driver has been marked as unsupported and is now - deprecated. ``enable_unsupported_drivers`` will need to be set to ``True`` - in cinder.conf to continue to use it. The driver will be removed in the - next release. diff --git a/releasenotes/notes/hnas-deprecate-nfs-driver-0d114bbe141b5d90.yaml b/releasenotes/notes/hnas-deprecate-nfs-driver-0d114bbe141b5d90.yaml deleted file mode 100644 index 8a5f3452d..000000000 --- a/releasenotes/notes/hnas-deprecate-nfs-driver-0d114bbe141b5d90.yaml +++ /dev/null @@ -1,5 +0,0 @@ ---- -deprecations: - - The Hitachi NAS NFS driver has been marked as unsupported and is now - deprecated. enable_unsupported_driver will need to be set to True - in cinder.conf to continue to use it. \ No newline at end of file diff --git a/releasenotes/notes/hnas-deprecated-svc-volume-type-77768f27946aadf4.yaml b/releasenotes/notes/hnas-deprecated-svc-volume-type-77768f27946aadf4.yaml deleted file mode 100644 index 43d8160b8..000000000 --- a/releasenotes/notes/hnas-deprecated-svc-volume-type-77768f27946aadf4.yaml +++ /dev/null @@ -1,4 +0,0 @@ ---- -deprecations: - - Deprecated the configuration option ``hnas_svcX_volume_type``. Use option - ``hnas_svcX_pool_name`` to indicate the name of the services (pools). 
diff --git a/releasenotes/notes/hnas-drivers-refactoring-9dbe297ffecced21.yaml b/releasenotes/notes/hnas-drivers-refactoring-9dbe297ffecced21.yaml deleted file mode 100644 index 6ae575112..000000000 --- a/releasenotes/notes/hnas-drivers-refactoring-9dbe297ffecced21.yaml +++ /dev/null @@ -1,7 +0,0 @@ -upgrade: - - HNAS drivers have new configuration paths. Users should now use - ``cinder.volume.drivers.hitachi.hnas_nfs.HNASNFSDriver`` for HNAS NFS driver - and ``cinder.volume.drivers.hitachi.hnas_iscsi.HNASISCSIDriver`` for HNAS - iSCSI driver. -deprecations: - - The old HNAS drivers configuration paths have been marked for deprecation. diff --git a/releasenotes/notes/hnas-list-manageable-9329866618fa9a9c.yaml b/releasenotes/notes/hnas-list-manageable-9329866618fa9a9c.yaml deleted file mode 100644 index 1e99f1ebd..000000000 --- a/releasenotes/notes/hnas-list-manageable-9329866618fa9a9c.yaml +++ /dev/null @@ -1,4 +0,0 @@ ---- -features: - - Added the ability to list manageable volumes and snapshots to HNAS NFS - driver. \ No newline at end of file diff --git a/releasenotes/notes/hnas-manage-unmanage-snapshot-support-40c8888cc594a7be.yaml b/releasenotes/notes/hnas-manage-unmanage-snapshot-support-40c8888cc594a7be.yaml deleted file mode 100644 index 5c8298fb0..000000000 --- a/releasenotes/notes/hnas-manage-unmanage-snapshot-support-40c8888cc594a7be.yaml +++ /dev/null @@ -1,3 +0,0 @@ ---- -features: - - Added manage/unmanage snapshot support to the HNAS NFS driver. diff --git a/releasenotes/notes/hnas-remove-iscsi-driver-419e9c08133f9f0a.yaml b/releasenotes/notes/hnas-remove-iscsi-driver-419e9c08133f9f0a.yaml deleted file mode 100644 index 9d01d0d1f..000000000 --- a/releasenotes/notes/hnas-remove-iscsi-driver-419e9c08133f9f0a.yaml +++ /dev/null @@ -1,4 +0,0 @@ ---- -upgrade: - - The Hitachi NAS Platform iSCSI driver was marked as not supported in the - Ocata realease and has now been removed. 
diff --git a/releasenotes/notes/hnas_deprecate_xml-16840b5a8c25d15e.yaml b/releasenotes/notes/hnas_deprecate_xml-16840b5a8c25d15e.yaml deleted file mode 100644 index e64d53af9..000000000 --- a/releasenotes/notes/hnas_deprecate_xml-16840b5a8c25d15e.yaml +++ /dev/null @@ -1,7 +0,0 @@ ---- -upgrade: - - HNAS drivers will now read configuration from cinder.conf. -deprecations: - - The XML configuration file used by the HNAS drivers is now - deprecated and will no longer be used in the future. Please - use cinder.conf for all driver configuration. diff --git a/releasenotes/notes/huawei-backend-capabilities-report-optimization-d1c18d9f62ef71aa.yaml b/releasenotes/notes/huawei-backend-capabilities-report-optimization-d1c18d9f62ef71aa.yaml deleted file mode 100644 index f5db63b6c..000000000 --- a/releasenotes/notes/huawei-backend-capabilities-report-optimization-d1c18d9f62ef71aa.yaml +++ /dev/null @@ -1,3 +0,0 @@ ---- -features: - - Optimize backend reporting capabilities for Huawei drivers. diff --git a/releasenotes/notes/huawei-generic-group-bc3fb7236efc58e7.yaml b/releasenotes/notes/huawei-generic-group-bc3fb7236efc58e7.yaml deleted file mode 100644 index d9c7fc3a7..000000000 --- a/releasenotes/notes/huawei-generic-group-bc3fb7236efc58e7.yaml +++ /dev/null @@ -1,3 +0,0 @@ ---- -features: - - Add CG capability to generic volume groups in Huawei driver. diff --git a/releasenotes/notes/huawei-iscsi-multipath-support-a056201883909287.yaml b/releasenotes/notes/huawei-iscsi-multipath-support-a056201883909287.yaml deleted file mode 100644 index 38a8f8a45..000000000 --- a/releasenotes/notes/huawei-iscsi-multipath-support-a056201883909287.yaml +++ /dev/null @@ -1,3 +0,0 @@ ---- -upgrade: - - Support for iSCSI multipath in Huawei driver. 
\ No newline at end of file diff --git a/releasenotes/notes/huawei-manage-unmanage-snapshot-e35ff844d72fedfb.yaml b/releasenotes/notes/huawei-manage-unmanage-snapshot-e35ff844d72fedfb.yaml deleted file mode 100644 index 5484ba334..000000000 --- a/releasenotes/notes/huawei-manage-unmanage-snapshot-e35ff844d72fedfb.yaml +++ /dev/null @@ -1,3 +0,0 @@ ---- -features: - - Added manage/unmanage snapshot support for Huawei drivers. \ No newline at end of file diff --git a/releasenotes/notes/huawei-pool-disktype-support-7c1f64639b42a48a.yaml b/releasenotes/notes/huawei-pool-disktype-support-7c1f64639b42a48a.yaml deleted file mode 100644 index 137a779d3..000000000 --- a/releasenotes/notes/huawei-pool-disktype-support-7c1f64639b42a48a.yaml +++ /dev/null @@ -1,4 +0,0 @@ ---- -features: - - Add support for reporting pool disk type in Huawei - driver. diff --git a/releasenotes/notes/huawei-support-iscsi-configuration-in-replication-7ec53737b95ffa54.yaml b/releasenotes/notes/huawei-support-iscsi-configuration-in-replication-7ec53737b95ffa54.yaml deleted file mode 100644 index 3b79696f6..000000000 --- a/releasenotes/notes/huawei-support-iscsi-configuration-in-replication-7ec53737b95ffa54.yaml +++ /dev/null @@ -1,3 +0,0 @@ ---- -upgrade: - - Support iSCSI configuration in replication in Huawei driver. diff --git a/releasenotes/notes/huawei-support-manage-volume-2a746cd05621423d.yaml b/releasenotes/notes/huawei-support-manage-volume-2a746cd05621423d.yaml deleted file mode 100644 index 3344fd5c7..000000000 --- a/releasenotes/notes/huawei-support-manage-volume-2a746cd05621423d.yaml +++ /dev/null @@ -1,3 +0,0 @@ ---- -features: - - Added manage/unmanage volume support for Huawei drivers. 
diff --git a/releasenotes/notes/hybrid-aggregates-in-netapp-cdot-drivers-f6afa9884cac4e86.yaml b/releasenotes/notes/hybrid-aggregates-in-netapp-cdot-drivers-f6afa9884cac4e86.yaml deleted file mode 100644 index b3bfba0f2..000000000 --- a/releasenotes/notes/hybrid-aggregates-in-netapp-cdot-drivers-f6afa9884cac4e86.yaml +++ /dev/null @@ -1,4 +0,0 @@ ---- -features: - - Add support for hybrid aggregates to the NetApp cDOT drivers. - diff --git a/releasenotes/notes/ibm-flashsystem-manage-unmanage-88e56837102f838c.yaml b/releasenotes/notes/ibm-flashsystem-manage-unmanage-88e56837102f838c.yaml deleted file mode 100644 index 318307013..000000000 --- a/releasenotes/notes/ibm-flashsystem-manage-unmanage-88e56837102f838c.yaml +++ /dev/null @@ -1,4 +0,0 @@ ---- -features: - - Volume manage/unmanage support for IBM FlashSystem FC and iSCSI drivers. - diff --git a/releasenotes/notes/ibm-storwzie-mirror-volume-ffe4c9bde78cdf1d.yaml b/releasenotes/notes/ibm-storwzie-mirror-volume-ffe4c9bde78cdf1d.yaml deleted file mode 100644 index c1fbe26e4..000000000 --- a/releasenotes/notes/ibm-storwzie-mirror-volume-ffe4c9bde78cdf1d.yaml +++ /dev/null @@ -1,3 +0,0 @@ ---- -features: - - Add mirrored volume support in IBM SVC/Storwize driver. diff --git a/releasenotes/notes/ibmsvciogrpselection-e607739b6f655a27.yaml b/releasenotes/notes/ibmsvciogrpselection-e607739b6f655a27.yaml deleted file mode 100644 index 206415eb3..000000000 --- a/releasenotes/notes/ibmsvciogrpselection-e607739b6f655a27.yaml +++ /dev/null @@ -1,9 +0,0 @@ ---- -features: - - | - In IBM Storwize_SVC driver, user could specify only one IO - group per backend definition. The user now may specify a comma separated - list of IO groups, and at the time of creating the volume, the driver will - select an IO group which has the least number of volumes associated with - it. The change is backward compatible, meaning single value is still - supported. 
diff --git a/releasenotes/notes/image-volume-type-c91b7cff3cb41c13.yaml b/releasenotes/notes/image-volume-type-c91b7cff3cb41c13.yaml deleted file mode 100644 index ee53d0a86..000000000 --- a/releasenotes/notes/image-volume-type-c91b7cff3cb41c13.yaml +++ /dev/null @@ -1,3 +0,0 @@ ---- -features: - - Support cinder_img_volume_type property in glance image metadata to specify volume type. diff --git a/releasenotes/notes/improvement-to-get-group-detail-0e8b68114e79a8a2.yaml b/releasenotes/notes/improvement-to-get-group-detail-0e8b68114e79a8a2.yaml deleted file mode 100644 index b1d602863..000000000 --- a/releasenotes/notes/improvement-to-get-group-detail-0e8b68114e79a8a2.yaml +++ /dev/null @@ -1,5 +0,0 @@ ---- -features: - - Added support for querying group details with volume ids - which are in this group. - For example, "groups/{group_id}?list_volume=True". diff --git a/releasenotes/notes/improvement-to-query-consistency-group-detail-84a906d45383e067.yaml b/releasenotes/notes/improvement-to-query-consistency-group-detail-84a906d45383e067.yaml deleted file mode 100644 index da53724a2..000000000 --- a/releasenotes/notes/improvement-to-query-consistency-group-detail-84a906d45383e067.yaml +++ /dev/null @@ -1,5 +0,0 @@ ---- -features: - - Added support for querying volumes filtered by group_id - using 'group_id' optional URL parameter. - For example, "volumes/detail?group_id={consistency_group_id}". diff --git a/releasenotes/notes/infinidat-add-infinibox-driver-67cc33fc3fbff1bb.yaml b/releasenotes/notes/infinidat-add-infinibox-driver-67cc33fc3fbff1bb.yaml deleted file mode 100644 index 7245ec3ca..000000000 --- a/releasenotes/notes/infinidat-add-infinibox-driver-67cc33fc3fbff1bb.yaml +++ /dev/null @@ -1,3 +0,0 @@ ---- -features: - - Added driver for the InfiniBox storage array. 
diff --git a/releasenotes/notes/infinidat-compression-a828904aaba90da2.yaml b/releasenotes/notes/infinidat-compression-a828904aaba90da2.yaml deleted file mode 100644 index 8f3894c4a..000000000 --- a/releasenotes/notes/infinidat-compression-a828904aaba90da2.yaml +++ /dev/null @@ -1,6 +0,0 @@ ---- -features: - - Added support for volume compression in INFINIDAT driver. - Compression is available on InfiniBox 3.0 onward. - To enable volume compression, set ``infinidat_use_compression`` to - True in the backend section in the Cinder configuration file. diff --git a/releasenotes/notes/infinidat-group-support-44cd0715de1ea502.yaml b/releasenotes/notes/infinidat-group-support-44cd0715de1ea502.yaml deleted file mode 100644 index 62f96d6c9..000000000 --- a/releasenotes/notes/infinidat-group-support-44cd0715de1ea502.yaml +++ /dev/null @@ -1,3 +0,0 @@ ---- -features: - - Add CG capability to generic volume groups in INFINIDAT driver. diff --git a/releasenotes/notes/infinidat-infinisdk-04f0edc0d0a597e3.yaml b/releasenotes/notes/infinidat-infinisdk-04f0edc0d0a597e3.yaml deleted file mode 100644 index 48957c578..000000000 --- a/releasenotes/notes/infinidat-infinisdk-04f0edc0d0a597e3.yaml +++ /dev/null @@ -1,4 +0,0 @@ ---- -upgrade: - - INFINIDAT volume driver now requires the 'infinisdk' python module to be - installed. diff --git a/releasenotes/notes/infinidat-iscsi-support-78e0d34d9e7e08c4.yaml b/releasenotes/notes/infinidat-iscsi-support-78e0d34d9e7e08c4.yaml deleted file mode 100644 index 8616ab429..000000000 --- a/releasenotes/notes/infinidat-iscsi-support-78e0d34d9e7e08c4.yaml +++ /dev/null @@ -1,3 +0,0 @@ ---- -features: - - Support for iSCSI in INFINIDAT InfiniBox driver. 
diff --git a/releasenotes/notes/infinidat-qos-50d743591543db98.yaml b/releasenotes/notes/infinidat-qos-50d743591543db98.yaml deleted file mode 100644 index 956ec09e7..000000000 --- a/releasenotes/notes/infinidat-qos-50d743591543db98.yaml +++ /dev/null @@ -1,4 +0,0 @@ ---- -features: - - Added support for QoS in the INFINIDAT InfiniBox driver. - QoS is available on InfiniBox 4.0 onward. diff --git a/releasenotes/notes/kaminario-cinder-driver-bug-1646692-7aad3b7496689aa7.yaml b/releasenotes/notes/kaminario-cinder-driver-bug-1646692-7aad3b7496689aa7.yaml deleted file mode 100644 index 85afe7531..000000000 --- a/releasenotes/notes/kaminario-cinder-driver-bug-1646692-7aad3b7496689aa7.yaml +++ /dev/null @@ -1,4 +0,0 @@ ---- -fixes: - - Fixed Non-WAN port filter issue in Kaminario iSCSI driver. - diff --git a/releasenotes/notes/kaminario-cinder-driver-bug-1646766-fe810f5801d24f2f.yaml b/releasenotes/notes/kaminario-cinder-driver-bug-1646766-fe810f5801d24f2f.yaml deleted file mode 100644 index 533750c5c..000000000 --- a/releasenotes/notes/kaminario-cinder-driver-bug-1646766-fe810f5801d24f2f.yaml +++ /dev/null @@ -1,5 +0,0 @@ ---- -fixes: - - Fixed issue of managing a VG with more than one - volume in Kaminario FC and iSCSI Cinder drivers. - diff --git a/releasenotes/notes/kaminario-cinder-driver-remove-deprecate-option-831920f4d25e2979.yaml b/releasenotes/notes/kaminario-cinder-driver-remove-deprecate-option-831920f4d25e2979.yaml deleted file mode 100644 index 1316fe724..000000000 --- a/releasenotes/notes/kaminario-cinder-driver-remove-deprecate-option-831920f4d25e2979.yaml +++ /dev/null @@ -1,5 +0,0 @@ ---- -upgrade: - - Removed deprecated option ``kaminario_nodedup_substring`` - in Kaminario FC and iSCSI Cinder drivers. 
- diff --git a/releasenotes/notes/kaminario-fc-cinder-driver-8266641036281a44.yaml b/releasenotes/notes/kaminario-fc-cinder-driver-8266641036281a44.yaml deleted file mode 100644 index f3fb1549f..000000000 --- a/releasenotes/notes/kaminario-fc-cinder-driver-8266641036281a44.yaml +++ /dev/null @@ -1,3 +0,0 @@ ---- -features: - - New FC Cinder volume driver for Kaminario K2 all-flash arrays. diff --git a/releasenotes/notes/kaminario-iscsi-cinder-driver-c34fadf63cd253de.yaml b/releasenotes/notes/kaminario-iscsi-cinder-driver-c34fadf63cd253de.yaml deleted file mode 100644 index c4cb9759e..000000000 --- a/releasenotes/notes/kaminario-iscsi-cinder-driver-c34fadf63cd253de.yaml +++ /dev/null @@ -1,3 +0,0 @@ ---- -features: - - New iSCSI Cinder volume driver for Kaminario K2 all-flash arrays. diff --git a/releasenotes/notes/lefthand-consistency-groups-d73f8e418884fcc6.yaml b/releasenotes/notes/lefthand-consistency-groups-d73f8e418884fcc6.yaml deleted file mode 100644 index 2852d72af..000000000 --- a/releasenotes/notes/lefthand-consistency-groups-d73f8e418884fcc6.yaml +++ /dev/null @@ -1,3 +0,0 @@ ---- -features: - - Consistency group support has been added to the LeftHand backend driver. diff --git a/releasenotes/notes/lefthand-manage-unmanage-snapshot-04de39d268d51169.yaml b/releasenotes/notes/lefthand-manage-unmanage-snapshot-04de39d268d51169.yaml deleted file mode 100644 index 2c863b933..000000000 --- a/releasenotes/notes/lefthand-manage-unmanage-snapshot-04de39d268d51169.yaml +++ /dev/null @@ -1,3 +0,0 @@ ---- -features: - - Added snapshot manage/unmanage support to the HPE LeftHand driver. 
diff --git a/releasenotes/notes/list-manageable-86c77fc39c5b2cc9.yaml b/releasenotes/notes/list-manageable-86c77fc39c5b2cc9.yaml deleted file mode 100644 index e8f776d55..000000000 --- a/releasenotes/notes/list-manageable-86c77fc39c5b2cc9.yaml +++ /dev/null @@ -1,5 +0,0 @@ ---- -features: - - Added the ability to list manageable volumes and snapshots via GET - operation on the /v2//os-volume-manage and - /v2//os-snapshot-manage URLs, respectively. diff --git a/releasenotes/notes/live_migration_v3-ae98c0d00e64c954.yaml b/releasenotes/notes/live_migration_v3-ae98c0d00e64c954.yaml deleted file mode 100644 index 98b2f4fbd..000000000 --- a/releasenotes/notes/live_migration_v3-ae98c0d00e64c954.yaml +++ /dev/null @@ -1,3 +0,0 @@ ---- -fixes: - - Fixed live migration on EMC VMAX3 backends. diff --git a/releasenotes/notes/lvm-type-default-to-auto-a2ad554fc8bb25f2.yaml b/releasenotes/notes/lvm-type-default-to-auto-a2ad554fc8bb25f2.yaml deleted file mode 100644 index 130b9db71..000000000 --- a/releasenotes/notes/lvm-type-default-to-auto-a2ad554fc8bb25f2.yaml +++ /dev/null @@ -1,7 +0,0 @@ ---- -other: - - | - Modify default lvm_type setting from thick to auto. This will result in - Cinder preferring thin on init, if there are no LV's in the VG it will - create a thin-pool and use thin. If there are LV's and no thin-pool - it will continue using thick. 
diff --git a/releasenotes/notes/manage-resources-v3-c06096f75927fd3b.yaml b/releasenotes/notes/manage-resources-v3-c06096f75927fd3b.yaml deleted file mode 100644 index 94ae8b3be..000000000 --- a/releasenotes/notes/manage-resources-v3-c06096f75927fd3b.yaml +++ /dev/null @@ -1,4 +0,0 @@ ---- -features: - - The v2 API extensions os-volume-manage and os-snapshot-manage have been - mapped to the v3 resources manageable_volumes and manageable_snapshots diff --git a/releasenotes/notes/mark-blockbridge-unsupported-c9e55df0eb2e3c9f.yaml b/releasenotes/notes/mark-blockbridge-unsupported-c9e55df0eb2e3c9f.yaml deleted file mode 100644 index 4cd0cb355..000000000 --- a/releasenotes/notes/mark-blockbridge-unsupported-c9e55df0eb2e3c9f.yaml +++ /dev/null @@ -1,12 +0,0 @@ ---- -upgrade: - - | - The Blockbridge driver has been marked as unsupported and is now - deprecated. ``enable_unsupported_drivers`` will need to be set to - ``True`` in cinder.conf to continue to use it. -deprecations: - - | - The Blockbridge driver has been marked as unsupported and is now - deprecated. ``enable_unsupported_drivers`` will need to be set to - ``True`` in cinder.conf to continue to use it. If its support status - does not change it will be removed in the next release. diff --git a/releasenotes/notes/mark-cloudbyte-unsupported-8615a127439ed262.yaml b/releasenotes/notes/mark-cloudbyte-unsupported-8615a127439ed262.yaml deleted file mode 100644 index 3c8d8d143..000000000 --- a/releasenotes/notes/mark-cloudbyte-unsupported-8615a127439ed262.yaml +++ /dev/null @@ -1,11 +0,0 @@ ---- -upgrade: - - The CloudByte driver has been marked as unsupported and is - now deprecated. ``enable_unsupported_drivers`` will need to - be set to ``True`` in cinder.conf to continue to use it. -deprecations: - - The CloudByte driver has been marked as unsupported and is - now deprecated. ``enable_unsupported_drivers`` will need to - be set to ``True`` in cinder.conf to continue to use it. 
If - its support status does not change it will be removed in - the next release. diff --git a/releasenotes/notes/mark-dothill-unsupported-7f95115b7b24e53c.yaml b/releasenotes/notes/mark-dothill-unsupported-7f95115b7b24e53c.yaml deleted file mode 100644 index ece1ea313..000000000 --- a/releasenotes/notes/mark-dothill-unsupported-7f95115b7b24e53c.yaml +++ /dev/null @@ -1,11 +0,0 @@ ---- -upgrade: - - The DotHill drivers have been marked as unsupported and are - now deprecated. ``enable_unsupported_drivers`` will need to - be set to ``True`` in cinder.conf to continue to use it. -deprecations: - - The DotHill drivers has been marked as unsupported and are - now deprecated. ``enable_unsupported_drivers`` will need to - be set to ``True`` in cinder.conf to continue to use it. If - its support status does not change it will be removed in - the next release. diff --git a/releasenotes/notes/mark-eternus-deprecated-d078221912385aed.yaml b/releasenotes/notes/mark-eternus-deprecated-d078221912385aed.yaml deleted file mode 100644 index 1414b5eec..000000000 --- a/releasenotes/notes/mark-eternus-deprecated-d078221912385aed.yaml +++ /dev/null @@ -1,14 +0,0 @@ ---- -upgrade: - - | - The Fujitsu ETERNUS iSCSI and FC drivers have been marked as unsupported - and are now deprecated. ``enable_unsupported_driver`` will need to be set - to ``True`` in the driver's section in cinder.conf to continue - to use them. -deprecations: - - | - The Fujitsu ETERNUS iSCSI and FC drivers have been marked as unsupported - and are now deprecated. ``enable_unsupported_driver`` will need to be set - to ``True`` in the driver's section in cinder.conf to continue to use them. - If their support status does not change, they will be removed in the Queens - development cycle. 
diff --git a/releasenotes/notes/mark-hpe-xp-unsupported-c9ce6cfbab622e46.yaml b/releasenotes/notes/mark-hpe-xp-unsupported-c9ce6cfbab622e46.yaml deleted file mode 100644 index e04209a4b..000000000 --- a/releasenotes/notes/mark-hpe-xp-unsupported-c9ce6cfbab622e46.yaml +++ /dev/null @@ -1,11 +0,0 @@ ---- -upgrade: - - The HPE XP driver has been marked as unsupported and is - now deprecated. ``enable_unsupported_drivers`` will need to - be set to ``True`` in cinder.conf to continue to use it. -deprecations: - - The HPE XP driver has been marked as unsupported and is - now deprecated. ``enable_unsupported_drivers`` will need to - be set to ``True`` in cinder.conf to continue to use it. If - its support status does not change it will be removed in - the next release. diff --git a/releasenotes/notes/mark-infortrend-deprecated-553de89f8dd58aa8.yaml b/releasenotes/notes/mark-infortrend-deprecated-553de89f8dd58aa8.yaml deleted file mode 100644 index 9c5bc0ff8..000000000 --- a/releasenotes/notes/mark-infortrend-deprecated-553de89f8dd58aa8.yaml +++ /dev/null @@ -1,15 +0,0 @@ ---- -upgrade: - - | - The Infortrend drivers have been marked as unsupported - and are now deprecated. ``enable_unsupported_driver`` will - need to be set to ``True`` in the driver's section in - cinder.conf to continue to use them. -deprecations: - - | - The Infortrend drivers have been marked as unsupported - and are now deprecated. ``enable_unsupported_driver`` will - need to be set to ``True`` in the driver's section in - cinder.conf to continue to use them. If their support - status does not change, they will be removed in the Queens - development cycle. 
diff --git a/releasenotes/notes/mark-nexentaedge-unsupported-56d184fdccc6eaac.yaml b/releasenotes/notes/mark-nexentaedge-unsupported-56d184fdccc6eaac.yaml deleted file mode 100644 index c66c358f7..000000000 --- a/releasenotes/notes/mark-nexentaedge-unsupported-56d184fdccc6eaac.yaml +++ /dev/null @@ -1,11 +0,0 @@ ---- -upgrade: - - The Nexenta Edge drivers have been marked as unsupported and are - now deprecated. ``enable_unsupported_drivers`` will need to - be set to ``True`` in cinder.conf to continue to use it. -deprecations: - - The Nexenta Edge drivers has been marked as unsupported and are - now deprecated. ``enable_unsupported_drivers`` will need to - be set to ``True`` in cinder.conf to continue to use it. If - its support status does not change it will be removed in - the next release. diff --git a/releasenotes/notes/mark-nimble-deprecated-9f7d1c178b48fa39.yaml b/releasenotes/notes/mark-nimble-deprecated-9f7d1c178b48fa39.yaml deleted file mode 100644 index fc855560a..000000000 --- a/releasenotes/notes/mark-nimble-deprecated-9f7d1c178b48fa39.yaml +++ /dev/null @@ -1,13 +0,0 @@ ---- -upgrade: - - | - The Nimble driver has been marked as unsupported and is now deprecated. - ``enable_unsupported_driver`` will need to be set to ``True`` in - the driver's section in cinder.conf to continue to use it. -deprecations: - - | - The Nimble driver has been marked as unsupported and is now deprecated. - ``enable_unsupported_driver`` will need to be set to ``True`` in - the driver's section in cinder.conf to continue to use it. If the - support status does not change, they will be removed in the Queens - development cycle. 
diff --git a/releasenotes/notes/mark-qnap-unsupported-79bd8ece9a2bfcd2.yaml b/releasenotes/notes/mark-qnap-unsupported-79bd8ece9a2bfcd2.yaml deleted file mode 100644 index 519bfdfe8..000000000 --- a/releasenotes/notes/mark-qnap-unsupported-79bd8ece9a2bfcd2.yaml +++ /dev/null @@ -1,12 +0,0 @@ ---- -upgrade: - - | - The QNAP driver has been marked as unsupported and is now - deprecated. ``enable_unsupported_drivers`` will need to be set to - ``True`` in cinder.conf to continue to use it. -deprecations: - - | - The QNAP driver has been marked as unsupported and is now - deprecated. ``enable_unsupported_drivers`` will need to be set to - ``True`` in cinder.conf to continue to use it. If its support status - does not change it will be removed in the next release. diff --git a/releasenotes/notes/mark-reduxio-deprecated-b435032a8fdb16f2.yaml b/releasenotes/notes/mark-reduxio-deprecated-b435032a8fdb16f2.yaml deleted file mode 100644 index b4886b509..000000000 --- a/releasenotes/notes/mark-reduxio-deprecated-b435032a8fdb16f2.yaml +++ /dev/null @@ -1,13 +0,0 @@ ---- -upgrade: - - | - The Reduxio driver has been marked unsupported and is now - deprecated. ``use_unsupported_driver`` will need to be set to - ``True`` in the driver's section in cinder.conf to use it. -deprecations: - - | - The Reduxio driver has been marked unsupported and is now - deprecated. ``use_unsupported_driver`` will need to be set to - ``True`` in the driver's section in cinder.conf to use it. - If its support status does not change, the driver will be - removed in the Queens development cycle. diff --git a/releasenotes/notes/mark-scality-unsupported-530370e034a6f488.yaml b/releasenotes/notes/mark-scality-unsupported-530370e034a6f488.yaml deleted file mode 100644 index 76f4cbbf3..000000000 --- a/releasenotes/notes/mark-scality-unsupported-530370e034a6f488.yaml +++ /dev/null @@ -1,11 +0,0 @@ ---- -upgrade: - - The Scality driver has been marked as unsupported and is - now deprecated. 
``enable_unsupported_drivers`` will need to - be set to ``True`` in cinder.conf to continue to use it. -deprecations: - - The Scality driver has been marked as unsupported and is - now deprecated. ``enable_unsupported_drivers`` will need to - be set to ``True`` in cinder.conf to continue to use it. If - its support status does not change it will be removed in - the next release. diff --git a/releasenotes/notes/mark-synology-deprecated-134ba9764e14af67.yaml b/releasenotes/notes/mark-synology-deprecated-134ba9764e14af67.yaml deleted file mode 100644 index 2668f3a72..000000000 --- a/releasenotes/notes/mark-synology-deprecated-134ba9764e14af67.yaml +++ /dev/null @@ -1,14 +0,0 @@ ---- -upgrade: - - | - The Synology driver has been marked as unsupported and is now - deprecated. ``enable_unsupported_driver`` will need to be - set to ``True`` in the driver's section in ``cinder.conf`` to - continue to use it. -deprecations: - - | - The Synology driver has been marked as unsupported and is now - deprecated. ``enable_unsupported_driver`` will need to be - set to ``True`` in the driver's section in ``cinder.conf`` to - continue to use it. If its support status does not change, - the driver will be removed in the Queens development cycle. diff --git a/releasenotes/notes/mark-violin-unsupported-fdf6b34cf9847359.yaml b/releasenotes/notes/mark-violin-unsupported-fdf6b34cf9847359.yaml deleted file mode 100644 index 7e844508d..000000000 --- a/releasenotes/notes/mark-violin-unsupported-fdf6b34cf9847359.yaml +++ /dev/null @@ -1,12 +0,0 @@ ---- -upgrade: - - | - The Violin drivers have been marked as unsupported and are now - deprecated. ``enable_unsupported_drivers`` will need to be set to - ``True`` in cinder.conf to continue to use them. -deprecations: - - | - The Violin drivers have been marked as unsupported and are now - deprecated. ``enable_unsupported_drivers`` will need to be set to - ``True`` in cinder.conf to continue to use them. 
If its support status - does not change it will be removed in the next release. diff --git a/releasenotes/notes/mark-vzstorage-deprecated-598e5c4f2dc65f29.yaml b/releasenotes/notes/mark-vzstorage-deprecated-598e5c4f2dc65f29.yaml deleted file mode 100644 index 8c232c663..000000000 --- a/releasenotes/notes/mark-vzstorage-deprecated-598e5c4f2dc65f29.yaml +++ /dev/null @@ -1,14 +0,0 @@ ---- -upgrade: - - | - The Virtuozzo driver has been marked as unsupported and is now - deprecated. ``enable_unsupported_driver`` will need to be set - to ``True`` in the driver's section in cinder.conf to continue - to use it. -deprecations: - - | - The Virtuozzo driver has been marked as unsupported and is now - deprecated. ``enable_unsupported_driver`` will need to be set - to ``True`` in the driver's section in cinder.conf to continue - to use it. If its support status does not change, they will be - removed in the Queens development cycle. diff --git a/releasenotes/notes/mark-xio-deprecated-18c914e15695d793.yaml b/releasenotes/notes/mark-xio-deprecated-18c914e15695d793.yaml deleted file mode 100644 index d2bd023fd..000000000 --- a/releasenotes/notes/mark-xio-deprecated-18c914e15695d793.yaml +++ /dev/null @@ -1,14 +0,0 @@ ---- -upgrade: - - | - The X-IO driver has been marked as unsupported and is now - deprecated. ``enable_unsupported_driver`` will need to be set - to ``True`` in the driver's section in cinder.conf to continue - to use it. -deprecations: - - | - The X-IO driver has been marked as unsupported and is now - deprecated. ``enable_unsupported_driver`` will need to be set - to ``True`` in the driver's section in cinder.conf to continue - to use it. If its support status does not change, they will be - removed in the Queens development cycle. 
diff --git a/releasenotes/notes/metadata-for-volume-summary-729ba648db4e4e54.yaml b/releasenotes/notes/metadata-for-volume-summary-729ba648db4e4e54.yaml deleted file mode 100644 index 2c4f7b2f3..000000000 --- a/releasenotes/notes/metadata-for-volume-summary-729ba648db4e4e54.yaml +++ /dev/null @@ -1,4 +0,0 @@ ---- -features: - - Added support for get all distinct volumes' metadata from - volume-summary API. \ No newline at end of file diff --git a/releasenotes/notes/migrate-cg-to-generic-volume-groups-f82ad3658f3e567c.yaml b/releasenotes/notes/migrate-cg-to-generic-volume-groups-f82ad3658f3e567c.yaml deleted file mode 100644 index e9c5c96ea..000000000 --- a/releasenotes/notes/migrate-cg-to-generic-volume-groups-f82ad3658f3e567c.yaml +++ /dev/null @@ -1,5 +0,0 @@ ---- -upgrade: - - Operator needs to perform ``cinder-manage db - online_data_migrations`` to migrate existing consistency - groups to generic volume groups. diff --git a/releasenotes/notes/move-eqlx-driver-to-dell-emc-fe5d2b484c47b7a6.yaml b/releasenotes/notes/move-eqlx-driver-to-dell-emc-fe5d2b484c47b7a6.yaml deleted file mode 100644 index 838551a71..000000000 --- a/releasenotes/notes/move-eqlx-driver-to-dell-emc-fe5d2b484c47b7a6.yaml +++ /dev/null @@ -1,6 +0,0 @@ ---- -upgrade: - - The EqualLogic driver is moved to the dell_emc directory and has been - rebranded to its current Dell EMC PS Series name. The volume_driver - entry in cinder.conf needs to be changed to - ``cinder.volume.drivers.dell_emc.ps.PSSeriesISCSIDriver``. diff --git a/releasenotes/notes/move-scaleio-driver-to-dell-emc-dir-c195374ca6b7e98d.yaml b/releasenotes/notes/move-scaleio-driver-to-dell-emc-dir-c195374ca6b7e98d.yaml deleted file mode 100644 index ac24aef92..000000000 --- a/releasenotes/notes/move-scaleio-driver-to-dell-emc-dir-c195374ca6b7e98d.yaml +++ /dev/null @@ -1,5 +0,0 @@ ---- -upgrade: - - The ScaleIO driver is moved to the dell_emc directory. 
- volume_driver entry in cinder.conf needs to be changed to - ``cinder.volume.drivers.dell_emc.scaleio.driver.ScaleIODriver``. diff --git a/releasenotes/notes/move-xtremio-driver-to-dell-emc-dir-f7e07a502cafd78f.yaml b/releasenotes/notes/move-xtremio-driver-to-dell-emc-dir-f7e07a502cafd78f.yaml deleted file mode 100644 index ee29c3932..000000000 --- a/releasenotes/notes/move-xtremio-driver-to-dell-emc-dir-f7e07a502cafd78f.yaml +++ /dev/null @@ -1,6 +0,0 @@ ---- -upgrade: - - The XtremIO driver is moved to the dell_emc directory. - volume_driver entry in cinder.conf needs to be changed to - ``cinder.volume.drivers.dell_emc.xtremio.XtremIOISCSIDriver`` or - ``cinder.volume.drivers.dell_emc.xtremio.XtremIOFCDriver``. diff --git a/releasenotes/notes/nec-nondisruptive-backup-471284d07cd806ce.yaml b/releasenotes/notes/nec-nondisruptive-backup-471284d07cd806ce.yaml deleted file mode 100644 index b7cee46de..000000000 --- a/releasenotes/notes/nec-nondisruptive-backup-471284d07cd806ce.yaml +++ /dev/null @@ -1,4 +0,0 @@ ---- -features: - - Enable backup snapshot optimal path by implementing attach - and detach snapshot in the NEC driver. diff --git a/releasenotes/notes/nec_storage_volume_driver-57663f9ecce1ae19.yaml b/releasenotes/notes/nec_storage_volume_driver-57663f9ecce1ae19.yaml deleted file mode 100644 index 6876ba5c8..000000000 --- a/releasenotes/notes/nec_storage_volume_driver-57663f9ecce1ae19.yaml +++ /dev/null @@ -1,3 +0,0 @@ ---- -features: - - Added backend FC and iSCSI drivers for NEC Storage. 
diff --git a/releasenotes/notes/netapp-cDOT-whole-backend-replication-support-59d7537fe3d0eb05.yaml b/releasenotes/notes/netapp-cDOT-whole-backend-replication-support-59d7537fe3d0eb05.yaml deleted file mode 100644 index f0ce83a0f..000000000 --- a/releasenotes/notes/netapp-cDOT-whole-backend-replication-support-59d7537fe3d0eb05.yaml +++ /dev/null @@ -1,8 +0,0 @@ ---- -features: - - Added host-level (whole back end replication - v2.1) replication support - to the NetApp cDOT drivers (iSCSI, FC, NFS). -upgrade: - - While configuring NetApp cDOT back ends, new configuration options - (``replication_device`` and ``netapp_replication_aggregate_map``) must be - added in order to use the host-level failover feature. diff --git a/releasenotes/notes/netapp-chap-iscsi-auth-264cd942b2a76094.yaml b/releasenotes/notes/netapp-chap-iscsi-auth-264cd942b2a76094.yaml deleted file mode 100644 index f1026b0ec..000000000 --- a/releasenotes/notes/netapp-chap-iscsi-auth-264cd942b2a76094.yaml +++ /dev/null @@ -1,3 +0,0 @@ ---- -features: - - Added iSCSI CHAP uni-directional authentication for NetApp drivers. diff --git a/releasenotes/notes/netapp-data-ontap-deprecate-7mode-drivers-a39bfcb3afefc9a5.yaml b/releasenotes/notes/netapp-data-ontap-deprecate-7mode-drivers-a39bfcb3afefc9a5.yaml deleted file mode 100644 index 68679d555..000000000 --- a/releasenotes/notes/netapp-data-ontap-deprecate-7mode-drivers-a39bfcb3afefc9a5.yaml +++ /dev/null @@ -1,6 +0,0 @@ ---- -deprecations: - - The 7-Mode Data ONTAP configuration of the NetApp Unified driver is - deprecated as of the Ocata release and will be removed in the Queens - release. Other configurations of the NetApp Unified driver, including - Clustered Data ONTAP and E-series, are unaffected. 
diff --git a/releasenotes/notes/netapp-deprecate-eseries-drivers-f0787de87ba4f3f7.yaml b/releasenotes/notes/netapp-deprecate-eseries-drivers-f0787de87ba4f3f7.yaml deleted file mode 100644 index 3fe8bea0a..000000000 --- a/releasenotes/notes/netapp-deprecate-eseries-drivers-f0787de87ba4f3f7.yaml +++ /dev/null @@ -1,6 +0,0 @@ ---- -deprecations: - - The NetApp E-Series drivers are deprecated as of the Pike release and - will be removed in the Queens release. Other configurations of the - NetApp driver, i.e Clustered Data ONTAP and Solidfire, are - unaffected. diff --git a/releasenotes/notes/netapp-eseries-consistency-groups-4f6b2af2d20c94e9.yaml b/releasenotes/notes/netapp-eseries-consistency-groups-4f6b2af2d20c94e9.yaml deleted file mode 100644 index 31ca9f0ad..000000000 --- a/releasenotes/notes/netapp-eseries-consistency-groups-4f6b2af2d20c94e9.yaml +++ /dev/null @@ -1,3 +0,0 @@ ---- -features: - - Support for Consistency Groups in the NetApp E-Series Volume Driver. diff --git a/releasenotes/notes/netapp-nfs-consistency-group-support-83eccc2da91ee19b.yaml b/releasenotes/notes/netapp-nfs-consistency-group-support-83eccc2da91ee19b.yaml deleted file mode 100644 index 4852b5d7c..000000000 --- a/releasenotes/notes/netapp-nfs-consistency-group-support-83eccc2da91ee19b.yaml +++ /dev/null @@ -1,3 +0,0 @@ ---- -features: - - Added Cinder consistency group for the NetApp NFS driver. diff --git a/releasenotes/notes/netapp_cdot_report_shared_blocks_exhaustion-073a73e05daf09d4.yaml b/releasenotes/notes/netapp_cdot_report_shared_blocks_exhaustion-073a73e05daf09d4.yaml deleted file mode 100644 index 24536b388..000000000 --- a/releasenotes/notes/netapp_cdot_report_shared_blocks_exhaustion-073a73e05daf09d4.yaml +++ /dev/null @@ -1,9 +0,0 @@ ---- -features: - - The NetApp cDOT drivers report to the scheduler, - for each FlexVol pool, the fraction of the shared - block limit that has been consumed by dedupe and - cloning operations. 
This value, netapp_dedupe_used_percent, - may be used in the filter & goodness functions for better - placement of new Cinder volumes. - diff --git a/releasenotes/notes/new-nova-config-section-2a7a51a0572e7064.yaml b/releasenotes/notes/new-nova-config-section-2a7a51a0572e7064.yaml deleted file mode 100644 index dc91f1ae4..000000000 --- a/releasenotes/notes/new-nova-config-section-2a7a51a0572e7064.yaml +++ /dev/null @@ -1,11 +0,0 @@ ---- -features: - - a [nova] section is added to configure the connection - to the compute service, which is needed to the - InstanceLocalityFilter, for example. -deprecations: - - The os_privileged_xxx and nova_xxx in the [default] - section are deprecated in favor of the settings in - the [nova] section. -fixes: - - Fixed using of the user's token in the nova client (bug #1686616) diff --git a/releasenotes/notes/new-osprofiler-call-0bb1a305c8e8f9cc.yaml b/releasenotes/notes/new-osprofiler-call-0bb1a305c8e8f9cc.yaml deleted file mode 100644 index 15d9819c6..000000000 --- a/releasenotes/notes/new-osprofiler-call-0bb1a305c8e8f9cc.yaml +++ /dev/null @@ -1,7 +0,0 @@ ---- -upgrade: - - New config option added. ``"connection_string"`` in [profiler] - section is used to specify OSProfiler driver connection - string, for example, - ``"connection_string = messaging://"``, - ``"connection_string = mongodb://localhost:27017"`` diff --git a/releasenotes/notes/nexenta-edge-iscsi-b3f12c7a719e8b8c.yaml b/releasenotes/notes/nexenta-edge-iscsi-b3f12c7a719e8b8c.yaml deleted file mode 100644 index bc9e75971..000000000 --- a/releasenotes/notes/nexenta-edge-iscsi-b3f12c7a719e8b8c.yaml +++ /dev/null @@ -1,3 +0,0 @@ ---- -features: - - Added backend driver for Nexenta Edge iSCSI storage. 
diff --git a/releasenotes/notes/nexenta-ns5-5d223f3b60f58aad.yaml b/releasenotes/notes/nexenta-ns5-5d223f3b60f58aad.yaml deleted file mode 100644 index a75d617f8..000000000 --- a/releasenotes/notes/nexenta-ns5-5d223f3b60f58aad.yaml +++ /dev/null @@ -1,3 +0,0 @@ ---- -features: - - Added extend method to NFS driver for NexentaStor 5. diff --git a/releasenotes/notes/nexentaedge-iscsi-ee5d6c05d65f97af.yaml b/releasenotes/notes/nexentaedge-iscsi-ee5d6c05d65f97af.yaml deleted file mode 100644 index ceeea9881..000000000 --- a/releasenotes/notes/nexentaedge-iscsi-ee5d6c05d65f97af.yaml +++ /dev/null @@ -1,3 +0,0 @@ ---- -features: - - Added HA support for NexentaEdge iSCSI driver diff --git a/releasenotes/notes/nexentaedge-nbd-eb48268723141f12.yaml b/releasenotes/notes/nexentaedge-nbd-eb48268723141f12.yaml deleted file mode 100644 index 3f8f2fb2b..000000000 --- a/releasenotes/notes/nexentaedge-nbd-eb48268723141f12.yaml +++ /dev/null @@ -1,2 +0,0 @@ -features: - - Added NBD driver for NexentaEdge. diff --git a/releasenotes/notes/nexentastor5-https-6d58004838cfab30.yaml b/releasenotes/notes/nexentastor5-https-6d58004838cfab30.yaml deleted file mode 100644 index 0212830a7..000000000 --- a/releasenotes/notes/nexentastor5-https-6d58004838cfab30.yaml +++ /dev/null @@ -1,4 +0,0 @@ ---- -features: - - Added secure HTTP support for REST API calls in the NexentaStor5 driver. - Use of HTTPS is set True by default with option ``nexenta_use_https``. diff --git a/releasenotes/notes/nexentastor5_iscsi-e1d88b07d15c660b.yaml b/releasenotes/notes/nexentastor5_iscsi-e1d88b07d15c660b.yaml deleted file mode 100644 index cbb9ea84a..000000000 --- a/releasenotes/notes/nexentastor5_iscsi-e1d88b07d15c660b.yaml +++ /dev/null @@ -1,3 +0,0 @@ ---- -features: - - Added backend driver for NexentaStor5 iSCSI storage. 
diff --git a/releasenotes/notes/nexentastor5_nfs-bcc8848716daea63.yaml b/releasenotes/notes/nexentastor5_nfs-bcc8848716daea63.yaml deleted file mode 100644 index 8c5ff56f8..000000000 --- a/releasenotes/notes/nexentastor5_nfs-bcc8848716daea63.yaml +++ /dev/null @@ -1,3 +0,0 @@ ---- -features: - - Added backend driver for NexentaStor5 NFS storage. diff --git a/releasenotes/notes/nfs-snapshots-21b641300341cba1.yaml b/releasenotes/notes/nfs-snapshots-21b641300341cba1.yaml deleted file mode 100644 index 647d512dd..000000000 --- a/releasenotes/notes/nfs-snapshots-21b641300341cba1.yaml +++ /dev/null @@ -1,5 +0,0 @@ ---- -features: - - Added support for snapshots in the NFS driver. This functionality is only - enabled if ``nfs_snapshot_support`` is set to ``True`` in cinder.conf. - Cloning volumes is only supported if the source volume is not attached. diff --git a/releasenotes/notes/nfs_backup_no_overwrite-be7b545453baf7a3.yaml b/releasenotes/notes/nfs_backup_no_overwrite-be7b545453baf7a3.yaml deleted file mode 100644 index 73227a65c..000000000 --- a/releasenotes/notes/nfs_backup_no_overwrite-be7b545453baf7a3.yaml +++ /dev/null @@ -1,5 +0,0 @@ ---- -fixes: - - | - Fix NFS backup driver, we now support multiple backups on the same - container, they are no longer overwritten. diff --git a/releasenotes/notes/nimble-add-fc-support-0007fdbd647be947.yaml b/releasenotes/notes/nimble-add-fc-support-0007fdbd647be947.yaml deleted file mode 100644 index 2f7e6b555..000000000 --- a/releasenotes/notes/nimble-add-fc-support-0007fdbd647be947.yaml +++ /dev/null @@ -1,3 +0,0 @@ ---- -features: - - Added Nimble Storage Fibre Channel backend driver. 
diff --git a/releasenotes/notes/nimble-add-force-backup-539e1e5c72f84e61.yaml b/releasenotes/notes/nimble-add-force-backup-539e1e5c72f84e61.yaml deleted file mode 100644 index 4cc21156f..000000000 --- a/releasenotes/notes/nimble-add-force-backup-539e1e5c72f84e61.yaml +++ /dev/null @@ -1,3 +0,0 @@ ---- -features: - - Support for force backup of in-use Cinder volumes in Nimble driver. diff --git a/releasenotes/notes/nimble-manage-unmanage-1d6d5fc23cbe59a1.yaml b/releasenotes/notes/nimble-manage-unmanage-1d6d5fc23cbe59a1.yaml deleted file mode 100644 index ed84f9f60..000000000 --- a/releasenotes/notes/nimble-manage-unmanage-1d6d5fc23cbe59a1.yaml +++ /dev/null @@ -1,3 +0,0 @@ ---- -features: - - Manage and unmanage support has been added to the Nimble backend driver. diff --git a/releasenotes/notes/nimble-qos-specs-8cd006777c66a64e.yaml b/releasenotes/notes/nimble-qos-specs-8cd006777c66a64e.yaml deleted file mode 100644 index 387518546..000000000 --- a/releasenotes/notes/nimble-qos-specs-8cd006777c66a64e.yaml +++ /dev/null @@ -1,5 +0,0 @@ ---- -features: - - Add Support for QoS in the Nimble Storage driver. - QoS is available from Nimble OS release 4.x and above. - - Add Support for deduplication of volumes in the Nimble Storage driver. diff --git a/releasenotes/notes/nimble-rest-api-support-75c2324ee462d026.yaml b/releasenotes/notes/nimble-rest-api-support-75c2324ee462d026.yaml deleted file mode 100644 index f8843b6d4..000000000 --- a/releasenotes/notes/nimble-rest-api-support-75c2324ee462d026.yaml +++ /dev/null @@ -1,4 +0,0 @@ ---- -features: - - The Nimble backend driver has been updated to use REST - for array communication. 
diff --git a/releasenotes/notes/operate-migrated-groups-with-cp-apis-e5835c6673191805.yaml b/releasenotes/notes/operate-migrated-groups-with-cp-apis-e5835c6673191805.yaml deleted file mode 100644 index 241007d7a..000000000 --- a/releasenotes/notes/operate-migrated-groups-with-cp-apis-e5835c6673191805.yaml +++ /dev/null @@ -1,29 +0,0 @@ ---- -upgrade: - - | - After running the migration script to migrate CGs to - generic volume groups, CG and group APIs work as follows. - - * Create CG only creates in the groups table. - * Modify CG modifies in the CG table if the CG is in the - CG table, otherwise it modifies in the groups table. - * Delete CG deletes from the CG or the groups table - depending on where the CG is. - * List CG checks both CG and groups tables. - * List CG Snapshots checks both the CG and the groups - tables. - * Show CG checks both tables. - * Show CG Snapshot checks both tables. - * Create CG Snapshot creates either in the CG or the groups - table depending on where the CG is. - * Create CG from Source creates in either the CG or the - groups table depending on the source. - * Create Volume adds the volume either to the CG or the - group. - * default_cgsnapshot_type is reserved for migrating CGs. - * Group APIs will only write/read in/from the groups table. - * Group APIs will not work on groups with default_cgsnapshot_type. - * Groups with default_cgsnapshot_type can only be operated by - CG APIs. - * After CG tables are removed, we will allow default_cgsnapshot_type - to be used by group APIs. 
diff --git a/releasenotes/notes/os-brick-lock-dir-35bdd8ec0c0ef46d.yaml b/releasenotes/notes/os-brick-lock-dir-35bdd8ec0c0ef46d.yaml deleted file mode 100644 index 6e87f17ec..000000000 --- a/releasenotes/notes/os-brick-lock-dir-35bdd8ec0c0ef46d.yaml +++ /dev/null @@ -1,9 +0,0 @@ ---- -issues: - - When running Nova Compute and Cinder Volume or Backup services on the same - host they must use a shared lock directory to avoid rare race conditions - that can cause volume operation failures (primarily attach/detach of - volumes). This is done by setting the ``lock_path`` to the same directory - in the ``oslo_concurrency`` section of nova.conf and cinder.conf. This issue - affects all previous releases utilizing os-brick and shared operations - on hosts between Nova Compute and Cinder data services. diff --git a/releasenotes/notes/per-backend-az-28727aca360a1cc8.yaml b/releasenotes/notes/per-backend-az-28727aca360a1cc8.yaml deleted file mode 100644 index db5148e6a..000000000 --- a/releasenotes/notes/per-backend-az-28727aca360a1cc8.yaml +++ /dev/null @@ -1,7 +0,0 @@ ---- -features: - - | - Availability zones may now be configured per backend in a multi-backend - configuration. Individual backend sections can now set the configuration - option ``backend_availability_zone``. If set, this value will override - the [DEFAULT] ``storage_availability_zone`` setting. diff --git a/releasenotes/notes/period-task-clean-reservation-0e0617a7905df923.yaml b/releasenotes/notes/period-task-clean-reservation-0e0617a7905df923.yaml deleted file mode 100644 index 7f51fd4c2..000000000 --- a/releasenotes/notes/period-task-clean-reservation-0e0617a7905df923.yaml +++ /dev/null @@ -1,5 +0,0 @@ ---- -features: - - Added periodic task to clean expired reservation in cinder scheduler. - Added a configuration option ``reservation_clean_interval`` to handle - the interval. 
diff --git a/releasenotes/notes/permit_volume_type_operations-b2e130fd7088f335.yaml b/releasenotes/notes/permit_volume_type_operations-b2e130fd7088f335.yaml deleted file mode 100644 index 4ca0aeb52..000000000 --- a/releasenotes/notes/permit_volume_type_operations-b2e130fd7088f335.yaml +++ /dev/null @@ -1,14 +0,0 @@ ---- -fixes: - - | - Enabled a cloud operator to correctly manage policy for - volume type operations. To permit volume type operations - for specific user, you can for example do as follows. - - * Add ``storage_type_admin`` role. - * Add ``admin_or_storage_type_admin`` rule to ``policy.json``, e.g. - ``"admin_or_storage_type_admin": "is_admin:True or role:storage_type_admin",`` - * Modify rule for types_manage and volume_type_access, e.g. - ``"volume_extension:types_manage": "rule:admin_or_storage_type_admin", - "volume_extension:volume_type_access:addProjectAccess": "rule:admin_or_storage_type_admin", - "volume_extension:volume_type_access:removeProjectAccess": "rule:admin_or_storage_type_admin",`` diff --git a/releasenotes/notes/prophetstor-generic-groups-c7136c32b2f75c0a.yaml b/releasenotes/notes/prophetstor-generic-groups-c7136c32b2f75c0a.yaml deleted file mode 100644 index 6e3eda03f..000000000 --- a/releasenotes/notes/prophetstor-generic-groups-c7136c32b2f75c0a.yaml +++ /dev/null @@ -1,3 +0,0 @@ ---- -features: - - Added consistent group capability to generic volume groups in ProphetStor driver. diff --git a/releasenotes/notes/pure-custom-user-agent-dcca4cb44b69e763.yaml b/releasenotes/notes/pure-custom-user-agent-dcca4cb44b69e763.yaml deleted file mode 100644 index 6deea69c1..000000000 --- a/releasenotes/notes/pure-custom-user-agent-dcca4cb44b69e763.yaml +++ /dev/null @@ -1,4 +0,0 @@ ---- -upgrade: - - Pure volume drivers will need 'purestorage' python module v1.6.0 or newer. - Support for 1.4.x has been removed. 
diff --git a/releasenotes/notes/pure-enhanced-stats-42a684fe4546d1b1.yaml b/releasenotes/notes/pure-enhanced-stats-42a684fe4546d1b1.yaml deleted file mode 100644 index 45e679f8c..000000000 --- a/releasenotes/notes/pure-enhanced-stats-42a684fe4546d1b1.yaml +++ /dev/null @@ -1,11 +0,0 @@ ---- -features: - - Added additional metrics reported to the scheduler for Pure Volume Drivers - for better filtering and weighing functions. - - Added config option to enable/disable automatically calculation an - over-subscription ratio max for Pure Volume Drivers. When disabled the - drivers will now respect the max_oversubscription_ratio config option. -fixes: - - Fixed issue where Pure Volume Drivers would ignore reserved_percentage - config option. - diff --git a/releasenotes/notes/pure-eradicate-on-delete-1e15e1440d5cd4d6.yaml b/releasenotes/notes/pure-eradicate-on-delete-1e15e1440d5cd4d6.yaml deleted file mode 100644 index 25b955e37..000000000 --- a/releasenotes/notes/pure-eradicate-on-delete-1e15e1440d5cd4d6.yaml +++ /dev/null @@ -1,9 +0,0 @@ ---- -features: - - New config option for Pure Storage volume drivers pure_eradicate_on_delete. - When enabled will permanently eradicate data instead of placing into - pending eradication state. -fixes: - - Allow for eradicating Pure Storage volumes, snapshots, and pgroups when - deleting their Cinder counterpart. - diff --git a/releasenotes/notes/pure-fc-wwpn-case-c1d97f3fa7663acf.yaml b/releasenotes/notes/pure-fc-wwpn-case-c1d97f3fa7663acf.yaml deleted file mode 100644 index fdb208595..000000000 --- a/releasenotes/notes/pure-fc-wwpn-case-c1d97f3fa7663acf.yaml +++ /dev/null @@ -1,5 +0,0 @@ ---- -fixes: - - Fix issue with PureFCDriver where partially case sensitive comparison of - connector wwpn could cause initialize_connection to fail when attempting - to create duplicate Purity host. 
diff --git a/releasenotes/notes/pure-generic-volume-groups-2b0941103f7c01cb.yaml b/releasenotes/notes/pure-generic-volume-groups-2b0941103f7c01cb.yaml deleted file mode 100644 index 07492be36..000000000 --- a/releasenotes/notes/pure-generic-volume-groups-2b0941103f7c01cb.yaml +++ /dev/null @@ -1,3 +0,0 @@ ---- -features: - - Add consistent group capability to generic volume groups in Pure drivers. diff --git a/releasenotes/notes/pure-list-mangeable-fed4a1b23212f545.yaml b/releasenotes/notes/pure-list-mangeable-fed4a1b23212f545.yaml deleted file mode 100644 index 18170b56d..000000000 --- a/releasenotes/notes/pure-list-mangeable-fed4a1b23212f545.yaml +++ /dev/null @@ -1,4 +0,0 @@ ---- -features: - - Add ``get_manageable_volumes`` and ``get_manageable_snapshots`` implementations for - Pure Storage Volume Drivers. diff --git a/releasenotes/notes/pure-v2.1-replication-0246223caaa8a9b5.yaml b/releasenotes/notes/pure-v2.1-replication-0246223caaa8a9b5.yaml deleted file mode 100644 index 5210d2f4e..000000000 --- a/releasenotes/notes/pure-v2.1-replication-0246223caaa8a9b5.yaml +++ /dev/null @@ -1,4 +0,0 @@ ---- -features: - - Added Cheesecake (v2.1) replication support to the Pure Storage Volume - drivers. \ No newline at end of file diff --git a/releasenotes/notes/pure-verify-https-requests-464320c97ba77a1f.yaml b/releasenotes/notes/pure-verify-https-requests-464320c97ba77a1f.yaml deleted file mode 100644 index 3c227aaa6..000000000 --- a/releasenotes/notes/pure-verify-https-requests-464320c97ba77a1f.yaml +++ /dev/null @@ -1,5 +0,0 @@ ---- -security: - - Pure Storage Volume Drivers can now utilize driver_ssl_cert_verify and - driver_ssl_cert_path config options to allow for secure https requests to - the FlashArray. 
diff --git a/releasenotes/notes/qb-backup-5b1f2161d160648a.yaml b/releasenotes/notes/qb-backup-5b1f2161d160648a.yaml deleted file mode 100644 index dce1aa7f3..000000000 --- a/releasenotes/notes/qb-backup-5b1f2161d160648a.yaml +++ /dev/null @@ -1,7 +0,0 @@ ---- - -fixes: - - | - A bug in the Quobyte driver was fixed that prevented backing up volumes - and snapshots - diff --git a/releasenotes/notes/quota-volume-transfer-abd1f418c6c63db0.yaml b/releasenotes/notes/quota-volume-transfer-abd1f418c6c63db0.yaml deleted file mode 100644 index ae17e7d1e..000000000 --- a/releasenotes/notes/quota-volume-transfer-abd1f418c6c63db0.yaml +++ /dev/null @@ -1,3 +0,0 @@ ---- -fixes: - - Corrected quota usage when transferring a volume between tenants. diff --git a/releasenotes/notes/rbd-support-managing-existing-snapshot-fb871a3ea98dc572.yaml b/releasenotes/notes/rbd-support-managing-existing-snapshot-fb871a3ea98dc572.yaml deleted file mode 100644 index 2781af364..000000000 --- a/releasenotes/notes/rbd-support-managing-existing-snapshot-fb871a3ea98dc572.yaml +++ /dev/null @@ -1,3 +0,0 @@ ---- -features: - - Allow rbd driver to manage existing snapshot. diff --git a/releasenotes/notes/rbd-thin-provisioning-c98522d6fe7b71ff.yaml b/releasenotes/notes/rbd-thin-provisioning-c98522d6fe7b71ff.yaml deleted file mode 100644 index 02667f9cd..000000000 --- a/releasenotes/notes/rbd-thin-provisioning-c98522d6fe7b71ff.yaml +++ /dev/null @@ -1,3 +0,0 @@ ---- -features: - - Allow the RBD driver to work with max_over_subscription_ratio. diff --git a/releasenotes/notes/rbd-v2.1-replication-64a9d0bec5987faf.yaml b/releasenotes/notes/rbd-v2.1-replication-64a9d0bec5987faf.yaml deleted file mode 100644 index a80a9c0a7..000000000 --- a/releasenotes/notes/rbd-v2.1-replication-64a9d0bec5987faf.yaml +++ /dev/null @@ -1,3 +0,0 @@ ---- -features: - - Added v2.1 replication support to RBD driver. 
diff --git a/releasenotes/notes/re-add-nexenta-driver-d3af97e33551a485.yaml b/releasenotes/notes/re-add-nexenta-driver-d3af97e33551a485.yaml deleted file mode 100644 index 09ebd51a5..000000000 --- a/releasenotes/notes/re-add-nexenta-driver-d3af97e33551a485.yaml +++ /dev/null @@ -1,6 +0,0 @@ ---- -features: - - Added Migrate and Extend for Nexenta NFS driver. - - Added Retype functionality to Nexenta iSCSI and NFS drivers. -upgrades: - - Refactored Nexenta iSCSI driver to use single target and targetgroup with multiple zvols. diff --git a/releasenotes/notes/rebranded-hpe-drivers-caf1dcef1afe37ba.yaml b/releasenotes/notes/rebranded-hpe-drivers-caf1dcef1afe37ba.yaml deleted file mode 100644 index 3a5187154..000000000 --- a/releasenotes/notes/rebranded-hpe-drivers-caf1dcef1afe37ba.yaml +++ /dev/null @@ -1,3 +0,0 @@ ---- -upgrade: - - HP drivers have been rebranded to HPE. Existing configurations will continue to work with the legacy name, but will need to be updated by the next release. diff --git a/releasenotes/notes/rebranded-vnx-driver-2fb7424ddc9c41df.yaml b/releasenotes/notes/rebranded-vnx-driver-2fb7424ddc9c41df.yaml deleted file mode 100644 index f38ac2e9e..000000000 --- a/releasenotes/notes/rebranded-vnx-driver-2fb7424ddc9c41df.yaml +++ /dev/null @@ -1,7 +0,0 @@ ---- -upgrade: - - EMC VNX driver have been rebranded to Dell EMC VNX driver. - Existing configurations will continue to work with the legacy name, - but will need to be updated by the next release. - User needs update ``volume_driver`` to - ``cinder.volume.drivers.dell_emc.vnx.driver.VNXDriver``. 
diff --git a/releasenotes/notes/redundancy-in-volume-url-4282087232e6e6f1.yaml b/releasenotes/notes/redundancy-in-volume-url-4282087232e6e6f1.yaml deleted file mode 100644 index f8c4e3e10..000000000 --- a/releasenotes/notes/redundancy-in-volume-url-4282087232e6e6f1.yaml +++ /dev/null @@ -1,5 +0,0 @@ ---- -fixes: - - | - Fixes a bug that prevented the configuration of multiple redundant - Quobyte registries in the quobyte_volume_url config option. diff --git a/releasenotes/notes/reduxio-iscsci-driver-5827c32a0c498949.yaml b/releasenotes/notes/reduxio-iscsci-driver-5827c32a0c498949.yaml deleted file mode 100644 index 799a9d483..000000000 --- a/releasenotes/notes/reduxio-iscsci-driver-5827c32a0c498949.yaml +++ /dev/null @@ -1,3 +0,0 @@ ---- -features: - - Added backend ISCSI driver for Reduxio. diff --git a/releasenotes/notes/refactor-disco-volume-driver-3ff0145707ec0f3e.yaml b/releasenotes/notes/refactor-disco-volume-driver-3ff0145707ec0f3e.yaml deleted file mode 100644 index bb1063e68..000000000 --- a/releasenotes/notes/refactor-disco-volume-driver-3ff0145707ec0f3e.yaml +++ /dev/null @@ -1,5 +0,0 @@ ---- -deprecations: - - Marked the ITRI DISCO driver option ``disco_wsdl_path`` as deprecated. - The new preferred protocol for array communication is REST and SOAP - support will be removed. diff --git a/releasenotes/notes/reject-volume_clear_size-settings-larger-than-1024MiB-30b38811da048948.yaml b/releasenotes/notes/reject-volume_clear_size-settings-larger-than-1024MiB-30b38811da048948.yaml deleted file mode 100644 index aaa609ea2..000000000 --- a/releasenotes/notes/reject-volume_clear_size-settings-larger-than-1024MiB-30b38811da048948.yaml +++ /dev/null @@ -1,4 +0,0 @@ ---- -fixes: - - Fixed 'No Space left' error by dd command when users set the config option - ``volume_clear_size`` to a value larger than the size of a volume. 
diff --git a/releasenotes/notes/remove-deprecated-driver-mappings-b927d8ef9fc3b713.yaml b/releasenotes/notes/remove-deprecated-driver-mappings-b927d8ef9fc3b713.yaml deleted file mode 100644 index d5fbac70c..000000000 --- a/releasenotes/notes/remove-deprecated-driver-mappings-b927d8ef9fc3b713.yaml +++ /dev/null @@ -1,24 +0,0 @@ ---- -upgrade: - - | - Old driver paths have been removed since they have been through our alloted - deprecation period. Make sure if you have any of these paths being set in - your cinder.conf for the volume_driver option, to update to the new driver - path listed here. - - * Old path - cinder.volume.drivers.huawei.huawei_18000.Huawei18000ISCSIDriver - * New path - cinder.volume.drivers.huawei.huawei_driver.HuaweiISCSIDriver - * Old path - cinder.volume.drivers.huawei.huawei_driver.Huawei18000ISCSIDriver - * New path - cinder.volume.drivers.huawei.huawei_driver.HuaweiISCSIDriver - * Old path - cinder.volume.drivers.huawei.huawei_18000.Huawei18000FCDriver - * New path - cinder.volume.drivers.huawei.huawei_driver.HuaweiFCDriver - * Old path - cinder.volume.drivers.huawei.huawei_driver.Huawei18000FCDriver - * New path - cinder.volume.drivers.huawei.huawei_driver.HuaweiFCDriver - * Old path - cinder.volume.drivers.san.hp.hp_3par_fc.HP3PARFCDriver - * New path - cinder.volume.drivers.hpe.hpe_3par_fc.HPE3PARFCDriver - * Old path - cinder.volume.drivers.san.hp.hp_3par_iscsi.HP3PARISCSIDriver - * New path - cinder.volume.drivers.hpe.hpe_3par_iscsi.HPE3PARISCSIDriver - * Old path - cinder.volume.drivers.san.hp.hp_lefthand_iscsi.HPLeftHandISCSIDriver - * New path - cinder.volume.drivers.hpe.hpe_lefthand_iscsi.HPELeftHandISCSIDriver - * Old path - cinder.volume.drivers.san.hp.hp_xp_fc.HPXPFCDriver - * New path - cinder.volume.drivers.hpe.hpe_xp_fc.HPEXPFCDriver diff --git a/releasenotes/notes/remove-eqlx-deprecated-options-89ba02c41d4da62a.yaml b/releasenotes/notes/remove-eqlx-deprecated-options-89ba02c41d4da62a.yaml deleted file mode 100644 index 
416db1ab1..000000000 --- a/releasenotes/notes/remove-eqlx-deprecated-options-89ba02c41d4da62a.yaml +++ /dev/null @@ -1,15 +0,0 @@ ---- -upgrade: - - | - - Removing the Dell EqualLogic driver's deprecated configuration options. - Please replace old options in your cinder.conf with the new one. - - * Removed - ``eqlx_cli_timeout`` - * Replaced with - ``ssh_conn_timeout`` - * Removed - ``eqlx_use_chap`` - * Replaced with - ``use_chap_auth`` - * Removed - ``eqlx_chap_login`` - * Replaced with - ``chap_username`` - * Removed - ``eqlx_chap_password`` - * Replaced with - ``chap_password`` diff --git a/releasenotes/notes/remove-hp-cliq-41f47fd61e47d13f.yaml b/releasenotes/notes/remove-hp-cliq-41f47fd61e47d13f.yaml deleted file mode 100644 index 02a1dc4f1..000000000 --- a/releasenotes/notes/remove-hp-cliq-41f47fd61e47d13f.yaml +++ /dev/null @@ -1,3 +0,0 @@ ---- -upgrade: - - The deprecated HP CLIQ proxy driver has now been removed. diff --git a/releasenotes/notes/remove-ibm-nas-driver-0ed204ed0a2dcf55.yaml b/releasenotes/notes/remove-ibm-nas-driver-0ed204ed0a2dcf55.yaml deleted file mode 100644 index f58600d97..000000000 --- a/releasenotes/notes/remove-ibm-nas-driver-0ed204ed0a2dcf55.yaml +++ /dev/null @@ -1,5 +0,0 @@ ---- -upgrade: - - Users of the ibmnas driver should switch to using the IBM GPFS driver to enable Cinder access to IBM NAS resources. For details configuring the IBM GPFS driver, see the GPFS config reference. - http://docs.openstack.org/liberty/config-reference/content/GPFS-driver.html -other: - - Due to the ibmnas (SONAS) driver being rendered redundant by the addition of NFS capabilities to the IBM GPFS driver, the ibmnas driver is being removed in the Mitaka release. 
diff --git a/releasenotes/notes/remove-mirrorpolicy-parameter-from-huawei-driver-d32257a60d32fd90.yaml b/releasenotes/notes/remove-mirrorpolicy-parameter-from-huawei-driver-d32257a60d32fd90.yaml deleted file mode 100644 index 6d46f8007..000000000 --- a/releasenotes/notes/remove-mirrorpolicy-parameter-from-huawei-driver-d32257a60d32fd90.yaml +++ /dev/null @@ -1,4 +0,0 @@ ---- -deprecations: - - | - Remove mirror policy parameter from huawei driver. diff --git a/releasenotes/notes/remove-scality-fa209aae9748a1f3.yaml b/releasenotes/notes/remove-scality-fa209aae9748a1f3.yaml deleted file mode 100644 index f91e8015e..000000000 --- a/releasenotes/notes/remove-scality-fa209aae9748a1f3.yaml +++ /dev/null @@ -1,5 +0,0 @@ ---- -upgrade: - - The Scality backend volume driver was marked as not - supported in the previous release and has now been - removed. diff --git a/releasenotes/notes/remove-single-backend-7bf02e525bbbdd3a.yaml b/releasenotes/notes/remove-single-backend-7bf02e525bbbdd3a.yaml deleted file mode 100644 index f165eb9ee..000000000 --- a/releasenotes/notes/remove-single-backend-7bf02e525bbbdd3a.yaml +++ /dev/null @@ -1,5 +0,0 @@ ---- -upgrade: - - Configurations that are setting backend config in ``[DEFAULT]`` - section are now not supported. You should use ``enabled_backends`` - option to set up backends. diff --git a/releasenotes/notes/remove-vol-in-error-from-cg-1ed0fde04ab2b5be.yaml b/releasenotes/notes/remove-vol-in-error-from-cg-1ed0fde04ab2b5be.yaml deleted file mode 100644 index 82efff690..000000000 --- a/releasenotes/notes/remove-vol-in-error-from-cg-1ed0fde04ab2b5be.yaml +++ /dev/null @@ -1,7 +0,0 @@ ---- -fixes: - - Previously the only way to remove volumes in error - states from a consistency-group was to delete the - consistency group and create it again. Now it is - possible to remove volumes in error and error_deleting - states. 
diff --git a/releasenotes/notes/remove-volume-clear-shred-bde9f7f9ff430feb.yaml b/releasenotes/notes/remove-volume-clear-shred-bde9f7f9ff430feb.yaml deleted file mode 100644 index a83a99436..000000000 --- a/releasenotes/notes/remove-volume-clear-shred-bde9f7f9ff430feb.yaml +++ /dev/null @@ -1,7 +0,0 @@ ---- -upgrade: - - The volume_clear option to use `shred` was deprecated - in the Newton release and has now been removed. Since - deprecation, this option has performed the same action - as the `zero` option. Config settings for `shred` should - be updated to be set to `zero` for continued operation. diff --git a/releasenotes/notes/remove-xml-api-392b41f387e60eb1.yaml b/releasenotes/notes/remove-xml-api-392b41f387e60eb1.yaml deleted file mode 100644 index f04f41c6e..000000000 --- a/releasenotes/notes/remove-xml-api-392b41f387e60eb1.yaml +++ /dev/null @@ -1,5 +0,0 @@ ---- -upgrade: - - The XML API has been removed in Newton release. - Cinder supports only JSON API request/response format now. - diff --git a/releasenotes/notes/remove_glusterfs_volume_driver-d8fd2cf5f38e754b.yaml b/releasenotes/notes/remove_glusterfs_volume_driver-d8fd2cf5f38e754b.yaml deleted file mode 100644 index 66bcccdf5..000000000 --- a/releasenotes/notes/remove_glusterfs_volume_driver-d8fd2cf5f38e754b.yaml +++ /dev/null @@ -1,5 +0,0 @@ ---- -upgrade: - - The GlusterFS volume driver, which was deprecated - in the Newton release, has been removed. - diff --git a/releasenotes/notes/remove_lvmdriver-9c35f83132cd2ac8.yaml b/releasenotes/notes/remove_lvmdriver-9c35f83132cd2ac8.yaml deleted file mode 100644 index 4291bf3c7..000000000 --- a/releasenotes/notes/remove_lvmdriver-9c35f83132cd2ac8.yaml +++ /dev/null @@ -1,3 +0,0 @@ ---- -upgrade: - - Removed deprecated LVMISCSIDriver and LVMISERDriver. These should be switched to use the LVMVolumeDriver with the desired iscsi_helper configuration set to the desired iSCSI helper. 
diff --git a/releasenotes/notes/remove_service_filter-380e7990bfdbddc8.yaml b/releasenotes/notes/remove_service_filter-380e7990bfdbddc8.yaml deleted file mode 100644 index dec11dcdb..000000000 --- a/releasenotes/notes/remove_service_filter-380e7990bfdbddc8.yaml +++ /dev/null @@ -1,4 +0,0 @@ ---- -upgrade: - - The ``service`` filter for service list API was deprecated 3 years ago in - 2013 July (Havana). Removed this filter and please use "binary" instead. diff --git a/releasenotes/notes/remove_storwize_npiv-b704ff2d97207666.yaml b/releasenotes/notes/remove_storwize_npiv-b704ff2d97207666.yaml deleted file mode 100644 index 665f50f46..000000000 --- a/releasenotes/notes/remove_storwize_npiv-b704ff2d97207666.yaml +++ /dev/null @@ -1,3 +0,0 @@ ---- -upgrade: - - Removed the deprecated NPIV options for the Storwize backend driver. diff --git a/releasenotes/notes/remove_volume_tmp_dir_option-c83c5341e5a42378.yaml b/releasenotes/notes/remove_volume_tmp_dir_option-c83c5341e5a42378.yaml deleted file mode 100644 index 0763cbc99..000000000 --- a/releasenotes/notes/remove_volume_tmp_dir_option-c83c5341e5a42378.yaml +++ /dev/null @@ -1,7 +0,0 @@ ---- -upgrade: - - The RBD driver no longer uses the "volume_tmp_dir" - option to set where temporary files for image conversion - are stored. Set "image_conversion_dir" to configure this - in Ocata. - diff --git a/releasenotes/notes/removed-isertgtadm-7ccefab5d3e89c59.yaml b/releasenotes/notes/removed-isertgtadm-7ccefab5d3e89c59.yaml deleted file mode 100644 index 89d61ff51..000000000 --- a/releasenotes/notes/removed-isertgtadm-7ccefab5d3e89c59.yaml +++ /dev/null @@ -1,8 +0,0 @@ ---- -upgrade: - - The ISERTgtAdm target was deprecated in the Kilo release. - It has now been removed. You should now just use - LVMVolumeDriver and specify ``iscsi_helper`` for the target - driver you wish to use. In order to enable iser, please - set ``iscsi_protocol=iser`` with lioadm or tgtadm target - helpers. 
diff --git a/releasenotes/notes/removed-rpc-topic-config-options-21c2b3f0e64f884c.yaml b/releasenotes/notes/removed-rpc-topic-config-options-21c2b3f0e64f884c.yaml deleted file mode 100644 index 2d14c44ea..000000000 --- a/releasenotes/notes/removed-rpc-topic-config-options-21c2b3f0e64f884c.yaml +++ /dev/null @@ -1,6 +0,0 @@ ---- -upgrade: - - The config options ``scheduler_topic``, ``volume_topic`` - and ``backup_topic`` have been removed without a - deprecation period as these had never worked - correctly. diff --git a/releasenotes/notes/removed-scality-7151638fdac3ed9d.yaml b/releasenotes/notes/removed-scality-7151638fdac3ed9d.yaml deleted file mode 100644 index ea5c8b257..000000000 --- a/releasenotes/notes/removed-scality-7151638fdac3ed9d.yaml +++ /dev/null @@ -1,3 +0,0 @@ ---- -upgrade: - - Backend driver for Scality SRB has been removed. diff --git a/releasenotes/notes/removing-cinder-all-9f5c3d1eb230f9e6.yaml b/releasenotes/notes/removing-cinder-all-9f5c3d1eb230f9e6.yaml deleted file mode 100644 index da4657d78..000000000 --- a/releasenotes/notes/removing-cinder-all-9f5c3d1eb230f9e6.yaml +++ /dev/null @@ -1,4 +0,0 @@ ---- -upgrade: - - Removing cinder-all binary. Instead use the individual binaries like - cinder-api, cinder-backup, cinder-volume, cinder-scheduler. diff --git a/releasenotes/notes/removing-middleware-sizelimit-ba86907acbda83de.yaml b/releasenotes/notes/removing-middleware-sizelimit-ba86907acbda83de.yaml deleted file mode 100644 index 19a36a1e5..000000000 --- a/releasenotes/notes/removing-middleware-sizelimit-ba86907acbda83de.yaml +++ /dev/null @@ -1,6 +0,0 @@ ---- -upgrade: - - Removing deprecated file cinder.middleware.sizelimit. 
In your - api-paste.ini, replace - cinder.middleware.sizelimit:RequestBodySizeLimiter.factory with - oslo_middleware.sizelimit:RequestBodySizeLimiter.factory diff --git a/releasenotes/notes/rename-huawei-driver-092025e46b65cd48.yaml b/releasenotes/notes/rename-huawei-driver-092025e46b65cd48.yaml deleted file mode 100644 index f8925c725..000000000 --- a/releasenotes/notes/rename-huawei-driver-092025e46b65cd48.yaml +++ /dev/null @@ -1,3 +0,0 @@ ---- -upgrade: - - Rename Huawei18000ISCSIDriver and Huawei18000FCDriver to HuaweiISCSIDriver and HuaweiFCDriver. diff --git a/releasenotes/notes/rename_xiv_ds8k_to_ibm_storage-154eca69c44b3f95.yaml b/releasenotes/notes/rename_xiv_ds8k_to_ibm_storage-154eca69c44b3f95.yaml deleted file mode 100644 index 33e5d582c..000000000 --- a/releasenotes/notes/rename_xiv_ds8k_to_ibm_storage-154eca69c44b3f95.yaml +++ /dev/null @@ -1,15 +0,0 @@ ---- -features: - - The xiv_ds8k driver now supports IBM XIV, Spectrum - Accelerate, FlashSystem A9000, FlashSystem A9000R - and DS8000 storage systems, and was renamed to IBM - Storage Driver for OpenStack. The changes include text - changes, file names, names of cinder.conf flags, and - names of the proxy classes. -upgrade: - - Users of the IBM Storage Driver, previously known as - the IBM XIV/DS8K driver, upgrading from Mitaka or - previous releases, need to reconfigure the relevant - cinder.conf entries. In most cases the change is just - removal of the xiv-ds8k field prefix, but for details - use the driver documentation. diff --git a/releasenotes/notes/replication-group-7c6c8a153460ca58.yaml b/releasenotes/notes/replication-group-7c6c8a153460ca58.yaml deleted file mode 100644 index e73a872af..000000000 --- a/releasenotes/notes/replication-group-7c6c8a153460ca58.yaml +++ /dev/null @@ -1,6 +0,0 @@ ---- -features: - - | - Introduced replication group support and added group action APIs - enable_replication, disable_replication, failover_replication and - list_replication_targets. 
diff --git a/releasenotes/notes/replication-v2.1-3par-b3f780a109f9195c.yaml b/releasenotes/notes/replication-v2.1-3par-b3f780a109f9195c.yaml deleted file mode 100644 index e9c6d8d47..000000000 --- a/releasenotes/notes/replication-v2.1-3par-b3f780a109f9195c.yaml +++ /dev/null @@ -1,3 +0,0 @@ ---- -features: - - Added v2.1 replication support to the HPE 3PAR driver. diff --git a/releasenotes/notes/replication-v2.1-lefthand-745b72b64e5944c3.yaml b/releasenotes/notes/replication-v2.1-lefthand-745b72b64e5944c3.yaml deleted file mode 100644 index 108435bba..000000000 --- a/releasenotes/notes/replication-v2.1-lefthand-745b72b64e5944c3.yaml +++ /dev/null @@ -1,3 +0,0 @@ ---- -features: - - Added v2.1 replication support to the HPE LeftHand driver. diff --git a/releasenotes/notes/replication-v2.1-storwize-2df7bfd8c253090b.yaml b/releasenotes/notes/replication-v2.1-storwize-2df7bfd8c253090b.yaml deleted file mode 100644 index a5fa13621..000000000 --- a/releasenotes/notes/replication-v2.1-storwize-2df7bfd8c253090b.yaml +++ /dev/null @@ -1,3 +0,0 @@ ---- -features: - - Added replication v2.1 support to the IBM Storwize driver. \ No newline at end of file diff --git a/releasenotes/notes/retype-encrypted-volume-49b66d3e8e65f9a5.yaml b/releasenotes/notes/retype-encrypted-volume-49b66d3e8e65f9a5.yaml deleted file mode 100644 index 251bbcf32..000000000 --- a/releasenotes/notes/retype-encrypted-volume-49b66d3e8e65f9a5.yaml +++ /dev/null @@ -1,4 +0,0 @@ ---- -features: - - Support for retype volumes with different encryptions including - changes from unencrypted types to encrypted types and vice-versa. 
diff --git a/releasenotes/notes/rpc-apis-3.0-b745f429c11d8198.yaml b/releasenotes/notes/rpc-apis-3.0-b745f429c11d8198.yaml deleted file mode 100644 index 84f9daf17..000000000 --- a/releasenotes/notes/rpc-apis-3.0-b745f429c11d8198.yaml +++ /dev/null @@ -1,8 +0,0 @@ ---- -upgrade: - - Deployments doing continuous live upgrades from master branch should not - upgrade into Ocata before doing an upgrade which includes all the Newton's - RPC API version bump commits (scheduler, volume). If you're upgrading - deployment in a release-to-release manner, then you can safely ignore this - note. - diff --git a/releasenotes/notes/rpc_compatibility-375be8ac3158981d.yaml b/releasenotes/notes/rpc_compatibility-375be8ac3158981d.yaml deleted file mode 100644 index 31c560dd4..000000000 --- a/releasenotes/notes/rpc_compatibility-375be8ac3158981d.yaml +++ /dev/null @@ -1,16 +0,0 @@ ---- -features: - - Added RPC backward compatibility layer similar to the - one implemented in Nova. This means that Cinder - services can be upgraded one-by-one without breakage. - After all the services are upgraded SIGHUP signals - should be issued to all the services to signal them - to reload cached minimum RPC versions. Alternative - is of course restart of them. Please note that - cinder-api service doesn't support SIGHUP yet. - Please also take into account that all the rolling - upgrades capabilities are considered tech preview, - as we don't have a CI testing it yet. -upgrade: - - Starting from Mitaka release Cinder is having a tech - preview of rolling upgrades support. diff --git a/releasenotes/notes/scaleio-consistency-groups-707f9b4ffcb3c14c.yaml b/releasenotes/notes/scaleio-consistency-groups-707f9b4ffcb3c14c.yaml deleted file mode 100644 index f44dfd151..000000000 --- a/releasenotes/notes/scaleio-consistency-groups-707f9b4ffcb3c14c.yaml +++ /dev/null @@ -1,3 +0,0 @@ ---- -features: - - Added Consistency Group support in ScaleIO driver. 
diff --git a/releasenotes/notes/scaleio-default-volume-provisioning-c648118fcc8f297f.yaml b/releasenotes/notes/scaleio-default-volume-provisioning-c648118fcc8f297f.yaml deleted file mode 100644 index 4b61f576d..000000000 --- a/releasenotes/notes/scaleio-default-volume-provisioning-c648118fcc8f297f.yaml +++ /dev/null @@ -1,5 +0,0 @@ ---- -upgrade: - - EMC ScaleIO driver now uses the config option - ``san_thin_provision`` to determine the default - provisioning type. diff --git a/releasenotes/notes/scaleio-deprecate-1.32-32033134fec181bb.yaml b/releasenotes/notes/scaleio-deprecate-1.32-32033134fec181bb.yaml deleted file mode 100644 index 08ae7181e..000000000 --- a/releasenotes/notes/scaleio-deprecate-1.32-32033134fec181bb.yaml +++ /dev/null @@ -1,5 +0,0 @@ ---- -deprecations: - - | - Support for ScaleIO 1.32 is now deprecated and will be removed - in a future release. diff --git a/releasenotes/notes/scaleio-deprecate-config-1aa300d0c78ac81c.yaml b/releasenotes/notes/scaleio-deprecate-config-1aa300d0c78ac81c.yaml deleted file mode 100644 index 14c59cbd5..000000000 --- a/releasenotes/notes/scaleio-deprecate-config-1aa300d0c78ac81c.yaml +++ /dev/null @@ -1,22 +0,0 @@ ---- -deprecations: - - | - The ScaleIO Driver has deprecated several options specified - in ``cinder.conf``: - * ``sio_protection_domain_id`` - * ``sio_protection_domain_name``, - * ``sio_storage_pool_id`` - * ``sio_storage_pool_name``. - Users of the ScaleIO Driver should now utilize the - ``sio_storage_pools`` options to provide a list of - protection_domain:storage_pool pairs. - - | - The ScaleIO Driver has deprecated the ability to specify the - protection domain, as ``sio:pd_name``, and storage pool, - as ``sio:sp_name``, extra specs in volume types. - The supported way to specify a specific protection domain and - storage pool in a volume type is to define a ``pool_name`` - extra spec and set the value to the appropriate - ``protection_domain_name:storage_pool_name``. 
- - diff --git a/releasenotes/notes/scaleio-generic-volume-group-ee36e4dba8893422.yaml b/releasenotes/notes/scaleio-generic-volume-group-ee36e4dba8893422.yaml deleted file mode 100644 index ab0d9e6c2..000000000 --- a/releasenotes/notes/scaleio-generic-volume-group-ee36e4dba8893422.yaml +++ /dev/null @@ -1,3 +0,0 @@ ---- -features: - - Added consistency group support to generic volume groups in ScaleIO Driver. \ No newline at end of file diff --git a/releasenotes/notes/scaleio-get-manageable-volumes-dda1e7b8e22be59e.yaml b/releasenotes/notes/scaleio-get-manageable-volumes-dda1e7b8e22be59e.yaml deleted file mode 100644 index 01c5bdd60..000000000 --- a/releasenotes/notes/scaleio-get-manageable-volumes-dda1e7b8e22be59e.yaml +++ /dev/null @@ -1,3 +0,0 @@ ---- -features: - - Added ability to list all manageable volumes within ScaleIO Driver. \ No newline at end of file diff --git a/releasenotes/notes/scaleio-manage-existing-32217f6d1c295193.yaml b/releasenotes/notes/scaleio-manage-existing-32217f6d1c295193.yaml deleted file mode 100644 index 31a6418d9..000000000 --- a/releasenotes/notes/scaleio-manage-existing-32217f6d1c295193.yaml +++ /dev/null @@ -1,3 +0,0 @@ ---- -features: - - Added support for manage/unmanage volume in the ScaleIO driver. diff --git a/releasenotes/notes/scaleio-manage-existing-snapshot-5bbd1818654c0776.yaml b/releasenotes/notes/scaleio-manage-existing-snapshot-5bbd1818654c0776.yaml deleted file mode 100644 index 7808000bb..000000000 --- a/releasenotes/notes/scaleio-manage-existing-snapshot-5bbd1818654c0776.yaml +++ /dev/null @@ -1,3 +0,0 @@ ---- -features: - - Added support for manage/unmanage snapshot in the ScaleIO driver. 
diff --git a/releasenotes/notes/scaleio-qos-support-2ba20be58150f251.yaml b/releasenotes/notes/scaleio-qos-support-2ba20be58150f251.yaml deleted file mode 100644 index d419a3a9b..000000000 --- a/releasenotes/notes/scaleio-qos-support-2ba20be58150f251.yaml +++ /dev/null @@ -1,3 +0,0 @@ ---- -features: - - Added QoS support in ScaleIO driver. diff --git a/releasenotes/notes/scaleio-remove-force-delete-config-48fae029e3622d6d.yaml b/releasenotes/notes/scaleio-remove-force-delete-config-48fae029e3622d6d.yaml deleted file mode 100644 index 8e80b89fb..000000000 --- a/releasenotes/notes/scaleio-remove-force-delete-config-48fae029e3622d6d.yaml +++ /dev/null @@ -1,3 +0,0 @@ ---- -upgrade: - - Removed force_delete option from ScaleIO configuration. \ No newline at end of file diff --git a/releasenotes/notes/scaleio-scaling-qos-50c58e43d4b54247.yaml b/releasenotes/notes/scaleio-scaling-qos-50c58e43d4b54247.yaml deleted file mode 100644 index eeb26d559..000000000 --- a/releasenotes/notes/scaleio-scaling-qos-50c58e43d4b54247.yaml +++ /dev/null @@ -1,4 +0,0 @@ ---- -features: - - Added support for scaling QoS in the ScaleIO driver. - The new QoS keys are ``maxIOPSperGB`` and ``maxBWSperGB``. diff --git a/releasenotes/notes/scaleio-thin-provisioning-support-9c3b9203567771dd.yaml b/releasenotes/notes/scaleio-thin-provisioning-support-9c3b9203567771dd.yaml deleted file mode 100644 index d35068aeb..000000000 --- a/releasenotes/notes/scaleio-thin-provisioning-support-9c3b9203567771dd.yaml +++ /dev/null @@ -1,9 +0,0 @@ ---- -features: - - Added support for oversubscription in thin provisioning in the - ScaleIO driver. - Volumes should have extra_specs with the key ``provisioning:type`` with value - equals to either ``thick`` or ``thin``. - ``max_oversubscription_ratio`` can be defined by the global config or for - ScaleIO specific with the config option ``sio_max_over_subscription_ratio``. - The maximum oversubscription ratio supported at the moment is 10.0. 
diff --git a/releasenotes/notes/scaling-backup-service-7e5058802d2fb3dc.yaml b/releasenotes/notes/scaling-backup-service-7e5058802d2fb3dc.yaml deleted file mode 100644 index 8a8e49b2b..000000000 --- a/releasenotes/notes/scaling-backup-service-7e5058802d2fb3dc.yaml +++ /dev/null @@ -1,8 +0,0 @@ ---- -features: - - cinder-backup service is now decoupled from - cinder-volume, which allows more flexible scaling. -upgrade: - - As cinder-backup was strongly reworked in this - release, the recommended upgrade order when executing - live (rolling) upgrade is c-api->c-sch->c-vol->c-bak. diff --git a/releasenotes/notes/service_dynamic_log_change-55147d288be903f1.yaml b/releasenotes/notes/service_dynamic_log_change-55147d288be903f1.yaml deleted file mode 100644 index b9f0138ac..000000000 --- a/releasenotes/notes/service_dynamic_log_change-55147d288be903f1.yaml +++ /dev/null @@ -1,6 +0,0 @@ ---- -features: - - | - Added new APIs on microversion 3.32 to support dynamically changing log - levels in Cinder services without restart as well as retrieving current log - levels, which is an easy way to ping via the message broker a service. diff --git a/releasenotes/notes/shared-backend-config-d841b806354ad5be.yaml b/releasenotes/notes/shared-backend-config-d841b806354ad5be.yaml deleted file mode 100644 index 6af993787..000000000 --- a/releasenotes/notes/shared-backend-config-d841b806354ad5be.yaml +++ /dev/null @@ -1,5 +0,0 @@ ---- -features: - - New config format to allow for using shared Volume Driver configuration - defaults via the [backend_defaults] stanza. Config options defined there - will be used as defaults for each backend enabled via enabled_backends. 
\ No newline at end of file diff --git a/releasenotes/notes/show-provider-id-for-admin-ff4fd5a2518a4bfa.yaml b/releasenotes/notes/show-provider-id-for-admin-ff4fd5a2518a4bfa.yaml deleted file mode 100644 index 955e27538..000000000 --- a/releasenotes/notes/show-provider-id-for-admin-ff4fd5a2518a4bfa.yaml +++ /dev/null @@ -1,3 +0,0 @@ ---- -features: - - Add provider_id in the detailed view of a volume for admin. diff --git a/releasenotes/notes/slug-qnap-driver-d4465ea6009c66df.yaml b/releasenotes/notes/slug-qnap-driver-d4465ea6009c66df.yaml deleted file mode 100644 index 0dc595326..000000000 --- a/releasenotes/notes/slug-qnap-driver-d4465ea6009c66df.yaml +++ /dev/null @@ -1,3 +0,0 @@ ---- -features: - - Added volume driver for QNAP ES Storage Driver. diff --git a/releasenotes/notes/smbfs-drop-alloc-data-file-8b94da952a3b1548.yaml b/releasenotes/notes/smbfs-drop-alloc-data-file-8b94da952a3b1548.yaml deleted file mode 100644 index 5f1b570c3..000000000 --- a/releasenotes/notes/smbfs-drop-alloc-data-file-8b94da952a3b1548.yaml +++ /dev/null @@ -1,6 +0,0 @@ ---- -deprecations: - - | - The 'smbfs_allocation_info_file_path' SMBFS driver config option is now - deprecated as we're no longer using a JSON file to store volume allocation - data. This file had a considerable chance of getting corrupted. diff --git a/releasenotes/notes/smbfs-pools-support-bc43c653cfb1a34f.yaml b/releasenotes/notes/smbfs-pools-support-bc43c653cfb1a34f.yaml deleted file mode 100644 index f82608960..000000000 --- a/releasenotes/notes/smbfs-pools-support-bc43c653cfb1a34f.yaml +++ /dev/null @@ -1,5 +0,0 @@ ---- -features: - - | - The SMBFS driver now exposes share information to the scheduler via pools. - The pool names are configurable, defaulting to the share names. 
diff --git a/releasenotes/notes/snapshot_backing_up_status_support-164fbbb2a564e137.yaml b/releasenotes/notes/snapshot_backing_up_status_support-164fbbb2a564e137.yaml deleted file mode 100644 index a65b7e670..000000000 --- a/releasenotes/notes/snapshot_backing_up_status_support-164fbbb2a564e137.yaml +++ /dev/null @@ -1,6 +0,0 @@ ---- -fixes: - - Fix the unreasonable status change for volumes and snapshots when creating - backups from snapshots. -upgrade: - - The "backing-up" status is added to snapshot's status matrix. diff --git a/releasenotes/notes/solidfire-scaled-qos-9b8632453909e2db.yaml b/releasenotes/notes/solidfire-scaled-qos-9b8632453909e2db.yaml deleted file mode 100644 index 5994ea278..000000000 --- a/releasenotes/notes/solidfire-scaled-qos-9b8632453909e2db.yaml +++ /dev/null @@ -1,20 +0,0 @@ ---- -features: - - The SolidFire driver will recognize 4 new QoS spec keys - to allow an administrator to specify QoS settings which - are scaled by the size of the volume. 'ScaledIOPS' is a - flag which will tell the driver to look for 'scaleMin', - 'scaleMax' and 'scaleBurst' which provide the scaling - factor from the minimum values specified by the previous - QoS keys ('minIOPS', 'maxIOPS', 'burstIOPS'). The - administrator must take care to assure that no matter what - the final calculated QoS values follow minIOPS <= maxIOPS - <= burstIOPS. A exception will be thrown if not. The QoS - settings are also checked against the cluster min and max - allowed and truncated at the min or max if they exceed. -fixes: - - For SolidFire, QoS specs are now checked to make sure - they fall within the min and max constraints. If not - the QoS specs are capped at the min or max (i.e. if - spec says 50 and minimum supported is 100, the driver - will set it to 100). 
diff --git a/releasenotes/notes/solidfire-v2.1-replication-570a1f12f70e67b4.yaml b/releasenotes/notes/solidfire-v2.1-replication-570a1f12f70e67b4.yaml deleted file mode 100644 index 38250ad5d..000000000 --- a/releasenotes/notes/solidfire-v2.1-replication-570a1f12f70e67b4.yaml +++ /dev/null @@ -1,3 +0,0 @@ ---- -features: - - Added v2.1 replication support to SolidFire driver. diff --git a/releasenotes/notes/split-out-nested-quota-driver-e9493f478d2b8be5.yaml b/releasenotes/notes/split-out-nested-quota-driver-e9493f478d2b8be5.yaml deleted file mode 100644 index 43a33a3df..000000000 --- a/releasenotes/notes/split-out-nested-quota-driver-e9493f478d2b8be5.yaml +++ /dev/null @@ -1,12 +0,0 @@ ---- -features: - - Split nested quota support into a separate driver. In - order to use nested quotas, change the following config - ``quota_driver = cinder.quota.NestedDbQuotaDriver`` after - running the following admin API - "os-quota-sets/validate_setup_for_nested_quota_use" command - to ensure the existing quota values make sense to nest. -upgrade: - - Nested quotas will no longer be used by default, but can be - configured by setting - ``quota_driver = cinder.quota.NestedDbQuotaDriver`` diff --git a/releasenotes/notes/storwize-generic-volume-group-74495fa23e059bf9.yaml b/releasenotes/notes/storwize-generic-volume-group-74495fa23e059bf9.yaml deleted file mode 100644 index 00103f3a0..000000000 --- a/releasenotes/notes/storwize-generic-volume-group-74495fa23e059bf9.yaml +++ /dev/null @@ -1,4 +0,0 @@ ---- -features: - - Add consistency group capability to generic volume groups in Storwize - drivers. 
diff --git a/releasenotes/notes/storwize-gmcv-support-8aceee3f40eddb9f.yaml b/releasenotes/notes/storwize-gmcv-support-8aceee3f40eddb9f.yaml deleted file mode 100644 index fd0ad36b3..000000000 --- a/releasenotes/notes/storwize-gmcv-support-8aceee3f40eddb9f.yaml +++ /dev/null @@ -1,8 +0,0 @@ ---- -features: - - Add global mirror with change volumes(gmcv) support and - user can manage gmcv replication volume by SVC driver. - An example to set a gmcv replication volume type, set - property replication_type as " gmcv", property - replication_enabled as " True" and set - property drivers:cycle_period_seconds as 500. \ No newline at end of file diff --git a/releasenotes/notes/storwize-multiple-management-ip-1cd364d63879d9b8.yaml b/releasenotes/notes/storwize-multiple-management-ip-1cd364d63879d9b8.yaml deleted file mode 100644 index 43cfdc4bd..000000000 --- a/releasenotes/notes/storwize-multiple-management-ip-1cd364d63879d9b8.yaml +++ /dev/null @@ -1,3 +0,0 @@ ---- -features: - - Added multiple management IP support to Storwize SVC driver. diff --git a/releasenotes/notes/storwize-pool-aware-support-7a40c9934642b202.yaml b/releasenotes/notes/storwize-pool-aware-support-7a40c9934642b202.yaml deleted file mode 100644 index d8536b39d..000000000 --- a/releasenotes/notes/storwize-pool-aware-support-7a40c9934642b202.yaml +++ /dev/null @@ -1,3 +0,0 @@ ---- -features: - - Added multiple pools support to Storwize SVC driver. diff --git a/releasenotes/notes/storwize-split-up-__init__-153fa8f097a81e37.yaml b/releasenotes/notes/storwize-split-up-__init__-153fa8f097a81e37.yaml deleted file mode 100644 index 11a46126d..000000000 --- a/releasenotes/notes/storwize-split-up-__init__-153fa8f097a81e37.yaml +++ /dev/null @@ -1,3 +0,0 @@ ---- -upgrade: - - Removed storwize_svc_connection_protocol config setting. Users will now need to set different values for volume_driver in cinder.conf. 
FC:volume_driver = cinder.volume.drivers.ibm.storwize_svc.storwize_svc_fc.StorwizeSVCFCDriver iSCSI:volume_driver = cinder.volume.drivers.ibm.storwize_svc.storwize_svc_iscsi.StorwizeSVCISCSIDriver diff --git a/releasenotes/notes/storwize_iscsi_multipath_enhance-9de9dc29661869cd.yaml b/releasenotes/notes/storwize_iscsi_multipath_enhance-9de9dc29661869cd.yaml deleted file mode 100644 index 1579a893d..000000000 --- a/releasenotes/notes/storwize_iscsi_multipath_enhance-9de9dc29661869cd.yaml +++ /dev/null @@ -1,3 +0,0 @@ ---- -features: - - Add multipath enhancement to Storwize iSCSI driver. diff --git a/releasenotes/notes/support-extend-inuse-volume-9e4atf8912qaye99.yaml b/releasenotes/notes/support-extend-inuse-volume-9e4atf8912qaye99.yaml deleted file mode 100644 index db259a131..000000000 --- a/releasenotes/notes/support-extend-inuse-volume-9e4atf8912qaye99.yaml +++ /dev/null @@ -1,13 +0,0 @@ ---- -features: - - | - Add ability to extend ``in-use`` volume. User should be aware of the - whole environment before using this feature because it's dependent - on several external factors below: - - * nova-compute version - needs to be the latest for Pike. - * only the libvirt compute driver supports this currently. - * only iscsi and fibre channel volume types are supported on the nova side currently. - - Administrator can disable this ability by updating the - ``volume:extend_attached_volume`` policy rule. diff --git a/releasenotes/notes/support-huawei-consistency-group-b666f8f6c6cddd8f.yaml b/releasenotes/notes/support-huawei-consistency-group-b666f8f6c6cddd8f.yaml deleted file mode 100644 index dced56fee..000000000 --- a/releasenotes/notes/support-huawei-consistency-group-b666f8f6c6cddd8f.yaml +++ /dev/null @@ -1,3 +0,0 @@ ---- -features: - - Added consistency group support to the Huawei driver. 
\ No newline at end of file diff --git a/releasenotes/notes/support-metadata-based-snapshot-list-filtering-6e6df68a7ce981f5.yaml b/releasenotes/notes/support-metadata-based-snapshot-list-filtering-6e6df68a7ce981f5.yaml deleted file mode 100644 index f593d770f..000000000 --- a/releasenotes/notes/support-metadata-based-snapshot-list-filtering-6e6df68a7ce981f5.yaml +++ /dev/null @@ -1,5 +0,0 @@ ---- -features: - - Added support to querying snapshots filtered by metadata key/value - using 'metadata' optional URL parameter. - For example, "/v3/snapshots?metadata=={'key1':'value1'}". diff --git a/releasenotes/notes/support-project-id-filter-for-limit-bc5d49e239baee2a.yaml b/releasenotes/notes/support-project-id-filter-for-limit-bc5d49e239baee2a.yaml deleted file mode 100644 index 88a13e6f2..000000000 --- a/releasenotes/notes/support-project-id-filter-for-limit-bc5d49e239baee2a.yaml +++ /dev/null @@ -1,3 +0,0 @@ ---- -features: - - Supported ``project_id`` admin filters to limits API. diff --git a/releasenotes/notes/support-tenants-project-in-attachment-list-3edd8g138a28s4r8.yaml b/releasenotes/notes/support-tenants-project-in-attachment-list-3edd8g138a28s4r8.yaml deleted file mode 100644 index 920b10e6f..000000000 --- a/releasenotes/notes/support-tenants-project-in-attachment-list-3edd8g138a28s4r8.yaml +++ /dev/null @@ -1,4 +0,0 @@ ---- -fixes: - - Add ``all_tenants``, ``project_id`` support in - attachment list&detail APIs. diff --git a/releasenotes/notes/support-volume-glance-metadata-query-866b9e3beda2cd55.yaml b/releasenotes/notes/support-volume-glance-metadata-query-866b9e3beda2cd55.yaml deleted file mode 100644 index 98889b657..000000000 --- a/releasenotes/notes/support-volume-glance-metadata-query-866b9e3beda2cd55.yaml +++ /dev/null @@ -1,5 +0,0 @@ ---- -features: - - Added support for querying volumes filtered by glance metadata key/value - using 'glance_metadata' optional URL parameter. - For example, "volumes/detail?glance_metadata={"image_name":"xxx"}". 
\ No newline at end of file diff --git a/releasenotes/notes/support-zeromq-messaging-driver-d26a1141290f5548.yaml b/releasenotes/notes/support-zeromq-messaging-driver-d26a1141290f5548.yaml deleted file mode 100644 index 7d3983e66..000000000 --- a/releasenotes/notes/support-zeromq-messaging-driver-d26a1141290f5548.yaml +++ /dev/null @@ -1,3 +0,0 @@ ---- -features: - - Added support for ZeroMQ messaging driver in cinder single backend config. diff --git a/releasenotes/notes/support-zmq-messaging-41085787156fbda1.yaml b/releasenotes/notes/support-zmq-messaging-41085787156fbda1.yaml deleted file mode 100644 index 3520b9379..000000000 --- a/releasenotes/notes/support-zmq-messaging-41085787156fbda1.yaml +++ /dev/null @@ -1,3 +0,0 @@ ---- -features: - - Added support for ZMQ messaging layer in multibackend configuration. diff --git a/releasenotes/notes/support_sort_backup_by_name-0b080bcb60c0eaa0.yaml b/releasenotes/notes/support_sort_backup_by_name-0b080bcb60c0eaa0.yaml deleted file mode 100644 index 79d563629..000000000 --- a/releasenotes/notes/support_sort_backup_by_name-0b080bcb60c0eaa0.yaml +++ /dev/null @@ -1,3 +0,0 @@ ---- -features: - - Add support for sorting backups by "name". \ No newline at end of file diff --git a/releasenotes/notes/support_sort_snapshot_with_name-7b66a2d8e587275d.yaml b/releasenotes/notes/support_sort_snapshot_with_name-7b66a2d8e587275d.yaml deleted file mode 100644 index 1277314aa..000000000 --- a/releasenotes/notes/support_sort_snapshot_with_name-7b66a2d8e587275d.yaml +++ /dev/null @@ -1,3 +0,0 @@ ---- -features: - - Support to sort snapshots with "name". diff --git a/releasenotes/notes/supported-drivers-9c95dd2378cd308d.yaml b/releasenotes/notes/supported-drivers-9c95dd2378cd308d.yaml deleted file mode 100644 index b11c2c9de..000000000 --- a/releasenotes/notes/supported-drivers-9c95dd2378cd308d.yaml +++ /dev/null @@ -1,3 +0,0 @@ ---- -features: - - Added supported driver checks on all drivers. 
diff --git a/releasenotes/notes/synology-volume-driver-c5e0f655b04390ce.yaml b/releasenotes/notes/synology-volume-driver-c5e0f655b04390ce.yaml deleted file mode 100644 index eae3729b1..000000000 --- a/releasenotes/notes/synology-volume-driver-c5e0f655b04390ce.yaml +++ /dev/null @@ -1,3 +0,0 @@ ---- -features: - - Added backend driver for Synology iSCSI-supported storage. diff --git a/releasenotes/notes/tintri_image_direct_clone-f73e561985aad867.yaml b/releasenotes/notes/tintri_image_direct_clone-f73e561985aad867.yaml deleted file mode 100644 index 912e2d3cb..000000000 --- a/releasenotes/notes/tintri_image_direct_clone-f73e561985aad867.yaml +++ /dev/null @@ -1,8 +0,0 @@ ---- -fixes: - - Fix for Tintri image direct clone feature. Fix for the bug 1400966 prevents - user from specifying image "nfs share location" as location value for an - image. Now, in order to use Tintri image direct clone, user can specify - "provider_location" in image metadata to specify image nfs share location. - NFS share which hosts images should be specified in a file using - tintri_image_shares_config config option. diff --git a/releasenotes/notes/tooz-coordination-heartbeat-cfac1064fd7878be.yaml b/releasenotes/notes/tooz-coordination-heartbeat-cfac1064fd7878be.yaml deleted file mode 100644 index b3afd4265..000000000 --- a/releasenotes/notes/tooz-coordination-heartbeat-cfac1064fd7878be.yaml +++ /dev/null @@ -1,7 +0,0 @@ ---- -upgrade: - - | - The coordination system used by Cinder has been simplified to leverage tooz - builtin heartbeat feature. Therefore, the configuration options - `coordination.heartbeat`, `coordination.initial_reconnect_backoff` and - `coordination.max_reconnect_backoff` have been removed. 
diff --git a/releasenotes/notes/tooz-locks-0f9f2cc15f8dad5a.yaml b/releasenotes/notes/tooz-locks-0f9f2cc15f8dad5a.yaml deleted file mode 100644 index 1fb77dc7a..000000000 --- a/releasenotes/notes/tooz-locks-0f9f2cc15f8dad5a.yaml +++ /dev/null @@ -1,4 +0,0 @@ ---- -features: - - Locks may use Tooz as abstraction layer now, to support distributed lock - managers and prepare Cinder to better support HA configurations. diff --git a/releasenotes/notes/unity-backup-via-snapshot-81a2d5a118c97042.yaml b/releasenotes/notes/unity-backup-via-snapshot-81a2d5a118c97042.yaml deleted file mode 100644 index 0a03b17b6..000000000 --- a/releasenotes/notes/unity-backup-via-snapshot-81a2d5a118c97042.yaml +++ /dev/null @@ -1,3 +0,0 @@ ---- -features: - - Add support to backup volume using snapshot in the Unity driver. diff --git a/releasenotes/notes/unity-fast-clone-02ae88ba8fdef145.yaml b/releasenotes/notes/unity-fast-clone-02ae88ba8fdef145.yaml deleted file mode 100644 index 85835b8f7..000000000 --- a/releasenotes/notes/unity-fast-clone-02ae88ba8fdef145.yaml +++ /dev/null @@ -1,8 +0,0 @@ ---- -features: - - Add thin clone support in the Unity driver. Unity storage supports the thin - clone of a LUN from OE version 4.2.0. It is more efficient than the dd - solution. However, there is a limit of thin clone inside - each LUN family. Every time the limit reaches, a new LUN family will be - created by a dd-copy, and then the volume clone afterward will use the - thin clone of the new LUN family. diff --git a/releasenotes/notes/updated-at-list-0f899098f7258331.yaml b/releasenotes/notes/updated-at-list-0f899098f7258331.yaml deleted file mode 100644 index 6be15d381..000000000 --- a/releasenotes/notes/updated-at-list-0f899098f7258331.yaml +++ /dev/null @@ -1,3 +0,0 @@ ---- -features: - - The updated_at timestamp is now returned in listing detail. 
diff --git a/releasenotes/notes/use-castellan-key-manager-4911c3c4908ca633.yaml b/releasenotes/notes/use-castellan-key-manager-4911c3c4908ca633.yaml deleted file mode 100644 index 5f3312ae6..000000000 --- a/releasenotes/notes/use-castellan-key-manager-4911c3c4908ca633.yaml +++ /dev/null @@ -1,15 +0,0 @@ ---- -prelude: > - The default key manager interface in Cinder was - deprecated and the Castellan key manager interface - library is now used instead. For more information - about Castellan, please see - http://docs.openstack.org/developer/castellan/ . -upgrade: - - If using the key manager, the configuration details - should be updated to reflect the Castellan-specific - configuration options. -deprecations: - - All barbican and keymgr config options in Cinder are - now deprecated. All of these options are moved to - the ``key_manager`` section for the Castellan library. diff --git a/releasenotes/notes/use-glance-v2-api-and-deprecate-glance_api_version-1a3b698429cb754e.yaml b/releasenotes/notes/use-glance-v2-api-and-deprecate-glance_api_version-1a3b698429cb754e.yaml deleted file mode 100644 index 74207fd23..000000000 --- a/releasenotes/notes/use-glance-v2-api-and-deprecate-glance_api_version-1a3b698429cb754e.yaml +++ /dev/null @@ -1,5 +0,0 @@ ---- -upgrade: - - Cinder now defaults to using the Glance v2 API. - The ``glance_api_version`` configuration option has been - deprecated and will be removed in the 11.0.0 Queens release. diff --git a/releasenotes/notes/use-oslo_middleware_sizelimit-5f171cf1c44444f8.yaml b/releasenotes/notes/use-oslo_middleware_sizelimit-5f171cf1c44444f8.yaml deleted file mode 100644 index 3217d909c..000000000 --- a/releasenotes/notes/use-oslo_middleware_sizelimit-5f171cf1c44444f8.yaml +++ /dev/null @@ -1,7 +0,0 @@ ---- -deprecations: - - The api-paste.ini ``paste.filter_factory`` setting has been updated to use - ``oslo_middleware.sizelimit`` rather than ``cinder.api.middleware.sizelimit`` - compatibility shim. 
``cinder.api.middleware.sizelimit`` was deprecated in - kilo and should now be updated to use ``oslo_middleware.sizelimit`` in - api-paste.ini in preparation for removal in the pike release. diff --git a/releasenotes/notes/validate_vol_create_uuids-4f08b4ef201385f6.yaml b/releasenotes/notes/validate_vol_create_uuids-4f08b4ef201385f6.yaml deleted file mode 100644 index 8f334f59e..000000000 --- a/releasenotes/notes/validate_vol_create_uuids-4f08b4ef201385f6.yaml +++ /dev/null @@ -1,6 +0,0 @@ ---- -fixes: - - | - The create volume api will now return 400 error instead of 404/500 if user - passes non-uuid values to consistencygroup_id, source_volid and - source_replica parameters in the request body. diff --git a/releasenotes/notes/verbose-online-migrations-94fb7e8a85cdbc10.yaml b/releasenotes/notes/verbose-online-migrations-94fb7e8a85cdbc10.yaml deleted file mode 100644 index 0a0f5dfc4..000000000 --- a/releasenotes/notes/verbose-online-migrations-94fb7e8a85cdbc10.yaml +++ /dev/null @@ -1,6 +0,0 @@ ---- -features: - - The cinder-manage online_data_migrations command now prints a - tabular summary of completed and remaining records. The goal - here is to get all your numbers to zero. The previous execution - return code behavior is retained for scripting. diff --git a/releasenotes/notes/verify-dorado-luntype-for-huawei-driver-4fc2f4cca3141bb3.yaml b/releasenotes/notes/verify-dorado-luntype-for-huawei-driver-4fc2f4cca3141bb3.yaml deleted file mode 100644 index 0fa4e3d76..000000000 --- a/releasenotes/notes/verify-dorado-luntype-for-huawei-driver-4fc2f4cca3141bb3.yaml +++ /dev/null @@ -1,8 +0,0 @@ ---- -fixes: - - | - Add 'LUNType' configuration verification for Huawei driver when - connecting to Dorado array. Because Dorado array only supports - 'Thin' lun type, so 'LUNType' only can be configured as 'Thin', - any other type is invalid and if 'LUNType' not explicitly configured, - by default use 'Thin' for Dorado array. 
diff --git a/releasenotes/notes/veritas_access_driver-c73b2320ba9f46a8.yaml b/releasenotes/notes/veritas_access_driver-c73b2320ba9f46a8.yaml deleted file mode 100644 index 739ff4f1a..000000000 --- a/releasenotes/notes/veritas_access_driver-c73b2320ba9f46a8.yaml +++ /dev/null @@ -1,3 +0,0 @@ ---- -features: - - Added NFS based driver for Veritas Access. diff --git a/releasenotes/notes/vhd-disk-format-upload-to-image-5851f9d35f4ee447.yaml b/releasenotes/notes/vhd-disk-format-upload-to-image-5851f9d35f4ee447.yaml deleted file mode 100644 index e31e34aad..000000000 --- a/releasenotes/notes/vhd-disk-format-upload-to-image-5851f9d35f4ee447.yaml +++ /dev/null @@ -1,3 +0,0 @@ ---- -features: - - Added support for vhd and vhdx disk-formats for volume upload-to-image. diff --git a/releasenotes/notes/vmax-attach-snapshot-3137e59ab4ff39a4.yaml b/releasenotes/notes/vmax-attach-snapshot-3137e59ab4ff39a4.yaml deleted file mode 100644 index 1b22fb6cd..000000000 --- a/releasenotes/notes/vmax-attach-snapshot-3137e59ab4ff39a4.yaml +++ /dev/null @@ -1,4 +0,0 @@ ---- -features: - - Enable backup snapshot optimal path by implementing attach - and detach snapshot in the VMAX driver. diff --git a/releasenotes/notes/vmax-clone-cg-09fce492931c957f.yaml b/releasenotes/notes/vmax-clone-cg-09fce492931c957f.yaml deleted file mode 100644 index 98076d3c7..000000000 --- a/releasenotes/notes/vmax-clone-cg-09fce492931c957f.yaml +++ /dev/null @@ -1,4 +0,0 @@ ---- -features: - - Added the ability to create a CG from a source - CG with the VMAX driver. diff --git a/releasenotes/notes/vmax-compression-support-1dfe463328b56d7f.yaml b/releasenotes/notes/vmax-compression-support-1dfe463328b56d7f.yaml deleted file mode 100644 index 74481ec64..000000000 --- a/releasenotes/notes/vmax-compression-support-1dfe463328b56d7f.yaml +++ /dev/null @@ -1,3 +0,0 @@ ---- -features: - - Support for compression on VMAX All Flash in the VMAX driver. 
diff --git a/releasenotes/notes/vmax-generic-volume-group-28b3b2674c492bbc.yaml b/releasenotes/notes/vmax-generic-volume-group-28b3b2674c492bbc.yaml deleted file mode 100644 index 65bc8532b..000000000 --- a/releasenotes/notes/vmax-generic-volume-group-28b3b2674c492bbc.yaml +++ /dev/null @@ -1,5 +0,0 @@ ---- -features: - - | - Add consistent group snapshot support to generic volume groups in - VMAX driver version 3.0. diff --git a/releasenotes/notes/vmax-iscsi-multipath-76cc09bacf4fdfbf.yaml b/releasenotes/notes/vmax-iscsi-multipath-76cc09bacf4fdfbf.yaml deleted file mode 100644 index c92533fbb..000000000 --- a/releasenotes/notes/vmax-iscsi-multipath-76cc09bacf4fdfbf.yaml +++ /dev/null @@ -1,3 +0,0 @@ ---- -features: - - Support for iSCSI multipathing in EMC VMAX driver. diff --git a/releasenotes/notes/vmax-oversubscription-d61d0e3b1df2487a.yaml b/releasenotes/notes/vmax-oversubscription-d61d0e3b1df2487a.yaml deleted file mode 100644 index c31f05012..000000000 --- a/releasenotes/notes/vmax-oversubscription-d61d0e3b1df2487a.yaml +++ /dev/null @@ -1,3 +0,0 @@ ---- -features: - - Added oversubscription support in the VMAX driver diff --git a/releasenotes/notes/vmax-qos-eb40ed35bd2f457d.yaml b/releasenotes/notes/vmax-qos-eb40ed35bd2f457d.yaml deleted file mode 100644 index 6fb1932f5..000000000 --- a/releasenotes/notes/vmax-qos-eb40ed35bd2f457d.yaml +++ /dev/null @@ -1,3 +0,0 @@ ---- -features: - - QoS support in EMC VMAX iSCSI and FC drivers. diff --git a/releasenotes/notes/vmax-rename-dell-emc-f9ebfb9eb567f427.yaml b/releasenotes/notes/vmax-rename-dell-emc-f9ebfb9eb567f427.yaml deleted file mode 100644 index f8f26f196..000000000 --- a/releasenotes/notes/vmax-rename-dell-emc-f9ebfb9eb567f427.yaml +++ /dev/null @@ -1,6 +0,0 @@ ---- -upgrade: - - The VMAX driver is moved to the dell_emc directory. 
- volume_driver entry in cinder.conf needs to be changed to - ``cinder.volume.drivers.dell_emc.vmax.iscsi.VMAXISCSIDriver`` or - ``cinder.volume.drivers.dell_emc.vmax.fc.VMAXFCDriver``. diff --git a/releasenotes/notes/vmax-rest-94e48bed6f9c134c.yaml b/releasenotes/notes/vmax-rest-94e48bed6f9c134c.yaml deleted file mode 100644 index f438a931d..000000000 --- a/releasenotes/notes/vmax-rest-94e48bed6f9c134c.yaml +++ /dev/null @@ -1,5 +0,0 @@ ---- -features: - - | - VMAX driver version 3.0, replacing SMI-S with Unisphere REST. - This driver supports VMAX3 hybrid and All Flash arrays. diff --git a/releasenotes/notes/vmax-rest-compression-10c2590052a9465e.yaml b/releasenotes/notes/vmax-rest-compression-10c2590052a9465e.yaml deleted file mode 100644 index 77920f92a..000000000 --- a/releasenotes/notes/vmax-rest-compression-10c2590052a9465e.yaml +++ /dev/null @@ -1,4 +0,0 @@ ---- -features: - - | - Adding compression functionality to VMAX driver version 3.0. diff --git a/releasenotes/notes/vmax-rest-livemigration-885dd8731d5a8a88.yaml b/releasenotes/notes/vmax-rest-livemigration-885dd8731d5a8a88.yaml deleted file mode 100644 index 2880e0b2c..000000000 --- a/releasenotes/notes/vmax-rest-livemigration-885dd8731d5a8a88.yaml +++ /dev/null @@ -1,4 +0,0 @@ ---- -features: - - | - Adding Live Migration functionality to VMAX driver version 3.0. diff --git a/releasenotes/notes/vmax-rest-qos-6bb4073b92c932c6.yaml b/releasenotes/notes/vmax-rest-qos-6bb4073b92c932c6.yaml deleted file mode 100644 index 38f2304c6..000000000 --- a/releasenotes/notes/vmax-rest-qos-6bb4073b92c932c6.yaml +++ /dev/null @@ -1,4 +0,0 @@ ---- -features: - - | - Adding Qos functionality to VMAX driver version 3.0. 
diff --git a/releasenotes/notes/vmax-rest-replication-612fcfd136cc076e.yaml b/releasenotes/notes/vmax-rest-replication-612fcfd136cc076e.yaml deleted file mode 100644 index 92638b549..000000000 --- a/releasenotes/notes/vmax-rest-replication-612fcfd136cc076e.yaml +++ /dev/null @@ -1,4 +0,0 @@ ---- -features: - - | - Adding Replication V2.1 functionality to VMAX driver version 3.0. diff --git a/releasenotes/notes/vmax-rest-retype-ceba5e8d04f637b4.yaml b/releasenotes/notes/vmax-rest-retype-ceba5e8d04f637b4.yaml deleted file mode 100644 index 6d160d477..000000000 --- a/releasenotes/notes/vmax-rest-retype-ceba5e8d04f637b4.yaml +++ /dev/null @@ -1,4 +0,0 @@ ---- -features: - - | - Add retype functionality to VMAX driver version 3.0. diff --git a/releasenotes/notes/vmax-volume-migration-992c8c68e2207bbc.yaml b/releasenotes/notes/vmax-volume-migration-992c8c68e2207bbc.yaml deleted file mode 100644 index 4c20b59e7..000000000 --- a/releasenotes/notes/vmax-volume-migration-992c8c68e2207bbc.yaml +++ /dev/null @@ -1,5 +0,0 @@ ---- -features: - - Storage assisted volume migration from one Pool/SLO/Workload combination - to another, on the same array, via retype, for the VMAX driver. Both - All Flash and Hybrid VMAX3 arrays are supported. VMAX2 is not supported. diff --git a/releasenotes/notes/vmdk_backup_restore-41f807b7bc8e0ae8.yaml b/releasenotes/notes/vmdk_backup_restore-41f807b7bc8e0ae8.yaml deleted file mode 100644 index 05ae0c92f..000000000 --- a/releasenotes/notes/vmdk_backup_restore-41f807b7bc8e0ae8.yaml +++ /dev/null @@ -1,4 +0,0 @@ ---- -fixes: - - Fixed backup and restore of volumes in VMware VMDK driver. 
- diff --git a/releasenotes/notes/vmdk_config_conn_pool_size-0658c497e118533f.yaml b/releasenotes/notes/vmdk_config_conn_pool_size-0658c497e118533f.yaml deleted file mode 100644 index 4a6fafce7..000000000 --- a/releasenotes/notes/vmdk_config_conn_pool_size-0658c497e118533f.yaml +++ /dev/null @@ -1,5 +0,0 @@ ---- -upgrade: - - Added config option ``vmware_connection_pool_size`` in the - VMware VMDK driver to specify the maximum number of connections - (to vCenter) in the http connection pool. diff --git a/releasenotes/notes/vmdk_default_task_poll_interval-665f032bebfca39e.yaml b/releasenotes/notes/vmdk_default_task_poll_interval-665f032bebfca39e.yaml deleted file mode 100644 index 475a6bf41..000000000 --- a/releasenotes/notes/vmdk_default_task_poll_interval-665f032bebfca39e.yaml +++ /dev/null @@ -1,5 +0,0 @@ ---- -upgrade: - - The default interval for polling vCenter tasks in the - VMware VMDK driver is changed to 2s. - diff --git a/releasenotes/notes/vmdk_image_ova-d3b3a0e72221110c.yaml b/releasenotes/notes/vmdk_image_ova-d3b3a0e72221110c.yaml deleted file mode 100644 index a60670bad..000000000 --- a/releasenotes/notes/vmdk_image_ova-d3b3a0e72221110c.yaml +++ /dev/null @@ -1,5 +0,0 @@ ---- -fixes: - - Fixed the VMware VMDK driver to create volume from image - in ova container. 
- diff --git a/releasenotes/notes/vmdk_vc_51-df29eeb5fc93fbb1.yaml b/releasenotes/notes/vmdk_vc_51-df29eeb5fc93fbb1.yaml deleted file mode 100644 index 076a874cf..000000000 --- a/releasenotes/notes/vmdk_vc_51-df29eeb5fc93fbb1.yaml +++ /dev/null @@ -1,4 +0,0 @@ ---- -deprecations: - - VMware VMDK driver deprecated the support for vCenter version 5.1 - diff --git a/releasenotes/notes/vmem-7000-iscsi-3c8683dcc1f0b9b4.yaml b/releasenotes/notes/vmem-7000-iscsi-3c8683dcc1f0b9b4.yaml deleted file mode 100644 index 5983cacd0..000000000 --- a/releasenotes/notes/vmem-7000-iscsi-3c8683dcc1f0b9b4.yaml +++ /dev/null @@ -1,3 +0,0 @@ ---- -features: - - Added backend driver for Violin Memory 7000 iscsi storage. diff --git a/releasenotes/notes/vmware-vmdk-config-eb70892e4ccf8f3c.yaml b/releasenotes/notes/vmware-vmdk-config-eb70892e4ccf8f3c.yaml deleted file mode 100644 index f6ae4a77a..000000000 --- a/releasenotes/notes/vmware-vmdk-config-eb70892e4ccf8f3c.yaml +++ /dev/null @@ -1,4 +0,0 @@ ---- -upgrade: - - The VMware VMDK driver supports a new config option ``vmware_host_port`` - to specify the port number to connect to vCenter server. diff --git a/releasenotes/notes/vmware-vmdk-manage-existing-0edc20d9d4d19172.yaml b/releasenotes/notes/vmware-vmdk-manage-existing-0edc20d9d4d19172.yaml deleted file mode 100644 index f23fd6623..000000000 --- a/releasenotes/notes/vmware-vmdk-manage-existing-0edc20d9d4d19172.yaml +++ /dev/null @@ -1,3 +0,0 @@ ---- -features: - - Added support for manage volume in the VMware VMDK driver. \ No newline at end of file diff --git a/releasenotes/notes/vmware-vmdk-removed-bfb04eed77b95fdf.yaml b/releasenotes/notes/vmware-vmdk-removed-bfb04eed77b95fdf.yaml deleted file mode 100644 index 78de79cb5..000000000 --- a/releasenotes/notes/vmware-vmdk-removed-bfb04eed77b95fdf.yaml +++ /dev/null @@ -1,3 +0,0 @@ ---- -upgrade: - - The VMware VMDK driver for ESX server has been removed. 
diff --git a/releasenotes/notes/vmware_vmdk_enforce_vc_55-7e1b3ede9bf2129b.yaml b/releasenotes/notes/vmware_vmdk_enforce_vc_55-7e1b3ede9bf2129b.yaml deleted file mode 100644 index 38bdcb47e..000000000 --- a/releasenotes/notes/vmware_vmdk_enforce_vc_55-7e1b3ede9bf2129b.yaml +++ /dev/null @@ -1,3 +0,0 @@ ---- -upgrade: - - The VMware VMDK driver now enforces minimum vCenter version of 5.5. diff --git a/releasenotes/notes/vmware_vmdk_paravirtual-3d5eeef96dcbcfb7.yaml b/releasenotes/notes/vmware_vmdk_paravirtual-3d5eeef96dcbcfb7.yaml deleted file mode 100644 index 28ea6e633..000000000 --- a/releasenotes/notes/vmware_vmdk_paravirtual-3d5eeef96dcbcfb7.yaml +++ /dev/null @@ -1,4 +0,0 @@ ---- -fixes: - - Added support for images with ``vmware_adaptertype`` set to - ``paraVirtual`` in the VMDK driver. diff --git a/releasenotes/notes/vnx-async-migration-support-3c449139bb264004.yaml b/releasenotes/notes/vnx-async-migration-support-3c449139bb264004.yaml deleted file mode 100644 index 5171d2635..000000000 --- a/releasenotes/notes/vnx-async-migration-support-3c449139bb264004.yaml +++ /dev/null @@ -1,10 +0,0 @@ ---- -features: - - VNX cinder driver now supports async migration during volume cloning. - By default, the cloned volume will be available after the migration starts - in the VNX instead of waiting for the completion of migration. This greatly - accelerates the cloning process. - If user wants to disable this, he could add - ``--metadata async_migrate=False`` when creating volume from source - volume/snapshot. 
- diff --git a/releasenotes/notes/vnx-configurable-migration-rate-5e0a2235777c314f.yaml b/releasenotes/notes/vnx-configurable-migration-rate-5e0a2235777c314f.yaml deleted file mode 100644 index 8c1d3b35d..000000000 --- a/releasenotes/notes/vnx-configurable-migration-rate-5e0a2235777c314f.yaml +++ /dev/null @@ -1,3 +0,0 @@ ---- -features: - - Configrable migration rate in VNX driver via metadata diff --git a/releasenotes/notes/vnx-new-driver-7e96934c2d3a6edc.yaml b/releasenotes/notes/vnx-new-driver-7e96934c2d3a6edc.yaml deleted file mode 100644 index bfb2914d7..000000000 --- a/releasenotes/notes/vnx-new-driver-7e96934c2d3a6edc.yaml +++ /dev/null @@ -1,15 +0,0 @@ ---- -features: - - New Cinder driver based on storops library (available in pypi) - for EMC VNX. - -upgrade: - - For EMC VNX backends, please upgrade to use - ``cinder.volume.drivers.emc.vnx.driver.EMCVNXDriver``. Add config option - ``storage_protocol = fc`` or ``storage_protocol = iscsi`` to the driver - section to enable the FC or iSCSI driver respectively. -deprecations: - - Old VNX FC (``cinder.volume.drivers.emc.emc_cli_fc.EMCCLIFCDriver``)/ - iSCSI (``cinder.volume.drivers.emc.emc_cli_iscsi.EMCCLIISCSIDriver``) - drivers are deprecated. Please refer to upgrade section for information - about the new driver. diff --git a/releasenotes/notes/vnx-qos-support-7057196782e2c388.yaml b/releasenotes/notes/vnx-qos-support-7057196782e2c388.yaml deleted file mode 100644 index f328c6183..000000000 --- a/releasenotes/notes/vnx-qos-support-7057196782e2c388.yaml +++ /dev/null @@ -1,3 +0,0 @@ ---- -features: - - Adds QoS support for VNX Cinder driver. 
diff --git a/releasenotes/notes/vnx-replication-group-2ebf04c80e2171f7.yaml b/releasenotes/notes/vnx-replication-group-2ebf04c80e2171f7.yaml deleted file mode 100644 index 7122ded8e..000000000 --- a/releasenotes/notes/vnx-replication-group-2ebf04c80e2171f7.yaml +++ /dev/null @@ -1,3 +0,0 @@ ---- -features: - - Add consistent replication group support in VNX cinder driver. diff --git a/releasenotes/notes/vnx-replication-v2.1-4d89935547183cc9.yaml b/releasenotes/notes/vnx-replication-v2.1-4d89935547183cc9.yaml deleted file mode 100644 index eebfd94f4..000000000 --- a/releasenotes/notes/vnx-replication-v2.1-4d89935547183cc9.yaml +++ /dev/null @@ -1,3 +0,0 @@ ---- -features: - - Adds v2.1 replication support in VNX Cinder driver. diff --git a/releasenotes/notes/vnx-repv2.1-config-update-cc2f60c20aec88dd.yaml b/releasenotes/notes/vnx-repv2.1-config-update-cc2f60c20aec88dd.yaml deleted file mode 100644 index 15df89f1c..000000000 --- a/releasenotes/notes/vnx-repv2.1-config-update-cc2f60c20aec88dd.yaml +++ /dev/null @@ -1,8 +0,0 @@ ---- -upgrade: - - In VNX Cinder driver, ``replication_device`` keys, ``backend_id`` and - ``san_ip`` are mandatory now. If you prefer security file authentication, - please append ``storage_vnx_security_file_dir`` in ``replication_device``, - otherwise, append ``san_login``, ``san_password``, - ``storage_vnx_authentication_type`` in ``replication_device``. - diff --git a/releasenotes/notes/vnx_clone_cg-db74ee2ea71bedcb.yaml b/releasenotes/notes/vnx_clone_cg-db74ee2ea71bedcb.yaml deleted file mode 100644 index 4dabeed40..000000000 --- a/releasenotes/notes/vnx_clone_cg-db74ee2ea71bedcb.yaml +++ /dev/null @@ -1,3 +0,0 @@ ---- -features: - - Cloning of consistency group added to EMC VNX backend driver. 
diff --git a/releasenotes/notes/volume-filtering-for-quoted-display-name-7f5e8ac888a73001.yaml b/releasenotes/notes/volume-filtering-for-quoted-display-name-7f5e8ac888a73001.yaml deleted file mode 100644 index 8192b265c..000000000 --- a/releasenotes/notes/volume-filtering-for-quoted-display-name-7f5e8ac888a73001.yaml +++ /dev/null @@ -1,5 +0,0 @@ ---- -fixes: - - Filtering volumes by their display name now - correctly handles display names with single and - double quotes. diff --git a/releasenotes/notes/volumes-summary-6b2485f339c88a91.yaml b/releasenotes/notes/volumes-summary-6b2485f339c88a91.yaml deleted file mode 100644 index d09afc650..000000000 --- a/releasenotes/notes/volumes-summary-6b2485f339c88a91.yaml +++ /dev/null @@ -1,4 +0,0 @@ ---- -features: - - A new API to display the volumes summary. This summary API displays the - total number of volumes and total volume's size in GB. diff --git a/releasenotes/notes/vrts_hyperscale_driver-5b63ab706ea8ae89.yaml b/releasenotes/notes/vrts_hyperscale_driver-5b63ab706ea8ae89.yaml deleted file mode 100644 index ebe02d3d1..000000000 --- a/releasenotes/notes/vrts_hyperscale_driver-5b63ab706ea8ae89.yaml +++ /dev/null @@ -1,3 +0,0 @@ ---- -features: - - Added volume backend driver for Veritas HyperScale storage. diff --git a/releasenotes/notes/win-iscsi-config-portals-51895294228d7883.yaml b/releasenotes/notes/win-iscsi-config-portals-51895294228d7883.yaml deleted file mode 100644 index 1e13cd5ef..000000000 --- a/releasenotes/notes/win-iscsi-config-portals-51895294228d7883.yaml +++ /dev/null @@ -1,9 +0,0 @@ ---- -features: - - | - The Windows iSCSI driver now returns multiple portals when available - and multipath is requested. -fixes: - - | - The Windows iSCSI driver now honors the configured iSCSI addresses, - ensuring that only those addresses will be used for iSCSI traffic. 
diff --git a/releasenotes/notes/xiv-ds8k-replication-2.1-996c871391152e31.yaml b/releasenotes/notes/xiv-ds8k-replication-2.1-996c871391152e31.yaml deleted file mode 100644 index d64e7dac0..000000000 --- a/releasenotes/notes/xiv-ds8k-replication-2.1-996c871391152e31.yaml +++ /dev/null @@ -1,4 +0,0 @@ ---- -features: - - Added replication v2.1 support to the IBM XIV/DS8K - driver. diff --git a/releasenotes/notes/xiv-generic-volume-group-4609cdc86d6aaf81.yaml b/releasenotes/notes/xiv-generic-volume-group-4609cdc86d6aaf81.yaml deleted file mode 100755 index 3056d8b09..000000000 --- a/releasenotes/notes/xiv-generic-volume-group-4609cdc86d6aaf81.yaml +++ /dev/null @@ -1,4 +0,0 @@ ---- -features: - - Add consistent group capability to generic volume groups in - XIV, Spectrum Accelerate and A9000/R storage systems. diff --git a/releasenotes/notes/xiv-new-qos-independent-type-58885c77efe24798.yaml b/releasenotes/notes/xiv-new-qos-independent-type-58885c77efe24798.yaml deleted file mode 100644 index 31b845280..000000000 --- a/releasenotes/notes/xiv-new-qos-independent-type-58885c77efe24798.yaml +++ /dev/null @@ -1,7 +0,0 @@ ---- -features: - - Added independent and shared types for qos classes in XIV & A9000. - Shared type enables to share bandwidth and IO rates between volumes - of the same class. Independent type gives each volume the same - bandwidth and IO rates without being affected by other volumes in - the same qos class. diff --git a/releasenotes/notes/xiv-replication-group-7ca437c90f2474a7.yaml b/releasenotes/notes/xiv-replication-group-7ca437c90f2474a7.yaml deleted file mode 100644 index 46d69624a..000000000 --- a/releasenotes/notes/xiv-replication-group-7ca437c90f2474a7.yaml +++ /dev/null @@ -1,4 +0,0 @@ ---- -features: - - | - Add consistency group replication support in XIV\A9000 Cinder driver. 
diff --git a/releasenotes/notes/xtremio-cg-from-cg-e05cf286e3a1e943.yaml b/releasenotes/notes/xtremio-cg-from-cg-e05cf286e3a1e943.yaml deleted file mode 100644 index db4b852bf..000000000 --- a/releasenotes/notes/xtremio-cg-from-cg-e05cf286e3a1e943.yaml +++ /dev/null @@ -1,4 +0,0 @@ ---- -features: - - Support for creating a consistency group from - consistency group in XtremIO. diff --git a/releasenotes/notes/xtremio-generic-groups-912e11525573e970.yaml b/releasenotes/notes/xtremio-generic-groups-912e11525573e970.yaml deleted file mode 100644 index 7f7a22587..000000000 --- a/releasenotes/notes/xtremio-generic-groups-912e11525573e970.yaml +++ /dev/null @@ -1,3 +0,0 @@ ---- -features: - - Add consistent group capability to generic volume groups in the XtremIO driver. diff --git a/releasenotes/notes/xtremio-manage-snapshot-5737d3ad37df81d1.yaml b/releasenotes/notes/xtremio-manage-snapshot-5737d3ad37df81d1.yaml deleted file mode 100644 index 7363172df..000000000 --- a/releasenotes/notes/xtremio-manage-snapshot-5737d3ad37df81d1.yaml +++ /dev/null @@ -1,3 +0,0 @@ ---- -features: - - Added snapshot manage/unmanage support to the EMC XtremIO driver. diff --git a/releasenotes/notes/zfssa-iscsi-multi-connect-3be99ee84660a280.yaml b/releasenotes/notes/zfssa-iscsi-multi-connect-3be99ee84660a280.yaml deleted file mode 100644 index 6a046206a..000000000 --- a/releasenotes/notes/zfssa-iscsi-multi-connect-3be99ee84660a280.yaml +++ /dev/null @@ -1,5 +0,0 @@ ---- -fixes: - - Oracle ZFSSA iSCSI - allows a volume to be connected to more than one - connector at the same time, which is required for live-migration to work. - ZFSSA software release 2013.1.3.x (or newer) is required for this to work. 
diff --git a/releasenotes/notes/zfssa-volume-manage-unmanage-ccd80807103b69c8.yaml b/releasenotes/notes/zfssa-volume-manage-unmanage-ccd80807103b69c8.yaml deleted file mode 100644 index f5032c56c..000000000 --- a/releasenotes/notes/zfssa-volume-manage-unmanage-ccd80807103b69c8.yaml +++ /dev/null @@ -1,3 +0,0 @@ ---- -features: - - Volume manage/unmanage support for Oracle ZFSSA iSCSI and NFS drivers. diff --git a/releasenotes/notes/zte_cinder_driver-76ba6d034e1b6f65.yaml b/releasenotes/notes/zte_cinder_driver-76ba6d034e1b6f65.yaml deleted file mode 100644 index 4bdd5e804..000000000 --- a/releasenotes/notes/zte_cinder_driver-76ba6d034e1b6f65.yaml +++ /dev/null @@ -1,4 +0,0 @@ ---- -features: - - Added backend driver for ZTE iSCSI storage. - diff --git a/releasenotes/source/_static/.placeholder b/releasenotes/source/_static/.placeholder deleted file mode 100644 index e69de29bb..000000000 diff --git a/releasenotes/source/_templates/.placeholder b/releasenotes/source/_templates/.placeholder deleted file mode 100644 index e69de29bb..000000000 diff --git a/releasenotes/source/conf.py b/releasenotes/source/conf.py deleted file mode 100644 index 9e161dd5a..000000000 --- a/releasenotes/source/conf.py +++ /dev/null @@ -1,276 +0,0 @@ -# -*- coding: utf-8 -*- -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -# This file is execfile()d with the current directory set to its -# containing dir. 
-# -# Note that not all possible configuration values are present in this -# autogenerated file. -# -# All configuration values have a default; values that are commented out -# serve to show the default. - -# If extensions (or modules to document with autodoc) are in another directory, -# add these directories to sys.path here. If the directory is relative to the -# documentation root, use os.path.abspath to make it absolute, like shown here. -# sys.path.insert(0, os.path.abspath('.')) - -# -- General configuration ------------------------------------------------ - -# If your documentation needs a minimal Sphinx version, state it here. -# needs_sphinx = '1.0' - -# Add any Sphinx extension module names here, as strings. They can be -# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom -# ones. -extensions = [ - 'openstackdocstheme', - 'reno.sphinxext', -] - -# Add any paths that contain templates here, relative to this directory. -templates_path = ['_templates'] - -# The suffix of source filenames. -source_suffix = '.rst' - -# The encoding of source files. -# source_encoding = 'utf-8-sig' - -# The master toctree document. -master_doc = 'index' - -# General information about the project. -repository_name = 'openstack/cinder' -bug_project = 'cinder' -bug_tag = 'doc' -project = u'Cinder Release Notes' -copyright = u'2015, Cinder Developers' - -# The version info for the project you're documenting, acts as replacement for -# |version| and |release|, also used in various other places throughout the -# built documents. -# -# The short X.Y version. -from cinder.version import version_info as cinder_version -# The full version, including alpha/beta/rc tags. -release = cinder_version.version_string_with_vcs() -# The short X.Y version. -version = cinder_version.canonical_version_string() - -# The language for content autogenerated by Sphinx. Refer to documentation -# for a list of supported languages. 
-# language = None - -# There are two options for replacing |today|: either, you set today to some -# non-false value, then it is used: -# today = '' -# Else, today_fmt is used as the format for a strftime call. -# today_fmt = '%B %d, %Y' - -# List of patterns, relative to source directory, that match files and -# directories to ignore when looking for source files. -exclude_patterns = [] - -# The reST default role (used for this markup: `text`) to use for all -# documents. -# default_role = None - -# If true, '()' will be appended to :func: etc. cross-reference text. -# add_function_parentheses = True - -# If true, the current module name will be prepended to all description -# unit titles (such as .. function::). -# add_module_names = True - -# If true, sectionauthor and moduleauthor directives will be shown in the -# output. They are ignored by default. -# show_authors = False - -# The name of the Pygments (syntax highlighting) style to use. -pygments_style = 'sphinx' - -# A list of ignored prefixes for module index sorting. -# modindex_common_prefix = [] - -# If true, keep warnings as "system message" paragraphs in the built documents. -# keep_warnings = False - - -# -- Options for HTML output ---------------------------------------------- - -# The theme to use for HTML and HTML Help pages. See the documentation for -# a list of builtin themes. -html_theme = 'openstackdocs' - -# Theme options are theme-specific and customize the look and feel of a theme -# further. For a list of options available for each theme, see the -# documentation. -# html_theme_options = {} - -# Add any paths that contain custom themes here, relative to this directory. -# html_theme_path = [] - -# The name for this set of Sphinx documents. If None, it defaults to -# " v documentation". -# html_title = None - -# A shorter title for the navigation bar. Default is the same as html_title. 
-# html_short_title = None - -# The name of an image file (relative to this directory) to place at the top -# of the sidebar. -# html_logo = None - -# The name of an image file (within the static path) to use as favicon of the -# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32 -# pixels large. -# html_favicon = None - -# Add any paths that contain custom static files (such as style sheets) here, -# relative to this directory. They are copied after the builtin static files, -# so a file named "default.css" will overwrite the builtin "default.css". -html_static_path = ['_static'] - -# Add any extra paths that contain custom files (such as robots.txt or -# .htaccess) here, relative to this directory. These files are copied -# directly to the root of the documentation. -# html_extra_path = [] - -# If not '', a 'Last updated on:' timestamp is inserted at every page bottom, -# using the given strftime format. -html_last_updated_fmt = '%Y-%m-%d %H:%M' - -# If true, SmartyPants will be used to convert quotes and dashes to -# typographically correct entities. -# html_use_smartypants = True - -# Custom sidebar templates, maps document names to template names. -# html_sidebars = {} - -# Additional templates that should be rendered to pages, maps page names to -# template names. -# html_additional_pages = {} - -# If false, no module index is generated. -# html_domain_indices = True - -# If false, no index is generated. -# html_use_index = True - -# If true, the index is split into individual pages for each letter. -# html_split_index = False - -# If true, links to the reST sources are added to the pages. -# html_show_sourcelink = True - -# If true, "Created using Sphinx" is shown in the HTML footer. Default is True. -# html_show_sphinx = True - -# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True. 
-# html_show_copyright = True - -# If true, an OpenSearch description file will be output, and all pages will -# contain a tag referring to it. The value of this option must be the -# base URL from which the finished HTML is served. -# html_use_opensearch = '' - -# This is the file name suffix for HTML files (e.g. ".xhtml"). -# html_file_suffix = None - -# Output file base name for HTML help builder. -htmlhelp_basename = 'CinderReleaseNotesdoc' - - -# -- Options for LaTeX output --------------------------------------------- - -latex_elements = { - # The paper size ('letterpaper' or 'a4paper'). - # 'papersize': 'letterpaper', - - # The font size ('10pt', '11pt' or '12pt'). - # 'pointsize': '10pt', - - # Additional stuff for the LaTeX preamble. - # 'preamble': '', -} - -# Grouping the document tree into LaTeX files. List of tuples -# (source start file, target name, title, -# author, documentclass [howto, manual, or own class]). -latex_documents = [ - ('index', 'CinderReleaseNotes.tex', u'Cinder Release Notes Documentation', - u'Cinder Developers', 'manual'), -] - -# The name of an image file (relative to this directory) to place at the top of -# the title page. -# latex_logo = None - -# For "manual" documents, if this is true, then toplevel headings are parts, -# not chapters. -# latex_use_parts = False - -# If true, show page references after internal links. -# latex_show_pagerefs = False - -# If true, show URL addresses after external links. -# latex_show_urls = False - -# Documents to append as an appendix to all manuals. -# latex_appendices = [] - -# If false, no module index is generated. -# latex_domain_indices = True - - -# -- Options for manual page output --------------------------------------- - -# One entry per manual page. List of tuples -# (source start file, name, description, authors, manual section). 
-man_pages = [ - ('index', 'cinderreleasenotes', u'Cinder Release Notes Documentation', - [u'Cinder Developers'], 1) -] - -# If true, show URL addresses after external links. -# man_show_urls = False - - -# -- Options for Texinfo output ------------------------------------------- - -# Grouping the document tree into Texinfo files. List of tuples -# (source start file, target name, title, author, -# dir menu entry, description, category) -texinfo_documents = [ - ('index', 'CinderReleaseNotes', u'Cinder Release Notes Documentation', - u'Cinder Developers', 'CinderReleaseNotes', - 'One line description of project.', - 'Miscellaneous'), -] - -# Documents to append as an appendix to all manuals. -# texinfo_appendices = [] - -# If false, no module index is generated. -# texinfo_domain_indices = True - -# How to display URL addresses: 'footnote', 'no', or 'inline'. -# texinfo_show_urls = 'footnote' - -# If true, do not generate a @detailmenu in the "Top" node's menu. -# texinfo_no_detailmenu = False - -# -- Options for Internationalization output ------------------------------ -locale_dirs = ['locale/'] diff --git a/releasenotes/source/index.rst b/releasenotes/source/index.rst deleted file mode 100644 index 76f7dd31f..000000000 --- a/releasenotes/source/index.rst +++ /dev/null @@ -1,12 +0,0 @@ -====================== - Cinder Release Notes -====================== - -.. toctree:: - :maxdepth: 1 - - unreleased - ocata - newton - mitaka - liberty diff --git a/releasenotes/source/liberty.rst b/releasenotes/source/liberty.rst deleted file mode 100644 index 36217be84..000000000 --- a/releasenotes/source/liberty.rst +++ /dev/null @@ -1,6 +0,0 @@ -============================== - Liberty Series Release Notes -============================== - -.. 
release-notes:: - :branch: origin/stable/liberty diff --git a/releasenotes/source/mitaka.rst b/releasenotes/source/mitaka.rst deleted file mode 100644 index e54560965..000000000 --- a/releasenotes/source/mitaka.rst +++ /dev/null @@ -1,6 +0,0 @@ -=================================== - Mitaka Series Release Notes -=================================== - -.. release-notes:: - :branch: origin/stable/mitaka diff --git a/releasenotes/source/newton.rst b/releasenotes/source/newton.rst deleted file mode 100644 index 97036ed25..000000000 --- a/releasenotes/source/newton.rst +++ /dev/null @@ -1,6 +0,0 @@ -=================================== - Newton Series Release Notes -=================================== - -.. release-notes:: - :branch: origin/stable/newton diff --git a/releasenotes/source/ocata.rst b/releasenotes/source/ocata.rst deleted file mode 100644 index ebe62f42e..000000000 --- a/releasenotes/source/ocata.rst +++ /dev/null @@ -1,6 +0,0 @@ -=================================== - Ocata Series Release Notes -=================================== - -.. release-notes:: - :branch: origin/stable/ocata diff --git a/releasenotes/source/unreleased.rst b/releasenotes/source/unreleased.rst deleted file mode 100644 index cd22aabcc..000000000 --- a/releasenotes/source/unreleased.rst +++ /dev/null @@ -1,5 +0,0 @@ -============================== - Current Series Release Notes -============================== - -.. release-notes:: diff --git a/requirements.txt b/requirements.txt deleted file mode 100644 index 2066217f7..000000000 --- a/requirements.txt +++ /dev/null @@ -1,64 +0,0 @@ -# The order of packages is significant, because pip processes them in the order -# of appearance. Changing the order has an impact on the overall integration -# process, which may cause wedges in the gate later. 
- -pbr!=2.1.0,>=2.0.0 # Apache-2.0 -Babel!=2.4.0,>=2.3.4 # BSD -decorator>=3.4.0 # BSD -enum34;python_version=='2.7' or python_version=='2.6' or python_version=='3.3' # BSD -eventlet!=0.18.3,!=0.20.1,<0.21.0,>=0.18.2 # MIT -greenlet>=0.3.2 # MIT -httplib2>=0.7.5 # MIT -iso8601>=0.1.11 # MIT -ipaddress>=1.0.7;python_version<'3.3' # PSF -keystoneauth1>=3.0.1 # Apache-2.0 -keystonemiddleware>=4.12.0 # Apache-2.0 -lxml!=3.7.0,>=2.3 # BSD -oauth2client!=4.0.0,>=1.5.0 # Apache-2.0 -oslo.config!=4.3.0,!=4.4.0,>=4.0.0 # Apache-2.0 -oslo.concurrency>=3.8.0 # Apache-2.0 -oslo.context>=2.14.0 # Apache-2.0 -oslo.db>=4.24.0 # Apache-2.0 -oslo.log>=3.22.0 # Apache-2.0 -oslo.messaging!=5.25.0,>=5.24.2 # Apache-2.0 -oslo.middleware>=3.27.0 # Apache-2.0 -oslo.policy>=1.23.0 # Apache-2.0 -oslo.privsep!=1.17.0,>=1.9.0 # Apache-2.0 -oslo.reports>=0.6.0 # Apache-2.0 -oslo.rootwrap>=5.0.0 # Apache-2.0 -oslo.serialization!=2.19.1,>=1.10.0 # Apache-2.0 -oslo.service>=1.10.0 # Apache-2.0 -oslo.utils>=3.20.0 # Apache-2.0 -oslo.versionedobjects>=1.17.0 # Apache-2.0 -osprofiler>=1.4.0 # Apache-2.0 -paramiko>=2.0 # LGPLv2.1+ -Paste # MIT -PasteDeploy>=1.5.0 # MIT -psutil>=3.2.2 # BSD -pyparsing>=2.1.0 # MIT -python-barbicanclient>=4.0.0 # Apache-2.0 -python-glanceclient>=2.7.0 # Apache-2.0 -python-keystoneclient>=3.8.0 # Apache-2.0 -python-novaclient>=9.0.0 # Apache-2.0 -python-swiftclient>=3.2.0 # Apache-2.0 -pytz>=2013.6 # MIT -requests>=2.14.2 # Apache-2.0 -retrying!=1.3.0,>=1.2.3 # Apache-2.0 -Routes>=2.3.1 # MIT -taskflow>=2.7.0 # Apache-2.0 -rtslib-fb!=2.1.60,!=2.1.61,>=2.1.43 # Apache-2.0 -simplejson>=2.2.0 # MIT -six>=1.9.0 # MIT -SQLAlchemy!=1.1.5,!=1.1.6,!=1.1.7,!=1.1.8,>=1.0.10 # MIT -sqlalchemy-migrate>=0.11.0 # Apache-2.0 -stevedore>=1.20.0 # Apache-2.0 -suds-jurko>=0.6 # LGPLv3+ -WebOb>=1.7.1 # MIT -oslo.i18n!=3.15.2,>=2.1.0 # Apache-2.0 -oslo.vmware>=2.17.0 # Apache-2.0 -os-brick>=1.15.1 # Apache-2.0 -os-win>=2.0.0 # Apache-2.0 -tooz>=1.47.0 # Apache-2.0 
-google-api-python-client>=1.4.2 # Apache-2.0 -castellan>=0.7.0 # Apache-2.0 -cryptography>=1.6 # BSD/Apache-2.0 diff --git a/setup.cfg b/setup.cfg deleted file mode 100644 index e79da8898..000000000 --- a/setup.cfg +++ /dev/null @@ -1,98 +0,0 @@ -[metadata] -name = cinder -summary = OpenStack Block Storage -description-file = - README.rst -author = OpenStack -author-email = openstack-dev@lists.openstack.org -home-page = http://docs.openstack.org/developer/cinder/ -classifier = - Environment :: OpenStack - Intended Audience :: Information Technology - Intended Audience :: System Administrators - License :: OSI Approved :: Apache Software License - Operating System :: POSIX :: Linux - Programming Language :: Python - Programming Language :: Python :: 2 - Programming Language :: Python :: 2.7 - -[global] -setup-hooks = - pbr.hooks.setup_hook - -[files] -packages = - cinder - -[entry_points] -cinder.scheduler.filters = - AvailabilityZoneFilter = cinder.scheduler.filters.availability_zone_filter:AvailabilityZoneFilter - CapabilitiesFilter = cinder.scheduler.filters.capabilities_filter:CapabilitiesFilter - CapacityFilter = cinder.scheduler.filters.capacity_filter:CapacityFilter - DifferentBackendFilter = cinder.scheduler.filters.affinity_filter:DifferentBackendFilter - DriverFilter = cinder.scheduler.filters.driver_filter:DriverFilter - JsonFilter = cinder.scheduler.filters.json_filter:JsonFilter - RetryFilter = cinder.scheduler.filters.ignore_attempted_hosts_filter:IgnoreAttemptedHostsFilter - SameBackendFilter = cinder.scheduler.filters.affinity_filter:SameBackendFilter - InstanceLocalityFilter = cinder.scheduler.filters.instance_locality_filter:InstanceLocalityFilter -cinder.scheduler.weights = - AllocatedCapacityWeigher = cinder.scheduler.weights.capacity:AllocatedCapacityWeigher - CapacityWeigher = cinder.scheduler.weights.capacity:CapacityWeigher - ChanceWeigher = cinder.scheduler.weights.chance:ChanceWeigher - GoodnessWeigher = 
cinder.scheduler.weights.goodness:GoodnessWeigher - VolumeNumberWeigher = cinder.scheduler.weights.volume_number:VolumeNumberWeigher -oslo.config.opts = - cinder = cinder.opts:list_opts -oslo.config.opts.defaults = - cinder = cinder.common.config:set_middleware_defaults -console_scripts = - cinder-api = cinder.cmd.api:main - cinder-backup = cinder.cmd.backup:main - cinder-manage = cinder.cmd.manage:main - cinder-rootwrap = oslo_rootwrap.cmd:main - cinder-rtstool = cinder.cmd.rtstool:main - cinder-scheduler = cinder.cmd.scheduler:main - cinder-volume = cinder.cmd.volume:main - cinder-volume-usage-audit = cinder.cmd.volume_usage_audit:main -wsgi_scripts = - cinder-wsgi = cinder.wsgi.wsgi:initialize_application -# These are for backwards compat with Havana notification_driver configuration values -oslo_messaging.notify.drivers = - cinder.openstack.common.notifier.log_notifier = oslo_messaging.notify._impl_log:LogDriver - cinder.openstack.common.notifier.no_op_notifier = oslo_messaging.notify._impl_noop:NoOpDriver - cinder.openstack.common.notifier.rpc_notifier2 = oslo_messaging.notify.messaging:MessagingV2Driver - cinder.openstack.common.notifier.rpc_notifier = oslo_messaging.notify.messaging:MessagingDriver - cinder.openstack.common.notifier.test_notifier = oslo_messaging.notify._impl_test:TestDriver -# These are for backwards compatibility with Juno middleware configurations -oslo_middleware = - cinder.openstack.common.middleware.request_id = oslo_middleware.request_id - -cinder.database.migration_backend = - sqlalchemy = oslo_db.sqlalchemy.migration -# In-tree Tempest test entry point -tempest.test_plugins = - cinder_tests = cinder.tests.tempest.plugin:CinderTempestPlugin - -[build_sphinx] -all_files = 1 -build-dir = doc/build -source-dir = doc/source - -[egg_info] -tag_build = -tag_date = 0 -tag_svn_revision = 0 - -[compile_catalog] -directory = cinder/locale -domain = cinder - -[update_catalog] -domain = cinder -output_dir = cinder/locale -input_file = 
cinder/locale/cinder.pot - -[extract_messages] -keywords = _ gettext ngettext l_ lazy_gettext -mapping_file = babel.cfg -output_file = cinder/locale/cinder.pot diff --git a/setup.py b/setup.py deleted file mode 100644 index 566d84432..000000000 --- a/setup.py +++ /dev/null @@ -1,29 +0,0 @@ -# Copyright (c) 2013 Hewlett-Packard Development Company, L.P. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -# THIS FILE IS MANAGED BY THE GLOBAL REQUIREMENTS REPO - DO NOT EDIT -import setuptools - -# In python < 2.7.4, a lazy loading of package `pbr` will break -# setuptools if some other modules registered functions in `atexit`. -# solution from: http://bugs.python.org/issue15881#msg170215 -try: - import multiprocessing # noqa -except ImportError: - pass - -setuptools.setup( - setup_requires=['pbr>=2.0.0'], - pbr=True) diff --git a/test-requirements.txt b/test-requirements.txt deleted file mode 100644 index c60b0da0c..000000000 --- a/test-requirements.txt +++ /dev/null @@ -1,27 +0,0 @@ -# The order of packages is significant, because pip processes them in the order -# of appearance. Changing the order has an impact on the overall integration -# process, which may cause wedges in the gate later. 
- -# Install bounded pep8/pyflakes first, then let flake8 install -hacking!=0.13.0,<0.14,>=0.12.0 # Apache-2.0 - -coverage!=4.4,>=4.0 # Apache-2.0 -ddt>=1.0.1 # MIT -fixtures>=3.0.0 # Apache-2.0/BSD -mock>=2.0 # BSD -os-api-ref>=1.0.0 # Apache-2.0 -oslotest>=1.10.0 # Apache-2.0 -sphinx>=1.6.2 # BSD -PyMySQL>=0.7.6 # MIT License -psycopg2>=2.5 # LGPL/ZPL -python-subunit>=0.0.18 # Apache-2.0/BSD -testtools>=1.4.0 # MIT -testresources>=0.2.4 # Apache-2.0/BSD -testscenarios>=0.4 # Apache-2.0/BSD -openstackdocstheme>=1.11.0 # Apache-2.0 -oslo.versionedobjects[fixtures]>=1.17.0 # Apache-2.0 -os-testr>=0.8.0 # Apache-2.0 -tempest>=16.1.0 # Apache-2.0 -bandit>=1.1.0 # Apache-2.0 -reno!=2.3.1,>=1.8.0 # Apache-2.0 -doc8 # Apache-2.0 diff --git a/tools/check_exec.py b/tools/check_exec.py deleted file mode 100755 index d423f3d9a..000000000 --- a/tools/check_exec.py +++ /dev/null @@ -1,42 +0,0 @@ -#!/usr/bin/python -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -# Print a list and return with error if any executable files are found. -# Compatible with both python 2 and 3. 
- -import os.path -import stat -import sys - -if len(sys.argv) < 2: - print("Usage: %s " % sys.argv[0]) - sys.exit(1) - -directory = sys.argv[1] - -executable = [] - -for root, mydir, myfile in os.walk(directory): - for f in myfile: - path = os.path.join(root, f) - mode = os.lstat(path).st_mode - if stat.S_IXUSR & mode: - executable.append(path) - -if executable: - print("Executable files found:") - for f in executable: - print(f) - - sys.exit(1) diff --git a/tools/colorizer.py b/tools/colorizer.py deleted file mode 100755 index 4498d9a5e..000000000 --- a/tools/colorizer.py +++ /dev/null @@ -1,332 +0,0 @@ -#!/usr/bin/env python -# Copyright (c) 2013, Nebula, Inc. -# Copyright 2010 United States Government as represented by the -# Administrator of the National Aeronautics and Space Administration. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -# -# Colorizer Code is borrowed from Twisted: -# Copyright (c) 2001-2010 Twisted Matrix Laboratories. 
-# -# Permission is hereby granted, free of charge, to any person obtaining -# a copy of this software and associated documentation files (the -# "Software"), to deal in the Software without restriction, including -# without limitation the rights to use, copy, modify, merge, publish, -# distribute, sublicense, and/or sell copies of the Software, and to -# permit persons to whom the Software is furnished to do so, subject to -# the following conditions: -# -# The above copyright notice and this permission notice shall be -# included in all copies or substantial portions of the Software. -# -# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, -# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF -# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND -# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE -# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION -# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION -# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. - -"""Display a subunit stream through a colorized unittest test runner.""" - -import heapq -import subunit -import sys -import unittest - -import testtools - - -class _AnsiColorizer(object): - """ANSI colorizer that wraps a stream object. - - colorizer is an object that loosely wraps around a stream, allowing - callers to write text to the stream in a particular color. - - Colorizer classes must implement C{supported()} and C{write(text, color)}. - """ - _colors = dict(black=30, red=31, green=32, yellow=33, - blue=34, magenta=35, cyan=36, white=37) - - def __init__(self, stream): - self.stream = stream - - def supported(cls, stream=sys.stdout): - """Check if platform is supported. - - A class method that returns True if the current platform supports - coloring terminal output using this method. - - Returns False otherwise. 
- """ - if not stream.isatty(): - return False # auto color only on TTYs - try: - import curses - except ImportError: - return False - else: - try: - try: - return curses.tigetnum("colors") > 2 - except curses.error: - curses.setupterm() - return curses.tigetnum("colors") > 2 - except Exception: - # guess false in case of error - return False - supported = classmethod(supported) - - def write(self, text, color): - """Write the given text to the stream in the given color. - - @param text: Text to be written to the stream. - - @param color: A string label for a color. e.g. 'red', 'white'. - """ - color = self._colors[color] - self.stream.write('\x1b[%s;1m%s\x1b[0m' % (color, text)) - - -class _Win32Colorizer(object): - """See _AnsiColorizer docstring.""" - def __init__(self, stream): - import win32console - red, green, blue, bold = (win32console.FOREGROUND_RED, - win32console.FOREGROUND_GREEN, - win32console.FOREGROUND_BLUE, - win32console.FOREGROUND_INTENSITY) - self.stream = stream - self.screenBuffer = win32console.GetStdHandle( - win32console.STD_OUT_HANDLE) - self._colors = { - 'normal': red | green | blue, - 'red': red | bold, - 'green': green | bold, - 'blue': blue | bold, - 'yellow': red | green | bold, - 'magenta': red | blue | bold, - 'cyan': green | blue | bold, - 'white': red | green | blue | bold - } - - def supported(cls, stream=sys.stdout): - try: - import win32console - screenBuffer = win32console.GetStdHandle( - win32console.STD_OUT_HANDLE) - except ImportError: - return False - import pywintypes - try: - screenBuffer.SetConsoleTextAttribute( - win32console.FOREGROUND_RED | - win32console.FOREGROUND_GREEN | - win32console.FOREGROUND_BLUE) - except pywintypes.error: - return False - else: - return True - supported = classmethod(supported) - - def write(self, text, color): - color = self._colors[color] - self.screenBuffer.SetConsoleTextAttribute(color) - self.stream.write(text) - self.screenBuffer.SetConsoleTextAttribute(self._colors['normal']) - - 
-class _NullColorizer(object): - """See _AnsiColorizer docstring.""" - def __init__(self, stream): - self.stream = stream - - def supported(cls, stream=sys.stdout): - return True - supported = classmethod(supported) - - def write(self, text, color): - self.stream.write(text) - - -def get_elapsed_time_color(elapsed_time): - if elapsed_time > 1.0: - return 'red' - elif elapsed_time > 0.25: - return 'yellow' - else: - return 'green' - - -class NovaTestResult(testtools.TestResult): - def __init__(self, stream, descriptions, verbosity): - super(NovaTestResult, self).__init__() - self.stream = stream - self.showAll = verbosity > 1 - self.num_slow_tests = 10 - self.slow_tests = [] # this is a fixed-sized heap - self.colorizer = None - # NOTE(vish): reset stdout for the terminal check - stdout = sys.stdout - sys.stdout = sys.__stdout__ - for colorizer in [_Win32Colorizer, _AnsiColorizer, _NullColorizer]: - if colorizer.supported(): - self.colorizer = colorizer(self.stream) - break - sys.stdout = stdout - self.start_time = None - self.last_time = {} - self.results = {} - self.last_written = None - - def _writeElapsedTime(self, elapsed): - color = get_elapsed_time_color(elapsed) - self.colorizer.write(" %.2f" % elapsed, color) - - def _addResult(self, test, *args): - try: - name = test.id() - except AttributeError: - name = 'Unknown.unknown' - test_class, test_name = name.rsplit('.', 1) - - elapsed = (self._now() - self.start_time).total_seconds() - item = (elapsed, test_class, test_name) - if len(self.slow_tests) >= self.num_slow_tests: - heapq.heappushpop(self.slow_tests, item) - else: - heapq.heappush(self.slow_tests, item) - - self.results.setdefault(test_class, []) - self.results[test_class].append((test_name, elapsed) + args) - self.last_time[test_class] = self._now() - self.writeTests() - - def _writeResult(self, test_name, elapsed, long_result, color, - short_result, success): - if self.showAll: - self.stream.write(' %s' % str(test_name).ljust(66)) - 
self.colorizer.write(long_result, color) - if success: - self._writeElapsedTime(elapsed) - self.stream.writeln() - else: - self.colorizer.write(short_result, color) - - def addSuccess(self, test): - super(NovaTestResult, self).addSuccess(test) - self._addResult(test, 'OK', 'green', '.', True) - - def addFailure(self, test, err): - if test.id() == 'process-returncode': - return - super(NovaTestResult, self).addFailure(test, err) - self._addResult(test, 'FAIL', 'red', 'F', False) - - def addError(self, test, err): - super(NovaTestResult, self).addFailure(test, err) - self._addResult(test, 'ERROR', 'red', 'E', False) - - def addSkip(self, test, reason=None, details=None): - super(NovaTestResult, self).addSkip(test, reason, details) - self._addResult(test, 'SKIP', 'blue', 'S', True) - - def startTest(self, test): - self.start_time = self._now() - super(NovaTestResult, self).startTest(test) - - def writeTestCase(self, cls): - if not self.results.get(cls): - return - if cls != self.last_written: - self.colorizer.write(cls, 'white') - self.stream.writeln() - for result in self.results[cls]: - self._writeResult(*result) - del self.results[cls] - self.stream.flush() - self.last_written = cls - - def writeTests(self): - time = self.last_time.get(self.last_written, self._now()) - if not self.last_written or (self._now() - time).total_seconds() > 2.0: - diff = 3.0 - while diff > 2.0: - classes = self.results.keys() - oldest = min(classes, key=lambda x: self.last_time[x]) - diff = (self._now() - self.last_time[oldest]).total_seconds() - self.writeTestCase(oldest) - else: - self.writeTestCase(self.last_written) - - def done(self): - self.stopTestRun() - - def stopTestRun(self): - for cls in list(self.results): - self.writeTestCase(cls) - self.stream.writeln() - self.writeSlowTests() - - def writeSlowTests(self): - # Pare out 'fast' tests - slow_tests = [item for item in self.slow_tests - if get_elapsed_time_color(item[0]) != 'green'] - if slow_tests: - slow_total_time = 
sum(item[0] for item in slow_tests) - slow = ("Slowest %i tests took %.2f secs:" - % (len(slow_tests), slow_total_time)) - self.colorizer.write(slow, 'yellow') - self.stream.writeln() - last_cls = None - # sort by name - for elapsed, cls, name in sorted(slow_tests, - key=lambda x: x[1] + x[2]): - if cls != last_cls: - self.colorizer.write(cls, 'white') - self.stream.writeln() - last_cls = cls - self.stream.write(' %s' % str(name).ljust(68)) - self._writeElapsedTime(elapsed) - self.stream.writeln() - - def printErrors(self): - if self.showAll: - self.stream.writeln() - self.printErrorList('ERROR', self.errors) - self.printErrorList('FAIL', self.failures) - - def printErrorList(self, flavor, errors): - for test, err in errors: - self.colorizer.write("=" * 70, 'red') - self.stream.writeln() - self.colorizer.write(flavor, 'red') - self.stream.writeln(": %s" % test.id()) - self.colorizer.write("-" * 70, 'red') - self.stream.writeln() - self.stream.writeln("%s" % err) - - -test = subunit.ProtocolTestCase(sys.stdin, passthrough=None) - -if sys.version_info[0:2] <= (2, 6): - runner = unittest.TextTestRunner(verbosity=2) -else: - runner = unittest.TextTestRunner(verbosity=2, resultclass=NovaTestResult) - -if runner.run(test).wasSuccessful(): - exit_code = 0 -else: - exit_code = 1 -sys.exit(exit_code) diff --git a/tools/config/check_uptodate.sh b/tools/config/check_uptodate.sh deleted file mode 100755 index 77dcce064..000000000 --- a/tools/config/check_uptodate.sh +++ /dev/null @@ -1,34 +0,0 @@ -#!/usr/bin/env bash - -if [ ! -e cinder/opts.py ]; then - echo -en "\n\n#################################################" - echo -en "\nERROR: cinder/opts.py file is missing." - echo -en "\n#################################################\n" - exit 1 -else - mv cinder/opts.py cinder/opts.py.orig - tox -e genopts &> /dev/null - if [ $? -ne 0 ]; then - echo -en "\n\n#################################################" - echo -en "\nERROR: Non-zero exit from generate_cinder_opts.py." 
- echo -en "\n See output above for details.\n" - echo -en "#################################################\n" - mv cinder/opts.py.orig cinder/opts.py - exit 1 - else - diff cinder/opts.py.orig cinder/opts.py - if [ $? -ne 0 ]; then - echo -en "\n\n########################################################" - echo -en "\nERROR: Configuration options change detected." - echo -en "\n A new cinder/opts.py file must be generated." - echo -en "\n Run 'tox -e genopts' from the base directory" - echo -en "\n and add the result to your commit." - echo -en "\n########################################################\n\n" - rm cinder/opts.py - mv cinder/opts.py.orig cinder/opts.py - exit 1 - else - rm cinder/opts.py.orig - fi - fi -fi diff --git a/tools/enable-pre-commit-hook.sh b/tools/enable-pre-commit-hook.sh deleted file mode 100755 index 4c4a20dd9..000000000 --- a/tools/enable-pre-commit-hook.sh +++ /dev/null @@ -1,42 +0,0 @@ -#!/bin/sh - -# Copyright 2011 OpenStack Foundation -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -PRE_COMMIT_SCRIPT=.git/hooks/pre-commit - -make_hook() { - echo "exec tox -e fast8" >> $PRE_COMMIT_SCRIPT - chmod +x $PRE_COMMIT_SCRIPT - - if [ -w $PRE_COMMIT_SCRIPT -a -x $PRE_COMMIT_SCRIPT ]; then - echo "pre-commit hook was created successfully" - else - echo "unable to create pre-commit hook" - fi -} - -# NOTE(jk0): Make sure we are in cinder's root directory before adding the hook. -if [ ! 
-d ".git" ]; then - echo "unable to find .git; moving up a directory" - cd .. - if [ -d ".git" ]; then - make_hook - else - echo "still unable to find .git; hook not created" - fi -else - make_hook -fi - diff --git a/tools/fast8.sh b/tools/fast8.sh deleted file mode 100755 index 2b3e22abd..000000000 --- a/tools/fast8.sh +++ /dev/null @@ -1,15 +0,0 @@ -#!/bin/bash - -cd $(dirname "$0")/.. -CHANGED=$(git diff --name-only HEAD~1 | tr '\n' ' ') - -# Skip files that don't exist -# (have been git rm'd) -CHECK="" -for FILE in $CHANGED; do - if [ -f "$FILE" ]; then - CHECK="$CHECK $FILE" - fi -done - -diff -u --from-file /dev/null $CHECK | flake8 --diff diff --git a/tools/generate_driver_list.py b/tools/generate_driver_list.py deleted file mode 100755 index 3285f2fff..000000000 --- a/tools/generate_driver_list.py +++ /dev/null @@ -1,151 +0,0 @@ -#! /usr/bin/env python -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -"""Generate list of Cinder drivers""" - -import argparse -import os -import json - -from cinder.interface import util -from cinder import objects - - -# Object loading can cause issues loading drivers, force it up front -objects.register_all() - - -parser = argparse.ArgumentParser(prog="generate_driver_list") - -parser.add_argument("--format", default='str', choices=['str', 'dict'], - help="Output format type") - -# Keep backwards compatibilty with the gate-docs test -# The tests pass ['docs'] on the cmdln, but it's never been used. 
-parser.add_argument("output_list", default=None, nargs='?') - -CI_WIKI_ROOT = "https://wiki.openstack.org/wiki/ThirdPartySystems/" - - -class Output(object): - - def __init__(self, base_dir, output_list): - # At this point we don't care what was passed in, just a trigger - # to write this out to the doc tree for now - self.driver_file = None - if output_list: - self.driver_file = open( - '%s/doc/source/drivers.rst' % base_dir, 'w+') - self.driver_file.write('===================\n') - self.driver_file.write('Available Drivers\n') - self.driver_file.write('===================\n\n') - - def __enter__(self): - return self - - def __exit__(self, type, value, traceback): - if self.driver_file: - self.driver_file.close() - - def write(self, text): - if self.driver_file: - self.driver_file.write('%s\n' % text) - else: - print(text) - - -def format_description(desc, output): - desc = desc or '' - lines = desc.rstrip('\n').split('\n') - for line in lines: - output.write(' %s' % line) - - -def print_drivers(drivers, config_name, output): - for driver in sorted(drivers, key=lambda x: x.class_fqn): - driver_name = driver.class_name - if not driver.supported: - driver_name += " (unsupported)" - output.write(driver_name) - output.write('-' * len(driver_name)) - if driver.version: - output.write('* Version: %s' % driver.version) - output.write('* %s=%s' % (config_name, driver.class_fqn)) - if driver.ci_wiki_name and 'Cinder_Jenkins' not in driver.ci_wiki_name: - output.write('* CI info: %s%s' % (CI_WIKI_ROOT, - driver.ci_wiki_name)) - output.write('* Description:') - format_description(driver.desc, output) - output.write('') - output.write('') - - -def output_str(cinder_root, args): - with Output(cinder_root, args.output_list) as output: - output.write('Volume Drivers') - output.write('==============') - print_drivers(util.get_volume_drivers(), 'volume_driver', output) - - output.write('Backup Drivers') - output.write('==============') - print_drivers(util.get_backup_drivers(), 
'backup_driver', output) - - output.write('FC Zone Manager Drivers') - output.write('=======================') - print_drivers(util.get_fczm_drivers(), 'zone_driver', output) - - -def collect_driver_info(driver): - """Build the dictionary that describes this driver.""" - - info = {'name': driver.class_name, - 'version': driver.version, - 'fqn': driver.class_fqn, - 'description': driver.desc, - 'ci_wiki_name': driver.ci_wiki_name, - 'supported': driver.supported} - - return info - - -def output_dict(): - """Output the results as a JSON dict.""" - - driver_list = [] - drivers = util.get_volume_drivers() - for driver in drivers: - driver_list.append(collect_driver_info(driver)) - - print(json.dumps(driver_list)) - - -def main(): - tools_dir = os.path.dirname(os.path.abspath(__file__)) - cinder_root = os.path.dirname(tools_dir) - cur_dir = os.getcwd() - os.chdir(cinder_root) - args = parser.parse_args() - - try: - if args.format == 'str': - output_str(cinder_root, args) - elif args.format == 'dict': - output_dict() - - finally: - os.chdir(cur_dir) - - -if __name__ == '__main__': - main() diff --git a/tools/hooks/README b/tools/hooks/README deleted file mode 100644 index edde18cb1..000000000 --- a/tools/hooks/README +++ /dev/null @@ -1,4 +0,0 @@ -These are hooks to be used by the OpenStack infra test system. These scripts -may be called by certain jobs at important times to do extra testing, setup, -etc. They are really only relevant within the scope of the OpenStack infra -system and are not expected to be useful to anyone else. \ No newline at end of file diff --git a/tools/hooks/run_multi_backend_matrix.sh b/tools/hooks/run_multi_backend_matrix.sh deleted file mode 100755 index ad5b904a6..000000000 --- a/tools/hooks/run_multi_backend_matrix.sh +++ /dev/null @@ -1,94 +0,0 @@ -#!/bin/bash - -# Copyright (c) 2016, Hitachi, Erlon Cruz -# All Rights Reserved. 
-# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -set -x -export TEMPEST_USER=${TEMPEST_USER:-tempest} -chmod +w $BASE/new/tempest -cd $BASE/new/tempest -source $BASE/new/devstack/functions -source $BASE/new/devstack/functions-common -source $WORKSPACE/devstack-gate/functions.sh -source $BASE/new/cinder/tools/hooks/utils.sh -export TEMPEST_CONFIG=$BASE/new/tempest/etc/tempest.conf - -# Disable bash verbose so we have a cleaner output. Also, exit on error must -# be disable as we will run several tests that can return error. -set +x +e - -function configure_tempest_backends { - be1=$1 - be2=$2 - echo "Configuring tempest conf in ${TEMPEST_CONFIG}" - iniset -sudo $TEMPEST_CONFIG 'volume' 'backend_names' ${be1},${be2} - -} - -BACKENDS='lvm ceph nfs' -RGEX="(.*test_volume_retype_with_migration.*|.*test_volume_migrate_attached.*)" -final_result=0 -final_message='Migrations tests finished SUCCESSFULLY!' -declare -A TEST_RESULTS -start_time=`date +%s` -for be1 in ${BACKENDS}; do - for be2 in ${BACKENDS}; do - if [ ${be1} != ${be2} ]; then - configure_tempest_backends ${be1} ${be2} - echo "============================================================" - echo "Testing multibackend features: ${be1} vs ${be2}" - echo "============================================================" - run_tempest "${be1} vs ${be2}" ${RGEX} - result=$? 
- # If any of the test fail, we keep running but return failure as - # the final result - if [ ${result} -ne 0 ]; then - TEST_RESULTS[${be1},${be2}]="FAILURE" - final_message='Migrations tests FAILED!' - final_result=1 - else - TEST_RESULTS[${be1},${be2}]="SUCCESS" - fi - fi - done -done -end_time=`date +%s` -elapsed=$(expr $(expr ${end_time} - ${start_time}) / 60) - -# Print the results -num_rows=$(echo $BACKENDS | wc -w) -fmt=" %15s" -echo "============================================================" -echo " ${final_message} In ${elapsed} minutes." -echo "============================================================" - -printf "$fmt" '' -for be1 in ${BACKENDS}; do - printf "$fmt" ${be1} -done -echo -for be1 in ${BACKENDS}; do - printf "$fmt" ${be1} - for be2 in ${BACKENDS}; do - if [ ${be1} == ${be2} ]; then - printf "$fmt" '---' - else - printf "$fmt" ${TEST_RESULTS[${be1},${be2}]} - fi - done - echo -done - -exit ${final_result} diff --git a/tools/hooks/utils.sh b/tools/hooks/utils.sh deleted file mode 100755 index ab42e0e78..000000000 --- a/tools/hooks/utils.sh +++ /dev/null @@ -1,10 +0,0 @@ -#!/bin/bash - -function run_tempest { - local message=$1 - local tempest_regex=$2 - sudo -H -u ${TEMPEST_USER} tox -eall -- $tempest_regex \ - --concurrency=${TEMPEST_CONCURRENCY} - exitcode=$? - return ${exitcode} -} diff --git a/tools/lintstack.py b/tools/lintstack.py deleted file mode 100755 index 6bf31b0bf..000000000 --- a/tools/lintstack.py +++ /dev/null @@ -1,270 +0,0 @@ -#!/usr/bin/env python -# Copyright (c) 2013, AT&T Labs, Yun Mao -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. 
You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -"""pylint error checking.""" - -from __future__ import print_function - -import json -import re -import sys - -from pylint import lint -from pylint.reporters import text -from six.moves import cStringIO as StringIO - -ignore_codes = [ - # Note(maoy): E1103 is error code related to partial type inference - "E1103" -] - -ignore_messages = [ - # Note(maoy): this error message is the pattern of E0202. It should be - # ignored for cinder.tests modules - "An attribute affected in cinder.tests", - - # Note(fengqian): this error message is the pattern of [E0611]. - "No name 'urllib' in module '_MovedItems'", - - # Note(e0ne): this error message is for SQLAlchemy update() calls - # It should be ignored because use six module to keep py3.X compatibility. - # in DB schema migrations. - "No value passed for parameter 'dml'", - - # Note(xyang): these error messages are for the code [E1101]. - # They should be ignored because 'sha256' and 'sha224' are functions in - # 'hashlib'. - "Module 'hashlib' has no 'sha256' member", - "Module 'hashlib' has no 'sha224' member", - - # Note(aarefiev): this error message is for SQLAlchemy rename calls in - # DB migration(033_add_encryption_unique_key). - "Instance of 'Table' has no 'rename' member", - - # NOTE(geguileo): these error messages are for code [E1101], and they can - # be ignored because a SQLAlchemy ORM class will have __table__ member - # during runtime. 
- "Class 'ConsistencyGroup' has no '__table__' member", - "Class 'Cgsnapshot' has no '__table__' member", - "Class 'Group' has no '__table__' member", - "Class 'GroupSnapshot' has no '__table__' member", - - # NOTE(xyang): this error message is for code [E1120] when checking if - # there are already 'groups' entries in 'quota_classes' `in DB migration - # (078_add_groups_and_group_volume_type_mapping_table). - "No value passed for parameter 'functions' in function call", - - # NOTE(dulek): This one is related to objects. - "No value passed for parameter 'id' in function call", - - # NOTE(geguileo): v3 common manage class for volumes and snapshots - "Instance of 'ManageResource' has no 'volume_api' member", - "Instance of 'ManageResource' has no '_list_manageable_view' member", -] - -# Note(maoy): We ignore cinder.tests for now due to high false -# positive rate. -ignore_modules = ["cinder/tests/"] - -# Note(thangp): E0213, E1101, and E1102 should be ignored for only -# cinder.object modules. E0213 and E1102 are error codes related to -# the first argument of a method, but should be ignored because the method -# is a remotable class method. E1101 is error code related to accessing a -# non-existent member of an object, but should be ignored because the object -# member is created dynamically. -objects_ignore_codes = ["E0213", "E1101", "E1102"] -# NOTE(dulek): We're ignoring messages related to non-existent objects in -# cinder.objects namespace. This is because this namespace is populated when -# registering the objects, and pylint is unable to detect that. 
-objects_ignore_regexp = "Module 'cinder.objects' has no '.*' member" -objects_ignore_modules = ["cinder/objects/"] - -KNOWN_PYLINT_EXCEPTIONS_FILE = "tools/pylint_exceptions" - - -class LintOutput(object): - - _cached_filename = None - _cached_content = None - - def __init__(self, filename, lineno, line_content, code, message, - lintoutput): - self.filename = filename - self.lineno = lineno - self.line_content = line_content - self.code = code - self.message = message - self.lintoutput = lintoutput - - @classmethod - def from_line(cls, line): - m = re.search(r"(\S+):(\d+): \[(\S+)(, \S+)?] (.*)", line) - matched = m.groups() - filename, lineno, code, message = (matched[0], int(matched[1]), - matched[2], matched[-1]) - if cls._cached_filename != filename: - with open(filename) as f: - cls._cached_content = list(f.readlines()) - cls._cached_filename = filename - line_content = cls._cached_content[lineno - 1].rstrip() - return cls(filename, lineno, line_content, code, message, - line.rstrip()) - - @classmethod - def from_msg_to_dict(cls, msg): - """From the output of pylint msg, to a dict, where each key - is a unique error identifier, value is a list of LintOutput - """ - result = {} - for line in msg.splitlines(): - obj = cls.from_line(line) - if obj.is_ignored(): - continue - key = obj.key() - if key not in result: - result[key] = [] - result[key].append(obj) - return result - - def is_ignored(self): - if self.code in ignore_codes: - return True - if any(self.filename.startswith(name) for name in ignore_modules): - return True - if any(msg in self.message for msg in ignore_messages): - return True - if re.match(objects_ignore_regexp, self.message): - return True - if (self.code in objects_ignore_codes and - any(self.filename.startswith(name) - for name in objects_ignore_modules)): - return True - if (self.code in objects_ignore_codes and - any(self.filename.startswith(name) - for name in objects_ignore_modules)): - return True - return False - - def key(self): - 
if self.code in ["E1101", "E1103"]: - # These two types of errors are like Foo class has no member bar. - # We discard the source code so that the error will be ignored - # next time another Foo.bar is encountered. - return self.message, "" - return self.message, self.line_content.strip() - - def json(self): - return json.dumps(self.__dict__) - - def review_str(self): - return ("File %(filename)s\nLine %(lineno)d:%(line_content)s\n" - "%(code)s: %(message)s" % - {'filename': self.filename, - 'lineno': self.lineno, - 'line_content': self.line_content, - 'code': self.code, - 'message': self.message}) - - -class ErrorKeys(object): - - @classmethod - def print_json(cls, errors, output=sys.stdout): - print("# automatically generated by tools/lintstack.py", file=output) - for i in sorted(errors.keys()): - print(json.dumps(i), file=output) - - @classmethod - def from_file(cls, filename): - keys = set() - for line in open(filename): - if line and line[0] != "#": - d = json.loads(line) - keys.add(tuple(d)) - return keys - - -def run_pylint(): - buff = StringIO() - reporter = text.ParseableTextReporter(output=buff) - args = ["--include-ids=y", "-E", "cinder"] - lint.Run(args, reporter=reporter, exit=False) - val = buff.getvalue() - buff.close() - return val - - -def generate_error_keys(msg=None): - print("Generating", KNOWN_PYLINT_EXCEPTIONS_FILE) - if msg is None: - msg = run_pylint() - errors = LintOutput.from_msg_to_dict(msg) - with open(KNOWN_PYLINT_EXCEPTIONS_FILE, "w") as f: - ErrorKeys.print_json(errors, output=f) - - -def validate(newmsg=None): - print("Loading", KNOWN_PYLINT_EXCEPTIONS_FILE) - known = ErrorKeys.from_file(KNOWN_PYLINT_EXCEPTIONS_FILE) - if newmsg is None: - print("Running pylint. Be patient...") - newmsg = run_pylint() - errors = LintOutput.from_msg_to_dict(newmsg) - - print("Unique errors reported by pylint: was %d, now %d." 
- % (len(known), len(errors))) - passed = True - for err_key, err_list in errors.items(): - for err in err_list: - if err_key not in known: - print(err.lintoutput) - print() - passed = False - if passed: - print("Congrats! pylint check passed.") - redundant = known - set(errors.keys()) - if redundant: - print("Extra credit: some known pylint exceptions disappeared.") - for i in sorted(redundant): - print(json.dumps(i)) - print("Consider regenerating the exception file if you will.") - else: - print("Please fix the errors above. If you believe they are false " - "positives, run 'tools/lintstack.py generate' to overwrite.") - sys.exit(1) - - -def usage(): - print("""Usage: tools/lintstack.py [generate|validate] - To generate pylint_exceptions file: tools/lintstack.py generate - To validate the current commit: tools/lintstack.py - """) - - -def main(): - option = "validate" - if len(sys.argv) > 1: - option = sys.argv[1] - if option == "generate": - generate_error_keys() - elif option == "validate": - validate() - else: - usage() - - -if __name__ == "__main__": - main() diff --git a/tools/lintstack.sh b/tools/lintstack.sh deleted file mode 100755 index d8591d03d..000000000 --- a/tools/lintstack.sh +++ /dev/null @@ -1,59 +0,0 @@ -#!/usr/bin/env bash - -# Copyright (c) 2012-2013, AT&T Labs, Yun Mao -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -# Use lintstack.py to compare pylint errors. 
-# We run pylint twice, once on HEAD, once on the code before the latest -# commit for review. -set -e -TOOLS_DIR=$(cd $(dirname "$0") && pwd) -# Get the current branch name. -GITHEAD=`git rev-parse --abbrev-ref HEAD` -if [[ "$GITHEAD" == "HEAD" ]]; then - # In detached head mode, get revision number instead - GITHEAD=`git rev-parse HEAD` - echo "Currently we are at commit $GITHEAD" -else - echo "Currently we are at branch $GITHEAD" -fi - -cp -f $TOOLS_DIR/lintstack.py $TOOLS_DIR/lintstack.head.py - -if git rev-parse HEAD^2 2>/dev/null; then - # The HEAD is a Merge commit. Here, the patch to review is - # HEAD^2, the master branch is at HEAD^1, and the patch was - # written based on HEAD^2~1. - PREV_COMMIT=`git rev-parse HEAD^2~1` - git checkout HEAD~1 - # The git merge is necessary for reviews with a series of patches. - # If not, this is a no-op so won't hurt either. - git merge $PREV_COMMIT -else - # The HEAD is not a merge commit. This won't happen on gerrit. - # Most likely you are running against your own patch locally. - # We assume the patch to examine is HEAD, and we compare it against - # HEAD~1 - git checkout HEAD~1 -fi - -# First generate tools/pylint_exceptions from HEAD~1 -$TOOLS_DIR/lintstack.head.py generate -# Then use that as a reference to compare against HEAD -git checkout $GITHEAD -$TOOLS_DIR/lintstack.head.py -echo "Check passed. FYI: the pylint exceptions are:" -cat $TOOLS_DIR/pylint_exceptions - diff --git a/tools/test-setup.sh b/tools/test-setup.sh deleted file mode 100755 index 00c765e73..000000000 --- a/tools/test-setup.sh +++ /dev/null @@ -1,54 +0,0 @@ -#!/bin/bash -xe - -# This script will be run by OpenStack CI before unit tests are run, -# it sets up the test system as needed. -# Developers should setup their test systems in a similar way. - -# This setup needs to be run as a user that can run sudo. - -# The root password for the MySQL database; pass it in via -# MYSQL_ROOT_PW. 
-DB_ROOT_PW=${MYSQL_ROOT_PW:-insecure_slave} - -# This user and its password are used by the tests, if you change it, -# your tests might fail. -DB_USER=openstack_citest -DB_PW=openstack_citest - -sudo -H mysqladmin -u root password $DB_ROOT_PW - -# It's best practice to remove anonymous users from the database. If -# an anonymous user exists, then it matches first for connections and -# other connections from that host will not work. -sudo -H mysql -u root -p$DB_ROOT_PW -h localhost -e " - DELETE FROM mysql.user WHERE User=''; - FLUSH PRIVILEGES; - GRANT ALL PRIVILEGES ON *.* - TO '$DB_USER'@'%' identified by '$DB_PW' WITH GRANT OPTION;" - -# Now create our database. -mysql -u $DB_USER -p$DB_PW -h 127.0.0.1 -e " - SET default_storage_engine=MYISAM; - DROP DATABASE IF EXISTS openstack_citest; - CREATE DATABASE openstack_citest CHARACTER SET utf8;" - -# Same for PostgreSQL - -# Setup user -root_roles=$(sudo -H -u postgres psql -t -c " - SELECT 'HERE' from pg_roles where rolname='$DB_USER'") -if [[ ${root_roles} == *HERE ]];then - sudo -H -u postgres psql -c "ALTER ROLE $DB_USER WITH SUPERUSER LOGIN PASSWORD '$DB_PW'" -else - sudo -H -u postgres psql -c "CREATE ROLE $DB_USER WITH SUPERUSER LOGIN PASSWORD '$DB_PW'" -fi - -# Store password for tests -cat << EOF > $HOME/.pgpass -*:*:*:$DB_USER:$DB_PW -EOF -chmod 0600 $HOME/.pgpass - -# Now create our database -psql -h 127.0.0.1 -U $DB_USER -d template1 -c "DROP DATABASE IF EXISTS openstack_citest" -createdb -h 127.0.0.1 -U $DB_USER -l C -T template0 -E utf8 openstack_citest diff --git a/tools/with_venv.sh b/tools/with_venv.sh deleted file mode 100755 index 94e05c127..000000000 --- a/tools/with_venv.sh +++ /dev/null @@ -1,7 +0,0 @@ -#!/bin/bash -tools_path=${tools_path:-$(dirname $0)} -venv_path=${venv_path:-${tools_path}} -venv_dir=${venv_name:-/../.venv} -TOOLS=${tools_path} -VENV=${venv:-${venv_path}/${venv_dir}} -source ${VENV}/bin/activate && "$@" diff --git a/tox.ini b/tox.ini deleted file mode 100644 index 
13f5b6ce7..000000000 --- a/tox.ini +++ /dev/null @@ -1,147 +0,0 @@ -[tox] -minversion = 2.0 -skipsdist = True -envlist = py35,py27,compliance,pep8 - -[testenv] -# Note the hash seed is set to 0 until cinder can be tested with a -# random hash seed successfully. -setenv = VIRTUAL_ENV={envdir} - PYTHONHASHSEED=0 - PYTHONWARNINGS=default::DeprecationWarning -usedevelop = True -install_command = pip install -c{env:UPPER_CONSTRAINTS_FILE:https://git.openstack.org/cgit/openstack/requirements/plain/upper-constraints.txt} {opts} {packages} - -deps = -r{toxinidir}/test-requirements.txt - -# By default ostestr will set concurrency -# to ncpu, to specify something else use -# the concurrency= option. -# call ie: 'tox -epy27 -- --concurrency=4' -commands = - find . -type f -name "*.pyc" -delete - ostestr {posargs} - -whitelist_externals = - bash - find -passenv = *_proxy *_PROXY - -[testenv:api-ref] -# (sheel)This environment is called from CI scripts to test and publish -# the API Ref to developer.openstack.org. -whitelist_externals = rm -deps = -r{toxinidir}/test-requirements.txt -commands = - rm -rf api-ref/build - sphinx-build -W -b html -d api-ref/build/doctrees api-ref/source api-ref/build/html/ - -[testenv:releasenotes] -commands = sphinx-build -a -E -W -d releasenotes/build/doctrees -b html releasenotes/source releasenotes/build/html - -[testenv:functional] -setenv = - OS_TEST_PATH = ./cinder/tests/functional - -[testenv:functional-py35] -basepython= - py35: python3.5 -setenv = - {[testenv:functional]setenv} - -[testenv:compliance] -setenv = - OS_TEST_PATH = ./cinder/tests/compliance - -[testenv:pep8] -commands = - flake8 {posargs} . 
- {toxinidir}/tools/config/check_uptodate.sh - {toxinidir}/tools/check_exec.py {toxinidir}/cinder - -[testenv:fast8] -# Use same environment directory as pep8 env to save space and install time -envdir = {toxworkdir}/pep8 -commands = - {toxinidir}/tools/fast8.sh - -[testenv:pylint] -deps = -r{toxinidir}/requirements.txt - pylint==0.26.0 -commands = bash tools/lintstack.sh - -[testenv:cover] -# Also do not run test_coverage_ext tests while gathering coverage as those -# tests conflict with coverage. -commands = - python setup.py testr --coverage \ - --testr-args='^(?!.*test.*coverage).*$' - coverage report - -[testenv:genconfig] -sitepackages = False -envdir = {toxworkdir}/pep8 -commands = oslo-config-generator --config-file=cinder/config/cinder-config-generator.conf - -[testenv:genopts] -sitepackages = False -envdir = {toxworkdir}/pep8 -commands = python cinder/config/generate_cinder_opts.py - -[testenv:venv] -commands = {posargs} - -[testenv:docs] -commands = - python setup.py build_sphinx - rm -rf api-ref/build - sphinx-build -W -b html -d api-ref/build/doctrees api-ref/source api-ref/build/html/ - doc8 --ignore D001 --ignore-path .tox --ignore-path *.egg-info --ignore-path doc/src/api --ignore-path doc/source/drivers.rst --ignore-path doc/build --ignore-path .eggs/*/EGG-INFO/*.txt -e txt -e rst -whitelist_externals = rm - -[testenv:gendriverlist] -sitepackages = False -envdir = {toxworkdir}/venv -commands = python {toxinidir}/tools/generate_driver_list.py - -[testenv:bandit] -deps = -r{toxinidir}/test-requirements.txt -commands = bandit -r cinder -n5 -x tests -ll - -[testenv:bandit-baseline] -envdir = {toxworkdir}/bandit -commands = bandit-baseline -r cinder -n5 -x tests -ii -ll - -[testenv:bindep] -# Do not install any requirements. We want this to be fast and work even if -# system dependencies are missing, since it's used to tell you what system -# dependencies are missing! 
This also means that bindep must be installed -# separately, outside of the requirements files, and develop mode disabled -# explicitly to avoid unnecessarily installing the checked-out repo too (this -# further relies on "tox.skipsdist = True" above). -deps = bindep -commands = bindep test -usedevelop = False - -[flake8] -# Following checks are ignored on purpose. -# -# E251 unexpected spaces around keyword / parameter equals -# reason: no improvement in readability -ignore = E251 -# H904 Delay string interpolations at logging calls. -enable-extensions = H106,H203,H904 -exclude = .git,.venv,.tox,dist,tools,doc/ext,*egg,build -max-complexity=30 - -[hacking] -local-check-factory = cinder.hacking.checks.factory -import_exceptions = cinder.i18n - -[testenv:pip-missing-reqs] -# do not install test-requirements as that will pollute the virtualenv for -# determining missing packages -# this also means that pip-missing-reqs must be installed separately, outside -# of the requirements.txt files -deps = pip_check_reqs>=2.0.1 -commands = pip-missing-reqs -d --ignore-file=cinder/tests/* cinder